From aaaea14214ed46ac60cf7ef5766374f660b05777 Mon Sep 17 00:00:00 2001
From: Nguyen Anh Quynh
Date: Sun, 3 Oct 2021 22:14:44 +0800
Subject: [PATCH] import Unicorn2

---
 .gitignore | 6 +-
 .travis.yml | 147 +-
 CMakeLists.txt | 1252 +-
 CREDITS.TXT | 6 +-
 ChangeLog | 71 -
 Makefile | 424 -
 README.md | 4 +-
 SPONSORS.TXT | 7 -
 TODO | 11 +
 bindings/README | 8 +-
 bindings/const_generator.py | 14 +-
 .../dotnet/UnicornManaged/Const/Common.fs | 13 +-
 bindings/dotnet/UnicornManaged/Const/Ppc.fs | 48 +
 bindings/dotnet/UnicornManaged/Const/Riscv.fs | 150 +
 .../Gee.External.Capstone.Proxy.dll | Bin 6656 -> 0 bytes
 bindings/dotnet/UnicornSamples/capstone.dll | Bin 2484736 -> 0 bytes
 bindings/go/unicorn/ppc_const.go | 43 +
 bindings/go/unicorn/riscv_const.go | 145 +
 bindings/go/unicorn/unicorn_const.go | 13 +-
 bindings/java/Makefile.build | 4 +-
 bindings/java/unicorn/PpcConst.java | 46 +
 bindings/java/unicorn/RiscvConst.java | 148 +
 bindings/java/unicorn/UnicornConst.java | 13 +-
 bindings/pascal/unicorn/PpcConst.pas | 48 +
 bindings/pascal/unicorn/RiscvConst.pas | 150 +
 bindings/pascal/unicorn/UnicornConst.pas | 13 +-
 bindings/pascal/unicorn/Unicorn_dyn.pas | 2 +-
 bindings/python/build_wheel.sh | 14 -
 bindings/python/sample_arm64.py | 2 +-
 bindings/python/sample_ppc.py | 65 +
 bindings/python/sample_riscv.py | 71 +
 bindings/python/sample_x86.py | 39 +-
 bindings/python/setup.py | 113 +-
 bindings/python/unicorn/arm_const.py | 23 +-
 bindings/python/unicorn/ppc_const.py | 40 +
 bindings/python/unicorn/riscv_const.py | 142 +
 bindings/python/unicorn/unicorn.py | 298 +-
 bindings/python/unicorn/unicorn_const.py | 13 +-
 bindings/python/unicorn/x86_const.py | 4 +-
 .../lib/unicorn_engine/ppc_const.rb | 43 +
 .../lib/unicorn_engine/riscv_const.rb | 145 +
 .../lib/unicorn_engine/unicorn_const.rb | 13 +-
 bindings/rust/COPYING | 339 -
 bindings/rust/Cargo.toml | 27 -
 bindings/rust/README.md | 45 -
 bindings/rust/build.rs | 20 -
 bindings/rust/src/arm.rs | 146 -
 bindings/rust/src/arm64.rs | 321 -
 bindings/rust/src/ffi.rs | 230 -
 bindings/rust/src/lib.rs | 784 -
 bindings/rust/src/m68k.rs | 24 -
 bindings/rust/src/mips.rs | 246 -
 bindings/rust/src/ppc.rs | 42 -
 bindings/rust/src/sparc.rs | 94 -
 bindings/rust/src/unicorn_const.rs | 158 -
 bindings/rust/src/x86.rs | 281 -
 bindings/rust/tests/unicorn.rs | 683 -
 bindings/vb6/uc_def.bas | 5 +-
 cmake.sh | 37 +-
 config.mk | 30 -
 docs/COMPILE-CMAKE.md | 57 -
 docs/COMPILE-NIX.md | 164 -
 docs/COMPILE-WINDOWS.md | 184 -
 docs/COMPILE.md | 140 +-
 .../Micro Unicorn-Engine API Documentation.md | 5471 ++-
 docs/unicorn-logo-text.png | Bin 0 -> 49545 bytes
 docs/unicorn-logo.png | Bin 60383 -> 34508 bytes
 docs/unicorn-logo.svg | 31 +
 docs/unicorn1-logo.png | Bin 0 -> 60383 bytes
 docs/{unicorn-logo.txt => unicorn1-logo.txt} | 0
 glib_compat/README | 2 +
 glib_compat/garray.c | 1649 +
 glib_compat/garray.h | 99 +
 glib_compat/ghash.h | 77 +
 {qemu => glib_compat}/glib_compat.c | 1213 +-
 {qemu/include => glib_compat}/glib_compat.h | 75 +-
 glib_compat/glist.c | 154 +
 glib_compat/glist.h | 44 +
 glib_compat/gmacros.h | 59 +
 glib_compat/gmem.c | 257 +
 glib_compat/gmem.h | 111 +
 glib_compat/gmessages.h | 35 +
 glib_compat/gnode.h | 39 +
 glib_compat/gpattern.c | 400 +
 glib_compat/gpattern.h | 34 +
 glib_compat/grand.c | 384 +
 glib_compat/grand.h | 37 +
 glib_compat/gslice.c | 91 +
 glib_compat/gslice.h | 36 +
 glib_compat/gtestutils.c | 34 +
 glib_compat/gtestutils.h | 57 +
 glib_compat/gtree.c | 1255 +
 glib_compat/gtree.h | 55 +
 glib_compat/gtypes.h | 80 +
 include/qemu.h | 27 +-
 include/uc_priv.h | 134 +-
 include/unicorn/arm.h | 21 +
 include/unicorn/platform.h | 47 +-
 include/unicorn/ppc.h | 62 +
 include/unicorn/riscv.h | 167 +
 include/unicorn/unicorn.h | 157 +-
 include/unicorn/x86.h | 4 +-
 make.sh | 138 -
 mingw-w64.cmake | 17 +
 msvc.bat | 3 -
 msvc/.gitignore | 3 -
 msvc/README.TXT | 273 -
 .../aarch64-softmmu/config-target.h | 0
 .../aarch64eb-softmmu/config-target.h | 0
 .../{unicorn => }/arm-softmmu/config-target.h | 0
 .../armeb-softmmu/config-target.h | 0
 msvc/{unicorn => }/config-host.h | 7 +-
 .../m68k-softmmu/config-target.h | 0
 .../mips-softmmu/config-target.h | 0
 .../mips64-softmmu/config-target.h | 0
 .../mips64el-softmmu/config-target.h | 0
 .../mipsel-softmmu/config-target.h | 0
 msvc/ppc-softmmu/config-target.h | 6 +
 msvc/ppc64-softmmu/config-target.h | 6 +
 msvc/riscv32-softmmu/config-target.h | 6 +
 msvc/riscv64-softmmu/config-target.h | 6 +
 msvc/samples/mem_apis/mem_apis.vcxproj | 175 -
 .../samples/mem_apis/mem_apis.vcxproj.filters | 6 -
 msvc/samples/sample_arm/sample_arm.vcxproj | 175 -
 .../sample_arm/sample_arm.vcxproj.filters | 6 -
 .../samples/sample_arm64/sample_arm64.vcxproj | 175 -
 .../sample_arm64/sample_arm64.vcxproj.filters | 6 -
 .../sample_arm64eb/sample_arm64eb.vcxproj | 175 -
 .../sample_arm64eb.vcxproj.filters | 6 -
 .../samples/sample_armeb/sample_armeb.vcxproj | 173 -
 .../sample_armeb/sample_armeb.vcxproj.filters | 6 -
 .../sample_batch_reg/sample_batch_reg.vcxproj | 175 -
 .../sample_batch_reg.vcxproj.filters | 6 -
 msvc/samples/sample_m68k/sample_m68k.vcxproj | 175 -
 .../sample_m68k/sample_m68k.vcxproj.filters | 6 -
 msvc/samples/sample_mips/sample_mips.vcxproj | 175 -
 .../sample_mips/sample_mips.vcxproj.filters | 6 -
 .../samples/sample_sparc/sample_sparc.vcxproj | 175 -
 .../sample_sparc/sample_sparc.vcxproj.filters | 6 -
 msvc/samples/sample_x86/sample_x86.vcxproj | 192 -
 .../sample_x86/sample_x86.vcxproj.filters | 6 -
 .../sample_x86_32_gdt_and_seg_regs.vcxproj | 175 -
 ...le_x86_32_gdt_and_seg_regs.vcxproj.filters | 6 -
 msvc/samples/shellcode/shellcode.vcxproj | 175 -
 .../shellcode/shellcode.vcxproj.filters | 6 -
 .../sparc-softmmu/config-target.h | 0
 .../sparc64-softmmu/config-target.h | 0
 msvc/unicorn.sln | 370 -
 .../aarch64-softmmu/aarch64-softmmu.vcxproj | 238 -
 .../aarch64-softmmu.vcxproj.filters | 149 -
 .../aarch64eb-softmmu.vcxproj | 238 -
 .../aarch64eb-softmmu.vcxproj.filters | 149 -
 msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj | 234 -
 .../arm-softmmu/arm-softmmu.vcxproj.filters | 137 -
 .../armeb-softmmu/armeb-softmmu.vcxproj | 234 -
 .../armeb-softmmu.vcxproj.filters | 137 -
 msvc/unicorn/{unicorn => }/dllmain.cpp | 0
 .../unicorn/m68k-softmmu/m68k-softmmu.vcxproj | 225 -
 .../m68k-softmmu/m68k-softmmu.vcxproj.filters | 110 -
 .../unicorn/mips-softmmu/mips-softmmu.vcxproj | 236 -
 .../mips-softmmu/mips-softmmu.vcxproj.filters | 128 -
 .../mips64-softmmu/mips64-softmmu.vcxproj | 236 -
 .../mips64-softmmu.vcxproj.filters | 128 -
 .../mips64el-softmmu/mips64el-softmmu.vcxproj | 236 -
 .../mips64el-softmmu.vcxproj.filters | 128 -
 .../mipsel-softmmu/mipsel-softmmu.vcxproj | 236 -
 .../mipsel-softmmu.vcxproj.filters | 128 -
 msvc/unicorn/prebuild_script.bat | 21 -
 msvc/unicorn/qapi-types.c | 293 -
 msvc/unicorn/qapi-types.h | 228 -
 msvc/unicorn/qapi-visit.c | 428 -
 msvc/unicorn/qapi-visit.h | 51 -
 .../sparc-softmmu/sparc-softmmu.vcxproj | 229 -
 .../sparc-softmmu.vcxproj.filters | 122 -
 .../sparc64-softmmu/sparc64-softmmu.vcxproj | 230 -
 .../sparc64-softmmu.vcxproj.filters | 125 -
 msvc/unicorn/unicorn/unicorn.vcxproj | 373 -
 msvc/unicorn/unicorn/unicorn.vcxproj.filters | 505 -
 .../unicorn_static/unicorn_static.vcxproj | 377 -
 .../unicorn_static.vcxproj.filters | 504 -
 .../x86_64-softmmu/x86_64-softmmu.vcxproj | 242 -
 .../x86_64-softmmu.vcxproj.filters | 164 -
 .../x86_64-softmmu/config-target.h | 0
 nmake.bat | 2 +-
 pkgconfig.mk | 13 -
 qemu/.editorconfig | 43 +
 qemu/CODING_STYLE | 107 -
 qemu/CODING_STYLE.rst | 641 +
 qemu/COPYING.LIB | 22 +-
 qemu/HACKING | 159 -
 qemu/LICENSE | 28 +-
 qemu/MAINTAINERS | 2916 ++
 qemu/Makefile | 115 -
 qemu/Makefile.objs | 12 -
 qemu/Makefile.target | 84 -
 qemu/VERSION | 2 +-
 qemu/aarch64.h | 5899 ++-
 qemu/aarch64eb.h | 5899 ++-
 qemu/accel.c | 130 -
 qemu/accel/tcg/atomic_template.h | 358 +
 qemu/accel/tcg/cpu-exec-common.c | 58 +
 qemu/accel/tcg/cpu-exec.c | 581 +
 qemu/accel/tcg/cputlb.c | 2420 ++
 qemu/accel/tcg/tcg-all.c | 39 +
 qemu/accel/tcg/tcg-runtime-gvec.c | 1402 +
 qemu/{ => accel/tcg}/tcg-runtime.c | 79 +-
 qemu/accel/tcg/tcg-runtime.h | 261 +
 qemu/accel/tcg/translate-all.c | 1960 +
 qemu/accel/tcg/translate-all.h | 35 +
 qemu/accel/tcg/translator.c | 168 +
 qemu/arm.h | 4851 +--
 qemu/armeb.h | 4851 +--
 qemu/configure | 2071 +-
 qemu/cpu-exec.c | 463 -
 qemu/cputlb.c | 426 -
 qemu/{util => crypto}/aes.c | 597 +-
 qemu/crypto/init.c | 94 +
 qemu/default-configs/aarch64-softmmu.mak | 0
 qemu/default-configs/aarch64eb-softmmu.mak | 0
 qemu/default-configs/arm-softmmu.mak | 0
 qemu/default-configs/armeb-softmmu.mak | 0
 qemu/default-configs/m68k-softmmu.mak | 0
 qemu/default-configs/mips-softmmu.mak | 0
 qemu/default-configs/mips64-softmmu.mak | 0
 qemu/default-configs/mips64el-softmmu.mak | 0
 qemu/default-configs/mipsel-softmmu.mak | 0
 qemu/default-configs/sparc-softmmu.mak | 0
 qemu/default-configs/sparc64-softmmu.mak | 0
 qemu/default-configs/x86_64-softmmu.mak | 3 -
 qemu/docs/memory.txt | 244 -
 qemu/exec-vary.c | 69 +
 qemu/exec.c | 2662 +-
 qemu/fpu/softfloat-specialize.h | 1161 -
 qemu/fpu/softfloat-specialize.inc.c | 1083 +
 qemu/fpu/softfloat.c | 8225 ++--
 qemu/gen_all_header.sh | 4 -
 qemu/header_gen.py | 4085 --
 qemu/hw/Makefile.objs | 4 -
 qemu/hw/arm/Makefile.objs | 2 -
 qemu/hw/arm/tosa.c | 45 -
 qemu/hw/arm/virt.c | 74 -
 qemu/hw/core/Makefile.objs | 3 -
 qemu/hw/core/cpu.c | 154 +
 qemu/hw/core/machine.c | 47 -
 qemu/hw/core/qdev.c | 344 -
 qemu/hw/i386/Makefile.objs | 1 -
 qemu/hw/i386/pc.c | 181 -
 qemu/hw/i386/pc_piix.c | 78 -
 qemu/hw/{mips/addr.c => i386/x86.c} | 26 +-
 qemu/hw/intc/Makefile.objs | 1 -
 qemu/hw/intc/apic.c | 230 -
 qemu/hw/intc/apic_common.c | 274 -
 qemu/hw/m68k/Makefile.objs | 1 -
 qemu/hw/m68k/dummy_m68k.c | 50 -
 qemu/hw/mips/Makefile.objs | 2 -
 qemu/hw/mips/mips_r4k.c | 57 -
 qemu/hw/ppc/ppc.c | 1569 +
 qemu/hw/ppc/ppc_booke.c | 373 +
 qemu/hw/sparc/Makefile.objs | 1 -
 qemu/hw/sparc/leon3.c | 72 -
 qemu/hw/sparc64/Makefile.objs | 1 -
 qemu/include/config.h | 2 -
 qemu/include/{qemu => crypto}/aes.h | 17 +-
 qemu/{qemu-log.c => include/crypto/init.h} | 37 +-
 qemu/include/crypto/random.h | 33 +
 qemu/include/disas/dis-asm.h | 519 +
 qemu/include/elf.h | 1227 +-
 qemu/include/exec/address-spaces.h | 35 -
 qemu/include/exec/cpu-all.h | 312 +-
 qemu/include/exec/cpu-common.h | 114 +-
 qemu/include/exec/cpu-defs.h | 234 +-
 qemu/include/exec/cpu_ldst.h | 467 +-
 qemu/include/exec/cpu_ldst_template.h | 193 -
 qemu/include/exec/cputlb.h | 28 +-
 qemu/include/exec/exec-all.h | 662 +-
 qemu/include/exec/gen-icount.h | 99 +-
 qemu/include/exec/helper-gen.h | 54 +-
 qemu/include/exec/helper-head.h | 72 +-
 qemu/include/exec/helper-proto.h | 17 +-
 qemu/include/exec/helper-tcg.h | 58 +-
 qemu/include/exec/hwaddr.h | 3 +-
 qemu/include/exec/ioport.h | 37 +-
 qemu/include/exec/memattrs.h | 71 +
 qemu/include/exec/memop.h | 134 +
 qemu/include/exec/memory-internal.h | 37 +-
 qemu/include/exec/memory.h | 1166 +-
 qemu/include/exec/memory_ldst.inc.h | 71 +
 qemu/include/exec/memory_ldst_cached.inc.h | 126 +
 qemu/include/exec/memory_ldst_phys.inc.h | 147 +
 qemu/include/exec/poison.h | 96 +
 qemu/include/exec/ram_addr.h | 162 +-
 qemu/include/exec/ramblock.h | 25 +
 qemu/include/exec/ramlist.h | 19 +
 qemu/include/exec/softmmu-semi.h | 101 +
 qemu/include/exec/target_page.h | 23 +
 .../exec/tb-context.h} | 29 +-
 qemu/include/exec/tb-hash.h | 57 +
 qemu/include/exec/tb-lookup.h | 51 +
 qemu/include/exec/translator.h | 177 +
 qemu/include/fpu/softfloat-helpers.h | 132 +
 qemu/{ => include}/fpu/softfloat-macros.h | 324 +-
 qemu/include/fpu/softfloat-types.h | 182 +
 qemu/include/fpu/softfloat.h | 989 +-
 qemu/include/hw/arm/arm.h | 22 -
 qemu/include/hw/boards.h | 82 -
 qemu/include/hw/core/cpu.h | 615 +
 qemu/include/hw/cpu/icc_bus.h | 79 -
 qemu/include/hw/hw.h | 41 -
 qemu/include/hw/i386/apic.h | 29 -
 qemu/include/hw/i386/apic_internal.h | 147 -
 qemu/include/hw/i386/pc.h | 52 -
 qemu/include/hw/i386/topology.h | 271 +
 qemu/include/hw/m68k/m68k.h | 10 -
 qemu/include/hw/mips/cpudevs.h | 13 +-
 qemu/include/hw/mips/mips.h | 7 -
 qemu/include/hw/ppc/ppc.h | 115 +
 qemu/include/hw/qdev-core.h | 353 -
 qemu/include/hw/qdev.h | 7 -
 qemu/include/hw/registerfields.h | 99 +
 qemu/include/hw/sparc/sparc.h | 8 -
 qemu/include/libdecnumber/dconfig.h | 39 +
 qemu/include/libdecnumber/decContext.h | 255 +
 qemu/include/libdecnumber/decDPD.h | 1214 +
 qemu/include/libdecnumber/decNumber.h | 201 +
 qemu/include/libdecnumber/decNumberLocal.h | 663 +
 qemu/include/libdecnumber/dpd/decimal128.h | 99 +
 .../libdecnumber/dpd/decimal128Local.h | 47 +
 qemu/include/libdecnumber/dpd/decimal32.h | 97 +
 qemu/include/libdecnumber/dpd/decimal64.h | 99 +
 qemu/include/qapi/dealloc-visitor.h | 26 -
 qemu/include/qapi/error.h | 88 -
 qemu/include/qapi/qmp-input-visitor.h | 29 -
 qemu/include/qapi/qmp-output-visitor.h | 28 -
 qemu/include/qapi/qmp/qbool.h | 29 -
 qemu/include/qapi/qmp/qdict.h | 75 -
 qemu/include/qapi/qmp/qerror.h | 155 -
 qemu/include/qapi/qmp/qfloat.h | 29 -
 qemu/include/qapi/qmp/qint.h | 28 -
 qemu/include/qapi/qmp/qjson.h | 29 -
 qemu/include/qapi/qmp/qlist.h | 63 -
 qemu/include/qapi/qmp/qobject.h | 113 -
 qemu/include/qapi/qmp/qstring.h | 36 -
 qemu/include/qapi/qmp/types.h | 25 -
 qemu/include/qapi/string-input-visitor.h | 25 -
 qemu/include/qapi/visitor-impl.h | 67 -
 qemu/include/qapi/visitor.h | 64 -
 qemu/include/qemu-common.h | 241 +-
 qemu/include/qemu/atomic.h | 390 +-
 qemu/include/qemu/atomic128.h | 175 +
 qemu/include/qemu/bitmap.h | 240 +-
 qemu/include/qemu/bitops.h | 226 +-
 qemu/include/qemu/bswap.h | 208 +-
 qemu/include/qemu/compiler.h | 239 +-
 qemu/include/qemu/cpuid.h | 67 +
 qemu/include/qemu/crc32c.h | 3 +-
 qemu/include/qemu/ctype.h | 27 +
 qemu/include/qemu/cutils.h | 41 +
 qemu/include/qemu/guest-random.h | 56 +
 qemu/include/qemu/host-utils.h | 211 +-
 qemu/include/qemu/int128.h | 205 +-
 qemu/include/qemu/log.h | 116 +-
 qemu/include/qemu/module.h | 30 -
 qemu/include/qemu/osdep.h | 491 +-
 qemu/include/qemu/processor.h | 27 +
 qemu/include/qemu/qdist.h | 55 +
 qemu/include/qemu/qht.h | 225 +
 qemu/include/qemu/queue.h | 294 +-
 qemu/include/qemu/range.h | 259 +-
 qemu/include/qemu/thread-posix.h | 7 +-
 qemu/include/qemu/thread-win32.h | 9 +-
 qemu/include/qemu/thread.h | 10 +-
 qemu/include/qemu/timer.h | 719 +-
 qemu/include/qemu/typedefs.h | 111 +-
 qemu/include/qemu/units.h | 20 +
 qemu/include/qemu/xxhash.h | 129 +
 qemu/include/qom/cpu.h | 629 -
 qemu/include/qom/object.h | 1270 -
 qemu/include/qom/qom-qobject.h | 42 -
 qemu/include/sysemu/cpus.h | 29 +-
 qemu/include/sysemu/memory_mapping.h | 44 +-
 qemu/include/sysemu/os-win32.h | 51 +-
 qemu/include/sysemu/sysemu.h | 20 +-
 qemu/include/sysemu/tcg.h | 19 +
 qemu/include/tcg/tcg-apple-jit.h | 45 +
 qemu/include/tcg/tcg-gvec-desc.h | 54 +
 qemu/include/{sysemu/accel.h => tcg/tcg-mo.h} | 60 +-
 qemu/include/tcg/tcg-op-gvec.h | 387 +
 qemu/include/tcg/tcg-op.h | 1330 +
 qemu/{ => include}/tcg/tcg-opc.h | 157 +-
 qemu/include/tcg/tcg.h | 1561 +
 qemu/libdecnumber/decContext.c | 432 +
 qemu/libdecnumber/decNumber.c | 8196 ++++
 qemu/libdecnumber/dpd/decimal128.c | 563 +
 qemu/libdecnumber/dpd/decimal32.c | 488 +
 qemu/libdecnumber/dpd/decimal64.c | 849 +
 qemu/m68k.h | 4360 +--
 qemu/memory.c | 1616 -
 qemu/memory_ldst.inc.c | 497 +
 qemu/mips.h | 5561 +--
 qemu/mips64.h | 5561 +--
 qemu/mips64el.h | 5561 +--
 qemu/mipsel.h | 5561 +--
 qemu/ppc.h | 1707 +
 qemu/ppc64.h | 1707 +
 qemu/qapi-types.c | 293 -
 qemu/qapi-types.h | 228 -
 qemu/qapi-visit.c | 428 -
 qemu/qapi-visit.h | 51 -
 qemu/qapi/Makefile.objs | 3 -
 qemu/qapi/qapi-dealloc-visitor.c | 224 -
 qemu/qapi/qapi-visit-core.c | 313 -
 qemu/qapi/qmp-input-visitor.c | 349 -
 qemu/qapi/qmp-output-visitor.c | 241 -
 qemu/qapi/string-input-visitor.c | 325 -
 qemu/qemu-timer.c | 103 -
 qemu/qobject/Makefile.objs | 2 -
 qemu/qobject/qbool.c | 68 -
 qemu/qobject/qdict.c | 699 -
 qemu/qobject/qerror.c | 39 -
 qemu/qobject/qfloat.c | 68 -
 qemu/qobject/qint.c | 67 -
 qemu/qobject/qlist.c | 170 -
 qemu/qobject/qstring.c | 149 -
 qemu/qom/Makefile.objs | 2 -
 qemu/qom/container.c | 50 -
 qemu/qom/cpu.c | 284 -
 qemu/qom/object.c | 1691 -
 qemu/qom/qom-qobject.c | 44 -
 qemu/riscv32.h | 1373 +
 qemu/riscv64.h | 1373 +
 qemu/rules.mak | 221 +-
 qemu/scripts/create_config | 61 +-
 qemu/scripts/ordereddict.py | 132 -
 qemu/scripts/qapi-build.sh | 14 -
 qemu/scripts/qapi-schema.json | 39 -
 qemu/scripts/qapi-types.py | 464 -
 qemu/scripts/qapi-visit.py | 597 -
 qemu/scripts/qapi.py | 605 -
 qemu/scripts/qapi/common.json | 30 -
 qemu/{ => softmmu}/cpus.c | 205 +-
 qemu/{ => softmmu}/ioport.c | 84 +-
 qemu/softmmu/memory.c | 1346 +
 qemu/{ => softmmu}/memory_mapping.c | 118 +-
 qemu/{hw/sparc64/sun4u.c => softmmu/vl.c} | 67 +-
 qemu/softmmu_template.h | 1091 -
 qemu/sparc.h | 4296 +--
 qemu/sparc64.h | 4296 +--
 qemu/target-arm/Makefile.objs | 6 -
 qemu/target-arm/cpu-qom.h | 219 -
 qemu/target-arm/cpu.c | 1110 -
 qemu/target-arm/cpu.h | 1548 -
 qemu/target-arm/cpu64.c | 224 -
 qemu/target-arm/crypto_helper.c | 435 -
 qemu/target-arm/helper-a64.c | 528 -
 qemu/target-arm/helper-a64.h | 48 -
 qemu/target-arm/helper.c | 5791 ---
 qemu/target-arm/internals.h | 383 -
 qemu/target-arm/op_helper.c | 842 -
 qemu/target-arm/psci.c | 242 -
 qemu/target-arm/translate.c | 11639 ------
 qemu/target-arm/translate.h | 116 -
 qemu/target-arm/unicorn_aarch64.c | 248 -
 qemu/target-arm/unicorn_arm.c | 243 -
 qemu/target-i386/Makefile.objs | 5 -
 qemu/target-i386/cpu-qom.h | 157 -
 qemu/target-i386/cpu.c | 2633 --
 qemu/target-i386/cpu.h | 1386 -
 qemu/target-i386/excp_helper.c | 133 -
 qemu/target-i386/helper.c | 1152 -
 qemu/target-i386/mem_helper.c | 130 -
 qemu/target-i386/smm_helper.c | 317 -
 qemu/target-i386/topology.h | 134 -
 qemu/target-i386/translate.c | 8806 -----
 qemu/target-i386/unicorn.c | 1528 -
 qemu/target-m68k/Makefile.objs | 2 -
 qemu/target-m68k/cpu-qom.h | 83 -
 qemu/target-m68k/cpu.c | 242 -
 qemu/target-m68k/cpu.h | 259 -
 qemu/target-m68k/helper.c | 799 -
 qemu/target-m68k/helper.h | 52 -
 qemu/target-m68k/m68k-qreg.h | 11 -
 qemu/target-m68k/op_helper.c | 225 -
 qemu/target-m68k/translate.c | 3220 --
 qemu/target-m68k/unicorn.c | 122 -
 qemu/target-mips/Makefile.objs | 3 -
 qemu/target-mips/cpu.c | 170 -
 qemu/target-mips/cpu.h | 901 -
 qemu/target-mips/helper.c | 826 -
 qemu/target-mips/mips-defs.h | 91 -
 qemu/target-mips/msa_helper.c | 3436 --
 qemu/target-mips/op_helper.c | 3713 --
 qemu/target-mips/translate.c | 19748 ----------
 qemu/target-mips/translate_init.c | 948 -
 qemu/target-mips/unicorn.c | 169 -
 qemu/target-mips/unicorn.h | 23 -
 qemu/target-sparc/Makefile.objs | 7 -
 qemu/target-sparc/TODO | 88 -
 qemu/target-sparc/cpu-qom.h | 87 -
 qemu/target-sparc/cpu.c | 923 -
 qemu/target-sparc/helper.h | 177 -
 qemu/target-sparc/ldst_helper.c | 2460 --
 qemu/target-sparc/unicorn.c | 151 -
 qemu/target-sparc/unicorn64.c | 115 -
 qemu/target/arm/README | 5 +
 qemu/target/arm/arm-powerctl.c | 356 +
 qemu/target/arm/arm-powerctl.h | 93 +
 qemu/target/arm/arm-semi.c | 1022 +
 qemu/{target-arm => target/arm}/arm_ldst.h | 26 +-
 qemu/target/arm/cpu-param.h | 30 +
 qemu/target/arm/cpu-qom.h | 90 +
 qemu/target/arm/cpu.c | 2092 +
 qemu/target/arm/cpu.h | 3785 ++
 qemu/target/arm/cpu64.c | 388 +
 qemu/target/arm/crypto_helper.c | 695 +
 qemu/target/arm/debug_helper.c | 333 +
 qemu/target/arm/decode-a32-uncond.inc.c | 301 +
 qemu/target/arm/decode-a32.inc.c | 3375 ++
 qemu/target/arm/decode-sve.inc.c | 5080 +++
 qemu/target/arm/decode-t16.inc.c | 1141 +
 qemu/target/arm/decode-t32.inc.c | 3017 ++
 qemu/target/arm/decode-vfp-uncond.inc.c | 225 +
 qemu/target/arm/decode-vfp.inc.c | 1250 +
 qemu/target/arm/helper-a64.c | 1155 +
 qemu/target/arm/helper-a64.h | 105 +
 qemu/target/arm/helper-sve.h | 1578 +
 qemu/target/arm/helper.c | 11968 ++++++
 qemu/{target-arm => target/arm}/helper.h | 306 +-
 qemu/target/arm/internals.h | 1228 +
 .../arm}/iwmmxt_helper.c | 238 +-
 qemu/{target-arm => target/arm}/kvm-consts.h | 103 +-
 qemu/target/arm/m_helper.c | 2658 ++
 qemu/{target-arm => target/arm}/neon_helper.c | 379 +-
 qemu/{target-arm => target/arm}/op_addsub.h | 0
 qemu/target/arm/op_helper.c | 934 +
 qemu/target/arm/pauth_helper.c | 494 +
 qemu/target/arm/psci.c | 54 +
 qemu/target/arm/sve_helper.c | 5374 +++
 qemu/target/arm/tlb_helper.c | 187 +
 .../arm}/translate-a64.c | 7744 ++--
 qemu/target/arm/translate-a64.h | 129 +
 qemu/target/arm/translate-sve.c | 5588 +++
 qemu/target/arm/translate-vfp.inc.c | 2935 ++
 qemu/target/arm/translate.c | 11761 ++++++
 qemu/target/arm/translate.h | 301 +
 qemu/{target-arm => target/arm}/unicorn.h | 17 +-
 qemu/target/arm/unicorn_aarch64.c | 355 +
 qemu/target/arm/unicorn_arm.c | 495 +
 qemu/target/arm/vec_helper.c | 1265 +
 qemu/target/arm/vfp_helper.c | 1364 +
 qemu/{target-i386 => target/i386}/TODO | 0
 .../i386}/arch_memory_mapping.c | 106 +-
 qemu/target/i386/bpt_helper.c | 327 +
 qemu/{target-i386 => target/i386}/cc_helper.c | 16 +-
 .../i386}/cc_helper_template.h | 0
 qemu/target/i386/cpu-param.h | 28 +
 qemu/target/i386/cpu-qom.h | 62 +
 qemu/target/i386/cpu.c | 4855 +++
 qemu/target/i386/cpu.h | 2134 ++
 qemu/target/i386/excp_helper.c | 695 +
 .../{target-i386 => target/i386}/fpu_helper.c | 674 +-
 qemu/target/i386/helper.c | 521 +
 qemu/{target-i386 => target/i386}/helper.h | 61 +-
 .../{target-i386 => target/i386}/int_helper.c | 74 +-
 qemu/target/i386/machine.c | 23 +
 qemu/target/i386/mem_helper.c | 184 +
 .../i386}/misc_helper.c | 225 +-
 qemu/target/i386/mpx_helper.c | 138 +
 qemu/{target-i386 => target/i386}/ops_sse.h | 496 +-
 .../i386}/ops_sse_header.h | 81 +-
 .../{target-i386 => target/i386}/seg_helper.c | 1261 +-
 .../i386}/shift_helper_template.h | 0
 qemu/target/i386/smm_helper.c | 315 +
 qemu/{target-i386 => target/i386}/svm.h | 182 +-
 .../{target-i386 => target/i386}/svm_helper.c | 508 +-
 qemu/target/i386/translate.c | 9373 +++++
 qemu/target/i386/unicorn.c | 1601 +
 qemu/{target-i386 => target/i386}/unicorn.h | 6 +-
 qemu/target/i386/xsave_helper.c | 112 +
 qemu/target/m68k/cpu-param.h | 22 +
 qemu/target/m68k/cpu-qom.h | 46 +
 qemu/target/m68k/cpu.c | 307 +
 qemu/target/m68k/cpu.h | 563 +
 qemu/target/m68k/fpu_helper.c | 658 +
 qemu/target/m68k/helper.c | 1047 +
 qemu/target/m68k/helper.h | 130 +
 qemu/target/m68k/op_helper.c | 1000 +
 qemu/{target-m68k => target/m68k}/qregs.def | 9 +-
 qemu/target/m68k/softfloat.c | 2900 ++
 qemu/target/m68k/softfloat.h | 49 +
 qemu/target/m68k/softfloat_fpsp_tables.h | 642 +
 qemu/target/m68k/translate.c | 6452 ++++
 qemu/target/m68k/unicorn.c | 166 +
 qemu/{target-m68k => target/m68k}/unicorn.h | 4 +-
 qemu/{target-mips => target/mips}/TODO | 0
 qemu/target/mips/cp0_helper.c | 1692 +
 .../cputimer.c => target/mips/cp0_timer.c} | 115 +-
 qemu/target/mips/cpu-param.h | 25 +
 qemu/{target-mips => target/mips}/cpu-qom.h | 42 +-
 qemu/target/mips/cpu.c | 208 +
 qemu/target/mips/cpu.h | 1272 +
 .../{target-mips => target/mips}/dsp_helper.c | 104 +-
 qemu/target/mips/fpu_helper.c | 1910 +
 qemu/target/mips/helper.c | 1498 +
 qemu/{target-mips => target/mips}/helper.h | 401 +-
 qemu/target/mips/internal.h | 451 +
 .../{target-mips => target/mips}/lmi_helper.c | 9 +-
 qemu/target/mips/mips-defs.h | 105 +
 qemu/target/mips/msa_helper.c | 7419 ++++
 qemu/target/mips/op_helper.c | 1364 +
 qemu/target/mips/translate.c | 31409 ++++++++++++++++
 qemu/target/mips/translate_init.inc.c | 941 +
 qemu/target/mips/unicorn.c | 239 +
 qemu/target/mips/unicorn.h | 26 +
 qemu/target/ppc/compat.c | 330 +
 qemu/target/ppc/cpu-models.c | 945 +
 qemu/target/ppc/cpu-models.h | 504 +
 qemu/target/ppc/cpu-param.h | 37 +
 qemu/target/ppc/cpu-qom.h | 213 +
 qemu/target/ppc/cpu.c | 47 +
 qemu/target/ppc/cpu.h | 2625 ++
 qemu/target/ppc/dfp_helper.c | 1331 +
 qemu/target/ppc/excp_helper.c | 1344 +
 qemu/target/ppc/fpu_helper.c | 3461 ++
 qemu/target/ppc/helper.h | 761 +
 qemu/target/ppc/helper_regs.h | 184 +
 qemu/target/ppc/int_helper.c | 2973 ++
 qemu/target/ppc/internal.h | 216 +
 qemu/target/ppc/kvm_ppc.h | 467 +
 qemu/target/ppc/machine.c | 852 +
 qemu/target/ppc/mem_helper.c | 628 +
 qemu/target/ppc/mfrom_table.inc.c | 78 +
 qemu/target/ppc/mfrom_table_gen.c | 34 +
 qemu/target/ppc/misc_helper.c | 308 +
 qemu/target/ppc/mmu-book3s-v3.c | 66 +
 qemu/target/ppc/mmu-book3s-v3.h | 121 +
 qemu/target/ppc/mmu-hash32.c | 606 +
 qemu/target/ppc/mmu-hash32.h | 130 +
 qemu/target/ppc/mmu-hash64.c | 1289 +
 qemu/target/ppc/mmu-hash64.h | 161 +
 qemu/target/ppc/mmu-radix64.c | 398 +
 qemu/target/ppc/mmu-radix64.h | 69 +
 qemu/target/ppc/mmu_helper.c | 3117 ++
 qemu/target/ppc/timebase_helper.c | 206 +
 qemu/target/ppc/translate.c | 7751 ++++
 qemu/target/ppc/translate/dfp-impl.inc.c | 240 +
 qemu/target/ppc/translate/dfp-ops.inc.c | 165 +
 qemu/target/ppc/translate/fp-impl.inc.c | 1554 +
 qemu/target/ppc/translate/fp-ops.inc.c | 119 +
 qemu/target/ppc/translate/spe-impl.inc.c | 1283 +
 qemu/target/ppc/translate/spe-ops.inc.c | 105 +
 qemu/target/ppc/translate/vmx-impl.inc.c | 1606 +
 qemu/target/ppc/translate/vmx-ops.inc.c | 301 +
 qemu/target/ppc/translate/vsx-impl.inc.c | 2118 ++
 qemu/target/ppc/translate/vsx-ops.inc.c | 401 +
 qemu/target/ppc/translate_init.inc.c | 11202 ++++++
 qemu/target/ppc/unicorn.c | 225 +
 qemu/target/ppc/unicorn.h | 20 +
 qemu/target/riscv/README | 4 +
 qemu/target/riscv/cpu-param.h | 23 +
 qemu/target/riscv/cpu.c | 390 +
 qemu/target/riscv/cpu.h | 380 +
 qemu/target/riscv/cpu_bits.h | 578 +
 qemu/target/riscv/cpu_helper.c | 978 +
 qemu/target/riscv/cpu_user.h | 19 +
 qemu/target/riscv/csr.c | 1604 +
 qemu/target/riscv/fpu_helper.c | 371 +
 qemu/target/riscv/helper.h | 79 +
 .../riscv/insn_trans/trans_privileged.inc.c | 133 +
 qemu/target/riscv/insn_trans/trans_rva.inc.c | 227 +
 qemu/target/riscv/insn_trans/trans_rvd.inc.c | 473 +
 qemu/target/riscv/insn_trans/trans_rvf.inc.c | 474 +
 qemu/target/riscv/insn_trans/trans_rvi.inc.c | 613 +
 qemu/target/riscv/insn_trans/trans_rvm.inc.c | 133 +
 qemu/target/riscv/instmap.h | 369 +
 qemu/target/riscv/op_helper.c | 208 +
 qemu/target/riscv/pmp.c | 379 +
 qemu/target/riscv/pmp.h | 64 +
 qemu/target/riscv/riscv32/decode_insn16.inc.c | 477 +
 qemu/target/riscv/riscv32/decode_insn32.inc.c | 1430 +
 qemu/target/riscv/riscv64/decode_insn16.inc.c | 504 +
 qemu/target/riscv/riscv64/decode_insn32.inc.c | 1749 +
 qemu/target/riscv/translate.c | 959 +
 qemu/target/riscv/unicorn.c | 351 +
 qemu/target/riscv/unicorn.h | 21 +
 qemu/target/sparc/asi.h | 312 +
 .../sparc}/cc_helper.c | 77 +-
 qemu/target/sparc/cpu-param.h | 28 +
 qemu/target/sparc/cpu-qom.h | 48 +
 qemu/target/sparc/cpu.c | 571 +
 qemu/{target-sparc => target/sparc}/cpu.h | 353 +-
 .../sparc}/fop_helper.c | 232 +-
 qemu/{target-sparc => target/sparc}/helper.c | 90 +-
 qemu/target/sparc/helper.h | 168 +
 .../sparc}/int32_helper.c | 22 +-
 .../sparc}/int64_helper.c | 82 +-
 qemu/target/sparc/ldst_helper.c | 1878 +
 .../sparc}/mmu_helper.c | 409 +-
 .../sparc}/translate.c | 3410 +-
 qemu/target/sparc/unicorn.c | 191 +
 qemu/{target-sparc => target/sparc}/unicorn.h | 9 +-
 qemu/target/sparc/unicorn64.c | 202 +
 .../sparc}/vis_helper.c | 3 +-
 .../sparc}/win_helper.c | 126 +-
 qemu/tcg/LICENSE | 3 -
 qemu/tcg/README | 247 +-
 qemu/tcg/TODO | 14 -
 qemu/tcg/aarch64/tcg-target.c | 1814 -
 qemu/tcg/aarch64/tcg-target.h | 76 +-
 qemu/tcg/aarch64/tcg-target.inc.c | 2927 ++
 qemu/tcg/aarch64/tcg-target.opc.h | 14 +
 qemu/tcg/arm/tcg-target.h | 71 +-
 .../arm/{tcg-target.c => tcg-target.inc.c} | 1512 +-
 qemu/tcg/i386/tcg-target.c | 2471 --
 qemu/tcg/i386/tcg-target.h | 111 +-
 qemu/tcg/i386/tcg-target.inc.c | 3918 ++
 qemu/tcg/i386/tcg-target.opc.h | 35 +
 qemu/tcg/ia64/tcg-target.c | 2446 --
 qemu/tcg/ia64/tcg-target.h | 183 -
 qemu/tcg/mips/tcg-target.c | 1816 -
 qemu/tcg/mips/tcg-target.h | 100 +-
 qemu/tcg/mips/tcg-target.inc.c | 2714 ++
 qemu/tcg/optimize.c | 1243 +-
 qemu/tcg/ppc/tcg-target.h | 82 +-
 .../ppc/{tcg-target.c => tcg-target.inc.c} | 2363 +-
 qemu/tcg/ppc/tcg-target.opc.h | 33 +
 qemu/tcg/riscv/tcg-target.h | 179 +
 qemu/tcg/riscv/tcg-target.inc.c | 1920 +
 qemu/tcg/s390/tcg-target.h | 156 +-
 .../s390/{tcg-target.c => tcg-target.inc.c} | 1457 +-
 qemu/tcg/sparc/tcg-target.h | 38 +-
 .../sparc/{tcg-target.c => tcg-target.inc.c} | 798 +-
 qemu/tcg/{tcg-be-ldst.h => tcg-ldst.inc.c} | 61 +-
 qemu/tcg/tcg-op-gvec.c | 3254 ++
 qemu/tcg/tcg-op-vec.c | 806 +
 qemu/tcg/tcg-op.c | 3372 ++
 qemu/tcg/tcg-op.h | 2784 --
 qemu/tcg/tcg-pool.inc.c | 158 +
 qemu/tcg/tcg-runtime.h | 16 -
 qemu/tcg/tcg.c | 4473 ++-
 qemu/tcg/tcg.h | 1012 -
 qemu/trace/mem-internal.h | 50 +
 qemu/trace/mem.h | 35 +
 qemu/translate-all.c | 2013 -
 qemu/unicorn_common.h | 54 +-
 qemu/util/Makefile.objs | 10 -
 qemu/util/bitmap.c | 438 +-
 qemu/util/bitops.c | 40 +-
 qemu/util/cacheinfo.c | 189 +
 qemu/util/crc32c.c | 85 +-
 qemu/util/cutils.c | 109 +-
 qemu/util/error.c | 129 -
 qemu/util/getauxval.c | 13 +-
 qemu/util/guest-random.c | 81 +
 qemu/util/host-utils.c | 76 +-
 qemu/util/module.c | 57 -
 qemu/util/osdep.c | 90 +
 qemu/util/oslib-posix.c | 231 +-
 qemu/util/oslib-win32.c | 50 +-
 qemu/util/pagesize.c | 16 +
 qemu/util/qdist.c | 219 +
 qemu/util/qemu-error.c | 80 -
 qemu/util/qemu-thread-posix.c | 14 +-
 qemu/util/qemu-timer-common.c | 21 +-
 qemu/{tcg/tcg-be-null.h => util/qemu-timer.c} | 31 +-
 qemu/util/qht.c | 761 +
 qemu/util/range.c | 78 +
 qemu/vl.c | 156 -
 qemu/x86_64.h | 4806 +--
 samples/.gitignore | 5 -
 samples/mem_apis.c | 16 -
 samples/sample_all.sh | 2 -
 samples/sample_arm.c | 193 +-
 samples/sample_arm64.c | 138 +-
 samples/sample_armeb.c | 156 -
 samples/sample_batch_reg.c | 1 +
 samples/sample_m68k.c | 16 -
 samples/sample_mips.c | 16 -
 samples/{sample_arm64eb.c => sample_ppc.c} | 59 +-
 samples/sample_riscv.c | 594 +
 samples/sample_sparc.c | 16 -
 samples/sample_x86.c | 223 +-
 samples/shellcode.c | 16 -
 symbols.sh | 6296 ++++
 tests/regress/arm_apsr_access.py | 30 -
 tests/regress/arm_memcpy_neon.py | 52 +
 tests/regress/arm_wfi_first_insn_of_tb.py | 15 +
 tests/regress/hook_raises_exception.py | 39 -
 tests/regress/mem_64_c.c | 39 -
 tests/regress/mem_double_unmap.c | 48 -
 tests/regress/mem_exec.c | 278 -
 tests/regress/mem_fuzz.c | 116 -
 tests/regress/mem_map_0x100000000.c | 31 -
 tests/regress/mem_map_large.c | 17 -
 tests/regress/mem_nofree.c | 70 -
 tests/regress/mem_protect.c | 300 -
 tests/regress/mem_unmap.c | 291 -
 tests/regress/memleak_arm.c | 176 -
 tests/regress/memleak_arm64.c | 120 -
 tests/regress/memleak_m68k.c | 183 -
 tests/regress/memleak_mips.c | 169 -
 tests/regress/memleak_sparc.c | 123 -
 tests/regress/memleak_x86.c | 304 -
 tests/regress/mips_cp1.py | 13 +
 tests/regress/x86_ld_crash.py | 21 +
 tests/regress/x86_set_ip.py | 21 +
 tests/unit/.gitignore | 3 -
 tests/unit/Makefile | 65 -
 tests/unit/acutest.h | 1837 +
 tests/unit/gdt_idx.s | 3 -
 tests/unit/high_address.s | 6 -
 tests/unit/pc_change.s | 9 -
 tests/unit/tb_x86.s | 90 -
 tests/unit/test_arm.c | 249 +
 tests/unit/test_arm64.c | 8 +
 tests/unit/test_gdt_idt_x86.c | 127 -
 tests/unit/test_hang.c | 102 -
 tests/unit/test_hookcounts.c | 281 -
 tests/unit/test_m68k.c | 8 +
 tests/unit/test_mem_high.c | 131 -
 tests/unit/test_mem_map.c | 236 -
 tests/unit/test_mem_map_ptr.c | 77 -
 tests/unit/test_mips.c | 90 +
 tests/unit/test_multihook.c | 111 -
 tests/unit/test_pc_change.c | 100 -
 tests/unit/test_ppc.c | 8 +
 tests/unit/test_riscv.c | 8 +
 tests/unit/test_sanity.c | 88 -
 tests/unit/test_sparc.c | 8 +
 tests/unit/test_tb_x86.c | 306 -
 tests/unit/test_x86.c | 1194 +-
 tests/unit/test_x86_rip_bug.c | 269 -
 tests/unit/test_x86_shl_enter_leave.c | 433 -
 tests/unit/test_x86_soft_paging.c | 210 -
 tests/unit/unicorn_test.h | 43 +-
 tests/unit/x86_soft_paging_low.s | 49 -
 uc.c | 675 +-
 windows_export.bat | 43 -
 837 files changed, 368717 insertions(+), 200912 deletions(-)
 delete mode 100644 Makefile
 delete mode 100644 SPONSORS.TXT
 create mode 100644 TODO
 create mode 100644 bindings/dotnet/UnicornManaged/Const/Ppc.fs
 create mode 100644 bindings/dotnet/UnicornManaged/Const/Riscv.fs
 delete mode 100644 bindings/dotnet/UnicornSamples/Gee.External.Capstone.Proxy.dll
 delete mode 100644 bindings/dotnet/UnicornSamples/capstone.dll
 create mode 100644 bindings/go/unicorn/ppc_const.go
 create mode 100644 bindings/go/unicorn/riscv_const.go
 create mode 100644 bindings/java/unicorn/PpcConst.java
 create mode 100644 bindings/java/unicorn/RiscvConst.java
 create mode 100644 bindings/pascal/unicorn/PpcConst.pas
 create mode 100644 bindings/pascal/unicorn/RiscvConst.pas
 delete mode 100755 bindings/python/build_wheel.sh
 create mode 100755 bindings/python/sample_ppc.py
 create mode 100755 bindings/python/sample_riscv.py
 create mode 100644 bindings/python/unicorn/ppc_const.py
 create mode 100644 bindings/python/unicorn/riscv_const.py
 create mode 100644 bindings/ruby/unicorn_gem/lib/unicorn_engine/ppc_const.rb
 create mode 100644 bindings/ruby/unicorn_gem/lib/unicorn_engine/riscv_const.rb
 delete mode 100644 bindings/rust/COPYING
 delete mode 100644 bindings/rust/Cargo.toml
 delete mode 100644 bindings/rust/README.md
 delete mode 100644 bindings/rust/build.rs
 delete mode 100644 bindings/rust/src/arm.rs
 delete mode 100644 bindings/rust/src/arm64.rs
 delete mode 100644 bindings/rust/src/ffi.rs
 delete mode 100644 bindings/rust/src/lib.rs
 delete mode 100644 bindings/rust/src/m68k.rs
 delete mode 100644 bindings/rust/src/mips.rs
 delete mode 100644 bindings/rust/src/ppc.rs
 delete mode 100644 bindings/rust/src/sparc.rs
 delete mode 100644 bindings/rust/src/unicorn_const.rs
 delete mode 100644 bindings/rust/src/x86.rs
 delete mode 100644 bindings/rust/tests/unicorn.rs
 delete mode 100644 config.mk
 delete mode 100644 docs/COMPILE-CMAKE.md
 delete mode 100644 docs/COMPILE-NIX.md
 delete mode 100644 docs/COMPILE-WINDOWS.md
 create mode 100644 docs/unicorn-logo-text.png
 create mode 100644 docs/unicorn-logo.svg
 create mode 100644 docs/unicorn1-logo.png
 rename docs/{unicorn-logo.txt => unicorn1-logo.txt} (100%)
 create mode 100644 glib_compat/README
 create mode 100644 glib_compat/garray.c
 create mode 100644 glib_compat/garray.h
 create mode 100644 glib_compat/ghash.h
 rename {qemu => glib_compat}/glib_compat.c (52%)
 rename {qemu/include => glib_compat}/glib_compat.h (54%)
 create mode 100644 glib_compat/glist.c
 create mode 100644 glib_compat/glist.h
 create mode 100644 glib_compat/gmacros.h
 create mode 100644 glib_compat/gmem.c
 create mode 100644 glib_compat/gmem.h
 create mode 100644 glib_compat/gmessages.h
 create mode 100644 glib_compat/gnode.h
 create mode 100644 glib_compat/gpattern.c
 create mode 100644 glib_compat/gpattern.h
 create mode 100644 glib_compat/grand.c
 create mode 100644 glib_compat/grand.h
 create mode 100644 glib_compat/gslice.c
 create mode 100644 glib_compat/gslice.h
 create mode 100644 glib_compat/gtestutils.c
 create mode 100644 glib_compat/gtestutils.h
 create mode 100644 glib_compat/gtree.c
 create mode 100644 glib_compat/gtree.h
 create mode 100644 glib_compat/gtypes.h
 create mode 100644 include/unicorn/ppc.h
 create mode 100644 include/unicorn/riscv.h
 delete mode 100755 make.sh
 create mode 100644 mingw-w64.cmake
 delete mode 100644 msvc.bat
 delete mode 100644 msvc/.gitignore
 delete mode 100644 msvc/README.TXT
 rename msvc/{unicorn => }/aarch64-softmmu/config-target.h (100%)
 rename msvc/{unicorn => }/aarch64eb-softmmu/config-target.h (100%)
 rename msvc/{unicorn => }/arm-softmmu/config-target.h (100%)
 rename msvc/{unicorn => }/armeb-softmmu/config-target.h (100%)
 rename msvc/{unicorn => }/config-host.h (50%)
 rename msvc/{unicorn => }/m68k-softmmu/config-target.h (100%)
 rename msvc/{unicorn => }/mips-softmmu/config-target.h (100%)
 rename msvc/{unicorn => }/mips64-softmmu/config-target.h (100%)
 rename msvc/{unicorn => }/mips64el-softmmu/config-target.h (100%)
 rename msvc/{unicorn => }/mipsel-softmmu/config-target.h (100%)
 create mode 100644 msvc/ppc-softmmu/config-target.h
 create mode 100644 msvc/ppc64-softmmu/config-target.h
 create mode 100644 msvc/riscv32-softmmu/config-target.h
 create mode 100644 msvc/riscv64-softmmu/config-target.h
 delete mode 100644 msvc/samples/mem_apis/mem_apis.vcxproj
 delete mode 100644 msvc/samples/mem_apis/mem_apis.vcxproj.filters
 delete mode 100644 msvc/samples/sample_arm/sample_arm.vcxproj
 delete mode 100644 msvc/samples/sample_arm/sample_arm.vcxproj.filters
 delete mode 100644 msvc/samples/sample_arm64/sample_arm64.vcxproj
 delete mode 100644 msvc/samples/sample_arm64/sample_arm64.vcxproj.filters
 delete mode 100644 msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj
 delete mode 100644 msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj.filters
 delete mode 100644 msvc/samples/sample_armeb/sample_armeb.vcxproj
 delete mode 100644 msvc/samples/sample_armeb/sample_armeb.vcxproj.filters
 delete mode 100644 msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj
 delete mode 100644 msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj.filters
 delete mode 100644 msvc/samples/sample_m68k/sample_m68k.vcxproj
 delete mode 100644 msvc/samples/sample_m68k/sample_m68k.vcxproj.filters
 delete mode 100644 msvc/samples/sample_mips/sample_mips.vcxproj
 delete mode 100644 msvc/samples/sample_mips/sample_mips.vcxproj.filters
 delete mode 100644 msvc/samples/sample_sparc/sample_sparc.vcxproj
 delete mode 100644 msvc/samples/sample_sparc/sample_sparc.vcxproj.filters
 delete mode 100644 msvc/samples/sample_x86/sample_x86.vcxproj
 delete mode 100644 msvc/samples/sample_x86/sample_x86.vcxproj.filters
 delete mode 100644 msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj
 delete mode 100644 msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj.filters
 delete mode 100644 msvc/samples/shellcode/shellcode.vcxproj
 delete mode 100644 msvc/samples/shellcode/shellcode.vcxproj.filters
 rename msvc/{unicorn => }/sparc-softmmu/config-target.h (100%)
 rename msvc/{unicorn => }/sparc64-softmmu/config-target.h (100%)
 delete mode 100644 msvc/unicorn.sln
 delete mode 100644 msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj.filters
 delete mode 100644 msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj.filters
 delete mode 100644 msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj.filters
 delete mode 100644 msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj.filters
 rename msvc/unicorn/{unicorn => }/dllmain.cpp (100%)
 delete mode 100644 msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj.filters
 delete mode 100644 msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj.filters
 delete mode 100644 msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj.filters
 delete mode 100644 msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj.filters
 delete mode 100644 msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj.filters
 delete mode 100644 msvc/unicorn/prebuild_script.bat
 delete mode 100644 msvc/unicorn/qapi-types.c
 delete mode 100644 msvc/unicorn/qapi-types.h
 delete mode 100644 msvc/unicorn/qapi-visit.c
 delete mode 100644 msvc/unicorn/qapi-visit.h
 delete mode 100644 msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj.filters
 delete mode 100644 msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj.filters
 delete mode 100644 msvc/unicorn/unicorn/unicorn.vcxproj
 delete mode 100644 msvc/unicorn/unicorn/unicorn.vcxproj.filters
 delete mode 100644 msvc/unicorn/unicorn_static/unicorn_static.vcxproj
 delete mode 100644 msvc/unicorn/unicorn_static/unicorn_static.vcxproj.filters
 delete mode 100644 msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj
 delete mode 100644 msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj.filters
 rename msvc/{unicorn => }/x86_64-softmmu/config-target.h (100%)
 delete mode 100644 pkgconfig.mk
 create mode 100644 qemu/.editorconfig
 delete mode 100644 qemu/CODING_STYLE
 create mode 100644 qemu/CODING_STYLE.rst
 delete mode 100644 qemu/HACKING
 create mode 100644 qemu/MAINTAINERS
 delete mode 100644 qemu/Makefile
 delete mode 100644 qemu/Makefile.objs
 delete mode 100644 qemu/Makefile.target
 delete mode 100644 qemu/accel.c
 create mode 100644 qemu/accel/tcg/atomic_template.h
 create mode 100644 qemu/accel/tcg/cpu-exec-common.c
 create mode 100644 qemu/accel/tcg/cpu-exec.c
 create mode 100644 qemu/accel/tcg/cputlb.c
 create mode 100644 qemu/accel/tcg/tcg-all.c
 create mode 100644 qemu/accel/tcg/tcg-runtime-gvec.c
 rename qemu/{ => accel/tcg}/tcg-runtime.c (63%)
 create mode 100644 qemu/accel/tcg/tcg-runtime.h
 create mode 100644 qemu/accel/tcg/translate-all.c
 create mode 100644 qemu/accel/tcg/translate-all.h
 create mode 100644 qemu/accel/tcg/translator.c
 delete mode 100644 qemu/cpu-exec.c
 delete mode 100644 qemu/cputlb.c
 rename qemu/{util => crypto}/aes.c (66%)
 create mode 100644 qemu/crypto/init.c
 delete mode 100644 qemu/default-configs/aarch64-softmmu.mak
 delete mode 100644 qemu/default-configs/aarch64eb-softmmu.mak
 delete mode 100644 qemu/default-configs/arm-softmmu.mak
 delete mode 100644 qemu/default-configs/armeb-softmmu.mak
 delete mode 100644 qemu/default-configs/m68k-softmmu.mak
 delete mode 100644 qemu/default-configs/mips-softmmu.mak
 delete mode 100644 qemu/default-configs/mips64-softmmu.mak
 delete mode 100644 qemu/default-configs/mips64el-softmmu.mak
 delete mode 100644 qemu/default-configs/mipsel-softmmu.mak
 delete mode 100644 qemu/default-configs/sparc-softmmu.mak
 delete mode 100644 qemu/default-configs/sparc64-softmmu.mak
 delete mode 100644 qemu/default-configs/x86_64-softmmu.mak
 delete mode 100644 qemu/docs/memory.txt
 create mode 100644 qemu/exec-vary.c
 delete mode 100644 qemu/fpu/softfloat-specialize.h
 create mode 100644 qemu/fpu/softfloat-specialize.inc.c
 delete mode 100755 qemu/gen_all_header.sh
 delete mode 100644 qemu/header_gen.py
 delete mode 100644 qemu/hw/Makefile.objs
 delete mode 100644 qemu/hw/arm/Makefile.objs
 delete mode 100644 qemu/hw/arm/tosa.c
 delete mode 100644 qemu/hw/arm/virt.c
 delete mode 100644 qemu/hw/core/Makefile.objs
 create mode 100644 qemu/hw/core/cpu.c
 delete mode 100644 qemu/hw/core/machine.c
 delete mode 100644 qemu/hw/core/qdev.c
 delete mode 100644 qemu/hw/i386/Makefile.objs
 delete mode 100644 qemu/hw/i386/pc.c
 delete mode 100644 qemu/hw/i386/pc_piix.c
 rename qemu/hw/{mips/addr.c => i386/x86.c} (73%)
 delete mode 100644 qemu/hw/intc/Makefile.objs
 delete mode 100644 qemu/hw/intc/apic.c
 delete mode 100644 qemu/hw/intc/apic_common.c
 delete mode 100644 qemu/hw/m68k/Makefile.objs
 delete mode 100644 qemu/hw/m68k/dummy_m68k.c
 delete mode 100644 qemu/hw/mips/Makefile.objs
 delete mode 100644 qemu/hw/mips/mips_r4k.c
 create mode 100644 qemu/hw/ppc/ppc.c
 create mode 100644 qemu/hw/ppc/ppc_booke.c
 delete mode 100644 qemu/hw/sparc/Makefile.objs
 delete mode 100644 qemu/hw/sparc/leon3.c
 delete mode 100644 qemu/hw/sparc64/Makefile.objs
 delete mode 100644 qemu/include/config.h
 rename qemu/include/{qemu => crypto}/aes.h (84%)
 rename qemu/{qemu-log.c => include/crypto/init.h} (51%)
 create mode 100644 qemu/include/crypto/random.h
 create mode 100644 qemu/include/disas/dis-asm.h
 delete mode 100644 qemu/include/exec/address-spaces.h
 delete mode 100644 qemu/include/exec/cpu_ldst_template.h
 create mode 100644 qemu/include/exec/memattrs.h
 create mode 100644 qemu/include/exec/memop.h
 create mode 100644 qemu/include/exec/memory_ldst.inc.h
 create mode 100644 qemu/include/exec/memory_ldst_cached.inc.h
 create mode 100644 qemu/include/exec/memory_ldst_phys.inc.h
 create mode 100644 qemu/include/exec/poison.h
 create mode 100644 qemu/include/exec/ramblock.h
 create mode 100644 qemu/include/exec/ramlist.h
 create mode 100644 qemu/include/exec/softmmu-semi.h
 create mode 100644 qemu/include/exec/target_page.h
 rename qemu/{translate-all.h => include/exec/tb-context.h} (62%)
 create mode 100644 qemu/include/exec/tb-hash.h
 create mode 100644 qemu/include/exec/tb-lookup.h
 create mode 100644 qemu/include/exec/translator.h
 create mode 100644 qemu/include/fpu/softfloat-helpers.h
 rename qemu/{ => include}/fpu/softfloat-macros.h (74%)
 create mode 100644 qemu/include/fpu/softfloat-types.h
 delete mode 100644 qemu/include/hw/arm/arm.h
 delete mode 100644 qemu/include/hw/boards.h
 create mode 100644 qemu/include/hw/core/cpu.h
 delete mode 100644 qemu/include/hw/cpu/icc_bus.h
 delete mode 100644 qemu/include/hw/hw.h
 delete mode 100644 qemu/include/hw/i386/apic.h
 delete mode 100644 qemu/include/hw/i386/apic_internal.h
 delete mode 100644 qemu/include/hw/i386/pc.h
 create mode 100644 qemu/include/hw/i386/topology.h
 delete mode 100644 qemu/include/hw/m68k/m68k.h
 delete mode 100644 qemu/include/hw/mips/mips.h
 create mode 100644 qemu/include/hw/ppc/ppc.h
 delete mode 100644 qemu/include/hw/qdev-core.h
 delete mode 100644 qemu/include/hw/qdev.h
 create mode 100644 qemu/include/hw/registerfields.h
 delete mode 100644 qemu/include/hw/sparc/sparc.h
 create mode 100644 qemu/include/libdecnumber/dconfig.h
 create mode 100644 qemu/include/libdecnumber/decContext.h
 create mode 100644 qemu/include/libdecnumber/decDPD.h
 create mode 100644 qemu/include/libdecnumber/decNumber.h
 create mode 100644 qemu/include/libdecnumber/decNumberLocal.h
 create mode 100644 qemu/include/libdecnumber/dpd/decimal128.h
 create mode 100644 qemu/include/libdecnumber/dpd/decimal128Local.h
 create mode 100644 qemu/include/libdecnumber/dpd/decimal32.h
 create mode 100644 qemu/include/libdecnumber/dpd/decimal64.h
 delete mode 100644 qemu/include/qapi/dealloc-visitor.h
 delete mode 100644 qemu/include/qapi/error.h
 delete mode 100644 qemu/include/qapi/qmp-input-visitor.h
 delete mode 100644 qemu/include/qapi/qmp-output-visitor.h
 delete mode 100644 qemu/include/qapi/qmp/qbool.h
 delete mode 100644 qemu/include/qapi/qmp/qdict.h
 delete mode 100644 qemu/include/qapi/qmp/qerror.h
 delete mode 100644 qemu/include/qapi/qmp/qfloat.h
 delete mode 100644 qemu/include/qapi/qmp/qint.h
 delete mode 100644 qemu/include/qapi/qmp/qjson.h
 delete mode 100644 qemu/include/qapi/qmp/qlist.h
 delete mode 100644 qemu/include/qapi/qmp/qobject.h
 delete mode 100644 qemu/include/qapi/qmp/qstring.h
 delete mode 100644 qemu/include/qapi/qmp/types.h
 delete mode 100644 qemu/include/qapi/string-input-visitor.h
 delete mode 100644 qemu/include/qapi/visitor-impl.h
 delete mode 100644 qemu/include/qapi/visitor.h
 create mode 100644 qemu/include/qemu/atomic128.h
 create mode 100644 qemu/include/qemu/cpuid.h
 create mode 100644 qemu/include/qemu/ctype.h
 create mode 100644 qemu/include/qemu/cutils.h
 create mode 100644 qemu/include/qemu/guest-random.h
 delete mode 100644 qemu/include/qemu/module.h
 create mode 100644 qemu/include/qemu/processor.h
 create mode 100644 qemu/include/qemu/qdist.h
 create mode 100644 qemu/include/qemu/qht.h
 create mode 100644 qemu/include/qemu/units.h
 create mode 100644 qemu/include/qemu/xxhash.h
 delete mode 100644 qemu/include/qom/cpu.h
 delete mode 100644 qemu/include/qom/object.h
 delete mode 100644 qemu/include/qom/qom-qobject.h
 create mode 100644 qemu/include/sysemu/tcg.h
 create mode 100644 qemu/include/tcg/tcg-apple-jit.h
 create mode 100644 qemu/include/tcg/tcg-gvec-desc.h
 rename qemu/include/{sysemu/accel.h => tcg/tcg-mo.h} (52%)
 create mode 100644 qemu/include/tcg/tcg-op-gvec.h
 create mode 100644 qemu/include/tcg/tcg-op.h
 rename qemu/{ => include}/tcg/tcg-opc.h (61%)
 create mode 100644 qemu/include/tcg/tcg.h
 create mode 100644 qemu/libdecnumber/decContext.c
 create mode 100644 qemu/libdecnumber/decNumber.c
 create mode 100644 qemu/libdecnumber/dpd/decimal128.c
 create mode 100644 qemu/libdecnumber/dpd/decimal32.c
 create mode 100644 qemu/libdecnumber/dpd/decimal64.c
 delete mode 100644 qemu/memory.c
 create mode 100644 qemu/memory_ldst.inc.c
 create mode 100644 qemu/ppc.h
 create mode 100644 qemu/ppc64.h
 delete mode 100644 qemu/qapi-types.c
 delete mode 100644 qemu/qapi-types.h
 delete mode 100644 qemu/qapi-visit.c
 delete mode 100644 qemu/qapi-visit.h
 delete mode 100644 qemu/qapi/Makefile.objs
 delete mode 100644 qemu/qapi/qapi-dealloc-visitor.c
 delete mode 100644 qemu/qapi/qapi-visit-core.c
 delete mode 100644 qemu/qapi/qmp-input-visitor.c
 delete mode 100644 qemu/qapi/qmp-output-visitor.c
 delete mode 100644 qemu/qapi/string-input-visitor.c
 delete mode 100644 qemu/qemu-timer.c
 delete mode 100644 qemu/qobject/Makefile.objs
 delete mode 100644 qemu/qobject/qbool.c
 delete mode 100644 qemu/qobject/qdict.c
 delete mode 100644 qemu/qobject/qerror.c
 delete mode 100644 qemu/qobject/qfloat.c
 delete mode 100644 qemu/qobject/qint.c
 delete mode 100644 qemu/qobject/qlist.c
 delete mode 100644 qemu/qobject/qstring.c
 delete mode 100644 qemu/qom/Makefile.objs
 delete mode 100644 qemu/qom/container.c
 delete mode 100644 qemu/qom/cpu.c
 delete mode 100644 qemu/qom/object.c
 delete mode 100644 qemu/qom/qom-qobject.c
 create mode 100644 qemu/riscv32.h
 create mode 100644 qemu/riscv64.h
 delete mode 100644 qemu/scripts/ordereddict.py
 delete mode 100644 qemu/scripts/qapi-build.sh
 delete mode 100644 qemu/scripts/qapi-schema.json
 delete mode 100644 qemu/scripts/qapi-types.py
 delete mode 100644 qemu/scripts/qapi-visit.py
 delete mode 100644 qemu/scripts/qapi.py
 delete mode 100644 qemu/scripts/qapi/common.json
 rename qemu/{ => softmmu}/cpus.c (56%)
 rename qemu/{ => softmmu}/ioport.c (69%)
 create mode 100644 qemu/softmmu/memory.c
 rename qemu/{ => softmmu}/memory_mapping.c (60%)
 rename qemu/{hw/sparc64/sun4u.c => softmmu/vl.c} (54%)
 delete mode 100644 qemu/softmmu_template.h
 delete mode 100644 qemu/target-arm/Makefile.objs
 delete mode 100644 qemu/target-arm/cpu-qom.h
 delete mode 100644 qemu/target-arm/cpu.c
 delete mode 100644 qemu/target-arm/cpu.h
 delete mode 100644 qemu/target-arm/cpu64.c
 delete mode 100644 qemu/target-arm/crypto_helper.c
 delete mode 100644 qemu/target-arm/helper-a64.c
 delete mode 100644 qemu/target-arm/helper-a64.h
 delete mode 100644 qemu/target-arm/helper.c
 delete mode 100644 qemu/target-arm/internals.h
 delete mode 100644 qemu/target-arm/op_helper.c
 delete mode 100644 qemu/target-arm/psci.c
 delete mode 100644 qemu/target-arm/translate.c
 delete mode 100644 qemu/target-arm/translate.h
 delete mode 100644 qemu/target-arm/unicorn_aarch64.c
 delete mode 100644 qemu/target-arm/unicorn_arm.c
 delete mode 100644 qemu/target-i386/Makefile.objs
 delete mode 100644 qemu/target-i386/cpu-qom.h
 delete mode 100644 qemu/target-i386/cpu.c
 delete mode 100644 qemu/target-i386/cpu.h
 delete mode 100644 qemu/target-i386/excp_helper.c
 delete mode 100644 qemu/target-i386/helper.c
 delete mode 100644 qemu/target-i386/mem_helper.c
 delete mode 100644 qemu/target-i386/smm_helper.c
 delete mode 100644 qemu/target-i386/topology.h
 delete mode 100644 qemu/target-i386/translate.c
 delete mode 100644 qemu/target-i386/unicorn.c
 delete mode 100644 qemu/target-m68k/Makefile.objs
 delete mode 100644 qemu/target-m68k/cpu-qom.h
 delete mode 100644 qemu/target-m68k/cpu.c
 delete mode 100644 qemu/target-m68k/cpu.h
 delete mode 100644 qemu/target-m68k/helper.c
 delete mode 100644 qemu/target-m68k/helper.h
 delete mode 100644 qemu/target-m68k/m68k-qreg.h
 delete mode 100644 qemu/target-m68k/op_helper.c
 delete mode 100644 qemu/target-m68k/translate.c
 delete mode 100644 qemu/target-m68k/unicorn.c
 delete mode 100644 qemu/target-mips/Makefile.objs
 delete mode 100644 qemu/target-mips/cpu.c
 delete mode 100644 qemu/target-mips/cpu.h
 delete mode 100644 qemu/target-mips/helper.c
 delete mode 100644 qemu/target-mips/mips-defs.h
 delete mode 100644 qemu/target-mips/msa_helper.c
 delete mode 100644 qemu/target-mips/op_helper.c
 delete mode 100644 qemu/target-mips/translate.c
 delete mode 100644 qemu/target-mips/translate_init.c
 delete mode 100644 qemu/target-mips/unicorn.c
 delete mode 100644 qemu/target-mips/unicorn.h
 delete mode 100644 qemu/target-sparc/Makefile.objs
 delete mode 100644 qemu/target-sparc/TODO
 delete mode 100644 qemu/target-sparc/cpu-qom.h
 delete mode 100644 qemu/target-sparc/cpu.c
 delete mode 100644 qemu/target-sparc/helper.h
 delete mode 100644 qemu/target-sparc/ldst_helper.c
 delete mode 100644 qemu/target-sparc/unicorn.c
 delete mode 100644 qemu/target-sparc/unicorn64.c
 create mode 100644 qemu/target/arm/README
 create mode 100644 qemu/target/arm/arm-powerctl.c
 create mode 100644 qemu/target/arm/arm-powerctl.h
 create mode 100644 qemu/target/arm/arm-semi.c
 rename qemu/{target-arm => target/arm}/arm_ldst.h (67%)
 create mode 100644 qemu/target/arm/cpu-param.h
 create mode 100644 qemu/target/arm/cpu-qom.h
 create mode 100644 qemu/target/arm/cpu.c
 create mode 100644 qemu/target/arm/cpu.h
 create mode 100644 qemu/target/arm/cpu64.c
 create mode 100644 qemu/target/arm/crypto_helper.c
 create mode 100644 qemu/target/arm/debug_helper.c
 create mode 100644 qemu/target/arm/decode-a32-uncond.inc.c
 create mode 100644 qemu/target/arm/decode-a32.inc.c
 create mode 100644 qemu/target/arm/decode-sve.inc.c
 create mode 100644 qemu/target/arm/decode-t16.inc.c
 create mode 100644 qemu/target/arm/decode-t32.inc.c
 create mode 100644 qemu/target/arm/decode-vfp-uncond.inc.c
 create mode 100644 qemu/target/arm/decode-vfp.inc.c
 create mode 100644 qemu/target/arm/helper-a64.c
 create mode 100644 qemu/target/arm/helper-a64.h
 create mode 100644 qemu/target/arm/helper-sve.h
 create mode 100644 qemu/target/arm/helper.c
 rename qemu/{target-arm => target/arm}/helper.h (61%)
 create mode 100644 qemu/target/arm/internals.h
 rename qemu/{target-arm => target/arm}/iwmmxt_helper.c (73%)
 rename qemu/{target-arm => target/arm}/kvm-consts.h (76%)
 create mode 100644 qemu/target/arm/m_helper.c
 rename qemu/{target-arm => target/arm}/neon_helper.c (86%)
 rename qemu/{target-arm => target/arm}/op_addsub.h (100%)
 create mode 100644 qemu/target/arm/op_helper.c
 create mode 100644 qemu/target/arm/pauth_helper.c
 create mode 100644 qemu/target/arm/psci.c
 create mode 100644 qemu/target/arm/sve_helper.c
 create mode 100644 qemu/target/arm/tlb_helper.c
 rename qemu/{target-arm => target/arm}/translate-a64.c (61%)
 create mode 100644 qemu/target/arm/translate-a64.h
 create mode 100644 qemu/target/arm/translate-sve.c
 create mode 100644 qemu/target/arm/translate-vfp.inc.c
 create mode 100644 qemu/target/arm/translate.c
 create mode 100644 qemu/target/arm/translate.h
 rename qemu/{target-arm => target/arm}/unicorn.h (50%)
 create mode 100644 qemu/target/arm/unicorn_aarch64.c
 create mode 100644 qemu/target/arm/unicorn_arm.c
 create mode 100644 qemu/target/arm/vec_helper.c
 create mode 100644 qemu/target/arm/vfp_helper.c
 rename qemu/{target-i386 => target/i386}/TODO (100%)
 rename qemu/{target-i386 => target/i386}/arch_memory_mapping.c (68%)
 create mode 100644 qemu/target/i386/bpt_helper.c
 rename qemu/{target-i386 => target/i386}/cc_helper.c (97%)
 rename qemu/{target-i386 => target/i386}/cc_helper_template.h (100%)
 create mode 100644 qemu/target/i386/cpu-param.h
 create mode 100644 qemu/target/i386/cpu-qom.h
 create mode 100644 qemu/target/i386/cpu.c
 create mode 100644 qemu/target/i386/cpu.h
 create mode 100644 qemu/target/i386/excp_helper.c
 rename qemu/{target-i386 => target/i386}/fpu_helper.c (61%)
 create mode 100644 qemu/target/i386/helper.c
 rename qemu/{target-i386 => target/i386}/helper.h (79%)
 rename qemu/{target-i386 => target/i386}/int_helper.c (84%)
 create mode 100644 qemu/target/i386/machine.c
 create mode 100644 qemu/target/i386/mem_helper.c
 rename qemu/{target-i386 => target/i386}/misc_helper.c (71%)
 create mode 100644 qemu/target/i386/mpx_helper.c
 rename qemu/{target-i386 => target/i386}/ops_sse.h (83%)
 rename qemu/{target-i386 => target/i386}/ops_sse_header.h (85%)
 rename qemu/{target-i386 => target/i386}/seg_helper.c (66%)
 rename qemu/{target-i386 => target/i386}/shift_helper_template.h (100%)
 create mode 100644 qemu/target/i386/smm_helper.c
 rename qemu/{target-i386 => target/i386}/svm.h (63%)
 rename qemu/{target-i386 => target/i386}/svm_helper.c (63%)
 create mode 100644 qemu/target/i386/translate.c
 create mode 100644 qemu/target/i386/unicorn.c
 rename qemu/{target-i386 => target/i386}/unicorn.h (62%)
 create mode 100644 qemu/target/i386/xsave_helper.c
 create mode 100644 qemu/target/m68k/cpu-param.h
 create mode 100644 qemu/target/m68k/cpu-qom.h
 create mode 100644 qemu/target/m68k/cpu.c
 create mode 100644 qemu/target/m68k/cpu.h
 create mode 100644 qemu/target/m68k/fpu_helper.c
 create mode 100644 qemu/target/m68k/helper.c
 create mode 100644 qemu/target/m68k/helper.h
 create mode 100644 qemu/target/m68k/op_helper.c
 rename qemu/{target-m68k => target/m68k}/qregs.def (50%)
 create mode 100644 qemu/target/m68k/softfloat.c
 create mode 100644 qemu/target/m68k/softfloat.h
 create mode 100644 qemu/target/m68k/softfloat_fpsp_tables.h
 create mode 100644 qemu/target/m68k/translate.c
 create mode 100644 qemu/target/m68k/unicorn.c
 rename qemu/{target-m68k => target/m68k}/unicorn.h (69%)
 rename qemu/{target-mips => target/mips}/TODO (100%)
 create mode 100644 qemu/target/mips/cp0_helper.c
 rename qemu/{hw/mips/cputimer.c => target/mips/cp0_timer.c} (51%)
 create mode 100644 qemu/target/mips/cpu-param.h
 rename qemu/{target-mips => target/mips}/cpu-qom.h (52%)
 create mode 100644 qemu/target/mips/cpu.c
 create mode 100644 qemu/target/mips/cpu.h
 rename qemu/{target-mips => target/mips}/dsp_helper.c (98%)
 create mode 100644 qemu/target/mips/fpu_helper.c
 create mode 100644 qemu/target/mips/helper.c
 rename qemu/{target-mips => target/mips}/helper.h (73%)
 create mode 100644 qemu/target/mips/internal.h
 rename qemu/{target-mips => target/mips}/lmi_helper.c (98%)
 create mode 100644 qemu/target/mips/mips-defs.h
 create mode 100644 qemu/target/mips/msa_helper.c
 create mode 100644 qemu/target/mips/op_helper.c
 create mode 100644 qemu/target/mips/translate.c
 create mode 100644 qemu/target/mips/translate_init.inc.c
 create mode 100644 qemu/target/mips/unicorn.c
 create mode 100644 qemu/target/mips/unicorn.h
 create mode 100644 qemu/target/ppc/compat.c
 create mode 100644 qemu/target/ppc/cpu-models.c
 create mode 100644 qemu/target/ppc/cpu-models.h
 create mode 100644 qemu/target/ppc/cpu-param.h
 create mode 100644 qemu/target/ppc/cpu-qom.h
 create mode 100644 qemu/target/ppc/cpu.c
 create mode 100644 qemu/target/ppc/cpu.h
 create mode 100644 qemu/target/ppc/dfp_helper.c
 create mode 100644 qemu/target/ppc/excp_helper.c
 create mode 100644 qemu/target/ppc/fpu_helper.c
 create mode 100644 qemu/target/ppc/helper.h
 create mode 100644 qemu/target/ppc/helper_regs.h
 create mode 100644 qemu/target/ppc/int_helper.c
 create mode 100644 qemu/target/ppc/internal.h
 create mode 100644 qemu/target/ppc/kvm_ppc.h
 create mode 100644 qemu/target/ppc/machine.c
 create mode 100644 qemu/target/ppc/mem_helper.c
 create mode 100644 qemu/target/ppc/mfrom_table.inc.c
 create mode 100644 qemu/target/ppc/mfrom_table_gen.c
 create mode 100644 qemu/target/ppc/misc_helper.c
 create mode 100644 qemu/target/ppc/mmu-book3s-v3.c
 create mode 100644 qemu/target/ppc/mmu-book3s-v3.h
 create mode 100644 qemu/target/ppc/mmu-hash32.c
 create mode 100644 qemu/target/ppc/mmu-hash32.h
 create mode 100644 qemu/target/ppc/mmu-hash64.c
 create mode 100644 qemu/target/ppc/mmu-hash64.h
 create mode 100644 qemu/target/ppc/mmu-radix64.c
 create mode 100644 qemu/target/ppc/mmu-radix64.h
 create mode 100644 qemu/target/ppc/mmu_helper.c
 create mode 100644 qemu/target/ppc/timebase_helper.c
 create mode 100644 qemu/target/ppc/translate.c
 create mode 100644 qemu/target/ppc/translate/dfp-impl.inc.c
 create mode 100644 qemu/target/ppc/translate/dfp-ops.inc.c
 create mode 100644 qemu/target/ppc/translate/fp-impl.inc.c
 create mode 100644 qemu/target/ppc/translate/fp-ops.inc.c
 create mode 100644 qemu/target/ppc/translate/spe-impl.inc.c
 create mode 100644 qemu/target/ppc/translate/spe-ops.inc.c
 create mode 100644 qemu/target/ppc/translate/vmx-impl.inc.c
 create mode 100644 qemu/target/ppc/translate/vmx-ops.inc.c
 create mode 100644 qemu/target/ppc/translate/vsx-impl.inc.c
 create mode 100644 qemu/target/ppc/translate/vsx-ops.inc.c
 create mode 100644 qemu/target/ppc/translate_init.inc.c
 create mode 100644 qemu/target/ppc/unicorn.c
 create mode 100644 qemu/target/ppc/unicorn.h
 create mode 100644 qemu/target/riscv/README
 create mode 100644 qemu/target/riscv/cpu-param.h
 create mode 100644 qemu/target/riscv/cpu.c
 create mode 100644 qemu/target/riscv/cpu.h
 create mode 100644 qemu/target/riscv/cpu_bits.h
 create mode 100644 qemu/target/riscv/cpu_helper.c
 create mode 100644 qemu/target/riscv/cpu_user.h
 create mode 100644 qemu/target/riscv/csr.c
 create mode 100644 qemu/target/riscv/fpu_helper.c
 create mode 100644 qemu/target/riscv/helper.h
 create mode 100644 qemu/target/riscv/insn_trans/trans_privileged.inc.c
 create mode 100644 qemu/target/riscv/insn_trans/trans_rva.inc.c
 create mode 100644 qemu/target/riscv/insn_trans/trans_rvd.inc.c
 create mode 100644 qemu/target/riscv/insn_trans/trans_rvf.inc.c
 create mode 100644 qemu/target/riscv/insn_trans/trans_rvi.inc.c
 create mode 100644 qemu/target/riscv/insn_trans/trans_rvm.inc.c
 create mode 100644 qemu/target/riscv/instmap.h
 create mode 100644 qemu/target/riscv/op_helper.c
 create mode 100644 qemu/target/riscv/pmp.c
 create mode 100644 qemu/target/riscv/pmp.h
 create mode 100644 qemu/target/riscv/riscv32/decode_insn16.inc.c
 create mode 100644 qemu/target/riscv/riscv32/decode_insn32.inc.c
 create mode 100644 qemu/target/riscv/riscv64/decode_insn16.inc.c
 create mode 100644 qemu/target/riscv/riscv64/decode_insn32.inc.c
 create mode 100644 qemu/target/riscv/translate.c
 create mode 100644 qemu/target/riscv/unicorn.c
 create mode 100644 qemu/target/riscv/unicorn.h
 create mode 100644 qemu/target/sparc/asi.h
 rename qemu/{target-sparc => target/sparc}/cc_helper.c (85%)
 create mode 100644 qemu/target/sparc/cpu-param.h
 create mode 100644 qemu/target/sparc/cpu-qom.h
 create mode 100644 qemu/target/sparc/cpu.c
 rename qemu/{target-sparc => target/sparc}/cpu.h (75%)
 rename qemu/{target-sparc => target/sparc}/fop_helper.c (59%)
 rename qemu/{target-sparc => target/sparc}/helper.c (70%)
 create mode 100644 qemu/target/sparc/helper.h
 rename qemu/{target-sparc => target/sparc}/int32_helper.c (85%)
 rename qemu/{target-sparc => target/sparc}/int64_helper.c (60%)
 create mode 100644 qemu/target/sparc/ldst_helper.c
 rename qemu/{target-sparc => target/sparc}/mmu_helper.c (66%)
 rename qemu/{target-sparc => target/sparc}/translate.c (64%)
 create mode 100644 qemu/target/sparc/unicorn.c
 rename qemu/{target-sparc => target/sparc}/unicorn.h (55%)
 create mode 100644 qemu/target/sparc/unicorn64.c
 rename qemu/{target-sparc => target/sparc}/vis_helper.c (99%)
 rename qemu/{target-sparc => target/sparc}/win_helper.c (75%)
 delete mode 100644 qemu/tcg/LICENSE
 delete mode 100644 qemu/tcg/TODO
 delete mode 100644 qemu/tcg/aarch64/tcg-target.c
 create mode 100644 qemu/tcg/aarch64/tcg-target.inc.c
 create mode 100644 qemu/tcg/aarch64/tcg-target.opc.h
 rename qemu/tcg/arm/{tcg-target.c => tcg-target.inc.c} (69%)
 delete mode 100644 qemu/tcg/i386/tcg-target.c
 create mode 100644 qemu/tcg/i386/tcg-target.inc.c
 create mode 100644 qemu/tcg/i386/tcg-target.opc.h
 delete mode 100644 qemu/tcg/ia64/tcg-target.c
 delete mode 100644 qemu/tcg/ia64/tcg-target.h
 delete mode 100644 qemu/tcg/mips/tcg-target.c
 create mode 100644 qemu/tcg/mips/tcg-target.inc.c
 rename qemu/tcg/ppc/{tcg-target.c => tcg-target.inc.c} (50%)
 create mode 100644 qemu/tcg/ppc/tcg-target.opc.h
 create mode 100644 qemu/tcg/riscv/tcg-target.h
 create mode 100644 qemu/tcg/riscv/tcg-target.inc.c
 rename qemu/tcg/s390/{tcg-target.c => tcg-target.inc.c} (61%)
 rename qemu/tcg/sparc/{tcg-target.c => tcg-target.inc.c} (66%)
 rename qemu/tcg/{tcg-be-ldst.h => tcg-ldst.inc.c} (65%)
 create mode 100644 qemu/tcg/tcg-op-gvec.c
 create mode 100644 qemu/tcg/tcg-op-vec.c
 create mode 100644 qemu/tcg/tcg-op.c
 delete mode 100644 qemu/tcg/tcg-op.h
 create mode 100644 qemu/tcg/tcg-pool.inc.c
 delete mode 100644 qemu/tcg/tcg-runtime.h
 delete mode 100644 qemu/tcg/tcg.h
 create mode 100644 qemu/trace/mem-internal.h
 create mode 100644 qemu/trace/mem.h
 delete mode 100644 qemu/translate-all.c
 delete mode 100644 qemu/util/Makefile.objs
 create mode 100644 qemu/util/cacheinfo.c
 delete mode 100644 qemu/util/error.c
 create mode 100644 qemu/util/guest-random.c
 delete mode 100644 qemu/util/module.c
 create mode 100644 qemu/util/osdep.c
 create mode 100644 qemu/util/pagesize.c
 create mode 100644 qemu/util/qdist.c
 delete mode 100644 qemu/util/qemu-error.c
 rename qemu/{tcg/tcg-be-null.h => util/qemu-timer.c} (74%)
 create mode 100644 qemu/util/qht.c
 create mode 100644 qemu/util/range.c
 delete mode 100644 qemu/vl.c
 delete mode 100644 samples/.gitignore
 delete mode 100644 samples/sample_armeb.c
 rename samples/{sample_arm64eb.c => sample_ppc.c} (51%)
 create mode 100644 samples/sample_riscv.c
 create mode 100755 symbols.sh
 delete mode 100644 tests/regress/arm_apsr_access.py
 create mode 100644 tests/regress/arm_memcpy_neon.py
 create mode 100644 tests/regress/arm_wfi_first_insn_of_tb.py
 delete mode 100644 tests/regress/hook_raises_exception.py
 delete mode 100644 tests/regress/mem_64_c.c
 delete mode 100644 tests/regress/mem_double_unmap.c
 delete mode 100644 tests/regress/mem_exec.c
 delete mode 100644 tests/regress/mem_fuzz.c
 delete mode 100644 tests/regress/mem_map_0x100000000.c
 delete mode 100644 tests/regress/mem_map_large.c
 delete mode 100644 tests/regress/mem_nofree.c
 delete mode 100644 tests/regress/mem_protect.c
 delete mode 100644 tests/regress/mem_unmap.c
 delete mode 100644 tests/regress/memleak_arm.c
 delete mode 100644 tests/regress/memleak_arm64.c
 delete mode 100644 tests/regress/memleak_m68k.c
 delete mode 100644 tests/regress/memleak_mips.c
 delete mode 100644 tests/regress/memleak_sparc.c
 delete mode 100644 tests/regress/memleak_x86.c
 create mode 100644 tests/regress/mips_cp1.py
 create mode 100755 tests/regress/x86_ld_crash.py
 create mode 100644 tests/regress/x86_set_ip.py
 delete mode 100644 tests/unit/.gitignore
 delete mode 100644 tests/unit/Makefile
 create mode 100644 tests/unit/acutest.h
 delete mode 100644 tests/unit/gdt_idx.s
 delete mode 100644 tests/unit/high_address.s
 delete mode 100644 tests/unit/pc_change.s
 delete mode 100644 tests/unit/tb_x86.s
 create mode 100644 tests/unit/test_arm.c
 create mode 100644 tests/unit/test_arm64.c
 delete mode 100644 tests/unit/test_gdt_idt_x86.c
 delete mode 100644 tests/unit/test_hang.c
 delete mode 100644 tests/unit/test_hookcounts.c
 create mode 100644 tests/unit/test_m68k.c
 delete mode 100644 tests/unit/test_mem_high.c
 delete mode 100644 tests/unit/test_mem_map.c
 delete mode 100644 tests/unit/test_mem_map_ptr.c
 create mode 100644 tests/unit/test_mips.c
 delete mode 100644 tests/unit/test_multihook.c
 delete mode 100644 tests/unit/test_pc_change.c
 create mode 100644 tests/unit/test_ppc.c
 create mode 100644 tests/unit/test_riscv.c
 delete mode 100644 tests/unit/test_sanity.c
 create mode 100644 tests/unit/test_sparc.c
 delete mode 100644 tests/unit/test_tb_x86.c
 delete mode 100644 tests/unit/test_x86_rip_bug.c
 delete mode 100644 tests/unit/test_x86_shl_enter_leave.c
 delete mode 100644 tests/unit/test_x86_soft_paging.c
 delete mode 100644 tests/unit/x86_soft_paging_low.s
 delete mode 100644 windows_export.bat

diff --git a/.gitignore b/.gitignore
index de6684ae..78e294ce 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,8 +12,6 @@
 *.jar
 *~
 
-qemu/config-all-devices.mak
-
 qemu/aarch64-softmmu/
 qemu/aarch64eb-softmmu/
 qemu/arm-softmmu/
@@ -29,6 +27,8 @@ qemu/i386-softmmu/
 qemu/x86_64-softmmu/
 qemu/ppc-softmmu/
 qemu/ppc64-softmmu/
+qemu/riscv32-softmmu/
+qemu/riscv64-softmmu/
 
 tags
 qemu/config-host.ld
@@ -67,8 +67,6 @@ bindings/python/unicorn.egg-info/
 bindings/python/unicorn/lib/
 bindings/python/unicorn/include/
 bindings/python/MANIFEST
-bindings/rust/target/
-bindings/rust/Cargo.lock
 
 config.log
 
diff --git a/.travis.yml b/.travis.yml
index c08466b8..75da008b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,19 +4,10 @@
env: script: - | if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then - if [[ "$TRAVIS_COMPILER" == "clang" ]]; then - choco install cygwin cyg-get && \ - cyg-get.bat default autoconf automake make gcc-core clang pkg-config libpcre-devel cmake python27-setuptools ruby wget && \ - export SHELLOPTS && set -o igncr && \ - cmd.exe //C "C:\\tools\\cygwin\\bin\\bash.exe -lc 'cd /cygdrive/$TRAVIS_BUILD_DIR; make header; make'" - else - choco install cygwin cyg-get && \ - cyg-get.bat default autoconf automake make gcc-core clang pkg-config libpcre-devel cmake python27-setuptools ruby wget && \ - export SHELLOPTS && set -o igncr && \ - cmd.exe //C "C:\\tools\\cygwin\\bin\\bash.exe -lc 'cd /cygdrive/$TRAVIS_BUILD_DIR; make header; make; ./install-cmocka-linux.sh; export PATH="$PATH":/cygdrive/$TRAVIS_BUILD_DIR:/cygdrive/$TRAVIS_BUILD_DIR/cmocka/src; make test'" - fi - elif [[ "$TRAVIS_CPU_ARCH" == "arm64" ]]; then - make header && make && make -C tests/unit test && make -C tests/regress test + choco install cygwin cyg-get && \ + cyg-get.bat default autoconf automake make gcc-core clang pkg-config libpcre-devel cmake python27-setuptools ruby wget && \ + export SHELLOPTS && set -o igncr \ + cmd.exe //C "C:\\tools\\cygwin\\bin\\bash.exe -lc 'cd /cygdrive/$TRAVIS_BUILD_DIR; make header; make; ./install-cmocka-linux.sh; export PATH="$PATH":/cygdrive/$TRAVIS_BUILD_DIR:/cygdrive/$TRAVIS_BUILD_DIR/cmocka/src; make test'" else make header && make && make -C bindings/go && make -C bindings/go test && make test fi @@ -25,52 +16,28 @@ compiler: - gcc os: - linux + - osx - windows -arch: - - amd64 - - arm64 matrix: fast_finish: true - exclude: - - os: windows - arch: arm64 include: - - - name: "Compiler: clang C" - os: osx - osx_image: xcode10.1 - python: 3.7 + - name: "Linux arm64 clang C" + arch: arm64 + os: linux compiler: clang - before_cache: - - brew cleanup - - find /usr/local/Homebrew \! -regex ".+\.git.+" -delete; - cache: - directories: - - $HOME/Library/Caches/Homebrew - - /usr/local/Homebrew - before_install: - - cd /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core && git stash && git clean -d -f - script: - - cd $TRAVIS_BUILD_DIR - - make header && make && make -C bindings/go && make -C bindings/go test && make test + language: c + env: + - PATH=$PATH:/usr/local/opt/binutils/bin + script: make header && make && make -C tests/unit test && make -C tests/regress test - - name: "Compiler: gcc C" - os: osx - osx_image: xcode10.1 - python: 3.7 + - name: "Linux arm64 gcc C" + arch: arm64 + os: linux compiler: gcc - before_cache: - - brew cleanup - - find /usr/local/Homebrew \! 
-regex ".+\.git.+" -delete; - cache: - directories: - - $HOME/Library/Caches/Homebrew - - /usr/local/Homebrew - before_install: - - cd /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core && git stash && git clean -d -f - script: - - cd $TRAVIS_BUILD_DIR - - make header && make && make -C bindings/go && make -C bindings/go test && make test + language: c + env: + - PATH=$PATH:/usr/local/opt/binutils/bin + script: make header && make && make -C tests/unit test && make -C tests/regress test - name: "Linux clang ASAN" os: linux @@ -81,9 +48,7 @@ matrix: - CXXFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=address -fsanitize=fuzzer-no-link" - CFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=address -fsanitize=fuzzer-no-link" - LDFLAGS="-fsanitize=address" - script: - - make header && make - - make -C tests/fuzz && sh tests/fuzz/dlcorpus.sh + script: make header && make && make -C tests/fuzz && sh tests/fuzz/dlcorpus.sh - name: "Linux clang MSAN" os: linux @@ -94,9 +59,7 @@ matrix: - CXXFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=memory -fsanitize=fuzzer-no-link" - CFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=memory -fsanitize=fuzzer-no-link" - LDFLAGS="-fsanitize=memory" - script: - - make header && make - - make -C tests/fuzz && sh tests/fuzz/dlcorpus.sh + script: make header && make && make -C tests/fuzz && sh tests/fuzz/dlcorpus.sh - name: "Linux clang USAN" os: linux @@ -107,9 +70,7 @@ matrix: - CXXFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=undefined -fsanitize=fuzzer-no-link" - CFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=undefined -fsanitize=fuzzer-no-link" - LDFLAGS="-fsanitize=undefined" - script: - - make header && make - - make -C tests/fuzz && sh tests/fuzz/dlcorpus.sh + script: make header && make && make -C tests/fuzz && sh tests/fuzz/dlcorpus.sh - name: "Linux 32bit" os: linux @@ -164,42 +125,6 @@ matrix: - cp libunicorn.* ../ - make -C ../tests/unit test && make -C ../tests/regress test - - name: "Linux Cmake Static 32bit" - os: linux - compiler: gcc - env: - - CFLAGS="-m32" LDFLAGS="-m32" LDFLAGS_STATIC="-m32" UNICORN_QEMU_FLAGS="--cpu=i386" - - PATH=$PATH:/usr/local/opt/binutils/bin - script: - - mkdir build - - cd build - - cmake -DCMAKE_BUILD_TYPE=Release -DUNICORN_ARCH=x86 -DUNICORN_BUILD_SHARED=OFF .. && make -j8 -# temporarily disable test for static build -# - cp libunicorn.* ../ -# - make -C ../tests/unit test && make -C ../tests/regress test - addons: - apt: - packages: - - lib32ncurses5-dev - - lib32z1-dev - - libpthread-stubs0-dev - - lib32gcc-4.8-dev - - libc6-dev-i386 - - gcc-multilib - - libcmocka-dev:i386 - - - name: "Linux Cmake Static 64bit" - os: linux - compiler: gcc - env: - - PATH=$PATH:/usr/local/opt/binutils/bin - script: - - mkdir build - - cd build - - cmake -DCMAKE_BUILD_TYPE=Release -DUNICORN_BUILD_SHARED=OFF .. 
&& make -j8 -# - cp libunicorn.* ../ -# - make -C ../tests/unit test && make -C ../tests/regress test - - name: "MacOSX brew" os: osx osx_image: xcode10.1 @@ -289,14 +214,12 @@ matrix: - export LDFLAGS="-m32" - export LDFLAGS_STATIC="-m32" - export UNICORN_QEMU_FLAGS="--cpu=i386" -# before_cache: -# - $msys2 pacman --sync --clean --noconfirm -# cache: -# timeout: -# 1000 -# directories: -# - $HOME/AppData/Local/Temp/chocolatey -# - /C/tools/msys64 + before_cache: + - $msys2 pacman --sync --clean --noconfirm + cache: + directories: + - $HOME/AppData/Local/Temp/chocolatey + - /C/tools/msys64 script: - $shell make header; $shell make; cp unicorn.dll /C/Windows/SysWOW64/; $shell make test @@ -333,14 +256,12 @@ matrix: - export CC=x86_64-w64-mingw32-gcc - export AR=gcc-ar - export RANLIB=gcc-ranlib -# before_cache: -# - $msys2 pacman --sync --clean --noconfirm -# cache: -# timeout: -# 1000 -# directories: -# - $HOME/AppData/Local/Temp/chocolatey -# - /C/tools/msys64 + before_cache: + - $msys2 pacman --sync --clean --noconfirm + cache: + directories: + - $HOME/AppData/Local/Temp/chocolatey + - /C/tools/msys64 script: - $shell make header; $shell make; cp unicorn.dll /C/Windows/System32/; $shell make test addons: diff --git a/CMakeLists.txt b/CMakeLists.txt index 7b46a055..2c90b72e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,34 +1,23 @@ -# Tested on window10(x64) with vs2019. -# Open the "x86 Native Tools Command Prompt for VS 2019", -# cd ${UNICORN_SOURCE_DIR} -# mkdir build -# cd build -# cmake -G "NMake Makefiles" .. -# nmake -# Or Open "x64 Native Tools Command Prompt for VS 2019" for 64bit binary. -# Tested on Ubuntu-1804-amd64 with gcc. -# $ cd ${UNICORN_SOURCE_DIR} -# $ mkdir build -# $ cd build -# $ cmake .. -# $ make -# By Huitao Chen, 2019 +# CMake setup for Unicorn 2. +# By Huitao Chen & Nguyen Anh Quynh, 2019-2020 cmake_minimum_required(VERSION 3.1) + +# Workaround to fix wrong compiler on macos. +if ((APPLE) AND (NOT CMAKE_C_COMPILER)) + set(CMAKE_C_COMPILER "/usr/bin/cc") +endif() project(unicorn C) -set(UNICORN_VERSION_MAJOR 1) +set(UNICORN_VERSION_MAJOR 2) set(UNICORN_VERSION_MINOR 0) -set(UNICORN_VERSION_PATCH 3) +set(UNICORN_VERSION_PATCH 0) -option(BUILD_SHARED_LIBS "Build shared instead of static library" ON) -option(UNICORN_INSTALL "Install unicorn" ON) -option(UNICORN_BUILD_SAMPLES "Build samples" ON) -set(UNICORN_ARCH "x86 arm aarch64 m68k mips sparc" CACHE STRING "Supported architectures") +option(UNICORN_BUILD_SHARED "Build shared instead of static library" ON) -# Deprecated option (CMake has this feature built-in) -if(UNICORN_BUILD_SHARED) - set(BUILD_SHARED_LIBS ON CACHE BOOL "" FORCE) +if (NOT UNICORN_ARCH) + # build all architectures + set(UNICORN_ARCH "x86 arm aarch64 riscv mips sparc m68k ppc") endif() string(TOUPPER ${UNICORN_ARCH} UNICORN_ARCH) @@ -38,16 +27,9 @@ foreach(ARCH_LOOP ${UNICORN_ARCH_LIST}) set(UNICORN_HAS_${ARCH_LOOP} TRUE) endforeach(ARCH_LOOP) -# qemu uses assert(). It is not recommended to define NDEBUG if using assert() -# to detect error conditions since the software may behave -# non-deterministically. Remove the NDEBUG macro. 
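The comment deleted just above describes a real hazard rather than a style preference: qemu-derived code calls assert() on expressions with side effects, so defining NDEBUG does not merely drop the checks, it compiles the side effects away and changes behavior. A minimal stand-alone C illustration of that pitfall (a hypothetical snippet for this note, not a file from the tree):

    /* Build twice and compare: `cc demo.c` vs `cc -DNDEBUG demo.c`. */
    #include <assert.h>
    #include <stdio.h>

    static int consume(int *budget)
    {
        return --*budget >= 0; /* side effect: decrements *budget */
    }

    int main(void)
    {
        int budget = 3;
        /* With NDEBUG defined, assert() expands to nothing, so
         * consume() is never called and budget silently stays at 3. */
        assert(consume(&budget));
        printf("budget = %d\n", budget); /* 2 normally, 3 under -DNDEBUG */
        return 0;
    }

As the hunk below shows, the patch deletes the Release-flag scrubbing along with the comment.
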
-if(CMAKE_BUILD_TYPE STREQUAL "Release") - string(REPLACE "-DNDEBUG" "" CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE}) -endif() - if(MSVC) include_directories( - ${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn + ${CMAKE_CURRENT_SOURCE_DIR}/msvc ) else() include_directories( @@ -56,17 +38,18 @@ else() endif() include_directories( + glib_compat qemu qemu/include - qemu/tcg include + qemu/tcg ) if(MSVC) if(CMAKE_SIZEOF_VOID_P EQUAL 8) - set(MSVC_FLAG -D__x86_64__) + set(MSVC_FLAG -D__x86_64__) elseif(CMAKE_SIZEOF_VOID_P EQUAL 4) - set(MSVC_FLAG -D__i386__) + set(MSVC_FLAG -D__i386__) else() message(FATAL_ERROR "Neither WIN64 or WIN32!") endif() @@ -77,8 +60,8 @@ if(MSVC) -DWIN32_LEAN_AND_MEAN ${MSVC_FLAG} /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/tcg/i386 - /wd4018 /wd4244 /wd4267 ) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4018 /wd4098 /wd4244 /wd4267") if(CMAKE_BUILD_TYPE STREQUAL "Debug") string(REPLACE "/ZI" "/Zi" CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG}) endif() @@ -89,59 +72,91 @@ if(MSVC) string(REPLACE "/MD" "/MT" CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE}) endif() else() - # detect host arch. - execute_process(COMMAND ${CMAKE_C_COMPILER} -dM -E - - INPUT_FILE /dev/null - OUTPUT_VARIABLE UC_COMPILER_MACRO) + if (MINGW) + execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpmachine + OUTPUT_VARIABLE UC_COMPILER_VERSION) - while(TRUE) - string(FIND ${UC_COMPILER_MACRO} "__x86_64__" UC_RET) - if (${UC_RET} GREATER "0") + string(FIND "${UC_COMPILER_VERSION}" "i686" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") set(UNICORN_TARGET_ARCH "i386") - break() - endif() - string(FIND ${UC_COMPILER_MACRO} "__i386__" UC_RET) - if (${UC_RET} GREATER "0") + set(UNICORN_CFLAGS -m32) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -m32") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -m32") + else() set(UNICORN_TARGET_ARCH "i386") - break() + set(UNICORN_CFLAGS -m64 -mcx16) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -m64") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -m64") endif() - string(FIND ${UC_COMPILER_MACRO} "__arm__" UC_RET) - if (${UC_RET} GREATER "0") - set(UNICORN_TARGET_ARCH "arm") - break() - endif() - string(FIND ${UC_COMPILER_MACRO} "__aarch64__" UC_RET) - if (${UC_RET} GREATER "0") - set(UNICORN_TARGET_ARCH "aarch64") - break() - endif() - string(FIND ${UC_COMPILER_MACRO} "__mips__" UC_RET) - if (${UC_RET} GREATER "0") - set(UNICORN_TARGET_ARCH "mips") - break() - endif() - string(FIND ${UC_COMPILER_MACRO} "__sparc__" UC_RET) - if (${UC_RET} GREATER "0") - set(UNICORN_TARGET_ARCH "sparc") - break() - endif() - string(FIND ${UC_COMPILER_MACRO} "__ia64__" UC_RET) - if (${UC_RET} GREATER "0") - set(UNICORN_TARGET_ARCH "ia64") - break() - endif() - string(FIND ${UC_COMPILER_MACRO} "_ARCH_PPC" UC_RET) - if (${UC_RET} GREATER "0") - set(UNICORN_TARGET_ARCH "ppc") - break() - endif() - string(FIND ${UC_COMPILER_MACRO} "__s390__" UC_RET) - if (${UC_RET} GREATER "0") - set(UNICORN_TARGET_ARCH "s390") - break() - endif() - message(FATAL_ERROR "Unknown host compiler: ${CMAKE_C_COMPILER}.") - endwhile(TRUE) + else() + execute_process(COMMAND ${CMAKE_C_COMPILER} -dM -E - + INPUT_FILE /dev/null + OUTPUT_VARIABLE UC_COMPILER_MACRO) + + while(TRUE) + string(FIND "${UC_COMPILER_MACRO}" "__x86_64__" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "i386") + string(FIND "${UC_COMPILER_MACRO}" "__ILP32__" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_CFLAGS -mx32) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -mx32") + 
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -mx32") + else() + set(UNICORN_CFLAGS -m64 -mcx16) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -m64") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -m64") + endif() + break() + endif() + string(FIND "${UC_COMPILER_MACRO}" "__i386__" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "i386") + break() + endif() + string(FIND "${UC_COMPILER_MACRO}" "__arm__" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "arm") + break() + endif() + string(FIND "${UC_COMPILER_MACRO}" "__aarch64__" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "aarch64") + break() + endif() + string(FIND "${UC_COMPILER_MACRO}" "__mips__" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "mips") + break() + endif() + string(FIND "${UC_COMPILER_MACRO}" "__sparc__" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "sparc") + break() + endif() + string(FIND "${UC_COMPILER_MACRO}" "__ia64__" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "ia64") + break() + endif() + string(FIND "${UC_COMPILER_MACRO}" "_ARCH_PPC" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "ppc") + break() + endif() + string(FIND "${UC_COMPILER_MACRO}" "__riscv" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "riscv") + break() + endif() + string(FIND "${UC_COMPILER_MACRO}" "__s390__" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "s390") + break() + endif() + message(FATAL_ERROR "Unknown host compiler: ${CMAKE_C_COMPILER}.") + endwhile(TRUE) + endif() set(EXTRA_CFLAGS "--extra-cflags=") if (UNICORN_HAS_X86) @@ -162,7 +177,14 @@ else() if (UNICORN_HAS_SPARC) set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_SPARC ") endif() - set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-fPIC -fvisibility=hidden") + if (UNICORN_HAS_PPC) + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_PPC ") + endif() + if (UNICORN_HAS_RISCV) + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_RISCV ") + endif() + + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-fPIC") set(TARGET_LIST "--target-list=") if (UNICORN_HAS_X86) @@ -183,10 +205,17 @@ else() if (UNICORN_HAS_SPARC) set (TARGET_LIST "${TARGET_LIST}sparc-softmmu, sparc64-softmmu, ") endif() + if (UNICORN_HAS_PPC) + set (TARGET_LIST "${TARGET_LIST}ppc-softmmu, ppc64-softmmu, ") + endif() + if (UNICORN_HAS_RISCV) + set (TARGET_LIST "${TARGET_LIST}riscv32-softmmu, riscv64-softmmu, ") + endif() set (TARGET_LIST "${TARGET_LIST} ") # GEN config-host.mak & target directories execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/configure + --cc=${CMAKE_C_COMPILER} ${EXTRA_CFLAGS} ${TARGET_LIST} WORKING_DIRECTORY ${CMAKE_BINARY_DIR} @@ -255,678 +284,859 @@ else() OUTPUT_FILE ${CMAKE_BINARY_DIR}/sparc64-softmmu/config-target.h ) endif() + if (UNICORN_HAS_PPC) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/ppc-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/ppc-softmmu/config-target.h + ) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/ppc64-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/ppc64-softmmu/config-target.h + ) + endif() + if (UNICORN_HAS_RISCV) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/riscv32-softmmu/config-target.mak + OUTPUT_FILE 
${CMAKE_BINARY_DIR}/riscv32-softmmu/config-target.h + ) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/riscv64-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/riscv64-softmmu/config-target.h + ) + endif() add_compile_options( + ${UNICORN_CFLAGS} -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/tcg/${UNICORN_TARGET_ARCH} -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE - -Wall -O2 - -fPIC -fpic -fvisibility=hidden + -Wall + -fPIC ) + if (APPLE) + # This warning is disabled by default for gcc and doesn't cause any bug. + add_compile_options( + -Wno-missing-braces + ) + endif() endif() -if (UNICORN_HAS_X86) -add_library(x86_64-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c +set(UNICORN_ARCH_COMMON qemu/exec.c + qemu/exec-vary.c + + qemu/softmmu/cpus.c + qemu/softmmu/ioport.c + qemu/softmmu/memory.c + qemu/softmmu/memory_mapping.c + qemu/fpu/softfloat.c - qemu/hw/i386/pc.c - qemu/hw/i386/pc_piix.c - qemu/hw/intc/apic.c - qemu/hw/intc/apic_common.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-i386/arch_memory_mapping.c - qemu/target-i386/cc_helper.c - qemu/target-i386/cpu.c - qemu/target-i386/excp_helper.c - qemu/target-i386/fpu_helper.c - qemu/target-i386/helper.c - qemu/target-i386/int_helper.c - qemu/target-i386/mem_helper.c - qemu/target-i386/misc_helper.c - qemu/target-i386/seg_helper.c - qemu/target-i386/smm_helper.c - qemu/target-i386/svm_helper.c - qemu/target-i386/translate.c - qemu/target-i386/unicorn.c + qemu/tcg/optimize.c qemu/tcg/tcg.c - qemu/translate-all.c + qemu/tcg/tcg-op.c + qemu/tcg/tcg-op-gvec.c + qemu/tcg/tcg-op-vec.c + + qemu/accel/tcg/cpu-exec.c + qemu/accel/tcg/cpu-exec-common.c + qemu/accel/tcg/cputlb.c + qemu/accel/tcg/tcg-all.c + qemu/accel/tcg/tcg-runtime.c + qemu/accel/tcg/tcg-runtime-gvec.c + qemu/accel/tcg/translate-all.c + qemu/accel/tcg/translator.c +) + +if (UNICORN_HAS_X86) +add_library(x86_64-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/hw/i386/x86.c + + qemu/target/i386/arch_memory_mapping.c + qemu/target/i386/bpt_helper.c + qemu/target/i386/cc_helper.c + qemu/target/i386/cpu.c + qemu/target/i386/excp_helper.c + qemu/target/i386/fpu_helper.c + qemu/target/i386/helper.c + qemu/target/i386/int_helper.c + qemu/target/i386/machine.c + qemu/target/i386/mem_helper.c + qemu/target/i386/misc_helper.c + qemu/target/i386/mpx_helper.c + qemu/target/i386/seg_helper.c + qemu/target/i386/smm_helper.c + qemu/target/i386/svm_helper.c + qemu/target/i386/translate.c + qemu/target/i386/xsave_helper.c + qemu/target/i386/unicorn.c ) if(MSVC) target_compile_options(x86_64-softmmu PRIVATE -DNEED_CPU_H /FIx86_64.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/x86_64-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-i386 + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/x86_64-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/i386 ) else() target_compile_options(x86_64-softmmu PRIVATE -DNEED_CPU_H -include x86_64.h -I${CMAKE_BINARY_DIR}/x86_64-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-i386 + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/i386 ) endif() endif() if (UNICORN_HAS_ARM) -add_library(arm-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - qemu/fpu/softfloat.c - qemu/hw/arm/tosa.c - qemu/hw/arm/virt.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-arm/cpu.c - qemu/target-arm/crypto_helper.c - qemu/target-arm/helper.c - qemu/target-arm/iwmmxt_helper.c - qemu/target-arm/neon_helper.c - qemu/target-arm/op_helper.c - 
qemu/target-arm/psci.c - qemu/target-arm/translate.c - qemu/target-arm/unicorn_arm.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(arm-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/arm/cpu.c + qemu/target/arm/crypto_helper.c + qemu/target/arm/debug_helper.c + qemu/target/arm/helper.c + qemu/target/arm/iwmmxt_helper.c + qemu/target/arm/m_helper.c + qemu/target/arm/neon_helper.c + qemu/target/arm/op_helper.c + qemu/target/arm/psci.c + qemu/target/arm/tlb_helper.c + qemu/target/arm/translate.c + qemu/target/arm/vec_helper.c + qemu/target/arm/vfp_helper.c + qemu/target/arm/unicorn_arm.c ) if(MSVC) target_compile_options(arm-softmmu PRIVATE -DNEED_CPU_H /FIarm.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/arm-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/arm-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) else() target_compile_options(arm-softmmu PRIVATE -DNEED_CPU_H -include arm.h -I${CMAKE_BINARY_DIR}/arm-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) endif() -add_library(armeb-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - qemu/fpu/softfloat.c - qemu/hw/arm/tosa.c - qemu/hw/arm/virt.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-arm/cpu.c - qemu/target-arm/crypto_helper.c - qemu/target-arm/helper.c - qemu/target-arm/iwmmxt_helper.c - qemu/target-arm/neon_helper.c - qemu/target-arm/op_helper.c - qemu/target-arm/psci.c - qemu/target-arm/translate.c - qemu/target-arm/unicorn_arm.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(armeb-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/arm/cpu.c + qemu/target/arm/crypto_helper.c + qemu/target/arm/debug_helper.c + qemu/target/arm/helper.c + qemu/target/arm/iwmmxt_helper.c + qemu/target/arm/m_helper.c + qemu/target/arm/neon_helper.c + qemu/target/arm/op_helper.c + qemu/target/arm/psci.c + qemu/target/arm/tlb_helper.c + qemu/target/arm/translate.c + qemu/target/arm/vec_helper.c + qemu/target/arm/vfp_helper.c + qemu/target/arm/unicorn_arm.c ) if(MSVC) target_compile_options(armeb-softmmu PRIVATE -DNEED_CPU_H /FIarmeb.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/armeb-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/armeb-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) else() target_compile_options(armeb-softmmu PRIVATE -DNEED_CPU_H -include armeb.h -I${CMAKE_BINARY_DIR}/armeb-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) endif() endif() if (UNICORN_HAS_AARCH64) -add_library(aarch64-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - qemu/fpu/softfloat.c - qemu/hw/arm/tosa.c - qemu/hw/arm/virt.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-arm/cpu.c - qemu/target-arm/cpu64.c - qemu/target-arm/crypto_helper.c - qemu/target-arm/helper-a64.c - qemu/target-arm/helper.c - qemu/target-arm/iwmmxt_helper.c - qemu/target-arm/neon_helper.c - qemu/target-arm/op_helper.c - qemu/target-arm/psci.c - qemu/target-arm/translate-a64.c - qemu/target-arm/translate.c - qemu/target-arm/unicorn_aarch64.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(aarch64-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/arm/cpu64.c + qemu/target/arm/cpu.c + qemu/target/arm/crypto_helper.c + qemu/target/arm/debug_helper.c + qemu/target/arm/helper-a64.c + 
qemu/target/arm/helper.c + qemu/target/arm/iwmmxt_helper.c + qemu/target/arm/m_helper.c + qemu/target/arm/neon_helper.c + qemu/target/arm/op_helper.c + qemu/target/arm/pauth_helper.c + qemu/target/arm/psci.c + qemu/target/arm/sve_helper.c + qemu/target/arm/tlb_helper.c + qemu/target/arm/translate-a64.c + qemu/target/arm/translate.c + qemu/target/arm/translate-sve.c + qemu/target/arm/vec_helper.c + qemu/target/arm/vfp_helper.c + qemu/target/arm/unicorn_aarch64.c ) if(MSVC) target_compile_options(aarch64-softmmu PRIVATE -DNEED_CPU_H /FIaarch64.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/aarch64-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/aarch64-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) else() target_compile_options(aarch64-softmmu PRIVATE -DNEED_CPU_H -include aarch64.h -I${CMAKE_BINARY_DIR}/aarch64-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) endif() -add_library(aarch64eb-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - qemu/fpu/softfloat.c - qemu/hw/arm/tosa.c - qemu/hw/arm/virt.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-arm/cpu.c - qemu/target-arm/cpu64.c - qemu/target-arm/crypto_helper.c - qemu/target-arm/helper-a64.c - qemu/target-arm/helper.c - qemu/target-arm/iwmmxt_helper.c - qemu/target-arm/neon_helper.c - qemu/target-arm/op_helper.c - qemu/target-arm/psci.c - qemu/target-arm/translate-a64.c - qemu/target-arm/translate.c - qemu/target-arm/unicorn_aarch64.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(aarch64eb-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/arm/cpu64.c + qemu/target/arm/cpu.c + qemu/target/arm/crypto_helper.c + qemu/target/arm/debug_helper.c + qemu/target/arm/helper-a64.c + qemu/target/arm/helper.c + qemu/target/arm/iwmmxt_helper.c + qemu/target/arm/m_helper.c + qemu/target/arm/neon_helper.c + qemu/target/arm/op_helper.c + qemu/target/arm/pauth_helper.c + qemu/target/arm/psci.c + qemu/target/arm/sve_helper.c + qemu/target/arm/tlb_helper.c + qemu/target/arm/translate-a64.c + qemu/target/arm/translate.c + qemu/target/arm/translate-sve.c + qemu/target/arm/vec_helper.c + qemu/target/arm/vfp_helper.c + qemu/target/arm/unicorn_aarch64.c ) if(MSVC) target_compile_options(aarch64eb-softmmu PRIVATE -DNEED_CPU_H /FIaarch64eb.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/aarch64eb-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/aarch64eb-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) else() target_compile_options(aarch64eb-softmmu PRIVATE -DNEED_CPU_H -include aarch64eb.h -I${CMAKE_BINARY_DIR}/aarch64eb-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/arm ) endif() endif() if (UNICORN_HAS_M68K) -add_library(m68k-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - qemu/fpu/softfloat.c - qemu/hw/m68k/dummy_m68k.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-m68k/cpu.c - qemu/target-m68k/helper.c - qemu/target-m68k/op_helper.c - qemu/target-m68k/translate.c - qemu/target-m68k/unicorn.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(m68k-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/m68k/cpu.c + qemu/target/m68k/fpu_helper.c + qemu/target/m68k/helper.c + qemu/target/m68k/op_helper.c + qemu/target/m68k/softfloat.c + qemu/target/m68k/translate.c + qemu/target/m68k/unicorn.c ) 
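Each emulated CPU above becomes its own <arch>-softmmu library, compiled with -DNEED_CPU_H and a force-included per-target header (-include on gcc/clang, /FI on MSVC), and all of them are later linked into a single libunicorn. From the consumer's side nothing changes: you open whichever architecture was built. A minimal sketch of that consumer-side API, closely mirroring samples/sample_m68k.c (the opcode bytes and addresses are illustrative):

    #include <unicorn/unicorn.h>
    #include <stdio.h>

    int main(void)
    {
        uc_engine *uc;
        const char code[] = "\x76\xed";   /* moveq #-19, %d3 */
        uint64_t address = 0x10000;
        int d3 = 0;

        if (uc_open(UC_ARCH_M68K, UC_MODE_BIG_ENDIAN, &uc) != UC_ERR_OK)
            return 1;
        uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL); /* 2MB for code */
        uc_mem_write(uc, address, code, sizeof(code) - 1);
        uc_emu_start(uc, address, address + sizeof(code) - 1, 0, 0);
        uc_reg_read(uc, UC_M68K_REG_D3, &d3);
        printf("d3 = %d\n", d3);          /* expect -19 */
        return uc_close(uc) != UC_ERR_OK;
    }

The same pattern applies to any target enabled in this file; only the UC_ARCH_*/UC_MODE_* pair and the register constants change.
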
if(MSVC) target_compile_options(m68k-softmmu PRIVATE -DNEED_CPU_H /FIm68k.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/m68k-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-m68k + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/m68k-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/m68k ) else() target_compile_options(m68k-softmmu PRIVATE -DNEED_CPU_H -include m68k.h -I${CMAKE_BINARY_DIR}/m68k-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-m68k + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/m68k ) endif() endif() if (UNICORN_HAS_MIPS) -add_library(mips-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - qemu/fpu/softfloat.c - qemu/hw/mips/addr.c - qemu/hw/mips/cputimer.c - qemu/hw/mips/mips_r4k.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-mips/cpu.c - qemu/target-mips/dsp_helper.c - qemu/target-mips/helper.c - qemu/target-mips/lmi_helper.c - qemu/target-mips/msa_helper.c - qemu/target-mips/op_helper.c - qemu/target-mips/translate.c - qemu/target-mips/unicorn.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(mips-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/mips/cp0_helper.c + qemu/target/mips/cp0_timer.c + qemu/target/mips/cpu.c + qemu/target/mips/dsp_helper.c + qemu/target/mips/fpu_helper.c + qemu/target/mips/helper.c + qemu/target/mips/lmi_helper.c + qemu/target/mips/msa_helper.c + qemu/target/mips/op_helper.c + qemu/target/mips/translate.c + qemu/target/mips/unicorn.c ) if(MSVC) target_compile_options(mips-softmmu PRIVATE -DNEED_CPU_H /FImips.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/mips-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/mips-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) else() target_compile_options(mips-softmmu PRIVATE -DNEED_CPU_H -include mips.h -I${CMAKE_BINARY_DIR}/mips-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) endif() -add_library(mipsel-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - qemu/fpu/softfloat.c - qemu/hw/mips/addr.c - qemu/hw/mips/cputimer.c - qemu/hw/mips/mips_r4k.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-mips/cpu.c - qemu/target-mips/dsp_helper.c - qemu/target-mips/helper.c - qemu/target-mips/lmi_helper.c - qemu/target-mips/msa_helper.c - qemu/target-mips/op_helper.c - qemu/target-mips/translate.c - qemu/target-mips/unicorn.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(mipsel-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/mips/cp0_helper.c + qemu/target/mips/cp0_timer.c + qemu/target/mips/cpu.c + qemu/target/mips/dsp_helper.c + qemu/target/mips/fpu_helper.c + qemu/target/mips/helper.c + qemu/target/mips/lmi_helper.c + qemu/target/mips/msa_helper.c + qemu/target/mips/op_helper.c + qemu/target/mips/translate.c + qemu/target/mips/unicorn.c ) if(MSVC) target_compile_options(mipsel-softmmu PRIVATE -DNEED_CPU_H /FImipsel.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/mipsel-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/mipsel-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) else() target_compile_options(mipsel-softmmu PRIVATE -DNEED_CPU_H -include mipsel.h -I${CMAKE_BINARY_DIR}/mipsel-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) endif() -add_library(mips64-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - 
qemu/fpu/softfloat.c - qemu/hw/mips/addr.c - qemu/hw/mips/cputimer.c - qemu/hw/mips/mips_r4k.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-mips/cpu.c - qemu/target-mips/dsp_helper.c - qemu/target-mips/helper.c - qemu/target-mips/lmi_helper.c - qemu/target-mips/msa_helper.c - qemu/target-mips/op_helper.c - qemu/target-mips/translate.c - qemu/target-mips/unicorn.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(mips64-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/mips/cp0_helper.c + qemu/target/mips/cp0_timer.c + qemu/target/mips/cpu.c + qemu/target/mips/dsp_helper.c + qemu/target/mips/fpu_helper.c + qemu/target/mips/helper.c + qemu/target/mips/lmi_helper.c + qemu/target/mips/msa_helper.c + qemu/target/mips/op_helper.c + qemu/target/mips/translate.c + qemu/target/mips/unicorn.c ) if(MSVC) target_compile_options(mips64-softmmu PRIVATE -DNEED_CPU_H /FImips64.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/mips64-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/mips64-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) else() target_compile_options(mips64-softmmu PRIVATE -DNEED_CPU_H -include mips64.h -I${CMAKE_BINARY_DIR}/mips64-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) endif() -add_library(mips64el-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - qemu/fpu/softfloat.c - qemu/hw/mips/addr.c - qemu/hw/mips/cputimer.c - qemu/hw/mips/mips_r4k.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-mips/cpu.c - qemu/target-mips/dsp_helper.c - qemu/target-mips/helper.c - qemu/target-mips/lmi_helper.c - qemu/target-mips/msa_helper.c - qemu/target-mips/op_helper.c - qemu/target-mips/translate.c - qemu/target-mips/unicorn.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(mips64el-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/mips/cp0_helper.c + qemu/target/mips/cp0_timer.c + qemu/target/mips/cpu.c + qemu/target/mips/dsp_helper.c + qemu/target/mips/fpu_helper.c + qemu/target/mips/helper.c + qemu/target/mips/lmi_helper.c + qemu/target/mips/msa_helper.c + qemu/target/mips/op_helper.c + qemu/target/mips/translate.c + qemu/target/mips/unicorn.c ) if(MSVC) target_compile_options(mips64el-softmmu PRIVATE -DNEED_CPU_H /FImips64el.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/mips64el-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/mips64el-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) else() target_compile_options(mips64el-softmmu PRIVATE -DNEED_CPU_H -include mips64el.h -I${CMAKE_BINARY_DIR}/mips64el-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/mips ) endif() endif() if (UNICORN_HAS_SPARC) -add_library(sparc-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - qemu/fpu/softfloat.c - qemu/hw/sparc/leon3.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-sparc/cc_helper.c - qemu/target-sparc/cpu.c - qemu/target-sparc/fop_helper.c - qemu/target-sparc/helper.c - qemu/target-sparc/int32_helper.c - qemu/target-sparc/ldst_helper.c - qemu/target-sparc/mmu_helper.c - qemu/target-sparc/translate.c - qemu/target-sparc/unicorn.c - qemu/target-sparc/win_helper.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(sparc-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/sparc/cc_helper.c + 
qemu/target/sparc/cpu.c + qemu/target/sparc/fop_helper.c + qemu/target/sparc/helper.c + qemu/target/sparc/int32_helper.c + qemu/target/sparc/ldst_helper.c + qemu/target/sparc/mmu_helper.c + qemu/target/sparc/translate.c + qemu/target/sparc/win_helper.c + qemu/target/sparc/unicorn.c ) if(MSVC) target_compile_options(sparc-softmmu PRIVATE -DNEED_CPU_H /FIsparc.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/sparc-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-sparc + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/sparc-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/sparc ) else() target_compile_options(sparc-softmmu PRIVATE -DNEED_CPU_H -include sparc.h -I${CMAKE_BINARY_DIR}/sparc-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-sparc + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/sparc ) endif() -add_library(sparc64-softmmu OBJECT - qemu/cpu-exec.c - qemu/cpus.c - qemu/cputlb.c - qemu/exec.c - qemu/fpu/softfloat.c - qemu/hw/sparc64/sun4u.c - qemu/ioport.c - qemu/memory.c - qemu/memory_mapping.c - qemu/target-sparc/cc_helper.c - qemu/target-sparc/cpu.c - qemu/target-sparc/fop_helper.c - qemu/target-sparc/helper.c - qemu/target-sparc/int64_helper.c - qemu/target-sparc/ldst_helper.c - qemu/target-sparc/mmu_helper.c - qemu/target-sparc/translate.c - qemu/target-sparc/unicorn64.c - qemu/target-sparc/vis_helper.c - qemu/target-sparc/win_helper.c - qemu/tcg/optimize.c - qemu/tcg/tcg.c - qemu/translate-all.c +add_library(sparc64-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/sparc/cc_helper.c + qemu/target/sparc/cpu.c + qemu/target/sparc/fop_helper.c + qemu/target/sparc/helper.c + qemu/target/sparc/int64_helper.c + qemu/target/sparc/ldst_helper.c + qemu/target/sparc/mmu_helper.c + qemu/target/sparc/translate.c + qemu/target/sparc/vis_helper.c + qemu/target/sparc/win_helper.c + qemu/target/sparc/unicorn64.c ) if(MSVC) target_compile_options(sparc64-softmmu PRIVATE -DNEED_CPU_H /FIsparc64.h - /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/sparc64-softmmu - /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-sparc + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/sparc64-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/sparc ) else() target_compile_options(sparc64-softmmu PRIVATE -DNEED_CPU_H -include sparc64.h -I${CMAKE_BINARY_DIR}/sparc64-softmmu - -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-sparc + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/sparc ) endif() endif() -set(UNICORN_SRCS_COMMON +if (UNICORN_HAS_PPC) +add_library(ppc-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/hw/ppc/ppc.c + qemu/hw/ppc/ppc_booke.c + + qemu/libdecnumber/decContext.c + qemu/libdecnumber/decNumber.c + qemu/libdecnumber/dpd/decimal128.c + qemu/libdecnumber/dpd/decimal32.c + qemu/libdecnumber/dpd/decimal64.c + + qemu/target/ppc/cpu.c + qemu/target/ppc/cpu-models.c + qemu/target/ppc/dfp_helper.c + qemu/target/ppc/excp_helper.c + qemu/target/ppc/fpu_helper.c + qemu/target/ppc/int_helper.c + qemu/target/ppc/machine.c + qemu/target/ppc/mem_helper.c + qemu/target/ppc/misc_helper.c + qemu/target/ppc/mmu-hash32.c + qemu/target/ppc/mmu_helper.c + qemu/target/ppc/timebase_helper.c + qemu/target/ppc/translate.c + qemu/target/ppc/unicorn.c +) + +if(MSVC) + target_compile_options(ppc-softmmu PRIVATE + -DNEED_CPU_H + /FIppc.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/ppc-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/ppc + ) +else() + target_compile_options(ppc-softmmu PRIVATE + -DNEED_CPU_H + -include ppc.h + -I${CMAKE_BINARY_DIR}/ppc-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/ppc + ) +endif() + +add_library(ppc64-softmmu + ${UNICORN_ARCH_COMMON} + + 
qemu/hw/ppc/ppc.c + qemu/hw/ppc/ppc_booke.c + + qemu/libdecnumber/decContext.c + qemu/libdecnumber/decNumber.c + qemu/libdecnumber/dpd/decimal128.c + qemu/libdecnumber/dpd/decimal32.c + qemu/libdecnumber/dpd/decimal64.c + + qemu/target/ppc/compat.c + qemu/target/ppc/cpu.c + qemu/target/ppc/cpu-models.c + qemu/target/ppc/dfp_helper.c + qemu/target/ppc/excp_helper.c + qemu/target/ppc/fpu_helper.c + qemu/target/ppc/int_helper.c + qemu/target/ppc/machine.c + qemu/target/ppc/mem_helper.c + qemu/target/ppc/misc_helper.c + qemu/target/ppc/mmu-book3s-v3.c + qemu/target/ppc/mmu-hash32.c + qemu/target/ppc/mmu-hash64.c + qemu/target/ppc/mmu_helper.c + qemu/target/ppc/mmu-radix64.c + qemu/target/ppc/timebase_helper.c + qemu/target/ppc/translate.c + qemu/target/ppc/unicorn.c +) + +if(MSVC) + target_compile_options(ppc64-softmmu PRIVATE + -DNEED_CPU_H + /FIppc64.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/ppc64-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/ppc + ) +else() + target_compile_options(ppc64-softmmu PRIVATE + -DNEED_CPU_H + -include ppc64.h + -I${CMAKE_BINARY_DIR}/ppc64-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/ppc + ) +endif() +endif() + +if (UNICORN_HAS_RISCV) +add_library(riscv32-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/riscv/cpu.c + qemu/target/riscv/cpu_helper.c + qemu/target/riscv/csr.c + qemu/target/riscv/fpu_helper.c + qemu/target/riscv/op_helper.c + qemu/target/riscv/pmp.c + qemu/target/riscv/translate.c + qemu/target/riscv/unicorn.c +) + +if(MSVC) + target_compile_options(riscv32-softmmu PRIVATE + -DNEED_CPU_H + /FIriscv32.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/riscv32-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/riscv + ) +else() + target_compile_options(riscv32-softmmu PRIVATE + -DNEED_CPU_H + -include riscv32.h + -I${CMAKE_BINARY_DIR}/riscv32-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/riscv + ) +endif() + +add_library(riscv64-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/target/riscv/cpu.c + qemu/target/riscv/cpu_helper.c + qemu/target/riscv/csr.c + qemu/target/riscv/fpu_helper.c + qemu/target/riscv/op_helper.c + qemu/target/riscv/pmp.c + qemu/target/riscv/translate.c + qemu/target/riscv/unicorn.c +) + +if(MSVC) + target_compile_options(riscv64-softmmu PRIVATE + -DNEED_CPU_H + /FIriscv64.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/riscv64-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/riscv + ) +else() + target_compile_options(riscv64-softmmu PRIVATE + -DNEED_CPU_H + -include riscv64.h + -I${CMAKE_BINARY_DIR}/riscv64-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/riscv + ) +endif() +endif() + + +set(UNICORN_SRCS + uc.c + + qemu/softmmu/vl.c + + qemu/hw/core/cpu.c +) + +set(UNICORN_COMMON_SRCS + list.c - qemu/accel.c - qemu/glib_compat.c - qemu/hw/core/machine.c - qemu/hw/core/qdev.c - qemu/qapi/qapi-dealloc-visitor.c - qemu/qapi/qapi-visit-core.c - qemu/qapi/qmp-input-visitor.c - qemu/qapi/qmp-output-visitor.c - qemu/qapi/string-input-visitor.c - qemu/qemu-log.c - qemu/qemu-timer.c - qemu/qobject/qbool.c - qemu/qobject/qdict.c - qemu/qobject/qerror.c - qemu/qobject/qfloat.c - qemu/qobject/qint.c - qemu/qobject/qlist.c - qemu/qobject/qstring.c - qemu/qom/container.c - qemu/qom/cpu.c - qemu/qom/object.c - qemu/qom/qom-qobject.c - qemu/tcg-runtime.c - qemu/util/aes.c + + glib_compat/glib_compat.c + glib_compat/gtestutils.c + glib_compat/garray.c + glib_compat/gtree.c + glib_compat/grand.c + glib_compat/glist.c + glib_compat/gmem.c + glib_compat/gpattern.c + glib_compat/gslice.c + qemu/util/bitmap.c qemu/util/bitops.c qemu/util/crc32c.c 
qemu/util/cutils.c - qemu/util/error.c qemu/util/getauxval.c + qemu/util/guest-random.c qemu/util/host-utils.c - qemu/util/module.c + qemu/util/osdep.c + qemu/util/qdist.c + qemu/util/qemu-timer.c qemu/util/qemu-timer-common.c - qemu/vl.c - uc.c + qemu/util/range.c + qemu/util/qht.c + qemu/util/pagesize.c + qemu/util/cacheinfo.c + + qemu/crypto/aes.c ) -if (WIN32) - set(UNICORN_SRCS - ${UNICORN_SRCS_COMMON} +# A workaround to avoid circle dependency between unicorn and *-softmmu +if (MSVC) + set(UNICORN_COMMON_SRCS + ${UNICORN_COMMON_SRCS} qemu/util/oslib-win32.c qemu/util/qemu-thread-win32.c - qemu/util/qemu-error.c - ${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/qapi-types.c - ${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/qapi-visit.c ) - if(CMAKE_SIZEOF_VOID_P EQUAL 8) - if(MSVC_VERSION LESS 1600 AND MSVC_IDE) - add_custom_command(OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/build/setjmp-wrapper-win32.dir/setjmp-wrapper-win32.obj" - COMMAND ml64 /c /nologo /Fo"${CMAKE_CURRENT_SOURCE_DIR}/build/setjmp-wrapper-win32.dir/setjmp-wrapper-win32.obj" /W3 /errorReport:prompt /Ta"${CMAKE_CURRENT_SOURCE_DIR}/qemu/util/setjmp-wrapper-win32.asm" - DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/qemu/util/setjmp-wrapper-win32.asm" - ) - set(UNICORN_SRCS ${UNICORN_SRCS} "${CMAKE_CURRENT_SOURCE_DIR}/build/setjmp-wrapper-win32.dir/setjmp-wrapper-win32.obj") - else() - enable_language(ASM_MASM) - endif() - set(UNICORN_SRCS ${UNICORN_SRCS} qemu/util/setjmp-wrapper-win32.asm) + if (CMAKE_SIZEOF_VOID_P EQUAL 8) + enable_language(ASM_MASM) + set(UNICORN_COMMON_SRCS ${UNICORN_COMMON_SRCS} qemu/util/setjmp-wrapper-win32.asm) endif() else() - set(UNICORN_SRCS - ${UNICORN_SRCS_COMMON} + set(UNICORN_COMMON_SRCS + ${UNICORN_COMMON_SRCS} qemu/util/oslib-posix.c qemu/util/qemu-thread-posix.c - qemu/qapi-types.c - qemu/qapi-visit.c ) endif() -if (UNICORN_HAS_X86) - list(APPEND UNICORN_COMPILE_OPTIONS -DUNICORN_HAS_X86) - list(APPEND UNICORN_OBJECT_LIBRARIES x86_64-softmmu) - list(APPEND UNICORN_SAMPLE_FILE sample_x86 sample_x86_32_gdt_and_seg_regs sample_batch_reg mem_apis shellcode) -endif() -if (UNICORN_HAS_ARM) - list(APPEND UNICORN_COMPILE_OPTIONS -DUNICORN_HAS_ARM -DUNICORN_HAS_ARMEB) - list(APPEND UNICORN_OBJECT_LIBRARIES arm-softmmu armeb-softmmu) - list(APPEND UNICORN_SAMPLE_FILE sample_arm sample_armeb) -endif() -if (UNICORN_HAS_AARCH64) - list(APPEND UNICORN_COMPILE_OPTIONS -DUNICORN_HAS_ARM64) - list(APPEND UNICORN_OBJECT_LIBRARIES aarch64-softmmu aarch64eb-softmmu) - list(APPEND UNICORN_SAMPLE_FILE sample_arm64 sample_arm64eb) -endif() -if (UNICORN_HAS_M68K) - list(APPEND UNICORN_COMPILE_OPTIONS -DUNICORN_HAS_M68K) - list(APPEND UNICORN_OBJECT_LIBRARIES m68k-softmmu) - list(APPEND UNICORN_SAMPLE_FILE sample_m68k) -endif() -if (UNICORN_HAS_MIPS) - list(APPEND UNICORN_COMPILE_OPTIONS -DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL) - list(APPEND UNICORN_OBJECT_LIBRARIES mips-softmmu mipsel-softmmu mips64-softmmu mips64el-softmmu) - list(APPEND UNICORN_SAMPLE_FILE sample_mips) -endif() -if (UNICORN_HAS_SPARC) - list(APPEND UNICORN_COMPILE_OPTIONS -DUNICORN_HAS_SPARC) - list(APPEND UNICORN_OBJECT_LIBRARIES sparc-softmmu sparc64-softmmu) - list(APPEND UNICORN_SAMPLE_FILE sample_sparc) -endif() - -foreach(OBJECT_LIBRARY ${UNICORN_OBJECT_LIBRARIES}) - list(APPEND UNICORN_SRCS $) -endforeach() - -add_library(unicorn - ${UNICORN_SRCS} +add_library(unicorn-common + ${UNICORN_COMMON_SRCS} ) -if(WIN32) - if(BUILD_SHARED_LIBS) - list(APPEND UNICORN_COMPILE_OPTIONS -DUNICORN_SHARED) - endif() +if (NOT MSVC) + 
target_link_libraries(unicorn-common pthread) +endif() - set_target_properties(unicorn PROPERTIES - VERSION "${UNICORN_VERSION_MAJOR}.${UNICORN_VERSION_MINOR}" +if (UNICORN_BUILD_SHARED) + add_library(unicorn SHARED + ${UNICORN_SRCS} ) else() - target_link_libraries(unicorn PRIVATE m) - - set_target_properties(unicorn PROPERTIES - VERSION ${UNICORN_VERSION_MAJOR} - SOVERSION ${UNICORN_VERSION_MAJOR} + add_library(unicorn STATIC + ${UNICORN_SRCS} ) endif() +enable_testing() +set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} unicorn-common) +if (UNICORN_HAS_X86) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_X86) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} x86_64-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_x86 sample_x86_32_gdt_and_seg_regs sample_batch_reg mem_apis shellcode) + target_link_libraries(x86_64-softmmu unicorn-common) + set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_x86) +endif() +if (UNICORN_HAS_ARM) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_ARM) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} arm-softmmu armeb-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_arm) + target_link_libraries(arm-softmmu unicorn-common) + target_link_libraries(armeb-softmmu unicorn-common) + set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_arm) +endif() +if (UNICORN_HAS_AARCH64) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_ARM64) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} aarch64-softmmu aarch64eb-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_arm64) + target_link_libraries(aarch64-softmmu unicorn-common) + target_link_libraries(aarch64eb-softmmu unicorn-common) + set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_arm64) +endif() +if (UNICORN_HAS_M68K) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_M68K) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} m68k-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_m68k) + target_link_libraries(m68k-softmmu unicorn-common) + set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_m68k) +endif() +if (UNICORN_HAS_MIPS) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} mips-softmmu mipsel-softmmu mips64-softmmu mips64el-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_mips) + target_link_libraries(mips-softmmu unicorn-common) + target_link_libraries(mipsel-softmmu unicorn-common) + target_link_libraries(mips64-softmmu unicorn-common) + target_link_libraries(mips64el-softmmu unicorn-common) + set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_mips) +endif() +if (UNICORN_HAS_SPARC) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_SPARC) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} sparc-softmmu sparc64-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_sparc) + target_link_libraries(sparc-softmmu unicorn-common) + target_link_libraries(sparc64-softmmu unicorn-common) + set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_sparc) +endif() +if (UNICORN_HAS_PPC) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_PPC) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} ppc-softmmu ppc64-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_ppc) + target_link_libraries(ppc-softmmu unicorn-common) + target_link_libraries(ppc64-softmmu 
unicorn-common)
+    set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_ppc)
+endif()
+if (UNICORN_HAS_RISCV)
+    set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_RISCV)
+    set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} riscv32-softmmu riscv64-softmmu)
+    set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_riscv)
+    target_link_libraries(riscv32-softmmu unicorn-common)
+    target_link_libraries(riscv64-softmmu unicorn-common)
+    set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_riscv)
+endif()
+
 target_compile_options(unicorn PRIVATE
     ${UNICORN_COMPILE_OPTIONS}
 )
-target_include_directories(unicorn PUBLIC
-    include
-)
-
-if(UNICORN_BUILD_SAMPLES)
-    find_package(Threads REQUIRED)
-
-    foreach(SAMPLE_FILE ${UNICORN_SAMPLE_FILE})
-        add_executable(${SAMPLE_FILE}
-            ${CMAKE_CURRENT_SOURCE_DIR}/samples/${SAMPLE_FILE}.c
-        )
-        target_link_libraries(${SAMPLE_FILE} PRIVATE
-            unicorn
-            ${CMAKE_THREAD_LIBS_INIT}
-        )
-    endforeach()
+if (MINGW)
+    set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} pthread)
 endif()

-if(UNICORN_INSTALL)
+if(UNICORN_TARGET_ARCH STREQUAL "riscv")
+    set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} atomic)
+endif()
+
+if(MSVC)
+    if (UNICORN_BUILD_SHARED)
+        target_compile_options(unicorn PRIVATE
+            -DUNICORN_SHARED
+        )
+    endif()
+
+    target_link_libraries(unicorn
+        ${UNICORN_LINK_LIBRARIES}
+    )
+else()
+    target_link_libraries(unicorn
+        ${UNICORN_LINK_LIBRARIES}
+        m
+    )
+    set_target_properties(unicorn PROPERTIES
+        VERSION ${UNICORN_VERSION_MAJOR}
+        SOVERSION ${UNICORN_VERSION_MAJOR}
+    )
+endif()
+
+if(MSVC)
+    set(SAMPLES_LIB
+        unicorn
+    )
+else()
+    set(SAMPLES_LIB
+        unicorn
+        pthread
+    )
+endif()
+
+foreach(SAMPLE_FILE ${UNICORN_SAMPLE_FILE})
+    add_executable(${SAMPLE_FILE}
+        ${CMAKE_CURRENT_SOURCE_DIR}/samples/${SAMPLE_FILE}.c
+    )
+    target_link_libraries(${SAMPLE_FILE}
+        ${SAMPLES_LIB}
+    )
+endforeach(SAMPLE_FILE)
+
+foreach(TEST_FILE ${UNICORN_TEST_FILE})
+    add_executable(${TEST_FILE}
+        ${CMAKE_CURRENT_SOURCE_DIR}/tests/unit/${TEST_FILE}.c
+    )
+    target_link_libraries(${TEST_FILE}
+        ${SAMPLES_LIB}
+    )
+    add_test(${TEST_FILE} ${TEST_FILE})
+endforeach(TEST_FILE)
+
+
+if(NOT MSVC)
 include("GNUInstallDirs")
 file(GLOB UNICORN_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/unicorn/*.h)
 install(TARGETS unicorn
diff --git a/CREDITS.TXT b/CREDITS.TXT
index 3e6fd5ac..8f5970d6 100644
--- a/CREDITS.TXT
+++ b/CREDITS.TXT
@@ -5,7 +5,10 @@ Key developers
 ==============
 Nguyen Anh Quynh
 Dang Hoang Vu
+Huitao Chen (chenhuitao)
 Ziqiao Kong (lazymio)
+KaiJern Lau (xwings)
+
 Beta testers (in no particular order)
 ==============================
@@ -72,5 +75,6 @@
 Philippe Antoine (Catena cyber): fuzzing
 Huitao Chen (chenhuitao) & KaiJern Lau (xwings): Cmake support
 Huitao Chen (chenhuitao) & KaiJern Lau (xwings): Python3 support for building
 Kevin Foo (chfl4gs): Travis-CI migration
+Simon Gorchakov: PowerPC target
+Stuart Dootson (studoot): MSVC compatibility with PowerPC target support
 Ziqiao Kong (lazymio): uc_context_free() API and various bug fixes & improvements.
-Sven Almgren (blindmatrix): bug fix
diff --git a/ChangeLog b/ChangeLog
index 1e5019de..8ff87b51 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,76 +1,5 @@
 This file details the changelog of Unicorn Engine.
-------------------------------- -[Version 1.0.3]: May 26th, 2021 - -- Fix some building issues - - Fix build with LLVM on Powerpc64(le) - - cmake: enable UNICORN_HAS_ARMEB when ARM is on - - Better support MingW building - - Better support FreeBSD host - - Better support VS2008/VS2010 - -- Fix some issues in the core: - - Fix wrong sync after UC_ERR_[READ, WRITE, FETCH]_[UNMAPPED, PROT] - - Support querying architecture mode besides arm - - Fix pausing within Thumb2 ITE blocks - -- Arm: - - Support Arm BE8 mode - -- X86: - - Fix FPIP & FTST instructions - -- Bindings: - - Java: remove deprecated javah and build with javac - - Python: handle exceptions raised in hook functions - - Rust binding - ------------------------------------ -[Version 1.0.2]: October 21st, 2020 - -- Fix Java binding compilation -- Enable building for ARM little-endian only (ignore big-endian) - ------------------------------------- -[Version 1.0.2-rc6]: Sept 24th, 2020 - -- Add uc_context_free() API -- Fix context saving/retoring API (core & Python binding) - ------------------------------------- -[Version 1.0.2-rc5]: Sept 22nd, 2020 - -- Add cmake option to build Unicorn as a static library -- Fix error handling of mmap() -- uc_emu_start() can be reentrant -- Fix naming conflicts when built with systemd -- Fix setjmp/longjmp on native Windows -- Fix enabled hooks even after deleting them -- X86: - - Fix 64bit fstenv - - Fix IP value of 16bit mode -- ARM: - - Fix APSR handling -- Python: Remove UC_ERR_TIMEOUT - ------------------------------------ -[Version 1.0.2-rc4]: May 29th, 2020 - -- No longer require Python to build -- Fix recursive UC_HOOK_MEM callbacks for cross pages access -- Remove UC_ERR_TIMEOUT, so timeout on uc_emu_start() is not considered error -- Added UC_QUERY_TIMEOUT to query exit reason -- Fix UAF when deleting hook while in hook callback -- Ensure that hooks are unaffected by a request to stop emulation. -- Fix block hooks being called twice after an early exit from execution. -- Fix binding install on python2 (MacOS) -- X86: - - Support read/write STn registers - - Support read/write X64 base regs -- ARM64: - - Support some new registers - ---------------------------------- [Version 1.0.1]: April 20th, 2017 diff --git a/Makefile b/Makefile deleted file mode 100644 index 5128a022..00000000 --- a/Makefile +++ /dev/null @@ -1,424 +0,0 @@ -# Unicorn Emulator Engine -# By Dang Hoang Vu , 2015 - - -.PHONY: all clean install uninstall dist header - -include config.mk -include pkgconfig.mk # package version - -LIBNAME = unicorn -UNAME_S := $(shell uname -s) -# SMP_MFLAGS is used for controlling the amount of parallelism used -# in external 'make' invocations. If the user doesn't override it, it -# does "-j4". That is, it uses 4 job threads. If you want to use more or less, -# pass in a different -jX, with X being the number of threads. -# For example, to completely disable parallel building, pass "-j1". -# If you want to use 16 job threads, use "-j16". -SMP_MFLAGS := -j4 - -UC_GET_OBJ = $(shell for i in \ - $$(grep '$(1)' $(2) | \ - grep '\.o' | cut -d '=' -f 2); do \ - echo $$i | grep '\.o' > /dev/null 2>&1; \ - if [ $$? 
= 0 ]; then \ - echo '$(3)'$$i; \ - fi; done; echo) - -UC_TARGET_OBJ = $(filter-out qemu/../%,$(call UC_GET_OBJ,obj-,qemu/Makefile.objs, qemu/)) -UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-,qemu/hw/core/Makefile.objs, qemu/hw/core/) -UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-,qemu/qapi/Makefile.objs, qemu/qapi/) -UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-,qemu/qobject/Makefile.objs, qemu/qobject/) -UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-,qemu/qom/Makefile.objs, qemu/qom/) -UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-y,qemu/util/Makefile.objs, qemu/util/) -ifneq ($(filter MINGW%,$(UNAME_S)),) -UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-$$(CONFIG_WIN32),qemu/util/Makefile.objs, qemu/util/) -else -UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-$$(CONFIG_POSIX),qemu/util/Makefile.objs, qemu/util/) -endif - -UC_TARGET_OBJ_X86 = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/x86_64-softmmu/) -UC_TARGET_OBJ_X86 += $(call UC_GET_OBJ,obj-,qemu/hw/i386/Makefile.objs, qemu/x86_64-softmmu/hw/i386/) -UC_TARGET_OBJ_X86 += $(call UC_GET_OBJ,obj-,qemu/hw/intc/Makefile.objs, qemu/x86_64-softmmu/hw/intc/) -UC_TARGET_OBJ_X86 += $(call UC_GET_OBJ,obj-,qemu/target-i386/Makefile.objs, qemu/x86_64-softmmu/target-i386/) - -UC_TARGET_OBJ_ARM = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/arm-softmmu/) -UC_TARGET_OBJ_ARM += $(call UC_GET_OBJ,obj-,qemu/hw/arm/Makefile.objs, qemu/arm-softmmu/hw/arm/) -UC_TARGET_OBJ_ARM += $(call UC_GET_OBJ,obj-y,qemu/target-arm/Makefile.objs, qemu/arm-softmmu/target-arm/) -UC_TARGET_OBJ_ARM += $(call UC_GET_OBJ,obj-$$(CONFIG_SOFTMMU),qemu/target-arm/Makefile.objs, qemu/arm-softmmu/target-arm/) -UC_TARGET_OBJ_ARM += $(call UC_GET_OBJ,obj-$$(TARGET_ARM),qemu/target-arm/Makefile.objs, qemu/arm-softmmu/target-arm/) - -UC_TARGET_OBJ_ARMEB = $(subst /arm-softmmu/,/armeb-softmmu/,$(UC_TARGET_OBJ_ARM)) - -UC_TARGET_OBJ_M68K = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/m68k-softmmu/) -UC_TARGET_OBJ_M68K += $(call UC_GET_OBJ,obj-,qemu/hw/m68k/Makefile.objs, qemu/m68k-softmmu/hw/m68k/) -UC_TARGET_OBJ_M68K += $(call UC_GET_OBJ,obj-,qemu/target-m68k/Makefile.objs, qemu/m68k-softmmu/target-m68k/) - -UC_TARGET_OBJ_AARCH64 = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/aarch64-softmmu/) -UC_TARGET_OBJ_AARCH64 += $(call UC_GET_OBJ,obj-,qemu/hw/arm/Makefile.objs, qemu/aarch64-softmmu/hw/arm/) -UC_TARGET_OBJ_AARCH64 += $(call UC_GET_OBJ,obj-y,qemu/target-arm/Makefile.objs, qemu/aarch64-softmmu/target-arm/) -UC_TARGET_OBJ_AARCH64 += $(call UC_GET_OBJ,obj-$$(CONFIG_SOFTMMU),qemu/target-arm/Makefile.objs, qemu/aarch64-softmmu/target-arm/) -UC_TARGET_OBJ_AARCH64 += $(call UC_GET_OBJ,obj-$$(TARGET_AARCH64),qemu/target-arm/Makefile.objs, qemu/aarch64-softmmu/target-arm/) - -UC_TARGET_OBJ_AARCH64EB = $(subst /aarch64-softmmu/,/aarch64eb-softmmu/,$(UC_TARGET_OBJ_AARCH64)) - -UC_TARGET_OBJ_MIPS = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/mips-softmmu/) -UC_TARGET_OBJ_MIPS += $(call UC_GET_OBJ,obj-,qemu/hw/mips/Makefile.objs, qemu/mips-softmmu/hw/mips/) -UC_TARGET_OBJ_MIPS += $(call UC_GET_OBJ,obj-,qemu/target-mips/Makefile.objs, qemu/mips-softmmu/target-mips/) - -UC_TARGET_OBJ_MIPSEL = $(subst /mips-softmmu/,/mipsel-softmmu/,$(UC_TARGET_OBJ_MIPS)) - -UC_TARGET_OBJ_MIPS64 = $(subst /mips-softmmu/,/mips64-softmmu/,$(UC_TARGET_OBJ_MIPS)) - -UC_TARGET_OBJ_MIPS64EL = $(subst /mips-softmmu/,/mips64el-softmmu/,$(UC_TARGET_OBJ_MIPS)) - -UC_TARGET_OBJ_SPARC = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/sparc-softmmu/) -UC_TARGET_OBJ_SPARC += $(call UC_GET_OBJ,obj-,qemu/hw/sparc/Makefile.objs, 
qemu/sparc-softmmu/hw/sparc/) -UC_TARGET_OBJ_SPARC += $(call UC_GET_OBJ,obj-y,qemu/target-sparc/Makefile.objs, qemu/sparc-softmmu/target-sparc/) -UC_TARGET_OBJ_SPARC += $(call UC_GET_OBJ,obj-$$(TARGET_SPARC),qemu/target-sparc/Makefile.objs, qemu/sparc-softmmu/target-sparc/) - -UC_TARGET_OBJ_SPARC64 = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/sparc64-softmmu/) -UC_TARGET_OBJ_SPARC64 += $(call UC_GET_OBJ,obj-,qemu/hw/sparc64/Makefile.objs, qemu/sparc64-softmmu/hw/sparc64/) -UC_TARGET_OBJ_SPARC64 += $(call UC_GET_OBJ,obj-y,qemu/target-sparc/Makefile.objs, qemu/sparc64-softmmu/target-sparc/) -UC_TARGET_OBJ_SPARC64 += $(call UC_GET_OBJ,obj-$$(TARGET_SPARC64),qemu/target-sparc/Makefile.objs, qemu/sparc64-softmmu/target-sparc/) - -ifneq (,$(findstring x86,$(UNICORN_ARCHS))) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_X86) - UNICORN_CFLAGS += -DUNICORN_HAS_X86 - UNICORN_TARGETS += x86_64-softmmu, -endif -ifneq (,$(findstring arm,$(UNICORN_ARCHS))) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_ARM) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_ARMEB) - UNICORN_CFLAGS += -DUNICORN_HAS_ARM - UNICORN_CFLAGS += -DUNICORN_HAS_ARMEB - UNICORN_TARGETS += arm-softmmu, - UNICORN_TARGETS += armeb-softmmu, -endif -ifneq (,$(findstring m68k,$(UNICORN_ARCHS))) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_M68K) - UNICORN_CFLAGS += -DUNICORN_HAS_M68K - UNICORN_TARGETS += m68k-softmmu, -endif -ifneq (,$(findstring aarch64,$(UNICORN_ARCHS))) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_AARCH64) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_AARCH64EB) - UNICORN_CFLAGS += -DUNICORN_HAS_ARM64 - UNICORN_CFLAGS += -DUNICORN_HAS_ARM64EB - UNICORN_TARGETS += aarch64-softmmu, - UNICORN_TARGETS += aarch64eb-softmmu, -endif -ifneq (,$(findstring mips,$(UNICORN_ARCHS))) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_MIPS) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_MIPSEL) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_MIPS64) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_MIPS64EL) - UNICORN_CFLAGS += -DUNICORN_HAS_MIPS - UNICORN_CFLAGS += -DUNICORN_HAS_MIPSEL - UNICORN_CFLAGS += -DUNICORN_HAS_MIPS64 - UNICORN_CFLAGS += -DUNICORN_HAS_MIPS64EL - UNICORN_TARGETS += mips-softmmu, - UNICORN_TARGETS += mipsel-softmmu, - UNICORN_TARGETS += mips64-softmmu, - UNICORN_TARGETS += mips64el-softmmu, -endif -ifneq (,$(findstring sparc,$(UNICORN_ARCHS))) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_SPARC) - UC_TARGET_OBJ += $(UC_TARGET_OBJ_SPARC64) - UNICORN_CFLAGS += -DUNICORN_HAS_SPARC - UNICORN_TARGETS += sparc-softmmu,sparc64-softmmu, -endif - -UC_OBJ_ALL = $(UC_TARGET_OBJ) list.o uc.o - -UNICORN_CFLAGS += -fPIC - -# Verbose output? -V ?= 0 - -# on MacOS, by default do not compile in Universal format -MACOS_UNIVERSAL ?= no - -ifeq ($(UNICORN_DEBUG),yes) -CFLAGS += -g -else -CFLAGS += -O3 -UNICORN_QEMU_FLAGS += --disable-debug-info -endif - -ifeq ($(UNICORN_ASAN),yes) -CC = clang -fsanitize=address -fno-omit-frame-pointer -CXX = clang++ -fsanitize=address -fno-omit-frame-pointer -AR = llvm-ar -LDFLAGS := -fsanitize=address ${LDFLAGS} -endif - -ifeq ($(CROSS),) -CC ?= cc -AR ?= ar -RANLIB ?= ranlib -STRIP ?= strip -else -CC = $(CROSS)-gcc -AR = $(CROSS)-ar -RANLIB = $(CROSS)-ranlib -STRIP = $(CROSS)-strip -endif - -ifeq ($(PKG_EXTRA),) -PKG_VERSION = $(PKG_MAJOR).$(PKG_MINOR) -else -PKG_VERSION = $(PKG_MAJOR).$(PKG_MINOR).$(PKG_EXTRA) -endif - -API_MAJOR=$(shell echo `grep -e UC_API_MAJOR include/unicorn/unicorn.h | grep -v = | awk '{print $$3}'` | awk '{print $$1}') - -# Apple? 
-ifeq ($(UNAME_S),Darwin) -EXT = dylib -VERSION_EXT = $(API_MAJOR).$(EXT) -$(LIBNAME)_LDFLAGS += -dynamiclib -install_name @rpath/lib$(LIBNAME).$(VERSION_EXT) -current_version $(PKG_MAJOR).$(PKG_MINOR).$(PKG_EXTRA) -compatibility_version $(PKG_MAJOR).$(PKG_MINOR) -AR_EXT = a -UNICORN_CFLAGS += -fvisibility=hidden - -ifeq ($(MACOS_UNIVERSAL),yes) -$(LIBNAME)_LDFLAGS += -m32 -arch i386 -m64 -arch x86_64 -UNICORN_CFLAGS += -m32 -arch i386 -m64 -arch x86_64 -endif - -# Cygwin? -else ifneq ($(filter CYGWIN%,$(UNAME_S)),) -EXT = dll -AR_EXT = a -BIN_EXT = .exe -UNICORN_CFLAGS := $(UNICORN_CFLAGS:-fPIC=) -#UNICORN_QEMU_FLAGS += --disable-stack-protector - -# mingw? -else ifneq ($(filter MINGW%,$(UNAME_S)),) -EXT = dll -AR_EXT = a -BIN_EXT = .exe -UNICORN_QEMU_FLAGS += --disable-stack-protector -UNICORN_CFLAGS := $(UNICORN_CFLAGS:-fPIC=) -$(LIBNAME)_LDFLAGS += -Wl,--output-def,unicorn.def -DO_WINDOWS_EXPORT = 1 - -# Haiku -else ifneq ($(filter Haiku%,$(UNAME_S)),) -EXT = so -VERSION_EXT = $(EXT).$(API_MAJOR) -AR_EXT = a -$(LIBNAME)_LDFLAGS += -Wl,-Bsymbolic-functions,-soname,lib$(LIBNAME).$(VERSION_EXT) -UNICORN_CFLAGS := $(UNICORN_CFLAGS:-fPIC=) -UNICORN_QEMU_FLAGS += --disable-stack-protector - -# Linux, Darwin -else -EXT = so -VERSION_EXT = $(EXT).$(API_MAJOR) -AR_EXT = a -$(LIBNAME)_LDFLAGS += -Wl,-Bsymbolic-functions,-soname,lib$(LIBNAME).$(VERSION_EXT) -UNICORN_CFLAGS += -fvisibility=hidden -endif - -ifeq ($(UNICORN_SHARED),yes) -ifneq ($(filter MINGW%,$(UNAME_S)),) -LIBRARY = $(LIBNAME).$(EXT) -else ifneq ($(filter CYGWIN%,$(UNAME_S)),) -LIBRARY = cyg$(LIBNAME).$(EXT) -LIBRARY_DLLA = lib$(LIBNAME).$(EXT).$(AR_EXT) -$(LIBNAME)_LDFLAGS += -Wl,--out-implib=$(LIBRARY_DLLA) -$(LIBNAME)_LDFLAGS += -lssp -# Linux, Darwin -else -LIBRARY = lib$(LIBNAME).$(VERSION_EXT) -LIBRARY_SYMLINK = lib$(LIBNAME).$(EXT) -endif -endif - -ifeq ($(UNICORN_STATIC),yes) -ifneq ($(filter MINGW%,$(UNAME_S)),) -ARCHIVE = $(LIBNAME).$(AR_EXT) -# Cygwin, Linux, Darwin -else -ARCHIVE = lib$(LIBNAME).$(AR_EXT) -endif -endif - -INSTALL_BIN ?= install -INSTALL_DATA ?= $(INSTALL_BIN) -m0644 -INSTALL_LIB ?= $(INSTALL_BIN) -m0755 -PKGCFGF = $(LIBNAME).pc -PREFIX ?= /usr -DESTDIR ?= - -LIBDIRARCH ?= lib -# Uncomment the below line to installs x86_64 libs to lib64/ directory. -# Or better, pass 'LIBDIRARCH=lib64' to 'make install/uninstall' via 'make.sh'. -#LIBDIRARCH ?= lib64 - -LIBDIR ?= $(PREFIX)/$(LIBDIRARCH) -INCDIR ?= $(PREFIX)/include -BINDIR ?= $(PREFIX)/bin - -LIBDATADIR ?= $(LIBDIR) - -# Don't redefine $LIBDATADIR when global environment variable -# USE_GENERIC_LIBDATADIR is set. This is used by the pkgsrc framework. 
- -ifndef USE_GENERIC_LIBDATADIR -ifeq ($(UNAME_S), FreeBSD) -LIBDATADIR = $(PREFIX)/libdata -else ifeq ($(UNAME_S), DragonFly) -LIBDATADIR = $(PREFIX)/libdata -endif -endif - -ifeq ($(PKG_EXTRA),) -PKGCFGDIR = $(LIBDATADIR)/pkgconfig -else -PKGCFGDIR ?= $(LIBDATADIR)/pkgconfig -endif - -$(LIBNAME)_LDFLAGS += -lm - -.PHONY: test fuzz bindings clean FORCE - -all: unicorn - $(MAKE) -C samples - -qemu/config-host.mak: qemu/configure - cd qemu && \ - ./configure --cc="${CC}" --extra-cflags="$(UNICORN_CFLAGS)" --target-list="$(UNICORN_TARGETS)" ${UNICORN_QEMU_FLAGS} - @printf "$(UNICORN_ARCHS)" > config.log - -uc.o: qemu/config-host.mak FORCE - $(MAKE) -C qemu $(SMP_MFLAGS) - -$(UC_TARGET_OBJ) list.o: uc.o - @echo "--- $^ $@" > /dev/null - -unicorn: $(LIBRARY) $(ARCHIVE) - -$(LIBRARY): $(UC_OBJ_ALL) -ifeq ($(UNICORN_SHARED),yes) -ifeq ($(V),0) - $(call log,GEN,$(LIBRARY)) - @$(CC) $(CFLAGS) -shared $(UC_OBJ_ALL) -o $(LIBRARY) $($(LIBNAME)_LDFLAGS) - @-ln -sf $(LIBRARY) $(LIBRARY_SYMLINK) -else - $(CC) $(CFLAGS) -shared $(UC_OBJ_ALL) -o $(LIBRARY) $($(LIBNAME)_LDFLAGS) - -ln -sf $(LIBRARY) $(LIBRARY_SYMLINK) -endif -ifeq ($(DO_WINDOWS_EXPORT),1) -ifneq ($(filter MINGW32%,$(UNAME_S)),) - cmd //C "windows_export.bat x86" -else - cmd //C "windows_export.bat x64" -endif -endif -endif - -$(ARCHIVE): $(UC_OBJ_ALL) -ifeq ($(UNICORN_STATIC),yes) -ifeq ($(V),0) - $(call log,GEN,$(ARCHIVE)) - @$(AR) q $(ARCHIVE) $(UC_OBJ_ALL) - @$(RANLIB) $(ARCHIVE) -else - $(AR) q $(ARCHIVE) $(UC_OBJ_ALL) - $(RANLIB) $(ARCHIVE) -endif -endif - -$(PKGCFGF): - $(generate-pkgcfg) - - -fuzz: all - $(MAKE) -C tests/fuzz all - -test: all - $(MAKE) -C tests/unit test - $(MAKE) -C tests/regress test - $(MAKE) -C bindings test - -install: $(LIBRARY) $(ARCHIVE) $(PKGCFGF) - install -d $(DESTDIR)$(LIBDIR) -ifeq ($(UNICORN_SHARED),yes) -ifneq ($(filter CYGWIN%,$(UNAME_S)),) - $(INSTALL_LIB) $(LIBRARY) $(DESTDIR)$(BINDIR) - $(INSTALL_DATA) $(LIBRARY_DLLA) $(DESTDIR)$(LIBDIR) -else - $(INSTALL_LIB) $(LIBRARY) $(DESTDIR)$(LIBDIR) -endif -ifneq ($(VERSION_EXT),) - cd $(DESTDIR)$(LIBDIR) && \ - ln -sf lib$(LIBNAME).$(VERSION_EXT) lib$(LIBNAME).$(EXT) -endif -endif -ifeq ($(UNICORN_STATIC),yes) - $(INSTALL_DATA) $(ARCHIVE) $(DESTDIR)$(LIBDIR) -endif - install -d $(DESTDIR)$(INCDIR)/$(LIBNAME) - $(INSTALL_DATA) include/unicorn/*.h $(DESTDIR)$(INCDIR)/$(LIBNAME) - install -d $(DESTDIR)$(PKGCFGDIR) - $(INSTALL_DATA) $(PKGCFGF) $(DESTDIR)$(PKGCFGDIR)/ - - -TAG ?= HEAD -ifeq ($(TAG), HEAD) -DIST_VERSION = latest -else -DIST_VERSION = $(TAG) -endif - -bindings: all - $(MAKE) -C bindings build - $(MAKE) -C bindings samples - -dist: - git archive --format=tar.gz --prefix=unicorn-$(DIST_VERSION)/ $(TAG) > unicorn-$(DIST_VERSION).tgz - git archive --format=zip --prefix=unicorn-$(DIST_VERSION)/ $(TAG) > unicorn-$(DIST_VERSION).zip - - -# run "make header" whenever qemu/header_gen.py is modified -header: - $(eval TARGETS := m68k arm armeb aarch64 aarch64eb mips mipsel mips64 mips64el\ - sparc sparc64 x86_64) - $(foreach var,$(TARGETS),\ - $(shell python qemu/header_gen.py $(var) > qemu/$(var).h;)) - @echo "Generated headers for $(TARGETS)." 
-
-
-uninstall:
-	rm -rf $(INCDIR)/$(LIBNAME)
-	rm -f $(LIBDIR)/lib$(LIBNAME).*
-	rm -f $(BINDIR)/cyg$(LIBNAME).*
-	rm -f $(PKGCFGDIR)/$(LIBNAME).pc
-
-
-clean:
-	$(MAKE) -C qemu distclean
-	rm -rf *.d *.o
-	rm -rf lib$(LIBNAME)* $(LIBNAME)*.lib $(LIBNAME)*.dll $(LIBNAME)*.a $(LIBNAME)*.def $(LIBNAME)*.exp cyg$(LIBNAME)*.dll
-	$(MAKE) -C samples clean
-	$(MAKE) -C tests/unit clean
-
-
-define generate-pkgcfg
-	echo 'Name: unicorn' > $(PKGCFGF)
-	echo 'Description: Unicorn emulator engine' >> $(PKGCFGF)
-	echo 'Version: $(PKG_VERSION)' >> $(PKGCFGF)
-	echo 'libdir=$(LIBDIR)' >> $(PKGCFGF)
-	echo 'includedir=$(INCDIR)' >> $(PKGCFGF)
-	echo 'archive=$${libdir}/libunicorn.a' >> $(PKGCFGF)
-	echo 'Libs: -L$${libdir} -lunicorn' >> $(PKGCFGF)
-	echo 'Cflags: -I$${includedir}' >> $(PKGCFGF)
-endef
-
-
-define log
-	@printf "  %-7s %s\n" "$(1)" "$(2)"
-endef
diff --git a/README.md b/README.md
index 1a19f302..502fe2db 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,6 @@
 Unicorn Engine
 ==============
 
-[![Join the chat at https://gitter.im/unicorn-engine/chat](https://badges.gitter.im/unicorn-engine/unicorn.svg)](https://gitter.im/unicorn-engine/chat?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-[![Build Status](https://travis-ci.org/unicorn-engine/unicorn.svg?branch=master)](https://travis-ci.org/unicorn-engine/unicorn)
 [![pypi downloads](https://pepy.tech/badge/unicorn)](https://pepy.tech/project/unicorn)
 [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/unicorn.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:unicorn)
 
@@ -11,7 +9,7 @@ based on [QEMU](http://qemu.org).
 
 Unicorn offers some unparalleled features:
 
-- Multi-architecture: ARM, ARM64 (ARMv8), M68K, MIPS, SPARC, and X86 (16, 32, 64-bit)
+- Multi-architecture: ARM, ARM64 (ARMv8), M68K, MIPS, PowerPC, RISCV, SPARC, and X86 (16, 32, 64-bit)
 - Clean/simple/lightweight/intuitive architecture-neutral API
 - Implemented in pure C language, with bindings for Crystal, Clojure, Visual Basic, Perl, Rust, Ruby, Python, Java, .NET, Go, Delphi/Free Pascal, Haskell, Pharo, and Lua.
 - Native support for Windows & *nix (with Mac OSX, Linux, *BSD & Solaris confirmed)
diff --git a/SPONSORS.TXT b/SPONSORS.TXT
deleted file mode 100644
index ecdc4573..00000000
--- a/SPONSORS.TXT
+++ /dev/null
@@ -1,7 +0,0 @@
-* Version 1.0.2 - October 21st, 2020
-
-Release 1.0.2 was sponsored by the following companies (in no particular order).
-
-- Catena Cyber: https://catenacyber.fr
-- Grayshift: https://grayshift.com
-- Google: https://google.com
diff --git a/TODO b/TODO
new file mode 100644
index 00000000..7191e3a9
--- /dev/null
+++ b/TODO
@@ -0,0 +1,11 @@
+Optimization:
+- avoid checking BB exit after every instruction. this needs a new OPT option?
+
+----
+- cleanup code
+
+----
+when we hook both BB & instruction, we insert 2 consecutive calls to uc_tracecode.
+optimize it by removing one
+
+support Makefile, not just cmake?
diff --git a/bindings/README b/bindings/README
index 6a15b81e..24dd2c18 100644
--- a/bindings/README
+++ b/bindings/README
@@ -1,4 +1,4 @@
-This directory contains bindings & test code for Python, Java, Go, .NET and Rust.
+This directory contains bindings & test code for Python, Java, Go and .NET.
 See /README or /README.TXT or /README.md for how to install each binding.
 
 The following bindings are contributed by community.
@@ -10,14 +10,13 @@
 - Haskell binding: by Adrian Herrera.
 - VB6 binding: David Zimmer.
 - FreePascal/Delphi binding: Mohamed Osama.
- FreePascal/Delphi binding: Mohamed Osama. -- Rust binding: Lukas Seidel. More bindings created & maintained externally by community are available as follows. - UnicornPascal: Delphi/Free Pascal binding (by Stievie). https://github.com/stievie/UnicornPascal -- Unicorn-Rs: Rust binding (by Sébastien Duquette, unmaintained) +- Unicorn-Rs: Rust binding (by Sébastien Duquette) https://github.com/ekse/unicorn-rs - UnicornEngine: Perl binding (by Vikas Naresh Kumar) @@ -34,6 +33,3 @@ More bindings created & maintained externally by community are available as foll - pharo-unicorn: Pharo binding (by Guille Polito) https://github.com/guillep/pharo-unicorn - -- Unicorn.js: JavaScript binding (by Alexandro Sanchez) - https://github.com/AlexAltea/unicorn.js diff --git a/bindings/const_generator.py b/bindings/const_generator.py index daa18f0d..30b870d8 100644 --- a/bindings/const_generator.py +++ b/bindings/const_generator.py @@ -6,7 +6,7 @@ import sys, re, os INCL_DIR = os.path.join('..', 'include', 'unicorn') -include = [ 'arm.h', 'arm64.h', 'mips.h', 'x86.h', 'sparc.h', 'm68k.h', 'unicorn.h' ] +include = [ 'arm.h', 'arm64.h', 'mips.h', 'x86.h', 'sparc.h', 'm68k.h', 'ppc.h', 'riscv.h', 'unicorn.h' ] template = { 'python': { @@ -21,6 +21,8 @@ template = { 'x86.h': 'x86', 'sparc.h': 'sparc', 'm68k.h': 'm68k', + 'ppc.h': 'ppc', + 'riscv.h': 'riscv', 'unicorn.h': 'unicorn', 'comment_open': '#', 'comment_close': '', @@ -37,6 +39,8 @@ template = { 'x86.h': 'x86', 'sparc.h': 'sparc', 'm68k.h': 'm68k', + 'ppc.h': 'ppc', + 'riscv.h': 'riscv', 'unicorn.h': 'unicorn', 'comment_open': '#', 'comment_close': '', @@ -53,6 +57,8 @@ template = { 'x86.h': 'x86', 'sparc.h': 'sparc', 'm68k.h': 'm68k', + 'ppc.h': 'ppc', + 'riscv.h': 'riscv', 'unicorn.h': 'unicorn', 'comment_open': '//', 'comment_close': '', @@ -69,6 +75,8 @@ template = { 'x86.h': 'X86', 'sparc.h': 'Sparc', 'm68k.h': 'M68k', + 'ppc.h': 'Ppc', + 'riscv.h': 'Riscv', 'unicorn.h': 'Unicorn', 'comment_open': '//', 'comment_close': '', @@ -85,6 +93,8 @@ template = { 'x86.h': 'X86', 'sparc.h': 'Sparc', 'm68k.h': 'M68k', + 'ppc.h': 'Ppc', + 'riscv.h': 'Riscv', 'unicorn.h': 'Common', 'comment_open': ' //', 'comment_close': '', @@ -101,6 +111,8 @@ template = { 'x86.h': 'X86', 'sparc.h': 'Sparc', 'm68k.h': 'M68k', + 'ppc.h': 'Ppc', + 'riscv.h': 'Riscv', 'unicorn.h': 'Unicorn', 'comment_open': '//', 'comment_close': '', diff --git a/bindings/dotnet/UnicornManaged/Const/Common.fs b/bindings/dotnet/UnicornManaged/Const/Common.fs index 1491194a..0b91cac1 100644 --- a/bindings/dotnet/UnicornManaged/Const/Common.fs +++ b/bindings/dotnet/UnicornManaged/Const/Common.fs @@ -6,13 +6,14 @@ open System [] module Common = - let UC_API_MAJOR = 1 + let UC_API_MAJOR = 2 let UC_API_MINOR = 0 - let UC_VERSION_MAJOR = 1 + let UC_VERSION_MAJOR = 2 let UC_VERSION_MINOR = 0 - let UC_VERSION_EXTRA = 3 + + let UC_VERSION_EXTRA = 0 let UC_SECOND_SCALE = 1000000 let UC_MILISECOND_SCALE = 1000 let UC_ARCH_ARM = 1 @@ -22,7 +23,8 @@ module Common = let UC_ARCH_PPC = 5 let UC_ARCH_SPARC = 6 let UC_ARCH_M68K = 7 - let UC_ARCH_MAX = 8 + let UC_ARCH_RISCV = 8 + let UC_ARCH_MAX = 9 let UC_MODE_LITTLE_ENDIAN = 0 let UC_MODE_BIG_ENDIAN = 1073741824 @@ -34,7 +36,6 @@ module Common = let UC_MODE_ARM926 = 128 let UC_MODE_ARM946 = 256 let UC_MODE_ARM1176 = 512 - let UC_MODE_ARMBE8 = 1024 let UC_MODE_MICRO = 16 let UC_MODE_MIPS3 = 32 let UC_MODE_MIPS32R6 = 64 @@ -49,6 +50,8 @@ module Common = let UC_MODE_SPARC32 = 4 let UC_MODE_SPARC64 = 8 let UC_MODE_V9 = 16 + let UC_MODE_RISCV32 = 4 + let 
UC_MODE_RISCV64 = 8 let UC_ERR_OK = 0 let UC_ERR_NOMEM = 1 diff --git a/bindings/dotnet/UnicornManaged/Const/Ppc.fs b/bindings/dotnet/UnicornManaged/Const/Ppc.fs new file mode 100644 index 00000000..2e6c91aa --- /dev/null +++ b/bindings/dotnet/UnicornManaged/Const/Ppc.fs @@ -0,0 +1,48 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[] +module Ppc = + + // PPC registers + + let UC_PPC_REG_INVALID = 0 + + // General purpose registers + let UC_PPC_REG_PC = 1 + let UC_PPC_REG_0 = 2 + let UC_PPC_REG_1 = 3 + let UC_PPC_REG_2 = 4 + let UC_PPC_REG_3 = 5 + let UC_PPC_REG_4 = 6 + let UC_PPC_REG_5 = 7 + let UC_PPC_REG_6 = 8 + let UC_PPC_REG_7 = 9 + let UC_PPC_REG_8 = 10 + let UC_PPC_REG_9 = 11 + let UC_PPC_REG_10 = 12 + let UC_PPC_REG_11 = 13 + let UC_PPC_REG_12 = 14 + let UC_PPC_REG_13 = 15 + let UC_PPC_REG_14 = 16 + let UC_PPC_REG_15 = 17 + let UC_PPC_REG_16 = 18 + let UC_PPC_REG_17 = 19 + let UC_PPC_REG_18 = 20 + let UC_PPC_REG_19 = 21 + let UC_PPC_REG_20 = 22 + let UC_PPC_REG_21 = 23 + let UC_PPC_REG_22 = 24 + let UC_PPC_REG_23 = 25 + let UC_PPC_REG_24 = 26 + let UC_PPC_REG_25 = 27 + let UC_PPC_REG_26 = 28 + let UC_PPC_REG_27 = 29 + let UC_PPC_REG_28 = 30 + let UC_PPC_REG_29 = 31 + let UC_PPC_REG_30 = 32 + let UC_PPC_REG_31 = 33 + diff --git a/bindings/dotnet/UnicornManaged/Const/Riscv.fs b/bindings/dotnet/UnicornManaged/Const/Riscv.fs new file mode 100644 index 00000000..5fd11e95 --- /dev/null +++ b/bindings/dotnet/UnicornManaged/Const/Riscv.fs @@ -0,0 +1,150 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[] +module Riscv = + + // RISCV registers + + let UC_RISCV_REG_INVALID = 0 + + // General purpose registers + let UC_RISCV_REG_X0 = 1 + let UC_RISCV_REG_X1 = 2 + let UC_RISCV_REG_X2 = 3 + let UC_RISCV_REG_X3 = 4 + let UC_RISCV_REG_X4 = 5 + let UC_RISCV_REG_X5 = 6 + let UC_RISCV_REG_X6 = 7 + let UC_RISCV_REG_X7 = 8 + let UC_RISCV_REG_X8 = 9 + let UC_RISCV_REG_X9 = 10 + let UC_RISCV_REG_X10 = 11 + let UC_RISCV_REG_X11 = 12 + let UC_RISCV_REG_X12 = 13 + let UC_RISCV_REG_X13 = 14 + let UC_RISCV_REG_X14 = 15 + let UC_RISCV_REG_X15 = 16 + let UC_RISCV_REG_X16 = 17 + let UC_RISCV_REG_X17 = 18 + let UC_RISCV_REG_X18 = 19 + let UC_RISCV_REG_X19 = 20 + let UC_RISCV_REG_X20 = 21 + let UC_RISCV_REG_X21 = 22 + let UC_RISCV_REG_X22 = 23 + let UC_RISCV_REG_X23 = 24 + let UC_RISCV_REG_X24 = 25 + let UC_RISCV_REG_X25 = 26 + let UC_RISCV_REG_X26 = 27 + let UC_RISCV_REG_X27 = 28 + let UC_RISCV_REG_X28 = 29 + let UC_RISCV_REG_X29 = 30 + let UC_RISCV_REG_X30 = 31 + let UC_RISCV_REG_X31 = 32 + + // Floating-point registers + let UC_RISCV_REG_F0 = 33 + let UC_RISCV_REG_F1 = 34 + let UC_RISCV_REG_F2 = 35 + let UC_RISCV_REG_F3 = 36 + let UC_RISCV_REG_F4 = 37 + let UC_RISCV_REG_F5 = 38 + let UC_RISCV_REG_F6 = 39 + let UC_RISCV_REG_F7 = 40 + let UC_RISCV_REG_F8 = 41 + let UC_RISCV_REG_F9 = 42 + let UC_RISCV_REG_F10 = 43 + let UC_RISCV_REG_F11 = 44 + let UC_RISCV_REG_F12 = 45 + let UC_RISCV_REG_F13 = 46 + let UC_RISCV_REG_F14 = 47 + let UC_RISCV_REG_F15 = 48 + let UC_RISCV_REG_F16 = 49 + let UC_RISCV_REG_F17 = 50 + let UC_RISCV_REG_F18 = 51 + let UC_RISCV_REG_F19 = 52 + let UC_RISCV_REG_F20 = 53 + let UC_RISCV_REG_F21 = 54 + let UC_RISCV_REG_F22 = 55 + let UC_RISCV_REG_F23 = 56 + let UC_RISCV_REG_F24 = 57 + let UC_RISCV_REG_F25 = 58 + let UC_RISCV_REG_F26 = 59 + let UC_RISCV_REG_F27 = 60 + let UC_RISCV_REG_F28 = 61 + let UC_RISCV_REG_F29 = 62 + let UC_RISCV_REG_F30 = 63 + let UC_RISCV_REG_F31 = 
64
+    let UC_RISCV_REG_PC = 65
+    let UC_RISCV_REG_ENDING = 66
+
+    // Alias registers
+    let UC_RISCV_REG_ZERO = 1
+    let UC_RISCV_REG_RA = 2
+    let UC_RISCV_REG_SP = 3
+    let UC_RISCV_REG_GP = 4
+    let UC_RISCV_REG_TP = 5
+    let UC_RISCV_REG_T0 = 6
+    let UC_RISCV_REG_T1 = 7
+    let UC_RISCV_REG_T2 = 8
+    let UC_RISCV_REG_S0 = 9
+    let UC_RISCV_REG_FP = 9
+    let UC_RISCV_REG_S1 = 10
+    let UC_RISCV_REG_A0 = 11
+    let UC_RISCV_REG_A1 = 12
+    let UC_RISCV_REG_A2 = 13
+    let UC_RISCV_REG_A3 = 14
+    let UC_RISCV_REG_A4 = 15
+    let UC_RISCV_REG_A5 = 16
+    let UC_RISCV_REG_A6 = 17
+    let UC_RISCV_REG_A7 = 18
+    let UC_RISCV_REG_S2 = 19
+    let UC_RISCV_REG_S3 = 20
+    let UC_RISCV_REG_S4 = 21
+    let UC_RISCV_REG_S5 = 22
+    let UC_RISCV_REG_S6 = 23
+    let UC_RISCV_REG_S7 = 24
+    let UC_RISCV_REG_S8 = 25
+    let UC_RISCV_REG_S9 = 26
+    let UC_RISCV_REG_S10 = 27
+    let UC_RISCV_REG_S11 = 28
+    let UC_RISCV_REG_T3 = 29
+    let UC_RISCV_REG_T4 = 30
+    let UC_RISCV_REG_T5 = 31
+    let UC_RISCV_REG_T6 = 32
+    let UC_RISCV_REG_FT0 = 33
+    let UC_RISCV_REG_FT1 = 34
+    let UC_RISCV_REG_FT2 = 35
+    let UC_RISCV_REG_FT3 = 36
+    let UC_RISCV_REG_FT4 = 37
+    let UC_RISCV_REG_FT5 = 38
+    let UC_RISCV_REG_FT6 = 39
+    let UC_RISCV_REG_FT7 = 40
+    let UC_RISCV_REG_FS0 = 41
+    let UC_RISCV_REG_FS1 = 42
+    let UC_RISCV_REG_FA0 = 43
+    let UC_RISCV_REG_FA1 = 44
+    let UC_RISCV_REG_FA2 = 45
+    let UC_RISCV_REG_FA3 = 46
+    let UC_RISCV_REG_FA4 = 47
+    let UC_RISCV_REG_FA5 = 48
+    let UC_RISCV_REG_FA6 = 49
+    let UC_RISCV_REG_FA7 = 50
+    let UC_RISCV_REG_FS2 = 51
+    let UC_RISCV_REG_FS3 = 52
+    let UC_RISCV_REG_FS4 = 53
+    let UC_RISCV_REG_FS5 = 54
+    let UC_RISCV_REG_FS6 = 55
+    let UC_RISCV_REG_FS7 = 56
+    let UC_RISCV_REG_FS8 = 57
+    let UC_RISCV_REG_FS9 = 58
+    let UC_RISCV_REG_FS10 = 59
+    let UC_RISCV_REG_FS11 = 60
+    let UC_RISCV_REG_FT8 = 61
+    let UC_RISCV_REG_FT9 = 62
+    let UC_RISCV_REG_FT10 = 63
+    let UC_RISCV_REG_FT11 = 64
+
diff --git a/bindings/dotnet/UnicornSamples/Gee.External.Capstone.Proxy.dll b/bindings/dotnet/UnicornSamples/Gee.External.Capstone.Proxy.dll
deleted file mode 100644
index cb71aff4a5d650585d82fe416b00d4a5d89ea6a6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 6656
[base85-encoded binary data omitted]

diff --git a/bindings/dotnet/UnicornSamples/capstone.dll b/bindings/dotnet/UnicornSamples/capstone.dll
deleted file mode 100644
index ec524e2d1d99919d0625f8f8802a496cbc7a3f15..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 2484736
[base85-encoded binary data omitted]
zXpwhnQ#!k|dpOGOYwgAEb+}QX$9E&am>a-=-QR)9vU|COF-S0C1fw%Buu+8ZJ(gI> z?q`Ms?e=z013NrzSQuWvF$GVB!Q|lUQMlTz;c2A2bC=PGSEI+V>eq(#9OHFH+Jp8( z45k)4*-qQ0kPhLH{G5Iv{0sK?2o#k`0S4vD|EByp~_g?*G;iq={CnhJ~&E$8?DkMT&+vs#!Xk+_&WR~4kLH?Iqd#v zH{Hd$bBB-q#xv24J7q6#KJ?>GjEdV>_B*zZG(k{Ba9vnVCPTUeTQHS1qP2^l+Gn^=;>TTFnS2aw)N0CHYE?9|4q_5Z+-c{>AaB`WXZY|hqlfyVtYaR=n4us zt#f^9=6O1~>KV zuhZ#v6zSBO=62%H)9v@E2BWaSNTbiZna!if@j2GRRJskOjBSmnx}w{96oT4sWzy@` z(8E!Ft6}NZSaf^UV5H@<*Wpo;$)8A!R;M1tp)!~=>pAQn>i)RP;HHjV=jA`up$j|= zrP%uh1Gm-jgNcO#atsxpHn5?9#L}BDyD}F&+VXzhP>ehF3HDON*`SUmPdT)rk;gjM z6>)t<+>dMhI$uN%v^x3(4wc9H=!1D_dgz8=aHT~ zrSkwIk98g`;?|%z)GjI^;&|isVGl$3xw^qnIyV%MfdX=9jWHaLabJ##J4A4oqFU6| zQ9;2igqRlGDt_FOQE|%&ZhOI9x7zcK`y$~+9=}^#`kb;FhxYj0hAluHDj|mEIk5|% z19@+RhoidhMI3s$WwctLjNPJtu%D;Rsvs)8VNU^64Iq((O$gdb-ULjQyzXbe5|F40=#9 z*?ERoDavd2jq37x9=wW-$~cVd@|s#0qCAC#ag=XgVJQrk=N6S3iAr;AD&_L;kSDGA zVmS^yv$kSGP>+-tgW-31l*zZR2da#D#(?sq&`)sZp{&&Iiid7$eWet;$HP$mzSCf+ zN#V!`Nb;{;j=q{W)7;nZM{uYNV;#<7(@)!0Msygp+^@rY4@2qjp21K$v?U!r%Rq-7 z9>4NKTf@&f^b{RtqEyv>hz;m)mxrNrC}1#@4*O<6ha;pzUTacF0pslDAmKJYc}EP zgSZNFB34tUoBAyO8HZK?{E1ya9RQ^V8t0Gv(jNfxJPhUhcMXPe{(J#BGQfZ|5Rfc1 z{5vxjRVFyFKbjP$6(uq+<;dzX3&tCwa+?uK8Q z_*}6YhjtMBik(62i-KatD}WBn^zV8&${nxc&`TCG1Y^%j#ufJgBZZyNfmkUA*6bBE z2iEc6l`ksdFmev8s)fN9-B=j=V#^XsVdTJgQK`PDG~1?9sb)iC?H*;;5*&JFZNkQ% zcFpzvo>~7X2lnyLb{(HsS6GxhjbAQif;UXLAe_`h3 z>c`Cy6?cQ)Apq`Ug1caG6x@xE`Yd0H!^rxk2=3W_6ibD@$D>fP2uKyqzo33z>AFbl&Af?NQ&n1l48x(L^ zJ~3^8E#FN&{>+z`g3T zF1tsqhS)VqtP!nS3|<-)umIPt zm33`B9A#ZI9D3H}ChNxI6**!BEra?ErC?QVnAQx3b5f zj4y%1$YXM!-W)*Jwqpg@__d#U3aDrJKG#%Gv!+oqtlxe(aBviuBus>|F@45Wa5F6FUY_lYYWqiGoCc zlby_D0-j?e@C0Y1fHxEH&lbWX>Nl1AVB*gDo)`E@?e#eHx??Z)1N97=T>yFmfV4|? zCKD?aGQB#pcJ2&p;^@G|Babink)Oq3pyZ9usW_GZoJZ zM;BOBSk~i}88;tN?#1^Pu$IsUG!d3hz@iNJ5tO*&6vEdGvbftbaO^@NG>*MTVE;x5 zr?XrlX<|8c0C+f6s#~(%Su6TUT!J$g`;A(!#Red)yFCWg@P`+x_+d(tCJ_ffISmRg zwr5cG;=1{ke;_&}qVF@JQ{T0zlowaj^SoFOivq1}tgv(gi}K=|poAAQ;j7R1D8xu? zA4QALE<|bEczl4tUD(sOF&C*~xv>g(EH{>9wUircVp~!xmSOvb$eo8^6dR6K>u*HP(xE z{l8k>(fdfnf(}=-2>kG)< zP=kjx{pJAE>JUPRMM9wWO2sX#A8C`nEATtI8ttnZ?E#-;)4PiS|8i7#IH#OVUPs`c6ZqY;EazN8VxyQ~-!CBT zivT`i6d(Q^SFcx%a`nwcxOxOvx!>>!D+DpWfq=dn$v;0BXN&sa598P$xoYExy`NMA zJ>CM1=N~}N@05r|48}Q+Gds;`tZOdn$Ua&d$dvIMqVZfD)b!+0E;V4onpwYz=R@q$ zxK+UnNwMx&jHQScr;tCg6ImAgKD~=3OE2sTWJ?m3`_x+`&aFdDNx*<#bTC-~EqBg6B^d(S3+72%`ng-hA5!AANl)20Us4$Mhgc#F^hJ>*rz~js{Y*LO6Oj3s@>N1mja9<-A5 z79GapBE2ar-WiTVmZap2AUU(|jWVzv!2-oi!&TT?1$C2eVDTNg>I|pH@G_L}+RcOh zXWV>bhBvTs;r(}{O-`D7FtBnt7my9Dx-M-LJ>5}l3ApyfRZdG1;)X%O&X zic{w!m0hE}ZnS{b4{?5|6Y0lsUXS$izV3V;^|Ll%1(R@`7iAL6(d82`YGFoQ$g#}0 z$C#1mtRD%bTHb@}o>I9pRBD)(2&EnsrRGn!l2A*E z8+yD^5xIv)nB@w$ zgm=;!b}kk>zBGmYQ)`g-KY0=T#JZlbrPO5#z5 z)W#mlI7IJg2I3)g>OJEOjrq_tpDR6MoqyL zdZKd+t1{i8liGXEOTF~2XS1@Qu(9C>q3T0q0`+T=`=IV80dQ({W?-jI zQ3A+lH%o-zFoX32fSI0tN`#+iXOwM9SUk9q44$*;O9}mQp)X?SF}NQBJ^yx<-*#D* zj%hPOtUatsVjvdPB?{0xB)5jGB1{_F`6;vtOQNt&b_&__TZO)p z&~McAv9v4bAOrK3kMiC@J~ZC@9*6dOLSK>1#@qt<0s~@*RKcLnm+VYpE-VAxm3Kvp zBY3*ECgZ_A^Lb<1m#vNG3ISR53_SOsP5(945N?&|@GdIO$Qy2O>Um1pX2WgH0VBGO zh@L*#Y`F1HKi7eW;v&K6FZ9`kzBlMm`NO4?LCIet$%25)Bf%|6_E16COKor`@g^`j9rlz4g4nZnzp*DJ*|>81B)HP!L7<#P=2iC0 z5&AkpKi#IskAT-1`u;+nUFds*UfT~!+kPB+_BZFv6J+!ZzkY+*XsM-fY@}kBUFVp0 zlZ-*v-}Yhyqp+k+-wO0@|LfMKV{lacjr?W?`CS5EN8qOe-i)dZHvK%2yXGG#r-XeKe&e-BQbP%>)#Cp-0Q@qhhj+%dA1ajN^4)n~bE`t?Ge zB=lctdRF%c(y*oO4|FK%evLyr6Ha&+`u=q_gBW|0IrO5bfym#Np!Mya>Xm&_vwu?? 
z<5kq5Is_(q7`apbnPAym#HN27^!S)fl$>#va;M3GB?bFF!G0ImcFtG@9+fj1U`e2L z$s+VUK(G4#El^s{98QPg%zil3DWP#=4nI5yPzBz=d&OuPv6?EhCys`p!uEd z4!6Qp%a6VR84GzbAivygbZ`2UiJec!dqZO{vM>*gPag9RNDQ#AaE&~G`VA=V(K`Vg z#ldf?WUlWz))%3BTNBc zVadRYqr*(_x0;VGmi8h!!6`yfTqFY{wt!foWRQpti}-#bo=cF)EP!UpW2>& zDxaaQZ^ue-UBC7Xo;`>^(V4ADu$ZYOdcMyjYX5;%SzXHK*H7s%6TBLa_nKyMX5dg| zh(xCpmYnW}Vz}<sU&3T>d0Eighev>v-!2)Af^>g!f1;jbk$)hPGmAIE7gW z+6(+a=(`C0CPj}ftwioka2%vu6)SvO;vgNurUbVf7CSUmxE(O4tpQ1cQw_@PX)yBV zqW(dt4X)vg2a=gBZpWg={Q{&sSR$3(oow>FqWiojOx(n4TY0CkicP-}laZSc1)jgF z5VFZ9$=uaM;6E8_T^i9yqk`vs{rp{23RE+{<~Onz_${+O0Zs(R~(oJ1lQg#NP*| zhfYyqd?4D@>9iKQ16R3I(k;bM$!{ezXo~XjO49l20fqm-DaGTMdbCc<-JMr=<^h}(aWZWvUJLB@I^MKFM}Ch zJWSFj7Rtcd`nL4*TaEP5&5X6DMqAc~NnQ>}CwV!Y5VM=Tz-q5TUMa|nII#1L!_~~u zu3n_M_;|r6yv`fAx;fg_mDH;XqFo(FH)+PYf!AaDK~f=M%X7u{Knhl zZxT7G4Yi7j6z+$SuuZskB=V_5E=zM?QSlk5rKsriz3GyFnwTzWJ<1xmsJdU#67c+r z(0?oR+>l3-09P2=N6++SHa&r($WWc>m+ifDraU2eNCZ% z$EJt=D-HegLLX=7)9NE_nxdBKqDZN}zXn0<)o{uS&Kh)%9=IhS*X%bvV$)yVjBXiw zM)zVQ_yRWh7Le5!6JNyjYHpa(Q=P- zLC1N#3CZ=6WgpOE_J~Oyd(L@=dCl<53vPErK0Od38Clg9wiFe(7WT3TJDlnniS=>F zrcV|6CPM#-p^r&)d#ULyatAI&nlv7Jc9Ri)9Tl4Hz>WqC!c%Sf`-Og+&{sC}=#f@7 zeO95LCiJ&Pc*f;$-#Zm88n6MM&Bj{wozb<0U@b9N*myXAv}bG^EA%A{eOhOv952Yx zfjJPz<(-)-c}J;1AJ#yqLH9pGPU8Bm4g>>06|o%wfRN|S=qgSgVfa`WZZn#yYOkVJ zHqrUzTVvTvOwaLlaJc6sb+hUkaw{2^=qwicM}>aAq9=o}KQAPug=C~5!As?Pg2X+P zCt94+{SPgX$9i2s+||g0N(~^o^&)Sl;4uQc|CnY3vz?MGAmaq&>@btq@FY-@lSj)% z{uKK5Lcb35D1fshIVTF}M7y?jFwsUxVEkCpQm~j9+4PO^{Pl*$sE)v=1jdggrP}mU zg?^jRS2pxn+!}`>PCD$1+JF*YG!v|$g7w=_zc1pn_++Q3(6+QH_roU06M=4?R|VBnO@+1AHm2f8 ztxP(guupc9l@3YHy+IuypTfAjQ-f59y574_)aWT{>=?owLi~wNUeTek=&(?cP@jRU zhO-?BSdEt7HXzQ==Yn%Q6&9=H9A&sK0X-#Tc*1F5JkczG-`n7i(Dd-h3Z`eD6tz|E zL@GQF6`)%B{gE#g;p$Vk8o6>pX&$46v1KDrzv~I3vB+Ea8eGJ)6TSeuGp`lnd%76< zLP9^t(4*9v33{$}3sAK?k&a})X9R@;F_ zGdm_?zfo)ddVMqE&b-RAfINqqi@9{Y{uYXK&66ZNUx7#O@wyH7>RBY++z#(;xZU!` zBZ8G(uzCZ_cyFRjKe@A!{S%5gJwraI>G_RL&w!o{R*&;1&>?-lEQI1ctJ8?l;J(<4 z>QSi9n84l6IWp+(JhlRputw0vw-E(a54IHk1xehbwBMgG^mhyWI75%_UJUw_tmq-7 z;VfoH4|y7{$HtnwK0aa>^V8_h69BD_I9{Id_?3MdWfeTHp7B^b6tGvp_jnNM`+syK zA@_gkz63CeV)=V_vq=`Rumc2*8YM{3h=^#Sh9ep@LQsTY$S#5cD&i9HiLf!KD2baG z!Wu=z`$C@wDhi@vP|<{N1XSb_@eYU3YdE3^1X1?;RrOrEIl%Y)fB)|b)Xq$IRdscB zb#-;mOpn4G$>h)_@WA9`1gXK3u-@CO&0$aIpA^uO&B-QMhfCA+a4XekVhH5q2)PDt z6MDJIwW0{@m z24^AMS3SY7etO4RG>sIg-a!5uep&YfqYV5m2L62#KZvQDQK!gqA2RdlXz=;EuU>9n zbGc)I54(&FQg}_)j{2sE!8A&=IMjcHpq#3Makhxra#+2+&@|LPsiMGit}{xmk|-#K zKV2`j!I#RRURmvh(Mn$^K1O?0fhrmpROhd{3T8nSchW zj6uFur8|(@a+Sp8dX?TnK&s9Z6mykUs3@q?EJoqpu;&i0wO%C`3qi4dXQN7^4E&`M zpQ{AZ%@sewTgyI+O4!Ahbjf27E@MR%E_rnu9zIWO!w?wU7Hz84T=MlTHdIHWR$n8m z)8;=?1ZwC!u`f$k{CkYTwK_4PR()6us?{ae>i8uF{;mSng3(Y8t_J#+m8)^Mt=gfv z8mCY-PC+#wc{5X8XYds`A_TjX7x@L^1>j`3{}*wELiP7qtvBh??Yu}g_`#5l?7$;# zCw%(IHl7)th(#aJ>UZ|_F%G?VFqe{lLa?p*R-+Lvu}@CckWvUqAE4TCvgWEoZBSyf zZ%E~-(H(qnUzU{zsONm9xE;?foNL5@@kWgbfrT1jTzP@`L(O<;EILT*M$J@-UF01= zG>r{6EYC8QK=@q_c219e_g2Qy>uoVl{RBt*XO@IP_E0)1(SwY@gZ}bmvWl~E52^59r(b$DdN=^!_K+Dp{-BaA2YT?ZF*ST_d z($yYw1$FG*n5x(Q!rRGV1y)oG#X#MTs9QyyXQ0NZs0Q9g240bYw|%lc5@7v{#Z`y@ zI9P$oH^;z#SH;J}NLc@ZH`Is!(UNvj7_a4S@EyS(ToE@qijSP6iEN_U%^1 zImN2n0$2cO1e8y;D$fH>_F0uwz)Zj{fTzH!+y__#Xm^KIISVinU@x>P3jrPPv?{#; zLji;DvMT9!TNO9p0H9<#%DKm?^Z=9tW&;xM1rERs*hA+ta1QVRiXgMI$f{ffxB<`r zxT4sqtOPuBpH=DPw<;e2VoR*be87lOt1<;J8!+~M@B_S3W>q!;UYKcB{N<2umQ~3B zj0TJc+y__!_!@8kP#3T&6%SaI;T2Zp)CaA~QNWsqtje3St;!<6oex`;%tx%s41n!X ztFjs}dyZB45^&yQs25->;K<`B{|T$|=#$VtpzTvu)Ax&p_{hZqGsvz@vb` zbCB_;w#cfi z0$lkn>I7*2o>jRD@BrWqKrLYU`_L(1=3=Wd=mY2$a0oE0(yF`$2mw4xtjf253qFK= 
zfKDG-mB#_U0$d-X&Vcqyt;)lI-GGIkKsTRSl{Qt750F!BRlWfXS%z`|uL70>wgD_P zR^>E6f50ffG{ED4_W&CJ4S@E`Ve5b^0A9cA7B$8Yo%4W4X_q)&MMRoP!DKR zYgNt!JOS7Q_yKSPU|$V-Krg^WfDZvDt+6UM0cHdK0Nn5y+6vGB=<+$_0bKM2Yzr`9 zEo28A0yw`!+W@8jZeC|q;@4Z1Wq>-sQNVQ@tjhO*TQ;H}Z-Na2ssROGL0^CyHsczQ z^tDwv2q@SB*}kzV69Fp#1>ZuyfOi0Ae201gUicn$|G}zQe}pXpQUJMtMS!D#6Mq6f zK!3n|z#4!6++1f>N&yD|Q@2``?EvdG)DMsW7!9}+Fc0uGAZ9z-0&wvTtMVM6^UvsG zfDqu#ov`^`sJnpf0U^NML8}t?i&c3D(6QdCJO+3daQ$xB3gD$ZXsf+e7D; z^L3QZO@G7>Bo6=KqyneEHWXOXEi2g$K9$4+EY@UitW%SS3{TtG zwd=8}n$*?WnTfZC?{92hczr0jbgspwRMvHiQ)()A@UP(C-Td3YzrXWuBme%%zm{ae zxAU)qf1UiR@NYc-Ch+eX!p}q*0|7$-R|7@@#sJ0x@&S_pcL1gX?gPvOJP4QrAbI_} z9i$SMrz)_1f~%7~x%wA}+rO=gYw@q;zGTJ!NBe?K&_R6ahE)~>WtPCoG5+O_n!vJT zXMab*1ruEV8j`FK!untQ+oGzsMXj=6MaF{kWfNTcYbUri4@tHoF0cnylG>U0;IbAd zla56hSmv*5-``n~>fdHvVgvT2Y!YX$soom3v9ZGW6_P9)cVYob6*&pls>uF7PIHwa z&I%o@w))z90!y-_Zm4&VR3zWQYMU>nc&!EL2dnLTit~fj4tZ(;nS#?bM0c<{iB2VM zCZFQGWc8hth~;T2eU&8>r)t@Tks!;S?QgUfc87t?w820==Vo8Wznl1X3;%x4zud?b zgDuws^=81WfT@5x0h+ympES7mDDYdz#!|dw2L4Uw-%9$0EbpM~4**L6HGtKCF9BZxz5{Fp>;mio><1hMgaFYHPp!YpRrU;N zD!J5Rhs=_Hf8t*>J=tNY%*5#ulE9T!T~jFq8bR5WBMqc+ghb*qlB~@%Mw)IQl@Jou zospy}o@J!D2GTr2TBsp$Ju7=t_U!$Txeo9xU=v_1;3L5MfCYfp0gnS_17-nA0Jj5f z0o(+jrB_Gb{-h{`l+!XTzRLsmD>x6#+=1%577xg&uotb$svb|yMa`Nmvo6i4a27#b z&ho4^UNx(TvW{lvjH%Hq>UjBI_8;#q1W8HbEw2vB%x zMiCBEd-MRPAbpce{~U)v88!UM%)wT9Rx~Z!QjqUX zJ(wGa?RWo^Jx){#&ImmIERKOS71vm2SqGF<6&ybMv*A~xV#(rz15`0*X_c>?f2E`N zXdReMSy6wc#x~oUS`~PRikMwA-P+FLb7cA(-=J3bJJ0s!VCZEa<9CFP5AvYEIDuI7hwo>)AMmj@0XwnK`zx*O6bjKL-5> z0BoQeV8tI@8wc&O#UmsCKl)>?np!PYQt02T1Fr8e^g=lNpIGSJv=ctcpBDHKnf_;R zYQoE+_NU_AndlTRHe_{IQkR#PB=^TvZs6JEAvop*<|mK95p(&E`NUIxZE~VG>10fs z8~n@d#YaDch_13a%*S0NKVp_$d~_S^z*Y7G#&TE5_c*T@_^`k6Dp%Qa=w8%+;<^8f z$06T_?=2-@GA?RQqNM|;ztQO`E#S2L?jgPr zV#mPi!|U>e%z&<3rD4`@}-q#_jM?43%x%s#$l6)hw8_%Td zlaNiTeO+0Ye&Cgp8~T8Jh4|NavTF90%lnhRca_x93NiLG<%37;K9S_aEki_CDdI<5 zpG*VzX__QEk>Tafz~H7y^R>5a>Xpgsxy^=bh z4MN^Ox%{>$hTZ3RAP77}mCp$bkRt4*rdRb5DS~or5q^S7S``8BUjqf&f{^{AT_sN< zL{-I_ilXyG1t}G_=d9ivReG?X-6u(?n%g_?-L^`7C;!nlcR19G7`OLXB<6}EXirUK zr)uMO&n8*OG0+oHleOQ8mk0jw{VFD(O7R6AdE|)vd zvlEJsz6D?6a~2X}25PsFS;2m1+jq6Xd-k6-951?0(M zA`0~{kH>;zNtqp{l$D&ABhr9~vm|$pIEnL}2%Jx}_)Z{GDkCn)P00~Gaq3^L1t{sDM`@{5=~>Aac_4(p%7#+=i*6u9>`Lz`_G_4$gk22JGq`T< zcr@f0C_*?uvVILy>hegK+#E5aD_Nd1*q;0yJocYJ!S}LIgyykp(1XZ}ZR4EXEFXON zpRUr^sc-!ea+S`-iMN5B?R*IBeFJ*)hz>wy>wfY&-4|CN8(l^`#>^IwgtC1#CpZZR zT8pzs_Fav2Z1!`WW4HIk%yuN$N`rH@hUE(RG|1M z-TNr?`lb}`tpkRyjejr3pZ75IQJZb-K#JOf0yRZFjU2C&N2FKLTHZyhg*ag3l4c*d zmbAbwxD45>nf7By`@Ms+p}^v!MJUfznyVHtg$sDVR6rjpz|I9+P6fQf1-t<~F5o<) zfHSlL-h%3>fOENkmk`Cd%4}fcDv9AL4xmbx_)+QNqjXEoRk{i3RKNWS)vukYcnStQ z;v=M!Lv4(9-9Ha0YP-J&i&5MCBu${Xqp9#j$+@7tcQW6#3 z=ah#j>tt#F|%QsMT+%jL5cL$^A5c)!(!)UR+ z4E9$TMdxr-WV?Dz&jK1S@;u@p7oP=5VR^Y8F;>1?5bTQU;-jyky$b}|dL^8mRoH-> zBNCwqj9j0W(!AmCu=o6@BjzNxMegt^>Z=u z+gMBS@=Vax%uelSQ_|Yol#e@L4WW%qIjIxoKz8HLYMh3j*}^aF_7*Us0-*^`S;plk zgZW4OfAwc&X>HBPY@Iy@`kM^64=@MtDxeZTQx;k(2-r)1KPgU$PU&)@zAm6nXS96Z zX~?fjZkm6*>nrH@|Izvl)?e}15eC{sT9!yecUTjLk$tF@hW_Cm@iAY~klY(rZ{sSk zB^Ujnw^-!FIrdD%BNTg`hckI**Q)(~L~>Ht*k3-PdYo%cwVJE1zxk`8 zusS!>xy+lyEE0QPLA&F%#J;?PS(euQib{zKh7N@afN$BP3lqgSphdiwk zNn8o6pr~3;QnkLTt*&*fcPSPbbxdka#9Z+bh;s2B&C@tmi+07_2yU85!gC)O0g+`b zMnM4o@+4ITER7X(TA7)x#=!dIj@%TSe$8-coe5-}39!zdZOqnr8efrhrocKCT;<@r zr#JyJz&7!Ke09BpEmLjtA7BV%m#@rF?eb5ek+MjCohO!%v6v^(zHYjPG`m~@vL<$! 
z;1+KoT^eP{V@508434suzB9GbaHP7$-Jl4YwdRR*lK|XAu9A8%(}hS-n<+C=h)FQl zk}4fF%*HUj;2DEZsD3wSSjY{Ej3% zQbOLO1`5m*8#w{b&uWGYsZT5oEg^4FN#4SBdF^oNB|4Xe3@5@WBYy!kr-*aN5dG8v zp*!kru96G{D*-?CxYE$r$R--kew=f|vY-?%bwAF*Lmy3jgv=VPcc2>b>C)(=bR?Kt zd=Hyp_ZfxycbeUU}SQi-n z6%6v=K}O;j?m-|x&s?!Eht!xru0d<=&uQp#l5=pvIk=)xCvgqP5k-O~-tB=|#|h6B z99NDVQzM*s>W^h!B3(48_7FMVd~DMVq8z(E ziTSvuF(z3oLSbgJ*oJGE1&WPF3AA8QH%Kj(_Ef=el*A)Z9@{rilEn;?*K8hnqCbpR z)39nD59tmaM2TT;fhG-bB--F9`{7yO8ZkCCZ3s|K0-;7}ZX+5gV393k`G^XFDB>Nc zLfViVLL8{N9Ye^J3j6zn#Lfq*O7u6j$ze0ogDdv+T5u)DmniCGs;`%()g#hKU|JYo z+TxCRV)=+_UwxZHKVnY<(|8f09%~veEIyaY!)!GASV1kGOS{1;bVm;FTUn?@u^)$( zDu&xt1*h-?3m6I79P3#&F2~r>HDq}S9EQ2Y{YZs>0$o^hy2Y&~iKI)tLQ0g|q#aEv zm+g2x*fraZT4lySiU|MJ4pl(#&i>0v)xp#9Y<0P4h~DBymM0B8e3ZXPvgJQ@_6;DwIT;&|&R``U63$ ze+t+CbSa@*Q^04TzakVchNG2SQEZ}ZMOf3|1^eSrMd4e*@EAiOu-1H+;ye$nmLg`N zNM3*~=t#;2-u;(po~4EYo0If>u2l{6{qJ9u(S4^R6UC z($uQ-pzBF285bW#3m3Vv8cJ6CIs{f8+LGfxdgz|Byme=xwa>&-ZW27f!F$grJvePE z8N$7>-f?6IPOn$m!)_qr`423s3GAa`vG zF$%KANoUM)du!8y(-lsF_gInc0iCyZNmm4b=hO)z=(mDfZH=qjl} zcWPcpIU#6v{PvQYl~iNL=wWnC>A^dl+$FsW=`LeOwg2%g$7sIE2q_Z9qhy1snxrpo zX;rcXHJQRVZ9qaSl}nGfIO(-HJjg!dFe#)QY6&!?ZaB1gNq3TXCA4yAU1qnHhkgwt zK9-XZr{KTPlB*9Qys@_o~@f8#Z(v-juvkC00- zRmmf2U=Lty_BPS%Z9HNgnHN7y0|avFV_hZpQ8A@73u`hEbA6j;m0ydnRVHio{hJIZ z?hR*Pp2^7glf0FKZOSb&8%cdvK#74ykGD$NPm1`44D3NUr%dqQ39mVzg@?K_sgg~f z_bQ(bL1eO46Fmxsf1uJiNEV+SA^}!I3^ZyAlo#0YDQV{u+&;2Y&;xXq(*3^k0xPq9 zr)K*)`Gw6F%X8!UPL;{HGgR-aOrk?{Wg-q(m>L7ov8uCv?XWvnB2w(+orf-)CkDyn zkGSoJVDvT~_;Q&a_$nWnDWiU=EBZC-OJQ+OCUF}{*f0G-o3F!QUo1zW_0AnU)VdV(;&$HWq*mLNaWgu`%#jbuhO27tDiK7j28J-Wuat{9DmBTrNAb<&-MK zJN~*ZakYV!)mv@;x~RC?>Ya(zTd}(9T3l1S6Z*}>n?*?7+9_^r`HoJS)r^gFDxOTNXOGaH^MA6 z9VByC0}}aPNte>Z4L6w&4F_WxM6`Gt934AX90p!~Ti)+PoQm^^$l_Z{QX50Yz?vXT za~y)yqG?(c#N?#R{bBNNq^-N4GxUdw$yU{2ME1?Y)bjrmeUn@Unuzu~a)|A}k}!K% zq>DD-IUb}YiKQqTix`H_qxBG8d-sU-!1V;uN|UPFDq=YH$7&Q%{h3?$ff3V<{W%Ms~6{h z_&`7Pjx6?9gLS&;2>|*un>t_L%xMU0Gmu7)veBbtZt*E63wS6+L9@rpS2`T}{#G#?Tc#sLJ)op)0qRx8WjP zyo(?QiVxCKF_(7&)~bR%wX51xZT82~p=xJfQ5QC)$%}GJhJg1Ua3rkDoqch&o39|D z<6=rirM^TmY4$750iU;j3FCv5?^zQ%XY8@ICPNkfVNml^4_NB|^*)Hxph^0kkAHcJRu8mjPZUyrftx-})s;?rxbx#Y;siSF;--Dq z>HBK+@$UR~xF1iaZCLHyAtVjT()2p`x6UBDTZ|_J@DRk|Ojp^PIECF^;wq!p#$k5} zqwzQdnL+R|BzP^ju^tT2nH^_t@iOc^zpY2i$1V^sWKBX;?+>(wT8|k(qISEM=p0Np ziRfH-K6E`^2qi20j2ka8m`*qf<75*~`}%La34GayE}@_SD2mLz4;RVH-eim@@@qVI zrxxi{iwxq432#rPKa8GDO0p6?L_Yi_iLBCl(;4S0!hw4K0uGCnX4GN{^6=DE@Nr_9 z$>^^VIz(Bkp+_6&vs83?iQJpb=!JxiUigBBzWIFQ$v!q(MW>g@y*Z4YP3RDKvWEVe zf!`{RI`B9;EV4X7mRL9hUo^hF*W3jy_pMzlzYOG5U3cjS| z4FGc;6OZ@>8)cr zH_@Hw5%+3z?;CV|HM$p7y7ol3o9Rv?I>fO9HM)BYI-5o}QKj3AMPZ#+$}S=Mhp&@EU}}-b_yELMis(AW9j+ zDeWjFI(Px4jO3IfyOGi>*o{)gb4rj>9KjBhGMQ7prW6bqq0iLzzOWbaq<{Jdd8`il zo{yUL8mFP#wnv(Od6wEirZKZw%-$mwY0QG>=!E}59=Xyhsq7*~A5G}kRm88Me`ugz zqM|1f`bXwWJwnW|OWlWSR3-k}Q?K_O8nYjeMz((i0)6j%rW-|cr+UPF z8r>3u?qZGZ6(g+&r=dn2!i1<~X6=aCX&!Nb#%z*7`0FpKdPW*)KX4kDEpITZXE`&g zLS}v!e2-_fTB9xo;maDcIHXaHrXl9^u4B5HM2D`hP@~&*wyudu8r?=CZ6v2bh)cqR z_@0^dBW7Ja;zo_x(*|J|joJN1TAar0w|(Xs31+r82xg}vUecIdWe{#a9$QZzq*0At zMSSV~o#_@6T{n;Tu~yYYXM^r(jm~PM-Op*L(XC-ZSnx=gTTCQo$sX~r#;mr7uAZwj zX73qkeKclCVa%M&ERmRX_lPWw+1&=AMPqg|(x^t85Z8JWKqnuWO2ipfW|EoFMgd0X zhu^JKt9U*WC8~Y9CnLR0NQg`p1BpV8w>9KwMpnO|gI&AOpUI=r8`TzI7MiCk*jPJ&M36KmZns;;80wm#d#oN`NkS8xO^AM7avTOICx&V9#f9f zai#jee=`CeE@uP;vcH`{vZ*tJTv=Zqu=hq=q^v(`_2kjLUVT9XnLPKU0xtM1p+;`|uHFcK#JAa_|${2Aamu z8JPUC=>*^8BpBHwEy*n&)>uvhOMXzmE$%^@$*^F4{S1O}8o?kUkm1Gy)*;G;V6V%jW*iL^&Lx=1JQr&L=4I=e6&^l`+ z{Q09^dn?lX%X>-&dI+Qu zGW~hAj#jLd;5B5r6=@MNtuZKuX%rV4GWFv$$n;V-(vvC^_TZ+)gSD&_b@f3_8^sF> 
z`icawYA+y4ea!|bF<5B|PuaoUz>KqvyO^E=36fX>Jq1eX1@|1z=g-qQ1vfkuUgbMY zUE;eN;Sr5TSlFA4Q;bI^yUHfu4C7HFk4JE@pGx8&-HBz>94bh`YKh)V9)o*qrG`Ue zzQcoIe6SZ|L%zv7P1tN95&juP%r^#`t##BE@xi?m=VUSOwMe4*_2y5Ald41S5YAgf zd6*=8XwGAEb*N&$!V7Xx%b{@fg5gcyvuMHlG?Wo)!tGQzJu2@O!_~r1f~BEwUJ3M$ z70hr%8>>i6Fh@hjbr>6VukgY_GVeVUe5dFC*(nWzb!S_aWdzv zr96y>o5S*^bKWA#>xKG<<;~>0$0?7#w-A;$oAd6XymJxXhvhxVd1ENAw?|9}%bUx2 zmr~w&9&v>^k6ZOgtm~;g_EaEo8?~l~vjsUiHi}Kf9g7I$$ zR7>?DO)d4-@KU3KdpLXq;dXRhReRY$Vu==<#*G?W4))-cOJ7_{Jn7t zCLTAduzg1c(3*KqA=n-1-uX;b1S(jY8X{x5aEZZjd?d$aA6()(zfM%rMf3s{so@|Z zFgHy=_9W^ub~JIM=a8CzIG*e2F*ttzo#gnk#_<_&ByVzEG&oC7QwbdIcwLB2*6n4a z;oZmJg+O!Dyy-?ml9|X0s5h)wTP@e^3!;*ByIPgxWR?URI~W`C9p4%Xt@adLz}jqppXxO3S- z={^qPUq=jOu(xdsSv3S0j5M0M+@eJt>9`-JA}wVkxZY%mq(_7(=AD5$+T$A9U;Jqs zeRd11D{%`_Wj0yE`kJxy!3tR2jhkOrXjt!rV?l@)GIUxiGn(vVJe8JZdbxmP-S^PL*!7*(9kPE;(_~DTn+Q!7uCy*C3!6{W8sToMEQ(4jZ?mn-@&0Eqz#@Yp&;a2 zm>#{2eS)j(OPnGI>Fg?7finalBggP+jYs_YjXw6Bu~}`_AB?n)oQ4r@wUKu8E5d`_ z-^68RYvUkvkt?ML-pW=FL;9^rbf4M9R7FZXb^cE4AFZ#lY}WjdQqR} z1a$urZ5Mn(OPCaHI;iI;bBec$t#|V#(i1o@<(q=Q`u$5{q<2xM@N`3t*MLsgw`p=r zfE=nX4Iode{V_b>I--&_%Tqa?s&R}rIQBVCj_~RC8XTikj-SC9u&$QHqcrJDcWpdA z8BxXg&;vL9cHBfW^-8Fy+H5f1<*djGa3;;<)tGvf>i%v=o*I0&SA*{g3`S)4r@P8% zT!P(qb(KxS8SH+cX7@`Fb?E((WqB=pXoD=BvHYv!sL}GTxT_J~v-hDg!PCK>dNruN z1cqvUn>K_w9b|4Zn>M32$WMD|od`H+hs3(~Lf^ZYjZ067 z3eIF)=q&?U4i@nh`76G0iEcIl_vuW^w+B5M!2_Li8|r|9N%!AmQHx_>Th~!<@dN9R zufC=MH*R+nPAG)&Z&g%>uEX%rMGWwoaBYi_3uc#YP#gBJX%QIYhh#Ww2si zVsq5>f!?}~7)l3}2Zro``fKP@8NH`wY^Xndzgssp9-Q}?M?ux51E+(jbg33F)vZSD z6qAr`>J(u6El;v^V?;_iZR;wdePz&5)5oe$ z%DBaYUvd`@er%z3frc{>%&Prj3i8-TpJk+t;WQY_3LdM|wAzC5U8Ja#=^EueJeOv2 z%D`F*^Ec@PDW^0^xzpTjRek@wmIbXpK{cqaa7_mF+yGV&RXsti`k&}(RCTAT>{pzk z>QKTioY4XSs?PO{hO~h%@FL7s)Lel?an-Jp-+-i+S0KwP{u$Xxsa0_++`fBhT)v9$ zQ^FQX*pukifr@P4$-dq$@B&GU3`_KcBV!);b9)peoqJmprP*d{!8{m#X?9=SHqRM+ zmVI%2d@$803+ID+S_(IE6TWy_(%uy`)Abqfc4#~us($DPT%th??bD?IidbPFEb5vZ1Ed$>(fL9 zoOdua)Kd=hwkfXjY|2>xKj2}&3xKx(9|2Yaz6SgZu$~WGKvzI2U;tnk;FpVSN^F)* znTYed0TqBFfS6R9(gBbNI1_L_;8s8(;3YgRT8!g!fF*z*0J{N7noa2m*pOpWa&Vjh zcpLBoz}L&l0`MguHr=LN0w@GL0(c8h z3-}W73*dJ^Odp$)0O$ee4;Tt~2CxQj7%;G}O?edX86cq__UQ#Y2lxeWQGc6q8{lI= zn+(VUxDs$Rpb+pfU_Brky2=5J1RNY@Q+zny2Y3wd8l?@wUcopfWJ|r?c!^CZ2P6%a zI(r7^9{@H18UW5ro00;!9B>0*2H<7DYQSDVrvWx49dI4sCcrd60Pr#3AfU~qHl;7X z2Y3bW6Cma?&;X_ao(6ml(Au5a`4qspfB}G9z)gTcKskWgbqkI`z!AWSm&3LI{Qw@o zQ~=fGAspud76EDin*jR&)b3pd0w3T3Oa(jycoVP+upJNrbcdd92b2Py0K5QL1XvCr z88+j%13>z03tf^vF9Ns$QvqdwrvPsO-f*LiIBo(Q29SItM}Hiz0E`4o0K5XI1l)QB z_6q`hcO_&7Tr>pw2lTzlri=xY0_FoY02%?O4MjfS7Qn-RHvvll>j4b_8|)w+a0Vb9 za0OrtU^1Wt@HpTN7070&ep!$&4{$Sp^mr$ZvjEQmNcNq8J%H>yn{x7Sn^FZi&&DwW zFbq%tAX)Ce+NL}S*bA`a8tv5)=gEL{z(BxFfI>hy;2FT%fGPmVvH{0$0Rj*;!luLl zZUcM(SPeMjG0GkVc~$`q0vyo$$$&n9s{mB@Cvc?p*a2t@xu~i z9EGQPk6i@ll;740Z1Hb*g>JWt^fiBl5ZhU?|2ytS#Vx}okeH)*#5kb31C7FqBPLE6 zPqo2@W--OPLI@&?Ll#6|g=xMH!~CI6uCfIjkSyIj9S#e*&RrVzK_zH^mS;5ISWSB2yn5*<=ido|2yGlvo6Jij@47!;Zcz@SQ zc#aSpj9@b(Y|;>>6GAj2Z2h+(g7POUP(E(bzeMsS-EQZ$5hgwT!=E-)kX)({pD zLVHGtHzTBL2oDm1ixKv}WU6hZhHxt(D2%YdjF6=v3?YOLjPQ;bAzMQ@n-DrO!fZ1_ zj)u^d5IQl!ZDxd#8bT1}Mr5v2$J{U>HM4=~EJ#8=kTYU1lH;oN&E*T$u8pKItLHB`C>Z%`(>N*+QN^v(fp zp14{TMq4)hMXAqnYJZt}u*Tu*gX2`d z9Kid4%>YVo4h|tN0eO7@qX2gU{z+b%&o{qsI`?CGh3bu^(&yjzw0wywMNF>s~&w z{DN*r`BrJgFspn|yF5EfeX-QcFNgV!B7SnfXr$4C`nR!vrh@%5WBFS#^|sWiLD;YA zF}%J!3Hl@ND^-kEm9gAPa)~ z95EWzLJI{NN;ed=$5sx7ZLW5E$I+YRu{art6Obi~EGpCdiX&c1=GXh0yw`^Tr@43C zn|+-e@4J}Zx9!c6^_7z8@7q?AEYaR7DM=!p*DxfJFSZY*&lNNyjW2dA?KLu{25bk;CQMX`^O(HUt}_o`w{6V2g;sQQd#n4OS&9OF#f}aDZL$b@f--bB9MY2zoyR?GB_)ft63;+d36#Hvi5)>!bZuj;IDV 
zcJ_#-jXlnmZ{c69s@LMLYV>}vLM2fSh6kamnuoqI#Z>Be)6Qz+YHj@QoP1pRLsXvcndGZZ;eAlY4#LgsnqP=r8 z33gJ?IxY!X_AXKavp(t;lRkcd6zHwI(I3R;4gOe+&aYF5BKP{s(sl&x6p^Uz^p_+q zGl?jj?myuGYhWSreceDGdcyeUgO2Tv;n~*azo{$B?eFT~3R}7GLj{IJYki zUFXU=IvTr%j6)}ee95oF)Ym~n;K_|xKrAw93=7nI3i!{S>>Gzo6Hl} zQo(q20S1;FIFu{;zstJBK37V=Mpwy;5VPMQUwgh2kSBUED+f06Oaoqy82TQl1GM#t zzMpNJsV364^5mwK?iRLKlgg{yfs(|Ji->C@c)J!4%R!s+hKgT$B?p=r==xi1_$na=81>7E2wv{wGL$7Pp*i$&jNQ${js0icfJ|ayAr&1Yz%?tKglpZ!x*b= zQ{r_Z9eZTO(;?ccFxJ_GrS=`(qGXJ(Pctr-yf3Ep*sAvj{Ax~kHr3L#7@zX87nKiL zmW~bMG(puP?bPFhP)@g4{|+gm9zv6_Rlak0o`L*$@hW;!cwbS>`=sDjzg68QMXrbF z`)%C=p6Z5X*gi2u+=}Zw1SF*>McbEwVj|c`8jSv8Do3rXy7O&?UK>c#!Y-sT(3HF8 zoej?9FLKqrF;!j3%%dotok?A+ny+oiEc1WV^6|kI5hXc{i|{wOq7qoN)A2$a@m&{e zaqOl&jyqtMsNQ}vv>c(2TNmmM6}u)Ri|hE>#{2aR$2o1&mnLoo<1p=9o7^8;Xe5#K ztJ+E7PCHe~Ms3t2&Nhw3vT_~N$^kpEIpu6c`KogKRnf3ZiKJ;r^S9It;(g!-8n~C{ z`tfP{QW$x~Kr$TqwqOt3DjPBti=6ar!TPS1^l`!ZlPl@#g4h{|J}-!!f#~~!*cphn z(nZ~}z<{~}d{-{}u3T{*lJdl8Dk*$#umtK+VZ%w1)PuH1-`MF8FBGaBL=? zggRhgy;A8Cfw-$y%_x2v|h6P4na=O?plBO2lvGC;MibdL{%u-@ym z{r(n%EIdfb6OSMb+xNhbJhGpNap06E?&5UBo7lTlRj+9Wm|XD(3{Wdi>h>6$WVSI0 z*z&^~+yk)RDeCXwAUHl({PZRnK#;UT!?Eno9#P3%AYR?M?q=S(4)G7krS1;R1Cgub z3o>*bPZ2Xkw0uiDS)+q^L8j7~_N&IebqSF(!IO9<*xVj=xL;0otI8wwG(a7;k5dxF zO4>lL`L?lRA_vF_dwS~)eLNE{;+(9B@9N6^RqusNWKRRT(gvJ!Nxk95bR7vDW=s*T zYZ7R}Es6c@1?O@<(@H1~VJ)@`*7h@9yqAVfdiEh~KhtwjI;rze7gFbNkVUAo zy-B=g_cJ{eG|i~0BkSah!F0bXwJpM3r!bvad9Ic@n51`BoH)D0$H$7R}b zd?Ej7d|%+XSEYeB;_CtrQHiR@lGfUi$Ko)d=`61q zkCi8Spg7t!|8yL+Xw;J{{JdKNKVW1;qmljd$5)L0xdG?0eg@1zS;iy5bTKz z%!4;3^29r+e}r}Yi~5<`Ud{H;dq5LzTI}xJNY?CNxN8A+CQ~i$R4_!C*xb0ejBH0O z>$t|v^(KPj>x@nO+w(6Q`g;WDQh!^Z$#DH`4mPX5VUU3IcOsso!T0e%7NNghCJ~#} zUj}HJ(cc+J*7TR9s%{&ZCF{qng*IR6Nf4Jie+$Ebw%73UCW7NrS;Y8x>%UP)QOIh; z9dg8EqdDa!r1;{6Z49flR1o75m>uX zfV5v$aza6@cMFYz(cbTIK)eSt8VYmix4-b~X5*iippxlWjA-yCAk@ct7YML^50OZF z@y+aZeT=Pj@xC`$U`${qgZ(hbki&zfNA%#QNyvnC4K*%`%#zA~n_;D2WES=Hpd#*F-Uh%q^_z{Ud9i)40TR(u=I9*0T?g35Pk};p!CmIKDbr`vTQt8+c3C()xCr zvrrU7d6bFX0#QY_RTe#5lji0o(x~@~@{U8AzRY-pDorL-8@_m1RjsZ3V^x{Bz29V8#YjR&ppz z{D-2@ps7rB6^NSJS8Nk${)T;h`z+P*cd#dETCp!F&NDD6i1RX2k$#%m*EFDnE9Y<6 z*OkopCRH4K4Y%dylmfMt0(D}#b6B8A``QKL_>Tqp@EOw2Ca{+V-->;uz{ll^J5d@2 zT{+-J_^<0o62pIuVcye7^lj_*V>pa8FexL0<#P zf2_Z|nDHDiK2C?xM+$T)(_PB~*=6avLp|ZY5h(Z+)ig>M$lkJr(VMfmVjheN8hVH6 zK7&{l*)b6Ul>+5I*3d|1JOzxUhGJR~h+V>oC{4F8eJJLW2z%QDBQV+9-Y2Q*4$`cY zR`X{o2=71jXHPQMMW_YK-=e+A{yGXpL7O)+(LEq)YHt@ck>+nWkhaV?S(E124rDbf z0RnA!f^@SR)D_v0_Vy}J!qxLP1iF(MKMKaOd0VkJsiAZ!&;X_z%>qT*Tj&2qAn`cW z)TRs6ioHpX_8JVPHG9*dvl+mn`75F`JV;f{#XLi`2sgHoKnvHVTgD&!*$f%_d(=JI z>3%|!J2_`EXVOn|24~z^;2V|ro)v6?#HY1Q?jM@GUn0Yhcl~3emU`kHF7G^`{a5l9 zGUtcE`55v_b*D$lYl?qK6l452jTxsm75M-fH(cbuF@Ds}Aw_Hh@8-nS?+B{_=PVS3 z?(--Uy#=DC@yX3ir1=~1Nnd6>LY1bK_(VGEBd`jHV`nPTPgD1@1t{Ul`5V3Y%|}T& zE5Kg%=2qepx=o%d@===6c_RGtRV0ehon2mo5EZ#)kJQwJ!OaZ1?`UZ+&`P@ z*$zZa6}Sp0;nMsK1wPG;--ArYRG<{-8Y$2XOg9~LO~=~{{~LkYFyn5zK&_0ovWM2f zsdDP~9A0g-3qy`C}|TyTxJX5W9`JaqD6)PLY!tXyo-Ke9s!JScQGb4x$nwDsass4v-Zfq+%0^iS)HEYB@mI%{2D zf09IMX>v*bAJ7+>m0P?B*2mD-g8|alJZR0Nucu(Y*c*WC>p`4`+1FJhZn(bsnriS5 z+t*K2JS#`n*XMfDKW<;QgY_}=HD(s+YZ|m>($_?oo2sud(((-ZI+er?*H>Fp4gN8G zN&8wr#fR(bSv~0=)R(lc0bt!kU-;OsJur(uc6TRj!#5XqJ7#4iJ7x~ZE~yIKM<0+< zKjs(t)Cb>8VT2|(Q}~Wavsv?u2>C8~Ny&(diYJ4ao_;mt2B^ zyf5QTFt7Lg6BV%+ZSVS^3Qu_BMWB*zkmJdu`x8+40oGShx z3s5m1keFTNb--3W;Pk+y_VNK|h)p_Hew2IWfF5|kC@Dv*^=q`atA{2m`g9IBSPI${ zefk)k_!1;1On!T;%yOJ=4ct#+AI!r0`?UTtWHpBaGd@Hj=HCpCrtk?X4q4rKm(*5pN1h(zQ6G{ zEqD6aCx)|#$p5`pWPZD%=Gwoc#rAJ!ad%g&tD@)Tbyt#}#Y@i7RXCf=XLXczAlfVe zJyPyCG|2u?C^Q2vhGBO`%UFCQC@s|4vaXXtZzJ_Z%hE;=mrkU6&`Nos^WY%6Qkm1k 
zqAVG=z@n@gXuOW08DJGxBgKrD=3!8=KPdi&xp#j8jC)=&8zJh4^jN{fYB44;c!X|)Do z@dTQEUCrnI-9DrLOh)eX)T-RTv)pm;*v5Owv!NkRy+gfyJiCu`#pjz66j#X_JYJZY zv4t$5KDDYoRz*PmaC%uk*PCXn$SJjd@lDR8+R0&4gUpO5AI8gd>E ztgqe|6{z;_a#jBlSB_O7KfW?3VgrY<*G%~cRBTr)I$}I_k*Ts+ke^t5n9f|KM<53_ zp7CKH8kehd53W-8tc45$+zI+cvlY-8?KMF%&HCif>u+iw%RHZt+wxaY%E&V zd*+FOb#W_5yn>C`&k#ctP!dqq#MC_`ZD0+`oCsd_xGxG^NFR^u@xANMs~Deb(YX5z z?(xlYk0Sd!rivc!PgJ^ZXNte}&|qRPk|g zF}2yAl_h*)39MoH16%0hbp>%KXFAH6j#_9XEE=clC$aew5(_rMKVzluOezI$IpDRf zT>9={lK0wcC}J9PRV+Pt)ic!DiO!fO54rN-kq32iaJ7w=4V7(#W?LC$(OPc4no4c# zTu}vcY_XAx!%%Bs+2N>x2a=1R2QmwoM6QZ$@`)sU1SH~`AcfM!GkBORb+vzalD{#^ zHAnBa8VhLe0&i{EN!Ul(TX!EF@b7GI`UmpV`!Y<~yKd%5bT2$z48?eWJj_q&1BQ~{a zH0~qRsIQ*{_6Lqs?~V5F>QY^=;4x?a_AcmeN?tjZYheN@Vt+A-V$XO!7TYXL_MPk( z=lL74(ZW=ejF%ac{>?8l+HvrQx)ds*w55|r(_0{;nGO%GcPUH^*x#qbCFF(ujWI5~ zHI^s(st8BAxXNfZf>7FXlCrDJ2p$0TIWM7D-eim3Ek}7nWO->gz+d)86??)Hrg(B` zjW{t+#3Kff7ioDSR=*gTC)R*d@OkiTmTriS3c5@53(!3Ry84OD678a_)y-!8Nm(Z~ zoAnN|bP?%OWby(>Y;r$m}0)b zGQ0%!OJGg)9$07>Pw~&8ODvY^;HiN>0zXvmi}r7|R@XbpGJolUJQsAcJ<1p7KVtR8 zj3q7bxEWu%!8h37*rgy5VdKdtFIj^ex40vQ6dGUOkqonxBy@{q_i{K*)_POGzLXM| z*x4{+>j$;OXemtBU_^H67IzwqVBu``W(#+VkrE-_lIIqG-h=fI`{?RATfPN??ZG8N zJ#N}DWAP3K_wU9ocTluNqK;u@Iz+tW!H6bzApL=XKUu@y!HgZp!f&`v<^Ns__`@NP zGf+#9sWnswqXUBMK#+%!FYDEHL92Q2ao>3V&t2eLlEfNtqW8J%#Yg(UjtkE$K5`L` zQ}ZhP*WnGIjB@DMRd$2y#{FMow~>NFg@0uuBA&)kuCmKzW}VEerGs>@qa$eF0L zPboe!kFFiXN9YqY1+IPv3gX}oc-I4b@sv)HZ;_|yAJ^0bLY7ytzYpLVz+-^Zpj6;k zZG>sH|F8YY@&h4~EfX*uu>z!=H0}_)lu`k3T;A$>5 zv(ZR!mq2uikER*=gCnH&LkH6IK^>F%?O?Rl^?^P|_-1F#wDVe^C4-45k6N8+d@azJ z@tf)6Oq<>UEtSWw2P=Uec-|KuI8coZCSaoo&&2giW-s02Dt{G9@;8p84M@7+?LyTy z6SSak&vkyDz4~KS~V@^(x|Gznuu#$GjGFN z%1y*9xdy2ir18A|9fplTZu2`&#k*qrF;QeoW)iAXRAB(`dNU`))FALTdF!3P9V=(LZVL+^1r(o=+VjK?s?J@b5>g~2%e19AG zhSNoZV#B3MDA>_G2Ul}H2Os+77g=7yu0$j zR8(gt)ef&n;`uYWRD;L}bLHn*obsi!K)nZZygkv?^&O}I(8ELU8ryq|1h9^NoAqe- zH%7b4il7<1qFlEC`v=w}75KDfp$=$j7I+o6xSI=ixWxuOv1eS(F$&^q^*WbYwj;J% z7PF^dHQz1z;DCiTZpb}2&&RiH#12lBfiTwrKNQ-JgA7vvkw;C#SQv0%rKuYG1==lm zm$XgsYKx^Y8IDuWuPD2UUVd+ES2zeZ`tc&R(f4quC|kloZ@a=?$k-$^KF5I>BQQKs z9W?xnu@n=!MI6`1Q5-_AwG?)9m3)oHDqfXK{6qL&!W@pB%PjA589TtLc-1FV25l5r zC^OS!W&#eVMJKD@Gz_@K>$g+;IADvevMpo@GG;~l$&6qP62s`oF5KdF6Wv@%Co@K> zbO9tHxJpBJ=nX~pL>-gmoEJ;q`wxafxIlUqI{44Hs`qGekvnjU4m$tBEXX7i|D4Ri z)}P-Z;~v;0(O{g2E0o_KUr7K(B3X7~z{#_}N*Joee;9hM9qtzYmW&}|0=tIG_}W=C zJvqhV>(K9qf_CI3pic|t<4O(EJ6U|~z|ZIC_mgjXzkR-+2m#G65u`m6>g+8GF*gTL zjA{H2gj!1f;T@Z@5YPsVc@K`412O^A02INK+}LdiK{7lOruaCS9f?mo2%qSf#;#If zz7GEySaG{MB%+8V`*>KK#iVep)N|G@gjSmg+t9l1+Uy92fK^ z*Qh*)R*(j<2JwaEC|Z`5&P6u!vU5+&a8ozwl95MK(B9`PM|=1ivkNapZ)A}*9gT**abP^Wa2T>{a- zaJFPP2?w}Ae2?c=V&DvZ1SGZOH0&NPk!OCurF(JUx$WaX(x;*AWzoT6BvNoUT@Cf{ zeGca7sLzMf(^o!F*0cN3k*tQ>RS z!S@`bFo6~5IUdmlSJc%}WlXCwExxX=(_W^&lW}FS7jyz2U;BRh3)<=(%9z5c>s&Rn z$GnGqrvYr~`~!8t=#3xKsgY>Hl==E^#b8TQo)LuROW=Qx@^6B*oB_xIbO8K;w0<}) zz_AWTi^bt^SgrVR*lhUW>!>KYw%fJq=;*LB2VXm4VhD=~WD3+1ZHdNziaOBC=_1BL zwEx%tVzqjc4z#*cy{Z1P-m$TxK&@`&!|RJ{auIy}5m;8eFE;QSf-kfy*18cgG+K&x(qbdxFDz^A>x31} zB=#n2Emb^DD5V>wwWldQqS6XA{885gXwH-?rb8ACmK>HP;ha}Hb46!P*1{Rn1j4`D zA;aSYinNmgt0)H9hrmGz{1jM4(V3vo!(SIwy{|2jll;3bsjiEOTZLtL#LTqBmLP6s z?)Ho#`0&D$eK+_;?~KE!6-O5-AF>09$06f6XtJ=YukShc0n=4d3>?3R_MPV!QNBWL zL~_)e^CIwkV=2q!7j|Esk>!VwQNDftU3LCYl5ZE9i`T2LxWRL0#D5c60Tj8?>Xsbw zC#Z?=5V`7le*FN+>SZ$v(xF?VhqI80-3%7s%TC5E+RH><7*BAE1zvWfJSahA48)Ea z^wV)mu=h9MX22qV8(_f-m$A$heq~zxvf1rXcDwwQ$u=Yq{QvgHGE+tLp`A~tzjenv zH;SCE%@^Gchd<>4y;F?6+0Z9qW%|?C9}t)+p1@z|leA3F6F7A>UTk49d&V_nZRh!} zJf{|;Y{mv~c9qd4#TWv=|Co)k5(jh@w>U6?OA6A;?KxvW=Wp!hD*K5@78k8s%E$|G 
zKs@Ctdl<>;{UpS{T$rPNhIvo|#~ZOlM*YnZiO=swV%X>h%)xt*7&iJL(N2kBqaPAK zCGxP*4~cz&sgHg#G%&BNxWz=So1Cb&ll9asR3FcN`pBl-2yG+-h5}rGKakc3$3-|U z#F2&)2M;9<8cG~|%|nTUh7x(r!vW{WfkFQAP$Cf=|0@Fq$=CERcD7b$s=romsx#G{ z>x?lXmWGj78b)Gi7>T7}B$kE|nPvp5uurmQJS?EC4n8+N!h&Uf7cKvu=JPMn@~>+? zKUT}nXfA&Vt)%;F9cS0(TjZqkgh({*OY}qXEJQg7!tdSlSlaFZ8Gf^LcjkghZyHIoS!3DAEUS5dd zB+jbLba)YL`el%<%ES{Z5lKa~zjo8q+GbN5n@vrD2WiS-WV5L=wNz*s(|o09T7t!g zy>>g`fhlYCHb>k#oY=IJsQQy!i-$**yETIa_uh+*krJv3iFc z0Nc}2!iO}G8Z)81LycS9dp(D`v?QQvSG&)tH7FEs&-PuF?K>}UQZ@cMJ0)HcmPq(mm7|O^n#r(HM!T?#)YTe8t$3_|B+B@s=Te%QdR8IC zz|n%;2Nl=i7<|ns##J^9&+@3x>tGqkvK;Pd@lcy<@j$D8Wfb4*JDgMyk2RJ+ZMPB0 z=^KB+!!SufC#XK8T5Q}FsJ&w2HdEHGNFM0&R84SoLi`dqN)LmiuEw(kv<{FstBY$f zsyt(3-BVGb({KLj*g-`{ilz$W^*;M()FC{G5Ko1>7Pt4Wv{lzR;%fWRJ2513$>U%h z^=t4U9HfwLs~^%u`)fnkM32R(3#`fBSl2BpIUV8$)=;sKWustek|7NmH+41GB~x_u zpGlJ-rJoL;0aRS?LqhO0N^2Vm%P7U|vpaVqN+SbRG3u zkk*`0pMiRHFxTShq(oH5irRJhMN{w2vyXx*>X1qX2?Iw-H6VpPT9*-~m@eC;6g&%X zN1$|tFK#Jmn+&Pm&e~AT>HbQHtwofHHMAZ+5DQtqZHlo(q4fieNJ&J(RwSV4tiTqG zvOC7|t9RF;<1x@<9piHIL{AjqU(O#!Lm#Y}eRDOwV*+q4!#8vQi)!!> z&dtkqdor>io~z7-RVWyB(Xc4GansubJ#n5UMq_dtj3T7wa^LzMxj)ZpA@>Dr0~B0^ zT(Vq*_dT)Flmb;Gxjni0(OB>lr}m@Z!I{HFDR`KI?hN$K5#7NT^`WiFUq&~Q_|e#( zwyz0)Ay#aI-qzTEVn*Nk6QY+FS`_6GGwubLLIo$2_ijn@8_np8!8$mgHS{zy`fCW8 zf*o5!k2a&vBlPXGoYRspoBNp5vSb_3-)jy1H8c8M#Jv>gXpv~NLyDM*Y!uZWEtZZJ zOGAtGMhoQ7%5ZYpQKd`rcqP%9mU9jdL$ZJDJOf#;ci)LV5J<8!Ox5gEd# z^76#4?K;Z;q3*lmqpG^DNhb72MvBrw2u11Adle87M?w-n6hWi}5QHGTnS>S)6r?H& z(gc*E!q9t_-a$Z6fja~tASLu~?R{!6Gw<_$-}BGM?{|jD+3T#c_TH!6J2#Wk2C_s` z(#Wjj+{6w`V9Q&3n_e;&a?wr>0En^`!wy2g` z%Saum%wY+U$fcOkZMJDm^#f}92U(Jus%cH@Y)wI0)542dQ>f}RcB0he>w?a8r~9e~ z3dAte>9$ikcMU*bcRU6hBGBhC4dslS%y`Znja=3FReRvP6|nOl(|Kdl`3?}+ooCgZ zkG!fof5CMArNGm`@%6MjKT;)-=kYRx7`1y&YR{WK)A?uM5S~{uozGxAd!F0sJQ)kJ z4tr$4&NG_Mo14ym1%d7P^SbkU=e6hgP3QO(b{%$nmSj3VRXLF7vj0!X2;(vyirMrjBtMW9V(~LL(b2^Lx-UkpAM~_T=z-^S0A?Sj4mkh?wCZj^#{@%lR;Uhk+Tus4x?Gy7 zs{=rxjc}eK9=_FaepQF1(y`)7VAU5Z+CFYGQ`I@9Rd>Osm?G9c0UZ+r)uGX@yUu+4 zBt;L3N-Nay00_~|NOUlXrjMz5@N=bw`LX5llym&OB+@a2K0*q`-R+u2$NNUd?+=8I zD5n(ZNC#jr_nTWYrN{55$xiY2)R6<e8#W@1rjR{qzY}r73Y0!L`KJ1aIws zIE+a}-j>i+F#P=tp0=sb`{+!P$v?VuD)bXVPx6m`Qo(k!H=#TFM;}Uso=fPfg!YrS zIRw{1=}Yj26vMP3$;gSx$Rl~+jU(r_{7h;}h zK4aX)E+;p~d`D;?@)=`h%G0O7PE!ky`MO|4BOTKc?l6--V zEln}*bQB5(q)f)HC!>(dP)@Na_e%((aO;qc!pDH2N~Age?um} z9#t@>rm4)$9_6yT&t^_TeO34U60DPuh+Nfx{bV+7mNotSaMz05<&%0z#=Qq*h?nGX zfM_?5mSs1CTz6waePOnjYsi9e4o|Bgca`WAjo-WQ&;ty$oJ2{D?Q!`*qGm74fm-}- zM#(6rtLc3LL%8=)yZ1=+fmIE^fBZHLtK4XdDARGel2aWv8dFiuQ+VoIJ=7Xg1xTCU z3@kI}hMY#*ybj376sjzp}8ROrSV4G<_{%B921|^rhoa*z}cP`btTw zuh*zAzkXTgp<1S|b+;|Q{=%unm!I}rwPM^V9f-1(Ipz5F>oj(?DJu}=9@-EU-Y*r%@pXnWd7pN?mrXgH zC{Nm}?*gLqJ0-dr%C8M&eo*S6@ETFd6R?=$5dl^Ha$Y;$%uw#RWjX!@AifH=%5(u$ zb~Kdte$|eD0!pp2E>Sk4T?!{vnvc=W(pT_P4=e`wbs`MV^1PkY!b(sH=d<=W?#cAb zGn7R^$uK_e;O}{ZLL80WI;xkYSjO|h@Xt-LEJZtYfwa!ID|t~I$*Ea z{5ZiUYW@!Aw0Bb(&)((I-gN-d@~$O*l-`wS{;YSt0#EHM6c42Hw2UfNOfAFJ>XSsjtES5@V*+4ok9@aXG8=dp7r;cY* zaiwz$ZWT$LcX#Q$_XA$Ua|@7`&XxF4>Ky(0zv4L*E3TPGqk#3B_v%LHFz%Q0{`l|I zIvZ-8K66rSVRY^XUg*q|T4xrc^Y^Cz(s>;Fm(h8?SOA@$%RFKpo!F`K9{G&US)dSk zf8OX^z+qA5IjPCN!Z`}pr6!ycf%TjA21e#+?v~S@;Tf6Dwag;sXdZ23 zo=8$Mi=>j7)5!ES{+G<(@7pq+qWH-iu5jIi4Mgl=QO+zJsN`(Fuw7@pJ-XGGI#@mD zgF>816R{o3S+tz-ob_^A=3pTCWZM$5n4&3qs~6?GDm!Y5?`P+Rm1w)Vd)4X)+f^;y z(GjH0x3-VHT@?!C2;rq8FB(Ut+Wc_jY!T(i$`jg=E8Fz2=nT9V7At{dN5;NpJK}pa z)y^~?D8ECXu8|baUGNZap$ol3L(=O~QiO9uo+q&4w=$BB`jn(XBuU?sc^*H>k$Sn& zzlOkBTz8p~dg-MAGN;RwV&b3Ms)s;3bZg6;3kosua~qjIFrJ4%X)SXAkVa+bWyFFtO`lUh-# zTOsA0U-#-Lxs9ZW)P=f?IaP>{(emEOTI`)NtiNy8KD>tp`rgSj2|*;+GlXT=l(I3s 
zOFHr3jFGoZ@RTv`&h|kK=RMkK(tw9&b3s@y+9@tY;^^9)3Hj)H^)mMfo#F$W@?fA5 z7wzoC`N#L;N7e^8IbpNX@=Op2FMY~td>RLS9fYF=?I6rIXPz^gbP(DILF8bx^QnX& z2&);wL8wBU>ITwqaHbAIPt?dDG^n2{2)?^GInp%-hwnAu{JV3{q^Oorx9*bVP6!Ch z{jde*$dT1F7>5Qrr*RSjcbtvnF}u@+IF&ogaR^WC&TQ03cly>#?M{g&mOGyS=ii-@ z#+{zrH7|?T{<7Su3G`RMs|}VxowW}wcQOO#KRfG8u+tfJ!>McK&hEUn zJ2B=!ba0*V9}V?tvEWSYPDj*8cdFJ-ot-3{D)mxO133TgT-&a* zQ^BZPe$jI0ejeMM?&b`V&NLW~2HKsb5(0P5t<_6GJK|JxXFfPnyE7Rz(w(lgQoEDi zcBeaV{@p2H-08?&^HOl}g5}ODsoV+OYWz11mZO1o$A@i?rWf4FX55)ToXVXOx%r1s zF?aC!)(%uiZ)Vm^?M;jOEJm!;Ujyggo42-UaZ}dlp_9Y*rX2{()G2FDULTkSfBbHF zlhvHJ>oJ7K+9p7>H<_OG=H83in|(D>dov=%^5!6LI3uQcr@Oo}(u8`aS9|js^{i&j zXCT1cGkI&g_h#+>m!?5ZG|+e0-jopJrVm5do15n?Z)$*(k7dt8x@cG_%DF9*imQj} z+>oyxyl4mr^oSF~lNU%Q-8!EFyl+Tn>%{N}Z2p}kyj+t{uh!1CCSl6iIUq33_BH3M zMW#UsA&B<$D90lqa5jM<>}-hb?3>_ZXTz&&XMNe8a<5{#Fxp|c#oEy4R zsJ(fR13%D5ONa3xw46V_b&o9Br-1M;d)!7X`{2)7b_?o4&4g(nFtRfn*|SZ90=DeS z@=O8QgBilIADppdH&n9oTCzhRn?8Pn+J8R__&i$BWT!>k`FXuijUMut9;TxQ@yHCG z1-zKu!f#)v@xGs^o)vY~czpBL3LD=F{Jzp1aW}%BoKJ1|hlYNfq0g-8A*ZE{zXQCl z2j)E{N19J0gab`Elh;FrzLcR~e455R!a*NA;PHs$=Y_-3&WW|!+lMQ)w}XJkrk*3p z*$E`ZOtkZLCH^=P&!_TVcEmT|_|Y>3EsUNjM$at6pZ6I(?`u7cjh^lXfAthpxMH&V zx(5hEzEmA=FhoTR(Hs!jfwD0zv zF7wgR;4hv~?u%``gvRd&-Z%ff0Mqaxc2B)3oL>$2rH26mxX*_7GHQ-3)1xCotHG@O zDit0lB;Tq)^gV63+tBwj^tX>IXF*@v#up~_-gn`wpGg=7w3&nz4E+m+{!7r?bK$q= zM0}#1E!Yla>@WYQyiFL4C07q&KmASvhv|0>WJ;ddnkT-%GyQ&``+eQ?`;o>c46*vv z8%&bqJoOGi^{E2Xs^#)j|v{+$40|7lj?2RqdO zaG92jnm%CxM(h8SUN2}&de2a|6YsOLX>Y`1>~2e$ZYM2O5sVKrUPU^sZTv$VH++3k z0?@x~!}noxmtIvc^vjRvphY_Ovsh6$i|tY3gA9Iz#=Cu^G4OO!H=wQmq`oCP$AJ5J z33b9rttiOEfGchEea;YG0NuJjI@DKkqNKz~=g|hZKMvzdOQ;c0La6iy5-J!8%MV*) z-nUY_oXtp>ARy>QNI05VL@Cl42ufeZK)cLBiDm80OhlJ0)(I|a=p&u}HvTG|oIby6 zdDt3e|3SVg<-^0bn4g1u;E?6vF5_WoBVmbvXb*2@QXY083B{0S^qTe%-XXE<;Ua9F zzAg5!SOBws#np`BufNyBBEPLT7a+bafle0_7U>jrxAE>MXNAT`8~l2MuL(SkYgv8k?*xc!cgez$ZGk;r z3hir%ZXK{@Z%rFt(BKHLtcFB)* zp(^w9y0B`WwJzMiJ|`vQG!iBYh+Y>?hlrR&I-3msulZW?2O2LmQV|#kc{q<8Wg&ymix%>egx~`D6hn9oi6$YqL}Ve~x}=c)#cd%xw&G!< zxfA6qrXU4JSb6OVoBtryFxuK1ZRht|+Fmr;Y8h?o0HMo8))u0){e~xqo_V0w30B&& zk+#k)Ep7W?^RI0-_D>p6h}*XBble))+6GGq;`S{=IBtbV8?Go>TUYeK39S2XZOyf| zK5){v_w4}3yTTI70e6{xOPYQc?y;s@a**(w@!RMaxBLN=s3CJ0X8+UeyT#h|-E+0; zO$0x=73qwS5V$^{A?$j%k-#0tNEMNe2Y8?3@qay2mlWw;?Jzv_y>J6neilP_D)?N2 zKf2`~eleZR-k0F7{bR*r3)%f%Y)r<+Du;r1j*dsw-PZEcTS5?zFB!t|$SELt`Du%8 zXr@Fu<;_{~Lyb=uX`j&jomX;65{__d^y~ZXrhm_`;CEe2TnC!8UH_J}u*{yjQg4Po zp^exKXJF@*tLjFLPxuGhj^2d*$^1x?u*|m&&nKEE{?1No#MZI#7wHi61lyG!v4zMF zzZtrTomyt-1?J~P?n}WhM(hCzL5BYMM!Vj}NMNDfC&Kke=RK0pCeX2Myu@hJw!WY4L-u)k8HQ_IGEK<)+DHB)k3{xl!ukWOO zYA!)`jE!OO*W?&y;$eU&RlvJz!~p6J#o%`rJZSq4ul}# z%oh@cxwD#4>`WDsfdc}bk3kpgOdH661lyU;u!++_bHnnnVM*S~EYIQjKgE*6zcaZ5 zIg^CVP_6=Q-HPiEc##_upIXq;)t9q=&Qx+S$UDRcEKC}3oK1r!}Yq8;bHbY@fje>t-c zHsQ=l!*X(_4oQ??$r|mjub{Mg*7_r}Pw)T=+_(db~j646*;2+G;&JC1!R}4Px*=pnOtcIN0f#giK;Z+R% zEJL5ymh*v)&tUM~4gTsTAwPuVRJ8GpBOl}Ww14pISYM0A z0pdGPV!2@xL%(#Ij#nuoC(?Mk7KE}}d){BVn2z}MYAD^v9bPa=FBk??8f^B>;x~Dm`krlx|ZwHEC zKgo~%{LGgW9*$re!7n1%(nwg%5V8Lm$?Vt=bR!~I#NbC8dxh&z^b6Nn5lsHan&XoVK4pp?0R1%{b9}RnchS+eW*|AA+we+;eukmX zt?9Akbg=P_zS8o$8N7o}B;^c^bSl{RV{}ek+8{u5f^GOE?1R2Gj{+S3j^eXky*+$l z)ciJC2dAx6!^81A8$Sy4a)Di9@UKWb568YXzA>r4UO#~PMmD?zsXrG;eJ&gBHEIeN zHD9fF`9 z{7Y_ZBro|~FBh-al8YJ%V;I8AMaE|&zo{ksgN<4yrM1DITrHBQ7Tf|F-;nOk&cb)^ z>RmP{pybJtA5l)3q$T$k{36P|jfCS2Vacte1hq%ZyrH62&*0}9e7MGo6)r~Om($%h zr%r%bP)A~Ug?nkbj!0uezhjj(th3qpo6|u5;@tpq{`c$FEZ7RT4EU9adKPrH>6h8~ zFNnTLAo>wD`~yQj#?WW9={+{SBGLB;z0PL^nEekym*hu2OEN#tnuWhuv*yB7?O}Q& zAzncAtl9gw%I9+6eLEn*?;bu|VmUt}4gG@8bO4JP`bg&^8~--Z-vPb$@GY4Ad-#&% 
zhlh=spFQ01v*qCv?0hnZMU8~H0-`;1t|<=>0PibyFMx;ZC6+z>#?Wt{pgnA8=p&u+ zHoh0pM}l5^*b`>|9@dro@URo}vxk4;?1D{D%*p&lLVY7)vw&z1GiVR5VKen51oH4q zk~Il704@Xg$9U~w2SXp}EVl6zi2fJb!$g?F#Ks8rDL23O4_e7f%);f#idU6cU!)Lhy^E zjFu1_rk65=7s^u4Nd8z$h&K2k2LBL;0y^xdwY|Q^zetidRktJ;3n=;C7g}red4{ZsvB?M91#t=?) zgJ&d+K{sNoeMAzf*b;gKlu*Y=_`pc`W0@6&d`3b&BVm((=qRMu5|SoCLT5-YH~*X~ zG7R+HN%$-!%KZLsgfrMkxG+XXp*``dZ&NE7;Y1o0YZ=9XsIE~YB-$B{jwlcjPBDWY zXYiQ~9)lkDKQ;bCsFv$cTZ2E1GY;;Q;^tE^Ao+L6bMkkny?`J20qqDUDKp=V!h^Bl z@}{T13($-BmK%NI-#$@y=o_u)LsxW2Q<6{oD{TA}gI{Ct)g(Rx-LV;IpV4`s)2XB-o;JDI6twX-!a+Pdhz;x8}N8Q^ul zwA2>qWHAsX%4?L!z$a2ggp(hteH1|SoA!N?W)jQt%NY8WhMqnN)x$Ecjek5|C%A~g&yjc@ zmRIpjuzJ|Pi5ri;4=Y+b%dar|&*RsUA9>swuY*!w@Qa;gkdg2WLpUM@jbwJLlT;Du zG&A^K27i6Aa8c#4lE%-(%}L+NSLIa$eV-s^lt>8;MtuH39^iu-vGObdy|1kbcpC6C ze;By$dv%eW25^N93?@cBm8X)4K8Q?fD!*)E_F0^c*>mKeHIQ5;3|$$;VY~9ZLWzM? z1|12%jwb3%S2Ot4HXbYLyE^)ng#XyC<)dK5_hCH-9V{f;~ElUz_|hCI8V zYPY`u%CFmfWQQ0k(>~R1zn}%uXo+x|8(AMSirr3Ll-lho+Kv1MU*F(2+jzL0LF2ED z)owqp@d=rqnib4s@z8|;?tCYS;Lh%m+MOo4W8uzF)7@M~u{$qml*nQibR@F)9Nh7h zh8kWD(in8LNRuAS=Rq&Of;tVX+rs+h1Q_yHemA7PCe*u5={{-Vk&GpZKO|G%4f4$x z5lqBj`v@I_h6_`F{4`Lauq4iA6vv=|Mu`}FfQ~2z`1>IS|B=D}y+E-7kCax__&f$* z$KW>rFGg}^Ao+vG3qj9*Fy-gLk(1{U5B;18?dNi)oF7hJvi^5%J@{ps@6d+yV4A>b@FJ`?=&FX7G>a3;n)f`;ZVs-YVjAlr0!gzBOh1e9Gd&==RO>XHAcR z;~VPFS^~R_uRW}E8bszxV>G7viq=)<>;RoI4r~~v15^tRn9q2loKMk41Za`LPcZmU zau}(_-*X2N35#~hrNiH)N*|LY%6UyySp61beYB0$E&fw=6d{N<2 z7T>(x_NeGc()^8zOE?LUQjChW%*3POG>$$(sTdWTBnm5SCDY$e8YM=>{nNsqC}*O< zhZuYu@D%cB=OB>0axQozLLTj`S5=cym1l3Y2VvYKn?g!<1Ssu>P`Yp}wQVp6^ za_BrH6Vi8(4)CpS#n~?heK1|q#=l3)*s79#mcaLMk?ETbYfK1+WjLJ$AD=>yv6!+~ zhaNscVsqQVW6(#ma}p@Zc$D)Pdy6j;6ucj&rGJUUvi>^I@2iRRI>s%(xMe)d=nU3s z&im*u)q@c+8Iw-o_D-gxW3~po7s0DX*NCS|pmW-eapx-9scq|bgAdK|AW3o23-mtV zs!Y*N4pns(RdL&!qCGr&d{Sog7}uuu9j9gA%%A1i;~8JWDE6Dsn&QH~(s^IzYS#dr ztH$4mT!AannP}sSd}^C`>aRWQ1QB$x#S!_z z#@D0lmn@!oG}4^ocztR^bAj6TxLAO36=B0a!MR3;;~k^q%-1T`$W2ij-`L984Nx_<8jO8fQ`R7Ob6mb zKOKljjnCp+zx`?2N42T6jARR!>28MuRS*2(8qDe_{&ET|Or7kWMw^~`n4Xem!4vU4 z(9upM8-LH>YZ?4HjmN?fV&h8~e5k=EN_9F+RdPG=6IRNs$@FFEm7JnCzpr3F8E#2O|sM*Z)7wfgt88mM3B2fs|?6B=M-sK=qz!YBV1i~T=z=1zJE zF->b$;sT=qIx!1mry)9~uj^jocjFURJu5NGvPoeWF9x5-;HLwRS%&5Jufx1CL_6zl zi%rEh`nDL%&n(qY;sw?yIWt%*+0;kJrw%dmX?~ny?jV@aS$|d4LUtDAbW&9{QH4$N zeJt1i!~NO{XrgzcR#4$9v?ah=5p2W1G@`Q@(PO7uYsHa67X60?f97NDWE+jgS~1tg zS0*_H3j4_>LJ$kIiD3pqKiJSem}X@Yajz$P!1p!aa3t5{Hw=ESjYsy~8o$8c^J#p- zR}ajX^n1Sf^`I4o>jQPaM|$n^Fw_@pnDyO3YW#zz}`Esam8Z!J0g7L~vE+rx0U;ko^h z4ntq`YcDEm&_-VESoHrnDoAySdILSl@d}B zZ%%7*;E};Ar|IHv~}i1bE8{N&W4_iFt6!223wRQXNBY>DMWJddM= zuk5V=@Lo3jk60a&{ylUE@8UyVy|&b~@f|@g^W4PXcWON5Y?zJDPPbOaEdI8{X8YDPziz6z zk{tTU-?;1nW-~6cfm-IfZx=t)J|oF#ekVY_D@!a7 zk44z#rMI;W{rd4%zSGCYi;z{eFP3J=tdYHOFveFkA_m3yaGE8~?Gv zpXj80i_~~%oo(am8~g%;FQ)NXeDvTrPwBKS!du#0#lYqBT@L^+YQyj1K;UbV8~}cM zs}+fp-2qPw#J!7DGfqVTQeL!*M zd;NcpKFZMfMb4=59rY+(Hb%`5 zjOvul7XDX*PiOGMH6EjSw~e1g>KnQPEFLp#_*rbjQqG(XT25h24>{dz{8EGe*x-}# zaio~wZeKZUR7kNHNoCO1|46za$uN?_nT$u$cY;ieqpX@7{1>Q zcweJ1>j9y~u=$s878|e}%yZt?gSoIRVU2`fFb`u059Y_Cl*>XwLoHz}@V+hCED0q7 zN+7(HP|!&DTJVb~bT$$mw%1u2^o)ef8&wplkc2vr;CD<;kyzfKx)8i=poRHE8(z(* z>1otlOV9!F(TTus*LiHNc3sf*@7US(ka@5nP`(H+gLU&gov+@aZxO8O5`ugkUm@-20 zDkCAEkuXz0^m=(=y^1Qk?Lq9T*OC-QUVJtAyq^=`+@h}Jq{1` z&73SsLt4+`^_2XO5NsrjB!25zye3A)*>`jx-q9!;LD9~fwIX&A&Wog?^J!Z}JdD`y z(thTH@N&`(VSWz8!;$K^PO*q`ZepjG^C-%wIIdAbMLww_%GqM@#SMPGjmJHyq%}e{ zz7HDPpX9&WR(YA=J!4&b=%D_BR-D+hf|^@)jG+F&MC?v!bZiIpmg#P^?k+xqMhSPC zOL-B_7r^`IFC?1OmJ6g}tK^4@5=O-Wd@^dQ_=;EyKM61;_Ij-;(bI@N(niH7{%s;v zkA=^)@wJJ5Fs6jx3EJI;*D>^c4gIa*7JW?{pN;70>mK~lhuZMBI_M1NH1v~g`m?Jo 
zZwmqMo4+*x{SP+0u%Z8@wGK`tn|`Q`PsVQS8?!9{eG41@lA(_>^v8!;!6|6tI}m+| zjRELytg@Va6>vHH??r3%pV;)9p2hp31JO^i;Vx1$TEWwJTC*e2v_jCP=^OJ9vi+RZ zs}pdD`l|2Wa6*u>>lxXbhN`uLrcsoWUP_5}mKeO-;3wL6T#Fz7MMMUl2fnBIFGQ)Z zBy>D!tqOE5Ps5?l!J!=reI)ac2C%CIjBv6*pzpH>{^8YA!3RU1?`H~^zsrNn(ih$; zh8TCA*Y3nm8e$Ei(?46IqB+s0+aT%Fx=vke!~e$i?5lqv0DQ0w-$?LpE?PTgJD7Q4 z9m2nhQwf1BkEO*E$$wn>I`EWitZBFrr!EbT3}*FW$NW`7a5^i`5I&tP6A&5<9ELkT z2_GY!StMcnp#bsQBeC3YDRxO;n==96Gi~^DM$Oz-YV^dvWYk1DF*g3O!S^xvn}bAp z@X%kRQ^UsBCiTAr2RL|Twc)c3eGWtag``JtBAwq>TG?+w-s1Az|KM6`!|Rfo;!Xhg zP#d27j-FZx5h^$dQJ0>XT4{ZjP}$}=1s>njCjq+6V8ds?DY;@~)7{2T7-&s_6DzDB z3?%w4f#?_7@ER1e@8$A5mS?>PUif?f_Q^;yh0Kc2a;`p7Jdc^e9vhgZUr&wCJ;#bfI%0(yGJ>3`V?tC@aT`37Tl)2Y} z-4*^0c27bnd_CAb110+n#1~}&O8Vqrw-@CKO5>aG6Q#hdVE30O75)i!|BUhorPS?U z_c)XcchE0N=-psXDE{&2fJ^f^m`KQo`-T2CHQHudp62`l-JU^-1AUM1i9R!P%;O*+|^JnqPSfy z_c@fsAuf0S^e%U!3@&$Hl+RHLXLPxjq14Iba<@YX%j|N`MtM7n%Y6Z5a#okSNT|!b z0_6fqujgP#DU%I&l&~kQ97V>K}kTljS^APe4eF&v}HJAG{l)q8(SBEZ?W+;yF?Q%CqIf(KErA!^<79|lSZ(WyrDoUPuF85a`h3dQ9El_5n>_HjQ zz~wI25V1r#fl|4V%k4#}^P0>3Tw{zM6dy{{CcvO%XzFr*h_W4}=<6{bC-J&O2#)`?kXt5P<}uOZQ*j)M2SHegR&asGD@zuT<(3= zzbG7@GU4;81N6XeNc`s5SEHOU;)l4Duzn#4M`8;{IiIK+H`ioa6ND|M;^u@s?}dH2 zd0WLj`t82BYnjG14-XBC-rPAXM@V@5o`IPwZe9!Y)1-`vt-?bRL&EQFsTZ4Jp^p1f+ct^On?U}Z{4bn{53_=sP;7?Q~ z-U?f~GedOZgDp;mu%&xLX6eP~3&OSr1j*JfUf!B-e!3 z(vv7-ADf%+b-Naq2bd+2j|G#MRygwUh!$Yig&}7?21b% z9MSXgE+w>oBVGr(*@?0b zKpmGeX;Pq0bws~=s#jA^7 zL+CYb_fzV#Gf>@7dZF|~8H_@l?C*eju6~8_ca&qp-Od~HGtfXv*c_V;woHj5{sPO+ zdX2Pbf&fZ5)qn_ppJ(R;8x7|%+C2o7pZG?}VNfJU;75z5BiamHTHJC1h zN;~uEy^G$LcV^(d$N3IaF(HwOr<}3;^JM&-n1T<_YCa5`Z~(#e)ORSU-vLdT{;ZQj*ri-!h>uqG}iqq7|hVXInHiJlPp zx@*taMh99;Gw1tBy(;p@|A1vfp*VIYZRU{aRRxT z__^@3nZWOAwAaIo!ni^hTU42mip|#s3k0H_yVS!b&idUP`NFA_wE(qgB&!oAD-T9} zM3j>q>5-Xgf|}?kCmq$q0^?*j#cBFx*T2$q4P#um0|oj2lT77>7K#YrwI9z$gcJXt zuvs$IU8xHhBd_v%heuplx0iqm_ZishZWc%OKRCS$wsdQK>QirZco zGbyK7G-5-=f)N{1adT37UgZ)thke;7JiD>eB9h7;L4X&Xr%Z-jJS?7e=m=*c^@Z7r z#HADpOQ?*W6OM!>l)?Yt%@Gz~RJ_X`7N3W(v|G^N zbc#TQ#~F{Yiq$0inbjmcj8UmplIX6bMa!nn0E`&5IusVG!(2Vj{-f1l!H)mtm}?C- ze7^&&?mY5B{SD)2Xa6ytIGwlCDA21nQkFBqsQ?-zjP!D#oHB!ucJ|D0Yb{ z&M{`llz3?56WUh#6^naNC^pZ8WB(+*c8+_86BcISv>Q^xC zLEYGE6B-27+!dBkmcVrpzjYE^LJY`$=|PzkCbzbPjDVq8Si%#6(n*1{*iJ}#W_@Xl z+JCPv5nZeC0^>z;v7D^HRE==f9p#9$+GcWEUgUI6$p3aJIS7*f-cmABve`?C@6kiP zUy;*^fHfj9CGnQ?I^F?O^F-MAWCUq=CPa1AWxV46)r1vqo(WJf)M5tZO}C~wg@pFa zO^xtQ4*9S~_r?AhEkl-op#XfjbZhD%{NBQN343iz1eTF{&YrCZUGwmIrFOp^<+MX= z<*NNT+Mwq~&LRNndGPxY6nCfD5jr{4!|CDz{zj9UN0~&-%h0C7i!ECIN6Ys+Aph^h z{5i=M;cp7bjp5(h_OttPBJ8L24z9hrnT+*+hm^I#GPnR<2+W+HyjTR zfHt=iNq_F@Usjsigv_$QZK^kd5C9I23S;PLq%pIFd+YTnH>p| z|6XP*N;WL^CI2t8EAk`GcgHpfasO{wbtZsD9<`Dq64&t2aUfH_RizY;<2iZ2OlTZ5 zV{D^?t;e=H`0QCt9l+J~3r*!r33vNtTT64)ON(%+t?zBCcw`M8iAtCFG;!xq2iG1U ziOEU9Sq~rf^}DVyOu^SHDPjG);L2xvCtROh;H$B}_|@UpYy6tbuebR%gYdV~* zw|PVOH50#v@~fL)bMk9$em%5FT(M1hAN_Pj>4DM*WdO=Blz5bJD3eg8qI`ui7iAI3 zk0?K*tVL12)Bl)pnaGEPJz??Tv}J6b_Ig}O!LZR;Xh955XxbS*#UKD_I5 z*Mr_&rFT8)-35C05xqM>?|RX@ee|w3z1vFfKBjkGde?{E{YdZn(z|)|E|%WSq<8)3 z-6VSV3B4Od@A}ibA@ptlUXuEG2hx8Zc?Z#dUA%+ozxTZaNNVF9O8>R=4x|6x@D8W{ z8hJ<1f3>|M>Ax!8PwBsM-Z=WNq&J@ad&!$X|K;^khe#&+jWP%&9_2HX=_qqimZGde*^IIafP+mqUkMd0ZLMGIITKf;^3mbimURRB1)D%5;=fDCvS;3V!J&*GvC@{|iZj9B4~jOjiwVXj(|_RSbk4MI91m2d6A3*}pNSgYrFw@xD&q)3~|CyH|^-LuP4R@ax^XD*w`(t#}3)jPPSA|ruImuO86H= zxx#Z$eaX3Y;rSuCHeK9>jhlhN25D7bd&q(^!l*~flM4)iHGc2JdS%UDq70@21vanDHi%7!}He5qPH1Wiw z$M5HHoHKcxE{vx8aq+kN1f}zi*`C$ytBQBg&Je5fLy$NZ@+bt^Klir;~?~ACreOVN~Iz;YJ=V;0BvT1NLx?^l&^|P6!KY zhIjqB@^ew?iGgy2xr#ET7 z{Ik*A0A7&hTd4CmwG?$&{L9jCnhpP;6HYwT?GmE!{J)|Pi+>1W%G44@-w|j^;UCiQ 
z1sj%HG$RyE2}AQUq2W^}M*nDNW=X>vZ1~Wksjg@)a3v4Jo`go`3NSj_(6o|<5H@5J z2D(>M69(_k6ry3hC`8+SG9jW5_$fpOQRi`dXsTun?TzI1FKl@#EaWwh1Q@W4gT72-Tj zH$!t5`Y3G+7@aK8G#t)J!vr=gR5WmQu%anqXqFKg>1)sE8HQ%6G&E*|M;MUTu!JG7 z(0#vH{4a*_>K+uUN6SsDT2pJ(W#ZOFRO^gk33bQ<_pbx(ajKaPi%G+AHrzl%&T(jj zoF7>HV;Id*yoWBu;$MS1A8vK77XL(MnlG4e3p6Z2E&d;~Wu36ti+^i2^iiE>jc^)^ zSa~>KViNG;UzU*r6opv)^KfNFt9eG%{70^&@yUz-jb-Hi%epx){%5%IFOa}>v-r2} zPT~C;Ml}@G^!c6}Y!eOGLp6OSvgNd}pi`bcgSc{vsPx3ZyRIUl9w$u8 z%HvSJIQC)jPe4rL=SN1T2sEYek~DnDhGiB_oT7Qf(5xmjrcW0}uP`*TrJ)5Ix>_`~ z6wRMp$-}TWp)q|5GrFyziI#?pYgc_ml2H8j-*mW`Q83MTeJZD6;Tl6Q4X=V}A{jSY903=3aa zgtJFT$M-*~&cz0{f{_me5`&I6u&=msm1>UhGfp+Hpqujs)`yWxb#vap-sQ?JqB1Te zx4nU#=t6Gh146l}W?%_!RbREj8lz_5b8Ly$o$w4ySwg;N7nPi*%R-vRnZT7ag?I)Y zU}SFHoM+$~t~|;*5Fxm@P&IF%o68NX2y5H6$KFMk zNUY<^9W0!-2i`~)aODM2iRQeKe9o1#G%}0#7@cXtKBQsX3P$Dma@5d}c%K_ARxDxh z)1;vZ8#)LBUy4;$lFxD_d)|%E7!r9H{ehwJNJBa{ABMc%ML3kJKGzLILV=gs9 zT}y8Cm0=qz4b|BYAq;%=R6@CNfGar~?+_Z%$jazQL(@PSZY^a0EE%IaOX@LwO|%nGR8+mT)D9NbLpA0*b_|hDPlL z2if4`p5Ys&$_C*Q{{D}m5qrUWM&1xewHHj{%EhWVLNiP?FQ%LGUeKA5-|FVP7rf1t z?}$osF~c$0+ks*d284=9cSMO|l8;+`sHno?TS`MP8}chPa3)!Z!1up%CC4P3&=@wy z=2J`x8k(%qu#OGKxo5-#q5WRbcnr-s=%ZmXfzc-gn)1C}8e-Y-nW8~#Ix3pXhGr_E zF>D$$dW4}FDh*}WP*oWC!r%i~=)&L~{4d7O?Dr{Ff6gUB;m_lAXEeu3EwzgneM_LpNww5YWW)E02InCf|4b9EXv9+cAtS$06k@6M zaAjLjsg~NBT-i?}d8sYVmE|>3F17R8ljpwgjOVW*7BoukpbqC>#e(y%G@NC_UGTBz zki1tY#rc=f?D-QA(K8=CJ4jcBxC^mm44 zk~Gv~LtBfctWtBFD_KoPLL)@k8Qt8_ye4+_h9R_qh_jiKVs% zBjXi?ICdA~%DSpK5|u+WU#pw*Qv2{*^1iZe&P(lOu1p3AoG{03UTWL7qnOMCLd7H< zqC~@HIk$QsT9GqX042jTHmnc^K42$^P4JJ_? zMRSQOIp2Keno@+uu=(v9icc;>lU*9NvSA;{ zM2vis9?%gs5!D-EhgvOqHz8)eV<)Y-wgXr_X*!)s@-gbCA{faS<4g zGemVR?zV4WWJX0H7Vkw|N%7$+fW2(0YF=A6=X3HfMtTKOos+w9WnWRrH(b)~fQMJw zkcXvVRO3!v?p5OkEk%P=ciRiHrK{%W%e~B8SyWVVXzvMC$Uj%o)Z)v%-@m3fysVq^ z<=$Sd{EKyn1D;nkZ?BtY@rIJ-imZ76S}4uOP(wo^o>>kE7E1g&Y3R;|1dG0nqNjYx z8!i(FjZD>M^k_p9D-GdnsAJJ&S2Sz5lE+VDLSwGn{fZo|XK2bu!#OtG1DQ(YuR?^! 
z>0@Y~wgwI6$}&d#1e%<>EDclH@U1Y=61yJ;Z+;4q8}IP>4ghM_te{4y>%ff$i$;92 zVj5ZCbrZIHZu%J}4Q1KTR2cYDJin5=jVsw}576Gy)cNFS>ipC!lOHu}{z9F{dBU>9 z-5xb-_Od0X>0Hg46>RubG~v#_5h23g|5TleSu>iEhXoRYjjt~IaOHTb`TMGQM%|oe zO;bjGs+;qXry^I@7L{h!tcW5H@6I$Hs#z1v4f2Zyscus`K;!(c`FYkH<;uU&h(`ie z%`HNe$LY#QJ!`&aSu+g=F>B7Wq7W4V zK!s>9H9}n}Zq(1Pb(DteY$&ZnVO372L@ne>o*R`2jS&4cogz`z(BzYbU)gYpdlt*z z8sU=1dEe0d4Sf`euNi$spvkvm(h$#v*%l3rf4&sRV`vr-8X<~e^dv(wK^khap@}e1 zh+<*z)}pz+I)XyL-ahM5uz=O z&T42<&?6ZZv0*EEQB!5AqN#0Y_7WN)8pP=B0!<-WAq^eaFxaAbOVQlrN>&p`XoRRD zqk9^f7-`7MhHzn^5WNk92+@?56r$5pO^7N}Bh+0&9q#`bw*1ntj}4c>sWybQ!X@1Q zGor48m?ANo(T+fqZ^xw}feqhSG<_9KUPH5x&poSvx4L9wTRFNQENueH8c~Yp)MQRSTto6%`vXz5Op9lLi9YN z-!U{zr6FZ9Ii202xhzEB{=cEgOK5~>FQY>YO^`IKV8c4}B17c7g%Gu)QB(@=qMhCV zs1WT#4TWe3H`*y0QHXw$h7Z^FDMe#3{5donS_jYcS&9FTLl+-R^!w&WX`y)in5COvZ z+KAduXowoh=+y#Ek@{I0I=jTc?QDIQkBv0LXtptX}H~fHDEN;93(V68_wuG0!>3>l{9o_ z!w48air=(|;-7mnFn7)5umt>~75bwn)P#wnFn6MknX0KQ06}=X>6uhL z&jO>^E>cC3DODYrYpLL(;LMPQ*V*uaFtEs~N~F-kG44)iWF#M>KQuHgr6HIN`7N4c z;UYf&;7XQNIQx!RYR_PS5)$>s-Wbb;!2&WJdCU;khnc0Qsv^> z0;X1`Dk)KlY^g+IR$ZX6)VT=>T*7uCkR(&8R-+E*N5Ms@x(+BAX0ySIu6WzTHP?6% zT8|_2aEvz-8ma2d=(Pe(Gz+C6nhhUYG_NU|Yh1}R0||{(z0ByohUR@~aI@hR7(}Yl z@y~e~$f>Hz{n4ztG)_jL`39Y;Jg|C`DH81(QzYgvCUvTQK%K|A2Vyv^QZ=0`SBgp= zu%kqVJWd%UUo3zhF>;JR>RHvAYx|g5nW}oI=kc^~y_r>g8*L)dlv<)L1$Ee;OyA|C;deINXIhcW{X)6NiDfiLB8UYXnMWQ%N9Ek~Q zbR^osDkAZ91B%2Sqhus-EUH5-QFk469;c+~yO=Z_X9L9p^O}yMp~U7?MO@!vwkZ}{ zGHQtJTW)klG@^i>)S5gMu8!03#I<~DjH z!#p-@WIx2{|3cAJH#9p5jhuhN=*sbNIzA~ZtOpV360^9` zCDDj{J0cCAvf(RX;8Q_w5gCv3q7gNZ&T(wMiioD($Jm_1BHP{(VI%tZLVbBMiLq!s=(;MhNi1DIjNNB}4O)G#q9_63EaSF3Ps>&%fbkx&Nfpg0IzRIMl?u+J}PaH`2P` z7wf{}SL1!{vUqkPEPgXyYwssp!Y;sYeG48ANe~{M#LwNB_dDjLGh`vEY{ixDiAw%> zYhMX;bfhLRyl5Dz6SxF}J7@;})7sg&GQX&dO9_&X5+5R(M5Z}3Tnk~E3;5aN-h84oVQ$2x$iaoV7i{Np*hR_(t-5C9o zq4`!CJZy-uXzD1MOI*n`eF%+lT!hg*3{4wp$jpYKFo$9=0) zCcFUf0L4wooJ4}U?c6AvVSflH8J4hN2m0X65qGs_2p4hxkI^i0KcOLVD5G}?G==PE zY3Rg;VTuM*2+g?0_~r1h_$+uWbC{;xUYd5J-vN|< zGxSgX&Cp60=r==uA0iz?5^GY&(4T}lJP%|P6_JJ`Y`6wp%CXJDAN>BOQFI$R$gz2h zzAn(@*l*I1$cFhA%|Jy{*wFj{w6`5OmRyxmP#LXM3O*+$)YWIEMAO9pX(-8t238j@ zs4g~eCC9Fr6!c&)Io!z5RFH;?YbSQ%uq9<4Sh84ba~4 zr1YyQ6w`}?OiXJ_L4TtTzyD^s2$zOqY`Bh&RQ9(Df%yG5)5Tp7lhOr@P8Mi#_?$FM zV8cR1gQMeMMN`DkECbrRh?L%{OiHVwm5S+PVnSU*W}0BS7$gm^u%VIa0x`|4y4b{( z9E|2t(4&FmaAQMLQ5yba!(;9l*$2&GA<*LtFf{3b_O2(TOS)C=#f9pZ#Du!nm}$7_Vu&=9WsAZt9JkN$ws&n}8RP^a_7H}o|P?6B^;m=Pf6lDxeUTN6NhQGLHObmQ+npe@ZH#FCv zk9_!w(W?ZSeK-P(H$VB1i+yMdgz{k(wSo_wxz%#fiURnxG`z`%E~<0*P*Zg-^2t8* zA~bv`#OQ8@CQ2GIu%WP`K^|@k=^kemS29gWLc@ns{U{W-&<0s!AF{#XEl)nYU4im2 z6$s_SE!2=p!G-GkSqQvkzNg@%A7e(z6fEfKWaxpjPl9id%gvT2TNe zNJDKlM61r>Ln+m{$R~H+p3v|ijL~fjO=D@e+lQQUD;nhCFCpFIByuIwep)u4s^l!ir`WS29g2Lc@m;Mi&%l&O;4Yyi>@Bp=HR2tsfg73Q{Z7 z9YCGO$!$7MFAcx2;UGxWeE3ep&EtrCa_1*OOg@Ze^kIRfP;8WjkJvCq(csY0PSLoG z50eNDAL=o>tw6I6pTOc>L_Vx3O+Gy7ZG31;tx%VRTeUKs*O!Ky0DGLQs&kB`-$kDu znNRj12chA^Hby^ZXda+PGAw4pcJ#ubzylD|6-`}6GtE9i!-pY^o+{An!%A4Z>&b^Z zrO1aGK&Y`am0F>$F}Io|T2TOpN<$eoG*O+yhkUAYkx%aYEkZ-;(_ZB9>xQO^G+bsw zIz@v#923$#&S0)&noNX-4_-zeVVz<=T!(V+A@ZRD`|t@6%7-JUAs;^FR{KON@?niM zbYnxDFz|ecR-KD{a_5PJh7UCvonUDCNJ9}e)KWCagIm$8=1Qh%L}>VM`y&cPHi70m zyaJ2&FY=-1E0l+oKqw!wQ7hDKC_P5GoI&s1@p}aH|of^PbXBfDKhu=a>&!Rp%m~9KgDS zhSVE9C=}HVO>t>B!G>h+8G9V^uw6*U`Jd5DlLCF@!y-m+6=?S1JS^VAW%S9c1<6SP6{*@wKacq@?)@0OrE%mqUE@B}sF!;jo5MYJLxE=j|eY*;1? 
[GIT binary patch data omitted: base85-encoded blob payloads for the binary files changed by this commit; not human-readable.]
zlq0zAbGB6EcHw_vx1h&QikJv@%Jl9&FAblHw)^WV2WtrUt;8ECjwOcI8XPze`>&8i zIRkRG@-|=z=1<~g3-4^Oud2dg)sn;O zWUq4gx^>cp!`0Tw#`AFHb$Ho`JABDHT-U}O4zI%|61TF};Zz=no1i_Yu5Kbi`2I@#up`e>{fZF%pmQc$ofSUyvBu5T8@95dTQ@ zh;-cPOTUt@R^{+x!l?LRGaAJY56x&)WHX)&#n0>_`5QOqUnq|7-{2{OpK(Qs_-B5e zvE}!8%YV_}g_XlK3-Wekr{#A@PpSV&b|^nJFB@P(Tn`<)xnT3EcyqEtk-vYfd7o*Uqpu1%g%`3bz;s%E z;?{=1d-j+bzbgLyM$-H^`B2Wy_)w0-XQYkKPkcHBgG&m2O~tQM8zrua*Nw1mFz3iX z^I?J}=~<#aTaMPTGwx^=xv)At7pz&GL7p`@S}DxqgQw`QoT2w4*l9rF4WYLt4@Gop zQvMwc1>X!j|FNI&{~6C@BXA=0ND+J_p;fznq0i&<8>dWdl;WL~v!b6f0XglaG)mRy zCqfSuIT0G)=%W0_&|68*pMK_3nt7RK<`Ahh2u$-nex@N+A3y>Q( zyx4^I7RoQ#<+KKGkzBK_Ny_E%`IAK8`G(i@PrzFXxUThv{8{KrB1N5^Xyf1>a?gIi zwj$V(4wPxesVgO88Y+k3h=_l3g)HzbCtuJ@w3$u{4@*6{|U~N zngtWjyo8AzRyF@Z-PElqnbpGCB^!cn=<)o-51U|Cr8*VxW*ILr;}RTi!@!)kOc*g% zKuSGtK*P`k;3!*0~1_<;3!zKX(ghe)yAfjY2K!BjAfshHxsv?Y$<3X6PNQ8mOJ_I?6 zii(QsK}E&o1VkkXOM<9~xS_ZJD)u<2xEu&;{`-EqdwLQAukUib*Z+MVdZ(Vc>#4P? zs;jE2YK|Ph49lpZd5K@a=Fdx1X)I6t69%`vL}2A0)8&67@iYkTd$EB)m2*IqlzRaF zz@C6d-7h;Ly?9Jwe06GKZ{#kbI5)9h@ub87>TkOu*1y~B4lb-oswRks!ADOPh~=VmFkp!=Tf+1yKdONifQTUwMZq?w_Y;u zA7Nb=>9OL2MB{Z;Ic?^t}1n{gFF8k5+=x4mtCg{LJ-Wflsg zSM`iyOaF?1AMwlpw;UW8`0JhSQum_k#&gE3c-6MN{hpa6d5LSGP*iXUOXwcF8{>npZyx{ZsqMJu}Nwtiyk0b9v7Jl7+L zh|76sd%tew_cB%jsnyvL*|F}^>pR-BBiuV*q=El#aIbz>6Wu={FYbhwI&wNPNbOt0 zcj=(8_JC5bqO#f9$=Us%^%`q$Kh!aJXxyB*`O$;N+U7+2cGgbW=C|}3YA=cF2hF%C zMQE_05ZHeGOVg1Yu8+lT>W=t=4z9E{-h_(*B1+Sb2ODp~Vr;}_TD=S15z2_I>GDdQ zjSdDSidTl&e)0VZDT!yZb0}qZT6|~HbA}*VzRBhwmNX@AEV^G9kE3vWDvsq$f*pR^ z`m4G!2H(yIC2tHk-+Q%zG@57Pu#@ zjkegIrDZg*89|g^V)B>j{8Qw6nS2|aFCpK@ijVB zvH@F5&qkf^Nq&&2|AfxB0Pj1~T*HJdi7n%IAKed9moqU4Hf1~qb z$#*vSDxJTQJeQ;O^t`O|oyhkz`3H5r5&1}yU##=LA(!P=lb@mUUyxT!{(7CSB;VZR z2kQLu}qt5?>yv^iebbcXuobhWtHl;XD-H+$}Jeu)R+ndjU`Fj)ffwkAu>P4JLt4BdP`CNJFz_NeJ{CTL}JGJV)0Y>F7R$tW|OxmCN zO`e5(bpg4wiZ4p=CG~lZPQcFz>~YM8=e!Am$(AkPH=xiSPZxK!P-0UFz_nyE^rVy0W|L$Yi|ui z*ete4Ta?XawME-(wisKizqbFSSlI+02Tvp0uj*`OZ0#7ALcVrR{fR3w@sXHix;n!h#X7?rfgF-7UaUlp?@6K z>ikmXHt&{8P__}C%QMt=!dRWdp_LQs{(D=z@mgi5kvct?bTfAk&>8A=n(wTtf)$J* zsW#KU;%!LYWF?}sH(oxqV3o2F5ox}fj!L0bdwyRHkk`lt8f84XnxPKByAAKkz=zN; zDJTM$Y?~9i!4lXk0aW-Z#v!pCf0a6jG0fATU06_FgBBCGpPp7Q1xi`v*-BTYe^u2= z*!+1_t6-#$%P108b+LSLa|CC(GSuI@NsPfskXs9H3u08mF;9m2Y@+5kTq!HyFv)iy z)>n~@6s-A0QV{G}Tb3u9i*(I)$R(zwueeIBL!swEl2DbgOx@B~9qFrWtSmo-?`o5$ zPV8SXWg84W3OxS}NwM}L!0jlD?J%E%c>?CuFw=p%f$l&O5Mj~&;pxghR*x9`!}4Yf z4m!vamrp5nr7V^OKL08_eMhiK=`%U1eN8y6c^z6{NBU=P6OXs$Es6@rFI>XKep=t27z ziKarg2n_!W9a+D?)-k_t18aFrD<#hl1z$~9C2yGsR;BO@_{U06!cc6-dg~47;pH^| zZx!EL92ZBTj@fgPyl=5_&&+9+eUVF@7nJf&s>L_Cz%@|HzQm>898O+N5;v%h2`3Xk zGPG=)OHG19c^|}uUZHf_0&&w{k1zDR?h_b9 zqgjB%kL;L0Pa3vB7Z~nC2syyvhx z0Hu%dC%a|feHyI;J7}~G@Y7ays{n@}?ivIs!;fr7;5qUg0#DFr6Ie~>FYza^0)Kq! 
zMNWpO6wZJT)XYDTTV#3_Grb=U!8_bs^9fFOGxp$Q_ct_L?r&+_;66Yj&3%x@(CjWj zXLh&XmDxRlmuGhkUgkc5FoQj_6NBBddj&gZcMo>Vz9o23_N~Fz+0%n9vTqAE%bpQz zoINublkLVV_I7qvE;TtIHD?n%V;>L3Nq%1q{yyg|!m z8nT&+Y^ETa$;jp)?D`#mJoZB#dn1oMkjF&iF#&muM;_ZEj}GL~jy%R?-yTG_#RWMm zC`fLvS0%SD^;r?D&LgOup5e})Xe2vN(zx9DD~-O+Gc>w8f5gpNo&@I)G}<|TrqROr zJ&h*LpJ-q+NHF?M79e(WJb)RUvqlT&XDyslS~x)(3C_b>I6*C(Ls~fMx#3hfPiX#r z(fpmJ(aw29^Y@+RFQECucUgIWcxn%9^m{mSe?d$uZuy?m|npz1a!r^S>33 zWNyM3HbP0Q^nciECp2O=-35EFdFDG~Vdfp%bQzsT?3kQiyV)w5yP|d$`{~o~<6qW| zui@gW_|;dlL@9h2?^)E2X~WTLB0Uj-t#B;I-F*>(O_VTji3q$*2{+h91fHQJP393w zxW+9au#!>-N(GcSvk&A_lD0F465B_>O^Hn;FohCZNnkuBHk80Ml-O1RDU=RS8c69N zrQVeAQjG|7ri9T*M4&CD9+a9**DQA)ii9j4Ts(pQvjp|qFM zt(10Bnoem8rQ0aIL1_l17bwl7w3d>a(u0(4r?i~X97@Y5&84(}5{EW{J1ON*noemR zC7GVjr<6`^0i}_Y7E(&4bT_5`lonCq0#JD_rH+*Dq2!>nm{Ma(ODIK9@=`j6ez|-p zr6ZJW=iY>0vjk5QF@M2F(r;9%S$NTN6AmAh|+RO ziz$^-@=#hqDVx&0l%`QyNy$a&AC$P0sho49z%WYpQ{q}g`6^0%DY177BvN{i(nXXW zqGYGU&N>i7>0wHzQ3c8$q4YbYM=7b4*wqETq4XG~PbfW3X%{8-{DBHeYbljcdXmz5 zN>5RGiqg}RR#SS0(h5q?Qp%&m*GS-QO8=xZo6)zm<-GW?gAD8%Yb5FCGZgNB=92e zFQ6QF7x)HUVp|;#eFrqUe}d7zj9lRNz`*9B?Br4VVtNfmy(8zymA*ase-p2NVIzftA22U^TD? zXqFsnZwqt=dIJN26yO?QJTL`t19N~}pa572JOVreybNptwgT@1p8*GepMXDsvw+PR zYi|L>1Kogrz*Rsha6K>qxCNL6%mchY39t%y99Rdu2D}Ya0egTif$xD|fEplj2+9s< z4I}_PfdRk}APpD?Oaf*Avw^#TJYWT|8h8p=50n8Fz%Jku;2S^%eg{qiF;~ah?Z8Dq zBG4BY1PlYl04`t}kPUc%#Xu2oAFu{^4%h%}2DSscfqlRs;0SOGh)BVj6yN|l0zH8K zKr%2ANC$2LZU)@I-+_6+JwP7d2krwN2G#=q1YQRI1-uPx19kx)0i4hJy`jnCBn9Ix zP8KsDIHS-LDTQeeG5-2wJaQE&li8VH%P(wgqRc}WCki+n#t`q@-V>UzB0H3ljBT7{KlEwHwIVpMJWHAeTd%oCU?N#_L$s3 zgL^x~Wp@#eFPcO*As#V_9zqnGL{}jem_%lb9<+q)B*$`0evh_H%?dU=s6$_|PQg z3$eu{76|c@Nh}m%jY-@sgx@3<330bcy!Q2zQgoagHh6NR#7iQ@H*n$9bo49ZiliP~jSz+`R^O?9)&ga8@dMhfI$1Q{i@- z9A~P+Z8ka1S%rJfouxvg+6ljAH`xM3#8`L1w%O^!2P z;Vv>c&VhxCF*(kLh5P-JP#JJuEZjFH#|fWsyG)LAW#P(9Zmq#RWpYm%+zON9Oj`Wj zZE~Db3pc~$o-w#_CdYZU=nXNs=M1i=$#L#2daX^4Q&{05O^)+%;ePozR0f=x3-_hT zagHurmC13oF5GJ-$9cPOkDDB4@WPdt+$#n*&*V6(7rk3dj`Mrrt~WVO%7weiP_$#D%q zxB`>oYJzZcOpfac!c8$bt}qC9jmdGXLAZe?S7C6SO|H`5nwcEeBgF5?k3wa@#R=gK zn;cgvgxhO!T(%Hyi^*{fL%0`Ajtd&XJ!o=V-4Je>$#JPexI0ab>mI`0WO7^t5pJZ( zaYaP9{wBxe5#c(T9M?*OYix2K8r-oxp)%kqis&6OIWDOPw>!jTj}zh@lgJQay-AE0 z;!%^x6r#i=_<_=Q(65@bK3=`r*lNc_<7L!O7;w6(9A;cP!7%7C`Bt{8ww@IW4al1*37Gi=) z^cP~3NhAp|&?E*3(bXg_7s6o@1BHk(i7SNoeRnAPgM>I}5?2bb$0V*2qTD0~3-Pi^ zBn$DFNjQaAZW2R;SY#4c3z2OSDMH+65<`VZGl@%txFSU0^RK`5L(HWAPsfT8vGyl` z4L~KZ7x)hN6Nny(br_&0Fa#I}%mD5NRsc@{Wk5Bs4+sD!fhMD{Mg;T$h5$DJ(}25x zWx#{L3&0j&FK`$*2{cQ?`W27_qym}13}8M`1UwA90BiyF0*8TC2Mh$R0VV>o zfO~)yz!Sg*pc2>%d|SPCySJ8Au1F0}FuVz*^u9U?=bua1^lKfHg#* zH;@912i!m|uo8F%*aW-}902|VZ0UH`KtCWAm;lTIyud189q=}=2lyVS0pi9Y|G)qs z4VVPX2J(Q_z0n35MfR}-CU=MH*_#KEs`*r|bfq}p%U;=PEa5vxw)&MU7TL67)ml#Wr zaEZy>FV6)mZ0$ZkV|gN%4yvt*k&W%q*#2&{G-_&vY1{kpC!fNM`|R_5U%<5Yi|wnx zbVS9qu)%D5X-uDHFyjw>_x%qr6Au5V24E((Y~8jM%pM&b9ooR`{pJ3zzJ}TFn{N*s zggKx~w;o+#4(grQt2@l(TW+0x8_bj$Gu^ktOucF1q{%STrc9l7Gt4n_=6Z5qrq7$d zU?I%$cQ4Al2j+yuOT0^APFm*6%ZE9wps=VI=JXQ(@=}=Y75A?E2h3Ub-M{Jqn6n>z zX!XM|J&!!P<}sKH9)Du(lQ46idit4XtMd}QD23<#x$b$0oc+D<;`)~$a^m*#D;r*g z$T`<*ufOpxh@38!ZQS%GM9y?JzxDPz5IITNQeIIBk@JhK+qUn3xE5kn_0D%8a#X+T zz4t$W$N~B85BGcokz?s`8RIh{av1E&ns6gTj$q@PT-F>S2cHR@h7E_vabxO;k)t41 zKujCmKM5j7YXdGHcm+fbv<6*y)nJGmBPBbBTn&-Ko0OrKVE2aAnm$%NQ;knR+H=R= z7;L9sdq20+vyNz+(7CZT&R@ulJKBc!UT`utw0D49nr6p__CAo_q}98G8`>+mp*>E0 zf}t0Ne_#UbP>Vq005`rd;rt2?b;zDWiD+r{_XqR0>){U8<(9S}?_SRWk-0UeXy{-jf?w8( zhbGsSahrkqsvB)eDIS_4YWx;gG&HrgjN4Pxd~rjOPc0srX56i%@BgxTH~ofGW7E`d zs45%1+EKg4IXS^o3YjbH#<5M2-8d02oXujPy-3Ns2u{wyk>d3# 
z7)mLhMxQI_zovsy$TdKC-K2TlK(8yP-C4=wQeYn^EOOl!r%c?7rYY`&(TAVCohKy) zoT$U~sOG9Nn7v5!k}l3YFqBd*4D{jd9AC|t`G14vpHn5RM_@QFX$#F1&m8BcjUim@ z{20bP)BAh@pFFcS1nOg^iFzuGK3~C^d^hlJjh{xo7lQHCTs6NL_&klzg@F-~chet; zKf+`8Zera~a$>8ztY8uNz;I?V`c(inZ3A3Gg64w1A;$~KA@%>75Q?NB!Avhxb3XWM zLwc47=+(26`>&<#|1e+7CG#r(t*lcf|6xuHf96BS^EkJX;2U`;+y54zg?ikzmt0y(w;w#_!9i8{)6%0A~Ud<0CGQQSjIL8R|NrT`)HXaKsvu+vWOzdtrZPF`gt z)mEh@Hj7Bw-(UjTG?;*aOn|}!%tHc7#w0dFA{y@;RYd8OZO$7=N82KsUJzl)n0R9- z4P&*^aNw3$oYCFCvcc#Z&VOGs|7#`xlKq4^ZzI}xBlp&@XeT3H-;p@|h?kO8F+*vI z(G}F~d;mxQk0_p}2pUeq;7}UoA`N)CI#2{nb~gEG6pl1He5X?8yoTTti*3bjt5y!S zItjw&>rhe1coEuGY@&=&3i$%UXWHc3w2!(a-U%MX&O332!a*t93C+05gV?-W5w){4 zu5`khrgqu81Kw8RwO?l!WxgWMRkq)y*aNX}(v)}Z(3F2fH91##=V-fK_U>rP-Y+#} zF1Vem?A;1wu19LhgMQVNOB+-k^f{EhA?2f|sO*`3Zm36tc6;z9Ey#7r&8dagRQ)1Y zR|Bm9n%3GUnoX@WlKHD05}w(8w8n z>xi^&O>z7eRV1+@xq@U)fb=z_OU5TcZ**kIR&gxbn$x^ejlx-ks7mTgpiVd;VV&#g zE8l^u!+bCj)FdS8U5yoQIP-Ye-JU0~|t8yf>- zE4R6drOQHY9K{9Fp|GVCOhWfiEWHg}5{g$$pk4Z);sOpt)rVT+RgbHTDsmRP)XjKz zYnHoQ>PyjvW%`UO<=l#AqD%d!909Q6aK-gFpWM<=8S0lVu~&gaP%sTa`)ZW=J^3pZ zwvvL`uouPn3%oFujS;2Rlu~Pczx(3s#c~QF3Fk88FsQT>;}25I{8^blM`?OZRgA@V zGIjo?O4+h?Gi4A{1f$Ag3C34hY{52FmXu(NDoaAJX_dtuv{hLmgHcr$tMBAUV{o>t zhg9~lTTHb5R+z-pA6BypAq)l;Z2|Gnie|XQgOI{q7KdHSiZ|56txi+Cv-;6jhp4w89Z4 zCx*!bLUJ_c_#vCGVpf~jx9oj?9#l~yH>AJ3NV_Tq1V z-yD4>EO^=6dl?cIe9L#j>e(=r4g1DzxQL;N0@EZWFs(-ZK7T<;@wxy zkwAo}F^UK)C2Xz7&~@Unf~62M5jsl7l~vkXivi<^boD4)Bw;z%&OlXZt8A=Fhhn-~ zD3RbQ3r~Ng?1q_16BY+ zRd5Rqc0>lezlh5b@DfZ!U4jo>C&FI|GgFK^l&mgPj7;~n&FwP~Zr?QL zQbaPh8=hF49~Z#IoUTR${fF&c^F=3Op&yzoK>~RSe22 ziNJjW$^#X=3~1%eNXFlQg+M;=58yH2c|gYD-YvWVJ3`6lMurj@6!(-taZy1Ib6UVd zoK*!>!n`C(DJ+2=^0((Myt;)(ThQGxnnTJkYUzKJCv$ChgxZ}c$`1 za?Zy}`33YQ8-eD$$O6!U(_@w~$xtLI1MmXt0IYAwf`WHT52PM97PE6jvbJH+?7+|D z*-BQ-MdpGrMPk;?{5-gzPsj7bJi}QA` zgwNttyH~+DTR;+tW2`6}D|{9hK9&4g=pvPkpo0~<<3gh>l>8g$sD;Xks0Un~CjsLb zG#L2vrhlbD|59>V7{wVplA{ga86kG92jQ6`(Tz=yj$Up6Usx>F2T86>{kj`ou z)ZPXu*i&N@Lu@8lhsJXCOVb?zsbmFTZzYx9gO)^V#lfh^ipC!`-lu(Zh|0OBWCdT@ zC6&kaB-$JALoGSJr?Msz@*eb)Q4)8_)orEL#+sLjQ~6k{3V6K`dS~xh z3px_KW{`4ZhFyX8q}9{Ldos?`!h6!;X{wZMjWVKpR*Q~laH+M;^|wRLqrv6Wk_RzS ziw18`GfSMWR#g`;+l)p!84oHi@;04$I~kMHu6{;Pop3f#YWUWPc-%}G|Am5Qpj&kZ zE%BW7=ot&t;Mk|>&cHN}fp0YjEwMWH9yl3(aIX4KPprLN4!#uw*}yK?4*`uKV|*k{696sh?H-j9ck<4YVd6lN9Q7o8J?M>}cUk*r*hJ}q9 zi)SB}pmOlBMo-4YKGo(Kmgeb_rsU5>scW9JKw=OGy0yO>-oxFZHMfk0+%Q6=zbis+ z=+j~EuGu+)Y=oFFeXA!wDUuxMFPamo-VH1H`|+5lojfRKK#_dy({(;I zbvEicMUrV;P$$LCl9{qC`Ko&U9y9$dY0w{wne}X^FVM{68trk^4VI;gizw6`a_=pg zo05M8!qHQ8wHbP+21CaeGkDi~OD&-+jmTXK+ z%wM|@3o0e-&MwhoOXcCHCP#~3TMI4^v#eqS?mKH$mU~68-Lt54WL)tw zIWncl;WRUzvfehWGOe_S9 z_4+KA@%cg>jq^l4{50;q{_RSM3Vm@9ql_lq!UQvZ}2VB~?uz`5aItP%_=bd34F=ORXf+9jHH zvPH?`M_%6+_GcC)znjFqQ^tS3K;*{NE5HH9Az6{fYyBY6DtSSX}Wy5zerHX7T#*387c$G&FO6z&s zM@LFeCDrEd_dJcCV=6xa>;1+n_#X(9^Lf<3Vz~imgzrqWvb-l+((!NG*ZQmZed$K* zhkGASY~bBkQ-S$9a2>D%_8~A`z)ip$AQvbARsvH1H}D8xtZi{UQM*V-KRX(MUkXN% z7=1ZS^LtwVima;C#1;|wjwGmT8U$vC{nfz|9 zjhnQOcWCo15|8qMtVSTgIK3)M_LPagh{%XH`HS|@~ z`gi$0svRuT6D6M;Ee5aP2LmOa)smU=p7`B^@!}s!J~xky<7Jl}8jh?fB9mempV*4N zqTwrN48ES6O)l`n!K_JMi9v2{Fj~3}jD=1f#iBryYENuc0q?)E&_jFg$s+vr_Q_%_ zMx3ldASbJ_!$5P_A9v0o@1djKlPNiKLnYnrLXX^omu8)kMjl#eUN7X$8S-WnJ9DR$ zeN;M3dqzs(X*?;68;c6$IZcCsA%bj=z`#hhom8dvw^)kTeAW;7D_;K@r^iJr@KGAK zQlK?f7jOv-|3_4fycD!Nt915^CszRlFP|H69e=^kf9f(VgQ}Ta?9p2l(`^V<2s|?<;7}dt= ziPNH!OkP#DLDaSR<@1(bK6?2o&la!X%A0RbZE*n?;`AO*$zO+%IMr-Xx`k_wMY8RQ zMC&Y9k&M&S;J2Y{^o4e*bdgM-$+re?WN&<8OC>*3g4HH8vBy8EJP}=?TD=EjaZT<{ z@4G2QG7}11N^jB+8MUjfP43_Dp4g6Im{}}un{U1+@Z9xiJr|)5Yr`_d>$y&l!NyvV zv5wR&l-f|CXvGn!m5q{*9cvNn5=$TFcsTlhm&rFZQ&b`a 
z!8|1ts4Hz1J)OFAVRAdzJjp%#`*uQ`#KPtfYgCN-C|d zS>2~`;U{1do2#^2tyoy2{PMjT=@^!`S9zdv5QYMrOUGS@TEw+v z8bvES@I19h#z#dm>rSfBR-=km%!ZS`ONMs{D+p#)rQDE&_fW75b3rAaUlB^A3ScpC z<~o~apeRg*MwP6U*wDpuGHU>!ffq&kXu zdqY_Y3mEZ-JTcm$6--2U&QRP~{bgydbl-z<7+{RG&SzeD+ZHlqc)YOU|AH%DlK$F) zCD?-w+zH$b+y^`kya0RvL;^>Fh(_8!i*82hW|VGT*x+p@7uNXSbG0?H*~}5KTwfh0 zyQ#cenxRM{JTX(JBvnktWKwE;RRLLxERNj*iHl=Ru{f4vFLie+UC=yedk3pX5vBJW z)fO5v^byqK+;^)DhCtO>Xl%(~v;lKTdws}^3ngVa@1qJ8D~6GYu@#3R8}Biu63t5F zHHo!680 zm;tN?UI#t|f&iZ)*B1QCxID#0sZLk{;J^dR2%LEx{|N7(6H&ec{Bh2xiY7aQ-O(d? zTC%HZ1XaGtg7_T-Yfhr>k%NmUDEl@`pO2e-55^XkaDI*t6-d^$u{;y!z_%XW^MU*Q z#&Gh(kBOeR^x9T&_>BQf?SnkQ#v2r%@M05DaH;n_%wI=Y)1Tvp1eAb(4?33O@wm-) zVG6zwEC%;ON>?wMba)hW|h(E2_8^6P6@ zLys#S>ehY?#-BdDzFUdaUbNIJo+L5n(b!-H>-`?`*_26vT`=)#vhv=1er9-cHMh%K z@%{2L$*S=?bb6HH@4uHq-psH6Vu6>1&_HIb#Y;KaJYRmTlHdBd!OtQ@q5{Qv7suek zb^c!bG>ZIqjz493i8ZolWDLHPnq7rReEhtJBu;(mK^z`9>QaAth}n;G;q{cDz61+? z=OQ_!g*|$0NG+rM9>t1_8ji}loau4+Ry2J ztmAr)W*e>rbZvz7s&H33ejHcQ?51mDms%0->M&fdp(}og@u_gv1xCod=^E!!^TJ)d zhHEprHg%~p!d>;B^@~@JqHbk3!+18_RsVUvcy%vb6_?sG+_lIE`3<_-UHb1~==oh~ zxIReN<}US@OT$WIwc)yet}R^Zo^aQ-hU-mqZHf78xa&H@HJPrcd8@)*HyExR>DmhS z@rJu@GF&6*+S;W~2zRY8To0iZXSZ>wSB1OoG+ejSwXI8y4|m;TxIRbMb}sd7-?04d zGhB=4id%vYgu5OvT(jxg-lc90cU2A7F?5Z0sn3MF{%N@OrE3R#j}3P{ZMfR$+7ZA1 z8SZKwp|_OZ(RQ;hcB$8dyT%!=pU^eIrS=YUUB4XPiU1XeDaE-#U>GnBSPZNIHUs;B zV}N4??&Si~fjfcazze`m;4oml7w1cX6krOF3p@gB0zL!&1X`fq=m%U6um|EXD_fR& z+kMhcI{AYUN?{jt`1l=PbeKN1m1hd(kiUV+QfpU=7GCE{{*0b|ZvHbcHzkg2VGnR&AWK~r7HpC3`wj8^i{_i%_uqAg zN5$dUuwXOwV7Yd)DS^S}Nw8y$U}LcHNcTNieE-u7mY>L&jZ|@%qB*2z_mua@rS~)O zwsiF~;jtk|$@>vuZ|Z?qf}NZT+oGKEKb6?NyI0DFUv#!pB^fFi_?4(ds5E#3Vds8Z z|55KC{IFC>Gw+E6PgC!SMjqQp?k)Z$XvKEFk=zA^o8Mp1B87uzp1TG=*O{R{WKy?g zmc%3{<_s?xX&ISZo6|?xg5Q9tio8zQ5|fs5wcC!mnxW2wenw_iRbe7t7B;m8AF_Sh z9l-=;OGQQ+e)ymo=?*He;TIpjWCX{w$0wC=o!u{Ie<~hhD|DsODQQ}Qlj21%G>2NHDu((05spJ zm2S8h-p_HIbs@*u7jpFAA)hzc1@u{+TFITUP-0HmC>*{8R-5NacN$u7mij3wfL;pn zOto%i0UhIHgcSsxTr%UKT#n;1PK-?)& zQ}y37)P6P47!8^`nY8hPyJoobKX!Ev1HdShqg;#@tGq1n$iQHH{?JtAuiL8iW_ER0 zL;BW0lY08BQAn&^ZPAdvEwCTUxZ&X+G17AgE$W=~SOPEB)yH8MDeWxvP$~2=Bc*N#cdM8hK1i(gVNrFkkWiZsbhVmuT7u`3dSV%Sg+C zRqd&(_#zMrTa!zzuA^d8V>OlFt!}m+oKLCaZc%@XlURborR;AQCJmE`Bu<71O<3oPJ!cSK28ppst1RF*x7$ z_&_zE^-58zFx0Lv)z<53YpJ$M)E+j}?50}2uC|bBt3_>&p%!Q!igb#u#v7&GsiJnR zp|%xjcywC2*NEGtaLaCyrM3yr(lg@ZpcCtB*Rb5K7pD_`IJL}D4~9FvAx_`WsY{m1 zUtSB9(c9v*jZUb2FNQm97pHY}LJO}*4D+>9oJ#4`I7`h5_vIC*IdH-w3f}JC13P{| zVIPg2?h3qzT?j;fTP6kY}iRmu7cJKe|tzr0AJv$8YnVAx!=DTZwwZ5G3} zi?%=T0L9-dyj=r77`DZ9``oa3>GrPJklskT@%syU%fL$pJBIADhL4W)@sQzWr;WSS zrGLGJwnc_5mHz%_*bcXcjo)8Hvj&=GmxQQ~!?_MI5WedWIrTNH^@%R^iPtU)4+CAU zMF}Sm?E!cRyo+u|N~%9nr9Z>0Vm?>&y({FSWY|ljDKq(?ctHS9Fn}V%_8V-$d*t=h z+6=3SgoP=?vSy6l2n$W)E)aTcJ6y!N=m-uI-wP0so{|sX;L3*Y)H=S=4t!8Dizqrv z?JOE`e6R1g*!P3N-s<{jsbXguD}}=$Ugta0NGZG$Qf=Z(MW|wWEHfo~TQE^iPL^(W zxQ|Lb9|e1sdRZrl(XSnWyg2DyrSKO#Qm(UHq!fM!5gUEm;pb~0VZ~*GUWNH#P73%* zJdnT+!}bSl8^wl#zD;^`<1G~>7vqbk_o=qh@U*)fY|9K=0^K-P70o%c@p^o*d8j$T zu+5%ph^y%PtUPo$C)4*)!*&)Eg*wXjz_||b zReg<#^)+6oPuvc|9f9c)*1HPOomsQg!yOrHy#7*0GM=T5O>`ci4ydhlbWmenrH=M; znFk%I;6KaiuTd$XvCoKtxT5 zRti6agqqOy*-%Y8laE0weA+72CfHy_pS(hYJ{6_#V~FtCOetimgwKBPiF5|uF$(w{ zBrotU%@+8l!EVH~Kz?#Zn$JSo{$aSCqK#t-7YnWfv!|8i93+L*BhuG|mE{Te2#y!^ zj`g2bQxL&H;?|!*X+_?hL7~WRs2@}_s$)2TXnYtH>1C$qWwrs8jyXq3v4=&@ab)>X zNL;Tp%k15}n%c@>3ly5~JNyW-@02B{6+VeJcV(m1_eQWr1@oPXG+Yp2i!c}ILM&l~ z+b(tGP6W-D>VwiF>tm9(;*>!r^l^*Aoob{z7zC#*^>=YFnk*vhgmN>IB3a(h+9dQs zKZD=bW@&T{4^=C~Lv%tRHV$)=*TwA!#;v`xw?N00jT*?Gv(h`O7lcC}(g~bz`vARB zAn7YjkNJkj)3%Vuy9|%~IV;m6isN%CX)R@pC?T+IqNKGH>`6ZS0M>_q$OmzU70?mr 
z4O|7J0WM%VFbD7g&jP!EUjfHMSa$-FfT6$*z+~VKU?H#ucnR17degRGcjaTC? zFJLe*1y~F`4m=OM0h9x~fKP!bn7KU!>;dc#;|w`41h@@Y4!jP02^<5OKZ5iFqkvn0 zdw|uzQTTlw<~Cp#@DXqjP=Q~8W5C2m@f-5MUBD8c6nGSP9@q%%06qrx0|DST5WNOx zhJjANr9cWW7PtkN3*-S00M7v%fgQk4Kn)P{7}5(21a1I4z!rdMWtw+G{s#E@N$7%W z1M~of105g7ongQLU>J}AOayKR<^%b_{lJsJE5JLzd%#z~&%i05(G&Q60iYL<3|t3H z0hR&J0q+4l)@t!F-i=7dUV!Pj1$;KZ@D{-I0xN(uzze``;0xd<;NL(y_%Y&{1etjV z4^zKxL)HZFi=Hp#*3_2XX~BXW4n$|Cl3(2Ms~t$44;avjRHcvjVd;_+3v_>^`z zKBfIUH&IK4KgH^g+o4YFV#lw`AA!;3gDzXs$I551V0(Njv*T0QnMU&(jmy-ySP1+) zmZAO~zpsgDlB|ejs87OXWXvWRQ}m1(i(%d+_>S19L>^_)DC{G^6f|rtilXm`gTGW7 zuPrn6&dR95bu|%c=G~}QH(31Y*o@*?iM`bku&1jxEJBP&@yRW@bW|jcoAyZ0PN+GtDZ8Gx$M-9Z8y)p-aq8|_{&0-~T?6l2Lt(bAz;BOr<3L66=#F(A z5P6-t$#vp%wI>Tgr}K1 zUBbIj3lE8k#4~#l{*Z7aH_ms~l0$uay30LYRDVGQKn0XQwFWXv3zZEdm1AnP7YO5- zTm3b91Cgiguh&3wwFcs^Zn9O$`wdS@(xv2iA?hEXYt0YPwfq46t@Z&r%RNETGet{} zOzKRx>Ebp_cY8@f%jE~?T7H1ea?cX?&boUk+(9l7vZ+P}7a{&3XtlL-`)E(-+*bQx zsMThwpWzv!T!N`+zU{P#S|Ch*>CRN&)E&EnVLQ%L|EV*rB*XZz-&N^xp!W!EljfbN zZiUUL9Y*uk>TIZaFOudh&7x?yG;d5Q>$Y&-mn1(Hn}9X@gwUN;s%IWTKk8E7o-efq zy9JcIk5CfoA`t$4{k8Si~YruDrW_! zhJE}%1vy98&~)p4mo!Mk!=$N*>q7 zoYfHLcXd{g@uh&lhd*D zom78SaFq}_OZzS|8UK71dFr3rdf^6fw6~^3)oZ~TboYTd5 zBaC@*!Hc3F`4PQRjb*-l>ET-V#^D$qY{j)@`qPhaj({D3<}t-SwVjf;N`%~;EA#iy zZ#gI0cec@dRB3kwT9RA{fshbX(*6uyPry46+?(k1D7-mGAux|Ce}8UAi<^gQK^Fm!QUV86a=_F7fiVhn~Ixrl0x*8=>B$?0f+z zd7SGu*rXiNP^RMLvtSAuR<2ZTZWan_U#L2I`^w+Dxc(cY2peJH9_;CtabC%=Ns>%_ zeDv5#mWfkLi6r>Kqdtz4*s|-5HSqg)Y;I{U<8EExuk9=4)~Wx*)>7GN<*yCJVR~&K zlyE9c#vap<|1jDN>i=p9Uj|1Yvxa42gB7 zZk|oI^GSap*zaH{2?3)c{5+>N-wEGOc<0|ZtyzF_<#6{v zl(CA)BfW1)Dp8?uW1Rd`u)SK&HYh;nGV3KI;SNYQzl zI@nvtExQ<;X*#_Poo{9{oYmC94#R2TI)eA`ofcg7> zY(GjyCZ<-7tkTJr441n04nEDzvrr9l6MNu^V#?{j{AE}c96$a9^BQfR4jwg^0XpbA zRK}k>!!ssZSnrp%rcJi8)V^$IdLSH=8@p99UF!Y#P_J)Qv4v@ATU91cJl|#&bAQTS zwr-4vFb74tL;F*DVdDTFtp3m3pYl{g`qsdyy8B1ui#qBo_P;cwZwtIvPd~IjWmrS{ zmcXO+^y5SOUmDWKI1HcEjKsq<(|-tU@0|Qv19#Nbm;HA}Avv@?1Ci=;@QY0GcLdvJ zse|OJoddU{BmCY*dX_a5enOy0L*eV2e)RCI>c~3b?~^lmdicpk_?;UHKPB)Yl*98I z+8^{g>t+4tYYQx@rynZ+tqtj00@v2l|K8Y7bRYF`J3Q)-e<>Fg*wN!~NKgK0#SgLGw~u~-{Z)-qo%L2v9JUx#WB7`9Mg`5n|}qDVoD!c4*m~iapbre%4Zh; zFkA6Zn>vU!N8It|DjFKM^k**jdy=lp8R%PtO=kW41@v5%(i8{8G^tr@LsHrrQm3e3 z2K93;H6mLrk^Mb#+#kx4p-uh6?8QSnsJr;w%-A{wI~oCm1#bo#X#}<&f%zlH6%Xy~ zA7(8c+D%bW{EX3zpQ#CtU+#m8>%X7Jj=G{z_2IW) z94o|MKDS)#{rw!WOC3G~)v5+}II-!eJMhQHbz@Is_ku;(a{C;V499gSZnRQy^wO~y zj#UzT#M0UTX6;J;GEnZ{A-dGj2-#f(J@j@7>K$$lR_w1^g16v2!)@V_9*}SuX{_4~ z{osH)f#zK1{zU?;fiL%LBfyJ70XBC})5x}Jw2(IKWKhXAS;cGRUmmngBB=)^Gjb`{~+) zGoCm4CawF@-_c9YSqEm7V^zh{U1^o#Z(zom3%j#2A z%J@({HHx5KdM*l2PiQA^Rchq~-KV_Y^fdrk0N~J5n_9cfCE)twX6?&}wp5myXn&Zi z+s4ARyLdcSw`Emfif=)xC1X3ZSvV9XiNxm+?LBC$2%@Iedp-@+moarR z#JbBq4a+QJunca#{-V%AjMtwh`;H2{^t^oOIX^^(=6Cft`+R|n`D*6OzYZ&8t14xM ztO&-^vu)5$r{J{jwmy5H>)Z{!`8}Z2cB?4aVDL)DrDtD8a}~vAJ~8owq*DMW-H0&&06wu#REjmeq3Z06kHIkto~tI*ID@G3z2%ndjqT z0dEhuLMa|b1F7@Z{(&^@253e?i6(Jew?{<&FV@JvR_j@B|C(l)7RsZ}eMKyP9H_M4 zqQA9t`)b4PYZ~F|YPVHKug~miPv3zb)r)=~(jJkHKuY*`bod}l`v2pjtiNF$*%P}7 zJwpTzm1Ga7Z54sDyZl0P(+t!S%n9em`IcC1n-XsBVt=#Wv@Z7QQMQ#h_>3QqsvgC! 
z5D~?rrsGU=^(cGAC5U!Ze8LtQJ&wLfBVo^5G!j>e(VN+lo*U`6@uoZsKW|0f7`KT* z`%c(4GahBc9_Ft&eR^>kMu9SDLxr8kQ_FP7XLopKP4`7L(JRhfUlOEnDIc$jei)XK0X1 zZ4HGiUU=2CCx=ql%!PZaaxoX=gb@43`I+RLL(t81*BkM*2d>`5(Gi}>;iptehE7Vh z%)<{;RA6naV)4+Vz5ZHXZsTc@I9qSU@tu;?%JibV#$Mz@^ES%2wV(Nt9$b_AQ82Eg zKwgYXH7(;6sO0ezk4x>K=j?KH=kpUhSM~V~iE(lS8Tv-!epg!FG#GsbK@Gprq!cct zMwvHlm$;?C&{A-oGuZ`w8#!0O793#R0BpL0?&^}m1k+I-R9s9m3nqu-8n?3cVrDo(nZBT1Bf z8_%7S>uJojo;PElh^gUu52xG14yT(1QhmOo-?j@M{l+cCIO@K^OoBd^oD?ayV^?F} z7oaUcpZnKXUq096Ke=dqz5ve08xhwx&#T|CL-B?3_t(O%(}9GYf6u~P2>s4DVyQW1;Z4I|G>M|x$I0U~=e8?B#5@|X4-m1mM^QW&u5XIqPG5jaao0{h}`;SKw@v+Fi)0Kqfqm9<6yBJ5xcJV&UkhZg#?HymB0vp5)yPlhyZZ6$z zmF{M&vk$@P2;F;`aFS_CmXxhVo!*9A|_IA3wCuM z&R^@Xd?2mV5oi(~3ywEoKo^SC68N50ZdSh@AqKwEYypn?w1n9MJo_UX$9qFV5Bn}X z>{mn|8_4aZTc+-Izqnyn`k!3q)(Y-Q-EDp-{K&wQIDCVXV1W$LNB5G4GlhY1VzcNO zv#7N#LxKE>+8{^A9!0rA>K)}HP%Dba$`|raf~s*OBNY|UiRwm zr$8R4L)z4@NxoNyj8u1$9J@3H*TlbieR9m z9?ARKct38^i|9;j|4E8y?gH2t1IZN2NvGSUrIAiWrFNF&a6u zsGB-LIYzu1q*Z;IY=z{YL(yEN(9fto%^;Y_SwT}Nt zbqE?yU`-v;s$N2}unuWc9VF+}A@O`s%L0>yG#~m&JVCwKYR730UCXdVy;^RyHcsCd zUaV@h>E-=8UaV?dgI;XvD$~oub-dWrFHUfHUO$#d^=8w{j5=QMJ*|!xqh%(jLvRLE z&r1@#u=}{n!bcFLSHl#wy{Tf?Rj#LsUca)V0s~>yYzcweu@Ou=uZ7z~^^m_9tM?CH zvme+Y9NN_^aOPPJBU-!q44#MX|6%AdWt>HsDN9h7nkw^jl{Z8M$Lm^|QQf4wtrjzNiUe4z_00gC-5LS?TE{M;m{o~ z1LxbG7HkP$T6jq3JE&o3BKnJ-?$QQwArI0g3ya>{i!oHC3B8WSP%>a|6 zNq7-7PuRGBVujF7`$)IhMxu%fOwjuoHAW{!fpAY3&%@~%r*?aVdDaWO2b?eh3bdoP z*7{me8?En*+(fS*ESe<&oxA}~$lswbC$kM()GxJjjC!wSHj1q}|GM6X=w+h5p!1Ik zZ#)~jdLQ_}0*wr0!*qKj{<|4J&SBpY7QcQDdn%lmk#uTnXRgQSl#`TpzWPd?>ZygT zKWv-Yi4n$9Z#DL6xYXvlUSu6Tt9k<6Uf^5lC87f#!IXX6fwov8G3!>Ox|eF3>hv&n z^&OCS{(+b2%`5qN;dN;{Gat%GLe?go!ixj!(u7Q&&)zDZ82+-I+&9v=R);83~U zr{%EqkDyWR((%h_Y?u0pM#*_m_gbVb*j5f`#=&Ac;uP-Zy%6LjSan4??qAVSN`?J^5D~J`bZA>S^x<&OC}~yk8R3 zmT}sXvP%DG@G0sk^mu%NN6@Xv*>abvj?t>!7t-SxeOijT2da7#+W{5H+boG0x7@hY zSA|19CXmw#_%Y$|f=!i#v+!MXdmA}dx&v)NPkLi!J~v3K+Q@`~Q zI@bv@QvD1g&vU&-s=M(C;9TCSmNm%R)W;j-Eowm>-l&Wg_09%)tLkcy$M~oYZ)Dw~ zb_1_Jf%eFvjI_6+0q9(;a2OK4Y;u2$rXF5tk4QsF)9R*Hi$8}O`UC%tN;OhHCCpJJ zci=Tr3VGW$&dTDNziT1kqr?W(7~}bCK7;*b2z#{zZ44+axWz#Wf4;62CrzrIrp4G| zB)0h(*jA387afCGV&{)U44-IqGU$W{9{)x6`JFUQ>AreMaQe*{`=k}M0)_j(v@ttV7ing2lqhI% zvdFYXl%QUU{VICFCmLyZ&#dis;gIHy;LJgrQT=S{ujpFM>TU3L_45Y#1hu>lkB2bD zjO^)AdI4QSPAiyug+nJdQ1s+sEduA;o~%C;BQGiHt?cHt+06v;hVra6t97aod{CNz zamv`}tnF$S(|=nd+H%vs)d=%NlRG+6Px3<&rq%>lxs3At5Iv%vrtPAOtlVMx-yj^) znPqZoj4+3Xg{g&?BE9M&GtAkBZX3}Joo!z8AzKjyaqp=U|t7I1a1cw1NQ^#fYAt-{@;Q8 zF7PRE7-+p0_x=I>ffQf@@Hb!?@Di{U_#8L_SU!e6FdE1OUI7jPzkh=B%AX=%Fna*x zu7LS0)=Y-OG~`^!PXJqiZ-B#q!O<_nq5l>Li{U*Buq-NIenc5y8J&S?|4b{ZUNA=j z%-a;0^MDn=)4+>B74QXc1gHfZpU2v}1Ia)tz%nF%J>>V1-dkZ3d=89@VJ(9FL4f`k z5A|MzTm>8ijsd@Yjq}#~P`>at9p)XtVu1Wkn4bfOfu>)?+PeaV`2=JqkPf^C>;et} zzXOq9;x}U8zXi-Q2zM;vHOkU3U2vNT+zh=1Fdqfh1KWWgfm)yq@^KZw{EP##fn~rW z0Q17SLpP)DorHY-S6bScAJ(0l0lFLVLdbM0g5GN2d4OTP408*x2kD{xQ=f(Psn!2@96IL=sexY86P3)oyYEQHO|dWk@MIYS*ilt|8wWDuVSWWg`LOFL`h^~ ztgatl{b?xNwY@>x|D*HRSU<(*TpL2f`aI7&6mZC_Ru3`D6~9|`p+ne{>Kww}fBr+* z?}d(Yh7Mt$;vwt|;}CX+et7ez6sE==w!NwTv1@tP=oN(LFR_KTEw|9Nb!BB_swb0? 
z_?pt;k?EPmxrzPMpI|S&mItU4#%8Kt!3ME+>2Pa$rn;NDrScJZdjjb21JtGb6q$AY z1JwEQsZut58JA4v*F8Y3Y1BPU5jsGvIm#9@b*2-0OSA*j4r80yEPuGh2weli3PWMG zu5eEH+Ts55M7{tIICQul`v-717}31(CQ76A`1yR_n|{SJHj^R?nJ_VjDvjz9ka zYTQpN^~uI1e|0?q*Xvd!2I<1N!%7|13kPa=%D&D=s$3+nhksj>Y9;S= zWB|d~)yuS?adpc$*b~%FA^R|#yy2Yytd5fWb59q)*QFY#4|!5BI0*-D#^O*hs^c|T zdUZ@g2!BWjy?wQ{OWNS<;;G|v?Z@}~K94y5Ewp$+cd)Z*motkk5K`=tw-D0&vj2~} zH-WdQY~O&7`55;xR7fa8rbyz8< zwrOs#+imV6lATuu@4{AW=O2j4BsJ&9uk zA3-?KPv#Wwe(1H~{m}B6hXbu}KeUI*XB`e)gBnsE9L9U0V>RGj=*Ho_(7X}P?uBl^ zd!dmR`y51-;5j+O;*XW@QSM(JD_43)2lm^)IXZBsH0}$0)D9=PSDJ?nzJ(df?HFlI zV|>6l{w0bxIJ+APaVK>(O}M?A`v7KvW4iM4ZTCx$yHa1Z z*VW>yw!CN`z=4nZrJn@y|N8yX)9NJ5cxkN@U*^4e?td_4xtI8advjz-690QD{c6g| z*AVmxXEkzxGWL5ahcI8rMM``};}7^JUcPtyhHI|w_f#sQbh-2Soeg|5A>sE_k|m0J zp2zu7_j@Y)Ya@!gogeh0o%F8CGQoO@;y&m0ezdzq-;rpZ)S#{(b-$-FPNMidm6Cq6 zr*R*YD1J}n_s_kW-S4TimneQuWrrW_Z``^P#qX(n>_^@2sg#r`eotkpA9cT{@;ml5 z`8}24esqW_?~o{dPvtH@I@F>cOBBDS(#(&#-&2_?QT(1t6+h~JPi447@p~!-{OCwi zzDuGwX+F3<3lwXokmKcnrs5Djpx> zA^R8a|8t+NN{VD`lb2fk9I<%B%d}}yvsrvts-$RJI|GfNVax`6YIS%V{?>}$% z)&J`L^Tjwqv8d*v3-2f3ng6jj@BZ_~_{4MUqiZ9*tp9fZd3Zn6Ki@7~2e(c8w+mNv z&-HfU60zH=v1Cui?=TILyGgU_WX?U0pSDd7R@I=7EE#Wq#ijYVT>km7{0aY<@APY$ z{$2kre{PAGew}~FAH9yB;tQ7VD?Iwt@5^#$ zMju(er}*--#Si4cMW`Rk;oH2bB*9RLw6a(M%SxJ$4$^e)NnrQHLb+!iVl1XXBa90DCxA+B~8wnRWPzrCB4!%CS&MT6Q5H5%fc8n@X154X> zN_+WHR^c&xJnszh_c5+Zwg}qV2{*gR-d56^HL6On> zIxas{7{9u;RN655WctrJ(1N3`1*g#=4E^8^?PTDkSX}pN7kpZc_A(EpoeyvAg5G>y zNDD(%E2SHyW8G*MOw#l)9Q!ps{Y^gJue^MB@#UwA_fC?U3k9V;J>K6Z^^V@RJSdgA z{7{k6UoPJ-HFv}avhMUBa*RHSuP`j}m$ly)UwaWPCjHNHgIi?RnIop!1*fah5Pn1+ zznw(qSA*{PB;HqmxFjTfQ$jvfyZB!Mf$&Cl@@5 z=X;Wu?@Ng+_@v;`OB&q;~9i zYIcQ`U_Um+4>W)hk-c*fE>R@i!<(RMCEl*HzIRgZd_B9Yu8`6@sps64@(2GND<7+-0?(RZd)0!DqR1;m|LEukIgMBK_0s(d#O-uUK#Me(X=> z_Z0%`LvgYBhtcb9+5CeSlAnia7U{U`o=97E(X{B1YcHGQ;zkg%HhW`(|13y zdH2LzLGfSHm*=}_=pRD|1c=qFem2)hTqZ6d8;>j~g3ppK-<@1=-L-OoT}sY)0n)jS z^2bE9$kZHCT^{^6Qi6;TZ?r`7%hauwzJ z>i@2ql3ERaqWtJSEPz74vIS^6zg7Ot7J6s*&hDV5&yShj7%asNlaMNdeHkb7nUm1# zT)*y{fIsll%-Lxo8$(`|1Uot4uK0dy@1E*s!OC?qMni?8&q`}RML+(zM(m39bX+o_ zIWX_ZjhU;{3NDdu%Z_v)H@gwrou14>*_i0~)4gTnex(x4qN%|hxZNU`d_MdXkTL+; z0Klf3jQ$oYl=hYR4JY!QrF_VJF5Pj3wFWj3dutsB?aFZ{f5&@gf8hIs@u+qN_XJ~S zeGz}_;jirUa?rHv&^)zc`l%+75nJ$yTkO$UDamrZx6J6Z*k1Wc+1dGInn(@4sEBVK zKU}JJQu^KqYFLJ+;N!x3Nxpi-QLb0YB;G-dI%7*~3#U{%S%P*)lN#C1G{-8ZKoRs2goc#8} zWjMC<(-LS`ndQ54?n}d`VxcbH%Zy%w7s4uX2y|=*-40SEBx(lTDM0tFQE10@qt~D_ zVZ2cozk=}&>3hn_j!-kGq@nn2|F`WOIpMm!3zOeD+uIn%monY~ox|tab(6!jt^c!4 zJ{C&${i{uO4dX3zlg{Fe8j9au7uQl8-e_?7b{)_U>`<|>FC6Nf!E~%qwpI6yL-=u} z*bXPZWyO6%9Ci#gUwqEK@fV}#jm~RMD)SC}N0S{P|4h0sB)#&_ByT^d=SUfsqUdwR zt)z=*eydZ>Z7Z?MStWnnsUPuuRk2QN!n}1(r&fmX_YudOu;+TTX?FSeHc9uf)0daO zm1Jj;e?<3vsx!5ga2Cas*Ks;rO5!+C#5WZ82)7W$7MP&(gp5_Y;D#_>)8fJgwQ?9b z525A16p{KOIJ+#AP#+FEEn=taEn36@E#k*=cw>48ZxrP*_!*{FuL%BPMJ`M~ox_wF z@hQ4bO$_5BEiQFJ{c#JWJ>Y7@=dsJh_t4e&H;t<2RH~0&Qwfu`Oug-biy1rdJ_`#em}xZ{m=C74y`sx(7Q!L@p@9(KfSbkrA5dK z>oGYL<%MOR>=pm@Md{j*`JrRxK>PnIb6VJwp^kaMzhSP$#RDCk%(lq?hpE(?6>GW8 zWp@16q^{V~zTc#}|JK3UlG*FubgY+T9{x9)PoTH^R{xvK^F;H&zsamFnior^&2)b= zi>&y?nw;@HJ|yZL>^j}P7RHxbJT~2qaG}icxF1Q!$2Rvj^GZ9%$#~G@bar+wToCiC@kD*BrS^l+Sv-Qb+lfE&X2GKN$qPW*cOUxZ zTJv{)y7Hzm?o0t*GdMId+TKG}=rx0Re=+c0k`Od;`(G8@#hsocEIwlE{(D>*86B{A zu`@qC=x+{#vhrE}i(HVv$$iJ*iW2dseNvFDEk>{8zqBN~x{w+qg+hn1V8BH-F6j3a zm)})#01U`Z+znN0Ud(_0g>jgXI=L=SqiQ64DKk2{*slj__ zd;0!GdwLGV77k4Ax$GqVJm0gtJbQ*<@1z4G2;)&Qp^*Hyx5dW0!aml9TUV;H{Dg;7nlzp4h8U2qwT*QDUIN|2|RT%#|9an-xG@VK~e za!MK=Q}Eb~#~C~-6-iEc8jm0FC>BXhxe$*gc-)FdTRb}BaR(m#@OT&x`G4&CVD3El zes_Ek9d5~c5$ji6AAGb}a!OW3+;q4ytJ5XL@!y{+$iH_~mH!Uqf937`m09vu^vbL% 
zZ3^MPU6|RO|Luhoc$C7U5*}CL(GU;eOZ+zc zy8|Ag(HalQzZnnVi+&IMTln|lA=|nSj{$f{zY^ZxPP-}Lz3qDG%ktn;W@~c~!*vOJ zj2HVDSUvmQdW3WHi^VI*J8$Xr%i!x%hy4VpwT&8J?VDsz zn&98^Mr!)OCew~IsgzQC6)tv*cvLSIpq5B6GiX=-=Ka(j=hwITTtQP)P|e!XF%=r0B@}fij3HeXZRws zjPsJ^L!oK-<9}FS1)@+avTqFj;RoK2L@f%991>smH!b>t7OXglspqB znKl4&g`+`JVWH>zthjs__%`5?XmFX}RXn^g@S+F2qSCI^%pYr`LHq*Tj-yA(fQ-7K zW>5rl1vYUMuMobx51*{X2XHWqjcTtChM>RctD3CDh`fy-pG7+Qk9Xwod?J3t z0gXpu3*;lWMJzYiahRXMdZPr6VLbZaQ4#j=u7~{3|6a`)FRlW7T^LIGH-aIEm-c;7TV`PQV`s9^@7a z6*GI?S$St4`lA~Cfq zUwm&4<9Rfw8ZWbr;7{Bl2E0g2W|_#mN12xrWM+`@gEDNupCkB+$>fd6tP`2ulxdS7 z(}Rpj@iHw4rkYG7CbLmwDp00?lZj;3#w(&6%jc67HK|BO=~&>;yy7^Y$>ob=CLto< zj(2J`zLf4;kzHR#di4P^{expAnSXL)ZU=aWV3va6)U#V;GAYwHl;PC#C>aZ!45yxZ z2|l18r=CC*8dITKsKBY`8YZVX1r>$Lnvr%ua#< zw}`Q~sppKy%%{xc1ew`n>`{h|{fh*rnT$<6xhq1ZA7wfu$lOiFtMM{Xg8wlYn|ca~ z%%zkmk|0x-j85?~`3V*`8Jl`aip=iP(wnup<9qWBo=1bC@iHq2uICof8=HD6ip)gH z3{8+3N5-ehu--gEFwZu_zZ7EYfLFQ&M(&J_75WLQ0Z0e~eG6gB~Yp!@xImu`c zFLMY<(cmPv2vauo;73bS_N^)B_?B2Pfw9KTvAMZoin~Sd+))*;b{RPsZ4|2p>gwIUMwpQYKH24!bI9G=3odUi?FiSz1|61bIi&acu zWT#7K6;BYHs35Dj&#EZlRWVtrXvQio10&W3R?(0OgPkm^s6w!oRgo{Oq6j0MTso^b zog_|*dsXb!jYorL2x>28OBKt`mtMRDhF`@(Dja2Pt{1NnoUI@}#uipFl95d=omC7V z__TuTMGvbY*{fo)RB;WfxEKt-iYuwm$H}r6We8TbD)NL?&BzOaDv*4^-{$=yw#r?OajBNVipyCW^Jw)FA$unpqv=F{o+wZ*0^+5aWBDP3bGd+ ztco)PwTj(R#pSHxJTUw!DpBEfC(B->5G<{r%-F7q6KBPCBbUx9z9;wxmGKp~I7YtE z7DR(VUKK~AiWw!O7h}PQRWVo&hgYevRTXfI+#%po1ScpcC(tc$j5Nn;{Q(9(a@j1R z3&BBN5w}8A>YRdiLBNw@JR`D^xZ#A90n5`{{2JO5m z?vg4pi%Kv0CRFh#6&9$1_2OQF4_Fn2!zx-b@}f&;74-cazXmFEP#TcpLAy&~H z3_V67nWGlz=|W$sys0X3y3jusXvaWTt0of3OhrUa7g|XMUem8+#v>v1s#u^T1GT)$ z@>*qc2%nkv<8k4TSJoIZKZS;q!o(IDA2p=TY72(H9N&?h_Vm`?B)1u?E{ z(O80SE65f-rk#lf6|Io80$FpVh*TEQ5Db4BX-S0d74W1>az1S^PtST(Mm<5Jk#Ya>)&f47T{hI{mDv0k^3lE(!jC}3VS;b(2;}v8t z?zSolc~u;dDz0M{mB8?;xQYt*J6ZOkJi%&Kg?HAGmyxJTXB9_JNG}R{RqWD@M}sE` zYA=#1qlzs2OgdiQ!SJhimkRq>o9o5P1g9$)ch)hKkxyMZtLQ^8Q$hBkvsIDgRgof9 z)M6E-z=&1BvyQ4%xYNnS?bbRMR$S^E67%~vMP=f)GBI96_>J#B4GGc zl%+x^C(GR;Kf&S(#+`K>#%D94L4B9bDz*_kMrD1yU#BgI2KReaG?FT&6_8$x1jDc5 zB`R!E1$&K4C-}62ac3R982P}Zvx@cv`*~H|XjSYZsJ)0v6&JFKoM8A>6r(~5C(B;^ z`I|V-r(oP!$B&F$=F(Zk27&>VwHJ%E1<~M6uZr$c#q;^47mtGBS22bPYgEB{F__>m z1>?>-x-#;XOJ^0$3EtsVQP-;Yj-d9Uzf=)n6(@d;uOcrM8aY|^;wX}$!5;{50{6~3 zzG38Im(D6y5Zs~Z+&kxL3!*_suZp2k#VFi|lN$6-sA4D;K2!zkMIV9#tqSj~qYWdI zTso_0Krq#-qMB9lB|+`QIH@8RtJr_kdJ)Mi{+^z7oB)RNACrz`{61F$-!ZTk8(~jy z*0CEAIqUdVGH}-M5i|0H)TOb&YYZ$_D*H1|+Y$|~w#p(I;RjRuiKP{3Pv;d3y zOY9ww9zRnQ||-MopQ^n+E|J3MnmrYvRhgfhI$dL9|AoeVFtCJ`*4ATP775``Uk zrI3|J;tSb~=h2|BQ(z$<6I{oQ;dI$9vwkZw&r{~n1eq~ptWk#f8BB1P$=GGqgCf(4 zGFK(YG$rGSc$u06uQ3_B%$js5Wb#qwm&5U<&flHM#xkXb`UQoPK21V3Wo=#5=wohLFQDRX~<%oAiRQik=WH^DTMvCFLM zMCJy{RB)iqN+nyKa+}N)Qbf!WT2AC;WBGkL}Ug#AAhrUi0poR)m&=u<-zz~ zeUInS;6jtssb@XGuUP?1+SGGIWL~GtQ=tr}o|nnkq70bi)RReYtb&|+lBz(V2Njx! 
z3VesB6O*5H3Y>Zx6KrXP*wm9EGNmb#BSEGp8MnsEoH-yS^O}rJJ>^AaGj1Ej_q`*- zpOHT$BX_*ae1c24MYP4Ho|+;vhB5;ZWYWo4q6{1Regub@j7>d_M5ZZaY9z=sAY)Lx z%%udcG#Q(Eq9St^-=mWn{JcNj)NwqI23N$(>?XLMTZAc_db*3uLdr}@ka>rUKpAFg zBEi>9#-^VBBGaET9TQ~kC1YB=Ok09oOva|3p(0b2GARi%<;nO@yi7rY=b4O6JrhLc zhiv)Mxli`R_hu`efDM5YsEu1}C@O~$BrnQI6(F&UeB zT8KV^hyak!e7g3JEf`$+$0GrYylKCSz02WRW@khnU*&b9`?O z;(0Wv953??!SA_6^v0&1xgzrpWu8xvd4r5y%CO#yBlxn(*wnL1WbUO*s|1;DWV{qF zb2GtqCSz02w<1%XGWil@E+C^#yi87lg-ph#o`WK@^>^vbvY+C6vk}juL4kOgj|i^j z7SS7bR zW85N4+0-*zWR{X7M1nWhOc9mp6PFVl$NEhb}APYIE^fHG%y#hWTZ zM)P=?(@4VmpC)5dPZg2bcwBn3(8=KX&lH_{R+9B6>%;}#NXFV&U?v0eG=Lv)iDb?} zM5dlM@HeZT$Ua2b?#K*3*AmJ62%r0v9}(|Kh4(}0*J6Rz4D`}KRDK1fA;$-wTKS2f zT4mk3KSvz?=t6EnF97EzE>aToEi`#15i_PfxB4a#ON7wPipvZVBorBVbnWG(@gVKOtGX=wQ(f(ql zIL-XN<6JX&$%>j(Xyy=-aXs9tM#me~$ar~XRx-1&NyI+?L~tF{trt4Jpv!o9W-^%h zv1W2!z@Xeu@F4}mk6VruGtGXLk+}>Rwujls)F&(5q(U?08Mwx)M#me~%qQp$mlNB? zb|aHeGv5>ZgIm;r{X#w9^DmChky*&hqnc@EUL!bL!SKa#vY2`3sF>-Fj6^ei$y#7i zp_z6J+~-wugH;m{bY`kDbAU;xnGysq_vkr|{HfCi;9lU-RvuZyCA}2-(I5U zikaC*#LRQZNHp^*S^JbF$G#miPckr713YHZw4=D*^dxF)@(i3%qC#C9%@pdnMDk2hN$iIZ>fi9 z(8^UqGh>;#RWoe@c$nY>kKWVh5l5$)C^MgObeg%AU{eLd)0N#~rWnnf{`OomdB|#Q zQlXiHNXGRrubORIL^K#i(Cua`m|4gqbY$ibTnlyU#U!KO=;$<)&dep6X~#@If5@o&WT^(LWab`$)aThs|) zgL;St109`a7BKUOW}2C41ZOE2t{Mx~fSDnO#LVr;NHo)jtobGtnrX{GU$2@*R?S|5 z&P)|%K4=nZrZ~aNJbDhJ&v0~_+4r^dqJpDyH`_vRCqy^_*a>7sF*EC+n0Xc%iDssd z6)4M}05TbPRRf#=253idzm2Pg$INZae8VKvOf!PrJ$fCZZzt#`fHKTXb#$7^N3f`Z z;c09GF|!k2$(U0LjrHk5|oBEg~8W zCFpju<;*N-5;`)o39f;<_2Olt-{9ypGn|==HPeon`v?wJFua=3SIjiQH-n}I)sd07 zn_W%TFp~<+lx3ivS4|3_a@=;Kv$y;ZY^Ak0KE8c8-j zW?O;T(oYP81TKvQ3NTR7lR2ecqQR@7%<1uv!715}P12Y1Lz&&Nz)A)_hZrXWEOXW% zBFmhQ@i%L-n0z)6lMf;Dua-H(sPLI8_>)6V28L^ZlS8Ui_$NU(YS%Gyq)F(gRVCQi zqhDb3j~$((mi?uetMBMMp7s+w1`!&wYd!PC(DJ=vXa+KD)NGkEkF2xG5;L~UnZ&?+ z4baRe?J_=Z;Hu#==K*GZWD>f}=|XUjN594BM+r(VxXh`=%)XA!WzIzeFIAAsoOR;k z^d9lCdt>OsPNok6iw1Q~B|MZiGjIrcc3>~p@}q%_m$x!_o0-39rcH7a2)^Udb$oFy zF5~5`40K`}Gs$m(TMp_#l4l=rI9@kTW=UY?m9 zUr0YHn}qIRYYBeOE$Y0W;|scsmuKb`W`3iYHZP1M_^g8AGUu3>x&3DunP$jHG}D%> z$tD$=sn0+=uNob1RI`DgJ6s~n>}(Qh=JyTau!Khs)I&5F>*zdOHZrq-qjO|FBDk6b zd)EMyFN2w9abG%)e`F+@$t3FwW%LD7`an;bwKxQu0Ogm<}6MWdCw=(*1g3e4`X7+bIX5iYyG)q z4gkab4<;3w*~Gv>h}s0OOg-THyj?XkGlQAGYNnZaj^G@RKFsJ<9GxT6i=nA!fB^rMnV=x(-#;BIbF zCxCfIzs=EUW->Fs)=ZlKMi3mYV7ScbE@rxYFC)_w8Hr}vkTuDqLNoOkXzNu|*{b=R zpfgj1nVn2R&7A&J92WQJd({K(|8jJi`GT4G9i1cdA;DEF7!!b9p&lY;#_tw0Ly(bZ zCWEXE%JL_GJ`7}PfX7T{t0u`+!`H3|FOcP`znu(J2vPp$zu4bT(S4}0WW<5b?rZ6)*nuMA;wMHBk^XPlj zLo|5W(P?G_Gm{;iBeRI$N*0U>z}`POB4)Xg>M|#ttk0Dt6TpaApf>{< z8sIV0$*RdF2s4q4&XSE~P6K93Ke1CvpiwMPfq{meOi`1`3T28(HkLWZS4#`72xU%Z z#MVCy?13031T1rQAtKA1Z}2xO`Er4q^dNdwW|c{>L8 zYqzo9vI_OxoQ}+#W)eDTHxlgP(Q6w08-i|fD9OyWj?PibMX<1f;al3}#n85GVrcoQ zaMWy>^Eohl9>%1?WzIYXHb9h<1I)ap9-_g`t{N_Lo?_+}&BOu0JGTcC9P82VGCqZ{8 ztzc#$lh8q#LvStBtqnT9I2V`k@>T}v%v_?GHZSxeI7Gp4nbTa%G~FtLQUe)wD4Cgh zWDPf|&`dc7>U-7bc%vE_FK?N1e1+J)-Xzq_Zi2saiQY^Gzx=)0Tn0UNt)2sAexgH!@Y2`JhRtnc@U5^XNHr zK%&76N9QDmclpta3XaahWedTb5aB#vA9Y9*Gqb+o@sEr|GgHV4l;t1$nGC$D0k(F4 zb`;k?T{V0yy^Wb~n1q^XMzFg_uVeJ>1l_S;hMB33PBZxk7F96(tk*~}v-4{)vwGRN zX1)Z5>z^hSj!YHdC{hR6m*FRk~H1iBIztv3aCwvF=A%f3&^m~n7#L+o2t(iH> z(P`!yf=v_*mpN0#Oi`LSwe(ywxyfo}QlXgxNXGR~ubQn|1g?J)bi3JdW)?IFH8Y#w z8mL<@UN-s-j!rYfnYmaq?U=cb;9v#AWloluY4VkfOm$==?q*k$HO!W#GVmot zZKr=jJw$^Rt{R$onwj5drcD4t2tMo4?=gB|N9V}2V&+IkrzD@1f7}RJ`~#xO+wA= zBzTHj)Cpj{dWZ%C9Gzz7G4mJAG&8Rfd|SbAnbSnf4B9AWx*{XdOfRxNFsaZ?l!4w} zHP=}+KNECjDl@acNvN5k1gm@WZ1oTg-gI=D*}F)3aj~OwH`_#T2Shjl*fOV!n0fmP zF*6PsW@gb4UFJ+CYmc&I0+<>Lq%-h}1~>scVAYhgY9bjAN;Z}`9hfcs#KuLYb1cw+ 
zfmBbXn#p`g5Iu=xRF`ZlbBZv#c_>pX7C4?Ijvj-U7pE0R$ne#aVdm(B^_ebg>8r=o9G%OY)&x5%$Yst{@ll;ViXmuwpREkalF-vs z!b2$!1LeFze$(ya`X@nmC~aRLt*B%YIw)%h?&cPCUeNKyxwwp%w=$T_%&#@m=7kXi z$14~vbLNSeZtG=Gnj$07OdGN$nN(<|9s_N?YIM9&jf|JK%qhamP9~vdPR|#I#XY)? zFX%E}o|!M0ncva5hkZzJ6${4Y5Bn_jdNDH|zu|=IpU6lwlR?%7W%-9o9|kftz{5qy z8`UJaYWP}u12adPgqo>Au$f0MtpgGbmJ@V`OHO7saCDkEG*5bQ5+Tk5w#?ZrW>(<~ zX>t4`Bhk!4vT~SII5JZh$Wl3)8KWL>{nJ%LGil8HSTnJoaAj~i!G}D0OQRnn=*(Qj z%=;akX37(+reL_tIVNUKej;XeesHdt{lIYj)1*Q(Uovn2qIT>rRS&rS>8hcbx0rcU zGtJDi1ZR8np+>*d(K(UzWaev*PBZNYc2zK3<`k+4Gu3FOC^8a9<|4B0HL1`{ZU)MF z)%>bO;QA*)Hvw#WU;0teBy?m}6Wqlu>ICqC(f{MT47Bm8sbtlxC+N%+W@bl|P&23A6Nkk-`X2Rw>z|HJGaHzh z?C2bsMFdx}U`zn^p|yHqX53maGZ-0(X41*}Tv`4E(3^n_4e*%hWYuI7bY>bc^C^>1 zGt~(;_2{LHzRb~SCI>UGb#$6JI9Gac0wGQSw#;7)aLukC~2E%~^uZ%=OG1WfE$p8o?$Wy`<5XIy%iHG4mQn zrvc$}ySYR>(3p7A8PiaT-ee+gL zBxAH>W0}*B+0svJTx1@M1v)TrzbDh!WcCw8Pa+vjBpb_|OPSpxl&KyI6k(vG26%5y zQAFh3oc#EkwOdSnhi{+7^^aM9z06q;9PiJB()N+|4;c6y8{p)Sqhl~l{Y8W3R-rC) zMlo}SCM(`PreuvTXJ32@07G`ERI!Em)g4Zh;E^|(ap+YqD+sts(Y?+e{4DZjF zRJhF9!$6W(%@!>J-+x6=c0n$47Be%iN$4`?ErQEYll9_xqc?DLE^{7Z=0eScCYL!q z2|lPGmpO&5gpVf6WMHZzXnUV6bFL;~n5l&GLRkjtd4&|RLS($WmBF#Mr4`qigdR$} z2%hE^G09;}b$oFyF5~5`4CXWQux8pMHBS|E&OK~1!S5i#dBK)B^~B7~ zkHyS*WF(q-g{-~G@+Y|r2Bv6$t<~{HHKklNd@cPCGpCz`nrTY#c8^|L2P7J7BPcs4 z50?v>*}>6iCNIGh1;cxDT8NqNmhkw0>s&J%f#Le6NrfY`kby5DYWw>e>H*h3T{SfG zG&8@^Oq9rF&Jcpndh~mYUf9uTrWG?sIy%kNC)ilQaGBFx%%srF$v4k6lZ&jDCKa05 zk7Qi`^s4z*i@^0yg6`#K88Zu*gqoQ}a5dDe7ZZ)%$kAzL7&AZ8Ogm=!5*(ypxXc+M zW*RS+k*S7^#NDheSwl@KG;u<}aFQW?m)uwu0d@XPlTBgvkrnKar7WrWaWsm{e#c%0O?gn(M5Zp9wlMm6_S! zB-Bh%g4I2GwtB$zPe-Sjz0;)^7dtw4vrPndK!g*3Epw)enYTX_GvknvXl625dz9r* z0O<_8q5)0-4_GxNT{S#rIy3WilTb5F2zK-6wT!-%pql_nGqb&;(@Y+M5e35!tt}Qa zI~IwVm9L*`<_ln$pG+zonFS1NgsAQG)73*XxYbodGh>+fwPxA`Fqq(YkAAn&3pqMR zrX@2+I6BSLBY2~N;WB57n2FHLiPz3Glas6#CKa05hh*Fj?N#%Q77+~|C+K#wrOeE4 z5;`(739f>=_2MO?U+?HNGnAPhYNj1CeFzRzFkI#w6f-ww$;ebiM&fRE63RpGo5p-sLohG&$n1q`7j^IgdQ73@U)I&6Q(9vn;17;r5Ofxfu;0y)BWlru| zFf(wWnCXIyL^D0fdf%i%Gp!lu2UOm2cj6bwJKR!7WipD$)syc(K0&`p;)8-PWF zk|q_&*c1!QXW$En+D`wvdWZ(MST&K1>5`3Q&Io2pKanjx866AsV_>8w)5TBV{(717GjO~=6H41MXFUUZ zumLmrp%xYmdNAAVg0C_2pk~@k_cXz`Jo+O>zr@kG3*O1hR~((A){0;!1;b^|U1F#b z4HZI$?Si(h zRtANc+0i8QP&zeP+EC1+>-geaT*k{=8Ejx?vZHfQ77<*@f^qr7zLRE*m>KuJm>Ghf6nt zLp*v59gt}78$mb8)nR5oN2i%`1gk0-E_1TP%n5w)ERO$|&o#3T7_NVsRA^=+1N$Lr z$NtCa0oOlWH8k@kGmmJdnHf)TmPdcW=#?Fv$Ns&{oaX2>)0SWt1;b^|dNEU#W>S!m zI5K6(y2qqKGr1VJ$gAe47J=)Z1l>fob)xj6f=TFZwu<0RZc&f@_l@4!(P`#oW^U0; z^nzwG3651TT;}{FX1cs9BhwfeiDp`pHPNI(Gj$n=dev04YCa?A%oJi~s!6DslP`(G zq8|Nc^?>W2j!rY5Gc%u~(@Yk@6)YGNfPG`q88I{V9WgTq8Hr|wleJ!1{shpAfpiV< znCWQMoF(YYT+hr=CZT4k5p3eoOB#Ktqti?hGp})UnmO>I^x`-|oB(W@lTsUIR?HDI zZzCho%zU!4mF1h6%)kN-(9BcXQC$CY)$o{kkeQ23Ld|p~IM}1#YV=#v>|w#g5ff!k(jAMGZADYj?9H*-EC5#nVbxi@v1qZ zMMQ%a3A)|v+vlYpmzac(%u0gaaf>6+#GN+T6xowt= z%#FxMG}DT#mrN=&a}@)vy=p30HJ=i6W(qR1gGs2F6BEQ?ibwxRJw$`ij!rY{nVHwo zIWh|gE@#1*0POp``iq%oW{R1C$VfCZjI7U;zPPcCTdSpAexreOZ5pV3va6`edV2l8NuU z#qU2ML-!s0jv<~uO2!%|(@ns82@X>*+8b4Luqw_Flq$HeXvoaLj?RTe6@s-CT~cc#GhB3Wh6!Q&Pp_ zZ%P~PMn=uYrq1zANatKv98S4CZB_IGsd`xg_eu3-4VtU`5A#i=)> zid|#lt2m73m=Bx`tJp^H7{s{GV=&ff3-J3V@m0)a=6=nziGC8nHxvxd!OKe(53-8R z$nbm7i;NGP46A5Qu%B1OjaJ1zg02_Un0c3@vlr(PET>?2@?1x%I5=H;vH6+!Dt^Rs z{2s28VHFz)1`xAeEY=p__>Zq*Ix}}_ruAYh!HEinXT8m(ihEc^OJw-H=t{<0PKH%9 zCwPZfMO~}nJA$qkWtrK@(b5?{qj%-p1z){At4Pb(OHYN)?d@gG)k12X(xv>{`XlVKGN2&Q^fRI@6+ zBip*2%Dnngnn0swit!tS0Dsk(Zf` z9G$&5I$C=12e*W?82fV5*;2*2Y0`^#k>OXdl#KjNhE>cYxJqT%i zP&2I;eFzS;D(qZpnN)EjtEh?$zl!=~q&pc_QIX(vUKPcxibVunFMfYYY}avg_Tp!P 
zhq)#6!cKy=NEM5xN-thVhF`@8Wc;ZNm|+!D2xckBUX0Ne;QB&*6=}?T$I)5E?F4&S z74}(_K&q(ADlS5XRuRc;cC((OR3&SGNkuX)iv>~`sOD93R*S&(Z-VGaBqO`Io_72& zO8QbJl-Uyte8RwHZW+D7XXV!;BBvdz@Hgv>RQJ-WNlBO8oFgfz;d4nzNs$pV5s54q zTIZVlDcKb>cht|Hk{a}Y&R>0!z8CeSpL|ITPx7M-^wt2ospE@_NitsEm9fgq>~9i! zDp{0Zb&sB{uJQh#qw~CY??|zJv7>V^HWAzb5tNm1Zu9mO*(Jsy!v@2yy-p@;kFun_ z_Pt2y47{QNnt8ygDe0==wb#zfeBC7U+G`Vn-8_0Nqi-cB`!`RKN;9*)qw^Fg55b6n z;VIJhs4g{FHzM?5*Iu^(iw0#)B^;EG82Ac$_A>CcmLCmdyu3jf&&=(b$s+{k$d3|y z!K3T=VoYVcyvvnsnK{PMIVcSY-lSl{nN|s!`QyoR&EzAijY)-OenB#R-_ol_#~ani zczI@4F|&wC=pObi!B3!Wz0mOmUB=5ZlgZ4bnrTyCe}WGym~g^&(_|T$%aLJwn2pRe zWM!CCXy#%DuJx+X@kTWZ2+A(PmDkBkv3-L{sF@!Kp5YetHEpAMhz5florg;nGk?`g zGxIvZISMA6_dPsG%=AD;qM7^1T4++Cnf46y^QyVgs@X@-nW@IiG?P#>=MlWZqvtaE zOh>1g0~ykbN{-Im>}!I%A;M$dz88t7k8@rYGZT=JXl5!|`<3OdiAOLnO#?jk2Wm$# z-@0lzGF_SZmPx3Y<^=EX=yi?$9YN^@&6H(kCr785`~-_Dm~a-ld!m?Gn|`jD&A{;e z&n6X)%!dqYfvD~DGt>jV|JhYTGvk=KO*2uVe*ZJU=RJBaqo+7J&9q_WXh)}+1_YZa zm~b*%oMwI>ey*9kWJOIXG;;{aI3M(?*{(%k{Y%j8W-FOl*d)}~`Y&S9q zHS;~eKe$Dm0KQNU`2J@{rlTb4y2wv{da~l0^N2i(nL!}oL9i6+`R|I!KgcE>$FA~qtXTKn3 zo* zIy%iH6D+1+!ioN_=f%vLC(bpq2^i)llL|*>5d)hcYCHW~>H**X?5d%evCQ16nKl7D zOmKon?`iaiqjO}U%zVnxY35pjO%)8Uy>h)!jAl+hey*83WVJS_(9A(3%kN2i%|W-if8J7)S39HL;tvZLt)8JQZ$NZifp zku}_;LNnzUsP9!%*s7UF(3v^@nApDFB-G4qg1>W%Ist4@5BUBQN2i$u%sir*W@Z|} zSqg^NUb%W1f@J}|{{$IkW@H0hz4Re#zDY$g?vDl9GSJtnrjb>%mmthUGH#G;oL5$4 zw)7JNA%Q9k>^t#mb~b+B;Zc9-q1Quy1%}T*83Qdqh2If8$vU+KpQ&?v{>e6~FSQ>q z^N?m*3#Jg9phUy|S@3i4#Xq7-m!ycDqg5mx}OL>|C%eE*?SU>m+Bcmz7OTdz|0(Llz_ zJF%V3OzE%I&tvpOg3}ZX&(iBj75!L62V}(Bz$)%0<6S4iDxw5?dsSR#Rs2j)+Q9v> zGBdk7I;$v3@InQ{)9V&eMKDf!@x{aORqVoZd_Sv`VHKYe{0U;#i!5zHG`Ky!imA-p zrkS=&j3)TJg5h~_cd6nIR&g^j{9be><8>#)Dw+`N=2cP4s@O`<^`bO0+dDdYk%wSJ z!SH%$npCl4tn^~#L-AE?#&ewiIT=>*F~M&kX1$oLEr=0wkkd+=z39vnav%Yy*NEYoaIt5JcXPrRcw4l zda)20eiduUDC%Tb#rp);vtXSnrfLhK!A4!iwvl~l3nY3ao*WcXEl zL`EJb!z$h+xJ+eueltN^5Dl)4ui{Z=F3?Qt#k~X{uqy0q>szVfI#y8$8GaR4k@19+ zVHM>GUgK3!#HyH2(DmZPAhBK3(bJ6&;;bd_`~Kxc!uB%1=)*7 zwFUTmQG6A5F>{Kevx?RPJ6jcYJ+z8cQJGZ~Mut`q$*fyP&nQZfb*D*1GA@b*k{BrM zRr8A$5e=Rvh@M0;4%gP}p_|gAFNH#xuVaBl46Njq(HmS3U5toa51ogca)2SXorUF`>8 z(V&K@glE;CFz_Sv>_u;(mLCmdyu6EquQGF=X7U)ondwsmr+aiAUtE@x@$$~4x-;`- zN9Um2O0a{13FlIm&`f@0*r1r1^T@j0q(U=i9uSx3d)4T8qZ%16&&(!fmN5z4!I zhFjDZJsn@rWxPBy&oT3J%|tI~=5c}}6-+oK>o7t_rU5b%%`_+LS(6IQT*1K2UNt)2 zsAd&GcevzXW^0pBGe`PMFA8|{o$4VPWI8$zmo>~xa&($`kKjiv7>A2}cL>kyMn1{o z9~p^e9wBSBvixKJ4hA0801ua_RdbS{GgFV5!%afXR3O;UqZc)LmZQ_m>HEcYZAYh> zp9uZ}5gz+?#giw9A7zS}*N~BD<{h$5Da$wWJOl4)fM(LQqtT$6tA-Ouen8?0A?D!jib}dv&>wtnKl7D zMsS3J31_|S(`96?MMk2To5&h(QlXj48EEcRQ^u-UNzj?e&CFINp=J*Em0slc=-;V_ zXprIPG_#tS*{lth7j!n8OYlPl6HcB-3>P!~kdbKSVX{`4RA{CL1CMBc$4qOh<^(}! z=4xgRGYK_w3Bd*)J;mq?9i3)Q^%2{(9G$z_j|2}vgcCsAIrxXe#LP5gB$}B+)=6dg z6Tk!p-q8Tf4A+igeeJ5@G1Hrw^Grg`bRc-YM{jKO{RG_vP@S1~J37srPq4g#38(Uh zhKiXjz0WnX8yMy%lL|*>Ed$>})Fy!W>H)uh;;NyUSD3k1Gi?GGN$@p~KFH`7IXXwC z8#7;Wbeg%DU^@lFE1p~tl&6_|$VeQS;$(F-snE>ty~Je+ubM!M!0(?Bbi3I`W|lSy z9hr{^e#I^71n{=eqmE89xtQ6~B-G3=J*5}P9({*;!27?BPBW{Rd6u=g3E*9Vixdp6cyfL64 z-{gwtv3tV}Ulh|%BIA~jkvkUHi{xmK!)-V_tzwyAlQtk4JQQwwb7kH30unYtkCVks zk<3MiWJP7^^Ymk4Jk8twE5ZUcl&nvcB^77H0{1d73<0c`%G)v4#tJw^2Ft?9 zt-S*3Nd%vsy^xW+!;O@S1#&V_Mg#2b5jBR-zlR$*TFHE@epGzSz9;PCSBQy^EMU=K z6ZFjeM-su9OkpI;i%gRU%F1A5iWeCnTk0YmyNz4A(YBN$lG!J=rB-B(RF-tCZ7gsV z1J|2eawM}JBC-apDY6-x3gYvxls#~F%m8Fd#Mmc+;q%U+>>m}?*-i!mRw66FNapv5 zi0oGU%^G8c&C4D$e0dK)O zM8wxTEH>vLBPPy|FE1e@pOfL``WXaQs0=N>pw9693d|IXT&X_J%q-0W$dzg@f&&!f zN;N|YXvzX=AQ&s)7ID{r$*ETIO#`W0nP~TT-J%39^mJbm-9rzFu`PExU0#6y3D2X! 
zl$8MWC!~1^(oy8T* zJOdHVw`y??!G#Kj3)*#3MFzfWHZ|xIR>9r#5i*`PM$F>fL|=^;j}rWkrP{rjJ4F0a ziWdpRX|XIBJ)I0K<|kM|L0UYi&Z5B-g3jWu9@30bj?Pj1l;9R_$y??IQpGfUS#D}D z5*hv|zC_0R#)w%QPIR%Nwu{_HaImG?8v2ZgH=%g-P@ER8B_q?x&|+nRjTEHC5@zuu zg3jWZ?qad7qtoJEf=9U}ShU4&^3|x~<3VEa4P^Kh=acb;F=7^9CAw8nv-lLj36^T& zWkkFO#hZuXwAhJ^*PIM3HYV6rL0Y_02QM0YOVC*?#msh&PK&t-7Ev%<64#I_wht7G zD{hasxCzgr!9no!RZi#8#Y9giY8Kxj_@1TO4@@@_@y98CcPLJa1IbwGWN5KF!G{&3 z#a23axIdAgvsjmz{T-baFD6)B!Em7*l`2l9iN#&r;w>J=bFBZ25gWy=M2p9ZuOhes zn_xHWp3l2Pd=kZnhvKyOEEzkLflkrlV+3DRkQV#s;NgCBZqQk5$IPc4offYn*xahH zn>G7O73Z;vGhO2?79yjvF=7@^APet*#EXAV@Bj$3XyO?no;5)F_i89ki|>+gS{Y{X zWrFh*q{Wfy4A+~vL1(c)GiN$FE#5}(ZmYuHk(wY?)M6E-kl`PTRmo^5V7LI^AyxEc744AWTfCEu$BYrP z*pldoc=37!Z?sf=y`iGM7vN2*7dlQ|msP(S{!P_m>#7l^H4T={J z#c8nu8TUCES}a1aih{IwMx8~2*9kg{KXsO7ly!7k{DRIi>1usGJ?)x z4rX5K=(KpSllVTtEy1F_#n@b`SlLf3&P0Z9@k27U7$atJI? zDBdy@r^T*hyyawQu{psz6r{zvI(X6GJA%$)S!Q-}bXv?$u(*PFzcRLj>MK?3zE3Q! z?PwMwnMcmorQl{@(cloc`tl#i{8}Pd$}VE$H;u@mbV)2Ql>zM+dWkniUq?h%kCX5> zYp87Ser_-onc?a&l4);Sb)&+JP};tRrzr!sYk)1Nt%Hm2KO`tE;1#$FnR&NK=yjgF z1j~E$W7;_U{;{KTowy@aY@hGw?BH60UqXbvu%F20B6Lb$>0l-@bSH^q;=>!fOfa6T zS;o2vi)36Pc!cO`MNu?A6nziDp$gJod#mpcf^LKxFmsTjv%X6SUa4UC#x2*eXYoa^ zslm@3!ul4(>N^SypU*I<@QvGV8Tb{Vu*+)JXp^GBeXbh5ar+K4PiZF1@JAme68ylU zk1+b>j?QK4{mgvZ(YYM{55c<>441=o>Ej{ zS=!n5V!5P(baqfHiU!XSls>bxEzB(9=qzn1!B1F2A(qA!^`zd?;^7EdX|_Khx_UC~NjMV@Oxzh?M;0odXaw{{RHwoQe>Jp54^omCRjG)_J3Nf>pqjP^b*-qM! zL&1cF`G#I%Wtw3nf-@cvqSeSMkvrlYgI zjs$zymg3g-m$Sa}kYV*%vo9vAk4c5iPG;Z|ubLBDL^ODXpqo5)ww1-e%_waW)*G%G?%5A8^COecW_KYt$fMt4^rHlwnOe;3>*zFd5y49p4BwOI#foG1 zh?(ut(2O;EFEAVr#)>ujOQILL4Y1jZ34V>u*xWc%T}6X7%yjiV%gptfX*4y3C4;w`!1sIxDBw`tqJy0ko!_Y ztM4a*uGy8CdAp-?XGkGfTEVbBUU}Jjm(;hRRal?x+1r5O^Kd2=HhVb(+aYR0|E@ME z8l<{vxM#n}%Q+q0*V^`Wt1&3=mL7Da7m7)Wreg50z3)E?vWNb&XE z%FIkhXMJ@EHnJ^QeY_e~nDrgIHLTB?okZ3xCKWb&FOo6;dDVQSMMQ%~3A$!4VP-y) z&}PpdxB}{SV7y@ThK|l=KhDf7&9ptI7r_AvhMx)HMXg3Xq}i7uBeB`F$!ck=ShFu8 zdav66_v~bXmng_)pU|@K`cKg9**kBMMwE1PHv1ETn^^;1Amgr!P3>ry;V=glZZT1F& z0k^1g;$oxU>FC_Er!#Y>X4;-Tmf%DM6E5`KgKwry4O$|@%viHKlQqCtv1T_W`n1~s z_w36FHdm0%E@SnrBujYog`d2H5Q71h-=|HqE`OuJHQLOjqBF%=}U_ZD$xxaEyXs zeY|Sfne{b7Mq;yDkoAH|h0U(bKufQhi>;b<1l^vUpPB7ULig-rO~rQ+kN$&thz6q^ zoqP7D%*^fRZ1#MDOIa}HM7utjB$s%`+$PN)fDAKZ&3=NciN=aGyBE<96t$h9J;8no za$ma9>f1-qHM<%!?{ak3cOJoV3WoLZ`su;{NPU}|g!S2;y$cvVpJq~Fv)3@N8=^M! z^R!9${YO^~_w32c+@qOhW(2`$9(|zE%Q!ll-IbXyIy%iXC)ifOgv+qyXeKW*5}RF& ztQy9OHTz6svDnOQfP3~{f;kjqv%k`^@cK{CHG2s&PjYi^9WaC7`wE8j@k;I!ou%3L zAj9gjW9$ z(b??n1b>DI=R~_^%L~HqcM>xbkzrULmEGWv~<&Ss}GbBSi!oY;@x5Cs!1O*ic*&8~qAZT7&<{QL_Usm8cz zU}L%1SD9#kCw{qrMG00{kd4mPlJNaM1Ys?bS#4i-_U*Nobz7MlENkSpbx5|g5znK+ zepY}1rUrAT$3V9V7p7;D?BYuihkAXm@_GkduT%emnr+1c3= z=*GYh4MclGqJ>HPMo>!5Xo>8saWd6joGLv^2FrHHD@6%d2`XK#N~k@Od4WVw>hIT! 
zt0v*b&z{x5_YC|2qVM4|EgknihXt$`2FiVxjMLmaFK>Pj3%tOE-PRr8Ej;3 zVZg`U>!c-DhEfOq)W8M?0@j7|KgcZB^6~wDq0ACwXU!EK!m`)j2o)k7KH$cMl>27Z7TC;6m~!2*-%YBKoQ2pFEi zOzpRv9&{6M6v5Yxjw%Kly{x0x>75JT55dfdnhAP20dFDLUO~R;azt8Exs9}>FoM>O zocQ`V8QM}L+-WM2jD4|oBr#ChE94j5KJNb{$ae506RtO1Bkd>?%J97#1Qs!{nOnxB zfOfoPGObJo??uY^GjpqE^7S9!!vrT79qs68bQv$Zg$~aD59ZzkuBzhwA7_<&1-(Zl zrKF@pv$Qm^QbWaZ&k9AL#4Rhkp_vP$l?!$;#T#Y&X1ncHwy9+nh=G=xON(WTTIsoB zrfDu&@OwYcoSC`jTtKSt=l}ZKYr5QLo9B7YGjrz5J!dAJB^N05WP;-oCiWFX{aasD zH(=5G4XUk)RXcKgvs73 z2$Rhhhhp+QXz3>5X^mL2@2p|Q4=^e*d0WJgZeAn3(U?5U(mRB7FeWpZ@rX17OvXzy zO{E!uNd`+kDNP7WdNT29sSZp!NcF8uRG7rG*hyzV7T-WWufJOl|RCeN}K;pxC+J~N&Z z494VE0R{F~k=|%bhO_iyZAN0!n;ALM3^3^^&3wf~g-HvRyjGeJm_#r!QK|!z1EO-g z|Dx0xlWi%4NmHfHn3OSb1B@^x@-gNagvsLr36nXHaAH!-T7;(qlRReJBN&XySm}*h zdZRJv$I=V68Hvex%;+!80F#!|%xWeoOd?tG3TZ-MvcEG$>XPcfTe*5 zBQc3)Mk{Fsm>d$Zq??aRGXj(Cod}aQ(uBaIoQXSNL}K!;RL@cBjL9OF{7OhlOy)81 zK0(yK`mZ2NM)f01`a;5q$q?2eJRO*HWya7TZ`w+4HZakKNkf(%rp-u94tAtSW2G5j z@}r0a&%a7D0+R}sY$8nvOx|YVdUjmo$um;DzfxyR=CkB#At^DL%EXz1sDCeBPnZnq zOPE{$2`45!Sc~vneY&Zp4Rv>A!X_slrTHH*3ZXCfA?e@HU| zlXqD1Zy|}Gc+U0|6IV!ej3;+X^%SMfnB=kK8%mw~&S)myB#8Q##vO#oMSTd9(;?x+ z84!{Z zlLbs%EY*R@bgAA-sWT>HSn_eD&X`=z#7sfdzo8u_OwPKDFmXe|iODIfMR+b8u8F|tSuo)oD{KiD~W*|x965N5M$7wU2EJibCoGuu6u-h}YIXLt%a$|h1NF=Ow zr4d%^&aHV_Wi#m5pH-Wd%PK3Fu>}=ypjga$QN)*SUZFjd%PI?4`U@dVfy!l-$;|Le zGZ=qIOEV3m8L_O=pC#{;Cd9HzXC@Ao>hK~-s#h=(IN@1VVB})SOO-k=s~kFqqWc>x z1i{NHod}Z8dl4kdz;t%0)vORFt?1k3SC~;IEb*e;Bf|Z3v!`@KPh0L_=}&}o@NLjo zX3UUg07tqsbCih`58noLWyw3G2{H7xW?~Pi4mg@fb!xAmWtHRYDDo7g&RyzfChmt3 zz@e8_`Vb~7dlDwkLBfg28?1FqXi+focKIP@yeSxriBCX*-NuCnhRPl38-1G$AnYF!3Cz4opOQV_9XfQs-gjm$M0zR!W^Q*~G+eVT3V} zFAiKyn7q`3Fu4a3PD~zWEy5F8!I;cp#uI|UnA{?uz<4aZ(U@eg^po0*#H2ejhDkHP zq@6VLITIBo21^c?CIluY+ES#aNOfQm5S3&98>P;eY-Y(=rOudq$i#A3W=!N;D-#Hl z2T}=>+ack^6_)%-NMelQJb93bPYI%4R(X&x8Q+~S843v}CZkx3 z@N{6(ml>mjyh)MXe9J@|CM{U{T5U#RQq`IwO^{}Q$vzPa-hYs01SVgw@(@dYE+i!;1x#EZi24QHmkE>XZiGosNH{SW#9D->1CtAwae0t8 zt)w^QOtfJV$F{z%W z)ESdGEV)uhN=zm&@m4|9&nSOLm|WJCFgX_zPE0OhEyB}*$?43vILMn=>CM|rv|;kw znG||AZAN196*CTS&0;FIM#O^spQRar$r6@4A|w$M-v&Lx#J8k6#*?{Hy@OI`Om1e$ zmy|m9ovWERRuJ{F$|l03^jm^{joyM&~~#K*)Zr8+Q~EY%Z~I%9GTOFp2~8IwLtyiySL zvdYhdNz01}lL$ySF=@BL>3z%rbq=KcJXfqO%H<+=OYZjO+ zl4dTGW&|dMEV)rgN=$BHVv$q_CL^VKlu~C*`m*F~rOudiVq$ke)XOTz2$Pr#36uS2 zgkth1Xn6iXn$mN&oy<4^qX3IX{Lh6GIRApBZJ4~n(tiu-;Cbxh%y?It!FV!Xn&~Lb z2uvoi0nImV#YJl3^2))W?D)!0+SIe z`Jyx-FzL_4Yo$6cxlpQq$3%t6=`5M8)EN^O6PpU6URFsWOn&T4n3N}mVzLD^{QfIV zX-wW{##R`Wn7kxn!1))_8;yydrN0!?!I(^8#@*5kFc~Aw#7Hv&lL0Jwzce8*Nnzp@ zQXQC_Db?39QDNd{$;*^FV{*79MOOtDg3!w4B#E@=wm)>Yhrm=LXkPgOV6f>qvGr(l9G;@fF6cpbBc45g`(uBaI6%)Hj zbzss+s=uq$c|7^!Gz!0?QfEwdFmW%8FedV$)&Ydc@=k=wQ;?9DRD2_rRbFK+!qdX! 
z4U2IfGhP!6#$>jD0>3v&Z#;zsB#oyEu4C!fwV4|&#vo=~FU?-92_KI(^Nq#$kQwDL#u#B)DY|ta2cMg6u9$h-H=SO#A~z5FmT8PQa6HPEzW;tnwC1Rtibv zB`>QiV&aQ}$jd5|2$JdN6C|U-l<6a%RvFIT1nYbH9Bpe?|b!xAmWtHbx@;f03 zIJip{Gw~5Y)XOS`gvs^i5hj;I!ih;XYdtGXX-s-Dl6yTpEe^g`H2~S zbIoEZw^_uJZcdkG1SapZWTZ49Fj>sRkEA*tjJsc|cT?(&$?Yupu8@?N+`z@&Zf#CL|FQ-v%vY;;T{}n9P#uZIn7=GL9vmQR>`xhA=Tl5cRT331QOiT*AZy z2`45kS&Q&=U=qQM#2{}D2q*CSKND@3e9??BIYXO~n5<;Rmt3>JcAvdsy9*UjLA@zyj!U=COw$gUl8@O$`^!5{5gcliKd~L#IP3O>A++^ zgz@`-kT>56C-D0}6K$B3uyjLhMq=_3GfGhl?DH=l4@onr(u}}lCQDWbNr{PY(lV$`aH?ib9(uC+c!6#* zrFwvg3X^kL@_MDtn4H4IR)VOPRsJGO_F+RhP5(wLMpV+V{%Ox_hS z;Qb%zjmG3rmfj_#gE5)SjK`!IU~;oG6EDpOOfp&WX=y@WlE%d0QXQC_FV(+fqQay( zOQtJz#w47Hv4W_VRg6?%vJE@KW|<#*LNWOaG_03PQyP=EnXw*5B__{^7;yfP^hRS+ z$kH2ybTB64nK4(I0VdZ-Ghxz5ax@5=(y| zq=PZZX2xV`2AK4fX7(|Wg5ukt^H_44G$AlaU}9&f4on(I_1Bd;kFon3QTXkYI%Dz; z6Mupc#zgMP*_AL^+L|zV1QJe6o@Xt>(}BqXW-JyA#$>vH0>3v&Z!{(&So%e6Mvk$U zF=M1O157TEX0|a=VR9-H4frJy2J6MbGbYL=;88ZZfF-e!+9AzSc(U^2$=~>#0#H1xNE|g|~iCdakD$R(# zbFd+We~C09F!_#&M_>f*u@|2TDALWzN}VxznsmUMHGG$Szi-c6XC zElmha)-rJyj7UtDN%fIRoiTZeCASGliAfO?9}qE?WGW>m&c`<-rCF)B#kFAX0h}NAx-Igr^UE|8Mg=qKds^ghn7{w z;B&s^O$ff-F%vfG)VB4X**&F zl5?dAv76gxO#B5#xbcD0`%-<3Qs?!MXIXN)kc3TM54oF(4+|o%hb*O7yr)wvSAi+p zofk4d%VC8Vq!s<9>ZQyWAss;+oungQGm+xpcd!#!a;!9gBt{}cGVye&jyMhq_p$z^ z)H#X2X+SVEQ|jEY*D!G-i~t6`kWoUxJdsGj+zAP1F!!+5Hld}i@DR>t#=U~U@1|ZS zy`lCJ%PJhq7V^9l?n9#R>$qSJ0|V98307>Kx3`Xo@gg5cLYgCJJU_ zOA6*aNH~KjWvy6gO1I3*%vg<@I7QGhqP^iw59JL9Gm9nH3Q05yAtYrmiMQvCv@&cRG!$t6migBi)h8wF9{^B=% zIh(a6OH(?S#>{9Zy+O!>S&n*A_lB~D0L2ITLfXzRH<_?WlY=vBRl}ecS7PRn8y<+m^qMe22;#h zUkk0^0U(bV_Xq|L0Ar;$ZsiT9*&vpDP?`|IT*$<$q&k9WE!96|qUtlTEZJA7b1+B3 zDZ(&8)VIg&D3}c`D42I4;S8pPwHiuOx@BHsMk#8NedZw%1J2i0-f%E8S+YV%%0A;| z;+;|*!DLDGznQ3l>B*AQl{yD=4ii%ZQQz05QZQj0%$~5&U=D$X@kyG}!FHvt`<{)+A|42h)fdXG?F;XHEzwu>TPgRiF8(`UvniPnr;EwvmY@*C+;n52gBb zN}YpQ%#uF{NjU)A%f!b7QU5?chJqP)3I%foB%Hxq!&=WvQ#zP5W(=3!Aei%|H(xSQ z1=E}*$4C<*m~bYxlGYwg? 
zk5cDg{`i|BtOkpNk@s%%DVX)mD42I3;SA#@Qs-dWF|o5C>YJ`b6ijtf3TF3Tp~3tP8pbDS zN(b`|GY-M18~`dr4C&@XU*$MqB9(`plogiFET>CIVVd;fW8# z-O>*y2ou5+p%Y`5#rT97KXT1tAb3Zb8LrK|OVYT_d4#1m3u!X*gvFS}jQN7Ww>fjb z!EH|A&?aGF);28NAQXIu^SAZ-es5OxYpe2y<95urR62(kn@i_bF_B}GKOF!0C&k!D zn~^^p?_|aa7~^(F!+kD7!}CAVjQHVrIZK916XJ*C=a~4RREHM}rTWE6oo|$;v*g=C z5^2LXO4l**Wkd(u!V$Je3(KL5{>oN8Vr}!y$h-t~^d~ zT&T^+MT0HO*vB=CCNB|Tq?Fle38LeEtG$OXX$lD^CTFmgSDMn8L^GpRkT-{f6L|lRi8f5WK1P_Fsm(}C zK4Qi;u32ERM4HKvW&|b=vg8j!QerZjiH}KjU?SQZH%jqJori`@mb_P~GbU+F93+U2 z+qM>s36rX$p_nveEyC04({i!;AcXP$UywIH3McUX9}{hul(BRpZAN19Dl^ug7TIkc zm1cTLGXj$Wmi$ymN=(KxajsMcCf7*yFs06z^k&IBlsaS5fr%FhqT}u_vJqjj_edxv zM?k~-f6|oBlkb^v6h=81c=MTX0`LE^v<;ItS^Bt;Ru^|HHayIXrP2(Bl0s>wy)+{* znZT0EqzPQy(RZ?$I9aO0i@s8Q9~0q)xFbG~C2v&fj7b6$&k{t(P2_K}gvnQbgkrJ_ zG`#;OO=(QlG25X2PeS)Qb5z@h!+{uhZ(hM+}B+aytW&|czv*dHq zguvu7CXSTqz~lm{zKw|rlT%qTL#Z<+QA}(sh>m;F?F|W&)rUhd*$5h*mzSnAChsxh za~PGFJTGEMHwQ^?G$!*{db5xY#$+Nh=1VicWP~&mDa{B>`mp3Z(uBaIBNGQobzpM3 zRA0kHg-J9^_EPGM$$>)@-Jf6~2>rt`-_lmN36nP<;T&T>U@gKES`>^NPo8DQN@0#C zf%l3SaNe5qMq@ISrB?~*U`&QHBTt$ECIh6I-U~q0||ZwM^UsBaDeWA%JhNpN=6+?t+99llxhV@N{5u8#5jd494U}=}lwljmG41 zmVQW^k(hL5#$ag%m?TLv6--o^xL9(iG$AlKbbumlB-Me*PEk45|CKspvW_Jql{#bc zE)ze3WyVAvQNj1={sx4}6i7HRnZ;U!rvsB4m@!*07?TX?&7Vv}FdCDKS$d8(BQZIH z8C|6rU=kFH_-ebw%ge3YSWAY3W zUll~h?f$fA!ekUAoS5XY7UAi@WC%0H2YJ&=db68}HcVQx^v&9g#H1lJlBF47a!ka6 z-#?@ofys{jgvq(mguvu8CjJ5=5|j6(`WU6om^{mp+l8dWD9T$1>f>a-@)ESdvmaGtx5|imnyi*YM^Z2~@a8)E>auFn)m|V(Qgr@_O zbC_{ikT)%)Hy<(4hRKP26nbB6Mq;vy8GmujVs5`l#FB1KlV$`aD_Al@nh=;g&%{+y z9plM;QoXBEXH0Hm$#;aL#N>J=P83AP8pb6NgvnWuaAI;kYZ0CfOip1&#~^Par8loK z(T2&MO2Xs{#De`Vr5SO7vLGjX^eI+jAt#%6z6W@AW5OfG&^EQOrLT7;*C$r% zn`@;Rmo_t&q_LvXm!)TGGXpI~2WDI(7`&p=5ghvA_-uTxSc7Gwj7;orh#uiL0hRyn z1y_U>uFGpxycmx+g@qLyPs;NY9Czn6TyRx*|H5_o`zxc$W5W9TMUqMV4GqbbI zUV;g0d1+St3u01}^II7N%R39(+spuKAW~Js=vR zKlJk}{GW`>89l0OUvrmnV@sE@tCh>Rx}D3|*vVxKywGK=>E<$e^>i5@T-(2p-jT^?+WFxLo z@xZY6A9WdBUT_(&z3wvFe&8}*M47nKWo$#~_@T?V3uVoxE@L=+Z@SB6e2enjPc9?( zXP43W7nkuT%8;Wj<7JoIs6N$g>}=~6ah_uT7an2z9vQ`7qZ`E7$p0r_T<%~q4P)aP zHTD=ynl_7HZ~e>4uFgv~u-u1K8ncJGT&{VfI=gsj7&^E)5q~4OdVGk~o0gDwvCq81 zcWnH$?w(}@GcG}p$#e7Y(W5_Prx`u^;bUsE9$O%s7gu>obUSSCi(9>Ev2&^nux3X3 z%8e{@!B8Lc(+=0I1pO=Iy4n_*!^t7-%kAI$lul@pBS}dB>2?>UFxe#@vp@0?^ z{(tbV9#+C6J{eir0-Wmrjz2Fs!JjyHZ9+4{U)H7?%V5O?y*9Tt&{^UsmNLXqYR36? 
[GIT binary patch: base85-encoded binary file contents, not human-readable]
zRapn9vJOyX9iY-0pvpQxm34qBYnHN?rR-%Xds)g}ma>}4r?S!^$F+7JPtErWAr zBOgSM95s5(g0UfbBGIzW32Wv23H=3t?d4C%1f2yK!m?SD@Z0p0QwBP9EFmgrT216}u1fkhC1)Q`sEq5wMqPSED%F`BU`r0a-Dpco>D;GN=O@wM&W5T)QIzXPtjZsg7vSghwD@IS5JY(9FiCl6Dg9>a9%34rYT3xaL zv&e!WbbO_h`E?i$=kqNW;7FMuvs>8MQYV~2Y8XyaIT+JiR-{8|DrIo0jN$bA4?+fI z`hnZQpJZ9`!(|G!kxC=5?3Z`Tylz=1Tk3?%LvBIo%eLn-P;1KuRfSOL&!Sv1{X6M5 zvMrrV*Kts$lmr6%f%tclka_TZkTdzGA~wp~@0MjNZvDIZ)Y!jQH|Pex8&2In$RE4T z@9K}-vfE&PkQoAfX9x|T`P2Gm4ngN(%shUx%)I>Va{jb|JSi~_I#7LiIk~4Aq2m1$ zZ9DEpF17R8Spz$3U}p_*4J6WnifBD0pv_4EXHH7^A?74wves%h&77>Yh}K%|rWun> zT3q2GxsJhd6&p62N)4%_x#sCwiVa341swGK5ac1QwO~K4wTRYQNGl%7LtMvTxr%LO z1f@4K3@2pM(IL=x%nTU<`;OLPxrzbW3(^AZX?FAomp3BjIHht?t>|zG;=8=HbeoB&{XOewgBcOfHMp=B%Cb{-(mWTQR*GV(7sp^F3 z4n5j;AsJol!vhjp2mUPS4!zoUlN#7}ws-sPnub37+}DSnEB&7ByJ{WXZ-|G!r_08) zsw1X@J{S+_#7!kf?)_c@TJ<`VF`hyPKJSI(UZ^h&3(1G+9t%6eL-vOUtpvI9Z0HON z=?n|$3=8QD4|+M`s-DmJR3idtFQgL><-rTl@laeZplKY|bxY%9i1yoB*BOQF(nR-m z_(rCodID%KpkW7Ng4An0MfXPNU{6+kwVtv|6HS}g%U54!M@>OLKcK0vX-h;W)XYQu zE+qHo8rEqlIwOyZCg% z2y|Lc4QM0B&7plQ=`!-%JQI5=GCkHEYmQZPuA6J-D%#hEevbBLrhQ++Jf$o`?rG~} zUt4E7@cF#J$1rcI9B|x#_HzUI*pRP|4g3UgRgR$(*SQDn=bq^x-Q1LPRSBWv>C(!$ z+RI?mFC*H~_aM-|*O>g4K-JY`vyG%+C!Rug)N<3Gb+9j{KU#tIM$O-s3?(H28>e^ENRFCaA;LOQyPWZiEsltt4R2Yfqi`f6?T>q*9V&9(J= zFXWR0xiLOe#__=z)n*SJ6YV=ORQ3r0je5h|KY4+kpy@XKG=)7p`vf%X_~V{2A)wI~ zft`3@CmC;{Mjzxko|5Ol&WOOyuA#ZrY-{*)ia8>nG2-}sZVm|KsDFQULcIk#7<+XI zAP;Da!@fKoq6ZjiHHmujSD9+=_Sc!nk)koS2Xl?T`c(QVgvNQTh5o2k!>~U&m@z>6 zy$$^_(21wW{kewAt5%mxtJNjOw_vV8d9{C{{(Txg>C+rHBQ%C9zh$|4#$Y-wb%GTe zHu3XYk;)sk8LV~n{6lrE^TM?MG(vu^dji{;tU1_>YZ_|+zn%jRKpkQY?CW5S>eDoT zsb@3PXJE%)p&~A=$<^G)WgOtw1FgYyU#9gH(;a$ikKw)<&L>u>GBx;^pBCj`bMU3W zud>~2vz@eM5S^{)k?u%G&E*DYOO1TSNH0fol9pPey;PNTF&K_^(M7=G(gTq}@g>zX3Du1%%`9jv-!tCLOgr9^^wRTfE8 z?c^)DtSMh^jZ|`3UMVMfgp!j7=(y|+fjp|?j&g_F1C*TT;fiM82+>+T${l8B`ufA{ zVTxwo2+>+T!t&RL#)we;jEIKh(V$L7#<&e-J?P#=bz%l`zjsl6>YhV1N~OjDj0vh0 zAs-Qno2`2$*$lZhRXpg#uv6mmTRai3KyBc|0jPINRRkE&Uebu>-wrFDqbaY6fWi4N;)n#K?vm*`Mj zUyiuGe0XR)0d4DX1GMe|$eTYi81YbliHCf{3(38ZJg!GH)Uy|&<9g&Gc|ddfsyPKS zdbS*`a0X>tw5gIiBLbS+UCXK66&(#d585fugFFiI3_xR`9`hwF%OGj#e>_gAQJ-lw za?{wAP%{9H2?;gZC^`2TCFj0@xI7XFT%i5gNy%AX$yuNMVL*btjOc{&GDZ7x%%Myp zF10rITSbTDa(t9>>7jDINN{TtolvcQDUXjDx{MeZRP8B=Yh&RMBir(riB)ny}9`tJZ8x zt2G<7w5{bdCx+y)cG7FuGnB`1YpLgMqN5?&R-=g7&doNmWgg77uD^mdM%b#?VGYwx zYk>B(*`K-1DYo6#8?9Hzi0hBNsLv7JXvqf@$~eOI<0>8ISx1GSUu2VQx?=6TF`6^7 zgLxBiv%N8r4=Ad`V4PPv*<^=xn8KMZ`G#5*5$)w@4*F*rZfD$is6JS&Ro-1U0^_fx%w_e|xTIP)d+97^Lq+?OvaEDt@CV*R6LO`IK<@_ZUv z4Fs z9zl6_^crcTreHr8KBh)uBNsKIIF{?UQZC1(Jl7vtjXcli@J`hqO_e;G9MKp{>wk7h z(4Nn+Tt9D0xg0ac=o}y}Xx(1M7|-V zT%dhBRCdL2{is$#9R}rM9W_SLT2Hr&@6D?0%ImovlosBf2j!yr(dEhsm5b!se~8lg zmvZSpxuN`1{pfrWrR!hHW&P)c@=5-t^Fx%bXDMe+_G{VNvTw>7Qhk$GL#jXW3W(&i zzNH>Xa$4UqE&Fevj}WON!`TP2O@G~vXTW^ft6>K@^J%i9-idJ@^t)V=t9N27SMS7V z{ZF>(!kWRKyKOuRNGaqajNF9c=(#}4vHtP(>DkbiX9xOL$iCG!SkG7?`=E0I`+*K@ z`f}vWUkf2Wz8vo{RI5`xU~bhuFf#NEPUDgub3n@;$ol@eh2r}D03CmfB3j!-yV2U2 zE_(l_>2}kjt#nPan{Kt%Ifb0)Y|^JUW%_xUM{4!l&2sf@#eSmmN#nLki+oVGuS;XH z;>aUY5y?O1-P(CoA=?lAnU~vj9H@$r$XbA$|RBll1kwO*{{&H)tqtc2Lh`KlDB! 
z^bD?78_@UP^y!@cS-j=>N)*!+5fK%Zutf|9!@9y_Nt`8VO439$73rcG&g9-jG#4$z zt~jZwrO3d+GOa{w(MGft?I7PZ#TMBndw6n7u^5&IcJVhX1dweJvaap77aasaM}+PM z=mgjupyi!$uy?MYZy~QvwT#OLeV&os(;cwlPv31Z^kCfDWdDkh=52quduVyf6@tIamempbqw0`jsyL+h5-I2>ndvT*pe zXdO@Eu-?{WevOUlkfq6B?7+y6H7&fSSAh>m4GTyE%1vgfn9Ayy*LQr%Mq^QEfEJp z{}9Lzh5Rt&P+wg|t~eYw;jWB{ZV=PN5y095r+{DK_oP1_OS)|_4*hW)nWJvpXQ|(A zc^xTMilZ>b9gU!4#42&DI8GcNqB;w@yD(^LH0IHblf5Bte35)hX;_Oa%8T%7b%!NZdGBVe%D6ZD?G|w>)70DkkrEnay1~X6ahtfEg)$EPe+T+4 z_1rMsRon@?cZQ??kI;9CyTv`?UT(#8;y&E^bASGSZRfv&gVuRnypFYkCEfsj zUA&1EgIq7XCEgbAh<9j?Wud@Mc@pW=z38QYVR&&22A z3o%2jI=+P6&EhN0z{k+1X9=98^rw)_P#;rPl)?3@o&Jl;%C^JD}E8bir?ha!YS{^|1aP)aImNm zF``C+ungNkK8zUf&B#el9MJ57@{Gp$z^U9Emtqj1W62nvK`Vjqh#6jobcBAmFX?BA zKfl(XRROJcf;A6GgOaqeo!T8pt0&+ ze~m|LJ+1X;aok8Vw5}X^sF!F7)qPk)%aH|s0@PT`HP&*Cb-QTX#5hVcWk@%g8qGlI z@#(MjQHn=#G!EyUa?`j@bUF!R7o#~t3&uL6*mEK?6`YtVB{JJeZYx!VSvUR6J4nTV9PjR zTU*BOXiO0VbkP_wMg7l$KlxvHZSmK0@K9&MWXN6yeu!Y~jeC$W3^#=gH}H`ed|@PDlrh>E1Dpe(7&<25 z#z1~Vb5E|3XN)z*8HC3h6F8-CyvBBf7>GRWjnpR^_?fCP#m7^PeT-?wbVK7A#tdVo zk#Ed0W*c*ieT})sJYzoae#Qc0f1{6Bh+Gx`3IRocVq+2hUkuP8C7??I2lydNz?A{Y z0ZRcDfMtLK0hNF%quQX7)flymXCS@VMx8+`fqLU0W4Up#aR`U>7Ka*#8HXDU#u3O% zDkr5nF0I&86BL*j`J^4dpG zPIca>+*H0*#-&LAV5EN;(mzBHPxM8`<*;`s>}`O(!(i`n*xSJMBkP57MY+*A`8)kF z>?!)a!WG6w<4WTy<7(p?BTUm9#ZAVw#&yQ^fEyU!XxxOGxEqa|aZiJv-D2Eo{KL2n zemC8?-MGWJ)3|F3{Acy-WO`Y(4x>>HS}+{~aNpy)oYP5eH$Uk(fq68nOTAddhg(c*c0vc+PmpjeE8gDCy zf9FU>E3<81e{V`Mo%ap-&I#|Yc(nh}_{jL!_{8|s_{{hmBhnXuF9Dm4uZ*vaZ;Wq^ z@4$Tz_yO>v@e^jIe;NPA+#tlyfL{Rmolju#S6KYb5GGkP@PgF@Y&Ih1*GAN|OtNT0 z5(E5*AGI$xT+=hjVjPk*fVQaTBl4OiW>ZtY?V_@LWc;V8{O2XtCD!@<$VfLoF`AhP zE;;?~zd2yD(E=sk)ojTn&wwNe_|a&ElD9V7aLL<3(hl&yp;R9k?NN#jW=Af?Zpc+9 zz-D82l%liQg-g*Dl5T(>jqWH#53?tiq8B8+0gdaWkGZGuz36Kagy}uZrGlUy{_-_E zt2bhPWNbEm#Cyc8QT@#RrhZ?UX$~-d7FqC#f#x7{usOus)1rfjN3J_2)D*%?v{FO#E!!{5OvszZk#xGeToqzsF}9 zP0~0e zZ6(v#R*2TnShpHI`_MCxe$LVIndZJmzS&320?svOn>pqJV~)8me7%pDYtA!CKGvLX z=9%-1{mca{CmDrPS@g&FOrmm+l zv)o*2R+!7o1I<_hyjbESEdd9-#i|RHcv56HBU27H_tH7G|w{EnrEBma5&|Vo**@*diuYR zPkP>zPn<%8>p3i^_V_)ZKC=#eM$Un8^qJ+@9O7K`6*jH&Q?{YUcxpnH7_%%{>cZnB_kb5TjzoFwCu0u zXUo43$dz7rWhZGGxx=7)&=BNfPt-;@Y-_aE4~J=4*oKx}Zf-EIVA#m`&$JG`srW0P z)75F}6{p5pPI$}lhuY}RjWqvP;$CT9WnOJwgL{*Ct$Ce!J#JbR++f~l-elg4dy{z! zZ^ON{Sic{GM#y};N1AEeZr*{LMoW_2Y2Jk!Zx+-Kfr-frHHo8tcO&YkqENtiFEV7?5$9c=8If9#8? 
z8e6lur5|qFf2ZM1%ygr#c)+C5=s}E1zenk`A2J^{9|1go+2p~$@V}0)$1)lNi4J3} z(-=Q$K4$*YeB6A(eA0Z%eA;}*eAaxus6;ZF?%c=Om!F=Jd9l=+`iJA8fpCl1xfR<0)I<{YUUOIQdrn4f9Qt z;4SlQ^BwbDQ~%xWJ@b7$yS!z5fd4-h-A2Riu6OF02(0YR2)YIA|OoyM7 z!t`*I;bY`V{)J!^xKF^z(#R)*zw7S}lq$8Z#)HgHP5KMSaPu>h{tD8#<+jzLVYzO7 z!o19E^k487UkVO?+e=^e3VaQ?D>mHo@V~?@4f+nd6~6pW{c8-F{m=g5nN+_hXpg^p z69iwS($}demti-t((ER}wVPQ9yP4@Z>9M9xQ*#%mSuEi+i^OF(@K(c{H*e8mS6mqx z$z&^BE!EXpU4InbrcK+n?QnJI(6QrgxZ10$le+%9!*}1kbLTF&x^?T`y$7zY>guVk z?LA!V;O*b5SMT0^aP6^2zkdC3^;K7iRAw%}WHAr21scU-= z-`V~<+kZ!%|A!9Ud+%YmMvTbL9*JwXx<;vMdk^2y`~T?CW5(p*%F7!&b{wu;b&Xfo z_8z{YxBr9*6DLl>HD$`wsr%rXtgdP5+TO!=w*Sud-;wA4bIw_}?p$2wpMSvx7vefk zT^Ffqdk^2y`~Uj&7hik{uFEdF{PGRBE>+hR>e}AJcl7q(xbey>uflcBHJdhFi|cB2 zU8k}Ee^kC5z5h?2K4ZpA zT(f4)o;?RwzPk2R*Y+O1qqqOux%1}D$F*R={`)V)wV%2Q)U~~b@96DcSXfk4jBD}Y zl9Ezfi_~?1y0-W5o$bG~{deT`-;yO|W#zakDwZuf5Z6+5RjO-y58u)Ie^pg=bq%h% zy88NqaMh}7xw^LZ@EyJV4?g&iLk`7t_~8u=N8mb4T`SbJy@&5?|DEl>BhUXw9=USm zQMisdX4R@=aUHF$|96i6JM#Fyy|3^7 zsC?MJ80Uk{#Hk$>I3a9boKkiGc0zyKIl!40+dnowHYK(owlFq1HZ`_yEHC!DGfZS- zFQtAsUvDJ#Iq8q{_IilVoEqmd_e=Lx_f7W;ceDG3`%l%EH2($Mkdm)!swjnf@pqpOmt{;WOR6RiI^MPTYTz1 zB);&9k{!)*F+Vn3eC|FXHhYVbolKkr7wairc5f7~dDD_j&3rL2)T-VZ}+%ZFY$_dlX%^mo=i7qiAk}(;&t~{@s^jL+{N5CJ~uug zJ~2KlIXpQexo5J_USbcjN7|)!xt(o~vIpCv?K`8`CBx3K_jOX!n%jtP85@*5&MY=Q zGutKKb;_Kz?%B~|dx*Wi-QT{%zS{1K{SIfubK{%5YrU3MhV_;EwY%6JW-oFNb1k=v zJI*`MyV`xoEp?A@9k;tX(W~~Zbsu&2bt~PUoL25Iuh_fDz0IBP*0?`AZQX3I#Jj}3 z)1BZ>cJD_XuK*O-sd+51Q}5K!zn|-dIuBn18|4Sb1*hC$-Tw|cvr&HDGw+GEvpQO5 zMoy2M6}dQ40RSK?9O(Vxj#4=?ohALyTHBG zo$FS)|8iQp!@Wh`diQp>*ge#Zx}Dvz-ZJkh_d&PBZE$05H+Ol5{o#IjY#WbvuC7jq*bIxq)p`CrWt7+=@q%u>lA4h=@?nmxU!EY~3S zcX_vBH}LR%pOJdsh@Hd3_p{ikIDAtYUL+pbP1R#-RRB`X|dbAe|TQJTl`k+Fm-kG^2n{;onCv`?+{6k{9@kkJ?-7+9Uiaea<3Hs zvVX-kG`q(C?cI-k?2g8nisAcbJN3TJOTBR_RLa=+iNUz^WhemF96vbi5-$no&XXTk5|zUa&YzRP~z zdBM30()o>Mk^LI+gOl$$<<2yFsy)umvnRS!+B=d16 z-+9GZ;A9}x&z-`^qR2m;qR8UNX_2L|JTW4+n|R8-QatO8OIqf5F)G$sJnLQ~UhpO+ z9doS6j&%}GyH|L)n>=|yE`;F7w9qcXe z*10#jv)l^zd#9zlmsjAO@8050a;LiYA*Bs~&W&cTo_6Zp*em~TzVYnU!RlnKk4$q9 zaKCaA?m%z8ceZ=IJJT(9zja!;L%jXHbKRTV>FyHuYiAdCkhh}`8t ziFe(*#0TEIWYR1W`LV&`1NUC>v9}=E#+(+P9v_Fd%N0W?Q88Ub5i^|FKK<_&b0S;>)qd+4(@1giFdhsk6Y*-?3!*TH`iP0ZFKK< zCpPknd^`32yZxZCy~cjg-rCK&B(k4d>;B@j!(sBJ-lgtc?n3tyt9YGi7BpZL&N z^Vs|LfsqT%eImz2&ccZE1xB22c*HqAvNmPJdCr+)=h{a{PJ`B`(31XgOyqQ5i~MD9 zG;D`FDr|=|PWEp0o{9FheoYfisN|=5H+s)T`&hrF8O(R_j`JRkJ{3K~6XLn(I?N>V z(@#lXmAE+}>#4onB(}u30-u2zrufkvPB+DQ9k0p{>SQIc(6kJXmnP{H>`&}Z zZQ8ebfw{l=gZHC1$J*DToruR+Io7UrOS_fb+Rm_(_OaG+)>YQk*0t7k)-~29>v(Im zm2Nk)o7*kyguRP>jJ3+Dv1+Zuti!E0t+y;Hd*kb8NWTGmzSU5ii~#v&z=zui|4GR| z0lv(-+}dbeX>G8suqv#TiK7zNCvHeoTUFMfR)e))bY?Uw+AF$ObXv53v~RRHIybs7 zIy;&h9TpuO&5pM7+ItyZ(sMn}`^^vixtdabRBqXzVtxQ|N=;;d8Jb<`82qqm9wlpfdz-x9_xXv+uC?NcKziO7>3nG<%zC;TLyX z_rU)3)-m2H6EB>lEvG>jlJq!g6iT zj@xPWKdr~D3$2T+Gp)0%wbt3z!`36#`PK#2dh25ATkAXPE9-0P66;dyY3mv5S?f7# zIbvJ3ZO3fKe$;x*df9r#I@LPOI^8M(A?T8(p!pv}>)p(XW#k(P8mh0>t>pc>E z^1ocF#Ax^W;_RjHM5HIqnv<0!k*pg&fvQkuukL-_aeWt>9-uK=t zYqmu@NsqEdTlYH;IFCAyIS)DyId!r6*rwREv72Hy$F7TAA3Hd9NbGj!4(DFyKIcy7 zE~h3|8++J!#JStK$2rVC+&VEWkl%Ro8o=s9Oz8Mxqh`?y;tMac?FT; z2%XK+1AQ_ZdnP}KQR<(7XY6OuTJq|Ka|1`<+(OOwz{!9ku|s(lcC~J0w8rkg9gSP< zf7m^eJ(FF{?q<5v3}tvSc9eH?%DA*&dg{2ehnwjhhVx@DbuM!*a4vM#I~O}g#8$+n zIn$jB?Te6pALlyfdglh`MyH$8-8ngSO6-`}s@Soy<6>=`w$4ehHL=rTr^nVg=Q?Yh zvz@bIYhx3gNzPYV1xaArEqofghv&f&=a5bJVhgL9K}vvY;B(YeyO$~iQ4SnT-N>e%72 zhS+8H<@Tla2Kx~EP@B%=SckNZ#CaP_%rf&l?*gw@)QivTFKjxuBOY%OpKHyx{(-&d zbFHz~(atf>%ET#=>k}tOaw55r@sSCUJ#dOngE#_JTyN1?AeW&HZnST*Z?JE+udz4T 
zSK3$EbmqsoNQchzpff)%@-FsHch7X2xasb4oCLVp{@SK9LFoLC^mrn^z*=bCiJkJt zTNABg>{a$k=P2i5`$_v~`&hfsDRN5V2gI|if!08r)DxaRZrRHGA;iJfd~-i@p;O>2 zj+ewUtpV0)jaK!idpD(wcYpUNzSc{y8tnOCdc<=bzIA@Gb`lHhw z>7+SALO&milRzJae?0+s z)qdT6#eNN5C9gfO(|;aLA=P|;oXju|C*tglJ=NP8?fE?8d+i78yY2hzKFPkxvpD8n z+gy9}ceC8C#^3D;sq@~MjaFbn{E;UdjnVW*jHW{DX&sH8cq4jZ<8@>YH}&r2rrw>B zsdwjO>aA-~qYt5c% z_G&XBnkLheW-^j|q1j8#o(KO)`lsn1r+=3IE$Htve#q#bmYIgN3w-*mX4ht1m$542 zxQyu;Gc#U^zZ!og{%riN#NCOO$&BPh@%8ab5|<_l(hAdtr;SK^CF9kMmFY*Nf0FiT z+LZLE>8+B7MGuWOz>W}yM3{hkb6Ta9r>TC226GA_;7J<&PQDbXd7kw_*I ziCq%Mq^(N3EA8&IGti9K@YZE7>tx4OEc17BI86RYv zn7$_c+qCb~=A_R}f1=rw&3d%#+48q$2PYnH_EfWOX+6>grwvK76S0Jqa1vjof1Q3r z+KRO6({4zsN~=!0IpfxhOXHWvHzYPD>eA}d-pF_>V^#Wb>0hL6PMe-SGrdpCzAdBa zR(e``lXM|!P@cMIhqT?&Zi?R=&rZut>j3G*#H7T8#N@;)sL_`J_#;>H=Zs%6o&)_X z;GXn*)9+5dFa1l4hYJ3H3z0quA#+4Z_#9xd*5q~m%TjKV_F3IM})8l8v z&q}OKPJ^Uo7sBz|itGSV}eWmp+@#@mT^5*;!+;!hlD8Q-OUpFXPD=w@A7c5T_X z9f-dj($#>Ht%eG*?*FlOCh#&>~_k|arzBuVoB-e=x;^Y;A>&hz>D-Ls$jJNGwd&T`M-e;c&S zCj3R~@zjtjth23Y+H`HB_Pw@HTck-LIphubLVLr1hnt4a5BGQ7=o*mvWZcC6H@p17 zVDL1fhQSl4PmWHBz807lc*lCznrKb3W?L^=0WGL4Ha{@;h5tc=+l{T(&(?P9SL;J# zsnJ?%qeZncS~=}_?E&LK<2qqN-W3ln0xy1}=VNKTz?OtP~(J*>ew2yVY)ylfi>R?@IeP*sSuQIwB2Th54 zs}x#nePHdhez!ieR$A+=Z>?RCKOz;i6SVN^rw>l*C3-8GcEx+i#V@U7t6!7;%n zgTsP%2ImA{4fYS-M7s|m^bR|QYfoq#Iyn%&)pd*OHhkJd+D2MOE{pif1j_suJs4dY z`d{e7(6Z3))S0;|V)TWFU@VM>jp3K0uSD;S-WP2hZ5#bMurBaa42d74#i|P?0qD0iljNtRZ zkHag%RjpI3<>61lb%XVSJ3_yOs#~X7j*v6t4+TQ62j&Nw2G0vt;K{EVIVEy(q*}yP z#$D#`=)UN)p^2dhp-G|bsWrZq8eRwY!4vQljD`*2nb8-bUk26$J_&ppSQuFp373hK zc_=U{@JQs*NXuZWV29w9!4INKqBn&4g}w-`4mJp#8Hz-;Xr<_hQ8Q{qH%ESqY}0D@_nRS^!TVJ>W!WgZW69!otWh_yAa<8p;wln z)E57~&^Vj$$<|}3As1PVtg+ft+A8e}ZHD%|_MduCO$m8Ijl<`L<)AY-A^L3e)xg}q zBI_;d32Tfs#hPkKnykHNzHjo0Y;3W9vbI^jSRWWmj7zjjHB+;+vf6Rlea8L9xmr`L zt#+CAkTuG>!RlwV*RIfBG-sQym~+fY!O6j&(DerEJ8P5mgY~ZQp7DdZ*;F)Di)fm5 zw{efrAbMu>TC2Bpj@87v*t*18Vt#0LF}fND%>T^ytoN-Q)^FAl>qF~vYn8P#@_VGb zRzcex`ID#l2HNU+cmSS+r=d6Wf%{<$jB~Y*Tp4MXI&ZRynyd|f9Qri$5zodhTz^LQ zL~90X1uH}Q&07knr9Zg6bysoa3$l%ER#E~P8h!k-~9t=Jdd^z|^uwU@T z;JoPk==8vhz|6=Ck%7V6g5O3rMDGjTANn=8BiJ@{S*S($!tngy8^OWBA;HgrD}&9+ zr(Cdn@U%$H$f@XRYG`_BGIjVB|KFp#qSb?^1^){_6{--f6!t~_4IhvHe&`jSdZs2yF^(4PFqsI5a!>QtbJHsQwON0Ll-a%gVgJ%Ta4=)KX3w{)Q7x%2_i_wn3PCmEIzJEN|s9=5`jt&>u1?MeQ>5zb9@@u`Bm3C@Gb+Ei`5wn3YxEzlz5 z`Ly+nRYj|=ovzi>Znkc-wn5e* z2Y1jDoEV)HecpP(;+Ju>^R)}L23linm^Iv*5}X=bXKpl4kJgJ`W?gPAHCLElSYKOL zL^?$-r~WHKD?`hvGrd@(Pq<&WH~GyD&k4^${;K_^ouZ$r|EEd%e<4TsY3&*9OYJM| zMeQZ+U(!4jel+|*>K(WuEyEXwFW|aHq;{k_`92qVJ~V|m6~mRo=?$T3qUswr0t4-3r*1pkR)n3y)gpUo65062% z3wH>&MV=WsJ8}m3y&RewnoWMEhEETlg8vPXnT9=H1FbL3wPv;GY0>6Z3v0RciM22CZ)8e% zdUz6XuL@rs?t*L_Yk*mdieoDq|4k#)&orZvk-(R^A#Tn@NqPYsIx{}VXP94~NImKCf?x2R{SNB-1K(L%AkjEi7A%L( zK`&`uC(X%F6IRiaolKhlQHQ@$^Jqi7=D|nE^YK*o#lfY7_b0D4gq)R{r!VLvZF4vi zz9EmAb{@#pU9sImij*HHo}d6K5FdHu3Vgja=H@CE6s5@#8C>?QuA zq#KR=AAUCx|0~ipLb`~10W^g5q^V`6L4HZPXGt@eG+*L(CvkryO)KOn#H|ghiFXQN z9})L&;yp(ECy>kV8$i6Z#6KJ9CTS||bYs|0BQ=>JaA6uVV|3nT%riPyncMD$=K0nebvM}^+XmsGoz>|@2 zk>$}(qPK)@3#|)&8$2h}G;~I|QP`vTwN~22+SytY?K^XmSvPt{^kef=bFKA_wZZ+J z`wjO3_nYp8?$_P(-5Ke|`CKX;FC-|Zgh zzR!J+`(Af-|7rfJ{!{$b{HOZs`s?{?`fK@X`|J1zGy1(9?(&ZC?)Uub+3We+v(NJn zKMybpRG_CDf$$UDm0(%Z`WjAw#J=2x)~(w#5zzUA%jxyf^j z=T^@E&&{4X-n!n?y*0hHytTb&cpG@n^fvU?^VauX;BDcZ?wR3vit+|~hIr2LHu0Y2 zZR9=M+t_=ax0&~RZ*%Xt-lpEyJ@Y-4yeE1qdr$IK_nzjh<~`MGdM$6%TgF?_dxE#T zw}Q8gx2^Yp=ReOgtk6$|$uLuTL7E{wFHMuCOH-xiq{-40X%J=I2Ioo5r1tz)^5y(G z@&xHwsh#5r$58x-zy#7h2UB38G)d~jZzy+kbatFCHJ3h&hW@<%TXbjim+1Cr``{J9{no$M!GIK8 z99a@sAK4K3Eb@8eRI|Eyyjk8n*{o`wV>U5cnyt)wW_@$IIm2A3eXeccGizJqm&mEu 
z%gIm;s=#1#h}qY?!5nDbWCYQ8jd{lF#&^a>W0moRalO&k z7-ZaTJZDTZ#u`r<@0jnJnxPwWt=FuP)_qo2>ndxYcAGXryIbq4-Jp%s?$dfQ=DZqy z8g$_Z*EwT*h2daF86ovuEwexxo}pH!Yw zzE-|b&Q;D+u2y;~lIl=TR4c2Ot5>MEse{xP)ECu{)fMW~$}`Hh%6H27$_2_bN-xEw zrl=>WRn#lhPU`LIVD%;S74;MKQ}zA8;=sFs_X2MR-U%!UycKveurSck-^u^Ew2F2$ z<%sNywn7<3>3^POo=<(B`9AP0@%)vtH{~Axz5WH(n^sTj8ml+= zdM#|_UU$H+-0MGZhjOR#zVd-`igK#bR=G_1S^0(Gpr)R!o}>0suT>vcpHLU7i`2W6 zyOj@>|0$;_rz`E0%avc1-XYhI>RamD>b=T+%16q_N-d?1(q6ej z`Ca)#F;z=FU%f!>qh7B*tv;i^qrR(l_IL4@w<=hw5ijoHt<(Hv>sXFg^=ZVol?FbC?l=^;I=Pd8>5FBr3o zZ;cJc|BPkEbw(fK2BV)b#h7Y5WQ;OiGv}EvTd!Cnth=p_Rwt{UcB3{}8>0O|{rYIv zYxijPYCUL=?(hTc@w447w8wrJt&CCDC~K9*N)zRPa!_fjwo_-QGu5%mIAxu(UTLZ{ zQ)JbtwpTl-v((w@cx8gJLD{G@S6V1;)uVP)JF73NbJTg(>sB|byVZ*_u7NF-u^oP) zjK5)+GF(}#EK#Z{)s=0^cBQe}L>;Y;QAa5EC`*-PN)4r^vP0RaG*z3aW7To$NacQI zxw1m3t<+U^DZ7>CY72F|Izjyb9X5@g7riYyD0-FBO{t((RBu*qQU9kdQ!kEQ5*-n} zJNk~cSc~YU-XwZ%^w#LWXltd75>mrzPxTsgf%>M}Dtck`&gk&yX7XC1Ez_cUIsHQO zV)JtI3bVD@*1XE>Zgw;~oBNFehTHU*lIbu*X2kTHL35Awm-W5zgYm2Jo3X{%YW!vV zZR|33bH}%9?ShvF7wU`j1A+enOCu{HmRZ(3!K`eaX*M#?Gn<>Io3+hd)*sf7l-tX^ z&g^gAY~F7^WR5nUG>4gYnS=D(^%sp-jgO4aj2n$xjfagVjMvRY=IKT~V~+Kzb&qwg z)!FJ|HIAMWy*YYIbbxlV_9tyTSieIbsXu_WZ`Bs-AL^g#pX=ALbL%BAU=o}{8{7f? zl>y2;Wxi5QDX+{>W-2w++Uh;(NVS{ZOTQr6B04mBM|7|CH{TitY4i0r^nHPU0$)VF zj_kJnv?iES&E?vs+I#x@dU?H)KHqrDSZ%B`1{=eTamGZWfzjBQZ@ppNZ#`i3u&%Zm zMbD1j6de#9rroLiMLkF9kLttqyQ$}0+G_nPeVP8Tel2x-5j1!fPNt4S;Ckf-s z#Zt;BQ+~D-X3_Jbw?_v@-`76iqu0{^r+uUyrys9hWL{#nH#?YZ z%*)JfW)HKI*~R?V_|Hf&z5EEN(+r!M888)dld;*@VeB-1GJZDp8vBetj6aR#kxwE~ z^Ek7Td6L=CJlkw$USQTV>zLP?eaxH8Tg(T{QRWloSo2PEg!zgw*Z9O(Y20iKG#)p` z7;l)h=}pF240epGu8uU#I`ut?o??e=_=X7U5q-)KP)9=#n(ZAH!>L2MV^q(mG zSJ+Pe`@lh+UGNy?j884AA9bAzWnmiBAl3-=2E9M2epmlYt=H)2*evSBEQ9)2YWSGw zxGcjj#rK}*yL_M3$c=p+xbKR7h`S8#|M4AoHdePB`okkIpYOXhk$tGi%V5G}s75)% z;3lqLhvVUSsDIzjPhP0&yV3Icog2C{94E!gnvo+MBFQg zUlI2L{MW!x7!M8Mdi*~k-pgFCfC2CrEFfKN}%%{xZ=H2ET<2B<`<8$K{<2GZo@uacPeA}#RoMH6WZqj!188KA9Q@>w- zh#K9ZEz$p{uhhS=uhHfLs0g#5E?T``y+-e?uZ?^YnPN^e7a8vuUmNRv&LlO zY@>-WRJ%jlL!OW5kLx4!d&y_GwnqP2U#@?`^^N578XO1H;dJu6TkWUcq)&8Da@X*m z?tj4dpzl84{k}(hkNO_+jq)9k{*(4g|4Pq#CVC$6JnH$#v)uEU=X1};o)w-gj-MPp zuiyKa^Koau8}zQ@*UQ)Po8#Z5Y)qNtne2JRGsp9yXSU~E&wHLlp0_-+q!*=6JfC_L zuj;+qcaN{L)k66+I3nb?L=*z z{*As--=weCztvSGq+A#56Fk|dY8)3T7dpkLX8apG5d1s1FL+mEMC5+$0d1T##5Iu)a4=NVdXaEcI7kWb7hh8wsMAYrgEZk zlG0J>tX!yEto)_?t^A<;sFYR9seU!6UZ`HIo}r$p_EY<-SE=3AN$M2!A@yN(vARTk zO?_SMsCCk2>M!UM^-1~w{bv2fX#eO9(SFe_fu91K0zU+LY1e9%wUab86bd;Mr*ciO zSI`KVA+O?7{t506-WeGlxkbNKe_DT5e?oss-x2sNur2UQpoey~_Mq~pGEfMLg{m6Vf}J<31ICgmrkm3paKUp-5ms6MAYs6MK`rN6Du)nC(}A+IONYn1Yc zGDsP$tW;JhZz=C64U~pTWu=O;SJ|g*R<yy)t}QlMLS2ekRD=tw$e(!SZ}PKtCtCt4Fwg2=ly?J#>cP>Rs=3JS{oM|mlzis z7a12AEsT~%E8{YwozcceLTkCD~uKHDaTm3S?WzC&8FSG$arK4zrTUrrx%#!b zPW?(#((-jonft!Vt5R%;WJ!@+b|5%IKil7SVoj*dYj~y z_euQ&yCg-fO|DvuMbuC9- zP3l^Px~5Rq5OwuY*C2IWt*%j5s;kt^(vQ*>=_l#-Y<8)o74omc(kUn6J?O`s`$&ER}HkiX{mT>vfNLbw>eOQE$L$X^@$+Cn>M4;}F9 z2%YRe{yIaKTe%B%&I_D*&$Vf0r)M1Mer894e!9a@E*KR+QqO6Ho-3Z zNG3VJ2`-0Ia^og`UJYNu8u$vEnJKP@~v`%eqc&i>QF zbFzQC{G9CH4$sN{?eLuJ|4G}e;s69sE5C8 z|Mig(WCQ%{{=XseEU@j%w*N+i+x>rI{7%5P3G!TMiob3D=OOL>|9t%H{{I4G3uuYI zZU3#1cK?47es=$V3DW*c>;Eq$-0uI|;NJ}Y%VOba{eL^c?f$<#{&xT0Ar_w2|6fVC z-T!yO-|qjr#KP_Vzbn@{aTVb?(T(t&=uUV}^dS6d>emylp+0GUY4+a>|3f>ahh^RoMYdk0QY^#A=_hx-3vlx6q-k09;-|1qT9|Bpu6{r?!`SQrQ6Ni%`+ZTq*+ z!N`u62(!=Rcm-_xw@;mT71HeA?*HfFw)_9rkhcBLL)!iS>qxu*pO3Wr|2L3!|Gxlf z_y2DqbL;;X5+{2O_5X{A^D(U8zCMA^T!;Gq&+WAI>308LhTXVjA$|XUocmD!Uk+*a z|Mo8MqUiq{k4wcY|fZw82=yc|1*C7KibFt!v2N* z=f(be(r1bJzaxA8?;6Gd_W1u=+R`5XUxz&OcWC_Ihj4rR-xq&-{NFEKxIO;Ak#Kwb 
ze-r*E5PoyIaC`iJ3*q+oe<1$$_$Nz647s1=`E@R%b`M>u` zpB?sDDj%fgpZ)yb62k5IzYmf2_LjX^;PxA?@-1M@W18zZ_|g|360NHvV5h zoa{L?{{MtHScY6UOGAD(D=V9(jNaeAUy3a8Rq|5P@kN} z{~b~ejsLTn|2uRa+0Xx_g=aVamlmGg{9jslPV;~E{Gx6D+0Fmi;kNx}H~(jc+xDN` z{NJHEA!V$*lip)&Hlh|CiMIpZ)$$#`(XJSpO6Df7l8$ zUgg&R3;WN0?cd%LAlCo$zW=AR*8he5XSDjmZejnK_y48e|6|Yp*>;{C!v2%r{>A*? zVSCSbRg(69B+mb3H}99`FXsPB&;Ii||0nD}qtzdF3;P%I|3~}$zp($q_MY)7H~TNj z`Twl?e|!EvI58C(um zz?IMmy1-S?9j=CJ;99s2u7?}oMz{%XhFjq_xE+ST9dIYy1$V=}a34GX55dFmC_D~N zz?1M4JPpslvoHy!#P3Izp2IyIo=3+oz?<+9?yun+_?EceAveONc)Bzl(BAlO!GA0K z4BOxr*bcwK4)_gr!tbyP{(#-^C+va0$YU?;gZ*#-4uX9Qo)g^Q0UrcFfe=JM2h+=b z1tdB!f;vzS&VV!FEYh4!`ipbHwt-8C-x}INJ7}L;F7L^6-4QxNSLg;k zpeOW#-p~j7LOzg26BphQV+c0r$YjSiJkWeh@~%Bk&lEhA}V}#=&@) z025&{OifLD$o{9{E-m}tPT4zPC+teC)1fvy^uMtG!rK3C^tvYty(WwO@1<^k!#?Vn zmHq!i*nSZ9pMU%R7oFy2|Nr?MlFIRxSHP9AU#iJZ|KEvpUEnI{4p+l9a4lTtcS_eI zhrk_hC)@>h!@Y1HJqjQ|Nl1E@51}=0elGmgOA{2_yj(K&*2OB625}3;T!lCzJu@K z2lx?wf}i0R_!WMG-{BAV6CM8rDFLTs1?b=46gZV{p?3d&8rP@WFQKIV(bwUw1NGnx z{Lh4ja27Ozv!OAZ15MyuXbR^+GdLfb!v)ZSJPO$|}2|L;cL z3-_VV2Pik^{-5t?#C-;yg-I|4o`dP|JiGue!b|W9yb7+lA=35(!uco*J>58y-i zAAAHK!zb_=d=6j0m+%$!v%i0Q&Gk3%Eqn*x!w>Ky`~*M4FYqh;2ET)SHmbc>Reb*! z-~Yw;|6}|6e__V|fAU%K7yJ#yIsX5L{QrgjfP?XpYW(k@K56}b#__+4uoUouJ^uG2 zgP=kf3OoMSf;o@>4a%^8P2$iW=gitPJ?)b9sd``{!ga-Q{YrcZ~v#^F3S18)5)(E)PZ`CO!I$dP@gm5EXZj8 zXXAGcoD1i{`EUWWgbSfC_J1+yFU`gNvzz~GL)y#uTsXSt{}?ZDkLl+RDQwj|4^?W{udfJIyJzWO#Hi#eTRHL zV^_B(_#K^hp0~SlO1A|a(tYUUGx$8Fm$do6FK`$4`d@L(|F4eSM_$(dzU02Yg0JBl z_!ho{@8Jjd5q^T7;TQN7euLlP4=AnmzdtEEFYABV&Hw*J`ea!DOF#erH+A|4{^glW zTmMTx|NkFh95gOD6o=$OUJUm9-=)Yla2Z?;B&^w=n(xq4b6A|MNQkKZ^S|f0P7jFK4HEp{l zW(W2&nXL2w_WGZg|1aA4|FyJzUgrPB`v1|p|0lctKUwDg()a&j{l94U|Kz3rx7Ys) zWB-Mn|Ie-ePuu?|=Kr(a|CihR|2q1B_38TmGJJ;Jj;|An{zk{bVyqRs66e~%y^gJiP* z(%Ao(w*FVp@xNIAOI-i6$Nw4aKlk;&(RnQ%-hHKzj$jkdbxsCq| zvi?_+{eNk_|1+A;z`VTwlim8?7}6)x`#<*jUsnBp`uBf|bN%0b|Hrof^zZ+S&9(ne z?)QIkU;oQ_bdY;EQ95+0zQS6unJbg8dwYKU_ESrjrl4oZ+DPB)}~_4-`-E~ z1NZSG`~*K|Dvv!=nf%h%u%OpY>DMC7m+zJd`SG4fg1eygJlfN-A1lOuEO3R=-z)FD zB)HRu737-tXHxUy{Ta#%7HQrEsZaVeyvswK!k}fUmn<3hTTlj$gX5tBoB$_gil08; z+^%`|gFH?yP#(OqLE2Lbls30|9f{$r1tyg)*q4}8^~tVHnUmu=ssr_kwZ42;P0gS8 zY$)rjVl6AXd(F;Ymd?($KD_%web0sSV)Z|t>kFVIT$pXT?8?gM&m363@?f76<+jOJ znv7*;yLX4H z;TpIWu7m60hC+nX1v%laY z_!{@m@C*DJuT%Pa09E3df1`M#f8cfQYt z`92G-a9+yddoFRk;D;cn5C#nlu%HYa2ggGNH~~(Cli*}H1x|(2;B=@3b)X)c0cXNl za5kI+=fZh#erg@*Pq=Og7sADGDYSvh;BvSEu7pm|1+IebdAh&cba3>I|F6!MUc~tS z8lI(VL5%-%vz4PUyyV9J*Tp{TuIE|1AzwCH+xh<*MB|lHJ82{%-TZiKwh2#HxJ;S9p{vU$96=wW@2kDDq{C_9; z+y!F%e{7Eb@22hVjoS?KUAP~JyAOX-6zqz5GxB>lH9zLg;`tWk`akn=l<`Emyr0DV z6g(aG%dYm!+Y#?sm;_VcIhYR5!wc{tyacbntMD4U4sXDlx#pi;xq0(nRLuG3trkZ@ zES{$XU9q<$!F?p$Luu6U@c93|1bs0tmf$XI-Hwj+|0URbQLg_lCBJ0_)-!vR3Y@mE z?U`)plW+XL;)rOQ!{h%?i?zOD{Xc7qVUJo$AOEi@7G1Je zG*9`mpEOVLimSEQ%gXb#AzlaewBp_rcNex?&Z!{3Evfm*($;vsMY;YjOWP=8d%C=L z;NA(l;(qph$kOfv_aVPM#M>M9=eMwM?}r0$5abAF)PWm3;DZ1t5P}HkU_uniLOCc8 z6`>MThAL1MszLS0p|WdmT{Gg8YC}_Ki@OK(gkGs}?0D(__{}N8`oz-r%`#5<_{YjM zzrU5B3x10$!JVBRIIjnN-U0(*5DbQ)G5rqXdN_=LdtfBo4-dj9cmy7U(J%(a!Z;WY z6JR1thN&QwUv~GM=FdJC zp7A4n{J$K1tcdG_z5BRV!m7Am+WlsfV*H;`BS)rNjQA2*^|6=_=s}>UD|E#o6i3JSe>LCr*Tlc;Gxvgf9juT09lBRB z{x8^5lKiQ}_`hIn#M~ctvJp0Au$9dT?t%&?A^*sOwWA&Fc)&sx|9xXrR zWQ@yNLaymqYoHt@H6H5?3GrEbNN{J&|5$Rf1|xKl#0}2+SnL^&@;sLnZADwLPME76 zSVu%(6=S-qln|eF#RPW}H@~Qw(7-XF0oM7k*Q!u0N84l%0C8$$io<>Y{A)wqOkwF2 zC`Eny*lGlJNHB9>U5##@oe=bCcLIcU7fsz~lH%k31l%>Y>TTPPcbFuWs z|4nn%gDf>eU(I8>YY~qxORey~DDF;{=c%;nA;$j&)4VKQ!gJKR*q$Xp=+%E-MH=nJ)u{&>GD!OdwFwB&t6~3=^HD%UqXELxF)#s 
zQs&Vbhkd+47X@fs_CE7`4JHhOR{vS#D`!l6aN~Iium12*7p>=y?jQ>aR96eI#d(Ezic|U^uN2lh`I}*iKPIl!B zf1v@P0il6p)Bx{`73%Ylcg+fwJ{ff)>LfI9tZ0DuLq>sF>|J(cj zmt;y`x_2*1Pp^R^WBq?A&(X5@bHtg_xL3s8>F=i?*PO*iek)V+<2=41%eNqP$u14& z>1JCm&K05lYhfL%hYhe1Ho@j>(`8pye*HN=fpWGMdpY^7SF*+DY>5ONalS-?JK5@+ zZ1v$Rjs$%%FOc9)w)&PveK;qfNOi)Q2zky=miA1jZgS`^BDIRm6n=}YgrPf;jhs`gemKv3uyrbuZ`*ee!)j zoaI5fezCmz$J0Bd0lHHf5qIb1{&=rCU-9#j=aCwRcgquW!Ml|S?!4R=?^Dt?7+AfRtKN8%B{H7)N@vcV(_e{#4mEf1Y-n{RTZCLtv`MKsD1`@qc=4h-;yNV@m^K{9k(Q&3jRW>c4rPs!-`m@6JWZ$)W+?eG|`6 zDK!S~78XkPyq8$0^rdv$qSz$T0PnRj)?8m~&k^s-lK;kH%Rh-!neVFb4soIM!u!XC zN}ulviv&lV26(?5``lV=&k^rslmGT&%m1ir5OG67140AGga$Zsp-`WToY7FI^v8sf z#Z3wgB&h~CPoz-x%lRRNN}r@^5!DhJIAS%xIX#Sb_QJkgZIkmqGR5JX57Hh4*~m=^ zoJWvroZRvd;m5WH#QOiS-Tvb4k8KTz_5adqf6fdlRR7IcLxoCTdUq~L78(#5;GD2R z*)L~|6)L@G0iglTr878(uGpR<&bK3fz1Z?Ul3S49L@AVBWGPyx^hdJpMBR%<1F}>W z`z%*%&k?_0L;e+uE&rm?LiV}HQYGqNIbMIx=EPkUszLR*fA%?u5TOB~0il7+8sI$H zLVbR4{%oPrXReX(5gHI0C@BqaW^|$Km-D3ymA<6zQ1i4(Ce7-JCBOzdoGe@Rcm}ObF);$^`e}nBwY4(Ew+N7OD+7f3#5Pj}$$LdI}93 zjT+!A;6m9i=Li=n{n4mXaZmZvfGqXryXJsmdyZu37V;lhZ29N!Zu1{omIhJ(!SVXb z(oo#PU^tA3`{zGr`;6#&;_02zNW&?Ojl1)bALmEsD}G+`JeK3g(oEWPR<=6j{OyD= z_RhxLoS_}R&Q6D%g`E(GbFnkH=TrWI1i$ooao%gTVd>+E>(bW%XL}Z^tvK7WQ0Yrw z1EK~(1IMlgI0v*)_RATfg-U*cTlK82MfVxye% zS0d#Uoz_Y|C(f}GIx2a!ixNvn1Dq8op6?RUwJ1htAS(@U-e#U{kaHFD95?H-))y*$ zR(T4)!fAlBEEC#c`H7%kAn8*j85kHcEYaI_T3Lw=d~ro=lquhx1ASfx@7W8D}(bQvJFd%BV=-DfU`LY)mEI#P^k3D zp#@P3p@Cyl1DvT+DEsByltQIHHnlG9T4O0P(JQow55vN zga(8Lj${pR=24+O8#s5UQ0b3k-6d1408r1IeTT&In5QyyrZi1b1$A<-DMT_?#J(;La&MX9p$3G zxlHu)J>$CO+$G$-p%3(p`xm?fkusSyz?mx9wkc<2B!qEpMuI!H``|2$g!r6`k>Iw| zbKXQIzqEWgha%gsv^e>doM%zwd8V%eXJr&?{^`pS*FpnA1408wiUv4CNPIRPDUBuS zDKt>T8sPjeVgE&}Q<0s}z>%T>&i@nkf21^)sHf0C5ooX`zqlz{EBe_{*=@FruBe?;Og1XDnW5xOm%F^g!&HpH90&xpM z140AGpax`VjQDIk2HRcSve1C70a+R=>|ZWJU?Zs^i4am}XVgKSW z5E?i}H6Tk9g#90*ZJ#W+ElU%N^%<0<$;Fy~vgolO^^m2hLN^6boJdn-8jz)F#rh2L z8`Z^{e~~F7n~eBPXx{Uk6~7050~+_sFbC$w{j*6fyoCmY280H()BwN#EH z8aVnjAWQRw{U80!EbdTfAWIF%(gI=sS!yOZJ!NTOLVL;5TM6#mYAs9eB*d4c_Y&MW zrI)3}332&d`UH1QX+>CaYCx8jiZ)EnrV+Ig8Yp%R$kH-l|HZClQ9w=_kfr6t`V7j_ ziek+_r+X`kFj@MPdH$90dyu77xL3m(SR3~*NLjM9u2AU*(J1h<`DmbPT_OUqZ5wq_fa7Dq^-0il6oM+36M>5|+jlE>@w z2ZEBKhQbj|H%u#9rtER$jxS%K;t7>btb9_HldGOm?bPb0)i}Lot=e_!)~kO;gEJeR z)#&WT=QKID>3Pl0Z+=0GmaQ(l=wj)TOIxG7HZcXZwdG|}yUW{4S9IX=%9wvgTXvG9 z&WJ8CzqsfsNmt?M*1gBoJ*8`UUEBM*KG#cqZ|HYp|C9 z1-oGn?1g=>9}d95LVxb@Tg?`~zYK0GUQ=-)G?1hk;CH}zR(%iv1ws%>nrD~a#iTxF zf$JmeKS>`vQLUuVz|n92Q9eJ)LOCc86`>MThAL1MszG(Ak?C`W-{nj2<2U+m$4&G>XM(%ykoJ3eBK7w1}tUcfW971eZYTxPRKi!S8?J-wxVC2j~c$ zV|jPwx*PO>p3n<=Lm%i1{h&V#$dqqd{ztpyw=lT>fiUQp)j>{eT$J|D?}nh~p)d@F z!-zuboU?1mXJn>)IMXhZUsm@i`v0u7BK(8~4r!n$?Y}s5D!%_0hgLBaniR$38$LIXvkf#kP;G5=pQEiG~p8py1HDR1Icgy6Ir{NoF`k)O7pTL z*8jx%pIHCP$~J7jqO||wS^vwf|DRgm&xGuBA^e2~ga(pR1N?r7<& z8IYXKC2A!!P;46DeB_w@za{Ly*t9G%78*!S4RF3Wa{}+d;#}>Y^UI016qdnqSOK5H zN>~M}VGXQ>b+8^bz(&}VkU!@rW^ivI&ejZm+wj{CJ5v2QbByaVIVcYmp%PSvDo_=wLG{%5X|2gQC-~Kb z+6n$?;rWrAg+e~{3!P7X%1j@Z^HwOoVUd;3xh<)A$x`E(Y(oA`p&2xfm%|w@xLd(R za0#@Aw$KjRLkH*>kDtEI_BCg?5Z)EKK@aE&y`VSrfxgf$hxGjk9{{(&Ko|srVJHlP z;V>fG^cm|@6mHJypv;kQKRgJdinV;+0W8-1i$W(N51|2}fx>HmJpfUsG&-tNn}de>L=!ar{3A zKYRQ?H&^}T)>=p3_x=m(+>2oeEQMvT99F=muo70q<7d<( z^G<}Xfwiy>*24za2%BItY{?=0R>HTzcGv+sVHfO%J+K${Wt%=@eTu?e^5g&gx!xah zYv|!1$Yq#Y12=fU2LVtZ1QF1|gea7Sa%Ix%g*mpko4Gc^D?w$b0##GPm~Z2{I@E}{ zYjRy1>Oy^J01eaS(+GECXaY^488n9$&F9?OvEy!oNQZ$P|_qC$EzEC-NBxg9@EjUdv1$k2x>O9$I8&Gk2Dn7xQN^IfDG} zfst^3yd37YaF2pVz@Gnm3~A5*jYit@e`An2&Hs%hd>o7id;V_%(w_gDi0p%$jLd2N zZz|!_U0Kx{NF63J^wcwS+etg>Dwh)u7&+)W&bbpY}occC;mJaw7rf#`=1-L 
z`*|@rA3ZIAh45BfUwjw9{T?g^yZ>K;wEO?1NW1@EhRmt|UrzW6J1*G$|4O9Y|F1&Y z{r_rYPW}HH!q>t&u>1e@NW1^vfVBJnjmVPi{|ovIiFEdQ(5CpaRoeRCyJzg#-;y+S zDs7zBV~AL}yGpd&;?(VtkT+vCKCAO@|9s9DOFJ=kj6DbY ziY5Oep-oD%j_d)6>H9!T9*jx3Y{}NcnOYuW99!Q!N&V{%wb4$rY{*5*ii4~&e(zdt4)jLA_srq5WPyt-Mh%ralrFJtA7 z&N4i&`HOgY*8pq8ab3&O*!Xqca}Y6w280GmLj&yHh}$cBIO5mo>%yLnEdA5su-Ajn z|H&{l$9QS+(@XY#kZwk5I`)3Vsrfpk`Pl!`Vksjp^$>A{280HZO9QM) z#6El0iqC}-Y#P>p3Z!q|`7BVn61+80)Dffs-W`tF|3>j#m0-KECR-qVvu0bMbR~Fe zq9~z(V^Ra0-B4n-$^L=Vve*w0liOo*2jk0~3H4wdBf))4Hck=TG;2ZVb`R{0>3UyG z?vKd>IqI2pAkxa^SlcOQSBLeTnDoS?FI5&nRTHvc?I_E9nXitO8_F^~Ars-qlLnac z&sCSK3GfVv8)2C(aAd_GEaB-bC4Y1}yyH$azFrrC1$zGDwve-`& zldWU2E#EKOCDennX%pNz-Gc}_`ZU1)AJP6L)U3RFpR2ApzZ)CvnrpnQs?0n5sp)u+ zKPG#|WUrX)ohs9(X&P@>QUjj|c*%T$FXU@DxEsOK~WAc`m9LQ(O zpoDs`Co#cY0;&*2B#{Q#Zz|fqgxZ6By}9a|J-oTb$*sEV|4q$<^JHRjWK7&A zF}XA*ml1b)hPW#d+<7TKr#PH{6Dw;CInRXbzlf%8O&QvCQ*kkosOZd7>;ko5o zQsL~~rhXe>W30ZLVsdj#ZYh?!vsbKG@+~R79ZBV|A1_y3bM{BBadNw7&H+iygS~z+ zxhp1jXUdyBYnl9V%Rjep_Q1u;+gIdyXID1oDPTJXGL^@^x>#N0+FD8|k3>8b=*^^c*`IfdmMfpMlLIXkr88sj~oG!_o;*q>Qe;}wxYA77h zbi=fw_D?L?$N1RWwCyg#{#Yyz6{L#N2~s8LM6T_>%7=t>l2k=Hnd_=>3RH{vpNgyw zr^VbgV)ArkO{f)frx&$x)JgA=dRTe?)75J@n$4kF}t% z|0DWQ7L_l6297oR=iML1j%A@7l!uB?2`WRCOrM3kr-FYqs17wUg=bVb@23#2Hq^~E zZpO4l;pS{F%5DG+Q_JVQl@hH7@54}b6KD#}O0?{v(BH9^M^V~8`|*$H_7?sBvG(+f z`z=cQKhm`+?Egq_K~ewwYCzckVT}p@~|6=~H zc%N%A|5v>FE$ZBh(*DK#Us1P?$WLgXcr{Rz_Fud;E&kla_y6M8u_z(AG$8E%aC->* zKdcLJb!=!r*#F^n7WRKw7vk#J(15W2!|g2W|FAB^)v=+0W6k~xKL6i>{jB-f|J$lm z_CkvNzsH8%7S&xArTri2^Z!MqQ<0C*fY3m}G{BkMMbd9_rgv&S{7yXmcH4NKoa>Fd zJ#>JM&>6ZyH|PWP0Z=nZ``h1>Bs8#eCG`K83`4+G+UX?c}|l%-oJdms!- zEuV8s6Y9Y^mV^z1;V=U3NeItx9c5`GdE5^V!YFtI9)r;^CR5&=0f+xM7!MO*B20#< zFfCL3jP>CxH{#8NSuh)3hB+`7=H;3`V}8P2bQ<7%E}{RTQ>VyDXyBOB0B80U$!AE> z_x}s&U-Hxczg3|7;GDwLbevUK!1THG|BJbgB~VoT|5EZ=2Fqaui;?0FJ?zOc`kOr?wETI*LxxD z`~N{xh%eFku!%Ge0cWD#i$Wkcg zA0fRCCPblZLcIJ|nR67#qdZiEO1b96ISa(A0%_m>t8!fp^7{S1I&o`2+V}sOT-OHi z{Xf5LcQnT5+zpX?NwJ1qwAlWn^>_6FBMAS!UplCI~+CJa$yC!@WXPrO6o!=7KBS4-#6Y^kx0Dirp zPlA7b^Uh0r_8L%5zhW;ZFLe=dga(pL1MI!QJ_e-P2YX>+a$thJus0^bU66ZVUk&99 zjpxIj8r;LkG$1q}G$1q}G*HqSi2ZJ{Bzf!~ zE|#RAqDoRIEJ+cKs_0;V2^QFYQAqnu<5Z7y|E2xLaXPOey|iC95MNP>RHa8zX1q(q z8qZ@eI@^bYcdQ8?n`JofSraxMCS)0ItBUup37;Gf=RFME)6%(T#IFmYTHe*pQ+nR# zpxoK;GR(;}4exspZywBt1-ZsANZoTvBTEY@_bqq_-YfC)bE=yNJ32H_I`+Sq=Xpu| zdFH%@_;o?FD(t@?S`%r621-W*vb3~N_Fg)*7NrUe2n`g41~|(>*nd$dQsf~tAT%H} zP;d>%Bz1rjTm{dj_)|Ki<>j5yI?-?1g=> z9}d7lkSlQB4!FTnA?XxAg*cCgx&=Uq*N^iya7W^9yWX6;li*If7tZ1!ZWPMK!#P(6 zclo&cQ2L4q?!)OT5w|i_$uc}GUU5mMRJDRrYEUX=7uP+BeDkJ(!rOlfp4(P%5nKYT zp)It7_Rs-3LTBijl+W0)ZvT0EQbeqyQ3JeBi(U3eO1nH7^)2p6Xdsz1AnZSxT0_)D zXh3K{XrNRzaKy}87Dh{)0VBTi7Dj<0tO)|C6B=L>+_%N=F02{!2&iqEw-QWYB=H|72(dQ3s)c($Rph|I*RB zC{<`688jg5KN(s<)In&VbTlCBzjX92N);MN1`P=NPli?ybr2dT9SsQkFCD#$QiTSR zK?B16lc5zv9fSr-M+3tCOGodbRG|T(0igk*0il7S(t!Q@w(b;3FFERFh7SPL(Q2)uj`q)1=B$4e2E5bg7C|Q#x6yB~_JbOQ%S6h*OX7`h+(i z>`cNM5_T40mq`~$?W7jcIPn1uPDFr^%GG*^P29C`*T!83cLTYue5PDaZYbB6 z&yvrO8{s}1cVpb=;BF>2kwchseW_ zcOpl~!{xi>yX1S2_ag6?N6HV#_sI_;A3~0m$H-5~Ps-zvPa`MG6XhxLBzY?GIphp^ zn*6*xU7m@20XbWqCBG!UD8Gz+1vyuqBfloUD$hf{j(kskM}A*^S6+<#0J&UVCVwn{ zB(Fezf?O$oDt|72Ca*$%fm|oAmA{d{me(V{MQ)Tg$luG~$(xWrAh*by<)7pq<*mq{ zkvrub^6&C*@-E~b$iL)0vf^+#REOIUa-=xI4v!<^@H#Yy&!Ic~4#N>}n2w;sB2F2? 
z%MxCWu;U3UPgn)Q>N*-a>Oo^iBgZ+8vmH&4=OQn6v~zTDT;Uk)c---X<59;L$77DC z98WsNImSAk#yuYQB*(Lk$&QJRDafhF*^U<-Z#d>T7C2sayy=+lSnBxDvD)#u<4eaX z#~Q~Mj$a+WICeO;J9asCI{t9{?l|c9&mlV{XT%wHYG62ZrwLJ~Fna{=oi{kIarSfe za^C2?*4f|L+j$e=0|>vF@LLHRNZ26!?{E%r4s#B5-s8O6d9U*>=Sb%W=L63BoDVwh zcRqxB6z(z3C!OP*PdTSMr#WZ9Oy~2?7vLr5Ea%J47oD#-XFKO`{VLaUxt`~I%{kxs zy7LWK=xXV@2zfE`dRJf94X%E!+g-z4gI#yJ zhPZ~ihPv)@-QgPHy4N+*^?>Uk*ORWXuBTn&T@zgsT$5bSx+Wv1Ag8&ex~9X6u34^k zUGKQwhYwv#T+3YlbA1FKyOz6Fz^ATHT%W<`u9dD;u+H_B>l@cv*Lv62t_{fVkUzRM zxwg1|aQ)=k?D`qG4Y|YhtLrz|<@(+A2kdqI>H6EX$FD5xm|AA?RGod z9;6qkx&!WzJLnF(6}N`ekx_S9cNr+}uIR1+C%8{^SAr_;%I=fhC%LO4PeGpQuI8=| zHQlGXYe7AC9d~_qUH2Kt2FTIw3GOG{&$@qc?{IH-|K|SHz0N zX({KW)JSQTa(c@7DK%4?r_@ThfV3@Axgez_vL!y3kjJIu(V9Hkq|!R2EwU{>?a8kL zd0k0f9m%T`d37eQF64E4%55owQwF6BK@LT}lJaKCTzETWVamHHi&7S+yr1#`*B_=V zNm+_}ZORuZ>rz&y{E+f}%I1_!DL*2&Aor*Ilj8HFc>ErZC*bjV!XDKV@q|1YQb$HT zWj*CQ@9YI#oe)b>>O)bX6=sq3lXspmP}Q{Qu@=L}CnPXo_c z$VSK(o^w1cJxx5VJm-2Y^fdKc>^aYKiKm(8QqTFG)}H2`Hl7PSZHd#4@XHCmg0K#R zT}fC+!mjgN>*?d^?dj*a(KEs`%5%5pVb65W^PU-=nV#1@Z+PZ=7I;4KeCDa;t>dlj zt?ND8dycoUw~4o-_X=+(ZwGH@@0H#zTzBRADz3YGyLo$hdw8$$UhVCLycT(#x3~9t zZy)as-oD;`(BFHb_a?a6JHUGj4D{aWeZo7|JI4Ez_et+K?{nT6-f7E4^#HUwXg7Z!Lab41-QwK}+q^$}e}P}U+r7VecX;=DcX|Kz{^8x{-R=Fy`=@umcaQg9?_b^n z`2UChLHs4(pI+Iw$LsL@<&F9xzA`?|SJtQdj`JD5az4{{ywCEL$G-yp74bg-|4R6u zh<`0#O#O6d2WR>k_!`2wzVm!7eHZ#J@?GM)7%ug-@wJ94eV6$<`r7$A`7ZZ$ z_Og(XU3jc2ScgMd6{#WDQ6aSlh*ZT(e`ucA6-Qc^$*UxvW??&H1Uw_|i z_z%MWcKiq9KLr1w_)qaY@c$71uYD_h>wKU4zVWT{ zt@nN5`_{MGx54+N?>pZb-$vh8zVCf&eVd50neZP8|B0}zg#Ap|Hp2e!{ps5cDSo%# z16BMd`A>#M{`&s2{b%?a`y2Sr@t^5$;&13b*MFA3DgNi--wglr@o$d*1^Bo1U*f;a zf2qHnzqS8ze;a>$+*jc4fcr|^J^fw%*Z8mU_wsl1U+eGg@9pp5zs`TPzYqS`L294!+*Pf816f95668M?uYyj`bWVS{}cWv{iFS3kx%=_ z`NzXt|2+R|FyH^W{|#90|HA*Rf3<&u|4aXO{x$xM{;&Ms``7w6`M>u6;9uw8?El99 zBXNErd@JGG2>XSw?f+-*OyH}iuE&4hyoezn49VpAh^P?(1+_w0iquwQN7rfkAwuP{o9uL ze7>A}&OLXXJ9qBfnHPwAlekZ^KF->?S4OYQUOV;b*{esdSg+h(gL>`NYjCfDy@vGK zyVsOnhxD4-YkaS1y$+oKadd=uHxz|kM<`F)Ea7C|`y;k)q@3k6P zi7fLc`z!q^ez`x@ukfe&tNiKyYX305(m&j<@@M$f{!G8dpXJy3v;8`Mj$iN3^%MR) zzrjDkZ}gA!oBa9y8h?S`>@W0>@fZ1P{l)&V{u2K4bS5k+o zsKd9Z!`0N`8tS03ch8Pw?~&ard(Z6N+4p*gXfVL7|z?3a_5vwzNRIS1qn z$oWdn?l}kMk z*PM59-p+Y9M+IIG38KLsx%=ksnL9LhNba!QeRB89t;$`KTbdiV%>5*{SFBI0cdT!0mzW>hHI^5P$99YLj}3?&9xIK_h?T`= z##Y7_#L8m}V->MQu~o54V?T*q7P~cedF-~>cVoB5u8aLN_Py90vBzU?#h!@$Irf{_ zUt-V4{uB01_{{ji_^kM%`0V)N_?-9>e3lV^6!FW6TR~hQaYe-aYY&(a{huLjokx`a z9C0VrL+z}%5vVNHOYvB&c2Rx!x3BV5Kb5_8qHS6@Nh(*x)UGP7`l~#(8_%t~^YpqW zsq%Stm17{!rh|Ak9l~?zXFK-YMzWziBk#A3_^zY&=gImjJXH_piMl|I~`s#at^y3=dr^PJ}zM2qfKqprVG_3b&=W} zj#l4NThzts5^cLwU8XKqSLpDS>MHeZb#*)6Yt*&sJLH_Xl_p1lggUP}kQV**~)T2o~6?iNa422$7PpDsp^!CADwO89|PpYTXuiII=Ql3`N zSeIwjbDcW>hM4C&jWrQ3sNWKJQN6^!FRNFK@!#_A@6_+rtLhK7tk=}*HuOjJhWe9w zQ@y4Byj4B^qW-GhR_~~H)qCoF^?~|O{Z0KHuaDHn{QF7L{~zj~$*@wN@&`2{5izh? 
z6Um6|q{AtP9#-Eu&7Sh$frq%PNH6QrJF-ipPo!_ekMzTm4LOm(#^pw0>`2DhiOjQ> zR6aXGyRlEZyZk4!pZVC6wE3N+--}(}z1i^{oEAUC>ieYGT_yIl_Myr6VeA?2&wjCZ ze1-jD=c&~2P&j4XIy3^m0_!_c%TeqmvnpV>nN}(=uvo}2#%RKg_Q}!d-onC;xFtW&6 z7e|&P`7kOUfeBNWU@>XMG($D#KPhQAdkc%RlKV$0dmEpX;rAyh; zVObYry+p@c%KI3lF6V8GOV_!)5}&KK^7}S#aIWF)P3P&-W4^Nu-|zAc=zGcd>v;?G z1K#_X9RAPB8?C&_${$*()Q=)JM}8c+CGwNVt&!W3)>Qeo^WNwV-Xf(e;n2@`uXI=B zZes6={33EM@09N64UAcV|Le%!we7dOLwt$%hp+I)P^sVR zTwdiJ;%mG=RO*lL2K*`VCjY+08$_l4!W%`U-iFV1yc2mh^4>O5zt3CB57XlR7WsRc z<+GLjQRL&uCy{@U*FPhlvgH!-JWr`8WWY|)19s+(s8T(7=c!aL-gvU#0)3$U(UC=sc(1xU?^O5n^1Z#hf!^NUAaAfYWE<)C@%ANl zs5i{|?C0&@&ijCN8Y?NSALtE7j_?Z7VwD=no7~af81JjzSl;N4<4x|tyx0B4R(`I| zhw!HNP;Y`a(VOH=_NI7Ky=mTb@32n&UAped!@U{aOmCJq+neLf_2%(L`ABa*@02@F zztCIcE%ugpOTA^@QQpzsa&LuK=oM`vML4fwuf!|$%Dk0cxmV$>@>YA5q^$C)`M1Wa z_3FI(t#V3u4O`j5dAe|;*VHXP=igqg@tV7x`WSDmcdU1ucf5CkccQn>JIQPD)_W&+ zn0|wIig&7ans>VQP45iv%np6d;%)soyv09{_xKm^2EUE>_!sgHzkLmjc9FN)`xbBW zFXr9;rM%bQ+UIibiZtt$-c{bWwe4!}nzXpB)3>)=>wQP3_%7cLw2%4h+V{Ncz3+QJ z@NV$_&%2SY5q`**2<*-9RRVi6-cP(+z1zAd>-G-gf9l=g-PvJ`)b(eb#whi3?=J7| zZNzt{t@rRX#l7Br-u>PK-h@z2g1O z`@Q$7_XqDa?{)8we5v~lSYR{?P-XU(q!v?eW>&g7T5j`Xs4%hX6Ej+$ky>MuB!Z!UTMkj4E{(r4kceJsrQO(>8lBco z?|x2Sgy;0=VeM*jIKQy^BFgzs$a5yYGn>tC%;t8p2j)eO;5TSnw`@MYO-NOextgI->0^(O(l92zf`T^SE_YsaqT_BT75LZFIgL-P5h3vIeHAgV?CC% z$MI`crA~;R$ZuCq>ULh?e7hg634QusPWT2`8nQ`FccSOd<}0T~N}c)@rOtuy@r_|h zh5135QZJqDr+C!&UNU58Im`FH%IEDb_34))<nEdDI`rv;K*u0ThI!yd)bN$}nwH)pD25-udejkg3y_Z(_zD2^`)%b&VDE{EB z<#VF7eLc1A9HrLvji}7UO0DHQe%52{?0nV2SP)t*14{jD2p}f1Pmv!7?+>&5IC#6E zgV$#^cG|SlY(EczuNC>5q4nqV!PIm86p#DfR6~ZAbA0b``gN+`FGb3yhx2(+eCFWe zNcm(spQXAie9nbbe3py9agpB|=HhvNZ}289_j`kviw@om^e=ehHu-AYB0mWC=l7QR z=R+%Ow6dMP_aZ+V0*Hy6w$RT8kui~fp57Zx1P~MXd-4O3F_G_69*B&I{D^oE854OS<$=hU$fM@?*${hb zfj0E2OX-xdZTYROV%!`Eq{glS(%g{9@zJ>Z|D8EIZJ{qS}mllnQpS93GA6j9f$lW(7 zwL9(FJsvTV&xvQ{-)Fe!rz$@Sum4nIdN`_A@|a4=Xc8ddwN&7Gexc)s#JgOuMy;1{ETmZ!exYaLgpgB4rjpw zFcdC_7hoRz2;PS0DJ%aXKgD7{$TtkIGB}C(Grxbo6e*u<=kuQUEXF5EGM||9snliR zbCLMz_&r?Qhu0VM{t@%lhi?^l$d(VUpU}Hxx#$x-WXtY-qUyuz=k>YdNdN2)-It?650FY<>M`saT5R*{EnIr{fewQ83P_3B1ny~@1vYOJ4;3yf@<86nPuE06T^ zA?Q))=R>d`_rbFkDs?0rC)Y05TKJg5btk;cb@+pa{0!&;S>T1p4BPgZ>&GB?>L@=C zf_;|w`4D{PC_f*9spt?C%=HICaQ8xgAS7kQ3V$F3_Z{KKKxDSah82DcL}rWp?GgSU z2%cN$4}zrJjr<|_8WMsZlRqToqxeJMeekv-8vwmsyQDwrG6M0 zJ{6sYZ(5eG?qxk>@sM6oq3>HH?49rm|C{tcPn}HtPNr`6Q_cz0=d&Is6y>Y$Gk&4v zD8?!ax!uEz&g@j+{HS} zRpx@Ze*T!F{rm^;2d|m&1m5}BM`M2+J9sBz2QN-N{=%9Vyl=8*1FxDgZ^s7SLTun| zV0>&~Tx?*BXjx?C6-MrIF5evc5w**l?tS*Wiu16Fd1V#%1lVHnkgXcdk&W+xe!e9Y z&lMtUhba@ zEtfD(q2;tEPgZjWZBQ>2Ds>v?>aXxB=krAR_KiY6Pk4tPtc;=Dsqi}Md*N-%O!_;4 z?`+1{xs1zm8JFiS?p{BR;=GLFoSaU&(@A%FH`B>IMD8DQ&yf2?`uxv4+Xp=6*Gv%E zLnQNSCW!1Il6lq#udBKADz5j{++!fviF*tLA6Lz9{rGyQDn*vIHV_YilYQ`mz{&Cz z%nfkw5!)t#&$f7h^upeg{eckN%zXfYbbUVY5IA}8WPcC@k8q!WAYHE|9s(zypo}ml zP_HnnXfFg#X3$;;oP2Y>ryOHvddi{X^K#1%usfp~w2leHJ6>PrDaWJuICAlEG*I8)bHJ$nBTg53g;ZTX}ll69$K#Jr$l}@Nt3N*Mn5HTk7=5ik^Pj&C5LMUXnBUO zM4n6f@8kc}RMsEp&6IofM8CC(a%ZCtI^1t{bE}hop&kcgzYTdJ{@3GwJpTJ(I}QB~ z^s~`Nq4#AT$me+vf^;eCGQB?2>oV41ti@zaR#@ccoyYvfJK{Msxi`e~XL8Ty9J8O(Zcih;cj5LYM9Mw{=yjJ~Td|%xo4Hu#X+1|vIHDHv zOq+z~*@X_CV;4HM5QfFH`3&~nlCZ~ihJ$^yGaP3Vh6H&h8G!CsM%b}M{Q9ZULwcyf zl|59}wLMh)wH|6IW4)1U_!~7WxAqfNU#cA}fjv16Wsr_a^sy@SbKp2wpwwK=3+=*FGUX z?JNEp86#J-h5&CSd4hK;d4iWup5S#7uYE#(+E@Im7y9`S@TZAl5ZrN;p9$W2=4A+q ziHD#W9fCOb%ik>Z1L4T|mfU`daJJKb&2yD-_R@Z81Z#caMZzyG^id zql5Pi@&j+iGXFGa`M*VeCU{fk_?h5cOTOSW;s@R{=-?f|y%)USVh3*#`GWV?BmGS9 zE+&8QREgg@oG^H!@dfW}>ImKuNbq(i9=uPB{7mpZEcQ=^mVVg5Yntz8g4c=;-X_Wd 
zZ#Fu3C!vG)L;9->8+boOf_FY;f_FO-yw~X$@D?odGr{{AWrBAp?EV`AapKB7aC6WQrU^n;=spOBf+j0*D9k#nX?;5a4-L>pmb=Q_#)LqZtA4*HUMJDe*56M(*nLw~-RbM^(pSh^xI*$Yw33$)O1lSP!Q~rV(67IeXB-HGF?;k^^1K6qunYIp zUD$Wsg?lP8dzS@T#x7dy4+NPn+#I3hv|R!wc=X9&*!KxT zaQxE#`@6hWk@u=Xe-L=WnS*k_vbfaW0|FQj3J1Su&J-Im+u5xxc#rzNH_y+9K)8px z-?QA$hd`j7dExW8f-)co$pN9T*xTuM7Wtc?rG)3d5}yA`c+S(3=Q%BTZWDPd_ZaYo zagPD-1nw~w343wwH5Lhbu~NS^Oz|@jRm?s>1?5ywMt|0`#pL^0kK#r7YBBenmo@_T z@0Vf`BNtf7^PSiuT;B^=KevOj7ldJ5y&z0DPQuUn!`sD&eIR%H@QxdHPiDv;~O%xWS%;cXXWJjX@FfzrTlt0zg^kWNv^F}2Zi#= zbUsbGJhSc!#b>$r^nHr?Mdzv~>F4=Hm#Hk)z4MEHrjBO59J_B1HL<*hn)P}Q#XURE z-naAYeLK(Ix8wG{9hdv|(qcaja{rFo`*+;lzvK4)9k=)IxV?YJ?fpA$@85BI|Bl=H z_a=M)j@$cpT<+g5uqNXE9S3hL_Y9DGc^tgUDI2^g#r`H}xowHR30kvopc~;pSY}jSP#YRnkYV+K7F>t&x3&VP~5JG;=9vVpYr|{ zyeDW61f{eGf>&nx--MRwNBE~e%h5~y)1W1^c8c4zQ(V?g(>W*L{hD(EvX+Y5wN!i) z_WkDjaq#|t9lSpHgLex4AZx9-U2DZ3IcF$msS z!n;rO$L9Mn2t3{`LQsQ01pG6C1LpW~kTqZ2uKD7!<|}0WDr7#=Qo_tv$?#$mzMAX1mTUS!o?Re-Y%60T zuVM}rZnEr&?_peH2cq8<;t9fz0`e4CZ^mWKxijwx7n~JQMQ23R$?$tPjz6EmIy7$A zqH$S^K0Djr46UaX`HE!3gaap^5pLmcpY+Tl`6L=4#xzI|d?;0_7rzfSc zU&=YD;QY06u7oX?>ySc!;wEx#_a%I5hzAHehLMNh>cCn$F6-$&n;utvHoXMC>a&RO zBErlged153KJk~KUwiu^?2E83O0#dmz6txL_V)R#dn^Gm*q8Mc_#k0#+?L1HxGgUM z-#9EIyo@mQ8aLnxHEzJm(67CH8TMt^xmULH--3M$_ATx0$FZIe&b8FhC;QV^GuX>< zJTRMnvK&O*8}#Q9>=lLi3HJBce-$bS3zMa<=~Llt(!4>Ou$(yIYsB9`oNznq2*)gR z#}xcWa}Gvx4n}hhMsp5Ea}Gvx4n}hhM#o6ls;ZcAGfp9$4lwsrSx&wKDE?8A6j9f z$bIJdmqF`YNBfsSYa#Dhp+)$3g?}!HY`K9xyO!}`36Q~|j1TZZ!d`$+Px_}P{nL~F z=}ABIqIzeXQHD{K^bF>AWh&+=0| z!oAm!p(XR`-mD={-=$xQlur-m^JDRuiH{@Ylj(f+(q-Xu7MS=f7dPzc3^o6?4E4{I zJ=HGz?4*9rm=)fpPkskCvOawgo+$T&W8p5i9R2OKhB86Fyvv#WjwPioSBeGal->&N!XIIGw^c4cjMN;IGrLR+}KOcGWoj2y~5!Ka~1GD{K_`b>3S; z%QJi@1})#^oH%~YwH!V^Wgfiwa^U=FY`;M3R#cf7*0Gz8yau7KbNJlk6&986%& zut+#qOTOWwCu8FfI0n8-9j3t9yc>v}eUIAs=}*+cL2c?9##a*diVu7xq7LM} z{(F4eBfJLdpN**X{XOJPDp-)O)-QIjCr!MtJ;#AX>;-duCil^0PyJEfvt($=I`#mr z^W^?|ANJH!K0Tbz&&9{>yNgWuWICVmx-7FNABxX%@!#Q{HF%SFXARzdyldZ&Zzu}r zj{^FvjB9Q``e}QP{rHArD0>eU59!^_H>DN{d*6ICqQ1!=+dB7b?$x}b_=s!#o6H-Z z_4wwjeDw_T*apt^3S=2$LYQaygzpg{fSAZ3i{IhB9Q1!1`L-^0$l||tTKq@!<52bp zHLMMX3Zf&g;5-Z+*w5hNL??UzbLY?^2^(Ffv*B;*Jh5M4;-qksuhDO0O_IlY;ZEc} z$cK@mkiSKqLc7Apvpg3-0NGZ?M0&g*0g*A0yh|Qv-zN`}_sMOnJ(ogn`fMEMy0hbS z&d+_6J%v3c@Lp!TfLDtS-k$6^fw#Eazoa*70pXqr4wZY)1l87ip2~$bV|O;=dkyOb zp>n!I9${e@ zo^^x_zCD0o9N!p2>zTZ3HK^#BhpM)@yuUQ4=-jEQZ3}%6W+LC|gb9cb^8@M==JayE zHFu)Q6@EJ1Fn}~+eoUG$pAaABVag2i$_zihKYKxu@v1E{-p|WH=8X3Xe$2RlATh}w z2*DA|u@GE&m_G=T`o~qHTYpFyA75`}Rqp6fyu1DQ`rN3KTl-9IEv6nHUvFjA;L*=> zoHKlwSbBjnn)wAhH!bPizO4`uRha_+ug1Wr{xzM7D}PlelY-^2b83d73{C zM7E0lBYZ(*>rH&O2LX(=a-hg59zvj_55GdLOfrPIi48b_!An1<{qUVc#7dmY9 zeahZx->Ep6#W{iC4$e7|MH5xq z&PS+RXlrjXzE5-g2#Wj+{P!M*skWz=t6XSnZ!^BX;QHx^Lav*RxSs1;SVlQvnpq1p zvj%vLZ~0y;=04E7|GN8T@I43eU}V#o{ns@i|JgfO_fOf^H^ykyqQ>f_E)71Ja>CbI(EBzJbVEQvm3GrdRL4R(GdGuG9 z7tzCf!h8NO*U%XX7^hcO;h!1ld{jn{Mr~P5F7++!bAwJAK)NflnNcmxAP`@w@#D`f< z{kBCB<%jtTdYInmVTLgNJLC0zEh~^ah&f#obMDWI*9WfVXoU29{m^QWb`oKqd&uJp+C39X8J44KAi6` zlZX#9lm6_C9Oe??7Wz)`nM;Hg!oq9xXJ;&+FNG!aqi~~T5K{ONeJLCz*AC30KU1ja zEc){U>?u_A1FpMA8K2wYY5FV7V)`@8NyLY#pg*@o4gD476ZA0s(ZdX;C|wSHQZB$#|R525Eh2d^ACPxg1S@WLr44L8>g#7M6RzLp3}E(c+La0 z!*`uuH+U4Fd$Uk#l!<^1}33D6qVHOY{<|)p_k#ZG`m%=Vn`7jQm-b)xBI#b- z)5zV33-c%9!u*!8Ak?uI5}a=#Ychk%Eo4o$8*4IXYi~3DF2A0v$qXvDCu_1lu_lAI z_BK<#D>q%bdZfz}*w5H?8GESgWqq4-9A*Y>3UfB?5%#1#g7e*vHW^gzezfV^JcmPD zdz}sU15lrgwH>( zzvA~7VAj6GS2AMhF#KLqJ2rHuwo1B2GB4Lf!Ch&m8g(4{YR_=q~- zJEkk&sgLwmQ)l`G5NP=J4VmZk8+r%ohTa*G8)o=QGyMc@gad=ttqnWe_(b9ztaY+(-H zLe616e}9i`^*;B@KUrF^w_qQ^Hqs9!#Z 
zT#`q)zk2$7$aXSMhtI#1`HOE8jbz>91h^fnX!^4+MnsMZX?91Xo}O(ep)5ug{GS_ER^q2MMhe z+|zD6OhqgBZj(u>$~$Hbafi=qe z+}HQ#zWxSsFXURrve26~h=F$itU=bY27$KrHsd>#aWDLkHHbmw{*X1uP}U&O*4}1( zk7w+4#IB5wj`%L$IYR(3k(sPR?pwrfd8um{x5jqzU9LAMfIyfx$sYsf&zq!jp)DmP z&e--i($9ke2!ujCZK$JG=`3E5&ZY;wLO!Vklr=*`T5YQ&byRDRvWfz8Tab%t3qu8Rjd* zhiM@`%-3mGn8W!SC}E<9`mF{Pjq(l#tQ5Nmx8P$?(H4BbO0k>pRNmbfRCFrm1gsRh z3E#jspkY?h4+a%oNk4#1%!a2fp-W-{r+ zRLt`CfWXNJ>pTcQjUZ4*Qe(VK)=V!yo`0tna z10ZnnR>p>~g}I_5-mjWG@eHo5Gq~>GzkYJmg`cKBQm~h;_3lc)b$_1o+M{y!=b6pe zoD`q!vrpx_-S(*5sY?a1JL$qscd~Q)3!JYMD%XU&E7|+E$0dBD3`u|2;GQm9-L?ygMr z-*@RxNcQJd^e42wUp0T?A=Ld4>h=Eh^P|adRq4dN=KHOC%@=!VG#RdHoOlQRcWD2{ zs0-iAxE4k;9|#BHBdp;qmsO+{gS zIQ+B8?CDNcQCJ@i=doAR5%(X(eYXQj=J~fkt3yrnZwApXb2gDV%l%tGq~_Y`{>>oa z%bZQ*1@rtHp>?l`{;eQVGmbdn0n!LxUFzQmt;44JH-kvcb@b15^bf2LhxcZWbngi& z3hTq+hv=V&=pR_$E=>PCME}70heF{u_zl4ujFY*1hc%bEt&HCglu?gQuk_b_$~X|N zo$ZhBeW*G_jc_T zlJ6U=?Ln>`h&{-)18phExAxZdL+U3CWS$VlGd~D-U=t3&CLE4U_#W4@@Hg5aTu4~> zGhyM!goVp3KDIE8*u%_vs{guK*k3@CDQ}%!QO2W*Bu89-~}?iatiUV5Qhi*!g}yxdz@P zP%c<0b`yS;a>MLGxnVZYR-uA&4LmDSE?6mc6L!AWQm#QoucchDQtT!?i*m!PquelY z+A92lat$i_3(5s6#csmR_f5(*sOX!N3s#EVgdd~aFq2fECaH|bB-Uw@#;aUtd-cKo z>eUB(MqcF`omUT2xzKhK&m}@N{?+(b<6n(Gv~9V&zuIzl&&ZaEyl3FMB52EF|0s|B zqdfME@(x$I&~^}M4b~27>4K4(U14LK|Up6L#?#{0;|#*C+EgU?!_vXd{en z!Y;mVv7ZM4#6-T$yaK`NQ~ZM0r{K#PoiMry6Bm>CN10b3_;iwA@F~x1&_)>Dgo%qu ze1zZoKrmz)dtB30F0>IwH(?il0DW=*eR2S8Ie@l6+oSZ|qx9XQ^xdQM9q{R0pcdk{ z5Wj`^LE8rUZUcR{fxg>7-$7fHzKha#QTi@Q-$7e7eFwoG=(|6V7TO4-o3M-TOW#58 z9)0&7{RM4=(M{OJ=h1f%KuqK(^c@86(Rc6BU(iMv-GqsYN&K_)9R$55`vtwo6WR!) zn=o-PiSJF{K~O;770_SMMi||MUHpYSLkRPEwlb*P`IHS-irs{rZz?Z7dZzGQ|uTe+g1?p*F4NpD6O0k=;^SybZ-)dk_mh%c$ zirs`yVO}t(=qbz#V5Qhics|d-Vb0}V<0zcvZ??1|HGe$AVfvqxr7pTRORc#(OO@Z3rB0gZZ-!Rk0Dcz+B3oZv>~986mHL3l*6a8^8HjAX1ivuP%=T}D)_o53Z-!RFn?*mh*uN1(ZWeht zI*8mX@=@{$b0lSkc@m#68#uQ@4t|ailsSTDab#;p()o$plXf`x1G6_Pp52kH9ZBaW z@?h>G!tW*-oPHm1LKfeb2~NL>dUwQV`owYnc>m`6nHP|)9ZBaW^4F97n}5xNWKFn+67v>n!2-8R-IQ@ET!U@;}r=LN7VeXyC zbFxN7?`2K}E5&ZY8)=t8MK?01f|X)7;X3BjFlS8Qedz>07&+Y^2f;wbj!?>b2;oZN zArKD72Sf(r@NpcEpW{gO{w)Dg$hT;HpF;ZH^7;h-R%ktUx_>LQE+tQ)jr@cmJo`fH z&-uL_w60)IcYMTmDh_;Zv3|FR-$v>T!8fQc1P73(P)nY|_lbu~qKtBjhf0}j(m(VYQ(_iNO4+JpI%6yUkM;$@rI4ko-exE+;jKipRXPm`1D9^Ho zA>2m23@Z0Fe&6uyT$KxL?QO=lfjSGt)Y-sVnRglu{JsI&+S`op*QjGhJjFTgh&Sj1 z2q0hNt+X9P=8NQS<_)xeJ5NjgX5Ju>zn!P$Wcs@!UZUO|QGiAE>uaal-Z=Ugcy~OjrXUWi#-|d}2-pRj(#&7pRK0TaIjNc3;|3(_W z;|uv@I-f>emihZ>q4+Ep|2V(Pf#lyTeUf@s@V8QZ{zmG3{H@d;$ae}eR335@^)Kf) zMMuzvNAO#RzT?=@1OF*KkohI6s84;QPH`(gYxP{aI+LF@-;UDdW84>%vo~e>wnE0W`iBtBre}NxU^Q?A$Ih@zpV%IDe1G`Z{2-k z$N2rXovtrcL2~}x80e^C|7v_kd2X*Qp9F4v?i<`spDW_g{}qr3L>^3(P1=JUVg=dSP0AK|U(sd-A zt*f&CljHNL{JzkcBx=X;U(f??ET_x=f~M_g!j2x;(F0xdK>Bqb*7HfbuO%GD^gnfk z`{aMB;a{T0=2I6Li~qk4_Y5vl>d}=h_562^o384U9-m&nd<@bqCkgI2cJ#oG9{6AF zfzy>5Pupb769_ zUwufQch*;~PsZaTeKHNcO!K3gQ_Ah!;&iIH)gkqlz6o-s9naEx#S(|MQ)ncJ$|eM}IO6Z`Uyg?@ait zQVNrGxH~djpc^ur&V|R5E?l4O)ny)(`?Qm8eX+>eopg46AH?6u&Z;f-kVE=Oj`8|m zqf2;Obslcx$6D$9r=fqD)?`#Cd5kCY-`1vN`FEZTi{Pi~nk=Lshx6S` zc!IW=f41ej7MH`i@C`+G7A++bR33Zh7=+0EF(ODaT*xv&hQNOJ6~LKJ^}UDD{0HqK3^yE;eP! 
zwI=Tz(#w=Iaq2y+kob!3rAg1@v+8aPx-r?E`ghl-t2U+gd3X9DJx!N-+B1=R@8hIv zcX~fKom11{k&RWxqdUT3XA8?bl(0`Y9QJczSH4KSM_b1jItCa&f*dP|%X-#?#U}5} z!u^pYX^56FC-y8!Z}k--?YQh{Ov-rjxe&9adn!F2pRt-Xop*_8`N_DKG2d0baW2-Z zKW?3GT*|A!o|dnSBe^cR$~V29>G^b&o@OQw>6`E&y7Ze!>5tEne>nY){-hH*IvTgz zYY$%^GWNGum-6I3b1TSwCj9I)44b2xREeZ=YA^>hBy2xNjw=X4kEUr+}fqQ&*R$J-fMQd{i&yGTV^O6?l*~-Lu_({b(hxpJADOy-Cg5}@Aj^P zTsb?+{bKu>Q=^#H^&#`UoAW!eS+{Qcca+D!!{+KK`-^UmG2Av6?__uVoWGN+2ncd) zEC25LbybfYeU}=arq@&dZmaLyT(Sc9dN|#?}_Ha3!g~_Vh<#*-VSv^*eP|EMD z4lXVn-&I**-*9+)(v{mft+4X5_NU9Iqx9Q5he%zzy2jJX`MmULwjVl^M7KM()$U9@ z)9bYYTSxYEzjVE`be-8w#R)t(wV(Gd4_$Y z?XLZ>>yr&HXH<4e>7 z2Qv{Yf=A#GSPG3$2~WWUI2z7`7hp0>hY~1+bKq5Y7#iS7cpj#~?_m=h2XDgpFb7VC z`=AZJ1;@kV&NZ^3);HR4;~0oZ{4TNq5Z5^jQz;9%060LK&eYuFuI30w#7z$oIHU^Q{~Krd|b z;39Yl1`<~SHN@QyeX%Woi{bY$gt&6J8Qy}4_-ujq;b7v|!-F7)++KQvyyJ<(9xw=o zK{eb5eej(RTi|yvm^@d))$jty+;=*>0z-+fg&)H|VLb6I@DrFy{AKWW7*E`(@F>VT zmU++$55Z#MW&b$=4?_+_prQxB#Am0mK~*m%-Ceiq9R;hcPJo^?$|oK70h9LI$>;Fbc*(40eNj z7!1SUK)4g`hRfkWrdXqX+QDHYcMp_H;O9pH8LJ58?;$f zmKv$^Tb|JVjX02}&WL=qByVDMdWlxmD~-K+rSa3{nXFBjIN>5iD_pdcIxHvkvGvsl zBnnIAvL##+3O9zrYm?z|_?0HcEfxR#{qohY!Hp{afc)XjgDWd#Y!N1eZNHFhKWjTy zsr~a44f_rss%lGms@gEr8=;Ge!Z22l_q`!KE*m*TawReh;yDW zlB`r6G!keeaD4u;YmeVQAOB-@6OSFyP%~o8@k-=aS}byul?BMc!V;)b#Z3)~l7d7< z&q0y>^9OzEF`QL%qM@d~w7#1Eo=7#-7S-1jmJ}Bz8mfv4>J(#Rq|6s#`-^B;YDJ{R#np^`iP`nD|w74Na+l|`Vk{jdTE^w z*N+$#w#g91Z*rC)TWn0-`%cId8+HYlp zEpKGlX385eDr_@lju@S?RoHUJt}%IuE@d8Mb<;mOoi3}fMCudL>(C3FUQ5cka)Krl#`6KJB)aBPy;VJg&vdW5LnwbiVUuk_+LD`6cvEn1)id2|# zwXMSS6Y&)VwbF2Fua)cBhIL<=aHV!ihHUO@gN86lV!j28er%@M5{OZzUR+d&5CK3!!TSjA%WN6B3ERsZp)g`jz za5i0_*rX?{-x_Izw$TgDCi!5qeJ7M5jn!r>8Jl!mQKHQHn1n?nGieeflBg)LrcmbGq>V&2tQ%j{P@L4| zoEOpiCf@cbnqP69=O;ucWiG+9+7jklJ8Q65fVosB!lQAbfU(6{oy2%$^ zuOq~kSj}yMe9DT;ZGT!Dy~EI!?$T+S6GiBzzRg^*#g#HoR24QS>Ij!rHYUogz6O0| zvYyS%iN%!_CXRWrxROS>JeePht8AW(HMwIInmA4(=|rcdl!H!Mbh%*|UDBc_X;%wH(6wp3br+W0l= zxku8k1t2y%Mr)c9&H4h@b!klc=?h%^kVHdd0lj9a|zyT;1$dR-4~WAu{_8-t2NY;r*u zn~c*MRwY*FIzpFbnfOY^M{Q$;?T^OFMqN!+TvtQ9d0r^4FCJN-{cD){=&PoNk~(uc z);^M#wb^`SL7{E6B?TtGrrP=%S$*M$&D}?_QLxw&VLu8nHa8FBN6WD3^{$lHRGTO) zVf0v=EjLkI*wCPF*kZ3J(5G2^t(|Mw*xUOv0>qw3_vZ#~?RI`iloy&dNF#K^$eSCS zu3NGZ+Gg62tgyD3Ht2%P`AL*lN`8z0iRaWv*j~%%^2V|v!VQHZ zc=(odjb$Z@MT8BP>oH*Ll8ST{=7xr?Vr4aH(WL{7&UnCJ?!yw#c&RW3;w$yI6IIUv zqU-$Z^@A>TtZ0&9D*07bGPxVcc$Ivdotj8FX8b1VrO!&tB`p5+g_4iCe2s4MsV{7* zQ?lr>`PUbckLXMn+E40NTFp66>cq3GC@ki5I2(D34PnxdcUeJm)rh2Crz-7@P3j`O zR=G-FYU0l_gt`|=%XNCauqrD`G}yN2#gx7dO&@U+WM1Zkh=)$km0DI>Rb}hbSk$Pe zBvn>cUn(bC`!wpynLaZYtF}p>iLUc);EAS6*P*PU+UBio(kxY0SyGHUd5NuBJ7O!X z<^(vKIAW_T(DwrIv$|e;HdfabuP&DfS$tM`6ImJO zrj%8#OwN9h z*4~aE4+YeyoqwI=S;6gjD?9!bGM1Bhk1%;xGna>L+Rw}xA-l=9I#KG{*DlVaA&#}U z%}WyKg>9&Qb#x6jnL6;}R-%2lE_7Jp8n~JyPP$RjH;hc_I$oA|CZ3Z~Q7x-`r%QZO zX{}uQOkT~!<~avjWi1QeCfnaeXKz4sU2k?OSdoY>=a+aM&#W%-^`+!UdYQtcPUupo z(J2$1sX;ZFX$cQGFNp@3u$_%;v<+cxl}SIDn2cYg9ypD%o}wa6YfITrGtWQ9#hmc>V;_1kChG+cNxZ1+JyD1Kc=>!xJW;@ zlAg4BRn%Op?W{aZS=vs;tld1vyN<%g*h`DGzw4}|y+-G=iifRc9>1jyt9TG==0Yyw zezl6nndZtuy;--444UgpD@$$vttxI_EBt z;Tjo{l*1(MbV<32E3}z;T-Sk{ZZk_e8T_kwc#?3z=rM$A2+JO;*zqy9QNjteiYs9i zgJ)H}snaU@XEmE6(gz}?p(0s=8Hqvb3|1}WKF8fa!ZLnGmQ>fsg($z{l2nx?Qezn(=@rRBmD9)QI$TvnJrm^>WesLsYQiRNwZ2yM z+|sG*s2-g*-N0XS#IEsFYcJRbpi`?@|g?Nga8V>Scy1S|j0vsx3p7vC@^b2X8DA zReEbjRjw@2W4D@Z0J(Qpu1u)P3S(n&uH!1J%zR#1Q=)GgBITrsly-}h_Ey%IHJC`5 zL`2Fy0n?{$zew%R3{=S#-poA}H>G#8kZw0eI)$AXJ%5Un`AT+b#95@)ZChlcPur2& zZf{7k<03j!jyxag=Pe1_ZD(yKEG{~nNckia$aKepq)u#?+pdn%48`!)^%q%JP$zn| z418prJk5y|zuH1s!5b+FM3?@mEi7K0uzMvYyhdLa5@z{s!|e8I-xA$fwe*=}DpE4l 
zlFUTPV}Oxsv@~T@@(fmwZZ>2^k{rEhrOCHxrS_9o9Bk3(R~Lu)kz)C%~#?%FFIXndr#@DJ{ISCJ5GE!$!vci%}PR~(UaR& z#3i>ZOqi_-ZPV!+D`iFyUA7RqG6D%W8F?wv30;3bX#VVST^V zVV#Z#sZ_i?A&@s01S42W@hPVa^J<5=q=20&Y)D;SnOme?BBfm-rCoB<(DRA8iRk%7 z;$@y8p`K@SJPEX&`Nl~dF9XQDbCa!e!ZOTAr^8aT4eQI#hV|v=!byL-?W^r}yVr%2 z`K)bD<+HXql@DQ?kKVo|oy>t|-jhkFwx(9Lv$T|PiO#DdEzSC)rq=WqQZiy|qpY;S ztl3?-x}h*>Zy28Pk+bG}8aQh%tjjZ7hRj{et!x)+$!ux2`;hf|kA|IY{oJp&l%#w! zm+ANZb}qAFR%v8h%S##g`az^LKuc+-=ro|7&mru#he&;G))&{S`jXW1QGH2j9a&#u zo`dR3YI$oz`jUEmH>@v7l$MfjeMv*Y*pXZ@^)=*QQ?IXex6NT>0z2Ct3>G8N6IEcg z2HbW)!mj%gGy)`RWWAvCOt2BiqqLqUL{bH<%efI<`j*EBd4;4ULZqIrMOs~=#V)UU zw6wZxDTyvaL8L6jw3G*I(PdH)DRUZ9KX)VbT!9p`O4xN)!mblJBO|I3D;W*MnXtZw zC0xyXqRSvGTScRc505z}1wJNB3KJHu0>WmG18MrDoIWZy&jmBur zNXfj64NuVhB~ljG%vSo|AyPWfPG7P&pyx5IlLF1;iPU})Z~gUo;Zk8Kt`f)aj^1oX z>u@u77;G%1`JRe1v6@k39~t@nr$Ci8$Sy24d26gCK9a9IGqaM_Ql8bdUTdY*@gomC zuFR#Wo1$!3s%gUJ_UOXe-o!0JytTw4QW~hG({0~qo9!Q7kS0G^uwl}qK1{G-ZL5%X zt?3^iP^YFsS>v*wq9yxNveu#+2Ryc3c>6KQqb zXXv`m>`S61`*NtMw4T)k{-yen9cebU%^4ND)%6(+>G}+&baTc-dYZpEkLu3)vl++WdNPM+kFk&R}sW5R&5=Pgj1f6eOjPJ-1Nnbr2l-%J{aS6U* zaJm_H@>NSRZ%pz{t#fLoEKD6n~SgALCB<;{0RB!qR-Hey|WIvb;>XZFo#!G#; z-I(f=?Ka~@U)#FhNKbsij1lrTW5%?B2BId+xFybv7xA$1db~81Ri`cxea@0Pp5m)@ z+k@@&aJ+7~^EIO)>8l$Ziqq+e>&Lphb)$>x4|2LxT&6))%m<>OI+(^AyIvoq@=W^c zwUV{#&LEGGp*&2dm^@_pZAvg_DR+$*=J?7;d+_=YT{XREiE&WeQ?f<4pYn3 zoDmbo&RJ-(?+sVe^AeU0TFp*&6MHMtIjg^F!$ee7(O&zRr^Hq9@;IsMc-m^>-K#%* zXluwO`BzoZ)}&3pn!`qWLpI|_d!5a^?!%AvhHT=O(oJ4rJru7aZJfNyQ%&~8r;)6s z>`P4}xqf*_Y~rz*7lZni>B9E*myENwOQ-AGrS+|FzKI6fXYz8lOY*0E#-{r{(Lmem z#yP)TFxQ{kJg1%5%(WNVOvYxeJ8P5kf^8)3ydN>25K89Q%r(cpfb}ugn&>88zrP@j z-i*edGq3Z|o6_iGjcz{vKtIUpY{#?TP2HrzO;u%$ypq+rRJKXBG)$f3)r!$gd3th? zc)55ay;M-^HeM>&R8?6>hTNr%k5m#HpXwW3D%n(3r7IF1UA0ZWC==UAmls=oqMQ1PPosYMBQ{fKZA<25>a1rF~iSM!?$+9Mm5_>6RQm;Fk9-(toL@p=Q3G}y#N_kldxpxg0c zHZ{?uSvuZrZep`zCAGn;kpa#@W?2JAT+_!i#iv z{Gi(nP_eo8@$9QNMaAaMjm&nUyYnI&pQ5|-A{(Hhn{(oJ$xRw_PWUkqkLTiJ&IdY= zn4+6=QC}+KOmuTD>Pr)iRk95rHam{$8|5KgbW>OJor&nCo@T>UbW=~W@hZBhr`doN z-PF@;#ENd}xTd~>od?lP9h+;)*f0{^)Umn7m9Oh)U&s(|f4{<3xM_nKD-vJF?*iRe zu`g`U%{U3YxWQ(|O6UcS{4Pe?-(+6o=*?biX1s)6?qDNG6DBs@=f<{1>LoVa?_$Fuy6*Q{{QzC>e01N-3nNn( zGe-1_B>fv7eLd4g9=7#wfz0^Oy2(@bz38UyW_*Zl>R`r(eGx?+%-9g!)X9tuvmq`? 
zq@HGc7@PhkO>Cx~=AoXVw4WX)#!tWB7C${!+y=Y#lMW93eo60rh_9)8@_{{BzI1r0 zeSoO9va+Ul^>NtiOL^T%fNfd%d6(#77AaL^{%8(t3ic~=RANz4rHe=GiCNED7Fw-}}G+_y7J~K0nUP znKLtI&YYP!PuY#%^p}kioOG$_f|v8{nx1s4_1z=<*3WdA*ymE~`I6{*P2K9qkbL9F z3Aeg)dA@OHfm^+bUaz88G0^E%V0~UiKP&$#I^KnjccEi3(C;o_eeOb6D}NXIev7`} zqHi(KRX*QZ6upbn=)M)*x1zfSI&anR^H8^nsXj%;9CE95VN=w)BDY%E@3f)uDQR&3 zn$w2HS<=9PyaV~s$d5+;L_r1B_C|kI5aX}zTkNl%7^>8by|haI(to?zWOsTLk^tM25xlixyq3;DMW1gN*{g~o{6v;0PU z?Q$MrPL41q#b7>;2$%~^nTJH?A(43~26K=o!2X(I`)^leh z9>>1NvF~j!M5xqnSb}|)^aRjtly8H-jdG!%6%-hsVdu}Vx6s9Xis~(%Yjd2A zE2?*TrOh!BohPER(P)Y?zH+OP^f!`zM$(_q+sWS!o#FyUp|OhoR?*KY`V)E|<@Ztk z9OchZzToo$V*z8#jso=xjbjH3jAOkDjfitL6>&$Q@yoFS zLSr`dW>aq}^`=tq#31$!p3Y8OA7P?dp)nD(c`;XM;6rUdt!0LvhJ!SC8Q;)oLK-x} z4>ahr5qOF9rg0l-FlSwa(m;nf;H4LGl?J-J1QzEQej4bs7(9SJ8tC!>FwjQ>T?`O} zJ{sr}1ROW5Y~jF19yjt@JdTgN&kq&6pTT$f`J(g-&~ z58rWT5B-GA{o8PFZ{giQ_}O3sXs{?y4dGyj2QKP73-99XPpXTopHl!IsD&U-3JvT7lcn<$Rwj|#;hK~<#@}wHhnz6yR!H+{e4*4p`S3&*-_%Fae zjr?ikhxIoqcx&Y9Svuc>dM&7Tg?d-0*L`We(fyQL9rdXfk6b)*)d%Gp)sMSXB65ky z)q-CO{tD`^p#C}fJx9Oi;h%@U2KhC}=Odqwd|mi;;jc%2J@Vfp|2^`L`1E&AkJ5TJ zre0&}y+ysZsP_x?excrKpL&&=KB+35cdL)-_hb51Ir)alcdN$m8^aGCoNokwP9A<^ z_)|uf?&r2Iv>OaR82)|m?}L9Y^7kTt7yP^6 zuk~qn1nrKX-DX4bjbFxPBhfbtczl>E^ZdERy8GkZqQP7?)lsjum_wIupekj+7YGv9IT&WuznnZ zC=Vswu}6eo$5}2DRmtxUR2_wagm#_Vx9D3`FJWTm-Yw?Y90~0b+AoQhd_wOft89)B zW_{&KX!ms5)qX1-2@{|0eRaOg@#E)Txlh+SJ3FD>j%71P6v5xod*&LOqtTeN8{IR5 zRNq%3RA2U?U~~^g$LOVoUo`vBjF3Q8z+PVvQfPeqX^{H3W}y*#Hb})bE;Qy2RVr}+ zHU(8kH@nAs1t_(KbtmY@dOdJ^l=@^qzI_65#euI~>XV}`HGy-ZI%m1?s-HCT%X=3Z z%R>u|-wFfNZw(5KjVFWDM)spag+b~N`2*w+klzp*tTrIGtWL06#(r+mlZ}Ga$wufs z9Mml|9%Em9jQ#B!Z2Jv%t-v{3f&J*de!=QK#{#k6|}z{x*fWJ_7_lY z(e=o$CvVYQ#-|uxV`vrjq}MplpBv>5gyJ&;0WpJ6i*M!U+!m%kUsO;x6jo3h zZ}L~MXDX<_Lo=AaOwfov68iJ|AF=-G?dSb@57VCm$zRQ&Y&vuLHoqyqhk1@et~0uI z2cz!B=J#?fBl;n?0=d7Dj+7sP`3FArsS^s0GL9zDEb2Z@yt(s81$CCb>cDFWZwq}_ zAw6q3zekZ1tPU&;R*_4CRU&1VKoRwSFV3>msd^|#xr%~RB>ohMKW!o|+jNG#68>=b z-QahF|1SJ@;ZH)3(Zp<{54qJF#B6U6vpq?lcf-3I-jnd2gx3iFZG``B!bdjYzq$Bt zZZy9wgZ~c1e=iX~N3#}s-xO%6a$Q<9+Fup%P`rpUspyVKe+|y2qH`{%Acr~18N|^u zDBs8-p3vaT$RVx}95({haTmY$!kKuSI75T8@i_5@;I>(T>b4&E8vA+$=I|r-?q1Ng z?7h>N*G0_bBG#~=-avrvLM^_PU&uOM$lBY?I^N7W-t~G#bu_-naTLGKKJ8N3r-MTe zU#_UWObb%gST`4#tA6-SPv&z6wypJbfJWiMKy^R;PXTEM12i6o3W~6I0Xn@&ohq#N zR`_N1KWNAEtlI_5iH&-9P|y7j){`?t&JQ^=a5jI#*<4{rKKm_a_x#hweOzZ*d#FXNJ=CJX*kU(l%x?DO7|xg&&Y02do=F=$ z@tNME+wER9dgAWhhbTWpxkVF^OGM71UnHEifAN*eIi2g3>0g%6iCp7MJmzv*^y{2_ z``27oSk!MwzTNMH%W2Vj=byIU`>D%0(-y2|+6s;NoCEXO_kHuZl;=`z(bbW`YBl+F zoD=Ji_sxG!`R9~dbRl{!M9(br%R;{^=ye6Xs-RC5^x4IDcQIazMl;@M#%s}g`v)ou zk8}@I7P`QNwZRf(FA1jeF5e;J?0-`jUPQu||VQ z7m?nZQ%Ox;AFRS(3|2QU3szO(pA#AQGvIfDe>?o=$M^%K?&o~CpYx&kzR{l}O700Q z&Q-xEop zA&FSrLIN?jU@BwoMV}V_wkTIw7><4xRx=-hCurNkGqi1C5^Y;Zq+P*=st@}e9sG*= zDCY+C^Z7f}CVYJ(Yh@7_Is`j$X0E|rU!%ic*zeD=_kVfLroJRTpUZW`T;lURE?^WI z?XdYxFIG|&mQ_*<6B%nFzr)s~@Id5^g{Q9ER`}hHkV5t01)KViYw~nr-*jT%0nvbK z@OihJSj2h1i1Ypd;>riu&zlnuHYW}|Kpc30_^-@zk4&+gM^@yI<0B5*bkOEBVu)$P z5NEjOc7`)xGIEoVDYM)oQ!M9^75PVw2B}BbPiJs_FoU@N?23HjY@u64voA-(Z%=u9 zXn*qkp^dTG=ZsOXu9gQ2a@D%e3X1bL6WS~lZH{|h$~W%$!mW1G&TeUkb}!QI z@K=y026~wK4^w~bynJIVcJ4&GooH8~heD4o3-XOF%;84b-$?r_&byq2$P^;8p1#)8 zmqFVGZC|F&W$GNH{2=8$DDOdec4Aij>~%Ir9{fD``)G3?ZKl#@Ds6V6UMK3+B43Mq zTk>tm|H!psb9l|+U4>qSW}6oC6rg0LIY(+5l>2oOZ@}6x$uec24+C`1Rq}hhGT45dPxj`Nm@GAfI!r zc^j}kNi8cY!JQi-N^oaWb*Dz5ap#@jqK&*K?Um0-tE>d?vhFor%vJAdM5uR%tkaNw zip!BJ&a+>5^WQyT({c}?%S75-|9WMW`dwvZtrfj?Wc@6QP=9IKRP7qPPQ9N~*>CRt zZhn@``$Hbg zb#AV|T;o)&UT6qhD|7wjddC$;8ic$Op+eYCU*sHlk^2J$-0v>ne!&;F2CFY_rEhDG 
zzl-&HLjQ(#Xgl~?`N4+g`~TRb9U%?Z{^39N|6lv(SSO@PI{r&RVID*Mma`HC6*OJ$#qtrX$7!Xt0VVR}7($NIy& zB5b*H@=t6aLVtMV+W*E@2=9up<=P&<#`dc0D1Tyv*A9>J=PfJ=qWx!{!Qi;P9`RQW zFBD-bKUmu1^H+(`A0D}OjK6AlSA?zn8No2tUoF@ylfiL&JbwRKVOFGfl4}Q4b_JDP z(SNpLBylP`US%ivtA~vw(uO-Hi7LC2%C77`C(?_DW%)A;O17)`YlQVg+Hmcv{+f}5 zu*|iq`D=ysMA~rev;5~q62daqKHGm@SWl!4*RJlb9Z3kw+_Q6zf5Dlru``1^*YM{n z?3(_C3cHp+DflvFa(-5@>s)`GV6#lR_IdvE%d~{TA{a2Sc-e5=RbbGE**-cb-Qs0pjD!ZA=zQNzL z%<%?0+;QKivYY$Q{O#YZeUr+**?)a-cxBS9eT%`FgJGF;?fX@BJC*%_%6?E~w^!L6{I`}l7r_p9PCEKq2E#Jx+7GMj zPAa>z%6>#;C#&qP{#IqqMXMNBi#z>xs1C*3R(X9Z3kw@)rj;$N29FHp`@I=lJg} z(+YODCH@X&)(m#IGx(^#V=yd}uDwiUKc=#m`yW+Y*;c6Rl`8vj|HEa@MXs9uX{w`&XAlOlQ zke;Vh_R}hRgUa6MPYw>ROu9dVc}8VFtFkw#?C1Pl%N%d8BY$$R^}N4ZuvsQu`vrgZ zGOb{TYj5`V2!>_SwO{o2EYk{hxb{o_Ucs~QV3RrX$${f>WF z@CD1HyJCy{!^^aS9j?94KOz{GN!Q-*Pgnfj%)9@wBfWq-g9rX&|Nr;M@A*eYDi@Zy zwcq!T3hRlqF}_s)z&|>Y5SF=fa?oFTDGxJ3H(dKee@7W*8aj@`qd{?2y46c zm;Ta6%3((6hHHQ2pC8&0vF+N&{R<)*p`mMk?RSqj)Cg_6_BZ~8p&b$1?z#NdzbK*+ z8oKs({@{uU?GJCe_V@naZ*+tkp|FMA~re6aGgd31OLQ z|Kwj5))Q&NwSV?M7D))p+@t)(zdWob(uQ06q<=*uAuMxi|LR{E))Q&Nt^J$-@kl~g z=GwpeSB3RN+Hmbt{?(C$u*|jp@UIE$iL~L`fBM%(62daq{>#5EtS8cjYoGQ%5lINk z-1p+&{`Fx!kv81g|M;JbB!p$I{jdM2u%1X8uI&jt9Z3kwT-zJi5Y`iE!?m5j#z;a~ z=Gsw#XTy3TZMZWSt+FMsDN?zx%&qNH*)f6V!|FuZaBIf~UWg=wW$v8#1DnHoB5k;K zAn;-&AuMz43W1lxdLnJOcE!NUk%X|!{f&}1l^w6L6I6C$U`yB-BW<|juB5UntL!Q& zyK3N-NJ9D_cBKva|4B8 zJ&`tC`@F!mNJ3cV+O-3(h4n<*aP6eP_DDil=Gt`vJHmP*ZMgRNft`_ru*|hD2u-v*c;XpX~VUz3cM3Z2+Q1`OEnG@h4n<*aBE*3 z*dIv<%Ut`Kz`n4aNE@!*L}g#AvaeIw*Q@Mifp;SvSXkyBD0q-9lyG zq_S^T*|!AV3p=n1VOzx;?oqb-j~zkwxOVHn3dPgg+f}viP}z3|K8!Tfu+05gU7Ns1 zVLg#H+@riJ@Npy|EOTq$9XJ%$6KTV(eNW(%NJ3baKPkA`Ht=b%Sti}u_Xbv$`|NnV z_Xi$V*bf9g3md`z|Md1jmHm*)ZXftuF_I1{yQ9i}IB+CvB#}1U?>n6WMZ6qNqbM2mi@4|W_ZMb%?!1s}au*|i42Yv|aiL~L`DS;m&31OLg zcKQTPg!M$)aP7WGum$Lj6l>GZMXKAK={8v5O#Pa8}mzl@G>^wL=wU>*B+;`GgWq$${w$>C#dXf zl|4~q=L90%zl2S<$DQvff!Oe_2wSc_H4y%rqY>ng+S62aZlF@6a$#9;Bwo+-K%{&B zFgau-GXfREyCQ4_YkNF1196_v>VZghX_gm=KeIhG_%qH~<+6jt=LQmj%`zGMd8pT$ zubRO{fmP*x&wD+K1FIEwLEx;g@kUya?tCu^oUO2zs_aKq_Od|ru#rUCaL4^v;QUBJ zSmxTx0~d$&MB12N`oq>0flDF@VVV0haaG{ju%1X8?nu@Inne=AGS^#><%6=-)GE%v)%&q-&;I^=yNE@!bA#i&nAuMz4jVk+DmAy%2zYsX{-zRa$y;)_y z7&!CaCvj`Pq_VdJ+J?>T|9^#eYrUbLQ=#qPF0Q>ZuufsW9_Sw|Q6}dv$SvvH6&O&a73^^B-GPC@uuQu4 z8-X+bmZW=>Zw7{zSu@z-+HVEIf5$IqgwnP51TsQ9BDRB{+aAx`<@h;?TYGOgeuCiI z@2Kpeay&)~)@n%08g7-zQ+FiqtLzU{_Cb~XVc>mFq<3!U@^Z)h zQ8}Kly6>Hj%kj+AwGRglhrVdUcJS`esLs;4yLkwJxqt@UNsZm0CV7tFb_6|`LG3C25*9Ept=9t0B?bt zp}A))gtx-o(A*Uk!B+4fYz+@XbN_c7-T}XZcfu2}4Lk+!f}YDNB;E~T;XSY-H1{!y z@LpII-Un;J`(a(!4%UYcz((*v*c3hlTfp|PB{X*-t)aQUXamiiLtEGhwu7BvC-?~L z0lUEdFd3%8t}qRDgBh?p%!WN+F6;^C!d|cd_J%8A3S1BSz)i3(+yeW-9k4�|&q& zXg=!?!a?vbOohkcV0Z!!fv4b5=()T?Vj75s!(c2N4in)BSRJOrBsdb*g`;3YI2tyF z<}0$3g9!z8#6 z)`yE=Be)ngg$1w$TmoCerLZl06t;)UU^09RroiPe6|R8ka3#!ykHZ|e3g*GpFdwdg z%ivnL2Cjn};1h5&Tn`K3lW;eD3Kqeq;X$|o9)=s?arg{80iT7Z;3nv~qC(qLFlfg1 z=P;V>Bd{ggM`0Uy47P(`z)tW>*aLnAQ{Zu!3crTw@Ee#3zlAyQJD3N*hxzaaxD5UX z*T55S1N;eYhCjnX_zT<(Pr@SjD?A8)gNNbo@HjjLPryImDflP!TvZ|QFAxh)!$kNu ztPcNyN$_7-A9}p37igZ2H-+Z8cME8qXt##Zuq_nW9{ON1jDabTfAZFo3jHu0@^IUe z2`j)HSP|yIIG7LP;WC&2*T6)$0ak*WVP#kdtH9l`DlCH4;6Zp6JPgl<$6VxEWpv3*l98H*5@x;MMRTyapbI zP2h3Z6rO;88(GoVGGy| zwuaqdTi64(hdp63>;+R`ZGU^?srGhttt1N*@|*dOM@0dN@{2-m zHicQR1so4s!wIl0%!cjZM3@XG!4xVu19)wT9L-1*M7;b>a;6@nD{?EXQkZSfnHa&HDolMveX2HhL+^4mG=02@8 zH1}z3p}9|M56yjAGBo#TsnFb~Wk7SEmIKXwS}rvAX?f7xr{zO)pH=|PecDQB?$g%5 zJh&d3`?O8a+^21U=00r)oC|lud9Vo1hlk(-cnq5Rv=h+Wr=5c4KFxD&@qJn>H1}zV z(A=k0h2}o37HkTy<4-I8oQ`FV{ER!mSoyPvKa2U(jK1snGm~~6f6RYDwr$PW`LOE& 
zy*-!L3vYls`MqPLg`7%TDHHPFHU=8vuuqNCAYtg?4p3C+DL$(hY;va+O zv3(NOrr&crpD}F5!V6(l*p$4MA=`Bg*{>nImb|ecc{4-umhd|AHt>4b4mP8|6TE@# z9)@i9H^fhaH?o}xo70~I3vb~#4ZCxkhC?|{xQ%>`A^8SF^3Cuy@Pa zLHIg640pjbb(voda~X7C0gQs#P+$i1!E_h{lOdBJ>r`a>JmRXbHmn6>c{A0A^o5Fu#OPI{OwSz5S57-T+!JA<=d>H1z4qq}p zvklk7&TtDf>v|8&&SYHh80+_#+2*)Uz%O~7Q?M&_V%u`>3ai5ourBNj8^cFnOV|ar zgAVnQVGYq%mSPLG9b>T_a z5PI(A-W^ti&0uxd64r%nU?bQLwt$^rTi638!~QT8rol{@0rQ~10vHF^z-n+4JQo(i z3*jDkDLe?ThR5Iy@C3XSo`!e9*!#GrhSgzbSReL;P2m988V-Z);TV_#eJ~Bi!%TP< z%!TK{d{_^zgqOh$@EW)U-UxTY+u#9sH#`g`VGe8p=fdW28EggD!+YRn_z>I;yTF66H#`mp!BcPqjBUsJ z4_1endCU_mV7oC~30uI|Cvg7YUYHC$MXWEw!>lhTFdJqTavlxYE-*a7d4yTyn+(Yd z4axVw@#F_#4*6k2^5bv{+b0d#_V76|mFAjAc6y24DfqglnMZHO6gtgmD}G%($VK{D2|(Aw%+GFqZr~ zL-Lb`%+ZDA6Adsqi1!}_p4ycnjz zEWAvZ^BU{bknMa!UVkN=LcYO}e2XFZZkR`2WJrF{ko+*rXZt(o+0Jno9_2U;f8jVD z;yS^0B3wp)b$FQTS6%oyYz&XUmhdQS2fu*H@Hp%bzlIs`8<-1EzykQw4%RRH8E%2U zkne%!;oL#PFPLXT9u}Q|)yYpAlE=1(tT}Nm&~gnpLb26L*BxWytN^DTbM5usP9V>N*?0xe!{_8081)tF1DgM_RG}f;d!TvvcF2(J?+pKCoUjsk z#SW0XmLYjVSed+;A$c1^@}z4y9`Z);9M}TZfNfz5m<(@%sqki)32%XUuq9juZ-pD+ zZLkowf<>@3JPdD#C*U2>a~;n#U?OY-li*#j5xg6=fcL<*uq{l6_rg?oAIyaJ!#vmy zE`txi4e&u&2p@t)usu8sJHQjLBlKL)=Oj#oonR8|3>(2mU<)`7wuQ+s8Fq!Kup7*T z-C-W=0hhs^a0BcG3t?|q1XJK)*ax0~eW9lr&op2n><^RR0N4l)ge~A8*cPV3WH=b6 z!XYpd4uyFz4K9Pj;08Dx7Qzv*2&Tisa3nkdM?udGJkx-QFasvRF|ZLF3tPb09M%Uk zUv}HWB-jHsf~l|t%!I9B9&8Vn!DP4|rohcG74CrPun2nLA!z;qpX1PhCt(13Ze%^e zim(Q(1#gD+;T^Ckybrd9(Xbt?2$Nw|*dI2A8L%15fh}P^6u1V)!OgH5+zro#2jPYA z7`zmofLFuQ@CF#$ob?T>!ntAHi5aY zIn0N7a3#!#8{jgy1+Ia+;RbjBZia_pA^Z;RhNoZ=jBdd*e3%Fi!&>k-tPfAX#_$wu z0kdEmI0d$cvtSRn5T?S%UDZ1@q(gGXQi z{06RpKf_J%Pgn>iz&&spJP7B&V{kD%0aw7&@Cg`uGe1LuRpHAp39f|op=S#73eCfy z7BC66fsJ5$*aG%|ZDA@*h8Zvw=0Go;3oF58unt@guYjB3O>hUi7Z$;;@DLmfk3$EZ zgq5M^7M{7oittKU9o`J3<$ay zP`Cz0!%eU*EQBrL9@qmOgaVJj3*iZPD?AN*!q}GF2g9nc9!!F_!G^FGYzkvwOL!4% z3tPcXus2MBnJ^9J!A!Ue=E4my9~Q!uun2B|hv61@0`7)c@Bo|z55vdccW@&-1-HTI zTUp;Q5q=J9!JlA#I36~IQ(Dxv;zWe22Hd^{^55N1I^} zws#wHKX4HCWcwJr>r3X(Y?Ggcy~r!x#yr4UhU5)l3fs+KOV|e93Om8uV1L*OX28}k z7v2sF;2m&1yc2GLExzPD!kgeBcr*MC-U3g<*05qL_J_6L9k3z16E=hGVQbhCwufC{ z3Ty+@VKU5yU11*V1`A+!xCZusn_y2^2z$Xjus1viQ{XY!2cCd^p@-*#{a_;O50l^k z*a!}UE#M$%{!2Yj-9B3~?Yw=}?e8|!FRE^zGbNJkey6x?`g6!*O@se-(~c@`dJfa} z(l-8a+Ld7vf6}QJbawBpo@g@jpSMPo<2)GcJvYknWP7~T9mjK*$6Jl9Djsiw*YQMq zWLAR5@jkOuUWxZOjvKFyC0@K#*2Z|8=zu3G!RwvAR9-;xOEs7QBYl*hcXn!t|LbJ`d%^=u&vY!_`xM$L&P+${5?SX?$JiyZ*-XdqC1yHOm&i&p9qve0IlN|vCGr@I zy0>^F3~&|m?!DO#rP&f$o!~f89gD}pET82>U9?1AG)GP$Cdf?8W;UY|m&i-zNZsjv zh&q23NFS>IQV=}+C8qkZ0&}3B7RXYnM{g;vKKj2WevUcP7YpQ74#K=z$$FS+;^hVM za`*&1VJ2W&fjq%MM!EKC6Q>o(YIk0G708QbULGkh^U}UR9yjyijyu^Le}N2Q+}9V6 zo3(Q@;iUz3Nz^NdigTjKS~_xz?E|{GqD`BIV59a4xvl31$me<%0Sup!cv~4BY z0i(UKShkyO_cTp43+9={GTtnhb&G=wrZH*3VmqLDi%SM{jU!h(j&EBT6FS$-T*_i8 z44=?E6W_a7wuDdUdNXs^Etd7+6S~I47c7=F?u1rbEH9Y}jazIcG-k1^G85_^!CJG> zPArlYW}zKl6kH1H%u@Jpkv)X{i_9S~SL+iTd4g3DQ&{X(w#VWy+a1~YiAD05*=xlj zbFDCGKhk-{HD&~Btf0nnvu?i`=shdi@jUL>3(VzSZ=j;%iRE3`eUUs7AMJQe`=Ld$ znzm!wcP^60X~&v&^F{JvT(sji?W-2abF}&USkaz~7Rj@;E0}i8Me;Olu7%N_gheth zUYts%9kobi(XMRTzb}-Tw5yx;w+rRNIC0J~?avmvYmEq(_XMpUZtI6+S3-wzi}~69n&7SQ2wHQzG)9# zD1XqdZ`!>V%Gb0nHtmNO$``aRG3|R6%2C=4S}lzB+_F$Uqix>Y+gX*){qEG&Unq~o z#}-drt%dR=Z98>|3uP_sC{rqWp{$^7r}0$2JQ){TJdNMw%geOwG#<{E7iimQJdiIN zY1?VsoiEek{l(L`C0`cMw$r#FU*^!RV(PEVm+7?al;-El+j0KlDb3B7k7?T}&CHh% zXxk}G%a?t$?UbhE%PVn#;wkNvFK^MdQ`$CPcG0#|+A?3Z(Y90CG+%y=t57_p4f7={ zzC!VoCgscDMK)$7Ir}xAH`HFUf;|roa#}>%vw9R_o z#`Ma81@fA?82s--eQAMAjwxBFPc4uMwC!v!Um#hu?F`RfAYVwyLY+!w+IEJ=E|AY? 
z+Zi6RKt85zXSmk_`N>zZP&+P=?`hi^zMIj~wzGQk0+~nKF4Sul$Sm4+Rxeo~Gilpd zJ#T?bqituk@&fr)N*1b*`n2t={yATMq-|&Qhxzg?Z9A(+=F3~Yl7;&IeAz|YF4Q;Y z%TC&MCb!O)$7$P{d}h8pM%&Khs`>IL?FLKdM|&2{m<rLVem?HbWNbxcTz8uVkSP zoi9^jN)~GG`7)8VU8oPwmw$2WG~P2`K9rJ$ddqzIing7`CiCSeZ99z(=F2Cv?KIZL z@s%vpD)Z$R+IAXaIAq#(q5d^bzNT%b^v8KJg|?m2qw{0|Z9AnO%#%5^?UcSXPo~qh zQ~E0VO36ZfcAorB+fM1~dGZTwJEe=~$@jGFl+K(d+k7Ppb;3M(o3@?O5%c5?+IFG# znxE-f_8)E^P)Z1&yyvz%|d;W>6Hfaa8MXwM5a**I4=kyJ3r<2HG8u52LT=eW_HIX0O-SLVmZIh9P3 zWs}i!Wj0A=lMJ#+pShApQr#q-ZSv4u`6w>VImaY-+T_-`@;*rolU!?)tLDl9lA0#D z$R>5>N)bsdlT@=w!d!VXF5WrUBvCf`dyc$La-Kq_#;uv&n~Zq>v=Z zByZbf*Br@=PjKp(YHSNO=iuJZ%8gS$wZrsnZjLOE=hya3k;`mSe~vsw zVozu-o18UA){#URCt#E4IkJ+(p5jxp&4K?sTb_zbEI!3w+T`$T*+ODZ@d2Ccoh_S5 z>?wZDCR=99GbHvDZ?MU_*^(PysrVExwMqVL$tSU=IM*hVX3JcXDrP*RY?3xxW{}vE zo??@3vt@5wrQ(zRpiSD&mO~`=q_?!mjkDz-i9P98+N9xZ*-v6mdXi0Q%$BWjm5Wb$ zyiH@{-{PtipY+#lQaDS-$5$yn z>6>iw)GYZ2=K^zF%WYCHOHPs4Wsql+sk7u$61xn>+9Z9Jd`DuJL4TX{nkC0c8ay&9 z+SAb{?Pkdl67x3SWj>Eu&XPA_)c?NCFR^~zS&|b|@;0w-lghIso5ZdPpG`cBpTw?# zlX+$ye#nzArQ~gX#3qOGus_sPk#25yv-Nc zWNx1PKw{TGj!nkr$#*37j1RX-YM#s|v2XJpHhCmZW|P=6exFU+xBGa$TO} zlGrnTnN8~F$!}8fHm_xqvltJFJ>vnJMCZu~5_`r^&9v)(rhG?Y&-j-%IXqML_)6a9 z2W+x;rtBuMZ}Zn|vSp^cPGZmR2Aiy#DXU2A`CV$0{F$ zi9N-YY*JySd?Y1r^M7ZU1OH=&94E1-_r zZT^ByHqMZfB=!_PZj(o6$O#hrHlJgY=`-XT5_{6KY%+R=OeL`=eUMH1%#eH%d(u1G z{;sNc97U*u+}CkrpqD{ zy9^fCWY%<9Lt>Y~M4OD8E-OhIjF`^#-zEd5%Tf~aHhjd`&0QUU_nD$D~}D5?{;7ve8Hz4b7G3;?H%) z+kyA8QF5*sc*k6s5PzOC!FJqbJ6hR}n{wsp_}Wgk6Ec!ZZU05NGBG~M@z!P}HEmMO zjwB&hCdb!tCOW>{(u4hLnmO2Cr^&|n^BwQ^)1p0J+vMmpbFiOGlV{^EaJ=tL^PX4y zsA-S+7QpFw-Fz5tohEOXtMOWHwm3Z#qDoKCN?T;Oi-7nJp+*FzDWBr75FP|#^@?FBai0=7QWdhw%Gp3dnpDK57=n$6nQM>(vn5G z%|fM|DC^cr^qYj%U3masWe44 z@=YOh{{G347fttXInkb_W#dDo5s+?jdv!%#nrWOJK+{dN@aBneO}P zz9&cKneT>C0R@&71WZ^O0JKAwu7)#n;k>TuWK+ z7ZpGDHP^iplg(-WX0n`&YwUQBOpf+^YLkPLWorD@jyFWzX**t>EX(4ral9{}K5LUF zC(HBkO&sT{;;QCiyU2FTootrLjLGstTvNw8i5@d>iLv21_l5C2<#qsu> z6ujQc!tFTk}f$i8g(VW7!CdxWrYsb5t9bUEhi#C6Dq7?aV z4?Bg+Z1*DDJ$It~=DUOE|Lm4y^YJ#%m?#A?cSbse-K^DVqOA9|;pZRh*VZO?OqBh; zyLkRRv2!}Iyk2#!GZjr?kWRoc}hwhmZq*I!qi+mK@OWDb+FEz zAYYmy)!ZV12{P9dap?YMyey(fgsC|;UOqKN4vmlY92_r4Op&7TrBkzWyv#O5UZMNN z@iLzxQ5(nqcWNFTFCUpA3$W&lmqVt=G`C3Rc$r~}q|==?Uh>T0^e^t_n*B=2wR@Af zc0Vv)UVyPrRI^XD>iXEj`xqOXwR=%ve0>m`#;)WXUU^Zd&m1Z?N72~b)ZA>m*#z0 zQHj2!x3c7Ue@7?ky5izo3KO}7FhkgwB}<)$iy!_zo+bITqnA))VV2CL?VUw?dX}uI z(7}lg8TqIz*`Lsf&p)~cX31av&W@NtcFW@Hh__Rg9I4pB@iF?~q28V)n=5zV%(KG{ zj7dCAvg8TI6cfj84eV|gW=XdWPHf1_RLPPz6CQEA71%2#OIF1_Q2eOpuT1kYzh=rd zjx%tX`!Yu| zlf1YNPGycRvE(`WqH!|M+tH~r8-K<)dADMh;xEMG?G72^WIfYSHDo&aASI+Y-mdJ> zd7SL=_i?J3>A2VKaOXJrnH|mwSrARf$*X2M8nHveab`i(8z;Z>nrEBcYS`VXj*~xG zc-2E*$2(4TCG_R^$8!8*Wl=)!;&0(!k2ObfbgX>Lil`AX?L}kdxLFaq$Cj>$SH{XQ ze|N|G95ptMm7P&tioc~lZfiU`R=%f3t&kVZ9V`11`Z?ZgcE}tn2YH{`$FXU4hk;|| zQ-25NypR`tc&xl@4(@*UMO%-RB^6kPEvV6qc2xi3uk?*g%Z8|vThn%I8W8`gfmcARq_unzHS~?bgy+1U@Ovd|TsiDRGh6VWHh&&KUV2wnOm~g2^_?8Y5X1`V>DQ z7-o|}W2A`rx`f-n;Ogl#M!q*i%>}0I7}*@v(P?DbEyu`vfq~8y&3Lh)QFo*hWhFsdgX=G|0 zw!udkGP}Y9&J`xvYm?m>vNpPdbEQePWN=Ze^L&QnQt>L2th3I_4A~n!z`5Ea`8JuI zA#Xb!oNG)n$?lbzAu|FUohBwpv(CT_`H9L+P14OKoib!DYvfv!w6)0{8FG+1H<;u` zn>5XkXW6THqYRGUCiOC8mau3Zi1x$mB3zu7yKiF2kPXft$NTqaZv}2r%#8j# zS{5YosbT7VIoiC74v&@%T<+{o^A3!bC5fG#Xk+dkEl*T_z!7uqORkL1jh5U*mgJM8 zx&DooL%hj-MqM&mHdgNJ#LOFAs!pX+W!^HTd+ca=GSJ^i9L{b-M$4?k9*(zf@sP|g zx{Nkwq{C==Igx`g$z3*SHCpCWD*05p-X>R%mLnwg$=}5`xqv}3P0?nrvu#pov}~h} z{csVR{5wkav6s&{zl<`k_5CP$kA)ud#i(e{=QjCxlzc+sZNwb%gL29CZCLyKNB7({y5raleb1nCQiI@ zUL9%1^WsQ3%H=3w1QcfleU?SW(n`=8}jFcY|`xMXFc$;L5lucYyL+7l| 
zNO{G~Sy$?H9x3@;N9=jM*Y0rVNLkPMuxGKkO|Bg&ySNnjLZ-dmNcqB)IFAxFM#@G? z*y)P5JH(EZ`CMh~be&E&2mV{SoMK#dy1uc=v2@vOrpw&l_=+z-`_g4w@ba@WUEWUQ z^7BeM_kZc~DQ)|-_({9->U8Uf{PT|Gih_-S`6zTj{lV(yK&Q!+Q8d~jDTxl z`q6ONn~?5!_tU+1xctU}*~x!xxS6yq!{w8TsbTl0Ylq7-l{xyy*kQ?Vb0J+YT-GtZ zm=0%{k3qxDeDoPE2V^AoKkVOmxa^lvT*ztPJ6wunwBx;<_N~L^eaUdVH_*OzxO7bo zyVt#FxV)Y)hVTFM)*LR6#ibUX`2;&;|8UvD{Dt1@{x(cLHYe|gVWpSpqr>D2#_aug zShVMZVNzImtW)9k;_+Wod_K>2zfe3s%-m3J7$(^j#yQ?K?6Pv0OsJ5_??EkQW5F<) zR3VFv8Ej0Y7&BI}R&g;u*O3v!q$puL{R8RmJ4{wHV7m+-8D{3V{V>_;O?BcgFW$p^ zaNIgfUN>`mgFE0WhskpZW1YlHs8Mg2eCwa!brKk(xjRo_m&(JU5~E`)4wJw5Hys>r z6xzRO@)U*nx6-U-hVosS>`a(Q|Isvl{+%Xu(~EC!52Trq>`jv?jIDA#dsVQ%)9`Yd z1Olea34b?UE9i}tQall#UN-|6PteP^dh0mof6W}(aX~0-m%5Ey%*RL z=cdUC)^qic7Y(Gz_Jqm2sFxT0d#EgAaoe9h{WR1Z; z8tus#D(~`+viHELc8|V8tI57hYm)iAO0bE4sQeH}FTNfAdx$ya--pPpsbROnUks5~%~}0qNVMm} zA+lD672giOZT($CG_}yB%IWL>@II7ExmU5LrnHdpkVU?vOo1 zzKBgNz8y}t$&ev(iecFwnDw$r@(_88xx3_QceUL$M7|8Jwws2?OB{A1vuv*(BKrbU zyv`L3dA0gOC`J5$9WH z_F&l`onCxLJjo`RgXJA3wfK%W&F(dDu*?n&E50M{W}Qxh< z;u~$!bg&$vj=dvpXp?$_Mg@)b| zFHe=piLB5?shs~*IlzU%-V#qumG!7G_3fvWeVq}hGKcG{UCjMbWero1*qz#P9{yoSX)$fBOD{-t7Z+gESB){@zPdGY= zJ-OJfcl(vE^B~zCT;ul+k~jEgcAP6(vvvRR>eF9$kZfc}dyiM$ zR8oj3rzLj2Xg;EP}W3G^A=CqmjlfjK0Hu1a0U|&(tW@rjrIqVy zxB}Rf)66E<43O>SAWfZ1Y*KfC93Uw^|2C;SKvr>0uwQ_DHt`IQx0ojTHaXefjOT~` z@;8qC0(8VChx*HxIQGp_WRo}hOCgE<0<_g8oBPWK_Da~$Kiae2Cae0(5)yw_W&51= zPLF+RYetuA`zQ66pZQjAk9(9&()!C>zJ1%{PO(Y1{&JF|jOs|$2W`@}zs%>DydmXo zupLeM%Ss-1*m4bQa$$ej%r_8Q?i`y`=`XAJ1c<6Yxfq*x`^!s|b3)4f(9hgXAMYp6 zn#Y`GyoYS^em{B2JP9z#n>N|ePo6Wk+*|rZdp6tTnSS!LNkYm!+Rset!hZ5NkKSy# z={A|%PxAP_YRiqb$*_L1nvayI0hH@wlkWXwHft<8q+DCuaYsKn#WP}C?naw5?I(+P zHet&(v`M{w@~4@*^C(xtCRO`MfmtCT<-E4zufFmb&l7FAAN!i|eoYb_-6uBrps#$+ zjabw>eWN{Z*<>dPqx13g(EP<9GcV7W4$u0&@|<}Fuo88dO&0Z)7tHmZZ$U;iPnss% z8d-g17%yys;WkL^E93aqY=WLP=+akGdD-^VYG;GH`^wmKCpP3Dn%VrCzOsS`+V&tW zu}R&&vW90gc226>q;g+*oO{=(0M&dp@${8-=B*x5?)yGw2EOVeTZ8NAW1GB35?oJj z*kpSjDKzWpF&@+KID0l6H@TDK62PxH*RzbUe!mwHs4nou*XGxWCx$8QMKtV zc?w;%k4)oQUZ*16v3=xYK8-7c9QUs&=D2@MkzK*HaMUKBq{!ajTG(fkw^HN{vlh0e z@chFjFQ!P5xf+I)TV*?zrN~;I<=e9|*CsPk>MYg{PN!N7RTXT+|t{eu^W2J^T9c}!X^!R%SXXEscn;Uddo{@PAX9; z&L%OvHe-K&;NVMhulh52|1$odYb8Zr>Cq6zCX6x6<(k-zD|*V0!O>l4liEGytKjIW*d(r}{A5Nal=Ir;uO9L> zqdP04-0>b9tmjA%*<#)v=48F!!+b6l^^nJdpMpEAv$cn8NL#9-!N^l4p~t-Q)vvZ!(^8 zV{MY&P5$86&J8Ko({^;}CLi;l+MeY5ZE{yP`P)1$Hz)Zfn_S;bJ~8F4qTJ;+xwxDB zOS$u={m0YV{Qjp6o=%o$nfdei9i!k~VL`H)$N9;!i)*tT+fYp;zd^$?}%D z1oxp{&tzF+zB_kLE?v*}CCkgE`wqHqOO}OnUl1}0SJ@FinC^MivFMX-P_!8 z*#i6f{tsqn=b7hy=Y8Lu*`1x++dR;F*m^;}ZcBp^AL}xxTO`Qbw)7Z1tdN_!QBpCa zEq$k-d-@5|t1TVU&pn+4Y15WY@VTc6)Fla0yDfc#b5G$7+N=!o1MX^k_Z``mZgYSN zftwFLui?~H$NE?p! 
zhi&MR9)(_lbZbLD=}~ASNV7I{g`-d(nraJDwGEv`6pFeX&M2uU+J<&=kIf5exFA{E z&|VyoVs0&aONH8ozR|D9k6N=Wf48RFK%~cB6XciHbd0aaXIk5o9|Sqpn*IV({6j5t zVcKEf1=V$}DGQz|_!895TT?t@$>*_ot+_kTXifj(>pzRz`s>ivlodya<2|4?t-#@k z>;=*HTT^Kccc<2A{kVB+ddBNo4NzUDHLV4;L_V!~J{cv5UD2(nFibBBPC)^3x2B?4 zwsmtmSa5r5nt@D1EmfhSk|1ST(OH-p>(*L9smR@mzQ;O^H7lr@1PN|M zM}U-aqrPa#hCOaclW?zL{ktXBKLxqol2+k%G0u(pV@r9W5EZsq&>jPxv5B&inc9jBe&KRDjEt> z2ZjNukl2z3pd5z;sfcb#Q+a{A2&e@FiD*glLA5W>;JFCpE%j+qmK^r|5n97Tec(kk!7upZ|4+7=KrE2yg5zg=_8*cG71vdoIY?~k*xx}Xh!4M zk4Me${8Kab{*Puf@bx98bAq30MoaW1rf&q<*Nj%^OH7*tS<{S`@!jkSXj>}Cf@bsu zY;#{?8Y>mUo6#=4w+s{{1&H2Tx(L#~8SUZT(j4j<3sMgVf^4wF6w{0@@(oAPW@*#X zh-P$ymzcsJ9@>odc`Pwmn$c)pVtU%tru@^C&U!2{U2960_=e*`(=>%Ao6=2QVmbne zgH7oGFEQ;zweyB!T~iv%OH5y&dU;bi@3F)*wJC>pd{a84_lCiO3~EZp_1@4^kgiSX z2=|8eP}EwGrcLPtdV~8CQx&Nw*OXT3OH7f16lqGU^d+WTf`m1tFZrlApe{fV@20c{ zN5y@K>0uML?yn}aU!Oi+735+QI;l?|PYQCh2_50-9Z58Q7V|M#- zV_L7f{f8je8`D|c?Q?>hYD`<%?PHDc{u4p=HKq$d+*grSOU25@6z;q=Um(D&#x!4F zMH(l_h{hCwS>h*9Hb{WJjcI|$DpCijXw{g;>8nWf1*zGXM(`Pc2YNX{;u_OLKEfrS zu81J{8q;V5&0rPDuQ5%*EHT0g?yE+0!ebTbP9vJb)xR~u^N)?_Cy!O6?;6oGo>v}1 z^}a^*1Fs@&Ym^rG)s1K#SAUM`C5`B;$12jKMjY)ijcAEJ1U?pIKqFeO4}tdu>D-7` z@(^ePMJ)tr)QGlX2)M5zRg{X-jp&HJid0;Xf{p05zKWDxkW7u}DBnf~LY-X@TO+#T zv5IuBAzSxHL;6#-IFn_y2-yYDhO>T_(57*b=T#<~OA6 ze6Xg2I$4mh4QUq+jr$7G0MXH>AwAMBW1R(Q*O2DuEjJRRZbN#)m$9l)S4ogE4QW1F zc3&YXC>6OI(hPltD3c(;4QZdgLSz%F4adrl`)W_9 zSQC^)w{#z@g1l%zmvtZRHIU=qfc{_~t~J2-pMsogKv&^|`)bc&so2|qPU{n>je@Lh zK>z5gJxc_c-++GN3+!~Ln=HuK2J{3TxUcpMkcvJH=)At#(^-&q4d}VP+S5pox(({l%fL=mf&h!zEXh7fd$u|t#&;}ggpawLX7kfCuRv=#CFY42;yujlY;oJ4; z2R`3kt&iXT)n~D@^=TefaI7bR90%eRez-ne@mSN@P@ki>sy=Sij?LC`g_9^t*n?T~UzI_30t6=|n+YaX|{! zr$0Q_bV8*fs6JIkK-hho059v&8huUYem&{`^{5u&d%Yf>{}tduJz9_W>T5b~>+$>Q z(E&bHZUc8yJ^C6CnTqN!>T!sc)}sx4H|{1dxgLGXXUEYH7+#N7^K!*U*dHhYDfMU@ zYTeg#+KB}%>d{^O>Qq;d>h)-eeswA%NNhcNz*nbYP*+HhJoRWL5cf5mV5zX9WxhkU zfcmm7`~0LXt@l{dxh@r#>e42CP3M##-_@l(`kKx@L3Y%oZM>$lp)T(K1o@&a9pE*c z^xt4is!K~bVxzzvR+lz&#N6W3yDnYk>aM6xu1gD0?H<0Qy7UEyuO_Oi)}^f+zH)Wb zu02t8=^9rTMs@zWw9sR=3i7ND-S$}Rxmky!aGZP>|hq=q2~P&2_N;FUZ$*=#j^2&m5_kR)>!3=bll545>p0^>a@@L3-7p@A=&G z9@KRbq)i<uq@|fsY5?wUsi$q z>(EKQ2e&~?sYBHoheYPH`wFF75O}jl-bt0vrNNqZ#Uqy2X5>}gT>Z>{df_T@aZ}}?vs+LW8R*UU@ zREzEaDLzJvAb#$|lSy9HxloHTp--L$^<*vjhhys2Wp~x0$NX6LCRDGhMbm5uv)dZa zXSFB{$A#B;rq!aS2zKN+Fh|v*1P=12wbFWTUsUrNPj^&ztwpnJudnem5i9H0q5|l= zHNZ&}pnNSV^75boBb5tvFu zx-A*~A{9T^q&;}*Th4CB1UXcbw(@h}d@9{0$cCDFW11?^FF`iHCktd8IRRi|C3b}nd%{XbRbIGwCccVNHu2uKG7*r+;CMWdW#j1esQyo}q;@8dbCo79E4C(*f3+0%}h|x>lz{yd35h(k4<- zzdC)Z_rofJl&enr^?n#BNRjGvjQe38sLLftSamuKq1}JP(KpyHxe~(5s?ibd zZ58HKlXEgFnOu#dIJO$?MZd5N1?v+*23Di}=mmcJh!^UtT7;z8wo>1`8ZG0I+5i%D z1gTbyCSjy{@j_zhYP@b5!?;qs8g0bf&3!2-n+Swfqv85ekgp)tYBa#v7oSvRyY5$| zVLY|GT@}Co66A7K>W{@1Zsbf=ZsY{R%C}YN7tDzK-0a*e{Iyl-Fy9ufgob5;d{&jd zgPj3x)NxWVqADHa`O3$jejrF{Rr&!)U`6e@bBPhOhE>y6JgZmbX=$aZ zv;(UE)&%H?6{JK}nu8nvAh+hSi}FlWX(Klm2&!EWTU9y-#NkH0m&jrGBaz1Qz4Wz2 z%>M;Bmq;UFKn6GJVX4@gNXyvYt)OlcWOX8KgukI~)Ok`dBa!}M>I6{72r?{@Zh)GJ zqdOpxquYmZ<^4q3fz~p+sc0?&4HM}f?gh1=qM9HT6KO6~WMvgGqN2E{D40mU@hyQ{ zC_|+>D3MNZD7_QYLixN3hw`5)G>2#Se^s$5w*~pF3S9ycfr}YT)1H5+!tps)g=)Zx zgJA9zV0#r>%M)b&nK`bRIL)&FpM4iqp+lIKSZ9Mfy$WsODav^4kFLV*4y{6~Fk14s zIoeAUcB?}7_+WJawT&Rns?ahZ`75PsqFfbP&P~LE8(D>}a1({9q*;)&3T;6f)=a1l zsY0J&zp&Fz?qIJfvmyUhrdup`uX38)jmmU_XGy4We(;im8szCU#fH$yh~-; z%O|n6kZmbQv0{7(qRl-5BCJWxS68*{7 z7seYhZmCm=ovv1io}y*>BBiwOV}xJ45*_3#hntJpq$0Eu9p!)qz%E}wtd-~hUlr1K z+glZBJG*$LB7Xl~k%q#Dh%?y#p&}c0tRfu-QsDrQJ&ahkRpcOUs7U(|ME~Uh91Nwh zmERPjex?1+pp~lnD$s&}*-p_J73h^e3L-kX0_-s3FHa)^7NT&-Mc7`Rw%8W|TL;3b@-#ePF$&Ad 
zOO(rLQN}Cy^(c=kPZK1{LqY$fyfr{o29~GI{%k`pK<^9Gxjb#~=T=(-YA#U2^0eNc z+o`Fw&A(|YU*4L*ia)VK$HlJ_2!YUMFHe*0+!0h1UX)`6 zkIT^w8@iwMuX3tCUfCR{DS#gCr`*sP@bLU!Ioj*Xh8`|wQ}&mmUA}DMHWW6Mqn*Am z&Nsd2ToIjKj)sYkzOjs}Z&pP(?P9KG^`Xiy&%dX(d_^ju{p~ikCA3Xh>cc~_iA#5_va~!zACl$D z(sPGCBukd1Z|zHIDnp0uOCfrv44(fi zLt7j?Brlao3&!a(v?xR$lHZh}#}0i+ZZAUz?aLs#4x+2d&;|z&$z^3^NM3!*ko>p| z-IGJs7fm^bWVbT3N4iRTC~RGZhKBHvY~&);C__7?`&0m-Oc{FR;2|01A`~b?J7lQj z1R-k~dgB(cT3~?SDHuUjnZ`3!5#eGtG&f)+qb3YZ z`_IPH!hkQGqQ~NC2p2?jcRX#he+AJ^5M38fU-_e;u5^hmh^Nm2zIKXEj;Fy~5Yb`r zw86d#qJtpXKc2quM?vio|4vu3YVq`gM7bQAat@oQc-m}V4IPD`Fkd_k3t%_0y9kbW z+HPM1%m)NpJdFxqW1q#PX}B9l+w5zB{T_$+Kg7|<05<7-oJ2YOm26)eO_C^Y1$|>2 zkDJwTw9cPxSPp2hK=a~glRvjQ70^V1M#s@ce{Sbft!*ARedBoC^o*ku;#cQ5&nsCY z5G+dFIQmuyiO^Otj;7hUBgCN)EeeXo(M`FM<%I&nE14g7s!DOR!Bpk`z}{7ghKhF^QCM4whOl2jD^OTkibv6cQuNFU z6Rp!r$$aqfyIsk;l%o6MW}8xJy|7^^8Xv*~hdXf9Qgp|Gf^?<$QuG`5W@`zwTci|C zkq(^OC7Kyn2oEV1wU?rM4irT6MJ(O&M{l-1jK%Z6u{2XU@b9r{tzL|!$srtL7CjM5 zcO58*=>Ax`>5txQ-3HN3u{1+E@Yk`@Thd?2ro~b!pJd0mbPtZDg(3QoOpT>~9Qu&# z5=#f{>!7<0bhn75RSxdU4P2sCV`*84J|yE~>6t?xl0{**5<|PC`}6~$PYewSVPm_x2yJ3$mkgE0AT)@f!69r?^%x0q`YT!S z80yPG&JT9(7#@+?V`!g)dus+jK?3>4&>;tRcLmV%XlDHrO$Qx3BL9p|yOLdv<`H=@ znjVN-r=mSaiBCtqbPfvK)oQqF`P${VpSNDij!w$Pd6%)zoMj zC9(P+M7u=OIEhOe6k0^nSdM|gh%7In@zJzId@KQWk!bo{yvvP3&S+ZBeg%c1;E3iC z=@(72Z7?z57c1MNG?4pwmCr$Uqxc1vx1(sQ&qj1|hJFEr_rFHb^ngw17~e<9ug7^T z9EhTwKAYj{_9)!{MDcHT*GAFA5YW^@+8O~Xm@f)uM$!C$El@B4HsQ*yiWd|x?=9@*J|1n7(v14=~EKmKfIewUru zqUcUA(?UQC6q-*I{pDbq=DRfgFp_=$E0Wgud9-srk~a9Uf*&IB`-ez&^qWX}>Cf)$ z2DBZL7IkAJJ#hfC;X1(VHaP(KXnsoY2Z+u-CFz#W4jkmJASVmdwj}-Ovs1O$ z>%07{B{hj9X}sSqXD5jhD6%9CbpVpk`1?Ns{aS*4W_f=%@=>WdSb~oDv9UWq-XhSt5_Fu&0dC|4QZuUr{UPLu zAdeMjcnSKO$$=VKZ0?O3y#K2NO|f(D>;Q5bftr<|HNk*_>Xk_A8&$Z*TCN2BYh`i_ z$R!0TT!LoW00la<0j@P5gDD6ERq-uBn|*deMuE)p;tc&$oVNSyK{VY=zbZ8si_=LP z$d*&Zas3nM+v0S}2C~KNplp(wHO1*~`(EckSt8K<;&dYzkT^I=pfSbifgSSJ!BF{; zK>dr;?O=x7+V3bet&7ttKbCIQ7AluwLiqA;AU)bsVP{Dp4mW_W1dZ*&|>s}Avp{_0$GaDOB>{^{}zS+MaBQ3^bbSs z!|j69oGD6U{SG))9ua7NQ5x(3Bq}!vw5BLc^kbEuL*){I<`<>m4nXe1ZM4)3ElNMI zO3Mcz_Y)HEqdhyBEVkZTB3xhNfFazH{+S#jjETXCr=Sd?xFITy&; z1PU!me=<49zbIez@Rb*ye<;eUm(Pmes;E3FBK{YlA8ZHFS-ARY5w5-{)jvt~$s%-) zt9`#IBEyjHYIjQgW~pCWgr3_D!Tl9QF#nZ`1yV7q2tDKq^ey=Vni&)|7p@U_$!-0^ zi0Qy0)PVEq+DQ&@*>6xpUL3iF6j~O99V>R`@s15O6_FG2DCAzKSjiRM#f#8F-mx6g zRAI3`GaBN*3aKiNBGl>#^!gT|(>~u~$B+M5r1G>djqnA|UokV8QC0b^FfFq%`$A#* ziP_fIbR8;8O}KVfVfvA4Z5cITSk#vB<%Q`C;{B|+tx%YY3598uF$f zDJ)>#!qnwBoN@@3NwDC;^jR>=!BhUbn6P4q{X4k!xDfY&dxdDV<$KsHe?(KgT8Q2| z0h7-cLX_0gg=k?gxby^XkB22NwDU={iIsN#lc-QNO#K06*2z13)XoI`2<3AN(J|g( zQ?gi;afPflRT)u;2CJvELpca~$fakvdRo&nly_LqPok$`Av(@GUNsA;p?K|NPDNGl zmN(Sz4LYL{PY`yjg$mI{J9bp~0mHHuqK~;Au|vtf5Pi=qa*2<(a5eNy;2rDjf;5qL ze4OhFA8QLmJ+P96zibC5g00rW1)(d~-h%WUUYugFR)#4)mUd_Xbzc^w3F^PLTt5Nb6q>p}4Ee4cEJ%}?8WnY1^* zyaY%{%7*;3-scL;|B^|{vix*SikDUS1E2WLhyT0`b5&(jei|8c6CG2L6VfYG^}MD(0u3SWbOSdExvt$H%2STYfs| z{krnWQf*07e%PCp-_J+ic(d|b`B?e2d~`_UUQ_;EK3e3ae0M&&<)M6n`TmFal zb?BUtkM4Niz|MG4J~AKOW;yF?%6sRdg>K3_ouzoV02=ZH6U_(Jg~?|5V9Chz$0ITQ20yjHuHayKt!$8q(&nOBY#h7PJO zBpLMyR&{4o^D?j`iVl)(TBQOxj5s17ccUdda>;gFN~XyCHq;S|0jc`r7$C?D6S5bUl4v+ntBzyB)DL zdFZsq5t|~_Gt-p+Bu8vy9{O31*vF!LKpr|Ha<6rV4tZ#aoAQQv=z)jw8B(2)ru>og zwL*F5AL(o1qC9IJddPCG_cdD{TI{C$QEvLnL-};6zL;AN%U!Yjcy78UmLJH?VcC_N z{$@EosyFq%c2@6ekM+KGU;3KXp~mLszBW8JJ?HM=`(bXk&ek(Gcebv%X{>a%cG5-* z0UPC}G2Gc~<ff%P4_{M9^S{tOFy&bj*!{w6os^pTA&ahY0#jc3c%l z?SG@9Mg)!W!w$E`^PdPB?T6OL#oHguyzmHm!8=}=BbeuipqK0j4Kf%e55sAK+c3Ev zPES0B$#AJY9S>d$Yt+R~|rx9FqJKxWe>RxHe zNBQt6xm`Go_Tf`5LLd=#6Z_oqL;*WU=PL_VT=xeU9T5?JGSx(x@W$*hr(_g56%}Ep8dc}#H 
z^unW8jF9ThIrU!gDv&RCpXa2(L43JekdtFOD<{27-z$dXq|eKEd<5HFMakRbdGi)Q>UE$Gqye_GrL*?QdFb!=qZbLtv@wIcS6RUl6$v zl!JD1fxo}7T5`}R`%}b%3y-qX@?aErT57$SoklTD47!k=CUe1yYrfA;mu)DBb^Ef@ zH5+7nxMpK^`j%@8nS%}7Gr@>rfNpks}H#=Rmy>R-S83atO zHSg@%>2ofKcQ3NhVlIexceBwfE_nZ*&1Supjn+!d`D|&yJdusIT0oFs?$1UCnIOU3 zl#S+CUpj-iG8@gazH&A+d6 zBdo(L7K>ZoU7MAL+J`$umu96!!6-=Iotc#`Sx4Y+YE6LXn5^`i3)VVX-(9Bl-LYEV zUEn%~?X%K)>qw_#jk3}XF7T6;)@oU4q`j5A?9*C0D=iDgpW0#XivwqpxD}C=CUHUf zXU42_gbUI?y|dB}T#)|xG>jH;LHg&PVfb5)C`kXj97fl;5cYGJ&3Y<~2HQ|@@1J|Z z=mq}doz}woFnVqo?Nquvj9zg~miZ8!9Y!y?#?oB#jXfA1Mwjqo36=K;g3LdhU=O;7 z(Ru4+^c}8f7e>qR1UA3B)Y=GUVxQk#YONkdGqLYo0m{pS(Hh31!}OmATMLHK7Jjaq zsoBHm0IK=5sMg>xnrltgRNKPnOW>mVNfz43>h5N-S?^?_dDvHf!~W$g+$YaxpLjv&?0ceoNifYGxo*PHd$yf_W3QX)}$=71^YfV(Pq^wG#awja@bGELc^`& z(CtcQQSE+~$ubV|W}yq#@fOQkH7h8Yve0bn1a!H8EONqyD?!fT^(-?jv`vI7_cPn9 ze`TiIQg9=j&rBn*B7*OKK=>{*jpBmEjj$~~EtbFdMp(Ou)yzy611F=2ikbN$Rys5N z5;z6LNG>WxGSdaQ*1o1VG&9Y0yViPTrf+4E^qS(kndq1|EB-wbE54G6j(fA>vs_eu z%tS{;<~7ATGtoFVi&tf$ztUSgD-+!foC-}7Me*oNbT4okil2(&4>Hl;EMt95ai>i5 znVZFpGts5=7MIUNSG-wqtSBy#iLQFH;{2jGJQH2kr*yySQ@Zc;o0I#Dcs~!NYm8X# z`skPD@4crZ)@$U)2>gA4dN!0-G=2=F+4dPYiuleg7@enPy0;y#72cPj{B`cKPDlJ7WoauaEO4;9HF!rzy0DH5!E63{X%IT zyTN}hLw%W%^*_l-3+#A-J(KQaWM6M&q`6Gumj$cmgmfw+Edt5U&4#@h>7c)E!-kBs z&tJFU^Nh6LpKVx>5zqf+qyzXxIloj`ognH*Wu*D|*`T~s_(Mj#`(>oP_(f-6JYwr( z31_D|W~6OF^DUP4TxgP!HU}-RSQd%%)ictTpwBE=94H6<2^ncCUa1fS{Yo*>Rv;s- zg*N_c)oM0rF*GABM2imQ_zK6Gk;d|0>M9~FMsOo{Gtl?`3oVuoT)38jj{D;xzF0k% z0ndMCpzr)~TF&6+{=N*fIL-Zy8R)Y#_g7?~h2s8a5S^QW7Qy|DY|O+A9Np32!Wj9Z zULP^y-9H1}0VlJY_D&h-VcG$1nt|@69pIW7=)N4_iqKv*13kb2=0B*dmK5!UVHULK zbPHwX3^dDcku#L`3^d(uu^!6jAvD7eee1G%Kg6c|6+$!db7=WXSyw~YvWp>94Sy*M zT|@aH1fAuwdNhRg;fI%bIF!4Evo(bNM!56hoQS(>>59-U= z3}h|iG%dqJXfd=D#-b?>xP-q?2yM2)lnyxAE6E|W1-~_1EXTW92p#8|%X~diYKGA7 z5WS*SgzmB-^c{Fb-P$e`LYt)RaMWjqQf|AL8^H%2NHkwL@cTyx9pGpd$Jc1OCBHjp zS(+v19ke{nlJ6Y!P_u-80I|nGp==0SwZTD;K)a`Y38kMqs8Bmg6y`>{%IOXoo2GJ< zgT{fYb>=}1y3Lcy%W7{(c6ZPpxOT_5ncKoa-}x;=EE}M{j)T7UTW+zGYUt7(@1XH~ zevrTwanJ<(Q7uh(P6z$Oy0tI{!v>ykYGJfE=x4NaMg2Dz-~R>EI9&GQ@N#$@LJ9iS zVEU8ie%5oSKNC!U;o6(vR=+=(PFuKs8|pU&(-~aXOXFJwc&s(PIG7%5j?DsNS};99 zbLBOLX3(d>G@T8?&sMtE8*3(ApHI*h%bESq~cT%eP{auTvmTLh}G{!1)hHiq6hv^Z!L!_Ij##h za@-1AxiE;n^7|4pGofs15Pj|U6>7$yFhUf38pKaod=Ny>{9y&T+1FWAw+o`x_OG4x zH54=I1kq`+uL`tO5c?9OB07l9uzlWRrG36U@m+KY?K=}T+7ZMTCO^?kK{VTc6^wZv zi0A(T*^m2yG}nJMpj&{h3v?-v=J~Gylt*)oeL5CMTi7RcKWMuH*@3NrG{>LKTm$GU zNUF;6K-$XA`hKlh^MTBAc<#(bK^=dQ!w2G{KsKs>Ag%Ua3#C1vp<5uW@n45RdlXs+ z(pvxZh;E}mPti&uS|*U5`LSpe*u?|sxgU$>Lm?uNUid*&zM#%1W(5Y)W7`I3Bh;&b zd~}}$u)U80=!tD3W>{E8)ceR4sksn9C;T=+%Z~y0{ewV90_ddQW~`{VkvB@s>HymB zw*}ur>p~s~a$kXZ2GH+J=0EiVviuE&mH|8&Ym9n} zS}%ac*|);Kst~CpP?-Q4Ye)ABiV8>*DIg*dTyM=5Ko6`e5(1GxfqVk!zLk6IX&+hg zd2QbHhd+&Vo6ueKr(5YKbcg-vkHBrvu*;u~+u~1m0=J{Mnv2Rxf4ZH13Od!F7Pw95 zhWXRE^b@)s{&c~c6(@`0w*GX{n-w<^#r6H^ygs4(Q=icNB1>~+@u$}C+fnXPg+w%u zKTU98a$;vFlRthn6%p)DKlAn7k0GnDUii^5W(6{I*N=Z&b=!|-g<#$h!q6`aMf~hX zXB-gBz|c`YcKM(mtz*H=3~do;ogd9+C?`Y9MAv72^b22j3o|rDbdB?)J$xZ3&d?_U z4fLZ`3`OD8fuGpP#W2~ArZHBsl^@>!=10qHn0=ITBUJIDFN2v-8iZ1Q^et|DED5Z% zkXVz)kAAfb#rqK$$|O**AMIqQyf;MfCpqv3Jzm&pHEZ^5;fmiaJ3DyIPGcRI&q@4# zvU3)T-r8Q7$@8}Bdc%hE;rEv}%2?_28 z%#BF(rJs0)DZ%X`toFY2Ewd!JjhGc)*Oz95VCE~qttf(}ed)9Vf)d9#{EOpnLLwwttX5w{hay#*>H;3S?H=a*uq1`+j7v!)v4acj6 zGO*NkK{k5RT4t5>rG`sepLo+no^LafBAo8tv>I>Z z$<0W6;k5Lob^3g}o^Wb-(<0cDpE+fPQ_7nz@obur!i+@Z^`@WDw~K52Ih?z@!<+u$ zcXjxBWB%h!Q?caa91T)^-;1tbf>!dj7k>ZlMLW=O<*&D%_u?3w_M#2w+|KbK0{gt^ z0R$qpL12>?ZO7bA{wUQKBCynpZt3G=mLOBSXg`mSG2oBz;zReT7ac+~{#XjZ$vZ6l z?4Yd|&kmY<(FB>S)Q4nkFPbFNl*%ZS_oB%>LCKL`G>?ep@S>Ftp66r)JJ^f9bnpzv 
z8wINuedT~?M0(Mi#I0T>+9Gq3b9m0`4AEAZlN?9k2+=m4ljKP+x=KV>5Dgd6&%mBb zG(tqDpfG`GB#Y)tFPb8v-HCQPcskPw>~=(Z96X(Aib6x8y$*;LNG}>EqLD-kMYJH; zd5IQ@Xf_lw6D?*@8C!PZ<7p%MA{f(x3eWH~*<%|+_iQxT1}Ix5RNmL^{kb@{Q1#Jo2+}#VHi3j&>k!8W{7W(t?LE)+Dc>Wd(k%- zS}4#QE3F6yl$W9L0*wTV1@kfVfk3HNn#xc?hPnvU-b%+9D#TDDf$Cc6G(#mBswhxt zIKfaPL&XFtV5P4aif1URKpCxcgCXt$R&Rk+D-E*&s?4EyXrbZueR!=Z7k;GIUF&uNWH0&_#iMQt3KFA29T- zKnGMB%0{f_`$;^4&5~YO^jDY@SNLF-G5DN{`=Lyf)eXM5Xa=lkHwA zJxD*M5**=Mf9Qn&pZzESWC^Xv5m1PQDO7F@gGyqg-)b*C03yyyjgKkQJi0)AH7*|4pE$0 zp_BT3!!`ZB;S}y0ycM6tFI9Zus49x0zS8%IZ{h_TBDIQg#hDXOu2b;X5C?StC|g-wy`yrU@>cF&6<5KM6>O67JGO36>DS( z*S;diVA(2o`V!GZ;uv$3J!8V{a#Xl3HyGuST!pYRG(3jYW0kMWpS$%C^(VWp^249k^KZ)FBxM2BPC=73BQktzFt6s3W` zqTB<<^?fW_&e#(trgM7(dk#$JU{_&ms#AAoXMQigqSSZVrq?lc*@=Dc#?y5rx?yY_ z+v1K*GQe_z3EwBu&M@r zoHoEZ8Te7$z~;XVu&)jLC}&{vaRaQCfz7uKun!Du_A#(|mI2n$z~(##HZKP6OW<9R zBU7&AQ-58|Du`Jh6EOSYi0)$=hat)qroFyi*2P0BrE& zeP@7;ar)tIbC`i2D-EzX13wNMU`-7C_{{)IHSnXnfz3Ffr}J&6fgg1ZY`$oKy>DRi zQv+;_fz4SAY+hl2r5M;8ZD8{<@OUhBK@Jm?$gGOe9%aU+xnXym*aXu&iA`|&;cjyd13$htz{(i-ant~7Vc^G2 z18jhSAC(Pkwi?*H*8od0u=%n9*2}==mj>7b1DmrO*!-0N*5AO7I0KtEfVTzsN67iJ zFGNv#qRiMA8YI{YCwAQp8xP-@*UAmc20uKpFAXrf*e4zDhym6NzIp1qYJl-wk0fYmne4r^mV%6NRobZi# z?${~=46jT}$2(?#wS;e;`mP&bxnYAR@0tO&$myHAeUS!!>@~pZ8~E|70oKF7kLL#1 zI0HYL8rYoKz~<8iSZ4#99~xl83~a`;OX=db*Z?bOVDlaWY^;GFwGC|E1|FJ`$sgz3 z|N4UGiQ%_a>9DB=SOk3YlwE6p;eD3rc;6dfZQz@yzMBSEegi*#H^7z|_!ev6$3X+E zk%1pq4X{22eqiY-ogb47u$BfkXEU(*tO3@|z~(0g*eC;=Ga1;t+yIL)u=#)!%kQ+Q z68g|YryuUvPGCKp_CQ1#0^R_~cw%1Qcw#dQu)OfYlefVD!>fYR@qRGC+QTSMeSa8W zytk0SU~LU-&ShZpMFWie_l&`F18kgu z%~=g>{=xu@H?X-SY*~%O^EPiJJR_;xc480QutBhec_ZC0U)bV_%{IUaK&K~fvjJ8W z_IdJ78en$V;K}>e02|@-!`^H!Y4E(rkfb}x)qqKp|ycX{1+g1ZV zY8u#l)&P6Yz~+Apun`6}XEd;RsR7p0z~&MLHZK5gxmzE)i>1`QusI6J2Y4>jCm~s3 zpC>le0E>Xlp1idNSOwVZ$vbL*K{OrjfC2UiZ1vO^VBp7m1FVRFAKMMEng)KHHNf68 z@FU8=W**C)Hm^1CqmqHmCk?Q61~&g?fPHLWv%i7O^9-;q1~wNku$kA@YQRRl4=qBu zA$YttpkuwUe+?(xyxyu~UhvHmn_+4F*_c z`0L60&H&?cvM2Ab0X9TU&vO{~vB&@`Vc^GZ1FWur9~TX<_YM3gWnlAT18k##A5{%( zK5c+?GO+o+0XD?IX8aY4ba7s2fc?+F<{}0*&jGIllHP~*g3}bKJ?z%8yMc=GJ}`eI zzHf*{x&nKjIzn=Oo8DQC9gD3BT0XEy| zhr7*%4E)$?fK@l}<0k{Gvw*f+-j zD`;Tz76WXkfge>2Y+eIi7U25v;Op-&*v#`$G_UZzI(C&1*zAcdHo!_k$dk9n0ILU^ zJ$dI0uu!z^$vb0!O@Wb~ya)q7)*4_H4E*@s0Bd95#~lOgLjymm8QAP?VDmu(tciim zzZqbu1~yv^?3-eMg&Wws#sC{+;71t)n^%DM95|YiKC}pVCzN}*VSN34fV9&M zd>P8ejtqY{sL)>EbfO0LyD& z^9BR#69Ydg7})$3czh1j`w(A$-{Z0y#@AmT#TQRp_$mHM0G{ax!hFwxXF4+Asf*+EG=m*2l$Q8rqqWjTX{)qT+AAHDj!GvbS?P@L>fVC}=f|mUCB7~B zQu#{xT3My6R@NwMm37K`+@)_+HYuBxEy`BR7`7`rl%2{hWjAINdzF34e&qn3y*i{E z#)B3Jol;TP>7F~E;~1@sQN}9ca4ndCwWUeQWMvATotTC-@)^oZWtK8qnWM~A<|*@) z1P%SXrVhRhB8smCqF(@gAT5pX~m>`As-J1|O%-|2N$RdKq%+M%R~^f1?~x zzEzGY$CTsBcgpw534FuwgYu(tN;$2ZQGQZ>R?aHtl=FB3@uKpJ@~d)5xvX4Kt}54X z<+Wf1215h!H~tJpTexIvA7{MznIYrncw&`ON}Lj}Bq*hoGD=yc9G>{AfId-4sjO7N z^DI^Iokn%#|7!0@-5!rzxAp&~A3weF(5n~!|Ix4TBT0rIbNUTc2nan^KBm7Z*OeQ} z@5)W(mU3J9L%E~;sr;qG@4oUtd8j;6{!tz)Pn3U^r^++sx$;7Jso>0_s(8hX z6=$vg{@iu%7dp+^Ke{U2l>aI3E8UeIN>8Pi(p%}Hq$sIMU!|YYUm2hbR6bA!;i=h= zl#i89luwnx$`EC!GE5n+jKEsoC_KmVW_i6i$#EvktL9Vls|D18Y9Y0-T0||X7E_C> zCDf8?q#C70t1)VPU5zI$9m0 zj#bC0)3>O6J6xU(NewVV1s^?kLw+C%NB_ELMRebf{+Rqd;>LB$) z^&|CT^%M0|b+9@_9jXpf-zIO)rXc0b`i7b6$9(K~Y`;k-`(VySt)Kn3PAx9xbgG!6 zzuC?dQ&w^x^M0e>=Dc?G+5h(2Y$REyDr5G)b@CG~@BZO2@ZY-3dCa!Gx$nHYUCd;C z{K?YMFL==xVZoP?!ewSHzb;b4f!_pH@Jq%6WY%oKB&i_qcwiexhXqIAo{e8Ik9Sn4 zF~Da7KP&uDJQJzmWr5!X_L1OWf-*l5c*(EAf_o!{Lk~Imb%2itb{r`ea4#obW1~Iz zCa_CG!-CmDp26cU11rtJzXSa6C=KUd5VQVdZWnmW*KB7F+ z)$oD97XiC0cn;jDYWQH_%Bry7ib&zm@GfM*9* z7AY9NgUIT{lY!p^=ChvhSgZtS{K3F00&DNYvpezmz-I&7iWCGrIh^=e;CF!eZ(uwY 
zYYCd3jLCk%iNM+;1%jW;i8la#1lV+>aQrqS9CvPPXLH~_8`1xfg7FPQgcBbKyg9Ie zf=6PFLgUW{{xz`UPCSnj-wIsW1l>r%(4W_dp9S6+SYMRMgW20&7%6lC;cIb}0pEFPGliZ`bAgRv0pI%Pw*HM_x9FoaSNy$??BsCuoli!`wW!4pEDO7{j?gEaPAK5D?2PTkSh^vZf6j0IoPbJFK=Mf0IqT7 z90(~p^X|#fngFuYzS!VMl#1aaflX>(Yyd=-!ak0Hb|hdcq7Eb_HH9H9OVtXp?UK^1 zXJ1rxO6e3G)zO)E!cKAzY`Q0RNRf=UseQX^g}z!z+e)E(azCvk8KeekOvxbU%q2@s zg|2SA>7JaTWqqAF>;v2d{-d?7lJ(QFEDZ0; zI;VE)q2-))O6}GYS=av9r1GW<`W<#nz>-1h{DDBk4nRVwPgsh*Ib?MppJz#w#>y)aMFkHJjk0EmB=+qA?BQkbmCxu55?`av29hCThOU7G1 z;8N(Tm9(uC_<##TGCAPF9H^l<;8H6YA7-Z-EyrO-xl@m>-OxsNY_X&!M2}ld#CVzmmb}E_LRKCK*@Qqqm0^q z$h5PdeiGE^+;+E~Y!+|PJF)HD3mfeOi3&MCav2V%FjL>@#U(j@obx%7SvW;9M>6LL zQWx&$Byf)7jjeW$L_Iv}jUo;MvhKXWk?GJKIS!5Fa-_6!GUGCmIP1X$aa?xTbL@0a z?&-vlv5UyqM=tf{ZHMF(Y{h)Ruu1NR4fk!X=ph;U3zzz7S@&cv@cB(L;dScKxerT4 zrtmgOw$ZYUk!=TTJ0hVyCSb!I2pM-EE@=hXcI?goMj;;qY*KjBD+QYlsn~SqO%K^i z?u*@2ZNuBn19{7%lwsb=80BoBKp3H1!&?q|hg9SpI%10xnq>WC+p!ZMoKvyINmcff zdE1k>7{R>ljtwp|yoIJ7$h-A)79{6WowLk&j}$JX@RfKOcGnQjI`zb+XD;{ z8TTZV+KFMlsI|jJb-+Hig1KHt zY^1sq?{rV;jEv7GobgeT9H$7b?Kp@9u<6vDHywLQnYS`Vki#2ni}&rGn%p_HQ>PBy zyN4@XAsw!CEUwh4S*>d0nsuwTt6DEnsobPqVx6i=V&&@XnpUdQq^hg7QR7Mt8!H%k zsY+)YeI+rjU1EH@#DsQz774jX z2u48=EXqkJC!w5#az~_Oq^?MP zkW!HbAPq!v%0Mo%T_TH$2*e`WB|#E`kb@YcAOsnR=-$EDNF~iZm>LNeBjI8sT#ST^ zk#I2*E=Iz|NVu5h9!!O#%RQJ1S)^p7u1I~5QjrEA4McKtk4=T0Y3{MDa1pXdU6J}A zr6M5|PPbsFfxZC+a*B;B9MRp_@ILgvdcgiSqes`83-dw!N^iD zvJ{Lg1tUwr$Wkz}6pSo|h9rV)AQEJ?2(p1lkVQ&H>Wb6{DHUk|(m7Cq&OHU5o80A;$UPPjEsYkaWFCtM#jO&I2aiRBO$99 zh#aygI}Jn*S)^p7u1I~5QjrEA4Mfr{Y?lZ*r-kj3AOjIdKmdHu!3No7AdHNMk#I;e z5P3X|jE9l&FftxS#>2>X7#R;E<6)#kkPSqFtQJ8w5DBtK$w*z1`XHqu4L}-*Br$}A zxGE?TLs*E5g3=KpkbnUApo0yv7C|-;DFH@G1ld5O1Q?kBBNJd`0*p+6kqIy|0Y)ak zNJQUtDS(lXbzKTzBxI4gBK1K^MH+xK5GfI&Y$2{&N)p7_LR_|#ju3$a1i%L!Y;03| zD+30lCvXZ-#+bN9!YxIh1oLXi`8qmd!uKgN51 zjNAVBk5SbhqoY6mqwn@dAMB6+=pp@ar2FGPjzfRMvp@dB+y1b+e}dMuiET!#%)ae2 zG>s5yik-<6JF_Wv7E|mnQ*8V}+BA>ogM4%?%%Vy@2Anzc5;|v z=k#EwsWIHiWr`jCn$?C1@pOd#iNK2f+deLKZd1DRn9`ls6g!_OOYkzdw>jw+lLDsL z1x>LFnPL|<#V%rsUDOo2m??H~k7ttJJa$T&Vn>={N10;Bm}1A8VwW<-j+0+9zPZ!7 z#$~)Ic7iE(X;bVnrr2dYV)ABx%bQ|XFvYHDie1?hyNW4xqA7OOcVN5vNi|dK>ZaH= zOtEX4V%IXou5F54#}vChzTtiMPXl}l{_dY7JjMOC&)_sdLwt|^wvUV5NS?lUGvCE- zjPKmv_HnVB$P+Ct{+rMC%}lYIn_{;##ct)nc6ng5*TxjPttobUQ|u1!z;=!Kj;7e1 z-ZRDShG#q6KX2}_@0()x@L;>QlYYOKDR%F7 zV7rcdA5-iUQ|wez?7pVh{Yd7v%o2xEIpx zf9UZmf;Y22!ZSqg`cc)7@nq4ve?GzUMsNGLV)Cge_F$YL-aHmvEvtCSA1?-c$B&Bp zJ2UKIcuMMBKPrA{V1_*c&#t+9>As)7&5@?qqfD_!n_`c_lVI+)y6>mAd#ow;I8*HL zrq~nY{om;=G)5-MJ3Nf*(xa11u_wO++ckEkm|{;g#hzx0J>3+0hAH+;JQw)xpIN5Z zv)_U3iuW8-?761c^Gvbln_@38#r_P>TfXh%ipfG#>_w*7i%qeYm|`zA#a?ELz1$T0 zb5raUrr0Y@vA;0I{?ZitD^u*RO|e&*Vz0(?!*Bbz`u7@B?6s!Y>rAoNn__P;#olO& zy~z}Nvnlo#Q|zs#*xO98x0_<`FvZ?!ioMGedpCZ$|F(~-fA2BH-fN1z&lG#VDfR(V z?1QG*hfJ{#n__=sihaZs`&(1&qo&x$OtFufVt;3f{XKri@wShvf1fbLK52^mgDLip zrr4)Uu}_;~pE1S$3BQkd+s75}pG~pPnqr?b#XfI}eZds_qAB(-rr5vYcSmpgxZ-`u z6#KF%_7zj?tESl3OtF76#lCKeeMA0w(VHXgiudoP*f&kFZ<%7>HpTwK6#I@T_MfKM zf8lp>Z~M67{kJLhT~q9Prr7sQu^*UXKQzUDWQzTd!hdn=%^z31ADd!7!EYGf_HpU{ z*A)AyDfTl{?C1Ek?At!W(g-i`7sTH7aj{?G7rJlzxY)0h;%_74O*LsBmEX7d=8uc5 zswLk{MXif%Q6o*Ttt!8P_st)dZkrnY+RE^o<;WDP~%@Kzqt^ont3)gNaZ(xzw0MhE%UBa@EmGc6YLPR+*{eMb5#aY z?2M+^p{CfGOtCYYVrMbM4l~8hYKooB6g#^qb`DeQoTk{hOtB+W{#$MD`pIL8omZ{! 
zu8P6SZ;D;O6uXcqcHwtmyT)Y^Q|zK@MH4Y8rdBe+E^dllLaqE(-7YUnnqo((RZQrP zHpPxH#f~+_E@g@xrzXDD%OPoucvI|zcVN4YPH9u@GN#yN)v9Se{MU}ll5(cl^rO|h$(Vkeqn<6Vqt_Wjq6%Wk~4@xMrExU0wF9gk_~f9<$* z*D%GdX^LIT6uY)5b{$hOsrwFWS4`@eV%ImtZlKnDZMJSTI7F^vnWWY-!ER`Z-N=+B zjn&!)mc9M646uXNl_Isw-UDdh=8x-GrkY~+HO1~{iv9mlcIE+A&Hw+u=iKu;_ujd8_L*ki znyIFmYHEr$3L!)xgb+d~LMWnX-#6|1p7u?vN=XrwlBg){Dea3EMZf3!eWyA3{>*23 z`seX}zs}>l?|VP*d*@!gN7?Ti4gYr&an)&MTgq-n*&k5$hm_skX!NgE{_p4fN0j}s z@i0+#2g=T;?2eS(iE3qM%I;!3@~@N9zdEQOW z@z{S2$iHIF=!~H3k(51(vPT>0U-Wt0|%AQHtvnYG^P1w%p%%SYL zls%8K=Tr6q<0)cvKBMe~l)Z?u7vF^KjLs6uUP{@^DEo8DUT!>1jLr(mUP;+sQ1+^u zu$|FaP1#>k_8Q9min7-lO^Y2Jr)dd%dClCHnvUpR*%>OWEI1_6Ew{ zcoVkM->ZT7ld^xI zPSn3r_AaAYu_N#F;BLzP&3K+Ddynx#v9>b@_fqyg%HB`e2Ppd>WgnvK!<79yWgnsJ zqm=yzWgnyLshhB!m3Er4&rtSR%05Tg=PCP7%DzC^7b*J^WnZT3zbN|( zWnVR3BzDceDf=JFzDC(Dlc~2s*(PPXDchoKp=_J7J(TUGY#(KZP`0141265ZI zQMO~3qwMmOonyXML~Wh0Q%D#>2liSTVoyHfDa{8ps zO_c4}bt(G}%D&TVT|{l2uv3$}D7zkI-%Z)~Q1-plYN&6%--Gs8A zr0l0C`)SH)Z5#_3|Q+7+rZe_mzKS~W=a(eI$%6^lwTT}L1l>Ihk zzeCw=DEnQ?evh)>zlo7|#?TA)(rR;9z2SnN3DZ7XHAyIZu%I;;hFScxFbb3>EAIk1a+5ITHzxff- z$^n!;kg`9a>_L=0*!-Ai|OO?j~%fl^ZB~BV}(g zdlj~(_{8^S@8YFGW3$<(FkO6Ni`lnW$?20HD0?eq|47-}DElYM{+Y73Q}zzCU$On| z2s_RGPNs;IGdjOe_OFz^i?Vl9_HUHEhqCul_CCtqPuT}3`ygc>qU^(z{X1nJq3olS z{Rd?qqwM39eS)%2QuZmzK26zYDElmBpQG&al>H}VU!d%ZlzoY^FPj63*tMOovw#0Z z*;gq0DrNso+5b@XHOh9mnI1GK+oWtaWm}XjlxcT)CUlwFUq z@22c~DEnT@u20$bQTF|m{QzY*pzH@J`ytA1NZE}j`(es{gt8x{?8cP+7-c_B*-y9! zIg7i9l(XiWQ1+9Q{S;+CP1#K;`x(l9ma?Ct>}HhxJY~N?*)LLdbIN{+vR|g`SKNb( znAJ|$ncpoa`&G(*jj~^-?3R?>in8CJ>^CX9HD$j=*>6+!JCxmqvfrib_bB^)%5F>9 z?I`;L%KnhD+f(*O?jg?NE+XZu`Hv~P17+t^c1OzYMA@Awy9;G^rR;8$-JP;~PQ1(*y_+op)5tdQ*=ajvis`m=YUP;+sQ1&XyUQO9wQuZ3k{))2K zx+gf(pxD$p%KnM!HVzF&=gzqSO17&Zd>`j#YJ!NmE>@Af217&Ze>>nw6 zn|qQou*If+qU@h3dpl+CpzNJgpZr4Ezf$%t%HB=czftxc%HB)a`zU)qWgnpIgYL=2 z&T2yD$$x#tm6ntl$~tNb#5&-l|q%BO4(`FykcM9 z5z;9;!|iC*_2(@T3Bp}j!@28o`IVWmmM8IJXv?szlkj*3x22bcD*3UBz1F+*)iZkFu*;pBG!A^Y`Lvlzj_j z-%8olDZ2(`*QD%PlwF&$Z=>wnt>wg6)}idWlzj(f-$~hbQFcAbzMHb|q3nApyS}x8 z7|Z)8`+mxPfU+A<_JfrD5M?){>_(LRFl9ettt@sd9pO>RZft$w+*)kvG0J}2T2*X` zj_?F!H?vkdw-%dv-ukjw#}QumKepSs^dG4gtu_A=u;$DrLV$*{@S}OUiCV*>6zxo7URGF)J$JjAd)eev7i-rtEhpyA5T(YppA)D+=RI zpS(xe?^AYL%5F#5A5iv(l--`PKeE0q?AM|aPJe$)+4Q_{#J7xEv>|T`J`zCCsPx?@HU&`)B+5IVd0A&xfzAb9t3gb?nd}4i97%eK1 zUwC2AO_c4}gKwg2rzS(J4TY~$RKl@`QuZ)wV^Kx^M|?PCkD%<4ls(GY^dI#rDm2>q zzNkxuai<5zSepx@MI{`2ELHY6s_gNUJ%OspL~Bb?jr))IB+8zA6SmWXQz(0?^}~PE zuc**8Yim)L{v$q}vS-|c?X+?xWzVvH{Ezw-6`Fk$wo{Wils%WS=TY{2Yg62~Np2BER3CI44vVW#(vYoPbP&L_U?JcTtg>k2qzfktCl)a0xcT@Inl)cB=SJ?KV z5>B7&rR;r_y`QoVQ1(H}K4k4Lsw)cPPM;j6?B6N-2xT9o>^~^`n027A?L{S=J~>X= zCn)`y5r1^OXH3Rg(+W;i4K>7!fbVwkTUD+oo&}WqT>xN7*4aVLL0$PuT&=4yEic$_}UO2+EG6>?q2P zrtBCwR_xkwgjmWhLD_MX9WTe7Y88>nFN`P1iNbVI3CB*9lSN5|amP-QQ-#r@5{{iL zr;CybXcoBs!2`Cu0_?P zwp=Z$afNZGmA6s$?UY@Ivg=az9h7~i{9V}gq7qJ@+(p^-DEn^8zK62!rR@5YeII4t zFaH$P&HoX9K(75q^xq+8bQ)0hgOvRcWjCbkMwI=qxQu`I!2h3f`s5MHew4BsQ}$z& z{kRxJE1#h3CY1doWj{sPPg8bNF^laJXD&P=?qY3co;*w0&rxHiIzfRdLDZ3SAzaci!CvS>}uv=61Ta^7aWxqqUvJGXw zOWE&H_WM*T+lrSMop$0Q><=jWL&|PX*&k7@{Ft&kPwM%cY6yANgerR;uGEBjOS0LmUn*`H9Y93@k!*Rw9UzA4l2aDSHBCPo(Tgls%cUr%?7( z%KntHr&0EFi6lmT24&Br>{*mOo3iIn_FT%IN7?fwisHfHFQn{65>3>5F=a2I z?4^{wjIuwc?B$fbg0fdi4AI|TQ1&XyUQO9wN-RnZzN z%KnbBH&FIQ%HAX;hwxvJX@C@05LnvX4^sAC!HJvX4{t z3Ccc6*{3M`v?LPqrR;N*eV(%ar0ffneUY*+QTAoZ{)@7&ND?vfS1J2%%Kk@^ ziJDxa%68dID-D~eiAmXRswS45LiCAHwoTa{JC&%3mnz#w*&$R-{FEJ_Y7%Ov5q%Oy z+2NEOLD`X%9YxvElpRCav6Nkcvg0T_-cBb*K7q0mDLaX>lPNoevQsHLjk41zJA<-I zQg$X~XHj-3$}UaWWhgtFvddC-Im#|i**TP5fwC*w8N`~eMA^BNUD+;4)T9bkb{=I{ 
zrD{@*vTvbka;u$5^htHfu0h!~?JS}uwWzXdQ}%6CO>U>`I#f;S+NFp-xr4Irr0lyW zyB=lVP1*NQ_Pvx{pR(_x?E5ME0lPFY@(t`Vg#938KSbFLDZ3G6KTP$>Bb5CpRg=bc zHqqaY*<}g)ams#zvYSx$la&1w)hAC=c2lY*&)DUN{(hFSpQG$%l>Izqzd+e9+U1Ep zX-?TMQTEG}{R(BbpzK#E`!&jb-OeHUyCr3}qU<*)`%TJjP1$cz_S=;Gj$MK1?>3bE zE@i(*+3!PyB}rur|bcgJgguF}CsQ?C zLbP(4eJf#4r|cP2O=eQ|EUG56?dn7;=TP=s%AQBr^C^1)Wq(H53n_aMWiO`eC6v9C zvX@cz=ajvivR6>{O3MC%vR6^|YRdkSve!`dSCqY$ve!}e*OdJYWv{2~Zz=mb%HBZP z8!3AeWq(iEn<;w>W&c3gTPgcT%HBrVKT-D2l)as@cTo0D%KnA2f2HhQl)am>f1~U@ zl)aa-_fhtK%058Z2PyjyWgn*O-zobDWgn&NKPdYcWgn;P6O?_DvQJU=Y05rB*=H&H z9A%%U>_6=q#QU8Klzox1FH!bo%KnS8uTb_?%Kn?O|Do(_lxN7*5i?f29q)_j1nLn%9qvcoAmg0dqiJBqTSDLaO;V=21?Wyeu=JY^?P zb|PgbQFbzAr%-k(Wv5YgI%Q{gY7uL`BxPq(b{1uqqU_R?U52u=DZ4CXm!s_Rl$}G_ z6)3wRWmlr?T*|IY*;ObzkFu*$b~Vbrg|ct;)Gl_-JO6*S>Ym#OyN2g>!mdf#wLEo* zvTIZJZJxSB*|&S{AnZDxI|;ikW#8esizxd}%D#)T>rwXIlzoq<9?{BsDZ4&p-$&W^ zQ}zRt-GH(mr0j<%yCG#aqU?t$`w_~1l(HLB_G6yAi5dGiWj{gLO(^?G%6`go57Ek} zDZ43UKSSBiQucF{-HfuIr|cIf`$fub?zxv3%a7gR*;i9wf@{MU~x~vineWU&`)B z+5J5a5q&a%vIlw^5@mnlX++qAJP#B0V9Flid8F8~o&Qh#P|u@;J&Y=QI92ut${tD8 zWR$1zzgqdf|3%_4ls%TR$5HkK%AQEslPG)g|FNA<$^I)fm9jsj>}ixegR*B*_AJVt zeG|5`F6U78T*{tD+4CuT0cC$i*$XLqk>|00t@;0&r%rz_rtBq@y_B+-QTFGQy_~XF zQ1(j7{(`bsQTA%j>qwJqNO^fYsXDolF z>|Z_46kE14mb)l>H)a1u*?TB^FJ{x?B6N-2xTAjJV*54 zAD(7}eT=e?Q#Cn3*(a%*obo(RwDL4%pP}rtlzon}&r|lFo)?Hdx!`$`urE^fC8{Qu zDf=&~CRaSoiB?{v?7u1dAIiQ)*)A{BO2hjS(I+NlyD8hEY@uwMvOV6Hi9YdCwvVzy zDBDlj0m=^bzC!d#7-ff3b_8WdQg)QL1<}fA@2i9zL)o!ZO-fL9995He?`uRW6TGhz zb|PgbQ8h`X>=dddsos{ww(|PB43{g7veUh-iY>ciVc{8+U6QghDLad@OHp=d$}U6M z**9T3@9N4@b~(x}PuV$?U4gPIQg$WE&ZX?ilwHO9MzL$BaOt@6D7z|USEKA(yl)m; zZ)XnPO4-#Zy9Q;~r0iOhUEAB5=#$$h`*zB%L)mpH`wq&!)B9Gjed5gTyS#4~Ydf>5 z9%bK6+4oTPy_8*_vhVZ0L-fi0l>Go@H=yhXDf=OB8={pBDZ3G6KTO$=Q1+vg-Prps z(I<~l_T!ZO1Z6j&>?bMvDerqkpFB<3O)2{s%6^uzpYy&?w6YmxKTp{&Q1*+I-JG&t z^0p=VHjj%GW8oC1tmw>^G=ZzUlpd7@gMM4+;A%%6^-&-=XX_ zR4d=5?Dr`9eadc2wX&VJJux~TQ1*wE-JY^P@_tloy`7Wk$KH<#yMwnwv9@!L&G+UL zc1OzYMA@Awy9?FIu9V%4vb$4u52}?ty&Z|s=|$PSDZ3A4_oeK9l--}Q2T=Av%Kn71 z2T}H5Zzp2phfwxV${t49!zp_NWsjunQItKJvd2*NSjryf?M#gPc*>qY*%Q58h?-2I z%AQQwQ>dCurR-0snoRR{CHiDKWzV4OnUp@U6DiJGjT%KnP7*HSfEN7-Lf zHTlNdgXoj>l>IGbe@EFHD0?GiZ=&q)DSI2qvVW%R z?UcQP>XV(6{R>r-U%kDF{@z8|yS;sgvVWuOJ(RtdviDK;e#$=J?Mw8@LCQWv*@r3n zcgj9O*+(h+56V8~?ML+YamqeH*(WLc6lI^L>@$>oma@-z`xE_rp0fY+4j{_DK-m{5 z`x0eertH5c`wC@WrR=}G1Bw3r$NLFkU!!c7kEw}4*`|-Fx7#;}Xr)EjLfJNDdnnsW z**?k+p=`fzFwx%u$_}OMFvGr|cTO(L@i{r0iOh zU7NCRqwL!$yAEa7^^GBV@DATt!oHKT@1kl_kFxKkYI2Wn9MQ^qDZ4&p-$&W^Q}zRt z-GH(m^o=Ka@FB`>NZE}j`(es{gt8x{?Cak?Q{8xcmp{S;lXLtL>i_C?8hH!61PbWd z?T@g5bX@m8u>^t9`{Fq zjggNAlaY@B)4^D<8R8|tEaczgM>%O= zJ>=8D9>`~a^^q?LHUcxjCSVrW3@in<084|d!7^Z5FdNJV%Yr??a$rP7e?)n30Lsk) zhl3Ts@nA)88dwSJ13wp>hkRvlDOd#@jCdY68mtOV2CIRyz+1qD;H}`1Tm2E$!P8(3 z@Df-PT#IsQfh*wE2G=8hn~EcUyUK%gz>SF4RYd*{a2t3hxC^`s+zZwNkAQcBr@?!` zOW?hr>o_U%myC>bHRte>R>~#F4zdH4?YYw0v`dJfRBRB zz{X$;@G-D8_&C@Wd;-h|n}9vQC&2;WQ{Zs$X>dH)6r2V=1I`1V1($-)fvdr0;Ck?R za0~bXxC49<+zU1bkAN?Mr@@!OOW-S@YmYyo1>gl=1tY-MzmRn_%7HQd=G33z7OVuZNVO3J8%H_0XQ7|5F8J- z2d9A_f%CwR!KGjaa5b0@t_M4UTfk1>4zM%07wiHa0lR{y!EWFsusi75>yPLGc)^}v z1lUW(EBGUNt2o#Pj7PjLm<;v<1Bmwr(~%zlW`P629PkscDmVzN4h{zEffPTncUjmw{P`e-3)F|11Xs;0iDY@s(gz@C&du zxC*QXt_JJE`%>k>HDCk8zXIzczZPta{5r5H_%+xJ{06kJzpe)x!TT0$j`(+AOK=0& z2HXg?2RDIT!0*96;AU_zxCLB``+rc^xB4Ttf?JUP5gd){+rY`-PvAzxe+GA;z1zWA zi0=Rwf;+(#;4k1>@K(zkvtAJ>YS0FL)l@2VMpDgVuh3!~rk>9t2~+ zLtrv^7|a5H2Xnw9V0tBg#8I#+@_&G}!DH$=;>W?d;0drkcoJ*`o&uYIr@?068L$O- z7HkWi16zaV!7kvR-~jLfSP$i21RH>tz{cQZuqpT#*bKY^wg9h!t--&+w%|Wtd+-|A 
z1#}r056}PygC@8Uuj2+sBX5C|K>=rhHn1*jAfErA7t94Cz{+4eSOrW6^T2GdDwqpa1FM6#fOWxJ z!TMlzun|}TYz)=}n}D^zreJNb8F(Am0=yk;4b}nMf_1@s@D8vCcqcdjybBx-)&s|b zcZ0JkkZ_02_lZf=$8ZU~}*#uqF61*amzBY!9{oyMV8PeZbei!QkuQXs{(X z8Egg40^a~zRKfEfT!?&Ya0U1lxE6dH+z7q{ZUftZyTEtBgW!AMaqxZcJlGbz3bq5M zq1+EZ>oA`GU;u0n#(*Dz$>7Ie7T5vI0rSCmxUVBv75PqJZLl*~59|Uq0K0-sz;0kO zushfS>;bk0dxCAjUSK}h8|(r00SAD6!Qo&(a6H%_oCXd6=Ya#krQj#vYH$#^9vlpA z0f&G)z@gw?a2R+591flaM}U{Wk)Z2$JpVy2I2w!q$AIzRSTG$N2WEle!5q-UZQjh_ z`;&OkMm_?J0<%H2&kqOHe4GbP0ITA@iC}GT5?BwM3~nrg^8&a9oC@v$KLvMz)4;vp zbnqZJ18nd+_GN6yjlgSQ6L2Qt^;YBjpss_n!7S{zbHJt;ueo3&oZshxZE$@)c&Q}L z4`2g4CqDx%)N>)&4CO5XTY!tf*5DGbEw~iS2bY0aDED))D!3d>M!74%t0;FRSUnHt z2e3Z43Jl=-YOn|H{}Sv2t^o&tUx9-`besMUSyvX$3;AFU*b%G`b^@D#>bx0I3J70B!>tfIGkk!Cl}(;9jsHco1v^9swT)kAshZr@=?T z^I&7}68IQ+6?`0Y{Th5fYJp9V^@2}=0q`j>0(=^b0h@yH;4@${_$-(XJ_lxj&A@E% zc`yfj0n7zo1gnD0!Rp{kU~TYaurBxtSPyIg#+MJiA58{dLp~jR9n1n-g4tjzFb8}C z%mv>BtAee;>fl>oZSZZdF8B^u4{QV02j2x7fbW5g!1uw%U|V$^^=PNAgCD5t;D_ou z*dA;GegrlJKL(qD9l+*bKG*_O`$J2x6WAK;47LHgfNjC9V0*9|m=AUbyMR5wKBz}e zZ~)i~91Qjbhl72<(O_S2JlGGM4E6`7fdjx<;6QL5_zAcW90V=}2ZJlXA>e9oD7Y3J z2CfH(gB!sS;1+NsxD6Zy?f^%FyTCEvUT`dU5F7^{0mp;K!3p4La3XjfoCID1CxchP zDWD7c|5U&NKLx$uG_VKyVLI3coB<91XM%&lS>SMRHaHrb1C9shf|J2{;52YPI15|= z&I3OK7lI4HrQjlP1-KYo4K4xKf=j{m;4*L{_&K-*Tn=snSAaXfmEbP$3ve&E3Oop| z29JPWg2%x%;A!wH@I1H{yacWTuYzBLE_^=u4Pb%mK`;0%7y!QmBft${47d@D2RDJq z;P+rUxEag>w}9E;4`2?s70d;H1gnDE!0O;nU~TYcur9bAtOxD@>w`PN2H-DXBk)(S zF}MqC0`3Nzg1>>yz&&7da4*;b+y}M<_k*p$17I8QAlMc>1hxkcgZbd^U>EQR*aJKY z_5uF@hvWHr44e!e2d9B2zyY{^5*!Sk0*8aA!O`Fua6EVxoD7}=r-A3eS>T`GJn#az z5WEO31uuasz{}uj@Go#Jcm-S!UIjOTe}h}Vf52_vHE;*$GBBK=0qzA&@F3_0kEnQ} zZMxn@4#;yEwwNxj%W{tvnd1*L4V4}sat3KrrMrmiM%vw8EO%RxRUu)fN5xx;j55Q` zfJ!$L*=I+XnA(NKJt#869~ms}4v`Z`>*A`3>_pnFZYwMDMM$J+d5}&KsT*aAN=Av? z6J^@U@Q6$dLW?zRW?7bdnkCDTj!$yxJp{LTqfMhPF88$L52SO{Z68_ETVl-G?^=e9 zj#Hu6EIAd34OZw`OD-ZE`k~7RSf(osmmjd)I6K{K$>o5{G*qO9j^tT#1rbw4%IHXj zC4VF0R*_g83Af}LB0lvpZcE07mN3KA)r)Rb<{7t)vRr2PQMctjq$7LWas?5b-7NQy zZn=P}q?FNBvD9s=-R@X%eeRaoR-7pdaN8UmndX+uh@`9AM!Th3Xsns7(gWPm73rLD zx(GdPAGzgtNPKYI-g3(+q%EbsUXe z9&V{?%2Ab8)3vH8cP0hI(m6CWIHE5aaz2n69MLBX>4dmmfDagQ z4spEz>lo4#alHWZ3|Z_+4K|~UAt&KPs%9h^atY-}M;n$q+>q~)j!|*9A;0_5%-9rc zY@y~A-HuZ(`OceeUWq!a+V7IzkcodHII`-V%`RE($tY;QOV%Q-+q}djQ<2tfnCX)F zfs9}q#<^r6(z@P*T(UDH!wl<ThaQRz2ba@;OyW~uaxE?MMH zGE1rS6Y!19;EwTtOO_&DLB;F1WPq7zR#hXK=aR?L%xWr}?UIWq<~Eg1cF9SkZ&&FE zmn;b+n{`wg^XWTJGH%rS_Uz!keM@ktOH?)NaNSA+x1aN455X-4Zoi1#K#it_+yC$( zb}UaO`m;;}yF!GiHz?VW=eEVQHBZ)iOPR)Z*q6T6k=1#!3lS5$V!?beX6MNsb@!A! 
z%bJiUD?+mLZeJ*yvReb#)On^JE9Edesf#dc!r!l&;R*}&tHoBnL=wPvd8;k9yiv6py4DeMlW6{RYlatwqK%`kpDUmwhuPnQLk!EQXxt6P5=yI-142?9)sK^N&`8`*D zgi}sMe$|nma%CP~s=SJPt0Q0K$}&XMBg0&hE2jfh%*rY~GgtmZx{6AV%a#1lDrTNa z56YD;NLN+q?zwUt&Mh5sE%U=%>5jOu66-CvDH|aiJ?o+}+gtLn`vEGAb5A+6?Zm^W8Wz){bK zu&b4%2jc2L5I9{){zRE-lMNiKBpZ;9LfY6-NiHHCtT0J(5p_OEnajV{Fj9!(bqj{^Tp2Wt-mDIXxTS<d4ELl2*Ou#((WUAN7sS4lqeSJ!Pbs#lW5NUO)7kyA+?tr1+3 z8I|OauZC`>5nD-Sgw`;_)rrX-LtNuOfYKcAqp(3D3g#@Lll@@Q6?hoo>I{YOsFW6k@lzIc@$yh zuU02!t4~E4hm+L!)kdd^mi2K(xrAKAdld~=Y;eF|uPA@1Q=jU#=PJroq}2)6c(kJY zgR~lO<6hi`bUf1Gwcuk9Pea;QsiKTTI=xIqDqg(|UA@>A3Gqr+@ zvhN9YW>f{afVf^gb_H39w0aPkf90q-cs@r4+xP0$7)Nwue~x@2_Xb<@vyN=Zk&}q% zdAe3dzQ~cQhy+wQ3v^^wj;ss0*9=vW@j5axM+S!8tB>VoKOO0jBU{XS^<*^L>&SaK zvIpfv;u%&j!{o&r+2E~j8c*j~@&qm;trnv^kR!*u_vytbb#i1o(iwQM=saCeg&g_P zeZQH7h>@WqNjdV9`(C{R#)RvLFGs#}KVas<3H)7N?SL1{%PyQlE33#c9XV89x>*g( zDk`#5M}90XJK^N1$TvFjWqJ7 zxm86v=t#Trk`Jf4inP*^SIWy@aB8T?(>n54c{zeeO%=IMNA4;w7ZItYBDd;D7N3>~>rPWB*D=Uh1}aH^c@jicq{BqEW{oQnRr zoD7u*^_+_SuAB_FAJlUy`ipXM4skW7q8F5t&yiMhDt2l)HK)dxlY#a_dQL?T){%bY zq`y3*=Tvkj9cf=qjv=DvRCH?{d99pWMnui2*k^U5NjX^)(ooN-*akXsPdVuq+ECA_ z*qS<0wVZ4+8|pa~o2?@y%gHWjc;lQZ5nfK#dC@FSIje-boa{te&8ZR>%gQ0|!+K7Y zI967EKssGjXir(yjGbj=qr0J=Q_-7rOtdbYx&z>0&*q=Tux*9qCY3w!=|#D()Q}X;oIPAfo0}-19o}bXhrzh?-Mz z4Rz$cveFUds5upPn~vOCR!+fDb1JU9j+81Z{~)5~R9u3NM3Wy_z4s5upPKu30G%TYwsoQnHFM>b^3UPRQKid&^4%d+JFB5F>>&DN1m zvt>6Tb;e~|aier(NVXhDBnGEY>{F_9jZWFBbK7Uj7A((tR>SmQUcTw;p!MY#9tcRi#U0 z%TV}fVcC`)k}ZSaM?G8k;4?0kQ4hW|Wn@6XgU>jmBYVn7SM}iAUgr9?6xdis=3Ia9 ztu7<;uRr(}mXW#Y!8fan6_{2=79bt%Gt{H!#>Xb!$9#7EZoCm$bp39;5m~72MqL9q z7B4nx>Fpfd6#sM?w_jY3m67$HC-g%r;XWO?tBeftG|`JI@m3wFjOrlbRd zj4TQ~Y5EXJh}MzNGIAv3DZP{u4IQ~sTBZb^(jRRkohmJ-JWrb$s7CUE(sIPpRDaBo zw7sNt8-uSXQgGmuc=ub>7-9f%NE3Ij4N#=k18#j z5LfR|jsB%&4UR#vD&4uX^uaNxgqjNPmsV5ZExbAoJ#i}Xl8!uATDGgh)*(G>Tm|Rs z`%3Hc_?@LKPvIMcT^^jXb4$xH&oib`;AI58#2dYR%JY_%{qWSs7M`o6R7YJXB_na* zwF>V1y_9~1y`^q=g%e)f`F$z5>V3wP0&i8&TY95+R(VrP$vJpxo_j{>R~TGMy11X! zk19_O9qCj`&X~`dbw0q|@0F5WA?5VE^}JR}PT<^_x+qc4fhsN|9)~WR?d(RSdX4{YY)$ok%1TW>DTU)C8Gn)^^wlb&yq2Lm-L4wcAG326?j=6 z@9Y*?f`8#kpXcqSS#sIaT%UREMp^Qg=cV8ws9u&_^1Q6i^LF(txdLB(XkzDN3H~K0 zedx8*(H!{dJ*gd&C0F6AbH44(lJVXb^*P_Znkf^#&GlozKAkC(yf5j;fPFAiCU{@g z)7IXRDU;!=X=`uHl&SDbsPyVg`4oQK(oD-doqJvq%Kd$l&vQENTr8l$~F_7 zq|!Yzgi`+E-44$tG74y ziITD(zFMvJzLIhXzHatUCFOVcy4l~Blq2xtl(Vv=9EPtq>-i<6kLQ)(W<8~(^z*a` zZq_48N`KF*X41ftmfg3c^!2=E`Z7(U1U~*%MSM_FruttujjGs>jbP}Nk}}PY4-B6x zX~YzS8kdw${Vh$S0Sc*K(j9HM?kp)&{H=mrdP_;!gfiSWl#y9dHmfp{oH8Ox%J-@a zFUqiV8UJL+MwFr6quFOOWFN|q8_L*|AqP|$+cOI5u`xsTt1{MRVEi*wJwDHny(mMy zNwMc-$Wr5V({n=^qcY@k17!?y%IJ|H%M6r}k22cpGTzIOB}S{@?0+poCY!IDz8lJT zB15L4$Bl+g8TB$`iit96qm1ghj4By2$!ryzGNm)5E6VWSP)2lybXR5goibb*(oL0d zDc!Qqr>nM{OqVVwLr<^$>GF&5h8cQ88Jp8(m+_`)e3M?-A1l)3SEIFQEJPXebQv?! 
zWvB60a4L>Xm;ERsd}w;%*!E7BgQ|?qP8n^}<$x-qHOgqI%Xm3m_MwcBXrpkNJen>W zL;gKY>ZZ&0A!t%{r>F|)vMB^jDutplbWus^vLU2knuMpz7L;-0G`X54TU8n7(+Zn( zG);a`W$a6{?A>XqTehdkW|X0)$p&2gKhtDsn*6Ay$vmfwDJWJ=lQAe`gf3%Hnryi~ zO}eMaA(Y|ikXG0&ZPMg-RYpsvjOWwjuqxw8l<}A@qhXpHL>Zx-olT~8n#}aS9o%Gc z(`2^)o#2YdN|U+%Ho;9MK22u%-wjqjkS4#tS08!Wt~A*NU(ek0sq!0q^#QhhBvpQe zuik3gyHe#Oe7z}cNtM&^^`^8oRnEfKo6^!$IR#(Onpvr`+;}^Dn_vch;bxGA+yl~2vLgPT(8RGDtR6Wo-Vr^*boO>k3ck}A{8cY~W!gH-7O zUvElvQ>7Pty(v{qmEQ36rj(s3J>lz3DLGYk8}9`-rHEA7W4s^Sl&n*vDd zWSIkBKNn6U%RKn{xv(!;=EK*|g`bjTE`0r5_%>M~)<+tSIh12aQEZWEJbCuW9z+=eB~cFYVt zv(03=3SZCc3rX@1d_A-ONRn&t_00Y)N&befXZF@4$#)lY>(@!r$z9N`pC?IYcR{z# zNs^B4f^MCdBuB5$?4e0=?E1{^l_bZn&+Lzr-*Ca}5bd5?bNtCka z8I_!sDCN;HDmfuha?md-IW$qq6?9AQMCpkQLeI7iiP9Szgr05hBuXD_5PG(~k|@2f zLFn1`bfWZyuV-7sMClJ-&$hb~WdM9V+ip#ie(?2dE1xJG;p?}IX^GMqzMgH-iP8nW zo^75)=>%WTwkrwJ6~3NrrxK()d_CI^BuEeVX}>1m{F@-%;72Ao`|jEV>4QbAXWP;Q z>4!zEXWOg<>5oOMXWRG$>5E0I9~*-cWWe=B-y=ajxxVPzC&-}di@tS&47|SRnOBVL}y2&m+yczFf`ppt9i;$PLHBfwm$|-z?s+#}4tm}TK9*jMm&2a- zgO8ZfBg#gs*4arZ|}eU(dQP z<75VWJ?j?5$#U#jde%*kla<%^xG{0^#q~XIV4SSDzQ=WqlU3LExOQ>!<@G(TRh+C* zd)!NLmi>I3tcD-`tKQ@EOnV3+>x1A-yA!ur9|mXIEhx@vAMCzzaq<-${ZvnlleKX4 zQ#}er!_hO!j+3vgkAhwHR|)yX`Z(BSCrijWt3z-W?JptU!qKzn=MwTA96gJ^Dm3E2rpFW?0w9h^1z z+pGO%2^s6pH%voiUML~s{T&U{RGG(0$OL~U!*r`m{Sq?H-`OxNm8pg5`@0yXs7yuF z+27SLZI#JDrTyIu)1xvmsI|Yl5u81q5^@Z)Q+LIcSUG{&sTbF&SUHK=sTbFQSUHZ_ zsTbGwSUCk>FRl%-at6NcmQ}HG7QXJ5&tl~?eEmfIG*-T{3ufD>SXpNm%(niq^0i$s z+d9X}TDxGjy&o$Z;Op*sJytfs*WLA8tb7k&&$dTnWg~n&+wP5(Rq*v}s}(C>!q+hSx?==;H$w?0NjhqevQycIDrGPGTAkDC`G+peE2C&$Ro*Uy&2V`TgFvt^$c z`RV%EGCxL+Tt8d3iIG39pDkO&$g%5Z%ce1M6uzEqjbdb>{XuZH)r*lu_J_gQRy{^O zv)c!|FDFKpz|q~89wSTP=tV(`F zm)sy)*7^HkQr}Q=^=SFp-@l;b^19?w(X!S*03|1*_%bEIIUtC{^bTQL@B61SPMDvg|LSRGpVb$y)DF z!wjgBXGY2As^lpH+@F-cKN)FF-dZSB}tkflc7-faNuS|ur(rHXk%7xQqG?68L$X7mlk+!iIj*dtKEswgH`7gH`uw%Q{NGe#AY5+%FsQHEJU z6%(P035k+DDimi#6^_k?NZG4m=7~tlJ{qaoc_31LQ=!Bg-fV4-lrhpKxFEiYl+p5T za6v4Il%s$h`8RDIz&iM#Px!ECqjB4 zt{2=Z5i$YswC5r$`{@W7k9blY=WtO!LPnu^`iWROLWZDu`s|e(A!Bfx9)_$48HU^R zFvLg5CHQ(60ugc+z8(fwgsehb55xIzS%$bCh9lv!U)lzTVOO~9lXk&j*b**h5ZA-7 zHe604uDg3_xU9Y2-Lt}Fjp}YS`NoIKY8h>qkt#DNT)vbs1=F%yxXiL~C01Q&7cR4H zT+v!ekNRx~qN)lUa!CuKG4i zW+JY;YGs%##M4mE*!l4BG}JS8N|>C$8BTZAh%h;cGn}5WeZ%A*oYnM3r?{= zhRQyi0d((u6Dr%0*1fkpRHlZ$9qhfiq4EztJkY&2DOB>!_kz7QEL0{S9sgtSJ1%O_ zyM)SC8C!7Fc|TNsmT}mA)RoszxQxdJi%jS~O)l8zlS5^-y2q!A4iA+z>YfmlF+=4mb&p?V{tU>M>K?s? z9|_1>bx)|e@@qi8R`-Od%=ZELM%@#>E@0VT24tPOCt+zoe|1L*vjZ{#-v$Yu;Km1J zyg<;+8WfPZHm>LsT(^MCv2jJ8;MxUbzPh4Ma4iEePhHU`xaR^gR9(?0xJLpqKwZ(# zue$@%SzXc1sveL|>WXew`G8zgHPFpU4ah&L2D(|10qLmj(ao{~lCSR3&ARB9o$8)& z)r3F%vP0dYm+)@Cd?Ql|mhcw8e2uSam{Cgm$}iulE72;m*e~nV75&Va?w8H#ieB8K z{qnuKQbK9{{qlpl5~nhq{IW$|iC39-{W4wEEkR{k_+^@^TcXN5?U$MAN|MSv>Po0zhIl?T%nX%r`DK{O zlvJ5>Au?QLGF9eqhzwPkES1?ABA5Kglv0_EA@Y~XlvbHlA#z1!$}A4C>;)lmS!L4R z^65WZL9oV#$W?1vaGb4yA+p(?j$tYn{GuNO|3@M6jb}zs@V5?;u_|M~5@PvZ43T`5 z3E;`?e>_BeYpzjVle7{*VwE<3mPGD*xX;8LKjCGfO$`lS)XdIhAtICqvaOYB%$5_el!UYD)Px_@tw{ zptiG=FMLuNX*Hu#7Wibix}Y8j{wY35MOsZL{|KLSQWw}y2V^mvT^Qk|EHuuSbklC0w>ZT`rk`}2qJAVV8bjCOQ)CBO?^~osToZtlT z=lNu`%BTsDQragIRI}1je3n1KC*xEm^6p@_sTUES`m6Fauk>}#MenK5S+5Em_ey^i zQioap9v#~0m3}IuR<3`eS1zf-)XMd*@=7l^UQ>7S0G%{dF7&dF|~aCk9eh92r{~}@AgU$l~K#r zU)?K5m8N^UyjQxYD{3kFQ@zqPWL|J7`Xjy4U1ijQORI+C^U;ypJXXpUkMvTRsQYzCs-G&PtnjGs050}O?1JD%=bzz`Wg(xTyHumb z=*z=AvLs|7I!%Ro=};Gs{N=+fqC)TM&|4n4szU0t?{BU{&wAvF3VBrMVI6wFBY&%q z`lG)8b{(qjkr+hO_UF&>$Wry{esxEhzMSBZ#p>1dN($(Z?U8?ci_n+Kx@@Z^oU`Sc z3Pq}N59`ogTS_d(&ZDGlI`X|OM-7CMRp?6{T5d~z2wpu!h34qcG+R2UP^t=z(xD-? 
z0>#S(D9{L;cLMFe1t`##Q@C#QH~kl&z&7Us*V9kF>I!))03~X{ISzuMt7X@2c z1A;B*kPK{d4#{9k4oRow9FjrrKP26@9FqQa@yl<4GidJRH?Zbje)U0_b2a5+9o!JM z=3ajN4@wuv_y=I{QXJauTzx}r|E1W!61@ES+o8goYi4kR96!1iI@cC^FmQeiZ3tU) zFTeg&fy-~8SrEAVhPKNUSGR{&oXf9&YhbGQueSbmy@ut41G(CUgU|-4s<;ei!8P~t z8%W_?e*LZB<=5YKF2DXp@bc?|z*d+z*=au3W5&?1}C@{8l1pt;L7NCp(|rx#eZeg?Q&%dtb{Iz z{x$b_2|VbWPMqyM{HMB3{hg+q1DqJT#dq5Uy4m0AQP3^7OJUE@b9%e5sGzUgF4pvK zlyipv3C=kq&3M{-)IF#Jv2@T{;(LcPp$B6;aSFt7fl)t~LFw zTG!VNv|S-B7z?CbfZ;{gs<_fd|x!krh+hoN}?6d=k zr4>w9w@#>8yu z!S%ok%WHMdlGl?pxr6E(Z2B7ooeO0()R(=TmF)cN+O;}K?3FhlyIa?qb+uhjnsVTD z%aT~osZ(IB$cgekRsPQD?EIbE+xfenbFG&+AP{5{`!^QI*+Ck;J9m}8gD>HKAgiCV zrMtf^h4w1wAp`Lwm0@Asl%DTLw6Z#Yt1~!W?3bdp@{H;9MV}C=k1|E@!@1&0Bd?3RqVp_C8ff@Wy!~WbuAKcO_&do@va)*= z6!Z@@i~P;JE?L>xPMY4qm=|dD%j#Sp!yFaSsb_YsKNL8jP!9bb5}k*0dj%VvdrKoL zE5|8ACQTs9X93Q-tmbrbTk^Spf1}hKP{HVQjeIuXJSI)&V*;lnd1@$yzai`X=-)$s zD{$%$Z0(hmU7#(0GcUJR7r%36qcrlm)XJ5R+gmz4*pRs(Te@`W>HO{d<)40mD4#9( zD{?+$@IN4*F-Yu{R~TsI$(-~*Aji5t%H;Pu=Q9N9xjseyb@z^;cFjEJ@xUc0v=X=i zh1Npv*4=g4=USna!1XA!R{ZmcP*&&u$Nv)U?UDO}lE{73{V>9Lo_lX3)VVXt4WtNM z)B|nV?EhFoxFeiTCH$`_Ib|fBdmr~@+?CLkRUdM8#ihkpLf2V8b&fr!kyp^)AEmnj zFXMl-ShJhFv?dR<{4eTnhCL|H@^5q|WAG_%%m3p3jiF64ivD%~H1oIeossmnf)mc) zb|#g-5qP`eZ%XyuLJc`m{LSF)k<&$fDW6YB)45Ud$3Fc64d*=IY;d^0A$7=S6%KVi zvyhl0pIAr`=Z~TL9%oyheE%jnzxkt-(Od3u2C5*!C+o_vj4$Q^iK}|mLfSHhsV@5_zX7>Wpo-vT<9!{3Nncz zg5NeorZ7s$CAi3ws0~k&s`4c7vPh8=fkjzUAeDz=8TTfCN3gyuSL_0xmU~1}l&KpT z?o{Y~RABBLB7a2Su}MOO%Ek(@sv>H@tU&~B!2)w7Bs^VUZXHpgzP>>a8v)O=&wI4Q zj5zCP_+n7kRJu^G#YY*OpwF$+J5Hhjl>M=Le>5$&W!2QD->Hg7HFF!ApAlTK#e$48i5Y+p zGtMNQ(=74A+Qy2~as(M12-J8eqfgYv@}`Okc|w9Y zbRin+CFT?~Hw1zwCAj)Yp!3pn52Z3xr|Br|q1>fVYa|krx_Vy_oFg}Ju%Wy{fFmSM z%vI2~h+0`AZBbLnEka$r#0?skHi}#+MGT7Sp%St~NeC{Rq{wB1oG5K1=3#(X8l0lE zI1zX{ki=@0>m}yyNj!&{Jgsa6hI$xjJV$PDXqEKIiZZ~~WS$=2DGN^x2#Ioyc~TIi zLgPw}t0dN1@&vlsupI`nHzOxmnF6LH%@WH41|WGoQqrr+n}H?|5;dOYLqfGgV}+V% zOpP+NE*WKt9c=cIClAr+2LS3?Vp;_t4@d&i7Q7?ZG!7i3EW}NBxzs1NVSfnn8n@GDU$#w*kx*ks^v%E3&jmKzm8?f+Ai3+9vhKt$}1| zNP^M~KrBR?LtB+ZiSch=O3X3@nJpdILZtzgF8 z2=t`H`g8?KBM_Y)Rat$;LXx%-GEC}g7DdV8>0-pZz9b>a6u)L%EXX+{rq?B=*CnRc zCDtEBxUZD^vM-W<{KKq8Cux;Eo{qko8K`1P)cE zRP<0NF;l}L;wq4&R(;a@b-+JO6&Pye2SQaKBm$vM1Rauy5Zq4?gZqiH-c3Z=r0CyI zAiY5cg6vD&aAP6n#-g0wSo~CB)cdexTz3xQz6#pVo>fKo~p<_ z#MAisgLc{^F^&{v?gmwgO7vTvzW4f_l>c<)#7X)$b?Q2|+jJtI6bw z`D*At5h2s_6Vvk()6t7utujB1NNqJXHLD~)l*k@6PAGU!G{QCuOIIVZ^!=bWVLPb|_Ulq*zfTqUs_H$S?F&>#?uHv(B(mnih}#PsuOOXaC13H0oMbnv3lvy0{f z9lVfq@PJlQl}KJEFDcTwgWAc1QX0|aCmhMZLjtIS)%pezc`{*&RMV0V4rp>)0OU@f zX`I9bp1{)sg4_jF*I#+WGZ&`E_>T;d%HGvT`l>}pa@PQ$6}^d~1wv`R#7I97f%oULXP{v2< zR#gw@)}hFK2S`qX$ZU|Ov!kREJY|PT>@k_@KXrSF$yuaSuRKqOS(A!-O=|w!RH(Cy zq>`LP60dZf@)Qz?)hY_f+>(&N+>&n~b4wxyGd(g(jXL8=~Npd=@h z#0yQVw4HlZ=dbCXz(*CXz%7 zCXz(fi6jxcJD=vE)QRdqN(yw#Nsiyy$l^~fC3AZ$%_*=`K?gM0cw_A#pPEQ7?Agjs zb9iK6q{wWOsB~V6!EBRo%rXhcY?Gk$jr`Q2GUaCGNm#EtQBi*{Q>jU&qa;Ic5iDJOWM82l$oq7RGg0ZMzFxTZN^2;Tx z&HxEgYT`tBMV(+>Q?m+k3$BOI!TTcazZIp#I%fB?*46uhhYe^l zUlOE>z#}Lz<2;`f^d6DtBQf$NX5>rE$XC)TB$kJjYE9CZ(Ges)I5ERtG1HlRQJY0g zXw2wWlcbABkWsJd40=Jz>j^Ud!c&ow-ou_}CjFG!OZ)V5dICcr5@ggYP@!?9##IvI zkd6sK>%+;=)R8tQ>nOyK5w>EozZ4sKBqil^EKrH2;0ZD> zuS_*kLw%FaGB*z^eI>v_sEJGyFG5N%js|2no#a(yGNiPHQg8#3!C3ixUs+FXXliG4 zq?bdDLb;&{d2U5n%VNNb8x`bco4a*W^PnNy3@+BuZi>boy{@bxh2W4=G?^d?v*M^R zMde`EQ^Q1EEYQD%9qNJsmFRcood0-Cw1)Myg2L1w0o=kl^x{Sb|FX1oXq_+wD1)N;> zfxm#0OI$y_~wh*2PFeQ#YR##-T?s5S1hQsq7?bspFzMXmL~ zHieaGc|RW$FiBa?N8lqV>KO@qgsjZ%2DV9&{UuqM(+&0^(?o$R6F$By=PU4$6!nbx ztempIma`JrCPkfF2y9bWxflBRIKHGTx0S$0Qq(gV_y}3)#{=7>$o`V7oL0}kJFjS^ 
zE&h28WX@|KYX9L`c=pszMKVKDAxBciE>+g~j;$M$d8iLhp~L3fm&?u($r6a{TSXaU zdT$OK)l^z>40a668C|Z=1xwA>x02=JA*a4rLl-i>uXbFpIB$I`S$!99v}(C@i=BQ4 zjU78>uUR99)6y%NbNf|P&jJ@T)(tM^F4Fi9ET+ngEx+^4*nfYupE_;qlxef3jy-6? zET~yiChRpeuxe(cGd7W^FY}2>3@-L;EUm2w3@V!2sd$}IQI%rsY2)@k@Ss_HO&KJ%YPb?Ba$7tK~xWW&@GL9xLo;GpnIJ&}Fk}S$BX@f~cesc_%1?59!pNT3D z>W0dGhJ{Z3{frzQQr>=qN$w|5)N4{n?gyr^qJ_A;w7Iloq#i0uHl2z4>^WiDKKoCY zHQ|8q&K}bb7(a1>Gk%Y~W*szk;`9l)pRvb;z4kf4={I)#zT?JDoX7<;Zolyd9=ski zeUj5}{|Wo=F?Pz7eI`r^o$B?);<8e9Tk3G2bDWm4#v^&MD+{Eu%0Q||YOHA3S?_5o z(RVeGnka3aO8SBsDCk zDaWYXY2ghsgywR1ghfP)=CG)&Tu;K#3MWy?GiZfV>p_zTl?0(>CQ)CtAkgZ{C-I1(1f~ncWlMc!B_sdp zN{)nRVQr$6!dFwssu~JORFPdLZ_+|4jEJfD_UxxslNon z+PY?jhs?~E)_PYCGY0&gR_io22|<&q12k@`spA03kE|=Lttl&(6FRS$J=gQHU`>6U zH2r#~Y(9~my9OzdH)_|(EFKl=8@ylm0R4pzOA?eD{xNzMve@d}$i^;g8WGMi8Wm$C-r*C0tV@)Y@1I0}>iz~|g^H*G1-&ovK zQB^B*^yS{EZDPW{EJ41C+FHq7Pecn4l>Jharl2Sycm?(-_KU#TAPc?Y&}N#K&QvS{P2PIUs2W#cp)Tr(bVSkKdh+)N}pU*NRRPs~}!HL4&xDP5!&2Qf;eD#Srk z@g!*~i<~l^Gqt@e()O~*yB7pctPI6yv6NIsNTP7<@;0yz%8+#_kqpp~8yD1hbkjU3 zwu(GHWrA!=%r7Xqp+;T`MT1gF6 zv`7uh9}-~G$3clpwvl+K#=|rouJH(sM`}E38>gkVwq%HNq{fRiE^CB<9;!4kG%?ID zWr$&j=P9Y2QbXt?hA;y+#3?6;;}UgTBKF8QE>Xuy4^^6294kXCj!T?*N_v?!FOD@k zj=iLz>UgLo4i(2k)v?k;l_s9&kz(pmxDn%_xY4ANVd`d>x)~;JhN&B+hbm1xPf6vJ z8ipG&9)=rDDjBYBhO3+5;%2zIQF^G-#PgI?PO0Iz5o2!Y1gWhN>Slzx86j>)s2in+ zDos33N#&Fpfg3R%fg4R?a3E)Mq`G0Cz`-j+EGKfLGfzq7lp2YLk$4!12Tdv&r5;A9 zhf(T5nW4%M&r?!4rAFal6dp$5L6b^GtB29*VYGTsW~eg6lu8NJN0U{llASc$PMU2e z&8EyyWr)e9gz7tyZ6}Au%CMuPL?VVKoI;6)OEiLLG+`%EhLjVPNHkQUVG<3OXavz{ z!cOH*X|*$l+jg~6UhPzr*Knt(aad{3U9iUCPCSR(Wx^@ve+_q1!kh+>P%(%59HF8H zBvdqk1nwH#66o*gxhoT;^uP3o6on3|q(q{j5)G4RxI`m}MiX|Tn_|Arksd_)gNivI zfnJ750FYjxkx)^ynEiPdzPq_G{IN^7cf>e(PJ};~d3A65M#M30mE#scZsfR|xyH$H zqRuJ3?M(iXS!?g8b98Te7-CA|+}`#Om@*P)^|pud_s+U|IU|lukg#uWJI^(CTyJ#; zSpzwux1G!1PutkDEMWi9I7xdsc5nWwTA$u_A7dhIj=PaHF7Apb2+8#k5$v>vIj*>pRIZY zhdCLtXD>S?Vxm3u%y54Adt_0^er%%lq=+?kSFcV|PQ&Lt?WCA-KkONGR`s+8BEHcx zc>qKna zIliaeB5vHHdfLr%IYp^SRXyzvB+c$=x6d_(LP;(PO|FSO?Y|6Z<9pgQCf7vTCGObM zej6ciU{Cu^B-cdSCHClP@07&6p7stBZM!}GQe?Lw?fW7-DA&Z=<$J%#ej$5YS!6$F zk2&oUA1$(9iqrcr!fCvn(`7}r4{4Va+1|M(w_U!IitI-wkMmz-KQ#F!uU%qwk$qj9 z&MvaA;WWQp;(kT;Y)KqfWX~e8pk3k)MfPS%98hF$BC$)m#O_7*#R&V%EwV2}@=e!v zi9h$SFH6qvdf1o9*{xmTyFKh5QU$N|u)k9Uh3$^=p&qsuY4`N7J#$U>c1c(Duv?RK zaSz)+*Ys%T^28o?wYWUGhy4VXMeP!+df2a}Ftd8tuP989{s-oPByo??THu zs=zK6v(W*uxQo3J`(Et|v2hPO5Tl|Vc0jJ_-EKc^-R);mh+n(gPboy7c8Q;nDBj;E z5vP6IIeoUf-H5cuy4$|FX5)7GZtiYhk$l&5w=2lENxQ@|yW2A*@ucqd3=%hOm)OwV z-YSXJ-R&(TZr(0&YIpm*B<|PUK1bpf?Gi_Jx4+7P?9ko*mjfwom$*@P`+(%^-rfF_ zoLja_{Ik$LEr~xD+NVg|s$JqIh4y|)e7DfvM`FKriBA>UCnfQrLi+@XTMzY45;g_? z)tcj6S!hoen+pr=sgbV6J*zNM;JS1YOCsIuY2+G^E?0G-y-0G+F0>a&t{FkD^T~A~ zxdx`oHMG#4C%LvQwC6~!t%6)j$#pKd292VG^g`16b#Wqv_TMIIHZRm3@t1DG{m{)` z9EqAOzU~%vKJO;2?4xe>8B=Js>q&3S>!z1&EDrm&ETWFgC6a;JveCTQ}Lc} zq;GxRbdNe$bh95udKmYDZf-vJ$2bY6b+a#;?#3M7&At!1i@W_j?{lUo>Xde~A4GZ@ z_mD8}0p08irpTB*yV>`^?c1G@l5X}l=~K4pX4lcD?9i2+ZPqPfyE=Wk+3z^k9ZzxG zyeK^&J*jm}H~Y5fYK*7-+||B@7V)&Ny4p9;tf#H&YTrbQdD=@|?JH`yPeV1z61{0+N(IP_7`=Pu6=%2`*EbexHVm+=F7U; zk0M>n@Mn!{xj(SCuGTq#a=E>Ykwe>~t1!ECwf}Vcn20b#l-ahcec0s#EMAUHyV@_w z5tH;nP0#CUKPN|yFn@IsAM3i{8R$jOF;xbT=}4fTz0~n{nV7?j~jexXf&1 z_5`mq8=JkrtIQ^5AMj)B_XXd_en0Rd?Dq$+hd%(k7XCo+I{3-7EO$s3yBcna$o;$6 zPmm80nZL;XFXTf-F6d&{AeV~lbg{P~&k_0O0(&cRg~)3P>>bFJB7az5Z%3|SKWCXu zO&xfHyP4sySV!F_3hdXU@t3fp?!5)}E94_ZUS43=A|L0Uv`Y%?6=rjDysw^CU~i^2 zPw-Vb@l(wf=0smj6xbV0v037)hZorEsmwEc^?(9;B&9|pej1ha%kQp=W=QaGz zydL8iE)*Jy*JG@7`$wI5`S$(D){KAiwa1V)HzPmN!*!b#O? 
z--~Qx+-LKWrF%#n+?!_~bq5)9t7xcegc>)A4yqo_z*6 zmXh3@XP=ei1j*HT_9^6?l;r7o_Bl!3pX7b=?9<5IrDPFz=REsvWINNt)3(pEDa_eDQVC|3 z$PwhoM!EK6u%E_lEBSK8 z$J=qs1#9wze=%;kv`xP7kH#(6wc)S3M$KJuTastGBf2-l?Z$bghp?B$?T&e-NZ6%u zyGfquDeOscJ0#Ea#)Z2$ZeL|s+J~0z*2U$DsgB#H%?_ro7|x8_t@6x9h>^*0J2B5} zEbN|f+b_>-Qbd-W<8rqcp&56K+aq^0n~HJ&xShYT*-R`pQi~$B=n}V!b~IZEZIu2q zNA~#N9NV&^DVC%AHpgD!4l!Fw(LTwsXV58cCH$K?_A2;(!atv5FNW_g{KGl+O8BjX zzaz(<2|qyiYjf-}_<_P-lw&V}A0+(gIrcL6ZG=BI$DRhit?*6Qz;7pfRgOIuez5R| z<=D&Nw-^4v9D6$a4#JPiv6sT{DEz1#dmem=@Z06scjm{i!aG5Z^x_^!e{ zsLVYv`v=T!Vt-@IenWY7M|LlZ*>91@hL_Q&ATj9rxyeMWbg5Mk2%!}Ex zkoOaLR?HrU&Hl2Nsk)c_WA+yIGD%X$#_XS1%B|0gh}r)k%dO9B6SIFIPZfEym~BIz zCUWevY#Cdyc3| zn0jSuto?zz8XumBn!~MKN?tx95jB&oJ%_w=#d3n~eRpfWbBCFE!VFVpdu#vg4mSz0 z-_qJ^IhI4bSwzkMqV_X!FMWt?!joJ&jyB{e`lQOkYdl9$N#bRF?wLg>hD3PZ}?Jwj#TI7AA_9ysbMBX)O z*TMgvoR<>yu$`7+>!|&YTVjqCek0|Jlp+0k|Uo+B>4kPep{1Yi`Y|0UMk~H_bE+yBx0X6Bg{EU-J#S?5&JIGc}iWb)P)iI z4%7wGH@l}s>?d@17pZb|#I8oUSdHt|xF%vhFeA++O3hU2pom=s^@e8ICt~k)2b+~< zOeE^=60!H7jBfWNFfd|oHKUBXMa13$?$mCuofnb8b~IvtGNXe@%a_LDt1uDwnQ)W$;u z*nYC-8^_))_4=7(?*iM;CA{L;JI!v4{~UVQ-`_-Hm%4%rZP6 zx1TmR-LbcuF~&W?vA2Q!+f_y#PeGBf#;tVh72tpr_fysV{*JvSvIplM_hTJts^-B{zx+XPp) zv1_=kG`^xu@Z+|As`c0S$~M6d+qkx{8=&!3ZG!K&4ODHA#>?6SSG8@U+O`^B-6r@> z+jgokyM>2q+63Qf+g`ODG`_Y?aAn($s*P_GyX!Q*UgH}yzER`l8sDVx%^KgL@t+!Z zBj+^2bizS|g9$SThY$`W97cG(Z6@&>etbCc$~KAL^y4Ft-)fWiZ9krc{7#$1@A~m< zI6n~NYbq`?H;yB- zD-w%FpQ2x{a5M$F-F6`rCbpiv*%z_FD`mf4^HcvD+4|A95;pAl$ZaH;NIP*fqm#kDSV# z2{#<>N-<&+JK07NQn``nyAduR$R>6Pa4aE}yA*wI!WjhF#7_QJ_kM&_?my^L2qzO{ z6T6Kmi`21jhEM{xyU(4F!j-4tj`6wUQtYJO#a}A7htD0GV%L&}yTRvIp zx&QgxQ7Lv?`DIDvlfSpIek-*ArRtrgHoG+=3Ll{yz7L&$+Zyxhf^E z{C#w(Q{k5Q-18~i{XX|h3inAGF7B7_$rQWoeeSUoZi>%6oWdB|CcQ=b!C;kKl$$u%x|D%YBZyFCpzFb#Kj z8cxc({ywD7T%Y?a4VUj9Z)*95q~WHg;iP|Ee;>2ca8l>%+dYzo`!WsJHO+p9rQv3z z;kHlXuPhBG?PUG(J)VYJlZNY_X1^oTaEGPghNSUVnTC_Ly?*(gO2d7dhU=MTzdNPj zj!44|!(ZI@8S}9Q?6Dpv*JIH31G{J82CQfIJ$i7=fn6W=8}w}fw+r0(_41a&1=n0) zw-|1QZx;clSiUO-C!;W_uYi+dIAT3c?&YQHan0!E>v7khSFOjrjy`ui?l<(>^|)~y z#RUD5CkbpVgnoRfYinv|pfkwXh7rnke1&X#X9s6TYNmv*b`E1SGQt_j_+zxQ6E(B5 zvx~DUBahvkF^o0#P(SPV<{d+Wvp+R@Fg17xcge$?na<(V(ky2-^};$|YNA5FfTp{2TG^+J z8G^B5NBb<|OJ=2rKLsr_~7gCd&4bJz6<@o$FG9F7;csF@_T!nf3CY4ezkC~i=Qr7d;A*s{_x`o zJoPi4_IRgHQQ;75ClLxc!!gcrA%5cUv*8*Dan5~?&o`DAZ++xoUMFj6EGv<>%6LP) zzF8z6>KlE5zgEX0W?8AUV2&rSL|%l7@~R3Yco8&_ljOm^-SPb5kupv4nG{?-kzBFKTTU|tBy=?SEWAmvm- z%Na;@9w+4#O3SHfl+(jT56i0OHL=bP&$Sh$bt-7ledE%mfU0WttQI6jw**r8X#RLc zprp#>qnj3l3BIg{E!I-x(SD&o<-sl2U*5kSHGPtQ{KJM$P1Xd`oW`n(#e+*lNCZMv zK=@eaqX*bhpZM^;)xyefd5LWf(E_4o;<7TzRbJjwCPFik5_JkqtTGpMfsb;1hDb#DMpf1t^t( zlv0q=Nty>0Ee)Q)8gijo;;yzHw0M9&Arn5blw56C)eQ2w0%;tJ0IQP)8fq&z1=PR1 z-(1OpY#MMKF>fiz3khW66$8myO;pibUE|eViwG?gQ-#X%=6ZA~k^nh@IdXjk*J2`Q z?7lD>l%tjQSc)t!5wHV^o7EDbN9Ls6Qa&xVq;+R%X~k=>^Pfvg_mA&rbSQ_f!>04wE<3AvCOII6lD z0gha1Pd>+?Ek{5ah@ku_C!m~K5cGk17mXGnK)nml6a>gAKoih3ngGb(@`0lX2+#z8 z+KSparHv^yP73Srr9YEcE(rmN7l|a@auX<3O}{K+fL_lmjP!l{bqvp3(KA<3I!a0M zddk4OWTcw&$~Z}K5eec(RUh^G5ki`Vrt|0CRrQdjE2>nQ(iK$#RF;5LxFD4#K&1)L zW(BA;0dfk+^&?o{C^0o4AT^=r0TnBnyfGk1oeIdJ|6=czaQ|HE%Y@C6fwOlr@vPRfMLgGe=^{SKtHP@rbWv+r8 zD7rKfF>RKJ{R2`ANx`(Sd~qPq!hLbRih9|oKk2glq^3|T=Sm7PsHi=Y6)rDZAUJlihWc)=4(mU1~PzXtu5y zwVn1jTh}9S7abLSD;*Djhd>x1Oa*XN45FH z{0#|}Y2A{g#iImxeNRpR@72}t8W=aE=JNVQb@B%WpIosSE{`6F~X@w<^73D-j%2v zMi@@uRm;kePSr>!fl$H=4?_u~oT^by%_t`^%Be@?t%pk9ZKxbZ7)}^L;6HCQRPydZ z3dI@O*G%@M3Ti@N!U|>z9F>f!BeXgUi7!z+1s$@J?_`@WWk;^gF(*!G7pp zfc?QW;MU-G-~ezfD05psgM+|z;5Oi&pv);byA|noe52rC)Ht|3SOD$-7J@s1J;5Ph zU$6wcmvRgR9|4Df&w#_h72pW)M7}RO65JH~QD8AR8tf151P%gaE@CjaGdKj?1so3U 
z3XTS4-e4DScW?|i1{@EL1@{K`04IXuz)9eEa2hxPe2e4T6P$s*7dR8#8=MX91D1pP zg4N)DU;>;7Hh}ws&7jPIw15YKM}w2VR&X+SA~*$H0!{_b0H=XV!RerU_xK?2eDs6C zRg?$FVq@ueJ8L=K2S%awrv9&TK}tG2IZ1AomG$NuV4ZZ${EhTn8@g@9JVd7L_@qHU<~TBKp>_Y{ zY>byDT@8lbw$93W0JCVRTj#z9W@SBy`MC|>w#mwR+YPV|+sStNi)`m*39ScbWxf3d zSciMwAuH=0nJ3G%h3#YrvtgOG(7Hr_k?!0+wBCzZwhi4}W?T!~UGtsgbeWya#(H07 zdN*`)Sv8%F^+cWN@7((d$>jZcpCi+DoNwSV^PG+KfmvBk%F22&GuD~5;A&v`cvjX^ zv$CF+mGyMy*)wfnxev<9`rxdrXJlo4$Og3Uu$>&5mGxm+S&CtSk5hDbp5~yD}^5s;sQ5v$C$)fL0Zjdu~?N z^Rlu|WMy5eYppu3@vz)=Sy|U-W!;dK^?ZJ1%(R7fsYZU0lxLw0+nTbnZqCYjf&OOR zc{zgmcK8KTp1?cbf?iWrO38D2=Ue!c(Zc9H(-vAE=@e%&c0%jL{OX&!bv~j#Dl6-w z`8_$)7LM7D(KUjd+lSWw$M5u+wy;$l%WwLbw$Qp&S4MPhAC}}e)(T|WLhIvMSCDB7 zOL78h5Hf9{^@+NovUB^eBqy=9A=4IGpUgUlOj}rzQ&=;RX$!5Fu)ZSG7G682vKAxL z7FwUCD_Szy2kED?#$&^`GguFjX=_ayoXOge4d2evRj-}fhiB(()}&vLHblW7aB&tnZurY*ESUsob^ZXa4-z*EMCo=w-eFe~eeSof1@3;WoM zStFFXbzZ7VSWlE`3y<y$EWq4nj?m`uh_XnlpQweCDaXnm!>R;n}m z(E2K8TxW)w7FsXU)oGn)2(7PP@7X`y*=bEWyC!A5n2_nLlg7@qtmoSB?K)kV-no5i zGWmLDA~t-xfi-2Bw$`M=3OlzCM`5?Ho-WfC zp7~o@dzWbouj1QSr`D`0PC=QKg=r7Oj}s)N3h=TazBdohL`)X4X_T&{Wxn^Gi_nHpU~C7o!f`j zPqHR9(-vAk<;>1xoO0%$)-}kTX9#^i!@Ar|TWI|(Yj`tlq4jgR^0{;S(E54S24~tr z>laei5qFd;w0=?7TX$sV@uBrgtozNhh1M^#MmWz&R(iv?l~`~1HUB2o8-C5dwE@=QHUBm%yfba#z4#qg zgJ;@8>vvf>o@ooMSFy@G(-vC4$D*4|TWI}0tJgPt`(VA5?&)%dzCYAA>eAVSTxk7~ zzP}Krq?1~c5 z)}K57m)<0W4X?B>oMTh?^kQiJrPG?;BrW?@%3UR`0c2PwU#C1*Wwu_EmG6IL<@*~a zoTuriBw>Ajn{o`9t-sSZjXSdQ_|W&ilkehooFlaU-Z`aXGe0G?{z2az>^wtgy*8^F z{Eu^L=Z;AYegEj3mWB0C&gofL|D1fox#LV>Nq%w8?AXjt39bL@oRx+3ug=+7Sg&)I zW?}uCb50i4zdPqK$ha>-#TetTJt(b(_AE+qr#c?YI|r zZc1v{SGn#b8LdO##=SHP>xg?<7S>Vs@+_>adqo!3G55-h)~!j49QUfE+>sDk$K7Qe z872A9I@i5A$#x`!qp&>pnvRT;e0VPN-D{I_;9l2}QIZd>ySUdU*^Y$Jx~qFb zM@DJ*Ztjg~*p8?|cX>xiNxn5Hb$4${%9({8?#&sE!lNv5Z^^>Cr+aHg>+nkJ<=&Qs zb#M3fEUf#ucVuDR*S#~Nbyz1Gxp!q@y|H_DM(faE6Zf7_$tZ2=-kZ^;HEFS#dtXxS zNC>aX&E5MuGD^d5;r=rX+n#Dos>SXDNjbByrTbt;qp($N#)8DxQ}IFJrw6tLBWxab=Ep|*+)??jR zP`nN?EcYH8V0}{3&p3BQQtn6y@5STYS2{9E^5Klg1ozb>+mR6Z-qU?8ij@nd!RNzr?~|4FzV7Q8JcoZZjK3?EmGwmTjVyfcpOy6i?#e8DADET(B=^lMd{55G zdW!p27QUxuWj)P(I}6{_v$8(OeJ7*u@M<{NeK!m18ChLvhipLm4*Qcsv$8%ctCGxg zS7kiPuq20PWqpMEUPj-c!7TUvP(kXvx0s!kb*cM77QW|XWnJce*v@z7N_ke+6&qk3 zo}Ehfqt16uYG_@Rm38$7Scf~W$;x`J`*E1H11TJ>&vQTNz$T=_t2p7V4(Sf0@V-;) zewxKRj zSy{KZU$yg{P6@A_BeSwz?0((OUFYk0j>^jVM0ZU)-|3XF+$U{-b?D(__g|sXffQbs zr?}sAU=z~eFIP+4Z$r8RDJ=J?Sy`Xve%FD=boA+2S)bwlyPcEH+u)g5S)b*8-`;mS zZGGkJtgM&1KdkTIZ+ckY=VWDluDkYczB(q(%gXwE_dgxG@>9ZcUyzmch3=0TeTN1Y zWo3P_`%{=TqjZV;b4Huc;L@zDFLQqhvu2bo&&v7=_rDoEh6Yz=WqpG^K##smGy1z|56KscM4)4s$`Y!jcFl$EX?yRiu zaa}i~%cQ}*Sy|u5mmyNO&d=rjSy}(n``AL#edoR($jbUbjz&b3;qi#0uc0QKXeC*>{&9FYfx0J(jWqSI5GArw++(;I_pU%qqnGHCX;n{gM zE9>XnXuEQEKGu9bE9)0r`7T7~TUhQFv$B54jkWXL`SZ@pSy``ebK3jv^s;*;E9+NX z`Bp*aTR5}unw#6sy7QxaJu7QI@Yv3G=Ve;y?$WtIyVN&b`3v?Ny1m6mbu-zvCJo-^ zi%Cf(qwvlK^qHaWcilZRdJGL#xrc>H2hwp#{yleQQtn6yt>1Suum29MKVbGY(-vBP zn3eTMuKczCaO=Dcew>x{CvLZPzQetQQg}72b_=tx{?wJP0EAno7qRnMR@R^MRpZce zdMWJLzSsck(8rfqS%2jgwJS;INBMPD)@xk(PC@5ec$ELj%KDoPunzb9ZC2KfzO!e^ zK7|k=OeI7K(+R>$8y5axg7AkEgg=5H{49d-r3B&U5QHxy2wy=EzLFq(6+!q~g79?& z;pG|@KG>u>FXr$38E_dWHp@Y=xf7JU4}dYScyPY{J<2E0Sw_ zD>wt(4J-$D2OGdK;L+e%a0$2vcs@7|Tn3H@Zv`iS4}g1uPk?)YFMxZ4uY>!5t10ik z;41X}LBq& z#~5%1xHotRI0-xyoB_)HZ#F1*wrWtWqXtl}qoYB&j+TJ4!Sg}6o|b`fJ>3eHfe(P? 
z;4@$a_&O-p)rX*5S8Kp(@Mo|FboMAJoC_3y^T57f0_+dU^*0!l>u)$H*WWH+12`U> z4^9Fb!I@waSPeFV&ENvC6VGk~9Q`QpPVi{(0q_{`3Gn~G z7rySO_iw`+}!}#o%e+An z2zUlK96S@;1w0Gf8$27F1TF<A3S0v&1J{CAgX_R+KxceW;k7^DKHd<1*~d=z{Ed<=XYd>mW_ zJ^`)xH2k)3)xCUH-{x9$?@EdS7_${~w{0@9!U_s%(!L{h$ zgX_Q_KxfaQ!nHupK?Q~X0psXDf}_!Y0t?Z92G=eqDEtL1SXfZ_UvL-B$FE>3`Z};L zcE5p}g1>{s;2+>1@K10E_&;!6Nq*s9;NU(5g>B$yc*mu=gEFhNYMcB*nTME&E^`Jm zKzRnA4a)O!H7L)v)kC@egAM5N%-90T(_kwoPi{*10g@1<_)#=aqZ3W-L{y1>y zhxyu%twxuA>|} z5jG)gM%aR|C1EQEwj&HCY){yMup?mzLH2J5Qido&$`K<- zS>gmKPaZ+aR6vk&btOpINMED0`XTmOoT?xApb|;J>j3w+r7)KaE7*E)fuoq!(!ajt33HuQy z680w?Ksb;vi7=Tkg)o&coiL3c^&xd2$1TSy$0x@m$023kk|1?0^(=KP^(%EN^(u8L zo3>xti?q9PP}=XzPxH0Ce$mfs?>)Jmq`k&LX|FS}llIy$iNC`R7T5LA_WCFG((c!R z((e0XFNK_ey<9&Za{WmAZw2N088n%{hYlW1x_m2fU6I$06X?>u8$fCA&7ib@DUW>9 zvju$v}@Vs#l^*)XIpTfD2GvaT5wnb9EDz*=2b1%;91kUg{&iGh8`)Z4&Nq#3R zgI6k}O{XOdt=;>YZ2B*R;k4aX&vv3Uj-q9bpoI>jwUz{bPYc8F+h`qH>3(}~g`|FU zjGiFkebH<-&sRtA%vBm3z2Kj6J<9ja(x&23(tA>;t#kSp4nCxRVX>EP-~So}KV;|rg_8({*azSL8UjBP?r7!tXfm}U-wWFXehJ)iLO1M%6@G8= z4|g&_Chg^W-@;FVe*kV3LB0zX^S=u=1O7v}Kb4Quw|L(Rn++e`rGH_6LO10+J^?=% zZWjWdFXvkHz9ZHGKL&1+@?GecJpM%Z*>DYn9Io1YpLgSNlXI-&Mx%TL+~Kq5jxpdh zu45wLYv@t%e9(fAf-&$)FbBK`jDwe8p9`)+&jYVT&j(*cF90t9yMT{?UBQ2W-N3($ zW6yG3P5YlQhU>tdWh16Lcp}&XTm}|_k47V=C-{SnnqJ`Rk%;LHo)(LmKH#Z2QPUS( zialRmw|uJJY|NL}#zf5~s83?HIqI%avjysdsDr?#qt=WDUy8=0KX?1a%^1{Gu^clN zTpo>^J-}Dc$AL?uxn^(hqiBxV2fQ(oWA?@Cpggl5>hf5gnFu~$^UNghWwK2J-$kDe z-ii4^;E(7BgMVN@1H1uuhkz%MekksG=9$A#uO#^h)E)E8EYzD(XM=CV@=Z1Pcr@QM zfN#bM%zW@sytaVPM+?jnmqto;zMpTOi*+%l`s&O1_I}jUeD#rh`xNRqa_)bKbv5UT z@Vf14&KKchgbPG?Invc!D8g?r7m09uPFHh1UoCeFqjHJZi=y4M0bK%01G+L=s10Zt zC=KW{+f5tL7oaqtf7wE9K;MGWfc^(B4X6#226SSqP#e(6pfsT0qTRKT{0>S3`V;K- zSDyVJcq6}_2IcrA)id)EM&30P-Pq)D(zn^f>jxptHOCf`dc z%C*1h|l?c=eS>E&ss#O`7>Qc-rb5do|iPPg|K|ABzq& z<9ldDNNb308+9JZu`foqGZT{RGtq%j=Y|~nY;>^MGs(Uz?9v>&0(LK9d|)%jo)#TyD!iP($Lw;BvQf0C^Igoo8yRMrxwUe- zKaR<5dR5HcVuzar9`}-R&&2G_c7$2zarY~CXUyJeN1COs+)uBK*`<+Dbc~{07_+BE zM$`9;P73^tiPmSzlZt&H6t^GW*w^{C& zO;|#X;Ogky)aW=R11T??LDYW(~G{46& z>^C|o=y86I+7HnDKIW^aeFM$!V^&4&duVa*B77-ox1JnyIFCl{4YA2VhjT~N{$p~` z;an57_mbpyIOj+0^LBF3;hY?`Cr77vT}<3*iP{S&m*2(AjoLHp)S!!*8MViw`CZHb zQF|qpeit()Y9EeH4Z4`2QTsNU->LME+JB?<^~&5kYPXse^eMSfdwpzL(5L(nv7@I2 zeaiO{dk;x|pRzh)pR?0~K4oRZo)n$#^&)ZSnTWlRVN$Wz819eQYa^4)mY%jeVxK`9 z;Axje>;nwE271~V5&Ie15KsHRh`l^A)s%QzL&QFg*6N?{(un<#Dm%_sr$p?psK@*2 zxQK1dnQTt*)!`9)cJv@~qOWcpvEOpaPV&{>5xW-kWM9pR*hlOXbBeF7Gxj*n#uC@7 ztbZ9hbh0>A1+T&<59`wQwtzNg!bJ)Szc*!OgmvBRSN#Ts|c zG4?jbjGyshvm0m7bDVJt7);P5xXT>-tC?(!$mcW2b0?dK$V(ji7JO9XqaAw-vK6_( zu}>k#L@sBrjGQC#3rn_<3Dqbj9JUAyxP^Q{_9SLQ(1-iOhT zK@INirqP=ik+2Q_#da!`X0A_p~i6@G#m{1`r{!JFWN z8vF`AsKLvSgBpApIjF%O7#zAwIkV;v^Rph&33|IZh5@EM)cjYe2BqdY_Vn1H=2xW- zSL$G%QF0D7>$GzARcgFr#~x~aQ=`$!40Y_Y>!TYaCLQ>wl%h|W>pmq*Lg_i8&^eSd9B)%PFh;`{hmsrp_QEjQiX zDX@QlPr0*_hy1Lq&_jO8R_Za`9jns)-cG(a1L!R)OfIllrH-mcRq3dDxVMkXB8Tff z&W}{gzw~QePj$YV?>sM3?$` zH#Sp`;T?22hI4F<`u`riTaN5zqgV=;Hq+bU7c_qsw`C z4wU-80u=urkuLTB1bVl-^X*FX7485ne>}&DMy|}aJIvL#9XT)G-X57}x}BaMjhvEi zS8%8+rJH!kCd^wRtU`EOgu5aM^Nt8V!MrQNpRt7b+`X3B=(yI)<+fJK{aUnE>w1ei zJ*C&i>a?;))M;(pfhwha+SX}lPl(oQD><^>*nBjcq;}kiqx7d!H2;9;5)He!=TcB4h{h? 
zg=aWv9|ebkS5TVa;Fahj!82p?&2Hdlu?BsX{}P;x{-SNvXZrtu@=X7Bq(PtQ&xkeZ zGyUl~jrvS~6XlR+`mabo6g<;y)Q9@7v6F}T-5d3x{$W&ki2o4x@(_P72i*w1j@|^G z7p*f3!TX{OW|8ou%R~Mb=tqLL;eIjhwr?~?q27eMqrrzLU8|h352&8wMEEbl@glTQ zaVLxLu&w7ViqJ~cEpc&DQpcSW;#N$Sitr74^JWJSbW-3;_N$WuKO*Q{KwG4qdnUPu zG;+5j(=C+Y3Ar;y2IkuPqfK0{qHLUNzar1ms&vV=H7PY{@om#EcZR` zvB`433m=sGZsefc&msrqz5zKX_ru6Rxv$1gQ0~?6LAh^%56Zm;J}CDU$U(VZK@Q6O zA9HNf-8E+a>MNLO}l)IfK@|7w)%H4_bwJJQy-Gj156&~eoM)^h+9_4OD`H{cP zqGv3ZGj_XUf8pviMwF`^d%KGg5ruK)N=b=|atbY36f4SNZUdskM5%S`)uQBxGTX8D zi^8K&)Sc?s%f(x+D0|XRND5OgQFkZiBPAtYl)-eEl2RZFBa(|n=^{!G$G#v+S5Yjz zHA>VSVeM^nGv=rD4&yh*`pLt%+eHZu;~hx}4&yFSg2Q-5l;AL~7bQ51heQbuW0`mh z4&xI^2@d0CNeK?)Ye@+X<8o1g!&o6oa2RV*qHbN({=wr2qdkr;Iy-7_i=N~%&=YM+ z)ZQW5ud0oY+Pg(tr`pJP*^AvXT(ih`^s7Ep$)T+b9Q_zLIyZ3i zRN&~dz|ryI=pu1+^lp0bh=&#KnO;%86G4h{hCz^`JR*z~ZNdG3lk z*SL1u3taP_QWq#iQTt!$n)j7DUa6y9y9-tyxJ^79)hkuw+MTfaw>z7kQMvKu^AHku zrn>ft=tW$mo;u#O*Kra}%u`3W_6AXNJavF;-{#jLlk2H{Tzf|3QcCSj8N^+?@nu}0 zUgGccRWZ~8PyLpkm*}EQ7f=0=791h9tEf@u702FTuW(H_PkoF|Pt=~C@f}Rz$JkFV zPvyq$8B>thz5dTNxpEUkO<27xv0%1*?tUp-OCaVwDw8`3oHEpu; z;K#PU&Q}E1RwSkWCZ%;rX?5EmY}RVLx=ntsU#s!zHdz<3R^!!e@*DnIjaRqHI*GL! zuWpmy_19{=x=q$itkrmRoBYJQ_O0Htys;_ zpVbTlSj{lVk=pUrgQ&1xB){WJkh66cCq>pr$Xb&v39?t=wn)Rt8V|ACoDkTJOT)<; zjP>oR(s1=YCu>Rqe~Z#^$EM*I09n+PX90)L06 z;fhJ@2QPIhWjKy7C=E9rPTFu_cNg-+G~6^eX|I9Zr^qwYaD(Aw%}rn@>p=dIhO2-R zn__~T^X^>8b!oUoJ}2vA0=w2U+^K1}bJK7a`kbsA3jAG~hPyHi$B+A3=G45ieNNUS z1^#N%aE)oWqtb9E`kbtH3jCd!hLg2J(k`nBvdLN_Ip4y~A;>1&2*Uc@UOqPyluhgo zA&6ZnH_zu}Etza$*F+GzRPHFB3s%{P-3e*9(|vAVP&ToXUReC4a#Fv-jVH(^cG7RI z&+X=Og0hL7ltJuLIjIlfWZjo+Vs}14+TUD))bo!q$GO7i%2T)-(r|b9+z}~u52WFq z^tl-+cF(5aUi3LBS5TH$(r|D1oUAts?9NWZUF>svrr2GbhP&D4#-!NYlZJc5=VZNH zu;2H6uFB_DQh%TOTxkmTT^jBepF1qY?yoeQtWA@1G(E*m*0rtA$=XZd4otC=^=RvJ zvc^ofeNyb+MqZ!$*yqNj*nORbTkCVXrr7`%Zh)_{JrxC2I39v%-M0tyc6&_aB zc-SPywWZ~Aa-A}X$|b6msA;~6O%f+sRQBT13Yap966F%tdU1uwP4ks$($p4>tAtL7 zb)u>)*Qp_tCWy);N=Q^ELOoGav&3^6RV1oiNYp}Ts4SPgiG>LDWfh(=fKugaf^#K; ztM)lTF$r`P=4xD4GgsKMDjyR`!6zF1bRQE{f#4x->jJkb$bOQHC(ZSMU1@^s%M)a0 zz7z>=_Yk*rqRVbnko_bXPomN^3l*a%Rpt2zrGTPhJexq! 
zO7pCJiaj){k?bldHHwWgzEBsG%I5>UN(72TET>UGiXo^cQ3ao1kDBhImPOM&LQ8_~ z6~q%7108H?CMvC}lwA;&CREgziwCAjR z5}uJjU5&)`8n%rhr7BgFSPsHZ7Nm!ed=gWwDl1T}C{(Lb5;xaM+|(>_X$?`CMw|r^ z=Soa-5s=oR=mDLGqVNpDg%NI?qT(L`dNfI}yT-&64wd9bf zv`R&dX^SwmUcAtUYT*)^@c-C*8?Y#w#PiNFtk9%ZZ(oD%okv99-`7I+^P9 zI+(r=E98=uR;+}Dqes$pLh@uu;gU^`8Hpw~ijGy-VCo8DBbNlodbY#iAZ~mFXJ-*R zF-sCkHqs&C4rEB4)s7&^&Xw%MG>3@T8N^yG`H7WFEK=x5X9>1+14)GiJjssW<<4+I z+(**=O=57v0=U0ne>KK>LaS*Q^9wrcz`p$+b_Sktl%jR`R7v_HqYsj)|8PnNo0?(<=ct zh$E2LIPWA4VxvWx4XnsaCx?aPDT7$6rF_9A$Hd`T?(`9^lq^ShhBKUy+@h_1$$=wB ztt>#rC>Dlk$xij7G#GU zAUzNGNXLWhi34d)E_ZqzlzJuPz({LkIFKnJdyNB*up9|XlSsnoRsd*=NFK??RgH+{ z$;2jBQf9W~-bn1FDa1;ZY&52cC(Yq;f_Tz45*tN1ag+OkY(WAk%SoUEq(~>e3U&^NN&`vBq)Q}C`?H#ER^14v&5T0>}+DCWf1>r zDUjG{nZ%ix4LhA!X*tBoT1V_$Vr?WgxvP;NOgapDb2&swHjxcGJB!%q>xhk25s*V{ zG}(x?0i{6_tn3_OrzJ~HVyC4@PGTpf!b&1m8nM!el`UC0#75Z+JA+uOiG`{Rb}q3J z(_keLE1Ot3>98_jR>LG^!%8Psb`GpunCR(Kr^HU5F=fe=nUTz-c{3xU@$Bx$vPWX$L?*bNLAgWrdOOrqoa zD1Kl7y#640eiHzaXKI09FE9us7eX*d#;?imQOWrd1_VgoM=^rryon$=cM%{te~}ZWBOqev?6RZKQ%KFbyQ8pCEbRGMu9)_P`xi6k`GFsg)WiXI_7$8>q65>WQF zca4;O{Vs=gl#@JH^eE|f@-DBJw8Nxd)cdL)-7d9`aw4ZSMk;k>e@E&(SM)fk{C0WU zZAlJyxW-Gr{Fg&JN}JRfy2lw>c-EtbOE;Z^KcZIvVRU=b_C%*0q zceku+b!kyIxhve=ts0qvREx=e&uX1iKl-ZHOJ7p(Rc(;!P+!$XskZf1ZIbG9&+0r^ zw3$)4+{EGem{_E`US6Rib)G9)l)i%D@^+ZEthP$eVLhwmSR3Aua9=#<#4ddsLe_Vr zmSZENHw9$BXZ1wsn-iYZd9LV5(pv*AZ->W3jxA&yZ-2Cgc+Sb4%$Qc8M9=EG@Qn=5 z3OO+r&&a|l)J1sb!S}RxGvqBEdGWL+i+PbJ$WpjGtM6(3E`)O(&+207+b44Jp4InC zZ*2IgzEAp|im&QD(wA3!RqvJFSMXJRzw|{HS?$^Nfb{Jb&l)-QLD$;?a#-iC66t#~ zoyU_H(i3tl4@qC1Y4vqXS^co|jhc>Ct)a4dpY%1G*6@xgt4pQt-gK;L4VBgVU2j#i z#$8QW{fP7}ovRfchdjzebR5xPXc?X|`@S5IzU1Sp{-E^DAJ1xeFAuriQt5mRSzV5& z<=z%q{h0K{A3tJl)z7$pgQ%00Eq&J$b3hpG}xb#h` zP9m=gepdQ&)m6cr1XM}y>U0u$Rq%7Jx5%!F>LB2G=^L^gM06N*0&l7KzEn%^zxk?v zfm!IQ6uKAjttH==ldd=Ld|9VP`kt;YN}_p5`s%K)>X)T&^ZKfOg-P&L3f-x;U!?M~ z=ButZ8odam=+~rg2m7KVn%9|RU!~C1O5Y>)b%oQ?SBrgBzaf3w*jM$Nt~Wn@S>p_| z!WWrS^Op2hq^~Qy&5-v*eO=yR(tMRdS0{bv+1C}`mA(e;tNK0Zo6)|i-)Ay?l|pxx zSwo@zfXSjze<-~-=<5pSm>gfF(A6_*Dbyb^>nPM8OYh+Ny22++uCG$)&NCY*)SpUk zS^B!dXUxsMN}>Cl*+ijkU~ZvMf5F^Jq5hH~zftgY`HIQ&RSI3B^iHa;D}2q!BXr)^ z`9^wo+`G=9_|~mo<$awdX1jNhOYxoC{Hm_NP95Jfw{;qKb;Jef4c@E6I}W+X+}?3S zhoL_(cXSxuao~>(c`x7B}!*HLVblPp-?j{d6(YTg=HW1RSF%)?xRpE*is5L`AxjAqY_*%un^VQU;7#HdKa_pZ z&DXJ0&i63(WXBS>P+2{kt#J$Q*eR<+*_S$&xP{8<5%{Etw?$TuWMAd9BJ@HWIFpzCmK0eR$tG4D+hPtlGQh` zO`T}uU|Ai_ekTWa;*!-f*zY^h$icFDCVN2+_Uf9&Ui7Mw1u^Uovd61yHoj--`!a|9 z$*Wl2%eibbg*uk~nL<5}{e?n(Bl|0bdOmxJLLJBc=2a~V7O=m|9>pk=vS1P0 zB6~V<$!8~?{j(E|94xCBvwz9Kow#K6P3+&DXyjm7y##Ygcw1z30{f3Qv0Fu*ceJLgUe<2La?k4J?dA#htWNb|=5IMC zE85NF-BH?d*vfXZbIUOuxmNiwJGZ=ETD$qS0z>Z?a2PXZbLrv%Frm4>LN;>KvAQx5?LKtq=1k%lTf{Zf4|;wj!(7 z`!FA|tj=Z0yn3D&S-pWJvyFLPWc5atd_TtXBCBs^gIV8ub(`1$6zW@8GOL=e%dIT= z4vcRL%Wh`L3~#TRs5O-?)=dzQ&(hf=7wv%`E= zGt3TlIEDH)mV60FUOeR+U;SZ6v{B3Jb>zC89Z8|SgB|5ny~VYeohdCE7-Jj&WA(k^3ZehqPeCEqNQ7f(51n1gHt zh58UXkwRV0PV%aj-+pdk3mF#q{X}7BJ)mZj=3iVO;2CwQZuEdYAKEFSGAnh zv+PU?^>gejuWC6b&$HzFf%4K(zU9~x>};=UIqhn84u$#!cCJ^ooRb$>@(o&f@tp6I z>^ur}4SOSn`XzQgh5BWde49~TJm>orRxL~6@}y>&Q|tnYv|nX46luT4E~H5Nb#{?g z^%mFGYgzK`V0r0iFXa;AG`rZVT2AW?_9hDTo9q$_^%;E8UEWg1XGhNWTddYy?B!g( z%_e$HTTZTyT}q*T7t^1)FC884)_9hAkHu)1+u}*hFz>U;>@c6zAF$-h&+_6q?Q`t1 zHfnjjj$HLr>W`?@A5*D6VVAebx97b)&+6LjjwdzCe9ER$r2QF7zD6xCp3=tK%j`-D zbpyMKLj47sMxp+aO>d)?_n;%!S1kE=YF#D44S5XuZ{rBAmmLXr`;3N40%(_ z@z-EdRN2?DOHR8N=6Q41c)p7RF&Uh@+OxYimAVh+m-C#Sdu(4SbwA8U>iBih&(IYV^!Q;6zbuasL1o#c27H$8|-u1EHi=|LZKds zi3HuZ<=GvE>5)7u++#;$PHuO9M|-@@B^ZO*kz5`)JgdiIxOnT|EJFG5W6NF=M0eYBlC6cUOCMYq$s7s68KVEhav9PtUVjz|83GYR_&RrcU>) 
zaF5k<`@E{<-jNYAvAeJB*=@p9?e2<>_EL^FV=i~u*YOoA$6L5ZDbymShL_j%mbMi$ z$NR3fVPbjT)pqVs8@2nISGyxHOa0ZMorO%qB=()fT^&9NbKGAY+F8hC?gMm>SueMvjl?PEn{AbFWgU zZ^Bf}@|HSw$=CN1%+=hnrlZiMTrGtC=k(lT)3`bcbvpMhg*pQ>$GdOKvwJo7K7~3HlhJ!l z&pmbxW~_Jjdv<4GT6@n5_thAaK-hjzm zJuBQ}H)2Y0cfV)%&D^IH>P?uT+;e*FvA19fb9cXI_pMw5g*uP>qK(>f`+E!KPj}zC zXZ2R@D+={COcm~X+WDBy-gotO%slS9dIv}IJFnX~n%{X9a5TU3x}Br>o!1?hQr-8x z+=)5YeODK9-%+UV#1!tnr+pW9fkM3tvxobhb`fT4c2|3L@8*7_P~XG-M4>L`nkm%x zaz9h3@8f=ZKL*l7w^TK@9sydd-@JjSt{o7ao;agL)b)irn;kr_&D>;7(^;29o3iZ=m zcMA10Tn`HMQ7(W&eT?f#p+3%CL!o|_>qVii;-u+4yj-cR(a&+cDbjwP>qDVF!S$t3 zS93uW>KC|v6zUhbYbn$xx&9RD8ZMYZ{Sr5TLj5v#9fkT8ZXkvF6c<9Few7O?TquS59c~1Lx{e!3p?;SeMWKF=3*&~h z78juB7T#HIG)3AUaAPRcA97f2#6DZW5a=ce{p6h&k z#;LukU1Fwz(@?0t3iSofK%xGTGg7F3;!G6kX3p$YEuYJu zIg3}dd@g_CL<;qTZfC3UznIbP9D3#r0m*^0^F9+~8F$pUa+#=r(FOrX$xi ziWwB@UW%D*)Sl~xKq_@_#jG}Ica+<$uD~Eg423#aG25%!l{7OzF^58ZonmeqwdZ3p zP!UU^4pGdbP!CqzNTD90m`|ZrDdH&9Llp}s)WZ}DDb&Liizw8gig*h32*qLw^+?4{ z6zWlmC0^Beu8S~Ck>FMB5;LO}i4^KFilr3lv5F+l{hD@n%Q6zcJc zWfb|Ipjb|!<`tfKhwN0^(LTy*9 zr%*>Iaw*gk6&u>9JsmP)RBr!Ue)rsoT9jeLOoS+Ya4Y(XHX8Crr1oO zj#A`#Rm=IFuGm5`R&ax2tJk#Ue9us9qfpOO)ORTG?AmSV=-$b5#qU%+ z=2h(yw{wuk>7k)L0!ad@#J+(F;s^b}a8L>IU>DE`b_GSyAB+IIfstT$FdFOu#)1K0 zJlGRV2Co5A!Cqh{7zh@o_X#9>mJ5F$a1+=U%m;(O0-J z@A+du^2_l!P!Gm~1~6h#pFkrR4Vu6Ngqy)s&;n+HBA5$W!936g7JzoJ2#f%W!HHlA zI0-BTCxhi+Bv=Vf0jt2NU=278tOcXMI&eBz4_*(R2X6oyz-X`uoB=k2Gr<;c7RV&^ z4U7T&!P#ISI0p;{=YlFQ77PRDfdY6V7y-@)qrf;Y23!EffeXO|a1oda#)FyQVlWrH z3CshRfCXRzSOg}5d%>k(DVPM7gUMhem;zRV%fMQ2Iamj#g7x4EumM~NHi4_aW-twG z0n@Ix>ZJO_q@^`HQL1d8Ct zU?lhn7!95WW5G|scmvFdn=NCWHTgY2d$LE_emp1TrkH1CRv^K@Kbi6<`VI z2bO_KumbD?9tFFSc$_bPup0hu;3=>>cn0hNo&^KI^I%V~5xfSx2=)Rmfq`HZ(&-Jh zz~2YF4E6>6R-pa|1HgV@5O^&Z0`><(!C){P8~_U7b)X0i1jBGYg@BRp4+5jX!QiD~ z0f9rnIBZu1#v*(u7!M8u%SZPJ91bSK9}1>|Bfv~>B$x}10`tHyumBtl7J*~HVsI>2 z0)~TS;5e`X91k7^CxF!;51sbJQ_>|XMmaDOfVOm1?GVKNtWe zfq`H$7!0O>DsUMX3N8o3!Bmh3SCDw@|4LARe-&s1(?AhS2P44@FdAG9#)6q(Jh%o- z2D89aFdIw*bHE&MEtm_gBjMQ2dN2?ETreNp02YE9!6NWxuo&C~mVmc_W#Fw~1-KbJ z3g&@T;1;k3+zQr$+rT<7AFKzrgAL#gunD{kEcmuZU;)?+|Lx!<@DA`YxD)hCL;Vj1 zfOmpH;9Xz{xC;ygi@>ni0fD>0aPV&8NBBL&4;F(0crPe|_koe%9xw{r3&w!=gR$TP zU_AIBm<*PHso+CkCipNY4i5<22j;+E3g&|Q!A;;JU_SULSO}JZ#oz(31Uv|qf``Be z^he6Ua`+zuE5XOXD)0%gXIYf|3N?Sc`yJx0S1B9U@-Usr~+RE!@!du57vMJ_!20B zFN2ZbD_}Hu3XB0?1#@uRUIXLce;temYrzEYG?)y&0j7a(f|=kMFb8}K+yuT2=7aBm zgenKk!>H0Biz-!0*5i@Ov;6 zya0xS7eOBU0ZbdyBk)J!$94V_Xhe83D1twOk>D?2H25nRfa~KD7z_VzU_AIcm<;{_ zrh+YCCio|q3;qQb-_$4YZ!i!3%U}Wc4_E~L3+@H4fTbYA;razxuma@3N>Bk-fqq~O zs03@lE?^zl6|4uzo8ApzH?Rrp4qgO%fS14k@G{sF^jnPj9}EC{fk7Y{a0mf=gDS8O z7z*|U!@wXg9P9@Q;I*I;><>nO!C(|P0E_{z1LMGfU;-EdCWC{(G;lDO0}cT7PU_SiAzyfeMSO|uK#o!3A1RM#Lfuq23Fbu2&M}t-17_bH$3)X_+U>xq#abO+% zr66Q~87K>=(5bs%#S>VME5G=PDi5ex=RUO?0jGd*;8ZXHoCca_DcWcA!Bgn1q;9pU=g?x3b3&48tcJMrS2iOSi1TTVx zU^93pn1}W50$bqU1u_Y!|3QCnHy8-s4F-evfGV&U3V&%`CPF{)8EDoEu$*jUwb2>!;k0 zt#ZlEtzx~&&Yhe_wh8|d=RPJl_c77ALA2tqK_r6aUF*&*bZqD`c|#eF4ZVU5^;2?+ z(XLGW=*(1wtX=8QUSzu{zh#x2-(Xj|!yE@KgjM*lZ$XkBdx#bs~TI|AmEX3gZtiLi=$#s>}jdY}YN=`S`k;~KAqXA0J-wi21>7Us(DbjKcD?>Hma^i!-O-KW`Z%15B>-j~6YI2?nh2DUrL z>Nw?BU=KO?Ard^8Q)j7Z#{oJ* zjvX?*hEv|E+t4dnXCQO8+7>6$po zc2$0esB429YhgQCj?HCuRnAp#{pD4jb*A!RbM2zgiu{K5-+Ls$BpU3SiLcdq*- zv*f~gi4EwAqM03ta%=%ZinAf-6w*P1B(TTDUcDLRpdgNQ`mjlTkQ9z}zs9sYL5?CT z;##TrD924n%mzybd{eOUQFZ{w-snhtqe2p7JGRk_QS|F#AA~LW1@vc>c5En6 z3L=~H3&1vBV6P(*m;V>GAMqpR5ZhmQ02|>3A>}}B-`|SCjzp2iG$aHh1t|yi;}k<$ z1^?h|IdWIaUMDHygOqo%{Ww2WtK?&s&1=q0Ee%p`?#FcvU=*m1{>FP^$|tdge-EcWkyB3T$8~G<&f%0(`*Gco z_3Myh?0)wyqK#92&JN^QJx6>zr~DGW9&IE8Ib{iv^pzw%Ipse10@_GgSmhl=(#(=n 
zzGs!Y;Op5&f`W55k-Q~IUT2ki;kyR41<4Cav+i9u@hO(Q)=@C1L!?_?6(SW3^FpM8 zVQq+V!XS>_$x7mUmw4=8Nj%#jo)0k&-}7vUa`X_c*KEfY#yGYx%e9gfTY&6s*0I)l z$0jy}D92$FVUA4H;J>Y6yo%l8yt;u*#2cz*d6VE7zhjIhkoQx<&xs8P)LQ<)v$wbqhG;5@W1E`Dsa!YUq&JT-9*Wd zIN~1dByp34aDtQWVc{Kw`E?N1ql2((Itc6CL0C`+Vf{M@yRL(sx6TtLju3>sHWXPRV(YU%giEqI9C|ZrNi@qZQ^ZSgz zGjeiv{P4#zdl7ya;Xy$o$c2m$DV$Y7L5@F~p-we}^cie{K_1Y?h6#iThEc(!B`0R8 zvefEu6|+1gCp*jO&Q8fuC1oOzIFef(*~F2Qm88|E$ckz`;&X_NFe7op8m@|toH&2# zbcf67m>cVKOo>`DXG-k6IWtN0#K`L>PmGS98#{6G4Ux0vUllelhNz~_nK)z0lGs^G zCeE2Qcgd`&Q|C^JRWVD`5|M}OwM$7(l3*n!C##sm3|QC?*h`nGn3NP)>l0Vx5T+z0 zrlo-yDa%!O^pld4MI4zaDXUaW`l|G7U}>6)$w|r1NlStwI~CR{vJz5HUkf`WBY8!J zls@c~+??c06_bU72CPcSa4-w2lK@0ZSc6Pur)H^`6&Vt8SHqo@mW3ltjx7Pn3fT*_ zJ{vhnOiBW>Qqp0kWy3;r4v@TJ9c=7&4s4he8A-57Zdgq@uOD=BRwIfZklL{FIg|o7VY%?=kcFV40SA0_1Du+9LMEQB$%)`rCr)>!GnC{(7RWBgIw{F(osJZz19}|Y)_gc!NE%1DH4w+S z)tQyN!R1zEI(Au=vj}HoMGj$hPGXkiAm=GNGbtR_BK2bFT)BCr*z7DRR^yD7y;5vS zvJ|Uz#>!q2o020%3XVuOA2}D&#VMVplw{exBAMJ2DNHiCSmj_`B5t1SOeR^n&zNNF zG43xW2gfHJ$6gktfmY&Hn zFj+<>%fuvME13)uPs_qOtqUnrLybL1(J3v&e{?;@Nx>?Rl+0vYm(*=JASrX>Fn&bJ z-FUDom;jRUHyP{(rh?tUG?3JPnP32z1NH=SK~e_>g!ho%lL`b$27!)x{j=ZzFdXA10L6lC422rq6bf~)RA>3R!T_mu^HqHv zYDeFffmG@csebh>b(S3@)x^H38D=o*Z1Je1xkyPqYXwUh&uuK@0dNf*!zAh{?hDtpaZBSnqmI$Xq|`sWI=thM zDGX^%__|DGNI%@yWg0WEjns3#qnMC3YR{hO4C(#*x?GRi*!SfIW{B@%CYn)EsAn)k zDbzEWVHD~ZhKzLhy3A%mDXywHRO-3R2w$@ST`ZM)9y5|6?Hj4o^O;c;$0UwQy%4Pk z-!-t4kn__UV8T2Mx`!jr}lgVZ>Lh<(N66--#e+)cT%bEqEhdI+VkDzz9&Ug z>fOvl_m#c4@1|1U!%U*ccQG^B%huha@1;unJ}UJdD)nBdeP8Zxr}lh(KhRF?c{>lb zQ+rOkq@CJx+7D5w_d)IZ61p%GB;AFtC;x|>gSj^ z3ib1NfzZ8Vb9Y~z%n2&>3(Uf+ljt<$MP^Z_aaTv2Wa6(5>@=i?O8pX*`emwozrrl; zG?!OLoMLXG80UW#Iq_Uax^Lk%W{JDki~DsZ!HdE@x|ZsgoMsZ;SN7t5gIVfD;U4{F zyPSBwzRxg8?kjtAv+P?`>bEJ>4D$|^x{gWqnAf(E?=mSA*U5X#vbL*x^u14|KFcik zm|EM&52(~1GO2A>_vkxErLJdIcucKrAoKl=36Rt6O%!a_IJ!`3ibC)CWZO} zvxY)_k;$S^|In_q_q^(lOtxo1o7kV2oHl;Xo@QpPXF;3TpP6-S{GL24`wO$)lb~JP zuS{;c5KrDqRO+@VvGA(!{u>9@<2^P77zIBmpBmsN@7*Aky) z2q*8|B|uN!+zHqbAia0z2a-2pE+L-0snG(GcQ7u4}N5KY8y?0LgoIB1oQ;M}p)Txc`s<>HB2S@RR4(u^@RO z9S@Rc&B-8nikt?jz)X-l`ON``fw|yta1$5`=7Z#!X~Xay()Y;<;U5KmF-V>pmVl$d zGLSs$s{qMUyrUp_&Q%4HXHqpFdD>J9P5|pb9;^q|U;{{=(=>tPNk%hBo>8=b7ivN^^v|$ zRtZ1pRaJqczf=P*0Bb?gYpDZCf21BHJ&p4qX}LFoH-Q&H(tf@KCV-bg(u(!l==eTa z07%-BL0~c%0;YhWAZbs9gQV>f|78#9`(%}=eWdS`2?!^x1ra3efJl(kl;`ogTv9u< z;P<|yB#%NkDWlVHACS^F2V4Vg0<*w;FdHlcNx4-Fk`k!|BxOw*NJ^2TqXVSxlU2YE zue8kkCgQ|X15QS+-ODi9Fyy|z7KYr{#E|=%klfe7Fyy`__a?co$vxZ^hTOCMPLeR< zC-*gp>jBdmN75zh5|aCwq)ReF!g|B>aT*d%?k^Hfu4CdS*BkMZ>x}rxbwJi3=bwa= z^G`_5GfAJEU$Sj-JV|&uj0%PvKXQ+g<3{pI_Jip2U`U>~IJp%h@!McXd_D|GXE)4f zn1`MI{UFIt84TIpAsC{27KY^SZ5R@N4u-@B4$lI=wtp|8E?X|2HlSmhxQjEohRqu=MKmI0}gK4*KnD*B5uu-SPuP5kAD29$`O5)z2#p=P=u4meAG{L z>W@SJ&LfSFT*rLG+V->UHXK-we;rzDSZmXV<p_G6mboDGn<`D)SMB45zu#LNSb6J< z^`R9Jb&LNQ@%43g%^GlFTg00C&fPot8s(j%k31al=gHE@AGY+_wPovb5hukD4!(0` z`jFh0-;Y@RT0`M?d8em6e^2?u>g}uMjWXT&M`pvON!6d;_1T{f{e9uq$7fI8|INCu z&ipcZLjIPKkzs-TA8$Dv_|>bY{))VC=vT^Jtm8;>ggZm&N%e-V(Y7mEju5+d*PME2b=e+YGMx*8WP|A zXXnD5^&=MF{^uvlAHTQd*HI@Qsa^bOUPkok2luG%;N~Qp8bP|&%v)#PI^$UWwy&{v zKlf-Rv0Cg4lP#amx^quPpKZU{UwJ<~Wx$tl{hxjJXM0_*;rY`BPOEtJ(2w?rgU45- z9Z|ku*m1$GwN_8s`1J?HCqMkw{_;Qnyq%ced;OUw8toIJ7kx3~)QiRUeskJ>_o=y- zwUKWHz4*Wp`)A9aFE&kia`02{6x&0GzxHMI^A>gD&W-lOc?({;^{Kg=`kYu|KlkH= zJG;Di_x65&N7_T}2fi~M81?<;@-g;XD)u*4CvTcs_vt)a(#>7Y43AyE?aJ;g*4=-Z z2W(6lF+N=z7c2a6edSdpt^c6{2tFu=x%NdunI^CR@oj!h@ zdYCFbF=NFt{J#3el&tI(t23;_)Z_VKww~QpDlsQ(ZFbJ|jAg5ROik-p4{K(pB(2R_ zk+VTchjih@tduotv6YnMn5-4+R-~mYPsw&gx<^mR#i}{TNOa1&lr&Wuv8}@rv!`dQ 
zTfHhJYnW>7iit@{DcRV!Wr=CoDZ^~y1XtQ^S9YcErnqYB;)FJN6(_iM8mo&FT92nK zW@605$jIq4r^UrN9sd(Q8IY>N`QvykU&!y}ALC!=)6{RNcWHKMy9r|jn~)>iEp*j& zGYm4+8h$r)H?B8Un`%s_Otq#nraIGEQ@v@lIaCZ2!$n>cM58E*5#k(iiI^sC5O;`& z#AD(Q;w7=Gb&@sPT3{`*?zNU$%dH<-FWXpqS9?!;U;6<25c>%GSi9P8uv_hu?bGcs z_E>wIJ>H&RPqwGp)9jh{9DA;PlYOiG8GEh$OZzW&QbFV*-^2My>SgM?)RpR^>MC`$ zx<-9UU8_E$?xWwO->*NRuhU=A_cn|*L>N{YHXAAprwkViU5sOmR%5a;*Z7L@TVuLu zySdQ3$9%y2k@-h+PjQ&IRlHriC^n1VTYt84wu!deZAG^Iwnp14c5Dzg0oiUoU%;31 z6?{E^o^RkA`R~;~tH)^7+D*bXp;$O8m~?mQiggEcpX=u8&+6;-=k*QxOZpc5Wj$l? zGx{5YjUmPyV^>qA>7;4B`E_%q?FHK@+h?|~Z7sGdwqpBUdx^c$e$-xNKVz@6pS6>U zl8o&osSm0d4X;Vi6lwm_jMp}3mk4`>k-D9_9Q_;m5W{d|yfNEUVS3m6hUHyLv=}4C z;^;@=c=xka*eY#DZB@2vTMf2VYdd4Bvz@io+s@k>uy;+iCH5@)TlUZFWUw(4+lt|1 z`8YnF-_BR5pV9oJ8L5ra#%mL_$=bu(W7>g2jc`hst~;UsRWBHf1`)^jZo^B4%Z3TXtCUS%(m42wf!PFrU}mRMm~ri%NOy* z{87G&zo33x^SW-7KH4zXaK_Nh7-Ot4o-)=N&lu~BXN}3GRi?q_P3AmvzPSMBugF|% z-fLcE$+C18Q*rHOiaBDgxJk?t^F^yQ(z?T1Z#{2qur^v}+v03}?2aQ`YA>^w+beLS z$qvUMD=YamIHS?(7KE$Q>4zE< zjV~E{nC>wRH!n1QX)%ift+TC@ZF#mh`zrfW_Mh#f#Ee9`8g;Jvadori4~@Tepmw}= zi`Gx@7k(2a=?Zi&>R3ah;Won`hH<8B({rX;OHbTE4~eU7&)F3Axpt<=am~#@o>J5g zst>E{)t{?>R~s}1nq8WMnr}4*txbEI_ATvM?IrEs+CW?p`-B$ZvcTx#bn&_b-DMr4 z_tTf^%k<^ABgpl5)=+O~!MS3LR`W*lv*zE-FIlcwMBKAxYmRlh?Pc3yJ5!1=cjWzc zzLsC2E?1XoKG)=EgLHFs-|GbZWW)8iqx_6Lj5irmjO%b#t~CuZJ!C2~1(^pr&r5-& z&=Md9ia}z$m>?#L=fwuGQH-}HSd*>Ct&A<)rnTkTHsN^X+X`%jxX-?{y>0)BToaXe zeg*7MKdC;e{#!jvGhOqfrcN_nxKkLXo3DRC-=rU8SY{|Td}0`ByvJB=>}_Id@Js;f zgsXYApf;*Sb%Z)n9i?_Zo5|`_b(%U;orCkSNu8(8R~M)Y)kW%J^Q|;{j9jE zG^s-#>ec7f4eCa9llr21r6x=luH$ur&ZrZ05xVzvxyDV#JY&AGz*uN3GFBOx1eU2n zr5T}()V5wbt#?nVHcgwU&C%v+H)-><`Pu?)p|(g{tlg_E(Uxku_%CwdZlaH)@--7q!he(=FP|T1Ft(QGgIA1PQ@Hh@cWeg)kvp-~~Z2 z3Zf7pL<&(tv=Af23UNZbkRT)rsY05NiE=$x*d*i$`9gtEC={X8+AEX@r9zodE>sAW z!cn10sFv>WTH%aPhcc~RI4?8^jY5-fQD_z}Ij=B3oxd(X7pM!;1?xg|DqX0q?Xii} zMd_k-F}hgiwV13+)urh&bve3R-6mb0E+2P5p{@v}Un zU#UN;uhLiRYxJk|wfZy8yM~nFjkxkI>YL@#+~98rFa#Qc48evFgUS$U2s4Bmc!S_9 z6C({#hG;{KA=VIQh&Ln{k`1YbG()B#$B=8-WXLn*8ww1Ch9X?adkrOqQbU=c+)#m< z>Zqa0P;IC|ja7^Cug*a3*z<-4L!+U|aM93gxa7QlNo^8f3^WEwb&ARuY78@m8+oH( zG#W)?gfY?>WsFAY=_r#EQ17K0(~Oy@ecE2L#pJ5RLsgbJit9Ga9B$^#g4t*m%@O8E zbCfw6rFX13&Kz$}FejT+&1vRLbB;Opsxr95Txu>emzyihmFA=7Ds#2D#(c_LYd&MH zGoLlro6nmY%#G$I^F?#B`I5QCeA&!c{4D;K085}H$P#P`v8XJemM}}Wg|`S6qeZkt zSRyS^mS{_iCDsyWiMJ$Jk}av0G)tx>$C7K=WXZGSTS!S$WGS}nwUk&&EoGK+ONFJ< za@0~~skYQuPFZR#XDoG=vzB_xc}s(((b8nOXlb@wvb0z(TNu$#^cNlFRInH#s>Jr~ znMg4Tbrz{hgx25ig63)z9j04X_4UgRH^U5Ua`>Y7Mi7TY0NsHCjb$1nRFSYqT}S8f%TS zI!ez}Ynn9^C3UWKlQqwpkCs&-+E&GAU6r7HRfZN;g|*Up)LLb&w$@lrS!=CltaaA2 zu3E6kdePc!y<}~%UPd|WXY;oO*aB@qwqRR`O=SzUg`sTbZGz2c6KxT;NL!RG+7@Gr zMco;1ORy!|Qf+CrOj}O7(!1EU*H&UHwUyb*y|y+k+L~>b(Av3dW9)u*e|vyE&>mzD zwujhN=*5NE!|l9Xup8~7J;EMokFrO1+A1WY3?lj-B0rIz#?RpA@(cJSdfd{@OAuIwB#0RmTFQp8JZl;Mok{-)w!k(!8iSrFjE2(1)7ynlCj?njbZn&;nz$U9Rey?c*rnk85AjzNURg`+@co?Kj#BDAE7&TAn|Q zHszBj8%OEK>+Slf`Xv2I{muHVsM#y@q*NjIbv|mLGpLXIn>L$vn0_?%G?$n|EYDaz zw0vc`U|A$C6<-vM)??O<_WSJ&-e@VuZ9kIF`@6>%YLp2LEOEhaV z+cfuT4q?Cl(cFc64?)c$*CDmey1~8FejVlYw_D4)07|B@$X6s< zf^V4KHRYH$n75ffL22;4`497M%YBx8xND!Z9JkoTsp28)Ve3-ca$H~A(IP+wRh;yW zQZ;{?|B(NRABDQRR2`|Q(Y&r{LF?>*c9d{fIE{MhoZycd=Cm#Z<@-naaKjr0(KsFD z-xtOZ({Pi)WHWU)KW%;vrPAx>&&^XUGf=8LZF$ZzO`Iv76F(EHt>e%F`Ce-4b4&=X zt9$q-&>sDn7ttzQuKo$_pre{M(H2^v%|IQ#ReMPLy!JEg58CU5al&jN9d&Ana71`h z_)Pdq=&2ix7DAkEmF_m(1G?kL*Vm}Gg7l;Glk_*?%Xty-_0=6Fv+mQa5HLw zqbTh!8vJp*BG8J?MeF9U@iflo1>6T@dt(J!^&s*Ncx$i0tL=P<%t>Z$JQZI-fp>NjRIud=e zXzN^*C5NpqTR*Y>Wc9}#ztpzP_B472mu&vHC$;uz_H_FJ`^)Gb{6&sU1jp0@*P`DT z!>8j8JIue&xA241lhmu#Th;ffA5*`k{uq6gzM4^*NKKsPR?Xd-Bbrw<-)nknM`$N& 
z=WB1pS$j@<4p;tJv{k3zDCOurK+W!lT3keJor7Ar0<~`=j*iigj=C|_IN6wBe9rip z(a$s#eW!z_mrX5bZ(eWCFmE+KjQXY7Jkl}=wL-DwPs=QEySN|c^j9&`x&o!+ar6K< z+i2S?+gjV*woh&7MWU0#F%8n-Mi3vyKZJewmj8o~QEx^&`(5=9>fsu@=6=mFT2hmp|$!8fF{rGCXAnK^eyC*$wvPsc@FjWtJu5u zH6Lrf(EO%}mU{DX7|*yx`?YoldX?{@e-o#B4dwGi-5`Ai`UQVttfnX0^<{=J#))V} zZ!rF0++Zp)?ZMcW1|vq7&Hku6_K4lAgV17|XFX?)L@%}{+ARi*TJ6Eu!V~uANIvp7 z>GxLm@+Ew@TC09peOld3Q=%D*{@n~=1>}4`xY;7?{qc&R( zS#08b@keZ<#5xuuHy_#39Q#(}*tbHA*ohdgyGgS_6QF%aJ6_lzd@Ib;J*Mlg-;Vlk zn&BA6PSzX0Hoj~+XS!q>U^bu~pM$cp!Tg;$60PbrmR*)3mX|H(EWe=4{KujYyNW%< z-lADtAl@XVh^sM*vQ4~0+$}yOJ|_y+Bx}x^e`lE#u=xigow~X%A3c_^Z7K|@`F7(pbbtiH3&+ER@1?bKC zM2xOHq<>ofre5w%x%Y#X8E-b;ZhYAIj`50df@z6q7g`m)%y#o;^AU4@%LvO<%L|q_ z(Z*jd9u~h92U|bH$WNfHzq9T7q3tVMJnFNX?G5%udlN>XnsLoAl^oNEeL2dX;NL=v zu#pc|4^itdjxYmbjN8=@tG`zNrJjb`zY14Uou(13z>At@v;$i-mo;r$fJ?Nyv?sL| zVUjRcSSTz*ulfn;3TqNL-8QKQOZui)_e0zF4X+r|j2n%8Q0w1}EAN2mMbrDHCe(Pr z<`A>W9BMvfIgDP-C#c)EbkNsm-vhwT)u2rSWbjGqwqplIfr(H^WBGFFQ7nx^mbymC zj$%Br+FoNng;Cfu(zs+jt^xOB*kZqo@k0gEEY+!gygwhn2Vz_=m=EDqD1*ZIaGvJ{ z-pGr51Ru#q@zH2M#7g6g34Ai2%BS&}d=8(>Z$gVSAET^=(um+*+&i9I@8_j)*e3oW z-^^b^OXM=zvVLlRb$~ih9i$Fchp1KRP<5C({QvjSp=R|ZT=|#L_VLsBYXUTZnjlTE zCPbssglfVx;TXLYG)9f6i9ow3N)xS#(Zr&C6pu08Wb{AMG@0mu}1pO+9)g4ehQ-M%(t@>#TnggKk&(zO!ok?ZVnXWfI zVSdW|GI|DYp>OcB`AzXtv>ugKHQL8>tV=Lzxyrf^ecLm(KWshhz3s#7qwM2R$0`_q z^b^8)4SJwA@N@Wud?NareQ?%C;cT06rl+BA880LX%h5l`Mj5?X*e>i8b_;uihlEFk z$IxFmCY-={dTW2GzQeILUKjO08q4|`BUjb`fia~U%}dPJS_WC}Mfp0`I@6}Je<#(q z3h6sx{nU4>b2MSv8&InMj$Ysk#y3qrnmKX2HO!`$>O+Ta0iUi`p!U6AsMeYE0}byO zelhkl8_`3()wWR32-qBpEP1i0E6d12Hpnln*=M5hl@{Qy1YIM>?W_K9sD zS*OOaP7->h7t}{I*;<3}w{TLo4Yh9%)KK@B=9>pvKDRt7E=L{ylkFL@PMu?&^}Gdj z3?Ax!FXT_VXc02`movhQ~SZ60c3%&Dmng_K@gyHCE zKcQc3(4uW}!nD~u8GXDr#a-69w*K}{$U4mm>04s^P~MNyT+kfRW(x-0-@23fZH9x! z)255&UZTc2&z4KHc-k8Z`$7Ii^%ojHVS!Gkzr(N$>FzOKwA6}&Y=7Dq{Ng1Hnwk6< z^=8c?ZKY7C`$GSMp%2CxCR>JxIo2540g{%=PulJl{zLWC7&llcOw|q3|EvGlaNKye zDZ_ldWu(YiKeIk>yU)(>e$uzSviO89vY4Lgm8@x+fD(fKwl+GLJ%RKO>zpVQo{ z{a$!YHxO5Ev?Txi{4OC{PmKl12{ z3E}PPxtd3`tAz~RcKsZ~Y-5aRIa=}|;%nl&)7j?W0e1KkJlwt$sd6|Mu#?!>C$+!x)T=E;M8r?l3%J zc;4_Ho`Cf-PBN}FmK#4YUSpbQT3}jZy501!>9FYq^qZCDNoF3;h;s0J=Qqn>aT-QD z?-yUjIOiZdQ@P2SjwdEhSesDiW!rXR%-{>;EYW@o#!93y1N@S+0d2XS`~=>{C-7VO zH~8=P|MnWLeyZ`m-D@bz_Z@AuAYK%6`#WgBFseBkuA@|^E9B-Y7v-}QP zcctjDGAbpLDxF_XJF@^LPp+?26@jsa^(~TXB$L zrdF@h>rv$F)Eo3hy@}o3qDw}A5o81#Ax4-HZbZ-nMH$gXj3FDUp&J(&jm9QVxYy;i zU}n}+3q%~ijVA18M2Dk!*T;w8^p85r>MS zgbsF#^L;S5F7RdNDE!^Hj-sX(Z0q(BuP$J5d4@JcBig?{DN@2j6s4%QaTaY>~e zC42Vpk$oek5og333C0K`(MU3qjdA^hl_IHq@9nJ`9fZ3>tE&^Y8LH$})U>_H0TeVQvP}hyF$3aQ@ac17>e%3uipRS){ z1bKWQ!n-|Vy>HTcUu(`VPq#Yz#`qTdZi$s@I!Y?%-}IlCmYvsw?7Kc_lD7x zbr|Ql-E)`cKG?Li;56@mz>h@v@emB|bA0+w&~k*C15Gbn?nHEe*}u9Tqruu|&7zK& z?GcAz@Hu! 
zy-9gjS)=Y&W3-XlXb|{7*JbFwmZ9+4tM@fdKEz3S21=n1z3JwQ=19~uPT$?W<50*p zgI9DCe*6pMt+MWTn)+Y?6>P4uO_>UUH%xn2JJmHG-r^R}pEUhbeK=}|)6hcofV*n) zUJoNZ-g?T4^Znh|KQ_l+FLC~#*GZmCt$86D!_!eUM4`0^gqQkA&CoWYA})2g-1oXq zKxx^ck2T&kOxAy2?_%%qF!^7bH=}Kx;(OLNG=iJtH{ zbuOyC8JqMEbhTz%pIVu|*L-8pAWJF!pXZV1 z!0+D$5_gUBD`zsA2e0}J-9?FZl?sIiQ6ZeVxYIF~yjn#(X zxgQP7b*$QP=6m3E&sisd!*-0F1$!gqbP|}~-FJ6BlNBCAxQd_?08x{K|n2Y~sOymt->G;wy)7cN@=2Z1_ z^#-j26<(-&F52v8^^1(pjcm`+=+%xf?}wKvu}*;%*5OoG2c`b+k$@g48Lep=+)@^b zpgc7MZDb_cT^UVa92)3Eu*($QR|fjEA~Vm*@?$QQokSg#EZ^u@;=D@v3r}FT_N6OC z4>eBlP4-u;u_9jKfxqc8oB+1DCwb@iPL1~48)-OaxH zeV6l%h31JApXq;mbJZnknYvEhjQ00a?xybANhqEt zXxD4^aV~|rhPWY=)_fU_4DyVyoCEM;)N9M}am&v@bQkI{6H4?VHdP*WpX) zf>wB_dny{>689_a|GGP%-OJV&>8tdo^jA1-jy5_Q-HcG9C%XE6a8tJD;cDZ0Vm7$+A%8mecp%HT*jA*o3d+@rZz;n2 zYo};7ll@(l^&)!OO~Uzd5jeX=-_l@oF{QXbshl<4`8Q-^|NOjY$*-!}zr!}`CI(6g z^s}3OV&xyz?gD6|E`XngBkj?rd0#ePH#=F~tRa@mIu~vFwbl%4iM7Idn(FKg>usSc zFXe=-#bLJ7*Wj~NHd0EUw3FpzD?e<#hpqB((65d~d2p+8pR!t6kH_WTN;mLe7wVaD zxK(ZfQ(LS)1~=Y7XVeGRjmZskC|`-}`m5_Imwoc&ealnvLw<>w=%xM0$_>U%)S6Ej z1;R}>Z@-hF`3GDIIo2K4dvpg&@YD@NQC;S*DI?&aU+UjN5p?@3a3HJ>4 zIAI&^q?X$LBW{P0)@W;wZ?8|v2^5^2UH7CM!Y6Z%Uicl%@KNIl?^-zL{^pr@BbJ&g zV4ruv1q`sxv?f|N!UwFyd$@a$91*uxq@6 zK&g&DLS=_M^00M4m*NFI8NBgs=Q?Vi9_lc37mumi)E?UD+U@v_9?{;@?r^Q-j{4Gd zJ$;p_U##D#2OB-WKx4s@r*PBWX)FLY+hA-p_87-{PQ?W?*)zlQ0KHt5=Vj09p7+qu zHG(#F_4f6S@xF@tFU}fEZ@12R)q22;#V>SU?!J~je7Z5q*a*Iv z2PaU4&v!5QaESL*Z>)DTUZRhv5_?*Gth=oH#M$);zL?|i!Q2jaRf9_<0R6iQXL7v# z9!mplJEo`yZFQRMAF0A|Tn>x%CXVT2>94<5`)Oxsm+tQ#J_7CPgEo97+VE}YR!*mf zUjW8+l;JQY8nfxWqd{y|pj$Z6dooU^GCWJ$ya8soso(=m!NFQ&uI8+*Hx)cUS$KP1 zL^%#aovJNnM=Si?C1yd6}Y^=(tSMZYrqnVIpJS6-lq@mfQIrs`uWYCXm3}u zFHd_U&-!-Ife*~RW)Ew;HHjN-F>B&od;`bRo6X_$dIgW_F(|)Wv6Eu|7JEnR6DU#+ zse+WG`as)GM@`z+`-Q6*%i!sB&6xunGNeW1#RK zcXk-KV459YgA5wZF`#Al!X3Vfvc9V_2*&FYweVDI}Of$J_@3X$ud|+e|-ZA zxoG_&IJ~dXJa6RO2=?^!oP>t0;2`JUb9R_5DLUc&^L<}v>{)o{&X2t?c5>_$_`QMz z|5=V!`(XFm@1>vJ;rNJMzZ<>m(WqsQK`Yw}rR>S*WSyvFecX-1Krlw5kNw7-ceu*f zLevp|fu&ub4&3iT{vT7Z4S(+`ICNLjk)KXw_$t2k@vvwgvofy3H5TZd0P6ao_cZew zvjSXU7+7ho)z>%0w*s$}my8O~KrV%U;w%z!v7Cf{k%sjZWey91C}I%bdvRyq4^Rc+V^Wr`r2~ImJ@gWgEec{eS;K zd8$Ko9tHP4S54E-<$nFZy;{#P&h_---ISR#QKj}4*s%ZaKQ7+{3a%<0IR_rprjl53 z6d21~Bh90Ek2YU5=UJEfwW$R`0!!{KFP2Xx3BZZg-v^RA3_gD}>d{H8j?3|0--9Rb zaebyS6dmmQ-cobAH4Me&c5vVzfsZ~W&u|P!kF!Hrg*wrNll%;D)xUh}ea|ohtc7R} zy?77&&pZhK#6q-yCbh&H>LM+}Ws$kEMK3Y_?zzPKDyqrpRM)8}hb++kNE_{!q{<+n z+wTD0{{iVOFY-;7gZJMJ-hUPK$ORy~y6b5awhK`9<>G{#N`A{QZtHw+4Cx?uTjMyx zUIZ5o61>%=vdi&?W0CVrk|avi>9}D&LI*kqg=v+M>T!@OGT-!pSuV3xqxC_8=MR^6 z$eo?9IUi6as)Nw*1h~WX7^;FKFD#r@;485|XJe4Sr_1GPN4>L23BvV~;J!$|!noNp z2c6+M>tDVP?Y~D7v8_Y>O78CLkCt}0cCqVf_e1(B<9W|ppxE5SoMWBE?>&Xpd7g8M za=m&N>VYTRFX_9D^`5(72?klat!I5WpxBF@#qS-DW_6FF$~jw^pbo}``LTPS9_ks0 zigTnj*>^4bt+Q0aU!NsEMBd2@%2xH?S_hEa9DNq5irxC5RTAHD@;?XP>`e8UiAym6 zpN&e*agX@~{_wr*{%)|x@hHu2u{Q=0=91m=H7m^-dovEB zWk2l0i1UA+@2fvns9Ycqe1Lw|UpFlfz64pVwG3+P>2gJBz(_urFjW zDLiZB^;C~pt%)A}oTHqxozI{apUD}&L6ZtP3mkn1CuCTAN(9M6k-sn{Vj)_RM~w?S z6X=5P0a;j0r&34vbc}hjIoNcVXPLvOjZTEkIES==G~Z=t>}LAb+k3FMvlLD(947a} z^)ngVXE9Fhx9Nlbf?N2Xa5^#0v+&)oqXYggtXIDBf-+bgiRbV(7>IT1zkZ{{kyBg_ z(Cl;YNqp|==+8^XUVx5hs&SR^tf98%clk9gH=8vFwlja9v9Eouy_zW>@)GR$4TD<$B!Q7jQDzNkT7?-qRhI z)m-@SP&9m#m21dNnhi6)N%>6ast#Aj;&FYPn|GIbD|Ol$7_MzvC)atdDX#1Bs8$?S z9!v*gAjs0i^q`O9YEDrb=WUK&a7XMb%*9Te;cb@XoKS4KImrEZ+}tz;v+ z()Rn(u6N#ouk7JgSJ`IgKBwcL>j`L0i|J&gkPtE5cbm<7G0h-`?;j|ykb9~9$f!uC_Q_IbkZ*Jc zIw(64Yl*r74r{IYEZGeIq8Hq$enLO^HR*OewSgcDaoW||b=r;EEu^WHyPD8bHCb@ZkHLs*Lzr(zj z1h5t6Dsv6lF&oLHc%MYNFTmR^>nu`WCR=GBLAi7bh1Ps>UN&3Tke~GcnytruYw@Rj 
zjH=mos71uaf?FlVUO<9g1$k?zR~SOk#g1H)f=Elbm>T~UJeK$1u3UoewVE81cc~(e za&)DO9f6NG-+4KCZ%@*%cGZsM3H5Oea1CcitVegW*EPapcvpZM5PQ=voF)-o$XoX8SL$4 zF2mE7ERqVhSzlR0e41~9?>aPIe<#!CHTvn)*vnCiFT(}9nWT_U{8(087b#Dyzfjz* zKJK_AlJh2!Ky!mU69?;qJl|FH-0OK3pUAsmk&kx_atsmf;$ zsfk+o9Opj`(Ms&*L85IH%=UWdaV-bmdC2Y;HumcDr(qG6&E3Vr3cY;{|%L zt)!D3NB-6m?p<^m0buG0bkGl@Jl+Gtat6C%K03Z#hKH)}4ltHVu(x+n?0$^b@&fOL zc)Y$uuWgcOluuv!u(gWL`g0U^$Kc$K@}2Ly2-bcHtGk}{-IIi%gxE{T@B1eR+?)8f zQSwkDb`f<(Ki)x+JQ%nAFcRPX;ppS^@?3^HC*rNWgXi#&^B;68M^o)4DCa6yaSPn3 z>?S*}8%YPTI1xTm_ka|IlAbpi_2gD^$99QxF3L5U6tRhD$FAXoyA1`|Y?qy0@R@55 z`YbOwenZhgj3MRg89ZDckkfUsUZmHf)%k=oJ<@w7i2)aq$?`XHt7duc@h(JJ{V16? z73iy9;yif6`wrE_r{3LE6g|vA<`8aVAL&!)bLu~8zJOk&D;ZG(aipG!BXumQ(k%S= zvv^ty$$0wG>g4MV(~^QVssd+2IDJC0-v^k|MNIx2DZeUra~w}T-V(>v&V|lp^w3>M zHT;au_ylz;ZjnZ`3OigwNL)z)Mfi_f)34O;0z;jO%Oan|m0g@_cFszS={6^Ti)PT1 zv-WxPV_Ch^@%}GxtadzydvX`+vzxQ8(*b@niRXSRJN8r*?;qpP&2}$F^HAr0&)o;5 z$ba+}V~yvZ+cP8ZP!_h#R{DcgHvnPQPJu!Xb!W3qT|)&5S3+;G=r?rYp98Y@9XhI&SM z#*<{$?CDNY&TC!=2sJ;apo_pT)}ZWIM`Fzjr2V`~a>N$rR(8t!&i~+|+(2$kGg{a? z)fatIvH$l5JFevWe?f&2j^}rp^ET&V=TmsJ?mNG&`O_wM#+VbHq=fQO> z=f13f@u#pHo^36=j3hzPY58H5HC9lTIR+`>POzR0*A68E0%2x7a39Jh!9PX(9(8fDC{zo@Er? ze-&p(Joj1>Oi3!*$V@83T<(=Zandd)MYaM3RW<6VI=bQpoct{?62YL75xDnb#Q7SJ zXFmx%G8M0WCcIrPNnwTLdDzZ|GI*dWajMpFCpDl`YjFmMgs=$Q4l$^HO;W)|P*sf+ zXK5xb5?cjaNV-r7Dw{GgL8{1m+ypwj9Ytgl&XNFlnlSkAC{%1JnU3+~IVQo3r&8l& zqQ=feM^{MZR0)2FG88&ha3!1Q+PA}(Hwh0)5c!PZWHUzNf!3*W63AmrCKoh~H=e~k znFk+QME2rRau>^Cc{Y%@Sj(BWlWM1#+AfHE#c)!YqRCa%>5me~Q%ojHF^wF>EHV`H zNNOq~J8>y0)pBa&4P;u?l9jj!YjL~m#OczE%OwbhOE~V9XcT!m9*P9~6Uiuk)5ws@!ZVSFU!n+R zXemC4ay$|n@DbO7%kIP%(M&}egdZZDyp3ow5p}q_1Ts{TJt?FhW{`lGLjr99b!svA zMJb7gm82ckkaTFD`i-0|67^~b%F{@gF&X`J9Q;@!NroxZgBhSeIaGuNBo`KQ7nITs zRgzX%LsDTqynQ36utYjx2y9@a$tEog6}43zPdA#|CV^*|$8+1jvubjO(A(=gnJloE zBA&!1oRC}cTT?$nMBZn#NI~^$F_J~nY=%J8{mRK=k>yz~v~V>dzjLQZ?v%))4Qj<gMTT;t$lVMQj!HlFq>JR?M3JwVCXzLCM5bnuNYgCE zUr^4|sb)phi|ov1k$M{<@-m}EQl{T!m5c)+r8O5bPo%i|y*uT?M^z)9rQbW{cTM^I zQnGML`5ien51Q8M!twiW{LY(7k(OO+*a%^x@Y(nsHgbG zGF&8G$|AGd@8wD%p*O8HHMu}!SuPcsU6mrgvQ{KlHi8#7n?WM2GE(GJ>LQ^sQDjr5 ziB!rQkw;l1k|;|>2Bp1j!=y<5Naz2pwqm$P^0_k_d?UCC>g+qWGEC@Yf;r#QIm@d# zw_`Y?^Erll;_A?Mm}F7ULzUWM#E@m0&;1xd3Q{(Cp><>xn`B4N1B-1jV_^95 z;rJS?2yVV?Zn-*NF!xwGcUHCkeyRx*+Rzy8qI~Y1204P8B^%bD&Jj! z&*xNcP$D?DvpJ*d)L;}$>G*N0NwAHfp3DbXXdu0Ngy692+`&`>>7b|8^vW@yGx<2T z8jJ}1x7pn3b#Uw^9*%irnzn%8k0Z^mf;`a(+(y|r73!>D?(1~!=4$TY81CGB?pKnw z>;2C-g8MU@yRr_Xz~l~`$9>lV{x*(#tb#i$g8M0(yQoeH=C(=a9H{0NiD7@|vzHq{ z?MJXjm$M&(*?Z~ivugHK4Eraaz0yF=;s~5*%USQiq{pUHiBwZ{#IQc|S&I$c2%Ll2 ztgJdSn6;BGRz%AIYa)V5A)Bf|tc%9qZ(TI};+1inSj#cryDsMa(N+eSz;V)Yw&CHe zjo)ZhMEaj^@b7R|{Ngnsx4|_L=l{%ywMu`72x2ak*(*K#;RDUA3!LSn8(bSH#|s?h@Xn3j5Y@~L6396ybD2OOTdxKK)oz!p?{!RQks$X<+rbu`JW7{OG)ap=R+NgK^(;zvID^7F(j ztuh#yDp;gVupHarW}4uW0$?4(nDG~dvtNbzh=;F9f@4aB1IdKB$%SnygbgXdhgb&h zR0S`x360Bk7^o%~k^tuPg~8B9!8)oiJn^tnNw6lVa6OsuQ@QXbg|I#)FjZwRDOK=4 zo8YXr!>KgE1O<@P7>52hiiAcL9xEQ6B?)#Y6-Fx)#w8b?s1R^z zB5n;?1@&YVG%`s>VkTV(^XMX(MJF?dE)KO>B5JA>RK^)(z2z`rr+~z_Viahl%*Ck0 zi&29Uv>w+{qhDuP6D|b>a1byBBZLVUk?4_SCSSyneUQk+ixj3^WH9L>2On_(YPDi! 
[GIT binary patch data omitted]

diff --git a/bindings/python/sample_ppc.py b/bindings/python/sample_ppc.py
new file mode 100755
--- /dev/null
+++ b/bindings/python/sample_ppc.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# Sample code for PPC of Unicorn.
+#
+
+from __future__ import print_function
+from unicorn import *
+from unicorn.ppc_const import *
+
+
+# code to be emulated
+PPC_CODE = b"\x7F\x46\x1A\x14" # add r26, r6, r3
+# memory address where emulation starts
+ADDRESS = 0x10000
+
+
+# callback for tracing basic blocks
+def hook_block(uc, address, size, user_data):
+    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))
+
+
+# callback for tracing instructions
+def hook_code(uc, address, size, user_data):
+    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))
+
+
+# Test PPC
+def test_ppc():
+    print("Emulate PPC code")
+    try:
+        # Initialize emulator in PPC EB mode
+        mu = Uc(UC_ARCH_PPC, UC_MODE_PPC32 | UC_MODE_BIG_ENDIAN)
+
+        # map 2MB memory for this emulation
+        mu.mem_map(ADDRESS, 2 * 1024 * 1024)
+
+        # write machine code to be emulated to memory
+        mu.mem_write(ADDRESS, PPC_CODE)
+
+        # initialize machine registers
+        mu.reg_write(UC_PPC_REG_3, 0x1234)
+        mu.reg_write(UC_PPC_REG_6, 0x6789)
+        mu.reg_write(UC_PPC_REG_26, 0x5555)
+
+        # tracing all basic blocks with customized callback
+        mu.hook_add(UC_HOOK_BLOCK, hook_block)
+
+        # tracing all instructions with customized callback
+        mu.hook_add(UC_HOOK_CODE, hook_code)
+
+        # emulate machine code in infinite time
+        mu.emu_start(ADDRESS, ADDRESS + len(PPC_CODE))
+
+        # now print out some registers
+        print(">>> Emulation done. Below is the CPU context")
+
+        r26 = mu.reg_read(UC_PPC_REG_26)
+        print(">>> r26 = 0x%x" % r26)
+
+    except UcError as e:
+        print("ERROR: %s" % e)
+
+
+if __name__ == '__main__':
+    test_ppc()
+
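For reference, the instruction emulated above computes r26 = r6 + r3 = 0x6789 + 0x1234 = 0x79bd, so the sample should print ">>> r26 = 0x79bd". A minimal self-check, sketched here rather than taken from the patch, could be appended inside test_ppc() right after emu_start():

    # illustrative only: r26 should now hold the sum of r6 and r3
    assert mu.reg_read(UC_PPC_REG_26) == 0x6789 + 0x1234  # 0x79bd
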
diff --git a/bindings/python/sample_riscv.py b/bindings/python/sample_riscv.py
new file mode 100755
index 00000000..3bd032bd
--- /dev/null
+++ b/bindings/python/sample_riscv.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# Sample code for RISCV of Unicorn. Nguyen Anh Quynh
+#
+
+from __future__ import print_function
+from unicorn import *
+from unicorn.riscv_const import *
+
+
+'''
+$ cstool riscv64 1305100093850502
+ 0  13 05 10 00  addi a0, zero, 1
+ 4  93 85 05 02  addi a1, a1, 0x20
+'''
+RISCV_CODE = b"\x13\x05\x10\x00\x93\x85\x05\x02"
+
+# memory address where emulation starts
+ADDRESS = 0x10000
+
+
+# callback for tracing basic blocks
+def hook_block(uc, address, size, user_data):
+    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))
+
+
+# callback for tracing instructions
+def hook_code(uc, address, size, user_data):
+    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))
+
+
+# Test RISCV
+def test_riscv():
+    print("Emulate RISCV code")
+    try:
+        # Initialize emulator in RISCV32 mode
+        mu = Uc(UC_ARCH_RISCV, UC_MODE_RISCV32)
+
+        # map 2MB memory for this emulation
+        mu.mem_map(ADDRESS, 2 * 1024 * 1024)
+
+        # write machine code to be emulated to memory
+        mu.mem_write(ADDRESS, RISCV_CODE)
+
+        # initialize machine registers
+        mu.reg_write(UC_RISCV_REG_A0, 0x1234)
+        mu.reg_write(UC_RISCV_REG_A1, 0x7890)
+
+        # tracing all basic blocks with customized callback
+        mu.hook_add(UC_HOOK_BLOCK, hook_block)
+
+        # tracing all instructions with customized callback
+        mu.hook_add(UC_HOOK_CODE, hook_code)
+
+        # emulate machine code in infinite time
+        mu.emu_start(ADDRESS, ADDRESS + len(RISCV_CODE))
+
+        # now print out some registers
+        print(">>> Emulation done. Below is the CPU context")
+
+        a0 = mu.reg_read(UC_RISCV_REG_A0)
+        a1 = mu.reg_read(UC_RISCV_REG_A1)
+        print(">>> A0 = 0x%x" %a0)
+        print(">>> A1 = 0x%x" %a1)
+
+    except UcError as e:
+        print("ERROR: %s" % e)
+
+
+if __name__ == '__main__':
+    test_riscv()
+
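Per the cstool listing embedded in the sample, the two instructions set a0 = zero + 1 and a1 = a1 + 0x20, so with the initial values above the run should end with A0 = 0x1 and A1 = 0x7890 + 0x20 = 0x78b0. A sketch of asserting that outcome (illustrative only, assumed to run inside test_riscv() after emu_start()):

    # a0 is overwritten with the immediate 1; a1 is incremented by 0x20
    assert mu.reg_read(UC_RISCV_REG_A0) == 1
    assert mu.reg_read(UC_RISCV_REG_A1) == 0x7890 + 0x20  # 0x78b0
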
diff --git a/bindings/python/sample_x86.py b/bindings/python/sample_x86.py
index 25d947c1..8d12e549 100755
--- a/bindings/python/sample_x86.py
+++ b/bindings/python/sample_x86.py
@@ -16,6 +16,7 @@ X86_CODE64 = b"\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\
 X86_CODE32_INOUT = b"\x41\xE4\x3F\x4a\xE6\x46\x43" # INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx
 X86_CODE64_SYSCALL = b'\x0f\x05' # SYSCALL
 X86_CODE16 = b'\x00\x00' # add byte ptr [bx + si], al
+X86_MMIO_CODE = b"\x89\x0d\x04\x00\x02\x00\x8b\x0d\x04\x00\x02\x00" # mov [0x20004], ecx; mov ecx, [0x20004]

 # memory address where emulation starts
 ADDRESS = 0x1000000
@@ -386,7 +387,6 @@ def test_i386_loop():
     except UcError as e:
         print("ERROR: %s" % e)

-
 # Test X86 32 bit with IN/OUT instruction
 def test_i386_inout():
     print("Emulate i386 code with IN/OUT instructions")
@@ -464,6 +464,9 @@ def test_i386_context_save():
     print(">>> Unpickling CPU context")
     saved_context = pickle.loads(pickled_saved_context)

+    print(">>> Modifying some register.")
+    saved_context.reg_write(UC_X86_REG_EAX, 0xc8c8)
+
     print(">>> CPU context restored. Below is the CPU context")
     mu.context_restore(saved_context)
     print(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX)))
@@ -628,6 +631,38 @@ def test_x86_16():
     except UcError as e:
         print("ERROR: %s" % e)

+def mmio_read_cb(uc, offset, size, data):
+    print(f">>> Read IO memory at offset {hex(offset)} with {hex(size)} bytes and return 0x19260817")
+
+    return 0x19260817
+
+def mmio_write_cb(uc, offset, size, value, data):
+    print(f">>> Write value {hex(value)} to IO memory at offset {hex(offset)} with {hex(size)} bytes")
+
+def test_i386_mmio():
+    print("Test i386 IO memory")
+    try:
+        # Initialize emulator in X86-32bit mode
+        mu = Uc(UC_ARCH_X86, UC_MODE_32)
+
+        # map 32KB (0x8000 bytes) memory for this emulation and write the code
+        mu.mem_map(0x10000, 0x8000)
+        mu.mem_write(0x10000, X86_MMIO_CODE)
+
+        # map the IO memory
+        mu.mmio_map(0x20000, 0x4000, mmio_read_cb, None, mmio_write_cb, None)
+
+        # prepare registers.
+        mu.reg_write(UC_X86_REG_ECX, 0xdeadbeef)
+
+        # emulate machine code in infinite time
+        mu.emu_start(0x10000, 0x10000 + len(X86_MMIO_CODE))
+
+        # now print out some registers
+        print(f">>> Emulation done. ECX={hex(mu.reg_read(UC_X86_REG_ECX))}")
+
+    except UcError as e:
+        print("ERROR: %s" % e)

 if __name__ == '__main__':
     test_x86_16()
@@ -651,3 +686,5 @@ if __name__ == '__main__':
     test_x86_64()
     print("=" * 35)
     test_x86_64_syscall()
+    print("=" * 35)
+    test_i386_mmio()
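The MMIO callbacks in the new test are stateless: writes are only logged, and every read returns the constant 0x19260817, so ECX finishes holding that constant rather than 0xdeadbeef. A common variation is to back the region with real storage so that guest writes can be read back; the dict-backed device below is an illustrative sketch built on the same mmio_map() API shown above, not code from this patch:

    from unicorn import Uc, UC_ARCH_X86, UC_MODE_32
    from unicorn.x86_const import UC_X86_REG_ECX

    # mov [0x20004], ecx; mov ecx, [0x20004] -- same bytes as X86_MMIO_CODE above
    CODE = b"\x89\x0d\x04\x00\x02\x00\x8b\x0d\x04\x00\x02\x00"
    storage = {}  # offset -> last value the guest wrote

    def dev_read(uc, offset, size, data):
        # offsets are relative to the mapped base (0x20000), so the guest
        # access at 0x20004 arrives here as offset 0x4
        return storage.get(offset, 0)

    def dev_write(uc, offset, size, value, data):
        storage[offset] = value

    mu = Uc(UC_ARCH_X86, UC_MODE_32)
    mu.mem_map(0x10000, 0x8000)
    mu.mem_write(0x10000, CODE)
    mu.mmio_map(0x20000, 0x4000, dev_read, None, dev_write, None)
    mu.reg_write(UC_X86_REG_ECX, 0xdeadbeef)
    mu.emu_start(0x10000, 0x10000 + len(CODE))
    assert mu.reg_read(UC_X86_REG_ECX) == 0xdeadbeef  # value round-trips through the device
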
-ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = os.path.dirname(os.path.realpath(__file__)) LIBS_DIR = os.path.join(ROOT_DIR, 'unicorn', 'lib') HEADERS_DIR = os.path.join(ROOT_DIR, 'unicorn', 'include') SRC_DIR = os.path.join(ROOT_DIR, 'src') -BUILD_DIR = SRC_DIR if os.path.exists(SRC_DIR) else os.path.join(ROOT_DIR, '../..') +UC_DIR = os.path.join(ROOT_DIR, '../..') +BUILD_DIR = os.path.join(UC_DIR, 'build') -# Parse version from pkgconfig.mk VERSION_DATA = {} -with open(os.path.join(BUILD_DIR, 'pkgconfig.mk')) as fp: - lines = fp.readlines() - for line in lines: - line = line.strip() - if len(line) == 0: - continue - if line.startswith('#'): - continue - if '=' not in line: - continue - - k, v = line.split('=', 1) - k = k.strip() - v = v.strip() - if len(k) == 0 or len(v) == 0: - continue - VERSION_DATA[k] = v - -if 'PKG_MAJOR' not in VERSION_DATA or \ - 'PKG_MINOR' not in VERSION_DATA or \ - 'PKG_EXTRA' not in VERSION_DATA: - raise Exception("Malformed pkgconfig.mk") +VERSION_DATA['PKG_MAJOR'] = "2" +VERSION_DATA['PKG_MINOR'] = "0" +VERSION_DATA['PKG_EXTRA'] = "0" +# VERSION_DATA['PKG_TAG'] = "0" if 'PKG_TAG' in VERSION_DATA: VERSION = '{PKG_MAJOR}.{PKG_MINOR}.{PKG_EXTRA}.{PKG_TAG}'.format(**VERSION_DATA) @@ -63,12 +44,9 @@ if SYSTEM == 'darwin': LIBRARY_FILE = "libunicorn.dylib" MAC_LIBRARY_FILE = "libunicorn*.dylib" STATIC_LIBRARY_FILE = None -elif SYSTEM == 'win32': +elif SYSTEM in ('win32', 'cygwin'): LIBRARY_FILE = "unicorn.dll" STATIC_LIBRARY_FILE = "unicorn.lib" -elif SYSTEM == 'cygwin': - LIBRARY_FILE = "cygunicorn.dll" - STATIC_LIBRARY_FILE = None else: LIBRARY_FILE = "libunicorn.so" STATIC_LIBRARY_FILE = None @@ -84,7 +62,6 @@ def copy_sources(): """ src = [] - os.system('make -C %s clean' % os.path.join(ROOT_DIR, '../..')) shutil.rmtree(SRC_DIR, ignore_errors=True) os.mkdir(SRC_DIR) @@ -108,9 +85,8 @@ def copy_sources(): src.extend(glob.glob(os.path.join(ROOT_DIR, "../../README.md"))) src.extend(glob.glob(os.path.join(ROOT_DIR, "../../*.TXT"))) src.extend(glob.glob(os.path.join(ROOT_DIR, "../../RELEASE_NOTES"))) - src.extend(glob.glob(os.path.join(ROOT_DIR, "../../make.sh"))) + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../cmake.sh"))) src.extend(glob.glob(os.path.join(ROOT_DIR, "../../CMakeLists.txt"))) - src.extend(glob.glob(os.path.join(ROOT_DIR, "../../pkgconfig.mk"))) for filename in src: outpath = os.path.join(SRC_DIR, os.path.basename(filename)) @@ -130,7 +106,7 @@ def build_libraries(): os.mkdir(LIBS_DIR) # copy public headers - shutil.copytree(os.path.join(BUILD_DIR, 'include', 'unicorn'), os.path.join(HEADERS_DIR, 'unicorn')) + shutil.copytree(os.path.join(UC_DIR, 'include', 'unicorn'), os.path.join(HEADERS_DIR, 'unicorn')) # check if a prebuilt library exists # if so, use it instead of building @@ -141,35 +117,37 @@ def build_libraries(): return # otherwise, build!! 
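+    # On non-MSVC hosts the fallback below is an out-of-tree CMake build,
+    # roughly equivalent to this shell session (a sketch assuming cmake.sh
+    # drives the CMake configure/build steps; not a verbatim transcript):
+    #
+    #   mkdir build && cd build
+    #   sh ../cmake.sh          # optionally: cygwin-mingw64, cross-win32, ...
+    #
+    # after which the resulting unicorn library is copied into LIBS_DIR.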
- os.chdir(BUILD_DIR) + os.chdir(UC_DIR) try: - subprocess.check_call(['msbuild', '-ver']) + subprocess.check_call(['msbuild', '/help']) except: has_msbuild = False else: has_msbuild = True if has_msbuild and SYSTEM == 'win32': - if platform.architecture()[0] == '32bit': - plat = 'Win32' - elif 'win32' in sys.argv: - plat = 'Win32' - else: - plat = 'x64' - + plat = 'Win32' if platform.architecture()[0] == '32bit' else 'x64' conf = 'Debug' if os.getenv('DEBUG', '') else 'Release' - subprocess.call(['msbuild', 'unicorn.sln', '-m', '-p:Platform=' + plat, '-p:Configuration=' + conf], cwd=os.path.join(BUILD_DIR, 'msvc')) + subprocess.call(['msbuild', '-m', '-p:Platform=' + plat, '-p:Configuration=' + conf], cwd=os.path.join(UC_DIR, 'msvc')) - obj_dir = os.path.join(BUILD_DIR, 'msvc', plat, conf) + obj_dir = os.path.join(UC_DIR, 'msvc', plat, conf) shutil.copy(os.path.join(obj_dir, LIBRARY_FILE), LIBS_DIR) shutil.copy(os.path.join(obj_dir, STATIC_LIBRARY_FILE), LIBS_DIR) else: # platform description refs at https://docs.python.org/2/library/sys.html#sys.platform new_env = dict(os.environ) new_env['UNICORN_BUILD_CORE_ONLY'] = 'yes' - cmd = ['sh', './make.sh'] - if SYSTEM == "win32": + if not os.path.exists(BUILD_DIR): + os.mkdir(BUILD_DIR) + os.chdir(BUILD_DIR) + cmd = ['sh', '../cmake.sh'] + if SYSTEM == "cygwin": + if IS_64BITS: + cmd.append('cygwin-mingw64') + else: + cmd.append('cygwin-mingw32') + elif SYSTEM == "win32": if IS_64BITS: cmd.append('cross-win64') else: @@ -196,6 +174,7 @@ def build_libraries(): sys.exit(1) os.chdir(cwd) + class custom_sdist(sdist): def run(self): clean_bins() @@ -211,12 +190,6 @@ class custom_build(build): build_libraries() return build.run(self) -class custom_develop(develop): - def run(self): - log.info("Building C extensions") - build_libraries() - return develop.run(self) - class custom_bdist_egg(bdist_egg): def run(self): self.run_command('build') @@ -225,6 +198,10 @@ class custom_bdist_egg(bdist_egg): def dummy_src(): return [] +cmdclass = {} +cmdclass['build'] = custom_build +cmdclass['sdist'] = custom_sdist +cmdclass['bdist_egg'] = custom_bdist_egg if 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv: idx = sys.argv.index('bdist_wheel') + 1 @@ -245,14 +222,28 @@ if 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv: # https://www.python.org/dev/peps/pep-0425/ sys.argv.insert(idx + 1, name.replace('.', '_').replace('-', '_')) +try: + from setuptools.command.develop import develop + class custom_develop(develop): + def run(self): + log.info("Building C extensions") + build_libraries() + return develop.run(self) + + cmdclass['develop'] = custom_develop +except ImportError: + print("Proper 'develop' support unavailable.") + +def join_all(src, files): + return tuple(os.path.join(src, f) for f in files) long_desc = ''' Unicorn is a lightweight, multi-platform, multi-architecture CPU emulator framework -based on [QEMU](https://qemu.org). +based on [QEMU](http://qemu.org). Unicorn offers some unparalleled features: -- Multi-architecture: ARM, ARM64 (ARMv8), M68K, MIPS, SPARC, and X86 (16, 32, 64-bit) +- Multi-architecture: ARM, ARM64 (ARMv8), M68K, MIPS, PowerPC, SPARC and X86 (16, 32, 64-bit) - Clean/simple/lightweight/intuitive architecture-neutral API - Implemented in pure C language, with bindings for Crystal, Clojure, Visual Basic, Perl, Rust, Ruby, Python, Java, .NET, Go, Delphi/Free Pascal, Haskell, Pharo, and Lua. 
- Native support for Windows & *nix (with Mac OSX, Linux, *BSD & Solaris confirmed) @@ -261,7 +252,7 @@ Unicorn offers some unparalleled features: - Thread-safety by design - Distributed under free software license GPLv2 -Further information is available at https://www.unicorn-engine.org +Further information is available at http://www.unicorn-engine.org ''' setup( @@ -273,18 +264,18 @@ setup( author_email='aquynh@gmail.com', description='Unicorn CPU emulator engine', long_description=long_desc, - long_description_content_type="text/markdown", - url='https://www.unicorn-engine.org', + #long_description_content_type="text/markdown", + url='http://www.unicorn-engine.org', classifiers=[ 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ], requires=['ctypes'], - cmdclass={'build': custom_build, 'develop': custom_develop, 'sdist': custom_sdist, 'bdist_egg': custom_bdist_egg}, - zip_safe=False, + cmdclass=cmdclass, + zip_safe=True, include_package_data=True, - is_pure=False, + is_pure=True, package_data={ 'unicorn': ['lib/*', 'include/unicorn/*'] } diff --git a/bindings/python/unicorn/arm_const.py b/bindings/python/unicorn/arm_const.py index 088eaa46..8526f2b9 100644 --- a/bindings/python/unicorn/arm_const.py +++ b/bindings/python/unicorn/arm_const.py @@ -120,7 +120,28 @@ UC_ARM_REG_IPSR = 114 UC_ARM_REG_MSP = 115 UC_ARM_REG_PSP = 116 UC_ARM_REG_CONTROL = 117 -UC_ARM_REG_ENDING = 118 +UC_ARM_REG_IAPSR = 118 +UC_ARM_REG_EAPSR = 119 +UC_ARM_REG_XPSR = 120 +UC_ARM_REG_EPSR = 121 +UC_ARM_REG_IEPSR = 122 +UC_ARM_REG_PRIMASK = 123 +UC_ARM_REG_BASEPRI = 124 +UC_ARM_REG_BASEPRI_MAX = 125 +UC_ARM_REG_FAULTMASK = 126 +UC_ARM_REG_APSR_NZCVQ = 127 +UC_ARM_REG_APSR_G = 128 +UC_ARM_REG_APSR_NZCVQG = 129 +UC_ARM_REG_IAPSR_NZCVQ = 130 +UC_ARM_REG_IAPSR_G = 131 +UC_ARM_REG_IAPSR_NZCVQG = 132 +UC_ARM_REG_EAPSR_NZCVQ = 133 +UC_ARM_REG_EAPSR_G = 134 +UC_ARM_REG_EAPSR_NZCVQG = 135 +UC_ARM_REG_XPSR_NZCVQ = 136 +UC_ARM_REG_XPSR_G = 137 +UC_ARM_REG_XPSR_NZCVQG = 138 +UC_ARM_REG_ENDING = 139 # alias registers UC_ARM_REG_R13 = 12 diff --git a/bindings/python/unicorn/ppc_const.py b/bindings/python/unicorn/ppc_const.py new file mode 100644 index 00000000..18162dac --- /dev/null +++ b/bindings/python/unicorn/ppc_const.py @@ -0,0 +1,40 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [ppc_const.py] + +# PPC registers + +UC_PPC_REG_INVALID = 0 + +# General purpose registers +UC_PPC_REG_PC = 1 +UC_PPC_REG_0 = 2 +UC_PPC_REG_1 = 3 +UC_PPC_REG_2 = 4 +UC_PPC_REG_3 = 5 +UC_PPC_REG_4 = 6 +UC_PPC_REG_5 = 7 +UC_PPC_REG_6 = 8 +UC_PPC_REG_7 = 9 +UC_PPC_REG_8 = 10 +UC_PPC_REG_9 = 11 +UC_PPC_REG_10 = 12 +UC_PPC_REG_11 = 13 +UC_PPC_REG_12 = 14 +UC_PPC_REG_13 = 15 +UC_PPC_REG_14 = 16 +UC_PPC_REG_15 = 17 +UC_PPC_REG_16 = 18 +UC_PPC_REG_17 = 19 +UC_PPC_REG_18 = 20 +UC_PPC_REG_19 = 21 +UC_PPC_REG_20 = 22 +UC_PPC_REG_21 = 23 +UC_PPC_REG_22 = 24 +UC_PPC_REG_23 = 25 +UC_PPC_REG_24 = 26 +UC_PPC_REG_25 = 27 +UC_PPC_REG_26 = 28 +UC_PPC_REG_27 = 29 +UC_PPC_REG_28 = 30 +UC_PPC_REG_29 = 31 +UC_PPC_REG_30 = 32 +UC_PPC_REG_31 = 33 diff --git a/bindings/python/unicorn/riscv_const.py b/bindings/python/unicorn/riscv_const.py new file mode 100644 index 00000000..e4adbcd7 --- /dev/null +++ b/bindings/python/unicorn/riscv_const.py @@ -0,0 +1,142 @@ +# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [riscv_const.py] + +# RISCV registers + +UC_RISCV_REG_INVALID = 0 + +# General purpose registers +UC_RISCV_REG_X0 = 1 +UC_RISCV_REG_X1 = 2 +UC_RISCV_REG_X2 = 3 +UC_RISCV_REG_X3 = 4 +UC_RISCV_REG_X4 = 5 +UC_RISCV_REG_X5 = 6 +UC_RISCV_REG_X6 = 7 +UC_RISCV_REG_X7 = 8 +UC_RISCV_REG_X8 = 9 +UC_RISCV_REG_X9 = 10 +UC_RISCV_REG_X10 = 11 +UC_RISCV_REG_X11 = 12 +UC_RISCV_REG_X12 = 13 +UC_RISCV_REG_X13 = 14 +UC_RISCV_REG_X14 = 15 +UC_RISCV_REG_X15 = 16 +UC_RISCV_REG_X16 = 17 +UC_RISCV_REG_X17 = 18 +UC_RISCV_REG_X18 = 19 +UC_RISCV_REG_X19 = 20 +UC_RISCV_REG_X20 = 21 +UC_RISCV_REG_X21 = 22 +UC_RISCV_REG_X22 = 23 +UC_RISCV_REG_X23 = 24 +UC_RISCV_REG_X24 = 25 +UC_RISCV_REG_X25 = 26 +UC_RISCV_REG_X26 = 27 +UC_RISCV_REG_X27 = 28 +UC_RISCV_REG_X28 = 29 +UC_RISCV_REG_X29 = 30 +UC_RISCV_REG_X30 = 31 +UC_RISCV_REG_X31 = 32 + +# Floating-point registers +UC_RISCV_REG_F0 = 33 +UC_RISCV_REG_F1 = 34 +UC_RISCV_REG_F2 = 35 +UC_RISCV_REG_F3 = 36 +UC_RISCV_REG_F4 = 37 +UC_RISCV_REG_F5 = 38 +UC_RISCV_REG_F6 = 39 +UC_RISCV_REG_F7 = 40 +UC_RISCV_REG_F8 = 41 +UC_RISCV_REG_F9 = 42 +UC_RISCV_REG_F10 = 43 +UC_RISCV_REG_F11 = 44 +UC_RISCV_REG_F12 = 45 +UC_RISCV_REG_F13 = 46 +UC_RISCV_REG_F14 = 47 +UC_RISCV_REG_F15 = 48 +UC_RISCV_REG_F16 = 49 +UC_RISCV_REG_F17 = 50 +UC_RISCV_REG_F18 = 51 +UC_RISCV_REG_F19 = 52 +UC_RISCV_REG_F20 = 53 +UC_RISCV_REG_F21 = 54 +UC_RISCV_REG_F22 = 55 +UC_RISCV_REG_F23 = 56 +UC_RISCV_REG_F24 = 57 +UC_RISCV_REG_F25 = 58 +UC_RISCV_REG_F26 = 59 +UC_RISCV_REG_F27 = 60 +UC_RISCV_REG_F28 = 61 +UC_RISCV_REG_F29 = 62 +UC_RISCV_REG_F30 = 63 +UC_RISCV_REG_F31 = 64 +UC_RISCV_REG_PC = 65 +UC_RISCV_REG_ENDING = 66 + +# Alias registers +UC_RISCV_REG_ZERO = 1 +UC_RISCV_REG_RA = 2 +UC_RISCV_REG_SP = 3 +UC_RISCV_REG_GP = 4 +UC_RISCV_REG_TP = 5 +UC_RISCV_REG_T0 = 6 +UC_RISCV_REG_T1 = 7 +UC_RISCV_REG_T2 = 8 +UC_RISCV_REG_S0 = 9 +UC_RISCV_REG_FP = 9 +UC_RISCV_REG_S1 = 10 +UC_RISCV_REG_A0 = 11 +UC_RISCV_REG_A1 = 12 +UC_RISCV_REG_A2 = 13 +UC_RISCV_REG_A3 = 14 +UC_RISCV_REG_A4 = 15 +UC_RISCV_REG_A5 = 16 +UC_RISCV_REG_A6 = 17 +UC_RISCV_REG_A7 = 18 +UC_RISCV_REG_S2 = 19 +UC_RISCV_REG_S3 = 20 +UC_RISCV_REG_S4 = 21 +UC_RISCV_REG_S5 = 22 +UC_RISCV_REG_S6 = 23 +UC_RISCV_REG_S7 = 24 +UC_RISCV_REG_S8 = 25 +UC_RISCV_REG_S9 = 26 +UC_RISCV_REG_S10 = 27 +UC_RISCV_REG_S11 = 28 +UC_RISCV_REG_T3 = 29 +UC_RISCV_REG_T4 = 30 +UC_RISCV_REG_T5 = 31 +UC_RISCV_REG_T6 = 32 +UC_RISCV_REG_FT0 = 33 +UC_RISCV_REG_FT1 = 34 +UC_RISCV_REG_FT2 = 35 +UC_RISCV_REG_FT3 = 36 +UC_RISCV_REG_FT4 = 37 +UC_RISCV_REG_FT5 = 38 +UC_RISCV_REG_FT6 = 39 +UC_RISCV_REG_FT7 = 40 +UC_RISCV_REG_FS0 = 41 +UC_RISCV_REG_FS1 = 42 +UC_RISCV_REG_FA0 = 43 +UC_RISCV_REG_FA1 = 44 +UC_RISCV_REG_FA2 = 45 +UC_RISCV_REG_FA3 = 46 +UC_RISCV_REG_FA4 = 47 +UC_RISCV_REG_FA5 = 48 +UC_RISCV_REG_FA6 = 49 +UC_RISCV_REG_FA7 = 50 +UC_RISCV_REG_FS2 = 51 +UC_RISCV_REG_FS3 = 52 +UC_RISCV_REG_FS4 = 53 +UC_RISCV_REG_FS5 = 54 +UC_RISCV_REG_FS6 = 55 +UC_RISCV_REG_FS7 = 56 +UC_RISCV_REG_FS8 = 57 +UC_RISCV_REG_FS9 = 58 +UC_RISCV_REG_FS10 = 59 +UC_RISCV_REG_FS11 = 60 +UC_RISCV_REG_FT8 = 61 +UC_RISCV_REG_FT9 = 62 +UC_RISCV_REG_FT10 = 63 +UC_RISCV_REG_FT11 = 64 diff --git a/bindings/python/unicorn/unicorn.py b/bindings/python/unicorn/unicorn.py index 5fe77ee4..34e598bb 100644 --- a/bindings/python/unicorn/unicorn.py +++ b/bindings/python/unicorn/unicorn.py @@ -3,12 +3,12 @@ import ctypes import ctypes.util import distutils.sysconfig -from functools import wraps import pkg_resources import inspect import os.path import sys import weakref +import functools from . 
import x86_const, arm64_const, unicorn_const as uc @@ -105,6 +105,8 @@ def _setup_prototype(lib, fname, restype, *argtypes): getattr(lib, fname).argtypes = argtypes ucerr = ctypes.c_int +uc_mode = ctypes.c_int +uc_arch = ctypes.c_int uc_engine = ctypes.c_void_p uc_context = ctypes.c_void_p uc_hook_h = ctypes.c_size_t @@ -130,6 +132,7 @@ _setup_prototype(_uc, "uc_mem_write", ucerr, uc_engine, ctypes.c_uint64, ctypes. _setup_prototype(_uc, "uc_emu_start", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_size_t) _setup_prototype(_uc, "uc_emu_stop", ucerr, uc_engine) _setup_prototype(_uc, "uc_hook_del", ucerr, uc_engine, uc_hook_h) +_setup_prototype(_uc, "uc_mmio_map", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p) _setup_prototype(_uc, "uc_mem_map", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32) _setup_prototype(_uc, "uc_mem_map_ptr", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32, ctypes.c_void_p) _setup_prototype(_uc, "uc_mem_unmap", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t) @@ -140,12 +143,12 @@ _setup_prototype(_uc, "uc_free", ucerr, ctypes.c_void_p) _setup_prototype(_uc, "uc_context_save", ucerr, uc_engine, uc_context) _setup_prototype(_uc, "uc_context_restore", ucerr, uc_engine, uc_context) _setup_prototype(_uc, "uc_context_size", ctypes.c_size_t, uc_engine) +_setup_prototype(_uc, "uc_context_reg_read", ucerr, uc_context, ctypes.c_int, ctypes.c_void_p) +_setup_prototype(_uc, "uc_context_reg_write", ucerr, uc_context, ctypes.c_int, ctypes.c_void_p) _setup_prototype(_uc, "uc_context_free", ucerr, uc_context) _setup_prototype(_uc, "uc_mem_regions", ucerr, uc_engine, ctypes.POINTER(ctypes.POINTER(_uc_mem_region)), ctypes.POINTER(ctypes.c_uint32)) - -# uc_hook_add is special due to variable number of arguments -_uc.uc_hook_add = _uc.uc_hook_add -_uc.uc_hook_add.restype = ucerr +# https://bugs.python.org/issue42880 +_setup_prototype(_uc, "uc_hook_add", ucerr, uc_engine, ctypes.POINTER(uc_hook_h), ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint64, ctypes.c_uint64) UC_HOOK_CODE_CB = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_void_p) UC_HOOK_INSN_INVALID_CB = ctypes.CFUNCTYPE(ctypes.c_bool, uc_engine, ctypes.c_void_p) @@ -168,7 +171,12 @@ UC_HOOK_INSN_OUT_CB = ctypes.CFUNCTYPE( ctypes.c_int, ctypes.c_uint32, ctypes.c_void_p ) UC_HOOK_INSN_SYSCALL_CB = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_void_p) - +UC_MMIO_READ_CB = ctypes.CFUNCTYPE( + ctypes.c_uint64, uc_engine, ctypes.c_uint64, ctypes.c_int, ctypes.c_void_p +) +UC_MMIO_WRITE_CB = ctypes.CFUNCTYPE( + None, uc_engine, ctypes.c_uint64, ctypes.c_int, ctypes.c_uint64, ctypes.c_void_p +) # access to error code via @errno of UcError class UcError(Exception): @@ -199,27 +207,103 @@ def version_bind(): def uc_arch_supported(query): return _uc.uc_arch_supported(query) +# uc_reg_read/write and uc_context_reg_read/write. 
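+# The two entry points share the helpers below: Uc binds the live-engine
+# functions and UcContext binds the saved-context variants through
+# functools.partial, e.g. (an illustrative sketch, not public API):
+#
+#   reg_read(functools.partial(_uc.uc_reg_read, handle), arch, reg_id)
+#   reg_read(functools.partial(_uc.uc_context_reg_read, ctx), arch, reg_id)
+#
+# so special-cased registers (x86 MSR/FP/XMM/YMM, ARM64 NEON) are decoded
+# the same way for both.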
+def reg_read(reg_read_func, arch, reg_id, opt=None):
+    if arch == uc.UC_ARCH_X86:
+        if reg_id in [x86_const.UC_X86_REG_IDTR, x86_const.UC_X86_REG_GDTR, x86_const.UC_X86_REG_LDTR, x86_const.UC_X86_REG_TR]:
+            reg = uc_x86_mmr()
+            status = reg_read_func(reg_id, ctypes.byref(reg))
+            if status != uc.UC_ERR_OK:
+                raise UcError(status)
+            return reg.selector, reg.base, reg.limit, reg.flags
+        if reg_id in range(x86_const.UC_X86_REG_FP0, x86_const.UC_X86_REG_FP0+8):
+            reg = uc_x86_float80()
+            status = reg_read_func(reg_id, ctypes.byref(reg))
+            if status != uc.UC_ERR_OK:
+                raise UcError(status)
+            return reg.mantissa, reg.exponent
+        if reg_id in range(x86_const.UC_X86_REG_XMM0, x86_const.UC_X86_REG_XMM0+8):
+            reg = uc_x86_xmm()
+            status = reg_read_func(reg_id, ctypes.byref(reg))
+            if status != uc.UC_ERR_OK:
+                raise UcError(status)
+            return reg.low_qword | (reg.high_qword << 64)
+        if reg_id in range(x86_const.UC_X86_REG_YMM0, x86_const.UC_X86_REG_YMM0+16):
+            reg = uc_x86_ymm()
+            status = reg_read_func(reg_id, ctypes.byref(reg))
+            if status != uc.UC_ERR_OK:
+                raise UcError(status)
+            return reg.first_qword | (reg.second_qword << 64) | (reg.third_qword << 128) | (reg.fourth_qword << 192)
+        if reg_id == x86_const.UC_X86_REG_MSR:
+            if opt is None:
+                raise UcError(uc.UC_ERR_ARG)
+            reg = uc_x86_msr()
+            reg.rid = opt
+            status = reg_read_func(reg_id, ctypes.byref(reg))
+            if status != uc.UC_ERR_OK:
+                raise UcError(status)
+            return reg.value
 
-def _catch_hook_exception(func):
-    @wraps(func)
-    def wrapper(self, *args, **kwargs):
-        """Catches exceptions raised in hook functions.
+    if arch == uc.UC_ARCH_ARM64:
+        if reg_id in range(arm64_const.UC_ARM64_REG_Q0, arm64_const.UC_ARM64_REG_Q31+1) or reg_id in range(arm64_const.UC_ARM64_REG_V0, arm64_const.UC_ARM64_REG_V31+1):
+            reg = uc_arm64_neon128()
+            status = reg_read_func(reg_id, ctypes.byref(reg))
+            if status != uc.UC_ERR_OK:
+                raise UcError(status)
+            return reg.low_qword | (reg.high_qword << 64)
 
-        If an exception is raised, it is saved to the Uc object and a call to stop
-        emulation is issued.
-        """
-        try:
-            return func(self, *args, **kwargs)
-        except Exception as e:
-            # If multiple hooks raise exceptions, just use the first one
-            if self._hook_exception is None:
-                self._hook_exception = e
+    # read to 64bit number to be safe
+    reg = ctypes.c_uint64(0)
+    status = reg_read_func(reg_id, ctypes.byref(reg))
+    if status != uc.UC_ERR_OK:
+        raise UcError(status)
+    return reg.value
 
-            self.emu_stop()
+def reg_write(reg_write_func, arch, reg_id, value):
+    reg = None
 
-    return wrapper
+    if arch == uc.UC_ARCH_X86:
+        if reg_id in [x86_const.UC_X86_REG_IDTR, x86_const.UC_X86_REG_GDTR, x86_const.UC_X86_REG_LDTR, x86_const.UC_X86_REG_TR]:
+            assert isinstance(value, tuple) and len(value) == 4
+            reg = uc_x86_mmr()
+            reg.selector = value[0]
+            reg.base = value[1]
+            reg.limit = value[2]
+            reg.flags = value[3]
+        if reg_id in range(x86_const.UC_X86_REG_FP0, x86_const.UC_X86_REG_FP0+8):
+            reg = uc_x86_float80()
+            reg.mantissa = value[0]
+            reg.exponent = value[1]
+        if reg_id in range(x86_const.UC_X86_REG_XMM0, x86_const.UC_X86_REG_XMM0+8):
+            reg = uc_x86_xmm()
+            reg.low_qword = value & 0xffffffffffffffff
+            reg.high_qword = value >> 64
+        if reg_id in range(x86_const.UC_X86_REG_YMM0, x86_const.UC_X86_REG_YMM0+16):
+            reg = uc_x86_ymm()
+            reg.first_qword = value & 0xffffffffffffffff
+            reg.second_qword = (value >> 64) & 0xffffffffffffffff
+            reg.third_qword = (value >> 128) & 0xffffffffffffffff
+            reg.fourth_qword = value >> 192
+        if reg_id == x86_const.UC_X86_REG_MSR:
+            reg = uc_x86_msr()
+            reg.rid = value[0]
+            reg.value = value[1]
+    if arch == uc.UC_ARCH_ARM64:
+        if reg_id in range(arm64_const.UC_ARM64_REG_Q0, arm64_const.UC_ARM64_REG_Q31+1) or reg_id in range(arm64_const.UC_ARM64_REG_V0, arm64_const.UC_ARM64_REG_V31+1):
+            reg = uc_arm64_neon128()
+            reg.low_qword = value & 0xffffffffffffffff
+            reg.high_qword = value >> 64
+    if reg is None:
+        # convert to 64bit number to be safe
+        reg = ctypes.c_uint64(value)
+
+    status = reg_write_func(reg_id, ctypes.byref(reg))
+    if status != uc.UC_ERR_OK:
+        raise UcError(status)
+
+    return
 
 class uc_x86_mmr(ctypes.Structure):
     """Memory-Management Register for instructions IDTR, GDTR, LDTR, TR."""
@@ -306,6 +390,8 @@ class Uc(object):
     def __init__(self, arch, mode):
         # verify version compatibility with the core before doing anything
         (major, minor, _combined) = uc_version()
+        # print("core version =", uc_version())
+        # print("binding version =", uc.UC_API_MAJOR, uc.UC_API_MINOR)
         if major != uc.UC_API_MAJOR or minor != uc.UC_API_MINOR:
             self._uch = None
             # our binding version is different from the core's API version
@@ -319,10 +405,9 @@
             raise UcError(status)
         # internal mapping table to save callback & userdata
         self._callbacks = {}
-        self._ctype_cbs = {}
+        self._ctype_cbs = []
         self._callback_count = 0
         self._cleanup.register(self)
-        self._hook_exception = None  # The exception raised in a hook
 
     @staticmethod
     def release_handle(uch):
@@ -340,9 +425,6 @@
         if status != uc.UC_ERR_OK:
             raise UcError(status)
 
-        if self._hook_exception is not None:
-            raise self._hook_exception
-
     # stop emulation
     def emu_stop(self):
         status = _uc.uc_emu_stop(self._uch)
@@ -351,100 +433,11 @@
 
     # return the value of a register
     def reg_read(self, reg_id, opt=None):
-        if self._arch == uc.UC_ARCH_X86:
-            if reg_id in [x86_const.UC_X86_REG_IDTR, x86_const.UC_X86_REG_GDTR, x86_const.UC_X86_REG_LDTR, x86_const.UC_X86_REG_TR]:
-                reg = uc_x86_mmr()
-                status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg))
-                if status != uc.UC_ERR_OK:
-                    raise UcError(status)
-                return 
reg.selector, reg.base, reg.limit, reg.flags - if reg_id in range(x86_const.UC_X86_REG_FP0, x86_const.UC_X86_REG_FP0+8): - reg = uc_x86_float80() - status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) - if status != uc.UC_ERR_OK: - raise UcError(status) - return reg.mantissa, reg.exponent - if reg_id in range(x86_const.UC_X86_REG_XMM0, x86_const.UC_X86_REG_XMM0+8): - reg = uc_x86_xmm() - status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) - if status != uc.UC_ERR_OK: - raise UcError(status) - return reg.low_qword | (reg.high_qword << 64) - if reg_id in range(x86_const.UC_X86_REG_YMM0, x86_const.UC_X86_REG_YMM0+16): - reg = uc_x86_ymm() - status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) - if status != uc.UC_ERR_OK: - raise UcError(status) - return reg.first_qword | (reg.second_qword << 64) | (reg.third_qword << 128) | (reg.fourth_qword << 192) - if reg_id is x86_const.UC_X86_REG_MSR: - if opt is None: - raise UcError(uc.UC_ERR_ARG) - reg = uc_x86_msr() - reg.rid = opt - status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) - if status != uc.UC_ERR_OK: - raise UcError(status) - return reg.value - - if self._arch == uc.UC_ARCH_ARM64: - if reg_id in range(arm64_const.UC_ARM64_REG_Q0, arm64_const.UC_ARM64_REG_Q31+1) or range(arm64_const.UC_ARM64_REG_V0, arm64_const.UC_ARM64_REG_V31+1): - reg = uc_arm64_neon128() - status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) - if status != uc.UC_ERR_OK: - raise UcError(status) - return reg.low_qword | (reg.high_qword << 64) - - # read to 64bit number to be safe - reg = ctypes.c_uint64(0) - status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) - if status != uc.UC_ERR_OK: - raise UcError(status) - return reg.value + return reg_read(functools.partial(_uc.uc_reg_read, self._uch), self._arch, reg_id, opt) # write to a register def reg_write(self, reg_id, value): - reg = None - - if self._arch == uc.UC_ARCH_X86: - if reg_id in [x86_const.UC_X86_REG_IDTR, x86_const.UC_X86_REG_GDTR, x86_const.UC_X86_REG_LDTR, x86_const.UC_X86_REG_TR]: - assert isinstance(value, tuple) and len(value) == 4 - reg = uc_x86_mmr() - reg.selector = value[0] - reg.base = value[1] - reg.limit = value[2] - reg.flags = value[3] - if reg_id in range(x86_const.UC_X86_REG_FP0, x86_const.UC_X86_REG_FP0+8): - reg = uc_x86_float80() - reg.mantissa = value[0] - reg.exponent = value[1] - if reg_id in range(x86_const.UC_X86_REG_XMM0, x86_const.UC_X86_REG_XMM0+8): - reg = uc_x86_xmm() - reg.low_qword = value & 0xffffffffffffffff - reg.high_qword = value >> 64 - if reg_id in range(x86_const.UC_X86_REG_YMM0, x86_const.UC_X86_REG_YMM0+16): - reg = uc_x86_ymm() - reg.first_qword = value & 0xffffffffffffffff - reg.second_qword = (value >> 64) & 0xffffffffffffffff - reg.third_qword = (value >> 128) & 0xffffffffffffffff - reg.fourth_qword = value >> 192 - if reg_id is x86_const.UC_X86_REG_MSR: - reg = uc_x86_msr() - reg.rid = value[0] - reg.value = value[1] - - if self._arch == uc.UC_ARCH_ARM64: - if reg_id in range(arm64_const.UC_ARM64_REG_Q0, arm64_const.UC_ARM64_REG_Q31+1) or range(arm64_const.UC_ARM64_REG_V0, arm64_const.UC_ARM64_REG_V31+1): - reg = uc_arm64_neon128() - reg.low_qword = value & 0xffffffffffffffff - reg.high_qword = value >> 64 - - if reg is None: - # convert to 64bit number to be safe - reg = ctypes.c_uint64(value) - - status = _uc.uc_reg_write(self._uch, reg_id, ctypes.byref(reg)) - if status != uc.UC_ERR_OK: - raise UcError(status) + return reg_write(functools.partial(_uc.uc_reg_write, self._uch), self._arch, reg_id, 
value) # read from MSR - X86 only def msr_read(self, msr_id): @@ -468,6 +461,33 @@ class Uc(object): if status != uc.UC_ERR_OK: raise UcError(status) + def _mmio_map_read_cb(self, handle, offset, size, user_data): + (cb, data) = self._callbacks[user_data] + return cb(self, offset, size, data) + + def _mmio_map_write_cb(self, handle, offset, size, value, user_data): + (cb, data) = self._callbacks[user_data] + cb(self, offset, size, value, data) + + def mmio_map(self, address, size, read_cb, user_data_read, write_cb, user_data_write): + internal_read_cb = ctypes.cast(UC_MMIO_READ_CB(self._mmio_map_read_cb), UC_MMIO_READ_CB) + internal_write_cb = ctypes.cast(UC_MMIO_WRITE_CB(self._mmio_map_write_cb), UC_MMIO_WRITE_CB) + + self._callback_count += 1 + self._callbacks[self._callback_count] = (read_cb, user_data_read) + read_count = self._callback_count + self._callback_count += 1 + self._callbacks[self._callback_count] = (write_cb, user_data_write) + write_count = self._callback_count + + status = _uc.uc_mmio_map(self._uch, address, size, internal_read_cb, read_count, internal_write_cb, write_count) + if status != uc.UC_ERR_OK: + raise UcError(status) + + # https://docs.python.org/3/library/ctypes.html#callback-functions + self._ctype_cbs.append(internal_read_cb) + self._ctype_cbs.append(internal_write_cb) + # map a range of memory def mem_map(self, address, size, perms=uc.UC_PROT_ALL): status = _uc.uc_mem_map(self._uch, address, size, perms) @@ -500,49 +520,41 @@ class Uc(object): raise UcError(status) return result.value - @_catch_hook_exception def _hookcode_cb(self, handle, address, size, user_data): # call user's callback with self object (cb, data) = self._callbacks[user_data] cb(self, address, size, data) - @_catch_hook_exception def _hook_mem_invalid_cb(self, handle, access, address, size, value, user_data): # call user's callback with self object (cb, data) = self._callbacks[user_data] return cb(self, access, address, size, value, data) - @_catch_hook_exception def _hook_mem_access_cb(self, handle, access, address, size, value, user_data): # call user's callback with self object (cb, data) = self._callbacks[user_data] cb(self, access, address, size, value, data) - @_catch_hook_exception def _hook_intr_cb(self, handle, intno, user_data): # call user's callback with self object (cb, data) = self._callbacks[user_data] cb(self, intno, data) - @_catch_hook_exception def _hook_insn_invalid_cb(self, handle, user_data): # call user's callback with self object (cb, data) = self._callbacks[user_data] return cb(self, data) - @_catch_hook_exception def _hook_insn_in_cb(self, handle, port, size, user_data): # call user's callback with self object (cb, data) = self._callbacks[user_data] return cb(self, port, size, data) - @_catch_hook_exception def _hook_insn_out_cb(self, handle, port, size, value, user_data): # call user's callback with self object (cb, data) = self._callbacks[user_data] cb(self, port, size, value, data) - @_catch_hook_exception def _hook_insn_syscall_cb(self, handle, user_data): # call user's callback with self object (cb, data) = self._callbacks[user_data] @@ -615,7 +627,7 @@ class Uc(object): ) # save the ctype function so gc will leave it alone. 
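+        # ctypes frees the underlying C thunk once the CFUNCTYPE object is
+        # garbage-collected, so passing a bare temporary would leave the core
+        # with a dangling pointer, e.g. (illustrative only, do not do this):
+        #
+        #   _uc.uc_hook_add(self._uch, ctypes.byref(_h), htype,
+        #                   UC_HOOK_CODE_CB(self._hookcode_cb), None, 1, 0)
+        #
+        # Keeping a reference in self._ctype_cbs pins the wrapper for the
+        # lifetime of this Uc instance (see the mmio_map callbacks above).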
-        self._ctype_cbs[self._callback_count] = cb
+        self._ctype_cbs.append(cb)
 
         if status != uc.UC_ERR_OK:
             raise UcError(status)
@@ -631,7 +643,7 @@ class Uc(object):
         h = 0
 
     def context_save(self):
-        context = UcContext(self._uch)
+        context = UcContext(self._uch, self._arch, self._mode)
         status = _uc.uc_context_save(self._uch, context.context)
         if status != uc.UC_ERR_OK:
             raise UcError(status)
@@ -664,14 +676,16 @@
 
 class UcContext:
-    def __init__(self, h):
+    def __init__(self, h, arch, mode):
         self._context = uc_context()
         self._size = _uc.uc_context_size(h)
         self._to_free = True
         status = _uc.uc_context_alloc(h, ctypes.byref(self._context))
         if status != uc.UC_ERR_OK:
             raise UcError(status)
-
+        self._arch = arch
+        self._mode = mode
+
     @property
     def context(self):
         return self._context
@@ -680,16 +694,34 @@
     def size(self):
         return self._size
 
+    @property
+    def arch(self):
+        return self._arch
+
+    @property
+    def mode(self):
+        return self._mode
+
+    # return the value of a register
+    def reg_read(self, reg_id, opt=None):
+        return reg_read(functools.partial(_uc.uc_context_reg_read, self._context), self.arch, reg_id, opt)
+
+    # write to a register
+    def reg_write(self, reg_id, value):
+        return reg_write(functools.partial(_uc.uc_context_reg_write, self._context), self.arch, reg_id, value)
+
     # Make UcContext picklable
     def __getstate__(self):
-        return (bytes(self), self.size)
-
+        return (bytes(self), self.size, self.arch, self.mode)
+
     def __setstate__(self, state):
         self._size = state[1]
         self._context = ctypes.cast(ctypes.create_string_buffer(state[0], self._size), uc_context)
         # __init__ won't be invoked, so we are safe to set it here.
         self._to_free = False
-
+        self._arch = state[2]
+        self._mode = state[3]
+
     def __bytes__(self):
         return ctypes.string_at(self.context, self.size)
 
@@ -708,6 +740,8 @@ def debug():
         "sparc": uc.UC_ARCH_SPARC,
         "m68k": uc.UC_ARCH_M68K,
         "x86": uc.UC_ARCH_X86,
+        "riscv": uc.UC_ARCH_RISCV,
+        "ppc": uc.UC_ARCH_PPC,
     }
 
     all_archs = ""
diff --git a/bindings/python/unicorn/unicorn_const.py b/bindings/python/unicorn/unicorn_const.py
index 4015cb37..a517572a 100644
--- a/bindings/python/unicorn/unicorn_const.py
+++ b/bindings/python/unicorn/unicorn_const.py
@@ -1,11 +1,12 @@
 # For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [unicorn_const.py] -UC_API_MAJOR = 1 +UC_API_MAJOR = 2 UC_API_MINOR = 0 -UC_VERSION_MAJOR = 1 +UC_VERSION_MAJOR = 2 UC_VERSION_MINOR = 0 -UC_VERSION_EXTRA = 3 + +UC_VERSION_EXTRA = 0 UC_SECOND_SCALE = 1000000 UC_MILISECOND_SCALE = 1000 UC_ARCH_ARM = 1 @@ -15,7 +16,8 @@ UC_ARCH_X86 = 4 UC_ARCH_PPC = 5 UC_ARCH_SPARC = 6 UC_ARCH_M68K = 7 -UC_ARCH_MAX = 8 +UC_ARCH_RISCV = 8 +UC_ARCH_MAX = 9 UC_MODE_LITTLE_ENDIAN = 0 UC_MODE_BIG_ENDIAN = 1073741824 @@ -27,7 +29,6 @@ UC_MODE_V8 = 64 UC_MODE_ARM926 = 128 UC_MODE_ARM946 = 256 UC_MODE_ARM1176 = 512 -UC_MODE_ARMBE8 = 1024 UC_MODE_MICRO = 16 UC_MODE_MIPS3 = 32 UC_MODE_MIPS32R6 = 64 @@ -42,6 +43,8 @@ UC_MODE_QPX = 16 UC_MODE_SPARC32 = 4 UC_MODE_SPARC64 = 8 UC_MODE_V9 = 16 +UC_MODE_RISCV32 = 4 +UC_MODE_RISCV64 = 8 UC_ERR_OK = 0 UC_ERR_NOMEM = 1 diff --git a/bindings/python/unicorn/x86_const.py b/bindings/python/unicorn/x86_const.py index 9c0fbadf..f9b8cc14 100644 --- a/bindings/python/unicorn/x86_const.py +++ b/bindings/python/unicorn/x86_const.py @@ -254,7 +254,9 @@ UC_X86_REG_MSR = 248 UC_X86_REG_MXCSR = 249 UC_X86_REG_FS_BASE = 250 UC_X86_REG_GS_BASE = 251 -UC_X86_REG_ENDING = 252 +UC_X86_REG_FLAGS = 252 +UC_X86_REG_RFLAGS = 253 +UC_X86_REG_ENDING = 254 # X86 instructions diff --git a/bindings/ruby/unicorn_gem/lib/unicorn_engine/ppc_const.rb b/bindings/ruby/unicorn_gem/lib/unicorn_engine/ppc_const.rb new file mode 100644 index 00000000..e8f33330 --- /dev/null +++ b/bindings/ruby/unicorn_gem/lib/unicorn_engine/ppc_const.rb @@ -0,0 +1,43 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [ppc_const.rb] + +module UnicornEngine + +# PPC registers + + UC_PPC_REG_INVALID = 0 + +# General purpose registers + UC_PPC_REG_PC = 1 + UC_PPC_REG_0 = 2 + UC_PPC_REG_1 = 3 + UC_PPC_REG_2 = 4 + UC_PPC_REG_3 = 5 + UC_PPC_REG_4 = 6 + UC_PPC_REG_5 = 7 + UC_PPC_REG_6 = 8 + UC_PPC_REG_7 = 9 + UC_PPC_REG_8 = 10 + UC_PPC_REG_9 = 11 + UC_PPC_REG_10 = 12 + UC_PPC_REG_11 = 13 + UC_PPC_REG_12 = 14 + UC_PPC_REG_13 = 15 + UC_PPC_REG_14 = 16 + UC_PPC_REG_15 = 17 + UC_PPC_REG_16 = 18 + UC_PPC_REG_17 = 19 + UC_PPC_REG_18 = 20 + UC_PPC_REG_19 = 21 + UC_PPC_REG_20 = 22 + UC_PPC_REG_21 = 23 + UC_PPC_REG_22 = 24 + UC_PPC_REG_23 = 25 + UC_PPC_REG_24 = 26 + UC_PPC_REG_25 = 27 + UC_PPC_REG_26 = 28 + UC_PPC_REG_27 = 29 + UC_PPC_REG_28 = 30 + UC_PPC_REG_29 = 31 + UC_PPC_REG_30 = 32 + UC_PPC_REG_31 = 33 +end \ No newline at end of file diff --git a/bindings/ruby/unicorn_gem/lib/unicorn_engine/riscv_const.rb b/bindings/ruby/unicorn_gem/lib/unicorn_engine/riscv_const.rb new file mode 100644 index 00000000..2122099d --- /dev/null +++ b/bindings/ruby/unicorn_gem/lib/unicorn_engine/riscv_const.rb @@ -0,0 +1,145 @@ +# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [riscv_const.rb] + +module UnicornEngine + +# RISCV registers + + UC_RISCV_REG_INVALID = 0 + +# General purpose registers + UC_RISCV_REG_X0 = 1 + UC_RISCV_REG_X1 = 2 + UC_RISCV_REG_X2 = 3 + UC_RISCV_REG_X3 = 4 + UC_RISCV_REG_X4 = 5 + UC_RISCV_REG_X5 = 6 + UC_RISCV_REG_X6 = 7 + UC_RISCV_REG_X7 = 8 + UC_RISCV_REG_X8 = 9 + UC_RISCV_REG_X9 = 10 + UC_RISCV_REG_X10 = 11 + UC_RISCV_REG_X11 = 12 + UC_RISCV_REG_X12 = 13 + UC_RISCV_REG_X13 = 14 + UC_RISCV_REG_X14 = 15 + UC_RISCV_REG_X15 = 16 + UC_RISCV_REG_X16 = 17 + UC_RISCV_REG_X17 = 18 + UC_RISCV_REG_X18 = 19 + UC_RISCV_REG_X19 = 20 + UC_RISCV_REG_X20 = 21 + UC_RISCV_REG_X21 = 22 + UC_RISCV_REG_X22 = 23 + UC_RISCV_REG_X23 = 24 + UC_RISCV_REG_X24 = 25 + UC_RISCV_REG_X25 = 26 + UC_RISCV_REG_X26 = 27 + UC_RISCV_REG_X27 = 28 + UC_RISCV_REG_X28 = 29 + UC_RISCV_REG_X29 = 30 + UC_RISCV_REG_X30 = 31 + UC_RISCV_REG_X31 = 32 + +# Floating-point registers + UC_RISCV_REG_F0 = 33 + UC_RISCV_REG_F1 = 34 + UC_RISCV_REG_F2 = 35 + UC_RISCV_REG_F3 = 36 + UC_RISCV_REG_F4 = 37 + UC_RISCV_REG_F5 = 38 + UC_RISCV_REG_F6 = 39 + UC_RISCV_REG_F7 = 40 + UC_RISCV_REG_F8 = 41 + UC_RISCV_REG_F9 = 42 + UC_RISCV_REG_F10 = 43 + UC_RISCV_REG_F11 = 44 + UC_RISCV_REG_F12 = 45 + UC_RISCV_REG_F13 = 46 + UC_RISCV_REG_F14 = 47 + UC_RISCV_REG_F15 = 48 + UC_RISCV_REG_F16 = 49 + UC_RISCV_REG_F17 = 50 + UC_RISCV_REG_F18 = 51 + UC_RISCV_REG_F19 = 52 + UC_RISCV_REG_F20 = 53 + UC_RISCV_REG_F21 = 54 + UC_RISCV_REG_F22 = 55 + UC_RISCV_REG_F23 = 56 + UC_RISCV_REG_F24 = 57 + UC_RISCV_REG_F25 = 58 + UC_RISCV_REG_F26 = 59 + UC_RISCV_REG_F27 = 60 + UC_RISCV_REG_F28 = 61 + UC_RISCV_REG_F29 = 62 + UC_RISCV_REG_F30 = 63 + UC_RISCV_REG_F31 = 64 + UC_RISCV_REG_PC = 65 + UC_RISCV_REG_ENDING = 66 + +# Alias registers + UC_RISCV_REG_ZERO = 1 + UC_RISCV_REG_RA = 2 + UC_RISCV_REG_SP = 3 + UC_RISCV_REG_GP = 4 + UC_RISCV_REG_TP = 5 + UC_RISCV_REG_T0 = 6 + UC_RISCV_REG_T1 = 7 + UC_RISCV_REG_T2 = 8 + UC_RISCV_REG_S0 = 9 + UC_RISCV_REG_FP = 9 + UC_RISCV_REG_S1 = 10 + UC_RISCV_REG_A0 = 11 + UC_RISCV_REG_A1 = 12 + UC_RISCV_REG_A2 = 13 + UC_RISCV_REG_A3 = 14 + UC_RISCV_REG_A4 = 15 + UC_RISCV_REG_A5 = 16 + UC_RISCV_REG_A6 = 17 + UC_RISCV_REG_A7 = 18 + UC_RISCV_REG_S2 = 19 + UC_RISCV_REG_S3 = 20 + UC_RISCV_REG_S4 = 21 + UC_RISCV_REG_S5 = 22 + UC_RISCV_REG_S6 = 23 + UC_RISCV_REG_S7 = 24 + UC_RISCV_REG_S8 = 25 + UC_RISCV_REG_S9 = 26 + UC_RISCV_REG_S10 = 27 + UC_RISCV_REG_S11 = 28 + UC_RISCV_REG_T3 = 29 + UC_RISCV_REG_T4 = 30 + UC_RISCV_REG_T5 = 31 + UC_RISCV_REG_T6 = 32 + UC_RISCV_REG_FT0 = 33 + UC_RISCV_REG_FT1 = 34 + UC_RISCV_REG_FT2 = 35 + UC_RISCV_REG_FT3 = 36 + UC_RISCV_REG_FT4 = 37 + UC_RISCV_REG_FT5 = 38 + UC_RISCV_REG_FT6 = 39 + UC_RISCV_REG_FT7 = 40 + UC_RISCV_REG_FS0 = 41 + UC_RISCV_REG_FS1 = 42 + UC_RISCV_REG_FA0 = 43 + UC_RISCV_REG_FA1 = 44 + UC_RISCV_REG_FA2 = 45 + UC_RISCV_REG_FA3 = 46 + UC_RISCV_REG_FA4 = 47 + UC_RISCV_REG_FA5 = 48 + UC_RISCV_REG_FA6 = 49 + UC_RISCV_REG_FA7 = 50 + UC_RISCV_REG_FS2 = 51 + UC_RISCV_REG_FS3 = 52 + UC_RISCV_REG_FS4 = 53 + UC_RISCV_REG_FS5 = 54 + UC_RISCV_REG_FS6 = 55 + UC_RISCV_REG_FS7 = 56 + UC_RISCV_REG_FS8 = 57 + UC_RISCV_REG_FS9 = 58 + UC_RISCV_REG_FS10 = 59 + UC_RISCV_REG_FS11 = 60 + UC_RISCV_REG_FT8 = 61 + UC_RISCV_REG_FT9 = 62 + UC_RISCV_REG_FT10 = 63 + UC_RISCV_REG_FT11 = 64 +end \ No newline at end of file diff --git a/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb b/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb index 3c61824f..a09f3e82 100644 --- 
a/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb +++ b/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb @@ -1,13 +1,14 @@ # For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [unicorn_const.rb] module UnicornEngine - UC_API_MAJOR = 1 + UC_API_MAJOR = 2 UC_API_MINOR = 0 - UC_VERSION_MAJOR = 1 + UC_VERSION_MAJOR = 2 UC_VERSION_MINOR = 0 - UC_VERSION_EXTRA = 3 + + UC_VERSION_EXTRA = 0 UC_SECOND_SCALE = 1000000 UC_MILISECOND_SCALE = 1000 UC_ARCH_ARM = 1 @@ -17,7 +18,8 @@ module UnicornEngine UC_ARCH_PPC = 5 UC_ARCH_SPARC = 6 UC_ARCH_M68K = 7 - UC_ARCH_MAX = 8 + UC_ARCH_RISCV = 8 + UC_ARCH_MAX = 9 UC_MODE_LITTLE_ENDIAN = 0 UC_MODE_BIG_ENDIAN = 1073741824 @@ -29,7 +31,6 @@ module UnicornEngine UC_MODE_ARM926 = 128 UC_MODE_ARM946 = 256 UC_MODE_ARM1176 = 512 - UC_MODE_ARMBE8 = 1024 UC_MODE_MICRO = 16 UC_MODE_MIPS3 = 32 UC_MODE_MIPS32R6 = 64 @@ -44,6 +45,8 @@ module UnicornEngine UC_MODE_SPARC32 = 4 UC_MODE_SPARC64 = 8 UC_MODE_V9 = 16 + UC_MODE_RISCV32 = 4 + UC_MODE_RISCV64 = 8 UC_ERR_OK = 0 UC_ERR_NOMEM = 1 diff --git a/bindings/rust/COPYING b/bindings/rust/COPYING deleted file mode 100644 index 00ccfbb6..00000000 --- a/bindings/rust/COPYING +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. 
If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. 
(Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. 
Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. 
The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. diff --git a/bindings/rust/Cargo.toml b/bindings/rust/Cargo.toml deleted file mode 100644 index 6edc7875..00000000 --- a/bindings/rust/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "unicorn" -version = "1.0.0" -authors = ["Lukas Seidel"] -documentation = "" -edition = "2018" -include = [ - "/.gitmodules", - "/COPYING", - "/Cargo.toml", - "/README.md", - "/src/*", -] -license = "GPL-2.0" -readme = "README.md" -repository = "https://github.com/unicorn-engine/unicorn/" -description = "Rust bindings for the Unicorn emulator with utility functions" -build = "build.rs" -links = "unicorn" - -[dependencies] -bitflags = "1.0" -libc = "0.2" -capstone="0.6.0" - -[build-dependencies] -build-helper = "0.1" diff --git a/bindings/rust/README.md b/bindings/rust/README.md deleted file mode 100644 index 014e1a60..00000000 --- a/bindings/rust/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# unicorn-rs - -Rust bindings for the [Unicorn](http://www.unicorn-engine.org/) emulator with utility functions. - -An extended version for fuzzing with AFL++ support can be found in https://github.com/aflplusplus/unicornafl. 
- -```rust -use unicorn::RegisterARM; -use unicorn::unicorn_const::{Arch, Mode, Permission, SECOND_SCALE}; - -fn main() { - let arm_code32: Vec<u8> = vec![0x17, 0x00, 0x40, 0xe2]; // sub r0, #23 - - let mut unicorn = unicorn::Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).expect("failed to initialize Unicorn instance"); - let mut emu = unicorn.borrow(); - emu.mem_map(0x1000, 0x4000, Permission::ALL).expect("failed to map code page"); - emu.mem_write(0x1000, &arm_code32).expect("failed to write instructions"); - - emu.reg_write(RegisterARM::R0 as i32, 123).expect("failed to write R0"); - emu.reg_write(RegisterARM::R5 as i32, 1337).expect("failed to write R5"); - - let _ = emu.emu_start(0x1000, (0x1000 + arm_code32.len()) as u64, 10 * SECOND_SCALE, 1000); - assert_eq!(emu.reg_read(RegisterARM::R0 as i32), Ok(100)); - assert_eq!(emu.reg_read(RegisterARM::R5 as i32), Ok(1337)); -} -``` -Further sample code can be found in ```tests/unicorn.rs```. - -## Installation - -This project has been tested on Linux, OS X and Windows. - -To use unicorn-rs, simply add it as a dependency to the Cargo.toml of your program: - -``` -[dependencies] -unicorn = { path = "/path/to/bindings/rust", version = "1.0.0" } -``` - -## Acknowledgements - -These bindings are based on Sébastien Duquette's (@ekse) [unicorn-rs](https://github.com/unicorn-rs/unicorn-rs). -We picked up the project, as it is no longer maintained. -Thanks to all contributors. - diff --git a/bindings/rust/build.rs b/bindings/rust/build.rs deleted file mode 100644 index 7df3e61c..00000000 --- a/bindings/rust/build.rs +++ /dev/null @@ -1,20 +0,0 @@ -use std::{env, process::Command}; - -use build_helper::rustc::{link_lib, link_search}; - -fn main() { - println!("cargo:rerun-if-changed=unicorn"); - let out_dir = env::var("OUT_DIR").unwrap(); - let unicorn = "libunicorn.a"; - let _ = Command::new("cp") - .current_dir("../..") - .arg(&unicorn) - .arg(&out_dir) - .status() - .unwrap(); - link_search( - Some(build_helper::SearchKind::Native), - build_helper::out_dir(), - ); - link_lib(Some(build_helper::LibKind::Static), "unicorn"); -} diff --git a/bindings/rust/src/arm.rs b/bindings/rust/src/arm.rs deleted file mode 100644 index 69178664..00000000 --- a/bindings/rust/src/arm.rs +++ /dev/null @@ -1,146 +0,0 @@ -#![allow(non_camel_case_types)] -// For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT - -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum RegisterARM { - // ARM registers - INVALID = 0, - APSR = 1, - APSR_NZCV = 2, - CPSR = 3, - FPEXC = 4, - FPINST = 5, - FPSCR = 6, - FPSCR_NZCV = 7, - FPSID = 8, - ITSTATE = 9, - LR = 10, - PC = 11, - SP = 12, - SPSR = 13, - D0 = 14, - D1 = 15, - D2 = 16, - D3 = 17, - D4 = 18, - D5 = 19, - D6 = 20, - D7 = 21, - D8 = 22, - D9 = 23, - D10 = 24, - D11 = 25, - D12 = 26, - D13 = 27, - D14 = 28, - D15 = 29, - D16 = 30, - D17 = 31, - D18 = 32, - D19 = 33, - D20 = 34, - D21 = 35, - D22 = 36, - D23 = 37, - D24 = 38, - D25 = 39, - D26 = 40, - D27 = 41, - D28 = 42, - D29 = 43, - D30 = 44, - D31 = 45, - FPINST2 = 46, - MVFR0 = 47, - MVFR1 = 48, - MVFR2 = 49, - Q0 = 50, - Q1 = 51, - Q2 = 52, - Q3 = 53, - Q4 = 54, - Q5 = 55, - Q6 = 56, - Q7 = 57, - Q8 = 58, - Q9 = 59, - Q10 = 60, - Q11 = 61, - Q12 = 62, - Q13 = 63, - Q14 = 64, - Q15 = 65, - R0 = 66, - R1 = 67, - R2 = 68, - R3 = 69, - R4 = 70, - R5 = 71, - R6 = 72, - R7 = 73, - R8 = 74, - R9 = 75, - R10 = 76, - R11 = 77, - R12 = 78, - S0 = 79, - S1 = 80, - S2 = 81, - S3 = 82, - S4 = 83, - S5 = 84, - S6 = 85, - S7 = 86, - S8 = 87, - S9 = 88, - S10 = 89, - S11 = 90, - S12 = 91, - S13 = 92, - S14 = 93, - S15 = 94, - S16 = 95, - S17 = 96, - S18 = 97, - S19 = 98, - S20 = 99, - S21 = 100, - S22 = 101, - S23 = 102, - S24 = 103, - S25 = 104, - S26 = 105, - S27 = 106, - S28 = 107, - S29 = 108, - S30 = 109, - S31 = 110, - C1_C0_2 = 111, - C13_C0_2 = 112, - C13_C0_3 = 113, - IPSR = 114, - MSP = 115, - PSP = 116, - CONTROL = 117, - XPSR = 118, - ENDING = 119, - // alias registers - // (assoc) R13 = 12, - // (assoc) R14 = 10, - // (assoc) R15 = 11, - // (assoc) SB = 75, - // (assoc) SL = 76, - // (assoc) FP = 77, - // (assoc) IP = 78, -} - -impl RegisterARM { - pub const R13: RegisterARM = RegisterARM::SP; - pub const R14: RegisterARM = RegisterARM::LR; - pub const R15: RegisterARM = RegisterARM::PC; - pub const SB: RegisterARM = RegisterARM::R9; - pub const SL: RegisterARM = RegisterARM::R10; - pub const FP: RegisterARM = RegisterARM::R11; - pub const IP: RegisterARM = RegisterARM::R12; -} diff --git a/bindings/rust/src/arm64.rs b/bindings/rust/src/arm64.rs deleted file mode 100644 index 8b7c1254..00000000 --- a/bindings/rust/src/arm64.rs +++ /dev/null @@ -1,321 +0,0 @@ -#![allow(non_camel_case_types)] - -// ARM64 registers -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum RegisterARM64 { - INVALID = 0, - X29 = 1, - X30 = 2, - NZCV = 3, - SP = 4, - WSP = 5, - WZR = 6, - XZR = 7, - B0 = 8, - B1 = 9, - B2 = 10, - B3 = 11, - B4 = 12, - B5 = 13, - B6 = 14, - B7 = 15, - B8 = 16, - B9 = 17, - B10 = 18, - B11 = 19, - B12 = 20, - B13 = 21, - B14 = 22, - B15 = 23, - B16 = 24, - B17 = 25, - B18 = 26, - B19 = 27, - B20 = 28, - B21 = 29, - B22 = 30, - B23 = 31, - B24 = 32, - B25 = 33, - B26 = 34, - B27 = 35, - B28 = 36, - B29 = 37, - B30 = 38, - B31 = 39, - D0 = 40, - D1 = 41, - D2 = 42, - D3 = 43, - D4 = 44, - D5 = 45, - D6 = 46, - D7 = 47, - D8 = 48, - D9 = 49, - D10 = 50, - D11 = 51, - D12 = 52, - D13 = 53, - D14 = 54, - D15 = 55, - D16 = 56, - D17 = 57, - D18 = 58, - D19 = 59, - D20 = 60, - D21 = 61, - D22 = 62, - D23 = 63, - D24 = 64, - D25 = 65, - D26 = 66, - D27 = 67, - D28 = 68, - D29 = 69, - D30 = 70, - D31 = 71, - H0 = 72, - H1 = 73, - H2 = 74, - H3 = 75, - H4 = 76, - H5 = 77, - H6 = 78, - H7 = 79, - H8 = 80, - H9 = 81, - H10 = 82, - H11 = 83, - H12 = 84, - H13 = 85, - H14 = 86, - H15 = 87, - H16 = 88, - H17 = 89, - H18 = 90, - H19 = 91, - H20 = 92, - H21 = 
93, - H22 = 94, - H23 = 95, - H24 = 96, - H25 = 97, - H26 = 98, - H27 = 99, - H28 = 100, - H29 = 101, - H30 = 102, - H31 = 103, - Q0 = 104, - Q1 = 105, - Q2 = 106, - Q3 = 107, - Q4 = 108, - Q5 = 109, - Q6 = 110, - Q7 = 111, - Q8 = 112, - Q9 = 113, - Q10 = 114, - Q11 = 115, - Q12 = 116, - Q13 = 117, - Q14 = 118, - Q15 = 119, - Q16 = 120, - Q17 = 121, - Q18 = 122, - Q19 = 123, - Q20 = 124, - Q21 = 125, - Q22 = 126, - Q23 = 127, - Q24 = 128, - Q25 = 129, - Q26 = 130, - Q27 = 131, - Q28 = 132, - Q29 = 133, - Q30 = 134, - Q31 = 135, - S0 = 136, - S1 = 137, - S2 = 138, - S3 = 139, - S4 = 140, - S5 = 141, - S6 = 142, - S7 = 143, - S8 = 144, - S9 = 145, - S10 = 146, - S11 = 147, - S12 = 148, - S13 = 149, - S14 = 150, - S15 = 151, - S16 = 152, - S17 = 153, - S18 = 154, - S19 = 155, - S20 = 156, - S21 = 157, - S22 = 158, - S23 = 159, - S24 = 160, - S25 = 161, - S26 = 162, - S27 = 163, - S28 = 164, - S29 = 165, - S30 = 166, - S31 = 167, - W0 = 168, - W1 = 169, - W2 = 170, - W3 = 171, - W4 = 172, - W5 = 173, - W6 = 174, - W7 = 175, - W8 = 176, - W9 = 177, - W10 = 178, - W11 = 179, - W12 = 180, - W13 = 181, - W14 = 182, - W15 = 183, - W16 = 184, - W17 = 185, - W18 = 186, - W19 = 187, - W20 = 188, - W21 = 189, - W22 = 190, - W23 = 191, - W24 = 192, - W25 = 193, - W26 = 194, - W27 = 195, - W28 = 196, - W29 = 197, - W30 = 198, - X0 = 199, - X1 = 200, - X2 = 201, - X3 = 202, - X4 = 203, - X5 = 204, - X6 = 205, - X7 = 206, - X8 = 207, - X9 = 208, - X10 = 209, - X11 = 210, - X12 = 211, - X13 = 212, - X14 = 213, - X15 = 214, - X16 = 215, - X17 = 216, - X18 = 217, - X19 = 218, - X20 = 219, - X21 = 220, - X22 = 221, - X23 = 222, - X24 = 223, - X25 = 224, - X26 = 225, - X27 = 226, - X28 = 227, - V0 = 228, - V1 = 229, - V2 = 230, - V3 = 231, - V4 = 232, - V5 = 233, - V6 = 234, - V7 = 235, - V8 = 236, - V9 = 237, - V10 = 238, - V11 = 239, - V12 = 240, - V13 = 241, - V14 = 242, - V15 = 243, - V16 = 244, - V17 = 245, - V18 = 246, - V19 = 247, - V20 = 248, - V21 = 249, - V22 = 250, - V23 = 251, - V24 = 252, - V25 = 253, - V26 = 254, - V27 = 255, - V28 = 256, - V29 = 257, - V30 = 258, - V31 = 259, - - // pseudo registers - PC = 260, - CPACR_EL1 = 261, - - // thread registers - TPIDR_EL0 = 262, - TPIDRRO_EL0 = 263, - TPIDR_EL1 = 264, - PSTATE = 265, - - // exception link registers - ELR_EL0 = 266, - ELR_EL1 = 267, - ELR_EL2 = 268, - ELR_EL3 = 269, - - // stack pointers registers - SP_EL0 = 270, - SP_EL1 = 271, - SP_EL2 = 272, - SP_EL3 = 273, - - // other CP15 registers - TTBR0_EL1 = 274, - TTBR1_EL1 = 275, - ESR_EL0 = 276, - ESR_EL1 = 277, - ESR_EL2 = 278, - ESR_EL3 = 279, - FAR_EL0 = 280, - FAR_EL1 = 281, - FAR_EL2 = 282, - FAR_EL3 = 283, - PAR_EL1 = 284, - MAIR_EL1 = 285, - VBAR_EL0 = 286, - VBAR_EL1 = 287, - VBAR_EL2 = 288, - VBAR_EL3 = 289, - ENDING = 290, - - // alias registers - // (assoc) IP0 = 215, - // (assoc) IP1 = 216, - // (assoc) FP = 1, - // (assoc) LR = 2, -} - -impl RegisterARM64 { - pub const IP0: RegisterARM64 = RegisterARM64::X16; - pub const IP1: RegisterARM64 = RegisterARM64::X17; - pub const FP: RegisterARM64 = RegisterARM64::X29; - pub const LR: RegisterARM64 = RegisterARM64::X30; -} diff --git a/bindings/rust/src/ffi.rs b/bindings/rust/src/ffi.rs deleted file mode 100644 index 3252a3bd..00000000 --- a/bindings/rust/src/ffi.rs +++ /dev/null @@ -1,230 +0,0 @@ -#![allow(non_camel_case_types)] -#![allow(dead_code)] - -use super::unicorn_const::*; -use libc::{c_char, c_int}; -use std::ffi::c_void; -use std::pin::Pin; - -pub type uc_handle = *mut c_void; -pub type uc_hook = *mut c_void; -pub type 
uc_context = libc::size_t; - -extern "C" { - pub fn uc_version(major: *mut u32, minor: *mut u32) -> u32; - pub fn uc_arch_supported(arch: Arch) -> bool; - pub fn uc_open(arch: Arch, mode: Mode, engine: *mut uc_handle) -> uc_error; - pub fn uc_close(engine: uc_handle) -> uc_error; - pub fn uc_context_free(mem: uc_context) -> uc_error; - pub fn uc_errno(engine: uc_handle) -> uc_error; - pub fn uc_strerror(error_code: uc_error) -> *const c_char; - pub fn uc_reg_write(engine: uc_handle, regid: c_int, value: *const c_void) -> uc_error; - pub fn uc_reg_read(engine: uc_handle, regid: c_int, value: *mut c_void) -> uc_error; - pub fn uc_mem_write( - engine: uc_handle, - address: u64, - bytes: *const u8, - size: libc::size_t, - ) -> uc_error; - pub fn uc_mem_read( - engine: uc_handle, - address: u64, - bytes: *mut u8, - size: libc::size_t, - ) -> uc_error; - pub fn uc_mem_map(engine: uc_handle, address: u64, size: libc::size_t, perms: u32) -> uc_error; - pub fn uc_mem_map_ptr( - engine: uc_handle, - address: u64, - size: libc::size_t, - perms: u32, - ptr: *mut c_void, - ) -> uc_error; - pub fn uc_mem_unmap(engine: uc_handle, address: u64, size: libc::size_t) -> uc_error; - pub fn uc_mem_protect( - engine: uc_handle, - address: u64, - size: libc::size_t, - perms: u32, - ) -> uc_error; - pub fn uc_mem_regions( - engine: uc_handle, - regions: *const *const MemRegion, - count: *mut u32, - ) -> uc_error; - pub fn uc_emu_start( - engine: uc_handle, - begin: u64, - until: u64, - timeout: u64, - count: libc::size_t, - ) -> uc_error; - pub fn uc_emu_stop(engine: uc_handle) -> uc_error; - pub fn uc_hook_add( - engine: uc_handle, - hook: *mut uc_hook, - hook_type: HookType, - callback: *mut c_void, - user_data: *mut c_void, - begin: u64, - end: u64, - ... - ) -> uc_error; - pub fn uc_hook_del(engine: uc_handle, hook: uc_hook) -> uc_error; - pub fn uc_query(engine: uc_handle, query_type: Query, result: *mut libc::size_t) -> uc_error; - pub fn uc_context_alloc(engine: uc_handle, context: *mut uc_context) -> uc_error; - pub fn uc_context_save(engine: uc_handle, context: uc_context) -> uc_error; - pub fn uc_context_restore(engine: uc_handle, context: uc_context) -> uc_error; -} - -pub struct CodeHook { - pub unicorn: *mut crate::UnicornInner, - pub callback: Box, -} - -pub struct BlockHook { - pub unicorn: *mut crate::UnicornInner, - pub callback: Box, -} - -pub struct MemHook { - pub unicorn: *mut crate::UnicornInner, - pub callback: Box, -} - -pub struct InterruptHook { - pub unicorn: *mut crate::UnicornInner, - pub callback: Box, -} - -pub struct InstructionInHook { - pub unicorn: *mut crate::UnicornInner, - pub callback: Box, -} - -pub struct InstructionOutHook { - pub unicorn: *mut crate::UnicornInner, - pub callback: Box, -} - -pub struct InstructionSysHook { - pub unicorn: *mut crate::UnicornInner, - pub callback: Box, -} - -pub extern "C" fn code_hook_proxy( - uc: uc_handle, - address: u64, - size: u32, - user_data: *mut CodeHook, -) { - let unicorn = unsafe { &mut *(*user_data).unicorn }; - let callback = &mut unsafe { &mut *(*user_data).callback }; - assert_eq!(uc, unicorn.uc); - callback( - crate::UnicornHandle { - inner: unsafe { Pin::new_unchecked(unicorn) }, - }, - address, - size, - ); -} - -pub extern "C" fn block_hook_proxy( - uc: uc_handle, - address: u64, - size: u32, - user_data: *mut BlockHook, -) { - let unicorn = unsafe { &mut *(*user_data).unicorn }; - let callback = &mut unsafe { &mut *(*user_data).callback }; - assert_eq!(uc, unicorn.uc); - callback( - crate::UnicornHandle { - inner: 
unsafe { Pin::new_unchecked(unicorn) }, - }, - address, - size, - ); -} - -pub extern "C" fn mem_hook_proxy( - uc: uc_handle, - mem_type: MemType, - address: u64, - size: u32, - value: i64, - user_data: *mut MemHook, -) { - let unicorn = unsafe { &mut *(*user_data).unicorn }; - let callback = &mut unsafe { &mut *(*user_data).callback }; - assert_eq!(uc, unicorn.uc); - callback( - crate::UnicornHandle { - inner: unsafe { Pin::new_unchecked(unicorn) }, - }, - mem_type, - address, - size as usize, - value, - ); -} - -pub extern "C" fn intr_hook_proxy(uc: uc_handle, value: u32, user_data: *mut InterruptHook) { - let unicorn = unsafe { &mut *(*user_data).unicorn }; - let callback = &mut unsafe { &mut *(*user_data).callback }; - assert_eq!(uc, unicorn.uc); - callback( - crate::UnicornHandle { - inner: unsafe { Pin::new_unchecked(unicorn) }, - }, - value, - ); -} - -pub extern "C" fn insn_in_hook_proxy( - uc: uc_handle, - port: u32, - size: usize, - user_data: *mut InstructionInHook, -) { - let unicorn = unsafe { &mut *(*user_data).unicorn }; - let callback = &mut unsafe { &mut *(*user_data).callback }; - assert_eq!(uc, unicorn.uc); - callback( - crate::UnicornHandle { - inner: unsafe { Pin::new_unchecked(unicorn) }, - }, - port, - size, - ); -} - -pub extern "C" fn insn_out_hook_proxy( - uc: uc_handle, - port: u32, - size: usize, - value: u32, - user_data: *mut InstructionOutHook, -) { - let unicorn = unsafe { &mut *(*user_data).unicorn }; - let callback = &mut unsafe { &mut *(*user_data).callback }; - assert_eq!(uc, unicorn.uc); - callback( - crate::UnicornHandle { - inner: unsafe { Pin::new_unchecked(unicorn) }, - }, - port, - size, - value, - ); -} - -pub extern "C" fn insn_sys_hook_proxy(uc: uc_handle, user_data: *mut InstructionSysHook) { - let unicorn = unsafe { &mut *(*user_data).unicorn }; - let callback = &mut unsafe { &mut *(*user_data).callback }; - assert_eq!(uc, unicorn.uc); - callback(crate::UnicornHandle { - inner: unsafe { Pin::new_unchecked(unicorn) }, - }); -} diff --git a/bindings/rust/src/lib.rs b/bindings/rust/src/lib.rs deleted file mode 100644 index 27c88402..00000000 --- a/bindings/rust/src/lib.rs +++ /dev/null @@ -1,784 +0,0 @@ -//! Bindings for the Unicorn emulator. -//! -//! -//! -//! # Example use -//! -//! ```rust -//! -//! use unicorn::RegisterARM; -//! use unicorn::unicorn_const::{Arch, Mode, Permission, SECOND_SCALE}; -//! -//! fn main() { -//! let arm_code32: Vec<u8> = vec![0x17, 0x00, 0x40, 0xe2]; // sub r0, #23 -//! -//! let mut unicorn = unicorn::Unicorn::new(Arch::ARM, Mode::LITTLE_ENDIAN).expect("failed to initialize Unicorn instance"); -//! let mut emu = unicorn.borrow(); -//! emu.mem_map(0x1000, 0x4000, Permission::ALL).expect("failed to map code page"); -//! emu.mem_write(0x1000, &arm_code32).expect("failed to write instructions"); -//! -//! emu.reg_write(RegisterARM::R0 as i32, 123).expect("failed to write R0"); -//! emu.reg_write(RegisterARM::R5 as i32, 1337).expect("failed to write R5"); -//! -//! let _ = emu.emu_start(0x1000, (0x1000 + arm_code32.len()) as u64, 10 * SECOND_SCALE, 1000); -//! assert_eq!(emu.reg_read(RegisterARM::R0 as i32), Ok(100)); -//! assert_eq!(emu.reg_read(RegisterARM::R5 as i32), Ok(1337)); -//! } -//! ``` -//!
- -mod ffi; -pub mod unicorn_const; - -mod arm; -mod arm64; -mod m68k; -mod mips; -mod ppc; -mod sparc; -mod x86; -pub use crate::{arm::*, arm64::*, m68k::*, mips::*, ppc::*, sparc::*, x86::*}; - -use ffi::uc_handle; -use std::collections::HashMap; -use std::ffi::c_void; -use std::marker::PhantomPinned; -use std::pin::Pin; -use unicorn_const::*; - -#[derive(Debug)] -pub struct Context { - context: ffi::uc_context, -} - -impl Context { - pub fn new() -> Self { - Context { context: 0 } - } - pub fn is_initialized(&self) -> bool { - self.context != 0 - } -} - -impl Drop for Context { - fn drop(&mut self) { - unsafe { ffi::uc_context_free(self.context) }; - } -} - -#[derive(Debug)] -/// A Unicorn emulator instance. -pub struct Unicorn { - inner: Pin>, -} - -#[derive(Debug)] -/// Handle used to safely access exposed functions and data of a Unicorn instance. -pub struct UnicornHandle<'a> { - inner: Pin<&'a mut UnicornInner>, -} - -/// Internal Management struct -pub struct UnicornInner { - pub uc: uc_handle, - pub arch: Arch, - pub code_hooks: HashMap<*mut libc::c_void, Box>, - pub block_hooks: HashMap<*mut libc::c_void, Box>, - pub mem_hooks: HashMap<*mut libc::c_void, Box>, - pub intr_hooks: HashMap<*mut libc::c_void, Box>, - pub insn_in_hooks: HashMap<*mut libc::c_void, Box>, - pub insn_out_hooks: HashMap<*mut libc::c_void, Box>, - pub insn_sys_hooks: HashMap<*mut libc::c_void, Box>, - _pin: PhantomPinned, -} - -impl Unicorn { - /// Create a new instance of the unicorn engine for the specified architecture - /// and hardware mode. - pub fn new(arch: Arch, mode: Mode) -> Result { - let mut handle = std::ptr::null_mut(); - let err = unsafe { ffi::uc_open(arch, mode, &mut handle) }; - if err == uc_error::OK { - Ok(Unicorn { - inner: Box::pin(UnicornInner { - uc: handle, - arch: arch, - code_hooks: HashMap::new(), - block_hooks: HashMap::new(), - mem_hooks: HashMap::new(), - intr_hooks: HashMap::new(), - insn_in_hooks: HashMap::new(), - insn_out_hooks: HashMap::new(), - insn_sys_hooks: HashMap::new(), - _pin: std::marker::PhantomPinned, - }), - }) - } else { - Err(err) - } - } - - pub fn borrow<'a>(&'a mut self) -> UnicornHandle<'a> { - UnicornHandle { - inner: self.inner.as_mut(), - } - } -} - -impl Drop for Unicorn { - fn drop(&mut self) { - unsafe { ffi::uc_close(self.inner.uc) }; - } -} - -impl std::fmt::Debug for UnicornInner { - fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "Unicorn {{ uc: {:p} }}", self.uc) - } -} - -impl<'a> UnicornHandle<'a> { - - /// Return the architecture of the current emulator. - pub fn get_arch(&self) -> Arch { - self.inner.arch - } - - /// Returns a vector with the memory regions that are mapped in the emulator. - pub fn mem_regions(&self) -> Result, uc_error> { - let mut nb_regions: u32 = 0; - let mut p_regions: *const MemRegion = std::ptr::null_mut(); - let err = unsafe { ffi::uc_mem_regions(self.inner.uc, &mut p_regions, &mut nb_regions) }; - if err == uc_error::OK { - let mut regions = Vec::new(); - for i in 0..nb_regions { - regions.push(unsafe { std::mem::transmute_copy(&*p_regions.offset(i as isize)) }); - } - unsafe { libc::free(p_regions as _) }; - Ok(regions) - } else { - Err(err) - } - } - - /// Read a range of bytes from memory at the specified address. 
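- /// A minimal sketch, assuming `emu` is a `UnicornHandle` and a page is
- /// already mapped at the illustrative address `0x1000`:
- ///
- /// ```ignore
- /// let mut buf = [0u8; 4];
- /// emu.mem_read(0x1000, &mut buf).expect("failed to read memory");
- /// ```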
- pub fn mem_read(&self, address: u64, buf: &mut [u8]) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_mem_read(self.inner.uc, address, buf.as_mut_ptr(), buf.len()) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Return a range of bytes from memory at the specified address as a vector. - pub fn mem_read_as_vec(&self, address: u64, size: usize) -> Result<Vec<u8>, uc_error> { - let mut buf = vec![0; size]; - let err = unsafe { ffi::uc_mem_read(self.inner.uc, address, buf.as_mut_ptr(), size) }; - if err == uc_error::OK { - Ok(buf) - } else { - Err(err) - } - } - - pub fn mem_write(&mut self, address: u64, bytes: &[u8]) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_mem_write(self.inner.uc, address, bytes.as_ptr(), bytes.len()) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Map an existing memory region in the emulator at the specified address. - /// - /// It is the responsibility of the caller to ensure that `size` matches the size - /// of the passed buffer; an invalid `size` value will likely cause a crash in unicorn. - /// - /// `address` must be aligned to 4kb or this will return `Error::ARG`. - /// - /// `size` must be a multiple of 4kb or this will return `Error::ARG`. - /// - /// `ptr` is a pointer to the provided memory region that will be used by the emulator. - pub fn mem_map_ptr( - &mut self, - address: u64, - size: usize, - perms: Permission, - ptr: *mut c_void, - ) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_mem_map_ptr(self.inner.uc, address, size, perms.bits(), ptr) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Map a memory region in the emulator at the specified address. - /// - /// `address` must be aligned to 4kb or this will return `Error::ARG`. - /// `size` must be a multiple of 4kb or this will return `Error::ARG`. - pub fn mem_map( - &mut self, - address: u64, - size: libc::size_t, - perms: Permission, - ) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_mem_map(self.inner.uc, address, size, perms.bits()) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Unmap a memory region. - /// - /// `address` must be aligned to 4kb or this will return `Error::ARG`. - /// `size` must be a multiple of 4kb or this will return `Error::ARG`. - pub fn mem_unmap(&mut self, address: u64, size: libc::size_t) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_mem_unmap(self.inner.uc, address, size) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Set the memory permissions for an existing memory region. - /// - /// `address` must be aligned to 4kb or this will return `Error::ARG`. - /// `size` must be a multiple of 4kb or this will return `Error::ARG`. - pub fn mem_protect( - &mut self, - address: u64, - size: libc::size_t, - perms: Permission, - ) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_mem_protect(self.inner.uc, address, size, perms.bits()) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Write an unsigned value to a register. - pub fn reg_write<T: Into<i32>>(&mut self, regid: T, value: u64) -> Result<(), uc_error> { - let err = - unsafe { ffi::uc_reg_write(self.inner.uc, regid.into(), &value as *const _ as _) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Write variable sized values into registers. - /// - /// The user has to make sure that the buffer length matches the register size.
- /// This adds support for registers >64 bit (GDTR/IDTR, XMM, YMM, ZMM (x86); Q, V (arm64)). - pub fn reg_write_long>(&self, regid: T, value: Box<[u8]>) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_reg_write(self.inner.uc, regid.into(), value.as_ptr() as _) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Read an unsigned value from a register. - /// - /// Not to be used with registers larger than 64 bit. - pub fn reg_read>(&self, regid: T) -> Result { - let mut value: u64 = 0; - let err = - unsafe { ffi::uc_reg_read(self.inner.uc, regid.into(), &mut value as *mut u64 as _) }; - if err == uc_error::OK { - Ok(value) - } else { - Err(err) - } - } - - /// Read 128, 256 or 512 bit register value into heap allocated byte array. - /// - /// This adds safe support for registers >64 bit (GDTR/IDTR, XMM, YMM, ZMM (x86); Q, V (arm64)). - pub fn reg_read_long>(&self, regid: T) -> Result, uc_error> { - let err: uc_error; - let boxed: Box<[u8]>; - let mut value: Vec; - let curr_reg_id = regid.into(); - let curr_arch = self.get_arch(); - - if curr_arch == Arch::X86 { - if curr_reg_id >= x86::RegisterX86::XMM0 as i32 - && curr_reg_id <= x86::RegisterX86::XMM31 as i32 - { - value = vec![0; 16]; - } else if curr_reg_id >= x86::RegisterX86::YMM0 as i32 - && curr_reg_id <= x86::RegisterX86::YMM31 as i32 - { - value = vec![0; 32]; - } else if curr_reg_id >= x86::RegisterX86::ZMM0 as i32 - && curr_reg_id <= x86::RegisterX86::ZMM31 as i32 - { - value = vec![0; 64]; - } else if curr_reg_id == x86::RegisterX86::GDTR as i32 - || curr_reg_id == x86::RegisterX86::IDTR as i32 - { - value = vec![0; 10]; // 64 bit base address in IA-32e mode - } else { - return Err(uc_error::ARG); - } - } else if curr_arch == Arch::ARM64 { - if (curr_reg_id >= arm64::RegisterARM64::Q0 as i32 - && curr_reg_id <= arm64::RegisterARM64::Q31 as i32) - || (curr_reg_id >= arm64::RegisterARM64::V0 as i32 - && curr_reg_id <= arm64::RegisterARM64::V31 as i32) - { - value = vec![0; 16]; - } else { - return Err(uc_error::ARG); - } - } else { - return Err(uc_error::ARCH); - } - - err = unsafe { ffi::uc_reg_read(self.inner.uc, curr_reg_id, value.as_mut_ptr() as _) }; - - if err == uc_error::OK { - boxed = value.into_boxed_slice(); - Ok(boxed) - } else { - Err(err) - } - } - - /// Read a signed 32-bit value from a register. - pub fn reg_read_i32>(&self, regid: T) -> Result { - let mut value: i32 = 0; - let err = - unsafe { ffi::uc_reg_read(self.inner.uc, regid.into(), &mut value as *mut i32 as _) }; - if err == uc_error::OK { - Ok(value) - } else { - Err(err) - } - } - - /// Add a code hook. - pub fn add_code_hook( - &mut self, - begin: u64, - end: u64, - callback: F, - ) -> Result - where - F: FnMut(UnicornHandle, u64, u32), - { - let mut hook_ptr = std::ptr::null_mut(); - let mut user_data = Box::new(ffi::CodeHook { - unicorn: unsafe { self.inner.as_mut().get_unchecked_mut() } as _, - callback: Box::new(callback), - }); - - let err = unsafe { - ffi::uc_hook_add( - self.inner.uc, - &mut hook_ptr, - HookType::CODE, - ffi::code_hook_proxy as _, - user_data.as_mut() as *mut _ as _, - begin, - end, - ) - }; - if err == uc_error::OK { - unsafe { self.inner.as_mut().get_unchecked_mut() } - .code_hooks - .insert(hook_ptr, user_data); - Ok(hook_ptr) - } else { - Err(err) - } - } - - /// Add a block hook. 
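- /// A minimal sketch, assuming `emu` is a `UnicornHandle`; the closure
- /// parameters follow the `FnMut(UnicornHandle, u64, u32)` bound below
- /// (basic block address and size):
- ///
- /// ```ignore
- /// let hook = emu
- ///     .add_block_hook(|_uc, address, size| {
- ///         println!("block at {:#x} ({} bytes)", address, size);
- ///     })
- ///     .expect("failed to add block hook");
- /// ```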
- pub fn add_block_hook(&mut self, callback: F) -> Result - where - F: FnMut(UnicornHandle, u64, u32), - { - let mut hook_ptr = std::ptr::null_mut(); - let mut user_data = Box::new(ffi::BlockHook { - unicorn: unsafe { self.inner.as_mut().get_unchecked_mut() } as _, - callback: Box::new(callback), - }); - - let err = unsafe { - ffi::uc_hook_add( - self.inner.uc, - &mut hook_ptr, - HookType::BLOCK, - ffi::block_hook_proxy as _, - user_data.as_mut() as *mut _ as _, - 1, - 0, - ) - }; - if err == uc_error::OK { - unsafe { self.inner.as_mut().get_unchecked_mut() } - .block_hooks - .insert(hook_ptr, user_data); - Ok(hook_ptr) - } else { - Err(err) - } - } - - /// Add a memory hook. - pub fn add_mem_hook( - &mut self, - hook_type: HookType, - begin: u64, - end: u64, - callback: F, - ) -> Result - where - F: FnMut(UnicornHandle, MemType, u64, usize, i64), - { - if !(HookType::MEM_ALL | HookType::MEM_READ_AFTER).contains(hook_type) { - return Err(uc_error::ARG); - } - - let mut hook_ptr = std::ptr::null_mut(); - let mut user_data = Box::new(ffi::MemHook { - unicorn: unsafe { self.inner.as_mut().get_unchecked_mut() } as _, - callback: Box::new(callback), - }); - - let err = unsafe { - ffi::uc_hook_add( - self.inner.uc, - &mut hook_ptr, - hook_type, - ffi::mem_hook_proxy as _, - user_data.as_mut() as *mut _ as _, - begin, - end, - ) - }; - if err == uc_error::OK { - unsafe { self.inner.as_mut().get_unchecked_mut() } - .mem_hooks - .insert(hook_ptr, user_data); - Ok(hook_ptr) - } else { - Err(err) - } - } - - /// Add an interrupt hook. - pub fn add_intr_hook(&mut self, callback: F) -> Result - where - F: FnMut(UnicornHandle, u32), - { - let mut hook_ptr = std::ptr::null_mut(); - let mut user_data = Box::new(ffi::InterruptHook { - unicorn: unsafe { self.inner.as_mut().get_unchecked_mut() } as _, - callback: Box::new(callback), - }); - - let err = unsafe { - ffi::uc_hook_add( - self.inner.uc, - &mut hook_ptr, - HookType::INTR, - ffi::intr_hook_proxy as _, - user_data.as_mut() as *mut _ as _, - 0, - 0, - ) - }; - if err == uc_error::OK { - unsafe { self.inner.as_mut().get_unchecked_mut() } - .intr_hooks - .insert(hook_ptr, user_data); - Ok(hook_ptr) - } else { - Err(err) - } - } - - /// Add hook for x86 IN instruction. - pub fn add_insn_in_hook(&mut self, callback: F) -> Result - where - F: FnMut(UnicornHandle, u32, usize), - { - let mut hook_ptr = std::ptr::null_mut(); - let mut user_data = Box::new(ffi::InstructionInHook { - unicorn: unsafe { self.inner.as_mut().get_unchecked_mut() } as _, - callback: Box::new(callback), - }); - - let err = unsafe { - ffi::uc_hook_add( - self.inner.uc, - &mut hook_ptr, - HookType::INSN, - ffi::insn_in_hook_proxy as _, - user_data.as_mut() as *mut _ as _, - 0, - 0, - x86::InsnX86::IN, - ) - }; - if err == uc_error::OK { - unsafe { self.inner.as_mut().get_unchecked_mut() } - .insn_in_hooks - .insert(hook_ptr, user_data); - Ok(hook_ptr) - } else { - Err(err) - } - } - - /// Add hook for x86 OUT instruction. 
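- /// A minimal sketch, assuming `emu` is a `UnicornHandle` for an x86 instance;
- /// the closure receives the port, the access size and the value being written,
- /// per the `FnMut(UnicornHandle, u32, usize, u32)` bound below:
- ///
- /// ```ignore
- /// let hook = emu
- ///     .add_insn_out_hook(|_uc, port, size, value| {
- ///         println!("OUT port={:#x} size={} value={:#x}", port, size, value);
- ///     })
- ///     .expect("failed to add out hook");
- /// ```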
- pub fn add_insn_out_hook(&mut self, callback: F) -> Result - where - F: FnMut(UnicornHandle, u32, usize, u32), - { - let mut hook_ptr = std::ptr::null_mut(); - let mut user_data = Box::new(ffi::InstructionOutHook { - unicorn: unsafe { self.inner.as_mut().get_unchecked_mut() } as _, - callback: Box::new(callback), - }); - - let err = unsafe { - ffi::uc_hook_add( - self.inner.uc, - &mut hook_ptr, - HookType::INSN, - ffi::insn_out_hook_proxy as _, - user_data.as_mut() as *mut _ as _, - 0, - 0, - x86::InsnX86::OUT, - ) - }; - if err == uc_error::OK { - unsafe { self.inner.as_mut().get_unchecked_mut() } - .insn_out_hooks - .insert(hook_ptr, user_data); - Ok(hook_ptr) - } else { - Err(err) - } - } - - /// Add hook for x86 SYSCALL or SYSENTER. - pub fn add_insn_sys_hook( - &mut self, - insn_type: x86::InsnSysX86, - begin: u64, - end: u64, - callback: F, - ) -> Result - where - F: FnMut(UnicornHandle), - { - let mut hook_ptr = std::ptr::null_mut(); - let mut user_data = Box::new(ffi::InstructionSysHook { - unicorn: unsafe { self.inner.as_mut().get_unchecked_mut() } as _, - callback: Box::new(callback), - }); - - let err = unsafe { - ffi::uc_hook_add( - self.inner.uc, - &mut hook_ptr, - HookType::INSN, - ffi::insn_sys_hook_proxy as _, - user_data.as_mut() as *mut _ as _, - begin, - end, - insn_type, - ) - }; - if err == uc_error::OK { - unsafe { self.inner.as_mut().get_unchecked_mut() } - .insn_sys_hooks - .insert(hook_ptr, user_data); - Ok(hook_ptr) - } else { - Err(err) - } - } - - /// Remove a hook. - /// - /// `hook` is the value returned by `add_*_hook` functions. - pub fn remove_hook(&mut self, hook: ffi::uc_hook) -> Result<(), uc_error> { - let handle = unsafe { self.inner.as_mut().get_unchecked_mut() }; - let err: uc_error; - let mut in_one_hashmap = false; - - if handle.code_hooks.contains_key(&hook) { - in_one_hashmap = true; - handle.code_hooks.remove(&hook); - } - - if handle.mem_hooks.contains_key(&hook) { - in_one_hashmap = true; - handle.mem_hooks.remove(&hook); - } - - if handle.block_hooks.contains_key(&hook) { - in_one_hashmap = true; - handle.block_hooks.remove(&hook); - } - - if handle.intr_hooks.contains_key(&hook) { - in_one_hashmap = true; - handle.intr_hooks.remove(&hook); - } - - if handle.insn_in_hooks.contains_key(&hook) { - in_one_hashmap = true; - handle.insn_in_hooks.remove(&hook); - } - - if handle.insn_out_hooks.contains_key(&hook) { - in_one_hashmap = true; - handle.insn_out_hooks.remove(&hook); - } - - if handle.insn_sys_hooks.contains_key(&hook) { - in_one_hashmap = true; - handle.insn_sys_hooks.remove(&hook); - } - - if in_one_hashmap { - err = unsafe { ffi::uc_hook_del(handle.uc, hook) }; - } else { - err = uc_error::HOOK; - } - - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Allocate and return an empty Unicorn context. - /// - /// To be populated via context_save. - pub fn context_alloc(&self) -> Result { - let mut empty_context: ffi::uc_context = Default::default(); - let err = unsafe { ffi::uc_context_alloc(self.inner.uc, &mut empty_context) }; - if err == uc_error::OK { - Ok(Context { - context: empty_context, - }) - } else { - Err(err) - } - } - - /// Save current Unicorn context to previously allocated Context struct. - pub fn context_save(&self, context: &mut Context) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_context_save(self.inner.uc, context.context) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Allocate and return a Context struct initialized with the current CPU context. 
- /// - /// This can be used for fast rollbacks with context_restore. - /// In case of many non-concurrent context saves, use context_alloc and *_save - /// individually to avoid unnecessary allocations. - pub fn context_init(&self) -> Result<Context, uc_error> { - let mut new_context: ffi::uc_context = Default::default(); - let err = unsafe { ffi::uc_context_alloc(self.inner.uc, &mut new_context) }; - if err != uc_error::OK { - return Err(err); - } - let err = unsafe { ffi::uc_context_save(self.inner.uc, new_context) }; - if err == uc_error::OK { - Ok(Context { - context: new_context, - }) - } else { - unsafe { ffi::uc_context_free(new_context) }; - Err(err) - } - } - - /// Restore a previously saved Unicorn context. - /// - /// Perform a quick rollback of the CPU context, including registers and some - /// internal metadata. Contexts may not be shared across engine instances with - /// differing arches or modes. Memory has to be restored manually, if needed. - pub fn context_restore(&self, context: &Context) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_context_restore(self.inner.uc, context.context) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Emulate machine code for a specified duration. - /// - /// `begin` is the address at which to start the emulation. The emulation stops if `until` - /// is hit. `timeout` specifies a duration in microseconds after which the emulation is - /// stopped (infinite execution if set to 0). `count` is the maximum number of instructions - /// to emulate (emulate all the available instructions if set to 0). - pub fn emu_start( - &mut self, - begin: u64, - until: u64, - timeout: u64, - count: usize, - ) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_emu_start(self.inner.uc, begin, until, timeout, count as _) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Stop the emulation. - /// - /// This is usually called from a callback function in hooks. - /// NOTE: For now, this will stop the execution only after the current block. - pub fn emu_stop(&mut self) -> Result<(), uc_error> { - let err = unsafe { ffi::uc_emu_stop(self.inner.uc) }; - if err == uc_error::OK { - Ok(()) - } else { - Err(err) - } - } - - /// Query the internal status of the engine.
- /// - /// supported: MODE, PAGE_SIZE, ARCH - pub fn query(&self, query: Query) -> Result { - let mut result: libc::size_t = Default::default(); - let err = unsafe { ffi::uc_query(self.inner.uc, query, &mut result) }; - if err == uc_error::OK { - Ok(result) - } else { - Err(err) - } - } -} diff --git a/bindings/rust/src/m68k.rs b/bindings/rust/src/m68k.rs deleted file mode 100644 index 6c04e851..00000000 --- a/bindings/rust/src/m68k.rs +++ /dev/null @@ -1,24 +0,0 @@ -// M68K registers -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum RegisterM68K { - INVALID = 0, - A0, - A1, - A2, - A3, - A4, - A5, - A6, - A7, - D0, - D1, - D2, - D3, - D4, - D5, - D6, - D7, - SR, - PC, -} diff --git a/bindings/rust/src/mips.rs b/bindings/rust/src/mips.rs deleted file mode 100644 index a1b590c6..00000000 --- a/bindings/rust/src/mips.rs +++ /dev/null @@ -1,246 +0,0 @@ -#![allow(non_camel_case_types)] - -// MIPS registers -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum RegisterMIPS { - INVALID = 0, - - // General purpose registers - PC = 1, - GPR0 = 2, - GPR1 = 3, - GPR2 = 4, - GPR3 = 5, - GPR4 = 6, - GPR5 = 7, - GPR6 = 8, - GPR7 = 9, - GPR8 = 10, - GPR9 = 11, - GPR10 = 12, - GPR11 = 13, - GPR12 = 14, - GPR13 = 15, - GPR14 = 16, - GPR15 = 17, - GPR16 = 18, - GPR17 = 19, - GPR18 = 20, - GPR19 = 21, - GPR20 = 22, - GPR21 = 23, - GPR22 = 24, - GPR23 = 25, - GPR24 = 26, - GPR25 = 27, - GPR26 = 28, - GPR27 = 29, - GPR28 = 30, - GPR29 = 31, - GPR30 = 32, - GPR31 = 33, - - // DSP registers - DSPCCOND = 34, - DSPCARRY = 35, - DSPEFI = 36, - DSPOUTFLAG = 37, - DSPOUTFLAG16_19 = 38, - DSPOUTFLAG20 = 39, - DSPOUTFLAG21 = 40, - DSPOUTFLAG22 = 41, - DSPOUTFLAG23 = 42, - DSPPOS = 43, - DSPSCOUNT = 44, - - // ACC registers - AC0 = 45, - AC1 = 46, - AC2 = 47, - AC3 = 48, - - // COP registers - CC0 = 49, - CC1 = 50, - CC2 = 51, - CC3 = 52, - CC4 = 53, - CC5 = 54, - CC6 = 55, - CC7 = 56, - - // FPU registers - F0 = 57, - F1 = 58, - F2 = 59, - F3 = 60, - F4 = 61, - F5 = 62, - F6 = 63, - F7 = 64, - F8 = 65, - F9 = 66, - F10 = 67, - F11 = 68, - F12 = 69, - F13 = 70, - F14 = 71, - F15 = 72, - F16 = 73, - F17 = 74, - F18 = 75, - F19 = 76, - F20 = 77, - F21 = 78, - F22 = 79, - F23 = 80, - F24 = 81, - F25 = 82, - F26 = 83, - F27 = 84, - F28 = 85, - F29 = 86, - F30 = 87, - F31 = 88, - FCC0 = 89, - FCC1 = 90, - FCC2 = 91, - FCC3 = 92, - FCC4 = 93, - FCC5 = 94, - FCC6 = 95, - FCC7 = 96, - - // AFPR128 - W0 = 97, - W1 = 98, - W2 = 99, - W3 = 100, - W4 = 101, - W5 = 102, - W6 = 103, - W7 = 104, - W8 = 105, - W9 = 106, - W10 = 107, - W11 = 108, - W12 = 109, - W13 = 110, - W14 = 111, - W15 = 112, - W16 = 113, - W17 = 114, - W18 = 115, - W19 = 116, - W20 = 117, - W21 = 118, - W22 = 119, - W23 = 120, - W24 = 121, - W25 = 122, - W26 = 123, - W27 = 124, - W28 = 125, - W29 = 126, - W30 = 127, - W31 = 128, - HI = 129, - LO = 130, - P0 = 131, - P1 = 132, - P2 = 133, - MPL0 = 134, - MPL1 = 135, - MPL2 = 136, - CP0_CONFIG3 = 137, - CP0_USERLOCAL = 138, - ENDING = 139, - - // alias registers - // (assoc) ZERO = 2, - // (assoc) AT = 3, - // (assoc) V0 = 4, - // (assoc) V1 = 5, - // (assoc) A0 = 6, - // (assoc) A1 = 7, - // (assoc) A2 = 8, - // (assoc) A3 = 9, - // (assoc) T0 = 10, - // (assoc) T1 = 11, - // (assoc) T2 = 12, - // (assoc) T3 = 13, - // (assoc) T4 = 14, - // (assoc) T5 = 15, - // (assoc) T6 = 16, - // (assoc) T7 = 17, - // (assoc) S0 = 18, - // (assoc) S1 = 19, - // (assoc) S2 = 20, - // (assoc) S3 = 21, - // (assoc) S4 = 22, - // (assoc) S5 = 23, - // (assoc) S6 = 24, - // (assoc) S7 = 25, - // (assoc) T8 
= 26, - // (assoc) T9 = 27, - // (assoc) K0 = 28, - // (assoc) K1 = 29, - // (assoc) GP = 30, - // (assoc) SP = 31, - // (assoc) FP = 32, - // (assoc) S8 = 32, - // (assoc) RA = 33, - // (assoc) HI0 = 45, - // (assoc) HI1 = 46, - // (assoc) HI2 = 47, - // (assoc) HI3 = 48, - // (assoc) LO0 = 45, - // (assoc) LO1 = 46, - // (assoc) LO2 = 47, - // (assoc) LO3 = 48, -} - -impl RegisterMIPS { - pub const ZERO: RegisterMIPS = RegisterMIPS::GPR0; - pub const AT: RegisterMIPS = RegisterMIPS::GPR1; - pub const V0: RegisterMIPS = RegisterMIPS::GPR2; - pub const V1: RegisterMIPS = RegisterMIPS::GPR3; - pub const A0: RegisterMIPS = RegisterMIPS::GPR4; - pub const A1: RegisterMIPS = RegisterMIPS::GPR5; - pub const A2: RegisterMIPS = RegisterMIPS::GPR6; - pub const A3: RegisterMIPS = RegisterMIPS::GPR7; - pub const T0: RegisterMIPS = RegisterMIPS::GPR8; - pub const T1: RegisterMIPS = RegisterMIPS::GPR9; - pub const T2: RegisterMIPS = RegisterMIPS::GPR10; - pub const T3: RegisterMIPS = RegisterMIPS::GPR11; - pub const T4: RegisterMIPS = RegisterMIPS::GPR12; - pub const T5: RegisterMIPS = RegisterMIPS::GPR13; - pub const T6: RegisterMIPS = RegisterMIPS::GPR14; - pub const T7: RegisterMIPS = RegisterMIPS::GPR15; - pub const S0: RegisterMIPS = RegisterMIPS::GPR16; - pub const S1: RegisterMIPS = RegisterMIPS::GPR17; - pub const S2: RegisterMIPS = RegisterMIPS::GPR18; - pub const S3: RegisterMIPS = RegisterMIPS::GPR19; - pub const S4: RegisterMIPS = RegisterMIPS::GPR20; - pub const S5: RegisterMIPS = RegisterMIPS::GPR21; - pub const S6: RegisterMIPS = RegisterMIPS::GPR22; - pub const S7: RegisterMIPS = RegisterMIPS::GPR23; - pub const T8: RegisterMIPS = RegisterMIPS::GPR24; - pub const T9: RegisterMIPS = RegisterMIPS::GPR25; - pub const K0: RegisterMIPS = RegisterMIPS::GPR26; - pub const K1: RegisterMIPS = RegisterMIPS::GPR27; - pub const GP: RegisterMIPS = RegisterMIPS::GPR28; - pub const SP: RegisterMIPS = RegisterMIPS::GPR29; - pub const FP: RegisterMIPS = RegisterMIPS::GPR30; - pub const S8: RegisterMIPS = RegisterMIPS::GPR30; - pub const RA: RegisterMIPS = RegisterMIPS::GPR31; - pub const HI0: RegisterMIPS = RegisterMIPS::AC0; - pub const HI1: RegisterMIPS = RegisterMIPS::AC1; - pub const HI2: RegisterMIPS = RegisterMIPS::AC2; - pub const HI3: RegisterMIPS = RegisterMIPS::AC3; - pub const LO0: RegisterMIPS = RegisterMIPS::AC0; - pub const LO1: RegisterMIPS = RegisterMIPS::AC1; - pub const LO2: RegisterMIPS = RegisterMIPS::AC2; - pub const LO3: RegisterMIPS = RegisterMIPS::AC3; -} diff --git a/bindings/rust/src/ppc.rs b/bindings/rust/src/ppc.rs deleted file mode 100644 index a28827d3..00000000 --- a/bindings/rust/src/ppc.rs +++ /dev/null @@ -1,42 +0,0 @@ -#![allow(non_camel_case_types)] -// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT - -// PowerPC registers -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum RegisterPPC { - INVALID = 0, - PC = 1, - GPR0 = 2, - GPR1 = 3, - GPR2 = 4, - GPR3 = 5, - GPR4 = 6, - GPR5 = 7, - GPR6 = 8, - GPR7 = 9, - GPR8 = 10, - GPR9 = 11, - GPR10 = 12, - GPR11 = 13, - GPR12 = 14, - GPR13 = 15, - GPR14 = 16, - GPR15 = 17, - GPR16 = 18, - GPR17 = 19, - GPR18 = 20, - GPR19 = 21, - GPR20 = 22, - GPR21 = 23, - GPR22 = 24, - GPR23 = 25, - GPR24 = 26, - GPR25 = 27, - GPR26 = 28, - GPR27 = 29, - GPR28 = 30, - GPR29 = 31, - GPR30 = 32, - GPR31 = 33, -} diff --git a/bindings/rust/src/sparc.rs b/bindings/rust/src/sparc.rs deleted file mode 100644 index 21e09db4..00000000 --- a/bindings/rust/src/sparc.rs +++ /dev/null @@ -1,94 +0,0 @@ -// SPARC registers -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum RegisterSPARC { - INVALID = 0, - F0 = 1, - F1 = 2, - F2 = 3, - F3 = 4, - F4 = 5, - F5 = 6, - F6 = 7, - F7 = 8, - F8 = 9, - F9 = 10, - F10 = 11, - F11 = 12, - F12 = 13, - F13 = 14, - F14 = 15, - F15 = 16, - F16 = 17, - F17 = 18, - F18 = 19, - F19 = 20, - F20 = 21, - F21 = 22, - F22 = 23, - F23 = 24, - F24 = 25, - F25 = 26, - F26 = 27, - F27 = 28, - F28 = 29, - F29 = 30, - F30 = 31, - F31 = 32, - F32 = 33, - F34 = 34, - F36 = 35, - F38 = 36, - F40 = 37, - F42 = 38, - F44 = 39, - F46 = 40, - F48 = 41, - F50 = 42, - F52 = 43, - F54 = 44, - F56 = 45, - F58 = 46, - F60 = 47, - F62 = 48, - FCC0 = 49, - FCC1 = 50, - FCC2 = 51, - FCC3 = 52, - G0 = 53, - G1 = 54, - G2 = 55, - G3 = 56, - G4 = 57, - G5 = 58, - G6 = 59, - G7 = 60, - I0 = 61, - I1 = 62, - I2 = 63, - I3 = 64, - I4 = 65, - I5 = 66, - FP = 67, - I7 = 68, - ICC = 69, - L0 = 70, - L1 = 71, - L2 = 72, - L3 = 73, - L4 = 74, - L5 = 75, - L6 = 76, - L7 = 77, - O0 = 78, - O1 = 79, - O2 = 80, - O3 = 81, - O4 = 82, - O5 = 83, - SP = 84, - O7 = 85, - Y = 86, - XCC = 87, - PC = 88, -} diff --git a/bindings/rust/src/unicorn_const.rs b/bindings/rust/src/unicorn_const.rs deleted file mode 100644 index 56b3f275..00000000 --- a/bindings/rust/src/unicorn_const.rs +++ /dev/null @@ -1,158 +0,0 @@ -#![allow(non_camel_case_types)] -use bitflags::bitflags; - -pub const API_MAJOR: u64 = 1; -pub const API_MINOR: u64 = 0; -pub const VERSION_MAJOR: u64 = 1; -pub const VERSION_MINOR: u64 = 0; -pub const VERSION_EXTRA: u64 = 2; -pub const SECOND_SCALE: u64 = 1_000_000; -pub const MILISECOND_SCALE: u64 = 1_000; - -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum uc_error { - OK = 0, - NOMEM = 1, - ARCH = 2, - HANDLE = 3, - MODE = 4, - VERSION = 5, - READ_UNMAPPED = 6, - WRITE_UNMAPPED = 7, - FETCH_UNMAPPED = 8, - HOOK = 9, - INSN_INVALID = 10, - MAP = 11, - WRITE_PROT = 12, - READ_PROT = 13, - FETCH_PROT = 14, - ARG = 15, - READ_UNALIGNED = 16, - WRITE_UNALIGNED = 17, - FETCH_UNALIGNED = 18, - HOOK_EXIST = 19, - RESOURCE = 20, - EXCEPTION = 21, -} - -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum MemType { - READ = 16, - WRITE = 17, - FETCH = 18, - READ_UNMAPPED = 19, - WRITE_UNMAPPED = 20, - FETCH_UNMAPPED = 21, - WRITE_PROT = 22, - READ_PROT = 23, - FETCH_PROT = 24, - READ_AFTER = 25, -} - -bitflags! 
{ - #[repr(C)] - pub struct HookType: i32 { - const INTR = 1; - const INSN = 2; - const CODE = 4; - const BLOCK = 8; - - const MEM_READ_UNMAPPED = 0x10; - const MEM_WRITE_UNMAPPED = 0x20; - const MEM_FETCH_UNMAPPED = 0x40; - const MEM_UNMAPPED = Self::MEM_READ_UNMAPPED.bits | Self::MEM_WRITE_UNMAPPED.bits | Self::MEM_FETCH_UNMAPPED.bits; - - const MEM_READ_PROT = 0x80; - const MEM_WRITE_PROT = 0x100; - const MEM_FETCH_PROT = 0x200; - const MEM_PROT = Self::MEM_READ_PROT.bits | Self::MEM_WRITE_PROT.bits | Self::MEM_FETCH_PROT.bits; - - const MEM_READ = 0x400; - const MEM_WRITE = 0x800; - const MEM_FETCH = 0x1000; - const MEM_VALID = Self::MEM_READ.bits | Self::MEM_WRITE.bits | Self::MEM_FETCH.bits; - - const MEM_READ_AFTER = 0x2000; - - const INSN_INVALID = 0x4000; - - const MEM_READ_INVALID = Self::MEM_READ_UNMAPPED.bits | Self::MEM_READ_PROT.bits; - const MEM_WRITE_INVALID = Self::MEM_WRITE_UNMAPPED.bits | Self::MEM_WRITE_PROT.bits; - const MEM_FETCH_INVALID = Self::MEM_FETCH_UNMAPPED.bits | Self::MEM_FETCH_PROT.bits; - const MEM_INVALID = Self::MEM_READ_INVALID.bits | Self::MEM_WRITE_INVALID.bits | Self::MEM_FETCH_INVALID.bits; - - const MEM_ALL = Self::MEM_VALID.bits | Self::MEM_INVALID.bits; - } -} - -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum Query { - MODE = 1, - PAGE_SIZE = 2, - ARCH = 3, -} - -bitflags! { -#[repr(C)] -pub struct Permission : u32 { - const NONE = 0; - const READ = 1; - const WRITE = 2; - const EXEC = 4; - const ALL = Self::READ.bits | Self::WRITE.bits | Self::EXEC.bits; - } -} - -#[repr(C)] -#[derive(Debug, Clone)] -pub struct MemRegion { - pub begin: u64, - pub end: u64, - pub perms: Permission, -} - -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum Arch { - ARM = 1, - ARM64 = 2, - MIPS = 3, - X86 = 4, - PPC = 5, - SPARC = 6, - M68K = 7, - MAX = 8, -} - -bitflags! 
{ - #[repr(C)] - pub struct Mode: i32 { - const LITTLE_ENDIAN = 0; - const BIG_ENDIAN = 0x4000_0000; - - const ARM = 0; - const THUMB = 0x10; - const MCLASS = 0x20; - const V8 = 0x40; - const ARM926 = 0x80; - const ARM946 = 0x100; - const ARM1176 = 0x200; - const MICRO = Self::THUMB.bits; - const MIPS3 = Self::MCLASS.bits; - const MIPS32R6 = Self::V8.bits; - const MIPS32 = 4; - const MIPS64 = 8; - const MODE_16 = 2; - const MODE_32 = Self::MIPS32.bits; - const MODE_64 = Self::MIPS64.bits; - const PPC32 = Self::MIPS32.bits; - const PPC64 = Self::MIPS64.bits; - const QPX = Self::THUMB.bits; - const SPARC32 = Self::MIPS32.bits; - const SPARC64 = Self::MIPS64.bits; - const V9 = Self::THUMB.bits; - } -} diff --git a/bindings/rust/src/x86.rs b/bindings/rust/src/x86.rs deleted file mode 100644 index 03c92176..00000000 --- a/bindings/rust/src/x86.rs +++ /dev/null @@ -1,281 +0,0 @@ -// X86 registers -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum RegisterX86 { - INVALID = 0, - AH, - AL, - AX, - BH, - BL, - BP, - BPL, - BX, - CH, - CL, - CS, - CX, - DH, - DI, - DIL, - DL, - DS, - DX, - EAX, - EBP, - EBX, - ECX, - EDI, - EDX, - EFLAGS, - EIP, - EIZ, - ES, - ESI, - ESP, - FPSW, - FS, - GS, - IP, - RAX, - RBP, - RBX, - RCX, - RDI, - RDX, - RIP, - RIZ, - RSI, - RSP, - SI, - SIL, - SP, - SPL, - SS, - CR0, - CR1, - CR2, - CR3, - CR4, - CR5, - CR6, - CR7, - CR8, - CR9, - CR10, - CR11, - CR12, - CR13, - CR14, - CR15, - DR0, - DR1, - DR2, - DR3, - DR4, - DR5, - DR6, - DR7, - DR8, - DR9, - DR10, - DR11, - DR12, - DR13, - DR14, - DR15, - FP0, - FP1, - FP2, - FP3, - FP4, - FP5, - FP6, - FP7, - K0, - K1, - K2, - K3, - K4, - K5, - K6, - K7, - MM0, - MM1, - MM2, - MM3, - MM4, - MM5, - MM6, - MM7, - R8, - R9, - R10, - R11, - R12, - R13, - R14, - R15, - ST0, - ST1, - ST2, - ST3, - ST4, - ST5, - ST6, - ST7, - XMM0, - XMM1, - XMM2, - XMM3, - XMM4, - XMM5, - XMM6, - XMM7, - XMM8, - XMM9, - XMM10, - XMM11, - XMM12, - XMM13, - XMM14, - XMM15, - XMM16, - XMM17, - XMM18, - XMM19, - XMM20, - XMM21, - XMM22, - XMM23, - XMM24, - XMM25, - XMM26, - XMM27, - XMM28, - XMM29, - XMM30, - XMM31, - YMM0, - YMM1, - YMM2, - YMM3, - YMM4, - YMM5, - YMM6, - YMM7, - YMM8, - YMM9, - YMM10, - YMM11, - YMM12, - YMM13, - YMM14, - YMM15, - YMM16, - YMM17, - YMM18, - YMM19, - YMM20, - YMM21, - YMM22, - YMM23, - YMM24, - YMM25, - YMM26, - YMM27, - YMM28, - YMM29, - YMM30, - YMM31, - ZMM0, - ZMM1, - ZMM2, - ZMM3, - ZMM4, - ZMM5, - ZMM6, - ZMM7, - ZMM8, - ZMM9, - ZMM10, - ZMM11, - ZMM12, - ZMM13, - ZMM14, - ZMM15, - ZMM16, - ZMM17, - ZMM18, - ZMM19, - ZMM20, - ZMM21, - ZMM22, - ZMM23, - ZMM24, - ZMM25, - ZMM26, - ZMM27, - ZMM28, - ZMM29, - ZMM30, - ZMM31, - R8B, - R9B, - R10B, - R11B, - R12B, - R13B, - R14B, - R15B, - R8D, - R9D, - R10D, - R11D, - R12D, - R13D, - R14D, - R15D, - R8W, - R9W, - R10W, - R11W, - R12W, - R13W, - R14W, - R15W, - IDTR, - GDTR, - LDTR, - TR, - FPCW, - FPTAG, - MSR, - MXCSR, -} - -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum InsnX86 { - IN = 218, - OUT = 500, - SYSCALL = 699, - SYSENTER = 700, - RET = 151, -} - -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub enum InsnSysX86 { - SYSCALL = InsnX86::SYSCALL as isize, - SYSENTER = InsnX86::SYSENTER as isize, -} - -#[repr(C)] -#[derive(PartialEq, Debug, Clone, Copy)] -pub struct X86Mmr { - pub selector: u64, - pub base: u64, - pub limit: u32, - pub flags: u32, -} diff --git a/bindings/rust/tests/unicorn.rs b/bindings/rust/tests/unicorn.rs deleted file mode 100644 index 203d3251..00000000 --- a/bindings/rust/tests/unicorn.rs +++ 
/dev/null @@ -1,683 +0,0 @@ -#![deny(rust_2018_idioms)] - -use std::cell::RefCell; -use std::rc::Rc; -use unicorn::unicorn_const::{uc_error, Arch, HookType, MemType, Mode, Permission, SECOND_SCALE}; -use unicorn::{InsnSysX86, RegisterARM, RegisterMIPS, RegisterPPC, RegisterX86}; - -pub static X86_REGISTERS: [RegisterX86; 145] = [ - RegisterX86::AH, - RegisterX86::AL, - RegisterX86::AX, - RegisterX86::BH, - RegisterX86::BL, - RegisterX86::BP, - RegisterX86::BPL, - RegisterX86::BX, - RegisterX86::CH, - RegisterX86::CL, - RegisterX86::CS, - RegisterX86::CX, - RegisterX86::DH, - RegisterX86::DI, - RegisterX86::DIL, - RegisterX86::DL, - RegisterX86::DS, - RegisterX86::DX, - RegisterX86::EAX, - RegisterX86::EBP, - RegisterX86::EBX, - RegisterX86::ECX, - RegisterX86::EDI, - RegisterX86::EDX, - RegisterX86::EFLAGS, - RegisterX86::EIP, - RegisterX86::EIZ, - RegisterX86::ES, - RegisterX86::ESI, - RegisterX86::ESP, - RegisterX86::FPSW, - RegisterX86::FS, - RegisterX86::GS, - RegisterX86::IP, - RegisterX86::RAX, - RegisterX86::RBP, - RegisterX86::RBX, - RegisterX86::RCX, - RegisterX86::RDI, - RegisterX86::RDX, - RegisterX86::RIP, - RegisterX86::RIZ, - RegisterX86::RSI, - RegisterX86::RSP, - RegisterX86::SI, - RegisterX86::SIL, - RegisterX86::SP, - RegisterX86::SPL, - RegisterX86::SS, - RegisterX86::CR0, - RegisterX86::CR1, - RegisterX86::CR2, - RegisterX86::CR3, - RegisterX86::CR4, - RegisterX86::CR5, - RegisterX86::CR6, - RegisterX86::CR7, - RegisterX86::CR8, - RegisterX86::CR9, - RegisterX86::CR10, - RegisterX86::CR11, - RegisterX86::CR12, - RegisterX86::CR13, - RegisterX86::CR14, - RegisterX86::CR15, - RegisterX86::DR0, - RegisterX86::DR1, - RegisterX86::DR2, - RegisterX86::DR3, - RegisterX86::DR4, - RegisterX86::DR5, - RegisterX86::DR6, - RegisterX86::DR7, - RegisterX86::DR8, - RegisterX86::DR9, - RegisterX86::DR10, - RegisterX86::DR11, - RegisterX86::DR12, - RegisterX86::DR13, - RegisterX86::DR14, - RegisterX86::DR15, - RegisterX86::FP0, - RegisterX86::FP1, - RegisterX86::FP2, - RegisterX86::FP3, - RegisterX86::FP4, - RegisterX86::FP5, - RegisterX86::FP6, - RegisterX86::FP7, - RegisterX86::K0, - RegisterX86::K1, - RegisterX86::K2, - RegisterX86::K3, - RegisterX86::K4, - RegisterX86::K5, - RegisterX86::K6, - RegisterX86::K7, - RegisterX86::MM0, - RegisterX86::MM1, - RegisterX86::MM2, - RegisterX86::MM3, - RegisterX86::MM4, - RegisterX86::MM5, - RegisterX86::MM6, - RegisterX86::MM7, - RegisterX86::R8, - RegisterX86::R9, - RegisterX86::R10, - RegisterX86::R11, - RegisterX86::R12, - RegisterX86::R13, - RegisterX86::R14, - RegisterX86::R15, - RegisterX86::ST0, - RegisterX86::ST1, - RegisterX86::ST2, - RegisterX86::ST3, - RegisterX86::ST4, - RegisterX86::ST5, - RegisterX86::ST6, - RegisterX86::ST7, - RegisterX86::R8B, - RegisterX86::R9B, - RegisterX86::R10B, - RegisterX86::R11B, - RegisterX86::R12B, - RegisterX86::R13B, - RegisterX86::R14B, - RegisterX86::R15B, - RegisterX86::R8D, - RegisterX86::R9D, - RegisterX86::R10D, - RegisterX86::R11D, - RegisterX86::R12D, - RegisterX86::R13D, - RegisterX86::R14D, - RegisterX86::R15D, - RegisterX86::R8W, - RegisterX86::R9W, - RegisterX86::R10W, - RegisterX86::R11W, - RegisterX86::R12W, - RegisterX86::R13W, - RegisterX86::R14W, - RegisterX86::R15W, -]; - -type Unicorn<'a> = unicorn::UnicornHandle<'a>; - -#[test] -fn emulate_x86() { - let x86_code32: Vec = vec![0x41, 0x4a]; // INC ecx; DEC edx - - let mut unicorn = unicorn::Unicorn::new(Arch::X86, Mode::MODE_32) - .expect("failed to initialize unicorn instance"); - let mut emu = unicorn.borrow(); - 
assert_eq!(emu.reg_write(RegisterX86::EAX as i32, 123), Ok(())); - assert_eq!(emu.reg_read(RegisterX86::EAX as i32), Ok(123)); - - // Attempt to write to memory before mapping it. - assert_eq!( - emu.mem_write(0x1000, &x86_code32), - (Err(uc_error::WRITE_UNMAPPED)) - ); - - assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); - assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); - assert_eq!( - emu.mem_read_as_vec(0x1000, x86_code32.len()), - Ok(x86_code32.clone()) - ); - - assert_eq!(emu.reg_write(RegisterX86::ECX as i32, 10), Ok(())); - assert_eq!(emu.reg_write(RegisterX86::EDX as i32, 50), Ok(())); - - assert_eq!( - emu.emu_start( - 0x1000, - (0x1000 + x86_code32.len()) as u64, - 10 * SECOND_SCALE, - 1000 - ), - Ok(()) - ); - assert_eq!(emu.reg_read(RegisterX86::ECX as i32), Ok(11)); - assert_eq!(emu.reg_read(RegisterX86::EDX as i32), Ok(49)); -} - -#[test] -fn x86_code_callback() { - #[derive(PartialEq, Debug)] - struct CodeExpectation(u64, u32); - let expects = vec![CodeExpectation(0x1000, 1), CodeExpectation(0x1001, 1)]; - let codes: Vec = Vec::new(); - let codes_cell = Rc::new(RefCell::new(codes)); - - let callback_codes = codes_cell.clone(); - let callback = move |_: Unicorn<'_>, address: u64, size: u32| { - let mut codes = callback_codes.borrow_mut(); - codes.push(CodeExpectation(address, size)); - }; - - let x86_code32: Vec = vec![0x41, 0x4a]; // INC ecx; DEC edx - - let mut unicorn = unicorn::Unicorn::new(Arch::X86, Mode::MODE_32) - .expect("failed to initialize unicorn instance"); - let mut emu = unicorn.borrow(); - assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); - assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); - - let hook = emu - .add_code_hook(0x1000, 0x2000, callback) - .expect("failed to add code hook"); - assert_eq!( - emu.emu_start(0x1000, 0x1002, 10 * SECOND_SCALE, 1000), - Ok(()) - ); - assert_eq!(expects, *codes_cell.borrow()); - assert_eq!(emu.remove_hook(hook), Ok(())); -} - -#[test] -fn x86_intr_callback() { - #[derive(PartialEq, Debug)] - struct IntrExpectation(u32); - let expect = IntrExpectation(0x80); - let intr_cell = Rc::new(RefCell::new(IntrExpectation(0))); - - let callback_intr = intr_cell.clone(); - let callback = move |_: Unicorn<'_>, intno: u32| { - *callback_intr.borrow_mut() = IntrExpectation(intno); - }; - - let x86_code32: Vec = vec![0xcd, 0x80]; // INT 0x80; - - let mut unicorn = unicorn::Unicorn::new(Arch::X86, Mode::MODE_32) - .expect("failed to initialize unicorn instance"); - let mut emu = unicorn.borrow(); - assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(())); - assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(())); - - let hook = emu - .add_intr_hook(callback) - .expect("failed to add intr hook"); - - assert_eq!( - emu.emu_start( - 0x1000, - 0x1000 + x86_code32.len() as u64, - 10 * SECOND_SCALE, - 1000 - ), - Ok(()) - ); - assert_eq!(expect, *intr_cell.borrow()); - assert_eq!(emu.remove_hook(hook), Ok(())); -} - -#[test] -fn x86_mem_callback() { - #[derive(PartialEq, Debug)] - struct MemExpectation(MemType, u64, usize, i64); - let expects = vec![ - MemExpectation(MemType::WRITE, 0x2000, 4, 0xdeadbeef), - MemExpectation(MemType::READ_UNMAPPED, 0x10000, 4, 0), - MemExpectation(MemType::READ, 0x10000, 4, 0), - ]; - let mems: Vec = Vec::new(); - let mems_cell = Rc::new(RefCell::new(mems)); - - let callback_mems = mems_cell.clone(); - let callback = - move |_: Unicorn<'_>, mem_type: MemType, address: u64, size: usize, value: i64| { - let mut mems = callback_mems.borrow_mut(); - 
-
-#[test]
-fn x86_insn_in_callback() {
-    #[derive(PartialEq, Debug)]
-    struct InsnInExpectation(u32, usize);
-    let expect = InsnInExpectation(0x10, 4);
-    let insn_cell = Rc::new(RefCell::new(InsnInExpectation(0, 0)));
-
-    let callback_insn = insn_cell.clone();
-    let callback = move |_: Unicorn<'_>, port: u32, size: usize| {
-        *callback_insn.borrow_mut() = InsnInExpectation(port, size);
-    };
-
-    let x86_code32: Vec<u8> = vec![0xe5, 0x10]; // IN eax, 0x10;
-
-    let mut unicorn = unicorn::Unicorn::new(Arch::X86, Mode::MODE_32)
-        .expect("failed to initialize unicorn instance");
-    let mut emu = unicorn.borrow();
-    assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(()));
-    assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(()));
-
-    let hook = emu
-        .add_insn_in_hook(callback)
-        .expect("failed to add in hook");
-
-    assert_eq!(
-        emu.emu_start(
-            0x1000,
-            0x1000 + x86_code32.len() as u64,
-            10 * SECOND_SCALE,
-            1000
-        ),
-        Ok(())
-    );
-    assert_eq!(expect, *insn_cell.borrow());
-    assert_eq!(emu.remove_hook(hook), Ok(()));
-}
-
-#[test]
-fn x86_insn_out_callback() {
-    #[derive(PartialEq, Debug)]
-    struct InsnOutExpectation(u32, usize, u32);
-    let expect = InsnOutExpectation(0x46, 1, 0x32);
-    let insn_cell = Rc::new(RefCell::new(InsnOutExpectation(0, 0, 0)));
-
-    let callback_insn = insn_cell.clone();
-    let callback = move |_: Unicorn<'_>, port: u32, size: usize, value: u32| {
-        *callback_insn.borrow_mut() = InsnOutExpectation(port, size, value);
-    };
-
-    let x86_code32: Vec<u8> = vec![0xb0, 0x32, 0xe6, 0x46]; // MOV al, 0x32; OUT 0x46, al;
-
-    let mut unicorn = unicorn::Unicorn::new(Arch::X86, Mode::MODE_32)
-        .expect("failed to initialize unicorn instance");
-    let mut emu = unicorn.borrow();
-    assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(()));
-    assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(()));
-
-    let hook = emu
-        .add_insn_out_hook(callback)
-        .expect("failed to add out hook");
-
-    assert_eq!(
-        emu.emu_start(
-            0x1000,
-            0x1000 + x86_code32.len() as u64,
-            10 * SECOND_SCALE,
-            1000
-        ),
-        Ok(())
-    );
-    assert_eq!(expect, *insn_cell.borrow());
-    assert_eq!(emu.remove_hook(hook), Ok(()));
-}
-
-#[test]
-fn x86_insn_sys_callback() {
-    #[derive(PartialEq, Debug)]
-    struct InsnSysExpectation(u64);
-    let expect = InsnSysExpectation(0xdeadbeef);
-    let insn_cell = Rc::new(RefCell::new(InsnSysExpectation(0)));
-
-    let callback_insn = insn_cell.clone();
-    let callback = move |uc: Unicorn<'_>| {
-        println!("!!!!");
-        let rax = uc.reg_read(RegisterX86::RAX as i32).unwrap();
-        *callback_insn.borrow_mut() = InsnSysExpectation(rax);
-    };
-
-    // MOV rax, 0xdeadbeef; SYSCALL;
-    let x86_code: Vec<u8> = vec![
-        0x48, 0xB8, 0xEF, 0xBE, 0xAD, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x05,
-    ];
-
-    let mut unicorn = unicorn::Unicorn::new(Arch::X86, Mode::MODE_64)
-        .expect("failed to initialize unicorn instance");
-    let mut emu = unicorn.borrow();
-    assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(()));
-    assert_eq!(emu.mem_write(0x1000, &x86_code), Ok(()));
-
-    let hook = emu
-        .add_insn_sys_hook(InsnSysX86::SYSCALL, 1, 0, callback)
-        .expect("failed to add syscall hook");
-
-    assert_eq!(
-        emu.emu_start(
-            0x1000,
-            0x1000 + x86_code.len() as u64,
-            10 * SECOND_SCALE,
-            1000
-        ),
-        Ok(())
-    );
-    assert_eq!(expect, *insn_cell.borrow());
-    assert_eq!(emu.remove_hook(hook), Ok(()));
-}
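// x86_insn_sys_callback above runs under Mode::MODE_64: the code moves
// 0xdeadbeef into RAX and executes SYSCALL, and the hook closure reads RAX
// back through the UnicornHandle it receives.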
-
-#[test]
-fn emulate_arm() {
-    let arm_code32: Vec<u8> = vec![0x83, 0xb0]; // sub sp, #0xc
-
-    let mut unicorn = unicorn::Unicorn::new(Arch::ARM, Mode::THUMB)
-        .expect("failed to initialize unicorn instance");
-    let mut emu = unicorn.borrow();
-    assert_eq!(emu.reg_write(RegisterARM::R1 as i32, 123), Ok(()));
-    assert_eq!(emu.reg_read(RegisterARM::R1 as i32), Ok(123));
-
-    // Attempt to write to memory before mapping it.
-    assert_eq!(
-        emu.mem_write(0x1000, &arm_code32),
-        (Err(uc_error::WRITE_UNMAPPED))
-    );
-
-    assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(()));
-    assert_eq!(emu.mem_write(0x1000, &arm_code32), Ok(()));
-    assert_eq!(
-        emu.mem_read_as_vec(0x1000, arm_code32.len()),
-        Ok(arm_code32.clone())
-    );
-
-    assert_eq!(emu.reg_write(RegisterARM::SP as i32, 12), Ok(()));
-    assert_eq!(emu.reg_write(RegisterARM::R0 as i32, 10), Ok(()));
-
-    // ARM checks the least significant bit of the address to know
-    // if the code is in Thumb mode.
-    assert_eq!(
-        emu.emu_start(
-            0x1000 | 0x01,
-            (0x1000 | (0x01 + arm_code32.len())) as u64,
-            10 * SECOND_SCALE,
-            1000
-        ),
-        Ok(())
-    );
-    assert_eq!(emu.reg_read(RegisterARM::SP as i32), Ok(0));
-    assert_eq!(emu.reg_read(RegisterARM::R0 as i32), Ok(10));
-}
-
-#[test]
-fn emulate_mips() {
-    let mips_code32 = vec![0x56, 0x34, 0x21, 0x34]; // ori $at, $at, 0x3456;
-
-    let mut unicorn = unicorn::Unicorn::new(Arch::MIPS, Mode::MODE_32)
-        .expect("failed to initialize unicorn instance");
-    let mut emu = unicorn.borrow();
-    assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(()));
-    assert_eq!(emu.mem_write(0x1000, &mips_code32), Ok(()));
-    assert_eq!(
-        emu.mem_read_as_vec(0x1000, mips_code32.len()),
-        Ok(mips_code32.clone())
-    );
-    assert_eq!(emu.reg_write(RegisterMIPS::AT as i32, 0), Ok(()));
-    assert_eq!(
-        emu.emu_start(
-            0x1000,
-            (0x1000 + mips_code32.len()) as u64,
-            10 * SECOND_SCALE,
-            1000
-        ),
-        Ok(())
-    );
-    assert_eq!(emu.reg_read(RegisterMIPS::AT as i32), Ok(0x3456));
-}
-
-#[test]
-fn emulate_ppc() {
-    let ppc_code32 = vec![0x7F, 0x46, 0x1A, 0x14]; // add 26, 6, 3
-
-    let mut unicorn = unicorn::Unicorn::new(Arch::PPC, Mode::PPC32)
-        .expect("failed to initialize unicorn instance");
-    let mut emu = unicorn.borrow();
-    assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(()));
-    assert_eq!(emu.mem_write(0x1000, &ppc_code32), Ok(()));
-    assert_eq!(
-        emu.mem_read_as_vec(0x1000, ppc_code32.len()),
-        Ok(ppc_code32.clone())
-    );
-    assert_eq!(emu.reg_write(RegisterPPC::GPR3 as i32, 42), Ok(()));
-    assert_eq!(emu.reg_write(RegisterPPC::GPR6 as i32, 1337), Ok(()));
-    assert_eq!(
-        emu.emu_start(
-            0x1000,
-            (0x1000 + ppc_code32.len()) as u64,
-            10 * SECOND_SCALE,
-            1000
-        ),
-        Ok(())
-    );
-    assert_eq!(emu.reg_read(RegisterPPC::GPR26 as i32), Ok(1379));
-}
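// Of the three architecture smoke tests above, only ARM needs address tagging:
// the low bit of the start address (0x1000 | 0x01) selects Thumb state, while
// the MIPS and PPC runs start at the mapped address as-is.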
-
-#[test]
-fn mem_unmapping() {
-    let mut unicorn = unicorn::Unicorn::new(Arch::X86, Mode::MODE_32)
-        .expect("failed to initialize unicorn instance");
-    let mut emu = unicorn.borrow();
-    assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(()));
-    assert_eq!(emu.mem_unmap(0x1000, 0x4000), Ok(()));
-}
-
-#[test]
-fn mem_map_ptr() {
-    // Use an array for the emulator memory.
-    let mut mem: [u8; 4000] = [0; 4000];
-    let x86_code32: Vec<u8> = vec![0x41, 0x4a]; // INC ecx; DEC edx
-
-    let mut unicorn = unicorn::Unicorn::new(Arch::X86, Mode::MODE_32)
-        .expect("failed to initialize unicorn instance");
-    let mut emu = unicorn.borrow();
-
-    // Attempt to write to memory before mapping it.
-    assert_eq!(
-        emu.mem_write(0x1000, &x86_code32),
-        (Err(uc_error::WRITE_UNMAPPED))
-    );
-
-    assert_eq!(
-        emu.mem_map_ptr(0x1000, 0x4000, Permission::ALL, mem.as_mut_ptr() as _),
-        Ok(())
-    );
-    assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(()));
-    assert_eq!(
-        emu.mem_read_as_vec(0x1000, x86_code32.len()),
-        Ok(x86_code32.clone())
-    );
-
-    assert_eq!(emu.reg_write(RegisterX86::ECX as i32, 10), Ok(()));
-    assert_eq!(emu.reg_write(RegisterX86::EDX as i32, 50), Ok(()));
-
-    assert_eq!(
-        emu.emu_start(
-            0x1000,
-            (0x1000 + x86_code32.len()) as u64,
-            10 * SECOND_SCALE,
-            1000
-        ),
-        Ok(())
-    );
-    assert_eq!(emu.reg_read(RegisterX86::ECX as i32), Ok(11));
-    assert_eq!(emu.reg_read(RegisterX86::EDX as i32), Ok(49));
-    assert_eq!(emu.mem_unmap(0x1000, 0x4000), Ok(()));
-
-    // Use a Vec for the emulator memory.
-    let mut mem: Vec<u8> = Vec::new();
-    mem.reserve(4000);
-
-    // Attempt to write to memory before mapping it.
-    assert_eq!(
-        emu.mem_write(0x1000, &x86_code32),
-        (Err(uc_error::WRITE_UNMAPPED))
-    );
-
-    assert_eq!(
-        emu.mem_map_ptr(0x1000, 0x4000, Permission::ALL, mem.as_mut_ptr() as _),
-        Ok(())
-    );
-    assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(()));
-    assert_eq!(
-        emu.mem_read_as_vec(0x1000, x86_code32.len()),
-        Ok(x86_code32.clone())
-    );
-
-    assert_eq!(emu.reg_write(RegisterX86::ECX as i32, 10), Ok(()));
-    assert_eq!(emu.reg_write(RegisterX86::EDX as i32, 50), Ok(()));
-
-    assert_eq!(
-        emu.emu_start(
-            0x1000,
-            (0x1000 + x86_code32.len()) as u64,
-            10 * SECOND_SCALE,
-            1000
-        ),
-        Ok(())
-    );
-    assert_eq!(emu.reg_read(RegisterX86::ECX as i32), Ok(11));
-    assert_eq!(emu.reg_read(RegisterX86::EDX as i32), Ok(49));
-    assert_eq!(emu.mem_unmap(0x1000, 0x4000), Ok(()));
-}
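// mem_map_ptr above backs guest memory with host-owned storage, first a fixed
// array and then a Vec's buffer, so the same two-instruction program runs
// against two different allocations before each mapping is removed again.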
-
-#[test]
-fn x86_context_save_and_restore() {
-    for mode in vec![Mode::MODE_32, Mode::MODE_64] {
-        let x86_code: Vec<u8> = vec![
-            0x48, 0xB8, 0xEF, 0xBE, 0xAD, 0xDE, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x05,
-        ];
-        let mut unicorn = unicorn::Unicorn::new(Arch::X86, mode)
-            .expect("failed to initialize unicorn instance");
-        let mut emu = unicorn.borrow();
-        assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(()));
-        assert_eq!(emu.mem_write(0x1000, &x86_code), Ok(()));
-        let _ = emu.emu_start(
-            0x1000,
-            (0x1000 + x86_code.len()) as u64,
-            10 * SECOND_SCALE,
-            1000,
-        );
-
-        /* now, save the context... */
-        let context = emu.context_init();
-        let context = context.unwrap();
-
-        /* and create a new emulator, into which we will "restore" that context */
-        let mut unicorn2 = unicorn::Unicorn::new(Arch::X86, mode)
-            .expect("failed to initialize unicorn instance");
-        let emu2 = unicorn2.borrow();
-        assert_eq!(emu2.context_restore(&context), Ok(()));
-        for register in X86_REGISTERS.iter() {
-            println!("Testing register {:?}", register);
-            assert_eq!(
-                emu2.reg_read(*register as i32),
-                emu.reg_read(*register as i32)
-            );
-        }
-    }
-}
-
-#[test]
-fn x86_block_callback() {
-    #[derive(PartialEq, Debug)]
-    struct BlockExpectation(u64, u32);
-    let expects = vec![BlockExpectation(0x1000, 2), BlockExpectation(0x1000, 2)];
-    let blocks: Vec<BlockExpectation> = Vec::new();
-    let blocks_cell = Rc::new(RefCell::new(blocks));
-
-    let callback_blocks = blocks_cell.clone();
-    let callback = move |_: Unicorn<'_>, address: u64, size: u32| {
-        let mut blocks = callback_blocks.borrow_mut();
-        blocks.push(BlockExpectation(address, size));
-    };
-
-    let x86_code32: Vec<u8> = vec![0x41, 0x4a]; // INC ecx; DEC edx
-
-    let mut unicorn = unicorn::Unicorn::new(Arch::X86, Mode::MODE_32)
-        .expect("failed to initialize unicorn instance");
-    let mut emu = unicorn.borrow();
-    assert_eq!(emu.mem_map(0x1000, 0x4000, Permission::ALL), Ok(()));
-    assert_eq!(emu.mem_write(0x1000, &x86_code32), Ok(()));
-
-    let hook = emu
-        .add_block_hook(callback)
-        .expect("failed to add block hook");
-    assert_eq!(
-        emu.emu_start(0x1000, 0x1002, 10 * SECOND_SCALE, 1000),
-        Ok(())
-    );
-    assert_eq!(expects, *blocks_cell.borrow());
-    assert_eq!(emu.remove_hook(hook), Ok(()));
-}
diff --git a/bindings/vb6/uc_def.bas b/bindings/vb6/uc_def.bas
index 823819cd..a8ac2d2e 100644
--- a/bindings/vb6/uc_def.bas
+++ b/bindings/vb6/uc_def.bas
@@ -59,7 +59,8 @@ Public Enum uc_arch
     UC_ARCH_PPC = 5      ' PowerPC architecture (currently unsupported)
     UC_ARCH_SPARC = 6    ' Sparc architecture
     UC_ARCH_M68K = 7     ' M68K architecture
-    UC_ARCH_MAX = 8
+    UC_ARCH_RISCV = 8    ' RISCV architecture
+    UC_ARCH_MAX = 9
 End Enum
 
 Public Enum uc_prot
@@ -130,6 +131,8 @@ Public Enum uc_mode 'from /bindings/dotnet/common.fs
 ' UC_MODE_SPARC32 = 4       '32-bit mode
 ' UC_MODE_SPARC64 = 8       '64-bit mode
 ' UC_MODE_V9 = 16           'SparcV9 mode (currently unsupported)
+' UC_MODE_RISCV32 = 4       '32-bit mode
+' UC_MODE_RISCV64 = 8       '64-bit mode
 End Enum
 
 Public Enum uc_hook_type 'from /bindings/dotnet/common.fs
diff --git a/cmake.sh b/cmake.sh
index 7806a0e1..23af8f91 100755
--- a/cmake.sh
+++ b/cmake.sh
@@ -1,17 +1,40 @@
 #!/bin/sh
 # Unicorn Emulator Engine (www.unicorn-engine.org)
-# Usage: cmake.sh [x86] [arm] [aarch64] [m68k] [mips] [sparc]
+# Usage: cmake.sh [mingw|msys] [x86] [arm] [aarch64] [m68k] [mips] [sparc] [ppc] [riscv]
 # By chenhuitao 2019
 
-FLAGS="-DCMAKE_BUILD_TYPE=Release"
+# FLAGS="-DCMAKE_BUILD_TYPE=Release"
+FLAGS="-DCMAKE_BUILD_TYPE=Debug"
+TOOLCHAIN=""
+GENERATOR="Unix Makefiles"
+CMAKE="cmake"
+COMPILER=""
 
-UNICORN_ARCH="${*}"
+# process arguments
+case "$1" in
+  "mingw" )
+    TOOLCHAIN="-DCMAKE_TOOLCHAIN_FILE=../mingw-w64.cmake"
+    shift
+    UNICORN_ARCH="${*}";;
+  "msys" )
+    shift
+    UNICORN_ARCH="${*}"
+    CMAKE="/mingw64/bin/cmake"
+    GENERATOR="MSYS Makefiles";;
+  * )
+    UNICORN_ARCH="${*}";;
+esac
 
-if [ -z "${UNICORN_ARCH}" ]; then
-  cmake "${FLAGS}" ..
-else
-  cmake "${FLAGS}" "-DUNICORN_ARCH=${UNICORN_ARCH}" ..
+if [ -n "${COMPILER}" ]; then
+  TOOLCHAIN="${TOOLCHAIN} -DCMAKE_C_COMPILER=${COMPILER}"
 fi
+
+if [ -z "${UNICORN_ARCH}" ]; then
+  ${CMAKE} "${FLAGS}" ${TOOLCHAIN} -G "${GENERATOR}" ..
+else + ${CMAKE} "${FLAGS}" ${TOOLCHAIN} "-DUNICORN_ARCH=${UNICORN_ARCH}" -G "${GENERATOR}" .. +fi + +# now build make -j8 diff --git a/config.mk b/config.mk deleted file mode 100644 index c3621fb9..00000000 --- a/config.mk +++ /dev/null @@ -1,30 +0,0 @@ -# Unicorn Emulator Engine -# By Nguyen Anh Quynh, 2015 - -# This file contains all customized compile options for Unicorn emulator. -# Consult docs/COMPILE.md & docs/README.md for more details. - -################################################################################ -# Compile with debug info when you want to debug code. -# Change this to 'no' for release edition. - -UNICORN_DEBUG ?= yes - -################################################################################ -# Specify which archs you want to compile in. By default, we build all archs. - -UNICORN_ARCHS ?= x86 m68k arm aarch64 mips sparc - - -################################################################################ -# Change 'UNICORN_STATIC = yes' to 'UNICORN_STATIC = no' to avoid building -# a static library. - -UNICORN_STATIC ?= yes - - -################################################################################ -# Change 'UNICORN_SHARED = yes' to 'UNICORN_SHARED = no' to avoid building -# a shared library. - -UNICORN_SHARED ?= yes diff --git a/docs/COMPILE-CMAKE.md b/docs/COMPILE-CMAKE.md deleted file mode 100644 index 6e8be6cc..00000000 --- a/docs/COMPILE-CMAKE.md +++ /dev/null @@ -1,57 +0,0 @@ -This documentation explains how to compile Unicorn with CMake on Windows or -*nix. - ----- - -Requirements: - -- Windows: MicroSoft Visual Studio(>=2013). -- *nix: GNU gcc or clang to generate dynamic source files. - -Get CMake for free from http://www.cmake.org. - - -[1] To build Unicorn using Nmake of Windows SDK, do: - - mkdir build - cd build - ..\nmake.bat - - After this, find the samples test*.exe, unicorn.lib & unicorn.dll - in the same directory. - - -- To build Unicorn using Visual Studio, choose the generator accordingly to the - version of Visual Studio on your machine. For example, with Visual Studio 2013, do: - - mkdir build - cd build - cmake -G "Visual Studio 12" .. - - After this, find unicorn.sln in the same directory. Open it with Visual Studio - and build the solution including libraries & all test as usual. - - -[2] You can make sure the prior steps successfully worked by launching one of the - sample binary (sample_*.exe). - - -[3] You can also enable just one specific architecture by passing the architecture name - to either the cmake.sh or nmake.bat scripts. e.g.: - - ..\nmake.bat x86 - - Will just target the x86 architecture. The list of available architectures are: - X86 ARM AARCH64 M68K MIPS SPARC. - - -[4] You can also create an installation image with cmake, by using the 'install' target. - Use: - - cmake --build . --config Release --target install - - This will normally install an image in a default location (on MacOS and Linux, but this is not supported - on Windows). So in case you want to change the install location, set this when configuring CMake. - Use: `-DCMAKE_INSTALL_PREFIX=path` for instance, to put the installation in the 'path' subdirectory of - the build directory. - The default value of 'CMAKE_INSTALL_PREFIX' on *nix is '/usr/local'. diff --git a/docs/COMPILE-NIX.md b/docs/COMPILE-NIX.md deleted file mode 100644 index 6964d681..00000000 --- a/docs/COMPILE-NIX.md +++ /dev/null @@ -1,164 +0,0 @@ -This documentation explains how to compile, install & run Unicorn on MacOSX, -Linux, BSD, Solaris, Android & iOS. 
- -To compile for Microsoft Windows, see [COMPILE-WINDOWS.md](COMPILE-WINDOWS.md) - ----- - -[1] Tailor Unicorn to your need. - -Out of 6 archtitectures supported by Unicorn (Arm, Arm64, M68K, Mips, Sparc, -& X86), if you just need several selected archs, choose which ones you want -to compile in by editing "config.mk" before going to next steps. - -By default, all 6 architectures are compiled. If this is what you want, skip -to the section 2. - -The other way of customize Unicorn without having to edit config.mk is to -pass the desired options on the commandline to ./make.sh. Currently, -Unicorn supports 4 options, as follows. - - - UNICORN_ARCHS: specify list of architectures to compiled in. - - UNICORN_STATIC: build static library. - - UNICORN_SHARED: build dynamic (shared) library. - - UNICORN_QEMU_FLAGS: specify extra flags for qemu's configure script - -To avoid editing config.mk for these customization, we can pass their values to -make.sh, as follows. - - $ UNICORN_ARCHS="arm aarch64 x86" ./make.sh - -NOTE: on commandline, put these values in front of ./make.sh, not after it. - -For each option, refer to docs/README for more details. - - - -[2] Compile and install from source on *nix - -To build Unicorn on *nix (such as MacOSX, Linux, *BSD, Solaris): - -- To compile for current platform, run: - - $ ./make.sh - - On Mac OS, to build non-universal binaries that includes only 64-bit code, - replace above command with: - - $ ./make.sh macos-universal-no - -- To cross-compile Unicorn on 64-bit Linux to target 32-bit binary, - cross-compile to 32-bit with: - - $ ./make.sh linux32 - - After compiling, install Unicorn with: - - $ sudo ./make.sh install - - For FreeBSD/OpenBSD, where sudo is unavailable, run: - - $ su; ./make.sh install - - Users are then required to enter root password to copy Unicorn into machine - system directories. - - Afterwards, run ./samples/sample_all.sh to test the sample emulations. - - - NOTE: The core framework installed by "./make.sh install" consist of - following files: - - /usr/include/unicorn/unicorn.h - /usr/include/unicorn/x86.h - /usr/include/unicorn/arm.h - /usr/include/unicorn/arm64.h - /usr/include/unicorn/mips.h - /usr/include/unicorn/ppc.h - /usr/include/unicorn/sparc.h - /usr/include/unicorn/m68k.h - /usr/lib/libunicorn.so (for Linux/*nix), or /usr/lib/libunicorn.dylib (OSX) - /usr/lib/libunicorn.a - - - -[3] Cross-compile for iOS from macOS. - -To cross-compile for iOS (iPhone/iPad/iPod), macOS with Xcode installed is required. - -- To cross-compile for iOS ArmV7 (iPod 4, iPad 1/2/3, iPhone4, iPhone4S), run: - - $ ./make.sh ios_armv7 - -- To cross-compile for iOS ArmV7s (iPad 4, iPhone 5C, iPad mini), run: - - $ ./make.sh ios_armv7s - -- To cross-compile for iOS Arm64 (iPhone 5S, iPad mini Retina, iPad Air), run: - - $ ./make.sh ios_arm64 - -- To cross-compile for all iOS devices (armv7 + armv7s + arm64), run: - - $ ./make.sh ios - -Resulted files libunicorn.dylib, libunicorn.a & tests/test* can then -be used on iOS devices. - - - -[4] Cross-compile for Android - -To cross-compile for Android (smartphone/tablet), Android NDK is required. - -- To cross-compile for Android Arm, run: - - $ NDK=~/android/android-ndk-r20 ./make.sh cross-android_arm - -- To cross-compile for Android Arm64, run: - - $ NDK=~/android/android-ndk-r20 ./make.sh cross-android_arm64 - -Resulted files libunicorn.so, libunicorn.a & tests/test* can then -be used on Android devices. - - - -[5] By default, "cc" (default C compiler on the system) is used as compiler. 
- -- To use "clang" compiler instead, run the command below: - - $ ./make.sh clang - -- To use "gcc" compiler instead, run: - - $ ./make.sh gcc - - - -[6] To uninstall Unicorn, run the command below: - - $ sudo ./make.sh uninstall - - - -[7] Language bindings - -Look for the bindings under directory bindings/, and refer to README file -of corresponding languages. - - - -[8] Unit tests - -Mac OS X users will also need the GNU version of binutils (for gobjcopy). -It can be easily installed with Homebrew: `brew install binutils`. - -Automated unit tests use the cmocka unit testing framework (https://cmocka.org/). -It can be installed in most Linux distros using the package manager, e.g. -`sudo yum install libcmocka libcmocka-devel`. -On Mac OS X with Homebrew: `brew install cmocka`. -You can also easily build and install it from source. - -You can run the tests by running `make test` in the project directory. If you don't -build some architecture support then the corresponding tests will fail when run. diff --git a/docs/COMPILE-WINDOWS.md b/docs/COMPILE-WINDOWS.md deleted file mode 100644 index 94744bed..00000000 --- a/docs/COMPILE-WINDOWS.md +++ /dev/null @@ -1,184 +0,0 @@ -To build Unicorn on Windows natively using Visual Studio, see docs under "msvc" -directory in root directory. - -The rest of this manual shows how to cross-compile Unicorn for Windows using -either MingW or Msys2. - -To compile for Linux, Mac OS X and Unix-based OS, see [COMPILE-NIX.md](COMPILE-NIX.md) - ---- - - -[0] Dependencies - -For Windows, cross-compile requires Mingw. At the moment, it is confirmed that -Unicorn can be compiled either on Ubuntu or Windows. - -- On Ubuntu 14.04 64-bit, do: - - - Download DEB packages for Mingw64 from: - - https://launchpad.net/~greg-hellings/+archive/ubuntu/mingw-libs/+build/2924251 - - -- On Windows, install MinGW via package MSYS2 at https://msys2.github.io/ - - Follow the install instructions and don't forget to update the system packages with: - - $ pacman --needed -Sy bash pacman pacman-mirrors msys2-runtime - - Then close MSYS2, run it again from Start menu and update the rest with: - - $ pacman -Su - - Finally, install required toolchain to build C projects. - - - To compile for Windows 32-bit, run: - - $ pacman -S make - $ pacman -S mingw-w64-i686-toolchain - - - To compile for Windows 64-bit, run: - - $ pacman -S make - $ pacman -S mingw-w64-x86_64-toolchain - -- For Cygwin, "make", "gcc-core", "libpcre-devel", "zlib-devel" are needed. - - If apt-cyg is available, you can install these with: - - $ apt-cyg install make gcc-core libpcre-devel zlib-devel - - - -[1] Tailor Unicorn to your need. - -Out of 6 archtitectures supported by Unicorn (Arm, Arm64, M68K, Mips, Sparc, -& X86), if you just need several selected archs, choose which ones you want -to compile in by editing "config.mk" before going to next steps. - -By default, all 6 architectures are compiled. - -The other way of customize Unicorn without having to edit config.mk is to -pass the desired options on the commandline to ./make.sh. Currently, -Unicorn supports 4 options, as follows. - - - UNICORN_ARCHS: specify list of architectures to compiled in. - - UNICORN_STATIC: build static library. - - UNICORN_SHARED: build dynamic (shared) library. - - UNICORN_QEMU_FLAGS: specify extra flags for qemu's configure script - -To avoid editing config.mk for these customization, we can pass their values to -make.sh, as follows. 
- - $ UNICORN_ARCHS="arm aarch64 x86" ./make.sh - -NOTE: on commandline, put these values in front of ./make.sh, not after it. - -For each option, refer to docs/README for more details. - - - -[2] Compile from source on Windows - with MinGW (MSYS2) - -To compile with MinGW, install MSYS2 as instructed in the first section. - -Note: After MSYS2 is installed, you will have 3 shortcuts to open the command prompt: "MSYS2 MSYS", "MSYS2 MinGW-32 bit" and "MSYS2 MinGW 64-bit". Use the MinGW shortcut so that compilation succeeds. - -Then, build Unicorn with the next steps: - -- To compile Windows 32-bit binary with MinGW, run: - - $ ./make.sh cross-win32 - -- To compile Windows 64-bit binary with MinGW, run: - - $ ./make.sh cross-win64 - -Resulted files unicorn.dll, unicorn.lib & samples/sample*.exe can then -be used on Windows machine. - -To run sample_x86.exe on Windows 32-bit, you need the following files: - - unicorn.dll - %MSYS2%\mingw32\bin\libgcc_s_dw2-1.dll - %MSYS2%\mingw32\bin\libwinpthread-1.dll - -To run sample_x86.exe on Windows 64-bit, you need the following files: - - unicorn.dll - %MSYS2%\mingw64\bin\libgcc_s_seh-1.dll - %MSYS2%\mingw64\bin\libwinpthread-1.dll - - - -[3] Compile and install from source on Cygwin - -To build Unicorn on Cygwin, run: - - $ ./make.sh - -After compiling, install Unicorn with: - - $ ./make.sh install - -Resulted files cygunicorn.dll, libunicorn.dll.a and libunicorn.a can be -used on Cygwin but not native Windows. - -NOTE: The core framework installed by "./make.sh install" consist of -following files: - - /usr/include/unicorn/*.h - /usr/bin/cygunicorn.dll - /usr/lib/libunicorn.dll.a - /usr/lib/libunicorn.a - - - -[4] Cross-compile for Windows from *nix - -To cross-compile for Windows, Linux & gcc-mingw-w64-i686 (and also gcc-mingw-w64-x86-64 -for 64-bit binaries) are required. - -- To cross-compile Windows 32-bit binary, simply run: - - $ ./make.sh cross-win32 - -- To cross-compile Windows 64-bit binary, run: - - $ ./make.sh cross-win64 - -Resulted files unicorn.dll, unicorn.lib & samples/sample*.exe can then -be used on Windows machine. - -To run sample_x86.exe on Windows 32-bit, you need the following files: - - unicorn.dll - /usr/lib/gcc/i686-w64-mingw32/4.8/libgcc_s_sjlj-1.dll - /usr/i686-w64-mingw32/lib/libwinpthread-1.dll - -To run sample_x86.exe on Windows 64-bit, you need the following files: - - unicorn.dll - /usr/lib/gcc/x86_64-w64-mingw32/4.8/libgcc_s_sjlj-1.dll - /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll - -Then run either "sample_x86.exe -32" or "sample_x86.exe -64" to test emulators for X86 32-bit or X86 64-bit. -For other architectures, run "sample_xxx.exe" found in the same directory. - - - -[5] Language bindings - -Look for the bindings under directory bindings/, and refer to README file -of corresponding languages. - - - -[6] Unit tests - -Automated unit tests use the cmocka unit testing framework (https://cmocka.org/). -It can be installed in most Linux distros using the package manager, e.g. -`sudo yum install libcmocka libcmocka-devel`, or you can easily build and install it from source. - -You can run the tests by running `make test` in the project directory. 
diff --git a/docs/COMPILE.md b/docs/COMPILE.md
index a5ec77fe..c499526d 100644
--- a/docs/COMPILE.md
+++ b/docs/COMPILE.md
@@ -1,25 +1,131 @@
-To compile Unicorn on Mac OS X, Linux, BSD, Solaris and all kind of nix OS,
-see [COMPILE-NIX.md](COMPILE-NIX.md)
+This HOWTO explains how to build Unicorn2 natively on Linux/Mac/Windows,
+or how to cross-build to Windows from a Linux host.
-To compile Unicorn on Windows, see [COMPILE-WINDOWS.md](COMPILE-WINDOWS.md)
+---
+### Native build on Linux/MacOS
-To compile Unicorn with CMake on Windows or *nix, see
-[COMPILE-CMAKE.md](COMPILE-CMAKE.md)
+This builds Unicorn2 on Linux/MacOS. The output is `libunicorn.so` or `libunicorn.dylib`, respectively.
-Then learn more on how to code your own tools with our samples.
+- Requires the `cmake` & `pkg-config` packages (besides a `gcc`/`clang` compiler):
- - For C sample code, see code in directory samples/sample*.c
- - For Python sample code, see code in directory bindings/python/sample*.py
- - For samples of other bindings, look into directories bindings/<language>/
+```
+$ sudo apt install cmake pkg-config
+```
-#Building unicorn - Using vcpkg
+- Build with the following commands.
-You can download and install unicorn using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
+```
+$ mkdir build; cd build
+$ ../cmake.sh
+```
-    git clone https://github.com/Microsoft/vcpkg.git
-    cd vcpkg
-    ./bootstrap-vcpkg.sh
-    ./vcpkg integrate install
-    ./vcpkg install unicorn
+Then run the sample `sample_riscv` with:
-The unicorn port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
\ No newline at end of file
+```
+$ ./sample_riscv
+```
+
+---
+### Native build on Windows, with MSVC
+
+This builds Unicorn2 on Windows, using the Microsoft MSVC compiler. The output is `unicorn.dll`.
+
+- Requires `cmake` & `Microsoft Visual Studio`.
+
+- From the Visual Studio Command Prompt, build with the following commands.
+
+```
+mkdir build; cd build
+../nmake.sh
+```
+
+Then run the sample `sample_riscv` with:
+
+```
+sample_riscv.exe
+```
+
+---
+
+### Cross build from Linux host to Windows, with Mingw
+
+This cross-builds Unicorn2 from a **Linux host** to Windows, using the `Mingw` compiler. The output is `libunicorn.dll`.
+
+- Install the required package.
+
+```
+$ sudo apt install mingw-w64-x86-64-dev
+```
+
+- Build Unicorn and samples with the following commands.
+
+```
+$ mkdir build; cd build
+$ ../cmake.sh mingw
+```
+
+The resulting `sample_riscv.exe` can be run with `libunicorn.dll`; some dependency DLLs are
+already provided in the `bin/` directory.
+
+To prepare `sample_riscv.exe`, do:
+
+```
+cp libunicorn.dll ../bin
+cp sample_riscv.exe ../bin
+```
+
+Then, inside the `bin/` directory, you can run `sample_riscv.exe` (from a `CMD.exe` prompt, for example).
+
+
+---
+
+### Native build on Windows host, with MSYS2/Mingw
+
+This builds Unicorn2 on a **Windows host**, using the **MSYS2/Mingw** compiler. The output is `libunicorn.dll`.
+
+This requires MSYS2 to be installed on the Windows machine. You need to download & install MSYS2 from https://www.msys2.org.
+
+Then, from the MSYS2 console, install the required packages:
+
+```
+pacman -S mingw-w64-x86_64-toolchain mingw-w64-x86_64-cmake
+```
+
+- Build Unicorn and samples with the following commands.
+
+```
+mkdir build; cd build
+../cmake.sh msys
+```
+
+The resulting `sample_riscv.exe` can be run with `libunicorn.dll`; some dependency DLLs are
+already provided in the `bin/` directory.
+
+To prepare `sample_riscv.exe`, do:
+
+```
+cp libunicorn.dll ../bin
+cp sample_riscv.exe ../bin
+```
+
+Then, inside the `bin/` directory, you can run `sample_riscv.exe` (from a `CMD.exe` prompt, for example).
+
+
+---
+
+### Cross build from Linux host to another architecture
+
+This cross-builds Unicorn2 from a **Linux host** to another architecture, using a cross compiler. The output is `libunicorn.so`.
+
+- Install the cross-compiler package. For example, cross-compiling to ARM requires the command below.
+
+```
+$ sudo apt install gcc-arm-linux-gnueabihf
+```
+
+- Build Unicorn and samples with the following commands (note that you need to specify the compiler via `CC`).
+
+```
+$ mkdir build; cd build
+$ CC=arm-linux-gnueabihf-gcc ../cmake.sh
+```
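Whichever of the builds above is used, a quick way to sanity-check the resulting library beyond the bundled samples is a minimal host program against the C API. The sketch below is illustrative only and not part of this patch; the file name `smoke.c` and the `cc smoke.c -o smoke -lunicorn` link line are assumptions about a typical installed setup. It emulates two x86 instructions and prints the register results:

```c
#include <stdio.h>
#include <unicorn/unicorn.h>

// Two bytes of 32-bit x86 code: INC ecx; DEC edx
#define CODE "\x41\x4a"
#define ADDRESS 0x10000

int main(void)
{
    uc_engine *uc;
    uc_err err;
    int r_ecx = 0x1234, r_edx = 0x7890;

    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err != UC_ERR_OK) {
        printf("uc_open failed: %s\n", uc_strerror(err));
        return 1;
    }

    // Map 2MB for the code, copy it in, and seed the registers.
    uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);
    uc_mem_write(uc, ADDRESS, CODE, sizeof(CODE) - 1);
    uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx);
    uc_reg_write(uc, UC_X86_REG_EDX, &r_edx);

    // Execute the two instructions, then read the registers back.
    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(CODE) - 1, 0, 0);
    if (err) {
        printf("uc_emu_start failed: %s\n", uc_strerror(err));
    }
    uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx);
    uc_reg_read(uc, UC_X86_REG_EDX, &r_edx);
    printf("ECX = 0x%x, EDX = 0x%x\n", r_ecx, r_edx);

    uc_close(uc);
    return 0;
}
```

A healthy build prints `ECX = 0x1235, EDX = 0x788f`.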
diff --git a/docs/Micro Unicorn-Engine API Documentation/Micro Unicorn-Engine API Documentation.md b/docs/Micro Unicorn-Engine API Documentation/Micro Unicorn-Engine API Documentation.md
index bd4eee6c..a3c8d68e 100644
--- a/docs/Micro Unicorn-Engine API Documentation/Micro Unicorn-Engine API Documentation.md
+++ b/docs/Micro Unicorn-Engine API Documentation/Micro Unicorn-Engine API Documentation.md
@@ -1,2754 +1,2717 @@
-# Micro Unicorn-Engine API Documentation
-
-**Warning:** ***This is an unofficial API document by [kabeor](https://github.com/kabeor), If there are any mistakes, welcome to ask.***
-
-**注意:** ***这是由kabeor制作的非官方API参考文档,如有错误欢迎提出,觉得不错可以给个star鼓励我***
-
-之前对Capstone反汇编引擎的API分析文档已经被[官方](http://www.capstone-engine.org/documentation.html)收录 https://github.com/kabeor/Micro-Capstone-Engine-API-Documentation ,在实现自己想要做出的调试器的路上,又遇到了与Capstone同作者的国外大佬aquynh的另一个著名项目Unicorn,不巧的是,详尽的API文档仍然较少,更多的是大篇幅的代码,因此决定继续分析Unicorn框架,包括数据类型,已开放API及其实现。
-
-Unicorn是一个轻量级, 多平台, 多架构的CPU模拟器框架,基于qemu开发,它可以代替CPU模拟代码的执行,常用于恶意代码分析,Fuzz等,该项目被用于Radare2逆向分析框架,GEF(gdb的pwn分析插件),Pwndbg,Angr符号执行框架等多个著名项目。接下来我也将通过阅读源码和代码实际调用来写一个简单的非官方版本的API手册。
-
-Blog: kabeor.cn
-
-## 0x0 开发准备
-
-Unicorn官网: http://www.unicorn-engine.org
-
-### 自行编译lib和dll方法
-
-源码: https://github.com/unicorn-engine/unicorn/archive/master.zip
-
-下载后解压
-
-文件结构如下:
-
-```
-. <- 主要引擎core engine + README + 编译文档COMPILE.TXT 等
-├── arch <- 各语言反编译支持的代码实现
-├── bindings <- 中间件
-│   ├── dotnet <- .Net 中间件 + 测试代码
-│   ├── go <- go 中间件 + 测试代码
-│   ├── haskell <- Haskell 中间件 + 测试代码
-│   ├── java <- Java 中间件 + 测试代码
-│   ├── pascal <- Pascal 中间件 + 测试代码
-│   ├── python <- Python 中间件 + 测试代码
-│   ├── ruby <- Ruby 中间件 + 测试代码
-│   └── vb6 <- VB6 中间件 + 测试代码
-├── docs <- 文档,主要是Unicorn的实现思路
-├── include <- C头文件
-├── msvc <- Microsoft Visual Studio 支持(Windows)
-├── qemu <- qemu框架源码
-├── samples <- Unicorn使用示例
-└── tests <- C语言测试用例
-```
-
-下面演示Windows10使用Visual Studio2019编译
-
-打开msvc文件夹,内部结构如下
-
-![image.png](API_Doc_Pic/iyodlNFY7hHEOgS.png)
-
-VS打开unicorn.sln项目文件,解决方案自动载入这些
-
-![image.png](API_Doc_Pic/fOnNpSKvjYyc7QB.png)
-
-如果都需要的话,直接编译就好了,只需要其中几种,则右键解决方案->属性->配置属性 如下
-
-![image.png](API_Doc_Pic/F3rSByYuNTGDtC1.png)
-
-生成选项中勾选你需要的支持项即可
-
-项目编译属性为:
-1. 使用多字节字符集
-2. 不使用预编译头
-3. 附加选项 /wd4018 /wd4244 /wd4267
-4.
预处理器定义中添加 ` _CRT_SECURE_NO_WARNINGS` - -编译后会在当前文件夹Debug目录下生成unicorn.lib静态编译库和unicorn.dll动态库这样就可以开始使用Unicorn进行开发了 - -编译到最后一项可能会报错系统找不到指定的路径,查看makefile发现问题出现在此处 -![image.png](API_Doc_Pic/YCMNcEVyX8GHoPb.png) - -事实上只不过是不能将生成的lib和dll复制到新建的文件夹而已,只需要到生成目录去找即可。 - -官方目前提供的最新已编译版本为1.0.1版本,比较老,建议自己编辑最新版本源码,以获得更多可用API。 -Win32:https://github.com/unicorn-engine/unicorn/releases/download/1.0.1/unicorn-1.0.1-win32.zip -Win64:https://github.com/unicorn-engine/unicorn/releases/download/1.0.1/unicorn-1.0.1-win64.zip - -**注意: 选x32或x64将影响后面开发的位数** - - - -### 引擎调用测试 - -新建一个VS项目,将..\unicorn-master\include\unicorn中的头文件以及编译好的lib和dll文件全部拷贝到新建项目的主目录下 - -![image.png](API_Doc_Pic/I25E9sWcJpGyax7.png) - -在VS解决方案中,头文件添加现有项unicorn.h,资源文件中添加unicorn.lib,重新生成解决方案 - -![image.png](API_Doc_Pic/OVaHwelNQ4tcLmo.png) - -接下来测试我们生成的unicorn框架 - -主文件代码如下 - -```cpp -#include -#include "unicorn/unicorn.h" - -// 要模拟的指令 -#define X86_CODE32 "\x41\x4a" // INC ecx; DEC edx - -// 起始地址 -#define ADDRESS 0x1000000 - -int main() -{ - uc_engine* uc; - uc_err err; - int r_ecx = 0x1234; // ECX 寄存器 - int r_edx = 0x7890; // EDX 寄存器 - - printf("Emulate i386 code\n"); - - // X86-32bit 模式初始化模拟 - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_open() with error returned: %u\n", err); - return -1; - } - - // 给模拟器申请 2MB 内存 - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // 将要模拟的指令写入内存 - if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { - printf("Failed to write emulation code to memory, quit!\n"); - return -1; - } - - // 初始化寄存器 - uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); - uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); - - printf(">>> ECX = 0x%x\n", r_ecx); - printf(">>> EDX = 0x%x\n", r_edx); - - // 模拟代码 - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned %u: %s\n", - err, uc_strerror(err)); - } - - // 打印寄存器值 - printf("Emulation done. 
Below is the CPU context\n"); - - uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); - uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); - printf(">>> ECX = 0x%x\n", r_ecx); - printf(">>> EDX = 0x%x\n", r_edx); - - uc_close(uc); - - return 0; -} -``` - -运行结果如下 - -![image.png](API_Doc_Pic/bpu4r8hgzUvO7Pm.png) - -ecx+1和edx-1成功模拟。 - -## 0x1 数据类型分析 - -### uc_arch - -架构选择 - -```cpp -typedef enum uc_arch { - UC_ARCH_ARM = 1, // ARM 架构 (包括 Thumb, Thumb-2) - UC_ARCH_ARM64, // ARM-64, 也称 AArch64 - UC_ARCH_MIPS, // Mips 架构 - UC_ARCH_X86, // X86 架构 (包括 x86 & x86-64) - UC_ARCH_PPC, // PowerPC 架构 (暂不支持) - UC_ARCH_SPARC, // Sparc 架构 - UC_ARCH_M68K, // M68K 架构 - UC_ARCH_MAX, -} uc_arch; -``` - - - -### uc_mode - -模式选择 - -```cpp -typedef enum uc_mode { - UC_MODE_LITTLE_ENDIAN = 0, // 小端序模式 (默认) - UC_MODE_BIG_ENDIAN = 1 << 30, // 大端序模式 - - // arm / arm64 - UC_MODE_ARM = 0, // ARM 模式 - UC_MODE_THUMB = 1 << 4, // THUMB 模式 (包括 Thumb-2) - UC_MODE_MCLASS = 1 << 5, // ARM's Cortex-M 系列 (暂不支持) - UC_MODE_V8 = 1 << 6, // ARMv8 A32 encodings for ARM (暂不支持) - - // arm (32bit) cpu 类型 - UC_MODE_ARM926 = 1 << 7, // ARM926 CPU 类型 - UC_MODE_ARM946 = 1 << 8, // ARM946 CPU 类型 - UC_MODE_ARM1176 = 1 << 9, // ARM1176 CPU 类型 - - // mips - UC_MODE_MICRO = 1 << 4, // MicroMips 模式 (暂不支持) - UC_MODE_MIPS3 = 1 << 5, // Mips III ISA (暂不支持) - UC_MODE_MIPS32R6 = 1 << 6, // Mips32r6 ISA (暂不支持) - UC_MODE_MIPS32 = 1 << 2, // Mips32 ISA - UC_MODE_MIPS64 = 1 << 3, // Mips64 ISA - - // x86 / x64 - UC_MODE_16 = 1 << 1, // 16-bit 模式 - UC_MODE_32 = 1 << 2, // 32-bit 模式 - UC_MODE_64 = 1 << 3, // 64-bit 模式 - - // ppc - UC_MODE_PPC32 = 1 << 2, // 32-bit 模式 (暂不支持) - UC_MODE_PPC64 = 1 << 3, // 64-bit 模式 (暂不支持) - UC_MODE_QPX = 1 << 4, // Quad Processing eXtensions 模式 (暂不支持) - - // sparc - UC_MODE_SPARC32 = 1 << 2, // 32-bit 模式 - UC_MODE_SPARC64 = 1 << 3, // 64-bit 模式 - UC_MODE_V9 = 1 << 4, // SparcV9 模式 (暂不支持) - - // m68k -} uc_mode; -``` - - - -### uc_err - -错误类型,是uc_errno()的返回值 - -```cpp -typedef enum uc_err { - UC_ERR_OK = 0, // 无错误 - UC_ERR_NOMEM, // 内存不足: uc_open(), uc_emulate() - UC_ERR_ARCH, // 不支持的架构: uc_open() - UC_ERR_HANDLE, // 不可用句柄 - UC_ERR_MODE, // 不可用/不支持架构: uc_open() - UC_ERR_VERSION, // 不支持版本 (中间件) - UC_ERR_READ_UNMAPPED, // 由于在未映射的内存上读取而退出模拟: uc_emu_start() - UC_ERR_WRITE_UNMAPPED, // 由于在未映射的内存上写入而退出模拟: uc_emu_start() - UC_ERR_FETCH_UNMAPPED, // 由于在未映射的内存中获取数据而退出模拟: uc_emu_start() - UC_ERR_HOOK, // 无效的hook类型: uc_hook_add() - UC_ERR_INSN_INVALID, // 由于指令无效而退出模拟: uc_emu_start() - UC_ERR_MAP, // 无效的内存映射: uc_mem_map() - UC_ERR_WRITE_PROT, // 由于UC_MEM_WRITE_PROT冲突而停止模拟: uc_emu_start() - UC_ERR_READ_PROT, // 由于UC_MEM_READ_PROT冲突而停止模拟: uc_emu_start() - UC_ERR_FETCH_PROT, // 由于UC_MEM_FETCH_PROT冲突而停止模拟: uc_emu_start() - UC_ERR_ARG, // 提供给uc_xxx函数的无效参数 - UC_ERR_READ_UNALIGNED, // 未对齐读取 - UC_ERR_WRITE_UNALIGNED, // 未对齐写入 - UC_ERR_FETCH_UNALIGNED, // 未对齐的提取 - UC_ERR_HOOK_EXIST, // 此事件的钩子已经存在 - UC_ERR_RESOURCE, // 资源不足: uc_emu_start() - UC_ERR_EXCEPTION, // 未处理的CPU异常 - UC_ERR_TIMEOUT // 模拟超时 -} uc_err; -``` - - - -### uc_mem_type - -UC_HOOK_MEM_*的所有内存访问类型 - -```cpp -typedef enum uc_mem_type { - UC_MEM_READ = 16, // 内存从..读取 - UC_MEM_WRITE, // 内存写入到.. - UC_MEM_FETCH, // 内存被获取 - UC_MEM_READ_UNMAPPED, // 未映射内存从..读取 - UC_MEM_WRITE_UNMAPPED, // 未映射内存写入到.. 
- UC_MEM_FETCH_UNMAPPED, // 未映射内存被获取 - UC_MEM_WRITE_PROT, // 内存写保护,但是已映射 - UC_MEM_READ_PROT, // 内存读保护,但是已映射 - UC_MEM_FETCH_PROT, // 内存不可执行,但是已映射 - UC_MEM_READ_AFTER, // 内存从 (成功访问的地址) 读入 -} uc_mem_type; -``` - - - -### uc_hook_type - -uc_hook_add()的所有hook类型参数 - -```cpp -typedef enum uc_hook_type { - // Hook 所有中断/syscall 事件 - UC_HOOK_INTR = 1 << 0, - // Hook 一条特定的指令 - 只支持非常小的指令子集 - UC_HOOK_INSN = 1 << 1, - // Hook 一段代码 - UC_HOOK_CODE = 1 << 2, - // Hook 基本块 - UC_HOOK_BLOCK = 1 << 3, - // 用于在未映射的内存上读取内存的Hook - UC_HOOK_MEM_READ_UNMAPPED = 1 << 4, - // Hook 无效的内存写事件 - UC_HOOK_MEM_WRITE_UNMAPPED = 1 << 5, - // Hook 执行事件的无效内存 - UC_HOOK_MEM_FETCH_UNMAPPED = 1 << 6, - // Hook 读保护的内存 - UC_HOOK_MEM_READ_PROT = 1 << 7, - // Hook 写保护的内存 - UC_HOOK_MEM_WRITE_PROT = 1 << 8, - // Hook 不可执行内存上的内存 - UC_HOOK_MEM_FETCH_PROT = 1 << 9, - // Hook 内存读取事件 - UC_HOOK_MEM_READ = 1 << 10, - // Hook 内存写入事件 - UC_HOOK_MEM_WRITE = 1 << 11, - // Hook 内存获取执行事件 - UC_HOOK_MEM_FETCH = 1 << 12, - // Hook 内存读取事件,只允许能成功访问的地址 - // 成功读取后将触发回调 - UC_HOOK_MEM_READ_AFTER = 1 << 13, - // Hook 无效指令异常 - UC_HOOK_INSN_INVALID = 1 << 14, -} uc_hook_type; -``` - - - -### 宏定义Hook类型 - -```cpp -// Hook 所有未映射内存访问的事件 -#define UC_HOOK_MEM_UNMAPPED (UC_HOOK_MEM_READ_UNMAPPED + UC_HOOK_MEM_WRITE_UNMAPPED + UC_HOOK_MEM_FETCH_UNMAPPED) -// Hook 所有对受保护内存的非法访问事件 -#define UC_HOOK_MEM_PROT (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_FETCH_PROT) -// Hook 所有非法读取存储器的事件 -#define UC_HOOK_MEM_READ_INVALID (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_READ_UNMAPPED) -// Hook 所有非法写入存储器的事件 -#define UC_HOOK_MEM_WRITE_INVALID (UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_WRITE_UNMAPPED) -// Hook 所有非法获取内存的事件 -#define UC_HOOK_MEM_FETCH_INVALID (UC_HOOK_MEM_FETCH_PROT + UC_HOOK_MEM_FETCH_UNMAPPED) -// Hook 所有非法的内存访问事件 -#define UC_HOOK_MEM_INVALID (UC_HOOK_MEM_UNMAPPED + UC_HOOK_MEM_PROT) -// Hook 所有有效内存访问的事件 -// 注意: UC_HOOK_MEM_READ 在 UC_HOOK_MEM_READ_PROT 和 UC_HOOK_MEM_READ_UNMAPPED 之前触发 , -// 因此这个Hook可能会触发一些无效的读取。 -#define UC_HOOK_MEM_VALID (UC_HOOK_MEM_READ + UC_HOOK_MEM_WRITE + UC_HOOK_MEM_FETCH) -``` - - - -### uc_mem_region - -由uc_mem_map()和uc_mem_map_ptr()映射内存区域 -使用uc_mem_regions()检索该内存区域的列表 - -```cpp -typedef struct uc_mem_region { - uint64_t begin; // 区域起始地址 (包括) - uint64_t end; // 区域结束地址 (包括) - uint32_t perms; // 区域的内存权限 -} uc_mem_region; -``` - - - -### uc_query_type - -uc_query()的所有查询类型参数 - -```cpp -typedef enum uc_query_type { - // 动态查询当前硬件模式 - UC_QUERY_MODE = 1, - UC_QUERY_PAGE_SIZE, - UC_QUERY_ARCH, -} uc_query_type; -``` - - - -### uc_context - -与uc_context_*()一起使用,管理CPU上下文的不透明存储 - -```cpp -struct uc_context; -typedef struct uc_context uc_context; -``` - - - -### uc_prot - -新映射区域的权限 - -```cpp -typedef enum uc_prot { - UC_PROT_NONE = 0, //无 - UC_PROT_READ = 1, //读取 - UC_PROT_WRITE = 2, //写入 - UC_PROT_EXEC = 4, //可执行 - UC_PROT_ALL = 7, //所有权限 -} uc_prot; -``` - - - -## 0x2 API分析 - -### uc_version - -```cpp -unsigned int uc_version(unsigned int *major, unsigned int *minor); -``` - -用于返回Unicorn API主次版本信息 - -``` -@major: API主版本号 -@minor: API次版本号 -@return 16进制数,计算方式 (major << 8 | minor) - -提示: 该返回值可以和宏UC_MAKE_VERSION比较 -``` - -源码实现 - -```c -unsigned int uc_version(unsigned int *major, unsigned int *minor) -{ - if (major != NULL && minor != NULL) { - *major = UC_API_MAJOR; //宏 - *minor = UC_API_MINOR; //宏 - } - - return (UC_API_MAJOR << 8) + UC_API_MINOR; //(major << 8 | minor) -} -``` - -编译后不可更改,不接受自定义版本 - -使用示例: - -```cpp -#include -#include "unicorn/unicorn.h" -using namespace std; - -int main() -{ - unsigned int version; - version = uc_version(NULL,NULL); - 
cout << hex << version << endl; - return 0; -} -``` - -输出: - -![image.png](API_Doc_Pic/q3JtOQRPl5xTFKp.png) - -得到版本号1.0.0 - - - -### uc_arch_supported - -```c -bool uc_arch_supported(uc_arch arch); -``` - -确定Unicorn是否支持当前架构 - -``` - @arch: 架构类型 (UC_ARCH_*) - @return 如果支持返回True -``` - -源码实现 - -```c -bool uc_arch_supported(uc_arch arch) -{ - switch (arch) { -#ifdef UNICORN_HAS_ARM - case UC_ARCH_ARM: return true; -#endif -#ifdef UNICORN_HAS_ARM64 - case UC_ARCH_ARM64: return true; -#endif -#ifdef UNICORN_HAS_M68K - case UC_ARCH_M68K: return true; -#endif -#ifdef UNICORN_HAS_MIPS - case UC_ARCH_MIPS: return true; -#endif -#ifdef UNICORN_HAS_PPC - case UC_ARCH_PPC: return true; -#endif -#ifdef UNICORN_HAS_SPARC - case UC_ARCH_SPARC: return true; -#endif -#ifdef UNICORN_HAS_X86 - case UC_ARCH_X86: return true; -#endif - /* 无效或禁用架构 */ - default: return false; - } -} -``` - -使用示例: - -```cpp -#include -#include "unicorn/unicorn.h" -using namespace std; - -int main() -{ - cout << "是否支持UC_ARCH_X86架构:" << uc_arch_supported(UC_ARCH_X86) << endl; - return 0; -} -``` - -输出: - -![image.png](API_Doc_Pic/NExsavSgu4yMbBQ.png) - - - -### uc_open - -```c -uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **uc); -``` - -创建新的Unicorn实例 - -``` -@arch: 架构类型 (UC_ARCH_*) -@mode: 硬件模式. 由 UC_MODE_* 组合 -@uc: 指向 uc_engine 的指针, 返回时更新 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) -{ - struct uc_struct *uc; - - if (arch < UC_ARCH_MAX) { - uc = calloc(1, sizeof(*uc)); //申请内存 - if (!uc) { - // 内存不足 - return UC_ERR_NOMEM; - } - - uc->errnum = UC_ERR_OK; - uc->arch = arch; - uc->mode = mode; - - // 初始化 - // uc->ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) }; - uc->ram_list.blocks.tqh_first = NULL; - uc->ram_list.blocks.tqh_last = &(uc->ram_list.blocks.tqh_first); - - uc->memory_listeners.tqh_first = NULL; - uc->memory_listeners.tqh_last = &uc->memory_listeners.tqh_first; - - uc->address_spaces.tqh_first = NULL; - uc->address_spaces.tqh_last = &uc->address_spaces.tqh_first; - - switch(arch) { // 根据架构进行预处理 - default: - break; -#ifdef UNICORN_HAS_M68K - case UC_ARCH_M68K: - if ((mode & ~UC_MODE_M68K_MASK) || - !(mode & UC_MODE_BIG_ENDIAN)) { - free(uc); - return UC_ERR_MODE; - } - uc->init_arch = m68k_uc_init; - break; -#endif -#ifdef UNICORN_HAS_X86 - case UC_ARCH_X86: - if ((mode & ~UC_MODE_X86_MASK) || - (mode & UC_MODE_BIG_ENDIAN) || - !(mode & (UC_MODE_16|UC_MODE_32|UC_MODE_64))) { - free(uc); - return UC_ERR_MODE; - } - uc->init_arch = x86_uc_init; - break; -#endif -#ifdef UNICORN_HAS_ARM - case UC_ARCH_ARM: - if ((mode & ~UC_MODE_ARM_MASK)) { - free(uc); - return UC_ERR_MODE; - } - if (mode & UC_MODE_BIG_ENDIAN) { - uc->init_arch = armeb_uc_init; - } else { - uc->init_arch = arm_uc_init; - } - - if (mode & UC_MODE_THUMB) - uc->thumb = 1; - break; -#endif -#ifdef UNICORN_HAS_ARM64 - case UC_ARCH_ARM64: - if (mode & ~UC_MODE_ARM_MASK) { - free(uc); - return UC_ERR_MODE; - } - if (mode & UC_MODE_BIG_ENDIAN) { - uc->init_arch = arm64eb_uc_init; - } else { - uc->init_arch = arm64_uc_init; - } - break; -#endif - -#if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) || defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL) - case UC_ARCH_MIPS: - if ((mode & ~UC_MODE_MIPS_MASK) || - !(mode & (UC_MODE_MIPS32|UC_MODE_MIPS64))) { - free(uc); - return UC_ERR_MODE; - } - if (mode & UC_MODE_BIG_ENDIAN) { -#ifdef UNICORN_HAS_MIPS - if (mode & UC_MODE_MIPS32) - uc->init_arch = mips_uc_init; -#endif -#ifdef 
UNICORN_HAS_MIPS64 - if (mode & UC_MODE_MIPS64) - uc->init_arch = mips64_uc_init; -#endif - } else { // 小端序 -#ifdef UNICORN_HAS_MIPSEL - if (mode & UC_MODE_MIPS32) - uc->init_arch = mipsel_uc_init; -#endif -#ifdef UNICORN_HAS_MIPS64EL - if (mode & UC_MODE_MIPS64) - uc->init_arch = mips64el_uc_init; -#endif - } - break; -#endif - -#ifdef UNICORN_HAS_SPARC - case UC_ARCH_SPARC: - if ((mode & ~UC_MODE_SPARC_MASK) || - !(mode & UC_MODE_BIG_ENDIAN) || - !(mode & (UC_MODE_SPARC32|UC_MODE_SPARC64))) { - free(uc); - return UC_ERR_MODE; - } - if (mode & UC_MODE_SPARC64) - uc->init_arch = sparc64_uc_init; - else - uc->init_arch = sparc_uc_init; - break; -#endif - } - - if (uc->init_arch == NULL) { - return UC_ERR_ARCH; - } - - if (machine_initialize(uc)) - return UC_ERR_RESOURCE; - - *result = uc; - - if (uc->reg_reset) - uc->reg_reset(uc); - - return UC_ERR_OK; - } else { - return UC_ERR_ARCH; - } -} -``` - -**注意: uc_open会申请堆内存,使用完必须用uc_close释放,否则会发生泄露** - -使用示例: - -```cpp -#include -#include "unicorn/unicorn.h" -using namespace std; - -int main() -{ - uc_engine* uc; - uc_err err; - - //// 初始化 X86-32bit 模式模拟器 - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_open() with error returned: %u\n", err); - return -1; - } - - if (!err) - cout << "uc引擎创建成功" << endl; - - //// 关闭uc - err = uc_close(uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_close() with error returned: %u\n", err); - return -1; - } - - if (!err) - cout << "uc引擎关闭成功" << endl; - - return 0; -} -``` - -输出 - -![image.png](API_Doc_Pic/dqKBwAWUL7XvypE.png) - - - -### uc_close - -```c -uc_err uc_close(uc_engine *uc); -``` - -关闭一个uc实例,将释放内存。关闭后无法恢复。 - -``` -@uc: 指向由 uc_open() 返回的指针 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_close(uc_engine *uc) -{ - int i; - struct list_item *cur; - struct hook *hook; - - // 清理内部数据 - if (uc->release) - uc->release(uc->tcg_ctx); - g_free(uc->tcg_ctx); - - // 清理 CPU. - g_free(uc->cpu->tcg_as_listener); - g_free(uc->cpu->thread); - - // 清理所有 objects. 
- OBJECT(uc->machine_state->accelerator)->ref = 1; - OBJECT(uc->machine_state)->ref = 1; - OBJECT(uc->owner)->ref = 1; - OBJECT(uc->root)->ref = 1; - - object_unref(uc, OBJECT(uc->machine_state->accelerator)); - object_unref(uc, OBJECT(uc->machine_state)); - object_unref(uc, OBJECT(uc->cpu)); - object_unref(uc, OBJECT(&uc->io_mem_notdirty)); - object_unref(uc, OBJECT(&uc->io_mem_unassigned)); - object_unref(uc, OBJECT(&uc->io_mem_rom)); - object_unref(uc, OBJECT(uc->root)); - - // 释放内存 - g_free(uc->system_memory); - - // 释放相关线程 - if (uc->qemu_thread_data) - g_free(uc->qemu_thread_data); - - // 释放其他数据 - free(uc->l1_map); - - if (uc->bounce.buffer) { - free(uc->bounce.buffer); - } - - g_hash_table_foreach(uc->type_table, free_table, uc); - g_hash_table_destroy(uc->type_table); - - for (i = 0; i < DIRTY_MEMORY_NUM; i++) { - free(uc->ram_list.dirty_memory[i]); - } - - // 释放hook和hook列表 - for (i = 0; i < UC_HOOK_MAX; i++) { - cur = uc->hook[i].head; - // hook 可存在于多个列表,可通过计数获取释放的时间 - while (cur) { - hook = (struct hook *)cur->data; - if (--hook->refs == 0) { - free(hook); - } - cur = cur->next; - } - list_clear(&uc->hook[i]); - } - - free(uc->mapped_blocks); - - // 最后释放uc自身 - memset(uc, 0, sizeof(*uc)); - free(uc); - - return UC_ERR_OK; -} -``` - -使用实例同uc_open() - - - -### uc_query - -```c -uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result); -``` - -查询引擎的内部状态 - -``` - @uc: uc_open() 返回的句柄 - @type: uc_query_type 中枚举的类型 - - @result: 保存被查询的内部状态的指针 - - @return: 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result) -{ - if (type == UC_QUERY_PAGE_SIZE) { - *result = uc->target_page_size; - return UC_ERR_OK; - } - - if (type == UC_QUERY_ARCH) { - *result = uc->arch; - return UC_ERR_OK; - } - - switch(uc->arch) { -#ifdef UNICORN_HAS_ARM - case UC_ARCH_ARM: - return uc->query(uc, type, result); -#endif - default: - return UC_ERR_ARG; - } - - return UC_ERR_OK; -} -``` - -使用示例: - -```cpp -#include -#include "unicorn/unicorn.h" -using namespace std; -int main() -{ - uc_engine* uc; - uc_err err; - - //// Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_open() with error returned: %u\n", err); - return -1; - } - if (!err) - cout << "uc实例创建成功" << endl; - - size_t result[] = {0}; - err = uc_query(uc, UC_QUERY_ARCH, result); // 查询架构 - if (!err) - cout << "查询成功: " << *result << endl; - - err = uc_close(uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_close() with error returned: %u\n", err); - return -1; - } - if (!err) - cout << "uc实例关闭成功" << endl; - - return 0; -} -``` - -输出 - -![image.png](API_Doc_Pic/ZtRKvUoaPTlshJ4.png) - -架构查询结果为4,对应的正是UC_ARCH_X86 - - - -### uc_errno - -```c -uc_err uc_errno(uc_engine *uc); -``` - -当某个API函数失败时,报告最后的错误号,一旦被访问,uc_errno可能不会保留原来的值。 - -``` -@uc: uc_open() 返回的句柄 - -@return: 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_errno(uc_engine *uc) -{ - return uc->errnum; -} -``` - -使用示例: - -```cpp -#include -#include "unicorn/unicorn.h" -using namespace std; - -int main() -{ - uc_engine* uc; - uc_err err; - - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_open() with error returned: %u\n", err); - return -1; - } - if (!err) - cout << "uc实例创建成功" << endl; - - err = uc_errno(uc); - cout << "错误号: " << err << endl; - - err = uc_close(uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_close() with error returned: %u\n", err); - return -1; - } - 
if (!err) - cout << "uc实例关闭成功" << endl; - - return 0; -} -``` - -输出 - -![image.png](API_Doc_Pic/IZhyWrGebA5tT4i.png) - -无错误,输出错误号为0 - - - -### uc_strerror - -```c -const char *uc_strerror(uc_err code); -``` - -返回给定错误号的解释 - -``` - @code: 错误号 - - @return: 指向给定错误号的解释的字符串指针 -``` - -源码实现 - -```cpp -const char *uc_strerror(uc_err code) -{ - switch(code) { - default: - return "Unknown error code"; - case UC_ERR_OK: - return "OK (UC_ERR_OK)"; - case UC_ERR_NOMEM: - return "No memory available or memory not present (UC_ERR_NOMEM)"; - case UC_ERR_ARCH: - return "Invalid/unsupported architecture (UC_ERR_ARCH)"; - case UC_ERR_HANDLE: - return "Invalid handle (UC_ERR_HANDLE)"; - case UC_ERR_MODE: - return "Invalid mode (UC_ERR_MODE)"; - case UC_ERR_VERSION: - return "Different API version between core & binding (UC_ERR_VERSION)"; - case UC_ERR_READ_UNMAPPED: - return "Invalid memory read (UC_ERR_READ_UNMAPPED)"; - case UC_ERR_WRITE_UNMAPPED: - return "Invalid memory write (UC_ERR_WRITE_UNMAPPED)"; - case UC_ERR_FETCH_UNMAPPED: - return "Invalid memory fetch (UC_ERR_FETCH_UNMAPPED)"; - case UC_ERR_HOOK: - return "Invalid hook type (UC_ERR_HOOK)"; - case UC_ERR_INSN_INVALID: - return "Invalid instruction (UC_ERR_INSN_INVALID)"; - case UC_ERR_MAP: - return "Invalid memory mapping (UC_ERR_MAP)"; - case UC_ERR_WRITE_PROT: - return "Write to write-protected memory (UC_ERR_WRITE_PROT)"; - case UC_ERR_READ_PROT: - return "Read from non-readable memory (UC_ERR_READ_PROT)"; - case UC_ERR_FETCH_PROT: - return "Fetch from non-executable memory (UC_ERR_FETCH_PROT)"; - case UC_ERR_ARG: - return "Invalid argument (UC_ERR_ARG)"; - case UC_ERR_READ_UNALIGNED: - return "Read from unaligned memory (UC_ERR_READ_UNALIGNED)"; - case UC_ERR_WRITE_UNALIGNED: - return "Write to unaligned memory (UC_ERR_WRITE_UNALIGNED)"; - case UC_ERR_FETCH_UNALIGNED: - return "Fetch from unaligned memory (UC_ERR_FETCH_UNALIGNED)"; - case UC_ERR_RESOURCE: - return "Insufficient resource (UC_ERR_RESOURCE)"; - case UC_ERR_EXCEPTION: - return "Unhandled CPU exception (UC_ERR_EXCEPTION)"; - case UC_ERR_TIMEOUT: - return "Emulation timed out (UC_ERR_TIMEOUT)"; - } -} -``` - -使用示例: - -```cpp -#include -#include "unicorn/unicorn.h" -using namespace std; - -int main() -{ - uc_engine* uc; - uc_err err; - - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_open() with error returned: %u\n", err); - return -1; - } - if (!err) - cout << "uc实例创建成功" << endl; - - err = uc_errno(uc); - cout << "错误号: " << err << " 错误描述: " << uc_strerror(err) <reg_write) - ret = uc->reg_write(uc, (unsigned int *)ids, vals, count); //结构体中写入 - else - return UC_ERR_EXCEPTION; - - return ret; -} -``` - -使用示例: - -```cpp -#include -#include "unicorn/unicorn.h" -using namespace std; - -int main() -{ - uc_engine* uc; - uc_err err; - - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_open() with error returned: %u\n", err); - return -1; - } - if (!err) - cout << "uc实例创建成功" << endl; - - int r_eax = 0x12; - err = uc_reg_write(uc, UC_X86_REG_ECX, &r_eax); - if (!err) - cout << "写入成功: " << r_eax << endl; - - err = uc_close(uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_close() with error returned: %u\n", err); - return -1; - } - if (!err) - cout << "uc实例关闭成功" << endl; - - return 0; -} -``` - -输出 - -![image.png](API_Doc_Pic/DkztJcigHCdmnRp.png) - - - -### uc_reg_read - -```c -uc_err uc_reg_read(uc_engine *uc, int regid, void *value); -``` - -读取寄存器的值 - -``` -@uc: uc_open()返回的句柄 -@regid: 将被读取的寄存器ID 
-@value: 指向保存寄存器值的指针 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_reg_read(uc_engine *uc, int regid, void *value) -{ - return uc_reg_read_batch(uc, ®id, &value, 1); -} - -uc_err uc_reg_read_batch(uc_engine *uc, int *ids, void **vals, int count) -{ - if (uc->reg_read) - uc->reg_read(uc, (unsigned int *)ids, vals, count); - else - return -1; - - return UC_ERR_OK; -} -``` - -使用示例: - -```cpp -#include -#include "unicorn/unicorn.h" -using namespace std; - -int main() -{ - uc_engine* uc; - uc_err err; - - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_open() with error returned: %u\n", err); - return -1; - } - if (!err) - cout << "uc实例创建成功" << endl; - - int r_eax = 0x12; - err = uc_reg_write(uc, UC_X86_REG_ECX, &r_eax); - if (!err) - cout << "写入成功: " << r_eax << endl; - - int recv_eax; - err = uc_reg_read(uc, UC_X86_REG_ECX, &recv_eax); - if (!err) - cout << "读取成功: " << recv_eax << endl; - - err = uc_close(uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_close() with error returned: %u\n", err); - return -1; - } - if (!err) - cout << "uc实例关闭成功" << endl; - - return 0; -} -``` - -输出 - -![image.png](API_Doc_Pic/ABkexFCfphu3zIg.png) - - - -### uc_reg_write_batch - -```c -uc_err uc_reg_write_batch(uc_engine *uc, int *regs, void *const *vals, int count); -``` - -同时将多个值写入多个寄存器 - -``` -@uc: uc_open()返回的句柄 -@regid: 存储将被写入的多个寄存器ID的数组 -@value: 指向保存多个值的数组的指针 -@count: *regs 和 *vals 数组的长度 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_reg_write_batch(uc_engine *uc, int *ids, void *const *vals, int count) -{ - int ret = UC_ERR_OK; - if (uc->reg_write) - ret = uc->reg_write(uc, (unsigned int *)ids, vals, count); - else - return UC_ERR_EXCEPTION; - - return ret; -} -``` - -使用示例: - -```cpp -#include -#include -#include "unicorn/unicorn.h" -using namespace std; - -int syscall_abi[] = { - UC_X86_REG_RAX, UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX, - UC_X86_REG_R10, UC_X86_REG_R8, UC_X86_REG_R9 -}; - -uint64_t vals[7] = { 200, 10, 11, 12, 13, 14, 15 }; - -void* ptrs[7]; - -int main() -{ - int i; - uc_err err; - uc_engine* uc; - - // set up register pointers - for (i = 0; i < 7; i++) { - ptrs[i] = &vals[i]; - } - - if ((err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc))) { - uc_perror("uc_open", err); - return 1; - } - - // reg_write_batch - printf("reg_write_batch({200, 10, 11, 12, 13, 14, 15})\n"); - if ((err = uc_reg_write_batch(uc, syscall_abi, ptrs, 7))) { - uc_perror("uc_reg_write_batch", err); - return 1; - } - - // reg_read_batch - memset(vals, 0, sizeof(vals)); - if ((err = uc_reg_read_batch(uc, syscall_abi, ptrs, 7))) { - uc_perror("uc_reg_read_batch", err); - return 1; - } - - printf("reg_read_batch = {"); - - for (i = 0; i < 7; i++) { - if (i != 0) printf(", "); - printf("%" PRIu64, vals[i]); - } - - printf("}\n"); - - uint64_t var[7] = { 0 }; - for (int i = 0; i < 7; i++) - { - cout << syscall_abi[i] << " "; - printf("%" PRIu64, vals[i]); - cout << endl; - } - - return 0; -} -``` - -输出 - -![image.png](API_Doc_Pic/l1AhdxgKE2U3tZB.png) - - - -### uc_reg_read_batch - -```c -uc_err uc_reg_read_batch(uc_engine *uc, int *regs, void **vals, int count); -``` - -同时读取多个寄存器的值。 - -``` -@uc: uc_open()返回的句柄 -@regid: 存储将被读取的多个寄存器ID的数组 -@value: 指向保存多个值的数组的指针 -@count: *regs 和 *vals 数组的长度 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_reg_read_batch(uc_engine *uc, int *ids, void **vals, int count) -{ - if (uc->reg_read) - uc->reg_read(uc, (unsigned int *)ids, vals, 
count); - else - return -1; - - return UC_ERR_OK; -} -``` - -使用示例同uc_reg_write_batch()。 - - - -### uc_mem_write - -```c -uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *bytes, size_t size); -``` - -在内存中写入一段字节码。 - -``` - @uc: uc_open() 返回的句柄 - @address: 写入字节的起始地址 - @bytes: 指向一个包含要写入内存的数据的指针 - @size: 要写入的内存大小。 - - 注意: @bytes 必须足够大以包含 @size 字节。 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *_bytes, size_t size) -{ - size_t count = 0, len; - const uint8_t *bytes = _bytes; - - if (uc->mem_redirect) { - address = uc->mem_redirect(address); - } - - if (!check_mem_area(uc, address, size)) - return UC_ERR_WRITE_UNMAPPED; - - // 内存区域可以重叠相邻的内存块 - while(count < size) { - MemoryRegion *mr = memory_mapping(uc, address); - if (mr) { - uint32_t operms = mr->perms; - if (!(operms & UC_PROT_WRITE)) // 没有写保护 - // 标记为可写 - uc->readonly_mem(mr, false); - - len = (size_t)MIN(size - count, mr->end - address); - if (uc->write_mem(&uc->as, address, bytes, len) == false) - break; - - if (!(operms & UC_PROT_WRITE)) // 没有写保护 - // 设置写保护 - uc->readonly_mem(mr, true); - - count += len; - address += len; - bytes += len; - } else // 此地址尚未被映射 - break; - } - - if (count == size) - return UC_ERR_OK; - else - return UC_ERR_WRITE_UNMAPPED; -} -``` - -使用示例: - -```cpp -#include -#include -#include "unicorn/unicorn.h" -using namespace std; - -#define X86_CODE32 "\x41\x4a" // INC ecx; DEC edx -#define ADDRESS 0x1000 - -int main() -{ - uc_engine* uc; - uc_err err; - - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_open() with error returned: %u\n", err); - return -1; - } - - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { - printf("Failed to write emulation code to memory, quit!\n"); - return -1; - } - - uint32_t code; - - if(uc_mem_read(uc,ADDRESS,&code, sizeof(code))) { - printf("Failed to read emulation code to memory, quit!\n"); - return -1; - } - - cout << hex << code << endl; - - err = uc_close(uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_close() with error returned: %u\n", err); - return -1; - } - return 0; -} -``` - -输出 - -![image.png](API_Doc_Pic/l4HhgDzcJIVvFNU.png) - - - -### uc_mem_read - -```c -uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *bytes, size_t size); -``` - -从内存中读取字节。 - -``` - @uc: uc_open() 返回的句柄 - @address: 读取字节的起始地址 - @bytes: 指向一个包含要读取内存的数据的指针 - @size: 要读取的内存大小。 - - 注意: @bytes 必须足够大以包含 @size 字节。 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size) -{ - size_t count = 0, len; - uint8_t *bytes = _bytes; - - if (uc->mem_redirect) { - address = uc->mem_redirect(address); - } - - if (!check_mem_area(uc, address, size)) - return UC_ERR_READ_UNMAPPED; - - // 内存区域可以重叠相邻的内存块 - while(count < size) { - MemoryRegion *mr = memory_mapping(uc, address); - if (mr) { - len = (size_t)MIN(size - count, mr->end - address); - if (uc->read_mem(&uc->as, address, bytes, len) == false) - break; - count += len; - address += len; - bytes += len; - } else // 此地址尚未被映射 - break; - } - - if (count == size) - return UC_ERR_OK; - else - return UC_ERR_READ_UNMAPPED; -} -``` - -使用示例同uc_mem_write() - - - -### uc_emu_start - -```c -uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count); -``` - -在指定的时间内模拟机器码。 - -``` -@uc: uc_open() 返回的句柄 -@begin: 开始模拟的地址 
-@until: 模拟停止的地址 (当到达该地址时) -@timeout: 模拟代码的持续时间(以微秒计)。当这个值为0时,将在无限时间内模拟代码,直到代码完成。 -@count: 要模拟的指令数。当这个值为0时,将模拟所有可用的代码,直到代码完成 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count) -{ - // 重制计数器 - uc->emu_counter = 0; - uc->invalid_error = UC_ERR_OK; - uc->block_full = false; - uc->emulation_done = false; - uc->timed_out = false; - - switch(uc->arch) { - default: - break; -#ifdef UNICORN_HAS_M68K - case UC_ARCH_M68K: - uc_reg_write(uc, UC_M68K_REG_PC, &begin); - break; -#endif -#ifdef UNICORN_HAS_X86 - case UC_ARCH_X86: - switch(uc->mode) { - default: - break; - case UC_MODE_16: { - uint64_t ip; - uint16_t cs; - - uc_reg_read(uc, UC_X86_REG_CS, &cs); - // 抵消后面增加的 IP 和 CS - ip = begin - cs*16; - uc_reg_write(uc, UC_X86_REG_IP, &ip); - break; - } - case UC_MODE_32: - uc_reg_write(uc, UC_X86_REG_EIP, &begin); - break; - case UC_MODE_64: - uc_reg_write(uc, UC_X86_REG_RIP, &begin); - break; - } - break; -#endif -#ifdef UNICORN_HAS_ARM - case UC_ARCH_ARM: - uc_reg_write(uc, UC_ARM_REG_R15, &begin); - break; -#endif -#ifdef UNICORN_HAS_ARM64 - case UC_ARCH_ARM64: - uc_reg_write(uc, UC_ARM64_REG_PC, &begin); - break; -#endif -#ifdef UNICORN_HAS_MIPS - case UC_ARCH_MIPS: - // TODO: MIPS32/MIPS64/BIGENDIAN etc - uc_reg_write(uc, UC_MIPS_REG_PC, &begin); - break; -#endif -#ifdef UNICORN_HAS_SPARC - case UC_ARCH_SPARC: - // TODO: Sparc/Sparc64 - uc_reg_write(uc, UC_SPARC_REG_PC, &begin); - break; -#endif - } - - uc->stop_request = false; - - uc->emu_count = count; - // 如果不需要计数,则移除计数挂钩hook - if (count <= 0 && uc->count_hook != 0) { - uc_hook_del(uc, uc->count_hook); - uc->count_hook = 0; - } - // 设置计数hook记录指令数 - if (count > 0 && uc->count_hook == 0) { - uc_err err; - // 对计数指令的回调必须在所有其他操作之前运行,因此必须在hook列表的开头插入hook,而不是附加hook - uc->hook_insert = 1; - err = uc_hook_add(uc, &uc->count_hook, UC_HOOK_CODE, hook_count_cb, NULL, 1, 0); - // 恢复到 uc_hook_add() - uc->hook_insert = 0; - if (err != UC_ERR_OK) { - return err; - } - } - - uc->addr_end = until; - - if (timeout) - enable_emu_timer(uc, timeout * 1000); // microseconds -> nanoseconds - - if (uc->vm_start(uc)) { - return UC_ERR_RESOURCE; - } - - // 模拟完成 - uc->emulation_done = true; - - if (timeout) { - // 等待超时 - qemu_thread_join(&uc->timer); - } - - if(uc->timed_out) - return UC_ERR_TIMEOUT; - - return uc->invalid_error; -} -``` - -使用示例: - -```cpp -#include -#include -#include "unicorn/unicorn.h" -using namespace std; - -#define X86_CODE32 "\x33\xC0" // xor eax, eax -#define ADDRESS 0x1000 - -int main() -{ - uc_engine* uc; - uc_err err; - - int r_eax = 0x111; - - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_open() with error returned: %u\n", err); - return -1; - } - - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { - printf("Failed to write emulation code to memory, quit!\n"); - return -1; - } - - uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); - printf(">>> before EAX = 0x%x\n", r_eax); - - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned %u: %s\n", - err, uc_strerror(err)); - } - - uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); - printf(">>> after EAX = 0x%x\n", r_eax); - - err = uc_close(uc); - if (err != UC_ERR_OK) { - printf("Failed on uc_close() with error returned: %u\n", err); - return -1; - } - - return 0; -} -``` - -输出 - 
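(按示例代码推断,终端应打印 `>>> before EAX = 0x111` 与 `>>> after EAX = 0x0`,截图如下)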
-![image.png](API_Doc_Pic/K4HMijIVt6lofvT.png) - - - -### uc_emu_stop - -```c -uc_err uc_emu_stop(uc_engine *uc); -``` - -停止模拟 - -通常是从通过 tracing API注册的回调函数中调用。 - -``` -@uc: uc_open() 返回的句柄 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_emu_stop(uc_engine *uc) -{ - if (uc->emulation_done) - return UC_ERR_OK; - - uc->stop_request = true; - - if (uc->current_cpu) { - // 退出当前线程 - cpu_exit(uc->current_cpu); - } - - return UC_ERR_OK; -} -``` - -使用示例: - -```cpp -uc_emu_stop(uc); -``` - - - -### uc_hook_add - -```c -uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, - void *user_data, uint64_t begin, uint64_t end, ...); -``` - -注册hook事件的回调,当hook事件被触发将会进行回调。 - -``` - @uc: uc_open() 返回的句柄 - @hh: 注册hook得到的句柄. uc_hook_del() 中使用 - @type: hook 类型 - @callback: 当指令被命中时要运行的回调 - @user_data: 用户自定义数据. 将被传递给回调函数的最后一个参数 @user_data - @begin: 回调生效区域的起始地址(包括) - @end: 回调生效区域的结束地址(包括) - 注意 1: 只有回调的地址在[@begin, @end]中才会调用回调 - 注意 2: 如果 @begin > @end, 每当触发此hook类型时都会调用回调 - @...: 变量参数 (取决于 @type) - 注意: 如果 @type = UC_HOOK_INSN, 这里是指令ID (如: UC_X86_INS_OUT) - - @return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, - void *user_data, uint64_t begin, uint64_t end, ...) -{ - int ret = UC_ERR_OK; - int i = 0; - - struct hook *hook = calloc(1, sizeof(struct hook)); - if (hook == NULL) { - return UC_ERR_NOMEM; - } - - hook->begin = begin; - hook->end = end; - hook->type = type; - hook->callback = callback; - hook->user_data = user_data; - hook->refs = 0; - *hh = (uc_hook)hook; - - // UC_HOOK_INSN 有一个额外参数:指令ID - if (type & UC_HOOK_INSN) { - va_list valist; - - va_start(valist, end); - hook->insn = va_arg(valist, int); - va_end(valist); - - if (uc->insn_hook_validate) { - if (! 
uc->insn_hook_validate(hook->insn)) { - free(hook); - return UC_ERR_HOOK; - } - } - - if (uc->hook_insert) { - if (list_insert(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) { - free(hook); - return UC_ERR_NOMEM; - } - } else { - if (list_append(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) { - free(hook); - return UC_ERR_NOMEM; - } - } - - hook->refs++; - return UC_ERR_OK; - } - - while ((type >> i) > 0) { - if ((type >> i) & 1) { - if (i < UC_HOOK_MAX) { - if (uc->hook_insert) { - if (list_insert(&uc->hook[i], hook) == NULL) { - if (hook->refs == 0) { - free(hook); - } - return UC_ERR_NOMEM; - } - } else { - if (list_append(&uc->hook[i], hook) == NULL) { - if (hook->refs == 0) { - free(hook); - } - return UC_ERR_NOMEM; - } - } - hook->refs++; - } - } - i++; - } - - if (hook->refs == 0) { - free(hook); - } - - return ret; -} -``` - -使用示例: - -```cpp -#include -#include -#include "unicorn/unicorn.h" -using namespace std; - -int syscall_abi[] = { - UC_X86_REG_RAX, UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX, - UC_X86_REG_R10, UC_X86_REG_R8, UC_X86_REG_R9 -}; - -uint64_t vals[7] = { 200, 10, 11, 12, 13, 14, 15 }; - -void* ptrs[7]; - -void uc_perror(const char* func, uc_err err) -{ - fprintf(stderr, "Error in %s(): %s\n", func, uc_strerror(err)); -} - -#define BASE 0x10000 - -// mov rax, 100; mov rdi, 1; mov rsi, 2; mov rdx, 3; mov r10, 4; mov r8, 5; mov r9, 6; syscall -#define CODE "\x48\xc7\xc0\x64\x00\x00\x00\x48\xc7\xc7\x01\x00\x00\x00\x48\xc7\xc6\x02\x00\x00\x00\x48\xc7\xc2\x03\x00\x00\x00\x49\xc7\xc2\x04\x00\x00\x00\x49\xc7\xc0\x05\x00\x00\x00\x49\xc7\xc1\x06\x00\x00\x00\x0f\x05" - -void hook_syscall(uc_engine* uc, void* user_data) -{ - int i; - - uc_reg_read_batch(uc, syscall_abi, ptrs, 7); - - printf("syscall: {"); - - for (i = 0; i < 7; i++) { - if (i != 0) printf(", "); - printf("%" PRIu64, vals[i]); - } - - printf("}\n"); -} - -void hook_code(uc_engine* uc, uint64_t addr, uint32_t size, void* user_data) -{ - printf("HOOK_CODE: 0x%" PRIx64 ", 0x%x\n", addr, size); -} - -int main() -{ - int i; - uc_hook sys_hook; - uc_err err; - uc_engine* uc; - - for (i = 0; i < 7; i++) { - ptrs[i] = &vals[i]; - } - - if ((err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc))) { - uc_perror("uc_open", err); - return 1; - } - - printf("reg_write_batch({200, 10, 11, 12, 13, 14, 15})\n"); - if ((err = uc_reg_write_batch(uc, syscall_abi, ptrs, 7))) { - uc_perror("uc_reg_write_batch", err); - return 1; - } - - memset(vals, 0, sizeof(vals)); - if ((err = uc_reg_read_batch(uc, syscall_abi, ptrs, 7))) { - uc_perror("uc_reg_read_batch", err); - return 1; - } - - printf("reg_read_batch = {"); - - for (i = 0; i < 7; i++) { - if (i != 0) printf(", "); - printf("%" PRIu64, vals[i]); - } - - printf("}\n"); - - // syscall - printf("\n"); - printf("running syscall shellcode\n"); - - if ((err = uc_hook_add(uc, &sys_hook, UC_HOOK_CODE, hook_syscall, NULL, 1, 0))) { - uc_perror("uc_hook_add", err); - return 1; - } - - if ((err = uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL))) { - uc_perror("uc_mem_map", err); - return 1; - } - - if ((err = uc_mem_write(uc, BASE, CODE, sizeof(CODE) - 1))) { - uc_perror("uc_mem_write", err); - return 1; - } - - if ((err = uc_emu_start(uc, BASE, BASE + sizeof(CODE) - 1, 0, 0))) { - uc_perror("uc_emu_start", err); - return 1; - } - - return 0; -} -``` - -输出 - -![image.png](API_Doc_Pic/aU1lbmxMjXA5g3K.png) - -对每条指令都进行hook - - - -### uc_hook_del - -``` -uc_err uc_hook_del(uc_engine *uc, uc_hook hh); -``` - -删除一个已注册的hook事件 - -``` -@uc: uc_open() 返回的句柄 -@hh: uc_hook_add() 返回的句柄 - -@return 成功则返回UC_ERR_OK , 
否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_hook_del(uc_engine *uc, uc_hook hh) -{ - int i; - struct hook *hook = (struct hook *)hh; - - for (i = 0; i < UC_HOOK_MAX; i++) { - if (list_remove(&uc->hook[i], (void *)hook)) { - if (--hook->refs == 0) { - free(hook); - break; - } - } - } - return UC_ERR_OK; -} -``` - -使用示例: - -```cpp -if ((err = uc_hook_add(uc, &sys_hook, UC_HOOK_CODE, hook_syscall, NULL, 1, 0))) { - uc_perror("uc_hook_add", err); - return 1; -} - -if ((err = uc_hook_del(uc, &sys_hook))) { - uc_perror("uc_hook_del", err); - return 1; -} -``` - - - -### uc_mem_map - -```c -uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); -``` - -为模拟映射一块内存。 - -``` -@uc: uc_open() 返回的句柄 -@address: 要映射到的新内存区域的起始地址。这个地址必须与4KB对齐,否则将返回UC_ERR_ARG错误。 -@size: 要映射到的新内存区域的大小。这个大小必须是4KB的倍数,否则将返回UC_ERR_ARG错误。 -@perms: 新映射区域的权限。参数必须是UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC或这些的组合,否则返回UC_ERR_ARG错误。 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms) -{ - uc_err res; - - if (uc->mem_redirect) { - address = uc->mem_redirect(address); - } - - res = mem_map_check(uc, address, size, perms); //内存安全检查 - if (res) - return res; - - return mem_map(uc, address, size, perms, uc->memory_map(uc, address, size, perms)); -} -``` - -使用示例同uc_hook_add。 - - - -### uc_mem_map_ptr - -```c -uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr); -``` - -在模拟中映射现有的主机内存。 - -``` -@uc: uc_open() 返回的句柄 -@address: 要映射到的新内存区域的起始地址。这个地址必须与4KB对齐,否则将返回UC_ERR_ARG错误。 -@size: 要映射到的新内存区域的大小。这个大小必须是4KB的倍数,否则将返回UC_ERR_ARG错误。 -@perms: 新映射区域的权限。参数必须是UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC或这些的组合,否则返回UC_ERR_ARG错误。 -@ptr: 指向支持新映射内存的主机内存的指针。映射的主机内存的大小应该与size的大小相同或更大,并且至少使用PROT_READ | PROT_WRITE进行映射,否则不定义映射。 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr) -{ - uc_err res; - - if (ptr == NULL) - return UC_ERR_ARG; - - if (uc->mem_redirect) { - address = uc->mem_redirect(address); - } - - res = mem_map_check(uc, address, size, perms); //内存安全检查 - if (res) - return res; - - return mem_map(uc, address, size, UC_PROT_ALL, uc->memory_map_ptr(uc, address, size, perms, ptr)); -} -``` - -使用示例同uc_mem_map - - - -### uc_mem_unmap - -```c -uc_err uc_mem_unmap(uc_engine *uc, uint64_t address, size_t size); -``` - -取消对模拟内存区域的映射 - -``` -@uc: uc_open() 返回的句柄 -@address: 要映射到的新内存区域的起始地址。这个地址必须与4KB对齐,否则将返回UC_ERR_ARG错误。 -@size: 要映射到的新内存区域的大小。这个大小必须是4KB的倍数,否则将返回UC_ERR_ARG错误。 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size) -{ - MemoryRegion *mr; - uint64_t addr; - size_t count, len; - - if (size == 0) - // 没有要取消映射的区域 - return UC_ERR_OK; - - // 地址必须对齐到 uc->target_page_size - if ((address & uc->target_page_align) != 0) - return UC_ERR_ARG; - - // 大小必须是 uc->target_page_size 的倍数 - if ((size & uc->target_page_align) != 0) - return UC_ERR_ARG; - - if (uc->mem_redirect) { - address = uc->mem_redirect(address); - } - - // 检查用户请求的整个块是否被映射 - if (!check_mem_area(uc, address, size)) - return UC_ERR_NOMEM; - - // 如果这个区域跨越了相邻的区域,可能需要分割区域 - addr = address; - count = 0; - while(count < size) { - mr = memory_mapping(uc, addr); - len = (size_t)MIN(size - count, mr->end - addr); - if (!split_region(uc, mr, addr, len, true)) - return UC_ERR_NOMEM; - - // 取消映射 - mr = memory_mapping(uc, addr); - if (mr != 
NULL) - uc->memory_unmap(uc, mr); - count += len; - addr += len; - } - - return UC_ERR_OK; -} -``` - -使用示例: - -```cpp -if ((err = uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL))) { - uc_perror("uc_mem_map", err); - return 1; -} - -if ((err = uc_mem_unmap(uc, BASE, 0x1000))) { - uc_perror("uc_mem_unmap", err); - return 1; -} -``` - - - -### uc_mem_protect - -```c -uc_err uc_mem_protect(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); -``` - -设置模拟内存的权限 - -``` -@uc: uc_open() 返回的句柄 -@address: 要映射到的新内存区域的起始地址。这个地址必须与4KB对齐,否则将返回UC_ERR_ARG错误。 -@size: 要映射到的新内存区域的大小。这个大小必须是4KB的倍数,否则将返回UC_ERR_ARG错误。 -@perms: 映射区域的新权限。参数必须是UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC或这些的组合,否则返回UC_ERR_ARG错误。 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size, uint32_t perms) -{ - MemoryRegion *mr; - uint64_t addr = address; - size_t count, len; - bool remove_exec = false; - - if (size == 0) - // trivial case, no change - return UC_ERR_OK; - - // address must be aligned to uc->target_page_size - if ((address & uc->target_page_align) != 0) - return UC_ERR_ARG; - - // size must be multiple of uc->target_page_size - if ((size & uc->target_page_align) != 0) - return UC_ERR_ARG; - - // check for only valid permissions - if ((perms & ~UC_PROT_ALL) != 0) - return UC_ERR_ARG; - - if (uc->mem_redirect) { - address = uc->mem_redirect(address); - } - - // check that user's entire requested block is mapped - if (!check_mem_area(uc, address, size)) - return UC_ERR_NOMEM; - - // Now we know entire region is mapped, so change permissions - // We may need to split regions if this area spans adjacent regions - addr = address; - count = 0; - while(count < size) { - mr = memory_mapping(uc, addr); - len = (size_t)MIN(size - count, mr->end - addr); - if (!split_region(uc, mr, addr, len, false)) - return UC_ERR_NOMEM; - - mr = memory_mapping(uc, addr); - // will this remove EXEC permission? - if (((mr->perms & UC_PROT_EXEC) != 0) && ((perms & UC_PROT_EXEC) == 0)) - remove_exec = true; - mr->perms = perms; - uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0); - - count += len; - addr += len; - } - - // if EXEC permission is removed, then quit TB and continue at the same place - if (remove_exec) { - uc->quit_request = true; - uc_emu_stop(uc); - } - - return UC_ERR_OK; -} -``` - -使用示例: - -```cpp -if ((err = uc_mem_protect(uc, BASE, 0x1000, UC_PROT_ALL))) { //可读可写可执行 - uc_perror("uc_mem_protect", err); - return 1; -} -``` - - - -### uc_mem_regions - -```c -uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count); -``` - -检索由 uc_mem_map() 和 uc_mem_map_ptr() 映射的内存的信息。 - -这个API为@regions分配内存,用户之后必须通过free()释放这些内存来避免内存泄漏。 - -``` -@uc: uc_open() 返回的句柄 -@regions: 指向 uc_mem_region 结构体的数组的指针. 
由Unicorn申请,必须通过uc_free()释放这些内存 -@count: 指向@regions中包含的uc_mem_region结构体的数量的指针 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码分析 - -```c -uint32_t uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count) -{ - uint32_t i; - uc_mem_region *r = NULL; - - *count = uc->mapped_block_count; - - if (*count) { - r = g_malloc0(*count * sizeof(uc_mem_region)); - if (r == NULL) { - // 内存不足 - return UC_ERR_NOMEM; - } - } - - for (i = 0; i < *count; i++) { - r[i].begin = uc->mapped_blocks[i]->addr; - r[i].end = uc->mapped_blocks[i]->end - 1; - r[i].perms = uc->mapped_blocks[i]->perms; - } - - *regions = r; - - return UC_ERR_OK; -} -``` - -使用示例: - -```cpp -#include -#include -#include "unicorn/unicorn.h" -using namespace std; - -int main() -{ - uc_err err; - uc_engine* uc; - - if ((err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc))) { - uc_perror("uc_open", err); - return 1; - } - - if ((err = uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL))) { - uc_perror("uc_mem_map", err); - return 1; - } - - uc_mem_region *region; - uint32_t count; - - if ((err = uc_mem_regions(uc, ®ion, &count))) { - uc_perror("uc_mem_regions", err); - return 1; - } - - cout << "起始地址: 0x" << hex << region->begin << " 结束地址: 0x" << hex << region->end << " 内存权限: " <perms << " 已申请内存块数: " << count << endl; - - if ((err = uc_free(region))) { ////注意释放内存 - uc_perror("uc_free", err); - return 1; - } - - return 0; -} -``` - -输出 - -![image.png](API_Doc_Pic/kbrF7NdV6LDxnYI.png) - - - -### uc_free - -```c -uc_err uc_free(void *mem); -``` - -释放由 uc_context_alloc 和 uc_mem_regions 申请的内存 - -``` -@mem: 由uc_context_alloc (返回 *context), 或由 uc_mem_regions (返回 *regions)申请的内存 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_free(void *mem) -{ - g_free(mem); - return UC_ERR_OK; -} - -void g_free(gpointer ptr) -{ - free(ptr); -} -``` - -使用示例同uc_mem_regions - - - -### uc_context_alloc - -```c -uc_err uc_context_alloc(uc_engine *uc, uc_context **context); -``` - -分配一个可以与uc_context_{save,restore}一起使用的区域来执行CPU上下文的快速保存/回滚,包括寄存器和内部元数据。上下文不能在具有不同架构或模式的引擎实例之间共享。 - -``` -@uc: uc_open() 返回的句柄 -@context: 指向uc_engine*的指针。当这个函数成功返回时,将使用指向新上下文的指针更新它。之后必须使用uc_free()释放这些分配的内存。 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_context_alloc(uc_engine *uc, uc_context **context) -{ - struct uc_context **_context = context; - size_t size = cpu_context_size(uc->arch, uc->mode); - - *_context = malloc(size + sizeof(uc_context)); - if (*_context) { - (*_context)->size = size; - return UC_ERR_OK; - } else { - return UC_ERR_NOMEM; - } -} -``` - -使用示例 - -```cpp -#include -#include -#include "unicorn/unicorn.h" -using namespace std; - -#define ADDRESS 0x1000 -#define X86_CODE32_INC "\x40" // INC eax - -int main() -{ - uc_engine* uc; - uc_context* context; - uc_err err; - - int r_eax = 0x1; // EAX 寄存器 - - printf("===================================\n"); - printf("Save/restore CPU context in opaque blob\n"); - - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u\n", err); - return 0; - } - - uc_mem_map(uc, ADDRESS, 8 * 1024, UC_PROT_ALL); - - if (uc_mem_write(uc, ADDRESS, X86_CODE32_INC, sizeof(X86_CODE32_INC) - 1)) { - printf("Failed to write emulation code to memory, quit!\n"); - return 0; - } - - // 初始化寄存器 - uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); - - printf(">>> Running emulation for the first time\n"); - - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error 
returned %u: %s\n", - err, uc_strerror(err)); - } - - printf(">>> Emulation done. Below is the CPU context\n"); - - uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); - printf(">>> EAX = 0x%x\n", r_eax); - - // 申请并保存 CPU 上下文 - printf(">>> Saving CPU context\n"); - - err = uc_context_alloc(uc, &context); - if (err) { - printf("Failed on uc_context_alloc() with error returned: %u\n", err); - return 0; - } - - err = uc_context_save(uc, context); - if (err) { - printf("Failed on uc_context_save() with error returned: %u\n", err); - return 0; - } - - printf(">>> Running emulation for the second time\n"); - - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned %u: %s\n", - err, uc_strerror(err)); - } - - printf(">>> Emulation done. Below is the CPU context\n"); - - uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); - printf(">>> EAX = 0x%x\n", r_eax); - - // 恢复 CPU 上下文 - err = uc_context_restore(uc, context); - if (err) { - printf("Failed on uc_context_restore() with error returned: %u\n", err); - return 0; - } - - printf(">>> CPU context restored. Below is the CPU context\n"); - - uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); - printf(">>> EAX = 0x%x\n", r_eax); - - // 释放 CPU 上下文 - err = uc_free(context); - if (err) { - printf("Failed on uc_free() with error returned: %u\n", err); - return 0; - } - - uc_close(uc); - - return 0; -} -``` - -输出 - -![image.png](API_Doc_Pic/juNPWvwGUlraKRh.png) - - - -### uc_context_save - -```c -uc_err uc_context_save(uc_engine *uc, uc_context *context); -``` - -保存当前CPU上下文 - -``` -@uc: uc_open() 返回的句柄 -@context: uc_context_alloc() 返回的句柄 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_context_save(uc_engine *uc, uc_context *context) -{ - struct uc_context *_context = context; - memcpy(_context->data, uc->cpu->env_ptr, _context->size); - return UC_ERR_OK; -} -``` - -使用示例同uc_context_alloc() - - - -### uc_context_restore - -```c -uc_err uc_context_restore(uc_engine *uc, uc_context *context); -``` - -恢复已保存的CPU上下文 - -``` -@uc: uc_open() 返回的句柄 -@context: uc_context_alloc() 返回并且已使用 uc_context_save 保存的句柄 - -@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 -``` - -源码实现 - -```c -uc_err uc_context_restore(uc_engine *uc, uc_context *context) -{ - struct uc_context *_context = context; - memcpy(uc->cpu->env_ptr, _context->data, _context->size); - return UC_ERR_OK; -} -``` - -使用示例同uc_context_alloc() - - - -### uc_context_size - -```c -size_t uc_context_size(uc_engine *uc); -``` - -返回存储cpu上下文所需的大小。可以用来分配一个缓冲区来包含cpu上下文,并直接调用uc_context_save。 - -``` -@uc: uc_open() 返回的句柄 - -@return 存储cpu上下文所需的大小,类型为 size_t. -``` - -源码实现 - -```c -size_t uc_context_size(uc_engine *uc) -{ - return cpu_context_size(uc->arch, uc->mode); -} - -static size_t cpu_context_size(uc_arch arch, uc_mode mode) -{ - switch (arch) { -#ifdef UNICORN_HAS_M68K - case UC_ARCH_M68K: return M68K_REGS_STORAGE_SIZE; -#endif -#ifdef UNICORN_HAS_X86 - case UC_ARCH_X86: return X86_REGS_STORAGE_SIZE; -#endif -#ifdef UNICORN_HAS_ARM - case UC_ARCH_ARM: return mode & UC_MODE_BIG_ENDIAN ? ARM_REGS_STORAGE_SIZE_armeb : ARM_REGS_STORAGE_SIZE_arm; -#endif -#ifdef UNICORN_HAS_ARM64 - case UC_ARCH_ARM64: return mode & UC_MODE_BIG_ENDIAN ? 
ARM64_REGS_STORAGE_SIZE_aarch64eb : ARM64_REGS_STORAGE_SIZE_aarch64; -#endif -#ifdef UNICORN_HAS_MIPS - case UC_ARCH_MIPS: - if (mode & UC_MODE_MIPS64) { - if (mode & UC_MODE_BIG_ENDIAN) { - return MIPS64_REGS_STORAGE_SIZE_mips64; - } else { - return MIPS64_REGS_STORAGE_SIZE_mips64el; - } - } else { - if (mode & UC_MODE_BIG_ENDIAN) { - return MIPS_REGS_STORAGE_SIZE_mips; - } else { - return MIPS_REGS_STORAGE_SIZE_mipsel; - } - } -#endif -#ifdef UNICORN_HAS_SPARC - case UC_ARCH_SPARC: return mode & UC_MODE_SPARC64 ? SPARC64_REGS_STORAGE_SIZE : SPARC_REGS_STORAGE_SIZE; -#endif - default: return 0; - } -} -``` - -使用示例同uc_context_alloc() - - - +# Micro Unicorn-Engine API Documentation + +**Warning:** ***This is an unofficial API document by [kabeor](https://github.com/kabeor), If there are any mistakes, welcome to ask.*** + +**注意:** ***这是由kabeor制作的非官方API参考文档,如有错误欢迎提出,觉得不错可以给个star鼓励我*** + +之前对Capstone反汇编引擎的API分析文档已经被[官方](http://www.capstone-engine.org/documentation.html)收录 https://github.com/kabeor/Micro-Capstone-Engine-API-Documentation ,在实现自己想要做出的调试器的路上,又遇到了与Capstone同作者的国外大佬aquynh的另一个著名项目Unicorn,不巧的是,详尽的API文档仍然较少,更多的是大篇幅的代码,因此决定继续分析Unicorn框架,包括数据类型,已开放API及其实现。 + +Unicorn是一个轻量级, 多平台, 多架构的CPU模拟器框架,基于qemu开发,它可以代替CPU模拟代码的执行,常用于恶意代码分析,Fuzz等,该项目被用于Radare2逆向分析框架,GEF(gdb的pwn分析插件),Pwndbg,Angr符号执行框架等多个著名项目。接下来我也将通过阅读源码和代码实际调用来写一个简单的非官方版本的API手册。 + +Blog: kabeor.cn + +## 0x0 开发准备 + +Unicorn官网: http://www.unicorn-engine.org + +### 自行编译lib和dll方法 + +源码: https://github.com/unicorn-engine/unicorn/archive/master.zip + +下载后解压 + +文件结构如下: + +``` +. <- 主要引擎core engine + README + 编译文档COMPILE.TXT 等 +├── arch <- 各语言反编译支持的代码实现 +├── bindings <- 中间件 +│ ├── dotnet <- .Net 中间件 + 测试代码 +│ ├── go <- go 中间件 + 测试代码 +│ ├── haskell <- Haskell 中间件 + 测试代码 +│ ├── java <- Java 中间件 + 测试代码 +│ ├── pascal <- Pascal 中间件 + 测试代码 +│ ├── python <- Python 中间件 + 测试代码 +│ ├── ruby <- Ruby 中间件 + 测试代码 +│ └── vb6 <- VB6 中间件 + 测试代码 +├── docs <- 文档,主要是Unicorn的实现思路 +├── include <- C头文件 +├── msvc <- Microsoft Visual Studio 支持(Windows) +├── qemu <- qemu框架源码 +├── samples <- Unicorn使用示例 +└── tests <- C语言测试用例 +``` + +下面演示Windows10使用Visual Studio2019编译 + +打开msvc文件夹,内部结构如下 + +![image.png](API_Doc_Pic/iyodlNFY7hHEOgS.png) + +VS打开unicorn.sln项目文件,解决方案自动载入这些 + +![image.png](API_Doc_Pic/fOnNpSKvjYyc7QB.png) + +如果都需要的话,直接编译就好了,只需要其中几种,则右键解决方案->属性->配置属性 如下 + +![image.png](API_Doc_Pic/F3rSByYuNTGDtC1.png) + +生成选项中勾选你需要的支持项即可 + +项目编译属性为: +1. 使用多字节字符集 +2. 不使用预编译头 +3. 附加选项 /wd4018 /wd4244 /wd4267 +4. 
预处理器定义中添加 `_CRT_SECURE_NO_WARNINGS`

编译后会在当前文件夹的Debug目录下生成unicorn.lib静态库和unicorn.dll动态库,这样就可以开始使用Unicorn进行开发了。

编译到最后一项时可能会报错"系统找不到指定的路径",查看makefile发现问题出现在此处:

![image.png](API_Doc_Pic/YCMNcEVyX8GHoPb.png)

事实上只是无法把生成的lib和dll复制到新建的文件夹而已,到生成目录下去找即可。

官方目前提供的最新已编译版本为1.0.1,比较老,建议自己编译最新版本源码,以获得更多可用API。
Win32:https://github.com/unicorn-engine/unicorn/releases/download/1.0.1/unicorn-1.0.1-win32.zip
Win64:https://github.com/unicorn-engine/unicorn/releases/download/1.0.1/unicorn-1.0.1-win64.zip

**注意: 选用Win32还是Win64版本将决定后面开发程序的位数**

### 引擎调用测试

新建一个VS项目,将..\unicorn-master\include\unicorn中的头文件以及编译好的lib和dll文件全部拷贝到新建项目的主目录下

![image.png](API_Doc_Pic/I25E9sWcJpGyax7.png)

在VS解决方案中,头文件添加现有项unicorn.h,资源文件中添加unicorn.lib,重新生成解决方案

![image.png](API_Doc_Pic/OVaHwelNQ4tcLmo.png)

接下来测试我们生成的unicorn框架

主文件代码如下

```cpp
#include <stdio.h>
#include "unicorn/unicorn.h"

// 要模拟的指令
#define X86_CODE32 "\x41\x4a" // INC ecx; DEC edx

// 起始地址
#define ADDRESS 0x1000000

int main()
{
    uc_engine* uc;
    uc_err err;
    int r_ecx = 0x1234; // ECX 寄存器
    int r_edx = 0x7890; // EDX 寄存器

    printf("Emulate i386 code\n");

    // X86-32bit 模式初始化模拟器
    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err != UC_ERR_OK) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return -1;
    }

    // 给模拟器申请 2MB 内存
    uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);

    // 将要模拟的指令写入内存
    if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) {
        printf("Failed to write emulation code to memory, quit!\n");
        return -1;
    }

    // 初始化寄存器
    uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx);
    uc_reg_write(uc, UC_X86_REG_EDX, &r_edx);

    printf(">>> ECX = 0x%x\n", r_ecx);
    printf(">>> EDX = 0x%x\n", r_edx);

    // 模拟代码
    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0);
    if (err) {
        printf("Failed on uc_emu_start() with error returned %u: %s\n",
            err, uc_strerror(err));
    }

    // 打印寄存器值
    printf("Emulation done. 
Below is the CPU context\n"); + + uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); + uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); + printf(">>> ECX = 0x%x\n", r_ecx); + printf(">>> EDX = 0x%x\n", r_edx); + + uc_close(uc); + + return 0; +} +``` + +运行结果如下 + +![image.png](API_Doc_Pic/bpu4r8hgzUvO7Pm.png) + +ecx+1和edx-1成功模拟。 + +## 0x1 数据类型分析 + +### uc_arch + +架构选择 + +```cpp +typedef enum uc_arch { + UC_ARCH_ARM = 1, // ARM 架构 (包括 Thumb, Thumb-2) + UC_ARCH_ARM64, // ARM-64, 也称 AArch64 + UC_ARCH_MIPS, // Mips 架构 + UC_ARCH_X86, // X86 架构 (包括 x86 & x86-64) + UC_ARCH_PPC, // PowerPC 架构 (暂不支持) + UC_ARCH_SPARC, // Sparc 架构 + UC_ARCH_M68K, // M68K 架构 + UC_ARCH_MAX, +} uc_arch; +``` + + + +### uc_mode + +模式选择 + +```cpp +typedef enum uc_mode { + UC_MODE_LITTLE_ENDIAN = 0, // 小端序模式 (默认) + UC_MODE_BIG_ENDIAN = 1 << 30, // 大端序模式 + + // arm / arm64 + UC_MODE_ARM = 0, // ARM 模式 + UC_MODE_THUMB = 1 << 4, // THUMB 模式 (包括 Thumb-2) + UC_MODE_MCLASS = 1 << 5, // ARM's Cortex-M 系列 (暂不支持) + UC_MODE_V8 = 1 << 6, // ARMv8 A32 encodings for ARM (暂不支持) + + // arm (32bit) cpu 类型 + UC_MODE_ARM926 = 1 << 7, // ARM926 CPU 类型 + UC_MODE_ARM946 = 1 << 8, // ARM946 CPU 类型 + UC_MODE_ARM1176 = 1 << 9, // ARM1176 CPU 类型 + + // mips + UC_MODE_MICRO = 1 << 4, // MicroMips 模式 (暂不支持) + UC_MODE_MIPS3 = 1 << 5, // Mips III ISA (暂不支持) + UC_MODE_MIPS32R6 = 1 << 6, // Mips32r6 ISA (暂不支持) + UC_MODE_MIPS32 = 1 << 2, // Mips32 ISA + UC_MODE_MIPS64 = 1 << 3, // Mips64 ISA + + // x86 / x64 + UC_MODE_16 = 1 << 1, // 16-bit 模式 + UC_MODE_32 = 1 << 2, // 32-bit 模式 + UC_MODE_64 = 1 << 3, // 64-bit 模式 + + // ppc + UC_MODE_PPC32 = 1 << 2, // 32-bit 模式 (暂不支持) + UC_MODE_PPC64 = 1 << 3, // 64-bit 模式 (暂不支持) + UC_MODE_QPX = 1 << 4, // Quad Processing eXtensions 模式 (暂不支持) + + // sparc + UC_MODE_SPARC32 = 1 << 2, // 32-bit 模式 + UC_MODE_SPARC64 = 1 << 3, // 64-bit 模式 + UC_MODE_V9 = 1 << 4, // SparcV9 模式 (暂不支持) + + // m68k +} uc_mode; +``` + + + +### uc_err + +错误类型,是uc_errno()的返回值 + +```cpp +typedef enum uc_err { + UC_ERR_OK = 0, // 无错误 + UC_ERR_NOMEM, // 内存不足: uc_open(), uc_emulate() + UC_ERR_ARCH, // 不支持的架构: uc_open() + UC_ERR_HANDLE, // 不可用句柄 + UC_ERR_MODE, // 不可用/不支持架构: uc_open() + UC_ERR_VERSION, // 不支持版本 (中间件) + UC_ERR_READ_UNMAPPED, // 由于在未映射的内存上读取而退出模拟: uc_emu_start() + UC_ERR_WRITE_UNMAPPED, // 由于在未映射的内存上写入而退出模拟: uc_emu_start() + UC_ERR_FETCH_UNMAPPED, // 由于在未映射的内存中获取数据而退出模拟: uc_emu_start() + UC_ERR_HOOK, // 无效的hook类型: uc_hook_add() + UC_ERR_INSN_INVALID, // 由于指令无效而退出模拟: uc_emu_start() + UC_ERR_MAP, // 无效的内存映射: uc_mem_map() + UC_ERR_WRITE_PROT, // 由于UC_MEM_WRITE_PROT冲突而停止模拟: uc_emu_start() + UC_ERR_READ_PROT, // 由于UC_MEM_READ_PROT冲突而停止模拟: uc_emu_start() + UC_ERR_FETCH_PROT, // 由于UC_MEM_FETCH_PROT冲突而停止模拟: uc_emu_start() + UC_ERR_ARG, // 提供给uc_xxx函数的无效参数 + UC_ERR_READ_UNALIGNED, // 未对齐读取 + UC_ERR_WRITE_UNALIGNED, // 未对齐写入 + UC_ERR_FETCH_UNALIGNED, // 未对齐的提取 + UC_ERR_HOOK_EXIST, // 此事件的钩子已经存在 + UC_ERR_RESOURCE, // 资源不足: uc_emu_start() + UC_ERR_EXCEPTION, // 未处理的CPU异常 + UC_ERR_TIMEOUT // 模拟超时 +} uc_err; +``` + + + +### uc_mem_type + +UC_HOOK_MEM_*的所有内存访问类型 + +```cpp +typedef enum uc_mem_type { + UC_MEM_READ = 16, // 内存从..读取 + UC_MEM_WRITE, // 内存写入到.. + UC_MEM_FETCH, // 内存被获取 + UC_MEM_READ_UNMAPPED, // 未映射内存从..读取 + UC_MEM_WRITE_UNMAPPED, // 未映射内存写入到.. 
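    // 注: 带 UNMAPPED 后缀的类型表示访问了未映射的内存;下面带 PROT 后缀的类型表示内存已映射但访问权限不符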
+ UC_MEM_FETCH_UNMAPPED, // 未映射内存被获取 + UC_MEM_WRITE_PROT, // 内存写保护,但是已映射 + UC_MEM_READ_PROT, // 内存读保护,但是已映射 + UC_MEM_FETCH_PROT, // 内存不可执行,但是已映射 + UC_MEM_READ_AFTER, // 内存从 (成功访问的地址) 读入 +} uc_mem_type; +``` + + + +### uc_hook_type + +uc_hook_add()的所有hook类型参数 + +```cpp +typedef enum uc_hook_type { + // Hook 所有中断/syscall 事件 + UC_HOOK_INTR = 1 << 0, + // Hook 一条特定的指令 - 只支持非常小的指令子集 + UC_HOOK_INSN = 1 << 1, + // Hook 一段代码 + UC_HOOK_CODE = 1 << 2, + // Hook 基本块 + UC_HOOK_BLOCK = 1 << 3, + // 用于在未映射的内存上读取内存的Hook + UC_HOOK_MEM_READ_UNMAPPED = 1 << 4, + // Hook 无效的内存写事件 + UC_HOOK_MEM_WRITE_UNMAPPED = 1 << 5, + // Hook 执行事件的无效内存 + UC_HOOK_MEM_FETCH_UNMAPPED = 1 << 6, + // Hook 读保护的内存 + UC_HOOK_MEM_READ_PROT = 1 << 7, + // Hook 写保护的内存 + UC_HOOK_MEM_WRITE_PROT = 1 << 8, + // Hook 不可执行内存上的内存 + UC_HOOK_MEM_FETCH_PROT = 1 << 9, + // Hook 内存读取事件 + UC_HOOK_MEM_READ = 1 << 10, + // Hook 内存写入事件 + UC_HOOK_MEM_WRITE = 1 << 11, + // Hook 内存获取执行事件 + UC_HOOK_MEM_FETCH = 1 << 12, + // Hook 内存读取事件,只允许能成功访问的地址 + // 成功读取后将触发回调 + UC_HOOK_MEM_READ_AFTER = 1 << 13, + // Hook 无效指令异常 + UC_HOOK_INSN_INVALID = 1 << 14, +} uc_hook_type; +``` + + + +### 宏定义Hook类型 + +```cpp +// Hook 所有未映射内存访问的事件 +#define UC_HOOK_MEM_UNMAPPED (UC_HOOK_MEM_READ_UNMAPPED + UC_HOOK_MEM_WRITE_UNMAPPED + UC_HOOK_MEM_FETCH_UNMAPPED) +// Hook 所有对受保护内存的非法访问事件 +#define UC_HOOK_MEM_PROT (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_FETCH_PROT) +// Hook 所有非法读取存储器的事件 +#define UC_HOOK_MEM_READ_INVALID (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_READ_UNMAPPED) +// Hook 所有非法写入存储器的事件 +#define UC_HOOK_MEM_WRITE_INVALID (UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_WRITE_UNMAPPED) +// Hook 所有非法获取内存的事件 +#define UC_HOOK_MEM_FETCH_INVALID (UC_HOOK_MEM_FETCH_PROT + UC_HOOK_MEM_FETCH_UNMAPPED) +// Hook 所有非法的内存访问事件 +#define UC_HOOK_MEM_INVALID (UC_HOOK_MEM_UNMAPPED + UC_HOOK_MEM_PROT) +// Hook 所有有效内存访问的事件 +// 注意: UC_HOOK_MEM_READ 在 UC_HOOK_MEM_READ_PROT 和 UC_HOOK_MEM_READ_UNMAPPED 之前触发 , +// 因此这个Hook可能会触发一些无效的读取。 +#define UC_HOOK_MEM_VALID (UC_HOOK_MEM_READ + UC_HOOK_MEM_WRITE + UC_HOOK_MEM_FETCH) +``` + + + +### uc_mem_region + +由uc_mem_map()和uc_mem_map_ptr()映射内存区域 +使用uc_mem_regions()检索该内存区域的列表 + +```cpp +typedef struct uc_mem_region { + uint64_t begin; // 区域起始地址 (包括) + uint64_t end; // 区域结束地址 (包括) + uint32_t perms; // 区域的内存权限 +} uc_mem_region; +``` + + + +### uc_query_type + +uc_query()的所有查询类型参数 + +```cpp +typedef enum uc_query_type { + // 动态查询当前硬件模式 + UC_QUERY_MODE = 1, + UC_QUERY_PAGE_SIZE, + UC_QUERY_ARCH, +} uc_query_type; +``` + + + +### uc_context + +与uc_context_*()一起使用,管理CPU上下文的不透明存储 + +```cpp +struct uc_context; +typedef struct uc_context uc_context; +``` + + + +### uc_prot + +新映射区域的权限 + +```cpp +typedef enum uc_prot { + UC_PROT_NONE = 0, //无 + UC_PROT_READ = 1, //读取 + UC_PROT_WRITE = 2, //写入 + UC_PROT_EXEC = 4, //可执行 + UC_PROT_ALL = 7, //所有权限 +} uc_prot; +``` + + + +## 0x2 API分析 + +### uc_version + +```cpp +unsigned int uc_version(unsigned int *major, unsigned int *minor); +``` + +用于返回Unicorn API主次版本信息 + +``` +@major: API主版本号 +@minor: API次版本号 +@return 16进制数,计算方式 (major << 8 | minor) + +提示: 该返回值可以和宏UC_MAKE_VERSION比较 +``` + +源码实现 + +```c +unsigned int uc_version(unsigned int *major, unsigned int *minor) +{ + if (major != NULL && minor != NULL) { + *major = UC_API_MAJOR; //宏 + *minor = UC_API_MINOR; //宏 + } + + return (UC_API_MAJOR << 8) + UC_API_MINOR; //(major << 8 | minor) +} +``` + +编译后不可更改,不接受自定义版本 + +使用示例: + +```cpp +#include +#include "unicorn/unicorn.h" +using namespace std; + +int main() +{ + unsigned int version; + version = uc_version(NULL,NULL); + 
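    // 注: 返回值按 (major << 8) | minor 编码,可与宏 UC_MAKE_VERSION(major, minor) 的结果直接比较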
cout << hex << version << endl; + return 0; +} +``` + +输出: + +![image.png](API_Doc_Pic/q3JtOQRPl5xTFKp.png) + +得到版本号1.0.0 + + + +### uc_arch_supported + +```c +bool uc_arch_supported(uc_arch arch); +``` + +确定Unicorn是否支持当前架构 + +``` + @arch: 架构类型 (UC_ARCH_*) + @return 如果支持返回True +``` + +源码实现 + +```c +bool uc_arch_supported(uc_arch arch) +{ + switch (arch) { +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: return true; +#endif +#ifdef UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: return true; +#endif +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: return true; +#endif +#ifdef UNICORN_HAS_MIPS + case UC_ARCH_MIPS: return true; +#endif +#ifdef UNICORN_HAS_PPC + case UC_ARCH_PPC: return true; +#endif +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: return true; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: return true; +#endif + /* 无效或禁用架构 */ + default: return false; + } +} +``` + +使用示例: + +```cpp +#include +#include "unicorn/unicorn.h" +using namespace std; + +int main() +{ + cout << "是否支持UC_ARCH_X86架构:" << uc_arch_supported(UC_ARCH_X86) << endl; + return 0; +} +``` + +输出: + +![image.png](API_Doc_Pic/NExsavSgu4yMbBQ.png) + + + +### uc_open + +```c +uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **uc); +``` + +创建新的Unicorn实例 + +``` +@arch: 架构类型 (UC_ARCH_*) +@mode: 硬件模式. 由 UC_MODE_* 组合 +@uc: 指向 uc_engine 的指针, 返回时更新 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) +{ + struct uc_struct *uc; + + if (arch < UC_ARCH_MAX) { + uc = calloc(1, sizeof(*uc)); //申请内存 + if (!uc) { + // 内存不足 + return UC_ERR_NOMEM; + } + + uc->errnum = UC_ERR_OK; + uc->arch = arch; + uc->mode = mode; + + // 初始化 + // uc->ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) }; + uc->ram_list.blocks.tqh_first = NULL; + uc->ram_list.blocks.tqh_last = &(uc->ram_list.blocks.tqh_first); + + uc->memory_listeners.tqh_first = NULL; + uc->memory_listeners.tqh_last = &uc->memory_listeners.tqh_first; + + uc->address_spaces.tqh_first = NULL; + uc->address_spaces.tqh_last = &uc->address_spaces.tqh_first; + + switch(arch) { // 根据架构进行预处理 + default: + break; +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: + if ((mode & ~UC_MODE_M68K_MASK) || + !(mode & UC_MODE_BIG_ENDIAN)) { + free(uc); + return UC_ERR_MODE; + } + uc->init_arch = m68k_uc_init; + break; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: + if ((mode & ~UC_MODE_X86_MASK) || + (mode & UC_MODE_BIG_ENDIAN) || + !(mode & (UC_MODE_16|UC_MODE_32|UC_MODE_64))) { + free(uc); + return UC_ERR_MODE; + } + uc->init_arch = x86_uc_init; + break; +#endif +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: + if ((mode & ~UC_MODE_ARM_MASK)) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_BIG_ENDIAN) { + uc->init_arch = armeb_uc_init; + } else { + uc->init_arch = arm_uc_init; + } + + if (mode & UC_MODE_THUMB) + uc->thumb = 1; + break; +#endif +#ifdef UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: + if (mode & ~UC_MODE_ARM_MASK) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_BIG_ENDIAN) { + uc->init_arch = arm64eb_uc_init; + } else { + uc->init_arch = arm64_uc_init; + } + break; +#endif + +#if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) || defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL) + case UC_ARCH_MIPS: + if ((mode & ~UC_MODE_MIPS_MASK) || + !(mode & (UC_MODE_MIPS32|UC_MODE_MIPS64))) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_BIG_ENDIAN) { +#ifdef UNICORN_HAS_MIPS + if (mode & UC_MODE_MIPS32) + uc->init_arch = mips_uc_init; +#endif +#ifdef 
UNICORN_HAS_MIPS64 + if (mode & UC_MODE_MIPS64) + uc->init_arch = mips64_uc_init; +#endif + } else { // 小端序 +#ifdef UNICORN_HAS_MIPSEL + if (mode & UC_MODE_MIPS32) + uc->init_arch = mipsel_uc_init; +#endif +#ifdef UNICORN_HAS_MIPS64EL + if (mode & UC_MODE_MIPS64) + uc->init_arch = mips64el_uc_init; +#endif + } + break; +#endif + +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: + if ((mode & ~UC_MODE_SPARC_MASK) || + !(mode & UC_MODE_BIG_ENDIAN) || + !(mode & (UC_MODE_SPARC32|UC_MODE_SPARC64))) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_SPARC64) + uc->init_arch = sparc64_uc_init; + else + uc->init_arch = sparc_uc_init; + break; +#endif + } + + if (uc->init_arch == NULL) { + return UC_ERR_ARCH; + } + + if (machine_initialize(uc)) + return UC_ERR_RESOURCE; + + *result = uc; + + if (uc->reg_reset) + uc->reg_reset(uc); + + return UC_ERR_OK; + } else { + return UC_ERR_ARCH; + } +} +``` + +**注意: uc_open会申请堆内存,使用完必须用uc_close释放,否则会发生泄露** + +使用示例: + +```cpp +#include +#include "unicorn/unicorn.h" +using namespace std; + +int main() +{ + uc_engine* uc; + uc_err err; + + //// 初始化 X86-32bit 模式模拟器 + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + + if (!err) + cout << "uc引擎创建成功" << endl; + + //// 关闭uc + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + + if (!err) + cout << "uc引擎关闭成功" << endl; + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/dqKBwAWUL7XvypE.png) + + + +### uc_close + +```c +uc_err uc_close(uc_engine *uc); +``` + +关闭一个uc实例,将释放内存。关闭后无法恢复。 + +``` +@uc: 指向由 uc_open() 返回的指针 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_close(uc_engine *uc) +{ + int i; + struct list_item *cur; + struct hook *hook; + + // 清理内部数据 + if (uc->release) + uc->release(uc->tcg_ctx); + g_free(uc->tcg_ctx); + + // 清理 CPU. + g_free(uc->cpu->tcg_as_listener); + g_free(uc->cpu->thread); + + // 清理所有 objects. 
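    // 注: 先将各对象的引用计数强制置 1,确保随后的 object_unref() 一定会释放对应对象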
+ OBJECT(uc->machine_state->accelerator)->ref = 1; + OBJECT(uc->machine_state)->ref = 1; + OBJECT(uc->owner)->ref = 1; + OBJECT(uc->root)->ref = 1; + + object_unref(uc, OBJECT(uc->machine_state->accelerator)); + object_unref(uc, OBJECT(uc->machine_state)); + object_unref(uc, OBJECT(uc->cpu)); + object_unref(uc, OBJECT(&uc->io_mem_notdirty)); + object_unref(uc, OBJECT(&uc->io_mem_unassigned)); + object_unref(uc, OBJECT(&uc->io_mem_rom)); + object_unref(uc, OBJECT(uc->root)); + + // 释放内存 + g_free(uc->system_memory); + + // 释放相关线程 + if (uc->qemu_thread_data) + g_free(uc->qemu_thread_data); + + // 释放其他数据 + free(uc->l1_map); + + if (uc->bounce.buffer) { + free(uc->bounce.buffer); + } + + g_hash_table_foreach(uc->type_table, free_table, uc); + g_hash_table_destroy(uc->type_table); + + for (i = 0; i < DIRTY_MEMORY_NUM; i++) { + free(uc->ram_list.dirty_memory[i]); + } + + // 释放hook和hook列表 + for (i = 0; i < UC_HOOK_MAX; i++) { + cur = uc->hook[i].head; + // hook 可存在于多个列表,可通过计数获取释放的时间 + while (cur) { + hook = (struct hook *)cur->data; + if (--hook->refs == 0) { + free(hook); + } + cur = cur->next; + } + list_clear(&uc->hook[i]); + } + + free(uc->mapped_blocks); + + // 最后释放uc自身 + memset(uc, 0, sizeof(*uc)); + free(uc); + + return UC_ERR_OK; +} +``` + +使用实例同uc_open() + + + +### uc_query + +```c +uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result); +``` + +查询引擎的内部状态 + +``` + @uc: uc_open() 返回的句柄 + @type: uc_query_type 中枚举的类型 + + @result: 保存被查询的内部状态的指针 + + @return: 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result) +{ + if (type == UC_QUERY_PAGE_SIZE) { + *result = uc->target_page_size; + return UC_ERR_OK; + } + + if (type == UC_QUERY_ARCH) { + *result = uc->arch; + return UC_ERR_OK; + } + + switch(uc->arch) { +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: + return uc->query(uc, type, result); +#endif + default: + return UC_ERR_ARG; + } + + return UC_ERR_OK; +} +``` + +使用示例: + +```cpp +#include +#include "unicorn/unicorn.h" +using namespace std; +int main() +{ + uc_engine* uc; + uc_err err; + + //// Initialize emulator in X86-32bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + if (!err) + cout << "uc实例创建成功" << endl; + + size_t result[] = {0}; + err = uc_query(uc, UC_QUERY_ARCH, result); // 查询架构 + if (!err) + cout << "查询成功: " << *result << endl; + + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + if (!err) + cout << "uc实例关闭成功" << endl; + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/ZtRKvUoaPTlshJ4.png) + +架构查询结果为4,对应的正是UC_ARCH_X86 + + + +### uc_errno + +```c +uc_err uc_errno(uc_engine *uc); +``` + +当某个API函数失败时,报告最后的错误号,一旦被访问,uc_errno可能不会保留原来的值。 + +``` +@uc: uc_open() 返回的句柄 + +@return: 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_errno(uc_engine *uc) +{ + return uc->errnum; +} +``` + +使用示例: + +```cpp +#include +#include "unicorn/unicorn.h" +using namespace std; + +int main() +{ + uc_engine* uc; + uc_err err; + + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + if (!err) + cout << "uc实例创建成功" << endl; + + err = uc_errno(uc); + cout << "错误号: " << err << endl; + + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + 
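    // 注: uc_close() 成功返回后,uc 句柄指向的内存已被释放,之后不能再对该句柄调用任何 API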
if (!err)
        cout << "uc实例关闭成功" << endl;

    return 0;
}
```

输出

![image.png](API_Doc_Pic/IZhyWrGebA5tT4i.png)

无错误,输出错误号为0



### uc_strerror

```c
const char *uc_strerror(uc_err code);
```

返回给定错误号的解释

```
 @code: 错误号

 @return: 指向给定错误号的解释的字符串指针
```

源码实现

```cpp
const char *uc_strerror(uc_err code)
{
    switch(code) {
    default:
        return "Unknown error code";
    case UC_ERR_OK:
        return "OK (UC_ERR_OK)";
    case UC_ERR_NOMEM:
        return "No memory available or memory not present (UC_ERR_NOMEM)";
    case UC_ERR_ARCH:
        return "Invalid/unsupported architecture (UC_ERR_ARCH)";
    case UC_ERR_HANDLE:
        return "Invalid handle (UC_ERR_HANDLE)";
    case UC_ERR_MODE:
        return "Invalid mode (UC_ERR_MODE)";
    case UC_ERR_VERSION:
        return "Different API version between core & binding (UC_ERR_VERSION)";
    case UC_ERR_READ_UNMAPPED:
        return "Invalid memory read (UC_ERR_READ_UNMAPPED)";
    case UC_ERR_WRITE_UNMAPPED:
        return "Invalid memory write (UC_ERR_WRITE_UNMAPPED)";
    case UC_ERR_FETCH_UNMAPPED:
        return "Invalid memory fetch (UC_ERR_FETCH_UNMAPPED)";
    case UC_ERR_HOOK:
        return "Invalid hook type (UC_ERR_HOOK)";
    case UC_ERR_INSN_INVALID:
        return "Invalid instruction (UC_ERR_INSN_INVALID)";
    case UC_ERR_MAP:
        return "Invalid memory mapping (UC_ERR_MAP)";
    case UC_ERR_WRITE_PROT:
        return "Write to write-protected memory (UC_ERR_WRITE_PROT)";
    case UC_ERR_READ_PROT:
        return "Read from non-readable memory (UC_ERR_READ_PROT)";
    case UC_ERR_FETCH_PROT:
        return "Fetch from non-executable memory (UC_ERR_FETCH_PROT)";
    case UC_ERR_ARG:
        return "Invalid argument (UC_ERR_ARG)";
    case UC_ERR_READ_UNALIGNED:
        return "Read from unaligned memory (UC_ERR_READ_UNALIGNED)";
    case UC_ERR_WRITE_UNALIGNED:
        return "Write to unaligned memory (UC_ERR_WRITE_UNALIGNED)";
    case UC_ERR_FETCH_UNALIGNED:
        return "Fetch from unaligned memory (UC_ERR_FETCH_UNALIGNED)";
    case UC_ERR_RESOURCE:
        return "Insufficient resource (UC_ERR_RESOURCE)";
    case UC_ERR_EXCEPTION:
        return "Unhandled CPU exception (UC_ERR_EXCEPTION)";
    case UC_ERR_TIMEOUT:
        return "Emulation timed out (UC_ERR_TIMEOUT)";
    }
}
```

使用示例:

```cpp
#include <iostream>
#include "unicorn/unicorn.h"
using namespace std;

int main()
{
    uc_engine* uc;
    uc_err err;

    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err != UC_ERR_OK) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return -1;
    }
    if (!err)
        cout << "uc实例创建成功" << endl;

    err = uc_errno(uc);
    cout << "错误号: " << err << " 错误描述: " << uc_strerror(err) << endl;

    err = uc_close(uc);
    if (err != UC_ERR_OK) {
        printf("Failed on uc_close() with error returned: %u\n", err);
        return -1;
    }
    if (!err)
        cout << "uc实例关闭成功" << endl;

    return 0;
}
```



### uc_reg_write

```c
uc_err uc_reg_write(uc_engine *uc, int regid, const void *value);
```

将值写入寄存器

```
@uc: uc_open()返回的句柄
@regid: 将被写入的寄存器ID
@value: 指向保存寄存器值的指针

@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型
```

源码实现

```c
uc_err uc_reg_write(uc_engine *uc, int regid, const void *value)
{
    return uc_reg_write_batch(uc, &regid, (void *const *)&value, 1);
}

uc_err uc_reg_write_batch(uc_engine *uc, int *ids, void *const *vals, int count)
{
    int ret = UC_ERR_OK;
    if (uc->reg_write)
        ret = uc->reg_write(uc, (unsigned int *)ids, vals, count); //结构体中写入
    else
        return UC_ERR_EXCEPTION;

    return ret;
}
```

使用示例:

```cpp
#include <iostream>
#include "unicorn/unicorn.h"
using namespace std;

int main()
{
    uc_engine* uc;
    uc_err err;

    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err != UC_ERR_OK) {
        printf("Failed on uc_open() with error returned: %u\n", err);
        return -1;
    }
    if (!err)
        cout << "uc实例创建成功" << endl;

    int r_ecx = 0x12;
    err = uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx);
    if (!err)
        cout << "写入成功: " << r_ecx << endl;

    err = uc_close(uc);
    if (err != UC_ERR_OK) {
        printf("Failed on uc_close() with error returned: %u\n", err);
        return -1;
    }
    if (!err)
        cout << "uc实例关闭成功" << endl;

    return 0;
}
```

输出

![image.png](API_Doc_Pic/DkztJcigHCdmnRp.png)



### uc_reg_read

```c
uc_err uc_reg_read(uc_engine *uc, int regid, void *value);
```

读取寄存器的值

```
@uc: uc_open()返回的句柄
@regid: 将被读取的寄存器ID
+@value: 指向保存寄存器值的指针 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_reg_read(uc_engine *uc, int regid, void *value) +{ + return uc_reg_read_batch(uc, ®id, &value, 1); +} + +uc_err uc_reg_read_batch(uc_engine *uc, int *ids, void **vals, int count) +{ + if (uc->reg_read) + uc->reg_read(uc, (unsigned int *)ids, vals, count); + else + return -1; + + return UC_ERR_OK; +} +``` + +使用示例: + +```cpp +#include +#include "unicorn/unicorn.h" +using namespace std; + +int main() +{ + uc_engine* uc; + uc_err err; + + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + if (!err) + cout << "uc实例创建成功" << endl; + + int r_eax = 0x12; + err = uc_reg_write(uc, UC_X86_REG_ECX, &r_eax); + if (!err) + cout << "写入成功: " << r_eax << endl; + + int recv_eax; + err = uc_reg_read(uc, UC_X86_REG_ECX, &recv_eax); + if (!err) + cout << "读取成功: " << recv_eax << endl; + + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + if (!err) + cout << "uc实例关闭成功" << endl; + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/ABkexFCfphu3zIg.png) + + + +### uc_reg_write_batch + +```c +uc_err uc_reg_write_batch(uc_engine *uc, int *regs, void *const *vals, int count); +``` + +同时将多个值写入多个寄存器 + +``` +@uc: uc_open()返回的句柄 +@regid: 存储将被写入的多个寄存器ID的数组 +@value: 指向保存多个值的数组的指针 +@count: *regs 和 *vals 数组的长度 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_reg_write_batch(uc_engine *uc, int *ids, void *const *vals, int count) +{ + int ret = UC_ERR_OK; + if (uc->reg_write) + ret = uc->reg_write(uc, (unsigned int *)ids, vals, count); + else + return UC_ERR_EXCEPTION; + + return ret; +} +``` + +使用示例: + +```cpp +#include +#include +#include "unicorn/unicorn.h" +using namespace std; + +int syscall_abi[] = { + UC_X86_REG_RAX, UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX, + UC_X86_REG_R10, UC_X86_REG_R8, UC_X86_REG_R9 +}; + +uint64_t vals[7] = { 200, 10, 11, 12, 13, 14, 15 }; + +void* ptrs[7]; + +int main() +{ + int i; + uc_err err; + uc_engine* uc; + + // set up register pointers + for (i = 0; i < 7; i++) { + ptrs[i] = &vals[i]; + } + + if ((err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc))) { + uc_perror("uc_open", err); + return 1; + } + + // reg_write_batch + printf("reg_write_batch({200, 10, 11, 12, 13, 14, 15})\n"); + if ((err = uc_reg_write_batch(uc, syscall_abi, ptrs, 7))) { + uc_perror("uc_reg_write_batch", err); + return 1; + } + + // reg_read_batch + memset(vals, 0, sizeof(vals)); + if ((err = uc_reg_read_batch(uc, syscall_abi, ptrs, 7))) { + uc_perror("uc_reg_read_batch", err); + return 1; + } + + printf("reg_read_batch = {"); + + for (i = 0; i < 7; i++) { + if (i != 0) printf(", "); + printf("%" PRIu64, vals[i]); + } + + printf("}\n"); + + uint64_t var[7] = { 0 }; + for (int i = 0; i < 7; i++) + { + cout << syscall_abi[i] << " "; + printf("%" PRIu64, vals[i]); + cout << endl; + } + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/l1AhdxgKE2U3tZB.png) + + + +### uc_reg_read_batch + +```c +uc_err uc_reg_read_batch(uc_engine *uc, int *regs, void **vals, int count); +``` + +同时读取多个寄存器的值。 + +``` +@uc: uc_open()返回的句柄 +@regid: 存储将被读取的多个寄存器ID的数组 +@value: 指向保存多个值的数组的指针 +@count: *regs 和 *vals 数组的长度 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_reg_read_batch(uc_engine *uc, int *ids, void **vals, int count) +{ + if (uc->reg_read) + uc->reg_read(uc, (unsigned int *)ids, vals, 
count); + else + return -1; + + return UC_ERR_OK; +} +``` + +使用示例同uc_reg_write_batch()。 + + + +### uc_mem_write + +```c +uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *bytes, size_t size); +``` + +在内存中写入一段字节码。 + +``` + @uc: uc_open() 返回的句柄 + @address: 写入字节的起始地址 + @bytes: 指向一个包含要写入内存的数据的指针 + @size: 要写入的内存大小。 + + 注意: @bytes 必须足够大以包含 @size 字节。 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *_bytes, size_t size) +{ + size_t count = 0, len; + const uint8_t *bytes = _bytes; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + if (!check_mem_area(uc, address, size)) + return UC_ERR_WRITE_UNMAPPED; + + // 内存区域可以重叠相邻的内存块 + while(count < size) { + MemoryRegion *mr = memory_mapping(uc, address); + if (mr) { + uint32_t operms = mr->perms; + if (!(operms & UC_PROT_WRITE)) // 没有写保护 + // 标记为可写 + uc->readonly_mem(mr, false); + + len = (size_t)MIN(size - count, mr->end - address); + if (uc->write_mem(&uc->as, address, bytes, len) == false) + break; + + if (!(operms & UC_PROT_WRITE)) // 没有写保护 + // 设置写保护 + uc->readonly_mem(mr, true); + + count += len; + address += len; + bytes += len; + } else // 此地址尚未被映射 + break; + } + + if (count == size) + return UC_ERR_OK; + else + return UC_ERR_WRITE_UNMAPPED; +} +``` + +使用示例: + +```cpp +#include +#include +#include "unicorn/unicorn.h" +using namespace std; + +#define X86_CODE32 "\x41\x4a" // INC ecx; DEC edx +#define ADDRESS 0x1000 + +int main() +{ + uc_engine* uc; + uc_err err; + + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { + printf("Failed to write emulation code to memory, quit!\n"); + return -1; + } + + uint32_t code; + + if(uc_mem_read(uc,ADDRESS,&code, sizeof(code))) { + printf("Failed to read emulation code to memory, quit!\n"); + return -1; + } + + cout << hex << code << endl; + + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/l4HhgDzcJIVvFNU.png) + + + +### uc_mem_read + +```c +uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *bytes, size_t size); +``` + +从内存中读取字节。 + +``` + @uc: uc_open() 返回的句柄 + @address: 读取字节的起始地址 + @bytes: 指向一个包含要读取内存的数据的指针 + @size: 要读取的内存大小。 + + 注意: @bytes 必须足够大以包含 @size 字节。 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size) +{ + size_t count = 0, len; + uint8_t *bytes = _bytes; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + if (!check_mem_area(uc, address, size)) + return UC_ERR_READ_UNMAPPED; + + // 内存区域可以重叠相邻的内存块 + while(count < size) { + MemoryRegion *mr = memory_mapping(uc, address); + if (mr) { + len = (size_t)MIN(size - count, mr->end - address); + if (uc->read_mem(&uc->as, address, bytes, len) == false) + break; + count += len; + address += len; + bytes += len; + } else // 此地址尚未被映射 + break; + } + + if (count == size) + return UC_ERR_OK; + else + return UC_ERR_READ_UNMAPPED; +} +``` + +使用示例同uc_mem_write() + + + +### uc_emu_start + +```c +uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count); +``` + +在指定的时间内模拟机器码。 + +``` +@uc: uc_open() 返回的句柄 +@begin: 开始模拟的地址 
+@until: 模拟停止的地址 (当到达该地址时) +@timeout: 模拟代码的持续时间(以微秒计)。当这个值为0时,将在无限时间内模拟代码,直到代码完成。 +@count: 要模拟的指令数。当这个值为0时,将模拟所有可用的代码,直到代码完成 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count) +{ + // 重制计数器 + uc->emu_counter = 0; + uc->invalid_error = UC_ERR_OK; + uc->block_full = false; + uc->emulation_done = false; + uc->timed_out = false; + + switch(uc->arch) { + default: + break; +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: + uc_reg_write(uc, UC_M68K_REG_PC, &begin); + break; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: + switch(uc->mode) { + default: + break; + case UC_MODE_16: { + uint64_t ip; + uint16_t cs; + + uc_reg_read(uc, UC_X86_REG_CS, &cs); + // 抵消后面增加的 IP 和 CS + ip = begin - cs*16; + uc_reg_write(uc, UC_X86_REG_IP, &ip); + break; + } + case UC_MODE_32: + uc_reg_write(uc, UC_X86_REG_EIP, &begin); + break; + case UC_MODE_64: + uc_reg_write(uc, UC_X86_REG_RIP, &begin); + break; + } + break; +#endif +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: + uc_reg_write(uc, UC_ARM_REG_R15, &begin); + break; +#endif +#ifdef UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: + uc_reg_write(uc, UC_ARM64_REG_PC, &begin); + break; +#endif +#ifdef UNICORN_HAS_MIPS + case UC_ARCH_MIPS: + // TODO: MIPS32/MIPS64/BIGENDIAN etc + uc_reg_write(uc, UC_MIPS_REG_PC, &begin); + break; +#endif +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: + // TODO: Sparc/Sparc64 + uc_reg_write(uc, UC_SPARC_REG_PC, &begin); + break; +#endif + } + + uc->stop_request = false; + + uc->emu_count = count; + // 如果不需要计数,则移除计数挂钩hook + if (count <= 0 && uc->count_hook != 0) { + uc_hook_del(uc, uc->count_hook); + uc->count_hook = 0; + } + // 设置计数hook记录指令数 + if (count > 0 && uc->count_hook == 0) { + uc_err err; + // 对计数指令的回调必须在所有其他操作之前运行,因此必须在hook列表的开头插入hook,而不是附加hook + uc->hook_insert = 1; + err = uc_hook_add(uc, &uc->count_hook, UC_HOOK_CODE, hook_count_cb, NULL, 1, 0); + // 恢复到 uc_hook_add() + uc->hook_insert = 0; + if (err != UC_ERR_OK) { + return err; + } + } + + uc->addr_end = until; + + if (timeout) + enable_emu_timer(uc, timeout * 1000); // microseconds -> nanoseconds + + if (uc->vm_start(uc)) { + return UC_ERR_RESOURCE; + } + + // 模拟完成 + uc->emulation_done = true; + + if (timeout) { + // 等待超时 + qemu_thread_join(&uc->timer); + } + + if(uc->timed_out) + return UC_ERR_TIMEOUT; + + return uc->invalid_error; +} +``` + +使用示例: + +```cpp +#include +#include +#include "unicorn/unicorn.h" +using namespace std; + +#define X86_CODE32 "\x33\xC0" // xor eax, eax +#define ADDRESS 0x1000 + +int main() +{ + uc_engine* uc; + uc_err err; + + int r_eax = 0x111; + + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { + printf("Failed to write emulation code to memory, quit!\n"); + return -1; + } + + uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); + printf(">>> before EAX = 0x%x\n", r_eax); + + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned %u: %s\n", + err, uc_strerror(err)); + } + + uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); + printf(">>> after EAX = 0x%x\n", r_eax); + + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + + return 0; +} +``` + +输出 + 
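(按示例代码推断,终端应打印 `>>> before EAX = 0x111` 与 `>>> after EAX = 0x0`,截图如下)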
+
+
+### uc_emu_stop
+
+```c
+uc_err uc_emu_stop(uc_engine *uc);
+```
+
+Stop emulation.
+
+This is typically called from callback functions registered via the tracing APIs.
+
+```
+@uc: handle returned by uc_open()
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_emu_stop(uc_engine *uc)
+{
+    if (uc->emulation_done)
+        return UC_ERR_OK;
+
+    uc->stop_request = true;
+
+    if (uc->current_cpu) {
+        // exit the current thread
+        cpu_exit(uc->current_cpu);
+    }
+
+    return UC_ERR_OK;
+}
+```
+
+Usage example:
+
+```cpp
+uc_emu_stop(uc);
+```
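+
+Since uc_emu_stop() is normally called from inside a callback, a minimal hedged sketch in context (the hook registration itself is covered by uc_hook_add() below; STOP_ADDR is a hypothetical constant):
+
+```cpp
+// UC_HOOK_CODE callback: stop emulation once execution reaches STOP_ADDR
+static void hook_code_stop(uc_engine *uc, uint64_t addr, uint32_t size, void *user_data)
+{
+    if (addr >= STOP_ADDR)
+        uc_emu_stop(uc);
+}
+```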
+
+
+### uc_hook_add
+
+```c
+uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback,
+        void *user_data, uint64_t begin, uint64_t end, ...);
+```
+
+Register a callback for a hook event. The callback is invoked whenever the hook event is triggered.
+
+```
+ @uc: handle returned by uc_open()
+ @hh: handle of the registered hook. Used by uc_hook_del()
+ @type: hook type
+ @callback: callback to run when the instruction is hit
+ @user_data: user-defined data. It will be passed to the callback as its last argument @user_data
+ @begin: start address of the region covered by the callback (inclusive)
+ @end: end address of the region covered by the callback (inclusive)
+   NOTE 1: the callback is only invoked if the related address is in range [@begin, @end]
+   NOTE 2: if @begin > @end, the callback is invoked whenever this hook type is triggered
+ @...: variadic arguments (depending on @type)
+   NOTE: if @type = UC_HOOK_INSN, this is the instruction ID (e.g. UC_X86_INS_OUT)
+
+ @return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback,
+        void *user_data, uint64_t begin, uint64_t end, ...)
+{
+    int ret = UC_ERR_OK;
+    int i = 0;
+
+    struct hook *hook = calloc(1, sizeof(struct hook));
+    if (hook == NULL) {
+        return UC_ERR_NOMEM;
+    }
+
+    hook->begin = begin;
+    hook->end = end;
+    hook->type = type;
+    hook->callback = callback;
+    hook->user_data = user_data;
+    hook->refs = 0;
+    *hh = (uc_hook)hook;
+
+    // UC_HOOK_INSN has an extra argument: the instruction ID
+    if (type & UC_HOOK_INSN) {
+        va_list valist;
+
+        va_start(valist, end);
+        hook->insn = va_arg(valist, int);
+        va_end(valist);
+
+        if (uc->insn_hook_validate) {
+            if (!uc->insn_hook_validate(hook->insn)) {
+                free(hook);
+                return UC_ERR_HOOK;
+            }
+        }
+
+        if (uc->hook_insert) {
+            if (list_insert(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) {
+                free(hook);
+                return UC_ERR_NOMEM;
+            }
+        } else {
+            if (list_append(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) {
+                free(hook);
+                return UC_ERR_NOMEM;
+            }
+        }
+
+        hook->refs++;
+        return UC_ERR_OK;
+    }
+
+    while ((type >> i) > 0) {
+        if ((type >> i) & 1) {
+            if (i < UC_HOOK_MAX) {
+                if (uc->hook_insert) {
+                    if (list_insert(&uc->hook[i], hook) == NULL) {
+                        if (hook->refs == 0) {
+                            free(hook);
+                        }
+                        return UC_ERR_NOMEM;
+                    }
+                } else {
+                    if (list_append(&uc->hook[i], hook) == NULL) {
+                        if (hook->refs == 0) {
+                            free(hook);
+                        }
+                        return UC_ERR_NOMEM;
+                    }
+                }
+                hook->refs++;
+            }
+        }
+        i++;
+    }
+
+    if (hook->refs == 0) {
+        free(hook);
+    }
+
+    return ret;
+}
+```
+
+Usage example:
+
+```cpp
+#include <cstdio>
+#include <cstring>
+#include <cinttypes>
+#include "unicorn/unicorn.h"
+using namespace std;
+
+int syscall_abi[] = {
+    UC_X86_REG_RAX, UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX,
+    UC_X86_REG_R10, UC_X86_REG_R8, UC_X86_REG_R9
+};
+
+uint64_t vals[7] = { 200, 10, 11, 12, 13, 14, 15 };
+
+void* ptrs[7];
+
+void uc_perror(const char* func, uc_err err)
+{
+    fprintf(stderr, "Error in %s(): %s\n", func, uc_strerror(err));
+}
+
+#define BASE 0x10000
+
+// mov rax, 100; mov rdi, 1; mov rsi, 2; mov rdx, 3; mov r10, 4; mov r8, 5; mov r9, 6; syscall
+#define CODE "\x48\xc7\xc0\x64\x00\x00\x00\x48\xc7\xc7\x01\x00\x00\x00\x48\xc7\xc6\x02\x00\x00\x00\x48\xc7\xc2\x03\x00\x00\x00\x49\xc7\xc2\x04\x00\x00\x00\x49\xc7\xc0\x05\x00\x00\x00\x49\xc7\xc1\x06\x00\x00\x00\x0f\x05"
+
+// registered as UC_HOOK_CODE below, so it must use the code-hook signature
+void hook_syscall(uc_engine* uc, uint64_t addr, uint32_t size, void* user_data)
+{
+    int i;
+
+    uc_reg_read_batch(uc, syscall_abi, ptrs, 7);
+
+    printf("syscall: {");
+
+    for (i = 0; i < 7; i++) {
+        if (i != 0) printf(", ");
+        printf("%" PRIu64, vals[i]);
+    }
+
+    printf("}\n");
+}
+
+void hook_code(uc_engine* uc, uint64_t addr, uint32_t size, void* user_data)
+{
+    printf("HOOK_CODE: 0x%" PRIx64 ", 0x%x\n", addr, size);
+}
+
+int main()
+{
+    int i;
+    uc_hook sys_hook;
+    uc_err err;
+    uc_engine* uc;
+
+    for (i = 0; i < 7; i++) {
+        ptrs[i] = &vals[i];
+    }
+
+    if ((err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc))) {
+        uc_perror("uc_open", err);
+        return 1;
+    }
+
+    printf("reg_write_batch({200, 10, 11, 12, 13, 14, 15})\n");
+    if ((err = uc_reg_write_batch(uc, syscall_abi, ptrs, 7))) {
+        uc_perror("uc_reg_write_batch", err);
+        return 1;
+    }
+
+    memset(vals, 0, sizeof(vals));
+    if ((err = uc_reg_read_batch(uc, syscall_abi, ptrs, 7))) {
+        uc_perror("uc_reg_read_batch", err);
+        return 1;
+    }
+
+    printf("reg_read_batch = {");
+
+    for (i = 0; i < 7; i++) {
+        if (i != 0) printf(", ");
+        printf("%" PRIu64, vals[i]);
+    }
+
+    printf("}\n");
+
+    // syscall
+    printf("\n");
+    printf("running syscall shellcode\n");
+
+    if ((err = uc_hook_add(uc, &sys_hook, UC_HOOK_CODE, (void *)hook_syscall, NULL, 1, 0))) {
+        uc_perror("uc_hook_add", err);
+        return 1;
+    }
+
+    if ((err = uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL))) {
+        uc_perror("uc_mem_map", err);
+        return 1;
+    }
+
+    if ((err = uc_mem_write(uc, BASE, CODE, sizeof(CODE) - 1))) {
+        uc_perror("uc_mem_write", err);
+        return 1;
+    }
+
+    if ((err = uc_emu_start(uc, BASE, BASE + sizeof(CODE) - 1, 0, 0))) {
+        uc_perror("uc_emu_start", err);
+        return 1;
+    }
+
+    return 0;
+}
+```
+
+Output
+
+![image.png](API_Doc_Pic/aU1lbmxMjXA5g3K.png)
+
+Because @begin (1) is greater than @end (0), the callback fires on every instruction.
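+
+As a hedged sketch of the variadic @type = UC_HOOK_INSN form mentioned above, hooking only the SYSCALL instruction instead of every instruction, reusing the uc from the example:
+
+```cpp
+// the UC_HOOK_INSN callback for UC_X86_INS_SYSCALL takes (uc, user_data)
+static void hook_syscall_insn(uc_engine *uc, void *user_data)
+{
+    uint64_t rax;
+    uc_reg_read(uc, UC_X86_REG_RAX, &rax);
+    printf("SYSCALL with RAX = %" PRIu64 "\n", rax);
+}
+
+uc_hook insn_hook;
+// the extra variadic argument selects which instruction to hook
+uc_hook_add(uc, &insn_hook, UC_HOOK_INSN, (void *)hook_syscall_insn,
+            NULL, 1, 0, UC_X86_INS_SYSCALL);
+```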
+
+
+### uc_hook_del
+
+```c
+uc_err uc_hook_del(uc_engine *uc, uc_hook hh);
+```
+
+Delete a registered hook.
+
+```
+@uc: handle returned by uc_open()
+@hh: handle returned by uc_hook_add()
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_hook_del(uc_engine *uc, uc_hook hh)
+{
+    int i;
+    struct hook *hook = (struct hook *)hh;
+
+    for (i = 0; i < UC_HOOK_MAX; i++) {
+        if (list_remove(&uc->hook[i], (void *)hook)) {
+            if (--hook->refs == 0) {
+                free(hook);
+                break;
+            }
+        }
+    }
+    return UC_ERR_OK;
+}
+```
+
+Usage example:
+
+```cpp
+if ((err = uc_hook_add(uc, &sys_hook, UC_HOOK_CODE, hook_syscall, NULL, 1, 0))) {
+    uc_perror("uc_hook_add", err);
+    return 1;
+}
+
+if ((err = uc_hook_del(uc, sys_hook))) {
+    uc_perror("uc_hook_del", err);
+    return 1;
+}
+```
+
+
+
+### uc_mem_map
+
+```c
+uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms);
+```
+
+Map a block of memory for emulation.
+
+```
+@uc: handle returned by uc_open()
+@address: start address of the new memory region to be mapped. It must be aligned to 4KB, or UC_ERR_ARG is returned.
+@size: size of the new memory region to be mapped. It must be a multiple of 4KB, or UC_ERR_ARG is returned.
+@perms: permissions for the newly mapped region. It must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, or UC_ERR_ARG is returned.
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms)
+{
+    uc_err res;
+
+    if (uc->mem_redirect) {
+        address = uc->mem_redirect(address);
+    }
+
+    res = mem_map_check(uc, address, size, perms); // memory safety check
+    if (res)
+        return res;
+
+    return mem_map(uc, address, size, perms, uc->memory_map(uc, address, size, perms));
+}
+```
+
+Usage: same as uc_hook_add().
+
+
+
+### uc_mem_map_ptr
+
+```c
+uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr);
+```
+
+Map existing host memory into the emulator.
+
+```
+@uc: handle returned by uc_open()
+@address: start address of the new memory region to be mapped. It must be aligned to 4KB, or UC_ERR_ARG is returned.
+@size: size of the new memory region to be mapped. It must be a multiple of 4KB, or UC_ERR_ARG is returned.
+@perms: permissions for the newly mapped region. It must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, or UC_ERR_ARG is returned.
+@ptr: pointer to the host memory backing the newly mapped memory. The host memory must be of the same size as @size or larger, and mapped with at least PROT_READ | PROT_WRITE; otherwise the mapping is undefined.
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr)
+{
+    uc_err res;
+
+    if (ptr == NULL)
+        return UC_ERR_ARG;
+
+    if (uc->mem_redirect) {
+        address = uc->mem_redirect(address);
+    }
+
+    res = mem_map_check(uc, address, size, perms); // memory safety check
+    if (res)
+        return res;
+
+    return mem_map(uc, address, size, UC_PROT_ALL, uc->memory_map_ptr(uc, address, size, perms, ptr));
+}
+```
+
+Usage: same as uc_mem_map(); a sketch of the host-buffer case follows.
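+
+A minimal hedged sketch of what uc_mem_map_ptr() enables: backing guest memory with a host buffer so the host sees guest writes directly. The address 0x2000 and host_buf are assumptions of this sketch, not part of the samples above:
+
+```cpp
+// 4KB host buffer backing 4KB of guest memory at 0x2000
+static uint8_t host_buf[0x1000];
+
+if ((err = uc_mem_map_ptr(uc, 0x2000, sizeof(host_buf), UC_PROT_ALL, host_buf))) {
+    uc_perror("uc_mem_map_ptr", err);
+    return 1;
+}
+// emulated stores to [0x2000, 0x3000) now land in host_buf, so
+// host_buf can be inspected without calling uc_mem_read()
+```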
+
+
+### uc_mem_unmap
+
+```c
+uc_err uc_mem_unmap(uc_engine *uc, uint64_t address, size_t size);
+```
+
+Unmap a region of emulated memory.
+
+```
+@uc: handle returned by uc_open()
+@address: start address of the memory region to be unmapped. It must be aligned to 4KB, or UC_ERR_ARG is returned.
+@size: size of the memory region to be unmapped. It must be a multiple of 4KB, or UC_ERR_ARG is returned.
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size)
+{
+    MemoryRegion *mr;
+    uint64_t addr;
+    size_t count, len;
+
+    if (size == 0)
+        // nothing to unmap
+        return UC_ERR_OK;
+
+    // address must be aligned to uc->target_page_size
+    if ((address & uc->target_page_align) != 0)
+        return UC_ERR_ARG;
+
+    // size must be a multiple of uc->target_page_size
+    if ((size & uc->target_page_align) != 0)
+        return UC_ERR_ARG;
+
+    if (uc->mem_redirect) {
+        address = uc->mem_redirect(address);
+    }
+
+    // check that the user's entire requested block is mapped
+    if (!check_mem_area(uc, address, size))
+        return UC_ERR_NOMEM;
+
+    // we may need to split regions if this area spans adjacent regions
+    addr = address;
+    count = 0;
+    while(count < size) {
+        mr = memory_mapping(uc, addr);
+        len = (size_t)MIN(size - count, mr->end - addr);
+        if (!split_region(uc, mr, addr, len, true))
+            return UC_ERR_NOMEM;
+
+        // unmap the region
+        mr = memory_mapping(uc, addr);
+        if (mr != NULL)
+            uc->memory_unmap(uc, mr);
+        count += len;
+        addr += len;
+    }
+
+    return UC_ERR_OK;
+}
+```
+
+Usage example:
+
+```cpp
+if ((err = uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL))) {
+    uc_perror("uc_mem_map", err);
+    return 1;
+}
+
+if ((err = uc_mem_unmap(uc, BASE, 0x1000))) {
+    uc_perror("uc_mem_unmap", err);
+    return 1;
+}
+```
+
+
+
+### uc_mem_protect
+
+```c
+uc_err uc_mem_protect(uc_engine *uc, uint64_t address, size_t size, uint32_t perms);
+```
+
+Set the permissions of a region of emulated memory.
+
+```
+@uc: handle returned by uc_open()
+@address: start address of the memory region to be modified. It must be aligned to 4KB, or UC_ERR_ARG is returned.
+@size: size of the memory region to be modified. It must be a multiple of 4KB, or UC_ERR_ARG is returned.
+@perms: new permissions for the mapped region. It must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, or UC_ERR_ARG is returned.
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size, uint32_t perms)
+{
+    MemoryRegion *mr;
+    uint64_t addr = address;
+    size_t count, len;
+    bool remove_exec = false;
+
+    if (size == 0)
+        // trivial case, no change
+        return UC_ERR_OK;
+
+    // address must be aligned to uc->target_page_size
+    if ((address & uc->target_page_align) != 0)
+        return UC_ERR_ARG;
+
+    // size must be multiple of uc->target_page_size
+    if ((size & uc->target_page_align) != 0)
+        return UC_ERR_ARG;
+
+    // check for only valid permissions
+    if ((perms & ~UC_PROT_ALL) != 0)
+        return UC_ERR_ARG;
+
+    if (uc->mem_redirect) {
+        address = uc->mem_redirect(address);
+    }
+
+    // check that user's entire requested block is mapped
+    if (!check_mem_area(uc, address, size))
+        return UC_ERR_NOMEM;
+
+    // Now we know entire region is mapped, so change permissions
+    // We may need to split regions if this area spans adjacent regions
+    addr = address;
+    count = 0;
+    while(count < size) {
+        mr = memory_mapping(uc, addr);
+        len = (size_t)MIN(size - count, mr->end - addr);
+        if (!split_region(uc, mr, addr, len, false))
+            return UC_ERR_NOMEM;
+
+        mr = memory_mapping(uc, addr);
+        // will this remove EXEC permission?
+        if (((mr->perms & UC_PROT_EXEC) != 0) && ((perms & UC_PROT_EXEC) == 0))
+            remove_exec = true;
+        mr->perms = perms;
+        uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0);
+
+        count += len;
+        addr += len;
+    }
+
+    // if EXEC permission is removed, then quit TB and continue at the same place
+    if (remove_exec) {
+        uc->quit_request = true;
+        uc_emu_stop(uc);
+    }
+
+    return UC_ERR_OK;
+}
+```
+
+Usage example:
+
+```cpp
+if ((err = uc_mem_protect(uc, BASE, 0x1000, UC_PROT_ALL))) { // readable, writable and executable
+    uc_perror("uc_mem_protect", err);
+    return 1;
+}
+```
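+
+A hedged sketch of what changing permissions does to running code: dropping UC_PROT_WRITE means a subsequent emulated store into the page should make uc_emu_start() fail with UC_ERR_WRITE_PROT (unless a UC_HOOK_MEM_WRITE_PROT hook handles the access); this behavior is an assumption of the sketch, not shown in the samples above:
+
+```cpp
+// make the page read/execute-only; emulated writes into it now fault
+if ((err = uc_mem_protect(uc, BASE, 0x1000, UC_PROT_READ | UC_PROT_EXEC))) {
+    uc_perror("uc_mem_protect", err);
+    return 1;
+}
+```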
+
+
+### uc_mem_regions
+
+```c
+uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count);
+```
+
+Retrieve information about the memory regions mapped by uc_mem_map() and uc_mem_map_ptr().
+
+This API allocates memory for @regions; to avoid leaks, the user must release it afterwards with uc_free().
+
+```
+@uc: handle returned by uc_open()
+@regions: pointer to an array of uc_mem_region structs. Allocated by Unicorn and must be freed with uc_free()
+@count: pointer to the number of uc_mem_region structs contained in @regions
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count)
+{
+    uint32_t i;
+    uc_mem_region *r = NULL;
+
+    *count = uc->mapped_block_count;
+
+    if (*count) {
+        r = g_malloc0(*count * sizeof(uc_mem_region));
+        if (r == NULL) {
+            // out of memory
+            return UC_ERR_NOMEM;
+        }
+    }
+
+    for (i = 0; i < *count; i++) {
+        r[i].begin = uc->mapped_blocks[i]->addr;
+        r[i].end = uc->mapped_blocks[i]->end - 1;
+        r[i].perms = uc->mapped_blocks[i]->perms;
+    }
+
+    *regions = r;
+
+    return UC_ERR_OK;
+}
+```
+
+Usage example:
+
+```cpp
+#include <iostream>
+#include <cstdio>
+#include "unicorn/unicorn.h"
+using namespace std;
+
+void uc_perror(const char* func, uc_err err)
+{
+    fprintf(stderr, "Error in %s(): %s\n", func, uc_strerror(err));
+}
+
+#define BASE 0x10000
+
+int main()
+{
+    uc_err err;
+    uc_engine* uc;
+
+    if ((err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc))) {
+        uc_perror("uc_open", err);
+        return 1;
+    }
+
+    if ((err = uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL))) {
+        uc_perror("uc_mem_map", err);
+        return 1;
+    }
+
+    uc_mem_region *region;
+    uint32_t count;
+
+    if ((err = uc_mem_regions(uc, &region, &count))) {
+        uc_perror("uc_mem_regions", err);
+        return 1;
+    }
+
+    cout << "Begin: 0x" << hex << region->begin
+         << " End: 0x" << hex << region->end
+         << " Perms: " << region->perms
+         << " Mapped blocks: " << dec << count << endl;
+
+    if ((err = uc_free(region))) { // remember to free the memory
+        uc_perror("uc_free", err);
+        return 1;
+    }
+
+    return 0;
+}
+```
+
+Output
+
+![image.png](API_Doc_Pic/kbrF7NdV6LDxnYI.png)
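+
+The example prints only the first entry. A hedged sketch of walking all @count entries (same region/count variables as above; printf/PRIx64 assume <cstdio> and <cinttypes>):
+
+```cpp
+for (uint32_t i = 0; i < count; i++) {
+    // each entry describes one mapped block: [begin, end] plus its permissions
+    printf("region %u: 0x%" PRIx64 " - 0x%" PRIx64 ", perms = %u\n",
+           i, region[i].begin, region[i].end, region[i].perms);
+}
+```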
+
+
+### uc_free
+
+```c
+uc_err uc_free(void *mem);
+```
+
+Free memory allocated by uc_context_alloc() or uc_mem_regions().
+
+```
+@mem: memory allocated by uc_context_alloc (returned in *context), or by uc_mem_regions (returned in *regions)
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_free(void *mem)
+{
+    g_free(mem);
+    return UC_ERR_OK;
+}
+
+void g_free(gpointer ptr)
+{
+    free(ptr);
+}
+```
+
+Usage: same as uc_mem_regions().
+
+
+
+### uc_context_alloc
+
+```c
+uc_err uc_context_alloc(uc_engine *uc, uc_context **context);
+```
+
+Allocate a region that can be used with uc_context_{save,restore} to perform quick save/rollback of the CPU context, which includes the registers and some internal metadata. Contexts cannot be shared across engine instances with differing architectures or modes.
+
+```
+@uc: handle returned by uc_open()
+@context: pointer to a uc_context*. On successful return it is updated with a pointer to the new context. The allocated memory must later be freed with uc_free().
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_context_alloc(uc_engine *uc, uc_context **context)
+{
+    struct uc_context **_context = context;
+    size_t size = uc->cpu_context_size;
+
+    *_context = g_malloc(size);
+    if (*_context) {
+        (*_context)->jmp_env_size = sizeof(*uc->cpu->jmp_env);
+        (*_context)->context_size = size - sizeof(uc_context) - (*_context)->jmp_env_size;
+        return UC_ERR_OK;
+    } else {
+        return UC_ERR_NOMEM;
+    }
+}
+```
+
+Usage example
+
+```cpp
+#include <iostream>
+#include <cstdio>
+#include "unicorn/unicorn.h"
+using namespace std;
+
+#define ADDRESS 0x1000
+#define X86_CODE32_INC "\x40" // INC eax
+
+int main()
+{
+    uc_engine* uc;
+    uc_context* context;
+    uc_err err;
+
+    int r_eax = 0x1; // EAX register
+
+    printf("===================================\n");
+    printf("Save/restore CPU context in opaque blob\n");
+
+    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
+    if (err) {
+        printf("Failed on uc_open() with error returned: %u\n", err);
+        return 0;
+    }
+
+    uc_mem_map(uc, ADDRESS, 8 * 1024, UC_PROT_ALL);
+
+    if (uc_mem_write(uc, ADDRESS, X86_CODE32_INC, sizeof(X86_CODE32_INC) - 1)) {
+        printf("Failed to write emulation code to memory, quit!\n");
+        return 0;
+    }
+
+    // initialize the registers
+    uc_reg_write(uc, UC_X86_REG_EAX, &r_eax);
+
+    printf(">>> Running emulation for the first time\n");
+
+    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0);
+    if (err) {
+        printf("Failed on uc_emu_start() with error returned %u: %s\n",
+               err, uc_strerror(err));
+    }
+
+    printf(">>> Emulation done. Below is the CPU context\n");
+
+    uc_reg_read(uc, UC_X86_REG_EAX, &r_eax);
+    printf(">>> EAX = 0x%x\n", r_eax);
+
+    // allocate and save the CPU context
+    printf(">>> Saving CPU context\n");
+
+    err = uc_context_alloc(uc, &context);
+    if (err) {
+        printf("Failed on uc_context_alloc() with error returned: %u\n", err);
+        return 0;
+    }
+
+    err = uc_context_save(uc, context);
+    if (err) {
+        printf("Failed on uc_context_save() with error returned: %u\n", err);
+        return 0;
+    }
+
+    printf(">>> Running emulation for the second time\n");
+
+    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0);
+    if (err) {
+        printf("Failed on uc_emu_start() with error returned %u: %s\n",
+               err, uc_strerror(err));
+    }
+
+    printf(">>> Emulation done. Below is the CPU context\n");
+
+    uc_reg_read(uc, UC_X86_REG_EAX, &r_eax);
+    printf(">>> EAX = 0x%x\n", r_eax);
+
+    // restore the CPU context
+    err = uc_context_restore(uc, context);
+    if (err) {
+        printf("Failed on uc_context_restore() with error returned: %u\n", err);
+        return 0;
+    }
+
+    printf(">>> CPU context restored. Below is the CPU context\n");
+
+    uc_reg_read(uc, UC_X86_REG_EAX, &r_eax);
+    printf(">>> EAX = 0x%x\n", r_eax);
+
+    // free the CPU context
+    err = uc_free(context);
+    if (err) {
+        printf("Failed on uc_free() with error returned: %u\n", err);
+        return 0;
+    }
+
+    uc_close(uc);
+
+    return 0;
+}
+```
+
+Output
+
+![image.png](API_Doc_Pic/juNPWvwGUlraKRh.png)
+
+
+
+### uc_context_save
+
+```c
+uc_err uc_context_save(uc_engine *uc, uc_context *context);
+```
+
+Save the current CPU context.
+
+```
+@uc: handle returned by uc_open()
+@context: handle returned by uc_context_alloc()
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_context_save(uc_engine *uc, uc_context *context)
+{
+    struct uc_context *_context = context;
+    memcpy(_context->data, uc->cpu->env_ptr, _context->size);
+    return UC_ERR_OK;
+}
+```
+
+Usage: same as uc_context_alloc().
+
+
+
+### uc_context_restore
+
+```c
+uc_err uc_context_restore(uc_engine *uc, uc_context *context);
+```
+
+Restore a previously saved CPU context.
+
+```
+@uc: handle returned by uc_open()
+@context: handle returned by uc_context_alloc(), after it has been saved with uc_context_save()
+
+@return UC_ERR_OK on success, or another error code from the uc_err enum on failure
+```
+
+Source implementation
+
+```c
+uc_err uc_context_restore(uc_engine *uc, uc_context *context)
+{
+    struct uc_context *_context = context;
+    memcpy(uc->cpu->env_ptr, _context->data, _context->size);
+    return UC_ERR_OK;
+}
+```
+
+Usage: same as uc_context_alloc().
+
+
+
+### uc_context_size
+
+```c
+size_t uc_context_size(uc_engine *uc);
+```
+
+Return the size needed to store the CPU context. Can be used to allocate a buffer to contain the CPU context and call uc_context_save() on it directly.
+
+```
+@uc: handle returned by uc_open()
+
+@return the size, as a size_t, needed to store the CPU context.
+```
+
+Source implementation
+
+```c
+size_t uc_context_size(uc_engine *uc)
+{
+    return sizeof(uc_context) + uc->cpu_context_size + sizeof(*uc->cpu->jmp_env);
+}
+```
+
+Usage: same as uc_context_alloc(); a sketch of the raw-buffer pattern follows.
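+
+A hedged sketch of the pattern the description suggests: allocating a raw buffer of uc_context_size() bytes and saving into it directly. The malloc/free pairing is an assumption of this sketch:
+
+```cpp
+#include <cstdlib>
+
+// opaque buffer big enough for one CPU context (an assumed usage pattern)
+uc_context *ctx = (uc_context *)malloc(uc_context_size(uc));
+
+if (ctx && uc_context_save(uc, ctx) == UC_ERR_OK) {
+    // ... emulate some more, then roll back ...
+    uc_context_restore(uc, ctx);
+}
+free(ctx);
+```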
+
+
+
diff --git a/docs/unicorn-logo-text.png b/docs/unicorn-logo-text.png
new file mode 100644
index 0000000000000000000000000000000000000000..08360ff8a1012c8a981fc74037c75e80692aba17
GIT binary patch
[binary data for literal 49545 omitted]

diff --git a/docs/unicorn-logo.png b/docs/unicorn-logo.png
index 3e86b3c7d36471e4bfa9f896528cacc49b8b03c5..2dc4d0b3c7acaa39a01ab1850ee5de137152ace7 100644
GIT binary patch
[binary data for literal 34508 omitted]

k#b9d{Z& za$0CGIAh@wwGyBp3oeKN-Arh1gM(cb*HRaLH0NyG>*r~Ll&EAqm84IBUsrjyFbPB) zuRPxnprsdN0wR9pZo?ALdnF&`W-8q~jdd6ci1Y9r+{8f7zYufAB=FvUvs={23)2ca zlRIjEKB-SvH-_Q%VU88xWmId361#EEiNnOcA-o?SW?)uNt7fE+W?d{5WyB|XmlMu< zM~)UseJ~GJe#;px6JV%3dIlV}M2euvw4IgRcN3QXuTI1EP_G22^_C+dI$htupnAXR z=DYhT-NH2lk_b`R`By!2dYfB#0-i*}kg*rkzR%BMjNzE@nM4 zyuJx9ij~%jvJTQAqiDM2jn*}iGjxycplYH(8oaGSXGky!TTn;8NLZN*crG+mj(_uS ziX}B?)?M z%UL(P%nq#|Q-z@z^q7~O_aMIIL*vkWTfQKtNW)-OTS|vl{x%v^TC*#APOSeIL@mL} zCQE~fhTAiJdLXuf!aQEpq)-qOg8{nk#@v92?*|0}P%m;d}S zw6(+vG$))t*2;HKSQ(q{Y{d3?-x?XLGVC>36-a6RflOmx#( znE|iGpvXLpX_XOmXj9Kie%Uf2Lp*XyKa*>FM*Qlst8{!lv%fEBRV|CA`S)el)QAXQ zBz?1ZD*BerC+E@F^_ai&L4Fe)c@|wBYB2pe(=BCE2(JJ5Lv-;}+BRL7 zFl$Qpno*8`0y4%SA*y3-KhZQg8j^XtimzuM$}iMTfZR1v{K6|H7o`9~zV}0y8%qC5 zSMdWMFCAVdQRHZs{rpm%Ye*dbs|p4yUMN9|k>lu}eI<^LZJ9y_LvJ=hZA~Y#)3>MnxE{bl~ z%|+d?>-mh2664a%+=c_CBh)0Zr^o6%Eb_QgVQs#qaXenyKHLw_PUYJn5G?xuEg$B) z&JL@-XxMwq`5I-{d0YK7gFAKFw(PH~j+7W~)n)Bx-I3@%)6BBvcUOK^Ib}H>6&X?< zbZrJNTm%Nydn>?yU(^^8^HEp))G4t9KI|rV6zC!2 zu3q?Hs)$t8JC5|LB>j}+A)AnXMSm972^S-^bha_|F5rPP{>n^>Vmt0`;jTX);?%2;1v`K{@rEl8xIoSVzf<=PHlvnjNOCf#XC9?}r16#Gl* z*QXC3s8Jee+fm`NwVLxr(eK__y8v$^6P(#%0S0e1dZ^$|{NK#0DdreJA0iUpxzrus zQQ5|s_ck_b;|qyn`3KDt2e~m!ub| z7obD2VPm)=c&I*f*np-FIO8Dq{QF999V+NB0f1fXpGzR?RR4KxjrUXgR!1EdSSpK) z!J}X>wO3@TcYWe*_Cu6~_h*wE$H1d2W6aw(CO&kd{+(>`OAR}_vwUR#6`6+ z?hrah^${ki7TN@DrMR)XrMMM#=7m9Z@^~8poQ)&B@KHr%2>TIj`AgwUIkbmhAxux@ z`4A@#%AzFHPgd`P(R#A!PoW>R0NnP>SU^Jw^6M5{sfx|<_MENs!`v@DsM_7^TwLp` z0%Q996BPST(=NPj)%?R52%}zIQSx?`q4_dLySTS>!3OSaAPs;NqhML9wm{;Xo@JCRqSHSv_0Fmyj+WYHZROow5HHVg%lIuV}jK$i}QWhF$;JrZZ*`WdK z`2f>QpZvG`6+RTed}r zhVG{ptx?7S&;p(u>9k#-2(=VP|4OxmybYA8r0qRjpRNw^-w%b*qHN^!mQZlo+7;Q- z#6NGifPJ=wh^8WT&{o0|!_TN3U3n9f;eu<9%_My=wl;oR{;C%t7bC-M8-4oRv0*b! zCZ#a+vHiyy8#hzJbrWABr=OySLsrR`vmB&qRj;=>@ACQ$m)d8^YuFr!{3hP2y=93p zLAjC{;wXw!`3tiiFOmlx=sayHld=B$bpNEby8df|vX{dJE5@MSL}HB~P*%tCSyw`c z=%PIno`)QKr?C~U4U0xS35lfBgkng_G8qg94me%eEyTFXoD|_6j98)X?{jkI6u>`m zktypYl>Aij&V%U<>T#jrlZnJx<HApCpSr#)Alw`Y!QxT}6OM$`fK%Lc-Z^ zY%RXL;^(uF%BALSVZPXjR62M+w9DR+p~&N=yM zjtgMut>rNF8ij`;tB%xUcsXXBn*f-jWTocH!wPVW`DB6DiXl0{h~2 zCgeeO9d5A47jZDJ1BxZK7h3IS39|vU{dA+E;1+9KPY-<23!MoMoM5r_A8QZp;_np@ zfC>DS{_QYTmtj=w+f7YzmfbWD8i*NKNaT3y6-X} z-Tp~$*hyMjqg};(BE7GX7oU*%{4vS?{Ewn*V2`tjqK&4%gp;Z(`9v+X=fQ^|Gq;4$9)=xh>OZIjxCCZg&C-)9uX2W} zxKx|K^TGF#{e|SQM4v-Fg1p5KP(k;41;6~4?TkD3$yaSlM7}0^J3>Qf3VpDpp!zEK zZYDiyNA*FCqWS2sp32K~RI{Y2k1Kh?*cG!r)+UgiVK`8tef?CEzw}5lJ^^eMic<)1?;qRBE~tCfXIS2OA!0kGK*Sf;0)7K{S&XvV9@(kz+5^! 
z1K|$Vk?5!CU@0a9BWtn+9yFP`vjo5+NlWs|gJs~$tB(2+SjqT9nxVLa0<0#gIO!F; zSb6$YSVMD9zx4mTA?~Hpzc5m=&Nkt zN)N8#a0)oAdtSjL@II31q!t-N^ksY(z8RR#2T!{hXWnQK{?bD!-qaF#UzjEZj3(BI z#$sAsWknA~T=vG&Ldn18RYvk@znSVC-KG`Dwe}Khyp`;VTu3 zFRafH(}+@U#*Yc<(Ic~2dJ{tKLt=OD;Bo2~3H~eSPJ_+&m}#8;^jCz{(Dk>2NLqc( zPdEJqNiQF&G7{1^B zQTS@Xk@>EQL&QU}y_g^LNPt9O}(5TcS=6Lh8E2)AcIA08#|d zn>x7I>PHE}^B*OxJJS4yBVSh>o)a$3DBsV3P+ot* zXAR&$T&F5hSCI^kC=+tg)wWPnRGxR|!D#GTQ`k5B)R{9Co0)%>0~e)mf_Kr@mT+^( zKv@WqbBMbkQoVkMsr}J^xKxpU{R^juh=xkrmqlk<2;wPuNEOdbELGWz>_@9Ghu4nc zwp7!u%O z-FR9Pb~3vM#!l;jV<9eXe%caw>W>#Yk8C;V(waCj7vxLWS1ui6mK9LOx|(mcLwRnP zihS|u-O)PvfqWcBDfZOiNz#rPUt_5Ztw@S}&{qeqOrVsdvT1(lTBtyJaP>HOKPVjX z?M%Y4(f~Eb@5Q`cjw-XpicWML&4INNvt-Sg&IJ8uh_ojb^G;pc#|A_O*E^FIpdDn* za;FFS3t3MV(1BK^TJfyqxYucucZBwk_(PI@q}RFx?-3u~tBeziwO%tg?y412=H`%c zU#8#(f(U842zQo~?n=!b9UW2udR;FA3frDOFpq9-l(C5^Or-RbQO_Z{daH(+f~LS_XlCsSsIq__QTO6aH6U^x;&9Yxq%(3Z2%20V zLE;jm0t>ShV>2%7glc9&Easu^!RzJ znG;V?Kp}6ry06z)dr5vek!JVN53g*2ehmu!AqyBvVjjMghB;w9=&(HsAoin_XoNEz z#JQ7x&V;t*@%kl#AAy%UR3PESjq9PH@%aH5nfY)P^ zpPV=S8sc7syotTcgu{0?-+G#yk{v>8ATv??h^yWgk}(dMc4$9&bL~D7J2FR<9UYk7 zMMg$Mpo4f5Sy|sNuYWubDW@nyt1iE)sI3+Q2h`fc9U>}3Og}X>g}t}kYq^_WhGMwE zgQRtQ{D4+2MkB3acd(|cRYVni6GFoo;eoK9f~Q?`zA_=BETOZp3RFlYMOzcJCy_bU zZ_I{IGn1dA))_%!as&4H(;aO4Guc(P5U#&&SY$}hLj)k1u+?UZF_s6|?5O{FO}#kG zch!ApqNiEEA7SRX_AqU0H|c)Xr~M&8`$2Xt&e)=aWn340Smp7J&W2FH#&-zCG7Ay# zd&Eqf{$kpuV=9Ui+ZImQWj`hu{yjjI5Dr4$A)3MJ$dGj}$yU8EHG81da9^$DzzS!C z&zcQH4K!pSXhpuBb|glmWK&OSVtJRG!j}Cczd-@NbUoNejS$+IpyvQy55i%pmtZ;- zTq#By2#obu>B@fTb)0GByA6EsmO-MwVCp#aceHz#_S#`Ixdf&GnaYC{_(mLgCtZnk zlV40yL~s#dok+|eB*D>?rBP_3%0!`G4`iRMaZJ$RpPfsU4i%!~Lv|RlmZ&@76zunS zv=GIc_=B&RWQ2fG%Tq$QJn;i6^r@B+4XoFKuWxc9-wjkEiWbgivHRrsQGKAdbIW*a zx&2Ce{3rK0*Q+!>^GDeK zN{pit|HE82Se4{n4Dp9NBAm~6cyPu8d5@l)_EQn}CNtyC1s!ynvWQ}t-{3+U3A}>e zOW<+zIbelRS7(u&9O(X^K~qfdB*NfrXUCdI7b`ThJ`58R!nvn54wC9yH4lSH_&Bi4 z6W-yuG>+ua{Z?hqy%Dh@lL0h33EcMT1F`H$HN!(a>RQ#2gzjWPc@`31E+jv3=Pe?B z1Bik+;dcev>B4i&nqa_E0$EKw>TQc=^@n7(l+af!30~ptqaIFyo zZiZ&xYYtinh7ec#F^IQSBJ?mpob)3mN?k6t;TtPAI{G(rL|(lpp4y}@H~BAl##0zG6}txAw98T^*ED8|CKIT3`$R z8IqE3!OJUw_phiftzc+4;?$ngPNT@JTx}ps*H*?pJN^}Z)!7aPQu0+hNIH0@a=zDb zlQ&PW-fv=)pHmW6Ev53B?h`vcx7PsdLXK<>L3LZGBp--Vo#hzfzq^7J{#uZwp{!N{+WEhFa^4JP&e1D_OO;vs(Zd{mU z2kZ>Do$=WnV31}4aU{Ly{uw6PwDx^|<@arhlv}lZ#Zbe@WhCr)VW^z? z9wG(1VNk-uDlTG)GD{c+1MwFyl>jO*{`{Z4DD}3(4O5p{^%&3tmYqyz>R16Sn}Tsi zu`%h=oFV~*plAmTS+)W!+Gmv^!gb}p07Kka+NGiAA?tKh+AH#`Y!wR!1x(KqgueyX zuE9j9Gr0OPmZPRC+~)Ft%UXRLYCkS1-UMAnNJsoG>=3^uloUmO_-I&OB{Y^|j1*6T zGjS@(FwS2SDTk`ppo7^t+J4PM0Pb$ppI&SN3+c5S0k_sYlVO}9UeD{On^dw6C)*nI z9QbmZd|EX@bAQf*%6}S_D2D^I_z1xbx)q3@5<+MTqTWmb?!4;-Jn|h4tQ4M{h%kW4 zwM3oJW%_Sl5W#uocPWp>EK_sL{>_G3E}!SQDB!khT7qv5Ik#t` zn%cO6j6D>`BI-JK^O#6@BtUU;;W~esKwkl2LFrE_Z7Y5ca%;4WS)zk`>kPN+(uaU|*cM&^*(nq$97oa{HXb>roGN^e; z*Jd_9_s7#2=A`5W>3!cZ5rmz>t_c3P>Vq74yEGWVef)HcFx!bem|Pw;G6QHx0jE$d zwW)=m5H4V7C7IV*+4tc&>xrCecsnu@MY5SAty(|Z7ewvr-+Gx23(=F zC8l);q7MmL@E#}e(*KSQgJjbCO^`E`qjg*-xSPsB7?&_E#j4l8ycLp6zGrHRLsbB>?ft~oW_u)N3u5XZ&v6|^T6&RR zsYLuEj|lfi90=<=B$j~FS45eR;o#gf%SriZNUvU;Q}Q}phP*jUSZwtTN}RYeG{pa! 
z=0|X)Wlmmbp(_9KDa0@_!L75DTxFz+r)V#S4_mO;Zw7OO8m`t6$U|`LK&HwTcZjZ< ztqmspW-lGiQ2k_ve8Yg+q{g6m-M2pTQtyM<1aVFtBG#Lo7cz%4X`tq2pOwg@;my^h zV55T%n3-a&H>orNA@gNOd&7%0`WOcA?tXap`1kIH!$Zex|182vJzo?5;vmykUJ*Do z(y3qS9YCoraObOCV+L@Iw6EW(&DR53vYw2auFXDAn1prl4F-#hjd`(?BLhRNfe+qb zmgLXGNO1!e3;kRH(81bCa{MNE+z|Tw?!}G98GP)R6J|6btJx2uyr6vIV7y}vv{K`H zK!f3qc=w}LlBl(1uc{OacG7+`CQ>)j-tqO7VUipHQ9CH?MhEl_6%3J=ymK#28W42G zrr>SdI{19olBeW`fUxj_PMq$xra&8Dth`d^xp8G+{c630G_4FD=!|gneM)VH^K#8@ z8yqD4L#DUsiNw)<9jlYH;Ey<)ly>`6X+)8mZEnarx(g}Ym(3+MYsCM(GOUijh!bTr zKWhtpV2s&!!0D%BBF0F%COAD1DFcL2hC?hg_SZ5rNnDQI8D|Fz$SBGzpx75+)47QhHab+c z;4z~}Kw1?&T@EA;5|n}oO{b_omQY}E`!I;%b5ZXCx&I2sqQjrxAA(yHG%Lw}xuoW# za-9;HA0IE+YR`RG0LBcG#AOMu$MsnXvVdz(R`-tdq%+E?5eer{_)M)I8&fZ6bn#Qi z(0_Jw#bQ{EF-;o8HV48H2G((dNd0BKFU)q2w#)i<*cgqMNu%;Yn>IiP@d6Oo@q)6G zZi#_AuP&qxXEgtllmBBh?~$^A(5TP_&ujv&Aj@gP-w+f#ey?4cQ#S~`F+Gk2=OYwe zn5Sk)&z6elBc{sh9A~6IVzN8-E8}lDeiPxl5j3XAaK!)mMAh{i4A?6oyEvwHdul&N z{ zsbsN*gNC$#kt{I0V+IvAyoL)}j5Ja>$qB9&JRR;*=cVxNQY5zYjPc}*sqKW19P?`^ z%qJ9_9Mm?F*gOc$W2kBv#Rf4%aj={?Iai}8LX*wS=igg>fxuudAAvUMbQcoPuiP-+ zK5Os0Fhi0jfRRmB19Tt(Uz}R|jlh{WNV*@9O%$bS_q?l@%fo*lAfnj}$o+b8qkVNJ z29PNuCuPAp-{>%nOAa*u91PuFtASLhW&uv+yk(da@&0d5k@+kQPl6sJl+Ako-(GFn zsiVzM95|$^S8f6$p&jxF#vKN_qmOH43Gkoch=!t6t~<}p;j9E0hq5T%ZLXck-s1G~ zjNcWm6@L6QRPZ??`<1ef>|oM5!{(wi(AA+Huj2fXO&|Mj{NXi8#PTE0= z;8tffB+fJHVv=pVT2@jXZCX^GMd<9|mdzIoSjn79Eo9ir%G0i1C}}XqV&Ubd+1#j8 z^jA^ojl7B0Kh7fs688^P4>UJd;>j*iM4`1<{kFp-OX7d*-GWEXIB2*C7Qo6^x9c7(9UBmpcez9Prwzb!nllDpdG4Yro$9naP%K>mAop5pa)melC`PYk83ET+G% zsmul0&=j;D;>K*f`teF;#bTLG_gMxmGUeE7W}(#ct96ppmv_#*ZQISE;Z7Iu^0~=c z*Ym7b2^;%J%@=y7N#Q_C0k2)}RV`}Uq#Kx2{cXNfSPz}L#H>LJ;e|a!;U$ck*KBO| z-M{d6YAg6i=gO*K0pn5L?(vbD97d1;OP#}7ob+r?kwJi+Yd|h3v-)Qd@;bje6hK`> zJ1n_hq-s@2t7#JTg4Hz8NZ=s&O2AWUil7>Tt7l0` zvQ}XBnamokK3<%)bEb<$5jk&DF6&2W% z|Hw0qNr(vY>dX^9Mcy0p_g5wQZd=o0;+z`+P%Kt1SwH0T^65Kx{Yublk_PcZ}sd$gO78?0h1J9V#82A zI!NUHJD4wqIF@SMl=<%zW+8hDe!%ZC_+O@jI4a)DHA(wk+(B0*QYVB*C~$_YM|<;$ zF}_0EQ9Pj{>+JYCnus2CzTxl3zCbrdv>^O5#Hw~YnE5$y{yl|2Dr$tTw4f7OZtzcl z0VLJ(7UbYYO$w@aiXha4n#o+(<)5~MyWNI7!GbCdP&xM8arJz#&kLy3`;AGc5AsUr zT^qJ}uzO5EJJJ&RbFYXDF*LF+QOL zJm(p;{v?M&gaK6wIv0h#Oj6|NpNXf8>ADV>Y1BU_wbA_-N7iGpaC# zuX=!+F8<%If;v+)zl5RKNC=>xG2?%>VYmbz%CX{@)`{Hxxv1-u#UvK?CN*)3=g%_k zd4;ptUxXD(5e^q8jXa2c5GnU_#zjGc$B3y0YFaKUzUweC=KV#vuN`g_YCV?NCyAs) zh&b$uJl`2K+VdsMa%o@ORH3Ahptht5Y2sq~lGmuhGEtS(Y(-V>sN(Lh0m~;-`WQwo z$p51u!y44ZG}3!sBUm4iadL(;b#PXYv_CTk`4)O24VqScCB<#VFj{W3h450N4s4ga zsSQBYB-XKXg!RXApp&91OlSg1G31C)U8(djiGum_i~Zr%Gak(rz=qkw z6#y|{Du6;viubSq6Sg%G1awN+hND!jJhB?wkKrvdnQqnZ1oXK9zZ_CRVw>Ggd+C0} z)(ZGL`Egc(kOU!evwGx`sU=q-d5=TsVA-*|FbApJ%towq__7k-b{iJN(GXUE)T}e% z*AZ@$6`D3FjHpG#C2RKl+e7|*@WrQEg=euTr>JJNq@PH|6*ZxqbtIy{FBRq;T0tC> zNJa0Ogi)awCu+Z;|9Buh5tqT-gpTg+G=ypWqx~Td$nOt<%}#h+S5W4Fs9vu++pfKW zr40x=Q`p~@GJ6&(q|FVDIt>TqYviWI2hE%*WGQ9tfe0}4{Z4p55F`43N{k}mWg245m~ z663r40s>o=j}9e<5ZA6z(}%x8x_R?$InN4-s8CZ;gh(({H5|?uS0wp!2YTJ|@FH@r zDFxO(&sYDGU)1|Qm<))9N;2~_lcBV-)9Zc?kK{<*p<_yZs&f<{vd6JrciPVbEydjF zQ6fMGnd(L}N|mf|UPr7#hQ~1H>8vNV#R&fI4PJPG5CGG=&wP)PLAxix0aoFw=xmWz z;M3mh_K@?G70K?gweg7`%z;cTym)|^)>zqK`4{xX_{qYd;BS54g*%}EOAq@^j7@+p zQ{O#u8z!8bRyWv={T9O!WUTrvg1klClbALoJQW|$W1X<>=OCDO%)WwcuFaO4xTjaM zkGy0@Gnj`y$J;Qx<=i_E%i9bhYsU~6qwmL&NW!5{#gLIl-5)Ih={KWCe~A@%c?f}k zDDM?AvzmE`HPFAZ{1m6a%)t!Y%PQQ7Z4`v8G@!M)==N%f5ap?4w_5 zq^HNjqCPV6zxHF(JPTP3Yjewdx6QU6kAJ4LcIM2fmijxG@AZ{JbR|Sk8{pTp{>c5+ zo=!Bu4JS`pT2iYTMI2c@ufj3POiq03AU_y%`=ykf{mET^#ilxKQ7>2_i-2p+xTb*E zFVFXK5Aae;syEeKK3E93l3-0_!XA69%MFNS|C@a@=<{~JScNNU*%H5DO(JUMy^+=7 
z*S~;@!lANj{MY(i{#lvF!<7R8I{B|Ky}e!|zRc+#nojSlH;)`GLMicHnAprZ@d(^mZ{U5^$MJ0bAW(!9^@TvJ7@8yAOcO&w&;Q#J9&&Th zRM}i1q3p>(q^`@vqgfeR??a3wF$Y&3-i35-&{62|;db^3#>gKQm>$3=ibAB4Nw@!n z6fcue`qr``{)RpZe={(;t=qu-T3g48UrYJN)s4iT`egS{-UYvd6*t>E`Cy~HsdWjT zutUJHWob`YczXI^l5v`!5^VAk!@;_p`wTEfEZ!6Z{1oIfxvs#7vUCOj@5pb%?E>N?X*shS0|uP6Wy^f@W5#jY2gt z8J%|)qz^g~GYEPIywu6vOR=glnA?@VJ5X&b&`~}y^Qzm-VYLdqLwmPZb4`>~%g5fvs`z$)1XbQ=r?6M<9>!)wqx`~iUifkvmAZ?eZhEiw?E>*yy`o}#EUmKz zU2j?ZJV+mr5mME`1QhVk);P~U`l)wG^n%X`Anfmo`)(a28e$b5~ zRG5Wim59He!CiFSQC%?vFIUo}rhq2FA#p3y~x{suILK@}<)sU%MFMkgIC2-!7Js|K~}c?xDo3 zD>xPMfqRRr!-wRTIcnKLG;9L0;;7Gs&8`6Gr{bS*6q6&BjM!8+VqFQ+6p1b3TVp4f zC`whuuFj~olW5*q_9!gR!Rn=jR&>TUpK&&0@v00Be*=v02@)=RAUCS$y87_ZxBiN? z&OmQP3Z&+IINRLp&R<{MhQ7J=QQ3Zo9h`l1ce>L!d0}(y?F5wF_I$tf>=>I(sHgH;F2K=Svid})mND=rksW(za zW?0ilB*a8NajnH#^QKw_ezp8aXku<;%bcKnAL;(N7OlnV!A^nwO(EQKJEO2_$OLDK zK_wk2TOvN(<{fcxnil2N>mz1;%|!v1gKo-KpRSprlKjJBkNg@he4|q7HoOBu1A=n+ zP|~P*oTd&Wiie)!J$QValOj=0N{zm$kTPQlau>>gHzv!U^8*rPEgGwt?1BaoVWf&y3!euLADdkWX1k_k`adUi`TdVkgqtEnY zO=|r1<#=XA;^+t*2-}+;jT(tw%k_mFJl#UpnbY4)A;Y>bD;P{HHSe%FwHzVf(gn3% zpRa4Q5pb|$0_CCKQweIaV@Y0AjX2p(^@u6pHDu&?iGYg4mlDoAEV-a)T&3PC#$5#s zC8ecePn0|fs?1y?B}{Gyj81=Gr@j)%9q^)+XN#ZLyCoi~hmS#{0d zynOR$I1au!nBElhX#*tZB1Ai;a0=hrmfrnLIy_P$8RzI<*}(m2^FE&-VPHgXEoMr| z^!d^=I7P3!rOB?^ZN!9{f4TP2LL%}IW^9mPwdp+HO02Ex_%acPCAp%61V~9ji~Tu? znkjHL5a?9I$zicHh_G9}c{Sz9@lxW{wmuX1yCa$y@h?YZSBem2D_@)u%BZRjeIrpn z!>rfT-0kc~!63HqRgn+*UKq^DGx82&G6hMdMHwY^&&>^Jf0I+p_dIhay&LUa%x7e? z!MHEvZIdIi0DGqYS~d+E3AapW3aB4$Pp_X)ky^ z^dlsWIt#C`Yd76AP3%!s0_5 zP1}YBb38<9;ez(Xe$MgT+V6F(#gmg6eRCE36Iwd#fG>F1Qt#yH(M0X=tdfUq4Pd`;e?iA*XWlTcaPuc5?GbheoWZ`$+FPj*QWNw{6%E#BE1ZXaT?UwRzix zy~h=UYQYlG(McoVgXVK`zVc)QIHw(oBAs4q&pDOI3s z5MvtfMfkA!TwP?au@u|-uPE!_I?kXnpyz#%*^R{=M7G`(|rMhnx& zECy2o3ZD-`YeCY>pJc9rk63dTx%PX|Y%6}(rri>am2)0e8@$m>iYSf@;S0C6tv+Zb zW;O6tDGP`w?>LK}G^g`C99UL%Lb&BZVtjic^IA`L)*$urUkf6X<4GRyqC{f*>p8xc znA@-2!8K9E64Ew#z;%j6%I$ub&WRG*252MdD<_+Sh={r48PO?(E3Q zgH!^?#KG#$ltXICC?P(B#G%|326J)dC>{^4#8HMLs9H6uz2Br`^OJb?yveasOsDc4 z=XlyZb2<-R9E|*;51Rrn7jNUNjF)-6`m5lg6!lT_yQAN2-B^T#+8%>lt&Eh=Ctjrt zLRr=2DH$uDFoCrq3wew}@Y7|+*s|I!S>=$ALyxOJ`|rHC>y+9$-EON=LU_RgYnjm} z-OhBI8|jhrIr%A5nd~c>un(cImnVwzprfPgUP^aK2k4(!6e7;32>)tSht(%pLWL;a z_A|$zy2(z%tDU8xp`=L^VBD|mxs&XFi=GvfCF#AkmR?xPyf zZ&Fy-K4AIhR;?S;hxfH77x2PU5=r7N&?z0Ns3N+?WzJMHP5d;1b%{A-<)ZJtbj)$o z9kf{<2N{Kztt7LOGBjC5YMkuNC(IO|5gQyuYpC*t?XMMX@p@}Sb+oo{#0uom5B&A7 zdfI{IjZH+byG8#FC#Ec^NubE}<>g^-rn@>7$KddpvxIT-hUk&#e$_?!cJBL=hk4og zmn{u{hhxUj`OBBQc{)YXx$!UN7KqYL%ox@75upssH4C@Asu+_eGqPPY+#t48KiauO zO;wCJ3q@(w7QM0coum71OezxlGS5(=uQpT#@hNne2eL3Op&`ntS~_)qMcNU3G%9c! zgoOyg;suy11ip7Rw@(1jZzJ4JcMy=b%*D!oRy3-1Rl0fMEBem!#&pEk_sXS)0t=u}ZJNpsYMiJB?eh~s;rAb4y$({d)iOA8)ocAU zzUNN1lbPe&`aOM1CFPDLG}jw%k2i7`l55{m;hE~d}udQF`d>_Bm zS$heQqlBXyS(?+80i zWP6-+n>CG?x4bvb8<*;+5lgG=lifv$QNpOyn-o?un;i98d6Yz#u3CtW4k?Zhaj1g? zUuc?V`%QTEeniUFNi#mfD-m!Qtp(R4&nbY4Zs31Hk!?Js>UO0UIXGy`kc=OR?-a(% zPga($AsifxKEKMK9+>Q+b1r9%x&Mzbhc%1r4{tf>azj|-(MQLXl1TU$JAGu9pW2k4x6a4N9#pScZI_(6m8QUfv@oh#t+t5QlNg^1XI=M-#Z+UiuTUwG@! 
z^K4B1NpknMFe#Q%|o7uvv41^lS+?KHcx%?*IIK;RDJW<5M zpfmor&}v6@6aC9998t;hD{cB^Lds+0!7CxJu+5fX?QK_VK+T6XeyM9$K(=xGYkZIk@aX7j)vUr2ctYApId6MMXn#u2bi|OiN>tQB>#f`6cIbAX&03nwx|; z+zCTonOnU2f>ona!V&SJ7QG~Tt-#ocz!|hLQgd0t z3!epz`S1|RwAM2v(bSKfBRs<5w}=eHK;;Acn=wZLUUM&$N!JZ%k;M7TO>Rv`Pgq=O z+P@yTg&eE<{qy>t?!$fFeybCK1^Dk~Aq@%duk%?CJiSXAE7|Z@DZScmq|m9ipfD*4 zM=EMe)e;3&4cZT$dUBfGjW;Cc7f`6rpL^$S^VyT8EFMeTDvt#F_%ra=Y(c2!wI)x| zxCkjzUm`^kTKouihd{omIeRm+26D3lB-R`kcjfc_OKgIa{;5fErFbF7YR{(yy+ERl z?Wc3NI3ZjqdD^eX?&DyuzV5Y4Z$eaS13^MN5;>Bh7`OOZ)Fxk1pwE}d71m0FfsNCk zExID&_ynNZS2*fIpBSCKI5;v;+=is|Fn%2>(u#lOdE zFgF@;Lb%Ecg^c=AtdtEc-@RE+b~U3x^i9^W((RT_;a1Sg_44S5;%W^v;%SnfVqg-t zhh$P!_`Hk@1sXt1MQu|!#bM6kz`j4i5zozV-Yut(mzJWZ#17ErNz6j*XVnNX4_Cn9 zmL7^hGU4gsVi~!5Wv&dVH8>L~SCTimKpJ1T)$kMA-{L2p(y$pPj;EWpih*d5J(R%> zFVo!@oeH5RXE)MAf%)v`7G?oh{UI&>c^2XP}i(9kQCZjV%ZBo#f85nG<~etuEcTV>3Yo~Lm3CG zTzo%)Po*LH&|8E|h@1%%TEYo>%Ft%9p#V2MQi&-9GGpEdYf10|KE_%zdg3rSZ)Ys; zF?FjNV?dCnBk96M3vT%pH6_m>lL!F2#4XukJk0Pg!+D)n&;y0LKP?FfCbGSm@cs#* zOSbE?$4DClQC`n*S#qj3n8*MxuFw>1LQzo((8Zd(M@`dQp!}&NlljGA2y!+XGmo#0 z>+_GgW5HknO4cC?e!H__>1Gd*HhL}fu1CG>m50?2edTFBnCdoS3>NSdt{EQ9Kbdv! z`yC&_g83B`T1sOZQwy8#4RukyF+#)xy}8ltTm!H65201xnN)8rizZ>AiJfI+l8 zLoMp=0n(tJ=3`*3Uf3IUIX5fU_elKH<&jbA_jQ5l8PoEbn_7P3QUiitHPyg=i95h> zmloiS_jDzNMkzRop+b759{Jz6-xSrM>+rtjI?_YyNwSBnl?xbYo@*XB|iVfVy2}d z*+UnHj5-sE6HA2GuncRY9O(MTun)AG)8|HcUrorQsEKp2dg-zcp}|ubAo*$gPU-`* zjM~ww=Y!oY_BacK2b@FHgUoamyQqmf8{@VnBuJRB@0)!614s*7iJ3ItWZAS>%#M1e z3fCk^BiamB>K%C$Ueu=ZtY%f|hDM}y6(+=`HSM_>1??rT2d)JCn_p){N)en$(qYTL zV$YJ6#@!|Gqd(yMN#Op@==*XnZrO0nN>9xNg=$&15LP%Oa=IPVNqS(JQ>(JL6QfB3 zCpP1>6tm+<{(eS?rIZN)P$S=S_^1fl_J2mfy!zvb(lpw{EJ#!LyY4)7QWJXBLLtma zuQ;2Spf}M2g$~hjBHZIb=x^CoLLoYJMq%8pDV=zdz;|5WmP0UbW@C|iih9X{<42S9 zFkuJ_D}tTvuw+4`Rth@Zt8#|zh;s{0r~ScKIySb_h@eWG5jbG8c9T(iV+`kvAtUt* z->0v6(79Tlq99S{l4NXm9UfCqMOrp$t{z|D6W!i$xlM8zJmsf*3AfkX#nm7*Ekr$r zO3l{I8INu572uJQn<5SJ_b)CKpD}rdcdp2l)k8yCVyHv_ zNu*vX^h(vWe?slb&yjjT`?Q=uMKhTuPLdEnsc^PirKcgju0)r5iJ3s^j^tA~;*=a4 zxRN<@C907?Rp>_zg9D6SMukwR|ljYI*pbsW|%4 zcg{%}jyLr{*ZUz_cK@U63v-3vO8RKS||JwLrvey zDRC;v7f1ilvcF+i2?)-9QsG&PCPZxTLk$F%?dmxXdH9Jgo^&fgDG^S}MPBG-O?OB8 zeL_CW|0fWasFo5&8BTX&A1=g&WI)79Q5vplo@o7=au_;QA_BFp>2e52!@#T}CnR|JL$%C3cPcA|8@k<-emMV>nejtMxTD z6CBBFanIjRp9dW}nVOn>y}V|U=W1q0y0cF}ZV5O)M-(OJP>8<8QRMsa+pAXb>GYQF zF&QGdlWG)Er`(P)S;BV|8BN=Wc}U!9&R%q;mD{1n*J&*TC!Xr zs+r{&PeR+RvYQl=jqLU{!c9b&a>JM62g>w8)9mND#)8#@S7mcJ4AnJ+OcbgR45De= z4yZwg?wF(p?$Ny}5fd^}a*XWNiS8=xI4*tWUt6p#--ALK{)`buL%5FKFj>{@@Br zk`Q58e=_Q7=CZ7oKlI-rum~6k;bN&zlG*PvJv#~4w2~}U!7n!~-{$T{TqXQ*C6_Ov zv%<%jBsmph2}Wfo>c$m;voaWi$@dq_MBs5Gnn)(pn&HgJ#IE2DN>yE~VJ(K+HuUAx z0Rm%@okqlx&G|Q_YGU{@Qmf##rCFg%qUQ#ujMY5;VRKa*DS4x^w$f9a*8l4vfs~?d z1kcIFT~4BZKHbv$w03%F`vJQNaAwL@T2C16)FzJjLg>~hBjvo}UF;^`JwZtT{2H*~ z2S{Yiy{~7@rHDE_xFf+A?P&JGo%Yxzk5%{R%(9r8@$iBL7X+MuY1bbK1 zLx!o@j6^_v7V!V^G>w7_-GEvgC02WU7l2<55Dq(fA=%z&zybI@Sx(01S7!?ZUQ8n1 zloQ{5$%nYaqw|{~c84pUca*QdU`G07a73SQW*H4{&!Y3(a3atIg6~$+txQ*5c z59ReDX$!v~bL2%4NpXmp;T9vfTmr`^iiY{~J!uM|oUh%~hW(@e3zX1~x^G1NHqPN{ zn;vn%(}w_jfdB7_254XZ1L?oruHK95GQ%dfflDwX3;B*5uGw(0S(yUSH!pEdmJoN% zQpuI!YzrfyJC)D%H2sbi2H4gMRSXeq$MdRBa&bz(vA85xr2a1*R}-Wrn4z3MUL5F& zms0qz-rq|cnO}DJF9}|~o-Y6VyTrkg8^z9j{D69bsh>>@CmW?~6wV`u=x!Hso7)-5 z4c0nGAcuQqc_KN$ijx5CgKuGV;PVa80Y4j6CC=w^z*J<2y6xYN7@;og=# z${k#;96n)DFxSDef*Qh~ubgY4%0OU*OgN2uy{g1S*+ex7Iv9FYbfy0S{{sO0MLHRB zFb;<-EvElp{(EGUhwufX2ZEdf9g~?JNa zY>coiycE#|XAz?ln#tTr7!A4pn3sd8rch#>W|Lpe&VwTmrgD$~73ug;rOAc7985Lf z52&%xG_NC%mxCP$Q*{!9j%0+eb9gzJIOK0(FGF>b|IN$64usJj8X7^eC^QhKHZKPg 
z2RNass@QR`17X6%8Bo<)cXLF-#3*z;MMKzDLnX=?b|6fcp@~FvtkqLkb6yT62B49$ zuRP6Mke7prLJkF;#`D6mc{$jDFwqA!Ahr`W3fxD+L=I%7hb-uysMB4)PLS5_TX=Y|=p35U4pMjd(ej5c0dQ zO;E>FJcFDa2opz8o}?pG()}u7W?m9T1Lz?_$5e}iy#k&;&JKi$dq{8VCSikvKQl~4 z`AgVJVQYka2bNW52f|c4DCtgl5=yvtfKmizn6e-_^^b&Y7PdC%L>P7;41*fNdI{?e zrMtz6cZQ%&aT54^Ti6O=dw5A0b|B2jAeod~g!LBoFJ6vn$&bM(gY>C><>g@5fiRV! zrLfC{T?-{K&%>_b6Th%eguN^5tKiRWUKWNO2%{A=a`#5484Z^TJC~Ovra36=3t`^~ zdnfoO!>|KkLQayf*220$CE?E$X5wXuYUqZi#5X0vUlaCUu&fBf4upvUXL6RXX2A%h zT`*cLjgrm*y& z!=dA_n!)JB7^UBVZZ;<_PpI4l9YyUA{(B6=4uoOC03CUy3Og-mrw1Jl z{ah>frwvMxn9OvHk;5?rO&>H}&<;QN$^6F{MGBO#_8em_sN91y7%?0Q+QFb5{@+vq iRm{IKCf50X0R{kUv{*p~r4Tx062|}Ro!pfR1`mnZ(O7nKcKOW4i$^9Ra0BJ8yc;~21%2p=|UR0&DbiW z$#rfTQ`a`O(`{9s_5yDV_yd5l2Of}kLK+Oj_Ok5(v`JGz71bo9J#^YYXp{DWs&KBa zQ@dTpxRI}aIp=pi@6k0t$5)!;m`NF6-tt{FpOKHBn3g+MAqmexC-gw4rh87hTrL7G z#)U`L!(So6-Zux@>;H3gR;i~0B%VTSS3P|m@o9jRsXML@Al^p#@G0Lx-0?i(9WEw_ zSYddU<1E8793KxjQ|c&UmW!mTC>k>?{om1c9S zUx<6_jj_!T&^M{wWM#>IBbOSf*xP<^F{$j$aOQ5Y{cT zROCL1M7^NKKL z&(yA}mSw#iM0^;IB{ZO5!wl{^Sg-*ysE~&Yz8!E;Qv(A`lu*=Clo*MpVGd>OdF6n^ zam1Jntk;<}MrqIC5$=Q>n{*R}?8oOIDUw5En2dl--Xw34!z7E+5pr-OgyQ-soSab)C%saskMla`aQLVzg0+MZf20tJU&K{hZoBrUc+U4e9&3o zw|KmGEe4#xz17wBu{f`SS_4i66?j31EjY7n{zGfhONK~c+td!TS#B}JoR}5UAd7p& z5phTyXSkK0xCeD3xaYP^o&J~#Xp9xFb0C;HHml5fA<%h1eR|qw7wxF+oNL9T1Aits?sKNIwvGaN)^WO$I^cUV)HzL_| z1K?{9p!>B*)`xfEv!4N6IG{J&h49W#Bz^(#YWw%`e_a{8n{G9m5AeR~_yl0%<7V@p zwW&gn96aH$kr5D-tT zAuAq;PB+84&n^bK8V0;wt;24AT@17j1D=r{@$@T@vWd~9KLu6*5lzli) zD*rxJ)~k$$bKhMIbVUrf#HK5Lb+^y$z<_6@W=14%)*KnQomD|2r24fb;@{pQhPSVD z{~B)So@r7s$O_htb1~3WFyIoKuJWbbex?Nj5Sbvcf!uftq~M+-MGCA6-dSWNT+Sbw zq<6I257;rJ*V!*{f5Cc^x{})K*I{u9C9|Yvmc)_)9p(}F)%|ahIA9}{HRLS)k|RX zD5;5#j{*~c(7*_J?qkyRC2>x(G@mnEYTu6mbR%30bV3ZcVzf^9QQaI%rL3_+$!BFj)R~lVq|bXY27qEAc5x_r1`8ZWzk|Q zcsI_)Kt5x@$z?3Pz-QbbC_L)?AnSsfAho?k)C!i(edbO9$P1kyFsXGHO(vD zL%T`gEQrvd(Xj5ji-CN=fRi}cmBjepS7Xyi7 zKsgyiv=b2%V}zm}Es_fLP13Z@2&tQsNG_MNNd!wSOqa$0_Q=I>7v-rU0r6MQLMeG! zuar)u3=V(Z7LewvBJ+CsQ|1(Qq+=rE-8UBl@i8z{25hXBWpFYx^e}M>ghtpzO-z)~ zXp3B86aTxo5M3MtLu3F%0v&z(g0Dony_V{IHn_q0b3ly0Mm3>Nkrch5CaF>J?yHLd zOAP2COs>VaqgaaOF{ZIB^AuPD#)*W`e0+`gZ;AsJk=))16j7M_;$omPVgM1tM(OAm z<&5kuJzvC}H0TR0*)oTT|57ITzs>oSYbp@WYc9btW5s=r#=yszE6}P z0o{CHVfFz!na3H$8>XXoa5XO>(tRM6vRq=53f9~_T?zv%)Vah!E2B~x$9xzWs&a9;@y%lqE}wLucEK^Vu3QMSILUg zQY|N0Q@2#>ar3zt$Xg7sJA8_T{HV-yAn*w16i!c8{;h49DgAfk%+0&e@^==6najk# zixQ>GwAn2+-!M=mQh0>)A5kQQI|W2`hFc%YxJj5}XG}HW(RsHmq~@bk`|c8(R59n~ z>oOSNjLrLWc9%?b7}qpUJS8g**fNl?@SC2Vh+`*Ugt@v-anNxlWl_wpM4C> zmWrKuN4%BTp-jfwQeYR7XCXx78;r`nHpKH? zp?KDZ>)D>o%RXQT5nQ-D3C!+3qsIAH(bO$<)FoGx0E8 zDmN=d8Ehd6S%>$5ov|QNMX?(?0S4yC2AdX0(Y@$h_Q-H|*0Jx|b%@k`mX3OowuDJX zrEVE62J!_1hfDwUJBrXKP9PR1Vvz9nWbrdvsu6`4Na12aH_lf)HC3cnOo|(kR~XQ< zS$Z%NH$^i$fY7kpG0sjGp;1J;#3rwXo7>oq#sFs&AT}KvAUNmXx9ZQy)~Z)gLvSKI z478N7Ngwg0bT%T{Hn{Ma!2sOM=-tHkY0JS(i_6Q>Td(T-(k*^N{$n;O4ghU!TBK?3<7o(KCw=Xj<W#Hg=<)JJpoGZSw)`6Cbt>VhgF8LR;=>i)nr8)lCW? 
z;mvX0>p2VnPS6XrT64M&9fkqrX4qu>1<%ayLu7__RK@6M)^P9Apd0P-ve<+WZ*uU_aQplbExh-Qr1SMI=`{l^gvGkVzPJPEEWG1 zR^l`!{8+daD?@^ahG3>TyGV*3*M%=4!F|an2JmgS9cJubz=dAMMuW{pM+E~`*f=fK z3;q&mrAurQ!IBHpg)xvAXTwBYyDhSaG#RFz^Ag_hWNI-2qJ?sfLJkT zGG!$XYg7%FnkSRbmv|1B*d&fC7qSatfCV?vo&AG}9DExti)J^=$}#9g-b)Uhp3$3z z9Cs$1ZK%|~4q?>*;67vw1KY_jKXJzDm?2WV z*&12B9=^!-WW#VRB2m3kA`M0-)=g)O{CkMhIBk@yGbS195}PCu=7M)M40uWs>2ae3 z?n?wtVMfS`U%r#-@l0}B0^nm7D#UX~&Xo!lGwXovX<;bE z!#2C{fn>NU>k{4AWH2yVmMt40{{KP*t{=)+xmv0rKJ}xU#kdgS^8$arBvYOP><0_Q ze>&+Dnc2X`%?&OFTnyM@07Wf+r@WWjrHA5;srZiC(HSN-^;MCQO~m&cVw3%1Q>|kl zP)#Uu_eJFKwlzd-EPc3?KHMCgLOHW0h<~(|$LUbpDmg(O3UFe#4wLFj!|(2I`!Fy~ zDhBlw&m)vF62iZnl^%C6#?R;o7lCV>XL{?JrAeJ<)^MG~Pq)Y~HBsNmizsv}j}-4s z{=N7cLs^M(;o-)Avr?AtxUH5f6QWT zo57TzXIj=1-+!qi31Vd|Su5*CZzVtbO-0#0B1bD8B^CI)@@^gt#7_B~;o`TlToZi4 zEeAemAfZIANgB7^LKe@6F2j9K4g-2~CS2-2$d$0aJ%~{Qi}jE2s^4?=i@(6P7!`hH zNaI(FWYITVR^#1F%6H{wJmv1gcpB=|D$4t{K>}k&NzHsGxOAqu#3r5Cbj#{=7=SCd zgGKs$r;as9^YD>UJI@KmDeVjCxj_%<^9eAUIKf4weK1L?cdttDJOuqXn0~wvRR*ud z;u`U9GEOAsaxJ_`*RVj8hx0&iK0&8|?Ll&W+rs&~`tMW-cWBWT+fOtTN zJoEWRbNOQ|8ufg;4Iyef5zpssJE#k6Bb$tU5YxSr-gY>bMtj`gV!*|K9R}t}<((`J zYO-t@n7Ev$sZRJ3D$7EMoF@Yirxbp|j=i;4fly(5>;k;;AJ zLxv#O&06Z7JW_DUbm?Oy(uq0e!D4yiH0j$r(K@?2+>s&$xE;C{Lc@)!p1~c94-Xg8 zXq_G|M+>^T=>zeZkPSkk{dS3sV!;hA22#WT+{MQ!W{t@Pm+}NV&--H%Y$H7QFbO<7 zLk9G>1u-RzkeUx6XfYOW?0oxnlkOLqKwBO?-7Vra=qaUtjE#3=qA{T7W^Ry5L{;7@ z7X8BL^p1Y#+uw&6^oEYEH(Z}fY{Hmue_ae%V!&v$jBbte%x-;5!^8LDzlcR)LE=Su z2jGH6#aao8!~@ao|jFkmz|HL^kK=Q^b)n}#XTYV_|XusAd& zp4W2#E@;k51YQr7B1(BC8Wu{m-T?9Gd5d5;NXmWCFT3@jq*w}LbUfj_UHMl{#?kZr z9d71%#c3|!g0b|;*Wy1m%5=KJn`SqdI=mrng)Rni6a!AgW(DTQHR*0QC;xHUY5Xmj za#`4JohcQL=C0xLFk!xt@}W7AfyLm$`?#P>W4lSgh{)8)H|jfbw)E$odN+hI@TK@R z=iJm(@OW-GF5ABtbbPf+RvdJo$eL`o#U(b`{sM0OIgJ6%G}um3R_h;We9WSqjt>1I zHB}tCR4bx{0>%Q}i#Q~x3h8(mTF%P65)Fr5VbECd9is6bY2GS2J@Vb-EtI0`BjdXC z8?Gh-W3lvF$dFi^3z%SF)~=Pn_>MZX8L!VSv5AKvH@!<^pg;mE?GS)Yqt-61z42p$ z-l2`8cE}K%?XQ;#3q^lyYwD%ZP7D^j$Au8A2fCgzmZA~a=$Ue^A!u~w;=tifHcgXEK9zxvheWmhDeV=;u{B!$Aka01)4GE<$92|cwKm&W ze18K6=fvwHTnxq~#zKB^A)|kR1s-XfyG)kO3yDcXB_N6%&i6f_K)m~d%Q2K~#Vy6F zj&uW?_U4m^OWmUh0CzZuOKdtED-!KQ^$VyUP{fqw}y`oCtnKT|mWQZALc zYc5ZsuH|qX!}NM;r#y3|@^UN+ZgPUrMQoX@*g)^=;i~8{dR;NQa+4u~(Y!`H!$wQB zC7zti&Yx60TSVWV<~wZDzE&H}5S4Gi;Y>K9S(G(%jRd|#zw*OO>D{MDdh7!ic@XL9 z9U@_G4G^>oaGi8V``VVd+;DMja53Owzzzey%92HPX-sZgOEtSZsl<=NNhXWabxG#! 
zT_7cK&8KlO{~+Z$>JaGXSK*7{M#|!k1LCj-Qo?o zOlY*_F0rxyer}430l~$w8VD@6LIn$tC9$cpRvKFsk#OAm;(rL>ZxYTB1oRx^bU~Ve zC=r2>z35!75g|tWRjMZdzc4b7_k=mpD<=*yMz*VcZz^g;KXW65;RU{T-}aT}oLkF> zquGtW3(*rkUMPjL5v@&z`xy>_e@Y#%@~s|&_(yoHAntCiiAYM!>fDmdq`Jf=GkA4N z%>V|_#ug#g8mNaC)#6GQf(%p>QzVnya#x8Ao=sn_zZIAx+A4>_lM-ZQ7Gz5y+Cv{`nkSISN?!{z%kBEtdKi|=g~{wv@%paoe8y&-hF zvru26?cn)DTCqNVa~PMuFeWCUFFP+QElaY}6ctfP*5ZP@7>JJn4}@eulXwRgOTorW zs*MpP{Duiufe4|3ncmFqZmt){mmD~JAGa%WD3bY>`M}Qddls8560ND1>X1dM6NOI{ z4pZo-7Ko1>tY=uFdBEe4-$?nzSZUjxtC0y(`}PgugP=xQplKZ-xM78?_)m0wBc%E@ z#6EBEUB-vgAZ&U@M@4LwL^uqW55H;{QY^)IcoE$8OZs=MRQc`Iz_&|m za%&uQ`qn5na*b4MRv?~HUc>_|P($eSP!_e#SQvY>4M<}NX;X-QzEPUq9wmzwW(zsY zb^bw*I z8Q^+e3F~VjQ((2E;WG3XdqSkb9Ofomc`#lHQ8a8my(2a|7o4f#M{(}9JMi{GsX0nH z-rQ|tWXmikOC*T<=3*c<47@DTeUJ zuVH5UIt$}0V4yYG547?t>sH{QBq}`30>ycYO{1dI_BsLXW+L~asDe4sLLuU1MN)Jf z@V3r68>H!(Zc=1HY~*gcdiX{&QJg^1<0S|hV%~*XoClJ`4#c&5G#TG0?wi_DKYw6)D z{Jyqas^8v0WJNCac#c%;TqxcTqT7ZA(1j>wZ#Tyy3LNc~;uA25j#;?PaP4_!fjM7o@SxACh0GoxT zfYc9)XL4;-RyO0N%fP|ir9dsg*c9b%fY3}^C2RNGR(@TcyREctnU!?F8q$q*F<`=g zio@ngzpZg?f5ANQJ&P`(u2eh=5b-?B&5!J$J#aCxgoedvfm+q9lg1rc$SV0e1GgT} zDWR##fEAai$tgdsm9=nLmR^#}q6_6}X9%pUjgC*}`%J09BFU&uu1rrsffU^0kO$gg zu-Jx!!BOe~`NDmI-$)@IZV6YHFY#_E<5D#iH7mh+{4-#*ul3SCh1wyc| zF08|)<}Isyh%uk#+n%p!P@X%skeY<9yhe9(BVxLf!kY_zkEaMH!e&){*5X#ly7^_& z?R@&Y69i{h3_=j~X7(GV(AjS|q`OzHcyLak{fXV~mrB8Ip8Usvo}2JU&%IfI#-no? z&*G+5Fu}mYRK?^H;(GF{B9v~ygqHtE{|=U)vjL4y;Sf-7EKDeH#qsuA%A)@z1l)l@ zh?rD2p=PJ8fj&N{oLOrG4_IhDk8Y|K-&AqknV9n>`#cpVkbR}{C-U`5CNFLNoHqZK zJUm7Q=?DN&1ruzy3T{y3I{$Auv{kT?R7-Q4hdKK=er%GU2Gz4l-y zz6V5yJy805m!*OSHflfOVPMy-rS^wZ^5;q(@aYB@18o?XD+3S%c@NTEFcWQuw%uxQ zbz<=`K8L{YPx_EHv@K`wX*mnkWyDZc_L)v>)a(sWA6ysUD@oC8=C-fYIB1 zq8pSJusK7dI^nZPaf(h`@@yG6u22f_Z{}g6U?BVT)bhO$Q3bx?7hHP3GNkdV?`6@~ z`&n3fL4Arxh;L_z{JzW=He`axEi-|6RaY;8-L{mP1(~U#V@n0UZs>v-@XnHfyK=MR z(Qq+`u|Py|6FLcng~ND(oq8cK$1piZnejw?HGdCrNz_B>Bn)bP+E*(6j{}T1nd{4i zX9&r6;9;<-E#I)9ZWex@8WL?Shwy%lHQMIk_)PxNrk7~bfyvf^k;ZZ0cv7Z{#|a?aDw`XZhZyj1DtZSF;W1Y{M=>!fqWb`W zVKEIn!Xd_~q%(QESMl4s4KBD7d;^`tJx@x_t5p_vb+o~z_M~EsdE(jNWzOiZg04xe zl+5H}7yL5#I4@i}#I{w3rE#TF5qxLYJBU}-walOFhP(O))_+M88;HWywC|~OwsDr! zJiDd%mN33Hpj=&b58~$*ea>chm8NVW75CDvlM`=;I?MFvhJioNA$(I5lf^PhDjQ)l z;DC&o5Nxazy)&1~8muXQx${Fo*&~&EAyPb$v2p!wmj!EX)^bc`Tam!r_feSJ|nQqrUVCr1)7D ze)ch$3v4`=!U27WCE^SS(HB_He8BK~Fw?aaepxz2fyT)Sm6k+#Q! 
zafwYl4CP@ucVhG{ksimgyE>i^yd#4><~uVv*r0jtX_gg_d8OyJ?9l$mR(!0vyA1sA z5bEE)QmP+85Wm8lmdWRkRg~}Z8u9PUN|kYCdlG%|BDVU0g`@T0BWAx_Z6S+UV7MJi zD))mzzoO6JZU5m?&89l@LB&HXYU-0L$zV)Jx3qPVnH<5@z@$Km*zvREqVHQJt2fBj zxe)Y!+dzb0Fn=${#-~3KgBjy<2+-%aW@5wU82~}zz_T#lV&^2)FGpCg*AS`uEd5Q% zeu>KJyP=a{Ku@}(r2SWkl=&eZh*xwsXCBl+W&R6rGUM?SJDk1ecLmbxHMpKP%!GzT z;&T+H$37|5o8#x|&WdHhnz#72MMF`u(>_Bf!`?n&Vtgz^gKHJ zoax_sS&it^NI~T28I7K0?{u~g2dT69cEQDxMYX7>l(hlol4EUVPO9Xfw^g=7#nlO;!w`a6tYAAj9uT0EC}0$VOb>JLA0_5FxztQ@5!nR z=w!lm#rs_+_4hF`FHy78c(5IruI-{eB#NQXjF;-R=YUx`yi+(n)4wXlV0}DySIN9O0Vh`5el42Ojy_?@_)5bJqZ2BmFR9ZfyjKMq8=9V z7V(K1o*Az)*vG)_AS?F26H`hSBDRvB{Sdzw(INdk3)S0J+0w2@!T?O5R%GL>~&*oy;#Ajy+~e|1Fcr6A!jQ)Fk6?G3OGSjEtYm6k5gNh0B;D;9}k@l#=NjoI918BDff>8S3#V zyUW8lOA!=&|Fzp>(1QiA-sEqMalQe9D+C&f<9c;SdKb7@194x*M;bsm?SDBeEhxxZs2@O_fA7{F*TqxfqHo5Q>;*r**(Dtb{iu?go-@ ztlmnQ&NiaI(s4@gcuS=;qeNHR-b=c@OdgKBmC!JT_UkHyM!$$lY;t&9;#IjE z1@4Vy(&GoX7pL)uBrOc9s8IR$@Z5Sc%fiFW~cey0;Y)J<# zCo?`5{f}9F3b>nVA(W*h;(1!nF<8N5Bko)A-w56@IW{(WnKMD!1`gpx5gYFj9ox2b zkJ2V2r%8trHYBGfoy#G^TB(0gxn~7Il(wS-XuFeGuoqn;HNy>Q{wIA5toe_CUx(%m5}>SE!@rHa1En25Z@#w!}U_mO;q z)tgAaq1q;|G-J9N5jI^Q1AiM4lff6gLk7b9W{Oq(KApG?>9#zdXnE zV}|3m=^EN|R2w%E+d2m96b9@B^MF-@0rWU`0=-LH4r2*3?Ai~+|KOQG__<{b8 z32ZhZT*f3*E4V#a`;kl@Cs@w8To?8E%(pG=+UfbFUrO&E7Bu?b@$i@%8y8FVtU5J_BN zJ}juBrGDbOo)3u|mC&hwW?Lfz`|s;y^~CV$_AGWUEmdPhcMZhmdMyk*+=j^TqO=s8 z=0a%aDkX~HVs(L8CjLs47AwMWnfz6e-ez26M;lTQIU27(7&-Bto}icO!JU7urLC0Z z9PV7Xw7n&(XxS?znCyFEI7Y=aa{wa=1I}I+AcCFG@7&2!_f=J-89#?}%eSVC{UBDj zWiqyP+T69b%`N$ZR%cyFM-z=Vm)JyOqyyg*B{EE+A934%&2XvvH_byL77FvE@-P&! zw=+>T497ueUT=`Vg`=cKCt9ZoQxFLb6yHolGC}JvaB>IYLnSz^_{fFMVtT7>9BFO~ z9K&UB47Zr$GWm?Y?h(esNy*otoI?E|IE0$y^IGOWY3SqdqGtp2S-Xx1T{~e3m`Egs z0He??;h!$(doGbIVVHWx%L4vp6TybfJ9CAsnTbEMY6Z|M2MpbsIcVKd?g2-G|ixdJv^C(3J z7gja%B>&4~dX~H`gvR)Z#KTekW3%762#un299I|6zq8r3vL4WD@%$fL#FI*d^gUMs z-(-4hTf7&%nl^91v86}HAY#=`NeaPJU`SC4M9Q|R09@!>nu<7Wm@QNS3F-<6Li1q? 
zghop*A>SzUpU3fcqnj z;B?VWU;fFXDrWO~j{qrVa_$N)0qPwS^L#o}AuBdRhz?j8+rzWXs15(|I_x)T2>o8gwv_UXeM#0S5Sm}Gkn6O* z&a8WFAM1+I>~WK}ls7opyb*rB7{r%QsHJ6DxH1Oz|FCL0FWN_vx^2PZ@B@^wdl(i{ zGqqWo&QO0;ZE>CZNe>8immV|QI6CdGu>`!Gpl09M?mfDIZE^YkS#&#e;aMxIHt8rG z8Y0%aSVU7Ka827qkH6^tB>Ju*Dvy+K_mMn@*z<@)^C_HK$4!)qzhQuJelq#e4XY9c z+CL3^uBL2X=r44Qf?ZvV$DaezuxBz?qT7|#S>jz{li4rnnJWWN^h)7PAiCVCDhBhf z+$D4M5UIHrxS+VK1>OUze-1ll%JB5mI34`W;<*YZnRi9Owq)ttKXb&V_roKeF}oxQ zUifwoBFD?eW!F=5tDYkIEAUbO0rz=sbbKb>=Summ1yb-{DrGaiR3DZ$uL79yBQBPD z^|_a5d!}$!tO8T_FbAJ#zK(-a>4SOvnQJzdaFz33VQ=8(l&<^?v35fyFu)Cd+aogj zq(BP)aF#c3ev zZ;|cuF!Md{OkZL*j;5$6kywUkkbSSYYxLO9Fkhc%$yY?xRq7NAN33EaArFD49x2y43Fj}g= z?(mxPq(bJT8`_Hjy$+a3a&Pqc3afJ2ZO+EC#wqGrBRXBZ0ALaG6g%^<s5GAP9{^88RpaTfiMCC!?1^79AOg z=!Ksr7A8I184B3u-}{TyUNlAo5wIJgF`zn+jb(#gjZ)mJL;_`aOepFmMSlTq*JxP# zcY_0m@;-~jb8s2|;oE#?51U#F3-!4iBK*5uZrH2ZMrqgu7bf4@!L&C%8 zbKnf0_e$VJHcOwAmK1lWx5<)WmX+9eeeoU6u6IY~z|IuQ;ZJMjdsKU}{z`v7UoOkX zZ!aq?MY&mTV2(AwB{oTZ5Iw!$OS*I3C8W5Imt6_R*HU}&fp`gyhE=hdSBjs8D+=!% zm$L&r1GCe~(Xd_QokOer$^%lgDLC1bMQ3C1ydet^?nJx;`Q^{Uz3x(!0es9wvHv#2 z=UqdZKH+>sU3=?Lh4@6Qs@So!Z?F!0@9&q%+Fin~Q>>vL{LbP6ESs`(&-G z-mjz1O*vyXKT`RgZb%dZh+s}d%%#d*$Ag(#9)ocyzLw6woY_)It5*Rqi5_54?H+;; zOskjXqcX9>*79>L=ptBw#zui#eeRVDZB-&bJ5;YWgXb9mX`ELjOTJ?l z(40*B)Vo1nDTNcT_c9ET`)UXb9yOC>m7A(d}b~zt7;`e@*>I0#OD--XD3+WP@1jc~M_e-T4 zmSCR!5&(zL1ULuMaH8HX5)EI4()MWa{RJYUP9$4`aX;>6{%>5HTbgh}r}>>G6@#Fv zV?yE*VW;oilEIe=4kcqagXlC-xSm~6OEdkeAr5ZTvy8s{%5yz&e18w?ks2J*OYs&% zM8}bSy1v^5@>VxWVDFJqJI@X#)p$f{zX$L4rJ6s{eDvW9`uTXX)K9V+`}1VLNc5J4 zg;J~{HR!9J%s03Ls9-66$$kN0OKIm~IEKY|i2j9ogE%zT|AqA2phS9)E0DlA<|D+X z)tJT%ma%8r{l?c)eVCP`B4d?yl7blmiJEc!{NpL#Zs^*lXDLU>z?#^P^al!hC0Gnz{QCi zg3`7ui z#u3Eb_znW|Jz~!9e?rGL9``Yfv)^E#vlmi?Jt!066FM#sul!WfA&f6C6#tRzID?yt zRLbbed0b*+^Yuf7p^xfj4z>Y_h-Na>F}W~j4LXaANYfYeGouTn_<1J7M*7ac<=TMy zL)ig-q3@j`Xdf{FKd=Mu22UFLbwv6C+|8YgiE7`du z`m>a|@hBXNKjd;LUEFNYJ&_+GGe00)Pa^@Y5Re7%l0_bA-iluckY^|Qn-=bwemwJ@ z_>a@`UCuprQ|&Oow{Sx=2C(@bk2A%GK~8yen$f`JAu z|2YFFzV2{U*0to7b)LWASw|XRY4ObT6vVB_1yQxt6?WYsTuG+xGeoJAR~ETVW!*t!qq;AxQ*AdWJ2<9-(mJkoxPWkj91^nzIY z2E>`ypB5<|A?4_OynExpdLPDv zazQN|dtTr$!A2UoGrSbToxdiS!39eI16`Wfw1~~PoXK-e0=No-pr4txPU;V_b5EgO zpK->izPd(f1{UdBOhNbOkl^%0>&P~o9!9)gDi3BidolgMOxX=NgaOXAG+lqK)IFYU z9fBIsluGaY86#YyY(RiI1J`3T!_gqIsCh7lzImr;N|z-z(`De`Qt?h^m$q%9Uy=zm z`D^i?e4vFl04+m(NFj=Q1El^YRHiX+jWl!0xbFMVdv5>#F{jMe=in-qJQvW?O>?`o zUA}d5F_pgMJMmA-1ykHNIHbFwR2+ydPxW~F@O8T7+ZSSE*a-K?`M8>=@~!Rp(B3Xd zY&gA-c+2wyJF)c>{YY>$H*@;=O5j!Z5)(q#0ef+J7&EPG=21pEy5L z2ghO^FathCffYDoyf_}3)ypI#Mo?;eWDwf|{b;Zag?GL8`^FnMuw>4r{*0X6e| znfqg2Tr733j|;kn$!-so+vz6VPiOv}$KRmrwx+Fi_1fU&o*g@5pEgMMsl6EJ62wOR za+QhaMHW_07AF>v-~Q4d4F`;p#h=*aP**nGsQD1(2Dv0eXLllMQ+)^7b2F$25zbD{ zuSeu&3*AoP_nKiljHrPAgh#^*2e= zcqa#}^EhpsM(71Zq+9U*os4<^&B=bdjf%m*GKkNIh}%9O<%^ZFa(XuIV^N|rR(z*1e+XGyRGi1fX?HMD zTJRUB;wpjb;+Nj>5Ncu8M|^+87~(P-Tio$2aBIwM3c8kH zV*~VtJ(qnJ*_w&DWVa+k+v&rnt&MQsCV)lc;CRy|-FcAH1#nx;&HhxMjy;pwkf>WzTn$ ziu)O~C&zZzK88i^v*og4Unh}5G@%Jd^B;#v?Tz-OW+z22oGOtX=w*__LF%my?v8(g zC~(`Qe9P(bg%F8_jneepI$7)IZjzrVo1}IH4;PX7xFvNIrHrJGF%Y1!Joj!_scz?y zsN^uxvCy50*nAkg4X~1fnu%T697Z*R@RMcY-)9Gr72&w(U+z#p8!Lc+ggGVhQzLU^W&{bD6+ghSf1a{n+6hazk`+BWTA>)XG^} zH)IO~e1l&Z3k%p!;O<`*F#qN;F1}`5EM#0@ppaacDHTKcwt8Lq$mDW5K?fe0BsFJL z<*=2gQ|miJ8snmK1a+IveKD`FBf?)(;(>+pV+hT@PJ||O9kd7c6(JoBVuhS@(Dz2r zw7XI-c{@Gv-aA9Gb6vxPpT;lOBO9OB^SqV#za|&!OUH$(phx0?J7Z#wYm`6$uI7IM zDf}G^+`%2!3b&@@F<=B3DAoMI>3U{y&&;bcrS_epS}s#_Do=Hd>x=JR^8L;UE|D|{ z;H|@?nzLjbJ8lzcTUUbV6vSqh^xvddir?hJC1pwpq4{Vjgl1=lyTn6A2A;q^-r(F=0pBWGz@%s^n+}@umi#|UydzVFdX34;@MN)VI 
zYs60cO~xnPechyFZxyr9&pz$xtD84F7zm9q0aA<<~r1D!$tO= z@}Ag0Xhs!DF^(^jGoOUed!{=FSc&q(}1JLYqob z7)~k@d#_nM|3$j+-xgEv4&_>++pHooOy*96vY!Z=>mi`G;g9aojipz&0x7!>&h?1s zJnp+S1~}At5g${HGQ3!27P7%ygmfx^_XG8Qg*xM@Lh>D+e|)z6lW;7UgHIk(#r4&+1|v z?&lO1f(r5J1Ux1HvOrD092f7GK49_X`GiwJZ#PKO+t144Ppfi3hOHu#zT!I?0s|rU zK>v*|v3|O&N@|`}>nGd+?ard8-XALC_0dgl!vMFG#NGw(S9++vdbTZzW zA^rp6?Z7$;lc58e-e;+AM7%m#2x>WS`Wm~u*BWHq+oNR}hx&3Od?Do<;d%RfzTqjM z4TJF|-{)EumzQawab)nSgNg|(OjjqntIdrbX15i5`$n!K7;SYzewLIE!^olsV*|7F zz`C_k)KDso4U1*Ts&VZ!_R#NkmH0NnPJK%-Fot&Q!8deJ_6HUtc00x-j`4{l8%*9Ow&(ZRvtdLm9eq14Iciv8Z`N^)=Gi1PT zN~Gu`5K|t4F{UpO|Ja=!^g4EBWg>pARPM(ve-dT)vM=BG5;4V#cmjSAx%ijBG2e41 z;7@KlxJEt1cNm1Gkngg9=W95x^bEhjw(4CeJ*3wYEG`*6cab|-$yiH17$VhU!*~tX zGFQqkoKX;U5vH`4OjmQ;*5>8u9;>jI&SOAQg$RAKb~Y zeDmMNRvK8eT8!z@?=gH!N~QaP|h z0vk19c{VCEzxcHF0X^#&40_aLeHSwn_55#^_;+=ZAcQU_?8f;XQ5}+Fpb>8lmFn@1 zKyG4(VIZ%F4U5&GELKm^Xe0oM2Z+W>L0F937Q=llq%6B=nm$9xd7+t(d> z>YXRP$q<^u6o$#7H}>tvjeT!B^km{pS|@=gb5%rU#*`}ldx%%X zBu9q{2kl2B*?g|m-9S_Dd4@B_fAb9vH<#l+rxyc!gJ)TMVM26G4+T98T`0wHJh7MO z;cjU6Eu@amV;>+aBlvDx8{(~BE5_7qrRGP4Rx#a1(ssz0x|i~I_j0NEYcwIzjU*h- zv*EGAq3Lsmi{FuRVb1f!QI7+xYKOF5%iJwF6Vo$z_RR9g>7;3^mhnJi=NR~ z%pALbc{wRumi{siZ>0?v>oY`Y3Pf>teYOj5WUo->es<~M_;u2_u>}_$PIiAg5Cb_) zZ1j8u_Wb_=A4yrADgGcdlUgiMSnI4q==_F94}=CeCIN@rC03R~y06*uVq$Dm$mD#V{nq z3Hjzi@CK1skI1jjI0wIK5Su?y-XFCjr@(o#aqe}`P8jOAG&YpE8PsBht%FXax+bWu4a?HYY&G9awNp3u-$V}z__$>ox$M09&i;D!);b!tWqikY7J^hXf(fa zNYwF=5+!{fA+?V~;3DdSBRYet$y%m8MY3BGM@or*% zcb~I|ft(~ZEiP#O!=UYvENa+Q{lLXY2RrSGrgBCFZR86~nDtu9U&cK)hwYprXK9k{ zbC%$}$l5PNlro3q<$EwMe;Z16{J*rD6r2}^7%sK9GdJG}zw0jq=~NYy zhvR;hnumD~-ZR7IWutJs)o+um{DgL*UUd%Wl7>1%xTzh5ft({Y5SmR|e4Qoff<{GW z#+qhnI5dtKu$P-F9|OU|Lr69POt9Z^`0<;LLVCW}qdV3GQaMEr1x9WNj8|96@~xb# z_|$zotm=ELVH@T<9`e2YO#VyIl{_;A1C(%I5Ix_@JUW5*wGgCj;ebvFr-gnK7kN_t z?xws3BGs!+?=6pB_IRP7n-i$qGx~N`ZDz#MP=Y`ktAZ;thhHov*g*~382 z5gTO3&mn?28t=+1NKMV%z7=ZY1Q6NR1 zst0lE4Xz1#0D7rZZ;7tu8H@p5R;xj66N!rxZ5wm9E*drHgLZ5t1!p8&)LC1&Ji0E8jK-Q_Zh_JJO<27rudjgl?CIG z`Kp)%Q-q4=*rol8Jrl(RSH3ymDnBC1U)~Ri=%~FVj&JEedKrR!T zAZ|DVuon*#>=9-Bv__U6GugphSx-#2u*i^>&REJnf!S zAl*QrY+~*)1pe6{#lPbyS+vj`AN`DcvU{2IdKp6FXk8SY&wbAh2H1(=B(>x-EDR^9 zA&0gCg7elwss2Nm3>dLWYJOmo@niz6tHkUe632)_g7|Au0kFS1z%M!bvt>bDG%b;N*bdlPpvbht0x>Qlf$7*->@}&%$u) zNu?et+baAHD{~D-1pf=ayT7p*Kt53dp?Mg=$v;SqkgK!U+=i(0sPS<`9Y!mvaw7f%SsnSY5t)cnq(q-SX2jxWt~t<+4R{-4bGEuKeNa8DLsZd4{Q zz&f0pfinRAX3(il#WJv?c^<2o!%1X}TL%>bl!~Fch87E&z#>i*Z^^0XBrV;n%kPY_ z^JKtCj}+>qbc3V%#~mleiEuPXEaL|izqN2h@qzOeLMGb>P9vNQ`WfT5NGFuYY;2XcC9b|nR#b$=+IrxZ zP2_hD@DWLjwQCt`RS}U+F^dOtSEs;$)4-VR2^ zK%0o~6T|^6rjG{O^1!}W*}P$wr>n>7?Wdd?S3h*UqHwX6#pKXLafchAJqob&gJ4;T zKcmXd=~qOC+~p$Ev6`C0B08T7>8i1);`Y<}U%B;k1&wdrfmd<5exD#mK5)HaY3}Qh zl5+BG$lt~grA@Kq8wyd{EFXyybFQgL8n;cvPanoO10Xaz(&uf#&k*L|Q2MO;p9pa* z@2$b=&Yjl8I1lRf2yYaaX*&^n<5h@dgt#tFv2JpjF~Il9t-wGX!=cW1?FWw0tZX_i zFg|cM0)OCZny6!XP>QOMh3ApDcY3^``c}^px zrE%coX+4&ZVq3jfC9c<9x(_s@us@p-F46Ps-d2_|17gssS8a<=>tTOL151UJe}ZY-7WhZfO9a^wySWZ+o~Wb2;vFY8c2RV#EF7m1+>c4%<2~*gt(zs&}d~T_GF|8M@lJ&rHe* znp3hHYvQu5on7m*XiSCP!TJ9o=D96;l7crl8e{wxQlrY+gaW(Ef3Q%ZtI4Mjc)LmD zxkX#&)`s*?Gtq|{+^*M z)#F+Q+CP)_N1W@i1au=af`ObNHi&Wlz^?8tyI$(zw@zg1t;GM89gN1acs?IVu#7*$ zo?qpZr>mx^BTe;h7Tte{dV|63ritokG=Jy;vf@eWXzZ${_C8|?W$>N zgkeQ!nDEW~osbNnQCBqR9UjORTtZAAM65a@*9nc{b4#gNKs&40Fb!t|fVXAieu(7C zj>I+fH8CizNH?L;KIqM!82eyMAa5#nT_A;*q}n$wY?}jq9&*YL6}rqR-A&6J2C~lC zs15#D@l9oC6LgVb-POR`_)6QGG{&TL_*B;0u(x!dMQU$_&Jzp&2Hbz|`n zcPjX1hZ=prPED(te-EKahHxPM^ruU$&ayNFE$!vVqIU5MgwN8XacX6x5I2|1BI$POSh+TZ}3)6Hunutjk@tn<*#a_w}}1x^^?(!^VIlj{WIrllVPS>0HEQ-i;C`S`K 
zBzpw$t>;FfVWQvpe%w7&Gy`JOBRVa|-j_E?J?8Sx*H7eFn-gwM73X`75#Q+;dhqBS zkZ?u^KstS`aGryO z3U0)n(6H*g2ccmTVjomr0HL|n3;~;bmP=^N7}0y!J#-U$;L zU_E@l`}2U%tYfEpaAzen`jx9#Fm*$=L~1r;5&aWU^%~#45Lnf|{MpUDR@VI<`Nk5v zRxoalI9e(mRsVf_i^x^pX#o=;n!Qa74$$8wxn1SNwPy`Kj7 z2yQZ&{2d2t+>egt@s6u6H)>L=O%7j


SxqEnYjtJwK(pGv-5%EPAfZN~Q%PHm!} zCnAPd5qwtGLZ`NMwzLjMu)6EuyFScE#+b%Bu-l$LOKSgWof?YO-8wt<+srVW5T2k4 zLUUYKf4)vCZ-mh7VTA|uG!s(c0?VZA&sJQLRUKK3Tll^BTiq39u^m~fns5751NWan zaIic~_`hbbE za3#hiC?n5g6T8yBGoIh$Y5oq%N%~qgQtIBVk?Jj3aPLl(%i$wmibAwAmO)P?#8E%fes@!v!(oS7OH)nn#v?Vhp@EP z31+NR=z*=_CYZG=WbGeK?_K^8lg_Il#yeOc0>XTMDsEbo$a^G^wAjt64VjKJgv01= zu*HB%4weNSQU)He1!_qs6z>h{og*GxzND*Iu}=K^LTsP4Yco2|%ON<|+oiklsbQc) zh|Py0WenQeotnrZID4{G!{XZq^%Bb>dW{(Z^Txk!C%^n;j_bSMr1W2%={yP? z-nwhJmj`RThwQ#2WBnT3Fh#62cCc=IVi@QUVza(fPGAA7bZQb#1}~|KqjM>hZnsiS zS-2bsRNXJ)zd8Kg)qh{*KAA!(dj>p+jE(48X{J5$Ed=5jD5xQ0jv3llSR=YJk7J|zevta^$y&f~+S?iH)F zcyW}gjyng%gUf|VnH8tVX|!F_qNqfr1ky;z^!VKxeY z^`CdG)I4lmYS)X^3%6JhF1P79PAKc+;n*^36~+eOKBc^t1!g#s@#JnkQp2%29hz@f#rve`Y)A>B<%00S8%Hgm+cJA~$7 z=XMwma1X^?Cm57<=nEpRAi#|A8fAr@@^zs!8o>j**Uz?)+ULx1UHN&A zl;gHXcD2F5oDZbdR!m@%CmhXz8qcNSmpu7hmnZGa*=8OnvIc#|Y50{pk-4_Y+)#*l z-y$h|BXr@RIVJC(1Czvm8vSJtYZW|txa@Ad3(6|hjf;fkp zS+&Fork_+?$^ai|hQUt$D&7&#yWwSAir{X;_FRA(Ua%c&Oa)W;u;aoy&*)V0x_R;+ z1A6xllzSVd+Lw{N@ebh5nRn60ZWS4y-)~hID7`2C;~_XNS@*=_Wlej4&D{ys>29nO z1~Niy-Vo^lfw<18Vm|27U!}&8^O-OGDp>3ylyfNa-b=-wms4BvIc+oXUCaciwCV$v zHvb(ewXa*HB@*Y6f(sIX%Ox0aVQGn%OKr@jwy0Cj8u4>&bMw8C?e_F9l#(eehC7i- zdHkjuvs%oeS)bmq?uG~bvG-wA1V>q?yRmi{$Oy3+B<1Hvb0g6W;A8$}hqz|A-L7Nt z=wZ$aS987jy(>R|Aw4&s|2ASS1J8?woxwm{w1r#?8E`gv7o0p)O{iNO$P~M+yjRhmXDSGgiT6h{$b;>?9wcJ_H z)l1u-^;UG!28@)&-!7DzotcYwo4d&3yF14mzT^;OK61n6uCzX@K6#A<#?x2JL&B_c zI!~U#6Z$@_ncN3k45XFVpfDWBj@t16C(6d3W{dxywk;xIfeidDAMp?~4CQ``Pp>!4 zab5Yjn{?B&FWszg0Ir@JCW~fSr6m%_H4vDqo3C$3BvB_e7CSZ$Z2>b(*DXy8IyhL@AA<7xHta+` zHA_Q;sd!{+LT`C(ZR3%`4HN363)C(Qu=#r;AWb_!a7>~mRiJyWK2|E8~I>@ST zZe02?FkBYRUMl|ILU5wbr|i;4N~h0}N=Hxc>2FbHN<@74FyG4*cfR*s zot8puY}Ewxx2&5Ky@qujXOr9zje)cf8$AyLqEC%3-2BcWxU^3E_nG6OpY;Y#8ZzM^&9m(W7|FnTg;Md(3fN`5Afe z_HZ>@S>b@eziPFtwVeP~d1B*w><5RM_f|xV$6@NmC$@iXOh;e<{l=mv#lJVkKz}pW z$JnYYlHzxLE0VZqtD`>zh5^qEm8CV&@m=+ugO(0N-ScF7?{PuFd0=P2trE<^(%+IOcZUAu zLm#T}Gd4{87pr+RL}yL7&1?jUuvWW`L!eK87<955PRXxdwWw{7Rfp5+xppn5uLHY@ z_rGv+o7lm0rTBIco6zpjbFMSOj6l5@jy}-zTn*kX_KYqEivX9aX+etO8qN1V{E0-)H$OY8oZWe_bO@ z`y+v=Gxsg(*;gb*?;w@$Y<}FJ7}!)Q4q<}YxnlVSS^3B9!4#7o_duS0?I|Od@^X#!Zn@N+uG|*HYBTF) zCFarcx)~3Qw;^6BoH|!3jyA{V)AMMl`<`pH4m5Lb(e0<7gSZFizn{u_8%C#frT6v_ z8>~#^|KcGF1^=B+FnTWr7~+|A`qEk+E!nMX?ARu3RfaGfNkRSILQS}8UEGRH+f<4f%i#Knj@5WPJT z4V&z{eoU?@ID$JT{tj`OQ7C0gFhY2q-Ni+;?H$an)+AeIw&2;NA7Nj&(QY?Bp+mWm zx1rkCmrp64L-SQMlTTPqrLCi+X8u}Py#p%+($2P^;wG;Ye}vb8W7}f$=jmOk!9l6< zu2YNY;}&Jo>rK6m#R;ynrKLn{ii7S_A{}MMA|~)}Mv{qDj)YLo0+z?h?TR1K+muM? z_-|81dLj2c2*RRfl{5I5i?K+WOb5?pvF4CpNK{9iaqrTJUt7mPgg7wIIQtsyuCTSw z5uIX4(a7ky&hXBpF27DH9B|hedYQj{Q0c|h;y(zT<4q8qRtX3jLM$)6Pq0zECEqp+ zXX37BwHPPyRczP3&=>QJE|#*_b=}I}FPL1*lO{HA1h0JdC^8)bBj-UUfX$`iWCm4? 
zd=?GcB=XrR+QMa4;Y3YOBeMglg1CItM|?jcl6w)tazP7mVXcV`>^kD!r4ySgdoi1e zZ+B`hwQ2!4uHk~TFRap%iNhK&j_xg)(sB#7K;-z`YLVL-GX^GO%7yspO8lxF$J~sN zHn2EdqHb>TO^>s!)clCSz(wYMT0OHsZO6Lx>J*WlR_UEBE@@)3kyIQ568bnJ!uT@I zEs-7+(&t%~;gzPRt>QIK@5@2wax%o_DO{K@DTdu<2XrmOWek6XcKO2bh|j){cb85W zHdlTbkhWfPJra16c0zO77tp_TT4KXEJ7Svj?cF{Mxw$iqf#Fi~WI!79Qnnu>>+ZY0F(Yp+XxLiG7DyQLZs0ME6Ve%dYOdNt|E+zEL$G?e;4Y4V?9Ant(#$f-H z*hB}#&`TRhl&PEU&1u<(yS_kk+LF|2Ep)q3v3Hn0F# zdL4imJWD1+&)zH$-=85e^U=G^0e&I_ZX4?6C$#_}tNu>>kEE76JGu1gXGDXxgusx7 zccWW|#oaZvV&8% z4Ip+ut8^OE4-V#k#ZoYr@pL6bCPDFzUT9=YrS3oR_mNfE$b@gE(t4}3T#dtN;W)dt z>4kE`WofNBJ#-hk=UO~NC;VMo6tI=%aysGj2rlv3rxW(8Q+DZ0lXC`e-Mpd`(MuQM!+F9sqMn);TaZ z{_O;TpCDZ1d(hij13A>cd$N_bIX z5kDbbzXIZUN?Bk#xWQ6!XncYdn;QbNlEL;Mmon`HffNMX|&md9pIrpkw6IjO*UWd5t^DHcHFV4A+w4*@@5gRofWidM_UOi3H zFv$sqo%ipZ(qq$%FI7zTgZRsF;&%q#V?U0C3B}+!b`RP(G&&sx-2>@%eG^?rCf)}~ z`PL9-OK~HUuJH${o1yjMqmB#Dq8*}oN)l~TbC)7EGIp*E+&@v=xNy0H0p!UmSpyD3 z|M6%3m;#}|obSOrc@~4BxaV5Jd9wA(I(D&2W)iKR#rfuZAf!q3Cq8ITDdi(Oo8^JT zYKTFE9>HAlm*U%;MR7NCoXzt(2GgVKxB~+{%xD#pjTc#M1KS$SphFiF85ofb%Vq8D zk+J#lW=O$Sb}fY^|3-}+^spNP@KF}*!bL!;lQTt0AsQRcSn7s*IiJV7%>=m6DBr%vM%Ki5PWTMy;Q_(OmcrjxGfB0 zAao&H({3y_HQ_k@Weqs<7^(aZ3rEhnfYGx==$x-Ix7+DiIX8}fJe|vZa%#UhEiSQ% z=iWYCSfeyf4nIf#RyYT##PD;*JmEjDZv5xZC-VvHKQ zv0;flw#0OeF>2NrHPL9I(HK8VEV1|4D`Lfpq9E7+-3B7kVcWd#e|C4+H*a2@vO6>T z?vG{OyZ797-o5wSd)l1>tL!lDEZ>Ad^p~Av`L`Az&(|U;RxP%YWWd<9$#MA+1HO{9 z>f$Yj9XT7rScq$6ay-ubi+~ZVkA> z64 z#EueT3jnJ@t87DjA)PW>7JTGhvUe&*q7(QKqpXmyn*M2x%a?a{WoX$%`a89ko%zwE zJE|;0{2lY!lsVWYuHd5-hw3G~;5%C29prewiVf>gu;Bm1X@Q|150UlZc)U7GDt3>@ zC=_iye2XLLNUqfBO#EsswA$>(&@2v0~Y_2zL5G2 zQ31S|QxHTaGoehPC)ku!_o~!CWD}nkQCuIpxm!aaG;7tI?chic!n5|xacQI-tDpGWmxjubn zmvhVMF7`fF_*{F34X|lCZPH2RH$Sj_{lXR=Tx1xxCi=F0D7^j`R*xV!_q0l@Dh@Q5 zdRERDzZZ8!KR3nY%;Q+8z1GSGbZZ=I!;dgz-Ny|kSJPaHO|cYlN_7tu@X{m^)RpZ+ zXPW7FYY&%7>_$?dHYS#Z)o5EO;_7uemAbk!4{S+C7MY)RfNbAFe-s3I?a4-Q$@n=0 zTZXgz;5CHFeGI7L0N$2S95CMRYHcK1EON~(sme*C-bGS(3T>U))n4}}jFYdb$zc_5 zr`9_X8_u0YF_Hb9GREUM2Kv^ji08@FFMRi7Y|45@FE6r*&!?!ghz%X}O>E$E+i3CS z6RI-N*67u9WW)}cC@CzX=P{t>4j54G4u|6rb8GOH!Br)d|02$D-7~{?*Yj=|tRlxg zm`&nhpLLF%ER{d@37a!uj!3^ihVQ_UZC$UHwn@u#UNG56@APVLfVFJcdU?+l;k1!1 zvGHv?gWQ5mU#|~k7|}kEZ#P2r`mg4K0QgKRVXGaEP0JN#Fof)9qSbXLi$qRsvXEnc z0dbtniOi=~ZuG*MJUiVu5}R@<^5hFbiRUVk%pbG6p8HDK!PHf5u8;OK+$Z%Xl-Di8Mqn=3Gm#D?9qC$XUuFZEPM&@zG3mlzmO zch#g}uaj6>&%Lqx8kT15g9A(?&=v;80C@lYM5%<{grSfy0KvHh#esbo=xfvIi{2k)>A3ts43X%)*X>(*tY-Y%yb)cL(dUcp1->Y6QcImnhV!^#F zUQJKn@2Zbv#2$8VPH`RA9!Gv)O~**|HfvnIz2Dz{Q=c&?-F6R_5x#t$@tqO7g&~ds z2+m2U%3ctE4}S{-16xYtlr>`P44eJ4c#jYh>@ifTp2;;=FjJ-ebY!W|$J^=hJC+lg zual5V8Qqq|T9X_)JdH*ZgloxAdhKP)ChGp%E-fDSzVu(OTn3JZs%^vGaV!3CFlmf3 z$qMS~rFPRvE*fp|vf{sZ?mZrZt=r@8`pd56J-othZ;s2yXT1=Ldpc^m)9ct{CNyf+Y_w^qukeYThH zmd4|H`d3d}>q*rs9qXlQYnoc6X=nAh<_eVqiMITDSZEV%sgS9`EbjGILi4UvZorvz zTPEr@e9Ub@6a+@9OxZdg&85dK3qokL095}DvzE#B{0?heKE4-8>A+018)Ccx<#-Ys zzL8omhr}iU=@Hxr8#i-p#cKHI_a!8SFL{lDv9fIO{jzMI@$J6GA}45PSgUsGFMXe% zA+dejuC(vYj;n^+2eRgXK2rWBb98-|y32~C?0&dWJLXE;o!V;4MuQYgbjlEqhlTd3 zy8BSw_1RLbR=VE|km!6q-6II#E&@TSDP}`x;w9*5#F+nK!0VbH#|V*~xP^sgv_)ji z16?KO?-K$-Kg6afCT)Lk>m?u7ZGf7yuC=%}M~2T4t~-t18M?G7ypeDuE@JR3e-i7@ zg{(sCn^~>!%26u)U+8o^oD-c^-*2tbwlD2mYLYrn>78r@i@DzSV8xWpjM}vwryayPx09rE+iJ`j}xEEmOzHq6TwZzCM>okq5C+ z7EkP?ucYCc_(ZYmt07T6*?0!N-6~I@UJ3wiN*Lq!>jiba6a|IcZ3E9He{K`w7&_T* zl+GHqZLth^F_#DqMyj)7(xl2-OWZ$3bT8yLw*Zs0rn~3szSN|Kl}P_xq4P=cnQju) z`YZgG-5$K$$RXA|Xm8wWjmzJ6!_|o%hYoW@R z43EH4OLB|UYcWzl9fwVNFY?XrvvK>Tqg#=!k?A~@OIm7dq~E8F`?F@Qa6>)B^2czP z{sj#Cq}&ThM1@vP=#FM0*ZKSDCf#>Gb*fhb7Tf*JuzY~lWwPR7b6kEuM@pUBZ+!Bl 
z)SVYkr|+}l&(>e?w23v&{yma2LQ(tn^?WRIqLdT-U7twxX+7VXumzdMfGW~7b2D&! zKB)|6s8l{OC5P;Aw2dv&cAyV}b2?&;|I@WH(|Ag^JR4$T?38YyUin!F7cY>yo4w$y z>3@_;y-vWwt}~{la^ae_?A&=b9ElBbnls_&GOXi4{6>qZKG3Q zb95@V(LV5oMcm*7-(B%mV#9lM$Lv>b+%QSR^+;&fh7yS+@y)BYTe+>iGA9!sNQEoYOh*Z9kOE;D#AaRxq9|IOW z1k`Ecc@0-cr+O!YQgu@>jPGR4W2T6_e?$&rqmFL0wMlEzj+$NA47U!KNXclobWLN> zF7$#4(-SeEhM(i5`uTTdfaR2(2p7KP9|lxm2~D*Vxv1ois(=HB;5gW)|H!4DsAub# zwCqm%zIJOD<8=GM()aD4M)PiErjwRHY@p@X#$-1j%)f^ttD|7ssrJr0<1q;B3M`cG zs)yIEm_m4pAlBAZww&z>VyzcvH(^z#^5@bPL{qjDzK4JW6wic%?h0q3I^ z`(dkRvtJ3sCL%=%i+Oe`HdI?8aEMANrk2HUPjtN%#uGa!3$ZcUnk4!cyWXir z8o)_JC=N?mp2P+<>c%*H_!}4lo8Y7GE%j&-IA@q4=P@u=8t%dcR@|rBm7H*XluM0P zJDep__dPbBGJ)8Dvq!d(+RvO?55aZA=QyW)@py#Z z1$Huw_pEz<8JGRkbrKptH|3f@wtFy8*tMvfv`J#Su+g2zigTy>bj+n5qdL3{H(fts zEmQ+lM=po9S+(qnrT9J6qc5D&1DbXTCj}QC0TLyAYN4SnS??KpH=-aqy%ibA6vjtx03gS#kngzKc8kv#gEx7-w|&; z!oZ!BM;m{){^G&mOw@91PmADt)kRV_5Z4NhgK&lnAIa6oUs%fx-&4VW@geRQj@(-6 zK20Tem?w8J@SzMJRw~8+V{NQzlja@pMV50Mflp)vZpMo4U<0&)4@`c^!gMr=C5Mgf43Y8WBJZ{2SoodPP0~3Tm9m^tiH`SH zPhl%$)mF-V@-e4JvUWc-N2;f?!QDMxMmB%bnh;9u*%jAPNQqY?S=96NZ#D0Q>v#ULl3jO}fiIlz%vg<#K?lJ0dz%DHw$Kp8O z=WZyTC1-ziQ^P^-Fr9et*k5KsP3)P*8c!j>Jx%%QJyU=F*Q$OHH|4tSh!k(lBG0BN zR>?RW#Cp@Wqj$y%)|GE~SFNl$EU+`CO2dbz2Q(HBh^Z*^&w=Jf>E>&e@s z_=u0C_JSh=y7Dp?&f2_Fsg(TH|M-u+&?<7Vs#crJ$>eid#Kw*YI5iG~#O+5*!+TEY zZt;BFrx;`?2*iT{bF!(bQ2e3Ad!@f0%ZN!OQv4FT^5qRuS2@XtJyJ4$x=Lj6{X4A7 zUw6eMKau}M@VjSnN`A$#AT?lR{rz#$@QJpZe!WD$Kw*!CrN}ma!=iL6EueW}@WQ}M zS#JcyV|0odbkQ|#BL!F-| z#^m1{W2^x|?xZ-O>F7a^vj^FQM{mXm^m!K1P4f?{-PhuoQn?#4<43q+<7;G=S9flH z0udT*E&2K}p7$-5=(V`#|4KI;u!#=MbWg@g?X{e$Rx$DVXQJ#*j>e-Mgr>bG+muW3 zo2V>5thhdJ;Y;<-KSRo&T{dQnW;SK_74O-y3IQ!}Qyx#>3?X zyFf=DYyR8NbC7xpj?7^4Uc};rSSJi|4B-A}J8tWp z0&#d1B5?@ctT>*Q-mjtdV>dI8{<=UKo=&eU``lYf{p*uDH;48R{$giK#r7y!JxKpc z6LYA?is+$=8wl~c5T2iXB*TU$!uWgBi{aStU$#n?YymrxwWZ@~1*hD6hiWA9H4wZn z^!u=3I{7Ubu&xulpZJuC4O$PA{Zv4w+6-w$9ktC|#D#k1R>drr?7%nqdJZA(^ z5x-Cxrn6&NY}Z=`O~0e0V!T~iJdQ!`XsZ*4$=zSI@-~h~I|vQR!~jm#B*ygUTOTIx z#g$V1F^k55J=&5SsC*ipN;q{~&ti(eJ+eopngrYUGvt)V-XnEYcmg<`1@>78LN><% z!z}fn*BP)Gd6*IyAI^a}x*fRsCvuoNHZ9M05}r+*bJ(YBD=WT1i}$oPoO-f0cY3hOs}@b;Cy@#5M7_Q@nh-qhcZ!_8p;SFyrYA_`Xi|rO{Jsk3be+lz{?sJ;9aR|!pTN=E~&V?2~cN`f~ru?kfz&%H^N}UVIPXb zz$nc*sLP)YEiPx6TugyQ^6pfrzrd+&oOo6{rS*KgE*Qnr)fn^xS+}a^+tWQQP&=}6 zDVfTak+sXXSLFz7NDo^hEfZFYu?6?GY^uscuy&6oUO6I7ep}YcwIyuVb8&-V+l84@ z^?eA9PY#i75gLgsFmhV46x|7{Rte7n#?;gNUaB4s^aSaY6Xk=st+I^GyphBeaLS|c zto@UYlFBpOVC>RT;*XNar@+mKVF0B^#FWAesf531HssWf8U=f*i6bdBgR$mkP~722 zpXk!3qs++e{4%+nHKN!QOb2AIcpGy4myR}b!aZ4uvr}7fYPw20ox34YMEg!Hlk#I= ziy&}baphQ9X3GxE-9Gql*>tH!eHmRRSu=8!EUNd&`5x)_va!9C+iL*BKs>+EDwGi( z@KP+sc`UB)Oz?2Q?^RBBrE^5NB=dDehJmYnnalg?Nn(&xt(uKJO|ux6Z6)?>-06eRtgX01yU=)jZG`JXbgy-!`psPNa_rVrb$i1JVP`j( zOr+`9qMjd@N>mk8{#awf{!%#(C(1+NZUi|MjtT1ZJTS3&U;3{{CQo%*MHG9ON76%B zvf3_=@!m+7ZNGOsqpB>J5fOpc`wgy>w< zSNhLWHb~IPoMU~-SaaAc1#$l~2DdV*J@pQY>rXSOx|^{MQ4n4Tj?ZLyUS)y95^AkE zTB;s{H8j8qk7!~Bvf|%cjfW=$DlP+>w{eXP) zu>~l6?Zf~C!|`q(2AeC3Y^5}(WjxlM9|2->@gyI{t`TX=SC-Tu=GgiDW#Byc?$>e8 z)Rx_e-(0H;8Rm6xG!_MEm)_;V@^&5ap)b*(nx{5AdTK;YawP0|J>~ma!-CWq1eP(p zUl91r!AADu%Il{P@b!3_gUAg9H3&=FGM z$|Gh1a>4;PCfEP~KmbWZK~&D|z=HcXKZHhmL4Ur@?cVrr9UF+;kudVsr-UA^NuK{Bq03+NAjHhw3 zNsEOB<=-)0R@ZOahh5L%lN|H z=26KaIh0BJ+rDDVQCl5l3*Db&d7k-tbD>1|PEG2VmL2&vtMTkK~TaNbSZX2F(f zNw1A+zY>`Mjky?ub>ux&7%*1fJWDF~Is#|6>6M+B8`(&8VvN5xHczee_YrOo{W-^x z!&E&Sb?;wr>b;h5o4^iQ44^iR?MBsX7Nw-!69e=BGMt~3sWSrmhF1_!J0* z4-8;Ou?jtcGD&G(_jq5u8=y>v$X-m~y~az`!r5Znwi-WdZWrx&mzh5hjPuWFt^QrF zt*$}KYF`;LVz88rz=#?3Ut<_2p#CIrKUbuj1+(_fM4vEr9mVLneEWn+E1I 
zY-$Eb#X-dqIf1dkTJ0`dr+=lKmj6Lp_BOYN6XgF!O7(Z7uVao+e2)2+?Zop@p8vqH z>tPlnPa7K$GX>d-U&3VD0CpA}l_TT35{=kAE&@Y4VP#ib_+*Cxh|L8|m`m)^R!Qr| z+eqU)yENjsq^`1oO~r7B^d5{hj3zP0*x3rWH40oPZ(bLjUJwj*cr}Fu^`?1J|A?Lh zdO@eN(na{m1BOWD1PIh#%!mD0D7S8)8>VJ+A@+Uq05Zi7$VB){lXZcoND$)^P+G#%|V>U*-? z-CK8r<4$Bx_1Zg9s%mkdc$FQSqe~P^eBuiv1d?yH@(KhJ`h;|>sVNUI5TC)B^4!j=bl&>Q`@|=5m&!p1s z2UDv#Ixn?+`JD@*gKQ`AYZj83obFyn_He51*7LhpJ=gj%vVdz+>VJ>SaD8MXr_d5? zOp4x8cSG;WI)j3=jD>t|5O_9@(}TfxL95@uHMswwj8D2^6Q5u?{=lkm(t`G5B9!^f z!1{AdVEMUkVpHznU(3{#)ib63ZPR<;$EXx-WzEEnJ0vz%q`17Z8@k0l+1M59W z)r)R72MWq}&0uJH?>E}XK>mvs0*b083rNFOmyFxP|M zw0XQQF~@a1N9O941zqp{o_rG<08u{h4p9B~j!fe^Q>_7QkR)QmaWEt{-soQo@s-p) zz{|3=5o;eJ(TJ2fo^I61DJ-k^)`I821MT@}TWOe|N>d=ZX$y2ysW%t%sO$=&b8^T1 zC{>?8c+S|r4x!xuwSDR|aeJAysqg{3nuK%A?C!>Da-6=~8vLW9mU5&V%Wcjz; z`GhvA@wHtW85@%=@k~2=ZW0M8DQfv9HZ;T+e-d?5lp@j#G%AVMu!~U5lQ8(iK!<$G zxYrJxfpxT9yeaN+({qnMLNL-`?L~Y2$;R#jG6uGQ@cdP8#zYzCab!D(KNau_X1j8G zTigUTbYrCUTsFg#xMB2n7)2iI2M27*pYQ{xFnp_xE|!X8{K7lIsh>(T>L$ZtN4}pF zHen2?7M?lGGrx2_$3E(M_W$II*ytc+!dn|+TML)+q)lv~sH3cfqeMT~8W+C1V}PB^ zjdl?5Pj;RzBQ~;2(>VOkJ>rzsGx4$KQ>~Z2gePJbL9Z4$VLId|_5aZ|g(<`Ru^Y31)%$z3VxWJz}%5RBXutWf9nSRc(I4 zs4y*J119=VR{~P$bjxMZXnXuK*Bw45Vt}2@`wZwe6JzyFd9+22J1`Z7qQUZ6a=xC3 zF=9)lKIJj>nlygK_-XEmmgifYC={&A?(=7zMvb#{QSD84?U=up8q2OHRiCrS|C9Oh z1OC+hW&{5AB<}KJe6Fdtf%{GCB65b+qowxe=5m~#^_0N|{ywL4YdrN19VC5U`ar5S zx26Ss|4i!tM4mcpJ_zMRa~z5T-CUxW1@@-Nl-11n{2FS3@ z_^=5lE{M<9$|JSHhVR}Oz$V~&J1D-@3B-e1`@jbPVHgP^aMr$6*ZtVvT#P~OR4YMN z56XVp{JGSgrXB(Oz;?oH$_IG=6RG$;yRU%(@+E*Ch+#AuKuT~D0?H0dwXVRmBWt?e z);={oBK2RaeGBlAu&6ubW3u`pHsp8Ec5o^=Ii3NYM7cpsc&bT8m9LnY(g4wjB^`&E zESsT~w5wAgEz-iKEhoZT>9&H?-OO~$=)L*dFL>KmZFefAx>2>+_{+Epmt4WGvJsgZ zzwzIm$b@n!e;wA@CjRquKz=W*fkHH+Qf5i0LkD0pG>bv6s)PlNUc0nNMwJMr#6mC^Or%mKl`ZU!qumVPoYSv+U|r>jf9p z{U!Qjs_d=pW%(jbIW<`QSFe$lof-G%pt1HJ#_lt0mT$fma>Kvt{6I6m^!lINcqUOW)(L-Lm!a+AOR83(x*iPPS~I zdM2=AOPd8|$cpz-EMjP>^U5$}8Uy(G+8o6Gk(KF>5T18gc<)&)Ywk-|ew*obI+gc0 zjyNGS|DoNU>!DK_8V?S(94u9w7V3_4gY!s>VNeOp68+0+pq{zNd8&azw#}0TX$xZc;2Xv}7XO8ke-nQWe^bLin>7DjzmYXJ z1V;&w?INs;1*h8Qt$DifeKq&hTlmVsYac7i7SqL)gqp!?>)kmZJh(=QT*W3uRfS(i zW%$=AvX{CIk(;4a#ymhVDgD6ubp$f4Q`0Z0drDw!ea2i9J2De1Fj{F&)90wf=2+wW zzIzfIj#a)`XbiCV9ehB)RVTw?6jH*1SYb_JAcVxmij;uw>XIE++I`8{BBPlc-%pP3 zh~c{r-}&Lz>%v4NQ7&dt6qoI_FFs_Y-$JH zQ*kq7_(%xkFEd#`N=}!|kU{HYvdnCi1&i)QbJ^@V&FioE888D)qXnOv;e$MT9vhkI zHf*irDpX4$IW}0#1Q}k<;7lTbhBPk0dBc|BiFyZ^cm>DP3=%_q#$S~dm?P}yDHB5lnq zc&%6qlp=TQQ!J${e62%SRJrFxZP=pNix?Zjli!SQ-#7bxQE$|;ixQuxlwAqyM(wyB zWtaRI#*pe7DMK5I;635m@IRBQEO<`m9Q1WWsr18ea_Py=P`Q!vu{2B@CsnJu#(`lH znM5`*w~~+a^`ta>x_J^C5Vw)JLYL=HUDA9d!Lloj{liftU(Wxn;Gi zct&M$X%-!QUnoTYD+t1Nto<9aAt5R|fr7V$@T1{cbOD5?f&M%VJLl>Bv_c8wDOu!vMRp@i7ov>r{^Nbn@1g3{oVHM@?uT!oes9s& zDGm zg8;?&0#%iN`f{u<;M9Qz!=rpkHF{GoBKtWZ?lVla^x-**XK)lf5(Bj0^Te27Z?44V zHS|TH&L$q7bK%^BqPwFsL#*0b00Uy>1EH%LD_YWeZ<5xFD4@ww2%4*9=UC#_zQX3X ze(g&)J+w;H);CkC)w5{lBeD7UXlb~dKC>qDOz5<9e-4(=@38~fsYzC>hg|4zTp`_R z=ba)`t>n~G%KEbhgy%UDtQnj68c z5=E`Z4(2$0=A=8%8W-f=|R)51AC5Ld5Hz}Qo3dM%@0xAHYx*N z&egkv*fe}??hAZHPM9*$9H-A5DCSw?g1oyD8)#Y5;_p!S>JD25hg9Fu#&9br3*Z`S zT=b9hoXJXHLp357%nNS#^s0n?Z86)gI1AW@Vy=f%*riTo0GXUu2{-m%jD^7ZQ*G? 
z7}AtLY$|2gcuqD+tUqk^=BHDYE3rXvlvJTzWgvQiU3~Dkhz)TwLShpfk)AoWl@;G0 z@Vhh_Fmk2}h?C<*zLfg^5?^PRmYujrOXkaMUlcN5Y=%o@VSQM?QHG1ndU0Dz-KVhq zF5~Lt9k|LH^=fsyV!WT>iZ?Bh$W(QaQ^PJiXxXJAa0Y`1-2lKg+#RC&8&3Mq=p8nk z?p_!xwbzc4x`SF}Stb4by$fq`6MR2T<3`UzgV&zD9Gp~9!0I~jqd>HWO>N8cL~9f8 zJIa;Vs9wIGl9(8ce&D=ejG=Wr2W{r zF_|<^w*+>O87DS|v_94?jZd=@cFcJwUrOw>YPuok`5O!Ic55Uyim`*E$yiz%O; z((p^J)iVaJMWj;+%P{aNy^l2qb8)^kF6g@lu}NlY*;N2RNc&`SKSyN!5Z3Z(U{*9= zto8K9cO@Q7{v@^;n~bxaT#bJ#^*1t+(%Rwp#Wfl+uH(~hIcB)gvfv{YjoIe7Tzuv@ z|4J^lE7dlQm-;y@#5kI)ABBId?=gmMrmVEJOj$E_#Y4e6oECNu(6%Juw|8EHyOdl1 z;|(XWo5=DRQhOkC%K^S!E&A7+NcDTjNIUIY{FIzc7&A-VpbLqCiI~7@OHB3m<~o@n zBEC*GRF$rA87}RI+OlpKlzAl3^@sFaf@1Qs(>I>Cw5iWvDgTotuKiUf5|ij!yR=Nl zvAc)kk@*0zVQ-VuVM(X`OHMa3#m1>i{nyZH`w1%56>ZXb0=wpy)06K+a28zjJZ!aN zyxOEJVhB_c2gNHN>(G2;Q@%IVM`A3=+&Y=L0++y1ReWr@6u-eW+&8={0|K|ntIfgE z<1A}6<~7^kb(8`vXb4tbGPEnP0o)e?n7nCQ{Tfp2pc>f4OH6Kl7?TMMtmZ0U%1Mxo zF(HqiCBwJ0D}SY|{yUQ|V+NOYb!Z&I;H(fE-1gk-RA(;4gWCkwW-hcT8*NaP>M_!I z2QrXJsN-)6o9!$KREqk#LMVjdrjI>219>gZr->v$18Xr1sV#6+WV7$DM|MBmcta3CtiG+DPry8_VV3QG;#j6HX=(8?546pY}9QJ3;NqxYIY`yU6hGJ z@~I3p99BD?52bE>7QRzB?R*xz{So-Ck!!iR@+_ykjH$j(X>Re1mu9zgx3sZR_foU8 zj%QQ;v|pGbaNp0ohJzkwWLOA_-CrSQ1_N}r7}3Pw#(zb^*g$1 z%C#redg`5_ll^q-MN&5!*Gk7TCcg@XiFP*7bu8FN+vVf*lyHK#gRzTa$D7tzR2CeH z68kA3rF^u%oI+$h6dx|{BmKVCE2=+{;p3bjGZ_z_T{m{9v!~6;a3wY%*w@Jh9^dkG zI}T;FB{q~D5}S09>m&bu?aBgeXIOdXe3xtPhecs6Vv>k1h<~-lXYw7*kaaRyp1G`6 zS#b~Va1e7bWXoSyF80;FO%I?L#0`{pHb|Y??VQg^XO%sqU91Y=qqokGVLMw>n`Cvp zHO|jFo8AqMP|j|e0k+z$t7Y|A7Wc>9iejVJpT+rSQ7M^$=GwHGQvGvvj_H;s%{12& zAL~HcNgfMgm21ZNcRKEGY&2=&8x7boDe`$`3KYt;0 z*TY_z8-LIAFLh^)*mM{n=d7YYpG@O2U&~`tvqs(ZV7BcP76@KBdA_VsCGFJP{dZ=@;O+8JF(-whW^xZ6Y8t{ozp2#fdyRJ`HV^<`d`dok+6}>)-Il0u zYH~f^xgjx33yuKo+WtKO6?y`ny<$d&ii{v^)gO9*c?^fg3Ri- zJRhX)abAO%#Pitw_LbqDTF3Mou6BCCbwG1}I29!8TwAN;h&2)Z>MPdxAn#o2bpapq z5^Emi)v^QdMf9wB!}ruNfEp&dC(%>R+5j8l5O$Z?jh9Ku>x>oG@sya$`;CeB zG4>c)6_7G9Cu7mA@|&CrtE6?~ZKQEtPBkir8lw(l@#FW9?)1Hy#TxP!YsdsM9P7=C ztT%g_A!c<<&~Nx#$mV_# z9I0jCh7(m}DBO$RxRtj?S|+eLd&>>RD-FVEA3B6#-h@VTqfVFs!%KT@2$0Y>)#IUk z&%#(g7{Hayg?!oRiLlXsS$W`^zLhL|lfisfB5e9^Op5CCOK~sZeyM+uNjTpfs{2#9 zwbpYH?fzEfPK_*l3mago@$Jwha1Sc;uzDE4spRS8O;HXJ8OBEJNxf@Fwcct{@XzMd zVl_j+sp$!{4{f~It;{RmCmp9UHJ-UN1Uzsfs}k7AWETFP@q4~%zIhQE&Ch*h-|BBV z!GqZND>^2vX%ibaD!!fIt}6`PiUF0!tdhu4OvtsoWCm9ot` z88>)}Z;eRLvTOlr@SKjO(UrD|Js8Pp3yx+O+%PZ+z@?fJrY z1_wY#V&edB8_kg{ZDPZ*)bYsJN6K9_C2SiNMy8Dcz0Q`2d}d-nkyB?%)%O$QMAUtM ztH?#x_;labmCCH2Zt(Y}JIJ&xq|@!IIX|1P10b~J=D6_L76UOB zcN~sQrm_o}BIyN3VuMn(@515zIhNzMdF(DZnc4?jUS~}O*1pCtHjaU@(r_0$tN+B` zIeM!R$Za!ZP|`PfKx+TRu=%hHHvOj(F*Z!U6!#KTNZTfIxjR(1rw$$_6^C_ut@&A@ z!6j~Ia0$5TgG!|MNgQkLYgbQ(;yB6GK80N8z?d{~ z+KK&+aXULhW!n^EJfWB4*(3yxawIl2dJCxOLLIk~V^ZYE0&~lkm=rMa_`xX}rJ_?-pXCGAb<9FHEDU8P5uP zX8Tz(B8mNz@rnwhKs}*y>*S%VTVLCCiPOyicJUf_B(^(ftX(w~b`|6IFqH#Ad>-cS zWfrEne%C^Z>ce{VsHz=1)s?CEF|w$Eli3x>twy6Qwlninum4EftxUXql-2m1thTVx zHarmD$sl|?m3C$;PmvT~5o%-l$F45GY>bZJJ0@vyK|~Js za^=slyk)f1KCHrdh|gjCO+x*CH zN*LRn7{Gw@MKpNMHPty7TZj9fvfMbRCn4PU|AhSmV>aCYJHGn@SA#bHDl_fQSfj`6 zE)~DcSZTeWAU$0pXS~STRkX+Y_LG6Xx5oQIO~faQvBC5dm{C7Vly#9uqKIHo==(rD*~he;;JC zk^(C`iXF@w5SX3J{bH=PLWm}~UGHp$_wgLz1jGI+dM;y~VQLM=H{8`TL9bA-rgZna zdXVPcu6QlX#K0@w~NjO!i@C|hTuR6ow(VUl$Oj_pUYo4PmtuD{RX?q~z4 zn(k)|ne$nnTO_vImQw$^H9lMKvt?*yM9Ou?noBD8Uo^?`9k!CCD2j%`4+iGQh#fFT zv}22Y4lZ}hG%i&W4P~h-RMuu`->l`bau>Y^$`aqJUR~K}B(Cs-4T!-&KXC3a(I%&K zsP2YNX<>Z#V?dAUi=_4l*a&NP+7OhRtaoofiB@uH)bYBTOsAD4B3q}EKWN@jvZx;7 zc}~!LS%%-FL2gIh}D6l`L48r+SY8sGg&8& zlgnN$d+FWG~He0KZ>2$%EQ@x)Vhg;rnb6co)&h#Ku zsEhVplHx;prKX>oMe?wbQuVZ*7l-sqFPsvN#CC=;_mz8(BBT3=@xI;(16;=0Tzbku 
[... remainder of base85-encoded GIT binary patch data for docs/unicorn-logo.png omitted ...]

diff --git a/docs/unicorn-logo.svg b/docs/unicorn-logo.svg
new file mode 100644
index 00000000..e56b8a79
--- /dev/null
+++ b/docs/unicorn-logo.svg
@@ -0,0 +1,31 @@
[... 31 added lines of SVG markup were stripped during extraction and are not reproduced here ...]

diff --git a/docs/unicorn1-logo.png b/docs/unicorn1-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..3e86b3c7d36471e4bfa9f896528cacc49b8b03c5
GIT binary patch
literal 60383
[... base85-encoded binary data omitted ...]
literal 0
HcmV?d00001

diff --git a/docs/unicorn-logo.txt b/docs/unicorn1-logo.txt
similarity index 100%
rename from docs/unicorn-logo.txt
rename to docs/unicorn1-logo.txt
diff --git a/glib_compat/README b/glib_compat/README
new file mode 100644
index 00000000..70314880
--- /dev/null
+++ b/glib_compat/README
@@ -0,0 +1,2 @@
+This is a compatible glib library, customized for Unicorn.
+Based on glib 2.64.4.
diff --git a/glib_compat/garray.c b/glib_compat/garray.c
new file mode 100644
index 00000000..2ffe559a
--- /dev/null
+++ b/glib_compat/garray.c
@@ -0,0 +1,1649 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/*
+ * MT safe
+ */
+
+//#include "config.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "glib_compat.h"
+
+#define g_mem_gc_friendly FALSE
+
+/**
+ * SECTION:arrays
+ * @title: Arrays
+ * @short_description: arrays of arbitrary elements which grow
+ * automatically as elements are added
+ *
+ * Arrays are similar to standard C arrays, except that they grow
+ * automatically as elements are added.
+ *
+ * Array elements can be of any size (though all elements of one array
+ * are the same size), and the array can be automatically cleared to
+ * '0's and zero-terminated.
+ *
+ * To create a new array use g_array_new().
+ *
+ * To add elements to an array, use g_array_append_val(),
+ * g_array_append_vals(), g_array_prepend_val(), and
+ * g_array_prepend_vals().
+ *
+ * To access an element of an array, use g_array_index().
+ *
+ * To set the size of an array, use g_array_set_size().
+ *
+ * To free an array, use g_array_free().
+ *
+ * Here is an example that stores integers in a #GArray:
+ * |[
+ * GArray *garray;
+ * gint i;
+ * // We create a new array to store gint values.
+ * // We don't want it zero-terminated or cleared to 0's.
+ * garray = g_array_new (FALSE, FALSE, sizeof (gint)); + * for (i = 0; i < 10000; i++) + * g_array_append_val (garray, i); + * for (i = 0; i < 10000; i++) + * if (g_array_index (garray, gint, i) != i) + * g_print ("ERROR: got %d instead of %d\n", + * g_array_index (garray, gint, i), i); + * g_array_free (garray, TRUE); + * ]| + */ + +#define MIN_ARRAY_SIZE 16 + +typedef struct _GRealArray GRealArray; + +/** + * GArray: + * @data: a pointer to the element data. The data may be moved as + * elements are added to the #GArray. + * @len: the number of elements in the #GArray not including the + * possible terminating zero element. + * + * Contains the public fields of a GArray. + */ +struct _GRealArray +{ + guint8 *data; + guint len; + guint alloc; + guint elt_size; + guint zero_terminated : 1; + guint clear : 1; + // gatomicrefcount ref_count; + GDestroyNotify clear_func; +}; + +/** + * g_array_index: + * @a: a #GArray + * @t: the type of the elements + * @i: the index of the element to return + * + * Returns the element of a #GArray at the given index. The return + * value is cast to the given type. + * + * This example gets a pointer to an element in a #GArray: + * |[ + * EDayViewEvent *event; + * // This gets a pointer to the 4th element in the array of + * // EDayViewEvent structs. + * event = &g_array_index (events, EDayViewEvent, 3); + * ]| + * + * Returns: the element of the #GArray at the index given by @i + */ + +#define g_array_elt_len(array,i) ((array)->elt_size * (i)) +#define g_array_elt_pos(array,i) ((array)->data + g_array_elt_len((array),(i))) +#define g_array_elt_zero(array, pos, len) \ + (memset (g_array_elt_pos ((array), pos), 0, g_array_elt_len ((array), len))) +#define g_array_zero_terminate(array) G_STMT_START{ \ + if ((array)->zero_terminated) \ + g_array_elt_zero ((array), (array)->len, 1); \ +}G_STMT_END + +static guint g_nearest_pow (guint num); +static void g_array_maybe_expand (GRealArray *array, + guint len); + +/** + * g_array_new: + * @zero_terminated: %TRUE if the array should have an extra element at + * the end which is set to 0 + * @clear_: %TRUE if #GArray elements should be automatically cleared + * to 0 when they are allocated + * @element_size: the size of each element in bytes + * + * Creates a new #GArray with a reference count of 1. + * + * Returns: the new #GArray + */ +GArray* g_array_new (gboolean zero_terminated, + gboolean clear, + guint elt_size) +{ + g_return_val_if_fail (elt_size > 0, NULL); + + return g_array_sized_new (zero_terminated, clear, elt_size, 0); +} + +/** + * g_array_sized_new: + * @zero_terminated: %TRUE if the array should have an extra element at + * the end with all bits cleared + * @clear_: %TRUE if all bits in the array should be cleared to 0 on + * allocation + * @element_size: size of each element in the array + * @reserved_size: number of elements preallocated + * + * Creates a new #GArray with @reserved_size elements preallocated and + * a reference count of 1. This avoids frequent reallocation, if you + * are going to add many elements to the array. Note however that the + * size of the array is still 0. + * + * Returns: the new #GArray + */ +GArray* g_array_sized_new (gboolean zero_terminated, + gboolean clear, + guint elt_size, + guint reserved_size) +{ + GRealArray *array; + + g_return_val_if_fail (elt_size > 0, NULL); + + array = g_slice_new (GRealArray); + + array->data = NULL; + array->len = 0; + array->alloc = 0; + array->zero_terminated = (zero_terminated ? 1 : 0); + array->clear = (clear ? 
1 : 0); + array->elt_size = elt_size; + array->clear_func = NULL; + + // g_atomic_ref_count_init (&array->ref_count); + + if (array->zero_terminated || reserved_size != 0) + { + g_array_maybe_expand (array, reserved_size); + g_array_zero_terminate(array); + } + + return (GArray*) array; +} + +/** + * g_array_set_clear_func: + * @array: A #GArray + * @clear_func: a function to clear an element of @array + * + * Sets a function to clear an element of @array. + * + * The @clear_func will be called when an element in the array + * data segment is removed and when the array is freed and data + * segment is deallocated as well. @clear_func will be passed a + * pointer to the element to clear, rather than the element itself. + * + * Note that in contrast with other uses of #GDestroyNotify + * functions, @clear_func is expected to clear the contents of + * the array element it is given, but not free the element itself. + * + * Since: 2.32 + */ +void g_array_set_clear_func (GArray *array, + GDestroyNotify clear_func) +{ + GRealArray *rarray = (GRealArray *) array; + + g_return_if_fail (array != NULL); + + rarray->clear_func = clear_func; +} + +/** + * g_array_ref: + * @array: A #GArray + * + * Atomically increments the reference count of @array by one. + * This function is thread-safe and may be called from any thread. + * + * Returns: The passed in #GArray + * + * Since: 2.22 + */ +GArray *g_array_ref (GArray *array) +{ + //GRealArray *rarray = (GRealArray*) array; + g_return_val_if_fail (array, NULL); + + // g_atomic_ref_count_inc (&rarray->ref_count); + + return array; +} + +typedef enum +{ + FREE_SEGMENT = 1 << 0, + PRESERVE_WRAPPER = 1 << 1 +} ArrayFreeFlags; + +static gchar *array_free (GRealArray *, ArrayFreeFlags); + +/** + * g_array_unref: + * @array: A #GArray + * + * Atomically decrements the reference count of @array by one. If the + * reference count drops to 0, all memory allocated by the array is + * released. This function is thread-safe and may be called from any + * thread. + * + * Since: 2.22 + */ +void g_array_unref (GArray *array) +{ + GRealArray *rarray = (GRealArray*) array; + g_return_if_fail (array); + + // if (g_atomic_ref_count_dec (&rarray->ref_count)) + array_free (rarray, FREE_SEGMENT); +} + +/** + * g_array_get_element_size: + * @array: A #GArray + * + * Gets the size of the elements in @array. + * + * Returns: Size of each element, in bytes + * + * Since: 2.22 + */ +guint g_array_get_element_size (GArray *array) +{ + GRealArray *rarray = (GRealArray*) array; + + g_return_val_if_fail (array, 0); + + return rarray->elt_size; +} + +/** + * g_array_free: + * @array: a #GArray + * @free_segment: if %TRUE the actual element data is freed as well + * + * Frees the memory allocated for the #GArray. If @free_segment is + * %TRUE it frees the memory block holding the elements as well. Pass + * %FALSE if you want to free the #GArray wrapper but preserve the + * underlying array for use elsewhere. If the reference count of + * @array is greater than one, the #GArray wrapper is preserved but + * the size of @array will be set to zero. + * + * If array contents point to dynamically-allocated memory, they should + * be freed separately if @free_seg is %TRUE and no @clear_func + * function has been set for @array. + * + * This function is not thread-safe. If using a #GArray from multiple + * threads, use only the atomic g_array_ref() and g_array_unref() + * functions. + * + * Returns: the element data if @free_segment is %FALSE, otherwise + * %NULL. 
The element data should be freed using g_free(). + */ +gchar *g_array_free (GArray *farray, + gboolean free_segment) +{ + GRealArray *array = (GRealArray*) farray; + ArrayFreeFlags flags; + + g_return_val_if_fail (array, NULL); + + flags = (free_segment ? FREE_SEGMENT : 0); + + /* if others are holding a reference, preserve the wrapper but do free/return the data */ + //if (!g_atomic_ref_count_dec (&array->ref_count)) + flags |= PRESERVE_WRAPPER; + + return array_free (array, flags); +} + +static gchar *array_free (GRealArray *array, ArrayFreeFlags flags) +{ + gchar *segment; + + if (flags & FREE_SEGMENT) + { + if (array->clear_func != NULL) + { + guint i; + + for (i = 0; i < array->len; i++) + array->clear_func (g_array_elt_pos (array, i)); + } + + g_free (array->data); + segment = NULL; + } + else + segment = (gchar*) array->data; + + if (flags & PRESERVE_WRAPPER) + { + array->data = NULL; + array->len = 0; + array->alloc = 0; + } + else + { + g_slice_free1 (sizeof (GRealArray), array); + } + + return segment; +} + +/** + * g_array_append_vals: + * @array: a #GArray + * @data: (not nullable): a pointer to the elements to append to the end of the array + * @len: the number of elements to append + * + * Adds @len elements onto the end of the array. + * + * Returns: the #GArray + */ +/** + * g_array_append_val: + * @a: a #GArray + * @v: the value to append to the #GArray + * + * Adds the value on to the end of the array. The array will grow in + * size automatically if necessary. + * + * g_array_append_val() is a macro which uses a reference to the value + * parameter @v. This means that you cannot use it with literal values + * such as "27". You must use variables. + * + * Returns: the #GArray + */ +GArray* g_array_append_vals (GArray *farray, gconstpointer data, guint len) +{ + GRealArray *array = (GRealArray*) farray; + + g_return_val_if_fail (array, NULL); + + if (len == 0) + return farray; + + g_array_maybe_expand (array, len); + + memcpy (g_array_elt_pos (array, array->len), data, + g_array_elt_len (array, len)); + + array->len += len; + + g_array_zero_terminate (array); + + return farray; +} + +/** + * g_array_prepend_vals: + * @array: a #GArray + * @data: (nullable): a pointer to the elements to prepend to the start of the array + * @len: the number of elements to prepend, which may be zero + * + * Adds @len elements onto the start of the array. + * + * @data may be %NULL if (and only if) @len is zero. If @len is zero, this + * function is a no-op. + * + * This operation is slower than g_array_append_vals() since the + * existing elements in the array have to be moved to make space for + * the new elements. + * + * Returns: the #GArray + */ +/** + * g_array_prepend_val: + * @a: a #GArray + * @v: the value to prepend to the #GArray + * + * Adds the value on to the start of the array. The array will grow in + * size automatically if necessary. + * + * This operation is slower than g_array_append_val() since the + * existing elements in the array have to be moved to make space for + * the new element. + * + * g_array_prepend_val() is a macro which uses a reference to the value + * parameter @v. This means that you cannot use it with literal values + * such as "27". You must use variables. 
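+ *
+ * As an illustrative sketch (this example is not part of the upstream
+ * GLib documentation), a single value can also be prepended through the
+ * non-macro g_array_prepend_vals() variant:
+ * |[
+ *   // garray is a GArray*, as in the example in the section introduction
+ *   gint first = 27; // must be an addressable variable, not a literal
+ *   g_array_prepend_vals (garray, &first, 1);
+ * ]|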
+ * + * Returns: the #GArray + */ +GArray* g_array_prepend_vals (GArray *farray, gconstpointer data, guint len) +{ + GRealArray *array = (GRealArray*) farray; + + g_return_val_if_fail (array, NULL); + + if (len == 0) + return farray; + + g_array_maybe_expand (array, len); + + memmove (g_array_elt_pos (array, len), g_array_elt_pos (array, 0), + g_array_elt_len (array, array->len)); + + memcpy (g_array_elt_pos (array, 0), data, g_array_elt_len (array, len)); + + array->len += len; + + g_array_zero_terminate (array); + + return farray; +} + +/** + * g_array_insert_vals: + * @array: a #GArray + * @index_: the index to place the elements at + * @data: (nullable): a pointer to the elements to insert + * @len: the number of elements to insert + * + * Inserts @len elements into a #GArray at the given index. + * + * If @index_ is greater than the array's current length, the array is expanded. + * The elements between the old end of the array and the newly inserted elements + * will be initialised to zero if the array was configured to clear elements; + * otherwise their values will be undefined. + * + * @data may be %NULL if (and only if) @len is zero. If @len is zero, this + * function is a no-op. + * + * Returns: the #GArray + */ +/** + * g_array_insert_val: + * @a: a #GArray + * @i: the index to place the element at + * @v: the value to insert into the array + * + * Inserts an element into an array at the given index. + * + * g_array_insert_val() is a macro which uses a reference to the value + * parameter @v. This means that you cannot use it with literal values + * such as "27". You must use variables. + * + * Returns: the #GArray + */ +GArray* g_array_insert_vals (GArray *farray, + guint index_, + gconstpointer data, + guint len) +{ + GRealArray *array = (GRealArray*) farray; + + g_return_val_if_fail (array, NULL); + + if (len == 0) + return farray; + + /* Is the index off the end of the array, and hence do we need to over-allocate + * and clear some elements? */ + if (index_ >= array->len) + { + g_array_maybe_expand (array, index_ - array->len + len); + return g_array_append_vals (g_array_set_size (farray, index_), data, len); + } + + g_array_maybe_expand (array, len); + + memmove (g_array_elt_pos (array, len + index_), + g_array_elt_pos (array, index_), + g_array_elt_len (array, array->len - index_)); + + memcpy (g_array_elt_pos (array, index_), data, g_array_elt_len (array, len)); + + array->len += len; + + g_array_zero_terminate (array); + + return farray; +} + +/** + * g_array_set_size: + * @array: a #GArray + * @length: the new size of the #GArray + * + * Sets the size of the array, expanding it if necessary. If the array + * was created with @clear_ set to %TRUE, the new elements are set to 0. + * + * Returns: the #GArray + */ +GArray* g_array_set_size (GArray *farray, + guint length) +{ + GRealArray *array = (GRealArray*) farray; + + g_return_val_if_fail (array, NULL); + + if (length > array->len) + { + g_array_maybe_expand (array, length - array->len); + + if (array->clear) + g_array_elt_zero (array, array->len, length - array->len); + } + else if (length < array->len) + g_array_remove_range (farray, length, array->len - length); + + array->len = length; + + g_array_zero_terminate (array); + + return farray; +} + +/** + * g_array_remove_index: + * @array: a #GArray + * @index_: the index of the element to remove + * + * Removes the element at the given index from a #GArray. The following + * elements are moved down one place. 
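+ *
+ * For illustration (this example is not part of the upstream GLib
+ * documentation): with the elements {10, 20, 30}, removing index 1
+ * leaves {10, 30}:
+ * |[
+ *   g_array_remove_index (garray, 1);
+ * ]|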
+ * + * Returns: the #GArray + */ +GArray* g_array_remove_index (GArray *farray, + guint index_) +{ + GRealArray* array = (GRealArray*) farray; + + g_return_val_if_fail (array, NULL); + + g_return_val_if_fail (index_ < array->len, NULL); + + if (array->clear_func != NULL) + array->clear_func (g_array_elt_pos (array, index_)); + + if (index_ != array->len - 1) + memmove (g_array_elt_pos (array, index_), + g_array_elt_pos (array, index_ + 1), + g_array_elt_len (array, array->len - index_ - 1)); + + array->len -= 1; + + if (g_mem_gc_friendly) + g_array_elt_zero (array, array->len, 1); + else + g_array_zero_terminate (array); + + return farray; +} + +/** + * g_array_remove_index_fast: + * @array: a @GArray + * @index_: the index of the element to remove + * + * Removes the element at the given index from a #GArray. The last + * element in the array is used to fill in the space, so this function + * does not preserve the order of the #GArray. But it is faster than + * g_array_remove_index(). + * + * Returns: the #GArray + */ +GArray* g_array_remove_index_fast (GArray *farray, + guint index_) +{ + GRealArray* array = (GRealArray*) farray; + + g_return_val_if_fail (array, NULL); + + g_return_val_if_fail (index_ < array->len, NULL); + + if (array->clear_func != NULL) + array->clear_func (g_array_elt_pos (array, index_)); + + if (index_ != array->len - 1) + memcpy (g_array_elt_pos (array, index_), + g_array_elt_pos (array, array->len - 1), + g_array_elt_len (array, 1)); + + array->len -= 1; + + if (g_mem_gc_friendly) + g_array_elt_zero (array, array->len, 1); + else + g_array_zero_terminate (array); + + return farray; +} + +/** + * g_array_remove_range: + * @array: a @GArray + * @index_: the index of the first element to remove + * @length: the number of elements to remove + * + * Removes the given number of elements starting at the given index + * from a #GArray. The following elements are moved to close the gap. 
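+ *
+ * For illustration (this example is not part of the upstream GLib
+ * documentation): with the elements {1, 2, 3, 4, 5}, removing two
+ * elements starting at index 1 leaves {1, 4, 5}:
+ * |[
+ *   g_array_remove_range (garray, 1, 2);
+ * ]|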
+ * + * Returns: the #GArray + * + * Since: 2.4 + */ +GArray* g_array_remove_range (GArray *farray, + guint index_, + guint length) +{ + GRealArray *array = (GRealArray*) farray; + + g_return_val_if_fail (array, NULL); + g_return_val_if_fail (index_ <= array->len, NULL); + g_return_val_if_fail (index_ + length <= array->len, NULL); + + if (array->clear_func != NULL) + { + guint i; + + for (i = 0; i < length; i++) + array->clear_func (g_array_elt_pos (array, index_ + i)); + } + + if (index_ + length != array->len) + memmove (g_array_elt_pos (array, index_), + g_array_elt_pos (array, index_ + length), + (array->len - (index_ + length)) * array->elt_size); + + array->len -= length; + if (g_mem_gc_friendly) + g_array_elt_zero (array, array->len, length); + else + g_array_zero_terminate (array); + + return farray; +} + +/* Returns the smallest power of 2 greater than n, or n if + * such power does not fit in a guint + */ +static guint g_nearest_pow (guint num) +{ + guint n = num - 1; + + g_assert (num > 0); + + n |= n >> 1; + n |= n >> 2; + n |= n >> 4; + n |= n >> 8; + n |= n >> 16; +#if SIZEOF_INT == 8 + n |= n >> 32; +#endif + + return n + 1; +} + +static void g_array_maybe_expand (GRealArray *array, guint len) +{ + guint want_alloc; + + /* Detect potential overflow */ + //if ((G_MAXUINT - array->len) < len) + // g_error ("adding %u to array would overflow", len); + + want_alloc = g_array_elt_len (array, array->len + len + + array->zero_terminated); + + if (want_alloc > array->alloc) + { + want_alloc = g_nearest_pow (want_alloc); + want_alloc = MAX (want_alloc, MIN_ARRAY_SIZE); + + array->data = g_realloc (array->data, want_alloc); + + if (g_mem_gc_friendly) + memset (array->data + array->alloc, 0, want_alloc - array->alloc); + + array->alloc = want_alloc; + } +} + +/** + * SECTION:arrays_pointer + * @title: Pointer Arrays + * @short_description: arrays of pointers to any type of data, which + * grow automatically as new elements are added + * + * Pointer Arrays are similar to Arrays but are used only for storing + * pointers. + * + * If you remove elements from the array, elements at the end of the + * array are moved into the space previously occupied by the removed + * element. This means that you should not rely on the index of particular + * elements remaining the same. You should also be careful when deleting + * elements while iterating over the array. + * + * To create a pointer array, use g_ptr_array_new(). + * + * To add elements to a pointer array, use g_ptr_array_add(). + * + * To remove elements from a pointer array, use g_ptr_array_remove(), + * g_ptr_array_remove_index() or g_ptr_array_remove_index_fast(). + * + * To access an element of a pointer array, use g_ptr_array_index(). + * + * To set the size of a pointer array, use g_ptr_array_set_size(). + * + * To free a pointer array, use g_ptr_array_free(). 
+ * + * An example using a #GPtrArray: + * |[ + * GPtrArray *array; + * gchar *string1 = "one"; + * gchar *string2 = "two"; + * gchar *string3 = "three"; + * + * array = g_ptr_array_new (); + * g_ptr_array_add (array, (gpointer) string1); + * g_ptr_array_add (array, (gpointer) string2); + * g_ptr_array_add (array, (gpointer) string3); + * + * if (g_ptr_array_index (array, 0) != (gpointer) string1) + * g_print ("ERROR: got %p instead of %p\n", + * g_ptr_array_index (array, 0), string1); + * + * g_ptr_array_free (array, TRUE); + * ]| + */ + +typedef struct _GRealPtrArray GRealPtrArray; + +/** + * GPtrArray: + * @pdata: points to the array of pointers, which may be moved when the + * array grows + * @len: number of pointers in the array + * + * Contains the public fields of a pointer array. + */ +struct _GRealPtrArray +{ + gpointer *pdata; + guint len; + guint alloc; + // gatomicrefcount ref_count; + GDestroyNotify element_free_func; +}; + +/** + * g_ptr_array_index: + * @array: a #GPtrArray + * @index_: the index of the pointer to return + * + * Returns the pointer at the given index of the pointer array. + * + * This does not perform bounds checking on the given @index_, + * so you are responsible for checking it against the array length. + * + * Returns: the pointer at the given index + */ + +static void g_ptr_array_maybe_expand (GRealPtrArray *array, guint len); + +/** + * g_ptr_array_new: + * + * Creates a new #GPtrArray with a reference count of 1. + * + * Returns: the new #GPtrArray + */ +GPtrArray *g_ptr_array_new (void) +{ + return g_ptr_array_sized_new (0); +} + +/** + * g_ptr_array_steal: + * @array: a #GPtrArray. + * @len: (optional) (out caller-allocates): pointer to retrieve the number of + * elements of the original array + * + * Frees the data in the array and resets the size to zero, while + * the underlying array is preserved for use elsewhere and returned + * to the caller. + * + * Even if set, the #GDestroyNotify function will never be called + * on the current contents of the array and the caller is + * responsible for freeing the array elements. + * + * An example of use: + * |[ + * g_autoptr(GPtrArray) chunk_buffer = g_ptr_array_new_with_free_func (g_bytes_unref); + * + * // Some part of your application appends a number of chunks to the pointer array. + * g_ptr_array_add (chunk_buffer, g_bytes_new_static ("hello", 5)); + * g_ptr_array_add (chunk_buffer, g_bytes_new_static ("world", 5)); + * + * ... + * + * // Periodically, the chunks need to be sent as an array-and-length to some + * // other part of the program. + * GBytes **chunks; + * gsize n_chunks; + * + * chunks = g_ptr_array_steal (chunk_buffer, &n_chunks); + * for (gsize i = 0; i < n_chunks; i++) + * { + * // Do something with each chunk here, and then free them, since + * // g_ptr_array_steal() transfers ownership of all the elements and the + * // array to the caller. + * ... + * + * g_bytes_unref (chunks[i]); + * } + * + * g_free (chunks); + * + * // After calling g_ptr_array_steal(), the pointer array can be reused for the + * // next set of chunks. + * g_assert (chunk_buffer->len == 0); + * ]| + * + * Returns: (transfer full): the element data, which should be + * freed using g_free(). 
+ * + * Since: 2.64 + */ +gpointer *g_ptr_array_steal (GPtrArray *array, gsize *len) +{ + GRealPtrArray *rarray; + gpointer *segment; + + g_return_val_if_fail (array != NULL, NULL); + + rarray = (GRealPtrArray *) array; + segment = (gpointer *) rarray->pdata; + + if (len != NULL) + *len = rarray->len; + + rarray->pdata = NULL; + rarray->len = 0; + rarray->alloc = 0; + return segment; +} + +/** + * g_ptr_array_copy: + * @array: #GPtrArray to duplicate + * @func: (nullable): a copy function used to copy every element in the array + * @user_data: user data passed to the copy function @func, or %NULL + * + * Makes a full (deep) copy of a #GPtrArray. + * + * @func, as a #GCopyFunc, takes two arguments, the data to be copied + * and a @user_data pointer. On common processor architectures, it's safe to + * pass %NULL as @user_data if the copy function takes only one argument. You + * may get compiler warnings from this though if compiling with GCC's + * `-Wcast-function-type` warning. + * + * If @func is %NULL, then only the pointers (and not what they are + * pointing to) are copied to the new #GPtrArray. + * + * The copy of @array will have the same #GDestroyNotify for its elements as + * @array. + * + * Returns: (transfer full): a deep copy of the initial #GPtrArray. + * + * Since: 2.62 + **/ +GPtrArray *g_ptr_array_copy (GPtrArray *array, GCopyFunc func, gpointer user_data) +{ + gsize i; + GPtrArray *new_array; + + g_return_val_if_fail (array != NULL, NULL); + + new_array = g_ptr_array_sized_new (array->len); + g_ptr_array_set_free_func (new_array, ((GRealPtrArray *) array)->element_free_func); + + if (func != NULL) + { + for (i = 0; i < array->len; i++) + new_array->pdata[i] = func (array->pdata[i], user_data); + } + else if (array->len > 0) + { + memcpy (new_array->pdata, array->pdata, + array->len * sizeof (*array->pdata)); + } + + new_array->len = array->len; + + return new_array; +} + +/** + * g_ptr_array_sized_new: + * @reserved_size: number of pointers preallocated + * + * Creates a new #GPtrArray with @reserved_size pointers preallocated + * and a reference count of 1. This avoids frequent reallocation, if + * you are going to add many pointers to the array. Note however that + * the size of the array is still 0. + * + * Returns: the new #GPtrArray + */ +GPtrArray *g_ptr_array_sized_new (guint reserved_size) +{ + GRealPtrArray *array; + + array = g_slice_new (GRealPtrArray); + + array->pdata = NULL; + array->len = 0; + array->alloc = 0; + array->element_free_func = NULL; + + // g_atomic_ref_count_init (&array->ref_count); + + if (reserved_size != 0) + g_ptr_array_maybe_expand (array, reserved_size); + + return (GPtrArray*) array; +} + +/** + * g_array_copy: + * @array: A #GArray. + * + * Create a shallow copy of a #GArray. If the array elements consist of + * pointers to data, the pointers are copied but the actual data is not. + * + * Returns: (transfer container): A copy of @array. 
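+ *
+ * A minimal sketch (not from the upstream documentation) showing that
+ * the copy is shallow: the element bytes are duplicated, but any
+ * pointers stored in the elements still reference the same data:
+ * |[
+ *   GArray *copy = g_array_copy (garray);
+ * ]|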
+ * + * Since: 2.62 + **/ +GArray *g_array_copy (GArray *array) +{ + GRealArray *rarray = (GRealArray *) array; + GRealArray *new_rarray; + + g_return_val_if_fail (rarray != NULL, NULL); + + new_rarray = + (GRealArray *) g_array_sized_new (rarray->zero_terminated, rarray->clear, + rarray->elt_size, rarray->alloc / rarray->elt_size); + new_rarray->len = rarray->len; + if (rarray->len > 0) + memcpy (new_rarray->data, rarray->data, rarray->len * rarray->elt_size); + + g_array_zero_terminate (new_rarray); + + return (GArray *) new_rarray; +} + +/** + * g_ptr_array_new_with_free_func: + * @element_free_func: (nullable): A function to free elements with + * destroy @array or %NULL + * + * Creates a new #GPtrArray with a reference count of 1 and use + * @element_free_func for freeing each element when the array is destroyed + * either via g_ptr_array_unref(), when g_ptr_array_free() is called with + * @free_segment set to %TRUE or when removing elements. + * + * Returns: A new #GPtrArray + * + * Since: 2.22 + */ +GPtrArray *g_ptr_array_new_with_free_func (GDestroyNotify element_free_func) +{ + GPtrArray *array; + + array = g_ptr_array_new (); + g_ptr_array_set_free_func (array, element_free_func); + + return array; +} + +/** + * g_ptr_array_new_full: + * @reserved_size: number of pointers preallocated + * @element_free_func: (nullable): A function to free elements with + * destroy @array or %NULL + * + * Creates a new #GPtrArray with @reserved_size pointers preallocated + * and a reference count of 1. This avoids frequent reallocation, if + * you are going to add many pointers to the array. Note however that + * the size of the array is still 0. It also set @element_free_func + * for freeing each element when the array is destroyed either via + * g_ptr_array_unref(), when g_ptr_array_free() is called with + * @free_segment set to %TRUE or when removing elements. + * + * Returns: A new #GPtrArray + * + * Since: 2.30 + */ +GPtrArray *g_ptr_array_new_full (guint reserved_size, GDestroyNotify element_free_func) +{ + GPtrArray *array; + + array = g_ptr_array_sized_new (reserved_size); + g_ptr_array_set_free_func (array, element_free_func); + + return array; +} + +/** + * g_ptr_array_set_free_func: + * @array: A #GPtrArray + * @element_free_func: (nullable): A function to free elements with + * destroy @array or %NULL + * + * Sets a function for freeing each element when @array is destroyed + * either via g_ptr_array_unref(), when g_ptr_array_free() is called + * with @free_segment set to %TRUE or when removing elements. + * + * Since: 2.22 + */ +void g_ptr_array_set_free_func (GPtrArray *array, GDestroyNotify element_free_func) +{ + GRealPtrArray *rarray = (GRealPtrArray *)array; + + g_return_if_fail (array); + + rarray->element_free_func = element_free_func; +} + +static void g_ptr_array_maybe_expand (GRealPtrArray *array, guint len) +{ + /* Detect potential overflow */ + //if ((G_MAXUINT - array->len) < len) + // g_error ("adding %u to array would overflow", len); + + if ((array->len + len) > array->alloc) + { + guint old_alloc = array->alloc; + array->alloc = g_nearest_pow (array->len + len); + array->alloc = MAX (array->alloc, MIN_ARRAY_SIZE); + array->pdata = g_realloc (array->pdata, sizeof (gpointer) * array->alloc); + if (g_mem_gc_friendly) + for ( ; old_alloc < array->alloc; old_alloc++) + array->pdata [old_alloc] = NULL; + } +} + +/** + * g_ptr_array_set_size: + * @array: a #GPtrArray + * @length: the new length of the pointer array + * + * Sets the size of the array. 
When making the array larger, + * newly-added elements will be set to %NULL. When making it smaller, + * if @array has a non-%NULL #GDestroyNotify function then it will be + * called for the removed elements. + */ +void g_ptr_array_set_size (GPtrArray *array, gint length) +{ + GRealPtrArray *rarray = (GRealPtrArray *)array; + guint length_unsigned; + + g_return_if_fail (rarray); + g_return_if_fail (rarray->len == 0 || (rarray->len != 0 && rarray->pdata != NULL)); + g_return_if_fail (length >= 0); + + length_unsigned = (guint) length; + + if (length_unsigned > rarray->len) + { + guint i; + g_ptr_array_maybe_expand (rarray, (length_unsigned - rarray->len)); + /* This is not + * memset (array->pdata + array->len, 0, + * sizeof (gpointer) * (length_unsigned - array->len)); + * to make it really portable. Remember (void*)NULL needn't be + * bitwise zero. It of course is silly not to use memset (..,0,..). + */ + for (i = rarray->len; i < length_unsigned; i++) + rarray->pdata[i] = NULL; + } + else if (length_unsigned < rarray->len) + g_ptr_array_remove_range (array, length_unsigned, rarray->len - length_unsigned); + + rarray->len = length_unsigned; +} + +static gpointer ptr_array_remove_index (GPtrArray *array, + guint index_, + gboolean fast, + gboolean free_element) +{ + GRealPtrArray *rarray = (GRealPtrArray *) array; + gpointer result; + + g_return_val_if_fail (rarray, NULL); + g_return_val_if_fail (rarray->len == 0 || (rarray->len != 0 && rarray->pdata != NULL), NULL); + + g_return_val_if_fail (index_ < rarray->len, NULL); + + result = rarray->pdata[index_]; + + if (rarray->element_free_func != NULL && free_element) + rarray->element_free_func (rarray->pdata[index_]); + + if (index_ != rarray->len - 1 && !fast) + memmove (rarray->pdata + index_, rarray->pdata + index_ + 1, + sizeof (gpointer) * (rarray->len - index_ - 1)); + else if (index_ != rarray->len - 1) + rarray->pdata[index_] = rarray->pdata[rarray->len - 1]; + + rarray->len -= 1; + + if (g_mem_gc_friendly) + rarray->pdata[rarray->len] = NULL; + + return result; +} + +/** + * g_ptr_array_remove_index: + * @array: a #GPtrArray + * @index_: the index of the pointer to remove + * + * Removes the pointer at the given index from the pointer array. + * The following elements are moved down one place. If @array has + * a non-%NULL #GDestroyNotify function it is called for the removed + * element. If so, the return value from this function will potentially point + * to freed memory (depending on the #GDestroyNotify implementation). + * + * Returns: (nullable): the pointer which was removed + */ +gpointer g_ptr_array_remove_index (GPtrArray *array, guint index_) +{ + return ptr_array_remove_index (array, index_, FALSE, TRUE); +} + +/** + * g_ptr_array_remove_index_fast: + * @array: a #GPtrArray + * @index_: the index of the pointer to remove + * + * Removes the pointer at the given index from the pointer array. + * The last element in the array is used to fill in the space, so + * this function does not preserve the order of the array. But it + * is faster than g_ptr_array_remove_index(). If @array has a non-%NULL + * #GDestroyNotify function it is called for the removed element. If so, the + * return value from this function will potentially point to freed memory + * (depending on the #GDestroyNotify implementation). 
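+ *
+ * For illustration (this example is not part of the upstream GLib
+ * documentation): with the pointers {a, b, c, d}, removing index 0
+ * moves the last pointer into the vacated slot, leaving {d, b, c}:
+ * |[
+ *   g_ptr_array_remove_index_fast (parray, 0);
+ * ]|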
+ * + * Returns: (nullable): the pointer which was removed + */ +gpointer g_ptr_array_remove_index_fast (GPtrArray *array, guint index_) +{ + return ptr_array_remove_index (array, index_, TRUE, TRUE); +} + +/** + * g_ptr_array_steal_index: + * @array: a #GPtrArray + * @index_: the index of the pointer to steal + * + * Removes the pointer at the given index from the pointer array. + * The following elements are moved down one place. The #GDestroyNotify for + * @array is *not* called on the removed element; ownership is transferred to + * the caller of this function. + * + * Returns: (transfer full) (nullable): the pointer which was removed + * Since: 2.58 + */ +gpointer g_ptr_array_steal_index (GPtrArray *array, guint index_) +{ + return ptr_array_remove_index (array, index_, FALSE, FALSE); +} + +/** + * g_ptr_array_steal_index_fast: + * @array: a #GPtrArray + * @index_: the index of the pointer to steal + * + * Removes the pointer at the given index from the pointer array. + * The last element in the array is used to fill in the space, so + * this function does not preserve the order of the array. But it + * is faster than g_ptr_array_steal_index(). The #GDestroyNotify for @array is + * *not* called on the removed element; ownership is transferred to the caller + * of this function. + * + * Returns: (transfer full) (nullable): the pointer which was removed + * Since: 2.58 + */ +gpointer g_ptr_array_steal_index_fast (GPtrArray *array, guint index_) +{ + return ptr_array_remove_index (array, index_, TRUE, FALSE); +} + +/** + * g_ptr_array_remove_range: + * @array: a @GPtrArray + * @index_: the index of the first pointer to remove + * @length: the number of pointers to remove + * + * Removes the given number of pointers starting at the given index + * from a #GPtrArray. The following elements are moved to close the + * gap. If @array has a non-%NULL #GDestroyNotify function it is + * called for the removed elements. + * + * Returns: the @array + * + * Since: 2.4 + */ +GPtrArray* g_ptr_array_remove_range (GPtrArray *array, guint index_, guint length) +{ + GRealPtrArray *rarray = (GRealPtrArray *)array; + guint n; + + g_return_val_if_fail (rarray != NULL, NULL); + g_return_val_if_fail (rarray->len == 0 || (rarray->len != 0 && rarray->pdata != NULL), NULL); + g_return_val_if_fail (index_ <= rarray->len, NULL); + g_return_val_if_fail (index_ + length <= rarray->len, NULL); + + if (rarray->element_free_func != NULL) + { + for (n = index_; n < index_ + length; n++) + rarray->element_free_func (rarray->pdata[n]); + } + + if (index_ + length != rarray->len) + { + memmove (&rarray->pdata[index_], + &rarray->pdata[index_ + length], + (rarray->len - (index_ + length)) * sizeof (gpointer)); + } + + rarray->len -= length; + if (g_mem_gc_friendly) + { + guint i; + for (i = 0; i < length; i++) + rarray->pdata[rarray->len + i] = NULL; + } + + return array; +} + +/** + * g_ptr_array_remove: + * @array: a #GPtrArray + * @data: the pointer to remove + * + * Removes the first occurrence of the given pointer from the pointer + * array. The following elements are moved down one place. If @array + * has a non-%NULL #GDestroyNotify function it is called for the + * removed element. + * + * It returns %TRUE if the pointer was removed, or %FALSE if the + * pointer was not found. 
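+ *
+ * A minimal usage sketch (not from the upstream documentation; the
+ * names parray and item are placeholders):
+ * |[
+ *   if (!g_ptr_array_remove (parray, item))
+ *     g_print ("pointer was not found\n");
+ * ]|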
+ * + * Returns: %TRUE if the pointer is removed, %FALSE if the pointer + * is not found in the array + */ +gboolean g_ptr_array_remove (GPtrArray *array, gpointer data) +{ + guint i; + + g_return_val_if_fail (array, FALSE); + g_return_val_if_fail (array->len == 0 || (array->len != 0 && array->pdata != NULL), FALSE); + + for (i = 0; i < array->len; i += 1) + { + if (array->pdata[i] == data) + { + g_ptr_array_remove_index (array, i); + return TRUE; + } + } + + return FALSE; +} + +/** + * g_ptr_array_remove_fast: + * @array: a #GPtrArray + * @data: the pointer to remove + * + * Removes the first occurrence of the given pointer from the pointer + * array. The last element in the array is used to fill in the space, + * so this function does not preserve the order of the array. But it + * is faster than g_ptr_array_remove(). If @array has a non-%NULL + * #GDestroyNotify function it is called for the removed element. + * + * It returns %TRUE if the pointer was removed, or %FALSE if the + * pointer was not found. + * + * Returns: %TRUE if the pointer was found in the array + */ +gboolean g_ptr_array_remove_fast (GPtrArray *array, gpointer data) +{ + GRealPtrArray *rarray = (GRealPtrArray *)array; + guint i; + + g_return_val_if_fail (rarray, FALSE); + g_return_val_if_fail (rarray->len == 0 || (rarray->len != 0 && rarray->pdata != NULL), FALSE); + + for (i = 0; i < rarray->len; i += 1) + { + if (rarray->pdata[i] == data) + { + g_ptr_array_remove_index_fast (array, i); + return TRUE; + } + } + + return FALSE; +} + +/** + * g_ptr_array_add: + * @array: a #GPtrArray + * @data: the pointer to add + * + * Adds a pointer to the end of the pointer array. The array will grow + * in size automatically if necessary. + */ +void g_ptr_array_add (GPtrArray *array, gpointer data) +{ + GRealPtrArray *rarray = (GRealPtrArray *)array; + + g_return_if_fail (rarray); + g_return_if_fail (rarray->len == 0 || (rarray->len != 0 && rarray->pdata != NULL)); + + g_ptr_array_maybe_expand (rarray, 1); + + rarray->pdata[rarray->len++] = data; +} + +/** + * g_ptr_array_extend: + * @array_to_extend: a #GPtrArray. + * @array: (transfer none): a #GPtrArray to add to the end of @array_to_extend. + * @func: (nullable): a copy function used to copy every element in the array + * @user_data: user data passed to the copy function @func, or %NULL + * + * Adds all pointers of @array to the end of the array @array_to_extend. + * The array will grow in size automatically if needed. @array_to_extend is + * modified in-place. + * + * @func, as a #GCopyFunc, takes two arguments, the data to be copied + * and a @user_data pointer. On common processor architectures, it's safe to + * pass %NULL as @user_data if the copy function takes only one argument. You + * may get compiler warnings from this though if compiling with GCC's + * `-Wcast-function-type` warning. + * + * If @func is %NULL, then only the pointers (and not what they are + * pointing to) are copied to the new #GPtrArray. 
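+ *
+ * A minimal usage sketch (not from the upstream documentation; dest and
+ * src are placeholder #GPtrArray pointers). Passing %NULL for @func
+ * appends the pointers of src to dest without copying the pointed-to
+ * data:
+ * |[
+ *   g_ptr_array_extend (dest, src, NULL, NULL);
+ * ]|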
+ * + * Since: 2.62 + **/ +void g_ptr_array_extend (GPtrArray *array_to_extend, + GPtrArray *array, + GCopyFunc func, + gpointer user_data) +{ + GRealPtrArray *rarray_to_extend = (GRealPtrArray *) array_to_extend; + gsize i; + + g_return_if_fail (array_to_extend != NULL); + g_return_if_fail (array != NULL); + + g_ptr_array_maybe_expand (rarray_to_extend, array->len); + + if (func != NULL) + { + for (i = 0; i < array->len; i++) + rarray_to_extend->pdata[i + rarray_to_extend->len] = + func (array->pdata[i], user_data); + } + else if (array->len > 0) + { + memcpy (rarray_to_extend->pdata + rarray_to_extend->len, array->pdata, + array->len * sizeof (*array->pdata)); + } + + rarray_to_extend->len += array->len; +} + +/** + * g_ptr_array_insert: + * @array: a #GPtrArray + * @index_: the index to place the new element at, or -1 to append + * @data: the pointer to add. + * + * Inserts an element into the pointer array at the given index. The + * array will grow in size automatically if necessary. + * + * Since: 2.40 + */ +void g_ptr_array_insert (GPtrArray *array, gint index_, gpointer data) +{ + GRealPtrArray *rarray = (GRealPtrArray *)array; + + g_return_if_fail (rarray); + g_return_if_fail (index_ >= -1); + g_return_if_fail (index_ <= (gint)rarray->len); + + g_ptr_array_maybe_expand (rarray, 1); + + if (index_ < 0) + index_ = rarray->len; + + if ((guint) index_ < rarray->len) + memmove (&(rarray->pdata[index_ + 1]), + &(rarray->pdata[index_]), + (rarray->len - index_) * sizeof (gpointer)); + + rarray->len++; + rarray->pdata[index_] = data; +} + +/** + * g_ptr_array_foreach: + * @array: a #GPtrArray + * @func: the function to call for each array element + * @user_data: user data to pass to the function + * + * Calls a function for each element of a #GPtrArray. @func must not + * add elements to or remove elements from the array. + * + * Since: 2.4 + */ +void g_ptr_array_foreach (GPtrArray *array, GFunc func, gpointer user_data) +{ + guint i; + + g_return_if_fail (array); + + for (i = 0; i < array->len; i++) + (*func) (array->pdata[i], user_data); +} + +/** + * SECTION:arrays_byte + * @title: Byte Arrays + * @short_description: arrays of bytes + * + * #GByteArray is a mutable array of bytes based on #GArray, to provide arrays + * of bytes which grow automatically as elements are added. + * + * To create a new #GByteArray use g_byte_array_new(). To add elements to a + * #GByteArray, use g_byte_array_append(), and g_byte_array_prepend(). + * + * To set the size of a #GByteArray, use g_byte_array_set_size(). + * + * To free a #GByteArray, use g_byte_array_free(). + * + * An example for using a #GByteArray: + * |[ + * GByteArray *gbarray; + * gint i; + * + * gbarray = g_byte_array_new (); + * for (i = 0; i < 10000; i++) + * g_byte_array_append (gbarray, (guint8*) "abcd", 4); + * + * for (i = 0; i < 10000; i++) + * { + * g_assert (gbarray->data[4*i] == 'a'); + * g_assert (gbarray->data[4*i+1] == 'b'); + * g_assert (gbarray->data[4*i+2] == 'c'); + * g_assert (gbarray->data[4*i+3] == 'd'); + * } + * + * g_byte_array_free (gbarray, TRUE); + * ]| + * + * See #GBytes if you are interested in an immutable object representing a + * sequence of bytes. + */ + +/** + * GByteArray: + * @data: a pointer to the element data. The data may be moved as + * elements are added to the #GByteArray + * @len: the number of elements in the #GByteArray + * + * Contains the public fields of a GByteArray. + */ + +/** + * g_byte_array_new: + * + * Creates a new #GByteArray with a reference count of 1. 
+ *
+ * Returns: (transfer full): the new #GByteArray
+ */
+GByteArray *g_byte_array_new (void)
+{
+  return (GByteArray *)g_array_sized_new (FALSE, FALSE, 1, 0);
+}
+
+/**
+ * g_byte_array_sized_new:
+ * @reserved_size: number of bytes preallocated
+ *
+ * Creates a new #GByteArray with @reserved_size bytes preallocated.
+ * This avoids frequent reallocation, if you are going to add many
+ * bytes to the array. Note however that the size of the array is still
+ * 0.
+ *
+ * Returns: the new #GByteArray
+ */
+GByteArray* g_byte_array_sized_new (guint reserved_size)
+{
+  return (GByteArray *)g_array_sized_new (FALSE, FALSE, 1, reserved_size);
+}
+
+/**
+ * g_byte_array_free:
+ * @array: a #GByteArray
+ * @free_segment: if %TRUE the actual byte data is freed as well
+ *
+ * Frees the memory allocated by the #GByteArray. If @free_segment is
+ * %TRUE it frees the actual byte data. If the reference count of
+ * @array is greater than one, the #GByteArray wrapper is preserved but
+ * the size of @array will be set to zero.
+ *
+ * Returns: the element data if @free_segment is %FALSE, otherwise
+ * %NULL. The element data should be freed using g_free().
+ */
+guint8* g_byte_array_free (GByteArray *array, gboolean free_segment)
+{
+  return (guint8 *)g_array_free ((GArray *)array, free_segment);
+}
+
+/**
+ * g_byte_array_append:
+ * @array: a #GByteArray
+ * @data: the byte data to be added
+ * @len: the number of bytes to add
+ *
+ * Adds the given bytes to the end of the #GByteArray.
+ * The array will grow in size automatically if necessary.
+ *
+ * Returns: the #GByteArray
+ */
+GByteArray* g_byte_array_append (GByteArray *array, const guint8 *data, guint len)
+{
+  g_array_append_vals ((GArray *)array, (guint8 *)data, len);
+
+  return array;
+}
+
+/**
+ * g_byte_array_prepend:
+ * @array: a #GByteArray
+ * @data: the byte data to be added
+ * @len: the number of bytes to add
+ *
+ * Adds the given data to the start of the #GByteArray.
+ * The array will grow in size automatically if necessary.
+ *
+ * Returns: the #GByteArray
+ */
+GByteArray *g_byte_array_prepend (GByteArray *array, const guint8 *data, guint len)
+{
+  g_array_prepend_vals ((GArray *)array, (guint8 *)data, len);
+
+  return array;
+}
+
+/**
+ * g_byte_array_set_size:
+ * @array: a #GByteArray
+ * @length: the new size of the #GByteArray
+ *
+ * Sets the size of the #GByteArray, expanding it if necessary.
+ *
+ * Returns: the #GByteArray
+ */
+GByteArray *g_byte_array_set_size (GByteArray *array, guint length)
+{
+  g_array_set_size ((GArray *)array, length);
+
+  return array;
+}
diff --git a/glib_compat/garray.h b/glib_compat/garray.h
new file mode 100644
index 00000000..020539ae
--- /dev/null
+++ b/glib_compat/garray.h
@@ -0,0 +1,99 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+#ifndef __G_ARRAY_H__
+#define __G_ARRAY_H__
+
+#include "gtypes.h"
+
+typedef struct _GBytes GBytes;
+typedef struct _GArray GArray;
+typedef struct _GByteArray GByteArray;
+typedef struct _GPtrArray GPtrArray;
+
+struct _GArray
+{
+  gchar *data;
+  guint len;
+};
+
+struct _GByteArray
+{
+  guint8 *data;
+  guint len;
+};
+
+struct _GPtrArray
+{
+  gpointer *pdata;
+  guint len;
+};
+
+/* Resizable arrays. remove fills any cleared spot and shortens the
+ * array, while preserving the order. remove_fast will distort the
+ * order by moving the last element to the position of the removed.
+ */
+
+#define g_array_append_val(a,v) g_array_append_vals (a, &(v), 1)
+#define g_array_index(a,t,i) (((t*) (void *) (a)->data) [(i)])
+
+GArray* g_array_append_vals (GArray *array,
+                             gconstpointer data,
+                             guint len);
+
+GArray* g_array_new (gboolean zero_terminated, gboolean clear_, guint element_size);
+GArray* g_array_sized_new (gboolean zero_terminated,
+                           gboolean clear_,
+                           guint element_size,
+                           guint reserved_size);
+
+gchar* g_array_free(GArray *array, gboolean free_segment);
+GArray* g_array_set_size(GArray *array, guint length);
+GArray*
+g_array_remove_range (GArray *farray,
+                      guint index_,
+                      guint length);
+
+void g_ptr_array_set_free_func (GPtrArray *array,
+                                GDestroyNotify element_free_func);
+
+/* Resizable pointer array. This interface is much less complicated
+ * than the above. Add appends a pointer. Remove fills any cleared
+ * spot and shortens the array. remove_fast will again distort order.
+ */
+#define g_ptr_array_index(array,index_) ((array)->pdata)[index_]
+GPtrArray* g_ptr_array_new_with_free_func (GDestroyNotify element_free_func);
+void g_ptr_array_add(GPtrArray *array, gpointer data);
+GPtrArray* g_ptr_array_sized_new (guint reserved_size);
+GPtrArray* g_ptr_array_remove_range (GPtrArray *array, guint index_, guint length);
+
+/* Byte arrays, an array of guint8. Implemented as a GArray,
+ * but type-safe.
+ */
+GByteArray* g_byte_array_sized_new(guint reserved_size);
+guint8* g_byte_array_free(GByteArray *array, gboolean free_segment);
+GByteArray* g_byte_array_append(GByteArray *array, const guint8 *data, guint len);
+GByteArray* g_byte_array_set_size(GByteArray *array, guint length);
+
+#endif /* __G_ARRAY_H__ */
diff --git a/glib_compat/ghash.h b/glib_compat/ghash.h
new file mode 100644
index 00000000..a4916a9d
--- /dev/null
+++ b/glib_compat/ghash.h
@@ -0,0 +1,77 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000.
See the AUTHORS + * file for a list of people on the GLib Team. See the ChangeLog + * files for a list of changes. These files are distributed with + * GLib at ftp://ftp.gtk.org/pub/gtk/. + */ + +#ifndef __G_HASH_H__ +#define __G_HASH_H__ + +#include "gtypes.h" + +typedef struct _GHashTable GHashTable; + +typedef gboolean (*GHRFunc) (gpointer key, gpointer value, gpointer user_data); + +struct _GHashTableIter +{ + /*< private >*/ + gpointer dummy1; + gpointer dummy2; + gpointer dummy3; + int dummy4; + gboolean dummy5; + gpointer dummy6; +}; + +GHashTable* g_hash_table_new (GHashFunc hash_func, GEqualFunc key_equal_func); + +GHashTable* g_hash_table_new_full (GHashFunc hash_func, + GEqualFunc key_equal_func, + GDestroyNotify key_destroy_func, + GDestroyNotify value_destroy_func); + +void g_hash_table_destroy (GHashTable *hash_table); + +gboolean g_hash_table_insert (GHashTable *hash_table, gpointer key, gpointer value); + +void g_hash_table_replace (GHashTable *hash_table, gpointer key, gpointer value); + +gboolean g_hash_table_remove (GHashTable *hash_table, gconstpointer key); + +void g_hash_table_remove_all (GHashTable *hash_table); + +gpointer g_hash_table_lookup (GHashTable *hash_table, gconstpointer key); + +void g_hash_table_foreach (GHashTable *hash_table, GHFunc func, gpointer user_data); + +guint g_hash_table_size (GHashTable *hash_table); + +GHashTable* g_hash_table_ref (GHashTable *hash_table); + +void g_hash_table_unref (GHashTable *hash_table); + +/* Hash Functions + */ +gboolean g_int_equal (gconstpointer v1, gconstpointer v2); +guint g_int_hash (gconstpointer v); + +#endif /* __G_HASH_H__ */ diff --git a/qemu/glib_compat.c b/glib_compat/glib_compat.c similarity index 52% rename from qemu/glib_compat.c rename to glib_compat/glib_compat.c index 946e4f0c..2bb71828 100644 --- a/qemu/glib_compat.c +++ b/glib_compat/glib_compat.c @@ -1,21 +1,21 @@ /* -glib_compat.c replacement functionality for glib code used in qemu -Copyright (C) 2016 Chris Eagle cseagle at gmail dot com + glib_compat.c replacement functionality for glib code used in qemu + Copyright (C) 2016 Chris Eagle cseagle at gmail dot com -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU General Public License -as published by the Free Software Foundation; either version 2 -of the License, or (at your option) any later version. + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -*/ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ // Part of this code was lifted from glib-2.28.0. // Glib license is available in COPYING_GLIB file in root directory. @@ -31,13 +31,11 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #include "glib_compat.h" -#define MAX(a, b) (((a) > (b)) ? (a) : (b)) #ifndef _WIN64 #define GPOINTER_TO_UINT(p) ((guint)(uintptr_t)(p)) #else #define GPOINTER_TO_UINT(p) ((guint) (guint64) (p)) #endif -#define G_MAXINT INT_MAX /* All functions below added to eliminate GLIB dependency */ @@ -56,7 +54,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ static guint g_direct_hash (gconstpointer v) { - return GPOINTER_TO_UINT (v); + return GPOINTER_TO_UINT (v); } // g_str_hash() is lifted glib-2.28.0/glib/gstring.c @@ -79,18 +77,18 @@ static guint g_direct_hash (gconstpointer v) **/ guint g_str_hash (gconstpointer v) { - const signed char *p; - guint32 h = 5381; + const signed char *p; + guint32 h = 5381; - for (p = v; *p != '\0'; p++) - h = (h << 5) + h + *p; + for (p = v; *p != '\0'; p++) + h = (h << 5) + h + *p; - return h; + return h; } gboolean g_str_equal(gconstpointer v1, gconstpointer v2) { - return strcmp((const char*)v1, (const char*)v2) == 0; + return strcmp((const char*)v1, (const char*)v2) == 0; } // g_int_hash() is lifted from glib-2.28.0/glib/gutils.c @@ -106,151 +104,151 @@ gboolean g_str_equal(gconstpointer v1, gconstpointer v2) */ guint g_int_hash (gconstpointer v) { - return *(const gint*) v; + return *(const gint*) v; } gboolean g_int_equal(gconstpointer v1, gconstpointer v2) { - return *((const gint*)v1) == *((const gint*)v2); + return *((const gint*)v1) == *((const gint*)v2); } /* Doubly-linked list */ GList *g_list_first(GList *list) { - if (list == NULL) return NULL; - while (list->prev) list = list->prev; - return list; + if (list == NULL) return NULL; + while (list->prev) list = list->prev; + return list; } void g_list_foreach(GList *list, GFunc func, gpointer user_data) { - GList *lp; - for (lp = list; lp; lp = lp->next) { - (*func)(lp->data, user_data); - } + GList *lp; + for (lp = list; lp; lp = lp->next) { + (*func)(lp->data, user_data); + } } void g_list_free(GList *list) { - GList *lp, *next, *prev = NULL; - if (list) prev = list->prev; - for (lp = list; lp; lp = next) { - next = lp->next; - free(lp); - } - for (lp = prev; lp; lp = prev) { - prev = lp->prev; - free(lp); - } + GList *lp, *next, *prev = NULL; + if (list) prev = list->prev; + for (lp = list; lp; lp = next) { + next = lp->next; + free(lp); + } + for (lp = prev; lp; lp = prev) { + prev = lp->prev; + free(lp); + } } GList *g_list_insert_sorted(GList *list, gpointer data, GCompareFunc compare) { - GList *i; - GList *n = (GList*)g_malloc(sizeof(GList)); - n->data = data; - if (list == NULL) { - n->next = n->prev = NULL; - return n; - } - for (i = list; i; i = i->next) { - n->prev = i->prev; - if ((*compare)(data, i->data) <= 0) { - n->next = i; - i->prev = n; - if (i == list) return n; - else return list; - } - } - n->prev = n->prev->next; - n->next = NULL; - n->prev->next = n; - return list; + GList *i; + GList *n = (GList*)g_malloc(sizeof(GList)); + n->data = data; + if (list == NULL) { + n->next = n->prev = NULL; + return n; + } + for (i = list; i; i = i->next) { + n->prev = i->prev; + if ((*compare)(data, i->data) <= 0) { + n->next = i; + i->prev = n; + if (i == list) return n; + else return list; + } + } + n->prev = n->prev->next; + n->next = NULL; + n->prev->next = n; + return list; } GList *g_list_prepend(GList *list, gpointer data) { - GList 
*n = (GList*)g_malloc(sizeof(GList)); - n->next = list; - n->prev = NULL; - n->data = data; - return n; + GList *n = (GList*)g_malloc(sizeof(GList)); + n->next = list; + n->prev = NULL; + n->data = data; + return n; } GList *g_list_remove_link(GList *list, GList *llink) { - if (llink) { - if (llink == list) list = list->next; - if (llink->prev) llink->prev->next = llink->next; - if (llink->next) llink->next->prev = llink->prev; - } - return list; + if (llink) { + if (llink == list) list = list->next; + if (llink->prev) llink->prev->next = llink->next; + if (llink->next) llink->next->prev = llink->prev; + } + return list; } // code copied from glib/glist.c, version 2.28.0 static GList *g_list_sort_merge(GList *l1, - GList *l2, - GFunc compare_func, - gpointer user_data) + GList *l2, + GFunc compare_func, + gpointer user_data) { - GList list, *l, *lprev; - gint cmp; + GList list, *l, *lprev; + gint cmp; - l = &list; - lprev = NULL; + l = &list; + lprev = NULL; - while (l1 && l2) + while (l1 && l2) { - cmp = ((GCompareDataFunc) compare_func) (l1->data, l2->data, user_data); + cmp = ((GCompareDataFunc) compare_func) (l1->data, l2->data, user_data); - if (cmp <= 0) + if (cmp <= 0) { - l->next = l1; - l1 = l1->next; + l->next = l1; + l1 = l1->next; } - else - { - l->next = l2; - l2 = l2->next; + else + { + l->next = l2; + l2 = l2->next; } - l = l->next; - l->prev = lprev; - lprev = l; + l = l->next; + l->prev = lprev; + lprev = l; } - l->next = l1 ? l1 : l2; - l->next->prev = l; + l->next = l1 ? l1 : l2; + l->next->prev = l; - return list.next; + return list.next; } static GList *g_list_sort_real(GList *list, - GFunc compare_func, - gpointer user_data) + GFunc compare_func, + gpointer user_data) { - GList *l1, *l2; + GList *l1, *l2; - if (!list) - return NULL; - if (!list->next) - return list; + if (!list) + return NULL; + if (!list->next) + return list; - l1 = list; - l2 = list->next; + l1 = list; + l2 = list->next; - while ((l2 = l2->next) != NULL) + while ((l2 = l2->next) != NULL) { - if ((l2 = l2->next) == NULL) - break; - l1 = l1->next; + if ((l2 = l2->next) == NULL) + break; + l1 = l1->next; } - l2 = l1->next; - l1->next = NULL; + l2 = l1->next; + l1->next = NULL; - return g_list_sort_merge (g_list_sort_real (list, compare_func, user_data), - g_list_sort_real (l2, compare_func, user_data), - compare_func, - user_data); + return g_list_sort_merge (g_list_sort_real (list, compare_func, user_data), + g_list_sort_real (l2, compare_func, user_data), + compare_func, + user_data); } /** @@ -289,101 +287,101 @@ GList *g_list_sort (GList *list, GCompareFunc compare_func) GSList *g_slist_append(GSList *list, gpointer data) { - GSList *head = list; - if (list) { - while (list->next) list = list->next; - list->next = (GSList*)g_malloc(sizeof(GSList)); - list = list->next; - } else { - head = list = (GSList*)g_malloc(sizeof(GSList)); - } - list->data = data; - list->next = NULL; - return head; + GSList *head = list; + if (list) { + while (list->next) list = list->next; + list->next = (GSList*)g_malloc(sizeof(GSList)); + list = list->next; + } else { + head = list = (GSList*)g_malloc(sizeof(GSList)); + } + list->data = data; + list->next = NULL; + return head; } void g_slist_foreach(GSList *list, GFunc func, gpointer user_data) { - GSList *lp; - for (lp = list; lp; lp = lp->next) { - (*func)(lp->data, user_data); - } + GSList *lp; + for (lp = list; lp; lp = lp->next) { + (*func)(lp->data, user_data); + } } void g_slist_free(GSList *list) { - GSList *lp, *next; - for (lp = list; lp; lp = next) { - next = 
lp->next; - free(lp); - } + GSList *lp, *next; + for (lp = list; lp; lp = next) { + next = lp->next; + free(lp); + } } GSList *g_slist_prepend(GSList *list, gpointer data) { - GSList *head = (GSList*)g_malloc(sizeof(GSList)); - head->next = list; - head->data = data; - return head; + GSList *head = (GSList*)g_malloc(sizeof(GSList)); + head->next = list; + head->data = data; + return head; } static GSList *g_slist_sort_merge (GSList *l1, - GSList *l2, - GFunc compare_func, - gpointer user_data) + GSList *l2, + GFunc compare_func, + gpointer user_data) { - GSList list, *l; - gint cmp; + GSList list, *l; + gint cmp; - l=&list; + l=&list; - while (l1 && l2) + while (l1 && l2) { - cmp = ((GCompareDataFunc) compare_func) (l1->data, l2->data, user_data); + cmp = ((GCompareDataFunc) compare_func) (l1->data, l2->data, user_data); - if (cmp <= 0) + if (cmp <= 0) { - l=l->next=l1; - l1=l1->next; + l=l->next=l1; + l1=l1->next; } - else + else { - l=l->next=l2; - l2=l2->next; + l=l->next=l2; + l2=l2->next; } } - l->next= l1 ? l1 : l2; + l->next= l1 ? l1 : l2; - return list.next; + return list.next; } static GSList *g_slist_sort_real (GSList *list, - GFunc compare_func, - gpointer user_data) + GFunc compare_func, + gpointer user_data) { - GSList *l1, *l2; + GSList *l1, *l2; - if (!list) - return NULL; - if (!list->next) - return list; + if (!list) + return NULL; + if (!list->next) + return list; - l1 = list; - l2 = list->next; + l1 = list; + l2 = list->next; - while ((l2 = l2->next) != NULL) + while ((l2 = l2->next) != NULL) { - if ((l2 = l2->next) == NULL) - break; - l1=l1->next; + if ((l2 = l2->next) == NULL) + break; + l1=l1->next; } - l2 = l1->next; - l1->next = NULL; + l2 = l1->next; + l1->next = NULL; - return g_slist_sort_merge (g_slist_sort_real (list, compare_func, user_data), - g_slist_sort_real (l2, compare_func, user_data), - compare_func, - user_data); + return g_slist_sort_merge (g_slist_sort_real (list, compare_func, user_data), + g_slist_sort_real (l2, compare_func, user_data), + compare_func, + user_data); } /** @@ -400,9 +398,9 @@ static GSList *g_slist_sort_real (GSList *list, * Returns: the start of the sorted #GSList */ GSList *g_slist_sort (GSList *list, - GCompareFunc compare_func) + GCompareFunc compare_func) { - return g_slist_sort_real (list, (GFunc) compare_func, NULL); + return g_slist_sort_real (list, (GFunc) compare_func, NULL); } /* END of g_slist related functions */ @@ -414,27 +412,27 @@ GSList *g_slist_sort (GSList *list, typedef struct _GHashNode GHashNode; struct _GHashNode { - gpointer key; - gpointer value; + gpointer key; + gpointer value; - /* If key_hash == 0, node is not in use - * If key_hash == 1, node is a tombstone - * If key_hash >= 2, node contains data */ - guint key_hash; + /* If key_hash == 0, node is not in use + * If key_hash == 1, node is a tombstone + * If key_hash >= 2, node contains data */ + guint key_hash; }; struct _GHashTable { - gint size; - gint mod; - guint mask; - gint nnodes; - gint noccupied; /* nnodes + tombstones */ - GHashNode *nodes; - GHashFunc hash_func; - GEqualFunc key_equal_func; - volatile gint ref_count; - GDestroyNotify key_destroy_func; - GDestroyNotify value_destroy_func; + gint size; + gint mod; + guint mask; + gint nnodes; + gint noccupied; /* nnodes + tombstones */ + GHashNode *nodes; + GHashFunc hash_func; + GEqualFunc key_equal_func; + volatile gint ref_count; + GDestroyNotify key_destroy_func; + GDestroyNotify value_destroy_func; }; /** @@ -450,11 +448,11 @@ struct _GHashTable { **/ void g_hash_table_destroy 
(GHashTable *hash_table) { - if (hash_table == NULL) return; - if (hash_table->ref_count == 0) return; + if (hash_table == NULL) return; + if (hash_table->ref_count == 0) return; - g_hash_table_remove_all (hash_table); - g_hash_table_unref (hash_table); + g_hash_table_remove_all (hash_table); + g_hash_table_unref (hash_table); } /** @@ -484,23 +482,23 @@ void g_hash_table_destroy (GHashTable *hash_table) * Since: 2.4 **/ gpointer g_hash_table_find (GHashTable *hash_table, - GHRFunc predicate, - gpointer user_data) + GHRFunc predicate, + gpointer user_data) { - gint i; + gint i; - if (hash_table == NULL) return NULL; - if (predicate == NULL) return NULL; + if (hash_table == NULL) return NULL; + if (predicate == NULL) return NULL; - for (i = 0; i < hash_table->size; i++) + for (i = 0; i < hash_table->size; i++) { - GHashNode *node = &hash_table->nodes [i]; + GHashNode *node = &hash_table->nodes [i]; - if (node->key_hash > 1 && predicate (node->key, node->value, user_data)) - return node->value; + if (node->key_hash > 1 && predicate (node->key, node->value, user_data)) + return node->value; } - return NULL; + return NULL; } /** @@ -520,20 +518,20 @@ gpointer g_hash_table_find (GHashTable *hash_table, * order searches in contrast to g_hash_table_lookup(). **/ void g_hash_table_foreach (GHashTable *hash_table, - GHFunc func, - gpointer user_data) + GHFunc func, + gpointer user_data) { - gint i; + gint i; - if (hash_table == NULL) return; - if (func == NULL) return; + if (hash_table == NULL) return; + if (func == NULL) return; - for (i = 0; i < hash_table->size; i++) + for (i = 0; i < hash_table->size; i++) { - GHashNode *node = &hash_table->nodes [i]; + GHashNode *node = &hash_table->nodes [i]; - if (node->key_hash > 1) - (* func) (node->key, node->value, user_data); + if (node->key_hash > 1) + (* func) (node->key, node->value, user_data); } } @@ -560,63 +558,63 @@ void g_hash_table_foreach (GHashTable *hash_table, * the hash record again for the new record. */ static inline guint g_hash_table_lookup_node_for_insertion (GHashTable *hash_table, - gconstpointer key, - guint *hash_return) + gconstpointer key, + guint *hash_return) { - GHashNode *node; - guint node_index; - guint hash_value; - guint first_tombstone = 0; - gboolean have_tombstone = FALSE; - guint step = 0; + GHashNode *node; + guint node_index; + guint hash_value; + guint first_tombstone = 0; + gboolean have_tombstone = FALSE; + guint step = 0; - /* Empty buckets have hash_value set to 0, and for tombstones, it's 1. - * We need to make sure our hash value is not one of these. */ + /* Empty buckets have hash_value set to 0, and for tombstones, it's 1. + * We need to make sure our hash value is not one of these. */ - hash_value = (* hash_table->hash_func) (key); - if (hash_value <= 1) - hash_value = 2; + hash_value = (* hash_table->hash_func) (key); + if (hash_value <= 1) + hash_value = 2; - *hash_return = hash_value; + *hash_return = hash_value; - node_index = hash_value % hash_table->mod; - node = &hash_table->nodes [node_index]; + node_index = hash_value % hash_table->mod; + node = &hash_table->nodes [node_index]; - while (node->key_hash) + while (node->key_hash) { - /* We first check if our full hash values - * are equal so we can avoid calling the full-blown - * key equality function in most cases. - */ + /* We first check if our full hash values + * are equal so we can avoid calling the full-blown + * key equality function in most cases. 
+ */ - if (node->key_hash == hash_value) + if (node->key_hash == hash_value) { - if (hash_table->key_equal_func) + if (hash_table->key_equal_func) + { + if (hash_table->key_equal_func (node->key, key)) + return node_index; + } + else if (node->key == key) { - if (hash_table->key_equal_func (node->key, key)) return node_index; } - else if (node->key == key) - { - return node_index; - } } - else if (node->key_hash == 1 && !have_tombstone) + else if (node->key_hash == 1 && !have_tombstone) { - first_tombstone = node_index; - have_tombstone = TRUE; + first_tombstone = node_index; + have_tombstone = TRUE; } - step++; - node_index += step; - node_index &= hash_table->mask; - node = &hash_table->nodes [node_index]; + step++; + node_index += step; + node_index &= hash_table->mask; + node = &hash_table->nodes [node_index]; } - if (have_tombstone) - return first_tombstone; + if (have_tombstone) + return first_tombstone; - return node_index; + return node_index; } /* Each table size has an associated prime modulo (the first prime @@ -624,75 +622,75 @@ static inline guint g_hash_table_lookup_node_for_insertion (GHashTable *hash_ * then works modulo 2^n. The prime modulo is necessary to get a * good distribution with poor hash functions. */ static const gint prime_mod [] = { - 1, /* For 1 << 0 */ - 2, - 3, - 7, - 13, - 31, - 61, - 127, - 251, - 509, - 1021, - 2039, - 4093, - 8191, - 16381, - 32749, - 65521, /* For 1 << 16 */ - 131071, - 262139, - 524287, - 1048573, - 2097143, - 4194301, - 8388593, - 16777213, - 33554393, - 67108859, - 134217689, - 268435399, - 536870909, - 1073741789, - 2147483647 /* For 1 << 31 */ + 1, /* For 1 << 0 */ + 2, + 3, + 7, + 13, + 31, + 61, + 127, + 251, + 509, + 1021, + 2039, + 4093, + 8191, + 16381, + 32749, + 65521, /* For 1 << 16 */ + 131071, + 262139, + 524287, + 1048573, + 2097143, + 4194301, + 8388593, + 16777213, + 33554393, + 67108859, + 134217689, + 268435399, + 536870909, + 1073741789, + 2147483647 /* For 1 << 31 */ }; static void g_hash_table_set_shift (GHashTable *hash_table, gint shift) { - gint i; - guint mask = 0; + gint i; + guint mask = 0; - hash_table->size = 1 << shift; - hash_table->mod = prime_mod [shift]; + hash_table->size = 1 << shift; + hash_table->mod = prime_mod [shift]; - for (i = 0; i < shift; i++) + for (i = 0; i < shift; i++) { - mask <<= 1; - mask |= 1; + mask <<= 1; + mask |= 1; } - hash_table->mask = mask; + hash_table->mask = mask; } static gint g_hash_table_find_closest_shift (gint n) { - gint i; + gint i; - for (i = 0; n; i++) - n >>= 1; + for (i = 0; n; i++) + n >>= 1; - return i; + return i; } static void g_hash_table_set_shift_from_size (GHashTable *hash_table, gint size) { - gint shift; + gint shift; - shift = g_hash_table_find_closest_shift (size); - shift = MAX (shift, HASH_TABLE_MIN_SHIFT); + shift = g_hash_table_find_closest_shift (size); + shift = MAX (shift, HASH_TABLE_MIN_SHIFT); - g_hash_table_set_shift (hash_table, shift); + g_hash_table_set_shift (hash_table, shift); } /* @@ -710,41 +708,41 @@ static void g_hash_table_set_shift_from_size (GHashTable *hash_table, gint size) */ static void g_hash_table_resize (GHashTable *hash_table) { - GHashNode *new_nodes; - gint old_size; - gint i; + GHashNode *new_nodes; + gint old_size; + gint i; - old_size = hash_table->size; - g_hash_table_set_shift_from_size (hash_table, hash_table->nnodes * 2); + old_size = hash_table->size; + g_hash_table_set_shift_from_size (hash_table, hash_table->nnodes * 2); - new_nodes = g_new0 (GHashNode, hash_table->size); + new_nodes = g_new0 (GHashNode, 
hash_table->size);
- for (i = 0; i < old_size; i++)
+ for (i = 0; i < old_size; i++)
{
- GHashNode *node = &hash_table->nodes [i];
- GHashNode *new_node;
- guint hash_val;
- guint step = 0;
+ GHashNode *node = &hash_table->nodes [i];
+ GHashNode *new_node;
+ guint hash_val;
+ guint step = 0;
- if (node->key_hash <= 1)
- continue;
+ if (node->key_hash <= 1)
+ continue;
- hash_val = node->key_hash % hash_table->mod;
- new_node = &new_nodes [hash_val];
+ hash_val = node->key_hash % hash_table->mod;
+ new_node = &new_nodes [hash_val];
- while (new_node->key_hash)
+ while (new_node->key_hash)
{
- step++;
- hash_val += step;
- hash_val &= hash_table->mask; new_node = &new_nodes [hash_val];
+ step++;
+ hash_val += step;
+ hash_val &= hash_table->mask; new_node = &new_nodes [hash_val];
}
- *new_node = *node;
+ *new_node = *node;
}
- g_free (hash_table->nodes);
- hash_table->nodes = new_nodes;
- hash_table->noccupied = hash_table->nnodes;
+ g_free (hash_table->nodes);
+ hash_table->nodes = new_nodes;
+ hash_table->noccupied = hash_table->nnodes;
}
/*
@@ -758,12 +756,12 @@ static void g_hash_table_resize (GHashTable *hash_table)
 */
static inline void g_hash_table_maybe_resize (GHashTable *hash_table)
{
- gint noccupied = hash_table->noccupied;
- gint size = hash_table->size;
+ gint noccupied = hash_table->noccupied;
+ gint size = hash_table->size;
- if ((size > hash_table->nnodes * 4 && size > 1 << HASH_TABLE_MIN_SHIFT) ||
- (size <= noccupied + (noccupied / 16)))
- g_hash_table_resize (hash_table);
+ if ((size > hash_table->nnodes * 4 && size > 1 << HASH_TABLE_MIN_SHIFT) ||
+ (size <= noccupied + (noccupied / 16)))
+ g_hash_table_resize (hash_table);
}
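The two conditions in g_hash_table_maybe_resize() are the whole load-factor policy: shrink when live entries fill less than a quarter of the table (but never below the minimum size), grow when live entries plus tombstones fill roughly 15/16 of the buckets. A self-contained sketch with concrete numbers (an editor's example, not part of the patch; HASH_TABLE_MIN_SHIFT is assumed to be 3, as in upstream GLib, so the smallest table has 8 buckets):

#include <assert.h>

/* mirror of the two conditions in g_hash_table_maybe_resize() above */
static int needs_resize(int size, int nnodes, int noccupied)
{
    int shrink = (size > nnodes * 4) && (size > (1 << 3));
    int grow = (size <= noccupied + (noccupied / 16));
    return shrink || grow;
}

int main(void)
{
    assert(needs_resize(8, 8, 8));     /* 8 <= 8 + 8/16: table is full, resize */
    assert(!needs_resize(8, 4, 4));    /* half full: leave alone */
    assert(needs_resize(64, 10, 10));  /* 64 > 4*10: mostly empty, shrink */
    return 0;
}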
/*
@@ -783,59 +781,67 @@ static inline void g_hash_table_maybe_resize (GHashTable *hash_table)
 * new node.
 */
static void g_hash_table_insert_internal (GHashTable *hash_table,
- gpointer key,
- gpointer value,
- gboolean keep_new_key)
+ gpointer key,
+ gpointer value,
+ gboolean keep_new_key)
{
- GHashNode *node;
- guint node_index;
- guint key_hash;
- guint old_hash;
+ GHashNode *node;
+ guint node_index;
+ guint key_hash;
+ guint old_hash;
- if (hash_table == NULL) return;
- if (hash_table->ref_count == 0) return;
+ if (hash_table == NULL) return;
+ if (hash_table->ref_count == 0) return;
- node_index = g_hash_table_lookup_node_for_insertion (hash_table, key, &key_hash);
- node = &hash_table->nodes [node_index];
+ node_index = g_hash_table_lookup_node_for_insertion (hash_table, key, &key_hash);
+ node = &hash_table->nodes [node_index];
- old_hash = node->key_hash;
+ old_hash = node->key_hash;
- if (old_hash > 1)
+ if (old_hash > 1)
{
- if (keep_new_key)
+ if (keep_new_key)
{
- if (hash_table->key_destroy_func)
- hash_table->key_destroy_func (node->key);
- node->key = key;
+ if (hash_table->key_destroy_func)
+ hash_table->key_destroy_func (node->key);
+ node->key = key;
}
- else
+ else
{
- if (hash_table->key_destroy_func)
- hash_table->key_destroy_func (key);
+ if (hash_table->key_destroy_func)
+ hash_table->key_destroy_func (key);
}
- if (hash_table->value_destroy_func)
- hash_table->value_destroy_func (node->value);
+ if (hash_table->value_destroy_func)
+ hash_table->value_destroy_func (node->value);
- node->value = value;
+ node->value = value;
}
- else
+ else
{
- node->key = key;
- node->value = value;
- node->key_hash = key_hash;
+ node->key = key;
+ node->value = value;
+ node->key_hash = key_hash;
- hash_table->nnodes++;
+ hash_table->nnodes++;
- if (old_hash == 0)
+ if (old_hash == 0)
{
- /* We replaced an empty node, and not a tombstone */
- hash_table->noccupied++;
- g_hash_table_maybe_resize (hash_table);
+ /* We replaced an empty node, and not a tombstone */
+ hash_table->noccupied++;
+ g_hash_table_maybe_resize (hash_table);
}
}
}
+ void
+g_hash_table_replace (GHashTable *hash_table,
+ gpointer key,
+ gpointer value)
+{
+ g_hash_table_insert_internal (hash_table, key, value, TRUE);
+}
+
/**
 * g_hash_table_insert:
 * @hash_table: a #GHashTable.
@@ -850,11 +856,12 @@ static void g_hash_table_insert_internal (GHashTable *hash_table,
 * a @key_destroy_func when creating the #GHashTable, the passed key is freed
 * using that function.
 **/
-void g_hash_table_insert (GHashTable *hash_table,
- gpointer key,
- gpointer value)
+gboolean g_hash_table_insert (GHashTable *hash_table,
+ gpointer key,
+ gpointer value)
{
- g_hash_table_insert_internal (hash_table, key, value, FALSE);
+ g_hash_table_insert_internal (hash_table, key, value, FALSE);
+ return TRUE;
}
/*
@@ -875,50 +882,50 @@ void g_hash_table_insert (GHashTable *hash_table,
 * index of an empty node (never a tombstone).
 */
static inline guint g_hash_table_lookup_node (GHashTable *hash_table,
- gconstpointer key)
+ gconstpointer key)
{
- GHashNode *node;
- guint node_index;
- guint hash_value;
- guint step = 0;
+ GHashNode *node;
+ guint node_index;
+ guint hash_value;
+ guint step = 0;
- /* Empty buckets have hash_value set to 0, and for tombstones, it's 1.
- * We need to make sure our hash value is not one of these. */
+ /* Empty buckets have hash_value set to 0, and for tombstones, it's 1.
+ * We need to make sure our hash value is not one of these.
*/ - hash_value = (* hash_table->hash_func) (key); - if (hash_value <= 1) - hash_value = 2; + hash_value = (* hash_table->hash_func) (key); + if (hash_value <= 1) + hash_value = 2; - node_index = hash_value % hash_table->mod; - node = &hash_table->nodes [node_index]; + node_index = hash_value % hash_table->mod; + node = &hash_table->nodes [node_index]; - while (node->key_hash) + while (node->key_hash) { - /* We first check if our full hash values - * are equal so we can avoid calling the full-blown - * key equality function in most cases. - */ + /* We first check if our full hash values + * are equal so we can avoid calling the full-blown + * key equality function in most cases. + */ - if (node->key_hash == hash_value) + if (node->key_hash == hash_value) { - if (hash_table->key_equal_func) + if (hash_table->key_equal_func) { - if (hash_table->key_equal_func (node->key, key)) - break; + if (hash_table->key_equal_func (node->key, key)) + break; } - else if (node->key == key) + else if (node->key == key) { - break; + break; } } - step++; - node_index += step; - node_index &= hash_table->mask; - node = &hash_table->nodes [node_index]; + step++; + node_index += step; + node_index &= hash_table->mask; + node = &hash_table->nodes [node_index]; } - return node_index; + return node_index; } /** @@ -934,17 +941,17 @@ static inline guint g_hash_table_lookup_node (GHashTable *hash_table, * Return value: the associated value, or %NULL if the key is not found. **/ gpointer g_hash_table_lookup (GHashTable *hash_table, - gconstpointer key) + gconstpointer key) { - GHashNode *node; - guint node_index; + GHashNode *node; + guint node_index; - if (hash_table == NULL) return NULL; + if (hash_table == NULL) return NULL; - node_index = g_hash_table_lookup_node (hash_table, key); - node = &hash_table->nodes [node_index]; + node_index = g_hash_table_lookup_node (hash_table, key); + node = &hash_table->nodes [node_index]; - return node->key_hash ? node->value : NULL; + return node->key_hash ? node->value : NULL; } /** @@ -968,7 +975,7 @@ gpointer g_hash_table_lookup (GHashTable *hash_table, **/ GHashTable *g_hash_table_new(GHashFunc hash_func, GEqualFunc key_equal_func) { - return g_hash_table_new_full(hash_func, key_equal_func, NULL, NULL); + return g_hash_table_new_full(hash_func, key_equal_func, NULL, NULL); } /** @@ -989,25 +996,25 @@ GHashTable *g_hash_table_new(GHashFunc hash_func, GEqualFunc key_equal_func) * Return value: a new #GHashTable. **/ GHashTable* g_hash_table_new_full (GHashFunc hash_func, - GEqualFunc key_equal_func, - GDestroyNotify key_destroy_func, - GDestroyNotify value_destroy_func) + GEqualFunc key_equal_func, + GDestroyNotify key_destroy_func, + GDestroyNotify value_destroy_func) { - GHashTable *hash_table; + GHashTable *hash_table; - hash_table = (GHashTable*)g_malloc(sizeof(GHashTable)); - //hash_table = g_slice_new (GHashTable); - g_hash_table_set_shift (hash_table, HASH_TABLE_MIN_SHIFT); - hash_table->nnodes = 0; - hash_table->noccupied = 0; - hash_table->hash_func = hash_func ? 
hash_func : g_direct_hash; - hash_table->key_equal_func = key_equal_func; - hash_table->ref_count = 1; - hash_table->key_destroy_func = key_destroy_func; - hash_table->value_destroy_func = value_destroy_func; - hash_table->nodes = g_new0 (GHashNode, hash_table->size); + hash_table = (GHashTable*)g_malloc(sizeof(GHashTable)); + //hash_table = g_slice_new (GHashTable); + g_hash_table_set_shift (hash_table, HASH_TABLE_MIN_SHIFT); + hash_table->nnodes = 0; + hash_table->noccupied = 0; + hash_table->hash_func = hash_func ? hash_func : g_direct_hash; + hash_table->key_equal_func = key_equal_func; + hash_table->ref_count = 1; + hash_table->key_destroy_func = key_destroy_func; + hash_table->value_destroy_func = value_destroy_func; + hash_table->nodes = g_new0 (GHashNode, hash_table->size); - return hash_table; + return hash_table; } /* @@ -1022,30 +1029,30 @@ GHashTable* g_hash_table_new_full (GHashFunc hash_func, * for the key and value of the hash node. */ static void g_hash_table_remove_all_nodes (GHashTable *hash_table, - gboolean notify) + gboolean notify) { - int i; + int i; - for (i = 0; i < hash_table->size; i++) + for (i = 0; i < hash_table->size; i++) { - GHashNode *node = &hash_table->nodes [i]; + GHashNode *node = &hash_table->nodes [i]; - if (node->key_hash > 1) + if (node->key_hash > 1) { - if (notify && hash_table->key_destroy_func) - hash_table->key_destroy_func (node->key); + if (notify && hash_table->key_destroy_func) + hash_table->key_destroy_func (node->key); - if (notify && hash_table->value_destroy_func) - hash_table->value_destroy_func (node->value); + if (notify && hash_table->value_destroy_func) + hash_table->value_destroy_func (node->value); } } - /* We need to set node->key_hash = 0 for all nodes - might as well be GC - * friendly and clear everything */ - memset (hash_table->nodes, 0, hash_table->size * sizeof (GHashNode)); + /* We need to set node->key_hash = 0 for all nodes - might as well be GC + * friendly and clear everything */ + memset (hash_table->nodes, 0, hash_table->size * sizeof (GHashNode)); - hash_table->nnodes = 0; - hash_table->noccupied = 0; + hash_table->nnodes = 0; + hash_table->noccupied = 0; } /** @@ -1063,10 +1070,10 @@ static void g_hash_table_remove_all_nodes (GHashTable *hash_table, **/ void g_hash_table_remove_all (GHashTable *hash_table) { - if (hash_table == NULL) return; + if (hash_table == NULL) return; - g_hash_table_remove_all_nodes (hash_table, TRUE); - g_hash_table_maybe_resize (hash_table); + g_hash_table_remove_all_nodes (hash_table, TRUE); + g_hash_table_maybe_resize (hash_table); } /* @@ -1082,23 +1089,23 @@ void g_hash_table_remove_all (GHashTable *hash_table) * for the key and value of the hash node. 
*/ static void g_hash_table_remove_node (GHashTable *hash_table, - GHashNode *node, - gboolean notify) + GHashNode *node, + gboolean notify) { - if (notify && hash_table->key_destroy_func) - hash_table->key_destroy_func (node->key); + if (notify && hash_table->key_destroy_func) + hash_table->key_destroy_func (node->key); - if (notify && hash_table->value_destroy_func) - hash_table->value_destroy_func (node->value); + if (notify && hash_table->value_destroy_func) + hash_table->value_destroy_func (node->value); - /* Erect tombstone */ - node->key_hash = 1; + /* Erect tombstone */ + node->key_hash = 1; - /* Be GC friendly */ - node->key = NULL; - node->value = NULL; + /* Be GC friendly */ + node->key = NULL; + node->value = NULL; - hash_table->nnodes--; + hash_table->nnodes--; } /* * g_hash_table_remove_internal: @@ -1114,25 +1121,25 @@ static void g_hash_table_remove_node (GHashTable *hash_table, * destroy notify handlers only if @notify is %TRUE. */ static gboolean g_hash_table_remove_internal (GHashTable *hash_table, - gconstpointer key, - gboolean notify) + gconstpointer key, + gboolean notify) { - GHashNode *node; - guint node_index; + GHashNode *node; + guint node_index; - if (hash_table == NULL) return FALSE; + if (hash_table == NULL) return FALSE; - node_index = g_hash_table_lookup_node (hash_table, key); - node = &hash_table->nodes [node_index]; + node_index = g_hash_table_lookup_node (hash_table, key); + node = &hash_table->nodes [node_index]; - /* g_hash_table_lookup_node() never returns a tombstone, so this is safe */ - if (!node->key_hash) - return FALSE; + /* g_hash_table_lookup_node() never returns a tombstone, so this is safe */ + if (!node->key_hash) + return FALSE; - g_hash_table_remove_node (hash_table, node, notify); - g_hash_table_maybe_resize (hash_table); + g_hash_table_remove_node (hash_table, node, notify); + g_hash_table_maybe_resize (hash_table); - return TRUE; + return TRUE; } /** * g_hash_table_remove: @@ -1149,9 +1156,9 @@ static gboolean g_hash_table_remove_internal (GHashTable *hash_table, * Return value: %TRUE if the key was found and removed from the #GHashTable. 
**/ gboolean g_hash_table_remove (GHashTable *hash_table, - gconstpointer key) + gconstpointer key) { - return g_hash_table_remove_internal (hash_table, key, TRUE); + return g_hash_table_remove_internal (hash_table, key, TRUE); } /** @@ -1167,15 +1174,15 @@ gboolean g_hash_table_remove (GHashTable *hash_table, **/ void g_hash_table_unref (GHashTable *hash_table) { - if (hash_table == NULL) return; - if (hash_table->ref_count == 0) return; + if (hash_table == NULL) return; + if (hash_table->ref_count == 0) return; - hash_table->ref_count--; - if (hash_table->ref_count == 0) { - g_hash_table_remove_all_nodes (hash_table, TRUE); - g_free (hash_table->nodes); - g_free (hash_table); - } + hash_table->ref_count--; + if (hash_table->ref_count == 0) { + g_hash_table_remove_all_nodes (hash_table, TRUE); + g_free (hash_table->nodes); + g_free (hash_table); + } } /** @@ -1191,65 +1198,67 @@ void g_hash_table_unref (GHashTable *hash_table) **/ GHashTable *g_hash_table_ref (GHashTable *hash_table) { - if (hash_table == NULL) return NULL; - if (hash_table->ref_count == 0) return hash_table; + if (hash_table == NULL) return NULL; + if (hash_table->ref_count == 0) return hash_table; - //g_atomic_int_add (&hash_table->ref_count, 1); - hash_table->ref_count++; - return hash_table; + //g_atomic_int_add (&hash_table->ref_count, 1); + hash_table->ref_count++; + return hash_table; } guint g_hash_table_size(GHashTable *hash_table) { - if (hash_table == NULL) return 0; + if (hash_table == NULL) return 0; - return hash_table->nnodes; + return hash_table->nnodes; } /* END of g_hash_table related functions */ +#if 0 /* general g_XXX substitutes */ void g_free(gpointer ptr) { - free(ptr); + free(ptr); } gpointer g_malloc(size_t size) { - void *res; + void *res; if (size == 0) return NULL; - res = malloc(size); - if (res == NULL) exit(1); - return res; + res = malloc(size); + if (res == NULL) exit(1); + return res; } gpointer g_malloc0(size_t size) { - void *res; - if (size == 0) return NULL; - res = calloc(size, 1); - if (res == NULL) exit(1); - return res; + void *res; + if (size == 0) return NULL; + res = calloc(size, 1); + if (res == NULL) exit(1); + return res; } gpointer g_try_malloc0(size_t size) { - if (size == 0) return NULL; - return calloc(size, 1); + if (size == 0) return NULL; + return calloc(size, 1); } gpointer g_realloc(gpointer ptr, size_t size) { - void *res; - if (size == 0) { - free(ptr); - return NULL; - } - res = realloc(ptr, size); - if (res == NULL) exit(1); - return res; + void *res; + if (size == 0) { + free(ptr); + return NULL; + } + res = realloc(ptr, size); + if (res == NULL) exit(1); + return res; } +#endif char *g_strdup(const char *str) { @@ -1262,82 +1271,82 @@ char *g_strdup(const char *str) char *g_strdup_printf(const char *format, ...) 
{ - va_list ap; - char *res; - va_start(ap, format); - res = g_strdup_vprintf(format, ap); - va_end(ap); - return res; + va_list ap; + char *res; + va_start(ap, format); + res = g_strdup_vprintf(format, ap); + va_end(ap); + return res; } char *g_strdup_vprintf(const char *format, va_list ap) { - char *str_res = NULL; + char *str_res = NULL; #ifdef _MSC_VER - int len = _vscprintf(format, ap); - if( len < 0 ) - return NULL; - str_res = (char *)malloc(len+1); - if(str_res==NULL) - return NULL; - vsnprintf(str_res, len+1, format, ap); + int len = _vscprintf(format, ap); + if( len < 0 ) + return NULL; + str_res = (char *)malloc(len+1); + if(str_res==NULL) + return NULL; + vsnprintf(str_res, len+1, format, ap); #else int ret = vasprintf(&str_res, format, ap); if (ret == -1) { return NULL; } #endif - return str_res; + return str_res; } char *g_strndup(const char *str, size_t n) { - /* try to mimic glib's g_strndup */ - char *res = calloc(n + 1, 1); - strncpy(res, str, n); - return res; + /* try to mimic glib's g_strndup */ + char *res = calloc(n + 1, 1); + strncpy(res, str, n); + return res; } void g_strfreev(char **str_array) { - char **p = str_array; - if (p) { - while (*p) { - free(*p++); - } - } - free(str_array); + char **p = str_array; + if (p) { + while (*p) { + free(*p++); + } + } + free(str_array); } gpointer g_memdup(gconstpointer mem, size_t byte_size) { - if (mem) { - void *res = g_malloc(byte_size); - memcpy(res, mem, byte_size); - return res; - } - return NULL; + if (mem) { + void *res = g_malloc(byte_size); + memcpy(res, mem, byte_size); + return res; + } + return NULL; } gpointer g_new_(size_t sz, size_t n_structs) { - size_t need = sz * n_structs; - if ((need / sz) != n_structs) return NULL; - return g_malloc(need); + size_t need = sz * n_structs; + if ((need / sz) != n_structs) return NULL; + return g_malloc(need); } gpointer g_new0_(size_t sz, size_t n_structs) { - size_t need = sz * n_structs; - if ((need / sz) != n_structs) return NULL; - return g_malloc0(need); + size_t need = sz * n_structs; + if ((need / sz) != n_structs) return NULL; + return g_malloc0(need); } gpointer g_renew_(size_t sz, gpointer mem, size_t n_structs) { - size_t need = sz * n_structs; - if ((need / sz) != n_structs) return NULL; - return g_realloc(mem, need); + size_t need = sz * n_structs; + if ((need / sz) != n_structs) return NULL; + return g_realloc(mem, need); } /** @@ -1360,26 +1369,26 @@ gpointer g_renew_(size_t sz, gpointer mem, size_t n_structs) */ gchar* g_strconcat (const gchar *string1, ...) { - va_list ap; - char *res; - size_t sz = strlen(string1); - va_start(ap, string1); - while (1) { - char *arg = va_arg(ap, char*); - if (arg == NULL) break; - sz += strlen(arg); - } - va_end(ap); - res = g_malloc(sz + 1); - strcpy(res, string1); - va_start(ap, string1); - while (1) { - char *arg = va_arg(ap, char*); - if (arg == NULL) break; - strcat(res, arg); - } - va_end(ap); - return res; + va_list ap; + char *res; + size_t sz = strlen(string1); + va_start(ap, string1); + while (1) { + char *arg = va_arg(ap, char*); + if (arg == NULL) break; + sz += strlen(arg); + } + va_end(ap); + res = g_malloc(sz + 1); + strcpy(res, string1); + va_start(ap, string1); + while (1) { + char *arg = va_arg(ap, char*); + if (arg == NULL) break; + strcat(res, arg); + } + va_end(ap); + return res; } /** @@ -1406,52 +1415,78 @@ gchar* g_strconcat (const gchar *string1, ...) * g_strfreev() to free it. 
**/
gchar** g_strsplit (const gchar *string,
- const gchar *delimiter,
- gint max_tokens)
+ const gchar *delimiter,
+ gint max_tokens)
{
- GSList *string_list = NULL, *slist;
- gchar **str_array, *s;
- guint n = 0;
- const gchar *remainder;
+ GSList *string_list = NULL, *slist;
+ gchar **str_array, *s;
+ guint n = 0;
+ const gchar *remainder;
- if (string == NULL) return NULL;
- if (delimiter == NULL) return NULL;
- if (delimiter[0] == '\0') return NULL;
+ if (string == NULL) return NULL;
+ if (delimiter == NULL) return NULL;
+ if (delimiter[0] == '\0') return NULL;
- if (max_tokens < 1)
- max_tokens = G_MAXINT;
+ if (max_tokens < 1)
+ max_tokens = G_MAXINT;
- remainder = string;
- s = strstr (remainder, delimiter);
- if (s)
+ remainder = string;
+ s = strstr (remainder, delimiter);
+ if (s)
{
- gsize delimiter_len = strlen (delimiter);
+ gsize delimiter_len = strlen (delimiter);
- while (--max_tokens && s)
+ while (--max_tokens && s)
{
- gsize len;
+ gsize len;
- len = s - remainder;
- string_list = g_slist_prepend (string_list,
- g_strndup (remainder, len));
- n++;
- remainder = s + delimiter_len;
- s = strstr (remainder, delimiter);
+ len = s - remainder;
+ string_list = g_slist_prepend (string_list,
+ g_strndup (remainder, len));
+ n++;
+ remainder = s + delimiter_len;
+ s = strstr (remainder, delimiter);
}
}
- if (*string)
+ if (*string)
{
- n++;
- string_list = g_slist_prepend (string_list, g_strdup (remainder));
+ n++;
+ string_list = g_slist_prepend (string_list, g_strdup (remainder));
}
- str_array = g_new (gchar*, n + 1);
+ str_array = g_new (gchar*, n + 1);
- str_array[n--] = NULL;
- for (slist = string_list; slist; slist = slist->next)
- str_array[n--] = slist->data;
+ str_array[n--] = NULL;
+ for (slist = string_list; slist; slist = slist->next)
+ str_array[n--] = slist->data;
- g_slist_free (string_list);
+ g_slist_free (string_list);
- return str_array;
+ return str_array;
+}
+
+GSList *g_slist_find_custom (GSList *list, gconstpointer data, GCompareFunc func)
+{
+ if (!func)
+ return NULL;
+
+ while (list) {
+ if (func (list->data, data) == 0)
+ return list;
+
+ list = list->next;
+ }
+
+ return NULL;
+}
+
+int g_strcmp0 (const char *str1, const char *str2)
+{
+ if (!str1 && !str2)
+ return 0;
+
+ if (!str1)
+ return -1;
+
+ if (!str2)
+ return 1;
+
+ return strcmp(str1, str2);
}
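Before the header diff, a usage sketch for the two helpers added above (an editor's example, not part of the patch; it assumes the rebuilt glib_compat.h is on the include path and shows the NULL-sorts-first ordering of g_strcmp0):

#include <stdio.h>
#include "glib_compat.h"

int main(void)
{
    /* g_strcmp0 tolerates NULL: two NULLs compare equal, and a NULL
     * argument orders before any non-NULL string */
    printf("%d %d %d\n", g_strcmp0(NULL, NULL),  /* 0 */
           g_strcmp0(NULL, "uc"),                /* negative */
           g_strcmp0("uc", NULL));               /* positive */

    /* g_slist_find_custom walks the list until the comparator returns 0 */
    GSList *names = NULL;
    names = g_slist_prepend(names, "riscv");
    names = g_slist_prepend(names, "ppc");
    GSList *hit = g_slist_find_custom(names, "riscv", (GCompareFunc)g_strcmp0);
    printf("%s\n", hit ? (const char *)hit->data : "not found");

    g_slist_free(names);
    return 0;
}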
diff --git a/qemu/include/glib_compat.h b/glib_compat/glib_compat.h
similarity index 54%
rename from qemu/include/glib_compat.h
rename to glib_compat/glib_compat.h
index 2d627ed2..30215a11 100644
--- a/qemu/include/glib_compat.h
+++ b/glib_compat/glib_compat.h
@@ -25,35 +25,27 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#include #include
-#ifndef TRUE
-#define TRUE 1
-#endif
+#define G_MAXUINT UINT_MAX
+#define G_MAXINT INT_MAX
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-#define g_assert(expr) assert(expr)
-#define g_assert_not_reached() assert(0)
-
-/* typedefs for glib related types that may still be referenced */
-typedef void* gpointer;
-typedef const void *gconstpointer;
-typedef int gint;
-typedef uint32_t guint32;
-typedef uint64_t guint64;
-typedef unsigned int guint;
-typedef char gchar;
-typedef int gboolean;
-typedef unsigned long gulong;
-typedef unsigned long gsize;
+#include "gtestutils.h"
+#include "gtypes.h"
+#include "garray.h"
+#include "gtree.h"
+#include "ghash.h"
+#include "gmem.h"
+#include "gslice.h"
+#include "gmessages.h"
+#include "gpattern.h"
+#include "grand.h"
+#include "glist.h"
+#include "gnode.h"
typedef gint (*GCompareDataFunc)(gconstpointer a, gconstpointer b, gpointer user_data);
typedef void (*GFunc)(gpointer data, gpointer user_data);
typedef gint (*GCompareFunc)(gconstpointer v1, gconstpointer v2);
-typedef void (*GDestroyNotify)(gpointer data);
guint g_str_hash(gconstpointer v);
gboolean g_str_equal(gconstpointer v1, gconstpointer v2);
@@ -61,11 +53,7 @@ guint g_int_hash(gconstpointer v);
gboolean g_int_equal(gconstpointer v1, gconstpointer v2);
-typedef struct _GList {
- gpointer data;
- struct _GList *next;
- struct _GList *prev;
-} GList;
+int g_strcmp0(const char *str1, const char *str2);
GList *g_list_first(GList *list);
void g_list_foreach(GList *list, GFunc func, gpointer user_data);
@@ -86,34 +74,12 @@ void g_slist_foreach(GSList *list, GFunc func, gpointer user_data);
void g_slist_free(GSList *list);
GSList *g_slist_prepend(GSList *list, gpointer data);
GSList *g_slist_sort(GSList *list, GCompareFunc compare);
-
-typedef guint (*GHashFunc)(gconstpointer key);
-typedef gboolean (*GEqualFunc)(gconstpointer a, gconstpointer b);
-typedef void (*GHFunc)(gpointer key, gpointer value, gpointer user_data);
-typedef gboolean (*GHRFunc)(gpointer key, gpointer value, gpointer user_data);
-
-typedef struct _GHashTable GHashTable;
-
-void g_hash_table_destroy(GHashTable *hash_table);
-gpointer g_hash_table_find(GHashTable *hash_table, GHRFunc predicate, gpointer user_data);
-void g_hash_table_foreach(GHashTable *hash_table, GHFunc func, gpointer user_data);
-void g_hash_table_insert(GHashTable *hash_table, gpointer key, gpointer value);
-gpointer g_hash_table_lookup(GHashTable *hash_table, gconstpointer key);
-GHashTable *g_hash_table_new(GHashFunc hash_func, GEqualFunc key_equal_func);
-GHashTable *g_hash_table_new_full(GHashFunc hash_func, GEqualFunc key_equal_func,
- GDestroyNotify key_destroy_func, GDestroyNotify value_destroy_func);
-void g_hash_table_remove_all(GHashTable *hash_table);
-gboolean g_hash_table_remove(GHashTable *hash_table, gconstpointer key);
-void g_hash_table_unref(GHashTable *hash_table);
-GHashTable *g_hash_table_ref(GHashTable *hash_table);
-guint g_hash_table_size(GHashTable *hash_table);
+GSList *g_slist_find_custom(GSList *list, gconstpointer data, GCompareFunc func);
/* replacement for g_malloc dependency */
void g_free(gpointer ptr);
-gpointer g_malloc(size_t size);
-gpointer g_malloc0(size_t size);
-gpointer g_try_malloc0(size_t size);
gpointer g_realloc(gpointer ptr, size_t size);
+
char *g_strdup(const char *str);
char *g_strdup_printf(const char *format, ...);
char *g_strdup_vprintf(const char *format, va_list ap);
@@ -123,14 +89,9 @@ gpointer g_memdup(gconstpointer mem, size_t byte_size);
gpointer g_new_(size_t sz, size_t n_structs);
gpointer g_new0_(size_t sz, size_t n_structs);
gpointer g_renew_(size_t sz, gpointer mem, size_t n_structs);
-gchar* g_strconcat (const gchar *string1, ...);
+
gchar** g_strsplit (const gchar *string, const gchar *delimiter, gint max_tokens);
-
-#define g_new(struct_type, n_structs) ((struct_type*)g_new_(sizeof(struct_type), n_structs))
-#define g_new0(struct_type, n_structs) ((struct_type*)g_new0_(sizeof(struct_type), n_structs))
-#define g_renew(struct_type, mem, n_structs) ((struct_type*)g_renew_(sizeof(struct_type), mem, n_structs))
-
#endif
diff --git a/glib_compat/glist.c b/glib_compat/glist.c
new file mode 100644
index 00000000..39ba10c3
--- /dev/null
+++ b/glib_compat/glist.c
@@ -0,0 +1,154 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/*
+ * MT safe
+ */
+
+#include "gtypes.h"
+#include "glist.h"
+#include "gslice.h"
+#include "gmessages.h"
+
+#define _g_list_alloc() g_slice_new (GList)
+#define _g_list_alloc0() g_slice_new0 (GList)
+#define _g_list_free1(list) g_slice_free (GList, list)
+
+/**
+ * g_list_alloc:
+ *
+ * Allocates space for one #GList element. It is called by
+ * g_list_append(), g_list_prepend(), g_list_insert() and
+ * g_list_insert_sorted() and so is rarely used on its own.
+ *
+ * Returns: a pointer to the newly-allocated #GList element
+ **/
+GList *g_list_alloc (void)
+{
+ return _g_list_alloc0 ();
+}
+
+static inline GList *_g_list_remove_link (GList *list, GList *link)
+{
+ if (link == NULL)
+ return list;
+
+ if (link->prev)
+ {
+ if (link->prev->next == link)
+ link->prev->next = link->next;
+ //else
+ // g_warning ("corrupted double-linked list detected");
+ }
+ if (link->next)
+ {
+ if (link->next->prev == link)
+ link->next->prev = link->prev;
+ //else
+ // g_warning ("corrupted double-linked list detected");
+ }
+
+ if (link == list)
+ list = list->next;
+
+ link->next = NULL;
+ link->prev = NULL;
+
+ return list;
+}
+
+/**
+ * g_list_delete_link:
+ * @list: a #GList, this must point to the top of the list
+ * @link_: node to delete from @list
+ *
+ * Removes the node link_ from the list and frees it.
+ * Compare this to g_list_remove_link() which removes the node
+ * without freeing it.
+ *
+ * Returns: the (possibly changed) start of the #GList
+ */
+GList *g_list_delete_link (GList *list, GList *link_)
+{
+ list = _g_list_remove_link (list, link_);
+ _g_list_free1 (link_);
+
+ return list;
+}
+
+/**
+ * g_list_insert_before:
+ * @list: a pointer to a #GList, this must point to the top of the list
+ * @sibling: the list element before which the new element
+ * is inserted or %NULL to insert at the end of the list
+ * @data: the data for the new element
+ *
+ * Inserts a new element into the list before the given position.
+ *
+ * Returns: the (possibly changed) start of the #GList
+ */
+GList *g_list_insert_before (GList *list, GList *sibling, gpointer data)
+{
+ if (list == NULL)
+ {
+ list = g_list_alloc ();
+ list->data = data;
+ g_return_val_if_fail (sibling == NULL, list);
+ return list;
+ }
+ else if (sibling != NULL)
+ {
+ GList *node;
+
+ node = _g_list_alloc ();
+ node->data = data;
+ node->prev = sibling->prev;
+ node->next = sibling;
+ sibling->prev = node;
+ if (node->prev != NULL)
+ {
+ node->prev->next = node;
+ return list;
+ }
+ else
+ {
+ g_return_val_if_fail (sibling == list, node);
+ return node;
+ }
+ }
+ else
+ {
+ GList *last;
+
+ for (last = list; last->next != NULL; last = last->next) {}
+
+ last->next = _g_list_alloc ();
+ last->next->data = data;
+ last->next->prev = last;
+ last->next->next = NULL;
+
+ return list;
+ }
+}
+
diff --git a/glib_compat/glist.h b/glib_compat/glist.h
new file mode 100644
index 00000000..1cb95e77
--- /dev/null
+++ b/glib_compat/glist.h
@@ -0,0 +1,44 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+#ifndef __G_LIST_H__
+#define __G_LIST_H__
+
+#include "gmem.h"
+
+typedef struct _GList GList;
+
+struct _GList
+{
+ gpointer data;
+ GList *next;
+ GList *prev;
+};
+
+
+GList* g_list_insert_before (GList *list, GList *sibling, gpointer data);
+
+GList* g_list_delete_link (GList *list, GList *link_);
+
+#endif /* __G_LIST_H__ */
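The empty-list, head-insert and tail-append branches of g_list_insert_before() above cover every position in the list. A short walk-through (an editor's example, not part of the patch; it assumes the compat headers introduced in this file):

#include <stdio.h>
#include "glist.h"

int main(void)
{
    GList *list = NULL;
    list = g_list_insert_before(list, NULL, "first");  /* empty-list branch */
    list = g_list_insert_before(list, NULL, "last");   /* append at the tail */
    list = g_list_insert_before(list, list, "zeroth"); /* insert before head */

    for (GList *l = list; l != NULL; l = l->next)
        printf("%s\n", (const char *)l->data);         /* zeroth, first, last */

    /* unlink and free the head node; the returned head moves forward */
    list = g_list_delete_link(list, list);
    return 0;
}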
diff --git a/glib_compat/gmacros.h b/glib_compat/gmacros.h
new file mode 100644
index 00000000..1fa0b149
--- /dev/null
+++ b/glib_compat/gmacros.h
@@ -0,0 +1,59 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/* This file must not include any other glib header file and must thus
+ * not refer to variables from glibconfig.h
+ */
+
+#ifndef __G_MACROS_H__
+#define __G_MACROS_H__
+
+/* We include stddef.h to get the system's definition of NULL
+ */
+#include <stddef.h>
+
+/* Here we provide G_GNUC_EXTENSION as an alias for __extension__,
+ * where this is valid. This allows for warningless compilation of
+ * "long long" types even in the presence of '-ansi -pedantic'.
+ */
+#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
+#define G_GNUC_EXTENSION __extension__
+#else
+#define G_GNUC_EXTENSION
+#endif
+
+#if !(defined (G_STMT_START) && defined (G_STMT_END))
+#define G_STMT_START do
+#if defined (_MSC_VER) && (_MSC_VER >= 1500)
+#define G_STMT_END \
+ __pragma(warning(push)) \
+ __pragma(warning(disable:4127)) \
+ while(0) \
+ __pragma(warning(pop))
+#else
+#define G_STMT_END while (0)
+#endif
+#endif
+
+#endif /* __G_MACROS_H__ */
diff --git a/glib_compat/gmem.c b/glib_compat/gmem.c
new file mode 100644
index 00000000..917b57d0
--- /dev/null
+++ b/glib_compat/gmem.c
@@ -0,0 +1,257 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/*
+ * MT safe
+ */
+
+#include "gtypes.h"
+#include "gmem.h"
+
+#include <stdlib.h>
+
+#include "gslice.h"
+
+#define SIZE_OVERFLOWS(a,b) (((b) > 0 && (a) > G_MAXSIZE / (b)))
+
+
+/**
+ * g_try_malloc:
+ * @n_bytes: number of bytes to allocate.
+ *
+ * Attempts to allocate @n_bytes, and returns %NULL on failure.
+ * Contrast with g_malloc(), which aborts the program on failure.
+ *
+ * Returns: the allocated memory, or %NULL.
+ */ +gpointer g_try_malloc (gsize n_bytes) +{ + gpointer mem; + + if (n_bytes) + mem = malloc (n_bytes); + else + mem = NULL; + + return mem; +} + +/** + * g_try_malloc_n: + * @n_blocks: the number of blocks to allocate + * @n_block_bytes: the size of each block in bytes + * + * This function is similar to g_try_malloc(), allocating (@n_blocks * @n_block_bytes) bytes, + * but care is taken to detect possible overflow during multiplication. + * + * Since: 2.24 + * Returns: the allocated memory, or %NULL. + */ +gpointer g_try_malloc_n (gsize n_blocks, gsize n_block_bytes) +{ + if (SIZE_OVERFLOWS (n_blocks, n_block_bytes)) + return NULL; + + return g_try_malloc (n_blocks * n_block_bytes); +} + +/** + * g_malloc: + * @n_bytes: the number of bytes to allocate + * + * Allocates @n_bytes bytes of memory. + * If @n_bytes is 0 it returns %NULL. + * + * Returns: a pointer to the allocated memory + */ +gpointer g_malloc (gsize n_bytes) +{ + if (n_bytes) { + gpointer mem; + + mem = malloc (n_bytes); + if (mem) + return mem; + + //g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes", + // G_STRLOC, n_bytes); + } + + return NULL; +} + +/** + * g_malloc_n: + * @n_blocks: the number of blocks to allocate + * @n_block_bytes: the size of each block in bytes + * + * This function is similar to g_malloc(), allocating (@n_blocks * @n_block_bytes) bytes, + * but care is taken to detect possible overflow during multiplication. + * + * Since: 2.24 + * Returns: a pointer to the allocated memory + */ +gpointer g_malloc_n (gsize n_blocks, gsize n_block_bytes) +{ + if (SIZE_OVERFLOWS (n_blocks, n_block_bytes)) { + //g_error ("%s: overflow allocating %"G_GSIZE_FORMAT"*%"G_GSIZE_FORMAT" bytes", + // G_STRLOC, n_blocks, n_block_bytes); + } + + return g_malloc (n_blocks * n_block_bytes); +} + +/** + * g_malloc0: + * @n_bytes: the number of bytes to allocate + * + * Allocates @n_bytes bytes of memory, initialized to 0's. + * If @n_bytes is 0 it returns %NULL. + * + * Returns: a pointer to the allocated memory + */ +gpointer g_malloc0 (gsize n_bytes) +{ + if (n_bytes) { + gpointer mem; + + mem = calloc (1, n_bytes); + if (mem) + return mem; + + //g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes", + // G_STRLOC, n_bytes); + } + + return NULL; +} + +/** + * g_malloc0_n: + * @n_blocks: the number of blocks to allocate + * @n_block_bytes: the size of each block in bytes + * + * This function is similar to g_malloc0(), allocating (@n_blocks * @n_block_bytes) bytes, + * but care is taken to detect possible overflow during multiplication. + * + * Since: 2.24 + * Returns: a pointer to the allocated memory + */ +gpointer g_malloc0_n (gsize n_blocks, gsize n_block_bytes) +{ + if (SIZE_OVERFLOWS (n_blocks, n_block_bytes)) { + //g_error ("%s: overflow allocating %"G_GSIZE_FORMAT"*%"G_GSIZE_FORMAT" bytes", + // G_STRLOC, n_blocks, n_block_bytes); + } + + return g_malloc0 (n_blocks * n_block_bytes); +} + +/** + * g_try_malloc0: + * @n_bytes: number of bytes to allocate + * + * Attempts to allocate @n_bytes, initialized to 0's, and returns %NULL on + * failure. Contrast with g_malloc0(), which aborts the program on failure. 
+ *
+ * Since: 2.8
+ * Returns: the allocated memory, or %NULL
+ */
+gpointer g_try_malloc0 (gsize n_bytes)
+{
+ gpointer mem;
+
+ if (n_bytes)
+ mem = calloc (1, n_bytes);
+ else
+ mem = NULL;
+
+ return mem;
+}
+
+/**
+ * g_realloc:
+ * @mem: (nullable): the memory to reallocate
+ * @n_bytes: new size of the memory in bytes
+ *
+ * Reallocates the memory pointed to by @mem, so that it now has space for
+ * @n_bytes bytes of memory. It returns the new address of the memory, which may
+ * have been moved. @mem may be %NULL, in which case it's considered to
+ * have zero-length. @n_bytes may be 0, in which case %NULL will be returned
+ * and @mem will be freed unless it is %NULL.
+ *
+ * Returns: the new address of the allocated memory
+ */
+gpointer g_realloc (gpointer mem, gsize n_bytes)
+{
+ gpointer newmem;
+
+ if (n_bytes) {
+ newmem = realloc (mem, n_bytes);
+ if (newmem)
+ return newmem;
+
+ //g_error("%s: failed to allocate %"G_GSIZE_FORMAT" bytes", G_STRLOC, n_bytes);
+ }
+
+ free (mem);
+
+ return NULL;
+}
+
+/**
+ * g_realloc_n:
+ * @mem: (nullable): the memory to reallocate
+ * @n_blocks: the number of blocks to allocate
+ * @n_block_bytes: the size of each block in bytes
+ *
+ * This function is similar to g_realloc(), allocating (@n_blocks * @n_block_bytes) bytes,
+ * but care is taken to detect possible overflow during multiplication.
+ *
+ * Since: 2.24
+ * Returns: the new address of the allocated memory
+ */
+gpointer g_realloc_n (gpointer mem, gsize n_blocks, gsize n_block_bytes)
+{
+ if (SIZE_OVERFLOWS (n_blocks, n_block_bytes)) {
+ //g_error ("%s: overflow allocating %"G_GSIZE_FORMAT"*%"G_GSIZE_FORMAT" bytes",
+ // G_STRLOC, n_blocks, n_block_bytes);
+ }
+
+ return g_realloc (mem, n_blocks * n_block_bytes);
+}
+
+/**
+ * g_free:
+ * @mem: (nullable): the memory to free
+ *
+ * Frees the memory pointed to by @mem.
+ *
+ * If @mem is %NULL it simply returns, so there is no need to check @mem
+ * against %NULL before calling this function.
+ */
+void g_free (gpointer mem)
+{
+ free (mem);
+}
diff --git a/glib_compat/gmem.h b/glib_compat/gmem.h
new file mode 100644
index 00000000..7a32ae59
--- /dev/null
+++ b/glib_compat/gmem.h
@@ -0,0 +1,111 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+#ifndef __G_MEM_H__
+#define __G_MEM_H__
+
+#include <limits.h>
+#include "gmacros.h"
+
+#define G_MAXSIZE ULONG_MAX
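G_MAXSIZE is the pivot for every overflow guard in this allocator layer (SIZE_OVERFLOWS in gmem.c, the division checks in the _G_NEW macros below). Why the guard matters, as a standalone sketch (an editor's example, not part of the patch; it pins the size type to 32 bits to make the wrap visible):

#include <stdint.h>
#include <stdio.h>

/* the patch's SIZE_OVERFLOWS, specialized to a 32-bit size type */
#define SIZE_OVERFLOWS(a,b) (((b) > 0 && (a) > UINT32_MAX / (b)))

int main(void)
{
    uint32_t n_blocks = 65536, n_block_bytes = 65537;
    uint32_t wrapped = n_blocks * n_block_bytes;  /* 2^32 + 2^16 wraps to 2^16 */
    printf("naive product: %u\n", wrapped);       /* 65536: far too small */
    printf("overflow detected: %d\n", SIZE_OVERFLOWS(n_blocks, n_block_bytes));
    return 0;
}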
+
+/* Optimise: avoid the call to the (slower) _n function if we can
+ * determine at compile-time that no overflow happens.
+ */
+#if defined (__GNUC__) && (__GNUC__ >= 2) && defined (__OPTIMIZE__)
+# define _G_NEW(struct_type, n_structs, func) \
+ (struct_type *) (G_GNUC_EXTENSION ({ \
+ gsize __n = (gsize) (n_structs); \
+ gsize __s = sizeof (struct_type); \
+ gpointer __p; \
+ if (__s == 1) \
+ __p = g_##func (__n); \
+ else if (__builtin_constant_p (__n) && \
+ (__s == 0 || __n <= G_MAXSIZE / __s)) \
+ __p = g_##func (__n * __s); \
+ else \
+ __p = g_##func##_n (__n, __s); \
+ __p; \
+ }))
+# define _G_RENEW(struct_type, mem, n_structs, func) \
+ (struct_type *) (G_GNUC_EXTENSION ({ \
+ gsize __n = (gsize) (n_structs); \
+ gsize __s = sizeof (struct_type); \
+ gpointer __p = (gpointer) (mem); \
+ if (__s == 1) \
+ __p = g_##func (__p, __n); \
+ else if (__builtin_constant_p (__n) && \
+ (__s == 0 || __n <= G_MAXSIZE / __s)) \
+ __p = g_##func (__p, __n * __s); \
+ else \
+ __p = g_##func##_n (__p, __n, __s); \
+ __p; \
+ }))
+
+#else
+/* Unoptimised version: always call the _n() function. */
+#define _G_NEW(struct_type, n_structs, func) \
+ ((struct_type *) g_##func##_n ((n_structs), sizeof (struct_type)))
+#define _G_RENEW(struct_type, mem, n_structs, func) \
+ ((struct_type *) g_##func##_n (mem, (n_structs), sizeof (struct_type)))
+
+#endif
+
+gpointer g_try_malloc (gsize n_bytes);
+
+gpointer g_try_malloc0 (gsize n_bytes);
+
+gpointer g_try_malloc_n (gsize n_blocks, gsize n_block_bytes);
+
+gpointer g_malloc0_n (gsize n_blocks, gsize n_block_bytes);
+
+gpointer g_realloc_n (gpointer mem, gsize n_blocks, gsize n_block_bytes);
+
+gpointer g_malloc_n (gsize n_blocks, gsize n_block_bytes);
+
+gpointer g_malloc0 (gsize n_bytes);
+
+gpointer g_malloc (gsize n_bytes);
+
+void g_free (gpointer mem);
+
+/**
+ * g_try_new:
+ * @struct_type: the type of the elements to allocate
+ * @n_structs: the number of elements to allocate
+ *
+ * Attempts to allocate @n_structs elements of type @struct_type, and returns
+ * %NULL on failure. Contrast with g_new(), which aborts the program on failure.
+ * The returned pointer is cast to a pointer to the given type.
+ * The function returns %NULL when @n_structs is 0 or if an overflow occurs.
+ *
+ * Since: 2.8
+ * Returns: a pointer to the allocated memory, cast to a pointer to @struct_type
+ */
+#define g_try_new(struct_type, n_structs) _G_NEW (struct_type, n_structs, try_malloc)
+#define g_new0(struct_type, n_structs) _G_NEW (struct_type, n_structs, malloc0)
+#define g_new(struct_type, n_structs) _G_NEW (struct_type, n_structs, malloc)
+#define g_renew(struct_type, mem, n_structs) _G_RENEW (struct_type, mem, n_structs, realloc)
+
+#endif /* __G_MEM_H__ */
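With the declarations in place, the allocation macros read naturally at call sites. A usage sketch (an editor's example, not part of the patch; it assumes gtypes.h supplies gsize, as the other compat headers expect):

#include <stdio.h>
#include "gtypes.h"
#include "gmem.h"

typedef struct { double x, y; } Point;

int main(void)
{
    /* g_try_new returns NULL on failure or when n * sizeof(Point) overflows */
    Point *pts = g_try_new(Point, 4);
    if (pts == NULL)
        return 1;

    pts[0].x = 1.0;
    pts[0].y = 2.0;

    pts = g_renew(Point, pts, 8);  /* grow like realloc; contents preserved */
    printf("%f\n", pts[0].x);

    g_free(pts);
    return 0;
}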
diff --git a/glib_compat/gmessages.h b/glib_compat/gmessages.h
new file mode 100644
index 00000000..e667d4db
--- /dev/null
+++ b/glib_compat/gmessages.h
@@ -0,0 +1,35 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+#ifndef __G_MESSAGES_H__
+#define __G_MESSAGES_H__
+
+#include "gmacros.h"
+
+#define g_return_val_if_fail(expr,val) G_STMT_START{ (void)0; }G_STMT_END
+#define g_return_if_fail(expr) G_STMT_START{ (void)0; }G_STMT_END
+#define g_return_if_reached() G_STMT_START{ return; }G_STMT_END
+#define g_return_val_if_reached(val) G_STMT_START{ return (val); }G_STMT_END
+
+#endif /* __G_MESSAGES_H__ */
diff --git a/glib_compat/gnode.h b/glib_compat/gnode.h
new file mode 100644
index 00000000..1b73ab4c
--- /dev/null
+++ b/glib_compat/gnode.h
@@ -0,0 +1,39 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+#ifndef __G_NODE_H__
+#define __G_NODE_H__
+
+#include "gmem.h"
+
+/* Tree traverse orders */
+typedef enum
+{
+ G_IN_ORDER,
+ G_PRE_ORDER,
+ G_POST_ORDER,
+ G_LEVEL_ORDER
+} GTraverseType;
+
+#endif /* __G_NODE_H__ */
diff --git a/glib_compat/gpattern.c b/glib_compat/gpattern.c
new file mode 100644
index 00000000..53fc0465
--- /dev/null
+++ b/glib_compat/gpattern.c
@@ -0,0 +1,400 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997, 1999 Peter Mattis, Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <string.h>
+
+#include "gpattern.h"
+
+#include "gmacros.h"
+#include "gmessages.h"
+#include "gmem.h"
+
+/**
+ * SECTION:patterns
+ * @title: Glob-style pattern matching
+ * @short_description: matches strings against patterns containing '*'
+ * (wildcard) and '?' (joker)
+ *
+ * The g_pattern_match* functions match a string
+ * against a pattern containing '*' and '?'
wildcards with similar + * semantics as the standard glob() function: '*' matches an arbitrary, + * possibly empty, string, '?' matches an arbitrary character. + * + * Note that in contrast to glob(), the '/' character can be matched by + * the wildcards, there are no '[...]' character ranges and '*' and '?' + * can not be escaped to include them literally in a pattern. + * + * When multiple strings must be matched against the same pattern, it + * is better to compile the pattern to a #GPatternSpec using + * g_pattern_spec_new() and use g_pattern_match_string() instead of + * g_pattern_match_simple(). This avoids the overhead of repeated + * pattern compilation. + **/ + +/** + * GPatternSpec: + * + * A GPatternSpec struct is the 'compiled' form of a pattern. This + * structure is opaque and its fields cannot be accessed directly. + */ + +/* keep enum and structure of gpattern.c and patterntest.c in sync */ +typedef enum +{ + G_MATCH_ALL, /* "*A?A*" */ + G_MATCH_ALL_TAIL, /* "*A?AA" */ + G_MATCH_HEAD, /* "AAAA*" */ + G_MATCH_TAIL, /* "*AAAA" */ + G_MATCH_EXACT, /* "AAAAA" */ + G_MATCH_LAST +} GMatchType; + +struct _GPatternSpec +{ + GMatchType match_type; + guint pattern_length; + guint min_length; + guint max_length; + gchar *pattern; +}; + + +/* --- functions --- */ +static inline gboolean g_pattern_ph_match (const gchar *match_pattern, + const gchar *match_string, + gboolean *wildcard_reached_p) +{ + const gchar *pattern, *string; + gchar ch; + + pattern = match_pattern; + string = match_string; + + ch = *pattern; + pattern++; + while (ch) + { + switch (ch) + { + case '?': + if (!*string) + return FALSE; + string = string + 1; + break; + + case '*': + *wildcard_reached_p = TRUE; + do + { + ch = *pattern; + pattern++; + if (ch == '?') + { + if (!*string) + return FALSE; + string = string + 1; + } + } + while (ch == '*' || ch == '?'); + if (!ch) + return TRUE; + do + { + gboolean next_wildcard_reached = FALSE; + while (ch != *string) + { + if (!*string) + return FALSE; + string = string + 1; + } + string++; + if (g_pattern_ph_match (pattern, string, &next_wildcard_reached)) + return TRUE; + if (next_wildcard_reached) + /* the forthcoming pattern substring up to the next wildcard has + * been matched, but a mismatch occurred for the rest of the + * pattern, following the next wildcard. + * there's no need to advance the current match position any + * further if the rest pattern will not match. + */ + return FALSE; + } + while (*string); + break; + + default: + if (ch == *string) + string++; + else + return FALSE; + break; + } + + ch = *pattern; + pattern++; + } + + return *string == 0; +} + +static gchar *string_reverse(const gchar *string, gint string_length) +{ + gchar *new_string; + gint i, j; + if (string == NULL || string_length <= 0) { + return NULL; + } + + new_string = g_new(gchar, string_length + 1); + if (new_string) { + for (i = 0; i < string_length; i++) { + j = string_length - i - 1; + new_string[j] = string[i]; + } + new_string[string_length] = 0; + } + + return new_string; +} + +/** + * g_pattern_match: + * @pspec: a #GPatternSpec + * @string_length: the length of @string (in bytes, i.e. strlen(), + * not g_utf8_strlen()) + * @string: the UTF-8 encoded string to match + * @string_reversed: (nullable): the reverse of @string or %NULL + * + * Matches a string against a compiled pattern. Passing the correct + * length of the string given is mandatory. 
The reversed string can be + * omitted by passing %NULL, this is more efficient if the reversed + * version of the string to be matched is not at hand, as + * g_pattern_match() will only construct it if the compiled pattern + * requires reverse matches. + * + * Note that, if the user code will (possibly) match a string against a + * multitude of patterns containing wildcards, chances are high that + * some patterns will require a reversed string. In this case, it's + * more efficient to provide the reversed string to avoid multiple + * constructions thereof in the various calls to g_pattern_match(). + * + * Note also that the reverse of a UTF-8 encoded string can in general + * not be obtained by g_strreverse(). This works only if the string + * does not contain any multibyte characters. GLib offers the + * g_utf8_strreverse() function to reverse UTF-8 encoded strings. + * + * Returns: %TRUE if @string matches @pspec + **/ +gboolean g_pattern_match (GPatternSpec *pspec, + guint string_length, + const gchar *string, + const gchar *string_reversed) +{ + g_return_val_if_fail (pspec != NULL, FALSE); + g_return_val_if_fail (string != NULL, FALSE); + + if (string_length < pspec->min_length || + string_length > pspec->max_length) + return FALSE; + + switch (pspec->match_type) + { + gboolean dummy; + case G_MATCH_ALL: + return g_pattern_ph_match (pspec->pattern, string, &dummy); + case G_MATCH_ALL_TAIL: + if (string_reversed) + return g_pattern_ph_match (pspec->pattern, string_reversed, &dummy); + else + { + gboolean result; + gchar *tmp; + tmp = string_reverse (string, string_length); + result = g_pattern_ph_match (pspec->pattern, tmp, &dummy); + g_free (tmp); + return result; + } + case G_MATCH_HEAD: + if (pspec->pattern_length == string_length) + return strcmp (pspec->pattern, string) == 0; + else if (pspec->pattern_length) + return strncmp (pspec->pattern, string, pspec->pattern_length) == 0; + else + return TRUE; + case G_MATCH_TAIL: + if (pspec->pattern_length) + return strcmp (pspec->pattern, string + (string_length - pspec->pattern_length)) == 0; + else + return TRUE; + case G_MATCH_EXACT: + if (pspec->pattern_length != string_length) + return FALSE; + else + return strcmp (pspec->pattern, string) == 0; + default: + g_return_val_if_fail (pspec->match_type < G_MATCH_LAST, FALSE); + return FALSE; + } +} + +/** + * g_pattern_spec_new: + * @pattern: a zero-terminated UTF-8 encoded string + * + * Compiles a pattern to a #GPatternSpec. 
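+ *
+ * A minimal usage sketch (the pattern and file name here are only
+ * illustrative):
+ * |[<!-- language="C" -->
+ * GPatternSpec *pspec = g_pattern_spec_new ("*.c");
+ * gboolean hit = g_pattern_match_string (pspec, "gpattern.c"); /* TRUE */
+ * g_pattern_spec_free (pspec);
+ * ]|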
+ * + * Returns: a newly-allocated #GPatternSpec + **/ +GPatternSpec* g_pattern_spec_new (const gchar *pattern) +{ + GPatternSpec *pspec; + gboolean seen_joker = FALSE, seen_wildcard = FALSE, more_wildcards = FALSE; + gint hw_pos = -1, tw_pos = -1, hj_pos = -1, tj_pos = -1; + gboolean follows_wildcard = FALSE; + guint pending_jokers = 0; + const gchar *s; + gchar *d; + guint i; + + g_return_val_if_fail (pattern != NULL, NULL); + + /* canonicalize pattern and collect necessary stats */ + pspec = g_new (GPatternSpec, 1); + pspec->pattern_length = strlen (pattern); + pspec->min_length = 0; + pspec->max_length = 0; + pspec->pattern = g_new (gchar, pspec->pattern_length + 1); + d = pspec->pattern; + for (i = 0, s = pattern; *s != 0; s++) + { + switch (*s) + { + case '*': + if (follows_wildcard) /* compress multiple wildcards */ + { + pspec->pattern_length--; + continue; + } + follows_wildcard = TRUE; + if (hw_pos < 0) + hw_pos = i; + tw_pos = i; + break; + case '?': + pending_jokers++; + pspec->min_length++; + pspec->max_length += 4; /* maximum UTF-8 character length */ + continue; + default: + for (; pending_jokers; pending_jokers--, i++) { + *d++ = '?'; + if (hj_pos < 0) + hj_pos = i; + tj_pos = i; + } + follows_wildcard = FALSE; + pspec->min_length++; + pspec->max_length++; + break; + } + *d++ = *s; + i++; + } + for (; pending_jokers; pending_jokers--) { + *d++ = '?'; + if (hj_pos < 0) + hj_pos = i; + tj_pos = i; + } + *d++ = 0; + seen_joker = hj_pos >= 0; + seen_wildcard = hw_pos >= 0; + more_wildcards = seen_wildcard && hw_pos != tw_pos; + if (seen_wildcard) + pspec->max_length = UINT_MAX; + + /* special case sole head/tail wildcard or exact matches */ + if (!seen_joker && !more_wildcards) + { + if (pspec->pattern[0] == '*') + { + pspec->match_type = G_MATCH_TAIL; + memmove (pspec->pattern, pspec->pattern + 1, --pspec->pattern_length); + pspec->pattern[pspec->pattern_length] = 0; + return pspec; + } + if (pspec->pattern_length > 0 && + pspec->pattern[pspec->pattern_length - 1] == '*') + { + pspec->match_type = G_MATCH_HEAD; + pspec->pattern[--pspec->pattern_length] = 0; + return pspec; + } + if (!seen_wildcard) + { + pspec->match_type = G_MATCH_EXACT; + return pspec; + } + } + + /* now just need to distinguish between head or tail match start */ + tw_pos = pspec->pattern_length - 1 - tw_pos; /* last pos to tail distance */ + tj_pos = pspec->pattern_length - 1 - tj_pos; /* last pos to tail distance */ + if (seen_wildcard) + pspec->match_type = tw_pos > hw_pos ? G_MATCH_ALL_TAIL : G_MATCH_ALL; + else /* seen_joker */ + pspec->match_type = tj_pos > hj_pos ? G_MATCH_ALL_TAIL : G_MATCH_ALL; + if (pspec->match_type == G_MATCH_ALL_TAIL) { + gchar *tmp = pspec->pattern; + pspec->pattern = string_reverse (pspec->pattern, pspec->pattern_length); + g_free (tmp); + } + return pspec; +} + +/** + * g_pattern_spec_free: + * @pspec: a #GPatternSpec + * + * Frees the memory allocated for the #GPatternSpec. + **/ +void g_pattern_spec_free (GPatternSpec *pspec) +{ + g_return_if_fail (pspec != NULL); + + g_free (pspec->pattern); + g_free (pspec); +} + +/** + * g_pattern_match_string: + * @pspec: a #GPatternSpec + * @string: the UTF-8 encoded string to match + * + * Matches a string against a compiled pattern. If the string is to be + * matched against more than one pattern, consider using + * g_pattern_match() instead while supplying the reversed string. 
+ *
+ * Returns: %TRUE if @string matches @pspec
+ **/
+gboolean g_pattern_match_string (GPatternSpec *pspec, const gchar *string)
+{
+  g_return_val_if_fail (pspec != NULL, FALSE);
+  g_return_val_if_fail (string != NULL, FALSE);
+
+  return g_pattern_match (pspec, strlen (string), string, NULL);
+}
diff --git a/glib_compat/gpattern.h b/glib_compat/gpattern.h
new file mode 100644
index 00000000..cc50c5f5
--- /dev/null
+++ b/glib_compat/gpattern.h
@@ -0,0 +1,34 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997, 1999 Peter Mattis, Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __G_PATTERN_H__
+#define __G_PATTERN_H__
+
+#include "gtypes.h"
+
+typedef struct _GPatternSpec GPatternSpec;
+
+GPatternSpec* g_pattern_spec_new (const gchar *pattern);
+void g_pattern_spec_free (GPatternSpec *pspec);
+gboolean g_pattern_match (GPatternSpec *pspec,
+                          guint string_length,
+                          const gchar *string,
+                          const gchar *string_reversed);
+gboolean g_pattern_match_string (GPatternSpec *pspec,
+                                 const gchar *string);
+
+#endif /* __G_PATTERN_H__ */
diff --git a/glib_compat/grand.c b/glib_compat/grand.c
new file mode 100644
index 00000000..b6e57645
--- /dev/null
+++ b/glib_compat/grand.c
@@ -0,0 +1,384 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Originally developed and coded by Makoto Matsumoto and Takuji
+ * Nishimura. Please mail <matumoto@math.keio.ac.jp>, if you're using
+ * code from this file in your own programs or libraries.
+ * Further information on the Mersenne Twister can be found at
+ * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
+ * This code was adapted to glib by Sebastian Wilhelmi.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000.  See the AUTHORS
+ * file for a list of people on the GLib Team.  See the ChangeLog
+ * files for a list of changes.  These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/*
+ * MT safe
+ */
+
+#define _CRT_RAND_S
+
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <time.h>
+#ifndef _MSC_VER
+#include <sys/time.h>
+#include <unistd.h>
+#else
+#include <windows.h>
+#endif
+
+#include "grand.h"
+#include "gmem.h"
+#include "gmessages.h"
+
+#define G_USEC_PER_SEC 1000000
+
+#if defined(__MINGW64_VERSION_MAJOR) || defined(_WIN32)
+errno_t rand_s(unsigned int* randomValue);
+#endif
+
+#define G_GINT64_CONSTANT(val) (val##L)
+
+/* Period parameters */
+#define N 624
+#define M 397
+#define MATRIX_A 0x9908b0df   /* constant vector a */
+#define UPPER_MASK 0x80000000 /* most significant w-r bits */
+#define LOWER_MASK 0x7fffffff /* least significant r bits */
+
+/* Tempering parameters */
+#define TEMPERING_MASK_B 0x9d2c5680
+#define TEMPERING_MASK_C 0xefc60000
+#define TEMPERING_SHIFT_U(y) (y >> 11)
+#define TEMPERING_SHIFT_S(y) (y << 7)
+#define TEMPERING_SHIFT_T(y) (y << 15)
+#define TEMPERING_SHIFT_L(y) (y >> 18)
+
+struct _GRand
+{
+  guint32 mt[N]; /* the array for the state vector */
+  guint mti;
+};
+
+static guint get_random_version (void)
+{
+  static gsize initialized = FALSE;
+  static guint random_version;
+
+  if (!initialized)
+    {
+      // g_warning ("Unknown G_RANDOM_VERSION \"%s\". Using version 2.2.", version_string);
+      random_version = 22;
+      initialized = TRUE;
+    }
+
+  return random_version;
+}
+
+/**
+ * g_rand_set_seed:
+ * @rand_: a #GRand
+ * @seed: a value to reinitialize the random number generator
+ *
+ * Sets the seed for the random number generator #GRand to @seed.
+ */
+void g_rand_set_seed (GRand *rand, guint32 seed)
+{
+  g_return_if_fail (rand != NULL);
+
+  switch (get_random_version ())
+    {
+    case 20:
+      /* setting initial seeds to mt[N] using         */
+      /* the generator Line 25 of Table 1 in          */
+      /* [KNUTH 1981, The Art of Computer Programming */
+      /* Vol. 2 (2nd Ed.), pp102]                     */
+
+      if (seed == 0) /* This would make the PRNG produce only zeros */
+        seed = 0x6b842128; /* Just set it to another number */
+
+      rand->mt[0]= seed;
+      for (rand->mti=1; rand->mti<N; rand->mti++)
+        rand->mt[rand->mti] = (69069 * rand->mt[rand->mti-1]);
+
+      break;
+    case 22:
+      /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
+      /* In the previous version (see above), MSBs of the    */
+      /* seed affect only MSBs of the array mt[].            */
+
+      rand->mt[0]= seed;
+      for (rand->mti=1; rand->mti<N; rand->mti++)
+        rand->mt[rand->mti] = 1812433253UL *
+          (rand->mt[rand->mti-1] ^ (rand->mt[rand->mti-1] >> 30)) + rand->mti;
+      break;
+    default:
+      // g_assert_not_reached ();
+      break;
+    }
+}
+
+/**
+ * g_rand_new_with_seed:
+ * @seed: a value to initialize the random number generator
+ *
+ * Creates a new random number generator initialized with @seed.
+ *
+ * Returns: the new #GRand
+ **/
+GRand* g_rand_new_with_seed (guint32 seed)
+{
+  GRand *rand = g_new0 (GRand, 1);
+  g_rand_set_seed (rand, seed);
+  return rand;
+}
+
+/**
+ * g_rand_set_seed_array:
+ * @rand_: a #GRand
+ * @seed: array to initialize with
+ * @seed_length: length of array
+ *
+ * Initializes the random number generator by an array of longs.
+ * Array can be of arbitrary size, though only the first 624 values
+ * are taken.  This function is useful if you have many low entropy
+ * seeds, or if you require more than 32 bits of actual entropy for
+ * your application.
+ *
+ * Since: 2.4
+ */
+void g_rand_set_seed_array (GRand *rand, const guint32 *seed, guint seed_length)
+{
+  guint i, j, k;
+
+  g_return_if_fail (rand != NULL);
+  g_return_if_fail (seed_length >= 1);
+
+  g_rand_set_seed (rand, 19650218UL);
+
+  i=1; j=0;
+  k = (N>seed_length ? N : seed_length);
+  for (; k; k--)
+    {
+      rand->mt[i] = (rand->mt[i] ^
+                     ((rand->mt[i-1] ^ (rand->mt[i-1] >> 30)) * 1664525UL))
+        + seed[j] + j; /* non linear */
+      rand->mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
+      i++; j++;
+      if (i>=N)
+        {
+          rand->mt[0] = rand->mt[N-1];
+          i=1;
+        }
+      if (j>=seed_length)
+        j=0;
+    }
+  for (k=N-1; k; k--)
+    {
+      rand->mt[i] = (rand->mt[i] ^
+                     ((rand->mt[i-1] ^ (rand->mt[i-1] >> 30)) * 1566083941UL))
+        - i; /* non linear */
+      rand->mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
+      i++;
+      if (i>=N)
+        {
+          rand->mt[0] = rand->mt[N-1];
+          i=1;
+        }
+    }
+
+  rand->mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */
+}
+
+/**
+ * g_rand_new_with_seed_array:
+ * @seed: an array of seeds to initialize the random number generator
+ * @seed_length: the length of the @seed array
+ *
+ * Creates a new random number generator initialized with @seed.
+ *
+ * Returns: the new #GRand
+ *
+ * Since: 2.4
+ */
+GRand *g_rand_new_with_seed_array (const guint32 *seed, guint seed_length)
+{
+  GRand *rand = g_new0 (GRand, 1);
+  g_rand_set_seed_array (rand, seed, seed_length);
+  return rand;
+}
+
+gint64 g_get_real_time (void)
+{
+#if defined(unix) || defined(__unix__) || defined(__unix) || defined (__MINGW32__) || defined(__APPLE__)
+  struct timeval r;
+
+  /* this is required on alpha, there the timeval structs are ints
+   * not longs and a cast only would fail horribly */
+  gettimeofday (&r, NULL);
+
+  return (((gint64) r.tv_sec) * 1000000) + r.tv_usec;
+#else
+  FILETIME ft;
+  guint64 time64;
+
+  GetSystemTimeAsFileTime (&ft);
+  memmove (&time64, &ft, sizeof (FILETIME));
+
+  /* Convert from 100s of nanoseconds since 1601-01-01
+   * to Unix epoch. This is Y2038 safe.
+   */
+  time64 -= G_GINT64_CONSTANT (116444736000000000);
+  time64 /= 10;
+
+  return time64;
+#endif
+}
+
+/**
+ * g_rand_new:
+ *
+ * Creates a new random number generator initialized with a seed taken
+ * either from `/dev/urandom` (if existing) or from the current time
+ * (as a fallback).
+ *
+ * On Windows, the seed is taken from rand_s().
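+ *
+ * A minimal usage sketch (note that this compat layer declares no
+ * destructor for #GRand in grand.h):
+ * |[<!-- language="C" -->
+ * GRand *rand = g_rand_new ();
+ * guint32 value = g_rand_int (rand); /* uniform over [0..2^32-1] */
+ * ]|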
+ * + * Returns: the new #GRand + */ +GRand *g_rand_new (void) +{ + guint32 seed[4]; +#if defined(unix) || defined(__unix__) || defined(__unix) || defined(__APPLE__) + static gboolean dev_urandom_exists = TRUE; + + if (dev_urandom_exists) + { + FILE* dev_urandom; + + do + { + dev_urandom = fopen("/dev/urandom", "rb"); + } + while (dev_urandom == NULL && errno == EINTR); + + if (dev_urandom) + { + int r; + + setvbuf (dev_urandom, NULL, _IONBF, 0); + do + { + errno = 0; + r = fread (seed, sizeof (seed), 1, dev_urandom); + } + while (errno == EINTR); + + if (r != 1) + dev_urandom_exists = FALSE; + + fclose (dev_urandom); + } + else + dev_urandom_exists = FALSE; + } + + if (!dev_urandom_exists) + { + gint64 now_us = g_get_real_time (); + seed[0] = now_us / G_USEC_PER_SEC; + seed[1] = now_us % G_USEC_PER_SEC; + seed[2] = getpid (); + seed[3] = getppid (); + } +#else /* G_OS_WIN32 */ + /* rand_s() is only available since Visual Studio 2005 and + * MinGW-w64 has a wrapper that will emulate rand_s() if it's not in msvcrt + */ +#if (defined(_MSC_VER) && _MSC_VER >= 1400) || defined(__MINGW64_VERSION_MAJOR) + gint i; + + for (i = 0; i < 4;/* array size of seed */ i++) { + rand_s(&seed[i]); + } +#else +#warning Using insecure seed for random number generation because of missing rand_s() in Windows XP + GTimeVal now; + + g_get_current_time (&now); + seed[0] = now.tv_sec; + seed[1] = now.tv_usec; + seed[2] = getpid (); + seed[3] = 0; +#endif + +#endif + + return g_rand_new_with_seed_array (seed, 4); +} + +/** + * g_rand_int: + * @rand_: a #GRand + * + * Returns the next random #guint32 from @rand_ equally distributed over + * the range [0..2^32-1]. + * + * Returns: a random number + */ +guint32 g_rand_int (GRand *rand) +{ + guint32 y; + static const guint32 mag01[2]={0x0, MATRIX_A}; + /* mag01[x] = x * MATRIX_A for x=0,1 */ + + g_return_val_if_fail (rand != NULL, 0); + + if (rand->mti >= N) { /* generate N words at one time */ + int kk; + + for (kk = 0; kk < N - M; kk++) { + y = (rand->mt[kk]&UPPER_MASK)|(rand->mt[kk+1]&LOWER_MASK); + rand->mt[kk] = rand->mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1]; + } + for (; kk < N - 1; kk++) { + y = (rand->mt[kk]&UPPER_MASK)|(rand->mt[kk+1]&LOWER_MASK); + rand->mt[kk] = rand->mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1]; + } + y = (rand->mt[N-1]&UPPER_MASK)|(rand->mt[0]&LOWER_MASK); + rand->mt[N-1] = rand->mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1]; + + rand->mti = 0; + } + + y = rand->mt[rand->mti++]; + y ^= TEMPERING_SHIFT_U(y); + y ^= TEMPERING_SHIFT_S(y) & TEMPERING_MASK_B; + y ^= TEMPERING_SHIFT_T(y) & TEMPERING_MASK_C; + y ^= TEMPERING_SHIFT_L(y); + + return y; +} + diff --git a/glib_compat/grand.h b/glib_compat/grand.h new file mode 100644 index 00000000..c8947717 --- /dev/null +++ b/glib_compat/grand.h @@ -0,0 +1,37 @@ +/* GLIB - Library of useful routines for C programming + * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000.  See the AUTHORS
+ * file for a list of people on the GLib Team.  See the ChangeLog
+ * files for a list of changes.  These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+#ifndef __G_RAND_H__
+#define __G_RAND_H__
+
+#include "gtypes.h"
+
+typedef struct _GRand GRand;
+
+GRand *g_rand_new_with_seed(guint32 seed);
+GRand *g_rand_new_with_seed_array (const guint32 *seed, guint seed_length);
+GRand *g_rand_new(void);
+guint32 g_rand_int(GRand *rand_);
+
+#endif /* __G_RAND_H__ */
diff --git a/glib_compat/gslice.c b/glib_compat/gslice.c
new file mode 100644
index 00000000..3b1581e2
--- /dev/null
+++ b/glib_compat/gslice.c
@@ -0,0 +1,91 @@
+/* GLIB sliced memory - fast concurrent memory chunk allocator
+ * Copyright (C) 2005 Tim Janik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+/* MT safe */
+
+#include <string.h>
+
+#include "gtypes.h"
+#include "gslice.h"
+#include "gmem.h" /* gslice.h */
+
+/**
+ * g_slice_alloc:
+ * @mem_size: the number of bytes to allocate
+ *
+ * Allocates a block of memory from the slice allocator.
+ * The block address handed out can be expected to be aligned
+ * to at least 1 * sizeof (void*),
+ * though in general slices are 2 * sizeof (void*) bytes aligned,
+ * if a malloc() fallback implementation is used instead,
+ * the alignment may be reduced in a libc dependent fashion.
+ * Note that the underlying slice allocation mechanism can
+ * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
+ * environment variable.
+ *
+ * Returns: a pointer to the allocated memory block, which will be %NULL if and
+ *    only if @mem_size is 0
+ *
+ * Since: 2.10
+ */
+gpointer g_slice_alloc (gsize mem_size)
+{
+  return g_malloc (mem_size);
+}
+
+/**
+ * g_slice_alloc0:
+ * @mem_size: the number of bytes to allocate
+ *
+ * Allocates a block of memory via g_slice_alloc() and initializes
+ * the returned memory to 0. Note that the underlying slice allocation
+ * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
+ * environment variable.
+ *
+ * Returns: a pointer to the allocated block, which will be %NULL if and only
+ *    if @mem_size is 0
+ *
+ * Since: 2.10
+ */
+gpointer g_slice_alloc0 (gsize mem_size)
+{
+  gpointer mem = g_slice_alloc (mem_size);
+  if (mem)
+    memset (mem, 0, mem_size);
+  return mem;
+}
+
+/**
+ * g_slice_free1:
+ * @mem_size: the size of the block
+ * @mem_block: a pointer to the block to free
+ *
+ * Frees a block of memory.
+ *
+ * The memory must have been allocated via g_slice_alloc() or
+ * g_slice_alloc0() and the @mem_size has to match the size
+ * specified upon allocation.
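+ *
+ * A short sketch (in this compat layer the slice calls simply forward to
+ * g_malloc() and g_free()):
+ * |[<!-- language="C" -->
+ * gpointer mem = g_slice_alloc (128);
+ * g_slice_free1 (128, mem);
+ * ]|
+ *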
+ * Note that the exact release behaviour
+ * can be changed with the [`G_DEBUG=gc-friendly`][G_DEBUG] environment
+ * variable, also see [`G_SLICE`][G_SLICE] for related debugging options.
+ *
+ * If @mem_block is %NULL, this function does nothing.
+ *
+ * Since: 2.10
+ */
+void g_slice_free1 (gsize mem_size, gpointer mem_block)
+{
+  g_free (mem_block);
+}
diff --git a/glib_compat/gslice.h b/glib_compat/gslice.h
new file mode 100644
index 00000000..78fd21de
--- /dev/null
+++ b/glib_compat/gslice.h
@@ -0,0 +1,36 @@
+/* GLIB sliced memory - fast threaded memory chunk allocator
+ * Copyright (C) 2005 Tim Janik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __G_SLICE_H__
+#define __G_SLICE_H__
+
+#include "gtypes.h"
+
+#define g_slice_new(type)  ((type*) g_slice_alloc (sizeof (type)))
+#define g_slice_new0(type) ((type*) g_slice_alloc0 (sizeof (type)))
+
+gpointer g_slice_alloc0 (gsize block_size);
+gpointer g_slice_alloc (gsize block_size);
+void g_slice_free1 (gsize block_size, gpointer mem_block);
+
+#define g_slice_free(type, mem) \
+  G_STMT_START { \
+    if (1) g_slice_free1 (sizeof (type), (mem)); \
+    else (void) ((type*) 0 == (mem)); \
+  } G_STMT_END
+
+#endif /* __G_SLICE_H__ */
diff --git a/glib_compat/gtestutils.c b/glib_compat/gtestutils.c
new file mode 100644
index 00000000..7b795e5d
--- /dev/null
+++ b/glib_compat/gtestutils.c
@@ -0,0 +1,34 @@
+/* GLib testing utilities
+ * Copyright (C) 2007 Imendio AB
+ * Authors: Tim Janik, Sven Herzberg
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "gtestutils.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+void
+g_assertion_message_expr (const char *file,
+                          int line,
+                          const char *expr)
+{
+  if (!expr)
+    printf("%s:%d code should not be reached", file, line);
+  else
+    printf("%s:%d assertion failed: %s", file, line, expr);
+
+  abort();
+}
diff --git a/glib_compat/gtestutils.h b/glib_compat/gtestutils.h
new file mode 100644
index 00000000..dd3de5d3
--- /dev/null
+++ b/glib_compat/gtestutils.h
@@ -0,0 +1,57 @@
+/* GLib testing utilities
+ * Copyright (C) 2007 Imendio AB
+ * Authors: Tim Janik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __G_TEST_UTILS_H__
+#define __G_TEST_UTILS_H__
+
+
+#if !(defined (G_STMT_START) && defined (G_STMT_END))
+#define G_STMT_START do
+#if defined (_MSC_VER) && (_MSC_VER >= 1500)
+#define G_STMT_END \
+    __pragma(warning(push)) \
+    __pragma(warning(disable:4127)) \
+    while(0) \
+    __pragma(warning(pop))
+#else
+#define G_STMT_END while (0)
+#endif
+#endif
+
+#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
+#define G_GNUC_NORETURN \
+  __attribute__((__noreturn__))
+#else /* !__GNUC__ */
+/* NOTE: MSVC has __declspec(noreturn) but unlike GCC __attribute__,
+ * __declspec can only be placed at the start of the function prototype
+ * and not at the end, so we can't use it without breaking API.
+ */
+#define G_GNUC_NORETURN
+#endif /* !__GNUC__ */
+
+void g_assertion_message_expr (const char *file,
+                               int line,
+                               const char *expr) G_GNUC_NORETURN;
+
+#define g_assert_not_reached() G_STMT_START { g_assertion_message_expr (__FILE__, __LINE__, NULL); } G_STMT_END
+#define g_assert(expr) G_STMT_START { \
+                         if (expr) ; else \
+                           g_assertion_message_expr (__FILE__, __LINE__, #expr); \
+                       } G_STMT_END
+
+#endif /* __G_TEST_UTILS_H__ */
diff --git a/glib_compat/gtree.c b/glib_compat/gtree.c
new file mode 100644
index 00000000..a790d568
--- /dev/null
+++ b/glib_compat/gtree.c
@@ -0,0 +1,1255 @@
+/* GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000.  See the AUTHORS
+ * file for a list of people on the GLib Team.  See the ChangeLog
+ * files for a list of changes.
These files are distributed with + * GLib at ftp://ftp.gtk.org/pub/gtk/. + */ + +/* + * MT safe + */ + +#include "gtypes.h" +#include "gtree.h" +//#include "gatomic.h" +//#include "gtestutils.h" +#include "gslice.h" +#include "gmessages.h" +#include "gnode.h" + +/** + * SECTION:trees-binary + * @title: Balanced Binary Trees + * @short_description: a sorted collection of key/value pairs optimized + * for searching and traversing in order + * + * The #GTree structure and its associated functions provide a sorted + * collection of key/value pairs optimized for searching and traversing + * in order. + * + * To create a new #GTree use g_tree_new(). + * + * To insert a key/value pair into a #GTree use g_tree_insert(). + * + * To look up the value corresponding to a given key, use + * g_tree_lookup() and g_tree_lookup_extended(). + * + * To find out the number of nodes in a #GTree, use g_tree_nnodes(). To + * get the height of a #GTree, use g_tree_height(). + * + * To traverse a #GTree, calling a function for each node visited in + * the traversal, use g_tree_foreach(). + * + * To remove a key/value pair use g_tree_remove(). + * + * To destroy a #GTree, use g_tree_destroy(). + **/ + +#undef G_TREE_DEBUG + +#define MAX_GTREE_HEIGHT 40 + +typedef struct _GTreeNode GTreeNode; + +/** + * GTree: + * + * The GTree struct is an opaque data structure representing a + * [balanced binary tree][glib-Balanced-Binary-Trees]. It should be + * accessed only by using the following functions. + */ +struct _GTree +{ + GTreeNode *root; + GCompareDataFunc key_compare; + GDestroyNotify key_destroy_func; + GDestroyNotify value_destroy_func; + gpointer key_compare_data; + guint nnodes; + gint ref_count; +}; + +struct _GTreeNode +{ + gpointer key; /* key for this node */ + gpointer value; /* value stored at this node */ + GTreeNode *left; /* left subtree */ + GTreeNode *right; /* right subtree */ + gint8 balance; /* height (right) - height (left) */ + guint8 left_child; + guint8 right_child; +}; + + +static GTreeNode* g_tree_node_new (gpointer key, + gpointer value); +static void g_tree_insert_internal (GTree *tree, + gpointer key, + gpointer value, + gboolean replace); +static gboolean g_tree_remove_internal (GTree *tree, + gconstpointer key, + gboolean steal); +static GTreeNode* g_tree_node_balance (GTreeNode *node); +static GTreeNode *g_tree_find_node (GTree *tree, + gconstpointer key); +static gint g_tree_node_pre_order (GTreeNode *node, + GTraverseFunc traverse_func, + gpointer data); +static gint g_tree_node_in_order (GTreeNode *node, + GTraverseFunc traverse_func, + gpointer data); +static gint g_tree_node_post_order (GTreeNode *node, + GTraverseFunc traverse_func, + gpointer data); +static gpointer g_tree_node_search (GTreeNode *node, + GCompareFunc search_func, + gconstpointer data); +static GTreeNode* g_tree_node_rotate_left (GTreeNode *node); +static GTreeNode* g_tree_node_rotate_right (GTreeNode *node); +#ifdef G_TREE_DEBUG +static void g_tree_node_check (GTreeNode *node); +#endif + + +static GTreeNode *g_tree_node_new (gpointer key, gpointer value) +{ + GTreeNode *node = g_slice_new (GTreeNode); + + node->balance = 0; + node->left = NULL; + node->right = NULL; + node->left_child = FALSE; + node->right_child = FALSE; + node->key = key; + node->value = value; + + return node; +} + +/** + * g_tree_new: + * @key_compare_func: the function used to order the nodes in the #GTree. 
+ * It should return values similar to the standard strcmp() function - + * 0 if the two arguments are equal, a negative value if the first argument + * comes before the second, or a positive value if the first argument comes + * after the second. + * + * Creates a new #GTree. + * + * Returns: a newly allocated #GTree + */ +GTree *g_tree_new (GCompareFunc key_compare_func) +{ + g_return_val_if_fail (key_compare_func != NULL, NULL); + + return g_tree_new_full ((GCompareDataFunc) key_compare_func, NULL, + NULL, NULL); +} + +/** + * g_tree_new_with_data: + * @key_compare_func: qsort()-style comparison function + * @key_compare_data: data to pass to comparison function + * + * Creates a new #GTree with a comparison function that accepts user data. + * See g_tree_new() for more details. + * + * Returns: a newly allocated #GTree + */ +GTree *g_tree_new_with_data (GCompareDataFunc key_compare_func, gpointer key_compare_data) +{ + g_return_val_if_fail (key_compare_func != NULL, NULL); + + return g_tree_new_full (key_compare_func, key_compare_data, NULL, NULL); +} + +/** + * g_tree_new_full: + * @key_compare_func: qsort()-style comparison function + * @key_compare_data: data to pass to comparison function + * @key_destroy_func: a function to free the memory allocated for the key + * used when removing the entry from the #GTree or %NULL if you don't + * want to supply such a function + * @value_destroy_func: a function to free the memory allocated for the + * value used when removing the entry from the #GTree or %NULL if you + * don't want to supply such a function + * + * Creates a new #GTree like g_tree_new() and allows to specify functions + * to free the memory allocated for the key and value that get called when + * removing the entry from the #GTree. + * + * Returns: a newly allocated #GTree + */ +GTree *g_tree_new_full (GCompareDataFunc key_compare_func, + gpointer key_compare_data, + GDestroyNotify key_destroy_func, + GDestroyNotify value_destroy_func) +{ + GTree *tree; + + g_return_val_if_fail (key_compare_func != NULL, NULL); + + tree = g_slice_new (GTree); + tree->root = NULL; + tree->key_compare = key_compare_func; + tree->key_destroy_func = key_destroy_func; + tree->value_destroy_func = value_destroy_func; + tree->key_compare_data = key_compare_data; + tree->nnodes = 0; + tree->ref_count = 1; + + return tree; +} + +static inline GTreeNode *g_tree_first_node (GTree *tree) +{ + GTreeNode *tmp; + + if (!tree->root) + return NULL; + + tmp = tree->root; + + while (tmp->left_child) + tmp = tmp->left; + + return tmp; +} + +static inline GTreeNode *g_tree_node_previous (GTreeNode *node) +{ + GTreeNode *tmp; + + tmp = node->left; + + if (node->left_child) + while (tmp->right_child) + tmp = tmp->right; + + return tmp; +} + +static inline GTreeNode *g_tree_node_next (GTreeNode *node) +{ + GTreeNode *tmp; + + tmp = node->right; + + if (node->right_child) + while (tmp->left_child) + tmp = tmp->left; + + return tmp; +} + +static void g_tree_remove_all (GTree *tree) +{ + GTreeNode *node; + GTreeNode *next; + + g_return_if_fail (tree != NULL); + + node = g_tree_first_node (tree); + + while (node) + { + next = g_tree_node_next (node); + + if (tree->key_destroy_func) + tree->key_destroy_func (node->key); + if (tree->value_destroy_func) + tree->value_destroy_func (node->value); + g_slice_free (GTreeNode, node); + + node = next; + } + + tree->root = NULL; + tree->nnodes = 0; +} + +/** + * g_tree_ref: + * @tree: a #GTree + * + * Increments the reference count of @tree by one. 
+ * + * It is safe to call this function from any thread. + * + * Returns: the passed in #GTree + * + * Since: 2.22 + */ +GTree *g_tree_ref (GTree *tree) +{ + g_return_val_if_fail (tree != NULL, NULL); + + tree->ref_count++; + + return tree; +} + +/** + * g_tree_unref: + * @tree: a #GTree + * + * Decrements the reference count of @tree by one. + * If the reference count drops to 0, all keys and values will + * be destroyed (if destroy functions were specified) and all + * memory allocated by @tree will be released. + * + * It is safe to call this function from any thread. + * + * Since: 2.22 + */ +void g_tree_unref (GTree *tree) +{ + g_return_if_fail (tree != NULL); + + tree->ref_count--; + + if (!tree->ref_count) + { + g_tree_remove_all (tree); + g_slice_free (GTree, tree); + } +} + +/** + * g_tree_destroy: + * @tree: a #GTree + * + * Removes all keys and values from the #GTree and decreases its + * reference count by one. If keys and/or values are dynamically + * allocated, you should either free them first or create the #GTree + * using g_tree_new_full(). In the latter case the destroy functions + * you supplied will be called on all keys and values before destroying + * the #GTree. + */ +void g_tree_destroy (GTree *tree) +{ + g_return_if_fail (tree != NULL); + + g_tree_remove_all (tree); + g_tree_unref (tree); +} + +/** + * g_tree_insert: + * @tree: a #GTree + * @key: the key to insert + * @value: the value corresponding to the key + * + * Inserts a key/value pair into a #GTree. + * + * If the given key already exists in the #GTree its corresponding value + * is set to the new value. If you supplied a @value_destroy_func when + * creating the #GTree, the old value is freed using that function. If + * you supplied a @key_destroy_func when creating the #GTree, the passed + * key is freed using that function. + * + * The tree is automatically 'balanced' as new key/value pairs are added, + * so that the distance from the root to every leaf is as small as possible. + */ +void g_tree_insert (GTree *tree, gpointer key, gpointer value) +{ + g_return_if_fail (tree != NULL); + + g_tree_insert_internal (tree, key, value, FALSE); + +#ifdef G_TREE_DEBUG + g_tree_node_check (tree->root); +#endif +} + +/** + * g_tree_replace: + * @tree: a #GTree + * @key: the key to insert + * @value: the value corresponding to the key + * + * Inserts a new key and value into a #GTree similar to g_tree_insert(). + * The difference is that if the key already exists in the #GTree, it gets + * replaced by the new key. If you supplied a @value_destroy_func when + * creating the #GTree, the old value is freed using that function. If you + * supplied a @key_destroy_func when creating the #GTree, the old key is + * freed using that function. + * + * The tree is automatically 'balanced' as new key/value pairs are added, + * so that the distance from the root to every leaf is as small as possible. 
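+ *
+ * A minimal sketch of inserting and looking up a pair (string-literal
+ * keys and values, so no destroy functions are required):
+ * |[<!-- language="C" -->
+ * GTree *tree = g_tree_new ((GCompareFunc) strcmp);
+ * g_tree_insert (tree, "one", "1");
+ * gpointer value = g_tree_lookup (tree, "one"); /* "1" */
+ * g_tree_destroy (tree);
+ * ]|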
+ */ +void g_tree_replace (GTree *tree, gpointer key, gpointer value) +{ + g_return_if_fail (tree != NULL); + + g_tree_insert_internal (tree, key, value, TRUE); + +#ifdef G_TREE_DEBUG + g_tree_node_check (tree->root); +#endif +} + +/* internal insert routine */ +static void g_tree_insert_internal (GTree *tree, gpointer key, gpointer value, gboolean replace) +{ + GTreeNode *node; + GTreeNode *path[MAX_GTREE_HEIGHT]; + int idx; + + g_return_if_fail (tree != NULL); + + if (!tree->root) + { + tree->root = g_tree_node_new (key, value); + tree->nnodes++; + return; + } + + idx = 0; + path[idx++] = NULL; + node = tree->root; + + while (1) + { + int cmp = tree->key_compare (key, node->key, tree->key_compare_data); + + if (cmp == 0) + { + if (tree->value_destroy_func) + tree->value_destroy_func (node->value); + + node->value = value; + + if (replace) + { + if (tree->key_destroy_func) + tree->key_destroy_func (node->key); + + node->key = key; + } + else + { + /* free the passed key */ + if (tree->key_destroy_func) + tree->key_destroy_func (key); + } + + return; + } + else if (cmp < 0) + { + if (node->left_child) + { + path[idx++] = node; + node = node->left; + } + else + { + GTreeNode *child = g_tree_node_new (key, value); + + child->left = node->left; + child->right = node; + node->left = child; + node->left_child = TRUE; + node->balance -= 1; + + tree->nnodes++; + + break; + } + } + else + { + if (node->right_child) + { + path[idx++] = node; + node = node->right; + } + else + { + GTreeNode *child = g_tree_node_new (key, value); + + child->right = node->right; + child->left = node; + node->right = child; + node->right_child = TRUE; + node->balance += 1; + + tree->nnodes++; + + break; + } + } + } + + /* Restore balance. This is the goodness of a non-recursive + * implementation, when we are done with balancing we 'break' + * the loop and we are done. + */ + while (1) + { + GTreeNode *bparent = path[--idx]; + gboolean left_node = (bparent && node == bparent->left); + //g_assert (!bparent || bparent->left == node || bparent->right == node); + + if (node->balance < -1 || node->balance > 1) + { + node = g_tree_node_balance (node); + if (bparent == NULL) + tree->root = node; + else if (left_node) + bparent->left = node; + else + bparent->right = node; + } + + if (node->balance == 0 || bparent == NULL) + break; + + if (left_node) + bparent->balance -= 1; + else + bparent->balance += 1; + + node = bparent; + } +} + +/** + * g_tree_remove: + * @tree: a #GTree + * @key: the key to remove + * + * Removes a key/value pair from a #GTree. + * + * If the #GTree was created using g_tree_new_full(), the key and value + * are freed using the supplied destroy functions, otherwise you have to + * make sure that any dynamically allocated values are freed yourself. + * If the key does not exist in the #GTree, the function does nothing. + * + * Returns: %TRUE if the key was found (prior to 2.8, this function + * returned nothing) + */ +gboolean g_tree_remove (GTree *tree, gconstpointer key) +{ + gboolean removed; + + g_return_val_if_fail (tree != NULL, FALSE); + + removed = g_tree_remove_internal (tree, key, FALSE); + +#ifdef G_TREE_DEBUG + g_tree_node_check (tree->root); +#endif + + return removed; +} + +/** + * g_tree_steal: + * @tree: a #GTree + * @key: the key to remove + * + * Removes a key and its associated value from a #GTree without calling + * the key and value destroy functions. + * + * If the key does not exist in the #GTree, the function does nothing. 
+ * + * Returns: %TRUE if the key was found (prior to 2.8, this function + * returned nothing) + */ +gboolean g_tree_steal (GTree *tree, gconstpointer key) +{ + gboolean removed; + + g_return_val_if_fail (tree != NULL, FALSE); + + removed = g_tree_remove_internal (tree, key, TRUE); + +#ifdef G_TREE_DEBUG + g_tree_node_check (tree->root); +#endif + + return removed; +} + +/* internal remove routine */ +static gboolean g_tree_remove_internal (GTree *tree, gconstpointer key, gboolean steal) +{ + GTreeNode *node, *parent, *balance; + GTreeNode *path[MAX_GTREE_HEIGHT]; + int idx; + gboolean left_node; + + g_return_val_if_fail (tree != NULL, FALSE); + + if (!tree->root) + return FALSE; + + idx = 0; + path[idx++] = NULL; + node = tree->root; + + while (1) + { + int cmp = tree->key_compare (key, node->key, tree->key_compare_data); + + if (cmp == 0) + break; + else if (cmp < 0) + { + if (!node->left_child) + return FALSE; + + path[idx++] = node; + node = node->left; + } + else + { + if (!node->right_child) + return FALSE; + + path[idx++] = node; + node = node->right; + } + } + + /* The following code is almost equal to g_tree_remove_node, + * except that we do not have to call g_tree_node_parent. + */ + balance = parent = path[--idx]; + //g_assert (!parent || parent->left == node || parent->right == node); + left_node = (parent && node == parent->left); + + if (!node->left_child) + { + if (!node->right_child) + { + if (!parent) + tree->root = NULL; + else if (left_node) + { + parent->left_child = FALSE; + parent->left = node->left; + parent->balance += 1; + } + else + { + parent->right_child = FALSE; + parent->right = node->right; + parent->balance -= 1; + } + } + else /* node has a right child */ + { + GTreeNode *tmp = g_tree_node_next (node); + tmp->left = node->left; + + if (!parent) + tree->root = node->right; + else if (left_node) + { + parent->left = node->right; + parent->balance += 1; + } + else + { + parent->right = node->right; + parent->balance -= 1; + } + } + } + else /* node has a left child */ + { + if (!node->right_child) + { + GTreeNode *tmp = g_tree_node_previous (node); + tmp->right = node->right; + + if (parent == NULL) + tree->root = node->left; + else if (left_node) + { + parent->left = node->left; + parent->balance += 1; + } + else + { + parent->right = node->left; + parent->balance -= 1; + } + } + else /* node has a both children (pant, pant!) 
*/ + { + GTreeNode *prev = node->left; + GTreeNode *next = node->right; + GTreeNode *nextp = node; + int old_idx = idx + 1; + idx++; + + /* path[idx] == parent */ + /* find the immediately next node (and its parent) */ + while (next->left_child) + { + path[++idx] = nextp = next; + next = next->left; + } + + path[old_idx] = next; + balance = path[idx]; + + /* remove 'next' from the tree */ + if (nextp != node) + { + if (next->right_child) + nextp->left = next->right; + else + nextp->left_child = FALSE; + nextp->balance += 1; + + next->right_child = TRUE; + next->right = node->right; + } + else + node->balance -= 1; + + /* set the prev to point to the right place */ + while (prev->right_child) + prev = prev->right; + prev->right = next; + + /* prepare 'next' to replace 'node' */ + next->left_child = TRUE; + next->left = node->left; + next->balance = node->balance; + + if (!parent) + tree->root = next; + else if (left_node) + parent->left = next; + else + parent->right = next; + } + } + + /* restore balance */ + if (balance) + while (1) + { + GTreeNode *bparent = path[--idx]; + //g_assert (!bparent || bparent->left == balance || bparent->right == balance); + left_node = (bparent && balance == bparent->left); + + if(balance->balance < -1 || balance->balance > 1) + { + balance = g_tree_node_balance (balance); + if (!bparent) + tree->root = balance; + else if (left_node) + bparent->left = balance; + else + bparent->right = balance; + } + + if (balance->balance != 0 || !bparent) + break; + + if (left_node) + bparent->balance += 1; + else + bparent->balance -= 1; + + balance = bparent; + } + + if (!steal) + { + if (tree->key_destroy_func) + tree->key_destroy_func (node->key); + if (tree->value_destroy_func) + tree->value_destroy_func (node->value); + } + + g_slice_free (GTreeNode, node); + + tree->nnodes--; + + return TRUE; +} + +/** + * g_tree_lookup: + * @tree: a #GTree + * @key: the key to look up + * + * Gets the value corresponding to the given key. Since a #GTree is + * automatically balanced as key/value pairs are added, key lookup + * is O(log n) (where n is the number of key/value pairs in the tree). + * + * Returns: the value corresponding to the key, or %NULL + * if the key was not found + */ +gpointer g_tree_lookup (GTree *tree, gconstpointer key) +{ + GTreeNode *node; + + g_return_val_if_fail (tree != NULL, NULL); + + node = g_tree_find_node (tree, key); + + return node ? node->value : NULL; +} + +/** + * g_tree_lookup_extended: + * @tree: a #GTree + * @lookup_key: the key to look up + * @orig_key: (out) (optional) (nullable): returns the original key + * @value: (out) (optional) (nullable): returns the value associated with the key + * + * Looks up a key in the #GTree, returning the original key and the + * associated value. This is useful if you need to free the memory + * allocated for the original key, for example before calling + * g_tree_remove(). + * + * Returns: %TRUE if the key was found in the #GTree + */ +gboolean g_tree_lookup_extended (GTree *tree, + gconstpointer lookup_key, + gpointer *orig_key, + gpointer *value) +{ + GTreeNode *node; + + g_return_val_if_fail (tree != NULL, FALSE); + + node = g_tree_find_node (tree, lookup_key); + + if (node) + { + if (orig_key) + *orig_key = node->key; + if (value) + *value = node->value; + return TRUE; + } + else + return FALSE; +} + +/** + * g_tree_foreach: + * @tree: a #GTree + * @func: the function to call for each node visited. + * If this function returns %TRUE, the traversal is stopped. 
+ * @user_data: user data to pass to the function + * + * Calls the given function for each of the key/value pairs in the #GTree. + * The function is passed the key and value of each pair, and the given + * @data parameter. The tree is traversed in sorted order. + * + * The tree may not be modified while iterating over it (you can't + * add/remove items). To remove all items matching a predicate, you need + * to add each item to a list in your #GTraverseFunc as you walk over + * the tree, then walk the list and remove each item. + */ +void g_tree_foreach (GTree *tree, GTraverseFunc func, gpointer user_data) +{ + GTreeNode *node; + + g_return_if_fail (tree != NULL); + + if (!tree->root) + return; + + node = g_tree_first_node (tree); + + while (node) + { + if ((*func) (node->key, node->value, user_data)) + break; + + node = g_tree_node_next (node); + } +} + +/** + * g_tree_traverse: + * @tree: a #GTree + * @traverse_func: the function to call for each node visited. If this + * function returns %TRUE, the traversal is stopped. + * @traverse_type: the order in which nodes are visited, one of %G_IN_ORDER, + * %G_PRE_ORDER and %G_POST_ORDER + * @user_data: user data to pass to the function + * + * Calls the given function for each node in the #GTree. + * + * Deprecated:2.2: The order of a balanced tree is somewhat arbitrary. + * If you just want to visit all nodes in sorted order, use + * g_tree_foreach() instead. If you really need to visit nodes in + * a different order, consider using an [n-ary tree][glib-N-ary-Trees]. + */ +/** + * GTraverseFunc: + * @key: a key of a #GTree node + * @value: the value corresponding to the key + * @data: user data passed to g_tree_traverse() + * + * Specifies the type of function passed to g_tree_traverse(). It is + * passed the key and value of each node, together with the @user_data + * parameter passed to g_tree_traverse(). If the function returns + * %TRUE, the traversal is stopped. + * + * Returns: %TRUE to stop the traversal + */ +void g_tree_traverse (GTree *tree, + GTraverseFunc traverse_func, + GTraverseType traverse_type, + gpointer user_data) +{ + g_return_if_fail (tree != NULL); + + if (!tree->root) + return; + + switch (traverse_type) + { + case G_PRE_ORDER: + g_tree_node_pre_order (tree->root, traverse_func, user_data); + break; + + case G_IN_ORDER: + g_tree_node_in_order (tree->root, traverse_func, user_data); + break; + + case G_POST_ORDER: + g_tree_node_post_order (tree->root, traverse_func, user_data); + break; + + case G_LEVEL_ORDER: + //g_warning ("g_tree_traverse(): traverse type G_LEVEL_ORDER isn't implemented."); + break; + } +} + +/** + * g_tree_search: + * @tree: a #GTree + * @search_func: a function used to search the #GTree + * @user_data: the data passed as the second argument to @search_func + * + * Searches a #GTree using @search_func. + * + * The @search_func is called with a pointer to the key of a key/value + * pair in the tree, and the passed in @user_data. If @search_func returns + * 0 for a key/value pair, then the corresponding value is returned as + * the result of g_tree_search(). If @search_func returns -1, searching + * will proceed among the key/value pairs that have a smaller key; if + * @search_func returns 1, searching will proceed among the key/value + * pairs that have a larger key. 
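+ *
+ * For example, a @search_func for string keys could look like this
+ * (an illustrative sketch; note the argument order, the tree's key
+ * comes first):
+ * |[<!-- language="C" -->
+ * static gint
+ * search_string_key (gconstpointer tree_key, gconstpointer target)
+ * {
+ *   return strcmp (target, tree_key);
+ * }
+ * ]|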
+ * + * Returns: the value corresponding to the found key, or %NULL + * if the key was not found + */ +gpointer g_tree_search (GTree *tree, + GCompareFunc search_func, + gconstpointer user_data) +{ + g_return_val_if_fail (tree != NULL, NULL); + + if (tree->root) + return g_tree_node_search (tree->root, search_func, user_data); + else + return NULL; +} + +/** + * g_tree_height: + * @tree: a #GTree + * + * Gets the height of a #GTree. + * + * If the #GTree contains no nodes, the height is 0. + * If the #GTree contains only one root node the height is 1. + * If the root node has children the height is 2, etc. + * + * Returns: the height of @tree + */ +gint g_tree_height (GTree *tree) +{ + GTreeNode *node; + gint height; + + g_return_val_if_fail (tree != NULL, 0); + + if (!tree->root) + return 0; + + height = 0; + node = tree->root; + + while (1) + { + height += 1 + MAX(node->balance, 0); + + if (!node->left_child) + return height; + + node = node->left; + } +} + +/** + * g_tree_nnodes: + * @tree: a #GTree + * + * Gets the number of nodes in a #GTree. + * + * Returns: the number of nodes in @tree + */ +gint g_tree_nnodes (GTree *tree) +{ + g_return_val_if_fail (tree != NULL, 0); + + return tree->nnodes; +} + +static GTreeNode *g_tree_node_balance (GTreeNode *node) +{ + if (node->balance < -1) + { + if (node->left->balance > 0) + node->left = g_tree_node_rotate_left (node->left); + node = g_tree_node_rotate_right (node); + } + else if (node->balance > 1) + { + if (node->right->balance < 0) + node->right = g_tree_node_rotate_right (node->right); + node = g_tree_node_rotate_left (node); + } + + return node; +} + +static GTreeNode *g_tree_find_node (GTree *tree, gconstpointer key) +{ + GTreeNode *node; + gint cmp; + + node = tree->root; + if (!node) + return NULL; + + while (1) + { + cmp = tree->key_compare (key, node->key, tree->key_compare_data); + if (cmp == 0) + return node; + else if (cmp < 0) + { + if (!node->left_child) + return NULL; + + node = node->left; + } + else + { + if (!node->right_child) + return NULL; + + node = node->right; + } + } +} + +static gint g_tree_node_pre_order (GTreeNode *node, GTraverseFunc traverse_func, gpointer data) +{ + if ((*traverse_func) (node->key, node->value, data)) + return TRUE; + + if (node->left_child) + { + if (g_tree_node_pre_order (node->left, traverse_func, data)) + return TRUE; + } + + if (node->right_child) + { + if (g_tree_node_pre_order (node->right, traverse_func, data)) + return TRUE; + } + + return FALSE; +} + +static gint g_tree_node_in_order (GTreeNode *node, GTraverseFunc traverse_func, gpointer data) +{ + if (node->left_child) + { + if (g_tree_node_in_order (node->left, traverse_func, data)) + return TRUE; + } + + if ((*traverse_func) (node->key, node->value, data)) + return TRUE; + + if (node->right_child) + { + if (g_tree_node_in_order (node->right, traverse_func, data)) + return TRUE; + } + + return FALSE; +} + +static gint g_tree_node_post_order (GTreeNode *node, + GTraverseFunc traverse_func, + gpointer data) +{ + if (node->left_child) + { + if (g_tree_node_post_order (node->left, traverse_func, data)) + return TRUE; + } + + if (node->right_child) + { + if (g_tree_node_post_order (node->right, traverse_func, data)) + return TRUE; + } + + if ((*traverse_func) (node->key, node->value, data)) + return TRUE; + + return FALSE; +} + +static gpointer g_tree_node_search (GTreeNode *node, + GCompareFunc search_func, + gconstpointer data) +{ + gint dir; + + if (!node) + return NULL; + + while (1) + { + dir = (* search_func) (node->key, data); 
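+      /* dir < 0 means the sought key sorts before this node's key, so
+       * descend left; dir > 0 means it sorts after, so descend right. */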
+ if (dir == 0) + return node->value; + else if (dir < 0) + { + if (!node->left_child) + return NULL; + + node = node->left; + } + else + { + if (!node->right_child) + return NULL; + + node = node->right; + } + } +} + +static GTreeNode *g_tree_node_rotate_left (GTreeNode *node) +{ + GTreeNode *right; + gint a_bal; + gint b_bal; + + right = node->right; + + if (right->left_child) + node->right = right->left; + else + { + node->right_child = FALSE; + right->left_child = TRUE; + } + right->left = node; + + a_bal = node->balance; + b_bal = right->balance; + + if (b_bal <= 0) + { + if (a_bal >= 1) + right->balance = b_bal - 1; + else + right->balance = a_bal + b_bal - 2; + node->balance = a_bal - 1; + } + else + { + if (a_bal <= b_bal) + right->balance = a_bal - 2; + else + right->balance = b_bal - 1; + node->balance = a_bal - b_bal - 1; + } + + return right; +} + +static GTreeNode *g_tree_node_rotate_right (GTreeNode *node) +{ + GTreeNode *left; + gint a_bal; + gint b_bal; + + left = node->left; + + if (left->right_child) + node->left = left->right; + else + { + node->left_child = FALSE; + left->right_child = TRUE; + } + left->right = node; + + a_bal = node->balance; + b_bal = left->balance; + + if (b_bal <= 0) + { + if (b_bal > a_bal) + left->balance = b_bal + 1; + else + left->balance = a_bal + 2; + node->balance = a_bal - b_bal + 1; + } + else + { + if (a_bal <= -1) + left->balance = b_bal + 1; + else + left->balance = a_bal + b_bal + 2; + node->balance = a_bal + 1; + } + + return left; +} diff --git a/glib_compat/gtree.h b/glib_compat/gtree.h new file mode 100644 index 00000000..f62dbe08 --- /dev/null +++ b/glib_compat/gtree.h @@ -0,0 +1,55 @@ +/* GLIB - Library of useful routines for C programming + * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* + * Modified by the GLib Team and others 1997-2000. See the AUTHORS + * file for a list of people on the GLib Team. See the ChangeLog + * files for a list of changes. These files are distributed with + * GLib at ftp://ftp.gtk.org/pub/gtk/. 
+ */ + +#ifndef __G_TREE_H__ +#define __G_TREE_H__ + +typedef struct _GTree GTree; + +typedef gboolean (*GTraverseFunc) (gpointer key, gpointer value, gpointer data); + +/* Balanced binary trees + */ +GTree* g_tree_new (GCompareFunc key_compare_func); + +GTree* g_tree_new_full (GCompareDataFunc key_compare_func, + gpointer key_compare_data, + GDestroyNotify key_destroy_func, + GDestroyNotify value_destroy_func); + +GTree* g_tree_ref (GTree *tree); + +void g_tree_destroy (GTree *tree); + +void g_tree_insert (GTree *tree, gpointer key, gpointer value); + +gboolean g_tree_remove (GTree *tree, gconstpointer key); + +gpointer g_tree_lookup (GTree *tree, gconstpointer key); + +void g_tree_foreach (GTree *tree, GTraverseFunc func, gpointer user_data); + +gint g_tree_nnodes (GTree *tree); + +#endif /* __G_TREE_H__ */ diff --git a/glib_compat/gtypes.h b/glib_compat/gtypes.h new file mode 100644 index 00000000..c17b3f66 --- /dev/null +++ b/glib_compat/gtypes.h @@ -0,0 +1,80 @@ +/* GLIB - Library of useful routines for C programming + * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* + * Modified by the GLib Team and others 1997-2000. See the AUTHORS + * file for a list of people on the GLib Team. See the ChangeLog + * files for a list of changes. These files are distributed with + * GLib at ftp://ftp.gtk.org/pub/gtk/. + */ + +#ifndef __G_TYPES_H__ +#define __G_TYPES_H__ + +#include +#include +#include + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +#define MAX(a, b) (((a) > (b)) ? 
(a) : (b)) + +/* typedefs for glib related types that may still be referenced */ +typedef void* gpointer; + +typedef const void *gconstpointer; + +typedef int gint; +typedef uint8_t guint8; +typedef int8_t gint8; +typedef uint16_t guint16; +typedef int16_t gint16; +typedef uint32_t guint32; +typedef int32_t gint32; +typedef uint64_t guint64; +typedef int64_t gint64; +typedef unsigned int guint; +typedef char gchar; +typedef int gboolean; +typedef unsigned long gulong; +typedef unsigned long gsize; + +typedef gint grefcount; + +typedef volatile gint gatomicrefcount; + +typedef void (*GDestroyNotify) (gpointer data); + +typedef gint (*GCompareFunc) (gconstpointer a, gconstpointer b); + +typedef gint (*GCompareDataFunc) (gconstpointer a, gconstpointer b, gpointer user_data); + +typedef guint (*GHashFunc) (gconstpointer key); + +typedef gboolean (*GEqualFunc) (gconstpointer a, gconstpointer b); + +typedef void (*GHFunc) (gpointer key, gpointer value, gpointer user_data); + +typedef gpointer (*GCopyFunc) (gconstpointer src, gpointer data); + +#endif /* __G_TYPES_H__ */ diff --git a/include/qemu.h b/include/qemu.h index 0d7b5dd2..337f4f8f 100644 --- a/include/qemu.h +++ b/include/qemu.h @@ -1,4 +1,5 @@ /* By Dang Hoang Vu , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ #ifndef UC_QEMU_H #define UC_QEMU_H @@ -13,25 +14,23 @@ struct uc_struct; #include "exec/memory.h" #include "qemu/thread.h" -#include "include/qom/cpu.h" +#include "hw/core/cpu.h" #include "vl.h" -// This two struct is originally from qemu/include/exec/cpu-all.h +// This struct is originally from qemu/include/exec/ramblock.h // Temporarily moved here since there is circular inclusion. -typedef struct RAMBlock { +struct RAMBlock { struct MemoryRegion *mr; uint8_t *host; ram_addr_t offset; - ram_addr_t length; + ram_addr_t used_length; + ram_addr_t max_length; uint32_t flags; - char idstr[256]; - /* Reads can take either the iothread or the ramlist lock. - * Writes must take both locks. - */ - QTAILQ_ENTRY(RAMBlock) next; - int fd; -} RAMBlock; + /* RCU-enabled, writes protected by the ramlist lock */ + QLIST_ENTRY(RAMBlock) next; + size_t page_size; +}; typedef struct { MemoryRegion *mr; @@ -40,12 +39,10 @@ typedef struct { hwaddr len; } BounceBuffer; +// This struct is originally from qemu/include/exec/ramlist.h typedef struct RAMList { - /* Protected by the iothread lock. */ - unsigned long *dirty_memory[DIRTY_MEMORY_NUM]; RAMBlock *mru_block; - QTAILQ_HEAD(, RAMBlock) blocks; - uint32_t version; + QLIST_HEAD(, RAMBlock) blocks; } RAMList; #endif diff --git a/include/uc_priv.h b/include/uc_priv.h index 79d815cb..8c564396 100644 --- a/include/uc_priv.h +++ b/include/uc_priv.h @@ -1,5 +1,7 @@ /* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + #ifndef UC_PRIV_H #define UC_PRIV_H @@ -14,16 +16,17 @@ // These are masks of supported modes for each cpu/arch. // They should be updated when changes are made to the uc_mode enum typedef. 
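The mask macros that follow spell out, per architecture, which uc_mode bits the corresponding backend accepts. A sketch of how such a mask gates a requested mode, assuming the equivalent check that uc_open() applies internally (the helper name is illustrative, not part of the patch):

    #include "uc_priv.h"

    /* Hypothetical helper: any set bit outside the arch's mask is a mode
       flag the backend does not support. */
    static uc_err check_arm_mode(uc_mode mode)
    {
        if (mode & ~UC_MODE_ARM_MASK) {
            return UC_ERR_MODE;  /* e.g. UC_MODE_RISCV64 on an ARM engine */
        }
        return UC_ERR_OK;        /* e.g. UC_MODE_ARM | UC_MODE_THUMB */
    }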
#define UC_MODE_ARM_MASK (UC_MODE_ARM|UC_MODE_THUMB|UC_MODE_LITTLE_ENDIAN|UC_MODE_MCLASS \ - |UC_MODE_ARM926|UC_MODE_ARM946|UC_MODE_ARM1176|UC_MODE_BIG_ENDIAN|UC_MODE_ARMBE8) + |UC_MODE_ARM926|UC_MODE_ARM946|UC_MODE_ARM1176|UC_MODE_BIG_ENDIAN) #define UC_MODE_MIPS_MASK (UC_MODE_MIPS32|UC_MODE_MIPS64|UC_MODE_LITTLE_ENDIAN|UC_MODE_BIG_ENDIAN) #define UC_MODE_X86_MASK (UC_MODE_16|UC_MODE_32|UC_MODE_64|UC_MODE_LITTLE_ENDIAN) -#define UC_MODE_PPC_MASK (UC_MODE_PPC64|UC_MODE_BIG_ENDIAN) +#define UC_MODE_PPC_MASK (UC_MODE_PPC32|UC_MODE_PPC64|UC_MODE_BIG_ENDIAN) #define UC_MODE_SPARC_MASK (UC_MODE_SPARC32|UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN) #define UC_MODE_M68K_MASK (UC_MODE_BIG_ENDIAN) +#define UC_MODE_RISCV_MASK (UC_MODE_RISCV32|UC_MODE_RISCV64|UC_MODE_LITTLE_ENDIAN) #define ARR_SIZE(a) (sizeof(a)/sizeof(a[0])) -#define READ_QWORD(x) ((uint64)x) +#define READ_QWORD(x) ((uint64_t)x) #define READ_DWORD(x) (x & 0xffffffff) #define READ_WORD(x) (x & 0xffff) #define READ_BYTE_H(x) ((x & 0xffff) >> 8) @@ -34,20 +37,19 @@ #define WRITE_BYTE_L(x, b) (x = (x & ~0xff) | (b & 0xff)) -typedef struct ModuleEntry { - void (*init)(void); - QTAILQ_ENTRY(ModuleEntry) node; - module_init_type type; -} ModuleEntry; - -typedef QTAILQ_HEAD(, ModuleEntry) ModuleTypeList; - typedef uc_err (*query_t)(struct uc_struct *uc, uc_query_type type, size_t *result); // return 0 on success, -1 on failure typedef int (*reg_read_t)(struct uc_struct *uc, unsigned int *regs, void **vals, int count); typedef int (*reg_write_t)(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); +typedef int (*context_reg_read_t)(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +typedef int (*context_reg_write_t)(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); +typedef struct { + context_reg_read_t context_reg_read; + context_reg_write_t context_reg_write; +} context_reg_rw_t; + typedef void (*reg_reset_t)(struct uc_struct *uc); typedef bool (*uc_write_mem_t)(AddressSpace *as, hwaddr addr, const uint8_t *buf, int len); @@ -57,9 +59,7 @@ typedef bool (*uc_read_mem_t)(AddressSpace *as, hwaddr addr, uint8_t *buf, int l typedef void (*uc_args_void_t)(void*); typedef void (*uc_args_uc_t)(struct uc_struct*); -typedef int (*uc_args_int_uc_t)(struct uc_struct*); - -typedef bool (*uc_args_tcg_enable_t)(struct uc_struct*); +typedef void (*uc_args_int_uc_t)(struct uc_struct*); typedef void (*uc_args_uc_long_t)(struct uc_struct*, unsigned long); @@ -73,8 +73,12 @@ typedef void (*uc_mem_unmap_t)(struct uc_struct*, MemoryRegion *mr); typedef void (*uc_readonly_mem_t)(MemoryRegion *mr, bool readonly); +typedef int (*uc_cpus_init)(struct uc_struct *, const char *); + +typedef MemoryRegion* (*uc_memory_map_io_t)(struct uc_struct *uc, ram_addr_t begin, size_t size, uc_cb_mmio_read_t read_cb, uc_cb_mmio_write_t write_cb, void *user_data_read, void *user_data_write); + // which interrupt should make emulation stop? 
-typedef bool (*uc_args_int_t)(int intno); +typedef bool (*uc_args_int_t)(struct uc_struct *uc, int intno); // some architecture redirect virtual memory to physical memory like Mips typedef uint64_t (*uc_mem_redirect_t)(uint64_t address); @@ -82,6 +86,15 @@ typedef uint64_t (*uc_mem_redirect_t)(uint64_t address); // validate if Unicorn supports hooking a given instruction typedef bool(*uc_insn_hook_validate)(uint32_t insn_enum); +// init target page +typedef void (*uc_target_page_init)(struct uc_struct *); + +// soft float init +typedef void (*uc_softfloat_initialize)(void); + +// tcg flush softmmu tlb +typedef void (*uc_tcg_flush_tlb)(struct uc_struct *uc); + struct hook { int type; // UC_HOOK_* int insn; // instruction for HOOK_INSN @@ -93,8 +106,12 @@ struct hook { }; // hook list offsets +// +// The lowest 6 bits are used for hook type index while the others +// are used for hook flags. +// // mirrors the order of uc_hook_type from include/unicorn/unicorn.h -enum uc_hook_idx { +typedef enum uc_hook_idx { UC_HOOK_INTR_IDX, UC_HOOK_INSN_IDX, UC_HOOK_CODE_IDX, @@ -112,7 +129,16 @@ enum uc_hook_idx { UC_HOOK_INSN_INVALID_IDX, UC_HOOK_MAX, -}; +} uc_hook_idx; + +// The lowest 6 bits are used for hook type index. +#define UC_HOOK_IDX_MASK ((1<<6)-1) + +// hook flags +#define UC_HOOK_FLAG_NO_STOP (1 << 6) // Don't stop emulation in this uc_tracecode. + +// The rest of bits are reserved for hook flags. +#define UC_HOOK_FLAG_MASK (~(UC_HOOK_IDX_MASK)) #define HOOK_FOREACH_VAR_DECLARE \ struct list_item *cur @@ -145,11 +171,15 @@ static inline bool _hook_exists_bounded(struct list_item *cur, uint64_t addr) //relloc increment, KEEP THIS A POWER OF 2! #define MEM_BLOCK_INCR 32 +typedef struct TargetPageBits TargetPageBits; +typedef struct TCGContext TCGContext; + struct uc_struct { uc_arch arch; uc_mode mode; uc_err errnum; // qemu/cpu-exec.c - AddressSpace as; + AddressSpace address_space_memory; + AddressSpace address_space_io; query_t query; reg_read_t reg_read; reg_write_t reg_write; @@ -160,56 +190,55 @@ struct uc_struct { uc_args_void_t release; // release resource when uc_close() uc_args_uc_u64_t set_pc; // set PC for tracecode uc_args_int_t stop_interrupt; // check if the interrupt should stop emulation + uc_memory_map_io_t memory_map_io; uc_args_uc_t init_arch, cpu_exec_init_all; uc_args_int_uc_t vm_start; - uc_args_tcg_enable_t tcg_enabled; uc_args_uc_long_t tcg_exec_init; uc_args_uc_ram_size_t memory_map; uc_args_uc_ram_size_ptr_t memory_map_ptr; uc_mem_unmap_t memory_unmap; uc_readonly_mem_t readonly_mem; uc_mem_redirect_t mem_redirect; - // TODO: remove current_cpu, as it's a flag for something else ("cpu running"?) - CPUState *cpu, *current_cpu; + uc_cpus_init cpus_init; + uc_target_page_init target_page; + uc_softfloat_initialize softfloat_initialize; + uc_tcg_flush_tlb tcg_flush_tlb; + + /* only 1 cpu in unicorn, + do not need current_cpu to handle current running cpu. 
*/ + CPUState *cpu; uc_insn_hook_validate insn_hook_validate; MemoryRegion *system_memory; // qemu/exec.c - MemoryRegion io_mem_rom; // qemu/exec.c - MemoryRegion io_mem_notdirty; // qemu/exec.c + MemoryRegion *system_io; // qemu/exec.c MemoryRegion io_mem_unassigned; // qemu/exec.c - MemoryRegion io_mem_watch; // qemu/exec.c RAMList ram_list; // qemu/exec.c + /* qemu/exec.c */ + unsigned int alloc_hint; + /* qemu/exec-vary.c */ + TargetPageBits *init_target_page; BounceBuffer bounce; // qemu/cpu-exec.c volatile sig_atomic_t exit_request; // qemu/cpu-exec.c - bool global_dirty_log; // qemu/memory.c + /* qemu/accel/tcg/cpu-exec-common.c */ + /* always true after tcg_exec_init() has been called. */ + bool tcg_allowed; /* This is a multi-level map on the virtual address space. The bottom level has pointers to PageDesc. */ - void **l1_map; // qemu/translate-all.c + void **l1_map; // qemu/accel/tcg/translate-all.c size_t l1_map_size; + /* qemu/accel/tcg/translate-all.c */ + int v_l1_size; + int v_l1_shift; + int v_l2_levels; /* code generation context */ - void *tcg_ctx; // for "TCGContext tcg_ctx" in qemu/translate-all.c + TCGContext *tcg_ctx; /* memory.c */ - unsigned memory_region_transaction_depth; - bool memory_region_update_pending; - bool ioeventfd_update_pending; QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners; QTAILQ_HEAD(, AddressSpace) address_spaces; - MachineState *machine_state; - // qom/object.c - GHashTable *type_table; - Type type_interface; - Object *root; - Object *owner; - bool enumerating_types; - // util/module.c - ModuleTypeList init_type_list[MODULE_INIT_MAX]; - // hw/intc/apic_common.c - DeviceState *vapic; - int apic_no; - bool mmio_registered; - bool apic_report_tpr_access; + GHashTable *flat_views; + bool memory_region_update_pending; // linked lists containing hooks per type struct list hook[UC_HOOK_MAX]; @@ -221,8 +250,6 @@ struct uc_struct { size_t emu_counter; // current counter of uc_emu_start() size_t emu_count; // save counter of uc_emu_start() - uint64_t block_addr; // save the last block address we hooked - int size_recur_mem; // size for mem access when in a recursive call bool init_tcg; // already initialized local TCGv variables? @@ -239,19 +266,22 @@ uint64_t addr_end; // address where emulation stops (@end param of uc_emu_start()) int thumb; // thumb mode for ARM - int bswap_code; // For mixed endian mode - // full TCG cache leads to middle-block break in the last translation? - bool block_full; - int size_arg; // what tcg arg slot do we need to update with the size of the block? MemoryRegion **mapped_blocks; uint32_t mapped_block_count; uint32_t mapped_block_cache_index; void *qemu_thread_data; // to support cross compile to Windows (qemu-thread-win32.c) uint32_t target_page_size; uint32_t target_page_align; + uint64_t qemu_host_page_size; + uint64_t qemu_real_host_page_size; + int qemu_icache_linesize; + /* ARCH_REGS_STORAGE_SIZE */ + int cpu_context_size; uint64_t next_pc; // save next PC for some special cases bool hook_insert; // insert new hook at begin of the hook list (append by default) + bool first_tb; // is this the first Translation-Block ever generated since uc_emu_start()? struct list saved_contexts; // The contexts saved by this uc_struct. + bool no_exit_request; // Disable check_exit_request temporarily. A workaround to treat the IT block as a whole block.
}; // Metadata stub for the variable-size cpu context used with uc_context_*() @@ -259,7 +289,9 @@ struct uc_struct { struct uc_context { size_t context_size; // size of the real internal context structure size_t jmp_env_size; // size of cpu->jmp_env - struct uc_struct* uc; // the uc_struct which creates this context + uc_mode mode; // the mode of this context (uc may be free-ed already) + uc_arch arch; // the arch of this context (uc may be free-ed already) + struct uc_struct *uc; // the uc_struct which creates this context char data[0]; // context + cpu->jmp_env }; diff --git a/include/unicorn/arm.h b/include/unicorn/arm.h index a0fd83e3..9ce7b0b3 100644 --- a/include/unicorn/arm.h +++ b/include/unicorn/arm.h @@ -137,6 +137,27 @@ typedef enum uc_arm_reg { UC_ARM_REG_MSP, UC_ARM_REG_PSP, UC_ARM_REG_CONTROL, + UC_ARM_REG_IAPSR, + UC_ARM_REG_EAPSR, + UC_ARM_REG_XPSR, + UC_ARM_REG_EPSR, + UC_ARM_REG_IEPSR, + UC_ARM_REG_PRIMASK, + UC_ARM_REG_BASEPRI, + UC_ARM_REG_BASEPRI_MAX, + UC_ARM_REG_FAULTMASK, + UC_ARM_REG_APSR_NZCVQ, + UC_ARM_REG_APSR_G, + UC_ARM_REG_APSR_NZCVQG, + UC_ARM_REG_IAPSR_NZCVQ, + UC_ARM_REG_IAPSR_G, + UC_ARM_REG_IAPSR_NZCVQG, + UC_ARM_REG_EAPSR_NZCVQ, + UC_ARM_REG_EAPSR_G, + UC_ARM_REG_EAPSR_NZCVQG, + UC_ARM_REG_XPSR_NZCVQ, + UC_ARM_REG_XPSR_G, + UC_ARM_REG_XPSR_NZCVQG, UC_ARM_REG_ENDING, // <-- mark the end of the list or registers //> alias registers diff --git a/include/unicorn/platform.h b/include/unicorn/platform.h index bc3a0ad8..4f53d4d4 100644 --- a/include/unicorn/platform.h +++ b/include/unicorn/platform.h @@ -64,23 +64,6 @@ typedef unsigned int uint32_t; typedef signed long long int64_t; typedef unsigned long long uint64_t; -typedef signed char int_fast8_t; -typedef int int_fast16_t; -typedef int int_fast32_t; -typedef long long int_fast64_t; -typedef unsigned char uint_fast8_t; -typedef unsigned int uint_fast16_t; -typedef unsigned int uint_fast32_t; -typedef unsigned long long uint_fast64_t; - -#if !defined(_W64) -#if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -#define _W64 __w64 -#else -#define _W64 -#endif -#endif - #ifndef _INTPTR_T_DEFINED #define _INTPTR_T_DEFINED #ifdef _WIN64 @@ -111,36 +94,7 @@ typedef _W64 unsigned int uintptr_t; #define UINT16_MAX 0xffffui16 #define UINT32_MAX 0xffffffffui32 #define UINT64_MAX 0xffffffffffffffffui64 - -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST16_MIN INT32_MIN -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MAX INT32_MAX -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT32_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX - -#ifdef _WIN64 -#define INTPTR_MIN INT64_MIN -#define INTPTR_MAX INT64_MAX -#define UINTPTR_MAX UINT64_MAX -#else /* _WIN64 */ -#define INTPTR_MIN INT32_MIN -#define INTPTR_MAX INT32_MAX -#define UINTPTR_MAX UINT32_MAX -#endif /* _WIN64 */ - #else // this system has stdint.h - -#if defined(_MSC_VER) && (_MSC_VER == MSC_VER_VS2010) -#define _INTPTR 2 -#endif - #include #endif // (defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2010)) || defined(_KERNEL_MODE) @@ -200,6 +154,7 @@ typedef _W64 unsigned int uintptr_t; // sys/time.h compatibility #if defined(_MSC_VER) +#include #include #include #include diff --git a/include/unicorn/ppc.h b/include/unicorn/ppc.h new file mode 100644 index 00000000..9fded44c --- /dev/null +++ b/include/unicorn/ppc.h @@ -0,0 +1,62 @@ +/* Unicorn Engine */ +/* By 
Nguyen Anh Quynh , 2015-2017 */ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_PPC_H +#define UNICORN_PPC_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> PPC registers +typedef enum uc_ppc_reg { + UC_PPC_REG_INVALID = 0, + //> General purpose registers + UC_PPC_REG_PC, + + UC_PPC_REG_0, + UC_PPC_REG_1, + UC_PPC_REG_2, + UC_PPC_REG_3, + UC_PPC_REG_4, + UC_PPC_REG_5, + UC_PPC_REG_6, + UC_PPC_REG_7, + UC_PPC_REG_8, + UC_PPC_REG_9, + UC_PPC_REG_10, + UC_PPC_REG_11, + UC_PPC_REG_12, + UC_PPC_REG_13, + UC_PPC_REG_14, + UC_PPC_REG_15, + UC_PPC_REG_16, + UC_PPC_REG_17, + UC_PPC_REG_18, + UC_PPC_REG_19, + UC_PPC_REG_20, + UC_PPC_REG_21, + UC_PPC_REG_22, + UC_PPC_REG_23, + UC_PPC_REG_24, + UC_PPC_REG_25, + UC_PPC_REG_26, + UC_PPC_REG_27, + UC_PPC_REG_28, + UC_PPC_REG_29, + UC_PPC_REG_30, + UC_PPC_REG_31, +} uc_ppc_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/unicorn/riscv.h b/include/unicorn/riscv.h new file mode 100644 index 00000000..42a26627 --- /dev/null +++ b/include/unicorn/riscv.h @@ -0,0 +1,167 @@ +/* Unicorn Engine */ +/* By Nguyen Anh Quynh , 2015-2020 */ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details + */ + +#ifndef UNICORN_RISCV_H +#define UNICORN_RISCV_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> RISCV registers +typedef enum uc_riscv_reg { + UC_RISCV_REG_INVALID = 0, + //> General purpose registers + UC_RISCV_REG_X0, + UC_RISCV_REG_X1, + UC_RISCV_REG_X2, + UC_RISCV_REG_X3, + UC_RISCV_REG_X4, + UC_RISCV_REG_X5, + UC_RISCV_REG_X6, + UC_RISCV_REG_X7, + UC_RISCV_REG_X8, + UC_RISCV_REG_X9, + UC_RISCV_REG_X10, + UC_RISCV_REG_X11, + UC_RISCV_REG_X12, + UC_RISCV_REG_X13, + UC_RISCV_REG_X14, + UC_RISCV_REG_X15, + UC_RISCV_REG_X16, + UC_RISCV_REG_X17, + UC_RISCV_REG_X18, + UC_RISCV_REG_X19, + UC_RISCV_REG_X20, + UC_RISCV_REG_X21, + UC_RISCV_REG_X22, + UC_RISCV_REG_X23, + UC_RISCV_REG_X24, + UC_RISCV_REG_X25, + UC_RISCV_REG_X26, + UC_RISCV_REG_X27, + UC_RISCV_REG_X28, + UC_RISCV_REG_X29, + UC_RISCV_REG_X30, + UC_RISCV_REG_X31, + + //> Floating-point registers + UC_RISCV_REG_F0, // "ft0" + UC_RISCV_REG_F1, // "ft1" + UC_RISCV_REG_F2, // "ft2" + UC_RISCV_REG_F3, // "ft3" + UC_RISCV_REG_F4, // "ft4" + UC_RISCV_REG_F5, // "ft5" + UC_RISCV_REG_F6, // "ft6" + UC_RISCV_REG_F7, // "ft7" + UC_RISCV_REG_F8, // "fs0" + UC_RISCV_REG_F9, // "fs1" + UC_RISCV_REG_F10, // "fa0" + UC_RISCV_REG_F11, // "fa1" + UC_RISCV_REG_F12, // "fa2" + UC_RISCV_REG_F13, // "fa3" + UC_RISCV_REG_F14, // "fa4" + UC_RISCV_REG_F15, // "fa5" + UC_RISCV_REG_F16, // "fa6" + UC_RISCV_REG_F17, // "fa7" + UC_RISCV_REG_F18, // "fs2" + UC_RISCV_REG_F19, // "fs3" + UC_RISCV_REG_F20, // "fs4" + UC_RISCV_REG_F21, // "fs5" + UC_RISCV_REG_F22, // "fs6" + UC_RISCV_REG_F23, // "fs7" + UC_RISCV_REG_F24, // "fs8" + UC_RISCV_REG_F25, // "fs9" + UC_RISCV_REG_F26, // "fs10" + UC_RISCV_REG_F27, // "fs11" + UC_RISCV_REG_F28, // "ft8" + UC_RISCV_REG_F29, // "ft9" + UC_RISCV_REG_F30, // "ft10" + UC_RISCV_REG_F31, // "ft11" + + UC_RISCV_REG_PC, // PC register + + UC_RISCV_REG_ENDING, // <-- mark the end of the list or registers + + //> Alias registers + UC_RISCV_REG_ZERO = UC_RISCV_REG_X0, // "zero" + UC_RISCV_REG_RA = UC_RISCV_REG_X1, // "ra" + UC_RISCV_REG_SP = UC_RISCV_REG_X2, // "sp" + UC_RISCV_REG_GP = UC_RISCV_REG_X3, // "gp" + UC_RISCV_REG_TP = UC_RISCV_REG_X4, // "tp" + 
UC_RISCV_REG_T0 = UC_RISCV_REG_X5, // "t0" + UC_RISCV_REG_T1 = UC_RISCV_REG_X6, // "t1" + UC_RISCV_REG_T2 = UC_RISCV_REG_X7, // "t2" + UC_RISCV_REG_S0 = UC_RISCV_REG_X8, // "s0" + UC_RISCV_REG_FP = UC_RISCV_REG_X8, // "fp" + UC_RISCV_REG_S1 = UC_RISCV_REG_X9, // "s1" + UC_RISCV_REG_A0 = UC_RISCV_REG_X10, // "a0" + UC_RISCV_REG_A1 = UC_RISCV_REG_X11, // "a1" + UC_RISCV_REG_A2 = UC_RISCV_REG_X12, // "a2" + UC_RISCV_REG_A3 = UC_RISCV_REG_X13, // "a3" + UC_RISCV_REG_A4 = UC_RISCV_REG_X14, // "a4" + UC_RISCV_REG_A5 = UC_RISCV_REG_X15, // "a5" + UC_RISCV_REG_A6 = UC_RISCV_REG_X16, // "a6" + UC_RISCV_REG_A7 = UC_RISCV_REG_X17, // "a7" + UC_RISCV_REG_S2 = UC_RISCV_REG_X18, // "s2" + UC_RISCV_REG_S3 = UC_RISCV_REG_X19, // "s3" + UC_RISCV_REG_S4 = UC_RISCV_REG_X20, // "s4" + UC_RISCV_REG_S5 = UC_RISCV_REG_X21, // "s5" + UC_RISCV_REG_S6 = UC_RISCV_REG_X22, // "s6" + UC_RISCV_REG_S7 = UC_RISCV_REG_X23, // "s7" + UC_RISCV_REG_S8 = UC_RISCV_REG_X24, // "s8" + UC_RISCV_REG_S9 = UC_RISCV_REG_X25, // "s9" + UC_RISCV_REG_S10 = UC_RISCV_REG_X26, // "s10" + UC_RISCV_REG_S11 = UC_RISCV_REG_X27, // "s11" + UC_RISCV_REG_T3 = UC_RISCV_REG_X28, // "t3" + UC_RISCV_REG_T4 = UC_RISCV_REG_X29, // "t4" + UC_RISCV_REG_T5 = UC_RISCV_REG_X30, // "t5" + UC_RISCV_REG_T6 = UC_RISCV_REG_X31, // "t6" + + UC_RISCV_REG_FT0 = UC_RISCV_REG_F0, // "ft0" + UC_RISCV_REG_FT1 = UC_RISCV_REG_F1, // "ft1" + UC_RISCV_REG_FT2 = UC_RISCV_REG_F2, // "ft2" + UC_RISCV_REG_FT3 = UC_RISCV_REG_F3, // "ft3" + UC_RISCV_REG_FT4 = UC_RISCV_REG_F4, // "ft4" + UC_RISCV_REG_FT5 = UC_RISCV_REG_F5, // "ft5" + UC_RISCV_REG_FT6 = UC_RISCV_REG_F6, // "ft6" + UC_RISCV_REG_FT7 = UC_RISCV_REG_F7, // "ft7" + UC_RISCV_REG_FS0 = UC_RISCV_REG_F8, // "fs0" + UC_RISCV_REG_FS1 = UC_RISCV_REG_F9, // "fs1" + + UC_RISCV_REG_FA0 = UC_RISCV_REG_F10, // "fa0" + UC_RISCV_REG_FA1 = UC_RISCV_REG_F11, // "fa1" + UC_RISCV_REG_FA2 = UC_RISCV_REG_F12, // "fa2" + UC_RISCV_REG_FA3 = UC_RISCV_REG_F13, // "fa3" + UC_RISCV_REG_FA4 = UC_RISCV_REG_F14, // "fa4" + UC_RISCV_REG_FA5 = UC_RISCV_REG_F15, // "fa5" + UC_RISCV_REG_FA6 = UC_RISCV_REG_F16, // "fa6" + UC_RISCV_REG_FA7 = UC_RISCV_REG_F17, // "fa7" + UC_RISCV_REG_FS2 = UC_RISCV_REG_F18, // "fs2" + UC_RISCV_REG_FS3 = UC_RISCV_REG_F19, // "fs3" + UC_RISCV_REG_FS4 = UC_RISCV_REG_F20, // "fs4" + UC_RISCV_REG_FS5 = UC_RISCV_REG_F21, // "fs5" + UC_RISCV_REG_FS6 = UC_RISCV_REG_F22, // "fs6" + UC_RISCV_REG_FS7 = UC_RISCV_REG_F23, // "fs7" + UC_RISCV_REG_FS8 = UC_RISCV_REG_F24, // "fs8" + UC_RISCV_REG_FS9 = UC_RISCV_REG_F25, // "fs9" + UC_RISCV_REG_FS10 = UC_RISCV_REG_F26, // "fs10" + UC_RISCV_REG_FS11 = UC_RISCV_REG_F27, // "fs11" + UC_RISCV_REG_FT8 = UC_RISCV_REG_F28, // "ft8" + UC_RISCV_REG_FT9 = UC_RISCV_REG_F29, // "ft9" + UC_RISCV_REG_FT10 = UC_RISCV_REG_F30, // "ft10" + UC_RISCV_REG_FT11 = UC_RISCV_REG_F31, // "ft11" +} uc_riscv_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/unicorn/unicorn.h b/include/unicorn/unicorn.h index 8b78aeab..d7567681 100644 --- a/include/unicorn/unicorn.h +++ b/include/unicorn/unicorn.h @@ -32,6 +32,8 @@ typedef size_t uc_hook; #include "arm64.h" #include "mips.h" #include "sparc.h" +#include "ppc.h" +#include "riscv.h" #ifdef __GNUC__ #define DEFAULT_VISIBILITY __attribute__((visibility("default"))) @@ -65,13 +67,13 @@ typedef size_t uc_hook; #endif // Unicorn API version -#define UC_API_MAJOR 1 +#define UC_API_MAJOR 2 #define UC_API_MINOR 0 // Unicorn package version #define UC_VERSION_MAJOR UC_API_MAJOR #define UC_VERSION_MINOR UC_API_MINOR -#define UC_VERSION_EXTRA 3 +#define 
UC_VERSION_EXTRA 0 /* @@ -92,9 +94,10 @@ typedef enum uc_arch { UC_ARCH_ARM64, // ARM-64, also called AArch64 UC_ARCH_MIPS, // Mips architecture UC_ARCH_X86, // X86 architecture (including x86 & x86-64) - UC_ARCH_PPC, // PowerPC architecture (currently unsupported) + UC_ARCH_PPC, // PowerPC architecture UC_ARCH_SPARC, // Sparc architecture UC_ARCH_M68K, // M68K architecture + UC_ARCH_RISCV, // RISCV architecture UC_ARCH_MAX, } uc_arch; @@ -106,7 +109,7 @@ typedef enum uc_mode { // arm / arm64 UC_MODE_ARM = 0, // ARM mode UC_MODE_THUMB = 1 << 4, // THUMB mode (including Thumb-2) - UC_MODE_MCLASS = 1 << 5, // ARM's Cortex-M series (currently unsupported) + UC_MODE_MCLASS = 1 << 5, // ARM's Cortex-M series UC_MODE_V8 = 1 << 6, // ARMv8 A32 encodings for ARM (currently unsupported) // arm (32bit) cpu types @@ -114,9 +117,6 @@ typedef enum uc_mode { UC_MODE_ARM946 = 1 << 8, // ARM946 CPU type UC_MODE_ARM1176 = 1 << 9, // ARM1176 CPU type - // ARM BE8 - UC_MODE_ARMBE8 = 1 << 10, // Big-endian data and Little-endian code - // mips UC_MODE_MICRO = 1 << 4, // MicroMips mode (currently unsupported) UC_MODE_MIPS3 = 1 << 5, // Mips III ISA (currently unsupported) @@ -130,7 +130,7 @@ typedef enum uc_mode { UC_MODE_64 = 1 << 3, // 64-bit mode // ppc - UC_MODE_PPC32 = 1 << 2, // 32-bit mode (currently unsupported) + UC_MODE_PPC32 = 1 << 2, // 32-bit mode UC_MODE_PPC64 = 1 << 3, // 64-bit mode (currently unsupported) UC_MODE_QPX = 1 << 4, // Quad Processing eXtensions mode (currently unsupported) @@ -139,6 +139,10 @@ UC_MODE_SPARC64 = 1 << 3, // 64-bit mode UC_MODE_V9 = 1 << 4, // SparcV9 mode (currently unsupported) + // riscv + UC_MODE_RISCV32 = 1 << 2, // 32-bit mode + UC_MODE_RISCV64 = 1 << 3, // 64-bit mode + // m68k } uc_mode; @@ -214,6 +218,26 @@ typedef uint32_t (*uc_cb_insn_in_t)(uc_engine *uc, uint32_t port, int size, void */ typedef void (*uc_cb_insn_out_t)(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data); +/* + Callback function for MMIO read + + @offset: offset to the base address of the IO memory. + @size: data size to read + @user_data: user data passed to uc_mmio_map() +*/ +typedef uint64_t (*uc_cb_mmio_read_t)(uc_engine *uc, uint64_t offset, unsigned size, void *user_data); + +/* + Callback function for MMIO write + + @offset: offset to the base address of the IO memory. + @size: data size to write + @value: data value to be written + @user_data: user data passed to uc_mmio_map() +*/ +typedef void (*uc_cb_mmio_write_t)(uc_engine *uc, uint64_t offset, unsigned size, uint64_t value, void *user_data); + + // All type of memory accesses for UC_HOOK_MEM_* typedef enum uc_mem_type { UC_MEM_READ = 16, // Memory is read from @@ -303,13 +327,13 @@ typedef void (*uc_cb_hookmem_t)(uc_engine *uc, uc_mem_type type, @user_data: user data passed to tracing APIs @return: return true to continue, or false to stop program (due to invalid memory). NOTE: returning true to continue execution will only work if the accessed memory is made accessible with the correct permissions during the hook. - + In the event of a UC_MEM_READ_UNMAPPED or UC_MEM_WRITE_UNMAPPED callback, the memory should be uc_mem_map()-ed with the correct permissions, and the instruction will then read or write to the address as it was supposed to. - + In the event of a UC_MEM_FETCH_UNMAPPED callback, the memory can be mapped in as executable, in which case execution will resume from the fetched address. The instruction pointer may be written to in order to change where execution resumes,
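The two MMIO callback typedefs added above are consumed by uc_mmio_map(), declared further down in this header. A minimal sketch of a conforming callback pair, modeling a made-up device whose only register is a read-only status word at offset 0 (the device layout, function names, and mapping address are illustrative, not part of the patch):

    #include <unicorn/unicorn.h>

    /* Hypothetical device model: offset 0 is a status register that always
       reads back 1 ("ready"); every other offset in the page reads as 0. */
    static uint64_t status_read_cb(uc_engine *uc, uint64_t offset,
                                   unsigned size, void *user_data)
    {
        return (offset == 0) ? 1 : 0;
    }

    /* Writes are accepted and discarded; a real model would latch @value. */
    static void status_write_cb(uc_engine *uc, uint64_t offset,
                                unsigned size, uint64_t value, void *user_data)
    {
    }

Registered with uc_mmio_map(uc, 0xf0000000, 0x1000, status_read_cb, NULL, status_write_cb, NULL), every guest load in that page is routed to status_read_cb instead of RAM, and every store to status_write_cb.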
@@ -416,7 +440,7 @@ UNICORN_EXPORT uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result); /* Report the last error number when some API function fails. Like glibc's errno, uc_errno might not retain its old value once accessed. @uc: handle returned by uc_open() @@ -528,12 +552,12 @@ uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *bytes, size_t size); @uc: handle returned by uc_open() @begin: address where emulation starts @until: address where emulation stops (i.e. when this address is hit) @timeout: duration to emulate the code (in microseconds). When this value is 0, we will emulate the code in infinite time, until the code is finished. @count: the number of instructions to be emulated. When this value is 0, we will emulate all the code available, until the code is finished. - + NOTE: The internal state of the engine is guaranteed to be correct if and only if uc_emu_start returns without any errors or errors have been handled in the callbacks. @@ -562,12 +586,12 @@ uc_err uc_emu_stop(uc_engine *uc); @uc: handle returned by uc_open() @hh: hook handle returned from this registration. To be used in uc_hook_del() API - @type: hook type, refer to uc_hook_type enum + @type: hook type @callback: callback to be run when instruction is hit @user_data: user-defined data. This will be passed to callback function in its last argument @user_data @begin: start address of the area where the callback is in effect (inclusive) @end: end address of the area where the callback is in effect (inclusive) NOTE 1: the callback is called only if the related address is in range [@begin, @end] NOTE 2: if @begin > @end, callback is called whenever this hook type is triggered @...: variable arguments (depending on @type) @@ -584,7 +608,7 @@ uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, Unregister (remove) a hook callback. This API removes the hook callback registered by uc_hook_add(). NOTE: this should be called only when you no longer want to trace. After this, @hh is invalid, and no longer usable. @uc: handle returned by uc_open() @hh: handle returned by uc_hook_add() @@ -611,7 +635,7 @@ typedef enum uc_prot { @address: starting address of the new memory region to be mapped in. This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. @size: size of the new memory region to be mapped in. This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error. @perms: Permissions for the newly mapped region. This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, or this will return with UC_ERR_ARG error. @@ -630,12 +654,12 @@ uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); @address: starting address of the new memory region to be mapped in. This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. @size: size of the new memory region to be mapped in.
This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error. @perms: Permissions for the newly mapped region. This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, or this will return with UC_ERR_ARG error. @ptr: pointer to host memory backing the newly mapped memory. This host memory is - expected to be of equal or larger size than provided, and be mapped with at + expected to be an equal or larger size than provided, and be mapped with at least PROT_READ | PROT_WRITE. If it is not, the resulting behavior is undefined. @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum @@ -644,6 +668,29 @@ uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); UNICORN_EXPORT uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr); +/* + Map MMIO in for emulation. + This API adds an MMIO region that can be used by emulation. + + @uc: handle returned by uc_open() + @address: starting address of the new MMIO region to be mapped in. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the new MMIO region to be mapped in. + This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error. + @read_cb: function for handling reads from this MMIO region. + @user_data_read: user-defined data. This will be passed to @read_cb function in its + last argument @user_data + @write_cb: function for handling writes to this MMIO region. + @user_data_write: user-defined data. This will be passed to @write_cb function in its + last argument @user_data + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). + */ +UNICORN_EXPORT +uc_err uc_mmio_map(uc_engine *uc, uint64_t address, size_t size, + uc_cb_mmio_read_t read_cb, void *user_data_read, + uc_cb_mmio_write_t write_cb, void *user_data_write); + /* Unmap a region of emulation memory. This API deletes a memory mapping from the emulation memory space. @@ -652,7 +699,7 @@ uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t per @address: starting address of the memory region to be unmapped. This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. @size: size of the memory region to be modified. This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error. @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum for detailed error). @@ -668,7 +715,7 @@ uc_err uc_mem_unmap(uc_engine *uc, uint64_t address, size_t size); @uc: handle returned by uc_open() @address: starting address of the memory region to be modified. This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. @size: size of the memory region to be modified. This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error. @perms: New permissions for the mapped region. This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, or this will return with UC_ERR_ARG error. @@ -682,8 +729,8 @@ uc_err uc_mem_protect(uc_engine *uc, uint64_t address, size_t size, uint32_t per /* Retrieve all memory regions mapped by uc_mem_map() and uc_mem_map_ptr() This API allocates memory for @regions, and user must free this memory later by uc_free() to avoid leaking memory.
NOTE: memory regions may be split by uc_mem_unmap() @uc: handle returned by uc_open() @regions: pointer to an array of uc_mem_region struct. This is allocated by @@ -703,9 +750,9 @@ uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count); differing arches or modes. @uc: handle returned by uc_open() @context: pointer to a uc_context*. This will be updated with the pointer to the new context on successful return of this function. Later, this allocated memory must be freed with uc_context_free(). @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum for detailed error). @@ -716,7 +763,7 @@ uc_err uc_context_alloc(uc_engine *uc, uc_context **context); /* Free the memory allocated by uc_mem_regions. WARNING: After Unicorn 1.0.1rc5, the memory allocated by uc_context_alloc should be freed by uc_context_free(). Calling uc_free() may still work, but the result is **undefined**. @mem: memory allocated by uc_mem_regions (returned in *regions). @@ -741,6 +788,60 @@ uc_err uc_free(void *mem); UNICORN_EXPORT uc_err uc_context_save(uc_engine *uc, uc_context *context); +/* + Write value to a register of a context. + + @ctx: handle returned by uc_context_alloc() + @regid: register ID that is to be modified. + @value: pointer to the value that will be written to register @regid + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_context_reg_write(uc_context *ctx, int regid, const void *value); + +/* + Read register value from a context. + + @ctx: handle returned by uc_context_alloc() + @regid: register ID that is to be retrieved. + @value: pointer to a variable storing the register value. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_context_reg_read(uc_context *ctx, int regid, void *value); + +/* + Write multiple register values to registers of a context. + + @ctx: handle returned by uc_context_alloc() + @regs: array of register IDs to store + @vals: pointer to array of register values + @count: length of both *regs and *vals + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_context_reg_write_batch(uc_context *ctx, int *regs, void *const *vals, int count); + +/* + Read multiple register values from a context. + + @ctx: handle returned by uc_context_alloc() + @regs: array of register IDs to retrieve + @vals: pointer to array of values to hold registers + @count: length of both *regs and *vals + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_context_reg_read_batch(uc_context *ctx, int *regs, void **vals, int count); + /* Restore the current CPU context from a saved copy.
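Together with uc_context_save() and uc_context_restore() (whose description continues below), the four declarations above let a snapshot be inspected and edited without touching the live engine. A sketch of the round trip, assuming an x86 engine and omitting error checks for brevity:

    #include <unicorn/unicorn.h>

    static void snapshot_roundtrip(uc_engine *uc)
    {
        uc_context *ctx;
        int eax = 0;

        uc_context_alloc(uc, &ctx);
        uc_context_save(uc, ctx);                 /* snapshot CPU state */

        /* New in this patch: read a register out of the snapshot itself,
           not out of the running engine. */
        uc_context_reg_read(ctx, UC_X86_REG_EAX, &eax);

        uc_context_restore(uc, ctx);              /* roll the engine back */
        uc_context_free(ctx);    /* per the WARNING above, not uc_free() */
    }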
This API should be used to roll the CPU context back to a previous diff --git a/include/unicorn/x86.h b/include/unicorn/x86.h index cd2c66db..17e51a18 100644 --- a/include/unicorn/x86.h +++ b/include/unicorn/x86.h @@ -39,7 +39,7 @@ typedef enum uc_x86_reg { UC_X86_REG_AH, UC_X86_REG_AL, UC_X86_REG_AX, UC_X86_REG_BH, UC_X86_REG_BL, UC_X86_REG_BP, UC_X86_REG_BPL, UC_X86_REG_BX, UC_X86_REG_CH, UC_X86_REG_CL, UC_X86_REG_CS, UC_X86_REG_CX, UC_X86_REG_DH, UC_X86_REG_DI, UC_X86_REG_DIL, - UC_X86_REG_DL, UC_X86_REG_DS, UC_X86_REG_DX, UC_X86_REG_EAX, UC_X86_REG_EBP, + UC_X86_REG_DL, UC_X86_REG_DS, UC_X86_REG_DX, UC_X86_REG_EAX, UC_X86_REG_EBP, UC_X86_REG_EBX, UC_X86_REG_ECX, UC_X86_REG_EDI, UC_X86_REG_EDX, UC_X86_REG_EFLAGS, UC_X86_REG_EIP, UC_X86_REG_EIZ, UC_X86_REG_ES, UC_X86_REG_ESI, UC_X86_REG_ESP, UC_X86_REG_FPSW, UC_X86_REG_FS, UC_X86_REG_GS, UC_X86_REG_IP, UC_X86_REG_RAX, @@ -91,6 +91,8 @@ typedef enum uc_x86_reg { UC_X86_REG_MXCSR, UC_X86_REG_FS_BASE, // Base regs for x86_64 UC_X86_REG_GS_BASE, + UC_X86_REG_FLAGS, + UC_X86_REG_RFLAGS, UC_X86_REG_ENDING // <-- mark the end of the list of registers } uc_x86_reg; diff --git a/make.sh b/make.sh deleted file mode 100755 index 3b876de0..00000000 --- a/make.sh +++ /dev/null @@ -1,138 +0,0 @@ -#!/bin/sh - -# Unicorn Engine -# By Nguyen Anh Quynh , 2015 - -usage() { - cat 1>&2 < C/C++ -> Advanced -> Forced Include File - so for x86-64 this is "the file "x86_64.h" which is a generated file. - - -:: Other things - -* The Qemu code for GNU/GCC seems to rely on __i386__ or __x86_64__ defined if - the host is 32bit or 64bit respectively. - So when building 32bit libs in msvc we define __i386__. - And when building 64bit libs in msvc we define __x86_64__. - -* There is a tcg-target.c for each target that is included into tcg.c. - This is done using "#include tcg-target.c" - It is NOT built separately as part of the *.c files for the project. - - -:: Info from makefiles - -This info is compiled here together to help with deciding on the build settings to use. -It may or may not be of use to anyone in the future once this all builds ok :) - -QEMU_INCLUDES=-I$(SRC_PATH)/tcg -I$(SRC_PATH)/tcg/$(ARCH) -I. -I$(SRC_PATH) -I$(SRC_PATH)/include -QEMU_CFLAGS=-m32 -D__USE_MINGW_ANSI_STDIO=1 -DWIN32_LEAN_AND_MEAN -DWINVER=0x501 -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -Wstrict-prototypes -Wredundant-decls -Wall -Wundef -Wwrite-strings -Wmissing-prototypes -fno-strict-aliasing -fno-common -DUNICORN_HAS_X86 -DUNICORN_HAS_ARM -DUNICORN_HAS_M68K -DUNICORN_HAS_ARM64 -DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL -DUNICORN_HAS_SPARC -fPIC -QEMU_CFLAGS += -I.. -I$(SRC_PATH)/target-$(TARGET_BASE_ARCH) -DNEED_CPU_H -QEMU_CFLAGS+=-I$(SRC_PATH)/include -QEMU_CFLAGS+=-include x86_64.h - - includes --I$(SRC_PATH)/tcg --I$(SRC_PATH)/tcg/$(ARCH) --I. --I$(SRC_PATH) --I$(SRC_PATH)/include --I.. 
--I$(SRC_PATH)/target-$(TARGET_BASE_ARCH) --I$(SRC_PATH)/include --include x86_64.h - - defines --D__USE_MINGW_ANSI_STDIO=1 --DWIN32_LEAN_AND_MEAN --DWINVER=0x501 --D_GNU_SOURCE --D_FILE_OFFSET_BITS=64 --D_LARGEFILE_SOURCE --DNEED_CPU_H --DUNICORN_HAS_X86 --DUNICORN_HAS_ARM --DUNICORN_HAS_M68K --DUNICORN_HAS_ARM64 --DUNICORN_HAS_MIPS --DUNICORN_HAS_MIPSEL --DUNICORN_HAS_MIPS64 --DUNICORN_HAS_MIPS64EL --DUNICORN_HAS_SPARC - - - qemu/config-host.mak - extra_cflags=-m32 -DUNICORN_HAS_X86 -DUNICORN_HAS_ARM -DUNICORN_HAS_M68K -DUNICORN_HAS_ARM64 -DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL -DUNICORN_HAS_SPARC -fPIC - extra_ldflags= - libs_softmmu= - ARCH=i386 - CONFIG_WIN32=y - CONFIG_FILEVERSION=2,2,1,0 - CONFIG_PRODUCTVERSION=2,2,1,0 - VERSION=2.2.1 - PKGVERSION= - SRC_PATH=/f/GitHub/unicorn/qemu - TARGET_DIRS=x86_64-softmmu arm-softmmu m68k-softmmu aarch64-softmmu mips-softmmu mipsel-softmmu mips64-softmmu mips64el-softmmu sparc-softmmu sparc64-softmmu - GLIB_CFLAGS=-pthread -mms-bitfields -IC:/msys64/mingw32/include/glib-2.0 -IC:/msys64/mingw32/lib/glib-2.0/include - CONFIG_ZERO_MALLOC=y - CONFIG_CPUID_H=y - CONFIG_THREAD_SETNAME_BYTHREAD=y - CONFIG_PTHREAD_SETNAME_NP=y - CFLAGS=-pthread -mms-bitfields -IC:/msys64/mingw32/include/glib-2.0 -IC:/msys64/mingw32/lib/glib-2.0/include -g - - QEMU_CFLAGS=-m32 -D__USE_MINGW_ANSI_STDIO=1 -DWIN32_LEAN_AND_MEAN -DWINVER=0x501 -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -Wstrict-prototypes -Wredundant-decls -Wall -Wundef -Wwrite-strings -Wmissing-prototypes -fno-strict-aliasing -fno-common -DUNICORN_HAS_X86 -DUNICORN_HAS_ARM -DUNICORN_HAS_M68K -DUNICORN_HAS_ARM64 -DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL -DUNICORN_HAS_SPARC -fPIC - QEMU_INCLUDES=-I$(SRC_PATH)/tcg -I$(SRC_PATH)/tcg/$(ARCH) -I. -I$(SRC_PATH) -I$(SRC_PATH)/include - LDFLAGS=-Wl,--nxcompat -Wl,--no-seh -Wl,--dynamicbase -Wl,--warn-common -m32 -g - LIBS+=-LC:/msys64/mingw32/lib -lgthread-2.0 -pthread -lglib-2.0 -lintl -lwinmm -lws2_32 -liphlpapi -lz - - - qemu/x86_64-softmmu/Makefile - QEMU_CFLAGS += -I.. 
-I$(SRC_PATH)/target-$(TARGET_BASE_ARCH) -DNEED_CPU_H - QEMU_CFLAGS+=-I$(SRC_PATH)/include - - - qemu/x86_64-softmmu/config-target.mak - TARGET_X86_64=y - TARGET_NAME=x86_64 - TARGET_BASE_ARCH=i386 - TARGET_ABI_DIR=x86_64 - CONFIG_SOFTMMU=y - LDFLAGS+= - QEMU_CFLAGS+= - QEMU_CFLAGS+=-include x86_64.h - - - qemu/x86_64-softmmu/config-devices.mak - CONFIG_VGA=y - CONFIG_QXL=$(CONFIG_SPICE) - CONFIG_VGA_PCI=y - CONFIG_VGA_ISA=y - CONFIG_VGA_CIRRUS=y - CONFIG_VMWARE_VGA=y - CONFIG_VMMOUSE=y - CONFIG_SERIAL=y - CONFIG_PARALLEL=y - CONFIG_I8254=y - CONFIG_PCSPK=y - CONFIG_PCKBD=y - CONFIG_FDC=y - CONFIG_ACPI=y - CONFIG_APM=y - CONFIG_I8257=y - CONFIG_IDE_ISA=y - CONFIG_IDE_PIIX=y - CONFIG_NE2000_ISA=y - CONFIG_PIIX_PCI=y - CONFIG_HPET=y - CONFIG_APPLESMC=y - CONFIG_I8259=y - CONFIG_PFLASH_CFI01=y - CONFIG_TPM_TIS=$(CONFIG_TPM) - CONFIG_PCI_HOTPLUG_OLD=y - CONFIG_MC146818RTC=y - CONFIG_PAM=y - CONFIG_PCI_PIIX=y - CONFIG_WDT_IB700=y - CONFIG_XEN_I386=$(CONFIG_XEN) - CONFIG_ISA_DEBUG=y - CONFIG_ISA_TESTDEV=y - CONFIG_VMPORT=y - CONFIG_SGA=y - CONFIG_LPC_ICH9=y - CONFIG_PCI_Q35=y - CONFIG_APIC=y - CONFIG_IOAPIC=y - CONFIG_ICC_BUS=y - CONFIG_PVPANIC=y - CONFIG_MEM_HOTPLUG=y - CONFIG_PCI=y - CONFIG_VIRTIO_PCI=y - CONFIG_VIRTIO=y - CONFIG_USB_UHCI=y - CONFIG_USB_OHCI=y - CONFIG_USB_EHCI=y - CONFIG_USB_XHCI=y - CONFIG_NE2000_PCI=y - CONFIG_EEPRO100_PCI=y - CONFIG_PCNET_PCI=y - CONFIG_PCNET_COMMON=y - CONFIG_AC97=y - CONFIG_HDA=y - CONFIG_ES1370=y - CONFIG_LSI_SCSI_PCI=y - CONFIG_VMW_PVSCSI_SCSI_PCI=y - CONFIG_MEGASAS_SCSI_PCI=y - CONFIG_RTL8139_PCI=y - CONFIG_E1000_PCI=y - CONFIG_VMXNET3_PCI=y - CONFIG_IDE_CORE=y - CONFIG_IDE_QDEV=y - CONFIG_IDE_PCI=y - CONFIG_AHCI=y - CONFIG_ESP=y - CONFIG_ESP_PCI=y - CONFIG_SERIAL=y - CONFIG_SERIAL_PCI=y - CONFIG_IPACK=y - CONFIG_WDT_IB6300ESB=y - CONFIG_PCI_TESTDEV=y - CONFIG_NVME_PCI=y - CONFIG_SB16=y - CONFIG_ADLIB=y - CONFIG_GUS=y - CONFIG_CS4231A=y - CONFIG_USB_TABLET_WACOM=y - CONFIG_USB_STORAGE_BOT=y - CONFIG_USB_STORAGE_UAS=y - CONFIG_USB_STORAGE_MTP=y - CONFIG_USB_SMARTCARD=y - CONFIG_USB_AUDIO=y - CONFIG_USB_SERIAL=y - CONFIG_USB_NETWORK=y - CONFIG_USB_BLUETOOTH=y - - - diff --git a/msvc/unicorn/aarch64-softmmu/config-target.h b/msvc/aarch64-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/aarch64-softmmu/config-target.h rename to msvc/aarch64-softmmu/config-target.h diff --git a/msvc/unicorn/aarch64eb-softmmu/config-target.h b/msvc/aarch64eb-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/aarch64eb-softmmu/config-target.h rename to msvc/aarch64eb-softmmu/config-target.h diff --git a/msvc/unicorn/arm-softmmu/config-target.h b/msvc/arm-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/arm-softmmu/config-target.h rename to msvc/arm-softmmu/config-target.h diff --git a/msvc/unicorn/armeb-softmmu/config-target.h b/msvc/armeb-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/armeb-softmmu/config-target.h rename to msvc/armeb-softmmu/config-target.h diff --git a/msvc/unicorn/config-host.h b/msvc/config-host.h similarity index 50% rename from msvc/unicorn/config-host.h rename to msvc/config-host.h index 15adc1d0..e656aef9 100644 --- a/msvc/unicorn/config-host.h +++ b/msvc/config-host.h @@ -1,6 +1,9 @@ /* Automatically generated by create_config - do not modify */ #define HOST_I386 1 #define CONFIG_WIN32 1 -#define CONFIG_FILEVERSION 2,2,1,0 -#define CONFIG_PRODUCTVERSION 2,2,1,0 +#define CONFIG_TCG 1 #define CONFIG_CPUID_H 1 +// #define CONFIG_INT128 1 +#define CONFIG_CMPXCHG128 
1 +#define CONFIG_ATOMIC64 1 +#define CONFIG_PLUGIN 1
diff --git a/msvc/unicorn/m68k-softmmu/config-target.h b/msvc/m68k-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/m68k-softmmu/config-target.h rename to msvc/m68k-softmmu/config-target.h
diff --git a/msvc/unicorn/mips-softmmu/config-target.h b/msvc/mips-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/mips-softmmu/config-target.h rename to msvc/mips-softmmu/config-target.h
diff --git a/msvc/unicorn/mips64-softmmu/config-target.h b/msvc/mips64-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/mips64-softmmu/config-target.h rename to msvc/mips64-softmmu/config-target.h
diff --git a/msvc/unicorn/mips64el-softmmu/config-target.h b/msvc/mips64el-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/mips64el-softmmu/config-target.h rename to msvc/mips64el-softmmu/config-target.h
diff --git a/msvc/unicorn/mipsel-softmmu/config-target.h b/msvc/mipsel-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/mipsel-softmmu/config-target.h rename to msvc/mipsel-softmmu/config-target.h
diff --git a/msvc/ppc-softmmu/config-target.h b/msvc/ppc-softmmu/config-target.h new file mode 100644 index 00000000..664ea9e5 --- /dev/null +++ b/msvc/ppc-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_PPC 1 +#define TARGET_NAME "ppc" +#define TARGET_PPC 1 +#define TARGET_WORDS_BIGENDIAN 1 +#define CONFIG_SOFTMMU 1
diff --git a/msvc/ppc64-softmmu/config-target.h b/msvc/ppc64-softmmu/config-target.h new file mode 100644 index 00000000..8a9550bc --- /dev/null +++ b/msvc/ppc64-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_PPC64 1 +#define TARGET_NAME "ppc64" +#define TARGET_PPC 1 +#define TARGET_WORDS_BIGENDIAN 1 +#define CONFIG_SOFTMMU 1
diff --git a/msvc/riscv32-softmmu/config-target.h b/msvc/riscv32-softmmu/config-target.h new file mode 100644 index 00000000..2b3ebb5e --- /dev/null +++ b/msvc/riscv32-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_RISCV32 1 +#define TARGET_NAME "riscv32" +#define TARGET_RISCV 1 +#define CONFIG_SOFTMMU 1 +#define TARGET_SUPPORTS_MTTCG 1
diff --git a/msvc/riscv64-softmmu/config-target.h b/msvc/riscv64-softmmu/config-target.h new file mode 100644 index 00000000..49c85b62 --- /dev/null +++ b/msvc/riscv64-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_RISCV64 1 +#define TARGET_NAME "riscv64" +#define TARGET_RISCV 1 +#define CONFIG_SOFTMMU 1 +#define TARGET_SUPPORTS_MTTCG 1
diff --git a/msvc/samples/mem_apis/mem_apis.vcxproj b/msvc/samples/mem_apis/mem_apis.vcxproj deleted file mode 100644 index 61000dc9..00000000 --- a/msvc/samples/mem_apis/mem_apis.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ [hunk body omitted: 175 deleted lines of MSBuild project XML (GUID {9D588288-5A28-4AB3-96EA-442CAA508F8E}; Debug/Release x Win32/x64 console-app configurations building against ../../../include and linking unicorn_static.lib); the XML markup was stripped during extraction and only element text survived]
diff --git a/msvc/samples/mem_apis/mem_apis.vcxproj.filters b/msvc/samples/mem_apis/mem_apis.vcxproj.filters deleted file mode 100644 index 570aef12..00000000 --- a/msvc/samples/mem_apis/mem_apis.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/sample_arm/sample_arm.vcxproj b/msvc/samples/sample_arm/sample_arm.vcxproj deleted file mode 100644 index bf859c78..00000000 --- a/msvc/samples/sample_arm/sample_arm.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ [hunk body omitted: 175 deleted lines, same project layout as mem_apis; GUID {9F32C692-9106-43AF-A291-779A2D8BE096}]
diff --git a/msvc/samples/sample_arm/sample_arm.vcxproj.filters b/msvc/samples/sample_arm/sample_arm.vcxproj.filters deleted file mode 100644 index 76c03db8..00000000 --- a/msvc/samples/sample_arm/sample_arm.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/sample_arm64/sample_arm64.vcxproj b/msvc/samples/sample_arm64/sample_arm64.vcxproj deleted file mode 100644 index f80627ba..00000000 --- a/msvc/samples/sample_arm64/sample_arm64.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ [hunk body omitted: 175 deleted lines, same project layout as mem_apis; GUID {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}]
diff --git a/msvc/samples/sample_arm64/sample_arm64.vcxproj.filters b/msvc/samples/sample_arm64/sample_arm64.vcxproj.filters deleted file mode 100644 index 1be43ac5..00000000 --- a/msvc/samples/sample_arm64/sample_arm64.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj b/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj deleted file mode 100644 index 6bbae8bd..00000000 --- a/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ [hunk body omitted: 175 deleted lines, same project layout as mem_apis; GUID {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}]
diff --git a/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj.filters b/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj.filters deleted file mode 100644 index a5cdd846..00000000 --- a/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/sample_armeb/sample_armeb.vcxproj b/msvc/samples/sample_armeb/sample_armeb.vcxproj deleted file mode 100644 index 108b369b..00000000 --- a/msvc/samples/sample_armeb/sample_armeb.vcxproj +++ /dev/null @@ -1,173 +0,0 @@ [hunk body omitted: 173 deleted lines, same project layout as mem_apis; GUID {1945F27B-ABB3-47F9-9268-A42F73C8B992}]
diff --git a/msvc/samples/sample_armeb/sample_armeb.vcxproj.filters b/msvc/samples/sample_armeb/sample_armeb.vcxproj.filters deleted file mode 100644 index bdd745eb..00000000 --- a/msvc/samples/sample_armeb/sample_armeb.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj b/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj deleted file mode 100644 index af3bf85b..00000000 --- a/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ [hunk body omitted: 175 deleted lines, same project layout as mem_apis; GUID {7AA02EDF-D797-494B-929C-F628F4E4EA62}]
diff --git a/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj.filters b/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj.filters deleted file mode 100644 index 098f8333..00000000 --- a/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/sample_m68k/sample_m68k.vcxproj b/msvc/samples/sample_m68k/sample_m68k.vcxproj deleted file mode 100644 index 572c3243..00000000 --- a/msvc/samples/sample_m68k/sample_m68k.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ [hunk body omitted: 175 deleted lines, same project layout as mem_apis; GUID {11727C54-463F-472A-88AF-6C3D6071BF0B}]
diff --git a/msvc/samples/sample_m68k/sample_m68k.vcxproj.filters b/msvc/samples/sample_m68k/sample_m68k.vcxproj.filters deleted file mode 100644 index 033b3c0e..00000000 --- a/msvc/samples/sample_m68k/sample_m68k.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/sample_mips/sample_mips.vcxproj b/msvc/samples/sample_mips/sample_mips.vcxproj deleted file mode 100644 index 7ec2ac03..00000000 --- a/msvc/samples/sample_mips/sample_mips.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ [hunk body omitted: 175 deleted lines, same project layout as mem_apis; GUID {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}]
diff --git a/msvc/samples/sample_mips/sample_mips.vcxproj.filters b/msvc/samples/sample_mips/sample_mips.vcxproj.filters deleted file mode 100644 index 95514576..00000000 --- a/msvc/samples/sample_mips/sample_mips.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/sample_sparc/sample_sparc.vcxproj b/msvc/samples/sample_sparc/sample_sparc.vcxproj deleted file mode 100644 index f94937d9..00000000 --- a/msvc/samples/sample_sparc/sample_sparc.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ [hunk body omitted: 175 deleted lines, same project layout as mem_apis; GUID {A25CA34D-2F64-442B-A5D3-B13CB56C9957}]
diff --git a/msvc/samples/sample_sparc/sample_sparc.vcxproj.filters b/msvc/samples/sample_sparc/sample_sparc.vcxproj.filters deleted file mode 100644 index 306de23f..00000000 --- a/msvc/samples/sample_sparc/sample_sparc.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/sample_x86/sample_x86.vcxproj b/msvc/samples/sample_x86/sample_x86.vcxproj deleted file mode 100644 index 30e3cc0b..00000000 --- a/msvc/samples/sample_x86/sample_x86.vcxproj +++ /dev/null @@ -1,192 +0,0 @@ [hunk body omitted: 192 deleted lines, same layout as mem_apis but defining __x86_64__ instead of __i386__ and using ProgramDatabase debug info in the x64 configurations; GUID {F8053D66-8267-433A-BF2C-E07E2298C338}]
diff --git a/msvc/samples/sample_x86/sample_x86.vcxproj.filters b/msvc/samples/sample_x86/sample_x86.vcxproj.filters deleted file mode 100644 index dd731b20..00000000 --- a/msvc/samples/sample_x86/sample_x86.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj b/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj deleted file mode 100644 index 3649777e..00000000 --- a/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ [hunk body omitted: 175 deleted lines, same project layout as mem_apis; GUID {9D96D09A-DE17-4011-9247-F0009E8D6DB5}]
diff --git a/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj.filters b/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj.filters deleted file mode 100644 index 960616db..00000000 --- a/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/samples/shellcode/shellcode.vcxproj b/msvc/samples/shellcode/shellcode.vcxproj deleted file mode 100644 index a456ac16..00000000 --- a/msvc/samples/shellcode/shellcode.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ [hunk body omitted: 175 deleted lines, same project layout as mem_apis; GUID {F113B460-4B21-4014-9A15-D472FAA9E3F9}]
diff --git a/msvc/samples/shellcode/shellcode.vcxproj.filters b/msvc/samples/shellcode/shellcode.vcxproj.filters deleted file mode 100644 index 1f571b64..00000000 --- a/msvc/samples/shellcode/shellcode.vcxproj.filters +++ /dev/null @@ -1,6 +0,0 @@ [hunk body omitted: 6 deleted lines of MSBuild filter XML, markup stripped]
diff --git a/msvc/unicorn/sparc-softmmu/config-target.h b/msvc/sparc-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/sparc-softmmu/config-target.h rename to msvc/sparc-softmmu/config-target.h
diff --git a/msvc/unicorn/sparc64-softmmu/config-target.h b/msvc/sparc64-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/sparc64-softmmu/config-target.h rename to msvc/sparc64-softmmu/config-target.h
diff --git a/msvc/unicorn.sln b/msvc/unicorn.sln deleted file mode 100644 index 03da7754..00000000 --- a/msvc/unicorn.sln +++ /dev/null @@ -1,370 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 2012 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "unicorn", "unicorn\unicorn\unicorn.vcxproj", "{ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}" - ProjectSection(ProjectDependencies) = postProject - {740F3007-7BF0-4C0C-8FA5-2587C794EF31} = {740F3007-7BF0-4C0C-8FA5-2587C794EF31} - {006A7908-ABF3-4D18-BC35-0A29E39B95F9} = {006A7908-ABF3-4D18-BC35-0A29E39B95F9} - {63050112-E486-4396-B5E4-303C3BC12D39} = {63050112-E486-4396-B5E4-303C3BC12D39} - {8804AD29-E398-480C-AC0F-98EC1B7A51CB} = {8804AD29-E398-480C-AC0F-98EC1B7A51CB} - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773} = {2A7F483F-CD19-4F84-BBDA-B6A1865E2773} - {2C5AD347-6E34-463B-8289-00578E43B255} = {2C5AD347-6E34-463B-8289-00578E43B255} - {4A9F9353-DB63-460A-BB1C-9CB519DFD414} = {4A9F9353-DB63-460A-BB1C-9CB519DFD414} - {698C2D54-475C-446F-B879-F629BBEF75FE} = {698C2D54-475C-446F-B879-F629BBEF75FE} - {17077E86-AE7C-41AF-86ED-2BAC03B019BC} = {17077E86-AE7C-41AF-86ED-2BAC03B019BC} - {4478909E-6983-425C-9D9F-558CF258E61E} = {4478909E-6983-425C-9D9F-558CF258E61E} - {340D86A5-E53C-490B-880A-8EB1F5BDE947} = {340D86A5-E53C-490B-880A-8EB1F5BDE947} - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036} = {F67EB1EA-DCFA-4758-A2AA-4B570BA78036} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "unicorn_static", "unicorn\unicorn_static\unicorn_static.vcxproj", "{B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}" -
ProjectSection(ProjectDependencies) = postProject - {740F3007-7BF0-4C0C-8FA5-2587C794EF31} = {740F3007-7BF0-4C0C-8FA5-2587C794EF31} - {006A7908-ABF3-4D18-BC35-0A29E39B95F9} = {006A7908-ABF3-4D18-BC35-0A29E39B95F9} - {63050112-E486-4396-B5E4-303C3BC12D39} = {63050112-E486-4396-B5E4-303C3BC12D39} - {8804AD29-E398-480C-AC0F-98EC1B7A51CB} = {8804AD29-E398-480C-AC0F-98EC1B7A51CB} - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773} = {2A7F483F-CD19-4F84-BBDA-B6A1865E2773} - {2C5AD347-6E34-463B-8289-00578E43B255} = {2C5AD347-6E34-463B-8289-00578E43B255} - {4A9F9353-DB63-460A-BB1C-9CB519DFD414} = {4A9F9353-DB63-460A-BB1C-9CB519DFD414} - {698C2D54-475C-446F-B879-F629BBEF75FE} = {698C2D54-475C-446F-B879-F629BBEF75FE} - {17077E86-AE7C-41AF-86ED-2BAC03B019BC} = {17077E86-AE7C-41AF-86ED-2BAC03B019BC} - {4478909E-6983-425C-9D9F-558CF258E61E} = {4478909E-6983-425C-9D9F-558CF258E61E} - {340D86A5-E53C-490B-880A-8EB1F5BDE947} = {340D86A5-E53C-490B-880A-8EB1F5BDE947} - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036} = {F67EB1EA-DCFA-4758-A2AA-4B570BA78036} - EndProjectSection -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "softmmu", "softmmu", "{857A09AF-FE20-461C-B66F-D779422AD46B}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "aarch64-softmmu", "unicorn\aarch64-softmmu\aarch64-softmmu.vcxproj", "{2A7F483F-CD19-4F84-BBDA-B6A1865E2773}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "arm-softmmu", "unicorn\arm-softmmu\arm-softmmu.vcxproj", "{F67EB1EA-DCFA-4758-A2AA-4B570BA78036}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "armeb-softmmu", "unicorn\armeb-softmmu\armeb-softmmu.vcxproj", "{740F3007-7BF0-4C0C-8FA5-2587C794EF31}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "m68k-softmmu", "unicorn\m68k-softmmu\m68k-softmmu.vcxproj", "{2C5AD347-6E34-463B-8289-00578E43B255}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mips-softmmu", "unicorn\mips-softmmu\mips-softmmu.vcxproj", "{63050112-E486-4396-B5E4-303C3BC12D39}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mips64-softmmu", "unicorn\mips64-softmmu\mips64-softmmu.vcxproj", "{4A9F9353-DB63-460A-BB1C-9CB519DFD414}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mips64el-softmmu", "unicorn\mips64el-softmmu\mips64el-softmmu.vcxproj", "{4478909E-6983-425C-9D9F-558CF258E61E}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mipsel-softmmu", "unicorn\mipsel-softmmu\mipsel-softmmu.vcxproj", "{006A7908-ABF3-4D18-BC35-0A29E39B95F9}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sparc-softmmu", "unicorn\sparc-softmmu\sparc-softmmu.vcxproj", "{698C2D54-475C-446F-B879-F629BBEF75FE}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sparc64-softmmu", "unicorn\sparc64-softmmu\sparc64-softmmu.vcxproj", "{8804AD29-E398-480C-AC0F-98EC1B7A51CB}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "x86_64-softmmu", "unicorn\x86_64-softmmu\x86_64-softmmu.vcxproj", "{17077E86-AE7C-41AF-86ED-2BAC03B019BC}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "samples", "samples", "{F8E85E25-4D67-4A6B-A976-C920790B8798}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mem_apis", "samples\mem_apis\mem_apis.vcxproj", "{9D588288-5A28-4AB3-96EA-442CAA508F8E}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject 
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_arm", "samples\sample_arm\sample_arm.vcxproj", "{9F32C692-9106-43AF-A291-779A2D8BE096}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_arm64", "samples\sample_arm64\sample_arm64.vcxproj", "{04DC0E3A-F247-45C2-AE27-8DE7493AA43B}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_batch_reg", "samples\sample_batch_reg\sample_batch_reg.vcxproj", "{7AA02EDF-D797-494B-929C-F628F4E4EA62}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_m68k", "samples\sample_m68k\sample_m68k.vcxproj", "{11727C54-463F-472A-88AF-6C3D6071BF0B}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_mips", "samples\sample_mips\sample_mips.vcxproj", "{E34ECD90-3977-4A4B-9641-4D7F1766E9FD}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_sparc", "samples\sample_sparc\sample_sparc.vcxproj", "{A25CA34D-2F64-442B-A5D3-B13CB56C9957}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_x86", "samples\sample_x86\sample_x86.vcxproj", "{F8053D66-8267-433A-BF2C-E07E2298C338}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_x86_32_gdt_and_seg_regs", "samples\sample_x86_32_gdt_and_seg_regs\sample_x86_32_gdt_and_seg_regs.vcxproj", "{9D96D09A-DE17-4011-9247-F0009E8D6DB5}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "shellcode", "samples\shellcode\shellcode.vcxproj", "{F113B460-4B21-4014-9A15-D472FAA9E3F9}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_armeb", "samples\sample_armeb\sample_armeb.vcxproj", "{1945F27B-ABB3-47F9-9268-A42F73C8B992}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "aarch64eb-softmmu", "unicorn\aarch64eb-softmmu\aarch64eb-softmmu.vcxproj", "{340D86A5-E53C-490B-880A-8EB1F5BDE947}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_arm64eb", "samples\sample_arm64eb\sample_arm64eb.vcxproj", 
"{1A42A5E3-82A7-4EE4-B7D2-8265B147F124}" - ProjectSection(ProjectDependencies) = postProject - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - EndProjectSection -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Win32 = Debug|Win32 - Debug|x64 = Debug|x64 - Release|Win32 = Release|Win32 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Debug|Win32.ActiveCfg = Debug|Win32 - {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Debug|Win32.Build.0 = Debug|Win32 - {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Debug|x64.ActiveCfg = Debug|x64 - {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Debug|x64.Build.0 = Debug|x64 - {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Release|Win32.ActiveCfg = Release|Win32 - {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Release|Win32.Build.0 = Release|Win32 - {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Release|x64.ActiveCfg = Release|x64 - {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Release|x64.Build.0 = Release|x64 - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Debug|Win32.ActiveCfg = Debug|Win32 - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Debug|Win32.Build.0 = Debug|Win32 - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Debug|x64.ActiveCfg = Debug|x64 - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Debug|x64.Build.0 = Debug|x64 - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Release|Win32.ActiveCfg = Release|Win32 - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Release|Win32.Build.0 = Release|Win32 - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Release|x64.ActiveCfg = Release|x64 - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Release|x64.Build.0 = Release|x64 - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Debug|Win32.ActiveCfg = Debug|Win32 - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Debug|Win32.Build.0 = Debug|Win32 - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Debug|x64.ActiveCfg = Debug|x64 - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Debug|x64.Build.0 = Debug|x64 - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Release|Win32.ActiveCfg = Release|Win32 - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Release|Win32.Build.0 = Release|Win32 - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Release|x64.ActiveCfg = Release|x64 - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Release|x64.Build.0 = Release|x64 - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Debug|Win32.ActiveCfg = Debug|Win32 - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Debug|Win32.Build.0 = Debug|Win32 - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Debug|x64.ActiveCfg = Debug|x64 - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Debug|x64.Build.0 = Debug|x64 - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Release|Win32.ActiveCfg = Release|Win32 - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Release|Win32.Build.0 = Release|Win32 - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Release|x64.ActiveCfg = Release|x64 - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Release|x64.Build.0 = Release|x64 - {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Debug|Win32.ActiveCfg = Debug|Win32 - {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Debug|Win32.Build.0 = Debug|Win32 - {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Debug|x64.ActiveCfg = Debug|x64 - {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Debug|x64.Build.0 = Debug|x64 - {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Release|Win32.ActiveCfg = Release|Win32 - {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Release|Win32.Build.0 = Release|Win32 - {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Release|x64.ActiveCfg = Release|x64 - {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Release|x64.Build.0 = Release|x64 - 
{2C5AD347-6E34-463B-8289-00578E43B255}.Debug|Win32.ActiveCfg = Debug|Win32 - {2C5AD347-6E34-463B-8289-00578E43B255}.Debug|Win32.Build.0 = Debug|Win32 - {2C5AD347-6E34-463B-8289-00578E43B255}.Debug|x64.ActiveCfg = Debug|x64 - {2C5AD347-6E34-463B-8289-00578E43B255}.Debug|x64.Build.0 = Debug|x64 - {2C5AD347-6E34-463B-8289-00578E43B255}.Release|Win32.ActiveCfg = Release|Win32 - {2C5AD347-6E34-463B-8289-00578E43B255}.Release|Win32.Build.0 = Release|Win32 - {2C5AD347-6E34-463B-8289-00578E43B255}.Release|x64.ActiveCfg = Release|x64 - {2C5AD347-6E34-463B-8289-00578E43B255}.Release|x64.Build.0 = Release|x64 - {63050112-E486-4396-B5E4-303C3BC12D39}.Debug|Win32.ActiveCfg = Debug|Win32 - {63050112-E486-4396-B5E4-303C3BC12D39}.Debug|Win32.Build.0 = Debug|Win32 - {63050112-E486-4396-B5E4-303C3BC12D39}.Debug|x64.ActiveCfg = Debug|x64 - {63050112-E486-4396-B5E4-303C3BC12D39}.Debug|x64.Build.0 = Debug|x64 - {63050112-E486-4396-B5E4-303C3BC12D39}.Release|Win32.ActiveCfg = Release|Win32 - {63050112-E486-4396-B5E4-303C3BC12D39}.Release|Win32.Build.0 = Release|Win32 - {63050112-E486-4396-B5E4-303C3BC12D39}.Release|x64.ActiveCfg = Release|x64 - {63050112-E486-4396-B5E4-303C3BC12D39}.Release|x64.Build.0 = Release|x64 - {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Debug|Win32.ActiveCfg = Debug|Win32 - {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Debug|Win32.Build.0 = Debug|Win32 - {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Debug|x64.ActiveCfg = Debug|x64 - {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Debug|x64.Build.0 = Debug|x64 - {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Release|Win32.ActiveCfg = Release|Win32 - {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Release|Win32.Build.0 = Release|Win32 - {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Release|x64.ActiveCfg = Release|x64 - {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Release|x64.Build.0 = Release|x64 - {4478909E-6983-425C-9D9F-558CF258E61E}.Debug|Win32.ActiveCfg = Debug|Win32 - {4478909E-6983-425C-9D9F-558CF258E61E}.Debug|Win32.Build.0 = Debug|Win32 - {4478909E-6983-425C-9D9F-558CF258E61E}.Debug|x64.ActiveCfg = Debug|x64 - {4478909E-6983-425C-9D9F-558CF258E61E}.Debug|x64.Build.0 = Debug|x64 - {4478909E-6983-425C-9D9F-558CF258E61E}.Release|Win32.ActiveCfg = Release|Win32 - {4478909E-6983-425C-9D9F-558CF258E61E}.Release|Win32.Build.0 = Release|Win32 - {4478909E-6983-425C-9D9F-558CF258E61E}.Release|x64.ActiveCfg = Release|x64 - {4478909E-6983-425C-9D9F-558CF258E61E}.Release|x64.Build.0 = Release|x64 - {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Debug|Win32.ActiveCfg = Debug|Win32 - {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Debug|Win32.Build.0 = Debug|Win32 - {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Debug|x64.ActiveCfg = Debug|x64 - {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Debug|x64.Build.0 = Debug|x64 - {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Release|Win32.ActiveCfg = Release|Win32 - {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Release|Win32.Build.0 = Release|Win32 - {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Release|x64.ActiveCfg = Release|x64 - {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Release|x64.Build.0 = Release|x64 - {698C2D54-475C-446F-B879-F629BBEF75FE}.Debug|Win32.ActiveCfg = Debug|Win32 - {698C2D54-475C-446F-B879-F629BBEF75FE}.Debug|Win32.Build.0 = Debug|Win32 - {698C2D54-475C-446F-B879-F629BBEF75FE}.Debug|x64.ActiveCfg = Debug|x64 - {698C2D54-475C-446F-B879-F629BBEF75FE}.Debug|x64.Build.0 = Debug|x64 - {698C2D54-475C-446F-B879-F629BBEF75FE}.Release|Win32.ActiveCfg = Release|Win32 - {698C2D54-475C-446F-B879-F629BBEF75FE}.Release|Win32.Build.0 = Release|Win32 - 
{698C2D54-475C-446F-B879-F629BBEF75FE}.Release|x64.ActiveCfg = Release|x64 - {698C2D54-475C-446F-B879-F629BBEF75FE}.Release|x64.Build.0 = Release|x64 - {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Debug|Win32.ActiveCfg = Debug|Win32 - {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Debug|Win32.Build.0 = Debug|Win32 - {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Debug|x64.ActiveCfg = Debug|x64 - {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Debug|x64.Build.0 = Debug|x64 - {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Release|Win32.ActiveCfg = Release|Win32 - {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Release|Win32.Build.0 = Release|Win32 - {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Release|x64.ActiveCfg = Release|x64 - {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Release|x64.Build.0 = Release|x64 - {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Debug|Win32.ActiveCfg = Debug|Win32 - {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Debug|Win32.Build.0 = Debug|Win32 - {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Debug|x64.ActiveCfg = Debug|x64 - {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Debug|x64.Build.0 = Debug|x64 - {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Release|Win32.ActiveCfg = Release|Win32 - {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Release|Win32.Build.0 = Release|Win32 - {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Release|x64.ActiveCfg = Release|x64 - {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Release|x64.Build.0 = Release|x64 - {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Debug|Win32.ActiveCfg = Debug|Win32 - {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Debug|Win32.Build.0 = Debug|Win32 - {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Debug|x64.ActiveCfg = Debug|x64 - {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Debug|x64.Build.0 = Debug|x64 - {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Release|Win32.ActiveCfg = Release|Win32 - {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Release|Win32.Build.0 = Release|Win32 - {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Release|x64.ActiveCfg = Release|x64 - {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Release|x64.Build.0 = Release|x64 - {9F32C692-9106-43AF-A291-779A2D8BE096}.Debug|Win32.ActiveCfg = Debug|Win32 - {9F32C692-9106-43AF-A291-779A2D8BE096}.Debug|Win32.Build.0 = Debug|Win32 - {9F32C692-9106-43AF-A291-779A2D8BE096}.Debug|x64.ActiveCfg = Debug|x64 - {9F32C692-9106-43AF-A291-779A2D8BE096}.Debug|x64.Build.0 = Debug|x64 - {9F32C692-9106-43AF-A291-779A2D8BE096}.Release|Win32.ActiveCfg = Release|Win32 - {9F32C692-9106-43AF-A291-779A2D8BE096}.Release|Win32.Build.0 = Release|Win32 - {9F32C692-9106-43AF-A291-779A2D8BE096}.Release|x64.ActiveCfg = Release|x64 - {9F32C692-9106-43AF-A291-779A2D8BE096}.Release|x64.Build.0 = Release|x64 - {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Debug|Win32.ActiveCfg = Debug|Win32 - {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Debug|Win32.Build.0 = Debug|Win32 - {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Debug|x64.ActiveCfg = Debug|x64 - {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Debug|x64.Build.0 = Debug|x64 - {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Release|Win32.ActiveCfg = Release|Win32 - {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Release|Win32.Build.0 = Release|Win32 - {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Release|x64.ActiveCfg = Release|x64 - {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Release|x64.Build.0 = Release|x64 - {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Debug|Win32.ActiveCfg = Debug|Win32 - {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Debug|Win32.Build.0 = Debug|Win32 - {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Debug|x64.ActiveCfg = Debug|x64 - {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Debug|x64.Build.0 = Debug|x64 - {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Release|Win32.ActiveCfg = 
Release|Win32 - {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Release|Win32.Build.0 = Release|Win32 - {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Release|x64.ActiveCfg = Release|x64 - {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Release|x64.Build.0 = Release|x64 - {11727C54-463F-472A-88AF-6C3D6071BF0B}.Debug|Win32.ActiveCfg = Debug|Win32 - {11727C54-463F-472A-88AF-6C3D6071BF0B}.Debug|Win32.Build.0 = Debug|Win32 - {11727C54-463F-472A-88AF-6C3D6071BF0B}.Debug|x64.ActiveCfg = Debug|x64 - {11727C54-463F-472A-88AF-6C3D6071BF0B}.Debug|x64.Build.0 = Debug|x64 - {11727C54-463F-472A-88AF-6C3D6071BF0B}.Release|Win32.ActiveCfg = Release|Win32 - {11727C54-463F-472A-88AF-6C3D6071BF0B}.Release|Win32.Build.0 = Release|Win32 - {11727C54-463F-472A-88AF-6C3D6071BF0B}.Release|x64.ActiveCfg = Release|x64 - {11727C54-463F-472A-88AF-6C3D6071BF0B}.Release|x64.Build.0 = Release|x64 - {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Debug|Win32.ActiveCfg = Debug|Win32 - {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Debug|Win32.Build.0 = Debug|Win32 - {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Debug|x64.ActiveCfg = Debug|x64 - {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Debug|x64.Build.0 = Debug|x64 - {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Release|Win32.ActiveCfg = Release|Win32 - {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Release|Win32.Build.0 = Release|Win32 - {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Release|x64.ActiveCfg = Release|x64 - {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Release|x64.Build.0 = Release|x64 - {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Debug|Win32.ActiveCfg = Debug|Win32 - {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Debug|Win32.Build.0 = Debug|Win32 - {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Debug|x64.ActiveCfg = Debug|x64 - {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Debug|x64.Build.0 = Debug|x64 - {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Release|Win32.ActiveCfg = Release|Win32 - {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Release|Win32.Build.0 = Release|Win32 - {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Release|x64.ActiveCfg = Release|x64 - {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Release|x64.Build.0 = Release|x64 - {F8053D66-8267-433A-BF2C-E07E2298C338}.Debug|Win32.ActiveCfg = Debug|Win32 - {F8053D66-8267-433A-BF2C-E07E2298C338}.Debug|Win32.Build.0 = Debug|Win32 - {F8053D66-8267-433A-BF2C-E07E2298C338}.Debug|x64.ActiveCfg = Debug|x64 - {F8053D66-8267-433A-BF2C-E07E2298C338}.Debug|x64.Build.0 = Debug|x64 - {F8053D66-8267-433A-BF2C-E07E2298C338}.Release|Win32.ActiveCfg = Release|Win32 - {F8053D66-8267-433A-BF2C-E07E2298C338}.Release|Win32.Build.0 = Release|Win32 - {F8053D66-8267-433A-BF2C-E07E2298C338}.Release|x64.ActiveCfg = Release|x64 - {F8053D66-8267-433A-BF2C-E07E2298C338}.Release|x64.Build.0 = Release|x64 - {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Debug|Win32.ActiveCfg = Debug|Win32 - {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Debug|Win32.Build.0 = Debug|Win32 - {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Debug|x64.ActiveCfg = Debug|x64 - {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Debug|x64.Build.0 = Debug|x64 - {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Release|Win32.ActiveCfg = Release|Win32 - {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Release|Win32.Build.0 = Release|Win32 - {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Release|x64.ActiveCfg = Release|x64 - {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Release|x64.Build.0 = Release|x64 - {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Debug|Win32.ActiveCfg = Debug|Win32 - {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Debug|Win32.Build.0 = Debug|Win32 - {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Debug|x64.ActiveCfg = Debug|x64 - 
{F113B460-4B21-4014-9A15-D472FAA9E3F9}.Debug|x64.Build.0 = Debug|x64 - {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Release|Win32.ActiveCfg = Release|Win32 - {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Release|Win32.Build.0 = Release|Win32 - {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Release|x64.ActiveCfg = Release|x64 - {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Release|x64.Build.0 = Release|x64 - {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Debug|Win32.ActiveCfg = Debug|Win32 - {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Debug|Win32.Build.0 = Debug|Win32 - {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Debug|x64.ActiveCfg = Debug|x64 - {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Debug|x64.Build.0 = Debug|x64 - {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Release|Win32.ActiveCfg = Release|Win32 - {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Release|Win32.Build.0 = Release|Win32 - {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Release|x64.ActiveCfg = Release|x64 - {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Release|x64.Build.0 = Release|x64 - {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Debug|Win32.ActiveCfg = Debug|Win32 - {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Debug|Win32.Build.0 = Debug|Win32 - {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Debug|x64.ActiveCfg = Debug|x64 - {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Debug|x64.Build.0 = Debug|x64 - {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Release|Win32.ActiveCfg = Release|Win32 - {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Release|Win32.Build.0 = Release|Win32 - {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Release|x64.ActiveCfg = Release|x64 - {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Release|x64.Build.0 = Release|x64 - {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Debug|Win32.ActiveCfg = Debug|Win32 - {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Debug|Win32.Build.0 = Debug|Win32 - {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Debug|x64.ActiveCfg = Debug|x64 - {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Debug|x64.Build.0 = Debug|x64 - {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Release|Win32.ActiveCfg = Release|Win32 - {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Release|Win32.Build.0 = Release|Win32 - {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Release|x64.ActiveCfg = Release|x64 - {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(NestedProjects) = preSolution - {2A7F483F-CD19-4F84-BBDA-B6A1865E2773} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {F67EB1EA-DCFA-4758-A2AA-4B570BA78036} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {740F3007-7BF0-4C0C-8FA5-2587C794EF31} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {2C5AD347-6E34-463B-8289-00578E43B255} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {63050112-E486-4396-B5E4-303C3BC12D39} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {4A9F9353-DB63-460A-BB1C-9CB519DFD414} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {4478909E-6983-425C-9D9F-558CF258E61E} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {006A7908-ABF3-4D18-BC35-0A29E39B95F9} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {698C2D54-475C-446F-B879-F629BBEF75FE} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {8804AD29-E398-480C-AC0F-98EC1B7A51CB} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {17077E86-AE7C-41AF-86ED-2BAC03B019BC} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {340D86A5-E53C-490B-880A-8EB1F5BDE947} = {857A09AF-FE20-461C-B66F-D779422AD46B} - {9D588288-5A28-4AB3-96EA-442CAA508F8E} = {F8E85E25-4D67-4A6B-A976-C920790B8798} - {9F32C692-9106-43AF-A291-779A2D8BE096} = {F8E85E25-4D67-4A6B-A976-C920790B8798} - {04DC0E3A-F247-45C2-AE27-8DE7493AA43B} 
-		{7AA02EDF-D797-494B-929C-F628F4E4EA62} = {F8E85E25-4D67-4A6B-A976-C920790B8798}
-		{11727C54-463F-472A-88AF-6C3D6071BF0B} = {F8E85E25-4D67-4A6B-A976-C920790B8798}
-		{E34ECD90-3977-4A4B-9641-4D7F1766E9FD} = {F8E85E25-4D67-4A6B-A976-C920790B8798}
-		{A25CA34D-2F64-442B-A5D3-B13CB56C9957} = {F8E85E25-4D67-4A6B-A976-C920790B8798}
-		{F8053D66-8267-433A-BF2C-E07E2298C338} = {F8E85E25-4D67-4A6B-A976-C920790B8798}
-		{9D96D09A-DE17-4011-9247-F0009E8D6DB5} = {F8E85E25-4D67-4A6B-A976-C920790B8798}
-		{F113B460-4B21-4014-9A15-D472FAA9E3F9} = {F8E85E25-4D67-4A6B-A976-C920790B8798}
-		{1945F27B-ABB3-47F9-9268-A42F73C8B992} = {F8E85E25-4D67-4A6B-A976-C920790B8798}
-		{1A42A5E3-82A7-4EE4-B7D2-8265B147F124} = {F8E85E25-4D67-4A6B-A976-C920790B8798}
-	EndGlobalSection
-EndGlobal
diff --git a/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj b/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj
deleted file mode 100644
index f7947e33..00000000
--- a/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj
+++ /dev/null
@@ -1,238 +0,0 @@
[deleted: MSBuild static-library project "aarch64softmmu" ({2A7F483F-CD19-4F84-BBDA-B6A1865E2773}). Four configurations (Debug/Release x Win32/x64) on $(DefaultPlatformToolset), MultiByte, warning level 3 with /wd4018 /wd4244 /wd4267; defines WIN32, _LIB, _CRT_SECURE_NO_WARNINGS, inline=__inline, __func__=__FUNCTION__, NEED_CPU_H and WIN32_LEAN_AND_MEAN, plus __i386__ (Win32) or __x86_64__ (x64), with _DEBUG/MultiThreadedDebug in Debug and NDEBUG/MaxSpeed/MultiThreaded in Release; include path .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include; per-target header aarch64.h; pre-build step ..\prebuild_script.bat.]
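Every project deleted in this patch injects inline=__inline and __func__=__FUNCTION__ through the preprocessor because the MSVC toolsets being dropped predate C99 support for those identifiers, while the QEMU-derived sources use both freely. A minimal sketch of what the two definitions buy (hypothetical file, not part of the patch):

    /* Hypothetical example: with inline and __func__ defined away on the
     * command line, this C99-style unit compiles under pre-C99 MSVC, where
     * 'inline' expands to the MS-specific '__inline' and '__func__' to the
     * MS-specific '__FUNCTION__'. */
    #include <stdio.h>

    static inline int twice(int x)
    {
        printf("%s(%d)\n", __func__, x); /* prints "twice(21)" */
        return 2 * x;
    }

    int main(void)
    {
        return twice(21) == 42 ? 0 : 1;
    }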
diff --git a/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj.filters b/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj.filters
deleted file mode 100644
index 8467aeca..00000000
--- a/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj.filters
+++ /dev/null
@@ -1,149 +0,0 @@
[deleted: Solution Explorer filters sorting the project's sources and headers into the groups fpu, hw\arm, target-arm, tcg and tcg\i386.]
diff --git a/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj b/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj
deleted file mode 100644
index 08f11bd2..00000000
--- a/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj
+++ /dev/null
@@ -1,238 +0,0 @@
[deleted: MSBuild static-library project "aarch64ebsoftmmu" ({340D86A5-E53C-490B-880A-8EB1F5BDE947}); same layout as aarch64-softmmu above, with per-target header aarch64eb.h.]
diff --git a/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj.filters b/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj.filters
deleted file mode 100644
index 39d22d56..00000000
--- a/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj.filters
+++ /dev/null
@@ -1,149 +0,0 @@
[deleted: filters matching aarch64-softmmu's groups (fpu, hw\arm, target-arm, tcg, tcg\i386).]
diff --git a/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj b/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj
deleted file mode 100644
index 3094df0f..00000000
--- a/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj
+++ /dev/null
@@ -1,234 +0,0 @@
[deleted: MSBuild static-library project "armsoftmmu" ({F67EB1EA-DCFA-4758-A2AA-4B570BA78036}); same layout as aarch64-softmmu above, with per-target header arm.h.]
diff --git a/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj.filters b/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj.filters
deleted file mode 100644
index 157e6e65..00000000
--- a/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj.filters
+++ /dev/null
@@ -1,137 +0,0 @@
[deleted: filters matching aarch64-softmmu's groups (fpu, hw\arm, target-arm, tcg, tcg\i386).]
diff --git a/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj b/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj
deleted file mode 100644
index 3f2494af..00000000
--- a/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj
+++ /dev/null
@@ -1,234 +0,0 @@
[deleted: MSBuild static-library project "armebsoftmmu" ({740F3007-7BF0-4C0C-8FA5-2587C794EF31}); same layout as aarch64-softmmu above, with per-target header armeb.h.]
diff --git a/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj.filters b/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj.filters
deleted file mode 100644
index c85b4489..00000000
--- a/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj.filters
+++ /dev/null
@@ -1,137 +0,0 @@
[deleted: filters matching aarch64-softmmu's groups (fpu, hw\arm, target-arm, tcg, tcg\i386).]
diff --git a/msvc/unicorn/unicorn/dllmain.cpp b/msvc/unicorn/dllmain.cpp
similarity index 100%
rename from msvc/unicorn/unicorn/dllmain.cpp
rename to msvc/unicorn/dllmain.cpp
diff --git a/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj b/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj
deleted file mode 100644
index ecd1f6ea..00000000
--- a/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj
+++ /dev/null
@@ -1,225 +0,0 @@
[deleted: MSBuild static-library project "m68ksoftmmu" ({2C5AD347-6E34-463B-8289-00578E43B255}); same layout as aarch64-softmmu above, with the include path using ../../../qemu/target-m68k and per-target header m68k.h.]
diff --git a/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj.filters b/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj.filters
deleted file mode 100644
index f3487108..00000000
--- a/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj.filters
+++ /dev/null
@@ -1,110 +0,0 @@
[deleted: filters sorting the project's sources and headers into the groups fpu, hw\m68k, target-m68k, tcg and tcg\i386.]
diff --git a/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj b/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj
deleted file mode 100644
index 4e051f68..00000000
--- a/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj
+++ /dev/null
@@ -1,236 +0,0 @@
[deleted: MSBuild static-library project "mipssoftmmu" ({63050112-E486-4396-B5E4-303C3BC12D39}); same layout as aarch64-softmmu above, with the include path using ../../../qemu/target-mips and per-target header mips.h.]
diff --git a/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj.filters b/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj.filters
deleted file mode 100644
index 893a2551..00000000
--- a/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj.filters
+++ /dev/null
@@ -1,128 +0,0 @@
[deleted: filters sorting the project's sources and headers into the groups fpu, hw\mips, target-mips, tcg and tcg\i386.]
diff --git a/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj b/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj
deleted file mode 100644
index c5910de1..00000000
--- a/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj
+++ /dev/null
@@ -1,236 +0,0 @@
[deleted: MSBuild static-library project "mips64softmmu" ({4A9F9353-DB63-460A-BB1C-9CB519DFD414}); same layout as mips-softmmu above, with per-target header mips64.h.]
diff --git a/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj.filters b/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj.filters
deleted file mode 100644
index c5aac694..00000000
--- a/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj.filters
+++ /dev/null
@@ -1,128 +0,0 @@
[deleted: filters matching mips-softmmu's groups (fpu, hw\mips, target-mips, tcg, tcg\i386).]
diff --git a/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj b/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj
deleted file mode 100644
index ccbba530..00000000
--- a/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj
+++ /dev/null
@@ -1,236 +0,0 @@
[deleted: MSBuild static-library project "mips64elsoftmmu" ({4478909E-6983-425C-9D9F-558CF258E61E}); same layout as mips-softmmu above, with per-target header mips64el.h.]
diff --git a/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj.filters b/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj.filters
deleted file mode 100644
index 88aa508a..00000000
--- a/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj.filters
+++ /dev/null
@@ -1,128 +0,0 @@
[deleted: filters matching mips-softmmu's groups (fpu, hw\mips, target-mips, tcg, tcg\i386).]
diff --git a/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj b/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj
deleted file mode 100644
index d37dc6f7..00000000
--- a/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj
+++ /dev/null
@@ -1,236 +0,0 @@
[deleted: MSBuild static-library project "mipselsoftmmu" ({006A7908-ABF3-4D18-BC35-0A29E39B95F9}); same layout as mips-softmmu above, with per-target header mipsel.h.]
diff --git a/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj.filters b/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj.filters
deleted file mode 100644
index 3ec346fb..00000000
--- a/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj.filters
+++ /dev/null
@@ -1,128 +0,0 @@
[deleted: filters matching mips-softmmu's groups (fpu, hw\mips, target-mips, tcg, tcg\i386).]
diff --git a/msvc/unicorn/prebuild_script.bat b/msvc/unicorn/prebuild_script.bat
deleted file mode 100644
index 417682a1..00000000
--- a/msvc/unicorn/prebuild_script.bat
+++ /dev/null
@@ -1,21 +0,0 @@
-del ..\..\qemu\qapi-types.h 2> null
-del ..\..\qemu\qapi-types.c 2> null
-
-del ..\..\qemu\qapi-visit.h 2> null
-del ..\..\qemu\qapi-visit.c 2> null
-
-del ..\..\qemu\config-host.h 2> null
-
-del ..\..\qemu\aarch-softmmu\config-target.h 2> null
-del ..\..\qemu\aarcheb-softmmu\config-target.h 2> null
-del ..\..\qemu\arm-softmmu\config-target.h 2> null
-del ..\..\qemu\armeb-softmmu\config-target.h 2> null
-del ..\..\qemu\m68k-softmmu\config-target.h 2> null
-del ..\..\qemu\mips64el-softmmu\config-target.h 2> null
-del ..\..\qemu\mips64-softmmu\config-target.h 2> null
-del ..\..\qemu\mipsel-softmmu\config-target.h 2> null
-del ..\..\qemu\mips-softmmu\config-target.h 2> null
-del ..\..\qemu\sparc64-softmmu\config-target.h 2> null
-del ..\..\qemu\sparc-softmmu\config-target.h 2> null
-del ..\..\qemu\x86_64-softmmu\config-target.h 2> null
-del null
diff --git a/msvc/unicorn/qapi-types.c b/msvc/unicorn/qapi-types.c
deleted file mode 100644
index 173c654b..00000000
--- a/msvc/unicorn/qapi-types.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
-
-/*
- * deallocation functions for schema-defined QAPI types
- *
- * Copyright IBM, Corp. 2011
- *
- * Authors:
- *  Anthony Liguori
- *  Michael Roth
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING.LIB file in the top-level directory.
- *
- */
-
-#include "qapi/dealloc-visitor.h"
-#include "qapi-types.h"
-#include "qapi-visit.h"
-
-const char *ErrorClass_lookup[] = {
-    "GenericError",
-    "CommandNotFound",
-    "DeviceEncrypted",
-    "DeviceNotActive",
-    "DeviceNotFound",
-    "KVMMissingCap",
-    NULL,
-};
-
-const char *X86CPURegister32_lookup[] = {
-    "EAX",
-    "EBX",
-    "ECX",
-    "EDX",
-    "ESP",
-    "EBP",
-    "ESI",
-    "EDI",
-    NULL,
-};
-
-#ifndef QAPI_TYPES_BUILTIN_CLEANUP_DEF_H
-#define QAPI_TYPES_BUILTIN_CLEANUP_DEF_H
-
-void qapi_free_strList(strList *obj)
-{
-    QapiDeallocVisitor *md;
-    Visitor *v;
-
-    if (!obj) {
-        return;
-    }
-
-    md = qapi_dealloc_visitor_new();
-    v = qapi_dealloc_get_visitor(md);
-    visit_type_strList(v, &obj, NULL, NULL);
-    qapi_dealloc_visitor_cleanup(md);
-}
[deleted along with it: identical deallocators for intList, numberList, boolList, int8List, int16List, int32List, int64List, uint8List, uint16List, uint32List and uint64List, differing only in the visit_type_* call]
-
-#endif /* QAPI_TYPES_BUILTIN_CLEANUP_DEF_H */
-
[deleted along with it: the same pattern for qapi_free_ErrorClassList, qapi_free_X86CPURegister32List, qapi_free_X86CPUFeatureWordInfoList and qapi_free_X86CPUFeatureWordInfo]
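Each deallocator above follows one recipe: allocate a QapiDeallocVisitor, obtain its generic Visitor interface, run the matching visit_type_* walk (which frees nodes and owned values as it goes), then destroy the visitor. A hypothetical caller-side sketch, assuming glib-style allocators as provided elsewhere in this tree:

    #include <glib.h>  /* g_malloc0, g_strdup -- assumed from the compat layer */

    /* Build a two-element strList by hand, then release nodes and strings
     * in one call through the generated deallocator. */
    static void demo_strList_free(void)
    {
        strList *head = g_malloc0(sizeof(*head));
        head->value = g_strdup("first");
        head->next = g_malloc0(sizeof(*head->next));
        head->next->value = g_strdup("second");  /* next->next stays NULL */

        qapi_free_strList(head);  /* dealloc visitor walks and frees the chain */
    }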
diff --git a/msvc/unicorn/qapi-types.h b/msvc/unicorn/qapi-types.h
deleted file mode 100644
index 944e8825..00000000
--- a/msvc/unicorn/qapi-types.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
-
-/*
- * schema-defined QAPI types
- *
- * Copyright IBM, Corp. 2011
- *
- * Authors:
- *  Anthony Liguori
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING.LIB file in the top-level directory.
- *
- */
-
-#ifndef QAPI_TYPES_H
-#define QAPI_TYPES_H
-
-#include "unicorn/platform.h"
-
-#ifndef QAPI_TYPES_BUILTIN_STRUCT_DECL_H
-#define QAPI_TYPES_BUILTIN_STRUCT_DECL_H
-
-typedef struct strList
-{
-    union {
-        char *value;
-        uint64_t padding;
-    };
-    struct strList *next;
-} strList;
[deleted along with it: analogous intList, numberList, boolList, int8List, int16List, int32List, int64List, uint8List, uint16List, uint32List and uint64List node types, each overlaying its value with uint64_t padding]
-
-#endif /* QAPI_TYPES_BUILTIN_STRUCT_DECL_H */
-
-extern const char *ErrorClass_lookup[];
-typedef enum ErrorClass
-{
-    ERROR_CLASS_GENERIC_ERROR = 0,
-    ERROR_CLASS_COMMAND_NOT_FOUND = 1,
-    ERROR_CLASS_DEVICE_ENCRYPTED = 2,
-    ERROR_CLASS_DEVICE_NOT_ACTIVE = 3,
-    ERROR_CLASS_DEVICE_NOT_FOUND = 4,
-    ERROR_CLASS_KVM_MISSING_CAP = 5,
-    ERROR_CLASS_MAX = 6,
-} ErrorClass;
-
-extern const char *X86CPURegister32_lookup[];
-typedef enum X86CPURegister32
-{
-    X86_CPU_REGISTER32_EAX = 0,
-    X86_CPU_REGISTER32_EBX = 1,
-    X86_CPU_REGISTER32_ECX = 2,
-    X86_CPU_REGISTER32_EDX = 3,
-    X86_CPU_REGISTER32_ESP = 4,
-    X86_CPU_REGISTER32_EBP = 5,
-    X86_CPU_REGISTER32_ESI = 6,
-    X86_CPU_REGISTER32_EDI = 7,
-    X86_CPU_REGISTER32_MAX = 8,
-} X86CPURegister32;
-
[deleted along with it: ErrorClassList, X86CPURegister32List and X86CPUFeatureWordInfoList node types with the same padded-union shape]
-
-struct X86CPUFeatureWordInfo
-{
-    int64_t cpuid_input_eax;
-    bool has_cpuid_input_ecx;
-    int64_t cpuid_input_ecx;
-    X86CPURegister32 cpuid_register;
-    int64_t features;
-};
-
[deleted along with it: the qapi_free_* declarations for all of the above]
-
-#endif
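Every list node above overlays its value with a uint64_t padding member, which pins the next pointer to the same offset in all of these types; that layout compatibility with QEMU's GenericList is what legitimizes the (GenericList **) casts in qapi-visit.c below. A small sketch of the invariant (GenericList reproduced here only for illustration; the real definition lives in the visitor headers):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative copy of the shape every generated list shares: a value
     * slot padded to 64 bits, followed by the next pointer. */
    typedef struct GenericList {
        union {
            void *value;
            uint64_t padding;
        };
        struct GenericList *next;
    } GenericList;

    /* int8_t is narrower than uint64_t, but the union keeps the value slot
     * eight bytes wide, so 'next' lands at the same offset as above. */
    typedef struct int8List {
        union {
            int8_t value;
            uint64_t padding;
        };
        struct int8List *next;
    } int8List;

    _Static_assert(offsetof(int8List, next) == offsetof(GenericList, next),
                   "generated lists are layout-compatible with GenericList");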
diff --git a/msvc/unicorn/qapi-visit.c b/msvc/unicorn/qapi-visit.c
deleted file mode 100644
index 7733bb55..00000000
--- a/msvc/unicorn/qapi-visit.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
-
-/*
- * schema-defined QAPI visitor functions
- *
- * Copyright IBM, Corp. 2011
- *
- * Authors:
- *  Anthony Liguori
- *
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
- * See the COPYING.LIB file in the top-level directory.
- *
- */
-
-#include "qemu-common.h"
-#include "qapi-visit.h"
-
-void visit_type_strList(Visitor *m, strList **obj, const char *name, Error **errp)
-{
-    Error *err = NULL;
-    GenericList *i, **prev;
-
-    visit_start_list(m, name, &err);
-    if (err) {
-        goto out;
-    }
-
-    for (prev = (GenericList **)obj;
-         !err && (i = visit_next_list(m, prev, &err)) != NULL;
-         prev = &i) {
-        strList *native_i = (strList *)i;
-        visit_type_str(m, &native_i->value, NULL, &err);
-    }
-
-    error_propagate(errp, err);
-    err = NULL;
-    visit_end_list(m, &err);
-out:
-    error_propagate(errp, err);
-}
[deleted along with it: visit_type_intList, visit_type_numberList, visit_type_boolList, visit_type_int8List, visit_type_int16List, visit_type_int32List, visit_type_int64List, visit_type_uint8List, visit_type_uint16List, visit_type_uint32List, visit_type_uint64List, visit_type_ErrorClassList and visit_type_X86CPURegister32List, each repeating the list-walk loop above with its own element visitor]
-
-void visit_type_ErrorClass(Visitor *m, ErrorClass *obj, const char *name, Error **errp)
-{
-    visit_type_enum(m, (int *)obj, ErrorClass_lookup, "ErrorClass", name, errp);
-}
-
-void visit_type_X86CPURegister32(Visitor *m, X86CPURegister32 *obj, const char *name, Error **errp)
-{
-    visit_type_enum(m, (int *)obj, X86CPURegister32_lookup, "X86CPURegister32", name, errp);
-}
-
-static void visit_type_X86CPUFeatureWordInfo_fields(Visitor *m, X86CPUFeatureWordInfo **obj, Error **errp)
-{
-    Error *err = NULL;
-    visit_type_int(m, &(*obj)->cpuid_input_eax, "cpuid-input-eax", &err);
-    if (err) {
-        goto out;
-    }
-    visit_optional(m, &(*obj)->has_cpuid_input_ecx, "cpuid-input-ecx", &err);
-    if (!err && (*obj)->has_cpuid_input_ecx) {
-        visit_type_int(m, &(*obj)->cpuid_input_ecx, "cpuid-input-ecx", &err);
-    }
-    if (err) {
-        goto out;
-    }
-    visit_type_X86CPURegister32(m, &(*obj)->cpuid_register, "cpuid-register", &err);
-    if (err) {
-        goto out;
-    }
-    visit_type_int(m, &(*obj)->features, "features", &err);
-    if (err) {
-        goto out;
-    }
-
-out:
-    error_propagate(errp, err);
-}
-
-void visit_type_X86CPUFeatureWordInfo(Visitor *m, X86CPUFeatureWordInfo **obj, const char *name, Error **errp)
-{
-    Error *err = NULL;
-
-    visit_start_struct(m, (void **)obj, "X86CPUFeatureWordInfo", name, sizeof(X86CPUFeatureWordInfo), &err);
-    if (!err) {
-        if (*obj) {
-            visit_type_X86CPUFeatureWordInfo_fields(m, obj, errp);
-        }
-        visit_end_struct(m, &err);
-    }
-    error_propagate(errp, err);
-}
-
[deleted along with it: visit_type_X86CPUFeatureWordInfoList, using the same list-walk pattern]
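visit_type_ErrorClass and visit_type_X86CPURegister32 both collapse into visit_type_enum over the NULL-terminated _lookup tables defined in qapi-types.c, so enum-to-name is plain indexing and name-to-enum is a linear scan. A hypothetical round trip, assuming the qapi-types.h declarations are in scope:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical helper (not part of the patch) mirroring what
     * visit_type_enum does on string input: scan the NULL-terminated
     * table for a matching wire name. */
    static int error_class_from_name(const char *name)
    {
        int i;
        for (i = 0; ErrorClass_lookup[i] != NULL; i++) {
            if (strcmp(ErrorClass_lookup[i], name) == 0) {
                return i;
            }
        }
        return -1; /* unknown name */
    }

    static void demo_lookup(void)
    {
        /* Enum -> name is direct indexing into the table. */
        printf("%s\n", ErrorClass_lookup[ERROR_CLASS_DEVICE_NOT_FOUND]); /* DeviceNotFound */
        printf("%d\n", error_class_from_name("KVMMissingCap"));          /* 5 */
    }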
51bd0887..00000000 --- a/msvc/unicorn/qapi-visit.h +++ /dev/null @@ -1,51 +0,0 @@ -/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ - -/* - * schema-defined QAPI visitor functions - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#ifndef QAPI_VISIT_H -#define QAPI_VISIT_H - -#include "qapi/visitor.h" -#include "qapi-types.h" - - -#ifndef QAPI_VISIT_BUILTIN_VISITOR_DECL_H -#define QAPI_VISIT_BUILTIN_VISITOR_DECL_H - -void visit_type_strList(Visitor *m, strList **obj, const char *name, Error **errp); -void visit_type_intList(Visitor *m, intList **obj, const char *name, Error **errp); -void visit_type_numberList(Visitor *m, numberList **obj, const char *name, Error **errp); -void visit_type_boolList(Visitor *m, boolList **obj, const char *name, Error **errp); -void visit_type_int8List(Visitor *m, int8List **obj, const char *name, Error **errp); -void visit_type_int16List(Visitor *m, int16List **obj, const char *name, Error **errp); -void visit_type_int32List(Visitor *m, int32List **obj, const char *name, Error **errp); -void visit_type_int64List(Visitor *m, int64List **obj, const char *name, Error **errp); -void visit_type_uint8List(Visitor *m, uint8List **obj, const char *name, Error **errp); -void visit_type_uint16List(Visitor *m, uint16List **obj, const char *name, Error **errp); -void visit_type_uint32List(Visitor *m, uint32List **obj, const char *name, Error **errp); -void visit_type_uint64List(Visitor *m, uint64List **obj, const char *name, Error **errp); - -#endif /* QAPI_VISIT_BUILTIN_VISITOR_DECL_H */ - - -void visit_type_ErrorClass(Visitor *m, ErrorClass *obj, const char *name, Error **errp); -void visit_type_ErrorClassList(Visitor *m, ErrorClassList **obj, const char *name, Error **errp); - -void visit_type_X86CPURegister32(Visitor *m, X86CPURegister32 *obj, const char *name, Error **errp); -void visit_type_X86CPURegister32List(Visitor *m, X86CPURegister32List **obj, const char *name, Error **errp); - -void visit_type_X86CPUFeatureWordInfo(Visitor *m, X86CPUFeatureWordInfo **obj, const char *name, Error **errp); -void visit_type_X86CPUFeatureWordInfoList(Visitor *m, X86CPUFeatureWordInfoList **obj, const char *name, Error **errp); - -#endif diff --git a/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj b/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj deleted file mode 100644 index da86510b..00000000 --- a/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj +++ /dev/null @@ -1,229 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - true - true - true - - - - - - - {698C2D54-475C-446F-B879-F629BBEF75FE} - Win32Proj - sparcsoftmmu - - - - StaticLibrary - true - $(DefaultPlatformToolset) - MultiByte - - - StaticLibrary - true - $(DefaultPlatformToolset) - MultiByte - - - StaticLibrary - false - $(DefaultPlatformToolset) - false - MultiByte - - - StaticLibrary - false - $(DefaultPlatformToolset) - false - MultiByte - - - - - - - - - - - - - - - - - - - $(SolutionDir)$(Platform)\$(Configuration)\ - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(SolutionDir)$(Platform)\$(Configuration)\ - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(ProjectDir)$(Platform)\$(Configuration)\ - - - - - - Level3 - Disabled - 
WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreadedDebug - false - sparc.h - - - Windows - true - - - ..\prebuild_script.bat - - - - - - - Level3 - Disabled - WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreadedDebug - false - sparc.h - ProgramDatabase - - - Windows - true - - - ..\prebuild_script.bat - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreaded - false - sparc.h - None - - - Windows - true - true - true - - - ..\prebuild_script.bat - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreaded - false - sparc.h - None - - - Windows - true - true - true - - - ..\prebuild_script.bat - - - - - - \ No newline at end of file diff --git a/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj.filters b/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj.filters deleted file mode 100644 index 95458e1d..00000000 --- a/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj.filters +++ /dev/null @@ -1,122 +0,0 @@ - - - - - {d424a094-0a26-4db6-85e9-e75f81c6e142} - - - {dec3e9f1-0fc3-454e-ae70-78d866b5c3f8} - - - {0a04b798-735c-4115-ab62-b6cfc772efed} - - - {10f0c533-e16b-4e31-b979-812af068bb36} - - - {b229bf2b-cc40-4135-b3b7-40c73bd9f597} - - - {133a0d6a-6f71-4d4b-be6d-f90636aa02a2} - - - - - fpu - - - fpu - - - tcg - - - tcg - - - tcg - - - tcg - - - tcg - - - tcg - - - tcg\i386 - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - - - - - - fpu - - - tcg - - - tcg - - - tcg\i386 - - - - - - - - - - - hw\sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - \ No newline at end of file diff --git a/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj b/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj deleted file mode 100644 index dd65e2c4..00000000 --- a/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj +++ /dev/null @@ -1,230 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - true - true - true - - - - - - - {8804AD29-E398-480C-AC0F-98EC1B7A51CB} - Win32Proj - sparc64softmmu - - - - StaticLibrary - true - $(DefaultPlatformToolset) - MultiByte - - - StaticLibrary - true - 
$(DefaultPlatformToolset) - MultiByte - - - StaticLibrary - false - $(DefaultPlatformToolset) - false - MultiByte - - - StaticLibrary - false - $(DefaultPlatformToolset) - false - MultiByte - - - - - - - - - - - - - - - - - - - $(SolutionDir)$(Platform)\$(Configuration)\ - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(SolutionDir)$(Platform)\$(Configuration)\ - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(ProjectDir)$(Platform)\$(Configuration)\ - - - - - - Level3 - Disabled - WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreadedDebug - false - sparc64.h - - - Windows - true - - - ..\prebuild_script.bat - - - - - - - Level3 - Disabled - WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreadedDebug - false - sparc64.h - ProgramDatabase - - - Windows - true - - - ..\prebuild_script.bat - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreaded - false - sparc64.h - None - - - Windows - true - true - true - - - ..\prebuild_script.bat - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreaded - false - sparc64.h - None - - - Windows - true - true - true - - - ..\prebuild_script.bat - - - - - - \ No newline at end of file diff --git a/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj.filters b/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj.filters deleted file mode 100644 index cf669e84..00000000 --- a/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj.filters +++ /dev/null @@ -1,125 +0,0 @@ - - - - - {ee6a3abf-6f14-47ab-8b40-f859d030230a} - - - {4a271b0a-7736-4457-98f8-8c21ad4d2601} - - - {46e355ab-da0b-431b-929b-8d77b3ab90bc} - - - {1f15e2b2-fae3-41e5-b787-70c44beb828c} - - - {7c7b0370-fe65-4c21-94e6-f4561470087d} - - - {51a4ddc6-4078-4db0-9b29-c68c558b2f93} - - - - - fpu - - - fpu - - - tcg - - - tcg - - - tcg - - - tcg - - - tcg - - - tcg - - - tcg\i386 - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - - - - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - target-sparc - - - fpu - - - tcg - - - tcg - - - tcg\i386 - - - - - - - - - - - hw\sparc64 - - - \ No newline at end of file diff --git a/msvc/unicorn/unicorn/unicorn.vcxproj 
b/msvc/unicorn/unicorn/unicorn.vcxproj deleted file mode 100644 index 4b896c9e..00000000 --- a/msvc/unicorn/unicorn/unicorn.vcxproj +++ /dev/null @@ -1,373 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B} - Win32Proj - unicorn - unicorn - - - - DynamicLibrary - true - $(DefaultPlatformToolset) - MultiByte - - - DynamicLibrary - true - $(DefaultPlatformToolset) - MultiByte - - - DynamicLibrary - false - $(DefaultPlatformToolset) - false - MultiByte - - - DynamicLibrary - false - $(DefaultPlatformToolset) - false - MultiByte - - - - - - - - - - - - - - - - - - - - true - $(SolutionDir)$(Platform)\$(Configuration)\ - $(ProjectDir)$(Platform)\$(Configuration)\ - - - true - $(ProjectDir)$(Platform)\$(Configuration)\ - - - false - $(SolutionDir)$(Platform)\$(Configuration)\ - $(ProjectDir)$(Platform)\$(Configuration)\ - - - false - $(ProjectDir)$(Platform)\$(Configuration)\ - - - - NotUsing - Level3 - Disabled - WIN32;_DEBUG;_WINDOWS;_USRDLL;UNICORN_DLL_EXPORTS;UNICORN_SHARED;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__i386__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN - .;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreadedDebug - false - - - Windows - true - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib - $(SolutionDir)$(Platform)\$(Configuration)\ - - - - - NotUsing - Level3 - Disabled - WIN32;_DEBUG;_WINDOWS;_USRDLL;UNICORN_DLL_EXPORTS;UNICORN_SHARED;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__x86_64__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN - .;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreadedDebug - false - ProgramDatabase - - - Windows - true - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib - $(SolutionDir)$(Platform)\$(Configuration)\ - - - - - Level3 - NotUsing - MaxSpeed - true - true - WIN32;NDEBUG;_WINDOWS;_USRDLL;UNICORN_DLL_EXPORTS;UNICORN_SHARED;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__i386__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN - .;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreaded - false - None - - - 
Windows - true - true - true - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib - $(SolutionDir)$(Platform)\$(Configuration)\ - - - mkdir "$(SolutionDir)distro\" -mkdir "$(SolutionDir)distro\include" -mkdir "$(SolutionDir)distro\include\unicorn" -mkdir "$(SolutionDir)distro\$(Platform)" - -copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).dll" "$(SolutionDir)distro\$(Platform)\" -copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).lib" "$(SolutionDir)distro\$(Platform)\" - -copy "$(SolutionDir)..\include\unicorn\*.h" "$(SolutionDir)distro\include\unicorn\" - - - - - - Level3 - NotUsing - MaxSpeed - true - true - WIN32;NDEBUG;_WINDOWS;_USRDLL;UNICORN_DLL_EXPORTS;UNICORN_SHARED;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__x86_64__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN - .;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - MultiThreaded - false - None - - - Windows - true - true - true - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib - $(SolutionDir)$(Platform)\$(Configuration)\ - - - mkdir "$(SolutionDir)distro\" -mkdir "$(SolutionDir)distro\include" -mkdir "$(SolutionDir)distro\include\unicorn" -mkdir "$(SolutionDir)distro\$(Platform)" - -copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).dll" "$(SolutionDir)distro\$(Platform)\" -copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).lib" "$(SolutionDir)distro\$(Platform)\" - -copy "$(SolutionDir)..\include\unicorn\*.h" "$(SolutionDir)distro\include\unicorn\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - false - false - - - - - false - false - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - true - false - false - - - - - - - \ No newline at end of file diff --git a/msvc/unicorn/unicorn/unicorn.vcxproj.filters b/msvc/unicorn/unicorn/unicorn.vcxproj.filters deleted file mode 100644 index da3121fc..00000000 --- a/msvc/unicorn/unicorn/unicorn.vcxproj.filters +++ /dev/null @@ -1,505 +0,0 @@ - - - - - - priv - - - qemu - - - qemu - - - qemu - - - qemu - - - qemu - - - qemu - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\hw\core - - - qemu\hw\core - - - qemu\qapi - - - qemu\qapi - - - qemu\qapi - - - qemu\qapi - - - qemu\qapi - - - qemu\qobject - - - qemu\qobject - 
- - qemu\qobject - - - qemu\qobject - - - qemu\qobject - - - qemu\qobject - - - qemu\qobject - - - qemu\qom - - - qemu\qom - - - qemu\qom - - - qemu\qom - - - - qemu - - - qemu - - - - - {1d58ccd1-7ea5-40e0-ba38-304ac34bf08e} - - - {438f79b0-21e5-4a21-8e61-271b88a3130d} - - - {88e75519-13ca-431e-8b6d-d915d5e12231} - - - {669d34d7-8f38-47ac-ac1b-1f485bc9d6eb} - - - {2182308b-b1c8-4ac4-a779-c9d86c3cf97d} - - - {70273f5a-23c0-4274-acc8-0c398ec327e7} - - - {b84e89c5-c18f-4505-a2b9-b6cacbf97d1a} - - - {c0e7454f-a22a-4410-87e9-bd8668a2fc5b} - - - {0d15a173-37a6-4507-a128-de4316618e68} - - - {fcbdd971-b481-4edc-a96a-3cdaeeadf2e9} - - - {01269629-99a8-41ee-9595-b1c745b1a044} - - - {e1755d99-2324-43b4-9896-2400610e0b31} - - - {daf8ad72-5390-43b2-8c7b-082ce1084aed} - - - {da199412-30ad-4c72-9a5a-a7b280c00021} - - - {5f01eb28-ffe0-4371-a677-32ded26a33e2} - - - {dfebfef7-1435-4d09-89f6-94fb929f3488} - - - {969bee88-382e-4c05-9205-074f24bdaf82} - - - {7195ce60-b300-4dbe-8072-3e812167a036} - - - {0f13072a-571c-4c81-bef3-513758b38832} - - - {0ae19983-bbd0-448f-a319-574d45f59dfe} - - - {5653dd50-a557-4573-8f43-7ef26d0d4190} - - - {36443fca-61fc-4f64-a872-2ddd3d823cb9} - - - {5e47a1c2-85ab-48d6-921e-8915438c2cbf} - - - - - priv - - - priv - - - priv - - - qemu - - - qemu - - - qemu - - - qemu - - - qemu\include - - - qemu\include - - - qemu\include - - - qemu\include - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\fpu - - - qemu\include\hw - - - qemu\include\hw - - - qemu\include\hw - - - qemu\include\hw - - - qemu\include\hw - - - qemu\include\hw\arm - - - qemu\include\hw\cpu - - - qemu\include\hw\i386 - - - qemu\include\hw\i386 - - - qemu\include\hw\i386 - - - qemu\include\hw\m68k - - - qemu\include\hw\m68k - - - qemu\include\hw\mips - - - qemu\include\hw\mips - - - qemu\include\hw\mips - - - qemu\include\hw\sparc - - - qemu\include\hw\sparc - - - qemu\include\hw\sparc - - - qemu\include\hw\sparc - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qom - - - qemu\include\qom - - - qemu\include\qom - - - qemu\include\sysemu - - - qemu\include\sysemu - - - qemu\include\sysemu - - - qemu\include\sysemu - - - qemu\include\sysemu - - - - - - - 
- - - - - - - \ No newline at end of file diff --git a/msvc/unicorn/unicorn_static/unicorn_static.vcxproj b/msvc/unicorn/unicorn_static/unicorn_static.vcxproj deleted file mode 100644 index 57551e52..00000000 --- a/msvc/unicorn/unicorn_static/unicorn_static.vcxproj +++ /dev/null @@ -1,377 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - true - false - false - - - - {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} - Win32Proj - unicorn_static - unicorn_static - - - - StaticLibrary - true - $(DefaultPlatformToolset) - MultiByte - - - StaticLibrary - true - $(DefaultPlatformToolset) - MultiByte - - - StaticLibrary - false - $(DefaultPlatformToolset) - false - MultiByte - - - StaticLibrary - false - $(DefaultPlatformToolset) - false - MultiByte - - - - - - - - - - - - - - - - - - - - $(ProjectDir)$(Platform)\$(Configuration)\ - $(SolutionDir)$(Platform)\$(Configuration)\ - - - $(SolutionDir)$(Platform)\$(Configuration)\ - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(ProjectDir)$(Platform)\$(Configuration)\ - - - - - - Level3 - Disabled - WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__i386__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN - MultiThreadedDebug - .;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - false - - - Windows - true - - - $(SolutionDir)$(Platform)\$(Configuration)\ - aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib - - - ..\prebuild_script.bat - - - - - - - Level3 - Disabled - WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__x86_64__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN - MultiThreadedDebug - .;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - false - ProgramDatabase - - - Windows - true - - - $(SolutionDir)$(Platform)\$(Configuration)\ - aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib - - - ..\prebuild_script.bat - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__i386__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN - MultiThreaded - .;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) 
- false - None - - - Windows - true - true - true - - - $(SolutionDir)$(Platform)\$(Configuration)\ - aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib - - - ..\prebuild_script.bat - - - mkdir "$(SolutionDir)distro\" -mkdir "$(SolutionDir)distro\include" -mkdir "$(SolutionDir)distro\include\unicorn" -mkdir "$(SolutionDir)distro\$(Platform)" - -copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).lib" "$(SolutionDir)distro\$(Platform)\" - -copy "$(SolutionDir)..\include\unicorn\*.h" "$(SolutionDir)distro\include\unicorn\" - - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__x86_64__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN - MultiThreaded - .;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - false - None - - - Windows - true - true - true - - - $(SolutionDir)$(Platform)\$(Configuration)\ - aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib - - - ..\prebuild_script.bat - - - mkdir "$(SolutionDir)distro\" -mkdir "$(SolutionDir)distro\include" -mkdir "$(SolutionDir)distro\include\unicorn" -mkdir "$(SolutionDir)distro\$(Platform)" - -copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).lib" "$(SolutionDir)distro\$(Platform)\" - -copy "$(SolutionDir)..\include\unicorn\*.h" "$(SolutionDir)distro\include\unicorn\" - - - - - - - - \ No newline at end of file diff --git a/msvc/unicorn/unicorn_static/unicorn_static.vcxproj.filters b/msvc/unicorn/unicorn_static/unicorn_static.vcxproj.filters deleted file mode 100644 index 121c3eee..00000000 --- a/msvc/unicorn/unicorn_static/unicorn_static.vcxproj.filters +++ /dev/null @@ -1,504 +0,0 @@ - - - - - {13755027-3a13-46c3-9468-ed380fcef603} - - - {4f904f9f-fd99-4ebe-8db7-2ee2c644c6e4} - - - {1f3288bd-38e9-49c1-ae30-6ac4bc1b86c4} - - - {d98a987f-6e81-4454-9bb4-f79d49d1d8fa} - - - {9b261303-0cae-4e60-8bc9-c63cd6abd5bc} - - - {4078dd72-489d-48e6-a7c7-e27149f9513d} - - - {9264dcdf-55d8-4416-9b53-7962937b4db5} - - - {973f87b6-2729-473f-bda6-d61d8c799a77} - - - {235236d2-79fa-48f5-b496-cb79a9290f6b} - - - {4bb86c12-fd75-40be-9891-e4a84ca60703} - - - {b210c6e7-454a-400c-84c4-d2a10d96db1d} - - - {8bdec3ae-c802-4443-a6f4-e26bd030a1cf} - - - {e7cfa963-4fb5-4c9a-a264-402decbea01d} - - - {1890ac05-1098-492d-bc0d-50b6e8dd7fc0} - - - {76e19a4b-1143-456a-900c-9ce6c9c0d267} - - - {0e879645-49d7-4d24-9736-f85d69acceda} - - - {9ecedd1d-44a7-40d1-ad99-f06e49b39aa8} - - - {50ff932c-1464-4742-af74-fc9b42e4ef3a} - - - {ea61989d-dc0a-4146-87a0-63e5131d5302} - - - {044502cd-501d-40eb-86d0-4c8db24104c7} - - - {b1debb6d-f445-4f95-9778-d6b926541606} - - - {c5faa816-aae6-41b7-ac8d-40a9783786e0} - - - {62c5db3f-0d6d-4a3a-92b7-0cd602058a62} - - - - - priv - - - qemu - - - qemu - - - qemu - - - qemu - - - qemu - - - qemu - - - qemu\hw\core - - - qemu\hw\core - - - qemu\qapi - - - qemu\qapi - - - qemu\qapi - - - qemu\qapi - - - qemu\qapi - - - qemu\qobject - - - qemu\qobject - - - qemu\qobject - - - 
qemu\qobject - - - qemu\qobject - - - qemu\qobject - - - qemu\qobject - - - qemu\qom - - - qemu\qom - - - qemu\qom - - - qemu\qom - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - qemu\util - - - - qemu - - - qemu - - - - - priv - - - priv - - - priv - - - qemu - - - qemu\include - - - qemu\include - - - qemu\include - - - qemu\include - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\exec - - - qemu\include\fpu - - - qemu\include\hw - - - qemu\include\hw - - - qemu\include\hw - - - qemu\include\hw - - - qemu\include\hw - - - qemu\include\hw\arm - - - qemu\include\hw\cpu - - - qemu\include\hw\i386 - - - qemu\include\hw\i386 - - - qemu\include\hw\i386 - - - qemu\include\hw\m68k - - - qemu\include\hw\m68k - - - qemu\include\hw\mips - - - qemu\include\hw\mips - - - qemu\include\hw\mips - - - qemu\include\hw\sparc - - - qemu\include\hw\sparc - - - qemu\include\hw\sparc - - - qemu\include\hw\sparc - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qapi\qmp - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qemu - - - qemu\include\qom - - - qemu\include\qom - - - qemu\include\qom - - - qemu\include\sysemu - - - qemu\include\sysemu - - - qemu\include\sysemu - - - qemu\include\sysemu - - - qemu\include\sysemu - - - - - - - - - - - qemu - - - qemu - - - qemu - - - - - - \ No newline at end of file diff --git a/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj b/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj deleted file mode 100644 index 672023f0..00000000 --- a/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj +++ /dev/null @@ -1,242 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - true - true - true - - - - - - - {17077E86-AE7C-41AF-86ED-2BAC03B019BC} - Win32Proj - x86_64softmmu - - - - StaticLibrary - true - $(DefaultPlatformToolset) - MultiByte - - - StaticLibrary - true - $(DefaultPlatformToolset) - MultiByte - - - StaticLibrary - false - $(DefaultPlatformToolset) - false - MultiByte - - - StaticLibrary - false - 
$(DefaultPlatformToolset) - false - MultiByte - - - - - - - - - - - - - - - - - - - $(ProjectDir)$(Platform)\$(Configuration)\ - $(SolutionDir)$(Platform)\$(Configuration)\ - - - $(SolutionDir)$(Platform)\$(Configuration)\ - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(ProjectDir)$(Platform)\$(Configuration)\ - - - - - - Level3 - Disabled - WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - MultiThreadedDebug - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-i386;../../../include - x86_64.h - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - false - - - Windows - true - - - ..\prebuild_script.bat - - - - - - - Level3 - Disabled - WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - MultiThreadedDebug - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-i386;../../../include - x86_64.h - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - false - ProgramDatabase - - - Windows - true - - - ..\prebuild_script.bat - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - MultiThreaded - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-i386;../../../include - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - false - x86_64.h - None - - - Windows - true - true - true - - - ..\prebuild_script.bat - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN - MultiThreaded - .;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-i386;../../../include - /wd4018 /wd4244 /wd4267 %(AdditionalOptions) - false - x86_64.h - None - - - Windows - true - true - true - - - ..\prebuild_script.bat - - - - - - \ No newline at end of file diff --git a/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj.filters b/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj.filters deleted file mode 100644 index 2ee64ced..00000000 --- a/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj.filters +++ /dev/null @@ -1,164 +0,0 @@ - - - - - {db59a62f-c036-40c3-9dd8-8b30c9f06415} - - - {42fe7224-78f7-45a5-a173-9ed3bdeb1985} - - - {f33c9635-4286-435a-ab9c-3f2f245ce7f9} - - - {afdb0084-499f-46ea-97a2-6920a8f64800} - - - {dc6b560b-40ea-47a1-91f1-50718313849f} - - - {d3a1fd5b-09b0-4896-af49-5b3668f03a72} - - - {9b7b99b2-982e-46b5-aff2-1ff3a353d3db} - - - - - tcg - - - tcg - - - tcg - - - tcg - - - tcg - - - tcg - - - fpu - - - fpu - - - target-i386 - - - tcg\i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - - - - - - - - - - - - - - fpu - - - hw\i386 - - - hw\i386 - - - hw\intc - - - hw\intc - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - target-i386 - - - tcg - - - tcg - - - tcg\i386 - - - \ No newline at end of 
file diff --git a/msvc/unicorn/x86_64-softmmu/config-target.h b/msvc/x86_64-softmmu/config-target.h similarity index 100% rename from msvc/unicorn/x86_64-softmmu/config-target.h rename to msvc/x86_64-softmmu/config-target.h diff --git a/nmake.bat b/nmake.bat index f87ae422..e3542a29 100644 --- a/nmake.bat +++ b/nmake.bat @@ -1,6 +1,6 @@ :: Unicorn Emulator Engine :: Build Unicorn libs on Windows with CMake & Nmake -:: Usage: nmake.bat [x86 arm aarch64 m68k mips sparc], default build all. +:: Usage: nmake.bat [x86 arm aarch64 m68k mips sparc riscv], default build all. :: By Huitao Chen, 2019 @echo off diff --git a/pkgconfig.mk b/pkgconfig.mk deleted file mode 100644 index d09cdaaa..00000000 --- a/pkgconfig.mk +++ /dev/null @@ -1,13 +0,0 @@ -# Package version of Unicorn for Makefile. -# To be used to generate unicorn.pc for pkg-config -# Also used to generate python package version - -# version major & minor -PKG_MAJOR = 1 -PKG_MINOR = 0 - -# version bugfix level. Example: PKG_EXTRA = 1 -PKG_EXTRA = 3 - -# version tag. Examples: rc1, b2, post1 -# PKG_TAG = rc6 diff --git a/qemu/.editorconfig b/qemu/.editorconfig new file mode 100644 index 00000000..a001f340 --- /dev/null +++ b/qemu/.editorconfig @@ -0,0 +1,43 @@ +# EditorConfig is a file format and collection of text editor plugins +# for maintaining consistent coding styles between different editors +# and IDEs. Most popular editors support this either natively or via +# plugin. +# +# Check https://editorconfig.org for details. + +root = true + +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 + +[*.mak] +indent_style = tab +indent_size = 8 +file_type_emacs = makefile + +[Makefile*] +indent_style = tab +indent_size = 8 +file_type_emacs = makefile + +[*.{c,h}] +indent_style = space +indent_size = 4 + +[*.sh] +indent_style = space +indent_size = 4 + +[*.{s,S}] +indent_style = tab +indent_size = 8 +file_type_emacs = asm + +[*.{vert,frag}] +file_type_emacs = glsl + +[*.json] +indent_style = space +file_type_emacs = python diff --git a/qemu/CODING_STYLE b/qemu/CODING_STYLE deleted file mode 100644 index d46cfa5f..00000000 --- a/qemu/CODING_STYLE +++ /dev/null @@ -1,107 +0,0 @@ -QEMU Coding Style -================= - -Please use the script checkpatch.pl in the scripts directory to check -patches before submitting. - -1. Whitespace - -Of course, the most important aspect in any coding style is whitespace. -Crusty old coders who have trouble spotting the glasses on their noses -can tell the difference between a tab and eight spaces from a distance -of approximately fifteen parsecs. Many a flamewar have been fought and -lost on this issue. - -QEMU indents are four spaces. Tabs are never used, except in Makefiles -where they have been irreversibly coded into the syntax. -Spaces of course are superior to tabs because: - - - You have just one way to specify whitespace, not two. Ambiguity breeds - mistakes. - - The confusion surrounding 'use tabs to indent, spaces to justify' is gone. - - Tab indents push your code to the right, making your screen seriously - unbalanced. - - Tabs will be rendered incorrectly on editors who are misconfigured not - to use tab stops of eight positions. - - Tabs are rendered badly in patches, causing off-by-one errors in almost - every line. - - It is the QEMU coding style. - -Do not leave whitespace dangling off the ends of lines. - -2. Line width - -Lines are 80 characters; not longer. 
- -Rationale: - - Some people like to tile their 24" screens with a 6x4 matrix of 80x24 - xterms and use vi in all of them. The best way to punish them is to - let them keep doing it. - - Code and especially patches is much more readable if limited to a sane - line length. Eighty is traditional. - - It is the QEMU coding style. - -3. Naming - -Variables are lower_case_with_underscores; easy to type and read. Structured -type names are in CamelCase; harder to type but standing out. Enum type -names and function type names should also be in CamelCase. Scalar type -names are lower_case_with_underscores_ending_with_a_t, like the POSIX -uint64_t and family. Note that this last convention contradicts POSIX -and is therefore likely to be changed. - -When wrapping standard library functions, use the prefix qemu_ to alert -readers that they are seeing a wrapped version; otherwise avoid this prefix. - -4. Block structure - -Every indented statement is braced; even if the block contains just one -statement. The opening brace is on the line that contains the control -flow statement that introduces the new block; the closing brace is on the -same line as the else keyword, or on a line by itself if there is no else -keyword. Example: - - if (a == 5) { - printf("a was 5.\n"); - } else if (a == 6) { - printf("a was 6.\n"); - } else { - printf("a was something else entirely.\n"); - } - -Note that 'else if' is considered a single statement; otherwise a long if/ -else if/else if/.../else sequence would need an indent for every else -statement. - -An exception is the opening brace for a function; for reasons of tradition -and clarity it comes on a line by itself: - - void a_function(void) - { - do_something(); - } - -Rationale: a consistent (except for functions...) bracing style reduces -ambiguity and avoids needless churn when lines are added or removed. -Furthermore, it is the QEMU coding style. - -5. Declarations - -Mixed declarations (interleaving statements and declarations within blocks) -are not allowed; declarations should be at the beginning of blocks. In other -words, the code should not generate warnings if using GCC's --Wdeclaration-after-statement option. - -6. Conditional statements - -When comparing a variable for (in)equality with a constant, list the -constant on the right, as in: - -if (a == 1) { - /* Reads like: "If a equals 1" */ - do_something(); -} - -Rationale: Yoda conditions (as in 'if (1 == a)') are awkward to read. -Besides, good compilers already warn users when '==' is mis-typed as '=', -even when the constant is on the right. diff --git a/qemu/CODING_STYLE.rst b/qemu/CODING_STYLE.rst new file mode 100644 index 00000000..427699e0 --- /dev/null +++ b/qemu/CODING_STYLE.rst @@ -0,0 +1,641 @@ +================= +QEMU Coding Style +================= + +.. contents:: Table of Contents + +Please use the script checkpatch.pl in the scripts directory to check +patches before submitting. + +Formatting and style +******************** + +Whitespace +========== + +Of course, the most important aspect in any coding style is whitespace. +Crusty old coders who have trouble spotting the glasses on their noses +can tell the difference between a tab and eight spaces from a distance +of approximately fifteen parsecs. Many a flamewar has been fought and +lost on this issue. + +QEMU indents are four spaces. Tabs are never used, except in Makefiles +where they have been irreversibly coded into the syntax. 
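As a minimal illustration of the four-space rule (the function below is invented for this edit, not taken from the codebase):

.. code-block:: c

    static void example(int n)
    {
        if (n > 0) {
            n -= 1;    /* each nesting level adds exactly four spaces */
        }
    }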
+Spaces of course are superior to tabs because: + +* You have just one way to specify whitespace, not two. Ambiguity breeds + mistakes. +* The confusion surrounding 'use tabs to indent, spaces to justify' is gone. +* Tab indents push your code to the right, making your screen seriously + unbalanced. +* Tabs will be rendered incorrectly on editors who are misconfigured not + to use tab stops of eight positions. +* Tabs are rendered badly in patches, causing off-by-one errors in almost + every line. +* It is the QEMU coding style. + +Do not leave whitespace dangling off the ends of lines. + +Multiline Indent +---------------- + +There are several places where indent is necessary: + +* if/else +* while/for +* function definition & call + +When breaking up a long line to fit within line width, we need a proper indent +for the following lines. + +In case of if/else, while/for, align the secondary lines just after the +opening parenthesis of the first. + +For example: + +.. code-block:: c + + if (a == 1 && + b == 2) { + + while (a == 1 && + b == 2) { + +In case of function, there are several variants: + +* 4 spaces indent from the beginning +* align the secondary lines just after the opening parenthesis of the first + +For example: + +.. code-block:: c + + do_something(x, y, + z); + + do_something(x, y, + z); + + do_something(x, do_another(y, + z)); + +Line width +========== + +Lines should be 80 characters; try not to make them longer. + +Sometimes it is hard to do, especially when dealing with QEMU subsystems +that use long function or symbol names. Even in that case, do not make +lines much longer than 80 characters. + +Rationale: + +* Some people like to tile their 24" screens with a 6x4 matrix of 80x24 + xterms and use vi in all of them. The best way to punish them is to + let them keep doing it. +* Code and especially patches is much more readable if limited to a sane + line length. Eighty is traditional. +* The four-space indentation makes the most common excuse ("But look + at all that white space on the left!") moot. +* It is the QEMU coding style. + +Naming +====== + +Variables are lower_case_with_underscores; easy to type and read. Structured +type names are in CamelCase; harder to type but standing out. Enum type +names and function type names should also be in CamelCase. Scalar type +names are lower_case_with_underscores_ending_with_a_t, like the POSIX +uint64_t and family. Note that this last convention contradicts POSIX +and is therefore likely to be changed. + +When wrapping standard library functions, use the prefix ``qemu_`` to alert +readers that they are seeing a wrapped version; otherwise avoid this prefix. + +Block structure +=============== + +Every indented statement is braced; even if the block contains just one +statement. The opening brace is on the line that contains the control +flow statement that introduces the new block; the closing brace is on the +same line as the else keyword, or on a line by itself if there is no else +keyword. Example: + +.. code-block:: c + + if (a == 5) { + printf("a was 5.\n"); + } else if (a == 6) { + printf("a was 6.\n"); + } else { + printf("a was something else entirely.\n"); + } + +Note that 'else if' is considered a single statement; otherwise a long if/ +else if/else if/.../else sequence would need an indent for every else +statement. + +An exception is the opening brace for a function; for reasons of tradition +and clarity it comes on a line by itself: + +.. 
code-block:: c
+
+    void a_function(void)
+    {
+        do_something();
+    }
+
+Rationale: a consistent (except for functions...) bracing style reduces
+ambiguity and avoids needless churn when lines are added or removed.
+Furthermore, it is the QEMU coding style.
+
+Declarations
+============
+
+Mixed declarations (interleaving statements and declarations within
+blocks) are generally not allowed; declarations should be at the beginning
+of blocks.
+
+Every now and then, an exception is made for declarations inside a
+#ifdef or #ifndef block: if the code looks nicer, such declarations can
+be placed at the top of the block even if there are statements above.
+On the other hand, however, it's often best to move that #ifdef/#ifndef
+block to a separate function altogether.
+
+Conditional statements
+======================
+
+When comparing a variable for (in)equality with a constant, list the
+constant on the right, as in:
+
+.. code-block:: c
+
+    if (a == 1) {
+        /* Reads like: "If a equals 1" */
+        do_something();
+    }
+
+Rationale: Yoda conditions (as in 'if (1 == a)') are awkward to read.
+Besides, good compilers already warn users when '==' is mis-typed as '=',
+even when the constant is on the right.
+
+Comment style
+=============
+
+We use traditional C-style /``*`` ``*``/ comments and avoid // comments.
+
+Rationale: The // form is valid in C99, so this is purely a matter of
+consistency of style. The checkpatch script will warn you about this.
+
+Multiline comment blocks should have a row of stars on the left,
+and the initial /``*`` and terminating ``*``/ both on their own lines:
+
+.. code-block:: c
+
+    /*
+     * like
+     * this
+     */
+
+This is the same format required by the Linux kernel coding style.
+
+(Some of the existing comments in the codebase use the GNU Coding
+Standards form which does not have stars on the left, or other
+variations; avoid these when writing new comments, but don't worry
+about converting to the preferred form unless you're editing that
+comment anyway.)
+
+Rationale: Consistency, and ease of visually picking out a multiline
+comment from the surrounding code.
+
+Language usage
+**************
+
+Preprocessor
+============
+
+Variadic macros
+---------------
+
+For variadic macros, stick with this C99-like syntax:
+
+.. code-block:: c
+
+    #define DPRINTF(fmt, ...) \
+        do { printf("IRQ: " fmt, ## __VA_ARGS__); } while (0)
+
+Include directives
+------------------
+
+Order include directives as follows:
+
+.. code-block:: c
+
+    #include "qemu/osdep.h"  /* Always first... */
+    #include <...>           /* then system headers... */
+    #include "..."           /* and finally QEMU headers. */
+
+The "qemu/osdep.h" header contains preprocessor macros that affect the behavior
+of core system headers like <stdio.h>. It must be the first include so that
+core system headers included by external libraries get the preprocessor macros
+that QEMU depends on.
+
+Do not include "qemu/osdep.h" from header files since the .c file will have
+already included it.
+
+C types
+=======
+
+It should be common sense to use the right type, but we have collected
+a few useful guidelines here.
+
+Scalars
+-------
+
+If you're using "int" or "long", odds are good that there's a better type.
+If a variable is counting something, it should be declared with an
+unsigned type.
+
+If it's host memory-size related, size_t should be a good choice (use
+ssize_t only if required). Guest RAM memory offsets must use ram_addr_t,
+but only for RAM, it may not cover whole guest address space.
+
+If it's file-size related, use off_t.
+If it's file-offset related (i.e., signed), use off_t.
+If it's just counting small numbers use "unsigned int";
+(on all but oddball embedded systems, you can assume that that
+type is at least four bytes wide).
+
+In the event that you require a specific width, use a standard type
+like int32_t, uint32_t, uint64_t, etc. The specific types are
+mandatory for VMState fields.
+
+Don't use Linux kernel internal types like u32, __u32 or __le32.
+
+Use hwaddr for guest physical addresses except pcibus_t
+for PCI addresses. In addition, ram_addr_t is a QEMU internal address
+space that maps guest RAM physical addresses into an intermediate
+address space that can map to host virtual address spaces. Generally
+speaking, the size of guest memory can always fit into ram_addr_t but
+it would not be correct to store an actual guest physical address in a
+ram_addr_t.
+
+For CPU virtual addresses there are several possible types.
+vaddr is the best type to use to hold a CPU virtual address in
+target-independent code. It is guaranteed to be large enough to hold a
+virtual address for any target, and it does not change size from target
+to target. It is always unsigned.
+target_ulong is a type the size of a virtual address on the CPU; this means
+it may be 32 or 64 bits depending on which target is being built. It should
+therefore be used only in target-specific code, and in some
+performance-critical built-per-target core code such as the TLB code.
+There is also a signed version, target_long.
+abi_ulong is for the ``*``-user targets, and represents a type the size of
+'void ``*``' in that target's ABI. (This may not be the same as the size of a
+full CPU virtual address in the case of target ABIs which use 32 bit pointers
+on 64 bit CPUs, like sparc32plus.) Definitions of structures that must match
+the target's ABI must use this type for anything that on the target is defined
+to be an 'unsigned long' or a pointer type.
+There is also a signed version, abi_long.
+
+Of course, take all of the above with a grain of salt. If you're about
+to use some system interface that requires a type like size_t, pid_t or
+off_t, use matching types for any corresponding variables.
+
+Also, if you try to use e.g., "unsigned int" as a type, and that
+conflicts with the signedness of a related variable, sometimes
+it's best just to use the *wrong* type, if "pulling the thread"
+and fixing all related variables would be too invasive.
+
+Finally, while using descriptive types is important, be careful not to
+go overboard. If whatever you're doing causes warnings, or requires
+casts, then reconsider or ask for help.
+
+Pointers
+--------
+
+Ensure that all of your pointers are "const-correct".
+Unless a pointer is used to modify the pointed-to storage,
+give it the "const" attribute. That way, the reader knows
+up-front that this is a read-only pointer. Perhaps more
+importantly, if we're diligent about this, when you see a non-const
+pointer, you're guaranteed that it is used to modify the storage
+it points to, or it is aliased to another pointer that is.
+
+Typedefs
+--------
+
+Typedefs are used to eliminate the redundant 'struct' keyword, since type
+names have a different style than other identifiers ("CamelCase" versus
+"snake_case"). Each named struct type should have a CamelCase name and a
+corresponding typedef.
+
+Since certain C compilers choke on duplicated typedefs, you should avoid
+them and declare a typedef only in one header file. For common types,
+you can use "include/qemu/typedefs.h" for example. However, as a matter
+of convenience it is also perfectly fine to use forward struct
+definitions instead of typedefs in headers and function prototypes; this
+avoids problems with duplicated typedefs and reduces the need to include
+headers from other headers.
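A quick sketch of that typedef convention (ExampleDevice is an invented name for illustration, not a real QEMU type):

.. code-block:: c

    /* In exactly one header: CamelCase type name, a single typedef. */
    typedef struct ExampleDevice {
        uint32_t irq_level;    /* members stay lower_case_with_underscores */
    } ExampleDevice;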
+Reserved namespaces in C and POSIX
+----------------------------------
+
+Underscore capital, double underscore, and underscore 't' suffixes should be
+avoided.
+
+Low level memory management
+===========================
+
+Use of the malloc/free/realloc/calloc/valloc/memalign/posix_memalign
+APIs is not allowed in the QEMU codebase. Instead of these routines,
+use the GLib memory allocation routines g_malloc/g_malloc0/g_new/
+g_new0/g_realloc/g_free or QEMU's qemu_memalign/qemu_blockalign/qemu_vfree
+APIs.
+
+Please note that g_malloc will exit on allocation failure, so there
+is no need to test for failure (as you would have to with malloc).
+Calling g_malloc with a zero size is valid and will return NULL.
+
+Prefer g_new(T, n) instead of g_malloc(sizeof(T) ``*`` n) for the following
+reasons:
+
+* It catches multiplication overflowing size_t;
+* It returns T ``*`` instead of void ``*``, letting compiler catch more type errors.
+
+Declarations like
+
+.. code-block:: c
+
+    T *v = g_malloc(sizeof(*v))
+
+are acceptable, though.
+
+Memory allocated by qemu_memalign or qemu_blockalign must be freed with
+qemu_vfree, since breaking this will cause problems on Win32.
+
+String manipulation
+===================
+
+Do not use the strncpy function. As mentioned in the man page, it does *not*
+guarantee a NULL-terminated buffer, which makes it extremely dangerous to use.
+It also zeros trailing destination bytes out to the specified length. Instead,
+use this similar function when possible, but note its different signature:
+
+.. code-block:: c
+
+    void pstrcpy(char *dest, int dest_buf_size, const char *src)
+
+Don't use strcat because it can't check for buffer overflows, but:
+
+.. code-block:: c
+
+    char *pstrcat(char *buf, int buf_size, const char *s)
+
+The same limitation exists with sprintf and vsprintf, so use snprintf and
+vsnprintf.
+
+QEMU provides other useful string functions:
+
+.. code-block:: c
+
+    int strstart(const char *str, const char *val, const char **ptr)
+    int stristart(const char *str, const char *val, const char **ptr)
+    int qemu_strnlen(const char *s, int max_len)
+
+There are also replacement character processing macros for isxyz and toxyz,
+so instead of e.g. isalnum you should use qemu_isalnum.
+
+Because of the memory management rules, you must use g_strdup/g_strndup
+instead of plain strdup/strndup.
+
+Printf-style functions
+======================
+
+Whenever you add a new printf-style function, i.e., one with a format
+string argument and following "..." in its prototype, be sure to use
+gcc's printf attribute directive in the prototype.
+
+This makes it so gcc's -Wformat and -Wformat-security options can do
+their jobs and cross-check format strings with the number and types
+of arguments.
+
+C standard, implementation defined and undefined behaviors
+==========================================================
+
+C code in QEMU should be written to the C99 language specification. A copy
+of the final version of the C99 standard with corrigenda TC1, TC2, and TC3
+included, formatted as a draft, can be downloaded from:
+
+    `<http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1256.pdf>`_
+
+The C language specification defines regions of undefined behavior and
+implementation defined behavior (to give compiler authors enough leeway to
+produce better code). In general, code in QEMU should follow the language
+specification and avoid both undefined and implementation defined
+constructs. ("It works fine on the gcc I tested it with" is not a valid
+argument...) However there are a few areas where we allow ourselves to
+assume certain behaviors because in practice all the platforms we care about
+behave in the same way and writing strictly conformant code would be
+painful. These are:
+
+* you may assume that integers are 2s complement representation
+* you may assume that right shift of a signed integer duplicates
+  the sign bit (ie it is an arithmetic shift, not a logical shift)
+
+In addition, QEMU assumes that the compiler does not use the latitude
+given in C99 and C11 to treat aspects of signed '<<' as undefined, as
+documented in the GNU Compiler Collection manual starting at version 4.0.
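Those two assumptions are easy to check in isolation; a self-contained sketch (purely illustrative, not part of this patch):

.. code-block:: c

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t v = -8;

        /* Arithmetic right shift: the sign bit is duplicated. */
        assert((v >> 1) == -4);

        /* Two's complement: bitwise NOT plus one negates. */
        assert(-v == (int32_t)(~v + 1));

        return 0;
    }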
+Automatic memory deallocation
+=============================
+
+QEMU has a mandatory dependency on either the GCC or Clang compiler. As
+such it has the freedom to make use of a C language extension for
+automatically running a cleanup function when a stack variable goes
+out of scope. This can be used to simplify function cleanup paths,
+often allowing many goto jumps to be eliminated, through automatic
+free'ing of memory.
+
+The GLib2 library provides a number of functions/macros for enabling
+automatic cleanup:
+
+    `<https://developer.gnome.org/glib/stable/glib-Miscellaneous-Macros.html>`_
+
+Most notably:
+
+* g_autofree - will invoke g_free() on the variable going out of scope
+
+* g_autoptr - for structs / objects, will invoke the cleanup func created
+  by a previous use of G_DEFINE_AUTOPTR_CLEANUP_FUNC. This is
+  supported for most GLib data types and GObjects
+
+For example, instead of
+
+.. code-block:: c
+
+    int somefunc(void) {
+        int ret = -1;
+        char *foo = g_strdup_printf("foo%s", "wibble");
+        GList *bar = .....
+
+        if (eek) {
+            goto cleanup;
+        }
+
+        ret = 0;
+
+    cleanup:
+        g_free(foo);
+        g_list_free(bar);
+        return ret;
+    }
+
+Using g_autofree/g_autoptr enables the code to be written as:
+
+.. code-block:: c
+
+    int somefunc(void) {
+        g_autofree char *foo = g_strdup_printf("foo%s", "wibble");
+        g_autoptr (GList) bar = .....
+
+        if (eek) {
+            return -1;
+        }
+
+        return 0;
+    }
+
+While this generally results in simpler, less leak-prone code, there
+are still some caveats to beware of
+
+* Variables declared with g_auto* MUST always be initialized,
+  otherwise the cleanup function will use uninitialized stack memory
+
+* If a variable declared with g_auto* holds a value which must
+  live beyond the life of the function, that value must be saved
+  and the original variable NULL'd out. This can be simpler using
+  g_steal_pointer
+
+
+.. code-block:: c
+
+    char *somefunc(void) {
+        g_autofree char *foo = g_strdup_printf("foo%s", "wibble");
+        g_autoptr (GList) bar = .....
+
+        if (eek) {
+            return NULL;
+        }
+
+        return g_steal_pointer(&foo);
+    }
+
+
+QEMU Specific Idioms
+********************
+
+Error handling and reporting
+============================
+
+Reporting errors to the human user
+----------------------------------
+
+Do not use printf(), fprintf() or monitor_printf(). Instead, use
+error_report() or error_vreport() from error-report.h. This ensures the
+error is reported in the right place (current monitor or stderr), and in
+a uniform format.
+
+Use error_printf() & friends to print additional information.
+
+error_report() prints the current location. In certain common cases
+like command line parsing, the current location is tracked
+automatically. To manipulate it manually, use the loc_``*``() from
+error-report.h.
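A minimal illustration of that reporting convention (the message and values are invented):

.. code-block:: c

    /* Goes to the current monitor or stderr, with location info prepended. */
    error_report("invalid register offset 0x%x", 0x40);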
+
+To manipulate the location manually, use the loc_``*``() functions from
+error-report.h.
+
+Propagating errors
+------------------
+
+An error can't always be reported to the user right where it's detected,
+but often needs to be propagated up the call chain to a place that can
+handle it. This can be done in various ways.
+
+The most flexible one is Error objects. See error.h for usage
+information.
+
+Use the simplest suitable method to communicate success / failure to
+callers. Stick to common methods: non-negative on success / -1 on
+error, non-negative / -errno, non-null / null, or Error objects.
+
+Example: when a function returns a non-null pointer on success, and it
+can fail only in one way (as far as the caller is concerned), returning
+null on failure is just fine, and certainly simpler and a lot easier on
+the eyes than propagating an Error object through an Error ``*````*`` parameter.
+
+Example: when a function's callers need to report details on failure
+only the function really knows, use Error ``*````*``, and set suitable errors.
+
+Do not report an error to the user when you're also returning an error
+for somebody else to handle. Leave the reporting to the place that
+consumes the error returned.
+
+Handling errors
+---------------
+
+Calling exit() is fine when handling configuration errors during
+startup. It's problematic during normal operation. In particular,
+monitor commands should never exit().
+
+Do not call exit() or abort() to handle an error that can be triggered
+by the guest (e.g., some unimplemented corner case in guest code
+translation or device emulation). Guests should not be able to
+terminate QEMU.
+
+Note that &error_fatal is just another way to exit(1), and &error_abort
+is just another way to abort().
+
+
+trace-events style
+==================
+
+0x prefix
+---------
+
+In trace-events files, use a '0x' prefix to specify hex numbers, as in:
+
+.. code-block::
+
+    some_trace(unsigned x, uint64_t y) "x 0x%x y 0x" PRIx64
+
+An exception is made for groups of numbers that are hexadecimal by
+convention and separated by the symbols '.', '/', ':', or ' ' (such as
+PCI bus id):
+
+.. code-block::
+
+    another_trace(int cssid, int ssid, int dev_num) "bus id: %x.%x.%04x"
+
+However, you may use '0x' for such groups if you want. In any case, make
+sure it is obvious that the numbers are in hex, e.g.:
+
+.. code-block::
+
+    data_dump(uint8_t c1, uint8_t c2, uint8_t c3) "bytes (in hex): %02x %02x %02x"
+
+Rationale: hex numbers are hard to read in logs when there is no 0x prefix,
+especially when the representation happens to contain no letters, and
+especially on one line with other decimal numbers. Number groups may omit
+the '0x' because notations like %x.%x.%x are conventional well beyond QEMU.
+Also, dumping raw data bytes with '0x' is less readable.
+
+'#' printf flag
+---------------
+
+Do not use the printf flag '#', as in '%#x'.
+
+Rationale: there are two ways to add a '0x' prefix to a printed number:
+'0x%...' and '%#...'. For consistency, only one of them should be used.
+Arguments for '0x%' are:
+
+* it is more popular
+* '%#' omits the 0x for the value 0, which makes output inconsistent
diff --git a/qemu/COPYING.LIB b/qemu/COPYING.LIB index 48afc2ef..4362b491 100644 --- a/qemu/COPYING.LIB +++ b/qemu/COPYING.LIB @@ -1,8 +1,8 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc.
- 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -10,7 +10,7 @@ as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] - Preamble + Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public @@ -112,7 +112,7 @@ modification follow. Pay close attention to the difference between a former contains code derived from the library, whereas the latter must be combined with the library in order to run. - GNU LESSER GENERAL PUBLIC LICENSE + GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other @@ -146,7 +146,7 @@ such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. - + 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an @@ -432,7 +432,7 @@ decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - NO WARRANTY + NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. @@ -455,7 +455,7 @@ FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - END OF TERMS AND CONDITIONS + END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries @@ -476,7 +476,7 @@ convey the exclusion of warranty; and each file should have at least the This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. + version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -485,7 +485,7 @@ convey the exclusion of warranty; and each file should have at least the You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. @@ -500,5 +500,3 @@ necessary. Here is a sample; alter the names: Ty Coon, President of Vice That's all there is to it! - - diff --git a/qemu/HACKING b/qemu/HACKING deleted file mode 100644 index 12fbc8af..00000000 --- a/qemu/HACKING +++ /dev/null @@ -1,159 +0,0 @@ -1. Preprocessor - -For variadic macros, stick with this C99-like syntax: - -#define DPRINTF(fmt, ...) \ - do { printf("IRQ: " fmt, ## __VA_ARGS__); } while (0) - -2. C types - -It should be common sense to use the right type, but we have collected -a few useful guidelines here. 
- -2.1. Scalars - -If you're using "int" or "long", odds are good that there's a better type. -If a variable is counting something, it should be declared with an -unsigned type. - -If it's host memory-size related, size_t should be a good choice (use -ssize_t only if required). Guest RAM memory offsets must use ram_addr_t, -but only for RAM, it may not cover whole guest address space. - -If it's file-size related, use off_t. -If it's file-offset related (i.e., signed), use off_t. -If it's just counting small numbers use "unsigned int"; -(on all but oddball embedded systems, you can assume that that -type is at least four bytes wide). - -In the event that you require a specific width, use a standard type -like int32_t, uint32_t, uint64_t, etc. The specific types are -mandatory for VMState fields. - -Don't use Linux kernel internal types like u32, __u32 or __le32. - -Use hwaddr for guest physical addresses except pcibus_t -for PCI addresses. In addition, ram_addr_t is a QEMU internal address -space that maps guest RAM physical addresses into an intermediate -address space that can map to host virtual address spaces. Generally -speaking, the size of guest memory can always fit into ram_addr_t but -it would not be correct to store an actual guest physical address in a -ram_addr_t. - -For CPU virtual addresses there are several possible types. -vaddr is the best type to use to hold a CPU virtual address in -target-independent code. It is guaranteed to be large enough to hold a -virtual address for any target, and it does not change size from target -to target. It is always unsigned. -target_ulong is a type the size of a virtual address on the CPU; this means -it may be 32 or 64 bits depending on which target is being built. It should -therefore be used only in target-specific code, and in some -performance-critical built-per-target core code such as the TLB code. -There is also a signed version, target_long. -abi_ulong is for the *-user targets, and represents a type the size of -'void *' in that target's ABI. (This may not be the same as the size of a -full CPU virtual address in the case of target ABIs which use 32 bit pointers -on 64 bit CPUs, like sparc32plus.) Definitions of structures that must match -the target's ABI must use this type for anything that on the target is defined -to be an 'unsigned long' or a pointer type. -There is also a signed version, abi_long. - -Of course, take all of the above with a grain of salt. If you're about -to use some system interface that requires a type like size_t, pid_t or -off_t, use matching types for any corresponding variables. - -Also, if you try to use e.g., "unsigned int" as a type, and that -conflicts with the signedness of a related variable, sometimes -it's best just to use the *wrong* type, if "pulling the thread" -and fixing all related variables would be too invasive. - -Finally, while using descriptive types is important, be careful not to -go overboard. If whatever you're doing causes warnings, or requires -casts, then reconsider or ask for help. - -2.2. Pointers - -Ensure that all of your pointers are "const-correct". -Unless a pointer is used to modify the pointed-to storage, -give it the "const" attribute. That way, the reader knows -up-front that this is a read-only pointer. Perhaps more -importantly, if we're diligent about this, when you see a non-const -pointer, you're guaranteed that it is used to modify the storage -it points to, or it is aliased to another pointer that is. - -2.3. 
Typedefs -Typedefs are used to eliminate the redundant 'struct' keyword. - -2.4. Reserved namespaces in C and POSIX -Underscore capital, double underscore, and underscore 't' suffixes should be -avoided. - -3. Low level memory management - -Use of the malloc/free/realloc/calloc/valloc/memalign/posix_memalign -APIs is not allowed in the QEMU codebase. Instead of these routines, -use the GLib memory allocation routines g_malloc/g_malloc0/g_new/ -g_new0/g_realloc/g_free or QEMU's qemu_memalign/qemu_blockalign/qemu_vfree -APIs. - -Please note that g_malloc will exit on allocation failure, so there -is no need to test for failure (as you would have to with malloc). -Calling g_malloc with a zero size is valid and will return NULL. - -Memory allocated by qemu_memalign or qemu_blockalign must be freed with -qemu_vfree, since breaking this will cause problems on Win32. - -4. String manipulation - -Do not use the strncpy function. As mentioned in the man page, it does *not* -guarantee a NULL-terminated buffer, which makes it extremely dangerous to use. -It also zeros trailing destination bytes out to the specified length. Instead, -use this similar function when possible, but note its different signature: -void pstrcpy(char *dest, int dest_buf_size, const char *src) - -Don't use strcat because it can't check for buffer overflows, but: -char *pstrcat(char *buf, int buf_size, const char *s) - -The same limitation exists with sprintf and vsprintf, so use snprintf and -vsnprintf. - -QEMU provides other useful string functions: -int strstart(const char *str, const char *val, const char **ptr) -int stristart(const char *str, const char *val, const char **ptr) -int qemu_strnlen(const char *s, int max_len) - -There are also replacement character processing macros for isxyz and toxyz, -so instead of e.g. isalnum you should use qemu_isalnum. - -Because of the memory management rules, you must use g_strdup/g_strndup -instead of plain strdup/strndup. - -5. Printf-style functions - -Whenever you add a new printf-style function, i.e., one with a format -string argument and following "..." in its prototype, be sure to use -gcc's printf attribute directive in the prototype. - -This makes it so gcc's -Wformat and -Wformat-security options can do -their jobs and cross-check format strings with the number and types -of arguments. - -6. C standard, implementation defined and undefined behaviors - -C code in QEMU should be written to the C99 language specification. A copy -of the final version of the C99 standard with corrigenda TC1, TC2, and TC3 -included, formatted as a draft, can be downloaded from: - http://www.open-std.org/jtc1/sc22/WG14/www/docs/n1256.pdf - -The C language specification defines regions of undefined behavior and -implementation defined behavior (to give compiler authors enough leeway to -produce better code). In general, code in QEMU should follow the language -specification and avoid both undefined and implementation defined -constructs. ("It works fine on the gcc I tested it with" is not a valid -argument...) However there are a few areas where we allow ourselves to -assume certain behaviors because in practice all the platforms we care about -behave in the same way and writing strictly conformant code would be -painful. 
These are: - * you may assume that integers are 2s complement representation - * you may assume that right shift of a signed integer duplicates - the sign bit (ie it is an arithmetic shift, not a logical shift) diff --git a/qemu/LICENSE b/qemu/LICENSE index da70e949..f19b0184 100644 --- a/qemu/LICENSE +++ b/qemu/LICENSE @@ -1,20 +1,26 @@ -The following points clarify the QEMU license: +The QEMU distribution includes both the QEMU emulator and +various firmware files. These are separate programs that are +distributed together for our users' convenience, and they have +separate licenses. -1) QEMU as a whole is released under the GNU General Public License, -version 2. +The following points clarify the license of the QEMU emulator: -2) Parts of QEMU have specific licenses which are compatible with the -GNU General Public License, version 2. Hence each source file contains -its own licensing information. Source files with no licensing information -are released under the GNU General Public License, version 2 or (at your -option) any later version. +1) The QEMU emulator as a whole is released under the GNU General +Public License, version 2. + +2) Parts of the QEMU emulator have specific licenses which are compatible +with the GNU General Public License, version 2. Hence each source file +contains its own licensing information. Source files with no licensing +information are released under the GNU General Public License, version +2 or (at your option) any later version. As of July 2013, contributions under version 2 of the GNU General Public License (and no later version) are only accepted for the following files -or directories: bsd-user/, linux-user/, hw/misc/vfio.c, hw/xen/xen_pt*. +or directories: bsd-user/, linux-user/, hw/vfio/, hw/xen/xen_pt*. -3) The Tiny Code Generator (TCG) is released under the BSD license - (see license headers in files). +3) The Tiny Code Generator (TCG) is mostly under the BSD or MIT licenses; + but some parts may be GPLv2 or other licenses. Again, see the + specific licensing information in each source file. 4) QEMU is a trademark of Fabrice Bellard. diff --git a/qemu/MAINTAINERS b/qemu/MAINTAINERS new file mode 100644 index 00000000..8cbc1fac --- /dev/null +++ b/qemu/MAINTAINERS @@ -0,0 +1,2916 @@ +QEMU Maintainers +================ + +The intention of this file is not to establish who owns what portions of the +code base, but to provide a set of names that developers can consult when they +have a question about a particular subset and also to provide a set of names +to be CC'd when submitting a patch to obtain appropriate review. + +In general, if you have a question about inclusion of a patch, you should +consult qemu-devel and not any specific individual privately. + +Descriptions of section entries: + + M: Mail patches to: FullName + Maintainers are looking after a certain area and must be CCed on + patches. They are considered the main contact point. + R: Designated reviewer: FullName + These reviewers should be CCed on patches. + Reviewers are familiar with the subject matter and provide feedback + even though they are not maintainers. + L: Mailing list that is relevant to this area + These lists should be CCed on patches. + W: Web-page with status/info + Q: Patchwork web based patch tracking system site + T: SCM tree type and location. Type is one of: git, hg, quilt, stgit. + S: Status, one of the following: + Supported: Someone is actually paid to look after this. + Maintained: Someone actually looks after it. 
+ Odd Fixes: It has a maintainer but they don't have time to do + much other than throw the odd patch in. See below. + Orphan: No current maintainer [but maybe you could take the + role as you write your new code]. + Obsolete: Old code. Something tagged obsolete generally means + it has been replaced by a better system and you + should be using that. + F: Files and directories with wildcard patterns. + A trailing slash includes all files and subdirectory files. + F: drivers/net/ all files in and below drivers/net + F: drivers/net/* all files in drivers/net, but not below + F: */net/* all files in "any top level directory"/net + One pattern per line. Multiple F: lines acceptable. + X: Files and directories that are NOT maintained, same rules as F: + Files exclusions are tested before file matches. + Can be useful for excluding a specific subdirectory, for instance: + F: net/ + X: net/ipv6/ + matches all files in and below net excluding net/ipv6/ + K: Keyword perl extended regex pattern to match content in a + patch or file. For instance: + K: of_get_profile + matches patches or files that contain "of_get_profile" + K: \b(printk|pr_(info|err))\b + matches patches or files that contain one or more of the words + printk, pr_info or pr_err + One regex pattern per line. Multiple K: lines acceptable. + + +General Project Administration +------------------------------ +M: Peter Maydell + +All patches CC here +L: qemu-devel@nongnu.org +F: * +F: */ + +Responsible Disclosure, Reporting Security Issues +------------------------------------------------- +W: https://wiki.qemu.org/SecurityProcess +M: Michael S. Tsirkin +L: secalert@redhat.com + +Trivial patches +--------------- +Trivial patches +M: Michael Tokarev +M: Laurent Vivier +S: Maintained +L: qemu-trivial@nongnu.org +K: ^Subject:.*(?i)trivial +T: git git://git.corpit.ru/qemu.git trivial-patches +T: git https://github.com/vivier/qemu.git trivial-patches + +Architecture support +-------------------- +S390 general architecture support +M: Cornelia Huck +S: Supported +F: default-configs/s390x-softmmu.mak +F: gdb-xml/s390*.xml +F: hw/char/sclp*.[hc] +F: hw/char/terminal3270.c +F: hw/intc/s390_flic.c +F: hw/intc/s390_flic_kvm.c +F: hw/s390x/ +F: hw/vfio/ap.c +F: hw/vfio/ccw.c +F: hw/watchdog/wdt_diag288.c +F: include/hw/s390x/ +F: include/hw/watchdog/wdt_diag288.h +F: pc-bios/s390-ccw/ +F: pc-bios/s390-ccw.img +F: target/s390x/ +F: docs/system/target-s390x.rst +F: docs/system/s390x/ +F: tests/migration/s390x/ +K: ^Subject:.*(?i)s390x? 
+T: git https://github.com/cohuck/qemu.git s390-next +L: qemu-s390x@nongnu.org + +Guest CPU cores (TCG) +--------------------- +Overall TCG CPUs +M: Richard Henderson +R: Paolo Bonzini +S: Maintained +F: cpus.c +F: exec.c +F: accel/tcg/ +F: accel/stubs/tcg-stub.c +F: scripts/decodetree.py +F: docs/devel/decodetree.rst +F: include/exec/cpu*.h +F: include/exec/exec-all.h +F: include/exec/helper*.h +F: include/exec/tb-hash.h +F: include/sysemu/cpus.h +F: include/sysemu/tcg.h + +FPU emulation +M: Aurelien Jarno +M: Peter Maydell +M: Alex Bennée +S: Maintained +F: fpu/ +F: include/fpu/ +F: tests/fp/ + +Alpha TCG CPUs +M: Richard Henderson +S: Maintained +F: target/alpha/ +F: tests/tcg/alpha/ +F: disas/alpha.c + +ARM TCG CPUs +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: target/arm/ +F: tests/tcg/arm/ +F: tests/tcg/aarch64/ +F: hw/arm/ +F: hw/cpu/a*mpcore.c +F: include/hw/cpu/a*mpcore.h +F: disas/arm.c +F: disas/arm-a64.cc +F: disas/libvixl/ +F: docs/system/target-arm.rst + +ARM SMMU +M: Eric Auger +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/smmu* +F: include/hw/arm/smmu* + +CRIS TCG CPUs +M: Edgar E. Iglesias +S: Maintained +F: target/cris/ +F: hw/cris/ +F: include/hw/cris/ +F: tests/tcg/cris/ +F: disas/cris.c + +HPPA (PA-RISC) TCG CPUs +M: Richard Henderson +S: Maintained +F: target/hppa/ +F: hw/hppa/ +F: disas/hppa.c +F: hw/net/*i82596* +F: include/hw/net/lasi_82596.h + +LM32 TCG CPUs +M: Michael Walle +S: Maintained +F: target/lm32/ +F: disas/lm32.c +F: hw/lm32/ +F: hw/*/lm32_* +F: hw/*/milkymist-* +F: include/hw/display/milkymist_tmu2.h +F: include/hw/char/lm32_juart.h +F: include/hw/lm32/ +F: tests/tcg/lm32/ + +M68K TCG CPUs +M: Laurent Vivier +S: Maintained +F: target/m68k/ +F: disas/m68k.c + +MicroBlaze TCG CPUs +M: Edgar E. Iglesias +S: Maintained +F: target/microblaze/ +F: hw/microblaze/ +F: disas/microblaze.c + +MIPS TCG CPUs +M: Aleksandar Markovic +R: Aurelien Jarno +R: Aleksandar Rikalo +S: Maintained +F: target/mips/ +F: default-configs/*mips* +F: disas/*mips* +F: docs/system/cpu-models-mips.rst.inc +F: hw/intc/mips_gic.c +F: hw/mips/ +F: hw/misc/mips_* +F: hw/timer/mips_gictimer.c +F: include/hw/intc/mips_gic.h +F: include/hw/mips/ +F: include/hw/misc/mips_* +F: include/hw/timer/mips_gictimer.h +F: tests/acceptance/linux_ssh_mips_malta.py +F: tests/acceptance/machine_mips_malta.py +F: tests/tcg/mips/ +K: ^Subject:.*(?i)mips + +Moxie TCG CPUs +M: Anthony Green +S: Maintained +F: target/moxie/ +F: disas/moxie.c +F: hw/moxie/ +F: default-configs/moxie-softmmu.mak + +NiosII TCG CPUs +M: Chris Wulff +M: Marek Vasut +S: Maintained +F: target/nios2/ +F: hw/nios2/ +F: hw/intc/nios2_iic.c +F: disas/nios2.c +F: default-configs/nios2-softmmu.mak + +OpenRISC TCG CPUs +M: Stafford Horne +S: Odd Fixes +F: target/openrisc/ +F: hw/openrisc/ +F: tests/tcg/openrisc/ + +PowerPC TCG CPUs +M: David Gibson +L: qemu-ppc@nongnu.org +S: Maintained +F: target/ppc/ +F: hw/ppc/ +F: include/hw/ppc/ +F: disas/ppc.c + +RISC-V TCG CPUs +M: Palmer Dabbelt +M: Alistair Francis +M: Sagar Karandikar +M: Bastian Koppelmann +L: qemu-riscv@nongnu.org +S: Supported +F: target/riscv/ +F: hw/riscv/ +F: include/hw/riscv/ +F: linux-user/host/riscv32/ +F: linux-user/host/riscv64/ + +RENESAS RX CPUs +M: Yoshinori Sato +S: Maintained +F: target/rx/ + +S390 TCG CPUs +M: Richard Henderson +M: David Hildenbrand +S: Maintained +F: target/s390x/ +F: hw/s390x/ +F: disas/s390.c +F: tests/tcg/s390x/ +L: qemu-s390x@nongnu.org + +SH4 TCG CPUs +M: Aurelien Jarno +S: Odd Fixes +F: target/sh4/ +F: hw/sh4/ +F: 
disas/sh4.c +F: include/hw/sh4/ + +SPARC TCG CPUs +M: Mark Cave-Ayland +M: Artyom Tarasenko +S: Maintained +F: target/sparc/ +F: hw/sparc/ +F: hw/sparc64/ +F: include/hw/sparc/sparc64.h +F: disas/sparc.c + +UniCore32 TCG CPUs +M: Guan Xuetao +S: Maintained +F: target/unicore32/ +F: hw/unicore32/ +F: include/hw/unicore32/ + +X86 TCG CPUs +M: Paolo Bonzini +M: Richard Henderson +M: Eduardo Habkost +S: Maintained +F: target/i386/ +F: tests/tcg/i386/ +F: tests/tcg/x86_64/ +F: hw/i386/ +F: disas/i386.c +F: docs/system/cpu-models-x86.rst.inc +T: git https://github.com/ehabkost/qemu.git x86-next + +Xtensa TCG CPUs +M: Max Filippov +W: http://wiki.osll.ru/doku.php?id=etc:users:jcmvbkbc:qemu-target-xtensa +S: Maintained +F: target/xtensa/ +F: hw/xtensa/ +F: tests/tcg/xtensa/ +F: disas/xtensa.c +F: include/hw/xtensa/xtensa-isa.h +F: default-configs/xtensa*.mak + +TriCore TCG CPUs +M: Bastian Koppelmann +S: Maintained +F: target/tricore/ +F: hw/tricore/ +F: include/hw/tricore/ + +Multiarch Linux User Tests +M: Alex Bennée +S: Maintained +F: tests/tcg/multiarch/ + +Guest CPU Cores (KVM) +--------------------- +Overall KVM CPUs +M: Paolo Bonzini +L: kvm@vger.kernel.org +S: Supported +F: */kvm.* +F: accel/kvm/ +F: accel/stubs/kvm-stub.c +F: include/hw/kvm/ +F: include/sysemu/kvm*.h +F: scripts/kvm/kvm_flightrecorder + +ARM KVM CPUs +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: target/arm/kvm.c + +MIPS KVM CPUs +M: Aleksandar Markovic +S: Odd Fixes +F: target/mips/kvm.c + +PPC KVM CPUs +M: David Gibson +S: Maintained +F: target/ppc/kvm.c + +S390 KVM CPUs +M: Halil Pasic +M: Cornelia Huck +M: Christian Borntraeger +S: Supported +F: target/s390x/kvm.c +F: target/s390x/kvm_s390x.h +F: target/s390x/kvm-stub.c +F: target/s390x/ioinst.[ch] +F: target/s390x/machine.c +F: target/s390x/sigp.c +F: target/s390x/cpu_features*.[ch] +F: target/s390x/cpu_models.[ch] +F: hw/intc/s390_flic.c +F: hw/intc/s390_flic_kvm.c +F: include/hw/s390x/s390_flic.h +F: gdb-xml/s390*.xml +T: git https://github.com/cohuck/qemu.git s390-next +T: git https://github.com/borntraeger/qemu.git s390-next +L: qemu-s390x@nongnu.org + +X86 KVM CPUs +M: Paolo Bonzini +M: Marcelo Tosatti +L: kvm@vger.kernel.org +S: Supported +F: target/i386/kvm.c +F: scripts/kvm/vmxcap + +X86 HVF CPUs +M: Roman Bolshakov +S: Maintained +F: accel/stubs/hvf-stub.c +F: target/i386/hvf/ +F: include/sysemu/hvf.h + +WHPX CPUs +M: Sunil Muthuswamy +S: Supported +F: target/i386/whpx-all.c +F: target/i386/whp-dispatch.h +F: accel/stubs/whpx-stub.c +F: include/sysemu/whpx.h + +Guest CPU Cores (Xen) +--------------------- +X86 Xen CPUs +M: Stefano Stabellini +M: Anthony Perard +M: Paul Durrant +L: xen-devel@lists.xenproject.org +S: Supported +F: */xen* +F: hw/9pfs/xen-9p* +F: hw/char/xen_console.c +F: hw/display/xenfb.c +F: hw/net/xen_nic.c +F: hw/usb/xen-usb.c +F: hw/block/xen* +F: hw/block/dataplane/xen* +F: hw/xen/ +F: hw/xenpv/ +F: hw/i386/xen/ +F: hw/pci-host/xen_igd_pt.c +F: include/hw/block/dataplane/xen* +F: include/hw/xen/ +F: include/sysemu/xen-mapcache.h + +Guest CPU Cores (HAXM) +--------------------- +X86 HAXM CPUs +M: Wenchao Wang +M: Colin Xu +L: haxm-team@intel.com +W: https://github.com/intel/haxm/issues +S: Maintained +F: include/sysemu/hax.h +F: target/i386/hax-* + +Hosts +----- +LINUX +M: Michael S. 
Tsirkin +M: Cornelia Huck +M: Paolo Bonzini +S: Maintained +F: linux-headers/ +F: scripts/update-linux-headers.sh + +POSIX +M: Paolo Bonzini +S: Maintained +F: os-posix.c +F: include/sysemu/os-posix.h +F: util/*posix*.c +F: include/qemu/*posix*.h + +NETBSD +M: Kamil Rytarowski +S: Maintained +K: ^Subject:.*(?i)NetBSD + +OPENBSD +M: Brad Smith +S: Maintained +K: ^Subject:.*(?i)OpenBSD + +W32, W64 +M: Stefan Weil +S: Maintained +F: *win32* +F: */*win32* +F: include/*/*win32* +X: qga/*win32* +F: qemu.nsi + +Alpha Machines +-------------- +M: Richard Henderson +S: Maintained +F: hw/alpha/ +F: hw/isa/smc37c669-superio.c +F: tests/tcg/alpha/system/ + +ARM Machines +------------ +Allwinner-a10 +M: Beniamino Galvani +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/*/allwinner* +F: include/hw/*/allwinner* +F: hw/arm/cubieboard.c + +Allwinner-h3 +M: Niek Linnenbank +L: qemu-arm@nongnu.org +S: Maintained +F: hw/*/allwinner-h3* +F: include/hw/*/allwinner-h3* +F: hw/arm/orangepi.c +F: docs/system/orangepi.rst + +ARM PrimeCell and CMSDK devices +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/char/pl011.c +F: include/hw/char/pl011.h +F: hw/display/pl110* +F: hw/dma/pl080.c +F: include/hw/dma/pl080.h +F: hw/dma/pl330.c +F: hw/gpio/pl061.c +F: hw/input/pl050.c +F: hw/intc/pl190.c +F: hw/sd/pl181.c +F: hw/ssi/pl022.c +F: include/hw/ssi/pl022.h +F: hw/rtc/pl031.c +F: include/hw/rtc/pl031.h +F: include/hw/arm/primecell.h +F: hw/timer/cmsdk-apb-timer.c +F: include/hw/timer/cmsdk-apb-timer.h +F: hw/timer/cmsdk-apb-dualtimer.c +F: include/hw/timer/cmsdk-apb-dualtimer.h +F: hw/char/cmsdk-apb-uart.c +F: include/hw/char/cmsdk-apb-uart.h +F: hw/watchdog/cmsdk-apb-watchdog.c +F: include/hw/watchdog/cmsdk-apb-watchdog.h +F: hw/misc/tz-ppc.c +F: include/hw/misc/tz-ppc.h +F: hw/misc/tz-mpc.c +F: include/hw/misc/tz-mpc.h +F: hw/misc/tz-msc.c +F: include/hw/misc/tz-msc.h + +ARM cores +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/intc/arm* +F: hw/intc/gic_internal.h +F: hw/misc/a9scu.c +F: hw/misc/arm11scu.c +F: hw/misc/arm_l2x0.c +F: hw/timer/a9gtimer* +F: hw/timer/arm* +F: include/hw/arm/arm*.h +F: include/hw/intc/arm* +F: include/hw/misc/a9scu.h +F: include/hw/misc/arm11scu.h +F: include/hw/timer/a9gtimer.h +F: include/hw/timer/arm_mptimer.h +F: include/hw/timer/armv7m_systick.h +F: tests/qtest/test-arm-mptimer.c + +Exynos +M: Igor Mitsyanko +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/*/exynos* +F: include/hw/arm/exynos4210.h + +Calxeda Highbank +M: Rob Herring +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/highbank.c +F: hw/net/xgmac.c + +Canon DIGIC +M: Antony Pavlov +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: include/hw/arm/digic.h +F: hw/*/digic* +F: include/hw/*/digic* + +Goldfish RTC +M: Anup Patel +M: Alistair Francis +L: qemu-riscv@nongnu.org +S: Maintained +F: hw/rtc/goldfish_rtc.c +F: include/hw/rtc/goldfish_rtc.h + +Gumstix +M: Peter Maydell +R: Philippe Mathieu-Daudé +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/gumstix.c + +i.MX25 PDK +M: Peter Maydell +R: Jean-Christophe Dubois +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/fsl-imx25.c +F: hw/arm/imx25_pdk.c +F: hw/misc/imx25_ccm.c +F: include/hw/arm/fsl-imx25.h +F: include/hw/misc/imx25_ccm.h + +i.MX31 (kzm) +M: Peter Chubb +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/kzm.c +F: hw/*/imx_* +F: hw/*/*imx31* +F: include/hw/*/imx_* +F: include/hw/*/*imx31* + +Integrator CP +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained 
+F: hw/arm/integratorcp.c +F: hw/misc/arm_integrator_debug.c +F: include/hw/misc/arm_integrator_debug.h +F: tests/acceptance/machine_arm_integratorcp.py +F: docs/system/arm/integratorcp.rst + +MCIMX6UL EVK / i.MX6ul +M: Peter Maydell +R: Jean-Christophe Dubois +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/mcimx6ul-evk.c +F: hw/arm/fsl-imx6ul.c +F: hw/misc/imx6ul_ccm.c +F: include/hw/arm/fsl-imx6ul.h +F: include/hw/misc/imx6ul_ccm.h + +MCIMX7D SABRE / i.MX7 +M: Peter Maydell +R: Andrey Smirnov +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/mcimx7d-sabre.c +F: hw/arm/fsl-imx7.c +F: hw/misc/imx7_*.c +F: include/hw/arm/fsl-imx7.h +F: include/hw/misc/imx7_*.h +F: hw/pci-host/designware.c +F: include/hw/pci-host/designware.h + +MPS2 +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/mps2.c +F: hw/arm/mps2-tz.c +F: hw/misc/mps2-*.c +F: include/hw/misc/mps2-*.h +F: hw/arm/armsse.c +F: include/hw/arm/armsse.h +F: hw/misc/iotkit-secctl.c +F: include/hw/misc/iotkit-secctl.h +F: hw/misc/iotkit-sysctl.c +F: include/hw/misc/iotkit-sysctl.h +F: hw/misc/iotkit-sysinfo.c +F: include/hw/misc/iotkit-sysinfo.h +F: hw/misc/armsse-cpuid.c +F: include/hw/misc/armsse-cpuid.h +F: hw/misc/armsse-mhu.c +F: include/hw/misc/armsse-mhu.h + +Musca +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/musca.c + +Musicpal +M: Jan Kiszka +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/musicpal.c +F: docs/system/arm/musicpal.rst + +nSeries +M: Andrzej Zaborowski +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/nseries.c +F: hw/display/blizzard.c +F: hw/input/lm832x.c +F: hw/input/tsc2005.c +F: hw/misc/cbus.c +F: hw/rtc/twl92230.c +F: include/hw/display/blizzard.h +F: include/hw/input/tsc2xxx.h +F: include/hw/misc/cbus.h +F: tests/acceptance/machine_arm_n8x0.py +F: docs/system/arm/nseries.rst + +Palm +M: Andrzej Zaborowski +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/palm.c +F: hw/input/tsc210x.c +F: include/hw/input/tsc2xxx.h +F: docs/system/arm/palm.rst + +Raspberry Pi +M: Peter Maydell +R: Andrew Baumann +R: Philippe Mathieu-Daudé +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/raspi.c +F: hw/arm/raspi_platform.h +F: hw/*/bcm283* +F: include/hw/arm/raspi* +F: include/hw/*/bcm283* + +Real View +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/realview* +F: hw/cpu/realview_mpcore.c +F: hw/intc/realview_gic.c +F: include/hw/intc/realview_gic.h +F: docs/system/arm/realview.rst + +PXA2XX +M: Andrzej Zaborowski +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/mainstone.c +F: hw/arm/spitz.c +F: hw/arm/tosa.c +F: hw/arm/z2.c +F: hw/*/pxa2xx* +F: hw/display/tc6393xb.c +F: hw/gpio/max7310.c +F: hw/gpio/zaurus.c +F: hw/misc/mst_fpga.c +F: hw/misc/max111x.c +F: include/hw/arm/pxa.h +F: include/hw/arm/sharpsl.h +F: include/hw/display/tc6393xb.h +F: docs/system/arm/xscale.rst + +SABRELITE / i.MX6 +M: Peter Maydell +R: Jean-Christophe Dubois +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/sabrelite.c +F: hw/arm/fsl-imx6.c +F: hw/misc/imx6_*.c +F: hw/ssi/imx_spi.c +F: hw/usb/imx-usb-phy.c +F: include/hw/usb/imx-usb-phy.h +F: include/hw/arm/fsl-imx6.h +F: include/hw/misc/imx6_*.h +F: include/hw/ssi/imx_spi.h + +SBSA-REF +M: Radoslaw Biernacki +M: Peter Maydell +R: Leif Lindholm +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/sbsa-ref.c + +Sharp SL-5500 (Collie) PDA +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: hw/arm/collie.c +F: hw/arm/strongarm* + +Stellaris +M: Peter Maydell +L: 
qemu-arm@nongnu.org +S: Maintained +F: hw/*/stellaris* +F: include/hw/input/gamepad.h +F: docs/system/arm/stellaris.rst + +Versatile Express +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/vexpress.c + +Versatile PB +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/*/versatile* +F: hw/misc/arm_sysctl.c +F: docs/system/arm/versatile.rst + +Virt +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/virt* +F: include/hw/arm/virt.h + +Xilinx Zynq +M: Edgar E. Iglesias +M: Alistair Francis +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/*/xilinx_* +F: hw/*/cadence_* +F: hw/misc/zynq* +F: include/hw/misc/zynq* +X: hw/ssi/xilinx_* + +Xilinx ZynqMP +M: Alistair Francis +M: Edgar E. Iglesias +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/*/xlnx*.c +F: include/hw/*/xlnx*.h +F: include/hw/ssi/xilinx_spips.h +F: hw/display/dpcd.c +F: include/hw/display/dpcd.h + +ARM ACPI Subsystem +M: Shannon Zhao +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/virt-acpi-build.c + +STM32F205 +M: Alistair Francis +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/stm32f205_soc.c +F: hw/misc/stm32f2xx_syscfg.c +F: hw/char/stm32f2xx_usart.c +F: hw/timer/stm32f2xx_timer.c +F: hw/adc/* +F: hw/ssi/stm32f2xx_spi.c +F: include/hw/*/stm32*.h + +STM32F405 +M: Alistair Francis +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/stm32f405_soc.c +F: hw/misc/stm32f4xx_syscfg.c +F: hw/misc/stm32f4xx_exti.c + +Netduino 2 +M: Alistair Francis +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/netduino2.c + +Netduino Plus 2 +M: Alistair Francis +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/netduinoplus2.c + +SmartFusion2 +M: Subbaraya Sundeep +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/msf2-soc.c +F: hw/misc/msf2-sysreg.c +F: hw/timer/mss-timer.c +F: hw/ssi/mss-spi.c +F: include/hw/arm/msf2-soc.h +F: include/hw/misc/msf2-sysreg.h +F: include/hw/timer/mss-timer.h +F: include/hw/ssi/mss-spi.h + +Emcraft M2S-FG484 +M: Subbaraya Sundeep +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/msf2-som.c + +ASPEED BMCs +M: Cédric Le Goater +M: Peter Maydell +R: Andrew Jeffery +R: Joel Stanley +L: qemu-arm@nongnu.org +S: Maintained +F: hw/*/*aspeed* +F: hw/misc/pca9552.c +F: include/hw/*/*aspeed* +F: include/hw/misc/pca9552*.h +F: hw/net/ftgmac100.c +F: include/hw/net/ftgmac100.h + +NRF51 +M: Joel Stanley +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/*/nrf51*.c +F: hw/*/microbit*.c +F: include/hw/*/nrf51*.h +F: include/hw/*/microbit*.h +F: tests/qtest/microbit-test.c + +CRIS Machines +------------- +Axis Dev88 +M: Edgar E. 
Iglesias +S: Maintained +F: hw/cris/axis_dev88.c +F: hw/*/etraxfs_*.c + +HP-PARISC Machines +------------------ +HP B160L +M: Richard Henderson +R: Helge Deller +S: Odd Fixes +F: default-configs/hppa-softmmu.mak +F: hw/hppa/ +F: pc-bios/hppa-firmware.img + +LM32 Machines +------------- +EVR32 and uclinux BSP +M: Michael Walle +S: Maintained +F: hw/lm32/lm32_boards.c + +milkymist +M: Michael Walle +S: Maintained +F: hw/lm32/milkymist.c + +M68K Machines +------------- +an5206 +M: Thomas Huth +S: Odd Fixes +F: hw/m68k/an5206.c +F: hw/m68k/mcf5206.c + +mcf5208 +M: Thomas Huth +S: Odd Fixes +F: hw/m68k/mcf5208.c +F: hw/m68k/mcf_intc.c +F: hw/char/mcf_uart.c +F: hw/net/mcf_fec.c +F: include/hw/m68k/mcf*.h + +NeXTcube +M: Thomas Huth +S: Odd Fixes +F: hw/m68k/next-*.c +F: hw/display/next-fb.c +F: include/hw/m68k/next-cube.h + +q800 +M: Laurent Vivier +S: Maintained +F: hw/m68k/q800.c +F: hw/misc/mac_via.c +F: hw/nubus/* +F: hw/display/macfb.c +F: hw/block/swim.c +F: hw/m68k/bootinfo.h +F: include/hw/misc/mac_via.h +F: include/hw/nubus/* +F: include/hw/display/macfb.h +F: include/hw/block/swim.h + +MicroBlaze Machines +------------------- +petalogix_s3adsp1800 +M: Edgar E. Iglesias +S: Maintained +F: hw/microblaze/petalogix_s3adsp1800_mmu.c +F: include/hw/char/xilinx_uartlite.h + +petalogix_ml605 +M: Edgar E. Iglesias +S: Maintained +F: hw/microblaze/petalogix_ml605_mmu.c + +MIPS Machines +------------- +Jazz +M: Hervé Poussineau +R: Aleksandar Rikalo +S: Maintained +F: hw/mips/mips_jazz.c +F: hw/display/jazz_led.c +F: hw/dma/rc4030.c + +Malta +M: Aleksandar Markovic +M: Philippe Mathieu-Daudé +R: Aurelien Jarno +S: Maintained +F: hw/isa/piix4.c +F: hw/acpi/piix4.c +F: hw/mips/mips_malta.c +F: hw/mips/gt64xxx_pci.c +F: include/hw/southbridge/piix.h +F: tests/acceptance/linux_ssh_mips_malta.py +F: tests/acceptance/machine_mips_malta.py + +Mipssim +M: Aleksandar Markovic +R: Aleksandar Rikalo +S: Odd Fixes +F: hw/mips/mips_mipssim.c +F: hw/net/mipsnet.c + +R4000 +M: Aleksandar Markovic +R: Aurelien Jarno +R: Aleksandar Rikalo +S: Obsolete +F: hw/mips/mips_r4k.c + +Fulong 2E +M: Philippe Mathieu-Daudé +M: Aleksandar Markovic +S: Odd Fixes +F: hw/mips/mips_fulong2e.c +F: hw/isa/vt82c686.c +F: hw/pci-host/bonito.c +F: include/hw/isa/vt82c686.h + +Boston +M: Paul Burton +R: Aleksandar Rikalo +S: Maintained +F: hw/core/loader-fit.c +F: hw/mips/boston.c +F: hw/pci-host/xilinx-pcie.c +F: include/hw/pci-host/xilinx-pcie.h + +OpenRISC Machines +----------------- +or1k-sim +M: Jia Liu +S: Maintained +F: hw/openrisc/openrisc_sim.c + +PowerPC Machines +---------------- +405 +M: David Gibson +L: qemu-ppc@nongnu.org +S: Odd Fixes +F: hw/ppc/ppc405_boards.c + +Bamboo +M: David Gibson +L: qemu-ppc@nongnu.org +S: Odd Fixes +F: hw/ppc/ppc440_bamboo.c + +e500 +M: David Gibson +L: qemu-ppc@nongnu.org +S: Odd Fixes +F: hw/ppc/e500* +F: hw/gpio/mpc8xxx.c +F: hw/i2c/mpc_i2c.c +F: hw/net/fsl_etsec/ +F: hw/pci-host/ppce500.c +F: include/hw/ppc/ppc_e500.h +F: include/hw/pci-host/ppce500.h +F: pc-bios/u-boot.e500 + +mpc8544ds +M: David Gibson +L: qemu-ppc@nongnu.org +S: Odd Fixes +F: hw/ppc/mpc8544ds.c +F: hw/ppc/mpc8544_guts.c + +New World (mac99) +M: Mark Cave-Ayland +R: David Gibson +L: qemu-ppc@nongnu.org +S: Odd Fixes +F: hw/ppc/mac_newworld.c +F: hw/pci-host/uninorth.c +F: hw/pci-bridge/dec.[hc] +F: hw/misc/macio/ +F: hw/misc/mos6522.c +F: hw/nvram/mac_nvram.c +F: hw/input/adb* +F: include/hw/misc/macio/ +F: include/hw/misc/mos6522.h +F: include/hw/ppc/mac_dbdma.h +F: include/hw/pci-host/uninorth.h +F: 
include/hw/input/adb* +F: pc-bios/qemu_vga.ndrv + +Old World (g3beige) +M: Mark Cave-Ayland +R: David Gibson +L: qemu-ppc@nongnu.org +S: Odd Fixes +F: hw/ppc/mac_oldworld.c +F: hw/pci-host/grackle.c +F: hw/misc/macio/ +F: hw/intc/heathrow_pic.c +F: hw/input/adb* +F: include/hw/intc/heathrow_pic.h +F: include/hw/input/adb* +F: pc-bios/qemu_vga.ndrv + +PReP +M: Hervé Poussineau +L: qemu-ppc@nongnu.org +S: Maintained +F: hw/ppc/prep.c +F: hw/ppc/prep_systemio.c +F: hw/ppc/rs6000_mc.c +F: hw/pci-host/prep.[hc] +F: hw/isa/i82378.c +F: hw/isa/pc87312.c +F: hw/dma/i82374.c +F: hw/rtc/m48t59-isa.c +F: include/hw/isa/pc87312.h +F: include/hw/rtc/m48t59.h +F: tests/acceptance/ppc_prep_40p.py + +sPAPR +M: David Gibson +L: qemu-ppc@nongnu.org +S: Supported +F: hw/*/spapr* +F: include/hw/*/spapr* +F: hw/*/xics* +F: include/hw/*/xics* +F: pc-bios/slof.bin +F: docs/specs/ppc-spapr-hcalls.txt +F: docs/specs/ppc-spapr-hotplug.txt +F: tests/qtest/spapr* +F: tests/qtest/libqos/*spapr* +F: tests/qtest/rtas* +F: tests/qtest/libqos/rtas* + +PowerNV (Non-Virtualized) +M: Cédric Le Goater +M: David Gibson +L: qemu-ppc@nongnu.org +S: Maintained +F: hw/ppc/pnv* +F: hw/intc/pnv* +F: hw/intc/xics_pnv.c +F: include/hw/ppc/pnv* +F: pc-bios/skiboot.lid +F: tests/qtest/pnv* + +virtex_ml507 +M: Edgar E. Iglesias +L: qemu-ppc@nongnu.org +S: Odd Fixes +F: hw/ppc/virtex_ml507.c + +sam460ex +M: BALATON Zoltan +L: qemu-ppc@nongnu.org +S: Maintained +F: hw/ppc/sam460ex.c +F: hw/ppc/ppc440_pcix.c +F: hw/display/sm501* +F: hw/ide/sii3112.c +F: hw/rtc/m41t80.c +F: pc-bios/canyonlands.dt[sb] +F: pc-bios/u-boot-sam460ex-20100605.bin +F: roms/u-boot-sam460ex + +SH4 Machines +------------ +R2D +M: Magnus Damm +S: Maintained +F: hw/sh4/r2d.c +F: hw/intc/sh_intc.c +F: hw/timer/sh_timer.c + +Shix +M: Magnus Damm +S: Odd Fixes +F: hw/sh4/shix.c + +SPARC Machines +-------------- +Sun4m +M: Mark Cave-Ayland +S: Maintained +F: hw/sparc/sun4m.c +F: hw/sparc/sun4m_iommu.c +F: hw/display/cg3.c +F: hw/display/tcx.c +F: hw/dma/sparc32_dma.c +F: hw/misc/eccmemctl.c +F: hw/*/slavio_*.c +F: include/hw/nvram/sun_nvram.h +F: include/hw/sparc/sparc32_dma.h +F: include/hw/sparc/sun4m_iommu.h +F: pc-bios/openbios-sparc32 + +Sun4u +M: Mark Cave-Ayland +S: Maintained +F: hw/sparc64/sun4u.c +F: hw/sparc64/sun4u_iommu.c +F: include/hw/sparc/sun4u_iommu.h +F: hw/pci-host/sabre.c +F: include/hw/pci-host/sabre.h +F: hw/pci-bridge/simba.c +F: include/hw/pci-bridge/simba.h +F: pc-bios/openbios-sparc64 + +Sun4v +M: Artyom Tarasenko +S: Maintained +F: hw/sparc64/niagara.c +F: hw/rtc/sun4v-rtc.c +F: include/hw/rtc/sun4v-rtc.h + +Leon3 +M: Fabien Chouteau +M: KONRAD Frederic +S: Maintained +F: hw/sparc/leon3.c +F: hw/*/grlib* +F: include/hw/*/grlib* +F: tests/acceptance/machine_sparc_leon3.py + +S390 Machines +------------- +S390 Virtio-ccw +M: Cornelia Huck +M: Halil Pasic +M: Christian Borntraeger +S: Supported +F: hw/char/sclp*.[hc] +F: hw/char/terminal3270.c +F: hw/s390x/ +F: include/hw/s390x/ +F: hw/watchdog/wdt_diag288.c +F: include/hw/watchdog/wdt_diag288.h +F: default-configs/s390x-softmmu.mak +T: git https://github.com/cohuck/qemu.git s390-next +T: git https://github.com/borntraeger/qemu.git s390-next +L: qemu-s390x@nongnu.org + +S390-ccw boot +M: Christian Borntraeger +M: Thomas Huth +S: Supported +F: hw/s390x/ipl.* +F: pc-bios/s390-ccw/ +F: pc-bios/s390-ccw.img +F: docs/devel/s390-dasd-ipl.rst +T: git https://github.com/borntraeger/qemu.git s390-next +L: qemu-s390x@nongnu.org + +S390 PCI +M: Matthew Rosato +S: Supported +F: hw/s390x/s390-pci* +L: 
qemu-s390x@nongnu.org + +UniCore32 Machines +------------------ +PKUnity-3 SoC initramfs-with-busybox +M: Guan Xuetao +S: Maintained +F: hw/*/puv3* +F: hw/unicore32/ + +X86 Machines +------------ +PC +M: Michael S. Tsirkin +M: Marcel Apfelbaum +S: Supported +F: include/hw/i386/ +F: hw/i386/ +F: hw/pci-host/i440fx.c +F: hw/pci-host/q35.c +F: hw/pci-host/pam.c +F: include/hw/pci-host/i440fx.h +F: include/hw/pci-host/q35.h +F: include/hw/pci-host/pam.h +F: hw/isa/piix3.c +F: hw/isa/lpc_ich9.c +F: hw/i2c/smbus_ich9.c +F: hw/acpi/piix4.c +F: hw/acpi/ich9.c +F: include/hw/acpi/ich9.h +F: include/hw/southbridge/piix.h +F: hw/misc/sga.c +F: hw/isa/apm.c +F: include/hw/isa/apm.h +F: tests/test-x86-cpuid.c +F: tests/qtest/test-x86-cpuid-compat.c + +PC Chipset +M: Michael S. Tsirkin +M: Paolo Bonzini +S: Supported +F: hw/char/debugcon.c +F: hw/char/parallel* +F: hw/char/serial* +F: hw/dma/i8257* +F: hw/i2c/pm_smbus.c +F: hw/input/pckbd.c +F: hw/intc/apic* +F: hw/intc/ioapic* +F: hw/intc/i8259* +F: hw/isa/isa-superio.c +F: hw/misc/debugexit.c +F: hw/misc/pc-testdev.c +F: hw/timer/hpet* +F: hw/timer/i8254* +F: hw/rtc/mc146818rtc* +F: hw/watchdog/wdt_ib700.c +F: hw/watchdog/wdt_i6300esb.c +F: include/hw/display/vga.h +F: include/hw/char/parallel.h +F: include/hw/dma/i8257.h +F: include/hw/i2c/pm_smbus.h +F: include/hw/input/i8042.h +F: include/hw/isa/i8259_internal.h +F: include/hw/isa/superio.h +F: include/hw/timer/hpet.h +F: include/hw/timer/i8254* +F: include/hw/rtc/mc146818rtc* + +microvm +M: Sergio Lopez +M: Paolo Bonzini +S: Maintained +F: docs/microvm.rst +F: hw/i386/microvm.c +F: include/hw/i386/microvm.h +F: pc-bios/bios-microvm.bin + +Machine core +M: Eduardo Habkost +M: Marcel Apfelbaum +S: Supported +F: hw/core/cpu.c +F: hw/core/machine-qmp-cmds.c +F: hw/core/machine.c +F: hw/core/null-machine.c +F: hw/core/numa.c +F: hw/cpu/cluster.c +F: qapi/machine.json +F: qapi/machine-target.json +F: include/hw/boards.h +F: include/hw/core/cpu.h +F: include/hw/cpu/cluster.h +F: include/sysemu/numa.h +T: git https://github.com/ehabkost/qemu.git machine-next + +Xtensa Machines +--------------- +sim +M: Max Filippov +S: Maintained +F: hw/xtensa/sim.c + +virt +M: Max Filippov +S: Maintained +F: hw/xtensa/virt.c + +XTFPGA (LX60, LX200, ML605, KC705) +M: Max Filippov +S: Maintained +F: hw/xtensa/xtfpga.c +F: hw/net/opencores_eth.c + +Devices +------- +EDU +M: Jiri Slaby +S: Maintained +F: hw/misc/edu.c + +IDE +M: John Snow +L: qemu-block@nongnu.org +S: Supported +F: include/hw/ide.h +F: include/hw/ide/ +F: hw/ide/ +F: hw/block/block.c +F: hw/block/cdrom.c +F: hw/block/hd-geometry.c +F: tests/qtest/ide-test.c +F: tests/qtest/ahci-test.c +F: tests/qtest/cdrom-test.c +F: tests/qtest/libqos/ahci* +T: git https://github.com/jnsnow/qemu.git ide + +IPMI +M: Corey Minyard +S: Maintained +F: include/hw/ipmi/* +F: hw/ipmi/* +F: hw/smbios/smbios_type_38.c +F: tests/qtest/ipmi* +T: git https://github.com/cminyard/qemu.git master-ipmi-rebase + +Floppy +M: John Snow +L: qemu-block@nongnu.org +S: Supported +F: hw/block/fdc.c +F: include/hw/block/fdc.h +F: tests/qtest/fdc-test.c +T: git https://github.com/jnsnow/qemu.git ide + +OMAP +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Maintained +F: hw/*/omap* +F: include/hw/arm/omap.h + +IPack +M: Alberto Garcia +S: Odd Fixes +F: hw/char/ipoctal232.c +F: hw/ipack/ + +PCI +M: Michael S. 
Tsirkin +M: Marcel Apfelbaum +S: Supported +F: include/hw/pci/* +F: hw/misc/pci-testdev.c +F: hw/pci/* +F: hw/pci-bridge/* +F: docs/pci* +F: docs/specs/*pci* +F: default-configs/pci.mak + +ACPI/SMBIOS +M: Michael S. Tsirkin +M: Igor Mammedov +S: Supported +F: include/hw/acpi/* +F: include/hw/firmware/smbios.h +F: hw/mem/* +F: hw/acpi/* +F: hw/smbios/* +F: hw/i386/acpi-build.[hc] +F: hw/arm/virt-acpi-build.c +F: tests/qtest/bios-tables-test.c +F: tests/qtest/acpi-utils.[hc] +F: tests/data/acpi/ + +ppc4xx +M: David Gibson +L: qemu-ppc@nongnu.org +S: Odd Fixes +F: hw/ppc/ppc4*.c +F: hw/i2c/ppc4xx_i2c.c +F: include/hw/ppc/ppc4xx.h +F: include/hw/i2c/ppc4xx_i2c.h + +Character devices +M: Marc-André Lureau +R: Paolo Bonzini +S: Odd Fixes +F: hw/char/ + +Network devices +M: Jason Wang +S: Odd Fixes +F: hw/net/ +F: include/hw/net/ +F: tests/qtest/virtio-net-test.c +F: docs/virtio-net-failover.rst +T: git https://github.com/jasowang/qemu.git net + +Parallel NOR Flash devices +M: Philippe Mathieu-Daudé +T: git https://gitlab.com/philmd/qemu.git pflash-next +S: Maintained +F: hw/block/pflash_cfi*.c +F: include/hw/block/flash.h + +SCSI +M: Paolo Bonzini +R: Fam Zheng +S: Supported +F: include/hw/scsi/* +F: hw/scsi/* +F: tests/qtest/virtio-scsi-test.c +T: git https://github.com/bonzini/qemu.git scsi-next + +SSI +M: Alistair Francis +S: Maintained +F: hw/ssi/* +F: hw/block/m25p80.c +F: include/hw/ssi/ssi.h +X: hw/ssi/xilinx_* +F: tests/qtest/m25p80-test.c + +Xilinx SPI +M: Alistair Francis +S: Maintained +F: hw/ssi/xilinx_* + +SD (Secure Card) +M: Philippe Mathieu-Daudé +S: Odd Fixes +F: include/hw/sd/sd* +F: hw/sd/core.c +F: hw/sd/sd* +F: hw/sd/ssi-sd.c +F: tests/qtest/sd* + +USB +M: Gerd Hoffmann +S: Maintained +F: hw/usb/* +F: tests/qtest/usb-*-test.c +F: docs/usb2.txt +F: docs/usb-storage.txt +F: include/hw/usb.h +F: include/hw/usb/ +F: default-configs/usb.mak + +USB (serial adapter) +M: Gerd Hoffmann +M: Samuel Thibault +S: Maintained +F: hw/usb/dev-serial.c + +VFIO +M: Alex Williamson +S: Supported +F: hw/vfio/* +F: include/hw/vfio/ + +vfio-ccw +M: Cornelia Huck +M: Eric Farman +S: Supported +F: hw/vfio/ccw.c +F: hw/s390x/s390-ccw.c +F: include/hw/s390x/s390-ccw.h +F: include/hw/s390x/vfio-ccw.h +T: git https://github.com/cohuck/qemu.git s390-next +L: qemu-s390x@nongnu.org + +vfio-ap +M: Christian Borntraeger +M: Tony Krowiak +M: Halil Pasic +M: Pierre Morel +S: Supported +F: hw/s390x/ap-device.c +F: hw/s390x/ap-bridge.c +F: include/hw/s390x/ap-device.h +F: include/hw/s390x/ap-bridge.h +F: hw/vfio/ap.c +F: docs/system/s390x/vfio-ap.rst +L: qemu-s390x@nongnu.org + +vhost +M: Michael S. Tsirkin +S: Supported +F: hw/*/*vhost* +F: docs/interop/vhost-user.json +F: docs/interop/vhost-user.rst +F: contrib/vhost-user-*/ +F: backends/vhost-user.c +F: include/sysemu/vhost-user-backend.h + +virtio +M: Michael S. Tsirkin +S: Supported +F: hw/*/virtio* +F: hw/virtio/Makefile.objs +F: hw/virtio/trace-events +F: net/vhost-user.c +F: include/hw/virtio/ + +virtio-balloon +M: Michael S. 
Tsirkin +M: David Hildenbrand +S: Maintained +F: hw/virtio/virtio-balloon*.c +F: include/hw/virtio/virtio-balloon.h +F: balloon.c +F: include/sysemu/balloon.h + +virtio-9p +M: Greg Kurz +R: Christian Schoenebeck +S: Odd Fixes +F: hw/9pfs/ +X: hw/9pfs/xen-9p* +F: fsdev/ +F: docs/interop/virtfs-proxy-helper.rst +F: tests/qtest/virtio-9p-test.c +T: git https://github.com/gkurz/qemu.git 9p-next + +virtio-blk +M: Stefan Hajnoczi +L: qemu-block@nongnu.org +S: Supported +F: hw/block/virtio-blk.c +F: hw/block/dataplane/* +F: tests/qtest/virtio-blk-test.c +T: git https://github.com/stefanha/qemu.git block + +virtio-ccw +M: Cornelia Huck +M: Halil Pasic +S: Supported +F: hw/s390x/virtio-ccw*.[hc] +F: hw/s390x/vhost-vsock-ccw.c +T: git https://github.com/cohuck/qemu.git s390-next +T: git https://github.com/borntraeger/qemu.git s390-next +L: qemu-s390x@nongnu.org + +virtiofs +M: Dr. David Alan Gilbert +M: Stefan Hajnoczi +S: Supported +F: tools/virtiofsd/* +F: hw/virtio/vhost-user-fs* +F: include/hw/virtio/vhost-user-fs.h +F: docs/interop/virtiofsd.rst + +virtio-input +M: Gerd Hoffmann +S: Maintained +F: hw/input/vhost-user-input.c +F: hw/input/virtio-input*.c +F: include/hw/virtio/virtio-input.h +F: contrib/vhost-user-input/* + +virtio-iommu +M: Eric Auger +S: Maintained +F: hw/virtio/virtio-iommu*.c +F: include/hw/virtio/virtio-iommu.h + +virtio-serial +M: Laurent Vivier +R: Amit Shah +S: Supported +F: hw/char/virtio-serial-bus.c +F: hw/char/virtio-console.c +F: include/hw/virtio/virtio-serial.h +F: tests/qtest/virtio-serial-test.c + +virtio-rng +M: Laurent Vivier +R: Amit Shah +S: Supported +F: hw/virtio/virtio-rng.c +F: include/hw/virtio/virtio-rng.h +F: include/sysemu/rng*.h +F: backends/rng*.c +F: tests/qtest/virtio-rng-test.c + +virtio-crypto +M: Gonglei +S: Supported +F: hw/virtio/virtio-crypto.c +F: hw/virtio/virtio-crypto-pci.c +F: include/hw/virtio/virtio-crypto.h + +nvme +M: Keith Busch +L: qemu-block@nongnu.org +S: Supported +F: hw/block/nvme* +F: tests/qtest/nvme-test.c + +megasas +M: Hannes Reinecke +L: qemu-block@nongnu.org +S: Supported +F: hw/scsi/megasas.c +F: hw/scsi/mfi.h +F: tests/qtest/megasas-test.c + +Network packet abstractions +M: Dmitry Fleytman +S: Maintained +F: include/net/eth.h +F: net/eth.c +F: hw/net/net_rx_pkt* +F: hw/net/net_tx_pkt* + +Vmware +M: Dmitry Fleytman +S: Maintained +F: hw/net/vmxnet* +F: hw/scsi/vmw_pvscsi* +F: tests/qtest/vmxnet3-test.c + +Rocker +M: Jiri Pirko +S: Maintained +F: hw/net/rocker/ +F: tests/rocker/ +F: docs/specs/rocker.txt + +NVDIMM +M: Xiao Guangrong +S: Maintained +F: hw/acpi/nvdimm.c +F: hw/mem/nvdimm.c +F: include/hw/mem/nvdimm.h +F: docs/nvdimm.txt + +e1000x +M: Dmitry Fleytman +S: Maintained +F: hw/net/e1000x* + +e1000e +M: Dmitry Fleytman +S: Maintained +F: hw/net/e1000e* + +eepro100 +M: Stefan Weil +S: Maintained +F: hw/net/eepro100.c + +tulip +M: Sven Schnelle +S: Maintained +F: hw/net/tulip.c +F: hw/net/tulip.h + +Generic Loader +M: Alistair Francis +S: Maintained +F: hw/core/generic-loader.c +F: include/hw/core/generic-loader.h +F: docs/generic-loader.txt + +Intel Hexadecimal Object File Loader +M: Su Hang +S: Maintained +F: tests/qtest/hexloader-test.c +F: tests/data/hex-loader/test.hex + +CHRP NVRAM +M: Thomas Huth +S: Maintained +F: hw/nvram/chrp_nvram.c +F: include/hw/nvram/chrp_nvram.h +F: tests/qtest/prom-env-test.c + +VM Generation ID +M: Ben Warren +S: Maintained +F: hw/acpi/vmgenid.c +F: include/hw/acpi/vmgenid.h +F: docs/specs/vmgenid.txt +F: tests/qtest/vmgenid-test.c +F: stubs/vmgenid.c + +Unimplemented device +M: 
Peter Maydell +R: Philippe Mathieu-Daudé +S: Maintained +F: include/hw/misc/unimp.h +F: hw/misc/unimp.c + +Standard VGA +M: Gerd Hoffmann +S: Maintained +F: hw/display/vga* +F: hw/display/bochs-display.c +F: include/hw/display/vga.h +F: include/hw/display/bochs-vbe.h + +ramfb +M: Gerd Hoffmann +S: Maintained +F: hw/display/ramfb*.c +F: include/hw/display/ramfb.h + +virtio-gpu +M: Gerd Hoffmann +S: Maintained +F: hw/display/virtio-gpu* +F: hw/display/virtio-vga.* +F: include/hw/virtio/virtio-gpu.h + +vhost-user-blk +M: Raphael Norwitz +S: Maintained +F: contrib/vhost-user-blk/ +F: contrib/vhost-user-scsi/ +F: hw/block/vhost-user-blk.c +F: hw/scsi/vhost-user-scsi.c +F: hw/virtio/vhost-user-blk-pci.c +F: hw/virtio/vhost-user-scsi-pci.c +F: include/hw/virtio/vhost-user-blk.h +F: include/hw/virtio/vhost-user-scsi.h + +vhost-user-gpu +M: Marc-André Lureau +M: Gerd Hoffmann +S: Maintained +F: docs/interop/vhost-user-gpu.rst +F: contrib/vhost-user-gpu +F: hw/display/vhost-user-* + +Cirrus VGA +M: Gerd Hoffmann +S: Odd Fixes +W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/ +F: hw/display/cirrus* + +EDID Generator +M: Gerd Hoffmann +S: Maintained +F: hw/display/edid* +F: include/hw/display/edid.h +F: qemu-edid.c + +PIIX4 South Bridge (i82371AB) +M: Hervé Poussineau +M: Philippe Mathieu-Daudé +S: Maintained +F: hw/isa/piix4.c +F: include/hw/southbridge/piix.h + +Firmware configuration (fw_cfg) +M: Philippe Mathieu-Daudé +R: Laszlo Ersek +R: Gerd Hoffmann +S: Supported +F: docs/specs/fw_cfg.txt +F: hw/nvram/fw_cfg.c +F: stubs/fw_cfg.c +F: include/hw/nvram/fw_cfg.h +F: include/standard-headers/linux/qemu_fw_cfg.h +F: tests/qtest/libqos/fw_cfg.c +F: tests/qtest/fw_cfg-test.c +T: git https://github.com/philmd/qemu.git fw_cfg-next + +XIVE +M: David Gibson +M: Cédric Le Goater +L: qemu-ppc@nongnu.org +S: Supported +F: hw/*/*xive* +F: include/hw/*/*xive* +F: docs/*/*xive* + +Subsystems +---------- +Audio +M: Gerd Hoffmann +S: Maintained +F: audio/ +F: hw/audio/ +F: include/hw/audio/ +F: tests/qtest/ac97-test.c +F: tests/qtest/es1370-test.c +F: tests/qtest/intel-hda-test.c + +Block layer core +M: Kevin Wolf +M: Max Reitz +L: qemu-block@nongnu.org +S: Supported +F: block* +F: block/ +F: hw/block/ +F: include/block/ +F: qemu-img* +F: docs/interop/qemu-img.rst +F: qemu-io* +F: tests/qemu-iotests/ +F: util/qemu-progress.c +F: qobject/block-qdict.c +F: tests/check-block-qdict.c +T: git https://repo.or.cz/qemu/kevin.git block + +Block I/O path +M: Stefan Hajnoczi +M: Fam Zheng +L: qemu-block@nongnu.org +S: Supported +F: util/async.c +F: util/aio-*.c +F: util/aio-*.h +F: util/fdmon-*.c +F: block/io.c +F: migration/block* +F: include/block/aio.h +F: include/block/aio-wait.h +F: scripts/qemugdb/aio.py +T: git https://github.com/stefanha/qemu.git block + +Block SCSI subsystem +M: Paolo Bonzini +R: Fam Zheng +L: qemu-block@nongnu.org +S: Supported +F: include/scsi/* +F: scsi/* + +Block Jobs +M: John Snow +L: qemu-block@nongnu.org +S: Supported +F: blockjob.c +F: include/block/blockjob.h +F: job.c +F: job-qmp.c +F: include/qemu/job.h +F: block/backup.c +F: block/commit.c +F: block/stream.c +F: block/mirror.c +F: qapi/job.json +T: git https://github.com/jnsnow/qemu.git jobs + +Block QAPI, monitor, command line +M: Markus Armbruster +S: Supported +F: blockdev.c +F: blockdev-hmp-cmds.c +F: block/qapi.c +F: qapi/block*.json +F: qapi/transaction.json +T: git https://repo.or.cz/qemu/armbru.git block-next + +Dirty Bitmaps +M: John Snow +R: Vladimir Sementsov-Ogievskiy +L: qemu-block@nongnu.org +S: 
Supported +F: include/qemu/hbitmap.h +F: include/block/dirty-bitmap.h +F: block/dirty-bitmap.c +F: block/qcow2-bitmap.c +F: migration/block-dirty-bitmap.c +F: util/hbitmap.c +F: tests/test-hbitmap.c +F: docs/interop/bitmaps.rst +T: git https://github.com/jnsnow/qemu.git bitmaps + +Character device backends +M: Marc-André Lureau +R: Paolo Bonzini +S: Maintained +F: chardev/ +F: include/chardev/ +F: qapi/char.json + +Character Devices (Braille) +M: Samuel Thibault +S: Maintained +F: chardev/baum.c + +Command line option argument parsing +M: Markus Armbruster +S: Supported +F: include/qemu/option.h +F: tests/test-keyval.c +F: tests/test-qemu-opts.c +F: util/keyval.c +F: util/qemu-option.c + +Coverity model +M: Markus Armbruster +S: Supported +F: scripts/coverity-model.c + +Coverity Scan integration +M: Peter Maydell +S: Maintained +F: scripts/coverity-scan/ + +Device Tree +M: Alistair Francis +R: David Gibson +S: Maintained +F: device_tree.c +F: include/sysemu/device_tree.h + +Dump +S: Supported +M: Marc-André Lureau +F: dump/ +F: hw/misc/vmcoreinfo.c +F: include/hw/misc/vmcoreinfo.h +F: include/qemu/win_dump_defs +F: include/sysemu/dump-arch.h +F: include/sysemu/dump.h +F: qapi/dump.json +F: scripts/dump-guest-memory.py +F: stubs/dump.c + +Error reporting +M: Markus Armbruster +S: Supported +F: include/qapi/error.h +F: include/qemu/error-report.h +F: qapi/error.json +F: util/error.c +F: util/qemu-error.c +F: scripts/coccinelle/err-bad-newline.cocci +F: scripts/coccinelle/error-use-after-free.cocci +F: scripts/coccinelle/error_propagate_null.cocci +F: scripts/coccinelle/remove_local_err.cocci +F: scripts/coccinelle/use-error_fatal.cocci + +GDB stub +M: Alex Bennée +R: Philippe Mathieu-Daudé +S: Maintained +F: gdbstub* +F: gdb-xml/ + +Memory API +M: Paolo Bonzini +S: Supported +F: include/exec/ioport.h +F: ioport.c +F: include/exec/memop.h +F: include/exec/memory.h +F: include/exec/ram_addr.h +F: include/exec/ramblock.h +F: memory.c +F: include/exec/memory-internal.h +F: exec.c +F: scripts/coccinelle/memory-region-housekeeping.cocci + +SPICE +M: Gerd Hoffmann +S: Supported +F: include/ui/qemu-spice.h +F: include/ui/spice-display.h +F: ui/spice-*.c +F: audio/spiceaudio.c +F: hw/display/qxl* +F: qapi/ui.json +F: docs/spice-port-fqdn.txt + +Graphics +M: Gerd Hoffmann +S: Odd Fixes +F: ui/ +F: include/ui/ +F: qapi/ui.json +F: util/drm.c + +Cocoa graphics +M: Peter Maydell +S: Odd Fixes +F: ui/cocoa.m + +Main loop +M: Paolo Bonzini +S: Maintained +F: cpus.c +F: include/qemu/main-loop.h +F: include/sysemu/runstate.h +F: util/main-loop.c +F: util/qemu-timer.c +F: softmmu/vl.c +F: softmmu/main.c +F: qapi/run-state.json + +Human Monitor (HMP) +M: Dr. 
David Alan Gilbert +S: Maintained +F: monitor/monitor-internal.h +F: monitor/misc.c +F: monitor/monitor.c +F: monitor/hmp* +F: hmp.h +F: hmp-commands*.hx +F: include/monitor/hmp-target.h +F: tests/qtest/test-hmp.c +F: include/qemu/qemu-print.h +F: util/qemu-print.c + +Network device backends +M: Jason Wang +S: Maintained +F: net/ +F: include/net/ +F: qemu-bridge-helper.c +T: git https://github.com/jasowang/qemu.git net +F: qapi/net.json + +Netmap network backend +M: Luigi Rizzo +M: Giuseppe Lettieri +M: Vincenzo Maffione +W: http://info.iet.unipi.it/~luigi/netmap/ +S: Maintained +F: net/netmap.c + +Host Memory Backends +M: Eduardo Habkost +M: Igor Mammedov +S: Maintained +F: backends/hostmem*.c +F: include/sysemu/hostmem.h +T: git https://github.com/ehabkost/qemu.git machine-next + +Cryptodev Backends +M: Gonglei +S: Maintained +F: include/sysemu/cryptodev*.h +F: backends/cryptodev*.c + +Python scripts +M: Eduardo Habkost +M: Cleber Rosa +S: Odd fixes +F: python/qemu/*py +F: scripts/*.py +F: tests/*.py + +Benchmark util +M: Vladimir Sementsov-Ogievskiy +S: Maintained +F: scripts/simplebench/ + +QAPI +M: Markus Armbruster +M: Michael Roth +S: Supported +F: qapi/ +X: qapi/*.json +F: include/qapi/ +X: include/qapi/qmp/ +F: include/qapi/qmp/dispatch.h +F: tests/qapi-schema/ +F: tests/test-*-visitor.c +F: tests/test-qapi-*.c +F: tests/test-qmp-*.c +F: tests/test-visitor-serialization.c +F: scripts/qapi-gen.py +F: scripts/qapi/* +F: docs/devel/qapi* +T: git https://repo.or.cz/qemu/armbru.git qapi-next + +QAPI Schema +M: Eric Blake +M: Markus Armbruster +S: Supported +F: qapi/*.json +T: git https://repo.or.cz/qemu/armbru.git qapi-next + +QObject +M: Markus Armbruster +S: Supported +F: qobject/ +F: include/qapi/qmp/ +X: include/qapi/qmp/dispatch.h +F: scripts/coccinelle/qobject.cocci +F: tests/check-qdict.c +F: tests/check-qjson.c +F: tests/check-qlist.c +F: tests/check-qlit.c +F: tests/check-qnull.c +F: tests/check-qnum.c +F: tests/check-qobject.c +F: tests/check-qstring.c +F: tests/data/qobject/qdict.txt +T: git https://repo.or.cz/qemu/armbru.git qapi-next + +QEMU Guest Agent +M: Michael Roth +S: Maintained +F: qga/ +F: docs/interop/qemu-ga.rst +F: scripts/qemu-guest-agent/ +F: tests/test-qga.c +F: docs/interop/qemu-ga-ref.texi +T: git https://github.com/mdroth/qemu.git qga + +QOM +M: Paolo Bonzini +R: Daniel P. 
Berrange +R: Eduardo Habkost +S: Supported +F: docs/qdev-device-use.txt +F: hw/core/qdev* +F: include/hw/qdev* +F: include/monitor/qdev.h +F: include/qom/ +F: qapi/qom.json +F: qapi/qdev.json +F: qdev-monitor.c +F: qom/ +F: tests/check-qom-interface.c +F: tests/check-qom-proplist.c +F: tests/test-qdev-global-props.c + +QMP +M: Markus Armbruster +S: Supported +F: monitor/monitor-internal.h +F: monitor/qmp* +F: monitor/misc.c +F: monitor/monitor.c +F: qapi/error.json +F: docs/devel/*qmp-* +F: docs/interop/*qmp-* +F: scripts/qmp/ +F: tests/qtest/qmp-test.c +F: tests/qtest/qmp-cmd-test.c +T: git https://repo.or.cz/qemu/armbru.git qapi-next + +qtest +M: Thomas Huth +M: Laurent Vivier +R: Paolo Bonzini +S: Maintained +F: qtest.c +F: accel/qtest.c +F: tests/qtest/ + +Device Fuzzing +M: Alexander Bulekov +R: Paolo Bonzini +R: Bandan Das +R: Stefan Hajnoczi +S: Maintained +F: tests/qtest/fuzz/ + +Register API +M: Alistair Francis +S: Maintained +F: hw/core/register.c +F: include/hw/register.h +F: include/hw/registerfields.h + +SLIRP +M: Samuel Thibault +S: Maintained +F: slirp/ +F: net/slirp.c +F: include/net/slirp.h +T: git https://people.debian.org/~sthibault/qemu.git slirp + +Stubs +M: Paolo Bonzini +S: Maintained +F: stubs/ + +Tracing +M: Stefan Hajnoczi +S: Maintained +F: trace/ +F: trace-events +F: docs/qemu-option-trace.rst.inc +F: scripts/tracetool.py +F: scripts/tracetool/ +F: scripts/qemu-trace-stap* +F: docs/interop/qemu-trace-stap.rst +F: docs/devel/tracing.txt +T: git https://github.com/stefanha/qemu.git tracing + +TPM +M: Stefan Berger +S: Maintained +F: tpm.c +F: stubs/tpm.c +F: hw/tpm/* +F: include/hw/acpi/tpm.h +F: include/sysemu/tpm* +F: qapi/tpm.json +F: backends/tpm.c +F: tests/qtest/*tpm* +T: git https://github.com/stefanberger/qemu-tpm.git tpm-next + +Checkpatch +S: Odd Fixes +F: scripts/checkpatch.pl + +Migration +M: Juan Quintela +M: Dr. David Alan Gilbert +S: Maintained +F: hw/core/vmstate-if.c +F: include/hw/vmstate-if.h +F: include/migration/ +F: migration/ +F: scripts/vmstate-static-checker.py +F: tests/vmstate-static-checker-data/ +F: tests/qtest/migration-test.c +F: docs/devel/migration.rst +F: qapi/migration.json + +D-Bus +M: Marc-André Lureau +S: Maintained +F: backends/dbus-vmstate.c +F: tests/dbus-vmstate* +F: util/dbus.c +F: include/qemu/dbus.h +F: docs/interop/dbus.rst +F: docs/interop/dbus-vmstate.rst + +Seccomp +M: Eduardo Otubo +S: Supported +F: qemu-seccomp.c +F: include/sysemu/seccomp.h + +Cryptography +M: Daniel P. Berrange +S: Maintained +F: crypto/ +F: include/crypto/ +F: tests/test-crypto-* +F: tests/benchmark-crypto-* +F: tests/crypto-tls-* +F: tests/pkix_asn1_tab.c +F: qemu.sasl + +Coroutines +M: Stefan Hajnoczi +M: Kevin Wolf +S: Maintained +F: util/*coroutine* +F: include/qemu/coroutine* +F: tests/test-coroutine.c + +Buffers +M: Daniel P. Berrange +S: Odd fixes +F: util/buffer.c +F: include/qemu/buffer.h + +I/O Channels +M: Daniel P. Berrange +S: Maintained +F: io/ +F: include/io/ +F: tests/test-io-* + +User authorization +M: Daniel P. Berrange +S: Maintained +F: authz/ +F: qapi/authz.json +F: include/authz/ +F: tests/test-authz-* + +Sockets +M: Daniel P. Berrange +M: Gerd Hoffmann +S: Maintained +F: include/qemu/sockets.h +F: util/qemu-sockets.c +F: qapi/sockets.json + +File monitor +M: Daniel P. 
Berrange +S: Odd fixes +F: util/filemonitor*.c +F: include/qemu/filemonitor.h +F: tests/test-util-filemonitor.c + +Throttling infrastructure +M: Alberto Garcia +S: Supported +F: block/throttle-groups.c +F: include/block/throttle-groups.h +F: include/qemu/throttle*.h +F: util/throttle.c +F: docs/throttle.txt +F: tests/test-throttle.c +L: qemu-block@nongnu.org + +UUID +M: Fam Zheng +S: Supported +F: util/uuid.c +F: include/qemu/uuid.h +F: tests/test-uuid.c + +COLO Framework +M: zhanghailiang +S: Maintained +F: migration/colo* +F: include/migration/colo.h +F: include/migration/failover.h +F: docs/COLO-FT.txt + +COLO Proxy +M: Zhang Chen +M: Li Zhijian +S: Supported +F: docs/colo-proxy.txt +F: net/colo* +F: net/filter-rewriter.c +F: net/filter-mirror.c + +Record/replay +M: Pavel Dovgalyuk +R: Paolo Bonzini +W: https://wiki.qemu.org/Features/record-replay +S: Supported +F: replay/* +F: block/blkreplay.c +F: net/filter-replay.c +F: include/sysemu/replay.h +F: docs/replay.txt +F: stubs/replay.c + +IOVA Tree +M: Peter Xu +S: Maintained +F: include/qemu/iova-tree.h +F: util/iova-tree.c + +elf2dmp +M: Viktor Prutyanov +S: Maintained +F: contrib/elf2dmp/ + +I2C and SMBus +M: Corey Minyard +S: Maintained +F: hw/i2c/core.c +F: hw/i2c/smbus_slave.c +F: hw/i2c/smbus_master.c +F: hw/i2c/smbus_eeprom.c +F: include/hw/i2c/i2c.h +F: include/hw/i2c/smbus_master.h +F: include/hw/i2c/smbus_slave.h +F: include/hw/i2c/smbus_eeprom.h + +EDK2 Firmware +M: Laszlo Ersek +M: Philippe Mathieu-Daudé +S: Supported +F: pc-bios/descriptors/??-edk2-*.json +F: pc-bios/edk2-* +F: roms/Makefile.edk2 +F: roms/edk2 +F: roms/edk2-* +F: tests/data/uefi-boot-images/ +F: tests/uefi-test-tools/ +F: .gitlab-ci-edk2.yml +F: .gitlab-ci.d/edk2/ + +Usermode Emulation +------------------ +Overall usermode emulation +M: Riku Voipio +S: Maintained +F: thunk.c +F: accel/tcg/user-exec*.c + +BSD user +S: Orphan +F: bsd-user/ +F: default-configs/*-bsd-user.mak + +Linux user +M: Riku Voipio +R: Laurent Vivier +S: Maintained +F: linux-user/ +F: default-configs/*-linux-user.mak +F: scripts/qemu-binfmt-conf.sh +F: scripts/update-syscalltbl.sh +F: scripts/update-mips-syscall-args.sh +F: scripts/gensyscalls.sh + +Tiny Code Generator (TCG) +------------------------- +Common TCG code +M: Richard Henderson +S: Maintained +F: tcg/ +F: include/tcg/ + +TCG Plugins +M: Alex Bennée +S: Maintained +F: docs/devel/tcg-plugins.rst +F: plugins/ +F: tests/plugin + +AArch64 TCG target +M: Richard Henderson +S: Maintained +L: qemu-arm@nongnu.org +F: tcg/aarch64/ +F: disas/arm-a64.cc +F: disas/libvixl/ + +ARM TCG target +M: Andrzej Zaborowski +S: Maintained +L: qemu-arm@nongnu.org +F: tcg/arm/ +F: disas/arm.c + +i386 TCG target +M: Richard Henderson +S: Maintained +F: tcg/i386/ +F: disas/i386.c + +MIPS TCG target +M: Aleksandar Markovic +R: Aurelien Jarno +R: Aleksandar Rikalo +S: Maintained +F: tcg/mips/ + +PPC TCG target +M: Richard Henderson +S: Odd Fixes +F: tcg/ppc/ +F: disas/ppc.c + +RISC-V TCG target +M: Palmer Dabbelt +M: Alistair Francis +L: qemu-riscv@nongnu.org +S: Maintained +F: tcg/riscv/ +F: disas/riscv.c + +S390 TCG target +M: Richard Henderson +S: Maintained +F: tcg/s390/ +F: disas/s390.c +L: qemu-s390x@nongnu.org + +SPARC TCG target +S: Odd Fixes +F: tcg/sparc/ +F: disas/sparc.c + +TCI TCG target +M: Stefan Weil +S: Maintained +F: tcg/tci/ +F: tcg/tci.c +F: disas/tci.c + +Block drivers +------------- +VMDK +M: Fam Zheng +L: qemu-block@nongnu.org +S: Supported +F: block/vmdk.c + +RBD +M: Jason Dillaman +L: qemu-block@nongnu.org +S: Supported +F: 
block/rbd.c + +Sheepdog +M: Liu Yuan +L: qemu-block@nongnu.org +L: sheepdog@lists.wpkg.org +S: Odd Fixes +F: block/sheepdog.c + +VHDX +M: Jeff Cody +L: qemu-block@nongnu.org +S: Supported +F: block/vhdx* + +VDI +M: Stefan Weil +L: qemu-block@nongnu.org +S: Maintained +F: block/vdi.c + +iSCSI +M: Ronnie Sahlberg +M: Paolo Bonzini +M: Peter Lieven +L: qemu-block@nongnu.org +S: Odd Fixes +F: block/iscsi.c +F: block/iscsi-opts.c + +Network Block Device (NBD) +M: Eric Blake +L: qemu-block@nongnu.org +S: Maintained +F: block/nbd* +F: nbd/ +F: include/block/nbd* +F: qemu-nbd.* +F: blockdev-nbd.c +F: docs/interop/nbd.txt +F: docs/interop/qemu-nbd.rst +T: git https://repo.or.cz/qemu/ericb.git nbd + +NFS +M: Peter Lieven +L: qemu-block@nongnu.org +S: Maintained +F: block/nfs.c + +SSH +M: Richard W.M. Jones +L: qemu-block@nongnu.org +S: Supported +F: block/ssh.c + +CURL +L: qemu-block@nongnu.org +S: Odd Fixes +F: block/curl.c + +GLUSTER +L: qemu-block@nongnu.org +L: integration@gluster.org +S: Odd Fixes +F: block/gluster.c + +Null Block Driver +M: Fam Zheng +L: qemu-block@nongnu.org +S: Supported +F: block/null.c + +NVMe Block Driver +M: Fam Zheng +L: qemu-block@nongnu.org +S: Supported +F: block/nvme* + +Bootdevice +M: Gonglei +S: Maintained +F: bootdevice.c + +Quorum +M: Alberto Garcia +S: Supported +F: block/quorum.c +L: qemu-block@nongnu.org + +blklogwrites +M: Ari Sundholm +L: qemu-block@nongnu.org +S: Supported +F: block/blklogwrites.c + +blkverify +M: Stefan Hajnoczi +L: qemu-block@nongnu.org +S: Supported +F: block/blkverify.c + +bochs +M: Stefan Hajnoczi +L: qemu-block@nongnu.org +S: Supported +F: block/bochs.c + +cloop +M: Stefan Hajnoczi +L: qemu-block@nongnu.org +S: Supported +F: block/cloop.c + +dmg +M: Stefan Hajnoczi +L: qemu-block@nongnu.org +S: Supported +F: block/dmg.c + +parallels +M: Stefan Hajnoczi +M: Denis V. 
Lunev +L: qemu-block@nongnu.org +S: Supported +F: block/parallels.c +F: docs/interop/parallels.txt + +qed +M: Stefan Hajnoczi +L: qemu-block@nongnu.org +S: Supported +F: block/qed.c + +raw +M: Kevin Wolf +L: qemu-block@nongnu.org +S: Supported +F: block/linux-aio.c +F: include/block/raw-aio.h +F: block/raw-format.c +F: block/file-posix.c +F: block/file-win32.c +F: block/win32-aio.c + +Linux io_uring +M: Aarushi Mehta +M: Julia Suvorova +M: Stefan Hajnoczi +L: qemu-block@nongnu.org +S: Maintained +F: block/io_uring.c +F: stubs/io_uring.c + +qcow2 +M: Kevin Wolf +M: Max Reitz +L: qemu-block@nongnu.org +S: Supported +F: block/qcow2* +F: docs/interop/qcow2.txt + +qcow +M: Kevin Wolf +L: qemu-block@nongnu.org +S: Supported +F: block/qcow.c + +blkdebug +M: Kevin Wolf +M: Max Reitz +L: qemu-block@nongnu.org +S: Supported +F: block/blkdebug.c + +vpc +M: Kevin Wolf +L: qemu-block@nongnu.org +S: Supported +F: block/vpc.c + +vvfat +M: Kevin Wolf +L: qemu-block@nongnu.org +S: Supported +F: block/vvfat.c + +Image format fuzzer +M: Stefan Hajnoczi +L: qemu-block@nongnu.org +S: Supported +F: tests/image-fuzzer/ + +Replication +M: Wen Congyang +M: Xie Changlong +S: Supported +F: replication* +F: block/replication.c +F: tests/test-replication.c +F: docs/block-replication.txt + +PVRDMA +M: Yuval Shaia +M: Marcel Apfelbaum +S: Maintained +F: hw/rdma/* +F: hw/rdma/vmw/* +F: docs/pvrdma.txt +F: contrib/rdmacm-mux/* +F: qapi/rdma.json + +Semihosting +M: Alex Bennée +S: Maintained +F: hw/semihosting/ +F: include/hw/semihosting/ + +Build and test automation +------------------------- +Build and test automation +M: Alex Bennée +M: Fam Zheng +R: Philippe Mathieu-Daudé +S: Maintained +F: .github/lockdown.yml +F: .travis.yml +F: scripts/travis/ +F: .shippable.yml +F: tests/docker/ +F: tests/vm/ +F: scripts/archive-source.sh +W: https://travis-ci.org/qemu/qemu +W: https://app.shippable.com/github/qemu/qemu +W: http://patchew.org/QEMU/ + +FreeBSD Hosted Continuous Integration +M: Ed Maste +M: Li-Wen Hsu +S: Maintained +F: .cirrus.yml +W: https://cirrus-ci.com/github/qemu/qemu + +GitLab Continuous Integration +M: Thomas Huth +S: Maintained +F: .gitlab-ci.yml + +Guest Test Compilation Support +M: Alex Bennée +R: Philippe Mathieu-Daudé +S: Maintained +F: tests/tcg/Makefile +F: tests/tcg/Makefile.include + +Documentation +------------- +Build system architecture +M: Daniel P. Berrange +S: Odd Fixes +F: docs/devel/build-system.txt + +GIT Data Mining Config +M: Alex Bennée +S: Odd Fixes +F: gitdm.config +F: contrib/gitdm/* + +Incompatible changes +R: libvir-list@redhat.com +F: docs/system/deprecated.rst + +Build System +------------ +GIT submodules +M: Daniel P. Berrange +S: Odd Fixes +F: scripts/git-submodule.sh + +UI translations +M: Aleksandar Markovic +F: po/*.po + +Sphinx documentation configuration and build machinery +M: Peter Maydell +S: Maintained +F: docs/conf.py +F: docs/*/conf.py diff --git a/qemu/Makefile b/qemu/Makefile deleted file mode 100644 index 8e5762e8..00000000 --- a/qemu/Makefile +++ /dev/null @@ -1,115 +0,0 @@ -# Makefile for QEMU - modified for Unicorn engine. - -# Always point to the root of the build tree (needs GNU make). -BUILD_DIR=$(CURDIR) - -# All following code might depend on configuration variables -ifneq ($(wildcard config-host.mak),) -# Put the all: rule here so that config-host.mak can contain dependencies. -all: -include config-host.mak - -# Check that we're not trying to do an out-of-tree build from -# a tree that's been used for an in-tree build. 
-ifneq ($(realpath $(SRC_PATH)),$(realpath .)) -ifneq ($(wildcard $(SRC_PATH)/config-host.mak),) -$(error This is an out of tree build but your source tree ($(SRC_PATH)) \ -seems to have been used for an in-tree build. You can fix this by running \ -"make distclean && rm -rf *-linux-user *-softmmu" in your source tree) -endif -endif - -CONFIG_SOFTMMU := $(if $(filter %-softmmu,$(TARGET_DIRS)),y) --include config-all-devices.mak - -include $(SRC_PATH)/rules.mak -config-host.mak: $(SRC_PATH)/configure - @echo $@ is out-of-date, running configure -else -config-host.mak: -ifneq ($(filter-out %clean,$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail)) - @echo "Please call configure before running make!" - @exit 1 -endif -endif - -GENERATED_HEADERS = config-host.h - -# Don't try to regenerate Makefile or configure -# We don't generate any of them -Makefile: ; -configure: ; - -.PHONY: all clean distclean recurse-all - -$(call set-vpath, $(SRC_PATH)) - -SUBDIR_MAKEFLAGS=$(if $(V),,--no-print-directory) BUILD_DIR=$(BUILD_DIR) -SUBDIR_DEVICES_MAK=$(patsubst %, %/config-devices.mak, $(TARGET_DIRS)) -SUBDIR_DEVICES_MAK_DEP=$(patsubst %, %-config-devices.mak.d, $(TARGET_DIRS)) - -ifeq ($(SUBDIR_DEVICES_MAK),) -config-all-devices.mak: - $(call quiet-command,echo '# no devices' > $@," GEN $@") -else -config-all-devices.mak: $(SUBDIR_DEVICES_MAK) - $(call quiet-command, sed -n \ - 's|^\([^=]*\)=\(.*\)$$|\1:=$$(findstring y,$$(\1)\2)|p' \ - $(SUBDIR_DEVICES_MAK) | sort -u > $@, \ - " GEN $@") -endif - --include $(SUBDIR_DEVICES_MAK_DEP) - -%/config-devices.mak: default-configs/%.mak - $(call quiet-command, cp $< $@, " GEN $@") - -ifneq ($(wildcard config-host.mak),) -include $(SRC_PATH)/Makefile.objs -endif - -dummy := $(call unnest-vars,,util-obj-y common-obj-y) - -all: recurse-all - -config-host.h: config-host.h-timestamp -config-host.h-timestamp: config-host.mak - -SUBDIR_RULES=$(patsubst %,subdir-%, $(TARGET_DIRS)) -SOFTMMU_SUBDIR_RULES=$(filter %-softmmu,$(SUBDIR_RULES)) - -$(SOFTMMU_SUBDIR_RULES): config-all-devices.mak - -subdir-%: - $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $* V="$(V)" TARGET_DIR="$*/" all,) - -$(SUBDIR_RULES): qapi-types.c qapi-types.h qapi-visit.c qapi-visit.h $(common-obj-y) $(util-obj-y) - -recurse-all: $(SUBDIR_RULES) - -###################################################################### - -clean: - find . 
\( -name '*.l[oa]' -o -name '*.so' -o -name '*.dll' -o -name '*.mo' -o -name '*.[oda]' \) -type f -exec rm {} + - rm -f TAGS *~ */*~ - @# May not be present in GENERATED_HEADERS - rm -f $(foreach f,$(GENERATED_HEADERS),$(f) $(f)-timestamp) - for d in $(TARGET_DIRS); do \ - if test -d $$d; then $(MAKE) -C $$d $@ || exit 1; fi; \ - done - -distclean: clean - rm -f config-host.mak config-host.h* - rm -f config-all-devices.mak - rm -f config.log config.status - for d in $(TARGET_DIRS); do \ - rm -rf $$d || exit 1 ; \ - done - - -# Add a dependency on the generated files, so that they are always -# rebuilt before other object files -ifneq ($(filter-out %clean,$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail)) -Makefile: $(GENERATED_HEADERS) -endif - diff --git a/qemu/Makefile.objs b/qemu/Makefile.objs deleted file mode 100644 index fcf5f302..00000000 --- a/qemu/Makefile.objs +++ /dev/null @@ -1,12 +0,0 @@ -####################################################################### -# Common libraries for tools and emulators -util-obj-y = util/ qobject/ qapi/ qapi-types.o qapi-visit.o - -common-obj-y += hw/ -common-obj-y += accel.o -common-obj-y += vl.o qemu-timer.o -common-obj-y += ../uc.o ../list.o glib_compat.o -common-obj-y += qemu-log.o -common-obj-y += tcg-runtime.o -common-obj-y += hw/ -common-obj-y += qom/ diff --git a/qemu/Makefile.target b/qemu/Makefile.target deleted file mode 100644 index 356ce1c1..00000000 --- a/qemu/Makefile.target +++ /dev/null @@ -1,84 +0,0 @@ -# -*- Mode: makefile -*- - -include ../config-host.mak -include config-target.mak -include config-devices.mak -include $(SRC_PATH)/rules.mak - -$(call set-vpath, $(SRC_PATH)) -QEMU_CFLAGS += -I.. -I$(SRC_PATH)/target-$(TARGET_BASE_ARCH) -DNEED_CPU_H - -QEMU_CFLAGS+=-I$(SRC_PATH)/include - -# system emulator name -QEMU_PROG=qemu-system-$(TARGET_NAME)$(EXESUF) - -config-target.h: config-target.h-timestamp -config-target.h-timestamp: config-target.mak - -all: $(QEMU_PROG) - -######################################################### -# cpu emulator library -obj-y = exec.o translate-all.o cpu-exec.o -obj-y += tcg/tcg.o tcg/optimize.o -obj-y += fpu/softfloat.o -obj-y += target-$(TARGET_BASE_ARCH)/ - -######################################################### -# System emulator target -obj-y += cpus.o ioport.o -obj-y += hw/ -obj-y += memory.o cputlb.o -obj-y += memory_mapping.o - -# Hardware support -ifeq ($(TARGET_NAME), sparc64) -obj-y += hw/sparc64/ -else -obj-y += hw/$(TARGET_BASE_ARCH)/ -endif - -# Workaround for http://gcc.gnu.org/PR55489, see configure. -%/translate.o: QEMU_CFLAGS += $(TRANSLATE_OPT_CFLAGS) - -dummy := $(call unnest-vars,,obj-y) -all-obj-y := $(obj-y) - -common-obj-y := -include $(SRC_PATH)/Makefile.objs - -dummy := $(call unnest-vars,..,util-obj-y) - -target-obj-y-save := $(target-obj-y) $(util-obj-y) - -dummy := $(call unnest-vars,..,common-obj-y) - -target-obj-y := $(target-obj-y-save) -all-obj-y += $(common-obj-y) -all-obj-y += $(target-obj-y) - -# determine shared lib extension -IS_APPLE := $(shell $(CC) -dM -E - < /dev/null | grep __apple_build_version__ | wc -l | tr -d " ") -ifeq ($(IS_APPLE),1) -EXT = dylib -else -# Cygwin? -IS_CYGWIN := $(shell $(CC) -dumpmachine | grep -i cygwin | wc -l) -ifeq ($(IS_CYGWIN),1) -EXT = dll -else -EXT = so -endif -endif - -# build either PROG or PROGW -$(QEMU_PROG): $(all-obj-y) - -clean: - rm -f *.a *~ $(QEMU_PROG) - rm -f $(shell find . 
-name '*.[od]') - -GENERATED_HEADERS += config-target.h -Makefile: $(GENERATED_HEADERS) - diff --git a/qemu/VERSION b/qemu/VERSION index c043eea7..6b244dcd 100644 --- a/qemu/VERSION +++ b/qemu/VERSION @@ -1 +1 @@ -2.2.1 +5.0.1 diff --git a/qemu/aarch64.h b/qemu/aarch64.h index 470a62d4..ffc68d52 100644 --- a/qemu/aarch64.h +++ b/qemu/aarch64.h @@ -1,1367 +1,1288 @@ /* Autogen header for Unicorn Engine - DONOT MODIFY */ -#ifndef UNICORN_AUTOGEN_AARCH64_H -#define UNICORN_AUTOGEN_AARCH64_H -#define arm_release arm_release_aarch64 -#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_aarch64 -#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_aarch64 -#define use_idiv_instructions_rt use_idiv_instructions_rt_aarch64 -#define tcg_target_deposit_valid tcg_target_deposit_valid_aarch64 -#define helper_power_down helper_power_down_aarch64 -#define check_exit_request check_exit_request_aarch64 -#define address_space_unregister address_space_unregister_aarch64 -#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_aarch64 -#define phys_mem_clean phys_mem_clean_aarch64 -#define tb_cleanup tb_cleanup_aarch64 +#ifndef UNICORN_AUTOGEN_aarch64_H +#define UNICORN_AUTOGEN_aarch64_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _aarch64 +#endif +#define arm_arch arm_arch_aarch64 +#define tb_target_set_jmp_target tb_target_set_jmp_target_aarch64 +#define have_bmi1 have_bmi1_aarch64 +#define have_popcnt have_popcnt_aarch64 +#define have_avx1 have_avx1_aarch64 +#define have_avx2 have_avx2_aarch64 +#define have_isa have_isa_aarch64 +#define have_altivec have_altivec_aarch64 +#define have_vsx have_vsx_aarch64 +#define flush_icache_range flush_icache_range_aarch64 +#define s390_facilities s390_facilities_aarch64 +#define tcg_dump_op tcg_dump_op_aarch64 +#define tcg_dump_ops tcg_dump_ops_aarch64 +#define tcg_gen_and_i64 tcg_gen_and_i64_aarch64 +#define tcg_gen_discard_i64 tcg_gen_discard_i64_aarch64 +#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_aarch64 +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_aarch64 +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_aarch64 +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_aarch64 +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_aarch64 +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_aarch64 +#define tcg_gen_ld_i64 tcg_gen_ld_i64_aarch64 +#define tcg_gen_mov_i64 tcg_gen_mov_i64_aarch64 +#define tcg_gen_movi_i64 tcg_gen_movi_i64_aarch64 +#define tcg_gen_mul_i64 tcg_gen_mul_i64_aarch64 +#define tcg_gen_or_i64 tcg_gen_or_i64_aarch64 +#define tcg_gen_sar_i64 tcg_gen_sar_i64_aarch64 +#define tcg_gen_shl_i64 tcg_gen_shl_i64_aarch64 +#define tcg_gen_shr_i64 tcg_gen_shr_i64_aarch64 +#define tcg_gen_st_i64 tcg_gen_st_i64_aarch64 +#define tcg_gen_xor_i64 tcg_gen_xor_i64_aarch64 +#define cpu_icount_to_ns cpu_icount_to_ns_aarch64 +#define cpu_is_stopped cpu_is_stopped_aarch64 +#define cpu_get_ticks cpu_get_ticks_aarch64 +#define cpu_get_clock cpu_get_clock_aarch64 +#define cpu_resume cpu_resume_aarch64 +#define qemu_init_vcpu qemu_init_vcpu_aarch64 +#define cpu_stop_current cpu_stop_current_aarch64 +#define resume_all_vcpus resume_all_vcpus_aarch64 +#define vm_start vm_start_aarch64 +#define address_space_dispatch_compact address_space_dispatch_compact_aarch64 +#define flatview_translate flatview_translate_aarch64 +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_aarch64 +#define qemu_get_cpu qemu_get_cpu_aarch64 +#define cpu_address_space_init cpu_address_space_init_aarch64 +#define cpu_get_address_space cpu_get_address_space_aarch64 +#define 
cpu_exec_unrealizefn cpu_exec_unrealizefn_aarch64 +#define cpu_exec_initfn cpu_exec_initfn_aarch64 +#define cpu_exec_realizefn cpu_exec_realizefn_aarch64 +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_aarch64 +#define cpu_watchpoint_insert cpu_watchpoint_insert_aarch64 +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_aarch64 +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_aarch64 +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_aarch64 +#define cpu_breakpoint_insert cpu_breakpoint_insert_aarch64 +#define cpu_breakpoint_remove cpu_breakpoint_remove_aarch64 +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_aarch64 +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_aarch64 +#define cpu_abort cpu_abort_aarch64 +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_aarch64 +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_aarch64 +#define flatview_add_to_dispatch flatview_add_to_dispatch_aarch64 +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_aarch64 +#define qemu_ram_get_offset qemu_ram_get_offset_aarch64 +#define qemu_ram_get_used_length qemu_ram_get_used_length_aarch64 +#define qemu_ram_is_shared qemu_ram_is_shared_aarch64 +#define qemu_ram_pagesize qemu_ram_pagesize_aarch64 +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_aarch64 +#define qemu_ram_alloc qemu_ram_alloc_aarch64 +#define qemu_ram_free qemu_ram_free_aarch64 +#define qemu_map_ram_ptr qemu_map_ram_ptr_aarch64 +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_aarch64 +#define qemu_ram_block_from_host qemu_ram_block_from_host_aarch64 +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_aarch64 +#define cpu_check_watchpoint cpu_check_watchpoint_aarch64 +#define iotlb_to_section iotlb_to_section_aarch64 +#define address_space_dispatch_new address_space_dispatch_new_aarch64 +#define address_space_dispatch_free address_space_dispatch_free_aarch64 +#define flatview_read_continue flatview_read_continue_aarch64 +#define address_space_read_full address_space_read_full_aarch64 +#define address_space_write address_space_write_aarch64 +#define address_space_rw address_space_rw_aarch64 +#define cpu_physical_memory_rw cpu_physical_memory_rw_aarch64 +#define address_space_write_rom address_space_write_rom_aarch64 +#define cpu_flush_icache_range cpu_flush_icache_range_aarch64 +#define cpu_exec_init_all cpu_exec_init_all_aarch64 +#define address_space_access_valid address_space_access_valid_aarch64 +#define address_space_map address_space_map_aarch64 +#define address_space_unmap address_space_unmap_aarch64 +#define cpu_physical_memory_map cpu_physical_memory_map_aarch64 +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_aarch64 +#define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64 +#define qemu_target_page_size qemu_target_page_size_aarch64 +#define qemu_target_page_bits qemu_target_page_bits_aarch64 +#define qemu_target_page_bits_min qemu_target_page_bits_min_aarch64 +#define target_words_bigendian target_words_bigendian_aarch64 +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_aarch64 +#define ram_block_discard_range ram_block_discard_range_aarch64 +#define ramblock_is_pmem ramblock_is_pmem_aarch64 +#define page_size_init page_size_init_aarch64 +#define set_preferred_target_page_bits set_preferred_target_page_bits_aarch64 +#define finalize_target_page_bits finalize_target_page_bits_aarch64 +#define cpu_outb cpu_outb_aarch64 +#define cpu_outw 
cpu_outw_aarch64 +#define cpu_outl cpu_outl_aarch64 +#define cpu_inb cpu_inb_aarch64 +#define cpu_inw cpu_inw_aarch64 +#define cpu_inl cpu_inl_aarch64 #define memory_map memory_map_aarch64 +#define memory_map_io memory_map_io_aarch64 #define memory_map_ptr memory_map_ptr_aarch64 #define memory_unmap memory_unmap_aarch64 #define memory_free memory_free_aarch64 -#define free_code_gen_buffer free_code_gen_buffer_aarch64 -#define helper_raise_exception helper_raise_exception_aarch64 -#define tcg_enabled tcg_enabled_aarch64 -#define tcg_exec_init tcg_exec_init_aarch64 -#define memory_register_types memory_register_types_aarch64 -#define cpu_exec_init_all cpu_exec_init_all_aarch64 -#define vm_start vm_start_aarch64 -#define resume_all_vcpus resume_all_vcpus_aarch64 -#define a15_l2ctlr_read a15_l2ctlr_read_aarch64 -#define a64_translate_init a64_translate_init_aarch64 -#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_aarch64 -#define aa64_cacheop_access aa64_cacheop_access_aarch64 -#define aa64_daif_access aa64_daif_access_aarch64 -#define aa64_daif_write aa64_daif_write_aarch64 -#define aa64_dczid_read aa64_dczid_read_aarch64 -#define aa64_fpcr_read aa64_fpcr_read_aarch64 -#define aa64_fpcr_write aa64_fpcr_write_aarch64 -#define aa64_fpsr_read aa64_fpsr_read_aarch64 -#define aa64_fpsr_write aa64_fpsr_write_aarch64 -#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_aarch64 -#define aa64_zva_access aa64_zva_access_aarch64 -#define aarch64_banked_spsr_index aarch64_banked_spsr_index_aarch64 -#define aarch64_restore_sp aarch64_restore_sp_aarch64 -#define aarch64_save_sp aarch64_save_sp_aarch64 -#define accel_find accel_find_aarch64 -#define accel_init_machine accel_init_machine_aarch64 -#define accel_type accel_type_aarch64 -#define access_with_adjusted_size access_with_adjusted_size_aarch64 -#define add128 add128_aarch64 -#define add16_sat add16_sat_aarch64 -#define add16_usat add16_usat_aarch64 -#define add192 add192_aarch64 -#define add8_sat add8_sat_aarch64 -#define add8_usat add8_usat_aarch64 -#define add_cpreg_to_hashtable add_cpreg_to_hashtable_aarch64 -#define add_cpreg_to_list add_cpreg_to_list_aarch64 -#define addFloat128Sigs addFloat128Sigs_aarch64 -#define addFloat32Sigs addFloat32Sigs_aarch64 -#define addFloat64Sigs addFloat64Sigs_aarch64 -#define addFloatx80Sigs addFloatx80Sigs_aarch64 -#define add_qemu_ldst_label add_qemu_ldst_label_aarch64 -#define address_space_access_valid address_space_access_valid_aarch64 -#define address_space_destroy address_space_destroy_aarch64 -#define address_space_destroy_dispatch address_space_destroy_dispatch_aarch64 -#define address_space_get_flatview address_space_get_flatview_aarch64 -#define address_space_init address_space_init_aarch64 -#define address_space_init_dispatch address_space_init_dispatch_aarch64 -#define address_space_lookup_region address_space_lookup_region_aarch64 -#define address_space_map address_space_map_aarch64 -#define address_space_read address_space_read_aarch64 -#define address_space_rw address_space_rw_aarch64 -#define address_space_translate address_space_translate_aarch64 -#define address_space_translate_for_iotlb address_space_translate_for_iotlb_aarch64 -#define address_space_translate_internal address_space_translate_internal_aarch64 -#define address_space_unmap address_space_unmap_aarch64 -#define address_space_update_topology address_space_update_topology_aarch64 -#define address_space_update_topology_pass address_space_update_topology_pass_aarch64 -#define address_space_write 
address_space_write_aarch64 -#define addrrange_contains addrrange_contains_aarch64 -#define addrrange_end addrrange_end_aarch64 -#define addrrange_equal addrrange_equal_aarch64 -#define addrrange_intersection addrrange_intersection_aarch64 -#define addrrange_intersects addrrange_intersects_aarch64 -#define addrrange_make addrrange_make_aarch64 -#define adjust_endianness adjust_endianness_aarch64 -#define all_helpers all_helpers_aarch64 -#define alloc_code_gen_buffer alloc_code_gen_buffer_aarch64 -#define alloc_entry alloc_entry_aarch64 -#define always_true always_true_aarch64 -#define arm1026_initfn arm1026_initfn_aarch64 -#define arm1136_initfn arm1136_initfn_aarch64 -#define arm1136_r2_initfn arm1136_r2_initfn_aarch64 -#define arm1176_initfn arm1176_initfn_aarch64 -#define arm11mpcore_initfn arm11mpcore_initfn_aarch64 -#define arm926_initfn arm926_initfn_aarch64 -#define arm946_initfn arm946_initfn_aarch64 -#define arm_ccnt_enabled arm_ccnt_enabled_aarch64 -#define arm_cp_read_zero arm_cp_read_zero_aarch64 -#define arm_cp_reset_ignore arm_cp_reset_ignore_aarch64 -#define arm_cpu_do_interrupt arm_cpu_do_interrupt_aarch64 -#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_aarch64 -#define arm_cpu_finalizefn arm_cpu_finalizefn_aarch64 -#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_aarch64 -#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_aarch64 -#define arm_cpu_initfn arm_cpu_initfn_aarch64 -#define arm_cpu_list arm_cpu_list_aarch64 -#define cpu_loop_exit cpu_loop_exit_aarch64 -#define arm_cpu_post_init arm_cpu_post_init_aarch64 -#define arm_cpu_realizefn arm_cpu_realizefn_aarch64 -#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_aarch64 -#define arm_cpu_register_types arm_cpu_register_types_aarch64 -#define cpu_resume_from_signal cpu_resume_from_signal_aarch64 -#define arm_cpus arm_cpus_aarch64 -#define arm_cpu_set_pc arm_cpu_set_pc_aarch64 -#define arm_cp_write_ignore arm_cp_write_ignore_aarch64 -#define arm_current_el arm_current_el_aarch64 -#define arm_dc_feature arm_dc_feature_aarch64 -#define arm_debug_excp_handler arm_debug_excp_handler_aarch64 -#define arm_debug_target_el arm_debug_target_el_aarch64 -#define arm_el_is_aa64 arm_el_is_aa64_aarch64 -#define arm_env_get_cpu arm_env_get_cpu_aarch64 -#define arm_excp_target_el arm_excp_target_el_aarch64 -#define arm_excp_unmasked arm_excp_unmasked_aarch64 -#define arm_feature arm_feature_aarch64 -#define arm_generate_debug_exceptions arm_generate_debug_exceptions_aarch64 -#define gen_intermediate_code gen_intermediate_code_aarch64 -#define gen_intermediate_code_pc gen_intermediate_code_pc_aarch64 -#define arm_gen_test_cc arm_gen_test_cc_aarch64 -#define arm_gt_ptimer_cb arm_gt_ptimer_cb_aarch64 -#define arm_gt_vtimer_cb arm_gt_vtimer_cb_aarch64 -#define arm_handle_psci_call arm_handle_psci_call_aarch64 -#define arm_is_psci_call arm_is_psci_call_aarch64 -#define arm_is_secure arm_is_secure_aarch64 -#define arm_is_secure_below_el3 arm_is_secure_below_el3_aarch64 -#define arm_ldl_code arm_ldl_code_aarch64 -#define arm_lduw_code arm_lduw_code_aarch64 -#define arm_log_exception arm_log_exception_aarch64 -#define arm_reg_read arm_reg_read_aarch64 -#define arm_reg_reset arm_reg_reset_aarch64 -#define arm_reg_write arm_reg_write_aarch64 -#define restore_state_to_opc restore_state_to_opc_aarch64 -#define arm_rmode_to_sf arm_rmode_to_sf_aarch64 -#define arm_singlestep_active arm_singlestep_active_aarch64 -#define tlb_fill tlb_fill_aarch64 -#define tlb_flush tlb_flush_aarch64 
-#define tlb_flush_page tlb_flush_page_aarch64 -#define tlb_set_page tlb_set_page_aarch64 -#define arm_translate_init arm_translate_init_aarch64 -#define arm_v7m_class_init arm_v7m_class_init_aarch64 -#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_aarch64 -#define ats_access ats_access_aarch64 -#define ats_write ats_write_aarch64 -#define bad_mode_switch bad_mode_switch_aarch64 -#define bank_number bank_number_aarch64 -#define bitmap_zero_extend bitmap_zero_extend_aarch64 -#define bp_wp_matches bp_wp_matches_aarch64 -#define breakpoint_invalidate breakpoint_invalidate_aarch64 -#define build_page_bitmap build_page_bitmap_aarch64 -#define bus_add_child bus_add_child_aarch64 -#define bus_class_init bus_class_init_aarch64 -#define bus_info bus_info_aarch64 -#define bus_unparent bus_unparent_aarch64 -#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_aarch64 -#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_aarch64 -#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_aarch64 -#define call_recip_estimate call_recip_estimate_aarch64 -#define can_merge can_merge_aarch64 -#define capacity_increase capacity_increase_aarch64 -#define ccsidr_read ccsidr_read_aarch64 -#define check_ap check_ap_aarch64 -#define check_breakpoints check_breakpoints_aarch64 -#define check_watchpoints check_watchpoints_aarch64 -#define cho cho_aarch64 -#define clear_bit clear_bit_aarch64 -#define clz32 clz32_aarch64 -#define clz64 clz64_aarch64 -#define cmp_flatrange_addr cmp_flatrange_addr_aarch64 -#define code_gen_alloc code_gen_alloc_aarch64 -#define commonNaNToFloat128 commonNaNToFloat128_aarch64 -#define commonNaNToFloat16 commonNaNToFloat16_aarch64 -#define commonNaNToFloat32 commonNaNToFloat32_aarch64 -#define commonNaNToFloat64 commonNaNToFloat64_aarch64 -#define commonNaNToFloatx80 commonNaNToFloatx80_aarch64 -#define compute_abs_deadline compute_abs_deadline_aarch64 -#define cond_name cond_name_aarch64 -#define configure_accelerator configure_accelerator_aarch64 -#define container_get container_get_aarch64 -#define container_info container_info_aarch64 -#define container_register_types container_register_types_aarch64 -#define contextidr_write contextidr_write_aarch64 -#define core_log_global_start core_log_global_start_aarch64 -#define core_log_global_stop core_log_global_stop_aarch64 -#define core_memory_listener core_memory_listener_aarch64 -#define cortexa15_cp_reginfo cortexa15_cp_reginfo_aarch64 -#define cortex_a15_initfn cortex_a15_initfn_aarch64 -#define cortexa8_cp_reginfo cortexa8_cp_reginfo_aarch64 -#define cortex_a8_initfn cortex_a8_initfn_aarch64 -#define cortexa9_cp_reginfo cortexa9_cp_reginfo_aarch64 -#define cortex_a9_initfn cortex_a9_initfn_aarch64 -#define cortex_m3_initfn cortex_m3_initfn_aarch64 -#define count_cpreg count_cpreg_aarch64 -#define countLeadingZeros32 countLeadingZeros32_aarch64 -#define countLeadingZeros64 countLeadingZeros64_aarch64 -#define cp_access_ok cp_access_ok_aarch64 -#define cpacr_write cpacr_write_aarch64 -#define cpreg_field_is_64bit cpreg_field_is_64bit_aarch64 -#define cp_reginfo cp_reginfo_aarch64 -#define cpreg_key_compare cpreg_key_compare_aarch64 -#define cpreg_make_keylist cpreg_make_keylist_aarch64 -#define cp_reg_reset cp_reg_reset_aarch64 -#define cpreg_to_kvm_id cpreg_to_kvm_id_aarch64 -#define cpsr_read cpsr_read_aarch64 -#define cpsr_write cpsr_write_aarch64 -#define cptype_valid cptype_valid_aarch64 -#define cpu_abort cpu_abort_aarch64 -#define cpu_arm_exec cpu_arm_exec_aarch64 -#define cpu_arm_gen_code 
cpu_arm_gen_code_aarch64 -#define cpu_arm_init cpu_arm_init_aarch64 -#define cpu_breakpoint_insert cpu_breakpoint_insert_aarch64 -#define cpu_breakpoint_remove cpu_breakpoint_remove_aarch64 -#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_aarch64 -#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_aarch64 -#define cpu_can_do_io cpu_can_do_io_aarch64 -#define cpu_can_run cpu_can_run_aarch64 -#define cpu_class_init cpu_class_init_aarch64 -#define cpu_common_class_by_name cpu_common_class_by_name_aarch64 -#define cpu_common_exec_interrupt cpu_common_exec_interrupt_aarch64 -#define cpu_common_get_arch_id cpu_common_get_arch_id_aarch64 -#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_aarch64 -#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_aarch64 -#define cpu_common_has_work cpu_common_has_work_aarch64 -#define cpu_common_initfn cpu_common_initfn_aarch64 -#define cpu_common_noop cpu_common_noop_aarch64 -#define cpu_common_parse_features cpu_common_parse_features_aarch64 -#define cpu_common_realizefn cpu_common_realizefn_aarch64 -#define cpu_common_reset cpu_common_reset_aarch64 -#define cpu_dump_statistics cpu_dump_statistics_aarch64 -#define cpu_exec_init cpu_exec_init_aarch64 -#define cpu_flush_icache_range cpu_flush_icache_range_aarch64 -#define cpu_gen_init cpu_gen_init_aarch64 -#define cpu_get_clock cpu_get_clock_aarch64 -#define cpu_get_real_ticks cpu_get_real_ticks_aarch64 -#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_aarch64 -#define cpu_handle_debug_exception cpu_handle_debug_exception_aarch64 -#define cpu_handle_guest_debug cpu_handle_guest_debug_aarch64 -#define cpu_inb cpu_inb_aarch64 -#define cpu_inl cpu_inl_aarch64 -#define cpu_interrupt cpu_interrupt_aarch64 -#define cpu_interrupt_handler cpu_interrupt_handler_aarch64 -#define cpu_inw cpu_inw_aarch64 -#define cpu_io_recompile cpu_io_recompile_aarch64 -#define cpu_is_stopped cpu_is_stopped_aarch64 -#define cpu_ldl_code cpu_ldl_code_aarch64 -#define cpu_ldub_code cpu_ldub_code_aarch64 -#define cpu_lduw_code cpu_lduw_code_aarch64 -#define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64 -#define cpu_mmu_index cpu_mmu_index_aarch64 -#define cpu_outb cpu_outb_aarch64 -#define cpu_outl cpu_outl_aarch64 -#define cpu_outw cpu_outw_aarch64 -#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_aarch64 -#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_aarch64 -#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_aarch64 -#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_aarch64 -#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_aarch64 -#define cpu_physical_memory_is_io cpu_physical_memory_is_io_aarch64 -#define cpu_physical_memory_map cpu_physical_memory_map_aarch64 -#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_aarch64 -#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_aarch64 -#define cpu_physical_memory_rw cpu_physical_memory_rw_aarch64 -#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_aarch64 -#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_aarch64 -#define cpu_physical_memory_unmap cpu_physical_memory_unmap_aarch64 -#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_aarch64 -#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_aarch64 -#define cpu_register cpu_register_aarch64 
-#define cpu_register_types cpu_register_types_aarch64 -#define cpu_restore_state cpu_restore_state_aarch64 -#define cpu_restore_state_from_tb cpu_restore_state_from_tb_aarch64 -#define cpu_single_step cpu_single_step_aarch64 -#define cpu_tb_exec cpu_tb_exec_aarch64 -#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_aarch64 -#define cpu_to_be64 cpu_to_be64_aarch64 -#define cpu_to_le32 cpu_to_le32_aarch64 -#define cpu_to_le64 cpu_to_le64_aarch64 -#define cpu_type_info cpu_type_info_aarch64 -#define cpu_unassigned_access cpu_unassigned_access_aarch64 -#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_aarch64 -#define cpu_watchpoint_insert cpu_watchpoint_insert_aarch64 -#define cpu_watchpoint_remove cpu_watchpoint_remove_aarch64 -#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_aarch64 -#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_aarch64 -#define crc32c_table crc32c_table_aarch64 -#define create_new_memory_mapping create_new_memory_mapping_aarch64 -#define csselr_write csselr_write_aarch64 -#define cto32 cto32_aarch64 -#define ctr_el0_access ctr_el0_access_aarch64 -#define ctz32 ctz32_aarch64 -#define ctz64 ctz64_aarch64 -#define dacr_write dacr_write_aarch64 -#define dbgbcr_write dbgbcr_write_aarch64 -#define dbgbvr_write dbgbvr_write_aarch64 -#define dbgwcr_write dbgwcr_write_aarch64 -#define dbgwvr_write dbgwvr_write_aarch64 -#define debug_cp_reginfo debug_cp_reginfo_aarch64 -#define debug_frame debug_frame_aarch64 -#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_aarch64 -#define define_arm_cp_regs define_arm_cp_regs_aarch64 -#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_aarch64 -#define define_debug_regs define_debug_regs_aarch64 -#define define_one_arm_cp_reg define_one_arm_cp_reg_aarch64 -#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_aarch64 -#define deposit32 deposit32_aarch64 -#define deposit64 deposit64_aarch64 -#define deregister_tm_clones deregister_tm_clones_aarch64 -#define device_class_base_init device_class_base_init_aarch64 -#define device_class_init device_class_init_aarch64 -#define device_finalize device_finalize_aarch64 -#define device_get_realized device_get_realized_aarch64 -#define device_initfn device_initfn_aarch64 -#define device_post_init device_post_init_aarch64 -#define device_reset device_reset_aarch64 -#define device_set_realized device_set_realized_aarch64 -#define device_type_info device_type_info_aarch64 -#define disas_arm_insn disas_arm_insn_aarch64 -#define disas_coproc_insn disas_coproc_insn_aarch64 -#define disas_dsp_insn disas_dsp_insn_aarch64 -#define disas_iwmmxt_insn disas_iwmmxt_insn_aarch64 -#define disas_neon_data_insn disas_neon_data_insn_aarch64 -#define disas_neon_ls_insn disas_neon_ls_insn_aarch64 -#define disas_thumb2_insn disas_thumb2_insn_aarch64 -#define disas_thumb_insn disas_thumb_insn_aarch64 -#define disas_vfp_insn disas_vfp_insn_aarch64 -#define disas_vfp_v8_insn disas_vfp_v8_insn_aarch64 -#define do_arm_semihosting do_arm_semihosting_aarch64 -#define do_clz16 do_clz16_aarch64 -#define do_clz8 do_clz8_aarch64 -#define do_constant_folding do_constant_folding_aarch64 -#define do_constant_folding_2 do_constant_folding_2_aarch64 -#define do_constant_folding_cond do_constant_folding_cond_aarch64 -#define do_constant_folding_cond2 do_constant_folding_cond2_aarch64 -#define do_constant_folding_cond_32 do_constant_folding_cond_32_aarch64 -#define do_constant_folding_cond_64 do_constant_folding_cond_64_aarch64 -#define 
do_constant_folding_cond_eq do_constant_folding_cond_eq_aarch64 -#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_aarch64 -#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_aarch64 -#define do_ssat do_ssat_aarch64 -#define do_usad do_usad_aarch64 -#define do_usat do_usat_aarch64 -#define do_v7m_exception_exit do_v7m_exception_exit_aarch64 -#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_aarch64 -#define dummy_func dummy_func_aarch64 -#define dummy_section dummy_section_aarch64 -#define _DYNAMIC _DYNAMIC_aarch64 -#define _edata _edata_aarch64 -#define _end _end_aarch64 -#define end_list end_list_aarch64 -#define eq128 eq128_aarch64 -#define ErrorClass_lookup ErrorClass_lookup_aarch64 -#define error_copy error_copy_aarch64 -#define error_exit error_exit_aarch64 -#define error_get_class error_get_class_aarch64 -#define error_get_pretty error_get_pretty_aarch64 -#define error_setg_file_open error_setg_file_open_aarch64 -#define estimateDiv128To64 estimateDiv128To64_aarch64 -#define estimateSqrt32 estimateSqrt32_aarch64 -#define excnames excnames_aarch64 -#define excp_is_internal excp_is_internal_aarch64 -#define extended_addresses_enabled extended_addresses_enabled_aarch64 -#define extended_mpu_ap_bits extended_mpu_ap_bits_aarch64 -#define extract32 extract32_aarch64 -#define extract64 extract64_aarch64 -#define extractFloat128Exp extractFloat128Exp_aarch64 -#define extractFloat128Frac0 extractFloat128Frac0_aarch64 -#define extractFloat128Frac1 extractFloat128Frac1_aarch64 -#define extractFloat128Sign extractFloat128Sign_aarch64 -#define extractFloat16Exp extractFloat16Exp_aarch64 -#define extractFloat16Frac extractFloat16Frac_aarch64 -#define extractFloat16Sign extractFloat16Sign_aarch64 -#define extractFloat32Exp extractFloat32Exp_aarch64 -#define extractFloat32Frac extractFloat32Frac_aarch64 -#define extractFloat32Sign extractFloat32Sign_aarch64 -#define extractFloat64Exp extractFloat64Exp_aarch64 -#define extractFloat64Frac extractFloat64Frac_aarch64 -#define extractFloat64Sign extractFloat64Sign_aarch64 -#define extractFloatx80Exp extractFloatx80Exp_aarch64 -#define extractFloatx80Frac extractFloatx80Frac_aarch64 -#define extractFloatx80Sign extractFloatx80Sign_aarch64 -#define fcse_write fcse_write_aarch64 -#define find_better_copy find_better_copy_aarch64 -#define find_default_machine find_default_machine_aarch64 -#define find_desc_by_name find_desc_by_name_aarch64 -#define find_first_bit find_first_bit_aarch64 -#define find_paging_enabled_cpu find_paging_enabled_cpu_aarch64 -#define find_ram_block find_ram_block_aarch64 -#define find_ram_offset find_ram_offset_aarch64 -#define find_string find_string_aarch64 -#define find_type find_type_aarch64 -#define _fini _fini_aarch64 -#define flatrange_equal flatrange_equal_aarch64 -#define flatview_destroy flatview_destroy_aarch64 -#define flatview_init flatview_init_aarch64 -#define flatview_insert flatview_insert_aarch64 -#define flatview_lookup flatview_lookup_aarch64 -#define flatview_ref flatview_ref_aarch64 -#define flatview_simplify flatview_simplify_aarch64 #define flatview_unref flatview_unref_aarch64 -#define float128_add float128_add_aarch64 -#define float128_compare float128_compare_aarch64 -#define float128_compare_internal float128_compare_internal_aarch64 -#define float128_compare_quiet float128_compare_quiet_aarch64 -#define float128_default_nan float128_default_nan_aarch64 -#define float128_div float128_div_aarch64 -#define float128_eq float128_eq_aarch64 -#define float128_eq_quiet float128_eq_quiet_aarch64 -#define 
float128_is_quiet_nan float128_is_quiet_nan_aarch64 -#define float128_is_signaling_nan float128_is_signaling_nan_aarch64 -#define float128_le float128_le_aarch64 -#define float128_le_quiet float128_le_quiet_aarch64 -#define float128_lt float128_lt_aarch64 -#define float128_lt_quiet float128_lt_quiet_aarch64 -#define float128_maybe_silence_nan float128_maybe_silence_nan_aarch64 -#define float128_mul float128_mul_aarch64 -#define float128_rem float128_rem_aarch64 -#define float128_round_to_int float128_round_to_int_aarch64 -#define float128_scalbn float128_scalbn_aarch64 -#define float128_sqrt float128_sqrt_aarch64 -#define float128_sub float128_sub_aarch64 -#define float128ToCommonNaN float128ToCommonNaN_aarch64 -#define float128_to_float32 float128_to_float32_aarch64 -#define float128_to_float64 float128_to_float64_aarch64 -#define float128_to_floatx80 float128_to_floatx80_aarch64 -#define float128_to_int32 float128_to_int32_aarch64 -#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_aarch64 -#define float128_to_int64 float128_to_int64_aarch64 -#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_aarch64 -#define float128_unordered float128_unordered_aarch64 -#define float128_unordered_quiet float128_unordered_quiet_aarch64 -#define float16_default_nan float16_default_nan_aarch64 +#define address_space_get_flatview address_space_get_flatview_aarch64 +#define memory_region_transaction_begin memory_region_transaction_begin_aarch64 +#define memory_region_transaction_commit memory_region_transaction_commit_aarch64 +#define memory_region_init memory_region_init_aarch64 +#define memory_region_access_valid memory_region_access_valid_aarch64 +#define memory_region_dispatch_read memory_region_dispatch_read_aarch64 +#define memory_region_dispatch_write memory_region_dispatch_write_aarch64 +#define memory_region_init_io memory_region_init_io_aarch64 +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_aarch64 +#define memory_region_size memory_region_size_aarch64 +#define memory_region_set_readonly memory_region_set_readonly_aarch64 +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_aarch64 +#define memory_region_from_host memory_region_from_host_aarch64 +#define memory_region_get_ram_addr memory_region_get_ram_addr_aarch64 +#define memory_region_add_subregion memory_region_add_subregion_aarch64 +#define memory_region_del_subregion memory_region_del_subregion_aarch64 +#define memory_region_find memory_region_find_aarch64 +#define memory_listener_register memory_listener_register_aarch64 +#define memory_listener_unregister memory_listener_unregister_aarch64 +#define address_space_remove_listeners address_space_remove_listeners_aarch64 +#define address_space_init address_space_init_aarch64 +#define address_space_destroy address_space_destroy_aarch64 +#define memory_region_init_ram memory_region_init_ram_aarch64 +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_aarch64 +#define exec_inline_op exec_inline_op_aarch64 +#define floatx80_default_nan floatx80_default_nan_aarch64 +#define float_raise float_raise_aarch64 #define float16_is_quiet_nan float16_is_quiet_nan_aarch64 #define float16_is_signaling_nan float16_is_signaling_nan_aarch64 -#define float16_maybe_silence_nan float16_maybe_silence_nan_aarch64 -#define float16ToCommonNaN float16ToCommonNaN_aarch64 -#define float16_to_float32 float16_to_float32_aarch64 -#define float16_to_float64 float16_to_float64_aarch64 -#define float32_abs float32_abs_aarch64 
-#define float32_add float32_add_aarch64 -#define float32_chs float32_chs_aarch64 -#define float32_compare float32_compare_aarch64 -#define float32_compare_internal float32_compare_internal_aarch64 -#define float32_compare_quiet float32_compare_quiet_aarch64 -#define float32_default_nan float32_default_nan_aarch64 -#define float32_div float32_div_aarch64 -#define float32_eq float32_eq_aarch64 -#define float32_eq_quiet float32_eq_quiet_aarch64 -#define float32_exp2 float32_exp2_aarch64 -#define float32_exp2_coefficients float32_exp2_coefficients_aarch64 -#define float32_is_any_nan float32_is_any_nan_aarch64 -#define float32_is_infinity float32_is_infinity_aarch64 -#define float32_is_neg float32_is_neg_aarch64 #define float32_is_quiet_nan float32_is_quiet_nan_aarch64 #define float32_is_signaling_nan float32_is_signaling_nan_aarch64 -#define float32_is_zero float32_is_zero_aarch64 -#define float32_is_zero_or_denormal float32_is_zero_or_denormal_aarch64 -#define float32_le float32_le_aarch64 -#define float32_le_quiet float32_le_quiet_aarch64 -#define float32_log2 float32_log2_aarch64 -#define float32_lt float32_lt_aarch64 -#define float32_lt_quiet float32_lt_quiet_aarch64 +#define float64_is_quiet_nan float64_is_quiet_nan_aarch64 +#define float64_is_signaling_nan float64_is_signaling_nan_aarch64 +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_aarch64 +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_aarch64 +#define floatx80_silence_nan floatx80_silence_nan_aarch64 +#define propagateFloatx80NaN propagateFloatx80NaN_aarch64 +#define float128_is_quiet_nan float128_is_quiet_nan_aarch64 +#define float128_is_signaling_nan float128_is_signaling_nan_aarch64 +#define float128_silence_nan float128_silence_nan_aarch64 +#define float16_add float16_add_aarch64 +#define float16_sub float16_sub_aarch64 +#define float32_add float32_add_aarch64 +#define float32_sub float32_sub_aarch64 +#define float64_add float64_add_aarch64 +#define float64_sub float64_sub_aarch64 +#define float16_mul float16_mul_aarch64 +#define float32_mul float32_mul_aarch64 +#define float64_mul float64_mul_aarch64 +#define float16_muladd float16_muladd_aarch64 +#define float32_muladd float32_muladd_aarch64 +#define float64_muladd float64_muladd_aarch64 +#define float16_div float16_div_aarch64 +#define float32_div float32_div_aarch64 +#define float64_div float64_div_aarch64 +#define float16_to_float32 float16_to_float32_aarch64 +#define float16_to_float64 float16_to_float64_aarch64 +#define float32_to_float16 float32_to_float16_aarch64 +#define float32_to_float64 float32_to_float64_aarch64 +#define float64_to_float16 float64_to_float16_aarch64 +#define float64_to_float32 float64_to_float32_aarch64 +#define float16_round_to_int float16_round_to_int_aarch64 +#define float32_round_to_int float32_round_to_int_aarch64 +#define float64_round_to_int float64_round_to_int_aarch64 +#define float16_to_int16_scalbn float16_to_int16_scalbn_aarch64 +#define float16_to_int32_scalbn float16_to_int32_scalbn_aarch64 +#define float16_to_int64_scalbn float16_to_int64_scalbn_aarch64 +#define float32_to_int16_scalbn float32_to_int16_scalbn_aarch64 +#define float32_to_int32_scalbn float32_to_int32_scalbn_aarch64 +#define float32_to_int64_scalbn float32_to_int64_scalbn_aarch64 +#define float64_to_int16_scalbn float64_to_int16_scalbn_aarch64 +#define float64_to_int32_scalbn float64_to_int32_scalbn_aarch64 +#define float64_to_int64_scalbn float64_to_int64_scalbn_aarch64 +#define float16_to_int16 float16_to_int16_aarch64 +#define float16_to_int32 
+#define float16_to_int64 float16_to_int64_aarch64
+#define float32_to_int16 float32_to_int16_aarch64
+#define float32_to_int32 float32_to_int32_aarch64
+#define float32_to_int64 float32_to_int64_aarch64
+#define float64_to_int16 float64_to_int16_aarch64
+#define float64_to_int32 float64_to_int32_aarch64
+#define float64_to_int64 float64_to_int64_aarch64
+#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_aarch64
+#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_aarch64
+#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_aarch64
+#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_aarch64
+#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_aarch64
+#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_aarch64
+#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_aarch64
+#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_aarch64
+#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_aarch64
+#define float16_to_uint16_scalbn float16_to_uint16_scalbn_aarch64
+#define float16_to_uint32_scalbn float16_to_uint32_scalbn_aarch64
+#define float16_to_uint64_scalbn float16_to_uint64_scalbn_aarch64
+#define float32_to_uint16_scalbn float32_to_uint16_scalbn_aarch64
+#define float32_to_uint32_scalbn float32_to_uint32_scalbn_aarch64
+#define float32_to_uint64_scalbn float32_to_uint64_scalbn_aarch64
+#define float64_to_uint16_scalbn float64_to_uint16_scalbn_aarch64
+#define float64_to_uint32_scalbn float64_to_uint32_scalbn_aarch64
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_aarch64
+#define float16_to_uint16 float16_to_uint16_aarch64
+#define float16_to_uint32 float16_to_uint32_aarch64
+#define float16_to_uint64 float16_to_uint64_aarch64
+#define float32_to_uint16 float32_to_uint16_aarch64
+#define float32_to_uint32 float32_to_uint32_aarch64
+#define float32_to_uint64 float32_to_uint64_aarch64
+#define float64_to_uint16 float64_to_uint16_aarch64
+#define float64_to_uint32 float64_to_uint32_aarch64
+#define float64_to_uint64 float64_to_uint64_aarch64
+#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_aarch64
+#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_aarch64
+#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_aarch64
+#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_aarch64
+#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_aarch64
+#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_aarch64
+#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_aarch64
+#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_aarch64
+#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_aarch64
+#define int64_to_float16_scalbn int64_to_float16_scalbn_aarch64
+#define int32_to_float16_scalbn int32_to_float16_scalbn_aarch64
+#define int16_to_float16_scalbn int16_to_float16_scalbn_aarch64
+#define int64_to_float16 int64_to_float16_aarch64
+#define int32_to_float16 int32_to_float16_aarch64
+#define int16_to_float16 int16_to_float16_aarch64
+#define int64_to_float32_scalbn int64_to_float32_scalbn_aarch64
+#define int32_to_float32_scalbn int32_to_float32_scalbn_aarch64
+#define int16_to_float32_scalbn int16_to_float32_scalbn_aarch64
+#define int64_to_float32 int64_to_float32_aarch64
+#define int32_to_float32 int32_to_float32_aarch64
+#define int16_to_float32 int16_to_float32_aarch64
+#define int64_to_float64_scalbn int64_to_float64_scalbn_aarch64
+#define int32_to_float64_scalbn int32_to_float64_scalbn_aarch64
+#define int16_to_float64_scalbn int16_to_float64_scalbn_aarch64
+#define int64_to_float64 int64_to_float64_aarch64
+#define int32_to_float64 int32_to_float64_aarch64
+#define int16_to_float64 int16_to_float64_aarch64
+#define uint64_to_float16_scalbn uint64_to_float16_scalbn_aarch64
+#define uint32_to_float16_scalbn uint32_to_float16_scalbn_aarch64
+#define uint16_to_float16_scalbn uint16_to_float16_scalbn_aarch64
+#define uint64_to_float16 uint64_to_float16_aarch64
+#define uint32_to_float16 uint32_to_float16_aarch64
+#define uint16_to_float16 uint16_to_float16_aarch64
+#define uint64_to_float32_scalbn uint64_to_float32_scalbn_aarch64
+#define uint32_to_float32_scalbn uint32_to_float32_scalbn_aarch64
+#define uint16_to_float32_scalbn uint16_to_float32_scalbn_aarch64
+#define uint64_to_float32 uint64_to_float32_aarch64
+#define uint32_to_float32 uint32_to_float32_aarch64
+#define uint16_to_float32 uint16_to_float32_aarch64
+#define uint64_to_float64_scalbn uint64_to_float64_scalbn_aarch64
+#define uint32_to_float64_scalbn uint32_to_float64_scalbn_aarch64
+#define uint16_to_float64_scalbn uint16_to_float64_scalbn_aarch64
+#define uint64_to_float64 uint64_to_float64_aarch64
+#define uint32_to_float64 uint32_to_float64_aarch64
+#define uint16_to_float64 uint16_to_float64_aarch64
+#define float16_min float16_min_aarch64
+#define float16_minnum float16_minnum_aarch64
+#define float16_minnummag float16_minnummag_aarch64
+#define float16_max float16_max_aarch64
+#define float16_maxnum float16_maxnum_aarch64
+#define float16_maxnummag float16_maxnummag_aarch64
+#define float32_min float32_min_aarch64
+#define float32_minnum float32_minnum_aarch64
+#define float32_minnummag float32_minnummag_aarch64
 #define float32_max float32_max_aarch64
 #define float32_maxnum float32_maxnum_aarch64
 #define float32_maxnummag float32_maxnummag_aarch64
-#define float32_maybe_silence_nan float32_maybe_silence_nan_aarch64
-#define float32_min float32_min_aarch64
-#define float32_minmax float32_minmax_aarch64
-#define float32_minnum float32_minnum_aarch64
-#define float32_minnummag float32_minnummag_aarch64
-#define float32_mul float32_mul_aarch64
-#define float32_muladd float32_muladd_aarch64
-#define float32_rem float32_rem_aarch64
-#define float32_round_to_int float32_round_to_int_aarch64
-#define float32_scalbn float32_scalbn_aarch64
-#define float32_set_sign float32_set_sign_aarch64
-#define float32_sqrt float32_sqrt_aarch64
-#define float32_squash_input_denormal float32_squash_input_denormal_aarch64
-#define float32_sub float32_sub_aarch64
-#define float32ToCommonNaN float32ToCommonNaN_aarch64
-#define float32_to_float128 float32_to_float128_aarch64
-#define float32_to_float16 float32_to_float16_aarch64
-#define float32_to_float64 float32_to_float64_aarch64
-#define float32_to_floatx80 float32_to_floatx80_aarch64
-#define float32_to_int16 float32_to_int16_aarch64
-#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_aarch64
-#define float32_to_int32 float32_to_int32_aarch64
-#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_aarch64
-#define float32_to_int64 float32_to_int64_aarch64
-#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_aarch64
-#define float32_to_uint16 float32_to_uint16_aarch64
-#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_aarch64
-#define float32_to_uint32 float32_to_uint32_aarch64
-#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_aarch64
-#define float32_to_uint64 float32_to_uint64_aarch64
-#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_aarch64
-#define float32_unordered float32_unordered_aarch64
-#define float32_unordered_quiet float32_unordered_quiet_aarch64
-#define float64_abs float64_abs_aarch64
-#define float64_add float64_add_aarch64
-#define float64_chs float64_chs_aarch64
-#define float64_compare float64_compare_aarch64
-#define float64_compare_internal float64_compare_internal_aarch64
-#define float64_compare_quiet float64_compare_quiet_aarch64
-#define float64_default_nan float64_default_nan_aarch64
-#define float64_div float64_div_aarch64
-#define float64_eq float64_eq_aarch64
-#define float64_eq_quiet float64_eq_quiet_aarch64
-#define float64_is_any_nan float64_is_any_nan_aarch64
-#define float64_is_infinity float64_is_infinity_aarch64
-#define float64_is_neg float64_is_neg_aarch64
-#define float64_is_quiet_nan float64_is_quiet_nan_aarch64
-#define float64_is_signaling_nan float64_is_signaling_nan_aarch64
-#define float64_is_zero float64_is_zero_aarch64
-#define float64_le float64_le_aarch64
-#define float64_le_quiet float64_le_quiet_aarch64
-#define float64_log2 float64_log2_aarch64
-#define float64_lt float64_lt_aarch64
-#define float64_lt_quiet float64_lt_quiet_aarch64
+#define float64_min float64_min_aarch64
+#define float64_minnum float64_minnum_aarch64
+#define float64_minnummag float64_minnummag_aarch64
 #define float64_max float64_max_aarch64
 #define float64_maxnum float64_maxnum_aarch64
 #define float64_maxnummag float64_maxnummag_aarch64
-#define float64_maybe_silence_nan float64_maybe_silence_nan_aarch64
-#define float64_min float64_min_aarch64
-#define float64_minmax float64_minmax_aarch64
-#define float64_minnum float64_minnum_aarch64
-#define float64_minnummag float64_minnummag_aarch64
-#define float64_mul float64_mul_aarch64
-#define float64_muladd float64_muladd_aarch64
-#define float64_rem float64_rem_aarch64
-#define float64_round_to_int float64_round_to_int_aarch64
+#define float16_compare float16_compare_aarch64
+#define float16_compare_quiet float16_compare_quiet_aarch64
+#define float32_compare float32_compare_aarch64
+#define float32_compare_quiet float32_compare_quiet_aarch64
+#define float64_compare float64_compare_aarch64
+#define float64_compare_quiet float64_compare_quiet_aarch64
+#define float16_scalbn float16_scalbn_aarch64
+#define float32_scalbn float32_scalbn_aarch64
 #define float64_scalbn float64_scalbn_aarch64
-#define float64_set_sign float64_set_sign_aarch64
+#define float16_sqrt float16_sqrt_aarch64
+#define float32_sqrt float32_sqrt_aarch64
 #define float64_sqrt float64_sqrt_aarch64
+#define float16_default_nan float16_default_nan_aarch64
+#define float32_default_nan float32_default_nan_aarch64
+#define float64_default_nan float64_default_nan_aarch64
+#define float128_default_nan float128_default_nan_aarch64
+#define float16_silence_nan float16_silence_nan_aarch64
+#define float32_silence_nan float32_silence_nan_aarch64
+#define float64_silence_nan float64_silence_nan_aarch64
+#define float16_squash_input_denormal float16_squash_input_denormal_aarch64
+#define float32_squash_input_denormal float32_squash_input_denormal_aarch64
 #define float64_squash_input_denormal float64_squash_input_denormal_aarch64
-#define float64_sub float64_sub_aarch64
-#define float64ToCommonNaN float64ToCommonNaN_aarch64
-#define float64_to_float128 float64_to_float128_aarch64
-#define float64_to_float16 float64_to_float16_aarch64
-#define float64_to_float32 float64_to_float32_aarch64
+#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_aarch64
+#define roundAndPackFloatx80 roundAndPackFloatx80_aarch64
+#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_aarch64
+#define int32_to_floatx80 int32_to_floatx80_aarch64
+#define int32_to_float128 int32_to_float128_aarch64
+#define int64_to_floatx80 int64_to_floatx80_aarch64
+#define int64_to_float128 int64_to_float128_aarch64
+#define uint64_to_float128 uint64_to_float128_aarch64
+#define float32_to_floatx80 float32_to_floatx80_aarch64
+#define float32_to_float128 float32_to_float128_aarch64
+#define float32_rem float32_rem_aarch64
+#define float32_exp2 float32_exp2_aarch64
+#define float32_log2 float32_log2_aarch64
+#define float32_eq float32_eq_aarch64
+#define float32_le float32_le_aarch64
+#define float32_lt float32_lt_aarch64
+#define float32_unordered float32_unordered_aarch64
+#define float32_eq_quiet float32_eq_quiet_aarch64
+#define float32_le_quiet float32_le_quiet_aarch64
+#define float32_lt_quiet float32_lt_quiet_aarch64
+#define float32_unordered_quiet float32_unordered_quiet_aarch64
 #define float64_to_floatx80 float64_to_floatx80_aarch64
-#define float64_to_int16 float64_to_int16_aarch64
-#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_aarch64
-#define float64_to_int32 float64_to_int32_aarch64
-#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_aarch64
-#define float64_to_int64 float64_to_int64_aarch64
-#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_aarch64
-#define float64_to_uint16 float64_to_uint16_aarch64
-#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_aarch64
-#define float64_to_uint32 float64_to_uint32_aarch64
-#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_aarch64
-#define float64_to_uint64 float64_to_uint64_aarch64
-#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_aarch64
-#define float64_trunc_to_int float64_trunc_to_int_aarch64
+#define float64_to_float128 float64_to_float128_aarch64
+#define float64_rem float64_rem_aarch64
+#define float64_log2 float64_log2_aarch64
+#define float64_eq float64_eq_aarch64
+#define float64_le float64_le_aarch64
+#define float64_lt float64_lt_aarch64
 #define float64_unordered float64_unordered_aarch64
+#define float64_eq_quiet float64_eq_quiet_aarch64
+#define float64_le_quiet float64_le_quiet_aarch64
+#define float64_lt_quiet float64_lt_quiet_aarch64
 #define float64_unordered_quiet float64_unordered_quiet_aarch64
-#define float_raise float_raise_aarch64
-#define floatx80_add floatx80_add_aarch64
-#define floatx80_compare floatx80_compare_aarch64
-#define floatx80_compare_internal floatx80_compare_internal_aarch64
-#define floatx80_compare_quiet floatx80_compare_quiet_aarch64
-#define floatx80_default_nan floatx80_default_nan_aarch64
-#define floatx80_div floatx80_div_aarch64
-#define floatx80_eq floatx80_eq_aarch64
-#define floatx80_eq_quiet floatx80_eq_quiet_aarch64
-#define floatx80_is_quiet_nan floatx80_is_quiet_nan_aarch64
-#define floatx80_is_signaling_nan floatx80_is_signaling_nan_aarch64
-#define floatx80_le floatx80_le_aarch64
-#define floatx80_le_quiet floatx80_le_quiet_aarch64
-#define floatx80_lt floatx80_lt_aarch64
-#define floatx80_lt_quiet floatx80_lt_quiet_aarch64
-#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_aarch64
-#define floatx80_mul floatx80_mul_aarch64
-#define floatx80_rem floatx80_rem_aarch64
-#define floatx80_round_to_int floatx80_round_to_int_aarch64
-#define floatx80_scalbn floatx80_scalbn_aarch64
-#define floatx80_sqrt floatx80_sqrt_aarch64
-#define floatx80_sub floatx80_sub_aarch64
-#define floatx80ToCommonNaN floatx80ToCommonNaN_aarch64
-#define floatx80_to_float128 floatx80_to_float128_aarch64
-#define floatx80_to_float32 floatx80_to_float32_aarch64
-#define floatx80_to_float64 floatx80_to_float64_aarch64
 #define floatx80_to_int32 floatx80_to_int32_aarch64
 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_aarch64
 #define floatx80_to_int64 floatx80_to_int64_aarch64
 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_aarch64
+#define floatx80_to_float32 floatx80_to_float32_aarch64
+#define floatx80_to_float64 floatx80_to_float64_aarch64
+#define floatx80_to_float128 floatx80_to_float128_aarch64
+#define floatx80_round floatx80_round_aarch64
+#define floatx80_round_to_int floatx80_round_to_int_aarch64
+#define floatx80_add floatx80_add_aarch64
+#define floatx80_sub floatx80_sub_aarch64
+#define floatx80_mul floatx80_mul_aarch64
+#define floatx80_div floatx80_div_aarch64
+#define floatx80_rem floatx80_rem_aarch64
+#define floatx80_sqrt floatx80_sqrt_aarch64
+#define floatx80_eq floatx80_eq_aarch64
+#define floatx80_le floatx80_le_aarch64
+#define floatx80_lt floatx80_lt_aarch64
 #define floatx80_unordered floatx80_unordered_aarch64
+#define floatx80_eq_quiet floatx80_eq_quiet_aarch64
+#define floatx80_le_quiet floatx80_le_quiet_aarch64
+#define floatx80_lt_quiet floatx80_lt_quiet_aarch64
 #define floatx80_unordered_quiet floatx80_unordered_quiet_aarch64
-#define flush_icache_range flush_icache_range_aarch64
-#define format_string format_string_aarch64
-#define fp_decode_rm fp_decode_rm_aarch64
-#define frame_dummy frame_dummy_aarch64
-#define free_range free_range_aarch64
-#define fstat64 fstat64_aarch64
-#define futex_wait futex_wait_aarch64
-#define futex_wake futex_wake_aarch64
-#define gen_aa32_ld16s gen_aa32_ld16s_aarch64
-#define gen_aa32_ld16u gen_aa32_ld16u_aarch64
-#define gen_aa32_ld32u gen_aa32_ld32u_aarch64
-#define gen_aa32_ld64 gen_aa32_ld64_aarch64
-#define gen_aa32_ld8s gen_aa32_ld8s_aarch64
-#define gen_aa32_ld8u gen_aa32_ld8u_aarch64
-#define gen_aa32_st16 gen_aa32_st16_aarch64
-#define gen_aa32_st32 gen_aa32_st32_aarch64
-#define gen_aa32_st64 gen_aa32_st64_aarch64
-#define gen_aa32_st8 gen_aa32_st8_aarch64
-#define gen_adc gen_adc_aarch64
-#define gen_adc_CC gen_adc_CC_aarch64
-#define gen_add16 gen_add16_aarch64
-#define gen_add_carry gen_add_carry_aarch64
-#define gen_add_CC gen_add_CC_aarch64
-#define gen_add_datah_offset gen_add_datah_offset_aarch64
-#define gen_add_data_offset gen_add_data_offset_aarch64
-#define gen_addq gen_addq_aarch64
-#define gen_addq_lo gen_addq_lo_aarch64
-#define gen_addq_msw gen_addq_msw_aarch64
-#define gen_arm_parallel_addsub gen_arm_parallel_addsub_aarch64
-#define gen_arm_shift_im gen_arm_shift_im_aarch64
-#define gen_arm_shift_reg gen_arm_shift_reg_aarch64
-#define gen_bx gen_bx_aarch64
-#define gen_bx_im gen_bx_im_aarch64
-#define gen_clrex gen_clrex_aarch64
-#define generate_memory_topology generate_memory_topology_aarch64
-#define generic_timer_cp_reginfo generic_timer_cp_reginfo_aarch64
-#define gen_exception gen_exception_aarch64
-#define gen_exception_insn gen_exception_insn_aarch64
-#define gen_exception_internal gen_exception_internal_aarch64
-#define gen_exception_internal_insn gen_exception_internal_insn_aarch64
-#define gen_exception_return gen_exception_return_aarch64
-#define gen_goto_tb gen_goto_tb_aarch64
-#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_aarch64
-#define gen_helper_add_saturate gen_helper_add_saturate_aarch64
-#define gen_helper_add_setq gen_helper_add_setq_aarch64
-#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_aarch64
-#define gen_helper_clz32 gen_helper_clz32_aarch64
-#define gen_helper_clz64 gen_helper_clz64_aarch64
-#define gen_helper_clz_arm gen_helper_clz_arm_aarch64
+#define float128_to_int32 float128_to_int32_aarch64
+#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_aarch64
+#define float128_to_int64 float128_to_int64_aarch64
+#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_aarch64
+#define float128_to_uint64 float128_to_uint64_aarch64
+#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_aarch64
+#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_aarch64
+#define float128_to_uint32 float128_to_uint32_aarch64
+#define float128_to_float32 float128_to_float32_aarch64
+#define float128_to_float64 float128_to_float64_aarch64
+#define float128_to_floatx80 float128_to_floatx80_aarch64
+#define float128_round_to_int float128_round_to_int_aarch64
+#define float128_add float128_add_aarch64
+#define float128_sub float128_sub_aarch64
+#define float128_mul float128_mul_aarch64
+#define float128_div float128_div_aarch64
+#define float128_rem float128_rem_aarch64
+#define float128_sqrt float128_sqrt_aarch64
+#define float128_eq float128_eq_aarch64
+#define float128_le float128_le_aarch64
+#define float128_lt float128_lt_aarch64
+#define float128_unordered float128_unordered_aarch64
+#define float128_eq_quiet float128_eq_quiet_aarch64
+#define float128_le_quiet float128_le_quiet_aarch64
+#define float128_lt_quiet float128_lt_quiet_aarch64
+#define float128_unordered_quiet float128_unordered_quiet_aarch64
+#define floatx80_compare floatx80_compare_aarch64
+#define floatx80_compare_quiet floatx80_compare_quiet_aarch64
+#define float128_compare float128_compare_aarch64
+#define float128_compare_quiet float128_compare_quiet_aarch64
+#define floatx80_scalbn floatx80_scalbn_aarch64
+#define float128_scalbn float128_scalbn_aarch64
+#define softfloat_init softfloat_init_aarch64
+#define tcg_optimize tcg_optimize_aarch64
+#define gen_new_label gen_new_label_aarch64
+#define tcg_can_emit_vec_op tcg_can_emit_vec_op_aarch64
+#define tcg_expand_vec_op tcg_expand_vec_op_aarch64
+#define tcg_register_jit tcg_register_jit_aarch64
+#define tcg_tb_insert tcg_tb_insert_aarch64
+#define tcg_tb_remove tcg_tb_remove_aarch64
+#define tcg_tb_lookup tcg_tb_lookup_aarch64
+#define tcg_tb_foreach tcg_tb_foreach_aarch64
+#define tcg_nb_tbs tcg_nb_tbs_aarch64
+#define tcg_region_reset_all tcg_region_reset_all_aarch64
+#define tcg_region_init tcg_region_init_aarch64
+#define tcg_code_size tcg_code_size_aarch64
+#define tcg_code_capacity tcg_code_capacity_aarch64
+#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_aarch64
+#define tcg_malloc_internal tcg_malloc_internal_aarch64
+#define tcg_pool_reset tcg_pool_reset_aarch64
+#define tcg_context_init tcg_context_init_aarch64
+#define tcg_tb_alloc tcg_tb_alloc_aarch64
+#define tcg_prologue_init tcg_prologue_init_aarch64
+#define tcg_func_start tcg_func_start_aarch64
+#define tcg_set_frame tcg_set_frame_aarch64
+#define tcg_global_mem_new_internal tcg_global_mem_new_internal_aarch64
+#define tcg_temp_new_internal tcg_temp_new_internal_aarch64
+#define tcg_temp_new_vec tcg_temp_new_vec_aarch64
+#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_aarch64
+#define tcg_temp_free_internal tcg_temp_free_internal_aarch64
+#define tcg_const_i32 tcg_const_i32_aarch64
+#define tcg_const_i64 tcg_const_i64_aarch64
+#define tcg_const_local_i32 tcg_const_local_i32_aarch64
+#define tcg_const_local_i64 tcg_const_local_i64_aarch64
+#define tcg_op_supported tcg_op_supported_aarch64
+#define tcg_gen_callN tcg_gen_callN_aarch64
+#define tcg_op_remove tcg_op_remove_aarch64
+#define tcg_emit_op tcg_emit_op_aarch64
+#define tcg_op_insert_before tcg_op_insert_before_aarch64
+#define tcg_op_insert_after tcg_op_insert_after_aarch64
+#define tcg_cpu_exec_time tcg_cpu_exec_time_aarch64
+#define tcg_gen_code tcg_gen_code_aarch64
+#define tcg_gen_op1 tcg_gen_op1_aarch64
+#define tcg_gen_op2 tcg_gen_op2_aarch64
+#define tcg_gen_op3 tcg_gen_op3_aarch64
+#define tcg_gen_op4 tcg_gen_op4_aarch64
+#define tcg_gen_op5 tcg_gen_op5_aarch64
+#define tcg_gen_op6 tcg_gen_op6_aarch64
+#define tcg_gen_mb tcg_gen_mb_aarch64
+#define tcg_gen_addi_i32 tcg_gen_addi_i32_aarch64
+#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_aarch64
+#define tcg_gen_subi_i32 tcg_gen_subi_i32_aarch64
+#define tcg_gen_andi_i32 tcg_gen_andi_i32_aarch64
+#define tcg_gen_ori_i32 tcg_gen_ori_i32_aarch64
+#define tcg_gen_xori_i32 tcg_gen_xori_i32_aarch64
+#define tcg_gen_shli_i32 tcg_gen_shli_i32_aarch64
+#define tcg_gen_shri_i32 tcg_gen_shri_i32_aarch64
+#define tcg_gen_sari_i32 tcg_gen_sari_i32_aarch64
+#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_aarch64
+#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_aarch64
+#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_aarch64
+#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_aarch64
+#define tcg_gen_muli_i32 tcg_gen_muli_i32_aarch64
+#define tcg_gen_div_i32 tcg_gen_div_i32_aarch64
+#define tcg_gen_rem_i32 tcg_gen_rem_i32_aarch64
+#define tcg_gen_divu_i32 tcg_gen_divu_i32_aarch64
+#define tcg_gen_remu_i32 tcg_gen_remu_i32_aarch64
+#define tcg_gen_andc_i32 tcg_gen_andc_i32_aarch64
+#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_aarch64
+#define tcg_gen_nand_i32 tcg_gen_nand_i32_aarch64
+#define tcg_gen_nor_i32 tcg_gen_nor_i32_aarch64
+#define tcg_gen_orc_i32 tcg_gen_orc_i32_aarch64
+#define tcg_gen_clz_i32 tcg_gen_clz_i32_aarch64
+#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_aarch64
+#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_aarch64
+#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_aarch64
+#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_aarch64
+#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_aarch64
+#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_aarch64
+#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_aarch64
+#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_aarch64
+#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_aarch64
+#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_aarch64
+#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_aarch64
+#define tcg_gen_extract_i32 tcg_gen_extract_i32_aarch64
+#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_aarch64
+#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_aarch64
+#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_aarch64
+#define tcg_gen_add2_i32 tcg_gen_add2_i32_aarch64
+#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_aarch64
+#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_aarch64
+#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_aarch64
+#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_aarch64
+#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_aarch64
+#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_aarch64
+#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_aarch64
+#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_aarch64
+#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_aarch64
+#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_aarch64
+#define tcg_gen_smin_i32 tcg_gen_smin_i32_aarch64
+#define tcg_gen_umin_i32 tcg_gen_umin_i32_aarch64
+#define tcg_gen_smax_i32 tcg_gen_smax_i32_aarch64
+#define tcg_gen_umax_i32 tcg_gen_umax_i32_aarch64
+#define tcg_gen_abs_i32 tcg_gen_abs_i32_aarch64
+#define tcg_gen_addi_i64 tcg_gen_addi_i64_aarch64
+#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_aarch64
+#define tcg_gen_subi_i64 tcg_gen_subi_i64_aarch64
+#define tcg_gen_andi_i64 tcg_gen_andi_i64_aarch64
+#define tcg_gen_ori_i64 tcg_gen_ori_i64_aarch64
+#define tcg_gen_xori_i64 tcg_gen_xori_i64_aarch64
+#define tcg_gen_shli_i64 tcg_gen_shli_i64_aarch64
+#define tcg_gen_shri_i64 tcg_gen_shri_i64_aarch64
+#define tcg_gen_sari_i64 tcg_gen_sari_i64_aarch64
+#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_aarch64
+#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_aarch64
+#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_aarch64
+#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_aarch64
+#define tcg_gen_muli_i64 tcg_gen_muli_i64_aarch64
+#define tcg_gen_div_i64 tcg_gen_div_i64_aarch64
+#define tcg_gen_rem_i64 tcg_gen_rem_i64_aarch64
+#define tcg_gen_divu_i64 tcg_gen_divu_i64_aarch64
+#define tcg_gen_remu_i64 tcg_gen_remu_i64_aarch64
+#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_aarch64
+#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_aarch64
+#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_aarch64
+#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_aarch64
+#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_aarch64
+#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_aarch64
+#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_aarch64
+#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_aarch64
+#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_aarch64
+#define tcg_gen_not_i64 tcg_gen_not_i64_aarch64
+#define tcg_gen_andc_i64 tcg_gen_andc_i64_aarch64
+#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_aarch64
+#define tcg_gen_nand_i64 tcg_gen_nand_i64_aarch64
+#define tcg_gen_nor_i64 tcg_gen_nor_i64_aarch64
+#define tcg_gen_orc_i64 tcg_gen_orc_i64_aarch64
+#define tcg_gen_clz_i64 tcg_gen_clz_i64_aarch64
+#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_aarch64
+#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_aarch64
+#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_aarch64
+#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_aarch64
+#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_aarch64
+#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_aarch64
+#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_aarch64
+#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_aarch64
+#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_aarch64
+#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_aarch64
+#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_aarch64
+#define tcg_gen_extract_i64 tcg_gen_extract_i64_aarch64
+#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_aarch64
+#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_aarch64
+#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_aarch64
+#define tcg_gen_add2_i64 tcg_gen_add2_i64_aarch64
+#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_aarch64
+#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_aarch64
+#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_aarch64
+#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_aarch64
+#define tcg_gen_smin_i64 tcg_gen_smin_i64_aarch64
+#define tcg_gen_umin_i64 tcg_gen_umin_i64_aarch64
+#define tcg_gen_smax_i64 tcg_gen_smax_i64_aarch64
+#define tcg_gen_umax_i64 tcg_gen_umax_i64_aarch64
+#define tcg_gen_abs_i64 tcg_gen_abs_i64_aarch64
+#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_aarch64
+#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_aarch64
+#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_aarch64
+#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_aarch64
+#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_aarch64
+#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_aarch64
+#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_aarch64
+#define tcg_gen_exit_tb tcg_gen_exit_tb_aarch64
+#define tcg_gen_goto_tb tcg_gen_goto_tb_aarch64
+#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_aarch64
+#define check_exit_request check_exit_request_aarch64
+#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_aarch64
+#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_aarch64
+#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_aarch64
+#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_aarch64
+#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_aarch64
+#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_aarch64
+#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_aarch64
+#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_aarch64
+#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_aarch64
+#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_aarch64
+#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_aarch64
+#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_aarch64
+#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_aarch64
+#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_aarch64
+#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_aarch64
+#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_aarch64
+#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_aarch64
+#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_aarch64
+#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_aarch64
+#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_aarch64
+#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_aarch64
+#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_aarch64
+#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_aarch64
+#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_aarch64
+#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_aarch64
+#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_aarch64
+#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_aarch64
+#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_aarch64
+#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_aarch64
+#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_aarch64
+#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_aarch64
+#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_aarch64
+#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_aarch64
+#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_aarch64
+#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_aarch64
+#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_aarch64
+#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_aarch64
+#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_aarch64
+#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_aarch64
+#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_aarch64
+#define simd_desc simd_desc_aarch64
+#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_aarch64
+#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_aarch64
+#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_aarch64
+#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_aarch64
+#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_aarch64
+#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_aarch64
+#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_aarch64
+#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_aarch64
+#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_aarch64
+#define tcg_gen_gvec_2 tcg_gen_gvec_2_aarch64
+#define tcg_gen_gvec_2i tcg_gen_gvec_2i_aarch64
+#define tcg_gen_gvec_2s tcg_gen_gvec_2s_aarch64
+#define tcg_gen_gvec_3 tcg_gen_gvec_3_aarch64
+#define tcg_gen_gvec_3i tcg_gen_gvec_3i_aarch64
+#define tcg_gen_gvec_4 tcg_gen_gvec_4_aarch64
+#define tcg_gen_gvec_mov tcg_gen_gvec_mov_aarch64
+#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_aarch64
+#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_aarch64
+#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_aarch64
+#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_aarch64
+#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_aarch64
+#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_aarch64
+#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_aarch64
+#define tcg_gen_gvec_not tcg_gen_gvec_not_aarch64
+#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_aarch64
+#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_aarch64
+#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_aarch64
+#define tcg_gen_gvec_add tcg_gen_gvec_add_aarch64
+#define tcg_gen_gvec_adds tcg_gen_gvec_adds_aarch64
+#define tcg_gen_gvec_addi tcg_gen_gvec_addi_aarch64
+#define tcg_gen_gvec_subs tcg_gen_gvec_subs_aarch64
+#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_aarch64
+#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_aarch64
+#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_aarch64
+#define tcg_gen_gvec_sub tcg_gen_gvec_sub_aarch64
+#define tcg_gen_gvec_mul tcg_gen_gvec_mul_aarch64
+#define tcg_gen_gvec_muls tcg_gen_gvec_muls_aarch64
+#define tcg_gen_gvec_muli tcg_gen_gvec_muli_aarch64
+#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_aarch64
+#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_aarch64
+#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_aarch64
+#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_aarch64
+#define tcg_gen_gvec_smin tcg_gen_gvec_smin_aarch64
+#define tcg_gen_gvec_umin tcg_gen_gvec_umin_aarch64
+#define tcg_gen_gvec_smax tcg_gen_gvec_smax_aarch64
+#define tcg_gen_gvec_umax tcg_gen_gvec_umax_aarch64
+#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_aarch64
+#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_aarch64
+#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_aarch64
+#define tcg_gen_gvec_neg tcg_gen_gvec_neg_aarch64
+#define tcg_gen_gvec_abs tcg_gen_gvec_abs_aarch64
+#define tcg_gen_gvec_and tcg_gen_gvec_and_aarch64
+#define tcg_gen_gvec_or tcg_gen_gvec_or_aarch64
+#define tcg_gen_gvec_xor tcg_gen_gvec_xor_aarch64
+#define tcg_gen_gvec_andc tcg_gen_gvec_andc_aarch64
+#define tcg_gen_gvec_orc tcg_gen_gvec_orc_aarch64
+#define tcg_gen_gvec_nand tcg_gen_gvec_nand_aarch64
+#define tcg_gen_gvec_nor tcg_gen_gvec_nor_aarch64
+#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_aarch64
+#define tcg_gen_gvec_ands tcg_gen_gvec_ands_aarch64
+#define tcg_gen_gvec_andi tcg_gen_gvec_andi_aarch64
+#define tcg_gen_gvec_xors tcg_gen_gvec_xors_aarch64
+#define tcg_gen_gvec_xori tcg_gen_gvec_xori_aarch64
+#define tcg_gen_gvec_ors tcg_gen_gvec_ors_aarch64
+#define tcg_gen_gvec_ori tcg_gen_gvec_ori_aarch64
+#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_aarch64
+#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_aarch64
+#define tcg_gen_gvec_shli tcg_gen_gvec_shli_aarch64
+#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_aarch64
+#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_aarch64
+#define tcg_gen_gvec_shri tcg_gen_gvec_shri_aarch64
+#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_aarch64
+#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_aarch64
+#define tcg_gen_gvec_sari tcg_gen_gvec_sari_aarch64
+#define tcg_gen_gvec_shls tcg_gen_gvec_shls_aarch64
+#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_aarch64
+#define tcg_gen_gvec_sars tcg_gen_gvec_sars_aarch64
+#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_aarch64
+#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_aarch64
+#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_aarch64
+#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_aarch64
+#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_aarch64
+#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_aarch64
+#define vec_gen_2 vec_gen_2_aarch64
+#define vec_gen_3 vec_gen_3_aarch64
+#define vec_gen_4 vec_gen_4_aarch64
+#define tcg_gen_mov_vec tcg_gen_mov_vec_aarch64
+#define tcg_const_zeros_vec tcg_const_zeros_vec_aarch64
+#define tcg_const_ones_vec tcg_const_ones_vec_aarch64
+#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_aarch64
+#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_aarch64
+#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_aarch64
+#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_aarch64
+#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_aarch64
+#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_aarch64
+#define tcg_gen_dupi_vec tcg_gen_dupi_vec_aarch64
+#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_aarch64
+#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_aarch64
+#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_aarch64
+#define tcg_gen_ld_vec tcg_gen_ld_vec_aarch64
+#define tcg_gen_st_vec tcg_gen_st_vec_aarch64
+#define tcg_gen_stl_vec tcg_gen_stl_vec_aarch64
+#define tcg_gen_and_vec tcg_gen_and_vec_aarch64
+#define tcg_gen_or_vec tcg_gen_or_vec_aarch64
+#define tcg_gen_xor_vec tcg_gen_xor_vec_aarch64
+#define tcg_gen_andc_vec tcg_gen_andc_vec_aarch64
+#define tcg_gen_orc_vec tcg_gen_orc_vec_aarch64
+#define tcg_gen_nand_vec tcg_gen_nand_vec_aarch64
+#define tcg_gen_nor_vec tcg_gen_nor_vec_aarch64
+#define tcg_gen_eqv_vec tcg_gen_eqv_vec_aarch64
+#define tcg_gen_not_vec tcg_gen_not_vec_aarch64
+#define tcg_gen_neg_vec tcg_gen_neg_vec_aarch64
+#define tcg_gen_abs_vec tcg_gen_abs_vec_aarch64
+#define tcg_gen_shli_vec tcg_gen_shli_vec_aarch64
+#define tcg_gen_shri_vec tcg_gen_shri_vec_aarch64
+#define tcg_gen_sari_vec tcg_gen_sari_vec_aarch64
+#define tcg_gen_cmp_vec tcg_gen_cmp_vec_aarch64
+#define tcg_gen_add_vec tcg_gen_add_vec_aarch64
+#define tcg_gen_sub_vec tcg_gen_sub_vec_aarch64
+#define tcg_gen_mul_vec tcg_gen_mul_vec_aarch64
+#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_aarch64
+#define tcg_gen_usadd_vec tcg_gen_usadd_vec_aarch64
+#define tcg_gen_sssub_vec tcg_gen_sssub_vec_aarch64
+#define tcg_gen_ussub_vec tcg_gen_ussub_vec_aarch64
+#define tcg_gen_smin_vec tcg_gen_smin_vec_aarch64
+#define tcg_gen_umin_vec tcg_gen_umin_vec_aarch64
+#define tcg_gen_smax_vec tcg_gen_smax_vec_aarch64
+#define tcg_gen_umax_vec tcg_gen_umax_vec_aarch64
+#define tcg_gen_shlv_vec tcg_gen_shlv_vec_aarch64
+#define tcg_gen_shrv_vec tcg_gen_shrv_vec_aarch64
+#define tcg_gen_sarv_vec tcg_gen_sarv_vec_aarch64
+#define tcg_gen_shls_vec tcg_gen_shls_vec_aarch64
+#define tcg_gen_shrs_vec tcg_gen_shrs_vec_aarch64
+#define tcg_gen_sars_vec tcg_gen_sars_vec_aarch64
+#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_aarch64
+#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_aarch64
+#define tb_htable_lookup tb_htable_lookup_aarch64
+#define tb_set_jmp_target tb_set_jmp_target_aarch64
+#define cpu_exec cpu_exec_aarch64
+#define cpu_loop_exit_noexc cpu_loop_exit_noexc_aarch64
+#define cpu_reloading_memory_map cpu_reloading_memory_map_aarch64
+#define cpu_loop_exit cpu_loop_exit_aarch64
+#define cpu_loop_exit_restore cpu_loop_exit_restore_aarch64
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_aarch64
+#define tlb_init tlb_init_aarch64
+#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_aarch64
+#define tlb_flush tlb_flush_aarch64
+#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_aarch64
+#define tlb_flush_all_cpus tlb_flush_all_cpus_aarch64
+#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_aarch64
+#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_aarch64
+#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_aarch64
+#define tlb_flush_page tlb_flush_page_aarch64
+#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_aarch64
+#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_aarch64
+#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_aarch64
+#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_aarch64
+#define tlb_protect_code tlb_protect_code_aarch64
+#define tlb_unprotect_code tlb_unprotect_code_aarch64
+#define tlb_reset_dirty tlb_reset_dirty_aarch64
+#define tlb_set_dirty tlb_set_dirty_aarch64
+#define tlb_set_page_with_attrs tlb_set_page_with_attrs_aarch64
+#define tlb_set_page tlb_set_page_aarch64
+#define get_page_addr_code_hostp get_page_addr_code_hostp_aarch64
+#define get_page_addr_code get_page_addr_code_aarch64
+#define probe_access probe_access_aarch64
+#define tlb_vaddr_to_host tlb_vaddr_to_host_aarch64
+#define helper_ret_ldub_mmu helper_ret_ldub_mmu_aarch64
+#define helper_le_lduw_mmu helper_le_lduw_mmu_aarch64
+#define helper_be_lduw_mmu helper_be_lduw_mmu_aarch64
+#define helper_le_ldul_mmu helper_le_ldul_mmu_aarch64
+#define helper_be_ldul_mmu helper_be_ldul_mmu_aarch64
+#define helper_le_ldq_mmu helper_le_ldq_mmu_aarch64
+#define helper_be_ldq_mmu helper_be_ldq_mmu_aarch64
+#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_aarch64
+#define helper_le_ldsw_mmu helper_le_ldsw_mmu_aarch64
+#define helper_be_ldsw_mmu helper_be_ldsw_mmu_aarch64
+#define helper_le_ldsl_mmu helper_le_ldsl_mmu_aarch64
+#define helper_be_ldsl_mmu helper_be_ldsl_mmu_aarch64
+#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_aarch64
+#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_aarch64
+#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_aarch64
+#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_aarch64
+#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_aarch64
+#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_aarch64
+#define cpu_ldub_data_ra cpu_ldub_data_ra_aarch64
+#define cpu_ldsb_data_ra cpu_ldsb_data_ra_aarch64
+#define cpu_lduw_data_ra cpu_lduw_data_ra_aarch64
+#define cpu_ldsw_data_ra cpu_ldsw_data_ra_aarch64
+#define cpu_ldl_data_ra cpu_ldl_data_ra_aarch64
+#define cpu_ldq_data_ra cpu_ldq_data_ra_aarch64
+#define cpu_ldub_data cpu_ldub_data_aarch64
+#define cpu_ldsb_data cpu_ldsb_data_aarch64
+#define cpu_lduw_data cpu_lduw_data_aarch64
+#define cpu_ldsw_data cpu_ldsw_data_aarch64
+#define cpu_ldl_data cpu_ldl_data_aarch64
+#define cpu_ldq_data cpu_ldq_data_aarch64
+#define helper_ret_stb_mmu helper_ret_stb_mmu_aarch64
+#define helper_le_stw_mmu helper_le_stw_mmu_aarch64
+#define helper_be_stw_mmu helper_be_stw_mmu_aarch64
+#define helper_le_stl_mmu helper_le_stl_mmu_aarch64
+#define helper_be_stl_mmu helper_be_stl_mmu_aarch64
+#define helper_le_stq_mmu helper_le_stq_mmu_aarch64
+#define helper_be_stq_mmu helper_be_stq_mmu_aarch64
+#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_aarch64
+#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_aarch64
+#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_aarch64
+#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_aarch64
+#define cpu_stb_data_ra cpu_stb_data_ra_aarch64
+#define cpu_stw_data_ra cpu_stw_data_ra_aarch64
+#define cpu_stl_data_ra cpu_stl_data_ra_aarch64
+#define cpu_stq_data_ra cpu_stq_data_ra_aarch64
+#define cpu_stb_data cpu_stb_data_aarch64
+#define cpu_stw_data cpu_stw_data_aarch64
+#define cpu_stl_data cpu_stl_data_aarch64
+#define cpu_stq_data cpu_stq_data_aarch64
+#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_aarch64
+#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_aarch64
+#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_aarch64
+#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_aarch64
+#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_aarch64
+#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_aarch64
+#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_aarch64
+#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_aarch64
+#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_aarch64
+#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_aarch64
+#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_aarch64
+#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_aarch64
+#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_aarch64
+#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_aarch64
+#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_aarch64
+#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_aarch64
+#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_aarch64
+#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_aarch64
+#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_aarch64
+#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_aarch64
+#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_aarch64
+#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_aarch64
+#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_aarch64
+#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_aarch64
+#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_aarch64
+#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_aarch64
+#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_aarch64
+#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_aarch64
+#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_aarch64
+#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_aarch64
+#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_aarch64
+#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_aarch64
+#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_aarch64
+#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_aarch64
+#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_aarch64
+#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_aarch64
+#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_aarch64
+#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_aarch64
+#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_aarch64
+#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_aarch64
+#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_aarch64
+#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_aarch64
+#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_aarch64
+#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_aarch64
+#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_aarch64
+#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_aarch64
+#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_aarch64
+#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_aarch64
+#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_aarch64
+#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_aarch64
+#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_aarch64
+#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_aarch64
+#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_aarch64
+#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_aarch64
+#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_aarch64
+#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_aarch64
+#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_aarch64
+#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_aarch64
+#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_aarch64
+#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_aarch64
+#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_aarch64
+#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_aarch64
+#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_aarch64
+#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_aarch64
+#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_aarch64
+#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_aarch64
+#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_aarch64
+#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_aarch64
+#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_aarch64
+#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_aarch64
+#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_aarch64
+#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_aarch64
+#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_aarch64
+#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_aarch64
+#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_aarch64
+#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_aarch64
+#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_aarch64
+#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_aarch64
+#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_aarch64
+#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_aarch64
+#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_aarch64
+#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_aarch64
+#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_aarch64
+#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_aarch64
+#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_aarch64
+#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_aarch64
+#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_aarch64
+#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_aarch64
+#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_aarch64
+#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_aarch64
+#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_aarch64
+#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_aarch64
+#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_aarch64
+#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_aarch64
+#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_aarch64
+#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_aarch64
+#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_aarch64
+#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_aarch64
+#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_aarch64
+#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_aarch64
+#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_aarch64
+#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_aarch64
+#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_aarch64
+#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_aarch64
+#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_aarch64
+#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_aarch64
+#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_aarch64
+#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_aarch64
+#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_aarch64
+#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_aarch64
+#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_aarch64
+#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_aarch64
+#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_aarch64
+#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_aarch64
+#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_aarch64
+#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_aarch64
+#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_aarch64
+#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_aarch64
+#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_aarch64
+#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_aarch64
+#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_aarch64
+#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_aarch64
+#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_aarch64
+#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_aarch64
+#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_aarch64
+#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_aarch64
+#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_aarch64
+#define helper_atomic_xchgb helper_atomic_xchgb_aarch64
+#define helper_atomic_fetch_addb helper_atomic_fetch_addb_aarch64
+#define helper_atomic_fetch_andb helper_atomic_fetch_andb_aarch64
+#define helper_atomic_fetch_orb helper_atomic_fetch_orb_aarch64
+#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_aarch64
+#define helper_atomic_add_fetchb helper_atomic_add_fetchb_aarch64
+#define helper_atomic_and_fetchb helper_atomic_and_fetchb_aarch64
+#define helper_atomic_or_fetchb helper_atomic_or_fetchb_aarch64
+#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_aarch64
+#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_aarch64
+#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_aarch64
+#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_aarch64
+#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_aarch64
+#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_aarch64
+#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_aarch64
+#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_aarch64
+#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_aarch64
+#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_aarch64
+#define helper_atomic_xchgw_le helper_atomic_xchgw_le_aarch64
+#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_aarch64
+#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_aarch64
+#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_aarch64
+#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_aarch64
+#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_aarch64
+#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_aarch64
+#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_aarch64
+#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_aarch64
+#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_aarch64
+#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_aarch64
+#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_aarch64
+#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_aarch64
+#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_aarch64
+#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_aarch64
+#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_aarch64
+#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_aarch64
+#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_aarch64
+#define helper_atomic_xchgw_be helper_atomic_xchgw_be_aarch64
+#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_aarch64
+#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_aarch64
+#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_aarch64
+#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_aarch64
+#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_aarch64
+#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_aarch64
+#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_aarch64
+#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_aarch64
+#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_aarch64
+#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_aarch64
+#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_aarch64
+#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_aarch64
+#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_aarch64
+#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_aarch64
+#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_aarch64
+#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_aarch64
+#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_aarch64
+#define helper_atomic_xchgl_le helper_atomic_xchgl_le_aarch64
+#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_aarch64
+#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_aarch64
+#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_aarch64
+#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_aarch64
+#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_aarch64
+#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_aarch64
+#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_aarch64
+#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_aarch64
+#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_aarch64
+#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_aarch64
+#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_aarch64
+#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_aarch64
+#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_aarch64
+#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_aarch64
+#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_aarch64
+#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_aarch64
+#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_aarch64
+#define helper_atomic_xchgl_be helper_atomic_xchgl_be_aarch64
+#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_aarch64
+#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_aarch64
+#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_aarch64
+#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_aarch64
+#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_aarch64
+#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_aarch64
+#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_aarch64
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_aarch64
+#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_aarch64
+#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_aarch64
+#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_aarch64
+#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_aarch64
+#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_aarch64
+#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_aarch64
+#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_aarch64
+#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_aarch64
+#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_aarch64
+#define helper_atomic_xchgq_le helper_atomic_xchgq_le_aarch64
+#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_aarch64
helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_aarch64 +#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_aarch64 +#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_aarch64 +#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_aarch64 +#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_aarch64 +#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_aarch64 +#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_aarch64 +#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_aarch64 +#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_aarch64 +#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_aarch64 +#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_aarch64 +#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_aarch64 +#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_aarch64 +#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_aarch64 +#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_aarch64 +#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_aarch64 +#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_aarch64 +#define helper_atomic_xchgq_be helper_atomic_xchgq_be_aarch64 +#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_aarch64 +#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_aarch64 +#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_aarch64 +#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_aarch64 +#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_aarch64 +#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_aarch64 +#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_aarch64 +#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_aarch64 +#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_aarch64 +#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_aarch64 +#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_aarch64 +#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_aarch64 +#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_aarch64 +#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_aarch64 +#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_aarch64 +#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_aarch64 +#define cpu_ldub_code cpu_ldub_code_aarch64 +#define cpu_lduw_code cpu_lduw_code_aarch64 +#define cpu_ldl_code cpu_ldl_code_aarch64 +#define cpu_ldq_code cpu_ldq_code_aarch64 +#define helper_div_i32 helper_div_i32_aarch64 +#define helper_rem_i32 helper_rem_i32_aarch64 +#define helper_divu_i32 helper_divu_i32_aarch64 +#define helper_remu_i32 helper_remu_i32_aarch64 +#define helper_shl_i64 helper_shl_i64_aarch64 +#define helper_shr_i64 helper_shr_i64_aarch64 +#define helper_sar_i64 helper_sar_i64_aarch64 +#define helper_div_i64 helper_div_i64_aarch64 +#define helper_rem_i64 helper_rem_i64_aarch64 +#define helper_divu_i64 helper_divu_i64_aarch64 +#define helper_remu_i64 helper_remu_i64_aarch64 +#define helper_muluh_i64 helper_muluh_i64_aarch64 +#define helper_mulsh_i64 helper_mulsh_i64_aarch64 +#define helper_clz_i32 helper_clz_i32_aarch64 +#define helper_ctz_i32 helper_ctz_i32_aarch64 +#define helper_clz_i64 helper_clz_i64_aarch64 +#define helper_ctz_i64 helper_ctz_i64_aarch64 +#define helper_clrsb_i32 helper_clrsb_i32_aarch64 +#define helper_clrsb_i64 
helper_clrsb_i64_aarch64 +#define helper_ctpop_i32 helper_ctpop_i32_aarch64 +#define helper_ctpop_i64 helper_ctpop_i64_aarch64 +#define helper_lookup_tb_ptr helper_lookup_tb_ptr_aarch64 +#define helper_exit_atomic helper_exit_atomic_aarch64 +#define helper_gvec_add8 helper_gvec_add8_aarch64 +#define helper_gvec_add16 helper_gvec_add16_aarch64 +#define helper_gvec_add32 helper_gvec_add32_aarch64 +#define helper_gvec_add64 helper_gvec_add64_aarch64 +#define helper_gvec_adds8 helper_gvec_adds8_aarch64 +#define helper_gvec_adds16 helper_gvec_adds16_aarch64 +#define helper_gvec_adds32 helper_gvec_adds32_aarch64 +#define helper_gvec_adds64 helper_gvec_adds64_aarch64 +#define helper_gvec_sub8 helper_gvec_sub8_aarch64 +#define helper_gvec_sub16 helper_gvec_sub16_aarch64 +#define helper_gvec_sub32 helper_gvec_sub32_aarch64 +#define helper_gvec_sub64 helper_gvec_sub64_aarch64 +#define helper_gvec_subs8 helper_gvec_subs8_aarch64 +#define helper_gvec_subs16 helper_gvec_subs16_aarch64 +#define helper_gvec_subs32 helper_gvec_subs32_aarch64 +#define helper_gvec_subs64 helper_gvec_subs64_aarch64 +#define helper_gvec_mul8 helper_gvec_mul8_aarch64 +#define helper_gvec_mul16 helper_gvec_mul16_aarch64 +#define helper_gvec_mul32 helper_gvec_mul32_aarch64 +#define helper_gvec_mul64 helper_gvec_mul64_aarch64 +#define helper_gvec_muls8 helper_gvec_muls8_aarch64 +#define helper_gvec_muls16 helper_gvec_muls16_aarch64 +#define helper_gvec_muls32 helper_gvec_muls32_aarch64 +#define helper_gvec_muls64 helper_gvec_muls64_aarch64 +#define helper_gvec_neg8 helper_gvec_neg8_aarch64 +#define helper_gvec_neg16 helper_gvec_neg16_aarch64 +#define helper_gvec_neg32 helper_gvec_neg32_aarch64 +#define helper_gvec_neg64 helper_gvec_neg64_aarch64 +#define helper_gvec_abs8 helper_gvec_abs8_aarch64 +#define helper_gvec_abs16 helper_gvec_abs16_aarch64 +#define helper_gvec_abs32 helper_gvec_abs32_aarch64 +#define helper_gvec_abs64 helper_gvec_abs64_aarch64 +#define helper_gvec_mov helper_gvec_mov_aarch64 +#define helper_gvec_dup64 helper_gvec_dup64_aarch64 +#define helper_gvec_dup32 helper_gvec_dup32_aarch64 +#define helper_gvec_dup16 helper_gvec_dup16_aarch64 +#define helper_gvec_dup8 helper_gvec_dup8_aarch64 +#define helper_gvec_not helper_gvec_not_aarch64 +#define helper_gvec_and helper_gvec_and_aarch64 +#define helper_gvec_or helper_gvec_or_aarch64 +#define helper_gvec_xor helper_gvec_xor_aarch64 +#define helper_gvec_andc helper_gvec_andc_aarch64 +#define helper_gvec_orc helper_gvec_orc_aarch64 +#define helper_gvec_nand helper_gvec_nand_aarch64 +#define helper_gvec_nor helper_gvec_nor_aarch64 +#define helper_gvec_eqv helper_gvec_eqv_aarch64 +#define helper_gvec_ands helper_gvec_ands_aarch64 +#define helper_gvec_xors helper_gvec_xors_aarch64 +#define helper_gvec_ors helper_gvec_ors_aarch64 +#define helper_gvec_shl8i helper_gvec_shl8i_aarch64 +#define helper_gvec_shl16i helper_gvec_shl16i_aarch64 +#define helper_gvec_shl32i helper_gvec_shl32i_aarch64 +#define helper_gvec_shl64i helper_gvec_shl64i_aarch64 +#define helper_gvec_shr8i helper_gvec_shr8i_aarch64 +#define helper_gvec_shr16i helper_gvec_shr16i_aarch64 +#define helper_gvec_shr32i helper_gvec_shr32i_aarch64 +#define helper_gvec_shr64i helper_gvec_shr64i_aarch64 +#define helper_gvec_sar8i helper_gvec_sar8i_aarch64 +#define helper_gvec_sar16i helper_gvec_sar16i_aarch64 +#define helper_gvec_sar32i helper_gvec_sar32i_aarch64 +#define helper_gvec_sar64i helper_gvec_sar64i_aarch64 +#define helper_gvec_shl8v helper_gvec_shl8v_aarch64 +#define helper_gvec_shl16v 
helper_gvec_shl16v_aarch64 +#define helper_gvec_shl32v helper_gvec_shl32v_aarch64 +#define helper_gvec_shl64v helper_gvec_shl64v_aarch64 +#define helper_gvec_shr8v helper_gvec_shr8v_aarch64 +#define helper_gvec_shr16v helper_gvec_shr16v_aarch64 +#define helper_gvec_shr32v helper_gvec_shr32v_aarch64 +#define helper_gvec_shr64v helper_gvec_shr64v_aarch64 +#define helper_gvec_sar8v helper_gvec_sar8v_aarch64 +#define helper_gvec_sar16v helper_gvec_sar16v_aarch64 +#define helper_gvec_sar32v helper_gvec_sar32v_aarch64 +#define helper_gvec_sar64v helper_gvec_sar64v_aarch64 +#define helper_gvec_eq8 helper_gvec_eq8_aarch64 +#define helper_gvec_ne8 helper_gvec_ne8_aarch64 +#define helper_gvec_lt8 helper_gvec_lt8_aarch64 +#define helper_gvec_le8 helper_gvec_le8_aarch64 +#define helper_gvec_ltu8 helper_gvec_ltu8_aarch64 +#define helper_gvec_leu8 helper_gvec_leu8_aarch64 +#define helper_gvec_eq16 helper_gvec_eq16_aarch64 +#define helper_gvec_ne16 helper_gvec_ne16_aarch64 +#define helper_gvec_lt16 helper_gvec_lt16_aarch64 +#define helper_gvec_le16 helper_gvec_le16_aarch64 +#define helper_gvec_ltu16 helper_gvec_ltu16_aarch64 +#define helper_gvec_leu16 helper_gvec_leu16_aarch64 +#define helper_gvec_eq32 helper_gvec_eq32_aarch64 +#define helper_gvec_ne32 helper_gvec_ne32_aarch64 +#define helper_gvec_lt32 helper_gvec_lt32_aarch64 +#define helper_gvec_le32 helper_gvec_le32_aarch64 +#define helper_gvec_ltu32 helper_gvec_ltu32_aarch64 +#define helper_gvec_leu32 helper_gvec_leu32_aarch64 +#define helper_gvec_eq64 helper_gvec_eq64_aarch64 +#define helper_gvec_ne64 helper_gvec_ne64_aarch64 +#define helper_gvec_lt64 helper_gvec_lt64_aarch64 +#define helper_gvec_le64 helper_gvec_le64_aarch64 +#define helper_gvec_ltu64 helper_gvec_ltu64_aarch64 +#define helper_gvec_leu64 helper_gvec_leu64_aarch64 +#define helper_gvec_ssadd8 helper_gvec_ssadd8_aarch64 +#define helper_gvec_ssadd16 helper_gvec_ssadd16_aarch64 +#define helper_gvec_ssadd32 helper_gvec_ssadd32_aarch64 +#define helper_gvec_ssadd64 helper_gvec_ssadd64_aarch64 +#define helper_gvec_sssub8 helper_gvec_sssub8_aarch64 +#define helper_gvec_sssub16 helper_gvec_sssub16_aarch64 +#define helper_gvec_sssub32 helper_gvec_sssub32_aarch64 +#define helper_gvec_sssub64 helper_gvec_sssub64_aarch64 +#define helper_gvec_usadd8 helper_gvec_usadd8_aarch64 +#define helper_gvec_usadd16 helper_gvec_usadd16_aarch64 +#define helper_gvec_usadd32 helper_gvec_usadd32_aarch64 +#define helper_gvec_usadd64 helper_gvec_usadd64_aarch64 +#define helper_gvec_ussub8 helper_gvec_ussub8_aarch64 +#define helper_gvec_ussub16 helper_gvec_ussub16_aarch64 +#define helper_gvec_ussub32 helper_gvec_ussub32_aarch64 +#define helper_gvec_ussub64 helper_gvec_ussub64_aarch64 +#define helper_gvec_smin8 helper_gvec_smin8_aarch64 +#define helper_gvec_smin16 helper_gvec_smin16_aarch64 +#define helper_gvec_smin32 helper_gvec_smin32_aarch64 +#define helper_gvec_smin64 helper_gvec_smin64_aarch64 +#define helper_gvec_smax8 helper_gvec_smax8_aarch64 +#define helper_gvec_smax16 helper_gvec_smax16_aarch64 +#define helper_gvec_smax32 helper_gvec_smax32_aarch64 +#define helper_gvec_smax64 helper_gvec_smax64_aarch64 +#define helper_gvec_umin8 helper_gvec_umin8_aarch64 +#define helper_gvec_umin16 helper_gvec_umin16_aarch64 +#define helper_gvec_umin32 helper_gvec_umin32_aarch64 +#define helper_gvec_umin64 helper_gvec_umin64_aarch64 +#define helper_gvec_umax8 helper_gvec_umax8_aarch64 +#define helper_gvec_umax16 helper_gvec_umax16_aarch64 +#define helper_gvec_umax32 helper_gvec_umax32_aarch64 +#define helper_gvec_umax64 
helper_gvec_umax64_aarch64 +#define helper_gvec_bitsel helper_gvec_bitsel_aarch64 +#define cpu_restore_state cpu_restore_state_aarch64 +#define page_collection_lock page_collection_lock_aarch64 +#define page_collection_unlock page_collection_unlock_aarch64 +#define free_code_gen_buffer free_code_gen_buffer_aarch64 +#define tcg_exec_init tcg_exec_init_aarch64 +#define tb_cleanup tb_cleanup_aarch64 +#define tb_flush tb_flush_aarch64 +#define tb_phys_invalidate tb_phys_invalidate_aarch64 +#define tb_gen_code tb_gen_code_aarch64 +#define tb_exec_lock tb_exec_lock_aarch64 +#define tb_exec_unlock tb_exec_unlock_aarch64 +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_aarch64 +#define tb_invalidate_phys_range tb_invalidate_phys_range_aarch64 +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_aarch64 +#define tb_check_watchpoint tb_check_watchpoint_aarch64 +#define cpu_io_recompile cpu_io_recompile_aarch64 +#define tb_flush_jmp_cache tb_flush_jmp_cache_aarch64 +#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_aarch64 +#define translator_loop_temp_check translator_loop_temp_check_aarch64 +#define translator_loop translator_loop_aarch64 +#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_aarch64 +#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_aarch64 +#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_aarch64 +#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_aarch64 +#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_aarch64 +#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_aarch64 +#define unassigned_mem_ops unassigned_mem_ops_aarch64 +#define floatx80_infinity floatx80_infinity_aarch64 +#define dup_const_func dup_const_func_aarch64 +#define gen_helper_raise_exception gen_helper_raise_exception_aarch64 +#define gen_helper_raise_interrupt gen_helper_raise_interrupt_aarch64 +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_aarch64 +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_aarch64 #define gen_helper_cpsr_read gen_helper_cpsr_read_aarch64 #define gen_helper_cpsr_write gen_helper_cpsr_write_aarch64 -#define gen_helper_crc32_arm gen_helper_crc32_arm_aarch64 -#define gen_helper_crc32c gen_helper_crc32c_aarch64 -#define gen_helper_crypto_aese gen_helper_crypto_aese_aarch64 -#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_aarch64 -#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_aarch64 -#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_aarch64 -#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_aarch64 -#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_aarch64 -#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_aarch64 -#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_aarch64 -#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_aarch64 -#define gen_helper_double_saturate gen_helper_double_saturate_aarch64 -#define gen_helper_exception_internal gen_helper_exception_internal_aarch64 -#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_aarch64 -#define gen_helper_get_cp_reg gen_helper_get_cp_reg_aarch64 -#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_aarch64 -#define gen_helper_get_r13_banked gen_helper_get_r13_banked_aarch64 -#define gen_helper_get_user_reg gen_helper_get_user_reg_aarch64 -#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_aarch64 -#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_aarch64 -#define gen_helper_iwmmxt_addcw 
gen_helper_iwmmxt_addcw_aarch64 -#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_aarch64 -#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_aarch64 -#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_aarch64 -#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_aarch64 -#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_aarch64 -#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_aarch64 -#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_aarch64 -#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_aarch64 -#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_aarch64 -#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_aarch64 -#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_aarch64 -#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_aarch64 -#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_aarch64 -#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_aarch64 -#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_aarch64 -#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_aarch64 -#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_aarch64 -#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_aarch64 -#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_aarch64 -#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_aarch64 -#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_aarch64 -#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_aarch64 -#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_aarch64 -#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_aarch64 -#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_aarch64 -#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_aarch64 -#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_aarch64 -#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_aarch64 -#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_aarch64 -#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_aarch64 -#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_aarch64 -#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_aarch64 -#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_aarch64 -#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_aarch64 -#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_aarch64 -#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_aarch64 -#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_aarch64 -#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_aarch64 -#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_aarch64 -#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_aarch64 -#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_aarch64 -#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_aarch64 -#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_aarch64 -#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_aarch64 -#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_aarch64 -#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_aarch64 -#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_aarch64 -#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_aarch64 -#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_aarch64 -#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_aarch64 -#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_aarch64 -#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_aarch64 -#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_aarch64 -#define 
gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_aarch64 -#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_aarch64 -#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_aarch64 -#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_aarch64 -#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_aarch64 -#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_aarch64 -#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_aarch64 -#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_aarch64 -#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_aarch64 -#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_aarch64 -#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_aarch64 -#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_aarch64 -#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_aarch64 -#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_aarch64 -#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_aarch64 -#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_aarch64 -#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_aarch64 -#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_aarch64 -#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_aarch64 -#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_aarch64 -#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_aarch64 -#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_aarch64 -#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_aarch64 -#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_aarch64 -#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_aarch64 -#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_aarch64 -#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_aarch64 -#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_aarch64 -#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_aarch64 -#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_aarch64 -#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_aarch64 -#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_aarch64 -#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_aarch64 -#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_aarch64 -#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_aarch64 -#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_aarch64 -#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_aarch64 -#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_aarch64 -#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_aarch64 -#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_aarch64 -#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_aarch64 -#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_aarch64 -#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_aarch64 -#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_aarch64 -#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_aarch64 -#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_aarch64 -#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_aarch64 -#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_aarch64 -#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_aarch64 -#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_aarch64 -#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_aarch64 -#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_aarch64 -#define gen_helper_neon_abdl_s64 
gen_helper_neon_abdl_s64_aarch64 -#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_aarch64 -#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_aarch64 -#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_aarch64 -#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_aarch64 -#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_aarch64 -#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_aarch64 -#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_aarch64 -#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_aarch64 -#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_aarch64 -#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_aarch64 -#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_aarch64 -#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_aarch64 -#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_aarch64 -#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_aarch64 -#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_aarch64 -#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_aarch64 -#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_aarch64 -#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_aarch64 -#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_aarch64 -#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_aarch64 -#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_aarch64 -#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_aarch64 -#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_aarch64 -#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_aarch64 -#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_aarch64 -#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_aarch64 -#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_aarch64 -#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_aarch64 -#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_aarch64 -#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_aarch64 -#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_aarch64 -#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_aarch64 -#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_aarch64 -#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_aarch64 -#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_aarch64 -#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_aarch64 -#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_aarch64 -#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_aarch64 -#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_aarch64 -#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_aarch64 -#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_aarch64 -#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_aarch64 -#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_aarch64 -#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_aarch64 -#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_aarch64 -#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_aarch64 -#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_aarch64 -#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_aarch64 -#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_aarch64 -#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_aarch64 -#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_aarch64 -#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_aarch64 -#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_aarch64 
-#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_aarch64
-#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_aarch64
-#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_aarch64
-#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_aarch64
-#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_aarch64
-#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_aarch64
-#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_aarch64
-#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_aarch64
-#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_aarch64
-#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_aarch64
-#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_aarch64
-#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_aarch64
-#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_aarch64
-#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_aarch64
-#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_aarch64
-#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_aarch64
-#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_aarch64
-#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_aarch64
-#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_aarch64
-#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_aarch64
-#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_aarch64
-#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_aarch64
-#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_aarch64
-#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_aarch64
-#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_aarch64
-#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_aarch64
-#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_aarch64
-#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_aarch64
-#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_aarch64
-#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_aarch64
-#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_aarch64
-#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_aarch64
-#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_aarch64
-#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_aarch64
-#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_aarch64
-#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_aarch64
-#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_aarch64
-#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_aarch64
-#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_aarch64
-#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_aarch64
-#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_aarch64
-#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_aarch64
-#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_aarch64
-#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_aarch64
-#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_aarch64
-#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_aarch64
-#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_aarch64
-#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_aarch64
-#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_aarch64
-#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_aarch64
-#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_aarch64
-#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_aarch64
-#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_aarch64
-#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_aarch64
-#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_aarch64
-#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_aarch64
-#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_aarch64
-#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_aarch64
-#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_aarch64
-#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_aarch64
-#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_aarch64
-#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_aarch64
-#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_aarch64
-#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_aarch64
-#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_aarch64
-#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_aarch64
-#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_aarch64
-#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_aarch64
-#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_aarch64
-#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_aarch64
-#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_aarch64
-#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_aarch64
-#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_aarch64
-#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_aarch64
-#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_aarch64
-#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_aarch64
-#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_aarch64
-#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_aarch64
-#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_aarch64
-#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_aarch64
-#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_aarch64
-#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_aarch64
-#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_aarch64
-#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_aarch64
-#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_aarch64
-#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_aarch64
-#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_aarch64
-#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_aarch64
-#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_aarch64
-#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_aarch64
-#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_aarch64
-#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_aarch64
-#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_aarch64
-#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_aarch64
-#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_aarch64
-#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_aarch64
-#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_aarch64
-#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_aarch64
-#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_aarch64
-#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_aarch64
-#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_aarch64
-#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_aarch64
-#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_aarch64
-#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_aarch64
-#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_aarch64
-#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_aarch64
-#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_aarch64
-#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_aarch64
-#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_aarch64
-#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_aarch64
-#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_aarch64
-#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_aarch64
-#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_aarch64
-#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_aarch64
-#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_aarch64
-#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_aarch64
-#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_aarch64
-#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_aarch64
-#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_aarch64
-#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_aarch64
-#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_aarch64
-#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_aarch64
-#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_aarch64
-#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_aarch64
-#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_aarch64
-#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_aarch64
-#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_aarch64
-#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_aarch64
-#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_aarch64
-#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_aarch64
-#define gen_helper_neon_tbl gen_helper_neon_tbl_aarch64
-#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_aarch64
-#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_aarch64
-#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_aarch64
-#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_aarch64
-#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_aarch64
-#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_aarch64
-#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_aarch64
-#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_aarch64
-#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_aarch64
-#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_aarch64
-#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_aarch64
-#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_aarch64
-#define gen_helper_neon_zip16 gen_helper_neon_zip16_aarch64
-#define gen_helper_neon_zip8 gen_helper_neon_zip8_aarch64
-#define gen_helper_pre_hvc gen_helper_pre_hvc_aarch64
-#define gen_helper_pre_smc gen_helper_pre_smc_aarch64
-#define gen_helper_qadd16 gen_helper_qadd16_aarch64
-#define gen_helper_qadd8 gen_helper_qadd8_aarch64
-#define gen_helper_qaddsubx gen_helper_qaddsubx_aarch64
-#define gen_helper_qsub16 gen_helper_qsub16_aarch64
-#define gen_helper_qsub8 gen_helper_qsub8_aarch64
-#define gen_helper_qsubaddx gen_helper_qsubaddx_aarch64
-#define gen_helper_rbit gen_helper_rbit_aarch64
-#define gen_helper_recpe_f32 gen_helper_recpe_f32_aarch64
-#define gen_helper_recpe_u32 gen_helper_recpe_u32_aarch64
-#define gen_helper_recps_f32 gen_helper_recps_f32_aarch64
-#define gen_helper_rintd gen_helper_rintd_aarch64
-#define gen_helper_rintd_exact gen_helper_rintd_exact_aarch64
-#define gen_helper_rints gen_helper_rints_aarch64
-#define gen_helper_rints_exact gen_helper_rints_exact_aarch64
-#define gen_helper_ror_cc gen_helper_ror_cc_aarch64
-#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_aarch64
-#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_aarch64
-#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_aarch64
-#define gen_helper_sadd16 gen_helper_sadd16_aarch64
-#define gen_helper_sadd8 gen_helper_sadd8_aarch64
-#define gen_helper_saddsubx gen_helper_saddsubx_aarch64
-#define gen_helper_sar_cc gen_helper_sar_cc_aarch64
-#define gen_helper_sdiv gen_helper_sdiv_aarch64
-#define gen_helper_sel_flags gen_helper_sel_flags_aarch64
-#define gen_helper_set_cp_reg gen_helper_set_cp_reg_aarch64
-#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_aarch64
-#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_aarch64
-#define gen_helper_set_r13_banked gen_helper_set_r13_banked_aarch64
-#define gen_helper_set_rmode gen_helper_set_rmode_aarch64
-#define gen_helper_set_user_reg gen_helper_set_user_reg_aarch64
-#define gen_helper_shadd16 gen_helper_shadd16_aarch64
-#define gen_helper_shadd8 gen_helper_shadd8_aarch64
-#define gen_helper_shaddsubx gen_helper_shaddsubx_aarch64
-#define gen_helper_shl_cc gen_helper_shl_cc_aarch64
-#define gen_helper_shr_cc gen_helper_shr_cc_aarch64
-#define gen_helper_shsub16 gen_helper_shsub16_aarch64
-#define gen_helper_shsub8 gen_helper_shsub8_aarch64
-#define gen_helper_shsubaddx gen_helper_shsubaddx_aarch64
-#define gen_helper_ssat gen_helper_ssat_aarch64
-#define gen_helper_ssat16 gen_helper_ssat16_aarch64
-#define gen_helper_ssub16 gen_helper_ssub16_aarch64
-#define gen_helper_ssub8 gen_helper_ssub8_aarch64
-#define gen_helper_ssubaddx gen_helper_ssubaddx_aarch64
-#define gen_helper_sub_saturate gen_helper_sub_saturate_aarch64
-#define gen_helper_sxtb16 gen_helper_sxtb16_aarch64
-#define gen_helper_uadd16 gen_helper_uadd16_aarch64
-#define gen_helper_uadd8 gen_helper_uadd8_aarch64
-#define gen_helper_uaddsubx gen_helper_uaddsubx_aarch64
-#define gen_helper_udiv gen_helper_udiv_aarch64
-#define gen_helper_uhadd16 gen_helper_uhadd16_aarch64
-#define gen_helper_uhadd8 gen_helper_uhadd8_aarch64
-#define gen_helper_uhaddsubx gen_helper_uhaddsubx_aarch64
-#define gen_helper_uhsub16 gen_helper_uhsub16_aarch64
-#define gen_helper_uhsub8 gen_helper_uhsub8_aarch64
-#define gen_helper_uhsubaddx gen_helper_uhsubaddx_aarch64
-#define gen_helper_uqadd16 gen_helper_uqadd16_aarch64
-#define gen_helper_uqadd8 gen_helper_uqadd8_aarch64
-#define gen_helper_uqaddsubx gen_helper_uqaddsubx_aarch64
-#define gen_helper_uqsub16 gen_helper_uqsub16_aarch64
-#define gen_helper_uqsub8 gen_helper_uqsub8_aarch64
-#define gen_helper_uqsubaddx gen_helper_uqsubaddx_aarch64
-#define gen_helper_usad8 gen_helper_usad8_aarch64
-#define gen_helper_usat gen_helper_usat_aarch64
-#define gen_helper_usat16 gen_helper_usat16_aarch64
-#define gen_helper_usub16 gen_helper_usub16_aarch64
-#define gen_helper_usub8 gen_helper_usub8_aarch64
-#define gen_helper_usubaddx gen_helper_usubaddx_aarch64
-#define gen_helper_uxtb16 gen_helper_uxtb16_aarch64
-#define gen_helper_v7m_mrs gen_helper_v7m_mrs_aarch64
-#define gen_helper_v7m_msr gen_helper_v7m_msr_aarch64
-#define gen_helper_vfp_absd gen_helper_vfp_absd_aarch64
-#define gen_helper_vfp_abss gen_helper_vfp_abss_aarch64
-#define gen_helper_vfp_addd gen_helper_vfp_addd_aarch64
-#define gen_helper_vfp_adds gen_helper_vfp_adds_aarch64
-#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_aarch64
-#define gen_helper_vfp_cmped gen_helper_vfp_cmped_aarch64
-#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_aarch64
-#define gen_helper_vfp_cmps gen_helper_vfp_cmps_aarch64
-#define gen_helper_vfp_divd gen_helper_vfp_divd_aarch64
-#define gen_helper_vfp_divs gen_helper_vfp_divs_aarch64
-#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_aarch64
-#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_aarch64
-#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_aarch64
-#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_aarch64
-#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_aarch64
-#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_aarch64
-#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_aarch64
-#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_aarch64
-#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_aarch64
-#define gen_helper_vfp_maxs gen_helper_vfp_maxs_aarch64
-#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_aarch64
-#define gen_helper_vfp_minnums gen_helper_vfp_minnums_aarch64
-#define gen_helper_vfp_mins gen_helper_vfp_mins_aarch64
-#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_aarch64
-#define gen_helper_vfp_muladds gen_helper_vfp_muladds_aarch64
-#define gen_helper_vfp_muld gen_helper_vfp_muld_aarch64
-#define gen_helper_vfp_muls gen_helper_vfp_muls_aarch64
-#define gen_helper_vfp_negd gen_helper_vfp_negd_aarch64
-#define gen_helper_vfp_negs gen_helper_vfp_negs_aarch64
-#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_aarch64
-#define gen_helper_vfp_shtod gen_helper_vfp_shtod_aarch64
-#define gen_helper_vfp_shtos gen_helper_vfp_shtos_aarch64
-#define gen_helper_vfp_sitod gen_helper_vfp_sitod_aarch64
-#define gen_helper_vfp_sitos gen_helper_vfp_sitos_aarch64
-#define gen_helper_vfp_sltod gen_helper_vfp_sltod_aarch64
-#define gen_helper_vfp_sltos gen_helper_vfp_sltos_aarch64
-#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_aarch64
-#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_aarch64
-#define gen_helper_vfp_subd gen_helper_vfp_subd_aarch64
-#define gen_helper_vfp_subs gen_helper_vfp_subs_aarch64
-#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_aarch64
-#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_aarch64
-#define gen_helper_vfp_tosid gen_helper_vfp_tosid_aarch64
-#define gen_helper_vfp_tosis gen_helper_vfp_tosis_aarch64
-#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_aarch64
-#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_aarch64
-#define gen_helper_vfp_tosld gen_helper_vfp_tosld_aarch64
-#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_aarch64
-#define gen_helper_vfp_tosls gen_helper_vfp_tosls_aarch64
-#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_aarch64
-#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_aarch64
-#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_aarch64
-#define gen_helper_vfp_touid gen_helper_vfp_touid_aarch64
-#define gen_helper_vfp_touis gen_helper_vfp_touis_aarch64
-#define gen_helper_vfp_touizd gen_helper_vfp_touizd_aarch64
-#define gen_helper_vfp_touizs gen_helper_vfp_touizs_aarch64
-#define gen_helper_vfp_tould gen_helper_vfp_tould_aarch64
-#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_aarch64
-#define gen_helper_vfp_touls gen_helper_vfp_touls_aarch64
-#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_aarch64
-#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_aarch64
-#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_aarch64
-#define gen_helper_vfp_uitod gen_helper_vfp_uitod_aarch64
-#define gen_helper_vfp_uitos gen_helper_vfp_uitos_aarch64
-#define gen_helper_vfp_ultod gen_helper_vfp_ultod_aarch64
-#define gen_helper_vfp_ultos gen_helper_vfp_ultos_aarch64
-#define gen_helper_wfe gen_helper_wfe_aarch64
-#define gen_helper_wfi gen_helper_wfi_aarch64
-#define gen_hvc gen_hvc_aarch64
-#define gen_intermediate_code_internal gen_intermediate_code_internal_aarch64
-#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_aarch64
-#define gen_iwmmxt_address gen_iwmmxt_address_aarch64
-#define gen_iwmmxt_shift gen_iwmmxt_shift_aarch64
-#define gen_jmp gen_jmp_aarch64
-#define gen_load_and_replicate gen_load_and_replicate_aarch64
-#define gen_load_exclusive gen_load_exclusive_aarch64
-#define gen_logic_CC gen_logic_CC_aarch64
-#define gen_logicq_cc gen_logicq_cc_aarch64
-#define gen_lookup_tb gen_lookup_tb_aarch64
-#define gen_mov_F0_vreg gen_mov_F0_vreg_aarch64
-#define gen_mov_F1_vreg gen_mov_F1_vreg_aarch64
-#define gen_mov_vreg_F0 gen_mov_vreg_F0_aarch64
-#define gen_muls_i64_i32 gen_muls_i64_i32_aarch64
-#define gen_mulu_i64_i32 gen_mulu_i64_i32_aarch64
-#define gen_mulxy gen_mulxy_aarch64
-#define gen_neon_add gen_neon_add_aarch64
-#define gen_neon_addl gen_neon_addl_aarch64
-#define gen_neon_addl_saturate gen_neon_addl_saturate_aarch64
-#define gen_neon_bsl gen_neon_bsl_aarch64
-#define gen_neon_dup_high16 gen_neon_dup_high16_aarch64
-#define gen_neon_dup_low16 gen_neon_dup_low16_aarch64
-#define gen_neon_dup_u8 gen_neon_dup_u8_aarch64
-#define gen_neon_mull gen_neon_mull_aarch64
-#define gen_neon_narrow gen_neon_narrow_aarch64
-#define gen_neon_narrow_op gen_neon_narrow_op_aarch64
-#define gen_neon_narrow_sats gen_neon_narrow_sats_aarch64
-#define gen_neon_narrow_satu gen_neon_narrow_satu_aarch64
-#define gen_neon_negl gen_neon_negl_aarch64
-#define gen_neon_rsb gen_neon_rsb_aarch64
-#define gen_neon_shift_narrow gen_neon_shift_narrow_aarch64
-#define gen_neon_subl gen_neon_subl_aarch64
-#define gen_neon_trn_u16 gen_neon_trn_u16_aarch64
-#define gen_neon_trn_u8 gen_neon_trn_u8_aarch64
-#define gen_neon_unarrow_sats gen_neon_unarrow_sats_aarch64
-#define gen_neon_unzip gen_neon_unzip_aarch64
-#define gen_neon_widen gen_neon_widen_aarch64
-#define gen_neon_zip gen_neon_zip_aarch64
-#define gen_new_label gen_new_label_aarch64
-#define gen_nop_hint gen_nop_hint_aarch64
-#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_aarch64
-#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_aarch64
-#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_aarch64
-#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_aarch64
-#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_aarch64
-#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_aarch64
-#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_aarch64
-#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_aarch64
-#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_aarch64
-#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_aarch64
-#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_aarch64
-#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_aarch64
-#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_aarch64
-#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_aarch64
-#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_aarch64
-#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_aarch64
-#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_aarch64
-#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_aarch64
-#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_aarch64
-#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_aarch64
-#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_aarch64
-#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_aarch64
-#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_aarch64
-#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_aarch64
-#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_aarch64
-#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_aarch64
-#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_aarch64
-#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_aarch64
-#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_aarch64
-#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_aarch64
-#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_aarch64
-#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_aarch64
-#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_aarch64
-#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_aarch64
-#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_aarch64
-#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_aarch64
-#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_aarch64
-#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_aarch64
-#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_aarch64
-#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_aarch64
-#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_aarch64
-#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_aarch64
-#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_aarch64
-#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_aarch64
-#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_aarch64
-#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_aarch64
-#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_aarch64
-#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_aarch64
-#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_aarch64
-#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_aarch64
-#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_aarch64
-#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_aarch64
-#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_aarch64
-#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_aarch64
-#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_aarch64
-#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_aarch64
-#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_aarch64
-#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_aarch64
-#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_aarch64
-#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_aarch64
-#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_aarch64
-#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_aarch64
-#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_aarch64
-#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_aarch64
-#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_aarch64
-#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_aarch64
-#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_aarch64
-#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_aarch64
-#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_aarch64
-#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_aarch64
-#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_aarch64
-#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_aarch64
-#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_aarch64
-#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_aarch64
-#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_aarch64
-#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_aarch64
-#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_aarch64
-#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_aarch64
-#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_aarch64
-#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_aarch64
-#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_aarch64
-#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_aarch64
-#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_aarch64
-#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_aarch64
-#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_aarch64
-#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_aarch64
-#define gen_rev16 gen_rev16_aarch64
-#define gen_revsh gen_revsh_aarch64
-#define gen_rfe gen_rfe_aarch64
-#define gen_sar gen_sar_aarch64
-#define gen_sbc_CC gen_sbc_CC_aarch64
-#define gen_sbfx gen_sbfx_aarch64
-#define gen_set_CF_bit31 gen_set_CF_bit31_aarch64
-#define gen_set_condexec gen_set_condexec_aarch64
-#define gen_set_cpsr gen_set_cpsr_aarch64
-#define gen_set_label gen_set_label_aarch64
-#define gen_set_pc_im gen_set_pc_im_aarch64
-#define gen_set_psr gen_set_psr_aarch64
-#define gen_set_psr_im gen_set_psr_im_aarch64
-#define gen_shl gen_shl_aarch64
-#define gen_shr gen_shr_aarch64
-#define gen_smc gen_smc_aarch64
-#define gen_smul_dual gen_smul_dual_aarch64
-#define gen_srs gen_srs_aarch64
-#define gen_ss_advance gen_ss_advance_aarch64
-#define gen_step_complete_exception gen_step_complete_exception_aarch64
-#define gen_store_exclusive gen_store_exclusive_aarch64
-#define gen_storeq_reg gen_storeq_reg_aarch64
-#define gen_sub_carry gen_sub_carry_aarch64
-#define gen_sub_CC gen_sub_CC_aarch64
-#define gen_subq_msw gen_subq_msw_aarch64
-#define gen_swap_half gen_swap_half_aarch64
-#define gen_thumb2_data_op gen_thumb2_data_op_aarch64
-#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_aarch64
-#define gen_ubfx gen_ubfx_aarch64
-#define gen_vfp_abs gen_vfp_abs_aarch64
-#define gen_vfp_add gen_vfp_add_aarch64
-#define gen_vfp_cmp gen_vfp_cmp_aarch64
-#define gen_vfp_cmpe gen_vfp_cmpe_aarch64
-#define gen_vfp_div gen_vfp_div_aarch64
-#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_aarch64
-#define gen_vfp_F1_mul gen_vfp_F1_mul_aarch64
-#define gen_vfp_F1_neg gen_vfp_F1_neg_aarch64
-#define gen_vfp_ld gen_vfp_ld_aarch64
-#define gen_vfp_mrs gen_vfp_mrs_aarch64
-#define gen_vfp_msr gen_vfp_msr_aarch64
-#define gen_vfp_mul gen_vfp_mul_aarch64
-#define gen_vfp_neg gen_vfp_neg_aarch64
-#define gen_vfp_shto gen_vfp_shto_aarch64
-#define gen_vfp_sito gen_vfp_sito_aarch64
-#define gen_vfp_slto gen_vfp_slto_aarch64
-#define gen_vfp_sqrt gen_vfp_sqrt_aarch64
-#define gen_vfp_st gen_vfp_st_aarch64
-#define gen_vfp_sub gen_vfp_sub_aarch64
-#define gen_vfp_tosh gen_vfp_tosh_aarch64
-#define gen_vfp_tosi gen_vfp_tosi_aarch64
-#define gen_vfp_tosiz gen_vfp_tosiz_aarch64
-#define gen_vfp_tosl gen_vfp_tosl_aarch64
-#define gen_vfp_touh gen_vfp_touh_aarch64
-#define gen_vfp_toui gen_vfp_toui_aarch64
-#define gen_vfp_touiz gen_vfp_touiz_aarch64
-#define gen_vfp_toul gen_vfp_toul_aarch64
-#define gen_vfp_uhto gen_vfp_uhto_aarch64
-#define gen_vfp_uito gen_vfp_uito_aarch64
-#define gen_vfp_ulto gen_vfp_ulto_aarch64
-#define get_arm_cp_reginfo get_arm_cp_reginfo_aarch64
-#define get_clock get_clock_aarch64
-#define get_clock_realtime get_clock_realtime_aarch64
-#define get_constraint_priority get_constraint_priority_aarch64
-#define get_float_exception_flags get_float_exception_flags_aarch64
-#define get_float_rounding_mode get_float_rounding_mode_aarch64
-#define get_fpstatus_ptr get_fpstatus_ptr_aarch64
-#define get_level1_table_address get_level1_table_address_aarch64
-#define get_mem_index get_mem_index_aarch64
-#define get_next_param_value get_next_param_value_aarch64
-#define get_opt_name get_opt_name_aarch64
-#define get_opt_value get_opt_value_aarch64
-#define get_page_addr_code get_page_addr_code_aarch64
-#define get_param_value get_param_value_aarch64
-#define get_phys_addr get_phys_addr_aarch64
-#define get_phys_addr_lpae get_phys_addr_lpae_aarch64
-#define get_phys_addr_mpu get_phys_addr_mpu_aarch64
-#define get_phys_addr_v5 get_phys_addr_v5_aarch64
-#define get_phys_addr_v6 get_phys_addr_v6_aarch64
-#define get_system_memory get_system_memory_aarch64
-#define get_ticks_per_sec get_ticks_per_sec_aarch64
-#define g_list_insert_sorted_merged g_list_insert_sorted_merged_aarch64
-#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__aarch64
-#define gt_cntfrq_access gt_cntfrq_access_aarch64
-#define gt_cnt_read gt_cnt_read_aarch64
-#define gt_cnt_reset gt_cnt_reset_aarch64
-#define gt_counter_access gt_counter_access_aarch64
-#define gt_ctl_write gt_ctl_write_aarch64
-#define gt_cval_write gt_cval_write_aarch64
-#define gt_get_countervalue gt_get_countervalue_aarch64
-#define gt_pct_access gt_pct_access_aarch64
-#define gt_ptimer_access gt_ptimer_access_aarch64
-#define gt_recalc_timer gt_recalc_timer_aarch64
-#define gt_timer_access gt_timer_access_aarch64
-#define gt_tval_read gt_tval_read_aarch64
-#define gt_tval_write gt_tval_write_aarch64
-#define gt_vct_access gt_vct_access_aarch64
-#define gt_vtimer_access gt_vtimer_access_aarch64
-#define guest_phys_blocks_free guest_phys_blocks_free_aarch64
-#define guest_phys_blocks_init guest_phys_blocks_init_aarch64
-#define handle_vcvt handle_vcvt_aarch64
-#define handle_vminmaxnm handle_vminmaxnm_aarch64
-#define handle_vrint handle_vrint_aarch64
-#define handle_vsel handle_vsel_aarch64
-#define has_help_option has_help_option_aarch64
-#define have_bmi1 have_bmi1_aarch64
-#define have_bmi2 have_bmi2_aarch64
-#define hcr_write hcr_write_aarch64
-#define helper_access_check_cp_reg helper_access_check_cp_reg_aarch64
-#define helper_add_saturate helper_add_saturate_aarch64
-#define helper_add_setq helper_add_setq_aarch64
-#define helper_add_usaturate helper_add_usaturate_aarch64
-#define helper_be_ldl_cmmu helper_be_ldl_cmmu_aarch64
-#define helper_be_ldq_cmmu helper_be_ldq_cmmu_aarch64
-#define helper_be_ldq_mmu helper_be_ldq_mmu_aarch64
-#define helper_be_ldsl_mmu helper_be_ldsl_mmu_aarch64
-#define helper_be_ldsw_mmu helper_be_ldsw_mmu_aarch64
-#define helper_be_ldul_mmu helper_be_ldul_mmu_aarch64
-#define helper_be_lduw_mmu helper_be_lduw_mmu_aarch64 -#define helper_be_ldw_cmmu helper_be_ldw_cmmu_aarch64 -#define helper_be_stl_mmu helper_be_stl_mmu_aarch64 -#define helper_be_stq_mmu helper_be_stq_mmu_aarch64 -#define helper_be_stw_mmu helper_be_stw_mmu_aarch64 -#define helper_clear_pstate_ss helper_clear_pstate_ss_aarch64 -#define helper_clz_arm helper_clz_arm_aarch64 -#define helper_cpsr_read helper_cpsr_read_aarch64 -#define helper_cpsr_write helper_cpsr_write_aarch64 -#define helper_crc32_arm helper_crc32_arm_aarch64 -#define helper_crc32c helper_crc32c_aarch64 +#define cpu_aarch64_init cpu_aarch64_init_aarch64 +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_aarch64 +#define arm_cpu_update_virq arm_cpu_update_virq_aarch64 +#define arm_cpu_update_vfiq arm_cpu_update_vfiq_aarch64 +#define arm_cpu_initfn arm_cpu_initfn_aarch64 +#define gt_cntfrq_period_ns gt_cntfrq_period_ns_aarch64 +#define arm_cpu_post_init arm_cpu_post_init_aarch64 +#define arm_cpu_realizefn arm_cpu_realizefn_aarch64 +#define arm_cpu_class_init arm_cpu_class_init_aarch64 +#define cpu_arm_init cpu_arm_init_aarch64 #define helper_crypto_aese helper_crypto_aese_aarch64 #define helper_crypto_aesmc helper_crypto_aesmc_aarch64 #define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_aarch64 @@ -1371,1664 +1292,27 @@ #define helper_crypto_sha256h2 helper_crypto_sha256h2_aarch64 #define helper_crypto_sha256su0 helper_crypto_sha256su0_aarch64 #define helper_crypto_sha256su1 helper_crypto_sha256su1_aarch64 -#define helper_dc_zva helper_dc_zva_aarch64 -#define helper_double_saturate helper_double_saturate_aarch64 -#define helper_exception_internal helper_exception_internal_aarch64 -#define helper_exception_return helper_exception_return_aarch64 -#define helper_exception_with_syndrome helper_exception_with_syndrome_aarch64 -#define helper_get_cp_reg helper_get_cp_reg_aarch64 -#define helper_get_cp_reg64 helper_get_cp_reg64_aarch64 -#define helper_get_r13_banked helper_get_r13_banked_aarch64 -#define helper_get_user_reg helper_get_user_reg_aarch64 -#define helper_iwmmxt_addcb helper_iwmmxt_addcb_aarch64 -#define helper_iwmmxt_addcl helper_iwmmxt_addcl_aarch64 -#define helper_iwmmxt_addcw helper_iwmmxt_addcw_aarch64 -#define helper_iwmmxt_addnb helper_iwmmxt_addnb_aarch64 -#define helper_iwmmxt_addnl helper_iwmmxt_addnl_aarch64 -#define helper_iwmmxt_addnw helper_iwmmxt_addnw_aarch64 -#define helper_iwmmxt_addsb helper_iwmmxt_addsb_aarch64 -#define helper_iwmmxt_addsl helper_iwmmxt_addsl_aarch64 -#define helper_iwmmxt_addsw helper_iwmmxt_addsw_aarch64 -#define helper_iwmmxt_addub helper_iwmmxt_addub_aarch64 -#define helper_iwmmxt_addul helper_iwmmxt_addul_aarch64 -#define helper_iwmmxt_adduw helper_iwmmxt_adduw_aarch64 -#define helper_iwmmxt_align helper_iwmmxt_align_aarch64 -#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_aarch64 -#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_aarch64 -#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_aarch64 -#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_aarch64 -#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_aarch64 -#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_aarch64 -#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_aarch64 -#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_aarch64 -#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_aarch64 -#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_aarch64 -#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_aarch64 -#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_aarch64 -#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_aarch64 
-#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_aarch64
-#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_aarch64
-#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_aarch64
-#define helper_iwmmxt_insr helper_iwmmxt_insr_aarch64
-#define helper_iwmmxt_macsw helper_iwmmxt_macsw_aarch64
-#define helper_iwmmxt_macuw helper_iwmmxt_macuw_aarch64
-#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_aarch64
-#define helper_iwmmxt_madduq helper_iwmmxt_madduq_aarch64
-#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_aarch64
-#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_aarch64
-#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_aarch64
-#define helper_iwmmxt_maxub helper_iwmmxt_maxub_aarch64
-#define helper_iwmmxt_maxul helper_iwmmxt_maxul_aarch64
-#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_aarch64
-#define helper_iwmmxt_minsb helper_iwmmxt_minsb_aarch64
-#define helper_iwmmxt_minsl helper_iwmmxt_minsl_aarch64
-#define helper_iwmmxt_minsw helper_iwmmxt_minsw_aarch64
-#define helper_iwmmxt_minub helper_iwmmxt_minub_aarch64
-#define helper_iwmmxt_minul helper_iwmmxt_minul_aarch64
-#define helper_iwmmxt_minuw helper_iwmmxt_minuw_aarch64
-#define helper_iwmmxt_msbb helper_iwmmxt_msbb_aarch64
-#define helper_iwmmxt_msbl helper_iwmmxt_msbl_aarch64
-#define helper_iwmmxt_msbw helper_iwmmxt_msbw_aarch64
-#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_aarch64
-#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_aarch64
-#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_aarch64
-#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_aarch64
-#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_aarch64
-#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_aarch64
-#define helper_iwmmxt_mululw helper_iwmmxt_mululw_aarch64
-#define helper_iwmmxt_packsl helper_iwmmxt_packsl_aarch64
-#define helper_iwmmxt_packsq helper_iwmmxt_packsq_aarch64
-#define helper_iwmmxt_packsw helper_iwmmxt_packsw_aarch64
-#define helper_iwmmxt_packul helper_iwmmxt_packul_aarch64
-#define helper_iwmmxt_packuq helper_iwmmxt_packuq_aarch64
-#define helper_iwmmxt_packuw helper_iwmmxt_packuw_aarch64
-#define helper_iwmmxt_rorl helper_iwmmxt_rorl_aarch64
-#define helper_iwmmxt_rorq helper_iwmmxt_rorq_aarch64
-#define helper_iwmmxt_rorw helper_iwmmxt_rorw_aarch64
-#define helper_iwmmxt_sadb helper_iwmmxt_sadb_aarch64
-#define helper_iwmmxt_sadw helper_iwmmxt_sadw_aarch64
-#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_aarch64
-#define helper_iwmmxt_shufh helper_iwmmxt_shufh_aarch64
-#define helper_iwmmxt_slll helper_iwmmxt_slll_aarch64
-#define helper_iwmmxt_sllq helper_iwmmxt_sllq_aarch64
-#define helper_iwmmxt_sllw helper_iwmmxt_sllw_aarch64
-#define helper_iwmmxt_sral helper_iwmmxt_sral_aarch64
-#define helper_iwmmxt_sraq helper_iwmmxt_sraq_aarch64
-#define helper_iwmmxt_sraw helper_iwmmxt_sraw_aarch64
-#define helper_iwmmxt_srll helper_iwmmxt_srll_aarch64
-#define helper_iwmmxt_srlq helper_iwmmxt_srlq_aarch64
-#define helper_iwmmxt_srlw helper_iwmmxt_srlw_aarch64
-#define helper_iwmmxt_subnb helper_iwmmxt_subnb_aarch64
-#define helper_iwmmxt_subnl helper_iwmmxt_subnl_aarch64
-#define helper_iwmmxt_subnw helper_iwmmxt_subnw_aarch64
-#define helper_iwmmxt_subsb helper_iwmmxt_subsb_aarch64
-#define helper_iwmmxt_subsl helper_iwmmxt_subsl_aarch64
-#define helper_iwmmxt_subsw helper_iwmmxt_subsw_aarch64
-#define helper_iwmmxt_subub helper_iwmmxt_subub_aarch64
-#define helper_iwmmxt_subul helper_iwmmxt_subul_aarch64
-#define helper_iwmmxt_subuw helper_iwmmxt_subuw_aarch64
-#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_aarch64
-#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_aarch64
-#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_aarch64
-#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_aarch64
-#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_aarch64
-#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_aarch64
-#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_aarch64
-#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_aarch64
-#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_aarch64
-#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_aarch64
-#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_aarch64
-#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_aarch64
-#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_aarch64
-#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_aarch64
-#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_aarch64
-#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_aarch64
-#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_aarch64
-#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_aarch64
-#define helper_ldb_cmmu helper_ldb_cmmu_aarch64
-#define helper_ldb_mmu helper_ldb_mmu_aarch64
-#define helper_ldl_cmmu helper_ldl_cmmu_aarch64
-#define helper_ldl_mmu helper_ldl_mmu_aarch64
-#define helper_ldq_cmmu helper_ldq_cmmu_aarch64
-#define helper_ldq_mmu helper_ldq_mmu_aarch64
-#define helper_ldw_cmmu helper_ldw_cmmu_aarch64
-#define helper_ldw_mmu helper_ldw_mmu_aarch64
-#define helper_le_ldl_cmmu helper_le_ldl_cmmu_aarch64
-#define helper_le_ldq_cmmu helper_le_ldq_cmmu_aarch64
-#define helper_le_ldq_mmu helper_le_ldq_mmu_aarch64
-#define helper_le_ldsl_mmu helper_le_ldsl_mmu_aarch64
-#define helper_le_ldsw_mmu helper_le_ldsw_mmu_aarch64
-#define helper_le_ldul_mmu helper_le_ldul_mmu_aarch64
-#define helper_le_lduw_mmu helper_le_lduw_mmu_aarch64
-#define helper_le_ldw_cmmu helper_le_ldw_cmmu_aarch64
-#define helper_le_stl_mmu helper_le_stl_mmu_aarch64
-#define helper_le_stq_mmu helper_le_stq_mmu_aarch64
-#define helper_le_stw_mmu helper_le_stw_mmu_aarch64
-#define helper_msr_i_pstate helper_msr_i_pstate_aarch64
-#define helper_neon_abd_f32 helper_neon_abd_f32_aarch64
-#define helper_neon_abdl_s16 helper_neon_abdl_s16_aarch64
-#define helper_neon_abdl_s32 helper_neon_abdl_s32_aarch64
-#define helper_neon_abdl_s64 helper_neon_abdl_s64_aarch64
-#define helper_neon_abdl_u16 helper_neon_abdl_u16_aarch64
-#define helper_neon_abdl_u32 helper_neon_abdl_u32_aarch64
-#define helper_neon_abdl_u64 helper_neon_abdl_u64_aarch64
-#define helper_neon_abd_s16 helper_neon_abd_s16_aarch64
-#define helper_neon_abd_s32 helper_neon_abd_s32_aarch64
-#define helper_neon_abd_s8 helper_neon_abd_s8_aarch64
-#define helper_neon_abd_u16 helper_neon_abd_u16_aarch64
-#define helper_neon_abd_u32 helper_neon_abd_u32_aarch64
-#define helper_neon_abd_u8 helper_neon_abd_u8_aarch64
-#define helper_neon_abs_s16 helper_neon_abs_s16_aarch64
-#define helper_neon_abs_s8 helper_neon_abs_s8_aarch64
-#define helper_neon_acge_f32 helper_neon_acge_f32_aarch64
-#define helper_neon_acge_f64 helper_neon_acge_f64_aarch64
-#define helper_neon_acgt_f32 helper_neon_acgt_f32_aarch64
-#define helper_neon_acgt_f64 helper_neon_acgt_f64_aarch64
-#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_aarch64
-#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_aarch64
-#define helper_neon_addl_u16 helper_neon_addl_u16_aarch64
-#define helper_neon_addl_u32 helper_neon_addl_u32_aarch64
-#define helper_neon_add_u16 helper_neon_add_u16_aarch64
-#define helper_neon_add_u8 helper_neon_add_u8_aarch64
-#define helper_neon_ceq_f32 helper_neon_ceq_f32_aarch64
-#define helper_neon_ceq_u16 helper_neon_ceq_u16_aarch64
-#define helper_neon_ceq_u32 helper_neon_ceq_u32_aarch64
-#define helper_neon_ceq_u8 helper_neon_ceq_u8_aarch64
-#define helper_neon_cge_f32 helper_neon_cge_f32_aarch64
-#define helper_neon_cge_s16 helper_neon_cge_s16_aarch64
-#define helper_neon_cge_s32 helper_neon_cge_s32_aarch64
-#define helper_neon_cge_s8 helper_neon_cge_s8_aarch64
-#define helper_neon_cge_u16 helper_neon_cge_u16_aarch64
-#define helper_neon_cge_u32 helper_neon_cge_u32_aarch64
-#define helper_neon_cge_u8 helper_neon_cge_u8_aarch64
-#define helper_neon_cgt_f32 helper_neon_cgt_f32_aarch64
-#define helper_neon_cgt_s16 helper_neon_cgt_s16_aarch64
-#define helper_neon_cgt_s32 helper_neon_cgt_s32_aarch64
-#define helper_neon_cgt_s8 helper_neon_cgt_s8_aarch64
-#define helper_neon_cgt_u16 helper_neon_cgt_u16_aarch64
-#define helper_neon_cgt_u32 helper_neon_cgt_u32_aarch64
-#define helper_neon_cgt_u8 helper_neon_cgt_u8_aarch64
-#define helper_neon_cls_s16 helper_neon_cls_s16_aarch64
-#define helper_neon_cls_s32 helper_neon_cls_s32_aarch64
-#define helper_neon_cls_s8 helper_neon_cls_s8_aarch64
-#define helper_neon_clz_u16 helper_neon_clz_u16_aarch64
-#define helper_neon_clz_u8 helper_neon_clz_u8_aarch64
-#define helper_neon_cnt_u8 helper_neon_cnt_u8_aarch64
-#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_aarch64
-#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_aarch64
-#define helper_neon_hadd_s16 helper_neon_hadd_s16_aarch64
-#define helper_neon_hadd_s32 helper_neon_hadd_s32_aarch64
-#define helper_neon_hadd_s8 helper_neon_hadd_s8_aarch64
-#define helper_neon_hadd_u16 helper_neon_hadd_u16_aarch64
-#define helper_neon_hadd_u32 helper_neon_hadd_u32_aarch64
-#define helper_neon_hadd_u8 helper_neon_hadd_u8_aarch64
-#define helper_neon_hsub_s16 helper_neon_hsub_s16_aarch64
-#define helper_neon_hsub_s32 helper_neon_hsub_s32_aarch64
-#define helper_neon_hsub_s8 helper_neon_hsub_s8_aarch64
-#define helper_neon_hsub_u16 helper_neon_hsub_u16_aarch64
-#define helper_neon_hsub_u32 helper_neon_hsub_u32_aarch64
-#define helper_neon_hsub_u8 helper_neon_hsub_u8_aarch64
-#define helper_neon_max_s16 helper_neon_max_s16_aarch64
-#define helper_neon_max_s32 helper_neon_max_s32_aarch64
-#define helper_neon_max_s8 helper_neon_max_s8_aarch64
-#define helper_neon_max_u16 helper_neon_max_u16_aarch64
-#define helper_neon_max_u32 helper_neon_max_u32_aarch64
-#define helper_neon_max_u8 helper_neon_max_u8_aarch64
-#define helper_neon_min_s16 helper_neon_min_s16_aarch64
-#define helper_neon_min_s32 helper_neon_min_s32_aarch64
-#define helper_neon_min_s8 helper_neon_min_s8_aarch64
-#define helper_neon_min_u16 helper_neon_min_u16_aarch64
-#define helper_neon_min_u32 helper_neon_min_u32_aarch64
-#define helper_neon_min_u8 helper_neon_min_u8_aarch64
-#define helper_neon_mull_p8 helper_neon_mull_p8_aarch64
-#define helper_neon_mull_s16 helper_neon_mull_s16_aarch64
-#define helper_neon_mull_s8 helper_neon_mull_s8_aarch64
-#define helper_neon_mull_u16 helper_neon_mull_u16_aarch64
-#define helper_neon_mull_u8 helper_neon_mull_u8_aarch64
-#define helper_neon_mul_p8 helper_neon_mul_p8_aarch64
-#define helper_neon_mul_u16 helper_neon_mul_u16_aarch64
-#define helper_neon_mul_u8 helper_neon_mul_u8_aarch64
-#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_aarch64
-#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_aarch64
-#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_aarch64
-#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_aarch64
-#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_aarch64
-#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_aarch64
-#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_aarch64
-#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_aarch64
-#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_aarch64
-#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_aarch64
-#define helper_neon_narrow_u16 helper_neon_narrow_u16_aarch64
-#define helper_neon_narrow_u8 helper_neon_narrow_u8_aarch64
-#define helper_neon_negl_u16 helper_neon_negl_u16_aarch64
-#define helper_neon_negl_u32 helper_neon_negl_u32_aarch64
-#define helper_neon_paddl_u16 helper_neon_paddl_u16_aarch64
-#define helper_neon_paddl_u32 helper_neon_paddl_u32_aarch64
-#define helper_neon_padd_u16 helper_neon_padd_u16_aarch64
-#define helper_neon_padd_u8 helper_neon_padd_u8_aarch64
-#define helper_neon_pmax_s16 helper_neon_pmax_s16_aarch64
-#define helper_neon_pmax_s8 helper_neon_pmax_s8_aarch64
-#define helper_neon_pmax_u16 helper_neon_pmax_u16_aarch64
-#define helper_neon_pmax_u8 helper_neon_pmax_u8_aarch64
-#define helper_neon_pmin_s16 helper_neon_pmin_s16_aarch64
-#define helper_neon_pmin_s8 helper_neon_pmin_s8_aarch64
-#define helper_neon_pmin_u16 helper_neon_pmin_u16_aarch64
-#define helper_neon_pmin_u8 helper_neon_pmin_u8_aarch64
-#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_aarch64
-#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_aarch64
-#define helper_neon_qabs_s16 helper_neon_qabs_s16_aarch64
-#define helper_neon_qabs_s32 helper_neon_qabs_s32_aarch64
-#define helper_neon_qabs_s64 helper_neon_qabs_s64_aarch64
-#define helper_neon_qabs_s8 helper_neon_qabs_s8_aarch64
-#define helper_neon_qadd_s16 helper_neon_qadd_s16_aarch64
-#define helper_neon_qadd_s32 helper_neon_qadd_s32_aarch64
-#define helper_neon_qadd_s64 helper_neon_qadd_s64_aarch64
-#define helper_neon_qadd_s8 helper_neon_qadd_s8_aarch64
-#define helper_neon_qadd_u16 helper_neon_qadd_u16_aarch64
-#define helper_neon_qadd_u32 helper_neon_qadd_u32_aarch64
-#define helper_neon_qadd_u64 helper_neon_qadd_u64_aarch64
-#define helper_neon_qadd_u8 helper_neon_qadd_u8_aarch64
-#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_aarch64
-#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_aarch64
-#define helper_neon_qneg_s16 helper_neon_qneg_s16_aarch64
-#define helper_neon_qneg_s32 helper_neon_qneg_s32_aarch64
-#define helper_neon_qneg_s64 helper_neon_qneg_s64_aarch64
-#define helper_neon_qneg_s8 helper_neon_qneg_s8_aarch64
-#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_aarch64
-#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_aarch64
-#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_aarch64
-#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_aarch64
-#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_aarch64
-#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_aarch64
-#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_aarch64
-#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_aarch64
-#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_aarch64
-#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_aarch64
-#define helper_neon_qshl_s16 helper_neon_qshl_s16_aarch64
-#define helper_neon_qshl_s32 helper_neon_qshl_s32_aarch64
-#define helper_neon_qshl_s64 helper_neon_qshl_s64_aarch64
-#define helper_neon_qshl_s8 helper_neon_qshl_s8_aarch64
-#define helper_neon_qshl_u16 helper_neon_qshl_u16_aarch64
-#define helper_neon_qshl_u32 helper_neon_qshl_u32_aarch64
-#define helper_neon_qshl_u64 helper_neon_qshl_u64_aarch64
-#define helper_neon_qshl_u8 helper_neon_qshl_u8_aarch64
-#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_aarch64
-#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_aarch64
-#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_aarch64
-#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_aarch64
-#define helper_neon_qsub_s16 helper_neon_qsub_s16_aarch64
-#define helper_neon_qsub_s32 helper_neon_qsub_s32_aarch64
-#define helper_neon_qsub_s64 helper_neon_qsub_s64_aarch64
-#define helper_neon_qsub_s8 helper_neon_qsub_s8_aarch64
-#define helper_neon_qsub_u16 helper_neon_qsub_u16_aarch64
-#define helper_neon_qsub_u32 helper_neon_qsub_u32_aarch64
-#define helper_neon_qsub_u64 helper_neon_qsub_u64_aarch64
-#define helper_neon_qsub_u8 helper_neon_qsub_u8_aarch64
-#define helper_neon_qunzip16 helper_neon_qunzip16_aarch64
-#define helper_neon_qunzip32 helper_neon_qunzip32_aarch64
-#define helper_neon_qunzip8 helper_neon_qunzip8_aarch64
-#define helper_neon_qzip16 helper_neon_qzip16_aarch64
-#define helper_neon_qzip32 helper_neon_qzip32_aarch64
-#define helper_neon_qzip8 helper_neon_qzip8_aarch64
-#define helper_neon_rbit_u8 helper_neon_rbit_u8_aarch64
-#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_aarch64
-#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_aarch64
-#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_aarch64
-#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_aarch64
-#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_aarch64
-#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_aarch64
-#define helper_neon_rshl_s16 helper_neon_rshl_s16_aarch64
-#define helper_neon_rshl_s32 helper_neon_rshl_s32_aarch64
-#define helper_neon_rshl_s64 helper_neon_rshl_s64_aarch64
-#define helper_neon_rshl_s8 helper_neon_rshl_s8_aarch64
-#define helper_neon_rshl_u16 helper_neon_rshl_u16_aarch64
-#define helper_neon_rshl_u32 helper_neon_rshl_u32_aarch64
-#define helper_neon_rshl_u64 helper_neon_rshl_u64_aarch64
-#define helper_neon_rshl_u8 helper_neon_rshl_u8_aarch64
-#define helper_neon_shl_s16 helper_neon_shl_s16_aarch64
-#define helper_neon_shl_s32 helper_neon_shl_s32_aarch64
-#define helper_neon_shl_s64 helper_neon_shl_s64_aarch64
-#define helper_neon_shl_s8 helper_neon_shl_s8_aarch64
-#define helper_neon_shl_u16 helper_neon_shl_u16_aarch64
-#define helper_neon_shl_u32 helper_neon_shl_u32_aarch64
-#define helper_neon_shl_u64 helper_neon_shl_u64_aarch64
-#define helper_neon_shl_u8 helper_neon_shl_u8_aarch64
-#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_aarch64
-#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_aarch64
-#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_aarch64
-#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_aarch64
-#define helper_neon_subl_u16 helper_neon_subl_u16_aarch64
-#define helper_neon_subl_u32 helper_neon_subl_u32_aarch64
-#define helper_neon_sub_u16 helper_neon_sub_u16_aarch64
-#define helper_neon_sub_u8 helper_neon_sub_u8_aarch64
-#define helper_neon_tbl helper_neon_tbl_aarch64
-#define helper_neon_tst_u16 helper_neon_tst_u16_aarch64
-#define helper_neon_tst_u32 helper_neon_tst_u32_aarch64
-#define helper_neon_tst_u8 helper_neon_tst_u8_aarch64
-#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_aarch64
-#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_aarch64
-#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_aarch64
-#define helper_neon_unzip16 helper_neon_unzip16_aarch64
-#define helper_neon_unzip8 helper_neon_unzip8_aarch64
-#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_aarch64
-#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_aarch64
-#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_aarch64
-#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_aarch64
-#define helper_neon_widen_s16 helper_neon_widen_s16_aarch64
-#define helper_neon_widen_s8 helper_neon_widen_s8_aarch64
-#define helper_neon_widen_u16 helper_neon_widen_u16_aarch64
-#define helper_neon_widen_u8 helper_neon_widen_u8_aarch64
-#define helper_neon_zip16 helper_neon_zip16_aarch64
-#define helper_neon_zip8 helper_neon_zip8_aarch64
-#define helper_pre_hvc helper_pre_hvc_aarch64
-#define helper_pre_smc helper_pre_smc_aarch64
-#define helper_qadd16 helper_qadd16_aarch64
-#define helper_qadd8 helper_qadd8_aarch64
-#define helper_qaddsubx helper_qaddsubx_aarch64
-#define helper_qsub16 helper_qsub16_aarch64
-#define helper_qsub8 helper_qsub8_aarch64
-#define helper_qsubaddx helper_qsubaddx_aarch64
-#define helper_rbit helper_rbit_aarch64
-#define helper_recpe_f32 helper_recpe_f32_aarch64
-#define helper_recpe_f64 helper_recpe_f64_aarch64
-#define helper_recpe_u32 helper_recpe_u32_aarch64
-#define helper_recps_f32 helper_recps_f32_aarch64
-#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_aarch64
-#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_aarch64
-#define helper_ret_ldub_mmu helper_ret_ldub_mmu_aarch64
-#define helper_ret_stb_mmu helper_ret_stb_mmu_aarch64
-#define helper_rintd helper_rintd_aarch64
-#define helper_rintd_exact helper_rintd_exact_aarch64
-#define helper_rints helper_rints_aarch64
-#define helper_rints_exact helper_rints_exact_aarch64
-#define helper_ror_cc helper_ror_cc_aarch64
-#define helper_rsqrte_f32 helper_rsqrte_f32_aarch64
-#define helper_rsqrte_f64 helper_rsqrte_f64_aarch64
-#define helper_rsqrte_u32 helper_rsqrte_u32_aarch64
-#define helper_rsqrts_f32 helper_rsqrts_f32_aarch64
-#define helper_sadd16 helper_sadd16_aarch64
-#define helper_sadd8 helper_sadd8_aarch64
-#define helper_saddsubx helper_saddsubx_aarch64
-#define helper_sar_cc helper_sar_cc_aarch64
-#define helper_sdiv helper_sdiv_aarch64
-#define helper_sel_flags helper_sel_flags_aarch64
-#define helper_set_cp_reg helper_set_cp_reg_aarch64
-#define helper_set_cp_reg64 helper_set_cp_reg64_aarch64
-#define helper_set_neon_rmode helper_set_neon_rmode_aarch64
-#define helper_set_r13_banked helper_set_r13_banked_aarch64
-#define helper_set_rmode helper_set_rmode_aarch64
-#define helper_set_user_reg helper_set_user_reg_aarch64
-#define helper_shadd16 helper_shadd16_aarch64
-#define helper_shadd8 helper_shadd8_aarch64
-#define helper_shaddsubx helper_shaddsubx_aarch64
-#define helper_shl_cc helper_shl_cc_aarch64
-#define helper_shr_cc helper_shr_cc_aarch64
-#define helper_shsub16 helper_shsub16_aarch64
-#define helper_shsub8 helper_shsub8_aarch64
-#define helper_shsubaddx helper_shsubaddx_aarch64
-#define helper_ssat helper_ssat_aarch64
-#define helper_ssat16 helper_ssat16_aarch64
-#define helper_ssub16 helper_ssub16_aarch64
-#define helper_ssub8 helper_ssub8_aarch64
-#define helper_ssubaddx helper_ssubaddx_aarch64
-#define helper_stb_mmu helper_stb_mmu_aarch64
-#define helper_stl_mmu helper_stl_mmu_aarch64
-#define helper_stq_mmu helper_stq_mmu_aarch64
-#define helper_stw_mmu helper_stw_mmu_aarch64
-#define helper_sub_saturate helper_sub_saturate_aarch64
-#define helper_sub_usaturate helper_sub_usaturate_aarch64
-#define helper_sxtb16 helper_sxtb16_aarch64
-#define helper_uadd16 helper_uadd16_aarch64
-#define helper_uadd8 helper_uadd8_aarch64
-#define helper_uaddsubx helper_uaddsubx_aarch64
-#define helper_udiv helper_udiv_aarch64
-#define helper_uhadd16 helper_uhadd16_aarch64
-#define helper_uhadd8 helper_uhadd8_aarch64
-#define helper_uhaddsubx helper_uhaddsubx_aarch64
-#define helper_uhsub16 helper_uhsub16_aarch64
-#define helper_uhsub8 helper_uhsub8_aarch64
-#define helper_uhsubaddx helper_uhsubaddx_aarch64
-#define helper_uqadd16 helper_uqadd16_aarch64
-#define helper_uqadd8 helper_uqadd8_aarch64
-#define helper_uqaddsubx helper_uqaddsubx_aarch64
-#define helper_uqsub16 helper_uqsub16_aarch64
-#define helper_uqsub8 helper_uqsub8_aarch64
-#define helper_uqsubaddx helper_uqsubaddx_aarch64
-#define helper_usad8 helper_usad8_aarch64
-#define helper_usat helper_usat_aarch64
-#define helper_usat16 helper_usat16_aarch64
-#define helper_usub16 helper_usub16_aarch64
-#define helper_usub8 helper_usub8_aarch64
-#define helper_usubaddx helper_usubaddx_aarch64
-#define helper_uxtb16 helper_uxtb16_aarch64
-#define helper_v7m_mrs helper_v7m_mrs_aarch64
-#define helper_v7m_msr helper_v7m_msr_aarch64
-#define helper_vfp_absd helper_vfp_absd_aarch64
-#define helper_vfp_abss helper_vfp_abss_aarch64
-#define helper_vfp_addd helper_vfp_addd_aarch64
-#define helper_vfp_adds helper_vfp_adds_aarch64
-#define helper_vfp_cmpd helper_vfp_cmpd_aarch64
-#define helper_vfp_cmped helper_vfp_cmped_aarch64
-#define helper_vfp_cmpes helper_vfp_cmpes_aarch64
-#define helper_vfp_cmps helper_vfp_cmps_aarch64
-#define helper_vfp_divd helper_vfp_divd_aarch64
-#define helper_vfp_divs helper_vfp_divs_aarch64
-#define helper_vfp_fcvtds helper_vfp_fcvtds_aarch64
-#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_aarch64
-#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_aarch64
-#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_aarch64
-#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_aarch64
-#define helper_vfp_fcvtsd helper_vfp_fcvtsd_aarch64
-#define helper_vfp_get_fpscr helper_vfp_get_fpscr_aarch64
-#define helper_vfp_maxd helper_vfp_maxd_aarch64
-#define helper_vfp_maxnumd helper_vfp_maxnumd_aarch64
-#define helper_vfp_maxnums helper_vfp_maxnums_aarch64
-#define helper_vfp_maxs helper_vfp_maxs_aarch64
-#define helper_vfp_mind helper_vfp_mind_aarch64
-#define helper_vfp_minnumd helper_vfp_minnumd_aarch64
-#define helper_vfp_minnums helper_vfp_minnums_aarch64
-#define helper_vfp_mins helper_vfp_mins_aarch64
-#define helper_vfp_muladdd helper_vfp_muladdd_aarch64
-#define helper_vfp_muladds helper_vfp_muladds_aarch64
-#define helper_vfp_muld helper_vfp_muld_aarch64
-#define helper_vfp_muls helper_vfp_muls_aarch64
-#define helper_vfp_negd helper_vfp_negd_aarch64
-#define helper_vfp_negs helper_vfp_negs_aarch64
-#define helper_vfp_set_fpscr helper_vfp_set_fpscr_aarch64
-#define helper_vfp_shtod helper_vfp_shtod_aarch64
-#define helper_vfp_shtos helper_vfp_shtos_aarch64
-#define helper_vfp_sitod helper_vfp_sitod_aarch64
-#define helper_vfp_sitos helper_vfp_sitos_aarch64
-#define helper_vfp_sltod helper_vfp_sltod_aarch64
-#define helper_vfp_sltos helper_vfp_sltos_aarch64
-#define helper_vfp_sqrtd helper_vfp_sqrtd_aarch64
-#define helper_vfp_sqrts helper_vfp_sqrts_aarch64
-#define helper_vfp_sqtod helper_vfp_sqtod_aarch64
-#define helper_vfp_sqtos helper_vfp_sqtos_aarch64
-#define helper_vfp_subd helper_vfp_subd_aarch64
-#define helper_vfp_subs helper_vfp_subs_aarch64
-#define helper_vfp_toshd helper_vfp_toshd_aarch64
-#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_aarch64
-#define helper_vfp_toshs helper_vfp_toshs_aarch64
-#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_aarch64
-#define helper_vfp_tosid helper_vfp_tosid_aarch64
-#define helper_vfp_tosis helper_vfp_tosis_aarch64
-#define helper_vfp_tosizd helper_vfp_tosizd_aarch64
-#define helper_vfp_tosizs helper_vfp_tosizs_aarch64
-#define helper_vfp_tosld helper_vfp_tosld_aarch64
-#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_aarch64
-#define helper_vfp_tosls helper_vfp_tosls_aarch64
-#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_aarch64
-#define helper_vfp_tosqd helper_vfp_tosqd_aarch64
-#define helper_vfp_tosqs helper_vfp_tosqs_aarch64
-#define helper_vfp_touhd helper_vfp_touhd_aarch64
-#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_aarch64
-#define helper_vfp_touhs helper_vfp_touhs_aarch64
-#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_aarch64
-#define helper_vfp_touid helper_vfp_touid_aarch64
-#define helper_vfp_touis helper_vfp_touis_aarch64
-#define helper_vfp_touizd helper_vfp_touizd_aarch64
-#define helper_vfp_touizs helper_vfp_touizs_aarch64
-#define helper_vfp_tould helper_vfp_tould_aarch64
-#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_aarch64
-#define helper_vfp_touls helper_vfp_touls_aarch64
-#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_aarch64
-#define helper_vfp_touqd helper_vfp_touqd_aarch64
-#define helper_vfp_touqs helper_vfp_touqs_aarch64
-#define helper_vfp_uhtod helper_vfp_uhtod_aarch64
-#define helper_vfp_uhtos helper_vfp_uhtos_aarch64
-#define helper_vfp_uitod helper_vfp_uitod_aarch64
-#define helper_vfp_uitos helper_vfp_uitos_aarch64
-#define helper_vfp_ultod helper_vfp_ultod_aarch64
-#define helper_vfp_ultos helper_vfp_ultos_aarch64
-#define helper_vfp_uqtod helper_vfp_uqtod_aarch64
-#define helper_vfp_uqtos helper_vfp_uqtos_aarch64
-#define helper_wfe helper_wfe_aarch64
-#define helper_wfi helper_wfi_aarch64
-#define hex2decimal hex2decimal_aarch64
-#define hw_breakpoint_update hw_breakpoint_update_aarch64
-#define hw_breakpoint_update_all hw_breakpoint_update_all_aarch64
-#define hw_watchpoint_update hw_watchpoint_update_aarch64
-#define hw_watchpoint_update_all hw_watchpoint_update_all_aarch64
-#define _init _init_aarch64
-#define init_cpreg_list init_cpreg_list_aarch64
-#define init_lists init_lists_aarch64
-#define input_type_enum input_type_enum_aarch64
-#define int128_2_64 int128_2_64_aarch64
-#define int128_add int128_add_aarch64
-#define int128_addto int128_addto_aarch64
-#define int128_and int128_and_aarch64
-#define int128_eq int128_eq_aarch64
-#define int128_ge int128_ge_aarch64
-#define int128_get64 int128_get64_aarch64
-#define int128_gt int128_gt_aarch64
-#define int128_le int128_le_aarch64
-#define int128_lt int128_lt_aarch64
-#define int128_make64 int128_make64_aarch64
-#define int128_max int128_max_aarch64
-#define int128_min int128_min_aarch64
-#define int128_ne int128_ne_aarch64
-#define int128_neg int128_neg_aarch64
-#define int128_nz int128_nz_aarch64
-#define int128_rshift int128_rshift_aarch64
-#define int128_sub int128_sub_aarch64
-#define int128_subfrom int128_subfrom_aarch64
-#define int128_zero int128_zero_aarch64
-#define int16_to_float32 int16_to_float32_aarch64
-#define int16_to_float64 int16_to_float64_aarch64
-#define int32_to_float128 int32_to_float128_aarch64
-#define int32_to_float32 int32_to_float32_aarch64
-#define int32_to_float64 int32_to_float64_aarch64
-#define int32_to_floatx80 int32_to_floatx80_aarch64
-#define int64_to_float128 int64_to_float128_aarch64
-#define int64_to_float32 int64_to_float32_aarch64
-#define int64_to_float64 int64_to_float64_aarch64
-#define int64_to_floatx80 int64_to_floatx80_aarch64
-#define invalidate_and_set_dirty invalidate_and_set_dirty_aarch64
-#define invalidate_page_bitmap invalidate_page_bitmap_aarch64
-#define io_mem_read io_mem_read_aarch64
-#define io_mem_write io_mem_write_aarch64
-#define io_readb io_readb_aarch64
-#define io_readl io_readl_aarch64
-#define io_readq io_readq_aarch64
-#define io_readw io_readw_aarch64
-#define iotlb_to_region iotlb_to_region_aarch64
-#define io_writeb io_writeb_aarch64
-#define io_writel io_writel_aarch64
-#define io_writeq io_writeq_aarch64
-#define io_writew io_writew_aarch64
-#define is_a64 is_a64_aarch64
-#define is_help_option is_help_option_aarch64
-#define isr_read isr_read_aarch64
-#define is_valid_option_list is_valid_option_list_aarch64
-#define iwmmxt_load_creg iwmmxt_load_creg_aarch64
-#define iwmmxt_load_reg iwmmxt_load_reg_aarch64
-#define iwmmxt_store_creg iwmmxt_store_creg_aarch64
-#define iwmmxt_store_reg iwmmxt_store_reg_aarch64
-#define __jit_debug_descriptor __jit_debug_descriptor_aarch64
-#define __jit_debug_register_code __jit_debug_register_code_aarch64
-#define kvm_to_cpreg_id kvm_to_cpreg_id_aarch64
-#define last_ram_offset last_ram_offset_aarch64
-#define ldl_be_p ldl_be_p_aarch64
-#define ldl_be_phys ldl_be_phys_aarch64
-#define ldl_he_p ldl_he_p_aarch64
-#define ldl_le_p ldl_le_p_aarch64
-#define ldl_le_phys ldl_le_phys_aarch64
-#define ldl_phys ldl_phys_aarch64
-#define ldl_phys_internal ldl_phys_internal_aarch64
-#define ldq_be_p ldq_be_p_aarch64
-#define ldq_be_phys ldq_be_phys_aarch64
-#define ldq_he_p ldq_he_p_aarch64
-#define ldq_le_p ldq_le_p_aarch64
-#define ldq_le_phys ldq_le_phys_aarch64
-#define ldq_phys ldq_phys_aarch64
-#define ldq_phys_internal ldq_phys_internal_aarch64
-#define ldst_name ldst_name_aarch64
-#define ldub_p ldub_p_aarch64
-#define ldub_phys ldub_phys_aarch64
-#define lduw_be_p lduw_be_p_aarch64
-#define lduw_be_phys lduw_be_phys_aarch64
-#define lduw_he_p lduw_he_p_aarch64
-#define lduw_le_p lduw_le_p_aarch64
-#define lduw_le_phys lduw_le_phys_aarch64
-#define lduw_phys lduw_phys_aarch64
-#define lduw_phys_internal lduw_phys_internal_aarch64
-#define le128 le128_aarch64
-#define linked_bp_matches linked_bp_matches_aarch64
-#define listener_add_address_space listener_add_address_space_aarch64
-#define load_cpu_offset load_cpu_offset_aarch64
-#define load_reg load_reg_aarch64
-#define load_reg_var load_reg_var_aarch64
-#define log_cpu_state log_cpu_state_aarch64
-#define lpae_cp_reginfo lpae_cp_reginfo_aarch64
-#define lt128 lt128_aarch64
-#define machine_class_init machine_class_init_aarch64
-#define machine_finalize machine_finalize_aarch64
-#define machine_info machine_info_aarch64
-#define machine_initfn machine_initfn_aarch64
-#define machine_register_types machine_register_types_aarch64
-#define machvirt_init machvirt_init_aarch64
-#define machvirt_machine_init machvirt_machine_init_aarch64
-#define maj maj_aarch64
-#define mapping_conflict mapping_conflict_aarch64
-#define mapping_contiguous mapping_contiguous_aarch64
-#define mapping_have_same_region mapping_have_same_region_aarch64
-#define mapping_merge mapping_merge_aarch64
-#define mem_add mem_add_aarch64
-#define mem_begin mem_begin_aarch64
-#define mem_commit mem_commit_aarch64
-#define memory_access_is_direct memory_access_is_direct_aarch64
-#define memory_access_size memory_access_size_aarch64
-#define memory_init memory_init_aarch64
-#define memory_listener_match memory_listener_match_aarch64
-#define memory_listener_register memory_listener_register_aarch64
-#define memory_listener_unregister memory_listener_unregister_aarch64
-#define memory_map_init memory_map_init_aarch64
-#define memory_mapping_filter memory_mapping_filter_aarch64
-#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_aarch64
-#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_aarch64
-#define memory_mapping_list_free memory_mapping_list_free_aarch64
-#define memory_mapping_list_init memory_mapping_list_init_aarch64
-#define memory_region_access_valid memory_region_access_valid_aarch64
-#define memory_region_add_subregion memory_region_add_subregion_aarch64
-#define memory_region_add_subregion_common memory_region_add_subregion_common_aarch64
-#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_aarch64
-#define memory_region_big_endian memory_region_big_endian_aarch64
-#define memory_region_clear_pending memory_region_clear_pending_aarch64
-#define memory_region_del_subregion memory_region_del_subregion_aarch64
-#define memory_region_destructor_alias memory_region_destructor_alias_aarch64
-#define memory_region_destructor_none memory_region_destructor_none_aarch64
-#define memory_region_destructor_ram memory_region_destructor_ram_aarch64
-#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_aarch64
-#define memory_region_dispatch_read memory_region_dispatch_read_aarch64
-#define memory_region_dispatch_read1 memory_region_dispatch_read1_aarch64
-#define memory_region_dispatch_write memory_region_dispatch_write_aarch64
-#define memory_region_escape_name memory_region_escape_name_aarch64
-#define memory_region_finalize memory_region_finalize_aarch64
-#define memory_region_find memory_region_find_aarch64
-#define memory_region_get_addr memory_region_get_addr_aarch64
-#define memory_region_get_alignment memory_region_get_alignment_aarch64
-#define memory_region_get_container memory_region_get_container_aarch64
-#define memory_region_get_fd memory_region_get_fd_aarch64
-#define memory_region_get_may_overlap memory_region_get_may_overlap_aarch64
-#define memory_region_get_priority memory_region_get_priority_aarch64
-#define memory_region_get_ram_addr memory_region_get_ram_addr_aarch64
-#define memory_region_get_ram_ptr memory_region_get_ram_ptr_aarch64
-#define memory_region_get_size memory_region_get_size_aarch64
-#define memory_region_info memory_region_info_aarch64
-#define memory_region_init memory_region_init_aarch64
-#define memory_region_init_alias memory_region_init_alias_aarch64
-#define memory_region_initfn memory_region_initfn_aarch64
-#define memory_region_init_io memory_region_init_io_aarch64
-#define memory_region_init_ram memory_region_init_ram_aarch64
-#define memory_region_init_ram_ptr memory_region_init_ram_ptr_aarch64
-#define memory_region_init_reservation memory_region_init_reservation_aarch64
-#define memory_region_is_iommu memory_region_is_iommu_aarch64
-#define memory_region_is_logging memory_region_is_logging_aarch64
-#define memory_region_is_mapped memory_region_is_mapped_aarch64
-#define memory_region_is_ram memory_region_is_ram_aarch64
-#define memory_region_is_rom memory_region_is_rom_aarch64
-#define memory_region_is_romd memory_region_is_romd_aarch64
-#define memory_region_is_skip_dump memory_region_is_skip_dump_aarch64
-#define memory_region_is_unassigned memory_region_is_unassigned_aarch64
-#define memory_region_name memory_region_name_aarch64
-#define memory_region_need_escape memory_region_need_escape_aarch64
-#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_aarch64
-#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_aarch64
-#define memory_region_present memory_region_present_aarch64
-#define memory_region_read_accessor memory_region_read_accessor_aarch64
-#define memory_region_readd_subregion memory_region_readd_subregion_aarch64
-#define memory_region_ref memory_region_ref_aarch64
-#define memory_region_resolve_container memory_region_resolve_container_aarch64
-#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_aarch64
-#define memory_region_section_get_iotlb memory_region_section_get_iotlb_aarch64
-#define memory_region_set_address memory_region_set_address_aarch64
-#define memory_region_set_alias_offset memory_region_set_alias_offset_aarch64
-#define memory_region_set_enabled memory_region_set_enabled_aarch64
-#define memory_region_set_readonly memory_region_set_readonly_aarch64
-#define memory_region_set_skip_dump memory_region_set_skip_dump_aarch64
-#define memory_region_size memory_region_size_aarch64
-#define memory_region_to_address_space memory_region_to_address_space_aarch64
-#define memory_region_transaction_begin memory_region_transaction_begin_aarch64
-#define memory_region_transaction_commit memory_region_transaction_commit_aarch64
-#define memory_region_unref memory_region_unref_aarch64
-#define memory_region_update_container_subregions memory_region_update_container_subregions_aarch64
-#define memory_region_write_accessor memory_region_write_accessor_aarch64
-#define memory_region_wrong_endianness memory_region_wrong_endianness_aarch64
-#define memory_try_enable_merging memory_try_enable_merging_aarch64
-#define module_call_init module_call_init_aarch64
-#define module_load module_load_aarch64
-#define mpidr_cp_reginfo mpidr_cp_reginfo_aarch64
-#define mpidr_read mpidr_read_aarch64
-#define msr_mask msr_mask_aarch64
-#define mul128By64To192 mul128By64To192_aarch64
-#define mul128To256 mul128To256_aarch64
-#define mul64To128 mul64To128_aarch64
-#define muldiv64 muldiv64_aarch64
-#define neon_2rm_is_float_op neon_2rm_is_float_op_aarch64
-#define neon_2rm_sizes neon_2rm_sizes_aarch64
-#define neon_3r_sizes neon_3r_sizes_aarch64
-#define neon_get_scalar neon_get_scalar_aarch64
-#define neon_load_reg neon_load_reg_aarch64
-#define neon_load_reg64 neon_load_reg64_aarch64
-#define neon_load_scratch neon_load_scratch_aarch64
-#define neon_ls_element_type neon_ls_element_type_aarch64
-#define neon_reg_offset neon_reg_offset_aarch64
-#define neon_store_reg neon_store_reg_aarch64
-#define neon_store_reg64 neon_store_reg64_aarch64
-#define neon_store_scratch neon_store_scratch_aarch64
-#define new_ldst_label new_ldst_label_aarch64
-#define next_list next_list_aarch64
-#define normalizeFloat128Subnormal normalizeFloat128Subnormal_aarch64
-#define normalizeFloat16Subnormal normalizeFloat16Subnormal_aarch64
-#define normalizeFloat32Subnormal normalizeFloat32Subnormal_aarch64
-#define normalizeFloat64Subnormal normalizeFloat64Subnormal_aarch64
-#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_aarch64
-#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_aarch64
-#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_aarch64
-#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_aarch64
-#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_aarch64
-#define not_v6_cp_reginfo not_v6_cp_reginfo_aarch64
-#define not_v7_cp_reginfo not_v7_cp_reginfo_aarch64
-#define not_v8_cp_reginfo not_v8_cp_reginfo_aarch64
-#define object_child_foreach object_child_foreach_aarch64
-#define object_class_foreach object_class_foreach_aarch64
-#define object_class_foreach_tramp object_class_foreach_tramp_aarch64
-#define object_class_get_list object_class_get_list_aarch64
-#define object_class_get_list_tramp object_class_get_list_tramp_aarch64
-#define object_class_get_parent object_class_get_parent_aarch64
-#define object_deinit object_deinit_aarch64
-#define object_dynamic_cast object_dynamic_cast_aarch64
-#define object_finalize object_finalize_aarch64
-#define object_finalize_child_property object_finalize_child_property_aarch64
-#define object_get_child_property object_get_child_property_aarch64
-#define object_get_link_property object_get_link_property_aarch64
-#define object_get_root object_get_root_aarch64
-#define object_initialize_with_type object_initialize_with_type_aarch64
-#define object_init_with_type object_init_with_type_aarch64
-#define object_instance_init object_instance_init_aarch64
-#define object_new_with_type object_new_with_type_aarch64
-#define object_post_init_with_type object_post_init_with_type_aarch64
-#define object_property_add_alias object_property_add_alias_aarch64
-#define object_property_add_link object_property_add_link_aarch64
-#define object_property_add_uint16_ptr object_property_add_uint16_ptr_aarch64
-#define object_property_add_uint32_ptr object_property_add_uint32_ptr_aarch64
-#define object_property_add_uint64_ptr object_property_add_uint64_ptr_aarch64
-#define object_property_add_uint8_ptr object_property_add_uint8_ptr_aarch64
-#define object_property_allow_set_link object_property_allow_set_link_aarch64
-#define object_property_del object_property_del_aarch64
-#define object_property_del_all object_property_del_all_aarch64
-#define object_property_find object_property_find_aarch64
-#define object_property_get object_property_get_aarch64
-#define object_property_get_bool object_property_get_bool_aarch64
-#define object_property_get_int object_property_get_int_aarch64
-#define object_property_get_link object_property_get_link_aarch64
-#define object_property_get_qobject object_property_get_qobject_aarch64
-#define object_property_get_str object_property_get_str_aarch64
-#define object_property_get_type object_property_get_type_aarch64
-#define object_property_is_child object_property_is_child_aarch64
-#define object_property_set object_property_set_aarch64
-#define object_property_set_description object_property_set_description_aarch64
-#define object_property_set_link object_property_set_link_aarch64
-#define object_property_set_qobject object_property_set_qobject_aarch64
-#define object_release_link_property object_release_link_property_aarch64
-#define object_resolve_abs_path object_resolve_abs_path_aarch64
-#define object_resolve_child_property object_resolve_child_property_aarch64
-#define object_resolve_link object_resolve_link_aarch64
-#define object_resolve_link_property object_resolve_link_property_aarch64
-#define object_resolve_partial_path object_resolve_partial_path_aarch64
-#define object_resolve_path object_resolve_path_aarch64
-#define object_resolve_path_component object_resolve_path_component_aarch64
-#define object_resolve_path_type object_resolve_path_type_aarch64
-#define object_set_link_property object_set_link_property_aarch64
-#define object_unparent object_unparent_aarch64
-#define omap_cachemaint_write omap_cachemaint_write_aarch64
-#define omap_cp_reginfo omap_cp_reginfo_aarch64
-#define omap_threadid_write omap_threadid_write_aarch64
-#define omap_ticonfig_write omap_ticonfig_write_aarch64
-#define omap_wfi_write omap_wfi_write_aarch64
-#define op_bits op_bits_aarch64
-#define open_modeflags open_modeflags_aarch64
-#define op_to_mov op_to_mov_aarch64
-#define op_to_movi op_to_movi_aarch64
-#define output_type_enum output_type_enum_aarch64
-#define packFloat128 packFloat128_aarch64
-#define packFloat16 packFloat16_aarch64
-#define packFloat32 packFloat32_aarch64
-#define packFloat64 packFloat64_aarch64
-#define packFloatx80 packFloatx80_aarch64
-#define page_find page_find_aarch64
-#define page_find_alloc page_find_alloc_aarch64
-#define page_flush_tb page_flush_tb_aarch64
-#define page_flush_tb_1 page_flush_tb_1_aarch64
-#define page_init page_init_aarch64
-#define page_size_init page_size_init_aarch64
-#define par par_aarch64
-#define parse_array parse_array_aarch64
-#define parse_error parse_error_aarch64
-#define parse_escape parse_escape_aarch64
-#define parse_keyword parse_keyword_aarch64
-#define parse_literal parse_literal_aarch64
-#define parse_object parse_object_aarch64
-#define parse_optional parse_optional_aarch64
-#define parse_option_bool parse_option_bool_aarch64
-#define parse_option_number parse_option_number_aarch64
-#define parse_option_size parse_option_size_aarch64
-#define parse_pair parse_pair_aarch64
-#define parser_context_free parser_context_free_aarch64
-#define parser_context_new parser_context_new_aarch64
-#define parser_context_peek_token parser_context_peek_token_aarch64
-#define parser_context_pop_token parser_context_pop_token_aarch64
-#define parser_context_restore parser_context_restore_aarch64
-#define parser_context_save parser_context_save_aarch64
-#define parse_str parse_str_aarch64
-#define parse_type_bool parse_type_bool_aarch64
-#define parse_type_int parse_type_int_aarch64
-#define parse_type_number parse_type_number_aarch64
-#define parse_type_size parse_type_size_aarch64
-#define parse_type_str parse_type_str_aarch64
-#define parse_value parse_value_aarch64
-#define par_write par_write_aarch64
-#define patch_reloc patch_reloc_aarch64
-#define phys_map_node_alloc phys_map_node_alloc_aarch64
-#define phys_map_node_reserve phys_map_node_reserve_aarch64
-#define phys_mem_alloc phys_mem_alloc_aarch64
-#define phys_mem_set_alloc phys_mem_set_alloc_aarch64
-#define phys_page_compact phys_page_compact_aarch64
-#define phys_page_compact_all phys_page_compact_all_aarch64
-#define phys_page_find phys_page_find_aarch64
-#define phys_page_set phys_page_set_aarch64
-#define phys_page_set_level phys_page_set_level_aarch64
-#define phys_section_add phys_section_add_aarch64
-#define phys_section_destroy phys_section_destroy_aarch64
-#define phys_sections_free phys_sections_free_aarch64
-#define pickNaN pickNaN_aarch64
-#define pickNaNMulAdd pickNaNMulAdd_aarch64
-#define pmccfiltr_write pmccfiltr_write_aarch64
-#define pmccntr_read pmccntr_read_aarch64
-#define pmccntr_sync pmccntr_sync_aarch64
-#define pmccntr_write pmccntr_write_aarch64
-#define pmccntr_write32 pmccntr_write32_aarch64
-#define pmcntenclr_write pmcntenclr_write_aarch64
-#define pmcntenset_write pmcntenset_write_aarch64
-#define pmcr_write pmcr_write_aarch64
-#define pmintenclr_write pmintenclr_write_aarch64
-#define pmintenset_write pmintenset_write_aarch64
-#define pmovsr_write pmovsr_write_aarch64
-#define pmreg_access pmreg_access_aarch64
-#define pmsav5_cp_reginfo pmsav5_cp_reginfo_aarch64
-#define pmsav5_data_ap_read pmsav5_data_ap_read_aarch64
-#define pmsav5_data_ap_write pmsav5_data_ap_write_aarch64
-#define pmsav5_insn_ap_read pmsav5_insn_ap_read_aarch64
-#define pmsav5_insn_ap_write pmsav5_insn_ap_write_aarch64
-#define pmuserenr_write pmuserenr_write_aarch64
-#define pmxevtyper_write pmxevtyper_write_aarch64
-#define print_type_bool print_type_bool_aarch64
-#define print_type_int print_type_int_aarch64
-#define print_type_number print_type_number_aarch64
-#define print_type_size print_type_size_aarch64
-#define print_type_str print_type_str_aarch64
-#define propagateFloat128NaN propagateFloat128NaN_aarch64
-#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_aarch64
-#define propagateFloat32NaN propagateFloat32NaN_aarch64
-#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_aarch64
-#define propagateFloat64NaN propagateFloat64NaN_aarch64
-#define propagateFloatx80NaN propagateFloatx80NaN_aarch64
-#define property_get_alias property_get_alias_aarch64
-#define property_get_bool property_get_bool_aarch64
-#define property_get_str property_get_str_aarch64
-#define property_get_uint16_ptr property_get_uint16_ptr_aarch64
-#define property_get_uint32_ptr property_get_uint32_ptr_aarch64
-#define property_get_uint64_ptr property_get_uint64_ptr_aarch64
-#define property_get_uint8_ptr property_get_uint8_ptr_aarch64
-#define property_release_alias property_release_alias_aarch64
-#define property_release_bool property_release_bool_aarch64
-#define property_release_str property_release_str_aarch64
-#define property_resolve_alias property_resolve_alias_aarch64
-#define property_set_alias property_set_alias_aarch64
-#define property_set_bool property_set_bool_aarch64
-#define property_set_str property_set_str_aarch64
-#define pstate_read pstate_read_aarch64
-#define pstate_write pstate_write_aarch64
-#define pxa250_initfn pxa250_initfn_aarch64
-#define pxa255_initfn pxa255_initfn_aarch64
-#define pxa260_initfn pxa260_initfn_aarch64
-#define pxa261_initfn pxa261_initfn_aarch64
-#define pxa262_initfn pxa262_initfn_aarch64
-#define pxa270a0_initfn pxa270a0_initfn_aarch64
-#define pxa270a1_initfn pxa270a1_initfn_aarch64
-#define pxa270b0_initfn pxa270b0_initfn_aarch64
-#define pxa270b1_initfn pxa270b1_initfn_aarch64
-#define pxa270c0_initfn pxa270c0_initfn_aarch64
-#define pxa270c5_initfn pxa270c5_initfn_aarch64
-#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_aarch64
-#define qapi_dealloc_end_list qapi_dealloc_end_list_aarch64
-#define qapi_dealloc_end_struct qapi_dealloc_end_struct_aarch64
-#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_aarch64
-#define qapi_dealloc_next_list qapi_dealloc_next_list_aarch64
-#define qapi_dealloc_pop qapi_dealloc_pop_aarch64
-#define qapi_dealloc_push qapi_dealloc_push_aarch64
-#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_aarch64
-#define qapi_dealloc_start_list qapi_dealloc_start_list_aarch64
-#define qapi_dealloc_start_struct qapi_dealloc_start_struct_aarch64
-#define qapi_dealloc_start_union qapi_dealloc_start_union_aarch64
-#define qapi_dealloc_type_bool qapi_dealloc_type_bool_aarch64
-#define qapi_dealloc_type_enum qapi_dealloc_type_enum_aarch64
-#define qapi_dealloc_type_int qapi_dealloc_type_int_aarch64
-#define qapi_dealloc_type_number qapi_dealloc_type_number_aarch64
-#define qapi_dealloc_type_size qapi_dealloc_type_size_aarch64
-#define qapi_dealloc_type_str qapi_dealloc_type_str_aarch64
-#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_aarch64
-#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_aarch64
-#define qapi_free_boolList qapi_free_boolList_aarch64
-#define qapi_free_ErrorClassList qapi_free_ErrorClassList_aarch64
-#define qapi_free_int16List qapi_free_int16List_aarch64
-#define qapi_free_int32List qapi_free_int32List_aarch64
-#define qapi_free_int64List qapi_free_int64List_aarch64
-#define qapi_free_int8List qapi_free_int8List_aarch64
-#define qapi_free_intList qapi_free_intList_aarch64
-#define qapi_free_numberList qapi_free_numberList_aarch64
-#define qapi_free_strList qapi_free_strList_aarch64
-#define qapi_free_uint16List qapi_free_uint16List_aarch64
-#define qapi_free_uint32List qapi_free_uint32List_aarch64
-#define qapi_free_uint64List qapi_free_uint64List_aarch64
-#define qapi_free_uint8List qapi_free_uint8List_aarch64
-#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_aarch64
-#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_aarch64
-#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_aarch64
-#define qbool_destroy_obj qbool_destroy_obj_aarch64
-#define qbool_from_int qbool_from_int_aarch64
-#define qbool_get_int qbool_get_int_aarch64
-#define qbool_type qbool_type_aarch64
-#define qbus_create qbus_create_aarch64
-#define qbus_create_inplace qbus_create_inplace_aarch64
-#define qbus_finalize qbus_finalize_aarch64
-#define qbus_initfn qbus_initfn_aarch64
-#define qbus_realize qbus_realize_aarch64
-#define qdev_create qdev_create_aarch64
-#define qdev_get_type qdev_get_type_aarch64
-#define qdev_register_types qdev_register_types_aarch64
-#define qdev_set_parent_bus qdev_set_parent_bus_aarch64
-#define qdev_try_create qdev_try_create_aarch64
-#define qdict_add_key qdict_add_key_aarch64
-#define qdict_array_split qdict_array_split_aarch64
-#define qdict_clone_shallow qdict_clone_shallow_aarch64
-#define qdict_del qdict_del_aarch64
-#define qdict_destroy_obj qdict_destroy_obj_aarch64
-#define qdict_entry_key qdict_entry_key_aarch64
-#define qdict_entry_value qdict_entry_value_aarch64
-#define qdict_extract_subqdict qdict_extract_subqdict_aarch64
-#define qdict_find qdict_find_aarch64
-#define qdict_first qdict_first_aarch64
-#define qdict_flatten qdict_flatten_aarch64
-#define qdict_flatten_qdict qdict_flatten_qdict_aarch64
-#define qdict_flatten_qlist qdict_flatten_qlist_aarch64
-#define qdict_get qdict_get_aarch64
-#define qdict_get_bool qdict_get_bool_aarch64
-#define qdict_get_double qdict_get_double_aarch64
-#define qdict_get_int qdict_get_int_aarch64
-#define qdict_get_obj qdict_get_obj_aarch64
-#define qdict_get_qdict qdict_get_qdict_aarch64
-#define qdict_get_qlist qdict_get_qlist_aarch64
-#define qdict_get_str qdict_get_str_aarch64
-#define qdict_get_try_bool qdict_get_try_bool_aarch64
-#define qdict_get_try_int qdict_get_try_int_aarch64
-#define qdict_get_try_str qdict_get_try_str_aarch64
-#define qdict_haskey qdict_haskey_aarch64
-#define qdict_has_prefixed_entries qdict_has_prefixed_entries_aarch64
-#define qdict_iter qdict_iter_aarch64
-#define qdict_join qdict_join_aarch64
-#define qdict_new qdict_new_aarch64
-#define qdict_next qdict_next_aarch64
-#define qdict_next_entry qdict_next_entry_aarch64
-#define qdict_put_obj qdict_put_obj_aarch64
-#define qdict_size qdict_size_aarch64
-#define qdict_type qdict_type_aarch64
-#define qemu_clock_get_us qemu_clock_get_us_aarch64
-#define qemu_clock_ptr qemu_clock_ptr_aarch64
-#define qemu_clocks qemu_clocks_aarch64
-#define qemu_get_cpu qemu_get_cpu_aarch64
-#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_aarch64
-#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_aarch64
-#define qemu_get_ram_block qemu_get_ram_block_aarch64
-#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_aarch64
-#define qemu_get_ram_fd qemu_get_ram_fd_aarch64
-#define qemu_get_ram_ptr qemu_get_ram_ptr_aarch64
-#define qemu_host_page_mask qemu_host_page_mask_aarch64
-#define qemu_host_page_size qemu_host_page_size_aarch64
-#define qemu_init_vcpu qemu_init_vcpu_aarch64
-#define qemu_ld_helpers qemu_ld_helpers_aarch64
-#define qemu_log_close qemu_log_close_aarch64
-#define qemu_log_enabled qemu_log_enabled_aarch64
-#define qemu_log_flush qemu_log_flush_aarch64
-#define qemu_loglevel_mask qemu_loglevel_mask_aarch64
-#define qemu_log_vprintf qemu_log_vprintf_aarch64
-#define qemu_oom_check qemu_oom_check_aarch64
-#define qemu_parse_fd qemu_parse_fd_aarch64
-#define qemu_ram_addr_from_host qemu_ram_addr_from_host_aarch64
-#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_aarch64
-#define qemu_ram_alloc qemu_ram_alloc_aarch64
-#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_aarch64
-#define qemu_ram_foreach_block qemu_ram_foreach_block_aarch64
-#define qemu_ram_free qemu_ram_free_aarch64
-#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_aarch64
-#define qemu_ram_ptr_length qemu_ram_ptr_length_aarch64
-#define qemu_ram_remap qemu_ram_remap_aarch64
-#define qemu_ram_setup_dump qemu_ram_setup_dump_aarch64
-#define qemu_ram_unset_idstr qemu_ram_unset_idstr_aarch64
-#define qemu_real_host_page_size qemu_real_host_page_size_aarch64
-#define qemu_st_helpers qemu_st_helpers_aarch64
-#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_aarch64
-#define qemu_try_memalign qemu_try_memalign_aarch64
-#define qentry_destroy qentry_destroy_aarch64
-#define qerror_human qerror_human_aarch64
-#define qerror_report qerror_report_aarch64
-#define qerror_report_err qerror_report_err_aarch64
-#define qfloat_destroy_obj qfloat_destroy_obj_aarch64
-#define qfloat_from_double qfloat_from_double_aarch64
-#define qfloat_get_double qfloat_get_double_aarch64
-#define qfloat_type qfloat_type_aarch64
-#define qint_destroy_obj qint_destroy_obj_aarch64
-#define qint_from_int qint_from_int_aarch64
-#define qint_get_int qint_get_int_aarch64
-#define qint_type qint_type_aarch64
-#define qlist_append_obj qlist_append_obj_aarch64
-#define qlist_copy qlist_copy_aarch64
-#define qlist_copy_elem qlist_copy_elem_aarch64
-#define qlist_destroy_obj qlist_destroy_obj_aarch64
-#define qlist_empty qlist_empty_aarch64
-#define qlist_entry_obj qlist_entry_obj_aarch64
-#define qlist_first qlist_first_aarch64
-#define qlist_iter qlist_iter_aarch64
-#define qlist_new qlist_new_aarch64
-#define qlist_next qlist_next_aarch64
-#define qlist_peek qlist_peek_aarch64
-#define qlist_pop qlist_pop_aarch64
-#define qlist_size qlist_size_aarch64
-#define qlist_size_iter qlist_size_iter_aarch64
-#define qlist_type qlist_type_aarch64
-#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_aarch64
-#define qmp_input_end_list qmp_input_end_list_aarch64
-#define qmp_input_end_struct qmp_input_end_struct_aarch64
-#define qmp_input_get_next_type qmp_input_get_next_type_aarch64
-#define qmp_input_get_object qmp_input_get_object_aarch64
-#define qmp_input_get_visitor qmp_input_get_visitor_aarch64
-#define qmp_input_next_list qmp_input_next_list_aarch64
-#define qmp_input_optional qmp_input_optional_aarch64
-#define qmp_input_pop qmp_input_pop_aarch64
-#define qmp_input_push qmp_input_push_aarch64
-#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_aarch64
-#define qmp_input_start_list qmp_input_start_list_aarch64
-#define qmp_input_start_struct qmp_input_start_struct_aarch64
-#define qmp_input_type_bool qmp_input_type_bool_aarch64
-#define qmp_input_type_int qmp_input_type_int_aarch64
-#define qmp_input_type_number qmp_input_type_number_aarch64
-#define qmp_input_type_str qmp_input_type_str_aarch64
-#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_aarch64
-#define qmp_input_visitor_new qmp_input_visitor_new_aarch64
-#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_aarch64
-#define qmp_output_add_obj qmp_output_add_obj_aarch64
-#define qmp_output_end_list qmp_output_end_list_aarch64
-#define qmp_output_end_struct qmp_output_end_struct_aarch64
-#define qmp_output_first qmp_output_first_aarch64
-#define qmp_output_get_qobject qmp_output_get_qobject_aarch64
-#define qmp_output_get_visitor qmp_output_get_visitor_aarch64
-#define qmp_output_last qmp_output_last_aarch64
-#define qmp_output_next_list qmp_output_next_list_aarch64
-#define qmp_output_pop qmp_output_pop_aarch64
-#define qmp_output_push_obj qmp_output_push_obj_aarch64
-#define qmp_output_start_list qmp_output_start_list_aarch64
-#define qmp_output_start_struct qmp_output_start_struct_aarch64
-#define qmp_output_type_bool qmp_output_type_bool_aarch64
-#define qmp_output_type_int qmp_output_type_int_aarch64
-#define qmp_output_type_number qmp_output_type_number_aarch64
-#define qmp_output_type_str qmp_output_type_str_aarch64
-#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_aarch64
-#define qmp_output_visitor_new qmp_output_visitor_new_aarch64
-#define qobject_decref qobject_decref_aarch64
-#define qobject_to_qbool qobject_to_qbool_aarch64
-#define qobject_to_qdict qobject_to_qdict_aarch64
-#define qobject_to_qfloat qobject_to_qfloat_aarch64
-#define qobject_to_qint qobject_to_qint_aarch64
-#define qobject_to_qlist qobject_to_qlist_aarch64
-#define qobject_to_qstring qobject_to_qstring_aarch64
-#define qobject_type qobject_type_aarch64
-#define qstring_append qstring_append_aarch64
-#define qstring_append_chr qstring_append_chr_aarch64
-#define qstring_append_int qstring_append_int_aarch64
-#define qstring_destroy_obj qstring_destroy_obj_aarch64
-#define qstring_from_escaped_str qstring_from_escaped_str_aarch64
-#define qstring_from_str qstring_from_str_aarch64
-#define qstring_from_substr qstring_from_substr_aarch64
-#define qstring_get_length qstring_get_length_aarch64
-#define qstring_get_str qstring_get_str_aarch64
-#define qstring_new qstring_new_aarch64
-#define qstring_type qstring_type_aarch64
-#define ram_block_add ram_block_add_aarch64
-#define ram_size ram_size_aarch64
-#define range_compare range_compare_aarch64
-#define range_covers_byte range_covers_byte_aarch64
-#define range_get_last range_get_last_aarch64
-#define range_merge range_merge_aarch64
-#define ranges_can_merge ranges_can_merge_aarch64
-#define raw_read raw_read_aarch64
-#define raw_write raw_write_aarch64
-#define rcon rcon_aarch64
-#define read_raw_cp_reg read_raw_cp_reg_aarch64
-#define recip_estimate recip_estimate_aarch64
-#define recip_sqrt_estimate recip_sqrt_estimate_aarch64
-#define register_cp_regs_for_features register_cp_regs_for_features_aarch64 -#define register_multipage register_multipage_aarch64 -#define register_subpage register_subpage_aarch64 -#define register_tm_clones register_tm_clones_aarch64 -#define register_types_object register_types_object_aarch64 -#define regnames regnames_aarch64 -#define render_memory_region render_memory_region_aarch64 -#define reset_all_temps reset_all_temps_aarch64 -#define reset_temp reset_temp_aarch64 -#define rol32 rol32_aarch64 -#define rol64 rol64_aarch64 -#define ror32 ror32_aarch64 -#define ror64 ror64_aarch64 -#define roundAndPackFloat128 roundAndPackFloat128_aarch64 -#define roundAndPackFloat16 roundAndPackFloat16_aarch64 -#define roundAndPackFloat32 roundAndPackFloat32_aarch64 -#define roundAndPackFloat64 roundAndPackFloat64_aarch64 -#define roundAndPackFloatx80 roundAndPackFloatx80_aarch64 -#define roundAndPackInt32 roundAndPackInt32_aarch64 -#define roundAndPackInt64 roundAndPackInt64_aarch64 -#define roundAndPackUint64 roundAndPackUint64_aarch64 -#define round_to_inf round_to_inf_aarch64 -#define run_on_cpu run_on_cpu_aarch64 -#define s0 s0_aarch64 -#define S0 S0_aarch64 -#define s1 s1_aarch64 -#define S1 S1_aarch64 -#define sa1100_initfn sa1100_initfn_aarch64 -#define sa1110_initfn sa1110_initfn_aarch64 -#define save_globals save_globals_aarch64 -#define scr_write scr_write_aarch64 -#define sctlr_write sctlr_write_aarch64 -#define set_bit set_bit_aarch64 -#define set_bits set_bits_aarch64 -#define set_default_nan_mode set_default_nan_mode_aarch64 -#define set_feature set_feature_aarch64 -#define set_float_detect_tininess set_float_detect_tininess_aarch64 -#define set_float_exception_flags set_float_exception_flags_aarch64 -#define set_float_rounding_mode set_float_rounding_mode_aarch64 -#define set_flush_inputs_to_zero set_flush_inputs_to_zero_aarch64 -#define set_flush_to_zero set_flush_to_zero_aarch64 -#define set_swi_errno set_swi_errno_aarch64 -#define sextract32 sextract32_aarch64 -#define sextract64 sextract64_aarch64 -#define shift128ExtraRightJamming shift128ExtraRightJamming_aarch64 -#define shift128Right shift128Right_aarch64 -#define shift128RightJamming shift128RightJamming_aarch64 -#define shift32RightJamming shift32RightJamming_aarch64 -#define shift64ExtraRightJamming shift64ExtraRightJamming_aarch64 -#define shift64RightJamming shift64RightJamming_aarch64 -#define shifter_out_im shifter_out_im_aarch64 -#define shortShift128Left shortShift128Left_aarch64 -#define shortShift192Left shortShift192Left_aarch64 -#define simple_mpu_ap_bits simple_mpu_ap_bits_aarch64 -#define size_code_gen_buffer size_code_gen_buffer_aarch64 -#define softmmu_lock_user softmmu_lock_user_aarch64 -#define softmmu_lock_user_string softmmu_lock_user_string_aarch64 -#define softmmu_tget32 softmmu_tget32_aarch64 -#define softmmu_tget8 softmmu_tget8_aarch64 -#define softmmu_tput32 softmmu_tput32_aarch64 -#define softmmu_unlock_user softmmu_unlock_user_aarch64 -#define sort_constraints sort_constraints_aarch64 -#define sp_el0_access sp_el0_access_aarch64 -#define spsel_read spsel_read_aarch64 -#define spsel_write spsel_write_aarch64 -#define start_list start_list_aarch64 -#define stb_p stb_p_aarch64 -#define stb_phys stb_phys_aarch64 -#define stl_be_p stl_be_p_aarch64 -#define stl_be_phys stl_be_phys_aarch64 -#define stl_he_p stl_he_p_aarch64 -#define stl_le_p stl_le_p_aarch64 -#define stl_le_phys stl_le_phys_aarch64 -#define stl_phys stl_phys_aarch64 -#define stl_phys_internal stl_phys_internal_aarch64 -#define 
stl_phys_notdirty stl_phys_notdirty_aarch64 -#define store_cpu_offset store_cpu_offset_aarch64 -#define store_reg store_reg_aarch64 -#define store_reg_bx store_reg_bx_aarch64 -#define store_reg_from_load store_reg_from_load_aarch64 -#define stq_be_p stq_be_p_aarch64 -#define stq_be_phys stq_be_phys_aarch64 -#define stq_he_p stq_he_p_aarch64 -#define stq_le_p stq_le_p_aarch64 -#define stq_le_phys stq_le_phys_aarch64 -#define stq_phys stq_phys_aarch64 -#define string_input_get_visitor string_input_get_visitor_aarch64 -#define string_input_visitor_cleanup string_input_visitor_cleanup_aarch64 -#define string_input_visitor_new string_input_visitor_new_aarch64 -#define strongarm_cp_reginfo strongarm_cp_reginfo_aarch64 -#define strstart strstart_aarch64 -#define strtosz strtosz_aarch64 -#define strtosz_suffix strtosz_suffix_aarch64 -#define stw_be_p stw_be_p_aarch64 -#define stw_be_phys stw_be_phys_aarch64 -#define stw_he_p stw_he_p_aarch64 -#define stw_le_p stw_le_p_aarch64 -#define stw_le_phys stw_le_phys_aarch64 -#define stw_phys stw_phys_aarch64 -#define stw_phys_internal stw_phys_internal_aarch64 -#define sub128 sub128_aarch64 -#define sub16_sat sub16_sat_aarch64 -#define sub16_usat sub16_usat_aarch64 -#define sub192 sub192_aarch64 -#define sub8_sat sub8_sat_aarch64 -#define sub8_usat sub8_usat_aarch64 -#define subFloat128Sigs subFloat128Sigs_aarch64 -#define subFloat32Sigs subFloat32Sigs_aarch64 -#define subFloat64Sigs subFloat64Sigs_aarch64 -#define subFloatx80Sigs subFloatx80Sigs_aarch64 -#define subpage_accepts subpage_accepts_aarch64 -#define subpage_init subpage_init_aarch64 -#define subpage_ops subpage_ops_aarch64 -#define subpage_read subpage_read_aarch64 -#define subpage_register subpage_register_aarch64 -#define subpage_write subpage_write_aarch64 -#define suffix_mul suffix_mul_aarch64 -#define swap_commutative swap_commutative_aarch64 -#define swap_commutative2 swap_commutative2_aarch64 -#define switch_mode switch_mode_aarch64 -#define switch_v7m_sp switch_v7m_sp_aarch64 -#define syn_aa32_bkpt syn_aa32_bkpt_aarch64 -#define syn_aa32_hvc syn_aa32_hvc_aarch64 -#define syn_aa32_smc syn_aa32_smc_aarch64 -#define syn_aa32_svc syn_aa32_svc_aarch64 -#define syn_breakpoint syn_breakpoint_aarch64 -#define sync_globals sync_globals_aarch64 -#define syn_cp14_rrt_trap syn_cp14_rrt_trap_aarch64 -#define syn_cp14_rt_trap syn_cp14_rt_trap_aarch64 -#define syn_cp15_rrt_trap syn_cp15_rrt_trap_aarch64 -#define syn_cp15_rt_trap syn_cp15_rt_trap_aarch64 -#define syn_data_abort syn_data_abort_aarch64 -#define syn_fp_access_trap syn_fp_access_trap_aarch64 -#define syn_insn_abort syn_insn_abort_aarch64 -#define syn_swstep syn_swstep_aarch64 -#define syn_uncategorized syn_uncategorized_aarch64 -#define syn_watchpoint syn_watchpoint_aarch64 -#define syscall_err syscall_err_aarch64 -#define system_bus_class_init system_bus_class_init_aarch64 -#define system_bus_info system_bus_info_aarch64 -#define t2ee_cp_reginfo t2ee_cp_reginfo_aarch64 -#define table_logic_cc table_logic_cc_aarch64 -#define target_parse_constraint target_parse_constraint_aarch64 -#define target_words_bigendian target_words_bigendian_aarch64 -#define tb_add_jump tb_add_jump_aarch64 -#define tb_alloc tb_alloc_aarch64 -#define tb_alloc_page tb_alloc_page_aarch64 -#define tb_check_watchpoint tb_check_watchpoint_aarch64 -#define tb_find_fast tb_find_fast_aarch64 -#define tb_find_pc tb_find_pc_aarch64 -#define tb_find_slow tb_find_slow_aarch64 -#define tb_flush tb_flush_aarch64 -#define tb_flush_jmp_cache tb_flush_jmp_cache_aarch64 -#define 
tb_free tb_free_aarch64 -#define tb_gen_code tb_gen_code_aarch64 -#define tb_hash_remove tb_hash_remove_aarch64 -#define tb_invalidate_phys_addr tb_invalidate_phys_addr_aarch64 -#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_aarch64 -#define tb_invalidate_phys_range tb_invalidate_phys_range_aarch64 -#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_aarch64 -#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_aarch64 -#define tb_jmp_remove tb_jmp_remove_aarch64 -#define tb_link_page tb_link_page_aarch64 -#define tb_page_remove tb_page_remove_aarch64 -#define tb_phys_hash_func tb_phys_hash_func_aarch64 -#define tb_phys_invalidate tb_phys_invalidate_aarch64 -#define tb_reset_jump tb_reset_jump_aarch64 -#define tb_set_jmp_target tb_set_jmp_target_aarch64 -#define tcg_accel_class_init tcg_accel_class_init_aarch64 -#define tcg_accel_type tcg_accel_type_aarch64 -#define tcg_add_param_i32 tcg_add_param_i32_aarch64 -#define tcg_add_param_i64 tcg_add_param_i64_aarch64 -#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_aarch64 -#define tcg_allowed tcg_allowed_aarch64 -#define tcg_canonicalize_memop tcg_canonicalize_memop_aarch64 -#define tcg_commit tcg_commit_aarch64 -#define tcg_cond_to_jcc tcg_cond_to_jcc_aarch64 -#define tcg_constant_folding tcg_constant_folding_aarch64 -#define tcg_const_i32 tcg_const_i32_aarch64 -#define tcg_const_i64 tcg_const_i64_aarch64 -#define tcg_const_local_i32 tcg_const_local_i32_aarch64 -#define tcg_const_local_i64 tcg_const_local_i64_aarch64 -#define tcg_context_init tcg_context_init_aarch64 -#define tcg_cpu_address_space_init tcg_cpu_address_space_init_aarch64 -#define tcg_cpu_exec tcg_cpu_exec_aarch64 -#define tcg_current_code_size tcg_current_code_size_aarch64 -#define tcg_dump_info tcg_dump_info_aarch64 -#define tcg_dump_ops tcg_dump_ops_aarch64 -#define tcg_exec_all tcg_exec_all_aarch64 -#define tcg_find_helper tcg_find_helper_aarch64 -#define tcg_func_start tcg_func_start_aarch64 -#define tcg_gen_abs_i32 tcg_gen_abs_i32_aarch64 -#define tcg_gen_add2_i32 tcg_gen_add2_i32_aarch64 -#define tcg_gen_add_i32 tcg_gen_add_i32_aarch64 -#define tcg_gen_add_i64 tcg_gen_add_i64_aarch64 -#define tcg_gen_addi_i32 tcg_gen_addi_i32_aarch64 -#define tcg_gen_addi_i64 tcg_gen_addi_i64_aarch64 -#define tcg_gen_andc_i32 tcg_gen_andc_i32_aarch64 -#define tcg_gen_and_i32 tcg_gen_and_i32_aarch64 -#define tcg_gen_and_i64 tcg_gen_and_i64_aarch64 -#define tcg_gen_andi_i32 tcg_gen_andi_i32_aarch64 -#define tcg_gen_andi_i64 tcg_gen_andi_i64_aarch64 -#define tcg_gen_br tcg_gen_br_aarch64 -#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_aarch64 -#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_aarch64 -#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_aarch64 -#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_aarch64 -#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_aarch64 -#define tcg_gen_callN tcg_gen_callN_aarch64 -#define tcg_gen_code tcg_gen_code_aarch64 -#define tcg_gen_code_common tcg_gen_code_common_aarch64 -#define tcg_gen_code_search_pc tcg_gen_code_search_pc_aarch64 -#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_aarch64 -#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_aarch64 -#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_aarch64 -#define tcg_gen_exit_tb tcg_gen_exit_tb_aarch64 -#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_aarch64 -#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_aarch64 -#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_aarch64 -#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_aarch64 -#define tcg_gen_ext8s_i32 
tcg_gen_ext8s_i32_aarch64 -#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_aarch64 -#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_aarch64 -#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_aarch64 -#define tcg_gen_goto_tb tcg_gen_goto_tb_aarch64 -#define tcg_gen_ld_i32 tcg_gen_ld_i32_aarch64 -#define tcg_gen_ld_i64 tcg_gen_ld_i64_aarch64 -#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_aarch64 -#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_aarch64 -#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_aarch64 -#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_aarch64 -#define tcg_gen_mov_i32 tcg_gen_mov_i32_aarch64 -#define tcg_gen_mov_i64 tcg_gen_mov_i64_aarch64 -#define tcg_gen_movi_i32 tcg_gen_movi_i32_aarch64 -#define tcg_gen_movi_i64 tcg_gen_movi_i64_aarch64 -#define tcg_gen_mul_i32 tcg_gen_mul_i32_aarch64 -#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_aarch64 -#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_aarch64 -#define tcg_gen_neg_i32 tcg_gen_neg_i32_aarch64 -#define tcg_gen_neg_i64 tcg_gen_neg_i64_aarch64 -#define tcg_gen_not_i32 tcg_gen_not_i32_aarch64 -#define tcg_gen_op0 tcg_gen_op0_aarch64 -#define tcg_gen_op1i tcg_gen_op1i_aarch64 -#define tcg_gen_op2_i32 tcg_gen_op2_i32_aarch64 -#define tcg_gen_op2_i64 tcg_gen_op2_i64_aarch64 -#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_aarch64 -#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_aarch64 -#define tcg_gen_op3_i32 tcg_gen_op3_i32_aarch64 -#define tcg_gen_op3_i64 tcg_gen_op3_i64_aarch64 -#define tcg_gen_op4_i32 tcg_gen_op4_i32_aarch64 -#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_aarch64 -#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_aarch64 -#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_aarch64 -#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_aarch64 -#define tcg_gen_op6_i32 tcg_gen_op6_i32_aarch64 -#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_aarch64 -#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_aarch64 -#define tcg_gen_orc_i32 tcg_gen_orc_i32_aarch64 -#define tcg_gen_or_i32 tcg_gen_or_i32_aarch64 -#define tcg_gen_or_i64 tcg_gen_or_i64_aarch64 -#define tcg_gen_ori_i32 tcg_gen_ori_i32_aarch64 -#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_aarch64 -#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_aarch64 -#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_aarch64 -#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_aarch64 -#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_aarch64 -#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_aarch64 -#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_aarch64 -#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_aarch64 -#define tcg_gen_sar_i32 tcg_gen_sar_i32_aarch64 -#define tcg_gen_sari_i32 tcg_gen_sari_i32_aarch64 -#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_aarch64 -#define tcg_gen_shl_i32 tcg_gen_shl_i32_aarch64 -#define tcg_gen_shl_i64 tcg_gen_shl_i64_aarch64 -#define tcg_gen_shli_i32 tcg_gen_shli_i32_aarch64 -#define tcg_gen_shli_i64 tcg_gen_shli_i64_aarch64 -#define tcg_gen_shr_i32 tcg_gen_shr_i32_aarch64 -#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_aarch64 -#define tcg_gen_shr_i64 tcg_gen_shr_i64_aarch64 -#define tcg_gen_shri_i32 tcg_gen_shri_i32_aarch64 -#define tcg_gen_shri_i64 tcg_gen_shri_i64_aarch64 -#define tcg_gen_st_i32 tcg_gen_st_i32_aarch64 -#define tcg_gen_st_i64 tcg_gen_st_i64_aarch64 -#define tcg_gen_sub_i32 tcg_gen_sub_i32_aarch64 -#define tcg_gen_sub_i64 tcg_gen_sub_i64_aarch64 -#define tcg_gen_subi_i32 tcg_gen_subi_i32_aarch64 -#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_aarch64 -#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_aarch64 -#define tcg_gen_xor_i32 tcg_gen_xor_i32_aarch64 -#define tcg_gen_xor_i64 
tcg_gen_xor_i64_aarch64 -#define tcg_gen_xori_i32 tcg_gen_xori_i32_aarch64 -#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_aarch64 -#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_aarch64 -#define tcg_get_arg_str_idx tcg_get_arg_str_idx_aarch64 -#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_aarch64 -#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_aarch64 -#define tcg_global_mem_new_internal tcg_global_mem_new_internal_aarch64 -#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_aarch64 -#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_aarch64 -#define tcg_global_reg_new_internal tcg_global_reg_new_internal_aarch64 -#define tcg_handle_interrupt tcg_handle_interrupt_aarch64 -#define tcg_init tcg_init_aarch64 -#define tcg_invert_cond tcg_invert_cond_aarch64 -#define tcg_la_bb_end tcg_la_bb_end_aarch64 -#define tcg_la_br_end tcg_la_br_end_aarch64 -#define tcg_la_func_end tcg_la_func_end_aarch64 -#define tcg_liveness_analysis tcg_liveness_analysis_aarch64 -#define tcg_malloc tcg_malloc_aarch64 -#define tcg_malloc_internal tcg_malloc_internal_aarch64 -#define tcg_op_defs_org tcg_op_defs_org_aarch64 -#define tcg_opt_gen_mov tcg_opt_gen_mov_aarch64 -#define tcg_opt_gen_movi tcg_opt_gen_movi_aarch64 -#define tcg_optimize tcg_optimize_aarch64 -#define tcg_out16 tcg_out16_aarch64 -#define tcg_out32 tcg_out32_aarch64 -#define tcg_out64 tcg_out64_aarch64 -#define tcg_out8 tcg_out8_aarch64 -#define tcg_out_addi tcg_out_addi_aarch64 -#define tcg_out_branch tcg_out_branch_aarch64 -#define tcg_out_brcond32 tcg_out_brcond32_aarch64 -#define tcg_out_brcond64 tcg_out_brcond64_aarch64 -#define tcg_out_bswap32 tcg_out_bswap32_aarch64 -#define tcg_out_bswap64 tcg_out_bswap64_aarch64 -#define tcg_out_call tcg_out_call_aarch64 -#define tcg_out_cmp tcg_out_cmp_aarch64 -#define tcg_out_ext16s tcg_out_ext16s_aarch64 -#define tcg_out_ext16u tcg_out_ext16u_aarch64 -#define tcg_out_ext32s tcg_out_ext32s_aarch64 -#define tcg_out_ext32u tcg_out_ext32u_aarch64 -#define tcg_out_ext8s tcg_out_ext8s_aarch64 -#define tcg_out_ext8u tcg_out_ext8u_aarch64 -#define tcg_out_jmp tcg_out_jmp_aarch64 -#define tcg_out_jxx tcg_out_jxx_aarch64 -#define tcg_out_label tcg_out_label_aarch64 -#define tcg_out_ld tcg_out_ld_aarch64 -#define tcg_out_modrm tcg_out_modrm_aarch64 -#define tcg_out_modrm_offset tcg_out_modrm_offset_aarch64 -#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_aarch64 -#define tcg_out_mov tcg_out_mov_aarch64 -#define tcg_out_movcond32 tcg_out_movcond32_aarch64 -#define tcg_out_movcond64 tcg_out_movcond64_aarch64 -#define tcg_out_movi tcg_out_movi_aarch64 -#define tcg_out_op tcg_out_op_aarch64 -#define tcg_out_pop tcg_out_pop_aarch64 -#define tcg_out_push tcg_out_push_aarch64 -#define tcg_out_qemu_ld tcg_out_qemu_ld_aarch64 -#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_aarch64 -#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_aarch64 -#define tcg_out_qemu_st tcg_out_qemu_st_aarch64 -#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_aarch64 -#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_aarch64 -#define tcg_out_reloc tcg_out_reloc_aarch64 -#define tcg_out_rolw_8 tcg_out_rolw_8_aarch64 -#define tcg_out_setcond32 tcg_out_setcond32_aarch64 -#define tcg_out_setcond64 tcg_out_setcond64_aarch64 -#define tcg_out_shifti tcg_out_shifti_aarch64 -#define tcg_out_st tcg_out_st_aarch64 -#define tcg_out_tb_finalize tcg_out_tb_finalize_aarch64 -#define tcg_out_tb_init tcg_out_tb_init_aarch64 -#define tcg_out_tlb_load tcg_out_tlb_load_aarch64 -#define tcg_out_vex_modrm 
tcg_out_vex_modrm_aarch64 -#define tcg_patch32 tcg_patch32_aarch64 -#define tcg_patch8 tcg_patch8_aarch64 -#define tcg_pcrel_diff tcg_pcrel_diff_aarch64 -#define tcg_pool_reset tcg_pool_reset_aarch64 -#define tcg_prologue_init tcg_prologue_init_aarch64 -#define tcg_ptr_byte_diff tcg_ptr_byte_diff_aarch64 -#define tcg_reg_alloc tcg_reg_alloc_aarch64 -#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_aarch64 -#define tcg_reg_alloc_call tcg_reg_alloc_call_aarch64 -#define tcg_reg_alloc_mov tcg_reg_alloc_mov_aarch64 -#define tcg_reg_alloc_movi tcg_reg_alloc_movi_aarch64 -#define tcg_reg_alloc_op tcg_reg_alloc_op_aarch64 -#define tcg_reg_alloc_start tcg_reg_alloc_start_aarch64 -#define tcg_reg_free tcg_reg_free_aarch64 -#define tcg_reg_sync tcg_reg_sync_aarch64 -#define tcg_set_frame tcg_set_frame_aarch64 -#define tcg_set_nop tcg_set_nop_aarch64 -#define tcg_swap_cond tcg_swap_cond_aarch64 -#define tcg_target_callee_save_regs tcg_target_callee_save_regs_aarch64 -#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_aarch64 -#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_aarch64 -#define tcg_target_const_match tcg_target_const_match_aarch64 -#define tcg_target_init tcg_target_init_aarch64 -#define tcg_target_qemu_prologue tcg_target_qemu_prologue_aarch64 -#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_aarch64 -#define tcg_temp_alloc tcg_temp_alloc_aarch64 -#define tcg_temp_free_i32 tcg_temp_free_i32_aarch64 -#define tcg_temp_free_i64 tcg_temp_free_i64_aarch64 -#define tcg_temp_free_internal tcg_temp_free_internal_aarch64 -#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_aarch64 -#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_aarch64 -#define tcg_temp_new_i32 tcg_temp_new_i32_aarch64 -#define tcg_temp_new_i64 tcg_temp_new_i64_aarch64 -#define tcg_temp_new_internal tcg_temp_new_internal_aarch64 -#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_aarch64 -#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_aarch64 -#define tdb_hash tdb_hash_aarch64 -#define teecr_write teecr_write_aarch64 -#define teehbr_access teehbr_access_aarch64 -#define temp_allocate_frame temp_allocate_frame_aarch64 -#define temp_dead temp_dead_aarch64 -#define temps_are_copies temps_are_copies_aarch64 -#define temp_save temp_save_aarch64 -#define temp_sync temp_sync_aarch64 -#define tgen_arithi tgen_arithi_aarch64 -#define tgen_arithr tgen_arithr_aarch64 -#define thumb2_logic_op thumb2_logic_op_aarch64 -#define ti925t_initfn ti925t_initfn_aarch64 -#define tlb_add_large_page tlb_add_large_page_aarch64 -#define tlb_flush_entry tlb_flush_entry_aarch64 -#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_aarch64 -#define tlbi_aa64_asid_write tlbi_aa64_asid_write_aarch64 -#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_aarch64 -#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_aarch64 -#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_aarch64 -#define tlbi_aa64_va_write tlbi_aa64_va_write_aarch64 -#define tlbiall_is_write tlbiall_is_write_aarch64 -#define tlbiall_write tlbiall_write_aarch64 -#define tlbiasid_is_write tlbiasid_is_write_aarch64 -#define tlbiasid_write tlbiasid_write_aarch64 -#define tlbimvaa_is_write tlbimvaa_is_write_aarch64 -#define tlbimvaa_write tlbimvaa_write_aarch64 -#define tlbimva_is_write tlbimva_is_write_aarch64 -#define tlbimva_write tlbimva_write_aarch64 -#define tlb_is_dirty_ram tlb_is_dirty_ram_aarch64 -#define tlb_protect_code tlb_protect_code_aarch64 -#define tlb_reset_dirty_range tlb_reset_dirty_range_aarch64 -#define 
tlb_reset_dirty_range_all tlb_reset_dirty_range_all_aarch64 -#define tlb_set_dirty tlb_set_dirty_aarch64 -#define tlb_set_dirty1 tlb_set_dirty1_aarch64 -#define tlb_unprotect_code_phys tlb_unprotect_code_phys_aarch64 -#define tlb_vaddr_to_host tlb_vaddr_to_host_aarch64 -#define token_get_type token_get_type_aarch64 -#define token_get_value token_get_value_aarch64 -#define token_is_escape token_is_escape_aarch64 -#define token_is_keyword token_is_keyword_aarch64 -#define token_is_operator token_is_operator_aarch64 -#define tokens_append_from_iter tokens_append_from_iter_aarch64 -#define to_qiv to_qiv_aarch64 -#define to_qov to_qov_aarch64 -#define tosa_init tosa_init_aarch64 -#define tosa_machine_init tosa_machine_init_aarch64 -#define tswap32 tswap32_aarch64 -#define tswap64 tswap64_aarch64 -#define type_class_get_size type_class_get_size_aarch64 -#define type_get_by_name type_get_by_name_aarch64 -#define type_get_parent type_get_parent_aarch64 -#define type_has_parent type_has_parent_aarch64 -#define type_initialize type_initialize_aarch64 -#define type_initialize_interface type_initialize_interface_aarch64 -#define type_is_ancestor type_is_ancestor_aarch64 -#define type_new type_new_aarch64 -#define type_object_get_size type_object_get_size_aarch64 -#define type_register_internal type_register_internal_aarch64 -#define type_table_add type_table_add_aarch64 -#define type_table_get type_table_get_aarch64 -#define type_table_lookup type_table_lookup_aarch64 -#define uint16_to_float32 uint16_to_float32_aarch64 -#define uint16_to_float64 uint16_to_float64_aarch64 -#define uint32_to_float32 uint32_to_float32_aarch64 -#define uint32_to_float64 uint32_to_float64_aarch64 -#define uint64_to_float128 uint64_to_float128_aarch64 -#define uint64_to_float32 uint64_to_float32_aarch64 -#define uint64_to_float64 uint64_to_float64_aarch64 -#define unassigned_io_ops unassigned_io_ops_aarch64 -#define unassigned_io_read unassigned_io_read_aarch64 -#define unassigned_io_write unassigned_io_write_aarch64 -#define unassigned_mem_accepts unassigned_mem_accepts_aarch64 -#define unassigned_mem_ops unassigned_mem_ops_aarch64 -#define unassigned_mem_read unassigned_mem_read_aarch64 -#define unassigned_mem_write unassigned_mem_write_aarch64 -#define update_spsel update_spsel_aarch64 -#define v6_cp_reginfo v6_cp_reginfo_aarch64 -#define v6k_cp_reginfo v6k_cp_reginfo_aarch64 -#define v7_cp_reginfo v7_cp_reginfo_aarch64 -#define v7mp_cp_reginfo v7mp_cp_reginfo_aarch64 -#define v7m_pop v7m_pop_aarch64 -#define v7m_push v7m_push_aarch64 -#define v8_cp_reginfo v8_cp_reginfo_aarch64 -#define v8_el2_cp_reginfo v8_el2_cp_reginfo_aarch64 -#define v8_el3_cp_reginfo v8_el3_cp_reginfo_aarch64 -#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_aarch64 -#define vapa_cp_reginfo vapa_cp_reginfo_aarch64 -#define vbar_write vbar_write_aarch64 -#define vfp_exceptbits_from_host vfp_exceptbits_from_host_aarch64 -#define vfp_exceptbits_to_host vfp_exceptbits_to_host_aarch64 -#define vfp_get_fpcr vfp_get_fpcr_aarch64 -#define vfp_get_fpscr vfp_get_fpscr_aarch64 -#define vfp_get_fpsr vfp_get_fpsr_aarch64 -#define vfp_reg_offset vfp_reg_offset_aarch64 -#define vfp_set_fpcr vfp_set_fpcr_aarch64 -#define vfp_set_fpscr vfp_set_fpscr_aarch64 -#define vfp_set_fpsr vfp_set_fpsr_aarch64 -#define visit_end_implicit_struct visit_end_implicit_struct_aarch64 -#define visit_end_list visit_end_list_aarch64 -#define visit_end_struct visit_end_struct_aarch64 -#define visit_end_union visit_end_union_aarch64 -#define visit_get_next_type 
visit_get_next_type_aarch64 -#define visit_next_list visit_next_list_aarch64 -#define visit_optional visit_optional_aarch64 -#define visit_start_implicit_struct visit_start_implicit_struct_aarch64 -#define visit_start_list visit_start_list_aarch64 -#define visit_start_struct visit_start_struct_aarch64 -#define visit_start_union visit_start_union_aarch64 -#define vmsa_cp_reginfo vmsa_cp_reginfo_aarch64 -#define vmsa_tcr_el1_write vmsa_tcr_el1_write_aarch64 -#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_aarch64 -#define vmsa_ttbcr_reset vmsa_ttbcr_reset_aarch64 -#define vmsa_ttbcr_write vmsa_ttbcr_write_aarch64 -#define vmsa_ttbr_write vmsa_ttbr_write_aarch64 -#define write_cpustate_to_list write_cpustate_to_list_aarch64 -#define write_list_to_cpustate write_list_to_cpustate_aarch64 -#define write_raw_cp_reg write_raw_cp_reg_aarch64 -#define X86CPURegister32_lookup X86CPURegister32_lookup_aarch64 -#define x86_op_defs x86_op_defs_aarch64 -#define xpsr_read xpsr_read_aarch64 -#define xpsr_write xpsr_write_aarch64 -#define xscale_cpar_write xscale_cpar_write_aarch64 -#define xscale_cp_reginfo xscale_cp_reginfo_aarch64 -#define ARM64_REGS_STORAGE_SIZE ARM64_REGS_STORAGE_SIZE_aarch64 -#define arm64_release arm64_release_aarch64 -#define arm64_reg_reset arm64_reg_reset_aarch64 -#define arm64_reg_read arm64_reg_read_aarch64 -#define arm64_reg_write arm64_reg_write_aarch64 -#define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64 -#define aarch64_cpu_register_types aarch64_cpu_register_types_aarch64 +#define helper_crypto_sha512h helper_crypto_sha512h_aarch64 +#define helper_crypto_sha512h2 helper_crypto_sha512h2_aarch64 +#define helper_crypto_sha512su0 helper_crypto_sha512su0_aarch64 +#define helper_crypto_sha512su1 helper_crypto_sha512su1_aarch64 +#define helper_crypto_sm3partw1 helper_crypto_sm3partw1_aarch64 +#define helper_crypto_sm3partw2 helper_crypto_sm3partw2_aarch64 +#define helper_crypto_sm3tt helper_crypto_sm3tt_aarch64 +#define helper_crypto_sm4e helper_crypto_sm4e_aarch64 +#define helper_crypto_sm4ekey helper_crypto_sm4ekey_aarch64 +#define helper_check_breakpoints helper_check_breakpoints_aarch64 +#define arm_debug_check_watchpoint arm_debug_check_watchpoint_aarch64 +#define arm_debug_excp_handler arm_debug_excp_handler_aarch64 +#define arm_adjust_watchpoint_address arm_adjust_watchpoint_address_aarch64 #define helper_udiv64 helper_udiv64_aarch64 #define helper_sdiv64 helper_sdiv64_aarch64 -#define helper_cls64 helper_cls64_aarch64 -#define helper_cls32 helper_cls32_aarch64 #define helper_rbit64 helper_rbit64_aarch64 +#define helper_msr_i_spsel helper_msr_i_spsel_aarch64 +#define helper_msr_i_daifset helper_msr_i_daifset_aarch64 +#define helper_msr_i_daifclear helper_msr_i_daifclear_aarch64 +#define helper_vfp_cmph_a64 helper_vfp_cmph_a64_aarch64 +#define helper_vfp_cmpeh_a64 helper_vfp_cmpeh_a64_aarch64 #define helper_vfp_cmps_a64 helper_vfp_cmps_a64_aarch64 #define helper_vfp_cmpes_a64 helper_vfp_cmpes_a64_aarch64 #define helper_vfp_cmpd_a64 helper_vfp_cmpd_a64_aarch64 @@ -3039,18 +1323,1653 @@ #define helper_neon_ceq_f64 helper_neon_ceq_f64_aarch64 #define helper_neon_cge_f64 helper_neon_cge_f64_aarch64 #define helper_neon_cgt_f64 helper_neon_cgt_f64_aarch64 +#define helper_recpsf_f16 helper_recpsf_f16_aarch64 #define helper_recpsf_f32 helper_recpsf_f32_aarch64 #define helper_recpsf_f64 helper_recpsf_f64_aarch64 +#define helper_rsqrtsf_f16 helper_rsqrtsf_f16_aarch64 #define helper_rsqrtsf_f32 helper_rsqrtsf_f32_aarch64 #define helper_rsqrtsf_f64 helper_rsqrtsf_f64_aarch64 #define 
helper_neon_addlp_s8 helper_neon_addlp_s8_aarch64 #define helper_neon_addlp_u8 helper_neon_addlp_u8_aarch64 #define helper_neon_addlp_s16 helper_neon_addlp_s16_aarch64 #define helper_neon_addlp_u16 helper_neon_addlp_u16_aarch64 +#define helper_frecpx_f16 helper_frecpx_f16_aarch64 #define helper_frecpx_f32 helper_frecpx_f32_aarch64 #define helper_frecpx_f64 helper_frecpx_f64_aarch64 #define helper_fcvtx_f64_to_f32 helper_fcvtx_f64_to_f32_aarch64 #define helper_crc32_64 helper_crc32_64_aarch64 #define helper_crc32c_64 helper_crc32c_64_aarch64 -#define aarch64_cpu_do_interrupt aarch64_cpu_do_interrupt_aarch64 +#define helper_paired_cmpxchg64_le helper_paired_cmpxchg64_le_aarch64 +#define helper_paired_cmpxchg64_le_parallel helper_paired_cmpxchg64_le_parallel_aarch64 +#define helper_paired_cmpxchg64_be helper_paired_cmpxchg64_be_aarch64 +#define helper_paired_cmpxchg64_be_parallel helper_paired_cmpxchg64_be_parallel_aarch64 +#define helper_casp_le_parallel helper_casp_le_parallel_aarch64 +#define helper_casp_be_parallel helper_casp_be_parallel_aarch64 +#define helper_advsimd_addh helper_advsimd_addh_aarch64 +#define helper_advsimd_subh helper_advsimd_subh_aarch64 +#define helper_advsimd_mulh helper_advsimd_mulh_aarch64 +#define helper_advsimd_divh helper_advsimd_divh_aarch64 +#define helper_advsimd_minh helper_advsimd_minh_aarch64 +#define helper_advsimd_maxh helper_advsimd_maxh_aarch64 +#define helper_advsimd_minnumh helper_advsimd_minnumh_aarch64 +#define helper_advsimd_maxnumh helper_advsimd_maxnumh_aarch64 +#define helper_advsimd_add2h helper_advsimd_add2h_aarch64 +#define helper_advsimd_sub2h helper_advsimd_sub2h_aarch64 +#define helper_advsimd_mul2h helper_advsimd_mul2h_aarch64 +#define helper_advsimd_div2h helper_advsimd_div2h_aarch64 +#define helper_advsimd_min2h helper_advsimd_min2h_aarch64 +#define helper_advsimd_max2h helper_advsimd_max2h_aarch64 +#define helper_advsimd_minnum2h helper_advsimd_minnum2h_aarch64 +#define helper_advsimd_maxnum2h helper_advsimd_maxnum2h_aarch64 +#define helper_advsimd_mulxh helper_advsimd_mulxh_aarch64 +#define helper_advsimd_mulx2h helper_advsimd_mulx2h_aarch64 +#define helper_advsimd_muladdh helper_advsimd_muladdh_aarch64 +#define helper_advsimd_muladd2h helper_advsimd_muladd2h_aarch64 +#define helper_advsimd_ceq_f16 helper_advsimd_ceq_f16_aarch64 +#define helper_advsimd_cge_f16 helper_advsimd_cge_f16_aarch64 +#define helper_advsimd_cgt_f16 helper_advsimd_cgt_f16_aarch64 +#define helper_advsimd_acge_f16 helper_advsimd_acge_f16_aarch64 +#define helper_advsimd_acgt_f16 helper_advsimd_acgt_f16_aarch64 +#define helper_advsimd_rinth_exact helper_advsimd_rinth_exact_aarch64 +#define helper_advsimd_rinth helper_advsimd_rinth_aarch64 +#define helper_advsimd_f16tosinth helper_advsimd_f16tosinth_aarch64 +#define helper_advsimd_f16touinth helper_advsimd_f16touinth_aarch64 +#define helper_exception_return helper_exception_return_aarch64 +#define helper_sqrt_f16 helper_sqrt_f16_aarch64 +#define helper_dc_zva helper_dc_zva_aarch64 +#define read_raw_cp_reg read_raw_cp_reg_aarch64 +#define pmu_init pmu_init_aarch64 +#define pmu_op_start pmu_op_start_aarch64 +#define pmu_op_finish pmu_op_finish_aarch64 +#define pmu_pre_el_change pmu_pre_el_change_aarch64 +#define pmu_post_el_change pmu_post_el_change_aarch64 +#define arm_pmu_timer_cb arm_pmu_timer_cb_aarch64 +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_aarch64 +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_aarch64 +#define arm_gt_htimer_cb arm_gt_htimer_cb_aarch64 +#define arm_gt_stimer_cb arm_gt_stimer_cb_aarch64 +#define 
arm_gt_hvtimer_cb arm_gt_hvtimer_cb_aarch64 +#define arm_hcr_el2_eff arm_hcr_el2_eff_aarch64 +#define sve_exception_el sve_exception_el_aarch64 +#define sve_zcr_len_for_el sve_zcr_len_for_el_aarch64 +#define hw_watchpoint_update hw_watchpoint_update_aarch64 +#define hw_watchpoint_update_all hw_watchpoint_update_all_aarch64 +#define hw_breakpoint_update hw_breakpoint_update_aarch64 +#define hw_breakpoint_update_all hw_breakpoint_update_all_aarch64 +#define register_cp_regs_for_features register_cp_regs_for_features_aarch64 +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_aarch64 +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_aarch64 +#define modify_arm_cp_regs modify_arm_cp_regs_aarch64 +#define get_arm_cp_reginfo get_arm_cp_reginfo_aarch64 +#define arm_cp_write_ignore arm_cp_write_ignore_aarch64 +#define arm_cp_read_zero arm_cp_read_zero_aarch64 +#define arm_cp_reset_ignore arm_cp_reset_ignore_aarch64 +#define cpsr_read cpsr_read_aarch64 +#define cpsr_write cpsr_write_aarch64 +#define helper_sxtb16 helper_sxtb16_aarch64 +#define helper_uxtb16 helper_uxtb16_aarch64 +#define helper_sdiv helper_sdiv_aarch64 +#define helper_udiv helper_udiv_aarch64 +#define helper_rbit helper_rbit_aarch64 +#define arm_phys_excp_target_el arm_phys_excp_target_el_aarch64 +#define aarch64_sync_32_to_64 aarch64_sync_32_to_64_aarch64 +#define aarch64_sync_64_to_32 aarch64_sync_64_to_32_aarch64 +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_aarch64 +#define arm_sctlr arm_sctlr_aarch64 +#define arm_s1_regime_using_lpae_format arm_s1_regime_using_lpae_format_aarch64 +#define aa64_va_parameters aa64_va_parameters_aarch64 +#define v8m_security_lookup v8m_security_lookup_aarch64 +#define pmsav8_mpu_lookup pmsav8_mpu_lookup_aarch64 +#define get_phys_addr get_phys_addr_aarch64 +#define arm_cpu_get_phys_page_attrs_debug arm_cpu_get_phys_page_attrs_debug_aarch64 +#define helper_qadd16 helper_qadd16_aarch64 +#define helper_qadd8 helper_qadd8_aarch64 +#define helper_qsub16 helper_qsub16_aarch64 +#define helper_qsub8 helper_qsub8_aarch64 +#define helper_qsubaddx helper_qsubaddx_aarch64 +#define helper_qaddsubx helper_qaddsubx_aarch64 +#define helper_uqadd16 helper_uqadd16_aarch64 +#define helper_uqadd8 helper_uqadd8_aarch64 +#define helper_uqsub16 helper_uqsub16_aarch64 +#define helper_uqsub8 helper_uqsub8_aarch64 +#define helper_uqsubaddx helper_uqsubaddx_aarch64 +#define helper_uqaddsubx helper_uqaddsubx_aarch64 +#define helper_sadd16 helper_sadd16_aarch64 +#define helper_sadd8 helper_sadd8_aarch64 +#define helper_ssub16 helper_ssub16_aarch64 +#define helper_ssub8 helper_ssub8_aarch64 +#define helper_ssubaddx helper_ssubaddx_aarch64 +#define helper_saddsubx helper_saddsubx_aarch64 +#define helper_uadd16 helper_uadd16_aarch64 +#define helper_uadd8 helper_uadd8_aarch64 +#define helper_usub16 helper_usub16_aarch64 +#define helper_usub8 helper_usub8_aarch64 +#define helper_usubaddx helper_usubaddx_aarch64 +#define helper_uaddsubx helper_uaddsubx_aarch64 +#define helper_shadd16 helper_shadd16_aarch64 +#define helper_shadd8 helper_shadd8_aarch64 +#define helper_shsub16 helper_shsub16_aarch64 +#define helper_shsub8 helper_shsub8_aarch64 +#define helper_shsubaddx helper_shsubaddx_aarch64 +#define helper_shaddsubx helper_shaddsubx_aarch64 +#define helper_uhadd16 helper_uhadd16_aarch64 +#define helper_uhadd8 helper_uhadd8_aarch64 +#define helper_uhsub16 helper_uhsub16_aarch64 +#define helper_uhsub8 helper_uhsub8_aarch64 +#define helper_uhsubaddx helper_uhsubaddx_aarch64 +#define 
helper_uhaddsubx helper_uhaddsubx_aarch64 +#define helper_usad8 helper_usad8_aarch64 +#define helper_sel_flags helper_sel_flags_aarch64 +#define helper_crc32 helper_crc32_aarch64 +#define helper_crc32c helper_crc32c_aarch64 +#define fp_exception_el fp_exception_el_aarch64 +#define arm_mmu_idx_to_el arm_mmu_idx_to_el_aarch64 +#define arm_mmu_idx_el arm_mmu_idx_el_aarch64 +#define arm_mmu_idx arm_mmu_idx_aarch64 +#define arm_stage1_mmu_idx arm_stage1_mmu_idx_aarch64 +#define arm_rebuild_hflags arm_rebuild_hflags_aarch64 +#define helper_rebuild_hflags_m32_newel helper_rebuild_hflags_m32_newel_aarch64 +#define helper_rebuild_hflags_m32 helper_rebuild_hflags_m32_aarch64 +#define helper_rebuild_hflags_a32_newel helper_rebuild_hflags_a32_newel_aarch64 +#define helper_rebuild_hflags_a32 helper_rebuild_hflags_a32_aarch64 +#define helper_rebuild_hflags_a64 helper_rebuild_hflags_a64_aarch64 +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_aarch64 +#define aarch64_sve_narrow_vq aarch64_sve_narrow_vq_aarch64 +#define aarch64_sve_change_el aarch64_sve_change_el_aarch64 +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_aarch64 +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_aarch64 +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_aarch64 +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_aarch64 +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_aarch64 +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_aarch64 +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_aarch64 +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_aarch64 +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_aarch64 +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_aarch64 +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_aarch64 +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_aarch64 +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_aarch64 +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_aarch64 +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_aarch64 +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_aarch64 +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_aarch64 +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_aarch64 +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_aarch64 +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_aarch64 +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_aarch64 +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_aarch64 +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_aarch64 +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_aarch64 +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_aarch64 +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_aarch64 +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_aarch64 +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_aarch64 +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_aarch64 +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_aarch64 +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_aarch64 +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_aarch64 +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_aarch64 +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_aarch64 +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_aarch64 +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_aarch64 +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_aarch64 +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_aarch64 +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_aarch64 +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_aarch64 +#define helper_iwmmxt_minub 
helper_iwmmxt_minub_aarch64 +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_aarch64 +#define helper_iwmmxt_minul helper_iwmmxt_minul_aarch64 +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_aarch64 +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_aarch64 +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_aarch64 +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_aarch64 +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_aarch64 +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_aarch64 +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_aarch64 +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_aarch64 +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_aarch64 +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_aarch64 +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_aarch64 +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_aarch64 +#define helper_iwmmxt_subub helper_iwmmxt_subub_aarch64 +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_aarch64 +#define helper_iwmmxt_subul helper_iwmmxt_subul_aarch64 +#define helper_iwmmxt_addub helper_iwmmxt_addub_aarch64 +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_aarch64 +#define helper_iwmmxt_addul helper_iwmmxt_addul_aarch64 +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_aarch64 +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_aarch64 +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_aarch64 +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_aarch64 +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_aarch64 +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_aarch64 +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_aarch64 +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_aarch64 +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_aarch64 +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_aarch64 +#define helper_iwmmxt_align helper_iwmmxt_align_aarch64 +#define helper_iwmmxt_insr helper_iwmmxt_insr_aarch64 +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_aarch64 +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_aarch64 +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_aarch64 +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_aarch64 +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_aarch64 +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_aarch64 +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_aarch64 +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_aarch64 +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_aarch64 +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_aarch64 +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_aarch64 +#define helper_iwmmxt_srll helper_iwmmxt_srll_aarch64 +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_aarch64 +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_aarch64 +#define helper_iwmmxt_slll helper_iwmmxt_slll_aarch64 +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_aarch64 +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_aarch64 +#define helper_iwmmxt_sral helper_iwmmxt_sral_aarch64 +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_aarch64 +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_aarch64 +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_aarch64 +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_aarch64 +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_aarch64 +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_aarch64 +#define helper_iwmmxt_packul helper_iwmmxt_packul_aarch64 +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_aarch64 +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_aarch64 +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_aarch64 +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_aarch64 +#define helper_iwmmxt_muladdsl 
helper_iwmmxt_muladdsl_aarch64 +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_aarch64 +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_aarch64 +#define armv7m_nvic_set_pending armv7m_nvic_set_pending_aarch64 +#define helper_v7m_preserve_fp_state helper_v7m_preserve_fp_state_aarch64 +#define write_v7m_exception write_v7m_exception_aarch64 +#define helper_v7m_bxns helper_v7m_bxns_aarch64 +#define helper_v7m_blxns helper_v7m_blxns_aarch64 +#define armv7m_nvic_neg_prio_requested armv7m_nvic_neg_prio_requested_aarch64 +#define helper_v7m_vlstm helper_v7m_vlstm_aarch64 +#define helper_v7m_vlldm helper_v7m_vlldm_aarch64 +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_aarch64 +#define helper_v7m_mrs helper_v7m_mrs_aarch64 +#define helper_v7m_msr helper_v7m_msr_aarch64 +#define helper_v7m_tt helper_v7m_tt_aarch64 +#define arm_v7m_mmu_idx_all arm_v7m_mmu_idx_all_aarch64 +#define arm_v7m_mmu_idx_for_secstate_and_priv arm_v7m_mmu_idx_for_secstate_and_priv_aarch64 +#define arm_v7m_mmu_idx_for_secstate arm_v7m_mmu_idx_for_secstate_aarch64 +#define helper_neon_qadd_u8 helper_neon_qadd_u8_aarch64 +#define helper_neon_qadd_u16 helper_neon_qadd_u16_aarch64 +#define helper_neon_qadd_u32 helper_neon_qadd_u32_aarch64 +#define helper_neon_qadd_u64 helper_neon_qadd_u64_aarch64 +#define helper_neon_qadd_s8 helper_neon_qadd_s8_aarch64 +#define helper_neon_qadd_s16 helper_neon_qadd_s16_aarch64 +#define helper_neon_qadd_s32 helper_neon_qadd_s32_aarch64 +#define helper_neon_qadd_s64 helper_neon_qadd_s64_aarch64 +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_aarch64 +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_aarch64 +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_aarch64 +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_aarch64 +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_aarch64 +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_aarch64 +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_aarch64 +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_aarch64 +#define helper_neon_qsub_u8 helper_neon_qsub_u8_aarch64 +#define helper_neon_qsub_u16 helper_neon_qsub_u16_aarch64 +#define helper_neon_qsub_u32 helper_neon_qsub_u32_aarch64 +#define helper_neon_qsub_u64 helper_neon_qsub_u64_aarch64 +#define helper_neon_qsub_s8 helper_neon_qsub_s8_aarch64 +#define helper_neon_qsub_s16 helper_neon_qsub_s16_aarch64 +#define helper_neon_qsub_s32 helper_neon_qsub_s32_aarch64 +#define helper_neon_qsub_s64 helper_neon_qsub_s64_aarch64 +#define helper_neon_hadd_s8 helper_neon_hadd_s8_aarch64 +#define helper_neon_hadd_u8 helper_neon_hadd_u8_aarch64 +#define helper_neon_hadd_s16 helper_neon_hadd_s16_aarch64 +#define helper_neon_hadd_u16 helper_neon_hadd_u16_aarch64 +#define helper_neon_hadd_s32 helper_neon_hadd_s32_aarch64 +#define helper_neon_hadd_u32 helper_neon_hadd_u32_aarch64 +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_aarch64 +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_aarch64 +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_aarch64 +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_aarch64 +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_aarch64 +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_aarch64 +#define helper_neon_hsub_s8 helper_neon_hsub_s8_aarch64 +#define helper_neon_hsub_u8 helper_neon_hsub_u8_aarch64 +#define helper_neon_hsub_s16 helper_neon_hsub_s16_aarch64 +#define helper_neon_hsub_u16 helper_neon_hsub_u16_aarch64 +#define helper_neon_hsub_s32 helper_neon_hsub_s32_aarch64 +#define helper_neon_hsub_u32 helper_neon_hsub_u32_aarch64 
+#define helper_neon_cgt_s8 helper_neon_cgt_s8_aarch64 +#define helper_neon_cgt_u8 helper_neon_cgt_u8_aarch64 +#define helper_neon_cgt_s16 helper_neon_cgt_s16_aarch64 +#define helper_neon_cgt_u16 helper_neon_cgt_u16_aarch64 +#define helper_neon_cgt_s32 helper_neon_cgt_s32_aarch64 +#define helper_neon_cgt_u32 helper_neon_cgt_u32_aarch64 +#define helper_neon_cge_s8 helper_neon_cge_s8_aarch64 +#define helper_neon_cge_u8 helper_neon_cge_u8_aarch64 +#define helper_neon_cge_s16 helper_neon_cge_s16_aarch64 +#define helper_neon_cge_u16 helper_neon_cge_u16_aarch64 +#define helper_neon_cge_s32 helper_neon_cge_s32_aarch64 +#define helper_neon_cge_u32 helper_neon_cge_u32_aarch64 +#define helper_neon_pmin_s8 helper_neon_pmin_s8_aarch64 +#define helper_neon_pmin_u8 helper_neon_pmin_u8_aarch64 +#define helper_neon_pmin_s16 helper_neon_pmin_s16_aarch64 +#define helper_neon_pmin_u16 helper_neon_pmin_u16_aarch64 +#define helper_neon_pmax_s8 helper_neon_pmax_s8_aarch64 +#define helper_neon_pmax_u8 helper_neon_pmax_u8_aarch64 +#define helper_neon_pmax_s16 helper_neon_pmax_s16_aarch64 +#define helper_neon_pmax_u16 helper_neon_pmax_u16_aarch64 +#define helper_neon_abd_s8 helper_neon_abd_s8_aarch64 +#define helper_neon_abd_u8 helper_neon_abd_u8_aarch64 +#define helper_neon_abd_s16 helper_neon_abd_s16_aarch64 +#define helper_neon_abd_u16 helper_neon_abd_u16_aarch64 +#define helper_neon_abd_s32 helper_neon_abd_s32_aarch64 +#define helper_neon_abd_u32 helper_neon_abd_u32_aarch64 +#define helper_neon_shl_u16 helper_neon_shl_u16_aarch64 +#define helper_neon_shl_s16 helper_neon_shl_s16_aarch64 +#define helper_neon_rshl_s8 helper_neon_rshl_s8_aarch64 +#define helper_neon_rshl_s16 helper_neon_rshl_s16_aarch64 +#define helper_neon_rshl_s32 helper_neon_rshl_s32_aarch64 +#define helper_neon_rshl_s64 helper_neon_rshl_s64_aarch64 +#define helper_neon_rshl_u8 helper_neon_rshl_u8_aarch64 +#define helper_neon_rshl_u16 helper_neon_rshl_u16_aarch64 +#define helper_neon_rshl_u32 helper_neon_rshl_u32_aarch64 +#define helper_neon_rshl_u64 helper_neon_rshl_u64_aarch64 +#define helper_neon_qshl_u8 helper_neon_qshl_u8_aarch64 +#define helper_neon_qshl_u16 helper_neon_qshl_u16_aarch64 +#define helper_neon_qshl_u32 helper_neon_qshl_u32_aarch64 +#define helper_neon_qshl_u64 helper_neon_qshl_u64_aarch64 +#define helper_neon_qshl_s8 helper_neon_qshl_s8_aarch64 +#define helper_neon_qshl_s16 helper_neon_qshl_s16_aarch64 +#define helper_neon_qshl_s32 helper_neon_qshl_s32_aarch64 +#define helper_neon_qshl_s64 helper_neon_qshl_s64_aarch64 +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_aarch64 +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_aarch64 +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_aarch64 +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_aarch64 +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_aarch64 +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_aarch64 +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_aarch64 +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_aarch64 +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_aarch64 +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_aarch64 +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_aarch64 +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_aarch64 +#define helper_neon_add_u8 helper_neon_add_u8_aarch64 +#define helper_neon_add_u16 helper_neon_add_u16_aarch64 +#define helper_neon_padd_u8 helper_neon_padd_u8_aarch64 +#define helper_neon_padd_u16 helper_neon_padd_u16_aarch64 +#define helper_neon_sub_u8 helper_neon_sub_u8_aarch64 +#define 
helper_neon_sub_u16 helper_neon_sub_u16_aarch64 +#define helper_neon_mul_u8 helper_neon_mul_u8_aarch64 +#define helper_neon_mul_u16 helper_neon_mul_u16_aarch64 +#define helper_neon_tst_u8 helper_neon_tst_u8_aarch64 +#define helper_neon_tst_u16 helper_neon_tst_u16_aarch64 +#define helper_neon_tst_u32 helper_neon_tst_u32_aarch64 +#define helper_neon_ceq_u8 helper_neon_ceq_u8_aarch64 +#define helper_neon_ceq_u16 helper_neon_ceq_u16_aarch64 +#define helper_neon_ceq_u32 helper_neon_ceq_u32_aarch64 +#define helper_neon_clz_u8 helper_neon_clz_u8_aarch64 +#define helper_neon_clz_u16 helper_neon_clz_u16_aarch64 +#define helper_neon_cls_s8 helper_neon_cls_s8_aarch64 +#define helper_neon_cls_s16 helper_neon_cls_s16_aarch64 +#define helper_neon_cls_s32 helper_neon_cls_s32_aarch64 +#define helper_neon_cnt_u8 helper_neon_cnt_u8_aarch64 +#define helper_neon_rbit_u8 helper_neon_rbit_u8_aarch64 +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_aarch64 +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_aarch64 +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_aarch64 +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_aarch64 +#define helper_neon_narrow_u8 helper_neon_narrow_u8_aarch64 +#define helper_neon_narrow_u16 helper_neon_narrow_u16_aarch64 +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_aarch64 +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_aarch64 +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_aarch64 +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_aarch64 +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_aarch64 +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_aarch64 +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_aarch64 +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_aarch64 +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_aarch64 +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_aarch64 +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_aarch64 +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_aarch64 +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_aarch64 +#define helper_neon_widen_u8 helper_neon_widen_u8_aarch64 +#define helper_neon_widen_s8 helper_neon_widen_s8_aarch64 +#define helper_neon_widen_u16 helper_neon_widen_u16_aarch64 +#define helper_neon_widen_s16 helper_neon_widen_s16_aarch64 +#define helper_neon_addl_u16 helper_neon_addl_u16_aarch64 +#define helper_neon_addl_u32 helper_neon_addl_u32_aarch64 +#define helper_neon_paddl_u16 helper_neon_paddl_u16_aarch64 +#define helper_neon_paddl_u32 helper_neon_paddl_u32_aarch64 +#define helper_neon_subl_u16 helper_neon_subl_u16_aarch64 +#define helper_neon_subl_u32 helper_neon_subl_u32_aarch64 +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_aarch64 +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_aarch64 +#define helper_neon_abdl_u16 helper_neon_abdl_u16_aarch64 +#define helper_neon_abdl_s16 helper_neon_abdl_s16_aarch64 +#define helper_neon_abdl_u32 helper_neon_abdl_u32_aarch64 +#define helper_neon_abdl_s32 helper_neon_abdl_s32_aarch64 +#define helper_neon_abdl_u64 helper_neon_abdl_u64_aarch64 +#define helper_neon_abdl_s64 helper_neon_abdl_s64_aarch64 +#define helper_neon_mull_u8 helper_neon_mull_u8_aarch64 +#define helper_neon_mull_s8 helper_neon_mull_s8_aarch64 +#define helper_neon_mull_u16 helper_neon_mull_u16_aarch64 +#define helper_neon_mull_s16 helper_neon_mull_s16_aarch64 
+#define helper_neon_negl_u16 helper_neon_negl_u16_aarch64
+#define helper_neon_negl_u32 helper_neon_negl_u32_aarch64
+#define helper_neon_qabs_s8 helper_neon_qabs_s8_aarch64
+#define helper_neon_qneg_s8 helper_neon_qneg_s8_aarch64
+#define helper_neon_qabs_s16 helper_neon_qabs_s16_aarch64
+#define helper_neon_qneg_s16 helper_neon_qneg_s16_aarch64
+#define helper_neon_qabs_s32 helper_neon_qabs_s32_aarch64
+#define helper_neon_qneg_s32 helper_neon_qneg_s32_aarch64
+#define helper_neon_qabs_s64 helper_neon_qabs_s64_aarch64
+#define helper_neon_qneg_s64 helper_neon_qneg_s64_aarch64
+#define helper_neon_abd_f32 helper_neon_abd_f32_aarch64
+#define helper_neon_ceq_f32 helper_neon_ceq_f32_aarch64
+#define helper_neon_cge_f32 helper_neon_cge_f32_aarch64
+#define helper_neon_cgt_f32 helper_neon_cgt_f32_aarch64
+#define helper_neon_acge_f32 helper_neon_acge_f32_aarch64
+#define helper_neon_acgt_f32 helper_neon_acgt_f32_aarch64
+#define helper_neon_acge_f64 helper_neon_acge_f64_aarch64
+#define helper_neon_acgt_f64 helper_neon_acgt_f64_aarch64
+#define helper_neon_qunzip8 helper_neon_qunzip8_aarch64
+#define helper_neon_qunzip16 helper_neon_qunzip16_aarch64
+#define helper_neon_qunzip32 helper_neon_qunzip32_aarch64
+#define helper_neon_unzip8 helper_neon_unzip8_aarch64
+#define helper_neon_unzip16 helper_neon_unzip16_aarch64
+#define helper_neon_qzip8 helper_neon_qzip8_aarch64
+#define helper_neon_qzip16 helper_neon_qzip16_aarch64
+#define helper_neon_qzip32 helper_neon_qzip32_aarch64
+#define helper_neon_zip8 helper_neon_zip8_aarch64
+#define helper_neon_zip16 helper_neon_zip16_aarch64
+#define raise_exception raise_exception_aarch64
+#define raise_exception_ra raise_exception_ra_aarch64
+#define helper_neon_tbl helper_neon_tbl_aarch64
+#define helper_v8m_stackcheck helper_v8m_stackcheck_aarch64
+#define helper_add_setq helper_add_setq_aarch64
+#define helper_add_saturate helper_add_saturate_aarch64
+#define helper_sub_saturate helper_sub_saturate_aarch64
+#define helper_add_usaturate helper_add_usaturate_aarch64
+#define helper_sub_usaturate helper_sub_usaturate_aarch64
+#define helper_ssat helper_ssat_aarch64
+#define helper_ssat16 helper_ssat16_aarch64
+#define helper_usat helper_usat_aarch64
+#define helper_usat16 helper_usat16_aarch64
+#define helper_setend helper_setend_aarch64
+#define helper_wfi helper_wfi_aarch64
+#define helper_wfe helper_wfe_aarch64
+#define helper_yield helper_yield_aarch64
+#define helper_exception_internal helper_exception_internal_aarch64
+#define helper_exception_with_syndrome helper_exception_with_syndrome_aarch64
+#define helper_exception_bkpt_insn helper_exception_bkpt_insn_aarch64
+#define helper_cpsr_read helper_cpsr_read_aarch64
+#define helper_cpsr_write helper_cpsr_write_aarch64
+#define helper_cpsr_write_eret helper_cpsr_write_eret_aarch64
+#define helper_get_user_reg helper_get_user_reg_aarch64
+#define helper_set_user_reg helper_set_user_reg_aarch64
+#define helper_set_r13_banked helper_set_r13_banked_aarch64
+#define helper_get_r13_banked helper_get_r13_banked_aarch64
+#define helper_msr_banked helper_msr_banked_aarch64
+#define helper_mrs_banked helper_mrs_banked_aarch64
+#define helper_access_check_cp_reg helper_access_check_cp_reg_aarch64
+#define helper_set_cp_reg helper_set_cp_reg_aarch64
+#define helper_get_cp_reg helper_get_cp_reg_aarch64
+#define helper_set_cp_reg64 helper_set_cp_reg64_aarch64
+#define helper_get_cp_reg64 helper_get_cp_reg64_aarch64
+#define helper_pre_hvc helper_pre_hvc_aarch64
+#define helper_pre_smc helper_pre_smc_aarch64
+#define helper_shl_cc helper_shl_cc_aarch64
+#define helper_shr_cc helper_shr_cc_aarch64
+#define helper_sar_cc helper_sar_cc_aarch64
+#define helper_ror_cc helper_ror_cc_aarch64
+#define helper_pacia helper_pacia_aarch64
+#define helper_pacib helper_pacib_aarch64
+#define helper_pacda helper_pacda_aarch64
+#define helper_pacdb helper_pacdb_aarch64
+#define helper_pacga helper_pacga_aarch64
+#define helper_autia helper_autia_aarch64
+#define helper_autib helper_autib_aarch64
+#define helper_autda helper_autda_aarch64
+#define helper_autdb helper_autdb_aarch64
+#define helper_xpaci helper_xpaci_aarch64
+#define helper_xpacd helper_xpacd_aarch64
+#define arm_is_psci_call arm_is_psci_call_aarch64
+#define arm_handle_psci_call arm_handle_psci_call_aarch64
+#define helper_sve_predtest1 helper_sve_predtest1_aarch64
+#define helper_sve_predtest helper_sve_predtest_aarch64
+#define helper_sve_and_pppp helper_sve_and_pppp_aarch64
+#define helper_sve_bic_pppp helper_sve_bic_pppp_aarch64
+#define helper_sve_eor_pppp helper_sve_eor_pppp_aarch64
+#define helper_sve_sel_pppp helper_sve_sel_pppp_aarch64
+#define helper_sve_orr_pppp helper_sve_orr_pppp_aarch64
+#define helper_sve_orn_pppp helper_sve_orn_pppp_aarch64
+#define helper_sve_nor_pppp helper_sve_nor_pppp_aarch64
+#define helper_sve_nand_pppp helper_sve_nand_pppp_aarch64
+#define helper_sve_and_zpzz_b helper_sve_and_zpzz_b_aarch64
+#define helper_sve_and_zpzz_h helper_sve_and_zpzz_h_aarch64
+#define helper_sve_and_zpzz_s helper_sve_and_zpzz_s_aarch64
+#define helper_sve_and_zpzz_d helper_sve_and_zpzz_d_aarch64
+#define helper_sve_orr_zpzz_b helper_sve_orr_zpzz_b_aarch64
+#define helper_sve_orr_zpzz_h helper_sve_orr_zpzz_h_aarch64
+#define helper_sve_orr_zpzz_s helper_sve_orr_zpzz_s_aarch64
+#define helper_sve_orr_zpzz_d helper_sve_orr_zpzz_d_aarch64
+#define helper_sve_eor_zpzz_b helper_sve_eor_zpzz_b_aarch64
+#define helper_sve_eor_zpzz_h helper_sve_eor_zpzz_h_aarch64
+#define helper_sve_eor_zpzz_s helper_sve_eor_zpzz_s_aarch64
+#define helper_sve_eor_zpzz_d helper_sve_eor_zpzz_d_aarch64
+#define helper_sve_bic_zpzz_b helper_sve_bic_zpzz_b_aarch64
+#define helper_sve_bic_zpzz_h helper_sve_bic_zpzz_h_aarch64
+#define helper_sve_bic_zpzz_s helper_sve_bic_zpzz_s_aarch64
+#define helper_sve_bic_zpzz_d helper_sve_bic_zpzz_d_aarch64
+#define helper_sve_add_zpzz_b helper_sve_add_zpzz_b_aarch64
+#define helper_sve_add_zpzz_h helper_sve_add_zpzz_h_aarch64
+#define helper_sve_add_zpzz_s helper_sve_add_zpzz_s_aarch64
+#define helper_sve_add_zpzz_d helper_sve_add_zpzz_d_aarch64
+#define helper_sve_sub_zpzz_b helper_sve_sub_zpzz_b_aarch64
+#define helper_sve_sub_zpzz_h helper_sve_sub_zpzz_h_aarch64
+#define helper_sve_sub_zpzz_s helper_sve_sub_zpzz_s_aarch64
+#define helper_sve_sub_zpzz_d helper_sve_sub_zpzz_d_aarch64
+#define helper_sve_smax_zpzz_b helper_sve_smax_zpzz_b_aarch64
+#define helper_sve_smax_zpzz_h helper_sve_smax_zpzz_h_aarch64
+#define helper_sve_smax_zpzz_s helper_sve_smax_zpzz_s_aarch64
+#define helper_sve_smax_zpzz_d helper_sve_smax_zpzz_d_aarch64
+#define helper_sve_umax_zpzz_b helper_sve_umax_zpzz_b_aarch64
+#define helper_sve_umax_zpzz_h helper_sve_umax_zpzz_h_aarch64
+#define helper_sve_umax_zpzz_s helper_sve_umax_zpzz_s_aarch64
+#define helper_sve_umax_zpzz_d helper_sve_umax_zpzz_d_aarch64
+#define helper_sve_smin_zpzz_b helper_sve_smin_zpzz_b_aarch64
+#define helper_sve_smin_zpzz_h helper_sve_smin_zpzz_h_aarch64
+#define helper_sve_smin_zpzz_s helper_sve_smin_zpzz_s_aarch64
+#define helper_sve_smin_zpzz_d helper_sve_smin_zpzz_d_aarch64
+#define helper_sve_umin_zpzz_b helper_sve_umin_zpzz_b_aarch64
+#define helper_sve_umin_zpzz_h helper_sve_umin_zpzz_h_aarch64
+#define helper_sve_umin_zpzz_s helper_sve_umin_zpzz_s_aarch64
+#define helper_sve_umin_zpzz_d helper_sve_umin_zpzz_d_aarch64
+#define helper_sve_sabd_zpzz_b helper_sve_sabd_zpzz_b_aarch64
+#define helper_sve_sabd_zpzz_h helper_sve_sabd_zpzz_h_aarch64
+#define helper_sve_sabd_zpzz_s helper_sve_sabd_zpzz_s_aarch64
+#define helper_sve_sabd_zpzz_d helper_sve_sabd_zpzz_d_aarch64
+#define helper_sve_uabd_zpzz_b helper_sve_uabd_zpzz_b_aarch64
+#define helper_sve_uabd_zpzz_h helper_sve_uabd_zpzz_h_aarch64
+#define helper_sve_uabd_zpzz_s helper_sve_uabd_zpzz_s_aarch64
+#define helper_sve_uabd_zpzz_d helper_sve_uabd_zpzz_d_aarch64
+#define helper_sve_mul_zpzz_b helper_sve_mul_zpzz_b_aarch64
+#define helper_sve_mul_zpzz_h helper_sve_mul_zpzz_h_aarch64
+#define helper_sve_mul_zpzz_s helper_sve_mul_zpzz_s_aarch64
+#define helper_sve_mul_zpzz_d helper_sve_mul_zpzz_d_aarch64
+#define helper_sve_smulh_zpzz_b helper_sve_smulh_zpzz_b_aarch64
+#define helper_sve_smulh_zpzz_h helper_sve_smulh_zpzz_h_aarch64
+#define helper_sve_smulh_zpzz_s helper_sve_smulh_zpzz_s_aarch64
+#define helper_sve_smulh_zpzz_d helper_sve_smulh_zpzz_d_aarch64
+#define helper_sve_umulh_zpzz_b helper_sve_umulh_zpzz_b_aarch64
+#define helper_sve_umulh_zpzz_h helper_sve_umulh_zpzz_h_aarch64
+#define helper_sve_umulh_zpzz_s helper_sve_umulh_zpzz_s_aarch64
+#define helper_sve_umulh_zpzz_d helper_sve_umulh_zpzz_d_aarch64
+#define helper_sve_sdiv_zpzz_s helper_sve_sdiv_zpzz_s_aarch64
+#define helper_sve_sdiv_zpzz_d helper_sve_sdiv_zpzz_d_aarch64
+#define helper_sve_udiv_zpzz_s helper_sve_udiv_zpzz_s_aarch64
+#define helper_sve_udiv_zpzz_d helper_sve_udiv_zpzz_d_aarch64
+#define helper_sve_asr_zpzz_b helper_sve_asr_zpzz_b_aarch64
+#define helper_sve_lsr_zpzz_b helper_sve_lsr_zpzz_b_aarch64
+#define helper_sve_lsl_zpzz_b helper_sve_lsl_zpzz_b_aarch64
+#define helper_sve_asr_zpzz_h helper_sve_asr_zpzz_h_aarch64
+#define helper_sve_lsr_zpzz_h helper_sve_lsr_zpzz_h_aarch64
+#define helper_sve_lsl_zpzz_h helper_sve_lsl_zpzz_h_aarch64
+#define helper_sve_asr_zpzz_s helper_sve_asr_zpzz_s_aarch64
+#define helper_sve_lsr_zpzz_s helper_sve_lsr_zpzz_s_aarch64
+#define helper_sve_lsl_zpzz_s helper_sve_lsl_zpzz_s_aarch64
+#define helper_sve_asr_zpzz_d helper_sve_asr_zpzz_d_aarch64
+#define helper_sve_lsr_zpzz_d helper_sve_lsr_zpzz_d_aarch64
+#define helper_sve_lsl_zpzz_d helper_sve_lsl_zpzz_d_aarch64
+#define helper_sve_asr_zpzw_b helper_sve_asr_zpzw_b_aarch64
+#define helper_sve_lsr_zpzw_b helper_sve_lsr_zpzw_b_aarch64
+#define helper_sve_lsl_zpzw_b helper_sve_lsl_zpzw_b_aarch64
+#define helper_sve_asr_zpzw_h helper_sve_asr_zpzw_h_aarch64
+#define helper_sve_lsr_zpzw_h helper_sve_lsr_zpzw_h_aarch64
+#define helper_sve_lsl_zpzw_h helper_sve_lsl_zpzw_h_aarch64
+#define helper_sve_asr_zpzw_s helper_sve_asr_zpzw_s_aarch64
+#define helper_sve_lsr_zpzw_s helper_sve_lsr_zpzw_s_aarch64
+#define helper_sve_lsl_zpzw_s helper_sve_lsl_zpzw_s_aarch64
+#define helper_sve_cls_b helper_sve_cls_b_aarch64
+#define helper_sve_cls_h helper_sve_cls_h_aarch64
+#define helper_sve_cls_s helper_sve_cls_s_aarch64
+#define helper_sve_cls_d helper_sve_cls_d_aarch64
+#define helper_sve_clz_b helper_sve_clz_b_aarch64
+#define helper_sve_clz_h helper_sve_clz_h_aarch64
+#define helper_sve_clz_s helper_sve_clz_s_aarch64
+#define helper_sve_clz_d helper_sve_clz_d_aarch64
+#define helper_sve_cnt_zpz_b helper_sve_cnt_zpz_b_aarch64
+#define helper_sve_cnt_zpz_h helper_sve_cnt_zpz_h_aarch64
+#define helper_sve_cnt_zpz_s helper_sve_cnt_zpz_s_aarch64
+#define helper_sve_cnt_zpz_d helper_sve_cnt_zpz_d_aarch64
+#define helper_sve_cnot_b helper_sve_cnot_b_aarch64
+#define helper_sve_cnot_h helper_sve_cnot_h_aarch64
+#define helper_sve_cnot_s helper_sve_cnot_s_aarch64
+#define helper_sve_cnot_d helper_sve_cnot_d_aarch64
+#define helper_sve_fabs_h helper_sve_fabs_h_aarch64
+#define helper_sve_fabs_s helper_sve_fabs_s_aarch64
+#define helper_sve_fabs_d helper_sve_fabs_d_aarch64
+#define helper_sve_fneg_h helper_sve_fneg_h_aarch64
+#define helper_sve_fneg_s helper_sve_fneg_s_aarch64
+#define helper_sve_fneg_d helper_sve_fneg_d_aarch64
+#define helper_sve_not_zpz_b helper_sve_not_zpz_b_aarch64
+#define helper_sve_not_zpz_h helper_sve_not_zpz_h_aarch64
+#define helper_sve_not_zpz_s helper_sve_not_zpz_s_aarch64
+#define helper_sve_not_zpz_d helper_sve_not_zpz_d_aarch64
+#define helper_sve_sxtb_h helper_sve_sxtb_h_aarch64
+#define helper_sve_sxtb_s helper_sve_sxtb_s_aarch64
+#define helper_sve_sxth_s helper_sve_sxth_s_aarch64
+#define helper_sve_sxtb_d helper_sve_sxtb_d_aarch64
+#define helper_sve_sxth_d helper_sve_sxth_d_aarch64
+#define helper_sve_sxtw_d helper_sve_sxtw_d_aarch64
+#define helper_sve_uxtb_h helper_sve_uxtb_h_aarch64
+#define helper_sve_uxtb_s helper_sve_uxtb_s_aarch64
+#define helper_sve_uxth_s helper_sve_uxth_s_aarch64
+#define helper_sve_uxtb_d helper_sve_uxtb_d_aarch64
+#define helper_sve_uxth_d helper_sve_uxth_d_aarch64
+#define helper_sve_uxtw_d helper_sve_uxtw_d_aarch64
+#define helper_sve_abs_b helper_sve_abs_b_aarch64
+#define helper_sve_abs_h helper_sve_abs_h_aarch64
+#define helper_sve_abs_s helper_sve_abs_s_aarch64
+#define helper_sve_abs_d helper_sve_abs_d_aarch64
+#define helper_sve_neg_b helper_sve_neg_b_aarch64
+#define helper_sve_neg_h helper_sve_neg_h_aarch64
+#define helper_sve_neg_s helper_sve_neg_s_aarch64
+#define helper_sve_neg_d helper_sve_neg_d_aarch64
+#define helper_sve_revb_h helper_sve_revb_h_aarch64
+#define helper_sve_revb_s helper_sve_revb_s_aarch64
+#define helper_sve_revb_d helper_sve_revb_d_aarch64
+#define helper_sve_revh_s helper_sve_revh_s_aarch64
+#define helper_sve_revh_d helper_sve_revh_d_aarch64
+#define helper_sve_revw_d helper_sve_revw_d_aarch64
+#define helper_sve_rbit_b helper_sve_rbit_b_aarch64
+#define helper_sve_rbit_h helper_sve_rbit_h_aarch64
+#define helper_sve_rbit_s helper_sve_rbit_s_aarch64
+#define helper_sve_rbit_d helper_sve_rbit_d_aarch64
+#define helper_sve_asr_zzw_b helper_sve_asr_zzw_b_aarch64
+#define helper_sve_lsr_zzw_b helper_sve_lsr_zzw_b_aarch64
+#define helper_sve_lsl_zzw_b helper_sve_lsl_zzw_b_aarch64
+#define helper_sve_asr_zzw_h helper_sve_asr_zzw_h_aarch64
+#define helper_sve_lsr_zzw_h helper_sve_lsr_zzw_h_aarch64
+#define helper_sve_lsl_zzw_h helper_sve_lsl_zzw_h_aarch64
+#define helper_sve_asr_zzw_s helper_sve_asr_zzw_s_aarch64
+#define helper_sve_lsr_zzw_s helper_sve_lsr_zzw_s_aarch64
+#define helper_sve_lsl_zzw_s helper_sve_lsl_zzw_s_aarch64
+#define helper_sve_orv_b helper_sve_orv_b_aarch64
+#define helper_sve_orv_h helper_sve_orv_h_aarch64
+#define helper_sve_orv_s helper_sve_orv_s_aarch64
+#define helper_sve_orv_d helper_sve_orv_d_aarch64
+#define helper_sve_eorv_b helper_sve_eorv_b_aarch64
+#define helper_sve_eorv_h helper_sve_eorv_h_aarch64
+#define helper_sve_eorv_s helper_sve_eorv_s_aarch64
+#define helper_sve_eorv_d helper_sve_eorv_d_aarch64
+#define helper_sve_andv_b helper_sve_andv_b_aarch64
+#define helper_sve_andv_h helper_sve_andv_h_aarch64
+#define helper_sve_andv_s helper_sve_andv_s_aarch64
+#define helper_sve_andv_d helper_sve_andv_d_aarch64
+#define helper_sve_saddv_b helper_sve_saddv_b_aarch64
+#define helper_sve_saddv_h helper_sve_saddv_h_aarch64
+#define helper_sve_saddv_s helper_sve_saddv_s_aarch64
+#define helper_sve_uaddv_b helper_sve_uaddv_b_aarch64
+#define helper_sve_uaddv_h helper_sve_uaddv_h_aarch64
+#define helper_sve_uaddv_s helper_sve_uaddv_s_aarch64
+#define helper_sve_uaddv_d helper_sve_uaddv_d_aarch64
+#define helper_sve_smaxv_b helper_sve_smaxv_b_aarch64
+#define helper_sve_smaxv_h helper_sve_smaxv_h_aarch64
+#define helper_sve_smaxv_s helper_sve_smaxv_s_aarch64
+#define helper_sve_smaxv_d helper_sve_smaxv_d_aarch64
+#define helper_sve_umaxv_b helper_sve_umaxv_b_aarch64
+#define helper_sve_umaxv_h helper_sve_umaxv_h_aarch64
+#define helper_sve_umaxv_s helper_sve_umaxv_s_aarch64
+#define helper_sve_umaxv_d helper_sve_umaxv_d_aarch64
+#define helper_sve_sminv_b helper_sve_sminv_b_aarch64
+#define helper_sve_sminv_h helper_sve_sminv_h_aarch64
+#define helper_sve_sminv_s helper_sve_sminv_s_aarch64
+#define helper_sve_sminv_d helper_sve_sminv_d_aarch64
+#define helper_sve_uminv_b helper_sve_uminv_b_aarch64
+#define helper_sve_uminv_h helper_sve_uminv_h_aarch64
+#define helper_sve_uminv_s helper_sve_uminv_s_aarch64
+#define helper_sve_uminv_d helper_sve_uminv_d_aarch64
+#define helper_sve_subri_b helper_sve_subri_b_aarch64
+#define helper_sve_subri_h helper_sve_subri_h_aarch64
+#define helper_sve_subri_s helper_sve_subri_s_aarch64
+#define helper_sve_subri_d helper_sve_subri_d_aarch64
+#define helper_sve_smaxi_b helper_sve_smaxi_b_aarch64
+#define helper_sve_smaxi_h helper_sve_smaxi_h_aarch64
+#define helper_sve_smaxi_s helper_sve_smaxi_s_aarch64
+#define helper_sve_smaxi_d helper_sve_smaxi_d_aarch64
+#define helper_sve_smini_b helper_sve_smini_b_aarch64
+#define helper_sve_smini_h helper_sve_smini_h_aarch64
+#define helper_sve_smini_s helper_sve_smini_s_aarch64
+#define helper_sve_smini_d helper_sve_smini_d_aarch64
+#define helper_sve_umaxi_b helper_sve_umaxi_b_aarch64
+#define helper_sve_umaxi_h helper_sve_umaxi_h_aarch64
+#define helper_sve_umaxi_s helper_sve_umaxi_s_aarch64
+#define helper_sve_umaxi_d helper_sve_umaxi_d_aarch64
+#define helper_sve_umini_b helper_sve_umini_b_aarch64
+#define helper_sve_umini_h helper_sve_umini_h_aarch64
+#define helper_sve_umini_s helper_sve_umini_s_aarch64
+#define helper_sve_umini_d helper_sve_umini_d_aarch64
+#define helper_sve_pfirst helper_sve_pfirst_aarch64
+#define helper_sve_pnext helper_sve_pnext_aarch64
+#define helper_sve_clr_b helper_sve_clr_b_aarch64
+#define helper_sve_clr_h helper_sve_clr_h_aarch64
+#define helper_sve_clr_s helper_sve_clr_s_aarch64
+#define helper_sve_clr_d helper_sve_clr_d_aarch64
+#define helper_sve_movz_b helper_sve_movz_b_aarch64
+#define helper_sve_movz_h helper_sve_movz_h_aarch64
+#define helper_sve_movz_s helper_sve_movz_s_aarch64
+#define helper_sve_movz_d helper_sve_movz_d_aarch64
+#define helper_sve_asr_zpzi_b helper_sve_asr_zpzi_b_aarch64
+#define helper_sve_asr_zpzi_h helper_sve_asr_zpzi_h_aarch64
+#define helper_sve_asr_zpzi_s helper_sve_asr_zpzi_s_aarch64
+#define helper_sve_asr_zpzi_d helper_sve_asr_zpzi_d_aarch64
+#define helper_sve_lsr_zpzi_b helper_sve_lsr_zpzi_b_aarch64
+#define helper_sve_lsr_zpzi_h helper_sve_lsr_zpzi_h_aarch64
+#define helper_sve_lsr_zpzi_s helper_sve_lsr_zpzi_s_aarch64
+#define helper_sve_lsr_zpzi_d helper_sve_lsr_zpzi_d_aarch64
+#define helper_sve_lsl_zpzi_b helper_sve_lsl_zpzi_b_aarch64
+#define helper_sve_lsl_zpzi_h helper_sve_lsl_zpzi_h_aarch64
+#define helper_sve_lsl_zpzi_s helper_sve_lsl_zpzi_s_aarch64
+#define helper_sve_lsl_zpzi_d helper_sve_lsl_zpzi_d_aarch64
+#define helper_sve_asrd_b helper_sve_asrd_b_aarch64
+#define helper_sve_asrd_h helper_sve_asrd_h_aarch64
+#define helper_sve_asrd_s helper_sve_asrd_s_aarch64
+#define helper_sve_asrd_d helper_sve_asrd_d_aarch64
+#define helper_sve_mla_b helper_sve_mla_b_aarch64
+#define helper_sve_mls_b helper_sve_mls_b_aarch64
+#define helper_sve_mla_h helper_sve_mla_h_aarch64
+#define helper_sve_mls_h helper_sve_mls_h_aarch64
+#define helper_sve_mla_s helper_sve_mla_s_aarch64
+#define helper_sve_mls_s helper_sve_mls_s_aarch64
+#define helper_sve_mla_d helper_sve_mla_d_aarch64
+#define helper_sve_mls_d helper_sve_mls_d_aarch64
+#define helper_sve_index_b helper_sve_index_b_aarch64
+#define helper_sve_index_h helper_sve_index_h_aarch64
+#define helper_sve_index_s helper_sve_index_s_aarch64
+#define helper_sve_index_d helper_sve_index_d_aarch64
+#define helper_sve_adr_p32 helper_sve_adr_p32_aarch64
+#define helper_sve_adr_p64 helper_sve_adr_p64_aarch64
+#define helper_sve_adr_s32 helper_sve_adr_s32_aarch64
+#define helper_sve_adr_u32 helper_sve_adr_u32_aarch64
+#define helper_sve_fexpa_h helper_sve_fexpa_h_aarch64
+#define helper_sve_fexpa_s helper_sve_fexpa_s_aarch64
+#define helper_sve_fexpa_d helper_sve_fexpa_d_aarch64
+#define helper_sve_ftssel_h helper_sve_ftssel_h_aarch64
+#define helper_sve_ftssel_s helper_sve_ftssel_s_aarch64
+#define helper_sve_ftssel_d helper_sve_ftssel_d_aarch64
+#define helper_sve_sqaddi_b helper_sve_sqaddi_b_aarch64
+#define helper_sve_sqaddi_h helper_sve_sqaddi_h_aarch64
+#define helper_sve_sqaddi_s helper_sve_sqaddi_s_aarch64
+#define helper_sve_sqaddi_d helper_sve_sqaddi_d_aarch64
+#define helper_sve_uqaddi_b helper_sve_uqaddi_b_aarch64
+#define helper_sve_uqaddi_h helper_sve_uqaddi_h_aarch64
+#define helper_sve_uqaddi_s helper_sve_uqaddi_s_aarch64
+#define helper_sve_uqaddi_d helper_sve_uqaddi_d_aarch64
+#define helper_sve_uqsubi_d helper_sve_uqsubi_d_aarch64
+#define helper_sve_cpy_m_b helper_sve_cpy_m_b_aarch64
+#define helper_sve_cpy_m_h helper_sve_cpy_m_h_aarch64
+#define helper_sve_cpy_m_s helper_sve_cpy_m_s_aarch64
+#define helper_sve_cpy_m_d helper_sve_cpy_m_d_aarch64
+#define helper_sve_cpy_z_b helper_sve_cpy_z_b_aarch64
+#define helper_sve_cpy_z_h helper_sve_cpy_z_h_aarch64
+#define helper_sve_cpy_z_s helper_sve_cpy_z_s_aarch64
+#define helper_sve_cpy_z_d helper_sve_cpy_z_d_aarch64
+#define helper_sve_ext helper_sve_ext_aarch64
+#define helper_sve_insr_b helper_sve_insr_b_aarch64
+#define helper_sve_insr_h helper_sve_insr_h_aarch64
+#define helper_sve_insr_s helper_sve_insr_s_aarch64
+#define helper_sve_insr_d helper_sve_insr_d_aarch64
+#define helper_sve_rev_b helper_sve_rev_b_aarch64
+#define helper_sve_rev_h helper_sve_rev_h_aarch64
+#define helper_sve_rev_s helper_sve_rev_s_aarch64
+#define helper_sve_rev_d helper_sve_rev_d_aarch64
+#define helper_sve_tbl_b helper_sve_tbl_b_aarch64
+#define helper_sve_tbl_h helper_sve_tbl_h_aarch64
+#define helper_sve_tbl_s helper_sve_tbl_s_aarch64
+#define helper_sve_tbl_d helper_sve_tbl_d_aarch64
+#define helper_sve_sunpk_h helper_sve_sunpk_h_aarch64
+#define helper_sve_sunpk_s helper_sve_sunpk_s_aarch64
+#define helper_sve_sunpk_d helper_sve_sunpk_d_aarch64
+#define helper_sve_uunpk_h helper_sve_uunpk_h_aarch64
+#define helper_sve_uunpk_s helper_sve_uunpk_s_aarch64
+#define helper_sve_uunpk_d helper_sve_uunpk_d_aarch64
+#define helper_sve_zip_p helper_sve_zip_p_aarch64
+#define helper_sve_uzp_p helper_sve_uzp_p_aarch64
+#define helper_sve_trn_p helper_sve_trn_p_aarch64
+#define helper_sve_rev_p helper_sve_rev_p_aarch64
+#define helper_sve_punpk_p helper_sve_punpk_p_aarch64
+#define helper_sve_zip_b helper_sve_zip_b_aarch64
+#define helper_sve_zip_h helper_sve_zip_h_aarch64
+#define helper_sve_zip_s helper_sve_zip_s_aarch64
+#define helper_sve_zip_d helper_sve_zip_d_aarch64
+#define helper_sve_uzp_b helper_sve_uzp_b_aarch64
+#define helper_sve_uzp_h helper_sve_uzp_h_aarch64
+#define helper_sve_uzp_s helper_sve_uzp_s_aarch64
+#define helper_sve_uzp_d helper_sve_uzp_d_aarch64
+#define helper_sve_trn_b helper_sve_trn_b_aarch64
+#define helper_sve_trn_h helper_sve_trn_h_aarch64
+#define helper_sve_trn_s helper_sve_trn_s_aarch64
+#define helper_sve_trn_d helper_sve_trn_d_aarch64
+#define helper_sve_compact_s helper_sve_compact_s_aarch64
+#define helper_sve_compact_d helper_sve_compact_d_aarch64
+#define helper_sve_last_active_element helper_sve_last_active_element_aarch64
+#define helper_sve_splice helper_sve_splice_aarch64
+#define helper_sve_sel_zpzz_b helper_sve_sel_zpzz_b_aarch64
+#define helper_sve_sel_zpzz_h helper_sve_sel_zpzz_h_aarch64
+#define helper_sve_sel_zpzz_s helper_sve_sel_zpzz_s_aarch64
+#define helper_sve_sel_zpzz_d helper_sve_sel_zpzz_d_aarch64
+#define helper_sve_cmpeq_ppzz_b helper_sve_cmpeq_ppzz_b_aarch64
+#define helper_sve_cmpeq_ppzz_h helper_sve_cmpeq_ppzz_h_aarch64
+#define helper_sve_cmpeq_ppzz_s helper_sve_cmpeq_ppzz_s_aarch64
+#define helper_sve_cmpeq_ppzz_d helper_sve_cmpeq_ppzz_d_aarch64
+#define helper_sve_cmpne_ppzz_b helper_sve_cmpne_ppzz_b_aarch64
+#define helper_sve_cmpne_ppzz_h helper_sve_cmpne_ppzz_h_aarch64
+#define helper_sve_cmpne_ppzz_s helper_sve_cmpne_ppzz_s_aarch64
+#define helper_sve_cmpne_ppzz_d helper_sve_cmpne_ppzz_d_aarch64
+#define helper_sve_cmpgt_ppzz_b helper_sve_cmpgt_ppzz_b_aarch64
+#define helper_sve_cmpgt_ppzz_h helper_sve_cmpgt_ppzz_h_aarch64
+#define helper_sve_cmpgt_ppzz_s helper_sve_cmpgt_ppzz_s_aarch64
+#define helper_sve_cmpgt_ppzz_d helper_sve_cmpgt_ppzz_d_aarch64
+#define helper_sve_cmpge_ppzz_b helper_sve_cmpge_ppzz_b_aarch64
+#define helper_sve_cmpge_ppzz_h helper_sve_cmpge_ppzz_h_aarch64
+#define helper_sve_cmpge_ppzz_s helper_sve_cmpge_ppzz_s_aarch64
+#define helper_sve_cmpge_ppzz_d helper_sve_cmpge_ppzz_d_aarch64
+#define helper_sve_cmphi_ppzz_b helper_sve_cmphi_ppzz_b_aarch64
+#define helper_sve_cmphi_ppzz_h helper_sve_cmphi_ppzz_h_aarch64
+#define helper_sve_cmphi_ppzz_s helper_sve_cmphi_ppzz_s_aarch64
+#define helper_sve_cmphi_ppzz_d helper_sve_cmphi_ppzz_d_aarch64
+#define helper_sve_cmphs_ppzz_b helper_sve_cmphs_ppzz_b_aarch64
+#define helper_sve_cmphs_ppzz_h helper_sve_cmphs_ppzz_h_aarch64
+#define helper_sve_cmphs_ppzz_s helper_sve_cmphs_ppzz_s_aarch64
+#define helper_sve_cmphs_ppzz_d helper_sve_cmphs_ppzz_d_aarch64
+#define helper_sve_cmpeq_ppzw_b helper_sve_cmpeq_ppzw_b_aarch64
+#define helper_sve_cmpeq_ppzw_h helper_sve_cmpeq_ppzw_h_aarch64
+#define helper_sve_cmpeq_ppzw_s helper_sve_cmpeq_ppzw_s_aarch64
+#define helper_sve_cmpne_ppzw_b helper_sve_cmpne_ppzw_b_aarch64
+#define helper_sve_cmpne_ppzw_h helper_sve_cmpne_ppzw_h_aarch64
+#define helper_sve_cmpne_ppzw_s helper_sve_cmpne_ppzw_s_aarch64
+#define helper_sve_cmpgt_ppzw_b helper_sve_cmpgt_ppzw_b_aarch64
+#define helper_sve_cmpgt_ppzw_h helper_sve_cmpgt_ppzw_h_aarch64
+#define helper_sve_cmpgt_ppzw_s helper_sve_cmpgt_ppzw_s_aarch64
+#define helper_sve_cmpge_ppzw_b helper_sve_cmpge_ppzw_b_aarch64
+#define helper_sve_cmpge_ppzw_h helper_sve_cmpge_ppzw_h_aarch64
+#define helper_sve_cmpge_ppzw_s helper_sve_cmpge_ppzw_s_aarch64
+#define helper_sve_cmphi_ppzw_b helper_sve_cmphi_ppzw_b_aarch64
+#define helper_sve_cmphi_ppzw_h helper_sve_cmphi_ppzw_h_aarch64
+#define helper_sve_cmphi_ppzw_s helper_sve_cmphi_ppzw_s_aarch64
+#define helper_sve_cmphs_ppzw_b helper_sve_cmphs_ppzw_b_aarch64
+#define helper_sve_cmphs_ppzw_h helper_sve_cmphs_ppzw_h_aarch64
+#define helper_sve_cmphs_ppzw_s helper_sve_cmphs_ppzw_s_aarch64
+#define helper_sve_cmplt_ppzw_b helper_sve_cmplt_ppzw_b_aarch64
+#define helper_sve_cmplt_ppzw_h helper_sve_cmplt_ppzw_h_aarch64
+#define helper_sve_cmplt_ppzw_s helper_sve_cmplt_ppzw_s_aarch64
+#define helper_sve_cmple_ppzw_b helper_sve_cmple_ppzw_b_aarch64
+#define helper_sve_cmple_ppzw_h helper_sve_cmple_ppzw_h_aarch64
+#define helper_sve_cmple_ppzw_s helper_sve_cmple_ppzw_s_aarch64
+#define helper_sve_cmplo_ppzw_b helper_sve_cmplo_ppzw_b_aarch64
+#define helper_sve_cmplo_ppzw_h helper_sve_cmplo_ppzw_h_aarch64
+#define helper_sve_cmplo_ppzw_s helper_sve_cmplo_ppzw_s_aarch64
+#define helper_sve_cmpls_ppzw_b helper_sve_cmpls_ppzw_b_aarch64
+#define helper_sve_cmpls_ppzw_h helper_sve_cmpls_ppzw_h_aarch64
+#define helper_sve_cmpls_ppzw_s helper_sve_cmpls_ppzw_s_aarch64
+#define helper_sve_cmpeq_ppzi_b helper_sve_cmpeq_ppzi_b_aarch64
+#define helper_sve_cmpeq_ppzi_h helper_sve_cmpeq_ppzi_h_aarch64
+#define helper_sve_cmpeq_ppzi_s helper_sve_cmpeq_ppzi_s_aarch64
+#define helper_sve_cmpeq_ppzi_d helper_sve_cmpeq_ppzi_d_aarch64
+#define helper_sve_cmpne_ppzi_b helper_sve_cmpne_ppzi_b_aarch64
+#define helper_sve_cmpne_ppzi_h helper_sve_cmpne_ppzi_h_aarch64
+#define helper_sve_cmpne_ppzi_s helper_sve_cmpne_ppzi_s_aarch64
+#define helper_sve_cmpne_ppzi_d helper_sve_cmpne_ppzi_d_aarch64
+#define helper_sve_cmpgt_ppzi_b helper_sve_cmpgt_ppzi_b_aarch64
+#define helper_sve_cmpgt_ppzi_h helper_sve_cmpgt_ppzi_h_aarch64
+#define helper_sve_cmpgt_ppzi_s helper_sve_cmpgt_ppzi_s_aarch64
+#define helper_sve_cmpgt_ppzi_d helper_sve_cmpgt_ppzi_d_aarch64
+#define helper_sve_cmpge_ppzi_b helper_sve_cmpge_ppzi_b_aarch64
+#define helper_sve_cmpge_ppzi_h helper_sve_cmpge_ppzi_h_aarch64
+#define helper_sve_cmpge_ppzi_s helper_sve_cmpge_ppzi_s_aarch64
+#define helper_sve_cmpge_ppzi_d helper_sve_cmpge_ppzi_d_aarch64
+#define helper_sve_cmphi_ppzi_b helper_sve_cmphi_ppzi_b_aarch64
+#define helper_sve_cmphi_ppzi_h helper_sve_cmphi_ppzi_h_aarch64
+#define helper_sve_cmphi_ppzi_s helper_sve_cmphi_ppzi_s_aarch64
+#define helper_sve_cmphi_ppzi_d helper_sve_cmphi_ppzi_d_aarch64
+#define helper_sve_cmphs_ppzi_b helper_sve_cmphs_ppzi_b_aarch64
+#define helper_sve_cmphs_ppzi_h helper_sve_cmphs_ppzi_h_aarch64
+#define helper_sve_cmphs_ppzi_s helper_sve_cmphs_ppzi_s_aarch64
+#define helper_sve_cmphs_ppzi_d helper_sve_cmphs_ppzi_d_aarch64
+#define helper_sve_cmplt_ppzi_b helper_sve_cmplt_ppzi_b_aarch64
+#define helper_sve_cmplt_ppzi_h helper_sve_cmplt_ppzi_h_aarch64
+#define helper_sve_cmplt_ppzi_s helper_sve_cmplt_ppzi_s_aarch64
+#define helper_sve_cmplt_ppzi_d helper_sve_cmplt_ppzi_d_aarch64
+#define helper_sve_cmple_ppzi_b helper_sve_cmple_ppzi_b_aarch64
+#define helper_sve_cmple_ppzi_h helper_sve_cmple_ppzi_h_aarch64
+#define helper_sve_cmple_ppzi_s helper_sve_cmple_ppzi_s_aarch64
+#define helper_sve_cmple_ppzi_d helper_sve_cmple_ppzi_d_aarch64
+#define helper_sve_cmplo_ppzi_b helper_sve_cmplo_ppzi_b_aarch64
+#define helper_sve_cmplo_ppzi_h helper_sve_cmplo_ppzi_h_aarch64
+#define helper_sve_cmplo_ppzi_s helper_sve_cmplo_ppzi_s_aarch64
+#define helper_sve_cmplo_ppzi_d helper_sve_cmplo_ppzi_d_aarch64
+#define helper_sve_cmpls_ppzi_b helper_sve_cmpls_ppzi_b_aarch64
+#define helper_sve_cmpls_ppzi_h helper_sve_cmpls_ppzi_h_aarch64
+#define helper_sve_cmpls_ppzi_s helper_sve_cmpls_ppzi_s_aarch64
+#define helper_sve_cmpls_ppzi_d helper_sve_cmpls_ppzi_d_aarch64
+#define helper_sve_brkpa helper_sve_brkpa_aarch64
+#define helper_sve_brkpas helper_sve_brkpas_aarch64
+#define helper_sve_brkpb helper_sve_brkpb_aarch64
+#define helper_sve_brkpbs helper_sve_brkpbs_aarch64
+#define helper_sve_brka_z helper_sve_brka_z_aarch64
+#define helper_sve_brkas_z helper_sve_brkas_z_aarch64
+#define helper_sve_brkb_z helper_sve_brkb_z_aarch64
+#define helper_sve_brkbs_z helper_sve_brkbs_z_aarch64
+#define helper_sve_brka_m helper_sve_brka_m_aarch64
+#define helper_sve_brkas_m helper_sve_brkas_m_aarch64
+#define helper_sve_brkb_m helper_sve_brkb_m_aarch64
+#define helper_sve_brkbs_m helper_sve_brkbs_m_aarch64
+#define helper_sve_brkn helper_sve_brkn_aarch64
+#define helper_sve_brkns helper_sve_brkns_aarch64
+#define helper_sve_cntp helper_sve_cntp_aarch64
+#define helper_sve_while helper_sve_while_aarch64
+#define helper_sve_faddv_h helper_sve_faddv_h_aarch64
+#define helper_sve_faddv_s helper_sve_faddv_s_aarch64
+#define helper_sve_faddv_d helper_sve_faddv_d_aarch64
+#define helper_sve_fminnmv_h helper_sve_fminnmv_h_aarch64
+#define helper_sve_fminnmv_s helper_sve_fminnmv_s_aarch64
+#define helper_sve_fminnmv_d helper_sve_fminnmv_d_aarch64
+#define helper_sve_fmaxnmv_h helper_sve_fmaxnmv_h_aarch64
+#define helper_sve_fmaxnmv_s helper_sve_fmaxnmv_s_aarch64
+#define helper_sve_fmaxnmv_d helper_sve_fmaxnmv_d_aarch64
+#define helper_sve_fminv_h helper_sve_fminv_h_aarch64
+#define helper_sve_fminv_s helper_sve_fminv_s_aarch64
+#define helper_sve_fminv_d helper_sve_fminv_d_aarch64
+#define helper_sve_fmaxv_h helper_sve_fmaxv_h_aarch64
+#define helper_sve_fmaxv_s helper_sve_fmaxv_s_aarch64
+#define helper_sve_fmaxv_d helper_sve_fmaxv_d_aarch64
+#define helper_sve_fadda_h helper_sve_fadda_h_aarch64
+#define helper_sve_fadda_s helper_sve_fadda_s_aarch64
+#define helper_sve_fadda_d helper_sve_fadda_d_aarch64
+#define helper_sve_fadd_h helper_sve_fadd_h_aarch64
+#define helper_sve_fadd_s helper_sve_fadd_s_aarch64
+#define helper_sve_fadd_d helper_sve_fadd_d_aarch64
+#define helper_sve_fsub_h helper_sve_fsub_h_aarch64
+#define helper_sve_fsub_s helper_sve_fsub_s_aarch64
+#define helper_sve_fsub_d helper_sve_fsub_d_aarch64
+#define helper_sve_fmul_h helper_sve_fmul_h_aarch64
+#define helper_sve_fmul_s helper_sve_fmul_s_aarch64
+#define helper_sve_fmul_d helper_sve_fmul_d_aarch64
+#define helper_sve_fdiv_h helper_sve_fdiv_h_aarch64
+#define helper_sve_fdiv_s helper_sve_fdiv_s_aarch64
+#define helper_sve_fdiv_d helper_sve_fdiv_d_aarch64
+#define helper_sve_fmin_h helper_sve_fmin_h_aarch64
+#define helper_sve_fmin_s helper_sve_fmin_s_aarch64
+#define helper_sve_fmin_d helper_sve_fmin_d_aarch64
+#define helper_sve_fmax_h helper_sve_fmax_h_aarch64
+#define helper_sve_fmax_s helper_sve_fmax_s_aarch64
+#define helper_sve_fmax_d helper_sve_fmax_d_aarch64
+#define helper_sve_fminnum_h helper_sve_fminnum_h_aarch64
+#define helper_sve_fminnum_s helper_sve_fminnum_s_aarch64
+#define helper_sve_fminnum_d helper_sve_fminnum_d_aarch64
+#define helper_sve_fmaxnum_h helper_sve_fmaxnum_h_aarch64
+#define helper_sve_fmaxnum_s helper_sve_fmaxnum_s_aarch64
+#define helper_sve_fmaxnum_d helper_sve_fmaxnum_d_aarch64
+#define helper_sve_fabd_h helper_sve_fabd_h_aarch64
+#define helper_sve_fabd_s helper_sve_fabd_s_aarch64
+#define helper_sve_fabd_d helper_sve_fabd_d_aarch64
+#define helper_sve_fscalbn_h helper_sve_fscalbn_h_aarch64
+#define helper_sve_fscalbn_s helper_sve_fscalbn_s_aarch64
+#define helper_sve_fscalbn_d helper_sve_fscalbn_d_aarch64
+#define helper_sve_fmulx_h helper_sve_fmulx_h_aarch64
+#define helper_sve_fmulx_s helper_sve_fmulx_s_aarch64
+#define helper_sve_fmulx_d helper_sve_fmulx_d_aarch64
+#define helper_sve_fadds_h helper_sve_fadds_h_aarch64
+#define helper_sve_fadds_s helper_sve_fadds_s_aarch64
+#define helper_sve_fadds_d helper_sve_fadds_d_aarch64
+#define helper_sve_fsubs_h helper_sve_fsubs_h_aarch64
+#define helper_sve_fsubs_s helper_sve_fsubs_s_aarch64
+#define helper_sve_fsubs_d helper_sve_fsubs_d_aarch64
+#define helper_sve_fmuls_h helper_sve_fmuls_h_aarch64
+#define helper_sve_fmuls_s helper_sve_fmuls_s_aarch64
+#define helper_sve_fmuls_d helper_sve_fmuls_d_aarch64
+#define helper_sve_fsubrs_h helper_sve_fsubrs_h_aarch64
+#define helper_sve_fsubrs_s helper_sve_fsubrs_s_aarch64
+#define helper_sve_fsubrs_d helper_sve_fsubrs_d_aarch64
+#define helper_sve_fmaxnms_h helper_sve_fmaxnms_h_aarch64
+#define helper_sve_fmaxnms_s helper_sve_fmaxnms_s_aarch64
+#define helper_sve_fmaxnms_d helper_sve_fmaxnms_d_aarch64
+#define helper_sve_fminnms_h helper_sve_fminnms_h_aarch64
+#define helper_sve_fminnms_s helper_sve_fminnms_s_aarch64
+#define helper_sve_fminnms_d helper_sve_fminnms_d_aarch64
+#define helper_sve_fmaxs_h helper_sve_fmaxs_h_aarch64
+#define helper_sve_fmaxs_s helper_sve_fmaxs_s_aarch64
+#define helper_sve_fmaxs_d helper_sve_fmaxs_d_aarch64
+#define helper_sve_fmins_h helper_sve_fmins_h_aarch64
+#define helper_sve_fmins_s helper_sve_fmins_s_aarch64
+#define helper_sve_fmins_d helper_sve_fmins_d_aarch64
+#define helper_sve_fcvt_sh helper_sve_fcvt_sh_aarch64
+#define helper_sve_fcvt_hs helper_sve_fcvt_hs_aarch64
+#define helper_sve_fcvt_dh helper_sve_fcvt_dh_aarch64
+#define helper_sve_fcvt_hd helper_sve_fcvt_hd_aarch64
+#define helper_sve_fcvt_ds helper_sve_fcvt_ds_aarch64
+#define helper_sve_fcvt_sd helper_sve_fcvt_sd_aarch64
+#define helper_sve_fcvtzs_hh helper_sve_fcvtzs_hh_aarch64
+#define helper_sve_fcvtzs_hs helper_sve_fcvtzs_hs_aarch64
+#define helper_sve_fcvtzs_ss helper_sve_fcvtzs_ss_aarch64
+#define helper_sve_fcvtzs_hd helper_sve_fcvtzs_hd_aarch64
+#define helper_sve_fcvtzs_sd helper_sve_fcvtzs_sd_aarch64
+#define helper_sve_fcvtzs_ds helper_sve_fcvtzs_ds_aarch64
+#define helper_sve_fcvtzs_dd helper_sve_fcvtzs_dd_aarch64
+#define helper_sve_fcvtzu_hh helper_sve_fcvtzu_hh_aarch64
+#define helper_sve_fcvtzu_hs helper_sve_fcvtzu_hs_aarch64
+#define helper_sve_fcvtzu_ss helper_sve_fcvtzu_ss_aarch64
+#define helper_sve_fcvtzu_hd helper_sve_fcvtzu_hd_aarch64
+#define helper_sve_fcvtzu_sd helper_sve_fcvtzu_sd_aarch64
+#define helper_sve_fcvtzu_ds helper_sve_fcvtzu_ds_aarch64
+#define helper_sve_fcvtzu_dd helper_sve_fcvtzu_dd_aarch64
+#define helper_sve_frint_h helper_sve_frint_h_aarch64
+#define helper_sve_frint_s helper_sve_frint_s_aarch64
+#define helper_sve_frint_d helper_sve_frint_d_aarch64
+#define helper_sve_frintx_h helper_sve_frintx_h_aarch64
+#define helper_sve_frintx_s helper_sve_frintx_s_aarch64
+#define helper_sve_frintx_d helper_sve_frintx_d_aarch64
+#define helper_sve_frecpx_h helper_sve_frecpx_h_aarch64
+#define helper_sve_frecpx_s helper_sve_frecpx_s_aarch64
+#define helper_sve_frecpx_d helper_sve_frecpx_d_aarch64
+#define helper_sve_fsqrt_h helper_sve_fsqrt_h_aarch64
+#define helper_sve_fsqrt_s helper_sve_fsqrt_s_aarch64
+#define helper_sve_fsqrt_d helper_sve_fsqrt_d_aarch64
+#define helper_sve_scvt_hh helper_sve_scvt_hh_aarch64
+#define helper_sve_scvt_sh helper_sve_scvt_sh_aarch64
+#define helper_sve_scvt_ss helper_sve_scvt_ss_aarch64
+#define helper_sve_scvt_sd helper_sve_scvt_sd_aarch64
+#define helper_sve_scvt_dh helper_sve_scvt_dh_aarch64
+#define helper_sve_scvt_ds helper_sve_scvt_ds_aarch64
+#define helper_sve_scvt_dd helper_sve_scvt_dd_aarch64
+#define helper_sve_ucvt_hh helper_sve_ucvt_hh_aarch64
+#define helper_sve_ucvt_sh helper_sve_ucvt_sh_aarch64
+#define helper_sve_ucvt_ss helper_sve_ucvt_ss_aarch64
+#define helper_sve_ucvt_sd helper_sve_ucvt_sd_aarch64
+#define helper_sve_ucvt_dh helper_sve_ucvt_dh_aarch64
+#define helper_sve_ucvt_ds helper_sve_ucvt_ds_aarch64
+#define helper_sve_ucvt_dd helper_sve_ucvt_dd_aarch64
+#define helper_sve_fmla_zpzzz_h helper_sve_fmla_zpzzz_h_aarch64
+#define helper_sve_fmls_zpzzz_h helper_sve_fmls_zpzzz_h_aarch64
+#define helper_sve_fnmla_zpzzz_h helper_sve_fnmla_zpzzz_h_aarch64
+#define helper_sve_fnmls_zpzzz_h helper_sve_fnmls_zpzzz_h_aarch64
+#define helper_sve_fmla_zpzzz_s helper_sve_fmla_zpzzz_s_aarch64
+#define helper_sve_fmls_zpzzz_s helper_sve_fmls_zpzzz_s_aarch64
+#define helper_sve_fnmla_zpzzz_s helper_sve_fnmla_zpzzz_s_aarch64
+#define helper_sve_fnmls_zpzzz_s helper_sve_fnmls_zpzzz_s_aarch64
+#define helper_sve_fmla_zpzzz_d helper_sve_fmla_zpzzz_d_aarch64
+#define helper_sve_fmls_zpzzz_d helper_sve_fmls_zpzzz_d_aarch64
+#define helper_sve_fnmla_zpzzz_d helper_sve_fnmla_zpzzz_d_aarch64
+#define helper_sve_fnmls_zpzzz_d helper_sve_fnmls_zpzzz_d_aarch64
+#define helper_sve_fcmge_h helper_sve_fcmge_h_aarch64
+#define helper_sve_fcmge_s helper_sve_fcmge_s_aarch64
+#define helper_sve_fcmge_d helper_sve_fcmge_d_aarch64
+#define helper_sve_fcmgt_h helper_sve_fcmgt_h_aarch64
+#define helper_sve_fcmgt_s helper_sve_fcmgt_s_aarch64
+#define helper_sve_fcmgt_d helper_sve_fcmgt_d_aarch64
+#define helper_sve_fcmeq_h helper_sve_fcmeq_h_aarch64
+#define helper_sve_fcmeq_s helper_sve_fcmeq_s_aarch64
+#define helper_sve_fcmeq_d helper_sve_fcmeq_d_aarch64
+#define helper_sve_fcmne_h helper_sve_fcmne_h_aarch64
+#define helper_sve_fcmne_s helper_sve_fcmne_s_aarch64
+#define helper_sve_fcmne_d helper_sve_fcmne_d_aarch64
+#define helper_sve_fcmuo_h helper_sve_fcmuo_h_aarch64
+#define helper_sve_fcmuo_s helper_sve_fcmuo_s_aarch64
+#define helper_sve_fcmuo_d helper_sve_fcmuo_d_aarch64
+#define helper_sve_facge_h helper_sve_facge_h_aarch64
+#define helper_sve_facge_s helper_sve_facge_s_aarch64
+#define helper_sve_facge_d helper_sve_facge_d_aarch64
+#define helper_sve_facgt_h helper_sve_facgt_h_aarch64
+#define helper_sve_facgt_s helper_sve_facgt_s_aarch64
+#define helper_sve_facgt_d helper_sve_facgt_d_aarch64
+#define helper_sve_fcmge0_h helper_sve_fcmge0_h_aarch64
+#define helper_sve_fcmge0_s helper_sve_fcmge0_s_aarch64
+#define helper_sve_fcmge0_d helper_sve_fcmge0_d_aarch64
+#define helper_sve_fcmgt0_h helper_sve_fcmgt0_h_aarch64
+#define helper_sve_fcmgt0_s helper_sve_fcmgt0_s_aarch64
+#define helper_sve_fcmgt0_d helper_sve_fcmgt0_d_aarch64
+#define helper_sve_fcmle0_h helper_sve_fcmle0_h_aarch64
+#define helper_sve_fcmle0_s helper_sve_fcmle0_s_aarch64
+#define helper_sve_fcmle0_d helper_sve_fcmle0_d_aarch64
+#define helper_sve_fcmlt0_h helper_sve_fcmlt0_h_aarch64
+#define helper_sve_fcmlt0_s helper_sve_fcmlt0_s_aarch64
+#define helper_sve_fcmlt0_d helper_sve_fcmlt0_d_aarch64
+#define helper_sve_fcmeq0_h helper_sve_fcmeq0_h_aarch64
+#define helper_sve_fcmeq0_s helper_sve_fcmeq0_s_aarch64
+#define helper_sve_fcmeq0_d helper_sve_fcmeq0_d_aarch64
+#define helper_sve_fcmne0_h helper_sve_fcmne0_h_aarch64
+#define helper_sve_fcmne0_s helper_sve_fcmne0_s_aarch64
+#define helper_sve_fcmne0_d helper_sve_fcmne0_d_aarch64
+#define helper_sve_ftmad_h helper_sve_ftmad_h_aarch64
+#define helper_sve_ftmad_s helper_sve_ftmad_s_aarch64
+#define helper_sve_ftmad_d helper_sve_ftmad_d_aarch64
+#define helper_sve_fcadd_h helper_sve_fcadd_h_aarch64
+#define helper_sve_fcadd_s helper_sve_fcadd_s_aarch64
+#define helper_sve_fcadd_d helper_sve_fcadd_d_aarch64
+#define helper_sve_fcmla_zpzzz_h helper_sve_fcmla_zpzzz_h_aarch64
+#define helper_sve_fcmla_zpzzz_s helper_sve_fcmla_zpzzz_s_aarch64
+#define helper_sve_fcmla_zpzzz_d helper_sve_fcmla_zpzzz_d_aarch64
+#define helper_sve_ld1bb_r helper_sve_ld1bb_r_aarch64
+#define helper_sve_ld1bhu_r helper_sve_ld1bhu_r_aarch64
+#define helper_sve_ld1bhs_r helper_sve_ld1bhs_r_aarch64
+#define helper_sve_ld1bsu_r helper_sve_ld1bsu_r_aarch64
+#define helper_sve_ld1bss_r helper_sve_ld1bss_r_aarch64
+#define helper_sve_ld1bdu_r helper_sve_ld1bdu_r_aarch64
+#define helper_sve_ld1bds_r helper_sve_ld1bds_r_aarch64
+#define helper_sve_ld1hh_le_r helper_sve_ld1hh_le_r_aarch64
+#define helper_sve_ld1hh_be_r helper_sve_ld1hh_be_r_aarch64
+#define helper_sve_ld1hsu_le_r helper_sve_ld1hsu_le_r_aarch64
+#define helper_sve_ld1hsu_be_r helper_sve_ld1hsu_be_r_aarch64
+#define helper_sve_ld1hss_le_r helper_sve_ld1hss_le_r_aarch64
+#define helper_sve_ld1hss_be_r helper_sve_ld1hss_be_r_aarch64
+#define helper_sve_ld1hdu_le_r helper_sve_ld1hdu_le_r_aarch64
+#define helper_sve_ld1hdu_be_r helper_sve_ld1hdu_be_r_aarch64
+#define helper_sve_ld1hds_le_r helper_sve_ld1hds_le_r_aarch64
+#define helper_sve_ld1hds_be_r helper_sve_ld1hds_be_r_aarch64
+#define helper_sve_ld1ss_le_r helper_sve_ld1ss_le_r_aarch64
+#define helper_sve_ld1ss_be_r helper_sve_ld1ss_be_r_aarch64
+#define helper_sve_ld1sdu_le_r helper_sve_ld1sdu_le_r_aarch64
+#define helper_sve_ld1sdu_be_r helper_sve_ld1sdu_be_r_aarch64
+#define helper_sve_ld1sds_le_r helper_sve_ld1sds_le_r_aarch64
+#define helper_sve_ld1sds_be_r helper_sve_ld1sds_be_r_aarch64
+#define helper_sve_ld1dd_le_r helper_sve_ld1dd_le_r_aarch64
+#define helper_sve_ld1dd_be_r helper_sve_ld1dd_be_r_aarch64
+#define helper_sve_ld2bb_r helper_sve_ld2bb_r_aarch64
+#define helper_sve_ld3bb_r helper_sve_ld3bb_r_aarch64
+#define helper_sve_ld4bb_r helper_sve_ld4bb_r_aarch64
+#define helper_sve_ld2hh_le_r helper_sve_ld2hh_le_r_aarch64
+#define helper_sve_ld2hh_be_r helper_sve_ld2hh_be_r_aarch64
+#define helper_sve_ld3hh_le_r helper_sve_ld3hh_le_r_aarch64
+#define helper_sve_ld3hh_be_r helper_sve_ld3hh_be_r_aarch64
+#define helper_sve_ld4hh_le_r helper_sve_ld4hh_le_r_aarch64
+#define helper_sve_ld4hh_be_r helper_sve_ld4hh_be_r_aarch64
+#define helper_sve_ld2ss_le_r helper_sve_ld2ss_le_r_aarch64
+#define helper_sve_ld2ss_be_r helper_sve_ld2ss_be_r_aarch64
+#define helper_sve_ld3ss_le_r helper_sve_ld3ss_le_r_aarch64
+#define helper_sve_ld3ss_be_r helper_sve_ld3ss_be_r_aarch64
+#define helper_sve_ld4ss_le_r helper_sve_ld4ss_le_r_aarch64
+#define helper_sve_ld4ss_be_r helper_sve_ld4ss_be_r_aarch64
+#define helper_sve_ld2dd_le_r helper_sve_ld2dd_le_r_aarch64
+#define helper_sve_ld2dd_be_r helper_sve_ld2dd_be_r_aarch64
+#define helper_sve_ld3dd_le_r helper_sve_ld3dd_le_r_aarch64
+#define helper_sve_ld3dd_be_r helper_sve_ld3dd_be_r_aarch64
+#define helper_sve_ld4dd_le_r helper_sve_ld4dd_le_r_aarch64
+#define helper_sve_ld4dd_be_r helper_sve_ld4dd_be_r_aarch64
+#define helper_sve_ldff1bb_r helper_sve_ldff1bb_r_aarch64
+#define helper_sve_ldnf1bb_r helper_sve_ldnf1bb_r_aarch64
+#define helper_sve_ldff1bhu_r helper_sve_ldff1bhu_r_aarch64
+#define helper_sve_ldnf1bhu_r helper_sve_ldnf1bhu_r_aarch64
+#define helper_sve_ldff1bhs_r helper_sve_ldff1bhs_r_aarch64
+#define helper_sve_ldnf1bhs_r helper_sve_ldnf1bhs_r_aarch64
+#define helper_sve_ldff1bsu_r helper_sve_ldff1bsu_r_aarch64
+#define helper_sve_ldnf1bsu_r helper_sve_ldnf1bsu_r_aarch64
+#define helper_sve_ldff1bss_r helper_sve_ldff1bss_r_aarch64
+#define helper_sve_ldnf1bss_r helper_sve_ldnf1bss_r_aarch64
+#define helper_sve_ldff1bdu_r helper_sve_ldff1bdu_r_aarch64
+#define helper_sve_ldnf1bdu_r helper_sve_ldnf1bdu_r_aarch64
+#define helper_sve_ldff1bds_r helper_sve_ldff1bds_r_aarch64
+#define helper_sve_ldnf1bds_r helper_sve_ldnf1bds_r_aarch64
+#define helper_sve_ldff1hh_le_r helper_sve_ldff1hh_le_r_aarch64
+#define helper_sve_ldnf1hh_le_r helper_sve_ldnf1hh_le_r_aarch64
+#define helper_sve_ldff1hh_be_r helper_sve_ldff1hh_be_r_aarch64
+#define helper_sve_ldnf1hh_be_r helper_sve_ldnf1hh_be_r_aarch64
+#define helper_sve_ldff1hsu_le_r helper_sve_ldff1hsu_le_r_aarch64
+#define helper_sve_ldnf1hsu_le_r helper_sve_ldnf1hsu_le_r_aarch64
+#define helper_sve_ldff1hsu_be_r helper_sve_ldff1hsu_be_r_aarch64
+#define helper_sve_ldnf1hsu_be_r helper_sve_ldnf1hsu_be_r_aarch64
+#define helper_sve_ldff1hss_le_r helper_sve_ldff1hss_le_r_aarch64
+#define helper_sve_ldnf1hss_le_r helper_sve_ldnf1hss_le_r_aarch64
+#define helper_sve_ldff1hss_be_r helper_sve_ldff1hss_be_r_aarch64
+#define helper_sve_ldnf1hss_be_r helper_sve_ldnf1hss_be_r_aarch64
+#define helper_sve_ldff1hdu_le_r helper_sve_ldff1hdu_le_r_aarch64
+#define helper_sve_ldnf1hdu_le_r helper_sve_ldnf1hdu_le_r_aarch64
+#define helper_sve_ldff1hdu_be_r helper_sve_ldff1hdu_be_r_aarch64
+#define helper_sve_ldnf1hdu_be_r helper_sve_ldnf1hdu_be_r_aarch64
+#define helper_sve_ldff1hds_le_r helper_sve_ldff1hds_le_r_aarch64
+#define helper_sve_ldnf1hds_le_r helper_sve_ldnf1hds_le_r_aarch64
+#define helper_sve_ldff1hds_be_r helper_sve_ldff1hds_be_r_aarch64
+#define helper_sve_ldnf1hds_be_r helper_sve_ldnf1hds_be_r_aarch64
+#define helper_sve_ldff1ss_le_r helper_sve_ldff1ss_le_r_aarch64
+#define helper_sve_ldnf1ss_le_r helper_sve_ldnf1ss_le_r_aarch64
+#define helper_sve_ldff1ss_be_r helper_sve_ldff1ss_be_r_aarch64
+#define helper_sve_ldnf1ss_be_r helper_sve_ldnf1ss_be_r_aarch64
+#define helper_sve_ldff1sdu_le_r helper_sve_ldff1sdu_le_r_aarch64
+#define helper_sve_ldnf1sdu_le_r helper_sve_ldnf1sdu_le_r_aarch64
+#define helper_sve_ldff1sdu_be_r helper_sve_ldff1sdu_be_r_aarch64
+#define helper_sve_ldnf1sdu_be_r helper_sve_ldnf1sdu_be_r_aarch64
+#define helper_sve_ldff1sds_le_r helper_sve_ldff1sds_le_r_aarch64
+#define helper_sve_ldnf1sds_le_r helper_sve_ldnf1sds_le_r_aarch64
+#define helper_sve_ldff1sds_be_r helper_sve_ldff1sds_be_r_aarch64
+#define helper_sve_ldnf1sds_be_r helper_sve_ldnf1sds_be_r_aarch64
+#define helper_sve_ldff1dd_le_r helper_sve_ldff1dd_le_r_aarch64
+#define helper_sve_ldnf1dd_le_r helper_sve_ldnf1dd_le_r_aarch64
+#define helper_sve_ldff1dd_be_r helper_sve_ldff1dd_be_r_aarch64
+#define helper_sve_ldnf1dd_be_r helper_sve_ldnf1dd_be_r_aarch64
+#define helper_sve_st1bb_r helper_sve_st1bb_r_aarch64
+#define helper_sve_st1bh_r helper_sve_st1bh_r_aarch64
+#define helper_sve_st1bs_r helper_sve_st1bs_r_aarch64
+#define helper_sve_st1bd_r helper_sve_st1bd_r_aarch64
+#define helper_sve_st2bb_r helper_sve_st2bb_r_aarch64
+#define helper_sve_st3bb_r helper_sve_st3bb_r_aarch64
+#define helper_sve_st4bb_r helper_sve_st4bb_r_aarch64
+#define helper_sve_st1hh_le_r helper_sve_st1hh_le_r_aarch64
+#define helper_sve_st1hh_be_r helper_sve_st1hh_be_r_aarch64
+#define helper_sve_st1hs_le_r helper_sve_st1hs_le_r_aarch64
+#define helper_sve_st1hs_be_r helper_sve_st1hs_be_r_aarch64
+#define helper_sve_st1hd_le_r helper_sve_st1hd_le_r_aarch64
+#define helper_sve_st1hd_be_r helper_sve_st1hd_be_r_aarch64
+#define helper_sve_st2hh_le_r helper_sve_st2hh_le_r_aarch64
+#define helper_sve_st2hh_be_r helper_sve_st2hh_be_r_aarch64
+#define helper_sve_st3hh_le_r helper_sve_st3hh_le_r_aarch64
+#define helper_sve_st3hh_be_r helper_sve_st3hh_be_r_aarch64
+#define helper_sve_st4hh_le_r helper_sve_st4hh_le_r_aarch64
+#define helper_sve_st4hh_be_r helper_sve_st4hh_be_r_aarch64
+#define helper_sve_st1ss_le_r helper_sve_st1ss_le_r_aarch64
+#define helper_sve_st1ss_be_r helper_sve_st1ss_be_r_aarch64
+#define helper_sve_st1sd_le_r helper_sve_st1sd_le_r_aarch64
+#define helper_sve_st1sd_be_r helper_sve_st1sd_be_r_aarch64
+#define helper_sve_st2ss_le_r helper_sve_st2ss_le_r_aarch64
+#define helper_sve_st2ss_be_r helper_sve_st2ss_be_r_aarch64
+#define helper_sve_st3ss_le_r helper_sve_st3ss_le_r_aarch64
+#define helper_sve_st3ss_be_r helper_sve_st3ss_be_r_aarch64
+#define helper_sve_st4ss_le_r helper_sve_st4ss_le_r_aarch64
+#define helper_sve_st4ss_be_r helper_sve_st4ss_be_r_aarch64
+#define helper_sve_st1dd_le_r helper_sve_st1dd_le_r_aarch64
+#define helper_sve_st1dd_be_r helper_sve_st1dd_be_r_aarch64
+#define helper_sve_st2dd_le_r helper_sve_st2dd_le_r_aarch64
+#define helper_sve_st2dd_be_r helper_sve_st2dd_be_r_aarch64
+#define helper_sve_st3dd_le_r helper_sve_st3dd_le_r_aarch64
+#define helper_sve_st3dd_be_r helper_sve_st3dd_be_r_aarch64
+#define helper_sve_st4dd_le_r helper_sve_st4dd_le_r_aarch64
+#define helper_sve_st4dd_be_r helper_sve_st4dd_be_r_aarch64
+#define helper_sve_ldbsu_zsu helper_sve_ldbsu_zsu_aarch64
+#define helper_sve_ldbsu_zss helper_sve_ldbsu_zss_aarch64
+#define helper_sve_ldbdu_zsu helper_sve_ldbdu_zsu_aarch64
+#define helper_sve_ldbdu_zss helper_sve_ldbdu_zss_aarch64
+#define helper_sve_ldbdu_zd helper_sve_ldbdu_zd_aarch64
+#define helper_sve_ldbss_zsu helper_sve_ldbss_zsu_aarch64
+#define helper_sve_ldbss_zss helper_sve_ldbss_zss_aarch64
+#define helper_sve_ldbds_zsu helper_sve_ldbds_zsu_aarch64
+#define helper_sve_ldbds_zss helper_sve_ldbds_zss_aarch64
+#define helper_sve_ldbds_zd helper_sve_ldbds_zd_aarch64
+#define helper_sve_ldhsu_le_zsu helper_sve_ldhsu_le_zsu_aarch64
+#define helper_sve_ldhsu_le_zss helper_sve_ldhsu_le_zss_aarch64
+#define helper_sve_ldhdu_le_zsu helper_sve_ldhdu_le_zsu_aarch64
+#define helper_sve_ldhdu_le_zss helper_sve_ldhdu_le_zss_aarch64
+#define helper_sve_ldhdu_le_zd helper_sve_ldhdu_le_zd_aarch64
+#define helper_sve_ldhsu_be_zsu helper_sve_ldhsu_be_zsu_aarch64
+#define helper_sve_ldhsu_be_zss helper_sve_ldhsu_be_zss_aarch64
+#define helper_sve_ldhdu_be_zsu helper_sve_ldhdu_be_zsu_aarch64
+#define helper_sve_ldhdu_be_zss helper_sve_ldhdu_be_zss_aarch64
+#define helper_sve_ldhdu_be_zd helper_sve_ldhdu_be_zd_aarch64
+#define helper_sve_ldhss_le_zsu helper_sve_ldhss_le_zsu_aarch64
+#define helper_sve_ldhss_le_zss helper_sve_ldhss_le_zss_aarch64
+#define helper_sve_ldhds_le_zsu helper_sve_ldhds_le_zsu_aarch64
+#define helper_sve_ldhds_le_zss helper_sve_ldhds_le_zss_aarch64
+#define helper_sve_ldhds_le_zd helper_sve_ldhds_le_zd_aarch64
+#define helper_sve_ldhss_be_zsu helper_sve_ldhss_be_zsu_aarch64
+#define helper_sve_ldhss_be_zss helper_sve_ldhss_be_zss_aarch64
+#define helper_sve_ldhds_be_zsu helper_sve_ldhds_be_zsu_aarch64
+#define helper_sve_ldhds_be_zss helper_sve_ldhds_be_zss_aarch64
+#define helper_sve_ldhds_be_zd helper_sve_ldhds_be_zd_aarch64
+#define helper_sve_ldss_le_zsu helper_sve_ldss_le_zsu_aarch64
+#define helper_sve_ldss_le_zss helper_sve_ldss_le_zss_aarch64
+#define helper_sve_ldsdu_le_zsu helper_sve_ldsdu_le_zsu_aarch64
+#define helper_sve_ldsdu_le_zss helper_sve_ldsdu_le_zss_aarch64
+#define helper_sve_ldsdu_le_zd helper_sve_ldsdu_le_zd_aarch64
+#define helper_sve_ldss_be_zsu helper_sve_ldss_be_zsu_aarch64
+#define helper_sve_ldss_be_zss helper_sve_ldss_be_zss_aarch64
+#define helper_sve_ldsdu_be_zsu helper_sve_ldsdu_be_zsu_aarch64
+#define helper_sve_ldsdu_be_zss helper_sve_ldsdu_be_zss_aarch64
+#define helper_sve_ldsdu_be_zd helper_sve_ldsdu_be_zd_aarch64
+#define helper_sve_ldsds_le_zsu helper_sve_ldsds_le_zsu_aarch64
+#define helper_sve_ldsds_le_zss helper_sve_ldsds_le_zss_aarch64
+#define helper_sve_ldsds_le_zd helper_sve_ldsds_le_zd_aarch64
+#define helper_sve_ldsds_be_zsu helper_sve_ldsds_be_zsu_aarch64
+#define helper_sve_ldsds_be_zss helper_sve_ldsds_be_zss_aarch64
+#define helper_sve_ldsds_be_zd helper_sve_ldsds_be_zd_aarch64
+#define helper_sve_lddd_le_zsu helper_sve_lddd_le_zsu_aarch64
+#define helper_sve_lddd_le_zss helper_sve_lddd_le_zss_aarch64
+#define helper_sve_lddd_le_zd helper_sve_lddd_le_zd_aarch64
+#define helper_sve_lddd_be_zsu helper_sve_lddd_be_zsu_aarch64
+#define helper_sve_lddd_be_zss helper_sve_lddd_be_zss_aarch64
+#define helper_sve_lddd_be_zd helper_sve_lddd_be_zd_aarch64
+#define helper_sve_ldffbsu_zsu helper_sve_ldffbsu_zsu_aarch64
+#define helper_sve_ldffbsu_zss helper_sve_ldffbsu_zss_aarch64
+#define helper_sve_ldffbdu_zsu helper_sve_ldffbdu_zsu_aarch64
+#define helper_sve_ldffbdu_zss helper_sve_ldffbdu_zss_aarch64
+#define helper_sve_ldffbdu_zd helper_sve_ldffbdu_zd_aarch64
+#define helper_sve_ldffbss_zsu helper_sve_ldffbss_zsu_aarch64
+#define helper_sve_ldffbss_zss helper_sve_ldffbss_zss_aarch64
+#define helper_sve_ldffbds_zsu helper_sve_ldffbds_zsu_aarch64
+#define helper_sve_ldffbds_zss helper_sve_ldffbds_zss_aarch64
+#define helper_sve_ldffbds_zd helper_sve_ldffbds_zd_aarch64
+#define helper_sve_ldffhsu_le_zsu helper_sve_ldffhsu_le_zsu_aarch64
+#define helper_sve_ldffhsu_le_zss helper_sve_ldffhsu_le_zss_aarch64
+#define helper_sve_ldffhdu_le_zsu helper_sve_ldffhdu_le_zsu_aarch64
+#define helper_sve_ldffhdu_le_zss helper_sve_ldffhdu_le_zss_aarch64
+#define helper_sve_ldffhdu_le_zd helper_sve_ldffhdu_le_zd_aarch64
+#define helper_sve_ldffhsu_be_zsu helper_sve_ldffhsu_be_zsu_aarch64
+#define helper_sve_ldffhsu_be_zss helper_sve_ldffhsu_be_zss_aarch64
+#define helper_sve_ldffhdu_be_zsu helper_sve_ldffhdu_be_zsu_aarch64
+#define helper_sve_ldffhdu_be_zss helper_sve_ldffhdu_be_zss_aarch64
+#define helper_sve_ldffhdu_be_zd helper_sve_ldffhdu_be_zd_aarch64
+#define helper_sve_ldffhss_le_zsu helper_sve_ldffhss_le_zsu_aarch64
+#define helper_sve_ldffhss_le_zss helper_sve_ldffhss_le_zss_aarch64
+#define helper_sve_ldffhds_le_zsu helper_sve_ldffhds_le_zsu_aarch64
+#define helper_sve_ldffhds_le_zss helper_sve_ldffhds_le_zss_aarch64
+#define helper_sve_ldffhds_le_zd helper_sve_ldffhds_le_zd_aarch64
+#define helper_sve_ldffhss_be_zsu helper_sve_ldffhss_be_zsu_aarch64
+#define helper_sve_ldffhss_be_zss helper_sve_ldffhss_be_zss_aarch64
+#define helper_sve_ldffhds_be_zsu helper_sve_ldffhds_be_zsu_aarch64
+#define helper_sve_ldffhds_be_zss helper_sve_ldffhds_be_zss_aarch64
+#define helper_sve_ldffhds_be_zd helper_sve_ldffhds_be_zd_aarch64
+#define helper_sve_ldffss_le_zsu helper_sve_ldffss_le_zsu_aarch64
+#define helper_sve_ldffss_le_zss helper_sve_ldffss_le_zss_aarch64
+#define helper_sve_ldffsdu_le_zsu helper_sve_ldffsdu_le_zsu_aarch64
+#define helper_sve_ldffsdu_le_zss helper_sve_ldffsdu_le_zss_aarch64
+#define helper_sve_ldffsdu_le_zd helper_sve_ldffsdu_le_zd_aarch64
+#define helper_sve_ldffss_be_zsu helper_sve_ldffss_be_zsu_aarch64
+#define helper_sve_ldffss_be_zss helper_sve_ldffss_be_zss_aarch64
+#define helper_sve_ldffsdu_be_zsu helper_sve_ldffsdu_be_zsu_aarch64
+#define helper_sve_ldffsdu_be_zss helper_sve_ldffsdu_be_zss_aarch64
+#define helper_sve_ldffsdu_be_zd helper_sve_ldffsdu_be_zd_aarch64
+#define helper_sve_ldffsds_le_zsu helper_sve_ldffsds_le_zsu_aarch64
+#define helper_sve_ldffsds_le_zss helper_sve_ldffsds_le_zss_aarch64
+#define helper_sve_ldffsds_le_zd helper_sve_ldffsds_le_zd_aarch64
+#define helper_sve_ldffsds_be_zsu helper_sve_ldffsds_be_zsu_aarch64
+#define helper_sve_ldffsds_be_zss helper_sve_ldffsds_be_zss_aarch64
+#define helper_sve_ldffsds_be_zd helper_sve_ldffsds_be_zd_aarch64
+#define helper_sve_ldffdd_le_zsu helper_sve_ldffdd_le_zsu_aarch64
+#define helper_sve_ldffdd_le_zss helper_sve_ldffdd_le_zss_aarch64
+#define helper_sve_ldffdd_le_zd helper_sve_ldffdd_le_zd_aarch64
+#define helper_sve_ldffdd_be_zsu helper_sve_ldffdd_be_zsu_aarch64
+#define helper_sve_ldffdd_be_zss helper_sve_ldffdd_be_zss_aarch64
+#define helper_sve_ldffdd_be_zd helper_sve_ldffdd_be_zd_aarch64
+#define helper_sve_stbs_zsu helper_sve_stbs_zsu_aarch64
+#define helper_sve_sths_le_zsu helper_sve_sths_le_zsu_aarch64
+#define helper_sve_sths_be_zsu helper_sve_sths_be_zsu_aarch64
+#define helper_sve_stss_le_zsu helper_sve_stss_le_zsu_aarch64
+#define helper_sve_stss_be_zsu helper_sve_stss_be_zsu_aarch64
+#define helper_sve_stbs_zss helper_sve_stbs_zss_aarch64
+#define helper_sve_sths_le_zss helper_sve_sths_le_zss_aarch64
+#define helper_sve_sths_be_zss helper_sve_sths_be_zss_aarch64
+#define helper_sve_stss_le_zss helper_sve_stss_le_zss_aarch64
+#define helper_sve_stss_be_zss helper_sve_stss_be_zss_aarch64
+#define helper_sve_stbd_zsu helper_sve_stbd_zsu_aarch64
+#define helper_sve_sthd_le_zsu helper_sve_sthd_le_zsu_aarch64
+#define helper_sve_sthd_be_zsu helper_sve_sthd_be_zsu_aarch64
+#define helper_sve_stsd_le_zsu helper_sve_stsd_le_zsu_aarch64
+#define helper_sve_stsd_be_zsu helper_sve_stsd_be_zsu_aarch64
+#define helper_sve_stdd_le_zsu helper_sve_stdd_le_zsu_aarch64
+#define helper_sve_stdd_be_zsu helper_sve_stdd_be_zsu_aarch64
+#define helper_sve_stbd_zss helper_sve_stbd_zss_aarch64
+#define helper_sve_sthd_le_zss helper_sve_sthd_le_zss_aarch64
+#define helper_sve_sthd_be_zss helper_sve_sthd_be_zss_aarch64
+#define helper_sve_stsd_le_zss helper_sve_stsd_le_zss_aarch64
+#define helper_sve_stsd_be_zss helper_sve_stsd_be_zss_aarch64
+#define helper_sve_stdd_le_zss helper_sve_stdd_le_zss_aarch64
+#define helper_sve_stdd_be_zss helper_sve_stdd_be_zss_aarch64
+#define helper_sve_stbd_zd helper_sve_stbd_zd_aarch64
+#define helper_sve_sthd_le_zd helper_sve_sthd_le_zd_aarch64
+#define helper_sve_sthd_be_zd helper_sve_sthd_be_zd_aarch64
+#define helper_sve_stsd_le_zd helper_sve_stsd_le_zd_aarch64
+#define helper_sve_stsd_be_zd helper_sve_stsd_be_zd_aarch64
+#define helper_sve_stdd_le_zd helper_sve_stdd_le_zd_aarch64
+#define helper_sve_stdd_be_zd helper_sve_stdd_be_zd_aarch64
+#define arm_cpu_do_unaligned_access arm_cpu_do_unaligned_access_aarch64
+#define arm_cpu_do_transaction_failed arm_cpu_do_transaction_failed_aarch64
+#define arm_cpu_tlb_fill arm_cpu_tlb_fill_aarch64
+#define a64_translate_init a64_translate_init_aarch64
+#define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64
+#define unallocated_encoding unallocated_encoding_aarch64
+#define new_tmp_a64 new_tmp_a64_aarch64
+#define new_tmp_a64_zero new_tmp_a64_zero_aarch64
+#define cpu_reg cpu_reg_aarch64
+#define cpu_reg_sp cpu_reg_sp_aarch64
+#define read_cpu_reg read_cpu_reg_aarch64
+#define read_cpu_reg_sp read_cpu_reg_sp_aarch64
+#define write_fp_dreg write_fp_dreg_aarch64
+#define get_fpstatus_ptr get_fpstatus_ptr_aarch64
+#define sve_access_check sve_access_check_aarch64
+#define logic_imm_decode_wmask logic_imm_decode_wmask_aarch64
+#define arm_translate_init arm_translate_init_aarch64
+#define arm_test_cc arm_test_cc_aarch64
+#define arm_free_cc arm_free_cc_aarch64
+#define arm_jump_cc arm_jump_cc_aarch64
+#define arm_gen_test_cc arm_gen_test_cc_aarch64
+#define vfp_expand_imm vfp_expand_imm_aarch64
+#define gen_cmtst_i64 gen_cmtst_i64_aarch64
+#define gen_ushl_i32 gen_ushl_i32_aarch64
+#define gen_ushl_i64 gen_ushl_i64_aarch64
+#define gen_sshl_i32 gen_sshl_i32_aarch64
+#define gen_sshl_i64 gen_sshl_i64_aarch64
+#define gen_intermediate_code gen_intermediate_code_aarch64
+#define restore_state_to_opc restore_state_to_opc_aarch64
+#define disas_sve disas_sve_aarch64
+#define helper_neon_qrdmlah_s16 helper_neon_qrdmlah_s16_aarch64
+#define helper_gvec_qrdmlah_s16 helper_gvec_qrdmlah_s16_aarch64
+#define helper_neon_qrdmlsh_s16 helper_neon_qrdmlsh_s16_aarch64
+#define helper_gvec_qrdmlsh_s16 helper_gvec_qrdmlsh_s16_aarch64
+#define helper_neon_qrdmlah_s32 helper_neon_qrdmlah_s32_aarch64
+#define helper_gvec_qrdmlah_s32 helper_gvec_qrdmlah_s32_aarch64
+#define helper_neon_qrdmlsh_s32 helper_neon_qrdmlsh_s32_aarch64
+#define helper_gvec_qrdmlsh_s32 helper_gvec_qrdmlsh_s32_aarch64
+#define helper_gvec_sdot_b helper_gvec_sdot_b_aarch64
+#define helper_gvec_udot_b helper_gvec_udot_b_aarch64
+#define helper_gvec_sdot_h helper_gvec_sdot_h_aarch64
+#define helper_gvec_udot_h helper_gvec_udot_h_aarch64
+#define helper_gvec_sdot_idx_b helper_gvec_sdot_idx_b_aarch64
+#define helper_gvec_udot_idx_b helper_gvec_udot_idx_b_aarch64
+#define helper_gvec_sdot_idx_h helper_gvec_sdot_idx_h_aarch64
+#define helper_gvec_udot_idx_h helper_gvec_udot_idx_h_aarch64
+#define helper_gvec_fcaddh helper_gvec_fcaddh_aarch64
+#define helper_gvec_fcadds helper_gvec_fcadds_aarch64
+#define helper_gvec_fcaddd helper_gvec_fcaddd_aarch64
+#define helper_gvec_fcmlah helper_gvec_fcmlah_aarch64
+#define helper_gvec_fcmlah_idx helper_gvec_fcmlah_idx_aarch64
+#define helper_gvec_fcmlas helper_gvec_fcmlas_aarch64
+#define helper_gvec_fcmlas_idx helper_gvec_fcmlas_idx_aarch64
+#define helper_gvec_fcmlad helper_gvec_fcmlad_aarch64
+#define helper_gvec_frecpe_h helper_gvec_frecpe_h_aarch64
+#define helper_gvec_frecpe_s helper_gvec_frecpe_s_aarch64
+#define helper_gvec_frecpe_d helper_gvec_frecpe_d_aarch64
+#define helper_gvec_frsqrte_h helper_gvec_frsqrte_h_aarch64
+#define helper_gvec_frsqrte_s helper_gvec_frsqrte_s_aarch64
+#define helper_gvec_frsqrte_d helper_gvec_frsqrte_d_aarch64
+#define helper_gvec_fadd_h helper_gvec_fadd_h_aarch64
+#define helper_gvec_fadd_s helper_gvec_fadd_s_aarch64
+#define helper_gvec_fadd_d helper_gvec_fadd_d_aarch64
+#define helper_gvec_fsub_h helper_gvec_fsub_h_aarch64
+#define helper_gvec_fsub_s helper_gvec_fsub_s_aarch64
+#define helper_gvec_fsub_d helper_gvec_fsub_d_aarch64
+#define helper_gvec_fmul_h helper_gvec_fmul_h_aarch64
+#define helper_gvec_fmul_s helper_gvec_fmul_s_aarch64
+#define helper_gvec_fmul_d helper_gvec_fmul_d_aarch64
+#define helper_gvec_ftsmul_h helper_gvec_ftsmul_h_aarch64
+#define helper_gvec_ftsmul_s helper_gvec_ftsmul_s_aarch64
+#define helper_gvec_ftsmul_d helper_gvec_ftsmul_d_aarch64
+#define helper_gvec_recps_h helper_gvec_recps_h_aarch64
+#define helper_gvec_recps_s helper_gvec_recps_s_aarch64
+#define helper_gvec_recps_d helper_gvec_recps_d_aarch64
+#define helper_gvec_rsqrts_h helper_gvec_rsqrts_h_aarch64
+#define helper_gvec_rsqrts_s helper_gvec_rsqrts_s_aarch64
+#define helper_gvec_rsqrts_d helper_gvec_rsqrts_d_aarch64
+#define helper_gvec_fmul_idx_h helper_gvec_fmul_idx_h_aarch64
+#define helper_gvec_fmul_idx_s helper_gvec_fmul_idx_s_aarch64
+#define helper_gvec_fmul_idx_d helper_gvec_fmul_idx_d_aarch64
+#define helper_gvec_fmla_idx_h helper_gvec_fmla_idx_h_aarch64
+#define helper_gvec_fmla_idx_s helper_gvec_fmla_idx_s_aarch64
+#define helper_gvec_fmla_idx_d helper_gvec_fmla_idx_d_aarch64
+#define helper_gvec_uqadd_b helper_gvec_uqadd_b_aarch64
+#define helper_gvec_uqadd_h helper_gvec_uqadd_h_aarch64
+#define helper_gvec_uqadd_s helper_gvec_uqadd_s_aarch64
+#define helper_gvec_sqadd_b helper_gvec_sqadd_b_aarch64
+#define helper_gvec_sqadd_h helper_gvec_sqadd_h_aarch64
+#define helper_gvec_sqadd_s helper_gvec_sqadd_s_aarch64
+#define helper_gvec_uqsub_b helper_gvec_uqsub_b_aarch64
+#define helper_gvec_uqsub_h helper_gvec_uqsub_h_aarch64
+#define helper_gvec_uqsub_s helper_gvec_uqsub_s_aarch64
+#define helper_gvec_sqsub_b helper_gvec_sqsub_b_aarch64
+#define helper_gvec_sqsub_h helper_gvec_sqsub_h_aarch64
+#define helper_gvec_sqsub_s helper_gvec_sqsub_s_aarch64
+#define helper_gvec_uqadd_d helper_gvec_uqadd_d_aarch64
+#define helper_gvec_uqsub_d helper_gvec_uqsub_d_aarch64
+#define helper_gvec_sqadd_d helper_gvec_sqadd_d_aarch64
+#define helper_gvec_sqsub_d helper_gvec_sqsub_d_aarch64
+#define helper_gvec_fmlal_a32 helper_gvec_fmlal_a32_aarch64
+#define helper_gvec_fmlal_a64 helper_gvec_fmlal_a64_aarch64
+#define helper_gvec_fmlal_idx_a32 helper_gvec_fmlal_idx_a32_aarch64
+#define helper_gvec_fmlal_idx_a64 helper_gvec_fmlal_idx_a64_aarch64
+#define helper_gvec_sshl_b helper_gvec_sshl_b_aarch64
+#define helper_gvec_sshl_h helper_gvec_sshl_h_aarch64
+#define helper_gvec_ushl_b helper_gvec_ushl_b_aarch64
+#define helper_gvec_ushl_h helper_gvec_ushl_h_aarch64
+#define helper_gvec_pmul_b helper_gvec_pmul_b_aarch64
+#define helper_gvec_pmull_q helper_gvec_pmull_q_aarch64
+#define helper_neon_pmull_h helper_neon_pmull_h_aarch64
+#define helper_sve2_pmull_h helper_sve2_pmull_h_aarch64
+#define helper_vfp_get_fpscr helper_vfp_get_fpscr_aarch64
+#define vfp_get_fpscr vfp_get_fpscr_aarch64
+#define helper_vfp_set_fpscr helper_vfp_set_fpscr_aarch64
+#define vfp_set_fpscr vfp_set_fpscr_aarch64
+#define helper_vfp_adds helper_vfp_adds_aarch64
+#define helper_vfp_addd helper_vfp_addd_aarch64
+#define helper_vfp_subs helper_vfp_subs_aarch64
+#define helper_vfp_subd helper_vfp_subd_aarch64
+#define helper_vfp_muls helper_vfp_muls_aarch64
+#define helper_vfp_muld helper_vfp_muld_aarch64
+#define helper_vfp_divs helper_vfp_divs_aarch64
+#define helper_vfp_divd helper_vfp_divd_aarch64
+#define helper_vfp_mins helper_vfp_mins_aarch64
+#define helper_vfp_mind helper_vfp_mind_aarch64
+#define helper_vfp_maxs helper_vfp_maxs_aarch64
+#define helper_vfp_maxd helper_vfp_maxd_aarch64
+#define helper_vfp_minnums helper_vfp_minnums_aarch64
+#define helper_vfp_minnumd helper_vfp_minnumd_aarch64
+#define helper_vfp_maxnums helper_vfp_maxnums_aarch64
+#define helper_vfp_maxnumd helper_vfp_maxnumd_aarch64
+#define helper_vfp_negs helper_vfp_negs_aarch64
+#define helper_vfp_negd helper_vfp_negd_aarch64
+#define helper_vfp_abss helper_vfp_abss_aarch64
+#define helper_vfp_absd helper_vfp_absd_aarch64
+#define helper_vfp_sqrts helper_vfp_sqrts_aarch64
+#define helper_vfp_sqrtd helper_vfp_sqrtd_aarch64
+#define helper_vfp_cmps helper_vfp_cmps_aarch64
+#define helper_vfp_cmpes helper_vfp_cmpes_aarch64
+#define helper_vfp_cmpd helper_vfp_cmpd_aarch64
+#define helper_vfp_cmped helper_vfp_cmped_aarch64
+#define helper_vfp_sitoh helper_vfp_sitoh_aarch64
+#define helper_vfp_tosih helper_vfp_tosih_aarch64
+#define helper_vfp_tosizh helper_vfp_tosizh_aarch64
+#define helper_vfp_sitos helper_vfp_sitos_aarch64
+#define helper_vfp_tosis helper_vfp_tosis_aarch64
+#define helper_vfp_tosizs helper_vfp_tosizs_aarch64
+#define helper_vfp_sitod helper_vfp_sitod_aarch64
+#define helper_vfp_tosid helper_vfp_tosid_aarch64
+#define helper_vfp_tosizd helper_vfp_tosizd_aarch64
+#define helper_vfp_uitoh helper_vfp_uitoh_aarch64
+#define helper_vfp_touih helper_vfp_touih_aarch64
+#define helper_vfp_touizh helper_vfp_touizh_aarch64
+#define helper_vfp_uitos helper_vfp_uitos_aarch64
+#define helper_vfp_touis helper_vfp_touis_aarch64
+#define helper_vfp_touizs helper_vfp_touizs_aarch64
+#define helper_vfp_uitod helper_vfp_uitod_aarch64
+#define helper_vfp_touid helper_vfp_touid_aarch64
+#define helper_vfp_touizd helper_vfp_touizd_aarch64
+#define helper_vfp_fcvtds helper_vfp_fcvtds_aarch64
+#define helper_vfp_fcvtsd helper_vfp_fcvtsd_aarch64
+#define helper_vfp_shtod helper_vfp_shtod_aarch64
+#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_aarch64
+#define helper_vfp_toshd helper_vfp_toshd_aarch64
+#define helper_vfp_sltod helper_vfp_sltod_aarch64
+#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_aarch64
+#define helper_vfp_tosld helper_vfp_tosld_aarch64
+#define helper_vfp_sqtod helper_vfp_sqtod_aarch64
+#define helper_vfp_tosqd helper_vfp_tosqd_aarch64
+#define helper_vfp_uhtod helper_vfp_uhtod_aarch64
+#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_aarch64
+#define helper_vfp_touhd helper_vfp_touhd_aarch64
+#define helper_vfp_ultod helper_vfp_ultod_aarch64
+#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_aarch64
+#define helper_vfp_tould helper_vfp_tould_aarch64
+#define helper_vfp_uqtod helper_vfp_uqtod_aarch64
+#define helper_vfp_touqd helper_vfp_touqd_aarch64
+#define helper_vfp_shtos helper_vfp_shtos_aarch64
+#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_aarch64
+#define helper_vfp_toshs helper_vfp_toshs_aarch64
+#define helper_vfp_sltos helper_vfp_sltos_aarch64
+#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_aarch64
+#define helper_vfp_tosls helper_vfp_tosls_aarch64
+#define helper_vfp_sqtos helper_vfp_sqtos_aarch64
+#define helper_vfp_tosqs helper_vfp_tosqs_aarch64
+#define helper_vfp_uhtos helper_vfp_uhtos_aarch64
+#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_aarch64
+#define helper_vfp_touhs helper_vfp_touhs_aarch64
+#define helper_vfp_ultos helper_vfp_ultos_aarch64
+#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_aarch64
helper_vfp_touls helper_vfp_touls_aarch64 +#define helper_vfp_uqtos helper_vfp_uqtos_aarch64 +#define helper_vfp_touqs helper_vfp_touqs_aarch64 +#define helper_vfp_sltoh helper_vfp_sltoh_aarch64 +#define helper_vfp_ultoh helper_vfp_ultoh_aarch64 +#define helper_vfp_sqtoh helper_vfp_sqtoh_aarch64 +#define helper_vfp_uqtoh helper_vfp_uqtoh_aarch64 +#define helper_vfp_toshh helper_vfp_toshh_aarch64 +#define helper_vfp_touhh helper_vfp_touhh_aarch64 +#define helper_vfp_toslh helper_vfp_toslh_aarch64 +#define helper_vfp_toulh helper_vfp_toulh_aarch64 +#define helper_vfp_tosqh helper_vfp_tosqh_aarch64 +#define helper_vfp_touqh helper_vfp_touqh_aarch64 +#define helper_set_rmode helper_set_rmode_aarch64 +#define helper_set_neon_rmode helper_set_neon_rmode_aarch64 +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_aarch64 +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_aarch64 +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_aarch64 +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_aarch64 +#define helper_recps_f32 helper_recps_f32_aarch64 +#define helper_rsqrts_f32 helper_rsqrts_f32_aarch64 +#define helper_recpe_f16 helper_recpe_f16_aarch64 +#define helper_recpe_f32 helper_recpe_f32_aarch64 +#define helper_recpe_f64 helper_recpe_f64_aarch64 +#define helper_rsqrte_f16 helper_rsqrte_f16_aarch64 +#define helper_rsqrte_f32 helper_rsqrte_f32_aarch64 +#define helper_rsqrte_f64 helper_rsqrte_f64_aarch64 +#define helper_recpe_u32 helper_recpe_u32_aarch64 +#define helper_rsqrte_u32 helper_rsqrte_u32_aarch64 +#define helper_vfp_muladds helper_vfp_muladds_aarch64 +#define helper_vfp_muladdd helper_vfp_muladdd_aarch64 +#define helper_rints_exact helper_rints_exact_aarch64 +#define helper_rintd_exact helper_rintd_exact_aarch64 +#define helper_rints helper_rints_aarch64 +#define helper_rintd helper_rintd_aarch64 +#define arm_rmode_to_sf arm_rmode_to_sf_aarch64 +#define helper_fjcvtzs helper_fjcvtzs_aarch64 +#define helper_vjcvt helper_vjcvt_aarch64 +#define helper_frint32_s helper_frint32_s_aarch64 +#define helper_frint64_s helper_frint64_s_aarch64 +#define helper_frint32_d helper_frint32_d_aarch64 +#define helper_frint64_d helper_frint64_d_aarch64 +#define helper_check_hcr_el2_trap helper_check_hcr_el2_trap_aarch64 +#define arm64_reg_reset arm64_reg_reset_aarch64 +#define arm64_reg_read arm64_reg_read_aarch64 +#define arm64_reg_write arm64_reg_write_aarch64 +#define mla_op mla_op_aarch64 +#define mls_op mls_op_aarch64 +#define sshl_op sshl_op_aarch64 +#define ushl_op ushl_op_aarch64 +#define uqsub_op uqsub_op_aarch64 +#define sqsub_op sqsub_op_aarch64 +#define uqadd_op uqadd_op_aarch64 +#define sqadd_op sqadd_op_aarch64 +#define sli_op sli_op_aarch64 +#define cmtst_op cmtst_op_aarch64 +#define sri_op sri_op_aarch64 +#define usra_op usra_op_aarch64 +#define ssra_op ssra_op_aarch64 +#define aarch64_translator_ops aarch64_translator_ops_aarch64 +#define pred_esz_masks pred_esz_masks_aarch64 #endif diff --git a/qemu/aarch64eb.h b/qemu/aarch64eb.h index fbdb37a7..da74a577 100644 --- a/qemu/aarch64eb.h +++ b/qemu/aarch64eb.h @@ -1,1367 +1,1288 @@ /* Autogen header for Unicorn Engine - DONOT MODIFY */ -#ifndef UNICORN_AUTOGEN_AARCH64EB_H -#define UNICORN_AUTOGEN_AARCH64EB_H -#define arm_release arm_release_aarch64eb -#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_aarch64eb -#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_aarch64eb -#define use_idiv_instructions_rt use_idiv_instructions_rt_aarch64eb -#define tcg_target_deposit_valid 
tcg_target_deposit_valid_aarch64eb -#define helper_power_down helper_power_down_aarch64eb -#define check_exit_request check_exit_request_aarch64eb -#define address_space_unregister address_space_unregister_aarch64eb -#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_aarch64eb -#define phys_mem_clean phys_mem_clean_aarch64eb -#define tb_cleanup tb_cleanup_aarch64eb +#ifndef UNICORN_AUTOGEN_aarch64eb_H +#define UNICORN_AUTOGEN_aarch64eb_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _aarch64eb +#endif +#define arm_arch arm_arch_aarch64eb +#define tb_target_set_jmp_target tb_target_set_jmp_target_aarch64eb +#define have_bmi1 have_bmi1_aarch64eb +#define have_popcnt have_popcnt_aarch64eb +#define have_avx1 have_avx1_aarch64eb +#define have_avx2 have_avx2_aarch64eb +#define have_isa have_isa_aarch64eb +#define have_altivec have_altivec_aarch64eb +#define have_vsx have_vsx_aarch64eb +#define flush_icache_range flush_icache_range_aarch64eb +#define s390_facilities s390_facilities_aarch64eb +#define tcg_dump_op tcg_dump_op_aarch64eb +#define tcg_dump_ops tcg_dump_ops_aarch64eb +#define tcg_gen_and_i64 tcg_gen_and_i64_aarch64eb +#define tcg_gen_discard_i64 tcg_gen_discard_i64_aarch64eb +#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_aarch64eb +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_aarch64eb +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_aarch64eb +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_aarch64eb +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_aarch64eb +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_aarch64eb +#define tcg_gen_ld_i64 tcg_gen_ld_i64_aarch64eb +#define tcg_gen_mov_i64 tcg_gen_mov_i64_aarch64eb +#define tcg_gen_movi_i64 tcg_gen_movi_i64_aarch64eb +#define tcg_gen_mul_i64 tcg_gen_mul_i64_aarch64eb +#define tcg_gen_or_i64 tcg_gen_or_i64_aarch64eb +#define tcg_gen_sar_i64 tcg_gen_sar_i64_aarch64eb +#define tcg_gen_shl_i64 tcg_gen_shl_i64_aarch64eb +#define tcg_gen_shr_i64 tcg_gen_shr_i64_aarch64eb +#define tcg_gen_st_i64 tcg_gen_st_i64_aarch64eb +#define tcg_gen_xor_i64 tcg_gen_xor_i64_aarch64eb +#define cpu_icount_to_ns cpu_icount_to_ns_aarch64eb +#define cpu_is_stopped cpu_is_stopped_aarch64eb +#define cpu_get_ticks cpu_get_ticks_aarch64eb +#define cpu_get_clock cpu_get_clock_aarch64eb +#define cpu_resume cpu_resume_aarch64eb +#define qemu_init_vcpu qemu_init_vcpu_aarch64eb +#define cpu_stop_current cpu_stop_current_aarch64eb +#define resume_all_vcpus resume_all_vcpus_aarch64eb +#define vm_start vm_start_aarch64eb +#define address_space_dispatch_compact address_space_dispatch_compact_aarch64eb +#define flatview_translate flatview_translate_aarch64eb +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_aarch64eb +#define qemu_get_cpu qemu_get_cpu_aarch64eb +#define cpu_address_space_init cpu_address_space_init_aarch64eb +#define cpu_get_address_space cpu_get_address_space_aarch64eb +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_aarch64eb +#define cpu_exec_initfn cpu_exec_initfn_aarch64eb +#define cpu_exec_realizefn cpu_exec_realizefn_aarch64eb +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_aarch64eb +#define cpu_watchpoint_insert cpu_watchpoint_insert_aarch64eb +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_aarch64eb +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_aarch64eb +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_aarch64eb +#define cpu_breakpoint_insert cpu_breakpoint_insert_aarch64eb +#define cpu_breakpoint_remove cpu_breakpoint_remove_aarch64eb +#define 
cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_aarch64eb +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_aarch64eb +#define cpu_abort cpu_abort_aarch64eb +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_aarch64eb +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_aarch64eb +#define flatview_add_to_dispatch flatview_add_to_dispatch_aarch64eb +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_aarch64eb +#define qemu_ram_get_offset qemu_ram_get_offset_aarch64eb +#define qemu_ram_get_used_length qemu_ram_get_used_length_aarch64eb +#define qemu_ram_is_shared qemu_ram_is_shared_aarch64eb +#define qemu_ram_pagesize qemu_ram_pagesize_aarch64eb +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_aarch64eb +#define qemu_ram_alloc qemu_ram_alloc_aarch64eb +#define qemu_ram_free qemu_ram_free_aarch64eb +#define qemu_map_ram_ptr qemu_map_ram_ptr_aarch64eb +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_aarch64eb +#define qemu_ram_block_from_host qemu_ram_block_from_host_aarch64eb +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_aarch64eb +#define cpu_check_watchpoint cpu_check_watchpoint_aarch64eb +#define iotlb_to_section iotlb_to_section_aarch64eb +#define address_space_dispatch_new address_space_dispatch_new_aarch64eb +#define address_space_dispatch_free address_space_dispatch_free_aarch64eb +#define flatview_read_continue flatview_read_continue_aarch64eb +#define address_space_read_full address_space_read_full_aarch64eb +#define address_space_write address_space_write_aarch64eb +#define address_space_rw address_space_rw_aarch64eb +#define cpu_physical_memory_rw cpu_physical_memory_rw_aarch64eb +#define address_space_write_rom address_space_write_rom_aarch64eb +#define cpu_flush_icache_range cpu_flush_icache_range_aarch64eb +#define cpu_exec_init_all cpu_exec_init_all_aarch64eb +#define address_space_access_valid address_space_access_valid_aarch64eb +#define address_space_map address_space_map_aarch64eb +#define address_space_unmap address_space_unmap_aarch64eb +#define cpu_physical_memory_map cpu_physical_memory_map_aarch64eb +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_aarch64eb +#define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64eb +#define qemu_target_page_size qemu_target_page_size_aarch64eb +#define qemu_target_page_bits qemu_target_page_bits_aarch64eb +#define qemu_target_page_bits_min qemu_target_page_bits_min_aarch64eb +#define target_words_bigendian target_words_bigendian_aarch64eb +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_aarch64eb +#define ram_block_discard_range ram_block_discard_range_aarch64eb +#define ramblock_is_pmem ramblock_is_pmem_aarch64eb +#define page_size_init page_size_init_aarch64eb +#define set_preferred_target_page_bits set_preferred_target_page_bits_aarch64eb +#define finalize_target_page_bits finalize_target_page_bits_aarch64eb +#define cpu_outb cpu_outb_aarch64eb +#define cpu_outw cpu_outw_aarch64eb +#define cpu_outl cpu_outl_aarch64eb +#define cpu_inb cpu_inb_aarch64eb +#define cpu_inw cpu_inw_aarch64eb +#define cpu_inl cpu_inl_aarch64eb #define memory_map memory_map_aarch64eb +#define memory_map_io memory_map_io_aarch64eb #define memory_map_ptr memory_map_ptr_aarch64eb #define memory_unmap memory_unmap_aarch64eb #define memory_free memory_free_aarch64eb -#define free_code_gen_buffer free_code_gen_buffer_aarch64eb -#define helper_raise_exception helper_raise_exception_aarch64eb -#define tcg_enabled 
tcg_enabled_aarch64eb -#define tcg_exec_init tcg_exec_init_aarch64eb -#define memory_register_types memory_register_types_aarch64eb -#define cpu_exec_init_all cpu_exec_init_all_aarch64eb -#define vm_start vm_start_aarch64eb -#define resume_all_vcpus resume_all_vcpus_aarch64eb -#define a15_l2ctlr_read a15_l2ctlr_read_aarch64eb -#define a64_translate_init a64_translate_init_aarch64eb -#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_aarch64eb -#define aa64_cacheop_access aa64_cacheop_access_aarch64eb -#define aa64_daif_access aa64_daif_access_aarch64eb -#define aa64_daif_write aa64_daif_write_aarch64eb -#define aa64_dczid_read aa64_dczid_read_aarch64eb -#define aa64_fpcr_read aa64_fpcr_read_aarch64eb -#define aa64_fpcr_write aa64_fpcr_write_aarch64eb -#define aa64_fpsr_read aa64_fpsr_read_aarch64eb -#define aa64_fpsr_write aa64_fpsr_write_aarch64eb -#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_aarch64eb -#define aa64_zva_access aa64_zva_access_aarch64eb -#define aarch64_banked_spsr_index aarch64_banked_spsr_index_aarch64eb -#define aarch64_restore_sp aarch64_restore_sp_aarch64eb -#define aarch64_save_sp aarch64_save_sp_aarch64eb -#define accel_find accel_find_aarch64eb -#define accel_init_machine accel_init_machine_aarch64eb -#define accel_type accel_type_aarch64eb -#define access_with_adjusted_size access_with_adjusted_size_aarch64eb -#define add128 add128_aarch64eb -#define add16_sat add16_sat_aarch64eb -#define add16_usat add16_usat_aarch64eb -#define add192 add192_aarch64eb -#define add8_sat add8_sat_aarch64eb -#define add8_usat add8_usat_aarch64eb -#define add_cpreg_to_hashtable add_cpreg_to_hashtable_aarch64eb -#define add_cpreg_to_list add_cpreg_to_list_aarch64eb -#define addFloat128Sigs addFloat128Sigs_aarch64eb -#define addFloat32Sigs addFloat32Sigs_aarch64eb -#define addFloat64Sigs addFloat64Sigs_aarch64eb -#define addFloatx80Sigs addFloatx80Sigs_aarch64eb -#define add_qemu_ldst_label add_qemu_ldst_label_aarch64eb -#define address_space_access_valid address_space_access_valid_aarch64eb -#define address_space_destroy address_space_destroy_aarch64eb -#define address_space_destroy_dispatch address_space_destroy_dispatch_aarch64eb -#define address_space_get_flatview address_space_get_flatview_aarch64eb -#define address_space_init address_space_init_aarch64eb -#define address_space_init_dispatch address_space_init_dispatch_aarch64eb -#define address_space_lookup_region address_space_lookup_region_aarch64eb -#define address_space_map address_space_map_aarch64eb -#define address_space_read address_space_read_aarch64eb -#define address_space_rw address_space_rw_aarch64eb -#define address_space_translate address_space_translate_aarch64eb -#define address_space_translate_for_iotlb address_space_translate_for_iotlb_aarch64eb -#define address_space_translate_internal address_space_translate_internal_aarch64eb -#define address_space_unmap address_space_unmap_aarch64eb -#define address_space_update_topology address_space_update_topology_aarch64eb -#define address_space_update_topology_pass address_space_update_topology_pass_aarch64eb -#define address_space_write address_space_write_aarch64eb -#define addrrange_contains addrrange_contains_aarch64eb -#define addrrange_end addrrange_end_aarch64eb -#define addrrange_equal addrrange_equal_aarch64eb -#define addrrange_intersection addrrange_intersection_aarch64eb -#define addrrange_intersects addrrange_intersects_aarch64eb -#define addrrange_make addrrange_make_aarch64eb -#define adjust_endianness 
adjust_endianness_aarch64eb -#define all_helpers all_helpers_aarch64eb -#define alloc_code_gen_buffer alloc_code_gen_buffer_aarch64eb -#define alloc_entry alloc_entry_aarch64eb -#define always_true always_true_aarch64eb -#define arm1026_initfn arm1026_initfn_aarch64eb -#define arm1136_initfn arm1136_initfn_aarch64eb -#define arm1136_r2_initfn arm1136_r2_initfn_aarch64eb -#define arm1176_initfn arm1176_initfn_aarch64eb -#define arm11mpcore_initfn arm11mpcore_initfn_aarch64eb -#define arm926_initfn arm926_initfn_aarch64eb -#define arm946_initfn arm946_initfn_aarch64eb -#define arm_ccnt_enabled arm_ccnt_enabled_aarch64eb -#define arm_cp_read_zero arm_cp_read_zero_aarch64eb -#define arm_cp_reset_ignore arm_cp_reset_ignore_aarch64eb -#define arm_cpu_do_interrupt arm_cpu_do_interrupt_aarch64eb -#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_aarch64eb -#define arm_cpu_finalizefn arm_cpu_finalizefn_aarch64eb -#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_aarch64eb -#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_aarch64eb -#define arm_cpu_initfn arm_cpu_initfn_aarch64eb -#define arm_cpu_list arm_cpu_list_aarch64eb -#define cpu_loop_exit cpu_loop_exit_aarch64eb -#define arm_cpu_post_init arm_cpu_post_init_aarch64eb -#define arm_cpu_realizefn arm_cpu_realizefn_aarch64eb -#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_aarch64eb -#define arm_cpu_register_types arm_cpu_register_types_aarch64eb -#define cpu_resume_from_signal cpu_resume_from_signal_aarch64eb -#define arm_cpus arm_cpus_aarch64eb -#define arm_cpu_set_pc arm_cpu_set_pc_aarch64eb -#define arm_cp_write_ignore arm_cp_write_ignore_aarch64eb -#define arm_current_el arm_current_el_aarch64eb -#define arm_dc_feature arm_dc_feature_aarch64eb -#define arm_debug_excp_handler arm_debug_excp_handler_aarch64eb -#define arm_debug_target_el arm_debug_target_el_aarch64eb -#define arm_el_is_aa64 arm_el_is_aa64_aarch64eb -#define arm_env_get_cpu arm_env_get_cpu_aarch64eb -#define arm_excp_target_el arm_excp_target_el_aarch64eb -#define arm_excp_unmasked arm_excp_unmasked_aarch64eb -#define arm_feature arm_feature_aarch64eb -#define arm_generate_debug_exceptions arm_generate_debug_exceptions_aarch64eb -#define gen_intermediate_code gen_intermediate_code_aarch64eb -#define gen_intermediate_code_pc gen_intermediate_code_pc_aarch64eb -#define arm_gen_test_cc arm_gen_test_cc_aarch64eb -#define arm_gt_ptimer_cb arm_gt_ptimer_cb_aarch64eb -#define arm_gt_vtimer_cb arm_gt_vtimer_cb_aarch64eb -#define arm_handle_psci_call arm_handle_psci_call_aarch64eb -#define arm_is_psci_call arm_is_psci_call_aarch64eb -#define arm_is_secure arm_is_secure_aarch64eb -#define arm_is_secure_below_el3 arm_is_secure_below_el3_aarch64eb -#define arm_ldl_code arm_ldl_code_aarch64eb -#define arm_lduw_code arm_lduw_code_aarch64eb -#define arm_log_exception arm_log_exception_aarch64eb -#define arm_reg_read arm_reg_read_aarch64eb -#define arm_reg_reset arm_reg_reset_aarch64eb -#define arm_reg_write arm_reg_write_aarch64eb -#define restore_state_to_opc restore_state_to_opc_aarch64eb -#define arm_rmode_to_sf arm_rmode_to_sf_aarch64eb -#define arm_singlestep_active arm_singlestep_active_aarch64eb -#define tlb_fill tlb_fill_aarch64eb -#define tlb_flush tlb_flush_aarch64eb -#define tlb_flush_page tlb_flush_page_aarch64eb -#define tlb_set_page tlb_set_page_aarch64eb -#define arm_translate_init arm_translate_init_aarch64eb -#define arm_v7m_class_init arm_v7m_class_init_aarch64eb -#define arm_v7m_cpu_do_interrupt 
arm_v7m_cpu_do_interrupt_aarch64eb -#define ats_access ats_access_aarch64eb -#define ats_write ats_write_aarch64eb -#define bad_mode_switch bad_mode_switch_aarch64eb -#define bank_number bank_number_aarch64eb -#define bitmap_zero_extend bitmap_zero_extend_aarch64eb -#define bp_wp_matches bp_wp_matches_aarch64eb -#define breakpoint_invalidate breakpoint_invalidate_aarch64eb -#define build_page_bitmap build_page_bitmap_aarch64eb -#define bus_add_child bus_add_child_aarch64eb -#define bus_class_init bus_class_init_aarch64eb -#define bus_info bus_info_aarch64eb -#define bus_unparent bus_unparent_aarch64eb -#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_aarch64eb -#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_aarch64eb -#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_aarch64eb -#define call_recip_estimate call_recip_estimate_aarch64eb -#define can_merge can_merge_aarch64eb -#define capacity_increase capacity_increase_aarch64eb -#define ccsidr_read ccsidr_read_aarch64eb -#define check_ap check_ap_aarch64eb -#define check_breakpoints check_breakpoints_aarch64eb -#define check_watchpoints check_watchpoints_aarch64eb -#define cho cho_aarch64eb -#define clear_bit clear_bit_aarch64eb -#define clz32 clz32_aarch64eb -#define clz64 clz64_aarch64eb -#define cmp_flatrange_addr cmp_flatrange_addr_aarch64eb -#define code_gen_alloc code_gen_alloc_aarch64eb -#define commonNaNToFloat128 commonNaNToFloat128_aarch64eb -#define commonNaNToFloat16 commonNaNToFloat16_aarch64eb -#define commonNaNToFloat32 commonNaNToFloat32_aarch64eb -#define commonNaNToFloat64 commonNaNToFloat64_aarch64eb -#define commonNaNToFloatx80 commonNaNToFloatx80_aarch64eb -#define compute_abs_deadline compute_abs_deadline_aarch64eb -#define cond_name cond_name_aarch64eb -#define configure_accelerator configure_accelerator_aarch64eb -#define container_get container_get_aarch64eb -#define container_info container_info_aarch64eb -#define container_register_types container_register_types_aarch64eb -#define contextidr_write contextidr_write_aarch64eb -#define core_log_global_start core_log_global_start_aarch64eb -#define core_log_global_stop core_log_global_stop_aarch64eb -#define core_memory_listener core_memory_listener_aarch64eb -#define cortexa15_cp_reginfo cortexa15_cp_reginfo_aarch64eb -#define cortex_a15_initfn cortex_a15_initfn_aarch64eb -#define cortexa8_cp_reginfo cortexa8_cp_reginfo_aarch64eb -#define cortex_a8_initfn cortex_a8_initfn_aarch64eb -#define cortexa9_cp_reginfo cortexa9_cp_reginfo_aarch64eb -#define cortex_a9_initfn cortex_a9_initfn_aarch64eb -#define cortex_m3_initfn cortex_m3_initfn_aarch64eb -#define count_cpreg count_cpreg_aarch64eb -#define countLeadingZeros32 countLeadingZeros32_aarch64eb -#define countLeadingZeros64 countLeadingZeros64_aarch64eb -#define cp_access_ok cp_access_ok_aarch64eb -#define cpacr_write cpacr_write_aarch64eb -#define cpreg_field_is_64bit cpreg_field_is_64bit_aarch64eb -#define cp_reginfo cp_reginfo_aarch64eb -#define cpreg_key_compare cpreg_key_compare_aarch64eb -#define cpreg_make_keylist cpreg_make_keylist_aarch64eb -#define cp_reg_reset cp_reg_reset_aarch64eb -#define cpreg_to_kvm_id cpreg_to_kvm_id_aarch64eb -#define cpsr_read cpsr_read_aarch64eb -#define cpsr_write cpsr_write_aarch64eb -#define cptype_valid cptype_valid_aarch64eb -#define cpu_abort cpu_abort_aarch64eb -#define cpu_arm_exec cpu_arm_exec_aarch64eb -#define cpu_arm_gen_code cpu_arm_gen_code_aarch64eb -#define cpu_arm_init cpu_arm_init_aarch64eb -#define 
cpu_breakpoint_insert cpu_breakpoint_insert_aarch64eb -#define cpu_breakpoint_remove cpu_breakpoint_remove_aarch64eb -#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_aarch64eb -#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_aarch64eb -#define cpu_can_do_io cpu_can_do_io_aarch64eb -#define cpu_can_run cpu_can_run_aarch64eb -#define cpu_class_init cpu_class_init_aarch64eb -#define cpu_common_class_by_name cpu_common_class_by_name_aarch64eb -#define cpu_common_exec_interrupt cpu_common_exec_interrupt_aarch64eb -#define cpu_common_get_arch_id cpu_common_get_arch_id_aarch64eb -#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_aarch64eb -#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_aarch64eb -#define cpu_common_has_work cpu_common_has_work_aarch64eb -#define cpu_common_initfn cpu_common_initfn_aarch64eb -#define cpu_common_noop cpu_common_noop_aarch64eb -#define cpu_common_parse_features cpu_common_parse_features_aarch64eb -#define cpu_common_realizefn cpu_common_realizefn_aarch64eb -#define cpu_common_reset cpu_common_reset_aarch64eb -#define cpu_dump_statistics cpu_dump_statistics_aarch64eb -#define cpu_exec_init cpu_exec_init_aarch64eb -#define cpu_flush_icache_range cpu_flush_icache_range_aarch64eb -#define cpu_gen_init cpu_gen_init_aarch64eb -#define cpu_get_clock cpu_get_clock_aarch64eb -#define cpu_get_real_ticks cpu_get_real_ticks_aarch64eb -#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_aarch64eb -#define cpu_handle_debug_exception cpu_handle_debug_exception_aarch64eb -#define cpu_handle_guest_debug cpu_handle_guest_debug_aarch64eb -#define cpu_inb cpu_inb_aarch64eb -#define cpu_inl cpu_inl_aarch64eb -#define cpu_interrupt cpu_interrupt_aarch64eb -#define cpu_interrupt_handler cpu_interrupt_handler_aarch64eb -#define cpu_inw cpu_inw_aarch64eb -#define cpu_io_recompile cpu_io_recompile_aarch64eb -#define cpu_is_stopped cpu_is_stopped_aarch64eb -#define cpu_ldl_code cpu_ldl_code_aarch64eb -#define cpu_ldub_code cpu_ldub_code_aarch64eb -#define cpu_lduw_code cpu_lduw_code_aarch64eb -#define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64eb -#define cpu_mmu_index cpu_mmu_index_aarch64eb -#define cpu_outb cpu_outb_aarch64eb -#define cpu_outl cpu_outl_aarch64eb -#define cpu_outw cpu_outw_aarch64eb -#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_aarch64eb -#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_aarch64eb -#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_aarch64eb -#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_aarch64eb -#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_aarch64eb -#define cpu_physical_memory_is_io cpu_physical_memory_is_io_aarch64eb -#define cpu_physical_memory_map cpu_physical_memory_map_aarch64eb -#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_aarch64eb -#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_aarch64eb -#define cpu_physical_memory_rw cpu_physical_memory_rw_aarch64eb -#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_aarch64eb -#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_aarch64eb -#define cpu_physical_memory_unmap cpu_physical_memory_unmap_aarch64eb -#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_aarch64eb -#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_aarch64eb -#define 
cpu_register cpu_register_aarch64eb -#define cpu_register_types cpu_register_types_aarch64eb -#define cpu_restore_state cpu_restore_state_aarch64eb -#define cpu_restore_state_from_tb cpu_restore_state_from_tb_aarch64eb -#define cpu_single_step cpu_single_step_aarch64eb -#define cpu_tb_exec cpu_tb_exec_aarch64eb -#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_aarch64eb -#define cpu_to_be64 cpu_to_be64_aarch64eb -#define cpu_to_le32 cpu_to_le32_aarch64eb -#define cpu_to_le64 cpu_to_le64_aarch64eb -#define cpu_type_info cpu_type_info_aarch64eb -#define cpu_unassigned_access cpu_unassigned_access_aarch64eb -#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_aarch64eb -#define cpu_watchpoint_insert cpu_watchpoint_insert_aarch64eb -#define cpu_watchpoint_remove cpu_watchpoint_remove_aarch64eb -#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_aarch64eb -#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_aarch64eb -#define crc32c_table crc32c_table_aarch64eb -#define create_new_memory_mapping create_new_memory_mapping_aarch64eb -#define csselr_write csselr_write_aarch64eb -#define cto32 cto32_aarch64eb -#define ctr_el0_access ctr_el0_access_aarch64eb -#define ctz32 ctz32_aarch64eb -#define ctz64 ctz64_aarch64eb -#define dacr_write dacr_write_aarch64eb -#define dbgbcr_write dbgbcr_write_aarch64eb -#define dbgbvr_write dbgbvr_write_aarch64eb -#define dbgwcr_write dbgwcr_write_aarch64eb -#define dbgwvr_write dbgwvr_write_aarch64eb -#define debug_cp_reginfo debug_cp_reginfo_aarch64eb -#define debug_frame debug_frame_aarch64eb -#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_aarch64eb -#define define_arm_cp_regs define_arm_cp_regs_aarch64eb -#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_aarch64eb -#define define_debug_regs define_debug_regs_aarch64eb -#define define_one_arm_cp_reg define_one_arm_cp_reg_aarch64eb -#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_aarch64eb -#define deposit32 deposit32_aarch64eb -#define deposit64 deposit64_aarch64eb -#define deregister_tm_clones deregister_tm_clones_aarch64eb -#define device_class_base_init device_class_base_init_aarch64eb -#define device_class_init device_class_init_aarch64eb -#define device_finalize device_finalize_aarch64eb -#define device_get_realized device_get_realized_aarch64eb -#define device_initfn device_initfn_aarch64eb -#define device_post_init device_post_init_aarch64eb -#define device_reset device_reset_aarch64eb -#define device_set_realized device_set_realized_aarch64eb -#define device_type_info device_type_info_aarch64eb -#define disas_arm_insn disas_arm_insn_aarch64eb -#define disas_coproc_insn disas_coproc_insn_aarch64eb -#define disas_dsp_insn disas_dsp_insn_aarch64eb -#define disas_iwmmxt_insn disas_iwmmxt_insn_aarch64eb -#define disas_neon_data_insn disas_neon_data_insn_aarch64eb -#define disas_neon_ls_insn disas_neon_ls_insn_aarch64eb -#define disas_thumb2_insn disas_thumb2_insn_aarch64eb -#define disas_thumb_insn disas_thumb_insn_aarch64eb -#define disas_vfp_insn disas_vfp_insn_aarch64eb -#define disas_vfp_v8_insn disas_vfp_v8_insn_aarch64eb -#define do_arm_semihosting do_arm_semihosting_aarch64eb -#define do_clz16 do_clz16_aarch64eb -#define do_clz8 do_clz8_aarch64eb -#define do_constant_folding do_constant_folding_aarch64eb -#define do_constant_folding_2 do_constant_folding_2_aarch64eb -#define do_constant_folding_cond do_constant_folding_cond_aarch64eb -#define do_constant_folding_cond2 do_constant_folding_cond2_aarch64eb 
-#define do_constant_folding_cond_32 do_constant_folding_cond_32_aarch64eb -#define do_constant_folding_cond_64 do_constant_folding_cond_64_aarch64eb -#define do_constant_folding_cond_eq do_constant_folding_cond_eq_aarch64eb -#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_aarch64eb -#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_aarch64eb -#define do_ssat do_ssat_aarch64eb -#define do_usad do_usad_aarch64eb -#define do_usat do_usat_aarch64eb -#define do_v7m_exception_exit do_v7m_exception_exit_aarch64eb -#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_aarch64eb -#define dummy_func dummy_func_aarch64eb -#define dummy_section dummy_section_aarch64eb -#define _DYNAMIC _DYNAMIC_aarch64eb -#define _edata _edata_aarch64eb -#define _end _end_aarch64eb -#define end_list end_list_aarch64eb -#define eq128 eq128_aarch64eb -#define ErrorClass_lookup ErrorClass_lookup_aarch64eb -#define error_copy error_copy_aarch64eb -#define error_exit error_exit_aarch64eb -#define error_get_class error_get_class_aarch64eb -#define error_get_pretty error_get_pretty_aarch64eb -#define error_setg_file_open error_setg_file_open_aarch64eb -#define estimateDiv128To64 estimateDiv128To64_aarch64eb -#define estimateSqrt32 estimateSqrt32_aarch64eb -#define excnames excnames_aarch64eb -#define excp_is_internal excp_is_internal_aarch64eb -#define extended_addresses_enabled extended_addresses_enabled_aarch64eb -#define extended_mpu_ap_bits extended_mpu_ap_bits_aarch64eb -#define extract32 extract32_aarch64eb -#define extract64 extract64_aarch64eb -#define extractFloat128Exp extractFloat128Exp_aarch64eb -#define extractFloat128Frac0 extractFloat128Frac0_aarch64eb -#define extractFloat128Frac1 extractFloat128Frac1_aarch64eb -#define extractFloat128Sign extractFloat128Sign_aarch64eb -#define extractFloat16Exp extractFloat16Exp_aarch64eb -#define extractFloat16Frac extractFloat16Frac_aarch64eb -#define extractFloat16Sign extractFloat16Sign_aarch64eb -#define extractFloat32Exp extractFloat32Exp_aarch64eb -#define extractFloat32Frac extractFloat32Frac_aarch64eb -#define extractFloat32Sign extractFloat32Sign_aarch64eb -#define extractFloat64Exp extractFloat64Exp_aarch64eb -#define extractFloat64Frac extractFloat64Frac_aarch64eb -#define extractFloat64Sign extractFloat64Sign_aarch64eb -#define extractFloatx80Exp extractFloatx80Exp_aarch64eb -#define extractFloatx80Frac extractFloatx80Frac_aarch64eb -#define extractFloatx80Sign extractFloatx80Sign_aarch64eb -#define fcse_write fcse_write_aarch64eb -#define find_better_copy find_better_copy_aarch64eb -#define find_default_machine find_default_machine_aarch64eb -#define find_desc_by_name find_desc_by_name_aarch64eb -#define find_first_bit find_first_bit_aarch64eb -#define find_paging_enabled_cpu find_paging_enabled_cpu_aarch64eb -#define find_ram_block find_ram_block_aarch64eb -#define find_ram_offset find_ram_offset_aarch64eb -#define find_string find_string_aarch64eb -#define find_type find_type_aarch64eb -#define _fini _fini_aarch64eb -#define flatrange_equal flatrange_equal_aarch64eb -#define flatview_destroy flatview_destroy_aarch64eb -#define flatview_init flatview_init_aarch64eb -#define flatview_insert flatview_insert_aarch64eb -#define flatview_lookup flatview_lookup_aarch64eb -#define flatview_ref flatview_ref_aarch64eb -#define flatview_simplify flatview_simplify_aarch64eb #define flatview_unref flatview_unref_aarch64eb -#define float128_add float128_add_aarch64eb -#define float128_compare float128_compare_aarch64eb -#define float128_compare_internal 
float128_compare_internal_aarch64eb -#define float128_compare_quiet float128_compare_quiet_aarch64eb -#define float128_default_nan float128_default_nan_aarch64eb -#define float128_div float128_div_aarch64eb -#define float128_eq float128_eq_aarch64eb -#define float128_eq_quiet float128_eq_quiet_aarch64eb -#define float128_is_quiet_nan float128_is_quiet_nan_aarch64eb -#define float128_is_signaling_nan float128_is_signaling_nan_aarch64eb -#define float128_le float128_le_aarch64eb -#define float128_le_quiet float128_le_quiet_aarch64eb -#define float128_lt float128_lt_aarch64eb -#define float128_lt_quiet float128_lt_quiet_aarch64eb -#define float128_maybe_silence_nan float128_maybe_silence_nan_aarch64eb -#define float128_mul float128_mul_aarch64eb -#define float128_rem float128_rem_aarch64eb -#define float128_round_to_int float128_round_to_int_aarch64eb -#define float128_scalbn float128_scalbn_aarch64eb -#define float128_sqrt float128_sqrt_aarch64eb -#define float128_sub float128_sub_aarch64eb -#define float128ToCommonNaN float128ToCommonNaN_aarch64eb -#define float128_to_float32 float128_to_float32_aarch64eb -#define float128_to_float64 float128_to_float64_aarch64eb -#define float128_to_floatx80 float128_to_floatx80_aarch64eb -#define float128_to_int32 float128_to_int32_aarch64eb -#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_aarch64eb -#define float128_to_int64 float128_to_int64_aarch64eb -#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_aarch64eb -#define float128_unordered float128_unordered_aarch64eb -#define float128_unordered_quiet float128_unordered_quiet_aarch64eb -#define float16_default_nan float16_default_nan_aarch64eb +#define address_space_get_flatview address_space_get_flatview_aarch64eb +#define memory_region_transaction_begin memory_region_transaction_begin_aarch64eb +#define memory_region_transaction_commit memory_region_transaction_commit_aarch64eb +#define memory_region_init memory_region_init_aarch64eb +#define memory_region_access_valid memory_region_access_valid_aarch64eb +#define memory_region_dispatch_read memory_region_dispatch_read_aarch64eb +#define memory_region_dispatch_write memory_region_dispatch_write_aarch64eb +#define memory_region_init_io memory_region_init_io_aarch64eb +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_aarch64eb +#define memory_region_size memory_region_size_aarch64eb +#define memory_region_set_readonly memory_region_set_readonly_aarch64eb +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_aarch64eb +#define memory_region_from_host memory_region_from_host_aarch64eb +#define memory_region_get_ram_addr memory_region_get_ram_addr_aarch64eb +#define memory_region_add_subregion memory_region_add_subregion_aarch64eb +#define memory_region_del_subregion memory_region_del_subregion_aarch64eb +#define memory_region_find memory_region_find_aarch64eb +#define memory_listener_register memory_listener_register_aarch64eb +#define memory_listener_unregister memory_listener_unregister_aarch64eb +#define address_space_remove_listeners address_space_remove_listeners_aarch64eb +#define address_space_init address_space_init_aarch64eb +#define address_space_destroy address_space_destroy_aarch64eb +#define memory_region_init_ram memory_region_init_ram_aarch64eb +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_aarch64eb +#define exec_inline_op exec_inline_op_aarch64eb +#define floatx80_default_nan floatx80_default_nan_aarch64eb +#define float_raise 
float_raise_aarch64eb #define float16_is_quiet_nan float16_is_quiet_nan_aarch64eb #define float16_is_signaling_nan float16_is_signaling_nan_aarch64eb -#define float16_maybe_silence_nan float16_maybe_silence_nan_aarch64eb -#define float16ToCommonNaN float16ToCommonNaN_aarch64eb -#define float16_to_float32 float16_to_float32_aarch64eb -#define float16_to_float64 float16_to_float64_aarch64eb -#define float32_abs float32_abs_aarch64eb -#define float32_add float32_add_aarch64eb -#define float32_chs float32_chs_aarch64eb -#define float32_compare float32_compare_aarch64eb -#define float32_compare_internal float32_compare_internal_aarch64eb -#define float32_compare_quiet float32_compare_quiet_aarch64eb -#define float32_default_nan float32_default_nan_aarch64eb -#define float32_div float32_div_aarch64eb -#define float32_eq float32_eq_aarch64eb -#define float32_eq_quiet float32_eq_quiet_aarch64eb -#define float32_exp2 float32_exp2_aarch64eb -#define float32_exp2_coefficients float32_exp2_coefficients_aarch64eb -#define float32_is_any_nan float32_is_any_nan_aarch64eb -#define float32_is_infinity float32_is_infinity_aarch64eb -#define float32_is_neg float32_is_neg_aarch64eb #define float32_is_quiet_nan float32_is_quiet_nan_aarch64eb #define float32_is_signaling_nan float32_is_signaling_nan_aarch64eb -#define float32_is_zero float32_is_zero_aarch64eb -#define float32_is_zero_or_denormal float32_is_zero_or_denormal_aarch64eb -#define float32_le float32_le_aarch64eb -#define float32_le_quiet float32_le_quiet_aarch64eb -#define float32_log2 float32_log2_aarch64eb -#define float32_lt float32_lt_aarch64eb -#define float32_lt_quiet float32_lt_quiet_aarch64eb +#define float64_is_quiet_nan float64_is_quiet_nan_aarch64eb +#define float64_is_signaling_nan float64_is_signaling_nan_aarch64eb +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_aarch64eb +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_aarch64eb +#define floatx80_silence_nan floatx80_silence_nan_aarch64eb +#define propagateFloatx80NaN propagateFloatx80NaN_aarch64eb +#define float128_is_quiet_nan float128_is_quiet_nan_aarch64eb +#define float128_is_signaling_nan float128_is_signaling_nan_aarch64eb +#define float128_silence_nan float128_silence_nan_aarch64eb +#define float16_add float16_add_aarch64eb +#define float16_sub float16_sub_aarch64eb +#define float32_add float32_add_aarch64eb +#define float32_sub float32_sub_aarch64eb +#define float64_add float64_add_aarch64eb +#define float64_sub float64_sub_aarch64eb +#define float16_mul float16_mul_aarch64eb +#define float32_mul float32_mul_aarch64eb +#define float64_mul float64_mul_aarch64eb +#define float16_muladd float16_muladd_aarch64eb +#define float32_muladd float32_muladd_aarch64eb +#define float64_muladd float64_muladd_aarch64eb +#define float16_div float16_div_aarch64eb +#define float32_div float32_div_aarch64eb +#define float64_div float64_div_aarch64eb +#define float16_to_float32 float16_to_float32_aarch64eb +#define float16_to_float64 float16_to_float64_aarch64eb +#define float32_to_float16 float32_to_float16_aarch64eb +#define float32_to_float64 float32_to_float64_aarch64eb +#define float64_to_float16 float64_to_float16_aarch64eb +#define float64_to_float32 float64_to_float32_aarch64eb +#define float16_round_to_int float16_round_to_int_aarch64eb +#define float32_round_to_int float32_round_to_int_aarch64eb +#define float64_round_to_int float64_round_to_int_aarch64eb +#define float16_to_int16_scalbn float16_to_int16_scalbn_aarch64eb +#define float16_to_int32_scalbn 
float16_to_int32_scalbn_aarch64eb +#define float16_to_int64_scalbn float16_to_int64_scalbn_aarch64eb +#define float32_to_int16_scalbn float32_to_int16_scalbn_aarch64eb +#define float32_to_int32_scalbn float32_to_int32_scalbn_aarch64eb +#define float32_to_int64_scalbn float32_to_int64_scalbn_aarch64eb +#define float64_to_int16_scalbn float64_to_int16_scalbn_aarch64eb +#define float64_to_int32_scalbn float64_to_int32_scalbn_aarch64eb +#define float64_to_int64_scalbn float64_to_int64_scalbn_aarch64eb +#define float16_to_int16 float16_to_int16_aarch64eb +#define float16_to_int32 float16_to_int32_aarch64eb +#define float16_to_int64 float16_to_int64_aarch64eb +#define float32_to_int16 float32_to_int16_aarch64eb +#define float32_to_int32 float32_to_int32_aarch64eb +#define float32_to_int64 float32_to_int64_aarch64eb +#define float64_to_int16 float64_to_int16_aarch64eb +#define float64_to_int32 float64_to_int32_aarch64eb +#define float64_to_int64 float64_to_int64_aarch64eb +#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_aarch64eb +#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_aarch64eb +#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_aarch64eb +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_aarch64eb +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_aarch64eb +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_aarch64eb +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_aarch64eb +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_aarch64eb +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_aarch64eb +#define float16_to_uint16_scalbn float16_to_uint16_scalbn_aarch64eb +#define float16_to_uint32_scalbn float16_to_uint32_scalbn_aarch64eb +#define float16_to_uint64_scalbn float16_to_uint64_scalbn_aarch64eb +#define float32_to_uint16_scalbn float32_to_uint16_scalbn_aarch64eb +#define float32_to_uint32_scalbn float32_to_uint32_scalbn_aarch64eb +#define float32_to_uint64_scalbn float32_to_uint64_scalbn_aarch64eb +#define float64_to_uint16_scalbn float64_to_uint16_scalbn_aarch64eb +#define float64_to_uint32_scalbn float64_to_uint32_scalbn_aarch64eb +#define float64_to_uint64_scalbn float64_to_uint64_scalbn_aarch64eb +#define float16_to_uint16 float16_to_uint16_aarch64eb +#define float16_to_uint32 float16_to_uint32_aarch64eb +#define float16_to_uint64 float16_to_uint64_aarch64eb +#define float32_to_uint16 float32_to_uint16_aarch64eb +#define float32_to_uint32 float32_to_uint32_aarch64eb +#define float32_to_uint64 float32_to_uint64_aarch64eb +#define float64_to_uint16 float64_to_uint16_aarch64eb +#define float64_to_uint32 float64_to_uint32_aarch64eb +#define float64_to_uint64 float64_to_uint64_aarch64eb +#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_aarch64eb +#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_aarch64eb +#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_aarch64eb +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_aarch64eb +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_aarch64eb +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_aarch64eb +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_aarch64eb +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_aarch64eb +#define float64_to_uint64_round_to_zero 
float64_to_uint64_round_to_zero_aarch64eb +#define int64_to_float16_scalbn int64_to_float16_scalbn_aarch64eb +#define int32_to_float16_scalbn int32_to_float16_scalbn_aarch64eb +#define int16_to_float16_scalbn int16_to_float16_scalbn_aarch64eb +#define int64_to_float16 int64_to_float16_aarch64eb +#define int32_to_float16 int32_to_float16_aarch64eb +#define int16_to_float16 int16_to_float16_aarch64eb +#define int64_to_float32_scalbn int64_to_float32_scalbn_aarch64eb +#define int32_to_float32_scalbn int32_to_float32_scalbn_aarch64eb +#define int16_to_float32_scalbn int16_to_float32_scalbn_aarch64eb +#define int64_to_float32 int64_to_float32_aarch64eb +#define int32_to_float32 int32_to_float32_aarch64eb +#define int16_to_float32 int16_to_float32_aarch64eb +#define int64_to_float64_scalbn int64_to_float64_scalbn_aarch64eb +#define int32_to_float64_scalbn int32_to_float64_scalbn_aarch64eb +#define int16_to_float64_scalbn int16_to_float64_scalbn_aarch64eb +#define int64_to_float64 int64_to_float64_aarch64eb +#define int32_to_float64 int32_to_float64_aarch64eb +#define int16_to_float64 int16_to_float64_aarch64eb +#define uint64_to_float16_scalbn uint64_to_float16_scalbn_aarch64eb +#define uint32_to_float16_scalbn uint32_to_float16_scalbn_aarch64eb +#define uint16_to_float16_scalbn uint16_to_float16_scalbn_aarch64eb +#define uint64_to_float16 uint64_to_float16_aarch64eb +#define uint32_to_float16 uint32_to_float16_aarch64eb +#define uint16_to_float16 uint16_to_float16_aarch64eb +#define uint64_to_float32_scalbn uint64_to_float32_scalbn_aarch64eb +#define uint32_to_float32_scalbn uint32_to_float32_scalbn_aarch64eb +#define uint16_to_float32_scalbn uint16_to_float32_scalbn_aarch64eb +#define uint64_to_float32 uint64_to_float32_aarch64eb +#define uint32_to_float32 uint32_to_float32_aarch64eb +#define uint16_to_float32 uint16_to_float32_aarch64eb +#define uint64_to_float64_scalbn uint64_to_float64_scalbn_aarch64eb +#define uint32_to_float64_scalbn uint32_to_float64_scalbn_aarch64eb +#define uint16_to_float64_scalbn uint16_to_float64_scalbn_aarch64eb +#define uint64_to_float64 uint64_to_float64_aarch64eb +#define uint32_to_float64 uint32_to_float64_aarch64eb +#define uint16_to_float64 uint16_to_float64_aarch64eb +#define float16_min float16_min_aarch64eb +#define float16_minnum float16_minnum_aarch64eb +#define float16_minnummag float16_minnummag_aarch64eb +#define float16_max float16_max_aarch64eb +#define float16_maxnum float16_maxnum_aarch64eb +#define float16_maxnummag float16_maxnummag_aarch64eb +#define float32_min float32_min_aarch64eb +#define float32_minnum float32_minnum_aarch64eb +#define float32_minnummag float32_minnummag_aarch64eb #define float32_max float32_max_aarch64eb #define float32_maxnum float32_maxnum_aarch64eb #define float32_maxnummag float32_maxnummag_aarch64eb -#define float32_maybe_silence_nan float32_maybe_silence_nan_aarch64eb -#define float32_min float32_min_aarch64eb -#define float32_minmax float32_minmax_aarch64eb -#define float32_minnum float32_minnum_aarch64eb -#define float32_minnummag float32_minnummag_aarch64eb -#define float32_mul float32_mul_aarch64eb -#define float32_muladd float32_muladd_aarch64eb -#define float32_rem float32_rem_aarch64eb -#define float32_round_to_int float32_round_to_int_aarch64eb -#define float32_scalbn float32_scalbn_aarch64eb -#define float32_set_sign float32_set_sign_aarch64eb -#define float32_sqrt float32_sqrt_aarch64eb -#define float32_squash_input_denormal float32_squash_input_denormal_aarch64eb -#define float32_sub float32_sub_aarch64eb 
-#define float32ToCommonNaN float32ToCommonNaN_aarch64eb -#define float32_to_float128 float32_to_float128_aarch64eb -#define float32_to_float16 float32_to_float16_aarch64eb -#define float32_to_float64 float32_to_float64_aarch64eb -#define float32_to_floatx80 float32_to_floatx80_aarch64eb -#define float32_to_int16 float32_to_int16_aarch64eb -#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_aarch64eb -#define float32_to_int32 float32_to_int32_aarch64eb -#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_aarch64eb -#define float32_to_int64 float32_to_int64_aarch64eb -#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_aarch64eb -#define float32_to_uint16 float32_to_uint16_aarch64eb -#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_aarch64eb -#define float32_to_uint32 float32_to_uint32_aarch64eb -#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_aarch64eb -#define float32_to_uint64 float32_to_uint64_aarch64eb -#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_aarch64eb -#define float32_unordered float32_unordered_aarch64eb -#define float32_unordered_quiet float32_unordered_quiet_aarch64eb -#define float64_abs float64_abs_aarch64eb -#define float64_add float64_add_aarch64eb -#define float64_chs float64_chs_aarch64eb -#define float64_compare float64_compare_aarch64eb -#define float64_compare_internal float64_compare_internal_aarch64eb -#define float64_compare_quiet float64_compare_quiet_aarch64eb -#define float64_default_nan float64_default_nan_aarch64eb -#define float64_div float64_div_aarch64eb -#define float64_eq float64_eq_aarch64eb -#define float64_eq_quiet float64_eq_quiet_aarch64eb -#define float64_is_any_nan float64_is_any_nan_aarch64eb -#define float64_is_infinity float64_is_infinity_aarch64eb -#define float64_is_neg float64_is_neg_aarch64eb -#define float64_is_quiet_nan float64_is_quiet_nan_aarch64eb -#define float64_is_signaling_nan float64_is_signaling_nan_aarch64eb -#define float64_is_zero float64_is_zero_aarch64eb -#define float64_le float64_le_aarch64eb -#define float64_le_quiet float64_le_quiet_aarch64eb -#define float64_log2 float64_log2_aarch64eb -#define float64_lt float64_lt_aarch64eb -#define float64_lt_quiet float64_lt_quiet_aarch64eb +#define float64_min float64_min_aarch64eb +#define float64_minnum float64_minnum_aarch64eb +#define float64_minnummag float64_minnummag_aarch64eb #define float64_max float64_max_aarch64eb #define float64_maxnum float64_maxnum_aarch64eb #define float64_maxnummag float64_maxnummag_aarch64eb -#define float64_maybe_silence_nan float64_maybe_silence_nan_aarch64eb -#define float64_min float64_min_aarch64eb -#define float64_minmax float64_minmax_aarch64eb -#define float64_minnum float64_minnum_aarch64eb -#define float64_minnummag float64_minnummag_aarch64eb -#define float64_mul float64_mul_aarch64eb -#define float64_muladd float64_muladd_aarch64eb -#define float64_rem float64_rem_aarch64eb -#define float64_round_to_int float64_round_to_int_aarch64eb +#define float16_compare float16_compare_aarch64eb +#define float16_compare_quiet float16_compare_quiet_aarch64eb +#define float32_compare float32_compare_aarch64eb +#define float32_compare_quiet float32_compare_quiet_aarch64eb +#define float64_compare float64_compare_aarch64eb +#define float64_compare_quiet float64_compare_quiet_aarch64eb +#define float16_scalbn float16_scalbn_aarch64eb +#define float32_scalbn float32_scalbn_aarch64eb #define float64_scalbn 
float64_scalbn_aarch64eb -#define float64_set_sign float64_set_sign_aarch64eb +#define float16_sqrt float16_sqrt_aarch64eb +#define float32_sqrt float32_sqrt_aarch64eb #define float64_sqrt float64_sqrt_aarch64eb +#define float16_default_nan float16_default_nan_aarch64eb +#define float32_default_nan float32_default_nan_aarch64eb +#define float64_default_nan float64_default_nan_aarch64eb +#define float128_default_nan float128_default_nan_aarch64eb +#define float16_silence_nan float16_silence_nan_aarch64eb +#define float32_silence_nan float32_silence_nan_aarch64eb +#define float64_silence_nan float64_silence_nan_aarch64eb +#define float16_squash_input_denormal float16_squash_input_denormal_aarch64eb +#define float32_squash_input_denormal float32_squash_input_denormal_aarch64eb #define float64_squash_input_denormal float64_squash_input_denormal_aarch64eb -#define float64_sub float64_sub_aarch64eb -#define float64ToCommonNaN float64ToCommonNaN_aarch64eb -#define float64_to_float128 float64_to_float128_aarch64eb -#define float64_to_float16 float64_to_float16_aarch64eb -#define float64_to_float32 float64_to_float32_aarch64eb +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_aarch64eb +#define roundAndPackFloatx80 roundAndPackFloatx80_aarch64eb +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_aarch64eb +#define int32_to_floatx80 int32_to_floatx80_aarch64eb +#define int32_to_float128 int32_to_float128_aarch64eb +#define int64_to_floatx80 int64_to_floatx80_aarch64eb +#define int64_to_float128 int64_to_float128_aarch64eb +#define uint64_to_float128 uint64_to_float128_aarch64eb +#define float32_to_floatx80 float32_to_floatx80_aarch64eb +#define float32_to_float128 float32_to_float128_aarch64eb +#define float32_rem float32_rem_aarch64eb +#define float32_exp2 float32_exp2_aarch64eb +#define float32_log2 float32_log2_aarch64eb +#define float32_eq float32_eq_aarch64eb +#define float32_le float32_le_aarch64eb +#define float32_lt float32_lt_aarch64eb +#define float32_unordered float32_unordered_aarch64eb +#define float32_eq_quiet float32_eq_quiet_aarch64eb +#define float32_le_quiet float32_le_quiet_aarch64eb +#define float32_lt_quiet float32_lt_quiet_aarch64eb +#define float32_unordered_quiet float32_unordered_quiet_aarch64eb #define float64_to_floatx80 float64_to_floatx80_aarch64eb -#define float64_to_int16 float64_to_int16_aarch64eb -#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_aarch64eb -#define float64_to_int32 float64_to_int32_aarch64eb -#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_aarch64eb -#define float64_to_int64 float64_to_int64_aarch64eb -#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_aarch64eb -#define float64_to_uint16 float64_to_uint16_aarch64eb -#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_aarch64eb -#define float64_to_uint32 float64_to_uint32_aarch64eb -#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_aarch64eb -#define float64_to_uint64 float64_to_uint64_aarch64eb -#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_aarch64eb -#define float64_trunc_to_int float64_trunc_to_int_aarch64eb +#define float64_to_float128 float64_to_float128_aarch64eb +#define float64_rem float64_rem_aarch64eb +#define float64_log2 float64_log2_aarch64eb +#define float64_eq float64_eq_aarch64eb +#define float64_le float64_le_aarch64eb +#define float64_lt float64_lt_aarch64eb #define float64_unordered float64_unordered_aarch64eb +#define 
float64_eq_quiet float64_eq_quiet_aarch64eb +#define float64_le_quiet float64_le_quiet_aarch64eb +#define float64_lt_quiet float64_lt_quiet_aarch64eb #define float64_unordered_quiet float64_unordered_quiet_aarch64eb -#define float_raise float_raise_aarch64eb -#define floatx80_add floatx80_add_aarch64eb -#define floatx80_compare floatx80_compare_aarch64eb -#define floatx80_compare_internal floatx80_compare_internal_aarch64eb -#define floatx80_compare_quiet floatx80_compare_quiet_aarch64eb -#define floatx80_default_nan floatx80_default_nan_aarch64eb -#define floatx80_div floatx80_div_aarch64eb -#define floatx80_eq floatx80_eq_aarch64eb -#define floatx80_eq_quiet floatx80_eq_quiet_aarch64eb -#define floatx80_is_quiet_nan floatx80_is_quiet_nan_aarch64eb -#define floatx80_is_signaling_nan floatx80_is_signaling_nan_aarch64eb -#define floatx80_le floatx80_le_aarch64eb -#define floatx80_le_quiet floatx80_le_quiet_aarch64eb -#define floatx80_lt floatx80_lt_aarch64eb -#define floatx80_lt_quiet floatx80_lt_quiet_aarch64eb -#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_aarch64eb -#define floatx80_mul floatx80_mul_aarch64eb -#define floatx80_rem floatx80_rem_aarch64eb -#define floatx80_round_to_int floatx80_round_to_int_aarch64eb -#define floatx80_scalbn floatx80_scalbn_aarch64eb -#define floatx80_sqrt floatx80_sqrt_aarch64eb -#define floatx80_sub floatx80_sub_aarch64eb -#define floatx80ToCommonNaN floatx80ToCommonNaN_aarch64eb -#define floatx80_to_float128 floatx80_to_float128_aarch64eb -#define floatx80_to_float32 floatx80_to_float32_aarch64eb -#define floatx80_to_float64 floatx80_to_float64_aarch64eb #define floatx80_to_int32 floatx80_to_int32_aarch64eb #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_aarch64eb #define floatx80_to_int64 floatx80_to_int64_aarch64eb #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_aarch64eb +#define floatx80_to_float32 floatx80_to_float32_aarch64eb +#define floatx80_to_float64 floatx80_to_float64_aarch64eb +#define floatx80_to_float128 floatx80_to_float128_aarch64eb +#define floatx80_round floatx80_round_aarch64eb +#define floatx80_round_to_int floatx80_round_to_int_aarch64eb +#define floatx80_add floatx80_add_aarch64eb +#define floatx80_sub floatx80_sub_aarch64eb +#define floatx80_mul floatx80_mul_aarch64eb +#define floatx80_div floatx80_div_aarch64eb +#define floatx80_rem floatx80_rem_aarch64eb +#define floatx80_sqrt floatx80_sqrt_aarch64eb +#define floatx80_eq floatx80_eq_aarch64eb +#define floatx80_le floatx80_le_aarch64eb +#define floatx80_lt floatx80_lt_aarch64eb #define floatx80_unordered floatx80_unordered_aarch64eb +#define floatx80_eq_quiet floatx80_eq_quiet_aarch64eb +#define floatx80_le_quiet floatx80_le_quiet_aarch64eb +#define floatx80_lt_quiet floatx80_lt_quiet_aarch64eb #define floatx80_unordered_quiet floatx80_unordered_quiet_aarch64eb -#define flush_icache_range flush_icache_range_aarch64eb -#define format_string format_string_aarch64eb -#define fp_decode_rm fp_decode_rm_aarch64eb -#define frame_dummy frame_dummy_aarch64eb -#define free_range free_range_aarch64eb -#define fstat64 fstat64_aarch64eb -#define futex_wait futex_wait_aarch64eb -#define futex_wake futex_wake_aarch64eb -#define gen_aa32_ld16s gen_aa32_ld16s_aarch64eb -#define gen_aa32_ld16u gen_aa32_ld16u_aarch64eb -#define gen_aa32_ld32u gen_aa32_ld32u_aarch64eb -#define gen_aa32_ld64 gen_aa32_ld64_aarch64eb -#define gen_aa32_ld8s gen_aa32_ld8s_aarch64eb -#define gen_aa32_ld8u gen_aa32_ld8u_aarch64eb -#define gen_aa32_st16 
gen_aa32_st16_aarch64eb -#define gen_aa32_st32 gen_aa32_st32_aarch64eb -#define gen_aa32_st64 gen_aa32_st64_aarch64eb -#define gen_aa32_st8 gen_aa32_st8_aarch64eb -#define gen_adc gen_adc_aarch64eb -#define gen_adc_CC gen_adc_CC_aarch64eb -#define gen_add16 gen_add16_aarch64eb -#define gen_add_carry gen_add_carry_aarch64eb -#define gen_add_CC gen_add_CC_aarch64eb -#define gen_add_datah_offset gen_add_datah_offset_aarch64eb -#define gen_add_data_offset gen_add_data_offset_aarch64eb -#define gen_addq gen_addq_aarch64eb -#define gen_addq_lo gen_addq_lo_aarch64eb -#define gen_addq_msw gen_addq_msw_aarch64eb -#define gen_arm_parallel_addsub gen_arm_parallel_addsub_aarch64eb -#define gen_arm_shift_im gen_arm_shift_im_aarch64eb -#define gen_arm_shift_reg gen_arm_shift_reg_aarch64eb -#define gen_bx gen_bx_aarch64eb -#define gen_bx_im gen_bx_im_aarch64eb -#define gen_clrex gen_clrex_aarch64eb -#define generate_memory_topology generate_memory_topology_aarch64eb -#define generic_timer_cp_reginfo generic_timer_cp_reginfo_aarch64eb -#define gen_exception gen_exception_aarch64eb -#define gen_exception_insn gen_exception_insn_aarch64eb -#define gen_exception_internal gen_exception_internal_aarch64eb -#define gen_exception_internal_insn gen_exception_internal_insn_aarch64eb -#define gen_exception_return gen_exception_return_aarch64eb -#define gen_goto_tb gen_goto_tb_aarch64eb -#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_aarch64eb -#define gen_helper_add_saturate gen_helper_add_saturate_aarch64eb -#define gen_helper_add_setq gen_helper_add_setq_aarch64eb -#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_aarch64eb -#define gen_helper_clz32 gen_helper_clz32_aarch64eb -#define gen_helper_clz64 gen_helper_clz64_aarch64eb -#define gen_helper_clz_arm gen_helper_clz_arm_aarch64eb +#define float128_to_int32 float128_to_int32_aarch64eb +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_aarch64eb +#define float128_to_int64 float128_to_int64_aarch64eb +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_aarch64eb +#define float128_to_uint64 float128_to_uint64_aarch64eb +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_aarch64eb +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_aarch64eb +#define float128_to_uint32 float128_to_uint32_aarch64eb +#define float128_to_float32 float128_to_float32_aarch64eb +#define float128_to_float64 float128_to_float64_aarch64eb +#define float128_to_floatx80 float128_to_floatx80_aarch64eb +#define float128_round_to_int float128_round_to_int_aarch64eb +#define float128_add float128_add_aarch64eb +#define float128_sub float128_sub_aarch64eb +#define float128_mul float128_mul_aarch64eb +#define float128_div float128_div_aarch64eb +#define float128_rem float128_rem_aarch64eb +#define float128_sqrt float128_sqrt_aarch64eb +#define float128_eq float128_eq_aarch64eb +#define float128_le float128_le_aarch64eb +#define float128_lt float128_lt_aarch64eb +#define float128_unordered float128_unordered_aarch64eb +#define float128_eq_quiet float128_eq_quiet_aarch64eb +#define float128_le_quiet float128_le_quiet_aarch64eb +#define float128_lt_quiet float128_lt_quiet_aarch64eb +#define float128_unordered_quiet float128_unordered_quiet_aarch64eb +#define floatx80_compare floatx80_compare_aarch64eb +#define floatx80_compare_quiet floatx80_compare_quiet_aarch64eb +#define float128_compare float128_compare_aarch64eb +#define float128_compare_quiet 
float128_compare_quiet_aarch64eb +#define floatx80_scalbn floatx80_scalbn_aarch64eb +#define float128_scalbn float128_scalbn_aarch64eb +#define softfloat_init softfloat_init_aarch64eb +#define tcg_optimize tcg_optimize_aarch64eb +#define gen_new_label gen_new_label_aarch64eb +#define tcg_can_emit_vec_op tcg_can_emit_vec_op_aarch64eb +#define tcg_expand_vec_op tcg_expand_vec_op_aarch64eb +#define tcg_register_jit tcg_register_jit_aarch64eb +#define tcg_tb_insert tcg_tb_insert_aarch64eb +#define tcg_tb_remove tcg_tb_remove_aarch64eb +#define tcg_tb_lookup tcg_tb_lookup_aarch64eb +#define tcg_tb_foreach tcg_tb_foreach_aarch64eb +#define tcg_nb_tbs tcg_nb_tbs_aarch64eb +#define tcg_region_reset_all tcg_region_reset_all_aarch64eb +#define tcg_region_init tcg_region_init_aarch64eb +#define tcg_code_size tcg_code_size_aarch64eb +#define tcg_code_capacity tcg_code_capacity_aarch64eb +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_aarch64eb +#define tcg_malloc_internal tcg_malloc_internal_aarch64eb +#define tcg_pool_reset tcg_pool_reset_aarch64eb +#define tcg_context_init tcg_context_init_aarch64eb +#define tcg_tb_alloc tcg_tb_alloc_aarch64eb +#define tcg_prologue_init tcg_prologue_init_aarch64eb +#define tcg_func_start tcg_func_start_aarch64eb +#define tcg_set_frame tcg_set_frame_aarch64eb +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_aarch64eb +#define tcg_temp_new_internal tcg_temp_new_internal_aarch64eb +#define tcg_temp_new_vec tcg_temp_new_vec_aarch64eb +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_aarch64eb +#define tcg_temp_free_internal tcg_temp_free_internal_aarch64eb +#define tcg_const_i32 tcg_const_i32_aarch64eb +#define tcg_const_i64 tcg_const_i64_aarch64eb +#define tcg_const_local_i32 tcg_const_local_i32_aarch64eb +#define tcg_const_local_i64 tcg_const_local_i64_aarch64eb +#define tcg_op_supported tcg_op_supported_aarch64eb +#define tcg_gen_callN tcg_gen_callN_aarch64eb +#define tcg_op_remove tcg_op_remove_aarch64eb +#define tcg_emit_op tcg_emit_op_aarch64eb +#define tcg_op_insert_before tcg_op_insert_before_aarch64eb +#define tcg_op_insert_after tcg_op_insert_after_aarch64eb +#define tcg_cpu_exec_time tcg_cpu_exec_time_aarch64eb +#define tcg_gen_code tcg_gen_code_aarch64eb +#define tcg_gen_op1 tcg_gen_op1_aarch64eb +#define tcg_gen_op2 tcg_gen_op2_aarch64eb +#define tcg_gen_op3 tcg_gen_op3_aarch64eb +#define tcg_gen_op4 tcg_gen_op4_aarch64eb +#define tcg_gen_op5 tcg_gen_op5_aarch64eb +#define tcg_gen_op6 tcg_gen_op6_aarch64eb +#define tcg_gen_mb tcg_gen_mb_aarch64eb +#define tcg_gen_addi_i32 tcg_gen_addi_i32_aarch64eb +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_aarch64eb +#define tcg_gen_subi_i32 tcg_gen_subi_i32_aarch64eb +#define tcg_gen_andi_i32 tcg_gen_andi_i32_aarch64eb +#define tcg_gen_ori_i32 tcg_gen_ori_i32_aarch64eb +#define tcg_gen_xori_i32 tcg_gen_xori_i32_aarch64eb +#define tcg_gen_shli_i32 tcg_gen_shli_i32_aarch64eb +#define tcg_gen_shri_i32 tcg_gen_shri_i32_aarch64eb +#define tcg_gen_sari_i32 tcg_gen_sari_i32_aarch64eb +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_aarch64eb +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_aarch64eb +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_aarch64eb +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_aarch64eb +#define tcg_gen_muli_i32 tcg_gen_muli_i32_aarch64eb +#define tcg_gen_div_i32 tcg_gen_div_i32_aarch64eb +#define tcg_gen_rem_i32 tcg_gen_rem_i32_aarch64eb +#define tcg_gen_divu_i32 tcg_gen_divu_i32_aarch64eb +#define tcg_gen_remu_i32 tcg_gen_remu_i32_aarch64eb +#define 
tcg_gen_andc_i32 tcg_gen_andc_i32_aarch64eb +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_aarch64eb +#define tcg_gen_nand_i32 tcg_gen_nand_i32_aarch64eb +#define tcg_gen_nor_i32 tcg_gen_nor_i32_aarch64eb +#define tcg_gen_orc_i32 tcg_gen_orc_i32_aarch64eb +#define tcg_gen_clz_i32 tcg_gen_clz_i32_aarch64eb +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_aarch64eb +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_aarch64eb +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_aarch64eb +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_aarch64eb +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_aarch64eb +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_aarch64eb +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_aarch64eb +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_aarch64eb +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_aarch64eb +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_aarch64eb +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_aarch64eb +#define tcg_gen_extract_i32 tcg_gen_extract_i32_aarch64eb +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_aarch64eb +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_aarch64eb +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_aarch64eb +#define tcg_gen_add2_i32 tcg_gen_add2_i32_aarch64eb +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_aarch64eb +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_aarch64eb +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_aarch64eb +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_aarch64eb +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_aarch64eb +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_aarch64eb +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_aarch64eb +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_aarch64eb +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_aarch64eb +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_aarch64eb +#define tcg_gen_smin_i32 tcg_gen_smin_i32_aarch64eb +#define tcg_gen_umin_i32 tcg_gen_umin_i32_aarch64eb +#define tcg_gen_smax_i32 tcg_gen_smax_i32_aarch64eb +#define tcg_gen_umax_i32 tcg_gen_umax_i32_aarch64eb +#define tcg_gen_abs_i32 tcg_gen_abs_i32_aarch64eb +#define tcg_gen_addi_i64 tcg_gen_addi_i64_aarch64eb +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_aarch64eb +#define tcg_gen_subi_i64 tcg_gen_subi_i64_aarch64eb +#define tcg_gen_andi_i64 tcg_gen_andi_i64_aarch64eb +#define tcg_gen_ori_i64 tcg_gen_ori_i64_aarch64eb +#define tcg_gen_xori_i64 tcg_gen_xori_i64_aarch64eb +#define tcg_gen_shli_i64 tcg_gen_shli_i64_aarch64eb +#define tcg_gen_shri_i64 tcg_gen_shri_i64_aarch64eb +#define tcg_gen_sari_i64 tcg_gen_sari_i64_aarch64eb +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_aarch64eb +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_aarch64eb +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_aarch64eb +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_aarch64eb +#define tcg_gen_muli_i64 tcg_gen_muli_i64_aarch64eb +#define tcg_gen_div_i64 tcg_gen_div_i64_aarch64eb +#define tcg_gen_rem_i64 tcg_gen_rem_i64_aarch64eb +#define tcg_gen_divu_i64 tcg_gen_divu_i64_aarch64eb +#define tcg_gen_remu_i64 tcg_gen_remu_i64_aarch64eb +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_aarch64eb +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_aarch64eb +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_aarch64eb +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_aarch64eb +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_aarch64eb +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_aarch64eb +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_aarch64eb +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_aarch64eb +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_aarch64eb +#define tcg_gen_not_i64 
tcg_gen_not_i64_aarch64eb +#define tcg_gen_andc_i64 tcg_gen_andc_i64_aarch64eb +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_aarch64eb +#define tcg_gen_nand_i64 tcg_gen_nand_i64_aarch64eb +#define tcg_gen_nor_i64 tcg_gen_nor_i64_aarch64eb +#define tcg_gen_orc_i64 tcg_gen_orc_i64_aarch64eb +#define tcg_gen_clz_i64 tcg_gen_clz_i64_aarch64eb +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_aarch64eb +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_aarch64eb +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_aarch64eb +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_aarch64eb +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_aarch64eb +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_aarch64eb +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_aarch64eb +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_aarch64eb +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_aarch64eb +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_aarch64eb +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_aarch64eb +#define tcg_gen_extract_i64 tcg_gen_extract_i64_aarch64eb +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_aarch64eb +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_aarch64eb +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_aarch64eb +#define tcg_gen_add2_i64 tcg_gen_add2_i64_aarch64eb +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_aarch64eb +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_aarch64eb +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_aarch64eb +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_aarch64eb +#define tcg_gen_smin_i64 tcg_gen_smin_i64_aarch64eb +#define tcg_gen_umin_i64 tcg_gen_umin_i64_aarch64eb +#define tcg_gen_smax_i64 tcg_gen_smax_i64_aarch64eb +#define tcg_gen_umax_i64 tcg_gen_umax_i64_aarch64eb +#define tcg_gen_abs_i64 tcg_gen_abs_i64_aarch64eb +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_aarch64eb +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_aarch64eb +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_aarch64eb +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_aarch64eb +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_aarch64eb +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_aarch64eb +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_aarch64eb +#define tcg_gen_exit_tb tcg_gen_exit_tb_aarch64eb +#define tcg_gen_goto_tb tcg_gen_goto_tb_aarch64eb +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_aarch64eb +#define check_exit_request check_exit_request_aarch64eb +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_aarch64eb +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_aarch64eb +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_aarch64eb +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_aarch64eb +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_aarch64eb +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_aarch64eb +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_aarch64eb +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_aarch64eb +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_aarch64eb +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_aarch64eb +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_aarch64eb +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_aarch64eb +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_aarch64eb +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_aarch64eb +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_aarch64eb +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_aarch64eb +#define 
tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_aarch64eb +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_aarch64eb +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_aarch64eb +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_aarch64eb +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_aarch64eb +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_aarch64eb +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_aarch64eb +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_aarch64eb +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_aarch64eb +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_aarch64eb +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_aarch64eb +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_aarch64eb +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_aarch64eb +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_aarch64eb +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_aarch64eb +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_aarch64eb +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_aarch64eb +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_aarch64eb +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_aarch64eb +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_aarch64eb +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_aarch64eb +#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_aarch64eb +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_aarch64eb +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_aarch64eb +#define simd_desc simd_desc_aarch64eb +#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_aarch64eb +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_aarch64eb +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_aarch64eb +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_aarch64eb +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_aarch64eb +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_aarch64eb +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_aarch64eb +#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_aarch64eb +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_aarch64eb +#define tcg_gen_gvec_2 tcg_gen_gvec_2_aarch64eb +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_aarch64eb +#define tcg_gen_gvec_2s tcg_gen_gvec_2s_aarch64eb +#define tcg_gen_gvec_3 tcg_gen_gvec_3_aarch64eb +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_aarch64eb +#define tcg_gen_gvec_4 tcg_gen_gvec_4_aarch64eb +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_aarch64eb +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_aarch64eb +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_aarch64eb +#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_aarch64eb +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_aarch64eb +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_aarch64eb +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_aarch64eb +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_aarch64eb +#define tcg_gen_gvec_not tcg_gen_gvec_not_aarch64eb +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_aarch64eb +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_aarch64eb +#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_aarch64eb +#define tcg_gen_gvec_add tcg_gen_gvec_add_aarch64eb +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_aarch64eb +#define tcg_gen_gvec_addi 
tcg_gen_gvec_addi_aarch64eb +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_aarch64eb +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_aarch64eb +#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_aarch64eb +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_aarch64eb +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_aarch64eb +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_aarch64eb +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_aarch64eb +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_aarch64eb +#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_aarch64eb +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_aarch64eb +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_aarch64eb +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_aarch64eb +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_aarch64eb +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_aarch64eb +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_aarch64eb +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_aarch64eb +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_aarch64eb +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_aarch64eb +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_aarch64eb +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_aarch64eb +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_aarch64eb +#define tcg_gen_gvec_and tcg_gen_gvec_and_aarch64eb +#define tcg_gen_gvec_or tcg_gen_gvec_or_aarch64eb +#define tcg_gen_gvec_xor tcg_gen_gvec_xor_aarch64eb +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_aarch64eb +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_aarch64eb +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_aarch64eb +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_aarch64eb +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_aarch64eb +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_aarch64eb +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_aarch64eb +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_aarch64eb +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_aarch64eb +#define tcg_gen_gvec_ors tcg_gen_gvec_ors_aarch64eb +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_aarch64eb +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_aarch64eb +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_aarch64eb +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_aarch64eb +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_aarch64eb +#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_aarch64eb +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_aarch64eb +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_aarch64eb +#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_aarch64eb +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_aarch64eb +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_aarch64eb +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_aarch64eb +#define tcg_gen_gvec_sars tcg_gen_gvec_sars_aarch64eb +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_aarch64eb +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_aarch64eb +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_aarch64eb +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_aarch64eb +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_aarch64eb +#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_aarch64eb +#define vec_gen_2 vec_gen_2_aarch64eb +#define vec_gen_3 vec_gen_3_aarch64eb +#define vec_gen_4 vec_gen_4_aarch64eb +#define tcg_gen_mov_vec tcg_gen_mov_vec_aarch64eb +#define tcg_const_zeros_vec tcg_const_zeros_vec_aarch64eb +#define tcg_const_ones_vec tcg_const_ones_vec_aarch64eb +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_aarch64eb +#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_aarch64eb +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_aarch64eb +#define 
tcg_gen_dup32i_vec tcg_gen_dup32i_vec_aarch64eb +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_aarch64eb +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_aarch64eb +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_aarch64eb +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_aarch64eb +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_aarch64eb +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_aarch64eb +#define tcg_gen_ld_vec tcg_gen_ld_vec_aarch64eb +#define tcg_gen_st_vec tcg_gen_st_vec_aarch64eb +#define tcg_gen_stl_vec tcg_gen_stl_vec_aarch64eb +#define tcg_gen_and_vec tcg_gen_and_vec_aarch64eb +#define tcg_gen_or_vec tcg_gen_or_vec_aarch64eb +#define tcg_gen_xor_vec tcg_gen_xor_vec_aarch64eb +#define tcg_gen_andc_vec tcg_gen_andc_vec_aarch64eb +#define tcg_gen_orc_vec tcg_gen_orc_vec_aarch64eb +#define tcg_gen_nand_vec tcg_gen_nand_vec_aarch64eb +#define tcg_gen_nor_vec tcg_gen_nor_vec_aarch64eb +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_aarch64eb +#define tcg_gen_not_vec tcg_gen_not_vec_aarch64eb +#define tcg_gen_neg_vec tcg_gen_neg_vec_aarch64eb +#define tcg_gen_abs_vec tcg_gen_abs_vec_aarch64eb +#define tcg_gen_shli_vec tcg_gen_shli_vec_aarch64eb +#define tcg_gen_shri_vec tcg_gen_shri_vec_aarch64eb +#define tcg_gen_sari_vec tcg_gen_sari_vec_aarch64eb +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_aarch64eb +#define tcg_gen_add_vec tcg_gen_add_vec_aarch64eb +#define tcg_gen_sub_vec tcg_gen_sub_vec_aarch64eb +#define tcg_gen_mul_vec tcg_gen_mul_vec_aarch64eb +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_aarch64eb +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_aarch64eb +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_aarch64eb +#define tcg_gen_ussub_vec tcg_gen_ussub_vec_aarch64eb +#define tcg_gen_smin_vec tcg_gen_smin_vec_aarch64eb +#define tcg_gen_umin_vec tcg_gen_umin_vec_aarch64eb +#define tcg_gen_smax_vec tcg_gen_smax_vec_aarch64eb +#define tcg_gen_umax_vec tcg_gen_umax_vec_aarch64eb +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_aarch64eb +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_aarch64eb +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_aarch64eb +#define tcg_gen_shls_vec tcg_gen_shls_vec_aarch64eb +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_aarch64eb +#define tcg_gen_sars_vec tcg_gen_sars_vec_aarch64eb +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_aarch64eb +#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_aarch64eb +#define tb_htable_lookup tb_htable_lookup_aarch64eb +#define tb_set_jmp_target tb_set_jmp_target_aarch64eb +#define cpu_exec cpu_exec_aarch64eb +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_aarch64eb +#define cpu_reloading_memory_map cpu_reloading_memory_map_aarch64eb +#define cpu_loop_exit cpu_loop_exit_aarch64eb +#define cpu_loop_exit_restore cpu_loop_exit_restore_aarch64eb +#define cpu_loop_exit_atomic cpu_loop_exit_atomic_aarch64eb +#define tlb_init tlb_init_aarch64eb +#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_aarch64eb +#define tlb_flush tlb_flush_aarch64eb +#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_aarch64eb +#define tlb_flush_all_cpus tlb_flush_all_cpus_aarch64eb +#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_aarch64eb +#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_aarch64eb +#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_aarch64eb +#define tlb_flush_page tlb_flush_page_aarch64eb +#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_aarch64eb +#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_aarch64eb +#define tlb_flush_page_by_mmuidx_all_cpus_synced 
tlb_flush_page_by_mmuidx_all_cpus_synced_aarch64eb +#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_aarch64eb +#define tlb_protect_code tlb_protect_code_aarch64eb +#define tlb_unprotect_code tlb_unprotect_code_aarch64eb +#define tlb_reset_dirty tlb_reset_dirty_aarch64eb +#define tlb_set_dirty tlb_set_dirty_aarch64eb +#define tlb_set_page_with_attrs tlb_set_page_with_attrs_aarch64eb +#define tlb_set_page tlb_set_page_aarch64eb +#define get_page_addr_code_hostp get_page_addr_code_hostp_aarch64eb +#define get_page_addr_code get_page_addr_code_aarch64eb +#define probe_access probe_access_aarch64eb +#define tlb_vaddr_to_host tlb_vaddr_to_host_aarch64eb +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_aarch64eb +#define helper_le_lduw_mmu helper_le_lduw_mmu_aarch64eb +#define helper_be_lduw_mmu helper_be_lduw_mmu_aarch64eb +#define helper_le_ldul_mmu helper_le_ldul_mmu_aarch64eb +#define helper_be_ldul_mmu helper_be_ldul_mmu_aarch64eb +#define helper_le_ldq_mmu helper_le_ldq_mmu_aarch64eb +#define helper_be_ldq_mmu helper_be_ldq_mmu_aarch64eb +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_aarch64eb +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_aarch64eb +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_aarch64eb +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_aarch64eb +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_aarch64eb +#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_aarch64eb +#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_aarch64eb +#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_aarch64eb +#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_aarch64eb +#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_aarch64eb +#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_aarch64eb +#define cpu_ldub_data_ra cpu_ldub_data_ra_aarch64eb +#define cpu_ldsb_data_ra cpu_ldsb_data_ra_aarch64eb +#define cpu_lduw_data_ra cpu_lduw_data_ra_aarch64eb +#define cpu_ldsw_data_ra cpu_ldsw_data_ra_aarch64eb +#define cpu_ldl_data_ra cpu_ldl_data_ra_aarch64eb +#define cpu_ldq_data_ra cpu_ldq_data_ra_aarch64eb +#define cpu_ldub_data cpu_ldub_data_aarch64eb +#define cpu_ldsb_data cpu_ldsb_data_aarch64eb +#define cpu_lduw_data cpu_lduw_data_aarch64eb +#define cpu_ldsw_data cpu_ldsw_data_aarch64eb +#define cpu_ldl_data cpu_ldl_data_aarch64eb +#define cpu_ldq_data cpu_ldq_data_aarch64eb +#define helper_ret_stb_mmu helper_ret_stb_mmu_aarch64eb +#define helper_le_stw_mmu helper_le_stw_mmu_aarch64eb +#define helper_be_stw_mmu helper_be_stw_mmu_aarch64eb +#define helper_le_stl_mmu helper_le_stl_mmu_aarch64eb +#define helper_be_stl_mmu helper_be_stl_mmu_aarch64eb +#define helper_le_stq_mmu helper_le_stq_mmu_aarch64eb +#define helper_be_stq_mmu helper_be_stq_mmu_aarch64eb +#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_aarch64eb +#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_aarch64eb +#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_aarch64eb +#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_aarch64eb +#define cpu_stb_data_ra cpu_stb_data_ra_aarch64eb +#define cpu_stw_data_ra cpu_stw_data_ra_aarch64eb +#define cpu_stl_data_ra cpu_stl_data_ra_aarch64eb +#define cpu_stq_data_ra cpu_stq_data_ra_aarch64eb +#define cpu_stb_data cpu_stb_data_aarch64eb +#define cpu_stw_data cpu_stw_data_aarch64eb +#define cpu_stl_data cpu_stl_data_aarch64eb +#define cpu_stq_data cpu_stq_data_aarch64eb +#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_aarch64eb +#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_aarch64eb +#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_aarch64eb +#define helper_atomic_fetch_andb_mmu 
helper_atomic_fetch_andb_mmu_aarch64eb +#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_aarch64eb +#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_aarch64eb +#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_aarch64eb +#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_aarch64eb +#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_aarch64eb +#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_aarch64eb +#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_aarch64eb +#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_aarch64eb +#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_aarch64eb +#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_aarch64eb +#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_aarch64eb +#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_aarch64eb +#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_aarch64eb +#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_aarch64eb +#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_aarch64eb +#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_aarch64eb +#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_aarch64eb +#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_aarch64eb +#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_aarch64eb +#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_aarch64eb +#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_aarch64eb +#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_aarch64eb +#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_aarch64eb +#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_aarch64eb +#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_aarch64eb +#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_aarch64eb +#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_aarch64eb +#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_aarch64eb +#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_aarch64eb +#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_aarch64eb +#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_aarch64eb +#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_aarch64eb +#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_aarch64eb +#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_aarch64eb +#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_aarch64eb +#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_aarch64eb +#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_aarch64eb +#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_aarch64eb +#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_aarch64eb +#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_aarch64eb +#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_aarch64eb +#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_aarch64eb +#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_aarch64eb +#define 
helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_aarch64eb +#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_aarch64eb +#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_aarch64eb +#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_aarch64eb +#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_aarch64eb +#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_aarch64eb +#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_aarch64eb +#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_aarch64eb +#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_aarch64eb +#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_aarch64eb +#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_aarch64eb +#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_aarch64eb +#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_aarch64eb +#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_aarch64eb +#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_aarch64eb +#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_aarch64eb +#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_aarch64eb +#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_aarch64eb +#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_aarch64eb +#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_aarch64eb +#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_aarch64eb +#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_aarch64eb +#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_aarch64eb +#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_aarch64eb +#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_aarch64eb +#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_aarch64eb +#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_aarch64eb +#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_aarch64eb +#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_aarch64eb +#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_aarch64eb +#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_aarch64eb +#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_aarch64eb +#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_aarch64eb +#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_aarch64eb +#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_aarch64eb +#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_aarch64eb +#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_aarch64eb +#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_aarch64eb +#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_aarch64eb +#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_aarch64eb +#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_aarch64eb +#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_aarch64eb +#define helper_atomic_add_fetchl_be_mmu 
helper_atomic_add_fetchl_be_mmu_aarch64eb +#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_aarch64eb +#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_aarch64eb +#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_aarch64eb +#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_aarch64eb +#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_aarch64eb +#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_aarch64eb +#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_aarch64eb +#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_aarch64eb +#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_aarch64eb +#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_aarch64eb +#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_aarch64eb +#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_aarch64eb +#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_aarch64eb +#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_aarch64eb +#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_aarch64eb +#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_aarch64eb +#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_aarch64eb +#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_aarch64eb +#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_aarch64eb +#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_aarch64eb +#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_aarch64eb +#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_aarch64eb +#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_aarch64eb +#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_aarch64eb +#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_aarch64eb +#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_aarch64eb +#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_aarch64eb +#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_aarch64eb +#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_aarch64eb +#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_aarch64eb +#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_aarch64eb +#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_aarch64eb +#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_aarch64eb +#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_aarch64eb +#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_aarch64eb +#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_aarch64eb +#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_aarch64eb +#define helper_atomic_xchgb helper_atomic_xchgb_aarch64eb +#define helper_atomic_fetch_addb helper_atomic_fetch_addb_aarch64eb +#define helper_atomic_fetch_andb helper_atomic_fetch_andb_aarch64eb +#define helper_atomic_fetch_orb helper_atomic_fetch_orb_aarch64eb +#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_aarch64eb +#define helper_atomic_add_fetchb helper_atomic_add_fetchb_aarch64eb +#define helper_atomic_and_fetchb 
helper_atomic_and_fetchb_aarch64eb +#define helper_atomic_or_fetchb helper_atomic_or_fetchb_aarch64eb +#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_aarch64eb +#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_aarch64eb +#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_aarch64eb +#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_aarch64eb +#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_aarch64eb +#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_aarch64eb +#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_aarch64eb +#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_aarch64eb +#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_aarch64eb +#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_aarch64eb +#define helper_atomic_xchgw_le helper_atomic_xchgw_le_aarch64eb +#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_aarch64eb +#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_aarch64eb +#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_aarch64eb +#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_aarch64eb +#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_aarch64eb +#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_aarch64eb +#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_aarch64eb +#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_aarch64eb +#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_aarch64eb +#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_aarch64eb +#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_aarch64eb +#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_aarch64eb +#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_aarch64eb +#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_aarch64eb +#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_aarch64eb +#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_aarch64eb +#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_aarch64eb +#define helper_atomic_xchgw_be helper_atomic_xchgw_be_aarch64eb +#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_aarch64eb +#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_aarch64eb +#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_aarch64eb +#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_aarch64eb +#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_aarch64eb +#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_aarch64eb +#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_aarch64eb +#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_aarch64eb +#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_aarch64eb +#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_aarch64eb +#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_aarch64eb +#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_aarch64eb +#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_aarch64eb +#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_aarch64eb +#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_aarch64eb +#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_aarch64eb +#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_aarch64eb +#define helper_atomic_xchgl_le 
helper_atomic_xchgl_le_aarch64eb +#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_aarch64eb +#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_aarch64eb +#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_aarch64eb +#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_aarch64eb +#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_aarch64eb +#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_aarch64eb +#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_aarch64eb +#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_aarch64eb +#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_aarch64eb +#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_aarch64eb +#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_aarch64eb +#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_aarch64eb +#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_aarch64eb +#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_aarch64eb +#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_aarch64eb +#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_aarch64eb +#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_aarch64eb +#define helper_atomic_xchgl_be helper_atomic_xchgl_be_aarch64eb +#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_aarch64eb +#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_aarch64eb +#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_aarch64eb +#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_aarch64eb +#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_aarch64eb +#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_aarch64eb +#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_aarch64eb +#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_aarch64eb +#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_aarch64eb +#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_aarch64eb +#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_aarch64eb +#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_aarch64eb +#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_aarch64eb +#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_aarch64eb +#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_aarch64eb +#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_aarch64eb +#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_aarch64eb +#define helper_atomic_xchgq_le helper_atomic_xchgq_le_aarch64eb +#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_aarch64eb +#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_aarch64eb +#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_aarch64eb +#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_aarch64eb +#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_aarch64eb +#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_aarch64eb +#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_aarch64eb +#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_aarch64eb +#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_aarch64eb +#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_aarch64eb +#define helper_atomic_fetch_smaxq_le 
helper_atomic_fetch_smaxq_le_aarch64eb +#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_aarch64eb +#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_aarch64eb +#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_aarch64eb +#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_aarch64eb +#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_aarch64eb +#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_aarch64eb +#define helper_atomic_xchgq_be helper_atomic_xchgq_be_aarch64eb +#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_aarch64eb +#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_aarch64eb +#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_aarch64eb +#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_aarch64eb +#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_aarch64eb +#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_aarch64eb +#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_aarch64eb +#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_aarch64eb +#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_aarch64eb +#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_aarch64eb +#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_aarch64eb +#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_aarch64eb +#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_aarch64eb +#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_aarch64eb +#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_aarch64eb +#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_aarch64eb +#define cpu_ldub_code cpu_ldub_code_aarch64eb +#define cpu_lduw_code cpu_lduw_code_aarch64eb +#define cpu_ldl_code cpu_ldl_code_aarch64eb +#define cpu_ldq_code cpu_ldq_code_aarch64eb +#define helper_div_i32 helper_div_i32_aarch64eb +#define helper_rem_i32 helper_rem_i32_aarch64eb +#define helper_divu_i32 helper_divu_i32_aarch64eb +#define helper_remu_i32 helper_remu_i32_aarch64eb +#define helper_shl_i64 helper_shl_i64_aarch64eb +#define helper_shr_i64 helper_shr_i64_aarch64eb +#define helper_sar_i64 helper_sar_i64_aarch64eb +#define helper_div_i64 helper_div_i64_aarch64eb +#define helper_rem_i64 helper_rem_i64_aarch64eb +#define helper_divu_i64 helper_divu_i64_aarch64eb +#define helper_remu_i64 helper_remu_i64_aarch64eb +#define helper_muluh_i64 helper_muluh_i64_aarch64eb +#define helper_mulsh_i64 helper_mulsh_i64_aarch64eb +#define helper_clz_i32 helper_clz_i32_aarch64eb +#define helper_ctz_i32 helper_ctz_i32_aarch64eb +#define helper_clz_i64 helper_clz_i64_aarch64eb +#define helper_ctz_i64 helper_ctz_i64_aarch64eb +#define helper_clrsb_i32 helper_clrsb_i32_aarch64eb +#define helper_clrsb_i64 helper_clrsb_i64_aarch64eb +#define helper_ctpop_i32 helper_ctpop_i32_aarch64eb +#define helper_ctpop_i64 helper_ctpop_i64_aarch64eb +#define helper_lookup_tb_ptr helper_lookup_tb_ptr_aarch64eb +#define helper_exit_atomic helper_exit_atomic_aarch64eb +#define helper_gvec_add8 helper_gvec_add8_aarch64eb +#define helper_gvec_add16 helper_gvec_add16_aarch64eb +#define helper_gvec_add32 helper_gvec_add32_aarch64eb +#define helper_gvec_add64 helper_gvec_add64_aarch64eb +#define helper_gvec_adds8 helper_gvec_adds8_aarch64eb +#define helper_gvec_adds16 helper_gvec_adds16_aarch64eb +#define helper_gvec_adds32 helper_gvec_adds32_aarch64eb +#define helper_gvec_adds64 
helper_gvec_adds64_aarch64eb +#define helper_gvec_sub8 helper_gvec_sub8_aarch64eb +#define helper_gvec_sub16 helper_gvec_sub16_aarch64eb +#define helper_gvec_sub32 helper_gvec_sub32_aarch64eb +#define helper_gvec_sub64 helper_gvec_sub64_aarch64eb +#define helper_gvec_subs8 helper_gvec_subs8_aarch64eb +#define helper_gvec_subs16 helper_gvec_subs16_aarch64eb +#define helper_gvec_subs32 helper_gvec_subs32_aarch64eb +#define helper_gvec_subs64 helper_gvec_subs64_aarch64eb +#define helper_gvec_mul8 helper_gvec_mul8_aarch64eb +#define helper_gvec_mul16 helper_gvec_mul16_aarch64eb +#define helper_gvec_mul32 helper_gvec_mul32_aarch64eb +#define helper_gvec_mul64 helper_gvec_mul64_aarch64eb +#define helper_gvec_muls8 helper_gvec_muls8_aarch64eb +#define helper_gvec_muls16 helper_gvec_muls16_aarch64eb +#define helper_gvec_muls32 helper_gvec_muls32_aarch64eb +#define helper_gvec_muls64 helper_gvec_muls64_aarch64eb +#define helper_gvec_neg8 helper_gvec_neg8_aarch64eb +#define helper_gvec_neg16 helper_gvec_neg16_aarch64eb +#define helper_gvec_neg32 helper_gvec_neg32_aarch64eb +#define helper_gvec_neg64 helper_gvec_neg64_aarch64eb +#define helper_gvec_abs8 helper_gvec_abs8_aarch64eb +#define helper_gvec_abs16 helper_gvec_abs16_aarch64eb +#define helper_gvec_abs32 helper_gvec_abs32_aarch64eb +#define helper_gvec_abs64 helper_gvec_abs64_aarch64eb +#define helper_gvec_mov helper_gvec_mov_aarch64eb +#define helper_gvec_dup64 helper_gvec_dup64_aarch64eb +#define helper_gvec_dup32 helper_gvec_dup32_aarch64eb +#define helper_gvec_dup16 helper_gvec_dup16_aarch64eb +#define helper_gvec_dup8 helper_gvec_dup8_aarch64eb +#define helper_gvec_not helper_gvec_not_aarch64eb +#define helper_gvec_and helper_gvec_and_aarch64eb +#define helper_gvec_or helper_gvec_or_aarch64eb +#define helper_gvec_xor helper_gvec_xor_aarch64eb +#define helper_gvec_andc helper_gvec_andc_aarch64eb +#define helper_gvec_orc helper_gvec_orc_aarch64eb +#define helper_gvec_nand helper_gvec_nand_aarch64eb +#define helper_gvec_nor helper_gvec_nor_aarch64eb +#define helper_gvec_eqv helper_gvec_eqv_aarch64eb +#define helper_gvec_ands helper_gvec_ands_aarch64eb +#define helper_gvec_xors helper_gvec_xors_aarch64eb +#define helper_gvec_ors helper_gvec_ors_aarch64eb +#define helper_gvec_shl8i helper_gvec_shl8i_aarch64eb +#define helper_gvec_shl16i helper_gvec_shl16i_aarch64eb +#define helper_gvec_shl32i helper_gvec_shl32i_aarch64eb +#define helper_gvec_shl64i helper_gvec_shl64i_aarch64eb +#define helper_gvec_shr8i helper_gvec_shr8i_aarch64eb +#define helper_gvec_shr16i helper_gvec_shr16i_aarch64eb +#define helper_gvec_shr32i helper_gvec_shr32i_aarch64eb +#define helper_gvec_shr64i helper_gvec_shr64i_aarch64eb +#define helper_gvec_sar8i helper_gvec_sar8i_aarch64eb +#define helper_gvec_sar16i helper_gvec_sar16i_aarch64eb +#define helper_gvec_sar32i helper_gvec_sar32i_aarch64eb +#define helper_gvec_sar64i helper_gvec_sar64i_aarch64eb +#define helper_gvec_shl8v helper_gvec_shl8v_aarch64eb +#define helper_gvec_shl16v helper_gvec_shl16v_aarch64eb +#define helper_gvec_shl32v helper_gvec_shl32v_aarch64eb +#define helper_gvec_shl64v helper_gvec_shl64v_aarch64eb +#define helper_gvec_shr8v helper_gvec_shr8v_aarch64eb +#define helper_gvec_shr16v helper_gvec_shr16v_aarch64eb +#define helper_gvec_shr32v helper_gvec_shr32v_aarch64eb +#define helper_gvec_shr64v helper_gvec_shr64v_aarch64eb +#define helper_gvec_sar8v helper_gvec_sar8v_aarch64eb +#define helper_gvec_sar16v helper_gvec_sar16v_aarch64eb +#define helper_gvec_sar32v helper_gvec_sar32v_aarch64eb +#define 
helper_gvec_sar64v helper_gvec_sar64v_aarch64eb +#define helper_gvec_eq8 helper_gvec_eq8_aarch64eb +#define helper_gvec_ne8 helper_gvec_ne8_aarch64eb +#define helper_gvec_lt8 helper_gvec_lt8_aarch64eb +#define helper_gvec_le8 helper_gvec_le8_aarch64eb +#define helper_gvec_ltu8 helper_gvec_ltu8_aarch64eb +#define helper_gvec_leu8 helper_gvec_leu8_aarch64eb +#define helper_gvec_eq16 helper_gvec_eq16_aarch64eb +#define helper_gvec_ne16 helper_gvec_ne16_aarch64eb +#define helper_gvec_lt16 helper_gvec_lt16_aarch64eb +#define helper_gvec_le16 helper_gvec_le16_aarch64eb +#define helper_gvec_ltu16 helper_gvec_ltu16_aarch64eb +#define helper_gvec_leu16 helper_gvec_leu16_aarch64eb +#define helper_gvec_eq32 helper_gvec_eq32_aarch64eb +#define helper_gvec_ne32 helper_gvec_ne32_aarch64eb +#define helper_gvec_lt32 helper_gvec_lt32_aarch64eb +#define helper_gvec_le32 helper_gvec_le32_aarch64eb +#define helper_gvec_ltu32 helper_gvec_ltu32_aarch64eb +#define helper_gvec_leu32 helper_gvec_leu32_aarch64eb +#define helper_gvec_eq64 helper_gvec_eq64_aarch64eb +#define helper_gvec_ne64 helper_gvec_ne64_aarch64eb +#define helper_gvec_lt64 helper_gvec_lt64_aarch64eb +#define helper_gvec_le64 helper_gvec_le64_aarch64eb +#define helper_gvec_ltu64 helper_gvec_ltu64_aarch64eb +#define helper_gvec_leu64 helper_gvec_leu64_aarch64eb +#define helper_gvec_ssadd8 helper_gvec_ssadd8_aarch64eb +#define helper_gvec_ssadd16 helper_gvec_ssadd16_aarch64eb +#define helper_gvec_ssadd32 helper_gvec_ssadd32_aarch64eb +#define helper_gvec_ssadd64 helper_gvec_ssadd64_aarch64eb +#define helper_gvec_sssub8 helper_gvec_sssub8_aarch64eb +#define helper_gvec_sssub16 helper_gvec_sssub16_aarch64eb +#define helper_gvec_sssub32 helper_gvec_sssub32_aarch64eb +#define helper_gvec_sssub64 helper_gvec_sssub64_aarch64eb +#define helper_gvec_usadd8 helper_gvec_usadd8_aarch64eb +#define helper_gvec_usadd16 helper_gvec_usadd16_aarch64eb +#define helper_gvec_usadd32 helper_gvec_usadd32_aarch64eb +#define helper_gvec_usadd64 helper_gvec_usadd64_aarch64eb +#define helper_gvec_ussub8 helper_gvec_ussub8_aarch64eb +#define helper_gvec_ussub16 helper_gvec_ussub16_aarch64eb +#define helper_gvec_ussub32 helper_gvec_ussub32_aarch64eb +#define helper_gvec_ussub64 helper_gvec_ussub64_aarch64eb +#define helper_gvec_smin8 helper_gvec_smin8_aarch64eb +#define helper_gvec_smin16 helper_gvec_smin16_aarch64eb +#define helper_gvec_smin32 helper_gvec_smin32_aarch64eb +#define helper_gvec_smin64 helper_gvec_smin64_aarch64eb +#define helper_gvec_smax8 helper_gvec_smax8_aarch64eb +#define helper_gvec_smax16 helper_gvec_smax16_aarch64eb +#define helper_gvec_smax32 helper_gvec_smax32_aarch64eb +#define helper_gvec_smax64 helper_gvec_smax64_aarch64eb +#define helper_gvec_umin8 helper_gvec_umin8_aarch64eb +#define helper_gvec_umin16 helper_gvec_umin16_aarch64eb +#define helper_gvec_umin32 helper_gvec_umin32_aarch64eb +#define helper_gvec_umin64 helper_gvec_umin64_aarch64eb +#define helper_gvec_umax8 helper_gvec_umax8_aarch64eb +#define helper_gvec_umax16 helper_gvec_umax16_aarch64eb +#define helper_gvec_umax32 helper_gvec_umax32_aarch64eb +#define helper_gvec_umax64 helper_gvec_umax64_aarch64eb +#define helper_gvec_bitsel helper_gvec_bitsel_aarch64eb +#define cpu_restore_state cpu_restore_state_aarch64eb +#define page_collection_lock page_collection_lock_aarch64eb +#define page_collection_unlock page_collection_unlock_aarch64eb +#define free_code_gen_buffer free_code_gen_buffer_aarch64eb +#define tcg_exec_init tcg_exec_init_aarch64eb +#define tb_cleanup tb_cleanup_aarch64eb 
+#define tb_flush tb_flush_aarch64eb +#define tb_phys_invalidate tb_phys_invalidate_aarch64eb +#define tb_gen_code tb_gen_code_aarch64eb +#define tb_exec_lock tb_exec_lock_aarch64eb +#define tb_exec_unlock tb_exec_unlock_aarch64eb +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_aarch64eb +#define tb_invalidate_phys_range tb_invalidate_phys_range_aarch64eb +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_aarch64eb +#define tb_check_watchpoint tb_check_watchpoint_aarch64eb +#define cpu_io_recompile cpu_io_recompile_aarch64eb +#define tb_flush_jmp_cache tb_flush_jmp_cache_aarch64eb +#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_aarch64eb +#define translator_loop_temp_check translator_loop_temp_check_aarch64eb +#define translator_loop translator_loop_aarch64eb +#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_aarch64eb +#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_aarch64eb +#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_aarch64eb +#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_aarch64eb +#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_aarch64eb +#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_aarch64eb +#define unassigned_mem_ops unassigned_mem_ops_aarch64eb +#define floatx80_infinity floatx80_infinity_aarch64eb +#define dup_const_func dup_const_func_aarch64eb +#define gen_helper_raise_exception gen_helper_raise_exception_aarch64eb +#define gen_helper_raise_interrupt gen_helper_raise_interrupt_aarch64eb +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_aarch64eb +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_aarch64eb #define gen_helper_cpsr_read gen_helper_cpsr_read_aarch64eb #define gen_helper_cpsr_write gen_helper_cpsr_write_aarch64eb -#define gen_helper_crc32_arm gen_helper_crc32_arm_aarch64eb -#define gen_helper_crc32c gen_helper_crc32c_aarch64eb -#define gen_helper_crypto_aese gen_helper_crypto_aese_aarch64eb -#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_aarch64eb -#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_aarch64eb -#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_aarch64eb -#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_aarch64eb -#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_aarch64eb -#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_aarch64eb -#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_aarch64eb -#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_aarch64eb -#define gen_helper_double_saturate gen_helper_double_saturate_aarch64eb -#define gen_helper_exception_internal gen_helper_exception_internal_aarch64eb -#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_aarch64eb -#define gen_helper_get_cp_reg gen_helper_get_cp_reg_aarch64eb -#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_aarch64eb -#define gen_helper_get_r13_banked gen_helper_get_r13_banked_aarch64eb -#define gen_helper_get_user_reg gen_helper_get_user_reg_aarch64eb -#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_aarch64eb -#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_aarch64eb -#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_aarch64eb -#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_aarch64eb -#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_aarch64eb -#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_aarch64eb -#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_aarch64eb -#define 
gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_aarch64eb -#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_aarch64eb -#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_aarch64eb -#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_aarch64eb -#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_aarch64eb -#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_aarch64eb -#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_aarch64eb -#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_aarch64eb -#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_aarch64eb -#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_aarch64eb -#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_aarch64eb -#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_aarch64eb -#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_aarch64eb -#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_aarch64eb -#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_aarch64eb -#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_aarch64eb -#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_aarch64eb -#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_aarch64eb -#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_aarch64eb -#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_aarch64eb -#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_aarch64eb -#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_aarch64eb -#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_aarch64eb -#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_aarch64eb -#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_aarch64eb -#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_aarch64eb -#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_aarch64eb -#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_aarch64eb -#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_aarch64eb -#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_aarch64eb -#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_aarch64eb -#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_aarch64eb -#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_aarch64eb -#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_aarch64eb -#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_aarch64eb -#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_aarch64eb -#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_aarch64eb -#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_aarch64eb -#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_aarch64eb -#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_aarch64eb -#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_aarch64eb -#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_aarch64eb -#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_aarch64eb -#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_aarch64eb -#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_aarch64eb -#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_aarch64eb -#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_aarch64eb -#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_aarch64eb -#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_aarch64eb -#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_aarch64eb -#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_aarch64eb -#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_aarch64eb -#define 
gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_aarch64eb -#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_aarch64eb -#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_aarch64eb -#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_aarch64eb -#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_aarch64eb -#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_aarch64eb -#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_aarch64eb -#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_aarch64eb -#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_aarch64eb -#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_aarch64eb -#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_aarch64eb -#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_aarch64eb -#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_aarch64eb -#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_aarch64eb -#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_aarch64eb -#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_aarch64eb -#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_aarch64eb -#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_aarch64eb -#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_aarch64eb -#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_aarch64eb -#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_aarch64eb -#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_aarch64eb -#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_aarch64eb -#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_aarch64eb -#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_aarch64eb -#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_aarch64eb -#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_aarch64eb -#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_aarch64eb -#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_aarch64eb -#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_aarch64eb -#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_aarch64eb -#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_aarch64eb -#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_aarch64eb -#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_aarch64eb -#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_aarch64eb -#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_aarch64eb -#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_aarch64eb -#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_aarch64eb -#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_aarch64eb -#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_aarch64eb -#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_aarch64eb -#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_aarch64eb -#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_aarch64eb -#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_aarch64eb -#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_aarch64eb -#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_aarch64eb -#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_aarch64eb -#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_aarch64eb -#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_aarch64eb -#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_aarch64eb -#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_aarch64eb -#define 
gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_aarch64eb -#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_aarch64eb -#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_aarch64eb -#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_aarch64eb -#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_aarch64eb -#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_aarch64eb -#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_aarch64eb -#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_aarch64eb -#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_aarch64eb -#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_aarch64eb -#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_aarch64eb -#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_aarch64eb -#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_aarch64eb -#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_aarch64eb -#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_aarch64eb -#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_aarch64eb -#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_aarch64eb -#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_aarch64eb -#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_aarch64eb -#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_aarch64eb -#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_aarch64eb -#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_aarch64eb -#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_aarch64eb -#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_aarch64eb -#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_aarch64eb -#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_aarch64eb -#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_aarch64eb -#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_aarch64eb -#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_aarch64eb -#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_aarch64eb -#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_aarch64eb -#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_aarch64eb -#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_aarch64eb -#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_aarch64eb -#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_aarch64eb -#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_aarch64eb -#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_aarch64eb -#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_aarch64eb -#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_aarch64eb -#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_aarch64eb -#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_aarch64eb -#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_aarch64eb -#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_aarch64eb -#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_aarch64eb -#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_aarch64eb -#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_aarch64eb -#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_aarch64eb -#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_aarch64eb -#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_aarch64eb -#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_aarch64eb -#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_aarch64eb -#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_aarch64eb 
-#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_aarch64eb -#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_aarch64eb -#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_aarch64eb -#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_aarch64eb -#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_aarch64eb -#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_aarch64eb -#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_aarch64eb -#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_aarch64eb -#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_aarch64eb -#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_aarch64eb -#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_aarch64eb -#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_aarch64eb -#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_aarch64eb -#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_aarch64eb -#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_aarch64eb -#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_aarch64eb -#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_aarch64eb -#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_aarch64eb -#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_aarch64eb -#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_aarch64eb -#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_aarch64eb -#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_aarch64eb -#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_aarch64eb -#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_aarch64eb -#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_aarch64eb -#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_aarch64eb -#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_aarch64eb -#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_aarch64eb -#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_aarch64eb -#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_aarch64eb -#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_aarch64eb -#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_aarch64eb -#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_aarch64eb -#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_aarch64eb -#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_aarch64eb -#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_aarch64eb -#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_aarch64eb -#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_aarch64eb -#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_aarch64eb -#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_aarch64eb -#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_aarch64eb -#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_aarch64eb -#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_aarch64eb -#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_aarch64eb -#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_aarch64eb -#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_aarch64eb -#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_aarch64eb -#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_aarch64eb -#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_aarch64eb -#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_aarch64eb -#define 
gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_aarch64eb -#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_aarch64eb -#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_aarch64eb -#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_aarch64eb -#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_aarch64eb -#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_aarch64eb -#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_aarch64eb -#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_aarch64eb -#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_aarch64eb -#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_aarch64eb -#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_aarch64eb -#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_aarch64eb -#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_aarch64eb -#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_aarch64eb -#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_aarch64eb -#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_aarch64eb -#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_aarch64eb -#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_aarch64eb -#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_aarch64eb -#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_aarch64eb -#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_aarch64eb -#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_aarch64eb -#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_aarch64eb -#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_aarch64eb -#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_aarch64eb -#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_aarch64eb -#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_aarch64eb -#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_aarch64eb -#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_aarch64eb -#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_aarch64eb -#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_aarch64eb -#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_aarch64eb -#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_aarch64eb -#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_aarch64eb -#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_aarch64eb -#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_aarch64eb -#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_aarch64eb -#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_aarch64eb -#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_aarch64eb -#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_aarch64eb -#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_aarch64eb -#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_aarch64eb -#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_aarch64eb -#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_aarch64eb -#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_aarch64eb -#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_aarch64eb -#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_aarch64eb -#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_aarch64eb -#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_aarch64eb -#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_aarch64eb -#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_aarch64eb -#define 
gen_helper_neon_qzip16 gen_helper_neon_qzip16_aarch64eb -#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_aarch64eb -#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_aarch64eb -#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_aarch64eb -#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_aarch64eb -#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_aarch64eb -#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_aarch64eb -#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_aarch64eb -#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_aarch64eb -#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_aarch64eb -#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_aarch64eb -#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_aarch64eb -#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_aarch64eb -#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_aarch64eb -#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_aarch64eb -#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_aarch64eb -#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_aarch64eb -#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_aarch64eb -#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_aarch64eb -#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_aarch64eb -#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_aarch64eb -#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_aarch64eb -#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_aarch64eb -#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_aarch64eb -#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_aarch64eb -#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_aarch64eb -#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_aarch64eb -#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_aarch64eb -#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_aarch64eb -#define gen_helper_neon_tbl gen_helper_neon_tbl_aarch64eb -#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_aarch64eb -#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_aarch64eb -#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_aarch64eb -#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_aarch64eb -#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_aarch64eb -#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_aarch64eb -#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_aarch64eb -#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_aarch64eb -#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_aarch64eb -#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_aarch64eb -#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_aarch64eb -#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_aarch64eb -#define gen_helper_neon_zip16 gen_helper_neon_zip16_aarch64eb -#define gen_helper_neon_zip8 gen_helper_neon_zip8_aarch64eb -#define gen_helper_pre_hvc gen_helper_pre_hvc_aarch64eb -#define gen_helper_pre_smc gen_helper_pre_smc_aarch64eb -#define gen_helper_qadd16 gen_helper_qadd16_aarch64eb -#define gen_helper_qadd8 gen_helper_qadd8_aarch64eb -#define gen_helper_qaddsubx gen_helper_qaddsubx_aarch64eb -#define gen_helper_qsub16 gen_helper_qsub16_aarch64eb -#define gen_helper_qsub8 gen_helper_qsub8_aarch64eb -#define gen_helper_qsubaddx gen_helper_qsubaddx_aarch64eb -#define gen_helper_rbit gen_helper_rbit_aarch64eb -#define gen_helper_recpe_f32 gen_helper_recpe_f32_aarch64eb -#define 
gen_helper_recpe_u32 gen_helper_recpe_u32_aarch64eb -#define gen_helper_recps_f32 gen_helper_recps_f32_aarch64eb -#define gen_helper_rintd gen_helper_rintd_aarch64eb -#define gen_helper_rintd_exact gen_helper_rintd_exact_aarch64eb -#define gen_helper_rints gen_helper_rints_aarch64eb -#define gen_helper_rints_exact gen_helper_rints_exact_aarch64eb -#define gen_helper_ror_cc gen_helper_ror_cc_aarch64eb -#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_aarch64eb -#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_aarch64eb -#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_aarch64eb -#define gen_helper_sadd16 gen_helper_sadd16_aarch64eb -#define gen_helper_sadd8 gen_helper_sadd8_aarch64eb -#define gen_helper_saddsubx gen_helper_saddsubx_aarch64eb -#define gen_helper_sar_cc gen_helper_sar_cc_aarch64eb -#define gen_helper_sdiv gen_helper_sdiv_aarch64eb -#define gen_helper_sel_flags gen_helper_sel_flags_aarch64eb -#define gen_helper_set_cp_reg gen_helper_set_cp_reg_aarch64eb -#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_aarch64eb -#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_aarch64eb -#define gen_helper_set_r13_banked gen_helper_set_r13_banked_aarch64eb -#define gen_helper_set_rmode gen_helper_set_rmode_aarch64eb -#define gen_helper_set_user_reg gen_helper_set_user_reg_aarch64eb -#define gen_helper_shadd16 gen_helper_shadd16_aarch64eb -#define gen_helper_shadd8 gen_helper_shadd8_aarch64eb -#define gen_helper_shaddsubx gen_helper_shaddsubx_aarch64eb -#define gen_helper_shl_cc gen_helper_shl_cc_aarch64eb -#define gen_helper_shr_cc gen_helper_shr_cc_aarch64eb -#define gen_helper_shsub16 gen_helper_shsub16_aarch64eb -#define gen_helper_shsub8 gen_helper_shsub8_aarch64eb -#define gen_helper_shsubaddx gen_helper_shsubaddx_aarch64eb -#define gen_helper_ssat gen_helper_ssat_aarch64eb -#define gen_helper_ssat16 gen_helper_ssat16_aarch64eb -#define gen_helper_ssub16 gen_helper_ssub16_aarch64eb -#define gen_helper_ssub8 gen_helper_ssub8_aarch64eb -#define gen_helper_ssubaddx gen_helper_ssubaddx_aarch64eb -#define gen_helper_sub_saturate gen_helper_sub_saturate_aarch64eb -#define gen_helper_sxtb16 gen_helper_sxtb16_aarch64eb -#define gen_helper_uadd16 gen_helper_uadd16_aarch64eb -#define gen_helper_uadd8 gen_helper_uadd8_aarch64eb -#define gen_helper_uaddsubx gen_helper_uaddsubx_aarch64eb -#define gen_helper_udiv gen_helper_udiv_aarch64eb -#define gen_helper_uhadd16 gen_helper_uhadd16_aarch64eb -#define gen_helper_uhadd8 gen_helper_uhadd8_aarch64eb -#define gen_helper_uhaddsubx gen_helper_uhaddsubx_aarch64eb -#define gen_helper_uhsub16 gen_helper_uhsub16_aarch64eb -#define gen_helper_uhsub8 gen_helper_uhsub8_aarch64eb -#define gen_helper_uhsubaddx gen_helper_uhsubaddx_aarch64eb -#define gen_helper_uqadd16 gen_helper_uqadd16_aarch64eb -#define gen_helper_uqadd8 gen_helper_uqadd8_aarch64eb -#define gen_helper_uqaddsubx gen_helper_uqaddsubx_aarch64eb -#define gen_helper_uqsub16 gen_helper_uqsub16_aarch64eb -#define gen_helper_uqsub8 gen_helper_uqsub8_aarch64eb -#define gen_helper_uqsubaddx gen_helper_uqsubaddx_aarch64eb -#define gen_helper_usad8 gen_helper_usad8_aarch64eb -#define gen_helper_usat gen_helper_usat_aarch64eb -#define gen_helper_usat16 gen_helper_usat16_aarch64eb -#define gen_helper_usub16 gen_helper_usub16_aarch64eb -#define gen_helper_usub8 gen_helper_usub8_aarch64eb -#define gen_helper_usubaddx gen_helper_usubaddx_aarch64eb -#define gen_helper_uxtb16 gen_helper_uxtb16_aarch64eb -#define gen_helper_v7m_mrs gen_helper_v7m_mrs_aarch64eb -#define gen_helper_v7m_msr 
gen_helper_v7m_msr_aarch64eb -#define gen_helper_vfp_absd gen_helper_vfp_absd_aarch64eb -#define gen_helper_vfp_abss gen_helper_vfp_abss_aarch64eb -#define gen_helper_vfp_addd gen_helper_vfp_addd_aarch64eb -#define gen_helper_vfp_adds gen_helper_vfp_adds_aarch64eb -#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_aarch64eb -#define gen_helper_vfp_cmped gen_helper_vfp_cmped_aarch64eb -#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_aarch64eb -#define gen_helper_vfp_cmps gen_helper_vfp_cmps_aarch64eb -#define gen_helper_vfp_divd gen_helper_vfp_divd_aarch64eb -#define gen_helper_vfp_divs gen_helper_vfp_divs_aarch64eb -#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_aarch64eb -#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_aarch64eb -#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_aarch64eb -#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_aarch64eb -#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_aarch64eb -#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_aarch64eb -#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_aarch64eb -#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_aarch64eb -#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_aarch64eb -#define gen_helper_vfp_maxs gen_helper_vfp_maxs_aarch64eb -#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_aarch64eb -#define gen_helper_vfp_minnums gen_helper_vfp_minnums_aarch64eb -#define gen_helper_vfp_mins gen_helper_vfp_mins_aarch64eb -#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_aarch64eb -#define gen_helper_vfp_muladds gen_helper_vfp_muladds_aarch64eb -#define gen_helper_vfp_muld gen_helper_vfp_muld_aarch64eb -#define gen_helper_vfp_muls gen_helper_vfp_muls_aarch64eb -#define gen_helper_vfp_negd gen_helper_vfp_negd_aarch64eb -#define gen_helper_vfp_negs gen_helper_vfp_negs_aarch64eb -#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_aarch64eb -#define gen_helper_vfp_shtod gen_helper_vfp_shtod_aarch64eb -#define gen_helper_vfp_shtos gen_helper_vfp_shtos_aarch64eb -#define gen_helper_vfp_sitod gen_helper_vfp_sitod_aarch64eb -#define gen_helper_vfp_sitos gen_helper_vfp_sitos_aarch64eb -#define gen_helper_vfp_sltod gen_helper_vfp_sltod_aarch64eb -#define gen_helper_vfp_sltos gen_helper_vfp_sltos_aarch64eb -#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_aarch64eb -#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_aarch64eb -#define gen_helper_vfp_subd gen_helper_vfp_subd_aarch64eb -#define gen_helper_vfp_subs gen_helper_vfp_subs_aarch64eb -#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_aarch64eb -#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_aarch64eb -#define gen_helper_vfp_tosid gen_helper_vfp_tosid_aarch64eb -#define gen_helper_vfp_tosis gen_helper_vfp_tosis_aarch64eb -#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_aarch64eb -#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_aarch64eb -#define gen_helper_vfp_tosld gen_helper_vfp_tosld_aarch64eb -#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_aarch64eb -#define gen_helper_vfp_tosls gen_helper_vfp_tosls_aarch64eb -#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_aarch64eb -#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_aarch64eb -#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_aarch64eb -#define gen_helper_vfp_touid gen_helper_vfp_touid_aarch64eb -#define 
gen_helper_vfp_touis gen_helper_vfp_touis_aarch64eb -#define gen_helper_vfp_touizd gen_helper_vfp_touizd_aarch64eb -#define gen_helper_vfp_touizs gen_helper_vfp_touizs_aarch64eb -#define gen_helper_vfp_tould gen_helper_vfp_tould_aarch64eb -#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_aarch64eb -#define gen_helper_vfp_touls gen_helper_vfp_touls_aarch64eb -#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_aarch64eb -#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_aarch64eb -#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_aarch64eb -#define gen_helper_vfp_uitod gen_helper_vfp_uitod_aarch64eb -#define gen_helper_vfp_uitos gen_helper_vfp_uitos_aarch64eb -#define gen_helper_vfp_ultod gen_helper_vfp_ultod_aarch64eb -#define gen_helper_vfp_ultos gen_helper_vfp_ultos_aarch64eb -#define gen_helper_wfe gen_helper_wfe_aarch64eb -#define gen_helper_wfi gen_helper_wfi_aarch64eb -#define gen_hvc gen_hvc_aarch64eb -#define gen_intermediate_code_internal gen_intermediate_code_internal_aarch64eb -#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_aarch64eb -#define gen_iwmmxt_address gen_iwmmxt_address_aarch64eb -#define gen_iwmmxt_shift gen_iwmmxt_shift_aarch64eb -#define gen_jmp gen_jmp_aarch64eb -#define gen_load_and_replicate gen_load_and_replicate_aarch64eb -#define gen_load_exclusive gen_load_exclusive_aarch64eb -#define gen_logic_CC gen_logic_CC_aarch64eb -#define gen_logicq_cc gen_logicq_cc_aarch64eb -#define gen_lookup_tb gen_lookup_tb_aarch64eb -#define gen_mov_F0_vreg gen_mov_F0_vreg_aarch64eb -#define gen_mov_F1_vreg gen_mov_F1_vreg_aarch64eb -#define gen_mov_vreg_F0 gen_mov_vreg_F0_aarch64eb -#define gen_muls_i64_i32 gen_muls_i64_i32_aarch64eb -#define gen_mulu_i64_i32 gen_mulu_i64_i32_aarch64eb -#define gen_mulxy gen_mulxy_aarch64eb -#define gen_neon_add gen_neon_add_aarch64eb -#define gen_neon_addl gen_neon_addl_aarch64eb -#define gen_neon_addl_saturate gen_neon_addl_saturate_aarch64eb -#define gen_neon_bsl gen_neon_bsl_aarch64eb -#define gen_neon_dup_high16 gen_neon_dup_high16_aarch64eb -#define gen_neon_dup_low16 gen_neon_dup_low16_aarch64eb -#define gen_neon_dup_u8 gen_neon_dup_u8_aarch64eb -#define gen_neon_mull gen_neon_mull_aarch64eb -#define gen_neon_narrow gen_neon_narrow_aarch64eb -#define gen_neon_narrow_op gen_neon_narrow_op_aarch64eb -#define gen_neon_narrow_sats gen_neon_narrow_sats_aarch64eb -#define gen_neon_narrow_satu gen_neon_narrow_satu_aarch64eb -#define gen_neon_negl gen_neon_negl_aarch64eb -#define gen_neon_rsb gen_neon_rsb_aarch64eb -#define gen_neon_shift_narrow gen_neon_shift_narrow_aarch64eb -#define gen_neon_subl gen_neon_subl_aarch64eb -#define gen_neon_trn_u16 gen_neon_trn_u16_aarch64eb -#define gen_neon_trn_u8 gen_neon_trn_u8_aarch64eb -#define gen_neon_unarrow_sats gen_neon_unarrow_sats_aarch64eb -#define gen_neon_unzip gen_neon_unzip_aarch64eb -#define gen_neon_widen gen_neon_widen_aarch64eb -#define gen_neon_zip gen_neon_zip_aarch64eb -#define gen_new_label gen_new_label_aarch64eb -#define gen_nop_hint gen_nop_hint_aarch64eb -#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_aarch64eb -#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_aarch64eb -#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_aarch64eb -#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_aarch64eb -#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_aarch64eb -#define 
gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_aarch64eb -#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_aarch64eb -#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_aarch64eb -#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_aarch64eb -#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_aarch64eb -#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_aarch64eb -#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_aarch64eb -#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_aarch64eb -#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_aarch64eb -#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_aarch64eb -#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_aarch64eb -#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_aarch64eb -#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_aarch64eb -#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_aarch64eb -#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_aarch64eb -#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_aarch64eb -#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_aarch64eb -#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_aarch64eb -#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_aarch64eb -#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_aarch64eb -#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_aarch64eb -#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_aarch64eb -#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_aarch64eb -#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_aarch64eb -#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_aarch64eb -#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_aarch64eb -#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_aarch64eb -#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_aarch64eb -#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_aarch64eb -#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_aarch64eb -#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_aarch64eb -#define 
gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_aarch64eb -#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_aarch64eb -#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_aarch64eb -#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_aarch64eb -#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_aarch64eb -#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_aarch64eb -#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_aarch64eb -#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_aarch64eb -#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_aarch64eb -#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_aarch64eb -#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_aarch64eb -#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_aarch64eb -#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_aarch64eb -#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_aarch64eb -#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_aarch64eb -#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_aarch64eb -#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_aarch64eb -#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_aarch64eb -#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_aarch64eb -#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_aarch64eb -#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_aarch64eb -#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_aarch64eb -#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_aarch64eb -#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_aarch64eb -#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_aarch64eb -#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_aarch64eb -#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_aarch64eb -#define gen_rev16 gen_rev16_aarch64eb -#define gen_revsh gen_revsh_aarch64eb -#define gen_rfe gen_rfe_aarch64eb -#define gen_sar gen_sar_aarch64eb -#define gen_sbc_CC gen_sbc_CC_aarch64eb -#define gen_sbfx gen_sbfx_aarch64eb -#define gen_set_CF_bit31 gen_set_CF_bit31_aarch64eb -#define gen_set_condexec gen_set_condexec_aarch64eb -#define gen_set_cpsr gen_set_cpsr_aarch64eb -#define gen_set_label gen_set_label_aarch64eb -#define gen_set_pc_im gen_set_pc_im_aarch64eb -#define gen_set_psr gen_set_psr_aarch64eb -#define gen_set_psr_im gen_set_psr_im_aarch64eb -#define gen_shl gen_shl_aarch64eb -#define gen_shr gen_shr_aarch64eb -#define gen_smc gen_smc_aarch64eb -#define gen_smul_dual gen_smul_dual_aarch64eb -#define gen_srs gen_srs_aarch64eb -#define gen_ss_advance gen_ss_advance_aarch64eb -#define gen_step_complete_exception gen_step_complete_exception_aarch64eb -#define gen_store_exclusive gen_store_exclusive_aarch64eb -#define gen_storeq_reg gen_storeq_reg_aarch64eb -#define gen_sub_carry gen_sub_carry_aarch64eb -#define gen_sub_CC gen_sub_CC_aarch64eb -#define gen_subq_msw gen_subq_msw_aarch64eb -#define gen_swap_half gen_swap_half_aarch64eb -#define gen_thumb2_data_op gen_thumb2_data_op_aarch64eb 
-#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_aarch64eb -#define gen_ubfx gen_ubfx_aarch64eb -#define gen_vfp_abs gen_vfp_abs_aarch64eb -#define gen_vfp_add gen_vfp_add_aarch64eb -#define gen_vfp_cmp gen_vfp_cmp_aarch64eb -#define gen_vfp_cmpe gen_vfp_cmpe_aarch64eb -#define gen_vfp_div gen_vfp_div_aarch64eb -#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_aarch64eb -#define gen_vfp_F1_mul gen_vfp_F1_mul_aarch64eb -#define gen_vfp_F1_neg gen_vfp_F1_neg_aarch64eb -#define gen_vfp_ld gen_vfp_ld_aarch64eb -#define gen_vfp_mrs gen_vfp_mrs_aarch64eb -#define gen_vfp_msr gen_vfp_msr_aarch64eb -#define gen_vfp_mul gen_vfp_mul_aarch64eb -#define gen_vfp_neg gen_vfp_neg_aarch64eb -#define gen_vfp_shto gen_vfp_shto_aarch64eb -#define gen_vfp_sito gen_vfp_sito_aarch64eb -#define gen_vfp_slto gen_vfp_slto_aarch64eb -#define gen_vfp_sqrt gen_vfp_sqrt_aarch64eb -#define gen_vfp_st gen_vfp_st_aarch64eb -#define gen_vfp_sub gen_vfp_sub_aarch64eb -#define gen_vfp_tosh gen_vfp_tosh_aarch64eb -#define gen_vfp_tosi gen_vfp_tosi_aarch64eb -#define gen_vfp_tosiz gen_vfp_tosiz_aarch64eb -#define gen_vfp_tosl gen_vfp_tosl_aarch64eb -#define gen_vfp_touh gen_vfp_touh_aarch64eb -#define gen_vfp_toui gen_vfp_toui_aarch64eb -#define gen_vfp_touiz gen_vfp_touiz_aarch64eb -#define gen_vfp_toul gen_vfp_toul_aarch64eb -#define gen_vfp_uhto gen_vfp_uhto_aarch64eb -#define gen_vfp_uito gen_vfp_uito_aarch64eb -#define gen_vfp_ulto gen_vfp_ulto_aarch64eb -#define get_arm_cp_reginfo get_arm_cp_reginfo_aarch64eb -#define get_clock get_clock_aarch64eb -#define get_clock_realtime get_clock_realtime_aarch64eb -#define get_constraint_priority get_constraint_priority_aarch64eb -#define get_float_exception_flags get_float_exception_flags_aarch64eb -#define get_float_rounding_mode get_float_rounding_mode_aarch64eb -#define get_fpstatus_ptr get_fpstatus_ptr_aarch64eb -#define get_level1_table_address get_level1_table_address_aarch64eb -#define get_mem_index get_mem_index_aarch64eb -#define get_next_param_value get_next_param_value_aarch64eb -#define get_opt_name get_opt_name_aarch64eb -#define get_opt_value get_opt_value_aarch64eb -#define get_page_addr_code get_page_addr_code_aarch64eb -#define get_param_value get_param_value_aarch64eb -#define get_phys_addr get_phys_addr_aarch64eb -#define get_phys_addr_lpae get_phys_addr_lpae_aarch64eb -#define get_phys_addr_mpu get_phys_addr_mpu_aarch64eb -#define get_phys_addr_v5 get_phys_addr_v5_aarch64eb -#define get_phys_addr_v6 get_phys_addr_v6_aarch64eb -#define get_system_memory get_system_memory_aarch64eb -#define get_ticks_per_sec get_ticks_per_sec_aarch64eb -#define g_list_insert_sorted_merged g_list_insert_sorted_merged_aarch64eb -#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__aarch64eb -#define gt_cntfrq_access gt_cntfrq_access_aarch64eb -#define gt_cnt_read gt_cnt_read_aarch64eb -#define gt_cnt_reset gt_cnt_reset_aarch64eb -#define gt_counter_access gt_counter_access_aarch64eb -#define gt_ctl_write gt_ctl_write_aarch64eb -#define gt_cval_write gt_cval_write_aarch64eb -#define gt_get_countervalue gt_get_countervalue_aarch64eb -#define gt_pct_access gt_pct_access_aarch64eb -#define gt_ptimer_access gt_ptimer_access_aarch64eb -#define gt_recalc_timer gt_recalc_timer_aarch64eb -#define gt_timer_access gt_timer_access_aarch64eb -#define gt_tval_read gt_tval_read_aarch64eb -#define gt_tval_write gt_tval_write_aarch64eb -#define gt_vct_access gt_vct_access_aarch64eb -#define gt_vtimer_access gt_vtimer_access_aarch64eb -#define guest_phys_blocks_free 
guest_phys_blocks_free_aarch64eb -#define guest_phys_blocks_init guest_phys_blocks_init_aarch64eb -#define handle_vcvt handle_vcvt_aarch64eb -#define handle_vminmaxnm handle_vminmaxnm_aarch64eb -#define handle_vrint handle_vrint_aarch64eb -#define handle_vsel handle_vsel_aarch64eb -#define has_help_option has_help_option_aarch64eb -#define have_bmi1 have_bmi1_aarch64eb -#define have_bmi2 have_bmi2_aarch64eb -#define hcr_write hcr_write_aarch64eb -#define helper_access_check_cp_reg helper_access_check_cp_reg_aarch64eb -#define helper_add_saturate helper_add_saturate_aarch64eb -#define helper_add_setq helper_add_setq_aarch64eb -#define helper_add_usaturate helper_add_usaturate_aarch64eb -#define helper_be_ldl_cmmu helper_be_ldl_cmmu_aarch64eb -#define helper_be_ldq_cmmu helper_be_ldq_cmmu_aarch64eb -#define helper_be_ldq_mmu helper_be_ldq_mmu_aarch64eb -#define helper_be_ldsl_mmu helper_be_ldsl_mmu_aarch64eb -#define helper_be_ldsw_mmu helper_be_ldsw_mmu_aarch64eb -#define helper_be_ldul_mmu helper_be_ldul_mmu_aarch64eb -#define helper_be_lduw_mmu helper_be_lduw_mmu_aarch64eb -#define helper_be_ldw_cmmu helper_be_ldw_cmmu_aarch64eb -#define helper_be_stl_mmu helper_be_stl_mmu_aarch64eb -#define helper_be_stq_mmu helper_be_stq_mmu_aarch64eb -#define helper_be_stw_mmu helper_be_stw_mmu_aarch64eb -#define helper_clear_pstate_ss helper_clear_pstate_ss_aarch64eb -#define helper_clz_arm helper_clz_arm_aarch64eb -#define helper_cpsr_read helper_cpsr_read_aarch64eb -#define helper_cpsr_write helper_cpsr_write_aarch64eb -#define helper_crc32_arm helper_crc32_arm_aarch64eb -#define helper_crc32c helper_crc32c_aarch64eb +#define cpu_aarch64_init cpu_aarch64_init_aarch64eb +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_aarch64eb +#define arm_cpu_update_virq arm_cpu_update_virq_aarch64eb +#define arm_cpu_update_vfiq arm_cpu_update_vfiq_aarch64eb +#define arm_cpu_initfn arm_cpu_initfn_aarch64eb +#define gt_cntfrq_period_ns gt_cntfrq_period_ns_aarch64eb +#define arm_cpu_post_init arm_cpu_post_init_aarch64eb +#define arm_cpu_realizefn arm_cpu_realizefn_aarch64eb +#define arm_cpu_class_init arm_cpu_class_init_aarch64eb +#define cpu_arm_init cpu_arm_init_aarch64eb #define helper_crypto_aese helper_crypto_aese_aarch64eb #define helper_crypto_aesmc helper_crypto_aesmc_aarch64eb #define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_aarch64eb @@ -1371,1664 +1292,27 @@ #define helper_crypto_sha256h2 helper_crypto_sha256h2_aarch64eb #define helper_crypto_sha256su0 helper_crypto_sha256su0_aarch64eb #define helper_crypto_sha256su1 helper_crypto_sha256su1_aarch64eb -#define helper_dc_zva helper_dc_zva_aarch64eb -#define helper_double_saturate helper_double_saturate_aarch64eb -#define helper_exception_internal helper_exception_internal_aarch64eb -#define helper_exception_return helper_exception_return_aarch64eb -#define helper_exception_with_syndrome helper_exception_with_syndrome_aarch64eb -#define helper_get_cp_reg helper_get_cp_reg_aarch64eb -#define helper_get_cp_reg64 helper_get_cp_reg64_aarch64eb -#define helper_get_r13_banked helper_get_r13_banked_aarch64eb -#define helper_get_user_reg helper_get_user_reg_aarch64eb -#define helper_iwmmxt_addcb helper_iwmmxt_addcb_aarch64eb -#define helper_iwmmxt_addcl helper_iwmmxt_addcl_aarch64eb -#define helper_iwmmxt_addcw helper_iwmmxt_addcw_aarch64eb -#define helper_iwmmxt_addnb helper_iwmmxt_addnb_aarch64eb -#define helper_iwmmxt_addnl helper_iwmmxt_addnl_aarch64eb -#define helper_iwmmxt_addnw helper_iwmmxt_addnw_aarch64eb -#define helper_iwmmxt_addsb 
helper_iwmmxt_addsb_aarch64eb -#define helper_iwmmxt_addsl helper_iwmmxt_addsl_aarch64eb -#define helper_iwmmxt_addsw helper_iwmmxt_addsw_aarch64eb -#define helper_iwmmxt_addub helper_iwmmxt_addub_aarch64eb -#define helper_iwmmxt_addul helper_iwmmxt_addul_aarch64eb -#define helper_iwmmxt_adduw helper_iwmmxt_adduw_aarch64eb -#define helper_iwmmxt_align helper_iwmmxt_align_aarch64eb -#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_aarch64eb -#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_aarch64eb -#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_aarch64eb -#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_aarch64eb -#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_aarch64eb -#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_aarch64eb -#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_aarch64eb -#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_aarch64eb -#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_aarch64eb -#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_aarch64eb -#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_aarch64eb -#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_aarch64eb -#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_aarch64eb -#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_aarch64eb -#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_aarch64eb -#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_aarch64eb -#define helper_iwmmxt_insr helper_iwmmxt_insr_aarch64eb -#define helper_iwmmxt_macsw helper_iwmmxt_macsw_aarch64eb -#define helper_iwmmxt_macuw helper_iwmmxt_macuw_aarch64eb -#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_aarch64eb -#define helper_iwmmxt_madduq helper_iwmmxt_madduq_aarch64eb -#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_aarch64eb -#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_aarch64eb -#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_aarch64eb -#define helper_iwmmxt_maxub helper_iwmmxt_maxub_aarch64eb -#define helper_iwmmxt_maxul helper_iwmmxt_maxul_aarch64eb -#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_aarch64eb -#define helper_iwmmxt_minsb helper_iwmmxt_minsb_aarch64eb -#define helper_iwmmxt_minsl helper_iwmmxt_minsl_aarch64eb -#define helper_iwmmxt_minsw helper_iwmmxt_minsw_aarch64eb -#define helper_iwmmxt_minub helper_iwmmxt_minub_aarch64eb -#define helper_iwmmxt_minul helper_iwmmxt_minul_aarch64eb -#define helper_iwmmxt_minuw helper_iwmmxt_minuw_aarch64eb -#define helper_iwmmxt_msbb helper_iwmmxt_msbb_aarch64eb -#define helper_iwmmxt_msbl helper_iwmmxt_msbl_aarch64eb -#define helper_iwmmxt_msbw helper_iwmmxt_msbw_aarch64eb -#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_aarch64eb -#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_aarch64eb -#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_aarch64eb -#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_aarch64eb -#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_aarch64eb -#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_aarch64eb -#define helper_iwmmxt_mululw helper_iwmmxt_mululw_aarch64eb -#define helper_iwmmxt_packsl helper_iwmmxt_packsl_aarch64eb -#define helper_iwmmxt_packsq helper_iwmmxt_packsq_aarch64eb -#define helper_iwmmxt_packsw helper_iwmmxt_packsw_aarch64eb -#define helper_iwmmxt_packul helper_iwmmxt_packul_aarch64eb -#define helper_iwmmxt_packuq helper_iwmmxt_packuq_aarch64eb -#define helper_iwmmxt_packuw helper_iwmmxt_packuw_aarch64eb -#define helper_iwmmxt_rorl helper_iwmmxt_rorl_aarch64eb -#define helper_iwmmxt_rorq helper_iwmmxt_rorq_aarch64eb -#define helper_iwmmxt_rorw helper_iwmmxt_rorw_aarch64eb -#define helper_iwmmxt_sadb 
helper_iwmmxt_sadb_aarch64eb -#define helper_iwmmxt_sadw helper_iwmmxt_sadw_aarch64eb -#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_aarch64eb -#define helper_iwmmxt_shufh helper_iwmmxt_shufh_aarch64eb -#define helper_iwmmxt_slll helper_iwmmxt_slll_aarch64eb -#define helper_iwmmxt_sllq helper_iwmmxt_sllq_aarch64eb -#define helper_iwmmxt_sllw helper_iwmmxt_sllw_aarch64eb -#define helper_iwmmxt_sral helper_iwmmxt_sral_aarch64eb -#define helper_iwmmxt_sraq helper_iwmmxt_sraq_aarch64eb -#define helper_iwmmxt_sraw helper_iwmmxt_sraw_aarch64eb -#define helper_iwmmxt_srll helper_iwmmxt_srll_aarch64eb -#define helper_iwmmxt_srlq helper_iwmmxt_srlq_aarch64eb -#define helper_iwmmxt_srlw helper_iwmmxt_srlw_aarch64eb -#define helper_iwmmxt_subnb helper_iwmmxt_subnb_aarch64eb -#define helper_iwmmxt_subnl helper_iwmmxt_subnl_aarch64eb -#define helper_iwmmxt_subnw helper_iwmmxt_subnw_aarch64eb -#define helper_iwmmxt_subsb helper_iwmmxt_subsb_aarch64eb -#define helper_iwmmxt_subsl helper_iwmmxt_subsl_aarch64eb -#define helper_iwmmxt_subsw helper_iwmmxt_subsw_aarch64eb -#define helper_iwmmxt_subub helper_iwmmxt_subub_aarch64eb -#define helper_iwmmxt_subul helper_iwmmxt_subul_aarch64eb -#define helper_iwmmxt_subuw helper_iwmmxt_subuw_aarch64eb -#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_aarch64eb -#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_aarch64eb -#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_aarch64eb -#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_aarch64eb -#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_aarch64eb -#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_aarch64eb -#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_aarch64eb -#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_aarch64eb -#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_aarch64eb -#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_aarch64eb -#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_aarch64eb -#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_aarch64eb -#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_aarch64eb -#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_aarch64eb -#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_aarch64eb -#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_aarch64eb -#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_aarch64eb -#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_aarch64eb -#define helper_ldb_cmmu helper_ldb_cmmu_aarch64eb -#define helper_ldb_mmu helper_ldb_mmu_aarch64eb -#define helper_ldl_cmmu helper_ldl_cmmu_aarch64eb -#define helper_ldl_mmu helper_ldl_mmu_aarch64eb -#define helper_ldq_cmmu helper_ldq_cmmu_aarch64eb -#define helper_ldq_mmu helper_ldq_mmu_aarch64eb -#define helper_ldw_cmmu helper_ldw_cmmu_aarch64eb -#define helper_ldw_mmu helper_ldw_mmu_aarch64eb -#define helper_le_ldl_cmmu helper_le_ldl_cmmu_aarch64eb -#define helper_le_ldq_cmmu helper_le_ldq_cmmu_aarch64eb -#define helper_le_ldq_mmu helper_le_ldq_mmu_aarch64eb -#define helper_le_ldsl_mmu helper_le_ldsl_mmu_aarch64eb -#define helper_le_ldsw_mmu helper_le_ldsw_mmu_aarch64eb -#define helper_le_ldul_mmu helper_le_ldul_mmu_aarch64eb -#define helper_le_lduw_mmu helper_le_lduw_mmu_aarch64eb -#define helper_le_ldw_cmmu helper_le_ldw_cmmu_aarch64eb -#define helper_le_stl_mmu helper_le_stl_mmu_aarch64eb -#define helper_le_stq_mmu helper_le_stq_mmu_aarch64eb -#define helper_le_stw_mmu helper_le_stw_mmu_aarch64eb -#define helper_msr_i_pstate helper_msr_i_pstate_aarch64eb -#define 
helper_neon_abd_f32 helper_neon_abd_f32_aarch64eb -#define helper_neon_abdl_s16 helper_neon_abdl_s16_aarch64eb -#define helper_neon_abdl_s32 helper_neon_abdl_s32_aarch64eb -#define helper_neon_abdl_s64 helper_neon_abdl_s64_aarch64eb -#define helper_neon_abdl_u16 helper_neon_abdl_u16_aarch64eb -#define helper_neon_abdl_u32 helper_neon_abdl_u32_aarch64eb -#define helper_neon_abdl_u64 helper_neon_abdl_u64_aarch64eb -#define helper_neon_abd_s16 helper_neon_abd_s16_aarch64eb -#define helper_neon_abd_s32 helper_neon_abd_s32_aarch64eb -#define helper_neon_abd_s8 helper_neon_abd_s8_aarch64eb -#define helper_neon_abd_u16 helper_neon_abd_u16_aarch64eb -#define helper_neon_abd_u32 helper_neon_abd_u32_aarch64eb -#define helper_neon_abd_u8 helper_neon_abd_u8_aarch64eb -#define helper_neon_abs_s16 helper_neon_abs_s16_aarch64eb -#define helper_neon_abs_s8 helper_neon_abs_s8_aarch64eb -#define helper_neon_acge_f32 helper_neon_acge_f32_aarch64eb -#define helper_neon_acge_f64 helper_neon_acge_f64_aarch64eb -#define helper_neon_acgt_f32 helper_neon_acgt_f32_aarch64eb -#define helper_neon_acgt_f64 helper_neon_acgt_f64_aarch64eb -#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_aarch64eb -#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_aarch64eb -#define helper_neon_addl_u16 helper_neon_addl_u16_aarch64eb -#define helper_neon_addl_u32 helper_neon_addl_u32_aarch64eb -#define helper_neon_add_u16 helper_neon_add_u16_aarch64eb -#define helper_neon_add_u8 helper_neon_add_u8_aarch64eb -#define helper_neon_ceq_f32 helper_neon_ceq_f32_aarch64eb -#define helper_neon_ceq_u16 helper_neon_ceq_u16_aarch64eb -#define helper_neon_ceq_u32 helper_neon_ceq_u32_aarch64eb -#define helper_neon_ceq_u8 helper_neon_ceq_u8_aarch64eb -#define helper_neon_cge_f32 helper_neon_cge_f32_aarch64eb -#define helper_neon_cge_s16 helper_neon_cge_s16_aarch64eb -#define helper_neon_cge_s32 helper_neon_cge_s32_aarch64eb -#define helper_neon_cge_s8 helper_neon_cge_s8_aarch64eb -#define helper_neon_cge_u16 helper_neon_cge_u16_aarch64eb -#define helper_neon_cge_u32 helper_neon_cge_u32_aarch64eb -#define helper_neon_cge_u8 helper_neon_cge_u8_aarch64eb -#define helper_neon_cgt_f32 helper_neon_cgt_f32_aarch64eb -#define helper_neon_cgt_s16 helper_neon_cgt_s16_aarch64eb -#define helper_neon_cgt_s32 helper_neon_cgt_s32_aarch64eb -#define helper_neon_cgt_s8 helper_neon_cgt_s8_aarch64eb -#define helper_neon_cgt_u16 helper_neon_cgt_u16_aarch64eb -#define helper_neon_cgt_u32 helper_neon_cgt_u32_aarch64eb -#define helper_neon_cgt_u8 helper_neon_cgt_u8_aarch64eb -#define helper_neon_cls_s16 helper_neon_cls_s16_aarch64eb -#define helper_neon_cls_s32 helper_neon_cls_s32_aarch64eb -#define helper_neon_cls_s8 helper_neon_cls_s8_aarch64eb -#define helper_neon_clz_u16 helper_neon_clz_u16_aarch64eb -#define helper_neon_clz_u8 helper_neon_clz_u8_aarch64eb -#define helper_neon_cnt_u8 helper_neon_cnt_u8_aarch64eb -#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_aarch64eb -#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_aarch64eb -#define helper_neon_hadd_s16 helper_neon_hadd_s16_aarch64eb -#define helper_neon_hadd_s32 helper_neon_hadd_s32_aarch64eb -#define helper_neon_hadd_s8 helper_neon_hadd_s8_aarch64eb -#define helper_neon_hadd_u16 helper_neon_hadd_u16_aarch64eb -#define helper_neon_hadd_u32 helper_neon_hadd_u32_aarch64eb -#define helper_neon_hadd_u8 helper_neon_hadd_u8_aarch64eb -#define helper_neon_hsub_s16 helper_neon_hsub_s16_aarch64eb -#define helper_neon_hsub_s32 
helper_neon_hsub_s32_aarch64eb -#define helper_neon_hsub_s8 helper_neon_hsub_s8_aarch64eb -#define helper_neon_hsub_u16 helper_neon_hsub_u16_aarch64eb -#define helper_neon_hsub_u32 helper_neon_hsub_u32_aarch64eb -#define helper_neon_hsub_u8 helper_neon_hsub_u8_aarch64eb -#define helper_neon_max_s16 helper_neon_max_s16_aarch64eb -#define helper_neon_max_s32 helper_neon_max_s32_aarch64eb -#define helper_neon_max_s8 helper_neon_max_s8_aarch64eb -#define helper_neon_max_u16 helper_neon_max_u16_aarch64eb -#define helper_neon_max_u32 helper_neon_max_u32_aarch64eb -#define helper_neon_max_u8 helper_neon_max_u8_aarch64eb -#define helper_neon_min_s16 helper_neon_min_s16_aarch64eb -#define helper_neon_min_s32 helper_neon_min_s32_aarch64eb -#define helper_neon_min_s8 helper_neon_min_s8_aarch64eb -#define helper_neon_min_u16 helper_neon_min_u16_aarch64eb -#define helper_neon_min_u32 helper_neon_min_u32_aarch64eb -#define helper_neon_min_u8 helper_neon_min_u8_aarch64eb -#define helper_neon_mull_p8 helper_neon_mull_p8_aarch64eb -#define helper_neon_mull_s16 helper_neon_mull_s16_aarch64eb -#define helper_neon_mull_s8 helper_neon_mull_s8_aarch64eb -#define helper_neon_mull_u16 helper_neon_mull_u16_aarch64eb -#define helper_neon_mull_u8 helper_neon_mull_u8_aarch64eb -#define helper_neon_mul_p8 helper_neon_mul_p8_aarch64eb -#define helper_neon_mul_u16 helper_neon_mul_u16_aarch64eb -#define helper_neon_mul_u8 helper_neon_mul_u8_aarch64eb -#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_aarch64eb -#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_aarch64eb -#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_aarch64eb -#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_aarch64eb -#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_aarch64eb -#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_aarch64eb -#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_aarch64eb -#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_aarch64eb -#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_aarch64eb -#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_aarch64eb -#define helper_neon_narrow_u16 helper_neon_narrow_u16_aarch64eb -#define helper_neon_narrow_u8 helper_neon_narrow_u8_aarch64eb -#define helper_neon_negl_u16 helper_neon_negl_u16_aarch64eb -#define helper_neon_negl_u32 helper_neon_negl_u32_aarch64eb -#define helper_neon_paddl_u16 helper_neon_paddl_u16_aarch64eb -#define helper_neon_paddl_u32 helper_neon_paddl_u32_aarch64eb -#define helper_neon_padd_u16 helper_neon_padd_u16_aarch64eb -#define helper_neon_padd_u8 helper_neon_padd_u8_aarch64eb -#define helper_neon_pmax_s16 helper_neon_pmax_s16_aarch64eb -#define helper_neon_pmax_s8 helper_neon_pmax_s8_aarch64eb -#define helper_neon_pmax_u16 helper_neon_pmax_u16_aarch64eb -#define helper_neon_pmax_u8 helper_neon_pmax_u8_aarch64eb -#define helper_neon_pmin_s16 helper_neon_pmin_s16_aarch64eb -#define helper_neon_pmin_s8 helper_neon_pmin_s8_aarch64eb -#define helper_neon_pmin_u16 helper_neon_pmin_u16_aarch64eb -#define helper_neon_pmin_u8 helper_neon_pmin_u8_aarch64eb -#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_aarch64eb -#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_aarch64eb -#define helper_neon_qabs_s16 helper_neon_qabs_s16_aarch64eb -#define helper_neon_qabs_s32 helper_neon_qabs_s32_aarch64eb -#define helper_neon_qabs_s64 helper_neon_qabs_s64_aarch64eb -#define helper_neon_qabs_s8 helper_neon_qabs_s8_aarch64eb 
-#define helper_neon_qadd_s16 helper_neon_qadd_s16_aarch64eb -#define helper_neon_qadd_s32 helper_neon_qadd_s32_aarch64eb -#define helper_neon_qadd_s64 helper_neon_qadd_s64_aarch64eb -#define helper_neon_qadd_s8 helper_neon_qadd_s8_aarch64eb -#define helper_neon_qadd_u16 helper_neon_qadd_u16_aarch64eb -#define helper_neon_qadd_u32 helper_neon_qadd_u32_aarch64eb -#define helper_neon_qadd_u64 helper_neon_qadd_u64_aarch64eb -#define helper_neon_qadd_u8 helper_neon_qadd_u8_aarch64eb -#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_aarch64eb -#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_aarch64eb -#define helper_neon_qneg_s16 helper_neon_qneg_s16_aarch64eb -#define helper_neon_qneg_s32 helper_neon_qneg_s32_aarch64eb -#define helper_neon_qneg_s64 helper_neon_qneg_s64_aarch64eb -#define helper_neon_qneg_s8 helper_neon_qneg_s8_aarch64eb -#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_aarch64eb -#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_aarch64eb -#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_aarch64eb -#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_aarch64eb -#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_aarch64eb -#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_aarch64eb -#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_aarch64eb -#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_aarch64eb -#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_aarch64eb -#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_aarch64eb -#define helper_neon_qshl_s16 helper_neon_qshl_s16_aarch64eb -#define helper_neon_qshl_s32 helper_neon_qshl_s32_aarch64eb -#define helper_neon_qshl_s64 helper_neon_qshl_s64_aarch64eb -#define helper_neon_qshl_s8 helper_neon_qshl_s8_aarch64eb -#define helper_neon_qshl_u16 helper_neon_qshl_u16_aarch64eb -#define helper_neon_qshl_u32 helper_neon_qshl_u32_aarch64eb -#define helper_neon_qshl_u64 helper_neon_qshl_u64_aarch64eb -#define helper_neon_qshl_u8 helper_neon_qshl_u8_aarch64eb -#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_aarch64eb -#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_aarch64eb -#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_aarch64eb -#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_aarch64eb -#define helper_neon_qsub_s16 helper_neon_qsub_s16_aarch64eb -#define helper_neon_qsub_s32 helper_neon_qsub_s32_aarch64eb -#define helper_neon_qsub_s64 helper_neon_qsub_s64_aarch64eb -#define helper_neon_qsub_s8 helper_neon_qsub_s8_aarch64eb -#define helper_neon_qsub_u16 helper_neon_qsub_u16_aarch64eb -#define helper_neon_qsub_u32 helper_neon_qsub_u32_aarch64eb -#define helper_neon_qsub_u64 helper_neon_qsub_u64_aarch64eb -#define helper_neon_qsub_u8 helper_neon_qsub_u8_aarch64eb -#define helper_neon_qunzip16 helper_neon_qunzip16_aarch64eb -#define helper_neon_qunzip32 helper_neon_qunzip32_aarch64eb -#define helper_neon_qunzip8 helper_neon_qunzip8_aarch64eb -#define helper_neon_qzip16 helper_neon_qzip16_aarch64eb -#define helper_neon_qzip32 helper_neon_qzip32_aarch64eb -#define helper_neon_qzip8 helper_neon_qzip8_aarch64eb -#define helper_neon_rbit_u8 helper_neon_rbit_u8_aarch64eb -#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_aarch64eb -#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_aarch64eb -#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_aarch64eb -#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_aarch64eb -#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_aarch64eb -#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_aarch64eb -#define helper_neon_rshl_s16 helper_neon_rshl_s16_aarch64eb 
-#define helper_neon_rshl_s32 helper_neon_rshl_s32_aarch64eb
-#define helper_neon_rshl_s64 helper_neon_rshl_s64_aarch64eb
-#define helper_neon_rshl_s8 helper_neon_rshl_s8_aarch64eb
-#define helper_neon_rshl_u16 helper_neon_rshl_u16_aarch64eb
-#define helper_neon_rshl_u32 helper_neon_rshl_u32_aarch64eb
-#define helper_neon_rshl_u64 helper_neon_rshl_u64_aarch64eb
-#define helper_neon_rshl_u8 helper_neon_rshl_u8_aarch64eb
-#define helper_neon_shl_s16 helper_neon_shl_s16_aarch64eb
-#define helper_neon_shl_s32 helper_neon_shl_s32_aarch64eb
-#define helper_neon_shl_s64 helper_neon_shl_s64_aarch64eb
-#define helper_neon_shl_s8 helper_neon_shl_s8_aarch64eb
-#define helper_neon_shl_u16 helper_neon_shl_u16_aarch64eb
-#define helper_neon_shl_u32 helper_neon_shl_u32_aarch64eb
-#define helper_neon_shl_u64 helper_neon_shl_u64_aarch64eb
-#define helper_neon_shl_u8 helper_neon_shl_u8_aarch64eb
-#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_aarch64eb
-#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_aarch64eb
-#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_aarch64eb
-#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_aarch64eb
-#define helper_neon_subl_u16 helper_neon_subl_u16_aarch64eb
-#define helper_neon_subl_u32 helper_neon_subl_u32_aarch64eb
-#define helper_neon_sub_u16 helper_neon_sub_u16_aarch64eb
-#define helper_neon_sub_u8 helper_neon_sub_u8_aarch64eb
-#define helper_neon_tbl helper_neon_tbl_aarch64eb
-#define helper_neon_tst_u16 helper_neon_tst_u16_aarch64eb
-#define helper_neon_tst_u32 helper_neon_tst_u32_aarch64eb
-#define helper_neon_tst_u8 helper_neon_tst_u8_aarch64eb
-#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_aarch64eb
-#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_aarch64eb
-#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_aarch64eb
-#define helper_neon_unzip16 helper_neon_unzip16_aarch64eb
-#define helper_neon_unzip8 helper_neon_unzip8_aarch64eb
-#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_aarch64eb
-#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_aarch64eb
-#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_aarch64eb
-#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_aarch64eb
-#define helper_neon_widen_s16 helper_neon_widen_s16_aarch64eb
-#define helper_neon_widen_s8 helper_neon_widen_s8_aarch64eb
-#define helper_neon_widen_u16 helper_neon_widen_u16_aarch64eb
-#define helper_neon_widen_u8 helper_neon_widen_u8_aarch64eb
-#define helper_neon_zip16 helper_neon_zip16_aarch64eb
-#define helper_neon_zip8 helper_neon_zip8_aarch64eb
-#define helper_pre_hvc helper_pre_hvc_aarch64eb
-#define helper_pre_smc helper_pre_smc_aarch64eb
-#define helper_qadd16 helper_qadd16_aarch64eb
-#define helper_qadd8 helper_qadd8_aarch64eb
-#define helper_qaddsubx helper_qaddsubx_aarch64eb
-#define helper_qsub16 helper_qsub16_aarch64eb
-#define helper_qsub8 helper_qsub8_aarch64eb
-#define helper_qsubaddx helper_qsubaddx_aarch64eb
-#define helper_rbit helper_rbit_aarch64eb
-#define helper_recpe_f32 helper_recpe_f32_aarch64eb
-#define helper_recpe_f64 helper_recpe_f64_aarch64eb
-#define helper_recpe_u32 helper_recpe_u32_aarch64eb
-#define helper_recps_f32 helper_recps_f32_aarch64eb
-#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_aarch64eb
-#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_aarch64eb
-#define helper_ret_ldub_mmu helper_ret_ldub_mmu_aarch64eb
-#define helper_ret_stb_mmu helper_ret_stb_mmu_aarch64eb
-#define helper_rintd helper_rintd_aarch64eb
-#define helper_rintd_exact helper_rintd_exact_aarch64eb
-#define helper_rints helper_rints_aarch64eb
-#define helper_rints_exact helper_rints_exact_aarch64eb
-#define helper_ror_cc helper_ror_cc_aarch64eb
-#define helper_rsqrte_f32 helper_rsqrte_f32_aarch64eb
-#define helper_rsqrte_f64 helper_rsqrte_f64_aarch64eb
-#define helper_rsqrte_u32 helper_rsqrte_u32_aarch64eb
-#define helper_rsqrts_f32 helper_rsqrts_f32_aarch64eb
-#define helper_sadd16 helper_sadd16_aarch64eb
-#define helper_sadd8 helper_sadd8_aarch64eb
-#define helper_saddsubx helper_saddsubx_aarch64eb
-#define helper_sar_cc helper_sar_cc_aarch64eb
-#define helper_sdiv helper_sdiv_aarch64eb
-#define helper_sel_flags helper_sel_flags_aarch64eb
-#define helper_set_cp_reg helper_set_cp_reg_aarch64eb
-#define helper_set_cp_reg64 helper_set_cp_reg64_aarch64eb
-#define helper_set_neon_rmode helper_set_neon_rmode_aarch64eb
-#define helper_set_r13_banked helper_set_r13_banked_aarch64eb
-#define helper_set_rmode helper_set_rmode_aarch64eb
-#define helper_set_user_reg helper_set_user_reg_aarch64eb
-#define helper_shadd16 helper_shadd16_aarch64eb
-#define helper_shadd8 helper_shadd8_aarch64eb
-#define helper_shaddsubx helper_shaddsubx_aarch64eb
-#define helper_shl_cc helper_shl_cc_aarch64eb
-#define helper_shr_cc helper_shr_cc_aarch64eb
-#define helper_shsub16 helper_shsub16_aarch64eb
-#define helper_shsub8 helper_shsub8_aarch64eb
-#define helper_shsubaddx helper_shsubaddx_aarch64eb
-#define helper_ssat helper_ssat_aarch64eb
-#define helper_ssat16 helper_ssat16_aarch64eb
-#define helper_ssub16 helper_ssub16_aarch64eb
-#define helper_ssub8 helper_ssub8_aarch64eb
-#define helper_ssubaddx helper_ssubaddx_aarch64eb
-#define helper_stb_mmu helper_stb_mmu_aarch64eb
-#define helper_stl_mmu helper_stl_mmu_aarch64eb
-#define helper_stq_mmu helper_stq_mmu_aarch64eb
-#define helper_stw_mmu helper_stw_mmu_aarch64eb
-#define helper_sub_saturate helper_sub_saturate_aarch64eb
-#define helper_sub_usaturate helper_sub_usaturate_aarch64eb
-#define helper_sxtb16 helper_sxtb16_aarch64eb
-#define helper_uadd16 helper_uadd16_aarch64eb
-#define helper_uadd8 helper_uadd8_aarch64eb
-#define helper_uaddsubx helper_uaddsubx_aarch64eb
-#define helper_udiv helper_udiv_aarch64eb
-#define helper_uhadd16 helper_uhadd16_aarch64eb
-#define helper_uhadd8 helper_uhadd8_aarch64eb
-#define helper_uhaddsubx helper_uhaddsubx_aarch64eb
-#define helper_uhsub16 helper_uhsub16_aarch64eb
-#define helper_uhsub8 helper_uhsub8_aarch64eb
-#define helper_uhsubaddx helper_uhsubaddx_aarch64eb
-#define helper_uqadd16 helper_uqadd16_aarch64eb
-#define helper_uqadd8 helper_uqadd8_aarch64eb
-#define helper_uqaddsubx helper_uqaddsubx_aarch64eb
-#define helper_uqsub16 helper_uqsub16_aarch64eb
-#define helper_uqsub8 helper_uqsub8_aarch64eb
-#define helper_uqsubaddx helper_uqsubaddx_aarch64eb
-#define helper_usad8 helper_usad8_aarch64eb
-#define helper_usat helper_usat_aarch64eb
-#define helper_usat16 helper_usat16_aarch64eb
-#define helper_usub16 helper_usub16_aarch64eb
-#define helper_usub8 helper_usub8_aarch64eb
-#define helper_usubaddx helper_usubaddx_aarch64eb
-#define helper_uxtb16 helper_uxtb16_aarch64eb
-#define helper_v7m_mrs helper_v7m_mrs_aarch64eb
-#define helper_v7m_msr helper_v7m_msr_aarch64eb
-#define helper_vfp_absd helper_vfp_absd_aarch64eb
-#define helper_vfp_abss helper_vfp_abss_aarch64eb
-#define helper_vfp_addd helper_vfp_addd_aarch64eb
-#define helper_vfp_adds helper_vfp_adds_aarch64eb
-#define helper_vfp_cmpd helper_vfp_cmpd_aarch64eb
-#define helper_vfp_cmped helper_vfp_cmped_aarch64eb
-#define helper_vfp_cmpes helper_vfp_cmpes_aarch64eb
-#define helper_vfp_cmps helper_vfp_cmps_aarch64eb
-#define helper_vfp_divd helper_vfp_divd_aarch64eb
-#define helper_vfp_divs helper_vfp_divs_aarch64eb
-#define helper_vfp_fcvtds helper_vfp_fcvtds_aarch64eb
-#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_aarch64eb
-#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_aarch64eb
-#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_aarch64eb
-#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_aarch64eb
-#define helper_vfp_fcvtsd helper_vfp_fcvtsd_aarch64eb
-#define helper_vfp_get_fpscr helper_vfp_get_fpscr_aarch64eb
-#define helper_vfp_maxd helper_vfp_maxd_aarch64eb
-#define helper_vfp_maxnumd helper_vfp_maxnumd_aarch64eb
-#define helper_vfp_maxnums helper_vfp_maxnums_aarch64eb
-#define helper_vfp_maxs helper_vfp_maxs_aarch64eb
-#define helper_vfp_mind helper_vfp_mind_aarch64eb
-#define helper_vfp_minnumd helper_vfp_minnumd_aarch64eb
-#define helper_vfp_minnums helper_vfp_minnums_aarch64eb
-#define helper_vfp_mins helper_vfp_mins_aarch64eb
-#define helper_vfp_muladdd helper_vfp_muladdd_aarch64eb
-#define helper_vfp_muladds helper_vfp_muladds_aarch64eb
-#define helper_vfp_muld helper_vfp_muld_aarch64eb
-#define helper_vfp_muls helper_vfp_muls_aarch64eb
-#define helper_vfp_negd helper_vfp_negd_aarch64eb
-#define helper_vfp_negs helper_vfp_negs_aarch64eb
-#define helper_vfp_set_fpscr helper_vfp_set_fpscr_aarch64eb
-#define helper_vfp_shtod helper_vfp_shtod_aarch64eb
-#define helper_vfp_shtos helper_vfp_shtos_aarch64eb
-#define helper_vfp_sitod helper_vfp_sitod_aarch64eb
-#define helper_vfp_sitos helper_vfp_sitos_aarch64eb
-#define helper_vfp_sltod helper_vfp_sltod_aarch64eb
-#define helper_vfp_sltos helper_vfp_sltos_aarch64eb
-#define helper_vfp_sqrtd helper_vfp_sqrtd_aarch64eb
-#define helper_vfp_sqrts helper_vfp_sqrts_aarch64eb
-#define helper_vfp_sqtod helper_vfp_sqtod_aarch64eb
-#define helper_vfp_sqtos helper_vfp_sqtos_aarch64eb
-#define helper_vfp_subd helper_vfp_subd_aarch64eb
-#define helper_vfp_subs helper_vfp_subs_aarch64eb
-#define helper_vfp_toshd helper_vfp_toshd_aarch64eb
-#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_aarch64eb
-#define helper_vfp_toshs helper_vfp_toshs_aarch64eb
-#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_aarch64eb
-#define helper_vfp_tosid helper_vfp_tosid_aarch64eb
-#define helper_vfp_tosis helper_vfp_tosis_aarch64eb
-#define helper_vfp_tosizd helper_vfp_tosizd_aarch64eb
-#define helper_vfp_tosizs helper_vfp_tosizs_aarch64eb
-#define helper_vfp_tosld helper_vfp_tosld_aarch64eb
-#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_aarch64eb
-#define helper_vfp_tosls helper_vfp_tosls_aarch64eb
-#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_aarch64eb
-#define helper_vfp_tosqd helper_vfp_tosqd_aarch64eb
-#define helper_vfp_tosqs helper_vfp_tosqs_aarch64eb
-#define helper_vfp_touhd helper_vfp_touhd_aarch64eb
-#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_aarch64eb
-#define helper_vfp_touhs helper_vfp_touhs_aarch64eb
-#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_aarch64eb
-#define helper_vfp_touid helper_vfp_touid_aarch64eb
-#define helper_vfp_touis helper_vfp_touis_aarch64eb
-#define helper_vfp_touizd helper_vfp_touizd_aarch64eb
-#define helper_vfp_touizs helper_vfp_touizs_aarch64eb
-#define helper_vfp_tould helper_vfp_tould_aarch64eb
-#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_aarch64eb
-#define helper_vfp_touls helper_vfp_touls_aarch64eb
-#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_aarch64eb
-#define helper_vfp_touqd helper_vfp_touqd_aarch64eb
-#define helper_vfp_touqs helper_vfp_touqs_aarch64eb
-#define helper_vfp_uhtod helper_vfp_uhtod_aarch64eb
-#define helper_vfp_uhtos helper_vfp_uhtos_aarch64eb
-#define helper_vfp_uitod helper_vfp_uitod_aarch64eb
-#define helper_vfp_uitos helper_vfp_uitos_aarch64eb
-#define helper_vfp_ultod helper_vfp_ultod_aarch64eb
-#define helper_vfp_ultos helper_vfp_ultos_aarch64eb
-#define helper_vfp_uqtod helper_vfp_uqtod_aarch64eb
-#define helper_vfp_uqtos helper_vfp_uqtos_aarch64eb
-#define helper_wfe helper_wfe_aarch64eb
-#define helper_wfi helper_wfi_aarch64eb
-#define hex2decimal hex2decimal_aarch64eb
-#define hw_breakpoint_update hw_breakpoint_update_aarch64eb
-#define hw_breakpoint_update_all hw_breakpoint_update_all_aarch64eb
-#define hw_watchpoint_update hw_watchpoint_update_aarch64eb
-#define hw_watchpoint_update_all hw_watchpoint_update_all_aarch64eb
-#define _init _init_aarch64eb
-#define init_cpreg_list init_cpreg_list_aarch64eb
-#define init_lists init_lists_aarch64eb
-#define input_type_enum input_type_enum_aarch64eb
-#define int128_2_64 int128_2_64_aarch64eb
-#define int128_add int128_add_aarch64eb
-#define int128_addto int128_addto_aarch64eb
-#define int128_and int128_and_aarch64eb
-#define int128_eq int128_eq_aarch64eb
-#define int128_ge int128_ge_aarch64eb
-#define int128_get64 int128_get64_aarch64eb
-#define int128_gt int128_gt_aarch64eb
-#define int128_le int128_le_aarch64eb
-#define int128_lt int128_lt_aarch64eb
-#define int128_make64 int128_make64_aarch64eb
-#define int128_max int128_max_aarch64eb
-#define int128_min int128_min_aarch64eb
-#define int128_ne int128_ne_aarch64eb
-#define int128_neg int128_neg_aarch64eb
-#define int128_nz int128_nz_aarch64eb
-#define int128_rshift int128_rshift_aarch64eb
-#define int128_sub int128_sub_aarch64eb
-#define int128_subfrom int128_subfrom_aarch64eb
-#define int128_zero int128_zero_aarch64eb
-#define int16_to_float32 int16_to_float32_aarch64eb
-#define int16_to_float64 int16_to_float64_aarch64eb
-#define int32_to_float128 int32_to_float128_aarch64eb
-#define int32_to_float32 int32_to_float32_aarch64eb
-#define int32_to_float64 int32_to_float64_aarch64eb
-#define int32_to_floatx80 int32_to_floatx80_aarch64eb
-#define int64_to_float128 int64_to_float128_aarch64eb
-#define int64_to_float32 int64_to_float32_aarch64eb
-#define int64_to_float64 int64_to_float64_aarch64eb
-#define int64_to_floatx80 int64_to_floatx80_aarch64eb
-#define invalidate_and_set_dirty invalidate_and_set_dirty_aarch64eb
-#define invalidate_page_bitmap invalidate_page_bitmap_aarch64eb
-#define io_mem_read io_mem_read_aarch64eb
-#define io_mem_write io_mem_write_aarch64eb
-#define io_readb io_readb_aarch64eb
-#define io_readl io_readl_aarch64eb
-#define io_readq io_readq_aarch64eb
-#define io_readw io_readw_aarch64eb
-#define iotlb_to_region iotlb_to_region_aarch64eb
-#define io_writeb io_writeb_aarch64eb
-#define io_writel io_writel_aarch64eb
-#define io_writeq io_writeq_aarch64eb
-#define io_writew io_writew_aarch64eb
-#define is_a64 is_a64_aarch64eb
-#define is_help_option is_help_option_aarch64eb
-#define isr_read isr_read_aarch64eb
-#define is_valid_option_list is_valid_option_list_aarch64eb
-#define iwmmxt_load_creg iwmmxt_load_creg_aarch64eb
-#define iwmmxt_load_reg iwmmxt_load_reg_aarch64eb
-#define iwmmxt_store_creg iwmmxt_store_creg_aarch64eb
-#define iwmmxt_store_reg iwmmxt_store_reg_aarch64eb
-#define __jit_debug_descriptor __jit_debug_descriptor_aarch64eb
-#define __jit_debug_register_code __jit_debug_register_code_aarch64eb
-#define kvm_to_cpreg_id kvm_to_cpreg_id_aarch64eb
-#define last_ram_offset last_ram_offset_aarch64eb
-#define ldl_be_p ldl_be_p_aarch64eb
-#define ldl_be_phys ldl_be_phys_aarch64eb
-#define ldl_he_p ldl_he_p_aarch64eb
-#define ldl_le_p ldl_le_p_aarch64eb
-#define ldl_le_phys ldl_le_phys_aarch64eb
-#define ldl_phys ldl_phys_aarch64eb
-#define ldl_phys_internal ldl_phys_internal_aarch64eb
-#define ldq_be_p ldq_be_p_aarch64eb
-#define ldq_be_phys ldq_be_phys_aarch64eb
-#define ldq_he_p ldq_he_p_aarch64eb
-#define ldq_le_p ldq_le_p_aarch64eb
-#define ldq_le_phys ldq_le_phys_aarch64eb
-#define ldq_phys ldq_phys_aarch64eb
-#define ldq_phys_internal ldq_phys_internal_aarch64eb
-#define ldst_name ldst_name_aarch64eb
-#define ldub_p ldub_p_aarch64eb
-#define ldub_phys ldub_phys_aarch64eb
-#define lduw_be_p lduw_be_p_aarch64eb
-#define lduw_be_phys lduw_be_phys_aarch64eb
-#define lduw_he_p lduw_he_p_aarch64eb
-#define lduw_le_p lduw_le_p_aarch64eb
-#define lduw_le_phys lduw_le_phys_aarch64eb
-#define lduw_phys lduw_phys_aarch64eb
-#define lduw_phys_internal lduw_phys_internal_aarch64eb
-#define le128 le128_aarch64eb
-#define linked_bp_matches linked_bp_matches_aarch64eb
-#define listener_add_address_space listener_add_address_space_aarch64eb
-#define load_cpu_offset load_cpu_offset_aarch64eb
-#define load_reg load_reg_aarch64eb
-#define load_reg_var load_reg_var_aarch64eb
-#define log_cpu_state log_cpu_state_aarch64eb
-#define lpae_cp_reginfo lpae_cp_reginfo_aarch64eb
-#define lt128 lt128_aarch64eb
-#define machine_class_init machine_class_init_aarch64eb
-#define machine_finalize machine_finalize_aarch64eb
-#define machine_info machine_info_aarch64eb
-#define machine_initfn machine_initfn_aarch64eb
-#define machine_register_types machine_register_types_aarch64eb
-#define machvirt_init machvirt_init_aarch64eb
-#define machvirt_machine_init machvirt_machine_init_aarch64eb
-#define maj maj_aarch64eb
-#define mapping_conflict mapping_conflict_aarch64eb
-#define mapping_contiguous mapping_contiguous_aarch64eb
-#define mapping_have_same_region mapping_have_same_region_aarch64eb
-#define mapping_merge mapping_merge_aarch64eb
-#define mem_add mem_add_aarch64eb
-#define mem_begin mem_begin_aarch64eb
-#define mem_commit mem_commit_aarch64eb
-#define memory_access_is_direct memory_access_is_direct_aarch64eb
-#define memory_access_size memory_access_size_aarch64eb
-#define memory_init memory_init_aarch64eb
-#define memory_listener_match memory_listener_match_aarch64eb
-#define memory_listener_register memory_listener_register_aarch64eb
-#define memory_listener_unregister memory_listener_unregister_aarch64eb
-#define memory_map_init memory_map_init_aarch64eb
-#define memory_mapping_filter memory_mapping_filter_aarch64eb
-#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_aarch64eb
-#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_aarch64eb
-#define memory_mapping_list_free memory_mapping_list_free_aarch64eb
-#define memory_mapping_list_init memory_mapping_list_init_aarch64eb
-#define memory_region_access_valid memory_region_access_valid_aarch64eb
-#define memory_region_add_subregion memory_region_add_subregion_aarch64eb
-#define memory_region_add_subregion_common memory_region_add_subregion_common_aarch64eb
-#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_aarch64eb
-#define memory_region_big_endian memory_region_big_endian_aarch64eb
-#define memory_region_clear_pending memory_region_clear_pending_aarch64eb
-#define memory_region_del_subregion memory_region_del_subregion_aarch64eb
-#define memory_region_destructor_alias memory_region_destructor_alias_aarch64eb
-#define memory_region_destructor_none memory_region_destructor_none_aarch64eb
-#define memory_region_destructor_ram memory_region_destructor_ram_aarch64eb
-#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_aarch64eb
-#define memory_region_dispatch_read memory_region_dispatch_read_aarch64eb
-#define memory_region_dispatch_read1 memory_region_dispatch_read1_aarch64eb
-#define memory_region_dispatch_write memory_region_dispatch_write_aarch64eb
-#define memory_region_escape_name memory_region_escape_name_aarch64eb
-#define memory_region_finalize memory_region_finalize_aarch64eb
-#define memory_region_find memory_region_find_aarch64eb
-#define memory_region_get_addr memory_region_get_addr_aarch64eb
-#define memory_region_get_alignment memory_region_get_alignment_aarch64eb
-#define memory_region_get_container memory_region_get_container_aarch64eb
-#define memory_region_get_fd memory_region_get_fd_aarch64eb
-#define memory_region_get_may_overlap memory_region_get_may_overlap_aarch64eb
-#define memory_region_get_priority memory_region_get_priority_aarch64eb
-#define memory_region_get_ram_addr memory_region_get_ram_addr_aarch64eb
-#define memory_region_get_ram_ptr memory_region_get_ram_ptr_aarch64eb
-#define memory_region_get_size memory_region_get_size_aarch64eb
-#define memory_region_info memory_region_info_aarch64eb
-#define memory_region_init memory_region_init_aarch64eb
-#define memory_region_init_alias memory_region_init_alias_aarch64eb
-#define memory_region_initfn memory_region_initfn_aarch64eb
-#define memory_region_init_io memory_region_init_io_aarch64eb
-#define memory_region_init_ram memory_region_init_ram_aarch64eb
-#define memory_region_init_ram_ptr memory_region_init_ram_ptr_aarch64eb
-#define memory_region_init_reservation memory_region_init_reservation_aarch64eb
-#define memory_region_is_iommu memory_region_is_iommu_aarch64eb
-#define memory_region_is_logging memory_region_is_logging_aarch64eb
-#define memory_region_is_mapped memory_region_is_mapped_aarch64eb
-#define memory_region_is_ram memory_region_is_ram_aarch64eb
-#define memory_region_is_rom memory_region_is_rom_aarch64eb
-#define memory_region_is_romd memory_region_is_romd_aarch64eb
-#define memory_region_is_skip_dump memory_region_is_skip_dump_aarch64eb
-#define memory_region_is_unassigned memory_region_is_unassigned_aarch64eb
-#define memory_region_name memory_region_name_aarch64eb
-#define memory_region_need_escape memory_region_need_escape_aarch64eb
-#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_aarch64eb
-#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_aarch64eb
-#define memory_region_present memory_region_present_aarch64eb
-#define memory_region_read_accessor memory_region_read_accessor_aarch64eb
-#define memory_region_readd_subregion memory_region_readd_subregion_aarch64eb
-#define memory_region_ref memory_region_ref_aarch64eb
-#define memory_region_resolve_container memory_region_resolve_container_aarch64eb
-#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_aarch64eb
-#define memory_region_section_get_iotlb memory_region_section_get_iotlb_aarch64eb
-#define memory_region_set_address memory_region_set_address_aarch64eb
-#define memory_region_set_alias_offset memory_region_set_alias_offset_aarch64eb
-#define memory_region_set_enabled memory_region_set_enabled_aarch64eb
-#define memory_region_set_readonly memory_region_set_readonly_aarch64eb
-#define memory_region_set_skip_dump memory_region_set_skip_dump_aarch64eb
-#define memory_region_size memory_region_size_aarch64eb
-#define memory_region_to_address_space memory_region_to_address_space_aarch64eb
-#define memory_region_transaction_begin memory_region_transaction_begin_aarch64eb
-#define memory_region_transaction_commit memory_region_transaction_commit_aarch64eb
-#define memory_region_unref memory_region_unref_aarch64eb
-#define memory_region_update_container_subregions memory_region_update_container_subregions_aarch64eb
-#define memory_region_write_accessor memory_region_write_accessor_aarch64eb
-#define memory_region_wrong_endianness memory_region_wrong_endianness_aarch64eb
-#define memory_try_enable_merging memory_try_enable_merging_aarch64eb
-#define module_call_init module_call_init_aarch64eb
-#define module_load module_load_aarch64eb
-#define mpidr_cp_reginfo mpidr_cp_reginfo_aarch64eb
-#define mpidr_read mpidr_read_aarch64eb
-#define msr_mask msr_mask_aarch64eb
-#define mul128By64To192 mul128By64To192_aarch64eb
-#define mul128To256 mul128To256_aarch64eb
-#define mul64To128 mul64To128_aarch64eb
-#define muldiv64 muldiv64_aarch64eb
-#define neon_2rm_is_float_op neon_2rm_is_float_op_aarch64eb
-#define neon_2rm_sizes neon_2rm_sizes_aarch64eb
-#define neon_3r_sizes neon_3r_sizes_aarch64eb
-#define neon_get_scalar neon_get_scalar_aarch64eb
-#define neon_load_reg neon_load_reg_aarch64eb
-#define neon_load_reg64 neon_load_reg64_aarch64eb
-#define neon_load_scratch neon_load_scratch_aarch64eb
-#define neon_ls_element_type neon_ls_element_type_aarch64eb
-#define neon_reg_offset neon_reg_offset_aarch64eb
-#define neon_store_reg neon_store_reg_aarch64eb
-#define neon_store_reg64 neon_store_reg64_aarch64eb
-#define neon_store_scratch neon_store_scratch_aarch64eb
-#define new_ldst_label new_ldst_label_aarch64eb
-#define next_list next_list_aarch64eb
-#define normalizeFloat128Subnormal normalizeFloat128Subnormal_aarch64eb
-#define normalizeFloat16Subnormal normalizeFloat16Subnormal_aarch64eb
-#define normalizeFloat32Subnormal normalizeFloat32Subnormal_aarch64eb
-#define normalizeFloat64Subnormal normalizeFloat64Subnormal_aarch64eb
-#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_aarch64eb
-#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_aarch64eb
-#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_aarch64eb
-#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_aarch64eb
-#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_aarch64eb
-#define not_v6_cp_reginfo not_v6_cp_reginfo_aarch64eb
-#define not_v7_cp_reginfo not_v7_cp_reginfo_aarch64eb
-#define not_v8_cp_reginfo not_v8_cp_reginfo_aarch64eb
-#define object_child_foreach object_child_foreach_aarch64eb
-#define object_class_foreach object_class_foreach_aarch64eb
-#define object_class_foreach_tramp object_class_foreach_tramp_aarch64eb
-#define object_class_get_list object_class_get_list_aarch64eb
-#define object_class_get_list_tramp object_class_get_list_tramp_aarch64eb
-#define object_class_get_parent object_class_get_parent_aarch64eb
-#define object_deinit object_deinit_aarch64eb
-#define object_dynamic_cast object_dynamic_cast_aarch64eb
-#define object_finalize object_finalize_aarch64eb
-#define object_finalize_child_property object_finalize_child_property_aarch64eb
-#define object_get_child_property object_get_child_property_aarch64eb
-#define object_get_link_property object_get_link_property_aarch64eb
-#define object_get_root object_get_root_aarch64eb
-#define object_initialize_with_type object_initialize_with_type_aarch64eb
-#define object_init_with_type object_init_with_type_aarch64eb
-#define object_instance_init object_instance_init_aarch64eb
-#define object_new_with_type object_new_with_type_aarch64eb
-#define object_post_init_with_type object_post_init_with_type_aarch64eb
-#define object_property_add_alias object_property_add_alias_aarch64eb
-#define object_property_add_link object_property_add_link_aarch64eb
-#define object_property_add_uint16_ptr object_property_add_uint16_ptr_aarch64eb
-#define object_property_add_uint32_ptr object_property_add_uint32_ptr_aarch64eb
-#define object_property_add_uint64_ptr object_property_add_uint64_ptr_aarch64eb
-#define object_property_add_uint8_ptr object_property_add_uint8_ptr_aarch64eb
-#define object_property_allow_set_link object_property_allow_set_link_aarch64eb
-#define object_property_del object_property_del_aarch64eb
-#define object_property_del_all object_property_del_all_aarch64eb
-#define object_property_find object_property_find_aarch64eb
-#define object_property_get object_property_get_aarch64eb
-#define object_property_get_bool object_property_get_bool_aarch64eb
-#define object_property_get_int object_property_get_int_aarch64eb
-#define object_property_get_link object_property_get_link_aarch64eb
-#define object_property_get_qobject object_property_get_qobject_aarch64eb
-#define object_property_get_str object_property_get_str_aarch64eb
-#define object_property_get_type object_property_get_type_aarch64eb
-#define object_property_is_child object_property_is_child_aarch64eb
-#define object_property_set object_property_set_aarch64eb
-#define object_property_set_description object_property_set_description_aarch64eb
-#define object_property_set_link object_property_set_link_aarch64eb
-#define object_property_set_qobject object_property_set_qobject_aarch64eb
-#define object_release_link_property object_release_link_property_aarch64eb
-#define object_resolve_abs_path object_resolve_abs_path_aarch64eb
-#define object_resolve_child_property object_resolve_child_property_aarch64eb
-#define object_resolve_link object_resolve_link_aarch64eb
-#define object_resolve_link_property object_resolve_link_property_aarch64eb
-#define object_resolve_partial_path object_resolve_partial_path_aarch64eb
-#define object_resolve_path object_resolve_path_aarch64eb
-#define object_resolve_path_component object_resolve_path_component_aarch64eb
-#define object_resolve_path_type object_resolve_path_type_aarch64eb
-#define object_set_link_property object_set_link_property_aarch64eb
-#define object_unparent object_unparent_aarch64eb
-#define omap_cachemaint_write omap_cachemaint_write_aarch64eb
-#define omap_cp_reginfo omap_cp_reginfo_aarch64eb
-#define omap_threadid_write omap_threadid_write_aarch64eb
-#define omap_ticonfig_write omap_ticonfig_write_aarch64eb
-#define omap_wfi_write omap_wfi_write_aarch64eb
-#define op_bits op_bits_aarch64eb
-#define open_modeflags open_modeflags_aarch64eb
-#define op_to_mov op_to_mov_aarch64eb
-#define op_to_movi op_to_movi_aarch64eb
-#define output_type_enum output_type_enum_aarch64eb
-#define packFloat128 packFloat128_aarch64eb
-#define packFloat16 packFloat16_aarch64eb
-#define packFloat32 packFloat32_aarch64eb
-#define packFloat64 packFloat64_aarch64eb
-#define packFloatx80 packFloatx80_aarch64eb
-#define page_find page_find_aarch64eb
-#define page_find_alloc page_find_alloc_aarch64eb
-#define page_flush_tb page_flush_tb_aarch64eb
-#define page_flush_tb_1 page_flush_tb_1_aarch64eb
-#define page_init page_init_aarch64eb
-#define page_size_init page_size_init_aarch64eb
-#define par par_aarch64eb
-#define parse_array parse_array_aarch64eb
-#define parse_error parse_error_aarch64eb
-#define parse_escape parse_escape_aarch64eb
-#define parse_keyword parse_keyword_aarch64eb
-#define parse_literal parse_literal_aarch64eb
-#define parse_object parse_object_aarch64eb
-#define parse_optional parse_optional_aarch64eb
-#define parse_option_bool parse_option_bool_aarch64eb
-#define parse_option_number parse_option_number_aarch64eb
-#define parse_option_size parse_option_size_aarch64eb
-#define parse_pair parse_pair_aarch64eb
-#define parser_context_free parser_context_free_aarch64eb
-#define parser_context_new parser_context_new_aarch64eb
-#define parser_context_peek_token parser_context_peek_token_aarch64eb
-#define parser_context_pop_token parser_context_pop_token_aarch64eb
-#define parser_context_restore parser_context_restore_aarch64eb
-#define parser_context_save parser_context_save_aarch64eb
-#define parse_str parse_str_aarch64eb
-#define parse_type_bool parse_type_bool_aarch64eb
-#define parse_type_int parse_type_int_aarch64eb
-#define parse_type_number parse_type_number_aarch64eb
-#define parse_type_size parse_type_size_aarch64eb
-#define parse_type_str parse_type_str_aarch64eb
-#define parse_value parse_value_aarch64eb
-#define par_write par_write_aarch64eb
-#define patch_reloc patch_reloc_aarch64eb
-#define phys_map_node_alloc phys_map_node_alloc_aarch64eb
-#define phys_map_node_reserve phys_map_node_reserve_aarch64eb
-#define phys_mem_alloc phys_mem_alloc_aarch64eb
-#define phys_mem_set_alloc phys_mem_set_alloc_aarch64eb
-#define phys_page_compact phys_page_compact_aarch64eb
-#define phys_page_compact_all phys_page_compact_all_aarch64eb
-#define phys_page_find phys_page_find_aarch64eb
-#define phys_page_set phys_page_set_aarch64eb
-#define phys_page_set_level phys_page_set_level_aarch64eb
-#define phys_section_add phys_section_add_aarch64eb
-#define phys_section_destroy phys_section_destroy_aarch64eb
-#define phys_sections_free phys_sections_free_aarch64eb
-#define pickNaN pickNaN_aarch64eb
-#define pickNaNMulAdd pickNaNMulAdd_aarch64eb
-#define pmccfiltr_write pmccfiltr_write_aarch64eb
-#define pmccntr_read pmccntr_read_aarch64eb
-#define pmccntr_sync pmccntr_sync_aarch64eb
-#define pmccntr_write pmccntr_write_aarch64eb
-#define pmccntr_write32 pmccntr_write32_aarch64eb
-#define pmcntenclr_write pmcntenclr_write_aarch64eb
-#define pmcntenset_write pmcntenset_write_aarch64eb
-#define pmcr_write pmcr_write_aarch64eb
-#define pmintenclr_write pmintenclr_write_aarch64eb
-#define pmintenset_write pmintenset_write_aarch64eb
-#define pmovsr_write pmovsr_write_aarch64eb
-#define pmreg_access pmreg_access_aarch64eb
-#define pmsav5_cp_reginfo pmsav5_cp_reginfo_aarch64eb
-#define pmsav5_data_ap_read pmsav5_data_ap_read_aarch64eb
-#define pmsav5_data_ap_write pmsav5_data_ap_write_aarch64eb
-#define pmsav5_insn_ap_read pmsav5_insn_ap_read_aarch64eb
-#define pmsav5_insn_ap_write pmsav5_insn_ap_write_aarch64eb
-#define pmuserenr_write pmuserenr_write_aarch64eb
-#define pmxevtyper_write pmxevtyper_write_aarch64eb
-#define print_type_bool print_type_bool_aarch64eb
-#define print_type_int print_type_int_aarch64eb
-#define print_type_number print_type_number_aarch64eb
-#define print_type_size print_type_size_aarch64eb
-#define print_type_str print_type_str_aarch64eb
-#define propagateFloat128NaN propagateFloat128NaN_aarch64eb
-#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_aarch64eb
-#define propagateFloat32NaN propagateFloat32NaN_aarch64eb
-#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_aarch64eb
-#define propagateFloat64NaN propagateFloat64NaN_aarch64eb
-#define propagateFloatx80NaN propagateFloatx80NaN_aarch64eb
-#define property_get_alias property_get_alias_aarch64eb
-#define property_get_bool property_get_bool_aarch64eb
-#define property_get_str property_get_str_aarch64eb
-#define property_get_uint16_ptr property_get_uint16_ptr_aarch64eb
-#define property_get_uint32_ptr property_get_uint32_ptr_aarch64eb
-#define property_get_uint64_ptr property_get_uint64_ptr_aarch64eb
-#define property_get_uint8_ptr property_get_uint8_ptr_aarch64eb
-#define property_release_alias property_release_alias_aarch64eb
-#define property_release_bool property_release_bool_aarch64eb
-#define property_release_str property_release_str_aarch64eb
-#define property_resolve_alias property_resolve_alias_aarch64eb
-#define property_set_alias property_set_alias_aarch64eb
-#define property_set_bool property_set_bool_aarch64eb
-#define property_set_str property_set_str_aarch64eb
-#define pstate_read pstate_read_aarch64eb
-#define pstate_write pstate_write_aarch64eb
-#define pxa250_initfn pxa250_initfn_aarch64eb
-#define pxa255_initfn pxa255_initfn_aarch64eb
-#define pxa260_initfn pxa260_initfn_aarch64eb
-#define pxa261_initfn pxa261_initfn_aarch64eb
-#define pxa262_initfn pxa262_initfn_aarch64eb
-#define pxa270a0_initfn pxa270a0_initfn_aarch64eb
-#define pxa270a1_initfn pxa270a1_initfn_aarch64eb
-#define pxa270b0_initfn pxa270b0_initfn_aarch64eb
-#define pxa270b1_initfn pxa270b1_initfn_aarch64eb
-#define pxa270c0_initfn pxa270c0_initfn_aarch64eb
-#define pxa270c5_initfn pxa270c5_initfn_aarch64eb
-#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_aarch64eb
-#define qapi_dealloc_end_list qapi_dealloc_end_list_aarch64eb
-#define qapi_dealloc_end_struct qapi_dealloc_end_struct_aarch64eb
-#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_aarch64eb
-#define qapi_dealloc_next_list qapi_dealloc_next_list_aarch64eb
-#define qapi_dealloc_pop qapi_dealloc_pop_aarch64eb
-#define qapi_dealloc_push qapi_dealloc_push_aarch64eb
-#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_aarch64eb
-#define qapi_dealloc_start_list qapi_dealloc_start_list_aarch64eb
-#define qapi_dealloc_start_struct qapi_dealloc_start_struct_aarch64eb
-#define qapi_dealloc_start_union qapi_dealloc_start_union_aarch64eb
-#define qapi_dealloc_type_bool qapi_dealloc_type_bool_aarch64eb
-#define qapi_dealloc_type_enum qapi_dealloc_type_enum_aarch64eb
-#define qapi_dealloc_type_int qapi_dealloc_type_int_aarch64eb
-#define qapi_dealloc_type_number qapi_dealloc_type_number_aarch64eb
-#define qapi_dealloc_type_size qapi_dealloc_type_size_aarch64eb
-#define qapi_dealloc_type_str qapi_dealloc_type_str_aarch64eb
-#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_aarch64eb
-#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_aarch64eb
-#define qapi_free_boolList qapi_free_boolList_aarch64eb
-#define qapi_free_ErrorClassList qapi_free_ErrorClassList_aarch64eb
-#define qapi_free_int16List qapi_free_int16List_aarch64eb
-#define qapi_free_int32List qapi_free_int32List_aarch64eb
-#define qapi_free_int64List qapi_free_int64List_aarch64eb
-#define qapi_free_int8List qapi_free_int8List_aarch64eb
-#define qapi_free_intList qapi_free_intList_aarch64eb
-#define qapi_free_numberList qapi_free_numberList_aarch64eb
-#define qapi_free_strList qapi_free_strList_aarch64eb
-#define qapi_free_uint16List qapi_free_uint16List_aarch64eb
-#define qapi_free_uint32List qapi_free_uint32List_aarch64eb
-#define qapi_free_uint64List qapi_free_uint64List_aarch64eb
-#define qapi_free_uint8List qapi_free_uint8List_aarch64eb
-#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_aarch64eb
-#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_aarch64eb
-#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_aarch64eb
-#define qbool_destroy_obj qbool_destroy_obj_aarch64eb
-#define qbool_from_int qbool_from_int_aarch64eb
-#define qbool_get_int qbool_get_int_aarch64eb
-#define qbool_type qbool_type_aarch64eb
-#define qbus_create qbus_create_aarch64eb
-#define qbus_create_inplace qbus_create_inplace_aarch64eb
-#define qbus_finalize qbus_finalize_aarch64eb
-#define qbus_initfn qbus_initfn_aarch64eb
-#define qbus_realize qbus_realize_aarch64eb
-#define qdev_create qdev_create_aarch64eb
-#define qdev_get_type qdev_get_type_aarch64eb
-#define qdev_register_types qdev_register_types_aarch64eb
-#define qdev_set_parent_bus qdev_set_parent_bus_aarch64eb
-#define qdev_try_create qdev_try_create_aarch64eb
-#define qdict_add_key qdict_add_key_aarch64eb
-#define qdict_array_split qdict_array_split_aarch64eb
-#define qdict_clone_shallow qdict_clone_shallow_aarch64eb
-#define qdict_del qdict_del_aarch64eb
-#define qdict_destroy_obj qdict_destroy_obj_aarch64eb
-#define qdict_entry_key qdict_entry_key_aarch64eb
-#define qdict_entry_value qdict_entry_value_aarch64eb
-#define qdict_extract_subqdict qdict_extract_subqdict_aarch64eb
-#define qdict_find qdict_find_aarch64eb
-#define qdict_first qdict_first_aarch64eb
-#define qdict_flatten qdict_flatten_aarch64eb
-#define qdict_flatten_qdict qdict_flatten_qdict_aarch64eb
-#define qdict_flatten_qlist qdict_flatten_qlist_aarch64eb
-#define qdict_get qdict_get_aarch64eb
-#define qdict_get_bool qdict_get_bool_aarch64eb
-#define qdict_get_double qdict_get_double_aarch64eb
-#define qdict_get_int qdict_get_int_aarch64eb
-#define qdict_get_obj qdict_get_obj_aarch64eb
-#define qdict_get_qdict qdict_get_qdict_aarch64eb
-#define qdict_get_qlist qdict_get_qlist_aarch64eb
-#define qdict_get_str qdict_get_str_aarch64eb
-#define qdict_get_try_bool qdict_get_try_bool_aarch64eb
-#define qdict_get_try_int qdict_get_try_int_aarch64eb
-#define qdict_get_try_str qdict_get_try_str_aarch64eb
-#define qdict_haskey qdict_haskey_aarch64eb
-#define qdict_has_prefixed_entries qdict_has_prefixed_entries_aarch64eb
-#define qdict_iter qdict_iter_aarch64eb
-#define qdict_join qdict_join_aarch64eb
-#define qdict_new qdict_new_aarch64eb
-#define qdict_next qdict_next_aarch64eb
-#define qdict_next_entry qdict_next_entry_aarch64eb
-#define qdict_put_obj qdict_put_obj_aarch64eb
-#define qdict_size qdict_size_aarch64eb
-#define qdict_type qdict_type_aarch64eb
-#define qemu_clock_get_us qemu_clock_get_us_aarch64eb
-#define qemu_clock_ptr qemu_clock_ptr_aarch64eb
-#define qemu_clocks qemu_clocks_aarch64eb
-#define qemu_get_cpu qemu_get_cpu_aarch64eb
-#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_aarch64eb
-#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_aarch64eb
-#define qemu_get_ram_block qemu_get_ram_block_aarch64eb
-#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_aarch64eb
-#define qemu_get_ram_fd qemu_get_ram_fd_aarch64eb
-#define qemu_get_ram_ptr qemu_get_ram_ptr_aarch64eb
-#define qemu_host_page_mask qemu_host_page_mask_aarch64eb
-#define qemu_host_page_size qemu_host_page_size_aarch64eb
-#define qemu_init_vcpu qemu_init_vcpu_aarch64eb
-#define qemu_ld_helpers qemu_ld_helpers_aarch64eb
-#define qemu_log_close qemu_log_close_aarch64eb
-#define qemu_log_enabled qemu_log_enabled_aarch64eb
-#define qemu_log_flush qemu_log_flush_aarch64eb
-#define qemu_loglevel_mask qemu_loglevel_mask_aarch64eb
-#define qemu_log_vprintf qemu_log_vprintf_aarch64eb
-#define qemu_oom_check qemu_oom_check_aarch64eb
-#define qemu_parse_fd qemu_parse_fd_aarch64eb
-#define qemu_ram_addr_from_host qemu_ram_addr_from_host_aarch64eb
-#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_aarch64eb
-#define qemu_ram_alloc qemu_ram_alloc_aarch64eb
-#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_aarch64eb
-#define qemu_ram_foreach_block qemu_ram_foreach_block_aarch64eb
-#define qemu_ram_free qemu_ram_free_aarch64eb
-#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_aarch64eb
-#define qemu_ram_ptr_length qemu_ram_ptr_length_aarch64eb
-#define qemu_ram_remap qemu_ram_remap_aarch64eb
-#define qemu_ram_setup_dump qemu_ram_setup_dump_aarch64eb
-#define qemu_ram_unset_idstr qemu_ram_unset_idstr_aarch64eb
-#define qemu_real_host_page_size qemu_real_host_page_size_aarch64eb
-#define qemu_st_helpers qemu_st_helpers_aarch64eb
-#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_aarch64eb
-#define qemu_try_memalign qemu_try_memalign_aarch64eb
-#define qentry_destroy qentry_destroy_aarch64eb
-#define qerror_human qerror_human_aarch64eb
-#define qerror_report qerror_report_aarch64eb
-#define qerror_report_err qerror_report_err_aarch64eb
-#define qfloat_destroy_obj qfloat_destroy_obj_aarch64eb
-#define qfloat_from_double qfloat_from_double_aarch64eb
-#define qfloat_get_double qfloat_get_double_aarch64eb
-#define qfloat_type qfloat_type_aarch64eb
-#define qint_destroy_obj qint_destroy_obj_aarch64eb
-#define qint_from_int qint_from_int_aarch64eb
-#define qint_get_int qint_get_int_aarch64eb
-#define qint_type qint_type_aarch64eb
-#define qlist_append_obj qlist_append_obj_aarch64eb
-#define qlist_copy qlist_copy_aarch64eb
-#define qlist_copy_elem qlist_copy_elem_aarch64eb
-#define qlist_destroy_obj qlist_destroy_obj_aarch64eb
-#define qlist_empty qlist_empty_aarch64eb
-#define qlist_entry_obj qlist_entry_obj_aarch64eb
-#define qlist_first qlist_first_aarch64eb
-#define qlist_iter qlist_iter_aarch64eb
-#define qlist_new qlist_new_aarch64eb
-#define qlist_next qlist_next_aarch64eb
-#define qlist_peek qlist_peek_aarch64eb
-#define qlist_pop qlist_pop_aarch64eb
-#define qlist_size qlist_size_aarch64eb
-#define qlist_size_iter qlist_size_iter_aarch64eb
-#define qlist_type qlist_type_aarch64eb
-#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_aarch64eb
-#define qmp_input_end_list qmp_input_end_list_aarch64eb
-#define qmp_input_end_struct qmp_input_end_struct_aarch64eb
-#define qmp_input_get_next_type qmp_input_get_next_type_aarch64eb
-#define qmp_input_get_object qmp_input_get_object_aarch64eb
-#define qmp_input_get_visitor qmp_input_get_visitor_aarch64eb
-#define qmp_input_next_list qmp_input_next_list_aarch64eb
-#define qmp_input_optional qmp_input_optional_aarch64eb
-#define qmp_input_pop qmp_input_pop_aarch64eb
-#define qmp_input_push qmp_input_push_aarch64eb
-#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_aarch64eb
-#define qmp_input_start_list qmp_input_start_list_aarch64eb
-#define qmp_input_start_struct qmp_input_start_struct_aarch64eb
-#define qmp_input_type_bool qmp_input_type_bool_aarch64eb
-#define qmp_input_type_int qmp_input_type_int_aarch64eb
-#define qmp_input_type_number qmp_input_type_number_aarch64eb
-#define qmp_input_type_str qmp_input_type_str_aarch64eb
-#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_aarch64eb
-#define qmp_input_visitor_new qmp_input_visitor_new_aarch64eb
-#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_aarch64eb
-#define qmp_output_add_obj qmp_output_add_obj_aarch64eb
-#define qmp_output_end_list qmp_output_end_list_aarch64eb
-#define qmp_output_end_struct qmp_output_end_struct_aarch64eb
-#define qmp_output_first qmp_output_first_aarch64eb
-#define qmp_output_get_qobject qmp_output_get_qobject_aarch64eb
-#define qmp_output_get_visitor qmp_output_get_visitor_aarch64eb
-#define qmp_output_last qmp_output_last_aarch64eb
-#define qmp_output_next_list qmp_output_next_list_aarch64eb
-#define qmp_output_pop qmp_output_pop_aarch64eb
-#define qmp_output_push_obj qmp_output_push_obj_aarch64eb
-#define qmp_output_start_list qmp_output_start_list_aarch64eb
-#define qmp_output_start_struct qmp_output_start_struct_aarch64eb
-#define qmp_output_type_bool qmp_output_type_bool_aarch64eb
-#define qmp_output_type_int qmp_output_type_int_aarch64eb
-#define qmp_output_type_number qmp_output_type_number_aarch64eb
-#define qmp_output_type_str qmp_output_type_str_aarch64eb
-#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_aarch64eb
-#define qmp_output_visitor_new qmp_output_visitor_new_aarch64eb
-#define qobject_decref qobject_decref_aarch64eb
-#define qobject_to_qbool qobject_to_qbool_aarch64eb
-#define qobject_to_qdict qobject_to_qdict_aarch64eb
-#define qobject_to_qfloat qobject_to_qfloat_aarch64eb
-#define qobject_to_qint qobject_to_qint_aarch64eb
-#define qobject_to_qlist qobject_to_qlist_aarch64eb
-#define qobject_to_qstring qobject_to_qstring_aarch64eb
-#define qobject_type qobject_type_aarch64eb
-#define qstring_append qstring_append_aarch64eb
-#define qstring_append_chr qstring_append_chr_aarch64eb
-#define qstring_append_int qstring_append_int_aarch64eb
-#define qstring_destroy_obj qstring_destroy_obj_aarch64eb
-#define qstring_from_escaped_str qstring_from_escaped_str_aarch64eb
-#define qstring_from_str qstring_from_str_aarch64eb
-#define qstring_from_substr qstring_from_substr_aarch64eb
-#define qstring_get_length qstring_get_length_aarch64eb
-#define qstring_get_str qstring_get_str_aarch64eb
-#define qstring_new qstring_new_aarch64eb
-#define qstring_type qstring_type_aarch64eb
-#define ram_block_add ram_block_add_aarch64eb
-#define ram_size ram_size_aarch64eb
-#define range_compare range_compare_aarch64eb
-#define range_covers_byte range_covers_byte_aarch64eb
-#define range_get_last range_get_last_aarch64eb
-#define range_merge range_merge_aarch64eb
-#define ranges_can_merge ranges_can_merge_aarch64eb
-#define raw_read raw_read_aarch64eb
-#define raw_write raw_write_aarch64eb
-#define rcon rcon_aarch64eb
-#define read_raw_cp_reg read_raw_cp_reg_aarch64eb
-#define recip_estimate recip_estimate_aarch64eb
-#define recip_sqrt_estimate recip_sqrt_estimate_aarch64eb
-#define register_cp_regs_for_features register_cp_regs_for_features_aarch64eb
-#define register_multipage register_multipage_aarch64eb
-#define register_subpage register_subpage_aarch64eb
-#define register_tm_clones register_tm_clones_aarch64eb
-#define register_types_object register_types_object_aarch64eb
-#define regnames regnames_aarch64eb
-#define render_memory_region render_memory_region_aarch64eb
-#define reset_all_temps reset_all_temps_aarch64eb
-#define reset_temp reset_temp_aarch64eb
-#define rol32 rol32_aarch64eb
-#define rol64 rol64_aarch64eb
-#define ror32 ror32_aarch64eb
-#define ror64 ror64_aarch64eb
-#define roundAndPackFloat128 roundAndPackFloat128_aarch64eb
-#define roundAndPackFloat16 roundAndPackFloat16_aarch64eb
-#define roundAndPackFloat32 roundAndPackFloat32_aarch64eb
-#define roundAndPackFloat64 roundAndPackFloat64_aarch64eb
-#define roundAndPackFloatx80 roundAndPackFloatx80_aarch64eb
-#define roundAndPackInt32 roundAndPackInt32_aarch64eb
-#define roundAndPackInt64 roundAndPackInt64_aarch64eb
-#define roundAndPackUint64 roundAndPackUint64_aarch64eb
-#define round_to_inf round_to_inf_aarch64eb
-#define run_on_cpu run_on_cpu_aarch64eb
-#define s0 s0_aarch64eb
-#define S0 S0_aarch64eb
-#define s1 s1_aarch64eb
-#define S1 S1_aarch64eb
-#define sa1100_initfn sa1100_initfn_aarch64eb
-#define sa1110_initfn sa1110_initfn_aarch64eb
-#define save_globals save_globals_aarch64eb
-#define scr_write scr_write_aarch64eb
-#define sctlr_write sctlr_write_aarch64eb
-#define set_bit set_bit_aarch64eb
-#define set_bits set_bits_aarch64eb
-#define set_default_nan_mode set_default_nan_mode_aarch64eb
-#define set_feature set_feature_aarch64eb
-#define set_float_detect_tininess set_float_detect_tininess_aarch64eb
-#define set_float_exception_flags set_float_exception_flags_aarch64eb
-#define set_float_rounding_mode set_float_rounding_mode_aarch64eb
-#define set_flush_inputs_to_zero set_flush_inputs_to_zero_aarch64eb
-#define set_flush_to_zero set_flush_to_zero_aarch64eb
-#define set_swi_errno set_swi_errno_aarch64eb
-#define sextract32 sextract32_aarch64eb
-#define sextract64 sextract64_aarch64eb
-#define shift128ExtraRightJamming shift128ExtraRightJamming_aarch64eb
-#define shift128Right shift128Right_aarch64eb
-#define shift128RightJamming shift128RightJamming_aarch64eb
-#define shift32RightJamming shift32RightJamming_aarch64eb
-#define shift64ExtraRightJamming shift64ExtraRightJamming_aarch64eb
-#define shift64RightJamming shift64RightJamming_aarch64eb
-#define shifter_out_im shifter_out_im_aarch64eb
-#define shortShift128Left shortShift128Left_aarch64eb
-#define shortShift192Left shortShift192Left_aarch64eb
-#define simple_mpu_ap_bits simple_mpu_ap_bits_aarch64eb
-#define size_code_gen_buffer size_code_gen_buffer_aarch64eb
-#define softmmu_lock_user softmmu_lock_user_aarch64eb
-#define softmmu_lock_user_string softmmu_lock_user_string_aarch64eb
-#define softmmu_tget32 softmmu_tget32_aarch64eb
-#define softmmu_tget8 softmmu_tget8_aarch64eb
-#define softmmu_tput32 softmmu_tput32_aarch64eb
-#define softmmu_unlock_user softmmu_unlock_user_aarch64eb
-#define sort_constraints sort_constraints_aarch64eb
-#define sp_el0_access sp_el0_access_aarch64eb
-#define spsel_read spsel_read_aarch64eb
-#define spsel_write spsel_write_aarch64eb
-#define start_list start_list_aarch64eb
-#define stb_p stb_p_aarch64eb
-#define stb_phys stb_phys_aarch64eb
-#define stl_be_p stl_be_p_aarch64eb
-#define stl_be_phys stl_be_phys_aarch64eb
-#define stl_he_p stl_he_p_aarch64eb
-#define stl_le_p stl_le_p_aarch64eb
-#define stl_le_phys stl_le_phys_aarch64eb
-#define stl_phys stl_phys_aarch64eb
-#define stl_phys_internal stl_phys_internal_aarch64eb
-#define stl_phys_notdirty stl_phys_notdirty_aarch64eb
-#define store_cpu_offset store_cpu_offset_aarch64eb
-#define store_reg store_reg_aarch64eb
-#define store_reg_bx store_reg_bx_aarch64eb
-#define store_reg_from_load store_reg_from_load_aarch64eb
-#define stq_be_p stq_be_p_aarch64eb
-#define stq_be_phys stq_be_phys_aarch64eb
-#define stq_he_p stq_he_p_aarch64eb
-#define stq_le_p stq_le_p_aarch64eb
-#define stq_le_phys stq_le_phys_aarch64eb
-#define stq_phys stq_phys_aarch64eb
-#define string_input_get_visitor string_input_get_visitor_aarch64eb
-#define string_input_visitor_cleanup string_input_visitor_cleanup_aarch64eb
-#define string_input_visitor_new string_input_visitor_new_aarch64eb
-#define strongarm_cp_reginfo strongarm_cp_reginfo_aarch64eb
-#define strstart strstart_aarch64eb
-#define strtosz strtosz_aarch64eb
-#define strtosz_suffix strtosz_suffix_aarch64eb
-#define stw_be_p stw_be_p_aarch64eb
-#define stw_be_phys stw_be_phys_aarch64eb
-#define stw_he_p stw_he_p_aarch64eb
-#define stw_le_p stw_le_p_aarch64eb
-#define stw_le_phys stw_le_phys_aarch64eb
-#define stw_phys stw_phys_aarch64eb
-#define stw_phys_internal stw_phys_internal_aarch64eb
-#define sub128 sub128_aarch64eb
-#define sub16_sat sub16_sat_aarch64eb
-#define sub16_usat sub16_usat_aarch64eb
-#define sub192 sub192_aarch64eb
-#define sub8_sat sub8_sat_aarch64eb
-#define sub8_usat sub8_usat_aarch64eb
-#define subFloat128Sigs subFloat128Sigs_aarch64eb
-#define subFloat32Sigs subFloat32Sigs_aarch64eb
-#define subFloat64Sigs subFloat64Sigs_aarch64eb
-#define subFloatx80Sigs subFloatx80Sigs_aarch64eb
-#define subpage_accepts subpage_accepts_aarch64eb
-#define subpage_init subpage_init_aarch64eb
-#define subpage_ops subpage_ops_aarch64eb
-#define subpage_read subpage_read_aarch64eb
-#define subpage_register subpage_register_aarch64eb
-#define subpage_write subpage_write_aarch64eb
-#define suffix_mul suffix_mul_aarch64eb
-#define swap_commutative swap_commutative_aarch64eb
-#define swap_commutative2 swap_commutative2_aarch64eb
-#define switch_mode switch_mode_aarch64eb
-#define switch_v7m_sp switch_v7m_sp_aarch64eb
-#define syn_aa32_bkpt syn_aa32_bkpt_aarch64eb
-#define syn_aa32_hvc syn_aa32_hvc_aarch64eb
-#define syn_aa32_smc syn_aa32_smc_aarch64eb
-#define syn_aa32_svc syn_aa32_svc_aarch64eb
-#define syn_breakpoint syn_breakpoint_aarch64eb
-#define sync_globals sync_globals_aarch64eb
-#define syn_cp14_rrt_trap syn_cp14_rrt_trap_aarch64eb
-#define syn_cp14_rt_trap syn_cp14_rt_trap_aarch64eb
-#define syn_cp15_rrt_trap syn_cp15_rrt_trap_aarch64eb
-#define syn_cp15_rt_trap syn_cp15_rt_trap_aarch64eb
-#define syn_data_abort syn_data_abort_aarch64eb
-#define syn_fp_access_trap syn_fp_access_trap_aarch64eb
-#define syn_insn_abort syn_insn_abort_aarch64eb
-#define syn_swstep syn_swstep_aarch64eb
-#define syn_uncategorized syn_uncategorized_aarch64eb
-#define syn_watchpoint syn_watchpoint_aarch64eb
-#define syscall_err syscall_err_aarch64eb
-#define system_bus_class_init system_bus_class_init_aarch64eb
-#define system_bus_info system_bus_info_aarch64eb
-#define t2ee_cp_reginfo t2ee_cp_reginfo_aarch64eb
-#define table_logic_cc table_logic_cc_aarch64eb
-#define target_parse_constraint target_parse_constraint_aarch64eb
-#define target_words_bigendian target_words_bigendian_aarch64eb
-#define tb_add_jump tb_add_jump_aarch64eb
-#define tb_alloc tb_alloc_aarch64eb
-#define tb_alloc_page tb_alloc_page_aarch64eb
-#define tb_check_watchpoint tb_check_watchpoint_aarch64eb
-#define tb_find_fast tb_find_fast_aarch64eb
-#define tb_find_pc tb_find_pc_aarch64eb
-#define tb_find_slow tb_find_slow_aarch64eb
-#define tb_flush tb_flush_aarch64eb
-#define tb_flush_jmp_cache tb_flush_jmp_cache_aarch64eb
-#define tb_free tb_free_aarch64eb
-#define tb_gen_code tb_gen_code_aarch64eb
-#define tb_hash_remove tb_hash_remove_aarch64eb
-#define tb_invalidate_phys_addr tb_invalidate_phys_addr_aarch64eb
-#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_aarch64eb
-#define tb_invalidate_phys_range tb_invalidate_phys_range_aarch64eb
-#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_aarch64eb
-#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_aarch64eb
-#define tb_jmp_remove tb_jmp_remove_aarch64eb
-#define tb_link_page tb_link_page_aarch64eb
-#define tb_page_remove tb_page_remove_aarch64eb
-#define tb_phys_hash_func tb_phys_hash_func_aarch64eb
-#define tb_phys_invalidate tb_phys_invalidate_aarch64eb
-#define tb_reset_jump tb_reset_jump_aarch64eb
-#define tb_set_jmp_target tb_set_jmp_target_aarch64eb
-#define tcg_accel_class_init tcg_accel_class_init_aarch64eb
-#define tcg_accel_type tcg_accel_type_aarch64eb
-#define tcg_add_param_i32 tcg_add_param_i32_aarch64eb
-#define tcg_add_param_i64 tcg_add_param_i64_aarch64eb
-#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_aarch64eb
-#define tcg_allowed tcg_allowed_aarch64eb
-#define tcg_canonicalize_memop tcg_canonicalize_memop_aarch64eb
-#define tcg_commit tcg_commit_aarch64eb
-#define tcg_cond_to_jcc tcg_cond_to_jcc_aarch64eb
-#define tcg_constant_folding tcg_constant_folding_aarch64eb
-#define tcg_const_i32 tcg_const_i32_aarch64eb
-#define tcg_const_i64 tcg_const_i64_aarch64eb
-#define tcg_const_local_i32 tcg_const_local_i32_aarch64eb
-#define tcg_const_local_i64 tcg_const_local_i64_aarch64eb
-#define tcg_context_init tcg_context_init_aarch64eb
-#define tcg_cpu_address_space_init tcg_cpu_address_space_init_aarch64eb
-#define tcg_cpu_exec tcg_cpu_exec_aarch64eb
-#define tcg_current_code_size tcg_current_code_size_aarch64eb
-#define tcg_dump_info tcg_dump_info_aarch64eb
-#define tcg_dump_ops tcg_dump_ops_aarch64eb
-#define tcg_exec_all tcg_exec_all_aarch64eb
-#define tcg_find_helper tcg_find_helper_aarch64eb
-#define tcg_func_start tcg_func_start_aarch64eb
-#define tcg_gen_abs_i32 tcg_gen_abs_i32_aarch64eb
-#define tcg_gen_add2_i32 tcg_gen_add2_i32_aarch64eb
-#define tcg_gen_add_i32 tcg_gen_add_i32_aarch64eb
-#define tcg_gen_add_i64 tcg_gen_add_i64_aarch64eb
-#define tcg_gen_addi_i32 tcg_gen_addi_i32_aarch64eb
-#define tcg_gen_addi_i64 tcg_gen_addi_i64_aarch64eb
-#define tcg_gen_andc_i32 tcg_gen_andc_i32_aarch64eb
-#define tcg_gen_and_i32 tcg_gen_and_i32_aarch64eb
-#define tcg_gen_and_i64 tcg_gen_and_i64_aarch64eb
-#define tcg_gen_andi_i32 tcg_gen_andi_i32_aarch64eb
-#define tcg_gen_andi_i64 tcg_gen_andi_i64_aarch64eb
-#define tcg_gen_br tcg_gen_br_aarch64eb
-#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_aarch64eb
-#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_aarch64eb
-#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_aarch64eb
-#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_aarch64eb
-#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_aarch64eb
-#define tcg_gen_callN tcg_gen_callN_aarch64eb
-#define tcg_gen_code tcg_gen_code_aarch64eb
-#define tcg_gen_code_common tcg_gen_code_common_aarch64eb
-#define tcg_gen_code_search_pc tcg_gen_code_search_pc_aarch64eb
-#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_aarch64eb
-#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_aarch64eb
-#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_aarch64eb
-#define tcg_gen_exit_tb tcg_gen_exit_tb_aarch64eb
-#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_aarch64eb
-#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_aarch64eb
-#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_aarch64eb
-#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_aarch64eb
-#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_aarch64eb
-#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_aarch64eb
-#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_aarch64eb
-#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_aarch64eb
-#define tcg_gen_goto_tb tcg_gen_goto_tb_aarch64eb
-#define tcg_gen_ld_i32 tcg_gen_ld_i32_aarch64eb
-#define tcg_gen_ld_i64 tcg_gen_ld_i64_aarch64eb
-#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_aarch64eb
-#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_aarch64eb
-#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_aarch64eb
-#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_aarch64eb
-#define tcg_gen_mov_i32 tcg_gen_mov_i32_aarch64eb
-#define tcg_gen_mov_i64 tcg_gen_mov_i64_aarch64eb
-#define tcg_gen_movi_i32 tcg_gen_movi_i32_aarch64eb
-#define tcg_gen_movi_i64 tcg_gen_movi_i64_aarch64eb
-#define tcg_gen_mul_i32 tcg_gen_mul_i32_aarch64eb
-#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_aarch64eb
-#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_aarch64eb
-#define tcg_gen_neg_i32 tcg_gen_neg_i32_aarch64eb
-#define tcg_gen_neg_i64 tcg_gen_neg_i64_aarch64eb
-#define tcg_gen_not_i32 tcg_gen_not_i32_aarch64eb
-#define tcg_gen_op0 tcg_gen_op0_aarch64eb
-#define tcg_gen_op1i tcg_gen_op1i_aarch64eb
-#define tcg_gen_op2_i32 tcg_gen_op2_i32_aarch64eb
-#define tcg_gen_op2_i64 tcg_gen_op2_i64_aarch64eb
-#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_aarch64eb
-#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_aarch64eb
-#define tcg_gen_op3_i32 tcg_gen_op3_i32_aarch64eb
-#define tcg_gen_op3_i64 tcg_gen_op3_i64_aarch64eb
-#define tcg_gen_op4_i32 tcg_gen_op4_i32_aarch64eb
-#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_aarch64eb
-#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_aarch64eb
-#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_aarch64eb
-#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_aarch64eb
-#define tcg_gen_op6_i32 tcg_gen_op6_i32_aarch64eb
-#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_aarch64eb
-#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_aarch64eb
-#define tcg_gen_orc_i32 tcg_gen_orc_i32_aarch64eb
-#define tcg_gen_or_i32 tcg_gen_or_i32_aarch64eb
-#define tcg_gen_or_i64 tcg_gen_or_i64_aarch64eb
-#define tcg_gen_ori_i32 tcg_gen_ori_i32_aarch64eb
-#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_aarch64eb
-#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_aarch64eb
-#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_aarch64eb
-#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_aarch64eb
-#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_aarch64eb
-#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_aarch64eb
-#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_aarch64eb
-#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_aarch64eb
-#define tcg_gen_sar_i32 tcg_gen_sar_i32_aarch64eb
-#define tcg_gen_sari_i32 tcg_gen_sari_i32_aarch64eb
-#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_aarch64eb
-#define tcg_gen_shl_i32 tcg_gen_shl_i32_aarch64eb
-#define tcg_gen_shl_i64 tcg_gen_shl_i64_aarch64eb
-#define tcg_gen_shli_i32 tcg_gen_shli_i32_aarch64eb
-#define tcg_gen_shli_i64 tcg_gen_shli_i64_aarch64eb
-#define tcg_gen_shr_i32 tcg_gen_shr_i32_aarch64eb
-#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_aarch64eb
-#define tcg_gen_shr_i64 tcg_gen_shr_i64_aarch64eb
-#define tcg_gen_shri_i32 tcg_gen_shri_i32_aarch64eb
-#define tcg_gen_shri_i64 tcg_gen_shri_i64_aarch64eb
-#define tcg_gen_st_i32 tcg_gen_st_i32_aarch64eb
tcg_gen_st_i64_aarch64eb -#define tcg_gen_sub_i32 tcg_gen_sub_i32_aarch64eb -#define tcg_gen_sub_i64 tcg_gen_sub_i64_aarch64eb -#define tcg_gen_subi_i32 tcg_gen_subi_i32_aarch64eb -#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_aarch64eb -#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_aarch64eb -#define tcg_gen_xor_i32 tcg_gen_xor_i32_aarch64eb -#define tcg_gen_xor_i64 tcg_gen_xor_i64_aarch64eb -#define tcg_gen_xori_i32 tcg_gen_xori_i32_aarch64eb -#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_aarch64eb -#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_aarch64eb -#define tcg_get_arg_str_idx tcg_get_arg_str_idx_aarch64eb -#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_aarch64eb -#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_aarch64eb -#define tcg_global_mem_new_internal tcg_global_mem_new_internal_aarch64eb -#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_aarch64eb -#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_aarch64eb -#define tcg_global_reg_new_internal tcg_global_reg_new_internal_aarch64eb -#define tcg_handle_interrupt tcg_handle_interrupt_aarch64eb -#define tcg_init tcg_init_aarch64eb -#define tcg_invert_cond tcg_invert_cond_aarch64eb -#define tcg_la_bb_end tcg_la_bb_end_aarch64eb -#define tcg_la_br_end tcg_la_br_end_aarch64eb -#define tcg_la_func_end tcg_la_func_end_aarch64eb -#define tcg_liveness_analysis tcg_liveness_analysis_aarch64eb -#define tcg_malloc tcg_malloc_aarch64eb -#define tcg_malloc_internal tcg_malloc_internal_aarch64eb -#define tcg_op_defs_org tcg_op_defs_org_aarch64eb -#define tcg_opt_gen_mov tcg_opt_gen_mov_aarch64eb -#define tcg_opt_gen_movi tcg_opt_gen_movi_aarch64eb -#define tcg_optimize tcg_optimize_aarch64eb -#define tcg_out16 tcg_out16_aarch64eb -#define tcg_out32 tcg_out32_aarch64eb -#define tcg_out64 tcg_out64_aarch64eb -#define tcg_out8 tcg_out8_aarch64eb -#define tcg_out_addi tcg_out_addi_aarch64eb -#define tcg_out_branch tcg_out_branch_aarch64eb -#define tcg_out_brcond32 tcg_out_brcond32_aarch64eb -#define tcg_out_brcond64 tcg_out_brcond64_aarch64eb -#define tcg_out_bswap32 tcg_out_bswap32_aarch64eb -#define tcg_out_bswap64 tcg_out_bswap64_aarch64eb -#define tcg_out_call tcg_out_call_aarch64eb -#define tcg_out_cmp tcg_out_cmp_aarch64eb -#define tcg_out_ext16s tcg_out_ext16s_aarch64eb -#define tcg_out_ext16u tcg_out_ext16u_aarch64eb -#define tcg_out_ext32s tcg_out_ext32s_aarch64eb -#define tcg_out_ext32u tcg_out_ext32u_aarch64eb -#define tcg_out_ext8s tcg_out_ext8s_aarch64eb -#define tcg_out_ext8u tcg_out_ext8u_aarch64eb -#define tcg_out_jmp tcg_out_jmp_aarch64eb -#define tcg_out_jxx tcg_out_jxx_aarch64eb -#define tcg_out_label tcg_out_label_aarch64eb -#define tcg_out_ld tcg_out_ld_aarch64eb -#define tcg_out_modrm tcg_out_modrm_aarch64eb -#define tcg_out_modrm_offset tcg_out_modrm_offset_aarch64eb -#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_aarch64eb -#define tcg_out_mov tcg_out_mov_aarch64eb -#define tcg_out_movcond32 tcg_out_movcond32_aarch64eb -#define tcg_out_movcond64 tcg_out_movcond64_aarch64eb -#define tcg_out_movi tcg_out_movi_aarch64eb -#define tcg_out_op tcg_out_op_aarch64eb -#define tcg_out_pop tcg_out_pop_aarch64eb -#define tcg_out_push tcg_out_push_aarch64eb -#define tcg_out_qemu_ld tcg_out_qemu_ld_aarch64eb -#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_aarch64eb -#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_aarch64eb -#define tcg_out_qemu_st tcg_out_qemu_st_aarch64eb -#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_aarch64eb -#define 
tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_aarch64eb -#define tcg_out_reloc tcg_out_reloc_aarch64eb -#define tcg_out_rolw_8 tcg_out_rolw_8_aarch64eb -#define tcg_out_setcond32 tcg_out_setcond32_aarch64eb -#define tcg_out_setcond64 tcg_out_setcond64_aarch64eb -#define tcg_out_shifti tcg_out_shifti_aarch64eb -#define tcg_out_st tcg_out_st_aarch64eb -#define tcg_out_tb_finalize tcg_out_tb_finalize_aarch64eb -#define tcg_out_tb_init tcg_out_tb_init_aarch64eb -#define tcg_out_tlb_load tcg_out_tlb_load_aarch64eb -#define tcg_out_vex_modrm tcg_out_vex_modrm_aarch64eb -#define tcg_patch32 tcg_patch32_aarch64eb -#define tcg_patch8 tcg_patch8_aarch64eb -#define tcg_pcrel_diff tcg_pcrel_diff_aarch64eb -#define tcg_pool_reset tcg_pool_reset_aarch64eb -#define tcg_prologue_init tcg_prologue_init_aarch64eb -#define tcg_ptr_byte_diff tcg_ptr_byte_diff_aarch64eb -#define tcg_reg_alloc tcg_reg_alloc_aarch64eb -#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_aarch64eb -#define tcg_reg_alloc_call tcg_reg_alloc_call_aarch64eb -#define tcg_reg_alloc_mov tcg_reg_alloc_mov_aarch64eb -#define tcg_reg_alloc_movi tcg_reg_alloc_movi_aarch64eb -#define tcg_reg_alloc_op tcg_reg_alloc_op_aarch64eb -#define tcg_reg_alloc_start tcg_reg_alloc_start_aarch64eb -#define tcg_reg_free tcg_reg_free_aarch64eb -#define tcg_reg_sync tcg_reg_sync_aarch64eb -#define tcg_set_frame tcg_set_frame_aarch64eb -#define tcg_set_nop tcg_set_nop_aarch64eb -#define tcg_swap_cond tcg_swap_cond_aarch64eb -#define tcg_target_callee_save_regs tcg_target_callee_save_regs_aarch64eb -#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_aarch64eb -#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_aarch64eb -#define tcg_target_const_match tcg_target_const_match_aarch64eb -#define tcg_target_init tcg_target_init_aarch64eb -#define tcg_target_qemu_prologue tcg_target_qemu_prologue_aarch64eb -#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_aarch64eb -#define tcg_temp_alloc tcg_temp_alloc_aarch64eb -#define tcg_temp_free_i32 tcg_temp_free_i32_aarch64eb -#define tcg_temp_free_i64 tcg_temp_free_i64_aarch64eb -#define tcg_temp_free_internal tcg_temp_free_internal_aarch64eb -#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_aarch64eb -#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_aarch64eb -#define tcg_temp_new_i32 tcg_temp_new_i32_aarch64eb -#define tcg_temp_new_i64 tcg_temp_new_i64_aarch64eb -#define tcg_temp_new_internal tcg_temp_new_internal_aarch64eb -#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_aarch64eb -#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_aarch64eb -#define tdb_hash tdb_hash_aarch64eb -#define teecr_write teecr_write_aarch64eb -#define teehbr_access teehbr_access_aarch64eb -#define temp_allocate_frame temp_allocate_frame_aarch64eb -#define temp_dead temp_dead_aarch64eb -#define temps_are_copies temps_are_copies_aarch64eb -#define temp_save temp_save_aarch64eb -#define temp_sync temp_sync_aarch64eb -#define tgen_arithi tgen_arithi_aarch64eb -#define tgen_arithr tgen_arithr_aarch64eb -#define thumb2_logic_op thumb2_logic_op_aarch64eb -#define ti925t_initfn ti925t_initfn_aarch64eb -#define tlb_add_large_page tlb_add_large_page_aarch64eb -#define tlb_flush_entry tlb_flush_entry_aarch64eb -#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_aarch64eb -#define tlbi_aa64_asid_write tlbi_aa64_asid_write_aarch64eb -#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_aarch64eb -#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_aarch64eb -#define tlbi_aa64_va_is_write 
tlbi_aa64_va_is_write_aarch64eb -#define tlbi_aa64_va_write tlbi_aa64_va_write_aarch64eb -#define tlbiall_is_write tlbiall_is_write_aarch64eb -#define tlbiall_write tlbiall_write_aarch64eb -#define tlbiasid_is_write tlbiasid_is_write_aarch64eb -#define tlbiasid_write tlbiasid_write_aarch64eb -#define tlbimvaa_is_write tlbimvaa_is_write_aarch64eb -#define tlbimvaa_write tlbimvaa_write_aarch64eb -#define tlbimva_is_write tlbimva_is_write_aarch64eb -#define tlbimva_write tlbimva_write_aarch64eb -#define tlb_is_dirty_ram tlb_is_dirty_ram_aarch64eb -#define tlb_protect_code tlb_protect_code_aarch64eb -#define tlb_reset_dirty_range tlb_reset_dirty_range_aarch64eb -#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_aarch64eb -#define tlb_set_dirty tlb_set_dirty_aarch64eb -#define tlb_set_dirty1 tlb_set_dirty1_aarch64eb -#define tlb_unprotect_code_phys tlb_unprotect_code_phys_aarch64eb -#define tlb_vaddr_to_host tlb_vaddr_to_host_aarch64eb -#define token_get_type token_get_type_aarch64eb -#define token_get_value token_get_value_aarch64eb -#define token_is_escape token_is_escape_aarch64eb -#define token_is_keyword token_is_keyword_aarch64eb -#define token_is_operator token_is_operator_aarch64eb -#define tokens_append_from_iter tokens_append_from_iter_aarch64eb -#define to_qiv to_qiv_aarch64eb -#define to_qov to_qov_aarch64eb -#define tosa_init tosa_init_aarch64eb -#define tosa_machine_init tosa_machine_init_aarch64eb -#define tswap32 tswap32_aarch64eb -#define tswap64 tswap64_aarch64eb -#define type_class_get_size type_class_get_size_aarch64eb -#define type_get_by_name type_get_by_name_aarch64eb -#define type_get_parent type_get_parent_aarch64eb -#define type_has_parent type_has_parent_aarch64eb -#define type_initialize type_initialize_aarch64eb -#define type_initialize_interface type_initialize_interface_aarch64eb -#define type_is_ancestor type_is_ancestor_aarch64eb -#define type_new type_new_aarch64eb -#define type_object_get_size type_object_get_size_aarch64eb -#define type_register_internal type_register_internal_aarch64eb -#define type_table_add type_table_add_aarch64eb -#define type_table_get type_table_get_aarch64eb -#define type_table_lookup type_table_lookup_aarch64eb -#define uint16_to_float32 uint16_to_float32_aarch64eb -#define uint16_to_float64 uint16_to_float64_aarch64eb -#define uint32_to_float32 uint32_to_float32_aarch64eb -#define uint32_to_float64 uint32_to_float64_aarch64eb -#define uint64_to_float128 uint64_to_float128_aarch64eb -#define uint64_to_float32 uint64_to_float32_aarch64eb -#define uint64_to_float64 uint64_to_float64_aarch64eb -#define unassigned_io_ops unassigned_io_ops_aarch64eb -#define unassigned_io_read unassigned_io_read_aarch64eb -#define unassigned_io_write unassigned_io_write_aarch64eb -#define unassigned_mem_accepts unassigned_mem_accepts_aarch64eb -#define unassigned_mem_ops unassigned_mem_ops_aarch64eb -#define unassigned_mem_read unassigned_mem_read_aarch64eb -#define unassigned_mem_write unassigned_mem_write_aarch64eb -#define update_spsel update_spsel_aarch64eb -#define v6_cp_reginfo v6_cp_reginfo_aarch64eb -#define v6k_cp_reginfo v6k_cp_reginfo_aarch64eb -#define v7_cp_reginfo v7_cp_reginfo_aarch64eb -#define v7mp_cp_reginfo v7mp_cp_reginfo_aarch64eb -#define v7m_pop v7m_pop_aarch64eb -#define v7m_push v7m_push_aarch64eb -#define v8_cp_reginfo v8_cp_reginfo_aarch64eb -#define v8_el2_cp_reginfo v8_el2_cp_reginfo_aarch64eb -#define v8_el3_cp_reginfo v8_el3_cp_reginfo_aarch64eb -#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_aarch64eb 
-#define vapa_cp_reginfo vapa_cp_reginfo_aarch64eb -#define vbar_write vbar_write_aarch64eb -#define vfp_exceptbits_from_host vfp_exceptbits_from_host_aarch64eb -#define vfp_exceptbits_to_host vfp_exceptbits_to_host_aarch64eb -#define vfp_get_fpcr vfp_get_fpcr_aarch64eb -#define vfp_get_fpscr vfp_get_fpscr_aarch64eb -#define vfp_get_fpsr vfp_get_fpsr_aarch64eb -#define vfp_reg_offset vfp_reg_offset_aarch64eb -#define vfp_set_fpcr vfp_set_fpcr_aarch64eb -#define vfp_set_fpscr vfp_set_fpscr_aarch64eb -#define vfp_set_fpsr vfp_set_fpsr_aarch64eb -#define visit_end_implicit_struct visit_end_implicit_struct_aarch64eb -#define visit_end_list visit_end_list_aarch64eb -#define visit_end_struct visit_end_struct_aarch64eb -#define visit_end_union visit_end_union_aarch64eb -#define visit_get_next_type visit_get_next_type_aarch64eb -#define visit_next_list visit_next_list_aarch64eb -#define visit_optional visit_optional_aarch64eb -#define visit_start_implicit_struct visit_start_implicit_struct_aarch64eb -#define visit_start_list visit_start_list_aarch64eb -#define visit_start_struct visit_start_struct_aarch64eb -#define visit_start_union visit_start_union_aarch64eb -#define vmsa_cp_reginfo vmsa_cp_reginfo_aarch64eb -#define vmsa_tcr_el1_write vmsa_tcr_el1_write_aarch64eb -#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_aarch64eb -#define vmsa_ttbcr_reset vmsa_ttbcr_reset_aarch64eb -#define vmsa_ttbcr_write vmsa_ttbcr_write_aarch64eb -#define vmsa_ttbr_write vmsa_ttbr_write_aarch64eb -#define write_cpustate_to_list write_cpustate_to_list_aarch64eb -#define write_list_to_cpustate write_list_to_cpustate_aarch64eb -#define write_raw_cp_reg write_raw_cp_reg_aarch64eb -#define X86CPURegister32_lookup X86CPURegister32_lookup_aarch64eb -#define x86_op_defs x86_op_defs_aarch64eb -#define xpsr_read xpsr_read_aarch64eb -#define xpsr_write xpsr_write_aarch64eb -#define xscale_cpar_write xscale_cpar_write_aarch64eb -#define xscale_cp_reginfo xscale_cp_reginfo_aarch64eb -#define ARM64_REGS_STORAGE_SIZE ARM64_REGS_STORAGE_SIZE_aarch64eb -#define arm64_release arm64_release_aarch64eb -#define arm64_reg_reset arm64_reg_reset_aarch64eb -#define arm64_reg_read arm64_reg_read_aarch64eb -#define arm64_reg_write arm64_reg_write_aarch64eb -#define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64eb -#define aarch64_cpu_register_types aarch64_cpu_register_types_aarch64eb +#define helper_crypto_sha512h helper_crypto_sha512h_aarch64eb +#define helper_crypto_sha512h2 helper_crypto_sha512h2_aarch64eb +#define helper_crypto_sha512su0 helper_crypto_sha512su0_aarch64eb +#define helper_crypto_sha512su1 helper_crypto_sha512su1_aarch64eb +#define helper_crypto_sm3partw1 helper_crypto_sm3partw1_aarch64eb +#define helper_crypto_sm3partw2 helper_crypto_sm3partw2_aarch64eb +#define helper_crypto_sm3tt helper_crypto_sm3tt_aarch64eb +#define helper_crypto_sm4e helper_crypto_sm4e_aarch64eb +#define helper_crypto_sm4ekey helper_crypto_sm4ekey_aarch64eb +#define helper_check_breakpoints helper_check_breakpoints_aarch64eb +#define arm_debug_check_watchpoint arm_debug_check_watchpoint_aarch64eb +#define arm_debug_excp_handler arm_debug_excp_handler_aarch64eb +#define arm_adjust_watchpoint_address arm_adjust_watchpoint_address_aarch64eb #define helper_udiv64 helper_udiv64_aarch64eb #define helper_sdiv64 helper_sdiv64_aarch64eb -#define helper_cls64 helper_cls64_aarch64eb -#define helper_cls32 helper_cls32_aarch64eb #define helper_rbit64 helper_rbit64_aarch64eb +#define helper_msr_i_spsel helper_msr_i_spsel_aarch64eb +#define helper_msr_i_daifset 
helper_msr_i_daifset_aarch64eb +#define helper_msr_i_daifclear helper_msr_i_daifclear_aarch64eb +#define helper_vfp_cmph_a64 helper_vfp_cmph_a64_aarch64eb +#define helper_vfp_cmpeh_a64 helper_vfp_cmpeh_a64_aarch64eb #define helper_vfp_cmps_a64 helper_vfp_cmps_a64_aarch64eb #define helper_vfp_cmpes_a64 helper_vfp_cmpes_a64_aarch64eb #define helper_vfp_cmpd_a64 helper_vfp_cmpd_a64_aarch64eb @@ -3039,18 +1323,1653 @@ #define helper_neon_ceq_f64 helper_neon_ceq_f64_aarch64eb #define helper_neon_cge_f64 helper_neon_cge_f64_aarch64eb #define helper_neon_cgt_f64 helper_neon_cgt_f64_aarch64eb +#define helper_recpsf_f16 helper_recpsf_f16_aarch64eb #define helper_recpsf_f32 helper_recpsf_f32_aarch64eb #define helper_recpsf_f64 helper_recpsf_f64_aarch64eb +#define helper_rsqrtsf_f16 helper_rsqrtsf_f16_aarch64eb #define helper_rsqrtsf_f32 helper_rsqrtsf_f32_aarch64eb #define helper_rsqrtsf_f64 helper_rsqrtsf_f64_aarch64eb #define helper_neon_addlp_s8 helper_neon_addlp_s8_aarch64eb #define helper_neon_addlp_u8 helper_neon_addlp_u8_aarch64eb #define helper_neon_addlp_s16 helper_neon_addlp_s16_aarch64eb #define helper_neon_addlp_u16 helper_neon_addlp_u16_aarch64eb +#define helper_frecpx_f16 helper_frecpx_f16_aarch64eb #define helper_frecpx_f32 helper_frecpx_f32_aarch64eb #define helper_frecpx_f64 helper_frecpx_f64_aarch64eb #define helper_fcvtx_f64_to_f32 helper_fcvtx_f64_to_f32_aarch64eb #define helper_crc32_64 helper_crc32_64_aarch64eb #define helper_crc32c_64 helper_crc32c_64_aarch64eb -#define aarch64_cpu_do_interrupt aarch64_cpu_do_interrupt_aarch64eb +#define helper_paired_cmpxchg64_le helper_paired_cmpxchg64_le_aarch64eb +#define helper_paired_cmpxchg64_le_parallel helper_paired_cmpxchg64_le_parallel_aarch64eb +#define helper_paired_cmpxchg64_be helper_paired_cmpxchg64_be_aarch64eb +#define helper_paired_cmpxchg64_be_parallel helper_paired_cmpxchg64_be_parallel_aarch64eb +#define helper_casp_le_parallel helper_casp_le_parallel_aarch64eb +#define helper_casp_be_parallel helper_casp_be_parallel_aarch64eb +#define helper_advsimd_addh helper_advsimd_addh_aarch64eb +#define helper_advsimd_subh helper_advsimd_subh_aarch64eb +#define helper_advsimd_mulh helper_advsimd_mulh_aarch64eb +#define helper_advsimd_divh helper_advsimd_divh_aarch64eb +#define helper_advsimd_minh helper_advsimd_minh_aarch64eb +#define helper_advsimd_maxh helper_advsimd_maxh_aarch64eb +#define helper_advsimd_minnumh helper_advsimd_minnumh_aarch64eb +#define helper_advsimd_maxnumh helper_advsimd_maxnumh_aarch64eb +#define helper_advsimd_add2h helper_advsimd_add2h_aarch64eb +#define helper_advsimd_sub2h helper_advsimd_sub2h_aarch64eb +#define helper_advsimd_mul2h helper_advsimd_mul2h_aarch64eb +#define helper_advsimd_div2h helper_advsimd_div2h_aarch64eb +#define helper_advsimd_min2h helper_advsimd_min2h_aarch64eb +#define helper_advsimd_max2h helper_advsimd_max2h_aarch64eb +#define helper_advsimd_minnum2h helper_advsimd_minnum2h_aarch64eb +#define helper_advsimd_maxnum2h helper_advsimd_maxnum2h_aarch64eb +#define helper_advsimd_mulxh helper_advsimd_mulxh_aarch64eb +#define helper_advsimd_mulx2h helper_advsimd_mulx2h_aarch64eb +#define helper_advsimd_muladdh helper_advsimd_muladdh_aarch64eb +#define helper_advsimd_muladd2h helper_advsimd_muladd2h_aarch64eb +#define helper_advsimd_ceq_f16 helper_advsimd_ceq_f16_aarch64eb +#define helper_advsimd_cge_f16 helper_advsimd_cge_f16_aarch64eb +#define helper_advsimd_cgt_f16 helper_advsimd_cgt_f16_aarch64eb +#define helper_advsimd_acge_f16 helper_advsimd_acge_f16_aarch64eb +#define 
helper_advsimd_acgt_f16 helper_advsimd_acgt_f16_aarch64eb +#define helper_advsimd_rinth_exact helper_advsimd_rinth_exact_aarch64eb +#define helper_advsimd_rinth helper_advsimd_rinth_aarch64eb +#define helper_advsimd_f16tosinth helper_advsimd_f16tosinth_aarch64eb +#define helper_advsimd_f16touinth helper_advsimd_f16touinth_aarch64eb +#define helper_exception_return helper_exception_return_aarch64eb +#define helper_sqrt_f16 helper_sqrt_f16_aarch64eb +#define helper_dc_zva helper_dc_zva_aarch64eb +#define read_raw_cp_reg read_raw_cp_reg_aarch64eb +#define pmu_init pmu_init_aarch64eb +#define pmu_op_start pmu_op_start_aarch64eb +#define pmu_op_finish pmu_op_finish_aarch64eb +#define pmu_pre_el_change pmu_pre_el_change_aarch64eb +#define pmu_post_el_change pmu_post_el_change_aarch64eb +#define arm_pmu_timer_cb arm_pmu_timer_cb_aarch64eb +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_aarch64eb +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_aarch64eb +#define arm_gt_htimer_cb arm_gt_htimer_cb_aarch64eb +#define arm_gt_stimer_cb arm_gt_stimer_cb_aarch64eb +#define arm_gt_hvtimer_cb arm_gt_hvtimer_cb_aarch64eb +#define arm_hcr_el2_eff arm_hcr_el2_eff_aarch64eb +#define sve_exception_el sve_exception_el_aarch64eb +#define sve_zcr_len_for_el sve_zcr_len_for_el_aarch64eb +#define hw_watchpoint_update hw_watchpoint_update_aarch64eb +#define hw_watchpoint_update_all hw_watchpoint_update_all_aarch64eb +#define hw_breakpoint_update hw_breakpoint_update_aarch64eb +#define hw_breakpoint_update_all hw_breakpoint_update_all_aarch64eb +#define register_cp_regs_for_features register_cp_regs_for_features_aarch64eb +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_aarch64eb +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_aarch64eb +#define modify_arm_cp_regs modify_arm_cp_regs_aarch64eb +#define get_arm_cp_reginfo get_arm_cp_reginfo_aarch64eb +#define arm_cp_write_ignore arm_cp_write_ignore_aarch64eb +#define arm_cp_read_zero arm_cp_read_zero_aarch64eb +#define arm_cp_reset_ignore arm_cp_reset_ignore_aarch64eb +#define cpsr_read cpsr_read_aarch64eb +#define cpsr_write cpsr_write_aarch64eb +#define helper_sxtb16 helper_sxtb16_aarch64eb +#define helper_uxtb16 helper_uxtb16_aarch64eb +#define helper_sdiv helper_sdiv_aarch64eb +#define helper_udiv helper_udiv_aarch64eb +#define helper_rbit helper_rbit_aarch64eb +#define arm_phys_excp_target_el arm_phys_excp_target_el_aarch64eb +#define aarch64_sync_32_to_64 aarch64_sync_32_to_64_aarch64eb +#define aarch64_sync_64_to_32 aarch64_sync_64_to_32_aarch64eb +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_aarch64eb +#define arm_sctlr arm_sctlr_aarch64eb +#define arm_s1_regime_using_lpae_format arm_s1_regime_using_lpae_format_aarch64eb +#define aa64_va_parameters aa64_va_parameters_aarch64eb +#define v8m_security_lookup v8m_security_lookup_aarch64eb +#define pmsav8_mpu_lookup pmsav8_mpu_lookup_aarch64eb +#define get_phys_addr get_phys_addr_aarch64eb +#define arm_cpu_get_phys_page_attrs_debug arm_cpu_get_phys_page_attrs_debug_aarch64eb +#define helper_qadd16 helper_qadd16_aarch64eb +#define helper_qadd8 helper_qadd8_aarch64eb +#define helper_qsub16 helper_qsub16_aarch64eb +#define helper_qsub8 helper_qsub8_aarch64eb +#define helper_qsubaddx helper_qsubaddx_aarch64eb +#define helper_qaddsubx helper_qaddsubx_aarch64eb +#define helper_uqadd16 helper_uqadd16_aarch64eb +#define helper_uqadd8 helper_uqadd8_aarch64eb +#define helper_uqsub16 helper_uqsub16_aarch64eb +#define helper_uqsub8 helper_uqsub8_aarch64eb +#define helper_uqsubaddx 
helper_uqsubaddx_aarch64eb +#define helper_uqaddsubx helper_uqaddsubx_aarch64eb +#define helper_sadd16 helper_sadd16_aarch64eb +#define helper_sadd8 helper_sadd8_aarch64eb +#define helper_ssub16 helper_ssub16_aarch64eb +#define helper_ssub8 helper_ssub8_aarch64eb +#define helper_ssubaddx helper_ssubaddx_aarch64eb +#define helper_saddsubx helper_saddsubx_aarch64eb +#define helper_uadd16 helper_uadd16_aarch64eb +#define helper_uadd8 helper_uadd8_aarch64eb +#define helper_usub16 helper_usub16_aarch64eb +#define helper_usub8 helper_usub8_aarch64eb +#define helper_usubaddx helper_usubaddx_aarch64eb +#define helper_uaddsubx helper_uaddsubx_aarch64eb +#define helper_shadd16 helper_shadd16_aarch64eb +#define helper_shadd8 helper_shadd8_aarch64eb +#define helper_shsub16 helper_shsub16_aarch64eb +#define helper_shsub8 helper_shsub8_aarch64eb +#define helper_shsubaddx helper_shsubaddx_aarch64eb +#define helper_shaddsubx helper_shaddsubx_aarch64eb +#define helper_uhadd16 helper_uhadd16_aarch64eb +#define helper_uhadd8 helper_uhadd8_aarch64eb +#define helper_uhsub16 helper_uhsub16_aarch64eb +#define helper_uhsub8 helper_uhsub8_aarch64eb +#define helper_uhsubaddx helper_uhsubaddx_aarch64eb +#define helper_uhaddsubx helper_uhaddsubx_aarch64eb +#define helper_usad8 helper_usad8_aarch64eb +#define helper_sel_flags helper_sel_flags_aarch64eb +#define helper_crc32 helper_crc32_aarch64eb +#define helper_crc32c helper_crc32c_aarch64eb +#define fp_exception_el fp_exception_el_aarch64eb +#define arm_mmu_idx_to_el arm_mmu_idx_to_el_aarch64eb +#define arm_mmu_idx_el arm_mmu_idx_el_aarch64eb +#define arm_mmu_idx arm_mmu_idx_aarch64eb +#define arm_stage1_mmu_idx arm_stage1_mmu_idx_aarch64eb +#define arm_rebuild_hflags arm_rebuild_hflags_aarch64eb +#define helper_rebuild_hflags_m32_newel helper_rebuild_hflags_m32_newel_aarch64eb +#define helper_rebuild_hflags_m32 helper_rebuild_hflags_m32_aarch64eb +#define helper_rebuild_hflags_a32_newel helper_rebuild_hflags_a32_newel_aarch64eb +#define helper_rebuild_hflags_a32 helper_rebuild_hflags_a32_aarch64eb +#define helper_rebuild_hflags_a64 helper_rebuild_hflags_a64_aarch64eb +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_aarch64eb +#define aarch64_sve_narrow_vq aarch64_sve_narrow_vq_aarch64eb +#define aarch64_sve_change_el aarch64_sve_change_el_aarch64eb +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_aarch64eb +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_aarch64eb +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_aarch64eb +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_aarch64eb +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_aarch64eb +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_aarch64eb +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_aarch64eb +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_aarch64eb +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_aarch64eb +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_aarch64eb +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_aarch64eb +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_aarch64eb +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_aarch64eb +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_aarch64eb +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_aarch64eb +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_aarch64eb +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_aarch64eb +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_aarch64eb +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_aarch64eb +#define helper_iwmmxt_unpackhb 
helper_iwmmxt_unpackhb_aarch64eb +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_aarch64eb +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_aarch64eb +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_aarch64eb +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_aarch64eb +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_aarch64eb +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_aarch64eb +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_aarch64eb +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_aarch64eb +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_aarch64eb +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_aarch64eb +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_aarch64eb +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_aarch64eb +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_aarch64eb +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_aarch64eb +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_aarch64eb +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_aarch64eb +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_aarch64eb +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_aarch64eb +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_aarch64eb +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_aarch64eb +#define helper_iwmmxt_minub helper_iwmmxt_minub_aarch64eb +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_aarch64eb +#define helper_iwmmxt_minul helper_iwmmxt_minul_aarch64eb +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_aarch64eb +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_aarch64eb +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_aarch64eb +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_aarch64eb +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_aarch64eb +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_aarch64eb +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_aarch64eb +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_aarch64eb +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_aarch64eb +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_aarch64eb +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_aarch64eb +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_aarch64eb +#define helper_iwmmxt_subub helper_iwmmxt_subub_aarch64eb +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_aarch64eb +#define helper_iwmmxt_subul helper_iwmmxt_subul_aarch64eb +#define helper_iwmmxt_addub helper_iwmmxt_addub_aarch64eb +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_aarch64eb +#define helper_iwmmxt_addul helper_iwmmxt_addul_aarch64eb +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_aarch64eb +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_aarch64eb +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_aarch64eb +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_aarch64eb +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_aarch64eb +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_aarch64eb +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_aarch64eb +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_aarch64eb +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_aarch64eb +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_aarch64eb +#define helper_iwmmxt_align helper_iwmmxt_align_aarch64eb +#define helper_iwmmxt_insr helper_iwmmxt_insr_aarch64eb +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_aarch64eb +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_aarch64eb +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_aarch64eb +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_aarch64eb +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_aarch64eb 
+#define helper_iwmmxt_addcw helper_iwmmxt_addcw_aarch64eb +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_aarch64eb +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_aarch64eb +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_aarch64eb +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_aarch64eb +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_aarch64eb +#define helper_iwmmxt_srll helper_iwmmxt_srll_aarch64eb +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_aarch64eb +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_aarch64eb +#define helper_iwmmxt_slll helper_iwmmxt_slll_aarch64eb +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_aarch64eb +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_aarch64eb +#define helper_iwmmxt_sral helper_iwmmxt_sral_aarch64eb +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_aarch64eb +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_aarch64eb +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_aarch64eb +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_aarch64eb +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_aarch64eb +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_aarch64eb +#define helper_iwmmxt_packul helper_iwmmxt_packul_aarch64eb +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_aarch64eb +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_aarch64eb +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_aarch64eb +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_aarch64eb +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_aarch64eb +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_aarch64eb +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_aarch64eb +#define armv7m_nvic_set_pending armv7m_nvic_set_pending_aarch64eb +#define helper_v7m_preserve_fp_state helper_v7m_preserve_fp_state_aarch64eb +#define write_v7m_exception write_v7m_exception_aarch64eb +#define helper_v7m_bxns helper_v7m_bxns_aarch64eb +#define helper_v7m_blxns helper_v7m_blxns_aarch64eb +#define armv7m_nvic_neg_prio_requested armv7m_nvic_neg_prio_requested_aarch64eb +#define helper_v7m_vlstm helper_v7m_vlstm_aarch64eb +#define helper_v7m_vlldm helper_v7m_vlldm_aarch64eb +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_aarch64eb +#define helper_v7m_mrs helper_v7m_mrs_aarch64eb +#define helper_v7m_msr helper_v7m_msr_aarch64eb +#define helper_v7m_tt helper_v7m_tt_aarch64eb +#define arm_v7m_mmu_idx_all arm_v7m_mmu_idx_all_aarch64eb +#define arm_v7m_mmu_idx_for_secstate_and_priv arm_v7m_mmu_idx_for_secstate_and_priv_aarch64eb +#define arm_v7m_mmu_idx_for_secstate arm_v7m_mmu_idx_for_secstate_aarch64eb +#define helper_neon_qadd_u8 helper_neon_qadd_u8_aarch64eb +#define helper_neon_qadd_u16 helper_neon_qadd_u16_aarch64eb +#define helper_neon_qadd_u32 helper_neon_qadd_u32_aarch64eb +#define helper_neon_qadd_u64 helper_neon_qadd_u64_aarch64eb +#define helper_neon_qadd_s8 helper_neon_qadd_s8_aarch64eb +#define helper_neon_qadd_s16 helper_neon_qadd_s16_aarch64eb +#define helper_neon_qadd_s32 helper_neon_qadd_s32_aarch64eb +#define helper_neon_qadd_s64 helper_neon_qadd_s64_aarch64eb +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_aarch64eb +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_aarch64eb +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_aarch64eb +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_aarch64eb +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_aarch64eb +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_aarch64eb +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_aarch64eb +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_aarch64eb +#define helper_neon_qsub_u8 
helper_neon_qsub_u8_aarch64eb +#define helper_neon_qsub_u16 helper_neon_qsub_u16_aarch64eb +#define helper_neon_qsub_u32 helper_neon_qsub_u32_aarch64eb +#define helper_neon_qsub_u64 helper_neon_qsub_u64_aarch64eb +#define helper_neon_qsub_s8 helper_neon_qsub_s8_aarch64eb +#define helper_neon_qsub_s16 helper_neon_qsub_s16_aarch64eb +#define helper_neon_qsub_s32 helper_neon_qsub_s32_aarch64eb +#define helper_neon_qsub_s64 helper_neon_qsub_s64_aarch64eb +#define helper_neon_hadd_s8 helper_neon_hadd_s8_aarch64eb +#define helper_neon_hadd_u8 helper_neon_hadd_u8_aarch64eb +#define helper_neon_hadd_s16 helper_neon_hadd_s16_aarch64eb +#define helper_neon_hadd_u16 helper_neon_hadd_u16_aarch64eb +#define helper_neon_hadd_s32 helper_neon_hadd_s32_aarch64eb +#define helper_neon_hadd_u32 helper_neon_hadd_u32_aarch64eb +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_aarch64eb +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_aarch64eb +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_aarch64eb +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_aarch64eb +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_aarch64eb +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_aarch64eb +#define helper_neon_hsub_s8 helper_neon_hsub_s8_aarch64eb +#define helper_neon_hsub_u8 helper_neon_hsub_u8_aarch64eb +#define helper_neon_hsub_s16 helper_neon_hsub_s16_aarch64eb +#define helper_neon_hsub_u16 helper_neon_hsub_u16_aarch64eb +#define helper_neon_hsub_s32 helper_neon_hsub_s32_aarch64eb +#define helper_neon_hsub_u32 helper_neon_hsub_u32_aarch64eb +#define helper_neon_cgt_s8 helper_neon_cgt_s8_aarch64eb +#define helper_neon_cgt_u8 helper_neon_cgt_u8_aarch64eb +#define helper_neon_cgt_s16 helper_neon_cgt_s16_aarch64eb +#define helper_neon_cgt_u16 helper_neon_cgt_u16_aarch64eb +#define helper_neon_cgt_s32 helper_neon_cgt_s32_aarch64eb +#define helper_neon_cgt_u32 helper_neon_cgt_u32_aarch64eb +#define helper_neon_cge_s8 helper_neon_cge_s8_aarch64eb +#define helper_neon_cge_u8 helper_neon_cge_u8_aarch64eb +#define helper_neon_cge_s16 helper_neon_cge_s16_aarch64eb +#define helper_neon_cge_u16 helper_neon_cge_u16_aarch64eb +#define helper_neon_cge_s32 helper_neon_cge_s32_aarch64eb +#define helper_neon_cge_u32 helper_neon_cge_u32_aarch64eb +#define helper_neon_pmin_s8 helper_neon_pmin_s8_aarch64eb +#define helper_neon_pmin_u8 helper_neon_pmin_u8_aarch64eb +#define helper_neon_pmin_s16 helper_neon_pmin_s16_aarch64eb +#define helper_neon_pmin_u16 helper_neon_pmin_u16_aarch64eb +#define helper_neon_pmax_s8 helper_neon_pmax_s8_aarch64eb +#define helper_neon_pmax_u8 helper_neon_pmax_u8_aarch64eb +#define helper_neon_pmax_s16 helper_neon_pmax_s16_aarch64eb +#define helper_neon_pmax_u16 helper_neon_pmax_u16_aarch64eb +#define helper_neon_abd_s8 helper_neon_abd_s8_aarch64eb +#define helper_neon_abd_u8 helper_neon_abd_u8_aarch64eb +#define helper_neon_abd_s16 helper_neon_abd_s16_aarch64eb +#define helper_neon_abd_u16 helper_neon_abd_u16_aarch64eb +#define helper_neon_abd_s32 helper_neon_abd_s32_aarch64eb +#define helper_neon_abd_u32 helper_neon_abd_u32_aarch64eb +#define helper_neon_shl_u16 helper_neon_shl_u16_aarch64eb +#define helper_neon_shl_s16 helper_neon_shl_s16_aarch64eb +#define helper_neon_rshl_s8 helper_neon_rshl_s8_aarch64eb +#define helper_neon_rshl_s16 helper_neon_rshl_s16_aarch64eb +#define helper_neon_rshl_s32 helper_neon_rshl_s32_aarch64eb +#define helper_neon_rshl_s64 helper_neon_rshl_s64_aarch64eb +#define helper_neon_rshl_u8 helper_neon_rshl_u8_aarch64eb +#define helper_neon_rshl_u16 
helper_neon_rshl_u16_aarch64eb +#define helper_neon_rshl_u32 helper_neon_rshl_u32_aarch64eb +#define helper_neon_rshl_u64 helper_neon_rshl_u64_aarch64eb +#define helper_neon_qshl_u8 helper_neon_qshl_u8_aarch64eb +#define helper_neon_qshl_u16 helper_neon_qshl_u16_aarch64eb +#define helper_neon_qshl_u32 helper_neon_qshl_u32_aarch64eb +#define helper_neon_qshl_u64 helper_neon_qshl_u64_aarch64eb +#define helper_neon_qshl_s8 helper_neon_qshl_s8_aarch64eb +#define helper_neon_qshl_s16 helper_neon_qshl_s16_aarch64eb +#define helper_neon_qshl_s32 helper_neon_qshl_s32_aarch64eb +#define helper_neon_qshl_s64 helper_neon_qshl_s64_aarch64eb +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_aarch64eb +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_aarch64eb +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_aarch64eb +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_aarch64eb +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_aarch64eb +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_aarch64eb +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_aarch64eb +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_aarch64eb +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_aarch64eb +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_aarch64eb +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_aarch64eb +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_aarch64eb +#define helper_neon_add_u8 helper_neon_add_u8_aarch64eb +#define helper_neon_add_u16 helper_neon_add_u16_aarch64eb +#define helper_neon_padd_u8 helper_neon_padd_u8_aarch64eb +#define helper_neon_padd_u16 helper_neon_padd_u16_aarch64eb +#define helper_neon_sub_u8 helper_neon_sub_u8_aarch64eb +#define helper_neon_sub_u16 helper_neon_sub_u16_aarch64eb +#define helper_neon_mul_u8 helper_neon_mul_u8_aarch64eb +#define helper_neon_mul_u16 helper_neon_mul_u16_aarch64eb +#define helper_neon_tst_u8 helper_neon_tst_u8_aarch64eb +#define helper_neon_tst_u16 helper_neon_tst_u16_aarch64eb +#define helper_neon_tst_u32 helper_neon_tst_u32_aarch64eb +#define helper_neon_ceq_u8 helper_neon_ceq_u8_aarch64eb +#define helper_neon_ceq_u16 helper_neon_ceq_u16_aarch64eb +#define helper_neon_ceq_u32 helper_neon_ceq_u32_aarch64eb +#define helper_neon_clz_u8 helper_neon_clz_u8_aarch64eb +#define helper_neon_clz_u16 helper_neon_clz_u16_aarch64eb +#define helper_neon_cls_s8 helper_neon_cls_s8_aarch64eb +#define helper_neon_cls_s16 helper_neon_cls_s16_aarch64eb +#define helper_neon_cls_s32 helper_neon_cls_s32_aarch64eb +#define helper_neon_cnt_u8 helper_neon_cnt_u8_aarch64eb +#define helper_neon_rbit_u8 helper_neon_rbit_u8_aarch64eb +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_aarch64eb +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_aarch64eb +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_aarch64eb +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_aarch64eb +#define helper_neon_narrow_u8 helper_neon_narrow_u8_aarch64eb +#define helper_neon_narrow_u16 helper_neon_narrow_u16_aarch64eb +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_aarch64eb +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_aarch64eb +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_aarch64eb +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_aarch64eb +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_aarch64eb +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_aarch64eb +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_aarch64eb +#define 
helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_aarch64eb +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_aarch64eb +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_aarch64eb +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_aarch64eb +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_aarch64eb +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_aarch64eb +#define helper_neon_widen_u8 helper_neon_widen_u8_aarch64eb +#define helper_neon_widen_s8 helper_neon_widen_s8_aarch64eb +#define helper_neon_widen_u16 helper_neon_widen_u16_aarch64eb +#define helper_neon_widen_s16 helper_neon_widen_s16_aarch64eb +#define helper_neon_addl_u16 helper_neon_addl_u16_aarch64eb +#define helper_neon_addl_u32 helper_neon_addl_u32_aarch64eb +#define helper_neon_paddl_u16 helper_neon_paddl_u16_aarch64eb +#define helper_neon_paddl_u32 helper_neon_paddl_u32_aarch64eb +#define helper_neon_subl_u16 helper_neon_subl_u16_aarch64eb +#define helper_neon_subl_u32 helper_neon_subl_u32_aarch64eb +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_aarch64eb +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_aarch64eb +#define helper_neon_abdl_u16 helper_neon_abdl_u16_aarch64eb +#define helper_neon_abdl_s16 helper_neon_abdl_s16_aarch64eb +#define helper_neon_abdl_u32 helper_neon_abdl_u32_aarch64eb +#define helper_neon_abdl_s32 helper_neon_abdl_s32_aarch64eb +#define helper_neon_abdl_u64 helper_neon_abdl_u64_aarch64eb +#define helper_neon_abdl_s64 helper_neon_abdl_s64_aarch64eb +#define helper_neon_mull_u8 helper_neon_mull_u8_aarch64eb +#define helper_neon_mull_s8 helper_neon_mull_s8_aarch64eb +#define helper_neon_mull_u16 helper_neon_mull_u16_aarch64eb +#define helper_neon_mull_s16 helper_neon_mull_s16_aarch64eb +#define helper_neon_negl_u16 helper_neon_negl_u16_aarch64eb +#define helper_neon_negl_u32 helper_neon_negl_u32_aarch64eb +#define helper_neon_qabs_s8 helper_neon_qabs_s8_aarch64eb +#define helper_neon_qneg_s8 helper_neon_qneg_s8_aarch64eb +#define helper_neon_qabs_s16 helper_neon_qabs_s16_aarch64eb +#define helper_neon_qneg_s16 helper_neon_qneg_s16_aarch64eb +#define helper_neon_qabs_s32 helper_neon_qabs_s32_aarch64eb +#define helper_neon_qneg_s32 helper_neon_qneg_s32_aarch64eb +#define helper_neon_qabs_s64 helper_neon_qabs_s64_aarch64eb +#define helper_neon_qneg_s64 helper_neon_qneg_s64_aarch64eb +#define helper_neon_abd_f32 helper_neon_abd_f32_aarch64eb +#define helper_neon_ceq_f32 helper_neon_ceq_f32_aarch64eb +#define helper_neon_cge_f32 helper_neon_cge_f32_aarch64eb +#define helper_neon_cgt_f32 helper_neon_cgt_f32_aarch64eb +#define helper_neon_acge_f32 helper_neon_acge_f32_aarch64eb +#define helper_neon_acgt_f32 helper_neon_acgt_f32_aarch64eb +#define helper_neon_acge_f64 helper_neon_acge_f64_aarch64eb +#define helper_neon_acgt_f64 helper_neon_acgt_f64_aarch64eb +#define helper_neon_qunzip8 helper_neon_qunzip8_aarch64eb +#define helper_neon_qunzip16 helper_neon_qunzip16_aarch64eb +#define helper_neon_qunzip32 helper_neon_qunzip32_aarch64eb +#define helper_neon_unzip8 helper_neon_unzip8_aarch64eb +#define helper_neon_unzip16 helper_neon_unzip16_aarch64eb +#define helper_neon_qzip8 helper_neon_qzip8_aarch64eb +#define helper_neon_qzip16 helper_neon_qzip16_aarch64eb +#define helper_neon_qzip32 helper_neon_qzip32_aarch64eb +#define helper_neon_zip8 helper_neon_zip8_aarch64eb +#define helper_neon_zip16 helper_neon_zip16_aarch64eb +#define raise_exception raise_exception_aarch64eb +#define raise_exception_ra 
raise_exception_ra_aarch64eb +#define helper_neon_tbl helper_neon_tbl_aarch64eb +#define helper_v8m_stackcheck helper_v8m_stackcheck_aarch64eb +#define helper_add_setq helper_add_setq_aarch64eb +#define helper_add_saturate helper_add_saturate_aarch64eb +#define helper_sub_saturate helper_sub_saturate_aarch64eb +#define helper_add_usaturate helper_add_usaturate_aarch64eb +#define helper_sub_usaturate helper_sub_usaturate_aarch64eb +#define helper_ssat helper_ssat_aarch64eb +#define helper_ssat16 helper_ssat16_aarch64eb +#define helper_usat helper_usat_aarch64eb +#define helper_usat16 helper_usat16_aarch64eb +#define helper_setend helper_setend_aarch64eb +#define helper_wfi helper_wfi_aarch64eb +#define helper_wfe helper_wfe_aarch64eb +#define helper_yield helper_yield_aarch64eb +#define helper_exception_internal helper_exception_internal_aarch64eb +#define helper_exception_with_syndrome helper_exception_with_syndrome_aarch64eb +#define helper_exception_bkpt_insn helper_exception_bkpt_insn_aarch64eb +#define helper_cpsr_read helper_cpsr_read_aarch64eb +#define helper_cpsr_write helper_cpsr_write_aarch64eb +#define helper_cpsr_write_eret helper_cpsr_write_eret_aarch64eb +#define helper_get_user_reg helper_get_user_reg_aarch64eb +#define helper_set_user_reg helper_set_user_reg_aarch64eb +#define helper_set_r13_banked helper_set_r13_banked_aarch64eb +#define helper_get_r13_banked helper_get_r13_banked_aarch64eb +#define helper_msr_banked helper_msr_banked_aarch64eb +#define helper_mrs_banked helper_mrs_banked_aarch64eb +#define helper_access_check_cp_reg helper_access_check_cp_reg_aarch64eb +#define helper_set_cp_reg helper_set_cp_reg_aarch64eb +#define helper_get_cp_reg helper_get_cp_reg_aarch64eb +#define helper_set_cp_reg64 helper_set_cp_reg64_aarch64eb +#define helper_get_cp_reg64 helper_get_cp_reg64_aarch64eb +#define helper_pre_hvc helper_pre_hvc_aarch64eb +#define helper_pre_smc helper_pre_smc_aarch64eb +#define helper_shl_cc helper_shl_cc_aarch64eb +#define helper_shr_cc helper_shr_cc_aarch64eb +#define helper_sar_cc helper_sar_cc_aarch64eb +#define helper_ror_cc helper_ror_cc_aarch64eb +#define helper_pacia helper_pacia_aarch64eb +#define helper_pacib helper_pacib_aarch64eb +#define helper_pacda helper_pacda_aarch64eb +#define helper_pacdb helper_pacdb_aarch64eb +#define helper_pacga helper_pacga_aarch64eb +#define helper_autia helper_autia_aarch64eb +#define helper_autib helper_autib_aarch64eb +#define helper_autda helper_autda_aarch64eb +#define helper_autdb helper_autdb_aarch64eb +#define helper_xpaci helper_xpaci_aarch64eb +#define helper_xpacd helper_xpacd_aarch64eb +#define arm_is_psci_call arm_is_psci_call_aarch64eb +#define arm_handle_psci_call arm_handle_psci_call_aarch64eb +#define helper_sve_predtest1 helper_sve_predtest1_aarch64eb +#define helper_sve_predtest helper_sve_predtest_aarch64eb +#define helper_sve_and_pppp helper_sve_and_pppp_aarch64eb +#define helper_sve_bic_pppp helper_sve_bic_pppp_aarch64eb +#define helper_sve_eor_pppp helper_sve_eor_pppp_aarch64eb +#define helper_sve_sel_pppp helper_sve_sel_pppp_aarch64eb +#define helper_sve_orr_pppp helper_sve_orr_pppp_aarch64eb +#define helper_sve_orn_pppp helper_sve_orn_pppp_aarch64eb +#define helper_sve_nor_pppp helper_sve_nor_pppp_aarch64eb +#define helper_sve_nand_pppp helper_sve_nand_pppp_aarch64eb +#define helper_sve_and_zpzz_b helper_sve_and_zpzz_b_aarch64eb +#define helper_sve_and_zpzz_h helper_sve_and_zpzz_h_aarch64eb +#define helper_sve_and_zpzz_s helper_sve_and_zpzz_s_aarch64eb +#define helper_sve_and_zpzz_d 
helper_sve_and_zpzz_d_aarch64eb +#define helper_sve_orr_zpzz_b helper_sve_orr_zpzz_b_aarch64eb +#define helper_sve_orr_zpzz_h helper_sve_orr_zpzz_h_aarch64eb +#define helper_sve_orr_zpzz_s helper_sve_orr_zpzz_s_aarch64eb +#define helper_sve_orr_zpzz_d helper_sve_orr_zpzz_d_aarch64eb +#define helper_sve_eor_zpzz_b helper_sve_eor_zpzz_b_aarch64eb +#define helper_sve_eor_zpzz_h helper_sve_eor_zpzz_h_aarch64eb +#define helper_sve_eor_zpzz_s helper_sve_eor_zpzz_s_aarch64eb +#define helper_sve_eor_zpzz_d helper_sve_eor_zpzz_d_aarch64eb +#define helper_sve_bic_zpzz_b helper_sve_bic_zpzz_b_aarch64eb +#define helper_sve_bic_zpzz_h helper_sve_bic_zpzz_h_aarch64eb +#define helper_sve_bic_zpzz_s helper_sve_bic_zpzz_s_aarch64eb +#define helper_sve_bic_zpzz_d helper_sve_bic_zpzz_d_aarch64eb +#define helper_sve_add_zpzz_b helper_sve_add_zpzz_b_aarch64eb +#define helper_sve_add_zpzz_h helper_sve_add_zpzz_h_aarch64eb +#define helper_sve_add_zpzz_s helper_sve_add_zpzz_s_aarch64eb +#define helper_sve_add_zpzz_d helper_sve_add_zpzz_d_aarch64eb +#define helper_sve_sub_zpzz_b helper_sve_sub_zpzz_b_aarch64eb +#define helper_sve_sub_zpzz_h helper_sve_sub_zpzz_h_aarch64eb +#define helper_sve_sub_zpzz_s helper_sve_sub_zpzz_s_aarch64eb +#define helper_sve_sub_zpzz_d helper_sve_sub_zpzz_d_aarch64eb +#define helper_sve_smax_zpzz_b helper_sve_smax_zpzz_b_aarch64eb +#define helper_sve_smax_zpzz_h helper_sve_smax_zpzz_h_aarch64eb +#define helper_sve_smax_zpzz_s helper_sve_smax_zpzz_s_aarch64eb +#define helper_sve_smax_zpzz_d helper_sve_smax_zpzz_d_aarch64eb +#define helper_sve_umax_zpzz_b helper_sve_umax_zpzz_b_aarch64eb +#define helper_sve_umax_zpzz_h helper_sve_umax_zpzz_h_aarch64eb +#define helper_sve_umax_zpzz_s helper_sve_umax_zpzz_s_aarch64eb +#define helper_sve_umax_zpzz_d helper_sve_umax_zpzz_d_aarch64eb +#define helper_sve_smin_zpzz_b helper_sve_smin_zpzz_b_aarch64eb +#define helper_sve_smin_zpzz_h helper_sve_smin_zpzz_h_aarch64eb +#define helper_sve_smin_zpzz_s helper_sve_smin_zpzz_s_aarch64eb +#define helper_sve_smin_zpzz_d helper_sve_smin_zpzz_d_aarch64eb +#define helper_sve_umin_zpzz_b helper_sve_umin_zpzz_b_aarch64eb +#define helper_sve_umin_zpzz_h helper_sve_umin_zpzz_h_aarch64eb +#define helper_sve_umin_zpzz_s helper_sve_umin_zpzz_s_aarch64eb +#define helper_sve_umin_zpzz_d helper_sve_umin_zpzz_d_aarch64eb +#define helper_sve_sabd_zpzz_b helper_sve_sabd_zpzz_b_aarch64eb +#define helper_sve_sabd_zpzz_h helper_sve_sabd_zpzz_h_aarch64eb +#define helper_sve_sabd_zpzz_s helper_sve_sabd_zpzz_s_aarch64eb +#define helper_sve_sabd_zpzz_d helper_sve_sabd_zpzz_d_aarch64eb +#define helper_sve_uabd_zpzz_b helper_sve_uabd_zpzz_b_aarch64eb +#define helper_sve_uabd_zpzz_h helper_sve_uabd_zpzz_h_aarch64eb +#define helper_sve_uabd_zpzz_s helper_sve_uabd_zpzz_s_aarch64eb +#define helper_sve_uabd_zpzz_d helper_sve_uabd_zpzz_d_aarch64eb +#define helper_sve_mul_zpzz_b helper_sve_mul_zpzz_b_aarch64eb +#define helper_sve_mul_zpzz_h helper_sve_mul_zpzz_h_aarch64eb +#define helper_sve_mul_zpzz_s helper_sve_mul_zpzz_s_aarch64eb +#define helper_sve_mul_zpzz_d helper_sve_mul_zpzz_d_aarch64eb +#define helper_sve_smulh_zpzz_b helper_sve_smulh_zpzz_b_aarch64eb +#define helper_sve_smulh_zpzz_h helper_sve_smulh_zpzz_h_aarch64eb +#define helper_sve_smulh_zpzz_s helper_sve_smulh_zpzz_s_aarch64eb +#define helper_sve_smulh_zpzz_d helper_sve_smulh_zpzz_d_aarch64eb +#define helper_sve_umulh_zpzz_b helper_sve_umulh_zpzz_b_aarch64eb +#define helper_sve_umulh_zpzz_h helper_sve_umulh_zpzz_h_aarch64eb +#define helper_sve_umulh_zpzz_s 
helper_sve_umulh_zpzz_s_aarch64eb +#define helper_sve_umulh_zpzz_d helper_sve_umulh_zpzz_d_aarch64eb +#define helper_sve_sdiv_zpzz_s helper_sve_sdiv_zpzz_s_aarch64eb +#define helper_sve_sdiv_zpzz_d helper_sve_sdiv_zpzz_d_aarch64eb +#define helper_sve_udiv_zpzz_s helper_sve_udiv_zpzz_s_aarch64eb +#define helper_sve_udiv_zpzz_d helper_sve_udiv_zpzz_d_aarch64eb +#define helper_sve_asr_zpzz_b helper_sve_asr_zpzz_b_aarch64eb +#define helper_sve_lsr_zpzz_b helper_sve_lsr_zpzz_b_aarch64eb +#define helper_sve_lsl_zpzz_b helper_sve_lsl_zpzz_b_aarch64eb +#define helper_sve_asr_zpzz_h helper_sve_asr_zpzz_h_aarch64eb +#define helper_sve_lsr_zpzz_h helper_sve_lsr_zpzz_h_aarch64eb +#define helper_sve_lsl_zpzz_h helper_sve_lsl_zpzz_h_aarch64eb +#define helper_sve_asr_zpzz_s helper_sve_asr_zpzz_s_aarch64eb +#define helper_sve_lsr_zpzz_s helper_sve_lsr_zpzz_s_aarch64eb +#define helper_sve_lsl_zpzz_s helper_sve_lsl_zpzz_s_aarch64eb +#define helper_sve_asr_zpzz_d helper_sve_asr_zpzz_d_aarch64eb +#define helper_sve_lsr_zpzz_d helper_sve_lsr_zpzz_d_aarch64eb +#define helper_sve_lsl_zpzz_d helper_sve_lsl_zpzz_d_aarch64eb +#define helper_sve_asr_zpzw_b helper_sve_asr_zpzw_b_aarch64eb +#define helper_sve_lsr_zpzw_b helper_sve_lsr_zpzw_b_aarch64eb +#define helper_sve_lsl_zpzw_b helper_sve_lsl_zpzw_b_aarch64eb +#define helper_sve_asr_zpzw_h helper_sve_asr_zpzw_h_aarch64eb +#define helper_sve_lsr_zpzw_h helper_sve_lsr_zpzw_h_aarch64eb +#define helper_sve_lsl_zpzw_h helper_sve_lsl_zpzw_h_aarch64eb +#define helper_sve_asr_zpzw_s helper_sve_asr_zpzw_s_aarch64eb +#define helper_sve_lsr_zpzw_s helper_sve_lsr_zpzw_s_aarch64eb +#define helper_sve_lsl_zpzw_s helper_sve_lsl_zpzw_s_aarch64eb +#define helper_sve_cls_b helper_sve_cls_b_aarch64eb +#define helper_sve_cls_h helper_sve_cls_h_aarch64eb +#define helper_sve_cls_s helper_sve_cls_s_aarch64eb +#define helper_sve_cls_d helper_sve_cls_d_aarch64eb +#define helper_sve_clz_b helper_sve_clz_b_aarch64eb +#define helper_sve_clz_h helper_sve_clz_h_aarch64eb +#define helper_sve_clz_s helper_sve_clz_s_aarch64eb +#define helper_sve_clz_d helper_sve_clz_d_aarch64eb +#define helper_sve_cnt_zpz_b helper_sve_cnt_zpz_b_aarch64eb +#define helper_sve_cnt_zpz_h helper_sve_cnt_zpz_h_aarch64eb +#define helper_sve_cnt_zpz_s helper_sve_cnt_zpz_s_aarch64eb +#define helper_sve_cnt_zpz_d helper_sve_cnt_zpz_d_aarch64eb +#define helper_sve_cnot_b helper_sve_cnot_b_aarch64eb +#define helper_sve_cnot_h helper_sve_cnot_h_aarch64eb +#define helper_sve_cnot_s helper_sve_cnot_s_aarch64eb +#define helper_sve_cnot_d helper_sve_cnot_d_aarch64eb +#define helper_sve_fabs_h helper_sve_fabs_h_aarch64eb +#define helper_sve_fabs_s helper_sve_fabs_s_aarch64eb +#define helper_sve_fabs_d helper_sve_fabs_d_aarch64eb +#define helper_sve_fneg_h helper_sve_fneg_h_aarch64eb +#define helper_sve_fneg_s helper_sve_fneg_s_aarch64eb +#define helper_sve_fneg_d helper_sve_fneg_d_aarch64eb +#define helper_sve_not_zpz_b helper_sve_not_zpz_b_aarch64eb +#define helper_sve_not_zpz_h helper_sve_not_zpz_h_aarch64eb +#define helper_sve_not_zpz_s helper_sve_not_zpz_s_aarch64eb +#define helper_sve_not_zpz_d helper_sve_not_zpz_d_aarch64eb +#define helper_sve_sxtb_h helper_sve_sxtb_h_aarch64eb +#define helper_sve_sxtb_s helper_sve_sxtb_s_aarch64eb +#define helper_sve_sxth_s helper_sve_sxth_s_aarch64eb +#define helper_sve_sxtb_d helper_sve_sxtb_d_aarch64eb +#define helper_sve_sxth_d helper_sve_sxth_d_aarch64eb +#define helper_sve_sxtw_d helper_sve_sxtw_d_aarch64eb +#define helper_sve_uxtb_h helper_sve_uxtb_h_aarch64eb +#define 
helper_sve_uxtb_s helper_sve_uxtb_s_aarch64eb +#define helper_sve_uxth_s helper_sve_uxth_s_aarch64eb +#define helper_sve_uxtb_d helper_sve_uxtb_d_aarch64eb +#define helper_sve_uxth_d helper_sve_uxth_d_aarch64eb +#define helper_sve_uxtw_d helper_sve_uxtw_d_aarch64eb +#define helper_sve_abs_b helper_sve_abs_b_aarch64eb +#define helper_sve_abs_h helper_sve_abs_h_aarch64eb +#define helper_sve_abs_s helper_sve_abs_s_aarch64eb +#define helper_sve_abs_d helper_sve_abs_d_aarch64eb +#define helper_sve_neg_b helper_sve_neg_b_aarch64eb +#define helper_sve_neg_h helper_sve_neg_h_aarch64eb +#define helper_sve_neg_s helper_sve_neg_s_aarch64eb +#define helper_sve_neg_d helper_sve_neg_d_aarch64eb +#define helper_sve_revb_h helper_sve_revb_h_aarch64eb +#define helper_sve_revb_s helper_sve_revb_s_aarch64eb +#define helper_sve_revb_d helper_sve_revb_d_aarch64eb +#define helper_sve_revh_s helper_sve_revh_s_aarch64eb +#define helper_sve_revh_d helper_sve_revh_d_aarch64eb +#define helper_sve_revw_d helper_sve_revw_d_aarch64eb +#define helper_sve_rbit_b helper_sve_rbit_b_aarch64eb +#define helper_sve_rbit_h helper_sve_rbit_h_aarch64eb +#define helper_sve_rbit_s helper_sve_rbit_s_aarch64eb +#define helper_sve_rbit_d helper_sve_rbit_d_aarch64eb +#define helper_sve_asr_zzw_b helper_sve_asr_zzw_b_aarch64eb +#define helper_sve_lsr_zzw_b helper_sve_lsr_zzw_b_aarch64eb +#define helper_sve_lsl_zzw_b helper_sve_lsl_zzw_b_aarch64eb +#define helper_sve_asr_zzw_h helper_sve_asr_zzw_h_aarch64eb +#define helper_sve_lsr_zzw_h helper_sve_lsr_zzw_h_aarch64eb +#define helper_sve_lsl_zzw_h helper_sve_lsl_zzw_h_aarch64eb +#define helper_sve_asr_zzw_s helper_sve_asr_zzw_s_aarch64eb +#define helper_sve_lsr_zzw_s helper_sve_lsr_zzw_s_aarch64eb +#define helper_sve_lsl_zzw_s helper_sve_lsl_zzw_s_aarch64eb +#define helper_sve_orv_b helper_sve_orv_b_aarch64eb +#define helper_sve_orv_h helper_sve_orv_h_aarch64eb +#define helper_sve_orv_s helper_sve_orv_s_aarch64eb +#define helper_sve_orv_d helper_sve_orv_d_aarch64eb +#define helper_sve_eorv_b helper_sve_eorv_b_aarch64eb +#define helper_sve_eorv_h helper_sve_eorv_h_aarch64eb +#define helper_sve_eorv_s helper_sve_eorv_s_aarch64eb +#define helper_sve_eorv_d helper_sve_eorv_d_aarch64eb +#define helper_sve_andv_b helper_sve_andv_b_aarch64eb +#define helper_sve_andv_h helper_sve_andv_h_aarch64eb +#define helper_sve_andv_s helper_sve_andv_s_aarch64eb +#define helper_sve_andv_d helper_sve_andv_d_aarch64eb +#define helper_sve_saddv_b helper_sve_saddv_b_aarch64eb +#define helper_sve_saddv_h helper_sve_saddv_h_aarch64eb +#define helper_sve_saddv_s helper_sve_saddv_s_aarch64eb +#define helper_sve_uaddv_b helper_sve_uaddv_b_aarch64eb +#define helper_sve_uaddv_h helper_sve_uaddv_h_aarch64eb +#define helper_sve_uaddv_s helper_sve_uaddv_s_aarch64eb +#define helper_sve_uaddv_d helper_sve_uaddv_d_aarch64eb +#define helper_sve_smaxv_b helper_sve_smaxv_b_aarch64eb +#define helper_sve_smaxv_h helper_sve_smaxv_h_aarch64eb +#define helper_sve_smaxv_s helper_sve_smaxv_s_aarch64eb +#define helper_sve_smaxv_d helper_sve_smaxv_d_aarch64eb +#define helper_sve_umaxv_b helper_sve_umaxv_b_aarch64eb +#define helper_sve_umaxv_h helper_sve_umaxv_h_aarch64eb +#define helper_sve_umaxv_s helper_sve_umaxv_s_aarch64eb +#define helper_sve_umaxv_d helper_sve_umaxv_d_aarch64eb +#define helper_sve_sminv_b helper_sve_sminv_b_aarch64eb +#define helper_sve_sminv_h helper_sve_sminv_h_aarch64eb +#define helper_sve_sminv_s helper_sve_sminv_s_aarch64eb +#define helper_sve_sminv_d helper_sve_sminv_d_aarch64eb +#define helper_sve_uminv_b 
helper_sve_uminv_b_aarch64eb +#define helper_sve_uminv_h helper_sve_uminv_h_aarch64eb +#define helper_sve_uminv_s helper_sve_uminv_s_aarch64eb +#define helper_sve_uminv_d helper_sve_uminv_d_aarch64eb +#define helper_sve_subri_b helper_sve_subri_b_aarch64eb +#define helper_sve_subri_h helper_sve_subri_h_aarch64eb +#define helper_sve_subri_s helper_sve_subri_s_aarch64eb +#define helper_sve_subri_d helper_sve_subri_d_aarch64eb +#define helper_sve_smaxi_b helper_sve_smaxi_b_aarch64eb +#define helper_sve_smaxi_h helper_sve_smaxi_h_aarch64eb +#define helper_sve_smaxi_s helper_sve_smaxi_s_aarch64eb +#define helper_sve_smaxi_d helper_sve_smaxi_d_aarch64eb +#define helper_sve_smini_b helper_sve_smini_b_aarch64eb +#define helper_sve_smini_h helper_sve_smini_h_aarch64eb +#define helper_sve_smini_s helper_sve_smini_s_aarch64eb +#define helper_sve_smini_d helper_sve_smini_d_aarch64eb +#define helper_sve_umaxi_b helper_sve_umaxi_b_aarch64eb +#define helper_sve_umaxi_h helper_sve_umaxi_h_aarch64eb +#define helper_sve_umaxi_s helper_sve_umaxi_s_aarch64eb +#define helper_sve_umaxi_d helper_sve_umaxi_d_aarch64eb +#define helper_sve_umini_b helper_sve_umini_b_aarch64eb +#define helper_sve_umini_h helper_sve_umini_h_aarch64eb +#define helper_sve_umini_s helper_sve_umini_s_aarch64eb +#define helper_sve_umini_d helper_sve_umini_d_aarch64eb +#define helper_sve_pfirst helper_sve_pfirst_aarch64eb +#define helper_sve_pnext helper_sve_pnext_aarch64eb +#define helper_sve_clr_b helper_sve_clr_b_aarch64eb +#define helper_sve_clr_h helper_sve_clr_h_aarch64eb +#define helper_sve_clr_s helper_sve_clr_s_aarch64eb +#define helper_sve_clr_d helper_sve_clr_d_aarch64eb +#define helper_sve_movz_b helper_sve_movz_b_aarch64eb +#define helper_sve_movz_h helper_sve_movz_h_aarch64eb +#define helper_sve_movz_s helper_sve_movz_s_aarch64eb +#define helper_sve_movz_d helper_sve_movz_d_aarch64eb +#define helper_sve_asr_zpzi_b helper_sve_asr_zpzi_b_aarch64eb +#define helper_sve_asr_zpzi_h helper_sve_asr_zpzi_h_aarch64eb +#define helper_sve_asr_zpzi_s helper_sve_asr_zpzi_s_aarch64eb +#define helper_sve_asr_zpzi_d helper_sve_asr_zpzi_d_aarch64eb +#define helper_sve_lsr_zpzi_b helper_sve_lsr_zpzi_b_aarch64eb +#define helper_sve_lsr_zpzi_h helper_sve_lsr_zpzi_h_aarch64eb +#define helper_sve_lsr_zpzi_s helper_sve_lsr_zpzi_s_aarch64eb +#define helper_sve_lsr_zpzi_d helper_sve_lsr_zpzi_d_aarch64eb +#define helper_sve_lsl_zpzi_b helper_sve_lsl_zpzi_b_aarch64eb +#define helper_sve_lsl_zpzi_h helper_sve_lsl_zpzi_h_aarch64eb +#define helper_sve_lsl_zpzi_s helper_sve_lsl_zpzi_s_aarch64eb +#define helper_sve_lsl_zpzi_d helper_sve_lsl_zpzi_d_aarch64eb +#define helper_sve_asrd_b helper_sve_asrd_b_aarch64eb +#define helper_sve_asrd_h helper_sve_asrd_h_aarch64eb +#define helper_sve_asrd_s helper_sve_asrd_s_aarch64eb +#define helper_sve_asrd_d helper_sve_asrd_d_aarch64eb +#define helper_sve_mla_b helper_sve_mla_b_aarch64eb +#define helper_sve_mls_b helper_sve_mls_b_aarch64eb +#define helper_sve_mla_h helper_sve_mla_h_aarch64eb +#define helper_sve_mls_h helper_sve_mls_h_aarch64eb +#define helper_sve_mla_s helper_sve_mla_s_aarch64eb +#define helper_sve_mls_s helper_sve_mls_s_aarch64eb +#define helper_sve_mla_d helper_sve_mla_d_aarch64eb +#define helper_sve_mls_d helper_sve_mls_d_aarch64eb +#define helper_sve_index_b helper_sve_index_b_aarch64eb +#define helper_sve_index_h helper_sve_index_h_aarch64eb +#define helper_sve_index_s helper_sve_index_s_aarch64eb +#define helper_sve_index_d helper_sve_index_d_aarch64eb +#define helper_sve_adr_p32 
helper_sve_adr_p32_aarch64eb +#define helper_sve_adr_p64 helper_sve_adr_p64_aarch64eb +#define helper_sve_adr_s32 helper_sve_adr_s32_aarch64eb +#define helper_sve_adr_u32 helper_sve_adr_u32_aarch64eb +#define helper_sve_fexpa_h helper_sve_fexpa_h_aarch64eb +#define helper_sve_fexpa_s helper_sve_fexpa_s_aarch64eb +#define helper_sve_fexpa_d helper_sve_fexpa_d_aarch64eb +#define helper_sve_ftssel_h helper_sve_ftssel_h_aarch64eb +#define helper_sve_ftssel_s helper_sve_ftssel_s_aarch64eb +#define helper_sve_ftssel_d helper_sve_ftssel_d_aarch64eb +#define helper_sve_sqaddi_b helper_sve_sqaddi_b_aarch64eb +#define helper_sve_sqaddi_h helper_sve_sqaddi_h_aarch64eb +#define helper_sve_sqaddi_s helper_sve_sqaddi_s_aarch64eb +#define helper_sve_sqaddi_d helper_sve_sqaddi_d_aarch64eb +#define helper_sve_uqaddi_b helper_sve_uqaddi_b_aarch64eb +#define helper_sve_uqaddi_h helper_sve_uqaddi_h_aarch64eb +#define helper_sve_uqaddi_s helper_sve_uqaddi_s_aarch64eb +#define helper_sve_uqaddi_d helper_sve_uqaddi_d_aarch64eb +#define helper_sve_uqsubi_d helper_sve_uqsubi_d_aarch64eb +#define helper_sve_cpy_m_b helper_sve_cpy_m_b_aarch64eb +#define helper_sve_cpy_m_h helper_sve_cpy_m_h_aarch64eb +#define helper_sve_cpy_m_s helper_sve_cpy_m_s_aarch64eb +#define helper_sve_cpy_m_d helper_sve_cpy_m_d_aarch64eb +#define helper_sve_cpy_z_b helper_sve_cpy_z_b_aarch64eb +#define helper_sve_cpy_z_h helper_sve_cpy_z_h_aarch64eb +#define helper_sve_cpy_z_s helper_sve_cpy_z_s_aarch64eb +#define helper_sve_cpy_z_d helper_sve_cpy_z_d_aarch64eb +#define helper_sve_ext helper_sve_ext_aarch64eb +#define helper_sve_insr_b helper_sve_insr_b_aarch64eb +#define helper_sve_insr_h helper_sve_insr_h_aarch64eb +#define helper_sve_insr_s helper_sve_insr_s_aarch64eb +#define helper_sve_insr_d helper_sve_insr_d_aarch64eb +#define helper_sve_rev_b helper_sve_rev_b_aarch64eb +#define helper_sve_rev_h helper_sve_rev_h_aarch64eb +#define helper_sve_rev_s helper_sve_rev_s_aarch64eb +#define helper_sve_rev_d helper_sve_rev_d_aarch64eb +#define helper_sve_tbl_b helper_sve_tbl_b_aarch64eb +#define helper_sve_tbl_h helper_sve_tbl_h_aarch64eb +#define helper_sve_tbl_s helper_sve_tbl_s_aarch64eb +#define helper_sve_tbl_d helper_sve_tbl_d_aarch64eb +#define helper_sve_sunpk_h helper_sve_sunpk_h_aarch64eb +#define helper_sve_sunpk_s helper_sve_sunpk_s_aarch64eb +#define helper_sve_sunpk_d helper_sve_sunpk_d_aarch64eb +#define helper_sve_uunpk_h helper_sve_uunpk_h_aarch64eb +#define helper_sve_uunpk_s helper_sve_uunpk_s_aarch64eb +#define helper_sve_uunpk_d helper_sve_uunpk_d_aarch64eb +#define helper_sve_zip_p helper_sve_zip_p_aarch64eb +#define helper_sve_uzp_p helper_sve_uzp_p_aarch64eb +#define helper_sve_trn_p helper_sve_trn_p_aarch64eb +#define helper_sve_rev_p helper_sve_rev_p_aarch64eb +#define helper_sve_punpk_p helper_sve_punpk_p_aarch64eb +#define helper_sve_zip_b helper_sve_zip_b_aarch64eb +#define helper_sve_zip_h helper_sve_zip_h_aarch64eb +#define helper_sve_zip_s helper_sve_zip_s_aarch64eb +#define helper_sve_zip_d helper_sve_zip_d_aarch64eb +#define helper_sve_uzp_b helper_sve_uzp_b_aarch64eb +#define helper_sve_uzp_h helper_sve_uzp_h_aarch64eb +#define helper_sve_uzp_s helper_sve_uzp_s_aarch64eb +#define helper_sve_uzp_d helper_sve_uzp_d_aarch64eb +#define helper_sve_trn_b helper_sve_trn_b_aarch64eb +#define helper_sve_trn_h helper_sve_trn_h_aarch64eb +#define helper_sve_trn_s helper_sve_trn_s_aarch64eb +#define helper_sve_trn_d helper_sve_trn_d_aarch64eb +#define helper_sve_compact_s helper_sve_compact_s_aarch64eb +#define 
helper_sve_compact_d helper_sve_compact_d_aarch64eb +#define helper_sve_last_active_element helper_sve_last_active_element_aarch64eb +#define helper_sve_splice helper_sve_splice_aarch64eb +#define helper_sve_sel_zpzz_b helper_sve_sel_zpzz_b_aarch64eb +#define helper_sve_sel_zpzz_h helper_sve_sel_zpzz_h_aarch64eb +#define helper_sve_sel_zpzz_s helper_sve_sel_zpzz_s_aarch64eb +#define helper_sve_sel_zpzz_d helper_sve_sel_zpzz_d_aarch64eb +#define helper_sve_cmpeq_ppzz_b helper_sve_cmpeq_ppzz_b_aarch64eb +#define helper_sve_cmpeq_ppzz_h helper_sve_cmpeq_ppzz_h_aarch64eb +#define helper_sve_cmpeq_ppzz_s helper_sve_cmpeq_ppzz_s_aarch64eb +#define helper_sve_cmpeq_ppzz_d helper_sve_cmpeq_ppzz_d_aarch64eb +#define helper_sve_cmpne_ppzz_b helper_sve_cmpne_ppzz_b_aarch64eb +#define helper_sve_cmpne_ppzz_h helper_sve_cmpne_ppzz_h_aarch64eb +#define helper_sve_cmpne_ppzz_s helper_sve_cmpne_ppzz_s_aarch64eb +#define helper_sve_cmpne_ppzz_d helper_sve_cmpne_ppzz_d_aarch64eb +#define helper_sve_cmpgt_ppzz_b helper_sve_cmpgt_ppzz_b_aarch64eb +#define helper_sve_cmpgt_ppzz_h helper_sve_cmpgt_ppzz_h_aarch64eb +#define helper_sve_cmpgt_ppzz_s helper_sve_cmpgt_ppzz_s_aarch64eb +#define helper_sve_cmpgt_ppzz_d helper_sve_cmpgt_ppzz_d_aarch64eb +#define helper_sve_cmpge_ppzz_b helper_sve_cmpge_ppzz_b_aarch64eb +#define helper_sve_cmpge_ppzz_h helper_sve_cmpge_ppzz_h_aarch64eb +#define helper_sve_cmpge_ppzz_s helper_sve_cmpge_ppzz_s_aarch64eb +#define helper_sve_cmpge_ppzz_d helper_sve_cmpge_ppzz_d_aarch64eb +#define helper_sve_cmphi_ppzz_b helper_sve_cmphi_ppzz_b_aarch64eb +#define helper_sve_cmphi_ppzz_h helper_sve_cmphi_ppzz_h_aarch64eb +#define helper_sve_cmphi_ppzz_s helper_sve_cmphi_ppzz_s_aarch64eb +#define helper_sve_cmphi_ppzz_d helper_sve_cmphi_ppzz_d_aarch64eb +#define helper_sve_cmphs_ppzz_b helper_sve_cmphs_ppzz_b_aarch64eb +#define helper_sve_cmphs_ppzz_h helper_sve_cmphs_ppzz_h_aarch64eb +#define helper_sve_cmphs_ppzz_s helper_sve_cmphs_ppzz_s_aarch64eb +#define helper_sve_cmphs_ppzz_d helper_sve_cmphs_ppzz_d_aarch64eb +#define helper_sve_cmpeq_ppzw_b helper_sve_cmpeq_ppzw_b_aarch64eb +#define helper_sve_cmpeq_ppzw_h helper_sve_cmpeq_ppzw_h_aarch64eb +#define helper_sve_cmpeq_ppzw_s helper_sve_cmpeq_ppzw_s_aarch64eb +#define helper_sve_cmpne_ppzw_b helper_sve_cmpne_ppzw_b_aarch64eb +#define helper_sve_cmpne_ppzw_h helper_sve_cmpne_ppzw_h_aarch64eb +#define helper_sve_cmpne_ppzw_s helper_sve_cmpne_ppzw_s_aarch64eb +#define helper_sve_cmpgt_ppzw_b helper_sve_cmpgt_ppzw_b_aarch64eb +#define helper_sve_cmpgt_ppzw_h helper_sve_cmpgt_ppzw_h_aarch64eb +#define helper_sve_cmpgt_ppzw_s helper_sve_cmpgt_ppzw_s_aarch64eb +#define helper_sve_cmpge_ppzw_b helper_sve_cmpge_ppzw_b_aarch64eb +#define helper_sve_cmpge_ppzw_h helper_sve_cmpge_ppzw_h_aarch64eb +#define helper_sve_cmpge_ppzw_s helper_sve_cmpge_ppzw_s_aarch64eb +#define helper_sve_cmphi_ppzw_b helper_sve_cmphi_ppzw_b_aarch64eb +#define helper_sve_cmphi_ppzw_h helper_sve_cmphi_ppzw_h_aarch64eb +#define helper_sve_cmphi_ppzw_s helper_sve_cmphi_ppzw_s_aarch64eb +#define helper_sve_cmphs_ppzw_b helper_sve_cmphs_ppzw_b_aarch64eb +#define helper_sve_cmphs_ppzw_h helper_sve_cmphs_ppzw_h_aarch64eb +#define helper_sve_cmphs_ppzw_s helper_sve_cmphs_ppzw_s_aarch64eb +#define helper_sve_cmplt_ppzw_b helper_sve_cmplt_ppzw_b_aarch64eb +#define helper_sve_cmplt_ppzw_h helper_sve_cmplt_ppzw_h_aarch64eb +#define helper_sve_cmplt_ppzw_s helper_sve_cmplt_ppzw_s_aarch64eb +#define helper_sve_cmple_ppzw_b helper_sve_cmple_ppzw_b_aarch64eb +#define helper_sve_cmple_ppzw_h 
helper_sve_cmple_ppzw_h_aarch64eb +#define helper_sve_cmple_ppzw_s helper_sve_cmple_ppzw_s_aarch64eb +#define helper_sve_cmplo_ppzw_b helper_sve_cmplo_ppzw_b_aarch64eb +#define helper_sve_cmplo_ppzw_h helper_sve_cmplo_ppzw_h_aarch64eb +#define helper_sve_cmplo_ppzw_s helper_sve_cmplo_ppzw_s_aarch64eb +#define helper_sve_cmpls_ppzw_b helper_sve_cmpls_ppzw_b_aarch64eb +#define helper_sve_cmpls_ppzw_h helper_sve_cmpls_ppzw_h_aarch64eb +#define helper_sve_cmpls_ppzw_s helper_sve_cmpls_ppzw_s_aarch64eb +#define helper_sve_cmpeq_ppzi_b helper_sve_cmpeq_ppzi_b_aarch64eb +#define helper_sve_cmpeq_ppzi_h helper_sve_cmpeq_ppzi_h_aarch64eb +#define helper_sve_cmpeq_ppzi_s helper_sve_cmpeq_ppzi_s_aarch64eb +#define helper_sve_cmpeq_ppzi_d helper_sve_cmpeq_ppzi_d_aarch64eb +#define helper_sve_cmpne_ppzi_b helper_sve_cmpne_ppzi_b_aarch64eb +#define helper_sve_cmpne_ppzi_h helper_sve_cmpne_ppzi_h_aarch64eb +#define helper_sve_cmpne_ppzi_s helper_sve_cmpne_ppzi_s_aarch64eb +#define helper_sve_cmpne_ppzi_d helper_sve_cmpne_ppzi_d_aarch64eb +#define helper_sve_cmpgt_ppzi_b helper_sve_cmpgt_ppzi_b_aarch64eb +#define helper_sve_cmpgt_ppzi_h helper_sve_cmpgt_ppzi_h_aarch64eb +#define helper_sve_cmpgt_ppzi_s helper_sve_cmpgt_ppzi_s_aarch64eb +#define helper_sve_cmpgt_ppzi_d helper_sve_cmpgt_ppzi_d_aarch64eb +#define helper_sve_cmpge_ppzi_b helper_sve_cmpge_ppzi_b_aarch64eb +#define helper_sve_cmpge_ppzi_h helper_sve_cmpge_ppzi_h_aarch64eb +#define helper_sve_cmpge_ppzi_s helper_sve_cmpge_ppzi_s_aarch64eb +#define helper_sve_cmpge_ppzi_d helper_sve_cmpge_ppzi_d_aarch64eb +#define helper_sve_cmphi_ppzi_b helper_sve_cmphi_ppzi_b_aarch64eb +#define helper_sve_cmphi_ppzi_h helper_sve_cmphi_ppzi_h_aarch64eb +#define helper_sve_cmphi_ppzi_s helper_sve_cmphi_ppzi_s_aarch64eb +#define helper_sve_cmphi_ppzi_d helper_sve_cmphi_ppzi_d_aarch64eb +#define helper_sve_cmphs_ppzi_b helper_sve_cmphs_ppzi_b_aarch64eb +#define helper_sve_cmphs_ppzi_h helper_sve_cmphs_ppzi_h_aarch64eb +#define helper_sve_cmphs_ppzi_s helper_sve_cmphs_ppzi_s_aarch64eb +#define helper_sve_cmphs_ppzi_d helper_sve_cmphs_ppzi_d_aarch64eb +#define helper_sve_cmplt_ppzi_b helper_sve_cmplt_ppzi_b_aarch64eb +#define helper_sve_cmplt_ppzi_h helper_sve_cmplt_ppzi_h_aarch64eb +#define helper_sve_cmplt_ppzi_s helper_sve_cmplt_ppzi_s_aarch64eb +#define helper_sve_cmplt_ppzi_d helper_sve_cmplt_ppzi_d_aarch64eb +#define helper_sve_cmple_ppzi_b helper_sve_cmple_ppzi_b_aarch64eb +#define helper_sve_cmple_ppzi_h helper_sve_cmple_ppzi_h_aarch64eb +#define helper_sve_cmple_ppzi_s helper_sve_cmple_ppzi_s_aarch64eb +#define helper_sve_cmple_ppzi_d helper_sve_cmple_ppzi_d_aarch64eb +#define helper_sve_cmplo_ppzi_b helper_sve_cmplo_ppzi_b_aarch64eb +#define helper_sve_cmplo_ppzi_h helper_sve_cmplo_ppzi_h_aarch64eb +#define helper_sve_cmplo_ppzi_s helper_sve_cmplo_ppzi_s_aarch64eb +#define helper_sve_cmplo_ppzi_d helper_sve_cmplo_ppzi_d_aarch64eb +#define helper_sve_cmpls_ppzi_b helper_sve_cmpls_ppzi_b_aarch64eb +#define helper_sve_cmpls_ppzi_h helper_sve_cmpls_ppzi_h_aarch64eb +#define helper_sve_cmpls_ppzi_s helper_sve_cmpls_ppzi_s_aarch64eb +#define helper_sve_cmpls_ppzi_d helper_sve_cmpls_ppzi_d_aarch64eb +#define helper_sve_brkpa helper_sve_brkpa_aarch64eb +#define helper_sve_brkpas helper_sve_brkpas_aarch64eb +#define helper_sve_brkpb helper_sve_brkpb_aarch64eb +#define helper_sve_brkpbs helper_sve_brkpbs_aarch64eb +#define helper_sve_brka_z helper_sve_brka_z_aarch64eb +#define helper_sve_brkas_z helper_sve_brkas_z_aarch64eb +#define helper_sve_brkb_z 
helper_sve_brkb_z_aarch64eb +#define helper_sve_brkbs_z helper_sve_brkbs_z_aarch64eb +#define helper_sve_brka_m helper_sve_brka_m_aarch64eb +#define helper_sve_brkas_m helper_sve_brkas_m_aarch64eb +#define helper_sve_brkb_m helper_sve_brkb_m_aarch64eb +#define helper_sve_brkbs_m helper_sve_brkbs_m_aarch64eb +#define helper_sve_brkn helper_sve_brkn_aarch64eb +#define helper_sve_brkns helper_sve_brkns_aarch64eb +#define helper_sve_cntp helper_sve_cntp_aarch64eb +#define helper_sve_while helper_sve_while_aarch64eb +#define helper_sve_faddv_h helper_sve_faddv_h_aarch64eb +#define helper_sve_faddv_s helper_sve_faddv_s_aarch64eb +#define helper_sve_faddv_d helper_sve_faddv_d_aarch64eb +#define helper_sve_fminnmv_h helper_sve_fminnmv_h_aarch64eb +#define helper_sve_fminnmv_s helper_sve_fminnmv_s_aarch64eb +#define helper_sve_fminnmv_d helper_sve_fminnmv_d_aarch64eb +#define helper_sve_fmaxnmv_h helper_sve_fmaxnmv_h_aarch64eb +#define helper_sve_fmaxnmv_s helper_sve_fmaxnmv_s_aarch64eb +#define helper_sve_fmaxnmv_d helper_sve_fmaxnmv_d_aarch64eb +#define helper_sve_fminv_h helper_sve_fminv_h_aarch64eb +#define helper_sve_fminv_s helper_sve_fminv_s_aarch64eb +#define helper_sve_fminv_d helper_sve_fminv_d_aarch64eb +#define helper_sve_fmaxv_h helper_sve_fmaxv_h_aarch64eb +#define helper_sve_fmaxv_s helper_sve_fmaxv_s_aarch64eb +#define helper_sve_fmaxv_d helper_sve_fmaxv_d_aarch64eb +#define helper_sve_fadda_h helper_sve_fadda_h_aarch64eb +#define helper_sve_fadda_s helper_sve_fadda_s_aarch64eb +#define helper_sve_fadda_d helper_sve_fadda_d_aarch64eb +#define helper_sve_fadd_h helper_sve_fadd_h_aarch64eb +#define helper_sve_fadd_s helper_sve_fadd_s_aarch64eb +#define helper_sve_fadd_d helper_sve_fadd_d_aarch64eb +#define helper_sve_fsub_h helper_sve_fsub_h_aarch64eb +#define helper_sve_fsub_s helper_sve_fsub_s_aarch64eb +#define helper_sve_fsub_d helper_sve_fsub_d_aarch64eb +#define helper_sve_fmul_h helper_sve_fmul_h_aarch64eb +#define helper_sve_fmul_s helper_sve_fmul_s_aarch64eb +#define helper_sve_fmul_d helper_sve_fmul_d_aarch64eb +#define helper_sve_fdiv_h helper_sve_fdiv_h_aarch64eb +#define helper_sve_fdiv_s helper_sve_fdiv_s_aarch64eb +#define helper_sve_fdiv_d helper_sve_fdiv_d_aarch64eb +#define helper_sve_fmin_h helper_sve_fmin_h_aarch64eb +#define helper_sve_fmin_s helper_sve_fmin_s_aarch64eb +#define helper_sve_fmin_d helper_sve_fmin_d_aarch64eb +#define helper_sve_fmax_h helper_sve_fmax_h_aarch64eb +#define helper_sve_fmax_s helper_sve_fmax_s_aarch64eb +#define helper_sve_fmax_d helper_sve_fmax_d_aarch64eb +#define helper_sve_fminnum_h helper_sve_fminnum_h_aarch64eb +#define helper_sve_fminnum_s helper_sve_fminnum_s_aarch64eb +#define helper_sve_fminnum_d helper_sve_fminnum_d_aarch64eb +#define helper_sve_fmaxnum_h helper_sve_fmaxnum_h_aarch64eb +#define helper_sve_fmaxnum_s helper_sve_fmaxnum_s_aarch64eb +#define helper_sve_fmaxnum_d helper_sve_fmaxnum_d_aarch64eb +#define helper_sve_fabd_h helper_sve_fabd_h_aarch64eb +#define helper_sve_fabd_s helper_sve_fabd_s_aarch64eb +#define helper_sve_fabd_d helper_sve_fabd_d_aarch64eb +#define helper_sve_fscalbn_h helper_sve_fscalbn_h_aarch64eb +#define helper_sve_fscalbn_s helper_sve_fscalbn_s_aarch64eb +#define helper_sve_fscalbn_d helper_sve_fscalbn_d_aarch64eb +#define helper_sve_fmulx_h helper_sve_fmulx_h_aarch64eb +#define helper_sve_fmulx_s helper_sve_fmulx_s_aarch64eb +#define helper_sve_fmulx_d helper_sve_fmulx_d_aarch64eb +#define helper_sve_fadds_h helper_sve_fadds_h_aarch64eb +#define helper_sve_fadds_s 
helper_sve_fadds_s_aarch64eb +#define helper_sve_fadds_d helper_sve_fadds_d_aarch64eb +#define helper_sve_fsubs_h helper_sve_fsubs_h_aarch64eb +#define helper_sve_fsubs_s helper_sve_fsubs_s_aarch64eb +#define helper_sve_fsubs_d helper_sve_fsubs_d_aarch64eb +#define helper_sve_fmuls_h helper_sve_fmuls_h_aarch64eb +#define helper_sve_fmuls_s helper_sve_fmuls_s_aarch64eb +#define helper_sve_fmuls_d helper_sve_fmuls_d_aarch64eb +#define helper_sve_fsubrs_h helper_sve_fsubrs_h_aarch64eb +#define helper_sve_fsubrs_s helper_sve_fsubrs_s_aarch64eb +#define helper_sve_fsubrs_d helper_sve_fsubrs_d_aarch64eb +#define helper_sve_fmaxnms_h helper_sve_fmaxnms_h_aarch64eb +#define helper_sve_fmaxnms_s helper_sve_fmaxnms_s_aarch64eb +#define helper_sve_fmaxnms_d helper_sve_fmaxnms_d_aarch64eb +#define helper_sve_fminnms_h helper_sve_fminnms_h_aarch64eb +#define helper_sve_fminnms_s helper_sve_fminnms_s_aarch64eb +#define helper_sve_fminnms_d helper_sve_fminnms_d_aarch64eb +#define helper_sve_fmaxs_h helper_sve_fmaxs_h_aarch64eb +#define helper_sve_fmaxs_s helper_sve_fmaxs_s_aarch64eb +#define helper_sve_fmaxs_d helper_sve_fmaxs_d_aarch64eb +#define helper_sve_fmins_h helper_sve_fmins_h_aarch64eb +#define helper_sve_fmins_s helper_sve_fmins_s_aarch64eb +#define helper_sve_fmins_d helper_sve_fmins_d_aarch64eb +#define helper_sve_fcvt_sh helper_sve_fcvt_sh_aarch64eb +#define helper_sve_fcvt_hs helper_sve_fcvt_hs_aarch64eb +#define helper_sve_fcvt_dh helper_sve_fcvt_dh_aarch64eb +#define helper_sve_fcvt_hd helper_sve_fcvt_hd_aarch64eb +#define helper_sve_fcvt_ds helper_sve_fcvt_ds_aarch64eb +#define helper_sve_fcvt_sd helper_sve_fcvt_sd_aarch64eb +#define helper_sve_fcvtzs_hh helper_sve_fcvtzs_hh_aarch64eb +#define helper_sve_fcvtzs_hs helper_sve_fcvtzs_hs_aarch64eb +#define helper_sve_fcvtzs_ss helper_sve_fcvtzs_ss_aarch64eb +#define helper_sve_fcvtzs_hd helper_sve_fcvtzs_hd_aarch64eb +#define helper_sve_fcvtzs_sd helper_sve_fcvtzs_sd_aarch64eb +#define helper_sve_fcvtzs_ds helper_sve_fcvtzs_ds_aarch64eb +#define helper_sve_fcvtzs_dd helper_sve_fcvtzs_dd_aarch64eb +#define helper_sve_fcvtzu_hh helper_sve_fcvtzu_hh_aarch64eb +#define helper_sve_fcvtzu_hs helper_sve_fcvtzu_hs_aarch64eb +#define helper_sve_fcvtzu_ss helper_sve_fcvtzu_ss_aarch64eb +#define helper_sve_fcvtzu_hd helper_sve_fcvtzu_hd_aarch64eb +#define helper_sve_fcvtzu_sd helper_sve_fcvtzu_sd_aarch64eb +#define helper_sve_fcvtzu_ds helper_sve_fcvtzu_ds_aarch64eb +#define helper_sve_fcvtzu_dd helper_sve_fcvtzu_dd_aarch64eb +#define helper_sve_frint_h helper_sve_frint_h_aarch64eb +#define helper_sve_frint_s helper_sve_frint_s_aarch64eb +#define helper_sve_frint_d helper_sve_frint_d_aarch64eb +#define helper_sve_frintx_h helper_sve_frintx_h_aarch64eb +#define helper_sve_frintx_s helper_sve_frintx_s_aarch64eb +#define helper_sve_frintx_d helper_sve_frintx_d_aarch64eb +#define helper_sve_frecpx_h helper_sve_frecpx_h_aarch64eb +#define helper_sve_frecpx_s helper_sve_frecpx_s_aarch64eb +#define helper_sve_frecpx_d helper_sve_frecpx_d_aarch64eb +#define helper_sve_fsqrt_h helper_sve_fsqrt_h_aarch64eb +#define helper_sve_fsqrt_s helper_sve_fsqrt_s_aarch64eb +#define helper_sve_fsqrt_d helper_sve_fsqrt_d_aarch64eb +#define helper_sve_scvt_hh helper_sve_scvt_hh_aarch64eb +#define helper_sve_scvt_sh helper_sve_scvt_sh_aarch64eb +#define helper_sve_scvt_ss helper_sve_scvt_ss_aarch64eb +#define helper_sve_scvt_sd helper_sve_scvt_sd_aarch64eb +#define helper_sve_scvt_dh helper_sve_scvt_dh_aarch64eb +#define helper_sve_scvt_ds helper_sve_scvt_ds_aarch64eb 
+#define helper_sve_scvt_dd helper_sve_scvt_dd_aarch64eb +#define helper_sve_ucvt_hh helper_sve_ucvt_hh_aarch64eb +#define helper_sve_ucvt_sh helper_sve_ucvt_sh_aarch64eb +#define helper_sve_ucvt_ss helper_sve_ucvt_ss_aarch64eb +#define helper_sve_ucvt_sd helper_sve_ucvt_sd_aarch64eb +#define helper_sve_ucvt_dh helper_sve_ucvt_dh_aarch64eb +#define helper_sve_ucvt_ds helper_sve_ucvt_ds_aarch64eb +#define helper_sve_ucvt_dd helper_sve_ucvt_dd_aarch64eb +#define helper_sve_fmla_zpzzz_h helper_sve_fmla_zpzzz_h_aarch64eb +#define helper_sve_fmls_zpzzz_h helper_sve_fmls_zpzzz_h_aarch64eb +#define helper_sve_fnmla_zpzzz_h helper_sve_fnmla_zpzzz_h_aarch64eb +#define helper_sve_fnmls_zpzzz_h helper_sve_fnmls_zpzzz_h_aarch64eb +#define helper_sve_fmla_zpzzz_s helper_sve_fmla_zpzzz_s_aarch64eb +#define helper_sve_fmls_zpzzz_s helper_sve_fmls_zpzzz_s_aarch64eb +#define helper_sve_fnmla_zpzzz_s helper_sve_fnmla_zpzzz_s_aarch64eb +#define helper_sve_fnmls_zpzzz_s helper_sve_fnmls_zpzzz_s_aarch64eb +#define helper_sve_fmla_zpzzz_d helper_sve_fmla_zpzzz_d_aarch64eb +#define helper_sve_fmls_zpzzz_d helper_sve_fmls_zpzzz_d_aarch64eb +#define helper_sve_fnmla_zpzzz_d helper_sve_fnmla_zpzzz_d_aarch64eb +#define helper_sve_fnmls_zpzzz_d helper_sve_fnmls_zpzzz_d_aarch64eb +#define helper_sve_fcmge_h helper_sve_fcmge_h_aarch64eb +#define helper_sve_fcmge_s helper_sve_fcmge_s_aarch64eb +#define helper_sve_fcmge_d helper_sve_fcmge_d_aarch64eb +#define helper_sve_fcmgt_h helper_sve_fcmgt_h_aarch64eb +#define helper_sve_fcmgt_s helper_sve_fcmgt_s_aarch64eb +#define helper_sve_fcmgt_d helper_sve_fcmgt_d_aarch64eb +#define helper_sve_fcmeq_h helper_sve_fcmeq_h_aarch64eb +#define helper_sve_fcmeq_s helper_sve_fcmeq_s_aarch64eb +#define helper_sve_fcmeq_d helper_sve_fcmeq_d_aarch64eb +#define helper_sve_fcmne_h helper_sve_fcmne_h_aarch64eb +#define helper_sve_fcmne_s helper_sve_fcmne_s_aarch64eb +#define helper_sve_fcmne_d helper_sve_fcmne_d_aarch64eb +#define helper_sve_fcmuo_h helper_sve_fcmuo_h_aarch64eb +#define helper_sve_fcmuo_s helper_sve_fcmuo_s_aarch64eb +#define helper_sve_fcmuo_d helper_sve_fcmuo_d_aarch64eb +#define helper_sve_facge_h helper_sve_facge_h_aarch64eb +#define helper_sve_facge_s helper_sve_facge_s_aarch64eb +#define helper_sve_facge_d helper_sve_facge_d_aarch64eb +#define helper_sve_facgt_h helper_sve_facgt_h_aarch64eb +#define helper_sve_facgt_s helper_sve_facgt_s_aarch64eb +#define helper_sve_facgt_d helper_sve_facgt_d_aarch64eb +#define helper_sve_fcmge0_h helper_sve_fcmge0_h_aarch64eb +#define helper_sve_fcmge0_s helper_sve_fcmge0_s_aarch64eb +#define helper_sve_fcmge0_d helper_sve_fcmge0_d_aarch64eb +#define helper_sve_fcmgt0_h helper_sve_fcmgt0_h_aarch64eb +#define helper_sve_fcmgt0_s helper_sve_fcmgt0_s_aarch64eb +#define helper_sve_fcmgt0_d helper_sve_fcmgt0_d_aarch64eb +#define helper_sve_fcmle0_h helper_sve_fcmle0_h_aarch64eb +#define helper_sve_fcmle0_s helper_sve_fcmle0_s_aarch64eb +#define helper_sve_fcmle0_d helper_sve_fcmle0_d_aarch64eb +#define helper_sve_fcmlt0_h helper_sve_fcmlt0_h_aarch64eb +#define helper_sve_fcmlt0_s helper_sve_fcmlt0_s_aarch64eb +#define helper_sve_fcmlt0_d helper_sve_fcmlt0_d_aarch64eb +#define helper_sve_fcmeq0_h helper_sve_fcmeq0_h_aarch64eb +#define helper_sve_fcmeq0_s helper_sve_fcmeq0_s_aarch64eb +#define helper_sve_fcmeq0_d helper_sve_fcmeq0_d_aarch64eb +#define helper_sve_fcmne0_h helper_sve_fcmne0_h_aarch64eb +#define helper_sve_fcmne0_s helper_sve_fcmne0_s_aarch64eb +#define helper_sve_fcmne0_d helper_sve_fcmne0_d_aarch64eb +#define 
helper_sve_ftmad_h helper_sve_ftmad_h_aarch64eb +#define helper_sve_ftmad_s helper_sve_ftmad_s_aarch64eb +#define helper_sve_ftmad_d helper_sve_ftmad_d_aarch64eb +#define helper_sve_fcadd_h helper_sve_fcadd_h_aarch64eb +#define helper_sve_fcadd_s helper_sve_fcadd_s_aarch64eb +#define helper_sve_fcadd_d helper_sve_fcadd_d_aarch64eb +#define helper_sve_fcmla_zpzzz_h helper_sve_fcmla_zpzzz_h_aarch64eb +#define helper_sve_fcmla_zpzzz_s helper_sve_fcmla_zpzzz_s_aarch64eb +#define helper_sve_fcmla_zpzzz_d helper_sve_fcmla_zpzzz_d_aarch64eb +#define helper_sve_ld1bb_r helper_sve_ld1bb_r_aarch64eb +#define helper_sve_ld1bhu_r helper_sve_ld1bhu_r_aarch64eb +#define helper_sve_ld1bhs_r helper_sve_ld1bhs_r_aarch64eb +#define helper_sve_ld1bsu_r helper_sve_ld1bsu_r_aarch64eb +#define helper_sve_ld1bss_r helper_sve_ld1bss_r_aarch64eb +#define helper_sve_ld1bdu_r helper_sve_ld1bdu_r_aarch64eb +#define helper_sve_ld1bds_r helper_sve_ld1bds_r_aarch64eb +#define helper_sve_ld1hh_le_r helper_sve_ld1hh_le_r_aarch64eb +#define helper_sve_ld1hh_be_r helper_sve_ld1hh_be_r_aarch64eb +#define helper_sve_ld1hsu_le_r helper_sve_ld1hsu_le_r_aarch64eb +#define helper_sve_ld1hsu_be_r helper_sve_ld1hsu_be_r_aarch64eb +#define helper_sve_ld1hss_le_r helper_sve_ld1hss_le_r_aarch64eb +#define helper_sve_ld1hss_be_r helper_sve_ld1hss_be_r_aarch64eb +#define helper_sve_ld1hdu_le_r helper_sve_ld1hdu_le_r_aarch64eb +#define helper_sve_ld1hdu_be_r helper_sve_ld1hdu_be_r_aarch64eb +#define helper_sve_ld1hds_le_r helper_sve_ld1hds_le_r_aarch64eb +#define helper_sve_ld1hds_be_r helper_sve_ld1hds_be_r_aarch64eb +#define helper_sve_ld1ss_le_r helper_sve_ld1ss_le_r_aarch64eb +#define helper_sve_ld1ss_be_r helper_sve_ld1ss_be_r_aarch64eb +#define helper_sve_ld1sdu_le_r helper_sve_ld1sdu_le_r_aarch64eb +#define helper_sve_ld1sdu_be_r helper_sve_ld1sdu_be_r_aarch64eb +#define helper_sve_ld1sds_le_r helper_sve_ld1sds_le_r_aarch64eb +#define helper_sve_ld1sds_be_r helper_sve_ld1sds_be_r_aarch64eb +#define helper_sve_ld1dd_le_r helper_sve_ld1dd_le_r_aarch64eb +#define helper_sve_ld1dd_be_r helper_sve_ld1dd_be_r_aarch64eb +#define helper_sve_ld2bb_r helper_sve_ld2bb_r_aarch64eb +#define helper_sve_ld3bb_r helper_sve_ld3bb_r_aarch64eb +#define helper_sve_ld4bb_r helper_sve_ld4bb_r_aarch64eb +#define helper_sve_ld2hh_le_r helper_sve_ld2hh_le_r_aarch64eb +#define helper_sve_ld2hh_be_r helper_sve_ld2hh_be_r_aarch64eb +#define helper_sve_ld3hh_le_r helper_sve_ld3hh_le_r_aarch64eb +#define helper_sve_ld3hh_be_r helper_sve_ld3hh_be_r_aarch64eb +#define helper_sve_ld4hh_le_r helper_sve_ld4hh_le_r_aarch64eb +#define helper_sve_ld4hh_be_r helper_sve_ld4hh_be_r_aarch64eb +#define helper_sve_ld2ss_le_r helper_sve_ld2ss_le_r_aarch64eb +#define helper_sve_ld2ss_be_r helper_sve_ld2ss_be_r_aarch64eb +#define helper_sve_ld3ss_le_r helper_sve_ld3ss_le_r_aarch64eb +#define helper_sve_ld3ss_be_r helper_sve_ld3ss_be_r_aarch64eb +#define helper_sve_ld4ss_le_r helper_sve_ld4ss_le_r_aarch64eb +#define helper_sve_ld4ss_be_r helper_sve_ld4ss_be_r_aarch64eb +#define helper_sve_ld2dd_le_r helper_sve_ld2dd_le_r_aarch64eb +#define helper_sve_ld2dd_be_r helper_sve_ld2dd_be_r_aarch64eb +#define helper_sve_ld3dd_le_r helper_sve_ld3dd_le_r_aarch64eb +#define helper_sve_ld3dd_be_r helper_sve_ld3dd_be_r_aarch64eb +#define helper_sve_ld4dd_le_r helper_sve_ld4dd_le_r_aarch64eb +#define helper_sve_ld4dd_be_r helper_sve_ld4dd_be_r_aarch64eb +#define helper_sve_ldff1bb_r helper_sve_ldff1bb_r_aarch64eb +#define helper_sve_ldnf1bb_r helper_sve_ldnf1bb_r_aarch64eb +#define 
helper_sve_ldff1bhu_r helper_sve_ldff1bhu_r_aarch64eb +#define helper_sve_ldnf1bhu_r helper_sve_ldnf1bhu_r_aarch64eb +#define helper_sve_ldff1bhs_r helper_sve_ldff1bhs_r_aarch64eb +#define helper_sve_ldnf1bhs_r helper_sve_ldnf1bhs_r_aarch64eb +#define helper_sve_ldff1bsu_r helper_sve_ldff1bsu_r_aarch64eb +#define helper_sve_ldnf1bsu_r helper_sve_ldnf1bsu_r_aarch64eb +#define helper_sve_ldff1bss_r helper_sve_ldff1bss_r_aarch64eb +#define helper_sve_ldnf1bss_r helper_sve_ldnf1bss_r_aarch64eb +#define helper_sve_ldff1bdu_r helper_sve_ldff1bdu_r_aarch64eb +#define helper_sve_ldnf1bdu_r helper_sve_ldnf1bdu_r_aarch64eb +#define helper_sve_ldff1bds_r helper_sve_ldff1bds_r_aarch64eb +#define helper_sve_ldnf1bds_r helper_sve_ldnf1bds_r_aarch64eb +#define helper_sve_ldff1hh_le_r helper_sve_ldff1hh_le_r_aarch64eb +#define helper_sve_ldnf1hh_le_r helper_sve_ldnf1hh_le_r_aarch64eb +#define helper_sve_ldff1hh_be_r helper_sve_ldff1hh_be_r_aarch64eb +#define helper_sve_ldnf1hh_be_r helper_sve_ldnf1hh_be_r_aarch64eb +#define helper_sve_ldff1hsu_le_r helper_sve_ldff1hsu_le_r_aarch64eb +#define helper_sve_ldnf1hsu_le_r helper_sve_ldnf1hsu_le_r_aarch64eb +#define helper_sve_ldff1hsu_be_r helper_sve_ldff1hsu_be_r_aarch64eb +#define helper_sve_ldnf1hsu_be_r helper_sve_ldnf1hsu_be_r_aarch64eb +#define helper_sve_ldff1hss_le_r helper_sve_ldff1hss_le_r_aarch64eb +#define helper_sve_ldnf1hss_le_r helper_sve_ldnf1hss_le_r_aarch64eb +#define helper_sve_ldff1hss_be_r helper_sve_ldff1hss_be_r_aarch64eb +#define helper_sve_ldnf1hss_be_r helper_sve_ldnf1hss_be_r_aarch64eb +#define helper_sve_ldff1hdu_le_r helper_sve_ldff1hdu_le_r_aarch64eb +#define helper_sve_ldnf1hdu_le_r helper_sve_ldnf1hdu_le_r_aarch64eb +#define helper_sve_ldff1hdu_be_r helper_sve_ldff1hdu_be_r_aarch64eb +#define helper_sve_ldnf1hdu_be_r helper_sve_ldnf1hdu_be_r_aarch64eb +#define helper_sve_ldff1hds_le_r helper_sve_ldff1hds_le_r_aarch64eb +#define helper_sve_ldnf1hds_le_r helper_sve_ldnf1hds_le_r_aarch64eb +#define helper_sve_ldff1hds_be_r helper_sve_ldff1hds_be_r_aarch64eb +#define helper_sve_ldnf1hds_be_r helper_sve_ldnf1hds_be_r_aarch64eb +#define helper_sve_ldff1ss_le_r helper_sve_ldff1ss_le_r_aarch64eb +#define helper_sve_ldnf1ss_le_r helper_sve_ldnf1ss_le_r_aarch64eb +#define helper_sve_ldff1ss_be_r helper_sve_ldff1ss_be_r_aarch64eb +#define helper_sve_ldnf1ss_be_r helper_sve_ldnf1ss_be_r_aarch64eb +#define helper_sve_ldff1sdu_le_r helper_sve_ldff1sdu_le_r_aarch64eb +#define helper_sve_ldnf1sdu_le_r helper_sve_ldnf1sdu_le_r_aarch64eb +#define helper_sve_ldff1sdu_be_r helper_sve_ldff1sdu_be_r_aarch64eb +#define helper_sve_ldnf1sdu_be_r helper_sve_ldnf1sdu_be_r_aarch64eb +#define helper_sve_ldff1sds_le_r helper_sve_ldff1sds_le_r_aarch64eb +#define helper_sve_ldnf1sds_le_r helper_sve_ldnf1sds_le_r_aarch64eb +#define helper_sve_ldff1sds_be_r helper_sve_ldff1sds_be_r_aarch64eb +#define helper_sve_ldnf1sds_be_r helper_sve_ldnf1sds_be_r_aarch64eb +#define helper_sve_ldff1dd_le_r helper_sve_ldff1dd_le_r_aarch64eb +#define helper_sve_ldnf1dd_le_r helper_sve_ldnf1dd_le_r_aarch64eb +#define helper_sve_ldff1dd_be_r helper_sve_ldff1dd_be_r_aarch64eb +#define helper_sve_ldnf1dd_be_r helper_sve_ldnf1dd_be_r_aarch64eb +#define helper_sve_st1bb_r helper_sve_st1bb_r_aarch64eb +#define helper_sve_st1bh_r helper_sve_st1bh_r_aarch64eb +#define helper_sve_st1bs_r helper_sve_st1bs_r_aarch64eb +#define helper_sve_st1bd_r helper_sve_st1bd_r_aarch64eb +#define helper_sve_st2bb_r helper_sve_st2bb_r_aarch64eb +#define helper_sve_st3bb_r helper_sve_st3bb_r_aarch64eb 
+#define helper_sve_st4bb_r helper_sve_st4bb_r_aarch64eb +#define helper_sve_st1hh_le_r helper_sve_st1hh_le_r_aarch64eb +#define helper_sve_st1hh_be_r helper_sve_st1hh_be_r_aarch64eb +#define helper_sve_st1hs_le_r helper_sve_st1hs_le_r_aarch64eb +#define helper_sve_st1hs_be_r helper_sve_st1hs_be_r_aarch64eb +#define helper_sve_st1hd_le_r helper_sve_st1hd_le_r_aarch64eb +#define helper_sve_st1hd_be_r helper_sve_st1hd_be_r_aarch64eb +#define helper_sve_st2hh_le_r helper_sve_st2hh_le_r_aarch64eb +#define helper_sve_st2hh_be_r helper_sve_st2hh_be_r_aarch64eb +#define helper_sve_st3hh_le_r helper_sve_st3hh_le_r_aarch64eb +#define helper_sve_st3hh_be_r helper_sve_st3hh_be_r_aarch64eb +#define helper_sve_st4hh_le_r helper_sve_st4hh_le_r_aarch64eb +#define helper_sve_st4hh_be_r helper_sve_st4hh_be_r_aarch64eb +#define helper_sve_st1ss_le_r helper_sve_st1ss_le_r_aarch64eb +#define helper_sve_st1ss_be_r helper_sve_st1ss_be_r_aarch64eb +#define helper_sve_st1sd_le_r helper_sve_st1sd_le_r_aarch64eb +#define helper_sve_st1sd_be_r helper_sve_st1sd_be_r_aarch64eb +#define helper_sve_st2ss_le_r helper_sve_st2ss_le_r_aarch64eb +#define helper_sve_st2ss_be_r helper_sve_st2ss_be_r_aarch64eb +#define helper_sve_st3ss_le_r helper_sve_st3ss_le_r_aarch64eb +#define helper_sve_st3ss_be_r helper_sve_st3ss_be_r_aarch64eb +#define helper_sve_st4ss_le_r helper_sve_st4ss_le_r_aarch64eb +#define helper_sve_st4ss_be_r helper_sve_st4ss_be_r_aarch64eb +#define helper_sve_st1dd_le_r helper_sve_st1dd_le_r_aarch64eb +#define helper_sve_st1dd_be_r helper_sve_st1dd_be_r_aarch64eb +#define helper_sve_st2dd_le_r helper_sve_st2dd_le_r_aarch64eb +#define helper_sve_st2dd_be_r helper_sve_st2dd_be_r_aarch64eb +#define helper_sve_st3dd_le_r helper_sve_st3dd_le_r_aarch64eb +#define helper_sve_st3dd_be_r helper_sve_st3dd_be_r_aarch64eb +#define helper_sve_st4dd_le_r helper_sve_st4dd_le_r_aarch64eb +#define helper_sve_st4dd_be_r helper_sve_st4dd_be_r_aarch64eb +#define helper_sve_ldbsu_zsu helper_sve_ldbsu_zsu_aarch64eb +#define helper_sve_ldbsu_zss helper_sve_ldbsu_zss_aarch64eb +#define helper_sve_ldbdu_zsu helper_sve_ldbdu_zsu_aarch64eb +#define helper_sve_ldbdu_zss helper_sve_ldbdu_zss_aarch64eb +#define helper_sve_ldbdu_zd helper_sve_ldbdu_zd_aarch64eb +#define helper_sve_ldbss_zsu helper_sve_ldbss_zsu_aarch64eb +#define helper_sve_ldbss_zss helper_sve_ldbss_zss_aarch64eb +#define helper_sve_ldbds_zsu helper_sve_ldbds_zsu_aarch64eb +#define helper_sve_ldbds_zss helper_sve_ldbds_zss_aarch64eb +#define helper_sve_ldbds_zd helper_sve_ldbds_zd_aarch64eb +#define helper_sve_ldhsu_le_zsu helper_sve_ldhsu_le_zsu_aarch64eb +#define helper_sve_ldhsu_le_zss helper_sve_ldhsu_le_zss_aarch64eb +#define helper_sve_ldhdu_le_zsu helper_sve_ldhdu_le_zsu_aarch64eb +#define helper_sve_ldhdu_le_zss helper_sve_ldhdu_le_zss_aarch64eb +#define helper_sve_ldhdu_le_zd helper_sve_ldhdu_le_zd_aarch64eb +#define helper_sve_ldhsu_be_zsu helper_sve_ldhsu_be_zsu_aarch64eb +#define helper_sve_ldhsu_be_zss helper_sve_ldhsu_be_zss_aarch64eb +#define helper_sve_ldhdu_be_zsu helper_sve_ldhdu_be_zsu_aarch64eb +#define helper_sve_ldhdu_be_zss helper_sve_ldhdu_be_zss_aarch64eb +#define helper_sve_ldhdu_be_zd helper_sve_ldhdu_be_zd_aarch64eb +#define helper_sve_ldhss_le_zsu helper_sve_ldhss_le_zsu_aarch64eb +#define helper_sve_ldhss_le_zss helper_sve_ldhss_le_zss_aarch64eb +#define helper_sve_ldhds_le_zsu helper_sve_ldhds_le_zsu_aarch64eb +#define helper_sve_ldhds_le_zss helper_sve_ldhds_le_zss_aarch64eb +#define helper_sve_ldhds_le_zd helper_sve_ldhds_le_zd_aarch64eb 
+#define helper_sve_ldhss_be_zsu helper_sve_ldhss_be_zsu_aarch64eb +#define helper_sve_ldhss_be_zss helper_sve_ldhss_be_zss_aarch64eb +#define helper_sve_ldhds_be_zsu helper_sve_ldhds_be_zsu_aarch64eb +#define helper_sve_ldhds_be_zss helper_sve_ldhds_be_zss_aarch64eb +#define helper_sve_ldhds_be_zd helper_sve_ldhds_be_zd_aarch64eb +#define helper_sve_ldss_le_zsu helper_sve_ldss_le_zsu_aarch64eb +#define helper_sve_ldss_le_zss helper_sve_ldss_le_zss_aarch64eb +#define helper_sve_ldsdu_le_zsu helper_sve_ldsdu_le_zsu_aarch64eb +#define helper_sve_ldsdu_le_zss helper_sve_ldsdu_le_zss_aarch64eb +#define helper_sve_ldsdu_le_zd helper_sve_ldsdu_le_zd_aarch64eb +#define helper_sve_ldss_be_zsu helper_sve_ldss_be_zsu_aarch64eb +#define helper_sve_ldss_be_zss helper_sve_ldss_be_zss_aarch64eb +#define helper_sve_ldsdu_be_zsu helper_sve_ldsdu_be_zsu_aarch64eb +#define helper_sve_ldsdu_be_zss helper_sve_ldsdu_be_zss_aarch64eb +#define helper_sve_ldsdu_be_zd helper_sve_ldsdu_be_zd_aarch64eb +#define helper_sve_ldsds_le_zsu helper_sve_ldsds_le_zsu_aarch64eb +#define helper_sve_ldsds_le_zss helper_sve_ldsds_le_zss_aarch64eb +#define helper_sve_ldsds_le_zd helper_sve_ldsds_le_zd_aarch64eb +#define helper_sve_ldsds_be_zsu helper_sve_ldsds_be_zsu_aarch64eb +#define helper_sve_ldsds_be_zss helper_sve_ldsds_be_zss_aarch64eb +#define helper_sve_ldsds_be_zd helper_sve_ldsds_be_zd_aarch64eb +#define helper_sve_lddd_le_zsu helper_sve_lddd_le_zsu_aarch64eb +#define helper_sve_lddd_le_zss helper_sve_lddd_le_zss_aarch64eb +#define helper_sve_lddd_le_zd helper_sve_lddd_le_zd_aarch64eb +#define helper_sve_lddd_be_zsu helper_sve_lddd_be_zsu_aarch64eb +#define helper_sve_lddd_be_zss helper_sve_lddd_be_zss_aarch64eb +#define helper_sve_lddd_be_zd helper_sve_lddd_be_zd_aarch64eb +#define helper_sve_ldffbsu_zsu helper_sve_ldffbsu_zsu_aarch64eb +#define helper_sve_ldffbsu_zss helper_sve_ldffbsu_zss_aarch64eb +#define helper_sve_ldffbdu_zsu helper_sve_ldffbdu_zsu_aarch64eb +#define helper_sve_ldffbdu_zss helper_sve_ldffbdu_zss_aarch64eb +#define helper_sve_ldffbdu_zd helper_sve_ldffbdu_zd_aarch64eb +#define helper_sve_ldffbss_zsu helper_sve_ldffbss_zsu_aarch64eb +#define helper_sve_ldffbss_zss helper_sve_ldffbss_zss_aarch64eb +#define helper_sve_ldffbds_zsu helper_sve_ldffbds_zsu_aarch64eb +#define helper_sve_ldffbds_zss helper_sve_ldffbds_zss_aarch64eb +#define helper_sve_ldffbds_zd helper_sve_ldffbds_zd_aarch64eb +#define helper_sve_ldffhsu_le_zsu helper_sve_ldffhsu_le_zsu_aarch64eb +#define helper_sve_ldffhsu_le_zss helper_sve_ldffhsu_le_zss_aarch64eb +#define helper_sve_ldffhdu_le_zsu helper_sve_ldffhdu_le_zsu_aarch64eb +#define helper_sve_ldffhdu_le_zss helper_sve_ldffhdu_le_zss_aarch64eb +#define helper_sve_ldffhdu_le_zd helper_sve_ldffhdu_le_zd_aarch64eb +#define helper_sve_ldffhsu_be_zsu helper_sve_ldffhsu_be_zsu_aarch64eb +#define helper_sve_ldffhsu_be_zss helper_sve_ldffhsu_be_zss_aarch64eb +#define helper_sve_ldffhdu_be_zsu helper_sve_ldffhdu_be_zsu_aarch64eb +#define helper_sve_ldffhdu_be_zss helper_sve_ldffhdu_be_zss_aarch64eb +#define helper_sve_ldffhdu_be_zd helper_sve_ldffhdu_be_zd_aarch64eb +#define helper_sve_ldffhss_le_zsu helper_sve_ldffhss_le_zsu_aarch64eb +#define helper_sve_ldffhss_le_zss helper_sve_ldffhss_le_zss_aarch64eb +#define helper_sve_ldffhds_le_zsu helper_sve_ldffhds_le_zsu_aarch64eb +#define helper_sve_ldffhds_le_zss helper_sve_ldffhds_le_zss_aarch64eb +#define helper_sve_ldffhds_le_zd helper_sve_ldffhds_le_zd_aarch64eb +#define helper_sve_ldffhss_be_zsu helper_sve_ldffhss_be_zsu_aarch64eb 
+#define helper_sve_ldffhss_be_zss helper_sve_ldffhss_be_zss_aarch64eb +#define helper_sve_ldffhds_be_zsu helper_sve_ldffhds_be_zsu_aarch64eb +#define helper_sve_ldffhds_be_zss helper_sve_ldffhds_be_zss_aarch64eb +#define helper_sve_ldffhds_be_zd helper_sve_ldffhds_be_zd_aarch64eb +#define helper_sve_ldffss_le_zsu helper_sve_ldffss_le_zsu_aarch64eb +#define helper_sve_ldffss_le_zss helper_sve_ldffss_le_zss_aarch64eb +#define helper_sve_ldffsdu_le_zsu helper_sve_ldffsdu_le_zsu_aarch64eb +#define helper_sve_ldffsdu_le_zss helper_sve_ldffsdu_le_zss_aarch64eb +#define helper_sve_ldffsdu_le_zd helper_sve_ldffsdu_le_zd_aarch64eb +#define helper_sve_ldffss_be_zsu helper_sve_ldffss_be_zsu_aarch64eb +#define helper_sve_ldffss_be_zss helper_sve_ldffss_be_zss_aarch64eb +#define helper_sve_ldffsdu_be_zsu helper_sve_ldffsdu_be_zsu_aarch64eb +#define helper_sve_ldffsdu_be_zss helper_sve_ldffsdu_be_zss_aarch64eb +#define helper_sve_ldffsdu_be_zd helper_sve_ldffsdu_be_zd_aarch64eb +#define helper_sve_ldffsds_le_zsu helper_sve_ldffsds_le_zsu_aarch64eb +#define helper_sve_ldffsds_le_zss helper_sve_ldffsds_le_zss_aarch64eb +#define helper_sve_ldffsds_le_zd helper_sve_ldffsds_le_zd_aarch64eb +#define helper_sve_ldffsds_be_zsu helper_sve_ldffsds_be_zsu_aarch64eb +#define helper_sve_ldffsds_be_zss helper_sve_ldffsds_be_zss_aarch64eb +#define helper_sve_ldffsds_be_zd helper_sve_ldffsds_be_zd_aarch64eb +#define helper_sve_ldffdd_le_zsu helper_sve_ldffdd_le_zsu_aarch64eb +#define helper_sve_ldffdd_le_zss helper_sve_ldffdd_le_zss_aarch64eb +#define helper_sve_ldffdd_le_zd helper_sve_ldffdd_le_zd_aarch64eb +#define helper_sve_ldffdd_be_zsu helper_sve_ldffdd_be_zsu_aarch64eb +#define helper_sve_ldffdd_be_zss helper_sve_ldffdd_be_zss_aarch64eb +#define helper_sve_ldffdd_be_zd helper_sve_ldffdd_be_zd_aarch64eb +#define helper_sve_stbs_zsu helper_sve_stbs_zsu_aarch64eb +#define helper_sve_sths_le_zsu helper_sve_sths_le_zsu_aarch64eb +#define helper_sve_sths_be_zsu helper_sve_sths_be_zsu_aarch64eb +#define helper_sve_stss_le_zsu helper_sve_stss_le_zsu_aarch64eb +#define helper_sve_stss_be_zsu helper_sve_stss_be_zsu_aarch64eb +#define helper_sve_stbs_zss helper_sve_stbs_zss_aarch64eb +#define helper_sve_sths_le_zss helper_sve_sths_le_zss_aarch64eb +#define helper_sve_sths_be_zss helper_sve_sths_be_zss_aarch64eb +#define helper_sve_stss_le_zss helper_sve_stss_le_zss_aarch64eb +#define helper_sve_stss_be_zss helper_sve_stss_be_zss_aarch64eb +#define helper_sve_stbd_zsu helper_sve_stbd_zsu_aarch64eb +#define helper_sve_sthd_le_zsu helper_sve_sthd_le_zsu_aarch64eb +#define helper_sve_sthd_be_zsu helper_sve_sthd_be_zsu_aarch64eb +#define helper_sve_stsd_le_zsu helper_sve_stsd_le_zsu_aarch64eb +#define helper_sve_stsd_be_zsu helper_sve_stsd_be_zsu_aarch64eb +#define helper_sve_stdd_le_zsu helper_sve_stdd_le_zsu_aarch64eb +#define helper_sve_stdd_be_zsu helper_sve_stdd_be_zsu_aarch64eb +#define helper_sve_stbd_zss helper_sve_stbd_zss_aarch64eb +#define helper_sve_sthd_le_zss helper_sve_sthd_le_zss_aarch64eb +#define helper_sve_sthd_be_zss helper_sve_sthd_be_zss_aarch64eb +#define helper_sve_stsd_le_zss helper_sve_stsd_le_zss_aarch64eb +#define helper_sve_stsd_be_zss helper_sve_stsd_be_zss_aarch64eb +#define helper_sve_stdd_le_zss helper_sve_stdd_le_zss_aarch64eb +#define helper_sve_stdd_be_zss helper_sve_stdd_be_zss_aarch64eb +#define helper_sve_stbd_zd helper_sve_stbd_zd_aarch64eb +#define helper_sve_sthd_le_zd helper_sve_sthd_le_zd_aarch64eb +#define helper_sve_sthd_be_zd helper_sve_sthd_be_zd_aarch64eb +#define 
helper_sve_stsd_le_zd helper_sve_stsd_le_zd_aarch64eb +#define helper_sve_stsd_be_zd helper_sve_stsd_be_zd_aarch64eb +#define helper_sve_stdd_le_zd helper_sve_stdd_le_zd_aarch64eb +#define helper_sve_stdd_be_zd helper_sve_stdd_be_zd_aarch64eb +#define arm_cpu_do_unaligned_access arm_cpu_do_unaligned_access_aarch64eb +#define arm_cpu_do_transaction_failed arm_cpu_do_transaction_failed_aarch64eb +#define arm_cpu_tlb_fill arm_cpu_tlb_fill_aarch64eb +#define a64_translate_init a64_translate_init_aarch64eb +#define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64eb +#define unallocated_encoding unallocated_encoding_aarch64eb +#define new_tmp_a64 new_tmp_a64_aarch64eb +#define new_tmp_a64_zero new_tmp_a64_zero_aarch64eb +#define cpu_reg cpu_reg_aarch64eb +#define cpu_reg_sp cpu_reg_sp_aarch64eb +#define read_cpu_reg read_cpu_reg_aarch64eb +#define read_cpu_reg_sp read_cpu_reg_sp_aarch64eb +#define write_fp_dreg write_fp_dreg_aarch64eb +#define get_fpstatus_ptr get_fpstatus_ptr_aarch64eb +#define sve_access_check sve_access_check_aarch64eb +#define logic_imm_decode_wmask logic_imm_decode_wmask_aarch64eb +#define arm_translate_init arm_translate_init_aarch64eb +#define arm_test_cc arm_test_cc_aarch64eb +#define arm_free_cc arm_free_cc_aarch64eb +#define arm_jump_cc arm_jump_cc_aarch64eb +#define arm_gen_test_cc arm_gen_test_cc_aarch64eb +#define vfp_expand_imm vfp_expand_imm_aarch64eb +#define gen_cmtst_i64 gen_cmtst_i64_aarch64eb +#define gen_ushl_i32 gen_ushl_i32_aarch64eb +#define gen_ushl_i64 gen_ushl_i64_aarch64eb +#define gen_sshl_i32 gen_sshl_i32_aarch64eb +#define gen_sshl_i64 gen_sshl_i64_aarch64eb +#define gen_intermediate_code gen_intermediate_code_aarch64eb +#define restore_state_to_opc restore_state_to_opc_aarch64eb +#define disas_sve disas_sve_aarch64eb +#define helper_neon_qrdmlah_s16 helper_neon_qrdmlah_s16_aarch64eb +#define helper_gvec_qrdmlah_s16 helper_gvec_qrdmlah_s16_aarch64eb +#define helper_neon_qrdmlsh_s16 helper_neon_qrdmlsh_s16_aarch64eb +#define helper_gvec_qrdmlsh_s16 helper_gvec_qrdmlsh_s16_aarch64eb +#define helper_neon_qrdmlah_s32 helper_neon_qrdmlah_s32_aarch64eb +#define helper_gvec_qrdmlah_s32 helper_gvec_qrdmlah_s32_aarch64eb +#define helper_neon_qrdmlsh_s32 helper_neon_qrdmlsh_s32_aarch64eb +#define helper_gvec_qrdmlsh_s32 helper_gvec_qrdmlsh_s32_aarch64eb +#define helper_gvec_sdot_b helper_gvec_sdot_b_aarch64eb +#define helper_gvec_udot_b helper_gvec_udot_b_aarch64eb +#define helper_gvec_sdot_h helper_gvec_sdot_h_aarch64eb +#define helper_gvec_udot_h helper_gvec_udot_h_aarch64eb +#define helper_gvec_sdot_idx_b helper_gvec_sdot_idx_b_aarch64eb +#define helper_gvec_udot_idx_b helper_gvec_udot_idx_b_aarch64eb +#define helper_gvec_sdot_idx_h helper_gvec_sdot_idx_h_aarch64eb +#define helper_gvec_udot_idx_h helper_gvec_udot_idx_h_aarch64eb +#define helper_gvec_fcaddh helper_gvec_fcaddh_aarch64eb +#define helper_gvec_fcadds helper_gvec_fcadds_aarch64eb +#define helper_gvec_fcaddd helper_gvec_fcaddd_aarch64eb +#define helper_gvec_fcmlah helper_gvec_fcmlah_aarch64eb +#define helper_gvec_fcmlah_idx helper_gvec_fcmlah_idx_aarch64eb +#define helper_gvec_fcmlas helper_gvec_fcmlas_aarch64eb +#define helper_gvec_fcmlas_idx helper_gvec_fcmlas_idx_aarch64eb +#define helper_gvec_fcmlad helper_gvec_fcmlad_aarch64eb +#define helper_gvec_frecpe_h helper_gvec_frecpe_h_aarch64eb +#define helper_gvec_frecpe_s helper_gvec_frecpe_s_aarch64eb +#define helper_gvec_frecpe_d helper_gvec_frecpe_d_aarch64eb +#define helper_gvec_frsqrte_h helper_gvec_frsqrte_h_aarch64eb +#define 
helper_gvec_frsqrte_s helper_gvec_frsqrte_s_aarch64eb +#define helper_gvec_frsqrte_d helper_gvec_frsqrte_d_aarch64eb +#define helper_gvec_fadd_h helper_gvec_fadd_h_aarch64eb +#define helper_gvec_fadd_s helper_gvec_fadd_s_aarch64eb +#define helper_gvec_fadd_d helper_gvec_fadd_d_aarch64eb +#define helper_gvec_fsub_h helper_gvec_fsub_h_aarch64eb +#define helper_gvec_fsub_s helper_gvec_fsub_s_aarch64eb +#define helper_gvec_fsub_d helper_gvec_fsub_d_aarch64eb +#define helper_gvec_fmul_h helper_gvec_fmul_h_aarch64eb +#define helper_gvec_fmul_s helper_gvec_fmul_s_aarch64eb +#define helper_gvec_fmul_d helper_gvec_fmul_d_aarch64eb +#define helper_gvec_ftsmul_h helper_gvec_ftsmul_h_aarch64eb +#define helper_gvec_ftsmul_s helper_gvec_ftsmul_s_aarch64eb +#define helper_gvec_ftsmul_d helper_gvec_ftsmul_d_aarch64eb +#define helper_gvec_recps_h helper_gvec_recps_h_aarch64eb +#define helper_gvec_recps_s helper_gvec_recps_s_aarch64eb +#define helper_gvec_recps_d helper_gvec_recps_d_aarch64eb +#define helper_gvec_rsqrts_h helper_gvec_rsqrts_h_aarch64eb +#define helper_gvec_rsqrts_s helper_gvec_rsqrts_s_aarch64eb +#define helper_gvec_rsqrts_d helper_gvec_rsqrts_d_aarch64eb +#define helper_gvec_fmul_idx_h helper_gvec_fmul_idx_h_aarch64eb +#define helper_gvec_fmul_idx_s helper_gvec_fmul_idx_s_aarch64eb +#define helper_gvec_fmul_idx_d helper_gvec_fmul_idx_d_aarch64eb +#define helper_gvec_fmla_idx_h helper_gvec_fmla_idx_h_aarch64eb +#define helper_gvec_fmla_idx_s helper_gvec_fmla_idx_s_aarch64eb +#define helper_gvec_fmla_idx_d helper_gvec_fmla_idx_d_aarch64eb +#define helper_gvec_uqadd_b helper_gvec_uqadd_b_aarch64eb +#define helper_gvec_uqadd_h helper_gvec_uqadd_h_aarch64eb +#define helper_gvec_uqadd_s helper_gvec_uqadd_s_aarch64eb +#define helper_gvec_sqadd_b helper_gvec_sqadd_b_aarch64eb +#define helper_gvec_sqadd_h helper_gvec_sqadd_h_aarch64eb +#define helper_gvec_sqadd_s helper_gvec_sqadd_s_aarch64eb +#define helper_gvec_uqsub_b helper_gvec_uqsub_b_aarch64eb +#define helper_gvec_uqsub_h helper_gvec_uqsub_h_aarch64eb +#define helper_gvec_uqsub_s helper_gvec_uqsub_s_aarch64eb +#define helper_gvec_sqsub_b helper_gvec_sqsub_b_aarch64eb +#define helper_gvec_sqsub_h helper_gvec_sqsub_h_aarch64eb +#define helper_gvec_sqsub_s helper_gvec_sqsub_s_aarch64eb +#define helper_gvec_uqadd_d helper_gvec_uqadd_d_aarch64eb +#define helper_gvec_uqsub_d helper_gvec_uqsub_d_aarch64eb +#define helper_gvec_sqadd_d helper_gvec_sqadd_d_aarch64eb +#define helper_gvec_sqsub_d helper_gvec_sqsub_d_aarch64eb +#define helper_gvec_fmlal_a32 helper_gvec_fmlal_a32_aarch64eb +#define helper_gvec_fmlal_a64 helper_gvec_fmlal_a64_aarch64eb +#define helper_gvec_fmlal_idx_a32 helper_gvec_fmlal_idx_a32_aarch64eb +#define helper_gvec_fmlal_idx_a64 helper_gvec_fmlal_idx_a64_aarch64eb +#define helper_gvec_sshl_b helper_gvec_sshl_b_aarch64eb +#define helper_gvec_sshl_h helper_gvec_sshl_h_aarch64eb +#define helper_gvec_ushl_b helper_gvec_ushl_b_aarch64eb +#define helper_gvec_ushl_h helper_gvec_ushl_h_aarch64eb +#define helper_gvec_pmul_b helper_gvec_pmul_b_aarch64eb +#define helper_gvec_pmull_q helper_gvec_pmull_q_aarch64eb +#define helper_neon_pmull_h helper_neon_pmull_h_aarch64eb +#define helper_sve2_pmull_h helper_sve2_pmull_h_aarch64eb +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_aarch64eb +#define vfp_get_fpscr vfp_get_fpscr_aarch64eb +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_aarch64eb +#define vfp_set_fpscr vfp_set_fpscr_aarch64eb +#define helper_vfp_adds helper_vfp_adds_aarch64eb +#define helper_vfp_addd helper_vfp_addd_aarch64eb 
+#define helper_vfp_subs helper_vfp_subs_aarch64eb +#define helper_vfp_subd helper_vfp_subd_aarch64eb +#define helper_vfp_muls helper_vfp_muls_aarch64eb +#define helper_vfp_muld helper_vfp_muld_aarch64eb +#define helper_vfp_divs helper_vfp_divs_aarch64eb +#define helper_vfp_divd helper_vfp_divd_aarch64eb +#define helper_vfp_mins helper_vfp_mins_aarch64eb +#define helper_vfp_mind helper_vfp_mind_aarch64eb +#define helper_vfp_maxs helper_vfp_maxs_aarch64eb +#define helper_vfp_maxd helper_vfp_maxd_aarch64eb +#define helper_vfp_minnums helper_vfp_minnums_aarch64eb +#define helper_vfp_minnumd helper_vfp_minnumd_aarch64eb +#define helper_vfp_maxnums helper_vfp_maxnums_aarch64eb +#define helper_vfp_maxnumd helper_vfp_maxnumd_aarch64eb +#define helper_vfp_negs helper_vfp_negs_aarch64eb +#define helper_vfp_negd helper_vfp_negd_aarch64eb +#define helper_vfp_abss helper_vfp_abss_aarch64eb +#define helper_vfp_absd helper_vfp_absd_aarch64eb +#define helper_vfp_sqrts helper_vfp_sqrts_aarch64eb +#define helper_vfp_sqrtd helper_vfp_sqrtd_aarch64eb +#define helper_vfp_cmps helper_vfp_cmps_aarch64eb +#define helper_vfp_cmpes helper_vfp_cmpes_aarch64eb +#define helper_vfp_cmpd helper_vfp_cmpd_aarch64eb +#define helper_vfp_cmped helper_vfp_cmped_aarch64eb +#define helper_vfp_sitoh helper_vfp_sitoh_aarch64eb +#define helper_vfp_tosih helper_vfp_tosih_aarch64eb +#define helper_vfp_tosizh helper_vfp_tosizh_aarch64eb +#define helper_vfp_sitos helper_vfp_sitos_aarch64eb +#define helper_vfp_tosis helper_vfp_tosis_aarch64eb +#define helper_vfp_tosizs helper_vfp_tosizs_aarch64eb +#define helper_vfp_sitod helper_vfp_sitod_aarch64eb +#define helper_vfp_tosid helper_vfp_tosid_aarch64eb +#define helper_vfp_tosizd helper_vfp_tosizd_aarch64eb +#define helper_vfp_uitoh helper_vfp_uitoh_aarch64eb +#define helper_vfp_touih helper_vfp_touih_aarch64eb +#define helper_vfp_touizh helper_vfp_touizh_aarch64eb +#define helper_vfp_uitos helper_vfp_uitos_aarch64eb +#define helper_vfp_touis helper_vfp_touis_aarch64eb +#define helper_vfp_touizs helper_vfp_touizs_aarch64eb +#define helper_vfp_uitod helper_vfp_uitod_aarch64eb +#define helper_vfp_touid helper_vfp_touid_aarch64eb +#define helper_vfp_touizd helper_vfp_touizd_aarch64eb +#define helper_vfp_fcvtds helper_vfp_fcvtds_aarch64eb +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_aarch64eb +#define helper_vfp_shtod helper_vfp_shtod_aarch64eb +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_aarch64eb +#define helper_vfp_toshd helper_vfp_toshd_aarch64eb +#define helper_vfp_sltod helper_vfp_sltod_aarch64eb +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_aarch64eb +#define helper_vfp_tosld helper_vfp_tosld_aarch64eb +#define helper_vfp_sqtod helper_vfp_sqtod_aarch64eb +#define helper_vfp_tosqd helper_vfp_tosqd_aarch64eb +#define helper_vfp_uhtod helper_vfp_uhtod_aarch64eb +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_aarch64eb +#define helper_vfp_touhd helper_vfp_touhd_aarch64eb +#define helper_vfp_ultod helper_vfp_ultod_aarch64eb +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_aarch64eb +#define helper_vfp_tould helper_vfp_tould_aarch64eb +#define helper_vfp_uqtod helper_vfp_uqtod_aarch64eb +#define helper_vfp_touqd helper_vfp_touqd_aarch64eb +#define helper_vfp_shtos helper_vfp_shtos_aarch64eb +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_aarch64eb +#define helper_vfp_toshs helper_vfp_toshs_aarch64eb +#define helper_vfp_sltos helper_vfp_sltos_aarch64eb +#define 
helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_aarch64eb +#define helper_vfp_tosls helper_vfp_tosls_aarch64eb +#define helper_vfp_sqtos helper_vfp_sqtos_aarch64eb +#define helper_vfp_tosqs helper_vfp_tosqs_aarch64eb +#define helper_vfp_uhtos helper_vfp_uhtos_aarch64eb +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_aarch64eb +#define helper_vfp_touhs helper_vfp_touhs_aarch64eb +#define helper_vfp_ultos helper_vfp_ultos_aarch64eb +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_aarch64eb +#define helper_vfp_touls helper_vfp_touls_aarch64eb +#define helper_vfp_uqtos helper_vfp_uqtos_aarch64eb +#define helper_vfp_touqs helper_vfp_touqs_aarch64eb +#define helper_vfp_sltoh helper_vfp_sltoh_aarch64eb +#define helper_vfp_ultoh helper_vfp_ultoh_aarch64eb +#define helper_vfp_sqtoh helper_vfp_sqtoh_aarch64eb +#define helper_vfp_uqtoh helper_vfp_uqtoh_aarch64eb +#define helper_vfp_toshh helper_vfp_toshh_aarch64eb +#define helper_vfp_touhh helper_vfp_touhh_aarch64eb +#define helper_vfp_toslh helper_vfp_toslh_aarch64eb +#define helper_vfp_toulh helper_vfp_toulh_aarch64eb +#define helper_vfp_tosqh helper_vfp_tosqh_aarch64eb +#define helper_vfp_touqh helper_vfp_touqh_aarch64eb +#define helper_set_rmode helper_set_rmode_aarch64eb +#define helper_set_neon_rmode helper_set_neon_rmode_aarch64eb +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_aarch64eb +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_aarch64eb +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_aarch64eb +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_aarch64eb +#define helper_recps_f32 helper_recps_f32_aarch64eb +#define helper_rsqrts_f32 helper_rsqrts_f32_aarch64eb +#define helper_recpe_f16 helper_recpe_f16_aarch64eb +#define helper_recpe_f32 helper_recpe_f32_aarch64eb +#define helper_recpe_f64 helper_recpe_f64_aarch64eb +#define helper_rsqrte_f16 helper_rsqrte_f16_aarch64eb +#define helper_rsqrte_f32 helper_rsqrte_f32_aarch64eb +#define helper_rsqrte_f64 helper_rsqrte_f64_aarch64eb +#define helper_recpe_u32 helper_recpe_u32_aarch64eb +#define helper_rsqrte_u32 helper_rsqrte_u32_aarch64eb +#define helper_vfp_muladds helper_vfp_muladds_aarch64eb +#define helper_vfp_muladdd helper_vfp_muladdd_aarch64eb +#define helper_rints_exact helper_rints_exact_aarch64eb +#define helper_rintd_exact helper_rintd_exact_aarch64eb +#define helper_rints helper_rints_aarch64eb +#define helper_rintd helper_rintd_aarch64eb +#define arm_rmode_to_sf arm_rmode_to_sf_aarch64eb +#define helper_fjcvtzs helper_fjcvtzs_aarch64eb +#define helper_vjcvt helper_vjcvt_aarch64eb +#define helper_frint32_s helper_frint32_s_aarch64eb +#define helper_frint64_s helper_frint64_s_aarch64eb +#define helper_frint32_d helper_frint32_d_aarch64eb +#define helper_frint64_d helper_frint64_d_aarch64eb +#define helper_check_hcr_el2_trap helper_check_hcr_el2_trap_aarch64eb +#define arm64_reg_reset arm64_reg_reset_aarch64eb +#define arm64_reg_read arm64_reg_read_aarch64eb +#define arm64_reg_write arm64_reg_write_aarch64eb +#define mla_op mla_op_aarch64eb +#define mls_op mls_op_aarch64eb +#define sshl_op sshl_op_aarch64eb +#define ushl_op ushl_op_aarch64eb +#define uqsub_op uqsub_op_aarch64eb +#define sqsub_op sqsub_op_aarch64eb +#define uqadd_op uqadd_op_aarch64eb +#define sqadd_op sqadd_op_aarch64eb +#define sli_op sli_op_aarch64eb +#define cmtst_op cmtst_op_aarch64eb +#define sri_op sri_op_aarch64eb +#define usra_op usra_op_aarch64eb +#define ssra_op ssra_op_aarch64eb +#define 
aarch64_translator_ops aarch64_translator_ops_aarch64eb
+#define pred_esz_masks pred_esz_masks_aarch64eb
 #endif
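The defines above close the big-endian AArch64 symbol-redirection header: every global QEMU helper and table is #define-renamed with an _aarch64eb suffix, so several per-target builds of the same QEMU sources can be statically linked into one Unicorn library without symbol collisions. A minimal sketch of the pattern follows; helper_example is a hypothetical name, not one of the symbols above:

    /* Sketch of the per-target symbol-prefixing scheme.  Each target
     * build includes its rename header before any QEMU code, so the
     * same source-level name compiles to a distinct linker symbol
     * per target.  helper_example is hypothetical. */
    #define helper_example helper_example_aarch64eb

    int helper_example(int x)   /* object file exports          */
    {                           /* helper_example_aarch64eb     */
        return x + 1;
    }
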
diff --git a/qemu/accel.c b/qemu/accel.c
deleted file mode 100644
index be1e87a7..00000000
--- a/qemu/accel.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * QEMU System Emulator, accelerator interfaces
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- * Copyright (c) 2014 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */
-
-#include "sysemu/accel.h"
-#include "hw/boards.h"
-#include "qemu-common.h"
-#include "sysemu/sysemu.h"
-#include "qom/object.h"
-#include "hw/boards.h"
-
-// use default size for TCG translated block
-#define TCG_TB_SIZE 0
-
-static bool tcg_allowed = true;
-static int tcg_init(MachineState *ms);
-static AccelClass *accel_find(struct uc_struct *uc, const char *opt_name);
-static int accel_init_machine(AccelClass *acc, MachineState *ms);
-static void tcg_accel_class_init(struct uc_struct *uc, ObjectClass *oc, void *data);
-
-static int tcg_init(MachineState *ms)
-{
-    ms->uc->tcg_exec_init(ms->uc, TCG_TB_SIZE * 1024 * 1024); // arch-dependent
-    return 0;
-}
-
-static const TypeInfo accel_type = {
-    TYPE_ACCEL,
-    TYPE_OBJECT,
-    sizeof(AccelClass),
-    sizeof(AccelState),
-};
-
-#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg")
-
-static const TypeInfo tcg_accel_type = {
-    TYPE_TCG_ACCEL,
-    TYPE_ACCEL,
-    0,
-    0,
-    NULL,
-    NULL,
-    NULL,
-    NULL,
-    NULL,
-    tcg_accel_class_init,
-};
-
-
-int configure_accelerator(MachineState *ms)
-{
-    int ret;
-    bool accel_initialised = false;
-    AccelClass *acc;
-
-    acc = accel_find(ms->uc, "tcg");
-    ret = accel_init_machine(acc, ms);
-    if (ret < 0) {
-        fprintf(stderr, "failed to initialize %s: %s\n",
-                acc->name,
-                strerror(-ret));
-    } else {
-        accel_initialised = true;
-    }
-
-    return !accel_initialised;
-}
-
-void register_accel_types(struct uc_struct *uc)
-{
-    type_register_static(uc, &accel_type);
-    type_register_static(uc, &tcg_accel_type);
-}
-
-static void tcg_accel_class_init(struct uc_struct *uc, ObjectClass *oc, void *data)
-{
-    AccelClass *ac = ACCEL_CLASS(uc, oc);
-    ac->name = "tcg";
-    ac->init_machine = tcg_init;
-    ac->allowed = &tcg_allowed;
-}
-
-/* Lookup AccelClass from opt_name.  Returns NULL if not found */
-static AccelClass *accel_find(struct uc_struct *uc, const char *opt_name)
-{
-    char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
-    AccelClass *ac = ACCEL_CLASS(uc, object_class_by_name(uc, class_name));
-    g_free(class_name);
-    return ac;
-}
-
-static int accel_init_machine(AccelClass *acc, MachineState *ms)
-{
-    ObjectClass *oc = OBJECT_CLASS(acc);
-    const char *cname = object_class_get_name(oc);
-    AccelState *accel = ACCEL(ms->uc, object_new(ms->uc, cname));
-    int ret;
-    ms->accelerator = accel;
-    *(acc->allowed) = true;
-    ret = acc->init_machine(ms);
-    if (ret < 0) {
-        ms->accelerator = NULL;
-        *(acc->allowed) = false;
-        object_unref(ms->uc, OBJECT(accel));
-    }
-    return ret;
-}
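The new atomic_template.h that follows is a multiple-inclusion template: the including .c file defines DATA_SIZE (plus the ATOMIC_NAME, EXTRA_ARGS and ATOMIC_MMU_* hooks) and #includes the header once per access width; the header undefines its parameters at the end so the next instantiation starts clean. A reduced, self-contained sketch of the idiom, using hypothetical names (mini_template.h, load_4, load_2):

    /* mini_template.h -- reduced sketch of the DATA_SIZE template idiom.
     * The real header also derives SUFFIX, BSWAP and SHIFT from
     * DATA_SIZE, and expects the includer to supply more hooks. */
    #if DATA_SIZE == 4
    # define DATA_TYPE uint32_t
    #elif DATA_SIZE == 2
    # define DATA_TYPE uint16_t
    #endif

    static DATA_TYPE GLUE(load_, DATA_SIZE)(const DATA_TYPE *p)
    {
        return *p;                     /* one helper per width */
    }

    #undef DATA_TYPE                   /* leave nothing behind */

    /* user.c -- one instantiation per width: */
    #include <stdint.h>
    #define GLUE_(a, b) a##b
    #define GLUE(a, b)  GLUE_(a, b)
    #define DATA_SIZE 4
    #include "mini_template.h"         /* defines load_4() */
    #undef  DATA_SIZE
    #define DATA_SIZE 2
    #include "mini_template.h"         /* defines load_2() */
    #undef  DATA_SIZE
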
*/ +#if DATA_SIZE == 1 +# define END +#elif defined(HOST_WORDS_BIGENDIAN) +# define END _be +#else +# define END _le +#endif + +ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, + ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS) +{ + ATOMIC_MMU_DECLS; + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE ret; + +#if DATA_SIZE == 16 + ret = atomic16_cmpxchg(haddr, cmpv, newv); +#else +#ifdef _MSC_VER + ret = atomic_cmpxchg__nocheck((long *)haddr, cmpv, newv); +#else + ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv); +#endif +#endif + ATOMIC_MMU_CLEANUP; + return ret; +} + +#if DATA_SIZE >= 16 +#if HAVE_ATOMIC128 +ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) +{ + ATOMIC_MMU_DECLS; + DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; + + val = atomic16_read(haddr); + ATOMIC_MMU_CLEANUP; + return val; +} + +void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, + ABI_TYPE val EXTRA_ARGS) +{ + ATOMIC_MMU_DECLS; + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + + atomic16_set(haddr, val); + ATOMIC_MMU_CLEANUP; +} +#endif +#else +ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, + ABI_TYPE val EXTRA_ARGS) +{ + ATOMIC_MMU_DECLS; + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; + DATA_TYPE ret; + + ret = *haddr; + *haddr = val; + ATOMIC_MMU_CLEANUP; + + return ret; +} + +#ifdef _MSC_VER +#define GEN_ATOMIC_HELPER(X) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE val EXTRA_ARGS) \ +{ \ + ATOMIC_MMU_DECLS; \ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ + DATA_TYPE ret; \ + ret = atomic_##X((long *)haddr, val); \ + ATOMIC_MMU_CLEANUP; \ + return ret; \ +} +#else +#define GEN_ATOMIC_HELPER(X) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE val EXTRA_ARGS) \ +{ \ + ATOMIC_MMU_DECLS; \ + DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ + DATA_TYPE ret; \ + ret = atomic_##X(haddr, val); \ + ATOMIC_MMU_CLEANUP; \ + return ret; \ +} +#endif + +GEN_ATOMIC_HELPER(fetch_add) +GEN_ATOMIC_HELPER(fetch_and) +GEN_ATOMIC_HELPER(fetch_or) +GEN_ATOMIC_HELPER(fetch_xor) +GEN_ATOMIC_HELPER(add_fetch) +GEN_ATOMIC_HELPER(and_fetch) +GEN_ATOMIC_HELPER(or_fetch) +GEN_ATOMIC_HELPER(xor_fetch) + +#undef GEN_ATOMIC_HELPER + +/* These helpers are, as a whole, full barriers. Within the helper, + * the leading barrier is explicit and the trailing barrier is within + * cmpxchg primitive. + * + * Trace this load + RMW loop as a single RMW op. This way, regardless + * of CF_PARALLEL's value, we'll trace just a read and a write. 
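+ *
+ * For instance, GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
+ * below produces a helper that computes MIN(old, val) in a cmpxchg-style
+ * loop and returns the previous memory value, while the umin_fetch
+ * variant returns the newly written minimum instead.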
+ */
+#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,      \
+                        ABI_TYPE xval EXTRA_ARGS)                   \
+{                                                                   \
+    ATOMIC_MMU_DECLS;                                               \
+    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
+    XDATA_TYPE cmp, old, new, val = xval;                           \
+    cmp = *haddr;                                                   \
+    do {                                                            \
+        old = cmp; new = FN(old, val);                              \
+        cmp = *haddr;                                               \
+        if (cmp == old)                                             \
+            *haddr = new;                                           \
+    } while (cmp != old);                                           \
+    ATOMIC_MMU_CLEANUP;                                             \
+    return RET;                                                     \
+}
+
+GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
+GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
+GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
+GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
+
+GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
+GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
+GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
+GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
+
+#undef GEN_ATOMIC_HELPER_FN
+#endif /* DATA_SIZE >= 16 */
+
+#undef END
+
+#if DATA_SIZE > 1
+
+/* Define reverse-host-endian atomic operations.  Note that END is used
+   within the ATOMIC_NAME macro. */
+#ifdef HOST_WORDS_BIGENDIAN
+# define END _le
+#else
+# define END _be
+#endif
+
+ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
+                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
+{
+    ATOMIC_MMU_DECLS;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    DATA_TYPE ret;
+#if DATA_SIZE == 16
+    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
+#else
+#ifdef _MSC_VER
+    ret = atomic_cmpxchg__nocheck((long *)haddr, BSWAP(cmpv), BSWAP(newv));
+#else
+    ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
+#endif
+#endif
+    ATOMIC_MMU_CLEANUP;
+    return BSWAP(ret);
+}
+
+#if DATA_SIZE >= 16
+#if HAVE_ATOMIC128
+ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
+{
+    ATOMIC_MMU_DECLS;
+    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
+    val = atomic16_read(haddr);
+    ATOMIC_MMU_CLEANUP;
+    return BSWAP(val);
+}
+
+void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
+                     ABI_TYPE val EXTRA_ARGS)
+{
+    ATOMIC_MMU_DECLS;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+
+    val = BSWAP(val);
+    atomic16_set(haddr, val);
+    ATOMIC_MMU_CLEANUP;
+}
+#endif
+#else
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
+                           ABI_TYPE val EXTRA_ARGS)
+{
+    ATOMIC_MMU_DECLS;
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
+    ABI_TYPE ret;
+    ret = *haddr;
+    *haddr = BSWAP(val);
+    ATOMIC_MMU_CLEANUP;
+    return BSWAP(ret);
+}
+
+#ifdef _MSC_VER
+#define GEN_ATOMIC_HELPER(X)                                        \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,      \
+                        ABI_TYPE val EXTRA_ARGS)                    \
+{                                                                   \
+    ATOMIC_MMU_DECLS;                                               \
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
+    DATA_TYPE ret;                                                  \
+    ret = atomic_##X((long *)haddr, BSWAP(val));                    \
+    ATOMIC_MMU_CLEANUP;                                             \
+    return BSWAP(ret);                                              \
+}
+#else
+#define GEN_ATOMIC_HELPER(X)                                        \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,      \
+                        ABI_TYPE val EXTRA_ARGS)                    \
+{                                                                   \
+    ATOMIC_MMU_DECLS;                                               \
+    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
+    DATA_TYPE ret;                                                  \
+    ret = atomic_##X(haddr, BSWAP(val));                            \
+    ATOMIC_MMU_CLEANUP;                                             \
+    return BSWAP(ret);                                              \
+}
+#endif
+
+GEN_ATOMIC_HELPER(fetch_and)
+GEN_ATOMIC_HELPER(fetch_or)
+GEN_ATOMIC_HELPER(fetch_xor)
+GEN_ATOMIC_HELPER(and_fetch)
+GEN_ATOMIC_HELPER(or_fetch)
+GEN_ATOMIC_HELPER(xor_fetch)
+
+#undef GEN_ATOMIC_HELPER
+
+/* These helpers are, as a whole, full barriers.  Within the helper,
+ * the leading barrier is explicit and the trailing barrier is within
+ * cmpxchg primitive.
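+ *
+ * The GEN_ATOMIC_HELPER bitwise operations above can work directly on
+ * byte-swapped values because AND/OR/XOR commute with a byte swap:
+ * BSWAP(x) & BSWAP(y) == BSWAP(x & y). Addition does not, since carries
+ * cross byte lanes, which is why fetch_add/add_fetch are instead
+ * generated from the cmpxchg loop at the end of this section.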
+ * + * Trace this load + RMW loop as a single RMW op. This way, regardless + * of CF_PARALLEL's value, we'll trace just a read and a write. + */ +#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ + ABI_TYPE xval EXTRA_ARGS) \ +{ \ + ATOMIC_MMU_DECLS; \ + XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \ + XDATA_TYPE ldo, ldn, old, new, val = xval; \ + ldn = *haddr; \ + do { \ + ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \ + ldn = *haddr; \ + if (ldn == ldo) \ + *haddr = BSWAP(new); \ + } while (ldo != ldn); \ + ATOMIC_MMU_CLEANUP; \ + return RET; \ +} + +GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old) + +GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new) +GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) + +/* Note that for addition, we need to use a separate cmpxchg loop instead + of bswaps for the reverse-host-endian helpers. */ +#define ADD(X, Y) (X + Y) +GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old) +GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new) +#undef ADD + +#undef GEN_ATOMIC_HELPER_FN +#endif /* DATA_SIZE >= 16 */ + +#undef END +#endif /* DATA_SIZE > 1 */ + +#undef BSWAP +#undef ABI_TYPE +#undef DATA_TYPE +#undef SDATA_TYPE +#undef SUFFIX +#undef DATA_SIZE +#undef SHIFT diff --git a/qemu/accel/tcg/cpu-exec-common.c b/qemu/accel/tcg/cpu-exec-common.c new file mode 100644 index 00000000..a5353e45 --- /dev/null +++ b/qemu/accel/tcg/cpu-exec-common.c @@ -0,0 +1,58 @@ +/* + * emulator main execution loop + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "sysemu/cpus.h" +#include "sysemu/tcg.h" +#include "exec/exec-all.h" + +/* exit the current TB, but without causing any exception to be raised */ +void cpu_loop_exit_noexc(CPUState *cpu) +{ + cpu->exception_index = -1; + cpu_loop_exit(cpu); +} + +void cpu_reloading_memory_map(void) +{ +} + +void cpu_loop_exit(CPUState *cpu) +{ + /* Unlock JIT write protect if applicable. */ + tb_exec_unlock(cpu->uc->tcg_ctx); + /* Undo the setting in cpu_tb_exec. 
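+       (Generated code may run with can_do_io clear; a longjmp out of a
+       TB bypasses the restore in cpu_tb_exec, so it is redone here.)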
*/ + cpu->can_do_io = 1; + siglongjmp(cpu->jmp_env, 1); +} + +void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) +{ + if (pc) { + cpu_restore_state(cpu, pc, true); + } + cpu_loop_exit(cpu); +} + +void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc) +{ + cpu->exception_index = EXCP_ATOMIC; + cpu_loop_exit_restore(cpu, pc); +} diff --git a/qemu/accel/tcg/cpu-exec.c b/qemu/accel/tcg/cpu-exec.c new file mode 100644 index 00000000..79bcdd13 --- /dev/null +++ b/qemu/accel/tcg/cpu-exec.c @@ -0,0 +1,581 @@ +/* + * emulator main execution loop + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/core/cpu.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#include "qemu/atomic.h" +#include "qemu/timer.h" +#include "exec/tb-hash.h" +#include "exec/tb-lookup.h" +#include "sysemu/cpus.h" +#include "uc_priv.h" + +/* -icount align implementation. */ + +typedef struct SyncClocks { + int64_t diff_clk; + int64_t last_cpu_icount; + int64_t realtime_clock; +} SyncClocks; + +/* Allow the guest to have a max 3ms advance. + * The difference between the 2 clocks could therefore + * oscillate around 0. + */ +#define VM_CLOCK_ADVANCE 3000000 +#define THRESHOLD_REDUCE 1.5 +#define MAX_DELAY_PRINT_RATE 2000000000LL +#define MAX_NB_PRINTS 100 + +/* Execute a TB, and fix up the CPU state afterwards if necessary */ +static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb) +{ + CPUArchState *env = cpu->env_ptr; + uintptr_t ret; + TranslationBlock *last_tb; + int tb_exit; + uint8_t *tb_ptr = itb->tc.ptr; + + tb_exec_lock(cpu->uc->tcg_ctx); + ret = tcg_qemu_tb_exec(env, tb_ptr); + tb_exec_unlock(cpu->uc->tcg_ctx); + cpu->can_do_io = 1; + last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK); + tb_exit = ret & TB_EXIT_MASK; + // trace_exec_tb_exit(last_tb, tb_exit); + + if (tb_exit > TB_EXIT_IDX1) { + /* We didn't start executing this TB (eg because the instruction + * counter hit zero); we must restore the guest PC to the address + * of the start of the TB. + */ + CPUClass *cc = CPU_GET_CLASS(cpu); + if (!HOOK_EXISTS(env->uc, UC_HOOK_CODE) && !env->uc->timeout) { + // We should sync pc for R/W error. + switch (env->uc->invalid_error) { + case UC_ERR_WRITE_PROT: + case UC_ERR_READ_PROT: + case UC_ERR_FETCH_PROT: + case UC_ERR_WRITE_UNMAPPED: + case UC_ERR_READ_UNMAPPED: + case UC_ERR_FETCH_UNMAPPED: + case UC_ERR_WRITE_UNALIGNED: + case UC_ERR_READ_UNALIGNED: + case UC_ERR_FETCH_UNALIGNED: + break; + default: + if (cc->synchronize_from_tb) { + // avoid sync twice when helper_uc_tracecode() already did this. 
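+                        // (synchronize_from_tb recovers the guest PC from
+                        // the interrupted TB's state; the set_pc branch
+                        // below is the portable fallback that simply
+                        // reloads last_tb->pc.)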
+ if (env->uc->emu_counter <= env->uc->emu_count && + !env->uc->stop_request && !env->uc->quit_request) + cc->synchronize_from_tb(cpu, last_tb); + } else { + assert(cc->set_pc); + cc->set_pc(cpu, last_tb->pc); + } + } + } + + cpu->tcg_exit_req = 0; + } + return ret; +} + +/* Execute the code without caching the generated code. An interpreter + could be used if available. */ +static void cpu_exec_nocache(CPUState *cpu, int max_cycles, + TranslationBlock *orig_tb, bool ignore_icount) +{ + TranslationBlock *tb; + uint32_t cflags = curr_cflags() | CF_NOCACHE; + + if (ignore_icount) { + cflags &= ~CF_USE_ICOUNT; + } + + /* Should never happen. + We only end up here when an existing TB is too long. */ + cflags |= MIN(max_cycles, CF_COUNT_MASK); + + mmap_lock(); + tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, + orig_tb->flags, cflags); + tb->orig_tb = orig_tb; + mmap_unlock(); + + /* execute the generated code */ + cpu_tb_exec(cpu, tb); + + mmap_lock(); + tb_phys_invalidate(cpu->uc->tcg_ctx, tb, -1); + mmap_unlock(); + tcg_tb_remove(cpu->uc->tcg_ctx, tb); +} + +struct tb_desc { + target_ulong pc; + target_ulong cs_base; + CPUArchState *env; + tb_page_addr_t phys_page1; + uint32_t flags; + uint32_t cf_mask; + uint32_t trace_vcpu_dstate; +}; + +static bool tb_lookup_cmp(struct uc_struct *uc, const void *p, const void *d) +{ + const TranslationBlock *tb = p; + const struct tb_desc *desc = d; + + if (tb->pc == desc->pc && + tb->page_addr[0] == desc->phys_page1 && + tb->cs_base == desc->cs_base && + tb->flags == desc->flags && + tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && + (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) { + /* check next page if needed */ + if (tb->page_addr[1] == -1) { + return true; + } else { + tb_page_addr_t phys_page2; + target_ulong virt_page2; + + virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + phys_page2 = get_page_addr_code(desc->env, virt_page2); + if (tb->page_addr[1] == phys_page2) { + return true; + } + } + } + return false; +} + +TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, + target_ulong cs_base, uint32_t flags, + uint32_t cf_mask) +{ + struct uc_struct *uc = cpu->uc; + tb_page_addr_t phys_pc; + struct tb_desc desc; + uint32_t h; + + desc.env = (CPUArchState *)cpu->env_ptr; + desc.cs_base = cs_base; + desc.flags = flags; + desc.cf_mask = cf_mask; + desc.trace_vcpu_dstate = *cpu->trace_dstate; + desc.pc = pc; + phys_pc = get_page_addr_code(desc.env, pc); + if (phys_pc == -1) { + return NULL; + } + desc.phys_page1 = phys_pc & TARGET_PAGE_MASK; + h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate); + return qht_lookup_custom(uc, &cpu->uc->tcg_ctx->tb_ctx.htable, &desc, h, tb_lookup_cmp); +} + +void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr) +{ + if (TCG_TARGET_HAS_direct_jump) { + uintptr_t offset = tb->jmp_target_arg[n]; + uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr; + tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr); + } else { + tb->jmp_target_arg[n] = addr; + } +} + +static inline void tb_add_jump(TranslationBlock *tb, int n, + TranslationBlock *tb_next) +{ + uintptr_t old; + + assert(n < ARRAY_SIZE(tb->jmp_list_next)); + + /* make sure the destination TB is valid */ + if (tb_next->cflags & CF_INVALID) { + goto out_unlock_next; + } + /* Atomically claim the jump destination slot only if it was NULL */ +#ifdef _MSC_VER + old = atomic_cmpxchg((long *)&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next); +#else + old = atomic_cmpxchg(&tb->jmp_dest[n], 
(uintptr_t)NULL, (uintptr_t)tb_next); +#endif + if (old) { + goto out_unlock_next; + } + + /* patch the native jump address */ + tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr); + + /* add in TB jmp list */ + tb->jmp_list_next[n] = tb_next->jmp_list_head; + tb_next->jmp_list_head = (uintptr_t)tb | n; + + return; + + out_unlock_next: + return; +} + +static inline TranslationBlock *tb_find(CPUState *cpu, + TranslationBlock *last_tb, + int tb_exit, uint32_t cf_mask) +{ + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags; + + tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask); + if (tb == NULL) { + mmap_lock(); + tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask); + mmap_unlock(); + /* We add the TB in the virtual pc hash table for the fast lookup */ + cpu->tb_jmp_cache[tb_jmp_cache_hash_func(cpu->uc, pc)] = tb; + } + /* We don't take care of direct jumps when address mapping changes in + * system emulation. So it's not safe to make a direct jump to a TB + * spanning two pages because the mapping for the second page can change. + */ + if (tb->page_addr[1] != -1) { + last_tb = NULL; + } + /* See if we can patch the calling TB. */ + if (last_tb) { + tb_add_jump(last_tb, tb_exit, tb); + } + return tb; +} + +static inline bool cpu_handle_halt(CPUState *cpu) +{ + if (cpu->halted) { +#if 0 +#if defined(TARGET_I386) + if ((cpu->interrupt_request & CPU_INTERRUPT_POLL) + && replay_interrupt()) { + X86CPU *x86_cpu = X86_CPU(cpu); + apic_poll_irq(x86_cpu->apic_state); + cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); + } +#endif +#endif + if (!cpu_has_work(cpu)) { + return true; + } + + cpu->halted = 0; + } + + return false; +} + +static inline void cpu_handle_debug_exception(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUWatchpoint *wp; + + if (!cpu->watchpoint_hit) { + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + wp->flags &= ~BP_WATCHPOINT_HIT; + } + } + + cc->debug_excp_handler(cpu); +} + +static inline bool cpu_handle_exception(CPUState *cpu, int *ret) +{ + bool catched = false; + struct uc_struct *uc = cpu->uc; + struct hook *hook; + + // printf(">> exception index = %u\n", cpu->exception_index); qq + + if (cpu->uc->stop_interrupt && cpu->uc->stop_interrupt(cpu->uc, cpu->exception_index)) { + // Unicorn: call registered invalid instruction callbacks + catched = false; + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(uc, hook, UC_HOOK_INSN_INVALID) { + if (hook->to_delete) { + continue; + } + catched = ((uc_cb_hookinsn_invalid_t)hook->callback)(uc, hook->user_data); + if (catched) { + break; + } + } + if (!catched) { + uc->invalid_error = UC_ERR_INSN_INVALID; + } + + // we want to stop emulation + *ret = EXCP_HLT; + return true; + } + + if (cpu->exception_index < 0) { + return false; + } + + if (cpu->exception_index >= EXCP_INTERRUPT) { + /* exit request from the cpu execution loop */ + *ret = cpu->exception_index; + if (*ret == EXCP_DEBUG) { + cpu_handle_debug_exception(cpu); + } + cpu->exception_index = -1; + return true; + } else { +#if defined(TARGET_X86_64) + CPUArchState *env = cpu->env_ptr; + if (env->exception_is_int) { + // point EIP to the next instruction after INT + env->eip = env->exception_next_eip; + } +#endif +#if defined(TARGET_MIPS) || defined(TARGET_MIPS64) + // Unicorn: Imported from https://github.com/unicorn-engine/unicorn/pull/1098 + CPUMIPSState *env = &(MIPS_CPU(cpu)->env); + env->active_tc.PC = uc->next_pc; +#endif + // Unicorn: call registered interrupt callbacks + catched = false; + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(uc, 
hook, UC_HOOK_INTR) { + if (hook->to_delete) { + continue; + } + ((uc_cb_hookintr_t)hook->callback)(uc, cpu->exception_index, hook->user_data); + catched = true; + } + // Unicorn: If un-catched interrupt, stop executions. + if (!catched) { + // printf("AAAAAAAAAAAA\n"); qq + uc->invalid_error = UC_ERR_EXCEPTION; + cpu->halted = 1; + *ret = EXCP_HLT; + return true; + } + + cpu->exception_index = -1; + } + + *ret = EXCP_INTERRUPT; + return false; +} + +static inline bool cpu_handle_interrupt(CPUState *cpu, + TranslationBlock **last_tb) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + /* Clear the interrupt flag now since we're processing + * cpu->interrupt_request and cpu->exit_request. + * Ensure zeroing happens before reading cpu->exit_request or + * cpu->interrupt_request (see also smp_wmb in cpu_exit()) + */ + cpu_neg(cpu)->icount_decr.u16.high = 0; + + if (unlikely(cpu->interrupt_request)) { + int interrupt_request; + interrupt_request = cpu->interrupt_request; + if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { + /* Mask out external interrupts for this step. */ + interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; + } + if (interrupt_request & CPU_INTERRUPT_DEBUG) { + cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; + cpu->exception_index = EXCP_DEBUG; + return true; + } +#if defined(TARGET_I386) + else if (interrupt_request & CPU_INTERRUPT_INIT) { + X86CPU *x86_cpu = X86_CPU(cpu); + CPUArchState *env = &x86_cpu->env; + //replay_interrupt(); + cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0); + do_cpu_init(x86_cpu); + cpu->exception_index = EXCP_HALTED; + return true; + } +#else + else if (interrupt_request & CPU_INTERRUPT_RESET) { + //replay_interrupt(); + cpu_reset(cpu); + return true; + } +#endif + /* The target hook has 3 exit conditions: + False when the interrupt isn't processed, + True when it is, and we should restart on a new TB, + and via longjmp via cpu_loop_exit. */ + else { + if (cc->cpu_exec_interrupt(cpu, interrupt_request)) { + //replay_interrupt(); + cpu->exception_index = -1; + *last_tb = NULL; + } + /* The target hook may have updated the 'cpu->interrupt_request'; + * reload the 'interrupt_request' value */ + interrupt_request = cpu->interrupt_request; + } + if (interrupt_request & CPU_INTERRUPT_EXITTB) { + cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; + /* ensure that no TB jump will be modified as + the program flow was changed */ + *last_tb = NULL; + } + } + + /* Finally, check if we need to exit to the main loop. */ + if (unlikely(cpu->exit_request)) { + cpu->exit_request = 0; + if (cpu->exception_index == -1) { + cpu->exception_index = EXCP_INTERRUPT; + } + return true; + } + + return false; +} + +static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, + TranslationBlock **last_tb, int *tb_exit) +{ + uintptr_t ret; + int32_t insns_left; + + // trace_exec_tb(tb, tb->pc); + ret = cpu_tb_exec(cpu, tb); + tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK); + *tb_exit = ret & TB_EXIT_MASK; + if (*tb_exit != TB_EXIT_REQUESTED) { + *last_tb = tb; + return; + } + + *last_tb = NULL; + insns_left = cpu_neg(cpu)->icount_decr.u32; + if (insns_left < 0) { + /* Something asked us to stop executing chained TBs; just + * continue round the main loop. Whatever requested the exit + * will also have set something else (eg exit_request or + * interrupt_request) which will be handled by + * cpu_handle_interrupt. cpu_handle_interrupt will also + * clear cpu->icount_decr.u16.high. + */ + return; + } + + /* Instruction counter expired. 
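+       As a worked example (numbers are illustrative): with an
+       icount_budget of 100000, the decrementer below is refilled with
+       MIN(0xffff, 100000) == 65535 instructions and icount_extra keeps
+       the remaining 34465 for later refills.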
*/ + /* Refill decrementer and continue execution. */ + insns_left = MIN(0xffff, cpu->icount_budget); + cpu_neg(cpu)->icount_decr.u16.low = insns_left; + cpu->icount_extra = cpu->icount_budget - insns_left; + if (!cpu->icount_extra) { + /* Execute any remaining instructions, then let the main loop + * handle the next event. + */ + if (insns_left > 0) { + cpu_exec_nocache(cpu, insns_left, tb, false); + } + } +} + +/* main execution loop */ +int cpu_exec(struct uc_struct *uc, CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + int ret; + // SyncClocks sc = { 0 }; + + if (cpu_handle_halt(cpu)) { + return EXCP_HALTED; + } + + // rcu_read_lock(); + + cc->cpu_exec_enter(cpu); + + /* Calculate difference between guest clock and host clock. + * This delay includes the delay of the last cycle, so + * what we have to do is sleep until it is 0. As for the + * advance/delay we gain here, we try to fix it next time. + */ + // init_delay_params(&sc, cpu); + + /* prepare setjmp context for exception handling */ + if (sigsetjmp(cpu->jmp_env, 0) != 0) { +#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6) + /* Some compilers wrongly smash all local variables after + * siglongjmp. There were bug reports for gcc 4.5.0 and clang. + * Reload essential local variables here for those compilers. + * Newer versions of gcc would complain about this code (-Wclobbered). */ + cc = CPU_GET_CLASS(cpu); +#else /* buggy compiler */ + /* Assert that the compiler does not smash local variables. */ + // g_assert(cpu == current_cpu); + g_assert(cc == CPU_GET_CLASS(cpu)); +#endif /* buggy compiler */ + + assert_no_pages_locked(); + } + + /* if an exception is pending, we execute it here */ + while (!cpu_handle_exception(cpu, &ret)) { + TranslationBlock *last_tb = NULL; + int tb_exit = 0; + + while (!cpu_handle_interrupt(cpu, &last_tb)) { + uint32_t cflags = cpu->cflags_next_tb; + TranslationBlock *tb; + + /* When requested, use an exact setting for cflags for the next + execution. This is used for icount, precise smc, and stop- + after-access watchpoints. Since this request should never + have CF_INVALID set, -1 is a convenient invalid value that + does not require tcg headers for cpu_common_reset. */ + if (cflags == -1) { + cflags = curr_cflags(); + } else { + cpu->cflags_next_tb = -1; + } + + tb = tb_find(cpu, last_tb, tb_exit, cflags); + cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit); + /* Try to align the host and virtual clocks + if the guest is in advance */ + // align_clocks(&sc, cpu); + } + } + + // Unicorn: Clear any TCG exit flag that might have been left set by exit requests + uc->cpu->tcg_exit_req = 0; + + cc->cpu_exec_exit(cpu); + // rcu_read_unlock(); + + return ret; +} diff --git a/qemu/accel/tcg/cputlb.c b/qemu/accel/tcg/cputlb.c new file mode 100644 index 00000000..c29f9455 --- /dev/null +++ b/qemu/accel/tcg/cputlb.c @@ -0,0 +1,2420 @@ +/* + * Common CPU TLB handling + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/memory.h" +#include "exec/cpu_ldst.h" +#include "exec/cputlb.h" +#include "exec/memory-internal.h" +#include "exec/ram_addr.h" +#include "tcg/tcg.h" +#include "exec/helper-proto.h" +#include "qemu/atomic.h" +#include "qemu/atomic128.h" +#include "translate-all.h" +#include "exec/cpu-common.h" +#include "trace/mem.h" + +#include + +#include + +/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ +/* #define DEBUG_TLB */ +/* #define DEBUG_TLB_LOG */ + +#ifdef DEBUG_TLB +# define DEBUG_TLB_GATE 1 +# ifdef DEBUG_TLB_LOG +# define DEBUG_TLB_LOG_GATE 1 +# else +# define DEBUG_TLB_LOG_GATE 0 +# endif +#else +# define DEBUG_TLB_GATE 0 +# define DEBUG_TLB_LOG_GATE 0 +#endif + +#if 0 +#define assert_cpu_is_self(cpu) do { \ + if (DEBUG_TLB_GATE) { \ + g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \ + } \ + } while (0) +#endif + +/* run_on_cpu_data.target_ptr should always be big enough for a + * target_ulong even on 32 bit builds */ +QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); + +/* We currently can't handle more than 16 bits in the MMUIDX bitmask. + */ +QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); +#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) + +static inline size_t tlb_n_entries(CPUTLBDescFast *fast) +{ + return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; +} + +static inline size_t sizeof_tlb(CPUTLBDescFast *fast) +{ + return fast->mask + (1 << CPU_TLB_ENTRY_BITS); +} + +static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, + size_t max_entries) +{ + desc->window_begin_ns = ns; + desc->window_max_entries = max_entries; +} + +/** + * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary + * @desc: The CPUTLBDesc portion of the TLB + * @fast: The CPUTLBDescFast portion of the same TLB + * + * Called with tlb_lock_held. + * + * We have two main constraints when resizing a TLB: (1) we only resize it + * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing + * the array or unnecessarily flushing it), which means we do not control how + * frequently the resizing can occur; (2) we don't have access to the guest's + * future scheduling decisions, and therefore have to decide the magnitude of + * the resize based on past observations. + * + * In general, a memory-hungry process can benefit greatly from an appropriately + * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that + * we just have to make the TLB as large as possible; while an oversized TLB + * results in minimal TLB miss rates, it also takes longer to be flushed + * (flushes can be _very_ frequent), and the reduced locality can also hurt + * performance. + * + * To achieve near-optimal performance for all kinds of workloads, we: + * + * 1. Aggressively increase the size of the TLB when the use rate of the + * TLB being flushed is high, since it is likely that in the near future this + * memory-hungry process will execute again, and its memory hungriness will + * probably be similar. + * + * 2. Slowly reduce the size of the TLB as the use rate declines over a + * reasonably large time window. The rationale is that if in such a time window + * we have not observed a high TLB use rate, it is likely that we won't observe + * it in the near future. 
In that case, once a time window expires we downsize + * the TLB to match the maximum use rate observed in the window. + * + * 3. Try to keep the maximum use rate in a time window in the 30-70% range, + * since in that range performance is likely near-optimal. Recall that the TLB + * is direct mapped, so we want the use rate to be low (or at least not too + * high), since otherwise we are likely to have a significant amount of + * conflict misses. + */ +static void tlb_mmu_resize_locked(struct uc_struct *uc, CPUTLBDesc *desc, CPUTLBDescFast *fast, + int64_t now) +{ + size_t old_size = tlb_n_entries(fast); + size_t rate; + size_t new_size = old_size; + int64_t window_len_ms = 100; + int64_t window_len_ns = window_len_ms * 1000 * 1000; + bool window_expired = now > desc->window_begin_ns + window_len_ns; + + if (desc->n_used_entries > desc->window_max_entries) { + desc->window_max_entries = desc->n_used_entries; + } + rate = desc->window_max_entries * 100 / old_size; + + if (rate > 70) { + new_size = MIN(old_size << 1, 1ULL << CPU_TLB_DYN_MAX_BITS); + } else if (rate < 30 && window_expired) { + size_t ceil = pow2ceil(desc->window_max_entries); + size_t expected_rate = desc->window_max_entries * 100 / ceil; + + /* + * Avoid undersizing when the max number of entries seen is just below + * a pow2. For instance, if max_entries == 1025, the expected use rate + * would be 1025/2048==50%. However, if max_entries == 1023, we'd get + * 1023/1024==99.9% use rate, so we'd likely end up doubling the size + * later. Thus, make sure that the expected use rate remains below 70%. + * (and since we double the size, that means the lowest rate we'd + * expect to get is 35%, which is still in the 30-70% range where + * we consider that the size is appropriate.) + */ + if (expected_rate > 70) { + ceil *= 2; + } + new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS); + } + + if (new_size == old_size) { + if (window_expired) { + tlb_window_reset(desc, now, desc->n_used_entries); + } + return; + } + + g_free(fast->table); + g_free(desc->iotlb); + + tlb_window_reset(desc, now, 0); + /* desc->n_used_entries is cleared by the caller */ + fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; + fast->table = g_try_new(CPUTLBEntry, new_size); + desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); + if (desc->iotlb) { + memset(desc->iotlb, 0, sizeof(CPUIOTLBEntry) * new_size); + } + + /* + * If the allocations fail, try smaller sizes. We just freed some + * memory, so going back to half of new_size has a good chance of working. + * Increased memory pressure elsewhere in the system might cause the + * allocations to fail though, so we progressively reduce the allocation + * size, aborting if we cannot even allocate the smallest TLB we support. 
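+     * For example (sizes are illustrative): if a 4096-entry table fails
+     * to allocate, the loop below retries with 2048, then 1024, and so
+     * on, aborting only once even (1 << CPU_TLB_DYN_MIN_BITS) entries
+     * cannot be allocated.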
+ */ + while (fast->table == NULL || desc->iotlb == NULL) { + if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { + fprintf(stderr, "%s: %s.\n", __func__, strerror(errno)); + abort(); // FIXME: do not abort + } + new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS); + fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; + + g_free(fast->table); + g_free(desc->iotlb); + fast->table = g_try_new(CPUTLBEntry, new_size); + desc->iotlb = g_try_new(CPUIOTLBEntry, new_size); + } +} + +static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) +{ + desc->n_used_entries = 0; + desc->large_page_addr = -1; + desc->large_page_mask = -1; + desc->vindex = 0; + memset(fast->table, -1, sizeof_tlb(fast)); + memset(desc->vtable, -1, sizeof(desc->vtable)); +} + +static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx, + int64_t now) +{ + CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; + CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx]; + + tlb_mmu_resize_locked(env->uc, desc, fast, now); + tlb_mmu_flush_locked(desc, fast); +} + +static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) +{ + size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; + + tlb_window_reset(desc, now, 0); + desc->n_used_entries = 0; + fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; + fast->table = g_new(CPUTLBEntry, n_entries); + desc->iotlb = g_new(CPUIOTLBEntry, n_entries); + tlb_mmu_flush_locked(desc, fast); +} + +static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) +{ + env_tlb(env)->d[mmu_idx].n_used_entries++; +} + +static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx) +{ + env_tlb(env)->d[mmu_idx].n_used_entries--; +} + +void tlb_init(CPUState *cpu) +{ + CPUArchState *env = cpu->env_ptr; + int64_t now = get_clock_realtime(); + int i; + + /* All tlbs are initialized flushed. */ + env_tlb(env)->c.dirty = 0; + + for (i = 0; i < NB_MMU_MODES; i++) { + tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now); + } +} + +/* flush_all_helper: run fn across all cpus + * + * If the wait flag is set then the src cpu's helper will be queued as + * "safe" work and the loop exited creating a synchronisation point + * where all queued work will be finished before execution starts + * again. 
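+ *
+ * In this Unicorn import the cross-vCPU broadcast is compiled out (the
+ * function body below is under #if 0) because emulation is single-vCPU;
+ * callers therefore invoke fn on the source cpu themselves.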
+ */ +static void flush_all_helper(CPUState *src, run_on_cpu_func fn, + run_on_cpu_data d) +{ +#if 0 + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (cpu != src) { + // async_run_on_cpu(cpu, fn, d); + fn(cpu, d); + } + } +#endif +} + +static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) +{ + CPUArchState *env = cpu->env_ptr; + uint16_t asked = data.host_int; + uint16_t all_dirty, work, to_clean; + int64_t now = get_clock_realtime(); + + all_dirty = env_tlb(env)->c.dirty; + to_clean = asked & all_dirty; + all_dirty &= ~to_clean; + env_tlb(env)->c.dirty = all_dirty; + + for (work = to_clean; work != 0; work &= work - 1) { + int mmu_idx = ctz32(work); + tlb_flush_one_mmuidx_locked(env, mmu_idx, now); + } + + cpu_tb_jmp_cache_clear(cpu); + + if (to_clean == ALL_MMUIDX_BITS) { + env_tlb(env)->c.full_flush_count = env_tlb(env)->c.full_flush_count + 1; + } else { + env_tlb(env)->c.part_flush_count = env_tlb(env)->c.part_flush_count + ctpop16(to_clean); + if (to_clean != asked) { + env_tlb(env)->c.elide_flush_count = env_tlb(env)->c.elide_flush_count + ctpop16(asked & ~to_clean); + } + } +} + +void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) +{ + //if (cpu->created && !qemu_cpu_is_self(cpu)) { + // tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); + //} else { + tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); + //} +} + +void tlb_flush(CPUState *cpu) +{ + tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); +} + +void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) +{ + const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; + + flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); + fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); +} + +void tlb_flush_all_cpus(CPUState *src_cpu) +{ + tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS); +} + +void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) +{ + const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; + + flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); + fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); +} + +void tlb_flush_all_cpus_synced(CPUState *src_cpu) +{ + tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS); +} + +static inline bool tlb_hit_page_anyprot(struct uc_struct *uc, CPUTLBEntry *tlb_entry, + target_ulong page) +{ + return tlb_hit_page(uc, tlb_entry->addr_read, page) || + tlb_hit_page(uc, tlb_addr_write(tlb_entry), page) || + tlb_hit_page(uc, tlb_entry->addr_code, page); +} + +/** + * tlb_entry_is_empty - return true if the entry is not in use + * @te: pointer to CPUTLBEntry + */ +static inline bool tlb_entry_is_empty(const CPUTLBEntry *te) +{ + return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; +} + +/* Called with tlb_c.lock held */ +static inline bool tlb_flush_entry_locked(struct uc_struct *uc, CPUTLBEntry *tlb_entry, + target_ulong page) +{ + if (tlb_hit_page_anyprot(uc, tlb_entry, page)) { + memset(tlb_entry, -1, sizeof(*tlb_entry)); + return true; + } + return false; +} + +/* Called with tlb_c.lock held */ +static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, + target_ulong page) +{ + CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx]; + int k; + + // assert_cpu_is_self(env_cpu(env)); + for (k = 0; k < CPU_VTLB_SIZE; k++) { + if (tlb_flush_entry_locked(env->uc, &d->vtable[k], page)) { + tlb_n_used_entries_dec(env, mmu_idx); + } + } +} + +static void tlb_flush_page_locked(CPUArchState *env, int midx, + target_ulong page) +{ + target_ulong lp_addr = 
env_tlb(env)->d[midx].large_page_addr; + target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask; + + /* Check if we need to flush due to large pages. */ + if ((page & lp_mask) == lp_addr) { + tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); + } else { + if (tlb_flush_entry_locked(env->uc, tlb_entry(env, midx, page), page)) { + tlb_n_used_entries_dec(env, midx); + } + tlb_flush_vtlb_page_locked(env, midx, page); + } +} + +/** + * tlb_flush_page_by_mmuidx_async_0: + * @cpu: cpu on which to flush + * @addr: page of virtual address to flush + * @idxmap: set of mmu_idx to flush + * + * Helper for tlb_flush_page_by_mmuidx and friends, flush one page + * at @addr from the tlbs indicated by @idxmap from @cpu. + */ +static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, + target_ulong addr, + uint16_t idxmap) +{ + CPUArchState *env = cpu->env_ptr; + int mmu_idx; + + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + if ((idxmap >> mmu_idx) & 1) { + tlb_flush_page_locked(env, mmu_idx, addr); + } + } + + tb_flush_jmp_cache(cpu, addr); +} + +/** + * tlb_flush_page_by_mmuidx_async_1: + * @cpu: cpu on which to flush + * @data: encoded addr + idxmap + * + * Helper for tlb_flush_page_by_mmuidx and friends, called through + * async_run_on_cpu. The idxmap parameter is encoded in the page + * offset of the target_ptr field. This limits the set of mmu_idx + * that can be passed via this method. + */ +static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, + run_on_cpu_data data) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = cpu->uc; +#endif + target_ulong addr_and_idxmap = (target_ulong) data.target_ptr; + target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK; + uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; + + tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); +} + +typedef struct { + target_ulong addr; + uint16_t idxmap; +} TLBFlushPageByMMUIdxData; + +/** + * tlb_flush_page_by_mmuidx_async_2: + * @cpu: cpu on which to flush + * @data: allocated addr + idxmap + * + * Helper for tlb_flush_page_by_mmuidx and friends, called through + * async_run_on_cpu. The addr+idxmap parameters are stored in a + * TLBFlushPageByMMUIdxData structure that has been allocated + * specifically for this helper. Free the structure when done. + */ +static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu, + run_on_cpu_data data) +{ + TLBFlushPageByMMUIdxData *d = data.host_ptr; + + tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap); + g_free(d); +} + +void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = cpu->uc; +#endif + + /* This should already be page aligned */ + addr &= TARGET_PAGE_MASK; + + // if (qemu_cpu_is_self(cpu)) { + tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); + //} +#if 0 + else if (idxmap < TARGET_PAGE_SIZE) { + /* + * Most targets have only a few mmu_idx. In the case where + * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid + * allocating memory for this operation. + */ + tlb_flush_page_by_mmuidx_async_1(cpu, RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + } else { + TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1); + + /* Otherwise allocate a structure, freed by the worker. 
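+         * (The worker is tlb_flush_page_by_mmuidx_async_2 above, which
+         * g_free()s the TLBFlushPageByMMUIdxData once the flush has run.)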
*/ + d->addr = addr; + d->idxmap = idxmap; + tlb_flush_page_by_mmuidx_async_2(cpu, RUN_ON_CPU_HOST_PTR(d)); + } +#endif +} + +void tlb_flush_page(CPUState *cpu, target_ulong addr) +{ + tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); +} + +void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, + uint16_t idxmap) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = src_cpu->uc; +#endif + + /* This should already be page aligned */ + addr &= TARGET_PAGE_MASK; + + /* + * Allocate memory to hold addr+idxmap only when needed. + * See tlb_flush_page_by_mmuidx for details. + */ + if (idxmap < TARGET_PAGE_SIZE) { + flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, + RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + } else { +#if 0 + CPUState *dst_cpu; + + /* Allocate a separate data block for each destination cpu. */ + CPU_FOREACH(dst_cpu) { + if (dst_cpu != src_cpu) { + TLBFlushPageByMMUIdxData *d + = g_new(TLBFlushPageByMMUIdxData, 1); + + d->addr = addr; + d->idxmap = idxmap; + tlb_flush_page_by_mmuidx_async_2(dst_cpu, RUN_ON_CPU_HOST_PTR(d)); + } + } +#endif + } + + tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap); +} + +void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) +{ + tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); +} + +void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, + target_ulong addr, + uint16_t idxmap) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = src_cpu->uc; +#endif + + /* This should already be page aligned */ + addr &= TARGET_PAGE_MASK; + + /* + * Allocate memory to hold addr+idxmap only when needed. + * See tlb_flush_page_by_mmuidx for details. + */ + if (idxmap < TARGET_PAGE_SIZE) { + flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, + RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + tlb_flush_page_by_mmuidx_async_1(src_cpu, RUN_ON_CPU_TARGET_PTR(addr | idxmap)); + } else { + //CPUState *dst_cpu; + TLBFlushPageByMMUIdxData *d; + +#if 0 + /* Allocate a separate data block for each destination cpu. */ + CPU_FOREACH(dst_cpu) { + if (dst_cpu != src_cpu) { + d = g_new(TLBFlushPageByMMUIdxData, 1); + d->addr = addr; + d->idxmap = idxmap; + tlb_flush_page_by_mmuidx_async_2(dst_cpu, RUN_ON_CPU_HOST_PTR(d)); + } + } +#endif + + d = g_new(TLBFlushPageByMMUIdxData, 1); + d->addr = addr; + d->idxmap = idxmap; + tlb_flush_page_by_mmuidx_async_2(src_cpu, RUN_ON_CPU_HOST_PTR(d)); + } +} + +void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr) +{ + tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS); +} + +/* update the TLBs so that writes to code in the virtual page 'addr' + can be detected */ +void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr) +{ + cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, + DIRTY_MEMORY_CODE); +} + +/* update the TLB so that writes in physical page 'phys_addr' are no longer + tested for self modifying code */ +void tlb_unprotect_code(struct uc_struct *uc, ram_addr_t ram_addr) +{ + cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); +} + + +/* + * Dirty write flag handling + * + * When the TCG code writes to a location it looks up the address in + * the TLB and uses that data to compute the final address. If any of + * the lower bits of the address are set then the slow path is forced. + * There are a number of reasons to do this but for normal RAM the + * most usual is detecting writes to code regions which may invalidate + * generated code. 
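+ *
+ * The slow-path conditions are encoded as flag bits kept below
+ * TARGET_PAGE_BITS in the TLB entry address; the ones tested here are
+ * TLB_INVALID_MASK, TLB_MMIO, TLB_DISCARD_WRITE and TLB_NOTDIRTY.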
+ * + * Other vCPUs might be reading their TLBs during guest execution, so we update + * te->addr_write with atomic_set. We don't need to worry about this for + * oversized guests as MTTCG is disabled for them. + * + * Called with tlb_c.lock held. + */ +static void tlb_reset_dirty_range_locked(struct uc_struct *uc, CPUTLBEntry *tlb_entry, + uintptr_t start, uintptr_t length) +{ + uintptr_t addr = tlb_entry->addr_write; + + if ((addr & (TLB_INVALID_MASK | TLB_MMIO | + TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { + addr &= TARGET_PAGE_MASK; + addr += tlb_entry->addend; + if ((addr - start) < length) { +#if TCG_OVERSIZED_GUEST + tlb_entry->addr_write |= TLB_NOTDIRTY; +#else + tlb_entry->addr_write = tlb_entry->addr_write | TLB_NOTDIRTY; +#endif + } + } +} + +/* + * Called with tlb_c.lock held. + * Called only from the vCPU context, i.e. the TLB's owner thread. + */ +static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) +{ + *d = *s; +} + +/* This is a cross vCPU call (i.e. another vCPU resetting the flags of + * the target vCPU). + * We must take tlb_c.lock to avoid racing with another vCPU update. The only + * thing actually updated is the target TLB entry ->addr_write flags. + */ +void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) +{ + struct uc_struct *uc = cpu->uc; + CPUArchState *env; + + int mmu_idx; + + env = cpu->env_ptr; + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + unsigned int i; + unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); + + for (i = 0; i < n; i++) { + tlb_reset_dirty_range_locked(uc, &env_tlb(env)->f[mmu_idx].table[i], + start1, length); + } + + for (i = 0; i < CPU_VTLB_SIZE; i++) { + tlb_reset_dirty_range_locked(uc, &env_tlb(env)->d[mmu_idx].vtable[i], + start1, length); + } + } +} + +/* Called with tlb_c.lock held */ +static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, + target_ulong vaddr) +{ + if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { + tlb_entry->addr_write = vaddr; + } +} + +/* update the TLB corresponding to virtual page vaddr + so that it is no longer dirty */ +void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = cpu->uc; +#endif + CPUArchState *env = cpu->env_ptr; + int mmu_idx; + + // assert_cpu_is_self(cpu); + + vaddr &= TARGET_PAGE_MASK; + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); + } + + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + int k; + for (k = 0; k < CPU_VTLB_SIZE; k++) { + tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); + } + } +} + +/* Our TLB does not support large pages, so remember the area covered by + large pages and trigger a full TLB flush if these are invalidated. */ +static void tlb_add_large_page(CPUArchState *env, int mmu_idx, + target_ulong vaddr, target_ulong size) +{ + target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; + target_ulong lp_mask = ~(size - 1); + + if (lp_addr == (target_ulong)-1) { + /* No previous large page. */ + lp_addr = vaddr; + } else { + /* Extend the existing region to include the new page. + This is a compromise between unnecessary flushes and + the cost of maintaining a full variable size TLB. 
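+           For example (addresses are illustrative): a 2MiB large page
+           tracked at lp_addr 0x40000000, joined by a new entry at
+           0x40200000, widens lp_mask one bit at a time until both
+           addresses fall inside the same 4MiB region.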
*/ + lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; + while (((lp_addr ^ vaddr) & lp_mask) != 0) { + lp_mask <<= 1; + } + } + env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; + env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; +} + +/* Add a new TLB entry. At most one entry for a given virtual address + * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the + * supplied size is only used by tlb_flush_page. + * + * Called from TCG-generated code, which is under an RCU read-side + * critical section. + */ +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, MemTxAttrs attrs, int prot, + int mmu_idx, target_ulong size) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = cpu->uc; +#endif + CPUArchState *env = cpu->env_ptr; + CPUTLB *tlb = env_tlb(env); + CPUTLBDesc *desc = &tlb->d[mmu_idx]; + MemoryRegionSection *section; + unsigned int index; + target_ulong address; + target_ulong write_address; + uintptr_t addend; + CPUTLBEntry *te, tn; + hwaddr iotlb, xlat, sz, paddr_page; + target_ulong vaddr_page; + int asidx = cpu_asidx_from_attrs(cpu, attrs); + int wp_flags; + bool is_ram; + + // assert_cpu_is_self(cpu); + + if (size <= TARGET_PAGE_SIZE) { + sz = TARGET_PAGE_SIZE; + } else { + tlb_add_large_page(env, mmu_idx, vaddr, size); + sz = size; + } + vaddr_page = vaddr & TARGET_PAGE_MASK; + paddr_page = paddr & TARGET_PAGE_MASK; + + section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, + &xlat, &sz, attrs, &prot); + assert(sz >= TARGET_PAGE_SIZE); + + address = vaddr_page; + if (size < TARGET_PAGE_SIZE) { + /* Repeat the MMU check and TLB fill on every access. */ + address |= TLB_INVALID_MASK; + } + if (attrs.byte_swap) { + address |= TLB_BSWAP; + } + + is_ram = memory_region_is_ram(section->mr); + // is_romd = memory_region_is_romd(section->mr); + + if (is_ram) { + /* RAM and ROMD both have associated host memory. */ + addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; + } else { + /* I/O does not; force the host address to NULL. */ + addend = 0; + } + + write_address = address; + if (is_ram) { + iotlb = memory_region_get_ram_addr(section->mr) + xlat; + /* + * Computing is_clean is expensive; avoid all that unless + * the page is actually writable. + */ + if (prot & PAGE_WRITE) { + if (section->readonly) { + write_address |= TLB_DISCARD_WRITE; + } else if (cpu_physical_memory_is_clean(iotlb)) { + write_address |= TLB_NOTDIRTY; + } + } + } else { + /* I/O or ROMD */ + iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; + /* + * Writes to romd devices must go through MMIO to enable write. + * Reads to romd devices go through the ram_ptr found above, + * but of course reads to I/O must go through MMIO. + */ + write_address |= TLB_MMIO; + //if (!is_romd) { + address = write_address; + //} + } + + wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, + TARGET_PAGE_SIZE); + + index = tlb_index(env, mmu_idx, vaddr_page); + te = tlb_entry(env, mmu_idx, vaddr_page); + + /* Note that the tlb is no longer clean. */ + tlb->c.dirty |= 1 << mmu_idx; + + /* Make sure there's no cached translation for the new page. */ + tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); + + /* + * Only evict the old entry to the victim tlb if it's for a + * different page; otherwise just overwrite the stale data. 
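+     *
+     * The victim TLB is a small fully-associative buffer of
+     * CPU_VTLB_SIZE entries, filled round-robin through desc->vindex.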
+ */ + if (!tlb_hit_page_anyprot(env->uc, te, vaddr_page) && !tlb_entry_is_empty(te)) { + unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; + CPUTLBEntry *tv = &desc->vtable[vidx]; + + /* Evict the old entry into the victim tlb. */ + copy_tlb_helper_locked(tv, te); + desc->viotlb[vidx] = desc->iotlb[index]; + tlb_n_used_entries_dec(env, mmu_idx); + } + + /* refill the tlb */ + /* + * At this point iotlb contains a physical section number in the lower + * TARGET_PAGE_BITS, and either + * + the ram_addr_t of the page base of the target RAM (RAM) + * + the offset within section->mr of the page base (I/O, ROMD) + * We subtract the vaddr_page (which is page aligned and thus won't + * disturb the low bits) to give an offset which can be added to the + * (non-page-aligned) vaddr of the eventual memory access to get + * the MemoryRegion offset for the access. Note that the vaddr we + * subtract here is that of the page base, and not the same as the + * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). + */ + desc->iotlb[index].addr = iotlb - vaddr_page; + desc->iotlb[index].attrs = attrs; + + /* Now calculate the new entry */ + tn.addend = addend - vaddr_page; + if (prot & PAGE_READ) { + tn.addr_read = address; + if (wp_flags & BP_MEM_READ) { + tn.addr_read |= TLB_WATCHPOINT; + } + } else { + tn.addr_read = -1; + } + + if (prot & PAGE_EXEC) { + tn.addr_code = address; + } else { + tn.addr_code = -1; + } + + tn.addr_write = -1; + if (prot & PAGE_WRITE) { + tn.addr_write = write_address; + if (prot & PAGE_WRITE_INV) { + tn.addr_write |= TLB_INVALID_MASK; + } + if (wp_flags & BP_MEM_WRITE) { + tn.addr_write |= TLB_WATCHPOINT; + } + } + + copy_tlb_helper_locked(te, &tn); + tlb_n_used_entries_inc(env, mmu_idx); +} + +/* Add a new TLB entry, but without specifying the memory + * transaction attributes to be used. + */ +void tlb_set_page(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, int prot, + int mmu_idx, target_ulong size) +{ + tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, + prot, mmu_idx, size); +} + +static inline ram_addr_t qemu_ram_addr_from_host_nofail(struct uc_struct *uc, void *ptr) +{ + ram_addr_t ram_addr; + + ram_addr = qemu_ram_addr_from_host(uc, ptr); + if (ram_addr == RAM_ADDR_INVALID) { + // error_report("Bad ram pointer %p", ptr); + abort(); + } + return ram_addr; +} + +/* + * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the + * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must + * be discarded and looked up again (e.g. via tlb_entry()). + */ +static void tlb_fill(CPUState *cpu, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); +#ifndef NDEBUG + bool ok; + + /* + * This is not a probe, so only valid return is success; failure + * should result in exception + longjmp to the cpu loop. 
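+     *
+     * (Probing callers such as tlb_vaddr_to_host below instead pass
+     * probe == true and cope with a false return.)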
+ */ + ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr); +#else + cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr); +#endif + assert(ok); +} + +static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, + int mmu_idx, target_ulong addr, uintptr_t retaddr, + MMUAccessType access_type, MemOp op) +{ + CPUState *cpu = env_cpu(env); + struct uc_struct *uc = cpu->uc; + hwaddr mr_offset; + MemoryRegionSection *section; + MemoryRegion *mr; + uint64_t val; + MemTxResult r; + + section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + mr = section->mr; + mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + cpu->mem_io_pc = retaddr; + if (!cpu->can_do_io) { + cpu_io_recompile(cpu, retaddr); + } + + r = memory_region_dispatch_read(uc, mr, mr_offset, &val, op, iotlbentry->attrs); + if (r != MEMTX_OK) { +#if 0 + hwaddr physaddr = mr_offset + + section->offset_within_address_space - + section->offset_within_region; + + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, + mmu_idx, iotlbentry->attrs, r, retaddr); +#endif + } + + return val; +} + +static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, + int mmu_idx, uint64_t val, target_ulong addr, + uintptr_t retaddr, MemOp op) +{ + CPUState *cpu = env_cpu(env); + struct uc_struct *uc = env->uc; + hwaddr mr_offset; + MemoryRegionSection *section; + MemoryRegion *mr; + MemTxResult r; + + section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + mr = section->mr; + mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + if (!cpu->can_do_io) { + cpu_io_recompile(cpu, retaddr); + } + cpu->mem_io_pc = retaddr; + + r = memory_region_dispatch_write(uc, mr, mr_offset, val, op, iotlbentry->attrs); + if (r != MEMTX_OK) { +#if 0 + hwaddr physaddr = mr_offset + + section->offset_within_address_space - + section->offset_within_region; + + cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), + MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, + retaddr); +#endif + } +} + +static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) +{ +#if TCG_OVERSIZED_GUEST + return *(target_ulong *)((uintptr_t)entry + ofs); +#else + return *(target_ulong *)((uintptr_t)entry + ofs); +#endif +} + +/* Return true if ADDR is present in the victim tlb, and has been copied + back to the main tlb. */ +static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, + size_t elt_ofs, target_ulong page) +{ + size_t vidx; + + // assert_cpu_is_self(env_cpu(env)); + for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { + CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; + target_ulong cmp; + +#if TCG_OVERSIZED_GUEST + cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); +#else + cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); +#endif + + if (cmp == page) { + /* Found entry in victim tlb, swap tlb and iotlb. */ + CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; + + copy_tlb_helper_locked(&tmptlb, tlb); + copy_tlb_helper_locked(tlb, vtlb); + copy_tlb_helper_locked(vtlb, &tmptlb); + + CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; + CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; + tmpio = *io; *io = *vio; *vio = tmpio; + return true; + } + } + return false; +} + +/* Macro to call the above, with local variables from the use context. 
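+   For example, VICTIM_TLB_HIT(addr_code, addr) expands to
+   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, addr_code),
+   addr & TARGET_PAGE_MASK), picking up env, mmu_idx and index from the
+   caller's scope.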
*/ +#define VICTIM_TLB_HIT(TY, ADDR) \ + victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ + (ADDR) & TARGET_PAGE_MASK) + +/* + * Return a ram_addr_t for the virtual address for execution. + * + * Return -1 if we can't translate and execute from an entire page + * of RAM. This will force us to execute by loading and translating + * one insn at a time, without caching. + * + * NOTE: This function will trigger an exception if the page is + * not executable. + */ +tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, + void **hostp) +{ + struct uc_struct *uc = env->uc; + uintptr_t mmu_idx = cpu_mmu_index(env, true); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + void *p; + + if (unlikely(!tlb_hit(uc, entry->addr_code, addr))) { + if (!VICTIM_TLB_HIT(addr_code, addr)) { + tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + + if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { + /* + * The MMU protection covers a smaller range than a target + * page, so we must redo the MMU check for every insn. + */ + return -1; + } + } + assert(tlb_hit(uc, entry->addr_code, addr)); + } + + if (unlikely(entry->addr_code & TLB_MMIO)) { + /* The region is not backed by RAM. */ + if (hostp) { + *hostp = NULL; + } + return -1; + } + + p = (void *)((uintptr_t)addr + entry->addend); + if (hostp) { + *hostp = p; + } + return qemu_ram_addr_from_host_nofail(env->uc, p); +} + +tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) +{ + return get_page_addr_code_hostp(env, addr, NULL); +} + +static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, + CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) +{ + ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; + + // trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); + + if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { + struct page_collection *pages + = page_collection_lock(cpu->uc, ram_addr, ram_addr + size); + tb_invalidate_phys_page_fast(cpu->uc, pages, ram_addr, size, retaddr); + page_collection_unlock(pages); + } + + /* + * Set both VGA and migration bits for simplicity and to remove + * the notdirty callback faster. + */ + cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); + + /* We remove the notdirty callback only if the code has been flushed. */ + if (!cpu_physical_memory_is_clean(ram_addr)) { + // trace_memory_notdirty_set_dirty(mem_vaddr); + tlb_set_dirty(cpu, mem_vaddr); + } +} + +/* + * Probe for whether the specified guest access is permitted. If it is not + * permitted then an exception will be taken in the same way as if this + * were a real access (and we will not return). + * If the size is 0 or the page requires I/O access, returns NULL; otherwise, + * returns the address of the host page similar to tlb_vaddr_to_host(). 
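+ *
+ * As an illustrative (hypothetical) sketch, a target helper that needs a
+ * direct host pointer for a buffer contained in one guest page could do:
+ *
+ *     void *host = probe_access(env, addr, len, MMU_DATA_LOAD,
+ *                               cpu_mmu_index(env, false), GETPC());
+ *     if (host) {
+ *         memcpy(buf, host, len);   // plain RAM: copy directly
+ *     }
+ *
+ * A non-permitted access never returns here: tlb_fill() raises the guest
+ * exception and longjmps back to the cpu loop.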
+ */ +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = env->uc; +#endif + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr; + size_t elt_ofs = 0; + int wp_access = 0; + +#ifdef _MSC_VER + g_assert(((target_ulong)0 - (addr | TARGET_PAGE_MASK)) >= size); +#else + g_assert(-(addr | TARGET_PAGE_MASK) >= size); +#endif + + switch (access_type) { + case MMU_DATA_LOAD: + elt_ofs = offsetof(CPUTLBEntry, addr_read); + wp_access = BP_MEM_READ; + break; + case MMU_DATA_STORE: + elt_ofs = offsetof(CPUTLBEntry, addr_write); + wp_access = BP_MEM_WRITE; + break; + case MMU_INST_FETCH: + elt_ofs = offsetof(CPUTLBEntry, addr_code); + wp_access = BP_MEM_READ; + break; + default: + g_assert_not_reached(); + } + tlb_addr = tlb_read_ofs(entry, elt_ofs); + + if (unlikely(!tlb_hit(env->uc, tlb_addr, addr))) { + if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, + addr & TARGET_PAGE_MASK)) { + tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr); + /* TLB resize via tlb_fill may have moved the entry. */ + index = tlb_index(env, mmu_idx, addr); + entry = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_read_ofs(entry, elt_ofs); + } + + if (!size) { + return NULL; + } + + if (unlikely(tlb_addr & TLB_FLAGS_MASK)) { + CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + + /* Reject I/O access, or other required slow-path. */ + if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) { + return NULL; + } + + /* Handle watchpoints. */ + if (tlb_addr & TLB_WATCHPOINT) { + cpu_check_watchpoint(env_cpu(env), addr, size, + iotlbentry->attrs, wp_access, retaddr); + } + + /* Handle clean RAM pages. */ + if (tlb_addr & TLB_NOTDIRTY) { + notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); + } + } + + return (void *)((uintptr_t)addr + entry->addend); +} + +void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, + MMUAccessType access_type, int mmu_idx) +{ + struct uc_struct *uc = env->uc; + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr, page; + size_t elt_ofs = 0; + + switch (access_type) { + case MMU_DATA_LOAD: + elt_ofs = offsetof(CPUTLBEntry, addr_read); + break; + case MMU_DATA_STORE: + elt_ofs = offsetof(CPUTLBEntry, addr_write); + break; + case MMU_INST_FETCH: + elt_ofs = offsetof(CPUTLBEntry, addr_code); + break; + default: + g_assert_not_reached(); + } + + page = addr & TARGET_PAGE_MASK; + tlb_addr = tlb_read_ofs(entry, elt_ofs); + + if (!tlb_hit_page(uc, tlb_addr, page)) { + uintptr_t index = tlb_index(env, mmu_idx, addr); + + if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) { + CPUState *cs = env_cpu(env); + CPUClass *cc = CPU_GET_CLASS(cs); + + if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) { + /* Non-faulting page table read failed. */ + return NULL; + } + + /* TLB resize via tlb_fill may have moved the entry. */ + entry = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_read_ofs(entry, elt_ofs); + } + + if (tlb_addr & ~TARGET_PAGE_MASK) { + /* IO access */ + return NULL; + } + + return (void *)((uintptr_t)addr + entry->addend); +} + +/* Probe for a read-modify-write atomic operation. Do not allow unaligned + * operations, or io operations to proceed. Return the host address. 
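+ *
+ * Note the contrast with tlb_vaddr_to_host() just above, which never
+ * faults; a hypothetical fast path built on it might read:
+ *
+ *     void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD,
+ *                                    cpu_mmu_index(env, false));
+ *     uint32_t val = host ? ldl_p(host)              // RAM: direct load
+ *                         : cpu_ldl_data(env, addr); // IO/unmapped: slow path
+ *
+ * assuming the usual bswap.h/cpu_ldst.h helpers are in scope.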
*/ +static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = env->uc; +#endif + size_t mmu_idx = get_mmuidx(oi); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = tlb_addr_write(tlbe); + MemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + int s_bits = mop & MO_SIZE; + void *hostaddr; + + /* Adjust the given return address. */ + retaddr -= GETPC_ADJ; + + /* Enforce guest required alignment. */ + if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { + /* ??? Maybe indicate atomic op to cpu_unaligned_access */ + cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + } + + /* Enforce qemu required alignment. */ + if (unlikely(addr & ((1 << s_bits) - 1))) { + /* We get here if guest alignment was not requested, + or was not enforced by cpu_unaligned_access above. + We might widen the access and emulate, but for now + mark an exception and exit the cpu loop. */ + goto stop_the_world; + } + + /* Check TLB entry and enforce page permissions. */ + if (!tlb_hit(env->uc, tlb_addr, addr)) { + if (!VICTIM_TLB_HIT(addr_write, addr)) { + tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, + mmu_idx, retaddr); + index = tlb_index(env, mmu_idx, addr); + tlbe = tlb_entry(env, mmu_idx, addr); + } + tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; + } + + /* Notice an IO access or a needs-MMU-lookup access */ + if (unlikely(tlb_addr & TLB_MMIO)) { + /* There's really nothing that can be done to + support this apart from stop-the-world. */ + goto stop_the_world; + } + + /* Let the guest notice RMW on a write-only page. */ + if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { + tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, + mmu_idx, retaddr); + /* Since we don't support reads and writes to different addresses, + and we do have the proper page loaded for write, this shouldn't + ever return. But just in case, handle via stop-the-world. */ + goto stop_the_world; + } + + hostaddr = (void *)((uintptr_t)addr + tlbe->addend); + + if (unlikely(tlb_addr & TLB_NOTDIRTY)) { + notdirty_write(env_cpu(env), addr, 1 << s_bits, + &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); + } + + return hostaddr; + + stop_the_world: + cpu_loop_exit_atomic(env_cpu(env), retaddr); +} + +/* + * Load Helpers + * + * We support two different access types. SOFTMMU_CODE_ACCESS is + * specifically for reading instructions from system memory. It is + * called by the translation loop and in some helpers where the code + * is disassembled. It shouldn't be called directly by guest code. 
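+ *
+ * The TCGMemOpIdx threaded through all of these packs the MemOp together
+ * with the mmu index, for example:
+ *
+ *     TCGMemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
+ *     MemOp op = get_memop(oi);        // MO_LEUL again
+ *     unsigned idx = get_mmuidx(oi);   // mmu_idx again
+ *
+ * which is exactly how load_helper() and store_helper() below recover
+ * both values.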
+ */
+
+typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
+                                TCGMemOpIdx oi, uintptr_t retaddr);
+
+static inline uint64_t
+load_memop(const void *haddr, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        return ldub_p(haddr);
+    case MO_BEUW:
+        return lduw_be_p(haddr);
+    case MO_LEUW:
+        return lduw_le_p(haddr);
+    case MO_BEUL:
+        return (uint32_t)ldl_be_p(haddr);
+    case MO_LEUL:
+        return (uint32_t)ldl_le_p(haddr);
+    case MO_BEQ:
+        return ldq_be_p(haddr);
+    case MO_LEQ:
+        return ldq_le_p(haddr);
+    default:
+        // qemu_build_not_reached();
+        return 0;
+    }
+}
+
+static inline uint64_t
+load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+            uintptr_t retaddr, MemOp op, bool code_read,
+            FullLoadHelper *full_load)
+{
+    uintptr_t mmu_idx = get_mmuidx(oi);
+    uintptr_t index = tlb_index(env, mmu_idx, addr);
+    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+    const size_t tlb_off = code_read ?
+        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
+    const MMUAccessType access_type =
+        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    void *haddr;
+    uint64_t res;
+    size_t size = memop_size(op);
+    int error_code;
+    struct hook *hook;
+    bool handled;
+    HOOK_FOREACH_VAR_DECLARE;
+    struct uc_struct *uc = env->uc;
+    MemoryRegion *mr = memory_mapping(uc, addr);
+
+    // memory might still be unmapped while reading or fetching
+    if (mr == NULL) {
+        handled = false;
+        if (code_read) {
+            // code fetching
+            error_code = UC_ERR_FETCH_UNMAPPED;
+            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_UNMAPPED) {
+                if (hook->to_delete)
+                    continue;
+                if (!HOOK_BOUND_CHECK(hook, addr))
+                    continue;
+                if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, addr, size - uc->size_recur_mem, 0, hook->user_data)))
+                    break;
+
+                // the last callback may have already asked to stop emulation
+                if (uc->stop_request)
+                    break;
+            }
+        } else {
+            // data reading
+            error_code = UC_ERR_READ_UNMAPPED;
+            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_UNMAPPED) {
+                if (hook->to_delete)
+                    continue;
+                if (!HOOK_BOUND_CHECK(hook, addr))
+                    continue;
+                if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, addr, size - uc->size_recur_mem, 0, hook->user_data)))
+                    break;
+
+                // the last callback may have already asked to stop emulation
+                if (uc->stop_request)
+                    break;
+            }
+        }
+
+        if (handled) {
+            uc->invalid_error = UC_ERR_OK;
+            mr = memory_mapping(uc, addr);
+            if (mr == NULL) {
+                uc->invalid_error = UC_ERR_MAP;
+                cpu_exit(uc->cpu);
+                return 0;
+            }
+        } else {
+            uc->invalid_addr = addr;
+            uc->invalid_error = error_code;
+            // printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr);
+            cpu_exit(uc->cpu);
+            return 0;
+        }
+    }
+
+    // now it is a read on mapped memory
+    if (!code_read) {
+        // this is data reading
+        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ) {
+            if (hook->to_delete)
+                continue;
+            if (!HOOK_BOUND_CHECK(hook, addr))
+                continue;
+            ((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ, addr, size, 0, hook->user_data);
+
+            // the last callback may have already asked to stop emulation
+            if (uc->stop_request)
+                break;
+        }
+
+        // callback on non-readable memory
+        if (mr != NULL && !(mr->perms & UC_PROT_READ)) { // non-readable
+            handled = false;
+            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_PROT) {
+                if (hook->to_delete)
+                    continue;
+                if (!HOOK_BOUND_CHECK(hook, addr))
+                    continue;
+                if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, addr,
+                        size - uc->size_recur_mem, 0, hook->user_data)))
+                    break;
+
+                // the last callback may have already asked to stop emulation
+                if (uc->stop_request)
+                    break;
+            }
+
+            if (handled) {
+                uc->invalid_error = UC_ERR_OK;
+            } else {
+                uc->invalid_addr = addr;
+                uc->invalid_error = UC_ERR_READ_PROT;
+                // printf("***** Invalid memory read (non-readable) at " TARGET_FMT_lx "\n", addr);
+                cpu_exit(uc->cpu);
+                return 0;
+            }
+        }
+    } else {
+        // code fetching
+        // Unicorn: callback on fetch from NX
+        if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable
+            handled = false;
+            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_PROT) {
+                if (hook->to_delete)
+                    continue;
+                if (!HOOK_BOUND_CHECK(hook, addr))
+                    continue;
+                if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, addr, size - uc->size_recur_mem, 0, hook->user_data)))
+                    break;
+
+                // the last callback may have already asked to stop emulation
+                if (uc->stop_request)
+                    break;
+            }
+
+            if (handled) {
+                uc->invalid_error = UC_ERR_OK;
+            } else {
+                uc->invalid_addr = addr;
+                uc->invalid_error = UC_ERR_FETCH_PROT;
+                // printf("***** Invalid fetch (non-executable) at " TARGET_FMT_lx "\n", addr);
+                cpu_exit(uc->cpu);
+                return 0;
+            }
+        }
+    }
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, access_type,
+                             mmu_idx, retaddr);
+    }
+
+    /* If the TLB entry is for a different page, reload and try again. */
+    if (!tlb_hit(env->uc, tlb_addr, addr)) {
+        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
+                            addr & TARGET_PAGE_MASK)) {
+            tlb_fill(env_cpu(env), addr, size,
+                     access_type, mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
+        }
+        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+        tlb_addr &= ~TLB_INVALID_MASK;
+    }
+
+    /* Handle anything that isn't just a straight memory access. */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        CPUIOTLBEntry *iotlbentry;
+        bool need_swap;
+
+        /* For anything that is unaligned, recurse through full_load. */
+        if ((addr & (size - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+
+        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+
+        /* Handle watchpoints. */
+        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
+            /* On watchpoint hit, this will longjmp out. */
+            cpu_check_watchpoint(env_cpu(env), addr, size,
+                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
+        }
+
+        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
+
+        /* Handle I/O access. */
+        if (likely(tlb_addr & TLB_MMIO)) {
+            res = io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
+                           access_type, op ^ (need_swap * MO_BSWAP));
+            goto _out;
+        }
+
+        haddr = (void *)((uintptr_t)addr + entry->addend);
+
+        /*
+         * Keep these two load_memop separate to ensure that the compiler
+         * is able to fold the entire function to a single instruction.
+         * There is a build-time assert inside to remind you of this. ;-)
+         */
+        if (unlikely(need_swap)) {
+            res = load_memop(haddr, op ^ MO_BSWAP);
+            goto _out;
+        }
+        res = load_memop(haddr, op);
+        goto _out;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO). */
+    if (size > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
+                    >= TARGET_PAGE_SIZE)) {
+        target_ulong addr1, addr2;
+        uint64_t r1, r2;
+        unsigned shift;
+    do_unaligned_access:
+        addr1 = addr & ~((target_ulong)size - 1);
+        addr2 = addr1 + size;
+        r1 = full_load(env, addr1, oi, retaddr);
+        r2 = full_load(env, addr2, oi, retaddr);
+        shift = (addr & (size - 1)) * 8;
+
+        if (memop_big_endian(op)) {
+            /* Big-endian combine.
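+             * As a worked example (with 0x1000-byte pages): a 4-byte
+             * big-endian load at addr 0x1fff has addr1 = 0x1ffc, addr2 =
+             * 0x2000 and shift = 24, so r1 << 24 keeps only the byte at
+             * 0x1fff, r2 >> 8 supplies the bytes at 0x2000..0x2002, and
+             * the final mask trims the result to size * 8 = 32 bits.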
+             */
+            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
+        } else {
+            /* Little-endian combine. */
+            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
+        }
+        res = res & MAKE_64BIT_MASK(0, size * 8);
+        goto _out;
+    }
+
+    haddr = (void *)((uintptr_t)addr + entry->addend);
+    res = load_memop(haddr, op);
+
+_out:
+    // Unicorn: callback on successful data read
+    if (!code_read) {
+        if (!uc->size_recur_mem) { // disable the read callback when in a recursive call
+            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_AFTER) {
+                if (hook->to_delete)
+                    continue;
+                if (!HOOK_BOUND_CHECK(hook, addr))
+                    continue;
+                ((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ_AFTER, addr, size, res, hook->user_data);
+
+                // the last callback may have already asked to stop emulation
+                if (uc->stop_request)
+                    break;
+            }
+        }
+    }
+
+    return res;
+}
+
+/*
+ * For the benefit of TCG generated code, we want to avoid the
+ * complication of ABI-specific return type promotion and always
+ * return a value extended to the register size of the host. This is
+ * tcg_target_long, except in the case of a 32-bit host and 64-bit
+ * data, and for that we always have uint64_t.
+ *
+ * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
+ */
+
+static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
+                              TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
+}
+
+tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
+                                     TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return full_ldub_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
+                                 TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
+                       full_le_lduw_mmu);
+}
+
+tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
+                                    TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return full_le_lduw_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
+                                 TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
+                       full_be_lduw_mmu);
+}
+
+tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
+                                    TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return full_be_lduw_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
+                                 TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
+                       full_le_ldul_mmu);
+}
+
+tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
+                                    TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return full_le_ldul_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
+                                 TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
+                       full_be_ldul_mmu);
+}
+
+tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
+                                    TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return full_be_ldul_mmu(env, addr, oi, retaddr);
+}
+
+uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
+                           TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
+                       helper_le_ldq_mmu);
+}
+
+uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
+                           TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
+                       helper_be_ldq_mmu);
+}
+
+/*
+ * Provide signed versions of the load routines as well.
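+ * For example, the signed halfword helper below is just a cast of the
+ * zero-extended result:
+ *
+ *     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
+ *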
We can of course + * avoid this for 64-bit data, or for 32-bit data on 32-bit host. + */ + + +tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); +} + +tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); +} + +tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); +} + +tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); +} + +tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); +} + +/* + * Load helpers for cpu_ldst.h. + */ + +static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t retaddr, + MemOp op, FullLoadHelper *full_load) // qq +{ + TCGMemOpIdx oi; + uint64_t ret; + + op &= ~MO_SIGN; + oi = make_memop_idx(op, mmu_idx); + ret = full_load(env, addr, oi, retaddr); + + return ret; +} + +uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu); +} + +int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB, + full_ldub_mmu); +} + +uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUW, + MO_TE == MO_LE + ? full_le_lduw_mmu : full_be_lduw_mmu); +} + +int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_TESW, + MO_TE == MO_LE + ? full_le_lduw_mmu : full_be_lduw_mmu); +} + +uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUL, + MO_TE == MO_LE + ? full_le_ldul_mmu : full_be_ldul_mmu); +} + +uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEQ, + MO_TE == MO_LE + ? 
helper_le_ldq_mmu : helper_be_ldq_mmu);
+}
+
+uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
+                          uintptr_t retaddr)
+{
+    return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
+{
+    return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint32_t cpu_lduw_data_ra(CPUArchState *env, target_ulong ptr,
+                          uintptr_t retaddr)
+{
+    return cpu_lduw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+int cpu_ldsw_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
+{
+    return cpu_ldsw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint32_t cpu_ldl_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
+{
+    return cpu_ldl_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint64_t cpu_ldq_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
+{
+    return cpu_ldq_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
+{
+    return cpu_ldub_data_ra(env, ptr, 0);
+}
+
+int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
+{
+    return cpu_ldsb_data_ra(env, ptr, 0);
+}
+
+uint32_t cpu_lduw_data(CPUArchState *env, target_ulong ptr)
+{
+    return cpu_lduw_data_ra(env, ptr, 0);
+}
+
+int cpu_ldsw_data(CPUArchState *env, target_ulong ptr)
+{
+    return cpu_ldsw_data_ra(env, ptr, 0);
+}
+
+uint32_t cpu_ldl_data(CPUArchState *env, target_ulong ptr)
+{
+    return cpu_ldl_data_ra(env, ptr, 0);
+}
+
+uint64_t cpu_ldq_data(CPUArchState *env, target_ulong ptr)
+{
+    return cpu_ldq_data_ra(env, ptr, 0);
+}
+
+/*
+ * Store Helpers
+ */
+
+static inline void
+store_memop(void *haddr, uint64_t val, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        stb_p(haddr, val);
+        break;
+    case MO_BEUW:
+        stw_be_p(haddr, val);
+        break;
+    case MO_LEUW:
+        stw_le_p(haddr, val);
+        break;
+    case MO_BEUL:
+        stl_be_p(haddr, val);
+        break;
+    case MO_LEUL:
+        stl_le_p(haddr, val);
+        break;
+    case MO_BEQ:
+        stq_be_p(haddr, val);
+        break;
+    case MO_LEQ:
+        stq_le_p(haddr, val);
+        break;
+    default:
+        // qemu_build_not_reached();
+        break;
+    }
+}
+
+static inline void
+store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
+             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
+{
+    struct uc_struct *uc = env->uc;
+    HOOK_FOREACH_VAR_DECLARE;
+    uintptr_t mmu_idx = get_mmuidx(oi);
+    uintptr_t index = tlb_index(env, mmu_idx, addr);
+    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    target_ulong tlb_addr = tlb_addr_write(entry);
+    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    void *haddr;
+    size_t size = memop_size(op);
+    struct hook *hook;
+    bool handled;
+    MemoryRegion *mr = memory_mapping(uc, addr);
+
+    if (!uc->size_recur_mem) { // disable the write callback when in a recursive call
+        // Unicorn: callback on memory write
+        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE) {
+            if (hook->to_delete)
+                continue;
+            if (!HOOK_BOUND_CHECK(hook, addr))
+                continue;
+            ((uc_cb_hookmem_t)hook->callback)(uc, UC_MEM_WRITE, addr, size, val, hook->user_data);
+
+            // the last callback may have already asked to stop emulation
+            if (uc->stop_request)
+                break;
+        }
+    }
+
+    // Unicorn: callback on invalid memory
+    if (mr == NULL) {
+        handled = false;
+        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_UNMAPPED) {
+            if (hook->to_delete)
+                continue;
+            if (!HOOK_BOUND_CHECK(hook, addr))
+                continue;
+            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc,
+                    UC_MEM_WRITE_UNMAPPED, addr, size, val, hook->user_data)))
+                break;
+
+            // the last callback may have already asked to stop emulation
+            if (uc->stop_request)
+                break;
+        }
+
+        if (!handled) {
+            // save error & quit
+            uc->invalid_addr = addr;
+            uc->invalid_error = UC_ERR_WRITE_UNMAPPED;
+            // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
+            cpu_exit(uc->cpu);
+            return;
+        } else {
+            uc->invalid_error = UC_ERR_OK;
+            mr = memory_mapping(uc, addr);
+            if (mr == NULL) {
+                uc->invalid_error = UC_ERR_MAP;
+                cpu_exit(uc->cpu);
+                return;
+            }
+        }
+    }
+
+    // Unicorn: callback on non-writable memory
+    if (mr != NULL && !(mr->perms & UC_PROT_WRITE)) { // non-writable
+        // printf("not writable memory???\n");
+        handled = false;
+        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_PROT) {
+            if (hook->to_delete)
+                continue;
+            if (!HOOK_BOUND_CHECK(hook, addr))
+                continue;
+            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_PROT, addr, size, val, hook->user_data)))
+                break;
+
+            // the last callback may have already asked to stop emulation
+            if (uc->stop_request)
+                break;
+        }
+
+        if (handled) {
+            uc->invalid_error = UC_ERR_OK;
+        } else {
+            uc->invalid_addr = addr;
+            uc->invalid_error = UC_ERR_WRITE_PROT;
+            // printf("***** Invalid memory write (ro) at " TARGET_FMT_lx "\n", addr);
+            cpu_exit(uc->cpu);
+            return;
+        }
+    }
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+
+    /* If the TLB entry is for a different page, reload and try again. */
+    if (!tlb_hit(env->uc, tlb_addr, addr)) {
+        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
+                            addr & TARGET_PAGE_MASK)) {
+            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            entry = tlb_entry(env, mmu_idx, addr);
+        }
+        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
+    }
+
+    /* Handle anything that isn't just a straight memory access. */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        CPUIOTLBEntry *iotlbentry;
+        bool need_swap;
+
+        /* For anything that is unaligned, recurse through byte stores. */
+        if ((addr & (size - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+
+        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+
+        /* Handle watchpoints. */
+        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
+            /* On watchpoint hit, this will longjmp out. */
+            cpu_check_watchpoint(env_cpu(env), addr, size,
+                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
+        }
+
+        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
+
+        /* Handle I/O access. */
+        if (tlb_addr & TLB_MMIO) {
+            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
+                      op ^ (need_swap * MO_BSWAP));
+            return;
+        }
+
+        /* Ignore writes to ROM. */
+        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
+            return;
+        }
+
+        /* Handle clean RAM pages. */
+        if (tlb_addr & TLB_NOTDIRTY) {
+            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+        }
+
+        haddr = (void *)((uintptr_t)addr + entry->addend);
+
+        /*
+         * Keep these two store_memop separate to ensure that the compiler
+         * is able to fold the entire function to a single instruction.
+         * There is a build-time assert inside to remind you of this. ;-)
+         */
+        if (unlikely(need_swap)) {
+            store_memop(haddr, val, op ^ MO_BSWAP);
+        } else {
+            store_memop(haddr, val, op);
+        }
+        return;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).
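+     * As a worked example (0x1000-byte pages): a 4-byte store at addr 0xffe
+     * yields page2 = 0x1000 and size2 = 2, i.e. two bytes on each page; both
+     * TLB entries and both watchpoint checks are handled before the byte
+     * loop below performs any store.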
*/ + if (size > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 + >= TARGET_PAGE_SIZE)) { + int i; + uintptr_t index2; + CPUTLBEntry *entry2; + target_ulong page2, tlb_addr2; + size_t size2; + + do_unaligned_access: + /* + * Ensure the second page is in the TLB. Note that the first page + * is already guaranteed to be filled, and that the second page + * cannot evict the first. + */ + page2 = (addr + size) & TARGET_PAGE_MASK; + size2 = (addr + size) & ~TARGET_PAGE_MASK; + index2 = tlb_index(env, mmu_idx, page2); + entry2 = tlb_entry(env, mmu_idx, page2); + tlb_addr2 = tlb_addr_write(entry2); + if (!tlb_hit_page(uc, tlb_addr2, page2)) { + if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { + tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, + mmu_idx, retaddr); + index2 = tlb_index(env, mmu_idx, page2); + entry2 = tlb_entry(env, mmu_idx, page2); + } + tlb_addr2 = tlb_addr_write(entry2); + } + + /* + * Handle watchpoints. Since this may trap, all checks + * must happen before any store. + */ + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + cpu_check_watchpoint(env_cpu(env), addr, size - size2, + env_tlb(env)->d[mmu_idx].iotlb[index].attrs, + BP_MEM_WRITE, retaddr); + } + if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { + cpu_check_watchpoint(env_cpu(env), page2, size2, + env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, + BP_MEM_WRITE, retaddr); + } + + /* + * XXX: not efficient, but simple. + * This loop must go in the forward direction to avoid issues + * with self-modifying code in Windows 64-bit. + */ + for (i = 0; i < size; ++i) { + uint8_t val8; + if (memop_big_endian(op)) { + /* Big-endian extract. */ + val8 = val >> (((size - 1) * 8) - (i * 8)); + } else { + /* Little-endian extract. */ + val8 = val >> (i * 8); + } + helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); + } + return; + } + + haddr = (void *)((uintptr_t)addr + entry->addend); + store_memop(haddr, val, op); +} + +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + store_helper(env, addr, val, oi, retaddr, MO_UB); +} + +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + store_helper(env, addr, val, oi, retaddr, MO_LEUW); +} + +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + store_helper(env, addr, val, oi, retaddr, MO_BEUW); +} + +void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + store_helper(env, addr, val, oi, retaddr, MO_LEUL); +} + +void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + store_helper(env, addr, val, oi, retaddr, MO_BEUL); +} + +void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + store_helper(env, addr, val, oi, retaddr, MO_LEQ); +} + +void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + store_helper(env, addr, val, oi, retaddr, MO_BEQ); +} + +/* + * Store Helpers for cpu_ldst.h + */ + +static void inline +cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val, + int mmu_idx, uintptr_t retaddr, MemOp op) // qq +{ + TCGMemOpIdx oi; + + oi = make_memop_idx(op, mmu_idx); + store_helper(env, addr, val, oi, retaddr, op); +} + +void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, + int mmu_idx, uintptr_t retaddr) +{ 
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB); +} + +void cpu_stw_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, + int mmu_idx, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUW); +} + +void cpu_stl_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, + int mmu_idx, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUL); +} + +void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, + int mmu_idx, uintptr_t retaddr) +{ + cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ); +} + +void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr, + uint32_t val, uintptr_t retaddr) +{ + cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); +} + +void cpu_stw_data_ra(CPUArchState *env, target_ulong ptr, + uint32_t val, uintptr_t retaddr) +{ + cpu_stw_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); +} + +void cpu_stl_data_ra(CPUArchState *env, target_ulong ptr, + uint32_t val, uintptr_t retaddr) +{ + cpu_stl_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); +} + +void cpu_stq_data_ra(CPUArchState *env, target_ulong ptr, + uint64_t val, uintptr_t retaddr) +{ + cpu_stq_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); +} + +void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val) +{ + cpu_stb_data_ra(env, ptr, val, 0); +} + +void cpu_stw_data(CPUArchState *env, target_ulong ptr, uint32_t val) +{ + cpu_stw_data_ra(env, ptr, val, 0); +} + +void cpu_stl_data(CPUArchState *env, target_ulong ptr, uint32_t val) +{ + cpu_stl_data_ra(env, ptr, val, 0); +} + +void cpu_stq_data(CPUArchState *env, target_ulong ptr, uint64_t val) +{ + cpu_stq_data_ra(env, ptr, val, 0); +} + +/* First set of helpers allows passing in of OI and RETADDR. This makes + them callable from other helpers. */ + +#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr +#define ATOMIC_NAME(X) \ + HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) +#define ATOMIC_MMU_DECLS +#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr) +#define ATOMIC_MMU_CLEANUP +#define ATOMIC_MMU_IDX get_mmuidx(oi) + +#define DATA_SIZE 1 +#include "atomic_template.h" + +#define DATA_SIZE 2 +#include "atomic_template.h" + +#define DATA_SIZE 4 +#include "atomic_template.h" + +#ifdef CONFIG_ATOMIC64 +#define DATA_SIZE 8 +#include "atomic_template.h" +#endif + +#if HAVE_CMPXCHG128 || HAVE_ATOMIC128 +#define DATA_SIZE 16 +#include "atomic_template.h" +#endif + +/* Second set of helpers are directly callable from TCG as helpers. */ + +#undef EXTRA_ARGS +#undef ATOMIC_NAME +#undef ATOMIC_MMU_LOOKUP +#define EXTRA_ARGS , TCGMemOpIdx oi +#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) +#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC()) + +#define DATA_SIZE 1 +#include "atomic_template.h" + +#define DATA_SIZE 2 +#include "atomic_template.h" + +#define DATA_SIZE 4 +#include "atomic_template.h" + +#ifdef CONFIG_ATOMIC64 +#define DATA_SIZE 8 +#include "atomic_template.h" +#endif +#undef ATOMIC_MMU_IDX + +/* Code access functions. 
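+ *
+ * These feed the translators: an instruction decoder fetches opcode bytes
+ * with, for example,
+ *
+ *     uint32_t insn = cpu_ldl_code(env, pc);
+ *
+ * which goes through load_helper() with code_read == true, so the lookup
+ * uses addr_code/MMU_INST_FETCH and fires the UC_HOOK_MEM_FETCH_* hooks
+ * rather than the data-read ones.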
*/ + +static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); +} + +uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) +{ + TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); + return full_ldub_code(env, addr, oi, 0); +} + +static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); +} + +uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) +{ + TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); + return full_lduw_code(env, addr, oi, 0); +} + +static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); +} + +uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) +{ + TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); + return full_ldl_code(env, addr, oi, 0); +} + +static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code); +} + +uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) +{ + TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true)); + return full_ldq_code(env, addr, oi, 0); +} diff --git a/qemu/accel/tcg/tcg-all.c b/qemu/accel/tcg/tcg-all.c new file mode 100644 index 00000000..2425c7dc --- /dev/null +++ b/qemu/accel/tcg/tcg-all.c @@ -0,0 +1,39 @@ +/* + * QEMU System Emulator, accelerator interfaces + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/tcg.h"
+#include "cpu.h"
+#include "sysemu/cpus.h"
+#include "tcg/tcg.h"
+
+
+/* mask must never be zero, except for A20 change call */
+static void tcg_handle_interrupt(CPUState *cpu, int mask)
+{
+    cpu->interrupt_request |= mask;
+}
+
+CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
diff --git a/qemu/accel/tcg/tcg-runtime-gvec.c b/qemu/accel/tcg/tcg-runtime-gvec.c
new file mode 100644
index 00000000..ea997c25
--- /dev/null
+++ b/qemu/accel/tcg/tcg-runtime-gvec.c
@@ -0,0 +1,1402 @@
+/*
+ * Generic vectorized operation runtime
+ *
+ * Copyright (c) 2018 Linaro
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "tcg/tcg-gvec-desc.h"
+
+
+static inline void clear_high(void *d, intptr_t oprsz, uint32_t desc)
+{
+    intptr_t maxsz = simd_maxsz(desc);
+    intptr_t i;
+
+    if (unlikely(maxsz > oprsz)) {
+        for (i = oprsz; i < maxsz; i += sizeof(uint64_t)) {
+            *(uint64_t *)((char *)d + i) = 0;
+        }
+    }
+}
+
+void HELPER(gvec_add8)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) + *(uint8_t *)((char *)b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_add16)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) + *(uint16_t *)((char *)b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_add32)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+        *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) + *(uint32_t *)((char *)b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_add64)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) + *(uint64_t *)((char *)b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_adds8)(void *d, void *a, uint64_t b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) + (uint8_t)b;
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_adds16)(void *d, void *a, uint64_t b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) + (uint16_t)b;
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_adds32)(void *d, void *a, uint64_t b,
uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) + (uint32_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_adds64)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) + b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) - *(uint8_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) - *(uint16_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) - *(uint32_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sub64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) - *(uint64_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs8)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) - (uint8_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs16)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) - (uint16_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs32)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) - (uint32_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_subs64)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) - b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) * *(uint8_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) * *(uint16_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul32)(void *d, void *a, void *b, 
uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) * *(uint32_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mul64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) * *(uint64_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls8)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) * (uint8_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls16)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) * (uint16_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls32)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) * (uint32_t)b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_muls64)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) * b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg8)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)((char *)d + i) = -*(uint8_t *)((char *)a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg16)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)((char *)d + i) = -*(uint16_t *)((char *)a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg32)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { +#ifdef _MSC_VER + *(uint32_t *)((char *)d + i) = 0U - *(uint32_t *)((char *)a + i); +#else + *(uint32_t *)((char *)d + i) = -*(uint32_t *)((char *)a + i); +#endif + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_neg64)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { +#ifdef _MSC_VER + *(uint64_t *)((char *)d + i) = 0ULL - *(uint64_t *)((char *)a + i); +#else + *(uint64_t *)((char *)d + i) = -*(uint64_t *)((char *)a + i); +#endif + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs8)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int8_t aa = *(int8_t *)((char *)a + i); + *(int8_t *)((char *)d + i) = aa < 0 ? -aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs16)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int16_t aa = *(int16_t *)((char *)a + i); + *(int16_t *)((char *)d + i) = aa < 0 ? 
-aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs32)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t aa = *(int32_t *)((char *)a + i); + *(int32_t *)((char *)d + i) = aa < 0 ? -aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_abs64)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t aa = *(int64_t *)((char *)a + i); + *(int64_t *)((char *)d + i) = aa < 0 ? -aa : aa; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_mov)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + + memcpy(d, a, oprsz); + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_dup64)(void *d, uint32_t desc, uint64_t c) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + if (c == 0) { + oprsz = 0; + } else { + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = c; + } + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_dup32)(void *d, uint32_t desc, uint32_t c) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + if (c == 0) { + oprsz = 0; + } else { + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)((char *)d + i) = c; + } + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_dup16)(void *d, uint32_t desc, uint32_t c) +{ + HELPER(gvec_dup32)(d, desc, 0x00010001 * (c & 0xffff)); +} + +void HELPER(gvec_dup8)(void *d, uint32_t desc, uint32_t c) +{ + HELPER(gvec_dup32)(d, desc, 0x01010101 * (c & 0xff)); +} + +void HELPER(gvec_not)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = ~*(uint64_t *)((char *)a + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_and)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) & *(uint64_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_or)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) | *(uint64_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_xor)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) ^ *(uint64_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_andc)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) &~ *(uint64_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_orc)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) |~ *(uint64_t *)((char *)b + i); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_nand)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < 
oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = ~(*(uint64_t *)((char *)a + i) & *(uint64_t *)((char *)b + i)); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_nor)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = ~(*(uint64_t *)((char *)a + i) | *(uint64_t *)((char *)b + i)); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_eqv)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = ~(*(uint64_t *)((char *)a + i) ^ *(uint64_t *)((char *)b + i)); + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) & b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) ^ b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ors)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) | b; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) << shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = 
simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar8i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(int8_t *)((char *)d + i) = *(int8_t *)((char *)a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar16i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(int16_t *)((char *)d + i) = *(int16_t *)((char *)a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar32i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(int32_t *)((char *)d + i) = *(int32_t *)((char *)a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar64i)(void *d, void *a, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + int shift = simd_data(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(int64_t *)((char *)d + i) = *(int64_t *)((char *)a + i) >> shift; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t sh = *(uint8_t *)((char *)b + i) & 7; + *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint8_t sh = *(uint16_t *)((char *)b + i) & 15; + *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint8_t sh = *(uint32_t *)((char *)b + i) & 31; + *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shl64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint8_t sh = *(uint64_t *)((char *)b + i) & 63; + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) << sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t sh = *(uint8_t *)((char *)b + i) & 7; + *(uint8_t *)((char *)d + i) = *(uint8_t *)((char *)a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += 
sizeof(uint16_t)) { + uint8_t sh = *(uint16_t *)((char *)b + i) & 15; + *(uint16_t *)((char *)d + i) = *(uint16_t *)((char *)a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint8_t sh = *(uint32_t *)((char *)b + i) & 31; + *(uint32_t *)((char *)d + i) = *(uint32_t *)((char *)a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_shr64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint8_t sh = *(uint64_t *)((char *)b + i) & 63; + *(uint64_t *)((char *)d + i) = *(uint64_t *)((char *)a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar8v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + uint8_t sh = *(uint8_t *)((char *)b + i) & 7; + *(int8_t *)((char *)d + i) = *(int8_t *)((char *)a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar16v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + uint8_t sh = *(uint16_t *)((char *)b + i) & 15; + *(int16_t *)((char *)d + i) = *(int16_t *)((char *)a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar32v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + uint8_t sh = *(uint32_t *)((char *)b + i) & 31; + *(int32_t *)((char *)d + i) = *(int32_t *)((char *)a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + uint8_t sh = *(uint64_t *)((char *)b + i) & 63; + *(int64_t *)((char *)d + i) = *(int64_t *)((char *)a + i) >> sh; + } + clear_high(d, oprsz, desc); +} + +#define DO_CMP1(NAME, TYPE, OP) \ +void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc) \ +{ \ + intptr_t oprsz = simd_oprsz(desc); \ + intptr_t i; \ + for (i = 0; i < oprsz; i += sizeof(TYPE)) { \ + *(TYPE *)((char *)d + i) = -(*(TYPE *)((char *)a + i) OP *(TYPE *)((char *)b + i)); \ + } \ + clear_high(d, oprsz, desc); \ +} + +#define DO_CMP2(SZ) \ + DO_CMP1(gvec_eq##SZ, uint##SZ##_t, ==) \ + DO_CMP1(gvec_ne##SZ, uint##SZ##_t, !=) \ + DO_CMP1(gvec_lt##SZ, int##SZ##_t, <) \ + DO_CMP1(gvec_le##SZ, int##SZ##_t, <=) \ + DO_CMP1(gvec_ltu##SZ, uint##SZ##_t, <) \ + DO_CMP1(gvec_leu##SZ, uint##SZ##_t, <=) + +DO_CMP2(8) +DO_CMP2(16) +DO_CMP2(32) +DO_CMP2(64) + +#undef DO_CMP1 +#undef DO_CMP2 + +void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int r = *(int8_t *)((char *)a + i) + *(int8_t *)((char *)b + i); + if (r > INT8_MAX) { + r = INT8_MAX; + } else if (r < INT8_MIN) { + r = INT8_MIN; + } + *(int8_t *)((char *)d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ssadd16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int r = *(int16_t *)((char *)a + i) + *(int16_t *)((char *)b + i); + if (r > INT16_MAX) { + r = INT16_MAX; + } else if (r < 
INT16_MIN) { + r = INT16_MIN; + } + *(int16_t *)((char *)d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ssadd32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t ai = *(int32_t *)((char *)a + i); + int32_t bi = *(int32_t *)((char *)b + i); + int32_t di = ai + bi; + if (((di ^ ai) &~ (ai ^ bi)) < 0) { + /* Signed overflow. */ + di = (di < 0 ? INT32_MAX : INT32_MIN); + } + *(int32_t *)((char *)d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ssadd64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t ai = *(int64_t *)((char *)a + i); + int64_t bi = *(int64_t *)((char *)b + i); + int64_t di = ai + bi; + if (((di ^ ai) &~ (ai ^ bi)) < 0) { + /* Signed overflow. */ + di = (di < 0 ? INT64_MAX : INT64_MIN); + } + *(int64_t *)((char *)d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + int r = *(int8_t *)((char *)a + i) - *(int8_t *)((char *)b + i); + if (r > INT8_MAX) { + r = INT8_MAX; + } else if (r < INT8_MIN) { + r = INT8_MIN; + } + *(uint8_t *)((char *)d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int r = *(int16_t *)((char *)a + i) - *(int16_t *)((char *)b + i); + if (r > INT16_MAX) { + r = INT16_MAX; + } else if (r < INT16_MIN) { + r = INT16_MIN; + } + *(int16_t *)((char *)d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t ai = *(int32_t *)((char *)a + i); + int32_t bi = *(int32_t *)((char *)b + i); + int32_t di = ai - bi; + if (((di ^ ai) & (ai ^ bi)) < 0) { + /* Signed overflow. */ + di = (di < 0 ? INT32_MAX : INT32_MIN); + } + *(int32_t *)((char *)d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_sssub64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t ai = *(int64_t *)((char *)a + i); + int64_t bi = *(int64_t *)((char *)b + i); + int64_t di = ai - bi; + if (((di ^ ai) & (ai ^ bi)) < 0) { + /* Signed overflow. */ + di = (di < 0 ? 
INT64_MAX : INT64_MIN); + } + *(int64_t *)((char *)d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + unsigned r = *(uint8_t *)((char *)a + i) + *(uint8_t *)((char *)b + i); + if (r > UINT8_MAX) { + r = UINT8_MAX; + } + *(uint8_t *)((char *)d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + unsigned r = *(uint16_t *)((char *)a + i) + *(uint16_t *)((char *)b + i); + if (r > UINT16_MAX) { + r = UINT16_MAX; + } + *(uint16_t *)((char *)d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t ai = *(uint32_t *)((char *)a + i); + uint32_t bi = *(uint32_t *)((char *)b + i); + uint32_t di = ai + bi; + if (di < ai) { + di = UINT32_MAX; + } + *(uint32_t *)((char *)d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_usadd64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t ai = *(uint64_t *)((char *)a + i); + uint64_t bi = *(uint64_t *)((char *)b + i); + uint64_t di = ai + bi; + if (di < ai) { + di = UINT64_MAX; + } + *(uint64_t *)((char *)d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + int r = *(uint8_t *)((char *)a + i) - *(uint8_t *)((char *)b + i); + if (r < 0) { + r = 0; + } + *(uint8_t *)((char *)d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + int r = *(uint16_t *)((char *)a + i) - *(uint16_t *)((char *)b + i); + if (r < 0) { + r = 0; + } + *(uint16_t *)((char *)d + i) = r; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t ai = *(uint32_t *)((char *)a + i); + uint32_t bi = *(uint32_t *)((char *)b + i); + uint32_t di = ai - bi; + if (ai < bi) { + di = 0; + } + *(uint32_t *)((char *)d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_ussub64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t ai = *(uint64_t *)((char *)a + i); + uint64_t bi = *(uint64_t *)((char *)b + i); + uint64_t di = ai - bi; + if (ai < bi) { + di = 0; + } + *(uint64_t *)((char *)d + i) = di; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int8_t aa = *(int8_t *)((char *)a + i); + int8_t bb = *(int8_t *)((char *)b + i); + int8_t dd = aa < bb ? 
aa : bb; + *(int8_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int16_t aa = *(int16_t *)((char *)a + i); + int16_t bb = *(int16_t *)((char *)b + i); + int16_t dd = aa < bb ? aa : bb; + *(int16_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t aa = *(int32_t *)((char *)a + i); + int32_t bb = *(int32_t *)((char *)b + i); + int32_t dd = aa < bb ? aa : bb; + *(int32_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smin64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t aa = *(int64_t *)((char *)a + i); + int64_t bb = *(int64_t *)((char *)b + i); + int64_t dd = aa < bb ? aa : bb; + *(int64_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int8_t aa = *(int8_t *)((char *)a + i); + int8_t bb = *(int8_t *)((char *)b + i); + int8_t dd = aa > bb ? aa : bb; + *(int8_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int16_t aa = *(int16_t *)((char *)a + i); + int16_t bb = *(int16_t *)((char *)b + i); + int16_t dd = aa > bb ? aa : bb; + *(int16_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int32_t aa = *(int32_t *)((char *)a + i); + int32_t bb = *(int32_t *)((char *)b + i); + int32_t dd = aa > bb ? aa : bb; + *(int32_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_smax64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t aa = *(int64_t *)((char *)a + i); + int64_t bb = *(int64_t *)((char *)b + i); + int64_t dd = aa > bb ? aa : bb; + *(int64_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t aa = *(uint8_t *)((char *)a + i); + uint8_t bb = *(uint8_t *)((char *)b + i); + uint8_t dd = aa < bb ? aa : bb; + *(uint8_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint16_t aa = *(uint16_t *)((char *)a + i); + uint16_t bb = *(uint16_t *)((char *)b + i); + uint16_t dd = aa < bb ? 
aa : bb; + *(uint16_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t aa = *(uint32_t *)((char *)a + i); + uint32_t bb = *(uint32_t *)((char *)b + i); + uint32_t dd = aa < bb ? aa : bb; + *(uint32_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umin64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t aa = *(uint64_t *)((char *)a + i); + uint64_t bb = *(uint64_t *)((char *)b + i); + uint64_t dd = aa < bb ? aa : bb; + *(uint64_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax8)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + uint8_t aa = *(uint8_t *)((char *)a + i); + uint8_t bb = *(uint8_t *)((char *)b + i); + uint8_t dd = aa > bb ? aa : bb; + *(uint8_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax16)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + uint16_t aa = *(uint16_t *)((char *)a + i); + uint16_t bb = *(uint16_t *)((char *)b + i); + uint16_t dd = aa > bb ? aa : bb; + *(uint16_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax32)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + uint32_t aa = *(uint32_t *)((char *)a + i); + uint32_t bb = *(uint32_t *)((char *)b + i); + uint32_t dd = aa > bb ? aa : bb; + *(uint32_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_umax64)(void *d, void *a, void *b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t aa = *(uint64_t *)((char *)a + i); + uint64_t bb = *(uint64_t *)((char *)b + i); + uint64_t dd = aa > bb ? aa : bb; + *(uint64_t *)((char *)d + i) = dd; + } + clear_high(d, oprsz, desc); +} + +void HELPER(gvec_bitsel)(void *d, void *a, void *b, void *c, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t aa = *(uint64_t *)((char *)a + i); + uint64_t bb = *(uint64_t *)((char *)b + i); + uint64_t cc = *(uint64_t *)((char *)c + i); + *(uint64_t *)((char *)d + i) = (bb & aa) | (cc & ~aa); + } + clear_high(d, oprsz, desc); +} diff --git a/qemu/tcg-runtime.c b/qemu/accel/tcg/tcg-runtime.c similarity index 63% rename from qemu/tcg-runtime.c rename to qemu/accel/tcg/tcg-runtime.c index 21b022a5..1e8283ea 100644 --- a/qemu/tcg-runtime.c +++ b/qemu/accel/tcg/tcg-runtime.c @@ -21,19 +21,16 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ -#include "unicorn/platform.h" +#include "qemu/osdep.h" #include "qemu/host-utils.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "exec/exec-all.h" +#include "exec/tb-lookup.h" +#include "tcg/tcg.h" -/* This file is compiled once, and thus we can't include the standard - "exec/helper-proto.h", which has includes that are target specific. 
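
[editor's note] The branch-free overflow test in the gvec_ssadd32()/gvec_sssub32() helpers above deserves a note: for d = a + b, signed overflow happened exactly when a and b share a sign and d ends up with the other one, which is what ((d ^ a) & ~(a ^ b)) < 0 checks via the sign bit (for subtraction the mask flips to (a ^ b), as in gvec_sssub32). A minimal standalone check of the addition case, not part of the patch:

    #include <stdint.h>
    #include <assert.h>

    /* Saturating 32-bit add using the same sign-bit test as the helper:
       on overflow the wrapped result d has the opposite sign of the true
       sum, so saturate away from the sign d ended up with. */
    static int32_t sat_add32(int32_t a, int32_t b)
    {
        int32_t d = (int32_t)((uint32_t)a + (uint32_t)b); /* wrap, no UB */
        if (((d ^ a) & ~(a ^ b)) < 0) {
            d = d < 0 ? INT32_MAX : INT32_MIN;
        }
        return d;
    }

    int main(void)
    {
        assert(sat_add32(INT32_MAX, 1) == INT32_MAX);
        assert(sat_add32(INT32_MIN, -1) == INT32_MIN);
        assert(sat_add32(-5, 3) == -2);
        return 0;
    }
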
*/ - -#include "exec/helper-head.h" - -#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \ - dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2)); - -#include "tcg-runtime.h" - +#include /* 32-bit helpers */ @@ -107,3 +104,63 @@ int64_t HELPER(mulsh_i64)(int64_t arg1, int64_t arg2) muls64(&l, &h, arg1, arg2); return h; } + +uint32_t HELPER(clz_i32)(uint32_t arg, uint32_t zero_val) +{ + return arg ? clz32(arg) : zero_val; +} + +uint32_t HELPER(ctz_i32)(uint32_t arg, uint32_t zero_val) +{ + return arg ? ctz32(arg) : zero_val; +} + +uint64_t HELPER(clz_i64)(uint64_t arg, uint64_t zero_val) +{ + return arg ? clz64(arg) : zero_val; +} + +uint64_t HELPER(ctz_i64)(uint64_t arg, uint64_t zero_val) +{ + return arg ? ctz64(arg) : zero_val; +} + +uint32_t HELPER(clrsb_i32)(uint32_t arg) +{ + return clrsb32(arg); +} + +uint64_t HELPER(clrsb_i64)(uint64_t arg) +{ + return clrsb64(arg); +} + +uint32_t HELPER(ctpop_i32)(uint32_t arg) +{ + return ctpop32(arg); +} + +uint64_t HELPER(ctpop_i64)(uint64_t arg) +{ + return ctpop64(arg); +} + +void *HELPER(lookup_tb_ptr)(CPUArchState *env) +{ + CPUState *cpu = env_cpu(env); + TranslationBlock *tb; + target_ulong cs_base, pc; + uint32_t flags; + struct uc_struct *uc = (struct uc_struct *)cpu->uc; + + tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags()); + if (tb == NULL) { + return uc->tcg_ctx->code_gen_epilogue; + } + return tb->tc.ptr; +} + +void HELPER(exit_atomic)(CPUArchState *env) +{ + cpu_loop_exit_atomic(env_cpu(env), GETPC()); +} diff --git a/qemu/accel/tcg/tcg-runtime.h b/qemu/accel/tcg/tcg-runtime.h new file mode 100644 index 00000000..ab7369e8 --- /dev/null +++ b/qemu/accel/tcg/tcg-runtime.h @@ -0,0 +1,261 @@ +DEF_HELPER_FLAGS_2(div_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(rem_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(divu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(remu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) + +DEF_HELPER_FLAGS_2(div_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(rem_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(divu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(remu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(shl_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(shr_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) + +DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(clz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(ctz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(clz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(ctz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_1(clrsb_i32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(clrsb_i64, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(ctpop_i32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(ctpop_i64, TCG_CALL_NO_RWG_SE, i64, i64) + +DEF_HELPER_FLAGS_1(lookup_tb_ptr, TCG_CALL_NO_WG_SE, ptr, env) + +DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env) + +DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) 
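
[editor's note] The clz_i32/ctz_i32 helpers above take an explicit zero_val operand because the host builtins they sit on (and many target instructions) disagree about the count for a zero input; __builtin_clz(0), for instance, is undefined. The translator passes in whatever the guest architecture specifies. A sketch of the same contract, assuming GCC/Clang builtins:

    #include <stdint.h>

    /* Count leading zeros with a caller-chosen result for zero input,
       mirroring the clz_i32 helper's contract: __builtin_clz() is
       undefined for 0, hence the explicit guard. */
    static uint32_t clz32_zv(uint32_t arg, uint32_t zero_val)
    {
        return arg ? (uint32_t)__builtin_clz(arg) : zero_val;
    }

    /* e.g. clz32_zv(1, 32) == 31, and clz32_zv(0, 32) == 32 */
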
+DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG, + i32, env, tl, i32, i32, i32) +#ifdef CONFIG_ATOMIC64 +DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG, + i64, env, tl, i64, i64, i32) +DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG, + i64, env, tl, i64, i64, i32) +#endif + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPERS(NAME) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \ + TCG_CALL_NO_WG, i64, env, tl, i64, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \ + TCG_CALL_NO_WG, i64, env, tl, i64, i32) +#else +#define GEN_ATOMIC_HELPERS(NAME) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) \ + DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \ + TCG_CALL_NO_WG, i32, env, tl, i32, i32) +#endif /* CONFIG_ATOMIC64 */ + +GEN_ATOMIC_HELPERS(fetch_add) +GEN_ATOMIC_HELPERS(fetch_and) +GEN_ATOMIC_HELPERS(fetch_or) +GEN_ATOMIC_HELPERS(fetch_xor) +GEN_ATOMIC_HELPERS(fetch_smin) +GEN_ATOMIC_HELPERS(fetch_umin) +GEN_ATOMIC_HELPERS(fetch_smax) +GEN_ATOMIC_HELPERS(fetch_umax) + +GEN_ATOMIC_HELPERS(add_fetch) +GEN_ATOMIC_HELPERS(and_fetch) +GEN_ATOMIC_HELPERS(or_fetch) +GEN_ATOMIC_HELPERS(xor_fetch) +GEN_ATOMIC_HELPERS(smin_fetch) +GEN_ATOMIC_HELPERS(umin_fetch) +GEN_ATOMIC_HELPERS(smax_fetch) +GEN_ATOMIC_HELPERS(umax_fetch) + +GEN_ATOMIC_HELPERS(xchg) + +#undef GEN_ATOMIC_HELPERS + +DEF_HELPER_FLAGS_3(gvec_mov, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_dup8, TCG_CALL_NO_RWG, void, ptr, i32, i32) +DEF_HELPER_FLAGS_3(gvec_dup16, TCG_CALL_NO_RWG, void, ptr, i32, i32) +DEF_HELPER_FLAGS_3(gvec_dup32, TCG_CALL_NO_RWG, void, ptr, i32, i32) +DEF_HELPER_FLAGS_3(gvec_dup64, TCG_CALL_NO_RWG, void, ptr, i32, i64) + +DEF_HELPER_FLAGS_4(gvec_add8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_add16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_add32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_add64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_adds8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_adds16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_adds32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_adds64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(gvec_sub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_subs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_subs16, 
TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_subs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_subs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(gvec_mul8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_muls8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_muls16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_muls32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_muls64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(gvec_ssadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ssadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ssadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ssadd64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_sssub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sssub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sssub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sssub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_usadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_usadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_usadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_usadd64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ussub8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ussub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ussub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ussub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_smin8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smin16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smin32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smin64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_smax8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smax16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_umin8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umin16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umin32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umin64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_umax8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umax16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_neg8, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_neg16, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_neg32, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_neg64, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + 
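
[editor's note] Every gvec helper declared here receives a single 32-bit desc (the trailing i32) that packs the operation size, the maximum clearable size, and an immediate data field; simd_oprsz()/simd_data() unpack it inside the helper bodies. A hedged sketch of that packing, modeled on QEMU's simd_desc(); the field widths below are illustrative of the scheme, not authoritative:

    #include <stdint.h>

    /* Illustrative descriptor layout: 5 bits each for (oprsz/8 - 1) and
       (maxsz/8 - 1); the remaining bits carry a signed immediate. */
    enum { OPRSZ_SHIFT = 0, MAXSZ_SHIFT = 5, DATA_SHIFT = 10 };

    static uint32_t make_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
    {
        return (oprsz / 8 - 1) << OPRSZ_SHIFT
             | (maxsz / 8 - 1) << MAXSZ_SHIFT
             | (uint32_t)data << DATA_SHIFT;
    }

    static intptr_t desc_oprsz(uint32_t desc)
    {
        return (((desc >> OPRSZ_SHIFT) & 0x1f) + 1) * 8;
    }

    static int32_t desc_data(uint32_t desc)
    {
        return (int32_t)desc >> DATA_SHIFT; /* arithmetic shift sign-extends */
    }

    /* make_desc(16, 16, 3) describes a 16-byte vector with immediate 3. */

clear_high() then zeroes everything from oprsz up to the maximum size, which is why each helper body ends with it.
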
+DEF_HELPER_FLAGS_3(gvec_abs8, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_abs16, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_abs32, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_abs64, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_not, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_and, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_or, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_xor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_andc, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_orc, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_nand, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_3(gvec_shl8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shl16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shl32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shl64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_shr8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shr16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shr32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_shr64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_sar8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sar16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sar32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sar64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_shl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_shr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_shr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_sar8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sar16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sar32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sar64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_eq64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ne8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ne16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ne32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ne64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_lt8, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_lt16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_lt32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_lt64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_le8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_le16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_le32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_le64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_ltu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ltu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ltu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ltu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_leu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) diff --git a/qemu/accel/tcg/translate-all.c b/qemu/accel/tcg/translate-all.c new file mode 100644 index 00000000..79434101 --- /dev/null +++ b/qemu/accel/tcg/translate-all.c @@ -0,0 +1,1960 @@ +/* + * Host code generation + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu-common.h" + +#define NO_CPU_IO_DEFS +#include "cpu.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#include "exec/ram_addr.h" + +#include "exec/cputlb.h" +#include "exec/tb-hash.h" +#include "translate-all.h" +#include "qemu/bitmap.h" +#include "qemu/timer.h" +#include "sysemu/cpus.h" +#include "sysemu/tcg.h" +#include "uc_priv.h" + +static bool tb_exec_is_locked(TCGContext*); +static void tb_exec_change(TCGContext*, bool locked); + +/* #define DEBUG_TB_INVALIDATE */ +/* #define DEBUG_TB_FLUSH */ +/* make various TB consistency checks */ +/* #define DEBUG_TB_CHECK */ + +#ifdef DEBUG_TB_INVALIDATE +#define DEBUG_TB_INVALIDATE_GATE 1 +#else +#define DEBUG_TB_INVALIDATE_GATE 0 +#endif + +#ifdef DEBUG_TB_FLUSH +#define DEBUG_TB_FLUSH_GATE 1 +#else +#define DEBUG_TB_FLUSH_GATE 0 +#endif + +/* TB consistency checks only implemented for usermode emulation. */ +#undef DEBUG_TB_CHECK + +#ifdef DEBUG_TB_CHECK +#define DEBUG_TB_CHECK_GATE 1 +#else +#define DEBUG_TB_CHECK_GATE 0 +#endif + +/* Access to the various translations structures need to be serialised via locks + * for consistency. + * In user-mode emulation access to the memory related structures are protected + * with mmap_lock. + * In !user-mode we use per-page locks. 
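
[editor's note] The deadlock-avoidance rule stated in this comment block, always taking page locks in ascending page-index order, is the classic total-order discipline. A generic sketch of the idea, using pthread mutexes as a stand-in for the PageDesc locks:

    #include <pthread.h>

    /* Acquire two index-ordered locks without risking an ABBA deadlock:
       every thread takes the lower-indexed lock first. */
    static void lock_pair_ordered(pthread_mutex_t *locks,
                                  unsigned i, unsigned j)
    {
        if (i == j) {
            pthread_mutex_lock(&locks[i]);
        } else if (i < j) {
            pthread_mutex_lock(&locks[i]);
            pthread_mutex_lock(&locks[j]);
        } else {
            pthread_mutex_lock(&locks[j]);
            pthread_mutex_lock(&locks[i]);
        }
    }

page_lock_pair() later in this file follows exactly this shape when a TB spans two guest pages.
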
+ */ +#define assert_memory_lock() + +#define SMC_BITMAP_USE_THRESHOLD 10 + +typedef struct PageDesc { + /* list of TBs intersecting this ram page */ + uintptr_t first_tb; + /* in order to optimize self modifying code, we count the number + of lookups we do to a given page to use a bitmap */ + unsigned long *code_bitmap; + unsigned int code_write_count; +} PageDesc; + +/** + * struct page_entry - page descriptor entry + * @pd: pointer to the &struct PageDesc of the page this entry represents + * @index: page index of the page + * @locked: whether the page is locked + * + * This struct helps us keep track of the locked state of a page, without + * bloating &struct PageDesc. + * + * A page lock protects accesses to all fields of &struct PageDesc. + * + * See also: &struct page_collection. + */ +struct page_entry { + PageDesc *pd; + tb_page_addr_t index; + bool locked; +}; + +/** + * struct page_collection - tracks a set of pages (i.e. &struct page_entry's) + * @tree: Binary search tree (BST) of the pages, with key == page index + * @max: Pointer to the page in @tree with the highest page index + * + * To avoid deadlock we lock pages in ascending order of page index. + * When operating on a set of pages, we need to keep track of them so that + * we can lock them in order and also unlock them later. For this we collect + * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the + * @tree implementation we use does not provide an O(1) operation to obtain the + * highest-ranked element, we use @max to keep track of the inserted page + * with the highest index. This is valuable because if a page is not in + * the tree and its index is higher than @max's, then we can lock it + * without breaking the locking order rule. + * + * Note on naming: 'struct page_set' would be shorter, but we already have a few + * page_set_*() helpers, so page_collection is used instead to avoid confusion. + * + * See also: page_collection_lock(). + */ +struct page_collection { + GTree *tree; + struct page_entry *max; +}; + +/* list iterators for lists of tagged pointers in TranslationBlock */ +#define TB_FOR_EACH_TAGGED(head, tb, n, field) \ + for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \ + tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \ + tb = (TranslationBlock *)((uintptr_t)tb & ~1)) + +#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \ + TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) + +#define TB_FOR_EACH_JMP(head_tb, tb, n) \ + TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next) + +/* In system mode we want L1_MAP to be based on ram offsets, + while in user mode we want it to be based on virtual addresses. */ +#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS +# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS +#else +# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS +#endif + +/* Size of the L2 (and L3, etc) page tables. */ +#define V_L2_BITS 10 +#define V_L2_SIZE (1 << V_L2_BITS) + +/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */ +QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > + sizeof_field(TranslationBlock, trace_vcpu_dstate) + * BITS_PER_BYTE); + +/* The bottom level has pointers to PageDesc, and is indexed by + * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. 
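
[editor's note] The TB_FOR_EACH_TAGGED iterator above relies on pointer tagging: TranslationBlock allocations are at least 2-byte aligned, so bit 0 of a link pointer is free to record which of the TB's two page slots (n = 0 or 1) the link belongs to. A minimal sketch of the tag/untag helpers; the names are hypothetical:

    #include <stdint.h>

    struct tb; /* stand-in for TranslationBlock */

    /* Pack a 1-bit slot index into the low bit of an aligned pointer. */
    static uintptr_t tb_tag(struct tb *p, unsigned n)
    {
        return (uintptr_t)p | (n & 1);
    }

    static struct tb *tb_untag(uintptr_t v, unsigned *n)
    {
        *n = v & 1;
        return (struct tb *)(v & ~(uintptr_t)1);
    }
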
+ */ +#define V_L1_MIN_BITS 4 +#define V_L1_MAX_BITS (V_L2_BITS + 3) +#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) + +static void page_table_config_init(struct uc_struct *uc) +{ + uint32_t v_l1_bits; + + assert(TARGET_PAGE_BITS); + /* The bits remaining after N lower levels of page tables. */ + v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS; + if (v_l1_bits < V_L1_MIN_BITS) { + v_l1_bits += V_L2_BITS; + } + + uc->v_l1_size = 1 << v_l1_bits; + uc->v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits; + uc->v_l2_levels = uc->v_l1_shift / V_L2_BITS - 1; + + assert(v_l1_bits <= V_L1_MAX_BITS); + assert(uc->v_l1_shift % V_L2_BITS == 0); + assert(uc->v_l2_levels >= 0); +} + +/* Encode VAL as a signed leb128 sequence at P. + Return P incremented past the encoded value. */ +static uint8_t *encode_sleb128(uint8_t *p, target_long val) +{ + int more, byte; + + do { + byte = val & 0x7f; + val >>= 7; + more = !((val == 0 && (byte & 0x40) == 0) + || (val == -1 && (byte & 0x40) != 0)); + if (more) { + byte |= 0x80; + } + *p++ = byte; + } while (more); + + return p; +} + +/* Decode a signed leb128 sequence at *PP; increment *PP past the + decoded value. Return the decoded value. */ +static target_long decode_sleb128(uint8_t **pp) +{ + uint8_t *p = *pp; + target_long val = 0; + int byte, shift = 0; + + do { + byte = *p++; + val |= (target_ulong)(byte & 0x7f) << shift; + shift += 7; + } while (byte & 0x80); + if (shift < TARGET_LONG_BITS && (byte & 0x40)) { +#ifdef _MSC_VER + val |= ((target_ulong)0 - 1) << shift; +#else + val |= -(target_ulong)1 << shift; +#endif + } + + *pp = p; + return val; +} + +/* Encode the data collected about the instructions while compiling TB. + Place the data at BLOCK, and return the number of bytes consumed. + + The logical table consists of TARGET_INSN_START_WORDS target_ulong's, + which come from the target's insn_start data, followed by a uintptr_t + which comes from the host pc of the end of the code implementing the insn. + + Each line of the table is encoded as sleb128 deltas from the previous + line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }. + That is, the first column is seeded with the guest pc, the last column + with the host pc, and the middle columns with zeros. */ + +static int encode_search(struct uc_struct *uc, TranslationBlock *tb, uint8_t *block) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + uint8_t *highwater = tcg_ctx->code_gen_highwater; + uint8_t *p = block; + int i, j, n; + + for (i = 0, n = tb->icount; i < n; ++i) { + target_ulong prev; + + for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { + if (i == 0) { + prev = (j == 0 ? tb->pc : 0); + } else { + prev = tcg_ctx->gen_insn_data[i - 1][j]; + } + p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev); + } + prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]); + p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev); + + /* Test for (pending) buffer overflow. The assumption is that any + one row beginning below the high water mark cannot overrun + the buffer completely. Thus we can test for overflow after + encoding a row without having to check during encoding. */ + if (unlikely(p > highwater)) { + return -1; + } + } + + return p - block; +} + +/* The cpu state corresponding to 'searched_pc' is restored. + * When reset_icount is true, current TB will be interrupted and + * icount should be recalculated. 
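
[editor's note] encode_sleb128()/decode_sleb128() above implement standard signed LEB128, the same variable-length integer format DWARF uses: 7 payload bits per byte, bit 7 as the continuation flag, and bit 6 of the final byte as the sign. A self-contained round-trip check mirroring the two routines, with int64_t in place of target_long; like the originals it relies on arithmetic right shift of negative values:

    #include <stdint.h>
    #include <assert.h>

    static uint8_t *sleb128_enc(uint8_t *p, int64_t val)
    {
        int more;
        do {
            int byte = val & 0x7f;
            val >>= 7;
            more = !((val == 0 && !(byte & 0x40)) ||
                     (val == -1 && (byte & 0x40)));
            *p++ = byte | (more ? 0x80 : 0);
        } while (more);
        return p;
    }

    static int64_t sleb128_dec(const uint8_t **pp)
    {
        const uint8_t *p = *pp;
        int64_t val = 0;
        int shift = 0, byte;
        do {
            byte = *p++;
            val |= (int64_t)(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        if (shift < 64 && (byte & 0x40)) {
            val |= -((int64_t)1 << shift); /* sign-extend */
        }
        *pp = p;
        return val;
    }

    int main(void)
    {
        uint8_t buf[16];
        const uint8_t *q = buf;
        sleb128_enc(buf, -129);        /* encodes as 0xff 0x7e */
        assert(sleb128_dec(&q) == -129);
        return 0;
    }

encode_search() emits one such delta per column per guest instruction, which is what keeps the per-TB search data compact.
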
+ */ +static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, + uintptr_t searched_pc, bool reset_icount) +{ + target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; + uintptr_t host_pc = (uintptr_t)tb->tc.ptr; + CPUArchState *env = cpu->env_ptr; + uint8_t *p = (uint8_t *)tb->tc.ptr + tb->tc.size; + int i, j, num_insns = tb->icount; + + searched_pc -= GETPC_ADJ; + + if (searched_pc < host_pc) { + return -1; + } + + /* Reconstruct the stored insn data while looking for the point at + which the end of the insn exceeds the searched_pc. */ + for (i = 0; i < num_insns; ++i) { + for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { + data[j] += decode_sleb128(&p); + } + host_pc += decode_sleb128(&p); + if (host_pc > searched_pc) { + goto found; + } + } + return -1; + + found: + if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) { + /* Reset the cycle counter to the start of the block + and shift if to the number of actually executed instructions */ + cpu_neg(cpu)->icount_decr.u16.low += num_insns - i; + } + restore_state_to_opc(env, tb, data); + + return 0; +} + +bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) +{ + TCGContext *tcg_ctx = cpu->uc->tcg_ctx; + TranslationBlock *tb; + bool r = false; + uintptr_t check_offset; + struct uc_struct *uc = cpu->uc; + + /* The host_pc has to be in the region of current code buffer. If + * it is not we will not be able to resolve it here. The two cases + * where host_pc will not be correct are: + * + * - fault during translation (instruction fetch) + * - fault from helper (not using GETPC() macro) + * + * Either way we need return early as we can't resolve it here. + * + * We are using unsigned arithmetic so if host_pc < + * tcg_init_ctx.code_gen_buffer check_offset will wrap to way + * above the code_gen_buffer_size + */ + check_offset = host_pc - (uintptr_t) uc->tcg_ctx->code_gen_buffer; + + if (check_offset < uc->tcg_ctx->code_gen_buffer_size) { + tb = tcg_tb_lookup(tcg_ctx, host_pc); + if (tb) { + cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit); + if (tb_cflags(tb) & CF_NOCACHE) { + /* one-shot translation, invalidate it immediately */ + tb_phys_invalidate(tcg_ctx, tb, -1); + tcg_tb_remove(tcg_ctx, tb); + } + r = true; + } + } + + return r; +} + +static void page_init(struct uc_struct *uc) +{ + page_size_init(uc); + page_table_config_init(uc); +} + +static PageDesc *page_find_alloc(struct uc_struct *uc, tb_page_addr_t index, int alloc) +{ + PageDesc *pd; + void **lp; + int i; + + /* Level 1. Always allocated. */ + lp = uc->l1_map + ((index >> uc->v_l1_shift) & (uc->v_l1_size - 1)); + + /* Level 2..N-1. 
*/ + for (i = uc->v_l2_levels; i > 0; i--) { + void **p = *lp; + + if (p == NULL) { + void *existing; + + if (!alloc) { + return NULL; + } + p = g_new0(void *, V_L2_SIZE); + existing = *lp; + if (*lp == NULL) { + *lp = p; + } + if (unlikely(existing)) { + g_free(p); + p = existing; + } + } + + lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); + } + + pd = *lp; + if (pd == NULL) { + void *existing; + + if (!alloc) { + return NULL; + } + pd = g_new0(PageDesc, V_L2_SIZE); + existing = *lp; + if (*lp == NULL) { + *lp = pd; + } + if (unlikely(existing)) { + g_free(pd); + pd = existing; + } + } + + return pd + (index & (V_L2_SIZE - 1)); +} + +static inline PageDesc *page_find(struct uc_struct *uc, tb_page_addr_t index) +{ + return page_find_alloc(uc, index, 0); +} + +static void page_lock_pair(struct uc_struct *uc, PageDesc **ret_p1, tb_page_addr_t phys1, + PageDesc **ret_p2, tb_page_addr_t phys2, int alloc); + +#ifdef CONFIG_DEBUG_TCG + +static void ht_pages_locked_debug_init(void) +{ + if (ht_pages_locked_debug) { + return; + } + ht_pages_locked_debug = g_hash_table_new(NULL, NULL); +} + +static bool page_is_locked(const PageDesc *pd) +{ + PageDesc *found; + + ht_pages_locked_debug_init(); + found = g_hash_table_lookup(ht_pages_locked_debug, pd); + return !!found; +} + +static void page_lock__debug(PageDesc *pd) +{ + ht_pages_locked_debug_init(); + g_assert(!page_is_locked(pd)); + g_hash_table_insert(ht_pages_locked_debug, pd, pd); +} + +static void page_unlock__debug(const PageDesc *pd) +{ + bool removed; + + ht_pages_locked_debug_init(); + g_assert(page_is_locked(pd)); + removed = g_hash_table_remove(ht_pages_locked_debug, pd); + g_assert(removed); +} + +static void +do_assert_page_locked(const PageDesc *pd, const char *file, int line) +{ + if (unlikely(!page_is_locked(pd))) { + // error_report("assert_page_lock: PageDesc %p not locked @ %s:%d", + // pd, file, line); + abort(); // unreachable in unicorn. 
+ } +} + +#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) + +void assert_no_pages_locked(void) +{ + ht_pages_locked_debug_init(); + g_assert(g_hash_table_size(ht_pages_locked_debug) == 0); +} + +#else /* !CONFIG_DEBUG_TCG */ + +#define assert_page_locked(pd) + +static inline void page_lock__debug(const PageDesc *pd) +{ +} + +static inline void page_unlock__debug(const PageDesc *pd) +{ +} + +#endif /* CONFIG_DEBUG_TCG */ + +static inline void page_lock(PageDesc *pd) +{ + page_lock__debug(pd); +} + +static inline void page_unlock(PageDesc *pd) +{ + page_unlock__debug(pd); +} + +/* lock the page(s) of a TB in the correct acquisition order */ +static inline void page_lock_tb(struct uc_struct *uc, const TranslationBlock *tb) +{ + page_lock_pair(uc, NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0); +} + +static inline void page_unlock_tb(struct uc_struct *uc, const TranslationBlock *tb) +{ + PageDesc *p1 = page_find(uc, tb->page_addr[0] >> TARGET_PAGE_BITS); + + page_unlock(p1); + if (unlikely(tb->page_addr[1] != -1)) { + PageDesc *p2 = page_find(uc, tb->page_addr[1] >> TARGET_PAGE_BITS); + + if (p2 != p1) { + page_unlock(p2); + } + } +} + +static inline struct page_entry * +page_entry_new(PageDesc *pd, tb_page_addr_t index) +{ + struct page_entry *pe = g_malloc(sizeof(*pe)); + + pe->index = index; + pe->pd = pd; + // pe->locked = false; + return pe; +} + +static void page_entry_destroy(gpointer p) +{ + struct page_entry *pe = p; + + // g_assert(pe->locked); + page_unlock(pe->pd); + g_free(pe); +} + +#if 0 +/* returns false on success */ +static bool page_entry_trylock(struct page_entry *pe) +{ + bool busy; + + busy = qemu_spin_trylock(&pe->pd->lock); + if (!busy) { + g_assert(!pe->locked); + pe->locked = true; + page_lock__debug(pe->pd); + } + return busy; +} + +static void do_page_entry_lock(struct page_entry *pe) +{ + page_lock(pe->pd); + g_assert(!pe->locked); + pe->locked = true; +} + +static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data) +{ + struct page_entry *pe = value; + + do_page_entry_lock(pe); + return FALSE; +} + +static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data) +{ + struct page_entry *pe = value; + + if (pe->locked) { + pe->locked = false; + page_unlock(pe->pd); + } + return FALSE; +} +#endif + +/* + * Trylock a page, and if successful, add the page to a collection. + * Returns true ("busy") if the page could not be locked; false otherwise. + */ +static bool page_trylock_add(struct uc_struct *uc, struct page_collection *set, tb_page_addr_t addr) +{ + tb_page_addr_t index = addr >> TARGET_PAGE_BITS; + struct page_entry *pe; + PageDesc *pd; + + pe = g_tree_lookup(set->tree, &index); + if (pe) { + return false; + } + + pd = page_find(uc, index); + if (pd == NULL) { + return false; + } + + pe = page_entry_new(pd, index); + g_tree_insert(set->tree, &pe->index, pe); + + /* + * If this is either (1) the first insertion or (2) a page whose index + * is higher than any other so far, just lock the page and move on. + */ + if (set->max == NULL || pe->index > set->max->index) { + set->max = pe; +#if 0 + do_page_entry_lock(pe); +#endif + return false; + } + /* + * Try to acquire out-of-order lock; if busy, return busy so that we acquire + * locks in order. 
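
[editor's note] In this port the real spin-lock acquisition is stubbed out under #if 0, since Unicorn drives a single vCPU, but the shape of the upstream algorithm is worth keeping in mind: trylock out of order, and on contention drop the whole set and start over so locks end up taken in ascending order. A schematic, compile-only sketch with a hypothetical fixed-size lock array standing in for the page tree; a real implementation would block-acquire the already-collected set in order before continuing:

    #include <pthread.h>
    #include <stdbool.h>

    #define NPAGES 4
    static pthread_mutex_t page_lock[NPAGES] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    /* Try to add each lock out of order; if one is busy, back out
       completely and retry, re-encountering the set in order. */
    static void lock_range(bool held[NPAGES], unsigned start, unsigned end)
    {
    retry:
        for (unsigned i = start; i <= end; i++) {
            if (held[i]) {
                continue;
            }
            if (pthread_mutex_trylock(&page_lock[i]) != 0) {
                for (unsigned j = 0; j < NPAGES; j++) {
                    if (held[j]) {
                        pthread_mutex_unlock(&page_lock[j]);
                        held[j] = false;
                    }
                }
                goto retry;
            }
            held[i] = true;
        }
    }
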
+ */ +#if 0 + return page_entry_trylock(pe); +#else + return 0; +#endif +} + +static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata) +{ + tb_page_addr_t a = *(const tb_page_addr_t *)ap; + tb_page_addr_t b = *(const tb_page_addr_t *)bp; + + if (a == b) { + return 0; + } else if (a < b) { + return -1; + } + return 1; +} + +/* + * Lock a range of pages ([@start,@end[) as well as the pages of all + * intersecting TBs. + * Locking order: acquire locks in ascending order of page index. + */ +struct page_collection * +page_collection_lock(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end) +{ + struct page_collection *set = g_malloc(sizeof(*set)); + tb_page_addr_t index; + PageDesc *pd; + + start >>= TARGET_PAGE_BITS; + end >>= TARGET_PAGE_BITS; + g_assert(start <= end); + + set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL, + page_entry_destroy); + set->max = NULL; + assert_no_pages_locked(); + + retry: +#if 0 + g_tree_foreach(set->tree, page_entry_lock, NULL); +#endif + + for (index = start; index <= end; index++) { + TranslationBlock *tb; + int n; + + pd = page_find(uc, index); + if (pd == NULL) { + continue; + } + if (page_trylock_add(uc, set, index << TARGET_PAGE_BITS)) { +#if 0 + g_tree_foreach(set->tree, page_entry_unlock, NULL); +#endif + goto retry; + } + assert_page_locked(pd); + PAGE_FOR_EACH_TB(pd, tb, n) { + if (page_trylock_add(uc, set, tb->page_addr[0]) || + (tb->page_addr[1] != -1 && + page_trylock_add(uc, set, tb->page_addr[1]))) { + /* drop all locks, and reacquire in order */ +#if 0 + g_tree_foreach(set->tree, page_entry_unlock, NULL); +#endif + goto retry; + } + } + } + return set; +} + +void page_collection_unlock(struct page_collection *set) +{ + /* entries are unlocked and freed via page_entry_destroy */ + g_tree_destroy(set->tree); + g_free(set); +} + +static void page_lock_pair(struct uc_struct *uc, PageDesc **ret_p1, tb_page_addr_t phys1, + PageDesc **ret_p2, tb_page_addr_t phys2, int alloc) +{ + PageDesc *p1, *p2; + tb_page_addr_t page1; + tb_page_addr_t page2; + + assert_memory_lock(); + g_assert(phys1 != -1); + + page1 = phys1 >> TARGET_PAGE_BITS; + page2 = phys2 >> TARGET_PAGE_BITS; + + p1 = page_find_alloc(uc, page1, alloc); + if (ret_p1) { + *ret_p1 = p1; + } + if (likely(phys2 == -1)) { + page_lock(p1); + return; + } else if (page1 == page2) { + page_lock(p1); + if (ret_p2) { + *ret_p2 = p1; + } + return; + } + p2 = page_find_alloc(uc, page2, alloc); + if (ret_p2) { + *ret_p2 = p2; + } + if (page1 < page2) { + page_lock(p1); + page_lock(p2); + } else { + page_lock(p2); + page_lock(p1); + } +} + +/* Minimum size of the code gen buffer. This number is randomly chosen, + but not so small that we can't have a fair number of TB's live. */ +#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB) + +/* Maximum size of the code gen buffer we'd like to use. Unless otherwise + indicated, this is constrained by the range of direct branches on the + host cpu, as used by the TCG implementation of goto_tb. */ +#if defined(__x86_64__) +# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) +#elif defined(__sparc__) +# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) +#elif defined(__powerpc64__) +# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) +#elif defined(__powerpc__) +# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB) +#elif defined(__aarch64__) +# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) +#elif defined(__s390x__) + /* We have a +- 4GB range on the branches; leave some slop. 
*/ +# define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB) +#elif defined(__mips__) + /* We have a 256MB branch region, but leave room to make sure the + main executable is also within that region. */ +# define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB) +#else +# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) +#endif + +#if TCG_TARGET_REG_BITS == 32 +#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB) +#else /* TCG_TARGET_REG_BITS == 64 */ +/* + * We expect most system emulation to run one or two guests per host. + * Users running large scale system emulation may want to tweak their + * runtime setup via the tb-size control on the command line. + */ +#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB) +#endif + +#define DEFAULT_CODE_GEN_BUFFER_SIZE \ + (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ + ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) + +static inline size_t size_code_gen_buffer(size_t tb_size) +{ + /* Size the buffer. */ + if (tb_size == 0) { + tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; + } + if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { + tb_size = MIN_CODE_GEN_BUFFER_SIZE; + } + if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { + tb_size = MAX_CODE_GEN_BUFFER_SIZE; + } + return tb_size; +} + +#ifdef __mips__ +/* In order to use J and JAL within the code_gen_buffer, we require + that the buffer not cross a 256MB boundary. */ +static inline bool cross_256mb(void *addr, size_t size) +{ + return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful; +} + +/* We weren't able to allocate a buffer without crossing that boundary, + so make do with the larger portion of the buffer that doesn't cross. + Returns the new base of the buffer, and adjusts code_gen_buffer_size. */ +static inline void *split_cross_256mb(TCGContext *tcg_ctx, void *buf1, size_t size1) +{ + void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful); + size_t size2 = buf1 + size1 - buf2; + + size1 = buf2 - buf1; + if (size1 < size2) { + size1 = size2; + buf1 = buf2; + } + + tcg_ctx->code_gen_buffer_size = size1; + return buf1; +} +#endif + +#ifdef USE_STATIC_CODE_GEN_BUFFER +static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] + __attribute__((aligned(CODE_GEN_ALIGN))); + +static inline void *alloc_code_gen_buffer(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + void *buf = static_code_gen_buffer; + void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer); + size_t size; + + /* page-align the beginning and end of the buffer */ + buf = QEMU_ALIGN_PTR_UP(buf, uc->qemu_real_host_page_size); + end = QEMU_ALIGN_PTR_DOWN(end, uc->qemu_real_host_page_size); + + size = end - buf; + + /* Honor a command-line option limiting the size of the buffer. 
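
[editor's note] The cross_256mb() test above uses an XOR trick: the buffer stays inside one 256 MiB region exactly when the start and one-past-end addresses agree in every bit above bit 27, so XOR-ing them and masking off the low 28 bits yields zero. A quick standalone check with illustrative addresses:

    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    static int crosses_256mb(uintptr_t addr, size_t size)
    {
        return ((addr ^ (addr + size)) & ~(uintptr_t)0x0fffffff) != 0;
    }

    int main(void)
    {
        /* 0x10000000 is a 256 MiB boundary. */
        assert(!crosses_256mb(0x10000000u, 0x0fffffffu)); /* stays inside */
        assert( crosses_256mb(0x1fffffffu, 0x2u));        /* steps across */
        return 0;
    }
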
*/ + if (size > tcg_ctx->code_gen_buffer_size) { + size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size, + uc->qemu_real_host_page_size); + } + tcg_ctx->code_gen_buffer_size = size; + +#ifdef __mips__ + if (cross_256mb(buf, size)) { + buf = split_cross_256mb(tcg_ctx, buf, size); + size = tcg_ctx->code_gen_buffer_size; + } +#endif + + if (qemu_mprotect_rwx(buf, size)) { + abort(); + } + qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); + + return buf; +} +#elif defined(_WIN32) +static inline void *alloc_code_gen_buffer(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + size_t size = tcg_ctx->code_gen_buffer_size; + return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, + PAGE_EXECUTE_READWRITE); +} +void free_code_gen_buffer(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + if (tcg_ctx->code_gen_buffer) { + VirtualFree(tcg_ctx->code_gen_buffer, 0, MEM_RELEASE); + } +} +#else +void free_code_gen_buffer(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + if (tcg_ctx->code_gen_buffer) { + munmap(tcg_ctx->code_gen_buffer, tcg_ctx->code_gen_buffer_size); + } +} + +static inline void *alloc_code_gen_buffer(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + int prot = PROT_WRITE | PROT_READ | PROT_EXEC; + int flags = MAP_PRIVATE | MAP_ANONYMOUS; + size_t size = tcg_ctx->code_gen_buffer_size; + void *buf; +#ifdef USE_MAP_JIT + flags |= MAP_JIT; +#endif + buf = mmap(NULL, size, prot, flags, -1, 0); + if (buf == MAP_FAILED) { + return NULL; + } + +#ifdef __mips__ + if (cross_256mb(buf, size)) { + /* + * Try again, with the original still mapped, to avoid re-acquiring + * the same 256mb crossing. + */ + size_t size2; + void *buf2 = mmap(NULL, size, prot, flags, -1, 0); + switch ((int)(buf2 != MAP_FAILED)) { + case 1: + if (!cross_256mb(buf2, size)) { + /* Success! Use the new buffer. */ + munmap(buf, size); + break; + } + /* Failure. Work with what we had. */ + munmap(buf2, size); + /* fallthru */ + default: + /* Split the original buffer. Free the smaller half. */ + buf2 = split_cross_256mb(tcg_ctx, buf, size); + size2 = tcg_ctx->code_gen_buffer_size; + if (buf == buf2) { + munmap(buf + size2, size - size2); + } else { + munmap(buf, size - size2); + } + size = size2; + break; + } + buf = buf2; + } +#endif + + /* Request large pages for the buffer. */ + qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); + + return buf; +} +#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */ + +static inline void code_gen_alloc(struct uc_struct *uc, size_t tb_size) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size); + tcg_ctx->code_gen_buffer = alloc_code_gen_buffer(uc); + if (tcg_ctx->code_gen_buffer == NULL) { + fprintf(stderr, "Could not allocate dynamic translator buffer\n"); + exit(1); + } +} + +static bool tb_cmp(struct uc_struct *uc, const void *ap, const void *bp) +{ + const TranslationBlock *a = ap; + const TranslationBlock *b = bp; + + return a->pc == b->pc && + a->cs_base == b->cs_base && + a->flags == b->flags && + (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) && + a->trace_vcpu_dstate == b->trace_vcpu_dstate && + a->page_addr[0] == b->page_addr[0] && + a->page_addr[1] == b->page_addr[1]; +} + +static void tb_htable_init(struct uc_struct *uc) +{ + unsigned int mode = QHT_MODE_AUTO_RESIZE; + + qht_init(&uc->tcg_ctx->tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); +} + +/* Must be called before using the QEMU cpus. 'tb_size' is the size + (in bytes) allocated to the translation buffer. 
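
[editor's note] Both allocation paths above reserve a single read-write-execute region for the generated code: VirtualAlloc with PAGE_EXECUTE_READWRITE on Windows, an anonymous RWX mmap elsewhere (plus MAP_JIT where the host needs it). A stripped-down POSIX sketch of the same request, with minimal error handling; note that hardened kernels enforcing W^X may reject a simultaneous PROT_WRITE|PROT_EXEC mapping:

    #include <stddef.h>
    #include <sys/mman.h>

    /* Ask the kernel for an anonymous RWX mapping to hold JIT output.
       Returns NULL on failure, mirroring alloc_code_gen_buffer(). */
    static void *alloc_jit_buffer(size_t size)
    {
        void *buf = mmap(NULL, size,
                         PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return buf == MAP_FAILED ? NULL : buf;
    }
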
Zero means default + size. */ +void tcg_exec_init(struct uc_struct *uc, unsigned long tb_size) +{ + /* remove tcg object. init here. */ + /* tcg class init: tcg-all.c:tcg_accel_class_init(), skip all. */ + /* tcg object init: tcg-all.c:tcg_accel_instance_init(), skip all. */ + /* tcg init: tcg-all.c: tcg_init(), skip all. */ + /* run tcg_exec_init() here. */ + uc->tcg_ctx = g_malloc(sizeof(TCGContext)); + tcg_context_init(uc->tcg_ctx); + uc->tcg_ctx->uc = uc; + page_init(uc); + tb_htable_init(uc); + code_gen_alloc(uc, tb_size); + tb_exec_unlock(uc->tcg_ctx); + tcg_prologue_init(uc->tcg_ctx); + /* cpu_interrupt_handler is not used in uc1 */ + uc->l1_map = g_malloc0(sizeof(void *) * V_L1_MAX_SIZE); +} + +/* call with @p->lock held */ +static inline void invalidate_page_bitmap(PageDesc *p) +{ + assert_page_locked(p); + + g_free(p->code_bitmap); + p->code_bitmap = NULL; + p->code_write_count = 0; +} + +static void tb_clean_internal(void **p, int x) +{ + int i; + void **q; + + if (x <= 1) { + for (i = 0; i < V_L2_SIZE; i++) { + q = p[i]; + if (q) { + g_free(q); + } + } + g_free(p); + } else { + for (i = 0; i < V_L2_SIZE; i++) { + q = p[i]; + if (q) { + tb_clean_internal(q, x - 1); + } + } + g_free(p); + } +} + +void tb_cleanup(struct uc_struct *uc) +{ + int i, x; + void **p; + + if (uc) { + if (uc->l1_map) { + x = uc->v_l2_levels; + if (x <= 0) { + for (i = 0; i < uc->v_l1_size; i++) { + p = uc->l1_map[i]; + if (p) { + g_free(p); + uc->l1_map[i] = NULL; + } + } + } else { + for (i = 0; i < uc->v_l1_size; i++) { + p = uc->l1_map[i]; + if (p) { + tb_clean_internal(p, x); + uc->l1_map[i] = NULL; + } + } + } + } + } +} + +/* Set to NULL all the 'first_tb' fields in all PageDescs. */ +static void page_flush_tb_1(struct uc_struct *uc, int level, void **lp) +{ + int i; + + if (*lp == NULL) { + return; + } + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_lock(&pd[i]); + pd[i].first_tb = (uintptr_t)NULL; + invalidate_page_bitmap(pd + i); + page_unlock(&pd[i]); + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_flush_tb_1(uc, level - 1, pp + i); + } + } +} + +static void page_flush_tb(struct uc_struct *uc) +{ + int i, l1_sz = uc->v_l1_size; + + for (i = 0; i < l1_sz; i++) { + page_flush_tb_1(uc, uc->v_l2_levels, uc->l1_map + i); + } +} + +#if 0 +static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data) +{ + const TranslationBlock *tb = value; + size_t *size = data; + + *size += tb->tc.size; + return false; +} +#endif + +/* flush all the translation blocks */ +static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) +{ + mmap_lock(); + /* If it is already been done on request of another CPU, + * just retry. + */ + if (cpu->uc->tcg_ctx->tb_ctx.tb_flush_count != tb_flush_count.host_int) { + goto done; + } + +#if 0 + if (DEBUG_TB_FLUSH_GATE) { + size_t nb_tbs = tcg_nb_tbs(cpu->uc->tcg_ctx); + size_t host_size = 0; + + tcg_tb_foreach(cpu->uc->tcg_ctx, tb_host_size_iter, &host_size); + //printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n", + // tcg_code_size(cpu->uc->tcg_ctx), nb_tbs, nb_tbs > 0 ? 
host_size / nb_tbs : 0); + } + + CPU_FOREACH(cpu) { + cpu_tb_jmp_cache_clear(cpu); + } +#else + cpu_tb_jmp_cache_clear(cpu); +#endif + + qht_reset_size(cpu->uc, &cpu->uc->tcg_ctx->tb_ctx.htable, CODE_GEN_HTABLE_SIZE); + page_flush_tb(cpu->uc); + + tcg_region_reset_all(cpu->uc->tcg_ctx); + /* XXX: flush processor icache at this point if cache flush is + expensive */ + cpu->uc->tcg_ctx->tb_ctx.tb_flush_count = cpu->uc->tcg_ctx->tb_ctx.tb_flush_count + 1; + +done: + mmap_unlock(); +} + +void tb_flush(CPUState *cpu) +{ + unsigned tb_flush_count = cpu->uc->tcg_ctx->tb_ctx.tb_flush_count; + do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); +} + +/* + * user-mode: call with mmap_lock held + * !user-mode: call with @pd->lock held + */ +static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) +{ + TranslationBlock *tb1; + uintptr_t *pprev; + unsigned int n1; + + assert_page_locked(pd); + pprev = &pd->first_tb; + PAGE_FOR_EACH_TB(pd, tb1, n1) { + if (tb1 == tb) { + *pprev = tb1->page_next[n1]; + return; + } + pprev = &tb1->page_next[n1]; + } + g_assert_not_reached(); +} + +/* remove @orig from its @n_orig-th jump list */ +static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) +{ + uintptr_t ptr, ptr_locked; + TranslationBlock *dest; + TranslationBlock *tb; + uintptr_t *pprev; + int n; + + /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ + ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1); + dest = (TranslationBlock *)(ptr & ~1); + if (dest == NULL) { + return; + } + + ptr_locked = orig->jmp_dest[n_orig]; + if (ptr_locked != ptr) { + /* + * The only possibility is that the jump was unlinked via + * tb_jump_unlink(dest). Seeing here another destination would be a bug, + * because we set the LSB above. + */ + g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); + return; + } + /* + * We first acquired the lock, and since the destination pointer matches, + * we know for sure that @orig is in the jmp list. + */ + pprev = &dest->jmp_list_head; + TB_FOR_EACH_JMP(dest, tb, n) { + if (tb == orig && n == n_orig) { + *pprev = tb->jmp_list_next[n]; + /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ + return; + } + pprev = &tb->jmp_list_next[n]; + } + g_assert_not_reached(); +} + +/* reset the jump entry 'n' of a TB so that it is not chained to + another TB */ +static inline void tb_reset_jump(TranslationBlock *tb, int n) +{ + uintptr_t addr = (uintptr_t)((char *)tb->tc.ptr + tb->jmp_reset_offset[n]); + tb_set_jmp_target(tb, n, addr); +} + +/* remove any jumps to the TB */ +static inline void tb_jmp_unlink(TranslationBlock *dest) +{ + TranslationBlock *tb; + int n; + + TB_FOR_EACH_JMP(dest, tb, n) { + tb_reset_jump(tb, n); +#ifdef _MSC_VER + atomic_and((long *)&tb->jmp_dest[n], (uintptr_t)NULL | 1); +#else + atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); +#endif + /* No need to clear the list entry; setting the dest ptr is enough */ + } + dest->jmp_list_head = (uintptr_t)NULL; +} + +/* + * In user-mode, call with mmap_lock held. + * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' + * locks held. 
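+ *
+ * The sequence below first sets CF_INVALID so that no new jumps can be
+ * chained to this TB, then removes it from the QHT hash table, the page
+ * lists, the per-CPU tb_jmp_cache and both jump lists. On Darwin hosts
+ * the code cache is temporarily made writable around the update via
+ * tb_exec_unlock()/tb_exec_change().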
+ */ +static void do_tb_phys_invalidate(TCGContext *tcg_ctx, TranslationBlock *tb, bool rm_from_page_list) +{ + CPUState *cpu = tcg_ctx->uc->cpu; + struct uc_struct *uc = tcg_ctx->uc; + PageDesc *p; + uint32_t h; + tb_page_addr_t phys_pc; + bool code_gen_locked; + + assert_memory_lock(); + code_gen_locked = tb_exec_is_locked(tcg_ctx); + tb_exec_unlock(tcg_ctx); + + /* make sure no further incoming jumps will be chained to this TB */ + tb->cflags = tb->cflags | CF_INVALID; + + /* remove the TB from the hash list */ + phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); + h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK, + tb->trace_vcpu_dstate); + if (!(tb->cflags & CF_NOCACHE) && + !qht_remove(&tcg_ctx->tb_ctx.htable, tb, h)) { + tb_exec_change(tcg_ctx, code_gen_locked); + return; + } + + /* remove the TB from the page list */ + if (rm_from_page_list) { + p = page_find(tcg_ctx->uc, tb->page_addr[0] >> TARGET_PAGE_BITS); + tb_page_remove(p, tb); + invalidate_page_bitmap(p); + if (tb->page_addr[1] != -1) { + p = page_find(tcg_ctx->uc, tb->page_addr[1] >> TARGET_PAGE_BITS); + tb_page_remove(p, tb); + invalidate_page_bitmap(p); + } + } + + /* remove the TB from the hash list */ + h = tb_jmp_cache_hash_func(uc, tb->pc); + if (cpu->tb_jmp_cache[h] == tb) { + cpu->tb_jmp_cache[h] = NULL; + } + + /* suppress this TB from the two jump lists */ + tb_remove_from_jmp_list(tb, 0); + tb_remove_from_jmp_list(tb, 1); + + /* suppress any remaining jumps to this TB */ + tb_jmp_unlink(tb); + + tcg_ctx->tb_phys_invalidate_count = tcg_ctx->tb_phys_invalidate_count + 1; + + tb_exec_change(tcg_ctx, code_gen_locked); +} + +static void tb_phys_invalidate__locked(TCGContext *tcg_ctx, TranslationBlock *tb) +{ + do_tb_phys_invalidate(tcg_ctx, tb, true); +} + +/* invalidate one TB + * + * Called with mmap_lock held in user-mode. + */ +void tb_phys_invalidate(TCGContext *tcg_ctx, TranslationBlock *tb, tb_page_addr_t page_addr) +{ + if (page_addr == -1 && tb->page_addr[0] != -1) { + page_lock_tb(tcg_ctx->uc, tb); + do_tb_phys_invalidate(tcg_ctx, tb, true); + page_unlock_tb(tcg_ctx->uc, tb); + } else { + do_tb_phys_invalidate(tcg_ctx, tb, false); + } +} + +/* call with @p->lock held */ +static void build_page_bitmap(struct uc_struct *uc, PageDesc *p) +{ + int n, tb_start, tb_end; + TranslationBlock *tb; + + assert_page_locked(p); + p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); + + PAGE_FOR_EACH_TB(p, tb, n) { + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb->pc & ~TARGET_PAGE_MASK; + tb_end = tb_start + tb->size; + if (tb_end > TARGET_PAGE_SIZE) { + tb_end = TARGET_PAGE_SIZE; + } + } else { + tb_start = 0; + tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); + } + qemu_bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); + } +} + +/* add the tb in the target page and protect it if necessary + * + * Called with mmap_lock held for user-mode emulation. + * Called with @p->lock held in !user-mode. + */ +static inline void tb_page_add(struct uc_struct *uc, PageDesc *p, TranslationBlock *tb, + unsigned int n, tb_page_addr_t page_addr) +{ + bool page_already_protected; + + assert_page_locked(p); + + tb->page_addr[n] = page_addr; + tb->page_next[n] = p->first_tb; + page_already_protected = p->first_tb != (uintptr_t)NULL; + p->first_tb = (uintptr_t)tb | n; + invalidate_page_bitmap(p); + + /* if some code is already present, then the pages are already + protected. 
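+       (tlb_protect_code() write-protects the guest page so that later
+       stores fault into the tb_invalidate_phys_page_* path;
+       tlb_unprotect_code() lifts the protection again once the last TB
+       on the page is gone.)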
So we handle the case where only the first TB is + allocated in a physical page */ + if (!page_already_protected) { + tlb_protect_code(uc, page_addr); + } +} + +/* add a new TB and link it to the physical page tables. phys_page2 is + * (-1) to indicate that only one page contains the TB. + * + * Called with mmap_lock held for user-mode emulation. + * + * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. + * Note that in !user-mode, another thread might have already added a TB + * for the same block of guest code that @tb corresponds to. In that case, + * the caller should discard the original @tb, and use instead the returned TB. + */ +static TranslationBlock * +tb_link_page(struct uc_struct *uc, TranslationBlock *tb, tb_page_addr_t phys_pc, + tb_page_addr_t phys_page2) +{ + PageDesc *p; + PageDesc *p2 = NULL; + + assert_memory_lock(); + + if (phys_pc == -1) { + /* + * If the TB is not associated with a physical RAM page then + * it must be a temporary one-insn TB, and we have nothing to do + * except fill in the page_addr[] fields. + */ + assert(tb->cflags & CF_NOCACHE); + tb->page_addr[0] = tb->page_addr[1] = -1; + return tb; + } + + /* + * Add the TB to the page list, acquiring first the pages's locks. + * We keep the locks held until after inserting the TB in the hash table, + * so that if the insertion fails we know for sure that the TBs are still + * in the page descriptors. + * Note that inserting into the hash table first isn't an option, since + * we can only insert TBs that are fully initialized. + */ + page_lock_pair(uc, &p, phys_pc, &p2, phys_page2, 1); + tb_page_add(uc, p, tb, 0, phys_pc & TARGET_PAGE_MASK); + if (p2) { + tb_page_add(uc, p2, tb, 1, phys_page2); + } else { + tb->page_addr[1] = -1; + } + + if (!(tb->cflags & CF_NOCACHE)) { + void *existing_tb = NULL; + uint32_t h; + + /* add in the hash table */ + h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK, + tb->trace_vcpu_dstate); + tb->hash = h; // unicorn needs this so it can remove this tb + qht_insert(uc, &uc->tcg_ctx->tb_ctx.htable, tb, h, &existing_tb); + + /* remove TB from the page(s) if we couldn't insert it */ + if (unlikely(existing_tb)) { + tb_page_remove(p, tb); + invalidate_page_bitmap(p); + if (p2) { + tb_page_remove(p2, tb); + invalidate_page_bitmap(p2); + } + tb = existing_tb; + } + } + + if (p2 && p2 != p) { + page_unlock(p2); + } + page_unlock(p); + + return tb; +} + +/* Called with mmap_lock held for user mode emulation. 
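+ *
+ * tb_gen_code() performs one complete translation: allocate a TB with
+ * tcg_tb_alloc(), build the TCG IR through gen_intermediate_code(), emit
+ * host code with tcg_gen_code(), then publish the block via tb_link_page(),
+ * discarding the local copy if another thread translated the same code
+ * first.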
*/ +TranslationBlock *tb_gen_code(CPUState *cpu, + target_ulong pc, target_ulong cs_base, + uint32_t flags, int cflags) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = cpu->uc; +#endif + TCGContext *tcg_ctx = cpu->uc->tcg_ctx; + CPUArchState *env = cpu->env_ptr; + TranslationBlock *tb, *existing_tb; + tb_page_addr_t phys_pc, phys_page2; + target_ulong virt_page2; + tcg_insn_unit *gen_code_buf; + int gen_code_size, search_size, max_insns; + + assert_memory_lock(); + + phys_pc = get_page_addr_code(env, pc); + + if (phys_pc == -1) { + /* Generate a temporary TB with 1 insn in it */ + cflags &= ~CF_COUNT_MASK; + cflags |= CF_NOCACHE | 1; + } + + cflags &= ~CF_CLUSTER_MASK; + cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT; + + max_insns = cflags & CF_COUNT_MASK; + if (max_insns == 0) { + max_insns = CF_COUNT_MASK; + } + if (max_insns > TCG_MAX_INSNS) { + max_insns = TCG_MAX_INSNS; + } + if (cpu->singlestep_enabled) { + max_insns = 1; + } + + buffer_overflow: + tb = tcg_tb_alloc(tcg_ctx); + if (unlikely(!tb)) { + /* flush must be done */ + tb_flush(cpu); + mmap_unlock(); + /* Make the execution loop process the flush as soon as possible. */ + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit(cpu); + } + + gen_code_buf = tcg_ctx->code_gen_ptr; + tb->tc.ptr = gen_code_buf; + tb->pc = pc; + tb->cs_base = cs_base; + tb->flags = flags; + tb->cflags = cflags; + tb->orig_tb = NULL; + tb->trace_vcpu_dstate = *cpu->trace_dstate; + tcg_ctx->tb_cflags = cflags; + tb_overflow: + + tcg_func_start(tcg_ctx); + + tcg_ctx->cpu = env_cpu(env); + gen_intermediate_code(cpu, tb, max_insns); + tcg_ctx->cpu = NULL; + + /* generate machine code */ + tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; + tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; + tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset; + if (TCG_TARGET_HAS_direct_jump) { + tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg; + tcg_ctx->tb_jmp_target_addr = NULL; + } else { + tcg_ctx->tb_jmp_insn_offset = NULL; + tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg; + } + + gen_code_size = tcg_gen_code(tcg_ctx, tb); + if (unlikely(gen_code_size < 0)) { + switch (gen_code_size) { + case -1: + /* + * Overflow of code_gen_buffer, or the current slice of it. + * + * TODO: We don't need to re-do gen_intermediate_code, nor + * should we re-do the tcg optimization currently hidden + * inside tcg_gen_code. All that should be required is to + * flush the TBs, allocate a new TB, re-initialize it per + * above, and re-do the actual code generation. + */ + goto buffer_overflow; + + case -2: + /* + * The code generated for the TranslationBlock is too large. + * The maximum size allowed by the unwind info is 64k. + * There may be stricter constraints from relocations + * in the tcg backend. + * + * Try again with half as many insns as we attempted this time. + * If a single insn overflows, there's a bug somewhere... 
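+ * (Concretely: a 512-insn block that overflows is retried with
+ * max_insns = 256, and halved again until the generated code fits.)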
+ */ + max_insns = tb->icount; + assert(max_insns > 1); + max_insns /= 2; + goto tb_overflow; + + default: + g_assert_not_reached(); + } + } + search_size = encode_search(cpu->uc, tb, (uint8_t *)gen_code_buf + gen_code_size); + if (unlikely(search_size < 0)) { + goto buffer_overflow; + } + tb->tc.size = gen_code_size; + + tcg_ctx->code_gen_ptr = (void *) + ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, + CODE_GEN_ALIGN); + + /* init jump list */ + tb->jmp_list_head = (uintptr_t)NULL; + tb->jmp_list_next[0] = (uintptr_t)NULL; + tb->jmp_list_next[1] = (uintptr_t)NULL; + tb->jmp_dest[0] = (uintptr_t)NULL; + tb->jmp_dest[1] = (uintptr_t)NULL; + + /* init original jump addresses which have been set during tcg_gen_code() */ + if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { + tb_reset_jump(tb, 0); + } + if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { + tb_reset_jump(tb, 1); + } + + /* check next page if needed */ + virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; + phys_page2 = -1; + if ((pc & TARGET_PAGE_MASK) != virt_page2) { + phys_page2 = get_page_addr_code(env, virt_page2); + } + /* + * No explicit memory barrier is required -- tb_link_page() makes the + * TB visible in a consistent state. + */ + existing_tb = tb_link_page(cpu->uc, tb, phys_pc, phys_page2); + /* if the TB already exists, discard what we just translated */ + if (unlikely(existing_tb != tb)) { + uintptr_t orig_aligned = (uintptr_t)gen_code_buf; + + orig_aligned -= ROUND_UP(sizeof(*tb), tcg_ctx->uc->qemu_icache_linesize); + tcg_ctx->code_gen_ptr = (void *)orig_aligned; + return existing_tb; + } + tcg_tb_insert(tcg_ctx, tb); + return tb; +} + +/* + * @p must be non-NULL. + * user-mode: call with mmap_lock held. + * !user-mode: call with all @pages locked. + */ +static void +tb_invalidate_phys_page_range__locked(struct uc_struct *uc, struct page_collection *pages, + PageDesc *p, tb_page_addr_t start, + tb_page_addr_t end, + uintptr_t retaddr) +{ + TranslationBlock *tb; + tb_page_addr_t tb_start, tb_end; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + CPUState *cpu = uc->cpu; + CPUArchState *env = NULL; + bool current_tb_not_found = retaddr != 0; + bool current_tb_modified = false; + TranslationBlock *current_tb = NULL; + target_ulong current_pc = 0; + target_ulong current_cs_base = 0; + uint32_t current_flags = 0; +#endif /* TARGET_HAS_PRECISE_SMC */ + + assert_page_locked(p); + +#if defined(TARGET_HAS_PRECISE_SMC) + if (cpu != NULL) { + env = cpu->env_ptr; + } +#endif + + /* we remove all the TBs in the range [start, end[ */ + /* XXX: see if in some cases it could be faster to invalidate all + the code */ + PAGE_FOR_EACH_TB(p, tb, n) { + assert_page_locked(p); + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); + tb_end = tb_start + tb->size; + } else { + tb_start = tb->page_addr[1]; + tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); + } + if (!(tb_end <= start || tb_start >= end)) { +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_not_found) { + current_tb_not_found = false; + /* now we have a real cpu fault */ + current_tb = tcg_tb_lookup(uc->tcg_ctx, retaddr); + } + if (current_tb == tb && + (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { + /* + * If we are modifying the current TB, we must stop + * its execution. 
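+ * (TARGET_HAS_PRECISE_SMC is only defined for x86 targets, where code may
+ * legitimately patch instructions it is about to execute.)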
We could be more precise by checking
+                 * that the modification is after the current PC, but it
+                 * would require a specialized function to partially
+                 * restore the CPU state.
+                 */
+                current_tb_modified = true;
+                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
+                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
+                                     &current_flags);
+            }
+#endif /* TARGET_HAS_PRECISE_SMC */
+            tb_phys_invalidate__locked(uc->tcg_ctx, tb);
+        }
+    }
+
+    /* if no code remaining, no need to continue to use slow writes */
+    if (!p->first_tb) {
+        invalidate_page_bitmap(p);
+        tlb_unprotect_code(uc, start);
+    }
+
+#ifdef TARGET_HAS_PRECISE_SMC
+    if (current_tb_modified) {
+        page_collection_unlock(pages);
+        /* Force execution of one insn next time. */
+        cpu->cflags_next_tb = 1 | curr_cflags();
+        mmap_unlock();
+        cpu_loop_exit_noexc(cpu);
+    }
+#endif
+}
+
+/*
+ * Invalidate all TBs which intersect with the target physical address range
+ * [start;end[. NOTE: start and end must refer to the *same* physical page.
+ * 'is_cpu_write_access' should be true if called from a real cpu write
+ * access: the virtual CPU will exit the current TB if code is modified inside
+ * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation
+ */
+void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end)
+{
+    struct page_collection *pages;
+    PageDesc *p;
+
+    assert_memory_lock();
+
+    p = page_find(uc, start >> TARGET_PAGE_BITS);
+    if (p == NULL) {
+        return;
+    }
+    pages = page_collection_lock(uc, start, end);
+    tb_invalidate_phys_page_range__locked(uc, pages, p, start, end, 0);
+    page_collection_unlock(pages);
+}
+
+/*
+ * Invalidate all TBs which intersect with the target physical address range
+ * [start;end[. NOTE: start and end may refer to *different* physical pages.
+ * 'is_cpu_write_access' should be true if called from a real cpu write
+ * access: the virtual CPU will exit the current TB if code is modified inside
+ * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
+void tb_invalidate_phys_range(struct uc_struct *uc, ram_addr_t start, ram_addr_t end)
+{
+    struct page_collection *pages;
+    tb_page_addr_t next;
+
+    assert_memory_lock();
+
+    pages = page_collection_lock(uc, start, end);
+    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+         start < end;
+         start = next, next += TARGET_PAGE_SIZE) {
+        PageDesc *pd = page_find(uc, start >> TARGET_PAGE_BITS);
+        tb_page_addr_t bound = MIN(next, end);
+
+        if (pd == NULL) {
+            continue;
+        }
+        tb_invalidate_phys_page_range__locked(uc, pages, pd, start, bound, 0);
+    }
+    page_collection_unlock(pages);
+}
+
+/* len must be <= 8 and start must be a multiple of len.
+ * Called via softmmu_template.h when code areas are written to with
+ * iothread mutex not held.
+ *
+ * Call with all @pages in the range [@start, @start + len[ locked.
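+ *
+ * The per-page code_bitmap (built once code_write_count reaches
+ * SMC_BITMAP_USE_THRESHOLD) records which bytes of the page hold
+ * translated code, so a small write that touches none of them returns
+ * without invalidating anything.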
+ */ +void tb_invalidate_phys_page_fast(struct uc_struct *uc, struct page_collection *pages, + tb_page_addr_t start, int len, + uintptr_t retaddr) +{ + PageDesc *p; + + assert_memory_lock(); + + p = page_find(uc, start >> TARGET_PAGE_BITS); + if (!p) { + return; + } + + assert_page_locked(p); + if (!p->code_bitmap && + ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { + build_page_bitmap(uc, p); + } + if (p->code_bitmap) { + unsigned int nr; + unsigned long b; + + nr = start & ~TARGET_PAGE_MASK; + b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); + if (b & ((1 << len) - 1)) { + goto do_invalidate; + } + } else { + do_invalidate: + tb_invalidate_phys_page_range__locked(uc, pages, p, start, start + len, + retaddr); + } +} + +/* user-mode: call with mmap_lock held */ +void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr) +{ + TCGContext *tcg_ctx = cpu->uc->tcg_ctx; + TranslationBlock *tb; + + assert_memory_lock(); + + tb = tcg_tb_lookup(tcg_ctx, retaddr); + if (tb) { + /* We can use retranslation to find the PC. */ + cpu_restore_state_from_tb(cpu, tb, retaddr, true); + tb_phys_invalidate(tcg_ctx, tb, -1); + } else { + /* The exception probably happened in a helper. The CPU state should + have been saved before calling it. Fetch the PC from there. */ + CPUArchState *env = cpu->env_ptr; + target_ulong pc, cs_base; + tb_page_addr_t addr; + uint32_t flags; + + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + addr = get_page_addr_code(env, pc); + if (addr != -1) { + tb_invalidate_phys_range(cpu->uc, addr, addr + 1); + } + } +} + +/* in deterministic execution mode, instructions doing device I/Os + * must be at the end of the TB. + * + * Called by softmmu_template.h, with iothread mutex not held. + */ +void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) +{ + TCGContext *tcg_ctx = cpu->uc->tcg_ctx; +#if defined(TARGET_MIPS) || defined(TARGET_SH4) + CPUArchState *env = cpu->env_ptr; +#endif + TranslationBlock *tb; + uint32_t n; + + tb = tcg_tb_lookup(tcg_ctx, retaddr); + if (!tb) { + cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", + (void *)retaddr); + } + cpu_restore_state_from_tb(cpu, tb, retaddr, true); + + /* On MIPS and SH, delay slot instructions can only be restarted if + they were already the first instruction in the TB. If this is not + the first instruction in a TB then re-execute the preceding + branch. */ + n = 1; +#if defined(TARGET_MIPS) + if ((env->hflags & MIPS_HFLAG_BMASK) != 0 + && env->active_tc.PC != tb->pc) { + env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); + cpu_neg(cpu)->icount_decr.u16.low++; + env->hflags &= ~MIPS_HFLAG_BMASK; + n = 2; + } +#elif defined(TARGET_SH4) + if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 + && env->pc != tb->pc) { + env->pc -= 2; + cpu_neg(cpu)->icount_decr.u16.low++; + env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); + n = 2; + } +#endif + + /* Generate a new TB executing the I/O insn. */ + cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n; + + if (tb_cflags(tb) & CF_NOCACHE) { + if (tb->orig_tb) { + /* Invalidate original TB if this TB was generated in + * cpu_exec_nocache() */ + tb_phys_invalidate(tcg_ctx, tb->orig_tb, -1); + } + tcg_tb_remove(tcg_ctx, tb); + } + + /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not + * the first in the TB) then we end up generating a whole new TB and + * repeating the fault, which is horribly inefficient. + * Better would be to execute just this insn uncached, or generate a + * second new TB. 
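+ * (Setting cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n asks the
+ * next translation for a block of at most n instructions with the I/O
+ * access last, which is what deterministic execution needs on the retry.)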
+ */
+    cpu_loop_exit_noexc(cpu);
+}
+
+static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
+{
+    unsigned int i, i0 = tb_jmp_cache_hash_page(cpu->uc, page_addr);
+
+    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
+        cpu->tb_jmp_cache[i0 + i] = NULL;
+    }
+}
+
+void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
+{
+#ifdef TARGET_ARM
+    struct uc_struct *uc = cpu->uc;
+#endif
+
+    /* Discard jump cache entries for any tb which might potentially
+       overlap the flushed page. */
+    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
+    tb_jmp_cache_clear_page(cpu, addr);
+}
+
+/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
+void tcg_flush_softmmu_tlb(struct uc_struct *uc)
+{
+    tlb_flush(uc->cpu);
+}
+
+
+#ifdef HAVE_PTHREAD_JIT_PROTECT
+static bool tb_exec_is_locked(TCGContext *tcg_ctx)
+{
+    return tcg_ctx->code_gen_locked;
+}
+
+static void tb_exec_change(TCGContext *tcg_ctx, bool locked)
+{
+    jit_write_protect(locked);
+    tcg_ctx->code_gen_locked = locked;
+}
+#else /* not needed on non-Darwin platforms */
+static bool tb_exec_is_locked(TCGContext *tcg_ctx)
+{
+    return false;
+}
+
+static void tb_exec_change(TCGContext *tcg_ctx, bool locked) {}
+#endif
+
+void tb_exec_lock(TCGContext *tcg_ctx)
+{
+    /* assumes sys_icache_invalidate already called */
+    tb_exec_change(tcg_ctx, true);
+}
+
+void tb_exec_unlock(TCGContext *tcg_ctx)
+{
+    tb_exec_change(tcg_ctx, false);
+}
\ No newline at end of file
diff --git a/qemu/accel/tcg/translate-all.h b/qemu/accel/tcg/translate-all.h
new file mode 100644
index 00000000..71be1d41
--- /dev/null
+++ b/qemu/accel/tcg/translate-all.h
@@ -0,0 +1,35 @@
+/*
+ * Translated block handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TRANSLATE_ALL_H
+#define TRANSLATE_ALL_H
+
+#include "exec/exec-all.h"
+
+
+/* translate-all.c */
+struct page_collection *page_collection_lock(struct uc_struct *uc, tb_page_addr_t start,
+                                             tb_page_addr_t end);
+void page_collection_unlock(struct page_collection *set);
+void tb_invalidate_phys_page_fast(struct uc_struct *uc, struct page_collection *pages,
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr);
+void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end);
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
+
+#endif /* TRANSLATE_ALL_H */
diff --git a/qemu/accel/tcg/translator.c b/qemu/accel/tcg/translator.c
new file mode 100644
index 00000000..0fca28a9
--- /dev/null
+++ b/qemu/accel/tcg/translator.c
@@ -0,0 +1,168 @@
+/*
+ * Generic intermediate code generation.
+ *
+ * Copyright (C) 2016-2017 Lluís Vilanova
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "tcg/tcg.h" +#include "tcg/tcg-op.h" +#include "exec/exec-all.h" +#include "exec/gen-icount.h" +#include "exec/translator.h" + +#include + +/* Pairs with tcg_clear_temp_count. + To be called by #TranslatorOps.{translate_insn,tb_stop} if + (1) the target is sufficiently clean to support reporting, + (2) as and when all temporaries are known to be consumed. + For most targets, (2) is at the end of translate_insn. */ +void translator_loop_temp_check(DisasContextBase *db) +{ +#if 0 + if (tcg_check_temp_count()) { + qemu_log("warning: TCG temporary leaks before " + TARGET_FMT_lx "\n", db->pc_next); + } +#endif +} + +void translator_loop(const TranslatorOps *ops, DisasContextBase *db, + CPUState *cpu, TranslationBlock *tb, int max_insns) +{ + int bp_insn = 0; + struct uc_struct *uc = (struct uc_struct *)cpu->uc; + TCGContext *tcg_ctx = uc->tcg_ctx; + TCGOp *prev_op = NULL; + bool block_hook = false; + + /* Initialize DisasContext */ + db->tb = tb; + db->pc_first = tb->pc; + db->pc_next = db->pc_first; + db->is_jmp = DISAS_NEXT; + db->num_insns = 0; + db->max_insns = max_insns; + db->singlestep_enabled = cpu->singlestep_enabled; + + ops->init_disas_context(db, cpu); + tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ + + /* Reset the temp count so that we can identify leaks */ + tcg_clear_temp_count(); + + /* Unicorn: early check to see if the address of this block is + * the "run until" address. */ + if (tb->pc == cpu->uc->addr_end) { + // This should catch that instruction is at the end + // and generate appropriate halting code. + gen_tb_start(tcg_ctx, db->tb); + ops->tb_start(db, cpu); + db->num_insns++; + ops->insn_start(db, cpu); + ops->translate_insn(db, cpu); + goto _end_loop; + } + + /* Unicorn: trace this block on request + * Only hook this block if it is not broken from previous translation due to + * full translation cache + */ + if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_BLOCK, tb->pc)) { + prev_op = tcg_last_op(tcg_ctx); + block_hook = true; + gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, uc, db->pc_first); + } + + // tcg_dump_ops(tcg_ctx, false, "translator loop"); + + /* Start translating. */ + gen_tb_start(tcg_ctx, db->tb); + // tcg_dump_ops(tcg_ctx, false, "tb start"); + + ops->tb_start(db, cpu); + // tcg_dump_ops(tcg_ctx, false, "tb start 2"); + + tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ + + while (true) { + db->num_insns++; + + ops->insn_start(db, cpu); + tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ + + /* Pass breakpoint hits to target for further processing */ + if (!db->singlestep_enabled + && unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) { + CPUBreakpoint *bp; + QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { + if (bp->pc == db->pc_next) { + if (ops->breakpoint_check(db, cpu, bp)) { + bp_insn = 1; + break; + } + } + } + /* The breakpoint_check hook may use DISAS_TOO_MANY to indicate + that only one more instruction is to be executed. Otherwise + it should use DISAS_NORETURN when generating an exception, + but may use a DISAS_TARGET_* value for Something Else. */ + if (db->is_jmp > DISAS_TOO_MANY) { + break; + } + } + + /* Disassemble one instruction. The translate_insn hook should + update db->pc_next and db->is_jmp to indicate what should be + done next -- either exiting this loop or locate the start of + the next instruction. 
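+       (DISAS_NEXT keeps the loop going, DISAS_TOO_MANY stops after this
+       instruction, and DISAS_NORETURN means the insn has already emitted
+       its own exit from the TB.)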
*/ + ops->translate_insn(db, cpu); + // tcg_dump_ops(tcg_ctx, false, "insn translate"); + + /* Stop translation if translate_insn so indicated. */ + if (db->is_jmp != DISAS_NEXT) { + break; + } + + /* Stop translation if the output buffer is full, + or we have executed all of the allowed instructions. */ + if (tcg_op_buf_full(tcg_ctx) || db->num_insns >= db->max_insns) { + db->is_jmp = DISAS_TOO_MANY; + break; + } + } + +_end_loop: + /* Emit code to exit the TB, as indicated by db->is_jmp. */ + ops->tb_stop(db, cpu); + gen_tb_end(tcg_ctx, db->tb, db->num_insns - bp_insn); + // tcg_dump_ops(tcg_ctx, false, "tb end"); + + /* The disas_log hook may use these values rather than recompute. */ + db->tb->size = db->pc_next - db->pc_first; + db->tb->icount = db->num_insns; + + if (block_hook) { + TCGOp *tcg_op; + + // Unicorn: patch the callback to have the proper block size. + if (prev_op) { + // As explained further up in the function where prev_op is + // assigned, we move forward in the tail queue, so we're modifying the + // move instruction generated by gen_uc_tracecode() that contains + // the instruction size to assign the proper size (replacing 0xF1F1F1F1). + tcg_op = QTAILQ_NEXT(prev_op, link); + } else { + // this basic block is the first emulated code ever, + // so the basic block operand is the first operand + tcg_op = QTAILQ_FIRST(&tcg_ctx->ops); + } + + tcg_op->args[1] = db->tb->size; + } +} diff --git a/qemu/arm.h b/qemu/arm.h index 87d02032..d8248f9f 100644 --- a/qemu/arm.h +++ b/qemu/arm.h @@ -1,1367 +1,1288 @@ /* Autogen header for Unicorn Engine - DONOT MODIFY */ -#ifndef UNICORN_AUTOGEN_ARM_H -#define UNICORN_AUTOGEN_ARM_H -#define arm_release arm_release_arm -#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_arm -#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_arm -#define use_idiv_instructions_rt use_idiv_instructions_rt_arm -#define tcg_target_deposit_valid tcg_target_deposit_valid_arm -#define helper_power_down helper_power_down_arm -#define check_exit_request check_exit_request_arm -#define address_space_unregister address_space_unregister_arm -#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_arm -#define phys_mem_clean phys_mem_clean_arm -#define tb_cleanup tb_cleanup_arm +#ifndef UNICORN_AUTOGEN_arm_H +#define UNICORN_AUTOGEN_arm_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _arm +#endif +#define arm_arch arm_arch_arm +#define tb_target_set_jmp_target tb_target_set_jmp_target_arm +#define have_bmi1 have_bmi1_arm +#define have_popcnt have_popcnt_arm +#define have_avx1 have_avx1_arm +#define have_avx2 have_avx2_arm +#define have_isa have_isa_arm +#define have_altivec have_altivec_arm +#define have_vsx have_vsx_arm +#define flush_icache_range flush_icache_range_arm +#define s390_facilities s390_facilities_arm +#define tcg_dump_op tcg_dump_op_arm +#define tcg_dump_ops tcg_dump_ops_arm +#define tcg_gen_and_i64 tcg_gen_and_i64_arm +#define tcg_gen_discard_i64 tcg_gen_discard_i64_arm +#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_arm +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_arm +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_arm +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_arm +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_arm +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_arm +#define tcg_gen_ld_i64 tcg_gen_ld_i64_arm +#define tcg_gen_mov_i64 tcg_gen_mov_i64_arm +#define tcg_gen_movi_i64 tcg_gen_movi_i64_arm +#define tcg_gen_mul_i64 tcg_gen_mul_i64_arm +#define tcg_gen_or_i64 tcg_gen_or_i64_arm +#define tcg_gen_sar_i64 tcg_gen_sar_i64_arm 
+#define tcg_gen_shl_i64 tcg_gen_shl_i64_arm +#define tcg_gen_shr_i64 tcg_gen_shr_i64_arm +#define tcg_gen_st_i64 tcg_gen_st_i64_arm +#define tcg_gen_xor_i64 tcg_gen_xor_i64_arm +#define cpu_icount_to_ns cpu_icount_to_ns_arm +#define cpu_is_stopped cpu_is_stopped_arm +#define cpu_get_ticks cpu_get_ticks_arm +#define cpu_get_clock cpu_get_clock_arm +#define cpu_resume cpu_resume_arm +#define qemu_init_vcpu qemu_init_vcpu_arm +#define cpu_stop_current cpu_stop_current_arm +#define resume_all_vcpus resume_all_vcpus_arm +#define vm_start vm_start_arm +#define address_space_dispatch_compact address_space_dispatch_compact_arm +#define flatview_translate flatview_translate_arm +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_arm +#define qemu_get_cpu qemu_get_cpu_arm +#define cpu_address_space_init cpu_address_space_init_arm +#define cpu_get_address_space cpu_get_address_space_arm +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_arm +#define cpu_exec_initfn cpu_exec_initfn_arm +#define cpu_exec_realizefn cpu_exec_realizefn_arm +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_arm +#define cpu_watchpoint_insert cpu_watchpoint_insert_arm +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_arm +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_arm +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_arm +#define cpu_breakpoint_insert cpu_breakpoint_insert_arm +#define cpu_breakpoint_remove cpu_breakpoint_remove_arm +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_arm +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_arm +#define cpu_abort cpu_abort_arm +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_arm +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_arm +#define flatview_add_to_dispatch flatview_add_to_dispatch_arm +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_arm +#define qemu_ram_get_offset qemu_ram_get_offset_arm +#define qemu_ram_get_used_length qemu_ram_get_used_length_arm +#define qemu_ram_is_shared qemu_ram_is_shared_arm +#define qemu_ram_pagesize qemu_ram_pagesize_arm +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_arm +#define qemu_ram_alloc qemu_ram_alloc_arm +#define qemu_ram_free qemu_ram_free_arm +#define qemu_map_ram_ptr qemu_map_ram_ptr_arm +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_arm +#define qemu_ram_block_from_host qemu_ram_block_from_host_arm +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_arm +#define cpu_check_watchpoint cpu_check_watchpoint_arm +#define iotlb_to_section iotlb_to_section_arm +#define address_space_dispatch_new address_space_dispatch_new_arm +#define address_space_dispatch_free address_space_dispatch_free_arm +#define flatview_read_continue flatview_read_continue_arm +#define address_space_read_full address_space_read_full_arm +#define address_space_write address_space_write_arm +#define address_space_rw address_space_rw_arm +#define cpu_physical_memory_rw cpu_physical_memory_rw_arm +#define address_space_write_rom address_space_write_rom_arm +#define cpu_flush_icache_range cpu_flush_icache_range_arm +#define cpu_exec_init_all cpu_exec_init_all_arm +#define address_space_access_valid address_space_access_valid_arm +#define address_space_map address_space_map_arm +#define address_space_unmap address_space_unmap_arm +#define cpu_physical_memory_map cpu_physical_memory_map_arm +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_arm 
+#define cpu_memory_rw_debug cpu_memory_rw_debug_arm +#define qemu_target_page_size qemu_target_page_size_arm +#define qemu_target_page_bits qemu_target_page_bits_arm +#define qemu_target_page_bits_min qemu_target_page_bits_min_arm +#define target_words_bigendian target_words_bigendian_arm +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_arm +#define ram_block_discard_range ram_block_discard_range_arm +#define ramblock_is_pmem ramblock_is_pmem_arm +#define page_size_init page_size_init_arm +#define set_preferred_target_page_bits set_preferred_target_page_bits_arm +#define finalize_target_page_bits finalize_target_page_bits_arm +#define cpu_outb cpu_outb_arm +#define cpu_outw cpu_outw_arm +#define cpu_outl cpu_outl_arm +#define cpu_inb cpu_inb_arm +#define cpu_inw cpu_inw_arm +#define cpu_inl cpu_inl_arm #define memory_map memory_map_arm +#define memory_map_io memory_map_io_arm #define memory_map_ptr memory_map_ptr_arm #define memory_unmap memory_unmap_arm #define memory_free memory_free_arm -#define free_code_gen_buffer free_code_gen_buffer_arm -#define helper_raise_exception helper_raise_exception_arm -#define tcg_enabled tcg_enabled_arm -#define tcg_exec_init tcg_exec_init_arm -#define memory_register_types memory_register_types_arm -#define cpu_exec_init_all cpu_exec_init_all_arm -#define vm_start vm_start_arm -#define resume_all_vcpus resume_all_vcpus_arm -#define a15_l2ctlr_read a15_l2ctlr_read_arm -#define a64_translate_init a64_translate_init_arm -#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_arm -#define aa64_cacheop_access aa64_cacheop_access_arm -#define aa64_daif_access aa64_daif_access_arm -#define aa64_daif_write aa64_daif_write_arm -#define aa64_dczid_read aa64_dczid_read_arm -#define aa64_fpcr_read aa64_fpcr_read_arm -#define aa64_fpcr_write aa64_fpcr_write_arm -#define aa64_fpsr_read aa64_fpsr_read_arm -#define aa64_fpsr_write aa64_fpsr_write_arm -#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_arm -#define aa64_zva_access aa64_zva_access_arm -#define aarch64_banked_spsr_index aarch64_banked_spsr_index_arm -#define aarch64_restore_sp aarch64_restore_sp_arm -#define aarch64_save_sp aarch64_save_sp_arm -#define accel_find accel_find_arm -#define accel_init_machine accel_init_machine_arm -#define accel_type accel_type_arm -#define access_with_adjusted_size access_with_adjusted_size_arm -#define add128 add128_arm -#define add16_sat add16_sat_arm -#define add16_usat add16_usat_arm -#define add192 add192_arm -#define add8_sat add8_sat_arm -#define add8_usat add8_usat_arm -#define add_cpreg_to_hashtable add_cpreg_to_hashtable_arm -#define add_cpreg_to_list add_cpreg_to_list_arm -#define addFloat128Sigs addFloat128Sigs_arm -#define addFloat32Sigs addFloat32Sigs_arm -#define addFloat64Sigs addFloat64Sigs_arm -#define addFloatx80Sigs addFloatx80Sigs_arm -#define add_qemu_ldst_label add_qemu_ldst_label_arm -#define address_space_access_valid address_space_access_valid_arm -#define address_space_destroy address_space_destroy_arm -#define address_space_destroy_dispatch address_space_destroy_dispatch_arm -#define address_space_get_flatview address_space_get_flatview_arm -#define address_space_init address_space_init_arm -#define address_space_init_dispatch address_space_init_dispatch_arm -#define address_space_lookup_region address_space_lookup_region_arm -#define address_space_map address_space_map_arm -#define address_space_read address_space_read_arm -#define address_space_rw address_space_rw_arm -#define 
address_space_translate address_space_translate_arm -#define address_space_translate_for_iotlb address_space_translate_for_iotlb_arm -#define address_space_translate_internal address_space_translate_internal_arm -#define address_space_unmap address_space_unmap_arm -#define address_space_update_topology address_space_update_topology_arm -#define address_space_update_topology_pass address_space_update_topology_pass_arm -#define address_space_write address_space_write_arm -#define addrrange_contains addrrange_contains_arm -#define addrrange_end addrrange_end_arm -#define addrrange_equal addrrange_equal_arm -#define addrrange_intersection addrrange_intersection_arm -#define addrrange_intersects addrrange_intersects_arm -#define addrrange_make addrrange_make_arm -#define adjust_endianness adjust_endianness_arm -#define all_helpers all_helpers_arm -#define alloc_code_gen_buffer alloc_code_gen_buffer_arm -#define alloc_entry alloc_entry_arm -#define always_true always_true_arm -#define arm1026_initfn arm1026_initfn_arm -#define arm1136_initfn arm1136_initfn_arm -#define arm1136_r2_initfn arm1136_r2_initfn_arm -#define arm1176_initfn arm1176_initfn_arm -#define arm11mpcore_initfn arm11mpcore_initfn_arm -#define arm926_initfn arm926_initfn_arm -#define arm946_initfn arm946_initfn_arm -#define arm_ccnt_enabled arm_ccnt_enabled_arm -#define arm_cp_read_zero arm_cp_read_zero_arm -#define arm_cp_reset_ignore arm_cp_reset_ignore_arm -#define arm_cpu_do_interrupt arm_cpu_do_interrupt_arm -#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_arm -#define arm_cpu_finalizefn arm_cpu_finalizefn_arm -#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_arm -#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_arm -#define arm_cpu_initfn arm_cpu_initfn_arm -#define arm_cpu_list arm_cpu_list_arm -#define cpu_loop_exit cpu_loop_exit_arm -#define arm_cpu_post_init arm_cpu_post_init_arm -#define arm_cpu_realizefn arm_cpu_realizefn_arm -#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_arm -#define arm_cpu_register_types arm_cpu_register_types_arm -#define cpu_resume_from_signal cpu_resume_from_signal_arm -#define arm_cpus arm_cpus_arm -#define arm_cpu_set_pc arm_cpu_set_pc_arm -#define arm_cp_write_ignore arm_cp_write_ignore_arm -#define arm_current_el arm_current_el_arm -#define arm_dc_feature arm_dc_feature_arm -#define arm_debug_excp_handler arm_debug_excp_handler_arm -#define arm_debug_target_el arm_debug_target_el_arm -#define arm_el_is_aa64 arm_el_is_aa64_arm -#define arm_env_get_cpu arm_env_get_cpu_arm -#define arm_excp_target_el arm_excp_target_el_arm -#define arm_excp_unmasked arm_excp_unmasked_arm -#define arm_feature arm_feature_arm -#define arm_generate_debug_exceptions arm_generate_debug_exceptions_arm -#define gen_intermediate_code gen_intermediate_code_arm -#define gen_intermediate_code_pc gen_intermediate_code_pc_arm -#define arm_gen_test_cc arm_gen_test_cc_arm -#define arm_gt_ptimer_cb arm_gt_ptimer_cb_arm -#define arm_gt_vtimer_cb arm_gt_vtimer_cb_arm -#define arm_handle_psci_call arm_handle_psci_call_arm -#define arm_is_psci_call arm_is_psci_call_arm -#define arm_is_secure arm_is_secure_arm -#define arm_is_secure_below_el3 arm_is_secure_below_el3_arm -#define arm_ldl_code arm_ldl_code_arm -#define arm_lduw_code arm_lduw_code_arm -#define arm_log_exception arm_log_exception_arm -#define arm_reg_read arm_reg_read_arm -#define arm_reg_reset arm_reg_reset_arm -#define arm_reg_write arm_reg_write_arm -#define restore_state_to_opc 
restore_state_to_opc_arm -#define arm_rmode_to_sf arm_rmode_to_sf_arm -#define arm_singlestep_active arm_singlestep_active_arm -#define tlb_fill tlb_fill_arm -#define tlb_flush tlb_flush_arm -#define tlb_flush_page tlb_flush_page_arm -#define tlb_set_page tlb_set_page_arm -#define arm_translate_init arm_translate_init_arm -#define arm_v7m_class_init arm_v7m_class_init_arm -#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_arm -#define ats_access ats_access_arm -#define ats_write ats_write_arm -#define bad_mode_switch bad_mode_switch_arm -#define bank_number bank_number_arm -#define bitmap_zero_extend bitmap_zero_extend_arm -#define bp_wp_matches bp_wp_matches_arm -#define breakpoint_invalidate breakpoint_invalidate_arm -#define build_page_bitmap build_page_bitmap_arm -#define bus_add_child bus_add_child_arm -#define bus_class_init bus_class_init_arm -#define bus_info bus_info_arm -#define bus_unparent bus_unparent_arm -#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_arm -#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_arm -#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_arm -#define call_recip_estimate call_recip_estimate_arm -#define can_merge can_merge_arm -#define capacity_increase capacity_increase_arm -#define ccsidr_read ccsidr_read_arm -#define check_ap check_ap_arm -#define check_breakpoints check_breakpoints_arm -#define check_watchpoints check_watchpoints_arm -#define cho cho_arm -#define clear_bit clear_bit_arm -#define clz32 clz32_arm -#define clz64 clz64_arm -#define cmp_flatrange_addr cmp_flatrange_addr_arm -#define code_gen_alloc code_gen_alloc_arm -#define commonNaNToFloat128 commonNaNToFloat128_arm -#define commonNaNToFloat16 commonNaNToFloat16_arm -#define commonNaNToFloat32 commonNaNToFloat32_arm -#define commonNaNToFloat64 commonNaNToFloat64_arm -#define commonNaNToFloatx80 commonNaNToFloatx80_arm -#define compute_abs_deadline compute_abs_deadline_arm -#define cond_name cond_name_arm -#define configure_accelerator configure_accelerator_arm -#define container_get container_get_arm -#define container_info container_info_arm -#define container_register_types container_register_types_arm -#define contextidr_write contextidr_write_arm -#define core_log_global_start core_log_global_start_arm -#define core_log_global_stop core_log_global_stop_arm -#define core_memory_listener core_memory_listener_arm -#define cortexa15_cp_reginfo cortexa15_cp_reginfo_arm -#define cortex_a15_initfn cortex_a15_initfn_arm -#define cortexa8_cp_reginfo cortexa8_cp_reginfo_arm -#define cortex_a8_initfn cortex_a8_initfn_arm -#define cortexa9_cp_reginfo cortexa9_cp_reginfo_arm -#define cortex_a9_initfn cortex_a9_initfn_arm -#define cortex_m3_initfn cortex_m3_initfn_arm -#define count_cpreg count_cpreg_arm -#define countLeadingZeros32 countLeadingZeros32_arm -#define countLeadingZeros64 countLeadingZeros64_arm -#define cp_access_ok cp_access_ok_arm -#define cpacr_write cpacr_write_arm -#define cpreg_field_is_64bit cpreg_field_is_64bit_arm -#define cp_reginfo cp_reginfo_arm -#define cpreg_key_compare cpreg_key_compare_arm -#define cpreg_make_keylist cpreg_make_keylist_arm -#define cp_reg_reset cp_reg_reset_arm -#define cpreg_to_kvm_id cpreg_to_kvm_id_arm -#define cpsr_read cpsr_read_arm -#define cpsr_write cpsr_write_arm -#define cptype_valid cptype_valid_arm -#define cpu_abort cpu_abort_arm -#define cpu_arm_exec cpu_arm_exec_arm -#define cpu_arm_gen_code cpu_arm_gen_code_arm -#define cpu_arm_init cpu_arm_init_arm -#define cpu_breakpoint_insert 
cpu_breakpoint_insert_arm -#define cpu_breakpoint_remove cpu_breakpoint_remove_arm -#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_arm -#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_arm -#define cpu_can_do_io cpu_can_do_io_arm -#define cpu_can_run cpu_can_run_arm -#define cpu_class_init cpu_class_init_arm -#define cpu_common_class_by_name cpu_common_class_by_name_arm -#define cpu_common_exec_interrupt cpu_common_exec_interrupt_arm -#define cpu_common_get_arch_id cpu_common_get_arch_id_arm -#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_arm -#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_arm -#define cpu_common_has_work cpu_common_has_work_arm -#define cpu_common_initfn cpu_common_initfn_arm -#define cpu_common_noop cpu_common_noop_arm -#define cpu_common_parse_features cpu_common_parse_features_arm -#define cpu_common_realizefn cpu_common_realizefn_arm -#define cpu_common_reset cpu_common_reset_arm -#define cpu_dump_statistics cpu_dump_statistics_arm -#define cpu_exec_init cpu_exec_init_arm -#define cpu_flush_icache_range cpu_flush_icache_range_arm -#define cpu_gen_init cpu_gen_init_arm -#define cpu_get_clock cpu_get_clock_arm -#define cpu_get_real_ticks cpu_get_real_ticks_arm -#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_arm -#define cpu_handle_debug_exception cpu_handle_debug_exception_arm -#define cpu_handle_guest_debug cpu_handle_guest_debug_arm -#define cpu_inb cpu_inb_arm -#define cpu_inl cpu_inl_arm -#define cpu_interrupt cpu_interrupt_arm -#define cpu_interrupt_handler cpu_interrupt_handler_arm -#define cpu_inw cpu_inw_arm -#define cpu_io_recompile cpu_io_recompile_arm -#define cpu_is_stopped cpu_is_stopped_arm -#define cpu_ldl_code cpu_ldl_code_arm -#define cpu_ldub_code cpu_ldub_code_arm -#define cpu_lduw_code cpu_lduw_code_arm -#define cpu_memory_rw_debug cpu_memory_rw_debug_arm -#define cpu_mmu_index cpu_mmu_index_arm -#define cpu_outb cpu_outb_arm -#define cpu_outl cpu_outl_arm -#define cpu_outw cpu_outw_arm -#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_arm -#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_arm -#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_arm -#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_arm -#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_arm -#define cpu_physical_memory_is_io cpu_physical_memory_is_io_arm -#define cpu_physical_memory_map cpu_physical_memory_map_arm -#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_arm -#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_arm -#define cpu_physical_memory_rw cpu_physical_memory_rw_arm -#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_arm -#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_arm -#define cpu_physical_memory_unmap cpu_physical_memory_unmap_arm -#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_arm -#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_arm -#define cpu_register cpu_register_arm -#define cpu_register_types cpu_register_types_arm -#define cpu_restore_state cpu_restore_state_arm -#define cpu_restore_state_from_tb cpu_restore_state_from_tb_arm -#define cpu_single_step cpu_single_step_arm -#define cpu_tb_exec cpu_tb_exec_arm -#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_arm -#define cpu_to_be64 
cpu_to_be64_arm
-#define cpu_to_le32 cpu_to_le32_arm
-#define cpu_to_le64 cpu_to_le64_arm
-#define cpu_type_info cpu_type_info_arm
-#define cpu_unassigned_access cpu_unassigned_access_arm
-#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_arm
-#define cpu_watchpoint_insert cpu_watchpoint_insert_arm
-#define cpu_watchpoint_remove cpu_watchpoint_remove_arm
-#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_arm
-#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_arm
-#define crc32c_table crc32c_table_arm
-#define create_new_memory_mapping create_new_memory_mapping_arm
-#define csselr_write csselr_write_arm
-#define cto32 cto32_arm
-#define ctr_el0_access ctr_el0_access_arm
-#define ctz32 ctz32_arm
-#define ctz64 ctz64_arm
-#define dacr_write dacr_write_arm
-#define dbgbcr_write dbgbcr_write_arm
-#define dbgbvr_write dbgbvr_write_arm
-#define dbgwcr_write dbgwcr_write_arm
-#define dbgwvr_write dbgwvr_write_arm
-#define debug_cp_reginfo debug_cp_reginfo_arm
-#define debug_frame debug_frame_arm
-#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_arm
-#define define_arm_cp_regs define_arm_cp_regs_arm
-#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_arm
-#define define_debug_regs define_debug_regs_arm
-#define define_one_arm_cp_reg define_one_arm_cp_reg_arm
-#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_arm
-#define deposit32 deposit32_arm
-#define deposit64 deposit64_arm
-#define deregister_tm_clones deregister_tm_clones_arm
-#define device_class_base_init device_class_base_init_arm
-#define device_class_init device_class_init_arm
-#define device_finalize device_finalize_arm
-#define device_get_realized device_get_realized_arm
-#define device_initfn device_initfn_arm
-#define device_post_init device_post_init_arm
-#define device_reset device_reset_arm
-#define device_set_realized device_set_realized_arm
-#define device_type_info device_type_info_arm
-#define disas_arm_insn disas_arm_insn_arm
-#define disas_coproc_insn disas_coproc_insn_arm
-#define disas_dsp_insn disas_dsp_insn_arm
-#define disas_iwmmxt_insn disas_iwmmxt_insn_arm
-#define disas_neon_data_insn disas_neon_data_insn_arm
-#define disas_neon_ls_insn disas_neon_ls_insn_arm
-#define disas_thumb2_insn disas_thumb2_insn_arm
-#define disas_thumb_insn disas_thumb_insn_arm
-#define disas_vfp_insn disas_vfp_insn_arm
-#define disas_vfp_v8_insn disas_vfp_v8_insn_arm
-#define do_arm_semihosting do_arm_semihosting_arm
-#define do_clz16 do_clz16_arm
-#define do_clz8 do_clz8_arm
-#define do_constant_folding do_constant_folding_arm
-#define do_constant_folding_2 do_constant_folding_2_arm
-#define do_constant_folding_cond do_constant_folding_cond_arm
-#define do_constant_folding_cond2 do_constant_folding_cond2_arm
-#define do_constant_folding_cond_32 do_constant_folding_cond_32_arm
-#define do_constant_folding_cond_64 do_constant_folding_cond_64_arm
-#define do_constant_folding_cond_eq do_constant_folding_cond_eq_arm
-#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_arm
-#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_arm
-#define do_ssat do_ssat_arm
-#define do_usad do_usad_arm
-#define do_usat do_usat_arm
-#define do_v7m_exception_exit do_v7m_exception_exit_arm
-#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_arm
-#define dummy_func dummy_func_arm
-#define dummy_section dummy_section_arm
-#define _DYNAMIC _DYNAMIC_arm
-#define _edata _edata_arm
-#define _end _end_arm
-#define end_list end_list_arm
-#define eq128 eq128_arm
-#define ErrorClass_lookup ErrorClass_lookup_arm
-#define error_copy error_copy_arm
-#define error_exit error_exit_arm
-#define error_get_class error_get_class_arm
-#define error_get_pretty error_get_pretty_arm
-#define error_setg_file_open error_setg_file_open_arm
-#define estimateDiv128To64 estimateDiv128To64_arm
-#define estimateSqrt32 estimateSqrt32_arm
-#define excnames excnames_arm
-#define excp_is_internal excp_is_internal_arm
-#define extended_addresses_enabled extended_addresses_enabled_arm
-#define extended_mpu_ap_bits extended_mpu_ap_bits_arm
-#define extract32 extract32_arm
-#define extract64 extract64_arm
-#define extractFloat128Exp extractFloat128Exp_arm
-#define extractFloat128Frac0 extractFloat128Frac0_arm
-#define extractFloat128Frac1 extractFloat128Frac1_arm
-#define extractFloat128Sign extractFloat128Sign_arm
-#define extractFloat16Exp extractFloat16Exp_arm
-#define extractFloat16Frac extractFloat16Frac_arm
-#define extractFloat16Sign extractFloat16Sign_arm
-#define extractFloat32Exp extractFloat32Exp_arm
-#define extractFloat32Frac extractFloat32Frac_arm
-#define extractFloat32Sign extractFloat32Sign_arm
-#define extractFloat64Exp extractFloat64Exp_arm
-#define extractFloat64Frac extractFloat64Frac_arm
-#define extractFloat64Sign extractFloat64Sign_arm
-#define extractFloatx80Exp extractFloatx80Exp_arm
-#define extractFloatx80Frac extractFloatx80Frac_arm
-#define extractFloatx80Sign extractFloatx80Sign_arm
-#define fcse_write fcse_write_arm
-#define find_better_copy find_better_copy_arm
-#define find_default_machine find_default_machine_arm
-#define find_desc_by_name find_desc_by_name_arm
-#define find_first_bit find_first_bit_arm
-#define find_paging_enabled_cpu find_paging_enabled_cpu_arm
-#define find_ram_block find_ram_block_arm
-#define find_ram_offset find_ram_offset_arm
-#define find_string find_string_arm
-#define find_type find_type_arm
-#define _fini _fini_arm
-#define flatrange_equal flatrange_equal_arm
-#define flatview_destroy flatview_destroy_arm
-#define flatview_init flatview_init_arm
-#define flatview_insert flatview_insert_arm
-#define flatview_lookup flatview_lookup_arm
-#define flatview_ref flatview_ref_arm
-#define flatview_simplify flatview_simplify_arm
 #define flatview_unref flatview_unref_arm
-#define float128_add float128_add_arm
-#define float128_compare float128_compare_arm
-#define float128_compare_internal float128_compare_internal_arm
-#define float128_compare_quiet float128_compare_quiet_arm
-#define float128_default_nan float128_default_nan_arm
-#define float128_div float128_div_arm
-#define float128_eq float128_eq_arm
-#define float128_eq_quiet float128_eq_quiet_arm
-#define float128_is_quiet_nan float128_is_quiet_nan_arm
-#define float128_is_signaling_nan float128_is_signaling_nan_arm
-#define float128_le float128_le_arm
-#define float128_le_quiet float128_le_quiet_arm
-#define float128_lt float128_lt_arm
-#define float128_lt_quiet float128_lt_quiet_arm
-#define float128_maybe_silence_nan float128_maybe_silence_nan_arm
-#define float128_mul float128_mul_arm
-#define float128_rem float128_rem_arm
-#define float128_round_to_int float128_round_to_int_arm
-#define float128_scalbn float128_scalbn_arm
-#define float128_sqrt float128_sqrt_arm
-#define float128_sub float128_sub_arm
-#define float128ToCommonNaN float128ToCommonNaN_arm
-#define float128_to_float32 float128_to_float32_arm
-#define float128_to_float64 float128_to_float64_arm
-#define float128_to_floatx80 float128_to_floatx80_arm
-#define float128_to_int32 float128_to_int32_arm
-#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_arm
-#define float128_to_int64 float128_to_int64_arm
-#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_arm
-#define float128_unordered float128_unordered_arm
-#define float128_unordered_quiet float128_unordered_quiet_arm
-#define float16_default_nan float16_default_nan_arm
+#define address_space_get_flatview address_space_get_flatview_arm
+#define memory_region_transaction_begin memory_region_transaction_begin_arm
+#define memory_region_transaction_commit memory_region_transaction_commit_arm
+#define memory_region_init memory_region_init_arm
+#define memory_region_access_valid memory_region_access_valid_arm
+#define memory_region_dispatch_read memory_region_dispatch_read_arm
+#define memory_region_dispatch_write memory_region_dispatch_write_arm
+#define memory_region_init_io memory_region_init_io_arm
+#define memory_region_init_ram_ptr memory_region_init_ram_ptr_arm
+#define memory_region_size memory_region_size_arm
+#define memory_region_set_readonly memory_region_set_readonly_arm
+#define memory_region_get_ram_ptr memory_region_get_ram_ptr_arm
+#define memory_region_from_host memory_region_from_host_arm
+#define memory_region_get_ram_addr memory_region_get_ram_addr_arm
+#define memory_region_add_subregion memory_region_add_subregion_arm
+#define memory_region_del_subregion memory_region_del_subregion_arm
+#define memory_region_find memory_region_find_arm
+#define memory_listener_register memory_listener_register_arm
+#define memory_listener_unregister memory_listener_unregister_arm
+#define address_space_remove_listeners address_space_remove_listeners_arm
+#define address_space_init address_space_init_arm
+#define address_space_destroy address_space_destroy_arm
+#define memory_region_init_ram memory_region_init_ram_arm
+#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_arm
+#define exec_inline_op exec_inline_op_arm
+#define floatx80_default_nan floatx80_default_nan_arm
+#define float_raise float_raise_arm
 #define float16_is_quiet_nan float16_is_quiet_nan_arm
 #define float16_is_signaling_nan float16_is_signaling_nan_arm
-#define float16_maybe_silence_nan float16_maybe_silence_nan_arm
-#define float16ToCommonNaN float16ToCommonNaN_arm
-#define float16_to_float32 float16_to_float32_arm
-#define float16_to_float64 float16_to_float64_arm
-#define float32_abs float32_abs_arm
-#define float32_add float32_add_arm
-#define float32_chs float32_chs_arm
-#define float32_compare float32_compare_arm
-#define float32_compare_internal float32_compare_internal_arm
-#define float32_compare_quiet float32_compare_quiet_arm
-#define float32_default_nan float32_default_nan_arm
-#define float32_div float32_div_arm
-#define float32_eq float32_eq_arm
-#define float32_eq_quiet float32_eq_quiet_arm
-#define float32_exp2 float32_exp2_arm
-#define float32_exp2_coefficients float32_exp2_coefficients_arm
-#define float32_is_any_nan float32_is_any_nan_arm
-#define float32_is_infinity float32_is_infinity_arm
-#define float32_is_neg float32_is_neg_arm
 #define float32_is_quiet_nan float32_is_quiet_nan_arm
 #define float32_is_signaling_nan float32_is_signaling_nan_arm
-#define float32_is_zero float32_is_zero_arm
-#define float32_is_zero_or_denormal float32_is_zero_or_denormal_arm
-#define float32_le float32_le_arm
-#define float32_le_quiet float32_le_quiet_arm
-#define float32_log2 float32_log2_arm
-#define float32_lt float32_lt_arm
-#define float32_lt_quiet float32_lt_quiet_arm
+#define float64_is_quiet_nan float64_is_quiet_nan_arm
+#define float64_is_signaling_nan float64_is_signaling_nan_arm
+#define floatx80_is_quiet_nan floatx80_is_quiet_nan_arm
+#define floatx80_is_signaling_nan floatx80_is_signaling_nan_arm
+#define floatx80_silence_nan floatx80_silence_nan_arm
+#define propagateFloatx80NaN propagateFloatx80NaN_arm
+#define float128_is_quiet_nan float128_is_quiet_nan_arm
+#define float128_is_signaling_nan float128_is_signaling_nan_arm
+#define float128_silence_nan float128_silence_nan_arm
+#define float16_add float16_add_arm
+#define float16_sub float16_sub_arm
+#define float32_add float32_add_arm
+#define float32_sub float32_sub_arm
+#define float64_add float64_add_arm
+#define float64_sub float64_sub_arm
+#define float16_mul float16_mul_arm
+#define float32_mul float32_mul_arm
+#define float64_mul float64_mul_arm
+#define float16_muladd float16_muladd_arm
+#define float32_muladd float32_muladd_arm
+#define float64_muladd float64_muladd_arm
+#define float16_div float16_div_arm
+#define float32_div float32_div_arm
+#define float64_div float64_div_arm
+#define float16_to_float32 float16_to_float32_arm
+#define float16_to_float64 float16_to_float64_arm
+#define float32_to_float16 float32_to_float16_arm
+#define float32_to_float64 float32_to_float64_arm
+#define float64_to_float16 float64_to_float16_arm
+#define float64_to_float32 float64_to_float32_arm
+#define float16_round_to_int float16_round_to_int_arm
+#define float32_round_to_int float32_round_to_int_arm
+#define float64_round_to_int float64_round_to_int_arm
+#define float16_to_int16_scalbn float16_to_int16_scalbn_arm
+#define float16_to_int32_scalbn float16_to_int32_scalbn_arm
+#define float16_to_int64_scalbn float16_to_int64_scalbn_arm
+#define float32_to_int16_scalbn float32_to_int16_scalbn_arm
+#define float32_to_int32_scalbn float32_to_int32_scalbn_arm
+#define float32_to_int64_scalbn float32_to_int64_scalbn_arm
+#define float64_to_int16_scalbn float64_to_int16_scalbn_arm
+#define float64_to_int32_scalbn float64_to_int32_scalbn_arm
+#define float64_to_int64_scalbn float64_to_int64_scalbn_arm
+#define float16_to_int16 float16_to_int16_arm
+#define float16_to_int32 float16_to_int32_arm
+#define float16_to_int64 float16_to_int64_arm
+#define float32_to_int16 float32_to_int16_arm
+#define float32_to_int32 float32_to_int32_arm
+#define float32_to_int64 float32_to_int64_arm
+#define float64_to_int16 float64_to_int16_arm
+#define float64_to_int32 float64_to_int32_arm
+#define float64_to_int64 float64_to_int64_arm
+#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_arm
+#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_arm
+#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_arm
+#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_arm
+#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_arm
+#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_arm
+#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_arm
+#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_arm
+#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_arm
+#define float16_to_uint16_scalbn float16_to_uint16_scalbn_arm
+#define float16_to_uint32_scalbn float16_to_uint32_scalbn_arm
+#define float16_to_uint64_scalbn float16_to_uint64_scalbn_arm
+#define float32_to_uint16_scalbn float32_to_uint16_scalbn_arm
+#define float32_to_uint32_scalbn float32_to_uint32_scalbn_arm
+#define float32_to_uint64_scalbn float32_to_uint64_scalbn_arm
+#define float64_to_uint16_scalbn float64_to_uint16_scalbn_arm
+#define float64_to_uint32_scalbn float64_to_uint32_scalbn_arm
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_arm
+#define float16_to_uint16 float16_to_uint16_arm
+#define float16_to_uint32 float16_to_uint32_arm
+#define float16_to_uint64 float16_to_uint64_arm
+#define float32_to_uint16 float32_to_uint16_arm
+#define float32_to_uint32 float32_to_uint32_arm
+#define float32_to_uint64 float32_to_uint64_arm
+#define float64_to_uint16 float64_to_uint16_arm
+#define float64_to_uint32 float64_to_uint32_arm
+#define float64_to_uint64 float64_to_uint64_arm
+#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_arm
+#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_arm
+#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_arm
+#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_arm
+#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_arm
+#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_arm
+#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_arm
+#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_arm
+#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_arm
+#define int64_to_float16_scalbn int64_to_float16_scalbn_arm
+#define int32_to_float16_scalbn int32_to_float16_scalbn_arm
+#define int16_to_float16_scalbn int16_to_float16_scalbn_arm
+#define int64_to_float16 int64_to_float16_arm
+#define int32_to_float16 int32_to_float16_arm
+#define int16_to_float16 int16_to_float16_arm
+#define int64_to_float32_scalbn int64_to_float32_scalbn_arm
+#define int32_to_float32_scalbn int32_to_float32_scalbn_arm
+#define int16_to_float32_scalbn int16_to_float32_scalbn_arm
+#define int64_to_float32 int64_to_float32_arm
+#define int32_to_float32 int32_to_float32_arm
+#define int16_to_float32 int16_to_float32_arm
+#define int64_to_float64_scalbn int64_to_float64_scalbn_arm
+#define int32_to_float64_scalbn int32_to_float64_scalbn_arm
+#define int16_to_float64_scalbn int16_to_float64_scalbn_arm
+#define int64_to_float64 int64_to_float64_arm
+#define int32_to_float64 int32_to_float64_arm
+#define int16_to_float64 int16_to_float64_arm
+#define uint64_to_float16_scalbn uint64_to_float16_scalbn_arm
+#define uint32_to_float16_scalbn uint32_to_float16_scalbn_arm
+#define uint16_to_float16_scalbn uint16_to_float16_scalbn_arm
+#define uint64_to_float16 uint64_to_float16_arm
+#define uint32_to_float16 uint32_to_float16_arm
+#define uint16_to_float16 uint16_to_float16_arm
+#define uint64_to_float32_scalbn uint64_to_float32_scalbn_arm
+#define uint32_to_float32_scalbn uint32_to_float32_scalbn_arm
+#define uint16_to_float32_scalbn uint16_to_float32_scalbn_arm
+#define uint64_to_float32 uint64_to_float32_arm
+#define uint32_to_float32 uint32_to_float32_arm
+#define uint16_to_float32 uint16_to_float32_arm
+#define uint64_to_float64_scalbn uint64_to_float64_scalbn_arm
+#define uint32_to_float64_scalbn uint32_to_float64_scalbn_arm
+#define uint16_to_float64_scalbn uint16_to_float64_scalbn_arm
+#define uint64_to_float64 uint64_to_float64_arm
+#define uint32_to_float64 uint32_to_float64_arm
+#define uint16_to_float64 uint16_to_float64_arm
+#define float16_min float16_min_arm
+#define float16_minnum float16_minnum_arm
+#define float16_minnummag float16_minnummag_arm
+#define float16_max float16_max_arm
+#define float16_maxnum float16_maxnum_arm
+#define float16_maxnummag float16_maxnummag_arm
+#define float32_min float32_min_arm
+#define float32_minnum float32_minnum_arm
+#define float32_minnummag float32_minnummag_arm
 #define float32_max float32_max_arm
 #define float32_maxnum float32_maxnum_arm
 #define float32_maxnummag float32_maxnummag_arm
-#define float32_maybe_silence_nan float32_maybe_silence_nan_arm
-#define float32_min float32_min_arm
-#define float32_minmax float32_minmax_arm
-#define float32_minnum float32_minnum_arm
-#define float32_minnummag float32_minnummag_arm
-#define float32_mul float32_mul_arm
-#define float32_muladd float32_muladd_arm
-#define float32_rem float32_rem_arm
-#define float32_round_to_int float32_round_to_int_arm
-#define float32_scalbn float32_scalbn_arm
-#define float32_set_sign float32_set_sign_arm
-#define float32_sqrt float32_sqrt_arm
-#define float32_squash_input_denormal float32_squash_input_denormal_arm
-#define float32_sub float32_sub_arm
-#define float32ToCommonNaN float32ToCommonNaN_arm
-#define float32_to_float128 float32_to_float128_arm
-#define float32_to_float16 float32_to_float16_arm
-#define float32_to_float64 float32_to_float64_arm
-#define float32_to_floatx80 float32_to_floatx80_arm
-#define float32_to_int16 float32_to_int16_arm
-#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_arm
-#define float32_to_int32 float32_to_int32_arm
-#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_arm
-#define float32_to_int64 float32_to_int64_arm
-#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_arm
-#define float32_to_uint16 float32_to_uint16_arm
-#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_arm
-#define float32_to_uint32 float32_to_uint32_arm
-#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_arm
-#define float32_to_uint64 float32_to_uint64_arm
-#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_arm
-#define float32_unordered float32_unordered_arm
-#define float32_unordered_quiet float32_unordered_quiet_arm
-#define float64_abs float64_abs_arm
-#define float64_add float64_add_arm
-#define float64_chs float64_chs_arm
-#define float64_compare float64_compare_arm
-#define float64_compare_internal float64_compare_internal_arm
-#define float64_compare_quiet float64_compare_quiet_arm
-#define float64_default_nan float64_default_nan_arm
-#define float64_div float64_div_arm
-#define float64_eq float64_eq_arm
-#define float64_eq_quiet float64_eq_quiet_arm
-#define float64_is_any_nan float64_is_any_nan_arm
-#define float64_is_infinity float64_is_infinity_arm
-#define float64_is_neg float64_is_neg_arm
-#define float64_is_quiet_nan float64_is_quiet_nan_arm
-#define float64_is_signaling_nan float64_is_signaling_nan_arm
-#define float64_is_zero float64_is_zero_arm
-#define float64_le float64_le_arm
-#define float64_le_quiet float64_le_quiet_arm
-#define float64_log2 float64_log2_arm
-#define float64_lt float64_lt_arm
-#define float64_lt_quiet float64_lt_quiet_arm
+#define float64_min float64_min_arm
+#define float64_minnum float64_minnum_arm
+#define float64_minnummag float64_minnummag_arm
 #define float64_max float64_max_arm
 #define float64_maxnum float64_maxnum_arm
 #define float64_maxnummag float64_maxnummag_arm
-#define float64_maybe_silence_nan float64_maybe_silence_nan_arm
-#define float64_min float64_min_arm
-#define float64_minmax float64_minmax_arm
-#define float64_minnum float64_minnum_arm
-#define float64_minnummag float64_minnummag_arm
-#define float64_mul float64_mul_arm
-#define float64_muladd float64_muladd_arm
-#define float64_rem float64_rem_arm
-#define float64_round_to_int float64_round_to_int_arm
+#define float16_compare float16_compare_arm
+#define float16_compare_quiet float16_compare_quiet_arm
+#define float32_compare float32_compare_arm
+#define float32_compare_quiet float32_compare_quiet_arm
+#define float64_compare float64_compare_arm
+#define float64_compare_quiet float64_compare_quiet_arm
+#define float16_scalbn float16_scalbn_arm
+#define float32_scalbn float32_scalbn_arm
 #define float64_scalbn float64_scalbn_arm
-#define float64_set_sign float64_set_sign_arm
+#define float16_sqrt float16_sqrt_arm
+#define float32_sqrt float32_sqrt_arm
 #define float64_sqrt float64_sqrt_arm
+#define float16_default_nan float16_default_nan_arm
+#define float32_default_nan float32_default_nan_arm
+#define float64_default_nan float64_default_nan_arm
+#define float128_default_nan float128_default_nan_arm
+#define float16_silence_nan float16_silence_nan_arm
+#define float32_silence_nan float32_silence_nan_arm
+#define float64_silence_nan float64_silence_nan_arm
+#define float16_squash_input_denormal float16_squash_input_denormal_arm
+#define float32_squash_input_denormal float32_squash_input_denormal_arm
 #define float64_squash_input_denormal float64_squash_input_denormal_arm
-#define float64_sub float64_sub_arm
-#define float64ToCommonNaN float64ToCommonNaN_arm
-#define float64_to_float128 float64_to_float128_arm
-#define float64_to_float16 float64_to_float16_arm
-#define float64_to_float32 float64_to_float32_arm
+#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_arm
+#define roundAndPackFloatx80 roundAndPackFloatx80_arm
+#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_arm
+#define int32_to_floatx80 int32_to_floatx80_arm
+#define int32_to_float128 int32_to_float128_arm
+#define int64_to_floatx80 int64_to_floatx80_arm
+#define int64_to_float128 int64_to_float128_arm
+#define uint64_to_float128 uint64_to_float128_arm
+#define float32_to_floatx80 float32_to_floatx80_arm
+#define float32_to_float128 float32_to_float128_arm
+#define float32_rem float32_rem_arm
+#define float32_exp2 float32_exp2_arm
+#define float32_log2 float32_log2_arm
+#define float32_eq float32_eq_arm
+#define float32_le float32_le_arm
+#define float32_lt float32_lt_arm
+#define float32_unordered float32_unordered_arm
+#define float32_eq_quiet float32_eq_quiet_arm
+#define float32_le_quiet float32_le_quiet_arm
+#define float32_lt_quiet float32_lt_quiet_arm
+#define float32_unordered_quiet float32_unordered_quiet_arm
 #define float64_to_floatx80 float64_to_floatx80_arm
-#define float64_to_int16 float64_to_int16_arm
-#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_arm
-#define float64_to_int32 float64_to_int32_arm
-#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_arm
-#define float64_to_int64 float64_to_int64_arm
-#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_arm
-#define float64_to_uint16 float64_to_uint16_arm
-#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_arm
-#define float64_to_uint32 float64_to_uint32_arm
-#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_arm
-#define float64_to_uint64 float64_to_uint64_arm
-#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_arm
-#define float64_trunc_to_int float64_trunc_to_int_arm
+#define float64_to_float128 float64_to_float128_arm
+#define float64_rem float64_rem_arm
+#define float64_log2 float64_log2_arm
+#define float64_eq float64_eq_arm
+#define float64_le float64_le_arm
+#define float64_lt float64_lt_arm
 #define float64_unordered float64_unordered_arm
+#define float64_eq_quiet float64_eq_quiet_arm
+#define float64_le_quiet float64_le_quiet_arm
+#define float64_lt_quiet float64_lt_quiet_arm
 #define float64_unordered_quiet float64_unordered_quiet_arm
-#define float_raise float_raise_arm
-#define floatx80_add floatx80_add_arm
-#define floatx80_compare floatx80_compare_arm
-#define floatx80_compare_internal floatx80_compare_internal_arm
-#define floatx80_compare_quiet floatx80_compare_quiet_arm
-#define floatx80_default_nan floatx80_default_nan_arm
-#define floatx80_div floatx80_div_arm
-#define floatx80_eq floatx80_eq_arm
-#define floatx80_eq_quiet floatx80_eq_quiet_arm
-#define floatx80_is_quiet_nan floatx80_is_quiet_nan_arm
-#define floatx80_is_signaling_nan floatx80_is_signaling_nan_arm
-#define floatx80_le floatx80_le_arm
-#define floatx80_le_quiet floatx80_le_quiet_arm
-#define floatx80_lt floatx80_lt_arm
-#define floatx80_lt_quiet floatx80_lt_quiet_arm
-#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_arm
-#define floatx80_mul floatx80_mul_arm
-#define floatx80_rem floatx80_rem_arm
-#define floatx80_round_to_int floatx80_round_to_int_arm
-#define floatx80_scalbn floatx80_scalbn_arm
-#define floatx80_sqrt floatx80_sqrt_arm
-#define floatx80_sub floatx80_sub_arm
-#define floatx80ToCommonNaN floatx80ToCommonNaN_arm
-#define floatx80_to_float128 floatx80_to_float128_arm
-#define floatx80_to_float32 floatx80_to_float32_arm
-#define floatx80_to_float64 floatx80_to_float64_arm
 #define floatx80_to_int32 floatx80_to_int32_arm
 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_arm
 #define floatx80_to_int64 floatx80_to_int64_arm
 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_arm
+#define floatx80_to_float32 floatx80_to_float32_arm
+#define floatx80_to_float64 floatx80_to_float64_arm
+#define floatx80_to_float128 floatx80_to_float128_arm
+#define floatx80_round floatx80_round_arm
+#define floatx80_round_to_int floatx80_round_to_int_arm
+#define floatx80_add floatx80_add_arm
+#define floatx80_sub floatx80_sub_arm
+#define floatx80_mul floatx80_mul_arm
+#define floatx80_div floatx80_div_arm
+#define floatx80_rem floatx80_rem_arm
+#define floatx80_sqrt floatx80_sqrt_arm
+#define floatx80_eq floatx80_eq_arm
+#define floatx80_le floatx80_le_arm
+#define floatx80_lt floatx80_lt_arm
 #define floatx80_unordered floatx80_unordered_arm
+#define floatx80_eq_quiet floatx80_eq_quiet_arm
+#define floatx80_le_quiet floatx80_le_quiet_arm
+#define floatx80_lt_quiet floatx80_lt_quiet_arm
 #define floatx80_unordered_quiet floatx80_unordered_quiet_arm
-#define flush_icache_range flush_icache_range_arm
-#define format_string format_string_arm
-#define fp_decode_rm fp_decode_rm_arm
-#define frame_dummy frame_dummy_arm
-#define free_range free_range_arm
-#define fstat64 fstat64_arm
-#define futex_wait futex_wait_arm
-#define futex_wake futex_wake_arm
-#define gen_aa32_ld16s gen_aa32_ld16s_arm
-#define gen_aa32_ld16u gen_aa32_ld16u_arm
-#define gen_aa32_ld32u gen_aa32_ld32u_arm
-#define gen_aa32_ld64 gen_aa32_ld64_arm
-#define gen_aa32_ld8s gen_aa32_ld8s_arm
-#define gen_aa32_ld8u gen_aa32_ld8u_arm
-#define gen_aa32_st16 gen_aa32_st16_arm
-#define gen_aa32_st32 gen_aa32_st32_arm
-#define gen_aa32_st64 gen_aa32_st64_arm
-#define gen_aa32_st8 gen_aa32_st8_arm
-#define gen_adc gen_adc_arm
-#define gen_adc_CC gen_adc_CC_arm
-#define gen_add16 gen_add16_arm
-#define gen_add_carry gen_add_carry_arm
-#define gen_add_CC gen_add_CC_arm
-#define gen_add_datah_offset gen_add_datah_offset_arm
-#define gen_add_data_offset gen_add_data_offset_arm
-#define gen_addq gen_addq_arm
-#define gen_addq_lo gen_addq_lo_arm
-#define gen_addq_msw gen_addq_msw_arm
-#define gen_arm_parallel_addsub gen_arm_parallel_addsub_arm
-#define gen_arm_shift_im gen_arm_shift_im_arm
-#define gen_arm_shift_reg gen_arm_shift_reg_arm
-#define gen_bx gen_bx_arm
-#define gen_bx_im gen_bx_im_arm
-#define gen_clrex gen_clrex_arm
-#define generate_memory_topology generate_memory_topology_arm
-#define generic_timer_cp_reginfo generic_timer_cp_reginfo_arm
-#define gen_exception gen_exception_arm
-#define gen_exception_insn gen_exception_insn_arm
-#define gen_exception_internal gen_exception_internal_arm
-#define gen_exception_internal_insn gen_exception_internal_insn_arm
-#define gen_exception_return gen_exception_return_arm
-#define gen_goto_tb gen_goto_tb_arm
-#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_arm
-#define gen_helper_add_saturate gen_helper_add_saturate_arm
-#define gen_helper_add_setq gen_helper_add_setq_arm
-#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_arm
-#define gen_helper_clz32 gen_helper_clz32_arm
-#define gen_helper_clz64 gen_helper_clz64_arm
-#define gen_helper_clz_arm gen_helper_clz_arm_arm
+#define float128_to_int32 float128_to_int32_arm
+#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_arm
+#define float128_to_int64 float128_to_int64_arm
+#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_arm
+#define float128_to_uint64 float128_to_uint64_arm
+#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_arm
+#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_arm
+#define float128_to_uint32 float128_to_uint32_arm
+#define float128_to_float32 float128_to_float32_arm
+#define float128_to_float64 float128_to_float64_arm
+#define float128_to_floatx80 float128_to_floatx80_arm
+#define float128_round_to_int float128_round_to_int_arm
+#define float128_add float128_add_arm
+#define float128_sub float128_sub_arm
+#define float128_mul float128_mul_arm
+#define float128_div float128_div_arm
+#define float128_rem float128_rem_arm
+#define float128_sqrt float128_sqrt_arm
+#define float128_eq float128_eq_arm
+#define float128_le float128_le_arm
+#define float128_lt float128_lt_arm
+#define float128_unordered float128_unordered_arm
+#define float128_eq_quiet float128_eq_quiet_arm
+#define float128_le_quiet float128_le_quiet_arm
+#define float128_lt_quiet float128_lt_quiet_arm
+#define float128_unordered_quiet float128_unordered_quiet_arm
+#define floatx80_compare floatx80_compare_arm
+#define floatx80_compare_quiet floatx80_compare_quiet_arm
+#define float128_compare float128_compare_arm
+#define float128_compare_quiet float128_compare_quiet_arm
+#define floatx80_scalbn floatx80_scalbn_arm
+#define float128_scalbn float128_scalbn_arm
+#define softfloat_init softfloat_init_arm
+#define tcg_optimize tcg_optimize_arm
+#define gen_new_label gen_new_label_arm
+#define tcg_can_emit_vec_op tcg_can_emit_vec_op_arm
+#define tcg_expand_vec_op tcg_expand_vec_op_arm
+#define tcg_register_jit tcg_register_jit_arm
+#define tcg_tb_insert tcg_tb_insert_arm
+#define tcg_tb_remove tcg_tb_remove_arm
+#define tcg_tb_lookup tcg_tb_lookup_arm
+#define tcg_tb_foreach tcg_tb_foreach_arm
+#define tcg_nb_tbs tcg_nb_tbs_arm
+#define tcg_region_reset_all tcg_region_reset_all_arm
+#define tcg_region_init tcg_region_init_arm
+#define tcg_code_size tcg_code_size_arm
+#define tcg_code_capacity tcg_code_capacity_arm
+#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_arm
+#define tcg_malloc_internal tcg_malloc_internal_arm
+#define tcg_pool_reset tcg_pool_reset_arm
+#define tcg_context_init tcg_context_init_arm
+#define tcg_tb_alloc tcg_tb_alloc_arm
+#define tcg_prologue_init tcg_prologue_init_arm
+#define tcg_func_start tcg_func_start_arm
+#define tcg_set_frame tcg_set_frame_arm
+#define tcg_global_mem_new_internal tcg_global_mem_new_internal_arm
+#define tcg_temp_new_internal tcg_temp_new_internal_arm
+#define tcg_temp_new_vec tcg_temp_new_vec_arm
+#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_arm
+#define tcg_temp_free_internal tcg_temp_free_internal_arm
+#define tcg_const_i32 tcg_const_i32_arm
+#define tcg_const_i64 tcg_const_i64_arm
+#define tcg_const_local_i32 tcg_const_local_i32_arm
+#define tcg_const_local_i64 tcg_const_local_i64_arm
+#define tcg_op_supported tcg_op_supported_arm
+#define tcg_gen_callN tcg_gen_callN_arm
+#define tcg_op_remove tcg_op_remove_arm
+#define tcg_emit_op tcg_emit_op_arm
+#define tcg_op_insert_before tcg_op_insert_before_arm
+#define tcg_op_insert_after tcg_op_insert_after_arm
+#define tcg_cpu_exec_time tcg_cpu_exec_time_arm
+#define tcg_gen_code tcg_gen_code_arm
+#define tcg_gen_op1 tcg_gen_op1_arm
+#define tcg_gen_op2 tcg_gen_op2_arm
+#define tcg_gen_op3 tcg_gen_op3_arm
+#define tcg_gen_op4 tcg_gen_op4_arm
+#define tcg_gen_op5 tcg_gen_op5_arm
+#define tcg_gen_op6 tcg_gen_op6_arm
+#define tcg_gen_mb tcg_gen_mb_arm
+#define tcg_gen_addi_i32 tcg_gen_addi_i32_arm
+#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_arm
+#define tcg_gen_subi_i32 tcg_gen_subi_i32_arm
+#define tcg_gen_andi_i32 tcg_gen_andi_i32_arm
+#define tcg_gen_ori_i32 tcg_gen_ori_i32_arm
+#define tcg_gen_xori_i32 tcg_gen_xori_i32_arm
+#define tcg_gen_shli_i32 tcg_gen_shli_i32_arm
+#define tcg_gen_shri_i32 tcg_gen_shri_i32_arm
+#define tcg_gen_sari_i32 tcg_gen_sari_i32_arm
+#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_arm
+#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_arm
+#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_arm
+#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_arm
+#define tcg_gen_muli_i32 tcg_gen_muli_i32_arm
+#define tcg_gen_div_i32 tcg_gen_div_i32_arm
+#define tcg_gen_rem_i32 tcg_gen_rem_i32_arm
+#define tcg_gen_divu_i32 tcg_gen_divu_i32_arm
+#define tcg_gen_remu_i32 tcg_gen_remu_i32_arm
+#define tcg_gen_andc_i32 tcg_gen_andc_i32_arm
+#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_arm
+#define tcg_gen_nand_i32 tcg_gen_nand_i32_arm
+#define tcg_gen_nor_i32 tcg_gen_nor_i32_arm
+#define tcg_gen_orc_i32 tcg_gen_orc_i32_arm
+#define tcg_gen_clz_i32 tcg_gen_clz_i32_arm
+#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_arm
+#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_arm
+#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_arm
+#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_arm
+#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_arm
+#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_arm
+#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_arm
+#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_arm
+#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_arm
+#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_arm
+#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_arm
+#define tcg_gen_extract_i32 tcg_gen_extract_i32_arm
+#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_arm
+#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_arm
+#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_arm
+#define tcg_gen_add2_i32 tcg_gen_add2_i32_arm
+#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_arm
+#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_arm
+#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_arm
+#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_arm
+#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_arm
+#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_arm
+#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_arm
+#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_arm
+#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_arm
+#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_arm
+#define tcg_gen_smin_i32 tcg_gen_smin_i32_arm
+#define tcg_gen_umin_i32 tcg_gen_umin_i32_arm
+#define tcg_gen_smax_i32 tcg_gen_smax_i32_arm
+#define tcg_gen_umax_i32 tcg_gen_umax_i32_arm
+#define tcg_gen_abs_i32 tcg_gen_abs_i32_arm
+#define tcg_gen_addi_i64 tcg_gen_addi_i64_arm
+#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_arm
+#define tcg_gen_subi_i64 tcg_gen_subi_i64_arm
+#define tcg_gen_andi_i64 tcg_gen_andi_i64_arm
+#define tcg_gen_ori_i64 tcg_gen_ori_i64_arm
+#define tcg_gen_xori_i64 tcg_gen_xori_i64_arm
+#define tcg_gen_shli_i64 tcg_gen_shli_i64_arm
+#define tcg_gen_shri_i64 tcg_gen_shri_i64_arm
+#define tcg_gen_sari_i64 tcg_gen_sari_i64_arm
+#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_arm
+#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_arm
+#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_arm
+#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_arm
+#define tcg_gen_muli_i64 tcg_gen_muli_i64_arm
+#define tcg_gen_div_i64 tcg_gen_div_i64_arm
+#define tcg_gen_rem_i64 tcg_gen_rem_i64_arm
+#define tcg_gen_divu_i64 tcg_gen_divu_i64_arm
+#define tcg_gen_remu_i64 tcg_gen_remu_i64_arm
+#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_arm
+#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_arm
+#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_arm
+#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_arm
+#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_arm
+#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_arm
+#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_arm
+#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_arm
+#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_arm
+#define tcg_gen_not_i64 tcg_gen_not_i64_arm
+#define tcg_gen_andc_i64 tcg_gen_andc_i64_arm
+#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_arm
+#define tcg_gen_nand_i64 tcg_gen_nand_i64_arm
+#define tcg_gen_nor_i64 tcg_gen_nor_i64_arm
+#define tcg_gen_orc_i64 tcg_gen_orc_i64_arm
+#define tcg_gen_clz_i64 tcg_gen_clz_i64_arm
+#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_arm
+#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_arm
+#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_arm
+#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_arm
+#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_arm
+#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_arm
+#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_arm
+#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_arm
+#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_arm
+#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_arm
+#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_arm
+#define tcg_gen_extract_i64 tcg_gen_extract_i64_arm
+#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_arm
+#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_arm
+#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_arm
+#define tcg_gen_add2_i64 tcg_gen_add2_i64_arm
+#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_arm
+#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_arm
+#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_arm
+#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_arm
+#define tcg_gen_smin_i64 tcg_gen_smin_i64_arm
+#define tcg_gen_umin_i64 tcg_gen_umin_i64_arm
+#define tcg_gen_smax_i64 tcg_gen_smax_i64_arm
+#define tcg_gen_umax_i64 tcg_gen_umax_i64_arm
+#define tcg_gen_abs_i64 tcg_gen_abs_i64_arm
+#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_arm
+#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_arm
+#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_arm
+#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_arm
+#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_arm
+#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_arm
+#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_arm
+#define tcg_gen_exit_tb tcg_gen_exit_tb_arm
+#define tcg_gen_goto_tb tcg_gen_goto_tb_arm
+#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_arm
+#define check_exit_request check_exit_request_arm
+#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_arm
+#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_arm
+#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_arm
+#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_arm
+#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_arm
+#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_arm
+#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_arm
+#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_arm
+#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_arm
+#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_arm
+#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_arm
+#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_arm
+#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_arm
+#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_arm
+#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_arm
+#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_arm
+#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_arm
+#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_arm
+#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_arm
+#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_arm
+#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_arm
+#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_arm
+#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_arm
+#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_arm
+#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_arm
+#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_arm
+#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_arm
+#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_arm
+#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_arm
+#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_arm
+#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_arm
+#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_arm
+#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_arm
+#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_arm
+#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_arm
+#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_arm
+#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_arm
+#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_arm
+#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_arm
+#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_arm
+#define simd_desc simd_desc_arm
+#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_arm
+#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_arm
+#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_arm
+#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_arm
+#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_arm
+#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_arm
+#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_arm
+#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_arm
+#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_arm
+#define tcg_gen_gvec_2 tcg_gen_gvec_2_arm
+#define tcg_gen_gvec_2i tcg_gen_gvec_2i_arm
+#define tcg_gen_gvec_2s tcg_gen_gvec_2s_arm
+#define tcg_gen_gvec_3 tcg_gen_gvec_3_arm
+#define tcg_gen_gvec_3i tcg_gen_gvec_3i_arm
+#define tcg_gen_gvec_4 tcg_gen_gvec_4_arm
+#define tcg_gen_gvec_mov tcg_gen_gvec_mov_arm
+#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_arm
+#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_arm
+#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_arm
+#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_arm
+#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_arm
+#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_arm
+#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_arm
+#define tcg_gen_gvec_not tcg_gen_gvec_not_arm
+#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_arm
+#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_arm
+#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_arm
+#define tcg_gen_gvec_add tcg_gen_gvec_add_arm
+#define tcg_gen_gvec_adds tcg_gen_gvec_adds_arm
+#define tcg_gen_gvec_addi tcg_gen_gvec_addi_arm
+#define tcg_gen_gvec_subs tcg_gen_gvec_subs_arm
+#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_arm
+#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_arm
+#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_arm
+#define tcg_gen_gvec_sub tcg_gen_gvec_sub_arm
+#define tcg_gen_gvec_mul tcg_gen_gvec_mul_arm
+#define tcg_gen_gvec_muls tcg_gen_gvec_muls_arm
+#define tcg_gen_gvec_muli tcg_gen_gvec_muli_arm
+#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_arm
+#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_arm
+#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_arm
+#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_arm
+#define tcg_gen_gvec_smin tcg_gen_gvec_smin_arm
+#define tcg_gen_gvec_umin tcg_gen_gvec_umin_arm
+#define tcg_gen_gvec_smax tcg_gen_gvec_smax_arm
+#define tcg_gen_gvec_umax tcg_gen_gvec_umax_arm
+#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_arm
+#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_arm
+#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_arm
+#define tcg_gen_gvec_neg tcg_gen_gvec_neg_arm
+#define tcg_gen_gvec_abs tcg_gen_gvec_abs_arm
+#define tcg_gen_gvec_and tcg_gen_gvec_and_arm
+#define tcg_gen_gvec_or tcg_gen_gvec_or_arm
+#define tcg_gen_gvec_xor tcg_gen_gvec_xor_arm
+#define tcg_gen_gvec_andc tcg_gen_gvec_andc_arm
+#define tcg_gen_gvec_orc tcg_gen_gvec_orc_arm
+#define tcg_gen_gvec_nand tcg_gen_gvec_nand_arm
+#define tcg_gen_gvec_nor tcg_gen_gvec_nor_arm
+#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_arm
+#define tcg_gen_gvec_ands tcg_gen_gvec_ands_arm
+#define tcg_gen_gvec_andi tcg_gen_gvec_andi_arm
+#define tcg_gen_gvec_xors tcg_gen_gvec_xors_arm
+#define tcg_gen_gvec_xori tcg_gen_gvec_xori_arm
+#define tcg_gen_gvec_ors tcg_gen_gvec_ors_arm
+#define tcg_gen_gvec_ori tcg_gen_gvec_ori_arm
+#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_arm
+#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_arm
+#define tcg_gen_gvec_shli tcg_gen_gvec_shli_arm
+#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_arm
+#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_arm
+#define tcg_gen_gvec_shri tcg_gen_gvec_shri_arm
+#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_arm
+#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_arm
+#define tcg_gen_gvec_sari tcg_gen_gvec_sari_arm
+#define tcg_gen_gvec_shls tcg_gen_gvec_shls_arm
+#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_arm
+#define tcg_gen_gvec_sars tcg_gen_gvec_sars_arm
+#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_arm
+#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_arm
+#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_arm
+#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_arm
+#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_arm
+#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_arm
+#define vec_gen_2 vec_gen_2_arm
+#define vec_gen_3 vec_gen_3_arm
+#define vec_gen_4 vec_gen_4_arm
+#define tcg_gen_mov_vec tcg_gen_mov_vec_arm
+#define tcg_const_zeros_vec tcg_const_zeros_vec_arm
+#define tcg_const_ones_vec tcg_const_ones_vec_arm
+#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_arm
+#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_arm
+#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_arm
+#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_arm
+#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_arm
+#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_arm
+#define tcg_gen_dupi_vec tcg_gen_dupi_vec_arm
+#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_arm
+#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_arm
+#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_arm
+#define tcg_gen_ld_vec tcg_gen_ld_vec_arm
+#define tcg_gen_st_vec tcg_gen_st_vec_arm
+#define tcg_gen_stl_vec tcg_gen_stl_vec_arm
+#define tcg_gen_and_vec tcg_gen_and_vec_arm
+#define tcg_gen_or_vec tcg_gen_or_vec_arm
+#define tcg_gen_xor_vec tcg_gen_xor_vec_arm
+#define tcg_gen_andc_vec tcg_gen_andc_vec_arm
+#define tcg_gen_orc_vec tcg_gen_orc_vec_arm
+#define tcg_gen_nand_vec tcg_gen_nand_vec_arm
+#define tcg_gen_nor_vec tcg_gen_nor_vec_arm
+#define tcg_gen_eqv_vec tcg_gen_eqv_vec_arm
+#define tcg_gen_not_vec tcg_gen_not_vec_arm
+#define tcg_gen_neg_vec tcg_gen_neg_vec_arm
+#define tcg_gen_abs_vec tcg_gen_abs_vec_arm
+#define tcg_gen_shli_vec tcg_gen_shli_vec_arm
+#define tcg_gen_shri_vec tcg_gen_shri_vec_arm
+#define tcg_gen_sari_vec tcg_gen_sari_vec_arm
+#define tcg_gen_cmp_vec tcg_gen_cmp_vec_arm
+#define tcg_gen_add_vec tcg_gen_add_vec_arm
+#define tcg_gen_sub_vec tcg_gen_sub_vec_arm
+#define tcg_gen_mul_vec tcg_gen_mul_vec_arm
+#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_arm
+#define tcg_gen_usadd_vec tcg_gen_usadd_vec_arm
+#define tcg_gen_sssub_vec tcg_gen_sssub_vec_arm
+#define tcg_gen_ussub_vec tcg_gen_ussub_vec_arm
+#define tcg_gen_smin_vec tcg_gen_smin_vec_arm
+#define tcg_gen_umin_vec tcg_gen_umin_vec_arm
+#define tcg_gen_smax_vec tcg_gen_smax_vec_arm
+#define tcg_gen_umax_vec tcg_gen_umax_vec_arm
+#define tcg_gen_shlv_vec tcg_gen_shlv_vec_arm
+#define tcg_gen_shrv_vec tcg_gen_shrv_vec_arm
+#define tcg_gen_sarv_vec tcg_gen_sarv_vec_arm
+#define tcg_gen_shls_vec tcg_gen_shls_vec_arm
+#define tcg_gen_shrs_vec tcg_gen_shrs_vec_arm
+#define tcg_gen_sars_vec tcg_gen_sars_vec_arm
+#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_arm
+#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_arm
+#define tb_htable_lookup tb_htable_lookup_arm
+#define tb_set_jmp_target tb_set_jmp_target_arm
+#define cpu_exec cpu_exec_arm
+#define cpu_loop_exit_noexc cpu_loop_exit_noexc_arm
+#define cpu_reloading_memory_map cpu_reloading_memory_map_arm
+#define cpu_loop_exit cpu_loop_exit_arm
+#define cpu_loop_exit_restore cpu_loop_exit_restore_arm
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_arm
+#define tlb_init tlb_init_arm
+#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_arm
+#define tlb_flush tlb_flush_arm
+#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_arm
+#define tlb_flush_all_cpus tlb_flush_all_cpus_arm
+#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_arm
+#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_arm
+#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_arm
+#define tlb_flush_page tlb_flush_page_arm
+#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_arm
+#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_arm
+#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_arm
+#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_arm
+#define tlb_protect_code tlb_protect_code_arm
+#define tlb_unprotect_code tlb_unprotect_code_arm
+#define tlb_reset_dirty tlb_reset_dirty_arm
+#define tlb_set_dirty tlb_set_dirty_arm
+#define tlb_set_page_with_attrs tlb_set_page_with_attrs_arm
+#define tlb_set_page tlb_set_page_arm
+#define get_page_addr_code_hostp get_page_addr_code_hostp_arm
+#define get_page_addr_code get_page_addr_code_arm
+#define probe_access probe_access_arm
+#define tlb_vaddr_to_host tlb_vaddr_to_host_arm
+#define helper_ret_ldub_mmu helper_ret_ldub_mmu_arm
+#define helper_le_lduw_mmu helper_le_lduw_mmu_arm
+#define helper_be_lduw_mmu helper_be_lduw_mmu_arm
+#define helper_le_ldul_mmu helper_le_ldul_mmu_arm
+#define helper_be_ldul_mmu helper_be_ldul_mmu_arm
+#define helper_le_ldq_mmu helper_le_ldq_mmu_arm
+#define helper_be_ldq_mmu helper_be_ldq_mmu_arm
+#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_arm
+#define helper_le_ldsw_mmu helper_le_ldsw_mmu_arm
+#define helper_be_ldsw_mmu helper_be_ldsw_mmu_arm
+#define helper_le_ldsl_mmu helper_le_ldsl_mmu_arm
+#define helper_be_ldsl_mmu helper_be_ldsl_mmu_arm
+#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_arm
+#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_arm
+#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_arm
+#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_arm
+#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_arm
+#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_arm
+#define cpu_ldub_data_ra cpu_ldub_data_ra_arm
+#define cpu_ldsb_data_ra cpu_ldsb_data_ra_arm
+#define cpu_lduw_data_ra cpu_lduw_data_ra_arm
+#define cpu_ldsw_data_ra cpu_ldsw_data_ra_arm
+#define cpu_ldl_data_ra cpu_ldl_data_ra_arm
+#define cpu_ldq_data_ra cpu_ldq_data_ra_arm
+#define cpu_ldub_data cpu_ldub_data_arm
+#define cpu_ldsb_data cpu_ldsb_data_arm
+#define cpu_lduw_data cpu_lduw_data_arm
+#define cpu_ldsw_data cpu_ldsw_data_arm
+#define cpu_ldl_data cpu_ldl_data_arm
+#define cpu_ldq_data cpu_ldq_data_arm
+#define helper_ret_stb_mmu helper_ret_stb_mmu_arm
+#define helper_le_stw_mmu helper_le_stw_mmu_arm
+#define helper_be_stw_mmu helper_be_stw_mmu_arm
+#define helper_le_stl_mmu helper_le_stl_mmu_arm
+#define helper_be_stl_mmu helper_be_stl_mmu_arm
+#define helper_le_stq_mmu helper_le_stq_mmu_arm
+#define helper_be_stq_mmu helper_be_stq_mmu_arm
+#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_arm
+#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_arm
+#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_arm
+#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_arm
+#define cpu_stb_data_ra cpu_stb_data_ra_arm
+#define cpu_stw_data_ra cpu_stw_data_ra_arm
+#define cpu_stl_data_ra cpu_stl_data_ra_arm
+#define cpu_stq_data_ra cpu_stq_data_ra_arm
+#define cpu_stb_data cpu_stb_data_arm
+#define cpu_stw_data cpu_stw_data_arm
+#define cpu_stl_data cpu_stl_data_arm
+#define cpu_stq_data cpu_stq_data_arm
+#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_arm
+#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_arm
+#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_arm
+#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_arm
+#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_arm
+#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_arm
+#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_arm
+#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_arm
+#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_arm
+#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_arm
+#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_arm
+#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_arm
+#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_arm
+#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_arm
+#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_arm
+#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_arm
+#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_arm
+#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_arm
+#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_arm
+#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_arm
+#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_arm
+#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_arm
+#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_arm
+#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_arm
+#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_arm
+#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_arm
+#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_arm
+#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_arm
+#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_arm
+#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_arm
+#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_arm
+#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_arm
+#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_arm
+#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_arm
+#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_arm
+#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_arm
+#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_arm
+#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_arm
+#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_arm
+#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_arm
+#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_arm
+#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_arm
+#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_arm
+#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_arm
+#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_arm
+#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_arm
+#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_arm
+#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_arm
+#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_arm
+#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_arm
+#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_arm
+#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_arm
+#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_arm
+#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_arm
+#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_arm
+#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_arm
+#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_arm
+#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_arm
+#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_arm
+#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_arm
+#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_arm
+#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_arm
+#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_arm
+#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_arm
+#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_arm
+#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_arm
+#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_arm
+#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_arm
+#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_arm
+#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_arm
+#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_arm
+#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_arm
+#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_arm
+#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_arm
+#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_arm
+#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_arm
+#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_arm
+#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_arm
+#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_arm
+#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_arm
+#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_arm
+#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_arm
+#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_arm
+#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_arm
+#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_arm
+#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_arm
+#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_arm
+#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_arm
+#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_arm
+#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_arm
+#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_arm
+#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_arm
+#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_arm
+#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_arm
+#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_arm
+#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_arm
+#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_arm
+#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_arm
+#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_arm
+#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_arm
+#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_arm
+#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_arm
+#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_arm
+#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_arm
+#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_arm
+#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_arm
+#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_arm
+#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_arm
+#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_arm
+#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_arm
+#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_arm
+#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_arm
+#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_arm
+#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_arm
+#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_arm
+#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_arm
+#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_arm
+#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_arm
+#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_arm
+#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_arm
+#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_arm
+#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_arm
+#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_arm
+#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_arm
+#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_arm
+#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_arm
+#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_arm
+#define helper_atomic_xchgb helper_atomic_xchgb_arm
+#define helper_atomic_fetch_addb helper_atomic_fetch_addb_arm
+#define helper_atomic_fetch_andb helper_atomic_fetch_andb_arm
+#define helper_atomic_fetch_orb helper_atomic_fetch_orb_arm
+#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_arm
+#define helper_atomic_add_fetchb helper_atomic_add_fetchb_arm
+#define helper_atomic_and_fetchb helper_atomic_and_fetchb_arm
+#define helper_atomic_or_fetchb helper_atomic_or_fetchb_arm
+#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_arm
+#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_arm
+#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_arm
+#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_arm
+#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_arm
+#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_arm
+#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_arm
+#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_arm
+#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_arm
+#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_arm
+#define helper_atomic_xchgw_le helper_atomic_xchgw_le_arm
+#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_arm
+#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_arm
+#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_arm
+#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_arm
+#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_arm
+#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_arm
+#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_arm
+#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_arm
+#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_arm
+#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_arm
+#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_arm
+#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_arm
+#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_arm
+#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_arm
+#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_arm
+#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_arm
+#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_arm
+#define helper_atomic_xchgw_be helper_atomic_xchgw_be_arm
+#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_arm
+#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_arm
+#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_arm
+#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_arm
+#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_arm
+#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_arm
+#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_arm
+#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_arm
+#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_arm
+#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_arm
+#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_arm
+#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_arm
+#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_arm
+#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_arm
+#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_arm
+#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_arm
+#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_arm
+#define helper_atomic_xchgl_le helper_atomic_xchgl_le_arm
+#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_arm
+#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_arm
+#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_arm
+#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_arm
+#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_arm
+#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_arm
+#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_arm
+#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_arm
+#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_arm
+#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_arm
+#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_arm
+#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_arm
+#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_arm
+#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_arm
+#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_arm
+#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_arm
+#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_arm
+#define helper_atomic_xchgl_be helper_atomic_xchgl_be_arm
+#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_arm
+#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_arm
+#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_arm
+#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_arm
+#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_arm
+#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_arm
+#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_arm
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_arm
+#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_arm
+#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_arm
+#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_arm
+#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_arm
+#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_arm
+#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_arm
+#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_arm
+#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_arm
+#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_arm
+#define helper_atomic_xchgq_le helper_atomic_xchgq_le_arm
+#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_arm
+#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_arm
+#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_arm
+#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_arm
+#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_arm
+#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_arm
+#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_arm
+#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_arm
+#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_arm
+#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_arm
+#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_arm
+#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_arm
+#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_arm
+#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_arm
+#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_arm
+#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_arm
+#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_arm
+#define helper_atomic_xchgq_be helper_atomic_xchgq_be_arm
+#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_arm
+#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_arm
+#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_arm
+#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_arm
+#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_arm
+#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_arm
+#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_arm
+#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_arm
+#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_arm
+#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_arm
+#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_arm
+#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_arm
+#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_arm
+#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_arm
+#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_arm
+#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_arm
+#define cpu_ldub_code cpu_ldub_code_arm
+#define cpu_lduw_code cpu_lduw_code_arm
+#define cpu_ldl_code cpu_ldl_code_arm
+#define cpu_ldq_code cpu_ldq_code_arm
+#define helper_div_i32 helper_div_i32_arm
+#define helper_rem_i32 helper_rem_i32_arm
+#define helper_divu_i32 helper_divu_i32_arm
+#define helper_remu_i32 helper_remu_i32_arm
+#define helper_shl_i64 helper_shl_i64_arm
+#define helper_shr_i64 helper_shr_i64_arm
+#define helper_sar_i64 helper_sar_i64_arm
+#define helper_div_i64 helper_div_i64_arm
+#define helper_rem_i64 helper_rem_i64_arm
+#define helper_divu_i64 helper_divu_i64_arm
+#define helper_remu_i64 helper_remu_i64_arm
+#define helper_muluh_i64 helper_muluh_i64_arm
+#define helper_mulsh_i64 helper_mulsh_i64_arm
+#define helper_clz_i32 helper_clz_i32_arm
+#define helper_ctz_i32 helper_ctz_i32_arm
+#define helper_clz_i64 helper_clz_i64_arm
+#define helper_ctz_i64 helper_ctz_i64_arm
+#define helper_clrsb_i32 helper_clrsb_i32_arm
+#define helper_clrsb_i64 helper_clrsb_i64_arm
+#define helper_ctpop_i32 helper_ctpop_i32_arm
+#define helper_ctpop_i64 helper_ctpop_i64_arm
+#define helper_lookup_tb_ptr helper_lookup_tb_ptr_arm
+#define helper_exit_atomic helper_exit_atomic_arm
+#define helper_gvec_add8 helper_gvec_add8_arm
+#define helper_gvec_add16 helper_gvec_add16_arm
+#define helper_gvec_add32 helper_gvec_add32_arm
+#define helper_gvec_add64 helper_gvec_add64_arm
+#define helper_gvec_adds8 helper_gvec_adds8_arm
+#define helper_gvec_adds16 helper_gvec_adds16_arm
+#define helper_gvec_adds32 helper_gvec_adds32_arm
+#define helper_gvec_adds64 helper_gvec_adds64_arm
+#define helper_gvec_sub8 helper_gvec_sub8_arm
+#define helper_gvec_sub16 helper_gvec_sub16_arm
+#define helper_gvec_sub32 helper_gvec_sub32_arm
+#define helper_gvec_sub64 helper_gvec_sub64_arm
+#define helper_gvec_subs8 helper_gvec_subs8_arm
+#define helper_gvec_subs16 helper_gvec_subs16_arm
+#define helper_gvec_subs32 helper_gvec_subs32_arm
+#define helper_gvec_subs64 helper_gvec_subs64_arm
+#define helper_gvec_mul8 helper_gvec_mul8_arm
+#define helper_gvec_mul16 helper_gvec_mul16_arm
+#define helper_gvec_mul32 helper_gvec_mul32_arm
+#define helper_gvec_mul64 helper_gvec_mul64_arm
+#define helper_gvec_muls8 helper_gvec_muls8_arm
+#define helper_gvec_muls16 helper_gvec_muls16_arm
+#define helper_gvec_muls32 helper_gvec_muls32_arm
+#define helper_gvec_muls64 helper_gvec_muls64_arm
+#define helper_gvec_neg8 helper_gvec_neg8_arm
+#define helper_gvec_neg16 helper_gvec_neg16_arm
+#define helper_gvec_neg32 helper_gvec_neg32_arm
+#define helper_gvec_neg64 helper_gvec_neg64_arm
+#define helper_gvec_abs8 helper_gvec_abs8_arm
+#define helper_gvec_abs16 helper_gvec_abs16_arm
+#define helper_gvec_abs32 helper_gvec_abs32_arm
+#define helper_gvec_abs64 helper_gvec_abs64_arm
+#define helper_gvec_mov helper_gvec_mov_arm
+#define helper_gvec_dup64 helper_gvec_dup64_arm
+#define helper_gvec_dup32 helper_gvec_dup32_arm
+#define helper_gvec_dup16 helper_gvec_dup16_arm
+#define helper_gvec_dup8 helper_gvec_dup8_arm
+#define helper_gvec_not helper_gvec_not_arm
+#define
helper_gvec_and helper_gvec_and_arm +#define helper_gvec_or helper_gvec_or_arm +#define helper_gvec_xor helper_gvec_xor_arm +#define helper_gvec_andc helper_gvec_andc_arm +#define helper_gvec_orc helper_gvec_orc_arm +#define helper_gvec_nand helper_gvec_nand_arm +#define helper_gvec_nor helper_gvec_nor_arm +#define helper_gvec_eqv helper_gvec_eqv_arm +#define helper_gvec_ands helper_gvec_ands_arm +#define helper_gvec_xors helper_gvec_xors_arm +#define helper_gvec_ors helper_gvec_ors_arm +#define helper_gvec_shl8i helper_gvec_shl8i_arm +#define helper_gvec_shl16i helper_gvec_shl16i_arm +#define helper_gvec_shl32i helper_gvec_shl32i_arm +#define helper_gvec_shl64i helper_gvec_shl64i_arm +#define helper_gvec_shr8i helper_gvec_shr8i_arm +#define helper_gvec_shr16i helper_gvec_shr16i_arm +#define helper_gvec_shr32i helper_gvec_shr32i_arm +#define helper_gvec_shr64i helper_gvec_shr64i_arm +#define helper_gvec_sar8i helper_gvec_sar8i_arm +#define helper_gvec_sar16i helper_gvec_sar16i_arm +#define helper_gvec_sar32i helper_gvec_sar32i_arm +#define helper_gvec_sar64i helper_gvec_sar64i_arm +#define helper_gvec_shl8v helper_gvec_shl8v_arm +#define helper_gvec_shl16v helper_gvec_shl16v_arm +#define helper_gvec_shl32v helper_gvec_shl32v_arm +#define helper_gvec_shl64v helper_gvec_shl64v_arm +#define helper_gvec_shr8v helper_gvec_shr8v_arm +#define helper_gvec_shr16v helper_gvec_shr16v_arm +#define helper_gvec_shr32v helper_gvec_shr32v_arm +#define helper_gvec_shr64v helper_gvec_shr64v_arm +#define helper_gvec_sar8v helper_gvec_sar8v_arm +#define helper_gvec_sar16v helper_gvec_sar16v_arm +#define helper_gvec_sar32v helper_gvec_sar32v_arm +#define helper_gvec_sar64v helper_gvec_sar64v_arm +#define helper_gvec_eq8 helper_gvec_eq8_arm +#define helper_gvec_ne8 helper_gvec_ne8_arm +#define helper_gvec_lt8 helper_gvec_lt8_arm +#define helper_gvec_le8 helper_gvec_le8_arm +#define helper_gvec_ltu8 helper_gvec_ltu8_arm +#define helper_gvec_leu8 helper_gvec_leu8_arm +#define helper_gvec_eq16 helper_gvec_eq16_arm +#define helper_gvec_ne16 helper_gvec_ne16_arm +#define helper_gvec_lt16 helper_gvec_lt16_arm +#define helper_gvec_le16 helper_gvec_le16_arm +#define helper_gvec_ltu16 helper_gvec_ltu16_arm +#define helper_gvec_leu16 helper_gvec_leu16_arm +#define helper_gvec_eq32 helper_gvec_eq32_arm +#define helper_gvec_ne32 helper_gvec_ne32_arm +#define helper_gvec_lt32 helper_gvec_lt32_arm +#define helper_gvec_le32 helper_gvec_le32_arm +#define helper_gvec_ltu32 helper_gvec_ltu32_arm +#define helper_gvec_leu32 helper_gvec_leu32_arm +#define helper_gvec_eq64 helper_gvec_eq64_arm +#define helper_gvec_ne64 helper_gvec_ne64_arm +#define helper_gvec_lt64 helper_gvec_lt64_arm +#define helper_gvec_le64 helper_gvec_le64_arm +#define helper_gvec_ltu64 helper_gvec_ltu64_arm +#define helper_gvec_leu64 helper_gvec_leu64_arm +#define helper_gvec_ssadd8 helper_gvec_ssadd8_arm +#define helper_gvec_ssadd16 helper_gvec_ssadd16_arm +#define helper_gvec_ssadd32 helper_gvec_ssadd32_arm +#define helper_gvec_ssadd64 helper_gvec_ssadd64_arm +#define helper_gvec_sssub8 helper_gvec_sssub8_arm +#define helper_gvec_sssub16 helper_gvec_sssub16_arm +#define helper_gvec_sssub32 helper_gvec_sssub32_arm +#define helper_gvec_sssub64 helper_gvec_sssub64_arm +#define helper_gvec_usadd8 helper_gvec_usadd8_arm +#define helper_gvec_usadd16 helper_gvec_usadd16_arm +#define helper_gvec_usadd32 helper_gvec_usadd32_arm +#define helper_gvec_usadd64 helper_gvec_usadd64_arm +#define helper_gvec_ussub8 helper_gvec_ussub8_arm +#define helper_gvec_ussub16 
helper_gvec_ussub16_arm +#define helper_gvec_ussub32 helper_gvec_ussub32_arm +#define helper_gvec_ussub64 helper_gvec_ussub64_arm +#define helper_gvec_smin8 helper_gvec_smin8_arm +#define helper_gvec_smin16 helper_gvec_smin16_arm +#define helper_gvec_smin32 helper_gvec_smin32_arm +#define helper_gvec_smin64 helper_gvec_smin64_arm +#define helper_gvec_smax8 helper_gvec_smax8_arm +#define helper_gvec_smax16 helper_gvec_smax16_arm +#define helper_gvec_smax32 helper_gvec_smax32_arm +#define helper_gvec_smax64 helper_gvec_smax64_arm +#define helper_gvec_umin8 helper_gvec_umin8_arm +#define helper_gvec_umin16 helper_gvec_umin16_arm +#define helper_gvec_umin32 helper_gvec_umin32_arm +#define helper_gvec_umin64 helper_gvec_umin64_arm +#define helper_gvec_umax8 helper_gvec_umax8_arm +#define helper_gvec_umax16 helper_gvec_umax16_arm +#define helper_gvec_umax32 helper_gvec_umax32_arm +#define helper_gvec_umax64 helper_gvec_umax64_arm +#define helper_gvec_bitsel helper_gvec_bitsel_arm +#define cpu_restore_state cpu_restore_state_arm +#define page_collection_lock page_collection_lock_arm +#define page_collection_unlock page_collection_unlock_arm +#define free_code_gen_buffer free_code_gen_buffer_arm +#define tcg_exec_init tcg_exec_init_arm +#define tb_cleanup tb_cleanup_arm +#define tb_flush tb_flush_arm +#define tb_phys_invalidate tb_phys_invalidate_arm +#define tb_gen_code tb_gen_code_arm +#define tb_exec_lock tb_exec_lock_arm +#define tb_exec_unlock tb_exec_unlock_arm +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_arm +#define tb_invalidate_phys_range tb_invalidate_phys_range_arm +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_arm +#define tb_check_watchpoint tb_check_watchpoint_arm +#define cpu_io_recompile cpu_io_recompile_arm +#define tb_flush_jmp_cache tb_flush_jmp_cache_arm +#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_arm +#define translator_loop_temp_check translator_loop_temp_check_arm +#define translator_loop translator_loop_arm +#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_arm +#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_arm +#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_arm +#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_arm +#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_arm +#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_arm +#define unassigned_mem_ops unassigned_mem_ops_arm +#define floatx80_infinity floatx80_infinity_arm +#define dup_const_func dup_const_func_arm +#define gen_helper_raise_exception gen_helper_raise_exception_arm +#define gen_helper_raise_interrupt gen_helper_raise_interrupt_arm +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_arm +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_arm #define gen_helper_cpsr_read gen_helper_cpsr_read_arm #define gen_helper_cpsr_write gen_helper_cpsr_write_arm -#define gen_helper_crc32_arm gen_helper_crc32_arm_arm -#define gen_helper_crc32c gen_helper_crc32c_arm -#define gen_helper_crypto_aese gen_helper_crypto_aese_arm -#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_arm -#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_arm -#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_arm -#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_arm -#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_arm -#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_arm -#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_arm -#define 
gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_arm -#define gen_helper_double_saturate gen_helper_double_saturate_arm -#define gen_helper_exception_internal gen_helper_exception_internal_arm -#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_arm -#define gen_helper_get_cp_reg gen_helper_get_cp_reg_arm -#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_arm -#define gen_helper_get_r13_banked gen_helper_get_r13_banked_arm -#define gen_helper_get_user_reg gen_helper_get_user_reg_arm -#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_arm -#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_arm -#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_arm -#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_arm -#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_arm -#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_arm -#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_arm -#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_arm -#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_arm -#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_arm -#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_arm -#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_arm -#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_arm -#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_arm -#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_arm -#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_arm -#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_arm -#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_arm -#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_arm -#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_arm -#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_arm -#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_arm -#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_arm -#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_arm -#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_arm -#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_arm -#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_arm -#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_arm -#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_arm -#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_arm -#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_arm -#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_arm -#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_arm -#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_arm -#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_arm -#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_arm -#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_arm -#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_arm -#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_arm -#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_arm -#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_arm -#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_arm -#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_arm -#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_arm -#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_arm -#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_arm -#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_arm -#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_arm -#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_arm -#define 
gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_arm -#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_arm -#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_arm -#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_arm -#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_arm -#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_arm -#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_arm -#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_arm -#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_arm -#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_arm -#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_arm -#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_arm -#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_arm -#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_arm -#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_arm -#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_arm -#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_arm -#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_arm -#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_arm -#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_arm -#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_arm -#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_arm -#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_arm -#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_arm -#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_arm -#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_arm -#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_arm -#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_arm -#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_arm -#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_arm -#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_arm -#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_arm -#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_arm -#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_arm -#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_arm -#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_arm -#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_arm -#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_arm -#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_arm -#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_arm -#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_arm -#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_arm -#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_arm -#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_arm -#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_arm -#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_arm -#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_arm -#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_arm -#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_arm -#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_arm -#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_arm -#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_arm -#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_arm -#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_arm -#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_arm -#define gen_helper_iwmmxt_unpacklw 
gen_helper_iwmmxt_unpacklw_arm -#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_arm -#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_arm -#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_arm -#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_arm -#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_arm -#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_arm -#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_arm -#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_arm -#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_arm -#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_arm -#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_arm -#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_arm -#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_arm -#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_arm -#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_arm -#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_arm -#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_arm -#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_arm -#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_arm -#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_arm -#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_arm -#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_arm -#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_arm -#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_arm -#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_arm -#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_arm -#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_arm -#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_arm -#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_arm -#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_arm -#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_arm -#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_arm -#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_arm -#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_arm -#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_arm -#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_arm -#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_arm -#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_arm -#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_arm -#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_arm -#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_arm -#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_arm -#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_arm -#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_arm -#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_arm -#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_arm -#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_arm -#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_arm -#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_arm -#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_arm -#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_arm -#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_arm -#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_arm -#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_arm -#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_arm -#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_arm -#define 
gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_arm -#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_arm -#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_arm -#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_arm -#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_arm -#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_arm -#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_arm -#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_arm -#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_arm -#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_arm -#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_arm -#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_arm -#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_arm -#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_arm -#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_arm -#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_arm -#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_arm -#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_arm -#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_arm -#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_arm -#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_arm -#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_arm -#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_arm -#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_arm -#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_arm -#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_arm -#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_arm -#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_arm -#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_arm -#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_arm -#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_arm -#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_arm -#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_arm -#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_arm -#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_arm -#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_arm -#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_arm -#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_arm -#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_arm -#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_arm -#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_arm -#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_arm -#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_arm -#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_arm -#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_arm -#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_arm -#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_arm -#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_arm -#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_arm -#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_arm -#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_arm -#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_arm -#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_arm -#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_arm -#define gen_helper_neon_qabs_s32 
gen_helper_neon_qabs_s32_arm -#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_arm -#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_arm -#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_arm -#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_arm -#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_arm -#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_arm -#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_arm -#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_arm -#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_arm -#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_arm -#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_arm -#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_arm -#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_arm -#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_arm -#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_arm -#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_arm -#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_arm -#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_arm -#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_arm -#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_arm -#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_arm -#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_arm -#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_arm -#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_arm -#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_arm -#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_arm -#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_arm -#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_arm -#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_arm -#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_arm -#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_arm -#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_arm -#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_arm -#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_arm -#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_arm -#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_arm -#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_arm -#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_arm -#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_arm -#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_arm -#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_arm -#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_arm -#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_arm -#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_arm -#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_arm -#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_arm -#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_arm -#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_arm -#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_arm -#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_arm -#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_arm -#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_arm -#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_arm -#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_arm -#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_arm -#define gen_helper_neon_rhadd_u8 
gen_helper_neon_rhadd_u8_arm -#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_arm -#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_arm -#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_arm -#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_arm -#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_arm -#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_arm -#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_arm -#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_arm -#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_arm -#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_arm -#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_arm -#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_arm -#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_arm -#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_arm -#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_arm -#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_arm -#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_arm -#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_arm -#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_arm -#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_arm -#define gen_helper_neon_tbl gen_helper_neon_tbl_arm -#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_arm -#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_arm -#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_arm -#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_arm -#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_arm -#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_arm -#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_arm -#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_arm -#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_arm -#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_arm -#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_arm -#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_arm -#define gen_helper_neon_zip16 gen_helper_neon_zip16_arm -#define gen_helper_neon_zip8 gen_helper_neon_zip8_arm -#define gen_helper_pre_hvc gen_helper_pre_hvc_arm -#define gen_helper_pre_smc gen_helper_pre_smc_arm -#define gen_helper_qadd16 gen_helper_qadd16_arm -#define gen_helper_qadd8 gen_helper_qadd8_arm -#define gen_helper_qaddsubx gen_helper_qaddsubx_arm -#define gen_helper_qsub16 gen_helper_qsub16_arm -#define gen_helper_qsub8 gen_helper_qsub8_arm -#define gen_helper_qsubaddx gen_helper_qsubaddx_arm -#define gen_helper_rbit gen_helper_rbit_arm -#define gen_helper_recpe_f32 gen_helper_recpe_f32_arm -#define gen_helper_recpe_u32 gen_helper_recpe_u32_arm -#define gen_helper_recps_f32 gen_helper_recps_f32_arm -#define gen_helper_rintd gen_helper_rintd_arm -#define gen_helper_rintd_exact gen_helper_rintd_exact_arm -#define gen_helper_rints gen_helper_rints_arm -#define gen_helper_rints_exact gen_helper_rints_exact_arm -#define gen_helper_ror_cc gen_helper_ror_cc_arm -#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_arm -#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_arm -#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_arm -#define gen_helper_sadd16 gen_helper_sadd16_arm -#define gen_helper_sadd8 gen_helper_sadd8_arm -#define gen_helper_saddsubx gen_helper_saddsubx_arm -#define gen_helper_sar_cc gen_helper_sar_cc_arm -#define gen_helper_sdiv gen_helper_sdiv_arm -#define gen_helper_sel_flags gen_helper_sel_flags_arm -#define 
gen_helper_set_cp_reg gen_helper_set_cp_reg_arm -#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_arm -#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_arm -#define gen_helper_set_r13_banked gen_helper_set_r13_banked_arm -#define gen_helper_set_rmode gen_helper_set_rmode_arm -#define gen_helper_set_user_reg gen_helper_set_user_reg_arm -#define gen_helper_shadd16 gen_helper_shadd16_arm -#define gen_helper_shadd8 gen_helper_shadd8_arm -#define gen_helper_shaddsubx gen_helper_shaddsubx_arm -#define gen_helper_shl_cc gen_helper_shl_cc_arm -#define gen_helper_shr_cc gen_helper_shr_cc_arm -#define gen_helper_shsub16 gen_helper_shsub16_arm -#define gen_helper_shsub8 gen_helper_shsub8_arm -#define gen_helper_shsubaddx gen_helper_shsubaddx_arm -#define gen_helper_ssat gen_helper_ssat_arm -#define gen_helper_ssat16 gen_helper_ssat16_arm -#define gen_helper_ssub16 gen_helper_ssub16_arm -#define gen_helper_ssub8 gen_helper_ssub8_arm -#define gen_helper_ssubaddx gen_helper_ssubaddx_arm -#define gen_helper_sub_saturate gen_helper_sub_saturate_arm -#define gen_helper_sxtb16 gen_helper_sxtb16_arm -#define gen_helper_uadd16 gen_helper_uadd16_arm -#define gen_helper_uadd8 gen_helper_uadd8_arm -#define gen_helper_uaddsubx gen_helper_uaddsubx_arm -#define gen_helper_udiv gen_helper_udiv_arm -#define gen_helper_uhadd16 gen_helper_uhadd16_arm -#define gen_helper_uhadd8 gen_helper_uhadd8_arm -#define gen_helper_uhaddsubx gen_helper_uhaddsubx_arm -#define gen_helper_uhsub16 gen_helper_uhsub16_arm -#define gen_helper_uhsub8 gen_helper_uhsub8_arm -#define gen_helper_uhsubaddx gen_helper_uhsubaddx_arm -#define gen_helper_uqadd16 gen_helper_uqadd16_arm -#define gen_helper_uqadd8 gen_helper_uqadd8_arm -#define gen_helper_uqaddsubx gen_helper_uqaddsubx_arm -#define gen_helper_uqsub16 gen_helper_uqsub16_arm -#define gen_helper_uqsub8 gen_helper_uqsub8_arm -#define gen_helper_uqsubaddx gen_helper_uqsubaddx_arm -#define gen_helper_usad8 gen_helper_usad8_arm -#define gen_helper_usat gen_helper_usat_arm -#define gen_helper_usat16 gen_helper_usat16_arm -#define gen_helper_usub16 gen_helper_usub16_arm -#define gen_helper_usub8 gen_helper_usub8_arm -#define gen_helper_usubaddx gen_helper_usubaddx_arm -#define gen_helper_uxtb16 gen_helper_uxtb16_arm -#define gen_helper_v7m_mrs gen_helper_v7m_mrs_arm -#define gen_helper_v7m_msr gen_helper_v7m_msr_arm -#define gen_helper_vfp_absd gen_helper_vfp_absd_arm -#define gen_helper_vfp_abss gen_helper_vfp_abss_arm -#define gen_helper_vfp_addd gen_helper_vfp_addd_arm -#define gen_helper_vfp_adds gen_helper_vfp_adds_arm -#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_arm -#define gen_helper_vfp_cmped gen_helper_vfp_cmped_arm -#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_arm -#define gen_helper_vfp_cmps gen_helper_vfp_cmps_arm -#define gen_helper_vfp_divd gen_helper_vfp_divd_arm -#define gen_helper_vfp_divs gen_helper_vfp_divs_arm -#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_arm -#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_arm -#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_arm -#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_arm -#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_arm -#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_arm -#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_arm -#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_arm -#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_arm -#define gen_helper_vfp_maxs gen_helper_vfp_maxs_arm 
-#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_arm -#define gen_helper_vfp_minnums gen_helper_vfp_minnums_arm -#define gen_helper_vfp_mins gen_helper_vfp_mins_arm -#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_arm -#define gen_helper_vfp_muladds gen_helper_vfp_muladds_arm -#define gen_helper_vfp_muld gen_helper_vfp_muld_arm -#define gen_helper_vfp_muls gen_helper_vfp_muls_arm -#define gen_helper_vfp_negd gen_helper_vfp_negd_arm -#define gen_helper_vfp_negs gen_helper_vfp_negs_arm -#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_arm -#define gen_helper_vfp_shtod gen_helper_vfp_shtod_arm -#define gen_helper_vfp_shtos gen_helper_vfp_shtos_arm -#define gen_helper_vfp_sitod gen_helper_vfp_sitod_arm -#define gen_helper_vfp_sitos gen_helper_vfp_sitos_arm -#define gen_helper_vfp_sltod gen_helper_vfp_sltod_arm -#define gen_helper_vfp_sltos gen_helper_vfp_sltos_arm -#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_arm -#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_arm -#define gen_helper_vfp_subd gen_helper_vfp_subd_arm -#define gen_helper_vfp_subs gen_helper_vfp_subs_arm -#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_arm -#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_arm -#define gen_helper_vfp_tosid gen_helper_vfp_tosid_arm -#define gen_helper_vfp_tosis gen_helper_vfp_tosis_arm -#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_arm -#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_arm -#define gen_helper_vfp_tosld gen_helper_vfp_tosld_arm -#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_arm -#define gen_helper_vfp_tosls gen_helper_vfp_tosls_arm -#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_arm -#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_arm -#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_arm -#define gen_helper_vfp_touid gen_helper_vfp_touid_arm -#define gen_helper_vfp_touis gen_helper_vfp_touis_arm -#define gen_helper_vfp_touizd gen_helper_vfp_touizd_arm -#define gen_helper_vfp_touizs gen_helper_vfp_touizs_arm -#define gen_helper_vfp_tould gen_helper_vfp_tould_arm -#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_arm -#define gen_helper_vfp_touls gen_helper_vfp_touls_arm -#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_arm -#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_arm -#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_arm -#define gen_helper_vfp_uitod gen_helper_vfp_uitod_arm -#define gen_helper_vfp_uitos gen_helper_vfp_uitos_arm -#define gen_helper_vfp_ultod gen_helper_vfp_ultod_arm -#define gen_helper_vfp_ultos gen_helper_vfp_ultos_arm -#define gen_helper_wfe gen_helper_wfe_arm -#define gen_helper_wfi gen_helper_wfi_arm -#define gen_hvc gen_hvc_arm -#define gen_intermediate_code_internal gen_intermediate_code_internal_arm -#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_arm -#define gen_iwmmxt_address gen_iwmmxt_address_arm -#define gen_iwmmxt_shift gen_iwmmxt_shift_arm -#define gen_jmp gen_jmp_arm -#define gen_load_and_replicate gen_load_and_replicate_arm -#define gen_load_exclusive gen_load_exclusive_arm -#define gen_logic_CC gen_logic_CC_arm -#define gen_logicq_cc gen_logicq_cc_arm -#define gen_lookup_tb gen_lookup_tb_arm -#define gen_mov_F0_vreg gen_mov_F0_vreg_arm -#define gen_mov_F1_vreg gen_mov_F1_vreg_arm -#define gen_mov_vreg_F0 gen_mov_vreg_F0_arm -#define 
gen_muls_i64_i32 gen_muls_i64_i32_arm -#define gen_mulu_i64_i32 gen_mulu_i64_i32_arm -#define gen_mulxy gen_mulxy_arm -#define gen_neon_add gen_neon_add_arm -#define gen_neon_addl gen_neon_addl_arm -#define gen_neon_addl_saturate gen_neon_addl_saturate_arm -#define gen_neon_bsl gen_neon_bsl_arm -#define gen_neon_dup_high16 gen_neon_dup_high16_arm -#define gen_neon_dup_low16 gen_neon_dup_low16_arm -#define gen_neon_dup_u8 gen_neon_dup_u8_arm -#define gen_neon_mull gen_neon_mull_arm -#define gen_neon_narrow gen_neon_narrow_arm -#define gen_neon_narrow_op gen_neon_narrow_op_arm -#define gen_neon_narrow_sats gen_neon_narrow_sats_arm -#define gen_neon_narrow_satu gen_neon_narrow_satu_arm -#define gen_neon_negl gen_neon_negl_arm -#define gen_neon_rsb gen_neon_rsb_arm -#define gen_neon_shift_narrow gen_neon_shift_narrow_arm -#define gen_neon_subl gen_neon_subl_arm -#define gen_neon_trn_u16 gen_neon_trn_u16_arm -#define gen_neon_trn_u8 gen_neon_trn_u8_arm -#define gen_neon_unarrow_sats gen_neon_unarrow_sats_arm -#define gen_neon_unzip gen_neon_unzip_arm -#define gen_neon_widen gen_neon_widen_arm -#define gen_neon_zip gen_neon_zip_arm -#define gen_new_label gen_new_label_arm -#define gen_nop_hint gen_nop_hint_arm -#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_arm -#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_arm -#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_arm -#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_arm -#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_arm -#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_arm -#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_arm -#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_arm -#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_arm -#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_arm -#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_arm -#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_arm -#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_arm -#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_arm -#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_arm -#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_arm -#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_arm -#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_arm -#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_arm -#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_arm -#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_arm -#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_arm -#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_arm -#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_arm -#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_arm -#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_arm -#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_arm -#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_arm -#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_arm -#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_arm -#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_arm -#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_arm -#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_arm -#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_arm -#define 
gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_arm -#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_arm -#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_arm -#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_arm -#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_arm -#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_arm -#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_arm -#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_arm -#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_arm -#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_arm -#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_arm -#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_arm -#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_arm -#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_arm -#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_arm -#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_arm -#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_arm -#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_arm -#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_arm -#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_arm -#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_arm -#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_arm -#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_arm -#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_arm -#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_arm -#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_arm -#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_arm -#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_arm -#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_arm -#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_arm -#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_arm -#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_arm -#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_arm -#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_arm -#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_arm -#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_arm -#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_arm -#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_arm -#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_arm -#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_arm -#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_arm -#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_arm -#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_arm -#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_arm -#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_arm -#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_arm -#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_arm -#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_arm -#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_arm -#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_arm -#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_arm -#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_arm -#define gen_rev16 gen_rev16_arm -#define gen_revsh 
gen_revsh_arm -#define gen_rfe gen_rfe_arm -#define gen_sar gen_sar_arm -#define gen_sbc_CC gen_sbc_CC_arm -#define gen_sbfx gen_sbfx_arm -#define gen_set_CF_bit31 gen_set_CF_bit31_arm -#define gen_set_condexec gen_set_condexec_arm -#define gen_set_cpsr gen_set_cpsr_arm -#define gen_set_label gen_set_label_arm -#define gen_set_pc_im gen_set_pc_im_arm -#define gen_set_psr gen_set_psr_arm -#define gen_set_psr_im gen_set_psr_im_arm -#define gen_shl gen_shl_arm -#define gen_shr gen_shr_arm -#define gen_smc gen_smc_arm -#define gen_smul_dual gen_smul_dual_arm -#define gen_srs gen_srs_arm -#define gen_ss_advance gen_ss_advance_arm -#define gen_step_complete_exception gen_step_complete_exception_arm -#define gen_store_exclusive gen_store_exclusive_arm -#define gen_storeq_reg gen_storeq_reg_arm -#define gen_sub_carry gen_sub_carry_arm -#define gen_sub_CC gen_sub_CC_arm -#define gen_subq_msw gen_subq_msw_arm -#define gen_swap_half gen_swap_half_arm -#define gen_thumb2_data_op gen_thumb2_data_op_arm -#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_arm -#define gen_ubfx gen_ubfx_arm -#define gen_vfp_abs gen_vfp_abs_arm -#define gen_vfp_add gen_vfp_add_arm -#define gen_vfp_cmp gen_vfp_cmp_arm -#define gen_vfp_cmpe gen_vfp_cmpe_arm -#define gen_vfp_div gen_vfp_div_arm -#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_arm -#define gen_vfp_F1_mul gen_vfp_F1_mul_arm -#define gen_vfp_F1_neg gen_vfp_F1_neg_arm -#define gen_vfp_ld gen_vfp_ld_arm -#define gen_vfp_mrs gen_vfp_mrs_arm -#define gen_vfp_msr gen_vfp_msr_arm -#define gen_vfp_mul gen_vfp_mul_arm -#define gen_vfp_neg gen_vfp_neg_arm -#define gen_vfp_shto gen_vfp_shto_arm -#define gen_vfp_sito gen_vfp_sito_arm -#define gen_vfp_slto gen_vfp_slto_arm -#define gen_vfp_sqrt gen_vfp_sqrt_arm -#define gen_vfp_st gen_vfp_st_arm -#define gen_vfp_sub gen_vfp_sub_arm -#define gen_vfp_tosh gen_vfp_tosh_arm -#define gen_vfp_tosi gen_vfp_tosi_arm -#define gen_vfp_tosiz gen_vfp_tosiz_arm -#define gen_vfp_tosl gen_vfp_tosl_arm -#define gen_vfp_touh gen_vfp_touh_arm -#define gen_vfp_toui gen_vfp_toui_arm -#define gen_vfp_touiz gen_vfp_touiz_arm -#define gen_vfp_toul gen_vfp_toul_arm -#define gen_vfp_uhto gen_vfp_uhto_arm -#define gen_vfp_uito gen_vfp_uito_arm -#define gen_vfp_ulto gen_vfp_ulto_arm -#define get_arm_cp_reginfo get_arm_cp_reginfo_arm -#define get_clock get_clock_arm -#define get_clock_realtime get_clock_realtime_arm -#define get_constraint_priority get_constraint_priority_arm -#define get_float_exception_flags get_float_exception_flags_arm -#define get_float_rounding_mode get_float_rounding_mode_arm -#define get_fpstatus_ptr get_fpstatus_ptr_arm -#define get_level1_table_address get_level1_table_address_arm -#define get_mem_index get_mem_index_arm -#define get_next_param_value get_next_param_value_arm -#define get_opt_name get_opt_name_arm -#define get_opt_value get_opt_value_arm -#define get_page_addr_code get_page_addr_code_arm -#define get_param_value get_param_value_arm -#define get_phys_addr get_phys_addr_arm -#define get_phys_addr_lpae get_phys_addr_lpae_arm -#define get_phys_addr_mpu get_phys_addr_mpu_arm -#define get_phys_addr_v5 get_phys_addr_v5_arm -#define get_phys_addr_v6 get_phys_addr_v6_arm -#define get_system_memory get_system_memory_arm -#define get_ticks_per_sec get_ticks_per_sec_arm -#define g_list_insert_sorted_merged g_list_insert_sorted_merged_arm -#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__arm -#define gt_cntfrq_access gt_cntfrq_access_arm -#define gt_cnt_read gt_cnt_read_arm -#define gt_cnt_reset gt_cnt_reset_arm 
-#define gt_counter_access gt_counter_access_arm -#define gt_ctl_write gt_ctl_write_arm -#define gt_cval_write gt_cval_write_arm -#define gt_get_countervalue gt_get_countervalue_arm -#define gt_pct_access gt_pct_access_arm -#define gt_ptimer_access gt_ptimer_access_arm -#define gt_recalc_timer gt_recalc_timer_arm -#define gt_timer_access gt_timer_access_arm -#define gt_tval_read gt_tval_read_arm -#define gt_tval_write gt_tval_write_arm -#define gt_vct_access gt_vct_access_arm -#define gt_vtimer_access gt_vtimer_access_arm -#define guest_phys_blocks_free guest_phys_blocks_free_arm -#define guest_phys_blocks_init guest_phys_blocks_init_arm -#define handle_vcvt handle_vcvt_arm -#define handle_vminmaxnm handle_vminmaxnm_arm -#define handle_vrint handle_vrint_arm -#define handle_vsel handle_vsel_arm -#define has_help_option has_help_option_arm -#define have_bmi1 have_bmi1_arm -#define have_bmi2 have_bmi2_arm -#define hcr_write hcr_write_arm -#define helper_access_check_cp_reg helper_access_check_cp_reg_arm -#define helper_add_saturate helper_add_saturate_arm -#define helper_add_setq helper_add_setq_arm -#define helper_add_usaturate helper_add_usaturate_arm -#define helper_be_ldl_cmmu helper_be_ldl_cmmu_arm -#define helper_be_ldq_cmmu helper_be_ldq_cmmu_arm -#define helper_be_ldq_mmu helper_be_ldq_mmu_arm -#define helper_be_ldsl_mmu helper_be_ldsl_mmu_arm -#define helper_be_ldsw_mmu helper_be_ldsw_mmu_arm -#define helper_be_ldul_mmu helper_be_ldul_mmu_arm -#define helper_be_lduw_mmu helper_be_lduw_mmu_arm -#define helper_be_ldw_cmmu helper_be_ldw_cmmu_arm -#define helper_be_stl_mmu helper_be_stl_mmu_arm -#define helper_be_stq_mmu helper_be_stq_mmu_arm -#define helper_be_stw_mmu helper_be_stw_mmu_arm -#define helper_clear_pstate_ss helper_clear_pstate_ss_arm -#define helper_clz_arm helper_clz_arm_arm -#define helper_cpsr_read helper_cpsr_read_arm -#define helper_cpsr_write helper_cpsr_write_arm -#define helper_crc32_arm helper_crc32_arm_arm -#define helper_crc32c helper_crc32c_arm +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_arm +#define arm_cpu_update_virq arm_cpu_update_virq_arm +#define arm_cpu_update_vfiq arm_cpu_update_vfiq_arm +#define arm_cpu_initfn arm_cpu_initfn_arm +#define gt_cntfrq_period_ns gt_cntfrq_period_ns_arm +#define arm_cpu_post_init arm_cpu_post_init_arm +#define arm_cpu_realizefn arm_cpu_realizefn_arm +#define a15_l2ctlr_read a15_l2ctlr_read_arm +#define arm_cpu_class_init arm_cpu_class_init_arm +#define cpu_arm_init cpu_arm_init_arm #define helper_crypto_aese helper_crypto_aese_arm #define helper_crypto_aesmc helper_crypto_aesmc_arm #define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_arm @@ -1371,1651 +1292,693 @@ #define helper_crypto_sha256h2 helper_crypto_sha256h2_arm #define helper_crypto_sha256su0 helper_crypto_sha256su0_arm #define helper_crypto_sha256su1 helper_crypto_sha256su1_arm -#define helper_dc_zva helper_dc_zva_arm -#define helper_double_saturate helper_double_saturate_arm -#define helper_exception_internal helper_exception_internal_arm -#define helper_exception_return helper_exception_return_arm -#define helper_exception_with_syndrome helper_exception_with_syndrome_arm -#define helper_get_cp_reg helper_get_cp_reg_arm -#define helper_get_cp_reg64 helper_get_cp_reg64_arm -#define helper_get_r13_banked helper_get_r13_banked_arm -#define helper_get_user_reg helper_get_user_reg_arm -#define helper_iwmmxt_addcb helper_iwmmxt_addcb_arm -#define helper_iwmmxt_addcl helper_iwmmxt_addcl_arm -#define helper_iwmmxt_addcw helper_iwmmxt_addcw_arm +#define 
helper_crypto_sha512h helper_crypto_sha512h_arm +#define helper_crypto_sha512h2 helper_crypto_sha512h2_arm +#define helper_crypto_sha512su0 helper_crypto_sha512su0_arm +#define helper_crypto_sha512su1 helper_crypto_sha512su1_arm +#define helper_crypto_sm3partw1 helper_crypto_sm3partw1_arm +#define helper_crypto_sm3partw2 helper_crypto_sm3partw2_arm +#define helper_crypto_sm3tt helper_crypto_sm3tt_arm +#define helper_crypto_sm4e helper_crypto_sm4e_arm +#define helper_crypto_sm4ekey helper_crypto_sm4ekey_arm +#define helper_check_breakpoints helper_check_breakpoints_arm +#define arm_debug_check_watchpoint arm_debug_check_watchpoint_arm +#define arm_debug_excp_handler arm_debug_excp_handler_arm +#define arm_adjust_watchpoint_address arm_adjust_watchpoint_address_arm +#define read_raw_cp_reg read_raw_cp_reg_arm +#define pmu_init pmu_init_arm +#define pmu_op_start pmu_op_start_arm +#define pmu_op_finish pmu_op_finish_arm +#define pmu_pre_el_change pmu_pre_el_change_arm +#define pmu_post_el_change pmu_post_el_change_arm +#define arm_pmu_timer_cb arm_pmu_timer_cb_arm +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_arm +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_arm +#define arm_gt_htimer_cb arm_gt_htimer_cb_arm +#define arm_gt_stimer_cb arm_gt_stimer_cb_arm +#define arm_gt_hvtimer_cb arm_gt_hvtimer_cb_arm +#define arm_hcr_el2_eff arm_hcr_el2_eff_arm +#define sve_exception_el sve_exception_el_arm +#define sve_zcr_len_for_el sve_zcr_len_for_el_arm +#define hw_watchpoint_update hw_watchpoint_update_arm +#define hw_watchpoint_update_all hw_watchpoint_update_all_arm +#define hw_breakpoint_update hw_breakpoint_update_arm +#define hw_breakpoint_update_all hw_breakpoint_update_all_arm +#define register_cp_regs_for_features register_cp_regs_for_features_arm +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_arm +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_arm +#define modify_arm_cp_regs modify_arm_cp_regs_arm +#define get_arm_cp_reginfo get_arm_cp_reginfo_arm +#define arm_cp_write_ignore arm_cp_write_ignore_arm +#define arm_cp_read_zero arm_cp_read_zero_arm +#define arm_cp_reset_ignore arm_cp_reset_ignore_arm +#define cpsr_read cpsr_read_arm +#define cpsr_write cpsr_write_arm +#define helper_sxtb16 helper_sxtb16_arm +#define helper_uxtb16 helper_uxtb16_arm +#define helper_sdiv helper_sdiv_arm +#define helper_udiv helper_udiv_arm +#define helper_rbit helper_rbit_arm +#define arm_phys_excp_target_el arm_phys_excp_target_el_arm +#define aarch64_sync_32_to_64 aarch64_sync_32_to_64_arm +#define aarch64_sync_64_to_32 aarch64_sync_64_to_32_arm +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_arm +#define arm_sctlr arm_sctlr_arm +#define arm_s1_regime_using_lpae_format arm_s1_regime_using_lpae_format_arm +#define aa64_va_parameters aa64_va_parameters_arm +#define v8m_security_lookup v8m_security_lookup_arm +#define pmsav8_mpu_lookup pmsav8_mpu_lookup_arm +#define get_phys_addr get_phys_addr_arm +#define arm_cpu_get_phys_page_attrs_debug arm_cpu_get_phys_page_attrs_debug_arm +#define helper_qadd16 helper_qadd16_arm +#define helper_qadd8 helper_qadd8_arm +#define helper_qsub16 helper_qsub16_arm +#define helper_qsub8 helper_qsub8_arm +#define helper_qsubaddx helper_qsubaddx_arm +#define helper_qaddsubx helper_qaddsubx_arm +#define helper_uqadd16 helper_uqadd16_arm +#define helper_uqadd8 helper_uqadd8_arm +#define helper_uqsub16 helper_uqsub16_arm +#define helper_uqsub8 helper_uqsub8_arm +#define helper_uqsubaddx helper_uqsubaddx_arm +#define helper_uqaddsubx 
helper_uqaddsubx_arm +#define helper_sadd16 helper_sadd16_arm +#define helper_sadd8 helper_sadd8_arm +#define helper_ssub16 helper_ssub16_arm +#define helper_ssub8 helper_ssub8_arm +#define helper_ssubaddx helper_ssubaddx_arm +#define helper_saddsubx helper_saddsubx_arm +#define helper_uadd16 helper_uadd16_arm +#define helper_uadd8 helper_uadd8_arm +#define helper_usub16 helper_usub16_arm +#define helper_usub8 helper_usub8_arm +#define helper_usubaddx helper_usubaddx_arm +#define helper_uaddsubx helper_uaddsubx_arm +#define helper_shadd16 helper_shadd16_arm +#define helper_shadd8 helper_shadd8_arm +#define helper_shsub16 helper_shsub16_arm +#define helper_shsub8 helper_shsub8_arm +#define helper_shsubaddx helper_shsubaddx_arm +#define helper_shaddsubx helper_shaddsubx_arm +#define helper_uhadd16 helper_uhadd16_arm +#define helper_uhadd8 helper_uhadd8_arm +#define helper_uhsub16 helper_uhsub16_arm +#define helper_uhsub8 helper_uhsub8_arm +#define helper_uhsubaddx helper_uhsubaddx_arm +#define helper_uhaddsubx helper_uhaddsubx_arm +#define helper_usad8 helper_usad8_arm +#define helper_sel_flags helper_sel_flags_arm +#define helper_crc32 helper_crc32_arm +#define helper_crc32c helper_crc32c_arm +#define fp_exception_el fp_exception_el_arm +#define arm_mmu_idx_to_el arm_mmu_idx_to_el_arm +#define arm_mmu_idx_el arm_mmu_idx_el_arm +#define arm_mmu_idx arm_mmu_idx_arm +#define arm_stage1_mmu_idx arm_stage1_mmu_idx_arm +#define arm_rebuild_hflags arm_rebuild_hflags_arm +#define helper_rebuild_hflags_m32_newel helper_rebuild_hflags_m32_newel_arm +#define helper_rebuild_hflags_m32 helper_rebuild_hflags_m32_arm +#define helper_rebuild_hflags_a32_newel helper_rebuild_hflags_a32_newel_arm +#define helper_rebuild_hflags_a32 helper_rebuild_hflags_a32_arm +#define helper_rebuild_hflags_a64 helper_rebuild_hflags_a64_arm +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_arm +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_arm +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_arm +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_arm +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_arm +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_arm +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_arm +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_arm +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_arm +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_arm +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_arm +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_arm +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_arm +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_arm +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_arm +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_arm +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_arm +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_arm +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_arm +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_arm +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_arm +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_arm +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_arm +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_arm +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_arm +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_arm +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_arm +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_arm +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_arm +#define helper_iwmmxt_cmpeqb 
helper_iwmmxt_cmpeqb_arm +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_arm +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_arm +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_arm +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_arm +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_arm +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_arm +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_arm +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_arm +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_arm +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_arm +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_arm +#define helper_iwmmxt_minub helper_iwmmxt_minub_arm +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_arm +#define helper_iwmmxt_minul helper_iwmmxt_minul_arm +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_arm +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_arm +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_arm +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_arm +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_arm +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_arm +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_arm +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_arm +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_arm #define helper_iwmmxt_addnb helper_iwmmxt_addnb_arm -#define helper_iwmmxt_addnl helper_iwmmxt_addnl_arm #define helper_iwmmxt_addnw helper_iwmmxt_addnw_arm -#define helper_iwmmxt_addsb helper_iwmmxt_addsb_arm -#define helper_iwmmxt_addsl helper_iwmmxt_addsl_arm -#define helper_iwmmxt_addsw helper_iwmmxt_addsw_arm +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_arm +#define helper_iwmmxt_subub helper_iwmmxt_subub_arm +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_arm +#define helper_iwmmxt_subul helper_iwmmxt_subul_arm #define helper_iwmmxt_addub helper_iwmmxt_addub_arm -#define helper_iwmmxt_addul helper_iwmmxt_addul_arm #define helper_iwmmxt_adduw helper_iwmmxt_adduw_arm -#define helper_iwmmxt_align helper_iwmmxt_align_arm +#define helper_iwmmxt_addul helper_iwmmxt_addul_arm +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_arm +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_arm +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_arm +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_arm +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_arm +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_arm #define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_arm #define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_arm #define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_arm #define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_arm -#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_arm -#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_arm -#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_arm -#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_arm -#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_arm -#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_arm -#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_arm -#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_arm -#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_arm -#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_arm -#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_arm -#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_arm +#define helper_iwmmxt_align helper_iwmmxt_align_arm #define helper_iwmmxt_insr helper_iwmmxt_insr_arm -#define helper_iwmmxt_macsw helper_iwmmxt_macsw_arm -#define helper_iwmmxt_macuw helper_iwmmxt_macuw_arm -#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_arm -#define helper_iwmmxt_madduq 
helper_iwmmxt_madduq_arm -#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_arm -#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_arm -#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_arm -#define helper_iwmmxt_maxub helper_iwmmxt_maxub_arm -#define helper_iwmmxt_maxul helper_iwmmxt_maxul_arm -#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_arm -#define helper_iwmmxt_minsb helper_iwmmxt_minsb_arm -#define helper_iwmmxt_minsl helper_iwmmxt_minsl_arm -#define helper_iwmmxt_minsw helper_iwmmxt_minsw_arm -#define helper_iwmmxt_minub helper_iwmmxt_minub_arm -#define helper_iwmmxt_minul helper_iwmmxt_minul_arm -#define helper_iwmmxt_minuw helper_iwmmxt_minuw_arm +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_arm +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_arm +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_arm +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_arm +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_arm +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_arm +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_arm #define helper_iwmmxt_msbb helper_iwmmxt_msbb_arm -#define helper_iwmmxt_msbl helper_iwmmxt_msbl_arm #define helper_iwmmxt_msbw helper_iwmmxt_msbw_arm +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_arm +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_arm +#define helper_iwmmxt_srll helper_iwmmxt_srll_arm +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_arm +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_arm +#define helper_iwmmxt_slll helper_iwmmxt_slll_arm +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_arm +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_arm +#define helper_iwmmxt_sral helper_iwmmxt_sral_arm +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_arm +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_arm +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_arm +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_arm +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_arm +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_arm +#define helper_iwmmxt_packul helper_iwmmxt_packul_arm +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_arm +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_arm +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_arm +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_arm #define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_arm #define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_arm #define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_arm -#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_arm -#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_arm -#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_arm -#define helper_iwmmxt_mululw helper_iwmmxt_mululw_arm -#define helper_iwmmxt_packsl helper_iwmmxt_packsl_arm -#define helper_iwmmxt_packsq helper_iwmmxt_packsq_arm -#define helper_iwmmxt_packsw helper_iwmmxt_packsw_arm -#define helper_iwmmxt_packul helper_iwmmxt_packul_arm -#define helper_iwmmxt_packuq helper_iwmmxt_packuq_arm -#define helper_iwmmxt_packuw helper_iwmmxt_packuw_arm -#define helper_iwmmxt_rorl helper_iwmmxt_rorl_arm -#define helper_iwmmxt_rorq helper_iwmmxt_rorq_arm -#define helper_iwmmxt_rorw helper_iwmmxt_rorw_arm -#define helper_iwmmxt_sadb helper_iwmmxt_sadb_arm -#define helper_iwmmxt_sadw helper_iwmmxt_sadw_arm -#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_arm -#define helper_iwmmxt_shufh helper_iwmmxt_shufh_arm -#define helper_iwmmxt_slll helper_iwmmxt_slll_arm -#define helper_iwmmxt_sllq helper_iwmmxt_sllq_arm -#define helper_iwmmxt_sllw helper_iwmmxt_sllw_arm -#define helper_iwmmxt_sral helper_iwmmxt_sral_arm -#define 
helper_iwmmxt_sraq helper_iwmmxt_sraq_arm -#define helper_iwmmxt_sraw helper_iwmmxt_sraw_arm -#define helper_iwmmxt_srll helper_iwmmxt_srll_arm -#define helper_iwmmxt_srlq helper_iwmmxt_srlq_arm -#define helper_iwmmxt_srlw helper_iwmmxt_srlw_arm -#define helper_iwmmxt_subnb helper_iwmmxt_subnb_arm -#define helper_iwmmxt_subnl helper_iwmmxt_subnl_arm -#define helper_iwmmxt_subnw helper_iwmmxt_subnw_arm -#define helper_iwmmxt_subsb helper_iwmmxt_subsb_arm -#define helper_iwmmxt_subsl helper_iwmmxt_subsl_arm -#define helper_iwmmxt_subsw helper_iwmmxt_subsw_arm -#define helper_iwmmxt_subub helper_iwmmxt_subub_arm -#define helper_iwmmxt_subul helper_iwmmxt_subul_arm -#define helper_iwmmxt_subuw helper_iwmmxt_subuw_arm -#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_arm -#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_arm -#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_arm -#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_arm -#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_arm -#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_arm -#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_arm -#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_arm -#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_arm -#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_arm -#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_arm -#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_arm -#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_arm -#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_arm -#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_arm -#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_arm -#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_arm -#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_arm -#define helper_ldb_cmmu helper_ldb_cmmu_arm -#define helper_ldb_mmu helper_ldb_mmu_arm -#define helper_ldl_cmmu helper_ldl_cmmu_arm -#define helper_ldl_mmu helper_ldl_mmu_arm -#define helper_ldq_cmmu helper_ldq_cmmu_arm -#define helper_ldq_mmu helper_ldq_mmu_arm -#define helper_ldw_cmmu helper_ldw_cmmu_arm -#define helper_ldw_mmu helper_ldw_mmu_arm -#define helper_le_ldl_cmmu helper_le_ldl_cmmu_arm -#define helper_le_ldq_cmmu helper_le_ldq_cmmu_arm -#define helper_le_ldq_mmu helper_le_ldq_mmu_arm -#define helper_le_ldsl_mmu helper_le_ldsl_mmu_arm -#define helper_le_ldsw_mmu helper_le_ldsw_mmu_arm -#define helper_le_ldul_mmu helper_le_ldul_mmu_arm -#define helper_le_lduw_mmu helper_le_lduw_mmu_arm -#define helper_le_ldw_cmmu helper_le_ldw_cmmu_arm -#define helper_le_stl_mmu helper_le_stl_mmu_arm -#define helper_le_stq_mmu helper_le_stq_mmu_arm -#define helper_le_stw_mmu helper_le_stw_mmu_arm -#define helper_msr_i_pstate helper_msr_i_pstate_arm -#define helper_neon_abd_f32 helper_neon_abd_f32_arm -#define helper_neon_abdl_s16 helper_neon_abdl_s16_arm -#define helper_neon_abdl_s32 helper_neon_abdl_s32_arm -#define helper_neon_abdl_s64 helper_neon_abdl_s64_arm -#define helper_neon_abdl_u16 helper_neon_abdl_u16_arm -#define helper_neon_abdl_u32 helper_neon_abdl_u32_arm -#define helper_neon_abdl_u64 helper_neon_abdl_u64_arm -#define helper_neon_abd_s16 helper_neon_abd_s16_arm -#define helper_neon_abd_s32 helper_neon_abd_s32_arm -#define helper_neon_abd_s8 helper_neon_abd_s8_arm -#define helper_neon_abd_u16 helper_neon_abd_u16_arm -#define helper_neon_abd_u32 helper_neon_abd_u32_arm -#define helper_neon_abd_u8 helper_neon_abd_u8_arm -#define helper_neon_abs_s16 helper_neon_abs_s16_arm -#define helper_neon_abs_s8 
helper_neon_abs_s8_arm -#define helper_neon_acge_f32 helper_neon_acge_f32_arm -#define helper_neon_acge_f64 helper_neon_acge_f64_arm -#define helper_neon_acgt_f32 helper_neon_acgt_f32_arm -#define helper_neon_acgt_f64 helper_neon_acgt_f64_arm -#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_arm -#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_arm -#define helper_neon_addl_u16 helper_neon_addl_u16_arm -#define helper_neon_addl_u32 helper_neon_addl_u32_arm -#define helper_neon_add_u16 helper_neon_add_u16_arm -#define helper_neon_add_u8 helper_neon_add_u8_arm -#define helper_neon_ceq_f32 helper_neon_ceq_f32_arm -#define helper_neon_ceq_u16 helper_neon_ceq_u16_arm -#define helper_neon_ceq_u32 helper_neon_ceq_u32_arm -#define helper_neon_ceq_u8 helper_neon_ceq_u8_arm -#define helper_neon_cge_f32 helper_neon_cge_f32_arm -#define helper_neon_cge_s16 helper_neon_cge_s16_arm -#define helper_neon_cge_s32 helper_neon_cge_s32_arm -#define helper_neon_cge_s8 helper_neon_cge_s8_arm -#define helper_neon_cge_u16 helper_neon_cge_u16_arm -#define helper_neon_cge_u32 helper_neon_cge_u32_arm -#define helper_neon_cge_u8 helper_neon_cge_u8_arm -#define helper_neon_cgt_f32 helper_neon_cgt_f32_arm -#define helper_neon_cgt_s16 helper_neon_cgt_s16_arm -#define helper_neon_cgt_s32 helper_neon_cgt_s32_arm -#define helper_neon_cgt_s8 helper_neon_cgt_s8_arm -#define helper_neon_cgt_u16 helper_neon_cgt_u16_arm -#define helper_neon_cgt_u32 helper_neon_cgt_u32_arm -#define helper_neon_cgt_u8 helper_neon_cgt_u8_arm -#define helper_neon_cls_s16 helper_neon_cls_s16_arm -#define helper_neon_cls_s32 helper_neon_cls_s32_arm -#define helper_neon_cls_s8 helper_neon_cls_s8_arm -#define helper_neon_clz_u16 helper_neon_clz_u16_arm -#define helper_neon_clz_u8 helper_neon_clz_u8_arm -#define helper_neon_cnt_u8 helper_neon_cnt_u8_arm -#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_arm -#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_arm -#define helper_neon_hadd_s16 helper_neon_hadd_s16_arm -#define helper_neon_hadd_s32 helper_neon_hadd_s32_arm -#define helper_neon_hadd_s8 helper_neon_hadd_s8_arm -#define helper_neon_hadd_u16 helper_neon_hadd_u16_arm -#define helper_neon_hadd_u32 helper_neon_hadd_u32_arm -#define helper_neon_hadd_u8 helper_neon_hadd_u8_arm -#define helper_neon_hsub_s16 helper_neon_hsub_s16_arm -#define helper_neon_hsub_s32 helper_neon_hsub_s32_arm -#define helper_neon_hsub_s8 helper_neon_hsub_s8_arm -#define helper_neon_hsub_u16 helper_neon_hsub_u16_arm -#define helper_neon_hsub_u32 helper_neon_hsub_u32_arm -#define helper_neon_hsub_u8 helper_neon_hsub_u8_arm -#define helper_neon_max_s16 helper_neon_max_s16_arm -#define helper_neon_max_s32 helper_neon_max_s32_arm -#define helper_neon_max_s8 helper_neon_max_s8_arm -#define helper_neon_max_u16 helper_neon_max_u16_arm -#define helper_neon_max_u32 helper_neon_max_u32_arm -#define helper_neon_max_u8 helper_neon_max_u8_arm -#define helper_neon_min_s16 helper_neon_min_s16_arm -#define helper_neon_min_s32 helper_neon_min_s32_arm -#define helper_neon_min_s8 helper_neon_min_s8_arm -#define helper_neon_min_u16 helper_neon_min_u16_arm -#define helper_neon_min_u32 helper_neon_min_u32_arm -#define helper_neon_min_u8 helper_neon_min_u8_arm -#define helper_neon_mull_p8 helper_neon_mull_p8_arm -#define helper_neon_mull_s16 helper_neon_mull_s16_arm -#define helper_neon_mull_s8 helper_neon_mull_s8_arm -#define helper_neon_mull_u16 helper_neon_mull_u16_arm -#define helper_neon_mull_u8 helper_neon_mull_u8_arm -#define 
helper_neon_mul_p8 helper_neon_mul_p8_arm -#define helper_neon_mul_u16 helper_neon_mul_u16_arm -#define helper_neon_mul_u8 helper_neon_mul_u8_arm -#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_arm -#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_arm -#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_arm -#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_arm -#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_arm -#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_arm -#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_arm -#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_arm -#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_arm -#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_arm -#define helper_neon_narrow_u16 helper_neon_narrow_u16_arm -#define helper_neon_narrow_u8 helper_neon_narrow_u8_arm -#define helper_neon_negl_u16 helper_neon_negl_u16_arm -#define helper_neon_negl_u32 helper_neon_negl_u32_arm -#define helper_neon_paddl_u16 helper_neon_paddl_u16_arm -#define helper_neon_paddl_u32 helper_neon_paddl_u32_arm -#define helper_neon_padd_u16 helper_neon_padd_u16_arm -#define helper_neon_padd_u8 helper_neon_padd_u8_arm -#define helper_neon_pmax_s16 helper_neon_pmax_s16_arm -#define helper_neon_pmax_s8 helper_neon_pmax_s8_arm -#define helper_neon_pmax_u16 helper_neon_pmax_u16_arm -#define helper_neon_pmax_u8 helper_neon_pmax_u8_arm -#define helper_neon_pmin_s16 helper_neon_pmin_s16_arm -#define helper_neon_pmin_s8 helper_neon_pmin_s8_arm -#define helper_neon_pmin_u16 helper_neon_pmin_u16_arm -#define helper_neon_pmin_u8 helper_neon_pmin_u8_arm -#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_arm -#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_arm -#define helper_neon_qabs_s16 helper_neon_qabs_s16_arm -#define helper_neon_qabs_s32 helper_neon_qabs_s32_arm -#define helper_neon_qabs_s64 helper_neon_qabs_s64_arm -#define helper_neon_qabs_s8 helper_neon_qabs_s8_arm -#define helper_neon_qadd_s16 helper_neon_qadd_s16_arm -#define helper_neon_qadd_s32 helper_neon_qadd_s32_arm -#define helper_neon_qadd_s64 helper_neon_qadd_s64_arm -#define helper_neon_qadd_s8 helper_neon_qadd_s8_arm +#define armv7m_nvic_set_pending armv7m_nvic_set_pending_arm +#define helper_v7m_preserve_fp_state helper_v7m_preserve_fp_state_arm +#define write_v7m_exception write_v7m_exception_arm +#define helper_v7m_bxns helper_v7m_bxns_arm +#define helper_v7m_blxns helper_v7m_blxns_arm +#define armv7m_nvic_neg_prio_requested armv7m_nvic_neg_prio_requested_arm +#define helper_v7m_vlstm helper_v7m_vlstm_arm +#define helper_v7m_vlldm helper_v7m_vlldm_arm +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_arm +#define helper_v7m_mrs helper_v7m_mrs_arm +#define helper_v7m_msr helper_v7m_msr_arm +#define helper_v7m_tt helper_v7m_tt_arm +#define arm_v7m_mmu_idx_all arm_v7m_mmu_idx_all_arm +#define arm_v7m_mmu_idx_for_secstate_and_priv arm_v7m_mmu_idx_for_secstate_and_priv_arm +#define arm_v7m_mmu_idx_for_secstate arm_v7m_mmu_idx_for_secstate_arm +#define helper_neon_qadd_u8 helper_neon_qadd_u8_arm #define helper_neon_qadd_u16 helper_neon_qadd_u16_arm #define helper_neon_qadd_u32 helper_neon_qadd_u32_arm #define helper_neon_qadd_u64 helper_neon_qadd_u64_arm -#define helper_neon_qadd_u8 helper_neon_qadd_u8_arm -#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_arm -#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_arm -#define helper_neon_qneg_s16 
helper_neon_qneg_s16_arm -#define helper_neon_qneg_s32 helper_neon_qneg_s32_arm -#define helper_neon_qneg_s64 helper_neon_qneg_s64_arm -#define helper_neon_qneg_s8 helper_neon_qneg_s8_arm -#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_arm -#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_arm -#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_arm -#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_arm -#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_arm -#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_arm -#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_arm -#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_arm -#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_arm -#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_arm -#define helper_neon_qshl_s16 helper_neon_qshl_s16_arm -#define helper_neon_qshl_s32 helper_neon_qshl_s32_arm -#define helper_neon_qshl_s64 helper_neon_qshl_s64_arm -#define helper_neon_qshl_s8 helper_neon_qshl_s8_arm -#define helper_neon_qshl_u16 helper_neon_qshl_u16_arm -#define helper_neon_qshl_u32 helper_neon_qshl_u32_arm -#define helper_neon_qshl_u64 helper_neon_qshl_u64_arm -#define helper_neon_qshl_u8 helper_neon_qshl_u8_arm -#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_arm -#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_arm -#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_arm -#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_arm -#define helper_neon_qsub_s16 helper_neon_qsub_s16_arm -#define helper_neon_qsub_s32 helper_neon_qsub_s32_arm -#define helper_neon_qsub_s64 helper_neon_qsub_s64_arm -#define helper_neon_qsub_s8 helper_neon_qsub_s8_arm -#define helper_neon_qsub_u16 helper_neon_qsub_u16_arm -#define helper_neon_qsub_u32 helper_neon_qsub_u32_arm -#define helper_neon_qsub_u64 helper_neon_qsub_u64_arm -#define helper_neon_qsub_u8 helper_neon_qsub_u8_arm -#define helper_neon_qunzip16 helper_neon_qunzip16_arm -#define helper_neon_qunzip32 helper_neon_qunzip32_arm -#define helper_neon_qunzip8 helper_neon_qunzip8_arm -#define helper_neon_qzip16 helper_neon_qzip16_arm -#define helper_neon_qzip32 helper_neon_qzip32_arm -#define helper_neon_qzip8 helper_neon_qzip8_arm -#define helper_neon_rbit_u8 helper_neon_rbit_u8_arm -#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_arm -#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_arm -#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_arm -#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_arm -#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_arm -#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_arm -#define helper_neon_rshl_s16 helper_neon_rshl_s16_arm -#define helper_neon_rshl_s32 helper_neon_rshl_s32_arm -#define helper_neon_rshl_s64 helper_neon_rshl_s64_arm -#define helper_neon_rshl_s8 helper_neon_rshl_s8_arm -#define helper_neon_rshl_u16 helper_neon_rshl_u16_arm -#define helper_neon_rshl_u32 helper_neon_rshl_u32_arm -#define helper_neon_rshl_u64 helper_neon_rshl_u64_arm -#define helper_neon_rshl_u8 helper_neon_rshl_u8_arm -#define helper_neon_shl_s16 helper_neon_shl_s16_arm -#define helper_neon_shl_s32 helper_neon_shl_s32_arm -#define helper_neon_shl_s64 helper_neon_shl_s64_arm -#define helper_neon_shl_s8 helper_neon_shl_s8_arm -#define helper_neon_shl_u16 helper_neon_shl_u16_arm -#define helper_neon_shl_u32 helper_neon_shl_u32_arm -#define helper_neon_shl_u64 helper_neon_shl_u64_arm -#define helper_neon_shl_u8 helper_neon_shl_u8_arm -#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_arm -#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_arm -#define 
helper_neon_sqadd_u64 helper_neon_sqadd_u64_arm -#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_arm -#define helper_neon_subl_u16 helper_neon_subl_u16_arm -#define helper_neon_subl_u32 helper_neon_subl_u32_arm -#define helper_neon_sub_u16 helper_neon_sub_u16_arm -#define helper_neon_sub_u8 helper_neon_sub_u8_arm -#define helper_neon_tbl helper_neon_tbl_arm -#define helper_neon_tst_u16 helper_neon_tst_u16_arm -#define helper_neon_tst_u32 helper_neon_tst_u32_arm -#define helper_neon_tst_u8 helper_neon_tst_u8_arm -#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_arm -#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_arm -#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_arm -#define helper_neon_unzip16 helper_neon_unzip16_arm -#define helper_neon_unzip8 helper_neon_unzip8_arm +#define helper_neon_qadd_s8 helper_neon_qadd_s8_arm +#define helper_neon_qadd_s16 helper_neon_qadd_s16_arm +#define helper_neon_qadd_s32 helper_neon_qadd_s32_arm +#define helper_neon_qadd_s64 helper_neon_qadd_s64_arm +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_arm #define helper_neon_uqadd_s16 helper_neon_uqadd_s16_arm #define helper_neon_uqadd_s32 helper_neon_uqadd_s32_arm #define helper_neon_uqadd_s64 helper_neon_uqadd_s64_arm -#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_arm -#define helper_neon_widen_s16 helper_neon_widen_s16_arm +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_arm +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_arm +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_arm +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_arm +#define helper_neon_qsub_u8 helper_neon_qsub_u8_arm +#define helper_neon_qsub_u16 helper_neon_qsub_u16_arm +#define helper_neon_qsub_u32 helper_neon_qsub_u32_arm +#define helper_neon_qsub_u64 helper_neon_qsub_u64_arm +#define helper_neon_qsub_s8 helper_neon_qsub_s8_arm +#define helper_neon_qsub_s16 helper_neon_qsub_s16_arm +#define helper_neon_qsub_s32 helper_neon_qsub_s32_arm +#define helper_neon_qsub_s64 helper_neon_qsub_s64_arm +#define helper_neon_hadd_s8 helper_neon_hadd_s8_arm +#define helper_neon_hadd_u8 helper_neon_hadd_u8_arm +#define helper_neon_hadd_s16 helper_neon_hadd_s16_arm +#define helper_neon_hadd_u16 helper_neon_hadd_u16_arm +#define helper_neon_hadd_s32 helper_neon_hadd_s32_arm +#define helper_neon_hadd_u32 helper_neon_hadd_u32_arm +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_arm +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_arm +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_arm +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_arm +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_arm +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_arm +#define helper_neon_hsub_s8 helper_neon_hsub_s8_arm +#define helper_neon_hsub_u8 helper_neon_hsub_u8_arm +#define helper_neon_hsub_s16 helper_neon_hsub_s16_arm +#define helper_neon_hsub_u16 helper_neon_hsub_u16_arm +#define helper_neon_hsub_s32 helper_neon_hsub_s32_arm +#define helper_neon_hsub_u32 helper_neon_hsub_u32_arm +#define helper_neon_cgt_s8 helper_neon_cgt_s8_arm +#define helper_neon_cgt_u8 helper_neon_cgt_u8_arm +#define helper_neon_cgt_s16 helper_neon_cgt_s16_arm +#define helper_neon_cgt_u16 helper_neon_cgt_u16_arm +#define helper_neon_cgt_s32 helper_neon_cgt_s32_arm +#define helper_neon_cgt_u32 helper_neon_cgt_u32_arm +#define helper_neon_cge_s8 helper_neon_cge_s8_arm +#define helper_neon_cge_u8 helper_neon_cge_u8_arm +#define helper_neon_cge_s16 helper_neon_cge_s16_arm +#define helper_neon_cge_u16 helper_neon_cge_u16_arm +#define 
helper_neon_cge_s32 helper_neon_cge_s32_arm +#define helper_neon_cge_u32 helper_neon_cge_u32_arm +#define helper_neon_pmin_s8 helper_neon_pmin_s8_arm +#define helper_neon_pmin_u8 helper_neon_pmin_u8_arm +#define helper_neon_pmin_s16 helper_neon_pmin_s16_arm +#define helper_neon_pmin_u16 helper_neon_pmin_u16_arm +#define helper_neon_pmax_s8 helper_neon_pmax_s8_arm +#define helper_neon_pmax_u8 helper_neon_pmax_u8_arm +#define helper_neon_pmax_s16 helper_neon_pmax_s16_arm +#define helper_neon_pmax_u16 helper_neon_pmax_u16_arm +#define helper_neon_abd_s8 helper_neon_abd_s8_arm +#define helper_neon_abd_u8 helper_neon_abd_u8_arm +#define helper_neon_abd_s16 helper_neon_abd_s16_arm +#define helper_neon_abd_u16 helper_neon_abd_u16_arm +#define helper_neon_abd_s32 helper_neon_abd_s32_arm +#define helper_neon_abd_u32 helper_neon_abd_u32_arm +#define helper_neon_shl_u16 helper_neon_shl_u16_arm +#define helper_neon_shl_s16 helper_neon_shl_s16_arm +#define helper_neon_rshl_s8 helper_neon_rshl_s8_arm +#define helper_neon_rshl_s16 helper_neon_rshl_s16_arm +#define helper_neon_rshl_s32 helper_neon_rshl_s32_arm +#define helper_neon_rshl_s64 helper_neon_rshl_s64_arm +#define helper_neon_rshl_u8 helper_neon_rshl_u8_arm +#define helper_neon_rshl_u16 helper_neon_rshl_u16_arm +#define helper_neon_rshl_u32 helper_neon_rshl_u32_arm +#define helper_neon_rshl_u64 helper_neon_rshl_u64_arm +#define helper_neon_qshl_u8 helper_neon_qshl_u8_arm +#define helper_neon_qshl_u16 helper_neon_qshl_u16_arm +#define helper_neon_qshl_u32 helper_neon_qshl_u32_arm +#define helper_neon_qshl_u64 helper_neon_qshl_u64_arm +#define helper_neon_qshl_s8 helper_neon_qshl_s8_arm +#define helper_neon_qshl_s16 helper_neon_qshl_s16_arm +#define helper_neon_qshl_s32 helper_neon_qshl_s32_arm +#define helper_neon_qshl_s64 helper_neon_qshl_s64_arm +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_arm +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_arm +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_arm +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_arm +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_arm +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_arm +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_arm +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_arm +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_arm +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_arm +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_arm +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_arm +#define helper_neon_add_u8 helper_neon_add_u8_arm +#define helper_neon_add_u16 helper_neon_add_u16_arm +#define helper_neon_padd_u8 helper_neon_padd_u8_arm +#define helper_neon_padd_u16 helper_neon_padd_u16_arm +#define helper_neon_sub_u8 helper_neon_sub_u8_arm +#define helper_neon_sub_u16 helper_neon_sub_u16_arm +#define helper_neon_mul_u8 helper_neon_mul_u8_arm +#define helper_neon_mul_u16 helper_neon_mul_u16_arm +#define helper_neon_tst_u8 helper_neon_tst_u8_arm +#define helper_neon_tst_u16 helper_neon_tst_u16_arm +#define helper_neon_tst_u32 helper_neon_tst_u32_arm +#define helper_neon_ceq_u8 helper_neon_ceq_u8_arm +#define helper_neon_ceq_u16 helper_neon_ceq_u16_arm +#define helper_neon_ceq_u32 helper_neon_ceq_u32_arm +#define helper_neon_clz_u8 helper_neon_clz_u8_arm +#define helper_neon_clz_u16 helper_neon_clz_u16_arm +#define helper_neon_cls_s8 helper_neon_cls_s8_arm +#define helper_neon_cls_s16 helper_neon_cls_s16_arm +#define helper_neon_cls_s32 helper_neon_cls_s32_arm +#define helper_neon_cnt_u8 helper_neon_cnt_u8_arm +#define 
helper_neon_rbit_u8 helper_neon_rbit_u8_arm +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_arm +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_arm +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_arm +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_arm +#define helper_neon_narrow_u8 helper_neon_narrow_u8_arm +#define helper_neon_narrow_u16 helper_neon_narrow_u16_arm +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_arm +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_arm +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_arm +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_arm +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_arm +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_arm +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_arm +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_arm +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_arm +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_arm +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_arm +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_arm +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_arm +#define helper_neon_widen_u8 helper_neon_widen_u8_arm #define helper_neon_widen_s8 helper_neon_widen_s8_arm #define helper_neon_widen_u16 helper_neon_widen_u16_arm -#define helper_neon_widen_u8 helper_neon_widen_u8_arm -#define helper_neon_zip16 helper_neon_zip16_arm +#define helper_neon_widen_s16 helper_neon_widen_s16_arm +#define helper_neon_addl_u16 helper_neon_addl_u16_arm +#define helper_neon_addl_u32 helper_neon_addl_u32_arm +#define helper_neon_paddl_u16 helper_neon_paddl_u16_arm +#define helper_neon_paddl_u32 helper_neon_paddl_u32_arm +#define helper_neon_subl_u16 helper_neon_subl_u16_arm +#define helper_neon_subl_u32 helper_neon_subl_u32_arm +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_arm +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_arm +#define helper_neon_abdl_u16 helper_neon_abdl_u16_arm +#define helper_neon_abdl_s16 helper_neon_abdl_s16_arm +#define helper_neon_abdl_u32 helper_neon_abdl_u32_arm +#define helper_neon_abdl_s32 helper_neon_abdl_s32_arm +#define helper_neon_abdl_u64 helper_neon_abdl_u64_arm +#define helper_neon_abdl_s64 helper_neon_abdl_s64_arm +#define helper_neon_mull_u8 helper_neon_mull_u8_arm +#define helper_neon_mull_s8 helper_neon_mull_s8_arm +#define helper_neon_mull_u16 helper_neon_mull_u16_arm +#define helper_neon_mull_s16 helper_neon_mull_s16_arm +#define helper_neon_negl_u16 helper_neon_negl_u16_arm +#define helper_neon_negl_u32 helper_neon_negl_u32_arm +#define helper_neon_qabs_s8 helper_neon_qabs_s8_arm +#define helper_neon_qneg_s8 helper_neon_qneg_s8_arm +#define helper_neon_qabs_s16 helper_neon_qabs_s16_arm +#define helper_neon_qneg_s16 helper_neon_qneg_s16_arm +#define helper_neon_qabs_s32 helper_neon_qabs_s32_arm +#define helper_neon_qneg_s32 helper_neon_qneg_s32_arm +#define helper_neon_qabs_s64 helper_neon_qabs_s64_arm +#define helper_neon_qneg_s64 helper_neon_qneg_s64_arm +#define helper_neon_abd_f32 helper_neon_abd_f32_arm +#define helper_neon_ceq_f32 helper_neon_ceq_f32_arm +#define helper_neon_cge_f32 helper_neon_cge_f32_arm +#define helper_neon_cgt_f32 helper_neon_cgt_f32_arm +#define helper_neon_acge_f32 helper_neon_acge_f32_arm +#define helper_neon_acgt_f32 helper_neon_acgt_f32_arm +#define helper_neon_acge_f64 
helper_neon_acge_f64_arm +#define helper_neon_acgt_f64 helper_neon_acgt_f64_arm +#define helper_neon_qunzip8 helper_neon_qunzip8_arm +#define helper_neon_qunzip16 helper_neon_qunzip16_arm +#define helper_neon_qunzip32 helper_neon_qunzip32_arm +#define helper_neon_unzip8 helper_neon_unzip8_arm +#define helper_neon_unzip16 helper_neon_unzip16_arm +#define helper_neon_qzip8 helper_neon_qzip8_arm +#define helper_neon_qzip16 helper_neon_qzip16_arm +#define helper_neon_qzip32 helper_neon_qzip32_arm #define helper_neon_zip8 helper_neon_zip8_arm -#define helper_pre_hvc helper_pre_hvc_arm -#define helper_pre_smc helper_pre_smc_arm -#define helper_qadd16 helper_qadd16_arm -#define helper_qadd8 helper_qadd8_arm -#define helper_qaddsubx helper_qaddsubx_arm -#define helper_qsub16 helper_qsub16_arm -#define helper_qsub8 helper_qsub8_arm -#define helper_qsubaddx helper_qsubaddx_arm -#define helper_rbit helper_rbit_arm -#define helper_recpe_f32 helper_recpe_f32_arm -#define helper_recpe_f64 helper_recpe_f64_arm -#define helper_recpe_u32 helper_recpe_u32_arm -#define helper_recps_f32 helper_recps_f32_arm -#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_arm -#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_arm -#define helper_ret_ldub_mmu helper_ret_ldub_mmu_arm -#define helper_ret_stb_mmu helper_ret_stb_mmu_arm -#define helper_rintd helper_rintd_arm -#define helper_rintd_exact helper_rintd_exact_arm -#define helper_rints helper_rints_arm -#define helper_rints_exact helper_rints_exact_arm -#define helper_ror_cc helper_ror_cc_arm -#define helper_rsqrte_f32 helper_rsqrte_f32_arm -#define helper_rsqrte_f64 helper_rsqrte_f64_arm -#define helper_rsqrte_u32 helper_rsqrte_u32_arm -#define helper_rsqrts_f32 helper_rsqrts_f32_arm -#define helper_sadd16 helper_sadd16_arm -#define helper_sadd8 helper_sadd8_arm -#define helper_saddsubx helper_saddsubx_arm -#define helper_sar_cc helper_sar_cc_arm -#define helper_sdiv helper_sdiv_arm -#define helper_sel_flags helper_sel_flags_arm -#define helper_set_cp_reg helper_set_cp_reg_arm -#define helper_set_cp_reg64 helper_set_cp_reg64_arm -#define helper_set_neon_rmode helper_set_neon_rmode_arm -#define helper_set_r13_banked helper_set_r13_banked_arm -#define helper_set_rmode helper_set_rmode_arm -#define helper_set_user_reg helper_set_user_reg_arm -#define helper_shadd16 helper_shadd16_arm -#define helper_shadd8 helper_shadd8_arm -#define helper_shaddsubx helper_shaddsubx_arm -#define helper_shl_cc helper_shl_cc_arm -#define helper_shr_cc helper_shr_cc_arm -#define helper_shsub16 helper_shsub16_arm -#define helper_shsub8 helper_shsub8_arm -#define helper_shsubaddx helper_shsubaddx_arm +#define helper_neon_zip16 helper_neon_zip16_arm +#define raise_exception raise_exception_arm +#define raise_exception_ra raise_exception_ra_arm +#define helper_neon_tbl helper_neon_tbl_arm +#define helper_v8m_stackcheck helper_v8m_stackcheck_arm +#define helper_add_setq helper_add_setq_arm +#define helper_add_saturate helper_add_saturate_arm +#define helper_sub_saturate helper_sub_saturate_arm +#define helper_add_usaturate helper_add_usaturate_arm +#define helper_sub_usaturate helper_sub_usaturate_arm #define helper_ssat helper_ssat_arm #define helper_ssat16 helper_ssat16_arm -#define helper_ssub16 helper_ssub16_arm -#define helper_ssub8 helper_ssub8_arm -#define helper_ssubaddx helper_ssubaddx_arm -#define helper_stb_mmu helper_stb_mmu_arm -#define helper_stl_mmu helper_stl_mmu_arm -#define helper_stq_mmu helper_stq_mmu_arm -#define helper_stw_mmu helper_stw_mmu_arm -#define helper_sub_saturate 
helper_sub_saturate_arm -#define helper_sub_usaturate helper_sub_usaturate_arm -#define helper_sxtb16 helper_sxtb16_arm -#define helper_uadd16 helper_uadd16_arm -#define helper_uadd8 helper_uadd8_arm -#define helper_uaddsubx helper_uaddsubx_arm -#define helper_udiv helper_udiv_arm -#define helper_uhadd16 helper_uhadd16_arm -#define helper_uhadd8 helper_uhadd8_arm -#define helper_uhaddsubx helper_uhaddsubx_arm -#define helper_uhsub16 helper_uhsub16_arm -#define helper_uhsub8 helper_uhsub8_arm -#define helper_uhsubaddx helper_uhsubaddx_arm -#define helper_uqadd16 helper_uqadd16_arm -#define helper_uqadd8 helper_uqadd8_arm -#define helper_uqaddsubx helper_uqaddsubx_arm -#define helper_uqsub16 helper_uqsub16_arm -#define helper_uqsub8 helper_uqsub8_arm -#define helper_uqsubaddx helper_uqsubaddx_arm -#define helper_usad8 helper_usad8_arm #define helper_usat helper_usat_arm #define helper_usat16 helper_usat16_arm -#define helper_usub16 helper_usub16_arm -#define helper_usub8 helper_usub8_arm -#define helper_usubaddx helper_usubaddx_arm -#define helper_uxtb16 helper_uxtb16_arm -#define helper_v7m_mrs helper_v7m_mrs_arm -#define helper_v7m_msr helper_v7m_msr_arm -#define helper_vfp_absd helper_vfp_absd_arm -#define helper_vfp_abss helper_vfp_abss_arm -#define helper_vfp_addd helper_vfp_addd_arm +#define helper_setend helper_setend_arm +#define helper_wfi helper_wfi_arm +#define helper_wfe helper_wfe_arm +#define helper_yield helper_yield_arm +#define helper_exception_internal helper_exception_internal_arm +#define helper_exception_with_syndrome helper_exception_with_syndrome_arm +#define helper_exception_bkpt_insn helper_exception_bkpt_insn_arm +#define helper_cpsr_read helper_cpsr_read_arm +#define helper_cpsr_write helper_cpsr_write_arm +#define helper_cpsr_write_eret helper_cpsr_write_eret_arm +#define helper_get_user_reg helper_get_user_reg_arm +#define helper_set_user_reg helper_set_user_reg_arm +#define helper_set_r13_banked helper_set_r13_banked_arm +#define helper_get_r13_banked helper_get_r13_banked_arm +#define helper_msr_banked helper_msr_banked_arm +#define helper_mrs_banked helper_mrs_banked_arm +#define helper_access_check_cp_reg helper_access_check_cp_reg_arm +#define helper_set_cp_reg helper_set_cp_reg_arm +#define helper_get_cp_reg helper_get_cp_reg_arm +#define helper_set_cp_reg64 helper_set_cp_reg64_arm +#define helper_get_cp_reg64 helper_get_cp_reg64_arm +#define helper_pre_hvc helper_pre_hvc_arm +#define helper_pre_smc helper_pre_smc_arm +#define helper_shl_cc helper_shl_cc_arm +#define helper_shr_cc helper_shr_cc_arm +#define helper_sar_cc helper_sar_cc_arm +#define helper_ror_cc helper_ror_cc_arm +#define arm_is_psci_call arm_is_psci_call_arm +#define arm_handle_psci_call arm_handle_psci_call_arm +#define arm_cpu_do_unaligned_access arm_cpu_do_unaligned_access_arm +#define arm_cpu_do_transaction_failed arm_cpu_do_transaction_failed_arm +#define arm_cpu_tlb_fill arm_cpu_tlb_fill_arm +#define arm_translate_init arm_translate_init_arm +#define arm_test_cc arm_test_cc_arm +#define arm_free_cc arm_free_cc_arm +#define arm_jump_cc arm_jump_cc_arm +#define arm_gen_test_cc arm_gen_test_cc_arm +#define vfp_expand_imm vfp_expand_imm_arm +#define gen_cmtst_i64 gen_cmtst_i64_arm +#define gen_ushl_i32 gen_ushl_i32_arm +#define gen_ushl_i64 gen_ushl_i64_arm +#define gen_sshl_i32 gen_sshl_i32_arm +#define gen_sshl_i64 gen_sshl_i64_arm +#define gen_intermediate_code gen_intermediate_code_arm +#define restore_state_to_opc restore_state_to_opc_arm +#define helper_neon_qrdmlah_s16 
helper_neon_qrdmlah_s16_arm +#define helper_gvec_qrdmlah_s16 helper_gvec_qrdmlah_s16_arm +#define helper_neon_qrdmlsh_s16 helper_neon_qrdmlsh_s16_arm +#define helper_gvec_qrdmlsh_s16 helper_gvec_qrdmlsh_s16_arm +#define helper_neon_qrdmlah_s32 helper_neon_qrdmlah_s32_arm +#define helper_gvec_qrdmlah_s32 helper_gvec_qrdmlah_s32_arm +#define helper_neon_qrdmlsh_s32 helper_neon_qrdmlsh_s32_arm +#define helper_gvec_qrdmlsh_s32 helper_gvec_qrdmlsh_s32_arm +#define helper_gvec_sdot_b helper_gvec_sdot_b_arm +#define helper_gvec_udot_b helper_gvec_udot_b_arm +#define helper_gvec_sdot_h helper_gvec_sdot_h_arm +#define helper_gvec_udot_h helper_gvec_udot_h_arm +#define helper_gvec_sdot_idx_b helper_gvec_sdot_idx_b_arm +#define helper_gvec_udot_idx_b helper_gvec_udot_idx_b_arm +#define helper_gvec_sdot_idx_h helper_gvec_sdot_idx_h_arm +#define helper_gvec_udot_idx_h helper_gvec_udot_idx_h_arm +#define helper_gvec_fcaddh helper_gvec_fcaddh_arm +#define helper_gvec_fcadds helper_gvec_fcadds_arm +#define helper_gvec_fcaddd helper_gvec_fcaddd_arm +#define helper_gvec_fcmlah helper_gvec_fcmlah_arm +#define helper_gvec_fcmlah_idx helper_gvec_fcmlah_idx_arm +#define helper_gvec_fcmlas helper_gvec_fcmlas_arm +#define helper_gvec_fcmlas_idx helper_gvec_fcmlas_idx_arm +#define helper_gvec_fcmlad helper_gvec_fcmlad_arm +#define helper_gvec_frecpe_h helper_gvec_frecpe_h_arm +#define helper_gvec_frecpe_s helper_gvec_frecpe_s_arm +#define helper_gvec_frecpe_d helper_gvec_frecpe_d_arm +#define helper_gvec_frsqrte_h helper_gvec_frsqrte_h_arm +#define helper_gvec_frsqrte_s helper_gvec_frsqrte_s_arm +#define helper_gvec_frsqrte_d helper_gvec_frsqrte_d_arm +#define helper_gvec_fadd_h helper_gvec_fadd_h_arm +#define helper_gvec_fadd_s helper_gvec_fadd_s_arm +#define helper_gvec_fadd_d helper_gvec_fadd_d_arm +#define helper_gvec_fsub_h helper_gvec_fsub_h_arm +#define helper_gvec_fsub_s helper_gvec_fsub_s_arm +#define helper_gvec_fsub_d helper_gvec_fsub_d_arm +#define helper_gvec_fmul_h helper_gvec_fmul_h_arm +#define helper_gvec_fmul_s helper_gvec_fmul_s_arm +#define helper_gvec_fmul_d helper_gvec_fmul_d_arm +#define helper_gvec_ftsmul_h helper_gvec_ftsmul_h_arm +#define helper_gvec_ftsmul_s helper_gvec_ftsmul_s_arm +#define helper_gvec_ftsmul_d helper_gvec_ftsmul_d_arm +#define helper_gvec_fmul_idx_h helper_gvec_fmul_idx_h_arm +#define helper_gvec_fmul_idx_s helper_gvec_fmul_idx_s_arm +#define helper_gvec_fmul_idx_d helper_gvec_fmul_idx_d_arm +#define helper_gvec_fmla_idx_h helper_gvec_fmla_idx_h_arm +#define helper_gvec_fmla_idx_s helper_gvec_fmla_idx_s_arm +#define helper_gvec_fmla_idx_d helper_gvec_fmla_idx_d_arm +#define helper_gvec_uqadd_b helper_gvec_uqadd_b_arm +#define helper_gvec_uqadd_h helper_gvec_uqadd_h_arm +#define helper_gvec_uqadd_s helper_gvec_uqadd_s_arm +#define helper_gvec_sqadd_b helper_gvec_sqadd_b_arm +#define helper_gvec_sqadd_h helper_gvec_sqadd_h_arm +#define helper_gvec_sqadd_s helper_gvec_sqadd_s_arm +#define helper_gvec_uqsub_b helper_gvec_uqsub_b_arm +#define helper_gvec_uqsub_h helper_gvec_uqsub_h_arm +#define helper_gvec_uqsub_s helper_gvec_uqsub_s_arm +#define helper_gvec_sqsub_b helper_gvec_sqsub_b_arm +#define helper_gvec_sqsub_h helper_gvec_sqsub_h_arm +#define helper_gvec_sqsub_s helper_gvec_sqsub_s_arm +#define helper_gvec_uqadd_d helper_gvec_uqadd_d_arm +#define helper_gvec_uqsub_d helper_gvec_uqsub_d_arm +#define helper_gvec_sqadd_d helper_gvec_sqadd_d_arm +#define helper_gvec_sqsub_d helper_gvec_sqsub_d_arm +#define helper_gvec_fmlal_a32 helper_gvec_fmlal_a32_arm +#define 
helper_gvec_fmlal_a64 helper_gvec_fmlal_a64_arm +#define helper_gvec_fmlal_idx_a32 helper_gvec_fmlal_idx_a32_arm +#define helper_gvec_fmlal_idx_a64 helper_gvec_fmlal_idx_a64_arm +#define helper_gvec_sshl_b helper_gvec_sshl_b_arm +#define helper_gvec_sshl_h helper_gvec_sshl_h_arm +#define helper_gvec_ushl_b helper_gvec_ushl_b_arm +#define helper_gvec_ushl_h helper_gvec_ushl_h_arm +#define helper_gvec_pmul_b helper_gvec_pmul_b_arm +#define helper_gvec_pmull_q helper_gvec_pmull_q_arm +#define helper_neon_pmull_h helper_neon_pmull_h_arm +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_arm +#define vfp_get_fpscr vfp_get_fpscr_arm +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_arm +#define vfp_set_fpscr vfp_set_fpscr_arm #define helper_vfp_adds helper_vfp_adds_arm +#define helper_vfp_addd helper_vfp_addd_arm +#define helper_vfp_subs helper_vfp_subs_arm +#define helper_vfp_subd helper_vfp_subd_arm +#define helper_vfp_muls helper_vfp_muls_arm +#define helper_vfp_muld helper_vfp_muld_arm +#define helper_vfp_divs helper_vfp_divs_arm +#define helper_vfp_divd helper_vfp_divd_arm +#define helper_vfp_mins helper_vfp_mins_arm +#define helper_vfp_mind helper_vfp_mind_arm +#define helper_vfp_maxs helper_vfp_maxs_arm +#define helper_vfp_maxd helper_vfp_maxd_arm +#define helper_vfp_minnums helper_vfp_minnums_arm +#define helper_vfp_minnumd helper_vfp_minnumd_arm +#define helper_vfp_maxnums helper_vfp_maxnums_arm +#define helper_vfp_maxnumd helper_vfp_maxnumd_arm +#define helper_vfp_negs helper_vfp_negs_arm +#define helper_vfp_negd helper_vfp_negd_arm +#define helper_vfp_abss helper_vfp_abss_arm +#define helper_vfp_absd helper_vfp_absd_arm +#define helper_vfp_sqrts helper_vfp_sqrts_arm +#define helper_vfp_sqrtd helper_vfp_sqrtd_arm +#define helper_vfp_cmps helper_vfp_cmps_arm +#define helper_vfp_cmpes helper_vfp_cmpes_arm #define helper_vfp_cmpd helper_vfp_cmpd_arm #define helper_vfp_cmped helper_vfp_cmped_arm -#define helper_vfp_cmpes helper_vfp_cmpes_arm -#define helper_vfp_cmps helper_vfp_cmps_arm -#define helper_vfp_divd helper_vfp_divd_arm -#define helper_vfp_divs helper_vfp_divs_arm -#define helper_vfp_fcvtds helper_vfp_fcvtds_arm -#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_arm -#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_arm -#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_arm -#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_arm -#define helper_vfp_fcvtsd helper_vfp_fcvtsd_arm -#define helper_vfp_get_fpscr helper_vfp_get_fpscr_arm -#define helper_vfp_maxd helper_vfp_maxd_arm -#define helper_vfp_maxnumd helper_vfp_maxnumd_arm -#define helper_vfp_maxnums helper_vfp_maxnums_arm -#define helper_vfp_maxs helper_vfp_maxs_arm -#define helper_vfp_mind helper_vfp_mind_arm -#define helper_vfp_minnumd helper_vfp_minnumd_arm -#define helper_vfp_minnums helper_vfp_minnums_arm -#define helper_vfp_mins helper_vfp_mins_arm -#define helper_vfp_muladdd helper_vfp_muladdd_arm -#define helper_vfp_muladds helper_vfp_muladds_arm -#define helper_vfp_muld helper_vfp_muld_arm -#define helper_vfp_muls helper_vfp_muls_arm -#define helper_vfp_negd helper_vfp_negd_arm -#define helper_vfp_negs helper_vfp_negs_arm -#define helper_vfp_set_fpscr helper_vfp_set_fpscr_arm -#define helper_vfp_shtod helper_vfp_shtod_arm -#define helper_vfp_shtos helper_vfp_shtos_arm -#define helper_vfp_sitod helper_vfp_sitod_arm +#define helper_vfp_sitoh helper_vfp_sitoh_arm +#define helper_vfp_tosih helper_vfp_tosih_arm +#define helper_vfp_tosizh helper_vfp_tosizh_arm #define 
helper_vfp_sitos helper_vfp_sitos_arm -#define helper_vfp_sltod helper_vfp_sltod_arm -#define helper_vfp_sltos helper_vfp_sltos_arm -#define helper_vfp_sqrtd helper_vfp_sqrtd_arm -#define helper_vfp_sqrts helper_vfp_sqrts_arm -#define helper_vfp_sqtod helper_vfp_sqtod_arm -#define helper_vfp_sqtos helper_vfp_sqtos_arm -#define helper_vfp_subd helper_vfp_subd_arm -#define helper_vfp_subs helper_vfp_subs_arm -#define helper_vfp_toshd helper_vfp_toshd_arm -#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_arm -#define helper_vfp_toshs helper_vfp_toshs_arm -#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_arm -#define helper_vfp_tosid helper_vfp_tosid_arm #define helper_vfp_tosis helper_vfp_tosis_arm -#define helper_vfp_tosizd helper_vfp_tosizd_arm #define helper_vfp_tosizs helper_vfp_tosizs_arm -#define helper_vfp_tosld helper_vfp_tosld_arm -#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_arm -#define helper_vfp_tosls helper_vfp_tosls_arm -#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_arm -#define helper_vfp_tosqd helper_vfp_tosqd_arm -#define helper_vfp_tosqs helper_vfp_tosqs_arm -#define helper_vfp_touhd helper_vfp_touhd_arm -#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_arm -#define helper_vfp_touhs helper_vfp_touhs_arm -#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_arm -#define helper_vfp_touid helper_vfp_touid_arm -#define helper_vfp_touis helper_vfp_touis_arm -#define helper_vfp_touizd helper_vfp_touizd_arm -#define helper_vfp_touizs helper_vfp_touizs_arm -#define helper_vfp_tould helper_vfp_tould_arm -#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_arm -#define helper_vfp_touls helper_vfp_touls_arm -#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_arm -#define helper_vfp_touqd helper_vfp_touqd_arm -#define helper_vfp_touqs helper_vfp_touqs_arm -#define helper_vfp_uhtod helper_vfp_uhtod_arm -#define helper_vfp_uhtos helper_vfp_uhtos_arm -#define helper_vfp_uitod helper_vfp_uitod_arm +#define helper_vfp_sitod helper_vfp_sitod_arm +#define helper_vfp_tosid helper_vfp_tosid_arm +#define helper_vfp_tosizd helper_vfp_tosizd_arm +#define helper_vfp_uitoh helper_vfp_uitoh_arm +#define helper_vfp_touih helper_vfp_touih_arm +#define helper_vfp_touizh helper_vfp_touizh_arm #define helper_vfp_uitos helper_vfp_uitos_arm +#define helper_vfp_touis helper_vfp_touis_arm +#define helper_vfp_touizs helper_vfp_touizs_arm +#define helper_vfp_uitod helper_vfp_uitod_arm +#define helper_vfp_touid helper_vfp_touid_arm +#define helper_vfp_touizd helper_vfp_touizd_arm +#define helper_vfp_fcvtds helper_vfp_fcvtds_arm +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_arm +#define helper_vfp_shtod helper_vfp_shtod_arm +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_arm +#define helper_vfp_toshd helper_vfp_toshd_arm +#define helper_vfp_sltod helper_vfp_sltod_arm +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_arm +#define helper_vfp_tosld helper_vfp_tosld_arm +#define helper_vfp_sqtod helper_vfp_sqtod_arm +#define helper_vfp_tosqd helper_vfp_tosqd_arm +#define helper_vfp_uhtod helper_vfp_uhtod_arm +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_arm +#define helper_vfp_touhd helper_vfp_touhd_arm #define helper_vfp_ultod helper_vfp_ultod_arm -#define helper_vfp_ultos helper_vfp_ultos_arm +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_arm +#define 
helper_vfp_tould helper_vfp_tould_arm
 #define helper_vfp_uqtod helper_vfp_uqtod_arm
+#define helper_vfp_touqd helper_vfp_touqd_arm
+#define helper_vfp_shtos helper_vfp_shtos_arm
+#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_arm
+#define helper_vfp_toshs helper_vfp_toshs_arm
+#define helper_vfp_sltos helper_vfp_sltos_arm
+#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_arm
+#define helper_vfp_tosls helper_vfp_tosls_arm
+#define helper_vfp_sqtos helper_vfp_sqtos_arm
+#define helper_vfp_tosqs helper_vfp_tosqs_arm
+#define helper_vfp_uhtos helper_vfp_uhtos_arm
+#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_arm
+#define helper_vfp_touhs helper_vfp_touhs_arm
+#define helper_vfp_ultos helper_vfp_ultos_arm
+#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_arm
+#define helper_vfp_touls helper_vfp_touls_arm
 #define helper_vfp_uqtos helper_vfp_uqtos_arm
-#define helper_wfe helper_wfe_arm
-#define helper_wfi helper_wfi_arm
-#define hex2decimal hex2decimal_arm
-#define hw_breakpoint_update hw_breakpoint_update_arm
-#define hw_breakpoint_update_all hw_breakpoint_update_all_arm
-#define hw_watchpoint_update hw_watchpoint_update_arm
-#define hw_watchpoint_update_all hw_watchpoint_update_all_arm
-#define _init _init_arm
-#define init_cpreg_list init_cpreg_list_arm
-#define init_lists init_lists_arm
-#define input_type_enum input_type_enum_arm
-#define int128_2_64 int128_2_64_arm
-#define int128_add int128_add_arm
-#define int128_addto int128_addto_arm
-#define int128_and int128_and_arm
-#define int128_eq int128_eq_arm
-#define int128_ge int128_ge_arm
-#define int128_get64 int128_get64_arm
-#define int128_gt int128_gt_arm
-#define int128_le int128_le_arm
-#define int128_lt int128_lt_arm
-#define int128_make64 int128_make64_arm
-#define int128_max int128_max_arm
-#define int128_min int128_min_arm
-#define int128_ne int128_ne_arm
-#define int128_neg int128_neg_arm
-#define int128_nz int128_nz_arm
-#define int128_rshift int128_rshift_arm
-#define int128_sub int128_sub_arm
-#define int128_subfrom int128_subfrom_arm
-#define int128_zero int128_zero_arm
-#define int16_to_float32 int16_to_float32_arm
-#define int16_to_float64 int16_to_float64_arm
-#define int32_to_float128 int32_to_float128_arm
-#define int32_to_float32 int32_to_float32_arm
-#define int32_to_float64 int32_to_float64_arm
-#define int32_to_floatx80 int32_to_floatx80_arm
-#define int64_to_float128 int64_to_float128_arm
-#define int64_to_float32 int64_to_float32_arm
-#define int64_to_float64 int64_to_float64_arm
-#define int64_to_floatx80 int64_to_floatx80_arm
-#define invalidate_and_set_dirty invalidate_and_set_dirty_arm
-#define invalidate_page_bitmap invalidate_page_bitmap_arm
-#define io_mem_read io_mem_read_arm
-#define io_mem_write io_mem_write_arm
-#define io_readb io_readb_arm
-#define io_readl io_readl_arm
-#define io_readq io_readq_arm
-#define io_readw io_readw_arm
-#define iotlb_to_region iotlb_to_region_arm
-#define io_writeb io_writeb_arm
-#define io_writel io_writel_arm
-#define io_writeq io_writeq_arm
-#define io_writew io_writew_arm
-#define is_a64 is_a64_arm
-#define is_help_option is_help_option_arm
-#define isr_read isr_read_arm
-#define is_valid_option_list is_valid_option_list_arm
-#define iwmmxt_load_creg iwmmxt_load_creg_arm
-#define iwmmxt_load_reg iwmmxt_load_reg_arm
-#define iwmmxt_store_creg iwmmxt_store_creg_arm
-#define iwmmxt_store_reg iwmmxt_store_reg_arm
-#define __jit_debug_descriptor __jit_debug_descriptor_arm
-#define __jit_debug_register_code __jit_debug_register_code_arm
-#define kvm_to_cpreg_id kvm_to_cpreg_id_arm
-#define last_ram_offset last_ram_offset_arm
-#define ldl_be_p ldl_be_p_arm
-#define ldl_be_phys ldl_be_phys_arm
-#define ldl_he_p ldl_he_p_arm
-#define ldl_le_p ldl_le_p_arm
-#define ldl_le_phys ldl_le_phys_arm
-#define ldl_phys ldl_phys_arm
-#define ldl_phys_internal ldl_phys_internal_arm
-#define ldq_be_p ldq_be_p_arm
-#define ldq_be_phys ldq_be_phys_arm
-#define ldq_he_p ldq_he_p_arm
-#define ldq_le_p ldq_le_p_arm
-#define ldq_le_phys ldq_le_phys_arm
-#define ldq_phys ldq_phys_arm
-#define ldq_phys_internal ldq_phys_internal_arm
-#define ldst_name ldst_name_arm
-#define ldub_p ldub_p_arm
-#define ldub_phys ldub_phys_arm
-#define lduw_be_p lduw_be_p_arm
-#define lduw_be_phys lduw_be_phys_arm
-#define lduw_he_p lduw_he_p_arm
-#define lduw_le_p lduw_le_p_arm
-#define lduw_le_phys lduw_le_phys_arm
-#define lduw_phys lduw_phys_arm
-#define lduw_phys_internal lduw_phys_internal_arm
-#define le128 le128_arm
-#define linked_bp_matches linked_bp_matches_arm
-#define listener_add_address_space listener_add_address_space_arm
-#define load_cpu_offset load_cpu_offset_arm
-#define load_reg load_reg_arm
-#define load_reg_var load_reg_var_arm
-#define log_cpu_state log_cpu_state_arm
-#define lpae_cp_reginfo lpae_cp_reginfo_arm
-#define lt128 lt128_arm
-#define machine_class_init machine_class_init_arm
-#define machine_finalize machine_finalize_arm
-#define machine_info machine_info_arm
-#define machine_initfn machine_initfn_arm
-#define machine_register_types machine_register_types_arm
-#define machvirt_init machvirt_init_arm
-#define machvirt_machine_init machvirt_machine_init_arm
-#define maj maj_arm
-#define mapping_conflict mapping_conflict_arm
-#define mapping_contiguous mapping_contiguous_arm
-#define mapping_have_same_region mapping_have_same_region_arm
-#define mapping_merge mapping_merge_arm
-#define mem_add mem_add_arm
-#define mem_begin mem_begin_arm
-#define mem_commit mem_commit_arm
-#define memory_access_is_direct memory_access_is_direct_arm
-#define memory_access_size memory_access_size_arm
-#define memory_init memory_init_arm
-#define memory_listener_match memory_listener_match_arm
-#define memory_listener_register memory_listener_register_arm
-#define memory_listener_unregister memory_listener_unregister_arm
-#define memory_map_init memory_map_init_arm
-#define memory_mapping_filter memory_mapping_filter_arm
-#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_arm
-#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_arm
-#define memory_mapping_list_free memory_mapping_list_free_arm
-#define memory_mapping_list_init memory_mapping_list_init_arm
-#define memory_region_access_valid memory_region_access_valid_arm
-#define memory_region_add_subregion memory_region_add_subregion_arm
-#define memory_region_add_subregion_common memory_region_add_subregion_common_arm
-#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_arm
-#define memory_region_big_endian memory_region_big_endian_arm
-#define memory_region_clear_pending memory_region_clear_pending_arm
-#define memory_region_del_subregion memory_region_del_subregion_arm
-#define memory_region_destructor_alias memory_region_destructor_alias_arm
-#define memory_region_destructor_none memory_region_destructor_none_arm
-#define memory_region_destructor_ram memory_region_destructor_ram_arm
-#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_arm
-#define memory_region_dispatch_read memory_region_dispatch_read_arm
-#define memory_region_dispatch_read1 memory_region_dispatch_read1_arm
-#define memory_region_dispatch_write memory_region_dispatch_write_arm
-#define memory_region_escape_name memory_region_escape_name_arm
-#define memory_region_finalize memory_region_finalize_arm
-#define memory_region_find memory_region_find_arm
-#define memory_region_get_addr memory_region_get_addr_arm
-#define memory_region_get_alignment memory_region_get_alignment_arm
-#define memory_region_get_container memory_region_get_container_arm
-#define memory_region_get_fd memory_region_get_fd_arm
-#define memory_region_get_may_overlap memory_region_get_may_overlap_arm
-#define memory_region_get_priority memory_region_get_priority_arm
-#define memory_region_get_ram_addr memory_region_get_ram_addr_arm
-#define memory_region_get_ram_ptr memory_region_get_ram_ptr_arm
-#define memory_region_get_size memory_region_get_size_arm
-#define memory_region_info memory_region_info_arm
-#define memory_region_init memory_region_init_arm
-#define memory_region_init_alias memory_region_init_alias_arm
-#define memory_region_initfn memory_region_initfn_arm
-#define memory_region_init_io memory_region_init_io_arm
-#define memory_region_init_ram memory_region_init_ram_arm
-#define memory_region_init_ram_ptr memory_region_init_ram_ptr_arm
-#define memory_region_init_reservation memory_region_init_reservation_arm
-#define memory_region_is_iommu memory_region_is_iommu_arm
-#define memory_region_is_logging memory_region_is_logging_arm
-#define memory_region_is_mapped memory_region_is_mapped_arm
-#define memory_region_is_ram memory_region_is_ram_arm
-#define memory_region_is_rom memory_region_is_rom_arm
-#define memory_region_is_romd memory_region_is_romd_arm
-#define memory_region_is_skip_dump memory_region_is_skip_dump_arm
-#define memory_region_is_unassigned memory_region_is_unassigned_arm
-#define memory_region_name memory_region_name_arm
-#define memory_region_need_escape memory_region_need_escape_arm
-#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_arm
-#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_arm
-#define memory_region_present memory_region_present_arm
-#define memory_region_read_accessor memory_region_read_accessor_arm
-#define memory_region_readd_subregion memory_region_readd_subregion_arm
-#define memory_region_ref memory_region_ref_arm
-#define memory_region_resolve_container memory_region_resolve_container_arm
-#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_arm
-#define memory_region_section_get_iotlb memory_region_section_get_iotlb_arm
-#define memory_region_set_address memory_region_set_address_arm
-#define memory_region_set_alias_offset memory_region_set_alias_offset_arm
-#define memory_region_set_enabled memory_region_set_enabled_arm
-#define memory_region_set_readonly memory_region_set_readonly_arm
-#define memory_region_set_skip_dump memory_region_set_skip_dump_arm
-#define memory_region_size memory_region_size_arm
-#define memory_region_to_address_space memory_region_to_address_space_arm
-#define memory_region_transaction_begin memory_region_transaction_begin_arm
-#define memory_region_transaction_commit memory_region_transaction_commit_arm
-#define memory_region_unref memory_region_unref_arm
-#define memory_region_update_container_subregions memory_region_update_container_subregions_arm
-#define memory_region_write_accessor memory_region_write_accessor_arm
-#define memory_region_wrong_endianness memory_region_wrong_endianness_arm
-#define memory_try_enable_merging memory_try_enable_merging_arm
-#define module_call_init module_call_init_arm
-#define module_load module_load_arm
-#define mpidr_cp_reginfo mpidr_cp_reginfo_arm
-#define mpidr_read mpidr_read_arm
-#define msr_mask msr_mask_arm
-#define mul128By64To192 mul128By64To192_arm
-#define mul128To256 mul128To256_arm
-#define mul64To128 mul64To128_arm
-#define muldiv64 muldiv64_arm
-#define neon_2rm_is_float_op neon_2rm_is_float_op_arm
-#define neon_2rm_sizes neon_2rm_sizes_arm
-#define neon_3r_sizes neon_3r_sizes_arm
-#define neon_get_scalar neon_get_scalar_arm
-#define neon_load_reg neon_load_reg_arm
-#define neon_load_reg64 neon_load_reg64_arm
-#define neon_load_scratch neon_load_scratch_arm
-#define neon_ls_element_type neon_ls_element_type_arm
-#define neon_reg_offset neon_reg_offset_arm
-#define neon_store_reg neon_store_reg_arm
-#define neon_store_reg64 neon_store_reg64_arm
-#define neon_store_scratch neon_store_scratch_arm
-#define new_ldst_label new_ldst_label_arm
-#define next_list next_list_arm
-#define normalizeFloat128Subnormal normalizeFloat128Subnormal_arm
-#define normalizeFloat16Subnormal normalizeFloat16Subnormal_arm
-#define normalizeFloat32Subnormal normalizeFloat32Subnormal_arm
-#define normalizeFloat64Subnormal normalizeFloat64Subnormal_arm
-#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_arm
-#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_arm
-#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_arm
-#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_arm
-#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_arm
-#define not_v6_cp_reginfo not_v6_cp_reginfo_arm
-#define not_v7_cp_reginfo not_v7_cp_reginfo_arm
-#define not_v8_cp_reginfo not_v8_cp_reginfo_arm
-#define object_child_foreach object_child_foreach_arm
-#define object_class_foreach object_class_foreach_arm
-#define object_class_foreach_tramp object_class_foreach_tramp_arm
-#define object_class_get_list object_class_get_list_arm
-#define object_class_get_list_tramp object_class_get_list_tramp_arm
-#define object_class_get_parent object_class_get_parent_arm
-#define object_deinit object_deinit_arm
-#define object_dynamic_cast object_dynamic_cast_arm
-#define object_finalize object_finalize_arm
-#define object_finalize_child_property object_finalize_child_property_arm
-#define object_get_child_property object_get_child_property_arm
-#define object_get_link_property object_get_link_property_arm
-#define object_get_root object_get_root_arm
-#define object_initialize_with_type object_initialize_with_type_arm
-#define object_init_with_type object_init_with_type_arm
-#define object_instance_init object_instance_init_arm
-#define object_new_with_type object_new_with_type_arm
-#define object_post_init_with_type object_post_init_with_type_arm
-#define object_property_add_alias object_property_add_alias_arm
-#define object_property_add_link object_property_add_link_arm
-#define object_property_add_uint16_ptr object_property_add_uint16_ptr_arm
-#define object_property_add_uint32_ptr object_property_add_uint32_ptr_arm
-#define object_property_add_uint64_ptr object_property_add_uint64_ptr_arm
-#define object_property_add_uint8_ptr object_property_add_uint8_ptr_arm
-#define object_property_allow_set_link object_property_allow_set_link_arm
-#define object_property_del object_property_del_arm
-#define object_property_del_all object_property_del_all_arm
-#define object_property_find object_property_find_arm
-#define object_property_get object_property_get_arm
-#define object_property_get_bool object_property_get_bool_arm
-#define object_property_get_int object_property_get_int_arm
-#define object_property_get_link object_property_get_link_arm
-#define object_property_get_qobject object_property_get_qobject_arm
-#define object_property_get_str object_property_get_str_arm
-#define object_property_get_type object_property_get_type_arm
-#define object_property_is_child object_property_is_child_arm
-#define object_property_set object_property_set_arm
-#define object_property_set_description object_property_set_description_arm
-#define object_property_set_link object_property_set_link_arm
-#define object_property_set_qobject object_property_set_qobject_arm
-#define object_release_link_property object_release_link_property_arm
-#define object_resolve_abs_path object_resolve_abs_path_arm
-#define object_resolve_child_property object_resolve_child_property_arm
-#define object_resolve_link object_resolve_link_arm
-#define object_resolve_link_property object_resolve_link_property_arm
-#define object_resolve_partial_path object_resolve_partial_path_arm
-#define object_resolve_path object_resolve_path_arm
-#define object_resolve_path_component object_resolve_path_component_arm
-#define object_resolve_path_type object_resolve_path_type_arm
-#define object_set_link_property object_set_link_property_arm
-#define object_unparent object_unparent_arm
-#define omap_cachemaint_write omap_cachemaint_write_arm
-#define omap_cp_reginfo omap_cp_reginfo_arm
-#define omap_threadid_write omap_threadid_write_arm
-#define omap_ticonfig_write omap_ticonfig_write_arm
-#define omap_wfi_write omap_wfi_write_arm
-#define op_bits op_bits_arm
-#define open_modeflags open_modeflags_arm
-#define op_to_mov op_to_mov_arm
-#define op_to_movi op_to_movi_arm
-#define output_type_enum output_type_enum_arm
-#define packFloat128 packFloat128_arm
-#define packFloat16 packFloat16_arm
-#define packFloat32 packFloat32_arm
-#define packFloat64 packFloat64_arm
-#define packFloatx80 packFloatx80_arm
-#define page_find page_find_arm
-#define page_find_alloc page_find_alloc_arm
-#define page_flush_tb page_flush_tb_arm
-#define page_flush_tb_1 page_flush_tb_1_arm
-#define page_init page_init_arm
-#define page_size_init page_size_init_arm
-#define par par_arm
-#define parse_array parse_array_arm
-#define parse_error parse_error_arm
-#define parse_escape parse_escape_arm
-#define parse_keyword parse_keyword_arm
-#define parse_literal parse_literal_arm
-#define parse_object parse_object_arm
-#define parse_optional parse_optional_arm
-#define parse_option_bool parse_option_bool_arm
-#define parse_option_number parse_option_number_arm
-#define parse_option_size parse_option_size_arm
-#define parse_pair parse_pair_arm
-#define parser_context_free parser_context_free_arm
-#define parser_context_new parser_context_new_arm
-#define parser_context_peek_token parser_context_peek_token_arm
-#define parser_context_pop_token parser_context_pop_token_arm
-#define parser_context_restore parser_context_restore_arm
-#define parser_context_save parser_context_save_arm
-#define parse_str parse_str_arm
-#define parse_type_bool parse_type_bool_arm
-#define parse_type_int parse_type_int_arm
-#define parse_type_number parse_type_number_arm
-#define parse_type_size parse_type_size_arm
-#define parse_type_str parse_type_str_arm
-#define parse_value parse_value_arm
-#define par_write par_write_arm
-#define patch_reloc patch_reloc_arm
-#define phys_map_node_alloc phys_map_node_alloc_arm
-#define phys_map_node_reserve phys_map_node_reserve_arm
-#define phys_mem_alloc phys_mem_alloc_arm
-#define phys_mem_set_alloc phys_mem_set_alloc_arm
-#define phys_page_compact phys_page_compact_arm
-#define phys_page_compact_all phys_page_compact_all_arm
-#define phys_page_find phys_page_find_arm
-#define phys_page_set phys_page_set_arm
-#define phys_page_set_level phys_page_set_level_arm
-#define phys_section_add phys_section_add_arm
-#define phys_section_destroy phys_section_destroy_arm
-#define phys_sections_free phys_sections_free_arm
-#define pickNaN pickNaN_arm
-#define pickNaNMulAdd pickNaNMulAdd_arm
-#define pmccfiltr_write pmccfiltr_write_arm
-#define pmccntr_read pmccntr_read_arm
-#define pmccntr_sync pmccntr_sync_arm
-#define pmccntr_write pmccntr_write_arm
-#define pmccntr_write32 pmccntr_write32_arm
-#define pmcntenclr_write pmcntenclr_write_arm
-#define pmcntenset_write pmcntenset_write_arm
-#define pmcr_write pmcr_write_arm
-#define pmintenclr_write pmintenclr_write_arm
-#define pmintenset_write pmintenset_write_arm
-#define pmovsr_write pmovsr_write_arm
-#define pmreg_access pmreg_access_arm
-#define pmsav5_cp_reginfo pmsav5_cp_reginfo_arm
-#define pmsav5_data_ap_read pmsav5_data_ap_read_arm
-#define pmsav5_data_ap_write pmsav5_data_ap_write_arm
-#define pmsav5_insn_ap_read pmsav5_insn_ap_read_arm
-#define pmsav5_insn_ap_write pmsav5_insn_ap_write_arm
-#define pmuserenr_write pmuserenr_write_arm
-#define pmxevtyper_write pmxevtyper_write_arm
-#define print_type_bool print_type_bool_arm
-#define print_type_int print_type_int_arm
-#define print_type_number print_type_number_arm
-#define print_type_size print_type_size_arm
-#define print_type_str print_type_str_arm
-#define propagateFloat128NaN propagateFloat128NaN_arm
-#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_arm
-#define propagateFloat32NaN propagateFloat32NaN_arm
-#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_arm
-#define propagateFloat64NaN propagateFloat64NaN_arm
-#define propagateFloatx80NaN propagateFloatx80NaN_arm
-#define property_get_alias property_get_alias_arm
-#define property_get_bool property_get_bool_arm
-#define property_get_str property_get_str_arm
-#define property_get_uint16_ptr property_get_uint16_ptr_arm
-#define property_get_uint32_ptr property_get_uint32_ptr_arm
-#define property_get_uint64_ptr property_get_uint64_ptr_arm
-#define property_get_uint8_ptr property_get_uint8_ptr_arm
-#define property_release_alias property_release_alias_arm
-#define property_release_bool property_release_bool_arm
-#define property_release_str property_release_str_arm
-#define property_resolve_alias property_resolve_alias_arm
-#define property_set_alias property_set_alias_arm
-#define property_set_bool property_set_bool_arm
-#define property_set_str property_set_str_arm
-#define pstate_read pstate_read_arm
-#define pstate_write pstate_write_arm
-#define pxa250_initfn pxa250_initfn_arm
-#define pxa255_initfn pxa255_initfn_arm
-#define pxa260_initfn pxa260_initfn_arm
-#define pxa261_initfn pxa261_initfn_arm
-#define pxa262_initfn pxa262_initfn_arm
-#define pxa270a0_initfn pxa270a0_initfn_arm
-#define pxa270a1_initfn pxa270a1_initfn_arm
-#define pxa270b0_initfn pxa270b0_initfn_arm
-#define pxa270b1_initfn pxa270b1_initfn_arm
-#define pxa270c0_initfn pxa270c0_initfn_arm
-#define pxa270c5_initfn pxa270c5_initfn_arm
-#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_arm
-#define qapi_dealloc_end_list qapi_dealloc_end_list_arm
-#define qapi_dealloc_end_struct qapi_dealloc_end_struct_arm
-#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_arm
-#define qapi_dealloc_next_list qapi_dealloc_next_list_arm
-#define qapi_dealloc_pop qapi_dealloc_pop_arm
-#define qapi_dealloc_push qapi_dealloc_push_arm
-#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_arm
-#define qapi_dealloc_start_list qapi_dealloc_start_list_arm
-#define qapi_dealloc_start_struct qapi_dealloc_start_struct_arm
-#define qapi_dealloc_start_union qapi_dealloc_start_union_arm
-#define qapi_dealloc_type_bool qapi_dealloc_type_bool_arm
-#define qapi_dealloc_type_enum qapi_dealloc_type_enum_arm
-#define qapi_dealloc_type_int qapi_dealloc_type_int_arm
-#define qapi_dealloc_type_number qapi_dealloc_type_number_arm
-#define qapi_dealloc_type_size qapi_dealloc_type_size_arm
-#define qapi_dealloc_type_str qapi_dealloc_type_str_arm
-#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_arm
-#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_arm
-#define qapi_free_boolList qapi_free_boolList_arm
-#define qapi_free_ErrorClassList qapi_free_ErrorClassList_arm
-#define qapi_free_int16List qapi_free_int16List_arm
-#define qapi_free_int32List qapi_free_int32List_arm
-#define qapi_free_int64List qapi_free_int64List_arm
-#define qapi_free_int8List qapi_free_int8List_arm
-#define qapi_free_intList qapi_free_intList_arm
-#define qapi_free_numberList qapi_free_numberList_arm
-#define qapi_free_strList qapi_free_strList_arm
-#define qapi_free_uint16List qapi_free_uint16List_arm
-#define qapi_free_uint32List qapi_free_uint32List_arm
-#define qapi_free_uint64List qapi_free_uint64List_arm
-#define qapi_free_uint8List qapi_free_uint8List_arm
-#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_arm
-#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_arm
-#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_arm
-#define qbool_destroy_obj qbool_destroy_obj_arm
-#define qbool_from_int qbool_from_int_arm
-#define qbool_get_int qbool_get_int_arm
-#define qbool_type qbool_type_arm
-#define qbus_create qbus_create_arm
-#define qbus_create_inplace qbus_create_inplace_arm
-#define qbus_finalize qbus_finalize_arm
-#define qbus_initfn qbus_initfn_arm
-#define qbus_realize qbus_realize_arm
-#define qdev_create qdev_create_arm
-#define qdev_get_type qdev_get_type_arm
-#define qdev_register_types qdev_register_types_arm
-#define qdev_set_parent_bus qdev_set_parent_bus_arm
-#define qdev_try_create qdev_try_create_arm
-#define qdict_add_key qdict_add_key_arm
-#define qdict_array_split qdict_array_split_arm
-#define qdict_clone_shallow qdict_clone_shallow_arm
-#define qdict_del qdict_del_arm
-#define qdict_destroy_obj qdict_destroy_obj_arm
-#define qdict_entry_key qdict_entry_key_arm
-#define qdict_entry_value qdict_entry_value_arm
-#define qdict_extract_subqdict qdict_extract_subqdict_arm
-#define qdict_find qdict_find_arm
-#define qdict_first qdict_first_arm
-#define qdict_flatten qdict_flatten_arm
-#define qdict_flatten_qdict qdict_flatten_qdict_arm
-#define qdict_flatten_qlist qdict_flatten_qlist_arm
-#define qdict_get qdict_get_arm
-#define qdict_get_bool qdict_get_bool_arm
-#define qdict_get_double qdict_get_double_arm
-#define qdict_get_int qdict_get_int_arm
-#define qdict_get_obj qdict_get_obj_arm
-#define qdict_get_qdict qdict_get_qdict_arm
-#define qdict_get_qlist qdict_get_qlist_arm
-#define qdict_get_str qdict_get_str_arm
-#define qdict_get_try_bool qdict_get_try_bool_arm
-#define qdict_get_try_int qdict_get_try_int_arm
-#define qdict_get_try_str qdict_get_try_str_arm
-#define qdict_haskey qdict_haskey_arm
-#define qdict_has_prefixed_entries qdict_has_prefixed_entries_arm
-#define qdict_iter qdict_iter_arm
-#define qdict_join qdict_join_arm
-#define qdict_new qdict_new_arm
-#define qdict_next qdict_next_arm
-#define qdict_next_entry qdict_next_entry_arm
-#define qdict_put_obj qdict_put_obj_arm
-#define qdict_size qdict_size_arm
-#define qdict_type qdict_type_arm
-#define qemu_clock_get_us qemu_clock_get_us_arm
-#define qemu_clock_ptr qemu_clock_ptr_arm
-#define qemu_clocks qemu_clocks_arm
-#define qemu_get_cpu qemu_get_cpu_arm
-#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_arm
-#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_arm
-#define qemu_get_ram_block qemu_get_ram_block_arm
-#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_arm
-#define qemu_get_ram_fd qemu_get_ram_fd_arm
-#define qemu_get_ram_ptr qemu_get_ram_ptr_arm
-#define qemu_host_page_mask qemu_host_page_mask_arm
-#define qemu_host_page_size qemu_host_page_size_arm
-#define qemu_init_vcpu qemu_init_vcpu_arm
-#define qemu_ld_helpers qemu_ld_helpers_arm
-#define qemu_log_close qemu_log_close_arm
-#define qemu_log_enabled qemu_log_enabled_arm
-#define qemu_log_flush qemu_log_flush_arm
-#define qemu_loglevel_mask qemu_loglevel_mask_arm
-#define qemu_log_vprintf qemu_log_vprintf_arm
-#define qemu_oom_check qemu_oom_check_arm
-#define qemu_parse_fd qemu_parse_fd_arm
-#define qemu_ram_addr_from_host qemu_ram_addr_from_host_arm
-#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_arm
-#define qemu_ram_alloc qemu_ram_alloc_arm
-#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_arm
-#define qemu_ram_foreach_block qemu_ram_foreach_block_arm
-#define qemu_ram_free qemu_ram_free_arm
-#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_arm
-#define qemu_ram_ptr_length qemu_ram_ptr_length_arm
-#define qemu_ram_remap qemu_ram_remap_arm
-#define qemu_ram_setup_dump qemu_ram_setup_dump_arm
-#define qemu_ram_unset_idstr qemu_ram_unset_idstr_arm
-#define qemu_real_host_page_size qemu_real_host_page_size_arm
-#define qemu_st_helpers qemu_st_helpers_arm
-#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_arm
-#define qemu_try_memalign qemu_try_memalign_arm
-#define qentry_destroy qentry_destroy_arm
-#define qerror_human qerror_human_arm
-#define qerror_report qerror_report_arm
-#define qerror_report_err qerror_report_err_arm
-#define qfloat_destroy_obj qfloat_destroy_obj_arm
-#define qfloat_from_double qfloat_from_double_arm
-#define qfloat_get_double qfloat_get_double_arm
-#define qfloat_type qfloat_type_arm
-#define qint_destroy_obj qint_destroy_obj_arm
-#define qint_from_int qint_from_int_arm
-#define qint_get_int qint_get_int_arm
-#define qint_type qint_type_arm
-#define qlist_append_obj qlist_append_obj_arm
-#define qlist_copy qlist_copy_arm
-#define qlist_copy_elem qlist_copy_elem_arm
-#define qlist_destroy_obj qlist_destroy_obj_arm
-#define qlist_empty qlist_empty_arm
-#define qlist_entry_obj qlist_entry_obj_arm
-#define qlist_first qlist_first_arm
-#define qlist_iter qlist_iter_arm
-#define qlist_new qlist_new_arm
-#define qlist_next qlist_next_arm
-#define qlist_peek qlist_peek_arm
-#define qlist_pop qlist_pop_arm
-#define qlist_size qlist_size_arm
-#define qlist_size_iter qlist_size_iter_arm
-#define qlist_type qlist_type_arm
-#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_arm
-#define qmp_input_end_list qmp_input_end_list_arm
-#define qmp_input_end_struct qmp_input_end_struct_arm
-#define qmp_input_get_next_type qmp_input_get_next_type_arm
-#define qmp_input_get_object qmp_input_get_object_arm
-#define qmp_input_get_visitor qmp_input_get_visitor_arm
-#define qmp_input_next_list qmp_input_next_list_arm
-#define qmp_input_optional qmp_input_optional_arm
-#define qmp_input_pop qmp_input_pop_arm
-#define qmp_input_push qmp_input_push_arm
-#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_arm
-#define qmp_input_start_list qmp_input_start_list_arm
-#define qmp_input_start_struct qmp_input_start_struct_arm
-#define qmp_input_type_bool qmp_input_type_bool_arm
-#define qmp_input_type_int qmp_input_type_int_arm
-#define qmp_input_type_number qmp_input_type_number_arm
-#define qmp_input_type_str qmp_input_type_str_arm
-#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_arm
-#define qmp_input_visitor_new qmp_input_visitor_new_arm
-#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_arm
-#define qmp_output_add_obj qmp_output_add_obj_arm
-#define qmp_output_end_list qmp_output_end_list_arm
-#define qmp_output_end_struct qmp_output_end_struct_arm
-#define qmp_output_first qmp_output_first_arm
-#define qmp_output_get_qobject qmp_output_get_qobject_arm
-#define qmp_output_get_visitor qmp_output_get_visitor_arm
-#define qmp_output_last qmp_output_last_arm
-#define qmp_output_next_list qmp_output_next_list_arm
-#define qmp_output_pop qmp_output_pop_arm
-#define qmp_output_push_obj qmp_output_push_obj_arm
-#define qmp_output_start_list qmp_output_start_list_arm
-#define qmp_output_start_struct qmp_output_start_struct_arm
-#define qmp_output_type_bool qmp_output_type_bool_arm
-#define qmp_output_type_int qmp_output_type_int_arm
-#define qmp_output_type_number qmp_output_type_number_arm
-#define qmp_output_type_str qmp_output_type_str_arm
-#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_arm
-#define qmp_output_visitor_new qmp_output_visitor_new_arm
-#define qobject_decref qobject_decref_arm
-#define qobject_to_qbool qobject_to_qbool_arm
-#define qobject_to_qdict qobject_to_qdict_arm
-#define qobject_to_qfloat qobject_to_qfloat_arm
-#define qobject_to_qint qobject_to_qint_arm
-#define qobject_to_qlist qobject_to_qlist_arm
-#define qobject_to_qstring qobject_to_qstring_arm
-#define qobject_type qobject_type_arm
-#define qstring_append qstring_append_arm
-#define qstring_append_chr qstring_append_chr_arm
-#define qstring_append_int qstring_append_int_arm
-#define qstring_destroy_obj qstring_destroy_obj_arm
-#define qstring_from_escaped_str qstring_from_escaped_str_arm
-#define qstring_from_str qstring_from_str_arm
-#define qstring_from_substr qstring_from_substr_arm
-#define qstring_get_length qstring_get_length_arm
-#define qstring_get_str qstring_get_str_arm
-#define qstring_new qstring_new_arm
-#define qstring_type qstring_type_arm
-#define ram_block_add ram_block_add_arm
-#define ram_size ram_size_arm
-#define range_compare range_compare_arm
-#define range_covers_byte range_covers_byte_arm
-#define range_get_last range_get_last_arm
-#define range_merge range_merge_arm
-#define ranges_can_merge ranges_can_merge_arm
-#define raw_read raw_read_arm
-#define raw_write raw_write_arm
-#define rcon rcon_arm
-#define read_raw_cp_reg read_raw_cp_reg_arm
-#define recip_estimate recip_estimate_arm
-#define recip_sqrt_estimate recip_sqrt_estimate_arm
-#define register_cp_regs_for_features register_cp_regs_for_features_arm
-#define register_multipage register_multipage_arm
-#define register_subpage register_subpage_arm
-#define register_tm_clones register_tm_clones_arm
-#define register_types_object register_types_object_arm
-#define regnames regnames_arm
-#define render_memory_region render_memory_region_arm
-#define reset_all_temps reset_all_temps_arm
-#define reset_temp reset_temp_arm
-#define rol32 rol32_arm
-#define rol64 rol64_arm
-#define ror32 ror32_arm
-#define ror64 ror64_arm
-#define roundAndPackFloat128 roundAndPackFloat128_arm
-#define roundAndPackFloat16 roundAndPackFloat16_arm
-#define roundAndPackFloat32 roundAndPackFloat32_arm
-#define roundAndPackFloat64 roundAndPackFloat64_arm
-#define roundAndPackFloatx80 roundAndPackFloatx80_arm
-#define roundAndPackInt32 roundAndPackInt32_arm
-#define roundAndPackInt64 roundAndPackInt64_arm
-#define roundAndPackUint64 roundAndPackUint64_arm
-#define round_to_inf round_to_inf_arm
-#define run_on_cpu run_on_cpu_arm
-#define s0 s0_arm
-#define S0 S0_arm
-#define s1 s1_arm
-#define S1 S1_arm
-#define sa1100_initfn sa1100_initfn_arm
-#define sa1110_initfn sa1110_initfn_arm
-#define save_globals save_globals_arm
-#define scr_write scr_write_arm
-#define sctlr_write sctlr_write_arm
-#define set_bit set_bit_arm
-#define set_bits set_bits_arm
-#define set_default_nan_mode set_default_nan_mode_arm
-#define set_feature set_feature_arm
-#define set_float_detect_tininess set_float_detect_tininess_arm
-#define set_float_exception_flags set_float_exception_flags_arm
-#define set_float_rounding_mode set_float_rounding_mode_arm
-#define set_flush_inputs_to_zero set_flush_inputs_to_zero_arm
-#define set_flush_to_zero set_flush_to_zero_arm
-#define set_swi_errno set_swi_errno_arm
-#define sextract32 sextract32_arm
-#define sextract64 sextract64_arm
-#define shift128ExtraRightJamming shift128ExtraRightJamming_arm
-#define shift128Right shift128Right_arm
-#define shift128RightJamming shift128RightJamming_arm
-#define shift32RightJamming shift32RightJamming_arm
-#define shift64ExtraRightJamming shift64ExtraRightJamming_arm
-#define shift64RightJamming shift64RightJamming_arm
-#define shifter_out_im shifter_out_im_arm
-#define shortShift128Left shortShift128Left_arm
-#define shortShift192Left shortShift192Left_arm
-#define simple_mpu_ap_bits simple_mpu_ap_bits_arm
-#define size_code_gen_buffer size_code_gen_buffer_arm
-#define softmmu_lock_user softmmu_lock_user_arm
-#define softmmu_lock_user_string softmmu_lock_user_string_arm
-#define softmmu_tget32 softmmu_tget32_arm
-#define softmmu_tget8 softmmu_tget8_arm
-#define softmmu_tput32 softmmu_tput32_arm
-#define softmmu_unlock_user softmmu_unlock_user_arm
-#define sort_constraints sort_constraints_arm
-#define sp_el0_access sp_el0_access_arm
-#define spsel_read spsel_read_arm
-#define spsel_write spsel_write_arm
-#define start_list start_list_arm
-#define stb_p stb_p_arm
-#define stb_phys stb_phys_arm
-#define stl_be_p stl_be_p_arm
-#define stl_be_phys stl_be_phys_arm
-#define stl_he_p stl_he_p_arm
-#define stl_le_p stl_le_p_arm
-#define stl_le_phys stl_le_phys_arm
-#define stl_phys stl_phys_arm
-#define stl_phys_internal stl_phys_internal_arm
-#define stl_phys_notdirty stl_phys_notdirty_arm
-#define store_cpu_offset store_cpu_offset_arm
-#define store_reg store_reg_arm
-#define store_reg_bx store_reg_bx_arm
-#define store_reg_from_load store_reg_from_load_arm
-#define stq_be_p stq_be_p_arm
-#define stq_be_phys stq_be_phys_arm
-#define stq_he_p stq_he_p_arm
-#define stq_le_p stq_le_p_arm
-#define stq_le_phys stq_le_phys_arm
-#define stq_phys stq_phys_arm
-#define string_input_get_visitor string_input_get_visitor_arm
-#define string_input_visitor_cleanup string_input_visitor_cleanup_arm
-#define string_input_visitor_new string_input_visitor_new_arm
-#define strongarm_cp_reginfo strongarm_cp_reginfo_arm
-#define strstart strstart_arm
-#define strtosz strtosz_arm
-#define strtosz_suffix strtosz_suffix_arm
-#define stw_be_p stw_be_p_arm
-#define stw_be_phys stw_be_phys_arm
-#define stw_he_p stw_he_p_arm
-#define stw_le_p stw_le_p_arm
-#define stw_le_phys stw_le_phys_arm
-#define stw_phys stw_phys_arm
-#define stw_phys_internal stw_phys_internal_arm
-#define sub128 sub128_arm
-#define sub16_sat sub16_sat_arm
-#define sub16_usat sub16_usat_arm
-#define sub192 sub192_arm
-#define sub8_sat sub8_sat_arm
-#define sub8_usat sub8_usat_arm
-#define subFloat128Sigs subFloat128Sigs_arm
-#define subFloat32Sigs subFloat32Sigs_arm
-#define subFloat64Sigs subFloat64Sigs_arm
-#define subFloatx80Sigs subFloatx80Sigs_arm
-#define subpage_accepts subpage_accepts_arm
-#define subpage_init subpage_init_arm
-#define subpage_ops subpage_ops_arm
-#define subpage_read subpage_read_arm
-#define subpage_register subpage_register_arm
-#define subpage_write subpage_write_arm
-#define suffix_mul suffix_mul_arm
-#define swap_commutative swap_commutative_arm
-#define swap_commutative2 swap_commutative2_arm
-#define switch_mode switch_mode_arm
-#define switch_v7m_sp switch_v7m_sp_arm
-#define syn_aa32_bkpt syn_aa32_bkpt_arm
-#define syn_aa32_hvc syn_aa32_hvc_arm
-#define syn_aa32_smc syn_aa32_smc_arm
-#define syn_aa32_svc syn_aa32_svc_arm
-#define syn_breakpoint syn_breakpoint_arm
-#define sync_globals sync_globals_arm
-#define syn_cp14_rrt_trap syn_cp14_rrt_trap_arm
-#define syn_cp14_rt_trap syn_cp14_rt_trap_arm
-#define syn_cp15_rrt_trap syn_cp15_rrt_trap_arm
-#define syn_cp15_rt_trap syn_cp15_rt_trap_arm
-#define syn_data_abort syn_data_abort_arm
-#define syn_fp_access_trap syn_fp_access_trap_arm
-#define syn_insn_abort syn_insn_abort_arm
-#define syn_swstep syn_swstep_arm
-#define syn_uncategorized syn_uncategorized_arm
-#define syn_watchpoint syn_watchpoint_arm
-#define syscall_err syscall_err_arm
-#define system_bus_class_init system_bus_class_init_arm
-#define system_bus_info system_bus_info_arm
-#define t2ee_cp_reginfo t2ee_cp_reginfo_arm
-#define table_logic_cc table_logic_cc_arm
-#define target_parse_constraint target_parse_constraint_arm
-#define target_words_bigendian target_words_bigendian_arm
-#define tb_add_jump tb_add_jump_arm
-#define tb_alloc tb_alloc_arm
-#define tb_alloc_page tb_alloc_page_arm
-#define tb_check_watchpoint tb_check_watchpoint_arm
-#define tb_find_fast tb_find_fast_arm
-#define tb_find_pc tb_find_pc_arm
-#define tb_find_slow tb_find_slow_arm
-#define tb_flush tb_flush_arm
-#define tb_flush_jmp_cache tb_flush_jmp_cache_arm
-#define tb_free tb_free_arm
-#define tb_gen_code tb_gen_code_arm
-#define tb_hash_remove tb_hash_remove_arm
-#define tb_invalidate_phys_addr tb_invalidate_phys_addr_arm
-#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_arm
-#define tb_invalidate_phys_range tb_invalidate_phys_range_arm
-#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_arm
-#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_arm
-#define tb_jmp_remove tb_jmp_remove_arm
-#define tb_link_page tb_link_page_arm
-#define tb_page_remove tb_page_remove_arm
-#define tb_phys_hash_func tb_phys_hash_func_arm
-#define tb_phys_invalidate tb_phys_invalidate_arm
-#define tb_reset_jump tb_reset_jump_arm
-#define tb_set_jmp_target tb_set_jmp_target_arm
-#define tcg_accel_class_init tcg_accel_class_init_arm
-#define tcg_accel_type tcg_accel_type_arm
-#define tcg_add_param_i32 tcg_add_param_i32_arm
-#define tcg_add_param_i64 tcg_add_param_i64_arm
-#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_arm
-#define tcg_allowed tcg_allowed_arm
-#define tcg_canonicalize_memop tcg_canonicalize_memop_arm
-#define tcg_commit tcg_commit_arm
-#define tcg_cond_to_jcc tcg_cond_to_jcc_arm
-#define tcg_constant_folding tcg_constant_folding_arm
-#define tcg_const_i32 tcg_const_i32_arm
-#define tcg_const_i64 tcg_const_i64_arm
-#define tcg_const_local_i32 tcg_const_local_i32_arm
-#define tcg_const_local_i64 tcg_const_local_i64_arm
-#define tcg_context_init tcg_context_init_arm
-#define tcg_cpu_address_space_init tcg_cpu_address_space_init_arm
-#define tcg_cpu_exec tcg_cpu_exec_arm
-#define tcg_current_code_size tcg_current_code_size_arm
-#define tcg_dump_info tcg_dump_info_arm
-#define tcg_dump_ops tcg_dump_ops_arm
-#define tcg_exec_all tcg_exec_all_arm
-#define tcg_find_helper tcg_find_helper_arm
-#define tcg_func_start tcg_func_start_arm
-#define tcg_gen_abs_i32 tcg_gen_abs_i32_arm
-#define tcg_gen_add2_i32 tcg_gen_add2_i32_arm
-#define tcg_gen_add_i32 tcg_gen_add_i32_arm
-#define tcg_gen_add_i64 tcg_gen_add_i64_arm
-#define tcg_gen_addi_i32 tcg_gen_addi_i32_arm
-#define tcg_gen_addi_i64 tcg_gen_addi_i64_arm
-#define tcg_gen_andc_i32 tcg_gen_andc_i32_arm
-#define tcg_gen_and_i32 tcg_gen_and_i32_arm
-#define tcg_gen_and_i64 tcg_gen_and_i64_arm
-#define tcg_gen_andi_i32 tcg_gen_andi_i32_arm
-#define tcg_gen_andi_i64 tcg_gen_andi_i64_arm
-#define tcg_gen_br tcg_gen_br_arm
-#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_arm
-#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_arm
-#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_arm
-#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_arm
-#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_arm
-#define tcg_gen_callN tcg_gen_callN_arm
-#define tcg_gen_code tcg_gen_code_arm
-#define tcg_gen_code_common tcg_gen_code_common_arm
-#define tcg_gen_code_search_pc tcg_gen_code_search_pc_arm
-#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_arm
-#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_arm
-#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_arm
-#define tcg_gen_exit_tb tcg_gen_exit_tb_arm
-#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_arm
-#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_arm
-#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_arm
-#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_arm
-#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_arm
-#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_arm
-#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_arm
-#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_arm
-#define tcg_gen_goto_tb tcg_gen_goto_tb_arm
-#define tcg_gen_ld_i32 tcg_gen_ld_i32_arm
-#define tcg_gen_ld_i64 tcg_gen_ld_i64_arm
-#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_arm
-#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_arm
-#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_arm
-#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_arm
-#define tcg_gen_mov_i32 tcg_gen_mov_i32_arm
-#define tcg_gen_mov_i64 tcg_gen_mov_i64_arm
-#define tcg_gen_movi_i32 tcg_gen_movi_i32_arm
-#define tcg_gen_movi_i64 tcg_gen_movi_i64_arm
-#define tcg_gen_mul_i32 tcg_gen_mul_i32_arm
-#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_arm
-#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_arm
-#define tcg_gen_neg_i32 tcg_gen_neg_i32_arm
-#define tcg_gen_neg_i64 tcg_gen_neg_i64_arm
-#define tcg_gen_not_i32 tcg_gen_not_i32_arm
-#define tcg_gen_op0 tcg_gen_op0_arm
-#define tcg_gen_op1i tcg_gen_op1i_arm
-#define tcg_gen_op2_i32 tcg_gen_op2_i32_arm
-#define tcg_gen_op2_i64 tcg_gen_op2_i64_arm
-#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_arm
-#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_arm
-#define tcg_gen_op3_i32 tcg_gen_op3_i32_arm
-#define tcg_gen_op3_i64 tcg_gen_op3_i64_arm
-#define tcg_gen_op4_i32 tcg_gen_op4_i32_arm
-#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_arm
-#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_arm
-#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_arm
-#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_arm
-#define tcg_gen_op6_i32 tcg_gen_op6_i32_arm
-#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_arm
-#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_arm
-#define tcg_gen_orc_i32 tcg_gen_orc_i32_arm
-#define tcg_gen_or_i32 tcg_gen_or_i32_arm
-#define tcg_gen_or_i64 tcg_gen_or_i64_arm
-#define tcg_gen_ori_i32 tcg_gen_ori_i32_arm
-#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_arm
-#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_arm
-#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_arm
-#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_arm
-#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_arm
-#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_arm
-#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_arm
-#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_arm
-#define tcg_gen_sar_i32 tcg_gen_sar_i32_arm
-#define tcg_gen_sari_i32 tcg_gen_sari_i32_arm
-#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_arm
-#define tcg_gen_shl_i32 tcg_gen_shl_i32_arm
-#define tcg_gen_shl_i64 tcg_gen_shl_i64_arm
-#define tcg_gen_shli_i32 tcg_gen_shli_i32_arm
-#define tcg_gen_shli_i64 tcg_gen_shli_i64_arm
-#define tcg_gen_shr_i32 tcg_gen_shr_i32_arm
-#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_arm
-#define tcg_gen_shr_i64 tcg_gen_shr_i64_arm
-#define tcg_gen_shri_i32 tcg_gen_shri_i32_arm
-#define tcg_gen_shri_i64 tcg_gen_shri_i64_arm
-#define tcg_gen_st_i32 tcg_gen_st_i32_arm
-#define tcg_gen_st_i64 tcg_gen_st_i64_arm
-#define tcg_gen_sub_i32 tcg_gen_sub_i32_arm
-#define tcg_gen_sub_i64 tcg_gen_sub_i64_arm
-#define tcg_gen_subi_i32 tcg_gen_subi_i32_arm
-#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_arm
-#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_arm
-#define tcg_gen_xor_i32 tcg_gen_xor_i32_arm
-#define tcg_gen_xor_i64 tcg_gen_xor_i64_arm
-#define tcg_gen_xori_i32 tcg_gen_xori_i32_arm
-#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_arm
-#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_arm
-#define tcg_get_arg_str_idx tcg_get_arg_str_idx_arm
-#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_arm
-#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_arm
-#define tcg_global_mem_new_internal tcg_global_mem_new_internal_arm
-#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_arm
-#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_arm
-#define tcg_global_reg_new_internal tcg_global_reg_new_internal_arm
-#define tcg_handle_interrupt tcg_handle_interrupt_arm
-#define tcg_init tcg_init_arm
-#define tcg_invert_cond tcg_invert_cond_arm
-#define tcg_la_bb_end tcg_la_bb_end_arm
-#define tcg_la_br_end tcg_la_br_end_arm
-#define tcg_la_func_end tcg_la_func_end_arm
-#define tcg_liveness_analysis tcg_liveness_analysis_arm
-#define tcg_malloc tcg_malloc_arm
-#define tcg_malloc_internal tcg_malloc_internal_arm
-#define tcg_op_defs_org tcg_op_defs_org_arm
-#define tcg_opt_gen_mov tcg_opt_gen_mov_arm
-#define tcg_opt_gen_movi tcg_opt_gen_movi_arm
-#define tcg_optimize tcg_optimize_arm
-#define tcg_out16 tcg_out16_arm
-#define tcg_out32 tcg_out32_arm
-#define tcg_out64 tcg_out64_arm
-#define tcg_out8 tcg_out8_arm
-#define tcg_out_addi tcg_out_addi_arm
-#define tcg_out_branch tcg_out_branch_arm
-#define tcg_out_brcond32 tcg_out_brcond32_arm
-#define tcg_out_brcond64 tcg_out_brcond64_arm
-#define tcg_out_bswap32 tcg_out_bswap32_arm
-#define tcg_out_bswap64 tcg_out_bswap64_arm
-#define tcg_out_call tcg_out_call_arm
-#define tcg_out_cmp tcg_out_cmp_arm
-#define tcg_out_ext16s tcg_out_ext16s_arm
-#define tcg_out_ext16u tcg_out_ext16u_arm
-#define tcg_out_ext32s tcg_out_ext32s_arm
-#define tcg_out_ext32u tcg_out_ext32u_arm
-#define tcg_out_ext8s tcg_out_ext8s_arm
-#define tcg_out_ext8u tcg_out_ext8u_arm
-#define tcg_out_jmp tcg_out_jmp_arm
-#define tcg_out_jxx tcg_out_jxx_arm
-#define tcg_out_label tcg_out_label_arm
-#define tcg_out_ld tcg_out_ld_arm
-#define tcg_out_modrm tcg_out_modrm_arm
-#define tcg_out_modrm_offset tcg_out_modrm_offset_arm
-#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_arm
-#define tcg_out_mov tcg_out_mov_arm
-#define tcg_out_movcond32 tcg_out_movcond32_arm
-#define tcg_out_movcond64 tcg_out_movcond64_arm
-#define tcg_out_movi tcg_out_movi_arm
-#define tcg_out_op tcg_out_op_arm
-#define tcg_out_pop tcg_out_pop_arm
-#define tcg_out_push tcg_out_push_arm
-#define tcg_out_qemu_ld tcg_out_qemu_ld_arm
-#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_arm
-#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_arm
-#define tcg_out_qemu_st tcg_out_qemu_st_arm
-#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_arm
-#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_arm
-#define tcg_out_reloc tcg_out_reloc_arm
-#define tcg_out_rolw_8 tcg_out_rolw_8_arm
-#define tcg_out_setcond32 tcg_out_setcond32_arm
-#define tcg_out_setcond64 tcg_out_setcond64_arm
-#define tcg_out_shifti tcg_out_shifti_arm
-#define tcg_out_st tcg_out_st_arm
-#define tcg_out_tb_finalize tcg_out_tb_finalize_arm
-#define tcg_out_tb_init tcg_out_tb_init_arm
-#define tcg_out_tlb_load tcg_out_tlb_load_arm
-#define tcg_out_vex_modrm tcg_out_vex_modrm_arm
-#define tcg_patch32 tcg_patch32_arm
-#define tcg_patch8 tcg_patch8_arm
-#define tcg_pcrel_diff tcg_pcrel_diff_arm
-#define tcg_pool_reset tcg_pool_reset_arm
-#define tcg_prologue_init tcg_prologue_init_arm
-#define tcg_ptr_byte_diff tcg_ptr_byte_diff_arm
-#define tcg_reg_alloc tcg_reg_alloc_arm
-#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_arm
-#define tcg_reg_alloc_call tcg_reg_alloc_call_arm
-#define tcg_reg_alloc_mov tcg_reg_alloc_mov_arm
-#define tcg_reg_alloc_movi tcg_reg_alloc_movi_arm
-#define tcg_reg_alloc_op tcg_reg_alloc_op_arm
-#define tcg_reg_alloc_start tcg_reg_alloc_start_arm
-#define tcg_reg_free tcg_reg_free_arm
-#define tcg_reg_sync tcg_reg_sync_arm
-#define tcg_set_frame tcg_set_frame_arm
-#define tcg_set_nop tcg_set_nop_arm
-#define tcg_swap_cond tcg_swap_cond_arm
-#define tcg_target_callee_save_regs tcg_target_callee_save_regs_arm
-#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_arm
-#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_arm
-#define tcg_target_const_match tcg_target_const_match_arm
-#define tcg_target_init tcg_target_init_arm
-#define tcg_target_qemu_prologue tcg_target_qemu_prologue_arm
-#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_arm
-#define tcg_temp_alloc tcg_temp_alloc_arm
-#define tcg_temp_free_i32 tcg_temp_free_i32_arm
-#define tcg_temp_free_i64 tcg_temp_free_i64_arm
-#define tcg_temp_free_internal tcg_temp_free_internal_arm
-#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_arm
-#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_arm
-#define tcg_temp_new_i32 tcg_temp_new_i32_arm
-#define tcg_temp_new_i64 tcg_temp_new_i64_arm
-#define tcg_temp_new_internal tcg_temp_new_internal_arm
-#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_arm
-#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_arm
-#define tdb_hash tdb_hash_arm
-#define teecr_write teecr_write_arm
-#define teehbr_access teehbr_access_arm
-#define temp_allocate_frame temp_allocate_frame_arm
-#define temp_dead temp_dead_arm
-#define temps_are_copies temps_are_copies_arm
-#define temp_save temp_save_arm
-#define temp_sync temp_sync_arm
-#define tgen_arithi tgen_arithi_arm
-#define tgen_arithr tgen_arithr_arm
-#define thumb2_logic_op thumb2_logic_op_arm
-#define ti925t_initfn ti925t_initfn_arm
-#define tlb_add_large_page tlb_add_large_page_arm
-#define tlb_flush_entry tlb_flush_entry_arm
-#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_arm
-#define tlbi_aa64_asid_write tlbi_aa64_asid_write_arm
-#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_arm
-#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_arm
-#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_arm
-#define tlbi_aa64_va_write tlbi_aa64_va_write_arm
-#define tlbiall_is_write tlbiall_is_write_arm
-#define tlbiall_write tlbiall_write_arm
-#define tlbiasid_is_write tlbiasid_is_write_arm
-#define tlbiasid_write tlbiasid_write_arm
-#define tlbimvaa_is_write tlbimvaa_is_write_arm
-#define tlbimvaa_write tlbimvaa_write_arm
-#define tlbimva_is_write tlbimva_is_write_arm
-#define tlbimva_write tlbimva_write_arm
-#define tlb_is_dirty_ram tlb_is_dirty_ram_arm
-#define tlb_protect_code tlb_protect_code_arm
-#define tlb_reset_dirty_range tlb_reset_dirty_range_arm
-#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_arm
-#define tlb_set_dirty tlb_set_dirty_arm
-#define tlb_set_dirty1 tlb_set_dirty1_arm
-#define tlb_unprotect_code_phys tlb_unprotect_code_phys_arm
-#define tlb_vaddr_to_host tlb_vaddr_to_host_arm
-#define token_get_type token_get_type_arm
-#define token_get_value token_get_value_arm
-#define token_is_escape token_is_escape_arm
-#define token_is_keyword token_is_keyword_arm
-#define token_is_operator token_is_operator_arm
-#define tokens_append_from_iter tokens_append_from_iter_arm
-#define to_qiv to_qiv_arm
-#define to_qov to_qov_arm
-#define tosa_init tosa_init_arm
-#define tosa_machine_init tosa_machine_init_arm
-#define tswap32 tswap32_arm
-#define tswap64 tswap64_arm
-#define type_class_get_size type_class_get_size_arm
-#define type_get_by_name type_get_by_name_arm
-#define type_get_parent type_get_parent_arm
-#define type_has_parent type_has_parent_arm
-#define type_initialize type_initialize_arm
-#define type_initialize_interface type_initialize_interface_arm
-#define type_is_ancestor type_is_ancestor_arm
-#define type_new type_new_arm
-#define type_object_get_size type_object_get_size_arm
-#define type_register_internal type_register_internal_arm
-#define type_table_add type_table_add_arm
-#define type_table_get type_table_get_arm
-#define type_table_lookup type_table_lookup_arm
-#define uint16_to_float32 uint16_to_float32_arm
-#define uint16_to_float64 uint16_to_float64_arm
-#define uint32_to_float32 uint32_to_float32_arm
-#define uint32_to_float64 uint32_to_float64_arm
-#define uint64_to_float128 uint64_to_float128_arm
-#define uint64_to_float32 uint64_to_float32_arm
-#define uint64_to_float64 uint64_to_float64_arm
-#define unassigned_io_ops unassigned_io_ops_arm
-#define unassigned_io_read unassigned_io_read_arm
-#define unassigned_io_write unassigned_io_write_arm
-#define unassigned_mem_accepts unassigned_mem_accepts_arm
-#define unassigned_mem_ops unassigned_mem_ops_arm
-#define unassigned_mem_read unassigned_mem_read_arm
-#define unassigned_mem_write unassigned_mem_write_arm
-#define update_spsel update_spsel_arm
-#define v6_cp_reginfo v6_cp_reginfo_arm
-#define v6k_cp_reginfo v6k_cp_reginfo_arm
-#define v7_cp_reginfo v7_cp_reginfo_arm
-#define v7mp_cp_reginfo v7mp_cp_reginfo_arm
-#define v7m_pop v7m_pop_arm
-#define v7m_push v7m_push_arm
-#define v8_cp_reginfo v8_cp_reginfo_arm
-#define v8_el2_cp_reginfo v8_el2_cp_reginfo_arm
-#define v8_el3_cp_reginfo v8_el3_cp_reginfo_arm
-#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_arm
-#define vapa_cp_reginfo vapa_cp_reginfo_arm
-#define vbar_write vbar_write_arm
-#define vfp_exceptbits_from_host vfp_exceptbits_from_host_arm
-#define vfp_exceptbits_to_host vfp_exceptbits_to_host_arm
-#define vfp_get_fpcr vfp_get_fpcr_arm
-#define vfp_get_fpscr vfp_get_fpscr_arm
-#define vfp_get_fpsr vfp_get_fpsr_arm
-#define vfp_reg_offset vfp_reg_offset_arm
-#define vfp_set_fpcr vfp_set_fpcr_arm
-#define vfp_set_fpscr vfp_set_fpscr_arm
-#define vfp_set_fpsr vfp_set_fpsr_arm
-#define visit_end_implicit_struct visit_end_implicit_struct_arm
-#define visit_end_list visit_end_list_arm
-#define visit_end_struct visit_end_struct_arm
-#define visit_end_union visit_end_union_arm
-#define visit_get_next_type visit_get_next_type_arm
-#define visit_next_list visit_next_list_arm
-#define visit_optional visit_optional_arm
-#define visit_start_implicit_struct visit_start_implicit_struct_arm
-#define visit_start_list visit_start_list_arm
-#define visit_start_struct visit_start_struct_arm
-#define visit_start_union visit_start_union_arm
-#define vmsa_cp_reginfo vmsa_cp_reginfo_arm
-#define vmsa_tcr_el1_write vmsa_tcr_el1_write_arm
-#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_arm
-#define vmsa_ttbcr_reset vmsa_ttbcr_reset_arm
-#define vmsa_ttbcr_write vmsa_ttbcr_write_arm
-#define vmsa_ttbr_write vmsa_ttbr_write_arm
-#define write_cpustate_to_list write_cpustate_to_list_arm
-#define write_list_to_cpustate write_list_to_cpustate_arm
-#define write_raw_cp_reg write_raw_cp_reg_arm
-#define X86CPURegister32_lookup X86CPURegister32_lookup_arm
-#define x86_op_defs x86_op_defs_arm
-#define xpsr_read xpsr_read_arm
-#define xpsr_write xpsr_write_arm
-#define xscale_cpar_write xscale_cpar_write_arm
-#define xscale_cp_reginfo xscale_cp_reginfo_arm
-#define ARM_REGS_STORAGE_SIZE ARM_REGS_STORAGE_SIZE_arm
+#define helper_vfp_touqs helper_vfp_touqs_arm
+#define helper_vfp_sltoh helper_vfp_sltoh_arm
+#define helper_vfp_ultoh helper_vfp_ultoh_arm
+#define helper_vfp_sqtoh helper_vfp_sqtoh_arm
+#define helper_vfp_uqtoh helper_vfp_uqtoh_arm
+#define helper_vfp_toshh helper_vfp_toshh_arm
+#define helper_vfp_touhh helper_vfp_touhh_arm
+#define helper_vfp_toslh helper_vfp_toslh_arm
+#define helper_vfp_toulh helper_vfp_toulh_arm
+#define helper_vfp_tosqh helper_vfp_tosqh_arm
+#define helper_vfp_touqh helper_vfp_touqh_arm
+#define helper_set_rmode helper_set_rmode_arm
+#define helper_set_neon_rmode helper_set_neon_rmode_arm
+#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_arm
+#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_arm
+#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_arm
+#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_arm
+#define helper_recps_f32 helper_recps_f32_arm
+#define helper_rsqrts_f32 helper_rsqrts_f32_arm
+#define helper_recpe_f16 helper_recpe_f16_arm
+#define helper_recpe_f32 helper_recpe_f32_arm
+#define helper_recpe_f64 helper_recpe_f64_arm
+#define helper_rsqrte_f16 helper_rsqrte_f16_arm
+#define helper_rsqrte_f32 helper_rsqrte_f32_arm
+#define helper_rsqrte_f64 helper_rsqrte_f64_arm
+#define helper_recpe_u32 helper_recpe_u32_arm
+#define helper_rsqrte_u32 helper_rsqrte_u32_arm
+#define helper_vfp_muladds helper_vfp_muladds_arm
+#define helper_vfp_muladdd helper_vfp_muladdd_arm
+#define helper_rints_exact helper_rints_exact_arm
+#define helper_rintd_exact helper_rintd_exact_arm
+#define helper_rints helper_rints_arm
+#define helper_rintd helper_rintd_arm
+#define arm_rmode_to_sf arm_rmode_to_sf_arm
+#define helper_fjcvtzs helper_fjcvtzs_arm
+#define helper_vjcvt helper_vjcvt_arm
+#define helper_frint32_s helper_frint32_s_arm
+#define helper_frint64_s helper_frint64_s_arm
+#define helper_frint32_d helper_frint32_d_arm
+#define helper_frint64_d helper_frint64_d_arm
+#define helper_check_hcr_el2_trap helper_check_hcr_el2_trap_arm
+#define arm_reg_reset arm_reg_reset_arm
+#define arm_reg_read arm_reg_read_arm
+#define arm_reg_write arm_reg_write_arm
+#define mla_op mla_op_arm
+#define mls_op mls_op_arm
+#define sshl_op sshl_op_arm
+#define ushl_op ushl_op_arm
+#define uqsub_op uqsub_op_arm
+#define sqsub_op sqsub_op_arm
+#define uqadd_op uqadd_op_arm
+#define sqadd_op sqadd_op_arm
+#define sli_op sli_op_arm
+#define cmtst_op cmtst_op_arm
+#define sri_op sri_op_arm
+#define usra_op usra_op_arm
+#define ssra_op ssra_op_arm
 #endif
diff --git a/qemu/armeb.h b/qemu/armeb.h
index 30b771b5..d8f379d8 100644
--- a/qemu/armeb.h
+++ b/qemu/armeb.h
@@ -1,1367 +1,1288 @@
 /* Autogen header for Unicorn Engine - DONOT MODIFY */
-#ifndef UNICORN_AUTOGEN_ARMEB_H
-#define UNICORN_AUTOGEN_ARMEB_H
-#define arm_release arm_release_armeb
-#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_armeb
-#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_armeb
-#define use_idiv_instructions_rt use_idiv_instructions_rt_armeb
-#define tcg_target_deposit_valid tcg_target_deposit_valid_armeb
-#define helper_power_down helper_power_down_armeb
-#define check_exit_request check_exit_request_armeb
-#define address_space_unregister address_space_unregister_armeb
-#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_armeb
-#define phys_mem_clean phys_mem_clean_armeb
-#define tb_cleanup tb_cleanup_armeb
+#ifndef UNICORN_AUTOGEN_armeb_H
+#define UNICORN_AUTOGEN_armeb_H
+#ifndef UNICORN_ARCH_POSTFIX
+#define UNICORN_ARCH_POSTFIX _armeb
+#endif
+#define arm_arch arm_arch_armeb
+#define tb_target_set_jmp_target tb_target_set_jmp_target_armeb
+#define have_bmi1 have_bmi1_armeb
+#define have_popcnt have_popcnt_armeb
+#define have_avx1 have_avx1_armeb
+#define have_avx2 have_avx2_armeb
+#define have_isa have_isa_armeb
+#define have_altivec have_altivec_armeb
+#define have_vsx have_vsx_armeb
+#define flush_icache_range flush_icache_range_armeb
+#define s390_facilities s390_facilities_armeb
+#define tcg_dump_op tcg_dump_op_armeb
+#define tcg_dump_ops tcg_dump_ops_armeb
+#define tcg_gen_and_i64 tcg_gen_and_i64_armeb
+#define tcg_gen_discard_i64 tcg_gen_discard_i64_armeb
+#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_armeb
+#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_armeb
+#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_armeb
+#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_armeb
+#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_armeb
+#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_armeb
+#define tcg_gen_ld_i64 tcg_gen_ld_i64_armeb
+#define tcg_gen_mov_i64 tcg_gen_mov_i64_armeb
+#define tcg_gen_movi_i64 tcg_gen_movi_i64_armeb
+#define tcg_gen_mul_i64 tcg_gen_mul_i64_armeb
+#define tcg_gen_or_i64 tcg_gen_or_i64_armeb
+#define tcg_gen_sar_i64 tcg_gen_sar_i64_armeb
+#define tcg_gen_shl_i64 tcg_gen_shl_i64_armeb
+#define tcg_gen_shr_i64 tcg_gen_shr_i64_armeb
+#define tcg_gen_st_i64 tcg_gen_st_i64_armeb
+#define tcg_gen_xor_i64 tcg_gen_xor_i64_armeb
+#define cpu_icount_to_ns cpu_icount_to_ns_armeb
+#define cpu_is_stopped cpu_is_stopped_armeb
+#define cpu_get_ticks cpu_get_ticks_armeb
+#define cpu_get_clock cpu_get_clock_armeb
+#define cpu_resume cpu_resume_armeb
+#define qemu_init_vcpu qemu_init_vcpu_armeb
+#define cpu_stop_current cpu_stop_current_armeb
+#define resume_all_vcpus resume_all_vcpus_armeb
+#define vm_start vm_start_armeb
+#define address_space_dispatch_compact address_space_dispatch_compact_armeb
+#define flatview_translate flatview_translate_armeb
+#define address_space_translate_for_iotlb address_space_translate_for_iotlb_armeb
+#define qemu_get_cpu qemu_get_cpu_armeb
+#define cpu_address_space_init cpu_address_space_init_armeb
+#define cpu_get_address_space cpu_get_address_space_armeb
+#define cpu_exec_unrealizefn cpu_exec_unrealizefn_armeb
+#define cpu_exec_initfn cpu_exec_initfn_armeb
+#define cpu_exec_realizefn cpu_exec_realizefn_armeb
+#define tb_invalidate_phys_addr tb_invalidate_phys_addr_armeb
+#define cpu_watchpoint_insert cpu_watchpoint_insert_armeb
+#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_armeb
+#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_armeb
+#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_armeb
+#define cpu_breakpoint_insert cpu_breakpoint_insert_armeb
+#define cpu_breakpoint_remove cpu_breakpoint_remove_armeb
+#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_armeb
+#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_armeb
+#define cpu_abort cpu_abort_armeb
+#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_armeb
+#define memory_region_section_get_iotlb memory_region_section_get_iotlb_armeb
+#define flatview_add_to_dispatch flatview_add_to_dispatch_armeb
+#define qemu_ram_get_host_addr qemu_ram_get_host_addr_armeb
+#define qemu_ram_get_offset qemu_ram_get_offset_armeb
+#define qemu_ram_get_used_length qemu_ram_get_used_length_armeb
+#define qemu_ram_is_shared qemu_ram_is_shared_armeb
+#define qemu_ram_pagesize qemu_ram_pagesize_armeb
+#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_armeb
+#define qemu_ram_alloc qemu_ram_alloc_armeb
+#define qemu_ram_free qemu_ram_free_armeb
+#define qemu_map_ram_ptr qemu_map_ram_ptr_armeb
+#define qemu_ram_block_host_offset qemu_ram_block_host_offset_armeb
+#define qemu_ram_block_from_host qemu_ram_block_from_host_armeb
+#define qemu_ram_addr_from_host qemu_ram_addr_from_host_armeb
+#define cpu_check_watchpoint cpu_check_watchpoint_armeb
+#define iotlb_to_section iotlb_to_section_armeb
+#define address_space_dispatch_new address_space_dispatch_new_armeb
+#define address_space_dispatch_free address_space_dispatch_free_armeb
+#define flatview_read_continue flatview_read_continue_armeb
+#define address_space_read_full address_space_read_full_armeb
+#define address_space_write address_space_write_armeb
+#define address_space_rw address_space_rw_armeb
+#define cpu_physical_memory_rw cpu_physical_memory_rw_armeb
+#define address_space_write_rom address_space_write_rom_armeb
+#define cpu_flush_icache_range cpu_flush_icache_range_armeb
+#define cpu_exec_init_all cpu_exec_init_all_armeb
+#define address_space_access_valid address_space_access_valid_armeb
+#define address_space_map address_space_map_armeb
+#define address_space_unmap address_space_unmap_armeb
+#define cpu_physical_memory_map cpu_physical_memory_map_armeb
+#define cpu_physical_memory_unmap cpu_physical_memory_unmap_armeb
+#define cpu_memory_rw_debug cpu_memory_rw_debug_armeb
+#define qemu_target_page_size qemu_target_page_size_armeb
+#define qemu_target_page_bits qemu_target_page_bits_armeb
+#define qemu_target_page_bits_min qemu_target_page_bits_min_armeb
+#define target_words_bigendian target_words_bigendian_armeb
+#define cpu_physical_memory_is_io cpu_physical_memory_is_io_armeb
+#define ram_block_discard_range ram_block_discard_range_armeb
+#define ramblock_is_pmem ramblock_is_pmem_armeb
+#define page_size_init page_size_init_armeb
+#define set_preferred_target_page_bits set_preferred_target_page_bits_armeb
+#define finalize_target_page_bits finalize_target_page_bits_armeb
+#define cpu_outb cpu_outb_armeb
+#define cpu_outw cpu_outw_armeb
+#define cpu_outl cpu_outl_armeb
+#define cpu_inb cpu_inb_armeb
+#define cpu_inw cpu_inw_armeb
+#define cpu_inl cpu_inl_armeb
 #define memory_map memory_map_armeb
+#define memory_map_io memory_map_io_armeb
 #define memory_map_ptr memory_map_ptr_armeb
 #define memory_unmap memory_unmap_armeb
 #define memory_free memory_free_armeb
-#define free_code_gen_buffer free_code_gen_buffer_armeb
-#define helper_raise_exception helper_raise_exception_armeb
-#define tcg_enabled tcg_enabled_armeb
-#define tcg_exec_init tcg_exec_init_armeb
-#define memory_register_types memory_register_types_armeb
-#define cpu_exec_init_all cpu_exec_init_all_armeb
-#define vm_start vm_start_armeb
-#define resume_all_vcpus resume_all_vcpus_armeb
-#define a15_l2ctlr_read a15_l2ctlr_read_armeb
-#define a64_translate_init a64_translate_init_armeb
-#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_armeb
-#define aa64_cacheop_access aa64_cacheop_access_armeb
-#define aa64_daif_access aa64_daif_access_armeb
-#define aa64_daif_write aa64_daif_write_armeb
-#define aa64_dczid_read aa64_dczid_read_armeb
-#define aa64_fpcr_read aa64_fpcr_read_armeb
-#define aa64_fpcr_write aa64_fpcr_write_armeb
-#define aa64_fpsr_read aa64_fpsr_read_armeb
-#define aa64_fpsr_write aa64_fpsr_write_armeb
-#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_armeb
-#define aa64_zva_access aa64_zva_access_armeb
-#define aarch64_banked_spsr_index aarch64_banked_spsr_index_armeb
-#define aarch64_restore_sp aarch64_restore_sp_armeb
-#define aarch64_save_sp aarch64_save_sp_armeb
-#define accel_find accel_find_armeb
-#define accel_init_machine accel_init_machine_armeb
-#define accel_type accel_type_armeb
-#define access_with_adjusted_size access_with_adjusted_size_armeb
-#define add128 add128_armeb
-#define add16_sat add16_sat_armeb
-#define add16_usat add16_usat_armeb
-#define add192 add192_armeb
-#define add8_sat add8_sat_armeb
-#define add8_usat add8_usat_armeb
-#define add_cpreg_to_hashtable add_cpreg_to_hashtable_armeb
-#define add_cpreg_to_list add_cpreg_to_list_armeb
-#define addFloat128Sigs addFloat128Sigs_armeb
-#define addFloat32Sigs addFloat32Sigs_armeb
-#define addFloat64Sigs addFloat64Sigs_armeb
-#define addFloatx80Sigs addFloatx80Sigs_armeb
-#define add_qemu_ldst_label add_qemu_ldst_label_armeb
-#define address_space_access_valid address_space_access_valid_armeb
-#define address_space_destroy address_space_destroy_armeb
-#define address_space_destroy_dispatch address_space_destroy_dispatch_armeb
-#define address_space_get_flatview address_space_get_flatview_armeb
-#define address_space_init address_space_init_armeb
-#define address_space_init_dispatch address_space_init_dispatch_armeb
-#define address_space_lookup_region address_space_lookup_region_armeb
-#define address_space_map address_space_map_armeb
-#define address_space_read address_space_read_armeb
-#define address_space_rw address_space_rw_armeb
-#define address_space_translate address_space_translate_armeb
-#define address_space_translate_for_iotlb address_space_translate_for_iotlb_armeb
-#define address_space_translate_internal address_space_translate_internal_armeb
-#define address_space_unmap address_space_unmap_armeb
-#define address_space_update_topology address_space_update_topology_armeb
-#define address_space_update_topology_pass address_space_update_topology_pass_armeb
-#define address_space_write address_space_write_armeb
-#define addrrange_contains addrrange_contains_armeb
-#define addrrange_end addrrange_end_armeb
-#define addrrange_equal addrrange_equal_armeb
-#define addrrange_intersection addrrange_intersection_armeb
-#define addrrange_intersects addrrange_intersects_armeb
-#define addrrange_make addrrange_make_armeb
-#define adjust_endianness adjust_endianness_armeb
-#define all_helpers all_helpers_armeb
-#define alloc_code_gen_buffer alloc_code_gen_buffer_armeb
-#define alloc_entry alloc_entry_armeb
-#define always_true always_true_armeb
-#define arm1026_initfn arm1026_initfn_armeb
-#define arm1136_initfn arm1136_initfn_armeb
-#define arm1136_r2_initfn arm1136_r2_initfn_armeb
-#define arm1176_initfn arm1176_initfn_armeb
-#define arm11mpcore_initfn arm11mpcore_initfn_armeb
-#define arm926_initfn arm926_initfn_armeb
-#define arm946_initfn arm946_initfn_armeb
-#define arm_ccnt_enabled arm_ccnt_enabled_armeb
-#define arm_cp_read_zero arm_cp_read_zero_armeb
-#define arm_cp_reset_ignore arm_cp_reset_ignore_armeb
-#define arm_cpu_do_interrupt arm_cpu_do_interrupt_armeb
-#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_armeb
-#define arm_cpu_finalizefn arm_cpu_finalizefn_armeb
-#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_armeb
-#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_armeb
-#define arm_cpu_initfn arm_cpu_initfn_armeb
-#define arm_cpu_list arm_cpu_list_armeb
-#define cpu_loop_exit cpu_loop_exit_armeb
-#define arm_cpu_post_init arm_cpu_post_init_armeb
-#define arm_cpu_realizefn arm_cpu_realizefn_armeb
-#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_armeb
-#define arm_cpu_register_types arm_cpu_register_types_armeb
-#define cpu_resume_from_signal cpu_resume_from_signal_armeb
-#define arm_cpus arm_cpus_armeb
-#define arm_cpu_set_pc arm_cpu_set_pc_armeb
-#define arm_cp_write_ignore arm_cp_write_ignore_armeb
-#define arm_current_el arm_current_el_armeb
-#define arm_dc_feature arm_dc_feature_armeb
-#define arm_debug_excp_handler arm_debug_excp_handler_armeb
-#define arm_debug_target_el arm_debug_target_el_armeb
-#define arm_el_is_aa64 arm_el_is_aa64_armeb
-#define arm_env_get_cpu arm_env_get_cpu_armeb
-#define arm_excp_target_el arm_excp_target_el_armeb
-#define arm_excp_unmasked arm_excp_unmasked_armeb
-#define arm_feature arm_feature_armeb
-#define arm_generate_debug_exceptions arm_generate_debug_exceptions_armeb
-#define gen_intermediate_code
gen_intermediate_code_armeb -#define gen_intermediate_code_pc gen_intermediate_code_pc_armeb -#define arm_gen_test_cc arm_gen_test_cc_armeb -#define arm_gt_ptimer_cb arm_gt_ptimer_cb_armeb -#define arm_gt_vtimer_cb arm_gt_vtimer_cb_armeb -#define arm_handle_psci_call arm_handle_psci_call_armeb -#define arm_is_psci_call arm_is_psci_call_armeb -#define arm_is_secure arm_is_secure_armeb -#define arm_is_secure_below_el3 arm_is_secure_below_el3_armeb -#define arm_ldl_code arm_ldl_code_armeb -#define arm_lduw_code arm_lduw_code_armeb -#define arm_log_exception arm_log_exception_armeb -#define arm_reg_read arm_reg_read_armeb -#define arm_reg_reset arm_reg_reset_armeb -#define arm_reg_write arm_reg_write_armeb -#define restore_state_to_opc restore_state_to_opc_armeb -#define arm_rmode_to_sf arm_rmode_to_sf_armeb -#define arm_singlestep_active arm_singlestep_active_armeb -#define tlb_fill tlb_fill_armeb -#define tlb_flush tlb_flush_armeb -#define tlb_flush_page tlb_flush_page_armeb -#define tlb_set_page tlb_set_page_armeb -#define arm_translate_init arm_translate_init_armeb -#define arm_v7m_class_init arm_v7m_class_init_armeb -#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_armeb -#define ats_access ats_access_armeb -#define ats_write ats_write_armeb -#define bad_mode_switch bad_mode_switch_armeb -#define bank_number bank_number_armeb -#define bitmap_zero_extend bitmap_zero_extend_armeb -#define bp_wp_matches bp_wp_matches_armeb -#define breakpoint_invalidate breakpoint_invalidate_armeb -#define build_page_bitmap build_page_bitmap_armeb -#define bus_add_child bus_add_child_armeb -#define bus_class_init bus_class_init_armeb -#define bus_info bus_info_armeb -#define bus_unparent bus_unparent_armeb -#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_armeb -#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_armeb -#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_armeb -#define call_recip_estimate call_recip_estimate_armeb -#define can_merge can_merge_armeb -#define capacity_increase capacity_increase_armeb -#define ccsidr_read ccsidr_read_armeb -#define check_ap check_ap_armeb -#define check_breakpoints check_breakpoints_armeb -#define check_watchpoints check_watchpoints_armeb -#define cho cho_armeb -#define clear_bit clear_bit_armeb -#define clz32 clz32_armeb -#define clz64 clz64_armeb -#define cmp_flatrange_addr cmp_flatrange_addr_armeb -#define code_gen_alloc code_gen_alloc_armeb -#define commonNaNToFloat128 commonNaNToFloat128_armeb -#define commonNaNToFloat16 commonNaNToFloat16_armeb -#define commonNaNToFloat32 commonNaNToFloat32_armeb -#define commonNaNToFloat64 commonNaNToFloat64_armeb -#define commonNaNToFloatx80 commonNaNToFloatx80_armeb -#define compute_abs_deadline compute_abs_deadline_armeb -#define cond_name cond_name_armeb -#define configure_accelerator configure_accelerator_armeb -#define container_get container_get_armeb -#define container_info container_info_armeb -#define container_register_types container_register_types_armeb -#define contextidr_write contextidr_write_armeb -#define core_log_global_start core_log_global_start_armeb -#define core_log_global_stop core_log_global_stop_armeb -#define core_memory_listener core_memory_listener_armeb -#define cortexa15_cp_reginfo cortexa15_cp_reginfo_armeb -#define cortex_a15_initfn cortex_a15_initfn_armeb -#define cortexa8_cp_reginfo cortexa8_cp_reginfo_armeb -#define cortex_a8_initfn cortex_a8_initfn_armeb -#define cortexa9_cp_reginfo cortexa9_cp_reginfo_armeb -#define cortex_a9_initfn 
cortex_a9_initfn_armeb -#define cortex_m3_initfn cortex_m3_initfn_armeb -#define count_cpreg count_cpreg_armeb -#define countLeadingZeros32 countLeadingZeros32_armeb -#define countLeadingZeros64 countLeadingZeros64_armeb -#define cp_access_ok cp_access_ok_armeb -#define cpacr_write cpacr_write_armeb -#define cpreg_field_is_64bit cpreg_field_is_64bit_armeb -#define cp_reginfo cp_reginfo_armeb -#define cpreg_key_compare cpreg_key_compare_armeb -#define cpreg_make_keylist cpreg_make_keylist_armeb -#define cp_reg_reset cp_reg_reset_armeb -#define cpreg_to_kvm_id cpreg_to_kvm_id_armeb -#define cpsr_read cpsr_read_armeb -#define cpsr_write cpsr_write_armeb -#define cptype_valid cptype_valid_armeb -#define cpu_abort cpu_abort_armeb -#define cpu_arm_exec cpu_arm_exec_armeb -#define cpu_arm_gen_code cpu_arm_gen_code_armeb -#define cpu_arm_init cpu_arm_init_armeb -#define cpu_breakpoint_insert cpu_breakpoint_insert_armeb -#define cpu_breakpoint_remove cpu_breakpoint_remove_armeb -#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_armeb -#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_armeb -#define cpu_can_do_io cpu_can_do_io_armeb -#define cpu_can_run cpu_can_run_armeb -#define cpu_class_init cpu_class_init_armeb -#define cpu_common_class_by_name cpu_common_class_by_name_armeb -#define cpu_common_exec_interrupt cpu_common_exec_interrupt_armeb -#define cpu_common_get_arch_id cpu_common_get_arch_id_armeb -#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_armeb -#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_armeb -#define cpu_common_has_work cpu_common_has_work_armeb -#define cpu_common_initfn cpu_common_initfn_armeb -#define cpu_common_noop cpu_common_noop_armeb -#define cpu_common_parse_features cpu_common_parse_features_armeb -#define cpu_common_realizefn cpu_common_realizefn_armeb -#define cpu_common_reset cpu_common_reset_armeb -#define cpu_dump_statistics cpu_dump_statistics_armeb -#define cpu_exec_init cpu_exec_init_armeb -#define cpu_flush_icache_range cpu_flush_icache_range_armeb -#define cpu_gen_init cpu_gen_init_armeb -#define cpu_get_clock cpu_get_clock_armeb -#define cpu_get_real_ticks cpu_get_real_ticks_armeb -#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_armeb -#define cpu_handle_debug_exception cpu_handle_debug_exception_armeb -#define cpu_handle_guest_debug cpu_handle_guest_debug_armeb -#define cpu_inb cpu_inb_armeb -#define cpu_inl cpu_inl_armeb -#define cpu_interrupt cpu_interrupt_armeb -#define cpu_interrupt_handler cpu_interrupt_handler_armeb -#define cpu_inw cpu_inw_armeb -#define cpu_io_recompile cpu_io_recompile_armeb -#define cpu_is_stopped cpu_is_stopped_armeb -#define cpu_ldl_code cpu_ldl_code_armeb -#define cpu_ldub_code cpu_ldub_code_armeb -#define cpu_lduw_code cpu_lduw_code_armeb -#define cpu_memory_rw_debug cpu_memory_rw_debug_armeb -#define cpu_mmu_index cpu_mmu_index_armeb -#define cpu_outb cpu_outb_armeb -#define cpu_outl cpu_outl_armeb -#define cpu_outw cpu_outw_armeb -#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_armeb -#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_armeb -#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_armeb -#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_armeb -#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_armeb -#define cpu_physical_memory_is_io cpu_physical_memory_is_io_armeb -#define cpu_physical_memory_map cpu_physical_memory_map_armeb -#define 
cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_armeb -#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_armeb -#define cpu_physical_memory_rw cpu_physical_memory_rw_armeb -#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_armeb -#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_armeb -#define cpu_physical_memory_unmap cpu_physical_memory_unmap_armeb -#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_armeb -#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_armeb -#define cpu_register cpu_register_armeb -#define cpu_register_types cpu_register_types_armeb -#define cpu_restore_state cpu_restore_state_armeb -#define cpu_restore_state_from_tb cpu_restore_state_from_tb_armeb -#define cpu_single_step cpu_single_step_armeb -#define cpu_tb_exec cpu_tb_exec_armeb -#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_armeb -#define cpu_to_be64 cpu_to_be64_armeb -#define cpu_to_le32 cpu_to_le32_armeb -#define cpu_to_le64 cpu_to_le64_armeb -#define cpu_type_info cpu_type_info_armeb -#define cpu_unassigned_access cpu_unassigned_access_armeb -#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_armeb -#define cpu_watchpoint_insert cpu_watchpoint_insert_armeb -#define cpu_watchpoint_remove cpu_watchpoint_remove_armeb -#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_armeb -#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_armeb -#define crc32c_table crc32c_table_armeb -#define create_new_memory_mapping create_new_memory_mapping_armeb -#define csselr_write csselr_write_armeb -#define cto32 cto32_armeb -#define ctr_el0_access ctr_el0_access_armeb -#define ctz32 ctz32_armeb -#define ctz64 ctz64_armeb -#define dacr_write dacr_write_armeb -#define dbgbcr_write dbgbcr_write_armeb -#define dbgbvr_write dbgbvr_write_armeb -#define dbgwcr_write dbgwcr_write_armeb -#define dbgwvr_write dbgwvr_write_armeb -#define debug_cp_reginfo debug_cp_reginfo_armeb -#define debug_frame debug_frame_armeb -#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_armeb -#define define_arm_cp_regs define_arm_cp_regs_armeb -#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_armeb -#define define_debug_regs define_debug_regs_armeb -#define define_one_arm_cp_reg define_one_arm_cp_reg_armeb -#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_armeb -#define deposit32 deposit32_armeb -#define deposit64 deposit64_armeb -#define deregister_tm_clones deregister_tm_clones_armeb -#define device_class_base_init device_class_base_init_armeb -#define device_class_init device_class_init_armeb -#define device_finalize device_finalize_armeb -#define device_get_realized device_get_realized_armeb -#define device_initfn device_initfn_armeb -#define device_post_init device_post_init_armeb -#define device_reset device_reset_armeb -#define device_set_realized device_set_realized_armeb -#define device_type_info device_type_info_armeb -#define disas_arm_insn disas_arm_insn_armeb -#define disas_coproc_insn disas_coproc_insn_armeb -#define disas_dsp_insn disas_dsp_insn_armeb -#define disas_iwmmxt_insn disas_iwmmxt_insn_armeb -#define disas_neon_data_insn disas_neon_data_insn_armeb -#define disas_neon_ls_insn disas_neon_ls_insn_armeb -#define disas_thumb2_insn disas_thumb2_insn_armeb -#define disas_thumb_insn disas_thumb_insn_armeb -#define disas_vfp_insn disas_vfp_insn_armeb -#define disas_vfp_v8_insn 
disas_vfp_v8_insn_armeb -#define do_arm_semihosting do_arm_semihosting_armeb -#define do_clz16 do_clz16_armeb -#define do_clz8 do_clz8_armeb -#define do_constant_folding do_constant_folding_armeb -#define do_constant_folding_2 do_constant_folding_2_armeb -#define do_constant_folding_cond do_constant_folding_cond_armeb -#define do_constant_folding_cond2 do_constant_folding_cond2_armeb -#define do_constant_folding_cond_32 do_constant_folding_cond_32_armeb -#define do_constant_folding_cond_64 do_constant_folding_cond_64_armeb -#define do_constant_folding_cond_eq do_constant_folding_cond_eq_armeb -#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_armeb -#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_armeb -#define do_ssat do_ssat_armeb -#define do_usad do_usad_armeb -#define do_usat do_usat_armeb -#define do_v7m_exception_exit do_v7m_exception_exit_armeb -#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_armeb -#define dummy_func dummy_func_armeb -#define dummy_section dummy_section_armeb -#define _DYNAMIC _DYNAMIC_armeb -#define _edata _edata_armeb -#define _end _end_armeb -#define end_list end_list_armeb -#define eq128 eq128_armeb -#define ErrorClass_lookup ErrorClass_lookup_armeb -#define error_copy error_copy_armeb -#define error_exit error_exit_armeb -#define error_get_class error_get_class_armeb -#define error_get_pretty error_get_pretty_armeb -#define error_setg_file_open error_setg_file_open_armeb -#define estimateDiv128To64 estimateDiv128To64_armeb -#define estimateSqrt32 estimateSqrt32_armeb -#define excnames excnames_armeb -#define excp_is_internal excp_is_internal_armeb -#define extended_addresses_enabled extended_addresses_enabled_armeb -#define extended_mpu_ap_bits extended_mpu_ap_bits_armeb -#define extract32 extract32_armeb -#define extract64 extract64_armeb -#define extractFloat128Exp extractFloat128Exp_armeb -#define extractFloat128Frac0 extractFloat128Frac0_armeb -#define extractFloat128Frac1 extractFloat128Frac1_armeb -#define extractFloat128Sign extractFloat128Sign_armeb -#define extractFloat16Exp extractFloat16Exp_armeb -#define extractFloat16Frac extractFloat16Frac_armeb -#define extractFloat16Sign extractFloat16Sign_armeb -#define extractFloat32Exp extractFloat32Exp_armeb -#define extractFloat32Frac extractFloat32Frac_armeb -#define extractFloat32Sign extractFloat32Sign_armeb -#define extractFloat64Exp extractFloat64Exp_armeb -#define extractFloat64Frac extractFloat64Frac_armeb -#define extractFloat64Sign extractFloat64Sign_armeb -#define extractFloatx80Exp extractFloatx80Exp_armeb -#define extractFloatx80Frac extractFloatx80Frac_armeb -#define extractFloatx80Sign extractFloatx80Sign_armeb -#define fcse_write fcse_write_armeb -#define find_better_copy find_better_copy_armeb -#define find_default_machine find_default_machine_armeb -#define find_desc_by_name find_desc_by_name_armeb -#define find_first_bit find_first_bit_armeb -#define find_paging_enabled_cpu find_paging_enabled_cpu_armeb -#define find_ram_block find_ram_block_armeb -#define find_ram_offset find_ram_offset_armeb -#define find_string find_string_armeb -#define find_type find_type_armeb -#define _fini _fini_armeb -#define flatrange_equal flatrange_equal_armeb -#define flatview_destroy flatview_destroy_armeb -#define flatview_init flatview_init_armeb -#define flatview_insert flatview_insert_armeb -#define flatview_lookup flatview_lookup_armeb -#define flatview_ref flatview_ref_armeb -#define flatview_simplify flatview_simplify_armeb #define flatview_unref flatview_unref_armeb -#define float128_add 
float128_add_armeb -#define float128_compare float128_compare_armeb -#define float128_compare_internal float128_compare_internal_armeb -#define float128_compare_quiet float128_compare_quiet_armeb -#define float128_default_nan float128_default_nan_armeb -#define float128_div float128_div_armeb -#define float128_eq float128_eq_armeb -#define float128_eq_quiet float128_eq_quiet_armeb -#define float128_is_quiet_nan float128_is_quiet_nan_armeb -#define float128_is_signaling_nan float128_is_signaling_nan_armeb -#define float128_le float128_le_armeb -#define float128_le_quiet float128_le_quiet_armeb -#define float128_lt float128_lt_armeb -#define float128_lt_quiet float128_lt_quiet_armeb -#define float128_maybe_silence_nan float128_maybe_silence_nan_armeb -#define float128_mul float128_mul_armeb -#define float128_rem float128_rem_armeb -#define float128_round_to_int float128_round_to_int_armeb -#define float128_scalbn float128_scalbn_armeb -#define float128_sqrt float128_sqrt_armeb -#define float128_sub float128_sub_armeb -#define float128ToCommonNaN float128ToCommonNaN_armeb -#define float128_to_float32 float128_to_float32_armeb -#define float128_to_float64 float128_to_float64_armeb -#define float128_to_floatx80 float128_to_floatx80_armeb -#define float128_to_int32 float128_to_int32_armeb -#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_armeb -#define float128_to_int64 float128_to_int64_armeb -#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_armeb -#define float128_unordered float128_unordered_armeb -#define float128_unordered_quiet float128_unordered_quiet_armeb -#define float16_default_nan float16_default_nan_armeb +#define address_space_get_flatview address_space_get_flatview_armeb +#define memory_region_transaction_begin memory_region_transaction_begin_armeb +#define memory_region_transaction_commit memory_region_transaction_commit_armeb +#define memory_region_init memory_region_init_armeb +#define memory_region_access_valid memory_region_access_valid_armeb +#define memory_region_dispatch_read memory_region_dispatch_read_armeb +#define memory_region_dispatch_write memory_region_dispatch_write_armeb +#define memory_region_init_io memory_region_init_io_armeb +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_armeb +#define memory_region_size memory_region_size_armeb +#define memory_region_set_readonly memory_region_set_readonly_armeb +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_armeb +#define memory_region_from_host memory_region_from_host_armeb +#define memory_region_get_ram_addr memory_region_get_ram_addr_armeb +#define memory_region_add_subregion memory_region_add_subregion_armeb +#define memory_region_del_subregion memory_region_del_subregion_armeb +#define memory_region_find memory_region_find_armeb +#define memory_listener_register memory_listener_register_armeb +#define memory_listener_unregister memory_listener_unregister_armeb +#define address_space_remove_listeners address_space_remove_listeners_armeb +#define address_space_init address_space_init_armeb +#define address_space_destroy address_space_destroy_armeb +#define memory_region_init_ram memory_region_init_ram_armeb +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_armeb +#define exec_inline_op exec_inline_op_armeb +#define floatx80_default_nan floatx80_default_nan_armeb +#define float_raise float_raise_armeb #define float16_is_quiet_nan float16_is_quiet_nan_armeb #define float16_is_signaling_nan 
float16_is_signaling_nan_armeb
-#define float16_maybe_silence_nan float16_maybe_silence_nan_armeb
-#define float16ToCommonNaN float16ToCommonNaN_armeb
-#define float16_to_float32 float16_to_float32_armeb
-#define float16_to_float64 float16_to_float64_armeb
-#define float32_abs float32_abs_armeb
-#define float32_add float32_add_armeb
-#define float32_chs float32_chs_armeb
-#define float32_compare float32_compare_armeb
-#define float32_compare_internal float32_compare_internal_armeb
-#define float32_compare_quiet float32_compare_quiet_armeb
-#define float32_default_nan float32_default_nan_armeb
-#define float32_div float32_div_armeb
-#define float32_eq float32_eq_armeb
-#define float32_eq_quiet float32_eq_quiet_armeb
-#define float32_exp2 float32_exp2_armeb
-#define float32_exp2_coefficients float32_exp2_coefficients_armeb
-#define float32_is_any_nan float32_is_any_nan_armeb
-#define float32_is_infinity float32_is_infinity_armeb
-#define float32_is_neg float32_is_neg_armeb
 #define float32_is_quiet_nan float32_is_quiet_nan_armeb
 #define float32_is_signaling_nan float32_is_signaling_nan_armeb
-#define float32_is_zero float32_is_zero_armeb
-#define float32_is_zero_or_denormal float32_is_zero_or_denormal_armeb
-#define float32_le float32_le_armeb
-#define float32_le_quiet float32_le_quiet_armeb
-#define float32_log2 float32_log2_armeb
-#define float32_lt float32_lt_armeb
-#define float32_lt_quiet float32_lt_quiet_armeb
+#define float64_is_quiet_nan float64_is_quiet_nan_armeb
+#define float64_is_signaling_nan float64_is_signaling_nan_armeb
+#define floatx80_is_quiet_nan floatx80_is_quiet_nan_armeb
+#define floatx80_is_signaling_nan floatx80_is_signaling_nan_armeb
+#define floatx80_silence_nan floatx80_silence_nan_armeb
+#define propagateFloatx80NaN propagateFloatx80NaN_armeb
+#define float128_is_quiet_nan float128_is_quiet_nan_armeb
+#define float128_is_signaling_nan float128_is_signaling_nan_armeb
+#define float128_silence_nan float128_silence_nan_armeb
+#define float16_add float16_add_armeb
+#define float16_sub float16_sub_armeb
+#define float32_add float32_add_armeb
+#define float32_sub float32_sub_armeb
+#define float64_add float64_add_armeb
+#define float64_sub float64_sub_armeb
+#define float16_mul float16_mul_armeb
+#define float32_mul float32_mul_armeb
+#define float64_mul float64_mul_armeb
+#define float16_muladd float16_muladd_armeb
+#define float32_muladd float32_muladd_armeb
+#define float64_muladd float64_muladd_armeb
+#define float16_div float16_div_armeb
+#define float32_div float32_div_armeb
+#define float64_div float64_div_armeb
+#define float16_to_float32 float16_to_float32_armeb
+#define float16_to_float64 float16_to_float64_armeb
+#define float32_to_float16 float32_to_float16_armeb
+#define float32_to_float64 float32_to_float64_armeb
+#define float64_to_float16 float64_to_float16_armeb
+#define float64_to_float32 float64_to_float32_armeb
+#define float16_round_to_int float16_round_to_int_armeb
+#define float32_round_to_int float32_round_to_int_armeb
+#define float64_round_to_int float64_round_to_int_armeb
+#define float16_to_int16_scalbn float16_to_int16_scalbn_armeb
+#define float16_to_int32_scalbn float16_to_int32_scalbn_armeb
+#define float16_to_int64_scalbn float16_to_int64_scalbn_armeb
+#define float32_to_int16_scalbn float32_to_int16_scalbn_armeb
+#define float32_to_int32_scalbn float32_to_int32_scalbn_armeb
+#define float32_to_int64_scalbn float32_to_int64_scalbn_armeb
+#define float64_to_int16_scalbn float64_to_int16_scalbn_armeb
+#define float64_to_int32_scalbn float64_to_int32_scalbn_armeb
+#define float64_to_int64_scalbn float64_to_int64_scalbn_armeb
+#define float16_to_int16 float16_to_int16_armeb
+#define float16_to_int32 float16_to_int32_armeb
+#define float16_to_int64 float16_to_int64_armeb
+#define float32_to_int16 float32_to_int16_armeb
+#define float32_to_int32 float32_to_int32_armeb
+#define float32_to_int64 float32_to_int64_armeb
+#define float64_to_int16 float64_to_int16_armeb
+#define float64_to_int32 float64_to_int32_armeb
+#define float64_to_int64 float64_to_int64_armeb
+#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_armeb
+#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_armeb
+#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_armeb
+#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_armeb
+#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_armeb
+#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_armeb
+#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_armeb
+#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_armeb
+#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_armeb
+#define float16_to_uint16_scalbn float16_to_uint16_scalbn_armeb
+#define float16_to_uint32_scalbn float16_to_uint32_scalbn_armeb
+#define float16_to_uint64_scalbn float16_to_uint64_scalbn_armeb
+#define float32_to_uint16_scalbn float32_to_uint16_scalbn_armeb
+#define float32_to_uint32_scalbn float32_to_uint32_scalbn_armeb
+#define float32_to_uint64_scalbn float32_to_uint64_scalbn_armeb
+#define float64_to_uint16_scalbn float64_to_uint16_scalbn_armeb
+#define float64_to_uint32_scalbn float64_to_uint32_scalbn_armeb
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_armeb
+#define float16_to_uint16 float16_to_uint16_armeb
+#define float16_to_uint32 float16_to_uint32_armeb
+#define float16_to_uint64 float16_to_uint64_armeb
+#define float32_to_uint16 float32_to_uint16_armeb
+#define float32_to_uint32 float32_to_uint32_armeb
+#define float32_to_uint64 float32_to_uint64_armeb
+#define float64_to_uint16 float64_to_uint16_armeb
+#define float64_to_uint32 float64_to_uint32_armeb
+#define float64_to_uint64 float64_to_uint64_armeb
+#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_armeb
+#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_armeb
+#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_armeb
+#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_armeb
+#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_armeb
+#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_armeb
+#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_armeb
+#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_armeb
+#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_armeb
+#define int64_to_float16_scalbn int64_to_float16_scalbn_armeb
+#define int32_to_float16_scalbn int32_to_float16_scalbn_armeb
+#define int16_to_float16_scalbn int16_to_float16_scalbn_armeb
+#define int64_to_float16 int64_to_float16_armeb
+#define int32_to_float16 int32_to_float16_armeb
+#define int16_to_float16 int16_to_float16_armeb
+#define int64_to_float32_scalbn int64_to_float32_scalbn_armeb
+#define int32_to_float32_scalbn int32_to_float32_scalbn_armeb
+#define int16_to_float32_scalbn int16_to_float32_scalbn_armeb
+#define int64_to_float32
int64_to_float32_armeb +#define int32_to_float32 int32_to_float32_armeb +#define int16_to_float32 int16_to_float32_armeb +#define int64_to_float64_scalbn int64_to_float64_scalbn_armeb +#define int32_to_float64_scalbn int32_to_float64_scalbn_armeb +#define int16_to_float64_scalbn int16_to_float64_scalbn_armeb +#define int64_to_float64 int64_to_float64_armeb +#define int32_to_float64 int32_to_float64_armeb +#define int16_to_float64 int16_to_float64_armeb +#define uint64_to_float16_scalbn uint64_to_float16_scalbn_armeb +#define uint32_to_float16_scalbn uint32_to_float16_scalbn_armeb +#define uint16_to_float16_scalbn uint16_to_float16_scalbn_armeb +#define uint64_to_float16 uint64_to_float16_armeb +#define uint32_to_float16 uint32_to_float16_armeb +#define uint16_to_float16 uint16_to_float16_armeb +#define uint64_to_float32_scalbn uint64_to_float32_scalbn_armeb +#define uint32_to_float32_scalbn uint32_to_float32_scalbn_armeb +#define uint16_to_float32_scalbn uint16_to_float32_scalbn_armeb +#define uint64_to_float32 uint64_to_float32_armeb +#define uint32_to_float32 uint32_to_float32_armeb +#define uint16_to_float32 uint16_to_float32_armeb +#define uint64_to_float64_scalbn uint64_to_float64_scalbn_armeb +#define uint32_to_float64_scalbn uint32_to_float64_scalbn_armeb +#define uint16_to_float64_scalbn uint16_to_float64_scalbn_armeb +#define uint64_to_float64 uint64_to_float64_armeb +#define uint32_to_float64 uint32_to_float64_armeb +#define uint16_to_float64 uint16_to_float64_armeb +#define float16_min float16_min_armeb +#define float16_minnum float16_minnum_armeb +#define float16_minnummag float16_minnummag_armeb +#define float16_max float16_max_armeb +#define float16_maxnum float16_maxnum_armeb +#define float16_maxnummag float16_maxnummag_armeb +#define float32_min float32_min_armeb +#define float32_minnum float32_minnum_armeb +#define float32_minnummag float32_minnummag_armeb #define float32_max float32_max_armeb #define float32_maxnum float32_maxnum_armeb #define float32_maxnummag float32_maxnummag_armeb -#define float32_maybe_silence_nan float32_maybe_silence_nan_armeb -#define float32_min float32_min_armeb -#define float32_minmax float32_minmax_armeb -#define float32_minnum float32_minnum_armeb -#define float32_minnummag float32_minnummag_armeb -#define float32_mul float32_mul_armeb -#define float32_muladd float32_muladd_armeb -#define float32_rem float32_rem_armeb -#define float32_round_to_int float32_round_to_int_armeb -#define float32_scalbn float32_scalbn_armeb -#define float32_set_sign float32_set_sign_armeb -#define float32_sqrt float32_sqrt_armeb -#define float32_squash_input_denormal float32_squash_input_denormal_armeb -#define float32_sub float32_sub_armeb -#define float32ToCommonNaN float32ToCommonNaN_armeb -#define float32_to_float128 float32_to_float128_armeb -#define float32_to_float16 float32_to_float16_armeb -#define float32_to_float64 float32_to_float64_armeb -#define float32_to_floatx80 float32_to_floatx80_armeb -#define float32_to_int16 float32_to_int16_armeb -#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_armeb -#define float32_to_int32 float32_to_int32_armeb -#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_armeb -#define float32_to_int64 float32_to_int64_armeb -#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_armeb -#define float32_to_uint16 float32_to_uint16_armeb -#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_armeb -#define float32_to_uint32 float32_to_uint32_armeb -#define 
float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_armeb -#define float32_to_uint64 float32_to_uint64_armeb -#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_armeb -#define float32_unordered float32_unordered_armeb -#define float32_unordered_quiet float32_unordered_quiet_armeb -#define float64_abs float64_abs_armeb -#define float64_add float64_add_armeb -#define float64_chs float64_chs_armeb -#define float64_compare float64_compare_armeb -#define float64_compare_internal float64_compare_internal_armeb -#define float64_compare_quiet float64_compare_quiet_armeb -#define float64_default_nan float64_default_nan_armeb -#define float64_div float64_div_armeb -#define float64_eq float64_eq_armeb -#define float64_eq_quiet float64_eq_quiet_armeb -#define float64_is_any_nan float64_is_any_nan_armeb -#define float64_is_infinity float64_is_infinity_armeb -#define float64_is_neg float64_is_neg_armeb -#define float64_is_quiet_nan float64_is_quiet_nan_armeb -#define float64_is_signaling_nan float64_is_signaling_nan_armeb -#define float64_is_zero float64_is_zero_armeb -#define float64_le float64_le_armeb -#define float64_le_quiet float64_le_quiet_armeb -#define float64_log2 float64_log2_armeb -#define float64_lt float64_lt_armeb -#define float64_lt_quiet float64_lt_quiet_armeb +#define float64_min float64_min_armeb +#define float64_minnum float64_minnum_armeb +#define float64_minnummag float64_minnummag_armeb #define float64_max float64_max_armeb #define float64_maxnum float64_maxnum_armeb #define float64_maxnummag float64_maxnummag_armeb -#define float64_maybe_silence_nan float64_maybe_silence_nan_armeb -#define float64_min float64_min_armeb -#define float64_minmax float64_minmax_armeb -#define float64_minnum float64_minnum_armeb -#define float64_minnummag float64_minnummag_armeb -#define float64_mul float64_mul_armeb -#define float64_muladd float64_muladd_armeb -#define float64_rem float64_rem_armeb -#define float64_round_to_int float64_round_to_int_armeb +#define float16_compare float16_compare_armeb +#define float16_compare_quiet float16_compare_quiet_armeb +#define float32_compare float32_compare_armeb +#define float32_compare_quiet float32_compare_quiet_armeb +#define float64_compare float64_compare_armeb +#define float64_compare_quiet float64_compare_quiet_armeb +#define float16_scalbn float16_scalbn_armeb +#define float32_scalbn float32_scalbn_armeb #define float64_scalbn float64_scalbn_armeb -#define float64_set_sign float64_set_sign_armeb +#define float16_sqrt float16_sqrt_armeb +#define float32_sqrt float32_sqrt_armeb #define float64_sqrt float64_sqrt_armeb +#define float16_default_nan float16_default_nan_armeb +#define float32_default_nan float32_default_nan_armeb +#define float64_default_nan float64_default_nan_armeb +#define float128_default_nan float128_default_nan_armeb +#define float16_silence_nan float16_silence_nan_armeb +#define float32_silence_nan float32_silence_nan_armeb +#define float64_silence_nan float64_silence_nan_armeb +#define float16_squash_input_denormal float16_squash_input_denormal_armeb +#define float32_squash_input_denormal float32_squash_input_denormal_armeb #define float64_squash_input_denormal float64_squash_input_denormal_armeb -#define float64_sub float64_sub_armeb -#define float64ToCommonNaN float64ToCommonNaN_armeb -#define float64_to_float128 float64_to_float128_armeb -#define float64_to_float16 float64_to_float16_armeb -#define float64_to_float32 float64_to_float32_armeb +#define normalizeFloatx80Subnormal 
normalizeFloatx80Subnormal_armeb +#define roundAndPackFloatx80 roundAndPackFloatx80_armeb +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_armeb +#define int32_to_floatx80 int32_to_floatx80_armeb +#define int32_to_float128 int32_to_float128_armeb +#define int64_to_floatx80 int64_to_floatx80_armeb +#define int64_to_float128 int64_to_float128_armeb +#define uint64_to_float128 uint64_to_float128_armeb +#define float32_to_floatx80 float32_to_floatx80_armeb +#define float32_to_float128 float32_to_float128_armeb +#define float32_rem float32_rem_armeb +#define float32_exp2 float32_exp2_armeb +#define float32_log2 float32_log2_armeb +#define float32_eq float32_eq_armeb +#define float32_le float32_le_armeb +#define float32_lt float32_lt_armeb +#define float32_unordered float32_unordered_armeb +#define float32_eq_quiet float32_eq_quiet_armeb +#define float32_le_quiet float32_le_quiet_armeb +#define float32_lt_quiet float32_lt_quiet_armeb +#define float32_unordered_quiet float32_unordered_quiet_armeb #define float64_to_floatx80 float64_to_floatx80_armeb -#define float64_to_int16 float64_to_int16_armeb -#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_armeb -#define float64_to_int32 float64_to_int32_armeb -#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_armeb -#define float64_to_int64 float64_to_int64_armeb -#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_armeb -#define float64_to_uint16 float64_to_uint16_armeb -#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_armeb -#define float64_to_uint32 float64_to_uint32_armeb -#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_armeb -#define float64_to_uint64 float64_to_uint64_armeb -#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_armeb -#define float64_trunc_to_int float64_trunc_to_int_armeb +#define float64_to_float128 float64_to_float128_armeb +#define float64_rem float64_rem_armeb +#define float64_log2 float64_log2_armeb +#define float64_eq float64_eq_armeb +#define float64_le float64_le_armeb +#define float64_lt float64_lt_armeb #define float64_unordered float64_unordered_armeb +#define float64_eq_quiet float64_eq_quiet_armeb +#define float64_le_quiet float64_le_quiet_armeb +#define float64_lt_quiet float64_lt_quiet_armeb #define float64_unordered_quiet float64_unordered_quiet_armeb -#define float_raise float_raise_armeb -#define floatx80_add floatx80_add_armeb -#define floatx80_compare floatx80_compare_armeb -#define floatx80_compare_internal floatx80_compare_internal_armeb -#define floatx80_compare_quiet floatx80_compare_quiet_armeb -#define floatx80_default_nan floatx80_default_nan_armeb -#define floatx80_div floatx80_div_armeb -#define floatx80_eq floatx80_eq_armeb -#define floatx80_eq_quiet floatx80_eq_quiet_armeb -#define floatx80_is_quiet_nan floatx80_is_quiet_nan_armeb -#define floatx80_is_signaling_nan floatx80_is_signaling_nan_armeb -#define floatx80_le floatx80_le_armeb -#define floatx80_le_quiet floatx80_le_quiet_armeb -#define floatx80_lt floatx80_lt_armeb -#define floatx80_lt_quiet floatx80_lt_quiet_armeb -#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_armeb -#define floatx80_mul floatx80_mul_armeb -#define floatx80_rem floatx80_rem_armeb -#define floatx80_round_to_int floatx80_round_to_int_armeb -#define floatx80_scalbn floatx80_scalbn_armeb -#define floatx80_sqrt floatx80_sqrt_armeb -#define floatx80_sub floatx80_sub_armeb -#define floatx80ToCommonNaN 
floatx80ToCommonNaN_armeb -#define floatx80_to_float128 floatx80_to_float128_armeb -#define floatx80_to_float32 floatx80_to_float32_armeb -#define floatx80_to_float64 floatx80_to_float64_armeb #define floatx80_to_int32 floatx80_to_int32_armeb #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_armeb #define floatx80_to_int64 floatx80_to_int64_armeb #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_armeb +#define floatx80_to_float32 floatx80_to_float32_armeb +#define floatx80_to_float64 floatx80_to_float64_armeb +#define floatx80_to_float128 floatx80_to_float128_armeb +#define floatx80_round floatx80_round_armeb +#define floatx80_round_to_int floatx80_round_to_int_armeb +#define floatx80_add floatx80_add_armeb +#define floatx80_sub floatx80_sub_armeb +#define floatx80_mul floatx80_mul_armeb +#define floatx80_div floatx80_div_armeb +#define floatx80_rem floatx80_rem_armeb +#define floatx80_sqrt floatx80_sqrt_armeb +#define floatx80_eq floatx80_eq_armeb +#define floatx80_le floatx80_le_armeb +#define floatx80_lt floatx80_lt_armeb #define floatx80_unordered floatx80_unordered_armeb +#define floatx80_eq_quiet floatx80_eq_quiet_armeb +#define floatx80_le_quiet floatx80_le_quiet_armeb +#define floatx80_lt_quiet floatx80_lt_quiet_armeb #define floatx80_unordered_quiet floatx80_unordered_quiet_armeb -#define flush_icache_range flush_icache_range_armeb -#define format_string format_string_armeb -#define fp_decode_rm fp_decode_rm_armeb -#define frame_dummy frame_dummy_armeb -#define free_range free_range_armeb -#define fstat64 fstat64_armeb -#define futex_wait futex_wait_armeb -#define futex_wake futex_wake_armeb -#define gen_aa32_ld16s gen_aa32_ld16s_armeb -#define gen_aa32_ld16u gen_aa32_ld16u_armeb -#define gen_aa32_ld32u gen_aa32_ld32u_armeb -#define gen_aa32_ld64 gen_aa32_ld64_armeb -#define gen_aa32_ld8s gen_aa32_ld8s_armeb -#define gen_aa32_ld8u gen_aa32_ld8u_armeb -#define gen_aa32_st16 gen_aa32_st16_armeb -#define gen_aa32_st32 gen_aa32_st32_armeb -#define gen_aa32_st64 gen_aa32_st64_armeb -#define gen_aa32_st8 gen_aa32_st8_armeb -#define gen_adc gen_adc_armeb -#define gen_adc_CC gen_adc_CC_armeb -#define gen_add16 gen_add16_armeb -#define gen_add_carry gen_add_carry_armeb -#define gen_add_CC gen_add_CC_armeb -#define gen_add_datah_offset gen_add_datah_offset_armeb -#define gen_add_data_offset gen_add_data_offset_armeb -#define gen_addq gen_addq_armeb -#define gen_addq_lo gen_addq_lo_armeb -#define gen_addq_msw gen_addq_msw_armeb -#define gen_arm_parallel_addsub gen_arm_parallel_addsub_armeb -#define gen_arm_shift_im gen_arm_shift_im_armeb -#define gen_arm_shift_reg gen_arm_shift_reg_armeb -#define gen_bx gen_bx_armeb -#define gen_bx_im gen_bx_im_armeb -#define gen_clrex gen_clrex_armeb -#define generate_memory_topology generate_memory_topology_armeb -#define generic_timer_cp_reginfo generic_timer_cp_reginfo_armeb -#define gen_exception gen_exception_armeb -#define gen_exception_insn gen_exception_insn_armeb -#define gen_exception_internal gen_exception_internal_armeb -#define gen_exception_internal_insn gen_exception_internal_insn_armeb -#define gen_exception_return gen_exception_return_armeb -#define gen_goto_tb gen_goto_tb_armeb -#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_armeb -#define gen_helper_add_saturate gen_helper_add_saturate_armeb -#define gen_helper_add_setq gen_helper_add_setq_armeb -#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_armeb -#define gen_helper_clz32 gen_helper_clz32_armeb -#define 
gen_helper_clz64 gen_helper_clz64_armeb -#define gen_helper_clz_arm gen_helper_clz_arm_armeb +#define float128_to_int32 float128_to_int32_armeb +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_armeb +#define float128_to_int64 float128_to_int64_armeb +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_armeb +#define float128_to_uint64 float128_to_uint64_armeb +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_armeb +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_armeb +#define float128_to_uint32 float128_to_uint32_armeb +#define float128_to_float32 float128_to_float32_armeb +#define float128_to_float64 float128_to_float64_armeb +#define float128_to_floatx80 float128_to_floatx80_armeb +#define float128_round_to_int float128_round_to_int_armeb +#define float128_add float128_add_armeb +#define float128_sub float128_sub_armeb +#define float128_mul float128_mul_armeb +#define float128_div float128_div_armeb +#define float128_rem float128_rem_armeb +#define float128_sqrt float128_sqrt_armeb +#define float128_eq float128_eq_armeb +#define float128_le float128_le_armeb +#define float128_lt float128_lt_armeb +#define float128_unordered float128_unordered_armeb +#define float128_eq_quiet float128_eq_quiet_armeb +#define float128_le_quiet float128_le_quiet_armeb +#define float128_lt_quiet float128_lt_quiet_armeb +#define float128_unordered_quiet float128_unordered_quiet_armeb +#define floatx80_compare floatx80_compare_armeb +#define floatx80_compare_quiet floatx80_compare_quiet_armeb +#define float128_compare float128_compare_armeb +#define float128_compare_quiet float128_compare_quiet_armeb +#define floatx80_scalbn floatx80_scalbn_armeb +#define float128_scalbn float128_scalbn_armeb +#define softfloat_init softfloat_init_armeb +#define tcg_optimize tcg_optimize_armeb +#define gen_new_label gen_new_label_armeb +#define tcg_can_emit_vec_op tcg_can_emit_vec_op_armeb +#define tcg_expand_vec_op tcg_expand_vec_op_armeb +#define tcg_register_jit tcg_register_jit_armeb +#define tcg_tb_insert tcg_tb_insert_armeb +#define tcg_tb_remove tcg_tb_remove_armeb +#define tcg_tb_lookup tcg_tb_lookup_armeb +#define tcg_tb_foreach tcg_tb_foreach_armeb +#define tcg_nb_tbs tcg_nb_tbs_armeb +#define tcg_region_reset_all tcg_region_reset_all_armeb +#define tcg_region_init tcg_region_init_armeb +#define tcg_code_size tcg_code_size_armeb +#define tcg_code_capacity tcg_code_capacity_armeb +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_armeb +#define tcg_malloc_internal tcg_malloc_internal_armeb +#define tcg_pool_reset tcg_pool_reset_armeb +#define tcg_context_init tcg_context_init_armeb +#define tcg_tb_alloc tcg_tb_alloc_armeb +#define tcg_prologue_init tcg_prologue_init_armeb +#define tcg_func_start tcg_func_start_armeb +#define tcg_set_frame tcg_set_frame_armeb +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_armeb +#define tcg_temp_new_internal tcg_temp_new_internal_armeb +#define tcg_temp_new_vec tcg_temp_new_vec_armeb +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_armeb +#define tcg_temp_free_internal tcg_temp_free_internal_armeb +#define tcg_const_i32 tcg_const_i32_armeb +#define tcg_const_i64 tcg_const_i64_armeb +#define tcg_const_local_i32 tcg_const_local_i32_armeb +#define tcg_const_local_i64 tcg_const_local_i64_armeb +#define tcg_op_supported tcg_op_supported_armeb +#define tcg_gen_callN tcg_gen_callN_armeb +#define tcg_op_remove tcg_op_remove_armeb +#define tcg_emit_op 
tcg_emit_op_armeb +#define tcg_op_insert_before tcg_op_insert_before_armeb +#define tcg_op_insert_after tcg_op_insert_after_armeb +#define tcg_cpu_exec_time tcg_cpu_exec_time_armeb +#define tcg_gen_code tcg_gen_code_armeb +#define tcg_gen_op1 tcg_gen_op1_armeb +#define tcg_gen_op2 tcg_gen_op2_armeb +#define tcg_gen_op3 tcg_gen_op3_armeb +#define tcg_gen_op4 tcg_gen_op4_armeb +#define tcg_gen_op5 tcg_gen_op5_armeb +#define tcg_gen_op6 tcg_gen_op6_armeb +#define tcg_gen_mb tcg_gen_mb_armeb +#define tcg_gen_addi_i32 tcg_gen_addi_i32_armeb +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_armeb +#define tcg_gen_subi_i32 tcg_gen_subi_i32_armeb +#define tcg_gen_andi_i32 tcg_gen_andi_i32_armeb +#define tcg_gen_ori_i32 tcg_gen_ori_i32_armeb +#define tcg_gen_xori_i32 tcg_gen_xori_i32_armeb +#define tcg_gen_shli_i32 tcg_gen_shli_i32_armeb +#define tcg_gen_shri_i32 tcg_gen_shri_i32_armeb +#define tcg_gen_sari_i32 tcg_gen_sari_i32_armeb +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_armeb +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_armeb +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_armeb +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_armeb +#define tcg_gen_muli_i32 tcg_gen_muli_i32_armeb +#define tcg_gen_div_i32 tcg_gen_div_i32_armeb +#define tcg_gen_rem_i32 tcg_gen_rem_i32_armeb +#define tcg_gen_divu_i32 tcg_gen_divu_i32_armeb +#define tcg_gen_remu_i32 tcg_gen_remu_i32_armeb +#define tcg_gen_andc_i32 tcg_gen_andc_i32_armeb +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_armeb +#define tcg_gen_nand_i32 tcg_gen_nand_i32_armeb +#define tcg_gen_nor_i32 tcg_gen_nor_i32_armeb +#define tcg_gen_orc_i32 tcg_gen_orc_i32_armeb +#define tcg_gen_clz_i32 tcg_gen_clz_i32_armeb +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_armeb +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_armeb +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_armeb +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_armeb +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_armeb +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_armeb +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_armeb +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_armeb +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_armeb +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_armeb +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_armeb +#define tcg_gen_extract_i32 tcg_gen_extract_i32_armeb +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_armeb +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_armeb +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_armeb +#define tcg_gen_add2_i32 tcg_gen_add2_i32_armeb +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_armeb +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_armeb +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_armeb +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_armeb +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_armeb +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_armeb +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_armeb +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_armeb +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_armeb +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_armeb +#define tcg_gen_smin_i32 tcg_gen_smin_i32_armeb +#define tcg_gen_umin_i32 tcg_gen_umin_i32_armeb +#define tcg_gen_smax_i32 tcg_gen_smax_i32_armeb +#define tcg_gen_umax_i32 tcg_gen_umax_i32_armeb +#define tcg_gen_abs_i32 tcg_gen_abs_i32_armeb +#define tcg_gen_addi_i64 tcg_gen_addi_i64_armeb +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_armeb +#define tcg_gen_subi_i64 tcg_gen_subi_i64_armeb +#define tcg_gen_andi_i64 tcg_gen_andi_i64_armeb +#define tcg_gen_ori_i64 tcg_gen_ori_i64_armeb +#define tcg_gen_xori_i64 
tcg_gen_xori_i64_armeb +#define tcg_gen_shli_i64 tcg_gen_shli_i64_armeb +#define tcg_gen_shri_i64 tcg_gen_shri_i64_armeb +#define tcg_gen_sari_i64 tcg_gen_sari_i64_armeb +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_armeb +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_armeb +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_armeb +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_armeb +#define tcg_gen_muli_i64 tcg_gen_muli_i64_armeb +#define tcg_gen_div_i64 tcg_gen_div_i64_armeb +#define tcg_gen_rem_i64 tcg_gen_rem_i64_armeb +#define tcg_gen_divu_i64 tcg_gen_divu_i64_armeb +#define tcg_gen_remu_i64 tcg_gen_remu_i64_armeb +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_armeb +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_armeb +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_armeb +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_armeb +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_armeb +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_armeb +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_armeb +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_armeb +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_armeb +#define tcg_gen_not_i64 tcg_gen_not_i64_armeb +#define tcg_gen_andc_i64 tcg_gen_andc_i64_armeb +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_armeb +#define tcg_gen_nand_i64 tcg_gen_nand_i64_armeb +#define tcg_gen_nor_i64 tcg_gen_nor_i64_armeb +#define tcg_gen_orc_i64 tcg_gen_orc_i64_armeb +#define tcg_gen_clz_i64 tcg_gen_clz_i64_armeb +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_armeb +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_armeb +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_armeb +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_armeb +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_armeb +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_armeb +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_armeb +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_armeb +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_armeb +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_armeb +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_armeb +#define tcg_gen_extract_i64 tcg_gen_extract_i64_armeb +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_armeb +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_armeb +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_armeb +#define tcg_gen_add2_i64 tcg_gen_add2_i64_armeb +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_armeb +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_armeb +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_armeb +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_armeb +#define tcg_gen_smin_i64 tcg_gen_smin_i64_armeb +#define tcg_gen_umin_i64 tcg_gen_umin_i64_armeb +#define tcg_gen_smax_i64 tcg_gen_smax_i64_armeb +#define tcg_gen_umax_i64 tcg_gen_umax_i64_armeb +#define tcg_gen_abs_i64 tcg_gen_abs_i64_armeb +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_armeb +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_armeb +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_armeb +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_armeb +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_armeb +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_armeb +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_armeb +#define tcg_gen_exit_tb tcg_gen_exit_tb_armeb +#define tcg_gen_goto_tb tcg_gen_goto_tb_armeb +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_armeb +#define check_exit_request check_exit_request_armeb +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_armeb +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_armeb +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_armeb +#define tcg_gen_qemu_st_i64 
tcg_gen_qemu_st_i64_armeb
+#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_armeb
+#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_armeb
+#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_armeb
+#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_armeb
+#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_armeb
+#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_armeb
+#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_armeb
+#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_armeb
+#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_armeb
+#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_armeb
+#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_armeb
+#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_armeb
+#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_armeb
+#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_armeb
+#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_armeb
+#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_armeb
+#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_armeb
+#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_armeb
+#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_armeb
+#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_armeb
+#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_armeb
+#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_armeb
+#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_armeb
+#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_armeb
+#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_armeb
+#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_armeb
+#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_armeb
+#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_armeb
+#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_armeb
+#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_armeb
+#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_armeb
+#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_armeb
+#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_armeb
+#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_armeb
+#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_armeb
+#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_armeb
+#define simd_desc simd_desc_armeb
+#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_armeb
+#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_armeb
+#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_armeb
+#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_armeb
+#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_armeb
+#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_armeb
+#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_armeb
+#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_armeb
+#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_armeb
+#define tcg_gen_gvec_2 tcg_gen_gvec_2_armeb
+#define tcg_gen_gvec_2i tcg_gen_gvec_2i_armeb
+#define tcg_gen_gvec_2s tcg_gen_gvec_2s_armeb
+#define tcg_gen_gvec_3 tcg_gen_gvec_3_armeb
+#define tcg_gen_gvec_3i tcg_gen_gvec_3i_armeb
+#define tcg_gen_gvec_4 tcg_gen_gvec_4_armeb
+#define tcg_gen_gvec_mov tcg_gen_gvec_mov_armeb
+#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_armeb
+#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_armeb
+#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_armeb
+#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_armeb
+#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_armeb
+#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_armeb
+#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_armeb
+#define tcg_gen_gvec_not tcg_gen_gvec_not_armeb
+#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_armeb
+#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_armeb
+#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_armeb
+#define tcg_gen_gvec_add tcg_gen_gvec_add_armeb
+#define tcg_gen_gvec_adds tcg_gen_gvec_adds_armeb
+#define tcg_gen_gvec_addi tcg_gen_gvec_addi_armeb
+#define tcg_gen_gvec_subs tcg_gen_gvec_subs_armeb
+#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_armeb
+#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_armeb
+#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_armeb
+#define tcg_gen_gvec_sub tcg_gen_gvec_sub_armeb
+#define tcg_gen_gvec_mul tcg_gen_gvec_mul_armeb
+#define tcg_gen_gvec_muls tcg_gen_gvec_muls_armeb
+#define tcg_gen_gvec_muli tcg_gen_gvec_muli_armeb
+#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_armeb
+#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_armeb
+#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_armeb
+#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_armeb
+#define tcg_gen_gvec_smin tcg_gen_gvec_smin_armeb
+#define tcg_gen_gvec_umin tcg_gen_gvec_umin_armeb
+#define tcg_gen_gvec_smax tcg_gen_gvec_smax_armeb
+#define tcg_gen_gvec_umax tcg_gen_gvec_umax_armeb
+#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_armeb
+#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_armeb
+#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_armeb
+#define tcg_gen_gvec_neg tcg_gen_gvec_neg_armeb
+#define tcg_gen_gvec_abs tcg_gen_gvec_abs_armeb
+#define tcg_gen_gvec_and tcg_gen_gvec_and_armeb
+#define tcg_gen_gvec_or tcg_gen_gvec_or_armeb
+#define tcg_gen_gvec_xor tcg_gen_gvec_xor_armeb
+#define tcg_gen_gvec_andc tcg_gen_gvec_andc_armeb
+#define tcg_gen_gvec_orc tcg_gen_gvec_orc_armeb
+#define tcg_gen_gvec_nand tcg_gen_gvec_nand_armeb
+#define tcg_gen_gvec_nor tcg_gen_gvec_nor_armeb
+#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_armeb
+#define tcg_gen_gvec_ands tcg_gen_gvec_ands_armeb
+#define tcg_gen_gvec_andi tcg_gen_gvec_andi_armeb
+#define tcg_gen_gvec_xors tcg_gen_gvec_xors_armeb
+#define tcg_gen_gvec_xori tcg_gen_gvec_xori_armeb
+#define tcg_gen_gvec_ors tcg_gen_gvec_ors_armeb
+#define tcg_gen_gvec_ori tcg_gen_gvec_ori_armeb
+#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_armeb
+#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_armeb
+#define tcg_gen_gvec_shli tcg_gen_gvec_shli_armeb
+#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_armeb
+#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_armeb
+#define tcg_gen_gvec_shri tcg_gen_gvec_shri_armeb
+#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_armeb
+#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_armeb
+#define tcg_gen_gvec_sari tcg_gen_gvec_sari_armeb
+#define tcg_gen_gvec_shls tcg_gen_gvec_shls_armeb
+#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_armeb
+#define tcg_gen_gvec_sars tcg_gen_gvec_sars_armeb
+#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_armeb
+#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_armeb
+#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_armeb
+#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_armeb
+#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_armeb
+#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_armeb
+#define vec_gen_2 vec_gen_2_armeb
+#define vec_gen_3 vec_gen_3_armeb
+#define vec_gen_4 vec_gen_4_armeb
+#define tcg_gen_mov_vec tcg_gen_mov_vec_armeb
+#define tcg_const_zeros_vec tcg_const_zeros_vec_armeb
+#define tcg_const_ones_vec tcg_const_ones_vec_armeb
+#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_armeb
+#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_armeb
+#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_armeb
+#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_armeb
+#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_armeb
+#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_armeb
+#define tcg_gen_dupi_vec tcg_gen_dupi_vec_armeb
+#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_armeb
+#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_armeb
+#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_armeb
+#define tcg_gen_ld_vec tcg_gen_ld_vec_armeb
+#define tcg_gen_st_vec tcg_gen_st_vec_armeb
+#define tcg_gen_stl_vec tcg_gen_stl_vec_armeb
+#define tcg_gen_and_vec tcg_gen_and_vec_armeb
+#define tcg_gen_or_vec tcg_gen_or_vec_armeb
+#define tcg_gen_xor_vec tcg_gen_xor_vec_armeb
+#define tcg_gen_andc_vec tcg_gen_andc_vec_armeb
+#define tcg_gen_orc_vec tcg_gen_orc_vec_armeb
+#define tcg_gen_nand_vec tcg_gen_nand_vec_armeb
+#define tcg_gen_nor_vec tcg_gen_nor_vec_armeb
+#define tcg_gen_eqv_vec tcg_gen_eqv_vec_armeb
+#define tcg_gen_not_vec tcg_gen_not_vec_armeb
+#define tcg_gen_neg_vec tcg_gen_neg_vec_armeb
+#define tcg_gen_abs_vec tcg_gen_abs_vec_armeb
+#define tcg_gen_shli_vec tcg_gen_shli_vec_armeb
+#define tcg_gen_shri_vec tcg_gen_shri_vec_armeb
+#define tcg_gen_sari_vec tcg_gen_sari_vec_armeb
+#define tcg_gen_cmp_vec tcg_gen_cmp_vec_armeb
+#define tcg_gen_add_vec tcg_gen_add_vec_armeb
+#define tcg_gen_sub_vec tcg_gen_sub_vec_armeb
+#define tcg_gen_mul_vec tcg_gen_mul_vec_armeb
+#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_armeb
+#define tcg_gen_usadd_vec tcg_gen_usadd_vec_armeb
+#define tcg_gen_sssub_vec tcg_gen_sssub_vec_armeb
+#define tcg_gen_ussub_vec tcg_gen_ussub_vec_armeb
+#define tcg_gen_smin_vec tcg_gen_smin_vec_armeb
+#define tcg_gen_umin_vec tcg_gen_umin_vec_armeb
+#define tcg_gen_smax_vec tcg_gen_smax_vec_armeb
+#define tcg_gen_umax_vec tcg_gen_umax_vec_armeb
+#define tcg_gen_shlv_vec tcg_gen_shlv_vec_armeb
+#define tcg_gen_shrv_vec tcg_gen_shrv_vec_armeb
+#define tcg_gen_sarv_vec tcg_gen_sarv_vec_armeb
+#define tcg_gen_shls_vec tcg_gen_shls_vec_armeb
+#define tcg_gen_shrs_vec tcg_gen_shrs_vec_armeb
+#define tcg_gen_sars_vec tcg_gen_sars_vec_armeb
+#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_armeb
+#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_armeb
+#define tb_htable_lookup tb_htable_lookup_armeb
+#define tb_set_jmp_target tb_set_jmp_target_armeb
+#define cpu_exec cpu_exec_armeb
+#define cpu_loop_exit_noexc cpu_loop_exit_noexc_armeb
+#define cpu_reloading_memory_map cpu_reloading_memory_map_armeb
+#define cpu_loop_exit cpu_loop_exit_armeb
+#define cpu_loop_exit_restore cpu_loop_exit_restore_armeb
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_armeb
+#define tlb_init tlb_init_armeb
+#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_armeb
+#define tlb_flush tlb_flush_armeb
+#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_armeb
+#define tlb_flush_all_cpus tlb_flush_all_cpus_armeb
+#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_armeb
+#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_armeb
+#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_armeb
+#define tlb_flush_page tlb_flush_page_armeb
+#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_armeb
+#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_armeb
+#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_armeb
+#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_armeb
+#define tlb_protect_code tlb_protect_code_armeb
+#define tlb_unprotect_code tlb_unprotect_code_armeb
+#define tlb_reset_dirty tlb_reset_dirty_armeb
+#define tlb_set_dirty tlb_set_dirty_armeb
+#define tlb_set_page_with_attrs tlb_set_page_with_attrs_armeb
+#define tlb_set_page tlb_set_page_armeb
+#define get_page_addr_code_hostp get_page_addr_code_hostp_armeb
+#define get_page_addr_code get_page_addr_code_armeb
+#define probe_access probe_access_armeb
+#define tlb_vaddr_to_host tlb_vaddr_to_host_armeb
+#define helper_ret_ldub_mmu helper_ret_ldub_mmu_armeb
+#define helper_le_lduw_mmu helper_le_lduw_mmu_armeb
+#define helper_be_lduw_mmu helper_be_lduw_mmu_armeb
+#define helper_le_ldul_mmu helper_le_ldul_mmu_armeb
+#define helper_be_ldul_mmu helper_be_ldul_mmu_armeb
+#define helper_le_ldq_mmu helper_le_ldq_mmu_armeb
+#define helper_be_ldq_mmu helper_be_ldq_mmu_armeb
+#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_armeb
+#define helper_le_ldsw_mmu helper_le_ldsw_mmu_armeb
+#define helper_be_ldsw_mmu helper_be_ldsw_mmu_armeb
+#define helper_le_ldsl_mmu helper_le_ldsl_mmu_armeb
+#define helper_be_ldsl_mmu helper_be_ldsl_mmu_armeb
+#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_armeb
+#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_armeb
+#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_armeb
+#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_armeb
+#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_armeb
+#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_armeb
+#define cpu_ldub_data_ra cpu_ldub_data_ra_armeb
+#define cpu_ldsb_data_ra cpu_ldsb_data_ra_armeb
+#define cpu_lduw_data_ra cpu_lduw_data_ra_armeb
+#define cpu_ldsw_data_ra cpu_ldsw_data_ra_armeb
+#define cpu_ldl_data_ra cpu_ldl_data_ra_armeb
+#define cpu_ldq_data_ra cpu_ldq_data_ra_armeb
+#define cpu_ldub_data cpu_ldub_data_armeb
+#define cpu_ldsb_data cpu_ldsb_data_armeb
+#define cpu_lduw_data cpu_lduw_data_armeb
+#define cpu_ldsw_data cpu_ldsw_data_armeb
+#define cpu_ldl_data cpu_ldl_data_armeb
+#define cpu_ldq_data cpu_ldq_data_armeb
+#define helper_ret_stb_mmu helper_ret_stb_mmu_armeb
+#define helper_le_stw_mmu helper_le_stw_mmu_armeb
+#define helper_be_stw_mmu helper_be_stw_mmu_armeb
+#define helper_le_stl_mmu helper_le_stl_mmu_armeb
+#define helper_be_stl_mmu helper_be_stl_mmu_armeb
+#define helper_le_stq_mmu helper_le_stq_mmu_armeb
+#define helper_be_stq_mmu helper_be_stq_mmu_armeb
+#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_armeb
+#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_armeb
+#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_armeb
+#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_armeb
+#define cpu_stb_data_ra cpu_stb_data_ra_armeb
+#define cpu_stw_data_ra cpu_stw_data_ra_armeb
+#define cpu_stl_data_ra cpu_stl_data_ra_armeb
+#define cpu_stq_data_ra cpu_stq_data_ra_armeb
+#define cpu_stb_data cpu_stb_data_armeb
+#define cpu_stw_data cpu_stw_data_armeb
+#define cpu_stl_data cpu_stl_data_armeb
+#define cpu_stq_data cpu_stq_data_armeb
+#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_armeb
+#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_armeb
+#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_armeb
+#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_armeb
+#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_armeb
+#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_armeb
+#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_armeb
+#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_armeb
+#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_armeb
+#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_armeb
+#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_armeb
+#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_armeb
+#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_armeb
+#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_armeb
+#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_armeb
+#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_armeb
+#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_armeb
+#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_armeb
+#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_armeb
+#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_armeb
+#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_armeb
+#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_armeb
+#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_armeb
+#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_armeb
+#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_armeb
+#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_armeb
+#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_armeb
+#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_armeb
+#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_armeb
+#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_armeb
+#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_armeb
+#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_armeb
+#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_armeb
+#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_armeb
+#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_armeb
+#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_armeb
+#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_armeb
+#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_armeb
+#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_armeb
+#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_armeb
+#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_armeb
+#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_armeb
+#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_armeb
+#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_armeb
+#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_armeb
+#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_armeb
+#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_armeb
+#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_armeb
+#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_armeb
+#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_armeb
+#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_armeb
+#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_armeb
+#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_armeb
+#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_armeb
+#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_armeb
+#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_armeb
+#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_armeb
+#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_armeb
+#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_armeb
+#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_armeb
+#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_armeb
+#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_armeb
+#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_armeb
+#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_armeb
+#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_armeb
+#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_armeb
+#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_armeb
+#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_armeb
+#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_armeb
+#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_armeb
+#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_armeb
+#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_armeb
+#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_armeb
+#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_armeb
+#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_armeb
+#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_armeb
+#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_armeb
+#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_armeb
+#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_armeb
+#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_armeb
+#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_armeb
+#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_armeb
+#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_armeb
+#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_armeb
+#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_armeb
+#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_armeb
+#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_armeb
+#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_armeb
+#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_armeb
+#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_armeb
+#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_armeb
+#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_armeb
+#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_armeb
+#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_armeb
+#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_armeb
+#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_armeb
+#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_armeb
+#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_armeb
+#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_armeb
+#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_armeb
+#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_armeb
+#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_armeb
+#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_armeb
+#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_armeb
+#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_armeb
+#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_armeb
+#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_armeb
+#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_armeb
+#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_armeb
+#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_armeb
+#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_armeb
+#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_armeb
+#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_armeb
+#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_armeb
+#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_armeb
+#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_armeb
+#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_armeb
+#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_armeb
+#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_armeb
+#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_armeb
+#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_armeb
+#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_armeb
+#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_armeb
+#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_armeb
+#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_armeb
+#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_armeb
+#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_armeb
+#define helper_atomic_xchgb helper_atomic_xchgb_armeb
+#define helper_atomic_fetch_addb helper_atomic_fetch_addb_armeb
+#define helper_atomic_fetch_andb helper_atomic_fetch_andb_armeb
+#define helper_atomic_fetch_orb helper_atomic_fetch_orb_armeb
+#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_armeb
+#define helper_atomic_add_fetchb helper_atomic_add_fetchb_armeb
+#define helper_atomic_and_fetchb helper_atomic_and_fetchb_armeb
+#define helper_atomic_or_fetchb helper_atomic_or_fetchb_armeb
+#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_armeb
+#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_armeb
+#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_armeb
+#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_armeb
+#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_armeb
+#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_armeb
+#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_armeb
+#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_armeb
+#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_armeb
+#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_armeb
+#define helper_atomic_xchgw_le helper_atomic_xchgw_le_armeb
+#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_armeb
+#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_armeb
+#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_armeb
+#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_armeb
+#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_armeb
+#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_armeb
+#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_armeb
+#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_armeb
+#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_armeb
+#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_armeb
+#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_armeb
+#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_armeb
+#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_armeb
+#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_armeb
+#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_armeb
+#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_armeb
+#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_armeb
+#define helper_atomic_xchgw_be helper_atomic_xchgw_be_armeb
+#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_armeb
+#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_armeb
+#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_armeb
+#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_armeb
+#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_armeb
+#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_armeb
+#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_armeb
+#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_armeb
+#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_armeb
+#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_armeb
+#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_armeb
+#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_armeb
+#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_armeb
+#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_armeb
+#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_armeb
+#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_armeb
+#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_armeb
+#define helper_atomic_xchgl_le helper_atomic_xchgl_le_armeb
+#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_armeb
+#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_armeb
+#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_armeb
+#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_armeb
+#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_armeb
+#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_armeb
+#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_armeb
+#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_armeb
+#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_armeb
+#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_armeb
+#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_armeb
+#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_armeb
+#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_armeb
+#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_armeb
+#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_armeb
+#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_armeb
+#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_armeb
+#define helper_atomic_xchgl_be helper_atomic_xchgl_be_armeb
+#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_armeb
+#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_armeb
+#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_armeb
+#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_armeb
+#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_armeb
+#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_armeb
+#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_armeb
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_armeb
+#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_armeb
+#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_armeb
+#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_armeb
+#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_armeb
+#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_armeb
+#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_armeb
+#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_armeb
+#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_armeb
+#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_armeb
+#define helper_atomic_xchgq_le helper_atomic_xchgq_le_armeb
+#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_armeb
+#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_armeb
+#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_armeb
+#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_armeb
+#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_armeb
+#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_armeb
+#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_armeb
+#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_armeb
+#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_armeb
+#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_armeb
+#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_armeb
+#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_armeb
+#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_armeb
+#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_armeb
+#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_armeb
+#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_armeb
+#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_armeb
+#define helper_atomic_xchgq_be helper_atomic_xchgq_be_armeb
+#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_armeb
+#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_armeb
+#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_armeb
+#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_armeb
+#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_armeb
+#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_armeb
+#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_armeb
+#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_armeb
+#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_armeb
+#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_armeb
+#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_armeb
+#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_armeb
+#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_armeb
+#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_armeb
+#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_armeb
+#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_armeb
+#define cpu_ldub_code cpu_ldub_code_armeb
+#define cpu_lduw_code cpu_lduw_code_armeb
+#define cpu_ldl_code cpu_ldl_code_armeb
+#define cpu_ldq_code cpu_ldq_code_armeb
+#define helper_div_i32 helper_div_i32_armeb
+#define helper_rem_i32 helper_rem_i32_armeb
+#define helper_divu_i32 helper_divu_i32_armeb
+#define helper_remu_i32 helper_remu_i32_armeb
+#define helper_shl_i64 helper_shl_i64_armeb
+#define helper_shr_i64 helper_shr_i64_armeb
+#define helper_sar_i64 helper_sar_i64_armeb
+#define helper_div_i64 helper_div_i64_armeb
+#define helper_rem_i64 helper_rem_i64_armeb
+#define helper_divu_i64 helper_divu_i64_armeb
+#define helper_remu_i64 helper_remu_i64_armeb
+#define helper_muluh_i64 helper_muluh_i64_armeb
+#define helper_mulsh_i64 helper_mulsh_i64_armeb
+#define helper_clz_i32 helper_clz_i32_armeb
+#define helper_ctz_i32 helper_ctz_i32_armeb
+#define helper_clz_i64 helper_clz_i64_armeb
+#define helper_ctz_i64 helper_ctz_i64_armeb
+#define helper_clrsb_i32 helper_clrsb_i32_armeb
+#define helper_clrsb_i64 helper_clrsb_i64_armeb
+#define helper_ctpop_i32 helper_ctpop_i32_armeb
+#define helper_ctpop_i64 helper_ctpop_i64_armeb
+#define helper_lookup_tb_ptr helper_lookup_tb_ptr_armeb
+#define helper_exit_atomic helper_exit_atomic_armeb
+#define helper_gvec_add8 helper_gvec_add8_armeb
+#define helper_gvec_add16 helper_gvec_add16_armeb
+#define helper_gvec_add32 helper_gvec_add32_armeb
+#define helper_gvec_add64 helper_gvec_add64_armeb
+#define helper_gvec_adds8 helper_gvec_adds8_armeb
+#define helper_gvec_adds16 helper_gvec_adds16_armeb
+#define helper_gvec_adds32 helper_gvec_adds32_armeb
+#define helper_gvec_adds64 helper_gvec_adds64_armeb
+#define helper_gvec_sub8 helper_gvec_sub8_armeb
+#define helper_gvec_sub16 helper_gvec_sub16_armeb
+#define helper_gvec_sub32 helper_gvec_sub32_armeb
+#define helper_gvec_sub64 helper_gvec_sub64_armeb
+#define helper_gvec_subs8 helper_gvec_subs8_armeb
+#define helper_gvec_subs16 helper_gvec_subs16_armeb
+#define helper_gvec_subs32 helper_gvec_subs32_armeb
+#define helper_gvec_subs64 helper_gvec_subs64_armeb
+#define helper_gvec_mul8 helper_gvec_mul8_armeb
+#define helper_gvec_mul16 helper_gvec_mul16_armeb
+#define helper_gvec_mul32 helper_gvec_mul32_armeb
+#define helper_gvec_mul64 helper_gvec_mul64_armeb
+#define helper_gvec_muls8 helper_gvec_muls8_armeb
+#define helper_gvec_muls16 helper_gvec_muls16_armeb
+#define helper_gvec_muls32 helper_gvec_muls32_armeb
+#define helper_gvec_muls64 helper_gvec_muls64_armeb
+#define helper_gvec_neg8 helper_gvec_neg8_armeb
+#define helper_gvec_neg16 helper_gvec_neg16_armeb
+#define helper_gvec_neg32 helper_gvec_neg32_armeb
+#define helper_gvec_neg64 helper_gvec_neg64_armeb
+#define helper_gvec_abs8 helper_gvec_abs8_armeb
+#define helper_gvec_abs16 helper_gvec_abs16_armeb
+#define helper_gvec_abs32 helper_gvec_abs32_armeb
+#define helper_gvec_abs64 helper_gvec_abs64_armeb
+#define helper_gvec_mov helper_gvec_mov_armeb
+#define helper_gvec_dup64 helper_gvec_dup64_armeb
+#define helper_gvec_dup32 helper_gvec_dup32_armeb
+#define helper_gvec_dup16 helper_gvec_dup16_armeb
+#define helper_gvec_dup8 helper_gvec_dup8_armeb
+#define helper_gvec_not helper_gvec_not_armeb
+#define helper_gvec_and helper_gvec_and_armeb
+#define helper_gvec_or helper_gvec_or_armeb
+#define helper_gvec_xor helper_gvec_xor_armeb
+#define helper_gvec_andc helper_gvec_andc_armeb
+#define helper_gvec_orc helper_gvec_orc_armeb
+#define helper_gvec_nand helper_gvec_nand_armeb
+#define helper_gvec_nor helper_gvec_nor_armeb
+#define helper_gvec_eqv helper_gvec_eqv_armeb
+#define helper_gvec_ands helper_gvec_ands_armeb
+#define helper_gvec_xors helper_gvec_xors_armeb
+#define helper_gvec_ors helper_gvec_ors_armeb
+#define helper_gvec_shl8i helper_gvec_shl8i_armeb
+#define helper_gvec_shl16i helper_gvec_shl16i_armeb
+#define helper_gvec_shl32i helper_gvec_shl32i_armeb
+#define helper_gvec_shl64i helper_gvec_shl64i_armeb
+#define helper_gvec_shr8i helper_gvec_shr8i_armeb
+#define helper_gvec_shr16i helper_gvec_shr16i_armeb
+#define helper_gvec_shr32i helper_gvec_shr32i_armeb
+#define helper_gvec_shr64i helper_gvec_shr64i_armeb
+#define helper_gvec_sar8i helper_gvec_sar8i_armeb
+#define helper_gvec_sar16i helper_gvec_sar16i_armeb
+#define helper_gvec_sar32i helper_gvec_sar32i_armeb
+#define helper_gvec_sar64i helper_gvec_sar64i_armeb
+#define helper_gvec_shl8v helper_gvec_shl8v_armeb
+#define helper_gvec_shl16v helper_gvec_shl16v_armeb
+#define helper_gvec_shl32v helper_gvec_shl32v_armeb
+#define helper_gvec_shl64v helper_gvec_shl64v_armeb
+#define helper_gvec_shr8v helper_gvec_shr8v_armeb
+#define helper_gvec_shr16v helper_gvec_shr16v_armeb
+#define helper_gvec_shr32v helper_gvec_shr32v_armeb
+#define helper_gvec_shr64v helper_gvec_shr64v_armeb
+#define helper_gvec_sar8v helper_gvec_sar8v_armeb
+#define helper_gvec_sar16v helper_gvec_sar16v_armeb
+#define helper_gvec_sar32v helper_gvec_sar32v_armeb
+#define helper_gvec_sar64v helper_gvec_sar64v_armeb
+#define helper_gvec_eq8 helper_gvec_eq8_armeb
+#define helper_gvec_ne8 helper_gvec_ne8_armeb
+#define helper_gvec_lt8 helper_gvec_lt8_armeb
+#define helper_gvec_le8 helper_gvec_le8_armeb
+#define helper_gvec_ltu8 helper_gvec_ltu8_armeb
+#define helper_gvec_leu8 helper_gvec_leu8_armeb
+#define helper_gvec_eq16 helper_gvec_eq16_armeb
+#define helper_gvec_ne16 helper_gvec_ne16_armeb
+#define helper_gvec_lt16 helper_gvec_lt16_armeb
+#define helper_gvec_le16 helper_gvec_le16_armeb
+#define helper_gvec_ltu16 helper_gvec_ltu16_armeb
+#define helper_gvec_leu16 helper_gvec_leu16_armeb
+#define helper_gvec_eq32 helper_gvec_eq32_armeb
+#define helper_gvec_ne32 helper_gvec_ne32_armeb
+#define helper_gvec_lt32 helper_gvec_lt32_armeb
+#define helper_gvec_le32 helper_gvec_le32_armeb
+#define helper_gvec_ltu32 helper_gvec_ltu32_armeb
+#define helper_gvec_leu32 helper_gvec_leu32_armeb
+#define helper_gvec_eq64 helper_gvec_eq64_armeb
+#define helper_gvec_ne64 helper_gvec_ne64_armeb
+#define helper_gvec_lt64 helper_gvec_lt64_armeb
+#define helper_gvec_le64 helper_gvec_le64_armeb
+#define helper_gvec_ltu64 helper_gvec_ltu64_armeb
+#define helper_gvec_leu64 helper_gvec_leu64_armeb
+#define helper_gvec_ssadd8 helper_gvec_ssadd8_armeb
+#define helper_gvec_ssadd16 helper_gvec_ssadd16_armeb
+#define helper_gvec_ssadd32 helper_gvec_ssadd32_armeb
+#define helper_gvec_ssadd64 helper_gvec_ssadd64_armeb
+#define helper_gvec_sssub8 helper_gvec_sssub8_armeb
+#define helper_gvec_sssub16 helper_gvec_sssub16_armeb
+#define helper_gvec_sssub32 helper_gvec_sssub32_armeb
+#define helper_gvec_sssub64 helper_gvec_sssub64_armeb
+#define helper_gvec_usadd8 helper_gvec_usadd8_armeb
+#define helper_gvec_usadd16 helper_gvec_usadd16_armeb
+#define helper_gvec_usadd32 helper_gvec_usadd32_armeb
+#define helper_gvec_usadd64 helper_gvec_usadd64_armeb
+#define helper_gvec_ussub8 helper_gvec_ussub8_armeb
+#define helper_gvec_ussub16 helper_gvec_ussub16_armeb
+#define helper_gvec_ussub32 helper_gvec_ussub32_armeb
+#define helper_gvec_ussub64 helper_gvec_ussub64_armeb
+#define helper_gvec_smin8 helper_gvec_smin8_armeb
+#define helper_gvec_smin16 helper_gvec_smin16_armeb
+#define helper_gvec_smin32 helper_gvec_smin32_armeb
+#define helper_gvec_smin64 helper_gvec_smin64_armeb
+#define helper_gvec_smax8 helper_gvec_smax8_armeb
+#define helper_gvec_smax16 helper_gvec_smax16_armeb
+#define helper_gvec_smax32 helper_gvec_smax32_armeb
+#define helper_gvec_smax64 helper_gvec_smax64_armeb
+#define helper_gvec_umin8 helper_gvec_umin8_armeb
+#define helper_gvec_umin16 helper_gvec_umin16_armeb
+#define helper_gvec_umin32 helper_gvec_umin32_armeb
+#define helper_gvec_umin64 helper_gvec_umin64_armeb
+#define helper_gvec_umax8 helper_gvec_umax8_armeb
+#define helper_gvec_umax16 helper_gvec_umax16_armeb
+#define helper_gvec_umax32 helper_gvec_umax32_armeb
+#define helper_gvec_umax64 helper_gvec_umax64_armeb
+#define helper_gvec_bitsel helper_gvec_bitsel_armeb
+#define cpu_restore_state cpu_restore_state_armeb
+#define page_collection_lock page_collection_lock_armeb
+#define page_collection_unlock page_collection_unlock_armeb
+#define free_code_gen_buffer free_code_gen_buffer_armeb
+#define tcg_exec_init tcg_exec_init_armeb
+#define tb_cleanup tb_cleanup_armeb
+#define tb_flush tb_flush_armeb
+#define tb_phys_invalidate tb_phys_invalidate_armeb
+#define tb_gen_code tb_gen_code_armeb
+#define tb_exec_lock tb_exec_lock_armeb
+#define tb_exec_unlock tb_exec_unlock_armeb
+#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_armeb
+#define tb_invalidate_phys_range tb_invalidate_phys_range_armeb
+#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_armeb
+#define tb_check_watchpoint tb_check_watchpoint_armeb
+#define cpu_io_recompile cpu_io_recompile_armeb
+#define tb_flush_jmp_cache tb_flush_jmp_cache_armeb
+#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_armeb
+#define translator_loop_temp_check translator_loop_temp_check_armeb
+#define translator_loop translator_loop_armeb
+#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_armeb
+#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_armeb
+#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_armeb
+#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_armeb
+#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_armeb
+#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_armeb
+#define unassigned_mem_ops unassigned_mem_ops_armeb
+#define floatx80_infinity floatx80_infinity_armeb
+#define dup_const_func dup_const_func_armeb
+#define gen_helper_raise_exception gen_helper_raise_exception_armeb
+#define gen_helper_raise_interrupt gen_helper_raise_interrupt_armeb
+#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_armeb
+#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_armeb
 #define gen_helper_cpsr_read gen_helper_cpsr_read_armeb
 #define gen_helper_cpsr_write gen_helper_cpsr_write_armeb
-#define gen_helper_crc32_arm gen_helper_crc32_arm_armeb
-#define gen_helper_crc32c gen_helper_crc32c_armeb
-#define gen_helper_crypto_aese gen_helper_crypto_aese_armeb
-#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_armeb
-#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_armeb
-#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_armeb
-#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_armeb
-#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_armeb
-#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_armeb
-#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_armeb
-#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_armeb
-#define gen_helper_double_saturate gen_helper_double_saturate_armeb
-#define gen_helper_exception_internal gen_helper_exception_internal_armeb
-#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_armeb
-#define gen_helper_get_cp_reg gen_helper_get_cp_reg_armeb
-#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_armeb
-#define gen_helper_get_r13_banked gen_helper_get_r13_banked_armeb
-#define gen_helper_get_user_reg gen_helper_get_user_reg_armeb
-#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_armeb
-#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_armeb
-#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_armeb
-#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_armeb
-#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_armeb
-#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_armeb
-#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_armeb
-#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_armeb
-#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_armeb
-#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_armeb
-#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_armeb
-#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_armeb
-#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_armeb
-#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_armeb
-#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_armeb
-#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_armeb
-#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_armeb
-#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_armeb
-#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_armeb
-#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_armeb
-#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_armeb
-#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_armeb
-#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_armeb
-#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_armeb
-#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_armeb
-#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_armeb
-#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_armeb
-#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_armeb
-#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_armeb
-#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_armeb
-#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_armeb
-#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_armeb
-#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_armeb
-#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_armeb
-#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_armeb
-#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_armeb
-#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_armeb
-#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_armeb
-#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_armeb
-#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_armeb
-#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_armeb
-#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_armeb
-#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_armeb
-#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_armeb
-#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_armeb
-#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_armeb
-#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_armeb
-#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_armeb
-#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_armeb
-#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_armeb
-#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_armeb
-#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_armeb
-#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_armeb
-#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_armeb
-#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_armeb
-#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_armeb
-#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_armeb
-#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_armeb
-#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_armeb
-#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_armeb
-#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_armeb
-#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_armeb
-#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_armeb
-#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_armeb
-#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_armeb
-#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_armeb
-#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_armeb
-#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_armeb
-#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_armeb
-#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_armeb
-#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_armeb
-#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_armeb
-#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_armeb
-#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_armeb
-#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_armeb
-#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_armeb
-#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_armeb
-#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_armeb
-#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_armeb
-#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_armeb
-#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_armeb
-#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_armeb
-#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_armeb
-#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_armeb
-#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_armeb
-#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_armeb
-#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_armeb
-#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_armeb
-#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_armeb
-#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_armeb
-#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_armeb
-#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_armeb
-#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_armeb
-#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_armeb
-#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_armeb
-#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_armeb
-#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_armeb
-#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_armeb
-#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_armeb
-#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_armeb
-#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_armeb
-#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_armeb
-#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_armeb
-#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_armeb
-#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_armeb
-#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_armeb
-#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_armeb
-#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_armeb
-#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_armeb
-#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_armeb
-#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_armeb
-#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_armeb
-#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_armeb
-#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_armeb
-#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_armeb
-#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_armeb
-#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_armeb
-#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_armeb
-#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_armeb
-#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_armeb
-#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_armeb
-#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_armeb
-#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_armeb
-#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_armeb
-#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_armeb
-#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_armeb
-#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_armeb
-#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_armeb
-#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_armeb
-#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_armeb
-#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_armeb
-#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_armeb
-#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_armeb
-#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_armeb
-#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_armeb
-#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_armeb
-#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_armeb
-#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_armeb
-#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_armeb
-#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_armeb
-#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_armeb
-#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_armeb
-#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_armeb
-#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_armeb
-#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_armeb
-#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_armeb
-#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_armeb
-#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_armeb
-#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_armeb
-#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_armeb
-#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_armeb
-#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_armeb
-#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_armeb
-#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_armeb
-#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_armeb
-#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_armeb
-#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_armeb
-#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_armeb
-#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_armeb
-#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_armeb
-#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_armeb
-#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_armeb
-#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_armeb
-#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_armeb
-#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_armeb
-#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_armeb
-#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_armeb
-#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_armeb
-#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_armeb
-#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_armeb
-#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_armeb
-#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_armeb
-#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_armeb
-#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_armeb
-#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_armeb
-#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_armeb
-#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_armeb
-#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_armeb
-#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_armeb
-#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_armeb
-#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_armeb
-#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_armeb
-#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_armeb
-#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_armeb
-#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_armeb
-#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_armeb
-#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_armeb
-#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_armeb
-#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_armeb
-#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_armeb
-#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_armeb
-#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_armeb
-#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_armeb
-#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_armeb
-#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_armeb
-#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_armeb
-#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_armeb
-#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_armeb
-#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_armeb
-#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_armeb
-#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_armeb
-#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_armeb
-#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_armeb
-#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_armeb
-#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_armeb
-#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_armeb
-#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_armeb
-#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_armeb
-#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_armeb
-#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_armeb
-#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_armeb
-#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_armeb
-#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_armeb
-#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_armeb
-#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_armeb
-#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_armeb
-#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_armeb
-#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_armeb
-#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_armeb
-#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_armeb
-#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_armeb
-#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_armeb
-#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_armeb
-#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_armeb
-#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_armeb
-#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_armeb
-#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_armeb
-#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_armeb
-#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_armeb
-#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_armeb
-#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_armeb
-#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_armeb
-#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_armeb
-#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_armeb
-#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_armeb
-#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_armeb
-#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_armeb
-#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_armeb
-#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_armeb
-#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_armeb
-#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_armeb
-#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_armeb
-#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_armeb
-#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_armeb
-#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_armeb
-#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_armeb
-#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_armeb
-#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_armeb
-#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_armeb
-#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_armeb
-#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_armeb
-#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_armeb
-#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_armeb
-#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_armeb
-#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_armeb
-#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_armeb
-#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_armeb
-#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_armeb
-#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_armeb
-#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_armeb
-#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_armeb
-#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_armeb
-#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_armeb
-#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_armeb
-#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_armeb
-#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_armeb
-#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_armeb
-#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_armeb
-#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_armeb
-#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_armeb
-#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_armeb
-#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_armeb
-#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_armeb
-#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_armeb
-#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_armeb
-#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_armeb
-#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_armeb
-#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_armeb
-#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_armeb
-#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_armeb
-#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_armeb
-#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_armeb
-#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_armeb
-#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_armeb
-#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_armeb
-#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_armeb
-#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_armeb
-#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_armeb
-#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_armeb
-#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_armeb
-#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_armeb
-#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_armeb
-#define gen_helper_neon_tbl gen_helper_neon_tbl_armeb
-#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_armeb
-#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_armeb
-#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_armeb
-#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_armeb
-#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_armeb
-#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_armeb
-#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_armeb
-#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_armeb
-#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_armeb
-#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_armeb
-#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_armeb
-#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_armeb
-#define gen_helper_neon_zip16 gen_helper_neon_zip16_armeb
-#define gen_helper_neon_zip8 gen_helper_neon_zip8_armeb
-#define gen_helper_pre_hvc gen_helper_pre_hvc_armeb
-#define gen_helper_pre_smc gen_helper_pre_smc_armeb
-#define gen_helper_qadd16 gen_helper_qadd16_armeb
-#define gen_helper_qadd8 gen_helper_qadd8_armeb
-#define gen_helper_qaddsubx gen_helper_qaddsubx_armeb
-#define gen_helper_qsub16 gen_helper_qsub16_armeb
-#define gen_helper_qsub8 gen_helper_qsub8_armeb
-#define gen_helper_qsubaddx gen_helper_qsubaddx_armeb
-#define gen_helper_rbit gen_helper_rbit_armeb
-#define gen_helper_recpe_f32 gen_helper_recpe_f32_armeb
-#define gen_helper_recpe_u32 gen_helper_recpe_u32_armeb
-#define gen_helper_recps_f32 gen_helper_recps_f32_armeb
-#define gen_helper_rintd gen_helper_rintd_armeb
-#define gen_helper_rintd_exact gen_helper_rintd_exact_armeb
-#define gen_helper_rints gen_helper_rints_armeb
-#define gen_helper_rints_exact gen_helper_rints_exact_armeb
-#define gen_helper_ror_cc gen_helper_ror_cc_armeb
-#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_armeb
-#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_armeb
-#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_armeb
-#define gen_helper_sadd16 gen_helper_sadd16_armeb
-#define gen_helper_sadd8 gen_helper_sadd8_armeb
-#define gen_helper_saddsubx gen_helper_saddsubx_armeb
-#define gen_helper_sar_cc gen_helper_sar_cc_armeb
-#define gen_helper_sdiv gen_helper_sdiv_armeb
-#define gen_helper_sel_flags gen_helper_sel_flags_armeb
-#define gen_helper_set_cp_reg gen_helper_set_cp_reg_armeb
-#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_armeb
-#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_armeb
-#define gen_helper_set_r13_banked gen_helper_set_r13_banked_armeb
-#define gen_helper_set_rmode gen_helper_set_rmode_armeb
-#define gen_helper_set_user_reg gen_helper_set_user_reg_armeb
-#define gen_helper_shadd16 gen_helper_shadd16_armeb
-#define gen_helper_shadd8 gen_helper_shadd8_armeb
-#define gen_helper_shaddsubx gen_helper_shaddsubx_armeb
-#define gen_helper_shl_cc gen_helper_shl_cc_armeb
-#define gen_helper_shr_cc gen_helper_shr_cc_armeb
-#define gen_helper_shsub16 gen_helper_shsub16_armeb
-#define gen_helper_shsub8 gen_helper_shsub8_armeb
-#define gen_helper_shsubaddx gen_helper_shsubaddx_armeb
-#define gen_helper_ssat gen_helper_ssat_armeb
-#define gen_helper_ssat16 gen_helper_ssat16_armeb
-#define gen_helper_ssub16 gen_helper_ssub16_armeb
-#define gen_helper_ssub8 gen_helper_ssub8_armeb
-#define gen_helper_ssubaddx gen_helper_ssubaddx_armeb
-#define gen_helper_sub_saturate gen_helper_sub_saturate_armeb
-#define gen_helper_sxtb16 gen_helper_sxtb16_armeb
-#define gen_helper_uadd16 gen_helper_uadd16_armeb
-#define gen_helper_uadd8 gen_helper_uadd8_armeb
-#define gen_helper_uaddsubx gen_helper_uaddsubx_armeb
-#define gen_helper_udiv gen_helper_udiv_armeb
-#define gen_helper_uhadd16 gen_helper_uhadd16_armeb
-#define gen_helper_uhadd8 gen_helper_uhadd8_armeb
-#define gen_helper_uhaddsubx gen_helper_uhaddsubx_armeb
-#define gen_helper_uhsub16 gen_helper_uhsub16_armeb
-#define gen_helper_uhsub8 gen_helper_uhsub8_armeb
-#define gen_helper_uhsubaddx gen_helper_uhsubaddx_armeb
-#define gen_helper_uqadd16 gen_helper_uqadd16_armeb
-#define gen_helper_uqadd8 gen_helper_uqadd8_armeb
-#define gen_helper_uqaddsubx gen_helper_uqaddsubx_armeb
-#define gen_helper_uqsub16 gen_helper_uqsub16_armeb
-#define gen_helper_uqsub8 gen_helper_uqsub8_armeb
-#define gen_helper_uqsubaddx gen_helper_uqsubaddx_armeb
-#define gen_helper_usad8 gen_helper_usad8_armeb
-#define gen_helper_usat gen_helper_usat_armeb
-#define gen_helper_usat16 gen_helper_usat16_armeb
-#define gen_helper_usub16 gen_helper_usub16_armeb
-#define gen_helper_usub8 gen_helper_usub8_armeb
-#define gen_helper_usubaddx gen_helper_usubaddx_armeb
-#define gen_helper_uxtb16 gen_helper_uxtb16_armeb
-#define gen_helper_v7m_mrs gen_helper_v7m_mrs_armeb
-#define gen_helper_v7m_msr gen_helper_v7m_msr_armeb
-#define gen_helper_vfp_absd gen_helper_vfp_absd_armeb
-#define gen_helper_vfp_abss gen_helper_vfp_abss_armeb
-#define gen_helper_vfp_addd gen_helper_vfp_addd_armeb
-#define gen_helper_vfp_adds gen_helper_vfp_adds_armeb
-#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_armeb
-#define gen_helper_vfp_cmped gen_helper_vfp_cmped_armeb
-#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_armeb
-#define gen_helper_vfp_cmps gen_helper_vfp_cmps_armeb
-#define gen_helper_vfp_divd gen_helper_vfp_divd_armeb
-#define gen_helper_vfp_divs gen_helper_vfp_divs_armeb
-#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_armeb
-#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_armeb
-#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_armeb
-#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_armeb
-#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_armeb
-#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_armeb
-#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_armeb
-#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_armeb
-#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_armeb
-#define gen_helper_vfp_maxs gen_helper_vfp_maxs_armeb
-#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_armeb
-#define gen_helper_vfp_minnums gen_helper_vfp_minnums_armeb
-#define gen_helper_vfp_mins gen_helper_vfp_mins_armeb
-#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_armeb
-#define gen_helper_vfp_muladds gen_helper_vfp_muladds_armeb
-#define gen_helper_vfp_muld gen_helper_vfp_muld_armeb
-#define gen_helper_vfp_muls gen_helper_vfp_muls_armeb
-#define gen_helper_vfp_negd gen_helper_vfp_negd_armeb
-#define gen_helper_vfp_negs gen_helper_vfp_negs_armeb
-#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_armeb
-#define gen_helper_vfp_shtod gen_helper_vfp_shtod_armeb
-#define gen_helper_vfp_shtos gen_helper_vfp_shtos_armeb
-#define gen_helper_vfp_sitod gen_helper_vfp_sitod_armeb
-#define gen_helper_vfp_sitos gen_helper_vfp_sitos_armeb
-#define gen_helper_vfp_sltod gen_helper_vfp_sltod_armeb
-#define gen_helper_vfp_sltos gen_helper_vfp_sltos_armeb
-#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_armeb
-#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_armeb
-#define gen_helper_vfp_subd gen_helper_vfp_subd_armeb
-#define gen_helper_vfp_subs gen_helper_vfp_subs_armeb
-#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_armeb
-#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_armeb
-#define gen_helper_vfp_tosid gen_helper_vfp_tosid_armeb
-#define gen_helper_vfp_tosis gen_helper_vfp_tosis_armeb
-#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_armeb
-#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_armeb
-#define gen_helper_vfp_tosld gen_helper_vfp_tosld_armeb
-#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_armeb
-#define gen_helper_vfp_tosls gen_helper_vfp_tosls_armeb
-#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_armeb
-#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_armeb
-#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_armeb
-#define gen_helper_vfp_touid gen_helper_vfp_touid_armeb
-#define gen_helper_vfp_touis gen_helper_vfp_touis_armeb
-#define gen_helper_vfp_touizd gen_helper_vfp_touizd_armeb
-#define gen_helper_vfp_touizs gen_helper_vfp_touizs_armeb
-#define gen_helper_vfp_tould gen_helper_vfp_tould_armeb
-#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_armeb
-#define gen_helper_vfp_touls gen_helper_vfp_touls_armeb
-#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_armeb
-#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_armeb
-#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_armeb
-#define gen_helper_vfp_uitod gen_helper_vfp_uitod_armeb
-#define gen_helper_vfp_uitos gen_helper_vfp_uitos_armeb
-#define gen_helper_vfp_ultod gen_helper_vfp_ultod_armeb
-#define gen_helper_vfp_ultos gen_helper_vfp_ultos_armeb
-#define gen_helper_wfe gen_helper_wfe_armeb
-#define gen_helper_wfi gen_helper_wfi_armeb
-#define gen_hvc gen_hvc_armeb
-#define gen_intermediate_code_internal gen_intermediate_code_internal_armeb
-#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_armeb
-#define gen_iwmmxt_address gen_iwmmxt_address_armeb
-#define gen_iwmmxt_shift gen_iwmmxt_shift_armeb
-#define gen_jmp gen_jmp_armeb
-#define gen_load_and_replicate gen_load_and_replicate_armeb
-#define gen_load_exclusive gen_load_exclusive_armeb
-#define gen_logic_CC gen_logic_CC_armeb
-#define gen_logicq_cc gen_logicq_cc_armeb
-#define gen_lookup_tb gen_lookup_tb_armeb
-#define gen_mov_F0_vreg gen_mov_F0_vreg_armeb
-#define gen_mov_F1_vreg gen_mov_F1_vreg_armeb
-#define gen_mov_vreg_F0 gen_mov_vreg_F0_armeb
-#define gen_muls_i64_i32 gen_muls_i64_i32_armeb
-#define gen_mulu_i64_i32 gen_mulu_i64_i32_armeb
-#define gen_mulxy gen_mulxy_armeb
-#define gen_neon_add gen_neon_add_armeb
-#define gen_neon_addl gen_neon_addl_armeb
-#define gen_neon_addl_saturate gen_neon_addl_saturate_armeb
-#define gen_neon_bsl gen_neon_bsl_armeb
-#define gen_neon_dup_high16 gen_neon_dup_high16_armeb
-#define gen_neon_dup_low16 gen_neon_dup_low16_armeb
-#define gen_neon_dup_u8 gen_neon_dup_u8_armeb
-#define gen_neon_mull gen_neon_mull_armeb
-#define gen_neon_narrow gen_neon_narrow_armeb
-#define gen_neon_narrow_op gen_neon_narrow_op_armeb
-#define gen_neon_narrow_sats gen_neon_narrow_sats_armeb
-#define gen_neon_narrow_satu gen_neon_narrow_satu_armeb
-#define gen_neon_negl gen_neon_negl_armeb
-#define gen_neon_rsb gen_neon_rsb_armeb
-#define gen_neon_shift_narrow gen_neon_shift_narrow_armeb
-#define gen_neon_subl gen_neon_subl_armeb
-#define gen_neon_trn_u16 gen_neon_trn_u16_armeb
-#define gen_neon_trn_u8 gen_neon_trn_u8_armeb
-#define gen_neon_unarrow_sats gen_neon_unarrow_sats_armeb
-#define gen_neon_unzip gen_neon_unzip_armeb
-#define gen_neon_widen gen_neon_widen_armeb
-#define gen_neon_zip gen_neon_zip_armeb
-#define gen_new_label gen_new_label_armeb
-#define gen_nop_hint gen_nop_hint_armeb
-#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_armeb
-#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_armeb
-#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_armeb
-#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_armeb
-#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_armeb
-#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_armeb
-#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_armeb
-#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_armeb
-#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_armeb
-#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_armeb
-#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_armeb
-#define gen_op_iwmmxt_avgb0_M0_wRn
gen_op_iwmmxt_avgb0_M0_wRn_armeb -#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_armeb -#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_armeb -#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_armeb -#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_armeb -#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_armeb -#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_armeb -#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_armeb -#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_armeb -#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_armeb -#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_armeb -#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_armeb -#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_armeb -#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_armeb -#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_armeb -#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_armeb -#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_armeb -#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_armeb -#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_armeb -#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_armeb -#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_armeb -#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_armeb -#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_armeb -#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_armeb -#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_armeb -#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_armeb -#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_armeb -#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_armeb -#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_armeb -#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_armeb -#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_armeb -#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_armeb -#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_armeb -#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_armeb -#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_armeb -#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_armeb -#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_armeb -#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_armeb -#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_armeb -#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_armeb -#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_armeb -#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_armeb -#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_armeb -#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_armeb -#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_armeb -#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_armeb -#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_armeb -#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_armeb -#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_armeb -#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_armeb -#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_armeb -#define gen_op_iwmmxt_subsl_M0_wRn 
gen_op_iwmmxt_subsl_M0_wRn_armeb -#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_armeb -#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_armeb -#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_armeb -#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_armeb -#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_armeb -#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_armeb -#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_armeb -#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_armeb -#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_armeb -#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_armeb -#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_armeb -#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_armeb -#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_armeb -#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_armeb -#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_armeb -#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_armeb -#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_armeb -#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_armeb -#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_armeb -#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_armeb -#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_armeb -#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_armeb -#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_armeb -#define gen_rev16 gen_rev16_armeb -#define gen_revsh gen_revsh_armeb -#define gen_rfe gen_rfe_armeb -#define gen_sar gen_sar_armeb -#define gen_sbc_CC gen_sbc_CC_armeb -#define gen_sbfx gen_sbfx_armeb -#define gen_set_CF_bit31 gen_set_CF_bit31_armeb -#define gen_set_condexec gen_set_condexec_armeb -#define gen_set_cpsr gen_set_cpsr_armeb -#define gen_set_label gen_set_label_armeb -#define gen_set_pc_im gen_set_pc_im_armeb -#define gen_set_psr gen_set_psr_armeb -#define gen_set_psr_im gen_set_psr_im_armeb -#define gen_shl gen_shl_armeb -#define gen_shr gen_shr_armeb -#define gen_smc gen_smc_armeb -#define gen_smul_dual gen_smul_dual_armeb -#define gen_srs gen_srs_armeb -#define gen_ss_advance gen_ss_advance_armeb -#define gen_step_complete_exception gen_step_complete_exception_armeb -#define gen_store_exclusive gen_store_exclusive_armeb -#define gen_storeq_reg gen_storeq_reg_armeb -#define gen_sub_carry gen_sub_carry_armeb -#define gen_sub_CC gen_sub_CC_armeb -#define gen_subq_msw gen_subq_msw_armeb -#define gen_swap_half gen_swap_half_armeb -#define gen_thumb2_data_op gen_thumb2_data_op_armeb -#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_armeb -#define gen_ubfx gen_ubfx_armeb -#define gen_vfp_abs gen_vfp_abs_armeb -#define gen_vfp_add gen_vfp_add_armeb -#define gen_vfp_cmp gen_vfp_cmp_armeb -#define gen_vfp_cmpe gen_vfp_cmpe_armeb -#define gen_vfp_div gen_vfp_div_armeb -#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_armeb -#define gen_vfp_F1_mul gen_vfp_F1_mul_armeb -#define gen_vfp_F1_neg gen_vfp_F1_neg_armeb -#define gen_vfp_ld gen_vfp_ld_armeb -#define gen_vfp_mrs gen_vfp_mrs_armeb -#define gen_vfp_msr gen_vfp_msr_armeb -#define gen_vfp_mul gen_vfp_mul_armeb -#define gen_vfp_neg gen_vfp_neg_armeb -#define gen_vfp_shto gen_vfp_shto_armeb -#define gen_vfp_sito gen_vfp_sito_armeb -#define gen_vfp_slto gen_vfp_slto_armeb -#define gen_vfp_sqrt 
gen_vfp_sqrt_armeb -#define gen_vfp_st gen_vfp_st_armeb -#define gen_vfp_sub gen_vfp_sub_armeb -#define gen_vfp_tosh gen_vfp_tosh_armeb -#define gen_vfp_tosi gen_vfp_tosi_armeb -#define gen_vfp_tosiz gen_vfp_tosiz_armeb -#define gen_vfp_tosl gen_vfp_tosl_armeb -#define gen_vfp_touh gen_vfp_touh_armeb -#define gen_vfp_toui gen_vfp_toui_armeb -#define gen_vfp_touiz gen_vfp_touiz_armeb -#define gen_vfp_toul gen_vfp_toul_armeb -#define gen_vfp_uhto gen_vfp_uhto_armeb -#define gen_vfp_uito gen_vfp_uito_armeb -#define gen_vfp_ulto gen_vfp_ulto_armeb -#define get_arm_cp_reginfo get_arm_cp_reginfo_armeb -#define get_clock get_clock_armeb -#define get_clock_realtime get_clock_realtime_armeb -#define get_constraint_priority get_constraint_priority_armeb -#define get_float_exception_flags get_float_exception_flags_armeb -#define get_float_rounding_mode get_float_rounding_mode_armeb -#define get_fpstatus_ptr get_fpstatus_ptr_armeb -#define get_level1_table_address get_level1_table_address_armeb -#define get_mem_index get_mem_index_armeb -#define get_next_param_value get_next_param_value_armeb -#define get_opt_name get_opt_name_armeb -#define get_opt_value get_opt_value_armeb -#define get_page_addr_code get_page_addr_code_armeb -#define get_param_value get_param_value_armeb -#define get_phys_addr get_phys_addr_armeb -#define get_phys_addr_lpae get_phys_addr_lpae_armeb -#define get_phys_addr_mpu get_phys_addr_mpu_armeb -#define get_phys_addr_v5 get_phys_addr_v5_armeb -#define get_phys_addr_v6 get_phys_addr_v6_armeb -#define get_system_memory get_system_memory_armeb -#define get_ticks_per_sec get_ticks_per_sec_armeb -#define g_list_insert_sorted_merged g_list_insert_sorted_merged_armeb -#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__armeb -#define gt_cntfrq_access gt_cntfrq_access_armeb -#define gt_cnt_read gt_cnt_read_armeb -#define gt_cnt_reset gt_cnt_reset_armeb -#define gt_counter_access gt_counter_access_armeb -#define gt_ctl_write gt_ctl_write_armeb -#define gt_cval_write gt_cval_write_armeb -#define gt_get_countervalue gt_get_countervalue_armeb -#define gt_pct_access gt_pct_access_armeb -#define gt_ptimer_access gt_ptimer_access_armeb -#define gt_recalc_timer gt_recalc_timer_armeb -#define gt_timer_access gt_timer_access_armeb -#define gt_tval_read gt_tval_read_armeb -#define gt_tval_write gt_tval_write_armeb -#define gt_vct_access gt_vct_access_armeb -#define gt_vtimer_access gt_vtimer_access_armeb -#define guest_phys_blocks_free guest_phys_blocks_free_armeb -#define guest_phys_blocks_init guest_phys_blocks_init_armeb -#define handle_vcvt handle_vcvt_armeb -#define handle_vminmaxnm handle_vminmaxnm_armeb -#define handle_vrint handle_vrint_armeb -#define handle_vsel handle_vsel_armeb -#define has_help_option has_help_option_armeb -#define have_bmi1 have_bmi1_armeb -#define have_bmi2 have_bmi2_armeb -#define hcr_write hcr_write_armeb -#define helper_access_check_cp_reg helper_access_check_cp_reg_armeb -#define helper_add_saturate helper_add_saturate_armeb -#define helper_add_setq helper_add_setq_armeb -#define helper_add_usaturate helper_add_usaturate_armeb -#define helper_be_ldl_cmmu helper_be_ldl_cmmu_armeb -#define helper_be_ldq_cmmu helper_be_ldq_cmmu_armeb -#define helper_be_ldq_mmu helper_be_ldq_mmu_armeb -#define helper_be_ldsl_mmu helper_be_ldsl_mmu_armeb -#define helper_be_ldsw_mmu helper_be_ldsw_mmu_armeb -#define helper_be_ldul_mmu helper_be_ldul_mmu_armeb -#define helper_be_lduw_mmu helper_be_lduw_mmu_armeb -#define helper_be_ldw_cmmu helper_be_ldw_cmmu_armeb -#define 
helper_be_stl_mmu helper_be_stl_mmu_armeb -#define helper_be_stq_mmu helper_be_stq_mmu_armeb -#define helper_be_stw_mmu helper_be_stw_mmu_armeb -#define helper_clear_pstate_ss helper_clear_pstate_ss_armeb -#define helper_clz_arm helper_clz_arm_armeb -#define helper_cpsr_read helper_cpsr_read_armeb -#define helper_cpsr_write helper_cpsr_write_armeb -#define helper_crc32_arm helper_crc32_arm_armeb -#define helper_crc32c helper_crc32c_armeb +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_armeb +#define arm_cpu_update_virq arm_cpu_update_virq_armeb +#define arm_cpu_update_vfiq arm_cpu_update_vfiq_armeb +#define arm_cpu_initfn arm_cpu_initfn_armeb +#define gt_cntfrq_period_ns gt_cntfrq_period_ns_armeb +#define arm_cpu_post_init arm_cpu_post_init_armeb +#define arm_cpu_realizefn arm_cpu_realizefn_armeb +#define a15_l2ctlr_read a15_l2ctlr_read_armeb +#define arm_cpu_class_init arm_cpu_class_init_armeb +#define cpu_arm_init cpu_arm_init_armeb #define helper_crypto_aese helper_crypto_aese_armeb #define helper_crypto_aesmc helper_crypto_aesmc_armeb #define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_armeb @@ -1371,1651 +1292,693 @@ #define helper_crypto_sha256h2 helper_crypto_sha256h2_armeb #define helper_crypto_sha256su0 helper_crypto_sha256su0_armeb #define helper_crypto_sha256su1 helper_crypto_sha256su1_armeb -#define helper_dc_zva helper_dc_zva_armeb -#define helper_double_saturate helper_double_saturate_armeb -#define helper_exception_internal helper_exception_internal_armeb -#define helper_exception_return helper_exception_return_armeb -#define helper_exception_with_syndrome helper_exception_with_syndrome_armeb -#define helper_get_cp_reg helper_get_cp_reg_armeb -#define helper_get_cp_reg64 helper_get_cp_reg64_armeb -#define helper_get_r13_banked helper_get_r13_banked_armeb -#define helper_get_user_reg helper_get_user_reg_armeb -#define helper_iwmmxt_addcb helper_iwmmxt_addcb_armeb -#define helper_iwmmxt_addcl helper_iwmmxt_addcl_armeb -#define helper_iwmmxt_addcw helper_iwmmxt_addcw_armeb +#define helper_crypto_sha512h helper_crypto_sha512h_armeb +#define helper_crypto_sha512h2 helper_crypto_sha512h2_armeb +#define helper_crypto_sha512su0 helper_crypto_sha512su0_armeb +#define helper_crypto_sha512su1 helper_crypto_sha512su1_armeb +#define helper_crypto_sm3partw1 helper_crypto_sm3partw1_armeb +#define helper_crypto_sm3partw2 helper_crypto_sm3partw2_armeb +#define helper_crypto_sm3tt helper_crypto_sm3tt_armeb +#define helper_crypto_sm4e helper_crypto_sm4e_armeb +#define helper_crypto_sm4ekey helper_crypto_sm4ekey_armeb +#define helper_check_breakpoints helper_check_breakpoints_armeb +#define arm_debug_check_watchpoint arm_debug_check_watchpoint_armeb +#define arm_debug_excp_handler arm_debug_excp_handler_armeb +#define arm_adjust_watchpoint_address arm_adjust_watchpoint_address_armeb +#define read_raw_cp_reg read_raw_cp_reg_armeb +#define pmu_init pmu_init_armeb +#define pmu_op_start pmu_op_start_armeb +#define pmu_op_finish pmu_op_finish_armeb +#define pmu_pre_el_change pmu_pre_el_change_armeb +#define pmu_post_el_change pmu_post_el_change_armeb +#define arm_pmu_timer_cb arm_pmu_timer_cb_armeb +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_armeb +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_armeb +#define arm_gt_htimer_cb arm_gt_htimer_cb_armeb +#define arm_gt_stimer_cb arm_gt_stimer_cb_armeb +#define arm_gt_hvtimer_cb arm_gt_hvtimer_cb_armeb +#define arm_hcr_el2_eff arm_hcr_el2_eff_armeb +#define sve_exception_el sve_exception_el_armeb +#define sve_zcr_len_for_el sve_zcr_len_for_el_armeb 
+#define hw_watchpoint_update hw_watchpoint_update_armeb +#define hw_watchpoint_update_all hw_watchpoint_update_all_armeb +#define hw_breakpoint_update hw_breakpoint_update_armeb +#define hw_breakpoint_update_all hw_breakpoint_update_all_armeb +#define register_cp_regs_for_features register_cp_regs_for_features_armeb +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_armeb +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_armeb +#define modify_arm_cp_regs modify_arm_cp_regs_armeb +#define get_arm_cp_reginfo get_arm_cp_reginfo_armeb +#define arm_cp_write_ignore arm_cp_write_ignore_armeb +#define arm_cp_read_zero arm_cp_read_zero_armeb +#define arm_cp_reset_ignore arm_cp_reset_ignore_armeb +#define cpsr_read cpsr_read_armeb +#define cpsr_write cpsr_write_armeb +#define helper_sxtb16 helper_sxtb16_armeb +#define helper_uxtb16 helper_uxtb16_armeb +#define helper_sdiv helper_sdiv_armeb +#define helper_udiv helper_udiv_armeb +#define helper_rbit helper_rbit_armeb +#define arm_phys_excp_target_el arm_phys_excp_target_el_armeb +#define aarch64_sync_32_to_64 aarch64_sync_32_to_64_armeb +#define aarch64_sync_64_to_32 aarch64_sync_64_to_32_armeb +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_armeb +#define arm_sctlr arm_sctlr_armeb +#define arm_s1_regime_using_lpae_format arm_s1_regime_using_lpae_format_armeb +#define aa64_va_parameters aa64_va_parameters_armeb +#define v8m_security_lookup v8m_security_lookup_armeb +#define pmsav8_mpu_lookup pmsav8_mpu_lookup_armeb +#define get_phys_addr get_phys_addr_armeb +#define arm_cpu_get_phys_page_attrs_debug arm_cpu_get_phys_page_attrs_debug_armeb +#define helper_qadd16 helper_qadd16_armeb +#define helper_qadd8 helper_qadd8_armeb +#define helper_qsub16 helper_qsub16_armeb +#define helper_qsub8 helper_qsub8_armeb +#define helper_qsubaddx helper_qsubaddx_armeb +#define helper_qaddsubx helper_qaddsubx_armeb +#define helper_uqadd16 helper_uqadd16_armeb +#define helper_uqadd8 helper_uqadd8_armeb +#define helper_uqsub16 helper_uqsub16_armeb +#define helper_uqsub8 helper_uqsub8_armeb +#define helper_uqsubaddx helper_uqsubaddx_armeb +#define helper_uqaddsubx helper_uqaddsubx_armeb +#define helper_sadd16 helper_sadd16_armeb +#define helper_sadd8 helper_sadd8_armeb +#define helper_ssub16 helper_ssub16_armeb +#define helper_ssub8 helper_ssub8_armeb +#define helper_ssubaddx helper_ssubaddx_armeb +#define helper_saddsubx helper_saddsubx_armeb +#define helper_uadd16 helper_uadd16_armeb +#define helper_uadd8 helper_uadd8_armeb +#define helper_usub16 helper_usub16_armeb +#define helper_usub8 helper_usub8_armeb +#define helper_usubaddx helper_usubaddx_armeb +#define helper_uaddsubx helper_uaddsubx_armeb +#define helper_shadd16 helper_shadd16_armeb +#define helper_shadd8 helper_shadd8_armeb +#define helper_shsub16 helper_shsub16_armeb +#define helper_shsub8 helper_shsub8_armeb +#define helper_shsubaddx helper_shsubaddx_armeb +#define helper_shaddsubx helper_shaddsubx_armeb +#define helper_uhadd16 helper_uhadd16_armeb +#define helper_uhadd8 helper_uhadd8_armeb +#define helper_uhsub16 helper_uhsub16_armeb +#define helper_uhsub8 helper_uhsub8_armeb +#define helper_uhsubaddx helper_uhsubaddx_armeb +#define helper_uhaddsubx helper_uhaddsubx_armeb +#define helper_usad8 helper_usad8_armeb +#define helper_sel_flags helper_sel_flags_armeb +#define helper_crc32 helper_crc32_armeb +#define helper_crc32c helper_crc32c_armeb +#define fp_exception_el fp_exception_el_armeb +#define arm_mmu_idx_to_el arm_mmu_idx_to_el_armeb +#define 
arm_mmu_idx_el arm_mmu_idx_el_armeb +#define arm_mmu_idx arm_mmu_idx_armeb +#define arm_stage1_mmu_idx arm_stage1_mmu_idx_armeb +#define arm_rebuild_hflags arm_rebuild_hflags_armeb +#define helper_rebuild_hflags_m32_newel helper_rebuild_hflags_m32_newel_armeb +#define helper_rebuild_hflags_m32 helper_rebuild_hflags_m32_armeb +#define helper_rebuild_hflags_a32_newel helper_rebuild_hflags_a32_newel_armeb +#define helper_rebuild_hflags_a32 helper_rebuild_hflags_a32_armeb +#define helper_rebuild_hflags_a64 helper_rebuild_hflags_a64_armeb +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_armeb +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_armeb +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_armeb +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_armeb +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_armeb +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_armeb +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_armeb +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_armeb +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_armeb +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_armeb +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_armeb +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_armeb +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_armeb +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_armeb +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_armeb +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_armeb +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_armeb +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_armeb +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_armeb +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_armeb +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_armeb +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_armeb +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_armeb +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_armeb +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_armeb +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_armeb +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_armeb +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_armeb +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_armeb +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_armeb +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_armeb +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_armeb +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_armeb +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_armeb +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_armeb +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_armeb +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_armeb +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_armeb +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_armeb +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_armeb +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_armeb +#define helper_iwmmxt_minub helper_iwmmxt_minub_armeb +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_armeb +#define helper_iwmmxt_minul helper_iwmmxt_minul_armeb +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_armeb +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_armeb +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_armeb +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_armeb +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_armeb +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_armeb +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_armeb +#define helper_iwmmxt_subnw 
helper_iwmmxt_subnw_armeb +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_armeb #define helper_iwmmxt_addnb helper_iwmmxt_addnb_armeb -#define helper_iwmmxt_addnl helper_iwmmxt_addnl_armeb #define helper_iwmmxt_addnw helper_iwmmxt_addnw_armeb -#define helper_iwmmxt_addsb helper_iwmmxt_addsb_armeb -#define helper_iwmmxt_addsl helper_iwmmxt_addsl_armeb -#define helper_iwmmxt_addsw helper_iwmmxt_addsw_armeb +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_armeb +#define helper_iwmmxt_subub helper_iwmmxt_subub_armeb +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_armeb +#define helper_iwmmxt_subul helper_iwmmxt_subul_armeb #define helper_iwmmxt_addub helper_iwmmxt_addub_armeb -#define helper_iwmmxt_addul helper_iwmmxt_addul_armeb #define helper_iwmmxt_adduw helper_iwmmxt_adduw_armeb -#define helper_iwmmxt_align helper_iwmmxt_align_armeb +#define helper_iwmmxt_addul helper_iwmmxt_addul_armeb +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_armeb +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_armeb +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_armeb +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_armeb +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_armeb +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_armeb #define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_armeb #define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_armeb #define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_armeb #define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_armeb -#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_armeb -#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_armeb -#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_armeb -#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_armeb -#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_armeb -#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_armeb -#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_armeb -#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_armeb -#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_armeb -#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_armeb -#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_armeb -#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_armeb +#define helper_iwmmxt_align helper_iwmmxt_align_armeb #define helper_iwmmxt_insr helper_iwmmxt_insr_armeb -#define helper_iwmmxt_macsw helper_iwmmxt_macsw_armeb -#define helper_iwmmxt_macuw helper_iwmmxt_macuw_armeb -#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_armeb -#define helper_iwmmxt_madduq helper_iwmmxt_madduq_armeb -#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_armeb -#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_armeb -#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_armeb -#define helper_iwmmxt_maxub helper_iwmmxt_maxub_armeb -#define helper_iwmmxt_maxul helper_iwmmxt_maxul_armeb -#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_armeb -#define helper_iwmmxt_minsb helper_iwmmxt_minsb_armeb -#define helper_iwmmxt_minsl helper_iwmmxt_minsl_armeb -#define helper_iwmmxt_minsw helper_iwmmxt_minsw_armeb -#define helper_iwmmxt_minub helper_iwmmxt_minub_armeb -#define helper_iwmmxt_minul helper_iwmmxt_minul_armeb -#define helper_iwmmxt_minuw helper_iwmmxt_minuw_armeb +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_armeb +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_armeb +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_armeb +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_armeb +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_armeb +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_armeb +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_armeb #define helper_iwmmxt_msbb 
helper_iwmmxt_msbb_armeb -#define helper_iwmmxt_msbl helper_iwmmxt_msbl_armeb #define helper_iwmmxt_msbw helper_iwmmxt_msbw_armeb +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_armeb +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_armeb +#define helper_iwmmxt_srll helper_iwmmxt_srll_armeb +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_armeb +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_armeb +#define helper_iwmmxt_slll helper_iwmmxt_slll_armeb +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_armeb +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_armeb +#define helper_iwmmxt_sral helper_iwmmxt_sral_armeb +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_armeb +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_armeb +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_armeb +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_armeb +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_armeb +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_armeb +#define helper_iwmmxt_packul helper_iwmmxt_packul_armeb +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_armeb +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_armeb +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_armeb +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_armeb #define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_armeb #define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_armeb #define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_armeb -#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_armeb -#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_armeb -#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_armeb -#define helper_iwmmxt_mululw helper_iwmmxt_mululw_armeb -#define helper_iwmmxt_packsl helper_iwmmxt_packsl_armeb -#define helper_iwmmxt_packsq helper_iwmmxt_packsq_armeb -#define helper_iwmmxt_packsw helper_iwmmxt_packsw_armeb -#define helper_iwmmxt_packul helper_iwmmxt_packul_armeb -#define helper_iwmmxt_packuq helper_iwmmxt_packuq_armeb -#define helper_iwmmxt_packuw helper_iwmmxt_packuw_armeb -#define helper_iwmmxt_rorl helper_iwmmxt_rorl_armeb -#define helper_iwmmxt_rorq helper_iwmmxt_rorq_armeb -#define helper_iwmmxt_rorw helper_iwmmxt_rorw_armeb -#define helper_iwmmxt_sadb helper_iwmmxt_sadb_armeb -#define helper_iwmmxt_sadw helper_iwmmxt_sadw_armeb -#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_armeb -#define helper_iwmmxt_shufh helper_iwmmxt_shufh_armeb -#define helper_iwmmxt_slll helper_iwmmxt_slll_armeb -#define helper_iwmmxt_sllq helper_iwmmxt_sllq_armeb -#define helper_iwmmxt_sllw helper_iwmmxt_sllw_armeb -#define helper_iwmmxt_sral helper_iwmmxt_sral_armeb -#define helper_iwmmxt_sraq helper_iwmmxt_sraq_armeb -#define helper_iwmmxt_sraw helper_iwmmxt_sraw_armeb -#define helper_iwmmxt_srll helper_iwmmxt_srll_armeb -#define helper_iwmmxt_srlq helper_iwmmxt_srlq_armeb -#define helper_iwmmxt_srlw helper_iwmmxt_srlw_armeb -#define helper_iwmmxt_subnb helper_iwmmxt_subnb_armeb -#define helper_iwmmxt_subnl helper_iwmmxt_subnl_armeb -#define helper_iwmmxt_subnw helper_iwmmxt_subnw_armeb -#define helper_iwmmxt_subsb helper_iwmmxt_subsb_armeb -#define helper_iwmmxt_subsl helper_iwmmxt_subsl_armeb -#define helper_iwmmxt_subsw helper_iwmmxt_subsw_armeb -#define helper_iwmmxt_subub helper_iwmmxt_subub_armeb -#define helper_iwmmxt_subul helper_iwmmxt_subul_armeb -#define helper_iwmmxt_subuw helper_iwmmxt_subuw_armeb -#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_armeb -#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_armeb -#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_armeb -#define helper_iwmmxt_unpackhsl 
helper_iwmmxt_unpackhsl_armeb -#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_armeb -#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_armeb -#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_armeb -#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_armeb -#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_armeb -#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_armeb -#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_armeb -#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_armeb -#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_armeb -#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_armeb -#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_armeb -#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_armeb -#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_armeb -#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_armeb -#define helper_ldb_cmmu helper_ldb_cmmu_armeb -#define helper_ldb_mmu helper_ldb_mmu_armeb -#define helper_ldl_cmmu helper_ldl_cmmu_armeb -#define helper_ldl_mmu helper_ldl_mmu_armeb -#define helper_ldq_cmmu helper_ldq_cmmu_armeb -#define helper_ldq_mmu helper_ldq_mmu_armeb -#define helper_ldw_cmmu helper_ldw_cmmu_armeb -#define helper_ldw_mmu helper_ldw_mmu_armeb -#define helper_le_ldl_cmmu helper_le_ldl_cmmu_armeb -#define helper_le_ldq_cmmu helper_le_ldq_cmmu_armeb -#define helper_le_ldq_mmu helper_le_ldq_mmu_armeb -#define helper_le_ldsl_mmu helper_le_ldsl_mmu_armeb -#define helper_le_ldsw_mmu helper_le_ldsw_mmu_armeb -#define helper_le_ldul_mmu helper_le_ldul_mmu_armeb -#define helper_le_lduw_mmu helper_le_lduw_mmu_armeb -#define helper_le_ldw_cmmu helper_le_ldw_cmmu_armeb -#define helper_le_stl_mmu helper_le_stl_mmu_armeb -#define helper_le_stq_mmu helper_le_stq_mmu_armeb -#define helper_le_stw_mmu helper_le_stw_mmu_armeb -#define helper_msr_i_pstate helper_msr_i_pstate_armeb -#define helper_neon_abd_f32 helper_neon_abd_f32_armeb -#define helper_neon_abdl_s16 helper_neon_abdl_s16_armeb -#define helper_neon_abdl_s32 helper_neon_abdl_s32_armeb -#define helper_neon_abdl_s64 helper_neon_abdl_s64_armeb -#define helper_neon_abdl_u16 helper_neon_abdl_u16_armeb -#define helper_neon_abdl_u32 helper_neon_abdl_u32_armeb -#define helper_neon_abdl_u64 helper_neon_abdl_u64_armeb -#define helper_neon_abd_s16 helper_neon_abd_s16_armeb -#define helper_neon_abd_s32 helper_neon_abd_s32_armeb -#define helper_neon_abd_s8 helper_neon_abd_s8_armeb -#define helper_neon_abd_u16 helper_neon_abd_u16_armeb -#define helper_neon_abd_u32 helper_neon_abd_u32_armeb -#define helper_neon_abd_u8 helper_neon_abd_u8_armeb -#define helper_neon_abs_s16 helper_neon_abs_s16_armeb -#define helper_neon_abs_s8 helper_neon_abs_s8_armeb -#define helper_neon_acge_f32 helper_neon_acge_f32_armeb -#define helper_neon_acge_f64 helper_neon_acge_f64_armeb -#define helper_neon_acgt_f32 helper_neon_acgt_f32_armeb -#define helper_neon_acgt_f64 helper_neon_acgt_f64_armeb -#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_armeb -#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_armeb -#define helper_neon_addl_u16 helper_neon_addl_u16_armeb -#define helper_neon_addl_u32 helper_neon_addl_u32_armeb -#define helper_neon_add_u16 helper_neon_add_u16_armeb -#define helper_neon_add_u8 helper_neon_add_u8_armeb -#define helper_neon_ceq_f32 helper_neon_ceq_f32_armeb -#define helper_neon_ceq_u16 helper_neon_ceq_u16_armeb -#define helper_neon_ceq_u32 helper_neon_ceq_u32_armeb -#define helper_neon_ceq_u8 helper_neon_ceq_u8_armeb 
-#define helper_neon_cge_f32 helper_neon_cge_f32_armeb -#define helper_neon_cge_s16 helper_neon_cge_s16_armeb -#define helper_neon_cge_s32 helper_neon_cge_s32_armeb -#define helper_neon_cge_s8 helper_neon_cge_s8_armeb -#define helper_neon_cge_u16 helper_neon_cge_u16_armeb -#define helper_neon_cge_u32 helper_neon_cge_u32_armeb -#define helper_neon_cge_u8 helper_neon_cge_u8_armeb -#define helper_neon_cgt_f32 helper_neon_cgt_f32_armeb -#define helper_neon_cgt_s16 helper_neon_cgt_s16_armeb -#define helper_neon_cgt_s32 helper_neon_cgt_s32_armeb -#define helper_neon_cgt_s8 helper_neon_cgt_s8_armeb -#define helper_neon_cgt_u16 helper_neon_cgt_u16_armeb -#define helper_neon_cgt_u32 helper_neon_cgt_u32_armeb -#define helper_neon_cgt_u8 helper_neon_cgt_u8_armeb -#define helper_neon_cls_s16 helper_neon_cls_s16_armeb -#define helper_neon_cls_s32 helper_neon_cls_s32_armeb -#define helper_neon_cls_s8 helper_neon_cls_s8_armeb -#define helper_neon_clz_u16 helper_neon_clz_u16_armeb -#define helper_neon_clz_u8 helper_neon_clz_u8_armeb -#define helper_neon_cnt_u8 helper_neon_cnt_u8_armeb -#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_armeb -#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_armeb -#define helper_neon_hadd_s16 helper_neon_hadd_s16_armeb -#define helper_neon_hadd_s32 helper_neon_hadd_s32_armeb -#define helper_neon_hadd_s8 helper_neon_hadd_s8_armeb -#define helper_neon_hadd_u16 helper_neon_hadd_u16_armeb -#define helper_neon_hadd_u32 helper_neon_hadd_u32_armeb -#define helper_neon_hadd_u8 helper_neon_hadd_u8_armeb -#define helper_neon_hsub_s16 helper_neon_hsub_s16_armeb -#define helper_neon_hsub_s32 helper_neon_hsub_s32_armeb -#define helper_neon_hsub_s8 helper_neon_hsub_s8_armeb -#define helper_neon_hsub_u16 helper_neon_hsub_u16_armeb -#define helper_neon_hsub_u32 helper_neon_hsub_u32_armeb -#define helper_neon_hsub_u8 helper_neon_hsub_u8_armeb -#define helper_neon_max_s16 helper_neon_max_s16_armeb -#define helper_neon_max_s32 helper_neon_max_s32_armeb -#define helper_neon_max_s8 helper_neon_max_s8_armeb -#define helper_neon_max_u16 helper_neon_max_u16_armeb -#define helper_neon_max_u32 helper_neon_max_u32_armeb -#define helper_neon_max_u8 helper_neon_max_u8_armeb -#define helper_neon_min_s16 helper_neon_min_s16_armeb -#define helper_neon_min_s32 helper_neon_min_s32_armeb -#define helper_neon_min_s8 helper_neon_min_s8_armeb -#define helper_neon_min_u16 helper_neon_min_u16_armeb -#define helper_neon_min_u32 helper_neon_min_u32_armeb -#define helper_neon_min_u8 helper_neon_min_u8_armeb -#define helper_neon_mull_p8 helper_neon_mull_p8_armeb -#define helper_neon_mull_s16 helper_neon_mull_s16_armeb -#define helper_neon_mull_s8 helper_neon_mull_s8_armeb -#define helper_neon_mull_u16 helper_neon_mull_u16_armeb -#define helper_neon_mull_u8 helper_neon_mull_u8_armeb -#define helper_neon_mul_p8 helper_neon_mul_p8_armeb -#define helper_neon_mul_u16 helper_neon_mul_u16_armeb -#define helper_neon_mul_u8 helper_neon_mul_u8_armeb -#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_armeb -#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_armeb -#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_armeb -#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_armeb -#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_armeb -#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_armeb -#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_armeb -#define helper_neon_narrow_sat_u16 
helper_neon_narrow_sat_u16_armeb -#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_armeb -#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_armeb -#define helper_neon_narrow_u16 helper_neon_narrow_u16_armeb -#define helper_neon_narrow_u8 helper_neon_narrow_u8_armeb -#define helper_neon_negl_u16 helper_neon_negl_u16_armeb -#define helper_neon_negl_u32 helper_neon_negl_u32_armeb -#define helper_neon_paddl_u16 helper_neon_paddl_u16_armeb -#define helper_neon_paddl_u32 helper_neon_paddl_u32_armeb -#define helper_neon_padd_u16 helper_neon_padd_u16_armeb -#define helper_neon_padd_u8 helper_neon_padd_u8_armeb -#define helper_neon_pmax_s16 helper_neon_pmax_s16_armeb -#define helper_neon_pmax_s8 helper_neon_pmax_s8_armeb -#define helper_neon_pmax_u16 helper_neon_pmax_u16_armeb -#define helper_neon_pmax_u8 helper_neon_pmax_u8_armeb -#define helper_neon_pmin_s16 helper_neon_pmin_s16_armeb -#define helper_neon_pmin_s8 helper_neon_pmin_s8_armeb -#define helper_neon_pmin_u16 helper_neon_pmin_u16_armeb -#define helper_neon_pmin_u8 helper_neon_pmin_u8_armeb -#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_armeb -#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_armeb -#define helper_neon_qabs_s16 helper_neon_qabs_s16_armeb -#define helper_neon_qabs_s32 helper_neon_qabs_s32_armeb -#define helper_neon_qabs_s64 helper_neon_qabs_s64_armeb -#define helper_neon_qabs_s8 helper_neon_qabs_s8_armeb -#define helper_neon_qadd_s16 helper_neon_qadd_s16_armeb -#define helper_neon_qadd_s32 helper_neon_qadd_s32_armeb -#define helper_neon_qadd_s64 helper_neon_qadd_s64_armeb -#define helper_neon_qadd_s8 helper_neon_qadd_s8_armeb +#define armv7m_nvic_set_pending armv7m_nvic_set_pending_armeb +#define helper_v7m_preserve_fp_state helper_v7m_preserve_fp_state_armeb +#define write_v7m_exception write_v7m_exception_armeb +#define helper_v7m_bxns helper_v7m_bxns_armeb +#define helper_v7m_blxns helper_v7m_blxns_armeb +#define armv7m_nvic_neg_prio_requested armv7m_nvic_neg_prio_requested_armeb +#define helper_v7m_vlstm helper_v7m_vlstm_armeb +#define helper_v7m_vlldm helper_v7m_vlldm_armeb +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_armeb +#define helper_v7m_mrs helper_v7m_mrs_armeb +#define helper_v7m_msr helper_v7m_msr_armeb +#define helper_v7m_tt helper_v7m_tt_armeb +#define arm_v7m_mmu_idx_all arm_v7m_mmu_idx_all_armeb +#define arm_v7m_mmu_idx_for_secstate_and_priv arm_v7m_mmu_idx_for_secstate_and_priv_armeb +#define arm_v7m_mmu_idx_for_secstate arm_v7m_mmu_idx_for_secstate_armeb +#define helper_neon_qadd_u8 helper_neon_qadd_u8_armeb #define helper_neon_qadd_u16 helper_neon_qadd_u16_armeb #define helper_neon_qadd_u32 helper_neon_qadd_u32_armeb #define helper_neon_qadd_u64 helper_neon_qadd_u64_armeb -#define helper_neon_qadd_u8 helper_neon_qadd_u8_armeb -#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_armeb -#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_armeb -#define helper_neon_qneg_s16 helper_neon_qneg_s16_armeb -#define helper_neon_qneg_s32 helper_neon_qneg_s32_armeb -#define helper_neon_qneg_s64 helper_neon_qneg_s64_armeb -#define helper_neon_qneg_s8 helper_neon_qneg_s8_armeb -#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_armeb -#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_armeb -#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_armeb -#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_armeb -#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_armeb -#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_armeb -#define helper_neon_qrshl_u16 
helper_neon_qrshl_u16_armeb -#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_armeb -#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_armeb -#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_armeb -#define helper_neon_qshl_s16 helper_neon_qshl_s16_armeb -#define helper_neon_qshl_s32 helper_neon_qshl_s32_armeb -#define helper_neon_qshl_s64 helper_neon_qshl_s64_armeb -#define helper_neon_qshl_s8 helper_neon_qshl_s8_armeb -#define helper_neon_qshl_u16 helper_neon_qshl_u16_armeb -#define helper_neon_qshl_u32 helper_neon_qshl_u32_armeb -#define helper_neon_qshl_u64 helper_neon_qshl_u64_armeb -#define helper_neon_qshl_u8 helper_neon_qshl_u8_armeb -#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_armeb -#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_armeb -#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_armeb -#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_armeb -#define helper_neon_qsub_s16 helper_neon_qsub_s16_armeb -#define helper_neon_qsub_s32 helper_neon_qsub_s32_armeb -#define helper_neon_qsub_s64 helper_neon_qsub_s64_armeb -#define helper_neon_qsub_s8 helper_neon_qsub_s8_armeb -#define helper_neon_qsub_u16 helper_neon_qsub_u16_armeb -#define helper_neon_qsub_u32 helper_neon_qsub_u32_armeb -#define helper_neon_qsub_u64 helper_neon_qsub_u64_armeb -#define helper_neon_qsub_u8 helper_neon_qsub_u8_armeb -#define helper_neon_qunzip16 helper_neon_qunzip16_armeb -#define helper_neon_qunzip32 helper_neon_qunzip32_armeb -#define helper_neon_qunzip8 helper_neon_qunzip8_armeb -#define helper_neon_qzip16 helper_neon_qzip16_armeb -#define helper_neon_qzip32 helper_neon_qzip32_armeb -#define helper_neon_qzip8 helper_neon_qzip8_armeb -#define helper_neon_rbit_u8 helper_neon_rbit_u8_armeb -#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_armeb -#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_armeb -#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_armeb -#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_armeb -#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_armeb -#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_armeb -#define helper_neon_rshl_s16 helper_neon_rshl_s16_armeb -#define helper_neon_rshl_s32 helper_neon_rshl_s32_armeb -#define helper_neon_rshl_s64 helper_neon_rshl_s64_armeb -#define helper_neon_rshl_s8 helper_neon_rshl_s8_armeb -#define helper_neon_rshl_u16 helper_neon_rshl_u16_armeb -#define helper_neon_rshl_u32 helper_neon_rshl_u32_armeb -#define helper_neon_rshl_u64 helper_neon_rshl_u64_armeb -#define helper_neon_rshl_u8 helper_neon_rshl_u8_armeb -#define helper_neon_shl_s16 helper_neon_shl_s16_armeb -#define helper_neon_shl_s32 helper_neon_shl_s32_armeb -#define helper_neon_shl_s64 helper_neon_shl_s64_armeb -#define helper_neon_shl_s8 helper_neon_shl_s8_armeb -#define helper_neon_shl_u16 helper_neon_shl_u16_armeb -#define helper_neon_shl_u32 helper_neon_shl_u32_armeb -#define helper_neon_shl_u64 helper_neon_shl_u64_armeb -#define helper_neon_shl_u8 helper_neon_shl_u8_armeb -#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_armeb -#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_armeb -#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_armeb -#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_armeb -#define helper_neon_subl_u16 helper_neon_subl_u16_armeb -#define helper_neon_subl_u32 helper_neon_subl_u32_armeb -#define helper_neon_sub_u16 helper_neon_sub_u16_armeb -#define helper_neon_sub_u8 helper_neon_sub_u8_armeb -#define helper_neon_tbl helper_neon_tbl_armeb -#define helper_neon_tst_u16 helper_neon_tst_u16_armeb -#define helper_neon_tst_u32 
helper_neon_tst_u32_armeb -#define helper_neon_tst_u8 helper_neon_tst_u8_armeb -#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_armeb -#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_armeb -#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_armeb -#define helper_neon_unzip16 helper_neon_unzip16_armeb -#define helper_neon_unzip8 helper_neon_unzip8_armeb +#define helper_neon_qadd_s8 helper_neon_qadd_s8_armeb +#define helper_neon_qadd_s16 helper_neon_qadd_s16_armeb +#define helper_neon_qadd_s32 helper_neon_qadd_s32_armeb +#define helper_neon_qadd_s64 helper_neon_qadd_s64_armeb +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_armeb #define helper_neon_uqadd_s16 helper_neon_uqadd_s16_armeb #define helper_neon_uqadd_s32 helper_neon_uqadd_s32_armeb #define helper_neon_uqadd_s64 helper_neon_uqadd_s64_armeb -#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_armeb -#define helper_neon_widen_s16 helper_neon_widen_s16_armeb +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_armeb +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_armeb +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_armeb +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_armeb +#define helper_neon_qsub_u8 helper_neon_qsub_u8_armeb +#define helper_neon_qsub_u16 helper_neon_qsub_u16_armeb +#define helper_neon_qsub_u32 helper_neon_qsub_u32_armeb +#define helper_neon_qsub_u64 helper_neon_qsub_u64_armeb +#define helper_neon_qsub_s8 helper_neon_qsub_s8_armeb +#define helper_neon_qsub_s16 helper_neon_qsub_s16_armeb +#define helper_neon_qsub_s32 helper_neon_qsub_s32_armeb +#define helper_neon_qsub_s64 helper_neon_qsub_s64_armeb +#define helper_neon_hadd_s8 helper_neon_hadd_s8_armeb +#define helper_neon_hadd_u8 helper_neon_hadd_u8_armeb +#define helper_neon_hadd_s16 helper_neon_hadd_s16_armeb +#define helper_neon_hadd_u16 helper_neon_hadd_u16_armeb +#define helper_neon_hadd_s32 helper_neon_hadd_s32_armeb +#define helper_neon_hadd_u32 helper_neon_hadd_u32_armeb +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_armeb +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_armeb +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_armeb +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_armeb +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_armeb +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_armeb +#define helper_neon_hsub_s8 helper_neon_hsub_s8_armeb +#define helper_neon_hsub_u8 helper_neon_hsub_u8_armeb +#define helper_neon_hsub_s16 helper_neon_hsub_s16_armeb +#define helper_neon_hsub_u16 helper_neon_hsub_u16_armeb +#define helper_neon_hsub_s32 helper_neon_hsub_s32_armeb +#define helper_neon_hsub_u32 helper_neon_hsub_u32_armeb +#define helper_neon_cgt_s8 helper_neon_cgt_s8_armeb +#define helper_neon_cgt_u8 helper_neon_cgt_u8_armeb +#define helper_neon_cgt_s16 helper_neon_cgt_s16_armeb +#define helper_neon_cgt_u16 helper_neon_cgt_u16_armeb +#define helper_neon_cgt_s32 helper_neon_cgt_s32_armeb +#define helper_neon_cgt_u32 helper_neon_cgt_u32_armeb +#define helper_neon_cge_s8 helper_neon_cge_s8_armeb +#define helper_neon_cge_u8 helper_neon_cge_u8_armeb +#define helper_neon_cge_s16 helper_neon_cge_s16_armeb +#define helper_neon_cge_u16 helper_neon_cge_u16_armeb +#define helper_neon_cge_s32 helper_neon_cge_s32_armeb +#define helper_neon_cge_u32 helper_neon_cge_u32_armeb +#define helper_neon_pmin_s8 helper_neon_pmin_s8_armeb +#define helper_neon_pmin_u8 helper_neon_pmin_u8_armeb +#define helper_neon_pmin_s16 helper_neon_pmin_s16_armeb +#define helper_neon_pmin_u16 helper_neon_pmin_u16_armeb 
+#define helper_neon_pmax_s8 helper_neon_pmax_s8_armeb
+#define helper_neon_pmax_u8 helper_neon_pmax_u8_armeb
+#define helper_neon_pmax_s16 helper_neon_pmax_s16_armeb
+#define helper_neon_pmax_u16 helper_neon_pmax_u16_armeb
+#define helper_neon_abd_s8 helper_neon_abd_s8_armeb
+#define helper_neon_abd_u8 helper_neon_abd_u8_armeb
+#define helper_neon_abd_s16 helper_neon_abd_s16_armeb
+#define helper_neon_abd_u16 helper_neon_abd_u16_armeb
+#define helper_neon_abd_s32 helper_neon_abd_s32_armeb
+#define helper_neon_abd_u32 helper_neon_abd_u32_armeb
+#define helper_neon_shl_u16 helper_neon_shl_u16_armeb
+#define helper_neon_shl_s16 helper_neon_shl_s16_armeb
+#define helper_neon_rshl_s8 helper_neon_rshl_s8_armeb
+#define helper_neon_rshl_s16 helper_neon_rshl_s16_armeb
+#define helper_neon_rshl_s32 helper_neon_rshl_s32_armeb
+#define helper_neon_rshl_s64 helper_neon_rshl_s64_armeb
+#define helper_neon_rshl_u8 helper_neon_rshl_u8_armeb
+#define helper_neon_rshl_u16 helper_neon_rshl_u16_armeb
+#define helper_neon_rshl_u32 helper_neon_rshl_u32_armeb
+#define helper_neon_rshl_u64 helper_neon_rshl_u64_armeb
+#define helper_neon_qshl_u8 helper_neon_qshl_u8_armeb
+#define helper_neon_qshl_u16 helper_neon_qshl_u16_armeb
+#define helper_neon_qshl_u32 helper_neon_qshl_u32_armeb
+#define helper_neon_qshl_u64 helper_neon_qshl_u64_armeb
+#define helper_neon_qshl_s8 helper_neon_qshl_s8_armeb
+#define helper_neon_qshl_s16 helper_neon_qshl_s16_armeb
+#define helper_neon_qshl_s32 helper_neon_qshl_s32_armeb
+#define helper_neon_qshl_s64 helper_neon_qshl_s64_armeb
+#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_armeb
+#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_armeb
+#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_armeb
+#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_armeb
+#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_armeb
+#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_armeb
+#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_armeb
+#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_armeb
+#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_armeb
+#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_armeb
+#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_armeb
+#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_armeb
+#define helper_neon_add_u8 helper_neon_add_u8_armeb
+#define helper_neon_add_u16 helper_neon_add_u16_armeb
+#define helper_neon_padd_u8 helper_neon_padd_u8_armeb
+#define helper_neon_padd_u16 helper_neon_padd_u16_armeb
+#define helper_neon_sub_u8 helper_neon_sub_u8_armeb
+#define helper_neon_sub_u16 helper_neon_sub_u16_armeb
+#define helper_neon_mul_u8 helper_neon_mul_u8_armeb
+#define helper_neon_mul_u16 helper_neon_mul_u16_armeb
+#define helper_neon_tst_u8 helper_neon_tst_u8_armeb
+#define helper_neon_tst_u16 helper_neon_tst_u16_armeb
+#define helper_neon_tst_u32 helper_neon_tst_u32_armeb
+#define helper_neon_ceq_u8 helper_neon_ceq_u8_armeb
+#define helper_neon_ceq_u16 helper_neon_ceq_u16_armeb
+#define helper_neon_ceq_u32 helper_neon_ceq_u32_armeb
+#define helper_neon_clz_u8 helper_neon_clz_u8_armeb
+#define helper_neon_clz_u16 helper_neon_clz_u16_armeb
+#define helper_neon_cls_s8 helper_neon_cls_s8_armeb
+#define helper_neon_cls_s16 helper_neon_cls_s16_armeb
+#define helper_neon_cls_s32 helper_neon_cls_s32_armeb
+#define helper_neon_cnt_u8 helper_neon_cnt_u8_armeb
+#define helper_neon_rbit_u8 helper_neon_rbit_u8_armeb
+#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_armeb
+#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_armeb
+#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_armeb
+#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_armeb
+#define helper_neon_narrow_u8 helper_neon_narrow_u8_armeb
+#define helper_neon_narrow_u16 helper_neon_narrow_u16_armeb
+#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_armeb
+#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_armeb
+#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_armeb
+#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_armeb
+#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_armeb
+#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_armeb
+#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_armeb
+#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_armeb
+#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_armeb
+#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_armeb
+#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_armeb
+#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_armeb
+#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_armeb
+#define helper_neon_widen_u8 helper_neon_widen_u8_armeb
 #define helper_neon_widen_s8 helper_neon_widen_s8_armeb
 #define helper_neon_widen_u16 helper_neon_widen_u16_armeb
-#define helper_neon_widen_u8 helper_neon_widen_u8_armeb
-#define helper_neon_zip16 helper_neon_zip16_armeb
+#define helper_neon_widen_s16 helper_neon_widen_s16_armeb
+#define helper_neon_addl_u16 helper_neon_addl_u16_armeb
+#define helper_neon_addl_u32 helper_neon_addl_u32_armeb
+#define helper_neon_paddl_u16 helper_neon_paddl_u16_armeb
+#define helper_neon_paddl_u32 helper_neon_paddl_u32_armeb
+#define helper_neon_subl_u16 helper_neon_subl_u16_armeb
+#define helper_neon_subl_u32 helper_neon_subl_u32_armeb
+#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_armeb
+#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_armeb
+#define helper_neon_abdl_u16 helper_neon_abdl_u16_armeb
+#define helper_neon_abdl_s16 helper_neon_abdl_s16_armeb
+#define helper_neon_abdl_u32 helper_neon_abdl_u32_armeb
+#define helper_neon_abdl_s32 helper_neon_abdl_s32_armeb
+#define helper_neon_abdl_u64 helper_neon_abdl_u64_armeb
+#define helper_neon_abdl_s64 helper_neon_abdl_s64_armeb
+#define helper_neon_mull_u8 helper_neon_mull_u8_armeb
+#define helper_neon_mull_s8 helper_neon_mull_s8_armeb
+#define helper_neon_mull_u16 helper_neon_mull_u16_armeb
+#define helper_neon_mull_s16 helper_neon_mull_s16_armeb
+#define helper_neon_negl_u16 helper_neon_negl_u16_armeb
+#define helper_neon_negl_u32 helper_neon_negl_u32_armeb
+#define helper_neon_qabs_s8 helper_neon_qabs_s8_armeb
+#define helper_neon_qneg_s8 helper_neon_qneg_s8_armeb
+#define helper_neon_qabs_s16 helper_neon_qabs_s16_armeb
+#define helper_neon_qneg_s16 helper_neon_qneg_s16_armeb
+#define helper_neon_qabs_s32 helper_neon_qabs_s32_armeb
+#define helper_neon_qneg_s32 helper_neon_qneg_s32_armeb
+#define helper_neon_qabs_s64 helper_neon_qabs_s64_armeb
+#define helper_neon_qneg_s64 helper_neon_qneg_s64_armeb
+#define helper_neon_abd_f32 helper_neon_abd_f32_armeb
+#define helper_neon_ceq_f32 helper_neon_ceq_f32_armeb
+#define helper_neon_cge_f32 helper_neon_cge_f32_armeb
+#define helper_neon_cgt_f32 helper_neon_cgt_f32_armeb
+#define helper_neon_acge_f32 helper_neon_acge_f32_armeb
+#define helper_neon_acgt_f32 helper_neon_acgt_f32_armeb
+#define helper_neon_acge_f64 helper_neon_acge_f64_armeb
+#define helper_neon_acgt_f64 helper_neon_acgt_f64_armeb
+#define helper_neon_qunzip8 helper_neon_qunzip8_armeb
+#define helper_neon_qunzip16 helper_neon_qunzip16_armeb
+#define helper_neon_qunzip32 helper_neon_qunzip32_armeb
+#define helper_neon_unzip8 helper_neon_unzip8_armeb
+#define helper_neon_unzip16 helper_neon_unzip16_armeb
+#define helper_neon_qzip8 helper_neon_qzip8_armeb
+#define helper_neon_qzip16 helper_neon_qzip16_armeb
+#define helper_neon_qzip32 helper_neon_qzip32_armeb
 #define helper_neon_zip8 helper_neon_zip8_armeb
-#define helper_pre_hvc helper_pre_hvc_armeb
-#define helper_pre_smc helper_pre_smc_armeb
-#define helper_qadd16 helper_qadd16_armeb
-#define helper_qadd8 helper_qadd8_armeb
-#define helper_qaddsubx helper_qaddsubx_armeb
-#define helper_qsub16 helper_qsub16_armeb
-#define helper_qsub8 helper_qsub8_armeb
-#define helper_qsubaddx helper_qsubaddx_armeb
-#define helper_rbit helper_rbit_armeb
-#define helper_recpe_f32 helper_recpe_f32_armeb
-#define helper_recpe_f64 helper_recpe_f64_armeb
-#define helper_recpe_u32 helper_recpe_u32_armeb
-#define helper_recps_f32 helper_recps_f32_armeb
-#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_armeb
-#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_armeb
-#define helper_ret_ldub_mmu helper_ret_ldub_mmu_armeb
-#define helper_ret_stb_mmu helper_ret_stb_mmu_armeb
-#define helper_rintd helper_rintd_armeb
-#define helper_rintd_exact helper_rintd_exact_armeb
-#define helper_rints helper_rints_armeb
-#define helper_rints_exact helper_rints_exact_armeb
-#define helper_ror_cc helper_ror_cc_armeb
-#define helper_rsqrte_f32 helper_rsqrte_f32_armeb
-#define helper_rsqrte_f64 helper_rsqrte_f64_armeb
-#define helper_rsqrte_u32 helper_rsqrte_u32_armeb
-#define helper_rsqrts_f32 helper_rsqrts_f32_armeb
-#define helper_sadd16 helper_sadd16_armeb
-#define helper_sadd8 helper_sadd8_armeb
-#define helper_saddsubx helper_saddsubx_armeb
-#define helper_sar_cc helper_sar_cc_armeb
-#define helper_sdiv helper_sdiv_armeb
-#define helper_sel_flags helper_sel_flags_armeb
-#define helper_set_cp_reg helper_set_cp_reg_armeb
-#define helper_set_cp_reg64 helper_set_cp_reg64_armeb
-#define helper_set_neon_rmode helper_set_neon_rmode_armeb
-#define helper_set_r13_banked helper_set_r13_banked_armeb
-#define helper_set_rmode helper_set_rmode_armeb
-#define helper_set_user_reg helper_set_user_reg_armeb
-#define helper_shadd16 helper_shadd16_armeb
-#define helper_shadd8 helper_shadd8_armeb
-#define helper_shaddsubx helper_shaddsubx_armeb
-#define helper_shl_cc helper_shl_cc_armeb
-#define helper_shr_cc helper_shr_cc_armeb
-#define helper_shsub16 helper_shsub16_armeb
-#define helper_shsub8 helper_shsub8_armeb
-#define helper_shsubaddx helper_shsubaddx_armeb
+#define helper_neon_zip16 helper_neon_zip16_armeb
+#define raise_exception raise_exception_armeb
+#define raise_exception_ra raise_exception_ra_armeb
+#define helper_neon_tbl helper_neon_tbl_armeb
+#define helper_v8m_stackcheck helper_v8m_stackcheck_armeb
+#define helper_add_setq helper_add_setq_armeb
+#define helper_add_saturate helper_add_saturate_armeb
+#define helper_sub_saturate helper_sub_saturate_armeb
+#define helper_add_usaturate helper_add_usaturate_armeb
+#define helper_sub_usaturate helper_sub_usaturate_armeb
 #define helper_ssat helper_ssat_armeb
 #define helper_ssat16 helper_ssat16_armeb
-#define helper_ssub16 helper_ssub16_armeb
-#define helper_ssub8 helper_ssub8_armeb
-#define helper_ssubaddx helper_ssubaddx_armeb
-#define helper_stb_mmu helper_stb_mmu_armeb
-#define helper_stl_mmu helper_stl_mmu_armeb
-#define helper_stq_mmu helper_stq_mmu_armeb
-#define helper_stw_mmu helper_stw_mmu_armeb
-#define helper_sub_saturate helper_sub_saturate_armeb
-#define helper_sub_usaturate helper_sub_usaturate_armeb
-#define helper_sxtb16 helper_sxtb16_armeb
-#define helper_uadd16 helper_uadd16_armeb
-#define helper_uadd8 helper_uadd8_armeb
-#define helper_uaddsubx helper_uaddsubx_armeb
-#define helper_udiv helper_udiv_armeb
-#define helper_uhadd16 helper_uhadd16_armeb
-#define helper_uhadd8 helper_uhadd8_armeb
-#define helper_uhaddsubx helper_uhaddsubx_armeb
-#define helper_uhsub16 helper_uhsub16_armeb
-#define helper_uhsub8 helper_uhsub8_armeb
-#define helper_uhsubaddx helper_uhsubaddx_armeb
-#define helper_uqadd16 helper_uqadd16_armeb
-#define helper_uqadd8 helper_uqadd8_armeb
-#define helper_uqaddsubx helper_uqaddsubx_armeb
-#define helper_uqsub16 helper_uqsub16_armeb
-#define helper_uqsub8 helper_uqsub8_armeb
-#define helper_uqsubaddx helper_uqsubaddx_armeb
-#define helper_usad8 helper_usad8_armeb
 #define helper_usat helper_usat_armeb
 #define helper_usat16 helper_usat16_armeb
-#define helper_usub16 helper_usub16_armeb
-#define helper_usub8 helper_usub8_armeb
-#define helper_usubaddx helper_usubaddx_armeb
-#define helper_uxtb16 helper_uxtb16_armeb
-#define helper_v7m_mrs helper_v7m_mrs_armeb
-#define helper_v7m_msr helper_v7m_msr_armeb
-#define helper_vfp_absd helper_vfp_absd_armeb
-#define helper_vfp_abss helper_vfp_abss_armeb
-#define helper_vfp_addd helper_vfp_addd_armeb
+#define helper_setend helper_setend_armeb
+#define helper_wfi helper_wfi_armeb
+#define helper_wfe helper_wfe_armeb
+#define helper_yield helper_yield_armeb
+#define helper_exception_internal helper_exception_internal_armeb
+#define helper_exception_with_syndrome helper_exception_with_syndrome_armeb
+#define helper_exception_bkpt_insn helper_exception_bkpt_insn_armeb
+#define helper_cpsr_read helper_cpsr_read_armeb
+#define helper_cpsr_write helper_cpsr_write_armeb
+#define helper_cpsr_write_eret helper_cpsr_write_eret_armeb
+#define helper_get_user_reg helper_get_user_reg_armeb
+#define helper_set_user_reg helper_set_user_reg_armeb
+#define helper_set_r13_banked helper_set_r13_banked_armeb
+#define helper_get_r13_banked helper_get_r13_banked_armeb
+#define helper_msr_banked helper_msr_banked_armeb
+#define helper_mrs_banked helper_mrs_banked_armeb
+#define helper_access_check_cp_reg helper_access_check_cp_reg_armeb
+#define helper_set_cp_reg helper_set_cp_reg_armeb
+#define helper_get_cp_reg helper_get_cp_reg_armeb
+#define helper_set_cp_reg64 helper_set_cp_reg64_armeb
+#define helper_get_cp_reg64 helper_get_cp_reg64_armeb
+#define helper_pre_hvc helper_pre_hvc_armeb
+#define helper_pre_smc helper_pre_smc_armeb
+#define helper_shl_cc helper_shl_cc_armeb
+#define helper_shr_cc helper_shr_cc_armeb
+#define helper_sar_cc helper_sar_cc_armeb
+#define helper_ror_cc helper_ror_cc_armeb
+#define arm_is_psci_call arm_is_psci_call_armeb
+#define arm_handle_psci_call arm_handle_psci_call_armeb
+#define arm_cpu_do_unaligned_access arm_cpu_do_unaligned_access_armeb
+#define arm_cpu_do_transaction_failed arm_cpu_do_transaction_failed_armeb
+#define arm_cpu_tlb_fill arm_cpu_tlb_fill_armeb
+#define arm_translate_init arm_translate_init_armeb
+#define arm_test_cc arm_test_cc_armeb
+#define arm_free_cc arm_free_cc_armeb
+#define arm_jump_cc arm_jump_cc_armeb
+#define arm_gen_test_cc arm_gen_test_cc_armeb
+#define vfp_expand_imm vfp_expand_imm_armeb
+#define gen_cmtst_i64 gen_cmtst_i64_armeb
+#define gen_ushl_i32 gen_ushl_i32_armeb
+#define gen_ushl_i64 gen_ushl_i64_armeb
+#define gen_sshl_i32 gen_sshl_i32_armeb
+#define gen_sshl_i64 gen_sshl_i64_armeb
+#define gen_intermediate_code gen_intermediate_code_armeb
+#define restore_state_to_opc restore_state_to_opc_armeb
+#define helper_neon_qrdmlah_s16 helper_neon_qrdmlah_s16_armeb
+#define helper_gvec_qrdmlah_s16 helper_gvec_qrdmlah_s16_armeb
+#define helper_neon_qrdmlsh_s16 helper_neon_qrdmlsh_s16_armeb
+#define helper_gvec_qrdmlsh_s16 helper_gvec_qrdmlsh_s16_armeb
+#define helper_neon_qrdmlah_s32 helper_neon_qrdmlah_s32_armeb
+#define helper_gvec_qrdmlah_s32 helper_gvec_qrdmlah_s32_armeb
+#define helper_neon_qrdmlsh_s32 helper_neon_qrdmlsh_s32_armeb
+#define helper_gvec_qrdmlsh_s32 helper_gvec_qrdmlsh_s32_armeb
+#define helper_gvec_sdot_b helper_gvec_sdot_b_armeb
+#define helper_gvec_udot_b helper_gvec_udot_b_armeb
+#define helper_gvec_sdot_h helper_gvec_sdot_h_armeb
+#define helper_gvec_udot_h helper_gvec_udot_h_armeb
+#define helper_gvec_sdot_idx_b helper_gvec_sdot_idx_b_armeb
+#define helper_gvec_udot_idx_b helper_gvec_udot_idx_b_armeb
+#define helper_gvec_sdot_idx_h helper_gvec_sdot_idx_h_armeb
+#define helper_gvec_udot_idx_h helper_gvec_udot_idx_h_armeb
+#define helper_gvec_fcaddh helper_gvec_fcaddh_armeb
+#define helper_gvec_fcadds helper_gvec_fcadds_armeb
+#define helper_gvec_fcaddd helper_gvec_fcaddd_armeb
+#define helper_gvec_fcmlah helper_gvec_fcmlah_armeb
+#define helper_gvec_fcmlah_idx helper_gvec_fcmlah_idx_armeb
+#define helper_gvec_fcmlas helper_gvec_fcmlas_armeb
+#define helper_gvec_fcmlas_idx helper_gvec_fcmlas_idx_armeb
+#define helper_gvec_fcmlad helper_gvec_fcmlad_armeb
+#define helper_gvec_frecpe_h helper_gvec_frecpe_h_armeb
+#define helper_gvec_frecpe_s helper_gvec_frecpe_s_armeb
+#define helper_gvec_frecpe_d helper_gvec_frecpe_d_armeb
+#define helper_gvec_frsqrte_h helper_gvec_frsqrte_h_armeb
+#define helper_gvec_frsqrte_s helper_gvec_frsqrte_s_armeb
+#define helper_gvec_frsqrte_d helper_gvec_frsqrte_d_armeb
+#define helper_gvec_fadd_h helper_gvec_fadd_h_armeb
+#define helper_gvec_fadd_s helper_gvec_fadd_s_armeb
+#define helper_gvec_fadd_d helper_gvec_fadd_d_armeb
+#define helper_gvec_fsub_h helper_gvec_fsub_h_armeb
+#define helper_gvec_fsub_s helper_gvec_fsub_s_armeb
+#define helper_gvec_fsub_d helper_gvec_fsub_d_armeb
+#define helper_gvec_fmul_h helper_gvec_fmul_h_armeb
+#define helper_gvec_fmul_s helper_gvec_fmul_s_armeb
+#define helper_gvec_fmul_d helper_gvec_fmul_d_armeb
+#define helper_gvec_ftsmul_h helper_gvec_ftsmul_h_armeb
+#define helper_gvec_ftsmul_s helper_gvec_ftsmul_s_armeb
+#define helper_gvec_ftsmul_d helper_gvec_ftsmul_d_armeb
+#define helper_gvec_fmul_idx_h helper_gvec_fmul_idx_h_armeb
+#define helper_gvec_fmul_idx_s helper_gvec_fmul_idx_s_armeb
+#define helper_gvec_fmul_idx_d helper_gvec_fmul_idx_d_armeb
+#define helper_gvec_fmla_idx_h helper_gvec_fmla_idx_h_armeb
+#define helper_gvec_fmla_idx_s helper_gvec_fmla_idx_s_armeb
+#define helper_gvec_fmla_idx_d helper_gvec_fmla_idx_d_armeb
+#define helper_gvec_uqadd_b helper_gvec_uqadd_b_armeb
+#define helper_gvec_uqadd_h helper_gvec_uqadd_h_armeb
+#define helper_gvec_uqadd_s helper_gvec_uqadd_s_armeb
+#define helper_gvec_sqadd_b helper_gvec_sqadd_b_armeb
+#define helper_gvec_sqadd_h helper_gvec_sqadd_h_armeb
+#define helper_gvec_sqadd_s helper_gvec_sqadd_s_armeb
+#define helper_gvec_uqsub_b helper_gvec_uqsub_b_armeb
+#define helper_gvec_uqsub_h helper_gvec_uqsub_h_armeb
+#define helper_gvec_uqsub_s helper_gvec_uqsub_s_armeb
+#define helper_gvec_sqsub_b helper_gvec_sqsub_b_armeb
+#define helper_gvec_sqsub_h helper_gvec_sqsub_h_armeb
+#define helper_gvec_sqsub_s helper_gvec_sqsub_s_armeb
+#define helper_gvec_uqadd_d helper_gvec_uqadd_d_armeb
+#define helper_gvec_uqsub_d helper_gvec_uqsub_d_armeb
+#define helper_gvec_sqadd_d helper_gvec_sqadd_d_armeb
+#define helper_gvec_sqsub_d helper_gvec_sqsub_d_armeb
+#define helper_gvec_fmlal_a32 helper_gvec_fmlal_a32_armeb
+#define helper_gvec_fmlal_a64 helper_gvec_fmlal_a64_armeb
+#define helper_gvec_fmlal_idx_a32 helper_gvec_fmlal_idx_a32_armeb
+#define helper_gvec_fmlal_idx_a64 helper_gvec_fmlal_idx_a64_armeb
+#define helper_gvec_sshl_b helper_gvec_sshl_b_armeb
+#define helper_gvec_sshl_h helper_gvec_sshl_h_armeb
+#define helper_gvec_ushl_b helper_gvec_ushl_b_armeb
+#define helper_gvec_ushl_h helper_gvec_ushl_h_armeb
+#define helper_gvec_pmul_b helper_gvec_pmul_b_armeb
+#define helper_gvec_pmull_q helper_gvec_pmull_q_armeb
+#define helper_neon_pmull_h helper_neon_pmull_h_armeb
+#define helper_vfp_get_fpscr helper_vfp_get_fpscr_armeb
+#define vfp_get_fpscr vfp_get_fpscr_armeb
+#define helper_vfp_set_fpscr helper_vfp_set_fpscr_armeb
+#define vfp_set_fpscr vfp_set_fpscr_armeb
 #define helper_vfp_adds helper_vfp_adds_armeb
+#define helper_vfp_addd helper_vfp_addd_armeb
+#define helper_vfp_subs helper_vfp_subs_armeb
+#define helper_vfp_subd helper_vfp_subd_armeb
+#define helper_vfp_muls helper_vfp_muls_armeb
+#define helper_vfp_muld helper_vfp_muld_armeb
+#define helper_vfp_divs helper_vfp_divs_armeb
+#define helper_vfp_divd helper_vfp_divd_armeb
+#define helper_vfp_mins helper_vfp_mins_armeb
+#define helper_vfp_mind helper_vfp_mind_armeb
+#define helper_vfp_maxs helper_vfp_maxs_armeb
+#define helper_vfp_maxd helper_vfp_maxd_armeb
+#define helper_vfp_minnums helper_vfp_minnums_armeb
+#define helper_vfp_minnumd helper_vfp_minnumd_armeb
+#define helper_vfp_maxnums helper_vfp_maxnums_armeb
+#define helper_vfp_maxnumd helper_vfp_maxnumd_armeb
+#define helper_vfp_negs helper_vfp_negs_armeb
+#define helper_vfp_negd helper_vfp_negd_armeb
+#define helper_vfp_abss helper_vfp_abss_armeb
+#define helper_vfp_absd helper_vfp_absd_armeb
+#define helper_vfp_sqrts helper_vfp_sqrts_armeb
+#define helper_vfp_sqrtd helper_vfp_sqrtd_armeb
+#define helper_vfp_cmps helper_vfp_cmps_armeb
+#define helper_vfp_cmpes helper_vfp_cmpes_armeb
 #define helper_vfp_cmpd helper_vfp_cmpd_armeb
 #define helper_vfp_cmped helper_vfp_cmped_armeb
-#define helper_vfp_cmpes helper_vfp_cmpes_armeb
-#define helper_vfp_cmps helper_vfp_cmps_armeb
-#define helper_vfp_divd helper_vfp_divd_armeb
-#define helper_vfp_divs helper_vfp_divs_armeb
-#define helper_vfp_fcvtds helper_vfp_fcvtds_armeb
-#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_armeb
-#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_armeb
-#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_armeb
-#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_armeb
-#define helper_vfp_fcvtsd helper_vfp_fcvtsd_armeb
-#define helper_vfp_get_fpscr helper_vfp_get_fpscr_armeb
-#define helper_vfp_maxd helper_vfp_maxd_armeb
-#define helper_vfp_maxnumd helper_vfp_maxnumd_armeb
-#define helper_vfp_maxnums helper_vfp_maxnums_armeb
-#define helper_vfp_maxs helper_vfp_maxs_armeb
-#define helper_vfp_mind helper_vfp_mind_armeb
-#define helper_vfp_minnumd helper_vfp_minnumd_armeb
-#define helper_vfp_minnums helper_vfp_minnums_armeb
-#define helper_vfp_mins helper_vfp_mins_armeb
-#define helper_vfp_muladdd helper_vfp_muladdd_armeb
-#define helper_vfp_muladds helper_vfp_muladds_armeb
-#define helper_vfp_muld helper_vfp_muld_armeb
-#define helper_vfp_muls helper_vfp_muls_armeb
-#define helper_vfp_negd helper_vfp_negd_armeb
-#define helper_vfp_negs helper_vfp_negs_armeb
-#define helper_vfp_set_fpscr helper_vfp_set_fpscr_armeb
-#define helper_vfp_shtod helper_vfp_shtod_armeb
-#define helper_vfp_shtos helper_vfp_shtos_armeb
-#define helper_vfp_sitod helper_vfp_sitod_armeb
+#define helper_vfp_sitoh helper_vfp_sitoh_armeb
+#define helper_vfp_tosih helper_vfp_tosih_armeb
+#define helper_vfp_tosizh helper_vfp_tosizh_armeb
 #define helper_vfp_sitos helper_vfp_sitos_armeb
-#define helper_vfp_sltod helper_vfp_sltod_armeb
-#define helper_vfp_sltos helper_vfp_sltos_armeb
-#define helper_vfp_sqrtd helper_vfp_sqrtd_armeb
-#define helper_vfp_sqrts helper_vfp_sqrts_armeb
-#define helper_vfp_sqtod helper_vfp_sqtod_armeb
-#define helper_vfp_sqtos helper_vfp_sqtos_armeb
-#define helper_vfp_subd helper_vfp_subd_armeb
-#define helper_vfp_subs helper_vfp_subs_armeb
-#define helper_vfp_toshd helper_vfp_toshd_armeb
-#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_armeb
-#define helper_vfp_toshs helper_vfp_toshs_armeb
-#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_armeb
-#define helper_vfp_tosid helper_vfp_tosid_armeb
 #define helper_vfp_tosis helper_vfp_tosis_armeb
-#define helper_vfp_tosizd helper_vfp_tosizd_armeb
 #define helper_vfp_tosizs helper_vfp_tosizs_armeb
-#define helper_vfp_tosld helper_vfp_tosld_armeb
-#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_armeb
-#define helper_vfp_tosls helper_vfp_tosls_armeb
-#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_armeb
-#define helper_vfp_tosqd helper_vfp_tosqd_armeb
-#define helper_vfp_tosqs helper_vfp_tosqs_armeb
-#define helper_vfp_touhd helper_vfp_touhd_armeb
-#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_armeb
-#define helper_vfp_touhs helper_vfp_touhs_armeb
-#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_armeb
-#define helper_vfp_touid helper_vfp_touid_armeb
-#define helper_vfp_touis helper_vfp_touis_armeb
-#define helper_vfp_touizd helper_vfp_touizd_armeb
-#define helper_vfp_touizs helper_vfp_touizs_armeb
-#define helper_vfp_tould helper_vfp_tould_armeb
-#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_armeb
-#define helper_vfp_touls helper_vfp_touls_armeb
-#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_armeb
-#define helper_vfp_touqd helper_vfp_touqd_armeb
-#define helper_vfp_touqs helper_vfp_touqs_armeb
-#define helper_vfp_uhtod helper_vfp_uhtod_armeb
-#define helper_vfp_uhtos helper_vfp_uhtos_armeb
-#define helper_vfp_uitod helper_vfp_uitod_armeb
+#define helper_vfp_sitod helper_vfp_sitod_armeb
+#define helper_vfp_tosid helper_vfp_tosid_armeb
+#define helper_vfp_tosizd helper_vfp_tosizd_armeb
+#define helper_vfp_uitoh helper_vfp_uitoh_armeb
+#define helper_vfp_touih helper_vfp_touih_armeb
+#define helper_vfp_touizh helper_vfp_touizh_armeb
 #define helper_vfp_uitos helper_vfp_uitos_armeb
+#define helper_vfp_touis helper_vfp_touis_armeb
+#define helper_vfp_touizs helper_vfp_touizs_armeb
+#define helper_vfp_uitod helper_vfp_uitod_armeb
+#define helper_vfp_touid helper_vfp_touid_armeb
+#define helper_vfp_touizd helper_vfp_touizd_armeb
+#define helper_vfp_fcvtds helper_vfp_fcvtds_armeb
+#define helper_vfp_fcvtsd helper_vfp_fcvtsd_armeb
+#define helper_vfp_shtod helper_vfp_shtod_armeb
+#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_armeb
+#define helper_vfp_toshd helper_vfp_toshd_armeb
+#define helper_vfp_sltod helper_vfp_sltod_armeb
+#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_armeb
+#define helper_vfp_tosld helper_vfp_tosld_armeb
+#define helper_vfp_sqtod helper_vfp_sqtod_armeb
+#define helper_vfp_tosqd helper_vfp_tosqd_armeb
+#define helper_vfp_uhtod helper_vfp_uhtod_armeb
+#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_armeb
+#define helper_vfp_touhd helper_vfp_touhd_armeb
 #define helper_vfp_ultod helper_vfp_ultod_armeb
-#define helper_vfp_ultos helper_vfp_ultos_armeb
+#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_armeb
+#define helper_vfp_tould helper_vfp_tould_armeb
 #define helper_vfp_uqtod helper_vfp_uqtod_armeb
+#define helper_vfp_touqd helper_vfp_touqd_armeb
+#define helper_vfp_shtos helper_vfp_shtos_armeb
+#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_armeb
+#define helper_vfp_toshs helper_vfp_toshs_armeb
+#define helper_vfp_sltos helper_vfp_sltos_armeb
+#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_armeb
+#define helper_vfp_tosls helper_vfp_tosls_armeb
+#define helper_vfp_sqtos helper_vfp_sqtos_armeb
+#define helper_vfp_tosqs helper_vfp_tosqs_armeb
+#define helper_vfp_uhtos helper_vfp_uhtos_armeb
+#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_armeb
+#define helper_vfp_touhs helper_vfp_touhs_armeb
+#define helper_vfp_ultos helper_vfp_ultos_armeb
+#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_armeb
+#define helper_vfp_touls helper_vfp_touls_armeb
 #define helper_vfp_uqtos helper_vfp_uqtos_armeb
-#define helper_wfe helper_wfe_armeb
-#define helper_wfi helper_wfi_armeb
-#define hex2decimal hex2decimal_armeb
-#define hw_breakpoint_update hw_breakpoint_update_armeb
-#define hw_breakpoint_update_all hw_breakpoint_update_all_armeb
-#define hw_watchpoint_update hw_watchpoint_update_armeb
-#define hw_watchpoint_update_all hw_watchpoint_update_all_armeb
-#define _init _init_armeb
-#define init_cpreg_list init_cpreg_list_armeb
-#define init_lists init_lists_armeb
-#define input_type_enum input_type_enum_armeb
-#define int128_2_64 int128_2_64_armeb
-#define int128_add int128_add_armeb
-#define int128_addto int128_addto_armeb
-#define int128_and int128_and_armeb
-#define int128_eq int128_eq_armeb
-#define int128_ge int128_ge_armeb
-#define int128_get64 int128_get64_armeb
-#define int128_gt int128_gt_armeb
-#define int128_le int128_le_armeb
-#define int128_lt int128_lt_armeb
-#define int128_make64 int128_make64_armeb
-#define int128_max int128_max_armeb
-#define int128_min int128_min_armeb
-#define int128_ne int128_ne_armeb
-#define int128_neg int128_neg_armeb
-#define int128_nz int128_nz_armeb
-#define int128_rshift int128_rshift_armeb
-#define int128_sub int128_sub_armeb
-#define int128_subfrom int128_subfrom_armeb
-#define int128_zero int128_zero_armeb
-#define int16_to_float32 int16_to_float32_armeb
-#define int16_to_float64 int16_to_float64_armeb
-#define int32_to_float128 int32_to_float128_armeb
-#define int32_to_float32 int32_to_float32_armeb
-#define int32_to_float64 int32_to_float64_armeb
-#define int32_to_floatx80 int32_to_floatx80_armeb
-#define int64_to_float128 int64_to_float128_armeb
-#define int64_to_float32 int64_to_float32_armeb
-#define int64_to_float64 int64_to_float64_armeb
-#define int64_to_floatx80 int64_to_floatx80_armeb
-#define invalidate_and_set_dirty invalidate_and_set_dirty_armeb
-#define invalidate_page_bitmap invalidate_page_bitmap_armeb
-#define io_mem_read io_mem_read_armeb
-#define io_mem_write io_mem_write_armeb
-#define io_readb io_readb_armeb
-#define io_readl io_readl_armeb
-#define io_readq io_readq_armeb
-#define io_readw io_readw_armeb
-#define iotlb_to_region iotlb_to_region_armeb
-#define io_writeb io_writeb_armeb
-#define io_writel io_writel_armeb
-#define io_writeq io_writeq_armeb
-#define io_writew io_writew_armeb
-#define is_a64 is_a64_armeb
-#define is_help_option is_help_option_armeb
-#define isr_read isr_read_armeb
-#define is_valid_option_list is_valid_option_list_armeb
-#define iwmmxt_load_creg iwmmxt_load_creg_armeb
-#define iwmmxt_load_reg iwmmxt_load_reg_armeb
-#define iwmmxt_store_creg iwmmxt_store_creg_armeb
-#define iwmmxt_store_reg iwmmxt_store_reg_armeb
-#define __jit_debug_descriptor __jit_debug_descriptor_armeb
-#define __jit_debug_register_code __jit_debug_register_code_armeb
-#define kvm_to_cpreg_id kvm_to_cpreg_id_armeb
-#define last_ram_offset last_ram_offset_armeb
-#define ldl_be_p ldl_be_p_armeb
-#define ldl_be_phys ldl_be_phys_armeb
-#define ldl_he_p ldl_he_p_armeb
-#define ldl_le_p ldl_le_p_armeb
-#define ldl_le_phys ldl_le_phys_armeb
-#define ldl_phys ldl_phys_armeb
-#define ldl_phys_internal ldl_phys_internal_armeb
-#define ldq_be_p ldq_be_p_armeb
-#define ldq_be_phys ldq_be_phys_armeb
-#define ldq_he_p ldq_he_p_armeb
-#define ldq_le_p ldq_le_p_armeb
-#define ldq_le_phys ldq_le_phys_armeb
-#define ldq_phys ldq_phys_armeb
-#define ldq_phys_internal ldq_phys_internal_armeb
-#define ldst_name ldst_name_armeb
-#define ldub_p ldub_p_armeb
-#define ldub_phys ldub_phys_armeb
-#define lduw_be_p lduw_be_p_armeb
-#define lduw_be_phys lduw_be_phys_armeb
-#define lduw_he_p lduw_he_p_armeb
-#define lduw_le_p lduw_le_p_armeb
-#define lduw_le_phys lduw_le_phys_armeb
-#define lduw_phys lduw_phys_armeb
-#define lduw_phys_internal lduw_phys_internal_armeb
-#define le128 le128_armeb
-#define linked_bp_matches linked_bp_matches_armeb
-#define listener_add_address_space listener_add_address_space_armeb
-#define load_cpu_offset load_cpu_offset_armeb
-#define load_reg load_reg_armeb
-#define load_reg_var load_reg_var_armeb
-#define log_cpu_state log_cpu_state_armeb
-#define lpae_cp_reginfo lpae_cp_reginfo_armeb
-#define lt128 lt128_armeb
-#define machine_class_init machine_class_init_armeb
-#define machine_finalize machine_finalize_armeb
-#define machine_info machine_info_armeb
-#define machine_initfn machine_initfn_armeb
-#define machine_register_types machine_register_types_armeb
-#define machvirt_init machvirt_init_armeb
-#define machvirt_machine_init machvirt_machine_init_armeb
-#define maj maj_armeb
-#define mapping_conflict mapping_conflict_armeb
-#define mapping_contiguous mapping_contiguous_armeb
-#define mapping_have_same_region mapping_have_same_region_armeb
-#define mapping_merge mapping_merge_armeb
-#define mem_add mem_add_armeb
-#define mem_begin mem_begin_armeb
-#define mem_commit mem_commit_armeb
-#define memory_access_is_direct memory_access_is_direct_armeb
-#define memory_access_size memory_access_size_armeb
-#define memory_init memory_init_armeb
-#define memory_listener_match memory_listener_match_armeb
-#define memory_listener_register memory_listener_register_armeb
-#define memory_listener_unregister memory_listener_unregister_armeb
-#define memory_map_init memory_map_init_armeb
-#define memory_mapping_filter memory_mapping_filter_armeb
-#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_armeb
-#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_armeb
-#define memory_mapping_list_free memory_mapping_list_free_armeb
-#define memory_mapping_list_init memory_mapping_list_init_armeb
-#define memory_region_access_valid memory_region_access_valid_armeb
-#define memory_region_add_subregion memory_region_add_subregion_armeb
-#define memory_region_add_subregion_common memory_region_add_subregion_common_armeb
-#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_armeb
-#define memory_region_big_endian memory_region_big_endian_armeb
-#define memory_region_clear_pending memory_region_clear_pending_armeb
-#define memory_region_del_subregion memory_region_del_subregion_armeb
-#define memory_region_destructor_alias memory_region_destructor_alias_armeb
-#define memory_region_destructor_none memory_region_destructor_none_armeb
-#define memory_region_destructor_ram memory_region_destructor_ram_armeb
-#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_armeb
-#define memory_region_dispatch_read memory_region_dispatch_read_armeb
-#define memory_region_dispatch_read1 memory_region_dispatch_read1_armeb
-#define memory_region_dispatch_write memory_region_dispatch_write_armeb
-#define memory_region_escape_name memory_region_escape_name_armeb
-#define memory_region_finalize memory_region_finalize_armeb
-#define memory_region_find memory_region_find_armeb
-#define memory_region_get_addr memory_region_get_addr_armeb
-#define memory_region_get_alignment memory_region_get_alignment_armeb
-#define memory_region_get_container memory_region_get_container_armeb
-#define memory_region_get_fd memory_region_get_fd_armeb
-#define memory_region_get_may_overlap memory_region_get_may_overlap_armeb
-#define memory_region_get_priority memory_region_get_priority_armeb
-#define memory_region_get_ram_addr memory_region_get_ram_addr_armeb
-#define memory_region_get_ram_ptr memory_region_get_ram_ptr_armeb
-#define memory_region_get_size memory_region_get_size_armeb
-#define memory_region_info memory_region_info_armeb
-#define memory_region_init memory_region_init_armeb
-#define memory_region_init_alias memory_region_init_alias_armeb
-#define memory_region_initfn memory_region_initfn_armeb
-#define memory_region_init_io memory_region_init_io_armeb
-#define memory_region_init_ram memory_region_init_ram_armeb
-#define memory_region_init_ram_ptr memory_region_init_ram_ptr_armeb
-#define memory_region_init_reservation memory_region_init_reservation_armeb
-#define memory_region_is_iommu memory_region_is_iommu_armeb
-#define memory_region_is_logging memory_region_is_logging_armeb
-#define memory_region_is_mapped memory_region_is_mapped_armeb
-#define memory_region_is_ram memory_region_is_ram_armeb
-#define memory_region_is_rom memory_region_is_rom_armeb
-#define memory_region_is_romd memory_region_is_romd_armeb
-#define memory_region_is_skip_dump memory_region_is_skip_dump_armeb
-#define memory_region_is_unassigned memory_region_is_unassigned_armeb
-#define memory_region_name memory_region_name_armeb
-#define memory_region_need_escape memory_region_need_escape_armeb
-#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_armeb
-#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_armeb
-#define memory_region_present memory_region_present_armeb
-#define memory_region_read_accessor memory_region_read_accessor_armeb
-#define memory_region_readd_subregion memory_region_readd_subregion_armeb
-#define memory_region_ref memory_region_ref_armeb
-#define memory_region_resolve_container memory_region_resolve_container_armeb
-#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_armeb
-#define memory_region_section_get_iotlb memory_region_section_get_iotlb_armeb
-#define memory_region_set_address memory_region_set_address_armeb
-#define memory_region_set_alias_offset memory_region_set_alias_offset_armeb
-#define memory_region_set_enabled memory_region_set_enabled_armeb
-#define memory_region_set_readonly memory_region_set_readonly_armeb
-#define memory_region_set_skip_dump memory_region_set_skip_dump_armeb
-#define memory_region_size memory_region_size_armeb
-#define memory_region_to_address_space memory_region_to_address_space_armeb
-#define memory_region_transaction_begin memory_region_transaction_begin_armeb
-#define memory_region_transaction_commit memory_region_transaction_commit_armeb
-#define memory_region_unref memory_region_unref_armeb
-#define memory_region_update_container_subregions memory_region_update_container_subregions_armeb
-#define memory_region_write_accessor memory_region_write_accessor_armeb
-#define memory_region_wrong_endianness memory_region_wrong_endianness_armeb
-#define memory_try_enable_merging memory_try_enable_merging_armeb
-#define module_call_init module_call_init_armeb
-#define module_load module_load_armeb
-#define mpidr_cp_reginfo mpidr_cp_reginfo_armeb
-#define mpidr_read mpidr_read_armeb
-#define msr_mask msr_mask_armeb
-#define mul128By64To192 mul128By64To192_armeb
-#define mul128To256 mul128To256_armeb
-#define mul64To128 mul64To128_armeb
-#define muldiv64 muldiv64_armeb
-#define neon_2rm_is_float_op neon_2rm_is_float_op_armeb
-#define neon_2rm_sizes neon_2rm_sizes_armeb
-#define neon_3r_sizes neon_3r_sizes_armeb
-#define neon_get_scalar neon_get_scalar_armeb
-#define neon_load_reg neon_load_reg_armeb
-#define neon_load_reg64 neon_load_reg64_armeb
-#define neon_load_scratch neon_load_scratch_armeb
-#define neon_ls_element_type neon_ls_element_type_armeb
-#define neon_reg_offset neon_reg_offset_armeb
-#define neon_store_reg neon_store_reg_armeb
-#define neon_store_reg64 neon_store_reg64_armeb
-#define neon_store_scratch neon_store_scratch_armeb
-#define new_ldst_label new_ldst_label_armeb
-#define next_list next_list_armeb
-#define normalizeFloat128Subnormal normalizeFloat128Subnormal_armeb
-#define normalizeFloat16Subnormal normalizeFloat16Subnormal_armeb
-#define normalizeFloat32Subnormal normalizeFloat32Subnormal_armeb
-#define normalizeFloat64Subnormal normalizeFloat64Subnormal_armeb
-#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_armeb
-#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_armeb
-#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_armeb
-#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_armeb
-#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_armeb
-#define not_v6_cp_reginfo not_v6_cp_reginfo_armeb
-#define not_v7_cp_reginfo not_v7_cp_reginfo_armeb
-#define not_v8_cp_reginfo not_v8_cp_reginfo_armeb
-#define object_child_foreach object_child_foreach_armeb
-#define object_class_foreach object_class_foreach_armeb
-#define object_class_foreach_tramp object_class_foreach_tramp_armeb
-#define object_class_get_list object_class_get_list_armeb
-#define object_class_get_list_tramp object_class_get_list_tramp_armeb
-#define object_class_get_parent object_class_get_parent_armeb
-#define object_deinit object_deinit_armeb
-#define object_dynamic_cast object_dynamic_cast_armeb
-#define object_finalize object_finalize_armeb
-#define object_finalize_child_property object_finalize_child_property_armeb
-#define object_get_child_property object_get_child_property_armeb
-#define object_get_link_property object_get_link_property_armeb
-#define object_get_root object_get_root_armeb
-#define object_initialize_with_type object_initialize_with_type_armeb
-#define object_init_with_type object_init_with_type_armeb
-#define object_instance_init object_instance_init_armeb
-#define object_new_with_type object_new_with_type_armeb
-#define object_post_init_with_type object_post_init_with_type_armeb
-#define object_property_add_alias object_property_add_alias_armeb
-#define object_property_add_link object_property_add_link_armeb
-#define object_property_add_uint16_ptr object_property_add_uint16_ptr_armeb
-#define object_property_add_uint32_ptr object_property_add_uint32_ptr_armeb
-#define object_property_add_uint64_ptr object_property_add_uint64_ptr_armeb
-#define object_property_add_uint8_ptr object_property_add_uint8_ptr_armeb
-#define object_property_allow_set_link object_property_allow_set_link_armeb
-#define object_property_del object_property_del_armeb
-#define object_property_del_all object_property_del_all_armeb
-#define object_property_find object_property_find_armeb
-#define object_property_get object_property_get_armeb
-#define object_property_get_bool object_property_get_bool_armeb
-#define object_property_get_int object_property_get_int_armeb
-#define object_property_get_link object_property_get_link_armeb
-#define object_property_get_qobject object_property_get_qobject_armeb
-#define object_property_get_str object_property_get_str_armeb
-#define object_property_get_type object_property_get_type_armeb
-#define object_property_is_child object_property_is_child_armeb
-#define object_property_set object_property_set_armeb
-#define object_property_set_description object_property_set_description_armeb
-#define object_property_set_link object_property_set_link_armeb
-#define object_property_set_qobject object_property_set_qobject_armeb
-#define object_release_link_property object_release_link_property_armeb
-#define object_resolve_abs_path object_resolve_abs_path_armeb
-#define object_resolve_child_property object_resolve_child_property_armeb
-#define object_resolve_link object_resolve_link_armeb
-#define object_resolve_link_property object_resolve_link_property_armeb
-#define object_resolve_partial_path object_resolve_partial_path_armeb
-#define object_resolve_path object_resolve_path_armeb
-#define object_resolve_path_component object_resolve_path_component_armeb
-#define object_resolve_path_type object_resolve_path_type_armeb
-#define object_set_link_property object_set_link_property_armeb
-#define object_unparent object_unparent_armeb
-#define omap_cachemaint_write omap_cachemaint_write_armeb
-#define omap_cp_reginfo omap_cp_reginfo_armeb
-#define omap_threadid_write omap_threadid_write_armeb
-#define omap_ticonfig_write omap_ticonfig_write_armeb
-#define omap_wfi_write omap_wfi_write_armeb
-#define op_bits op_bits_armeb
-#define open_modeflags open_modeflags_armeb
-#define op_to_mov op_to_mov_armeb
-#define op_to_movi op_to_movi_armeb
-#define output_type_enum output_type_enum_armeb
-#define packFloat128 packFloat128_armeb
-#define packFloat16 packFloat16_armeb
-#define packFloat32 packFloat32_armeb
-#define packFloat64 packFloat64_armeb
-#define packFloatx80 packFloatx80_armeb
-#define page_find page_find_armeb
-#define page_find_alloc page_find_alloc_armeb
-#define page_flush_tb page_flush_tb_armeb
-#define page_flush_tb_1 page_flush_tb_1_armeb
-#define page_init page_init_armeb
-#define page_size_init page_size_init_armeb
-#define par par_armeb
-#define parse_array parse_array_armeb
-#define parse_error parse_error_armeb
-#define parse_escape parse_escape_armeb
-#define parse_keyword parse_keyword_armeb
-#define parse_literal parse_literal_armeb
-#define parse_object parse_object_armeb
-#define parse_optional parse_optional_armeb
-#define parse_option_bool parse_option_bool_armeb
-#define parse_option_number parse_option_number_armeb
-#define parse_option_size parse_option_size_armeb
-#define parse_pair parse_pair_armeb
-#define parser_context_free parser_context_free_armeb
-#define parser_context_new parser_context_new_armeb
-#define parser_context_peek_token parser_context_peek_token_armeb
-#define parser_context_pop_token parser_context_pop_token_armeb
-#define parser_context_restore parser_context_restore_armeb
-#define parser_context_save parser_context_save_armeb
-#define parse_str parse_str_armeb
-#define parse_type_bool parse_type_bool_armeb
-#define parse_type_int parse_type_int_armeb
-#define parse_type_number parse_type_number_armeb
-#define parse_type_size parse_type_size_armeb
-#define parse_type_str parse_type_str_armeb
-#define parse_value parse_value_armeb
-#define par_write par_write_armeb
-#define patch_reloc patch_reloc_armeb
-#define phys_map_node_alloc phys_map_node_alloc_armeb
-#define phys_map_node_reserve phys_map_node_reserve_armeb
-#define phys_mem_alloc phys_mem_alloc_armeb
-#define phys_mem_set_alloc phys_mem_set_alloc_armeb
-#define phys_page_compact phys_page_compact_armeb
-#define phys_page_compact_all phys_page_compact_all_armeb
-#define phys_page_find phys_page_find_armeb
-#define phys_page_set phys_page_set_armeb
-#define phys_page_set_level phys_page_set_level_armeb
-#define phys_section_add phys_section_add_armeb
-#define phys_section_destroy phys_section_destroy_armeb
-#define phys_sections_free phys_sections_free_armeb
-#define pickNaN pickNaN_armeb
-#define pickNaNMulAdd pickNaNMulAdd_armeb
-#define pmccfiltr_write pmccfiltr_write_armeb
-#define pmccntr_read pmccntr_read_armeb
-#define pmccntr_sync pmccntr_sync_armeb
-#define pmccntr_write pmccntr_write_armeb
-#define pmccntr_write32 pmccntr_write32_armeb
-#define pmcntenclr_write pmcntenclr_write_armeb
-#define pmcntenset_write pmcntenset_write_armeb
-#define pmcr_write pmcr_write_armeb
-#define pmintenclr_write pmintenclr_write_armeb
-#define pmintenset_write pmintenset_write_armeb
-#define pmovsr_write pmovsr_write_armeb
-#define pmreg_access pmreg_access_armeb
-#define pmsav5_cp_reginfo pmsav5_cp_reginfo_armeb
-#define pmsav5_data_ap_read pmsav5_data_ap_read_armeb
-#define pmsav5_data_ap_write pmsav5_data_ap_write_armeb
-#define pmsav5_insn_ap_read pmsav5_insn_ap_read_armeb
-#define pmsav5_insn_ap_write pmsav5_insn_ap_write_armeb
-#define pmuserenr_write pmuserenr_write_armeb
-#define pmxevtyper_write pmxevtyper_write_armeb
-#define print_type_bool print_type_bool_armeb
-#define print_type_int print_type_int_armeb
-#define print_type_number print_type_number_armeb
-#define print_type_size print_type_size_armeb
-#define print_type_str print_type_str_armeb
-#define propagateFloat128NaN propagateFloat128NaN_armeb
-#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_armeb
-#define propagateFloat32NaN propagateFloat32NaN_armeb
-#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_armeb
-#define propagateFloat64NaN propagateFloat64NaN_armeb
-#define propagateFloatx80NaN propagateFloatx80NaN_armeb
-#define property_get_alias property_get_alias_armeb
-#define property_get_bool property_get_bool_armeb
-#define property_get_str property_get_str_armeb
-#define property_get_uint16_ptr property_get_uint16_ptr_armeb
-#define property_get_uint32_ptr property_get_uint32_ptr_armeb
-#define property_get_uint64_ptr property_get_uint64_ptr_armeb
-#define property_get_uint8_ptr property_get_uint8_ptr_armeb
-#define property_release_alias property_release_alias_armeb
-#define property_release_bool property_release_bool_armeb
-#define property_release_str property_release_str_armeb
-#define property_resolve_alias property_resolve_alias_armeb
-#define property_set_alias property_set_alias_armeb
-#define property_set_bool property_set_bool_armeb
-#define property_set_str property_set_str_armeb
-#define pstate_read pstate_read_armeb
-#define pstate_write pstate_write_armeb
-#define pxa250_initfn pxa250_initfn_armeb
-#define pxa255_initfn pxa255_initfn_armeb
-#define pxa260_initfn pxa260_initfn_armeb
-#define pxa261_initfn pxa261_initfn_armeb
-#define pxa262_initfn pxa262_initfn_armeb
-#define pxa270a0_initfn pxa270a0_initfn_armeb
-#define pxa270a1_initfn pxa270a1_initfn_armeb
-#define pxa270b0_initfn pxa270b0_initfn_armeb
-#define pxa270b1_initfn pxa270b1_initfn_armeb
-#define pxa270c0_initfn pxa270c0_initfn_armeb
-#define pxa270c5_initfn pxa270c5_initfn_armeb
-#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_armeb
-#define qapi_dealloc_end_list qapi_dealloc_end_list_armeb
-#define qapi_dealloc_end_struct qapi_dealloc_end_struct_armeb
-#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_armeb
-#define qapi_dealloc_next_list qapi_dealloc_next_list_armeb
-#define qapi_dealloc_pop qapi_dealloc_pop_armeb
-#define qapi_dealloc_push qapi_dealloc_push_armeb
-#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_armeb
-#define qapi_dealloc_start_list qapi_dealloc_start_list_armeb
-#define qapi_dealloc_start_struct qapi_dealloc_start_struct_armeb
-#define qapi_dealloc_start_union qapi_dealloc_start_union_armeb
-#define qapi_dealloc_type_bool qapi_dealloc_type_bool_armeb
-#define qapi_dealloc_type_enum qapi_dealloc_type_enum_armeb
-#define qapi_dealloc_type_int qapi_dealloc_type_int_armeb
-#define qapi_dealloc_type_number qapi_dealloc_type_number_armeb
-#define qapi_dealloc_type_size qapi_dealloc_type_size_armeb
-#define qapi_dealloc_type_str qapi_dealloc_type_str_armeb
-#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_armeb
-#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_armeb
-#define qapi_free_boolList qapi_free_boolList_armeb
-#define qapi_free_ErrorClassList qapi_free_ErrorClassList_armeb
-#define qapi_free_int16List qapi_free_int16List_armeb
-#define qapi_free_int32List qapi_free_int32List_armeb
-#define qapi_free_int64List qapi_free_int64List_armeb
-#define qapi_free_int8List qapi_free_int8List_armeb
-#define qapi_free_intList qapi_free_intList_armeb
-#define qapi_free_numberList qapi_free_numberList_armeb
-#define qapi_free_strList qapi_free_strList_armeb
-#define qapi_free_uint16List qapi_free_uint16List_armeb
-#define qapi_free_uint32List qapi_free_uint32List_armeb
-#define qapi_free_uint64List qapi_free_uint64List_armeb
-#define qapi_free_uint8List qapi_free_uint8List_armeb
-#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_armeb
-#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_armeb
-#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_armeb
-#define qbool_destroy_obj qbool_destroy_obj_armeb
-#define qbool_from_int qbool_from_int_armeb
-#define qbool_get_int qbool_get_int_armeb
-#define qbool_type qbool_type_armeb
-#define qbus_create qbus_create_armeb
-#define qbus_create_inplace qbus_create_inplace_armeb
-#define qbus_finalize qbus_finalize_armeb
-#define qbus_initfn qbus_initfn_armeb
-#define qbus_realize qbus_realize_armeb
-#define qdev_create qdev_create_armeb
-#define qdev_get_type qdev_get_type_armeb
-#define qdev_register_types qdev_register_types_armeb
-#define qdev_set_parent_bus qdev_set_parent_bus_armeb
-#define qdev_try_create qdev_try_create_armeb
-#define qdict_add_key qdict_add_key_armeb
-#define qdict_array_split qdict_array_split_armeb
-#define qdict_clone_shallow qdict_clone_shallow_armeb
-#define qdict_del qdict_del_armeb
-#define qdict_destroy_obj qdict_destroy_obj_armeb
-#define qdict_entry_key qdict_entry_key_armeb
-#define qdict_entry_value qdict_entry_value_armeb
-#define qdict_extract_subqdict qdict_extract_subqdict_armeb
-#define qdict_find qdict_find_armeb
-#define qdict_first qdict_first_armeb
-#define qdict_flatten qdict_flatten_armeb
-#define qdict_flatten_qdict qdict_flatten_qdict_armeb
-#define qdict_flatten_qlist qdict_flatten_qlist_armeb
-#define qdict_get qdict_get_armeb
-#define qdict_get_bool qdict_get_bool_armeb
-#define qdict_get_double qdict_get_double_armeb
-#define qdict_get_int qdict_get_int_armeb
-#define qdict_get_obj qdict_get_obj_armeb
-#define qdict_get_qdict qdict_get_qdict_armeb
-#define qdict_get_qlist qdict_get_qlist_armeb
-#define qdict_get_str qdict_get_str_armeb
-#define qdict_get_try_bool qdict_get_try_bool_armeb
-#define qdict_get_try_int qdict_get_try_int_armeb
-#define qdict_get_try_str qdict_get_try_str_armeb
-#define qdict_haskey qdict_haskey_armeb
-#define qdict_has_prefixed_entries qdict_has_prefixed_entries_armeb
-#define qdict_iter qdict_iter_armeb
-#define qdict_join qdict_join_armeb
-#define qdict_new qdict_new_armeb
-#define qdict_next qdict_next_armeb
-#define qdict_next_entry qdict_next_entry_armeb
-#define qdict_put_obj qdict_put_obj_armeb
-#define qdict_size qdict_size_armeb
-#define qdict_type qdict_type_armeb
-#define qemu_clock_get_us qemu_clock_get_us_armeb
-#define qemu_clock_ptr qemu_clock_ptr_armeb
-#define qemu_clocks qemu_clocks_armeb
-#define qemu_get_cpu qemu_get_cpu_armeb
-#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_armeb
-#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_armeb
-#define qemu_get_ram_block qemu_get_ram_block_armeb
-#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_armeb
-#define qemu_get_ram_fd qemu_get_ram_fd_armeb
-#define qemu_get_ram_ptr qemu_get_ram_ptr_armeb
-#define qemu_host_page_mask qemu_host_page_mask_armeb
-#define qemu_host_page_size qemu_host_page_size_armeb
-#define qemu_init_vcpu qemu_init_vcpu_armeb
-#define qemu_ld_helpers qemu_ld_helpers_armeb
-#define qemu_log_close qemu_log_close_armeb
-#define qemu_log_enabled qemu_log_enabled_armeb
-#define qemu_log_flush qemu_log_flush_armeb
-#define qemu_loglevel_mask qemu_loglevel_mask_armeb
-#define qemu_log_vprintf qemu_log_vprintf_armeb
-#define qemu_oom_check qemu_oom_check_armeb
-#define qemu_parse_fd qemu_parse_fd_armeb
-#define qemu_ram_addr_from_host qemu_ram_addr_from_host_armeb
-#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_armeb
-#define qemu_ram_alloc qemu_ram_alloc_armeb
-#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_armeb
-#define qemu_ram_foreach_block qemu_ram_foreach_block_armeb
-#define qemu_ram_free qemu_ram_free_armeb
-#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_armeb
-#define qemu_ram_ptr_length qemu_ram_ptr_length_armeb
-#define qemu_ram_remap qemu_ram_remap_armeb
-#define qemu_ram_setup_dump qemu_ram_setup_dump_armeb
-#define qemu_ram_unset_idstr qemu_ram_unset_idstr_armeb
-#define qemu_real_host_page_size qemu_real_host_page_size_armeb
-#define qemu_st_helpers qemu_st_helpers_armeb
-#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_armeb
-#define qemu_try_memalign qemu_try_memalign_armeb
-#define qentry_destroy qentry_destroy_armeb
-#define qerror_human qerror_human_armeb
-#define qerror_report qerror_report_armeb
-#define qerror_report_err qerror_report_err_armeb
-#define qfloat_destroy_obj qfloat_destroy_obj_armeb
-#define qfloat_from_double qfloat_from_double_armeb
-#define qfloat_get_double qfloat_get_double_armeb
-#define qfloat_type qfloat_type_armeb
-#define qint_destroy_obj qint_destroy_obj_armeb
-#define qint_from_int qint_from_int_armeb
-#define qint_get_int qint_get_int_armeb
-#define qint_type qint_type_armeb
-#define qlist_append_obj qlist_append_obj_armeb
-#define qlist_copy qlist_copy_armeb
-#define qlist_copy_elem qlist_copy_elem_armeb
-#define qlist_destroy_obj qlist_destroy_obj_armeb
-#define qlist_empty qlist_empty_armeb
-#define qlist_entry_obj qlist_entry_obj_armeb
-#define qlist_first qlist_first_armeb
-#define qlist_iter qlist_iter_armeb
-#define qlist_new qlist_new_armeb
-#define qlist_next qlist_next_armeb
-#define qlist_peek qlist_peek_armeb
-#define qlist_pop qlist_pop_armeb
-#define qlist_size qlist_size_armeb
-#define qlist_size_iter qlist_size_iter_armeb
-#define qlist_type qlist_type_armeb
-#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_armeb
-#define qmp_input_end_list qmp_input_end_list_armeb
-#define qmp_input_end_struct qmp_input_end_struct_armeb
-#define qmp_input_get_next_type qmp_input_get_next_type_armeb
-#define qmp_input_get_object qmp_input_get_object_armeb
-#define qmp_input_get_visitor qmp_input_get_visitor_armeb
-#define qmp_input_next_list qmp_input_next_list_armeb
-#define qmp_input_optional qmp_input_optional_armeb
-#define qmp_input_pop qmp_input_pop_armeb
-#define qmp_input_push qmp_input_push_armeb
-#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_armeb
-#define qmp_input_start_list qmp_input_start_list_armeb
-#define qmp_input_start_struct qmp_input_start_struct_armeb
-#define qmp_input_type_bool qmp_input_type_bool_armeb
-#define qmp_input_type_int qmp_input_type_int_armeb
-#define qmp_input_type_number qmp_input_type_number_armeb
-#define qmp_input_type_str qmp_input_type_str_armeb
-#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_armeb
-#define qmp_input_visitor_new qmp_input_visitor_new_armeb
-#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_armeb
-#define qmp_output_add_obj qmp_output_add_obj_armeb
-#define qmp_output_end_list qmp_output_end_list_armeb
-#define qmp_output_end_struct qmp_output_end_struct_armeb
-#define qmp_output_first qmp_output_first_armeb
-#define qmp_output_get_qobject qmp_output_get_qobject_armeb
-#define qmp_output_get_visitor qmp_output_get_visitor_armeb
-#define qmp_output_last qmp_output_last_armeb
-#define qmp_output_next_list qmp_output_next_list_armeb
-#define qmp_output_pop qmp_output_pop_armeb
-#define qmp_output_push_obj qmp_output_push_obj_armeb
-#define qmp_output_start_list qmp_output_start_list_armeb
-#define qmp_output_start_struct qmp_output_start_struct_armeb
-#define qmp_output_type_bool qmp_output_type_bool_armeb
-#define qmp_output_type_int qmp_output_type_int_armeb
-#define qmp_output_type_number qmp_output_type_number_armeb
-#define qmp_output_type_str qmp_output_type_str_armeb
-#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_armeb
-#define qmp_output_visitor_new qmp_output_visitor_new_armeb
-#define qobject_decref qobject_decref_armeb
-#define qobject_to_qbool qobject_to_qbool_armeb
-#define qobject_to_qdict qobject_to_qdict_armeb
-#define qobject_to_qfloat qobject_to_qfloat_armeb
-#define qobject_to_qint qobject_to_qint_armeb
-#define qobject_to_qlist qobject_to_qlist_armeb
-#define qobject_to_qstring qobject_to_qstring_armeb
-#define qobject_type qobject_type_armeb
-#define qstring_append qstring_append_armeb
-#define qstring_append_chr qstring_append_chr_armeb
-#define qstring_append_int qstring_append_int_armeb
-#define qstring_destroy_obj qstring_destroy_obj_armeb
-#define qstring_from_escaped_str qstring_from_escaped_str_armeb
-#define qstring_from_str qstring_from_str_armeb
-#define qstring_from_substr qstring_from_substr_armeb
-#define qstring_get_length qstring_get_length_armeb
-#define qstring_get_str qstring_get_str_armeb
-#define qstring_new qstring_new_armeb
-#define qstring_type qstring_type_armeb
-#define ram_block_add ram_block_add_armeb
-#define ram_size ram_size_armeb
-#define range_compare range_compare_armeb
-#define range_covers_byte range_covers_byte_armeb
-#define range_get_last range_get_last_armeb
-#define range_merge range_merge_armeb
-#define ranges_can_merge ranges_can_merge_armeb
-#define raw_read raw_read_armeb
-#define raw_write raw_write_armeb
-#define rcon rcon_armeb
-#define read_raw_cp_reg read_raw_cp_reg_armeb
-#define recip_estimate recip_estimate_armeb
-#define recip_sqrt_estimate recip_sqrt_estimate_armeb
-#define register_cp_regs_for_features register_cp_regs_for_features_armeb
-#define register_multipage register_multipage_armeb
-#define register_subpage register_subpage_armeb
-#define register_tm_clones register_tm_clones_armeb
-#define register_types_object register_types_object_armeb
-#define regnames regnames_armeb
-#define render_memory_region render_memory_region_armeb
-#define reset_all_temps reset_all_temps_armeb
-#define reset_temp reset_temp_armeb
-#define rol32 rol32_armeb
-#define rol64 rol64_armeb
-#define ror32 ror32_armeb
-#define ror64 ror64_armeb
-#define roundAndPackFloat128 roundAndPackFloat128_armeb
-#define roundAndPackFloat16 roundAndPackFloat16_armeb
-#define roundAndPackFloat32 roundAndPackFloat32_armeb
-#define roundAndPackFloat64 roundAndPackFloat64_armeb
-#define roundAndPackFloatx80 roundAndPackFloatx80_armeb
-#define roundAndPackInt32 roundAndPackInt32_armeb
-#define roundAndPackInt64 roundAndPackInt64_armeb
-#define roundAndPackUint64 roundAndPackUint64_armeb
-#define round_to_inf round_to_inf_armeb
-#define run_on_cpu run_on_cpu_armeb
-#define s0 s0_armeb
-#define S0 S0_armeb
-#define s1 s1_armeb
-#define S1 S1_armeb
-#define sa1100_initfn sa1100_initfn_armeb
-#define sa1110_initfn sa1110_initfn_armeb
-#define save_globals save_globals_armeb
-#define scr_write scr_write_armeb
-#define sctlr_write sctlr_write_armeb
-#define set_bit set_bit_armeb
-#define set_bits set_bits_armeb
-#define set_default_nan_mode set_default_nan_mode_armeb
-#define set_feature set_feature_armeb
-#define set_float_detect_tininess set_float_detect_tininess_armeb
-#define set_float_exception_flags set_float_exception_flags_armeb
-#define set_float_rounding_mode set_float_rounding_mode_armeb
-#define set_flush_inputs_to_zero set_flush_inputs_to_zero_armeb
-#define set_flush_to_zero set_flush_to_zero_armeb
-#define set_swi_errno set_swi_errno_armeb
-#define sextract32 sextract32_armeb
-#define sextract64 sextract64_armeb
-#define shift128ExtraRightJamming shift128ExtraRightJamming_armeb
-#define shift128Right shift128Right_armeb
-#define shift128RightJamming shift128RightJamming_armeb
-#define shift32RightJamming shift32RightJamming_armeb
-#define shift64ExtraRightJamming shift64ExtraRightJamming_armeb
-#define shift64RightJamming shift64RightJamming_armeb
-#define shifter_out_im shifter_out_im_armeb
-#define shortShift128Left shortShift128Left_armeb
-#define shortShift192Left shortShift192Left_armeb
-#define simple_mpu_ap_bits simple_mpu_ap_bits_armeb
-#define size_code_gen_buffer size_code_gen_buffer_armeb
-#define softmmu_lock_user softmmu_lock_user_armeb
-#define softmmu_lock_user_string softmmu_lock_user_string_armeb
-#define softmmu_tget32 softmmu_tget32_armeb
-#define softmmu_tget8 softmmu_tget8_armeb
-#define softmmu_tput32 softmmu_tput32_armeb
-#define softmmu_unlock_user softmmu_unlock_user_armeb
-#define sort_constraints sort_constraints_armeb
-#define sp_el0_access sp_el0_access_armeb
-#define spsel_read spsel_read_armeb
-#define spsel_write spsel_write_armeb
-#define start_list start_list_armeb
-#define stb_p stb_p_armeb
-#define stb_phys stb_phys_armeb
-#define stl_be_p stl_be_p_armeb
-#define stl_be_phys stl_be_phys_armeb
-#define stl_he_p stl_he_p_armeb
-#define stl_le_p stl_le_p_armeb
-#define stl_le_phys stl_le_phys_armeb
-#define stl_phys stl_phys_armeb
-#define stl_phys_internal stl_phys_internal_armeb
-#define stl_phys_notdirty stl_phys_notdirty_armeb
-#define store_cpu_offset store_cpu_offset_armeb
-#define store_reg store_reg_armeb
-#define store_reg_bx store_reg_bx_armeb
-#define store_reg_from_load store_reg_from_load_armeb
-#define stq_be_p stq_be_p_armeb
-#define stq_be_phys stq_be_phys_armeb
-#define stq_he_p stq_he_p_armeb
-#define stq_le_p stq_le_p_armeb
-#define stq_le_phys stq_le_phys_armeb
-#define stq_phys stq_phys_armeb
-#define string_input_get_visitor string_input_get_visitor_armeb
-#define string_input_visitor_cleanup string_input_visitor_cleanup_armeb
-#define string_input_visitor_new string_input_visitor_new_armeb
-#define strongarm_cp_reginfo strongarm_cp_reginfo_armeb
-#define strstart strstart_armeb
-#define strtosz strtosz_armeb
-#define strtosz_suffix strtosz_suffix_armeb
-#define stw_be_p stw_be_p_armeb
-#define stw_be_phys stw_be_phys_armeb
-#define stw_he_p stw_he_p_armeb
-#define stw_le_p stw_le_p_armeb
-#define stw_le_phys stw_le_phys_armeb
-#define stw_phys stw_phys_armeb
-#define stw_phys_internal stw_phys_internal_armeb
-#define sub128 sub128_armeb
-#define sub16_sat sub16_sat_armeb
-#define sub16_usat sub16_usat_armeb
-#define sub192 sub192_armeb
-#define sub8_sat sub8_sat_armeb
-#define sub8_usat sub8_usat_armeb
-#define subFloat128Sigs subFloat128Sigs_armeb
-#define subFloat32Sigs subFloat32Sigs_armeb
-#define subFloat64Sigs subFloat64Sigs_armeb
-#define subFloatx80Sigs subFloatx80Sigs_armeb
-#define subpage_accepts subpage_accepts_armeb
-#define subpage_init subpage_init_armeb
-#define subpage_ops subpage_ops_armeb
-#define subpage_read subpage_read_armeb
-#define subpage_register subpage_register_armeb
-#define subpage_write subpage_write_armeb
-#define suffix_mul suffix_mul_armeb
-#define swap_commutative swap_commutative_armeb
-#define swap_commutative2 swap_commutative2_armeb
-#define switch_mode switch_mode_armeb
-#define switch_v7m_sp switch_v7m_sp_armeb
-#define syn_aa32_bkpt syn_aa32_bkpt_armeb
-#define syn_aa32_hvc syn_aa32_hvc_armeb
-#define syn_aa32_smc syn_aa32_smc_armeb
-#define syn_aa32_svc syn_aa32_svc_armeb
-#define syn_breakpoint syn_breakpoint_armeb
-#define sync_globals sync_globals_armeb
-#define syn_cp14_rrt_trap syn_cp14_rrt_trap_armeb
-#define syn_cp14_rt_trap syn_cp14_rt_trap_armeb
-#define syn_cp15_rrt_trap syn_cp15_rrt_trap_armeb
-#define syn_cp15_rt_trap syn_cp15_rt_trap_armeb
-#define syn_data_abort syn_data_abort_armeb
-#define syn_fp_access_trap syn_fp_access_trap_armeb
-#define syn_insn_abort syn_insn_abort_armeb
-#define syn_swstep syn_swstep_armeb
-#define syn_uncategorized syn_uncategorized_armeb
-#define syn_watchpoint syn_watchpoint_armeb
-#define syscall_err syscall_err_armeb
-#define system_bus_class_init system_bus_class_init_armeb
-#define system_bus_info system_bus_info_armeb
-#define t2ee_cp_reginfo t2ee_cp_reginfo_armeb
-#define table_logic_cc table_logic_cc_armeb
-#define target_parse_constraint target_parse_constraint_armeb
-#define target_words_bigendian target_words_bigendian_armeb
-#define tb_add_jump tb_add_jump_armeb
-#define tb_alloc tb_alloc_armeb
-#define tb_alloc_page tb_alloc_page_armeb
-#define tb_check_watchpoint tb_check_watchpoint_armeb
-#define tb_find_fast tb_find_fast_armeb
-#define tb_find_pc tb_find_pc_armeb
-#define tb_find_slow tb_find_slow_armeb
-#define tb_flush tb_flush_armeb
-#define tb_flush_jmp_cache tb_flush_jmp_cache_armeb
-#define tb_free tb_free_armeb
-#define tb_gen_code tb_gen_code_armeb
-#define tb_hash_remove tb_hash_remove_armeb
-#define tb_invalidate_phys_addr tb_invalidate_phys_addr_armeb
-#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_armeb
-#define tb_invalidate_phys_range tb_invalidate_phys_range_armeb
-#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_armeb
-#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_armeb
-#define tb_jmp_remove tb_jmp_remove_armeb
-#define tb_link_page tb_link_page_armeb
-#define tb_page_remove tb_page_remove_armeb
-#define tb_phys_hash_func tb_phys_hash_func_armeb
-#define tb_phys_invalidate tb_phys_invalidate_armeb
-#define tb_reset_jump tb_reset_jump_armeb
-#define tb_set_jmp_target tb_set_jmp_target_armeb
-#define tcg_accel_class_init tcg_accel_class_init_armeb
-#define tcg_accel_type tcg_accel_type_armeb
-#define tcg_add_param_i32 tcg_add_param_i32_armeb
-#define tcg_add_param_i64 tcg_add_param_i64_armeb
-#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_armeb
-#define tcg_allowed tcg_allowed_armeb
-#define tcg_canonicalize_memop tcg_canonicalize_memop_armeb
-#define tcg_commit tcg_commit_armeb
-#define tcg_cond_to_jcc tcg_cond_to_jcc_armeb
-#define tcg_constant_folding tcg_constant_folding_armeb
-#define tcg_const_i32 tcg_const_i32_armeb
-#define tcg_const_i64 tcg_const_i64_armeb
-#define tcg_const_local_i32 tcg_const_local_i32_armeb
-#define tcg_const_local_i64 tcg_const_local_i64_armeb
-#define tcg_context_init tcg_context_init_armeb
-#define tcg_cpu_address_space_init tcg_cpu_address_space_init_armeb
-#define tcg_cpu_exec tcg_cpu_exec_armeb
-#define tcg_current_code_size tcg_current_code_size_armeb
-#define tcg_dump_info tcg_dump_info_armeb
-#define tcg_dump_ops tcg_dump_ops_armeb
-#define tcg_exec_all tcg_exec_all_armeb
-#define tcg_find_helper tcg_find_helper_armeb
-#define tcg_func_start tcg_func_start_armeb
-#define tcg_gen_abs_i32 tcg_gen_abs_i32_armeb
-#define tcg_gen_add2_i32 tcg_gen_add2_i32_armeb
-#define tcg_gen_add_i32 tcg_gen_add_i32_armeb
-#define tcg_gen_add_i64 tcg_gen_add_i64_armeb
-#define tcg_gen_addi_i32 tcg_gen_addi_i32_armeb
-#define tcg_gen_addi_i64 tcg_gen_addi_i64_armeb
-#define tcg_gen_andc_i32 tcg_gen_andc_i32_armeb
-#define tcg_gen_and_i32 tcg_gen_and_i32_armeb
-#define tcg_gen_and_i64 tcg_gen_and_i64_armeb
-#define tcg_gen_andi_i32 tcg_gen_andi_i32_armeb
-#define tcg_gen_andi_i64 tcg_gen_andi_i64_armeb
-#define tcg_gen_br tcg_gen_br_armeb
-#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_armeb
-#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_armeb
-#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_armeb
-#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_armeb
-#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_armeb
-#define tcg_gen_callN tcg_gen_callN_armeb
-#define tcg_gen_code tcg_gen_code_armeb
-#define tcg_gen_code_common tcg_gen_code_common_armeb
-#define tcg_gen_code_search_pc tcg_gen_code_search_pc_armeb
-#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_armeb
-#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_armeb
-#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_armeb
-#define tcg_gen_exit_tb tcg_gen_exit_tb_armeb
-#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_armeb
-#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_armeb
-#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_armeb
-#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_armeb
-#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_armeb
-#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_armeb
-#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_armeb
-#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_armeb
-#define tcg_gen_goto_tb tcg_gen_goto_tb_armeb
-#define tcg_gen_ld_i32 tcg_gen_ld_i32_armeb
-#define tcg_gen_ld_i64 tcg_gen_ld_i64_armeb
-#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_armeb
-#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_armeb
-#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_armeb
-#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_armeb
-#define tcg_gen_mov_i32 tcg_gen_mov_i32_armeb
-#define tcg_gen_mov_i64 tcg_gen_mov_i64_armeb
-#define tcg_gen_movi_i32 tcg_gen_movi_i32_armeb
-#define tcg_gen_movi_i64 tcg_gen_movi_i64_armeb
-#define tcg_gen_mul_i32 tcg_gen_mul_i32_armeb
-#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_armeb
-#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_armeb
-#define tcg_gen_neg_i32 tcg_gen_neg_i32_armeb
-#define tcg_gen_neg_i64 tcg_gen_neg_i64_armeb
-#define tcg_gen_not_i32 tcg_gen_not_i32_armeb
-#define tcg_gen_op0 tcg_gen_op0_armeb
-#define tcg_gen_op1i tcg_gen_op1i_armeb
-#define tcg_gen_op2_i32 tcg_gen_op2_i32_armeb
-#define tcg_gen_op2_i64 tcg_gen_op2_i64_armeb
-#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_armeb
-#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_armeb
-#define tcg_gen_op3_i32 tcg_gen_op3_i32_armeb
-#define tcg_gen_op3_i64 tcg_gen_op3_i64_armeb
-#define tcg_gen_op4_i32 tcg_gen_op4_i32_armeb
-#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_armeb
-#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_armeb
-#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_armeb
-#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_armeb
-#define tcg_gen_op6_i32 tcg_gen_op6_i32_armeb
-#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_armeb
-#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_armeb
-#define tcg_gen_orc_i32 tcg_gen_orc_i32_armeb
-#define tcg_gen_or_i32 tcg_gen_or_i32_armeb
-#define tcg_gen_or_i64 tcg_gen_or_i64_armeb
-#define tcg_gen_ori_i32 tcg_gen_ori_i32_armeb
-#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_armeb
-#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_armeb
-#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_armeb
-#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_armeb
-#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_armeb
-#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_armeb
-#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_armeb
-#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_armeb
-#define tcg_gen_sar_i32 tcg_gen_sar_i32_armeb
-#define tcg_gen_sari_i32 tcg_gen_sari_i32_armeb
-#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_armeb
-#define tcg_gen_shl_i32 tcg_gen_shl_i32_armeb
-#define tcg_gen_shl_i64 tcg_gen_shl_i64_armeb
-#define tcg_gen_shli_i32 tcg_gen_shli_i32_armeb
-#define tcg_gen_shli_i64 tcg_gen_shli_i64_armeb
-#define tcg_gen_shr_i32 tcg_gen_shr_i32_armeb
-#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_armeb
-#define tcg_gen_shr_i64 tcg_gen_shr_i64_armeb
-#define tcg_gen_shri_i32 tcg_gen_shri_i32_armeb
-#define tcg_gen_shri_i64 tcg_gen_shri_i64_armeb
-#define tcg_gen_st_i32 tcg_gen_st_i32_armeb
-#define tcg_gen_st_i64 tcg_gen_st_i64_armeb
-#define tcg_gen_sub_i32 tcg_gen_sub_i32_armeb
-#define tcg_gen_sub_i64 tcg_gen_sub_i64_armeb
-#define tcg_gen_subi_i32 tcg_gen_subi_i32_armeb
-#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_armeb
-#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_armeb
-#define tcg_gen_xor_i32 tcg_gen_xor_i32_armeb
-#define tcg_gen_xor_i64 tcg_gen_xor_i64_armeb
-#define tcg_gen_xori_i32 tcg_gen_xori_i32_armeb
-#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_armeb
-#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_armeb
-#define tcg_get_arg_str_idx tcg_get_arg_str_idx_armeb
-#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_armeb
-#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_armeb
-#define tcg_global_mem_new_internal tcg_global_mem_new_internal_armeb
-#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_armeb
-#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_armeb
-#define tcg_global_reg_new_internal tcg_global_reg_new_internal_armeb
-#define tcg_handle_interrupt tcg_handle_interrupt_armeb
-#define tcg_init tcg_init_armeb
-#define tcg_invert_cond tcg_invert_cond_armeb
-#define tcg_la_bb_end tcg_la_bb_end_armeb
-#define tcg_la_br_end tcg_la_br_end_armeb
-#define tcg_la_func_end tcg_la_func_end_armeb
-#define tcg_liveness_analysis tcg_liveness_analysis_armeb
-#define tcg_malloc tcg_malloc_armeb
-#define tcg_malloc_internal tcg_malloc_internal_armeb
-#define tcg_op_defs_org tcg_op_defs_org_armeb
-#define tcg_opt_gen_mov tcg_opt_gen_mov_armeb
-#define tcg_opt_gen_movi tcg_opt_gen_movi_armeb
-#define tcg_optimize tcg_optimize_armeb
-#define tcg_out16 tcg_out16_armeb
-#define tcg_out32 tcg_out32_armeb
-#define tcg_out64 tcg_out64_armeb
-#define tcg_out8 tcg_out8_armeb
-#define tcg_out_addi tcg_out_addi_armeb
-#define tcg_out_branch tcg_out_branch_armeb
-#define tcg_out_brcond32 tcg_out_brcond32_armeb
-#define tcg_out_brcond64 tcg_out_brcond64_armeb
-#define tcg_out_bswap32 tcg_out_bswap32_armeb
-#define tcg_out_bswap64 tcg_out_bswap64_armeb
-#define tcg_out_call tcg_out_call_armeb
-#define tcg_out_cmp tcg_out_cmp_armeb
-#define tcg_out_ext16s tcg_out_ext16s_armeb
-#define tcg_out_ext16u tcg_out_ext16u_armeb
-#define tcg_out_ext32s tcg_out_ext32s_armeb
-#define tcg_out_ext32u tcg_out_ext32u_armeb
-#define tcg_out_ext8s tcg_out_ext8s_armeb
-#define tcg_out_ext8u tcg_out_ext8u_armeb
-#define tcg_out_jmp tcg_out_jmp_armeb
-#define tcg_out_jxx tcg_out_jxx_armeb
-#define tcg_out_label tcg_out_label_armeb
-#define tcg_out_ld tcg_out_ld_armeb
-#define tcg_out_modrm
tcg_out_modrm_armeb -#define tcg_out_modrm_offset tcg_out_modrm_offset_armeb -#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_armeb -#define tcg_out_mov tcg_out_mov_armeb -#define tcg_out_movcond32 tcg_out_movcond32_armeb -#define tcg_out_movcond64 tcg_out_movcond64_armeb -#define tcg_out_movi tcg_out_movi_armeb -#define tcg_out_op tcg_out_op_armeb -#define tcg_out_pop tcg_out_pop_armeb -#define tcg_out_push tcg_out_push_armeb -#define tcg_out_qemu_ld tcg_out_qemu_ld_armeb -#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_armeb -#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_armeb -#define tcg_out_qemu_st tcg_out_qemu_st_armeb -#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_armeb -#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_armeb -#define tcg_out_reloc tcg_out_reloc_armeb -#define tcg_out_rolw_8 tcg_out_rolw_8_armeb -#define tcg_out_setcond32 tcg_out_setcond32_armeb -#define tcg_out_setcond64 tcg_out_setcond64_armeb -#define tcg_out_shifti tcg_out_shifti_armeb -#define tcg_out_st tcg_out_st_armeb -#define tcg_out_tb_finalize tcg_out_tb_finalize_armeb -#define tcg_out_tb_init tcg_out_tb_init_armeb -#define tcg_out_tlb_load tcg_out_tlb_load_armeb -#define tcg_out_vex_modrm tcg_out_vex_modrm_armeb -#define tcg_patch32 tcg_patch32_armeb -#define tcg_patch8 tcg_patch8_armeb -#define tcg_pcrel_diff tcg_pcrel_diff_armeb -#define tcg_pool_reset tcg_pool_reset_armeb -#define tcg_prologue_init tcg_prologue_init_armeb -#define tcg_ptr_byte_diff tcg_ptr_byte_diff_armeb -#define tcg_reg_alloc tcg_reg_alloc_armeb -#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_armeb -#define tcg_reg_alloc_call tcg_reg_alloc_call_armeb -#define tcg_reg_alloc_mov tcg_reg_alloc_mov_armeb -#define tcg_reg_alloc_movi tcg_reg_alloc_movi_armeb -#define tcg_reg_alloc_op tcg_reg_alloc_op_armeb -#define tcg_reg_alloc_start tcg_reg_alloc_start_armeb -#define tcg_reg_free tcg_reg_free_armeb -#define tcg_reg_sync tcg_reg_sync_armeb -#define tcg_set_frame tcg_set_frame_armeb -#define tcg_set_nop tcg_set_nop_armeb -#define tcg_swap_cond tcg_swap_cond_armeb -#define tcg_target_callee_save_regs tcg_target_callee_save_regs_armeb -#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_armeb -#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_armeb -#define tcg_target_const_match tcg_target_const_match_armeb -#define tcg_target_init tcg_target_init_armeb -#define tcg_target_qemu_prologue tcg_target_qemu_prologue_armeb -#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_armeb -#define tcg_temp_alloc tcg_temp_alloc_armeb -#define tcg_temp_free_i32 tcg_temp_free_i32_armeb -#define tcg_temp_free_i64 tcg_temp_free_i64_armeb -#define tcg_temp_free_internal tcg_temp_free_internal_armeb -#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_armeb -#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_armeb -#define tcg_temp_new_i32 tcg_temp_new_i32_armeb -#define tcg_temp_new_i64 tcg_temp_new_i64_armeb -#define tcg_temp_new_internal tcg_temp_new_internal_armeb -#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_armeb -#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_armeb -#define tdb_hash tdb_hash_armeb -#define teecr_write teecr_write_armeb -#define teehbr_access teehbr_access_armeb -#define temp_allocate_frame temp_allocate_frame_armeb -#define temp_dead temp_dead_armeb -#define temps_are_copies temps_are_copies_armeb -#define temp_save temp_save_armeb -#define temp_sync temp_sync_armeb -#define tgen_arithi tgen_arithi_armeb -#define tgen_arithr 
tgen_arithr_armeb -#define thumb2_logic_op thumb2_logic_op_armeb -#define ti925t_initfn ti925t_initfn_armeb -#define tlb_add_large_page tlb_add_large_page_armeb -#define tlb_flush_entry tlb_flush_entry_armeb -#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_armeb -#define tlbi_aa64_asid_write tlbi_aa64_asid_write_armeb -#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_armeb -#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_armeb -#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_armeb -#define tlbi_aa64_va_write tlbi_aa64_va_write_armeb -#define tlbiall_is_write tlbiall_is_write_armeb -#define tlbiall_write tlbiall_write_armeb -#define tlbiasid_is_write tlbiasid_is_write_armeb -#define tlbiasid_write tlbiasid_write_armeb -#define tlbimvaa_is_write tlbimvaa_is_write_armeb -#define tlbimvaa_write tlbimvaa_write_armeb -#define tlbimva_is_write tlbimva_is_write_armeb -#define tlbimva_write tlbimva_write_armeb -#define tlb_is_dirty_ram tlb_is_dirty_ram_armeb -#define tlb_protect_code tlb_protect_code_armeb -#define tlb_reset_dirty_range tlb_reset_dirty_range_armeb -#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_armeb -#define tlb_set_dirty tlb_set_dirty_armeb -#define tlb_set_dirty1 tlb_set_dirty1_armeb -#define tlb_unprotect_code_phys tlb_unprotect_code_phys_armeb -#define tlb_vaddr_to_host tlb_vaddr_to_host_armeb -#define token_get_type token_get_type_armeb -#define token_get_value token_get_value_armeb -#define token_is_escape token_is_escape_armeb -#define token_is_keyword token_is_keyword_armeb -#define token_is_operator token_is_operator_armeb -#define tokens_append_from_iter tokens_append_from_iter_armeb -#define to_qiv to_qiv_armeb -#define to_qov to_qov_armeb -#define tosa_init tosa_init_armeb -#define tosa_machine_init tosa_machine_init_armeb -#define tswap32 tswap32_armeb -#define tswap64 tswap64_armeb -#define type_class_get_size type_class_get_size_armeb -#define type_get_by_name type_get_by_name_armeb -#define type_get_parent type_get_parent_armeb -#define type_has_parent type_has_parent_armeb -#define type_initialize type_initialize_armeb -#define type_initialize_interface type_initialize_interface_armeb -#define type_is_ancestor type_is_ancestor_armeb -#define type_new type_new_armeb -#define type_object_get_size type_object_get_size_armeb -#define type_register_internal type_register_internal_armeb -#define type_table_add type_table_add_armeb -#define type_table_get type_table_get_armeb -#define type_table_lookup type_table_lookup_armeb -#define uint16_to_float32 uint16_to_float32_armeb -#define uint16_to_float64 uint16_to_float64_armeb -#define uint32_to_float32 uint32_to_float32_armeb -#define uint32_to_float64 uint32_to_float64_armeb -#define uint64_to_float128 uint64_to_float128_armeb -#define uint64_to_float32 uint64_to_float32_armeb -#define uint64_to_float64 uint64_to_float64_armeb -#define unassigned_io_ops unassigned_io_ops_armeb -#define unassigned_io_read unassigned_io_read_armeb -#define unassigned_io_write unassigned_io_write_armeb -#define unassigned_mem_accepts unassigned_mem_accepts_armeb -#define unassigned_mem_ops unassigned_mem_ops_armeb -#define unassigned_mem_read unassigned_mem_read_armeb -#define unassigned_mem_write unassigned_mem_write_armeb -#define update_spsel update_spsel_armeb -#define v6_cp_reginfo v6_cp_reginfo_armeb -#define v6k_cp_reginfo v6k_cp_reginfo_armeb -#define v7_cp_reginfo v7_cp_reginfo_armeb -#define v7mp_cp_reginfo v7mp_cp_reginfo_armeb -#define v7m_pop v7m_pop_armeb -#define v7m_push v7m_push_armeb -#define 
v8_cp_reginfo v8_cp_reginfo_armeb -#define v8_el2_cp_reginfo v8_el2_cp_reginfo_armeb -#define v8_el3_cp_reginfo v8_el3_cp_reginfo_armeb -#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_armeb -#define vapa_cp_reginfo vapa_cp_reginfo_armeb -#define vbar_write vbar_write_armeb -#define vfp_exceptbits_from_host vfp_exceptbits_from_host_armeb -#define vfp_exceptbits_to_host vfp_exceptbits_to_host_armeb -#define vfp_get_fpcr vfp_get_fpcr_armeb -#define vfp_get_fpscr vfp_get_fpscr_armeb -#define vfp_get_fpsr vfp_get_fpsr_armeb -#define vfp_reg_offset vfp_reg_offset_armeb -#define vfp_set_fpcr vfp_set_fpcr_armeb -#define vfp_set_fpscr vfp_set_fpscr_armeb -#define vfp_set_fpsr vfp_set_fpsr_armeb -#define visit_end_implicit_struct visit_end_implicit_struct_armeb -#define visit_end_list visit_end_list_armeb -#define visit_end_struct visit_end_struct_armeb -#define visit_end_union visit_end_union_armeb -#define visit_get_next_type visit_get_next_type_armeb -#define visit_next_list visit_next_list_armeb -#define visit_optional visit_optional_armeb -#define visit_start_implicit_struct visit_start_implicit_struct_armeb -#define visit_start_list visit_start_list_armeb -#define visit_start_struct visit_start_struct_armeb -#define visit_start_union visit_start_union_armeb -#define vmsa_cp_reginfo vmsa_cp_reginfo_armeb -#define vmsa_tcr_el1_write vmsa_tcr_el1_write_armeb -#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_armeb -#define vmsa_ttbcr_reset vmsa_ttbcr_reset_armeb -#define vmsa_ttbcr_write vmsa_ttbcr_write_armeb -#define vmsa_ttbr_write vmsa_ttbr_write_armeb -#define write_cpustate_to_list write_cpustate_to_list_armeb -#define write_list_to_cpustate write_list_to_cpustate_armeb -#define write_raw_cp_reg write_raw_cp_reg_armeb -#define X86CPURegister32_lookup X86CPURegister32_lookup_armeb -#define x86_op_defs x86_op_defs_armeb -#define xpsr_read xpsr_read_armeb -#define xpsr_write xpsr_write_armeb -#define xscale_cpar_write xscale_cpar_write_armeb -#define xscale_cp_reginfo xscale_cp_reginfo_armeb -#define ARM_REGS_STORAGE_SIZE ARM_REGS_STORAGE_SIZE_armeb +#define helper_vfp_touqs helper_vfp_touqs_armeb +#define helper_vfp_sltoh helper_vfp_sltoh_armeb +#define helper_vfp_ultoh helper_vfp_ultoh_armeb +#define helper_vfp_sqtoh helper_vfp_sqtoh_armeb +#define helper_vfp_uqtoh helper_vfp_uqtoh_armeb +#define helper_vfp_toshh helper_vfp_toshh_armeb +#define helper_vfp_touhh helper_vfp_touhh_armeb +#define helper_vfp_toslh helper_vfp_toslh_armeb +#define helper_vfp_toulh helper_vfp_toulh_armeb +#define helper_vfp_tosqh helper_vfp_tosqh_armeb +#define helper_vfp_touqh helper_vfp_touqh_armeb +#define helper_set_rmode helper_set_rmode_armeb +#define helper_set_neon_rmode helper_set_neon_rmode_armeb +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_armeb +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_armeb +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_armeb +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_armeb +#define helper_recps_f32 helper_recps_f32_armeb +#define helper_rsqrts_f32 helper_rsqrts_f32_armeb +#define helper_recpe_f16 helper_recpe_f16_armeb +#define helper_recpe_f32 helper_recpe_f32_armeb +#define helper_recpe_f64 helper_recpe_f64_armeb +#define helper_rsqrte_f16 helper_rsqrte_f16_armeb +#define helper_rsqrte_f32 helper_rsqrte_f32_armeb +#define helper_rsqrte_f64 helper_rsqrte_f64_armeb +#define helper_recpe_u32 helper_recpe_u32_armeb +#define helper_rsqrte_u32 helper_rsqrte_u32_armeb +#define helper_vfp_muladds 
helper_vfp_muladds_armeb +#define helper_vfp_muladdd helper_vfp_muladdd_armeb +#define helper_rints_exact helper_rints_exact_armeb +#define helper_rintd_exact helper_rintd_exact_armeb +#define helper_rints helper_rints_armeb +#define helper_rintd helper_rintd_armeb +#define arm_rmode_to_sf arm_rmode_to_sf_armeb +#define helper_fjcvtzs helper_fjcvtzs_armeb +#define helper_vjcvt helper_vjcvt_armeb +#define helper_frint32_s helper_frint32_s_armeb +#define helper_frint64_s helper_frint64_s_armeb +#define helper_frint32_d helper_frint32_d_armeb +#define helper_frint64_d helper_frint64_d_armeb +#define helper_check_hcr_el2_trap helper_check_hcr_el2_trap_armeb +#define arm_reg_reset arm_reg_reset_armeb +#define arm_reg_read arm_reg_read_armeb +#define arm_reg_write arm_reg_write_armeb +#define mla_op mla_op_armeb +#define mls_op mls_op_armeb +#define sshl_op sshl_op_armeb +#define ushl_op ushl_op_armeb +#define uqsub_op uqsub_op_armeb +#define sqsub_op sqsub_op_armeb +#define uqadd_op uqadd_op_armeb +#define sqadd_op sqadd_op_armeb +#define sli_op sli_op_armeb +#define cmtst_op cmtst_op_armeb +#define sri_op sri_op_armeb +#define usra_op usra_op_armeb +#define ssra_op ssra_op_armeb #endif diff --git a/qemu/configure b/qemu/configure index b5d52d76..851530c0 100755 --- a/qemu/configure +++ b/qemu/configure @@ -8,6 +8,9 @@ CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS +# Don't allow CCACHE, if present, to use cached results of compile tests! +export CCACHE_RECACHE=yes + # Temporary directory used for files created while # configure runs. Since it is in the build directory # we can safely blow away any previous version of it @@ -24,7 +27,10 @@ fi TMPB="qemu-conf" TMPC="${TMPDIR1}/${TMPB}.c" TMPO="${TMPDIR1}/${TMPB}.o" +TMPCXX="${TMPDIR1}/${TMPB}.cxx" TMPE="${TMPDIR1}/${TMPB}.exe" +TMPMO="${TMPDIR1}/${TMPB}.mo" +TMPTXT="${TMPDIR1}/${TMPB}.txt" rm -f config.log @@ -35,14 +41,18 @@ printf " '%s'" "$0" "$@" >> config.log echo >> config.log echo "#" >> config.log -error_exit() { - echo +print_error() { + (echo echo "ERROR: $1" while test -n "$2"; do echo " $2" shift done - echo + echo) >&2 +} + +error_exit() { + print_error "$@" exit 1 } @@ -51,6 +61,11 @@ do_compiler() { # is compiler binary to execute. local compiler="$1" shift + if test -n "$BASH_VERSION"; then eval ' + echo >>config.log " +funcs: ${FUNCNAME[*]} +lines: ${BASH_LINENO[*]}" + '; fi echo $compiler "$@" >> config.log $compiler "$@" >> config.log 2>&1 || return $? # Test passed. If this is an --enable-werror build, rerun @@ -78,14 +93,40 @@ do_cc() { do_compiler "$cc" "$@" } +do_cxx() { + do_compiler "$cxx" "$@" +} + +update_cxxflags() { + # Set QEMU_CXXFLAGS from QEMU_CFLAGS by filtering out those + # options which some versions of GCC's C++ compiler complain about + # because they only make sense for C programs. 
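(Aside, as an illustration of why the loop below filters these flags; this is a
sketch run from a shell, not part of the patch, and the exact diagnostic text
varies by compiler version:)

    $ echo 'int main(void) { return 0; }' > t.c
    $ gcc -c -Wnested-externs t.c      # accepted: a C-only warning option
    $ cp t.c t.cc
    $ g++ -c -Wnested-externs t.cc
    cc1plus: warning: command-line option '-Wnested-externs' is valid for C/ObjC but not for C++

Any such flag left in QEMU_CXXFLAGS would turn every C++ compile test into a
failure under -Werror, which is what the filter prevents.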
+    QEMU_CXXFLAGS="$QEMU_CXXFLAGS -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS"
+
+    for arg in $QEMU_CFLAGS; do
+        case $arg in
+            -Wstrict-prototypes|-Wmissing-prototypes|-Wnested-externs|\
+            -Wold-style-declaration|-Wold-style-definition|-Wredundant-decls)
+                ;;
+            -std=gnu99)
+                QEMU_CXXFLAGS=${QEMU_CXXFLAGS:+$QEMU_CXXFLAGS }"-std=gnu++98"
+                ;;
+            *)
+                QEMU_CXXFLAGS=${QEMU_CXXFLAGS:+$QEMU_CXXFLAGS }$arg
+                ;;
+        esac
+    done
+}
+
 compile_object() {
-    do_cc $QEMU_CFLAGS -c -o $TMPO $TMPC
+    local_cflags="$1"
+    do_cc $QEMU_CFLAGS $local_cflags -c -o $TMPO $TMPC
 }
 
 compile_prog() {
     local_cflags="$1"
     local_ldflags="$2"
-    do_cc $QEMU_CFLAGS $local_cflags -o $TMPE $TMPC $LDFLAGS $local_ldflags
+    do_cc $QEMU_CFLAGS $local_cflags -o $TMPE $TMPC $QEMU_LDFLAGS $local_ldflags
 }
 
 # symbolically link $1 to $2. Portable version of "ln -sf".
@@ -131,18 +172,58 @@ path_of() {
     return 1
 }
 
+glob() {
+    eval test -z '"${1#'"$2"'}"'
+}
+
+supported_target() {
+    case "$1" in
+        *-softmmu)
+            ;;
+        *)
+            print_error "Invalid target name '$target'"
+            return 1
+            ;;
+    esac
+    test "$tcg" = "yes" && return 0
+    print_error "TCG disabled, but hardware accelerator not available for '$target'"
+    return 1
+}
+
+
+ld_has() {
+    $ld --help 2>/dev/null | grep ".$1" >/dev/null 2>&1
+}
+
+# make source path absolute
+source_path=$(cd "$(dirname -- "$0")"; pwd)
+
+if printf %s\\n "$source_path" "$PWD" | grep -q "[[:space:]:]";
+then
+  error_exit "main directory cannot contain spaces nor colons"
+fi
+
 # default parameters
-source_path=`dirname "$0"`
 cpu=""
+iasl="iasl"
+interp_prefix="/usr/gnemul/qemu-%M"
 static="no"
 cross_prefix=""
 host_cc="cc"
-cc_i386=i386-pc-linux-gnu-gcc
+libs_cpu=""
+libs_softmmu=""
+libs_tools=""
 debug_info="yes"
 stack_protector=""
+git_update=no
+git_submodules=""
+
+git="git"
+
 # Don't accept a target_list environment variable.
 unset target_list
+unset target_list_exclude
 
 # Default value for a variable defining feature "foo".
 #  * foo="no"  feature will only be used if --enable-foo arg is given
@@ -156,41 +237,77 @@ unset target_list
 # Distributions want to ensure that several features are compiled in, and it
 # is impossible without a --enable-foo that exits if a feature is not found.
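(A minimal sketch of the probe pattern this convention implies, for a
hypothetical feature "foo"; probe_for_foo stands in for one of the
compile_prog tests and is not a real helper in this script, while
feature_not_found is the script's own hard-error helper:)

    foo=""                            # default: auto-detect
    # ...option parsing may set foo="yes" or foo="no"...
    if test "$foo" != "no" ; then
        if probe_for_foo ; then       # e.g. compile_prog "" "-lfoo"
            foo="yes"
        elif test "$foo" = "yes" ; then
            feature_not_found "foo"   # explicitly requested: abort configure
        else
            foo="no"                  # not found: silently disabled
        fi
    fi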
-debug_tcg="no"
+tcg="yes"
+membarrier=""
 debug="no"
+sanitizers="no"
 strip_opt="yes"
 bigendian="no"
 mingw32="no"
 EXESUF=""
 DSOSUF=".so"
 LDFLAGS_SHARED="-shared"
+prefix="/usr/local"
+bindir="\${prefix}/bin"
+libdir="\${prefix}/lib"
+libexecdir="\${prefix}/libexec"
+includedir="\${prefix}/include"
+sysconfdir="\${prefix}/etc"
+local_statedir="\${prefix}/var"
+confsuffix="/qemu"
 bsd="no"
 linux="no"
 solaris="no"
 softmmu="yes"
-aix="no"
+pkgversion=""
 pie=""
+cpuid_h="no"
+avx2_opt=""
+debug_stack_usage="no"
+gtk_gl="no"
+tcmalloc="no"
+jemalloc="no"
+
+supported_cpu="no"
+supported_os="no"
+bogus_os="no"
+malloc_trim=""
 
 # parse CC options first
 for opt do
-  optarg=`expr "x$opt" : 'x[^=]*=\(.*\)'`
+  optarg=$(expr "x$opt" : 'x[^=]*=\(.*\)')
   case "$opt" in
+  --cross-prefix=*) cross_prefix="$optarg"
+  ;;
   --cc=*) CC="$optarg"
  ;;
-  --source-path=*) source_path="$optarg"
+  --cxx=*) CXX="$optarg"
  ;;
   --cpu=*) cpu="$optarg"
  ;;
-  --extra-cflags=*) QEMU_CFLAGS="$optarg $QEMU_CFLAGS"
-                    EXTRA_CFLAGS="$optarg"
+  --extra-cflags=*) QEMU_CFLAGS="$QEMU_CFLAGS $optarg"
+                    QEMU_LDFLAGS="$QEMU_LDFLAGS $optarg"
  ;;
-  --extra-ldflags=*) LDFLAGS="$optarg $LDFLAGS"
+  --extra-cxxflags=*) QEMU_CXXFLAGS="$QEMU_CXXFLAGS $optarg"
+  ;;
+  --extra-ldflags=*) QEMU_LDFLAGS="$QEMU_LDFLAGS $optarg"
                      EXTRA_LDFLAGS="$optarg"
  ;;
   --enable-debug-info) debug_info="yes"
  ;;
   --disable-debug-info) debug_info="no"
  ;;
+  --cross-cc-*[!a-zA-Z0-9_-]*=*) error_exit "Passed bad --cross-cc-FOO option"
+  ;;
+  --cross-cc-cflags-*) cc_arch=${opt#--cross-cc-flags-}; cc_arch=${cc_arch%%=*}
+                       eval "cross_cc_cflags_${cc_arch}=\$optarg"
+                       cross_cc_vars="$cross_cc_vars cross_cc_cflags_${cc_arch}"
+  ;;
+  --cross-cc-*) cc_arch=${opt#--cross-cc-}; cc_arch=${cc_arch%%=*}
+                cc_archs="$cc_archs $cc_arch"
+                eval "cross_cc_${cc_arch}=\$optarg"
+                cross_cc_vars="$cross_cc_vars cross_cc_${cc_arch}"
+  ;;
   esac
 done
 
 # OS specific
@@ -207,34 +324,45 @@ else
   cc="${CC-${cross_prefix}gcc}"
 fi
 
+if test -z "${CXX}${cross_prefix}"; then
+  cxx="c++"
+else
+  cxx="${CXX-${cross_prefix}g++}"
+fi
+
 ar="${AR-${cross_prefix}ar}"
 as="${AS-${cross_prefix}as}"
+ccas="${CCAS-$cc}"
 cpp="${CPP-$cc -E}"
 objcopy="${OBJCOPY-${cross_prefix}objcopy}"
 ld="${LD-${cross_prefix}ld}"
+ranlib="${RANLIB-${cross_prefix}ranlib}"
 nm="${NM-${cross_prefix}nm}"
 strip="${STRIP-${cross_prefix}strip}"
+pkg_config_exe="${PKG_CONFIG-${cross_prefix}pkg-config}"
+query_pkg_config() {
+    "${pkg_config_exe}" ${QEMU_PKG_CONFIG_FLAGS} "$@"
+}
+pkg_config=query_pkg_config
 
 # If the user hasn't specified ARFLAGS, default to 'rv', just as make does.
 ARFLAGS="${ARFLAGS-rv}"
 
 # default flags for all hosts
-QEMU_CFLAGS="-fno-strict-aliasing -fno-common $QEMU_CFLAGS"
+# We use -fwrapv to tell the compiler that we require a C dialect where
+# left shift of signed integers is well defined and has the expected
+# 2s-complement style results. (Both clang and gcc agree that it
+# provides these semantics.)
+QEMU_CFLAGS="-fno-strict-aliasing -fno-common -fwrapv -std=gnu99 $QEMU_CFLAGS"
 QEMU_CFLAGS="-Wall -Wundef -Wwrite-strings -Wmissing-prototypes $QEMU_CFLAGS"
 QEMU_CFLAGS="-Wstrict-prototypes -Wredundant-decls $QEMU_CFLAGS"
 QEMU_CFLAGS="-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE $QEMU_CFLAGS"
-QEMU_INCLUDES="-I. -I\$(SRC_PATH) -I\$(SRC_PATH)/include"
+QEMU_INCLUDES="-iquote . -iquote \$(SRC_PATH) -iquote \$(SRC_PATH)/accel/tcg -iquote \$(SRC_PATH)/include"
+QEMU_INCLUDES="$QEMU_INCLUDES -iquote \$(SRC_PATH)/disas/libvixl"
 
 if test "$debug_info" = "yes"; then
     CFLAGS="-g $CFLAGS"
-    LDFLAGS="-g $LDFLAGS"
-else
-    CFLAGS="-O3 $CFLAGS"
-    LDFLAGS="-O3 $LDFLAGS"
 fi
 
-# make source path absolute
-source_path=`cd "$source_path"; pwd`
-
 # running configure in the source tree?
 # we know that's the case if configure is there.
 if test -f "./configure"; then
@@ -253,6 +381,20 @@ EOF
   compile_object
 }
 
+check_include() {
+cat > $TMPC <<EOF
+#include <$1>
+int main(void) { return 0; }
+EOF
+  compile_object
+}
+
+write_c_skeleton() {
+    cat > $TMPC <<EOF
+int main(void) { return 0; }
+EOF
+}
+
 if test "$mingw32" = "yes" ; then
-  cat > $TMPC << EOF
-int main(void) { return 0; }
-EOF
+  # MinGW needs -mthreads for TLS and macro _MT.
+  QEMU_CFLAGS="-mthreads $QEMU_CFLAGS"
+  LIBS="-lwinmm -lws2_32 $LIBS"
+  write_c_skeleton;
   if compile_prog "" "-liberty" ; then
     LIBS="-liberty $LIBS"
   fi
+  prefix="c:/Program Files/QEMU"
+  bindir="\${prefix}"
+  sysconfdir="\${prefix}"
+  local_statedir=
+  confsuffix=""
 fi
 
 werror=""
 
 for opt do
-  optarg=`expr "x$opt" : 'x[^=]*=\(.*\)'`
+  optarg=$(expr "x$opt" : 'x[^=]*=\(.*\)')
   case "$opt" in
   --help|-h) show_help=yes
  ;;
   --version|-V) exec cat $source_path/VERSION
  ;;
-  --source-path=*)
+  --prefix=*) prefix="$optarg"
+  ;;
+  --interp-prefix=*) interp_prefix="$optarg"
+  ;;
+  --cross-prefix=*)
  ;;
   --cc=*)
  ;;
   --host-cc=*) host_cc="$optarg"
  ;;
+  --cxx=*)
+  ;;
+  --iasl=*) iasl="$optarg"
+  ;;
   --objcc=*) objcc="$optarg"
  ;;
   --make=*) make="$optarg"
  ;;
+  --install=*) install="$optarg"
+  ;;
   --extra-cflags=*)
  ;;
+  --extra-cxxflags=*)
+  ;;
   --extra-ldflags=*)
  ;;
   --enable-debug-info)
  ;;
   --disable-debug-info)
  ;;
+  --cross-cc-*)
+  ;;
   --cpu=*)
  ;;
   --target-list=*) target_list="$optarg"
+                   if test "$target_list_exclude"; then
+                       error_exit "Can't mix --target-list with --target-list-exclude"
+                   fi
+  ;;
+  --target-list-exclude=*) target_list_exclude="$optarg"
+                   if test "$target_list"; then
+                       error_exit "Can't mix --target-list-exclude with --target-list"
+                   fi
  ;;
   --static)
     static="yes"
-    LDFLAGS="-static $LDFLAGS"
+    QEMU_PKG_CONFIG_FLAGS="--static $QEMU_PKG_CONFIG_FLAGS"
  ;;
-  --enable-debug-tcg) debug_tcg="yes"
+  --bindir=*) bindir="$optarg"
  ;;
-  --disable-debug-tcg) debug_tcg="no"
+  --libdir=*) libdir="$optarg"
+  ;;
+  --libexecdir=*) libexecdir="$optarg"
+  ;;
+  --includedir=*) includedir="$optarg"
+  ;;
+  --with-confsuffix=*) confsuffix="$optarg"
+  ;;
+  --sysconfdir=*) sysconfdir="$optarg"
+  ;;
+  --localstatedir=*) local_statedir="$optarg"
+  ;;
+  --host=*|--build=*|\
+  --disable-dependency-tracking|\
+  --sbindir=*|--sharedstatedir=*|\
+  --oldincludedir=*|--datarootdir=*|--infodir=*|--localedir=*|\
+  --htmldir=*|--dvidir=*|--pdfdir=*|--psdir=*)
+    # These switches are silently ignored, for compatibility with
+    # autoconf-generated configure scripts. This allows QEMU's
+    # configure to be used by RPM and similar macros that set
+    # lots of directory switches by default.
+  ;;
   --enable-debug)
       # Enable debugging options that aren't excessively noisy
-      debug_tcg="yes"
       debug="yes"
       strip_opt="no"
  ;;
+  --enable-sanitizers) sanitizers="yes"
+  ;;
+  --disable-sanitizers) sanitizers="no"
+  ;;
   --disable-strip) strip_opt="no"
  ;;
+  --disable-tcg) tcg="no"
+  ;;
+  --enable-tcg) tcg="yes"
+  ;;
+  --disable-malloc-trim) malloc_trim="no"
+  ;;
+  --enable-malloc-trim) malloc_trim="yes"
+  ;;
+  --enable-system) softmmu="yes"
+  ;;
   --enable-pie) pie="yes"
  ;;
   --disable-pie) pie="no"
@@ -533,6 +758,42 @@ for opt do
  ;;
   --disable-stack-protector) stack_protector="no"
  ;;
+  --disable-membarrier) membarrier="no"
+  ;;
+  --enable-membarrier) membarrier="yes"
+  ;;
+  --with-pkgversion=*) pkgversion="$optarg"
+  ;;
+  --enable-debug-stack-usage) debug_stack_usage="yes"
+  ;;
+  --disable-avx2) avx2_opt="no"
+  ;;
+  --enable-avx2) avx2_opt="yes"
+  ;;
+  --disable-avx512f) avx512f_opt="no"
+  ;;
+  --enable-avx512f) avx512f_opt="yes"
+  ;;
+
+  --disable-virtio-blk-data-plane|--enable-virtio-blk-data-plane)
+      echo "$0: $opt is obsolete, virtio-blk data-plane is always on" >&2
+  ;;
+  --enable-vhdx|--disable-vhdx)
+      echo "$0: $opt is obsolete, VHDX driver is always built" >&2
+  ;;
+  --enable-uuid|--disable-uuid)
+      echo "$0: $opt is obsolete, UUID support is always built" >&2
+  ;;
+  --disable-tcmalloc) tcmalloc="no"
+  ;;
+  --enable-tcmalloc) tcmalloc="yes"
+  ;;
+  --disable-jemalloc) jemalloc="no"
+  ;;
+  --enable-jemalloc) jemalloc="yes"
+  ;;
+  --with-git=*) git="$optarg"
+  ;;
   *)
       echo "ERROR: unknown option $opt"
       echo "Try '$0 --help' for more information"
@@ -544,60 +805,54 @@ done
 
 case "$cpu" in
     ppc)
            CPU_CFLAGS="-m32"
-           LDFLAGS="-m32 $LDFLAGS"
+           QEMU_LDFLAGS="-m32 $QEMU_LDFLAGS"
           ;;
     ppc64)
            CPU_CFLAGS="-m64"
-           LDFLAGS="-m64 $LDFLAGS"
+           QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS"
           ;;
     sparc)
-           LDFLAGS="-m32 $LDFLAGS"
-           CPU_CFLAGS="-m32 -mcpu=ultrasparc"
+           CPU_CFLAGS="-m32 -mv8plus -mcpu=ultrasparc"
+           QEMU_LDFLAGS="-m32 -mv8plus $QEMU_LDFLAGS"
           ;;
     sparc64)
-           LDFLAGS="-m64 $LDFLAGS"
            CPU_CFLAGS="-m64 -mcpu=ultrasparc"
+           QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS"
           ;;
     s390)
            CPU_CFLAGS="-m31"
-           LDFLAGS="-m31 $LDFLAGS"
+           QEMU_LDFLAGS="-m31 $QEMU_LDFLAGS"
           ;;
     s390x)
            CPU_CFLAGS="-m64"
-           LDFLAGS="-m64 $LDFLAGS"
+           QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS"
           ;;
     i386)
            CPU_CFLAGS="-m32"
-           LDFLAGS="-m32 $LDFLAGS"
-           cc_i386='$(CC) -m32'
+           QEMU_LDFLAGS="-m32 $QEMU_LDFLAGS"
           ;;
     x86_64)
-           CPU_CFLAGS="-m64"
-           LDFLAGS="-m64 $LDFLAGS"
-           cc_i386='$(CC) -m32'
+           # ??? Only extremely old AMD cpus do not have cmpxchg16b.
+           # If we truly care, we should simply detect this case at
+           # runtime and generate the fallback to serial emulation.
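(For reference, a standalone sketch of what the -mcx16 flag added just below
enables: an inline 16-byte compare-and-swap, which on older toolchains
typically fails to link without the flag because no cmpxchg16b can be
emitted:)

    $ cat > cx16.c << 'SRC'
    int main(void)
    {
        unsigned __int128 x = 0, y = 1;
        __sync_val_compare_and_swap(&x, x, y);  /* 16-byte CAS */
        return 0;
    }
    SRC
    $ cc -m64 -mcx16 cx16.c -o cx16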
+           CPU_CFLAGS="-m64 -mcx16"
+           QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS"
           ;;
     x32)
            CPU_CFLAGS="-mx32"
-           LDFLAGS="-mx32 $LDFLAGS"
-           cc_i386='$(CC) -m32'
+           QEMU_LDFLAGS="-mx32 $QEMU_LDFLAGS"
           ;;
     # No special flags required for other host CPUs
 esac
 
+eval "cross_cc_${cpu}=\$host_cc"
+cross_cc_vars="$cross_cc_vars cross_cc_${cpu}"
 QEMU_CFLAGS="$CPU_CFLAGS $QEMU_CFLAGS"
-EXTRA_CFLAGS="$CPU_CFLAGS $EXTRA_CFLAGS"
 
-default_target_list=""
-
-mak_wilds=""
-
-if [ "$softmmu" = "yes" ]; then
-    mak_wilds="${mak_wilds} $source_path/default-configs/*-softmmu.mak"
-fi
-
-for config in $mak_wilds; do
-    default_target_list="${default_target_list} $(basename "$config" .mak)"
-done
+default_target_list="aarch64eb-softmmu aarch64-softmmu armeb-softmmu \
+arm-softmmu m68k-softmmu mips64el-softmmu mips64-softmmu mipsel-softmmu \
+mips-softmmu ppc64-softmmu ppc-softmmu sparc64-softmmu sparc-softmmu \
+x86_64-softmmu riscv32-softmmu riscv64-softmmu"
 
 if test x"$show_help" = x"yes" ; then
cat << EOF
@@ -607,85 +862,170 @@ Options:
 
 [defaults in brackets after descriptions]
 
 Standard options:
   --help                   print this message
+  --prefix=PREFIX          install in PREFIX [$prefix]
+  --interp-prefix=PREFIX   where to find shared libraries, etc.
+                           use %M for cpu name [$interp_prefix]
   --target-list=LIST       set target list (default: build everything)
$(echo Available targets: $default_target_list | \
  fold -s -w 53 | sed -e 's/^/                           /')
+  --target-list-exclude=LIST exclude a set of targets from the default target-list
 
 Advanced options (experts only):
-  --source-path=PATH       path of source code [$source_path]
+  --cross-prefix=PREFIX    use PREFIX for compile tools [$cross_prefix]
   --cc=CC                  use C compiler CC [$cc]
+  --iasl=IASL              use ACPI compiler IASL [$iasl]
   --host-cc=CC             use C compiler CC [$host_cc] for code run at
                            build time
+  --cxx=CXX                use C++ compiler CXX [$cxx]
   --objcc=OBJCC            use Objective-C compiler OBJCC [$objcc]
   --extra-cflags=CFLAGS    append extra C compiler flags QEMU_CFLAGS
+  --extra-cxxflags=CXXFLAGS append extra C++ compiler flags QEMU_CXXFLAGS
   --extra-ldflags=LDFLAGS  append extra linker flags LDFLAGS
+  --cross-cc-ARCH=CC       use compiler when building ARCH guest test cases
+  --cross-cc-flags-ARCH=   use compiler flags when building ARCH guest tests
   --make=MAKE              use specified make [$make]
+  --install=INSTALL        use specified install [$install]
+  --with-git=GIT           use specified git [$git]
   --static                 enable static build [$static]
-  --enable-debug-tcg       enable TCG debugging
-  --disable-debug-tcg      disable TCG debugging (default)
-  --enable-debug-info      enable debugging information (default)
-  --disable-debug-info     disable debugging information
+  --docdir=PATH            install documentation in PATH$confsuffix
+  --bindir=PATH            install binaries in PATH
+  --libdir=PATH            install libraries in PATH
+  --libexecdir=PATH        install helper binaries in PATH
+  --sysconfdir=PATH        install config in PATH$confsuffix
+  --localstatedir=PATH     install local state in PATH (set at runtime on win32)
+  --with-confsuffix=SUFFIX suffix for QEMU data inside datadir/libdir/sysconfdir [$confsuffix]
+  --with-pkgversion=VERS   use specified string as sub-version of the package
   --enable-debug           enable common debug build options
+  --enable-sanitizers      enable default sanitizers
   --disable-strip          disable stripping binaries
   --disable-werror         disable compilation abort on warning
   --disable-stack-protector disable compiler-provided stack protection
-  --enable-pie             build Position Independent Executables
-  --disable-pie            do not build Position Independent Executables
+  --enable-malloc-trim     enable libc malloc_trim() for memory optimization
   --cpu=CPU                Build for host CPU [$cpu]
+  --enable-debug-stack-usage
+                           track the maximum stack usage of stacks created by qemu_alloc_stack
+
+Optional features, enabled with --enable-FEATURE and
+disabled with --disable-FEATURE, default is enabled if available:
+
+  pie             Position Independent Executables
+  debug-tcg       TCG debugging (default is disabled)
+  membarrier      membarrier system call (for Linux 4.14+ or Windows)
+  tcmalloc        tcmalloc support
+  jemalloc        jemalloc support
+  avx2            AVX2 optimization support
+  avx512f         AVX512F optimization support
 
 NOTE: The object files are built at the place where configure is launched
EOF
exit 0
fi
 
+# Remove old dependency files to make sure that they get properly regenerated
+rm -f */config-devices.mak.d
+
+# Check that the C compiler works. Doing this here before testing
+# the host CPU ensures that we had a valid CC to autodetect the
+# $cpu var (and we should bail right here if that's not the case).
+# It also allows the help message to be printed without a CC.
+write_c_skeleton;
+if compile_object ; then
+  : C compiler works ok
+else
+    error_exit "\"$cc\" either does not exist or does not work"
+fi
+if ! compile_prog ; then
+    error_exit "\"$cc\" cannot build an executable (is your linker broken?)"
+fi
+
+# Now we have handled --enable-tcg-interpreter and know we're not just
+# printing the help message, bail out if the host CPU isn't supported.
+if test "$ARCH" = "unknown"; then
+    error_exit "Unsupported CPU = $cpu, try --enable-tcg-interpreter"
+fi
+
 # Consult white-list to determine whether to enable werror
 # by default. Only enable by default for git builds
-z_version=`cut -f3 -d. $source_path/VERSION`
-
 if test -z "$werror" ; then
-    if test -d "$source_path/.git" -a \
-        "$linux" = "yes" ; then
+    if test -e "$source_path/.git" && \
+        { test "$linux" = "yes" || test "$mingw32" = "yes"; }; then
         werror="yes"
     else
         werror="no"
     fi
 fi
 
-# check that the C compiler works.
-cat > $TMPC <<EOF
-int main(void) { return 0; }
-EOF
-
-if compile_object ; then
-  : C compiler works ok
-else
-    error_exit "\"$cc\" either does not exist or does not work"
-fi
+cat > $TMPC << EOF
+#if defined(__clang_major__) && defined(__clang_minor__)
+# ifdef __apple_build_version__
+#  if __clang_major__ < 5 || (__clang_major__ == 5 && __clang_minor__ < 1)
+#   error You need at least XCode Clang v5.1 to compile QEMU
+#  endif
+# else
+#  if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 4)
+#   error You need at least Clang v3.4 to compile QEMU
+#  endif
+# endif
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
+#  error You need at least GCC v4.8 to compile QEMU
+# endif
+#else
+# error You either need GCC or Clang to compile QEMU
+#endif
+int main (void) { return 0; }
+EOF
+if ! compile_prog "" "" ; then
+    error_exit "You need at least GCC v4.8 or Clang v3.4 (or XCode Clang v5.1)"
fi
 
 gcc_flags="-Wold-style-declaration -Wold-style-definition -Wtype-limits"
 gcc_flags="-Wformat-security -Wformat-y2k -Winit-self -Wignored-qualifiers $gcc_flags"
-gcc_flags="-Wmissing-include-dirs -Wempty-body -Wnested-externs $gcc_flags"
-gcc_flags="-Wendif-labels $gcc_flags"
-gcc_flags="-Wno-initializer-overrides $gcc_flags"
-gcc_flags="-Wno-string-plus-int $gcc_flags"
+gcc_flags="-Wno-missing-include-dirs -Wempty-body -Wnested-externs $gcc_flags"
+gcc_flags="-Wendif-labels -Wno-shift-negative-value $gcc_flags"
+gcc_flags="-Wno-initializer-overrides -Wexpansion-to-defined $gcc_flags"
+gcc_flags="-Wno-string-plus-int -Wno-typedef-redefinition $gcc_flags"
 # Note that we do not add -Werror to gcc_flags here, because that would
 # enable it for all configure tests. If a configure test failed due
 # to -Werror this would just silently disable some features,
 # so it's too error prone.
 
-cat > $TMPC << EOF
-int main(void) { return 0; }
-EOF
-for flag in $gcc_flags; do
+
+cc_has_warning_flag() {
+    write_c_skeleton;
+
     # Use the positive sense of the flag when testing for -Wno-wombat
     # support (gcc will happily accept the -Wno- form of unknown
     # warning options).
-    optflag="$(echo $flag | sed -e 's/^-Wno-/-W/')"
-    if compile_prog "-Werror $optflag" "" ; then
-        QEMU_CFLAGS="$QEMU_CFLAGS $flag"
+    optflag="$(echo $1 | sed -e 's/^-Wno-/-W/')"
+    compile_prog "-Werror $optflag" ""
+}
+
+for flag in $gcc_flags; do
+    if cc_has_warning_flag $flag ; then
+        QEMU_CFLAGS="$QEMU_CFLAGS $flag"
     fi
 done
 
 if test "$stack_protector" != "no"; then
+  cat > $TMPC << EOF
+int main(int argc, char *argv[])
+{
+    char arr[64], *p = arr, *c = argv[0];
+    while (*c) {
+        *p++ = *c++;
+    }
+    return 0;
+}
+EOF
   gcc_flags="-fstack-protector-strong -fstack-protector-all"
   sp_on=0
   for flag in $gcc_flags; do
@@ -694,6 +1034,7 @@ if test "$stack_protector" != "no"; then
     if do_cc $QEMU_CFLAGS -Werror $flag -c -o $TMPO $TMPC &&
        compile_prog "-Werror $flag" ""; then
       QEMU_CFLAGS="$QEMU_CFLAGS $flag"
+      QEMU_LDFLAGS="$QEMU_LDFLAGS $flag"
       sp_on=1
       break
     fi
@@ -705,73 +1046,70 @@ if test "$stack_protector" != "no"; then
   fi
 fi
 
-# Workaround for http://gcc.gnu.org/PR55489. Happens with -fPIE/-fPIC and
-# large functions that use global variables. The bug is in all releases of
-# GCC, but it became particularly acute in 4.6.x and 4.7.x. It is fixed in
-# 4.7.3 and 4.8.0. We should be able to delete this at the end of 2013.
+# Disable -Wmissing-braces on older compilers that warn even for
+# the "universal" C zero initializer {0}.
 cat > $TMPC << EOF
-#if __GNUC__ == 4 && (__GNUC_MINOR__ == 6 || (__GNUC_MINOR__ == 7 && __GNUC_PATCHLEVEL__ <= 2))
-int main(void) { return 0; }
-#else
-#error No bug in this compiler.
-#endif
+struct {
+  int a[2];
+} x = {0};
EOF
-if compile_prog "-Werror -fno-gcse" "" ; then
-  TRANSLATE_OPT_CFLAGS=-fno-gcse
+if compile_object "-Werror" "" ; then
+  :
+else
+  QEMU_CFLAGS="$QEMU_CFLAGS -Wno-missing-braces"
 fi
 
-if test "$static" = "yes" ; then
-  if test "$pie" = "yes" ; then
-    error_exit "static and pie are mutually incompatible"
-  else
-    pie="no"
-  fi
-fi
-
-if test "$pie" = ""; then
-  case "$cpu-$targetos" in
-    i386-Linux|x86_64-Linux|x32-Linux|i386-OpenBSD|x86_64-OpenBSD)
-      ;;
-    *)
-      pie="no"
-      ;;
-  esac
-fi
-
-if test "$pie" != "no" ; then
+# Unconditional check for compiler __thread support
 cat > $TMPC << EOF
+static __thread int tls_var;
+int main(void) { return tls_var; }
+EOF
+
+if ! compile_prog "-Werror" "" ; then
+    error_exit "Your compiler does not support the __thread specifier for " \
+        "Thread-Local Storage (TLS). Please upgrade to a version that does."
+fi
+
+cat > $TMPC << EOF
 #ifdef __linux__
 #  define THREAD __thread
 #else
 #  define THREAD
 #endif
-static THREAD int tls_var;
-int main(void) { return tls_var; }
-EOF
-
-  if compile_prog "-fPIE -DPIE" "-pie"; then
-    QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
-    LDFLAGS="-pie $LDFLAGS"
-    pie="yes"
-    if compile_prog "" "-Wl,-z,relro -Wl,-z,now" ; then
-      LDFLAGS="-Wl,-z,relro -Wl,-z,now $LDFLAGS"
-    fi
-  else
-    if test "$pie" = "yes"; then
-      error_exit "PIE not available due to missing toolchain support"
-    else
-      echo "Disabling PIE due to missing toolchain support"
-      pie="no"
-    fi
-  fi
-
-  if compile_prog "-fno-pie" "-nopie"; then
-    CFLAGS_NOPIE="-fno-pie"
-    LDFLAGS_NOPIE="-nopie"
+if test "$static" = "yes"; then
+  if test "$pie" != "no" && compile_prog "-Werror -fPIE -DPIE" "-static-pie"; then
+    QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
+    QEMU_LDFLAGS="-static-pie $QEMU_LDFLAGS"
+    pie="yes"
+  elif test "$pie" = "yes"; then
+    error_exit "-static-pie not available due to missing toolchain support"
+  else
+    QEMU_LDFLAGS="-static $QEMU_LDFLAGS"
+    pie="no"
   fi
+elif test "$pie" = "no"; then
+  QEMU_CFLAGS="$CFLAGS_NOPIE $QEMU_CFLAGS"
+  QEMU_LDFLAGS="$LDFLAGS_NOPIE $QEMU_LDFLAGS"
+elif compile_prog "-Werror -fPIE -DPIE" "-pie"; then
+  QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
+  QEMU_LDFLAGS="-pie $QEMU_LDFLAGS"
+  pie="yes"
+elif test "$pie" = "yes"; then
+  error_exit "PIE not available due to missing toolchain support"
+else
+  echo "Disabling PIE due to missing toolchain support"
+  pie="no"
+fi
+
+# Detect support for PT_GNU_RELRO + DT_BIND_NOW.
+# The combination is known as "full relro", because .got.plt is read-only too.
+if compile_prog "" "-Wl,-z,relro -Wl,-z,now" ; then
+  QEMU_LDFLAGS="-Wl,-z,relro -Wl,-z,now $QEMU_LDFLAGS"
 fi
 
 ##########################################
@@ -803,6 +1141,18 @@ fi
 # Solaris specific configure tool chain decisions
 if test "$solaris" = "yes" ; then
+  if has $install; then
+    :
+  else
+    error_exit "Solaris install program not found. Use --install=/usr/ucb/install or" \
+        "install fileutils from www.blastwave.org using pkg-get -i fileutils" \
+        "to get ginstall which is used by default (which lives in /opt/csw/bin)"
+  fi
+  if test "$(path_of $install)" = "/usr/sbin/install" ; then
+    error_exit "Solaris /usr/sbin/install is not an appropriate install program." \
+        "try ginstall from the GNU fileutils available from www.blastwave.org" \
+        "using pkg-get -i fileutils, or use --install=/usr/ucb/install"
+  fi
   if has ar; then
     :
   else
@@ -815,23 +1165,27 @@ if test "$solaris" = "yes" ; then
 fi
 
 if test -z "${target_list+xxx}" ; then
-    target_list="$default_target_list"
+    for target in $default_target_list; do
+        supported_target $target 2>/dev/null && \
+            target_list="$target_list $target"
+    done
+    target_list="${target_list# }"
 else
-    target_list=`echo "$target_list" | sed -e 's/,/ /g'`
+    target_list=$(echo "$target_list" | sed -e 's/,/ /g')
+    for target in $target_list; do
+        # Check that we recognised the target name; this allows a more
+        # friendly error message than if we let it fall through.
+        case " $default_target_list " in
+            *" $target "*)
+                ;;
+            *)
+                error_exit "Unknown target name '$target'"
+                ;;
+        esac
+        supported_target $target || exit 1
+    done
 fi
 
-# Check that we recognised the target name; this allows a more
-# friendly error message than if we let it fall through.
-for target in $target_list; do
-    case " $default_target_list " in
-        *" $target "*)
-            ;;
-        *)
-            error_exit "Unknown target name '$target'"
-            ;;
-    esac
-done
-
 # see if system emulation was really requested
 case " $target_list " in
   *"-softmmu "*) softmmu=yes
@@ -861,9 +1215,9 @@ int main(int argc, char *argv[]) {
 EOF
 
 if compile_object ; then
-    if grep -q BiGeNdIaN $TMPO ; then
+    if strings -a $TMPO | grep -q BiGeNdIaN ; then
         bigendian="yes"
-    elif grep -q LiTtLeEnDiAn $TMPO ; then
+    elif strings -a $TMPO | grep -q LiTtLeEnDiAn ; then
         bigendian="no"
     else
         echo big/little test failed
@@ -872,6 +1226,13 @@ else
     echo big/little test failed
 fi
 
+##########################################
+# pkg-config probe
+
+if ! has "$pkg_config_exe"; then
+  error_exit "pkg-config binary '$pkg_config_exe' not found"
+fi
+
 ##########################################
 # pthread probe
 PTHREADLIBS_LIST="-pthread -lpthread -lpthreadGC2"
@@ -902,16 +1263,217 @@ else
                 if test "$found" = "no"; then
                     LIBS="$pthread_lib $LIBS"
                 fi
+                PTHREAD_LIB="$pthread_lib"
                 break
             fi
         done
 fi
 
-if test "$mingw32" != yes -a "$pthread" = no; then
+if test "$mingw32" != yes && test "$pthread" = no; then
     error_exit "pthread check failed" \
         "Make sure to have the pthread libs and headers installed."
 fi
 
+# check for pthread_setname_np with thread id
+pthread_setname_np_w_tid=no
+cat > $TMPC << EOF
+#include <pthread.h>
+
+static void *f(void *p) { return NULL; }
+int main(void)
+{
+    pthread_t thread;
+    pthread_create(&thread, 0, f, 0);
+    pthread_setname_np(thread, "QEMU");
+    return 0;
+}
+EOF
+if compile_prog "" "$pthread_lib" ; then
+  pthread_setname_np_w_tid=yes
+fi
+
+# check for pthread_setname_np without thread id
+pthread_setname_np_wo_tid=no
+cat > $TMPC << EOF
+#include <pthread.h>
+
+static void *f(void *p) { pthread_setname_np("QEMU"); }
+int main(void)
+{
+    pthread_t thread;
+    pthread_create(&thread, 0, f, 0);
+    return 0;
+}
+EOF
+if compile_prog "" "$pthread_lib" ; then
+  pthread_setname_np_wo_tid=yes
+fi
+
+if test "$tcmalloc" = "yes" && test "$jemalloc" = "yes" ; then
+    echo "ERROR: tcmalloc && jemalloc can't be used at the same time"
+    exit 1
+fi
+
+# Even if malloc_trim() is available, these non-libc memory allocators
+# do not support it.
+if test "$tcmalloc" = "yes" || test "$jemalloc" = "yes" ; then
+    if test "$malloc_trim" = "yes" ; then
+        echo "Disabling malloc_trim with non-libc memory allocator"
+    fi
+    malloc_trim="no"
+fi
+
+#######################################
+# malloc_trim
+
+if test "$malloc_trim" != "no" ; then
+    cat > $TMPC << EOF
+#include <malloc.h>
+int main(void) { malloc_trim(0); return 0; }
+EOF
+    if compile_prog "" "" ; then
+        malloc_trim="yes"
+    else
+        malloc_trim="no"
+    fi
+fi
+
+##########################################
+# tcmalloc probe
+
+if test "$tcmalloc" = "yes" ; then
+  cat > $TMPC << EOF
+#include <stdlib.h>
+int main(void) { malloc(1); return 0; }
+EOF
+
+  if compile_prog "" "-ltcmalloc" ; then
+    LIBS="-ltcmalloc $LIBS"
+  else
+    feature_not_found "tcmalloc" "install gperftools devel"
+  fi
+fi
+
+##########################################
+# jemalloc probe
+
+if test "$jemalloc" = "yes" ; then
+  cat > $TMPC << EOF
+#include <stdlib.h>
+int main(void) { malloc(1); return 0; }
+EOF
+
+  if compile_prog "" "-ljemalloc" ; then
+    LIBS="-ljemalloc $LIBS"
+  else
+    feature_not_found "jemalloc" "install jemalloc devel"
+  fi
+fi
+
+##########################################
+# signalfd probe
+signalfd="no"
+cat > $TMPC << EOF
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <signal.h>
+int main(void) { return syscall(SYS_signalfd, -1, NULL, _NSIG / 8); }
+EOF
+
+if compile_prog "" "" ; then
+  signalfd=yes
+fi
+
+# check for sync_file_range
+sync_file_range=no
+cat > $TMPC << EOF
+#include <fcntl.h>
+
+int main(void)
+{
+    sync_file_range(0, 0, 0, 0);
+    return 0;
+}
+EOF
+if compile_prog "" "" ; then
+  sync_file_range=yes
+fi
+
+# check for dup3
+dup3=no
+cat > $TMPC << EOF
+#include <unistd.h>
+
+int main(void)
+{
+    dup3(0, 0, 0);
+    return 0;
+}
+EOF
+if compile_prog "" "" ; then
+  dup3=yes
+fi
+
+# check for prctl(PR_SET_TIMERSLACK , ... ) support
+prctl_pr_set_timerslack=no
+cat > $TMPC << EOF
+#include <sys/prctl.h>
+
+int main(void)
+{
+    prctl(PR_SET_TIMERSLACK, 1, 0, 0, 0);
+    return 0;
+}
+EOF
+if compile_prog "" "" ; then
+  prctl_pr_set_timerslack=yes
+fi
+
+# check for epoll support
+epoll=no
+cat > $TMPC << EOF
+#include <sys/epoll.h>
+
+int main(void)
+{
+    epoll_create(0);
+    return 0;
+}
+EOF
+if compile_prog "" "" ; then
+  epoll=yes
+fi
+
+# clock_adjtime probe
+clock_adjtime=no
+cat > $TMPC <<EOF
+#include <time.h>
+
+int main(void)
+{
+    return clock_adjtime(0, 0);
+}
+EOF
+clock_adjtime=no
+if compile_prog "" "" ; then
+  clock_adjtime=yes
+fi
+
+# syncfs probe
+syncfs=no
+cat > $TMPC <<EOF
+#include <unistd.h>
+
+int main(void)
+{
+    return syncfs(0);
+}
+EOF
+syncfs=no
+if compile_prog "" "" ; then
+  syncfs=yes
+fi
+
 # Search for bswap_32 function
 byteswap_h=no
 cat > $TMPC << EOF
@@ -970,24 +1532,186 @@ elif compile_prog "" "$pthread_lib -lrt" ; then
     LIBS="$LIBS -lrt"
 fi
 
-########################################
-# check if we have valgrind/valgrind.h
-
-valgrind_h=no
-cat > $TMPC << EOF
-#include <valgrind/valgrind.h>
+# Check whether we need to link libutil for openpty()
+cat > $TMPC << EOF
+extern int openpty(int *am, int *as, char *name, void *termp, void *winp);
+int main(void) { return openpty(0, 0, 0, 0, 0); }
+EOF
+
+if ! compile_prog "" "" ; then
+  if compile_prog "" "-lutil" ; then
+    libs_softmmu="-lutil $libs_softmmu"
+    libs_tools="-lutil $libs_tools"
+  fi
+fi
+
+##########################################
+# check if we have madvise
+
+madvise=no
+cat > $TMPC << EOF
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <stddef.h>
+int main(void) { return madvise(NULL, 0, MADV_DONTNEED); }
+EOF
+if compile_prog "" "" ; then
+  madvise=yes
+fi
+
+##########################################
+# check if we have posix_madvise
+
+posix_madvise=no
+cat > $TMPC << EOF
+#include <sys/mman.h>
+#include <stddef.h>
+int main(void) { return posix_madvise(NULL, 0, POSIX_MADV_DONTNEED); }
+EOF
+if compile_prog "" "" ; then
+  posix_madvise=yes
+fi
+
+##########################################
+# check if we have posix_memalign()
+
+posix_memalign=no
+cat > $TMPC << EOF
+#include <stdlib.h>
+int main(void) {
+    void *p;
+    return posix_memalign(&p, 8, 8);
+}
+EOF
+if compile_prog "" "" ; then
+  posix_memalign=yes
+fi
+
+##########################################
+# check if we have posix_syslog
+
+posix_syslog=no
+cat > $TMPC << EOF
+#include <syslog.h>
+int main(void) { openlog("qemu", LOG_PID, LOG_DAEMON); syslog(LOG_INFO, "configure"); return 0; }
+EOF
+if compile_prog "" "" ; then
+  posix_syslog=yes
+fi
+
+##########################################
+# check if we have sem_timedwait
+
+sem_timedwait=no
+cat > $TMPC << EOF
+#include <semaphore.h>
+int main(void) { sem_t s; struct timespec t = {0}; return sem_timedwait(&s, &t); }
+EOF
+if compile_prog "" "" ; then
+  sem_timedwait=yes
+fi
+
+##########################################
+# check if we have strchrnul
+
+strchrnul=no
+cat > $TMPC << EOF
+#include <string.h>
+int main(void);
+// Use a haystack that the compiler shouldn't be able to constant fold
+char *haystack = (char*)&main;
+int main(void) { return strchrnul(haystack, 'x') != &haystack[6]; }
+EOF
+if compile_prog "" "" ; then
+  strchrnul=yes
+fi
+
+#########################################
+# check if we have st_atim
+
+st_atim=no
+cat > $TMPC << EOF
+#include <sys/stat.h>
+#include <stddef.h>
+int main(void) { return offsetof(struct stat, st_atim); }
+EOF
+if compile_prog "" "" ; then
+  st_atim=yes
+fi
+
+##########################################
+# check if we have open_by_handle_at
+
+open_by_handle_at=no
+cat > $TMPC << EOF
+#include <fcntl.h>
+#if !defined(AT_EMPTY_PATH)
+# error missing definition
+#else
+int main(void) { struct file_handle fh; return open_by_handle_at(0, &fh, 0); }
+#endif
+EOF
+if compile_prog "" "" ; then
+  open_by_handle_at=yes
+fi
+
+########################################
+# check if we have linux/magic.h
+
+linux_magic_h=no
+cat > $TMPC << EOF
+#include <linux/magic.h>
 int main(void) {
   return 0;
 }
 EOF
 if compile_prog "" "" ; then
-    valgrind_h=yes
+    linux_magic_h=yes
+fi
+
+########################################
+# check whether we can disable warning option with a pragma (this is needed
+# to silence warnings in the headers of some versions of external libraries).
+# This test has to be compiled with -Werror as otherwise an unknown pragma is
+# only a warning.
+#
+# If we can't selectively disable warning in the code, disable -Werror so that
+# the build doesn't fail anyway.
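(How the probed pragma is used in practice, as an illustrative sketch rather
than code from this patch: one warning is silenced around the offending spot
instead of dropping -Werror for the whole build.)

    $ cat > quiet.c << 'SRC'
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wunused-variable"
    static void probe(void) { int unused; }   /* would warn under -Wall */
    #pragma GCC diagnostic pop
    int main(void) { probe(); return 0; }
    SRC
    $ cc -Wall -Werror -c quiet.c   # compiles cleanly thanks to the pragma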
+
+pragma_disable_unused_but_set=no
+cat > $TMPC << EOF
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
+#pragma GCC diagnostic pop
+
+int main(void) {
+    return 0;
+}
+EOF
+if compile_prog "-Werror" "" ; then
+    pragma_diagnostic_available=yes
+else
+    werror=no
+fi
+
+########################################
+# check if environ is declared
+
+has_environ=no
+cat > $TMPC << EOF
+#include <unistd.h>
+int main(void) {
+    environ = 0;
+    return 0;
+}
+EOF
+if compile_prog "" "" ; then
+    has_environ=yes
+fi
+
 ########################################
 # check if cpuid.h is usable.
 
-cpuid_h=no
 cat > $TMPC << EOF
 #include <cpuid.h>
 int main(void) {
@@ -1009,16 +1733,66 @@ if compile_prog "" "" ; then
     cpuid_h=yes
 fi
 
+##########################################
+# avx2 optimization requirement check
+#
+# There is no point enabling this if cpuid.h is not usable,
+# since we won't be able to select the new routines.
+
+if test "$cpuid_h" = "yes" && test "$avx2_opt" != "no"; then
+  cat > $TMPC << EOF
+#pragma GCC push_options
+#pragma GCC target("avx2")
+#include <cpuid.h>
+#include <immintrin.h>
+static int bar(void *a) {
+    __m256i x = *(__m256i *)a;
+    return _mm256_testz_si256(x, x);
+}
+int main(int argc, char *argv[]) { return bar(argv[0]); }
+EOF
+  if compile_object "" ; then
+    avx2_opt="yes"
+  else
+    avx2_opt="no"
+  fi
+fi
+
+##########################################
+# avx512f optimization requirement check
+#
+# There is no point enabling this if cpuid.h is not usable,
+# since we won't be able to select the new routines.
+# by default, it is turned off.
+# if user explicitly want to enable it, check environment
+
+if test "$cpuid_h" = "yes" && test "$avx512f_opt" = "yes"; then
+  cat > $TMPC << EOF
+#pragma GCC push_options
+#pragma GCC target("avx512f")
+#include <cpuid.h>
+#include <immintrin.h>
+static int bar(void *a) {
+    __m512i x = *(__m512i *)a;
+    return _mm512_test_epi64_mask(x, x);
+}
+int main(int argc, char *argv[])
+{
+    return bar(argv[0]);
+}
EOF
+  if ! compile_object "" ; then
+    avx512f_opt="no"
+  fi
+else
+  avx512f_opt="no"
+fi
+
 ########################################
 # check if __[u]int128_t is usable.
 
 int128=no
 cat > $TMPC << EOF
-#if defined(__clang_major__) && defined(__clang_minor__)
-# if ((__clang_major__ < 3) || (__clang_major__ == 3) && (__clang_minor__ < 2))
-#  error __int128_t does not work in CLANG before 3.2
-# endif
-#endif
 __int128_t a;
 __uint128_t b;
 int main (void) {
@@ -1032,6 +1806,363 @@ if compile_prog "" "" ; then
     int128=yes
 fi
 
+#########################################
+# See if 128-bit atomic operations are supported.
+
+atomic128=no
+if test "$int128" = "yes"; then
+  cat > $TMPC << EOF
+int main(void)
+{
+  unsigned __int128 x = 0, y = 0;
+  y = __atomic_load_16(&x, 0);
+  __atomic_store_16(&x, y, 0);
+  __atomic_compare_exchange_16(&x, &y, x, 0, 0, 0);
+  return 0;
+}
+EOF
+  if compile_prog "" "" ; then
+    atomic128=yes
+  fi
+fi
+
+cmpxchg128=no
+if test "$int128" = yes && test "$atomic128" = no; then
+  cat > $TMPC << EOF
+int main(void)
+{
+  unsigned __int128 x = 0, y = 0;
+  __sync_val_compare_and_swap_16(&x, y, x);
+  return 0;
+}
+EOF
+  if compile_prog "" "" ; then
+    cmpxchg128=yes
+  fi
+fi
+
+#########################################
+# See if 64-bit atomic operations are supported.
+# Note that without __atomic builtins, we can only
+# assume atomic loads/stores max at pointer size.
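(A sketch of the distinction the note above draws, assuming gcc/clang
builtins: with the __atomic interface a 32-bit host can still get 64-bit
atomics, possibly via libatomic, while the legacy __sync path is only safe
when pointers are at least 64 bits wide, which the negative-size-array trick
asserts at compile time.)

    $ cat > atomic64.c << 'SRC'
    #include <stdint.h>
    int main(void)
    {
        uint64_t x = 0, y;
    #ifdef __ATOMIC_RELAXED
        y = __atomic_load_8(&x, __ATOMIC_RELAXED);  /* may call libatomic */
    #else
        /* Legacy path: only valid when pointers are >= 64 bits wide. */
        typedef char host64[sizeof(void *) >= sizeof(uint64_t) ? 1 : -1];
        y = __sync_fetch_and_add(&x, 0);
    #endif
        return (int)y;
    }
    SRC
    $ cc -c atomic64.c    # add -latomic when linking, if required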
+ +cat > $TMPC << EOF +#include +int main(void) +{ + uint64_t x = 0, y = 0; +#ifdef __ATOMIC_RELAXED + y = __atomic_load_8(&x, 0); + __atomic_store_8(&x, y, 0); + __atomic_compare_exchange_8(&x, &y, x, 0, 0, 0); + __atomic_exchange_8(&x, y, 0); + __atomic_fetch_add_8(&x, y, 0); +#else + typedef char is_host64[sizeof(void *) >= sizeof(uint64_t) ? 1 : -1]; + __sync_lock_test_and_set(&x, y); + __sync_val_compare_and_swap(&x, y, 0); + __sync_fetch_and_add(&x, y); +#endif + return 0; +} +EOF +if compile_prog "" "" ; then + atomic64=yes +fi + +######################################### +# See if --dynamic-list is supported by the linker +ld_dynamic_list="no" +if test "$static" = "no" ; then + cat > $TMPTXT < $TMPC < +void foo(void); + +void foo(void) +{ + printf("foo\n"); +} + +int main(void) +{ + foo(); + return 0; +} +EOF + + if compile_prog "" "-Wl,--dynamic-list=$TMPTXT" ; then + ld_dynamic_list="yes" + fi +fi + +######################################### +# See if -exported_symbols_list is supported by the linker + +ld_exported_symbols_list="no" +if test "$static" = "no" ; then + cat > $TMPTXT < $TMPC << EOF +int x = 1; +extern const int y __attribute__((alias("x"))); +int main(void) { return 0; } +EOF +if compile_prog "" "" ; then + attralias=yes +fi + +######################################## +# check if getauxval is available. + +getauxval=no +cat > $TMPC << EOF +#include +int main(void) { + return getauxval(AT_HWCAP) == 0; +} +EOF +if compile_prog "" "" ; then + getauxval=yes +fi + +######################################## +# check if ccache is interfering with +# semantic analysis of macros + +unset CCACHE_CPP2 +ccache_cpp2=no +cat > $TMPC << EOF +static const int Z = 1; +#define fn() ({ Z; }) +#define TAUT(X) ((X) == Z) +#define PAREN(X, Y) (X == Y) +#define ID(X) (X) +int main(int argc, char *argv[]) +{ + int x = 0, y = 0; + x = ID(x); + x = fn(); + fn(); + if (PAREN(x, y)) return 0; + if (TAUT(Z)) return 0; + return 0; +} +EOF + +if ! compile_object "-Werror"; then + ccache_cpp2=yes +fi + +########################################## +# check for usable membarrier system call +if test "$membarrier" = "yes"; then + have_membarrier=no + if test "$mingw32" = "yes" ; then + have_membarrier=yes + elif test "$linux" = "yes" ; then + cat > $TMPC << EOF + #include + #include + #include + #include + int main(void) { + syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0); + syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0); + exit(0); + } +EOF + if compile_prog "" "" ; then + have_membarrier=yes + fi + fi + if test "$have_membarrier" = "no"; then + feature_not_found "membarrier" "membarrier system call not available" + fi +else + # Do not enable it by default even for Mingw32, because it doesn't + # work on Wine. + membarrier=no +fi + +################################################# +# Sparc implicitly links with --relax, which is +# incompatible with -r, so --no-relax should be +# given. It does no harm to give it on other +# platforms too. + +# Note: the prototype is needed since QEMU_CFLAGS +# contains -Wmissing-prototypes +cat > $TMPC << EOF +extern int foo(void); +int foo(void) { return 0; } +EOF +if ! 
+#################################################
+# Sparc implicitly links with --relax, which is
+# incompatible with -r, so --no-relax should be
+# given. It does no harm to give it on other
+# platforms too.
+
+# Note: the prototype is needed since QEMU_CFLAGS
+# contains -Wmissing-prototypes
+cat > $TMPC << EOF
+extern int foo(void);
+int foo(void) { return 0; }
+EOF
+if ! compile_object ""; then
+  error_exit "Failed to compile object file for LD_REL_FLAGS test"
+fi
+for i in '-Wl,-r -Wl,--no-relax' -Wl,-r -r; do
+  if do_cc -nostdlib $i -o $TMPMO $TMPO; then
+    LD_REL_FLAGS=$i
+    break
+  fi
+done
+
+##########################################
+# check for sysmacros.h
+
+have_sysmacros=no
+cat > $TMPC << EOF
+#include <sys/sysmacros.h>
+int main(void) {
+    return makedev(0, 0);
+}
+EOF
+if compile_prog "" "" ; then
+    have_sysmacros=yes
+fi
+
+##########################################
+# check for _Static_assert()
+
+have_static_assert=no
+cat > $TMPC << EOF
+_Static_assert(1, "success");
+int main(void) {
+    return 0;
+}
+EOF
+if compile_prog "" "" ; then
+    have_static_assert=yes
+fi
+
+##########################################
+# check for utmpx.h, it is missing e.g. on OpenBSD
+
+have_utmpx=no
+cat > $TMPC << EOF
+#include <utmpx.h>
+struct utmpx user_info;
+int main(void) {
+    return 0;
+}
+EOF
+if compile_prog "" "" ; then
+    have_utmpx=yes
+fi
+
+##########################################
+# check for getrandom()
+
+have_getrandom=no
+cat > $TMPC << EOF
+#include <sys/random.h>
+int main(void) {
+    return getrandom(0, 0, GRND_NONBLOCK);
+}
+EOF
+if compile_prog "" "" ; then
+    have_getrandom=yes
+fi
+
+##########################################
+# checks for sanitizers
+
+have_asan=no
+have_ubsan=no
+have_asan_iface_h=no
+have_asan_iface_fiber=no
+
+if test "$sanitizers" = "yes" ; then
+  write_c_skeleton
+  if compile_prog "$CPU_CFLAGS -Werror -fsanitize=address" ""; then
+      have_asan=yes
+  fi
+
+  # We could use a simple skeleton for flags checks, but this also
+  # detects the static-linking issue of ubsan; see also:
+  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84285
+  cat > $TMPC << EOF
+#include <stdlib.h>
+int main(void) {
+    void *tmp = malloc(10);
+    return *(int *)(tmp + 2);
+}
+EOF
+  if compile_prog "$CPU_CFLAGS -Werror -fsanitize=undefined" ""; then
+      have_ubsan=yes
+  fi
+
+  if check_include "sanitizer/asan_interface.h" ; then
+      have_asan_iface_h=yes
+  fi
+
+  cat > $TMPC << EOF
+#include <sanitizer/asan_interface.h>
+int main(void) {
+  __sanitizer_start_switch_fiber(0, 0, 0);
+  return 0;
+}
+EOF
+  if compile_prog "$CPU_CFLAGS -Werror -fsanitize=address" "" ; then
+      have_asan_iface_fiber=yes
+  fi
+fi
+
+##########################################
+# check for the Apple Silicon JIT write-protect function
+
+if [ "$darwin" = "yes" ] ; then
+  cat > $TMPC << EOF
+#include <pthread.h>
+int main() { pthread_jit_write_protect_np(0); return 0; }
+EOF
+  if ! compile_prog ""; then
+    have_pthread_jit_protect='no'
+  else
+    have_pthread_jit_protect='yes'
+  fi
+fi
+
+##########################################
+# End of CC checks
+# After here, no more $cc or $ld runs
+
+write_c_skeleton
+
+if test "$have_asan" = "yes"; then
+  QEMU_CFLAGS="-fsanitize=address $QEMU_CFLAGS"
+  QEMU_LDFLAGS="-fsanitize=address $QEMU_LDFLAGS"
+  if test "$have_asan_iface_h" = "no" ; then
+      echo "ASAN build enabled, but ASAN header missing." \
+           "Without code annotation, the report may be inferior."
+  elif test "$have_asan_iface_fiber" = "no" ; then
+      echo "ASAN build enabled, but ASAN header is too old." \
+           "Without code annotation, the report may be inferior."
+ fi +fi +if test "$have_ubsan" = "yes"; then + QEMU_CFLAGS="-fsanitize=undefined $QEMU_CFLAGS" + QEMU_LDFLAGS="-fsanitize=undefined $QEMU_LDFLAGS" +fi + # Now we've finished running tests it's OK to add -Werror to the compiler flags if test "$werror" = "yes"; then QEMU_CFLAGS="-Werror $QEMU_CFLAGS" @@ -1039,39 +2170,141 @@ fi if test "$solaris" = "no" ; then if $ld --version 2>/dev/null | grep "GNU ld" >/dev/null 2>/dev/null ; then - LDFLAGS="-Wl,--warn-common $LDFLAGS" + QEMU_LDFLAGS="-Wl,--warn-common $QEMU_LDFLAGS" fi fi # Use ASLR, no-SEH and DEP if available if test "$mingw32" = "yes" ; then for flag in --dynamicbase --no-seh --nxcompat; do - if $ld --help 2>/dev/null | grep ".$flag" >/dev/null 2>/dev/null ; then - LDFLAGS="-Wl,$flag $LDFLAGS" + if ld_has $flag ; then + QEMU_LDFLAGS="-Wl,$flag $QEMU_LDFLAGS" fi done fi +# Disable OpenBSD W^X if available +if test "$tcg" = "yes" && test "$targetos" = "OpenBSD"; then + cat > $TMPC < $TMPC < $TMPCXX < $config_host_mak echo >> $config_host_mak echo all: >> $config_host_mak -echo "extra_cflags=$EXTRA_CFLAGS" >> $config_host_mak -echo "extra_ldflags=$EXTRA_LDFLAGS" >> $config_host_mak +echo "prefix=$prefix" >> $config_host_mak +echo "bindir=$bindir" >> $config_host_mak +echo "libdir=$libdir" >> $config_host_mak +echo "libexecdir=$libexecdir" >> $config_host_mak +echo "includedir=$includedir" >> $config_host_mak +echo "sysconfdir=$sysconfdir" >> $config_host_mak +echo "qemu_confdir=$qemu_confdir" >> $config_host_mak +if test "$mingw32" = "no" ; then + echo "qemu_localstatedir=$local_statedir" >> $config_host_mak +fi +echo "qemu_helperdir=$libexecdir" >> $config_host_mak +echo "qemu_localedir=$qemu_localedir" >> $config_host_mak +echo "libs_cpu=$libs_cpu" >> $config_host_mak +echo "libs_softmmu=$libs_softmmu" >> $config_host_mak +echo "GIT=$git" >> $config_host_mak +echo "GIT_SUBMODULES=$git_submodules" >> $config_host_mak +echo "GIT_UPDATE=$git_update" >> $config_host_mak echo "ARCH=$ARCH" >> $config_host_mak -if test "$debug_tcg" = "yes" ; then - echo "CONFIG_DEBUG_TCG=y" >> $config_host_mak -fi if test "$strip_opt" = "yes" ; then echo "STRIP=${strip}" >> $config_host_mak fi @@ -1095,7 +2340,7 @@ if test "$bigendian" = "yes" ; then fi if test "$mingw32" = "yes" ; then echo "CONFIG_WIN32=y" >> $config_host_mak - rc_version=`cat $source_path/VERSION` + rc_version=$(cat $source_path/VERSION) version_major=${rc_version%%.*} rc_version=${rc_version#*.} version_minor=${rc_version%%.*} @@ -1112,32 +2357,119 @@ if test "$linux" = "yes" ; then echo "CONFIG_LINUX=y" >> $config_host_mak fi +if test "$darwin" = "yes" ; then + echo "CONFIG_DARWIN=y" >> $config_host_mak +fi + if test "$solaris" = "yes" ; then echo "CONFIG_SOLARIS=y" >> $config_host_mak - echo "CONFIG_SOLARIS_VERSION=$solarisrev" >> $config_host_mak - if test "$needs_libsunmath" = "yes" ; then - echo "CONFIG_NEEDS_LIBSUNMATH=y" >> $config_host_mak - fi +fi +if test "$haiku" = "yes" ; then + echo "CONFIG_HAIKU=y" >> $config_host_mak fi if test "$static" = "yes" ; then echo "CONFIG_STATIC=y" >> $config_host_mak fi +qemu_version=$(head $source_path/VERSION) +echo "VERSION=$qemu_version" >>$config_host_mak +echo "PKGVERSION=$pkgversion" >>$config_host_mak echo "SRC_PATH=$source_path" >> $config_host_mak echo "TARGET_DIRS=$target_list" >> $config_host_mak +if test "$sync_file_range" = "yes" ; then + echo "CONFIG_SYNC_FILE_RANGE=y" >> $config_host_mak +fi +if test "$dup3" = "yes" ; then + echo "CONFIG_DUP3=y" >> $config_host_mak +fi +if test "$prctl_pr_set_timerslack" = "yes" ; then + echo 
"CONFIG_PRCTL_PR_SET_TIMERSLACK=y" >> $config_host_mak +fi +if test "$epoll" = "yes" ; then + echo "CONFIG_EPOLL=y" >> $config_host_mak +fi +if test "$clock_adjtime" = "yes" ; then + echo "CONFIG_CLOCK_ADJTIME=y" >> $config_host_mak +fi +if test "$syncfs" = "yes" ; then + echo "CONFIG_SYNCFS=y" >> $config_host_mak +fi +if test "$sem_timedwait" = "yes" ; then + echo "CONFIG_SEM_TIMEDWAIT=y" >> $config_host_mak +fi +if test "$strchrnul" = "yes" ; then + echo "HAVE_STRCHRNUL=y" >> $config_host_mak +fi +if test "$st_atim" = "yes" ; then + echo "HAVE_STRUCT_STAT_ST_ATIM=y" >> $config_host_mak +fi if test "$byteswap_h" = "yes" ; then echo "CONFIG_BYTESWAP_H=y" >> $config_host_mak fi if test "$bswap_h" = "yes" ; then echo "CONFIG_MACHINE_BSWAP_H=y" >> $config_host_mak fi +if test "$have_broken_size_max" = "yes" ; then + echo "HAVE_BROKEN_SIZE_MAX=y" >> $config_host_mak +fi + +if test "$membarrier" = "yes" ; then + echo "CONFIG_MEMBARRIER=y" >> $config_host_mak +fi +if test "$signalfd" = "yes" ; then + echo "CONFIG_SIGNALFD=y" >> $config_host_mak +fi +if test "$tcg" = "yes"; then + echo "CONFIG_TCG=y" >> $config_host_mak +fi +if test "$madvise" = "yes" ; then + echo "CONFIG_MADVISE=y" >> $config_host_mak +fi +if test "$posix_madvise" = "yes" ; then + echo "CONFIG_POSIX_MADVISE=y" >> $config_host_mak +fi +if test "$posix_memalign" = "yes" ; then + echo "CONFIG_POSIX_MEMALIGN=y" >> $config_host_mak +fi + +if test "$malloc_trim" = "yes" ; then + echo "CONFIG_MALLOC_TRIM=y" >> $config_host_mak +fi + +if test "$avx2_opt" = "yes" ; then + echo "CONFIG_AVX2_OPT=y" >> $config_host_mak +fi + +if test "$avx512f_opt" = "yes" ; then + echo "CONFIG_AVX512F_OPT=y" >> $config_host_mak +fi # XXX: suppress that if [ "$bsd" = "yes" ] ; then echo "CONFIG_BSD=y" >> $config_host_mak fi -if test "$valgrind_h" = "yes" ; then - echo "CONFIG_VALGRIND_H=y" >> $config_host_mak +if test "$debug_stack_usage" = "yes" ; then + echo "CONFIG_DEBUG_STACK_USAGE=y" >> $config_host_mak +fi + +if test "$open_by_handle_at" = "yes" ; then + echo "CONFIG_OPEN_BY_HANDLE=y" >> $config_host_mak +fi + +if test "$linux_magic_h" = "yes" ; then + echo "CONFIG_LINUX_MAGIC_H=y" >> $config_host_mak +fi + +if test "$pragma_diagnostic_available" = "yes" ; then + echo "CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE=y" >> $config_host_mak +fi + +if test "$have_asan_iface_fiber" = "yes" ; then + echo "CONFIG_ASAN_IFACE_FIBER=y" >> $config_host_mak +fi + +if test "$has_environ" = "yes" ; then + echo "CONFIG_HAS_ENVIRON=y" >> $config_host_mak fi if test "$cpuid_h" = "yes" ; then @@ -1148,161 +2480,280 @@ if test "$int128" = "yes" ; then echo "CONFIG_INT128=y" >> $config_host_mak fi -if test "$ARCH" = "sparc64" ; then - QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/sparc $QEMU_INCLUDES" -elif test "$ARCH" = "s390x" ; then - QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/s390 $QEMU_INCLUDES" -elif test "$ARCH" = "x86_64" -o "$ARCH" = "x32" ; then - QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/i386 $QEMU_INCLUDES" -elif test "$ARCH" = "ppc64" ; then - QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/ppc $QEMU_INCLUDES" -else - QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES" +if test "$atomic128" = "yes" ; then + echo "CONFIG_ATOMIC128=y" >> $config_host_mak fi -QEMU_INCLUDES="-I\$(SRC_PATH)/tcg $QEMU_INCLUDES" +if test "$cmpxchg128" = "yes" ; then + echo "CONFIG_CMPXCHG128=y" >> $config_host_mak +fi + +if test "$atomic64" = "yes" ; then + echo "CONFIG_ATOMIC64=y" >> $config_host_mak +fi + +if test "$attralias" = "yes" ; then + echo "CONFIG_ATTRIBUTE_ALIAS=y" >> $config_host_mak +fi + +if test "$getauxval" = 
"yes" ; then + echo "CONFIG_GETAUXVAL=y" >> $config_host_mak +fi + +if test "$have_sysmacros" = "yes" ; then + echo "CONFIG_SYSMACROS=y" >> $config_host_mak +fi + +if test "$have_static_assert" = "yes" ; then + echo "CONFIG_STATIC_ASSERT=y" >> $config_host_mak +fi + +if test "$have_utmpx" = "yes" ; then + echo "HAVE_UTMPX=y" >> $config_host_mak +fi +if test "$have_getrandom" = "yes" ; then + echo "CONFIG_GETRANDOM=y" >> $config_host_mak +fi + +if test "$have_pthread_jit_protect" = "yes" ; then + echo "HAVE_PTHREAD_JIT_PROTECT=y" >> $config_host_mak +fi + +# Hold two types of flag: +# CONFIG_THREAD_SETNAME_BYTHREAD - we've got a way of setting the name on +# a thread we have a handle to +# CONFIG_PTHREAD_SETNAME_NP_W_TID - A way of doing it on a particular +# platform +if test "$pthread_setname_np_w_tid" = "yes" ; then + echo "CONFIG_THREAD_SETNAME_BYTHREAD=y" >> $config_host_mak + echo "CONFIG_PTHREAD_SETNAME_NP_W_TID=y" >> $config_host_mak +elif test "$pthread_setname_np_wo_tid" = "yes" ; then + echo "CONFIG_THREAD_SETNAME_BYTHREAD=y" >> $config_host_mak + echo "CONFIG_PTHREAD_SETNAME_NP_WO_TID=y" >> $config_host_mak +fi + +if test "$ARCH" = "sparc64" ; then + QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/sparc $QEMU_INCLUDES" +elif test "$ARCH" = "s390x" ; then + QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/s390 $QEMU_INCLUDES" +elif test "$ARCH" = "x86_64" || test "$ARCH" = "x32" ; then + QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/i386 $QEMU_INCLUDES" +elif test "$ARCH" = "ppc64" ; then + QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/ppc $QEMU_INCLUDES" +elif test "$ARCH" = "riscv32" || test "$ARCH" = "riscv64" ; then + QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/riscv $QEMU_INCLUDES" +else + QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES" +fi + +echo "TOOLS=$tools" >> $config_host_mak echo "MAKE=$make" >> $config_host_mak +echo "INSTALL=$install" >> $config_host_mak +echo "INSTALL_DIR=$install -d -m 0755" >> $config_host_mak +echo "INSTALL_DATA=$install -c -m 0644" >> $config_host_mak +echo "INSTALL_PROG=$install -c -m 0755" >> $config_host_mak +echo "INSTALL_LIB=$install -c -m 0644" >> $config_host_mak echo "CC=$cc" >> $config_host_mak -echo "CC_I386=$cc_i386" >> $config_host_mak +if $iasl -h > /dev/null 2>&1; then + echo "IASL=$iasl" >> $config_host_mak +fi echo "HOST_CC=$host_cc" >> $config_host_mak +echo "CXX=$cxx" >> $config_host_mak echo "OBJCC=$objcc" >> $config_host_mak echo "AR=$ar" >> $config_host_mak echo "ARFLAGS=$ARFLAGS" >> $config_host_mak echo "AS=$as" >> $config_host_mak +echo "CCAS=$ccas" >> $config_host_mak echo "CPP=$cpp" >> $config_host_mak echo "OBJCOPY=$objcopy" >> $config_host_mak echo "LD=$ld" >> $config_host_mak +echo "RANLIB=$ranlib" >> $config_host_mak echo "NM=$nm" >> $config_host_mak +echo "PKG_CONFIG=$pkg_config_exe" >> $config_host_mak echo "CFLAGS=$CFLAGS" >> $config_host_mak echo "CFLAGS_NOPIE=$CFLAGS_NOPIE" >> $config_host_mak echo "QEMU_CFLAGS=$QEMU_CFLAGS" >> $config_host_mak +echo "QEMU_CXXFLAGS=$QEMU_CXXFLAGS" >> $config_host_mak echo "QEMU_INCLUDES=$QEMU_INCLUDES" >> $config_host_mak -echo "LDFLAGS=$LDFLAGS" >> $config_host_mak +echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak echo "LDFLAGS_NOPIE=$LDFLAGS_NOPIE" >> $config_host_mak +echo "LD_REL_FLAGS=$LD_REL_FLAGS" >> $config_host_mak +echo "LD_I386_EMULATION=$ld_i386_emulation" >> $config_host_mak echo "LIBS+=$LIBS" >> $config_host_mak +echo "LIBS_TOOLS+=$libs_tools" >> $config_host_mak +echo "PTHREAD_LIB=$PTHREAD_LIB" >> $config_host_mak echo "EXESUF=$EXESUF" >> $config_host_mak echo "DSOSUF=$DSOSUF" 
>> $config_host_mak echo "LDFLAGS_SHARED=$LDFLAGS_SHARED" >> $config_host_mak -echo "TRANSLATE_OPT_CFLAGS=$TRANSLATE_OPT_CFLAGS" >> $config_host_mak for target in $target_list; do target_dir="$target" config_target_mak=$target_dir/config-target.mak -target_name=`echo $target | cut -d '-' -f 1` -target_bigendian="no" - +target_name=$(echo $target | cut -d '-' -f 1) +target_aligned_only="no" case "$target_name" in - aarch64eb|armeb|lm32|m68k|microblaze|mips|mipsn32|mips64|moxie|or32|ppc|ppcemb|ppc64|ppc64abi32|s390x|sh4eb|sparc|sparc64|sparc32plus|xtensaeb) - target_bigendian=yes + alpha|hppa|mips64el|mips64|mipsel|mips|mipsn32|mipsn32el|sh4|sh4eb|sparc|sparc64|sparc32plus|xtensa|xtensaeb) + target_aligned_only="yes" ;; esac -target_softmmu="yes" -case "$target" in - ${target_name}-softmmu) - target_softmmu="yes" - ;; - *) - error_exit "Target '$target' not recognised" - exit 1 - ;; +target_bigendian="no" +case "$target_name" in + armeb|aarch64eb|hppa|lm32|m68k|microblaze|mips|mipsn32|mips64|moxie|or1k|ppc|ppc64|ppc64abi32|s390x|sh4eb|sparc|sparc64|sparc32plus|xtensaeb) + target_bigendian="yes" + ;; esac +target_user_only="no" +target_linux_user="no" +target_bsd_user="no" +target_softmmu="yes" mkdir -p $target_dir echo "# Automatically generated by configure - do not modify" > $config_target_mak -bflt="no" +mttcg="no" TARGET_ARCH="$target_name" TARGET_BASE_ARCH="" +TARGET_ABI_DIR="" case "$target_name" in i386) + mttcg="yes" + TARGET_SYSTBL_ABI=i386 ;; x86_64) TARGET_BASE_ARCH=i386 + TARGET_SYSTBL_ABI=common,64 + mttcg="yes" ;; alpha) + mttcg="yes" + TARGET_SYSTBL_ABI=common ;; arm|armeb) TARGET_ARCH=arm - bflt="yes" + TARGET_SYSTBL_ABI=common,oabi + mttcg="yes" ;; aarch64|aarch64eb) - TARGET_BASE_ARCH=arm TARGET_ARCH=aarch64 - bflt="yes" + TARGET_BASE_ARCH=arm + mttcg="yes" ;; cris) ;; + hppa) + mttcg="yes" + TARGET_SYSTBL_ABI=common,32 + ;; lm32) ;; m68k) - bflt="yes" + TARGET_SYSTBL_ABI=common ;; microblaze|microblazeel) TARGET_ARCH=microblaze - bflt="yes" + TARGET_SYSTBL_ABI=common + echo "TARGET_ABI32=y" >> $config_target_mak ;; mips|mipsel) + mttcg="yes" TARGET_ARCH=mips echo "TARGET_ABI_MIPSO32=y" >> $config_target_mak + TARGET_SYSTBL_ABI=o32 ;; mipsn32|mipsn32el) + mttcg="yes" TARGET_ARCH=mips64 TARGET_BASE_ARCH=mips echo "TARGET_ABI_MIPSN32=y" >> $config_target_mak echo "TARGET_ABI32=y" >> $config_target_mak + TARGET_SYSTBL_ABI=n32 ;; mips64|mips64el) + mttcg="no" TARGET_ARCH=mips64 TARGET_BASE_ARCH=mips echo "TARGET_ABI_MIPSN64=y" >> $config_target_mak - ;; - tricore) + TARGET_SYSTBL_ABI=n64 ;; moxie) ;; - or32) + nios2) + ;; + or1k) TARGET_ARCH=openrisc TARGET_BASE_ARCH=openrisc ;; ppc) - ;; - ppcemb) - TARGET_BASE_ARCH=ppc + TARGET_SYSTBL_ABI=common,nospu,32 ;; ppc64) TARGET_BASE_ARCH=ppc + TARGET_ABI_DIR=ppc + TARGET_SYSTBL_ABI=common,nospu,64 + mttcg=yes ;; ppc64le) TARGET_ARCH=ppc64 TARGET_BASE_ARCH=ppc + TARGET_ABI_DIR=ppc + TARGET_SYSTBL_ABI=common,nospu,64 + mttcg=yes ;; ppc64abi32) TARGET_ARCH=ppc64 TARGET_BASE_ARCH=ppc + TARGET_ABI_DIR=ppc + TARGET_SYSTBL_ABI=common,nospu,32 echo "TARGET_ABI32=y" >> $config_target_mak ;; + riscv32) + TARGET_BASE_ARCH=riscv + TARGET_ABI_DIR=riscv + mttcg=yes + ;; + riscv64) + TARGET_BASE_ARCH=riscv + TARGET_ABI_DIR=riscv + mttcg=yes + ;; + rx) + TARGET_ARCH=rx + target_compiler=$cross_cc_rx + ;; sh4|sh4eb) TARGET_ARCH=sh4 - bflt="yes" + TARGET_SYSTBL_ABI=common ;; sparc) + TARGET_SYSTBL_ABI=common,32 ;; sparc64) TARGET_BASE_ARCH=sparc + TARGET_SYSTBL_ABI=common,64 ;; sparc32plus) TARGET_ARCH=sparc64 TARGET_BASE_ARCH=sparc + 
TARGET_ABI_DIR=sparc + TARGET_SYSTBL_ABI=common,32 echo "TARGET_ABI32=y" >> $config_target_mak ;; s390x) + TARGET_SYSTBL_ABI=common,64 + mttcg=yes + ;; + tilegx) + ;; + tricore) ;; unicore32) ;; xtensa|xtensaeb) TARGET_ARCH=xtensa + TARGET_SYSTBL_ABI=common + mttcg="yes" ;; *) error_exit "Unsupported target CPU" @@ -1313,24 +2764,52 @@ if [ "$TARGET_BASE_ARCH" = "" ]; then TARGET_BASE_ARCH=$TARGET_ARCH fi -symlink "$source_path/Makefile.target" "$target_dir/Makefile" +#symlink "$source_path/Makefile.target" "$target_dir/Makefile" upper() { echo "$@"| LC_ALL=C tr '[a-z]' '[A-Z]' } -target_arch_name="`upper $TARGET_ARCH`" +target_arch_name="$(upper $TARGET_ARCH)" echo "TARGET_$target_arch_name=y" >> $config_target_mak echo "TARGET_NAME=$target_name" >> $config_target_mak echo "TARGET_BASE_ARCH=$TARGET_BASE_ARCH" >> $config_target_mak +if [ "$TARGET_ABI_DIR" = "" ]; then + TARGET_ABI_DIR=$TARGET_ARCH +fi +echo "TARGET_ABI_DIR=$TARGET_ABI_DIR" >> $config_target_mak +if [ "$HOST_VARIANT_DIR" != "" ]; then + echo "HOST_VARIANT_DIR=$HOST_VARIANT_DIR" >> $config_target_mak +fi +if [ "$TARGET_SYSTBL_ABI" != "" ]; then + echo "TARGET_SYSTBL_ABI=$TARGET_SYSTBL_ABI" >> $config_target_mak +fi + +if test "$target_aligned_only" = "yes" ; then + echo "TARGET_ALIGNED_ONLY=y" >> $config_target_mak +fi if test "$target_bigendian" = "yes" ; then echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak fi if test "$target_softmmu" = "yes" ; then echo "CONFIG_SOFTMMU=y" >> $config_target_mak + if test "$mttcg" = "yes" ; then + echo "TARGET_SUPPORTS_MTTCG=y" >> $config_target_mak + fi +fi +if test "$target_user_only" = "yes" ; then + echo "CONFIG_USER_ONLY=y" >> $config_target_mak +fi +if test "$target_linux_user" = "yes" ; then + echo "CONFIG_LINUX_USER=y" >> $config_target_mak fi -# generate QEMU_CFLAGS/LDFLAGS for targets +if test "$target_bsd_user" = "yes" ; then + echo "CONFIG_BSD_USER=y" >> $config_target_mak +fi + + +# generate QEMU_CFLAGS/QEMU_LDFLAGS for targets cflags="" ldflags="" @@ -1342,12 +2821,15 @@ alpha) ;; esac -echo "LDFLAGS+=$ldflags" >> $config_target_mak +echo "QEMU_LDFLAGS+=$ldflags" >> $config_target_mak echo "QEMU_CFLAGS+=$cflags" >> $config_target_mak -echo "QEMU_CFLAGS+=-include ${target_name}.h" >> $config_target_mak done # for target in $targets +if test "$ccache_cpp2" = "yes"; then + echo "export CCACHE_CPP2=y" >> $config_host_mak +fi + # Save the configure command line for later reuse. cat <config.status #!/bin/sh @@ -1356,9 +2838,44 @@ cat <config.status # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. 
EOD + +preserve_env() { + envname=$1 + + eval envval=\$$envname + + if test -n "$envval" + then + echo "$envname='$envval'" >> config.status + echo "export $envname" >> config.status + else + echo "unset $envname" >> config.status + fi +} + +# Preserve various env variables that influence what +# features/build target configure will detect +preserve_env AR +preserve_env AS +preserve_env CC +preserve_env CPP +preserve_env CXX +preserve_env INSTALL +preserve_env LD +preserve_env LD_LIBRARY_PATH +preserve_env LIBTOOL +preserve_env MAKE +preserve_env NM +preserve_env OBJCOPY +preserve_env PATH +preserve_env PKG_CONFIG +preserve_env PKG_CONFIG_LIBDIR +preserve_env PKG_CONFIG_PATH +preserve_env STRIP + printf "exec" >>config.status printf " '%s'" "$0" "$@" >>config.status -echo >>config.status +echo ' "$@"' >>config.status chmod +x config.status rm -r "$TMPDIR1" diff --git a/qemu/cpu-exec.c b/qemu/cpu-exec.c deleted file mode 100644 index b9559645..00000000 --- a/qemu/cpu-exec.c +++ /dev/null @@ -1,463 +0,0 @@ -/* - * emulator main execution loop - * - * Copyright (c) 2003-2005 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ - -#include "tcg.h" -#include "sysemu/sysemu.h" - -#include "uc_priv.h" - -static tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr); -static TranslationBlock *tb_find_slow(CPUArchState *env, target_ulong pc, - target_ulong cs_base, uint64_t flags); -static TranslationBlock *tb_find_fast(CPUArchState *env); -static void cpu_handle_debug_exception(CPUArchState *env); - -void cpu_loop_exit(CPUState *cpu) -{ - cpu->current_tb = NULL; - siglongjmp(cpu->jmp_env, 1); -} - -/* exit the current TB from a signal handler. The host registers are - restored in a state compatible with the CPU emulator - */ -void cpu_resume_from_signal(CPUState *cpu, void *puc) -{ - /* XXX: restore cpu registers saved in host registers */ - cpu->exception_index = -1; - siglongjmp(cpu->jmp_env, 1); -} - -/* main execution loop */ - -int cpu_exec(struct uc_struct *uc, CPUArchState *env) // qq -{ - CPUState *cpu = ENV_GET_CPU(env); - TCGContext *tcg_ctx = env->uc->tcg_ctx; - CPUClass *cc = CPU_GET_CLASS(uc, cpu); -#ifdef TARGET_I386 - X86CPU *x86_cpu = X86_CPU(uc, cpu); -#endif - int ret = 0, interrupt_request; - TranslationBlock *tb; - uint8_t *tc_ptr; - uintptr_t next_tb; - struct hook *hook; - - if (cpu->halted) { - if (!cpu_has_work(cpu)) { - return EXCP_HALTED; - } - - cpu->halted = 0; - } - - uc->current_cpu = cpu; - - /* As long as current_cpu is null, up to the assignment just above, - * requests by other threads to exit the execution loop are expected to - * be issued using the exit_request global. 
We must make sure that our - * evaluation of the global value is performed past the current_cpu - * value transition point, which requires a memory barrier as well as - * an instruction scheduling constraint on modern architectures. */ - smp_mb(); - - if (unlikely(uc->exit_request)) { - cpu->exit_request = 1; - } - - cc->cpu_exec_enter(cpu); - cpu->exception_index = -1; - env->invalid_error = UC_ERR_OK; - - /* prepare setjmp context for exception handling */ - for(;;) { - if (sigsetjmp(cpu->jmp_env, 0) == 0) { - if (uc->stop_request || uc->invalid_error) { - break; - } - - /* if an exception is pending, we execute it here */ - if (cpu->exception_index >= 0) { - //printf(">>> GOT INTERRUPT. exception idx = %x\n", cpu->exception_index); // qq - if (cpu->exception_index >= EXCP_INTERRUPT) { - /* exit request from the cpu execution loop */ - ret = cpu->exception_index; - if (ret == EXCP_DEBUG) { - cpu_handle_debug_exception(env); - } - break; - } else { - bool catched = false; -#if defined(CONFIG_USER_ONLY) - /* if user mode only, we simulate a fake exception - which will be handled outside the cpu execution - loop */ -#if defined(TARGET_I386) - cc->do_interrupt(cpu); -#endif - ret = cpu->exception_index; - break; -#else -#if defined(TARGET_X86_64) - if (env->exception_is_int) { - // point EIP to the next instruction after INT - env->eip = env->exception_next_eip; - } -#endif -#if defined(TARGET_MIPS) || defined(TARGET_MIPS64) - env->active_tc.PC = uc->next_pc; -#endif - if (uc->stop_interrupt && uc->stop_interrupt(cpu->exception_index)) { - // Unicorn: call registered invalid instruction callbacks - HOOK_FOREACH_VAR_DECLARE; - HOOK_FOREACH(uc, hook, UC_HOOK_INSN_INVALID) { - if (hook->to_delete) - continue; - catched = ((uc_cb_hookinsn_invalid_t)hook->callback)(uc, hook->user_data); - if (catched) - break; - } - if (!catched) - uc->invalid_error = UC_ERR_INSN_INVALID; - } else { - // Unicorn: call registered interrupt callbacks - HOOK_FOREACH_VAR_DECLARE; - HOOK_FOREACH(uc, hook, UC_HOOK_INTR) { - if (hook->to_delete) - continue; - ((uc_cb_hookintr_t)hook->callback)(uc, cpu->exception_index, hook->user_data); - catched = true; - } - if (!catched) - uc->invalid_error = UC_ERR_EXCEPTION; - } - - // Unicorn: If un-catched interrupt, stop executions. - if (!catched) { - cpu->halted = 1; - ret = EXCP_HLT; - break; - } - - cpu->exception_index = -1; -#endif - } - } - - next_tb = 0; /* force lookup of first TB */ - for(;;) { - interrupt_request = cpu->interrupt_request; - - if (unlikely(interrupt_request)) { - if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { - /* Mask out external interrupts for this step. */ - interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; - } - - if (interrupt_request & CPU_INTERRUPT_DEBUG) { - cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; - cpu->exception_index = EXCP_DEBUG; - cpu_loop_exit(cpu); - } - - if (interrupt_request & CPU_INTERRUPT_HALT) { - cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; - cpu->halted = 1; - cpu->exception_index = EXCP_HLT; - cpu_loop_exit(cpu); - } -#if defined(TARGET_I386) - if (interrupt_request & CPU_INTERRUPT_INIT) { - cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0); - do_cpu_init(x86_cpu); - cpu->exception_index = EXCP_HALTED; - cpu_loop_exit(cpu); - } -#else - if (interrupt_request & CPU_INTERRUPT_RESET) { - cpu_reset(cpu); - } -#endif - /* The target hook has 3 exit conditions: - False when the interrupt isn't processed, - True when it is, and we should restart on a new TB, - and via longjmp via cpu_loop_exit. 
*/ - if (cc->cpu_exec_interrupt(cpu, interrupt_request)) { - next_tb = 0; - } - - /* Don't use the cached interrupt_request value, - do_interrupt may have updated the EXITTB flag. */ - if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) { - cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; - /* ensure that no TB jump will be modified as - the program flow was changed */ - next_tb = 0; - } - } - - if (unlikely(cpu->exit_request)) { - cpu->exit_request = 0; - cpu->exception_index = EXCP_INTERRUPT; - cpu_loop_exit(cpu); - } - - tb = tb_find_fast(env); // qq - if (!tb) { // invalid TB due to invalid code? - uc->invalid_error = UC_ERR_FETCH_UNMAPPED; - ret = EXCP_HLT; - break; - } - - /* Note: we do it here to avoid a gcc bug on Mac OS X when - doing it in tb_find_slow */ - if (tcg_ctx->tb_ctx.tb_invalidated_flag) { - /* as some TB could have been invalidated because - of memory exceptions while generating the code, we - must recompute the hash index here */ - next_tb = 0; - tcg_ctx->tb_ctx.tb_invalidated_flag = 0; - } - - /* see if we can patch the calling TB. When the TB - spans two pages, we cannot safely do a direct - jump. */ - if (next_tb != 0 && tb->page_addr[1] == -1) { - tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK), - next_tb & TB_EXIT_MASK, tb); - } - - /* cpu_interrupt might be called while translating the - TB, but before it is linked into a potentially - infinite loop and becomes env->current_tb. Avoid - starting execution if there is a pending interrupt. */ - cpu->current_tb = tb; - barrier(); - if (likely(!cpu->exit_request)) { - tc_ptr = tb->tc_ptr; - /* execute the generated code */ - next_tb = cpu_tb_exec(cpu, tc_ptr); // qq - - switch (next_tb & TB_EXIT_MASK) { - case TB_EXIT_REQUESTED: - /* Something asked us to stop executing - * chained TBs; just continue round the main - * loop. Whatever requested the exit will also - * have set something else (eg exit_request or - * interrupt_request) which we will handle - * next time around the loop. - */ - tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK); - next_tb = 0; - break; - default: - break; - } - } - - cpu->current_tb = NULL; - /* reset soft MMU for next block (it can currently - only be set by a memory fault) */ - } /* for(;;) */ - } else { - /* Reload env after longjmp - the compiler may have smashed all - * local variables as longjmp is marked 'noreturn'. */ - cpu = uc->current_cpu; - env = cpu->env_ptr; - cc = CPU_GET_CLASS(uc, cpu); -#ifdef TARGET_I386 - x86_cpu = X86_CPU(uc, cpu); -#endif - } - } /* for(;;) */ - - // Unicorn: Clear any TCG exit flag that might have been left set by exit requests - uc->current_cpu->tcg_exit_req = 0; - - cc->cpu_exec_exit(cpu); - - // Unicorn: flush JIT cache to because emulation might stop in - // the middle of translation, thus generate incomplete code. - // TODO: optimize this for better performance - tb_flush(env); - - /* fail safe : never use current_cpu outside cpu_exec() */ - // uc->current_cpu = NULL; - - return ret; -} - -/* Execute a TB, and fix up the CPU state afterwards if necessary */ -static tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr) -{ - CPUArchState *env = cpu->env_ptr; - TCGContext *tcg_ctx = env->uc->tcg_ctx; - uintptr_t next_tb; - - next_tb = tcg_qemu_tb_exec(env, tb_ptr); - - if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) { - /* We didn't start executing this TB (eg because the instruction - * counter hit zero); we must restore the guest PC to the address - * of the start of the TB. 
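 *
 * To illustrate the convention (a sketch restating the decoding done
 * just below, not additional behaviour): tcg_qemu_tb_exec() packs the
 * exit reason into the low bits of the returned TB pointer, so callers
 * recover both pieces with
 *
 *     tb     = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
 *     reason = next_tb & TB_EXIT_MASK;    .. one of the TB_EXIT_* values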
- */ - CPUClass *cc = CPU_GET_CLASS(env->uc, cpu); - TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK); - - /* Both set_pc() & synchronize_fromtb() can be ignored when code tracing hook is installed, - * or timer mode is in effect, since these already fix the PC. - */ - if (!HOOK_EXISTS(env->uc, UC_HOOK_CODE) && !env->uc->timeout) { - // We should sync pc for R/W error. - switch (env->invalid_error) { - case UC_ERR_WRITE_PROT: - case UC_ERR_READ_PROT: - case UC_ERR_FETCH_PROT: - case UC_ERR_WRITE_UNMAPPED: - case UC_ERR_READ_UNMAPPED: - case UC_ERR_FETCH_UNMAPPED: - case UC_ERR_WRITE_UNALIGNED: - case UC_ERR_READ_UNALIGNED: - case UC_ERR_FETCH_UNALIGNED: - break; - default: - if (cc->synchronize_from_tb) { - // avoid sync twice when helper_uc_tracecode() already did this. - if (env->uc->emu_counter <= env->uc->emu_count && - !env->uc->stop_request && !env->uc->quit_request) - cc->synchronize_from_tb(cpu, tb); - } else { - assert(cc->set_pc); - // avoid sync twice when helper_uc_tracecode() already did this. - if (env->uc->emu_counter <= env->uc->emu_count && - !env->uc->stop_request && !env->uc->quit_request) - cc->set_pc(cpu, tb->pc); - } - } - } - } - - if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) { - /* We were asked to stop executing TBs (probably a pending - * interrupt. We've now stopped, so clear the flag. - */ - cpu->tcg_exit_req = 0; - } - - return next_tb; -} - -static TranslationBlock *tb_find_slow(CPUArchState *env, target_ulong pc, - target_ulong cs_base, uint64_t flags) // qq -{ - CPUState *cpu = ENV_GET_CPU(env); - TCGContext *tcg_ctx = env->uc->tcg_ctx; - TranslationBlock *tb, **ptb1; - unsigned int h; - tb_page_addr_t phys_pc, phys_page1; - target_ulong virt_page2; - - tcg_ctx->tb_ctx.tb_invalidated_flag = 0; - - /* find translated block using physical mappings */ - phys_pc = get_page_addr_code(env, pc); // qq - if (phys_pc == -1) { // invalid code? - return NULL; - } - phys_page1 = phys_pc & TARGET_PAGE_MASK; - h = tb_phys_hash_func(phys_pc); - ptb1 = &tcg_ctx->tb_ctx.tb_phys_hash[h]; - for(;;) { - tb = *ptb1; - if (!tb) - goto not_found; - if (tb->pc == pc && - tb->page_addr[0] == phys_page1 && - tb->cs_base == cs_base && - tb->flags == flags) { - /* check next page if needed */ - if (tb->page_addr[1] != -1) { - tb_page_addr_t phys_page2; - - virt_page2 = (pc & TARGET_PAGE_MASK) + - TARGET_PAGE_SIZE; - phys_page2 = get_page_addr_code(env, virt_page2); - if (tb->page_addr[1] == phys_page2) - goto found; - } else { - goto found; - } - } - ptb1 = &tb->phys_hash_next; - } -not_found: - /* if no translated code available, then translate it now */ - tb = tb_gen_code(cpu, pc, cs_base, (int)flags, 0); // qq - if (tb == NULL) { - return NULL; - } - -found: - /* Move the last found TB to the head of the list */ - if (likely(*ptb1)) { - *ptb1 = tb->phys_hash_next; - tb->phys_hash_next = tcg_ctx->tb_ctx.tb_phys_hash[h]; - tcg_ctx->tb_ctx.tb_phys_hash[h] = tb; - } - /* we add the TB in the virtual pc hash table */ - cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb; - return tb; -} - -static TranslationBlock *tb_find_fast(CPUArchState *env) // qq -{ - CPUState *cpu = ENV_GET_CPU(env); - TranslationBlock *tb; - target_ulong cs_base, pc; - int flags; - - /* we record a subset of the CPU state. It will - always be the same before a given translated block - is executed. 
*/ - cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); - tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]; - if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base || - tb->flags != flags)) { - tb = tb_find_slow(env, pc, cs_base, flags); // qq - } - return tb; -} - -static void cpu_handle_debug_exception(CPUArchState *env) -{ - CPUState *cpu = ENV_GET_CPU(env); - CPUClass *cc = CPU_GET_CLASS(env->uc, cpu); - CPUWatchpoint *wp; - - if (!cpu->watchpoint_hit) { - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - wp->flags &= ~BP_WATCHPOINT_HIT; - } - } - - cc->debug_excp_handler(cpu); -} diff --git a/qemu/cputlb.c b/qemu/cputlb.c deleted file mode 100644 index fd0bb806..00000000 --- a/qemu/cputlb.c +++ /dev/null @@ -1,426 +0,0 @@ -/* - * Common CPU TLB handling - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ - -#include "config.h" -#include "cpu.h" -#include "exec/exec-all.h" -#include "exec/memory.h" -#include "exec/address-spaces.h" -#include "exec/cpu_ldst.h" - -#include "exec/cputlb.h" - -#include "exec/memory-internal.h" -#include "exec/ram_addr.h" -#include "tcg/tcg.h" - -#include "uc_priv.h" - -//#define DEBUG_TLB -//#define DEBUG_TLB_CHECK - -static void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr); -static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe); -static bool qemu_ram_addr_from_host_nofail(struct uc_struct *uc, void *ptr, ram_addr_t *addr); -static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr, - target_ulong size); -static void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr); - -/* statistics */ -//int tlb_flush_count; - -/* NOTE: - * If flush_global is true (the usual case), flush all tlb entries. - * If flush_global is false, flush (at least) all tlb entries not - * marked global. - * - * Since QEMU doesn't currently implement a global/not-global flag - * for tlb entries, at the moment tlb_flush() will also flush all - * tlb entries in the flush_global == false case. This is OK because - * CPU architectures generally permit an implementation to drop - * entries from the TLB at any time, so flushing more entries than - * required is only an efficiency issue, not a correctness issue. 
- */ -void tlb_flush(CPUState *cpu, int flush_global) -{ - CPUArchState *env = cpu->env_ptr; - -#if defined(DEBUG_TLB) - printf("tlb_flush:\n"); -#endif - /* must reset current TB so that interrupts cannot modify the - links while we are modifying them */ - cpu->current_tb = NULL; - - memset(env->tlb_table, -1, sizeof(env->tlb_table)); - memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table)); - memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache)); - - env->vtlb_index = 0; - env->tlb_flush_addr = -1; - env->tlb_flush_mask = 0; - //tlb_flush_count++; -} - -void tlb_flush_page(CPUState *cpu, target_ulong addr) -{ - CPUArchState *env = cpu->env_ptr; - int i; - int mmu_idx; - -#if defined(DEBUG_TLB) - printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr); -#endif - /* Check if we need to flush due to large pages. */ - if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { -#if defined(DEBUG_TLB) - printf("tlb_flush_page: forced full flush (" - TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", - env->tlb_flush_addr, env->tlb_flush_mask); -#endif - tlb_flush(cpu, 1); - return; - } - /* must reset current TB so that interrupts cannot modify the - links while we are modifying them */ - cpu->current_tb = NULL; - - addr &= TARGET_PAGE_MASK; - i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { - tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); - } - - /* check whether there are entries that need to be flushed in the vtlb */ - for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { - int k; - for (k = 0; k < CPU_VTLB_SIZE; k++) { - tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr); - } - } - - tb_flush_jmp_cache(cpu, addr); -} - -/* update the TLBs so that writes to code in the virtual page 'addr' - can be detected */ -void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr) -{ - cpu_physical_memory_reset_dirty(uc, ram_addr, TARGET_PAGE_SIZE, - DIRTY_MEMORY_CODE); -} - -/* update the TLB so that writes in physical page 'phys_addr' are no longer - tested for self modifying code */ -void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr, - target_ulong vaddr) -{ - cpu_physical_memory_set_dirty_flag(cpu->uc, ram_addr, DIRTY_MEMORY_CODE); -} - -void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start, - uintptr_t length) -{ - uintptr_t addr; - - if (tlb_is_dirty_ram(tlb_entry)) { - addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; - if ((addr - start) < length) { - tlb_entry->addr_write |= TLB_NOTDIRTY; - } - } -} - -void cpu_tlb_reset_dirty_all(struct uc_struct *uc, - ram_addr_t start1, ram_addr_t length) -{ - CPUState *cpu = uc->cpu; - CPUArchState *env; - - int mmu_idx; - - env = cpu->env_ptr; - for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { - unsigned int i; - - for (i = 0; i < CPU_TLB_SIZE; i++) { - tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], - start1, length); - } - - for (i = 0; i < CPU_VTLB_SIZE; i++) { - tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i], - start1, length); - } - } -} - -/* update the TLB corresponding to virtual page vaddr - so that it is no longer dirty */ -void tlb_set_dirty(CPUArchState *env, target_ulong vaddr) -{ - int i; - int mmu_idx; - - vaddr &= TARGET_PAGE_MASK; - i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { - tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); - } - - for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { - int k; - for (k = 0; k < CPU_VTLB_SIZE; k++) { - 
tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr); - } - } -} - - -/* Add a new TLB entry. At most one entry for a given virtual address - is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the - supplied size is only used by tlb_flush_page. */ -void tlb_set_page(CPUState *cpu, target_ulong vaddr, - hwaddr paddr, int prot, - int mmu_idx, target_ulong size) -{ - CPUArchState *env = cpu->env_ptr; - MemoryRegionSection *section; - unsigned int index; - target_ulong address; - target_ulong code_address; - uintptr_t addend; - CPUTLBEntry *te; - hwaddr iotlb, xlat, sz; - unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE; - - assert(size >= TARGET_PAGE_SIZE); - if (size != TARGET_PAGE_SIZE) { - tlb_add_large_page(env, vaddr, size); - } - - sz = size; - section = address_space_translate_for_iotlb(cpu->as, paddr, - &xlat, &sz); - assert(sz >= TARGET_PAGE_SIZE); - -#if defined(DEBUG_TLB) - printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx - " prot=%x idx=%d\n", - vaddr, paddr, prot, mmu_idx); -#endif - - address = vaddr; - if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) { - /* IO memory case */ - address |= TLB_MMIO; - addend = 0; - } else { - /* TLB_MMIO for rom/romd handled below */ - addend = (uintptr_t)((char*)memory_region_get_ram_ptr(section->mr) + xlat); - } - - code_address = address; - iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat, - prot, &address); - - index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - te = &env->tlb_table[mmu_idx][index]; - - /* do not discard the translation in te, evict it into a victim tlb */ - env->tlb_v_table[mmu_idx][vidx] = *te; - env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; - - /* refill the tlb */ - env->iotlb[mmu_idx][index] = iotlb - vaddr; - te->addend = (uintptr_t)(addend - vaddr); - if (prot & PAGE_READ) { - te->addr_read = address; - } else { - te->addr_read = -1; - } - - if (prot & PAGE_EXEC) { - te->addr_code = code_address; - } else { - te->addr_code = -1; - } - if (prot & PAGE_WRITE) { - if ((memory_region_is_ram(section->mr) && section->readonly) - || memory_region_is_romd(section->mr)) { - /* Write access calls the I/O callback. */ - te->addr_write = address | TLB_MMIO; - } else if (memory_region_is_ram(section->mr) - && cpu_physical_memory_is_clean(cpu->uc, (ram_addr_t)(section->mr->ram_addr - + xlat))) { - te->addr_write = address | TLB_NOTDIRTY; - } else { - te->addr_write = address; - } - } else { - te->addr_write = -1; - } -} - -/* NOTE: this function can trigger an exception */ -/* NOTE2: the returned address is not exactly the physical address: it - * is actually a ram_addr_t (in system mode; the user mode emulation - * version of this function returns a guest virtual address). 
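 *
 * A minimal caller sketch, mirroring tb_find_slow() in cpu-exec.c above
 * (which treats -1 as "no executable mapping" and lets Unicorn surface
 * a UC_ERR_FETCH_* error instead of aborting):
 *
 *     tb_page_addr_t phys_pc = get_page_addr_code(env, pc);
 *     if (phys_pc == -1) {
 *         return NULL;
 *     }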
- */ -tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) -{ - int mmu_idx, page_index, pd; - void *p; - MemoryRegion *mr; - ram_addr_t ram_addr; - CPUState *cpu = ENV_GET_CPU(env1); - - page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - mmu_idx = cpu_mmu_index(env1); - - if ((mmu_idx < 0) || (mmu_idx >= NB_MMU_MODES)) { - return -1; - } - - if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code != - (addr & TARGET_PAGE_MASK))) { - cpu_ldub_code(env1, addr); - //check for NX related error from softmmu - if (env1->invalid_error == UC_ERR_FETCH_PROT) { - return -1; - } - } - pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK; - mr = iotlb_to_region(cpu->as, pd); - if (memory_region_is_unassigned(cpu->uc, mr)) { - CPUClass *cc = CPU_GET_CLASS(env1->uc, cpu); - - if (cc->do_unassigned_access) { - cc->do_unassigned_access(cpu, addr, false, true, 0, 4); - } else { - //cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x" - // TARGET_FMT_lx "\n", addr); // qq - env1->invalid_addr = addr; - env1->invalid_error = UC_ERR_FETCH_UNMAPPED; - return -1; - } - } - p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend); - if (!qemu_ram_addr_from_host_nofail(cpu->uc, p, &ram_addr)) { - env1->invalid_addr = addr; - env1->invalid_error = UC_ERR_FETCH_UNMAPPED; - return -1; - } else - return ram_addr; -} - -static bool qemu_ram_addr_from_host_nofail(struct uc_struct *uc, void *ptr, ram_addr_t *ram_addr) -{ - if (qemu_ram_addr_from_host(uc, ptr, ram_addr) == NULL) { - // fprintf(stderr, "Bad ram pointer %p\n", ptr); - return false; - } - - return true; -} - -static void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) -{ - if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { - tlb_entry->addr_write = vaddr; - } -} - -/* Our TLB does not support large pages, so remember the area covered by - large pages and trigger a full TLB flush if these are invalidated. */ -static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr, - target_ulong size) -{ - target_ulong mask = ~(size - 1); - - if (env->tlb_flush_addr == (target_ulong)-1) { - env->tlb_flush_addr = vaddr & mask; - env->tlb_flush_mask = mask; - return; - } - /* Extend the existing region to include the new page. - This is a compromise between unnecessary flushes and the cost - of maintaining a full variable size TLB. 
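 *
 * A worked example with hypothetical values: an existing 2MB region
 * addr=0x40000000, mask=0xffe00000 and a new vaddr=0x40300000 give
 * 0x40000000 ^ 0x40300000 = 0x00300000, which still overlaps the 2MB
 * mask, so the loop below widens the mask one step:
 *
 *     mask:           0xffe00000 -> 0xffc00000
 *     tlb_flush_addr: 0x40000000 & 0xffc00000 = 0x40000000
 *
 * leaving a single 4MB region that covers both large pages.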
*/ - mask &= env->tlb_flush_mask; - while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) { - mask <<= 1; - } - env->tlb_flush_addr &= mask; - env->tlb_flush_mask = mask; -} - -static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe) -{ - return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0; -} - - -static void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) -{ - if (addr == (tlb_entry->addr_read & - (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || - addr == (tlb_entry->addr_write & - (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || - addr == (tlb_entry->addr_code & - (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - memset(tlb_entry, -1, sizeof(*tlb_entry)); - } -} - - -#define MMUSUFFIX _mmu - -#define SHIFT 0 -#include "softmmu_template.h" - -#define SHIFT 1 -#include "softmmu_template.h" - -#define SHIFT 2 -#include "softmmu_template.h" - -#define SHIFT 3 -#include "softmmu_template.h" -#undef MMUSUFFIX - -#define MMUSUFFIX _cmmu -#undef GETPC_ADJ -#define GETPC_ADJ 0 -#undef GETRA -#define GETRA() ((uintptr_t)0) -#define SOFTMMU_CODE_ACCESS - -#define SHIFT 0 -#include "softmmu_template.h" - -#define SHIFT 1 -#include "softmmu_template.h" - -#define SHIFT 2 -#include "softmmu_template.h" - -#define SHIFT 3 -#include "softmmu_template.h" diff --git a/qemu/util/aes.c b/qemu/crypto/aes.c similarity index 66% rename from qemu/util/aes.c rename to qemu/crypto/aes.c index 50c69c38..0f6a195a 100644 --- a/qemu/util/aes.c +++ b/qemu/crypto/aes.c @@ -27,8 +27,8 @@ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include "qemu-common.h" -#include "qemu/aes.h" +#include "qemu/osdep.h" +#include "crypto/aes.h" typedef uint32_t u32; typedef uint8_t u8; @@ -1057,3 +1057,596 @@ const uint32_t AES_Td4[256] = { 0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U, 0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU, }; +static const u32 rcon[] = { + 0x01000000, 0x02000000, 0x04000000, 0x08000000, + 0x10000000, 0x20000000, 0x40000000, 0x80000000, + 0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */ +}; + +/** + * Expand the cipher key into the encryption key schedule. 
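 *
 * Returns 0 on success, -1 if a pointer argument is NULL and -2 for an
 * unsupported key size (only 128, 192 and 256 bits are accepted).
 * A minimal caller sketch (illustrative only; key, pt and ct are
 * assumed to be caller-supplied 16-byte buffers):
 *
 *     AES_KEY enc;
 *     if (AES_set_encrypt_key(key, 128, &enc) != 0) {
 *         return;
 *     }
 *     AES_encrypt(pt, ct, &enc);    .. encrypts one 16-byte block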
+ */ +int AES_set_encrypt_key(const unsigned char *userKey, const int bits, + AES_KEY *key) { + + u32 *rk; + int i = 0; + u32 temp; + + if (!userKey || !key) + return -1; + if (bits != 128 && bits != 192 && bits != 256) + return -2; + + rk = key->rd_key; + + if (bits==128) + key->rounds = 10; + else if (bits==192) + key->rounds = 12; + else + key->rounds = 14; + + rk[0] = GETU32(userKey ); + rk[1] = GETU32(userKey + 4); + rk[2] = GETU32(userKey + 8); + rk[3] = GETU32(userKey + 12); + if (bits == 128) { + while (1) { + temp = rk[3]; + rk[4] = rk[0] ^ + (AES_Te4[(temp >> 16) & 0xff] & 0xff000000) ^ + (AES_Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ + (AES_Te4[(temp ) & 0xff] & 0x0000ff00) ^ + (AES_Te4[(temp >> 24) ] & 0x000000ff) ^ + rcon[i]; + rk[5] = rk[1] ^ rk[4]; + rk[6] = rk[2] ^ rk[5]; + rk[7] = rk[3] ^ rk[6]; + if (++i == 10) { + return 0; + } + rk += 4; + } + } + rk[4] = GETU32(userKey + 16); + rk[5] = GETU32(userKey + 20); + if (bits == 192) { + while (1) { + temp = rk[ 5]; + rk[ 6] = rk[ 0] ^ + (AES_Te4[(temp >> 16) & 0xff] & 0xff000000) ^ + (AES_Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ + (AES_Te4[(temp ) & 0xff] & 0x0000ff00) ^ + (AES_Te4[(temp >> 24) ] & 0x000000ff) ^ + rcon[i]; + rk[ 7] = rk[ 1] ^ rk[ 6]; + rk[ 8] = rk[ 2] ^ rk[ 7]; + rk[ 9] = rk[ 3] ^ rk[ 8]; + if (++i == 8) { + return 0; + } + rk[10] = rk[ 4] ^ rk[ 9]; + rk[11] = rk[ 5] ^ rk[10]; + rk += 6; + } + } + rk[6] = GETU32(userKey + 24); + rk[7] = GETU32(userKey + 28); + if (bits == 256) { + while (1) { + temp = rk[ 7]; + rk[ 8] = rk[ 0] ^ + (AES_Te4[(temp >> 16) & 0xff] & 0xff000000) ^ + (AES_Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ + (AES_Te4[(temp ) & 0xff] & 0x0000ff00) ^ + (AES_Te4[(temp >> 24) ] & 0x000000ff) ^ + rcon[i]; + rk[ 9] = rk[ 1] ^ rk[ 8]; + rk[10] = rk[ 2] ^ rk[ 9]; + rk[11] = rk[ 3] ^ rk[10]; + if (++i == 7) { + return 0; + } + temp = rk[11]; + rk[12] = rk[ 4] ^ + (AES_Te4[(temp >> 24) ] & 0xff000000) ^ + (AES_Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ + (AES_Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ + (AES_Te4[(temp ) & 0xff] & 0x000000ff); + rk[13] = rk[ 5] ^ rk[12]; + rk[14] = rk[ 6] ^ rk[13]; + rk[15] = rk[ 7] ^ rk[14]; + + rk += 8; + } + } + abort(); +} + +/** + * Expand the cipher key into the decryption key schedule. 
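 *
 * As the body below shows, this builds the encryption schedule first,
 * then reverses the order of the round keys and applies the inverse
 * MixColumns transform (via the AES_Td0..AES_Td3 and AES_Te4 tables)
 * to every round key except the first and last, so AES_decrypt() can
 * reuse the same round structure. Round-trip sketch (same assumptions
 * as above):
 *
 *     AES_KEY dec;
 *     if (AES_set_decrypt_key(key, 128, &dec) == 0) {
 *         AES_decrypt(ct, pt, &dec);
 *     }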
+ */ +int AES_set_decrypt_key(const unsigned char *userKey, const int bits, + AES_KEY *key) { + + u32 *rk; + int i, j, status; + u32 temp; + + /* first, start with an encryption schedule */ + status = AES_set_encrypt_key(userKey, bits, key); + if (status < 0) + return status; + + rk = key->rd_key; + + /* invert the order of the round keys: */ + for (i = 0, j = 4*(key->rounds); i < j; i += 4, j -= 4) { + temp = rk[i ]; rk[i ] = rk[j ]; rk[j ] = temp; + temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp; + temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp; + temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp; + } + /* apply the inverse MixColumn transform to all round keys but the first and the last: */ + for (i = 1; i < (key->rounds); i++) { + rk += 4; + rk[0] = + AES_Td0[AES_Te4[(rk[0] >> 24) ] & 0xff] ^ + AES_Td1[AES_Te4[(rk[0] >> 16) & 0xff] & 0xff] ^ + AES_Td2[AES_Te4[(rk[0] >> 8) & 0xff] & 0xff] ^ + AES_Td3[AES_Te4[(rk[0] ) & 0xff] & 0xff]; + rk[1] = + AES_Td0[AES_Te4[(rk[1] >> 24) ] & 0xff] ^ + AES_Td1[AES_Te4[(rk[1] >> 16) & 0xff] & 0xff] ^ + AES_Td2[AES_Te4[(rk[1] >> 8) & 0xff] & 0xff] ^ + AES_Td3[AES_Te4[(rk[1] ) & 0xff] & 0xff]; + rk[2] = + AES_Td0[AES_Te4[(rk[2] >> 24) ] & 0xff] ^ + AES_Td1[AES_Te4[(rk[2] >> 16) & 0xff] & 0xff] ^ + AES_Td2[AES_Te4[(rk[2] >> 8) & 0xff] & 0xff] ^ + AES_Td3[AES_Te4[(rk[2] ) & 0xff] & 0xff]; + rk[3] = + AES_Td0[AES_Te4[(rk[3] >> 24) ] & 0xff] ^ + AES_Td1[AES_Te4[(rk[3] >> 16) & 0xff] & 0xff] ^ + AES_Td2[AES_Te4[(rk[3] >> 8) & 0xff] & 0xff] ^ + AES_Td3[AES_Te4[(rk[3] ) & 0xff] & 0xff]; + } + return 0; +} + +#ifndef AES_ASM +/* + * Encrypt a single block + * in and out can overlap + */ +void AES_encrypt(const unsigned char *in, unsigned char *out, + const AES_KEY *key) { + + const u32 *rk; + u32 s0, s1, s2, s3, t0, t1, t2, t3; +#ifndef FULL_UNROLL + int r; +#endif /* ?FULL_UNROLL */ + + assert(in && out && key); + rk = key->rd_key; + + /* + * map byte array block to cipher state + * and add initial round key: + */ + s0 = GETU32(in ) ^ rk[0]; + s1 = GETU32(in + 4) ^ rk[1]; + s2 = GETU32(in + 8) ^ rk[2]; + s3 = GETU32(in + 12) ^ rk[3]; +#ifdef FULL_UNROLL + /* round 1: */ + t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[ 4]; + t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[ 5]; + t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[ 6]; + t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[ 7]; + /* round 2: */ + s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[ 8]; + s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[ 9]; + s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[10]; + s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[11]; + /* round 3: */ + t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[12]; + t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[13]; + t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[14]; + t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ 
rk[15]; + /* round 4: */ + s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[16]; + s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[17]; + s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[18]; + s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[19]; + /* round 5: */ + t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[20]; + t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[21]; + t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[22]; + t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[23]; + /* round 6: */ + s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[24]; + s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[25]; + s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[26]; + s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[27]; + /* round 7: */ + t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[28]; + t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[29]; + t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[30]; + t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[31]; + /* round 8: */ + s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[32]; + s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[33]; + s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[34]; + s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[35]; + /* round 9: */ + t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[36]; + t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[37]; + t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[38]; + t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[39]; + if (key->rounds > 10) { + /* round 10: */ + s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[40]; + s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[41]; + s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[42]; + s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[43]; + /* round 11: */ + t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[44]; + t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ 
rk[45]; + t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[46]; + t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[47]; + if (key->rounds > 12) { + /* round 12: */ + s0 = AES_Te0[t0 >> 24] ^ AES_Te1[(t1 >> 16) & 0xff] ^ AES_Te2[(t2 >> 8) & 0xff] ^ AES_Te3[t3 & 0xff] ^ rk[48]; + s1 = AES_Te0[t1 >> 24] ^ AES_Te1[(t2 >> 16) & 0xff] ^ AES_Te2[(t3 >> 8) & 0xff] ^ AES_Te3[t0 & 0xff] ^ rk[49]; + s2 = AES_Te0[t2 >> 24] ^ AES_Te1[(t3 >> 16) & 0xff] ^ AES_Te2[(t0 >> 8) & 0xff] ^ AES_Te3[t1 & 0xff] ^ rk[50]; + s3 = AES_Te0[t3 >> 24] ^ AES_Te1[(t0 >> 16) & 0xff] ^ AES_Te2[(t1 >> 8) & 0xff] ^ AES_Te3[t2 & 0xff] ^ rk[51]; + /* round 13: */ + t0 = AES_Te0[s0 >> 24] ^ AES_Te1[(s1 >> 16) & 0xff] ^ AES_Te2[(s2 >> 8) & 0xff] ^ AES_Te3[s3 & 0xff] ^ rk[52]; + t1 = AES_Te0[s1 >> 24] ^ AES_Te1[(s2 >> 16) & 0xff] ^ AES_Te2[(s3 >> 8) & 0xff] ^ AES_Te3[s0 & 0xff] ^ rk[53]; + t2 = AES_Te0[s2 >> 24] ^ AES_Te1[(s3 >> 16) & 0xff] ^ AES_Te2[(s0 >> 8) & 0xff] ^ AES_Te3[s1 & 0xff] ^ rk[54]; + t3 = AES_Te0[s3 >> 24] ^ AES_Te1[(s0 >> 16) & 0xff] ^ AES_Te2[(s1 >> 8) & 0xff] ^ AES_Te3[s2 & 0xff] ^ rk[55]; + } + } + rk += key->rounds << 2; +#else /* !FULL_UNROLL */ + /* + * Nr - 1 full rounds: + */ + r = key->rounds >> 1; + for (;;) { + t0 = + AES_Te0[(s0 >> 24) ] ^ + AES_Te1[(s1 >> 16) & 0xff] ^ + AES_Te2[(s2 >> 8) & 0xff] ^ + AES_Te3[(s3 ) & 0xff] ^ + rk[4]; + t1 = + AES_Te0[(s1 >> 24) ] ^ + AES_Te1[(s2 >> 16) & 0xff] ^ + AES_Te2[(s3 >> 8) & 0xff] ^ + AES_Te3[(s0 ) & 0xff] ^ + rk[5]; + t2 = + AES_Te0[(s2 >> 24) ] ^ + AES_Te1[(s3 >> 16) & 0xff] ^ + AES_Te2[(s0 >> 8) & 0xff] ^ + AES_Te3[(s1 ) & 0xff] ^ + rk[6]; + t3 = + AES_Te0[(s3 >> 24) ] ^ + AES_Te1[(s0 >> 16) & 0xff] ^ + AES_Te2[(s1 >> 8) & 0xff] ^ + AES_Te3[(s2 ) & 0xff] ^ + rk[7]; + + rk += 8; + if (--r == 0) { + break; + } + + s0 = + AES_Te0[(t0 >> 24) ] ^ + AES_Te1[(t1 >> 16) & 0xff] ^ + AES_Te2[(t2 >> 8) & 0xff] ^ + AES_Te3[(t3 ) & 0xff] ^ + rk[0]; + s1 = + AES_Te0[(t1 >> 24) ] ^ + AES_Te1[(t2 >> 16) & 0xff] ^ + AES_Te2[(t3 >> 8) & 0xff] ^ + AES_Te3[(t0 ) & 0xff] ^ + rk[1]; + s2 = + AES_Te0[(t2 >> 24) ] ^ + AES_Te1[(t3 >> 16) & 0xff] ^ + AES_Te2[(t0 >> 8) & 0xff] ^ + AES_Te3[(t1 ) & 0xff] ^ + rk[2]; + s3 = + AES_Te0[(t3 >> 24) ] ^ + AES_Te1[(t0 >> 16) & 0xff] ^ + AES_Te2[(t1 >> 8) & 0xff] ^ + AES_Te3[(t2 ) & 0xff] ^ + rk[3]; + } +#endif /* ?FULL_UNROLL */ + /* + * apply last round and + * map cipher state to byte array block: + */ + s0 = + (AES_Te4[(t0 >> 24) ] & 0xff000000) ^ + (AES_Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ + (AES_Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ + (AES_Te4[(t3 ) & 0xff] & 0x000000ff) ^ + rk[0]; + PUTU32(out , s0); + s1 = + (AES_Te4[(t1 >> 24) ] & 0xff000000) ^ + (AES_Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ + (AES_Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ + (AES_Te4[(t0 ) & 0xff] & 0x000000ff) ^ + rk[1]; + PUTU32(out + 4, s1); + s2 = + (AES_Te4[(t2 >> 24) ] & 0xff000000) ^ + (AES_Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ + (AES_Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ + (AES_Te4[(t1 ) & 0xff] & 0x000000ff) ^ + rk[2]; + PUTU32(out + 8, s2); + s3 = + (AES_Te4[(t3 >> 24) ] & 0xff000000) ^ + (AES_Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ + (AES_Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ + (AES_Te4[(t2 ) & 0xff] & 0x000000ff) ^ + rk[3]; + PUTU32(out + 12, s3); +} + +/* + * Decrypt a single block + * in and out can overlap + */ +void AES_decrypt(const unsigned char *in, unsigned char *out, + const AES_KEY *key) { + + const u32 *rk; + u32 s0, s1, s2, s3, t0, t1, 
t2, t3; +#ifndef FULL_UNROLL + int r; +#endif /* ?FULL_UNROLL */ + + assert(in && out && key); + rk = key->rd_key; + + /* + * map byte array block to cipher state + * and add initial round key: + */ + s0 = GETU32(in ) ^ rk[0]; + s1 = GETU32(in + 4) ^ rk[1]; + s2 = GETU32(in + 8) ^ rk[2]; + s3 = GETU32(in + 12) ^ rk[3]; +#ifdef FULL_UNROLL + /* round 1: */ + t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[ 4]; + t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[ 5]; + t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[ 6]; + t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[ 7]; + /* round 2: */ + s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[ 8]; + s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[ 9]; + s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[10]; + s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[11]; + /* round 3: */ + t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[12]; + t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[13]; + t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[14]; + t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[15]; + /* round 4: */ + s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[16]; + s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[17]; + s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[18]; + s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[19]; + /* round 5: */ + t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[20]; + t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[21]; + t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[22]; + t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[23]; + /* round 6: */ + s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[24]; + s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[25]; + s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[26]; + s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[27]; + /* round 7: */ + t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[28]; + t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[29]; + t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[30]; + t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 
16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[31]; + /* round 8: */ + s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[32]; + s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[33]; + s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[34]; + s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[35]; + /* round 9: */ + t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[36]; + t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[37]; + t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[38]; + t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[39]; + if (key->rounds > 10) { + /* round 10: */ + s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[40]; + s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[41]; + s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[42]; + s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[43]; + /* round 11: */ + t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[44]; + t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[45]; + t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[46]; + t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[47]; + if (key->rounds > 12) { + /* round 12: */ + s0 = AES_Td0[t0 >> 24] ^ AES_Td1[(t3 >> 16) & 0xff] ^ AES_Td2[(t2 >> 8) & 0xff] ^ AES_Td3[t1 & 0xff] ^ rk[48]; + s1 = AES_Td0[t1 >> 24] ^ AES_Td1[(t0 >> 16) & 0xff] ^ AES_Td2[(t3 >> 8) & 0xff] ^ AES_Td3[t2 & 0xff] ^ rk[49]; + s2 = AES_Td0[t2 >> 24] ^ AES_Td1[(t1 >> 16) & 0xff] ^ AES_Td2[(t0 >> 8) & 0xff] ^ AES_Td3[t3 & 0xff] ^ rk[50]; + s3 = AES_Td0[t3 >> 24] ^ AES_Td1[(t2 >> 16) & 0xff] ^ AES_Td2[(t1 >> 8) & 0xff] ^ AES_Td3[t0 & 0xff] ^ rk[51]; + /* round 13: */ + t0 = AES_Td0[s0 >> 24] ^ AES_Td1[(s3 >> 16) & 0xff] ^ AES_Td2[(s2 >> 8) & 0xff] ^ AES_Td3[s1 & 0xff] ^ rk[52]; + t1 = AES_Td0[s1 >> 24] ^ AES_Td1[(s0 >> 16) & 0xff] ^ AES_Td2[(s3 >> 8) & 0xff] ^ AES_Td3[s2 & 0xff] ^ rk[53]; + t2 = AES_Td0[s2 >> 24] ^ AES_Td1[(s1 >> 16) & 0xff] ^ AES_Td2[(s0 >> 8) & 0xff] ^ AES_Td3[s3 & 0xff] ^ rk[54]; + t3 = AES_Td0[s3 >> 24] ^ AES_Td1[(s2 >> 16) & 0xff] ^ AES_Td2[(s1 >> 8) & 0xff] ^ AES_Td3[s0 & 0xff] ^ rk[55]; + } + } + rk += key->rounds << 2; +#else /* !FULL_UNROLL */ + /* + * Nr - 1 full rounds: + */ + r = key->rounds >> 1; + for (;;) { + t0 = + AES_Td0[(s0 >> 24) ] ^ + AES_Td1[(s3 >> 16) & 0xff] ^ + AES_Td2[(s2 >> 8) & 0xff] ^ + AES_Td3[(s1 ) & 0xff] ^ + rk[4]; + t1 = + AES_Td0[(s1 >> 24) ] ^ + AES_Td1[(s0 >> 16) & 0xff] ^ + AES_Td2[(s3 >> 8) & 0xff] ^ + AES_Td3[(s2 ) & 0xff] ^ + rk[5]; + t2 = + AES_Td0[(s2 >> 24) ] ^ + AES_Td1[(s1 >> 16) & 0xff] ^ + AES_Td2[(s0 >> 8) & 0xff] ^ + AES_Td3[(s3 ) & 0xff] ^ + rk[6]; + t3 = + AES_Td0[(s3 >> 24) ] ^ + AES_Td1[(s2 >> 16) & 0xff] ^ + AES_Td2[(s1 >> 8) & 0xff] ^ + 
AES_Td3[(s0 ) & 0xff] ^ + rk[7]; + + rk += 8; + if (--r == 0) { + break; + } + + s0 = + AES_Td0[(t0 >> 24) ] ^ + AES_Td1[(t3 >> 16) & 0xff] ^ + AES_Td2[(t2 >> 8) & 0xff] ^ + AES_Td3[(t1 ) & 0xff] ^ + rk[0]; + s1 = + AES_Td0[(t1 >> 24) ] ^ + AES_Td1[(t0 >> 16) & 0xff] ^ + AES_Td2[(t3 >> 8) & 0xff] ^ + AES_Td3[(t2 ) & 0xff] ^ + rk[1]; + s2 = + AES_Td0[(t2 >> 24) ] ^ + AES_Td1[(t1 >> 16) & 0xff] ^ + AES_Td2[(t0 >> 8) & 0xff] ^ + AES_Td3[(t3 ) & 0xff] ^ + rk[2]; + s3 = + AES_Td0[(t3 >> 24) ] ^ + AES_Td1[(t2 >> 16) & 0xff] ^ + AES_Td2[(t1 >> 8) & 0xff] ^ + AES_Td3[(t0 ) & 0xff] ^ + rk[3]; + } +#endif /* ?FULL_UNROLL */ + /* + * apply last round and + * map cipher state to byte array block: + */ + s0 = + (AES_Td4[(t0 >> 24) ] & 0xff000000) ^ + (AES_Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ + (AES_Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ + (AES_Td4[(t1 ) & 0xff] & 0x000000ff) ^ + rk[0]; + PUTU32(out , s0); + s1 = + (AES_Td4[(t1 >> 24) ] & 0xff000000) ^ + (AES_Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ + (AES_Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ + (AES_Td4[(t2 ) & 0xff] & 0x000000ff) ^ + rk[1]; + PUTU32(out + 4, s1); + s2 = + (AES_Td4[(t2 >> 24) ] & 0xff000000) ^ + (AES_Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ + (AES_Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ + (AES_Td4[(t3 ) & 0xff] & 0x000000ff) ^ + rk[2]; + PUTU32(out + 8, s2); + s3 = + (AES_Td4[(t3 >> 24) ] & 0xff000000) ^ + (AES_Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ + (AES_Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ + (AES_Td4[(t0 ) & 0xff] & 0x000000ff) ^ + rk[3]; + PUTU32(out + 12, s3); +} + +#endif /* AES_ASM */ + +void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, + const unsigned long length, const AES_KEY *key, + unsigned char *ivec, const int enc) +{ + + unsigned long n; + unsigned long len = length; + unsigned char tmp[AES_BLOCK_SIZE]; + + assert(in && out && key && ivec); + + if (enc) { + while (len >= AES_BLOCK_SIZE) { + for(n=0; n < AES_BLOCK_SIZE; ++n) + tmp[n] = in[n] ^ ivec[n]; + AES_encrypt(tmp, out, key); + memcpy(ivec, out, AES_BLOCK_SIZE); + len -= AES_BLOCK_SIZE; + in += AES_BLOCK_SIZE; + out += AES_BLOCK_SIZE; + } + if (len) { + for(n=0; n < len; ++n) + tmp[n] = in[n] ^ ivec[n]; + for(n=len; n < AES_BLOCK_SIZE; ++n) + tmp[n] = ivec[n]; + AES_encrypt(tmp, tmp, key); + memcpy(out, tmp, AES_BLOCK_SIZE); + memcpy(ivec, tmp, AES_BLOCK_SIZE); + } + } else { + while (len >= AES_BLOCK_SIZE) { + memcpy(tmp, in, AES_BLOCK_SIZE); + AES_decrypt(in, out, key); + for(n=0; n < AES_BLOCK_SIZE; ++n) + out[n] ^= ivec[n]; + memcpy(ivec, tmp, AES_BLOCK_SIZE); + len -= AES_BLOCK_SIZE; + in += AES_BLOCK_SIZE; + out += AES_BLOCK_SIZE; + } + if (len) { + memcpy(tmp, in, AES_BLOCK_SIZE); + AES_decrypt(tmp, tmp, key); + for(n=0; n < len; ++n) + out[n] = tmp[n] ^ ivec[n]; + memcpy(ivec, tmp, AES_BLOCK_SIZE); + } + } +} diff --git a/qemu/crypto/init.c b/qemu/crypto/init.c new file mode 100644 index 00000000..bca2e1d3 --- /dev/null +++ b/qemu/crypto/init.c @@ -0,0 +1,94 @@ +/* + * QEMU Crypto initialization + * + * Copyright (c) 2015 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "crypto/init.h"
+#include "qapi/error.h"
+#include "qemu/thread.h"
+
+#ifdef CONFIG_GNUTLS
+#include <gnutls/gnutls.h>
+#include <gnutls/crypto.h>
+#endif
+
+#ifdef CONFIG_GCRYPT
+#include <gcrypt.h>
+#endif
+
+#include "crypto/random.h"
+
+/* #define DEBUG_GNUTLS */
+
+/*
+ * We need to init gcrypt threading if
+ *
+ *   - gcrypt < 1.6.0
+ *
+ */
+
+#if (defined(CONFIG_GCRYPT) && \
+     (GCRYPT_VERSION_NUMBER < 0x010600))
+#define QCRYPTO_INIT_GCRYPT_THREADS
+#else
+#undef QCRYPTO_INIT_GCRYPT_THREADS
+#endif
+
+#ifdef DEBUG_GNUTLS
+static void qcrypto_gnutls_log(int level, const char *str)
+{
+    fprintf(stderr, "%d: %s", level, str);
+}
+#endif
+
+int qcrypto_init(void)
+{
+#ifdef QCRYPTO_INIT_GCRYPT_THREADS
+    gcry_control(GCRYCTL_SET_THREAD_CBS, &qcrypto_gcrypt_thread_impl);
+#endif /* QCRYPTO_INIT_GCRYPT_THREADS */
+
+#ifdef CONFIG_GNUTLS
+    int ret;
+    ret = gnutls_global_init();
+    if (ret < 0) {
+        // error_setg(errp,
+        //            "Unable to initialize GNUTLS library: %s",
+        //            gnutls_strerror(ret));
+        return -1;
+    }
+#ifdef DEBUG_GNUTLS
+    gnutls_global_set_log_level(10);
+    gnutls_global_set_log_function(qcrypto_gnutls_log);
+#endif
+#endif
+
+#ifdef CONFIG_GCRYPT
+    if (!gcry_check_version(GCRYPT_VERSION)) {
+        // error_setg(errp, "Unable to initialize gcrypt");
+        return -1;
+    }
+    gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
+#endif
+
+    if (qcrypto_random_init() < 0) {
+        return -1;
+    }
+
+    return 0;
+}
diff --git a/qemu/default-configs/aarch64-softmmu.mak b/qemu/default-configs/aarch64-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/aarch64eb-softmmu.mak b/qemu/default-configs/aarch64eb-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/arm-softmmu.mak b/qemu/default-configs/arm-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/armeb-softmmu.mak b/qemu/default-configs/armeb-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/m68k-softmmu.mak b/qemu/default-configs/m68k-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/mips-softmmu.mak b/qemu/default-configs/mips-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/mips64-softmmu.mak b/qemu/default-configs/mips64-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/mips64el-softmmu.mak b/qemu/default-configs/mips64el-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/mipsel-softmmu.mak b/qemu/default-configs/mipsel-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/sparc-softmmu.mak b/qemu/default-configs/sparc-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/sparc64-softmmu.mak b/qemu/default-configs/sparc64-softmmu.mak
deleted file mode 100644
index e69de29b..00000000
diff --git a/qemu/default-configs/x86_64-softmmu.mak b/qemu/default-configs/x86_64-softmmu.mak
deleted file mode 100644
index 6826a92a..00000000
--- a/qemu/default-configs/x86_64-softmmu.mak
+++ /dev/null
@@ -1,3 +0,0 @@
-# Default configuration for x86_64-softmmu
-
-CONFIG_APIC=y
diff --git a/qemu/docs/memory.txt b/qemu/docs/memory.txt
deleted file mode 100644
index b12f1f04..00000000
--- a/qemu/docs/memory.txt
+++ /dev/null
@@ -1,244 +0,0 @@
-The memory API
-==============
-
-The memory API models the memory and I/O buses and controllers of a QEMU
-machine. It attempts to allow modelling of:
-
- - ordinary RAM
- - memory-mapped I/O (MMIO)
- - memory controllers that can dynamically reroute physical memory regions
-   to different destinations
-
-The memory model provides support for
-
- - tracking RAM changes by the guest
- - setting up coalesced memory for kvm
- - setting up ioeventfd regions for kvm
-
-Memory is modelled as an acyclic graph of MemoryRegion objects. Sinks
-(leaves) are RAM and MMIO regions, while other nodes represent
-buses, memory controllers, and memory regions that have been rerouted.
-
-In addition to MemoryRegion objects, the memory API provides AddressSpace
-objects for every root and possibly for intermediate MemoryRegions too.
-These represent memory as seen from the CPU or a device's viewpoint.
-
-Types of regions
-----------------
-
-There are four types of memory regions (all represented by a single C type
-MemoryRegion):
-
-- RAM: a RAM region is simply a range of host memory that can be made available
-  to the guest.
-
-- MMIO: a range of guest memory that is implemented by host callbacks;
-  each read or write causes a callback to be called on the host.
-
-- container: a container simply includes other memory regions, each at
-  a different offset. Containers are useful for grouping several regions
-  into one unit. For example, a PCI BAR may be composed of a RAM region
-  and an MMIO region.
-
-  A container's subregions are usually non-overlapping. In some cases it is
-  useful to have overlapping regions; for example a memory controller that
-  can overlay a subregion of RAM with MMIO or ROM, or a PCI controller
-  that does not prevent cards from claiming overlapping BARs.
-
-- alias: a subsection of another region. Aliases allow a region to be
-  split apart into discontiguous regions. Examples of uses are memory banks
-  used when the guest address space is smaller than the amount of RAM
-  addressed, or a memory controller that splits main memory to expose a "PCI
-  hole". Aliases may point to any type of region, including other aliases,
-  but an alias may not point back to itself, directly or indirectly.
-
-It is valid to add subregions to a region which is not a pure container
-(that is, to an MMIO, RAM or ROM region). This means that the region
-will act like a container, except that any addresses within the container's
-region which are not claimed by any subregion are handled by the
-container itself (ie by its MMIO callbacks or RAM backing). However
-it is generally possible to achieve the same effect with a pure container
-one of whose subregions is a low priority "background" region covering
-the whole address range; this is often clearer and is preferred.
-Subregions cannot be added to an alias region.
-
-Region names
-------------
-
-Regions are assigned names by the constructor. For most regions these are
-only used for debugging purposes, but RAM regions also use the name to identify
-live migration sections. This means that RAM region names need to have ABI
-stability.
-
-Region lifecycle
-----------------
-
-A region is created by one of the constructor functions (memory_region_init*())
-and attached to an object. It is then destroyed by object_unparent() or simply
-when the parent object dies.
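As a concrete sketch of how the region types above fit together (assuming upstream-QEMU-style constructor signatures, which vary between QEMU versions and are not part of this patch; the names bar, bar_ram, bar_mmio and owner are hypothetical):

    /* A minimal sketch, assuming upstream-QEMU-style signatures: a PCI-BAR-like
     * container holding a RAM leaf and an MMIO leaf. */
    #include "qemu/osdep.h"
    #include "exec/memory.h"
    #include "qapi/error.h"

    static uint64_t bar_mmio_read(void *opaque, hwaddr addr, unsigned size)
    {
        return 0; /* read of a device register */
    }

    static void bar_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
    {
        /* write to a device register */
    }

    static const MemoryRegionOps bar_mmio_ops = {
        .read = bar_mmio_read,
        .write = bar_mmio_write,
    };

    static MemoryRegion bar, bar_ram, bar_mmio;

    static void bar_realize(Object *owner)
    {
        /* A pure container: it only groups its subregions. */
        memory_region_init(&bar, owner, "bar", 0x2000);

        /* RAM leaf backing the first half of the BAR. */
        memory_region_init_ram(&bar_ram, owner, "bar.ram", 0x1000, &error_fatal);
        memory_region_add_subregion(&bar, 0x0000, &bar_ram);

        /* MMIO leaf: every guest access invokes the callbacks above. */
        memory_region_init_io(&bar_mmio, owner, &bar_mmio_ops, NULL, "bar.mmio", 0x1000);
        memory_region_add_subregion(&bar, 0x1000, &bar_mmio);
    }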
-
-In between, a region can be added to an address space
-by using memory_region_add_subregion() and removed using
-memory_region_del_subregion(). Destroying the region implicitly
-removes the region from the address space.
-
-Region attributes may be changed at any point; they take effect once
-the region becomes exposed to the guest.
-
-Overlapping regions and priority
---------------------------------
-Usually, regions may not overlap each other; a memory address decodes into
-exactly one target. In some cases it is useful to allow regions to overlap,
-and sometimes to control which of the overlapping regions is visible to the
-guest. This is done with memory_region_add_subregion_overlap(), which
-allows the region to overlap any other region in the same container, and
-specifies a priority that allows the core to decide which of two regions at
-the same address are visible (highest wins).
-Priority values are signed, and the default value is zero. This means that
-you can use memory_region_add_subregion_overlap() both to specify a region
-that must sit 'above' any others (with a positive priority) and also a
-background region that sits 'below' others (with a negative priority).
-
-If the higher priority region in an overlap is a container or alias, then
-the lower priority region will appear in any "holes" that the higher priority
-region has left by not mapping subregions to that area of its address range.
-(This applies recursively -- if the subregions are themselves containers or
-aliases that leave holes then the lower priority region will appear in these
-holes too.)
-
-For example, suppose we have a container A of size 0x8000 with two subregions
-B and C. B is a container mapped at 0x2000, size 0x4000, priority 2; C is
-an MMIO region mapped at 0x0, size 0x6000, priority 1. B currently has two
-of its own subregions: D of size 0x1000 at offset 0 and E of size 0x1000 at
-offset 0x2000. As a diagram:
-
-        0      1000   2000   3000   4000   5000   6000   7000   8000
-        |------|------|------|------|------|------|------|-------|
- A:     [                                                       ]
- C:     [CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC]
- B:                   [                           ]
- D:                   [DDDDD]
- E:                                 [EEEEE]
-
-The regions that will be seen within this address range then are:
-        [CCCCCCCCCCCC][DDDDD][CCCCC][EEEEE][CCCCC]
-
-Since B has higher priority than C, its subregions appear in the flat map
-even where they overlap with C. In ranges where B has not mapped anything
-C's region appears.
-
-If B had provided its own MMIO operations (ie it was not a pure container)
-then these would be used for any addresses in its range not handled by
-D or E, and the result would be:
-        [CCCCCCCCCCCC][DDDDD][BBBBB][EEEEE][BBBBB]
-
-Priority values are local to a container, because the priorities of two
-regions are only compared when they are both children of the same container.
-This means that the device in charge of the container (typically modelling
-a bus or a memory controller) can use them to manage the interaction of
-its child regions without any side effects on other parts of the system.
-In the example above, the priorities of D and E are unimportant because
-they do not overlap each other. It is the relative priority of B and C
-that causes D and E to appear on top of C: D and E's priorities are never
-compared against the priority of C.
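The A/B/C/D/E layout above could be built roughly as follows (same caveat: upstream-QEMU-style signatures, illustrative only; A..E, owner, and c_ops are hypothetical names, with c_ops a MemoryRegionOps table like the one in the earlier sketch):

    /* Sketch of the overlap example above; priorities decide what the guest
     * sees where regions overlap. */
    static MemoryRegion A, B, C, D, E;

    static void overlap_example(Object *owner, const MemoryRegionOps *c_ops)
    {
        memory_region_init(&A, owner, "A", 0x8000);

        /* C: MMIO leaf at 0x0, size 0x6000, priority 1. */
        memory_region_init_io(&C, owner, c_ops, NULL, "C", 0x6000);
        memory_region_add_subregion_overlap(&A, 0x0000, &C, 1);

        /* B: container at 0x2000, size 0x4000, priority 2, so it sits above C. */
        memory_region_init(&B, owner, "B", 0x4000);
        memory_region_add_subregion_overlap(&A, 0x2000, &B, 2);

        /* D and E never compete with C directly; only B's priority matters. */
        memory_region_init_ram(&D, owner, "D", 0x1000, &error_fatal);
        memory_region_add_subregion(&B, 0x0000, &D);
        memory_region_init_ram(&E, owner, "E", 0x1000, &error_fatal);
        memory_region_add_subregion(&B, 0x2000, &E);
    }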
-
-Visibility
-----------
-The memory core uses the following rules to select a memory region when the
-guest accesses an address:
-
-- all direct subregions of the root region are matched against the address, in
-  descending priority order
-  - if the address lies outside the region offset/size, the subregion is
-    discarded
-  - if the subregion is a leaf (RAM or MMIO), the search terminates, returning
-    this leaf region
-  - if the subregion is a container, the same algorithm is used within the
-    subregion (after the address is adjusted by the subregion offset)
-  - if the subregion is an alias, the search is continued at the alias target
-    (after the address is adjusted by the subregion offset and alias offset)
-  - if a recursive search within a container or alias subregion does not
-    find a match (because of a "hole" in the container's coverage of its
-    address range), then if this is a container with its own MMIO or RAM
-    backing the search terminates, returning the container itself. Otherwise
-    we continue with the next subregion in priority order
-- if none of the subregions match the address then the search terminates
-  with no match found
-
-Example memory map
-------------------
-
-system_memory: container@0-2^48-1
- |
- +---- lomem: alias@0-0xdfffffff ---> #ram (0-0xdfffffff)
- |
- +---- himem: alias@0x100000000-0x11fffffff ---> #ram (0xe0000000-0xffffffff)
- |
- +---- vga-window: alias@0xa0000-0xbffff ---> #pci (0xa0000-0xbffff)
- |      (prio 1)
- |
- +---- pci-hole: alias@0xe0000000-0xffffffff ---> #pci (0xe0000000-0xffffffff)
-
-pci (0-2^32-1)
- |
- +--- vga-area: container@0xa0000-0xbffff
- |    |
- |    +--- alias@0x00000-0x7fff ---> #vram (0x010000-0x017fff)
- |    |
- |    +--- alias@0x08000-0xffff ---> #vram (0x020000-0x027fff)
- |
- +---- vram: ram@0xe1000000-0xe1ffffff
- |
- +---- vga-mmio: mmio@0xe2000000-0xe200ffff
-
-ram: ram@0x00000000-0xffffffff
-
-This is a (simplified) PC memory map. The 4GB RAM block is mapped into the
-system address space via two aliases: "lomem" is a 1:1 mapping of the first
-3.5GB; "himem" maps the last 0.5GB at address 4GB. This leaves 0.5GB for the
-so-called PCI hole, which allows a 32-bit PCI bus to exist in a system with
-4GB of memory.
-
-The memory controller diverts addresses in the range 640K-768K to the PCI
-address space. This is modelled using the "vga-window" alias, mapped at a
-higher priority so it obscures the RAM at the same addresses. The vga window
-can be removed by programming the memory controller; this is modelled by
-removing the alias and exposing the RAM underneath.
-
-The pci address space is not a direct child of the system address space, since
-we only want parts of it to be visible (we accomplish this using aliases).
-It has two subregions: vga-area models the legacy vga window and is occupied
-by two 32K memory banks pointing at two sections of the framebuffer.
-In addition the vram is mapped as a BAR at address e1000000, and an additional
-BAR containing MMIO registers is mapped after it.
-
-Note that if the guest maps a BAR outside the PCI hole, it would not be
-visible as the pci-hole alias clips it to a 0.5GB range.
-
-Attributes
-----------
-
-Various region attributes (read-only, dirty logging, coalesced mmio, ioeventfd)
-can be changed during the region lifecycle. They take effect once the region
-is made visible (which can be immediately, later, or never).
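The lomem/himem/vga-window aliasing from the example map can likewise be sketched (again assuming upstream-QEMU-style signatures, illustrative only; ram, lomem, himem, vga_window, system_memory, pci_space, and owner are hypothetical names):

    /* One 4GB RAM block, exposed through aliases as in the example map above. */
    static MemoryRegion ram, lomem, himem, vga_window;

    static void map_system_ram(MemoryRegion *system_memory, MemoryRegion *pci_space,
                               Object *owner)
    {
        memory_region_init_ram(&ram, owner, "ram", 0x100000000ULL, &error_fatal);

        /* lomem: 1:1 view of the first 3.5GB at address 0. */
        memory_region_init_alias(&lomem, owner, "lomem", &ram, 0, 0xe0000000);
        memory_region_add_subregion(system_memory, 0, &lomem);

        /* himem: the last 0.5GB, re-mapped above 4GB to leave the PCI hole. */
        memory_region_init_alias(&himem, owner, "himem", &ram, 0xe0000000, 0x20000000);
        memory_region_add_subregion(system_memory, 0x100000000ULL, &himem);

        /* vga-window: higher priority, so it obscures the RAM at 640K-768K. */
        memory_region_init_alias(&vga_window, owner, "vga-window", pci_space, 0xa0000, 0x20000);
        memory_region_add_subregion_overlap(system_memory, 0xa0000, &vga_window, 1);
    }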
- -MMIO Operations ---------------- - -MMIO regions are provided with ->read() and ->write() callbacks; in addition -various constraints can be supplied to control how these callbacks are called: - - - .valid.min_access_size, .valid.max_access_size define the access sizes - (in bytes) which the device accepts; accesses outside this range will - have device and bus specific behaviour (ignored, or machine check) - - .valid.aligned specifies that the device only accepts naturally aligned - accesses. Unaligned accesses invoke device and bus specific behaviour. - - .impl.min_access_size, .impl.max_access_size define the access sizes - (in bytes) supported by the *implementation*; other access sizes will be - emulated using the ones available. For example a 4-byte write will be - emulated using four 1-byte writes, if .impl.max_access_size = 1. - - .impl.unaligned specifies that the *implementation* supports unaligned - accesses; if false, unaligned accesses will be emulated by two aligned - accesses. - - .old_mmio can be used to ease porting from code using - cpu_register_io_memory(). It should not be used in new code. diff --git a/qemu/exec-vary.c b/qemu/exec-vary.c new file mode 100644 index 00000000..d74d791c --- /dev/null +++ b/qemu/exec-vary.c @@ -0,0 +1,69 @@ +/* + * Variable page size handling + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#define IN_EXEC_VARY 1 + +#include "exec/exec-all.h" + +#include + +bool set_preferred_target_page_bits(struct uc_struct *uc, int bits) +{ + /* + * The target page size is the lowest common denominator for all + * the CPUs in the system, so we can only make it smaller, never + * larger. And we can't make it smaller once we've committed to + * a particular size. + */ +#ifdef TARGET_PAGE_BITS_VARY + assert(bits >= TARGET_PAGE_BITS_MIN); + if (uc->init_target_page->bits == 0 || uc->init_target_page->bits > bits) { + if (uc->init_target_page->decided) { + return false; + } + uc->init_target_page->bits = bits; + } +#endif + return true; +} + +void finalize_target_page_bits(struct uc_struct *uc) +{ +#ifdef TARGET_PAGE_BITS_VARY + if (uc->init_target_page == NULL) { + uc->init_target_page = g_new0(TargetPageBits, 1); + } else { + return; + } + if (uc->init_target_page->bits == 0) { + uc->init_target_page->bits = TARGET_PAGE_BITS_MIN; + } + uc->init_target_page->mask = (target_long)-1 << uc->init_target_page->bits; + uc->init_target_page->decided = true; + + /* + * For the benefit of an -flto build, prevent the compiler from + * hoisting a read from target_page before we finish initializing. + */ + barrier(); +#endif +} diff --git a/qemu/exec.c b/qemu/exec.c index 9e4fa5d1..317552f8 100644 --- a/qemu/exec.c +++ b/qemu/exec.c @@ -16,31 +16,27 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . 
*/ -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ -#include "config.h" -#ifndef _WIN32 -#include -#include -#endif - -#include "qemu-common.h" -#include "cpu.h" -#include "tcg.h" -#include "hw/hw.h" -#include "hw/qdev.h" #include "qemu/osdep.h" +#include "qemu-common.h" +#include "exec/cpu-defs.h" +#include "cpu.h" + +#include "qemu/cutils.h" +#include "exec/exec-all.h" +#include "exec/target_page.h" +#include "tcg/tcg.h" #include "sysemu/sysemu.h" +#include "sysemu/tcg.h" #include "qemu/timer.h" #include "exec/memory.h" -#include "exec/address-spaces.h" -#if defined(CONFIG_USER_ONLY) -#include -#endif -#include "exec/cpu-all.h" +#include "exec/ioport.h" -#include "exec/cputlb.h" -#include "translate-all.h" +#ifdef CONFIG_FALLOCATE_PUNCH_HOLE +#include +#endif + +#include "accel/tcg/translate-all.h" #include "exec/memory-internal.h" #include "exec/ram_addr.h" @@ -49,29 +45,12 @@ #include "uc_priv.h" -//#define DEBUG_SUBPAGE - -#if !defined(CONFIG_USER_ONLY) - -/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */ -#define RAM_PREALLOC (1 << 0) - -/* RAM is mmap-ed with MAP_SHARED */ -#define RAM_SHARED (1 << 1) - -#endif - -#if !defined(CONFIG_USER_ONLY) -/* current CPU in the current thread. It is only valid inside - cpu_exec() */ -//DEFINE_TLS(CPUState *, current_cpu); - typedef struct PhysPageEntry PhysPageEntry; struct PhysPageEntry { /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */ uint32_t skip : 6; - /* index into phys_sections (!skip) or phys_map_nodes (skip) */ + /* index into phys_sections (!skip) or phys_map_nodes (skip) */ uint32_t ptr : 26; }; @@ -97,78 +76,82 @@ typedef struct PhysPageMap { } PhysPageMap; struct AddressSpaceDispatch { + MemoryRegionSection *mru_section; /* This is a multi-level map on the physical address space. * The bottom level has pointers to MemoryRegionSections. 
*/ PhysPageEntry phys_map; PhysPageMap map; - AddressSpace *as; + struct uc_struct *uc; }; #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) typedef struct subpage_t { MemoryRegion iomem; - AddressSpace *as; + FlatView *fv; hwaddr base; - uint16_t sub_section[TARGET_PAGE_SIZE]; + uint16_t sub_section[]; } subpage_t; #define PHYS_SECTION_UNASSIGNED 0 -#define PHYS_SECTION_NOTDIRTY 1 -#define PHYS_SECTION_ROM 2 -#define PHYS_SECTION_WATCH 3 -static void memory_map_init(struct uc_struct *uc); static void tcg_commit(MemoryListener *listener); -#endif +/** + * CPUAddressSpace: all the information a CPU needs about an AddressSpace + * @cpu: the CPU whose AddressSpace this is + * @as: the AddressSpace itself + * @memory_dispatch: its dispatch pointer (cached, RCU protected) + * @tcg_as_listener: listener for tracking changes to the AddressSpace + */ +struct CPUAddressSpace { + CPUState *cpu; + AddressSpace *as; + struct AddressSpaceDispatch *memory_dispatch; + MemoryListener tcg_as_listener; +}; -#if !defined(CONFIG_USER_ONLY) -static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes) +static void phys_map_node_reserve(AddressSpaceDispatch *d, PhysPageMap *map, unsigned nodes) { if (map->nodes_nb + nodes > map->nodes_nb_alloc) { - map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16); - map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes); + map->nodes_nb_alloc = MAX(d->uc->alloc_hint, map->nodes_nb + nodes); map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); + d->uc->alloc_hint = map->nodes_nb_alloc; } } -static uint32_t phys_map_node_alloc(PhysPageMap *map) +static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf) { unsigned i; uint32_t ret; + PhysPageEntry e; + PhysPageEntry *p; ret = map->nodes_nb++; + p = map->nodes[ret]; assert(ret != PHYS_MAP_NODE_NIL); assert(ret != map->nodes_nb_alloc); + + e.skip = leaf ? 0 : 1; + e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL; for (i = 0; i < P_L2_SIZE; ++i) { - map->nodes[ret][i].skip = 1; - map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL; + memcpy(&p[i], &e, sizeof(e)); } return ret; } static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp, - hwaddr *index, hwaddr *nb, uint16_t leaf, - int level) + hwaddr *index, uint64_t *nb, uint16_t leaf, + int level) { PhysPageEntry *p; - int i; hwaddr step = (hwaddr)1 << (level * P_L2_BITS); if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { - lp->ptr = phys_map_node_alloc(map); - p = map->nodes[lp->ptr]; - if (level == 0) { - for (i = 0; i < P_L2_SIZE; i++) { - p[i].skip = 0; - p[i].ptr = PHYS_SECTION_UNASSIGNED; - } - } - } else { - p = map->nodes[lp->ptr]; + lp->ptr = phys_map_node_alloc(map, level == 0); } + p = map->nodes[lp->ptr]; lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; while (*nb && lp < &p[P_L2_SIZE]) { @@ -185,11 +168,14 @@ static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp, } static void phys_page_set(AddressSpaceDispatch *d, - hwaddr index, hwaddr nb, - uint16_t leaf) + hwaddr index, uint64_t nb, + uint16_t leaf) { +#ifdef TARGET_ARM + struct uc_struct *uc = d->uc; +#endif /* Wildly overreserve - it doesn't matter much. */ - phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS); + phys_map_node_reserve(d, &d->map, 3 * P_L2_LEVELS); phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); } @@ -197,7 +183,7 @@ static void phys_page_set(AddressSpaceDispatch *d, /* Compact a non leaf page entry. 
Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
-static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
+static void phys_page_compact(struct uc_struct *uc, PhysPageEntry *lp, Node *nodes)
 {
     unsigned valid_ptr = P_L2_SIZE;
     int valid = 0;
@@ -217,7 +203,7 @@
         valid_ptr = i;
         valid++;
         if (p[i].skip) {
-            phys_page_compact(&p[i], nodes, compacted);
+            phys_page_compact(uc, &p[i], nodes);
         }
     }
@@ -229,7 +215,8 @@
     assert(valid_ptr < P_L2_SIZE);

     /* Don't compress if it won't fit in the # of bits we have. */
-    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
+    if (P_L2_LEVELS >= (1 << 6) &&
+        lp->skip + p[valid_ptr].skip >= (1 << 6)) {
         return;
     }
@@ -247,21 +234,32 @@
     }
 }

-static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
+void address_space_dispatch_compact(AddressSpaceDispatch *d)
 {
-    //DECLARE_BITMAP(compacted, nodes_nb);
-    // this isnt actually used
-    unsigned long* compacted = NULL;
-
     if (d->phys_map.skip) {
-        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
+        phys_page_compact(d->uc, &d->phys_map, d->map.nodes);
     }
 }

-static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
-                                           Node *nodes, MemoryRegionSection *sections)
+static inline bool section_covers_addr(const MemoryRegionSection *section,
+                                       hwaddr addr)
 {
-    PhysPageEntry *p;
+    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
+     * the section must cover the entire address space.
+     */
+    return int128_gethi(section->size) ||
+           range_covers_byte(section->offset_within_address_space,
+                             int128_getlo(section->size), addr);
+}
+
+static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
+{
+#ifdef TARGET_ARM
+    struct uc_struct *uc = d->uc;
+#endif
+    PhysPageEntry lp = d->phys_map, *p;
+    Node *nodes = d->map.nodes;
+    MemoryRegionSection *sections = d->map.sections;
     hwaddr index = addr >> TARGET_PAGE_BITS;
     int i;
@@ -273,29 +271,29 @@
         lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
     }

-    if (sections[lp.ptr].size.hi ||
-        range_covers_byte(sections[lp.ptr].offset_within_address_space,
-                          sections[lp.ptr].size.lo, addr)) {
+    if (section_covers_addr(&sections[lp.ptr], addr)) {
         return &sections[lp.ptr];
     } else {
         return &sections[PHYS_SECTION_UNASSIGNED];
     }
 }

-bool memory_region_is_unassigned(struct uc_struct* uc, MemoryRegion *mr)
-{
-    return mr != &uc->io_mem_rom && mr != &uc->io_mem_notdirty &&
-        !mr->rom_device && mr != &uc->io_mem_watch;
-}
-
+/* Called from RCU critical section */
 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
-                                                        hwaddr addr,
-                                                        bool resolve_subpage)
+                                                        hwaddr addr,
+                                                        bool resolve_subpage)
 {
-    MemoryRegionSection *section;
+#ifdef TARGET_ARM
+    struct uc_struct *uc = d->uc;
+#endif
+    MemoryRegionSection *section = d->mru_section;
     subpage_t *subpage;

-    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
+    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
+        !section_covers_addr(section, addr)) {
+        section = phys_page_find(d, addr);
+        d->mru_section = section;
+    }
     if (resolve_subpage && section->mr->subpage) {
         subpage = container_of(section->mr, subpage_t,
iomem); section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]]; @@ -303,11 +301,13 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d, return section; } +/* Called from RCU critical section */ static MemoryRegionSection * address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat, - hwaddr *plen, bool resolve_subpage) + hwaddr *plen, bool resolve_subpage) { MemoryRegionSection *section; + MemoryRegion *mr; Int128 diff; section = address_space_lookup_region(d, addr, resolve_subpage); @@ -317,70 +317,242 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x /* Compute offset within MemoryRegion */ *xlat = addr + section->offset_within_region; - diff = int128_sub(section->mr->size, int128_make64(addr)); - *plen = int128_get64(int128_min(diff, int128_make64(*plen))); + mr = section->mr; + + /* MMIO registers can be expected to perform full-width accesses based only + * on their address, without considering adjacent registers that could + * decode to completely different MemoryRegions. When such registers + * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO + * regions overlap wildly. For this reason we cannot clamp the accesses + * here. + * + * If the length is small (as is the case for address_space_ldl/stl), + * everything works fine. If the incoming length is large, however, + * the caller really has to do the clamping through memory_access_size. + */ + if (memory_region_is_ram(mr)) { + diff = int128_sub(section->size, int128_make64(addr)); + *plen = int128_get64(int128_min(diff, int128_make64(*plen))); + } return section; } -static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) +/** + * address_space_translate_iommu - translate an address through an IOMMU + * memory region and then through the target address space. + * + * @iommu_mr: the IOMMU memory region that we start the translation from + * @addr: the address to be translated through the MMU + * @xlat: the translated address offset within the destination memory region. + * It cannot be %NULL. + * @plen_out: valid read/write length of the translated address. It + * cannot be %NULL. + * @page_mask_out: page mask for the translated address. This + * should only be meaningful for IOMMU translated + * addresses, since there may be huge pages that this bit + * would tell. It can be %NULL if we don't care about it. + * @is_write: whether the translation operation is for write + * @is_mmio: whether this can be MMIO, set true if it can + * @target_as: the address space targeted by the IOMMU + * @attrs: transaction attributes + * + * This function is called from RCU critical section. It is the common + * part of flatview_do_translate and address_space_translate_cached. 
+ */ +static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr, + hwaddr *xlat, + hwaddr *plen_out, + hwaddr *page_mask_out, + bool is_write, + bool is_mmio, + AddressSpace **target_as, + MemTxAttrs attrs) { - if (memory_region_is_ram(mr)) { - return !(is_write && mr->readonly); - } - if (memory_region_is_romd(mr)) { - return !is_write; - } - - return false; -} - -MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, - hwaddr *xlat, hwaddr *plen, - bool is_write) -{ - IOMMUTLBEntry iotlb; MemoryRegionSection *section; - MemoryRegion *mr; - hwaddr len = *plen; + hwaddr page_mask = (hwaddr)-1; + MemoryRegion *mr = MEMORY_REGION(iommu_mr); - for (;;) { - section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true); - mr = section->mr; - if (mr->ops == NULL) - return NULL; + do { + hwaddr addr = *xlat; + IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr); + int iommu_idx = 0; + IOMMUTLBEntry iotlb; - if (!mr->iommu_ops) { - break; + if (imrc->attrs_to_index) { + iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); + } + + iotlb = imrc->translate(iommu_mr, addr, is_write ? + IOMMU_WO : IOMMU_RO, iommu_idx); + + if (!(iotlb.perm & (1 << is_write))) { + goto unassigned; } - iotlb = mr->iommu_ops->translate(mr, addr, is_write); addr = ((iotlb.translated_addr & ~iotlb.addr_mask) | (addr & iotlb.addr_mask)); - len = MIN(len, (addr | iotlb.addr_mask) - addr + 1); - if (!(iotlb.perm & (1 << is_write))) { - mr = &as->uc->io_mem_unassigned; - break; - } + page_mask &= iotlb.addr_mask; + *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1); + *target_as = iotlb.target_as; - as = iotlb.target_as; + section = address_space_translate_internal( + address_space_to_dispatch(iotlb.target_as), addr, xlat, + plen_out, is_mmio); + + iommu_mr = memory_region_get_iommu(section->mr); + } while (unlikely(iommu_mr)); + + if (page_mask_out) { + *page_mask_out = page_mask; + } + return *section; + +unassigned: + return (MemoryRegionSection) { .mr = &(mr->uc->io_mem_unassigned) }; +} + +/** + * flatview_do_translate - translate an address in FlatView + * + * @fv: the flat view that we want to translate on + * @addr: the address to be translated in above address space + * @xlat: the translated address offset within memory region. It + * cannot be @NULL. + * @plen_out: valid read/write length of the translated address. It + * can be @NULL when we don't care about it. + * @page_mask_out: page mask for the translated address. This + * should only be meaningful for IOMMU translated + * addresses, since there may be huge pages that this bit + * would tell. It can be @NULL if we don't care about it. 
+ * @is_write: whether the translation operation is for write + * @is_mmio: whether this can be MMIO, set true if it can + * @target_as: the address space targeted by the IOMMU + * @attrs: memory transaction attributes + * + * This function is called from RCU critical section + */ +static MemoryRegionSection flatview_do_translate(struct uc_struct *uc, FlatView *fv, + hwaddr addr, + hwaddr *xlat, + hwaddr *plen_out, + hwaddr *page_mask_out, + bool is_write, + bool is_mmio, + AddressSpace **target_as, + MemTxAttrs attrs) +{ + MemoryRegionSection *section; + IOMMUMemoryRegion *iommu_mr; + hwaddr plen = (hwaddr)(-1); + + if (!plen_out) { + plen_out = &plen; } - *plen = len; - *xlat = addr; + section = address_space_translate_internal( + flatview_to_dispatch(fv), addr, xlat, + plen_out, is_mmio); + + iommu_mr = memory_region_get_iommu(section->mr); + if (unlikely(iommu_mr)) { + return address_space_translate_iommu(iommu_mr, xlat, + plen_out, page_mask_out, + is_write, is_mmio, + target_as, attrs); + } + if (page_mask_out) { + /* Not behind an IOMMU, use default page size. */ + *page_mask_out = ~TARGET_PAGE_MASK; + } + + return *section; +} + +/* Called from RCU critical section */ +MemoryRegion *flatview_translate(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr *xlat, + hwaddr *plen, bool is_write, + MemTxAttrs attrs) +{ + MemoryRegion *mr; + MemoryRegionSection section; + AddressSpace *as = NULL; + + /* This can be MMIO, so setup MMIO bit. */ + section = flatview_do_translate(uc, fv, addr, xlat, plen, NULL, + is_write, true, &as, attrs); + mr = section.mr; + return mr; } +/* Called from RCU critical section */ MemoryRegionSection * -address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat, - hwaddr *plen) +address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, + hwaddr *xlat, hwaddr *plen, + MemTxAttrs attrs, int *prot) { MemoryRegionSection *section; - section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false); + IOMMUMemoryRegion *iommu_mr; + IOMMUMemoryRegionClass *imrc; + IOMMUTLBEntry iotlb; + int iommu_idx; + AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch; - assert(!section->mr->iommu_ops); + for (;;) { + section = address_space_translate_internal(d, addr, &addr, plen, false); + + iommu_mr = memory_region_get_iommu(section->mr); + if (!iommu_mr) { + break; + } + + imrc = memory_region_get_iommu_class_nocheck(iommu_mr); + + iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); + + // tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx); + + /* We need all the permissions, so pass IOMMU_NONE so the IOMMU + * doesn't short-cut its translation table walk. + */ + iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx); + addr = ((iotlb.translated_addr & ~iotlb.addr_mask) + | (addr & iotlb.addr_mask)); + /* Update the caller's prot bits to remove permissions the IOMMU + * is giving us a failure response for. If we get down to no + * permissions left at all we can give up now. 
+     */
+    if (!(iotlb.perm & IOMMU_RO)) {
+        *prot &= ~(PAGE_READ | PAGE_EXEC);
+    }
+    if (!(iotlb.perm & IOMMU_WO)) {
+        *prot &= ~PAGE_WRITE;
+    }
+
+    if (!*prot) {
+        goto translate_fail;
+    }
+
+    d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
+    }
+
+    assert(memory_region_get_iommu(section->mr) == NULL);
+    *xlat = addr;
+
+    // Unicorn:
+    // If there is no memory mapped but we still start emulation, we will get
+    // a default memory region section and it would be marked as IO memory
+    // in cputlb, which prevents further fetching and execution.
+    //
+    // The reason we set prot to 0 here is not to set protection but to notify
+    // the outer function to add a new **blank** tlb which will never be hit.
+    if (!memory_region_is_ram(section->mr) && section == &d->map.sections[PHYS_SECTION_UNASSIGNED]) {
+        *prot = 0;
+    }

     return section;
+
+translate_fail:
+    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
 }
-#endif

 CPUState *qemu_get_cpu(struct uc_struct *uc, int index)
 {
@@ -388,92 +560,93 @@ CPUState *qemu_get_cpu(struct uc_struct *uc, int index)
         if (cpu->cpu_index == index) {
             return cpu;
         }
+    return NULL;
 }

-#if !defined(CONFIG_USER_ONLY)
-void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
+void cpu_address_space_init(CPUState *cpu, int asidx, MemoryRegion *mr)
 {
-    /* We only support one address space per cpu at the moment. */
-    assert(cpu->as == as);
+    /* Target code should have set num_ases before calling us */
+    assert(asidx < cpu->num_ases);

-    if (cpu->tcg_as_listener) {
-        memory_listener_unregister(as->uc, cpu->tcg_as_listener);
-    } else {
-        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
+    if (!cpu->cpu_ases) {
+        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
+        cpu->cpu_ases[0].cpu = cpu;
+        cpu->cpu_ases[0].as = &(cpu->uc->address_space_memory);
+        cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
+        memory_listener_register(&(cpu->cpu_ases[0].tcg_as_listener), cpu->cpu_ases[0].as);
     }
-    cpu->tcg_as_listener->commit = tcg_commit;
-    memory_listener_register(as->uc, cpu->tcg_as_listener, as);
-}
-#endif
-
-void cpu_exec_init(CPUArchState *env, void *opaque)
-{
-    struct uc_struct *uc = opaque;
-    CPUState *cpu = ENV_GET_CPU(env);
-
-    cpu->uc = uc;
-    env->uc = uc;
-
-    cpu->cpu_index = 0;
-    cpu->numa_node = 0;
-    QTAILQ_INIT(&cpu->breakpoints);
-    QTAILQ_INIT(&cpu->watchpoints);
-
-    cpu->as = &uc->as;
-
-    // TODO: assert uc does not already have a cpu?
-    uc->cpu = cpu;
-}
-
-#if defined(TARGET_HAS_ICE)
-#if defined(CONFIG_USER_ONLY)
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
-    tb_invalidate_phys_page_range(pc, pc + 1, 0);
-}
-#else
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
-    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
-    if (phys != -1) {
-        tb_invalidate_phys_addr(cpu->as,
-                                phys | (pc & ~TARGET_PAGE_MASK));
+    /* arm security memory */
+    if (asidx > 0) {
+        cpu->cpu_ases[asidx].cpu = cpu;
+        cpu->cpu_ases[asidx].as = &(cpu->uc->address_space_memory);
+        cpu->cpu_ases[asidx].tcg_as_listener.commit = tcg_commit;
+        memory_listener_register(&(cpu->cpu_ases[asidx].tcg_as_listener), cpu->cpu_ases[asidx].as);
     }
 }
-#endif
-#endif /* TARGET_HAS_ICE */

-#if defined(CONFIG_USER_ONLY)
-void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
+AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
+{
+    /* only one AddressSpace.
*/ + return cpu->cpu_ases[0].as; +} +void cpu_exec_unrealizefn(CPUState *cpu) { } -int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, - int flags) +void cpu_exec_initfn(CPUState *cpu) { - return -ENOSYS; + cpu->num_ases = 1; + cpu->as = &(cpu->uc->address_space_memory); + cpu->memory = cpu->uc->system_memory; } -void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) +void cpu_exec_realizefn(CPUState *cpu) { + CPUClass *cc = CPU_GET_CLASS(cpu); + + cc->tcg_initialize(cpu->uc); + tlb_init(cpu); } -int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, - int flags, CPUWatchpoint **watchpoint) +void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs) { - return -ENOSYS; + ram_addr_t ram_addr; + MemoryRegion *mr; + hwaddr l = 1; + + mr = address_space_translate(as, addr, &addr, &l, false, attrs); + if (!memory_region_is_ram(mr)) { + return; + } + + ram_addr = memory_region_get_ram_addr(mr) + addr; + tb_invalidate_phys_page_range(as->uc, ram_addr, ram_addr + 1); } -#else + +static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) +{ + /* + * There may not be a virtual to physical translation for the pc + * right now, but there may exist cached TB for this pc. + * Flush the whole TB cache to force re-translation of such TBs. + * This is heavyweight, but we're debugging anyway. + */ + tb_flush(cpu); +} + /* Add a watchpoint. */ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, - int flags, CPUWatchpoint **watchpoint) + int flags, CPUWatchpoint **watchpoint) { +#if 0 CPUWatchpoint *wp; /* forbid ranges which are empty or run off the end of the address space */ if (len == 0 || (addr + len - 1) < addr) { + error_report("tried to set invalid watchpoint at %" + VADDR_PRIx ", len=%" VADDR_PRIu, addr, len); return -EINVAL; } wp = g_malloc(sizeof(*wp)); @@ -493,38 +666,27 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, if (watchpoint) *watchpoint = wp; +#endif + return 0; } -/* Remove a specific watchpoint. */ -int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, - int flags) -{ - CPUWatchpoint *wp; - - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - if (addr == wp->vaddr && len == wp->len - && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) { - cpu_watchpoint_remove_by_ref(cpu, wp); - return 0; - } - } - return -ENOENT; -} - /* Remove a specific watchpoint by reference. */ void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) { +#if 0 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry); tlb_flush_page(cpu, watchpoint->vaddr); g_free(watchpoint); +#endif } /* Remove all matching watchpoints. */ void cpu_watchpoint_remove_all(CPUState *cpu, int mask) { +#if 0 CPUWatchpoint *wp, *next; QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) { @@ -532,35 +694,30 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask) cpu_watchpoint_remove_by_ref(cpu, wp); } } -} - -/* Return true if this watchpoint address matches the specified - * access (ie the address range covered by the watchpoint overlaps - * partially or completely with the address range covered by the - * access). - */ -static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, - vaddr addr, - vaddr len) -{ - /* We know the lengths are non-zero, but a little caution is - * required to avoid errors in the case where the range ends - * exactly at the top of the address space and so addr + len - * wraps round to zero. 
- */ - vaddr wpend = wp->vaddr + wp->len - 1; - vaddr addrend = addr + len - 1; - - return !(addr > wpend || wp->vaddr > addrend); -} - #endif +} + +/* Return flags for watchpoints that match addr + prot. */ +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) +{ +#if 0 + CPUWatchpoint *wp; + int ret = 0; + + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) { + ret |= wp->flags; + } + } + return ret; +#endif + return 0; +} /* Add a breakpoint. */ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, - CPUBreakpoint **breakpoint) + CPUBreakpoint **breakpoint) { -#if defined(TARGET_HAS_ICE) CPUBreakpoint *bp; bp = g_malloc(sizeof(*bp)); @@ -581,15 +738,11 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, *breakpoint = bp; } return 0; -#else - return -ENOSYS; -#endif } /* Remove a specific breakpoint. */ int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags) { -#if defined(TARGET_HAS_ICE) CPUBreakpoint *bp; QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { @@ -599,27 +752,21 @@ int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags) } } return -ENOENT; -#else - return -ENOSYS; -#endif } /* Remove a specific breakpoint by reference. */ void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint) { -#if defined(TARGET_HAS_ICE) QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry); breakpoint_invalidate(cpu, breakpoint->pc); g_free(breakpoint); -#endif } /* Remove all matching breakpoints. */ void cpu_breakpoint_remove_all(CPUState *cpu, int mask) { -#if defined(TARGET_HAS_ICE) CPUBreakpoint *bp, *next; QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) { @@ -627,69 +774,24 @@ void cpu_breakpoint_remove_all(CPUState *cpu, int mask) cpu_breakpoint_remove_by_ref(cpu, bp); } } -#endif -} - -/* enable or disable single step mode. EXCP_DEBUG is returned by the - CPU loop after each instruction */ -void cpu_single_step(CPUState *cpu, int enabled) -{ -#if defined(TARGET_HAS_ICE) - if (cpu->singlestep_enabled != enabled) { - CPUArchState *env; - cpu->singlestep_enabled = enabled; - /* must flush all the translated code to avoid inconsistencies */ - /* XXX: only flush what is necessary */ - env = cpu->env_ptr; - tb_flush(env); - } -#endif } void cpu_abort(CPUState *cpu, const char *fmt, ...) { - va_list ap; - va_list ap2; - - va_start(ap, fmt); - va_copy(ap2, ap); - fprintf(stderr, "qemu: fatal: "); - vfprintf(stderr, fmt, ap); - fprintf(stderr, "\n"); - cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP); - if (qemu_log_enabled()) { - qemu_log("qemu: fatal: "); - qemu_log_vprintf(fmt, ap2); - qemu_log("\n"); - log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP); - qemu_log_flush(); - qemu_log_close(); - } - va_end(ap2); - va_end(ap); -#if defined(CONFIG_USER_ONLY) - { - struct sigaction act; - sigfillset(&act.sa_mask); - act.sa_handler = SIG_DFL; - sigaction(SIGABRT, &act, NULL); - } -#endif abort(); } -#if !defined(CONFIG_USER_ONLY) +/* Called from RCU critical section */ static RAMBlock *qemu_get_ram_block(struct uc_struct *uc, ram_addr_t addr) { RAMBlock *block; - /* The list is protected by the iothread lock here. 
*/ block = uc->ram_list.mru_block; - if (block && addr - block->offset < block->length) { - goto found; + if (block && addr - block->offset < block->max_length) { + return block; } - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { - if (addr - block->offset < block->length) { + RAMBLOCK_FOREACH(block) { + if (addr - block->offset < block->max_length) { goto found; } } @@ -702,97 +804,31 @@ found: return block; } -static void tlb_reset_dirty_range_all(struct uc_struct* uc, - ram_addr_t start, ram_addr_t length) -{ - ram_addr_t start1; - RAMBlock *block; - ram_addr_t end; - - end = TARGET_PAGE_ALIGN(start + length); - start &= TARGET_PAGE_MASK; - - block = qemu_get_ram_block(uc, start); - assert(block == qemu_get_ram_block(uc, end - 1)); - start1 = (uintptr_t)block->host + (start - block->offset); - cpu_tlb_reset_dirty_all(uc, start1, length); -} - /* Note: start and end must be within the same ram block. */ -void cpu_physical_memory_reset_dirty(struct uc_struct* uc, - ram_addr_t start, ram_addr_t length, unsigned client) +bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, + ram_addr_t length, + unsigned client) { - if (length == 0) - return; - cpu_physical_memory_clear_dirty_range(uc, start, length, client); - - if (tcg_enabled(uc)) { - tlb_reset_dirty_range_all(uc, start, length); - } + return false; } +/* Called from RCU critical section */ hwaddr memory_region_section_get_iotlb(CPUState *cpu, - MemoryRegionSection *section, - target_ulong vaddr, - hwaddr paddr, hwaddr xlat, - int prot, - target_ulong *address) + MemoryRegionSection *section) { - hwaddr iotlb; - CPUWatchpoint *wp; - - if (memory_region_is_ram(section->mr)) { - /* Normal RAM. */ - iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + xlat; - if (!section->readonly) { - iotlb |= PHYS_SECTION_NOTDIRTY; - } else { - iotlb |= PHYS_SECTION_ROM; - } - } else { - iotlb = section - section->address_space->dispatch->map.sections; - iotlb += xlat; - } - - /* Make accesses to pages with watchpoints go via the - watchpoint trap routines. */ - QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) { - /* Avoid trapping reads of pages with a write breakpoint. */ - if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { - iotlb = PHYS_SECTION_WATCH + paddr; - *address |= TLB_MMIO; - break; - } - } - } - - return iotlb; -} -#endif /* defined(CONFIG_USER_ONLY) */ - -#if !defined(CONFIG_USER_ONLY) - -static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, - uint16_t section); -static subpage_t *subpage_init(AddressSpace *as, hwaddr base); - -static void *(*phys_mem_alloc)(size_t size, uint64_t *align) = -qemu_anon_ram_alloc; - -/* - * Set a custom physical guest memory alloator. - * Accelerators with unusual needs may need this. Hopefully, we can - * get rid of it eventually. 
- */ -void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align)) -{ - phys_mem_alloc = alloc; + AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); + return section - d->map.sections; } -static uint16_t phys_section_add(PhysPageMap *map, - MemoryRegionSection *section) +static int subpage_register(struct uc_struct *uc, subpage_t *mmio, uint32_t start, uint32_t end, + uint16_t section); +static subpage_t *subpage_init(struct uc_struct *, FlatView *fv, hwaddr base); + +static void *(*phys_mem_alloc)(struct uc_struct *uc, size_t size, uint64_t *align) = + qemu_anon_ram_alloc; + +static uint16_t phys_section_add(struct uc_struct *uc, PhysPageMap *map, + MemoryRegionSection *section) { /* The physical section number is ORed with a page-aligned * pointer to produce the iotlb entries. Thus it should @@ -803,20 +839,19 @@ static uint16_t phys_section_add(PhysPageMap *map, if (map->sections_nb == map->sections_nb_alloc) { map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); map->sections = g_renew(MemoryRegionSection, map->sections, - map->sections_nb_alloc); + map->sections_nb_alloc); } map->sections[map->sections_nb] = *section; - memory_region_ref(section->mr); return map->sections_nb++; } static void phys_section_destroy(MemoryRegion *mr) { - memory_region_unref(mr); + bool have_sub_page = mr->subpage; - if (mr->subpage) { + if (have_sub_page) { subpage_t *subpage = container_of(mr, subpage_t, iomem); - object_unref(mr->uc, OBJECT(&subpage->iomem)); + // object_unref(OBJECT(&subpage->iomem)); g_free(subpage); } } @@ -831,89 +866,99 @@ static void phys_sections_free(PhysPageMap *map) g_free(map->nodes); } -static void register_subpage(struct uc_struct* uc, - AddressSpaceDispatch *d, MemoryRegionSection *section) +static void register_subpage(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section) { + AddressSpaceDispatch *d = flatview_to_dispatch(fv); subpage_t *subpage; hwaddr base = section->offset_within_address_space & TARGET_PAGE_MASK; - MemoryRegionSection *existing = phys_page_find(d->phys_map, base, - d->map.nodes, d->map.sections); + MemoryRegionSection *existing = phys_page_find(d, base); + MemoryRegionSection subsection = { + .offset_within_address_space = base, + .size = int128_make64(TARGET_PAGE_SIZE), + }; hwaddr start, end; - MemoryRegionSection subsection = MemoryRegionSection_make(NULL, NULL, 0, int128_make64(TARGET_PAGE_SIZE), base, false); - assert(existing->mr->subpage || existing->mr == &uc->io_mem_unassigned); + assert(existing->mr->subpage || existing->mr == &(section->mr->uc->io_mem_unassigned)); if (!(existing->mr->subpage)) { - subpage = subpage_init(d->as, base); - subsection.address_space = d->as; + subpage = subpage_init(uc, fv, base); + subsection.fv = fv; subsection.mr = &subpage->iomem; phys_page_set(d, base >> TARGET_PAGE_BITS, 1, - phys_section_add(&d->map, &subsection)); + phys_section_add(uc, &d->map, &subsection)); } else { subpage = container_of(existing->mr, subpage_t, iomem); } start = section->offset_within_address_space & ~TARGET_PAGE_MASK; end = start + int128_get64(section->size) - 1; - subpage_register(subpage, start, end, - phys_section_add(&d->map, section)); - //g_free(subpage); + subpage_register(uc, subpage, start, end, + phys_section_add(uc, &d->map, section)); } -static void register_multipage(AddressSpaceDispatch *d, - MemoryRegionSection *section) +static void register_multipage(struct uc_struct *uc, FlatView *fv, + MemoryRegionSection *section) { + AddressSpaceDispatch *d = flatview_to_dispatch(fv); hwaddr 
start_addr = section->offset_within_address_space; - uint16_t section_index = phys_section_add(&d->map, section); + uint16_t section_index = phys_section_add(uc, &d->map, section); uint64_t num_pages = int128_get64(int128_rshift(section->size, - TARGET_PAGE_BITS)); + TARGET_PAGE_BITS)); assert(num_pages); phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index); } -static void mem_add(MemoryListener *listener, MemoryRegionSection *section) +/* + * The range in *section* may look like this: + * + * |s|PPPPPPP|s| + * + * where s stands for subpage and P for page. + */ +void flatview_add_to_dispatch(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section) { - AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); - AddressSpaceDispatch *d = as->next_dispatch; - MemoryRegionSection now = *section, remain = *section; + MemoryRegionSection remain = *section; Int128 page_size = int128_make64(TARGET_PAGE_SIZE); - if (now.offset_within_address_space & ~TARGET_PAGE_MASK) { - uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space) - - now.offset_within_address_space; + /* register first subpage */ + if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) { + uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space) + - remain.offset_within_address_space; + MemoryRegionSection now = remain; now.size = int128_min(int128_make64(left), now.size); - register_subpage(as->uc, d, &now); - } else { - now.size = int128_zero(); - } - while (int128_ne(remain.size, now.size)) { + register_subpage(uc, fv, &now); + if (int128_eq(remain.size, now.size)) { + return; + } remain.size = int128_sub(remain.size, now.size); remain.offset_within_address_space += int128_get64(now.size); remain.offset_within_region += int128_get64(now.size); - now = remain; - if (int128_lt(remain.size, page_size)) { - register_subpage(as->uc, d, &now); - } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) { - now.size = page_size; - register_subpage(as->uc, d, &now); - } else { - now.size = int128_and(now.size, int128_neg(page_size)); - register_multipage(d, &now); - } } + + /* register whole pages */ + if (int128_ge(remain.size, page_size)) { + MemoryRegionSection now = remain; + now.size = int128_and(now.size, int128_neg(page_size)); + register_multipage(uc, fv, &now); + if (int128_eq(remain.size, now.size)) { + return; + } + remain.size = int128_sub(remain.size, now.size); + remain.offset_within_address_space += int128_get64(now.size); + remain.offset_within_region += int128_get64(now.size); + } + + /* register last subpage */ + register_subpage(uc, fv, &remain); } -#ifdef __linux__ - -#include - -#define HUGETLBFS_MAGIC 0x958458f6 - -#endif - +/* Allocate space within the ram_addr_t space that governs the + * dirty bitmaps. + * Called with the ramlist lock held. + */ static ram_addr_t find_ram_offset(struct uc_struct *uc, ram_addr_t size) { RAMBlock *block, *next_block; @@ -921,22 +966,35 @@ static ram_addr_t find_ram_offset(struct uc_struct *uc, ram_addr_t size) assert(size != 0); /* it would hand out same offset multiple times */ - if (QTAILQ_EMPTY(&uc->ram_list.blocks)) + if (QLIST_EMPTY(&uc->ram_list.blocks)) { return 0; + } - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { - ram_addr_t end, next = RAM_ADDR_MAX; + RAMBLOCK_FOREACH(block) { + ram_addr_t candidate, next = RAM_ADDR_MAX; - end = block->offset + block->length; + /* Align blocks to start on a 'long' in the bitmap + * which makes the bitmap sync'ing take the fast path. 
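+         * (For example, on a 64-bit host with 4 KiB target pages -- an
+         * assumption for illustration -- BITS_PER_LONG << TARGET_PAGE_BITS
+         * is 64 * 4 KiB = 256 KiB, so each block's dirty bits begin on a
+         * fresh long word of the bitmap.)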
+ */ + candidate = block->offset + block->max_length; + candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS); - QTAILQ_FOREACH(next_block, &uc->ram_list.blocks, next) { - if (next_block->offset >= end) { + /* Search for the closest following block + * and find the gap. + */ + RAMBLOCK_FOREACH(next_block) { + if (next_block->offset >= candidate) { next = MIN(next, next_block->offset); } } - if (next - end >= size && next - end < mingap) { - offset = end; - mingap = next - end; + + /* If it fits remember our place and remember the size + * of gap, but keep going so that we might find a smaller + * gap to fill so avoiding fragmentation. + */ + if (next - candidate >= size && next - candidate < mingap) { + offset = candidate; + mingap = next - candidate; } } @@ -949,294 +1007,222 @@ static ram_addr_t find_ram_offset(struct uc_struct *uc, ram_addr_t size) return offset; } -ram_addr_t last_ram_offset(struct uc_struct *uc) +void *qemu_ram_get_host_addr(RAMBlock *rb) +{ + return rb->host; +} + +ram_addr_t qemu_ram_get_offset(RAMBlock *rb) +{ + return rb->offset; +} + +ram_addr_t qemu_ram_get_used_length(RAMBlock *rb) +{ + return rb->used_length; +} + +bool qemu_ram_is_shared(RAMBlock *rb) +{ + return rb->flags & RAM_SHARED; +} + +size_t qemu_ram_pagesize(RAMBlock *rb) +{ + return rb->page_size; +} + +static void ram_block_add(struct uc_struct *uc, RAMBlock *new_block) { RAMBlock *block; - ram_addr_t last = 0; + RAMBlock *last_block = NULL; - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) - last = MAX(last, block->offset + block->length); - - return last; -} - -static void qemu_ram_setup_dump(void *addr, ram_addr_t size) -{ -} - -static RAMBlock *find_ram_block(struct uc_struct *uc, ram_addr_t addr) -{ - RAMBlock *block; - - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { - if (block->offset == addr) { - return block; - } - } - - return NULL; -} - -void qemu_ram_unset_idstr(struct uc_struct *uc, ram_addr_t addr) -{ - RAMBlock *block = find_ram_block(uc, addr); - - if (block) { - memset(block->idstr, 0, sizeof(block->idstr)); - } -} - -static int memory_try_enable_merging(void *addr, size_t len) -{ - return 0; -} - -static ram_addr_t ram_block_add(struct uc_struct *uc, RAMBlock *new_block, Error **errp) -{ - RAMBlock *block; - ram_addr_t old_ram_size, new_ram_size; - - old_ram_size = last_ram_offset(uc) >> TARGET_PAGE_BITS; - - new_block->offset = find_ram_offset(uc, new_block->length); + new_block->offset = find_ram_offset(uc, new_block->max_length); if (!new_block->host) { - new_block->host = phys_mem_alloc(new_block->length, + new_block->host = phys_mem_alloc(uc, new_block->max_length, &new_block->mr->align); if (!new_block->host) { - error_setg_errno(errp, errno, - "cannot set up guest memory '%s'", - memory_region_name(new_block->mr)); - return -1; + // error_setg_errno(errp, errno, + // "cannot set up guest memory '%s'", + // memory_region_name(new_block->mr)); + return; } - memory_try_enable_merging(new_block->host, new_block->length); + // memory_try_enable_merging(new_block->host, new_block->max_length); } - /* Keep the list sorted from biggest to smallest block. */ - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { - if (block->length < new_block->length) { + /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, + * QLIST (which has an RCU-friendly variant) does not have insertion at + * tail, so save the last element in last_block. 
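+     * (If no smaller block is found, the new block is inserted after
+     * last_block; if the list is empty it becomes the new head.)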
+ */ + RAMBLOCK_FOREACH(block) { + last_block = block; + if (block->max_length < new_block->max_length) { break; } } if (block) { - QTAILQ_INSERT_BEFORE(block, new_block, next); - } else { - QTAILQ_INSERT_TAIL(&uc->ram_list.blocks, new_block, next); + QLIST_INSERT_BEFORE(block, new_block, next); + } else if (last_block) { + QLIST_INSERT_AFTER(last_block, new_block, next); + } else { /* list is empty */ + QLIST_INSERT_HEAD(&uc->ram_list.blocks, new_block, next); } uc->ram_list.mru_block = NULL; - uc->ram_list.version++; + /* Write list before version */ + //smp_wmb(); - new_ram_size = last_ram_offset(uc) >> TARGET_PAGE_BITS; + cpu_physical_memory_set_dirty_range(new_block->offset, + new_block->used_length, + DIRTY_CLIENTS_ALL); - if (new_ram_size > old_ram_size) { - int i; - for (i = 0; i < DIRTY_MEMORY_NUM; i++) { - uc->ram_list.dirty_memory[i] = - bitmap_zero_extend(uc->ram_list.dirty_memory[i], - old_ram_size, new_ram_size); - } - } - cpu_physical_memory_set_dirty_range(uc, new_block->offset, new_block->length); - - qemu_ram_setup_dump(new_block->host, new_block->length); - //qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE); - //qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK); - - return new_block->offset; } -// return -1 on error -ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, - MemoryRegion *mr, Error **errp) +RAMBlock *qemu_ram_alloc_from_ptr(struct uc_struct *uc, ram_addr_t size, void *host, + MemoryRegion *mr) { RAMBlock *new_block; - ram_addr_t addr; - Error *local_err = NULL; + ram_addr_t max_size = size; - size = TARGET_PAGE_ALIGN(size); + size = HOST_PAGE_ALIGN(uc, size); + max_size = HOST_PAGE_ALIGN(uc, max_size); new_block = g_malloc0(sizeof(*new_block)); if (new_block == NULL) - return -1; - + return NULL; new_block->mr = mr; - new_block->length = size; - new_block->fd = -1; + new_block->used_length = size; + new_block->max_length = max_size; + assert(max_size >= size); + new_block->page_size = uc->qemu_real_host_page_size; new_block->host = host; if (host) { new_block->flags |= RAM_PREALLOC; } - addr = ram_block_add(mr->uc, new_block, &local_err); - if (local_err) { - g_free(new_block); - error_propagate(errp, local_err); - return -1; + ram_block_add(mr->uc, new_block); + + return new_block; +} + +RAMBlock *qemu_ram_alloc(struct uc_struct *uc, ram_addr_t size, MemoryRegion *mr) +{ + return qemu_ram_alloc_from_ptr(uc, size, NULL, mr); +} + +static void reclaim_ramblock(struct uc_struct *uc, RAMBlock *block) +{ + if (block->flags & RAM_PREALLOC) { + ; + } else if (false) { + } else { + qemu_anon_ram_free(uc, block->host, block->max_length); } - return addr; + g_free(block); } -ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp) +void qemu_ram_free(struct uc_struct *uc, RAMBlock *block) { - return qemu_ram_alloc_from_ptr(size, NULL, mr, errp); -} - -void qemu_ram_free_from_ptr(struct uc_struct *uc, ram_addr_t addr) -{ - RAMBlock *block; - - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { - if (addr == block->offset) { - QTAILQ_REMOVE(&uc->ram_list.blocks, block, next); - uc->ram_list.mru_block = NULL; - uc->ram_list.version++; - g_free(block); - break; - } + if (!block) { + return; } -} -void qemu_ram_free(struct uc_struct *uc, ram_addr_t addr) -{ - RAMBlock *block; + //if (block->host) { + // ram_block_notify_remove(block->host, block->max_length); + //} - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { - if (addr == block->offset) { - QTAILQ_REMOVE(&uc->ram_list.blocks, block, next); - 
uc->ram_list.mru_block = NULL; - uc->ram_list.version++; - if (block->flags & RAM_PREALLOC) { - ; -#ifndef _WIN32 - } else if (block->fd >= 0) { - munmap(block->host, block->length); - close(block->fd); -#endif - } else { - qemu_anon_ram_free(block->host, block->length); - } - g_free(block); - break; - } - } -} - -#ifndef _WIN32 -void qemu_ram_remap(struct uc_struct *uc, ram_addr_t addr, ram_addr_t length) -{ - RAMBlock *block; - ram_addr_t offset; - int flags; - void *area, *vaddr; - - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { - offset = addr - block->offset; - if (offset < block->length) { - vaddr = block->host + offset; - if (block->flags & RAM_PREALLOC) { - ; - } else { - flags = MAP_FIXED; - munmap(vaddr, length); - if (block->fd >= 0) { - flags |= (block->flags & RAM_SHARED ? - MAP_SHARED : MAP_PRIVATE); - area = mmap(vaddr, length, PROT_READ | PROT_WRITE, - flags, block->fd, offset); - } else { - /* - * Remap needs to match alloc. Accelerators that - * set phys_mem_alloc never remap. If they did, - * we'd need a remap hook here. - */ - assert(phys_mem_alloc == qemu_anon_ram_alloc); - - flags |= MAP_PRIVATE | MAP_ANONYMOUS; - area = mmap(vaddr, length, PROT_READ | PROT_WRITE, - flags, -1, 0); - } - if (area == MAP_FAILED || area != vaddr) { - fprintf(stderr, "Could not remap addr: " - RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n", - length, addr); - exit(1); - } - memory_try_enable_merging(vaddr, length); - qemu_ram_setup_dump(vaddr, length); - } - return; - } - } -} -#endif /* !_WIN32 */ - -int qemu_get_ram_fd(struct uc_struct *uc, ram_addr_t addr) -{ - RAMBlock *block = qemu_get_ram_block(uc, addr); - - return block->fd; -} - -void *qemu_get_ram_block_host_ptr(struct uc_struct *uc, ram_addr_t addr) -{ - RAMBlock *block = qemu_get_ram_block(uc, addr); - - return block->host; + QLIST_REMOVE(block, next); + uc->ram_list.mru_block = NULL; + /* Write list before version */ + //smp_wmb(); + // call_rcu(block, reclaim_ramblock, rcu); + reclaim_ramblock(uc, block); } /* Return a host pointer to ram allocated with qemu_ram_alloc. - With the exception of the softmmu code in this file, this should - only be used for local memory (e.g. video ram) that the device owns, - and knows it isn't going to access beyond the end of the block. - - It should not be used for general purpose DMA. - Use cpu_physical_memory_map/cpu_physical_memory_rw instead. - */ -void *qemu_get_ram_ptr(struct uc_struct *uc, ram_addr_t addr) + * This should not be used for general purpose DMA. Use address_space_map + * or address_space_rw instead. For local memory (e.g. video ram) that the + * device owns, use memory_region_get_ram_ptr. + * + * Called within RCU critical section. + */ +void *qemu_map_ram_ptr(struct uc_struct *uc, RAMBlock *ram_block, ram_addr_t addr) { - RAMBlock *block = qemu_get_ram_block(uc, addr); + RAMBlock *block = ram_block; - return block->host + (addr - block->offset); + if (block == NULL) { + block = qemu_get_ram_block(uc, addr); + addr -= block->offset; + } + + return ramblock_ptr(block, addr); } -/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr - * but takes a size argument */ -static void *qemu_ram_ptr_length(struct uc_struct *uc, ram_addr_t addr, hwaddr *size) +/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr + * but takes a size argument. + * + * Called within RCU critical section. 
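+ * The lock argument is unused in the body below; it appears to be kept
+ * only for signature parity with the upstream QEMU helper.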
+ */ +static void *qemu_ram_ptr_length(struct uc_struct *uc, RAMBlock *ram_block, ram_addr_t addr, + hwaddr *size, bool lock) { - RAMBlock *block; + RAMBlock *block = ram_block; if (*size == 0) { return NULL; } - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { - if (addr - block->offset < block->length) { - if (addr - block->offset + *size > block->length) - *size = block->length - addr + block->offset; - return block->host + (addr - block->offset); - } + if (block == NULL) { + block = qemu_get_ram_block(uc, addr); + addr -= block->offset; } + *size = MIN(*size, block->max_length - addr); - fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); - abort(); + return ramblock_ptr(block, addr); } -/* Some of the softmmu routines need to translate from a host pointer - (typically a TLB entry) back to a ram offset. */ -MemoryRegion *qemu_ram_addr_from_host(struct uc_struct *uc, void *ptr, ram_addr_t *ram_addr) +/* Return the offset of a hostpointer within a ramblock */ +ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host) +{ + ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; + assert((uintptr_t)host >= (uintptr_t)rb->host); + assert(res < rb->max_length); + + return res; +} + +/* + * Translates a host ptr back to a RAMBlock, a ram_addr and an offset + * in that RAMBlock. + * + * ptr: Host pointer to look up + * round_offset: If true round the result offset down to a page boundary + * *ram_addr: set to result ram_addr + * *offset: set to result offset within the RAMBlock + * + * Returns: RAMBlock (or NULL if not found) + * + * By the time this function returns, the returned pointer is not protected + * by RCU anymore. If the caller is not within an RCU critical section and + * does not hold the iothread lock, it must have other means of protecting the + * pointer, such as a reference to the region that includes the incoming + * ram_addr_t. + */ +RAMBlock *qemu_ram_block_from_host(struct uc_struct *uc, void *ptr, + bool round_offset, ram_addr_t *offset) { RAMBlock *block; uint8_t *host = ptr; block = uc->ram_list.mru_block; - if (block && block->host && host - block->host < block->length) { + if (block && block->host && host - block->host < block->max_length) { goto found; } - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + RAMBLOCK_FOREACH(block) { /* This case append when the block is not mapped. */ if (block->host == NULL) { continue; } - if (host - block->host < block->length) { + if (host - block->host < block->max_length) { goto found; } } @@ -1244,84 +1230,102 @@ MemoryRegion *qemu_ram_addr_from_host(struct uc_struct *uc, void *ptr, ram_addr_ return NULL; found: - *ram_addr = block->offset + (host - block->host); - return block->mr; + *offset = (host - block->host); + if (round_offset) { + *offset &= TARGET_PAGE_MASK; + } + return block; } -static uint64_t subpage_read(struct uc_struct* uc, void *opaque, hwaddr addr, - unsigned len) +/* Some of the softmmu routines need to translate from a host pointer + (typically a TLB entry) back to a ram offset. */ +ram_addr_t qemu_ram_addr_from_host(struct uc_struct *uc, void *ptr) +{ + RAMBlock *block; + ram_addr_t offset; + + block = qemu_ram_block_from_host(uc, ptr, false, &offset); + if (!block) { + return RAM_ADDR_INVALID; + } + + return block->offset + offset; +} + +/* Generate a debug exception if a watchpoint has been hit. 
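+ * Note: this port stubs the check out entirely; the body below is
+ * intentionally empty, so hitting a watchpoint has no effect here.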
*/ +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs attrs, int flags, uintptr_t ra) +{ +} + +static MemTxResult flatview_read(struct uc_struct *uc, FlatView *fv, hwaddr addr, + MemTxAttrs attrs, void *buf, hwaddr len); +static MemTxResult flatview_write(struct uc_struct *, FlatView *fv, hwaddr addr, MemTxAttrs attrs, + const void *buf, hwaddr len); +static bool flatview_access_valid(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr len, + bool is_write, MemTxAttrs attrs); + +static MemTxResult subpage_read(struct uc_struct *uc, void *opaque, hwaddr addr, uint64_t *data, + unsigned len, MemTxAttrs attrs) { subpage_t *subpage = opaque; - uint8_t buf[4]; + uint8_t buf[8]; + MemTxResult res; #if defined(DEBUG_SUBPAGE) printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__, - subpage, len, addr); + subpage, len, addr); #endif - address_space_read(subpage->as, addr + subpage->base, buf, len); - switch (len) { - case 1: - return ldub_p(buf); - case 2: - return lduw_p(buf); - case 4: - return ldl_p(buf); - default: - abort(); + res = flatview_read(uc, subpage->fv, addr + subpage->base, attrs, buf, len); + if (res) { + return res; } + *data = ldn_p(buf, len); + return MEMTX_OK; } -static void subpage_write(struct uc_struct* uc, void *opaque, hwaddr addr, - uint64_t value, unsigned len) +static MemTxResult subpage_write(struct uc_struct *uc, void *opaque, hwaddr addr, + uint64_t value, unsigned len, MemTxAttrs attrs) { subpage_t *subpage = opaque; - uint8_t buf[4]; + uint8_t buf[8]; #if defined(DEBUG_SUBPAGE) printf("%s: subpage %p len %u addr " TARGET_FMT_plx - " value %"PRIx64"\n", - __func__, subpage, len, addr, value); + " value %"PRIx64"\n", + __func__, subpage, len, addr, value); #endif - switch (len) { - case 1: - stb_p(buf, value); - break; - case 2: - stw_p(buf, value); - break; - case 4: - stl_p(buf, value); - break; - default: - abort(); - } - address_space_write(subpage->as, addr + subpage->base, buf, len); + stn_p(buf, len, value); + return flatview_write(uc, subpage->fv, addr + subpage->base, attrs, buf, len); } -static bool subpage_accepts(void *opaque, hwaddr addr, - unsigned len, bool is_write) +static bool subpage_accepts(struct uc_struct *uc, void *opaque, hwaddr addr, + unsigned len, bool is_write, + MemTxAttrs attrs) { subpage_t *subpage = opaque; #if defined(DEBUG_SUBPAGE) printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n", - __func__, subpage, is_write ? 'w' : 'r', len, addr); + __func__, subpage, is_write ? 
'w' : 'r', len, addr); #endif - return address_space_access_valid(subpage->as, addr + subpage->base, - len, is_write); + return flatview_access_valid(uc, subpage->fv, addr + subpage->base, + len, is_write, attrs); } static const MemoryRegionOps subpage_ops = { - subpage_read, - subpage_write, - DEVICE_NATIVE_ENDIAN, - { - 0, 0, false, subpage_accepts, - }, + .read_with_attrs = subpage_read, + .write_with_attrs = subpage_write, + .impl.min_access_size = 1, + .impl.max_access_size = 8, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .valid.accepts = subpage_accepts, + .endianness = DEVICE_NATIVE_ENDIAN, }; -static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, - uint16_t section) +static int subpage_register(struct uc_struct *uc, subpage_t *mmio, uint32_t start, uint32_t end, + uint16_t section) { int idx, eidx; @@ -1331,7 +1335,7 @@ static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, eidx = SUBPAGE_IDX(end); #if defined(DEBUG_SUBPAGE) printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", - __func__, mmio, start, end, idx, eidx, section); + __func__, mmio, start, end, idx, eidx, section); #endif for (; idx <= eidx; idx++) { mmio->sub_section[idx] = section; @@ -1340,263 +1344,116 @@ static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, return 0; } -static void notdirty_mem_write(struct uc_struct* uc, void *opaque, hwaddr ram_addr, - uint64_t val, unsigned size) -{ - if (!cpu_physical_memory_get_dirty_flag(uc, ram_addr, DIRTY_MEMORY_CODE)) { - tb_invalidate_phys_page_fast(uc, ram_addr, size); - } - switch (size) { - case 1: - stb_p(qemu_get_ram_ptr(uc, ram_addr), val); - break; - case 2: - stw_p(qemu_get_ram_ptr(uc, ram_addr), val); - break; - case 4: - stl_p(qemu_get_ram_ptr(uc, ram_addr), val); - break; - default: - abort(); - } - /* we remove the notdirty callback only if the code has been - flushed */ - if (!cpu_physical_memory_is_clean(uc, ram_addr)) { - CPUArchState *env = uc->current_cpu->env_ptr; - tlb_set_dirty(env, uc->current_cpu->mem_io_vaddr); - } -} - -static bool notdirty_mem_accepts(void *opaque, hwaddr addr, - unsigned size, bool is_write) -{ - return is_write; -} - -static const MemoryRegionOps notdirty_mem_ops = { - NULL, - notdirty_mem_write, - DEVICE_NATIVE_ENDIAN, - { - 0, 0, false, notdirty_mem_accepts, - }, -}; - -static void io_mem_init(struct uc_struct* uc) -{ - memory_region_init_io(uc, &uc->io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX); - memory_region_init_io(uc, &uc->io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, - NULL, UINT64_MAX); - memory_region_init_io(uc, &uc->io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL, - NULL, UINT64_MAX); - //memory_region_init_io(uc, &uc->io_mem_watch, NULL, &watch_mem_ops, NULL, - // NULL, UINT64_MAX); -} - -static subpage_t *subpage_init(AddressSpace *as, hwaddr base) +static subpage_t *subpage_init(struct uc_struct *uc, FlatView *fv, hwaddr base) { subpage_t *mmio; - mmio = g_malloc0(sizeof(subpage_t)); - - mmio->as = as; + /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ + mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); + mmio->fv = fv; mmio->base = base; - memory_region_init_io(as->uc, &mmio->iomem, NULL, &subpage_ops, mmio, - NULL, TARGET_PAGE_SIZE); + memory_region_init_io(fv->root->uc, &mmio->iomem, &subpage_ops, mmio, + TARGET_PAGE_SIZE); mmio->iomem.subpage = true; #if defined(DEBUG_SUBPAGE) printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, - mmio, 
base, TARGET_PAGE_SIZE); + mmio, base, TARGET_PAGE_SIZE); #endif - subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED); return mmio; } -static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as, - MemoryRegion *mr) +static uint16_t dummy_section(struct uc_struct *uc, PhysPageMap *map, FlatView *fv, MemoryRegion *mr) { - MemoryRegionSection section = MemoryRegionSection_make( - mr, as, 0, - int128_2_64(), - false, - 0 - ); - - assert(as); + assert(fv); + MemoryRegionSection section = { + .fv = fv, + .mr = mr, + .offset_within_address_space = 0, + .offset_within_region = 0, + .size = int128_2_64(), + }; - return phys_section_add(map, §ion); + return phys_section_add(uc, map, §ion); } -MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index) +MemoryRegionSection *iotlb_to_section(CPUState *cpu, + hwaddr index, MemTxAttrs attrs) { - return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr; +#ifdef TARGET_ARM + struct uc_struct *uc = cpu->uc; +#endif + int asidx = cpu_asidx_from_attrs(cpu, attrs); + CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; + AddressSpaceDispatch *d = cpuas->memory_dispatch; + MemoryRegionSection *sections = d->map.sections; + + return §ions[index & ~TARGET_PAGE_MASK]; } -void phys_mem_clean(struct uc_struct* uc) +static void io_mem_init(struct uc_struct *uc) { - AddressSpaceDispatch* d = uc->as.next_dispatch; - g_free(d->map.sections); + memory_region_init_io(uc, &uc->io_mem_unassigned, &unassigned_mem_ops, NULL, + UINT64_MAX); } -static void mem_begin(MemoryListener *listener) +AddressSpaceDispatch *address_space_dispatch_new(struct uc_struct *uc, FlatView *fv) { - AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); +#ifndef NDEBUG uint16_t n; - PhysPageEntry ppe = { 1, PHYS_MAP_NODE_NIL }; - struct uc_struct *uc = as->uc; - n = dummy_section(&d->map, as, &uc->io_mem_unassigned); + n = dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned)); +#else + dummy_section(uc, &d->map, fv, &(uc->io_mem_unassigned)); +#endif assert(n == PHYS_SECTION_UNASSIGNED); - n = dummy_section(&d->map, as, &uc->io_mem_notdirty); - assert(n == PHYS_SECTION_NOTDIRTY); - n = dummy_section(&d->map, as, &uc->io_mem_rom); - assert(n == PHYS_SECTION_ROM); - // n = dummy_section(&d->map, as, &uc->io_mem_watch); - // assert(n == PHYS_SECTION_WATCH); - d->phys_map = ppe; - d->as = as; - as->next_dispatch = d; + d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; + d->uc = uc; + + return d; } -static void mem_commit(MemoryListener *listener) +void address_space_dispatch_free(AddressSpaceDispatch *d) { - AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); - AddressSpaceDispatch *cur = as->dispatch; - AddressSpaceDispatch *next = as->next_dispatch; - - phys_page_compact_all(next, next->map.nodes_nb); - - as->dispatch = next; - - if (cur) { - phys_sections_free(&cur->map); - g_free(cur); - } + phys_sections_free(&d->map); + g_free(d); } static void tcg_commit(MemoryListener *listener) { - struct uc_struct* uc = listener->address_space_filter->uc; + CPUAddressSpace *cpuas; + AddressSpaceDispatch *d; /* since each CPU stores ram addresses in its TLB cache, we must reset the modified entries */ - /* XXX: slow ! 
*/ - tlb_flush(uc->cpu, 1); -} - -void address_space_init_dispatch(AddressSpace *as) -{ - MemoryListener ml = { 0 }; - - ml.begin = mem_begin; - ml.commit = mem_commit; - ml.region_add = mem_add; - ml.region_nop = mem_add; - ml.priority = 0; - - as->dispatch = NULL; - as->dispatch_listener = ml; - memory_listener_register(as->uc, &as->dispatch_listener, as); -} - -void address_space_unregister(AddressSpace *as) -{ - memory_listener_unregister(as->uc, &as->dispatch_listener); -} - -void address_space_destroy_dispatch(AddressSpace *as) -{ - AddressSpaceDispatch *d = as->dispatch; - - memory_listener_unregister(as->uc, &as->dispatch_listener); - g_free(d->map.nodes); - g_free(d); - - if (as->dispatch != as->next_dispatch) { - d = as->next_dispatch; - g_free(d->map.nodes); - g_free(d); - } - - as->dispatch = NULL; - as->next_dispatch = NULL; + cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); + cpu_reloading_memory_map(); + /* The CPU and TLB are protected by the iothread lock. + * We reload the dispatch pointer now because cpu_reloading_memory_map() + * may have split the RCU critical section. + */ + d = address_space_to_dispatch(cpuas->as); + cpuas->memory_dispatch = d; + tlb_flush(cpuas->cpu); } static void memory_map_init(struct uc_struct *uc) { uc->system_memory = g_malloc(sizeof(*(uc->system_memory))); + memory_region_init(uc, uc->system_memory, UINT64_MAX); + address_space_init(uc, &uc->address_space_memory, uc->system_memory); - memory_region_init(uc, uc->system_memory, NULL, "system", UINT64_MAX); - address_space_init(uc, &uc->as, uc->system_memory, "memory"); + uc->system_io = g_malloc(sizeof(*(uc->system_io))); + memory_region_init_io(uc, uc->system_io, &unassigned_io_ops, NULL, 65536); + address_space_init(uc, &uc->address_space_io, uc->system_io); } -void cpu_exec_init_all(struct uc_struct *uc) -{ -#if !defined(CONFIG_USER_ONLY) - memory_map_init(uc); -#endif - io_mem_init(uc); -} - -MemoryRegion *get_system_memory(struct uc_struct *uc) -{ - return uc->system_memory; -} - -#endif /* !defined(CONFIG_USER_ONLY) */ - /* physical memory access (slow version, mainly for debug) */ -#if defined(CONFIG_USER_ONLY) -int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, - uint8_t *buf, int len, int is_write) +static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, + hwaddr length) { - int l, flags; - target_ulong page; - void * p; - - while (len > 0) { - page = addr & TARGET_PAGE_MASK; - l = (page + TARGET_PAGE_SIZE) - addr; - if (l > len) - l = len; - flags = page_get_flags(page); - if (!(flags & PAGE_VALID)) - return -1; - if (is_write) { - if (!(flags & PAGE_WRITE)) - return -1; - /* XXX: this code should not depend on lock_user */ - if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) - return -1; - memcpy(p, buf, l); - unlock_user(p, addr, l); - } else { - if (!(flags & PAGE_READ)) - return -1; - /* XXX: this code should not depend on lock_user */ - if (!(p = lock_user(VERIFY_READ, addr, l, 1))) - return -1; - memcpy(buf, p, l); - unlock_user(p, addr, 0); - } - len -= l; - buf += l; - addr += l; - } - return 0; -} - -#else - -static void invalidate_and_set_dirty(struct uc_struct *uc, hwaddr addr, - hwaddr length) -{ - if (cpu_physical_memory_range_includes_clean(uc, addr, length)) { - tb_invalidate_phys_range(uc, addr, addr + length, 0); - } } static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) @@ -1611,7 +1468,11 @@ static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) /* Bound the maximum access by the alignment of the 
address. */ if (!mr->ops->impl.unaligned) { - unsigned align_size_max = addr & (0-addr); +#ifdef _MSC_VER + unsigned align_size_max = addr & (0ULL - addr); +#else + unsigned align_size_max = addr & -addr; +#endif if (align_size_max != 0 && align_size_max < access_size_max) { access_size_max = align_size_max; } @@ -1621,125 +1482,188 @@ static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) if (l > access_size_max) { l = access_size_max; } - if (l & (l - 1)) { - l = 1 << (qemu_fls(l) - 1); - } + l = pow2floor(l); return l; } -bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, - int len, bool is_write) +static bool prepare_mmio_access(MemoryRegion *mr) { - hwaddr l; - uint8_t *ptr; + return true; +} + +/* Called within RCU critical section. */ +static MemTxResult flatview_write_continue(struct uc_struct *uc, FlatView *fv, hwaddr addr, + MemTxAttrs attrs, + const void *ptr, + hwaddr len, hwaddr addr1, + hwaddr l, MemoryRegion *mr) +{ + uint8_t *ram_ptr; uint64_t val; - hwaddr addr1; - MemoryRegion *mr; - bool error = false; + MemTxResult result = MEMTX_OK; + bool release_lock = false; + const uint8_t *buf = ptr; - while (len > 0) { - l = len; - - mr = address_space_translate(as, addr, &addr1, &l, is_write); - if (!mr) - return true; - - if (is_write) { - if (!memory_access_is_direct(mr, is_write)) { - l = memory_access_size(mr, l, addr1); - /* XXX: could force current_cpu to NULL to avoid - potential bugs */ - switch (l) { - case 8: - /* 64 bit write access */ - val = ldq_p(buf); - error |= io_mem_write(mr, addr1, val, 8); - break; - case 4: - /* 32 bit write access */ - val = ldl_p(buf); - error |= io_mem_write(mr, addr1, val, 4); - break; - case 2: - /* 16 bit write access */ - val = lduw_p(buf); - error |= io_mem_write(mr, addr1, val, 2); - break; - case 1: - /* 8 bit write access */ - val = ldub_p(buf); - error |= io_mem_write(mr, addr1, val, 1); - break; - default: - abort(); - } - } else { - addr1 += memory_region_get_ram_addr(mr); - /* RAM case */ - ptr = qemu_get_ram_ptr(as->uc, addr1); - memcpy(ptr, buf, l); - invalidate_and_set_dirty(as->uc, addr1, l); - } + for (;;) { + if (!memory_access_is_direct(mr, true)) { + release_lock |= prepare_mmio_access(mr); + l = memory_access_size(mr, l, addr1); + /* XXX: could force current_cpu to NULL to avoid + potential bugs */ + val = ldn_he_p(buf, l); + result |= memory_region_dispatch_write(uc, mr, addr1, val, + size_memop(l), attrs); } else { - if (!memory_access_is_direct(mr, is_write)) { - /* I/O case */ - l = memory_access_size(mr, l, addr1); - - switch (l) { - case 8: - /* 64 bit read access */ - error |= io_mem_read(mr, addr1, &val, 8); - stq_p(buf, val); - break; - case 4: - /* 32 bit read access */ - error |= io_mem_read(mr, addr1, &val, 4); - stl_p(buf, val); - break; - case 2: - /* 16 bit read access */ - error |= io_mem_read(mr, addr1, &val, 2); - stw_p(buf, val); - break; - case 1: - /* 8 bit read access */ - error |= io_mem_read(mr, addr1, &val, 1); - stb_p(buf, val); - break; - default: - abort(); - } - } else { - /* RAM case */ - ptr = qemu_get_ram_ptr(as->uc, mr->ram_addr + addr1); - memcpy(buf, ptr, l); - } + /* RAM case */ + ram_ptr = qemu_ram_ptr_length(fv->root->uc, mr->ram_block, addr1, &l, false); + memcpy(ram_ptr, buf, l); } + + if (release_lock) { + release_lock = false; + } + len -= l; buf += l; addr += l; + + if (!len) { + break; + } + + l = len; + mr = flatview_translate(uc, fv, addr, &addr1, &l, true, attrs); } - return error; + return result; } -bool address_space_write(AddressSpace 
*as, hwaddr addr, - const uint8_t *buf, int len) +/* Called from RCU critical section. */ +static MemTxResult flatview_write(struct uc_struct *uc, FlatView *fv, hwaddr addr, MemTxAttrs attrs, + const void *buf, hwaddr len) { - return address_space_rw(as, addr, (uint8_t *)buf, len, true); + hwaddr l; + hwaddr addr1; + MemoryRegion *mr; + MemTxResult result = MEMTX_OK; + + l = len; + mr = flatview_translate(uc, fv, addr, &addr1, &l, true, attrs); + result = flatview_write_continue(uc, fv, addr, attrs, buf, len, + addr1, l, mr); + + return result; } -bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len) +/* Called within RCU critical section. */ +MemTxResult flatview_read_continue(struct uc_struct *uc, FlatView *fv, hwaddr addr, + MemTxAttrs attrs, void *ptr, + hwaddr len, hwaddr addr1, hwaddr l, + MemoryRegion *mr) { - return address_space_rw(as, addr, buf, len, false); + uint8_t *ram_ptr; + uint64_t val; + MemTxResult result = MEMTX_OK; + bool release_lock = false; + uint8_t *buf = ptr; + + for (;;) { + if (!memory_access_is_direct(mr, false)) { + /* I/O case */ + release_lock |= prepare_mmio_access(mr); + l = memory_access_size(mr, l, addr1); + result |= memory_region_dispatch_read(uc, mr, addr1, &val, + size_memop(l), attrs); + stn_he_p(buf, l, val); + } else { + /* RAM case */ + ram_ptr = qemu_ram_ptr_length(fv->root->uc, mr->ram_block, addr1, &l, false); + memcpy(buf, ram_ptr, l); + } + + if (release_lock) { + release_lock = false; + } + + len -= l; + buf += l; + addr += l; + + if (!len) { + break; + } + + l = len; + mr = flatview_translate(uc, fv, addr, &addr1, &l, false, attrs); + } + + return result; } - -bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, - int len, int is_write) +/* Called from RCU critical section. 
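+ * flatview_read() below resolves the first translation and then delegates
+ * to flatview_read_continue() just above, which loops over the remaining
+ * sections, mirroring the write path.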
*/ +static MemTxResult flatview_read(struct uc_struct *uc, FlatView *fv, hwaddr addr, + MemTxAttrs attrs, void *buf, hwaddr len) { - return address_space_rw(as, addr, buf, len, is_write); + hwaddr l; + hwaddr addr1; + MemoryRegion *mr; + + l = len; + mr = flatview_translate(uc, fv, addr, &addr1, &l, false, attrs); + return flatview_read_continue(uc, fv, addr, attrs, buf, len, + addr1, l, mr); +} + +MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, void *buf, hwaddr len) +{ + MemTxResult result = MEMTX_OK; + FlatView *fv; + + if (len > 0) { + fv = address_space_to_flatview(as); + result = flatview_read(as->uc, fv, addr, attrs, buf, len); + } + + return result; +} + +MemTxResult address_space_write(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, + const void *buf, hwaddr len) +{ + MemTxResult result = MEMTX_OK; + FlatView *fv; + + if (len > 0) { + fv = address_space_to_flatview(as); + result = flatview_write(as->uc, fv, addr, attrs, buf, len); + } + + return result; +} + +MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, + void *buf, hwaddr len, bool is_write) +{ + if (is_write) { + return address_space_write(as, addr, attrs, buf, len); + } else { + return address_space_read_full(as, addr, attrs, buf, len); + } +} + +bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, void *buf, + hwaddr len, bool is_write) +{ + MemTxResult result = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, + buf, len, is_write); + if (result == MEMTX_OK) { + return true; + } else { + return false; + } } enum write_rom_type { @@ -1747,77 +1671,83 @@ enum write_rom_type { FLUSH_CACHE, }; -static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as, - hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type) +static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, + hwaddr addr, + MemTxAttrs attrs, + const void *ptr, + hwaddr len, + enum write_rom_type type) { hwaddr l; - uint8_t *ptr; + uint8_t *ram_ptr; hwaddr addr1; MemoryRegion *mr; + const uint8_t *buf = ptr; while (len > 0) { l = len; - mr = address_space_translate(as, addr, &addr1, &l, true); + mr = address_space_translate(as, addr, &addr1, &l, true, attrs); - if (!(memory_region_is_ram(mr) || - memory_region_is_romd(mr))) { - /* do nothing */ + if (!memory_region_is_ram(mr)) { + l = memory_access_size(mr, l, addr1); } else { - addr1 += memory_region_get_ram_addr(mr); /* ROM/RAM case */ - ptr = qemu_get_ram_ptr(as->uc, addr1); + ram_ptr = qemu_map_ram_ptr(as->uc, mr->ram_block, addr1); switch (type) { - case WRITE_DATA: - memcpy(ptr, buf, l); - invalidate_and_set_dirty(as->uc, addr1, l); - break; - case FLUSH_CACHE: - flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l); - break; + case WRITE_DATA: + memcpy(ram_ptr, buf, l); + break; + case FLUSH_CACHE: + flush_icache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr + l); + break; } } len -= l; buf += l; addr += l; } + return MEMTX_OK; } /* used for ROM loading : can write in RAM and ROM */ -DEFAULT_VISIBILITY -void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr, - const uint8_t *buf, int len) +MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, + const void *buf, hwaddr len) { - cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA); + return address_space_write_rom_internal(as, addr, attrs, + buf, len, WRITE_DATA); } -void cpu_flush_icache_range(AddressSpace *as, hwaddr start, int len) +void cpu_flush_icache_range(AddressSpace 
*as, hwaddr start, hwaddr len) { - /* - * This function should do the same thing as an icache flush that was - * triggered from within the guest. For TCG we are always cache coherent, - * so there is no need to flush anything. For KVM / Xen we need to flush - * the host's instruction cache at least. +} + +void cpu_exec_init_all(struct uc_struct *uc) +{ + /* The data structures we set up here depend on knowing the page size, + * so no more changes can be made after this point. + * In an ideal world, nothing we did before we had finished the + * machine setup would care about the target page size, and we could + * do this much later, rather than requiring board models to state + * up front what their requirements are. */ - if (tcg_enabled(as->uc)) { - return; - } - - cpu_physical_memory_write_rom_internal(as, - start, NULL, len, FLUSH_CACHE); + finalize_target_page_bits(uc); + memory_map_init(uc); + io_mem_init(uc); } - -bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write) +static bool flatview_access_valid(struct uc_struct *uc, FlatView *fv, hwaddr addr, hwaddr len, + bool is_write, MemTxAttrs attrs) { MemoryRegion *mr; hwaddr l, xlat; while (len > 0) { l = len; - mr = address_space_translate(as, addr, &xlat, &l, is_write); + mr = flatview_translate(uc, fv, addr, &xlat, &l, is_write, attrs); if (!memory_access_is_direct(mr, is_write)) { l = memory_access_size(mr, l, addr); - if (!memory_region_access_valid(mr, xlat, l, is_write)) { + if (!memory_region_access_valid(uc, mr, xlat, l, is_write, attrs)) { return false; } } @@ -1828,6 +1758,45 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_ return true; } +bool address_space_access_valid(AddressSpace *as, hwaddr addr, + hwaddr len, bool is_write, + MemTxAttrs attrs) +{ + FlatView *fv; + bool result; + + fv = address_space_to_flatview(as); + result = flatview_access_valid(as->uc, fv, addr, len, is_write, attrs); + return result; +} + +static hwaddr +flatview_extend_translation(struct uc_struct *uc, FlatView *fv, hwaddr addr, + hwaddr target_len, + MemoryRegion *mr, hwaddr base, hwaddr len, + bool is_write, MemTxAttrs attrs) +{ + hwaddr done = 0; + hwaddr xlat; + MemoryRegion *this_mr; + + for (;;) { + target_len -= len; + addr += len; + done += len; + if (target_len == 0) { + return done; + } + + len = target_len; + this_mr = flatview_translate(uc, fv, addr, &xlat, + &len, is_write, attrs); + if (this_mr != mr || xlat != base + done) { + return done; + } + } +} + /* Map a physical memory region into a host virtual address. * May map a subset of the requested range, given by and returned in *plen. * May return NULL if resources needed to perform the mapping are exhausted. @@ -1836,460 +1805,159 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_ * likely to succeed. 
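 * A hypothetical caller might pair it with address_space_unmap() like so
 * (the p/data names are illustrative, not taken from this patch):
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, addr, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         memcpy(p, data, plen);   // write at most the mapped plen bytes
 *         address_space_unmap(as, p, plen, true, plen);
 *     }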
*/ void *address_space_map(AddressSpace *as, - hwaddr addr, - hwaddr *plen, - bool is_write) + hwaddr addr, + hwaddr *plen, + bool is_write, + MemTxAttrs attrs) { hwaddr len = *plen; - hwaddr done = 0; - hwaddr l, xlat, base; - MemoryRegion *mr, *this_mr; - ram_addr_t raddr; + hwaddr l, xlat; + MemoryRegion *mr; + void *ptr; + FlatView *fv; + struct uc_struct *uc = as->uc; if (len == 0) { return NULL; } l = len; - mr = address_space_translate(as, addr, &xlat, &l, is_write); + fv = address_space_to_flatview(as); + mr = flatview_translate(uc, fv, addr, &xlat, &l, is_write, attrs); + if (!memory_access_is_direct(mr, is_write)) { - if (as->uc->bounce.buffer) { - return NULL; - } /* Avoid unbounded allocations */ l = MIN(l, TARGET_PAGE_SIZE); - as->uc->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); - as->uc->bounce.addr = addr; - as->uc->bounce.len = l; + mr->uc->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); + mr->uc->bounce.addr = addr; + mr->uc->bounce.len = l; - memory_region_ref(mr); - as->uc->bounce.mr = mr; + mr->uc->bounce.mr = mr; if (!is_write) { - address_space_read(as, addr, as->uc->bounce.buffer, l); + flatview_read(as->uc, fv, addr, MEMTXATTRS_UNSPECIFIED, + mr->uc->bounce.buffer, l); } *plen = l; - return as->uc->bounce.buffer; + return mr->uc->bounce.buffer; } - base = xlat; - raddr = memory_region_get_ram_addr(mr); - for (;;) { - len -= l; - addr += l; - done += l; - if (len == 0) { - break; - } + *plen = flatview_extend_translation(as->uc, fv, addr, len, mr, xlat, + l, is_write, attrs); + ptr = qemu_ram_ptr_length(as->uc, mr->ram_block, xlat, plen, true); - l = len; - this_mr = address_space_translate(as, addr, &xlat, &l, is_write); - if (this_mr != mr || xlat != base + done) { - break; - } - } - - memory_region_ref(mr); - *plen = done; - return qemu_ram_ptr_length(as->uc, raddr + base, plen); + return ptr; } /* Unmaps a memory region previously mapped by address_space_map(). - * Will also mark the memory as dirty if is_write == 1. access_len gives + * Will also mark the memory as dirty if is_write is true. access_len gives * the amount of memory that was actually read or written by the caller. 
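 * (When the mapping went through the bounce buffer, the write-back to guest
 * memory happens here via address_space_write() before the buffer is freed.)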
*/ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, - int is_write, hwaddr access_len) + bool is_write, hwaddr access_len) { if (buffer != as->uc->bounce.buffer) { MemoryRegion *mr; ram_addr_t addr1; - mr = qemu_ram_addr_from_host(as->uc, buffer, &addr1); + mr = memory_region_from_host(as->uc, buffer, &addr1); assert(mr != NULL); if (is_write) { - invalidate_and_set_dirty(as->uc, addr1, access_len); + invalidate_and_set_dirty(mr, addr1, access_len); } - memory_region_unref(mr); return; } if (is_write) { - address_space_write(as, as->uc->bounce.addr, as->uc->bounce.buffer, access_len); + address_space_write(as, as->uc->bounce.addr, MEMTXATTRS_UNSPECIFIED, + as->uc->bounce.buffer, access_len); } qemu_vfree(as->uc->bounce.buffer); as->uc->bounce.buffer = NULL; - memory_region_unref(as->uc->bounce.mr); } void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr, - hwaddr *plen, - int is_write) + hwaddr *plen, + bool is_write) { - return address_space_map(as, addr, plen, is_write); + return address_space_map(as, addr, plen, is_write, + MEMTXATTRS_UNSPECIFIED); } void cpu_physical_memory_unmap(AddressSpace *as, void *buffer, hwaddr len, - int is_write, hwaddr access_len) + bool is_write, hwaddr access_len) { address_space_unmap(as, buffer, len, is_write, access_len); } -/* warning: addr must be aligned */ -static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr, - enum device_endian endian) -{ - uint8_t *ptr; - uint64_t val; - MemoryRegion *mr; - hwaddr l = 4; - hwaddr addr1; - - mr = address_space_translate(as, addr, &addr1, &l, false); - if (l < 4 || !memory_access_is_direct(mr, false)) { - /* I/O case */ - io_mem_read(mr, addr1, &val, 4); -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap32(val); - } +#define ARG1_DECL AddressSpace *as +#define ARG1 as +#ifdef UNICORN_ARCH_POSTFIX +#define SUFFIX UNICORN_ARCH_POSTFIX #else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap32(val); - } +#define SUFFIX #endif - } else { - /* RAM case */ - ptr = qemu_get_ram_ptr(as->uc, (memory_region_get_ram_addr(mr) - & TARGET_PAGE_MASK) - + addr1); - switch (endian) { - case DEVICE_LITTLE_ENDIAN: - val = ldl_le_p(ptr); - break; - case DEVICE_BIG_ENDIAN: - val = ldl_be_p(ptr); - break; - default: - val = ldl_p(ptr); - break; - } - } - return val; -} +#define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) +#include "memory_ldst.inc.c" -uint32_t ldl_phys(AddressSpace *as, hwaddr addr) +/* Called from RCU critical section. This function has the same + * semantics as address_space_translate, but it only works on a + * predefined range of a MemoryRegion that was mapped with + * address_space_cache_init. 
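+ * (The ARG1/SUFFIX/TRANSLATE macros just above instantiate the
+ * memory_ldst.inc.c load/store helpers for plain address spaces; the macro
+ * block after this function re-instantiates them with a _cached_slow
+ * suffix on top of this cached translation.)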
+ */ +static inline MemoryRegion *address_space_translate_cached( + MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, + hwaddr *plen, bool is_write, MemTxAttrs attrs) { - return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN); -} - -uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr) -{ - return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN); -} - -uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr) -{ - return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN); -} - -/* warning: addr must be aligned */ -static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr, - enum device_endian endian) -{ - uint8_t *ptr; - uint64_t val; + MemoryRegionSection section; MemoryRegion *mr; - hwaddr l = 8; - hwaddr addr1; + IOMMUMemoryRegion *iommu_mr; + AddressSpace *target_as; - mr = address_space_translate(as, addr, &addr1, &l, - false); - if (l < 8 || !memory_access_is_direct(mr, false)) { - /* I/O case */ - io_mem_read(mr, addr1, &val, 8); -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap64(val); - } + assert(!cache->ptr); + *xlat = addr + cache->xlat; + + mr = cache->mrs.mr; + iommu_mr = memory_region_get_iommu(mr); + if (!iommu_mr) { + /* MMIO region. */ + return mr; + } + + section = address_space_translate_iommu(iommu_mr, xlat, plen, + NULL, is_write, true, + &target_as, attrs); + return section.mr; +} + +#define ARG1_DECL MemoryRegionCache *cache +#define ARG1 cache +#ifdef UNICORN_ARCH_POSTFIX +#define SUFFIX glue(_cached_slow, UNICORN_ARCH_POSTFIX) #else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap64(val); - } +#define SUFFIX _cached_slow #endif - } else { - /* RAM case */ - ptr = qemu_get_ram_ptr(as->uc, (memory_region_get_ram_addr(mr) - & TARGET_PAGE_MASK) - + addr1); - switch (endian) { - case DEVICE_LITTLE_ENDIAN: - val = ldq_le_p(ptr); - break; - case DEVICE_BIG_ENDIAN: - val = ldq_be_p(ptr); - break; - default: - val = ldq_p(ptr); - break; - } - } - return val; -} - -uint64_t ldq_phys(AddressSpace *as, hwaddr addr) -{ - return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN); -} - -uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr) -{ - return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN); -} - -uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr) -{ - return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN); -} - -/* XXX: optimize */ -uint32_t ldub_phys(AddressSpace *as, hwaddr addr) -{ - uint8_t val; - address_space_rw(as, addr, &val, 1, 0); - return val; -} - -/* warning: addr must be aligned */ -static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr, - enum device_endian endian) -{ - uint8_t *ptr; - uint64_t val; - MemoryRegion *mr; - hwaddr l = 2; - hwaddr addr1; - - mr = address_space_translate(as, addr, &addr1, &l, - false); - if (l < 2 || !memory_access_is_direct(mr, false)) { - /* I/O case */ - io_mem_read(mr, addr1, &val, 2); -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap16(val); - } -#else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap16(val); - } -#endif - } else { - /* RAM case */ - ptr = qemu_get_ram_ptr(as->uc, (memory_region_get_ram_addr(mr) - & TARGET_PAGE_MASK) - + addr1); - switch (endian) { - case DEVICE_LITTLE_ENDIAN: - val = lduw_le_p(ptr); - break; - case DEVICE_BIG_ENDIAN: - val = lduw_be_p(ptr); - break; - default: - val = lduw_p(ptr); - break; - } - } - return val; -} - -uint32_t lduw_phys(AddressSpace *as, hwaddr addr) -{ - return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN); -} - -uint32_t lduw_le_phys(AddressSpace *as, hwaddr 
addr) -{ - return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN); -} - -uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr) -{ - return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN); -} - -/* warning: addr must be aligned. The ram page is not masked as dirty - and the code inside is not invalidated. It is useful if the dirty - bits are used to track modified PTEs */ -void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val) -{ - uint8_t *ptr; - MemoryRegion *mr; - hwaddr l = 4; - hwaddr addr1; - - mr = address_space_translate(as, addr, &addr1, &l, - true); - if (l < 4 || !memory_access_is_direct(mr, true)) { - io_mem_write(mr, addr1, val, 4); - } else { - addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; - ptr = qemu_get_ram_ptr(as->uc, addr1); - stl_p(ptr, val); - } -} - -/* warning: addr must be aligned */ -static inline void stl_phys_internal(AddressSpace *as, - hwaddr addr, uint32_t val, - enum device_endian endian) -{ - uint8_t *ptr; - MemoryRegion *mr; - hwaddr l = 4; - hwaddr addr1; - - mr = address_space_translate(as, addr, &addr1, &l, - true); - if (l < 4 || !memory_access_is_direct(mr, true)) { -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap32(val); - } -#else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap32(val); - } -#endif - io_mem_write(mr, addr1, val, 4); - } else { - /* RAM case */ - addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; - ptr = qemu_get_ram_ptr(as->uc, addr1); - switch (endian) { - case DEVICE_LITTLE_ENDIAN: - stl_le_p(ptr, val); - break; - case DEVICE_BIG_ENDIAN: - stl_be_p(ptr, val); - break; - default: - stl_p(ptr, val); - break; - } - invalidate_and_set_dirty(mr->uc, addr1, 4); - } -} - -void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val) -{ - stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN); -} - -void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val) -{ - stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN); -} - -void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val) -{ - stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN); -} - -/* XXX: optimize */ -void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val) -{ - uint8_t v = val; - address_space_rw(as, addr, &v, 1, 1); -} - -/* warning: addr must be aligned */ -static inline void stw_phys_internal(AddressSpace *as, - hwaddr addr, uint32_t val, - enum device_endian endian) -{ - uint8_t *ptr; - MemoryRegion *mr; - hwaddr l = 2; - hwaddr addr1; - - mr = address_space_translate(as, addr, &addr1, &l, true); - if (l < 2 || !memory_access_is_direct(mr, true)) { -#if defined(TARGET_WORDS_BIGENDIAN) - if (endian == DEVICE_LITTLE_ENDIAN) { - val = bswap16(val); - } -#else - if (endian == DEVICE_BIG_ENDIAN) { - val = bswap16(val); - } -#endif - io_mem_write(mr, addr1, val, 2); - } else { - /* RAM case */ - addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; - ptr = qemu_get_ram_ptr(as->uc, addr1); - switch (endian) { - case DEVICE_LITTLE_ENDIAN: - stw_le_p(ptr, val); - break; - case DEVICE_BIG_ENDIAN: - stw_be_p(ptr, val); - break; - default: - stw_p(ptr, val); - break; - } - invalidate_and_set_dirty(as->uc, addr1, 2); - } -} - -void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val) -{ - stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN); -} - -void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val) -{ - stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN); -} - -void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val) -{ - stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN); 
-} - -/* XXX: optimize */ -void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val) -{ - val = tswap64(val); - address_space_rw(as, addr, (void *) &val, 8, 1); -} - -void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val) -{ - val = cpu_to_le64(val); - address_space_rw(as, addr, (void *) &val, 8, 1); -} - -void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val) -{ - val = cpu_to_be64(val); - address_space_rw(as, addr, (void *) &val, 8, 1); -} +#define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__) +#include "memory_ldst.inc.c" /* virtual memory access for debug (includes writing to ROM) */ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, - uint8_t *buf, int len, int is_write) + void *ptr, target_ulong len, bool is_write) { - int l; +#ifdef TARGET_ARM + struct uc_struct *uc = cpu->uc; +#endif hwaddr phys_addr; - target_ulong page; + target_ulong l, page; + uint8_t *buf = ptr; while (len > 0) { + int asidx; + MemTxAttrs attrs; + page = addr & TARGET_PAGE_MASK; - phys_addr = cpu_get_phys_page_debug(cpu, page); + phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); + asidx = cpu_asidx_from_attrs(cpu, attrs); /* if no physical page mapped, return an error */ if (phys_addr == -1) return -1; @@ -2298,9 +1966,11 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, l = len; phys_addr += (addr & ~TARGET_PAGE_MASK); if (is_write) { - cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l); + address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr, + attrs, buf, l); } else { - address_space_rw(cpu->as, phys_addr, buf, l, 0); + address_space_read(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf, + l); } len -= l; buf += l; @@ -2308,13 +1978,26 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, } return 0; } -#endif /* - * A helper function for the _utterly broken_ virtio device model to find out if - * it's running on a big endian machine. Don't do this at home kids! + * Allows code that needs to deal with migration bitmaps etc. to still be built + * target-independent. */ -bool target_words_bigendian(void); +size_t qemu_target_page_size(struct uc_struct *uc) +{ + return TARGET_PAGE_SIZE; +} + +int qemu_target_page_bits(struct uc_struct *uc) +{ + return TARGET_PAGE_BITS; +} + +int qemu_target_page_bits_min(void) +{ + return TARGET_PAGE_BITS_MIN; +} + bool target_words_bigendian(void) { #if defined(TARGET_WORDS_BIGENDIAN) @@ -2324,24 +2007,101 @@ bool target_words_bigendian(void) #endif } -#ifndef CONFIG_USER_ONLY bool cpu_physical_memory_is_io(AddressSpace *as, hwaddr phys_addr) { MemoryRegion*mr; hwaddr l = 1; + bool res; - mr = address_space_translate(as, phys_addr, &phys_addr, &l, false); + mr = address_space_translate(as, + phys_addr, &phys_addr, &l, false, + MEMTXATTRS_UNSPECIFIED); - return !(memory_region_is_ram(mr) || - memory_region_is_romd(mr)); + res = !memory_region_is_ram(mr); + return res; } -void qemu_ram_foreach_block(struct uc_struct *uc, RAMBlockIterFunc func, void *opaque) +/* + * Unmap pages of memory from start to start+length such that + * they a) read as 0, b) trigger whatever fault mechanism + * the OS provides for postcopy. + * The pages must be unmapped by the end of the function.
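The stl_phys_internal/stw_phys_internal helpers deleted above (their duties move into the generated memory_ldst.inc.c bodies pulled in by the new #include) byte-swap the value whenever the compile-time target endianness disagrees with the endianness the caller requested. A minimal standalone sketch of that fixup, with invented helper names rather than QEMU's own:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical stand-in for QEMU's bswap32(). */
    static uint32_t swap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
               ((v << 8) & 0x00ff0000u) | (v << 24);
    }

    /* Store VAL at PTR in the requested byte order, independent of the
     * host's own order; memcpy() sidesteps alignment restrictions. */
    static void store_u32(uint8_t *ptr, uint32_t val, int big_endian)
    {
        const uint32_t probe = 1;
        int host_is_le = (*(const uint8_t *)&probe == 1);

        if (host_is_le == big_endian) {   /* host order != requested order */
            val = swap32(val);
        }
        memcpy(ptr, &val, sizeof(val));
    }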
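cpu_memory_rw_debug, reworked above, never lets one transfer cross a guest page: each iteration translates the page under addr and clamps the chunk length l to what remains of that page. The clamping loop in isolation (illustrative page size; fn() stands in for the per-page read or write):

    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

    /* Walk [addr, addr+len) in page-bounded chunks, the same shape as the
     * debug-access loop above. */
    static void for_each_page_chunk(uint64_t addr, size_t len,
                                    void (*fn)(uint64_t addr, size_t l))
    {
        while (len > 0) {
            uint64_t page = addr & PAGE_MASK;
            size_t l = (size_t)(page + PAGE_SIZE - addr); /* room left in page */

            if (l > len) {
                l = len;
            }
            fn(addr, l);
            len -= l;
            addr += l;
        }
    }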
+ * Returns: 0 on success, non-0 on failure + * + */ +int ram_block_discard_range(struct uc_struct *uc, RAMBlock *rb, uint64_t start, size_t length) { - RAMBlock *block; + int ret = -1; - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { - func(block->host, block->offset, block->length, opaque); + uint8_t *host_startaddr = rb->host + start; + + if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { + //error_report("ram_block_discard_range: Unaligned start address: %p", + // host_startaddr); + goto err; + } + + if ((start + length) <= rb->used_length) { + bool need_madvise; + if (!QEMU_IS_ALIGNED(length, rb->page_size)) { + //error_report("ram_block_discard_range: Unaligned length: %zx", + // length); + goto err; + } + + errno = ENOTSUP; /* If we are missing MADVISE etc */ + + /* The logic here is messy; + * madvise DONTNEED fails for hugepages + * fallocate works on hugepages and shmem + */ + need_madvise = (rb->page_size == uc->qemu_host_page_size); + if (need_madvise) { + /* For normal RAM this causes it to be unmapped, + * for shared memory it causes the local mapping to disappear + * and to fall back on the file contents (which we just + * fallocate'd away). + */ +#if defined(CONFIG_MADVISE) + ret = madvise(host_startaddr, length, MADV_DONTNEED); + if (ret) { + ret = -errno; + //error_report("ram_block_discard_range: Failed to discard range " + // "%s:%" PRIx64 " +%zx (%d)", + // rb->idstr, start, length, ret); + goto err; + } +#else + ret = -ENOSYS; + //error_report("ram_block_discard_range: MADVISE not available" + // "%s:%" PRIx64 " +%zx (%d)", + // rb->idstr, start, length, ret); + goto err; +#endif + } + } else { + //error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 + // "/%zx/" RAM_ADDR_FMT")", + // rb->idstr, start, length, rb->used_length); + } + +err: + return ret; +} + +bool ramblock_is_pmem(RAMBlock *rb) +{ + return rb->flags & RAM_PMEM; +} + +void page_size_init(struct uc_struct *uc) +{ + /* NOTE: we can always suppose that qemu_host_page_size >= + TARGET_PAGE_SIZE */ + if (uc->qemu_host_page_size == 0) { + uc->qemu_host_page_size = uc->qemu_real_host_page_size; + } + if (uc->qemu_host_page_size < TARGET_PAGE_SIZE) { + uc->qemu_host_page_size = TARGET_PAGE_SIZE; } } -#endif diff --git a/qemu/fpu/softfloat-specialize.h b/qemu/fpu/softfloat-specialize.h deleted file mode 100644 index 549b4256..00000000 --- a/qemu/fpu/softfloat-specialize.h +++ /dev/null @@ -1,1161 +0,0 @@ -/* - * QEMU float support - * - * Derived from SoftFloat. - */ - -/*============================================================================ - -This C source fragment is part of the SoftFloat IEC/IEEE Floating-point -Arithmetic Package, Release 2b. - -Written by John R. Hauser. This work was made possible in part by the -International Computer Science Institute, located at Suite 600, 1947 Center -Street, Berkeley, California 94704. Funding was partially provided by the -National Science Foundation under grant MIP-9311980. The original version -of this code was written as part of a project to build a fixed-point vector -processor in collaboration with the University of California at Berkeley, -overseen by Profs. Nelson Morgan and John Wawrzynek. More information -is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ -arithmetic/SoftFloat.html'. - -THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has -been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES -RESULT IN INCORRECT BEHAVIOR.
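The MADV_DONTNEED branch of ram_block_discard_range above is what makes the discarded pages read back as zeroes for anonymous RAM. A self-contained POSIX sketch of the same call, assuming Linux semantics and with an invented helper name:

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Discard a host-page-aligned range; subsequent reads return zeroes
     * for anonymous mappings, mirroring the MADV_DONTNEED branch above. */
    static int discard_range(void *host_start, size_t length, size_t page_size)
    {
        if (((uintptr_t)host_start & (page_size - 1)) ||
            (length & (page_size - 1))) {
            return -EINVAL;   /* same alignment checks as the patch */
        }
        if (madvise(host_start, length, MADV_DONTNEED) != 0) {
            return -errno;
        }
        return 0;
    }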
USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS -AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, -COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE -EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE -INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR -OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. - -Derivative works are acceptable, even for commercial purposes, so long as -(1) the source code for the derivative work includes prominent notice that -the work is derivative, and (2) the source code includes prominent notice with -these four paragraphs for those parts of this code that are retained. - -=============================================================================*/ - -#if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) -#define SNAN_BIT_IS_ONE 1 -#else -#define SNAN_BIT_IS_ONE 0 -#endif - -#if defined(TARGET_XTENSA) -/* Define for architectures which deviate from IEEE in not supporting - * signaling NaNs (so all NaNs are treated as quiet). - */ -#define NO_SIGNALING_NANS 1 -#endif - -/*---------------------------------------------------------------------------- -| The pattern for a default generated half-precision NaN. -*----------------------------------------------------------------------------*/ -#if defined(TARGET_ARM) -const float16 float16_default_nan = const_float16(0x7E00); -#elif SNAN_BIT_IS_ONE -const float16 float16_default_nan = const_float16(0x7DFF); -#else -const float16 float16_default_nan = const_float16(0xFE00); -#endif - -/*---------------------------------------------------------------------------- -| The pattern for a default generated single-precision NaN. -*----------------------------------------------------------------------------*/ -#if defined(TARGET_SPARC) -const float32 float32_default_nan = const_float32(0x7FFFFFFF); -#elif defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) || \ - defined(TARGET_XTENSA) -const float32 float32_default_nan = const_float32(0x7FC00000); -#elif SNAN_BIT_IS_ONE -const float32 float32_default_nan = const_float32(0x7FBFFFFF); -#else -const float32 float32_default_nan = const_float32(0xFFC00000); -#endif - -/*---------------------------------------------------------------------------- -| The pattern for a default generated double-precision NaN. -*----------------------------------------------------------------------------*/ -#if defined(TARGET_SPARC) -const float64 float64_default_nan = const_float64(LIT64( 0x7FFFFFFFFFFFFFFF )); -#elif defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) -const float64 float64_default_nan = const_float64(LIT64( 0x7FF8000000000000 )); -#elif SNAN_BIT_IS_ONE -const float64 float64_default_nan = const_float64(LIT64( 0x7FF7FFFFFFFFFFFF )); -#else -const float64 float64_default_nan = const_float64(LIT64( 0xFFF8000000000000 )); -#endif - -/*---------------------------------------------------------------------------- -| The pattern for a default generated extended double-precision NaN. 
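Every per-target default-NaN constant in the deleted block above follows the same recipe: exponent all ones, the fraction MSB (the quiet bit) set, and a target-chosen sign; SPARC instead sets the whole fraction. Assembling the x86-style float32 value, as an illustration rather than patch code:

    #include <stdint.h>

    /* Build 0xFFC00000, the x86-flavoured float32 default NaN:
     * sign = 1, exponent = all ones, quiet bit (fraction MSB) = 1. */
    static uint32_t float32_default_nan_pattern(void)
    {
        uint32_t sign  = 1u << 31;
        uint32_t exp   = 0xFFu << 23;
        uint32_t quiet = 1u << 22;
        return sign | exp | quiet;          /* == 0xFFC00000 */
    }

Clearing the sign bit gives the 0x7FC00000 variant used by PPC/ARM/Alpha/Xtensa above, and filling the whole fraction gives SPARC's 0x7FFFFFFF.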
-*----------------------------------------------------------------------------*/ -#if SNAN_BIT_IS_ONE -#define floatx80_default_nan_high 0x7FFF -#define floatx80_default_nan_low LIT64( 0xBFFFFFFFFFFFFFFF ) -#else -#define floatx80_default_nan_high 0xFFFF -#define floatx80_default_nan_low LIT64( 0xC000000000000000 ) -#endif - -const floatx80 floatx80_default_nan - = make_floatx80_init(floatx80_default_nan_high, floatx80_default_nan_low); - -/*---------------------------------------------------------------------------- -| The pattern for a default generated quadruple-precision NaN. The `high' and -| `low' values hold the most- and least-significant bits, respectively. -*----------------------------------------------------------------------------*/ -#if SNAN_BIT_IS_ONE -#define float128_default_nan_high LIT64( 0x7FFF7FFFFFFFFFFF ) -#define float128_default_nan_low LIT64( 0xFFFFFFFFFFFFFFFF ) -#else -#define float128_default_nan_high LIT64( 0xFFFF800000000000 ) -#define float128_default_nan_low LIT64( 0x0000000000000000 ) -#endif - -const float128 float128_default_nan - = make_float128_init(float128_default_nan_high, float128_default_nan_low); - -/*---------------------------------------------------------------------------- -| Raises the exceptions specified by `flags'. Floating-point traps can be -| defined here if desired. It is currently not possible for such a trap -| to substitute a result value. If traps are not implemented, this routine -| should be simply `float_exception_flags |= flags;'. -*----------------------------------------------------------------------------*/ - -void float_raise( uint8_t flags STATUS_PARAM ) -{ - STATUS(float_exception_flags) |= flags; -} - -/*---------------------------------------------------------------------------- -| Internal canonical NaN format. -*----------------------------------------------------------------------------*/ -typedef struct { - flag sign; - uint64_t high, low; -} commonNaNT; - -#ifdef NO_SIGNALING_NANS -int float16_is_quiet_nan(float16 a_) -{ - return float16_is_any_nan(a_); -} - -int float16_is_signaling_nan(float16 a_) -{ - return 0; -} -#else -/*---------------------------------------------------------------------------- -| Returns 1 if the half-precision floating-point value `a' is a quiet -| NaN; otherwise returns 0. -*----------------------------------------------------------------------------*/ - -int float16_is_quiet_nan(float16 a_) -{ - uint16_t a = float16_val(a_); -#if SNAN_BIT_IS_ONE - return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); -#else - return ((a & ~0x8000) >= 0x7c80); -#endif -} - -/*---------------------------------------------------------------------------- -| Returns 1 if the half-precision floating-point value `a' is a signaling -| NaN; otherwise returns 0. -*----------------------------------------------------------------------------*/ - -int float16_is_signaling_nan(float16 a_) -{ - uint16_t a = float16_val(a_); -#if SNAN_BIT_IS_ONE - return ((a & ~0x8000) >= 0x7c80); -#else - return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); -#endif -} -#endif - -/*---------------------------------------------------------------------------- -| Returns a quiet NaN if the half-precision floating point value `a' is a -| signaling NaN; otherwise returns `a'. 
-*----------------------------------------------------------------------------*/ -float16 float16_maybe_silence_nan(float16 a_) -{ - if (float16_is_signaling_nan(a_)) { -#if SNAN_BIT_IS_ONE -# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) - return float16_default_nan; -# else -# error Rules for silencing a signaling NaN are target-specific -# endif -#else - uint16_t a = float16_val(a_); - a |= (1 << 9); - return make_float16(a); -#endif - } - return a_; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the half-precision floating-point NaN -| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid -| exception is raised. -*----------------------------------------------------------------------------*/ - -static commonNaNT float16ToCommonNaN( float16 a STATUS_PARAM ) -{ - commonNaNT z; - - if ( float16_is_signaling_nan( a ) ) float_raise( float_flag_invalid STATUS_VAR ); - z.sign = float16_val(a) >> 15; - z.low = 0; - z.high = ((uint64_t) float16_val(a))<<54; - return z; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the canonical NaN `a' to the half- -| precision floating-point format. -*----------------------------------------------------------------------------*/ - -static float16 commonNaNToFloat16(commonNaNT a STATUS_PARAM) -{ - uint16_t mantissa = a.high>>54; - - if (STATUS(default_nan_mode)) { - return float16_default_nan; - } - - if (mantissa) { - return make_float16(((((uint16_t) a.sign) << 15) - | (0x1F << 10) | mantissa)); - } else { - return float16_default_nan; - } -} - -#ifdef NO_SIGNALING_NANS -int float32_is_quiet_nan(float32 a_) -{ - return float32_is_any_nan(a_); -} - -int float32_is_signaling_nan(float32 a_) -{ - return 0; -} -#else -/*---------------------------------------------------------------------------- -| Returns 1 if the single-precision floating-point value `a' is a quiet -| NaN; otherwise returns 0. -*----------------------------------------------------------------------------*/ - -int float32_is_quiet_nan( float32 a_ ) -{ - uint32_t a = float32_val(a_); -#if SNAN_BIT_IS_ONE - return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); -#else - return ( 0xFF800000 <= (uint32_t) ( a<<1 ) ); -#endif -} - -/*---------------------------------------------------------------------------- -| Returns 1 if the single-precision floating-point value `a' is a signaling -| NaN; otherwise returns 0. -*----------------------------------------------------------------------------*/ - -int float32_is_signaling_nan( float32 a_ ) -{ - uint32_t a = float32_val(a_); -#if SNAN_BIT_IS_ONE - return ( 0xFF800000 <= (uint32_t) ( a<<1 ) ); -#else - return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); -#endif -} -#endif - -/*---------------------------------------------------------------------------- -| Returns a quiet NaN if the single-precision floating point value `a' is a -| signaling NaN; otherwise returns `a'. 
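The float32 predicates above lean on a shift trick: a << 1 discards the sign, after which one unsigned comparison checks "exponent all ones and quiet bit set". The same tests written out for the quiet-bit-is-one convention (the #else branches above):

    #include <stdint.h>

    /* NaN: exponent all ones and fraction nonzero; the << 1 drops the
     * sign so a single unsigned compare does both checks. */
    static int f32_is_nan(uint32_t a)  { return (uint32_t)(a << 1) > 0xFF000000u; }
    /* Quiet NaN additionally has the fraction MSB (bit 22) set. */
    static int f32_is_qnan(uint32_t a) { return (uint32_t)(a << 1) >= 0xFF800000u; }
    /* Signaling NaN: a NaN whose quiet bit is clear. */
    static int f32_is_snan(uint32_t a) { return f32_is_nan(a) && !(a & (1u << 22)); }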
-*----------------------------------------------------------------------------*/ - -float32 float32_maybe_silence_nan( float32 a_ ) -{ - if (float32_is_signaling_nan(a_)) { -#if SNAN_BIT_IS_ONE -# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) - return float32_default_nan; -# else -# error Rules for silencing a signaling NaN are target-specific -# endif -#else - uint32_t a = float32_val(a_); - a |= (1 << 22); - return make_float32(a); -#endif - } - return a_; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the single-precision floating-point NaN -| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid -| exception is raised. -*----------------------------------------------------------------------------*/ - -static commonNaNT float32ToCommonNaN( float32 a STATUS_PARAM ) -{ - commonNaNT z; - - if ( float32_is_signaling_nan( a ) ) float_raise( float_flag_invalid STATUS_VAR ); - z.sign = float32_val(a)>>31; - z.low = 0; - z.high = ( (uint64_t) float32_val(a) )<<41; - return z; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the canonical NaN `a' to the single- -| precision floating-point format. -*----------------------------------------------------------------------------*/ - -static float32 commonNaNToFloat32( commonNaNT a STATUS_PARAM) -{ - uint32_t mantissa = a.high>>41; - - if ( STATUS(default_nan_mode) ) { - return float32_default_nan; - } - - if ( mantissa ) - return make_float32( - ( ( (uint32_t) a.sign )<<31 ) | 0x7F800000 | ( a.high>>41 ) ); - else - return float32_default_nan; -} - -/*---------------------------------------------------------------------------- -| Select which NaN to propagate for a two-input operation. -| IEEE754 doesn't specify all the details of this, so the -| algorithm is target-specific. -| The routine is passed various bits of information about the -| two NaNs and should return 0 to select NaN a and 1 for NaN b. -| Note that signalling NaNs are always squashed to quiet NaNs -| by the caller, by calling floatXX_maybe_silence_nan() before -| returning them. -| -| aIsLargerSignificand is only valid if both a and b are NaNs -| of some kind, and is true if a has the larger significand, -| or if both a and b have the same significand but a is -| positive but b is negative. It is only needed for the x87 -| tie-break rule. -*----------------------------------------------------------------------------*/ - -#if defined(TARGET_ARM) -static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, - flag aIsLargerSignificand) -{ - /* ARM mandated NaN propagation rules: take the first of: - * 1. A if it is signaling - * 2. B if it is signaling - * 3. A (quiet) - * 4. B (quiet) - * A signaling NaN is always quietened before returning it. - */ - if (aIsSNaN) { - return 0; - } else if (bIsSNaN) { - return 1; - } else if (aIsQNaN) { - return 0; - } else { - return 1; - } -} -#elif defined(TARGET_MIPS) -static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, - flag aIsLargerSignificand) -{ - /* According to MIPS specifications, if one of the two operands is - * a sNaN, a new qNaN has to be generated. This is done in - * floatXX_maybe_silence_nan(). For qNaN inputs the specifications - * says: "When possible, this QNaN result is one of the operand QNaN - * values." 
In practice it seems that most implementations choose - * the first operand if both operands are qNaN. In short this gives - * the following rules: - * 1. A if it is signaling - * 2. B if it is signaling - * 3. A (quiet) - * 4. B (quiet) - * A signaling NaN is always silenced before returning it. - */ - if (aIsSNaN) { - return 0; - } else if (bIsSNaN) { - return 1; - } else if (aIsQNaN) { - return 0; - } else { - return 1; - } -} -#elif defined(TARGET_PPC) || defined(TARGET_XTENSA) -static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, - flag aIsLargerSignificand) -{ - /* PowerPC propagation rules: - * 1. A if it sNaN or qNaN - * 2. B if it sNaN or qNaN - * A signaling NaN is always silenced before returning it. - */ - if (aIsSNaN || aIsQNaN) { - return 0; - } else { - return 1; - } -} -#else -static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, - flag aIsLargerSignificand) -{ - /* This implements x87 NaN propagation rules: - * SNaN + QNaN => return the QNaN - * two SNaNs => return the one with the larger significand, silenced - * two QNaNs => return the one with the larger significand - * SNaN and a non-NaN => return the SNaN, silenced - * QNaN and a non-NaN => return the QNaN - * - * If we get down to comparing significands and they are the same, - * return the NaN with the positive sign bit (if any). - */ - if (aIsSNaN) { - if (bIsSNaN) { - return aIsLargerSignificand ? 0 : 1; - } - return bIsQNaN ? 1 : 0; - } - else if (aIsQNaN) { - if (bIsSNaN || !bIsQNaN) - return 0; - else { - return aIsLargerSignificand ? 0 : 1; - } - } else { - return 1; - } -} -#endif - -/*---------------------------------------------------------------------------- -| Select which NaN to propagate for a three-input operation. -| For the moment we assume that no CPU needs the 'larger significand' -| information. -| Return values : 0 : a; 1 : b; 2 : c; 3 : default-NaN -*----------------------------------------------------------------------------*/ -#if defined(TARGET_ARM) -static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, - flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) -{ - /* For ARM, the (inf,zero,qnan) case sets InvalidOp and returns - * the default NaN - */ - if (infzero && cIsQNaN) { - float_raise(float_flag_invalid STATUS_VAR); - return 3; - } - - /* This looks different from the ARM ARM pseudocode, because the ARM ARM - * puts the operands to a fused mac operation (a*b)+c in the order c,a,b. - */ - if (cIsSNaN) { - return 2; - } else if (aIsSNaN) { - return 0; - } else if (bIsSNaN) { - return 1; - } else if (cIsQNaN) { - return 2; - } else if (aIsQNaN) { - return 0; - } else { - return 1; - } -} -#elif defined(TARGET_MIPS) -static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, - flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) -{ - /* For MIPS, the (inf,zero,qnan) case sets InvalidOp and returns - * the default NaN - */ - if (infzero) { - float_raise(float_flag_invalid STATUS_VAR); - return 3; - } - - /* Prefer sNaN over qNaN, in the a, b, c order. 
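Of the pickNaN variants above, only the x87 fallback consults aIsLargerSignificand. Its decision table, restated as a standalone function with invented enum names:

    typedef enum { OP_NUM, OP_QNAN, OP_SNAN } NanClass;

    /* x87-style two-operand NaN choice: 0 selects a, 1 selects b.
     * A QNaN beats an SNaN; equal classes fall back to the operand
     * with the larger significand. */
    static int pick_nan_x87(NanClass a, NanClass b, int a_larger)
    {
        if (a == OP_SNAN) {
            return (b == OP_SNAN) ? (a_larger ? 0 : 1)
                                  : (b == OP_QNAN ? 1 : 0);
        }
        if (a == OP_QNAN) {
            return (b == OP_QNAN) ? (a_larger ? 0 : 1) : 0;
        }
        return 1;   /* a is not a NaN, so b must be */
    }

The asymmetry (a QNaN operand beats an SNaN operand) mirrors the x87's preference for returning an already-quiet NaN over a freshly silenced signaling one.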
*/ - if (aIsSNaN) { - return 0; - } else if (bIsSNaN) { - return 1; - } else if (cIsSNaN) { - return 2; - } else if (aIsQNaN) { - return 0; - } else if (bIsQNaN) { - return 1; - } else { - return 2; - } -} -#elif defined(TARGET_PPC) -static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, - flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) -{ - /* For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer - * to return an input NaN if we have one (ie c) rather than generating - * a default NaN - */ - if (infzero) { - float_raise(float_flag_invalid STATUS_VAR); - return 2; - } - - /* If fRA is a NaN return it; otherwise if fRB is a NaN return it; - * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB - */ - if (aIsSNaN || aIsQNaN) { - return 0; - } else if (cIsSNaN || cIsQNaN) { - return 2; - } else { - return 1; - } -} -#else -/* A default implementation: prefer a to b to c. - * This is unlikely to actually match any real implementation. - */ -static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, - flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) -{ - if (aIsSNaN || aIsQNaN) { - return 0; - } else if (bIsSNaN || bIsQNaN) { - return 1; - } else { - return 2; - } -} -#endif - -/*---------------------------------------------------------------------------- -| Takes two single-precision floating-point values `a' and `b', one of which -| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a -| signaling NaN, the invalid exception is raised. -*----------------------------------------------------------------------------*/ - -static float32 propagateFloat32NaN( float32 a, float32 b STATUS_PARAM) -{ - flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN; - flag aIsLargerSignificand; - uint32_t av, bv; - - aIsQuietNaN = float32_is_quiet_nan( a ); - aIsSignalingNaN = float32_is_signaling_nan( a ); - bIsQuietNaN = float32_is_quiet_nan( b ); - bIsSignalingNaN = float32_is_signaling_nan( b ); - av = float32_val(a); - bv = float32_val(b); - - if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid STATUS_VAR); - - if ( STATUS(default_nan_mode) ) - return float32_default_nan; - - if ((uint32_t)(av<<1) < (uint32_t)(bv<<1)) { - aIsLargerSignificand = 0; - } else if ((uint32_t)(bv<<1) < (uint32_t)(av<<1)) { - aIsLargerSignificand = 1; - } else { - aIsLargerSignificand = (av < bv) ? 1 : 0; - } - - if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, - aIsLargerSignificand)) { - return float32_maybe_silence_nan(b); - } else { - return float32_maybe_silence_nan(a); - } -} - -/*---------------------------------------------------------------------------- -| Takes three single-precision floating-point values `a', `b' and `c', one of -| which is a NaN, and returns the appropriate NaN result. If any of `a', -| `b' or `c' is a signaling NaN, the invalid exception is raised. -| The input infzero indicates whether a*b was 0*inf or inf*0 (in which case -| obviously c is a NaN, and whether to propagate c or some other NaN is -| implementation defined). 
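propagateFloat32NaN above computes aIsLargerSignificand by comparing av << 1 with bv << 1 (sign shifted out), breaking exact payload ties toward the operand with the positive sign. In isolation:

    #include <stdint.h>

    /* 1 if NaN 'a' has the larger significand (the x87 tie-break input):
     * the << 1 drops the sign bit; on equal payloads the value with the
     * smaller raw encoding (positive sign) wins. */
    static int a_is_larger_significand(uint32_t av, uint32_t bv)
    {
        if ((uint32_t)(av << 1) < (uint32_t)(bv << 1)) {
            return 0;
        } else if ((uint32_t)(bv << 1) < (uint32_t)(av << 1)) {
            return 1;
        }
        return (av < bv) ? 1 : 0;   /* same payload: prefer positive sign */
    }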
-*----------------------------------------------------------------------------*/ - -static float32 propagateFloat32MulAddNaN(float32 a, float32 b, - float32 c, flag infzero STATUS_PARAM) -{ - flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, - cIsQuietNaN, cIsSignalingNaN; - int which; - - aIsQuietNaN = float32_is_quiet_nan(a); - aIsSignalingNaN = float32_is_signaling_nan(a); - bIsQuietNaN = float32_is_quiet_nan(b); - bIsSignalingNaN = float32_is_signaling_nan(b); - cIsQuietNaN = float32_is_quiet_nan(c); - cIsSignalingNaN = float32_is_signaling_nan(c); - - if (aIsSignalingNaN | bIsSignalingNaN | cIsSignalingNaN) { - float_raise(float_flag_invalid STATUS_VAR); - } - - which = pickNaNMulAdd(aIsQuietNaN, aIsSignalingNaN, - bIsQuietNaN, bIsSignalingNaN, - cIsQuietNaN, cIsSignalingNaN, infzero STATUS_VAR); - - if (STATUS(default_nan_mode)) { - /* Note that this check is after pickNaNMulAdd so that function - * has an opportunity to set the Invalid flag. - */ - return float32_default_nan; - } - - switch (which) { - case 0: - return float32_maybe_silence_nan(a); - case 1: - return float32_maybe_silence_nan(b); - case 2: - return float32_maybe_silence_nan(c); - case 3: - default: - return float32_default_nan; - } -} - -#ifdef NO_SIGNALING_NANS -int float64_is_quiet_nan(float64 a_) -{ - return float64_is_any_nan(a_); -} - -int float64_is_signaling_nan(float64 a_) -{ - return 0; -} -#else -/*---------------------------------------------------------------------------- -| Returns 1 if the double-precision floating-point value `a' is a quiet -| NaN; otherwise returns 0. -*----------------------------------------------------------------------------*/ - -int float64_is_quiet_nan( float64 a_ ) -{ - uint64_t a = float64_val(a_); -#if SNAN_BIT_IS_ONE - return - ( ( ( a>>51 ) & 0xFFF ) == 0xFFE ) - && ( a & LIT64( 0x0007FFFFFFFFFFFF ) ); -#else - return ( LIT64( 0xFFF0000000000000 ) <= (uint64_t) ( a<<1 ) ); -#endif -} - -/*---------------------------------------------------------------------------- -| Returns 1 if the double-precision floating-point value `a' is a signaling -| NaN; otherwise returns 0. -*----------------------------------------------------------------------------*/ - -int float64_is_signaling_nan( float64 a_ ) -{ - uint64_t a = float64_val(a_); -#if SNAN_BIT_IS_ONE - return ( LIT64( 0xFFF0000000000000 ) <= (uint64_t) ( a<<1 ) ); -#else - return - ( ( ( a>>51 ) & 0xFFF ) == 0xFFE ) - && ( a & LIT64( 0x0007FFFFFFFFFFFF ) ); -#endif -} -#endif - -/*---------------------------------------------------------------------------- -| Returns a quiet NaN if the double-precision floating point value `a' is a -| signaling NaN; otherwise returns `a'. -*----------------------------------------------------------------------------*/ - -float64 float64_maybe_silence_nan( float64 a_ ) -{ - if (float64_is_signaling_nan(a_)) { -#if SNAN_BIT_IS_ONE -# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) - return float64_default_nan; -# else -# error Rules for silencing a signaling NaN are target-specific -# endif -#else - uint64_t a = float64_val(a_); - a |= LIT64( 0x0008000000000000 ); - return make_float64(a); -#endif - } - return a_; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the double-precision floating-point NaN -| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid -| exception is raised. 
-*----------------------------------------------------------------------------*/ - -static commonNaNT float64ToCommonNaN( float64 a STATUS_PARAM) -{ - commonNaNT z; - - if ( float64_is_signaling_nan( a ) ) float_raise( float_flag_invalid STATUS_VAR); - z.sign = float64_val(a)>>63; - z.low = 0; - z.high = float64_val(a)<<12; - return z; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the canonical NaN `a' to the double- -| precision floating-point format. -*----------------------------------------------------------------------------*/ - -static float64 commonNaNToFloat64( commonNaNT a STATUS_PARAM) -{ - uint64_t mantissa = a.high>>12; - - if ( STATUS(default_nan_mode) ) { - return float64_default_nan; - } - - if ( mantissa ) - return make_float64( - ( ( (uint64_t) a.sign )<<63 ) - | LIT64( 0x7FF0000000000000 ) - | ( a.high>>12 )); - else - return float64_default_nan; -} - -/*---------------------------------------------------------------------------- -| Takes two double-precision floating-point values `a' and `b', one of which -| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a -| signaling NaN, the invalid exception is raised. -*----------------------------------------------------------------------------*/ - -static float64 propagateFloat64NaN( float64 a, float64 b STATUS_PARAM) -{ - flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN; - flag aIsLargerSignificand; - uint64_t av, bv; - - aIsQuietNaN = float64_is_quiet_nan( a ); - aIsSignalingNaN = float64_is_signaling_nan( a ); - bIsQuietNaN = float64_is_quiet_nan( b ); - bIsSignalingNaN = float64_is_signaling_nan( b ); - av = float64_val(a); - bv = float64_val(b); - - if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid STATUS_VAR); - - if ( STATUS(default_nan_mode) ) - return float64_default_nan; - - if ((uint64_t)(av<<1) < (uint64_t)(bv<<1)) { - aIsLargerSignificand = 0; - } else if ((uint64_t)(bv<<1) < (uint64_t)(av<<1)) { - aIsLargerSignificand = 1; - } else { - aIsLargerSignificand = (av < bv) ? 1 : 0; - } - - if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, - aIsLargerSignificand)) { - return float64_maybe_silence_nan(b); - } else { - return float64_maybe_silence_nan(a); - } -} - -/*---------------------------------------------------------------------------- -| Takes three double-precision floating-point values `a', `b' and `c', one of -| which is a NaN, and returns the appropriate NaN result. If any of `a', -| `b' or `c' is a signaling NaN, the invalid exception is raised. -| The input infzero indicates whether a*b was 0*inf or inf*0 (in which case -| obviously c is a NaN, and whether to propagate c or some other NaN is -| implementation defined). 
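float64ToCommonNaN and commonNaNToFloat64 above are pure bit-plumbing: the 52-bit payload slides between the double's fraction field and the top of the canonical 64-bit `high' word, shifting by 12 (1 sign + 11 exponent bits) each way. Stripped of status handling:

    #include <stdint.h>

    /* Pack a float64 NaN payload into the canonical form: sign bit plus
     * the fraction left-justified in a 64-bit word. */
    static void f64_to_common(uint64_t bits, int *sign, uint64_t *high)
    {
        *sign = (int)(bits >> 63);
        *high = bits << 12;
    }

    /* Unpack: reinstall the sign and an all-ones exponent around the payload. */
    static uint64_t common_to_f64(int sign, uint64_t high)
    {
        return ((uint64_t)sign << 63)
             | 0x7FF0000000000000ULL
             | (high >> 12);
    }

The real converter also falls back to the default NaN when the shifted-back mantissa would be zero, since that pattern would otherwise encode an infinity.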
-*----------------------------------------------------------------------------*/ - -static float64 propagateFloat64MulAddNaN(float64 a, float64 b, - float64 c, flag infzero STATUS_PARAM) -{ - flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, - cIsQuietNaN, cIsSignalingNaN; - int which; - - aIsQuietNaN = float64_is_quiet_nan(a); - aIsSignalingNaN = float64_is_signaling_nan(a); - bIsQuietNaN = float64_is_quiet_nan(b); - bIsSignalingNaN = float64_is_signaling_nan(b); - cIsQuietNaN = float64_is_quiet_nan(c); - cIsSignalingNaN = float64_is_signaling_nan(c); - - if (aIsSignalingNaN | bIsSignalingNaN | cIsSignalingNaN) { - float_raise(float_flag_invalid STATUS_VAR); - } - - which = pickNaNMulAdd(aIsQuietNaN, aIsSignalingNaN, - bIsQuietNaN, bIsSignalingNaN, - cIsQuietNaN, cIsSignalingNaN, infzero STATUS_VAR); - - if (STATUS(default_nan_mode)) { - /* Note that this check is after pickNaNMulAdd so that function - * has an opportunity to set the Invalid flag. - */ - return float64_default_nan; - } - - switch (which) { - case 0: - return float64_maybe_silence_nan(a); - case 1: - return float64_maybe_silence_nan(b); - case 2: - return float64_maybe_silence_nan(c); - case 3: - default: - return float64_default_nan; - } -} - -#ifdef NO_SIGNALING_NANS -int floatx80_is_quiet_nan(floatx80 a_) -{ - return floatx80_is_any_nan(a_); -} - -int floatx80_is_signaling_nan(floatx80 a_) -{ - return 0; -} -#else -/*---------------------------------------------------------------------------- -| Returns 1 if the extended double-precision floating-point value `a' is a -| quiet NaN; otherwise returns 0. This slightly differs from the same -| function for other types as floatx80 has an explicit bit. -*----------------------------------------------------------------------------*/ - -int floatx80_is_quiet_nan( floatx80 a ) -{ -#if SNAN_BIT_IS_ONE - uint64_t aLow; - - aLow = a.low & ~ LIT64( 0x4000000000000000 ); - return - ( ( a.high & 0x7FFF ) == 0x7FFF ) - && (uint64_t) ( aLow<<1 ) - && ( a.low == aLow ); -#else - return ( ( a.high & 0x7FFF ) == 0x7FFF ) - && (LIT64( 0x8000000000000000 ) <= ((uint64_t) ( a.low<<1 ))); -#endif -} - -/*---------------------------------------------------------------------------- -| Returns 1 if the extended double-precision floating-point value `a' is a -| signaling NaN; otherwise returns 0. This slightly differs from the same -| function for other types as floatx80 has an explicit bit. -*----------------------------------------------------------------------------*/ - -int floatx80_is_signaling_nan( floatx80 a ) -{ -#if SNAN_BIT_IS_ONE - return ( ( a.high & 0x7FFF ) == 0x7FFF ) - && (LIT64( 0x8000000000000000 ) <= ((uint64_t) ( a.low<<1 ))); -#else - uint64_t aLow; - - aLow = a.low & ~ LIT64( 0x4000000000000000 ); - return - ( ( a.high & 0x7FFF ) == 0x7FFF ) - && (uint64_t) ( aLow<<1 ) - && ( a.low == aLow ); -#endif -} -#endif - -/*---------------------------------------------------------------------------- -| Returns a quiet NaN if the extended double-precision floating point value -| `a' is a signaling NaN; otherwise returns `a'. 
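As the floatx80 comments above point out, the extended format carries an explicit integer bit at bit 63 of `low', so the quiet/signaling discriminator is bit 62, and the signaling test must also confirm the quiet bit is clear. A sketch for the usual quiet-bit-is-one convention:

    #include <stdint.h>

    /* 80-bit extended format: 'high' holds sign+exponent, 'low' the
     * significand with an explicit integer bit at bit 63. */
    struct x80 { uint64_t low; uint16_t high; };

    static int x80_is_qnan(struct x80 a)
    {
        return ((a.high & 0x7FFF) == 0x7FFF)   /* exponent all ones */
            && ((uint64_t)(a.low << 1) >= 0x8000000000000000ULL); /* bit 62 set */
    }

    static int x80_is_snan(struct x80 a)
    {
        uint64_t frac = a.low & ~0x4000000000000000ULL;  /* clear quiet bit */
        return ((a.high & 0x7FFF) == 0x7FFF)
            && (uint64_t)(frac << 1)            /* payload nonzero...   */
            && (a.low == frac);                 /* ...and bit 62 clear  */
    }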
-*----------------------------------------------------------------------------*/ - -floatx80 floatx80_maybe_silence_nan( floatx80 a ) -{ - if (floatx80_is_signaling_nan(a)) { -#if SNAN_BIT_IS_ONE -# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) - a.low = floatx80_default_nan_low; - a.high = floatx80_default_nan_high; -# else -# error Rules for silencing a signaling NaN are target-specific -# endif -#else - a.low |= LIT64( 0xC000000000000000 ); - return a; -#endif - } - return a; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the extended double-precision floating- -| point NaN `a' to the canonical NaN format. If `a' is a signaling NaN, the -| invalid exception is raised. -*----------------------------------------------------------------------------*/ - -static commonNaNT floatx80ToCommonNaN( floatx80 a STATUS_PARAM) -{ - commonNaNT z; - - if ( floatx80_is_signaling_nan( a ) ) float_raise( float_flag_invalid STATUS_VAR); - if ( a.low >> 63 ) { - z.sign = a.high >> 15; - z.low = 0; - z.high = a.low << 1; - } else { - z.sign = floatx80_default_nan_high >> 15; - z.low = 0; - z.high = floatx80_default_nan_low << 1; - } - return z; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the canonical NaN `a' to the extended -| double-precision floating-point format. -*----------------------------------------------------------------------------*/ - -static floatx80 commonNaNToFloatx80( commonNaNT a STATUS_PARAM) -{ - floatx80 z; - - if ( STATUS(default_nan_mode) ) { - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; - } - - if (a.high >> 1) { - z.low = LIT64( 0x8000000000000000 ) | a.high >> 1; - z.high = ( ( (uint16_t) a.sign )<<15 ) | 0x7FFF; - } else { - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - } - - return z; -} - -/*---------------------------------------------------------------------------- -| Takes two extended double-precision floating-point values `a' and `b', one -| of which is a NaN, and returns the appropriate NaN result. If either `a' or -| `b' is a signaling NaN, the invalid exception is raised. -*----------------------------------------------------------------------------*/ - -static floatx80 propagateFloatx80NaN( floatx80 a, floatx80 b STATUS_PARAM) -{ - flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN; - flag aIsLargerSignificand; - - aIsQuietNaN = floatx80_is_quiet_nan( a ); - aIsSignalingNaN = floatx80_is_signaling_nan( a ); - bIsQuietNaN = floatx80_is_quiet_nan( b ); - bIsSignalingNaN = floatx80_is_signaling_nan( b ); - - if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid STATUS_VAR); - - if ( STATUS(default_nan_mode) ) { - a.low = floatx80_default_nan_low; - a.high = floatx80_default_nan_high; - return a; - } - - if (a.low < b.low) { - aIsLargerSignificand = 0; - } else if (b.low < a.low) { - aIsLargerSignificand = 1; - } else { - aIsLargerSignificand = (a.high < b.high) ? 
1 : 0; - } - - if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, - aIsLargerSignificand)) { - return floatx80_maybe_silence_nan(b); - } else { - return floatx80_maybe_silence_nan(a); - } -} - -#ifdef NO_SIGNALING_NANS -int float128_is_quiet_nan(float128 a_) -{ - return float128_is_any_nan(a_); -} - -int float128_is_signaling_nan(float128 a_) -{ - return 0; -} -#else -/*---------------------------------------------------------------------------- -| Returns 1 if the quadruple-precision floating-point value `a' is a quiet -| NaN; otherwise returns 0. -*----------------------------------------------------------------------------*/ - -int float128_is_quiet_nan( float128 a ) -{ -#if SNAN_BIT_IS_ONE - return - ( ( ( a.high>>47 ) & 0xFFFF ) == 0xFFFE ) - && ( a.low || ( a.high & LIT64( 0x00007FFFFFFFFFFF ) ) ); -#else - return - ( LIT64( 0xFFFE000000000000 ) <= (uint64_t) ( a.high<<1 ) ) - && ( a.low || ( a.high & LIT64( 0x0000FFFFFFFFFFFF ) ) ); -#endif -} - -/*---------------------------------------------------------------------------- -| Returns 1 if the quadruple-precision floating-point value `a' is a -| signaling NaN; otherwise returns 0. -*----------------------------------------------------------------------------*/ - -int float128_is_signaling_nan( float128 a ) -{ -#if SNAN_BIT_IS_ONE - return - ( LIT64( 0xFFFE000000000000 ) <= (uint64_t) ( a.high<<1 ) ) - && ( a.low || ( a.high & LIT64( 0x0000FFFFFFFFFFFF ) ) ); -#else - return - ( ( ( a.high>>47 ) & 0xFFFF ) == 0xFFFE ) - && ( a.low || ( a.high & LIT64( 0x00007FFFFFFFFFFF ) ) ); -#endif -} -#endif - -/*---------------------------------------------------------------------------- -| Returns a quiet NaN if the quadruple-precision floating point value `a' is -| a signaling NaN; otherwise returns `a'. -*----------------------------------------------------------------------------*/ - -float128 float128_maybe_silence_nan( float128 a ) -{ - if (float128_is_signaling_nan(a)) { -#if SNAN_BIT_IS_ONE -# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) - a.low = float128_default_nan_low; - a.high = float128_default_nan_high; -# else -# error Rules for silencing a signaling NaN are target-specific -# endif -#else - a.high |= LIT64( 0x0000800000000000 ); - return a; -#endif - } - return a; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the quadruple-precision floating-point NaN -| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid -| exception is raised. -*----------------------------------------------------------------------------*/ - -static commonNaNT float128ToCommonNaN( float128 a STATUS_PARAM) -{ - commonNaNT z; - - if ( float128_is_signaling_nan( a ) ) float_raise( float_flag_invalid STATUS_VAR); - z.sign = a.high>>63; - shortShift128Left( a.high, a.low, 16, &z.high, &z.low ); - return z; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the canonical NaN `a' to the quadruple- -| precision floating-point format. 
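The float128 tests above apply the same shift trick to the upper 64 bits: after high << 1, the 15-bit exponent and the fraction MSB (bit 47 of high) form one comparable prefix, with `low' ORed in for the "fraction nonzero" part. Standalone:

    #include <stdint.h>

    struct f128 { uint64_t high, low; };   /* most/least significant halves */

    static int f128_is_qnan(struct f128 a)
    {
        return ((uint64_t)(a.high << 1) >= 0xFFFE000000000000ULL)
            && (a.low || (a.high & 0x0000FFFFFFFFFFFFULL));
    }

    static int f128_is_snan(struct f128 a)
    {
        return (((a.high >> 47) & 0xFFFF) == 0xFFFE)  /* exp ones, quiet bit 0 */
            && (a.low || (a.high & 0x00007FFFFFFFFFFFULL));
    }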
-*----------------------------------------------------------------------------*/ - -static float128 commonNaNToFloat128( commonNaNT a STATUS_PARAM) -{ - float128 z; - - if ( STATUS(default_nan_mode) ) { - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; - } - - shift128Right( a.high, a.low, 16, &z.high, &z.low ); - z.high |= ( ( (uint64_t) a.sign )<<63 ) | LIT64( 0x7FFF000000000000 ); - return z; -} - -/*---------------------------------------------------------------------------- -| Takes two quadruple-precision floating-point values `a' and `b', one of -| which is a NaN, and returns the appropriate NaN result. If either `a' or -| `b' is a signaling NaN, the invalid exception is raised. -*----------------------------------------------------------------------------*/ - -static float128 propagateFloat128NaN( float128 a, float128 b STATUS_PARAM) -{ - flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN; - flag aIsLargerSignificand; - - aIsQuietNaN = float128_is_quiet_nan( a ); - aIsSignalingNaN = float128_is_signaling_nan( a ); - bIsQuietNaN = float128_is_quiet_nan( b ); - bIsSignalingNaN = float128_is_signaling_nan( b ); - - if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid STATUS_VAR); - - if ( STATUS(default_nan_mode) ) { - a.low = float128_default_nan_low; - a.high = float128_default_nan_high; - return a; - } - - if (lt128(a.high<<1, a.low, b.high<<1, b.low)) { - aIsLargerSignificand = 0; - } else if (lt128(b.high<<1, b.low, a.high<<1, a.low)) { - aIsLargerSignificand = 1; - } else { - aIsLargerSignificand = (a.high < b.high) ? 1 : 0; - } - - if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, - aIsLargerSignificand)) { - return float128_maybe_silence_nan(b); - } else { - return float128_maybe_silence_nan(a); - } -} - diff --git a/qemu/fpu/softfloat-specialize.inc.c b/qemu/fpu/softfloat-specialize.inc.c new file mode 100644 index 00000000..5ab2fa19 --- /dev/null +++ b/qemu/fpu/softfloat-specialize.inc.c @@ -0,0 +1,1083 @@ +/* + * QEMU float support + * + * The code in this source file is derived from release 2a of the SoftFloat + * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and + * some later contributions) are provided under that license, as detailed below. + * It has subsequently been modified by contributors to the QEMU Project, + * so some portions are provided under: + * the SoftFloat-2a license + * the BSD license + * GPL-v2-or-later + * + * Any future contributions to this file after December 1st 2014 will be + * taken to be licensed under the Softfloat-2a license unless specifically + * indicated otherwise. + */ + +/* +=============================================================================== +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2a. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. 
Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these four paragraphs for those parts of +this code that are retained. + +=============================================================================== +*/ + +/* BSD licensing: + * Copyright (c) 2006, Fabrice Bellard + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Portions of this work are licensed under the terms of the GNU GPL, + * version 2 or later. See the COPYING file in the top-level directory. + */ + +/* Define for architectures which deviate from IEEE in not supporting + * signaling NaNs (so all NaNs are treated as quiet). + */ +#if defined(TARGET_XTENSA) +#define NO_SIGNALING_NANS 1 +#endif + +/* Define how the architecture discriminates signaling NaNs. + * This is done with the most significant bit of the fraction. + * In IEEE 754-1985 this was implementation defined, but in IEEE 754-2008 + * the msb must be zero. MIPS is (so far) unique in supporting both the + * 2008 revision and backward compatibility with their original choice. + * Thus for MIPS we must make the choice at runtime. + */ +static inline flag snan_bit_is_one(float_status *status) +{ +#if defined(TARGET_MIPS) + return status->snan_bit_is_one; +#elif defined(TARGET_HPPA) || defined(TARGET_UNICORE32) || defined(TARGET_SH4) + return 1; +#else + return 0; +#endif +} + +/*---------------------------------------------------------------------------- +| For the deconstructed floating-point with fraction FRAC, return true +| if the fraction represents a signalling NaN; otherwise false.
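snan_bit_is_one above turns the 1985-versus-2008 quiet-bit convention into a runtime predicate, so one code path can serve MIPS in either mode. The pattern it enables downstream, sketched for a float32 quiet test with a stand-in status type:

    #include <stdint.h>

    struct status { int snan_bit_is_one; };   /* stand-in for float_status */

    /* One test body, two conventions: under IEEE 754-2008 a set fraction
     * MSB means quiet; under the legacy choice (HPPA, SH4, Unicore32, and
     * optionally MIPS per the function above) the meaning of bit 22 flips. */
    static int f32_is_quiet_nan(uint32_t a, const struct status *s)
    {
        int is_nan  = (uint32_t)(a << 1) > 0xFF000000u;
        int msb_set = (a >> 22) & 1;
        return is_nan && (s->snan_bit_is_one ? !msb_set : msb_set);
    }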
+*----------------------------------------------------------------------------*/ + +static bool parts_is_snan_frac(uint64_t frac, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return false; +#else + flag msb = extract64(frac, DECOMPOSED_BINARY_POINT - 1, 1); + return msb == snan_bit_is_one(status); +#endif +} + +/*---------------------------------------------------------------------------- +| The pattern for a default generated deconstructed floating-point NaN. +*----------------------------------------------------------------------------*/ + +static FloatParts parts_default_nan(float_status *status) +{ + bool sign = 0; + uint64_t frac; + +#if defined(TARGET_SPARC) || defined(TARGET_M68K) + /* !snan_bit_is_one, set all bits */ + frac = (1ULL << DECOMPOSED_BINARY_POINT) - 1; +#elif defined(TARGET_I386) || defined(TARGET_X86_64) \ + || defined(TARGET_MICROBLAZE) + /* !snan_bit_is_one, set sign and msb */ + frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1); + sign = 1; +#elif defined(TARGET_HPPA) + /* snan_bit_is_one, set msb-1. */ + frac = 1ULL << (DECOMPOSED_BINARY_POINT - 2); +#else + /* This case is true for Alpha, ARM, MIPS, OpenRISC, PPC, RISC-V, + * S390, SH4, TriCore, and Xtensa. I cannot find documentation + * for Unicore32; the choice from the original commit is unchanged. + * Our other supported targets, CRIS, LM32, Moxie, Nios2, and Tile, + * do not have floating-point. + */ + if (snan_bit_is_one(status)) { + /* set all bits other than msb */ + frac = (1ULL << (DECOMPOSED_BINARY_POINT - 1)) - 1; + } else { + /* set msb */ + frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1); + } +#endif + + return (FloatParts) { + .cls = float_class_qnan, + .sign = sign, + .exp = INT_MAX, + .frac = frac + }; +} + +/*---------------------------------------------------------------------------- +| Returns a quiet NaN from a signalling NaN for the deconstructed +| floating-point parts. +*----------------------------------------------------------------------------*/ + +static FloatParts parts_silence_nan(FloatParts a, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + g_assert_not_reached(); +#elif defined(TARGET_HPPA) + a.frac &= ~(1ULL << (DECOMPOSED_BINARY_POINT - 1)); + a.frac |= 1ULL << (DECOMPOSED_BINARY_POINT - 2); +#else + if (snan_bit_is_one(status)) { + return parts_default_nan(status); + } else { + a.frac |= 1ULL << (DECOMPOSED_BINARY_POINT - 1); + } +#endif + a.cls = float_class_qnan; + return a; +} + +/*---------------------------------------------------------------------------- +| The pattern for a default generated extended double-precision NaN. +*----------------------------------------------------------------------------*/ +floatx80 floatx80_default_nan(float_status *status) +{ + floatx80 r; + + /* None of the targets that have snan_bit_is_one use floatx80. */ + assert(!snan_bit_is_one(status)); +#if defined(TARGET_M68K) + r.low = UINT64_C(0xFFFFFFFFFFFFFFFF); + r.high = 0x7FFF; +#else + /* X86 */ + r.low = UINT64_C(0xC000000000000000); + r.high = 0xFFFF; +#endif + return r; +} + +/*---------------------------------------------------------------------------- +| The pattern for a default generated extended double-precision inf. 
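parts_default_nan and parts_silence_nan above operate on the decomposed representation, where the fraction sits left-justified under DECOMPOSED_BINARY_POINT, so quieting a NaN is just setting one bit near the top (HPPA instead moves it down a position). A reduced sketch, assuming DECOMPOSED_BINARY_POINT is 62 as in QEMU's softfloat of this era:

    #include <stdint.h>

    #define DBP 62   /* assumed DECOMPOSED_BINARY_POINT (unit-bit position) */

    /* Quiet a decomposed NaN under the 2008 convention: set the fraction
     * MSB, one bit below the unit bit at DBP. */
    static uint64_t silence_frac(uint64_t frac)
    {
        return frac | (1ULL << (DBP - 1));
    }

    /* Default-NaN fraction for the common targets above: quiet bit only. */
    static uint64_t default_nan_frac(void)
    {
        return 1ULL << (DBP - 1);
    }

    /* Legacy (snan-bit-is-one) targets instead set every bit below the MSB. */
    static uint64_t default_nan_frac_legacy(void)
    {
        return (1ULL << (DBP - 1)) - 1;
    }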
+*----------------------------------------------------------------------------*/ + +#define floatx80_infinity_high 0x7FFF +#if defined(TARGET_M68K) +#define floatx80_infinity_low UINT64_C(0x0000000000000000) +#else +#define floatx80_infinity_low UINT64_C(0x8000000000000000) +#endif + +const floatx80 floatx80_infinity + = make_floatx80_init(floatx80_infinity_high, floatx80_infinity_low); + +/*---------------------------------------------------------------------------- +| Raises the exceptions specified by `flags'. Floating-point traps can be +| defined here if desired. It is currently not possible for such a trap +| to substitute a result value. If traps are not implemented, this routine +| should be simply `float_exception_flags |= flags;'. +*----------------------------------------------------------------------------*/ + +void float_raise(uint8_t flags, float_status *status) +{ + status->float_exception_flags |= flags; +} + +/*---------------------------------------------------------------------------- +| Internal canonical NaN format. +*----------------------------------------------------------------------------*/ +typedef struct { + flag sign; + uint64_t high, low; +} commonNaNT; + +/*---------------------------------------------------------------------------- +| Returns 1 if the half-precision floating-point value `a' is a quiet +| NaN; otherwise returns 0. +*----------------------------------------------------------------------------*/ + +int float16_is_quiet_nan(float16 a_, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return float16_is_any_nan(a_); +#else + uint16_t a = float16_val(a_); + if (snan_bit_is_one(status)) { + return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); + } else { + return ((a & ~0x8000) >= 0x7C80); + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the half-precision floating-point value `a' is a signaling +| NaN; otherwise returns 0. +*----------------------------------------------------------------------------*/ + +int float16_is_signaling_nan(float16 a_, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return 0; +#else + uint16_t a = float16_val(a_); + if (snan_bit_is_one(status)) { + return ((a & ~0x8000) >= 0x7C80); + } else { + return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point value `a' is a quiet +| NaN; otherwise returns 0. +*----------------------------------------------------------------------------*/ + +int float32_is_quiet_nan(float32 a_, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return float32_is_any_nan(a_); +#else + uint32_t a = float32_val(a_); + if (snan_bit_is_one(status)) { + return (((a >> 22) & 0x1FF) == 0x1FE) && (a & 0x003FFFFF); + } else { + return ((uint32_t)(a << 1) >= 0xFF800000); + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point value `a' is a signaling +| NaN; otherwise returns 0. 
+*----------------------------------------------------------------------------*/ + +int float32_is_signaling_nan(float32 a_, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return 0; +#else + uint32_t a = float32_val(a_); + if (snan_bit_is_one(status)) { + return ((uint32_t)(a << 1) >= 0xFF800000); + } else { + return (((a >> 22) & 0x1FF) == 0x1FE) && (a & 0x003FFFFF); + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point NaN +| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ + +static commonNaNT float32ToCommonNaN(float32 a, float_status *status) +{ + commonNaNT z; + + if (float32_is_signaling_nan(a, status)) { + float_raise(float_flag_invalid, status); + } + z.sign = float32_val(a) >> 31; + z.low = 0; + z.high = ((uint64_t)float32_val(a)) << 41; + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the single- +| precision floating-point format. +*----------------------------------------------------------------------------*/ + +static float32 commonNaNToFloat32(commonNaNT a, float_status *status) +{ + uint32_t mantissa = a.high >> 41; + + if (status->default_nan_mode) { + return float32_default_nan(status); + } + + if (mantissa) { + return make_float32( + (((uint32_t)a.sign) << 31) | 0x7F800000 | (a.high >> 41)); + } else { + return float32_default_nan(status); + } +} + +/*---------------------------------------------------------------------------- +| Select which NaN to propagate for a two-input operation. +| IEEE754 doesn't specify all the details of this, so the +| algorithm is target-specific. +| The routine is passed various bits of information about the +| two NaNs and should return 0 to select NaN a and 1 for NaN b. +| Note that signalling NaNs are always squashed to quiet NaNs +| by the caller, by calling floatXX_silence_nan() before +| returning them. +| +| aIsLargerSignificand is only valid if both a and b are NaNs +| of some kind, and is true if a has the larger significand, +| or if both a and b have the same significand but a is +| positive but b is negative. It is only needed for the x87 +| tie-break rule. +*----------------------------------------------------------------------------*/ + +static int pickNaN(FloatClass a_cls, FloatClass b_cls, + flag aIsLargerSignificand) +{ +#if defined(TARGET_ARM) || defined(TARGET_MIPS) || defined(TARGET_HPPA) + /* ARM mandated NaN propagation rules (see FPProcessNaNs()), take + * the first of: + * 1. A if it is signaling + * 2. B if it is signaling + * 3. A (quiet) + * 4. B (quiet) + * A signaling NaN is always quietened before returning it. + */ + /* According to MIPS specifications, if one of the two operands is + * a sNaN, a new qNaN has to be generated. This is done in + * floatXX_silence_nan(). For qNaN inputs the specifications + * says: "When possible, this QNaN result is one of the operand QNaN + * values." In practice it seems that most implementations choose + * the first operand if both operands are qNaN. In short this gives + * the following rules: + * 1. A if it is signaling + * 2. B if it is signaling + * 3. A (quiet) + * 4. B (quiet) + * A signaling NaN is always silenced before returning it. 
+     */
+    if (is_snan(a_cls)) {
+        return 0;
+    } else if (is_snan(b_cls)) {
+        return 1;
+    } else if (is_qnan(a_cls)) {
+        return 0;
+    } else {
+        return 1;
+    }
+#elif defined(TARGET_PPC) || defined(TARGET_XTENSA) || defined(TARGET_M68K)
+    /* PowerPC propagation rules:
+     *  1. A if it is sNaN or qNaN
+     *  2. B if it is sNaN or qNaN
+     * A signaling NaN is always silenced before returning it.
+     */
+    /* M68000 FAMILY PROGRAMMER'S REFERENCE MANUAL
+     * 3.4 FLOATING-POINT INSTRUCTION DETAILS
+     * If either operand, but not both operands, of an operation is a
+     * nonsignaling NaN, then that NaN is returned as the result. If both
+     * operands are nonsignaling NaNs, then the destination operand
+     * nonsignaling NaN is returned as the result.
+     * If either operand to an operation is a signaling NaN (SNaN), then the
+     * SNaN bit is set in the FPSR EXC byte. If the SNaN exception enable bit
+     * is set in the FPCR ENABLE byte, then the exception is taken and the
+     * destination is not modified. If the SNaN exception enable bit is not
+     * set, setting the SNaN bit in the operand to a one converts the SNaN to
+     * a nonsignaling NaN. The operation then continues as described in the
+     * preceding paragraph for nonsignaling NaNs.
+     */
+    if (is_nan(a_cls)) {
+        return 0;
+    } else {
+        return 1;
+    }
+#else
+    /* This implements x87 NaN propagation rules:
+     * SNaN + QNaN => return the QNaN
+     * two SNaNs => return the one with the larger significand, silenced
+     * two QNaNs => return the one with the larger significand
+     * SNaN and a non-NaN => return the SNaN, silenced
+     * QNaN and a non-NaN => return the QNaN
+     *
+     * If we get down to comparing significands and they are the same,
+     * return the NaN with the positive sign bit (if any).
+     */
+    if (is_snan(a_cls)) {
+        if (is_snan(b_cls)) {
+            return aIsLargerSignificand ? 0 : 1;
+        }
+        return is_qnan(b_cls) ? 1 : 0;
+    } else if (is_qnan(a_cls)) {
+        if (is_snan(b_cls) || !is_qnan(b_cls)) {
+            return 0;
+        } else {
+            return aIsLargerSignificand ? 0 : 1;
+        }
+    } else {
+        return 1;
+    }
+#endif
+}
+
+/*----------------------------------------------------------------------------
+| Select which NaN to propagate for a three-input operation.
+| For the moment we assume that no CPU needs the 'larger significand'
+| information.
+| Return values : 0 : a; 1 : b; 2 : c; 3 : default-NaN
+*----------------------------------------------------------------------------*/
+static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
+                         bool infzero, float_status *status)
+{
+#if defined(TARGET_ARM)
+    /* For ARM, the (inf,zero,qnan) case sets InvalidOp and returns
+     * the default NaN
+     */
+    if (infzero && is_qnan(c_cls)) {
+        float_raise(float_flag_invalid, status);
+        return 3;
+    }
+
+    /* This looks different from the ARM ARM pseudocode, because the ARM ARM
+     * puts the operands to a fused mac operation (a*b)+c in the order c,a,b.
+     */
+    if (is_snan(c_cls)) {
+        return 2;
+    } else if (is_snan(a_cls)) {
+        return 0;
+    } else if (is_snan(b_cls)) {
+        return 1;
+    } else if (is_qnan(c_cls)) {
+        return 2;
+    } else if (is_qnan(a_cls)) {
+        return 0;
+    } else {
+        return 1;
+    }
+#elif defined(TARGET_MIPS)
+    if (snan_bit_is_one(status)) {
+        /*
+         * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan)
+         * case sets InvalidOp and returns the default NaN
+         */
+        if (infzero) {
+            float_raise(float_flag_invalid, status);
+            return 3;
+        }
+        /* Prefer sNaN over qNaN, in the a, b, c order.
*/ + if (is_snan(a_cls)) { + return 0; + } else if (is_snan(b_cls)) { + return 1; + } else if (is_snan(c_cls)) { + return 2; + } else if (is_qnan(a_cls)) { + return 0; + } else if (is_qnan(b_cls)) { + return 1; + } else { + return 2; + } + } else { + /* + * For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan) + * case sets InvalidOp and returns the input value 'c' + */ + if (infzero) { + float_raise(float_flag_invalid, status); + return 2; + } + /* Prefer sNaN over qNaN, in the c, a, b order. */ + if (is_snan(c_cls)) { + return 2; + } else if (is_snan(a_cls)) { + return 0; + } else if (is_snan(b_cls)) { + return 1; + } else if (is_qnan(c_cls)) { + return 2; + } else if (is_qnan(a_cls)) { + return 0; + } else { + return 1; + } + } +#elif defined(TARGET_PPC) + /* For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer + * to return an input NaN if we have one (ie c) rather than generating + * a default NaN + */ + if (infzero) { + float_raise(float_flag_invalid, status); + return 2; + } + + /* If fRA is a NaN return it; otherwise if fRB is a NaN return it; + * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB + */ + if (is_nan(a_cls)) { + return 0; + } else if (is_nan(c_cls)) { + return 2; + } else { + return 1; + } +#else + /* A default implementation: prefer a to b to c. + * This is unlikely to actually match any real implementation. + */ + if (is_nan(a_cls)) { + return 0; + } else if (is_nan(b_cls)) { + return 1; + } else { + return 2; + } +#endif +} + +/*---------------------------------------------------------------------------- +| Takes two single-precision floating-point values `a' and `b', one of which +| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ + +static float32 propagateFloat32NaN(float32 a, float32 b, float_status *status) +{ + flag aIsLargerSignificand; + uint32_t av, bv; + FloatClass a_cls, b_cls; + + /* This is not complete, but is good enough for pickNaN. */ + a_cls = (!float32_is_any_nan(a) + ? float_class_normal + : float32_is_signaling_nan(a, status) + ? float_class_snan + : float_class_qnan); + b_cls = (!float32_is_any_nan(b) + ? float_class_normal + : float32_is_signaling_nan(b, status) + ? float_class_snan + : float_class_qnan); + + av = float32_val(a); + bv = float32_val(b); + + if (is_snan(a_cls) || is_snan(b_cls)) { + float_raise(float_flag_invalid, status); + } + + if (status->default_nan_mode) { + return float32_default_nan(status); + } + + if ((uint32_t)(av << 1) < (uint32_t)(bv << 1)) { + aIsLargerSignificand = 0; + } else if ((uint32_t)(bv << 1) < (uint32_t)(av << 1)) { + aIsLargerSignificand = 1; + } else { + aIsLargerSignificand = (av < bv) ? 1 : 0; + } + + if (pickNaN(a_cls, b_cls, aIsLargerSignificand)) { + if (is_snan(b_cls)) { + return float32_silence_nan(b, status); + } + return b; + } else { + if (is_snan(a_cls)) { + return float32_silence_nan(a, status); + } + return a; + } +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point value `a' is a quiet +| NaN; otherwise returns 0. 
+*----------------------------------------------------------------------------*/ + +int float64_is_quiet_nan(float64 a_, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return float64_is_any_nan(a_); +#else + uint64_t a = float64_val(a_); + if (snan_bit_is_one(status)) { + return (((a >> 51) & 0xFFF) == 0xFFE) + && (a & 0x0007FFFFFFFFFFFFULL); + } else { + return ((a << 1) >= 0xFFF0000000000000ULL); + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point value `a' is a signaling +| NaN; otherwise returns 0. +*----------------------------------------------------------------------------*/ + +int float64_is_signaling_nan(float64 a_, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return 0; +#else + uint64_t a = float64_val(a_); + if (snan_bit_is_one(status)) { + return ((a << 1) >= 0xFFF0000000000000ULL); + } else { + return (((a >> 51) & 0xFFF) == 0xFFE) + && (a & UINT64_C(0x0007FFFFFFFFFFFF)); + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the double-precision floating-point NaN +| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ + +static commonNaNT float64ToCommonNaN(float64 a, float_status *status) +{ + commonNaNT z; + + if (float64_is_signaling_nan(a, status)) { + float_raise(float_flag_invalid, status); + } + z.sign = float64_val(a) >> 63; + z.low = 0; + z.high = float64_val(a) << 12; + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the double- +| precision floating-point format. +*----------------------------------------------------------------------------*/ + +static float64 commonNaNToFloat64(commonNaNT a, float_status *status) +{ + uint64_t mantissa = a.high >> 12; + + if (status->default_nan_mode) { + return float64_default_nan(status); + } + + if (mantissa) { + return make_float64( + (((uint64_t) a.sign) << 63) + | UINT64_C(0x7FF0000000000000) + | (a.high >> 12)); + } else { + return float64_default_nan(status); + } +} + +/*---------------------------------------------------------------------------- +| Takes two double-precision floating-point values `a' and `b', one of which +| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ + +static float64 propagateFloat64NaN(float64 a, float64 b, float_status *status) +{ + flag aIsLargerSignificand; + uint64_t av, bv; + FloatClass a_cls, b_cls; + + /* This is not complete, but is good enough for pickNaN. */ + a_cls = (!float64_is_any_nan(a) + ? float_class_normal + : float64_is_signaling_nan(a, status) + ? float_class_snan + : float_class_qnan); + b_cls = (!float64_is_any_nan(b) + ? float_class_normal + : float64_is_signaling_nan(b, status) + ? 
float_class_snan + : float_class_qnan); + + av = float64_val(a); + bv = float64_val(b); + + if (is_snan(a_cls) || is_snan(b_cls)) { + float_raise(float_flag_invalid, status); + } + + if (status->default_nan_mode) { + return float64_default_nan(status); + } + + if ((uint64_t)(av << 1) < (uint64_t)(bv << 1)) { + aIsLargerSignificand = 0; + } else if ((uint64_t)(bv << 1) < (uint64_t)(av << 1)) { + aIsLargerSignificand = 1; + } else { + aIsLargerSignificand = (av < bv) ? 1 : 0; + } + + if (pickNaN(a_cls, b_cls, aIsLargerSignificand)) { + if (is_snan(b_cls)) { + return float64_silence_nan(b, status); + } + return b; + } else { + if (is_snan(a_cls)) { + return float64_silence_nan(a, status); + } + return a; + } +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the extended double-precision floating-point value `a' is a +| quiet NaN; otherwise returns 0. This slightly differs from the same +| function for other types as floatx80 has an explicit bit. +*----------------------------------------------------------------------------*/ + +int floatx80_is_quiet_nan(floatx80 a, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return floatx80_is_any_nan(a); +#else + if (snan_bit_is_one(status)) { + uint64_t aLow; + + aLow = a.low & ~0x4000000000000000ULL; + return ((a.high & 0x7FFF) == 0x7FFF) + && (aLow << 1) + && (a.low == aLow); + } else { + return ((a.high & 0x7FFF) == 0x7FFF) + && (UINT64_C(0x8000000000000000) <= ((uint64_t)(a.low << 1))); + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the extended double-precision floating-point value `a' is a +| signaling NaN; otherwise returns 0. This slightly differs from the same +| function for other types as floatx80 has an explicit bit. +*----------------------------------------------------------------------------*/ + +int floatx80_is_signaling_nan(floatx80 a, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return 0; +#else + if (snan_bit_is_one(status)) { + return ((a.high & 0x7FFF) == 0x7FFF) + && ((a.low << 1) >= 0x8000000000000000ULL); + } else { + uint64_t aLow; + + aLow = a.low & ~UINT64_C(0x4000000000000000); + return ((a.high & 0x7FFF) == 0x7FFF) + && (uint64_t)(aLow << 1) + && (a.low == aLow); + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns a quiet NaN from a signalling NaN for the extended double-precision +| floating point value `a'. +*----------------------------------------------------------------------------*/ + +floatx80 floatx80_silence_nan(floatx80 a, float_status *status) +{ + /* None of the targets that have snan_bit_is_one use floatx80. */ + assert(!snan_bit_is_one(status)); + a.low |= UINT64_C(0xC000000000000000); + return a; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the extended double-precision floating- +| point NaN `a' to the canonical NaN format. If `a' is a signaling NaN, the +| invalid exception is raised. 
+*----------------------------------------------------------------------------*/ + +static commonNaNT floatx80ToCommonNaN(floatx80 a, float_status *status) +{ + floatx80 dflt; + commonNaNT z; + + if (floatx80_is_signaling_nan(a, status)) { + float_raise(float_flag_invalid, status); + } + if (a.low >> 63) { + z.sign = a.high >> 15; + z.low = 0; + z.high = a.low << 1; + } else { + dflt = floatx80_default_nan(status); + z.sign = dflt.high >> 15; + z.low = 0; + z.high = dflt.low << 1; + } + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the extended +| double-precision floating-point format. +*----------------------------------------------------------------------------*/ + +static floatx80 commonNaNToFloatx80(commonNaNT a, float_status *status) +{ + floatx80 z; + + if (status->default_nan_mode) { + return floatx80_default_nan(status); + } + + if (a.high >> 1) { + z.low = UINT64_C(0x8000000000000000) | a.high >> 1; + z.high = (((uint16_t)a.sign) << 15) | 0x7FFF; + } else { + z = floatx80_default_nan(status); + } + return z; +} + +/*---------------------------------------------------------------------------- +| Takes two extended double-precision floating-point values `a' and `b', one +| of which is a NaN, and returns the appropriate NaN result. If either `a' or +| `b' is a signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ + +floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status) +{ + flag aIsLargerSignificand; + FloatClass a_cls, b_cls; + + /* This is not complete, but is good enough for pickNaN. */ + a_cls = (!floatx80_is_any_nan(a) + ? float_class_normal + : floatx80_is_signaling_nan(a, status) + ? float_class_snan + : float_class_qnan); + b_cls = (!floatx80_is_any_nan(b) + ? float_class_normal + : floatx80_is_signaling_nan(b, status) + ? float_class_snan + : float_class_qnan); + + if (is_snan(a_cls) || is_snan(b_cls)) { + float_raise(float_flag_invalid, status); + } + + if (status->default_nan_mode) { + return floatx80_default_nan(status); + } + + if (a.low < b.low) { + aIsLargerSignificand = 0; + } else if (b.low < a.low) { + aIsLargerSignificand = 1; + } else { + aIsLargerSignificand = (a.high < b.high) ? 1 : 0; + } + + if (pickNaN(a_cls, b_cls, aIsLargerSignificand)) { + if (is_snan(b_cls)) { + return floatx80_silence_nan(b, status); + } + return b; + } else { + if (is_snan(a_cls)) { + return floatx80_silence_nan(a, status); + } + return a; + } +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point value `a' is a quiet +| NaN; otherwise returns 0. +*----------------------------------------------------------------------------*/ + +int float128_is_quiet_nan(float128 a, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return float128_is_any_nan(a); +#else + if (snan_bit_is_one(status)) { + return (((a.high >> 47) & 0xFFFF) == 0xFFFE) + && (a.low || (a.high & 0x00007FFFFFFFFFFFULL)); + } else { + return ((a.high << 1) >= 0xFFFF000000000000ULL) + && (a.low || (a.high & 0x0000FFFFFFFFFFFFULL)); + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point value `a' is a +| signaling NaN; otherwise returns 0. 
+*----------------------------------------------------------------------------*/ + +int float128_is_signaling_nan(float128 a, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + return 0; +#else + if (snan_bit_is_one(status)) { + return ((a.high << 1) >= 0xFFFF000000000000ULL) + && (a.low || (a.high & 0x0000FFFFFFFFFFFFULL)); + } else { + return (((a.high >> 47) & 0xFFFF) == 0xFFFE) + && (a.low || (a.high & UINT64_C(0x00007FFFFFFFFFFF))); + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns a quiet NaN from a signalling NaN for the quadruple-precision +| floating point value `a'. +*----------------------------------------------------------------------------*/ + +float128 float128_silence_nan(float128 a, float_status *status) +{ +#ifdef NO_SIGNALING_NANS + g_assert_not_reached(); +#else + if (snan_bit_is_one(status)) { + return float128_default_nan(status); + } else { + a.high |= UINT64_C(0x0000800000000000); + return a; + } +#endif +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the quadruple-precision floating-point NaN +| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ + +static commonNaNT float128ToCommonNaN(float128 a, float_status *status) +{ + commonNaNT z; + + if (float128_is_signaling_nan(a, status)) { + float_raise(float_flag_invalid, status); + } + z.sign = a.high >> 63; + shortShift128Left(a.high, a.low, 16, &z.high, &z.low); + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the quadruple- +| precision floating-point format. +*----------------------------------------------------------------------------*/ + +static float128 commonNaNToFloat128(commonNaNT a, float_status *status) +{ + float128 z; + + if (status->default_nan_mode) { + return float128_default_nan(status); + } + + shift128Right(a.high, a.low, 16, &z.high, &z.low); + z.high |= (((uint64_t)a.sign) << 63) | UINT64_C(0x7FFF000000000000); + return z; +} + +/*---------------------------------------------------------------------------- +| Takes two quadruple-precision floating-point values `a' and `b', one of +| which is a NaN, and returns the appropriate NaN result. If either `a' or +| `b' is a signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ + +static float128 propagateFloat128NaN(float128 a, float128 b, + float_status *status) +{ + flag aIsLargerSignificand; + FloatClass a_cls, b_cls; + + /* This is not complete, but is good enough for pickNaN. */ + a_cls = (!float128_is_any_nan(a) + ? float_class_normal + : float128_is_signaling_nan(a, status) + ? float_class_snan + : float_class_qnan); + b_cls = (!float128_is_any_nan(b) + ? float_class_normal + : float128_is_signaling_nan(b, status) + ? float_class_snan + : float_class_qnan); + + if (is_snan(a_cls) || is_snan(b_cls)) { + float_raise(float_flag_invalid, status); + } + + if (status->default_nan_mode) { + return float128_default_nan(status); + } + + if (lt128(a.high << 1, a.low, b.high << 1, b.low)) { + aIsLargerSignificand = 0; + } else if (lt128(b.high << 1, b.low, a.high << 1, a.low)) { + aIsLargerSignificand = 1; + } else { + aIsLargerSignificand = (a.high < b.high) ? 
1 : 0; + } + + if (pickNaN(a_cls, b_cls, aIsLargerSignificand)) { + if (is_snan(b_cls)) { + return float128_silence_nan(b, status); + } + return b; + } else { + if (is_snan(a_cls)) { + return float128_silence_nan(a, status); + } + return a; + } +} diff --git a/qemu/fpu/softfloat.c b/qemu/fpu/softfloat.c index d1031ba8..0e7938dc 100644 --- a/qemu/fpu/softfloat.c +++ b/qemu/fpu/softfloat.c @@ -1,13 +1,24 @@ /* * QEMU float support * - * Derived from SoftFloat. + * The code in this source file is derived from release 2a of the SoftFloat + * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and + * some later contributions) are provided under that license, as detailed below. + * It has subsequently been modified by contributors to the QEMU Project, + * so some portions are provided under: + * the SoftFloat-2a license + * the BSD license + * GPL-v2-or-later + * + * Any future contributions to this file after December 1st 2014 will be + * taken to be licensed under the Softfloat-2a license unless specifically + * indicated otherwise. */ -/*============================================================================ - -This C source file is part of the SoftFloat IEC/IEEE Floating-point Arithmetic -Package, Release 2b. +/* +=============================================================================== +This C source file is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2a. Written by John R. Hauser. This work was made possible in part by the International Computer Science Institute, located at Suite 600, 1947 Center @@ -16,41 +27,632 @@ National Science Foundation under grant MIP-9311980. The original version of this code was written as part of a project to build a fixed-point vector processor in collaboration with the University of California at Berkeley, overseen by Profs. Nelson Morgan and John Wawrzynek. More information -is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ arithmetic/SoftFloat.html'. -THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has -been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES -RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS -AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, -COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE -EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE -INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR -OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. Derivative works are acceptable, even for commercial purposes, so long as -(1) the source code for the derivative work includes prominent notice that -the work is derivative, and (2) the source code includes prominent notice with -these four paragraphs for those parts of this code that are retained. +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these four paragraphs for those parts of +this code that are retained. 
-=============================================================================*/
+===============================================================================
+*/
+
+/* BSD licensing:
+ * Copyright (c) 2006, Fabrice Bellard
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Portions of this work are licensed under the terms of the GNU GPL,
+ * version 2 or later. See the COPYING file in the top-level directory.
+ */

/* softfloat (and in particular the code in softfloat-specialize.h) is
 * target-dependent and needs the TARGET_* macros.
 */
-#include "config.h"
-
+#include "qemu/osdep.h"
+#include <math.h>
+#include "qemu/bitops.h"
#include "fpu/softfloat.h"

/* We only need stdlib for abort() */
-#include <stdlib.h>

/*----------------------------------------------------------------------------
| Primitive arithmetic functions, including multi-word arithmetic, and
| division and square root approximations. (Can be specialized to target if
| desired.)
*----------------------------------------------------------------------------*/
-#include "softfloat-macros.h"
+#include "fpu/softfloat-macros.h"
+
+/*
+ * Hardfloat
+ *
+ * Fast emulation of guest FP instructions is challenging for two reasons.
+ * First, FP instruction semantics are similar but not identical, particularly
+ * when handling NaNs. Second, emulating at reasonable speed the guest FP
+ * exception flags is not trivial: reading the host's flags register with a
+ * feclearexcept & fetestexcept pair is slow [slightly slower than soft-fp],
+ * and trapping on every FP exception is neither fast nor pleasant to work with.
+ *
+ * We address these challenges by leveraging the host FPU for a subset of the
+ * operations. To do this we expand on the idea presented in this paper:
+ *
+ * Guo, Yu-Chuan, et al. "Translating the ARM Neon and VFP instructions in a
+ * binary translator." Software: Practice and Experience 46.12 (2016):1591-1615.
+ * + * The idea is thus to leverage the host FPU to (1) compute FP operations + * and (2) identify whether FP exceptions occurred while avoiding + * expensive exception flag register accesses. + * + * An important optimization shown in the paper is that given that exception + * flags are rarely cleared by the guest, we can avoid recomputing some flags. + * This is particularly useful for the inexact flag, which is very frequently + * raised in floating-point workloads. + * + * We optimize the code further by deferring to soft-fp whenever FP exception + * detection might get hairy. Two examples: (1) when at least one operand is + * denormal/inf/NaN; (2) when operands are not guaranteed to lead to a 0 result + * and the result is < the minimum normal. + */ +#define GEN_INPUT_FLUSH__NOCHECK(name, soft_t) \ + static inline void name(soft_t *a, float_status *s) \ + { \ + if (unlikely(soft_t ## _is_denormal(*a))) { \ + *a = soft_t ## _set_sign(soft_t ## _zero, \ + soft_t ## _is_neg(*a)); \ + s->float_exception_flags |= float_flag_input_denormal; \ + } \ + } + +GEN_INPUT_FLUSH__NOCHECK(float32_input_flush__nocheck, float32) +GEN_INPUT_FLUSH__NOCHECK(float64_input_flush__nocheck, float64) +#undef GEN_INPUT_FLUSH__NOCHECK + +#define GEN_INPUT_FLUSH1(name, soft_t) \ + static inline void name(soft_t *a, float_status *s) \ + { \ + if (likely(!s->flush_inputs_to_zero)) { \ + return; \ + } \ + soft_t ## _input_flush__nocheck(a, s); \ + } + +GEN_INPUT_FLUSH1(float32_input_flush1, float32) +GEN_INPUT_FLUSH1(float64_input_flush1, float64) +#undef GEN_INPUT_FLUSH1 + +#define GEN_INPUT_FLUSH2(name, soft_t) \ + static inline void name(soft_t *a, soft_t *b, float_status *s) \ + { \ + if (likely(!s->flush_inputs_to_zero)) { \ + return; \ + } \ + soft_t ## _input_flush__nocheck(a, s); \ + soft_t ## _input_flush__nocheck(b, s); \ + } + +GEN_INPUT_FLUSH2(float32_input_flush2, float32) +GEN_INPUT_FLUSH2(float64_input_flush2, float64) +#undef GEN_INPUT_FLUSH2 + +#define GEN_INPUT_FLUSH3(name, soft_t) \ + static inline void name(soft_t *a, soft_t *b, soft_t *c, float_status *s) \ + { \ + if (likely(!s->flush_inputs_to_zero)) { \ + return; \ + } \ + soft_t ## _input_flush__nocheck(a, s); \ + soft_t ## _input_flush__nocheck(b, s); \ + soft_t ## _input_flush__nocheck(c, s); \ + } + +GEN_INPUT_FLUSH3(float32_input_flush3, float32) +GEN_INPUT_FLUSH3(float64_input_flush3, float64) +#undef GEN_INPUT_FLUSH3 + +/* + * Choose whether to use fpclassify or float32/64_* primitives in the generated + * hardfloat functions. Each combination of number of inputs and float size + * gets its own value. + */ +#if defined(__x86_64__) +# define QEMU_HARDFLOAT_1F32_USE_FP 0 +# define QEMU_HARDFLOAT_1F64_USE_FP 1 +# define QEMU_HARDFLOAT_2F32_USE_FP 0 +# define QEMU_HARDFLOAT_2F64_USE_FP 1 +# define QEMU_HARDFLOAT_3F32_USE_FP 0 +# define QEMU_HARDFLOAT_3F64_USE_FP 1 +#else +# define QEMU_HARDFLOAT_1F32_USE_FP 0 +# define QEMU_HARDFLOAT_1F64_USE_FP 0 +# define QEMU_HARDFLOAT_2F32_USE_FP 0 +# define QEMU_HARDFLOAT_2F64_USE_FP 0 +# define QEMU_HARDFLOAT_3F32_USE_FP 0 +# define QEMU_HARDFLOAT_3F64_USE_FP 0 +#endif + +/* + * QEMU_HARDFLOAT_USE_ISINF chooses whether to use isinf() over + * float{32,64}_is_infinity when !USE_FP. + * On x86_64/aarch64, using the former over the latter can yield a ~6% speedup. + * On power64 however, using isinf() reduces fp-bench performance by up to 50%. 
+ */ +#if defined(__x86_64__) || defined(__aarch64__) +# define QEMU_HARDFLOAT_USE_ISINF 1 +#else +# define QEMU_HARDFLOAT_USE_ISINF 0 +#endif + +/* + * Some targets clear the FP flags before most FP operations. This prevents + * the use of hardfloat, since hardfloat relies on the inexact flag being + * already set. + */ +#if defined(TARGET_PPC) || defined(__FAST_MATH__) +# if defined(__FAST_MATH__) +# warning disabling hardfloat due to -ffast-math: hardfloat requires an exact \ + IEEE implementation +# endif +# define QEMU_NO_HARDFLOAT 1 +# define QEMU_SOFTFLOAT_ATTR QEMU_FLATTEN +#elif !defined(_MSC_VER) +# define QEMU_NO_HARDFLOAT 0 +# define QEMU_SOFTFLOAT_ATTR QEMU_FLATTEN __attribute__((noinline)) +#else // MSVC +# define QEMU_NO_HARDFLOAT 0 +# define QEMU_SOFTFLOAT_ATTR +#endif + +static inline bool can_use_fpu(const float_status *s) +{ + if (QEMU_NO_HARDFLOAT) { + return false; + } + return likely(s->float_exception_flags & float_flag_inexact && + s->float_rounding_mode == float_round_nearest_even); +} + +/* + * Hardfloat generation functions. Each operation can have two flavors: + * either using softfloat primitives (e.g. float32_is_zero_or_normal) for + * most condition checks, or native ones (e.g. fpclassify). + * + * The flavor is chosen by the callers. Instead of using macros, we rely on the + * compiler to propagate constants and inline everything into the callers. + * + * We only generate functions for operations with two inputs, since only + * these are common enough to justify consolidating them into common code. + */ + +typedef union { + float32 s; + float h; +} union_float32; + +typedef union { + float64 s; + double h; +} union_float64; + +typedef bool (*f32_check_fn)(union_float32 a, union_float32 b); +typedef bool (*f64_check_fn)(union_float64 a, union_float64 b); + +typedef float32 (*soft_f32_op2_fn)(float32 a, float32 b, float_status *s); +typedef float64 (*soft_f64_op2_fn)(float64 a, float64 b, float_status *s); +typedef float (*hard_f32_op2_fn)(float a, float b); +typedef double (*hard_f64_op2_fn)(double a, double b); + +/* 2-input is-zero-or-normal */ +static inline bool f32_is_zon2(union_float32 a, union_float32 b) +{ + if (QEMU_HARDFLOAT_2F32_USE_FP) { + /* + * Not using a temp variable for consecutive fpclassify calls ends up + * generating faster code. 
+ */ + return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && + (fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO); + } + return float32_is_zero_or_normal(a.s) && + float32_is_zero_or_normal(b.s); +} + +static inline bool f64_is_zon2(union_float64 a, union_float64 b) +{ + if (QEMU_HARDFLOAT_2F64_USE_FP) { + return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && + (fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO); + } + return float64_is_zero_or_normal(a.s) && + float64_is_zero_or_normal(b.s); +} + +/* 3-input is-zero-or-normal */ +static inline +bool f32_is_zon3(union_float32 a, union_float32 b, union_float32 c) +{ + if (QEMU_HARDFLOAT_3F32_USE_FP) { + return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && + (fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO) && + (fpclassify(c.h) == FP_NORMAL || fpclassify(c.h) == FP_ZERO); + } + return float32_is_zero_or_normal(a.s) && + float32_is_zero_or_normal(b.s) && + float32_is_zero_or_normal(c.s); +} + +static inline +bool f64_is_zon3(union_float64 a, union_float64 b, union_float64 c) +{ + if (QEMU_HARDFLOAT_3F64_USE_FP) { + return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && + (fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO) && + (fpclassify(c.h) == FP_NORMAL || fpclassify(c.h) == FP_ZERO); + } + return float64_is_zero_or_normal(a.s) && + float64_is_zero_or_normal(b.s) && + float64_is_zero_or_normal(c.s); +} + +static inline bool f32_is_inf(union_float32 a) +{ + if (QEMU_HARDFLOAT_USE_ISINF) { + return isinf(a.h); + } + return float32_is_infinity(a.s); +} + +static inline bool f64_is_inf(union_float64 a) +{ + if (QEMU_HARDFLOAT_USE_ISINF) { + return isinf(a.h); + } + return float64_is_infinity(a.s); +} + +/* Note: @fast_test and @post can be NULL */ +static inline float32 +float32_gen2(float32 xa, float32 xb, float_status *s, + hard_f32_op2_fn hard, soft_f32_op2_fn soft, + f32_check_fn pre, f32_check_fn post, + f32_check_fn fast_test, soft_f32_op2_fn fast_op) +{ + union_float32 ua, ub, ur; + + ua.s = xa; + ub.s = xb; + + if (unlikely(!can_use_fpu(s))) { + goto soft; + } + + float32_input_flush2(&ua.s, &ub.s, s); + if (unlikely(!pre(ua, ub))) { + goto soft; + } + if (fast_test && fast_test(ua, ub)) { + return fast_op(ua.s, ub.s, s); + } + + ur.h = hard(ua.h, ub.h); + if (unlikely(f32_is_inf(ur))) { + s->float_exception_flags |= float_flag_overflow; + } else if (unlikely(fabsf(ur.h) <= FLT_MIN)) { + if (post == NULL || post(ua, ub)) { + goto soft; + } + } + return ur.s; + + soft: + return soft(ua.s, ub.s, s); +} + +static inline float64 +float64_gen2(float64 xa, float64 xb, float_status *s, + hard_f64_op2_fn hard, soft_f64_op2_fn soft, + f64_check_fn pre, f64_check_fn post, + f64_check_fn fast_test, soft_f64_op2_fn fast_op) +{ + union_float64 ua, ub, ur; + + ua.s = xa; + ub.s = xb; + + if (unlikely(!can_use_fpu(s))) { + goto soft; + } + + float64_input_flush2(&ua.s, &ub.s, s); + if (unlikely(!pre(ua, ub))) { + goto soft; + } + if (fast_test && fast_test(ua, ub)) { + return fast_op(ua.s, ub.s, s); + } + + ur.h = hard(ua.h, ub.h); + if (unlikely(f64_is_inf(ur))) { + s->float_exception_flags |= float_flag_overflow; + } else if (unlikely(fabs(ur.h) <= DBL_MIN)) { + if (post == NULL || post(ua, ub)) { + goto soft; + } + } + return ur.s; + + soft: + return soft(ua.s, ub.s, s); +} + +/*---------------------------------------------------------------------------- +| Returns the fraction bits of the single-precision floating-point value `a'. 
+*----------------------------------------------------------------------------*/ + +static inline uint32_t extractFloat32Frac(float32 a) +{ + return float32_val(a) & 0x007FFFFF; +} + +/*---------------------------------------------------------------------------- +| Returns the exponent bits of the single-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline int extractFloat32Exp(float32 a) +{ + return (float32_val(a) >> 23) & 0xFF; +} + +/*---------------------------------------------------------------------------- +| Returns the sign bit of the single-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline flag extractFloat32Sign(float32 a) +{ + return float32_val(a) >> 31; +} + +/*---------------------------------------------------------------------------- +| Returns the fraction bits of the double-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline uint64_t extractFloat64Frac(float64 a) +{ + return float64_val(a) & UINT64_C(0x000FFFFFFFFFFFFF); +} + +/*---------------------------------------------------------------------------- +| Returns the exponent bits of the double-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline int extractFloat64Exp(float64 a) +{ + return (float64_val(a) >> 52) & 0x7FF; +} + +/*---------------------------------------------------------------------------- +| Returns the sign bit of the double-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline flag extractFloat64Sign(float64 a) +{ + return float64_val(a) >> 63; +} + +/* + * Classify a floating point number. Everything above float_class_qnan + * is a NaN so cls >= float_class_qnan is any NaN. + */ + +#ifndef _MSC_VER +typedef enum __attribute__ ((__packed__)) { + float_class_unclassified, + float_class_zero, + float_class_normal, + float_class_inf, + float_class_qnan, /* all NaNs from here */ + float_class_snan, +} FloatClass; +#else +__pragma(pack(push, 1)) +typedef enum { + float_class_unclassified, + float_class_zero, + float_class_normal, + float_class_inf, + float_class_qnan, /* all NaNs from here */ + float_class_snan, +} FloatClass; +__pragma(pack(pop)) +#endif + +/* Simple helpers for checking if, or what kind of, NaN we have */ +#ifndef _MSC_VER +static inline __attribute__((unused)) bool is_nan(FloatClass c) +#else +static inline bool is_nan(FloatClass c) +#endif +{ + return unlikely(c >= float_class_qnan); +} + +#ifndef _MSC_VER +static inline __attribute__((unused)) bool is_snan(FloatClass c) +#else +static inline bool is_snan(FloatClass c) +#endif +{ + return c == float_class_snan; +} + +#ifndef _MSC_VER +static inline __attribute__((unused)) bool is_qnan(FloatClass c) +#else +static inline bool is_qnan(FloatClass c) +#endif +{ + return c == float_class_qnan; +} + +/* + * Structure holding all of the decomposed parts of a float. The + * exponent is unbiased and the fraction is normalized. All + * calculations are done with a 64 bit fraction and then rounded as + * appropriate for the final format. + * + * Thanks to the packed FloatClass a decent compiler should be able to + * fit the whole structure into registers and avoid using the stack + * for parameter passing. 
+ */
+
+typedef struct {
+    uint64_t frac;
+    int32_t exp;
+    FloatClass cls;
+    bool sign;
+} FloatParts;
+
+#define DECOMPOSED_BINARY_POINT (64 - 2)
+#define DECOMPOSED_IMPLICIT_BIT (1ull << DECOMPOSED_BINARY_POINT)
+#define DECOMPOSED_OVERFLOW_BIT (DECOMPOSED_IMPLICIT_BIT << 1)
+
+/* Structure holding all of the relevant parameters for a format.
+ *   exp_size: the size of the exponent field
+ *   exp_bias: the offset applied to the exponent field
+ *   exp_max: the maximum normalised exponent
+ *   frac_size: the size of the fraction field
+ *   frac_shift: shift to normalise the fraction with DECOMPOSED_BINARY_POINT
+ * The following are computed based on the size of the fraction:
+ *   frac_lsb: least significant bit of fraction
+ *   frac_lsbm1: the bit below the least significant bit (for rounding)
+ *   round_mask/roundeven_mask: masks used for rounding
+ * The following optional modifiers are available:
+ *   arm_althp: handle ARM Alternative Half Precision
+ */
+typedef struct {
+    int exp_size;
+    int exp_bias;
+    int exp_max;
+    int frac_size;
+    int frac_shift;
+    uint64_t frac_lsb;
+    uint64_t frac_lsbm1;
+    uint64_t round_mask;
+    uint64_t roundeven_mask;
+    bool arm_althp;
+} FloatFmt;
+
+/* Expand fields based on the size of exponent and fraction */
+#define FLOAT_PARAMS(E, F)                                           \
+    .exp_size = E,                                                   \
+    .exp_bias = ((1 << E) - 1) >> 1,                                 \
+    .exp_max = (1 << E) - 1,                                         \
+    .frac_size = F,                                                  \
+    .frac_shift = DECOMPOSED_BINARY_POINT - F,                       \
+    .frac_lsb = 1ull << (DECOMPOSED_BINARY_POINT - F),               \
+    .frac_lsbm1 = 1ull << ((DECOMPOSED_BINARY_POINT - F) - 1),       \
+    .round_mask = (1ull << (DECOMPOSED_BINARY_POINT - F)) - 1,       \
+    .roundeven_mask = (2ull << (DECOMPOSED_BINARY_POINT - F)) - 1
+
+static const FloatFmt float16_params = {
+    FLOAT_PARAMS(5, 10)
+};
+
+static const FloatFmt float16_params_ahp = {
+    FLOAT_PARAMS(5, 10),
+    .arm_althp = true
+};
+
+static const FloatFmt float32_params = {
+    FLOAT_PARAMS(8, 23)
+};
+
+static const FloatFmt float64_params = {
+    FLOAT_PARAMS(11, 52)
+};
+
+/* Unpack a float to parts, but do not canonicalize. */
+static inline FloatParts unpack_raw(FloatFmt fmt, uint64_t raw)
+{
+    const int sign_pos = fmt.frac_size + fmt.exp_size;
+
+    return (FloatParts) {
+        .cls = float_class_unclassified,
+        .sign = extract64(raw, sign_pos, 1),
+        .exp = extract64(raw, fmt.frac_size, fmt.exp_size),
+        .frac = extract64(raw, 0, fmt.frac_size),
+    };
+}
+
+static inline FloatParts float16_unpack_raw(float16 f)
+{
+    return unpack_raw(float16_params, f);
+}
+
+static inline FloatParts float32_unpack_raw(float32 f)
+{
+    return unpack_raw(float32_params, f);
+}
+
+static inline FloatParts float64_unpack_raw(float64 f)
+{
+    return unpack_raw(float64_params, f);
+}
+
+/* Pack a float from parts, but do not canonicalize.
*/ +static inline uint64_t pack_raw(FloatFmt fmt, FloatParts p) +{ + const int sign_pos = fmt.frac_size + fmt.exp_size; + uint64_t ret = deposit64(p.frac, fmt.frac_size, fmt.exp_size, p.exp); + return deposit64(ret, sign_pos, 1, p.sign); +} + +static inline float16 float16_pack_raw(FloatParts p) +{ + return make_float16(pack_raw(float16_params, p)); +} + +static inline float32 float32_pack_raw(FloatParts p) +{ + return make_float32(pack_raw(float32_params, p)); +} + +static inline float64 float64_pack_raw(FloatParts p) +{ + return make_float64(pack_raw(float64_params, p)); +} /*---------------------------------------------------------------------------- | Functions and definitions to determine: (1) whether tininess for underflow @@ -60,33 +662,2751 @@ these four paragraphs for those parts of this code that are retained. | are propagated from function inputs to output. These details are target- | specific. *----------------------------------------------------------------------------*/ -#include "softfloat-specialize.h" +#include "softfloat-specialize.inc.c" -/*---------------------------------------------------------------------------- -| Returns the fraction bits of the half-precision floating-point value `a'. -*----------------------------------------------------------------------------*/ - -static inline uint32_t extractFloat16Frac(float16 a) +/* Canonicalize EXP and FRAC, setting CLS. */ +static FloatParts sf_canonicalize(FloatParts part, const FloatFmt *parm, + float_status *status) { - return float16_val(a) & 0x3ff; + if (part.exp == parm->exp_max && !parm->arm_althp) { + if (part.frac == 0) { + part.cls = float_class_inf; + } else { + part.frac <<= parm->frac_shift; + part.cls = (parts_is_snan_frac(part.frac, status) + ? float_class_snan : float_class_qnan); + } + } else if (part.exp == 0) { + if (likely(part.frac == 0)) { + part.cls = float_class_zero; + } else if (status->flush_inputs_to_zero) { + float_raise(float_flag_input_denormal, status); + part.cls = float_class_zero; + part.frac = 0; + } else { + int shift = clz64(part.frac) - 1; + part.cls = float_class_normal; + part.exp = parm->frac_shift - parm->exp_bias - shift + 1; + part.frac <<= shift; + } + } else { + part.cls = float_class_normal; + part.exp -= parm->exp_bias; + part.frac = DECOMPOSED_IMPLICIT_BIT + (part.frac << parm->frac_shift); + } + return part; +} + +/* Round and uncanonicalize a floating-point number by parts. There + * are FRAC_SHIFT bits that may require rounding at the bottom of the + * fraction; these bits will be removed. The exponent will be biased + * by EXP_BIAS and must be bounded by [EXP_MAX-1, 0]. + */ + +static FloatParts round_canonical(FloatParts p, float_status *s, + const FloatFmt *parm) +{ + const uint64_t frac_lsb = parm->frac_lsb; + const uint64_t frac_lsbm1 = parm->frac_lsbm1; + const uint64_t round_mask = parm->round_mask; + const uint64_t roundeven_mask = parm->roundeven_mask; + const int exp_max = parm->exp_max; + const int frac_shift = parm->frac_shift; + uint64_t frac, inc = 0; + int exp, flags = 0; + bool overflow_norm = false; + + frac = p.frac; + exp = p.exp; + + switch (p.cls) { + case float_class_normal: + switch (s->float_rounding_mode) { + case float_round_nearest_even: + overflow_norm = false; + inc = ((frac & roundeven_mask) != frac_lsbm1 ? frac_lsbm1 : 0); + break; + case float_round_ties_away: + overflow_norm = false; + inc = frac_lsbm1; + break; + case float_round_to_zero: + overflow_norm = true; + inc = 0; + break; + case float_round_up: + inc = p.sign ? 
0 : round_mask; + overflow_norm = p.sign; + break; + case float_round_down: + inc = p.sign ? round_mask : 0; + overflow_norm = !p.sign; + break; + case float_round_to_odd: + overflow_norm = true; + inc = frac & frac_lsb ? 0 : round_mask; + break; + default: + g_assert_not_reached(); + break; + } + + exp += parm->exp_bias; + if (likely(exp > 0)) { + if (frac & round_mask) { + flags |= float_flag_inexact; + frac += inc; + if (frac & DECOMPOSED_OVERFLOW_BIT) { + frac >>= 1; + exp++; + } + } + frac >>= frac_shift; + + if (parm->arm_althp) { + /* ARM Alt HP eschews Inf and NaN for a wider exponent. */ + if (unlikely(exp > exp_max)) { + /* Overflow. Return the maximum normal. */ + flags = float_flag_invalid; + exp = exp_max; + frac = -1; + } + } else if (unlikely(exp >= exp_max)) { + flags |= float_flag_overflow | float_flag_inexact; + if (overflow_norm) { + exp = exp_max - 1; + frac = -1; + } else { + p.cls = float_class_inf; + goto do_inf; + } + } + } else if (s->flush_to_zero) { + flags |= float_flag_output_denormal; + p.cls = float_class_zero; + goto do_zero; + } else { + bool is_tiny = (s->float_detect_tininess + == float_tininess_before_rounding) + || (exp < 0) + || !((frac + inc) & DECOMPOSED_OVERFLOW_BIT); + + shift64RightJamming(frac, 1 - exp, &frac); + if (frac & round_mask) { + /* Need to recompute round-to-even. */ + switch (s->float_rounding_mode) { + case float_round_nearest_even: + inc = ((frac & roundeven_mask) != frac_lsbm1 + ? frac_lsbm1 : 0); + break; + case float_round_to_odd: + inc = frac & frac_lsb ? 0 : round_mask; + break; + } + flags |= float_flag_inexact; + frac += inc; + } + + exp = (frac & DECOMPOSED_IMPLICIT_BIT ? 1 : 0); + frac >>= frac_shift; + + if (is_tiny && (flags & float_flag_inexact)) { + flags |= float_flag_underflow; + } + if (exp == 0 && frac == 0) { + p.cls = float_class_zero; + } + } + break; + + case float_class_zero: + do_zero: + exp = 0; + frac = 0; + break; + + case float_class_inf: + do_inf: + assert(!parm->arm_althp); + exp = exp_max; + frac = 0; + break; + + case float_class_qnan: + case float_class_snan: + assert(!parm->arm_althp); + exp = exp_max; + frac >>= parm->frac_shift; + break; + + default: + g_assert_not_reached(); + break; + } + + float_raise(flags, s); + p.exp = exp; + p.frac = frac; + return p; +} + +/* Explicit FloatFmt version */ +static FloatParts float16a_unpack_canonical(float16 f, float_status *s, + const FloatFmt *params) +{ + return sf_canonicalize(float16_unpack_raw(f), params, s); +} + +static FloatParts float16_unpack_canonical(float16 f, float_status *s) +{ + return float16a_unpack_canonical(f, s, &float16_params); +} + +static float16 float16a_round_pack_canonical(FloatParts p, float_status *s, + const FloatFmt *params) +{ + return float16_pack_raw(round_canonical(p, s, params)); +} + +static float16 float16_round_pack_canonical(FloatParts p, float_status *s) +{ + return float16a_round_pack_canonical(p, s, &float16_params); +} + +static FloatParts float32_unpack_canonical(float32 f, float_status *s) +{ + return sf_canonicalize(float32_unpack_raw(f), &float32_params, s); +} + +static float32 float32_round_pack_canonical(FloatParts p, float_status *s) +{ + return float32_pack_raw(round_canonical(p, s, &float32_params)); +} + +static FloatParts float64_unpack_canonical(float64 f, float_status *s) +{ + return sf_canonicalize(float64_unpack_raw(f), &float64_params, s); +} + +static float64 float64_round_pack_canonical(FloatParts p, float_status *s) +{ + return float64_pack_raw(round_canonical(p, s, &float64_params)); +} + 
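Taken together, the helpers above define the decomposed pipeline that all the arithmetic below is built on: unpack_raw() splits the raw bits into sign/exp/frac, sf_canonicalize() unbiases the exponent and left-justifies the fraction so the implicit integer bit sits at DECOMPOSED_BINARY_POINT (bit 62), and round_canonical() reverses the process with rounding. The following standalone sketch (an illustration only, not part of the patch; the DEMO_* names are hypothetical) reproduces just the unpack-and-canonicalize step for a normal binary32 value, to make the internal representation concrete:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors DECOMPOSED_BINARY_POINT / DECOMPOSED_IMPLICIT_BIT above. */
#define DEMO_BINARY_POINT (64 - 2)
#define DEMO_IMPLICIT_BIT (1ULL << DEMO_BINARY_POINT)

int main(void)
{
    float f = 1.5f;
    uint32_t raw;
    memcpy(&raw, &f, sizeof(raw));

    /* unpack_raw() for binary32: sign_pos = frac_size + exp_size = 31. */
    uint64_t sign = raw >> 31;
    int32_t exp = (raw >> 23) & 0xFF;    /* exp_size = 8 */
    uint64_t frac = raw & 0x007FFFFF;    /* frac_size = 23 */

    /* float_class_normal branch of sf_canonicalize(): unbias the
     * exponent (exp_bias = 127) and left-justify the fraction so the
     * implicit integer bit lands at bit 62 (frac_shift = 62 - 23). */
    exp -= 127;
    frac = DEMO_IMPLICIT_BIT + (frac << (DEMO_BINARY_POINT - 23));

    /* Prints: sign=0 exp=0 frac=0x6000000000000000, i.e. binary 1.1
     * with the point at bit 62, which is 1.5 * 2^0. */
    printf("sign=%llu exp=%d frac=0x%016llx\n",
           (unsigned long long)sign, (int)exp, (unsigned long long)frac);
    return 0;
}

This also shows why the binary point sits two bits below the top of the 64-bit word: the spare top bit (DECOMPOSED_OVERFLOW_BIT) absorbs the carry-out that addition of two such fractions can produce, which is exactly what addsub_floats() and round_canonical() test for below.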
+static FloatParts return_nan(FloatParts a, float_status *s) +{ + switch (a.cls) { + case float_class_snan: + s->float_exception_flags |= float_flag_invalid; + a = parts_silence_nan(a, s); + /* fall through */ + case float_class_qnan: + if (s->default_nan_mode) { + return parts_default_nan(s); + } + break; + + default: + g_assert_not_reached(); + break; + } + return a; +} + +static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s) +{ + if (is_snan(a.cls) || is_snan(b.cls)) { + s->float_exception_flags |= float_flag_invalid; + } + + if (s->default_nan_mode) { + return parts_default_nan(s); + } else { + if (pickNaN(a.cls, b.cls, + a.frac > b.frac || + (a.frac == b.frac && a.sign < b.sign))) { + a = b; + } + if (is_snan(a.cls)) { + return parts_silence_nan(a, s); + } + } + return a; +} + +static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c, + bool inf_zero, float_status *s) +{ + int which; + + if (is_snan(a.cls) || is_snan(b.cls) || is_snan(c.cls)) { + s->float_exception_flags |= float_flag_invalid; + } + + which = pickNaNMulAdd(a.cls, b.cls, c.cls, inf_zero, s); + + if (s->default_nan_mode) { + /* Note that this check is after pickNaNMulAdd so that function + * has an opportunity to set the Invalid flag. + */ + which = 3; + } + + switch (which) { + case 0: + break; + case 1: + a = b; + break; + case 2: + a = c; + break; + case 3: + return parts_default_nan(s); + default: + g_assert_not_reached(); + break; + } + + if (is_snan(a.cls)) { + return parts_silence_nan(a, s); + } + return a; +} + +/* + * Returns the result of adding or subtracting the values of the + * floating-point values `a' and `b'. The operation is performed + * according to the IEC/IEEE Standard for Binary Floating-Point + * Arithmetic. + */ + +static FloatParts addsub_floats(FloatParts a, FloatParts b, bool subtract, + float_status *s) +{ + bool a_sign = a.sign; + bool b_sign = b.sign ^ subtract; + + if (a_sign != b_sign) { + /* Subtraction */ + + if (a.cls == float_class_normal && b.cls == float_class_normal) { + if (a.exp > b.exp || (a.exp == b.exp && a.frac >= b.frac)) { + shift64RightJamming(b.frac, a.exp - b.exp, &b.frac); + a.frac = a.frac - b.frac; + } else { + shift64RightJamming(a.frac, b.exp - a.exp, &a.frac); + a.frac = b.frac - a.frac; + a.exp = b.exp; + a_sign ^= 1; + } + + if (a.frac == 0) { + a.cls = float_class_zero; + a.sign = s->float_rounding_mode == float_round_down; + } else { + int shift = clz64(a.frac) - 1; + a.frac = a.frac << shift; + a.exp = a.exp - shift; + a.sign = a_sign; + } + return a; + } + if (is_nan(a.cls) || is_nan(b.cls)) { + return pick_nan(a, b, s); + } + if (a.cls == float_class_inf) { + if (b.cls == float_class_inf) { + float_raise(float_flag_invalid, s); + return parts_default_nan(s); + } + return a; + } + if (a.cls == float_class_zero && b.cls == float_class_zero) { + a.sign = s->float_rounding_mode == float_round_down; + return a; + } + if (a.cls == float_class_zero || b.cls == float_class_inf) { + b.sign = a_sign ^ 1; + return b; + } + if (b.cls == float_class_zero) { + return a; + } + } else { + /* Addition */ + if (a.cls == float_class_normal && b.cls == float_class_normal) { + if (a.exp > b.exp) { + shift64RightJamming(b.frac, a.exp - b.exp, &b.frac); + } else if (a.exp < b.exp) { + shift64RightJamming(a.frac, b.exp - a.exp, &a.frac); + a.exp = b.exp; + } + a.frac += b.frac; + if (a.frac & DECOMPOSED_OVERFLOW_BIT) { + shift64RightJamming(a.frac, 1, &a.frac); + a.exp += 1; + } + return a; + } + if (is_nan(a.cls) || is_nan(b.cls)) { + return 
pick_nan(a, b, s); + } + if (a.cls == float_class_inf || b.cls == float_class_zero) { + return a; + } + if (b.cls == float_class_inf || a.cls == float_class_zero) { + b.sign = b_sign; + return b; + } + } + + g_assert_not_reached(); + return a; +} + +/* + * Returns the result of adding or subtracting the floating-point + * values `a' and `b'. The operation is performed according to the + * IEC/IEEE Standard for Binary Floating-Point Arithmetic. + */ + +float16 QEMU_FLATTEN float16_add(float16 a, float16 b, float_status *status) +{ + FloatParts pa = float16_unpack_canonical(a, status); + FloatParts pb = float16_unpack_canonical(b, status); + FloatParts pr = addsub_floats(pa, pb, false, status); + + return float16_round_pack_canonical(pr, status); +} + +float16 QEMU_FLATTEN float16_sub(float16 a, float16 b, float_status *status) +{ + FloatParts pa = float16_unpack_canonical(a, status); + FloatParts pb = float16_unpack_canonical(b, status); + FloatParts pr = addsub_floats(pa, pb, true, status); + + return float16_round_pack_canonical(pr, status); +} + +static float32 QEMU_SOFTFLOAT_ATTR +soft_f32_addsub(float32 a, float32 b, bool subtract, float_status *status) +{ + FloatParts pa = float32_unpack_canonical(a, status); + FloatParts pb = float32_unpack_canonical(b, status); + FloatParts pr = addsub_floats(pa, pb, subtract, status); + + return float32_round_pack_canonical(pr, status); +} + +static inline float32 soft_f32_add(float32 a, float32 b, float_status *status) +{ + return soft_f32_addsub(a, b, false, status); +} + +static inline float32 soft_f32_sub(float32 a, float32 b, float_status *status) +{ + return soft_f32_addsub(a, b, true, status); +} + +static float64 QEMU_SOFTFLOAT_ATTR +soft_f64_addsub(float64 a, float64 b, bool subtract, float_status *status) +{ + FloatParts pa = float64_unpack_canonical(a, status); + FloatParts pb = float64_unpack_canonical(b, status); + FloatParts pr = addsub_floats(pa, pb, subtract, status); + + return float64_round_pack_canonical(pr, status); +} + +static inline float64 soft_f64_add(float64 a, float64 b, float_status *status) +{ + return soft_f64_addsub(a, b, false, status); +} + +static inline float64 soft_f64_sub(float64 a, float64 b, float_status *status) +{ + return soft_f64_addsub(a, b, true, status); +} + +static float hard_f32_add(float a, float b) +{ + return a + b; +} + +static float hard_f32_sub(float a, float b) +{ + return a - b; +} + +static double hard_f64_add(double a, double b) +{ + return a + b; +} + +static double hard_f64_sub(double a, double b) +{ + return a - b; +} + +static bool f32_addsub_post(union_float32 a, union_float32 b) +{ + if (QEMU_HARDFLOAT_2F32_USE_FP) { + return !(fpclassify(a.h) == FP_ZERO && fpclassify(b.h) == FP_ZERO); + } + return !(float32_is_zero(a.s) && float32_is_zero(b.s)); +} + +static bool f64_addsub_post(union_float64 a, union_float64 b) +{ + if (QEMU_HARDFLOAT_2F64_USE_FP) { + return !(fpclassify(a.h) == FP_ZERO && fpclassify(b.h) == FP_ZERO); + } else { + return !(float64_is_zero(a.s) && float64_is_zero(b.s)); + } +} + +static float32 float32_addsub(float32 a, float32 b, float_status *s, + hard_f32_op2_fn hard, soft_f32_op2_fn soft) +{ + return float32_gen2(a, b, s, hard, soft, + f32_is_zon2, f32_addsub_post, NULL, NULL); +} + +static float64 float64_addsub(float64 a, float64 b, float_status *s, + hard_f64_op2_fn hard, soft_f64_op2_fn soft) +{ + return float64_gen2(a, b, s, hard, soft, + f64_is_zon2, f64_addsub_post, NULL, NULL); +} + +float32 QEMU_FLATTEN +float32_add(float32 a, float32 b, float_status *s) 
+{ + return float32_addsub(a, b, s, hard_f32_add, soft_f32_add); +} + +float32 QEMU_FLATTEN +float32_sub(float32 a, float32 b, float_status *s) +{ + return float32_addsub(a, b, s, hard_f32_sub, soft_f32_sub); +} + +float64 QEMU_FLATTEN +float64_add(float64 a, float64 b, float_status *s) +{ + return float64_addsub(a, b, s, hard_f64_add, soft_f64_add); +} + +float64 QEMU_FLATTEN +float64_sub(float64 a, float64 b, float_status *s) +{ + return float64_addsub(a, b, s, hard_f64_sub, soft_f64_sub); +} + +/* + * Returns the result of multiplying the floating-point values `a' and + * `b'. The operation is performed according to the IEC/IEEE Standard + * for Binary Floating-Point Arithmetic. + */ + +static FloatParts mul_floats(FloatParts a, FloatParts b, float_status *s) +{ + bool sign = a.sign ^ b.sign; + + if (a.cls == float_class_normal && b.cls == float_class_normal) { + uint64_t hi, lo; + int exp = a.exp + b.exp; + + mul64To128(a.frac, b.frac, &hi, &lo); + shift128RightJamming(hi, lo, DECOMPOSED_BINARY_POINT, &hi, &lo); + if (lo & DECOMPOSED_OVERFLOW_BIT) { + shift64RightJamming(lo, 1, &lo); + exp += 1; + } + + /* Re-use a */ + a.exp = exp; + a.sign = sign; + a.frac = lo; + return a; + } + /* handle all the NaN cases */ + if (is_nan(a.cls) || is_nan(b.cls)) { + return pick_nan(a, b, s); + } + /* Inf * Zero == NaN */ + if ((a.cls == float_class_inf && b.cls == float_class_zero) || + (a.cls == float_class_zero && b.cls == float_class_inf)) { + s->float_exception_flags |= float_flag_invalid; + return parts_default_nan(s); + } + /* Multiply by 0 or Inf */ + if (a.cls == float_class_inf || a.cls == float_class_zero) { + a.sign = sign; + return a; + } + if (b.cls == float_class_inf || b.cls == float_class_zero) { + b.sign = sign; + return b; + } + + g_assert_not_reached(); + return a; +} + +float16 QEMU_FLATTEN float16_mul(float16 a, float16 b, float_status *status) +{ + FloatParts pa = float16_unpack_canonical(a, status); + FloatParts pb = float16_unpack_canonical(b, status); + FloatParts pr = mul_floats(pa, pb, status); + + return float16_round_pack_canonical(pr, status); +} + +static float32 QEMU_SOFTFLOAT_ATTR +soft_f32_mul(float32 a, float32 b, float_status *status) +{ + FloatParts pa = float32_unpack_canonical(a, status); + FloatParts pb = float32_unpack_canonical(b, status); + FloatParts pr = mul_floats(pa, pb, status); + + return float32_round_pack_canonical(pr, status); +} + +static float64 QEMU_SOFTFLOAT_ATTR +soft_f64_mul(float64 a, float64 b, float_status *status) +{ + FloatParts pa = float64_unpack_canonical(a, status); + FloatParts pb = float64_unpack_canonical(b, status); + FloatParts pr = mul_floats(pa, pb, status); + + return float64_round_pack_canonical(pr, status); +} + +static float hard_f32_mul(float a, float b) +{ + return a * b; +} + +static double hard_f64_mul(double a, double b) +{ + return a * b; +} + +static bool f32_mul_fast_test(union_float32 a, union_float32 b) +{ + return float32_is_zero(a.s) || float32_is_zero(b.s); +} + +static bool f64_mul_fast_test(union_float64 a, union_float64 b) +{ + return float64_is_zero(a.s) || float64_is_zero(b.s); +} + +static float32 f32_mul_fast_op(float32 a, float32 b, float_status *s) +{ + bool signbit = float32_is_neg(a) ^ float32_is_neg(b); + + return float32_set_sign(float32_zero, signbit); +} + +static float64 f64_mul_fast_op(float64 a, float64 b, float_status *s) +{ + bool signbit = float64_is_neg(a) ^ float64_is_neg(b); + + return float64_set_sign(float64_zero, signbit); +} + +float32 QEMU_FLATTEN +float32_mul(float32 a, float32 
b, float_status *s) +{ + return float32_gen2(a, b, s, hard_f32_mul, soft_f32_mul, + f32_is_zon2, NULL, f32_mul_fast_test, f32_mul_fast_op); +} + +float64 QEMU_FLATTEN +float64_mul(float64 a, float64 b, float_status *s) +{ + return float64_gen2(a, b, s, hard_f64_mul, soft_f64_mul, + f64_is_zon2, NULL, f64_mul_fast_test, f64_mul_fast_op); +} + +/* + * Returns the result of multiplying the floating-point values `a' and + * `b' then adding 'c', with no intermediate rounding step after the + * multiplication. The operation is performed according to the + * IEC/IEEE Standard for Binary Floating-Point Arithmetic 754-2008. + * The flags argument allows the caller to select negation of the + * addend, the intermediate product, or the final result. (The + * difference between this and having the caller do a separate + * negation is that negating externally will flip the sign bit on + * NaNs.) + */ + +static FloatParts muladd_floats(FloatParts a, FloatParts b, FloatParts c, + int flags, float_status *s) +{ + bool inf_zero = ((1 << a.cls) | (1 << b.cls)) == + ((1 << float_class_inf) | (1 << float_class_zero)); + bool p_sign; + bool sign_flip = flags & float_muladd_negate_result; + FloatClass p_class; + uint64_t hi, lo; + int p_exp; + + /* It is implementation-defined whether the cases of (0,inf,qnan) + * and (inf,0,qnan) raise InvalidOperation or not (and what QNaN + * they return if they do), so we have to hand this information + * off to the target-specific pick-a-NaN routine. + */ + if (is_nan(a.cls) || is_nan(b.cls) || is_nan(c.cls)) { + return pick_nan_muladd(a, b, c, inf_zero, s); + } + + if (inf_zero) { + s->float_exception_flags |= float_flag_invalid; + return parts_default_nan(s); + } + + if (flags & float_muladd_negate_c) { + c.sign ^= 1; + } + + p_sign = a.sign ^ b.sign; + + if (flags & float_muladd_negate_product) { + p_sign ^= 1; + } + + if (a.cls == float_class_inf || b.cls == float_class_inf) { + p_class = float_class_inf; + } else if (a.cls == float_class_zero || b.cls == float_class_zero) { + p_class = float_class_zero; + } else { + p_class = float_class_normal; + } + + if (c.cls == float_class_inf) { + if (p_class == float_class_inf && p_sign != c.sign) { + s->float_exception_flags |= float_flag_invalid; + return parts_default_nan(s); + } else { + a.cls = float_class_inf; + a.sign = c.sign ^ sign_flip; + return a; + } + } + + if (p_class == float_class_inf) { + a.cls = float_class_inf; + a.sign = p_sign ^ sign_flip; + return a; + } + + if (p_class == float_class_zero) { + if (c.cls == float_class_zero) { + if (p_sign != c.sign) { + p_sign = s->float_rounding_mode == float_round_down; + } + c.sign = p_sign; + } else if (flags & float_muladd_halve_result) { + c.exp -= 1; + } + c.sign ^= sign_flip; + return c; + } + + /* a & b should be normals now... */ + assert(a.cls == float_class_normal && + b.cls == float_class_normal); + + p_exp = a.exp + b.exp; + + /* Multiply of 2 62-bit numbers produces a (2*62) == 124-bit + * result. 
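+     * (Editor's note: a worked check of the layout, given this file's
+     * DECOMPOSED_BINARY_POINT == 62: each input fraction lies in
+     * [2^62, 2^63), so hi:lo lies in [2^124, 2^126) with its binary
+     * point at bit 124, and the overflow test below is simply a test
+     * of bit 125, i.e. of whether the product reached 2.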
+ */ + mul64To128(a.frac, b.frac, &hi, &lo); + /* binary point now at bit 124 */ + + /* check for overflow */ + if (hi & (1ULL << (DECOMPOSED_BINARY_POINT * 2 + 1 - 64))) { + shift128RightJamming(hi, lo, 1, &hi, &lo); + p_exp += 1; + } + + /* + add/sub */ + if (c.cls == float_class_zero) { + /* move binary point back to 62 */ + shift128RightJamming(hi, lo, DECOMPOSED_BINARY_POINT, &hi, &lo); + } else { + int exp_diff = p_exp - c.exp; + if (p_sign == c.sign) { + /* Addition */ + if (exp_diff <= 0) { + shift128RightJamming(hi, lo, + DECOMPOSED_BINARY_POINT - exp_diff, + &hi, &lo); + lo += c.frac; + p_exp = c.exp; + } else { + uint64_t c_hi, c_lo; + /* shift c to the same binary point as the product (124) */ + c_hi = c.frac >> 2; + c_lo = 0; + shift128RightJamming(c_hi, c_lo, + exp_diff, + &c_hi, &c_lo); + add128(hi, lo, c_hi, c_lo, &hi, &lo); + /* move binary point back to 62 */ + shift128RightJamming(hi, lo, DECOMPOSED_BINARY_POINT, &hi, &lo); + } + + if (lo & DECOMPOSED_OVERFLOW_BIT) { + shift64RightJamming(lo, 1, &lo); + p_exp += 1; + } + + } else { + /* Subtraction */ + uint64_t c_hi, c_lo; + /* make C binary point match product at bit 124 */ + c_hi = c.frac >> 2; + c_lo = 0; + + if (exp_diff <= 0) { + shift128RightJamming(hi, lo, -exp_diff, &hi, &lo); + if (exp_diff == 0 + && + (hi > c_hi || (hi == c_hi && lo >= c_lo))) { + sub128(hi, lo, c_hi, c_lo, &hi, &lo); + } else { + sub128(c_hi, c_lo, hi, lo, &hi, &lo); + p_sign ^= 1; + p_exp = c.exp; + } + } else { + shift128RightJamming(c_hi, c_lo, + exp_diff, + &c_hi, &c_lo); + sub128(hi, lo, c_hi, c_lo, &hi, &lo); + } + + if (hi == 0 && lo == 0) { + a.cls = float_class_zero; + a.sign = s->float_rounding_mode == float_round_down; + a.sign ^= sign_flip; + return a; + } else { + int shift; + if (hi != 0) { + shift = clz64(hi); + } else { + shift = clz64(lo) + 64; + } + /* Normalizing to a binary point of 124 is the + correct adjust for the exponent. However since we're + shifting, we might as well put the binary point back + at 62 where we really want it. Therefore shift as + if we're leaving 1 bit at the top of the word, but + adjust the exponent as if we're leaving 3 bits. 
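+                       (Editor's note: e.g. with the msb of the
+                       128-bit difference at bit 120, clz yields
+                       shift == 7; after "shift -= 1" the left shift
+                       of 6 puts the msb at bit 126 of hi:lo, which
+                       is bit 62 of the new low word, and
+                       "p_exp -= shift - 2" lowers the exponent by 4,
+                       one for each bit the msb sat below bit 124.)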
*/ + shift -= 1; + if (shift >= 64) { + lo = lo << (shift - 64); + } else { + hi = (hi << shift) | (lo >> (64 - shift)); + lo = hi | ((lo << shift) != 0); + } + p_exp -= shift - 2; + } + } + } + + if (flags & float_muladd_halve_result) { + p_exp -= 1; + } + + /* finally prepare our result */ + a.cls = float_class_normal; + a.sign = p_sign ^ sign_flip; + a.exp = p_exp; + a.frac = lo; + + return a; +} + +float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c, + int flags, float_status *status) +{ + FloatParts pa = float16_unpack_canonical(a, status); + FloatParts pb = float16_unpack_canonical(b, status); + FloatParts pc = float16_unpack_canonical(c, status); + FloatParts pr = muladd_floats(pa, pb, pc, flags, status); + + return float16_round_pack_canonical(pr, status); +} + +static float32 QEMU_SOFTFLOAT_ATTR +soft_f32_muladd(float32 a, float32 b, float32 c, int flags, + float_status *status) +{ + FloatParts pa = float32_unpack_canonical(a, status); + FloatParts pb = float32_unpack_canonical(b, status); + FloatParts pc = float32_unpack_canonical(c, status); + FloatParts pr = muladd_floats(pa, pb, pc, flags, status); + + return float32_round_pack_canonical(pr, status); +} + +static float64 QEMU_SOFTFLOAT_ATTR +soft_f64_muladd(float64 a, float64 b, float64 c, int flags, + float_status *status) +{ + FloatParts pa = float64_unpack_canonical(a, status); + FloatParts pb = float64_unpack_canonical(b, status); + FloatParts pc = float64_unpack_canonical(c, status); + FloatParts pr = muladd_floats(pa, pb, pc, flags, status); + + return float64_round_pack_canonical(pr, status); +} + +static bool force_soft_fma; + +float32 QEMU_FLATTEN +float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s) +{ + union_float32 ua, ub, uc, ur; + + ua.s = xa; + ub.s = xb; + uc.s = xc; + + if (unlikely(!can_use_fpu(s))) { + goto soft; + } + if (unlikely(flags & float_muladd_halve_result)) { + goto soft; + } + + float32_input_flush3(&ua.s, &ub.s, &uc.s, s); + if (unlikely(!f32_is_zon3(ua, ub, uc))) { + goto soft; + } + + if (unlikely(force_soft_fma)) { + goto soft; + } + + /* + * When (a || b) == 0, there's no need to check for under/over flow, + * since we know the addend is (normal || 0) and the product is 0. 
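+     * (Editor's note: the zero-product branch below still computes the
+     * product's sign by hand, including float_muladd_negate_product,
+     * before handing 0 + c to the host FPU: signed-zero results such
+     * as -0 + -0 == -0 depend on getting that sign right.)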
+ */ + if (float32_is_zero(ua.s) || float32_is_zero(ub.s)) { + union_float32 up; + bool prod_sign; + + prod_sign = float32_is_neg(ua.s) ^ float32_is_neg(ub.s); + prod_sign ^= !!(flags & float_muladd_negate_product); + up.s = float32_set_sign(float32_zero, prod_sign); + + if (flags & float_muladd_negate_c) { + uc.h = -uc.h; + } + ur.h = up.h + uc.h; + } else { + union_float32 ua_orig = ua; + union_float32 uc_orig = uc; + + if (flags & float_muladd_negate_product) { + ua.h = -ua.h; + } + if (flags & float_muladd_negate_c) { + uc.h = -uc.h; + } + + ur.h = fmaf(ua.h, ub.h, uc.h); + + if (unlikely(f32_is_inf(ur))) { + s->float_exception_flags |= float_flag_overflow; + } else if (unlikely(fabsf(ur.h) <= FLT_MIN)) { + ua = ua_orig; + uc = uc_orig; + goto soft; + } + } + if (flags & float_muladd_negate_result) { + return float32_chs(ur.s); + } + return ur.s; + + soft: + return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s); +} + +float64 QEMU_FLATTEN +float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s) +{ + union_float64 ua, ub, uc, ur; + + ua.s = xa; + ub.s = xb; + uc.s = xc; + + if (unlikely(!can_use_fpu(s))) { + goto soft; + } + if (unlikely(flags & float_muladd_halve_result)) { + goto soft; + } + + float64_input_flush3(&ua.s, &ub.s, &uc.s, s); + if (unlikely(!f64_is_zon3(ua, ub, uc))) { + goto soft; + } + + if (unlikely(force_soft_fma)) { + goto soft; + } + + /* + * When (a || b) == 0, there's no need to check for under/over flow, + * since we know the addend is (normal || 0) and the product is 0. + */ + if (float64_is_zero(ua.s) || float64_is_zero(ub.s)) { + union_float64 up; + bool prod_sign; + + prod_sign = float64_is_neg(ua.s) ^ float64_is_neg(ub.s); + prod_sign ^= !!(flags & float_muladd_negate_product); + up.s = float64_set_sign(float64_zero, prod_sign); + + if (flags & float_muladd_negate_c) { + uc.h = -uc.h; + } + ur.h = up.h + uc.h; + } else { + union_float64 ua_orig = ua; + union_float64 uc_orig = uc; + + if (flags & float_muladd_negate_product) { + ua.h = -ua.h; + } + if (flags & float_muladd_negate_c) { + uc.h = -uc.h; + } + + ur.h = fma(ua.h, ub.h, uc.h); + + if (unlikely(f64_is_inf(ur))) { + s->float_exception_flags |= float_flag_overflow; + } else if (unlikely(fabs(ur.h) <= FLT_MIN)) { + ua = ua_orig; + uc = uc_orig; + goto soft; + } + } + if (flags & float_muladd_negate_result) { + return float64_chs(ur.s); + } + return ur.s; + + soft: + return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s); +} + +/* + * Returns the result of dividing the floating-point value `a' by the + * corresponding value `b'. The operation is performed according to + * the IEC/IEEE Standard for Binary Floating-Point Arithmetic. + */ + +static FloatParts div_floats(FloatParts a, FloatParts b, float_status *s) +{ + bool sign = a.sign ^ b.sign; + + if (a.cls == float_class_normal && b.cls == float_class_normal) { + uint64_t n0, n1, q, r; + int exp = a.exp - b.exp; + + /* + * We want a 2*N / N-bit division to produce exactly an N-bit + * result, so that we do not lose any precision and so that we + * do not have to renormalize afterward. If A.frac < B.frac, + * then division would produce an (N-1)-bit result; shift A left + * by one to produce an N-bit result, and decrement the + * exponent to match. + * + * The udiv_qrnnd algorithm that we're using requires normalization, + * i.e. the msb of the denominator must be set. Since we know that + * DECOMPOSED_BINARY_POINT is msb-1, the inputs must be shifted left + * by one (more), and the remainder must be shifted right by one.
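+         * (Editor's note: a worked check with fractions in
+         * [2^62, 2^63) and A.frac >= B.frac: the shift left by
+         * DECOMPOSED_BINARY_POINT + 1 == 63 makes the dividend
+         * A.frac * 2^63 and the divisor B.frac * 2, so the quotient
+         * (A.frac / B.frac) * 2^62 lies in [2^62, 2^63) again --
+         * exactly N bits, msb on the binary point.)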
+ */ + if (a.frac < b.frac) { + exp -= 1; + shift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 2, &n1, &n0); + } else { + shift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 1, &n1, &n0); + } + q = udiv_qrnnd(&r, n1, n0, b.frac << 1); + + /* + * Set lsb if there is a remainder, to set inexact. + * As mentioned above, to find the actual value of the remainder we + * would need to shift right, but (1) we are only concerned about + * non-zero-ness, and (2) the remainder will always be even because + * both inputs to the division primitive are even. + */ + a.frac = q | (r != 0); + a.sign = sign; + a.exp = exp; + return a; + } + /* handle all the NaN cases */ + if (is_nan(a.cls) || is_nan(b.cls)) { + return pick_nan(a, b, s); + } + /* 0/0 or Inf/Inf */ + if (a.cls == b.cls + && + (a.cls == float_class_inf || a.cls == float_class_zero)) { + s->float_exception_flags |= float_flag_invalid; + return parts_default_nan(s); + } + /* Inf / x or 0 / x */ + if (a.cls == float_class_inf || a.cls == float_class_zero) { + a.sign = sign; + return a; + } + /* Div 0 => Inf */ + if (b.cls == float_class_zero) { + s->float_exception_flags |= float_flag_divbyzero; + a.cls = float_class_inf; + a.sign = sign; + return a; + } + /* Div by Inf */ + if (b.cls == float_class_inf) { + a.cls = float_class_zero; + a.sign = sign; + return a; + } + + g_assert_not_reached(); + return a; +} + +float16 float16_div(float16 a, float16 b, float_status *status) +{ + FloatParts pa = float16_unpack_canonical(a, status); + FloatParts pb = float16_unpack_canonical(b, status); + FloatParts pr = div_floats(pa, pb, status); + + return float16_round_pack_canonical(pr, status); +} + +static float32 QEMU_SOFTFLOAT_ATTR +soft_f32_div(float32 a, float32 b, float_status *status) +{ + FloatParts pa = float32_unpack_canonical(a, status); + FloatParts pb = float32_unpack_canonical(b, status); + FloatParts pr = div_floats(pa, pb, status); + + return float32_round_pack_canonical(pr, status); +} + +static float64 QEMU_SOFTFLOAT_ATTR +soft_f64_div(float64 a, float64 b, float_status *status) +{ + FloatParts pa = float64_unpack_canonical(a, status); + FloatParts pb = float64_unpack_canonical(b, status); + FloatParts pr = div_floats(pa, pb, status); + + return float64_round_pack_canonical(pr, status); +} + +static float hard_f32_div(float a, float b) +{ + return a / b; +} + +static double hard_f64_div(double a, double b) +{ + return a / b; +} + +static bool f32_div_pre(union_float32 a, union_float32 b) +{ + if (QEMU_HARDFLOAT_2F32_USE_FP) { + return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && + fpclassify(b.h) == FP_NORMAL; + } + return float32_is_zero_or_normal(a.s) && float32_is_normal(b.s); +} + +static bool f64_div_pre(union_float64 a, union_float64 b) +{ + if (QEMU_HARDFLOAT_2F64_USE_FP) { + return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) && + fpclassify(b.h) == FP_NORMAL; + } + return float64_is_zero_or_normal(a.s) && float64_is_normal(b.s); +} + +static bool f32_div_post(union_float32 a, union_float32 b) +{ + if (QEMU_HARDFLOAT_2F32_USE_FP) { + return fpclassify(a.h) != FP_ZERO; + } + return !float32_is_zero(a.s); +} + +static bool f64_div_post(union_float64 a, union_float64 b) +{ + if (QEMU_HARDFLOAT_2F64_USE_FP) { + return fpclassify(a.h) != FP_ZERO; + } + return !float64_is_zero(a.s); +} + +float32 QEMU_FLATTEN +float32_div(float32 a, float32 b, float_status *s) +{ + return float32_gen2(a, b, s, hard_f32_div, soft_f32_div, + f32_div_pre, f32_div_post, NULL, NULL); +} + +float64 QEMU_FLATTEN 
+float64_div(float64 a, float64 b, float_status *s) +{ + return float64_gen2(a, b, s, hard_f64_div, soft_f64_div, + f64_div_pre, f64_div_post, NULL, NULL); +} + +/* + * Float to Float conversions + * + * Returns the result of converting one float format to another. The + * conversion is performed according to the IEC/IEEE Standard for + * Binary Floating-Point Arithmetic. + * + * The float_to_float helper only needs to take care of raising + * invalid exceptions and handling the conversion on NaNs. + */ + +static FloatParts float_to_float(FloatParts a, const FloatFmt *dstf, + float_status *s) +{ + if (dstf->arm_althp) { + switch (a.cls) { + case float_class_qnan: + case float_class_snan: + /* There is no NaN in the destination format. Raise Invalid + * and return a zero with the sign of the input NaN. + */ + s->float_exception_flags |= float_flag_invalid; + a.cls = float_class_zero; + a.frac = 0; + a.exp = 0; + break; + + case float_class_inf: + /* There is no Inf in the destination format. Raise Invalid + * and return the maximum normal with the correct sign. + */ + s->float_exception_flags |= float_flag_invalid; + a.cls = float_class_normal; + a.exp = dstf->exp_max; + a.frac = ((1ull << dstf->frac_size) - 1) << dstf->frac_shift; + break; + + default: + break; + } + } else if (is_nan(a.cls)) { + if (is_snan(a.cls)) { + s->float_exception_flags |= float_flag_invalid; + a = parts_silence_nan(a, s); + } + if (s->default_nan_mode) { + return parts_default_nan(s); + } + } + return a; +} + +float32 float16_to_float32(float16 a, bool ieee, float_status *s) +{ + const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp; + FloatParts p = float16a_unpack_canonical(a, s, fmt16); + FloatParts pr = float_to_float(p, &float32_params, s); + return float32_round_pack_canonical(pr, s); +} + +float64 float16_to_float64(float16 a, bool ieee, float_status *s) +{ + const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp; + FloatParts p = float16a_unpack_canonical(a, s, fmt16); + FloatParts pr = float_to_float(p, &float64_params, s); + return float64_round_pack_canonical(pr, s); +} + +float16 float32_to_float16(float32 a, bool ieee, float_status *s) +{ + const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp; + FloatParts p = float32_unpack_canonical(a, s); + FloatParts pr = float_to_float(p, fmt16, s); + return float16a_round_pack_canonical(pr, s, fmt16); +} + +static float64 QEMU_SOFTFLOAT_ATTR +soft_float32_to_float64(float32 a, float_status *s) +{ + FloatParts p = float32_unpack_canonical(a, s); + FloatParts pr = float_to_float(p, &float64_params, s); + return float64_round_pack_canonical(pr, s); +} + +float64 float32_to_float64(float32 a, float_status *s) +{ + if (likely(float32_is_normal(a))) { + /* Widening conversion can never produce inexact results. */ + union_float32 uf; + union_float64 ud; + uf.s = a; + ud.h = uf.h; + return ud.s; + } else if (float32_is_zero(a)) { + return float64_set_sign(float64_zero, float32_is_neg(a)); + } else { + return soft_float32_to_float64(a, s); + } +} + +float16 float64_to_float16(float64 a, bool ieee, float_status *s) +{ + const FloatFmt *fmt16 = ieee ? 
&float16_params : &float16_params_ahp; + FloatParts p = float64_unpack_canonical(a, s); + FloatParts pr = float_to_float(p, fmt16, s); + return float16a_round_pack_canonical(pr, s, fmt16); +} + +float32 float64_to_float32(float64 a, float_status *s) +{ + FloatParts p = float64_unpack_canonical(a, s); + FloatParts pr = float_to_float(p, &float32_params, s); + return float32_round_pack_canonical(pr, s); +} + +/* + * Rounds the floating-point value `a' to an integer, and returns the + * result as a floating-point value. The operation is performed + * according to the IEC/IEEE Standard for Binary Floating-Point + * Arithmetic. + */ + +static FloatParts round_to_int(FloatParts a, int rmode, + int scale, float_status *s) +{ + switch (a.cls) { + case float_class_qnan: + case float_class_snan: + return return_nan(a, s); + + case float_class_zero: + case float_class_inf: + /* already "integral" */ + break; + + case float_class_normal: + scale = MIN(MAX(scale, -0x10000), 0x10000); + a.exp += scale; + + if (a.exp >= DECOMPOSED_BINARY_POINT) { + /* already integral */ + break; + } + if (a.exp < 0) { + bool one = false; + /* all fractional */ + s->float_exception_flags |= float_flag_inexact; + switch (rmode) { + case float_round_nearest_even: + one = a.exp == -1 && a.frac > DECOMPOSED_IMPLICIT_BIT; + break; + case float_round_ties_away: + one = a.exp == -1 && a.frac >= DECOMPOSED_IMPLICIT_BIT; + break; + case float_round_to_zero: + one = false; + break; + case float_round_up: + one = !a.sign; + break; + case float_round_down: + one = a.sign; + break; + case float_round_to_odd: + one = true; + break; + default: + g_assert_not_reached(); + break; + } + + if (one) { + a.frac = DECOMPOSED_IMPLICIT_BIT; + a.exp = 0; + } else { + a.cls = float_class_zero; + } + } else { + uint64_t frac_lsb = DECOMPOSED_IMPLICIT_BIT >> a.exp; + uint64_t frac_lsbm1 = frac_lsb >> 1; + uint64_t rnd_even_mask = (frac_lsb - 1) | frac_lsb; + uint64_t rnd_mask = rnd_even_mask >> 1; + uint64_t inc = 0; + + switch (rmode) { + case float_round_nearest_even: + inc = ((a.frac & rnd_even_mask) != frac_lsbm1 ? frac_lsbm1 : 0); + break; + case float_round_ties_away: + inc = frac_lsbm1; + break; + case float_round_to_zero: + inc = 0; + break; + case float_round_up: + inc = a.sign ? 0 : rnd_mask; + break; + case float_round_down: + inc = a.sign ? rnd_mask : 0; + break; + case float_round_to_odd: + inc = a.frac & frac_lsb ? 0 : rnd_mask; + break; + default: + g_assert_not_reached(); + break; + } + + if (a.frac & rnd_mask) { + s->float_exception_flags |= float_flag_inexact; + a.frac += inc; + a.frac &= ~rnd_mask; + if (a.frac & DECOMPOSED_OVERFLOW_BIT) { + a.frac >>= 1; + a.exp++; + } + } + } + break; + default: + g_assert_not_reached(); + } + return a; +} + +float16 float16_round_to_int(float16 a, float_status *s) +{ + FloatParts pa = float16_unpack_canonical(a, s); + FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s); + return float16_round_pack_canonical(pr, s); +} + +float32 float32_round_to_int(float32 a, float_status *s) +{ + FloatParts pa = float32_unpack_canonical(a, s); + FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s); + return float32_round_pack_canonical(pr, s); +} + +float64 float64_round_to_int(float64 a, float_status *s) +{ + FloatParts pa = float64_unpack_canonical(a, s); + FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s); + return float64_round_pack_canonical(pr, s); +} + +/* + * Returns the result of converting the floating-point value `a' to + * the two's complement integer format. 
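+ * (Editor's note on round_to_int above: for 0 <= exp <
+ * DECOMPOSED_BINARY_POINT the mask arithmetic rounds in place:
+ * frac_lsb == DECOMPOSED_IMPLICIT_BIT >> exp is the weight of the
+ * last integral bit, frac_lsbm1 is half of it, and adding the chosen
+ * increment then clearing rnd_mask == frac_lsb - 1 truncates away
+ * the fractional bits.)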
The conversion is performed + * according to the IEC/IEEE Standard for Binary Floating-Point + * Arithmetic---which means in particular that the conversion is + * rounded according to the current rounding mode. If `a' is a NaN, + * the largest positive integer is returned. Otherwise, if the + * conversion overflows, the largest integer with the same sign as `a' + * is returned. +*/ + +static int64_t round_to_int_and_pack(FloatParts in, int rmode, int scale, + int64_t min, int64_t max, + float_status *s) +{ + uint64_t r; + int orig_flags = get_float_exception_flags(s); + FloatParts p = round_to_int(in, rmode, scale, s); + + switch (p.cls) { + case float_class_snan: + case float_class_qnan: + s->float_exception_flags = orig_flags | float_flag_invalid; + return max; + case float_class_inf: + s->float_exception_flags = orig_flags | float_flag_invalid; + return p.sign ? min : max; + case float_class_zero: + return 0; + case float_class_normal: + if (p.exp < DECOMPOSED_BINARY_POINT) { + r = p.frac >> (DECOMPOSED_BINARY_POINT - p.exp); + } else if (p.exp - DECOMPOSED_BINARY_POINT < 2) { + r = p.frac << (p.exp - DECOMPOSED_BINARY_POINT); + } else { + r = UINT64_MAX; + } + if (p.sign) { +#ifdef _MSC_VER + if (r <= 0ULL - (uint64_t)min) { + return (0ULL - r); +#else + if (r <= -(uint64_t) min) { + return -r; +#endif + } else { + s->float_exception_flags = orig_flags | float_flag_invalid; + return min; + } + } else { + if (r <= max) { + return r; + } else { + s->float_exception_flags = orig_flags | float_flag_invalid; + return max; + } + } + default: + g_assert_not_reached(); + return max; + } +} + +int16_t float16_to_int16_scalbn(float16 a, int rmode, int scale, + float_status *s) +{ + return round_to_int_and_pack(float16_unpack_canonical(a, s), + rmode, scale, INT16_MIN, INT16_MAX, s); +} + +int32_t float16_to_int32_scalbn(float16 a, int rmode, int scale, + float_status *s) +{ + return round_to_int_and_pack(float16_unpack_canonical(a, s), + rmode, scale, INT32_MIN, INT32_MAX, s); +} + +int64_t float16_to_int64_scalbn(float16 a, int rmode, int scale, + float_status *s) +{ + return round_to_int_and_pack(float16_unpack_canonical(a, s), + rmode, scale, INT64_MIN, INT64_MAX, s); +} + +int16_t float32_to_int16_scalbn(float32 a, int rmode, int scale, + float_status *s) +{ + return round_to_int_and_pack(float32_unpack_canonical(a, s), + rmode, scale, INT16_MIN, INT16_MAX, s); +} + +int32_t float32_to_int32_scalbn(float32 a, int rmode, int scale, + float_status *s) +{ + return round_to_int_and_pack(float32_unpack_canonical(a, s), + rmode, scale, INT32_MIN, INT32_MAX, s); +} + +int64_t float32_to_int64_scalbn(float32 a, int rmode, int scale, + float_status *s) +{ + return round_to_int_and_pack(float32_unpack_canonical(a, s), + rmode, scale, INT64_MIN, INT64_MAX, s); +} + +int16_t float64_to_int16_scalbn(float64 a, int rmode, int scale, + float_status *s) +{ + return round_to_int_and_pack(float64_unpack_canonical(a, s), + rmode, scale, INT16_MIN, INT16_MAX, s); +} + +int32_t float64_to_int32_scalbn(float64 a, int rmode, int scale, + float_status *s) +{ + return round_to_int_and_pack(float64_unpack_canonical(a, s), + rmode, scale, INT32_MIN, INT32_MAX, s); +} + +int64_t float64_to_int64_scalbn(float64 a, int rmode, int scale, + float_status *s) +{ + return round_to_int_and_pack(float64_unpack_canonical(a, s), + rmode, scale, INT64_MIN, INT64_MAX, s); +} + +int16_t float16_to_int16(float16 a, float_status *s) +{ + return float16_to_int16_scalbn(a, s->float_rounding_mode, 0, s); +} + +int32_t 
float16_to_int32(float16 a, float_status *s) +{ + return float16_to_int32_scalbn(a, s->float_rounding_mode, 0, s); +} + +int64_t float16_to_int64(float16 a, float_status *s) +{ + return float16_to_int64_scalbn(a, s->float_rounding_mode, 0, s); +} + +int16_t float32_to_int16(float32 a, float_status *s) +{ + return float32_to_int16_scalbn(a, s->float_rounding_mode, 0, s); +} + +int32_t float32_to_int32(float32 a, float_status *s) +{ + return float32_to_int32_scalbn(a, s->float_rounding_mode, 0, s); +} + +int64_t float32_to_int64(float32 a, float_status *s) +{ + return float32_to_int64_scalbn(a, s->float_rounding_mode, 0, s); +} + +int16_t float64_to_int16(float64 a, float_status *s) +{ + return float64_to_int16_scalbn(a, s->float_rounding_mode, 0, s); +} + +int32_t float64_to_int32(float64 a, float_status *s) +{ + return float64_to_int32_scalbn(a, s->float_rounding_mode, 0, s); +} + +int64_t float64_to_int64(float64 a, float_status *s) +{ + return float64_to_int64_scalbn(a, s->float_rounding_mode, 0, s); +} + +int16_t float16_to_int16_round_to_zero(float16 a, float_status *s) +{ + return float16_to_int16_scalbn(a, float_round_to_zero, 0, s); +} + +int32_t float16_to_int32_round_to_zero(float16 a, float_status *s) +{ + return float16_to_int32_scalbn(a, float_round_to_zero, 0, s); +} + +int64_t float16_to_int64_round_to_zero(float16 a, float_status *s) +{ + return float16_to_int64_scalbn(a, float_round_to_zero, 0, s); +} + +int16_t float32_to_int16_round_to_zero(float32 a, float_status *s) +{ + return float32_to_int16_scalbn(a, float_round_to_zero, 0, s); +} + +int32_t float32_to_int32_round_to_zero(float32 a, float_status *s) +{ + return float32_to_int32_scalbn(a, float_round_to_zero, 0, s); +} + +int64_t float32_to_int64_round_to_zero(float32 a, float_status *s) +{ + return float32_to_int64_scalbn(a, float_round_to_zero, 0, s); +} + +int16_t float64_to_int16_round_to_zero(float64 a, float_status *s) +{ + return float64_to_int16_scalbn(a, float_round_to_zero, 0, s); +} + +int32_t float64_to_int32_round_to_zero(float64 a, float_status *s) +{ + return float64_to_int32_scalbn(a, float_round_to_zero, 0, s); +} + +int64_t float64_to_int64_round_to_zero(float64 a, float_status *s) +{ + return float64_to_int64_scalbn(a, float_round_to_zero, 0, s); +} + +/* + * Returns the result of converting the floating-point value `a' to + * the unsigned integer format. The conversion is performed according + * to the IEC/IEEE Standard for Binary Floating-Point + * Arithmetic---which means in particular that the conversion is + * rounded according to the current rounding mode. If `a' is a NaN, + * the largest unsigned integer is returned. Otherwise, if the + * conversion overflows, the largest unsigned integer is returned. If + * the 'a' is negative, the result is rounded and zero is returned; + * values that do not round to zero will raise the inexact exception + * flag. + */ + +static uint64_t round_to_uint_and_pack(FloatParts in, int rmode, int scale, + uint64_t max, float_status *s) +{ + int orig_flags = get_float_exception_flags(s); + FloatParts p = round_to_int(in, rmode, scale, s); + uint64_t r; + + switch (p.cls) { + case float_class_snan: + case float_class_qnan: + s->float_exception_flags = orig_flags | float_flag_invalid; + return max; + case float_class_inf: + s->float_exception_flags = orig_flags | float_flag_invalid; + return p.sign ? 
0 : max; + case float_class_zero: + return 0; + case float_class_normal: + if (p.sign) { + s->float_exception_flags = orig_flags | float_flag_invalid; + return 0; + } + + if (p.exp < DECOMPOSED_BINARY_POINT) { + r = p.frac >> (DECOMPOSED_BINARY_POINT - p.exp); + } else if (p.exp - DECOMPOSED_BINARY_POINT < 2) { + r = p.frac << (p.exp - DECOMPOSED_BINARY_POINT); + } else { + s->float_exception_flags = orig_flags | float_flag_invalid; + return max; + } + + /* For uint64 this will never trip, but if p.exp is too large + * to shift a decomposed fraction we shall have exited via the + * 3rd leg above. + */ + if (r > max) { + s->float_exception_flags = orig_flags | float_flag_invalid; + return max; + } + return r; + default: + g_assert_not_reached(); + return max; + } +} + +uint16_t float16_to_uint16_scalbn(float16 a, int rmode, int scale, + float_status *s) +{ + return round_to_uint_and_pack(float16_unpack_canonical(a, s), + rmode, scale, UINT16_MAX, s); +} + +uint32_t float16_to_uint32_scalbn(float16 a, int rmode, int scale, + float_status *s) +{ + return round_to_uint_and_pack(float16_unpack_canonical(a, s), + rmode, scale, UINT32_MAX, s); +} + +uint64_t float16_to_uint64_scalbn(float16 a, int rmode, int scale, + float_status *s) +{ + return round_to_uint_and_pack(float16_unpack_canonical(a, s), + rmode, scale, UINT64_MAX, s); +} + +uint16_t float32_to_uint16_scalbn(float32 a, int rmode, int scale, + float_status *s) +{ + return round_to_uint_and_pack(float32_unpack_canonical(a, s), + rmode, scale, UINT16_MAX, s); +} + +uint32_t float32_to_uint32_scalbn(float32 a, int rmode, int scale, + float_status *s) +{ + return round_to_uint_and_pack(float32_unpack_canonical(a, s), + rmode, scale, UINT32_MAX, s); +} + +uint64_t float32_to_uint64_scalbn(float32 a, int rmode, int scale, + float_status *s) +{ + return round_to_uint_and_pack(float32_unpack_canonical(a, s), + rmode, scale, UINT64_MAX, s); +} + +uint16_t float64_to_uint16_scalbn(float64 a, int rmode, int scale, + float_status *s) +{ + return round_to_uint_and_pack(float64_unpack_canonical(a, s), + rmode, scale, UINT16_MAX, s); +} + +uint32_t float64_to_uint32_scalbn(float64 a, int rmode, int scale, + float_status *s) +{ + return round_to_uint_and_pack(float64_unpack_canonical(a, s), + rmode, scale, UINT32_MAX, s); +} + +uint64_t float64_to_uint64_scalbn(float64 a, int rmode, int scale, + float_status *s) +{ + return round_to_uint_and_pack(float64_unpack_canonical(a, s), + rmode, scale, UINT64_MAX, s); +} + +uint16_t float16_to_uint16(float16 a, float_status *s) +{ + return float16_to_uint16_scalbn(a, s->float_rounding_mode, 0, s); +} + +uint32_t float16_to_uint32(float16 a, float_status *s) +{ + return float16_to_uint32_scalbn(a, s->float_rounding_mode, 0, s); +} + +uint64_t float16_to_uint64(float16 a, float_status *s) +{ + return float16_to_uint64_scalbn(a, s->float_rounding_mode, 0, s); +} + +uint16_t float32_to_uint16(float32 a, float_status *s) +{ + return float32_to_uint16_scalbn(a, s->float_rounding_mode, 0, s); +} + +uint32_t float32_to_uint32(float32 a, float_status *s) +{ + return float32_to_uint32_scalbn(a, s->float_rounding_mode, 0, s); +} + +uint64_t float32_to_uint64(float32 a, float_status *s) +{ + return float32_to_uint64_scalbn(a, s->float_rounding_mode, 0, s); +} + +uint16_t float64_to_uint16(float64 a, float_status *s) +{ + return float64_to_uint16_scalbn(a, s->float_rounding_mode, 0, s); +} + +uint32_t float64_to_uint32(float64 a, float_status *s) +{ + return float64_to_uint32_scalbn(a, s->float_rounding_mode, 0, s); +} + 
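+/*
+ * Editor's sketch, not part of the original patch: the conversions
+ * above saturate instead of wrapping, and report the saturation via
+ * float_flag_invalid.  A minimal illustration, kept under #if 0
+ * because it is expository only; it assumes, as in QEMU, that the
+ * prototypes come from the softfloat header, so that helpers defined
+ * later in this file may be called here.
+ */
+#if 0
+static void check_uint_saturation(void)
+{
+    float_status st = { 0 };    /* round-nearest-even, no flags raised */
+    /* 2^40 is exactly representable as a float64 ... */
+    float64 big = uint64_to_float64(1ULL << 40, &st);
+    /* ... but far above UINT32_MAX, so the conversion saturates and
+     * raises float_flag_invalid. */
+    uint32_t r = float64_to_uint32(big, &st);
+    /* here: r == UINT32_MAX && (st.float_exception_flags
+     *                           & float_flag_invalid) != 0 */
+    (void)r;
+}
+#endif
+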
+uint64_t float64_to_uint64(float64 a, float_status *s) +{ + return float64_to_uint64_scalbn(a, s->float_rounding_mode, 0, s); +} + +uint16_t float16_to_uint16_round_to_zero(float16 a, float_status *s) +{ + return float16_to_uint16_scalbn(a, float_round_to_zero, 0, s); +} + +uint32_t float16_to_uint32_round_to_zero(float16 a, float_status *s) +{ + return float16_to_uint32_scalbn(a, float_round_to_zero, 0, s); +} + +uint64_t float16_to_uint64_round_to_zero(float16 a, float_status *s) +{ + return float16_to_uint64_scalbn(a, float_round_to_zero, 0, s); +} + +uint16_t float32_to_uint16_round_to_zero(float32 a, float_status *s) +{ + return float32_to_uint16_scalbn(a, float_round_to_zero, 0, s); +} + +uint32_t float32_to_uint32_round_to_zero(float32 a, float_status *s) +{ + return float32_to_uint32_scalbn(a, float_round_to_zero, 0, s); +} + +uint64_t float32_to_uint64_round_to_zero(float32 a, float_status *s) +{ + return float32_to_uint64_scalbn(a, float_round_to_zero, 0, s); +} + +uint16_t float64_to_uint16_round_to_zero(float64 a, float_status *s) +{ + return float64_to_uint16_scalbn(a, float_round_to_zero, 0, s); +} + +uint32_t float64_to_uint32_round_to_zero(float64 a, float_status *s) +{ + return float64_to_uint32_scalbn(a, float_round_to_zero, 0, s); +} + +uint64_t float64_to_uint64_round_to_zero(float64 a, float_status *s) +{ + return float64_to_uint64_scalbn(a, float_round_to_zero, 0, s); +} + +/* + * Integer to float conversions + * + * Returns the result of converting the two's complement integer `a' + * to the floating-point format. The conversion is performed according + * to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. + */ + +static FloatParts int_to_float(int64_t a, int scale, float_status *status) +{ + FloatParts r = { .sign = false }; + + if (a == 0) { + r.cls = float_class_zero; + } else { + uint64_t f = a; + int shift; + + r.cls = float_class_normal; + if (a < 0) { +#ifdef _MSC_VER + f = 0ULL - f; +#else + f = -f; +#endif + r.sign = true; + } + shift = clz64(f) - 1; + scale = MIN(MAX(scale, -0x10000), 0x10000); + + r.exp = DECOMPOSED_BINARY_POINT - shift + scale; + r.frac = (shift < 0 ? 
DECOMPOSED_IMPLICIT_BIT : f << shift); + } + + return r; +} + +float16 int64_to_float16_scalbn(int64_t a, int scale, float_status *status) +{ + FloatParts pa = int_to_float(a, scale, status); + return float16_round_pack_canonical(pa, status); +} + +float16 int32_to_float16_scalbn(int32_t a, int scale, float_status *status) +{ + return int64_to_float16_scalbn(a, scale, status); +} + +float16 int16_to_float16_scalbn(int16_t a, int scale, float_status *status) +{ + return int64_to_float16_scalbn(a, scale, status); +} + +float16 int64_to_float16(int64_t a, float_status *status) +{ + return int64_to_float16_scalbn(a, 0, status); +} + +float16 int32_to_float16(int32_t a, float_status *status) +{ + return int64_to_float16_scalbn(a, 0, status); +} + +float16 int16_to_float16(int16_t a, float_status *status) +{ + return int64_to_float16_scalbn(a, 0, status); +} + +float32 int64_to_float32_scalbn(int64_t a, int scale, float_status *status) +{ + FloatParts pa = int_to_float(a, scale, status); + return float32_round_pack_canonical(pa, status); +} + +float32 int32_to_float32_scalbn(int32_t a, int scale, float_status *status) +{ + return int64_to_float32_scalbn(a, scale, status); +} + +float32 int16_to_float32_scalbn(int16_t a, int scale, float_status *status) +{ + return int64_to_float32_scalbn(a, scale, status); +} + +float32 int64_to_float32(int64_t a, float_status *status) +{ + return int64_to_float32_scalbn(a, 0, status); +} + +float32 int32_to_float32(int32_t a, float_status *status) +{ + return int64_to_float32_scalbn(a, 0, status); +} + +float32 int16_to_float32(int16_t a, float_status *status) +{ + return int64_to_float32_scalbn(a, 0, status); +} + +float64 int64_to_float64_scalbn(int64_t a, int scale, float_status *status) +{ + FloatParts pa = int_to_float(a, scale, status); + return float64_round_pack_canonical(pa, status); +} + +float64 int32_to_float64_scalbn(int32_t a, int scale, float_status *status) +{ + return int64_to_float64_scalbn(a, scale, status); +} + +float64 int16_to_float64_scalbn(int16_t a, int scale, float_status *status) +{ + return int64_to_float64_scalbn(a, scale, status); +} + +float64 int64_to_float64(int64_t a, float_status *status) +{ + return int64_to_float64_scalbn(a, 0, status); +} + +float64 int32_to_float64(int32_t a, float_status *status) +{ + return int64_to_float64_scalbn(a, 0, status); +} + +float64 int16_to_float64(int16_t a, float_status *status) +{ + return int64_to_float64_scalbn(a, 0, status); +} + + +/* + * Unsigned Integer to float conversions + * + * Returns the result of converting the unsigned integer `a' to the + * floating-point format. The conversion is performed according to the + * IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
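+ * (Editor's note: unlike int_to_float above, uint_to_float below must
+ * handle an input with bit 63 set, which cannot be left-aligned under
+ * the implicit bit at DECOMPOSED_BINARY_POINT; it is instead shifted
+ * right by one with jamming, with the exponent set one above
+ * DECOMPOSED_BINARY_POINT before the scale adjustment.)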
+ */ + +static FloatParts uint_to_float(uint64_t a, int scale, float_status *status) +{ + FloatParts r = { .sign = false }; + + if (a == 0) { + r.cls = float_class_zero; + } else { + scale = MIN(MAX(scale, -0x10000), 0x10000); + r.cls = float_class_normal; + if ((int64_t)a < 0) { + r.exp = DECOMPOSED_BINARY_POINT + 1 + scale; + shift64RightJamming(a, 1, &a); + r.frac = a; + } else { + int shift = clz64(a) - 1; + r.exp = DECOMPOSED_BINARY_POINT - shift + scale; + r.frac = a << shift; + } + } + + return r; +} + +float16 uint64_to_float16_scalbn(uint64_t a, int scale, float_status *status) +{ + FloatParts pa = uint_to_float(a, scale, status); + return float16_round_pack_canonical(pa, status); +} + +float16 uint32_to_float16_scalbn(uint32_t a, int scale, float_status *status) +{ + return uint64_to_float16_scalbn(a, scale, status); +} + +float16 uint16_to_float16_scalbn(uint16_t a, int scale, float_status *status) +{ + return uint64_to_float16_scalbn(a, scale, status); +} + +float16 uint64_to_float16(uint64_t a, float_status *status) +{ + return uint64_to_float16_scalbn(a, 0, status); +} + +float16 uint32_to_float16(uint32_t a, float_status *status) +{ + return uint64_to_float16_scalbn(a, 0, status); +} + +float16 uint16_to_float16(uint16_t a, float_status *status) +{ + return uint64_to_float16_scalbn(a, 0, status); +} + +float32 uint64_to_float32_scalbn(uint64_t a, int scale, float_status *status) +{ + FloatParts pa = uint_to_float(a, scale, status); + return float32_round_pack_canonical(pa, status); +} + +float32 uint32_to_float32_scalbn(uint32_t a, int scale, float_status *status) +{ + return uint64_to_float32_scalbn(a, scale, status); +} + +float32 uint16_to_float32_scalbn(uint16_t a, int scale, float_status *status) +{ + return uint64_to_float32_scalbn(a, scale, status); +} + +float32 uint64_to_float32(uint64_t a, float_status *status) +{ + return uint64_to_float32_scalbn(a, 0, status); +} + +float32 uint32_to_float32(uint32_t a, float_status *status) +{ + return uint64_to_float32_scalbn(a, 0, status); +} + +float32 uint16_to_float32(uint16_t a, float_status *status) +{ + return uint64_to_float32_scalbn(a, 0, status); +} + +float64 uint64_to_float64_scalbn(uint64_t a, int scale, float_status *status) +{ + FloatParts pa = uint_to_float(a, scale, status); + return float64_round_pack_canonical(pa, status); +} + +float64 uint32_to_float64_scalbn(uint32_t a, int scale, float_status *status) +{ + return uint64_to_float64_scalbn(a, scale, status); +} + +float64 uint16_to_float64_scalbn(uint16_t a, int scale, float_status *status) +{ + return uint64_to_float64_scalbn(a, scale, status); +} + +float64 uint64_to_float64(uint64_t a, float_status *status) +{ + return uint64_to_float64_scalbn(a, 0, status); +} + +float64 uint32_to_float64(uint32_t a, float_status *status) +{ + return uint64_to_float64_scalbn(a, 0, status); +} + +float64 uint16_to_float64(uint16_t a, float_status *status) +{ + return uint64_to_float64_scalbn(a, 0, status); +} + +/* Float Min/Max */ +/* min() and max() functions. These can't be implemented as + * 'compare and pick one input' because that would mishandle + * NaNs and +0 vs -0. + * + * minnum() and maxnum() functions. These are similar to the min() + * and max() functions but if one of the arguments is a QNaN and + * the other is numerical then the numerical argument is returned. + * SNaNs will get quietened before being returned. + * minnum() and maxnum correspond to the IEEE 754-2008 minNum() + * and maxNum() operations. 
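+ * (Editor's note: concretely, minnum(1.0, QNaN) is 1.0, while
+ * min(1.0, QNaN) yields the NaN result chosen by pick_nan().)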
min() and max() are the typical min/max + * semantics provided by many CPUs which predate that specification. + * + * minnummag() and maxnummag() functions correspond to minNumMag() + * and maxNumMag() from the IEEE-754 2008. + */ +static FloatParts minmax_floats(FloatParts a, FloatParts b, bool ismin, + bool ieee, bool ismag, float_status *s) +{ + if (unlikely(is_nan(a.cls) || is_nan(b.cls))) { + if (ieee) { + /* Takes two floating-point values `a' and `b', one of + * which is a NaN, and returns the appropriate NaN + * result. If either `a' or `b' is a signaling NaN, + * the invalid exception is raised. + */ + if (is_snan(a.cls) || is_snan(b.cls)) { + return pick_nan(a, b, s); + } else if (is_nan(a.cls) && !is_nan(b.cls)) { + return b; + } else if (is_nan(b.cls) && !is_nan(a.cls)) { + return a; + } + } + return pick_nan(a, b, s); + } else { + int a_exp = 0, b_exp = 0; + + switch (a.cls) { + case float_class_normal: + a_exp = a.exp; + break; + case float_class_inf: + a_exp = INT_MAX; + break; + case float_class_zero: + a_exp = INT_MIN; + break; + default: + g_assert_not_reached(); + break; + } + switch (b.cls) { + case float_class_normal: + b_exp = b.exp; + break; + case float_class_inf: + b_exp = INT_MAX; + break; + case float_class_zero: + b_exp = INT_MIN; + break; + default: + g_assert_not_reached(); + break; + } + + if (ismag && (a_exp != b_exp || a.frac != b.frac)) { + bool a_less = a_exp < b_exp; + if (a_exp == b_exp) { + a_less = a.frac < b.frac; + } + return a_less ^ ismin ? b : a; + } + + if (a.sign == b.sign) { + bool a_less = a_exp < b_exp; + if (a_exp == b_exp) { + a_less = a.frac < b.frac; + } + return a.sign ^ a_less ^ ismin ? b : a; + } else { + return a.sign ^ ismin ? b : a; + } + } +} + +#define MINMAX(sz, name, ismin, isiee, ismag) \ +float ## sz float ## sz ## _ ## name(float ## sz a, float ## sz b, \ + float_status *s) \ +{ \ + FloatParts pa = float ## sz ## _unpack_canonical(a, s); \ + FloatParts pb = float ## sz ## _unpack_canonical(b, s); \ + FloatParts pr = minmax_floats(pa, pb, ismin, isiee, ismag, s); \ + \ + return float ## sz ## _round_pack_canonical(pr, s); \ +} + +MINMAX(16, min, true, false, false) +MINMAX(16, minnum, true, true, false) +MINMAX(16, minnummag, true, true, true) +MINMAX(16, max, false, false, false) +MINMAX(16, maxnum, false, true, false) +MINMAX(16, maxnummag, false, true, true) + +MINMAX(32, min, true, false, false) +MINMAX(32, minnum, true, true, false) +MINMAX(32, minnummag, true, true, true) +MINMAX(32, max, false, false, false) +MINMAX(32, maxnum, false, true, false) +MINMAX(32, maxnummag, false, true, true) + +MINMAX(64, min, true, false, false) +MINMAX(64, minnum, true, true, false) +MINMAX(64, minnummag, true, true, true) +MINMAX(64, max, false, false, false) +MINMAX(64, maxnum, false, true, false) +MINMAX(64, maxnummag, false, true, true) + +#undef MINMAX + +/* Floating point compare */ +static int compare_floats(FloatParts a, FloatParts b, bool is_quiet, + float_status *s) +{ + if (is_nan(a.cls) || is_nan(b.cls)) { + if (!is_quiet || + a.cls == float_class_snan || + b.cls == float_class_snan) { + s->float_exception_flags |= float_flag_invalid; + } + return float_relation_unordered; + } + + if (a.cls == float_class_zero) { + if (b.cls == float_class_zero) { + return float_relation_equal; + } + return b.sign ? float_relation_greater : float_relation_less; + } else if (b.cls == float_class_zero) { + return a.sign ? float_relation_less : float_relation_greater; + } + + /* The only really important thing about infinity is its sign.
If + * both are infinities the sign marks the smallest of the two. + */ + if (a.cls == float_class_inf) { + if ((b.cls == float_class_inf) && (a.sign == b.sign)) { + return float_relation_equal; + } + return a.sign ? float_relation_less : float_relation_greater; + } else if (b.cls == float_class_inf) { + return b.sign ? float_relation_greater : float_relation_less; + } + + if (a.sign != b.sign) { + return a.sign ? float_relation_less : float_relation_greater; + } + + if (a.exp == b.exp) { + if (a.frac == b.frac) { + return float_relation_equal; + } + if (a.sign) { + return a.frac > b.frac ? + float_relation_less : float_relation_greater; + } else { + return a.frac > b.frac ? + float_relation_greater : float_relation_less; + } + } else { + if (a.sign) { + return a.exp > b.exp ? float_relation_less : float_relation_greater; + } else { + return a.exp > b.exp ? float_relation_greater : float_relation_less; + } + } +} + +#define COMPARE(name, attr, sz) \ +static int attr \ +name(float ## sz a, float ## sz b, bool is_quiet, float_status *s) \ +{ \ + FloatParts pa = float ## sz ## _unpack_canonical(a, s); \ + FloatParts pb = float ## sz ## _unpack_canonical(b, s); \ + return compare_floats(pa, pb, is_quiet, s); \ +} + +COMPARE(soft_f16_compare, QEMU_FLATTEN, 16) +COMPARE(soft_f32_compare, QEMU_SOFTFLOAT_ATTR, 32) +COMPARE(soft_f64_compare, QEMU_SOFTFLOAT_ATTR, 64) + +#undef COMPARE + +int float16_compare(float16 a, float16 b, float_status *s) +{ + return soft_f16_compare(a, b, false, s); +} + +int float16_compare_quiet(float16 a, float16 b, float_status *s) +{ + return soft_f16_compare(a, b, true, s); +} + +static int QEMU_FLATTEN +f32_compare(float32 xa, float32 xb, bool is_quiet, float_status *s) +{ + union_float32 ua, ub; + + ua.s = xa; + ub.s = xb; + + if (QEMU_NO_HARDFLOAT) { + goto soft; + } + + float32_input_flush2(&ua.s, &ub.s, s); + if (isgreaterequal(ua.h, ub.h)) { + if (isgreater(ua.h, ub.h)) { + return float_relation_greater; + } + return float_relation_equal; + } + if (likely(isless(ua.h, ub.h))) { + return float_relation_less; + } + /* The only condition remaining is unordered. + * Fall through to set flags. + */ + soft: + return soft_f32_compare(ua.s, ub.s, is_quiet, s); +} + +int float32_compare(float32 a, float32 b, float_status *s) +{ + return f32_compare(a, b, false, s); +} + +int float32_compare_quiet(float32 a, float32 b, float_status *s) +{ + return f32_compare(a, b, true, s); +} + +static int QEMU_FLATTEN +f64_compare(float64 xa, float64 xb, bool is_quiet, float_status *s) +{ + union_float64 ua, ub; + + ua.s = xa; + ub.s = xb; + + if (QEMU_NO_HARDFLOAT) { + goto soft; + } + + float64_input_flush2(&ua.s, &ub.s, s); + if (isgreaterequal(ua.h, ub.h)) { + if (isgreater(ua.h, ub.h)) { + return float_relation_greater; + } + return float_relation_equal; + } + if (likely(isless(ua.h, ub.h))) { + return float_relation_less; + } + /* The only condition remaining is unordered. + * Fall through to set flags. + */ + soft: + return soft_f64_compare(ua.s, ub.s, is_quiet, s); +} + +int float64_compare(float64 a, float64 b, float_status *s) +{ + return f64_compare(a, b, false, s); +} + +int float64_compare_quiet(float64 a, float64 b, float_status *s) +{ + return f64_compare(a, b, true, s); +} + +/* Multiply A by 2 raised to the power N. 
*/ +static FloatParts scalbn_decomposed(FloatParts a, int n, float_status *s) +{ + if (unlikely(is_nan(a.cls))) { + return return_nan(a, s); + } + if (a.cls == float_class_normal) { + /* The largest float type (even though not supported by FloatParts) + * is float128, which has a 15 bit exponent. Bounding N to 16 bits + * still allows rounding to infinity, without allowing overflow + * within the int32_t that backs FloatParts.exp. + */ + n = MIN(MAX(n, -0x10000), 0x10000); + a.exp += n; + } + return a; +} + +float16 float16_scalbn(float16 a, int n, float_status *status) +{ + FloatParts pa = float16_unpack_canonical(a, status); + FloatParts pr = scalbn_decomposed(pa, n, status); + return float16_round_pack_canonical(pr, status); +} + +float32 float32_scalbn(float32 a, int n, float_status *status) +{ + FloatParts pa = float32_unpack_canonical(a, status); + FloatParts pr = scalbn_decomposed(pa, n, status); + return float32_round_pack_canonical(pr, status); +} + +float64 float64_scalbn(float64 a, int n, float_status *status) +{ + FloatParts pa = float64_unpack_canonical(a, status); + FloatParts pr = scalbn_decomposed(pa, n, status); + return float64_round_pack_canonical(pr, status); +} + +/* + * Square Root + * + * The old softfloat code did an approximation step before zeroing in + * on the final result. However for simpleness we just compute the + * square root by iterating down from the implicit bit to enough extra + * bits to ensure we get a correctly rounded result. + * + * This does mean however the calculation is slower than before, + * especially for 64 bit floats. + */ + +static FloatParts sqrt_float(FloatParts a, float_status *s, const FloatFmt *p) +{ + uint64_t a_frac, r_frac, s_frac; + int bit, last_bit; + + if (is_nan(a.cls)) { + return return_nan(a, s); + } + if (a.cls == float_class_zero) { + return a; /* sqrt(+-0) = +-0 */ + } + if (a.sign) { + s->float_exception_flags |= float_flag_invalid; + return parts_default_nan(s); + } + if (a.cls == float_class_inf) { + return a; /* sqrt(+inf) = +inf */ + } + + assert(a.cls == float_class_normal); + + /* We need two overflow bits at the top. Adding room for that is a + * right shift. If the exponent is odd, we can discard the low bit + * by multiplying the fraction by 2; that's a left shift. Combine + * those and we shift right if the exponent is even. + */ + a_frac = a.frac; + if (!(a.exp & 1)) { + a_frac >>= 1; + } + a.exp >>= 1; + + /* Bit-by-bit computation of sqrt. */ + r_frac = 0; + s_frac = 0; + + /* Iterate from implicit bit down to the 3 extra bits to compute a + * properly rounded result. Remember we've inserted one more bit + * at the top, so these positions are one less. + */ + bit = DECOMPOSED_BINARY_POINT - 1; + last_bit = MAX(p->frac_shift - 4, 0); + do { + uint64_t q = 1ULL << bit; + uint64_t t_frac = s_frac + q; + if (t_frac <= a_frac) { + s_frac = t_frac + q; + a_frac -= t_frac; + r_frac += q; + } + a_frac <<= 1; + } while (--bit >= last_bit); + + /* Undo the right shift done above. If there is any remaining + * fraction, the result is inexact. Set the sticky bit. 
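+       (Editor's note: the loop above is the classic bit-serial
+       restoring square root: r_frac is the root accumulated so far
+       and s_frac tracks twice the root in the current frame, so
+       accepting bit q costs t_frac == 2*r*q + q^2, exactly the
+       growth of r^2; the remainder a_frac doubles each iteration
+       instead of q shrinking relative to it.)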
+ */ + a.frac = (r_frac << 1) + (a_frac != 0); + + return a; +} + +float16 QEMU_FLATTEN float16_sqrt(float16 a, float_status *status) +{ + FloatParts pa = float16_unpack_canonical(a, status); + FloatParts pr = sqrt_float(pa, status, &float16_params); + return float16_round_pack_canonical(pr, status); +} + +static float32 QEMU_SOFTFLOAT_ATTR +soft_f32_sqrt(float32 a, float_status *status) +{ + FloatParts pa = float32_unpack_canonical(a, status); + FloatParts pr = sqrt_float(pa, status, &float32_params); + return float32_round_pack_canonical(pr, status); +} + +static float64 QEMU_SOFTFLOAT_ATTR +soft_f64_sqrt(float64 a, float_status *status) +{ + FloatParts pa = float64_unpack_canonical(a, status); + FloatParts pr = sqrt_float(pa, status, &float64_params); + return float64_round_pack_canonical(pr, status); +} + +float32 QEMU_FLATTEN float32_sqrt(float32 xa, float_status *s) +{ + union_float32 ua, ur; + + ua.s = xa; + if (unlikely(!can_use_fpu(s))) { + goto soft; + } + + float32_input_flush1(&ua.s, s); + if (QEMU_HARDFLOAT_1F32_USE_FP) { + if (unlikely(!(fpclassify(ua.h) == FP_NORMAL || + fpclassify(ua.h) == FP_ZERO) || + signbit(ua.h))) { + goto soft; + } + } else if (unlikely(!float32_is_zero_or_normal(ua.s) || + float32_is_neg(ua.s))) { + goto soft; + } + ur.h = sqrtf(ua.h); + return ur.s; + + soft: + return soft_f32_sqrt(ua.s, s); +} + +float64 QEMU_FLATTEN float64_sqrt(float64 xa, float_status *s) +{ + union_float64 ua, ur; + + ua.s = xa; + if (unlikely(!can_use_fpu(s))) { + goto soft; + } + + float64_input_flush1(&ua.s, s); + if (QEMU_HARDFLOAT_1F64_USE_FP) { + if (unlikely(!(fpclassify(ua.h) == FP_NORMAL || + fpclassify(ua.h) == FP_ZERO) || + signbit(ua.h))) { + goto soft; + } + } else if (unlikely(!float64_is_zero_or_normal(ua.s) || + float64_is_neg(ua.s))) { + goto soft; + } + ur.h = sqrt(ua.h); + return ur.s; + + soft: + return soft_f64_sqrt(ua.s, s); } /*---------------------------------------------------------------------------- -| Returns the exponent bits of the half-precision floating-point value `a'. +| The pattern for a default generated NaN. *----------------------------------------------------------------------------*/ -static inline int_fast16_t extractFloat16Exp(float16 a) +float16 float16_default_nan(float_status *status) { - return (float16_val(a) >> 10) & 0x1f; + FloatParts p = parts_default_nan(status); + p.frac >>= float16_params.frac_shift; + return float16_pack_raw(p); +} + +float32 float32_default_nan(float_status *status) +{ + FloatParts p = parts_default_nan(status); + p.frac >>= float32_params.frac_shift; + return float32_pack_raw(p); +} + +float64 float64_default_nan(float_status *status) +{ + FloatParts p = parts_default_nan(status); + p.frac >>= float64_params.frac_shift; + return float64_pack_raw(p); +} + +float128 float128_default_nan(float_status *status) +{ + FloatParts p = parts_default_nan(status); + float128 r; + + /* Extrapolate from the choices made by parts_default_nan to fill + * in the quad-floating format. If the low bit is set, assume we + * want to set all non-snan bits. + */ +#ifdef _MSC_VER + r.low = 0ULL - (p.frac & 1); +#else + r.low = -(p.frac & 1); +#endif + r.high = p.frac >> (DECOMPOSED_BINARY_POINT - 48); + r.high |= UINT64_C(0x7FFF000000000000); + r.high |= (uint64_t)p.sign << 63; + + return r; } /*---------------------------------------------------------------------------- -| Returns the sign bit of the single-precision floating-point value `a'. +| Returns a quiet NaN from a signalling NaN for the floating point value `a'. 
*----------------------------------------------------------------------------*/ -static inline flag extractFloat16Sign(float16 a) +float16 float16_silence_nan(float16 a, float_status *status) { - return float16_val(a)>>15; + FloatParts p = float16_unpack_raw(a); + p.frac <<= float16_params.frac_shift; + p = parts_silence_nan(p, status); + p.frac >>= float16_params.frac_shift; + return float16_pack_raw(p); +} + +float32 float32_silence_nan(float32 a, float_status *status) +{ + FloatParts p = float32_unpack_raw(a); + p.frac <<= float32_params.frac_shift; + p = parts_silence_nan(p, status); + p.frac >>= float32_params.frac_shift; + return float32_pack_raw(p); +} + +float64 float64_silence_nan(float64 a, float_status *status) +{ + FloatParts p = float64_unpack_raw(a); + p.frac <<= float64_params.frac_shift; + p = parts_silence_nan(p, status); + p.frac >>= float64_params.frac_shift; + return float64_pack_raw(p); +} + + +/*---------------------------------------------------------------------------- +| If `a' is denormal and we are in flush-to-zero mode then set the +| input-denormal exception and return zero. Otherwise just return the value. +*----------------------------------------------------------------------------*/ + +static bool parts_squash_denormal(FloatParts p, float_status *status) +{ + if (p.exp == 0 && p.frac != 0) { + float_raise(float_flag_input_denormal, status); + return true; + } + + return false; +} + +float16 float16_squash_input_denormal(float16 a, float_status *status) +{ + if (status->flush_inputs_to_zero) { + FloatParts p = float16_unpack_raw(a); + if (parts_squash_denormal(p, status)) { + return float16_set_sign(float16_zero, p.sign); + } + } + return a; +} + +float32 float32_squash_input_denormal(float32 a, float_status *status) +{ + if (status->flush_inputs_to_zero) { + FloatParts p = float32_unpack_raw(a); + if (parts_squash_denormal(p, status)) { + return float32_set_sign(float32_zero, p.sign); + } + } + return a; +} + +float64 float64_squash_input_denormal(float64 a, float_status *status) +{ + if (status->flush_inputs_to_zero) { + FloatParts p = float64_unpack_raw(a); + if (parts_squash_denormal(p, status)) { + return float64_set_sign(float64_zero, p.sign); + } + } + return a; } /*---------------------------------------------------------------------------- @@ -100,14 +3420,14 @@ static inline flag extractFloat16Sign(float16 a) | positive or negative integer is returned. *----------------------------------------------------------------------------*/ -static int32 roundAndPackInt32( flag zSign, uint64_t absZ STATUS_PARAM) +static int32_t roundAndPackInt32(flag zSign, uint64_t absZ, float_status *status) { - int8 roundingMode; + int8_t roundingMode; flag roundNearestEven; - int8 roundIncrement, roundBits; + int8_t roundIncrement, roundBits; int32_t z; - roundingMode = STATUS(float_rounding_mode); + roundingMode = status->float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); switch (roundingMode) { case float_round_nearest_even: @@ -123,21 +3443,24 @@ static int32 roundAndPackInt32( flag zSign, uint64_t absZ STATUS_PARAM) case float_round_down: roundIncrement = zSign ? 0x7f : 0; break; - default: - roundIncrement = 0; - float_raise(float_flag_invalid STATUS_VAR); + case float_round_to_odd: + roundIncrement = absZ & 0x80 ? 
0 : 0x7f; break; + default: + abort(); } roundBits = absZ & 0x7F; absZ = ( absZ + roundIncrement )>>7; absZ &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven ); - z = (int32_t)absZ; - if ( zSign && (z != 0x80000000)) z = - z; + z = absZ; + if ( zSign ) z = - z; if ( ( absZ>>32 ) || ( z && ( ( z < 0 ) ^ zSign ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); - return zSign ? (int32_t) 0x80000000 : 0x7FFFFFFF; + float_raise(float_flag_invalid, status); + return zSign ? INT32_MIN : INT32_MAX; + } + if (roundBits) { + status->float_exception_flags |= float_flag_inexact; } - if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact; return z; } @@ -154,13 +3477,14 @@ static int32 roundAndPackInt32( flag zSign, uint64_t absZ STATUS_PARAM) | returned. *----------------------------------------------------------------------------*/ -static int64 roundAndPackInt64( flag zSign, uint64_t absZ0, uint64_t absZ1 STATUS_PARAM) +static int64_t roundAndPackInt64(flag zSign, uint64_t absZ0, uint64_t absZ1, + float_status *status) { - int8 roundingMode; + int8_t roundingMode; flag roundNearestEven, increment; int64_t z; - roundingMode = STATUS(float_rounding_mode); + roundingMode = status->float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); switch (roundingMode) { case float_round_nearest_even: @@ -176,10 +3500,11 @@ static int64 roundAndPackInt64( flag zSign, uint64_t absZ0, uint64_t absZ1 STATU case float_round_down: increment = zSign && absZ1; break; - default: - increment = 0; - float_raise(float_flag_invalid STATUS_VAR); + case float_round_to_odd: + increment = !(absZ0 & 1) && absZ1; break; + default: + abort(); } if ( increment ) { ++absZ0; @@ -187,15 +3512,15 @@ static int64 roundAndPackInt64( flag zSign, uint64_t absZ0, uint64_t absZ1 STATU absZ0 &= ~ ( ( (uint64_t) ( absZ1<<1 ) == 0 ) & roundNearestEven ); } z = absZ0; - if ( zSign && z != 0x8000000000000000ULL ) z = - z; + if ( zSign ) z = - z; if ( z && ( ( z < 0 ) ^ zSign ) ) { overflow: - float_raise( float_flag_invalid STATUS_VAR); - return - zSign ? (int64_t) LIT64( 0x8000000000000000 ) - : LIT64( 0x7FFFFFFFFFFFFFFF ); + float_raise(float_flag_invalid, status); + return zSign ? INT64_MIN : INT64_MAX; + } + if (absZ1) { + status->float_exception_flags |= float_flag_inexact; } - if ( absZ1 ) STATUS(float_exception_flags) |= float_flag_inexact; return z; } @@ -210,13 +3535,13 @@ static int64 roundAndPackInt64( flag zSign, uint64_t absZ0, uint64_t absZ1 STATU | exception is raised and the largest unsigned integer is returned. 
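/*----------------------------------------------------------------------------
| The float_round_to_odd cases added above implement Von Neumann rounding:
| truncate, then force the result odd whenever any discarded bit was set.
| The roundIncrement form used in roundAndPackInt32 (add 0x7f only when bit
| 7 is clear) is equivalent to the jamming form sketched here for a value
| carrying seven guard bits. "sketch_" names are illustrative only.
*----------------------------------------------------------------------------*/

#include <stdint.h>

static uint64_t sketch_round_to_odd7(uint64_t absZ)
{
    uint64_t roundBits = absZ & 0x7F;   /* discarded guard/sticky bits */
    uint64_t z = absZ >> 7;             /* truncate toward zero */
    if (roundBits) {
        z |= 1;                         /* jam stickiness into the LSB */
    }
    return z;
}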
*----------------------------------------------------------------------------*/ -static int64 roundAndPackUint64(flag zSign, uint64_t absZ0, - uint64_t absZ1 STATUS_PARAM) +static int64_t roundAndPackUint64(flag zSign, uint64_t absZ0, + uint64_t absZ1, float_status *status) { - int8 roundingMode; + int8_t roundingMode; flag roundNearestEven, increment; - roundingMode = STATUS(float_rounding_mode); + roundingMode = status->float_rounding_mode; roundNearestEven = (roundingMode == float_round_nearest_even); switch (roundingMode) { case float_round_nearest_even: @@ -232,79 +3557,32 @@ static int64 roundAndPackUint64(flag zSign, uint64_t absZ0, case float_round_down: increment = zSign && absZ1; break; - default: - increment = 0; - float_raise(float_flag_invalid STATUS_VAR); + case float_round_to_odd: + increment = !(absZ0 & 1) && absZ1; break; + default: + abort(); } if (increment) { ++absZ0; if (absZ0 == 0) { - float_raise(float_flag_invalid STATUS_VAR); - return LIT64(0xFFFFFFFFFFFFFFFF); + float_raise(float_flag_invalid, status); + return UINT64_MAX; } absZ0 &= ~(((uint64_t)(absZ1<<1) == 0) & roundNearestEven); } if (zSign && absZ0) { - float_raise(float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } if (absZ1) { - STATUS(float_exception_flags) |= float_flag_inexact; + status->float_exception_flags |= float_flag_inexact; } return absZ0; } -/*---------------------------------------------------------------------------- -| Returns the fraction bits of the single-precision floating-point value `a'. -*----------------------------------------------------------------------------*/ - -static inline uint32_t extractFloat32Frac( float32 a ) -{ - - return float32_val(a) & 0x007FFFFF; - -} - -/*---------------------------------------------------------------------------- -| Returns the exponent bits of the single-precision floating-point value `a'. -*----------------------------------------------------------------------------*/ - -static inline int_fast16_t extractFloat32Exp(float32 a) -{ - - return ( float32_val(a)>>23 ) & 0xFF; - -} - -/*---------------------------------------------------------------------------- -| Returns the sign bit of the single-precision floating-point value `a'. -*----------------------------------------------------------------------------*/ - -static inline flag extractFloat32Sign( float32 a ) -{ - - return float32_val(a)>>31; - -} - -/*---------------------------------------------------------------------------- -| If `a' is denormal and we are in flush-to-zero mode then set the -| input-denormal exception and return zero. Otherwise just return the value. -*----------------------------------------------------------------------------*/ -float32 float32_squash_input_denormal(float32 a STATUS_PARAM) -{ - if (STATUS(flush_inputs_to_zero)) { - if (extractFloat32Exp(a) == 0 && extractFloat32Frac(a) != 0) { - float_raise(float_flag_input_denormal STATUS_VAR); - return make_float32(float32_val(a) & 0x80000000); - } - } - return a; -} - /*---------------------------------------------------------------------------- | Normalizes the subnormal single-precision floating-point value represented | by the denormalized significand `aSig'. 
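/*----------------------------------------------------------------------------
| Sketch of the clz-based normalization used by normalizeFloat32Subnormal
| below: shift the significand until its leading one sits in the hidden-bit
| position (bit 23) and compensate in the exponent. Assumes the GCC/Clang
| builtin for count-leading-zeros; softfloat's clz32 plays the same role.
*----------------------------------------------------------------------------*/

#include <stdint.h>

static void sketch_normalize_subnormal32(uint32_t aSig, int *zExp,
                                         uint32_t *zSig)
{
    int shiftCount = __builtin_clz(aSig) - 8;   /* requires aSig != 0 */
    *zSig = aSig << shiftCount;
    *zExp = 1 - shiftCount;
}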
The normalized exponent and @@ -313,35 +3591,16 @@ float32 float32_squash_input_denormal(float32 a STATUS_PARAM) *----------------------------------------------------------------------------*/ static void - normalizeFloat32Subnormal(uint32_t aSig, int_fast16_t *zExpPtr, uint32_t *zSigPtr) + normalizeFloat32Subnormal(uint32_t aSig, int *zExpPtr, uint32_t *zSigPtr) { - int8 shiftCount; + int8_t shiftCount; - shiftCount = countLeadingZeros32( aSig ) - 8; + shiftCount = clz32(aSig) - 8; *zSigPtr = aSig<float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); switch (roundingMode) { case float_round_nearest_even: @@ -387,8 +3647,11 @@ static float32 roundAndPackFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig case float_round_down: roundIncrement = zSign ? 0x7f : 0; break; + case float_round_to_odd: + roundIncrement = zSig & 0x80 ? 0 : 0x7f; + break; default: - float_raise(float_flag_invalid STATUS_VAR); + abort(); break; } roundBits = zSig & 0x7F; @@ -397,25 +3660,39 @@ static float32 roundAndPackFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig || ( ( zExp == 0xFD ) && ( (int32_t) ( zSig + roundIncrement ) < 0 ) ) ) { - float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR); - return packFloat32( zSign, 0xFF, - ( roundIncrement == 0 )); + bool overflow_to_inf = roundingMode != float_round_to_odd && + roundIncrement != 0; + float_raise(float_flag_overflow | float_flag_inexact, status); + return packFloat32(zSign, 0xFF, -!overflow_to_inf); } if ( zExp < 0 ) { - if (STATUS(flush_to_zero)) { - float_raise(float_flag_output_denormal STATUS_VAR); + if (status->flush_to_zero) { + float_raise(float_flag_output_denormal, status); return packFloat32(zSign, 0, 0); } isTiny = - ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) + (status->float_detect_tininess + == float_tininess_before_rounding) || ( zExp < -1 ) || ( zSig + roundIncrement < 0x80000000 ); shift32RightJamming( zSig, - zExp, &zSig ); zExp = 0; roundBits = zSig & 0x7F; - if ( isTiny && roundBits ) float_raise( float_flag_underflow STATUS_VAR); + if (isTiny && roundBits) { + float_raise(float_flag_underflow, status); + } + if (roundingMode == float_round_to_odd) { + /* + * For round-to-odd case, the roundIncrement depends on + * zSig which just changed. + */ + roundIncrement = zSig & 0x80 ? 0 : 0x7f; + } } } - if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact; + if (roundBits) { + status->float_exception_flags |= float_flag_inexact; + } zSig = ( zSig + roundIncrement )>>7; zSig &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven ); if ( zSig == 0 ) zExp = 0; @@ -433,63 +3710,17 @@ static float32 roundAndPackFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig *----------------------------------------------------------------------------*/ static float32 - normalizeRoundAndPackFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig STATUS_PARAM) + normalizeRoundAndPackFloat32(flag zSign, int zExp, uint32_t zSig, + float_status *status) { - int8 shiftCount; + int8_t shiftCount; - shiftCount = countLeadingZeros32( zSig ) - 1; - return roundAndPackFloat32( zSign, zExp - shiftCount, zSig<>52 ) & 0x7FF; - -} - -/*---------------------------------------------------------------------------- -| Returns the sign bit of the double-precision floating-point value `a'. 
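/*----------------------------------------------------------------------------
| The overflow path of roundAndPackFloat32 above packs its result as
| packFloat32(zSign, 0xFF, -!overflow_to_inf). Because packFloat32 combines
| the fields by addition, a fraction of all ones borrows one out of the
| exponent field, yielding the largest finite number instead of infinity;
| round-to-odd therefore never overflows to infinity. Standalone sketch:
*----------------------------------------------------------------------------*/

#include <stdbool.h>
#include <stdint.h>

static uint32_t sketch_pack_float32(bool sign, uint32_t exp, uint32_t sig)
{
    return ((uint32_t)sign << 31) + (exp << 23) + sig;   /* note: '+' */
}

static uint32_t sketch_overflow_result32(bool sign, bool overflow_to_inf)
{
    /* sig == 0 gives 0x7F800000 (inf); sig == 0xFFFFFFFF borrows down to
     * 0x7F7FFFFF, the largest finite float32 */
    return sketch_pack_float32(sign, 0xFF, 0u - (uint32_t)!overflow_to_inf);
}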
-*----------------------------------------------------------------------------*/ - -static inline flag extractFloat64Sign( float64 a ) -{ - - return float64_val(a)>>63; - -} - -/*---------------------------------------------------------------------------- -| If `a' is denormal and we are in flush-to-zero mode then set the -| input-denormal exception and return zero. Otherwise just return the value. -*----------------------------------------------------------------------------*/ -float64 float64_squash_input_denormal(float64 a STATUS_PARAM) -{ - if (STATUS(flush_inputs_to_zero)) { - if (extractFloat64Exp(a) == 0 && extractFloat64Frac(a) != 0) { - float_raise(float_flag_input_denormal STATUS_VAR); - return make_float64(float64_val(a) & (1ULL << 63)); - } - } - return a; -} - /*---------------------------------------------------------------------------- | Normalizes the subnormal double-precision floating-point value represented | by the denormalized significand `aSig'. The normalized exponent and @@ -498,11 +3729,11 @@ float64 float64_squash_input_denormal(float64 a STATUS_PARAM) *----------------------------------------------------------------------------*/ static void - normalizeFloat64Subnormal(uint64_t aSig, int_fast16_t *zExpPtr, uint64_t *zSigPtr) + normalizeFloat64Subnormal(uint64_t aSig, int *zExpPtr, uint64_t *zSigPtr) { - int8 shiftCount; + int8_t shiftCount; - shiftCount = countLeadingZeros64( aSig ) - 11; + shiftCount = clz64(aSig) - 11; *zSigPtr = aSig<float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); switch (roundingMode) { case float_round_nearest_even: @@ -572,9 +3804,11 @@ static float64 roundAndPackFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig case float_round_down: roundIncrement = zSign ? 0x3ff : 0; break; - default: - float_raise(float_flag_invalid STATUS_VAR); + case float_round_to_odd: + roundIncrement = (zSig & 0x400) ? 0 : 0x3ff; break; + default: + abort(); } roundBits = zSig & 0x3FF; if ( 0x7FD <= (uint16_t) zExp ) { @@ -582,25 +3816,39 @@ static float64 roundAndPackFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig || ( ( zExp == 0x7FD ) && ( (int64_t) ( zSig + roundIncrement ) < 0 ) ) ) { - float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR); - return packFloat64( zSign, 0x7FF, - ( roundIncrement == 0 )); + bool overflow_to_inf = roundingMode != float_round_to_odd && + roundIncrement != 0; + float_raise(float_flag_overflow | float_flag_inexact, status); + return packFloat64(zSign, 0x7FF, -(!overflow_to_inf)); } if ( zExp < 0 ) { - if (STATUS(flush_to_zero)) { - float_raise(float_flag_output_denormal STATUS_VAR); + if (status->flush_to_zero) { + float_raise(float_flag_output_denormal, status); return packFloat64(zSign, 0, 0); } isTiny = - ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) + (status->float_detect_tininess + == float_tininess_before_rounding) || ( zExp < -1 ) - || ( zSig + roundIncrement < LIT64( 0x8000000000000000 ) ); + || ( zSig + roundIncrement < UINT64_C(0x8000000000000000) ); shift64RightJamming( zSig, - zExp, &zSig ); zExp = 0; roundBits = zSig & 0x3FF; - if ( isTiny && roundBits ) float_raise( float_flag_underflow STATUS_VAR); + if (isTiny && roundBits) { + float_raise(float_flag_underflow, status); + } + if (roundingMode == float_round_to_odd) { + /* + * For round-to-odd case, the roundIncrement depends on + * zSig which just changed. + */ + roundIncrement = (zSig & 0x400) ? 
0 : 0x3ff; + } } } - if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact; + if (roundBits) { + status->float_exception_flags |= float_flag_inexact; + } zSig = ( zSig + roundIncrement )>>10; zSig &= ~ ( ( ( roundBits ^ 0x200 ) == 0 ) & roundNearestEven ); if ( zSig == 0 ) zExp = 0; @@ -618,48 +3866,14 @@ static float64 roundAndPackFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig *----------------------------------------------------------------------------*/ static float64 - normalizeRoundAndPackFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig STATUS_PARAM) + normalizeRoundAndPackFloat64(flag zSign, int zExp, uint64_t zSig, + float_status *status) { - int8 shiftCount; + int8_t shiftCount; - shiftCount = countLeadingZeros64( zSig ) - 1; - return roundAndPackFloat64( zSign, zExp - shiftCount, zSig<>15; + shiftCount = clz64(zSig) - 1; + return roundAndPackFloat64(zSign, zExp - shiftCount, zSig<float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); if ( roundingPrecision == 80 ) goto precision80; if ( roundingPrecision == 64 ) { - roundIncrement = LIT64( 0x0000000000000400 ); - roundMask = LIT64( 0x00000000000007FF ); + roundIncrement = UINT64_C(0x0000000000000400); + roundMask = UINT64_C(0x00000000000007FF); } else if ( roundingPrecision == 32 ) { - roundIncrement = LIT64( 0x0000008000000000 ); - roundMask = LIT64( 0x000000FFFFFFFFFF ); + roundIncrement = UINT64_C(0x0000008000000000); + roundMask = UINT64_C(0x000000FFFFFFFFFF); } else { goto precision80; @@ -758,8 +3955,7 @@ static floatx80 roundIncrement = zSign ? roundMask : 0; break; default: - float_raise(float_flag_invalid STATUS_VAR); - break; + abort(); } roundBits = zSig0 & roundMask; if ( 0x7FFD <= (uint32_t) ( zExp - 1 ) ) { @@ -769,19 +3965,24 @@ static floatx80 goto overflow; } if ( zExp <= 0 ) { - if (STATUS(flush_to_zero)) { - float_raise(float_flag_output_denormal STATUS_VAR); + if (status->flush_to_zero) { + float_raise(float_flag_output_denormal, status); return packFloatx80(zSign, 0, 0); } isTiny = - ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) + (status->float_detect_tininess + == float_tininess_before_rounding) || ( zExp < 0 ) || ( zSig0 <= zSig0 + roundIncrement ); shift64RightJamming( zSig0, 1 - zExp, &zSig0 ); zExp = 0; roundBits = zSig0 & roundMask; - if ( isTiny && roundBits ) float_raise( float_flag_underflow STATUS_VAR); - if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact; + if (isTiny && roundBits) { + float_raise(float_flag_underflow, status); + } + if (roundBits) { + status->float_exception_flags |= float_flag_inexact; + } zSig0 += roundIncrement; if ( (int64_t) zSig0 < 0 ) zExp = 1; roundIncrement = roundMask + 1; @@ -792,11 +3993,13 @@ static floatx80 return packFloatx80( zSign, zExp, zSig0 ); } } - if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact; + if (roundBits) { + status->float_exception_flags |= float_flag_inexact; + } zSig0 += roundIncrement; - if ( zSig0 < (uint64_t)roundIncrement ) { + if ( zSig0 < roundIncrement ) { ++zExp; - zSig0 = LIT64( 0x8000000000000000 ); + zSig0 = UINT64_C(0x8000000000000000); } roundIncrement = roundMask + 1; if ( roundNearestEven && ( roundBits<<1 == roundIncrement ) ) { @@ -821,37 +4024,43 @@ static floatx80 increment = zSign && zSig1; break; default: - float_raise(float_flag_invalid STATUS_VAR); - break; + abort(); } if ( 0x7FFD <= (uint32_t) ( zExp - 1 ) ) { if ( ( 0x7FFE < zExp ) || ( ( zExp == 0x7FFE ) - && ( zSig0 == LIT64( 0xFFFFFFFFFFFFFFFF ) ) + && ( 
zSig0 == UINT64_C(0xFFFFFFFFFFFFFFFF) ) && increment ) ) { roundMask = 0; overflow: - float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR); + float_raise(float_flag_overflow | float_flag_inexact, status); if ( ( roundingMode == float_round_to_zero ) || ( zSign && ( roundingMode == float_round_up ) ) || ( ! zSign && ( roundingMode == float_round_down ) ) ) { return packFloatx80( zSign, 0x7FFE, ~ roundMask ); } - return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + return packFloatx80(zSign, + floatx80_infinity_high, + floatx80_infinity_low); } if ( zExp <= 0 ) { isTiny = - ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) + (status->float_detect_tininess + == float_tininess_before_rounding) || ( zExp < 0 ) || ! increment - || ( zSig0 < LIT64( 0xFFFFFFFFFFFFFFFF ) ); + || ( zSig0 < UINT64_C(0xFFFFFFFFFFFFFFFF) ); shift64ExtraRightJamming( zSig0, zSig1, 1 - zExp, &zSig0, &zSig1 ); zExp = 0; - if ( isTiny && zSig1 ) float_raise( float_flag_underflow STATUS_VAR); - if ( zSig1 ) STATUS(float_exception_flags) |= float_flag_inexact; + if (isTiny && zSig1) { + float_raise(float_flag_underflow, status); + } + if (zSig1) { + status->float_exception_flags |= float_flag_inexact; + } switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: @@ -867,8 +4076,7 @@ static floatx80 increment = zSign && zSig1; break; default: - float_raise(float_flag_invalid STATUS_VAR); - break; + abort(); } if ( increment ) { ++zSig0; @@ -879,12 +4087,14 @@ static floatx80 return packFloatx80( zSign, zExp, zSig0 ); } } - if ( zSig1 ) STATUS(float_exception_flags) |= float_flag_inexact; + if (zSig1) { + status->float_exception_flags |= float_flag_inexact; + } if ( increment ) { ++zSig0; if ( zSig0 == 0 ) { ++zExp; - zSig0 = LIT64( 0x8000000000000000 ); + zSig0 = UINT64_C(0x8000000000000000); } else { zSig0 &= ~ ( ( (uint64_t) ( zSig1<<1 ) == 0 ) & roundNearestEven ); @@ -906,23 +4116,23 @@ static floatx80 | normalized. *----------------------------------------------------------------------------*/ -static floatx80 - normalizeRoundAndPackFloatx80( - int8 roundingPrecision, flag zSign, int32 zExp, uint64_t zSig0, uint64_t zSig1 - STATUS_PARAM) +floatx80 normalizeRoundAndPackFloatx80(int8_t roundingPrecision, + flag zSign, int32_t zExp, + uint64_t zSig0, uint64_t zSig1, + float_status *status) { - int8 shiftCount; + int8_t shiftCount; if ( zSig0 == 0 ) { zSig0 = zSig1; zSig1 = 0; zExp -= 64; } - shiftCount = countLeadingZeros64( zSig0 ); + shiftCount = clz64(zSig0); shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 ); zExp -= shiftCount; - return - roundAndPackFloatx80( roundingPrecision, zSign, zExp, zSig0, zSig1 STATUS_VAR); + return roundAndPackFloatx80(roundingPrecision, zSign, zExp, + zSig0, zSig1, status); } @@ -946,7 +4156,7 @@ static inline uint64_t extractFloat128Frac1( float128 a ) static inline uint64_t extractFloat128Frac0( float128 a ) { - return a.high & LIT64( 0x0000FFFFFFFFFFFF ); + return a.high & UINT64_C(0x0000FFFFFFFFFFFF); } @@ -955,7 +4165,7 @@ static inline uint64_t extractFloat128Frac0( float128 a ) | `a'. 
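/*----------------------------------------------------------------------------
| Field layout assumed by the float128 helpers above: sign in bit 63 of the
| high word, a 15-bit exponent below it, and a 112-bit fraction split as 48
| bits in `high' plus all 64 bits of `low'. Standalone sketch of the
| extractors ("sketch_" names are illustrative):
*----------------------------------------------------------------------------*/

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t low, high; } sketch_float128;

static uint64_t sketch_f128_frac0(sketch_float128 a)
{
    return a.high & UINT64_C(0x0000FFFFFFFFFFFF);
}
static uint64_t sketch_f128_frac1(sketch_float128 a) { return a.low; }
static int32_t sketch_f128_exp(sketch_float128 a)
{
    return (a.high >> 48) & 0x7FFF;
}
static bool sketch_f128_sign(sketch_float128 a) { return a.high >> 63; }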
*----------------------------------------------------------------------------*/ -static inline int32 extractFloat128Exp( float128 a ) +static inline int32_t extractFloat128Exp( float128 a ) { return ( a.high>>48 ) & 0x7FFF; @@ -987,15 +4197,15 @@ static void normalizeFloat128Subnormal( uint64_t aSig0, uint64_t aSig1, - int32 *zExpPtr, + int32_t *zExpPtr, uint64_t *zSig0Ptr, uint64_t *zSig1Ptr ) { - int8 shiftCount; + int8_t shiftCount; if ( aSig0 == 0 ) { - shiftCount = countLeadingZeros64( aSig1 ) - 15; + shiftCount = clz64(aSig1) - 15; if ( shiftCount < 0 ) { *zSig0Ptr = aSig1>>( - shiftCount ); *zSig1Ptr = aSig1<<( shiftCount & 63 ); @@ -1007,7 +4217,7 @@ static void *zExpPtr = - shiftCount - 63; } else { - shiftCount = countLeadingZeros64( aSig0 ) - 15; + shiftCount = clz64(aSig0) - 15; shortShift128Left( aSig0, aSig1, shiftCount, zSig0Ptr, zSig1Ptr ); *zExpPtr = 1 - shiftCount; } @@ -1028,7 +4238,7 @@ static void *----------------------------------------------------------------------------*/ static inline float128 - packFloat128( flag zSign, int32 zExp, uint64_t zSig0, uint64_t zSig1 ) + packFloat128( flag zSign, int32_t zExp, uint64_t zSig0, uint64_t zSig1 ) { float128 z; @@ -1059,14 +4269,14 @@ static inline float128 | overflow follows the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -static float128 - roundAndPackFloat128( - flag zSign, int32 zExp, uint64_t zSig0, uint64_t zSig1, uint64_t zSig2 STATUS_PARAM) +static float128 roundAndPackFloat128(flag zSign, int32_t zExp, + uint64_t zSig0, uint64_t zSig1, + uint64_t zSig2, float_status *status) { - int8 roundingMode; - flag roundNearestEven, increment = 0, isTiny; + int8_t roundingMode; + flag roundNearestEven, increment, isTiny; - roundingMode = STATUS(float_rounding_mode); + roundingMode = status->float_rounding_mode; roundNearestEven = ( roundingMode == float_round_nearest_even ); switch (roundingMode) { case float_round_nearest_even: @@ -1082,56 +4292,62 @@ static float128 case float_round_down: increment = zSign && zSig2; break; - default: - float_raise(float_flag_invalid STATUS_VAR); + case float_round_to_odd: + increment = !(zSig1 & 0x1) && zSig2; break; + default: + abort(); } if ( 0x7FFD <= (uint32_t) zExp ) { if ( ( 0x7FFD < zExp ) || ( ( zExp == 0x7FFD ) && eq128( - LIT64( 0x0001FFFFFFFFFFFF ), - LIT64( 0xFFFFFFFFFFFFFFFF ), + UINT64_C(0x0001FFFFFFFFFFFF), + UINT64_C(0xFFFFFFFFFFFFFFFF), zSig0, zSig1 ) && increment ) ) { - float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR); + float_raise(float_flag_overflow | float_flag_inexact, status); if ( ( roundingMode == float_round_to_zero ) || ( zSign && ( roundingMode == float_round_up ) ) || ( ! zSign && ( roundingMode == float_round_down ) ) + || (roundingMode == float_round_to_odd) ) { return packFloat128( zSign, 0x7FFE, - LIT64( 0x0000FFFFFFFFFFFF ), - LIT64( 0xFFFFFFFFFFFFFFFF ) + UINT64_C(0x0000FFFFFFFFFFFF), + UINT64_C(0xFFFFFFFFFFFFFFFF) ); } return packFloat128( zSign, 0x7FFF, 0, 0 ); } if ( zExp < 0 ) { - if (STATUS(flush_to_zero)) { - float_raise(float_flag_output_denormal STATUS_VAR); + if (status->flush_to_zero) { + float_raise(float_flag_output_denormal, status); return packFloat128(zSign, 0, 0, 0); } isTiny = - ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) + (status->float_detect_tininess + == float_tininess_before_rounding) || ( zExp < -1 ) || ! 
increment || lt128( zSig0, zSig1, - LIT64( 0x0001FFFFFFFFFFFF ), - LIT64( 0xFFFFFFFFFFFFFFFF ) + UINT64_C(0x0001FFFFFFFFFFFF), + UINT64_C(0xFFFFFFFFFFFFFFFF) ); shift128ExtraRightJamming( zSig0, zSig1, zSig2, - zExp, &zSig0, &zSig1, &zSig2 ); zExp = 0; - if ( isTiny && zSig2 ) float_raise( float_flag_underflow STATUS_VAR); + if (isTiny && zSig2) { + float_raise(float_flag_underflow, status); + } switch (roundingMode) { case float_round_nearest_even: case float_round_ties_away: @@ -1146,13 +4362,17 @@ static float128 case float_round_down: increment = zSign && zSig2; break; - default: - float_raise(float_flag_invalid STATUS_VAR); + case float_round_to_odd: + increment = !(zSig1 & 0x1) && zSig2; break; + default: + abort(); } } } - if ( zSig2 ) STATUS(float_exception_flags) |= float_flag_inexact; + if (zSig2) { + status->float_exception_flags |= float_flag_inexact; + } if ( increment ) { add128( zSig0, zSig1, 0, 1, &zSig0, &zSig1 ); zSig1 &= ~ ( ( zSig2 + zSig2 == 0 ) & roundNearestEven ); @@ -1174,11 +4394,11 @@ static float128 | point exponent. *----------------------------------------------------------------------------*/ -static float128 - normalizeRoundAndPackFloat128( - flag zSign, int32 zExp, uint64_t zSig0, uint64_t zSig1 STATUS_PARAM) +static float128 normalizeRoundAndPackFloat128(flag zSign, int32_t zExp, + uint64_t zSig0, uint64_t zSig1, + float_status *status) { - int8 shiftCount; + int8_t shiftCount; uint64_t zSig2; if ( zSig0 == 0 ) { @@ -1186,7 +4406,7 @@ static float128 zSig1 = 0; zExp -= 64; } - shiftCount = countLeadingZeros64( zSig0 ) - 15; + shiftCount = clz64(zSig0) - 15; if ( 0 <= shiftCount ) { zSig2 = 0; shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 ); @@ -1196,48 +4416,10 @@ static float128 zSig0, zSig1, 0, - shiftCount, &zSig0, &zSig1, &zSig2 ); } zExp -= shiftCount; - return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 STATUS_VAR); + return roundAndPackFloat128(zSign, zExp, zSig0, zSig1, zSig2, status); } -/*---------------------------------------------------------------------------- -| Returns the result of converting the 32-bit two's complement integer `a' -| to the single-precision floating-point format. The conversion is performed -| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -float32 int32_to_float32(int32_t a STATUS_PARAM) -{ - flag zSign; - - if ( a == 0 ) return float32_zero; - if ( a == (int32_t) 0x80000000 ) return packFloat32( 1, 0x9E, 0 ); - zSign = ( a < 0 ); - return normalizeRoundAndPackFloat32( zSign, 0x9C, zSign ? - a : a STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the 32-bit two's complement integer `a' -| to the double-precision floating-point format. The conversion is performed -| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -float64 int32_to_float64(int32_t a STATUS_PARAM) -{ - flag zSign; - uint32 absA; - int8 shiftCount; - uint64_t zSig; - - if ( a == 0 ) return float64_zero; - zSign = ( a < 0 ); - absA = (zSign && (a != 0x80000000)) ? 
- a : a; - shiftCount = countLeadingZeros32( absA ) + 21; - zSig = absA; - return packFloat64( zSign, 0x432 - shiftCount, zSig<>( - shiftCount ); - if ( (uint32_t) ( aSig<<( shiftCount & 31 ) ) ) { - STATUS(float_exception_flags) |= float_flag_inexact; - } - if ( aSign ) z = - z; - return z; - -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the single-precision floating-point value -| `a' to the 16-bit two's complement integer format. The conversion is -| performed according to the IEC/IEEE Standard for Binary Floating-Point -| Arithmetic, except that the conversion is always rounded toward zero. -| If `a' is a NaN, the largest positive integer is returned. Otherwise, if -| the conversion overflows, the largest integer with the same sign as `a' is -| returned. -*----------------------------------------------------------------------------*/ - -int_fast16_t float32_to_int16_round_to_zero(float32 a STATUS_PARAM) -{ - flag aSign; - int_fast16_t aExp, shiftCount; - uint32_t aSig; - int32 z; - - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - aSign = extractFloat32Sign( a ); - shiftCount = aExp - 0x8E; - if ( 0 <= shiftCount ) { - if ( float32_val(a) != 0xC7000000 ) { - float_raise( float_flag_invalid STATUS_VAR); - if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) { - return 0x7FFF; - } - } - return (int32_t) 0xffff8000; - } - else if ( aExp <= 0x7E ) { - if ( aExp | aSig ) { - STATUS(float_exception_flags) |= float_flag_inexact; - } - return 0; - } - shiftCount -= 0x10; - aSig = ( aSig | 0x00800000 )<<8; - z = aSig>>( - shiftCount ); - if ( (uint32_t) ( aSig<<( shiftCount & 31 ) ) ) { - STATUS(float_exception_flags) |= float_flag_inexact; - } - if ( aSign ) { - z = - z; - } - return z; - -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the single-precision floating-point value -| `a' to the 64-bit two's complement integer format. The conversion is -| performed according to the IEC/IEEE Standard for Binary Floating-Point -| Arithmetic---which means in particular that the conversion is rounded -| according to the current rounding mode. If `a' is a NaN, the largest -| positive integer is returned. Otherwise, if the conversion overflows, the -| largest integer with the same sign as `a' is returned. -*----------------------------------------------------------------------------*/ - -int64 float32_to_int64( float32 a STATUS_PARAM ) -{ - flag aSign; - int_fast16_t aExp, shiftCount; - uint32_t aSig; - uint64_t aSig64, aSigExtra; - a = float32_squash_input_denormal(a STATUS_VAR); - - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - aSign = extractFloat32Sign( a ); - shiftCount = 0xBE - aExp; - if ( shiftCount < 0 ) { - float_raise( float_flag_invalid STATUS_VAR); - if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) { - return LIT64( 0x7FFFFFFFFFFFFFFF ); - } - return (int64_t) LIT64( 0x8000000000000000 ); - } - if ( aExp ) aSig |= 0x00800000; - aSig64 = aSig; - aSig64 <<= 40; - shift64ExtraRightJamming( aSig64, 0, shiftCount, &aSig64, &aSigExtra ); - return roundAndPackInt64( aSign, aSig64, aSigExtra STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the single-precision floating-point value -| `a' to the 64-bit unsigned integer format. 
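/*----------------------------------------------------------------------------
| The removed int32_to_float64 relies on the conversion being exact: a
| 32-bit integer always fits in float64's 53-bit significand, so no rounding
| path is needed. Equivalent standalone sketch producing the raw float64
| encoding directly (the removed code reaches the same result via
| packFloat64, where the hidden bit carries into the exponent field):
*----------------------------------------------------------------------------*/

#include <stdint.h>

static uint64_t sketch_int32_to_float64_bits(int32_t a)
{
    if (a == 0) {
        return 0;
    }
    uint64_t sign = (uint64_t)(a < 0) << 63;
    uint64_t absA = (a < 0) ? 0u - (uint32_t)a : (uint32_t)a;
    int msb = 63 - __builtin_clzll(absA);           /* GCC/Clang builtin */
    uint64_t frac = (absA << (52 - msb)) & UINT64_C(0x000FFFFFFFFFFFFF);
    return sign | ((uint64_t)(1023 + msb) << 52) | frac;
}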
The conversion is -| performed according to the IEC/IEEE Standard for Binary Floating-Point -| Arithmetic---which means in particular that the conversion is rounded -| according to the current rounding mode. If `a' is a NaN, the largest -| unsigned integer is returned. Otherwise, if the conversion overflows, the -| largest unsigned integer is returned. If the 'a' is negative, the result -| is rounded and zero is returned; values that do not round to zero will -| raise the inexact exception flag. -*----------------------------------------------------------------------------*/ - -uint64 float32_to_uint64(float32 a STATUS_PARAM) -{ - flag aSign; - int_fast16_t aExp, shiftCount; - uint32_t aSig; - uint64_t aSig64, aSigExtra; - a = float32_squash_input_denormal(a STATUS_VAR); - - aSig = extractFloat32Frac(a); - aExp = extractFloat32Exp(a); - aSign = extractFloat32Sign(a); - if ((aSign) && (aExp > 126)) { - float_raise(float_flag_invalid STATUS_VAR); - if (float32_is_any_nan(a)) { - return LIT64(0xFFFFFFFFFFFFFFFF); - } else { - return 0; - } - } - shiftCount = 0xBE - aExp; - if (aExp) { - aSig |= 0x00800000; - } - if (shiftCount < 0) { - float_raise(float_flag_invalid STATUS_VAR); - return LIT64(0xFFFFFFFFFFFFFFFF); - } - - aSig64 = aSig; - aSig64 <<= 40; - shift64ExtraRightJamming(aSig64, 0, shiftCount, &aSig64, &aSigExtra); - return roundAndPackUint64(aSign, aSig64, aSigExtra STATUS_VAR); -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the single-precision floating-point value -| `a' to the 64-bit unsigned integer format. The conversion is -| performed according to the IEC/IEEE Standard for Binary Floating-Point -| Arithmetic, except that the conversion is always rounded toward zero. If -| `a' is a NaN, the largest unsigned integer is returned. Otherwise, if the -| conversion overflows, the largest unsigned integer is returned. If the -| 'a' is negative, the result is rounded and zero is returned; values that do -| not round to zero will raise the inexact flag. -*----------------------------------------------------------------------------*/ - -uint64 float32_to_uint64_round_to_zero(float32 a STATUS_PARAM) -{ - int64_t v; - signed char current_rounding_mode = STATUS(float_rounding_mode); - set_float_rounding_mode(float_round_to_zero STATUS_VAR); - v = float32_to_uint64(a STATUS_VAR); - set_float_rounding_mode(current_rounding_mode STATUS_VAR); - return v; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the single-precision floating-point value -| `a' to the 64-bit two's complement integer format. The conversion is -| performed according to the IEC/IEEE Standard for Binary Floating-Point -| Arithmetic, except that the conversion is always rounded toward zero. If -| `a' is a NaN, the largest positive integer is returned. Otherwise, if the -| conversion overflows, the largest integer with the same sign as `a' is -| returned. 
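/*----------------------------------------------------------------------------
| The removed float32_to_uint64_round_to_zero shows the pattern this file
| uses for "..._round_to_zero" variants: temporarily force the rounding
| mode, reuse the generic converter, then restore the caller's mode. Sketch
| with a stand-in status type; sketch_to_uint64 is an assumed helper, not a
| real softfloat entry point.
*----------------------------------------------------------------------------*/

#include <stdint.h>

typedef struct { signed char float_rounding_mode; } sketch_status;
enum { sketch_round_nearest_even, sketch_round_to_zero };

static uint64_t sketch_to_uint64(uint32_t a, sketch_status *s); /* assumed */

static uint64_t sketch_to_uint64_round_to_zero(uint32_t a, sketch_status *s)
{
    signed char saved = s->float_rounding_mode;
    uint64_t v;
    s->float_rounding_mode = sketch_round_to_zero;
    v = sketch_to_uint64(a, s);
    s->float_rounding_mode = saved;
    return v;
}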
-*----------------------------------------------------------------------------*/ - -int64 float32_to_int64_round_to_zero( float32 a STATUS_PARAM ) -{ - flag aSign; - int_fast16_t aExp, shiftCount; - uint32_t aSig; - uint64_t aSig64; - int64 z; - a = float32_squash_input_denormal(a STATUS_VAR); - - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - aSign = extractFloat32Sign( a ); - shiftCount = aExp - 0xBE; - if ( 0 <= shiftCount ) { - if ( float32_val(a) != 0xDF000000 ) { - float_raise( float_flag_invalid STATUS_VAR); - if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) { - return LIT64( 0x7FFFFFFFFFFFFFFF ); - } - } - return (int64_t) LIT64( 0x8000000000000000 ); - } - else if ( aExp <= 0x7E ) { - if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact; - return 0; - } - aSig64 = aSig | 0x00800000; - aSig64 <<= 40; - z = aSig64>>( - shiftCount ); - if ( (uint64_t) ( aSig64<<( shiftCount & 63 ) ) ) { - STATUS(float_exception_flags) |= float_flag_inexact; - } - if ( aSign ) z = - z; - return z; - -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the single-precision floating-point value -| `a' to the double-precision floating-point format. The conversion is -| performed according to the IEC/IEEE Standard for Binary Floating-Point -| Arithmetic. -*----------------------------------------------------------------------------*/ - -float64 float32_to_float64( float32 a STATUS_PARAM ) -{ - flag aSign; - int_fast16_t aExp; - uint32_t aSig; - a = float32_squash_input_denormal(a STATUS_VAR); - - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - aSign = extractFloat32Sign( a ); - if ( aExp == 0xFF ) { - if ( aSig ) return commonNaNToFloat64( float32ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); - return packFloat64( aSign, 0x7FF, 0 ); - } - if ( aExp == 0 ) { - if ( aSig == 0 ) return packFloat64( aSign, 0, 0 ); - normalizeFloat32Subnormal( aSig, &aExp, &aSig ); - --aExp; - } - return packFloat64( aSign, aExp + 0x380, ( (uint64_t) aSig )<<29 ); - + return normalizeRoundAndPackFloat128(0, 0x406E, 0, a, status); } /*---------------------------------------------------------------------------- @@ -1744,19 +4541,23 @@ float64 float32_to_float64( float32 a STATUS_PARAM ) | Arithmetic. *----------------------------------------------------------------------------*/ -floatx80 float32_to_floatx80( float32 a STATUS_PARAM ) +floatx80 float32_to_floatx80(float32 a, float_status *status) { flag aSign; - int_fast16_t aExp; + int aExp; uint32_t aSig; - a = float32_squash_input_denormal(a STATUS_VAR); + a = float32_squash_input_denormal(a, status); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0xFF ) { - if ( aSig ) return commonNaNToFloatx80( float32ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); - return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + if (aSig) { + return commonNaNToFloatx80(float32ToCommonNaN(a, status), status); + } + return packFloatx80(aSign, + floatx80_infinity_high, + floatx80_infinity_low); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 ); @@ -1774,18 +4575,20 @@ floatx80 float32_to_floatx80( float32 a STATUS_PARAM ) | Arithmetic. 
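/*----------------------------------------------------------------------------
| Arithmetic behind the removed float32_to_float64: re-bias the exponent
| from 127 to 1023 (hence the +0x380) and widen the fraction from 23 to 52
| bits (hence the <<29). Standalone sketch for normal inputs only; the NaN,
| infinity, zero and subnormal branches are omitted.
*----------------------------------------------------------------------------*/

#include <stdint.h>

static uint64_t sketch_float32_to_float64_bits(uint32_t f)
{
    uint64_t sign = (uint64_t)(f >> 31) << 63;
    uint64_t exp = (f >> 23) & 0xFF;          /* assumed 1..254 (normal) */
    uint64_t frac = f & 0x007FFFFF;
    return sign | ((exp + 0x380) << 52) | (frac << 29);
}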
*----------------------------------------------------------------------------*/ -float128 float32_to_float128( float32 a STATUS_PARAM ) +float128 float32_to_float128(float32 a, float_status *status) { flag aSign; - int_fast16_t aExp; + int aExp; uint32_t aSig; - a = float32_squash_input_denormal(a STATUS_VAR); + a = float32_squash_input_denormal(a, status); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0xFF ) { - if ( aSig ) return commonNaNToFloat128( float32ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + if (aSig) { + return commonNaNToFloat128(float32ToCommonNaN(a, status), status); + } return packFloat128( aSign, 0x7FFF, 0, 0 ); } if ( aExp == 0 ) { @@ -1797,430 +4600,23 @@ float128 float32_to_float128( float32 a STATUS_PARAM ) } -/*---------------------------------------------------------------------------- -| Rounds the single-precision floating-point value `a' to an integer, and -| returns the result as a single-precision floating-point value. The -| operation is performed according to the IEC/IEEE Standard for Binary -| Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -float32 float32_round_to_int( float32 a STATUS_PARAM) -{ - flag aSign; - int_fast16_t aExp; - uint32_t lastBitMask, roundBitsMask; - uint32_t z; - a = float32_squash_input_denormal(a STATUS_VAR); - - aExp = extractFloat32Exp( a ); - if ( 0x96 <= aExp ) { - if ( ( aExp == 0xFF ) && extractFloat32Frac( a ) ) { - return propagateFloat32NaN( a, a STATUS_VAR ); - } - return a; - } - if ( aExp <= 0x7E ) { - if ( (uint32_t) ( float32_val(a)<<1 ) == 0 ) return a; - STATUS(float_exception_flags) |= float_flag_inexact; - aSign = extractFloat32Sign( a ); - switch ( STATUS(float_rounding_mode) ) { - case float_round_nearest_even: - if ( ( aExp == 0x7E ) && extractFloat32Frac( a ) ) { - return packFloat32( aSign, 0x7F, 0 ); - } - break; - case float_round_ties_away: - if (aExp == 0x7E) { - return packFloat32(aSign, 0x7F, 0); - } - break; - case float_round_down: - return make_float32(aSign ? 0xBF800000 : 0); - case float_round_up: - return make_float32(aSign ? 0x80000000 : 0x3F800000); - } - return packFloat32( aSign, 0, 0 ); - } - lastBitMask = 1; - lastBitMask <<= 0x96 - aExp; - roundBitsMask = lastBitMask - 1; - z = float32_val(a); - switch (STATUS(float_rounding_mode)) { - case float_round_nearest_even: - z += lastBitMask>>1; - if ((z & roundBitsMask) == 0) { - z &= ~lastBitMask; - } - break; - case float_round_ties_away: - z += lastBitMask >> 1; - break; - case float_round_to_zero: - break; - case float_round_up: - if (!extractFloat32Sign(make_float32(z))) { - z += roundBitsMask; - } - break; - case float_round_down: - if (extractFloat32Sign(make_float32(z))) { - z += roundBitsMask; - } - break; - default: - float_raise(float_flag_invalid STATUS_VAR); - break; - } - z &= ~ roundBitsMask; - if ( z != float32_val(a) ) STATUS(float_exception_flags) |= float_flag_inexact; - return make_float32(z); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of adding the absolute values of the single-precision -| floating-point values `a' and `b'. If `zSign' is 1, the sum is negated -| before being returned. `zSign' is ignored if the result is a NaN. -| The addition is performed according to the IEC/IEEE Standard for Binary -| Floating-Point Arithmetic. 
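/*----------------------------------------------------------------------------
| The removed float32_round_to_int rounds on the raw encoding: for a value
| with fractional bits (0x7F <= aExp < 0x96), the weight of the last
| integer bit is 1 << (0x96 - aExp), and nearest-even rounding becomes
| plain integer arithmetic on the word. Sketch of that inner step:
*----------------------------------------------------------------------------*/

#include <stdint.h>

static uint32_t sketch_round_bits_nearest_even(uint32_t z, int aExp)
{
    uint32_t lastBitMask = 1u << (0x96 - aExp);  /* LSB of integer part */
    uint32_t roundBitsMask = lastBitMask - 1;    /* the fractional bits */
    z += lastBitMask >> 1;                       /* round half up... */
    if ((z & roundBitsMask) == 0) {
        z &= ~lastBitMask;                       /* ...then ties to even */
    }
    return z & ~roundBitsMask;                   /* clear the fraction */
}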
-*----------------------------------------------------------------------------*/ - -static float32 addFloat32Sigs( float32 a, float32 b, flag zSign STATUS_PARAM) -{ - int_fast16_t aExp, bExp, zExp; - uint32_t aSig, bSig, zSig; - int_fast16_t expDiff; - - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - bSig = extractFloat32Frac( b ); - bExp = extractFloat32Exp( b ); - expDiff = aExp - bExp; - aSig <<= 6; - bSig <<= 6; - if ( 0 < expDiff ) { - if ( aExp == 0xFF ) { - if ( aSig ) return propagateFloat32NaN( a, b STATUS_VAR ); - return a; - } - if ( bExp == 0 ) { - --expDiff; - } - else { - bSig |= 0x20000000; - } - shift32RightJamming( bSig, expDiff, &bSig ); - zExp = aExp; - } - else if ( expDiff < 0 ) { - if ( bExp == 0xFF ) { - if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); - return packFloat32( zSign, 0xFF, 0 ); - } - if ( aExp == 0 ) { - ++expDiff; - } - else { - aSig |= 0x20000000; - } - shift32RightJamming( aSig, - expDiff, &aSig ); - zExp = bExp; - } - else { - if ( aExp == 0xFF ) { - if ( aSig | bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); - return a; - } - if ( aExp == 0 ) { - if (STATUS(flush_to_zero)) { - if (aSig | bSig) { - float_raise(float_flag_output_denormal STATUS_VAR); - } - return packFloat32(zSign, 0, 0); - } - return packFloat32( zSign, 0, ( aSig + bSig )>>6 ); - } - zSig = 0x40000000 + aSig + bSig; - zExp = aExp; - goto roundAndPack; - } - aSig |= 0x20000000; - zSig = ( aSig + bSig )<<1; - --zExp; - if ( (int32_t) zSig < 0 ) { - zSig = aSig + bSig; - ++zExp; - } - roundAndPack: - return roundAndPackFloat32( zSign, zExp, zSig STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of subtracting the absolute values of the single- -| precision floating-point values `a' and `b'. If `zSign' is 1, the -| difference is negated before being returned. `zSign' is ignored if the -| result is a NaN. The subtraction is performed according to the IEC/IEEE -| Standard for Binary Floating-Point Arithmetic. 
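/*----------------------------------------------------------------------------
| Both removed magnitude helpers (addFloat32Sigs/subFloat32Sigs) align the
| smaller operand with shift32RightJamming, which ORs every shifted-out bit
| into the result's LSB so later rounding still observes them. Equivalent
| standalone sketch:
*----------------------------------------------------------------------------*/

#include <stdint.h>

static uint32_t sketch_shift_right_jam32(uint32_t v, int count)
{
    if (count == 0) {
        return v;
    }
    if (count >= 32) {
        return v != 0;                     /* everything became sticky */
    }
    return (v >> count) | ((uint32_t)(v << (32 - count)) != 0);
}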
-*----------------------------------------------------------------------------*/ - -static float32 subFloat32Sigs( float32 a, float32 b, flag zSign STATUS_PARAM) -{ - int_fast16_t aExp, bExp, zExp; - uint32_t aSig, bSig, zSig; - int_fast16_t expDiff; - - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - bSig = extractFloat32Frac( b ); - bExp = extractFloat32Exp( b ); - expDiff = aExp - bExp; - aSig <<= 7; - bSig <<= 7; - if ( 0 < expDiff ) goto aExpBigger; - if ( expDiff < 0 ) goto bExpBigger; - if ( aExp == 0xFF ) { - if ( aSig | bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); - float_raise( float_flag_invalid STATUS_VAR); - return float32_default_nan; - } - if ( aExp == 0 ) { - aExp = 1; - bExp = 1; - } - if ( bSig < aSig ) goto aBigger; - if ( aSig < bSig ) goto bBigger; - return packFloat32( STATUS(float_rounding_mode) == float_round_down, 0, 0 ); - bExpBigger: - if ( bExp == 0xFF ) { - if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); - return packFloat32( zSign ^ 1, 0xFF, 0 ); - } - if ( aExp == 0 ) { - ++expDiff; - } - else { - aSig |= 0x40000000; - } - shift32RightJamming( aSig, - expDiff, &aSig ); - bSig |= 0x40000000; - bBigger: - zSig = bSig - aSig; - zExp = bExp; - zSign ^= 1; - goto normalizeRoundAndPack; - aExpBigger: - if ( aExp == 0xFF ) { - if ( aSig ) return propagateFloat32NaN( a, b STATUS_VAR ); - return a; - } - if ( bExp == 0 ) { - --expDiff; - } - else { - bSig |= 0x40000000; - } - shift32RightJamming( bSig, expDiff, &bSig ); - aSig |= 0x40000000; - aBigger: - zSig = aSig - bSig; - zExp = aExp; - normalizeRoundAndPack: - --zExp; - return normalizeRoundAndPackFloat32( zSign, zExp, zSig STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of adding the single-precision floating-point values `a' -| and `b'. The operation is performed according to the IEC/IEEE Standard for -| Binary Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -float32 float32_add( float32 a, float32 b STATUS_PARAM ) -{ - flag aSign, bSign; - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); - - aSign = extractFloat32Sign( a ); - bSign = extractFloat32Sign( b ); - if ( aSign == bSign ) { - return addFloat32Sigs( a, b, aSign STATUS_VAR); - } - else { - return subFloat32Sigs( a, b, aSign STATUS_VAR ); - } - -} - -/*---------------------------------------------------------------------------- -| Returns the result of subtracting the single-precision floating-point values -| `a' and `b'. The operation is performed according to the IEC/IEEE Standard -| for Binary Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -float32 float32_sub( float32 a, float32 b STATUS_PARAM ) -{ - flag aSign, bSign; - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); - - aSign = extractFloat32Sign( a ); - bSign = extractFloat32Sign( b ); - if ( aSign == bSign ) { - return subFloat32Sigs( a, b, aSign STATUS_VAR ); - } - else { - return addFloat32Sigs( a, b, aSign STATUS_VAR ); - } - -} - -/*---------------------------------------------------------------------------- -| Returns the result of multiplying the single-precision floating-point values -| `a' and `b'. The operation is performed according to the IEC/IEEE Standard -| for Binary Floating-Point Arithmetic. 
-*----------------------------------------------------------------------------*/ - -float32 float32_mul( float32 a, float32 b STATUS_PARAM ) -{ - flag aSign, bSign, zSign; - int_fast16_t aExp, bExp, zExp; - uint32_t aSig, bSig; - uint64_t zSig64; - uint32_t zSig; - - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); - - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - aSign = extractFloat32Sign( a ); - bSig = extractFloat32Frac( b ); - bExp = extractFloat32Exp( b ); - bSign = extractFloat32Sign( b ); - zSign = aSign ^ bSign; - if ( aExp == 0xFF ) { - if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) { - return propagateFloat32NaN( a, b STATUS_VAR ); - } - if ( ( bExp | bSig ) == 0 ) { - float_raise( float_flag_invalid STATUS_VAR); - return float32_default_nan; - } - return packFloat32( zSign, 0xFF, 0 ); - } - if ( bExp == 0xFF ) { - if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); - if ( ( aExp | aSig ) == 0 ) { - float_raise( float_flag_invalid STATUS_VAR); - return float32_default_nan; - } - return packFloat32( zSign, 0xFF, 0 ); - } - if ( aExp == 0 ) { - if ( aSig == 0 ) return packFloat32( zSign, 0, 0 ); - normalizeFloat32Subnormal( aSig, &aExp, &aSig ); - } - if ( bExp == 0 ) { - if ( bSig == 0 ) return packFloat32( zSign, 0, 0 ); - normalizeFloat32Subnormal( bSig, &bExp, &bSig ); - } - zExp = aExp + bExp - 0x7F; - aSig = ( aSig | 0x00800000 )<<7; - bSig = ( bSig | 0x00800000 )<<8; - shift64RightJamming( ( (uint64_t) aSig ) * bSig, 32, &zSig64 ); - zSig = (uint32_t)zSig64; - if ( 0 <= (int32_t) ( zSig<<1 ) ) { - zSig <<= 1; - --zExp; - } - return roundAndPackFloat32( zSign, zExp, zSig STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of dividing the single-precision floating-point value `a' -| by the corresponding value `b'. The operation is performed according to the -| IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
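/*----------------------------------------------------------------------------
| Core of the removed float32_mul: widen both 24-bit significands so their
| 64-bit product carries the result in its top half, jam the low half into
| a sticky bit, and renormalize by one position if the product fell in
| [1,2). Special cases (NaN/inf/zero/subnormal) omitted; the caller would
| compute zExp = aExp + bExp - 0x7F as above.
*----------------------------------------------------------------------------*/

#include <stdint.h>

static uint32_t sketch_mul_sigs32(uint32_t aSig, uint32_t bSig, int *zExp)
{
    uint64_t prod = (uint64_t)((aSig | 0x00800000u) << 7)
                  * ((bSig | 0x00800000u) << 8);
    uint32_t zSig = (uint32_t)(prod >> 32) | ((uint32_t)prod != 0);
    if ((int32_t)(zSig << 1) >= 0) {       /* product was in [1,2) */
        zSig <<= 1;
        --*zExp;
    }
    return zSig;
}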
-*----------------------------------------------------------------------------*/ - -float32 float32_div( float32 a, float32 b STATUS_PARAM ) -{ - flag aSign, bSign, zSign; - int_fast16_t aExp, bExp, zExp; - uint32_t aSig, bSig, zSig; - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); - - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - aSign = extractFloat32Sign( a ); - bSig = extractFloat32Frac( b ); - bExp = extractFloat32Exp( b ); - bSign = extractFloat32Sign( b ); - zSign = aSign ^ bSign; - if ( aExp == 0xFF ) { - if ( aSig ) return propagateFloat32NaN( a, b STATUS_VAR ); - if ( bExp == 0xFF ) { - if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); - float_raise( float_flag_invalid STATUS_VAR); - return float32_default_nan; - } - return packFloat32( zSign, 0xFF, 0 ); - } - if ( bExp == 0xFF ) { - if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); - return packFloat32( zSign, 0, 0 ); - } - if ( bExp == 0 ) { - if ( bSig == 0 ) { - if ( ( aExp | aSig ) == 0 ) { - float_raise( float_flag_invalid STATUS_VAR); - return float32_default_nan; - } - float_raise( float_flag_divbyzero STATUS_VAR); - return packFloat32( zSign, 0xFF, 0 ); - } - normalizeFloat32Subnormal( bSig, &bExp, &bSig ); - } - if ( aExp == 0 ) { - if ( aSig == 0 ) return packFloat32( zSign, 0, 0 ); - normalizeFloat32Subnormal( aSig, &aExp, &aSig ); - } - zExp = aExp - bExp + 0x7D; - aSig = ( aSig | 0x00800000 )<<7; - bSig = ( bSig | 0x00800000 )<<8; - if ( bSig <= ( aSig + aSig ) ) { - aSig >>= 1; - ++zExp; - } - zSig = ( ( (uint64_t) aSig )<<32 ) / bSig; - if ( ( zSig & 0x3F ) == 0 ) { - zSig |= ( (uint64_t) bSig * zSig != ( (uint64_t) aSig )<<32 ); - } - return roundAndPackFloat32( zSign, zExp, zSig STATUS_VAR ); - -} - /*---------------------------------------------------------------------------- | Returns the remainder of the single-precision floating-point value `a' | with respect to the corresponding value `b'. The operation is performed | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
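/*----------------------------------------------------------------------------
| Core of the removed float32_div: prescale so the quotient lands in [1,2),
| do one 64/32 integer division, and when the low quotient bits look exact,
| verify with a multiply and set a sticky bit if the division actually had
| a remainder. Sketch (special cases omitted):
*----------------------------------------------------------------------------*/

#include <stdint.h>

static uint32_t sketch_div_sigs32(uint32_t aSig, uint32_t bSig, int *zExp)
{
    aSig = (aSig | 0x00800000u) << 7;
    bSig = (bSig | 0x00800000u) << 8;
    if (bSig <= aSig + aSig) {             /* keep the quotient below 2.0 */
        aSig >>= 1;
        ++*zExp;
    }
    uint32_t zSig = (uint32_t)(((uint64_t)aSig << 32) / bSig);
    if ((zSig & 0x3F) == 0) {
        zSig |= ((uint64_t)bSig * zSig != (uint64_t)aSig << 32);
    }
    return zSig;
}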
*----------------------------------------------------------------------------*/ -float32 float32_rem( float32 a, float32 b STATUS_PARAM ) +float32 float32_rem(float32 a, float32 b, float_status *status) { flag aSign, zSign; - int_fast16_t aExp, bExp, expDiff; + int aExp, bExp, expDiff; uint32_t aSig, bSig; uint32_t q; uint64_t aSig64, bSig64, q64; uint32_t alternateASig; int32_t sigMean; - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); + a = float32_squash_input_denormal(a, status); + b = float32_squash_input_denormal(b, status); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); @@ -2229,19 +4625,21 @@ float32 float32_rem( float32 a, float32 b STATUS_PARAM ) bExp = extractFloat32Exp( b ); if ( aExp == 0xFF ) { if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) { - return propagateFloat32NaN( a, b STATUS_VAR ); + return propagateFloat32NaN(a, b, status); } - float_raise( float_flag_invalid STATUS_VAR); - return float32_default_nan; + float_raise(float_flag_invalid, status); + return float32_default_nan(status); } if ( bExp == 0xFF ) { - if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + if (bSig) { + return propagateFloat32NaN(a, b, status); + } return a; } if ( bExp == 0 ) { if ( bSig == 0 ) { - float_raise( float_flag_invalid STATUS_VAR); - return float32_default_nan; + float_raise(float_flag_invalid, status); + return float32_default_nan(status); } normalizeFloat32Subnormal( bSig, &bExp, &bSig ); } @@ -2280,13 +4678,17 @@ float32 float32_rem( float32 a, float32 b STATUS_PARAM ) while ( 0 < expDiff ) { q64 = estimateDiv128To64( aSig64, 0, bSig64 ); q64 = ( 2 < q64 ) ? q64 - 2 : 0; - aSig64 = 0- ( ( bSig * q64 )<<38 ); +#ifdef _MSC_VER + aSig64 = 0ULL - ( ( bSig * q64 )<<38 ); +#else + aSig64 = - ( ( bSig * q64 )<<38 ); +#endif expDiff -= 62; } expDiff += 64; q64 = estimateDiv128To64( aSig64, 0, bSig64 ); q64 = ( 2 < q64 ) ? q64 - 2 : 0; - q = (uint32_t)(q64>>( 64 - expDiff )); + q = q64>>( 64 - expDiff ); bSig <<= 6; aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q; } @@ -2300,290 +4702,15 @@ float32 float32_rem( float32 a, float32 b STATUS_PARAM ) aSig = alternateASig; } zSign = ( (int32_t) aSig < 0 ); - if ( zSign ) aSig = 0- aSig; - return normalizeRoundAndPackFloat32( aSign ^ zSign, bExp, aSig STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of multiplying the single-precision floating-point values -| `a' and `b' then adding 'c', with no intermediate rounding step after the -| multiplication. The operation is performed according to the IEC/IEEE -| Standard for Binary Floating-Point Arithmetic 754-2008. -| The flags argument allows the caller to select negation of the -| addend, the intermediate product, or the final result. (The difference -| between this and having the caller do a separate negation is that negating -| externally will flip the sign bit on NaNs.) 
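/*----------------------------------------------------------------------------
| The #ifdef _MSC_VER branches introduced above sidestep MSVC warning C4146
| ("unary minus operator applied to unsigned type"). Both spellings yield
| the same modular two's-complement value, so the rewrite is behavior-
| preserving. Sketch of the pattern:
*----------------------------------------------------------------------------*/

#include <stdint.h>

static uint64_t sketch_negate_u64(uint64_t x)
{
#ifdef _MSC_VER
    return 0ULL - x;          /* silences C4146 */
#else
    return -x;                /* unsigned negation is well defined */
#endif
}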
-*----------------------------------------------------------------------------*/ - -float32 float32_muladd(float32 a, float32 b, float32 c, int flags STATUS_PARAM) -{ - flag aSign, bSign, cSign, zSign; - int_fast16_t aExp, bExp, cExp, pExp, zExp, expDiff; - uint32_t aSig, bSig, cSig; - flag pInf, pZero, pSign; - uint64_t pSig64, cSig64, zSig64; - uint32_t pSig; - int shiftcount; - flag signflip, infzero; - - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); - c = float32_squash_input_denormal(c STATUS_VAR); - aSig = extractFloat32Frac(a); - aExp = extractFloat32Exp(a); - aSign = extractFloat32Sign(a); - bSig = extractFloat32Frac(b); - bExp = extractFloat32Exp(b); - bSign = extractFloat32Sign(b); - cSig = extractFloat32Frac(c); - cExp = extractFloat32Exp(c); - cSign = extractFloat32Sign(c); - - infzero = ((aExp == 0 && aSig == 0 && bExp == 0xff && bSig == 0) || - (aExp == 0xff && aSig == 0 && bExp == 0 && bSig == 0)); - - /* It is implementation-defined whether the cases of (0,inf,qnan) - * and (inf,0,qnan) raise InvalidOperation or not (and what QNaN - * they return if they do), so we have to hand this information - * off to the target-specific pick-a-NaN routine. - */ - if (((aExp == 0xff) && aSig) || - ((bExp == 0xff) && bSig) || - ((cExp == 0xff) && cSig)) { - return propagateFloat32MulAddNaN(a, b, c, infzero STATUS_VAR); - } - - if (infzero) { - float_raise(float_flag_invalid STATUS_VAR); - return float32_default_nan; - } - - if (flags & float_muladd_negate_c) { - cSign ^= 1; - } - - signflip = (flags & float_muladd_negate_result) ? 1 : 0; - - /* Work out the sign and type of the product */ - pSign = aSign ^ bSign; - if (flags & float_muladd_negate_product) { - pSign ^= 1; - } - pInf = (aExp == 0xff) || (bExp == 0xff); - pZero = ((aExp | aSig) == 0) || ((bExp | bSig) == 0); - - if (cExp == 0xff) { - if (pInf && (pSign ^ cSign)) { - /* addition of opposite-signed infinities => InvalidOperation */ - float_raise(float_flag_invalid STATUS_VAR); - return float32_default_nan; - } - /* Otherwise generate an infinity of the same sign */ - return packFloat32(cSign ^ signflip, 0xff, 0); - } - - if (pInf) { - return packFloat32(pSign ^ signflip, 0xff, 0); - } - - if (pZero) { - if (cExp == 0) { - if (cSig == 0) { - /* Adding two exact zeroes */ - if (pSign == cSign) { - zSign = pSign; - } else if (STATUS(float_rounding_mode) == float_round_down) { - zSign = 1; - } else { - zSign = 0; - } - return packFloat32(zSign ^ signflip, 0, 0); - } - /* Exact zero plus a denorm */ - if (STATUS(flush_to_zero)) { - float_raise(float_flag_output_denormal STATUS_VAR); - return packFloat32(cSign ^ signflip, 0, 0); - } - } - /* Zero plus something non-zero : just return the something */ - if (flags & float_muladd_halve_result) { - if (cExp == 0) { - normalizeFloat32Subnormal(cSig, &cExp, &cSig); - } - /* Subtract one to halve, and one again because roundAndPackFloat32 - * wants one less than the true exponent. - */ - cExp -= 2; - cSig = (cSig | 0x00800000) << 7; - return roundAndPackFloat32(cSign ^ signflip, cExp, cSig STATUS_VAR); - } - return packFloat32(cSign ^ signflip, cExp, cSig); - } - - if (aExp == 0) { - normalizeFloat32Subnormal(aSig, &aExp, &aSig); - } - if (bExp == 0) { - normalizeFloat32Subnormal(bSig, &bExp, &bSig); - } - - /* Calculate the actual result a * b + c */ - - /* Multiply first; this is easy. 
*/ - /* NB: we subtract 0x7e where float32_mul() subtracts 0x7f - * because we want the true exponent, not the "one-less-than" - * flavour that roundAndPackFloat32() takes. - */ - pExp = aExp + bExp - 0x7e; - aSig = (aSig | 0x00800000) << 7; - bSig = (bSig | 0x00800000) << 8; - pSig64 = (uint64_t)aSig * bSig; - if ((int64_t)(pSig64 << 1) >= 0) { - pSig64 <<= 1; - pExp--; - } - - zSign = pSign ^ signflip; - - /* Now pSig64 is the significand of the multiply, with the explicit bit in - * position 62. - */ - if (cExp == 0) { - if (!cSig) { - /* Throw out the special case of c being an exact zero now */ - shift64RightJamming(pSig64, 32, &pSig64); - pSig = (uint32_t)pSig64; - if (flags & float_muladd_halve_result) { - pExp--; - } - return roundAndPackFloat32(zSign, pExp - 1, - pSig STATUS_VAR); - } - normalizeFloat32Subnormal(cSig, &cExp, &cSig); - } - - cSig64 = (uint64_t)cSig << (62 - 23); - cSig64 |= LIT64(0x4000000000000000); - expDiff = pExp - cExp; - - if (pSign == cSign) { - /* Addition */ - if (expDiff > 0) { - /* scale c to match p */ - shift64RightJamming(cSig64, expDiff, &cSig64); - zExp = pExp; - } else if (expDiff < 0) { - /* scale p to match c */ - shift64RightJamming(pSig64, -expDiff, &pSig64); - zExp = cExp; - } else { - /* no scaling needed */ - zExp = cExp; - } - /* Add significands and make sure explicit bit ends up in posn 62 */ - zSig64 = pSig64 + cSig64; - if ((int64_t)zSig64 < 0) { - shift64RightJamming(zSig64, 1, &zSig64); - } else { - zExp--; - } - } else { - /* Subtraction */ - if (expDiff > 0) { - shift64RightJamming(cSig64, expDiff, &cSig64); - zSig64 = pSig64 - cSig64; - zExp = pExp; - } else if (expDiff < 0) { - shift64RightJamming(pSig64, -expDiff, &pSig64); - zSig64 = cSig64 - pSig64; - zExp = cExp; - zSign ^= 1; - } else { - zExp = pExp; - if (cSig64 < pSig64) { - zSig64 = pSig64 - cSig64; - } else if (pSig64 < cSig64) { - zSig64 = cSig64 - pSig64; - zSign ^= 1; - } else { - /* Exact zero */ - zSign = signflip; - if (STATUS(float_rounding_mode) == float_round_down) { - zSign ^= 1; - } - return packFloat32(zSign, 0, 0); - } - } - --zExp; - /* Normalize to put the explicit bit back into bit 62. */ - shiftcount = countLeadingZeros64(zSig64) - 1; - zSig64 <<= shiftcount; - zExp -= shiftcount; - } - if (flags & float_muladd_halve_result) { - zExp--; - } - - shift64RightJamming(zSig64, 32, &zSig64); - return roundAndPackFloat32(zSign, zExp, (uint32_t)zSig64 STATUS_VAR); +#ifdef _MSC_VER + if ( zSign ) aSig = 0ULL - aSig; +#else + if ( zSign ) aSig = - aSig; +#endif + return normalizeRoundAndPackFloat32(aSign ^ zSign, bExp, aSig, status); } -/*---------------------------------------------------------------------------- -| Returns the square root of the single-precision floating-point value `a'. -| The operation is performed according to the IEC/IEEE Standard for Binary -| Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -float32 float32_sqrt( float32 a STATUS_PARAM ) -{ - flag aSign; - int_fast16_t aExp, zExp; - uint32_t aSig, zSig; - uint64_t rem, term; - a = float32_squash_input_denormal(a STATUS_VAR); - - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - aSign = extractFloat32Sign( a ); - if ( aExp == 0xFF ) { - if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR ); - if ( ! 
aSign ) return a; - float_raise( float_flag_invalid STATUS_VAR); - return float32_default_nan; - } - if ( aSign ) { - if ( ( aExp | aSig ) == 0 ) return a; - float_raise( float_flag_invalid STATUS_VAR); - return float32_default_nan; - } - if ( aExp == 0 ) { - if ( aSig == 0 ) return float32_zero; - normalizeFloat32Subnormal( aSig, &aExp, &aSig ); - } - zExp = ( ( aExp - 0x7F )>>1 ) + 0x7E; - aSig = ( aSig | 0x00800000 )<<8; - zSig = estimateSqrt32( aExp, aSig ) + 2; - if ( ( zSig & 0x7F ) <= 5 ) { - if ( zSig < 2 ) { - zSig = 0x7FFFFFFF; - goto roundAndPack; - } - aSig >>= aExp & 1; - term = ( (uint64_t) zSig ) * zSig; - rem = ( ( (uint64_t) aSig )<<32 ) - term; - while ( (int64_t) rem < 0 ) { - --zSig; - rem += ( ( (uint64_t) zSig )<<1 ) | 1; - } - zSig |= ( rem != 0 ); - } - shift32RightJamming( zSig, 1, &zSig ); - roundAndPack: - return roundAndPackFloat32( 0, zExp, zSig STATUS_VAR ); - -} /*---------------------------------------------------------------------------- | Returns the binary exponential of the single-precision floating-point value @@ -2622,44 +4749,46 @@ static const float64 float32_exp2_coefficients[15] = const_float64( 0x3d6ae7f3e733b81fll ), /* 15 */ }; -float32 float32_exp2( float32 a STATUS_PARAM ) +float32 float32_exp2(float32 a, float_status *status) { flag aSign; - int_fast16_t aExp; + int aExp; uint32_t aSig; float64 r, x, xn; int i; - a = float32_squash_input_denormal(a STATUS_VAR); + a = float32_squash_input_denormal(a, status); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); if ( aExp == 0xFF) { - if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR ); + if (aSig) { + return propagateFloat32NaN(a, float32_zero, status); + } return (aSign) ? float32_zero : a; } if (aExp == 0) { if (aSig == 0) return float32_one; } - float_raise( float_flag_inexact STATUS_VAR); + float_raise(float_flag_inexact, status); /* ******************************* */ /* using float64 for approximation */ /* ******************************* */ - x = float32_to_float64(a STATUS_VAR); - x = float64_mul(x, float64_ln2 STATUS_VAR); + x = float32_to_float64(a, status); + x = float64_mul(x, float64_ln2, status); xn = x; r = float64_one; for (i = 0 ; i < 15 ; i++) { float64 f; - f = float64_mul(xn, float32_exp2_coefficients[i] STATUS_VAR); - r = float64_add(r, f STATUS_VAR); + f = float64_mul(xn, float32_exp2_coefficients[i], status); + r = float64_add(r, f, status); - xn = float64_mul(xn, x STATUS_VAR); + xn = float64_mul(xn, x, status); } return float64_to_float32(r, status); @@ -2670,13 +4799,13 @@ float32 float32_exp2( float32 a STATUS_PARAM ) | The operation is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. 
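The log2 routines below (and their float64 twin later in this file) extract fraction bits by repeated squaring of the significand. As an illustrative aside, here is a minimal sketch of the same idea in plain double arithmetic, assuming nothing beyond libc (the real code does this in 64/128-bit fixed point):

    #include <stdio.h>

    /* For m in [1,2): square it; if the square reaches 2, the next
     * fraction bit of log2(m) is 1 and the square is halved back into
     * [1,2). This mirrors the squaring loop inside float64_log2. */
    static double log2_frac(double m, int bits)
    {
        double r = 0.0, w = 0.5;
        for (int i = 0; i < bits; i++) {
            m *= m;
            if (m >= 2.0) {
                r += w;
                m *= 0.5;
            }
            w *= 0.5;
        }
        return r;
    }

    int main(void)
    {
        printf("%f\n", log2_frac(1.5, 30));   /* 0.584963, log2(1.5) */
        return 0;
    }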
*----------------------------------------------------------------------------*/ -float32 float32_log2( float32 a STATUS_PARAM ) +float32 float32_log2(float32 a, float_status *status) { flag aSign, zSign; - int_fast16_t aExp; + int aExp; uint32_t aSig, zSig, i; - a = float32_squash_input_denormal(a STATUS_VAR); + a = float32_squash_input_denormal(a, status); aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); @@ -2686,11 +4815,13 @@ float32 float32_log2( float32 a STATUS_PARAM ) normalizeFloat32Subnormal( aSig, &aExp, &aSig ); } if ( aSign ) { - float_raise( float_flag_invalid STATUS_VAR); - return float32_default_nan; + float_raise(float_flag_invalid, status); + return float32_default_nan(status); } if ( aExp == 0xFF ) { - if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR ); + if (aSig) { + return propagateFloat32NaN(a, float32_zero, status); + } return a; } @@ -2708,9 +4839,13 @@ float32 float32_log2( float32 a STATUS_PARAM ) } if ( zSign ) - zSig = 0-zSig; +#ifdef _MSC_VER + zSig = 0 - zSig; +#else + zSig = -zSig; +#endif - return normalizeRoundAndPackFloat32( zSign, 0x85, zSig STATUS_VAR ); + return normalizeRoundAndPackFloat32(zSign, 0x85, zSig, status); } /*---------------------------------------------------------------------------- @@ -2720,16 +4855,16 @@ float32 float32_log2( float32 a STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float32_eq( float32 a, float32 b STATUS_PARAM ) +int float32_eq(float32 a, float32 b, float_status *status) { uint32_t av, bv; - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); + a = float32_squash_input_denormal(a, status); + b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } av = float32_val(a); @@ -2744,17 +4879,17 @@ int float32_eq( float32 a, float32 b STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float32_le( float32 a, float32 b STATUS_PARAM ) +int float32_le(float32 a, float32 b, float_status *status) { flag aSign, bSign; uint32_t av, bv; - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); + a = float32_squash_input_denormal(a, status); + b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat32Sign( a ); @@ -2773,17 +4908,17 @@ int float32_le( float32 a, float32 b STATUS_PARAM ) | to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
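The signaling comparisons in this group (float32_eq/le/lt) return 0 and raise float_flag_invalid whenever either operand is any NaN; the *_quiet variants further down raise it only for signaling NaNs. A minimal, self-contained illustration of the underlying IEEE rule using host floats (assumes only libc, not the softfloat API):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        float qnan = nanf("");   /* a quiet NaN */
        /* Every ordered comparison involving a NaN is false, which is
         * why these predicates return 0 and flag the operation invalid. */
        printf("%d %d %d\n", qnan == 1.0f, qnan < 1.0f, qnan <= 1.0f); /* 0 0 0 */
        printf("%d\n", isunordered(qnan, 1.0f));                       /* 1 */
        return 0;
    }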
*----------------------------------------------------------------------------*/ -int float32_lt( float32 a, float32 b STATUS_PARAM ) +int float32_lt(float32 a, float32 b, float_status *status) { flag aSign, bSign; uint32_t av, bv; - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); + a = float32_squash_input_denormal(a, status); + b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat32Sign( a ); @@ -2802,15 +4937,15 @@ int float32_lt( float32 a, float32 b STATUS_PARAM ) | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float32_unordered( float32 a, float32 b STATUS_PARAM ) +int float32_unordered(float32 a, float32 b, float_status *status) { - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); + a = float32_squash_input_denormal(a, status); + b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 1; } return 0; @@ -2823,16 +4958,17 @@ int float32_unordered( float32 a, float32 b STATUS_PARAM ) | for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float32_eq_quiet( float32 a, float32 b STATUS_PARAM ) +int float32_eq_quiet(float32 a, float32 b, float_status *status) { - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); + a = float32_squash_input_denormal(a, status); + b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float32_is_signaling_nan(a, status) + || float32_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -2847,18 +4983,19 @@ int float32_eq_quiet( float32 a, float32 b STATUS_PARAM ) | IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float32_le_quiet( float32 a, float32 b STATUS_PARAM ) +int float32_le_quiet(float32 a, float32 b, float_status *status) { flag aSign, bSign; uint32_t av, bv; - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); + a = float32_squash_input_denormal(a, status); + b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float32_is_signaling_nan(a, status) + || float32_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -2878,18 +5015,19 @@ int float32_le_quiet( float32 a, float32 b STATUS_PARAM ) | Standard for Binary Floating-Point Arithmetic. 
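The comparison bodies elided by the hunks above work on the raw encodings `av' and `bv'. That works because, for non-NaN values of the same sign, IEEE 754 bit patterns order like sign-magnitude integers. A small demonstration, assuming only libc:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t bits_of(float f)
    {
        uint32_t u;
        memcpy(&u, &f, sizeof u);   /* well-defined way to read the encoding */
        return u;
    }

    int main(void)
    {
        /* Positive floats compare exactly like their unsigned encodings. */
        printf("%d\n", bits_of(1.5f) < bits_of(2.0f));    /* 1 */
        printf("%d\n", bits_of(0.1f) < bits_of(100.0f));  /* 1 */
        return 0;
    }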
*----------------------------------------------------------------------------*/ -int float32_lt_quiet( float32 a, float32 b STATUS_PARAM ) +int float32_lt_quiet(float32 a, float32 b, float_status *status) { flag aSign, bSign; uint32_t av, bv; - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); + a = float32_squash_input_denormal(a, status); + b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float32_is_signaling_nan(a, status) + || float32_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -2909,564 +5047,23 @@ int float32_lt_quiet( float32 a, float32 b STATUS_PARAM ) | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float32_unordered_quiet( float32 a, float32 b STATUS_PARAM ) +int float32_unordered_quiet(float32 a, float32 b, float_status *status) { - a = float32_squash_input_denormal(a STATUS_VAR); - b = float32_squash_input_denormal(b STATUS_VAR); + a = float32_squash_input_denormal(a, status); + b = float32_squash_input_denormal(b, status); if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) ) { - if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float32_is_signaling_nan(a, status) + || float32_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 1; } return 0; } -/*---------------------------------------------------------------------------- -| Returns the result of converting the double-precision floating-point value -| `a' to the 32-bit two's complement integer format. The conversion is -| performed according to the IEC/IEEE Standard for Binary Floating-Point -| Arithmetic---which means in particular that the conversion is rounded -| according to the current rounding mode. If `a' is a NaN, the largest -| positive integer is returned. Otherwise, if the conversion overflows, the -| largest integer with the same sign as `a' is returned. -*----------------------------------------------------------------------------*/ - -int32 float64_to_int32( float64 a STATUS_PARAM ) -{ - flag aSign; - int_fast16_t aExp, shiftCount; - uint64_t aSig; - a = float64_squash_input_denormal(a STATUS_VAR); - - aSig = extractFloat64Frac( a ); - aExp = extractFloat64Exp( a ); - aSign = extractFloat64Sign( a ); - if ( ( aExp == 0x7FF ) && aSig ) aSign = 0; - if ( aExp ) aSig |= LIT64( 0x0010000000000000 ); - shiftCount = 0x42C - aExp; - if ( 0 < shiftCount ) shift64RightJamming( aSig, shiftCount, &aSig ); - return roundAndPackInt32( aSign, aSig STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the double-precision floating-point value -| `a' to the 32-bit two's complement integer format. The conversion is -| performed according to the IEC/IEEE Standard for Binary Floating-Point -| Arithmetic, except that the conversion is always rounded toward zero. -| If `a' is a NaN, the largest positive integer is returned. Otherwise, if -| the conversion overflows, the largest integer with the same sign as `a' is -| returned. 
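As the comment above says, the round_to_zero conversions truncate toward zero regardless of the rounding mode in the status argument, and saturate (raising float_flag_invalid) instead of wrapping on overflow or NaN. For in-range values, plain C casts truncate the same way, as this sketch shows; unlike the softfloat routines, a C cast is undefined on overflow:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        printf("%d\n", (int32_t) 1.9);    /*  1: truncation, not rounding */
        printf("%d\n", (int32_t)-1.9);    /* -1: toward zero, not downward */
        return 0;
    }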
-*----------------------------------------------------------------------------*/
-
-int32 float64_to_int32_round_to_zero( float64 a STATUS_PARAM )
-{
-    flag aSign;
-    int_fast16_t aExp, shiftCount;
-    uint64_t aSig, savedASig;
-    int32_t z;
-    a = float64_squash_input_denormal(a STATUS_VAR);
-
-    aSig = extractFloat64Frac( a );
-    aExp = extractFloat64Exp( a );
-    aSign = extractFloat64Sign( a );
-    if ( 0x41E < aExp ) {
-        if ( ( aExp == 0x7FF ) && aSig ) aSign = 0;
-        goto invalid;
-    }
-    else if ( aExp < 0x3FF ) {
-        if ( aExp || aSig ) STATUS(float_exception_flags) |= float_flag_inexact;
-        return 0;
-    }
-    aSig |= LIT64( 0x0010000000000000 );
-    shiftCount = 0x433 - aExp;
-    savedASig = aSig;
-    aSig >>= shiftCount;
-    z = (int32_t)aSig;
-    if ( aSign && (z != 0x80000000)) z = - z;
-    if ( ( z < 0 ) ^ aSign ) {
- invalid:
-        float_raise( float_flag_invalid STATUS_VAR);
-        return aSign ? (int32_t) 0x80000000 : 0x7FFFFFFF;
-    }
-    if ( ( aSig<<shiftCount ) != savedASig ) {
-        STATUS(float_exception_flags) |= float_flag_inexact;
-    }
-    return z;
-
-}
-
-/*----------------------------------------------------------------------------
-| Returns the result of converting the double-precision floating-point value
-| `a' to the 16-bit two's complement integer format.  The conversion is
-| performed according to the IEC/IEEE Standard for Binary Floating-Point
-| Arithmetic, except that the conversion is always rounded toward zero.
-| If `a' is a NaN, the largest positive integer is returned.  Otherwise, if
-| the conversion overflows, the largest integer with the same sign as `a' is
-| returned.
-*----------------------------------------------------------------------------*/
-
-int_fast16_t float64_to_int16_round_to_zero(float64 a STATUS_PARAM)
-{
-    flag aSign;
-    int_fast16_t aExp, shiftCount;
-    uint64_t aSig, savedASig;
-    int32 z;
-
-    aSig = extractFloat64Frac( a );
-    aExp = extractFloat64Exp( a );
-    aSign = extractFloat64Sign( a );
-    if ( 0x40E < aExp ) {
-        if ( ( aExp == 0x7FF ) && aSig ) {
-            aSign = 0;
-        }
-        goto invalid;
-    }
-    else if ( aExp < 0x3FF ) {
-        if ( aExp || aSig ) {
-            STATUS(float_exception_flags) |= float_flag_inexact;
-        }
-        return 0;
-    }
-    aSig |= LIT64( 0x0010000000000000 );
-    shiftCount = 0x433 - aExp;
-    savedASig = aSig;
-    aSig >>= shiftCount;
-    z = (int32)aSig;
-    if ( aSign ) {
-        z = - z;
-    }
-    if ( ( (int16_t)z < 0 ) ^ aSign ) {
- invalid:
-        float_raise( float_flag_invalid STATUS_VAR);
-        return aSign ? (int32_t) 0xffff8000 : 0x7FFF;
-    }
-    if ( ( aSig<<shiftCount ) != savedASig ) {
-        STATUS(float_exception_flags) |= float_flag_inexact;
-    }
-    return z;
-
-}
-
-/*----------------------------------------------------------------------------
-| Returns the result of converting the double-precision floating-point value
-| `a' to the 64-bit two's complement integer format.  The conversion is
-| performed according to the IEC/IEEE Standard for Binary Floating-Point
-| Arithmetic---which means in particular that the conversion is rounded
-| according to the current rounding mode.  If `a' is a NaN, the largest
-| positive integer is returned.  Otherwise, if the conversion overflows, the
-| largest integer with the same sign as `a' is returned.
-*----------------------------------------------------------------------------*/
-
-int64 float64_to_int64( float64 a STATUS_PARAM )
-{
-    flag aSign;
-    int_fast16_t aExp, shiftCount;
-    uint64_t aSig, aSigExtra;
-    a = float64_squash_input_denormal(a STATUS_VAR);
-
-    aSig = extractFloat64Frac( a );
-    aExp = extractFloat64Exp( a );
-    aSign = extractFloat64Sign( a );
-    if ( aExp ) aSig |= LIT64( 0x0010000000000000 );
-    shiftCount = 0x433 - aExp;
-    if ( shiftCount <= 0 ) {
-        if ( 0x43E < aExp ) {
-            float_raise( float_flag_invalid STATUS_VAR);
-            if (    ! aSign
-                 || (    ( aExp == 0x7FF )
-                      && ( aSig != LIT64( 0x0010000000000000 ) ) )
-               ) {
-                return LIT64( 0x7FFFFFFFFFFFFFFF );
-            }
-            return (int64_t) LIT64( 0x8000000000000000 );
-        }
-        aSigExtra = 0;
-        aSig <<= - shiftCount;
-    }
-    else {
-        shift64ExtraRightJamming( aSig, 0, shiftCount, &aSig, &aSigExtra );
-    }
-    return roundAndPackInt64( aSign, aSig, aSigExtra STATUS_VAR );
-
-}
-
-/*----------------------------------------------------------------------------
-| Returns the result of converting the double-precision floating-point value
-| `a' to the 64-bit two's complement integer format.  The conversion is
-| performed according to the IEC/IEEE Standard for Binary Floating-Point
-| Arithmetic, except that the conversion is always rounded toward zero.
-| If `a' is a NaN, the largest positive integer is returned.  Otherwise, if
-| the conversion overflows, the largest integer with the same sign as `a' is
-| returned.
-*----------------------------------------------------------------------------*/
-
-int64 float64_to_int64_round_to_zero( float64 a STATUS_PARAM )
-{
-    flag aSign;
-    int_fast16_t aExp, shiftCount;
-    uint64_t aSig;
-    int64 z;
-    a = float64_squash_input_denormal(a STATUS_VAR);
-
-    aSig = extractFloat64Frac( a );
-    aExp = extractFloat64Exp( a );
-    aSign = extractFloat64Sign( a );
-    if ( aExp ) aSig |= LIT64( 0x0010000000000000 );
-    shiftCount = aExp - 0x433;
-    if ( 0 <= shiftCount ) {
-        if ( 0x43E <= aExp ) {
-            if ( float64_val(a) != LIT64( 0xC3E0000000000000 ) ) {
-                float_raise( float_flag_invalid STATUS_VAR);
-                if (    ! aSign
-                     || (    ( aExp == 0x7FF )
-                          && ( aSig != LIT64( 0x0010000000000000 ) ) )
-                   ) {
-                    return LIT64( 0x7FFFFFFFFFFFFFFF );
-                }
-            }
-            return (int64_t) LIT64( 0x8000000000000000 );
-        }
-        z = aSig<<shiftCount;
-    }
-    else {
-        if ( aExp < 0x3FE ) {
-            if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact;
-            return 0;
-        }
-        z = aSig>>( - shiftCount );
-        if ( (uint64_t) ( aSig<<( shiftCount & 63 ) ) ) {
-            STATUS(float_exception_flags) |= float_flag_inexact;
-        }
-    }
-    if ( aSign ) z = - z;
-    return z;
-
-}
-
-/*----------------------------------------------------------------------------
-| Returns the result of converting the double-precision floating-point value
-| `a' to the single-precision floating-point format.  The conversion is
-| performed according to the IEC/IEEE Standard for Binary Floating-Point
-| Arithmetic.
-*----------------------------------------------------------------------------*/
-
-float32 float64_to_float32( float64 a STATUS_PARAM )
-{
-    flag aSign;
-    int_fast16_t aExp;
-    uint64_t aSig;
-    uint32_t zSig;
-    a = float64_squash_input_denormal(a STATUS_VAR);
-
-    aSig = extractFloat64Frac( a );
-    aExp = extractFloat64Exp( a );
-    aSign = extractFloat64Sign( a );
-    if ( aExp == 0x7FF ) {
-        if ( aSig ) return commonNaNToFloat32( float64ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
-        return packFloat32( aSign, 0xFF, 0 );
-    }
-    shift64RightJamming( aSig, 22, &aSig );
-    zSig = (uint32_t)aSig;
-    if ( aExp || zSig ) {
-        zSig |= 0x40000000;
-        aExp -= 0x381;
-    }
-    return roundAndPackFloat32( aSign, aExp, zSig STATUS_VAR );
-
-}
-
-
-/*----------------------------------------------------------------------------
-| Packs the sign `zSign', exponent `zExp', and significand `zSig' into a
-| half-precision floating-point value, returning the result.  After being
-| shifted into the proper positions, the three fields are simply added
-| together to form the result.  This means that any integer portion of `zSig'
-| will be added into the exponent.  Since a properly normalized significand
-| will have an integer portion equal to 1, the `zExp' input should be 1 less
-| than the desired result exponent whenever `zSig' is a complete, normalized
-| significand.
-*----------------------------------------------------------------------------*/
-static float16 packFloat16(flag zSign, int_fast16_t zExp, uint16_t zSig)
-{
-    return make_float16(
-        (((uint32_t)zSign) << 15) + (((uint32_t)zExp) << 10) + zSig);
-}
-
-/*----------------------------------------------------------------------------
-| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
-| and significand `zSig', and returns the proper half-precision floating-
-| point value corresponding to the abstract input.
Ordinarily, the abstract -| value is simply rounded and packed into the half-precision format, with -| the inexact exception raised if the abstract input cannot be represented -| exactly. However, if the abstract value is too large, the overflow and -| inexact exceptions are raised and an infinity or maximal finite value is -| returned. If the abstract value is too small, the input value is rounded to -| a subnormal number, and the underflow and inexact exceptions are raised if -| the abstract input cannot be represented exactly as a subnormal half- -| precision floating-point number. -| The `ieee' flag indicates whether to use IEEE standard half precision, or -| ARM-style "alternative representation", which omits the NaN and Inf -| encodings in order to raise the maximum representable exponent by one. -| The input significand `zSig' has its binary point between bits 22 -| and 23, which is 13 bits to the left of the usual location. This shifted -| significand must be normalized or smaller. If `zSig' is not normalized, -| `zExp' must be 0; in that case, the result returned is a subnormal number, -| and it must not require rounding. In the usual case that `zSig' is -| normalized, `zExp' must be 1 less than the ``true'' floating-point exponent. -| Note the slightly odd position of the binary point in zSig compared with the -| other roundAndPackFloat functions. This should probably be fixed if we -| need to implement more float16 routines than just conversion. -| The handling of underflow and overflow follows the IEC/IEEE Standard for -| Binary Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -static float32 roundAndPackFloat16(flag zSign, int_fast16_t zExp, - uint32_t zSig, flag ieee STATUS_PARAM) -{ - int maxexp = ieee ? 29 : 30; - uint32_t mask; - uint32_t increment; - bool rounding_bumps_exp; - bool is_tiny = false; - - /* Calculate the mask of bits of the mantissa which are not - * representable in half-precision and will be lost. - */ - if (zExp < 1) { - /* Will be denormal in halfprec */ - mask = 0x00ffffff; - if (zExp >= -11) { - mask >>= 11 + zExp; - } - } else { - /* Normal number in halfprec */ - mask = 0x00001fff; - } - - switch (STATUS(float_rounding_mode)) { - case float_round_nearest_even: - increment = (mask + 1) >> 1; - if ((zSig & mask) == increment) { - increment = zSig & (increment << 1); - } - break; - case float_round_ties_away: - increment = (mask + 1) >> 1; - break; - case float_round_up: - increment = zSign ? 0 : mask; - break; - case float_round_down: - increment = zSign ? 
mask : 0; - break; - default: /* round_to_zero */ - increment = 0; - break; - } - - rounding_bumps_exp = (zSig + increment >= 0x01000000); - - if (zExp > maxexp || (zExp == maxexp && rounding_bumps_exp)) { - if (ieee) { - float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); - return packFloat16(zSign, 0x1f, 0); - } else { - float_raise(float_flag_invalid STATUS_VAR); - return packFloat16(zSign, 0x1f, 0x3ff); - } - } - - if (zExp < 0) { - /* Note that flush-to-zero does not affect half-precision results */ - is_tiny = - (STATUS(float_detect_tininess) == float_tininess_before_rounding) - || (zExp < -1) - || (!rounding_bumps_exp); - } - if (zSig & mask) { - float_raise(float_flag_inexact STATUS_VAR); - if (is_tiny) { - float_raise(float_flag_underflow STATUS_VAR); - } - } - - zSig += increment; - if (rounding_bumps_exp) { - zSig >>= 1; - zExp++; - } - - if (zExp < -10) { - return packFloat16(zSign, 0, 0); - } - if (zExp < 0) { - zSig >>= -zExp; - zExp = 0; - } - return packFloat16(zSign, zExp, zSig >> 13); -} - -static void normalizeFloat16Subnormal(uint32_t aSig, int_fast16_t *zExpPtr, - uint32_t *zSigPtr) -{ - int8_t shiftCount = countLeadingZeros32(aSig) - 21; - *zSigPtr = aSig << shiftCount; - *zExpPtr = 1 - shiftCount; -} - -/* Half precision floats come in two formats: standard IEEE and "ARM" format. - The latter gains extra exponent range by omitting the NaN/Inf encodings. */ - -float32 float16_to_float32(float16 a, flag ieee STATUS_PARAM) -{ - flag aSign; - int_fast16_t aExp; - uint32_t aSig; - - aSign = extractFloat16Sign(a); - aExp = extractFloat16Exp(a); - aSig = extractFloat16Frac(a); - - if (aExp == 0x1f && ieee) { - if (aSig) { - return commonNaNToFloat32(float16ToCommonNaN(a STATUS_VAR) STATUS_VAR); - } - return packFloat32(aSign, 0xff, 0); - } - if (aExp == 0) { - if (aSig == 0) { - return packFloat32(aSign, 0, 0); - } - - normalizeFloat16Subnormal(aSig, &aExp, &aSig); - aExp--; - } - return packFloat32( aSign, aExp + 0x70, aSig << 13); -} - -float16 float32_to_float16(float32 a, flag ieee STATUS_PARAM) -{ - flag aSign; - int_fast16_t aExp; - uint32_t aSig; - - a = float32_squash_input_denormal(a STATUS_VAR); - - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - aSign = extractFloat32Sign( a ); - if ( aExp == 0xFF ) { - if (aSig) { - /* Input is a NaN */ - if (!ieee) { - float_raise(float_flag_invalid STATUS_VAR); - return packFloat16(aSign, 0, 0); - } - return commonNaNToFloat16( - float32ToCommonNaN(a STATUS_VAR) STATUS_VAR); - } - /* Infinity */ - if (!ieee) { - float_raise(float_flag_invalid STATUS_VAR); - return packFloat16(aSign, 0x1f, 0x3ff); - } - return packFloat16(aSign, 0x1f, 0); - } - if (aExp == 0 && aSig == 0) { - return packFloat16(aSign, 0, 0); - } - /* Decimal point between bits 22 and 23. Note that we add the 1 bit - * even if the input is denormal; however this is harmless because - * the largest possible single-precision denormal is still smaller - * than the smallest representable half-precision denormal, and so we - * will end up ignoring aSig and returning via the "always return zero" - * codepath. 
- */ - aSig |= 0x00800000; - aExp -= 0x71; - - return roundAndPackFloat16(aSign, aExp, aSig, ieee STATUS_VAR); -} - -float64 float16_to_float64(float16 a, flag ieee STATUS_PARAM) -{ - flag aSign; - int_fast16_t aExp; - uint32_t aSig; - - aSign = extractFloat16Sign(a); - aExp = extractFloat16Exp(a); - aSig = extractFloat16Frac(a); - - if (aExp == 0x1f && ieee) { - if (aSig) { - return commonNaNToFloat64( - float16ToCommonNaN(a STATUS_VAR) STATUS_VAR); - } - return packFloat64(aSign, 0x7ff, 0); - } - if (aExp == 0) { - if (aSig == 0) { - return packFloat64(aSign, 0, 0); - } - - normalizeFloat16Subnormal(aSig, &aExp, &aSig); - aExp--; - } - return packFloat64(aSign, aExp + 0x3f0, ((uint64_t)aSig) << 42); -} - -float16 float64_to_float16(float64 a, flag ieee STATUS_PARAM) -{ - flag aSign; - int_fast16_t aExp; - uint64_t aSig; - uint32_t zSig; - - a = float64_squash_input_denormal(a STATUS_VAR); - - aSig = extractFloat64Frac(a); - aExp = extractFloat64Exp(a); - aSign = extractFloat64Sign(a); - if (aExp == 0x7FF) { - if (aSig) { - /* Input is a NaN */ - if (!ieee) { - float_raise(float_flag_invalid STATUS_VAR); - return packFloat16(aSign, 0, 0); - } - return commonNaNToFloat16( - float64ToCommonNaN(a STATUS_VAR) STATUS_VAR); - } - /* Infinity */ - if (!ieee) { - float_raise(float_flag_invalid STATUS_VAR); - return packFloat16(aSign, 0x1f, 0x3ff); - } - return packFloat16(aSign, 0x1f, 0); - } - shift64RightJamming(aSig, 29, &aSig); - zSig = (uint32_t)aSig; - if (aExp == 0 && zSig == 0) { - return packFloat16(aSign, 0, 0); - } - /* Decimal point between bits 22 and 23. Note that we add the 1 bit - * even if the input is denormal; however this is harmless because - * the largest possible single-precision denormal is still smaller - * than the smallest representable half-precision denormal, and so we - * will end up ignoring aSig and returning via the "always return zero" - * codepath. - */ - zSig |= 0x00800000; - aExp -= 0x3F1; - - return roundAndPackFloat16(aSign, aExp, zSig, ieee STATUS_VAR); -} - /*---------------------------------------------------------------------------- | Returns the result of converting the double-precision floating-point value | `a' to the extended double-precision floating-point format. The conversion @@ -3474,19 +5071,23 @@ float16 float64_to_float16(float64 a, flag ieee STATUS_PARAM) | Arithmetic. 
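The conversion below has to materialize the integer bit: the 80-bit extended format stores bit 63 of the significand explicitly, where float32/float64 keep it implicit. A sketch of the significand move performed by the packFloatx80 call that follows (hypothetical helper, not from this patch):

    #include <stdint.h>

    /* Left-justify a double's 52-bit fraction into an x87 significand:
     * restore the hidden bit at position 52, then shift it up to the
     * explicit integer-bit position 63. */
    static uint64_t f64_sig_to_x80_sig(uint64_t frac52, int is_normal)
    {
        uint64_t sig = frac52;
        if (is_normal) {
            sig |= UINT64_C(0x0010000000000000);
        }
        return sig << 11;
    }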
*----------------------------------------------------------------------------*/ -floatx80 float64_to_floatx80( float64 a STATUS_PARAM ) +floatx80 float64_to_floatx80(float64 a, float_status *status) { flag aSign; - int_fast16_t aExp; + int aExp; uint64_t aSig; - a = float64_squash_input_denormal(a STATUS_VAR); + a = float64_squash_input_denormal(a, status); aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); if ( aExp == 0x7FF ) { - if ( aSig ) return commonNaNToFloatx80( float64ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); - return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + if (aSig) { + return commonNaNToFloatx80(float64ToCommonNaN(a, status), status); + } + return packFloatx80(aSign, + floatx80_infinity_high, + floatx80_infinity_low); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 ); @@ -3494,7 +5095,7 @@ floatx80 float64_to_floatx80( float64 a STATUS_PARAM ) } return packFloatx80( - aSign, aExp + 0x3C00, ( aSig | LIT64( 0x0010000000000000 ) )<<11 ); + aSign, aExp + 0x3C00, (aSig | UINT64_C(0x0010000000000000)) << 11); } @@ -3505,18 +5106,20 @@ floatx80 float64_to_floatx80( float64 a STATUS_PARAM ) | Arithmetic. *----------------------------------------------------------------------------*/ -float128 float64_to_float128( float64 a STATUS_PARAM ) +float128 float64_to_float128(float64 a, float_status *status) { flag aSign; - int_fast16_t aExp; + int aExp; uint64_t aSig, zSig0, zSig1; - a = float64_squash_input_denormal(a STATUS_VAR); + a = float64_squash_input_denormal(a, status); aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); if ( aExp == 0x7FF ) { - if ( aSig ) return commonNaNToFloat128( float64ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + if (aSig) { + return commonNaNToFloat128(float64ToCommonNaN(a, status), status); + } return packFloat128( aSign, 0x7FFF, 0, 0 ); } if ( aExp == 0 ) { @@ -3529,431 +5132,6 @@ float128 float64_to_float128( float64 a STATUS_PARAM ) } -/*---------------------------------------------------------------------------- -| Rounds the double-precision floating-point value `a' to an integer, and -| returns the result as a double-precision floating-point value. The -| operation is performed according to the IEC/IEEE Standard for Binary -| Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -float64 float64_round_to_int( float64 a STATUS_PARAM ) -{ - flag aSign; - int_fast16_t aExp; - uint64_t lastBitMask, roundBitsMask; - uint64_t z; - a = float64_squash_input_denormal(a STATUS_VAR); - - aExp = extractFloat64Exp( a ); - if ( 0x433 <= aExp ) { - if ( ( aExp == 0x7FF ) && extractFloat64Frac( a ) ) { - return propagateFloat64NaN( a, a STATUS_VAR ); - } - return a; - } - if ( aExp < 0x3FF ) { - if ( (uint64_t) ( float64_val(a)<<1 ) == 0 ) return a; - STATUS(float_exception_flags) |= float_flag_inexact; - aSign = extractFloat64Sign( a ); - switch ( STATUS(float_rounding_mode) ) { - case float_round_nearest_even: - if ( ( aExp == 0x3FE ) && extractFloat64Frac( a ) ) { - return packFloat64( aSign, 0x3FF, 0 ); - } - break; - case float_round_ties_away: - if (aExp == 0x3FE) { - return packFloat64(aSign, 0x3ff, 0); - } - break; - case float_round_down: - return make_float64(aSign ? LIT64( 0xBFF0000000000000 ) : 0); - case float_round_up: - return make_float64( - aSign ? 
LIT64( 0x8000000000000000 ) : LIT64( 0x3FF0000000000000 )); - } - return packFloat64( aSign, 0, 0 ); - } - lastBitMask = 1; - lastBitMask <<= 0x433 - aExp; - roundBitsMask = lastBitMask - 1; - z = float64_val(a); - switch (STATUS(float_rounding_mode)) { - case float_round_nearest_even: - z += lastBitMask >> 1; - if ((z & roundBitsMask) == 0) { - z &= ~lastBitMask; - } - break; - case float_round_ties_away: - z += lastBitMask >> 1; - break; - case float_round_to_zero: - break; - case float_round_up: - if (!extractFloat64Sign(make_float64(z))) { - z += roundBitsMask; - } - break; - case float_round_down: - if (extractFloat64Sign(make_float64(z))) { - z += roundBitsMask; - } - break; - default: - float_raise(float_flag_invalid STATUS_VAR); - break; - } - z &= ~ roundBitsMask; - if ( z != float64_val(a) ) - STATUS(float_exception_flags) |= float_flag_inexact; - return make_float64(z); - -} - -float64 float64_trunc_to_int( float64 a STATUS_PARAM) -{ - int oldmode; - float64 res; - oldmode = STATUS(float_rounding_mode); - STATUS(float_rounding_mode) = float_round_to_zero; - res = float64_round_to_int(a STATUS_VAR); - STATUS(float_rounding_mode) = oldmode; - return res; -} - -/*---------------------------------------------------------------------------- -| Returns the result of adding the absolute values of the double-precision -| floating-point values `a' and `b'. If `zSign' is 1, the sum is negated -| before being returned. `zSign' is ignored if the result is a NaN. -| The addition is performed according to the IEC/IEEE Standard for Binary -| Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -static float64 addFloat64Sigs( float64 a, float64 b, flag zSign STATUS_PARAM ) -{ - int_fast16_t aExp, bExp, zExp; - uint64_t aSig, bSig, zSig; - int_fast16_t expDiff; - - aSig = extractFloat64Frac( a ); - aExp = extractFloat64Exp( a ); - bSig = extractFloat64Frac( b ); - bExp = extractFloat64Exp( b ); - expDiff = aExp - bExp; - aSig <<= 9; - bSig <<= 9; - if ( 0 < expDiff ) { - if ( aExp == 0x7FF ) { - if ( aSig ) return propagateFloat64NaN( a, b STATUS_VAR ); - return a; - } - if ( bExp == 0 ) { - --expDiff; - } - else { - bSig |= LIT64( 0x2000000000000000 ); - } - shift64RightJamming( bSig, expDiff, &bSig ); - zExp = aExp; - } - else if ( expDiff < 0 ) { - if ( bExp == 0x7FF ) { - if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR ); - return packFloat64( zSign, 0x7FF, 0 ); - } - if ( aExp == 0 ) { - ++expDiff; - } - else { - aSig |= LIT64( 0x2000000000000000 ); - } - shift64RightJamming( aSig, - expDiff, &aSig ); - zExp = bExp; - } - else { - if ( aExp == 0x7FF ) { - if ( aSig | bSig ) return propagateFloat64NaN( a, b STATUS_VAR ); - return a; - } - if ( aExp == 0 ) { - if (STATUS(flush_to_zero)) { - if (aSig | bSig) { - float_raise(float_flag_output_denormal STATUS_VAR); - } - return packFloat64(zSign, 0, 0); - } - return packFloat64( zSign, 0, ( aSig + bSig )>>9 ); - } - zSig = LIT64( 0x4000000000000000 ) + aSig + bSig; - zExp = aExp; - goto roundAndPack; - } - aSig |= LIT64( 0x2000000000000000 ); - zSig = ( aSig + bSig )<<1; - --zExp; - if ( (int64_t) zSig < 0 ) { - zSig = aSig + bSig; - ++zExp; - } - roundAndPack: - return roundAndPackFloat64( zSign, zExp, zSig STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of subtracting the absolute values of the double- -| precision floating-point values `a' and `b'. 
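float64_trunc_to_int above forces truncation by saving the rounding mode, overriding it, and restoring it after the call. The same pattern with the C99 floating-point environment, as an illustrative sketch only (strict ISO C also wants #pragma STDC FENV_ACCESS ON, which not every compiler implements):

    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        volatile double x = 2.7;      /* volatile: defeat constant folding */
        int oldmode = fegetround();   /* save the caller's rounding mode */
        fesetround(FE_TOWARDZERO);
        printf("%.1f\n", rint(x));    /* 2.0 under round-toward-zero */
        fesetround(oldmode);          /* restore it on the way out */
        return 0;
    }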
If `zSign' is 1, the -| difference is negated before being returned. `zSign' is ignored if the -| result is a NaN. The subtraction is performed according to the IEC/IEEE -| Standard for Binary Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -static float64 subFloat64Sigs( float64 a, float64 b, flag zSign STATUS_PARAM ) -{ - int_fast16_t aExp, bExp, zExp; - uint64_t aSig, bSig, zSig; - int_fast16_t expDiff; - - aSig = extractFloat64Frac( a ); - aExp = extractFloat64Exp( a ); - bSig = extractFloat64Frac( b ); - bExp = extractFloat64Exp( b ); - expDiff = aExp - bExp; - aSig <<= 10; - bSig <<= 10; - if ( 0 < expDiff ) goto aExpBigger; - if ( expDiff < 0 ) goto bExpBigger; - if ( aExp == 0x7FF ) { - if ( aSig | bSig ) return propagateFloat64NaN( a, b STATUS_VAR ); - float_raise( float_flag_invalid STATUS_VAR); - return float64_default_nan; - } - if ( aExp == 0 ) { - aExp = 1; - bExp = 1; - } - if ( bSig < aSig ) goto aBigger; - if ( aSig < bSig ) goto bBigger; - return packFloat64( STATUS(float_rounding_mode) == float_round_down, 0, 0 ); - bExpBigger: - if ( bExp == 0x7FF ) { - if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR ); - return packFloat64( zSign ^ 1, 0x7FF, 0 ); - } - if ( aExp == 0 ) { - ++expDiff; - } - else { - aSig |= LIT64( 0x4000000000000000 ); - } - shift64RightJamming( aSig, - expDiff, &aSig ); - bSig |= LIT64( 0x4000000000000000 ); - bBigger: - zSig = bSig - aSig; - zExp = bExp; - zSign ^= 1; - goto normalizeRoundAndPack; - aExpBigger: - if ( aExp == 0x7FF ) { - if ( aSig ) return propagateFloat64NaN( a, b STATUS_VAR ); - return a; - } - if ( bExp == 0 ) { - --expDiff; - } - else { - bSig |= LIT64( 0x4000000000000000 ); - } - shift64RightJamming( bSig, expDiff, &bSig ); - aSig |= LIT64( 0x4000000000000000 ); - aBigger: - zSig = aSig - bSig; - zExp = aExp; - normalizeRoundAndPack: - --zExp; - return normalizeRoundAndPackFloat64( zSign, zExp, zSig STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of adding the double-precision floating-point values `a' -| and `b'. The operation is performed according to the IEC/IEEE Standard for -| Binary Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -float64 float64_add( float64 a, float64 b STATUS_PARAM ) -{ - flag aSign, bSign; - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); - - aSign = extractFloat64Sign( a ); - bSign = extractFloat64Sign( b ); - if ( aSign == bSign ) { - return addFloat64Sigs( a, b, aSign STATUS_VAR ); - } - else { - return subFloat64Sigs( a, b, aSign STATUS_VAR ); - } - -} - -/*---------------------------------------------------------------------------- -| Returns the result of subtracting the double-precision floating-point values -| `a' and `b'. The operation is performed according to the IEC/IEEE Standard -| for Binary Floating-Point Arithmetic. 
-*----------------------------------------------------------------------------*/ - -float64 float64_sub( float64 a, float64 b STATUS_PARAM ) -{ - flag aSign, bSign; - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); - - aSign = extractFloat64Sign( a ); - bSign = extractFloat64Sign( b ); - if ( aSign == bSign ) { - return subFloat64Sigs( a, b, aSign STATUS_VAR ); - } - else { - return addFloat64Sigs( a, b, aSign STATUS_VAR ); - } - -} - -/*---------------------------------------------------------------------------- -| Returns the result of multiplying the double-precision floating-point values -| `a' and `b'. The operation is performed according to the IEC/IEEE Standard -| for Binary Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -float64 float64_mul( float64 a, float64 b STATUS_PARAM ) -{ - flag aSign, bSign, zSign; - int_fast16_t aExp, bExp, zExp; - uint64_t aSig, bSig, zSig0, zSig1; - - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); - - aSig = extractFloat64Frac( a ); - aExp = extractFloat64Exp( a ); - aSign = extractFloat64Sign( a ); - bSig = extractFloat64Frac( b ); - bExp = extractFloat64Exp( b ); - bSign = extractFloat64Sign( b ); - zSign = aSign ^ bSign; - if ( aExp == 0x7FF ) { - if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) { - return propagateFloat64NaN( a, b STATUS_VAR ); - } - if ( ( bExp | bSig ) == 0 ) { - float_raise( float_flag_invalid STATUS_VAR); - return float64_default_nan; - } - return packFloat64( zSign, 0x7FF, 0 ); - } - if ( bExp == 0x7FF ) { - if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR ); - if ( ( aExp | aSig ) == 0 ) { - float_raise( float_flag_invalid STATUS_VAR); - return float64_default_nan; - } - return packFloat64( zSign, 0x7FF, 0 ); - } - if ( aExp == 0 ) { - if ( aSig == 0 ) return packFloat64( zSign, 0, 0 ); - normalizeFloat64Subnormal( aSig, &aExp, &aSig ); - } - if ( bExp == 0 ) { - if ( bSig == 0 ) return packFloat64( zSign, 0, 0 ); - normalizeFloat64Subnormal( bSig, &bExp, &bSig ); - } - zExp = aExp + bExp - 0x3FF; - aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<10; - bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11; - mul64To128( aSig, bSig, &zSig0, &zSig1 ); - zSig0 |= ( zSig1 != 0 ); - if ( 0 <= (int64_t) ( zSig0<<1 ) ) { - zSig0 <<= 1; - --zExp; - } - return roundAndPackFloat64( zSign, zExp, zSig0 STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of dividing the double-precision floating-point value `a' -| by the corresponding value `b'. The operation is performed according to -| the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
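float64_mul above widens to a 128-bit product with mul64To128() and then ORs the low half into the sticky bit of the high half, so rounding still knows whether anything nonzero was discarded. A compact equivalent, assuming a compiler with unsigned __int128 (GCC/Clang):

    #include <stdint.h>

    static uint64_t mul_sig_sticky(uint64_t aSig, uint64_t bSig)
    {
        unsigned __int128 p = (unsigned __int128)aSig * bSig;
        uint64_t hi = (uint64_t)(p >> 64);
        uint64_t lo = (uint64_t)p;
        return hi | (lo != 0);   /* the zSig0 |= ( zSig1 != 0 ) step above */
    }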
-*----------------------------------------------------------------------------*/ - -float64 float64_div( float64 a, float64 b STATUS_PARAM ) -{ - flag aSign, bSign, zSign; - int_fast16_t aExp, bExp, zExp; - uint64_t aSig, bSig, zSig; - uint64_t rem0, rem1; - uint64_t term0, term1; - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); - - aSig = extractFloat64Frac( a ); - aExp = extractFloat64Exp( a ); - aSign = extractFloat64Sign( a ); - bSig = extractFloat64Frac( b ); - bExp = extractFloat64Exp( b ); - bSign = extractFloat64Sign( b ); - zSign = aSign ^ bSign; - if ( aExp == 0x7FF ) { - if ( aSig ) return propagateFloat64NaN( a, b STATUS_VAR ); - if ( bExp == 0x7FF ) { - if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR ); - float_raise( float_flag_invalid STATUS_VAR); - return float64_default_nan; - } - return packFloat64( zSign, 0x7FF, 0 ); - } - if ( bExp == 0x7FF ) { - if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR ); - return packFloat64( zSign, 0, 0 ); - } - if ( bExp == 0 ) { - if ( bSig == 0 ) { - if ( ( aExp | aSig ) == 0 ) { - float_raise( float_flag_invalid STATUS_VAR); - return float64_default_nan; - } - float_raise( float_flag_divbyzero STATUS_VAR); - return packFloat64( zSign, 0x7FF, 0 ); - } - normalizeFloat64Subnormal( bSig, &bExp, &bSig ); - } - if ( aExp == 0 ) { - if ( aSig == 0 ) return packFloat64( zSign, 0, 0 ); - normalizeFloat64Subnormal( aSig, &aExp, &aSig ); - } - zExp = aExp - bExp + 0x3FD; - aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<10; - bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11; - if ( bSig <= ( aSig + aSig ) ) { - aSig >>= 1; - ++zExp; - } - zSig = estimateDiv128To64( aSig, 0, bSig ); - if ( ( zSig & 0x1FF ) <= 2 ) { - mul64To128( bSig, zSig, &term0, &term1 ); - sub128( aSig, 0, term0, term1, &rem0, &rem1 ); - while ( (int64_t) rem0 < 0 ) { - --zSig; - add128( rem0, rem1, 0, bSig, &rem0, &rem1 ); - } - zSig |= ( rem1 != 0 ); - } - return roundAndPackFloat64( zSign, zExp, zSig STATUS_VAR ); - -} /*---------------------------------------------------------------------------- | Returns the remainder of the double-precision floating-point value `a' @@ -3961,16 +5139,16 @@ float64 float64_div( float64 a, float64 b STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
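float64_rem below computes the IEEE 754 remainder, which rounds the implicit quotient to nearest-even rather than truncating it, so the result can be negative even for positive operands. The libc remainder() function has the same semantics, which makes the difference from fmod() easy to see:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        printf("%.1f\n", fmod(5.0, 2.0));       /*  1.0: truncated quotient 2 */
        printf("%.1f\n", remainder(5.0, 2.0));  /*  1.0: quotient 2.5 rounds to 2 */
        printf("%.1f\n", remainder(7.0, 2.0));  /* -1.0: quotient 3.5 rounds to 4 */
        return 0;
    }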
*----------------------------------------------------------------------------*/ -float64 float64_rem( float64 a, float64 b STATUS_PARAM ) +float64 float64_rem(float64 a, float64 b, float_status *status) { flag aSign, zSign; - int_fast16_t aExp, bExp, expDiff; + int aExp, bExp, expDiff; uint64_t aSig, bSig; uint64_t q, alternateASig; int64_t sigMean; - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); + a = float64_squash_input_denormal(a, status); + b = float64_squash_input_denormal(b, status); aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); @@ -3978,19 +5156,21 @@ float64 float64_rem( float64 a, float64 b STATUS_PARAM ) bExp = extractFloat64Exp( b ); if ( aExp == 0x7FF ) { if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) { - return propagateFloat64NaN( a, b STATUS_VAR ); + return propagateFloat64NaN(a, b, status); } - float_raise( float_flag_invalid STATUS_VAR); - return float64_default_nan; + float_raise(float_flag_invalid, status); + return float64_default_nan(status); } if ( bExp == 0x7FF ) { - if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR ); + if (bSig) { + return propagateFloat64NaN(a, b, status); + } return a; } if ( bExp == 0 ) { if ( bSig == 0 ) { - float_raise( float_flag_invalid STATUS_VAR); - return float64_default_nan; + float_raise(float_flag_invalid, status); + return float64_default_nan(status); } normalizeFloat64Subnormal( bSig, &bExp, &bSig ); } @@ -3999,8 +5179,8 @@ float64 float64_rem( float64 a, float64 b STATUS_PARAM ) normalizeFloat64Subnormal( aSig, &aExp, &aSig ); } expDiff = aExp - bExp; - aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<11; - bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11; + aSig = (aSig | UINT64_C(0x0010000000000000)) << 11; + bSig = (bSig | UINT64_C(0x0010000000000000)) << 11; if ( expDiff < 0 ) { if ( expDiff < -1 ) return a; aSig >>= 1; @@ -4011,7 +5191,11 @@ float64 float64_rem( float64 a, float64 b STATUS_PARAM ) while ( 0 < expDiff ) { q = estimateDiv128To64( aSig, 0, bSig ); q = ( 2 < q ) ? q - 2 : 0; - aSig = 0- ( ( bSig>>2 ) * q ); +#ifdef _MSC_VER + aSig = 0ULL - ( ( bSig>>2 ) * q ); +#else + aSig = - ( ( bSig>>2 ) * q ); +#endif expDiff -= 62; } expDiff += 64; @@ -4036,306 +5220,12 @@ float64 float64_rem( float64 a, float64 b STATUS_PARAM ) aSig = alternateASig; } zSign = ( (int64_t) aSig < 0 ); - if ( zSign ) aSig = 0- aSig; - return normalizeRoundAndPackFloat64( aSign ^ zSign, bExp, aSig STATUS_VAR ); - -} - -/*---------------------------------------------------------------------------- -| Returns the result of multiplying the double-precision floating-point values -| `a' and `b' then adding 'c', with no intermediate rounding step after the -| multiplication. The operation is performed according to the IEC/IEEE -| Standard for Binary Floating-Point Arithmetic 754-2008. -| The flags argument allows the caller to select negation of the -| addend, the intermediate product, or the final result. (The difference -| between this and having the caller do a separate negation is that negating -| externally will flip the sign bit on NaNs.) 
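A fused multiply-add keeps the full double-width product and rounds only once at the end, which is exactly what the special-case ladder that follows prepares for. The effect is observable with the C99 fma() on hosts with a correctly rounded implementation (a minimal sketch, not from this patch):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double a = 1.0 + 0x1p-52;          /* 1 + one ulp */
        double b = 1.0 - 0x1p-52;          /* 1 - one ulp */
        /* a*b is exactly 1 - 2^-104; a separate multiply rounds that to
         * 1.0, so the low product bits survive only via the fused path. */
        double p = a * b;                  /* rounds: p == 1.0 exactly */
        printf("%g\n", p - 1.0);           /* 0 */
        printf("%g\n", fma(a, b, -1.0));   /* about -4.93e-32, i.e. -2^-104 */
        return 0;
    }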
-*----------------------------------------------------------------------------*/ - -float64 float64_muladd(float64 a, float64 b, float64 c, int flags STATUS_PARAM) -{ - flag aSign, bSign, cSign, zSign; - int_fast16_t aExp, bExp, cExp, pExp, zExp, expDiff; - uint64_t aSig, bSig, cSig; - flag pInf, pZero, pSign; - uint64_t pSig0, pSig1, cSig0, cSig1, zSig0, zSig1; - int shiftcount; - flag signflip, infzero; - - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); - c = float64_squash_input_denormal(c STATUS_VAR); - aSig = extractFloat64Frac(a); - aExp = extractFloat64Exp(a); - aSign = extractFloat64Sign(a); - bSig = extractFloat64Frac(b); - bExp = extractFloat64Exp(b); - bSign = extractFloat64Sign(b); - cSig = extractFloat64Frac(c); - cExp = extractFloat64Exp(c); - cSign = extractFloat64Sign(c); - - infzero = ((aExp == 0 && aSig == 0 && bExp == 0x7ff && bSig == 0) || - (aExp == 0x7ff && aSig == 0 && bExp == 0 && bSig == 0)); - - /* It is implementation-defined whether the cases of (0,inf,qnan) - * and (inf,0,qnan) raise InvalidOperation or not (and what QNaN - * they return if they do), so we have to hand this information - * off to the target-specific pick-a-NaN routine. - */ - if (((aExp == 0x7ff) && aSig) || - ((bExp == 0x7ff) && bSig) || - ((cExp == 0x7ff) && cSig)) { - return propagateFloat64MulAddNaN(a, b, c, infzero STATUS_VAR); - } - - if (infzero) { - float_raise(float_flag_invalid STATUS_VAR); - return float64_default_nan; - } - - if (flags & float_muladd_negate_c) { - cSign ^= 1; - } - - signflip = (flags & float_muladd_negate_result) ? 1 : 0; - - /* Work out the sign and type of the product */ - pSign = aSign ^ bSign; - if (flags & float_muladd_negate_product) { - pSign ^= 1; - } - pInf = (aExp == 0x7ff) || (bExp == 0x7ff); - pZero = ((aExp | aSig) == 0) || ((bExp | bSig) == 0); - - if (cExp == 0x7ff) { - if (pInf && (pSign ^ cSign)) { - /* addition of opposite-signed infinities => InvalidOperation */ - float_raise(float_flag_invalid STATUS_VAR); - return float64_default_nan; - } - /* Otherwise generate an infinity of the same sign */ - return packFloat64(cSign ^ signflip, 0x7ff, 0); - } - - if (pInf) { - return packFloat64(pSign ^ signflip, 0x7ff, 0); - } - - if (pZero) { - if (cExp == 0) { - if (cSig == 0) { - /* Adding two exact zeroes */ - if (pSign == cSign) { - zSign = pSign; - } else if (STATUS(float_rounding_mode) == float_round_down) { - zSign = 1; - } else { - zSign = 0; - } - return packFloat64(zSign ^ signflip, 0, 0); - } - /* Exact zero plus a denorm */ - if (STATUS(flush_to_zero)) { - float_raise(float_flag_output_denormal STATUS_VAR); - return packFloat64(cSign ^ signflip, 0, 0); - } - } - /* Zero plus something non-zero : just return the something */ - if (flags & float_muladd_halve_result) { - if (cExp == 0) { - normalizeFloat64Subnormal(cSig, &cExp, &cSig); - } - /* Subtract one to halve, and one again because roundAndPackFloat64 - * wants one less than the true exponent. - */ - cExp -= 2; - cSig = (cSig | 0x0010000000000000ULL) << 10; - return roundAndPackFloat64(cSign ^ signflip, cExp, cSig STATUS_VAR); - } - return packFloat64(cSign ^ signflip, cExp, cSig); - } - - if (aExp == 0) { - normalizeFloat64Subnormal(aSig, &aExp, &aSig); - } - if (bExp == 0) { - normalizeFloat64Subnormal(bSig, &bExp, &bSig); - } - - /* Calculate the actual result a * b + c */ - - /* Multiply first; this is easy. 
*/ - /* NB: we subtract 0x3fe where float64_mul() subtracts 0x3ff - * because we want the true exponent, not the "one-less-than" - * flavour that roundAndPackFloat64() takes. - */ - pExp = aExp + bExp - 0x3fe; - aSig = (aSig | LIT64(0x0010000000000000))<<10; - bSig = (bSig | LIT64(0x0010000000000000))<<11; - mul64To128(aSig, bSig, &pSig0, &pSig1); - if ((int64_t)(pSig0 << 1) >= 0) { - shortShift128Left(pSig0, pSig1, 1, &pSig0, &pSig1); - pExp--; - } - - zSign = pSign ^ signflip; - - /* Now [pSig0:pSig1] is the significand of the multiply, with the explicit - * bit in position 126. - */ - if (cExp == 0) { - if (!cSig) { - /* Throw out the special case of c being an exact zero now */ - shift128RightJamming(pSig0, pSig1, 64, &pSig0, &pSig1); - if (flags & float_muladd_halve_result) { - pExp--; - } - return roundAndPackFloat64(zSign, pExp - 1, - pSig1 STATUS_VAR); - } - normalizeFloat64Subnormal(cSig, &cExp, &cSig); - } - - /* Shift cSig and add the explicit bit so [cSig0:cSig1] is the - * significand of the addend, with the explicit bit in position 126. - */ - cSig0 = cSig << (126 - 64 - 52); - cSig1 = 0; - cSig0 |= LIT64(0x4000000000000000); - expDiff = pExp - cExp; - - if (pSign == cSign) { - /* Addition */ - if (expDiff > 0) { - /* scale c to match p */ - shift128RightJamming(cSig0, cSig1, expDiff, &cSig0, &cSig1); - zExp = pExp; - } else if (expDiff < 0) { - /* scale p to match c */ - shift128RightJamming(pSig0, pSig1, -expDiff, &pSig0, &pSig1); - zExp = cExp; - } else { - /* no scaling needed */ - zExp = cExp; - } - /* Add significands and make sure explicit bit ends up in posn 126 */ - add128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1); - if ((int64_t)zSig0 < 0) { - shift128RightJamming(zSig0, zSig1, 1, &zSig0, &zSig1); - } else { - zExp--; - } - shift128RightJamming(zSig0, zSig1, 64, &zSig0, &zSig1); - if (flags & float_muladd_halve_result) { - zExp--; - } - return roundAndPackFloat64(zSign, zExp, zSig1 STATUS_VAR); - } else { - /* Subtraction */ - if (expDiff > 0) { - shift128RightJamming(cSig0, cSig1, expDiff, &cSig0, &cSig1); - sub128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1); - zExp = pExp; - } else if (expDiff < 0) { - shift128RightJamming(pSig0, pSig1, -expDiff, &pSig0, &pSig1); - sub128(cSig0, cSig1, pSig0, pSig1, &zSig0, &zSig1); - zExp = cExp; - zSign ^= 1; - } else { - zExp = pExp; - if (lt128(cSig0, cSig1, pSig0, pSig1)) { - sub128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1); - } else if (lt128(pSig0, pSig1, cSig0, cSig1)) { - sub128(cSig0, cSig1, pSig0, pSig1, &zSig0, &zSig1); - zSign ^= 1; - } else { - /* Exact zero */ - zSign = signflip; - if (STATUS(float_rounding_mode) == float_round_down) { - zSign ^= 1; - } - return packFloat64(zSign, 0, 0); - } - } - --zExp; - /* Do the equivalent of normalizeRoundAndPackFloat64() but - * starting with the significand in a pair of uint64_t. - */ - if (zSig0) { - shiftcount = countLeadingZeros64(zSig0) - 1; - shortShift128Left(zSig0, zSig1, shiftcount, &zSig0, &zSig1); - if (zSig1) { - zSig0 |= 1; - } - zExp -= shiftcount; - } else { - shiftcount = countLeadingZeros64(zSig1); - if (shiftcount == 0) { - zSig0 = (zSig1 >> 1) | (zSig1 & 1); - zExp -= 63; - } else { - shiftcount--; - zSig0 = zSig1 << shiftcount; - zExp -= (shiftcount + 64); - } - } - if (flags & float_muladd_halve_result) { - zExp--; - } - return roundAndPackFloat64(zSign, zExp, zSig0 STATUS_VAR); - } -} - -/*---------------------------------------------------------------------------- -| Returns the square root of the double-precision floating-point value `a'. 
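The muladd code above leans on shift64RightJamming()/shift128RightJamming(): right shifts that OR every discarded bit into bit 0 of the result, preserving inexactness for the final rounding. A 64-bit sketch of that contract (illustrative, not the file's implementation):

    #include <stdint.h>

    static uint64_t shift64_right_jamming(uint64_t a, int count)
    {
        if (count == 0) {
            return a;
        }
        if (count < 64) {
            /* The "jam": fold all shifted-out bits into the sticky LSB. */
            return (a >> count) | ((a << (64 - count)) != 0);
        }
        return a != 0;   /* everything shifted out; only stickiness remains */
    }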
-| The operation is performed according to the IEC/IEEE Standard for Binary -| Floating-Point Arithmetic. -*----------------------------------------------------------------------------*/ - -float64 float64_sqrt( float64 a STATUS_PARAM ) -{ - flag aSign; - int_fast16_t aExp, zExp; - uint64_t aSig, zSig, doubleZSig; - uint64_t rem0, rem1, term0, term1; - a = float64_squash_input_denormal(a STATUS_VAR); - - aSig = extractFloat64Frac( a ); - aExp = extractFloat64Exp( a ); - aSign = extractFloat64Sign( a ); - if ( aExp == 0x7FF ) { - if ( aSig ) return propagateFloat64NaN( a, a STATUS_VAR ); - if ( ! aSign ) return a; - float_raise( float_flag_invalid STATUS_VAR); - return float64_default_nan; - } - if ( aSign ) { - if ( ( aExp | aSig ) == 0 ) return a; - float_raise( float_flag_invalid STATUS_VAR); - return float64_default_nan; - } - if ( aExp == 0 ) { - if ( aSig == 0 ) return float64_zero; - normalizeFloat64Subnormal( aSig, &aExp, &aSig ); - } - zExp = ( ( aExp - 0x3FF )>>1 ) + 0x3FE; - aSig |= LIT64( 0x0010000000000000 ); - zSig = estimateSqrt32( aExp, (uint32_t)(aSig>>21) ); - aSig <<= 9 - ( aExp & 1 ); - zSig = estimateDiv128To64( aSig, 0, zSig<<32 ) + ( zSig<<30 ); - if ( ( zSig & 0x1FF ) <= 5 ) { - doubleZSig = zSig<<1; - mul64To128( zSig, zSig, &term0, &term1 ); - sub128( aSig, 0, term0, term1, &rem0, &rem1 ); - while ( (int64_t) rem0 < 0 ) { - --zSig; - doubleZSig -= 2; - add128( rem0, rem1, zSig>>63, doubleZSig | 1, &rem0, &rem1 ); - } - zSig |= ( ( rem0 | rem1 ) != 0 ); - } - return roundAndPackFloat64( 0, zExp, zSig STATUS_VAR ); +#ifdef _MSC_VER + if ( zSign ) aSig = 0 - aSig; +#else + if ( zSign ) aSig = - aSig; +#endif + return normalizeRoundAndPackFloat64(aSign ^ zSign, bExp, aSig, status); } @@ -4344,12 +5234,12 @@ float64 float64_sqrt( float64 a STATUS_PARAM ) | The operation is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. 
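float64_sqrt above starts from a coarse estimate (estimateSqrt32), then walks the candidate down while the 128-bit remainder a - z*z is negative. The loosely related bit-at-a-time integer square root below shows the same trial-subtract idea without the estimate step; it is a sketch only, not the scheme the file uses:

    #include <stdint.h>

    static uint32_t isqrt64(uint64_t num)
    {
        uint64_t res = 0;
        uint64_t bit = 1ULL << 62;       /* highest even bit position */

        while (bit > num) bit >>= 2;
        while (bit != 0) {
            if (num >= res + bit) {      /* trial subtract of the candidate */
                num -= res + bit;
                res = (res >> 1) + bit;
            } else {
                res >>= 1;
            }
            bit >>= 2;
        }
        return (uint32_t)res;            /* floor(sqrt(num)) */
    }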
*----------------------------------------------------------------------------*/ -float64 float64_log2( float64 a STATUS_PARAM ) +float64 float64_log2(float64 a, float_status *status) { flag aSign, zSign; - int_fast16_t aExp; + int aExp; uint64_t aSig, aSig0, aSig1, zSig, i; - a = float64_squash_input_denormal(a STATUS_VAR); + a = float64_squash_input_denormal(a, status); aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); @@ -4360,30 +5250,36 @@ float64 float64_log2( float64 a STATUS_PARAM ) normalizeFloat64Subnormal( aSig, &aExp, &aSig ); } if ( aSign ) { - float_raise( float_flag_invalid STATUS_VAR); - return float64_default_nan; + float_raise(float_flag_invalid, status); + return float64_default_nan(status); } if ( aExp == 0x7FF ) { - if ( aSig ) return propagateFloat64NaN( a, float64_zero STATUS_VAR ); + if (aSig) { + return propagateFloat64NaN(a, float64_zero, status); + } return a; } aExp -= 0x3FF; - aSig |= LIT64( 0x0010000000000000 ); + aSig |= UINT64_C(0x0010000000000000); zSign = aExp < 0; zSig = (uint64_t)aExp << 52; for (i = 1LL << 51; i > 0; i >>= 1) { mul64To128( aSig, aSig, &aSig0, &aSig1 ); aSig = ( aSig0 << 12 ) | ( aSig1 >> 52 ); - if ( aSig & LIT64( 0x0020000000000000 ) ) { + if ( aSig & UINT64_C(0x0020000000000000) ) { aSig >>= 1; zSig |= i; } } if ( zSign ) - zSig = 0-zSig; - return normalizeRoundAndPackFloat64( zSign, 0x408, zSig STATUS_VAR ); +#ifdef _MSC_VER + zSig = 0 - zSig; +#else + zSig = -zSig; +#endif + return normalizeRoundAndPackFloat64(zSign, 0x408, zSig, status); } /*---------------------------------------------------------------------------- @@ -4393,16 +5289,16 @@ float64 float64_log2( float64 a STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float64_eq( float64 a, float64 b STATUS_PARAM ) +int float64_eq(float64 a, float64 b, float_status *status) { uint64_t av, bv; - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); + a = float64_squash_input_denormal(a, status); + b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } av = float64_val(a); @@ -4418,17 +5314,17 @@ int float64_eq( float64 a, float64 b STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float64_le( float64 a, float64 b STATUS_PARAM ) +int float64_le(float64 a, float64 b, float_status *status) { flag aSign, bSign; uint64_t av, bv; - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); + a = float64_squash_input_denormal(a, status); + b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat64Sign( a ); @@ -4447,17 +5343,17 @@ int float64_le( float64 a, float64 b STATUS_PARAM ) | to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
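The _MSC_VER branches this patch adds (in float64_log2 above and in the float32/float64 rem and log2 helpers elsewhere) exist because MSVC emits warning C4146 for unary minus applied to an unsigned operand. Subtracting from zero yields the identical two's-complement result, as this sketch checks:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t sig = 42;
        uint64_t a = 0ULL - sig;              /* MSVC-friendly spelling */
        uint64_t b = (uint64_t)-(int64_t)sig; /* the plain negation */
        assert(a == b && a == UINT64_C(0xFFFFFFFFFFFFFFD6));
        return 0;
    }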
*----------------------------------------------------------------------------*/ -int float64_lt( float64 a, float64 b STATUS_PARAM ) +int float64_lt(float64 a, float64 b, float_status *status) { flag aSign, bSign; uint64_t av, bv; - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); + a = float64_squash_input_denormal(a, status); + b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat64Sign( a ); @@ -4476,15 +5372,15 @@ int float64_lt( float64 a, float64 b STATUS_PARAM ) | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float64_unordered( float64 a, float64 b STATUS_PARAM ) +int float64_unordered(float64 a, float64 b, float_status *status) { - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); + a = float64_squash_input_denormal(a, status); + b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 1; } return 0; @@ -4497,17 +5393,18 @@ int float64_unordered( float64 a, float64 b STATUS_PARAM ) | for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float64_eq_quiet( float64 a, float64 b STATUS_PARAM ) +int float64_eq_quiet(float64 a, float64 b, float_status *status) { uint64_t av, bv; - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); + a = float64_squash_input_denormal(a, status); + b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float64_is_signaling_nan(a, status) + || float64_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -4524,18 +5421,19 @@ int float64_eq_quiet( float64 a, float64 b STATUS_PARAM ) | IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/ -int float64_le_quiet( float64 a, float64 b STATUS_PARAM ) +int float64_le_quiet(float64 a, float64 b, float_status *status) { flag aSign, bSign; uint64_t av, bv; - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); + a = float64_squash_input_denormal(a, status); + b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float64_is_signaling_nan(a, status) + || float64_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -4555,18 +5453,19 @@ int float64_le_quiet( float64 a, float64 b STATUS_PARAM ) | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float64_lt_quiet( float64 a, float64 b STATUS_PARAM ) +int float64_lt_quiet(float64 a, float64 b, float_status *status) { flag aSign, bSign; uint64_t av, bv; - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); + a = float64_squash_input_denormal(a, status); + b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float64_is_signaling_nan(a, status) + || float64_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -4586,16 +5485,17 @@ int float64_lt_quiet( float64 a, float64 b STATUS_PARAM ) | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float64_unordered_quiet( float64 a, float64 b STATUS_PARAM ) +int float64_unordered_quiet(float64 a, float64 b, float_status *status) { - a = float64_squash_input_denormal(a STATUS_VAR); - b = float64_squash_input_denormal(b STATUS_VAR); + a = float64_squash_input_denormal(a, status); + b = float64_squash_input_denormal(b, status); if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) ) { - if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float64_is_signaling_nan(a, status) + || float64_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 1; } @@ -4612,15 +5512,15 @@ int float64_unordered_quiet( float64 a, float64 b STATUS_PARAM ) | overflows, the largest integer with the same sign as `a' is returned. 
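The _quiet predicates above differ from their plain counterparts only in when the invalid flag is raised: float64_le signals on any NaN operand, while float64_le_quiet signals only on a signaling NaN. ISO C exposes the same split for host floats through the comparison macros; a sketch using standard <math.h>/<fenv.h> (not the softfloat API):

    /* May need "#pragma STDC FENV_ACCESS ON" for strict conformance. */
    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        volatile double qnan = nan(""), one = 1.0;

        feclearexcept(FE_ALL_EXCEPT);
        (void)(qnan <= one);           /* operator: raises invalid on any NaN */
        printf("<= raised invalid: %d\n", fetestexcept(FE_INVALID) != 0);

        feclearexcept(FE_ALL_EXCEPT);
        (void)islessequal(qnan, one);  /* quiet: silent on a quiet NaN */
        printf("islessequal raised invalid: %d\n",
               fetestexcept(FE_INVALID) != 0);
        return 0;
    }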
*----------------------------------------------------------------------------*/ -int32 floatx80_to_int32( floatx80 a STATUS_PARAM ) +int32_t floatx80_to_int32(floatx80 a, float_status *status) { flag aSign; - int32 aExp, shiftCount; + int32_t aExp, shiftCount; uint64_t aSig; if (floatx80_invalid_encoding(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return (int32)(1U << 31); + float_raise(float_flag_invalid, status); + return 1 << 31; } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -4629,7 +5529,7 @@ int32 floatx80_to_int32( floatx80 a STATUS_PARAM ) shiftCount = 0x4037 - aExp; if ( shiftCount <= 0 ) shiftCount = 1; shift64RightJamming( aSig, shiftCount, &aSig ); - return roundAndPackInt32( aSign, aSig STATUS_VAR ); + return roundAndPackInt32(aSign, aSig, status); } @@ -4643,16 +5543,16 @@ int32 floatx80_to_int32( floatx80 a STATUS_PARAM ) | sign as `a' is returned. *----------------------------------------------------------------------------*/ -int32 floatx80_to_int32_round_to_zero( floatx80 a STATUS_PARAM ) +int32_t floatx80_to_int32_round_to_zero(floatx80 a, float_status *status) { flag aSign; - int32 aExp, shiftCount; + int32_t aExp, shiftCount; uint64_t aSig, savedASig; int32_t z; if (floatx80_invalid_encoding(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return (int32)(1U << 31); + float_raise(float_flag_invalid, status); + return 1 << 31; } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -4662,21 +5562,23 @@ int32 floatx80_to_int32_round_to_zero( floatx80 a STATUS_PARAM ) goto invalid; } else if ( aExp < 0x3FFF ) { - if ( aExp || aSig ) STATUS(float_exception_flags) |= float_flag_inexact; + if (aExp || aSig) { + status->float_exception_flags |= float_flag_inexact; + } return 0; } shiftCount = 0x403E - aExp; savedASig = aSig; aSig >>= shiftCount; - z = (int32_t)aSig; - if ( aSign && (z != 0x80000000) ) z = - z; + z = aSig; + if ( aSign ) z = - z; if ( ( z < 0 ) ^ aSign ) { invalid: - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return aSign ? (int32_t) 0x80000000 : 0x7FFFFFFF; } if ( ( aSig<<shiftCount ) != savedASig ) { - STATUS(float_exception_flags) |= float_flag_inexact; + status->float_exception_flags |= float_flag_inexact; } return z; @@ -4692,15 +5594,15 @@ int32 floatx80_to_int32_round_to_zero( floatx80 a STATUS_PARAM ) | overflows, the largest integer with the same sign as `a' is returned. *----------------------------------------------------------------------------*/ -int64 floatx80_to_int64( floatx80 a STATUS_PARAM ) +int64_t floatx80_to_int64(floatx80 a, float_status *status) { flag aSign; - int32 aExp, shiftCount; + int32_t aExp, shiftCount; uint64_t aSig, aSigExtra; if (floatx80_invalid_encoding(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return (uint64_t)1 << 63; + float_raise(float_flag_invalid, status); + return 1ULL << 63; } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -4708,21 +5610,18 @@ int64 floatx80_to_int64( floatx80 a STATUS_PARAM ) shiftCount = 0x403E - aExp; if ( shiftCount <= 0 ) { if ( shiftCount ) { - float_raise( float_flag_invalid STATUS_VAR); - if ( !
aSign - || ( ( aExp == 0x7FFF ) - && ( aSig != LIT64( 0x8000000000000000 ) ) ) - ) { - return LIT64( 0x7FFFFFFFFFFFFFFF ); + float_raise(float_flag_invalid, status); + if (!aSign || floatx80_is_any_nan(a)) { + return INT64_MAX; } - return (int64_t) LIT64( 0x8000000000000000 ); + return INT64_MIN; } aSigExtra = 0; } else { shift64ExtraRightJamming( aSig, 0, shiftCount, &aSig, &aSigExtra ); } - return roundAndPackInt64( aSign, aSig, aSigExtra STATUS_VAR ); + return roundAndPackInt64(aSign, aSig, aSigExtra, status); } @@ -4736,38 +5635,40 @@ int64 floatx80_to_int64( floatx80 a STATUS_PARAM ) | sign as `a' is returned. *----------------------------------------------------------------------------*/ -int64 floatx80_to_int64_round_to_zero( floatx80 a STATUS_PARAM ) +int64_t floatx80_to_int64_round_to_zero(floatx80 a, float_status *status) { flag aSign; - int32 aExp, shiftCount; + int32_t aExp, shiftCount; uint64_t aSig; - int64 z; + int64_t z; if (floatx80_invalid_encoding(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return (uint64_t)1 << 63; + float_raise(float_flag_invalid, status); + return 1ULL << 63; } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); shiftCount = aExp - 0x403E; if ( 0 <= shiftCount ) { - aSig &= LIT64( 0x7FFFFFFFFFFFFFFF ); + aSig &= UINT64_C(0x7FFFFFFFFFFFFFFF); if ( ( a.high != 0xC03E ) || aSig ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); if ( ! aSign || ( ( aExp == 0x7FFF ) && aSig ) ) { - return LIT64( 0x7FFFFFFFFFFFFFFF ); + return INT64_MAX; } } - return (int64_t) LIT64( 0x8000000000000000 ); + return INT64_MIN; } else if ( aExp < 0x3FFF ) { - if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact; + if (aExp | aSig) { + status->float_exception_flags |= float_flag_inexact; + } return 0; } z = aSig>>( - shiftCount ); if ( (uint64_t) ( aSig<<( shiftCount & 63 ) ) ) { - STATUS(float_exception_flags) |= float_flag_inexact; + status->float_exception_flags |= float_flag_inexact; } if ( aSign ) z = - z; return z; @@ -4781,28 +5682,28 @@ int64 floatx80_to_int64_round_to_zero( floatx80 a STATUS_PARAM ) | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -float32 floatx80_to_float32( floatx80 a STATUS_PARAM ) +float32 floatx80_to_float32(floatx80 a, float_status *status) { flag aSign; - int32 aExp; + int32_t aExp; uint64_t aSig; if (floatx80_invalid_encoding(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return float32_default_nan; + float_raise(float_flag_invalid, status); + return float32_default_nan(status); } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( aExp == 0x7FFF ) { if ( (uint64_t) ( aSig<<1 ) ) { - return commonNaNToFloat32( floatx80ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return commonNaNToFloat32(floatx80ToCommonNaN(a, status), status); } return packFloat32( aSign, 0xFF, 0 ); } shift64RightJamming( aSig, 33, &aSig ); if ( aExp || aSig ) aExp -= 0x3F81; - return roundAndPackFloat32( aSign, aExp, (uint32_t)aSig STATUS_VAR ); + return roundAndPackFloat32(aSign, aExp, aSig, status); } @@ -4813,28 +5714,28 @@ float32 floatx80_to_float32( floatx80 a STATUS_PARAM ) | Floating-Point Arithmetic. 
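The INT64_MAX/INT64_MIN returns above make the conversions saturate rather than hit the undefined behaviour of an out-of-range C cast, and a NaN input deliberately maps to INT64_MAX via the floatx80_is_any_nan() test. The same policy for host doubles, as a hypothetical helper (sat_to_i64 is not part of the patch):

    #include <stdint.h>

    static int64_t sat_to_i64(double x)
    {
        if (x != x) {                           /* NaN, as in the hunk above */
            return INT64_MAX;
        }
        if (x >= 9223372036854775808.0) {       /* >= 2^63: clamp high */
            return INT64_MAX;
        }
        if (x < -9223372036854775808.0) {       /* < -2^63: clamp low */
            return INT64_MIN;
        }
        return (int64_t)x;                      /* in range: cast is defined */
    }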
*----------------------------------------------------------------------------*/ -float64 floatx80_to_float64( floatx80 a STATUS_PARAM ) +float64 floatx80_to_float64(floatx80 a, float_status *status) { flag aSign; - int32 aExp; + int32_t aExp; uint64_t aSig, zSig; if (floatx80_invalid_encoding(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return float64_default_nan; + float_raise(float_flag_invalid, status); + return float64_default_nan(status); } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( aExp == 0x7FFF ) { if ( (uint64_t) ( aSig<<1 ) ) { - return commonNaNToFloat64( floatx80ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return commonNaNToFloat64(floatx80ToCommonNaN(a, status), status); } return packFloat64( aSign, 0x7FF, 0 ); } shift64RightJamming( aSig, 1, &zSig ); if ( aExp || aSig ) aExp -= 0x3C01; - return roundAndPackFloat64( aSign, aExp, zSig STATUS_VAR ); + return roundAndPackFloat64(aSign, aExp, zSig, status); } @@ -4845,27 +5746,43 @@ float64 floatx80_to_float64( floatx80 a STATUS_PARAM ) | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -float128 floatx80_to_float128( floatx80 a STATUS_PARAM ) +float128 floatx80_to_float128(floatx80 a, float_status *status) { flag aSign; - int_fast16_t aExp; + int aExp; uint64_t aSig, zSig0, zSig1; if (floatx80_invalid_encoding(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return float128_default_nan; + float_raise(float_flag_invalid, status); + return float128_default_nan(status); } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) { - return commonNaNToFloat128( floatx80ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return commonNaNToFloat128(floatx80ToCommonNaN(a, status), status); } shift128Right( aSig<<1, 0, 16, &zSig0, &zSig1 ); return packFloat128( aSign, aExp, zSig0, zSig1 ); } +/*---------------------------------------------------------------------------- +| Rounds the extended double-precision floating-point value `a' +| to the precision provided by floatx80_rounding_precision and returns the +| result as an extended double-precision floating-point value. +| The operation is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +floatx80 floatx80_round(floatx80 a, float_status *status) +{ + return roundAndPackFloatx80(status->floatx80_rounding_precision, + extractFloatx80Sign(a), + extractFloatx80Exp(a), + extractFloatx80Frac(a), 0, status); +} + /*---------------------------------------------------------------------------- | Rounds the extended double-precision floating-point value `a' to an integer, | and returns the result as an extended quadruple-precision floating-point @@ -4873,21 +5790,21 @@ float128 floatx80_to_float128( floatx80 a STATUS_PARAM ) | Binary Floating-Point Arithmetic. 
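Every narrowing path above funnels through shift64RightJamming(), which ORs all shifted-out bits into the least significant bit of the result ("jamming") so that the later rounding step can still observe inexactness. A self-contained sketch of that semantic (modelled on, not copied from, the softfloat helper):

    #include <stdint.h>

    static uint64_t shift_right_jam64(uint64_t v, unsigned count)
    {
        if (count == 0) {
            return v;
        }
        if (count < 64) {
            /* keep the high bits, jam the lost low bits into bit 0 */
            return (v >> count) | ((v << (64 - count)) != 0);
        }
        return v != 0;  /* everything shifted out: only stickiness left */
    }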
*----------------------------------------------------------------------------*/ -floatx80 floatx80_round_to_int( floatx80 a STATUS_PARAM ) +floatx80 floatx80_round_to_int(floatx80 a, float_status *status) { flag aSign; - int32 aExp; + int32_t aExp; uint64_t lastBitMask, roundBitsMask; floatx80 z; if (floatx80_invalid_encoding(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return floatx80_default_nan; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } aExp = extractFloatx80Exp( a ); if ( 0x403E <= aExp ) { if ( ( aExp == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( a )<<1 ) ) { - return propagateFloatx80NaN( a, a STATUS_VAR ); + return propagateFloatx80NaN(a, a, status); } return a; } @@ -4896,30 +5813,30 @@ floatx80 floatx80_round_to_int( floatx80 a STATUS_PARAM ) && ( (uint64_t) ( extractFloatx80Frac( a )<<1 ) == 0 ) ) { return a; } - STATUS(float_exception_flags) |= float_flag_inexact; + status->float_exception_flags |= float_flag_inexact; aSign = extractFloatx80Sign( a ); - switch ( STATUS(float_rounding_mode) ) { + switch (status->float_rounding_mode) { case float_round_nearest_even: if ( ( aExp == 0x3FFE ) && (uint64_t) ( extractFloatx80Frac( a )<<1 ) ) { return - packFloatx80( aSign, 0x3FFF, LIT64( 0x8000000000000000 ) ); + packFloatx80( aSign, 0x3FFF, UINT64_C(0x8000000000000000)); } break; case float_round_ties_away: if (aExp == 0x3FFE) { - return packFloatx80(aSign, 0x3FFF, LIT64(0x8000000000000000)); + return packFloatx80(aSign, 0x3FFF, UINT64_C(0x8000000000000000)); } break; case float_round_down: return aSign ? - packFloatx80( 1, 0x3FFF, LIT64( 0x8000000000000000 ) ) + packFloatx80( 1, 0x3FFF, UINT64_C(0x8000000000000000)) : packFloatx80( 0, 0, 0 ); case float_round_up: return aSign ? packFloatx80( 1, 0, 0 ) - : packFloatx80( 0, 0x3FFF, LIT64( 0x8000000000000000 ) ); + : packFloatx80( 0, 0x3FFF, UINT64_C(0x8000000000000000)); } return packFloatx80( aSign, 0, 0 ); } @@ -4927,7 +5844,7 @@ floatx80 floatx80_round_to_int( floatx80 a STATUS_PARAM ) lastBitMask <<= 0x403E - aExp; roundBitsMask = lastBitMask - 1; z = a; - switch (STATUS(float_rounding_mode)) { + switch (status->float_rounding_mode) { case float_round_nearest_even: z.low += lastBitMask>>1; if ((z.low & roundBitsMask) == 0) { @@ -4950,15 +5867,16 @@ floatx80 floatx80_round_to_int( floatx80 a STATUS_PARAM ) } break; default: - float_raise(float_flag_invalid STATUS_VAR); - break; + abort(); } z.low &= ~ roundBitsMask; if ( z.low == 0 ) { ++z.high; - z.low = LIT64( 0x8000000000000000 ); + z.low = UINT64_C(0x8000000000000000); + } + if (z.low != a.low) { + status->float_exception_flags |= float_flag_inexact; } - if ( z.low != a.low ) STATUS(float_exception_flags) |= float_flag_inexact; return z; } @@ -4971,11 +5889,12 @@ floatx80 floatx80_round_to_int( floatx80 a STATUS_PARAM ) | Floating-Point Arithmetic. 
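The round-to-int path above never walks the significand bit by bit: lastBitMask sits at the weight of one integer ulp, adding lastBitMask>>1 performs the half-ulp step, clearing roundBitsMask truncates, and an extra clear of the last bit forces ties to even. The same trick on a bare 64-bit significand (the real code additionally carries into z.high when z.low wraps):

    #include <stdint.h>

    /* lastBitMask must be a power of two marking the integer ulp. */
    static uint64_t round_sig_nearest_even(uint64_t sig, uint64_t lastBitMask)
    {
        uint64_t roundBitsMask = lastBitMask - 1;
        uint64_t z = sig + (lastBitMask >> 1);  /* add half an ulp */
        if ((z & roundBitsMask) == 0) {
            z &= ~lastBitMask;                  /* exact tie: round to even */
        }
        return z & ~roundBitsMask;              /* drop the fraction bits */
    }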
*----------------------------------------------------------------------------*/ -static floatx80 addFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM) +static floatx80 addFloatx80Sigs(floatx80 a, floatx80 b, flag zSign, + float_status *status) { - int32 aExp, bExp, zExp; + int32_t aExp, bExp, zExp; uint64_t aSig, bSig, zSig0, zSig1; - int32 expDiff; + int32_t expDiff; aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -4984,7 +5903,9 @@ static floatx80 addFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM expDiff = aExp - bExp; if ( 0 < expDiff ) { if ( aExp == 0x7FFF ) { - if ( (uint64_t) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + if ((uint64_t)(aSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } return a; } if ( bExp == 0 ) --expDiff; @@ -4993,8 +5914,12 @@ static floatx80 addFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM } else if ( expDiff < 0 ) { if ( bExp == 0x7FFF ) { - if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); - return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + if ((uint64_t)(bSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } + return packFloatx80(zSign, + floatx80_infinity_high, + floatx80_infinity_low); } if ( aExp == 0 ) ++expDiff; shift64ExtraRightJamming( aSig, 0, - expDiff, &aSig, &zSig1 ); @@ -5003,13 +5928,16 @@ static floatx80 addFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM else { if ( aExp == 0x7FFF ) { if ( (uint64_t) ( ( aSig | bSig )<<1 ) ) { - return propagateFloatx80NaN( a, b STATUS_VAR ); + return propagateFloatx80NaN(a, b, status); } return a; } zSig1 = 0; zSig0 = aSig + bSig; if ( aExp == 0 ) { + if (zSig0 == 0) { + return packFloatx80(zSign, 0, 0); + } normalizeFloatx80Subnormal( zSig0, &zExp, &zSig0 ); goto roundAndPack; } @@ -5020,13 +5948,11 @@ static floatx80 addFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM if ( (int64_t) zSig0 < 0 ) goto roundAndPack; shiftRight1: shift64ExtraRightJamming( zSig0, zSig1, 1, &zSig0, &zSig1 ); - zSig0 |= LIT64( 0x8000000000000000 ); + zSig0 |= UINT64_C(0x8000000000000000); ++zExp; roundAndPack: - return - roundAndPackFloatx80( - STATUS(floatx80_rounding_precision), zSign, zExp, zSig0, zSig1 STATUS_VAR ); - + return roundAndPackFloatx80(status->floatx80_rounding_precision, + zSign, zExp, zSig0, zSig1, status); } /*---------------------------------------------------------------------------- @@ -5037,12 +5963,12 @@ static floatx80 addFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM | Standard for Binary Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/ -static floatx80 subFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM ) +static floatx80 subFloatx80Sigs(floatx80 a, floatx80 b, flag zSign, + float_status *status) { - int32 aExp, bExp, zExp; + int32_t aExp, bExp, zExp; uint64_t aSig, bSig, zSig0, zSig1; - int32 expDiff; - floatx80 z; + int32_t expDiff; aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -5053,12 +5979,10 @@ static floatx80 subFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM if ( expDiff < 0 ) goto bExpBigger; if ( aExp == 0x7FFF ) { if ( (uint64_t) ( ( aSig | bSig )<<1 ) ) { - return propagateFloatx80NaN( a, b STATUS_VAR ); + return propagateFloatx80NaN(a, b, status); } - float_raise( float_flag_invalid STATUS_VAR); - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } if ( aExp == 0 ) { aExp = 1; @@ -5067,11 +5991,14 @@ static floatx80 subFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM zSig1 = 0; if ( bSig < aSig ) goto aBigger; if ( aSig < bSig ) goto bBigger; - return packFloatx80( STATUS(float_rounding_mode) == float_round_down, 0, 0 ); + return packFloatx80(status->float_rounding_mode == float_round_down, 0, 0); bExpBigger: if ( bExp == 0x7FFF ) { - if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); - return packFloatx80( zSign ^ 1, 0x7FFF, LIT64( 0x8000000000000000 ) ); + if ((uint64_t)(bSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } + return packFloatx80(zSign ^ 1, floatx80_infinity_high, + floatx80_infinity_low); } if ( aExp == 0 ) ++expDiff; shift128RightJamming( aSig, 0, - expDiff, &aSig, &zSig1 ); @@ -5082,7 +6009,9 @@ static floatx80 subFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM goto normalizeRoundAndPack; aExpBigger: if ( aExp == 0x7FFF ) { - if ( (uint64_t) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + if ((uint64_t)(aSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } return a; } if ( bExp == 0 ) --expDiff; @@ -5091,10 +6020,8 @@ static floatx80 subFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM sub128( aSig, 0, bSig, zSig1, &zSig0, &zSig1 ); zExp = aExp; normalizeRoundAndPack: - return - normalizeRoundAndPackFloatx80( - STATUS(floatx80_rounding_precision), zSign, zExp, zSig0, zSig1 STATUS_VAR ); - + return normalizeRoundAndPackFloatx80(status->floatx80_rounding_precision, + zSign, zExp, zSig0, zSig1, status); } /*---------------------------------------------------------------------------- @@ -5103,21 +6030,21 @@ static floatx80 subFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM | Standard for Binary Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/ -floatx80 floatx80_add( floatx80 a, floatx80 b STATUS_PARAM ) +floatx80 floatx80_add(floatx80 a, floatx80 b, float_status *status) { flag aSign, bSign; if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { - float_raise(float_flag_invalid STATUS_VAR); - return floatx80_default_nan; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } aSign = extractFloatx80Sign( a ); bSign = extractFloatx80Sign( b ); if ( aSign == bSign ) { - return addFloatx80Sigs( a, b, aSign STATUS_VAR ); + return addFloatx80Sigs(a, b, aSign, status); } else { - return subFloatx80Sigs( a, b, aSign STATUS_VAR ); + return subFloatx80Sigs(a, b, aSign, status); } } @@ -5128,21 +6055,21 @@ floatx80 floatx80_add( floatx80 a, floatx80 b STATUS_PARAM ) | IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -floatx80 floatx80_sub( floatx80 a, floatx80 b STATUS_PARAM ) +floatx80 floatx80_sub(floatx80 a, floatx80 b, float_status *status) { flag aSign, bSign; if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { - float_raise(float_flag_invalid STATUS_VAR); - return floatx80_default_nan; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } aSign = extractFloatx80Sign( a ); bSign = extractFloatx80Sign( b ); if ( aSign == bSign ) { - return subFloatx80Sigs( a, b, aSign STATUS_VAR ); + return subFloatx80Sigs(a, b, aSign, status); } else { - return addFloatx80Sigs( a, b, aSign STATUS_VAR ); + return addFloatx80Sigs(a, b, aSign, status); } } @@ -5153,16 +6080,15 @@ floatx80 floatx80_sub( floatx80 a, floatx80 b STATUS_PARAM ) | IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
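floatx80_add and floatx80_sub above are thin sign-dispatch wrappers: equal signs mean a magnitude addition, opposite signs a magnitude subtraction, and subtraction merely swaps which routine is called. The shape of that dispatch on a toy sign-and-magnitude type (mag_add/mag_sub stand in for addFloatx80Sigs/subFloatx80Sigs; none of this is softfloat code):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { bool sign; unsigned mag; } sm;

    static sm mag_add(sm a, sm b, bool zSign)
    {
        sm z = { zSign, a.mag + b.mag };
        return z;
    }

    static sm mag_sub(sm a, sm b, bool zSign)
    {
        sm z;
        if (a.mag >= b.mag) {
            z.sign = zSign;  z.mag = a.mag - b.mag;
        } else {
            z.sign = !zSign; z.mag = b.mag - a.mag;
        }
        return z;
    }

    static sm sm_add(sm a, sm b)
    {
        return a.sign == b.sign ? mag_add(a, b, a.sign)
                                : mag_sub(a, b, a.sign);
    }

    static sm sm_sub(sm a, sm b)
    {
        return a.sign == b.sign ? mag_sub(a, b, a.sign)
                                : mag_add(a, b, a.sign);
    }

    int main(void)
    {
        sm a = { false, 5 }, b = { true, 3 };   /* +5 and -3 */
        sm s = sm_add(a, b);                    /* dispatches to mag_sub */
        printf("%c%u\n", s.sign ? '-' : '+', s.mag);   /* prints +2 */
        return 0;
    }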
*----------------------------------------------------------------------------*/ -floatx80 floatx80_mul( floatx80 a, floatx80 b STATUS_PARAM ) +floatx80 floatx80_mul(floatx80 a, floatx80 b, float_status *status) { flag aSign, bSign, zSign; - int32 aExp, bExp, zExp; + int32_t aExp, bExp, zExp; uint64_t aSig, bSig, zSig0, zSig1; - floatx80 z; if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { - float_raise(float_flag_invalid STATUS_VAR); - return floatx80_default_nan; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -5174,21 +6100,23 @@ floatx80 floatx80_mul( floatx80 a, floatx80 b STATUS_PARAM ) if ( aExp == 0x7FFF ) { if ( (uint64_t) ( aSig<<1 ) || ( ( bExp == 0x7FFF ) && (uint64_t) ( bSig<<1 ) ) ) { - return propagateFloatx80NaN( a, b STATUS_VAR ); + return propagateFloatx80NaN(a, b, status); } if ( ( bExp | bSig ) == 0 ) goto invalid; - return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + return packFloatx80(zSign, floatx80_infinity_high, + floatx80_infinity_low); } if ( bExp == 0x7FFF ) { - if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + if ((uint64_t)(bSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } if ( ( aExp | aSig ) == 0 ) { invalid: - float_raise( float_flag_invalid STATUS_VAR); - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } - return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + return packFloatx80(zSign, floatx80_infinity_high, + floatx80_infinity_low); } if ( aExp == 0 ) { if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 ); @@ -5204,10 +6132,8 @@ floatx80 floatx80_mul( floatx80 a, floatx80 b STATUS_PARAM ) shortShift128Left( zSig0, zSig1, 1, &zSig0, &zSig1 ); --zExp; } - return - roundAndPackFloatx80( - STATUS(floatx80_rounding_precision), zSign, zExp, zSig0, zSig1 STATUS_VAR ); - + return roundAndPackFloatx80(status->floatx80_rounding_precision, + zSign, zExp, zSig0, zSig1, status); } /*---------------------------------------------------------------------------- @@ -5216,17 +6142,16 @@ floatx80 floatx80_mul( floatx80 a, floatx80 b STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/ -floatx80 floatx80_div( floatx80 a, floatx80 b STATUS_PARAM ) +floatx80 floatx80_div(floatx80 a, floatx80 b, float_status *status) { flag aSign, bSign, zSign; - int32 aExp, bExp, zExp; + int32_t aExp, bExp, zExp; uint64_t aSig, bSig, zSig0, zSig1; uint64_t rem0, rem1, rem2, term0, term1, term2; - floatx80 z; if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { - float_raise(float_flag_invalid STATUS_VAR); - return floatx80_default_nan; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -5236,28 +6161,34 @@ floatx80 floatx80_div( floatx80 a, floatx80 b STATUS_PARAM ) bSign = extractFloatx80Sign( b ); zSign = aSign ^ bSign; if ( aExp == 0x7FFF ) { - if ( (uint64_t) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + if ((uint64_t)(aSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } if ( bExp == 0x7FFF ) { - if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + if ((uint64_t)(bSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } goto invalid; } - return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + return packFloatx80(zSign, floatx80_infinity_high, + floatx80_infinity_low); } if ( bExp == 0x7FFF ) { - if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + if ((uint64_t)(bSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } return packFloatx80( zSign, 0, 0 ); } if ( bExp == 0 ) { if ( bSig == 0 ) { if ( ( aExp | aSig ) == 0 ) { invalid: - float_raise( float_flag_invalid STATUS_VAR); - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } - float_raise( float_flag_divbyzero STATUS_VAR); - return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + float_raise(float_flag_divbyzero, status); + return packFloatx80(zSign, floatx80_infinity_high, + floatx80_infinity_low); } normalizeFloatx80Subnormal( bSig, &bExp, &bSig ); } @@ -5288,10 +6219,8 @@ floatx80 floatx80_div( floatx80 a, floatx80 b STATUS_PARAM ) } zSig1 |= ( ( rem1 | rem2 ) != 0 ); } - return - roundAndPackFloatx80( - STATUS(floatx80_rounding_precision), zSign, zExp, zSig0, zSig1 STATUS_VAR ); - + return roundAndPackFloatx80(status->floatx80_rounding_precision, + zSign, zExp, zSig0, zSig1, status); } /*---------------------------------------------------------------------------- @@ -5300,17 +6229,16 @@ floatx80 floatx80_div( floatx80 a, floatx80 b STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
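floatx80_div above keeps the two zero-denominator cases apart: 0/0 takes the invalid path and yields the default NaN, while x/0 for finite nonzero x raises only divbyzero and returns a correctly signed infinity. The host-FPU analogue, observable through <fenv.h> (illustration only):

    /* May need "#pragma STDC FENV_ACCESS ON" for strict conformance. */
    #include <fenv.h>
    #include <stdio.h>

    int main(void)
    {
        volatile double num = -1.0, zero = 0.0;

        feclearexcept(FE_ALL_EXCEPT);
        volatile double q = num / zero;         /* -inf */
        printf("%g divbyzero=%d\n", q, fetestexcept(FE_DIVBYZERO) != 0);

        feclearexcept(FE_ALL_EXCEPT);
        volatile double n = zero / zero;        /* NaN */
        printf("%g invalid=%d\n", n, fetestexcept(FE_INVALID) != 0);
        return 0;
    }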
*----------------------------------------------------------------------------*/ -floatx80 floatx80_rem( floatx80 a, floatx80 b STATUS_PARAM ) +floatx80 floatx80_rem(floatx80 a, floatx80 b, float_status *status) { flag aSign, zSign; - int32 aExp, bExp, expDiff; + int32_t aExp, bExp, expDiff; uint64_t aSig0, aSig1, bSig; uint64_t q, term0, term1, alternateASig0, alternateASig1; - floatx80 z; if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { - float_raise(float_flag_invalid STATUS_VAR); - return floatx80_default_nan; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } aSig0 = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -5320,21 +6248,21 @@ floatx80 floatx80_rem( floatx80 a, floatx80 b STATUS_PARAM ) if ( aExp == 0x7FFF ) { if ( (uint64_t) ( aSig0<<1 ) || ( ( bExp == 0x7FFF ) && (uint64_t) ( bSig<<1 ) ) ) { - return propagateFloatx80NaN( a, b STATUS_VAR ); + return propagateFloatx80NaN(a, b, status); } goto invalid; } if ( bExp == 0x7FFF ) { - if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + if ((uint64_t)(bSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } return a; } if ( bExp == 0 ) { if ( bSig == 0 ) { invalid: - float_raise( float_flag_invalid STATUS_VAR); - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } normalizeFloatx80Subnormal( bSig, &bExp, &bSig ); } @@ -5342,7 +6270,7 @@ floatx80 floatx80_rem( floatx80 a, floatx80 b STATUS_PARAM ) if ( (uint64_t) ( aSig0<<1 ) == 0 ) return a; normalizeFloatx80Subnormal( aSig0, &aExp, &aSig0 ); } - bSig |= LIT64( 0x8000000000000000 ); + bSig |= UINT64_C(0x8000000000000000); zSign = aSign; expDiff = aExp - bExp; aSig1 = 0; @@ -5390,7 +6318,7 @@ floatx80 floatx80_rem( floatx80 a, floatx80 b STATUS_PARAM ) } return normalizeRoundAndPackFloatx80( - 80, zSign, bExp + expDiff, aSig0, aSig1 STATUS_VAR ); + 80, zSign, bExp + expDiff, aSig0, aSig1, status); } @@ -5400,33 +6328,32 @@ floatx80 floatx80_rem( floatx80 a, floatx80 b STATUS_PARAM ) | for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -floatx80 floatx80_sqrt( floatx80 a STATUS_PARAM ) +floatx80 floatx80_sqrt(floatx80 a, float_status *status) { flag aSign; - int32 aExp, zExp; + int32_t aExp, zExp; uint64_t aSig0, aSig1, zSig0, zSig1, doubleZSig0; uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3; - floatx80 z; if (floatx80_invalid_encoding(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return floatx80_default_nan; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } aSig0 = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( aExp == 0x7FFF ) { - if ( (uint64_t) ( aSig0<<1 ) ) return propagateFloatx80NaN( a, a STATUS_VAR ); + if ((uint64_t)(aSig0 << 1)) { + return propagateFloatx80NaN(a, a, status); + } if ( ! 
aSign ) return a; goto invalid; } if ( aSign ) { if ( ( aExp | aSig0 ) == 0 ) return a; invalid: - float_raise( float_flag_invalid STATUS_VAR); - z.low = floatx80_default_nan_low; - z.high = floatx80_default_nan_high; - return z; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } if ( aExp == 0 ) { if ( aSig0 == 0 ) return packFloatx80( 0, 0, 0 ); @@ -5445,7 +6372,7 @@ floatx80 floatx80_sqrt( floatx80 a STATUS_PARAM ) add128( rem0, rem1, zSig0>>63, doubleZSig0 | 1, &rem0, &rem1 ); } zSig1 = estimateDiv128To64( rem1, 0, doubleZSig0 ); - if ( ( zSig1 & LIT64( 0x3FFFFFFFFFFFFFFF ) ) <= 5 ) { + if ( ( zSig1 & UINT64_C(0x3FFFFFFFFFFFFFFF) ) <= 5 ) { if ( zSig1 == 0 ) zSig1 = 1; mul64To128( doubleZSig0, zSig1, &term1, &term2 ); sub128( rem1, 0, term1, term2, &rem1, &rem2 ); @@ -5462,10 +6389,8 @@ floatx80 floatx80_sqrt( floatx80 a STATUS_PARAM ) } shortShift128Left( 0, zSig1, 1, &zSig0, &zSig1 ); zSig0 |= doubleZSig0; - return - roundAndPackFloatx80( - STATUS(floatx80_rounding_precision), 0, zExp, zSig0, zSig1 STATUS_VAR ); - + return roundAndPackFloatx80(status->floatx80_rounding_precision, + 0, zExp, zSig0, zSig1, status); } /*---------------------------------------------------------------------------- @@ -5475,7 +6400,7 @@ floatx80 floatx80_sqrt( floatx80 a STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int floatx80_eq( floatx80 a, floatx80 b STATUS_PARAM ) +int floatx80_eq(floatx80 a, floatx80 b, float_status *status) { if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b) @@ -5484,7 +6409,7 @@ int floatx80_eq( floatx80 a, floatx80 b STATUS_PARAM ) || (extractFloatx80Exp(b) == 0x7FFF && (uint64_t) (extractFloatx80Frac(b) << 1)) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } return @@ -5504,7 +6429,7 @@ int floatx80_eq( floatx80 a, floatx80 b STATUS_PARAM ) | Arithmetic. *----------------------------------------------------------------------------*/ -int floatx80_le( floatx80 a, floatx80 b STATUS_PARAM ) +int floatx80_le(floatx80 a, floatx80 b, float_status *status) { flag aSign, bSign; @@ -5514,7 +6439,7 @@ int floatx80_le( floatx80 a, floatx80 b STATUS_PARAM ) || (extractFloatx80Exp(b) == 0x7FFF && (uint64_t) (extractFloatx80Frac(b) << 1)) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } aSign = extractFloatx80Sign( a ); @@ -5538,7 +6463,7 @@ int floatx80_le( floatx80 a, floatx80 b STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int floatx80_lt( floatx80 a, floatx80 b STATUS_PARAM ) +int floatx80_lt(floatx80 a, floatx80 b, float_status *status) { flag aSign, bSign; @@ -5548,7 +6473,7 @@ int floatx80_lt( floatx80 a, floatx80 b STATUS_PARAM ) || (extractFloatx80Exp(b) == 0x7FFF && (uint64_t) (extractFloatx80Frac(b) << 1)) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } aSign = extractFloatx80Sign( a ); @@ -5571,7 +6496,7 @@ int floatx80_lt( floatx80 a, floatx80 b STATUS_PARAM ) | either operand is a NaN. The comparison is performed according to the | IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/ -int floatx80_unordered( floatx80 a, floatx80 b STATUS_PARAM ) +int floatx80_unordered(floatx80 a, floatx80 b, float_status *status) { if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b) || (extractFloatx80Exp(a) == 0x7FFF @@ -5579,7 +6504,7 @@ int floatx80_unordered( floatx80 a, floatx80 b STATUS_PARAM ) || (extractFloatx80Exp(b) == 0x7FFF && (uint64_t) (extractFloatx80Frac(b) << 1)) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 1; } return 0; @@ -5592,11 +6517,11 @@ int floatx80_unordered( floatx80 a, floatx80 b STATUS_PARAM ) | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int floatx80_eq_quiet( floatx80 a, floatx80 b STATUS_PARAM ) +int floatx80_eq_quiet(floatx80 a, floatx80 b, float_status *status) { if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { - float_raise(float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) @@ -5604,9 +6529,9 @@ int floatx80_eq_quiet( floatx80 a, floatx80 b STATUS_PARAM ) || ( ( extractFloatx80Exp( b ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( b )<<1 ) ) ) { - if ( floatx80_is_signaling_nan( a ) - || floatx80_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (floatx80_is_signaling_nan(a, status) + || floatx80_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -5626,12 +6551,12 @@ int floatx80_eq_quiet( floatx80 a, floatx80 b STATUS_PARAM ) | to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int floatx80_le_quiet( floatx80 a, floatx80 b STATUS_PARAM ) +int floatx80_le_quiet(floatx80 a, floatx80 b, float_status *status) { flag aSign, bSign; if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { - float_raise(float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) @@ -5639,9 +6564,9 @@ int floatx80_le_quiet( floatx80 a, floatx80 b STATUS_PARAM ) || ( ( extractFloatx80Exp( b ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( b )<<1 ) ) ) { - if ( floatx80_is_signaling_nan( a ) - || floatx80_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (floatx80_is_signaling_nan(a, status) + || floatx80_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -5666,12 +6591,12 @@ int floatx80_le_quiet( floatx80 a, floatx80 b STATUS_PARAM ) | IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/ -int floatx80_lt_quiet( floatx80 a, floatx80 b STATUS_PARAM ) +int floatx80_lt_quiet(floatx80 a, floatx80 b, float_status *status) { flag aSign, bSign; if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { - float_raise(float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) @@ -5679,9 +6604,9 @@ int floatx80_lt_quiet( floatx80 a, floatx80 b STATUS_PARAM ) || ( ( extractFloatx80Exp( b ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( b )<<1 ) ) ) { - if ( floatx80_is_signaling_nan( a ) - || floatx80_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (floatx80_is_signaling_nan(a, status) + || floatx80_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -5705,10 +6630,10 @@ int floatx80_lt_quiet( floatx80 a, floatx80 b STATUS_PARAM ) | The comparison is performed according to the IEC/IEEE Standard for Binary | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int floatx80_unordered_quiet( floatx80 a, floatx80 b STATUS_PARAM ) +int floatx80_unordered_quiet(floatx80 a, floatx80 b, float_status *status) { if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { - float_raise(float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 1; } if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) @@ -5716,9 +6641,9 @@ int floatx80_unordered_quiet( floatx80 a, floatx80 b STATUS_PARAM ) || ( ( extractFloatx80Exp( b ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( b )<<1 ) ) ) { - if ( floatx80_is_signaling_nan( a ) - || floatx80_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (floatx80_is_signaling_nan(a, status) + || floatx80_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 1; } @@ -5735,10 +6660,10 @@ int floatx80_unordered_quiet( floatx80 a, floatx80 b STATUS_PARAM ) | largest integer with the same sign as `a' is returned. *----------------------------------------------------------------------------*/ -int32 float128_to_int32( float128 a STATUS_PARAM ) +int32_t float128_to_int32(float128 a, float_status *status) { flag aSign; - int32 aExp, shiftCount; + int32_t aExp, shiftCount; uint64_t aSig0, aSig1; aSig1 = extractFloat128Frac1( a ); @@ -5746,11 +6671,11 @@ int32 float128_to_int32( float128 a STATUS_PARAM ) aExp = extractFloat128Exp( a ); aSign = extractFloat128Sign( a ); if ( ( aExp == 0x7FFF ) && ( aSig0 | aSig1 ) ) aSign = 0; - if ( aExp ) aSig0 |= LIT64( 0x0001000000000000 ); + if ( aExp ) aSig0 |= UINT64_C(0x0001000000000000); aSig0 |= ( aSig1 != 0 ); shiftCount = 0x4028 - aExp; if ( 0 < shiftCount ) shift64RightJamming( aSig0, shiftCount, &aSig0 ); - return roundAndPackInt32( aSign, aSig0 STATUS_VAR ); + return roundAndPackInt32(aSign, aSig0, status); } @@ -5764,10 +6689,10 @@ int32 float128_to_int32( float128 a STATUS_PARAM ) | returned. 
*----------------------------------------------------------------------------*/ -int32 float128_to_int32_round_to_zero( float128 a STATUS_PARAM ) +int32_t float128_to_int32_round_to_zero(float128 a, float_status *status) { flag aSign; - int32 aExp, shiftCount; + int32_t aExp, shiftCount; uint64_t aSig0, aSig1, savedASig; int32_t z; @@ -5781,22 +6706,24 @@ int32 float128_to_int32_round_to_zero( float128 a STATUS_PARAM ) goto invalid; } else if ( aExp < 0x3FFF ) { - if ( aExp || aSig0 ) STATUS(float_exception_flags) |= float_flag_inexact; + if (aExp || aSig0) { + status->float_exception_flags |= float_flag_inexact; + } return 0; } - aSig0 |= LIT64( 0x0001000000000000 ); + aSig0 |= UINT64_C(0x0001000000000000); shiftCount = 0x402F - aExp; savedASig = aSig0; aSig0 >>= shiftCount; - z = (int32_t)aSig0; + z = aSig0; if ( aSign ) z = - z; if ( ( z < 0 ) ^ aSign ) { invalid: - float_raise( float_flag_invalid STATUS_VAR); - return aSign ? (int32_t) 0x80000000 : 0x7FFFFFFF; + float_raise(float_flag_invalid, status); + return aSign ? INT32_MIN : INT32_MAX; } if ( ( aSig0<<shiftCount ) != savedASig ) { - STATUS(float_exception_flags) |= float_flag_inexact; + status->float_exception_flags |= float_flag_inexact; } return z; @@ -5812,36 +6739,36 @@ int32 float128_to_int32_round_to_zero( float128 a STATUS_PARAM ) | largest integer with the same sign as `a' is returned. *----------------------------------------------------------------------------*/ -int64 float128_to_int64( float128 a STATUS_PARAM ) +int64_t float128_to_int64(float128 a, float_status *status) { flag aSign; - int32 aExp, shiftCount; + int32_t aExp, shiftCount; uint64_t aSig0, aSig1; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); aExp = extractFloat128Exp( a ); aSign = extractFloat128Sign( a ); - if ( aExp ) aSig0 |= LIT64( 0x0001000000000000 ); + if ( aExp ) aSig0 |= UINT64_C(0x0001000000000000); shiftCount = 0x402F - aExp; if ( shiftCount <= 0 ) { if ( 0x403E < aExp ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); if ( ! aSign || ( ( aExp == 0x7FFF ) - && ( aSig1 || ( aSig0 != LIT64( 0x0001000000000000 ) ) ) + && ( aSig1 || ( aSig0 != UINT64_C(0x0001000000000000) ) ) ) ) { - return LIT64( 0x7FFFFFFFFFFFFFFF ); + return INT64_MAX; } - return (int64_t) LIT64( 0x8000000000000000 ); + return INT64_MIN; } shortShift128Left( aSig0, aSig1, - shiftCount, &aSig0, &aSig1 ); } else { shift64ExtraRightJamming( aSig0, aSig1, shiftCount, &aSig0, &aSig1 ); } - return roundAndPackInt64( aSign, aSig0, aSig1 STATUS_VAR ); + return roundAndPackInt64(aSign, aSig0, aSig1, status); } @@ -5855,50 +6782,52 @@ int64 float128_to_int64( float128 a STATUS_PARAM ) | returned. *----------------------------------------------------------------------------*/ -int64 float128_to_int64_round_to_zero( float128 a STATUS_PARAM ) +int64_t float128_to_int64_round_to_zero(float128 a, float_status *status) { flag aSign; - int32 aExp, shiftCount; + int32_t aExp, shiftCount; uint64_t aSig0, aSig1; - int64 z; + int64_t z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); aExp = extractFloat128Exp( a ); aSign = extractFloat128Sign( a ); - if ( aExp ) aSig0 |= LIT64( 0x0001000000000000 ); + if ( aExp ) aSig0 |= UINT64_C(0x0001000000000000); shiftCount = aExp - 0x402F; if ( 0 < shiftCount ) { if ( 0x403E <= aExp ) { - aSig0 &= LIT64( 0x0000FFFFFFFFFFFF ); - if ( ( a.high == LIT64( 0xC03E000000000000 ) ) - && ( aSig1 < LIT64( 0x0002000000000000 ) ) ) { - if ( aSig1 ) STATUS(float_exception_flags) |= float_flag_inexact; - } - else { - float_raise( float_flag_invalid STATUS_VAR); - if ( !
aSign || ( ( aExp == 0x7FFF ) && ( aSig0 | aSig1 ) ) ) { - return LIT64( 0x7FFFFFFFFFFFFFFF ); + aSig0 &= UINT64_C(0x0000FFFFFFFFFFFF); + if ( ( a.high == UINT64_C(0xC03E000000000000) ) + && ( aSig1 < UINT64_C(0x0002000000000000) ) ) { + if (aSig1) { + status->float_exception_flags |= float_flag_inexact; } } - return (int64_t) LIT64( 0x8000000000000000 ); + else { + float_raise(float_flag_invalid, status); + if ( ! aSign || ( ( aExp == 0x7FFF ) && ( aSig0 | aSig1 ) ) ) { + return INT64_MAX; + } + } + return INT64_MIN; } z = ( aSig0<<shiftCount ) | ( aSig1>>( ( - shiftCount ) & 63 ) ); if ( (uint64_t) ( aSig1<<shiftCount ) ) { - STATUS(float_exception_flags) |= float_flag_inexact; + status->float_exception_flags |= float_flag_inexact; } } else { if ( aExp < 0x3FFF ) { if ( aExp | aSig0 | aSig1 ) { - STATUS(float_exception_flags) |= float_flag_inexact; + status->float_exception_flags |= float_flag_inexact; } return 0; } z = aSig0>>( - shiftCount ); if ( aSig1 || ( shiftCount && (uint64_t) ( aSig0<<( shiftCount & 63 ) ) ) ) { - STATUS(float_exception_flags) |= float_flag_inexact; + status->float_exception_flags |= float_flag_inexact; } } if ( aSign ) z = - z; @@ -5906,6 +6835,122 @@ int64 float128_to_int64_round_to_zero( float128 a STATUS_PARAM ) } +/*---------------------------------------------------------------------------- +| Returns the result of converting the quadruple-precision floating-point value +| `a' to the 64-bit unsigned integer format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic---which means in particular that the conversion is rounded +| according to the current rounding mode. If `a' is a NaN, the largest +| positive integer is returned. If the conversion overflows, the +| largest unsigned integer is returned. If 'a' is negative, the value is +| rounded and zero is returned; negative values that do not round to zero +| will raise the inexact exception. +*----------------------------------------------------------------------------*/ + +uint64_t float128_to_uint64(float128 a, float_status *status) +{ + flag aSign; + int aExp; + int shiftCount; + uint64_t aSig0, aSig1; + + aSig0 = extractFloat128Frac0(a); + aSig1 = extractFloat128Frac1(a); + aExp = extractFloat128Exp(a); + aSign = extractFloat128Sign(a); + if (aSign && (aExp > 0x3FFE)) { + float_raise(float_flag_invalid, status); + if (float128_is_any_nan(a)) { + return UINT64_MAX; + } else { + return 0; + } + } + if (aExp) { + aSig0 |= UINT64_C(0x0001000000000000); + } + shiftCount = 0x402F - aExp; + if (shiftCount <= 0) { + if (0x403E < aExp) { + float_raise(float_flag_invalid, status); + return UINT64_MAX; + } + shortShift128Left(aSig0, aSig1, -shiftCount, &aSig0, &aSig1); + } else { + shift64ExtraRightJamming(aSig0, aSig1, shiftCount, &aSig0, &aSig1); + } + return roundAndPackUint64(aSign, aSig0, aSig1, status); +} + +uint64_t float128_to_uint64_round_to_zero(float128 a, float_status *status) +{ + uint64_t v; + signed char current_rounding_mode = status->float_rounding_mode; + + set_float_rounding_mode(float_round_to_zero, status); + v = float128_to_uint64(a, status); + set_float_rounding_mode(current_rounding_mode, status); + + return v; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the quadruple-precision floating-point +| value `a' to the 32-bit unsigned integer format. The conversion +| is performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic except that the conversion is always rounded toward zero. +| If `a' is a NaN, the largest positive integer is returned.
Otherwise, +| if the conversion overflows, the largest unsigned integer is returned. +| If 'a' is negative, the value is rounded and zero is returned; negative +| values that do not round to zero will raise the inexact exception. +*----------------------------------------------------------------------------*/ + +uint32_t float128_to_uint32_round_to_zero(float128 a, float_status *status) +{ + uint64_t v; + uint32_t res; + int old_exc_flags = get_float_exception_flags(status); + + v = float128_to_uint64_round_to_zero(a, status); + if (v > 0xffffffff) { + res = 0xffffffff; + } else { + return v; + } + set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid, status); + return res; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the quadruple-precision floating-point value +| `a' to the 32-bit unsigned integer format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic---which means in particular that the conversion is rounded +| according to the current rounding mode. If `a' is a NaN, the largest +| positive integer is returned. If the conversion overflows, the +| largest unsigned integer is returned. If 'a' is negative, the value is +| rounded and zero is returned; negative values that do not round to zero +| will raise the inexact exception. +*----------------------------------------------------------------------------*/ + +uint32_t float128_to_uint32(float128 a, float_status *status) +{ + uint64_t v; + uint32_t res; + int old_exc_flags = get_float_exception_flags(status); + + v = float128_to_uint64(a, status); + if (v > 0xffffffff) { + res = 0xffffffff; + } else { + return v; + } + set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid, status); + return res; +} + /*---------------------------------------------------------------------------- | Returns the result of converting the quadruple-precision floating-point | value `a' to the single-precision floating-point format. The conversion @@ -5913,10 +6958,10 @@ int64 float128_to_int64_round_to_zero( float128 a STATUS_PARAM ) | Arithmetic. *----------------------------------------------------------------------------*/ -float32 float128_to_float32( float128 a STATUS_PARAM ) +float32 float128_to_float32(float128 a, float_status *status) { flag aSign; - int32 aExp; + int32_t aExp; uint64_t aSig0, aSig1; uint32_t zSig; @@ -5926,18 +6971,18 @@ float32 float128_to_float32( float128 a STATUS_PARAM ) aSign = extractFloat128Sign( a ); if ( aExp == 0x7FFF ) { if ( aSig0 | aSig1 ) { - return commonNaNToFloat32( float128ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return commonNaNToFloat32(float128ToCommonNaN(a, status), status); } return packFloat32( aSign, 0xFF, 0 ); } aSig0 |= ( aSig1 != 0 ); shift64RightJamming( aSig0, 18, &aSig0 ); - zSig = (uint32_t)aSig0; + zSig = aSig0; if ( aExp || zSig ) { zSig |= 0x40000000; aExp -= 0x3F81; } - return roundAndPackFloat32( aSign, aExp, zSig STATUS_VAR ); + return roundAndPackFloat32(aSign, aExp, zSig, status); } @@ -5948,10 +6993,10 @@ float32 float128_to_float32( float128 a STATUS_PARAM ) | Arithmetic. 
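float128_to_uint64_round_to_zero above obtains truncating behaviour by temporarily switching the rounding mode held in float_status and restoring it afterwards, instead of duplicating the conversion logic; the uint32 variants likewise reuse the 64-bit path and clamp, restoring the entry flags first so an overflow finally reports only invalid. The save/switch/restore pattern against the host rounding mode (plain ISO C, illustration only):

    /* May need "#pragma STDC FENV_ACCESS ON" for strict conformance. */
    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    static double nearbyint_toward_zero(double x)
    {
        int saved = fegetround();       /* remember the caller's mode */
        fesetround(FE_TOWARDZERO);
        double r = nearbyint(x);        /* honours the current mode */
        fesetround(saved);              /* restore before returning */
        return r;
    }

    int main(void)
    {
        printf("%g\n", nearbyint_toward_zero(-1.5));   /* prints -1 */
        return 0;
    }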
*----------------------------------------------------------------------------*/ -float64 float128_to_float64( float128 a STATUS_PARAM ) +float64 float128_to_float64(float128 a, float_status *status) { flag aSign; - int32 aExp; + int32_t aExp; uint64_t aSig0, aSig1; aSig1 = extractFloat128Frac1( a ); @@ -5960,17 +7005,17 @@ float64 float128_to_float64( float128 a STATUS_PARAM ) aSign = extractFloat128Sign( a ); if ( aExp == 0x7FFF ) { if ( aSig0 | aSig1 ) { - return commonNaNToFloat64( float128ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return commonNaNToFloat64(float128ToCommonNaN(a, status), status); } return packFloat64( aSign, 0x7FF, 0 ); } shortShift128Left( aSig0, aSig1, 14, &aSig0, &aSig1 ); aSig0 |= ( aSig1 != 0 ); if ( aExp || aSig0 ) { - aSig0 |= LIT64( 0x4000000000000000 ); + aSig0 |= UINT64_C(0x4000000000000000); aExp -= 0x3C01; } - return roundAndPackFloat64( aSign, aExp, aSig0 STATUS_VAR ); + return roundAndPackFloat64(aSign, aExp, aSig0, status); } @@ -5981,10 +7026,10 @@ float64 float128_to_float64( float128 a STATUS_PARAM ) | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -floatx80 float128_to_floatx80( float128 a STATUS_PARAM ) +floatx80 float128_to_floatx80(float128 a, float_status *status) { flag aSign; - int32 aExp; + int32_t aExp; uint64_t aSig0, aSig1; aSig1 = extractFloat128Frac1( a ); @@ -5993,19 +7038,20 @@ floatx80 float128_to_floatx80( float128 a STATUS_PARAM ) aSign = extractFloat128Sign( a ); if ( aExp == 0x7FFF ) { if ( aSig0 | aSig1 ) { - return commonNaNToFloatx80( float128ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return commonNaNToFloatx80(float128ToCommonNaN(a, status), status); } - return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + return packFloatx80(aSign, floatx80_infinity_high, + floatx80_infinity_low); } if ( aExp == 0 ) { if ( ( aSig0 | aSig1 ) == 0 ) return packFloatx80( aSign, 0, 0 ); normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 ); } else { - aSig0 |= LIT64( 0x0001000000000000 ); + aSig0 |= UINT64_C(0x0001000000000000); } shortShift128Left( aSig0, aSig1, 15, &aSig0, &aSig1 ); - return roundAndPackFloatx80( 80, aSign, aExp, aSig0, aSig1 STATUS_VAR ); + return roundAndPackFloatx80(80, aSign, aExp, aSig0, aSig1, status); } @@ -6016,10 +7062,10 @@ floatx80 float128_to_floatx80( float128 a STATUS_PARAM ) | Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/ -float128 float128_round_to_int( float128 a STATUS_PARAM ) +float128 float128_round_to_int(float128 a, float_status *status) { flag aSign; - int32 aExp; + int32_t aExp; uint64_t lastBitMask, roundBitsMask; float128 z; @@ -6029,7 +7075,7 @@ float128 float128_round_to_int( float128 a STATUS_PARAM ) if ( ( aExp == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) { - return propagateFloat128NaN( a, a STATUS_VAR ); + return propagateFloat128NaN(a, a, status); } return a; } @@ -6037,7 +7083,7 @@ float128 float128_round_to_int( float128 a STATUS_PARAM ) lastBitMask = ( lastBitMask<<( 0x406E - aExp ) )<<1; roundBitsMask = lastBitMask - 1; z = a; - switch (STATUS(float_rounding_mode)) { + switch (status->float_rounding_mode) { case float_round_nearest_even: if ( lastBitMask ) { add128( z.high, z.low, 0, lastBitMask>>1, &z.high, &z.low ); @@ -6071,19 +7117,27 @@ float128 float128_round_to_int( float128 a STATUS_PARAM ) add128(z.high, z.low, 0, roundBitsMask, &z.high, &z.low); } break; - default: - float_raise(float_flag_invalid STATUS_VAR); + case float_round_to_odd: + /* + * Note that if lastBitMask == 0, the last bit is the lsb + * of high, and roundBitsMask == -1. + */ + if ((lastBitMask ? z.low & lastBitMask : z.high & 1) == 0) { + add128(z.high, z.low, 0, roundBitsMask, &z.high, &z.low); + } break; + default: + abort(); } z.low &= ~ roundBitsMask; } else { if ( aExp < 0x3FFF ) { if ( ( ( (uint64_t) ( a.high<<1 ) ) | a.low ) == 0 ) return a; - STATUS(float_exception_flags) |= float_flag_inexact; + status->float_exception_flags |= float_flag_inexact; aSign = extractFloat128Sign( a ); - switch ( STATUS(float_rounding_mode) ) { - case float_round_nearest_even: + switch (status->float_rounding_mode) { + case float_round_nearest_even: if ( ( aExp == 0x3FFE ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) @@ -6096,14 +7150,17 @@ float128 float128_round_to_int( float128 a STATUS_PARAM ) return packFloat128(aSign, 0x3FFF, 0, 0); } break; - case float_round_down: + case float_round_down: return aSign ? packFloat128( 1, 0x3FFF, 0, 0 ) : packFloat128( 0, 0, 0, 0 ); - case float_round_up: + case float_round_up: return aSign ? packFloat128( 1, 0, 0, 0 ) : packFloat128( 0, 0x3FFF, 0, 0 ); + + case float_round_to_odd: + return packFloat128(aSign, 0x3FFF, 0, 0); } return packFloat128( aSign, 0, 0, 0 ); } @@ -6112,7 +7169,7 @@ float128 float128_round_to_int( float128 a STATUS_PARAM ) roundBitsMask = lastBitMask - 1; z.low = 0; z.high = a.high; - switch (STATUS(float_rounding_mode)) { + switch (status->float_rounding_mode) { case float_round_nearest_even: z.high += lastBitMask>>1; if ( ( ( z.high & roundBitsMask ) | a.low ) == 0 ) { @@ -6136,14 +7193,19 @@ float128 float128_round_to_int( float128 a STATUS_PARAM ) z.high += roundBitsMask; } break; - default: - float_raise(float_flag_invalid STATUS_VAR); + case float_round_to_odd: + if ((z.high & lastBitMask) == 0) { + z.high |= (a.low != 0); + z.high += roundBitsMask; + } break; + default: + abort(); } z.high &= ~ roundBitsMask; } if ( ( z.low != a.low ) || ( z.high != a.high ) ) { - STATUS(float_exception_flags) |= float_flag_inexact; + status->float_exception_flags |= float_flag_inexact; } return z; @@ -6157,11 +7219,12 @@ float128 float128_round_to_int( float128 a STATUS_PARAM ) | Floating-Point Arithmetic. 
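float_round_to_odd, handled in the new cases above, is Von Neumann rounding: truncate, then force the least significant bit to 1 whenever anything was discarded. That keeps a sticky record of inexactness so a subsequent narrower rounding cannot double-round incorrectly. On a bare significand (sketch, not softfloat code):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t round_to_odd(uint64_t sig, unsigned discard)
    {
        uint64_t kept = sig >> discard;
        uint64_t lost = sig & ((UINT64_C(1) << discard) - 1);
        return kept | (lost != 0);      /* jam stickiness into the lsb */
    }

    int main(void)
    {
        /* exact value stays even; inexact value is forced odd */
        printf("%llu\n", (unsigned long long)round_to_odd(0x20, 4)); /* 2 */
        printf("%llu\n", (unsigned long long)round_to_odd(0x21, 4)); /* 3 */
        return 0;
    }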
*----------------------------------------------------------------------------*/ -static float128 addFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM) +static float128 addFloat128Sigs(float128 a, float128 b, flag zSign, + float_status *status) { - int32 aExp, bExp, zExp; + int32_t aExp, bExp, zExp; uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2; - int32 expDiff; + int32_t expDiff; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); @@ -6172,14 +7235,16 @@ static float128 addFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM expDiff = aExp - bExp; if ( 0 < expDiff ) { if ( aExp == 0x7FFF ) { - if ( aSig0 | aSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if (aSig0 | aSig1) { + return propagateFloat128NaN(a, b, status); + } return a; } if ( bExp == 0 ) { --expDiff; } else { - bSig0 |= LIT64( 0x0001000000000000 ); + bSig0 |= UINT64_C(0x0001000000000000); } shift128ExtraRightJamming( bSig0, bSig1, 0, expDiff, &bSig0, &bSig1, &zSig2 ); @@ -6187,14 +7252,16 @@ static float128 addFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM } else if ( expDiff < 0 ) { if ( bExp == 0x7FFF ) { - if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if (bSig0 | bSig1) { + return propagateFloat128NaN(a, b, status); + } return packFloat128( zSign, 0x7FFF, 0, 0 ); } if ( aExp == 0 ) { ++expDiff; } else { - aSig0 |= LIT64( 0x0001000000000000 ); + aSig0 |= UINT64_C(0x0001000000000000); } shift128ExtraRightJamming( aSig0, aSig1, 0, - expDiff, &aSig0, &aSig1, &zSig2 ); @@ -6203,35 +7270,35 @@ static float128 addFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM else { if ( aExp == 0x7FFF ) { if ( aSig0 | aSig1 | bSig0 | bSig1 ) { - return propagateFloat128NaN( a, b STATUS_VAR ); + return propagateFloat128NaN(a, b, status); } return a; } add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 ); if ( aExp == 0 ) { - if (STATUS(flush_to_zero)) { + if (status->flush_to_zero) { if (zSig0 | zSig1) { - float_raise(float_flag_output_denormal STATUS_VAR); + float_raise(float_flag_output_denormal, status); } return packFloat128(zSign, 0, 0, 0); } return packFloat128( zSign, 0, zSig0, zSig1 ); } zSig2 = 0; - zSig0 |= LIT64( 0x0002000000000000 ); + zSig0 |= UINT64_C(0x0002000000000000); zExp = aExp; goto shiftRight1; } - aSig0 |= LIT64( 0x0001000000000000 ); + aSig0 |= UINT64_C(0x0001000000000000); add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 ); --zExp; - if ( zSig0 < LIT64( 0x0002000000000000 ) ) goto roundAndPack; + if ( zSig0 < UINT64_C(0x0002000000000000) ) goto roundAndPack; ++zExp; shiftRight1: shift128ExtraRightJamming( zSig0, zSig1, zSig2, 1, &zSig0, &zSig1, &zSig2 ); roundAndPack: - return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 STATUS_VAR ); + return roundAndPackFloat128(zSign, zExp, zSig0, zSig1, zSig2, status); } @@ -6243,12 +7310,12 @@ static float128 addFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM | Standard for Binary Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/ -static float128 subFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM) +static float128 subFloat128Sigs(float128 a, float128 b, flag zSign, + float_status *status) { - int32 aExp, bExp, zExp; + int32_t aExp, bExp, zExp; uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1; - int32 expDiff; - float128 z; + int32_t expDiff; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); @@ -6263,12 +7330,10 @@ static float128 subFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM if ( expDiff < 0 ) goto bExpBigger; if ( aExp == 0x7FFF ) { if ( aSig0 | aSig1 | bSig0 | bSig1 ) { - return propagateFloat128NaN( a, b STATUS_VAR ); + return propagateFloat128NaN(a, b, status); } - float_raise( float_flag_invalid STATUS_VAR); - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + float_raise(float_flag_invalid, status); + return float128_default_nan(status); } if ( aExp == 0 ) { aExp = 1; @@ -6278,20 +7343,23 @@ static float128 subFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM if ( aSig0 < bSig0 ) goto bBigger; if ( bSig1 < aSig1 ) goto aBigger; if ( aSig1 < bSig1 ) goto bBigger; - return packFloat128( STATUS(float_rounding_mode) == float_round_down, 0, 0, 0 ); + return packFloat128(status->float_rounding_mode == float_round_down, + 0, 0, 0); bExpBigger: if ( bExp == 0x7FFF ) { - if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if (bSig0 | bSig1) { + return propagateFloat128NaN(a, b, status); + } return packFloat128( zSign ^ 1, 0x7FFF, 0, 0 ); } if ( aExp == 0 ) { ++expDiff; } else { - aSig0 |= LIT64( 0x4000000000000000 ); + aSig0 |= UINT64_C(0x4000000000000000); } shift128RightJamming( aSig0, aSig1, - expDiff, &aSig0, &aSig1 ); - bSig0 |= LIT64( 0x4000000000000000 ); + bSig0 |= UINT64_C(0x4000000000000000); bBigger: sub128( bSig0, bSig1, aSig0, aSig1, &zSig0, &zSig1 ); zExp = bExp; @@ -6299,23 +7367,26 @@ static float128 subFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM goto normalizeRoundAndPack; aExpBigger: if ( aExp == 0x7FFF ) { - if ( aSig0 | aSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if (aSig0 | aSig1) { + return propagateFloat128NaN(a, b, status); + } return a; } if ( bExp == 0 ) { --expDiff; } else { - bSig0 |= LIT64( 0x4000000000000000 ); + bSig0 |= UINT64_C(0x4000000000000000); } shift128RightJamming( bSig0, bSig1, expDiff, &bSig0, &bSig1 ); - aSig0 |= LIT64( 0x4000000000000000 ); + aSig0 |= UINT64_C(0x4000000000000000); aBigger: sub128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 ); zExp = aExp; normalizeRoundAndPack: --zExp; - return normalizeRoundAndPackFloat128( zSign, zExp - 14, zSig0, zSig1 STATUS_VAR ); + return normalizeRoundAndPackFloat128(zSign, zExp - 14, zSig0, zSig1, + status); } @@ -6325,17 +7396,17 @@ static float128 subFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM | for Binary Floating-Point Arithmetic. 
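Two conversions run through every hunk in this file. First, the STATUS_PARAM / STATUS_VAR / STATUS() macros that unicorn1 inherited from QEMU are expanded into an explicit float_status *status parameter, as upstream QEMU later did itself. Second, the float128_default_nan_low / float128_default_nan_high globals give way to a float128_default_nan(status) call, since the default NaN encoding is target-dependent and now hangs off the status. A self-contained sketch of the before/after shape; the typedefs are stand-ins so the sketch compiles on its own, and the macro definitions are quoted from memory of the old softfloat.h, not from this tree:

    /* Stand-in types for the sketch only. */
    typedef struct {
        signed char float_rounding_mode;
        int float_exception_flags;
    } float_status;
    typedef struct { unsigned long long low, high; } float128;

    /* Old style, as the macros expanded:
     *   #define STATUS_PARAM  , float_status *status
     *   #define STATUS_VAR    , status
     *   #define STATUS(field) status->field
     *   float128 float128_add(float128 a, float128 b STATUS_PARAM);
     */

    /* New style: same ABI, parameter spelled out. */
    float128 float128_add(float128 a, float128 b, float_status *status);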
*----------------------------------------------------------------------------*/ -float128 float128_add( float128 a, float128 b STATUS_PARAM ) +float128 float128_add(float128 a, float128 b, float_status *status) { flag aSign, bSign; aSign = extractFloat128Sign( a ); bSign = extractFloat128Sign( b ); if ( aSign == bSign ) { - return addFloat128Sigs( a, b, aSign STATUS_VAR ); + return addFloat128Sigs(a, b, aSign, status); } else { - return subFloat128Sigs( a, b, aSign STATUS_VAR ); + return subFloat128Sigs(a, b, aSign, status); } } @@ -6346,17 +7417,17 @@ float128 float128_add( float128 a, float128 b STATUS_PARAM ) | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -float128 float128_sub( float128 a, float128 b STATUS_PARAM ) +float128 float128_sub(float128 a, float128 b, float_status *status) { flag aSign, bSign; aSign = extractFloat128Sign( a ); bSign = extractFloat128Sign( b ); if ( aSign == bSign ) { - return subFloat128Sigs( a, b, aSign STATUS_VAR ); + return subFloat128Sigs(a, b, aSign, status); } else { - return addFloat128Sigs( a, b, aSign STATUS_VAR ); + return addFloat128Sigs(a, b, aSign, status); } } @@ -6367,12 +7438,11 @@ float128 float128_sub( float128 a, float128 b STATUS_PARAM ) | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -float128 float128_mul( float128 a, float128 b STATUS_PARAM ) +float128 float128_mul(float128 a, float128 b, float_status *status) { flag aSign, bSign, zSign; - int32 aExp, bExp, zExp; + int32_t aExp, bExp, zExp; uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2, zSig3; - float128 z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); @@ -6386,19 +7456,19 @@ float128 float128_mul( float128 a, float128 b STATUS_PARAM ) if ( aExp == 0x7FFF ) { if ( ( aSig0 | aSig1 ) || ( ( bExp == 0x7FFF ) && ( bSig0 | bSig1 ) ) ) { - return propagateFloat128NaN( a, b STATUS_VAR ); + return propagateFloat128NaN(a, b, status); } if ( ( bExp | bSig0 | bSig1 ) == 0 ) goto invalid; return packFloat128( zSign, 0x7FFF, 0, 0 ); } if ( bExp == 0x7FFF ) { - if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if (bSig0 | bSig1) { + return propagateFloat128NaN(a, b, status); + } if ( ( aExp | aSig0 | aSig1 ) == 0 ) { invalid: - float_raise( float_flag_invalid STATUS_VAR); - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + float_raise(float_flag_invalid, status); + return float128_default_nan(status); } return packFloat128( zSign, 0x7FFF, 0, 0 ); } @@ -6411,17 +7481,17 @@ float128 float128_mul( float128 a, float128 b STATUS_PARAM ) normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 ); } zExp = aExp + bExp - 0x4000; - aSig0 |= LIT64( 0x0001000000000000 ); + aSig0 |= UINT64_C(0x0001000000000000); shortShift128Left( bSig0, bSig1, 16, &bSig0, &bSig1 ); mul128To256( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1, &zSig2, &zSig3 ); add128( zSig0, zSig1, aSig0, aSig1, &zSig0, &zSig1 ); zSig2 |= ( zSig3 != 0 ); - if ( LIT64( 0x0002000000000000 ) <= zSig0 ) { + if (UINT64_C( 0x0002000000000000) <= zSig0 ) { shift128ExtraRightJamming( zSig0, zSig1, zSig2, 1, &zSig0, &zSig1, &zSig2 ); ++zExp; } - return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 STATUS_VAR ); + return roundAndPackFloat128(zSign, zExp, zSig0, zSig1, zSig2, status); } @@ -6431,13 +7501,12 @@ float128 float128_mul( float128 a, float128 b STATUS_PARAM ) | the IEC/IEEE Standard 
for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -float128 float128_div( float128 a, float128 b STATUS_PARAM ) +float128 float128_div(float128 a, float128 b, float_status *status) { flag aSign, bSign, zSign; - int32 aExp, bExp, zExp; + int32_t aExp, bExp, zExp; uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2; uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3; - float128 z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); @@ -6449,27 +7518,31 @@ float128 float128_div( float128 a, float128 b STATUS_PARAM ) bSign = extractFloat128Sign( b ); zSign = aSign ^ bSign; if ( aExp == 0x7FFF ) { - if ( aSig0 | aSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if (aSig0 | aSig1) { + return propagateFloat128NaN(a, b, status); + } if ( bExp == 0x7FFF ) { - if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if (bSig0 | bSig1) { + return propagateFloat128NaN(a, b, status); + } goto invalid; } return packFloat128( zSign, 0x7FFF, 0, 0 ); } if ( bExp == 0x7FFF ) { - if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if (bSig0 | bSig1) { + return propagateFloat128NaN(a, b, status); + } return packFloat128( zSign, 0, 0, 0 ); } if ( bExp == 0 ) { if ( ( bSig0 | bSig1 ) == 0 ) { if ( ( aExp | aSig0 | aSig1 ) == 0 ) { invalid: - float_raise( float_flag_invalid STATUS_VAR); - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + float_raise(float_flag_invalid, status); + return float128_default_nan(status); } - float_raise( float_flag_divbyzero STATUS_VAR); + float_raise(float_flag_divbyzero, status); return packFloat128( zSign, 0x7FFF, 0, 0 ); } normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 ); @@ -6480,9 +7553,9 @@ float128 float128_div( float128 a, float128 b STATUS_PARAM ) } zExp = aExp - bExp + 0x3FFD; shortShift128Left( - aSig0 | LIT64( 0x0001000000000000 ), aSig1, 15, &aSig0, &aSig1 ); + aSig0 | UINT64_C(0x0001000000000000), aSig1, 15, &aSig0, &aSig1 ); shortShift128Left( - bSig0 | LIT64( 0x0001000000000000 ), bSig1, 15, &bSig0, &bSig1 ); + bSig0 | UINT64_C(0x0001000000000000), bSig1, 15, &bSig0, &bSig1 ); if ( le128( bSig0, bSig1, aSig0, aSig1 ) ) { shift128Right( aSig0, aSig1, 1, &aSig0, &aSig1 ); ++zExp; @@ -6505,7 +7578,7 @@ float128 float128_div( float128 a, float128 b STATUS_PARAM ) zSig1 |= ( ( rem1 | rem2 | rem3 ) != 0 ); } shift128ExtraRightJamming( zSig0, zSig1, 0, 15, &zSig0, &zSig1, &zSig2 ); - return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 STATUS_VAR ); + return roundAndPackFloat128(zSign, zExp, zSig0, zSig1, zSig2, status); } @@ -6515,14 +7588,13 @@ float128 float128_div( float128 a, float128 b STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
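float128_div obtains each 64-bit quotient chunk from estimateDiv128To64, whose estimate may be slightly high; the code multiplies it back out, and the `while ( (int64_t) rem0 < 0 )` loops step the estimate down until the remainder is non-negative. The same estimate-and-correct step at 64/32-bit width, assuming the true quotient fits in 32 bits and the estimate overshoots by at most a few units (as estimateDiv128To64's does):

    #include <stdint.h>

    /* Multiply the estimate back; while the remainder is negative, back
     * the quotient off by one and restore one divisor. */
    static uint32_t correct_quotient(uint64_t a, uint32_t b, uint32_t q_est)
    {
        uint32_t q = q_est;
        int64_t rem = (int64_t)(a - (uint64_t)q * b);

        while (rem < 0) {
            q--;
            rem += b;
        }
        return q;
    }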
*----------------------------------------------------------------------------*/ -float128 float128_rem( float128 a, float128 b STATUS_PARAM ) +float128 float128_rem(float128 a, float128 b, float_status *status) { flag aSign, zSign; - int32 aExp, bExp, expDiff; + int32_t aExp, bExp, expDiff; uint64_t aSig0, aSig1, bSig0, bSig1, q, term0, term1, term2; uint64_t allZero, alternateASig0, alternateASig1, sigMean1; int64_t sigMean0; - float128 z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); @@ -6534,21 +7606,21 @@ float128 float128_rem( float128 a, float128 b STATUS_PARAM ) if ( aExp == 0x7FFF ) { if ( ( aSig0 | aSig1 ) || ( ( bExp == 0x7FFF ) && ( bSig0 | bSig1 ) ) ) { - return propagateFloat128NaN( a, b STATUS_VAR ); + return propagateFloat128NaN(a, b, status); } goto invalid; } if ( bExp == 0x7FFF ) { - if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if (bSig0 | bSig1) { + return propagateFloat128NaN(a, b, status); + } return a; } if ( bExp == 0 ) { if ( ( bSig0 | bSig1 ) == 0 ) { invalid: - float_raise( float_flag_invalid STATUS_VAR); - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + float_raise(float_flag_invalid, status); + return float128_default_nan(status); } normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 ); } @@ -6559,14 +7631,14 @@ float128 float128_rem( float128 a, float128 b STATUS_PARAM ) expDiff = aExp - bExp; if ( expDiff < -1 ) return a; shortShift128Left( - aSig0 | LIT64( 0x0001000000000000 ), + aSig0 | UINT64_C(0x0001000000000000), aSig1, 15 - ( expDiff < 0 ), &aSig0, &aSig1 ); shortShift128Left( - bSig0 | LIT64( 0x0001000000000000 ), bSig1, 15, &bSig0, &bSig1 ); + bSig0 | UINT64_C(0x0001000000000000), bSig1, 15, &bSig0, &bSig1 ); q = le128( bSig0, bSig1, aSig0, aSig1 ); if ( q ) sub128( aSig0, aSig1, bSig0, bSig1, &aSig0, &aSig1 ); expDiff -= 64; @@ -6613,9 +7685,8 @@ float128 float128_rem( float128 a, float128 b STATUS_PARAM ) } zSign = ( (int64_t) aSig0 < 0 ); if ( zSign ) sub128( 0, 0, aSig0, aSig1, &aSig0, &aSig1 ); - return - normalizeRoundAndPackFloat128( aSign ^ zSign, bExp - 4, aSig0, aSig1 STATUS_VAR ); - + return normalizeRoundAndPackFloat128(aSign ^ zSign, bExp - 4, aSig0, aSig1, + status); } /*---------------------------------------------------------------------------- @@ -6624,38 +7695,37 @@ float128 float128_rem( float128 a, float128 b STATUS_PARAM ) | Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -float128 float128_sqrt( float128 a STATUS_PARAM ) +float128 float128_sqrt(float128 a, float_status *status) { flag aSign; - int32 aExp, zExp; + int32_t aExp, zExp; uint64_t aSig0, aSig1, zSig0, zSig1, zSig2, doubleZSig0; uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3; - float128 z; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); aExp = extractFloat128Exp( a ); aSign = extractFloat128Sign( a ); if ( aExp == 0x7FFF ) { - if ( aSig0 | aSig1 ) return propagateFloat128NaN( a, a STATUS_VAR ); + if (aSig0 | aSig1) { + return propagateFloat128NaN(a, a, status); + } if ( ! 
aSign ) return a; goto invalid; } if ( aSign ) { if ( ( aExp | aSig0 | aSig1 ) == 0 ) return a; invalid: - float_raise( float_flag_invalid STATUS_VAR); - z.low = float128_default_nan_low; - z.high = float128_default_nan_high; - return z; + float_raise(float_flag_invalid, status); + return float128_default_nan(status); } if ( aExp == 0 ) { if ( ( aSig0 | aSig1 ) == 0 ) return packFloat128( 0, 0, 0, 0 ); normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 ); } zExp = ( ( aExp - 0x3FFF )>>1 ) + 0x3FFE; - aSig0 |= LIT64( 0x0001000000000000 ); - zSig0 = estimateSqrt32( aExp, (uint32_t)(aSig0>>17) ); + aSig0 |= UINT64_C(0x0001000000000000); + zSig0 = estimateSqrt32( aExp, aSig0>>17 ); shortShift128Left( aSig0, aSig1, 13 - ( aExp & 1 ), &aSig0, &aSig1 ); zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0<<32 ) + ( zSig0<<30 ); doubleZSig0 = zSig0<<1; @@ -6683,7 +7753,7 @@ float128 float128_sqrt( float128 a STATUS_PARAM ) zSig1 |= ( ( rem1 | rem2 | rem3 ) != 0 ); } shift128ExtraRightJamming( zSig0, zSig1, 0, 14, &zSig0, &zSig1, &zSig2 ); - return roundAndPackFloat128( 0, zExp, zSig0, zSig1, zSig2 STATUS_VAR ); + return roundAndPackFloat128(0, zExp, zSig0, zSig1, zSig2, status); } @@ -6694,7 +7764,7 @@ float128 float128_sqrt( float128 a STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float128_eq( float128 a, float128 b STATUS_PARAM ) +int float128_eq(float128 a, float128 b, float_status *status) { if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) @@ -6702,7 +7772,7 @@ int float128_eq( float128 a, float128 b STATUS_PARAM ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } return @@ -6721,7 +7791,7 @@ int float128_eq( float128 a, float128 b STATUS_PARAM ) | according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float128_le( float128 a, float128 b STATUS_PARAM ) +int float128_le(float128 a, float128 b, float_status *status) { flag aSign, bSign; @@ -6730,7 +7800,7 @@ int float128_le( float128 a, float128 b STATUS_PARAM ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat128Sign( a ); @@ -6754,7 +7824,7 @@ int float128_le( float128 a, float128 b STATUS_PARAM ) | to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float128_lt( float128 a, float128 b STATUS_PARAM ) +int float128_lt(float128 a, float128 b, float_status *status) { flag aSign, bSign; @@ -6763,7 +7833,7 @@ int float128_lt( float128 a, float128 b STATUS_PARAM ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 0; } aSign = extractFloat128Sign( a ); @@ -6787,14 +7857,14 @@ int float128_lt( float128 a, float128 b STATUS_PARAM ) | Standard for Binary Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/ -int float128_unordered( float128 a, float128 b STATUS_PARAM ) +int float128_unordered(float128 a, float128 b, float_status *status) { if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return 1; } return 0; @@ -6807,7 +7877,7 @@ int float128_unordered( float128 a, float128 b STATUS_PARAM ) | for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float128_eq_quiet( float128 a, float128 b STATUS_PARAM ) +int float128_eq_quiet(float128 a, float128 b, float_status *status) { if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) @@ -6815,9 +7885,9 @@ int float128_eq_quiet( float128 a, float128 b STATUS_PARAM ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - if ( float128_is_signaling_nan( a ) - || float128_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float128_is_signaling_nan(a, status) + || float128_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -6837,7 +7907,7 @@ int float128_eq_quiet( float128 a, float128 b STATUS_PARAM ) | IEC/IEEE Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float128_le_quiet( float128 a, float128 b STATUS_PARAM ) +int float128_le_quiet(float128 a, float128 b, float_status *status) { flag aSign, bSign; @@ -6846,9 +7916,9 @@ int float128_le_quiet( float128 a, float128 b STATUS_PARAM ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - if ( float128_is_signaling_nan( a ) - || float128_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float128_is_signaling_nan(a, status) + || float128_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -6873,7 +7943,7 @@ int float128_le_quiet( float128 a, float128 b STATUS_PARAM ) | Standard for Binary Floating-Point Arithmetic. *----------------------------------------------------------------------------*/ -int float128_lt_quiet( float128 a, float128 b STATUS_PARAM ) +int float128_lt_quiet(float128 a, float128 b, float_status *status) { flag aSign, bSign; @@ -6882,9 +7952,9 @@ int float128_lt_quiet( float128 a, float128 b STATUS_PARAM ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - if ( float128_is_signaling_nan( a ) - || float128_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float128_is_signaling_nan(a, status) + || float128_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 0; } @@ -6909,340 +7979,29 @@ int float128_lt_quiet( float128 a, float128 b STATUS_PARAM ) | Floating-Point Arithmetic. 
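The quiet predicates (float128_eq_quiet, le_quiet, lt_quiet, unordered_quiet) raise float_flag_invalid only for signaling NaNs, where the non-quiet ones raise it for any NaN. The import threads status into float128_is_signaling_nan() because some targets use the inverted snan_bit_is_one convention for the quiet bit. A float64-width sketch of such a test; the field layout is IEEE 754 binary64 and the function name is the sketch's own:

    #include <stdint.h>

    /* exp all ones + nonzero fraction is a NaN; which state of the top
     * fraction bit means "signaling" depends on the target convention. */
    static int is_signaling_nan64(uint64_t bits, int snan_bit_is_one)
    {
        uint64_t exp   = (bits >> 52) & 0x7FF;
        uint64_t frac  = bits & UINT64_C(0x000FFFFFFFFFFFFF);
        uint64_t quiet = frac >> 51;

        if (exp != 0x7FF || frac == 0) {
            return 0;    /* not a NaN */
        }
        return snan_bit_is_one ? (quiet != 0) : (quiet == 0);
    }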
*----------------------------------------------------------------------------*/ -int float128_unordered_quiet( float128 a, float128 b STATUS_PARAM ) +int float128_unordered_quiet(float128 a, float128 b, float_status *status) { if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || ( ( extractFloat128Exp( b ) == 0x7FFF ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) ) { - if ( float128_is_signaling_nan( a ) - || float128_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + if (float128_is_signaling_nan(a, status) + || float128_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return 1; } return 0; } -/* misc functions */ -float32 uint32_to_float32(uint32_t a STATUS_PARAM) -{ - return int64_to_float32(a STATUS_VAR); -} - -float64 uint32_to_float64(uint32_t a STATUS_PARAM) -{ - return int64_to_float64(a STATUS_VAR); -} - -uint32 float32_to_uint32( float32 a STATUS_PARAM ) -{ - int64_t v; - uint32 res; - int old_exc_flags = get_float_exception_flags(status); - - v = float32_to_int64(a STATUS_VAR); - if (v < 0) { - res = 0; - } else if (v > 0xffffffff) { - res = 0xffffffff; - } else { - return (uint32)v; - } - set_float_exception_flags(old_exc_flags STATUS_VAR); - float_raise(float_flag_invalid STATUS_VAR); - return res; -} - -uint32 float32_to_uint32_round_to_zero( float32 a STATUS_PARAM ) -{ - int64_t v; - uint32 res; - int old_exc_flags = get_float_exception_flags(status); - - v = float32_to_int64_round_to_zero(a STATUS_VAR); - if (v < 0) { - res = 0; - } else if (v > 0xffffffff) { - res = 0xffffffff; - } else { - return (uint32)v; - } - set_float_exception_flags(old_exc_flags, status); - float_raise(float_flag_invalid STATUS_VAR); - return res; -} - -int_fast16_t float32_to_int16(float32 a STATUS_PARAM) -{ - int32_t v; - int_fast16_t res; - int old_exc_flags = get_float_exception_flags(status); - - v = float32_to_int32(a STATUS_VAR); - if (v < -0x8000) { - res = -0x8000; - } else if (v > 0x7fff) { - res = 0x7fff; - } else { - return v; - } - - set_float_exception_flags(old_exc_flags, status); - float_raise(float_flag_invalid STATUS_VAR); - return res; -} - -uint_fast16_t float32_to_uint16(float32 a STATUS_PARAM) -{ - int32_t v; - uint_fast16_t res; - int old_exc_flags = get_float_exception_flags(status); - - v = float32_to_int32(a STATUS_VAR); - if (v < 0) { - res = 0; - } else if (v > 0xffff) { - res = 0xffff; - } else { - return v; - } - - set_float_exception_flags(old_exc_flags, status); - float_raise(float_flag_invalid STATUS_VAR); - return res; -} - -uint_fast16_t float32_to_uint16_round_to_zero(float32 a STATUS_PARAM) -{ - int64_t v; - uint_fast16_t res; - int old_exc_flags = get_float_exception_flags(status); - - v = float32_to_int64_round_to_zero(a STATUS_VAR); - if (v < 0) { - res = 0; - } else if (v > 0xffff) { - res = 0xffff; - } else { - return (uint_fast16_t)v; - } - set_float_exception_flags(old_exc_flags, status); - float_raise(float_flag_invalid STATUS_VAR); - return res; -} - -uint32 float64_to_uint32( float64 a STATUS_PARAM ) -{ - uint64_t v; - uint32 res; - int old_exc_flags = get_float_exception_flags(status); - - v = float64_to_uint64(a STATUS_VAR); - if (v > 0xffffffff) { - res = 0xffffffff; - } else { - return (uint32)v; - } - set_float_exception_flags(old_exc_flags, status); - float_raise(float_flag_invalid STATUS_VAR); - return res; -} - -uint32 float64_to_uint32_round_to_zero( float64 a STATUS_PARAM ) -{ - uint64_t v; - uint32 res; - int 
old_exc_flags = get_float_exception_flags(status); - - v = float64_to_uint64_round_to_zero(a STATUS_VAR); - if (v > 0xffffffff) { - res = 0xffffffff; - } else { - return (uint32)v; - } - set_float_exception_flags(old_exc_flags, status); - float_raise(float_flag_invalid STATUS_VAR); - return res; -} - -int_fast16_t float64_to_int16(float64 a STATUS_PARAM) -{ - int64_t v; - int_fast16_t res; - int old_exc_flags = get_float_exception_flags(status); - - v = float64_to_int32(a STATUS_VAR); - if (v < -0x8000) { - res = -0x8000; - } else if (v > 0x7fff) { - res = 0x7fff; - } else { - return (int_fast16_t)v; - } - - set_float_exception_flags(old_exc_flags, status); - float_raise(float_flag_invalid STATUS_VAR); - return res; -} - -uint_fast16_t float64_to_uint16(float64 a STATUS_PARAM) -{ - int64_t v; - uint_fast16_t res; - int old_exc_flags = get_float_exception_flags(status); - - v = float64_to_int32(a STATUS_VAR); - if (v < 0) { - res = 0; - } else if (v > 0xffff) { - res = 0xffff; - } else { - return (uint_fast16_t)v; - } - - set_float_exception_flags(old_exc_flags, status); - float_raise(float_flag_invalid STATUS_VAR); - return res; -} - -uint_fast16_t float64_to_uint16_round_to_zero(float64 a STATUS_PARAM) -{ - int64_t v; - uint_fast16_t res; - int old_exc_flags = get_float_exception_flags(status); - - v = float64_to_int64_round_to_zero(a STATUS_VAR); - if (v < 0) { - res = 0; - } else if (v > 0xffff) { - res = 0xffff; - } else { - return (uint_fast16_t)v; - } - set_float_exception_flags(old_exc_flags, status); - float_raise(float_flag_invalid STATUS_VAR); - return res; -} - -/*---------------------------------------------------------------------------- -| Returns the result of converting the double-precision floating-point value -| `a' to the 64-bit unsigned integer format. The conversion is -| performed according to the IEC/IEEE Standard for Binary Floating-Point -| Arithmetic---which means in particular that the conversion is rounded -| according to the current rounding mode. If `a' is a NaN, the largest -| positive integer is returned. If the conversion overflows, the -| largest unsigned integer is returned. If 'a' is negative, the value is -| rounded and zero is returned; negative values that do not round to zero -| will raise the inexact exception. 
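All of the saturating converters being deleted here share one pattern: snapshot the exception flags, convert through a wider type, and when the result must be clamped, restore the snapshot and raise only float_flag_invalid, so a clamped conversion does not additionally report the inexact/overflow flags the wide conversion set. A sketch of that pattern at uint16 width; the flag constant and helper name are stand-ins for the softfloat accessors:

    #include <stdint.h>

    enum { FLAG_INVALID = 1 };   /* stand-in for float_flag_invalid */

    /* `wide` is the wider conversion's result, `saved_flags` the snapshot
     * taken before it; *flags is only rewritten when we clamp. */
    static uint16_t saturate_to_uint16(int64_t wide, int saved_flags,
                                       int *flags)
    {
        if (wide < 0) {
            *flags = saved_flags | FLAG_INVALID;
            return 0;
        }
        if (wide > 0xFFFF) {
            *flags = saved_flags | FLAG_INVALID;
            return 0xFFFF;
        }
        return (uint16_t)wide;   /* in range: wide conversion's flags stand */
    }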
-*----------------------------------------------------------------------------*/ - -uint64_t float64_to_uint64(float64 a STATUS_PARAM) -{ - flag aSign; - int_fast16_t aExp, shiftCount; - uint64_t aSig, aSigExtra; - a = float64_squash_input_denormal(a STATUS_VAR); - - aSig = extractFloat64Frac(a); - aExp = extractFloat64Exp(a); - aSign = extractFloat64Sign(a); - if (aSign && (aExp > 1022)) { - float_raise(float_flag_invalid STATUS_VAR); - if (float64_is_any_nan(a)) { - return LIT64(0xFFFFFFFFFFFFFFFF); - } else { - return 0; - } - } - if (aExp) { - aSig |= LIT64(0x0010000000000000); - } - shiftCount = 0x433 - aExp; - if (shiftCount <= 0) { - if (0x43E < aExp) { - float_raise(float_flag_invalid STATUS_VAR); - return LIT64(0xFFFFFFFFFFFFFFFF); - } - aSigExtra = 0; - aSig <<= -shiftCount; - } else { - shift64ExtraRightJamming(aSig, 0, shiftCount, &aSig, &aSigExtra); - } - return roundAndPackUint64(aSign, aSig, aSigExtra STATUS_VAR); -} - -uint64_t float64_to_uint64_round_to_zero (float64 a STATUS_PARAM) -{ - int64_t v; - signed char current_rounding_mode = STATUS(float_rounding_mode); - set_float_rounding_mode(float_round_to_zero STATUS_VAR); - v = float64_to_uint64(a STATUS_VAR); - set_float_rounding_mode(current_rounding_mode STATUS_VAR); - return v; -} - -#define COMPARE(s, nan_exp) \ -static inline int float ## s ## _compare_internal( float ## s a, float ## s b, \ - int is_quiet STATUS_PARAM ) \ -{ \ - flag aSign, bSign; \ - uint ## s ## _t av, bv; \ - a = float ## s ## _squash_input_denormal(a STATUS_VAR); \ - b = float ## s ## _squash_input_denormal(b STATUS_VAR); \ - \ - if (( ( extractFloat ## s ## Exp( a ) == nan_exp ) && \ - extractFloat ## s ## Frac( a ) ) || \ - ( ( extractFloat ## s ## Exp( b ) == nan_exp ) && \ - extractFloat ## s ## Frac( b ) )) { \ - if (!is_quiet || \ - float ## s ## _is_signaling_nan( a ) || \ - float ## s ## _is_signaling_nan( b ) ) { \ - float_raise( float_flag_invalid STATUS_VAR); \ - } \ - return float_relation_unordered; \ - } \ - aSign = extractFloat ## s ## Sign( a ); \ - bSign = extractFloat ## s ## Sign( b ); \ - av = float ## s ## _val(a); \ - bv = float ## s ## _val(b); \ - if ( aSign != bSign ) { \ - if ( (uint ## s ## _t) ( ( av | bv )<<1 ) == 0 ) { \ - /* zero case */ \ - return float_relation_equal; \ - } else { \ - return 1 - (2 * aSign); \ - } \ - } else { \ - if (av == bv) { \ - return float_relation_equal; \ - } else { \ - return 1 - 2 * (aSign ^ ( av < bv )); \ - } \ - } \ -} \ - \ -int float ## s ## _compare( float ## s a, float ## s b STATUS_PARAM ) \ -{ \ - return float ## s ## _compare_internal(a, b, 0 STATUS_VAR); \ -} \ - \ -int float ## s ## _compare_quiet( float ## s a, float ## s b STATUS_PARAM ) \ -{ \ - return float ## s ## _compare_internal(a, b, 1 STATUS_VAR); \ -} - -COMPARE(32, 0xff) -COMPARE(64, 0x7ff) - -static inline int floatx80_compare_internal( floatx80 a, floatx80 b, - int is_quiet STATUS_PARAM ) +static inline int floatx80_compare_internal(floatx80 a, floatx80 b, + int is_quiet, float_status *status) { flag aSign, bSign; if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { - float_raise(float_flag_invalid STATUS_VAR); + float_raise(float_flag_invalid, status); return float_relation_unordered; } if (( ( extractFloatx80Exp( a ) == 0x7fff ) && @@ -7250,9 +8009,9 @@ static inline int floatx80_compare_internal( floatx80 a, floatx80 b, ( ( extractFloatx80Exp( b ) == 0x7fff ) && ( extractFloatx80Frac( b )<<1 ) )) { if (!is_quiet || - floatx80_is_signaling_nan( a ) || - floatx80_is_signaling_nan( b ) ) { - 
float_raise( float_flag_invalid STATUS_VAR); + floatx80_is_signaling_nan(a, status) || + floatx80_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return float_relation_unordered; } @@ -7276,18 +8035,18 @@ static inline int floatx80_compare_internal( floatx80 a, floatx80 b, } } -int floatx80_compare( floatx80 a, floatx80 b STATUS_PARAM ) +int floatx80_compare(floatx80 a, floatx80 b, float_status *status) { - return floatx80_compare_internal(a, b, 0 STATUS_VAR); + return floatx80_compare_internal(a, b, 0, status); } -int floatx80_compare_quiet( floatx80 a, floatx80 b STATUS_PARAM ) +int floatx80_compare_quiet(floatx80 a, floatx80 b, float_status *status) { - return floatx80_compare_internal(a, b, 1 STATUS_VAR); + return floatx80_compare_internal(a, b, 1, status); } -static inline int float128_compare_internal( float128 a, float128 b, - int is_quiet STATUS_PARAM ) +static inline int float128_compare_internal(float128 a, float128 b, + int is_quiet, float_status *status) { flag aSign, bSign; @@ -7296,9 +8055,9 @@ static inline int float128_compare_internal( float128 a, float128 b, ( ( extractFloat128Exp( b ) == 0x7fff ) && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) )) { if (!is_quiet || - float128_is_signaling_nan( a ) || - float128_is_signaling_nan( b ) ) { - float_raise( float_flag_invalid STATUS_VAR); + float128_is_signaling_nan(a, status) || + float128_is_signaling_nan(b, status)) { + float_raise(float_flag_invalid, status); } return float_relation_unordered; } @@ -7320,198 +8079,25 @@ static inline int float128_compare_internal( float128 a, float128 b, } } -int float128_compare( float128 a, float128 b STATUS_PARAM ) +int float128_compare(float128 a, float128 b, float_status *status) { - return float128_compare_internal(a, b, 0 STATUS_VAR); + return float128_compare_internal(a, b, 0, status); } -int float128_compare_quiet( float128 a, float128 b STATUS_PARAM ) +int float128_compare_quiet(float128 a, float128 b, float_status *status) { - return float128_compare_internal(a, b, 1 STATUS_VAR); + return float128_compare_internal(a, b, 1, status); } -/* min() and max() functions. These can't be implemented as - * 'compare and pick one input' because that would mishandle - * NaNs and +0 vs -0. - * - * minnum() and maxnum() functions. These are similar to the min() - * and max() functions but if one of the arguments is a QNaN and - * the other is numerical then the numerical argument is returned. - * minnum() and maxnum correspond to the IEEE 754-2008 minNum() - * and maxNum() operations. min() and max() are the typical min/max - * semantics provided by many CPUs which predate that specification. - * - * minnummag() and maxnummag() functions correspond to minNumMag() - * and minNumMag() from the IEEE-754 2008. 
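The comment on the removed MINMAX block spells out why min/max cannot be "compare and pick one input": the quiet-NaN preference of IEEE 754-2008 minNum()/maxNum(), the -0 vs +0 ordering, and the magnitude variants minNumMag()/maxNumMag() (the second name in the old comment reads minNumMag twice; the intended pair is maxNumMag). A host-double sketch of minNum() under those rules; signaling-NaN handling is omitted, since plain C doubles cannot express it portably:

    #include <math.h>

    /* minNum(): a quiet NaN loses to a number; otherwise ordinary min,
     * with the 0 == -0 tie broken toward -0. */
    static double minnum_sketch(double a, double b)
    {
        if (isnan(a) && !isnan(b)) {
            return b;
        }
        if (isnan(b) && !isnan(a)) {
            return a;
        }
        if (a == b) {                 /* covers +0 vs -0 */
            return signbit(a) ? a : b;
        }
        return a < b ? a : b;         /* both NaN: falls through to b */
    }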
- */ -#define MINMAX(s) \ -static inline float ## s float ## s ## _minmax(float ## s a, float ## s b, \ - int ismin, int isieee, \ - int ismag STATUS_PARAM) \ -{ \ - flag aSign, bSign; \ - uint ## s ## _t av, bv, aav, abv; \ - a = float ## s ## _squash_input_denormal(a STATUS_VAR); \ - b = float ## s ## _squash_input_denormal(b STATUS_VAR); \ - if (float ## s ## _is_any_nan(a) || \ - float ## s ## _is_any_nan(b)) { \ - if (isieee) { \ - if (float ## s ## _is_quiet_nan(a) && \ - !float ## s ##_is_any_nan(b)) { \ - return b; \ - } else if (float ## s ## _is_quiet_nan(b) && \ - !float ## s ## _is_any_nan(a)) { \ - return a; \ - } \ - } \ - return propagateFloat ## s ## NaN(a, b STATUS_VAR); \ - } \ - aSign = extractFloat ## s ## Sign(a); \ - bSign = extractFloat ## s ## Sign(b); \ - av = float ## s ## _val(a); \ - bv = float ## s ## _val(b); \ - if (ismag) { \ - aav = float ## s ## _abs(av); \ - abv = float ## s ## _abs(bv); \ - if (aav != abv) { \ - if (ismin) { \ - return (aav < abv) ? a : b; \ - } else { \ - return (aav < abv) ? b : a; \ - } \ - } \ - } \ - if (aSign != bSign) { \ - if (ismin) { \ - return aSign ? a : b; \ - } else { \ - return aSign ? b : a; \ - } \ - } else { \ - if (ismin) { \ - return (aSign ^ (av < bv)) ? a : b; \ - } else { \ - return (aSign ^ (av < bv)) ? b : a; \ - } \ - } \ -} \ - \ -float ## s float ## s ## _min(float ## s a, float ## s b STATUS_PARAM) \ -{ \ - return float ## s ## _minmax(a, b, 1, 0, 0 STATUS_VAR); \ -} \ - \ -float ## s float ## s ## _max(float ## s a, float ## s b STATUS_PARAM) \ -{ \ - return float ## s ## _minmax(a, b, 0, 0, 0 STATUS_VAR); \ -} \ - \ -float ## s float ## s ## _minnum(float ## s a, float ## s b STATUS_PARAM) \ -{ \ - return float ## s ## _minmax(a, b, 1, 1, 0 STATUS_VAR); \ -} \ - \ -float ## s float ## s ## _maxnum(float ## s a, float ## s b STATUS_PARAM) \ -{ \ - return float ## s ## _minmax(a, b, 0, 1, 0 STATUS_VAR); \ -} \ - \ -float ## s float ## s ## _minnummag(float ## s a, float ## s b STATUS_PARAM) \ -{ \ - return float ## s ## _minmax(a, b, 1, 1, 1 STATUS_VAR); \ -} \ - \ -float ## s float ## s ## _maxnummag(float ## s a, float ## s b STATUS_PARAM) \ -{ \ - return float ## s ## _minmax(a, b, 0, 1, 1 STATUS_VAR); \ -} - -MINMAX(32) -MINMAX(64) - - -/* Multiply A by 2 raised to the power N. 
*/ -float32 float32_scalbn( float32 a, int n STATUS_PARAM ) -{ - flag aSign; - int16_t aExp; - uint32_t aSig; - - a = float32_squash_input_denormal(a STATUS_VAR); - aSig = extractFloat32Frac( a ); - aExp = extractFloat32Exp( a ); - aSign = extractFloat32Sign( a ); - - if ( aExp == 0xFF ) { - if ( aSig ) { - return propagateFloat32NaN( a, a STATUS_VAR ); - } - return a; - } - if (aExp != 0) { - aSig |= 0x00800000; - } else if (aSig == 0) { - return a; - } else { - aExp++; - } - - if (n > 0x200) { - n = 0x200; - } else if (n < -0x200) { - n = -0x200; - } - - aExp += n - 1; - aSig <<= 7; - return normalizeRoundAndPackFloat32( aSign, aExp, aSig STATUS_VAR ); -} - -float64 float64_scalbn( float64 a, int n STATUS_PARAM ) -{ - flag aSign; - int16_t aExp; - uint64_t aSig; - - a = float64_squash_input_denormal(a STATUS_VAR); - aSig = extractFloat64Frac( a ); - aExp = extractFloat64Exp( a ); - aSign = extractFloat64Sign( a ); - - if ( aExp == 0x7FF ) { - if ( aSig ) { - return propagateFloat64NaN( a, a STATUS_VAR ); - } - return a; - } - if (aExp != 0) { - aSig |= LIT64( 0x0010000000000000 ); - } else if (aSig == 0) { - return a; - } else { - aExp++; - } - - if (n > 0x1000) { - n = 0x1000; - } else if (n < -0x1000) { - n = -0x1000; - } - - aExp += n - 1; - aSig <<= 10; - return normalizeRoundAndPackFloat64( aSign, aExp, aSig STATUS_VAR ); -} - -floatx80 floatx80_scalbn( floatx80 a, int n STATUS_PARAM ) +floatx80 floatx80_scalbn(floatx80 a, int n, float_status *status) { flag aSign; int32_t aExp; uint64_t aSig; if (floatx80_invalid_encoding(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return floatx80_default_nan; + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); } aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); @@ -7519,7 +8105,7 @@ floatx80 floatx80_scalbn( floatx80 a, int n STATUS_PARAM ) if ( aExp == 0x7FFF ) { if ( aSig<<1 ) { - return propagateFloatx80NaN( a, a STATUS_VAR ); + return propagateFloatx80NaN(a, a, status); } return a; } @@ -7538,11 +8124,11 @@ floatx80 floatx80_scalbn( floatx80 a, int n STATUS_PARAM ) } aExp += n; - return normalizeRoundAndPackFloatx80( STATUS(floatx80_rounding_precision), - aSign, aExp, aSig, 0 STATUS_VAR ); + return normalizeRoundAndPackFloatx80(status->floatx80_rounding_precision, + aSign, aExp, aSig, 0, status); } -float128 float128_scalbn( float128 a, int n STATUS_PARAM ) +float128 float128_scalbn(float128 a, int n, float_status *status) { flag aSign; int32_t aExp; @@ -7554,12 +8140,12 @@ float128 float128_scalbn( float128 a, int n STATUS_PARAM ) aSign = extractFloat128Sign( a ); if ( aExp == 0x7FFF ) { if ( aSig0 | aSig1 ) { - return propagateFloat128NaN( a, a STATUS_VAR ); + return propagateFloat128NaN(a, a, status); } return a; } if (aExp != 0) { - aSig0 |= LIT64( 0x0001000000000000 ); + aSig0 |= UINT64_C(0x0001000000000000); } else if (aSig0 == 0 && aSig1 == 0) { return a; } else { @@ -7574,6 +8160,27 @@ float128 float128_scalbn( float128 a, int n STATUS_PARAM ) aExp += n - 1; return normalizeRoundAndPackFloat128( aSign, aExp, aSig0, aSig1 - STATUS_VAR ); + , status); } + +void softfloat_init(void) +{ + union_float64 ua, ub, uc, ur; + + if (QEMU_NO_HARDFLOAT) { + return; + } + /* + * Test that the host's FMA is not obviously broken. 
For example, + * glibc < 2.23 can perform an incorrect FMA on certain hosts; see + * https://sourceware.org/bugzilla/show_bug.cgi?id=13304 + */ + ua.s = 0x0020000000000001ULL; + ub.s = 0x3ca0000000000000ULL; + uc.s = 0x0020000000000000ULL; + ur.h = fma(ua.h, ub.h, uc.h); + if (ur.s != 0x0020000000000001ULL) { + force_soft_fma = true; + } +} diff --git a/qemu/gen_all_header.sh b/qemu/gen_all_header.sh deleted file mode 100755 index 1330ae54..00000000 --- a/qemu/gen_all_header.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -for d in x86_64 arm armeb m68k aarch64 aarch64eb mips mipsel mips64 mips64el sparc sparc64; do - python header_gen.py $d > $d.h -done diff --git a/qemu/header_gen.py b/qemu/header_gen.py deleted file mode 100644 index 253bd3e4..00000000 --- a/qemu/header_gen.py +++ /dev/null @@ -1,4085 +0,0 @@ -#!/usr/bin/python -# Unicorn Emulator Engine -# By Dang Hoang Vu & Nguyen Anh Quynh - -# syntax: ./header_gen.py - -import sys - -symbols = ( - 'arm_release', - 'aarch64_tb_set_jmp_target', - 'ppc_tb_set_jmp_target', - 'use_idiv_instructions_rt', - 'tcg_target_deposit_valid', - 'helper_power_down', - 'check_exit_request', - 'address_space_unregister', - 'tb_invalidate_phys_page_fast', - 'phys_mem_clean', - 'tb_cleanup', - 'memory_map', - 'memory_map_ptr', - 'memory_unmap', - 'memory_free', - 'free_code_gen_buffer', - 'helper_raise_exception', - 'tcg_enabled', - 'tcg_exec_init', - 'memory_register_types', - 'cpu_exec_init_all', - 'vm_start', - 'resume_all_vcpus', - 'a15_l2ctlr_read', - 'a64_translate_init', - 'aa32_generate_debug_exceptions', - 'aa64_cacheop_access', - 'aa64_daif_access', - 'aa64_daif_write', - 'aa64_dczid_read', - 'aa64_fpcr_read', - 'aa64_fpcr_write', - 'aa64_fpsr_read', - 'aa64_fpsr_write', - 'aa64_generate_debug_exceptions', - 'aa64_zva_access', - 'aarch64_banked_spsr_index', - 'aarch64_restore_sp', - 'aarch64_save_sp', - 'accel_find', - 'accel_init_machine', - 'accel_type', - 'access_with_adjusted_size', - 'add128', - 'add16_sat', - 'add16_usat', - 'add192', - 'add8_sat', - 'add8_usat', - 'add_cpreg_to_hashtable', - 'add_cpreg_to_list', - 'addFloat128Sigs', - 'addFloat32Sigs', - 'addFloat64Sigs', - 'addFloatx80Sigs', - 'add_qemu_ldst_label', - 'address_space_access_valid', - 'address_space_destroy', - 'address_space_destroy_dispatch', - 'address_space_get_flatview', - 'address_space_init', - 'address_space_init_dispatch', - 'address_space_lookup_region', - 'address_space_map', - 'address_space_read', - 'address_space_rw', - 'address_space_translate', - 'address_space_translate_for_iotlb', - 'address_space_translate_internal', - 'address_space_unmap', - 'address_space_update_topology', - 'address_space_update_topology_pass', - 'address_space_write', - 'addrrange_contains', - 'addrrange_end', - 'addrrange_equal', - 'addrrange_intersection', - 'addrrange_intersects', - 'addrrange_make', - 'adjust_endianness', - 'all_helpers', - 'alloc_code_gen_buffer', - 'alloc_entry', - 'always_true', - 'arm1026_initfn', - 'arm1136_initfn', - 'arm1136_r2_initfn', - 'arm1176_initfn', - 'arm11mpcore_initfn', - 'arm926_initfn', - 'arm946_initfn', - 'arm_ccnt_enabled', - 'arm_cp_read_zero', - 'arm_cp_reset_ignore', - 'arm_cpu_do_interrupt', - 'arm_cpu_exec_interrupt', - 'arm_cpu_finalizefn', - 'arm_cpu_get_phys_page_debug', - 'arm_cpu_handle_mmu_fault', - 'arm_cpu_initfn', - 'arm_cpu_list', - 'cpu_loop_exit', - 'arm_cpu_post_init', - 'arm_cpu_realizefn', - 'arm_cpu_register_gdb_regs_for_features', - 'arm_cpu_register_types', - 'cpu_resume_from_signal', - 'arm_cpus', - 'arm_cpu_set_pc', 
- 'arm_cp_write_ignore', - 'arm_current_el', - 'arm_dc_feature', - 'arm_debug_excp_handler', - 'arm_debug_target_el', - 'arm_el_is_aa64', - 'arm_env_get_cpu', - 'arm_excp_target_el', - 'arm_excp_unmasked', - 'arm_feature', - 'arm_generate_debug_exceptions', - 'gen_intermediate_code', - 'gen_intermediate_code_pc', - 'arm_gen_test_cc', - 'arm_gt_ptimer_cb', - 'arm_gt_vtimer_cb', - 'arm_handle_psci_call', - 'arm_is_psci_call', - 'arm_is_secure', - 'arm_is_secure_below_el3', - 'arm_ldl_code', - 'arm_lduw_code', - 'arm_log_exception', - 'arm_reg_read', - 'arm_reg_reset', - 'arm_reg_write', - 'restore_state_to_opc', - 'arm_rmode_to_sf', - 'arm_singlestep_active', - 'tlb_fill', - 'tlb_flush', - 'tlb_flush_page', - 'tlb_set_page', - 'arm_translate_init', - 'arm_v7m_class_init', - 'arm_v7m_cpu_do_interrupt', - 'ats_access', - 'ats_write', - 'bad_mode_switch', - 'bank_number', - 'bitmap_zero_extend', - 'bp_wp_matches', - 'breakpoint_invalidate', - 'build_page_bitmap', - 'bus_add_child', - 'bus_class_init', - 'bus_info', - 'bus_unparent', - 'cache_block_ops_cp_reginfo', - 'cache_dirty_status_cp_reginfo', - 'cache_test_clean_cp_reginfo', - 'call_recip_estimate', - 'can_merge', - 'capacity_increase', - 'ccsidr_read', - 'check_ap', - 'check_breakpoints', - 'check_watchpoints', - 'cho', - 'clear_bit', - 'clz32', - 'clz64', - 'cmp_flatrange_addr', - 'code_gen_alloc', - 'commonNaNToFloat128', - 'commonNaNToFloat16', - 'commonNaNToFloat32', - 'commonNaNToFloat64', - 'commonNaNToFloatx80', - 'compute_abs_deadline', - 'cond_name', - 'configure_accelerator', - 'container_get', - 'container_info', - 'container_register_types', - 'contextidr_write', - 'core_log_global_start', - 'core_log_global_stop', - 'core_memory_listener', - 'cortexa15_cp_reginfo', - 'cortex_a15_initfn', - 'cortexa8_cp_reginfo', - 'cortex_a8_initfn', - 'cortexa9_cp_reginfo', - 'cortex_a9_initfn', - 'cortex_m3_initfn', - 'count_cpreg', - 'countLeadingZeros32', - 'countLeadingZeros64', - 'cp_access_ok', - 'cpacr_write', - 'cpreg_field_is_64bit', - 'cp_reginfo', - 'cpreg_key_compare', - 'cpreg_make_keylist', - 'cp_reg_reset', - 'cpreg_to_kvm_id', - 'cpsr_read', - 'cpsr_write', - 'cptype_valid', - 'cpu_abort', - 'cpu_arm_exec', - 'cpu_arm_gen_code', - 'cpu_arm_init', - 'cpu_breakpoint_insert', - 'cpu_breakpoint_remove', - 'cpu_breakpoint_remove_all', - 'cpu_breakpoint_remove_by_ref', - 'cpu_can_do_io', - 'cpu_can_run', - 'cpu_class_init', - 'cpu_common_class_by_name', - 'cpu_common_exec_interrupt', - 'cpu_common_get_arch_id', - 'cpu_common_get_memory_mapping', - 'cpu_common_get_paging_enabled', - 'cpu_common_has_work', - 'cpu_common_initfn', - 'cpu_common_noop', - 'cpu_common_parse_features', - 'cpu_common_realizefn', - 'cpu_common_reset', - 'cpu_dump_statistics', - 'cpu_exec_init', - 'cpu_flush_icache_range', - 'cpu_gen_init', - 'cpu_get_clock', - 'cpu_get_real_ticks', - 'cpu_get_tb_cpu_state', - 'cpu_handle_debug_exception', - 'cpu_handle_guest_debug', - 'cpu_inb', - 'cpu_inl', - 'cpu_interrupt', - 'cpu_interrupt_handler', - 'cpu_inw', - 'cpu_io_recompile', - 'cpu_is_stopped', - 'cpu_ldl_code', - 'cpu_ldub_code', - 'cpu_lduw_code', - 'cpu_memory_rw_debug', - 'cpu_mmu_index', - 'cpu_outb', - 'cpu_outl', - 'cpu_outw', - 'cpu_physical_memory_clear_dirty_range', - 'cpu_physical_memory_get_clean', - 'cpu_physical_memory_get_dirty', - 'cpu_physical_memory_get_dirty_flag', - 'cpu_physical_memory_is_clean', - 'cpu_physical_memory_is_io', - 'cpu_physical_memory_map', - 'cpu_physical_memory_range_includes_clean', - 'cpu_physical_memory_reset_dirty', - 
'cpu_physical_memory_rw', - 'cpu_physical_memory_set_dirty_flag', - 'cpu_physical_memory_set_dirty_range', - 'cpu_physical_memory_unmap', - 'cpu_physical_memory_write_rom', - 'cpu_physical_memory_write_rom_internal', - 'cpu_register', - 'cpu_register_types', - 'cpu_restore_state', - 'cpu_restore_state_from_tb', - 'cpu_single_step', - 'cpu_tb_exec', - 'cpu_tlb_reset_dirty_all', - 'cpu_to_be64', - 'cpu_to_le32', - 'cpu_to_le64', - 'cpu_type_info', - 'cpu_unassigned_access', - 'cpu_watchpoint_address_matches', - 'cpu_watchpoint_insert', - 'cpu_watchpoint_remove', - 'cpu_watchpoint_remove_all', - 'cpu_watchpoint_remove_by_ref', - 'crc32c_table', - 'create_new_memory_mapping', - 'csselr_write', - 'cto32', - 'ctr_el0_access', - 'ctz32', - 'ctz64', - 'dacr_write', - 'dbgbcr_write', - 'dbgbvr_write', - 'dbgwcr_write', - 'dbgwvr_write', - 'debug_cp_reginfo', - 'debug_frame', - 'debug_lpae_cp_reginfo', - 'define_arm_cp_regs', - 'define_arm_cp_regs_with_opaque', - 'define_debug_regs', - 'define_one_arm_cp_reg', - 'define_one_arm_cp_reg_with_opaque', - 'deposit32', - 'deposit64', - 'deregister_tm_clones', - 'device_class_base_init', - 'device_class_init', - 'device_finalize', - 'device_get_realized', - 'device_initfn', - 'device_post_init', - 'device_reset', - 'device_set_realized', - 'device_type_info', - 'disas_arm_insn', - 'disas_coproc_insn', - 'disas_dsp_insn', - 'disas_iwmmxt_insn', - 'disas_neon_data_insn', - 'disas_neon_ls_insn', - 'disas_thumb2_insn', - 'disas_thumb_insn', - 'disas_vfp_insn', - 'disas_vfp_v8_insn', - 'do_arm_semihosting', - 'do_clz16', - 'do_clz8', - 'do_constant_folding', - 'do_constant_folding_2', - 'do_constant_folding_cond', - 'do_constant_folding_cond2', - 'do_constant_folding_cond_32', - 'do_constant_folding_cond_64', - 'do_constant_folding_cond_eq', - 'do_fcvt_f16_to_f32', - 'do_fcvt_f32_to_f16', - 'do_ssat', - 'do_usad', - 'do_usat', - 'do_v7m_exception_exit', - 'dummy_c15_cp_reginfo', - 'dummy_func', - 'dummy_section', - '_DYNAMIC', - '_edata', - '_end', - 'end_list', - 'eq128', - 'ErrorClass_lookup', - 'error_copy', - 'error_exit', - 'error_get_class', - 'error_get_pretty', - 'error_setg_file_open', - 'estimateDiv128To64', - 'estimateSqrt32', - 'excnames', - 'excp_is_internal', - 'extended_addresses_enabled', - 'extended_mpu_ap_bits', - 'extract32', - 'extract64', - 'extractFloat128Exp', - 'extractFloat128Frac0', - 'extractFloat128Frac1', - 'extractFloat128Sign', - 'extractFloat16Exp', - 'extractFloat16Frac', - 'extractFloat16Sign', - 'extractFloat32Exp', - 'extractFloat32Frac', - 'extractFloat32Sign', - 'extractFloat64Exp', - 'extractFloat64Frac', - 'extractFloat64Sign', - 'extractFloatx80Exp', - 'extractFloatx80Frac', - 'extractFloatx80Sign', - 'fcse_write', - 'find_better_copy', - 'find_default_machine', - 'find_desc_by_name', - 'find_first_bit', - 'find_paging_enabled_cpu', - 'find_ram_block', - 'find_ram_offset', - 'find_string', - 'find_type', - '_fini', - 'flatrange_equal', - 'flatview_destroy', - 'flatview_init', - 'flatview_insert', - 'flatview_lookup', - 'flatview_ref', - 'flatview_simplify', - 'flatview_unref', - 'float128_add', - 'float128_compare', - 'float128_compare_internal', - 'float128_compare_quiet', - 'float128_default_nan', - 'float128_div', - 'float128_eq', - 'float128_eq_quiet', - 'float128_is_quiet_nan', - 'float128_is_signaling_nan', - 'float128_le', - 'float128_le_quiet', - 'float128_lt', - 'float128_lt_quiet', - 'float128_maybe_silence_nan', - 'float128_mul', - 'float128_rem', - 'float128_round_to_int', - 'float128_scalbn', - 
'float128_sqrt', - 'float128_sub', - 'float128ToCommonNaN', - 'float128_to_float32', - 'float128_to_float64', - 'float128_to_floatx80', - 'float128_to_int32', - 'float128_to_int32_round_to_zero', - 'float128_to_int64', - 'float128_to_int64_round_to_zero', - 'float128_unordered', - 'float128_unordered_quiet', - 'float16_default_nan', - 'float16_is_quiet_nan', - 'float16_is_signaling_nan', - 'float16_maybe_silence_nan', - 'float16ToCommonNaN', - 'float16_to_float32', - 'float16_to_float64', - 'float32_abs', - 'float32_add', - 'float32_chs', - 'float32_compare', - 'float32_compare_internal', - 'float32_compare_quiet', - 'float32_default_nan', - 'float32_div', - 'float32_eq', - 'float32_eq_quiet', - 'float32_exp2', - 'float32_exp2_coefficients', - 'float32_is_any_nan', - 'float32_is_infinity', - 'float32_is_neg', - 'float32_is_quiet_nan', - 'float32_is_signaling_nan', - 'float32_is_zero', - 'float32_is_zero_or_denormal', - 'float32_le', - 'float32_le_quiet', - 'float32_log2', - 'float32_lt', - 'float32_lt_quiet', - 'float32_max', - 'float32_maxnum', - 'float32_maxnummag', - 'float32_maybe_silence_nan', - 'float32_min', - 'float32_minmax', - 'float32_minnum', - 'float32_minnummag', - 'float32_mul', - 'float32_muladd', - 'float32_rem', - 'float32_round_to_int', - 'float32_scalbn', - 'float32_set_sign', - 'float32_sqrt', - 'float32_squash_input_denormal', - 'float32_sub', - 'float32ToCommonNaN', - 'float32_to_float128', - 'float32_to_float16', - 'float32_to_float64', - 'float32_to_floatx80', - 'float32_to_int16', - 'float32_to_int16_round_to_zero', - 'float32_to_int32', - 'float32_to_int32_round_to_zero', - 'float32_to_int64', - 'float32_to_int64_round_to_zero', - 'float32_to_uint16', - 'float32_to_uint16_round_to_zero', - 'float32_to_uint32', - 'float32_to_uint32_round_to_zero', - 'float32_to_uint64', - 'float32_to_uint64_round_to_zero', - 'float32_unordered', - 'float32_unordered_quiet', - 'float64_abs', - 'float64_add', - 'float64_chs', - 'float64_compare', - 'float64_compare_internal', - 'float64_compare_quiet', - 'float64_default_nan', - 'float64_div', - 'float64_eq', - 'float64_eq_quiet', - 'float64_is_any_nan', - 'float64_is_infinity', - 'float64_is_neg', - 'float64_is_quiet_nan', - 'float64_is_signaling_nan', - 'float64_is_zero', - 'float64_le', - 'float64_le_quiet', - 'float64_log2', - 'float64_lt', - 'float64_lt_quiet', - 'float64_max', - 'float64_maxnum', - 'float64_maxnummag', - 'float64_maybe_silence_nan', - 'float64_min', - 'float64_minmax', - 'float64_minnum', - 'float64_minnummag', - 'float64_mul', - 'float64_muladd', - 'float64_rem', - 'float64_round_to_int', - 'float64_scalbn', - 'float64_set_sign', - 'float64_sqrt', - 'float64_squash_input_denormal', - 'float64_sub', - 'float64ToCommonNaN', - 'float64_to_float128', - 'float64_to_float16', - 'float64_to_float32', - 'float64_to_floatx80', - 'float64_to_int16', - 'float64_to_int16_round_to_zero', - 'float64_to_int32', - 'float64_to_int32_round_to_zero', - 'float64_to_int64', - 'float64_to_int64_round_to_zero', - 'float64_to_uint16', - 'float64_to_uint16_round_to_zero', - 'float64_to_uint32', - 'float64_to_uint32_round_to_zero', - 'float64_to_uint64', - 'float64_to_uint64_round_to_zero', - 'float64_trunc_to_int', - 'float64_unordered', - 'float64_unordered_quiet', - 'float_raise', - 'floatx80_add', - 'floatx80_compare', - 'floatx80_compare_internal', - 'floatx80_compare_quiet', - 'floatx80_default_nan', - 'floatx80_div', - 'floatx80_eq', - 'floatx80_eq_quiet', - 'floatx80_is_quiet_nan', - 'floatx80_is_signaling_nan', - 
'floatx80_le', - 'floatx80_le_quiet', - 'floatx80_lt', - 'floatx80_lt_quiet', - 'floatx80_maybe_silence_nan', - 'floatx80_mul', - 'floatx80_rem', - 'floatx80_round_to_int', - 'floatx80_scalbn', - 'floatx80_sqrt', - 'floatx80_sub', - 'floatx80ToCommonNaN', - 'floatx80_to_float128', - 'floatx80_to_float32', - 'floatx80_to_float64', - 'floatx80_to_int32', - 'floatx80_to_int32_round_to_zero', - 'floatx80_to_int64', - 'floatx80_to_int64_round_to_zero', - 'floatx80_unordered', - 'floatx80_unordered_quiet', - 'flush_icache_range', - 'format_string', - 'fp_decode_rm', - 'frame_dummy', - 'free_range', - 'fstat64', - 'futex_wait', - 'futex_wake', - 'gen_aa32_ld16s', - 'gen_aa32_ld16u', - 'gen_aa32_ld32u', - 'gen_aa32_ld64', - 'gen_aa32_ld8s', - 'gen_aa32_ld8u', - 'gen_aa32_st16', - 'gen_aa32_st32', - 'gen_aa32_st64', - 'gen_aa32_st8', - 'gen_adc', - 'gen_adc_CC', - 'gen_add16', - 'gen_add_carry', - 'gen_add_CC', - 'gen_add_datah_offset', - 'gen_add_data_offset', - 'gen_addq', - 'gen_addq_lo', - 'gen_addq_msw', - 'gen_arm_parallel_addsub', - 'gen_arm_shift_im', - 'gen_arm_shift_reg', - 'gen_bx', - 'gen_bx_im', - 'gen_clrex', - 'generate_memory_topology', - 'generic_timer_cp_reginfo', - 'gen_exception', - 'gen_exception_insn', - 'gen_exception_internal', - 'gen_exception_internal_insn', - 'gen_exception_return', - 'gen_goto_tb', - 'gen_helper_access_check_cp_reg', - 'gen_helper_add_saturate', - 'gen_helper_add_setq', - 'gen_helper_clear_pstate_ss', - 'gen_helper_clz32', - 'gen_helper_clz64', - 'gen_helper_clz_arm', - 'gen_helper_cpsr_read', - 'gen_helper_cpsr_write', - 'gen_helper_crc32_arm', - 'gen_helper_crc32c', - 'gen_helper_crypto_aese', - 'gen_helper_crypto_aesmc', - 'gen_helper_crypto_sha1_3reg', - 'gen_helper_crypto_sha1h', - 'gen_helper_crypto_sha1su1', - 'gen_helper_crypto_sha256h', - 'gen_helper_crypto_sha256h2', - 'gen_helper_crypto_sha256su0', - 'gen_helper_crypto_sha256su1', - 'gen_helper_double_saturate', - 'gen_helper_exception_internal', - 'gen_helper_exception_with_syndrome', - 'gen_helper_get_cp_reg', - 'gen_helper_get_cp_reg64', - 'gen_helper_get_r13_banked', - 'gen_helper_get_user_reg', - 'gen_helper_iwmmxt_addcb', - 'gen_helper_iwmmxt_addcl', - 'gen_helper_iwmmxt_addcw', - 'gen_helper_iwmmxt_addnb', - 'gen_helper_iwmmxt_addnl', - 'gen_helper_iwmmxt_addnw', - 'gen_helper_iwmmxt_addsb', - 'gen_helper_iwmmxt_addsl', - 'gen_helper_iwmmxt_addsw', - 'gen_helper_iwmmxt_addub', - 'gen_helper_iwmmxt_addul', - 'gen_helper_iwmmxt_adduw', - 'gen_helper_iwmmxt_align', - 'gen_helper_iwmmxt_avgb0', - 'gen_helper_iwmmxt_avgb1', - 'gen_helper_iwmmxt_avgw0', - 'gen_helper_iwmmxt_avgw1', - 'gen_helper_iwmmxt_bcstb', - 'gen_helper_iwmmxt_bcstl', - 'gen_helper_iwmmxt_bcstw', - 'gen_helper_iwmmxt_cmpeqb', - 'gen_helper_iwmmxt_cmpeql', - 'gen_helper_iwmmxt_cmpeqw', - 'gen_helper_iwmmxt_cmpgtsb', - 'gen_helper_iwmmxt_cmpgtsl', - 'gen_helper_iwmmxt_cmpgtsw', - 'gen_helper_iwmmxt_cmpgtub', - 'gen_helper_iwmmxt_cmpgtul', - 'gen_helper_iwmmxt_cmpgtuw', - 'gen_helper_iwmmxt_insr', - 'gen_helper_iwmmxt_macsw', - 'gen_helper_iwmmxt_macuw', - 'gen_helper_iwmmxt_maddsq', - 'gen_helper_iwmmxt_madduq', - 'gen_helper_iwmmxt_maxsb', - 'gen_helper_iwmmxt_maxsl', - 'gen_helper_iwmmxt_maxsw', - 'gen_helper_iwmmxt_maxub', - 'gen_helper_iwmmxt_maxul', - 'gen_helper_iwmmxt_maxuw', - 'gen_helper_iwmmxt_minsb', - 'gen_helper_iwmmxt_minsl', - 'gen_helper_iwmmxt_minsw', - 'gen_helper_iwmmxt_minub', - 'gen_helper_iwmmxt_minul', - 'gen_helper_iwmmxt_minuw', - 'gen_helper_iwmmxt_msbb', - 'gen_helper_iwmmxt_msbl', - 
'gen_helper_iwmmxt_msbw', - 'gen_helper_iwmmxt_muladdsl', - 'gen_helper_iwmmxt_muladdsw', - 'gen_helper_iwmmxt_muladdswl', - 'gen_helper_iwmmxt_mulshw', - 'gen_helper_iwmmxt_mulslw', - 'gen_helper_iwmmxt_muluhw', - 'gen_helper_iwmmxt_mululw', - 'gen_helper_iwmmxt_packsl', - 'gen_helper_iwmmxt_packsq', - 'gen_helper_iwmmxt_packsw', - 'gen_helper_iwmmxt_packul', - 'gen_helper_iwmmxt_packuq', - 'gen_helper_iwmmxt_packuw', - 'gen_helper_iwmmxt_rorl', - 'gen_helper_iwmmxt_rorq', - 'gen_helper_iwmmxt_rorw', - 'gen_helper_iwmmxt_sadb', - 'gen_helper_iwmmxt_sadw', - 'gen_helper_iwmmxt_setpsr_nz', - 'gen_helper_iwmmxt_shufh', - 'gen_helper_iwmmxt_slll', - 'gen_helper_iwmmxt_sllq', - 'gen_helper_iwmmxt_sllw', - 'gen_helper_iwmmxt_sral', - 'gen_helper_iwmmxt_sraq', - 'gen_helper_iwmmxt_sraw', - 'gen_helper_iwmmxt_srll', - 'gen_helper_iwmmxt_srlq', - 'gen_helper_iwmmxt_srlw', - 'gen_helper_iwmmxt_subnb', - 'gen_helper_iwmmxt_subnl', - 'gen_helper_iwmmxt_subnw', - 'gen_helper_iwmmxt_subsb', - 'gen_helper_iwmmxt_subsl', - 'gen_helper_iwmmxt_subsw', - 'gen_helper_iwmmxt_subub', - 'gen_helper_iwmmxt_subul', - 'gen_helper_iwmmxt_subuw', - 'gen_helper_iwmmxt_unpackhb', - 'gen_helper_iwmmxt_unpackhl', - 'gen_helper_iwmmxt_unpackhsb', - 'gen_helper_iwmmxt_unpackhsl', - 'gen_helper_iwmmxt_unpackhsw', - 'gen_helper_iwmmxt_unpackhub', - 'gen_helper_iwmmxt_unpackhul', - 'gen_helper_iwmmxt_unpackhuw', - 'gen_helper_iwmmxt_unpackhw', - 'gen_helper_iwmmxt_unpacklb', - 'gen_helper_iwmmxt_unpackll', - 'gen_helper_iwmmxt_unpacklsb', - 'gen_helper_iwmmxt_unpacklsl', - 'gen_helper_iwmmxt_unpacklsw', - 'gen_helper_iwmmxt_unpacklub', - 'gen_helper_iwmmxt_unpacklul', - 'gen_helper_iwmmxt_unpackluw', - 'gen_helper_iwmmxt_unpacklw', - 'gen_helper_neon_abd_f32', - 'gen_helper_neon_abdl_s16', - 'gen_helper_neon_abdl_s32', - 'gen_helper_neon_abdl_s64', - 'gen_helper_neon_abdl_u16', - 'gen_helper_neon_abdl_u32', - 'gen_helper_neon_abdl_u64', - 'gen_helper_neon_abd_s16', - 'gen_helper_neon_abd_s32', - 'gen_helper_neon_abd_s8', - 'gen_helper_neon_abd_u16', - 'gen_helper_neon_abd_u32', - 'gen_helper_neon_abd_u8', - 'gen_helper_neon_abs_s16', - 'gen_helper_neon_abs_s8', - 'gen_helper_neon_acge_f32', - 'gen_helper_neon_acgt_f32', - 'gen_helper_neon_addl_saturate_s32', - 'gen_helper_neon_addl_saturate_s64', - 'gen_helper_neon_addl_u16', - 'gen_helper_neon_addl_u32', - 'gen_helper_neon_add_u16', - 'gen_helper_neon_add_u8', - 'gen_helper_neon_ceq_f32', - 'gen_helper_neon_ceq_u16', - 'gen_helper_neon_ceq_u32', - 'gen_helper_neon_ceq_u8', - 'gen_helper_neon_cge_f32', - 'gen_helper_neon_cge_s16', - 'gen_helper_neon_cge_s32', - 'gen_helper_neon_cge_s8', - 'gen_helper_neon_cge_u16', - 'gen_helper_neon_cge_u32', - 'gen_helper_neon_cge_u8', - 'gen_helper_neon_cgt_f32', - 'gen_helper_neon_cgt_s16', - 'gen_helper_neon_cgt_s32', - 'gen_helper_neon_cgt_s8', - 'gen_helper_neon_cgt_u16', - 'gen_helper_neon_cgt_u32', - 'gen_helper_neon_cgt_u8', - 'gen_helper_neon_cls_s16', - 'gen_helper_neon_cls_s32', - 'gen_helper_neon_cls_s8', - 'gen_helper_neon_clz_u16', - 'gen_helper_neon_clz_u8', - 'gen_helper_neon_cnt_u8', - 'gen_helper_neon_fcvt_f16_to_f32', - 'gen_helper_neon_fcvt_f32_to_f16', - 'gen_helper_neon_hadd_s16', - 'gen_helper_neon_hadd_s32', - 'gen_helper_neon_hadd_s8', - 'gen_helper_neon_hadd_u16', - 'gen_helper_neon_hadd_u32', - 'gen_helper_neon_hadd_u8', - 'gen_helper_neon_hsub_s16', - 'gen_helper_neon_hsub_s32', - 'gen_helper_neon_hsub_s8', - 'gen_helper_neon_hsub_u16', - 'gen_helper_neon_hsub_u32', - 'gen_helper_neon_hsub_u8', - 
'gen_helper_neon_max_s16', - 'gen_helper_neon_max_s32', - 'gen_helper_neon_max_s8', - 'gen_helper_neon_max_u16', - 'gen_helper_neon_max_u32', - 'gen_helper_neon_max_u8', - 'gen_helper_neon_min_s16', - 'gen_helper_neon_min_s32', - 'gen_helper_neon_min_s8', - 'gen_helper_neon_min_u16', - 'gen_helper_neon_min_u32', - 'gen_helper_neon_min_u8', - 'gen_helper_neon_mull_p8', - 'gen_helper_neon_mull_s16', - 'gen_helper_neon_mull_s8', - 'gen_helper_neon_mull_u16', - 'gen_helper_neon_mull_u8', - 'gen_helper_neon_mul_p8', - 'gen_helper_neon_mul_u16', - 'gen_helper_neon_mul_u8', - 'gen_helper_neon_narrow_high_u16', - 'gen_helper_neon_narrow_high_u8', - 'gen_helper_neon_narrow_round_high_u16', - 'gen_helper_neon_narrow_round_high_u8', - 'gen_helper_neon_narrow_sat_s16', - 'gen_helper_neon_narrow_sat_s32', - 'gen_helper_neon_narrow_sat_s8', - 'gen_helper_neon_narrow_sat_u16', - 'gen_helper_neon_narrow_sat_u32', - 'gen_helper_neon_narrow_sat_u8', - 'gen_helper_neon_narrow_u16', - 'gen_helper_neon_narrow_u8', - 'gen_helper_neon_negl_u16', - 'gen_helper_neon_negl_u32', - 'gen_helper_neon_paddl_u16', - 'gen_helper_neon_paddl_u32', - 'gen_helper_neon_padd_u16', - 'gen_helper_neon_padd_u8', - 'gen_helper_neon_pmax_s16', - 'gen_helper_neon_pmax_s8', - 'gen_helper_neon_pmax_u16', - 'gen_helper_neon_pmax_u8', - 'gen_helper_neon_pmin_s16', - 'gen_helper_neon_pmin_s8', - 'gen_helper_neon_pmin_u16', - 'gen_helper_neon_pmin_u8', - 'gen_helper_neon_pmull_64_hi', - 'gen_helper_neon_pmull_64_lo', - 'gen_helper_neon_qabs_s16', - 'gen_helper_neon_qabs_s32', - 'gen_helper_neon_qabs_s8', - 'gen_helper_neon_qadd_s16', - 'gen_helper_neon_qadd_s32', - 'gen_helper_neon_qadd_s64', - 'gen_helper_neon_qadd_s8', - 'gen_helper_neon_qadd_u16', - 'gen_helper_neon_qadd_u32', - 'gen_helper_neon_qadd_u64', - 'gen_helper_neon_qadd_u8', - 'gen_helper_neon_qdmulh_s16', - 'gen_helper_neon_qdmulh_s32', - 'gen_helper_neon_qneg_s16', - 'gen_helper_neon_qneg_s32', - 'gen_helper_neon_qneg_s8', - 'gen_helper_neon_qrdmulh_s16', - 'gen_helper_neon_qrdmulh_s32', - 'gen_helper_neon_qrshl_s16', - 'gen_helper_neon_qrshl_s32', - 'gen_helper_neon_qrshl_s64', - 'gen_helper_neon_qrshl_s8', - 'gen_helper_neon_qrshl_u16', - 'gen_helper_neon_qrshl_u32', - 'gen_helper_neon_qrshl_u64', - 'gen_helper_neon_qrshl_u8', - 'gen_helper_neon_qshl_s16', - 'gen_helper_neon_qshl_s32', - 'gen_helper_neon_qshl_s64', - 'gen_helper_neon_qshl_s8', - 'gen_helper_neon_qshl_u16', - 'gen_helper_neon_qshl_u32', - 'gen_helper_neon_qshl_u64', - 'gen_helper_neon_qshl_u8', - 'gen_helper_neon_qshlu_s16', - 'gen_helper_neon_qshlu_s32', - 'gen_helper_neon_qshlu_s64', - 'gen_helper_neon_qshlu_s8', - 'gen_helper_neon_qsub_s16', - 'gen_helper_neon_qsub_s32', - 'gen_helper_neon_qsub_s64', - 'gen_helper_neon_qsub_s8', - 'gen_helper_neon_qsub_u16', - 'gen_helper_neon_qsub_u32', - 'gen_helper_neon_qsub_u64', - 'gen_helper_neon_qsub_u8', - 'gen_helper_neon_qunzip16', - 'gen_helper_neon_qunzip32', - 'gen_helper_neon_qunzip8', - 'gen_helper_neon_qzip16', - 'gen_helper_neon_qzip32', - 'gen_helper_neon_qzip8', - 'gen_helper_neon_rhadd_s16', - 'gen_helper_neon_rhadd_s32', - 'gen_helper_neon_rhadd_s8', - 'gen_helper_neon_rhadd_u16', - 'gen_helper_neon_rhadd_u32', - 'gen_helper_neon_rhadd_u8', - 'gen_helper_neon_rshl_s16', - 'gen_helper_neon_rshl_s32', - 'gen_helper_neon_rshl_s64', - 'gen_helper_neon_rshl_s8', - 'gen_helper_neon_rshl_u16', - 'gen_helper_neon_rshl_u32', - 'gen_helper_neon_rshl_u64', - 'gen_helper_neon_rshl_u8', - 'gen_helper_neon_shl_s16', - 'gen_helper_neon_shl_s32', - 
'gen_helper_neon_shl_s64', - 'gen_helper_neon_shl_s8', - 'gen_helper_neon_shl_u16', - 'gen_helper_neon_shl_u32', - 'gen_helper_neon_shl_u64', - 'gen_helper_neon_shl_u8', - 'gen_helper_neon_subl_u16', - 'gen_helper_neon_subl_u32', - 'gen_helper_neon_sub_u16', - 'gen_helper_neon_sub_u8', - 'gen_helper_neon_tbl', - 'gen_helper_neon_tst_u16', - 'gen_helper_neon_tst_u32', - 'gen_helper_neon_tst_u8', - 'gen_helper_neon_unarrow_sat16', - 'gen_helper_neon_unarrow_sat32', - 'gen_helper_neon_unarrow_sat8', - 'gen_helper_neon_unzip16', - 'gen_helper_neon_unzip8', - 'gen_helper_neon_widen_s16', - 'gen_helper_neon_widen_s8', - 'gen_helper_neon_widen_u16', - 'gen_helper_neon_widen_u8', - 'gen_helper_neon_zip16', - 'gen_helper_neon_zip8', - 'gen_helper_pre_hvc', - 'gen_helper_pre_smc', - 'gen_helper_qadd16', - 'gen_helper_qadd8', - 'gen_helper_qaddsubx', - 'gen_helper_qsub16', - 'gen_helper_qsub8', - 'gen_helper_qsubaddx', - 'gen_helper_rbit', - 'gen_helper_recpe_f32', - 'gen_helper_recpe_u32', - 'gen_helper_recps_f32', - 'gen_helper_rintd', - 'gen_helper_rintd_exact', - 'gen_helper_rints', - 'gen_helper_rints_exact', - 'gen_helper_ror_cc', - 'gen_helper_rsqrte_f32', - 'gen_helper_rsqrte_u32', - 'gen_helper_rsqrts_f32', - 'gen_helper_sadd16', - 'gen_helper_sadd8', - 'gen_helper_saddsubx', - 'gen_helper_sar_cc', - 'gen_helper_sdiv', - 'gen_helper_sel_flags', - 'gen_helper_set_cp_reg', - 'gen_helper_set_cp_reg64', - 'gen_helper_set_neon_rmode', - 'gen_helper_set_r13_banked', - 'gen_helper_set_rmode', - 'gen_helper_set_user_reg', - 'gen_helper_shadd16', - 'gen_helper_shadd8', - 'gen_helper_shaddsubx', - 'gen_helper_shl_cc', - 'gen_helper_shr_cc', - 'gen_helper_shsub16', - 'gen_helper_shsub8', - 'gen_helper_shsubaddx', - 'gen_helper_ssat', - 'gen_helper_ssat16', - 'gen_helper_ssub16', - 'gen_helper_ssub8', - 'gen_helper_ssubaddx', - 'gen_helper_sub_saturate', - 'gen_helper_sxtb16', - 'gen_helper_uadd16', - 'gen_helper_uadd8', - 'gen_helper_uaddsubx', - 'gen_helper_udiv', - 'gen_helper_uhadd16', - 'gen_helper_uhadd8', - 'gen_helper_uhaddsubx', - 'gen_helper_uhsub16', - 'gen_helper_uhsub8', - 'gen_helper_uhsubaddx', - 'gen_helper_uqadd16', - 'gen_helper_uqadd8', - 'gen_helper_uqaddsubx', - 'gen_helper_uqsub16', - 'gen_helper_uqsub8', - 'gen_helper_uqsubaddx', - 'gen_helper_usad8', - 'gen_helper_usat', - 'gen_helper_usat16', - 'gen_helper_usub16', - 'gen_helper_usub8', - 'gen_helper_usubaddx', - 'gen_helper_uxtb16', - 'gen_helper_v7m_mrs', - 'gen_helper_v7m_msr', - 'gen_helper_vfp_absd', - 'gen_helper_vfp_abss', - 'gen_helper_vfp_addd', - 'gen_helper_vfp_adds', - 'gen_helper_vfp_cmpd', - 'gen_helper_vfp_cmped', - 'gen_helper_vfp_cmpes', - 'gen_helper_vfp_cmps', - 'gen_helper_vfp_divd', - 'gen_helper_vfp_divs', - 'gen_helper_vfp_fcvtds', - 'gen_helper_vfp_fcvt_f16_to_f32', - 'gen_helper_vfp_fcvt_f16_to_f64', - 'gen_helper_vfp_fcvt_f32_to_f16', - 'gen_helper_vfp_fcvt_f64_to_f16', - 'gen_helper_vfp_fcvtsd', - 'gen_helper_vfp_get_fpscr', - 'gen_helper_vfp_maxnumd', - 'gen_helper_vfp_maxnums', - 'gen_helper_vfp_maxs', - 'gen_helper_vfp_minnumd', - 'gen_helper_vfp_minnums', - 'gen_helper_vfp_mins', - 'gen_helper_vfp_muladdd', - 'gen_helper_vfp_muladds', - 'gen_helper_vfp_muld', - 'gen_helper_vfp_muls', - 'gen_helper_vfp_negd', - 'gen_helper_vfp_negs', - 'gen_helper_vfp_set_fpscr', - 'gen_helper_vfp_shtod', - 'gen_helper_vfp_shtos', - 'gen_helper_vfp_sitod', - 'gen_helper_vfp_sitos', - 'gen_helper_vfp_sltod', - 'gen_helper_vfp_sltos', - 'gen_helper_vfp_sqrtd', - 'gen_helper_vfp_sqrts', - 'gen_helper_vfp_subd', - 
'gen_helper_vfp_subs', - 'gen_helper_vfp_toshd_round_to_zero', - 'gen_helper_vfp_toshs_round_to_zero', - 'gen_helper_vfp_tosid', - 'gen_helper_vfp_tosis', - 'gen_helper_vfp_tosizd', - 'gen_helper_vfp_tosizs', - 'gen_helper_vfp_tosld', - 'gen_helper_vfp_tosld_round_to_zero', - 'gen_helper_vfp_tosls', - 'gen_helper_vfp_tosls_round_to_zero', - 'gen_helper_vfp_touhd_round_to_zero', - 'gen_helper_vfp_touhs_round_to_zero', - 'gen_helper_vfp_touid', - 'gen_helper_vfp_touis', - 'gen_helper_vfp_touizd', - 'gen_helper_vfp_touizs', - 'gen_helper_vfp_tould', - 'gen_helper_vfp_tould_round_to_zero', - 'gen_helper_vfp_touls', - 'gen_helper_vfp_touls_round_to_zero', - 'gen_helper_vfp_uhtod', - 'gen_helper_vfp_uhtos', - 'gen_helper_vfp_uitod', - 'gen_helper_vfp_uitos', - 'gen_helper_vfp_ultod', - 'gen_helper_vfp_ultos', - 'gen_helper_wfe', - 'gen_helper_wfi', - 'gen_hvc', - 'gen_intermediate_code_internal', - 'gen_intermediate_code_internal_a64', - 'gen_iwmmxt_address', - 'gen_iwmmxt_shift', - 'gen_jmp', - 'gen_load_and_replicate', - 'gen_load_exclusive', - 'gen_logic_CC', - 'gen_logicq_cc', - 'gen_lookup_tb', - 'gen_mov_F0_vreg', - 'gen_mov_F1_vreg', - 'gen_mov_vreg_F0', - 'gen_muls_i64_i32', - 'gen_mulu_i64_i32', - 'gen_mulxy', - 'gen_neon_add', - 'gen_neon_addl', - 'gen_neon_addl_saturate', - 'gen_neon_bsl', - 'gen_neon_dup_high16', - 'gen_neon_dup_low16', - 'gen_neon_dup_u8', - 'gen_neon_mull', - 'gen_neon_narrow', - 'gen_neon_narrow_op', - 'gen_neon_narrow_sats', - 'gen_neon_narrow_satu', - 'gen_neon_negl', - 'gen_neon_rsb', - 'gen_neon_shift_narrow', - 'gen_neon_subl', - 'gen_neon_trn_u16', - 'gen_neon_trn_u8', - 'gen_neon_unarrow_sats', - 'gen_neon_unzip', - 'gen_neon_widen', - 'gen_neon_zip', - 'gen_new_label', - 'gen_nop_hint', - 'gen_op_iwmmxt_addl_M0_wRn', - 'gen_op_iwmmxt_addnb_M0_wRn', - 'gen_op_iwmmxt_addnl_M0_wRn', - 'gen_op_iwmmxt_addnw_M0_wRn', - 'gen_op_iwmmxt_addsb_M0_wRn', - 'gen_op_iwmmxt_addsl_M0_wRn', - 'gen_op_iwmmxt_addsw_M0_wRn', - 'gen_op_iwmmxt_addub_M0_wRn', - 'gen_op_iwmmxt_addul_M0_wRn', - 'gen_op_iwmmxt_adduw_M0_wRn', - 'gen_op_iwmmxt_andq_M0_wRn', - 'gen_op_iwmmxt_avgb0_M0_wRn', - 'gen_op_iwmmxt_avgb1_M0_wRn', - 'gen_op_iwmmxt_avgw0_M0_wRn', - 'gen_op_iwmmxt_avgw1_M0_wRn', - 'gen_op_iwmmxt_cmpeqb_M0_wRn', - 'gen_op_iwmmxt_cmpeql_M0_wRn', - 'gen_op_iwmmxt_cmpeqw_M0_wRn', - 'gen_op_iwmmxt_cmpgtsb_M0_wRn', - 'gen_op_iwmmxt_cmpgtsl_M0_wRn', - 'gen_op_iwmmxt_cmpgtsw_M0_wRn', - 'gen_op_iwmmxt_cmpgtub_M0_wRn', - 'gen_op_iwmmxt_cmpgtul_M0_wRn', - 'gen_op_iwmmxt_cmpgtuw_M0_wRn', - 'gen_op_iwmmxt_macsw_M0_wRn', - 'gen_op_iwmmxt_macuw_M0_wRn', - 'gen_op_iwmmxt_maddsq_M0_wRn', - 'gen_op_iwmmxt_madduq_M0_wRn', - 'gen_op_iwmmxt_maxsb_M0_wRn', - 'gen_op_iwmmxt_maxsl_M0_wRn', - 'gen_op_iwmmxt_maxsw_M0_wRn', - 'gen_op_iwmmxt_maxub_M0_wRn', - 'gen_op_iwmmxt_maxul_M0_wRn', - 'gen_op_iwmmxt_maxuw_M0_wRn', - 'gen_op_iwmmxt_minsb_M0_wRn', - 'gen_op_iwmmxt_minsl_M0_wRn', - 'gen_op_iwmmxt_minsw_M0_wRn', - 'gen_op_iwmmxt_minub_M0_wRn', - 'gen_op_iwmmxt_minul_M0_wRn', - 'gen_op_iwmmxt_minuw_M0_wRn', - 'gen_op_iwmmxt_movq_M0_wRn', - 'gen_op_iwmmxt_movq_wRn_M0', - 'gen_op_iwmmxt_mulshw_M0_wRn', - 'gen_op_iwmmxt_mulslw_M0_wRn', - 'gen_op_iwmmxt_muluhw_M0_wRn', - 'gen_op_iwmmxt_mululw_M0_wRn', - 'gen_op_iwmmxt_orq_M0_wRn', - 'gen_op_iwmmxt_packsl_M0_wRn', - 'gen_op_iwmmxt_packsq_M0_wRn', - 'gen_op_iwmmxt_packsw_M0_wRn', - 'gen_op_iwmmxt_packul_M0_wRn', - 'gen_op_iwmmxt_packuq_M0_wRn', - 'gen_op_iwmmxt_packuw_M0_wRn', - 'gen_op_iwmmxt_sadb_M0_wRn', - 'gen_op_iwmmxt_sadw_M0_wRn', - 
'gen_op_iwmmxt_set_cup', - 'gen_op_iwmmxt_set_mup', - 'gen_op_iwmmxt_setpsr_nz', - 'gen_op_iwmmxt_subnb_M0_wRn', - 'gen_op_iwmmxt_subnl_M0_wRn', - 'gen_op_iwmmxt_subnw_M0_wRn', - 'gen_op_iwmmxt_subsb_M0_wRn', - 'gen_op_iwmmxt_subsl_M0_wRn', - 'gen_op_iwmmxt_subsw_M0_wRn', - 'gen_op_iwmmxt_subub_M0_wRn', - 'gen_op_iwmmxt_subul_M0_wRn', - 'gen_op_iwmmxt_subuw_M0_wRn', - 'gen_op_iwmmxt_unpackhb_M0_wRn', - 'gen_op_iwmmxt_unpackhl_M0_wRn', - 'gen_op_iwmmxt_unpackhsb_M0', - 'gen_op_iwmmxt_unpackhsl_M0', - 'gen_op_iwmmxt_unpackhsw_M0', - 'gen_op_iwmmxt_unpackhub_M0', - 'gen_op_iwmmxt_unpackhul_M0', - 'gen_op_iwmmxt_unpackhuw_M0', - 'gen_op_iwmmxt_unpackhw_M0_wRn', - 'gen_op_iwmmxt_unpacklb_M0_wRn', - 'gen_op_iwmmxt_unpackll_M0_wRn', - 'gen_op_iwmmxt_unpacklsb_M0', - 'gen_op_iwmmxt_unpacklsl_M0', - 'gen_op_iwmmxt_unpacklsw_M0', - 'gen_op_iwmmxt_unpacklub_M0', - 'gen_op_iwmmxt_unpacklul_M0', - 'gen_op_iwmmxt_unpackluw_M0', - 'gen_op_iwmmxt_unpacklw_M0_wRn', - 'gen_op_iwmmxt_xorq_M0_wRn', - 'gen_rev16', - 'gen_revsh', - 'gen_rfe', - 'gen_sar', - 'gen_sbc_CC', - 'gen_sbfx', - 'gen_set_CF_bit31', - 'gen_set_condexec', - 'gen_set_cpsr', - 'gen_set_label', - 'gen_set_pc_im', - 'gen_set_psr', - 'gen_set_psr_im', - 'gen_shl', - 'gen_shr', - 'gen_smc', - 'gen_smul_dual', - 'gen_srs', - 'gen_ss_advance', - 'gen_step_complete_exception', - 'gen_store_exclusive', - 'gen_storeq_reg', - 'gen_sub_carry', - 'gen_sub_CC', - 'gen_subq_msw', - 'gen_swap_half', - 'gen_thumb2_data_op', - 'gen_thumb2_parallel_addsub', - 'gen_ubfx', - 'gen_vfp_abs', - 'gen_vfp_add', - 'gen_vfp_cmp', - 'gen_vfp_cmpe', - 'gen_vfp_div', - 'gen_vfp_F1_ld0', - 'gen_vfp_F1_mul', - 'gen_vfp_F1_neg', - 'gen_vfp_ld', - 'gen_vfp_mrs', - 'gen_vfp_msr', - 'gen_vfp_mul', - 'gen_vfp_neg', - 'gen_vfp_shto', - 'gen_vfp_sito', - 'gen_vfp_slto', - 'gen_vfp_sqrt', - 'gen_vfp_st', - 'gen_vfp_sub', - 'gen_vfp_tosh', - 'gen_vfp_tosi', - 'gen_vfp_tosiz', - 'gen_vfp_tosl', - 'gen_vfp_touh', - 'gen_vfp_toui', - 'gen_vfp_touiz', - 'gen_vfp_toul', - 'gen_vfp_uhto', - 'gen_vfp_uito', - 'gen_vfp_ulto', - 'get_arm_cp_reginfo', - 'get_clock', - 'get_clock_realtime', - 'get_constraint_priority', - 'get_float_exception_flags', - 'get_float_rounding_mode', - 'get_fpstatus_ptr', - 'get_level1_table_address', - 'get_mem_index', - 'get_next_param_value', - 'get_opt_name', - 'get_opt_value', - 'get_page_addr_code', - 'get_param_value', - 'get_phys_addr', - 'get_phys_addr_lpae', - 'get_phys_addr_mpu', - 'get_phys_addr_v5', - 'get_phys_addr_v6', - 'get_system_memory', - 'get_ticks_per_sec', - 'g_list_insert_sorted_merged', - '_GLOBAL_OFFSET_TABLE_', - 'gt_cntfrq_access', - 'gt_cnt_read', - 'gt_cnt_reset', - 'gt_counter_access', - 'gt_ctl_write', - 'gt_cval_write', - 'gt_get_countervalue', - 'gt_pct_access', - 'gt_ptimer_access', - 'gt_recalc_timer', - 'gt_timer_access', - 'gt_tval_read', - 'gt_tval_write', - 'gt_vct_access', - 'gt_vtimer_access', - 'guest_phys_blocks_free', - 'guest_phys_blocks_init', - 'handle_vcvt', - 'handle_vminmaxnm', - 'handle_vrint', - 'handle_vsel', - 'has_help_option', - 'have_bmi1', - 'have_bmi2', - 'hcr_write', - 'helper_access_check_cp_reg', - 'helper_add_saturate', - 'helper_add_setq', - 'helper_add_usaturate', - 'helper_be_ldl_cmmu', - 'helper_be_ldq_cmmu', - 'helper_be_ldq_mmu', - 'helper_be_ldsl_mmu', - 'helper_be_ldsw_mmu', - 'helper_be_ldul_mmu', - 'helper_be_lduw_mmu', - 'helper_be_ldw_cmmu', - 'helper_be_stl_mmu', - 'helper_be_stq_mmu', - 'helper_be_stw_mmu', - 'helper_clear_pstate_ss', - 'helper_clz_arm', - 'helper_cpsr_read', - 
'helper_cpsr_write', - 'helper_crc32_arm', - 'helper_crc32c', - 'helper_crypto_aese', - 'helper_crypto_aesmc', - 'helper_crypto_sha1_3reg', - 'helper_crypto_sha1h', - 'helper_crypto_sha1su1', - 'helper_crypto_sha256h', - 'helper_crypto_sha256h2', - 'helper_crypto_sha256su0', - 'helper_crypto_sha256su1', - 'helper_dc_zva', - 'helper_double_saturate', - 'helper_exception_internal', - 'helper_exception_return', - 'helper_exception_with_syndrome', - 'helper_get_cp_reg', - 'helper_get_cp_reg64', - 'helper_get_r13_banked', - 'helper_get_user_reg', - 'helper_iwmmxt_addcb', - 'helper_iwmmxt_addcl', - 'helper_iwmmxt_addcw', - 'helper_iwmmxt_addnb', - 'helper_iwmmxt_addnl', - 'helper_iwmmxt_addnw', - 'helper_iwmmxt_addsb', - 'helper_iwmmxt_addsl', - 'helper_iwmmxt_addsw', - 'helper_iwmmxt_addub', - 'helper_iwmmxt_addul', - 'helper_iwmmxt_adduw', - 'helper_iwmmxt_align', - 'helper_iwmmxt_avgb0', - 'helper_iwmmxt_avgb1', - 'helper_iwmmxt_avgw0', - 'helper_iwmmxt_avgw1', - 'helper_iwmmxt_bcstb', - 'helper_iwmmxt_bcstl', - 'helper_iwmmxt_bcstw', - 'helper_iwmmxt_cmpeqb', - 'helper_iwmmxt_cmpeql', - 'helper_iwmmxt_cmpeqw', - 'helper_iwmmxt_cmpgtsb', - 'helper_iwmmxt_cmpgtsl', - 'helper_iwmmxt_cmpgtsw', - 'helper_iwmmxt_cmpgtub', - 'helper_iwmmxt_cmpgtul', - 'helper_iwmmxt_cmpgtuw', - 'helper_iwmmxt_insr', - 'helper_iwmmxt_macsw', - 'helper_iwmmxt_macuw', - 'helper_iwmmxt_maddsq', - 'helper_iwmmxt_madduq', - 'helper_iwmmxt_maxsb', - 'helper_iwmmxt_maxsl', - 'helper_iwmmxt_maxsw', - 'helper_iwmmxt_maxub', - 'helper_iwmmxt_maxul', - 'helper_iwmmxt_maxuw', - 'helper_iwmmxt_minsb', - 'helper_iwmmxt_minsl', - 'helper_iwmmxt_minsw', - 'helper_iwmmxt_minub', - 'helper_iwmmxt_minul', - 'helper_iwmmxt_minuw', - 'helper_iwmmxt_msbb', - 'helper_iwmmxt_msbl', - 'helper_iwmmxt_msbw', - 'helper_iwmmxt_muladdsl', - 'helper_iwmmxt_muladdsw', - 'helper_iwmmxt_muladdswl', - 'helper_iwmmxt_mulshw', - 'helper_iwmmxt_mulslw', - 'helper_iwmmxt_muluhw', - 'helper_iwmmxt_mululw', - 'helper_iwmmxt_packsl', - 'helper_iwmmxt_packsq', - 'helper_iwmmxt_packsw', - 'helper_iwmmxt_packul', - 'helper_iwmmxt_packuq', - 'helper_iwmmxt_packuw', - 'helper_iwmmxt_rorl', - 'helper_iwmmxt_rorq', - 'helper_iwmmxt_rorw', - 'helper_iwmmxt_sadb', - 'helper_iwmmxt_sadw', - 'helper_iwmmxt_setpsr_nz', - 'helper_iwmmxt_shufh', - 'helper_iwmmxt_slll', - 'helper_iwmmxt_sllq', - 'helper_iwmmxt_sllw', - 'helper_iwmmxt_sral', - 'helper_iwmmxt_sraq', - 'helper_iwmmxt_sraw', - 'helper_iwmmxt_srll', - 'helper_iwmmxt_srlq', - 'helper_iwmmxt_srlw', - 'helper_iwmmxt_subnb', - 'helper_iwmmxt_subnl', - 'helper_iwmmxt_subnw', - 'helper_iwmmxt_subsb', - 'helper_iwmmxt_subsl', - 'helper_iwmmxt_subsw', - 'helper_iwmmxt_subub', - 'helper_iwmmxt_subul', - 'helper_iwmmxt_subuw', - 'helper_iwmmxt_unpackhb', - 'helper_iwmmxt_unpackhl', - 'helper_iwmmxt_unpackhsb', - 'helper_iwmmxt_unpackhsl', - 'helper_iwmmxt_unpackhsw', - 'helper_iwmmxt_unpackhub', - 'helper_iwmmxt_unpackhul', - 'helper_iwmmxt_unpackhuw', - 'helper_iwmmxt_unpackhw', - 'helper_iwmmxt_unpacklb', - 'helper_iwmmxt_unpackll', - 'helper_iwmmxt_unpacklsb', - 'helper_iwmmxt_unpacklsl', - 'helper_iwmmxt_unpacklsw', - 'helper_iwmmxt_unpacklub', - 'helper_iwmmxt_unpacklul', - 'helper_iwmmxt_unpackluw', - 'helper_iwmmxt_unpacklw', - 'helper_ldb_cmmu', - 'helper_ldb_mmu', - 'helper_ldl_cmmu', - 'helper_ldl_mmu', - 'helper_ldq_cmmu', - 'helper_ldq_mmu', - 'helper_ldw_cmmu', - 'helper_ldw_mmu', - 'helper_le_ldl_cmmu', - 'helper_le_ldq_cmmu', - 'helper_le_ldq_mmu', - 'helper_le_ldsl_mmu', - 'helper_le_ldsw_mmu', - 
'helper_le_ldul_mmu', - 'helper_le_lduw_mmu', - 'helper_le_ldw_cmmu', - 'helper_le_stl_mmu', - 'helper_le_stq_mmu', - 'helper_le_stw_mmu', - 'helper_msr_i_pstate', - 'helper_neon_abd_f32', - 'helper_neon_abdl_s16', - 'helper_neon_abdl_s32', - 'helper_neon_abdl_s64', - 'helper_neon_abdl_u16', - 'helper_neon_abdl_u32', - 'helper_neon_abdl_u64', - 'helper_neon_abd_s16', - 'helper_neon_abd_s32', - 'helper_neon_abd_s8', - 'helper_neon_abd_u16', - 'helper_neon_abd_u32', - 'helper_neon_abd_u8', - 'helper_neon_abs_s16', - 'helper_neon_abs_s8', - 'helper_neon_acge_f32', - 'helper_neon_acge_f64', - 'helper_neon_acgt_f32', - 'helper_neon_acgt_f64', - 'helper_neon_addl_saturate_s32', - 'helper_neon_addl_saturate_s64', - 'helper_neon_addl_u16', - 'helper_neon_addl_u32', - 'helper_neon_add_u16', - 'helper_neon_add_u8', - 'helper_neon_ceq_f32', - 'helper_neon_ceq_u16', - 'helper_neon_ceq_u32', - 'helper_neon_ceq_u8', - 'helper_neon_cge_f32', - 'helper_neon_cge_s16', - 'helper_neon_cge_s32', - 'helper_neon_cge_s8', - 'helper_neon_cge_u16', - 'helper_neon_cge_u32', - 'helper_neon_cge_u8', - 'helper_neon_cgt_f32', - 'helper_neon_cgt_s16', - 'helper_neon_cgt_s32', - 'helper_neon_cgt_s8', - 'helper_neon_cgt_u16', - 'helper_neon_cgt_u32', - 'helper_neon_cgt_u8', - 'helper_neon_cls_s16', - 'helper_neon_cls_s32', - 'helper_neon_cls_s8', - 'helper_neon_clz_u16', - 'helper_neon_clz_u8', - 'helper_neon_cnt_u8', - 'helper_neon_fcvt_f16_to_f32', - 'helper_neon_fcvt_f32_to_f16', - 'helper_neon_hadd_s16', - 'helper_neon_hadd_s32', - 'helper_neon_hadd_s8', - 'helper_neon_hadd_u16', - 'helper_neon_hadd_u32', - 'helper_neon_hadd_u8', - 'helper_neon_hsub_s16', - 'helper_neon_hsub_s32', - 'helper_neon_hsub_s8', - 'helper_neon_hsub_u16', - 'helper_neon_hsub_u32', - 'helper_neon_hsub_u8', - 'helper_neon_max_s16', - 'helper_neon_max_s32', - 'helper_neon_max_s8', - 'helper_neon_max_u16', - 'helper_neon_max_u32', - 'helper_neon_max_u8', - 'helper_neon_min_s16', - 'helper_neon_min_s32', - 'helper_neon_min_s8', - 'helper_neon_min_u16', - 'helper_neon_min_u32', - 'helper_neon_min_u8', - 'helper_neon_mull_p8', - 'helper_neon_mull_s16', - 'helper_neon_mull_s8', - 'helper_neon_mull_u16', - 'helper_neon_mull_u8', - 'helper_neon_mul_p8', - 'helper_neon_mul_u16', - 'helper_neon_mul_u8', - 'helper_neon_narrow_high_u16', - 'helper_neon_narrow_high_u8', - 'helper_neon_narrow_round_high_u16', - 'helper_neon_narrow_round_high_u8', - 'helper_neon_narrow_sat_s16', - 'helper_neon_narrow_sat_s32', - 'helper_neon_narrow_sat_s8', - 'helper_neon_narrow_sat_u16', - 'helper_neon_narrow_sat_u32', - 'helper_neon_narrow_sat_u8', - 'helper_neon_narrow_u16', - 'helper_neon_narrow_u8', - 'helper_neon_negl_u16', - 'helper_neon_negl_u32', - 'helper_neon_paddl_u16', - 'helper_neon_paddl_u32', - 'helper_neon_padd_u16', - 'helper_neon_padd_u8', - 'helper_neon_pmax_s16', - 'helper_neon_pmax_s8', - 'helper_neon_pmax_u16', - 'helper_neon_pmax_u8', - 'helper_neon_pmin_s16', - 'helper_neon_pmin_s8', - 'helper_neon_pmin_u16', - 'helper_neon_pmin_u8', - 'helper_neon_pmull_64_hi', - 'helper_neon_pmull_64_lo', - 'helper_neon_qabs_s16', - 'helper_neon_qabs_s32', - 'helper_neon_qabs_s64', - 'helper_neon_qabs_s8', - 'helper_neon_qadd_s16', - 'helper_neon_qadd_s32', - 'helper_neon_qadd_s64', - 'helper_neon_qadd_s8', - 'helper_neon_qadd_u16', - 'helper_neon_qadd_u32', - 'helper_neon_qadd_u64', - 'helper_neon_qadd_u8', - 'helper_neon_qdmulh_s16', - 'helper_neon_qdmulh_s32', - 'helper_neon_qneg_s16', - 'helper_neon_qneg_s32', - 'helper_neon_qneg_s64', - 'helper_neon_qneg_s8', - 
'helper_neon_qrdmulh_s16', - 'helper_neon_qrdmulh_s32', - 'helper_neon_qrshl_s16', - 'helper_neon_qrshl_s32', - 'helper_neon_qrshl_s64', - 'helper_neon_qrshl_s8', - 'helper_neon_qrshl_u16', - 'helper_neon_qrshl_u32', - 'helper_neon_qrshl_u64', - 'helper_neon_qrshl_u8', - 'helper_neon_qshl_s16', - 'helper_neon_qshl_s32', - 'helper_neon_qshl_s64', - 'helper_neon_qshl_s8', - 'helper_neon_qshl_u16', - 'helper_neon_qshl_u32', - 'helper_neon_qshl_u64', - 'helper_neon_qshl_u8', - 'helper_neon_qshlu_s16', - 'helper_neon_qshlu_s32', - 'helper_neon_qshlu_s64', - 'helper_neon_qshlu_s8', - 'helper_neon_qsub_s16', - 'helper_neon_qsub_s32', - 'helper_neon_qsub_s64', - 'helper_neon_qsub_s8', - 'helper_neon_qsub_u16', - 'helper_neon_qsub_u32', - 'helper_neon_qsub_u64', - 'helper_neon_qsub_u8', - 'helper_neon_qunzip16', - 'helper_neon_qunzip32', - 'helper_neon_qunzip8', - 'helper_neon_qzip16', - 'helper_neon_qzip32', - 'helper_neon_qzip8', - 'helper_neon_rbit_u8', - 'helper_neon_rhadd_s16', - 'helper_neon_rhadd_s32', - 'helper_neon_rhadd_s8', - 'helper_neon_rhadd_u16', - 'helper_neon_rhadd_u32', - 'helper_neon_rhadd_u8', - 'helper_neon_rshl_s16', - 'helper_neon_rshl_s32', - 'helper_neon_rshl_s64', - 'helper_neon_rshl_s8', - 'helper_neon_rshl_u16', - 'helper_neon_rshl_u32', - 'helper_neon_rshl_u64', - 'helper_neon_rshl_u8', - 'helper_neon_shl_s16', - 'helper_neon_shl_s32', - 'helper_neon_shl_s64', - 'helper_neon_shl_s8', - 'helper_neon_shl_u16', - 'helper_neon_shl_u32', - 'helper_neon_shl_u64', - 'helper_neon_shl_u8', - 'helper_neon_sqadd_u16', - 'helper_neon_sqadd_u32', - 'helper_neon_sqadd_u64', - 'helper_neon_sqadd_u8', - 'helper_neon_subl_u16', - 'helper_neon_subl_u32', - 'helper_neon_sub_u16', - 'helper_neon_sub_u8', - 'helper_neon_tbl', - 'helper_neon_tst_u16', - 'helper_neon_tst_u32', - 'helper_neon_tst_u8', - 'helper_neon_unarrow_sat16', - 'helper_neon_unarrow_sat32', - 'helper_neon_unarrow_sat8', - 'helper_neon_unzip16', - 'helper_neon_unzip8', - 'helper_neon_uqadd_s16', - 'helper_neon_uqadd_s32', - 'helper_neon_uqadd_s64', - 'helper_neon_uqadd_s8', - 'helper_neon_widen_s16', - 'helper_neon_widen_s8', - 'helper_neon_widen_u16', - 'helper_neon_widen_u8', - 'helper_neon_zip16', - 'helper_neon_zip8', - 'helper_pre_hvc', - 'helper_pre_smc', - 'helper_qadd16', - 'helper_qadd8', - 'helper_qaddsubx', - 'helper_qsub16', - 'helper_qsub8', - 'helper_qsubaddx', - 'helper_rbit', - 'helper_recpe_f32', - 'helper_recpe_f64', - 'helper_recpe_u32', - 'helper_recps_f32', - 'helper_ret_ldb_cmmu', - 'helper_ret_ldsb_mmu', - 'helper_ret_ldub_mmu', - 'helper_ret_stb_mmu', - 'helper_rintd', - 'helper_rintd_exact', - 'helper_rints', - 'helper_rints_exact', - 'helper_ror_cc', - 'helper_rsqrte_f32', - 'helper_rsqrte_f64', - 'helper_rsqrte_u32', - 'helper_rsqrts_f32', - 'helper_sadd16', - 'helper_sadd8', - 'helper_saddsubx', - 'helper_sar_cc', - 'helper_sdiv', - 'helper_sel_flags', - 'helper_set_cp_reg', - 'helper_set_cp_reg64', - 'helper_set_neon_rmode', - 'helper_set_r13_banked', - 'helper_set_rmode', - 'helper_set_user_reg', - 'helper_shadd16', - 'helper_shadd8', - 'helper_shaddsubx', - 'helper_shl_cc', - 'helper_shr_cc', - 'helper_shsub16', - 'helper_shsub8', - 'helper_shsubaddx', - 'helper_ssat', - 'helper_ssat16', - 'helper_ssub16', - 'helper_ssub8', - 'helper_ssubaddx', - 'helper_stb_mmu', - 'helper_stl_mmu', - 'helper_stq_mmu', - 'helper_stw_mmu', - 'helper_sub_saturate', - 'helper_sub_usaturate', - 'helper_sxtb16', - 'helper_uadd16', - 'helper_uadd8', - 'helper_uaddsubx', - 'helper_udiv', - 'helper_uhadd16', - 
'helper_uhadd8', - 'helper_uhaddsubx', - 'helper_uhsub16', - 'helper_uhsub8', - 'helper_uhsubaddx', - 'helper_uqadd16', - 'helper_uqadd8', - 'helper_uqaddsubx', - 'helper_uqsub16', - 'helper_uqsub8', - 'helper_uqsubaddx', - 'helper_usad8', - 'helper_usat', - 'helper_usat16', - 'helper_usub16', - 'helper_usub8', - 'helper_usubaddx', - 'helper_uxtb16', - 'helper_v7m_mrs', - 'helper_v7m_msr', - 'helper_vfp_absd', - 'helper_vfp_abss', - 'helper_vfp_addd', - 'helper_vfp_adds', - 'helper_vfp_cmpd', - 'helper_vfp_cmped', - 'helper_vfp_cmpes', - 'helper_vfp_cmps', - 'helper_vfp_divd', - 'helper_vfp_divs', - 'helper_vfp_fcvtds', - 'helper_vfp_fcvt_f16_to_f32', - 'helper_vfp_fcvt_f16_to_f64', - 'helper_vfp_fcvt_f32_to_f16', - 'helper_vfp_fcvt_f64_to_f16', - 'helper_vfp_fcvtsd', - 'helper_vfp_get_fpscr', - 'helper_vfp_maxd', - 'helper_vfp_maxnumd', - 'helper_vfp_maxnums', - 'helper_vfp_maxs', - 'helper_vfp_mind', - 'helper_vfp_minnumd', - 'helper_vfp_minnums', - 'helper_vfp_mins', - 'helper_vfp_muladdd', - 'helper_vfp_muladds', - 'helper_vfp_muld', - 'helper_vfp_muls', - 'helper_vfp_negd', - 'helper_vfp_negs', - 'helper_vfp_set_fpscr', - 'helper_vfp_shtod', - 'helper_vfp_shtos', - 'helper_vfp_sitod', - 'helper_vfp_sitos', - 'helper_vfp_sltod', - 'helper_vfp_sltos', - 'helper_vfp_sqrtd', - 'helper_vfp_sqrts', - 'helper_vfp_sqtod', - 'helper_vfp_sqtos', - 'helper_vfp_subd', - 'helper_vfp_subs', - 'helper_vfp_toshd', - 'helper_vfp_toshd_round_to_zero', - 'helper_vfp_toshs', - 'helper_vfp_toshs_round_to_zero', - 'helper_vfp_tosid', - 'helper_vfp_tosis', - 'helper_vfp_tosizd', - 'helper_vfp_tosizs', - 'helper_vfp_tosld', - 'helper_vfp_tosld_round_to_zero', - 'helper_vfp_tosls', - 'helper_vfp_tosls_round_to_zero', - 'helper_vfp_tosqd', - 'helper_vfp_tosqs', - 'helper_vfp_touhd', - 'helper_vfp_touhd_round_to_zero', - 'helper_vfp_touhs', - 'helper_vfp_touhs_round_to_zero', - 'helper_vfp_touid', - 'helper_vfp_touis', - 'helper_vfp_touizd', - 'helper_vfp_touizs', - 'helper_vfp_tould', - 'helper_vfp_tould_round_to_zero', - 'helper_vfp_touls', - 'helper_vfp_touls_round_to_zero', - 'helper_vfp_touqd', - 'helper_vfp_touqs', - 'helper_vfp_uhtod', - 'helper_vfp_uhtos', - 'helper_vfp_uitod', - 'helper_vfp_uitos', - 'helper_vfp_ultod', - 'helper_vfp_ultos', - 'helper_vfp_uqtod', - 'helper_vfp_uqtos', - 'helper_wfe', - 'helper_wfi', - 'hex2decimal', - 'hw_breakpoint_update', - 'hw_breakpoint_update_all', - 'hw_watchpoint_update', - 'hw_watchpoint_update_all', - '_init', - 'init_cpreg_list', - 'init_lists', - 'input_type_enum', - 'int128_2_64', - 'int128_add', - 'int128_addto', - 'int128_and', - 'int128_eq', - 'int128_ge', - 'int128_get64', - 'int128_gt', - 'int128_le', - 'int128_lt', - 'int128_make64', - 'int128_max', - 'int128_min', - 'int128_ne', - 'int128_neg', - 'int128_nz', - 'int128_rshift', - 'int128_sub', - 'int128_subfrom', - 'int128_zero', - 'int16_to_float32', - 'int16_to_float64', - 'int32_to_float128', - 'int32_to_float32', - 'int32_to_float64', - 'int32_to_floatx80', - 'int64_to_float128', - 'int64_to_float32', - 'int64_to_float64', - 'int64_to_floatx80', - 'invalidate_and_set_dirty', - 'invalidate_page_bitmap', - 'io_mem_read', - 'io_mem_write', - 'io_readb', - 'io_readl', - 'io_readq', - 'io_readw', - 'iotlb_to_region', - 'io_writeb', - 'io_writel', - 'io_writeq', - 'io_writew', - 'is_a64', - 'is_help_option', - 'isr_read', - 'is_valid_option_list', - 'iwmmxt_load_creg', - 'iwmmxt_load_reg', - 'iwmmxt_store_creg', - 'iwmmxt_store_reg', - '__jit_debug_descriptor', - '__jit_debug_register_code', - 
'kvm_to_cpreg_id', - 'last_ram_offset', - 'ldl_be_p', - 'ldl_be_phys', - 'ldl_he_p', - 'ldl_le_p', - 'ldl_le_phys', - 'ldl_phys', - 'ldl_phys_internal', - 'ldq_be_p', - 'ldq_be_phys', - 'ldq_he_p', - 'ldq_le_p', - 'ldq_le_phys', - 'ldq_phys', - 'ldq_phys_internal', - 'ldst_name', - 'ldub_p', - 'ldub_phys', - 'lduw_be_p', - 'lduw_be_phys', - 'lduw_he_p', - 'lduw_le_p', - 'lduw_le_phys', - 'lduw_phys', - 'lduw_phys_internal', - 'le128', - 'linked_bp_matches', - 'listener_add_address_space', - 'load_cpu_offset', - 'load_reg', - 'load_reg_var', - 'log_cpu_state', - 'lpae_cp_reginfo', - 'lt128', - 'machine_class_init', - 'machine_finalize', - 'machine_info', - 'machine_initfn', - 'machine_register_types', - 'machvirt_init', - 'machvirt_machine_init', - 'maj', - 'mapping_conflict', - 'mapping_contiguous', - 'mapping_have_same_region', - 'mapping_merge', - 'mem_add', - 'mem_begin', - 'mem_commit', - 'memory_access_is_direct', - 'memory_access_size', - 'memory_init', - 'memory_listener_match', - 'memory_listener_register', - 'memory_listener_unregister', - 'memory_map_init', - 'memory_mapping_filter', - 'memory_mapping_list_add_mapping_sorted', - 'memory_mapping_list_add_merge_sorted', - 'memory_mapping_list_free', - 'memory_mapping_list_init', - 'memory_region_access_valid', - 'memory_region_add_subregion', - 'memory_region_add_subregion_common', - 'memory_region_add_subregion_overlap', - 'memory_region_big_endian', - 'memory_region_clear_pending', - 'memory_region_del_subregion', - 'memory_region_destructor_alias', - 'memory_region_destructor_none', - 'memory_region_destructor_ram', - 'memory_region_destructor_ram_from_ptr', - 'memory_region_dispatch_read', - 'memory_region_dispatch_read1', - 'memory_region_dispatch_write', - 'memory_region_escape_name', - 'memory_region_finalize', - 'memory_region_find', - 'memory_region_get_addr', - 'memory_region_get_alignment', - 'memory_region_get_container', - 'memory_region_get_fd', - 'memory_region_get_may_overlap', - 'memory_region_get_priority', - 'memory_region_get_ram_addr', - 'memory_region_get_ram_ptr', - 'memory_region_get_size', - 'memory_region_info', - 'memory_region_init', - 'memory_region_init_alias', - 'memory_region_initfn', - 'memory_region_init_io', - 'memory_region_init_ram', - 'memory_region_init_ram_ptr', - 'memory_region_init_reservation', - 'memory_region_is_iommu', - 'memory_region_is_logging', - 'memory_region_is_mapped', - 'memory_region_is_ram', - 'memory_region_is_rom', - 'memory_region_is_romd', - 'memory_region_is_skip_dump', - 'memory_region_is_unassigned', - 'memory_region_name', - 'memory_region_need_escape', - 'memory_region_oldmmio_read_accessor', - 'memory_region_oldmmio_write_accessor', - 'memory_region_present', - 'memory_region_read_accessor', - 'memory_region_readd_subregion', - 'memory_region_ref', - 'memory_region_resolve_container', - 'memory_region_rom_device_set_romd', - 'memory_region_section_get_iotlb', - 'memory_region_set_address', - 'memory_region_set_alias_offset', - 'memory_region_set_enabled', - 'memory_region_set_readonly', - 'memory_region_set_skip_dump', - 'memory_region_size', - 'memory_region_to_address_space', - 'memory_region_transaction_begin', - 'memory_region_transaction_commit', - 'memory_region_unref', - 'memory_region_update_container_subregions', - 'memory_region_write_accessor', - 'memory_region_wrong_endianness', - 'memory_try_enable_merging', - 'module_call_init', - 'module_load', - 'mpidr_cp_reginfo', - 'mpidr_read', - 'msr_mask', - 'mul128By64To192', - 'mul128To256', - 'mul64To128', - 
'muldiv64', - 'neon_2rm_is_float_op', - 'neon_2rm_sizes', - 'neon_3r_sizes', - 'neon_get_scalar', - 'neon_load_reg', - 'neon_load_reg64', - 'neon_load_scratch', - 'neon_ls_element_type', - 'neon_reg_offset', - 'neon_store_reg', - 'neon_store_reg64', - 'neon_store_scratch', - 'new_ldst_label', - 'next_list', - 'normalizeFloat128Subnormal', - 'normalizeFloat16Subnormal', - 'normalizeFloat32Subnormal', - 'normalizeFloat64Subnormal', - 'normalizeFloatx80Subnormal', - 'normalizeRoundAndPackFloat128', - 'normalizeRoundAndPackFloat32', - 'normalizeRoundAndPackFloat64', - 'normalizeRoundAndPackFloatx80', - 'not_v6_cp_reginfo', - 'not_v7_cp_reginfo', - 'not_v8_cp_reginfo', - 'object_child_foreach', - 'object_class_foreach', - 'object_class_foreach_tramp', - 'object_class_get_list', - 'object_class_get_list_tramp', - 'object_class_get_parent', - 'object_deinit', - 'object_dynamic_cast', - 'object_finalize', - 'object_finalize_child_property', - 'object_get_child_property', - 'object_get_link_property', - 'object_get_root', - 'object_initialize_with_type', - 'object_init_with_type', - 'object_instance_init', - 'object_new_with_type', - 'object_post_init_with_type', - 'object_property_add_alias', - 'object_property_add_link', - 'object_property_add_uint16_ptr', - 'object_property_add_uint32_ptr', - 'object_property_add_uint64_ptr', - 'object_property_add_uint8_ptr', - 'object_property_allow_set_link', - 'object_property_del', - 'object_property_del_all', - 'object_property_find', - 'object_property_get', - 'object_property_get_bool', - 'object_property_get_int', - 'object_property_get_link', - 'object_property_get_qobject', - 'object_property_get_str', - 'object_property_get_type', - 'object_property_is_child', - 'object_property_set', - 'object_property_set_description', - 'object_property_set_link', - 'object_property_set_qobject', - 'object_release_link_property', - 'object_resolve_abs_path', - 'object_resolve_child_property', - 'object_resolve_link', - 'object_resolve_link_property', - 'object_resolve_partial_path', - 'object_resolve_path', - 'object_resolve_path_component', - 'object_resolve_path_type', - 'object_set_link_property', - 'object_unparent', - 'omap_cachemaint_write', - 'omap_cp_reginfo', - 'omap_threadid_write', - 'omap_ticonfig_write', - 'omap_wfi_write', - 'op_bits', - 'open_modeflags', - 'op_to_mov', - 'op_to_movi', - 'output_type_enum', - 'packFloat128', - 'packFloat16', - 'packFloat32', - 'packFloat64', - 'packFloatx80', - 'page_find', - 'page_find_alloc', - 'page_flush_tb', - 'page_flush_tb_1', - 'page_init', - 'page_size_init', - 'par', - 'parse_array', - 'parse_error', - 'parse_escape', - 'parse_keyword', - 'parse_literal', - 'parse_object', - 'parse_optional', - 'parse_option_bool', - 'parse_option_number', - 'parse_option_size', - 'parse_pair', - 'parser_context_free', - 'parser_context_new', - 'parser_context_peek_token', - 'parser_context_pop_token', - 'parser_context_restore', - 'parser_context_save', - 'parse_str', - 'parse_type_bool', - 'parse_type_int', - 'parse_type_number', - 'parse_type_size', - 'parse_type_str', - 'parse_value', - 'par_write', - 'patch_reloc', - 'phys_map_node_alloc', - 'phys_map_node_reserve', - 'phys_mem_alloc', - 'phys_mem_set_alloc', - 'phys_page_compact', - 'phys_page_compact_all', - 'phys_page_find', - 'phys_page_set', - 'phys_page_set_level', - 'phys_section_add', - 'phys_section_destroy', - 'phys_sections_free', - 'pickNaN', - 'pickNaNMulAdd', - 'pmccfiltr_write', - 'pmccntr_read', - 'pmccntr_sync', - 'pmccntr_write', - 'pmccntr_write32', 
- 'pmcntenclr_write', - 'pmcntenset_write', - 'pmcr_write', - 'pmintenclr_write', - 'pmintenset_write', - 'pmovsr_write', - 'pmreg_access', - 'pmsav5_cp_reginfo', - 'pmsav5_data_ap_read', - 'pmsav5_data_ap_write', - 'pmsav5_insn_ap_read', - 'pmsav5_insn_ap_write', - 'pmuserenr_write', - 'pmxevtyper_write', - 'print_type_bool', - 'print_type_int', - 'print_type_number', - 'print_type_size', - 'print_type_str', - 'propagateFloat128NaN', - 'propagateFloat32MulAddNaN', - 'propagateFloat32NaN', - 'propagateFloat64MulAddNaN', - 'propagateFloat64NaN', - 'propagateFloatx80NaN', - 'property_get_alias', - 'property_get_bool', - 'property_get_str', - 'property_get_uint16_ptr', - 'property_get_uint32_ptr', - 'property_get_uint64_ptr', - 'property_get_uint8_ptr', - 'property_release_alias', - 'property_release_bool', - 'property_release_str', - 'property_resolve_alias', - 'property_set_alias', - 'property_set_bool', - 'property_set_str', - 'pstate_read', - 'pstate_write', - 'pxa250_initfn', - 'pxa255_initfn', - 'pxa260_initfn', - 'pxa261_initfn', - 'pxa262_initfn', - 'pxa270a0_initfn', - 'pxa270a1_initfn', - 'pxa270b0_initfn', - 'pxa270b1_initfn', - 'pxa270c0_initfn', - 'pxa270c5_initfn', - 'qapi_dealloc_end_implicit_struct', - 'qapi_dealloc_end_list', - 'qapi_dealloc_end_struct', - 'qapi_dealloc_get_visitor', - 'qapi_dealloc_next_list', - 'qapi_dealloc_pop', - 'qapi_dealloc_push', - 'qapi_dealloc_start_implicit_struct', - 'qapi_dealloc_start_list', - 'qapi_dealloc_start_struct', - 'qapi_dealloc_start_union', - 'qapi_dealloc_type_bool', - 'qapi_dealloc_type_enum', - 'qapi_dealloc_type_int', - 'qapi_dealloc_type_number', - 'qapi_dealloc_type_size', - 'qapi_dealloc_type_str', - 'qapi_dealloc_visitor_cleanup', - 'qapi_dealloc_visitor_new', - 'qapi_free_boolList', - 'qapi_free_ErrorClassList', - 'qapi_free_int16List', - 'qapi_free_int32List', - 'qapi_free_int64List', - 'qapi_free_int8List', - 'qapi_free_intList', - 'qapi_free_numberList', - 'qapi_free_strList', - 'qapi_free_uint16List', - 'qapi_free_uint32List', - 'qapi_free_uint64List', - 'qapi_free_uint8List', - 'qapi_free_X86CPUFeatureWordInfo', - 'qapi_free_X86CPUFeatureWordInfoList', - 'qapi_free_X86CPURegister32List', - 'qbool_destroy_obj', - 'qbool_from_int', - 'qbool_get_int', - 'qbool_type', - 'qbus_create', - 'qbus_create_inplace', - 'qbus_finalize', - 'qbus_initfn', - 'qbus_realize', - 'qdev_create', - 'qdev_get_type', - 'qdev_register_types', - 'qdev_set_parent_bus', - 'qdev_try_create', - 'qdict_add_key', - 'qdict_array_split', - 'qdict_clone_shallow', - 'qdict_del', - 'qdict_destroy_obj', - 'qdict_entry_key', - 'qdict_entry_value', - 'qdict_extract_subqdict', - 'qdict_find', - 'qdict_first', - 'qdict_flatten', - 'qdict_flatten_qdict', - 'qdict_flatten_qlist', - 'qdict_get', - 'qdict_get_bool', - 'qdict_get_double', - 'qdict_get_int', - 'qdict_get_obj', - 'qdict_get_qdict', - 'qdict_get_qlist', - 'qdict_get_str', - 'qdict_get_try_bool', - 'qdict_get_try_int', - 'qdict_get_try_str', - 'qdict_haskey', - 'qdict_has_prefixed_entries', - 'qdict_iter', - 'qdict_join', - 'qdict_new', - 'qdict_next', - 'qdict_next_entry', - 'qdict_put_obj', - 'qdict_size', - 'qdict_type', - 'qemu_clock_get_us', - 'qemu_clock_ptr', - 'qemu_clocks', - 'qemu_get_cpu', - 'qemu_get_guest_memory_mapping', - 'qemu_get_guest_simple_memory_mapping', - 'qemu_get_ram_block', - 'qemu_get_ram_block_host_ptr', - 'qemu_get_ram_fd', - 'qemu_get_ram_ptr', - 'qemu_host_page_mask', - 'qemu_host_page_size', - 'qemu_init_vcpu', - 'qemu_ld_helpers', - 'qemu_log_close', - 
'qemu_log_enabled', - 'qemu_log_flush', - 'qemu_loglevel_mask', - 'qemu_log_vprintf', - 'qemu_oom_check', - 'qemu_parse_fd', - 'qemu_ram_addr_from_host', - 'qemu_ram_addr_from_host_nofail', - 'qemu_ram_alloc', - 'qemu_ram_alloc_from_ptr', - 'qemu_ram_foreach_block', - 'qemu_ram_free', - 'qemu_ram_free_from_ptr', - 'qemu_ram_ptr_length', - 'qemu_ram_remap', - 'qemu_ram_setup_dump', - 'qemu_ram_unset_idstr', - 'qemu_real_host_page_size', - 'qemu_st_helpers', - 'qemu_tcg_init_vcpu', - 'qemu_try_memalign', - 'qentry_destroy', - 'qerror_human', - 'qerror_report', - 'qerror_report_err', - 'qfloat_destroy_obj', - 'qfloat_from_double', - 'qfloat_get_double', - 'qfloat_type', - 'qint_destroy_obj', - 'qint_from_int', - 'qint_get_int', - 'qint_type', - 'qlist_append_obj', - 'qlist_copy', - 'qlist_copy_elem', - 'qlist_destroy_obj', - 'qlist_empty', - 'qlist_entry_obj', - 'qlist_first', - 'qlist_iter', - 'qlist_new', - 'qlist_next', - 'qlist_peek', - 'qlist_pop', - 'qlist_size', - 'qlist_size_iter', - 'qlist_type', - 'qmp_input_end_implicit_struct', - 'qmp_input_end_list', - 'qmp_input_end_struct', - 'qmp_input_get_next_type', - 'qmp_input_get_object', - 'qmp_input_get_visitor', - 'qmp_input_next_list', - 'qmp_input_optional', - 'qmp_input_pop', - 'qmp_input_push', - 'qmp_input_start_implicit_struct', - 'qmp_input_start_list', - 'qmp_input_start_struct', - 'qmp_input_type_bool', - 'qmp_input_type_int', - 'qmp_input_type_number', - 'qmp_input_type_str', - 'qmp_input_visitor_cleanup', - 'qmp_input_visitor_new', - 'qmp_input_visitor_new_strict', - 'qmp_output_add_obj', - 'qmp_output_end_list', - 'qmp_output_end_struct', - 'qmp_output_first', - 'qmp_output_get_qobject', - 'qmp_output_get_visitor', - 'qmp_output_last', - 'qmp_output_next_list', - 'qmp_output_pop', - 'qmp_output_push_obj', - 'qmp_output_start_list', - 'qmp_output_start_struct', - 'qmp_output_type_bool', - 'qmp_output_type_int', - 'qmp_output_type_number', - 'qmp_output_type_str', - 'qmp_output_visitor_cleanup', - 'qmp_output_visitor_new', - 'qobject_decref', - 'qobject_to_qbool', - 'qobject_to_qdict', - 'qobject_to_qfloat', - 'qobject_to_qint', - 'qobject_to_qlist', - 'qobject_to_qstring', - 'qobject_type', - 'qstring_append', - 'qstring_append_chr', - 'qstring_append_int', - 'qstring_destroy_obj', - 'qstring_from_escaped_str', - 'qstring_from_str', - 'qstring_from_substr', - 'qstring_get_length', - 'qstring_get_str', - 'qstring_new', - 'qstring_type', - 'ram_block_add', - 'ram_size', - 'range_compare', - 'range_covers_byte', - 'range_get_last', - 'range_merge', - 'ranges_can_merge', - 'raw_read', - 'raw_write', - 'rcon', - 'read_raw_cp_reg', - 'recip_estimate', - 'recip_sqrt_estimate', - 'register_cp_regs_for_features', - 'register_multipage', - 'register_subpage', - 'register_tm_clones', - 'register_types_object', - 'regnames', - 'render_memory_region', - 'reset_all_temps', - 'reset_temp', - 'rol32', - 'rol64', - 'ror32', - 'ror64', - 'roundAndPackFloat128', - 'roundAndPackFloat16', - 'roundAndPackFloat32', - 'roundAndPackFloat64', - 'roundAndPackFloatx80', - 'roundAndPackInt32', - 'roundAndPackInt64', - 'roundAndPackUint64', - 'round_to_inf', - 'run_on_cpu', - 's0', - 'S0', - 's1', - 'S1', - 'sa1100_initfn', - 'sa1110_initfn', - 'save_globals', - 'scr_write', - 'sctlr_write', - 'set_bit', - 'set_bits', - 'set_default_nan_mode', - 'set_feature', - 'set_float_detect_tininess', - 'set_float_exception_flags', - 'set_float_rounding_mode', - 'set_flush_inputs_to_zero', - 'set_flush_to_zero', - 'set_swi_errno', - 'sextract32', - 'sextract64', - 
'shift128ExtraRightJamming', - 'shift128Right', - 'shift128RightJamming', - 'shift32RightJamming', - 'shift64ExtraRightJamming', - 'shift64RightJamming', - 'shifter_out_im', - 'shortShift128Left', - 'shortShift192Left', - 'simple_mpu_ap_bits', - 'size_code_gen_buffer', - 'softmmu_lock_user', - 'softmmu_lock_user_string', - 'softmmu_tget32', - 'softmmu_tget8', - 'softmmu_tput32', - 'softmmu_unlock_user', - 'sort_constraints', - 'sp_el0_access', - 'spsel_read', - 'spsel_write', - 'start_list', - 'stb_p', - 'stb_phys', - 'stl_be_p', - 'stl_be_phys', - 'stl_he_p', - 'stl_le_p', - 'stl_le_phys', - 'stl_phys', - 'stl_phys_internal', - 'stl_phys_notdirty', - 'store_cpu_offset', - 'store_reg', - 'store_reg_bx', - 'store_reg_from_load', - 'stq_be_p', - 'stq_be_phys', - 'stq_he_p', - 'stq_le_p', - 'stq_le_phys', - 'stq_phys', - 'string_input_get_visitor', - 'string_input_visitor_cleanup', - 'string_input_visitor_new', - 'strongarm_cp_reginfo', - 'strstart', - 'strtosz', - 'strtosz_suffix', - 'stw_be_p', - 'stw_be_phys', - 'stw_he_p', - 'stw_le_p', - 'stw_le_phys', - 'stw_phys', - 'stw_phys_internal', - 'sub128', - 'sub16_sat', - 'sub16_usat', - 'sub192', - 'sub8_sat', - 'sub8_usat', - 'subFloat128Sigs', - 'subFloat32Sigs', - 'subFloat64Sigs', - 'subFloatx80Sigs', - 'subpage_accepts', - 'subpage_init', - 'subpage_ops', - 'subpage_read', - 'subpage_register', - 'subpage_write', - 'suffix_mul', - 'swap_commutative', - 'swap_commutative2', - 'switch_mode', - 'switch_v7m_sp', - 'syn_aa32_bkpt', - 'syn_aa32_hvc', - 'syn_aa32_smc', - 'syn_aa32_svc', - 'syn_breakpoint', - 'sync_globals', - 'syn_cp14_rrt_trap', - 'syn_cp14_rt_trap', - 'syn_cp15_rrt_trap', - 'syn_cp15_rt_trap', - 'syn_data_abort', - 'syn_fp_access_trap', - 'syn_insn_abort', - 'syn_swstep', - 'syn_uncategorized', - 'syn_watchpoint', - 'syscall_err', - 'system_bus_class_init', - 'system_bus_info', - 't2ee_cp_reginfo', - 'table_logic_cc', - 'target_parse_constraint', - 'target_words_bigendian', - 'tb_add_jump', - 'tb_alloc', - 'tb_alloc_page', - 'tb_check_watchpoint', - 'tb_find_fast', - 'tb_find_pc', - 'tb_find_slow', - 'tb_flush', - 'tb_flush_jmp_cache', - 'tb_free', - 'tb_gen_code', - 'tb_hash_remove', - 'tb_invalidate_phys_addr', - 'tb_invalidate_phys_page_range', - 'tb_invalidate_phys_range', - 'tb_jmp_cache_hash_func', - 'tb_jmp_cache_hash_page', - 'tb_jmp_remove', - 'tb_link_page', - 'tb_page_remove', - 'tb_phys_hash_func', - 'tb_phys_invalidate', - 'tb_reset_jump', - 'tb_set_jmp_target', - 'tcg_accel_class_init', - 'tcg_accel_type', - 'tcg_add_param_i32', - 'tcg_add_param_i64', - 'tcg_add_target_add_op_defs', - 'tcg_allowed', - 'tcg_canonicalize_memop', - 'tcg_commit', - 'tcg_cond_to_jcc', - 'tcg_constant_folding', - 'tcg_const_i32', - 'tcg_const_i64', - 'tcg_const_local_i32', - 'tcg_const_local_i64', - 'tcg_context_init', - 'tcg_cpu_address_space_init', - 'tcg_cpu_exec', - 'tcg_current_code_size', - 'tcg_dump_info', - 'tcg_dump_ops', - 'tcg_exec_all', - 'tcg_find_helper', - 'tcg_func_start', - 'tcg_gen_abs_i32', - 'tcg_gen_add2_i32', - 'tcg_gen_add_i32', - 'tcg_gen_add_i64', - 'tcg_gen_addi_i32', - 'tcg_gen_addi_i64', - 'tcg_gen_andc_i32', - 'tcg_gen_and_i32', - 'tcg_gen_and_i64', - 'tcg_gen_andi_i32', - 'tcg_gen_andi_i64', - 'tcg_gen_br', - 'tcg_gen_brcond_i32', - 'tcg_gen_brcond_i64', - 'tcg_gen_brcondi_i32', - 'tcg_gen_bswap16_i32', - 'tcg_gen_bswap32_i32', - 'tcg_gen_callN', - 'tcg_gen_code', - 'tcg_gen_code_common', - 'tcg_gen_code_search_pc', - 'tcg_gen_concat_i32_i64', - 'tcg_gen_debug_insn_start', - 'tcg_gen_deposit_i32', - 
'tcg_gen_exit_tb', - 'tcg_gen_ext16s_i32', - 'tcg_gen_ext16u_i32', - 'tcg_gen_ext32s_i64', - 'tcg_gen_ext32u_i64', - 'tcg_gen_ext8s_i32', - 'tcg_gen_ext8u_i32', - 'tcg_gen_ext_i32_i64', - 'tcg_gen_extu_i32_i64', - 'tcg_gen_goto_tb', - 'tcg_gen_ld_i32', - 'tcg_gen_ld_i64', - 'tcg_gen_ldst_op_i32', - 'tcg_gen_ldst_op_i64', - 'tcg_gen_movcond_i32', - 'tcg_gen_movcond_i64', - 'tcg_gen_mov_i32', - 'tcg_gen_mov_i64', - 'tcg_gen_movi_i32', - 'tcg_gen_movi_i64', - 'tcg_gen_mul_i32', - 'tcg_gen_muls2_i32', - 'tcg_gen_mulu2_i32', - 'tcg_gen_neg_i32', - 'tcg_gen_neg_i64', - 'tcg_gen_not_i32', - 'tcg_gen_op0', - 'tcg_gen_op1i', - 'tcg_gen_op2_i32', - 'tcg_gen_op2_i64', - 'tcg_gen_op2i_i32', - 'tcg_gen_op2i_i64', - 'tcg_gen_op3_i32', - 'tcg_gen_op3_i64', - 'tcg_gen_op4_i32', - 'tcg_gen_op4i_i32', - 'tcg_gen_op4ii_i32', - 'tcg_gen_op4ii_i64', - 'tcg_gen_op5ii_i32', - 'tcg_gen_op6_i32', - 'tcg_gen_op6i_i32', - 'tcg_gen_op6i_i64', - 'tcg_gen_orc_i32', - 'tcg_gen_or_i32', - 'tcg_gen_or_i64', - 'tcg_gen_ori_i32', - 'tcg_gen_qemu_ld_i32', - 'tcg_gen_qemu_ld_i64', - 'tcg_gen_qemu_st_i32', - 'tcg_gen_qemu_st_i64', - 'tcg_gen_rotl_i32', - 'tcg_gen_rotli_i32', - 'tcg_gen_rotr_i32', - 'tcg_gen_rotri_i32', - 'tcg_gen_sar_i32', - 'tcg_gen_sari_i32', - 'tcg_gen_setcond_i32', - 'tcg_gen_shl_i32', - 'tcg_gen_shl_i64', - 'tcg_gen_shli_i32', - 'tcg_gen_shli_i64', - 'tcg_gen_shr_i32', - 'tcg_gen_shifti_i64', - 'tcg_gen_shr_i64', - 'tcg_gen_shri_i32', - 'tcg_gen_shri_i64', - 'tcg_gen_st_i32', - 'tcg_gen_st_i64', - 'tcg_gen_sub_i32', - 'tcg_gen_sub_i64', - 'tcg_gen_subi_i32', - 'tcg_gen_trunc_i64_i32', - 'tcg_gen_trunc_shr_i64_i32', - 'tcg_gen_xor_i32', - 'tcg_gen_xor_i64', - 'tcg_gen_xori_i32', - 'tcg_get_arg_str_i32', - 'tcg_get_arg_str_i64', - 'tcg_get_arg_str_idx', - 'tcg_global_mem_new_i32', - 'tcg_global_mem_new_i64', - 'tcg_global_mem_new_internal', - 'tcg_global_reg_new_i32', - 'tcg_global_reg_new_i64', - 'tcg_global_reg_new_internal', - 'tcg_handle_interrupt', - 'tcg_init', - 'tcg_invert_cond', - 'tcg_la_bb_end', - 'tcg_la_br_end', - 'tcg_la_func_end', - 'tcg_liveness_analysis', - 'tcg_malloc', - 'tcg_malloc_internal', - 'tcg_op_defs_org', - 'tcg_opt_gen_mov', - 'tcg_opt_gen_movi', - 'tcg_optimize', - 'tcg_out16', - 'tcg_out32', - 'tcg_out64', - 'tcg_out8', - 'tcg_out_addi', - 'tcg_out_branch', - 'tcg_out_brcond32', - 'tcg_out_brcond64', - 'tcg_out_bswap32', - 'tcg_out_bswap64', - 'tcg_out_call', - 'tcg_out_cmp', - 'tcg_out_ext16s', - 'tcg_out_ext16u', - 'tcg_out_ext32s', - 'tcg_out_ext32u', - 'tcg_out_ext8s', - 'tcg_out_ext8u', - 'tcg_out_jmp', - 'tcg_out_jxx', - 'tcg_out_label', - 'tcg_out_ld', - 'tcg_out_modrm', - 'tcg_out_modrm_offset', - 'tcg_out_modrm_sib_offset', - 'tcg_out_mov', - 'tcg_out_movcond32', - 'tcg_out_movcond64', - 'tcg_out_movi', - 'tcg_out_op', - 'tcg_out_pop', - 'tcg_out_push', - 'tcg_out_qemu_ld', - 'tcg_out_qemu_ld_direct', - 'tcg_out_qemu_ld_slow_path', - 'tcg_out_qemu_st', - 'tcg_out_qemu_st_direct', - 'tcg_out_qemu_st_slow_path', - 'tcg_out_reloc', - 'tcg_out_rolw_8', - 'tcg_out_setcond32', - 'tcg_out_setcond64', - 'tcg_out_shifti', - 'tcg_out_st', - 'tcg_out_tb_finalize', - 'tcg_out_tb_init', - 'tcg_out_tlb_load', - 'tcg_out_vex_modrm', - 'tcg_patch32', - 'tcg_patch8', - 'tcg_pcrel_diff', - 'tcg_pool_reset', - 'tcg_prologue_init', - 'tcg_ptr_byte_diff', - 'tcg_reg_alloc', - 'tcg_reg_alloc_bb_end', - 'tcg_reg_alloc_call', - 'tcg_reg_alloc_mov', - 'tcg_reg_alloc_movi', - 'tcg_reg_alloc_op', - 'tcg_reg_alloc_start', - 'tcg_reg_free', - 'tcg_reg_sync', - 'tcg_set_frame', - 'tcg_set_nop', - 
'tcg_swap_cond', - 'tcg_target_callee_save_regs', - 'tcg_target_call_iarg_regs', - 'tcg_target_call_oarg_regs', - 'tcg_target_const_match', - 'tcg_target_init', - 'tcg_target_qemu_prologue', - 'tcg_target_reg_alloc_order', - 'tcg_temp_alloc', - 'tcg_temp_free_i32', - 'tcg_temp_free_i64', - 'tcg_temp_free_internal', - 'tcg_temp_local_new_i32', - 'tcg_temp_local_new_i64', - 'tcg_temp_new_i32', - 'tcg_temp_new_i64', - 'tcg_temp_new_internal', - 'tcg_temp_new_internal_i32', - 'tcg_temp_new_internal_i64', - 'tdb_hash', - 'teecr_write', - 'teehbr_access', - 'temp_allocate_frame', - 'temp_dead', - 'temps_are_copies', - 'temp_save', - 'temp_sync', - 'tgen_arithi', - 'tgen_arithr', - 'thumb2_logic_op', - 'ti925t_initfn', - 'tlb_add_large_page', - 'tlb_flush_entry', - 'tlbi_aa64_asid_is_write', - 'tlbi_aa64_asid_write', - 'tlbi_aa64_vaa_is_write', - 'tlbi_aa64_vaa_write', - 'tlbi_aa64_va_is_write', - 'tlbi_aa64_va_write', - 'tlbiall_is_write', - 'tlbiall_write', - 'tlbiasid_is_write', - 'tlbiasid_write', - 'tlbimvaa_is_write', - 'tlbimvaa_write', - 'tlbimva_is_write', - 'tlbimva_write', - 'tlb_is_dirty_ram', - 'tlb_protect_code', - 'tlb_reset_dirty_range', - 'tlb_reset_dirty_range_all', - 'tlb_set_dirty', - 'tlb_set_dirty1', - 'tlb_unprotect_code_phys', - 'tlb_vaddr_to_host', - 'token_get_type', - 'token_get_value', - 'token_is_escape', - 'token_is_keyword', - 'token_is_operator', - 'tokens_append_from_iter', - 'to_qiv', - 'to_qov', - 'tosa_init', - 'tosa_machine_init', - 'tswap32', - 'tswap64', - 'type_class_get_size', - 'type_get_by_name', - 'type_get_parent', - 'type_has_parent', - 'type_initialize', - 'type_initialize_interface', - 'type_is_ancestor', - 'type_new', - 'type_object_get_size', - 'type_register_internal', - 'type_table_add', - 'type_table_get', - 'type_table_lookup', - 'uint16_to_float32', - 'uint16_to_float64', - 'uint32_to_float32', - 'uint32_to_float64', - 'uint64_to_float128', - 'uint64_to_float32', - 'uint64_to_float64', - 'unassigned_io_ops', - 'unassigned_io_read', - 'unassigned_io_write', - 'unassigned_mem_accepts', - 'unassigned_mem_ops', - 'unassigned_mem_read', - 'unassigned_mem_write', - 'update_spsel', - 'v6_cp_reginfo', - 'v6k_cp_reginfo', - 'v7_cp_reginfo', - 'v7mp_cp_reginfo', - 'v7m_pop', - 'v7m_push', - 'v8_cp_reginfo', - 'v8_el2_cp_reginfo', - 'v8_el3_cp_reginfo', - 'v8_el3_no_el2_cp_reginfo', - 'vapa_cp_reginfo', - 'vbar_write', - 'vfp_exceptbits_from_host', - 'vfp_exceptbits_to_host', - 'vfp_get_fpcr', - 'vfp_get_fpscr', - 'vfp_get_fpsr', - 'vfp_reg_offset', - 'vfp_set_fpcr', - 'vfp_set_fpscr', - 'vfp_set_fpsr', - 'visit_end_implicit_struct', - 'visit_end_list', - 'visit_end_struct', - 'visit_end_union', - 'visit_get_next_type', - 'visit_next_list', - 'visit_optional', - 'visit_start_implicit_struct', - 'visit_start_list', - 'visit_start_struct', - 'visit_start_union', - 'vmsa_cp_reginfo', - 'vmsa_tcr_el1_write', - 'vmsa_ttbcr_raw_write', - 'vmsa_ttbcr_reset', - 'vmsa_ttbcr_write', - 'vmsa_ttbr_write', - 'write_cpustate_to_list', - 'write_list_to_cpustate', - 'write_raw_cp_reg', - 'X86CPURegister32_lookup', - 'x86_op_defs', - 'xpsr_read', - 'xpsr_write', - 'xscale_cpar_write', - 'xscale_cp_reginfo' -) - -arm_symbols = ( - 'ARM_REGS_STORAGE_SIZE', -) - -aarch64_symbols = ( - 'ARM64_REGS_STORAGE_SIZE', - 'arm64_release', - 'arm64_reg_reset', - 'arm64_reg_read', - 'arm64_reg_write', - 'gen_a64_set_pc_im', - 'aarch64_cpu_register_types', - 'helper_udiv64', - 'helper_sdiv64', - 'helper_cls64', - 'helper_cls32', - 'helper_rbit64', - 'helper_vfp_cmps_a64', - 
'helper_vfp_cmpes_a64', - 'helper_vfp_cmpd_a64', - 'helper_vfp_cmped_a64', - 'helper_vfp_mulxs', - 'helper_vfp_mulxd', - 'helper_simd_tbl', - 'helper_neon_ceq_f64', - 'helper_neon_cge_f64', - 'helper_neon_cgt_f64', - 'helper_recpsf_f32', - 'helper_recpsf_f64', - 'helper_rsqrtsf_f32', - 'helper_rsqrtsf_f64', - 'helper_neon_addlp_s8', - 'helper_neon_addlp_u8', - 'helper_neon_addlp_s16', - 'helper_neon_addlp_u16', - 'helper_frecpx_f32', - 'helper_frecpx_f64', - 'helper_fcvtx_f64_to_f32', - 'helper_crc32_64', - 'helper_crc32c_64', - 'aarch64_cpu_do_interrupt', - -) - -mips_symbols = ( - 'cpu_mips_exec', - 'cpu_mips_get_random', - 'cpu_mips_get_count', - 'cpu_mips_store_count', - 'cpu_mips_store_compare', - 'cpu_mips_start_count', - 'cpu_mips_stop_count', - 'mips_machine_init', - 'cpu_mips_kseg0_to_phys', - 'cpu_mips_phys_to_kseg0', - 'cpu_mips_kvm_um_phys_to_kseg0', - 'mips_cpu_register_types', - 'cpu_mips_init', - 'cpu_state_reset', - 'helper_msa_andi_b', - 'helper_msa_ori_b', - 'helper_msa_nori_b', - 'helper_msa_xori_b', - 'helper_msa_bmnzi_b', - 'helper_msa_bmzi_b', - 'helper_msa_bseli_b', - 'helper_msa_shf_df', - 'helper_msa_and_v', - 'helper_msa_or_v', - 'helper_msa_nor_v', - 'helper_msa_xor_v', - 'helper_msa_bmnz_v', - 'helper_msa_bmz_v', - 'helper_msa_bsel_v', - 'helper_msa_addvi_df', - 'helper_msa_subvi_df', - 'helper_msa_ceqi_df', - 'helper_msa_clei_s_df', - 'helper_msa_clei_u_df', - 'helper_msa_clti_s_df', - 'helper_msa_clti_u_df', - 'helper_msa_maxi_s_df', - 'helper_msa_maxi_u_df', - 'helper_msa_mini_s_df', - 'helper_msa_mini_u_df', - 'helper_msa_ldi_df', - 'helper_msa_slli_df', - 'helper_msa_srai_df', - 'helper_msa_srli_df', - 'helper_msa_bclri_df', - 'helper_msa_bseti_df', - 'helper_msa_bnegi_df', - 'helper_msa_sat_s_df', - 'helper_msa_sat_u_df', - 'helper_msa_srari_df', - 'helper_msa_srlri_df', - 'helper_msa_binsli_df', - 'helper_msa_binsri_df', - 'helper_msa_sll_df', - 'helper_msa_sra_df', - 'helper_msa_srl_df', - 'helper_msa_bclr_df', - 'helper_msa_bset_df', - 'helper_msa_bneg_df', - 'helper_msa_addv_df', - 'helper_msa_subv_df', - 'helper_msa_max_s_df', - 'helper_msa_max_u_df', - 'helper_msa_min_s_df', - 'helper_msa_min_u_df', - 'helper_msa_max_a_df', - 'helper_msa_min_a_df', - 'helper_msa_ceq_df', - 'helper_msa_clt_s_df', - 'helper_msa_clt_u_df', - 'helper_msa_cle_s_df', - 'helper_msa_cle_u_df', - 'helper_msa_add_a_df', - 'helper_msa_adds_a_df', - 'helper_msa_adds_s_df', - 'helper_msa_adds_u_df', - 'helper_msa_ave_s_df', - 'helper_msa_ave_u_df', - 'helper_msa_aver_s_df', - 'helper_msa_aver_u_df', - 'helper_msa_subs_s_df', - 'helper_msa_subs_u_df', - 'helper_msa_subsus_u_df', - 'helper_msa_subsuu_s_df', - 'helper_msa_asub_s_df', - 'helper_msa_asub_u_df', - 'helper_msa_mulv_df', - 'helper_msa_div_s_df', - 'helper_msa_div_u_df', - 'helper_msa_mod_s_df', - 'helper_msa_mod_u_df', - 'helper_msa_dotp_s_df', - 'helper_msa_dotp_u_df', - 'helper_msa_srar_df', - 'helper_msa_srlr_df', - 'helper_msa_hadd_s_df', - 'helper_msa_hadd_u_df', - 'helper_msa_hsub_s_df', - 'helper_msa_hsub_u_df', - 'helper_msa_mul_q_df', - 'helper_msa_mulr_q_df', - 'helper_msa_sld_df', - 'helper_msa_maddv_df', - 'helper_msa_msubv_df', - 'helper_msa_dpadd_s_df', - 'helper_msa_dpadd_u_df', - 'helper_msa_dpsub_s_df', - 'helper_msa_dpsub_u_df', - 'helper_msa_binsl_df', - 'helper_msa_binsr_df', - 'helper_msa_madd_q_df', - 'helper_msa_msub_q_df', - 'helper_msa_maddr_q_df', - 'helper_msa_msubr_q_df', - 'helper_msa_splat_df', - 'helper_msa_pckev_df', - 'helper_msa_pckod_df', - 'helper_msa_ilvl_df', - 'helper_msa_ilvr_df', 
- 'helper_msa_ilvev_df', - 'helper_msa_ilvod_df', - 'helper_msa_vshf_df', - 'helper_msa_sldi_df', - 'helper_msa_splati_df', - 'helper_msa_copy_s_df', - 'helper_msa_copy_u_df', - 'helper_msa_insert_df', - 'helper_msa_insve_df', - 'helper_msa_ctcmsa', - 'helper_msa_cfcmsa', - 'helper_msa_move_v', - 'helper_msa_fill_df', - 'helper_msa_nlzc_df', - 'helper_msa_nloc_df', - 'helper_msa_pcnt_df', - 'helper_msa_fcaf_df', - 'helper_msa_fcun_df', - 'helper_msa_fceq_df', - 'helper_msa_fcueq_df', - 'helper_msa_fclt_df', - 'helper_msa_fcult_df', - 'helper_msa_fcle_df', - 'helper_msa_fcule_df', - 'helper_msa_fsaf_df', - 'helper_msa_fsun_df', - 'helper_msa_fseq_df', - 'helper_msa_fsueq_df', - 'helper_msa_fslt_df', - 'helper_msa_fsult_df', - 'helper_msa_fsle_df', - 'helper_msa_fsule_df', - 'helper_msa_fcor_df', - 'helper_msa_fcune_df', - 'helper_msa_fcne_df', - 'helper_msa_fsor_df', - 'helper_msa_fsune_df', - 'helper_msa_fsne_df', - 'helper_msa_fadd_df', - 'helper_msa_fsub_df', - 'helper_msa_fmul_df', - 'helper_msa_fdiv_df', - 'helper_msa_fmadd_df', - 'helper_msa_fmsub_df', - 'helper_msa_fexp2_df', - 'helper_msa_fexdo_df', - 'helper_msa_ftq_df', - 'helper_msa_fmin_df', - 'helper_msa_fmin_a_df', - 'helper_msa_fmax_df', - 'helper_msa_fmax_a_df', - 'helper_msa_fclass_df', - 'helper_msa_ftrunc_s_df', - 'helper_msa_ftrunc_u_df', - 'helper_msa_fsqrt_df', - 'helper_msa_frsqrt_df', - 'helper_msa_frcp_df', - 'helper_msa_frint_df', - 'helper_msa_flog2_df', - 'helper_msa_fexupl_df', - 'helper_msa_fexupr_df', - 'helper_msa_ffql_df', - 'helper_msa_ffqr_df', - 'helper_msa_ftint_s_df', - 'helper_msa_ftint_u_df', - 'helper_msa_ffint_s_df', - 'helper_msa_ffint_u_df', - 'helper_paddsb', - 'helper_paddusb', - 'helper_paddsh', - 'helper_paddush', - 'helper_paddb', - 'helper_paddh', - 'helper_paddw', - 'helper_psubsb', - 'helper_psubusb', - 'helper_psubsh', - 'helper_psubush', - 'helper_psubb', - 'helper_psubh', - 'helper_psubw', - 'helper_pshufh', - 'helper_packsswh', - 'helper_packsshb', - 'helper_packushb', - 'helper_punpcklwd', - 'helper_punpckhwd', - 'helper_punpcklhw', - 'helper_punpckhhw', - 'helper_punpcklbh', - 'helper_punpckhbh', - 'helper_pavgh', - 'helper_pavgb', - 'helper_pmaxsh', - 'helper_pminsh', - 'helper_pmaxub', - 'helper_pminub', - 'helper_pcmpeqw', - 'helper_pcmpgtw', - 'helper_pcmpeqh', - 'helper_pcmpgth', - 'helper_pcmpeqb', - 'helper_pcmpgtb', - 'helper_psllw', - 'helper_psrlw', - 'helper_psraw', - 'helper_psllh', - 'helper_psrlh', - 'helper_psrah', - 'helper_pmullh', - 'helper_pmulhh', - 'helper_pmulhuh', - 'helper_pmaddhw', - 'helper_pasubub', - 'helper_biadd', - 'helper_pmovmskb', - 'helper_absq_s_ph', - 'helper_absq_s_qb', - 'helper_absq_s_w', - 'helper_addqh_ph', - 'helper_addqh_r_ph', - 'helper_addqh_r_w', - 'helper_addqh_w', - 'helper_adduh_qb', - 'helper_adduh_r_qb', - 'helper_subqh_ph', - 'helper_subqh_r_ph', - 'helper_subqh_r_w', - 'helper_subqh_w', - 'helper_addq_ph', - 'helper_addq_s_ph', - 'helper_addq_s_w', - 'helper_addu_ph', - 'helper_addu_qb', - 'helper_addu_s_ph', - 'helper_addu_s_qb', - 'helper_subq_ph', - 'helper_subq_s_ph', - 'helper_subq_s_w', - 'helper_subu_ph', - 'helper_subu_qb', - 'helper_subu_s_ph', - 'helper_subu_s_qb', - 'helper_subuh_qb', - 'helper_subuh_r_qb', - 'helper_addsc', - 'helper_addwc', - 'helper_modsub', - 'helper_raddu_w_qb', - 'helper_precr_qb_ph', - 'helper_precrq_qb_ph', - 'helper_precr_sra_ph_w', - 'helper_precr_sra_r_ph_w', - 'helper_precrq_ph_w', - 'helper_precrq_rs_ph_w', - 'helper_precrqu_s_qb_ph', - 'helper_precequ_ph_qbl', - 'helper_precequ_ph_qbr', - 
'helper_precequ_ph_qbla', - 'helper_precequ_ph_qbra', - 'helper_preceu_ph_qbl', - 'helper_preceu_ph_qbr', - 'helper_preceu_ph_qbla', - 'helper_preceu_ph_qbra', - 'helper_shll_qb', - 'helper_shrl_qb', - 'helper_shra_qb', - 'helper_shra_r_qb', - 'helper_shll_ph', - 'helper_shll_s_ph', - 'helper_shll_s_w', - 'helper_shra_r_w', - 'helper_shrl_ph', - 'helper_shra_ph', - 'helper_shra_r_ph', - 'helper_muleu_s_ph_qbl', - 'helper_muleu_s_ph_qbr', - 'helper_mulq_rs_ph', - 'helper_mul_ph', - 'helper_mul_s_ph', - 'helper_mulq_s_ph', - 'helper_muleq_s_w_phl', - 'helper_muleq_s_w_phr', - 'helper_mulsaq_s_w_ph', - 'helper_mulsa_w_ph', - 'helper_dpau_h_qbl', - 'helper_dpau_h_qbr', - 'helper_dpsu_h_qbl', - 'helper_dpsu_h_qbr', - 'helper_dpa_w_ph', - 'helper_dpax_w_ph', - 'helper_dps_w_ph', - 'helper_dpsx_w_ph', - 'helper_dpaq_s_w_ph', - 'helper_dpaqx_s_w_ph', - 'helper_dpsq_s_w_ph', - 'helper_dpsqx_s_w_ph', - 'helper_dpaqx_sa_w_ph', - 'helper_dpsqx_sa_w_ph', - 'helper_dpaq_sa_l_w', - 'helper_dpsq_sa_l_w', - 'helper_maq_s_w_phl', - 'helper_maq_s_w_phr', - 'helper_maq_sa_w_phl', - 'helper_maq_sa_w_phr', - 'helper_mulq_s_w', - 'helper_mulq_rs_w', - 'helper_bitrev', - 'helper_insv', - 'helper_cmpgu_eq_qb', - 'helper_cmpgu_lt_qb', - 'helper_cmpgu_le_qb', - 'helper_cmpu_eq_qb', - 'helper_cmpu_lt_qb', - 'helper_cmpu_le_qb', - 'helper_cmp_eq_ph', - 'helper_cmp_lt_ph', - 'helper_cmp_le_ph', - 'helper_pick_qb', - 'helper_pick_ph', - 'helper_packrl_ph', - 'helper_extr_w', - 'helper_extr_r_w', - 'helper_extr_rs_w', - 'helper_extr_s_h', - 'helper_extp', - 'helper_extpdp', - 'helper_shilo', - 'helper_mthlip', - 'cpu_wrdsp', - 'helper_wrdsp', - 'cpu_rddsp', - 'helper_rddsp', - 'helper_raise_exception_err', - 'helper_clo', - 'helper_clz', - 'helper_muls', - 'helper_mulsu', - 'helper_macc', - 'helper_macchi', - 'helper_maccu', - 'helper_macchiu', - 'helper_msac', - 'helper_msachi', - 'helper_msacu', - 'helper_msachiu', - 'helper_mulhi', - 'helper_mulhiu', - 'helper_mulshi', - 'helper_mulshiu', - 'helper_bitswap', - 'helper_ll', - 'helper_sc', - 'helper_swl', - 'helper_swr', - 'helper_lwm', - 'helper_swm', - 'helper_mfc0_mvpcontrol', - 'helper_mfc0_mvpconf0', - 'helper_mfc0_mvpconf1', - 'helper_mfc0_random', - 'helper_mfc0_tcstatus', - 'helper_mftc0_tcstatus', - 'helper_mfc0_tcbind', - 'helper_mftc0_tcbind', - 'helper_mfc0_tcrestart', - 'helper_mftc0_tcrestart', - 'helper_mfc0_tchalt', - 'helper_mftc0_tchalt', - 'helper_mfc0_tccontext', - 'helper_mftc0_tccontext', - 'helper_mfc0_tcschedule', - 'helper_mftc0_tcschedule', - 'helper_mfc0_tcschefback', - 'helper_mftc0_tcschefback', - 'helper_mfc0_count', - 'helper_mftc0_entryhi', - 'helper_mftc0_cause', - 'helper_mftc0_status', - 'helper_mfc0_lladdr', - 'helper_mfc0_watchlo', - 'helper_mfc0_watchhi', - 'helper_mfc0_debug', - 'helper_mftc0_debug', - 'helper_mtc0_index', - 'helper_mtc0_mvpcontrol', - 'helper_mtc0_vpecontrol', - 'helper_mttc0_vpecontrol', - 'helper_mftc0_vpecontrol', - 'helper_mftc0_vpeconf0', - 'helper_mtc0_vpeconf0', - 'helper_mttc0_vpeconf0', - 'helper_mtc0_vpeconf1', - 'helper_mtc0_yqmask', - 'helper_mtc0_vpeopt', - 'helper_mtc0_entrylo0', - 'helper_mtc0_tcstatus', - 'helper_mttc0_tcstatus', - 'helper_mtc0_tcbind', - 'helper_mttc0_tcbind', - 'helper_mtc0_tcrestart', - 'helper_mttc0_tcrestart', - 'helper_mtc0_tchalt', - 'helper_mttc0_tchalt', - 'helper_mtc0_tccontext', - 'helper_mttc0_tccontext', - 'helper_mtc0_tcschedule', - 'helper_mttc0_tcschedule', - 'helper_mtc0_tcschefback', - 'helper_mttc0_tcschefback', - 'helper_mtc0_entrylo1', - 'helper_mtc0_context', 
- 'helper_mtc0_pagemask', - 'helper_mtc0_pagegrain', - 'helper_mtc0_wired', - 'helper_mtc0_srsconf0', - 'helper_mtc0_srsconf1', - 'helper_mtc0_srsconf2', - 'helper_mtc0_srsconf3', - 'helper_mtc0_srsconf4', - 'helper_mtc0_hwrena', - 'helper_mtc0_count', - 'helper_mtc0_entryhi', - 'helper_mttc0_entryhi', - 'helper_mtc0_compare', - 'helper_mtc0_status', - 'helper_mttc0_status', - 'helper_mtc0_intctl', - 'helper_mtc0_srsctl', - 'helper_mtc0_cause', - 'helper_mttc0_cause', - 'helper_mftc0_epc', - 'helper_mftc0_ebase', - 'helper_mtc0_ebase', - 'helper_mttc0_ebase', - 'helper_mftc0_configx', - 'helper_mtc0_config0', - 'helper_mtc0_config2', - 'helper_mtc0_config4', - 'helper_mtc0_config5', - 'helper_mtc0_lladdr', - 'helper_mtc0_watchlo', - 'helper_mtc0_watchhi', - 'helper_mtc0_xcontext', - 'helper_mtc0_framemask', - 'helper_mtc0_debug', - 'helper_mttc0_debug', - 'helper_mtc0_performance0', - 'helper_mtc0_taglo', - 'helper_mtc0_datalo', - 'helper_mtc0_taghi', - 'helper_mtc0_datahi', - 'helper_mftgpr', - 'helper_mftlo', - 'helper_mfthi', - 'helper_mftacx', - 'helper_mftdsp', - 'helper_mttgpr', - 'helper_mttlo', - 'helper_mtthi', - 'helper_mttacx', - 'helper_mttdsp', - 'helper_dmt', - 'helper_emt', - 'helper_dvpe', - 'helper_evpe', - 'helper_fork', - 'helper_yield', - 'r4k_helper_tlbinv', - 'r4k_helper_tlbinvf', - 'r4k_helper_tlbwi', - 'r4k_helper_tlbwr', - 'r4k_helper_tlbp', - 'r4k_helper_tlbr', - 'helper_tlbwi', - 'helper_tlbwr', - 'helper_tlbp', - 'helper_tlbr', - 'helper_tlbinv', - 'helper_tlbinvf', - 'helper_di', - 'helper_ei', - 'helper_eret', - 'helper_deret', - 'helper_rdhwr_cpunum', - 'helper_rdhwr_synci_step', - 'helper_rdhwr_cc', - 'helper_rdhwr_ccres', - 'helper_pmon', - 'helper_wait', - 'mips_cpu_do_unaligned_access', - 'mips_cpu_unassigned_access', - 'ieee_rm', - 'helper_cfc1', - 'helper_ctc1', - 'ieee_ex_to_mips', - 'helper_float_sqrt_d', - 'helper_float_sqrt_s', - 'helper_float_cvtd_s', - 'helper_float_cvtd_w', - 'helper_float_cvtd_l', - 'helper_float_cvtl_d', - 'helper_float_cvtl_s', - 'helper_float_cvtps_pw', - 'helper_float_cvtpw_ps', - 'helper_float_cvts_d', - 'helper_float_cvts_w', - 'helper_float_cvts_l', - 'helper_float_cvts_pl', - 'helper_float_cvts_pu', - 'helper_float_cvtw_s', - 'helper_float_cvtw_d', - 'helper_float_roundl_d', - 'helper_float_roundl_s', - 'helper_float_roundw_d', - 'helper_float_roundw_s', - 'helper_float_truncl_d', - 'helper_float_truncl_s', - 'helper_float_truncw_d', - 'helper_float_truncw_s', - 'helper_float_ceill_d', - 'helper_float_ceill_s', - 'helper_float_ceilw_d', - 'helper_float_ceilw_s', - 'helper_float_floorl_d', - 'helper_float_floorl_s', - 'helper_float_floorw_d', - 'helper_float_floorw_s', - 'helper_float_abs_d', - 'helper_float_abs_s', - 'helper_float_abs_ps', - 'helper_float_chs_d', - 'helper_float_chs_s', - 'helper_float_chs_ps', - 'helper_float_maddf_s', - 'helper_float_maddf_d', - 'helper_float_msubf_s', - 'helper_float_msubf_d', - 'helper_float_max_s', - 'helper_float_max_d', - 'helper_float_maxa_s', - 'helper_float_maxa_d', - 'helper_float_min_s', - 'helper_float_min_d', - 'helper_float_mina_s', - 'helper_float_mina_d', - 'helper_float_rint_s', - 'helper_float_rint_d', - 'helper_float_class_s', - 'helper_float_class_d', - 'helper_float_recip_d', - 'helper_float_recip_s', - 'helper_float_rsqrt_d', - 'helper_float_rsqrt_s', - 'helper_float_recip1_d', - 'helper_float_recip1_s', - 'helper_float_recip1_ps', - 'helper_float_rsqrt1_d', - 'helper_float_rsqrt1_s', - 'helper_float_rsqrt1_ps', - 'helper_float_add_d', - 'helper_float_add_s', - 
'helper_float_add_ps', - 'helper_float_sub_d', - 'helper_float_sub_s', - 'helper_float_sub_ps', - 'helper_float_mul_d', - 'helper_float_mul_s', - 'helper_float_mul_ps', - 'helper_float_div_d', - 'helper_float_div_s', - 'helper_float_div_ps', - 'helper_float_madd_d', - 'helper_float_madd_s', - 'helper_float_madd_ps', - 'helper_float_msub_d', - 'helper_float_msub_s', - 'helper_float_msub_ps', - 'helper_float_nmadd_d', - 'helper_float_nmadd_s', - 'helper_float_nmadd_ps', - 'helper_float_nmsub_d', - 'helper_float_nmsub_s', - 'helper_float_nmsub_ps', - 'helper_float_recip2_d', - 'helper_float_recip2_s', - 'helper_float_recip2_ps', - 'helper_float_rsqrt2_d', - 'helper_float_rsqrt2_s', - 'helper_float_rsqrt2_ps', - 'helper_float_addr_ps', - 'helper_float_mulr_ps', - 'helper_cmp_d_f', - 'helper_cmpabs_d_f', - 'helper_cmp_d_un', - 'helper_cmpabs_d_un', - 'helper_cmp_d_eq', - 'helper_cmpabs_d_eq', - 'helper_cmp_d_ueq', - 'helper_cmpabs_d_ueq', - 'helper_cmp_d_olt', - 'helper_cmpabs_d_olt', - 'helper_cmp_d_ult', - 'helper_cmpabs_d_ult', - 'helper_cmp_d_ole', - 'helper_cmpabs_d_ole', - 'helper_cmp_d_ule', - 'helper_cmpabs_d_ule', - 'helper_cmp_d_sf', - 'helper_cmpabs_d_sf', - 'helper_cmp_d_ngle', - 'helper_cmpabs_d_ngle', - 'helper_cmp_d_seq', - 'helper_cmpabs_d_seq', - 'helper_cmp_d_ngl', - 'helper_cmpabs_d_ngl', - 'helper_cmp_d_lt', - 'helper_cmpabs_d_lt', - 'helper_cmp_d_nge', - 'helper_cmpabs_d_nge', - 'helper_cmp_d_le', - 'helper_cmpabs_d_le', - 'helper_cmp_d_ngt', - 'helper_cmpabs_d_ngt', - 'helper_cmp_s_f', - 'helper_cmpabs_s_f', - 'helper_cmp_s_un', - 'helper_cmpabs_s_un', - 'helper_cmp_s_eq', - 'helper_cmpabs_s_eq', - 'helper_cmp_s_ueq', - 'helper_cmpabs_s_ueq', - 'helper_cmp_s_olt', - 'helper_cmpabs_s_olt', - 'helper_cmp_s_ult', - 'helper_cmpabs_s_ult', - 'helper_cmp_s_ole', - 'helper_cmpabs_s_ole', - 'helper_cmp_s_ule', - 'helper_cmpabs_s_ule', - 'helper_cmp_s_sf', - 'helper_cmpabs_s_sf', - 'helper_cmp_s_ngle', - 'helper_cmpabs_s_ngle', - 'helper_cmp_s_seq', - 'helper_cmpabs_s_seq', - 'helper_cmp_s_ngl', - 'helper_cmpabs_s_ngl', - 'helper_cmp_s_lt', - 'helper_cmpabs_s_lt', - 'helper_cmp_s_nge', - 'helper_cmpabs_s_nge', - 'helper_cmp_s_le', - 'helper_cmpabs_s_le', - 'helper_cmp_s_ngt', - 'helper_cmpabs_s_ngt', - 'helper_cmp_ps_f', - 'helper_cmpabs_ps_f', - 'helper_cmp_ps_un', - 'helper_cmpabs_ps_un', - 'helper_cmp_ps_eq', - 'helper_cmpabs_ps_eq', - 'helper_cmp_ps_ueq', - 'helper_cmpabs_ps_ueq', - 'helper_cmp_ps_olt', - 'helper_cmpabs_ps_olt', - 'helper_cmp_ps_ult', - 'helper_cmpabs_ps_ult', - 'helper_cmp_ps_ole', - 'helper_cmpabs_ps_ole', - 'helper_cmp_ps_ule', - 'helper_cmpabs_ps_ule', - 'helper_cmp_ps_sf', - 'helper_cmpabs_ps_sf', - 'helper_cmp_ps_ngle', - 'helper_cmpabs_ps_ngle', - 'helper_cmp_ps_seq', - 'helper_cmpabs_ps_seq', - 'helper_cmp_ps_ngl', - 'helper_cmpabs_ps_ngl', - 'helper_cmp_ps_lt', - 'helper_cmpabs_ps_lt', - 'helper_cmp_ps_nge', - 'helper_cmpabs_ps_nge', - 'helper_cmp_ps_le', - 'helper_cmpabs_ps_le', - 'helper_cmp_ps_ngt', - 'helper_cmpabs_ps_ngt', - 'helper_r6_cmp_d_af', - 'helper_r6_cmp_d_un', - 'helper_r6_cmp_d_eq', - 'helper_r6_cmp_d_ueq', - 'helper_r6_cmp_d_lt', - 'helper_r6_cmp_d_ult', - 'helper_r6_cmp_d_le', - 'helper_r6_cmp_d_ule', - 'helper_r6_cmp_d_saf', - 'helper_r6_cmp_d_sun', - 'helper_r6_cmp_d_seq', - 'helper_r6_cmp_d_sueq', - 'helper_r6_cmp_d_slt', - 'helper_r6_cmp_d_sult', - 'helper_r6_cmp_d_sle', - 'helper_r6_cmp_d_sule', - 'helper_r6_cmp_d_or', - 'helper_r6_cmp_d_une', - 'helper_r6_cmp_d_ne', - 'helper_r6_cmp_d_sor', - 'helper_r6_cmp_d_sune', - 
'helper_r6_cmp_d_sne', - 'helper_r6_cmp_s_af', - 'helper_r6_cmp_s_un', - 'helper_r6_cmp_s_eq', - 'helper_r6_cmp_s_ueq', - 'helper_r6_cmp_s_lt', - 'helper_r6_cmp_s_ult', - 'helper_r6_cmp_s_le', - 'helper_r6_cmp_s_ule', - 'helper_r6_cmp_s_saf', - 'helper_r6_cmp_s_sun', - 'helper_r6_cmp_s_seq', - 'helper_r6_cmp_s_sueq', - 'helper_r6_cmp_s_slt', - 'helper_r6_cmp_s_sult', - 'helper_r6_cmp_s_sle', - 'helper_r6_cmp_s_sule', - 'helper_r6_cmp_s_or', - 'helper_r6_cmp_s_une', - 'helper_r6_cmp_s_ne', - 'helper_r6_cmp_s_sor', - 'helper_r6_cmp_s_sune', - 'helper_r6_cmp_s_sne', - 'helper_msa_ld_df', - 'helper_msa_st_df', - 'no_mmu_map_address', - 'fixed_mmu_map_address', - 'r4k_map_address', - 'mips_cpu_get_phys_page_debug', - 'mips_cpu_handle_mmu_fault', - 'cpu_mips_translate_address', - 'exception_resume_pc', - 'mips_cpu_do_interrupt', - 'mips_cpu_exec_interrupt', - 'r4k_invalidate_tlb', - 'helper_absq_s_ob', - 'helper_absq_s_qh', - 'helper_absq_s_pw', - 'helper_adduh_ob', - 'helper_adduh_r_ob', - 'helper_subuh_ob', - 'helper_subuh_r_ob', - 'helper_addq_pw', - 'helper_addq_qh', - 'helper_addq_s_pw', - 'helper_addq_s_qh', - 'helper_addu_ob', - 'helper_addu_qh', - 'helper_addu_s_ob', - 'helper_addu_s_qh', - 'helper_subq_pw', - 'helper_subq_qh', - 'helper_subq_s_pw', - 'helper_subq_s_qh', - 'helper_subu_ob', - 'helper_subu_qh', - 'helper_subu_s_ob', - 'helper_subu_s_qh', - 'helper_raddu_l_ob', - 'helper_precr_ob_qh', - 'helper_precr_sra_qh_pw', - 'helper_precr_sra_r_qh_pw', - 'helper_precrq_ob_qh', - 'helper_precrq_qh_pw', - 'helper_precrq_rs_qh_pw', - 'helper_precrq_pw_l', - 'helper_precrqu_s_ob_qh', - 'helper_preceq_pw_qhl', - 'helper_preceq_pw_qhr', - 'helper_preceq_pw_qhla', - 'helper_preceq_pw_qhra', - 'helper_precequ_qh_obl', - 'helper_precequ_qh_obr', - 'helper_precequ_qh_obla', - 'helper_precequ_qh_obra', - 'helper_preceu_qh_obl', - 'helper_preceu_qh_obr', - 'helper_preceu_qh_obla', - 'helper_preceu_qh_obra', - 'helper_shll_ob', - 'helper_shrl_ob', - 'helper_shra_ob', - 'helper_shra_r_ob', - 'helper_shll_qh', - 'helper_shll_s_qh', - 'helper_shrl_qh', - 'helper_shra_qh', - 'helper_shra_r_qh', - 'helper_shll_pw', - 'helper_shll_s_pw', - 'helper_shra_pw', - 'helper_shra_r_pw', - 'helper_muleu_s_qh_obl', - 'helper_muleu_s_qh_obr', - 'helper_mulq_rs_qh', - 'helper_muleq_s_pw_qhl', - 'helper_muleq_s_pw_qhr', - 'helper_mulsaq_s_w_qh', - 'helper_dpau_h_obl', - 'helper_dpau_h_obr', - 'helper_dpsu_h_obl', - 'helper_dpsu_h_obr', - 'helper_dpa_w_qh', - 'helper_dpaq_s_w_qh', - 'helper_dps_w_qh', - 'helper_dpsq_s_w_qh', - 'helper_dpaq_sa_l_pw', - 'helper_dpsq_sa_l_pw', - 'helper_mulsaq_s_l_pw', - 'helper_maq_s_w_qhll', - 'helper_maq_s_w_qhlr', - 'helper_maq_s_w_qhrl', - 'helper_maq_s_w_qhrr', - 'helper_maq_sa_w_qhll', - 'helper_maq_sa_w_qhlr', - 'helper_maq_sa_w_qhrl', - 'helper_maq_sa_w_qhrr', - 'helper_maq_s_l_pwl', - 'helper_maq_s_l_pwr', - 'helper_dmadd', - 'helper_dmaddu', - 'helper_dmsub', - 'helper_dmsubu', - 'helper_dinsv', - 'helper_cmpgu_eq_ob', - 'helper_cmpgu_lt_ob', - 'helper_cmpgu_le_ob', - 'helper_cmpu_eq_ob', - 'helper_cmpu_lt_ob', - 'helper_cmpu_le_ob', - 'helper_cmp_eq_qh', - 'helper_cmp_lt_qh', - 'helper_cmp_le_qh', - 'helper_cmp_eq_pw', - 'helper_cmp_lt_pw', - 'helper_cmp_le_pw', - 'helper_cmpgdu_eq_ob', - 'helper_cmpgdu_lt_ob', - 'helper_cmpgdu_le_ob', - 'helper_pick_ob', - 'helper_pick_qh', - 'helper_pick_pw', - 'helper_packrl_pw', - 'helper_dextr_w', - 'helper_dextr_r_w', - 'helper_dextr_rs_w', - 'helper_dextr_l', - 'helper_dextr_r_l', - 'helper_dextr_rs_l', - 'helper_dextr_s_h', - 
'helper_dextp', - 'helper_dextpdp', - 'helper_dshilo', - 'helper_dmthlip', - 'helper_dclo', - 'helper_dclz', - 'helper_dbitswap', - 'helper_lld', - 'helper_scd', - 'helper_sdl', - 'helper_sdr', - 'helper_ldm', - 'helper_sdm', - 'helper_dmfc0_tcrestart', - 'helper_dmfc0_tchalt', - 'helper_dmfc0_tccontext', - 'helper_dmfc0_tcschedule', - 'helper_dmfc0_tcschefback', - 'helper_dmfc0_lladdr', - 'helper_dmfc0_watchlo', - 'helper_dmtc0_entrylo0', - 'helper_dmtc0_entrylo1', - 'mips_reg_reset', - 'mips_reg_read', - 'mips_reg_write', - 'mips_tcg_init', - 'mips_cpu_list', - 'mips_release', - 'MIPS64_REGS_STORAGE_SIZE', - 'MIPS_REGS_STORAGE_SIZE' -) - -sparc_symbols = ( - 'cpu_sparc_exec', - 'helper_compute_psr', - 'helper_compute_C_icc', - 'cpu_sparc_init', - 'cpu_sparc_set_id', - 'sparc_cpu_register_types', - 'helper_fadds', - 'helper_faddd', - 'helper_faddq', - 'helper_fsubs', - 'helper_fsubd', - 'helper_fsubq', - 'helper_fmuls', - 'helper_fmuld', - 'helper_fmulq', - 'helper_fdivs', - 'helper_fdivd', - 'helper_fdivq', - 'helper_fsmuld', - 'helper_fdmulq', - 'helper_fnegs', - 'helper_fitos', - 'helper_fitod', - 'helper_fitoq', - 'helper_fdtos', - 'helper_fstod', - 'helper_fqtos', - 'helper_fstoq', - 'helper_fqtod', - 'helper_fdtoq', - 'helper_fstoi', - 'helper_fdtoi', - 'helper_fqtoi', - 'helper_fabss', - 'helper_fsqrts', - 'helper_fsqrtd', - 'helper_fsqrtq', - 'helper_fcmps', - 'helper_fcmpd', - 'helper_fcmpes', - 'helper_fcmped', - 'helper_fcmpq', - 'helper_fcmpeq', - 'helper_ldfsr', - 'helper_debug', - 'helper_udiv_cc', - 'helper_sdiv_cc', - 'helper_taddcctv', - 'helper_tsubcctv', - 'sparc_cpu_do_interrupt', - 'helper_check_align', - 'helper_ld_asi', - 'helper_st_asi', - 'helper_cas_asi', - 'helper_ldqf', - 'helper_stqf', - 'sparc_cpu_unassigned_access', - 'sparc_cpu_do_unaligned_access', - 'sparc_cpu_handle_mmu_fault', - 'dump_mmu', - 'sparc_cpu_get_phys_page_debug', - 'sparc_reg_reset', - 'sparc_reg_read', - 'sparc_reg_write', - 'gen_intermediate_code_init', - 'cpu_set_cwp', - 'cpu_get_psr', - 'cpu_put_psr', - 'cpu_cwp_inc', - 'cpu_cwp_dec', - 'helper_save', - 'helper_restore') - - -if __name__ == '__main__': - arch = sys.argv[1] - - print("/* Autogen header for Unicorn Engine - DONOT MODIFY */") - print("#ifndef UNICORN_AUTOGEN_%s_H" %arch.upper()) - print("#define UNICORN_AUTOGEN_%s_H" %arch.upper()) - - for s in symbols: - print("#define %s %s_%s" %(s, s, arch)) - - if 'arm' in arch: - for s in arm_symbols: - print("#define %s %s_%s" %(s, s, arch)) - - if 'aarch64' in arch: - for s in aarch64_symbols: - print("#define %s %s_%s" %(s, s, arch)) - - if 'mips' in arch: - for s in mips_symbols: - print("#define %s %s_%s" %(s, s, arch)) - - if 'sparc' in arch: - for s in sparc_symbols: - print("#define %s %s_%s" %(s, s, arch)) - - print("#endif") - diff --git a/qemu/hw/Makefile.objs b/qemu/hw/Makefile.objs deleted file mode 100644 index 08a0be13..00000000 --- a/qemu/hw/Makefile.objs +++ /dev/null @@ -1,4 +0,0 @@ -devices-dirs-$(CONFIG_SOFTMMU) += intc/ -devices-dirs-y += core/ -common-obj-y += $(devices-dirs-y) -obj-y += $(devices-dirs-y) diff --git a/qemu/hw/arm/Makefile.objs b/qemu/hw/arm/Makefile.objs deleted file mode 100644 index 77564851..00000000 --- a/qemu/hw/arm/Makefile.objs +++ /dev/null @@ -1,2 +0,0 @@ -obj-y += tosa.o -obj-y += virt.o diff --git a/qemu/hw/arm/tosa.c b/qemu/hw/arm/tosa.c deleted file mode 100644 index 7004fc87..00000000 --- a/qemu/hw/arm/tosa.c +++ /dev/null @@ -1,45 +0,0 @@ -/* vim:set shiftwidth=4 ts=4 et: */ -/* - * PXA255 Sharp Zaurus SL-6000 PDA platform - * - * 
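
For context on the symbol-prefix generator whose deletion ends above: given an architecture name on the command line, it emitted one prefixing #define per listed symbol, so that several QEMU targets could be linked into a single Unicorn library without symbol clashes. A hypothetical excerpt of its output for the argument "mips" (derived directly from the print statements above; the two symbols shown are taken from the mips list):

    /* Autogen header for Unicorn Engine - DONOT MODIFY */
    #ifndef UNICORN_AUTOGEN_MIPS_H
    #define UNICORN_AUTOGEN_MIPS_H
    #define helper_clz helper_clz_mips
    #define mips_cpu_do_interrupt mips_cpu_do_interrupt_mips
    /* ... one #define per entry in the symbol lists above ... */
    #endif
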
Copyright (c) 2008 Dmitry Baryshkov - * - * Code based on spitz platform by Andrzej Zaborowski - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "hw/hw.h" -#include "hw/arm/arm.h" -#include "hw/boards.h" -#include "exec/address-spaces.h" - - -static int tosa_init(struct uc_struct *uc, MachineState *machine) -{ - if (uc->mode & UC_MODE_MCLASS) - uc->cpu = (CPUState *)cpu_arm_init(uc, "cortex-m3"); - else if (uc->mode & UC_MODE_ARM926) - uc->cpu = (CPUState *)cpu_arm_init(uc, "arm926"); - else if (uc->mode & UC_MODE_ARM946) - uc->cpu = (CPUState *)cpu_arm_init(uc, "arm946"); - else if (uc->mode & UC_MODE_ARM1176) - uc->cpu = (CPUState *)cpu_arm_init(uc, "arm1176"); - else - uc->cpu = (CPUState *)cpu_arm_init(uc, "cortex-a15"); - - return 0; -} - -void tosa_machine_init(struct uc_struct *uc) -{ - static QEMUMachine tosapda_machine = { 0 }; - tosapda_machine.name = "tosa", - tosapda_machine.init = tosa_init, - tosapda_machine.is_default = 1, - tosapda_machine.arch = UC_ARCH_ARM, - - qemu_register_machine(uc, &tosapda_machine, TYPE_MACHINE, NULL); -} diff --git a/qemu/hw/arm/virt.c b/qemu/hw/arm/virt.c deleted file mode 100644 index 485109cc..00000000 --- a/qemu/hw/arm/virt.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * ARM mach-virt emulation - * - * Copyright (c) 2013 Linaro Limited - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2 or later, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - * - * Emulate a virtual board which works by passing Linux all the information - * it needs about what devices are present via the device tree. - * There are some restrictions about what we can do here: - * + we can only present devices whose Linux drivers will work based - * purely on the device tree with no platform data at all - * + we want to present a very stripped-down minimalist platform, - * both because this reduces the security attack surface from the guest - * and also because it reduces our exposure to being broken when - * the kernel updates its device tree bindings and requires further - * information in a device binding that we aren't providing. - * This is essentially the same approach kvmtool uses. 
- */ - -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh, 2015 */ - -#include "hw/arm/arm.h" -#include "hw/boards.h" -#include "exec/address-spaces.h" - - -static int machvirt_init(struct uc_struct *uc, MachineState *machine) -{ - const char *cpu_model = machine->cpu_model; - int n; - - if (!cpu_model) { - cpu_model = "cortex-a57"; // ARM64 - } - - for (n = 0; n < smp_cpus; n++) { - Object *cpuobj; - ObjectClass *oc = cpu_class_by_name(uc, TYPE_ARM_CPU, cpu_model); - - if (!oc) { - fprintf(stderr, "Unable to find CPU definition\n"); - return -1; - } - - cpuobj = object_new(uc, object_class_get_name(oc)); - uc->cpu = (CPUState *)cpuobj; - object_property_set_bool(uc, cpuobj, true, "realized", NULL); - } - - return 0; -} - -void machvirt_machine_init(struct uc_struct *uc) -{ - static QEMUMachine machvirt_a15_machine = { 0 }; - machvirt_a15_machine.name = "virt", - machvirt_a15_machine.init = machvirt_init, - machvirt_a15_machine.is_default = 1, - machvirt_a15_machine.arch = UC_ARCH_ARM64, - - qemu_register_machine(uc, &machvirt_a15_machine, TYPE_MACHINE, NULL); -} diff --git a/qemu/hw/core/Makefile.objs b/qemu/hw/core/Makefile.objs deleted file mode 100644 index 9d402237..00000000 --- a/qemu/hw/core/Makefile.objs +++ /dev/null @@ -1,3 +0,0 @@ -# core qdev-related obj files, also used by *-user: -common-obj-y += qdev.o -common-obj-$(CONFIG_SOFTMMU) += machine.o diff --git a/qemu/hw/core/cpu.c b/qemu/hw/core/cpu.c new file mode 100644 index 00000000..0bd16dc9 --- /dev/null +++ b/qemu/hw/core/cpu.c @@ -0,0 +1,154 @@ +/* + * QEMU CPU model + * + * Copyright (c) 2012-2014 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * + */ + +#include "uc_priv.h" +#include "qemu/osdep.h" +#include "hw/core/cpu.h" +#include "sysemu/tcg.h" + +bool cpu_paging_enabled(const CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + return cc->get_paging_enabled(cpu); +} + +static bool cpu_common_get_paging_enabled(const CPUState *cpu) +{ + return false; +} + +void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + cc->get_memory_mapping(cpu, list); +} + +static void cpu_common_get_memory_mapping(CPUState *cpu, + MemoryMappingList *list) +{ + // error_setg(errp, "Obtaining memory mappings is unsupported on this CPU."); +} + +/* Resetting the IRQ comes from across the code base so we take the + * BQL here if we need to. 
cpu_interrupt assumes it is held.*/ +void cpu_reset_interrupt(CPUState *cpu, int mask) +{ + cpu->interrupt_request &= ~mask; +} + +void cpu_exit(CPUState *cpu) +{ + cpu->exit_request = 1; + cpu->tcg_exit_req = 1; + cpu->icount_decr_ptr->u16.high = -1; +} + +static void cpu_common_noop(CPUState *cpu) +{ +} + +static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req) +{ + return false; +} + +void cpu_reset(CPUState *cpu) +{ + CPUClass *klass = CPU_GET_CLASS(cpu); + + if (klass->reset != NULL) { + (*klass->reset)(cpu); + } +} + +static void cpu_common_reset(CPUState *dev) +{ + CPUState *cpu = CPU(dev); + + cpu->interrupt_request = 0; + cpu->halted = 0; + cpu->mem_io_pc = 0; + cpu->icount_extra = 0; + cpu->can_do_io = 1; + cpu->exception_index = -1; + cpu->crash_occurred = false; + cpu->cflags_next_tb = -1; + + cpu_tb_jmp_cache_clear(cpu); + + cpu->uc->tcg_flush_tlb(cpu->uc); +} + +static bool cpu_common_has_work(CPUState *cs) +{ + return false; +} + +static int64_t cpu_common_get_arch_id(CPUState *cpu) +{ + return cpu->cpu_index; +} + +void cpu_class_init(struct uc_struct *uc, CPUClass *k) +{ + k->get_arch_id = cpu_common_get_arch_id; + k->has_work = cpu_common_has_work; + k->get_paging_enabled = cpu_common_get_paging_enabled; + k->get_memory_mapping = cpu_common_get_memory_mapping; + k->debug_excp_handler = cpu_common_noop; + k->cpu_exec_enter = cpu_common_noop; + k->cpu_exec_exit = cpu_common_noop; + k->cpu_exec_interrupt = cpu_common_exec_interrupt; + /* instead of dc->reset. */ + k->reset = cpu_common_reset; + + return; +} + +void cpu_common_initfn(struct uc_struct *uc, CPUState *cs) +{ + CPUState *cpu = CPU(cs); + + cpu->cpu_index = UNASSIGNED_CPU_INDEX; + cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX; + /* *-user doesn't have configurable SMP topology */ + /* the default value is changed by qemu_init_vcpu() for softmmu */ + cpu->nr_cores = 1; + cpu->nr_threads = 1; + + QTAILQ_INIT(&cpu->breakpoints); + QTAILQ_INIT(&cpu->watchpoints); + + /* cpu_exec_initfn(cpu); */ + cpu->num_ases = 1; + cpu->as = &(cpu->uc->address_space_memory); + cpu->memory = cpu->uc->system_memory; +} + +void cpu_stop(struct uc_struct *uc) +{ + if (uc->cpu) { + uc->cpu->stop = false; + uc->cpu->stopped = true; + cpu_exit(uc->cpu); + } +} diff --git a/qemu/hw/core/machine.c b/qemu/hw/core/machine.c deleted file mode 100644 index fa36062b..00000000 --- a/qemu/hw/core/machine.c +++ /dev/null @@ -1,47 +0,0 @@ -/* - * QEMU Machine - * - * Copyright (C) 2014 Red Hat Inc - * - * Authors: - * Marcel Apfelbaum - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#include "hw/boards.h" - -static void machine_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ -} - -static void machine_finalize(struct uc_struct *uc, Object *obj, void *opaque) -{ -} - -static const TypeInfo machine_info = { - TYPE_MACHINE, - TYPE_OBJECT, - - sizeof(MachineClass), - sizeof(MachineState), - NULL, - - machine_initfn, - NULL, - machine_finalize, - - NULL, - - NULL, - NULL, - NULL, - - true, -}; - -void machine_register_types(struct uc_struct *uc) -{ - type_register_static(uc, &machine_info); -} diff --git a/qemu/hw/core/qdev.c b/qemu/hw/core/qdev.c deleted file mode 100644 index 8b98f2af..00000000 --- a/qemu/hw/core/qdev.c +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Dynamic device configuration and creation. 
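
A note on the new qemu/hw/core/cpu.c above: it drops QEMU's QOM type machinery in favor of hooks installed directly by cpu_class_init(), with cpu_common_reset() clearing per-vCPU state and then flushing the TB jump cache and the TLB through uc->tcg_flush_tlb. A rough, illustrative sketch of how the engine is expected to drive these entry points (restart_vcpu is hypothetical; the real call sites live elsewhere in the tree):

    /* illustrative only, not part of this patch */
    static void restart_vcpu(struct uc_struct *uc)
    {
        cpu_reset(uc->cpu); /* dispatches to CPUClass::reset (cpu_common_reset by default) */
        cpu_exit(uc->cpu);  /* raises exit_request/tcg_exit_req so the TCG loop unwinds */
    }
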
- * - * Copyright (c) 2009 CodeSourcery - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -/* The theory here is that it should be possible to create a machine without - knowledge of specific devices. Historically board init routines have - passed a bunch of arguments to each device, requiring the board know - exactly which device it is dealing with. This file provides an abstract - API for device configuration and initialization. Devices will generally - inherit from a particular bus (e.g. PCI or I2C) rather than - this API directly. */ - -#include "hw/qdev.h" -#include "qapi/error.h" -#include "qapi/qmp/qerror.h" - - -static void bus_add_child(BusState *bus, DeviceState *child) -{ - char name[32]; - BusChild *kid = g_malloc0(sizeof(*kid)); - - kid->index = bus->max_index++; - kid->child = child; - object_ref(OBJECT(kid->child)); - - QTAILQ_INSERT_HEAD(&bus->children, kid, sibling); - - /* This transfers ownership of kid->child to the property. */ - snprintf(name, sizeof(name), "child[%d]", kid->index); - object_property_add_link(OBJECT(bus), name, - object_get_typename(OBJECT(child)), - (Object **)&kid->child, - NULL, /* read-only property */ - 0, /* return ownership on prop deletion */ - NULL); -} - -void qdev_set_parent_bus(DeviceState *dev, BusState *bus) -{ - dev->parent_bus = bus; - object_ref(OBJECT(bus)); - bus_add_child(bus, dev); -} - -/* Create a new device. This only initializes the device state structure - and allows properties to be set. qdev_init should be called to - initialize the actual device emulation. */ -DeviceState *qdev_create(BusState *bus, const char *name) -{ - DeviceState *dev; - - dev = qdev_try_create(bus, name); - if (!dev) { - abort(); - } - - return dev; -} - -DeviceState *qdev_try_create(BusState *bus, const char *type) -{ -#if 0 - DeviceState *dev; - - if (object_class_by_name(NULL, type) == NULL) { // no need to fix. aq - return NULL; - } - dev = DEVICE(object_new(NULL, type)); // no need to fix. aq - if (!dev) { - return NULL; - } - - if (!bus) { - bus = sysbus_get_default(); - } - - qdev_set_parent_bus(dev, bus); - object_unref(OBJECT(dev)); - return dev; -#endif - return NULL; -} - -/* Initialize a device. Device properties should be set before calling - this function. IRQs and MMIO regions should be connected/mapped after - calling this function. - On failure, destroy the device and return negative value. - Return 0 on success. 
*/ -int qdev_init(DeviceState *dev) -{ - return 0; -} - -BusState *qdev_get_parent_bus(DeviceState *dev) -{ - return dev->parent_bus; -} - -static void qbus_realize(BusState *bus, DeviceState *parent, const char *name) -{ -} - -static void bus_unparent(struct uc_struct *uc, Object *obj) -{ - BusState *bus = BUS(uc, obj); - BusChild *kid; - - while ((kid = QTAILQ_FIRST(&bus->children)) != NULL) { - DeviceState *dev = kid->child; - object_unparent(uc, OBJECT(dev)); - } - if (bus->parent) { - QLIST_REMOVE(bus, sibling); - bus->parent->num_child_bus--; - bus->parent = NULL; - } -} - -void qbus_create_inplace(void *bus, size_t size, const char *typename, - DeviceState *parent, const char *name) -{ - object_initialize(NULL, bus, size, typename); // unused, so no need to fix. aq - qbus_realize(bus, parent, name); -} - -BusState *qbus_create(const char *typename, DeviceState *parent, const char *name) -{ - BusState *bus; - - bus = BUS(NULL, object_new(NULL, typename)); // no need to fix. aq - qbus_realize(bus, parent, name); - - return bus; -} - -static bool device_get_realized(struct uc_struct *uc, Object *obj, Error **errp) -{ - DeviceState *dev = DEVICE(uc, obj); - return dev->realized; -} - -static int device_set_realized(struct uc_struct *uc, Object *obj, bool value, Error **errp) -{ - DeviceState *dev = DEVICE(uc, obj); - DeviceClass *dc = DEVICE_GET_CLASS(uc, dev); - BusState *bus; - Error *local_err = NULL; - - if (dev->hotplugged && !dc->hotpluggable) { - error_set(errp, QERR_DEVICE_NO_HOTPLUG, object_get_typename(obj)); - return -1; - } - - if (value && !dev->realized) { -#if 0 - if (!obj->parent) { - static int unattached_count; - gchar *name = g_strdup_printf("device[%d]", unattached_count++); - - object_property_add_child(container_get(qdev_get_machine(), - "/unattached"), - name, obj, &error_abort); - g_free(name); - } -#endif - - if (dc->realize) { - if (dc->realize(uc, dev, &local_err)) - return -1; - } - - if (local_err != NULL) { - goto fail; - } - - if (local_err != NULL) { - goto post_realize_fail; - } - - QLIST_FOREACH(bus, &dev->child_bus, sibling) { - object_property_set_bool(uc, OBJECT(bus), true, "realized", - &local_err); - if (local_err != NULL) { - goto child_realize_fail; - } - } - if (dev->hotplugged) { - device_reset(dev); - } - dev->pending_deleted_event = false; - } else if (!value && dev->realized) { - Error **local_errp = NULL; - QLIST_FOREACH(bus, &dev->child_bus, sibling) { - local_errp = local_err ? NULL : &local_err; - object_property_set_bool(uc, OBJECT(bus), false, "realized", - local_errp); - } - if (dc->unrealize) { - local_errp = local_err ? NULL : &local_err; - dc->unrealize(dev, local_errp); - } - dev->pending_deleted_event = true; - } - - if (local_err != NULL) { - goto fail; - } - - dev->realized = value; - return 0; - -child_realize_fail: - QLIST_FOREACH(bus, &dev->child_bus, sibling) { - object_property_set_bool(uc, OBJECT(bus), false, "realized", - NULL); - } - -post_realize_fail: - if (dc->unrealize) { - dc->unrealize(dev, NULL); - } - -fail: - error_propagate(errp, local_err); - return -1; -} - -static void device_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - DeviceState *dev = DEVICE(uc, obj); - - dev->instance_id_alias = -1; - dev->realized = false; - - object_property_add_bool(uc, obj, "realized", - device_get_realized, device_set_realized, NULL); -} - -static void device_post_init(struct uc_struct *uc, Object *obj) -{ -} - -/* Unlink device from bus and free the structure. 
*/ -static void device_finalize(struct uc_struct *uc, Object *obj, void *opaque) -{ -} - -static void device_class_base_init(ObjectClass *class, void *data) -{ -} - - -static void device_class_init(struct uc_struct *uc, ObjectClass *class, void *data) -{ -} - -void device_reset(DeviceState *dev) -{ -} - -Object *qdev_get_machine(struct uc_struct *uc) -{ - return container_get(uc, object_get_root(uc), "/machine"); -} - -static const TypeInfo device_type_info = { - TYPE_DEVICE, - TYPE_OBJECT, - - sizeof(DeviceClass), - sizeof(DeviceState), - NULL, - - device_initfn, - device_post_init, - device_finalize, - - NULL, - - device_class_init, - device_class_base_init, - NULL, - - true, -}; - -static void qbus_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ -} - -static void bus_class_init(struct uc_struct *uc, ObjectClass *class, void *data) -{ - class->unparent = bus_unparent; -} - -static void qbus_finalize(struct uc_struct *uc, Object *obj, void *opaque) -{ - BusState *bus = BUS(uc, obj); - - g_free((char *)bus->name); -} - -static const TypeInfo bus_info = { - TYPE_BUS, - TYPE_OBJECT, - - sizeof(BusClass), - sizeof(BusState), - NULL, - - qbus_initfn, - NULL, - qbus_finalize, - - NULL, - - bus_class_init, - NULL, - NULL, - - true, -}; - -void qdev_register_types(struct uc_struct *uc) -{ - type_register_static(uc, &bus_info); - type_register_static(uc, &device_type_info); -} diff --git a/qemu/hw/i386/Makefile.objs b/qemu/hw/i386/Makefile.objs deleted file mode 100644 index 649888e4..00000000 --- a/qemu/hw/i386/Makefile.objs +++ /dev/null @@ -1 +0,0 @@ -obj-y += pc.o pc_piix.o diff --git a/qemu/hw/i386/pc.c b/qemu/hw/i386/pc.c deleted file mode 100644 index 6377bee3..00000000 --- a/qemu/hw/i386/pc.c +++ /dev/null @@ -1,181 +0,0 @@ -/* - * QEMU PC System Emulator - * - * Copyright (c) 2003-2004 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ - -#include "hw/hw.h" -#include "hw/i386/pc.h" -#include "sysemu/sysemu.h" -#include "qapi-visit.h" - - -/* XXX: add IGNNE support */ -void cpu_set_ferr(CPUX86State *s) -{ -// qemu_irq_raise(ferr_irq); -} - -/* TSC handling */ -uint64_t cpu_get_tsc(CPUX86State *env) -{ - return cpu_get_ticks(); -} - -/* SMM support */ - -static cpu_set_smm_t smm_set; -static void *smm_arg; - -void cpu_smm_register(cpu_set_smm_t callback, void *arg) -{ - assert(smm_set == NULL); - assert(smm_arg == NULL); - smm_set = callback; - smm_arg = arg; -} - -void cpu_smm_update(CPUX86State *env) -{ - struct uc_struct *uc = x86_env_get_cpu(env)->parent_obj.uc; - - if (smm_set && smm_arg && CPU(x86_env_get_cpu(env)) == uc->cpu) { - smm_set(!!(env->hflags & HF_SMM_MASK), smm_arg); - } -} - -/* IRQ handling */ -int cpu_get_pic_interrupt(CPUX86State *env) -{ - X86CPU *cpu = x86_env_get_cpu(env); - int intno; - - intno = apic_get_interrupt(cpu->apic_state); - if (intno >= 0) { - return intno; - } - /* read the irq from the PIC */ - if (!apic_accept_pic_intr(cpu->apic_state)) { - return -1; - } - - return 0; -} - -DeviceState *cpu_get_current_apic(struct uc_struct *uc) -{ - if (uc->current_cpu) { - X86CPU *cpu = X86_CPU(uc, uc->current_cpu); - return cpu->apic_state; - } else { - return NULL; - } -} - -static X86CPU *pc_new_cpu(struct uc_struct *uc, const char *cpu_model, int64_t apic_id, - Error **errp) -{ - X86CPU *cpu; - Error *local_err = NULL; - - cpu = cpu_x86_create(uc, cpu_model, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return NULL; - } - - object_property_set_int(uc, OBJECT(cpu), apic_id, "apic-id", &local_err); - object_property_set_bool(uc, OBJECT(cpu), true, "realized", &local_err); - - if (local_err) { - error_propagate(errp, local_err); - object_unref(uc, OBJECT(cpu)); - cpu = NULL; - } - return cpu; -} - -int pc_cpus_init(struct uc_struct *uc, const char *cpu_model) -{ - int i; - Error *error = NULL; - - /* init CPUs */ - if (cpu_model == NULL) { -#ifdef TARGET_X86_64 - cpu_model = "qemu64"; -#else - cpu_model = "qemu32"; -#endif - } - - for (i = 0; i < smp_cpus; i++) { - uc->cpu = (CPUState *)pc_new_cpu(uc, cpu_model, x86_cpu_apic_id_from_index(i), &error); - if (error) { - //error_report("%s", error_get_pretty(error)); - error_free(error); - return -1; - } - } - - return 0; -} - -static void pc_machine_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ -} - -static void pc_machine_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) -{ -} - -static const TypeInfo pc_machine_info = { - TYPE_PC_MACHINE, - TYPE_MACHINE, - - sizeof(PCMachineClass), - sizeof(PCMachineState), - NULL, - - pc_machine_initfn, - NULL, - NULL, - - NULL, - - pc_machine_class_init, - NULL, - NULL, - - true, - - NULL, - NULL, - - // should this be added somehow? 
- //.interfaces = (InterfaceInfo[]) { { } }, -}; - -void pc_machine_register_types(struct uc_struct *uc) -{ - type_register_static(uc, &pc_machine_info); -} diff --git a/qemu/hw/i386/pc_piix.c b/qemu/hw/i386/pc_piix.c deleted file mode 100644 index a5ea8628..00000000 --- a/qemu/hw/i386/pc_piix.c +++ /dev/null @@ -1,78 +0,0 @@ -/* - * QEMU PC System Emulator - * - * Copyright (c) 2003-2004 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ - -#include "hw/i386/pc.h" -#include "hw/boards.h" -#include "exec/address-spaces.h" -#include "uc_priv.h" - - -/* Make sure that guest addresses aligned at 1Gbyte boundaries get mapped to - * host addresses aligned at 1Gbyte boundaries. This way we can use 1GByte - * pages in the host. - */ -#define GIGABYTE_ALIGN true - -/* PC hardware initialisation */ -static int pc_init1(struct uc_struct *uc, MachineState *machine) -{ - return pc_cpus_init(uc, machine->cpu_model); -} - -static int pc_init_pci(struct uc_struct *uc, MachineState *machine) -{ - return pc_init1(uc, machine); -} - -static QEMUMachine pc_i440fx_machine_v2_2 = { - "pc_piix", - "pc-i440fx-2.2", - pc_init_pci, - NULL, - 255, - 1, - UC_ARCH_X86, // X86 -}; - -static void pc_generic_machine_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(uc, oc); - QEMUMachine *qm = data; - - mc->family = qm->family; - mc->name = qm->name; - mc->init = qm->init; - mc->reset = qm->reset; - mc->max_cpus = qm->max_cpus; - mc->is_default = qm->is_default; - mc->arch = qm->arch; -} - -void pc_machine_init(struct uc_struct *uc); -void pc_machine_init(struct uc_struct *uc) -{ - qemu_register_machine(uc, &pc_i440fx_machine_v2_2, - TYPE_PC_MACHINE, pc_generic_machine_class_init); -} diff --git a/qemu/hw/mips/addr.c b/qemu/hw/i386/x86.c similarity index 73% rename from qemu/hw/mips/addr.c rename to qemu/hw/i386/x86.c index ff3b9526..9ad5c63e 100644 --- a/qemu/hw/mips/addr.c +++ b/qemu/hw/i386/x86.c @@ -1,5 +1,7 @@ /* - * QEMU MIPS address translation support + * QEMU PC System Emulator + * + * Copyright (c) 2003-2004 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -19,21 +21,17 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ -#include "hw/hw.h" -#include "hw/mips/cpudevs.h" +#include "qemu/compiler.h" +#include "sysemu/sysemu.h" +#include "target/i386/cpu.h" -uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr) + +/* TSC handling */ +uint64_t cpu_get_tsc(CPUX86State *env) { - return addr & 0x1fffffffll; + return cpu_get_ticks(); } -uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr) -{ - return addr | ~0x7fffffffll; -} - -uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr) -{ - return addr | 0x40000000ll; -} diff --git a/qemu/hw/intc/Makefile.objs b/qemu/hw/intc/Makefile.objs deleted file mode 100644 index 7de05c64..00000000 --- a/qemu/hw/intc/Makefile.objs +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_APIC) += apic.o apic_common.o diff --git a/qemu/hw/intc/apic.c b/qemu/hw/intc/apic.c deleted file mode 100644 index 957a66eb..00000000 --- a/qemu/hw/intc/apic.c +++ /dev/null @@ -1,230 +0,0 @@ -/* - * APIC support - * - * Copyright (c) 2004-2005 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - */ -#include "qemu/thread.h" -#include "hw/i386/apic_internal.h" -#include "hw/i386/apic.h" -#include "qemu/host-utils.h" -#include "hw/i386/pc.h" - -#include "exec/address-spaces.h" - -#define MAX_APIC_WORDS 8 - -#define SYNC_FROM_VAPIC 0x1 -#define SYNC_TO_VAPIC 0x2 -#define SYNC_ISR_IRR_TO_VAPIC 0x4 - -static void apic_update_irq(APICCommonState *s); - -/* Find first bit starting from msb */ -static int apic_fls_bit(uint32_t value) -{ - return 31 - clz32(value); -} - -/* return -1 if no bit is set */ -static int get_highest_priority_int(uint32_t *tab) -{ - int i; - for (i = 7; i >= 0; i--) { - if (tab[i] != 0) { - return i * 32 + apic_fls_bit(tab[i]); - } - } - return -1; -} - -static void apic_sync_vapic(APICCommonState *s, int sync_type) -{ - VAPICState vapic_state; - //size_t length; - //off_t start; - int vector; - - if (!s->vapic_paddr) { - return; - } - if (sync_type & SYNC_FROM_VAPIC) { - cpu_physical_memory_read(NULL, s->vapic_paddr, &vapic_state, - sizeof(vapic_state)); - s->tpr = vapic_state.tpr; - } - if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) { - //start = offsetof(VAPICState, isr); - //length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr); - - if (sync_type & SYNC_TO_VAPIC) { - - vapic_state.tpr = s->tpr; - vapic_state.enabled = 1; - //start = 0; - //length = sizeof(VAPICState); - } - - vector = get_highest_priority_int(s->isr); - if (vector < 0) { - vector = 0; - } - vapic_state.isr = vector & 0xf0; - - vapic_state.zero = 0; - - vector = get_highest_priority_int(s->irr); - if (vector < 0) { - vector = 0; - } - vapic_state.irr = vector & 0xff; - - //cpu_physical_memory_write_rom(&address_space_memory, - // s->vapic_paddr + start, - // ((void *)&vapic_state) + start, length); - // FIXME qq - } -} - -static void 
apic_vapic_base_update(APICCommonState *s) -{ - apic_sync_vapic(s, SYNC_TO_VAPIC); -} - -#define foreach_apic(apic, deliver_bitmask, code) \ -{\ - int __i, __j;\ - for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\ - uint32_t __mask = deliver_bitmask[__i];\ - if (__mask) {\ - for(__j = 0; __j < 32; __j++) {\ - if (__mask & (1U << __j)) {\ - apic = local_apics[__i * 32 + __j];\ - if (apic) {\ - code;\ - }\ - }\ - }\ - }\ - }\ -} - -static void apic_set_base(APICCommonState *s, uint64_t val) -{ - s->apicbase = (val & 0xfffff000) | - (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE)); - /* if disabled, cannot be enabled again */ - if (!(val & MSR_IA32_APICBASE_ENABLE)) { - s->apicbase &= ~MSR_IA32_APICBASE_ENABLE; - cpu_clear_apic_feature(&s->cpu->env); - s->spurious_vec &= ~APIC_SV_ENABLE; - } -} - -static void apic_set_tpr(APICCommonState *s, uint8_t val) -{ - /* Updates from cr8 are ignored while the VAPIC is active */ - if (!s->vapic_paddr) { - s->tpr = val << 4; - apic_update_irq(s); - } -} - -static uint8_t apic_get_tpr(APICCommonState *s) -{ - apic_sync_vapic(s, SYNC_FROM_VAPIC); - return s->tpr >> 4; -} - -/* signal the CPU if an irq is pending */ -static void apic_update_irq(APICCommonState *s) -{ -} - -void apic_poll_irq(DeviceState *dev) -{ -} - -void apic_sipi(DeviceState *dev) -{ -} - -int apic_get_interrupt(DeviceState *dev) -{ - return 0; -} - -int apic_accept_pic_intr(DeviceState *dev) -{ - return 0; -} - -static void apic_pre_save(APICCommonState *s) -{ - apic_sync_vapic(s, SYNC_FROM_VAPIC); -} - -static void apic_post_load(APICCommonState *s) -{ -#if 0 - if (s->timer_expiry != -1) { - timer_mod(s->timer, s->timer_expiry); - } else { - timer_del(s->timer); - } -#endif -} - -static int apic_realize(struct uc_struct *uc, DeviceState *dev, Error **errp) -{ - return 0; -} - -static void apic_class_init(struct uc_struct *uc, ObjectClass *klass, void *data) -{ - APICCommonClass *k = APIC_COMMON_CLASS(uc, klass); - - k->realize = apic_realize; - k->set_base = apic_set_base; - k->set_tpr = apic_set_tpr; - k->get_tpr = apic_get_tpr; - k->vapic_base_update = apic_vapic_base_update; - k->pre_save = apic_pre_save; - k->post_load = apic_post_load; - //printf("... init apic class\n"); -} - -static const TypeInfo apic_info = { - "apic", - TYPE_APIC_COMMON, - - 0, - sizeof(APICCommonState), - NULL, - - NULL, - NULL, - NULL, - - NULL, - - apic_class_init, -}; - -void apic_register_types(struct uc_struct *uc) -{ - //printf("... register apic types\n"); - type_register_static(uc, &apic_info); -} diff --git a/qemu/hw/intc/apic_common.c b/qemu/hw/intc/apic_common.c deleted file mode 100644 index ad7b35aa..00000000 --- a/qemu/hw/intc/apic_common.c +++ /dev/null @@ -1,274 +0,0 @@ -/* - * APIC support - common bits of emulated and KVM kernel model - * - * Copyright (c) 2004-2005 Fabrice Bellard - * Copyright (c) 2011 Jan Kiszka, Siemens AG - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - */ -#include "hw/i386/apic.h" -#include "hw/i386/apic_internal.h" -#include "hw/qdev.h" - -#include "uc_priv.h" - - -void cpu_set_apic_base(struct uc_struct *uc, DeviceState *dev, uint64_t val) -{ - if (dev) { - APICCommonState *s = APIC_COMMON(uc, dev); - APICCommonClass *info = APIC_COMMON_GET_CLASS(uc, s); - info->set_base(s, val); - } -} - -uint64_t cpu_get_apic_base(struct uc_struct *uc, DeviceState *dev) -{ - if (dev) { - APICCommonState *s = APIC_COMMON(uc, dev); - return s->apicbase; - } else { - return MSR_IA32_APICBASE_BSP; - } -} - -void cpu_set_apic_tpr(struct uc_struct *uc, DeviceState *dev, uint8_t val) -{ - APICCommonState *s; - APICCommonClass *info; - - if (!dev) { - return; - } - - s = APIC_COMMON(uc, dev); - info = APIC_COMMON_GET_CLASS(uc, s); - - info->set_tpr(s, val); -} - -uint8_t cpu_get_apic_tpr(struct uc_struct *uc, DeviceState *dev) -{ - APICCommonState *s; - APICCommonClass *info; - - if (!dev) { - return 0; - } - - s = APIC_COMMON(uc, dev); - info = APIC_COMMON_GET_CLASS(uc, s); - - return info->get_tpr(s); -} - -void apic_enable_vapic(struct uc_struct *uc, DeviceState *dev, hwaddr paddr) -{ - APICCommonState *s = APIC_COMMON(uc, dev); - APICCommonClass *info = APIC_COMMON_GET_CLASS(uc, s); - - s->vapic_paddr = paddr; - info->vapic_base_update(s); -} - -void apic_handle_tpr_access_report(DeviceState *dev, target_ulong ip, - TPRAccess access) -{ - //APICCommonState *s = APIC_COMMON(NULL, dev); - - //vapic_report_tpr_access(s->vapic, CPU(s->cpu), ip, access); -} - -bool apic_next_timer(APICCommonState *s, int64_t current_time) -{ - int64_t d; - - /* We need to store the timer state separately to support APIC - * implementations that maintain a non-QEMU timer, e.g. inside the - * host kernel. This open-coded state allows us to migrate between - * both models. 
*/ - s->timer_expiry = -1; - - if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_MASKED) { - return false; - } - - d = (current_time - s->initial_count_load_time) >> s->count_shift; - - if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) { - if (!s->initial_count) { - return false; - } - d = ((d / ((uint64_t)s->initial_count + 1)) + 1) * - ((uint64_t)s->initial_count + 1); - } else { - if (d >= s->initial_count) { - return false; - } - d = (uint64_t)s->initial_count + 1; - } - s->next_time = s->initial_count_load_time + (d << s->count_shift); - s->timer_expiry = s->next_time; - return true; -} - -void apic_init_reset(struct uc_struct *uc, DeviceState *dev) -{ - APICCommonState *s = APIC_COMMON(uc, dev); - APICCommonClass *info = APIC_COMMON_GET_CLASS(uc, s); - int i; - - if (!s) { - return; - } - s->tpr = 0; - s->spurious_vec = 0xff; - s->log_dest = 0; - s->dest_mode = 0xf; - memset(s->isr, 0, sizeof(s->isr)); - memset(s->tmr, 0, sizeof(s->tmr)); - memset(s->irr, 0, sizeof(s->irr)); - for (i = 0; i < APIC_LVT_NB; i++) { - s->lvt[i] = APIC_LVT_MASKED; - } - s->esr = 0; - memset(s->icr, 0, sizeof(s->icr)); - s->divide_conf = 0; - s->count_shift = 0; - s->initial_count = 0; - s->initial_count_load_time = 0; - s->next_time = 0; - s->wait_for_sipi = !cpu_is_bsp(s->cpu); - - if (s->timer) { - // timer_del(s->timer); - } - s->timer_expiry = -1; - - if (info->reset) { - info->reset(s); - } -} - -void apic_designate_bsp(struct uc_struct *uc, DeviceState *dev) -{ - APICCommonState *s; - - if (dev == NULL) { - return; - } - - s = APIC_COMMON(uc, dev); - s->apicbase |= MSR_IA32_APICBASE_BSP; -} - -static void apic_reset_common(struct uc_struct *uc, DeviceState *dev) -{ - APICCommonState *s = APIC_COMMON(uc, dev); - APICCommonClass *info = APIC_COMMON_GET_CLASS(uc, s); - bool bsp; - - bsp = cpu_is_bsp(s->cpu); - s->apicbase = APIC_DEFAULT_ADDRESS | - (bsp ? MSR_IA32_APICBASE_BSP : 0) | MSR_IA32_APICBASE_ENABLE; - - s->vapic_paddr = 0; - info->vapic_base_update(s); - - apic_init_reset(uc, dev); - - if (bsp) { - /* - * LINT0 delivery mode on CPU #0 is set to ExtInt at initialization - * time typically by BIOS, so PIC interrupt can be delivered to the - * processor when local APIC is enabled. 
- */ - s->lvt[APIC_LVT_LINT0] = 0x700; - } -} - -static int apic_common_realize(struct uc_struct *uc, DeviceState *dev, Error **errp) -{ - APICCommonState *s = APIC_COMMON(uc, dev); - APICCommonClass *info; - - if (uc->apic_no >= MAX_APICS) { - error_setg(errp, "%s initialization failed.", - object_get_typename(OBJECT(dev))); - return -1; - } - s->idx = uc->apic_no++; - - info = APIC_COMMON_GET_CLASS(uc, s); - info->realize(uc, dev, errp); - if (!uc->mmio_registered) { - ICCBus *b = ICC_BUS(uc, qdev_get_parent_bus(dev)); - memory_region_add_subregion(b->apic_address_space, 0, &s->io_memory); - uc->mmio_registered = true; - } - - /* Note: We need at least 1M to map the VAPIC option ROM */ - if (!uc->vapic && s->vapic_control & VAPIC_ENABLE_MASK) { - // ram_size >= 1024 * 1024) { // FIXME - uc->vapic = NULL; - } - s->vapic = uc->vapic; - if (uc->apic_report_tpr_access && info->enable_tpr_reporting) { - info->enable_tpr_reporting(s, true); - } - - return 0; -} - -static void apic_common_class_init(struct uc_struct *uc, ObjectClass *klass, void *data) -{ - ICCDeviceClass *idc = ICC_DEVICE_CLASS(uc, klass); - DeviceClass *dc = DEVICE_CLASS(uc, klass); - - dc->reset = apic_reset_common; - idc->realize = apic_common_realize; - /* - * Reason: APIC and CPU need to be wired up by - * x86_cpu_apic_create() - */ - dc->cannot_instantiate_with_device_add_yet = true; - //printf("... init apic common class\n"); -} - -static const TypeInfo apic_common_type = { - TYPE_APIC_COMMON, - TYPE_DEVICE, - - sizeof(APICCommonClass), - sizeof(APICCommonState), - NULL, - - NULL, - NULL, - NULL, - - NULL, - - apic_common_class_init, - NULL, - NULL, - - true, -}; - -void apic_common_register_types(struct uc_struct *uc) -{ - //printf("... register apic common\n"); - type_register_static(uc, &apic_common_type); -} diff --git a/qemu/hw/m68k/Makefile.objs b/qemu/hw/m68k/Makefile.objs deleted file mode 100644 index 05b7a670..00000000 --- a/qemu/hw/m68k/Makefile.objs +++ /dev/null @@ -1 +0,0 @@ -obj-y += dummy_m68k.o diff --git a/qemu/hw/m68k/dummy_m68k.c b/qemu/hw/m68k/dummy_m68k.c deleted file mode 100644 index d1558579..00000000 --- a/qemu/hw/m68k/dummy_m68k.c +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Dummy board with just RAM and CPU for use as an ISS. - * - * Copyright (c) 2007 CodeSourcery. - * - * This code is licensed under the GPL - */ - -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh, 2015 */ - -#include "hw/hw.h" -#include "hw/m68k/m68k.h" -#include "hw/boards.h" -#include "exec/address-spaces.h" - - -/* Board init. */ -static int dummy_m68k_init(struct uc_struct *uc, MachineState *machine) -{ - const char *cpu_model = machine->cpu_model; - CPUM68KState *env; - - if (!cpu_model) - cpu_model = "cfv4e"; - - env = cpu_init(uc, cpu_model); - if (!env) { - fprintf(stderr, "Unable to find m68k CPU definition\n"); - return -1; - } - - /* Initialize CPU registers. 
*/ - env->vbr = 0; - env->pc = 0; - - return 0; -} - -void dummy_m68k_machine_init(struct uc_struct *uc) -{ - static QEMUMachine dummy_m68k_machine = { 0 }; - dummy_m68k_machine.name = "dummy", - dummy_m68k_machine.init = dummy_m68k_init, - dummy_m68k_machine.is_default = 1, - dummy_m68k_machine.arch = UC_ARCH_M68K, - - //printf(">>> dummy_m68k_machine_init\n"); - qemu_register_machine(uc, &dummy_m68k_machine, TYPE_MACHINE, NULL); -} diff --git a/qemu/hw/mips/Makefile.objs b/qemu/hw/mips/Makefile.objs deleted file mode 100644 index 910e23b0..00000000 --- a/qemu/hw/mips/Makefile.objs +++ /dev/null @@ -1,2 +0,0 @@ -obj-y += mips_r4k.o -obj-y += addr.o cputimer.o diff --git a/qemu/hw/mips/mips_r4k.c b/qemu/hw/mips/mips_r4k.c deleted file mode 100644 index aa1ee11f..00000000 --- a/qemu/hw/mips/mips_r4k.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * QEMU/MIPS pseudo-board - * - * emulates a simple machine with ISA-like bus. - * ISA IO space mapped to the 0x14000000 (PHYS) and - * ISA memory at the 0x10000000 (PHYS, 16Mb in size). - * All peripherial devices are attached to this "bus" with - * the standard PC ISA addresses. -*/ - -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh, 2015 */ - -#include "hw/hw.h" -#include "hw/mips/mips.h" -#include "hw/mips/cpudevs.h" -#include "sysemu/sysemu.h" -#include "hw/boards.h" -#include "exec/address-spaces.h" - - -static int mips_r4k_init(struct uc_struct *uc, MachineState *machine) -{ - const char *cpu_model = machine->cpu_model; - - /* init CPUs */ - if (cpu_model == NULL) { -#ifdef TARGET_MIPS64 - cpu_model = "R4000"; -#else - cpu_model = "24Kf"; -#endif - } - - uc->cpu = (void*) cpu_mips_init(uc, cpu_model); - if (uc->cpu == NULL) { - fprintf(stderr, "Unable to find CPU definition\n"); - return -1; - } - - return 0; -} - -void mips_machine_init(struct uc_struct *uc) -{ - static QEMUMachine mips_machine = { - NULL, - "mips", - mips_r4k_init, - NULL, - 0, - 1, - UC_ARCH_MIPS, - }; - - qemu_register_machine(uc, &mips_machine, TYPE_MACHINE, NULL); -} diff --git a/qemu/hw/ppc/ppc.c b/qemu/hw/ppc/ppc.c new file mode 100644 index 00000000..6f3bba6d --- /dev/null +++ b/qemu/hw/ppc/ppc.c @@ -0,0 +1,1569 @@ +/* + * QEMU generic PowerPC hardware System Emulator + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +//#include "hw/irq.h" +#include "hw/ppc/ppc.h" +//#include "hw/ppc/ppc_e500.h" +#include "qemu/timer.h" +#include "sysemu/cpus.h" +//#include "qemu/log.h" +//#include "qemu/main-loop.h" +//#include "qemu/error-report.h" +//#include "sysemu/kvm.h" +//#include "sysemu/runstate.h" +//#include "kvm_ppc.h" +//#include "migration/vmstate.h" +//#include "trace.h" + +//#define PPC_DEBUG_IRQ +//#define PPC_DEBUG_TB + +#ifdef PPC_DEBUG_IRQ +# define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__) +#else +# define LOG_IRQ(...) do { } while (0) +#endif + + +#ifdef PPC_DEBUG_TB +# define LOG_TB(...) qemu_log(__VA_ARGS__) +#else +# define LOG_TB(...) do { } while (0) +#endif + +#if 0 +static void cpu_ppc_tb_stop (CPUPPCState *env); +static void cpu_ppc_tb_start (CPUPPCState *env); +#endif + +void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; +#if 0 + unsigned int old_pending; + + old_pending = env->pending_interrupts; +#endif + + if (level) { + env->pending_interrupts |= 1 << n_IRQ; + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + env->pending_interrupts &= ~(1 << n_IRQ); + if (env->pending_interrupts == 0) { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } + } + +#if 0 + if (old_pending != env->pending_interrupts) { + kvmppc_set_interrupt(cpu, n_IRQ, level); + } +#endif + + LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32 + "req %08x\n", __func__, env, n_IRQ, level, + env->pending_interrupts, CPU(cpu)->interrupt_request); +} + +#if 0 +/* PowerPC 6xx / 7xx internal IRQ controller */ +static void ppc6xx_set_irq(void *opaque, int pin, int level) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + int cur_level; + + LOG_IRQ("%s: env %p pin %d level %d\n", __func__, + env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; + /* Don't generate spurious events */ + if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { + CPUState *cs = CPU(cpu); + + switch (pin) { + case PPC6xx_INPUT_TBEN: + /* Level sensitive - active high */ + LOG_IRQ("%s: %s the time base\n", + __func__, level ? 
"start" : "stop"); + if (level) { + cpu_ppc_tb_start(env); + } else { + cpu_ppc_tb_stop(env); + } + case PPC6xx_INPUT_INT: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the external IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); + break; + case PPC6xx_INPUT_SMI: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the SMI IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level); + break; + case PPC6xx_INPUT_MCP: + /* Negative edge sensitive */ + /* XXX: TODO: actual reaction may depends on HID0 status + * 603/604/740/750: check HID0[EMCP] + */ + if (cur_level == 1 && level == 0) { + LOG_IRQ("%s: raise machine check state\n", + __func__); + ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); + } + break; + case PPC6xx_INPUT_CKSTP_IN: + /* Level sensitive - active low */ + /* XXX: TODO: relay the signal to CKSTP_OUT pin */ + /* XXX: Note that the only way to restart the CPU is to reset it */ + if (level) { + LOG_IRQ("%s: stop the CPU\n", __func__); + cs->halted = 1; + } + break; + case PPC6xx_INPUT_HRESET: + /* Level sensitive - active low */ + if (level) { + LOG_IRQ("%s: reset the CPU\n", __func__); + cpu_interrupt(cs, CPU_INTERRUPT_RESET); + } + break; + case PPC6xx_INPUT_SRESET: + LOG_IRQ("%s: set the RESET IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); + break; + default: + /* Unknown pin - do nothing */ + LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); + return; + } + if (level) + env->irq_input_state |= 1 << pin; + else + env->irq_input_state &= ~(1 << pin); + } +} +#endif + +void ppc6xx_irq_init(PowerPCCPU *cpu) +{ +#if 0 + CPUPPCState *env = &cpu->env; + + env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu, + PPC6xx_INPUT_NB); +#endif +} + +#if defined(TARGET_PPC64) +#if 0 +/* PowerPC 970 internal IRQ controller */ +static void ppc970_set_irq(void *opaque, int pin, int level) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + int cur_level; + + LOG_IRQ("%s: env %p pin %d level %d\n", __func__, + env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; + /* Don't generate spurious events */ + if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { + CPUState *cs = CPU(cpu); + + switch (pin) { + case PPC970_INPUT_INT: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the external IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); + break; + case PPC970_INPUT_THINT: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__, + level); + ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level); + break; + case PPC970_INPUT_MCP: + /* Negative edge sensitive */ + /* XXX: TODO: actual reaction may depends on HID0 status + * 603/604/740/750: check HID0[EMCP] + */ + if (cur_level == 1 && level == 0) { + LOG_IRQ("%s: raise machine check state\n", + __func__); + ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); + } + break; + case PPC970_INPUT_CKSTP: + /* Level sensitive - active low */ + /* XXX: TODO: relay the signal to CKSTP_OUT pin */ + if (level) { + LOG_IRQ("%s: stop the CPU\n", __func__); + cs->halted = 1; + } else { + LOG_IRQ("%s: restart the CPU\n", __func__); + cs->halted = 0; +// qemu_cpu_kick(cs); + } + break; + case PPC970_INPUT_HRESET: + /* Level sensitive - active low */ + if (level) { + cpu_interrupt(cs, CPU_INTERRUPT_RESET); + } + break; + case PPC970_INPUT_SRESET: + LOG_IRQ("%s: set the RESET IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, 
PPC_INTERRUPT_RESET, level); + break; + case PPC970_INPUT_TBEN: + LOG_IRQ("%s: set the TBEN state to %d\n", __func__, + level); + /* XXX: TODO */ + break; + default: + /* Unknown pin - do nothing */ + LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); + return; + } + if (level) + env->irq_input_state |= 1 << pin; + else + env->irq_input_state &= ~(1 << pin); + } +} +#endif + +void ppc970_irq_init(PowerPCCPU *cpu) +{ +#if 0 + CPUPPCState *env = &cpu->env; + + env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu, + PPC970_INPUT_NB); +#endif +} + +#if 0 +/* POWER7 internal IRQ controller */ +static void power7_set_irq(void *opaque, int pin, int level) +{ + PowerPCCPU *cpu = opaque; + + LOG_IRQ("%s: env %p pin %d level %d\n", __func__, + &cpu->env, pin, level); + + switch (pin) { + case POWER7_INPUT_INT: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the external IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); + break; + default: + /* Unknown pin - do nothing */ + LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); + return; + } +} +#endif + +void ppcPOWER7_irq_init(PowerPCCPU *cpu) +{ +#if 0 + CPUPPCState *env = &cpu->env; + + env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu, + POWER7_INPUT_NB); +#endif +} + +#if 0 +/* POWER9 internal IRQ controller */ +static void power9_set_irq(void *opaque, int pin, int level) +{ + PowerPCCPU *cpu = opaque; + + LOG_IRQ("%s: env %p pin %d level %d\n", __func__, + &cpu->env, pin, level); + + switch (pin) { + case POWER9_INPUT_INT: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the external IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); + break; + case POWER9_INPUT_HINT: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the external IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level); + break; + default: + /* Unknown pin - do nothing */ + LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); + return; + } +} +#endif + +void ppcPOWER9_irq_init(PowerPCCPU *cpu) +{ +#if 0 + CPUPPCState *env = &cpu->env; + + env->irq_inputs = (void **)qemu_allocate_irqs(&power9_set_irq, cpu, + POWER9_INPUT_NB); +#endif +} +#endif /* defined(TARGET_PPC64) */ + +void ppc40x_core_reset(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + target_ulong dbsr; + +// qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n"); + cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET); + dbsr = env->spr[SPR_40x_DBSR]; + dbsr &= ~0x00000300; + dbsr |= 0x00000100; + env->spr[SPR_40x_DBSR] = dbsr; +} + +void ppc40x_chip_reset(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + target_ulong dbsr; + +// qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n"); + cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET); + /* XXX: TODO reset all internal peripherals */ + dbsr = env->spr[SPR_40x_DBSR]; + dbsr &= ~0x00000300; + dbsr |= 0x00000200; + env->spr[SPR_40x_DBSR] = dbsr; +} + +void ppc40x_system_reset(PowerPCCPU *cpu) +{ +// qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n"); +// qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); +} + +void store_40x_dbcr0(CPUPPCState *env, uint32_t val) +{ + PowerPCCPU *cpu = env_archcpu(env); + + switch ((val >> 28) & 0x3) { + case 0x0: + /* No action */ + break; + case 0x1: + /* Core reset */ + ppc40x_core_reset(cpu); + break; + case 0x2: + /* Chip reset */ + ppc40x_chip_reset(cpu); + break; + case 0x3: + /* System reset */ + ppc40x_system_reset(cpu); + break; + } +} + +#if 0 +/* PowerPC 40x internal IRQ 
controller */ +static void ppc40x_set_irq(void *opaque, int pin, int level) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + int cur_level; + + LOG_IRQ("%s: env %p pin %d level %d\n", __func__, + env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; + /* Don't generate spurious events */ + if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { + CPUState *cs = CPU(cpu); + + switch (pin) { + case PPC40x_INPUT_RESET_SYS: + if (level) { + LOG_IRQ("%s: reset the PowerPC system\n", + __func__); + ppc40x_system_reset(cpu); + } + break; + case PPC40x_INPUT_RESET_CHIP: + if (level) { + LOG_IRQ("%s: reset the PowerPC chip\n", __func__); + ppc40x_chip_reset(cpu); + } + break; + case PPC40x_INPUT_RESET_CORE: + /* XXX: TODO: update DBSR[MRR] */ + if (level) { + LOG_IRQ("%s: reset the PowerPC core\n", __func__); + ppc40x_core_reset(cpu); + } + break; + case PPC40x_INPUT_CINT: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the critical IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); + break; + case PPC40x_INPUT_INT: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the external IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); + break; + case PPC40x_INPUT_HALT: + /* Level sensitive - active low */ + if (level) { + LOG_IRQ("%s: stop the CPU\n", __func__); + cs->halted = 1; + } else { + LOG_IRQ("%s: restart the CPU\n", __func__); + cs->halted = 0; +// qemu_cpu_kick(cs); + } + break; + case PPC40x_INPUT_DEBUG: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the debug pin state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); + break; + default: + /* Unknown pin - do nothing */ + LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); + return; + } + if (level) + env->irq_input_state |= 1 << pin; + else + env->irq_input_state &= ~(1 << pin); + } +} +#endif + +void ppc40x_irq_init(PowerPCCPU *cpu) +{ +#if 0 + CPUPPCState *env = &cpu->env; + + env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq, + cpu, PPC40x_INPUT_NB); +#endif +} + +#if 0 +/* PowerPC E500 internal IRQ controller */ +static void ppce500_set_irq(void *opaque, int pin, int level) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + int cur_level; + + LOG_IRQ("%s: env %p pin %d level %d\n", __func__, + env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; + /* Don't generate spurious events */ + if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { + switch (pin) { + case PPCE500_INPUT_MCK: + if (level) { + LOG_IRQ("%s: reset the PowerPC system\n", + __func__); +// qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + break; + case PPCE500_INPUT_RESET_CORE: + if (level) { + LOG_IRQ("%s: reset the PowerPC core\n", __func__); + ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level); + } + break; + case PPCE500_INPUT_CINT: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the critical IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); + break; + case PPCE500_INPUT_INT: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the core IRQ state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); + break; + case PPCE500_INPUT_DEBUG: + /* Level sensitive - active high */ + LOG_IRQ("%s: set the debug pin state to %d\n", + __func__, level); + ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); + break; + default: + /* Unknown pin - do nothing */ + LOG_IRQ("%s: unknown IRQ pin 
%d\n", __func__, pin); + return; + } + if (level) + env->irq_input_state |= 1 << pin; + else + env->irq_input_state &= ~(1 << pin); + } +} +#endif + +void ppce500_irq_init(PowerPCCPU *cpu) +{ +#if 0 + CPUPPCState *env = &cpu->env; + + env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq, + cpu, PPCE500_INPUT_NB); +#endif +} + +/* Enable or Disable the E500 EPR capability */ +void ppce500_set_mpic_proxy(bool enabled) +{ +#if 0 + CPUState *cs; + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + cpu->env.mpic_proxy = enabled; + if (kvm_enabled()) { + kvmppc_set_mpic_proxy(cpu, enabled); + } + } +#endif +} + +/*****************************************************************************/ +/* PowerPC time base and decrementer emulation */ + +uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset) +{ + /* TB time in tb periods */ + return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset; +} + +uint64_t cpu_ppc_load_tbl (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + +#if 0 + if (kvm_enabled()) { + return env->spr[SPR_TBL]; + } +#endif + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); + LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + + return tb; +} + +static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); + LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + + return tb >> 32; +} + +uint32_t cpu_ppc_load_tbu (CPUPPCState *env) +{ +#if 0 + if (kvm_enabled()) { + return env->spr[SPR_TBU]; + } +#endif + + return _cpu_ppc_load_tbu(env); +} + +static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk, + int64_t *tb_offsetp, uint64_t value) +{ + *tb_offsetp = value - + muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND); + + LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n", + __func__, value, *tb_offsetp); +} + +void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); + tb &= 0xFFFFFFFF00000000ULL; + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->tb_offset, tb | (uint64_t)value); +} + +static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); + tb &= 0x00000000FFFFFFFFULL; + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->tb_offset, ((uint64_t)value << 32) | tb); +} + +void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value) +{ + _cpu_ppc_store_tbu(env, value); +} + +uint64_t cpu_ppc_load_atbl (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); + LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + + return tb; +} + +uint32_t cpu_ppc_load_atbu (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); + LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + + return tb >> 32; +} + +void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, 
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); + tb &= 0xFFFFFFFF00000000ULL; + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->atb_offset, tb | (uint64_t)value); +} + +void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); + tb &= 0x00000000FFFFFFFFULL; + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->atb_offset, ((uint64_t)value << 32) | tb); +} + +uint64_t cpu_ppc_load_vtb(CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + + return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + tb_env->vtb_offset); +} + +void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->vtb_offset, value); +} + +void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + tb_env->tb_offset); + tb &= 0xFFFFFFUL; + tb |= (value & ~0xFFFFFFUL); + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->tb_offset, tb); +} + +#if 0 +static void cpu_ppc_tb_stop (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb, atb, vmclk; + + /* If the time base is already frozen, do nothing */ + if (tb_env->tb_freq != 0) { + vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + /* Get the time base */ + tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset); + /* Get the alternate time base */ + atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset); + /* Store the time base value (ie compute the current offset) */ + cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb); + /* Store the alternate time base value (compute the current offset) */ + cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb); + /* Set the time base frequency to zero */ + tb_env->tb_freq = 0; + /* Now, the time bases are frozen to tb_offset / atb_offset value */ + } +} + +static void cpu_ppc_tb_start (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb, atb, vmclk; + + /* If the time base is not frozen, do nothing */ + if (tb_env->tb_freq == 0) { + vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + /* Get the time base from tb_offset */ + tb = tb_env->tb_offset; + /* Get the alternate time base from atb_offset */ + atb = tb_env->atb_offset; + /* Restore the tb frequency from the decrementer frequency */ + tb_env->tb_freq = tb_env->decr_freq; + /* Store the time base value */ + cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb); + /* Store the alternate time base value */ + cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb); + } +} +#endif + +bool ppc_decr_clear_on_delivery(CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL; + return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED); +} + +static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next) +{ + ppc_tb_t *tb_env = env->tb_env; + int64_t decr, diff; + + diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + if (diff >= 0) { + decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND); + } else if (tb_env->flags & PPC_TIMER_BOOKE) { + decr = 0; + } else { +#ifdef _MSC_VER + decr = 0 - muldiv64(0 - diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND); +#else + decr = 
-muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND); +#endif + } + LOG_TB("%s: %016" PRIx64 "\n", __func__, decr); + + return decr; +} + +target_ulong cpu_ppc_load_decr(CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t decr; + +#if 0 + if (kvm_enabled()) { + return env->spr[SPR_DECR]; + } +#endif + + decr = _cpu_ppc_load_decr(env, tb_env->decr_next); + + /* + * If the large decrementer is enabled then the decrementer is sign extended + * to 64 bits, otherwise it is a 32 bit value. + */ + if (env->spr[SPR_LPCR] & LPCR_LD) { + return decr; + } + return (uint32_t) decr; +} + +target_ulong cpu_ppc_load_hdecr(CPUPPCState *env) +{ + PowerPCCPU *cpu = env_archcpu(env); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + ppc_tb_t *tb_env = env->tb_env; + uint64_t hdecr; + + hdecr = _cpu_ppc_load_decr(env, tb_env->hdecr_next); + + /* + * If we have a large decrementer (POWER9 or later) then hdecr is sign + * extended to 64 bits, otherwise it is 32 bits. + */ + if (pcc->lrg_decr_bits > 32) { + return hdecr; + } + return (uint32_t) hdecr; +} + +uint64_t cpu_ppc_load_purr (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + + return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + tb_env->purr_offset); +} + +/* When the decrementer expires, + * all we need to do is generate or queue a CPU exception. + */ +static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu) +{ + /* Raise it */ + LOG_TB("raise decrementer exception\n"); + ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1); +} + +static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu) +{ + ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0); +} + +static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + + /* Raise it */ + LOG_TB("raise hv decrementer exception\n"); + + /* The architecture specifies that we don't deliver HDEC + * interrupts in a PM state. Not only do they not cause a + * wakeup, but they also get effectively discarded. + */ + if (!env->resume_as_sreset) { + ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1); + } +} + +static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu) +{ + ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0); +} + +static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, + QEMUTimer *timer, + void (*raise_excp)(void *), + void (*lower_excp)(PowerPCCPU *), + target_ulong decr, target_ulong value, + int nr_bits) +{ +#if 0 + CPUPPCState *env = &cpu->env; + ppc_tb_t *tb_env = env->tb_env; + uint64_t now, next; + bool negative; + + /* Truncate value to decr_width and sign extend for simplicity */ + value &= ((1ULL << nr_bits) - 1); + negative = !!(value & (1ULL << (nr_bits - 1))); + if (negative) { + value |= (0xFFFFFFFFULL << nr_bits); + } + + LOG_TB("%s: " TARGET_FMT_lx " => " TARGET_FMT_lx "\n", __func__, + decr, value); + +#if 0 + if (kvm_enabled()) { + /* KVM handles decrementer exceptions, we don't need our own timer */ + return; + } +#endif + + /* + * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event that generates a DEC + * interrupt. + * + * If we get a really small DEC value, we can assume that by the time we + * handle it, an interrupt should already have been injected. + * + * On MSB level based DEC implementations the MSB always means the interrupt + * is pending, so raise it on those. + * + * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers + * an edge interrupt, so raise it here too.
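To make the truncate-and-sign-extend step above concrete: a minimal standalone sketch with invented values, not part of the patch, assuming nr_bits < 64 so both shifts stay defined. It mirrors the bit manipulation in __cpu_ppc_store_decr() and nothing else.

#include <stdint.h>
#include <stdio.h>

// Truncate a DECR value to nr_bits, then sign extend into 64 bits the same
// way the code above does (the patch extends with a 32-bit mask, which is
// enough for the decrementer widths it actually uses).
static uint64_t decr_sign_extend(uint64_t value, int nr_bits)
{
    value &= (1ULL << nr_bits) - 1;             // truncate to decr width
    if (value & (1ULL << (nr_bits - 1))) {      // MSB set, i.e. negative
        value |= 0xFFFFFFFFULL << nr_bits;      // sign extend
    }
    return value;
}

int main(void)
{
    // 0xFFFFFFFF in a 32-bit DECR is "-1": the MSB must survive widening.
    printf("%016llx\n", (unsigned long long)decr_sign_extend(0xFFFFFFFFULL, 32));
    // A small positive value is left untouched.
    printf("%016llx\n", (unsigned long long)decr_sign_extend(0x2ULL, 32));
    return 0;
}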
+ */ + if ((value < 3) || + ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && negative) || + ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && negative + && !(decr & (1ULL << (nr_bits - 1))))) { + (*raise_excp)(cpu); + return; + } + + /* On MSB level based systems a 0 for the MSB stops interrupt delivery */ + if (!negative && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) { + (*lower_excp)(cpu); + } + + /* Calculate the next timer event */ + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq); + *nextp = next; + + /* Adjust timer */ + timer_mod(timer, next); +#endif +} + +static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr, + target_ulong value, int nr_bits) +{ + ppc_tb_t *tb_env = cpu->env.tb_env; + + __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer, + tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr, + value, nr_bits); +} + +void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value) +{ + PowerPCCPU *cpu = env_archcpu(env); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + int nr_bits = 32; + + if (env->spr[SPR_LPCR] & LPCR_LD) { + nr_bits = pcc->lrg_decr_bits; + } + + _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits); +} + +static void cpu_ppc_decr_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + cpu_ppc_decr_excp(cpu); +} + +static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr, + target_ulong value, int nr_bits) +{ + ppc_tb_t *tb_env = cpu->env.tb_env; + + if (tb_env->hdecr_timer != NULL) { + __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer, + tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower, + hdecr, value, nr_bits); + } +} + +void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value) +{ + PowerPCCPU *cpu = env_archcpu(env); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value, + pcc->lrg_decr_bits); +} + +static void cpu_ppc_hdecr_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + cpu_ppc_hdecr_excp(cpu); +} + +void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->purr_offset, value); +} + +static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq) +{ + CPUPPCState *env = opaque; + PowerPCCPU *cpu = env_archcpu(env); + ppc_tb_t *tb_env = env->tb_env; + + tb_env->tb_freq = freq; + tb_env->decr_freq = freq; + /* There is a bug in Linux 2.4 kernels: + * if a decrementer exception is pending when it enables msr_ee at startup, + * it's not ready to handle it... 
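The deadline arithmetic in __cpu_ppc_store_decr() above is easy to sanity-check with concrete numbers: writing value ticks arms the timer value / decr_freq seconds into the future. A minimal sketch; the 16 MHz decrementer clock and the zero "now" are invented for illustration, and muldiv64_sketch() stands in for QEMU's muldiv64() using a 128-bit intermediate (a GCC/Clang extension).

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

// (a * b) / c without overflowing 64 bits, like QEMU's muldiv64().
static uint64_t muldiv64_sketch(uint64_t a, uint32_t b, uint32_t c)
{
    return (uint64_t)(((unsigned __int128)a * b) / c);
}

int main(void)
{
    uint64_t now_ns    = 0;         // pretend QEMU_CLOCK_VIRTUAL reads zero
    uint32_t decr_freq = 16000000;  // hypothetical 16 MHz decrementer clock
    uint64_t value     = 0x10000;   // ticks written to DECR
    uint64_t next_ns   = now_ns + muldiv64_sketch(value, NSEC_PER_SEC, decr_freq);

    // 65536 ticks at 16 MHz is 4.096 ms, so this prints 4096000.
    printf("timer fires at +%llu ns\n", (unsigned long long)next_ns);
    return 0;
}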
+ */ + _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32); + _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32); + cpu_ppc_store_purr(env, 0x0000000000000000ULL); +} + +#if 0 +static void timebase_save(PPCTimebase *tb) +{ + uint64_t ticks = cpu_get_host_ticks(); + PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); + + if (!first_ppc_cpu->env.tb_env) { +// error_report("No timebase object"); + return; + } + + /* not used anymore, we keep it for compatibility */ + tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST); + /* + * tb_offset is only expected to be changed by QEMU so + * there is no need to update it from KVM here + */ + tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset; + + tb->runstate_paused = runstate_check(RUN_STATE_PAUSED); +} + +static void timebase_load(PPCTimebase *tb) +{ + CPUState *cpu; + PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); + int64_t tb_off_adj, tb_off; + unsigned long freq; + + if (!first_ppc_cpu->env.tb_env) { +// error_report("No timebase object"); + return; + } + + freq = first_ppc_cpu->env.tb_env->tb_freq; + + tb_off_adj = tb->guest_timebase - cpu_get_host_ticks(); + + tb_off = first_ppc_cpu->env.tb_env->tb_offset; + trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off, + (tb_off_adj - tb_off) / freq); + + /* Set new offset to all CPUs */ + CPU_FOREACH(cpu) { + PowerPCCPU *pcpu = POWERPC_CPU(cpu); + pcpu->env.tb_env->tb_offset = tb_off_adj; + kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset); + } +} + +void cpu_ppc_clock_vm_state_change(void *opaque, int running, + RunState state) +{ + PPCTimebase *tb = opaque; + + if (running) { + timebase_load(tb); + } else { + timebase_save(tb); + } +} + +/* + * When migrating a running guest, read the clock just + * before migration, so that the guest clock counts + * during the events between: + * + * * vm_stop() + * * + * * pre_save() + * + * This reduces clock difference on migration from 5s + * to 0.1s (when max_downtime == 5s), because sending the + * final pages of memory (which happens between vm_stop() + * and pre_save()) takes max_downtime. 
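The save/load pair above keeps the guest timebase continuous across a stop/start by rebasing tb_offset against the destination's host tick counter. The arithmetic reduces to two subtractions; a toy example with invented tick values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    // At save time: record where the guest timebase currently is.
    uint64_t host_ticks_save = 1000000;
    int64_t  tb_offset       = 500;   // guest_tb = host_ticks + tb_offset
    uint64_t guest_timebase  = host_ticks_save + (uint64_t)tb_offset;

    // At load time, possibly much later: pick the offset that makes
    // host_ticks + tb_offset land on the saved guest timebase again.
    uint64_t host_ticks_load = 4000000;
    int64_t  tb_off_adj      = (int64_t)(guest_timebase - host_ticks_load);

    // Prints 1000500 twice: the guest never sees the gap.
    printf("saved   %llu\n", (unsigned long long)guest_timebase);
    printf("resumed %llu\n",
           (unsigned long long)(host_ticks_load + (uint64_t)tb_off_adj));
    return 0;
}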
+ */ +static int timebase_pre_save(void *opaque) +{ + PPCTimebase *tb = opaque; + + /* guest_timebase won't be overridden in case of paused guest */ + if (!tb->runstate_paused) { + timebase_save(tb); + } + + return 0; +} + +const VMStateDescription vmstate_ppc_timebase = { + .name = "timebase", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .pre_save = timebase_pre_save, + .fields = (VMStateField []) { + VMSTATE_UINT64(guest_timebase, PPCTimebase), + VMSTATE_INT64(time_of_the_day_ns, PPCTimebase), + VMSTATE_END_OF_LIST() + }, +}; +#endif + +/* Set up (once) timebase frequency (in Hz) */ +clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq) +{ + PowerPCCPU *cpu = env_archcpu(env); + ppc_tb_t *tb_env; + + tb_env = g_malloc0(sizeof(ppc_tb_t)); + env->tb_env = tb_env; + tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; + if (is_book3s_arch2x(env)) { + /* All Book3S 64bit CPUs implement level based DEC logic */ + tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL; + } + /* Create new timer */ + tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu); + if (env->has_hv_mode) { + tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb, + cpu); + } else { + tb_env->hdecr_timer = NULL; + } + cpu_ppc_set_tb_clk(env, freq); + + return &cpu_ppc_set_tb_clk; +} + +/* Specific helpers for POWER & PowerPC 601 RTC */ +void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value) +{ + _cpu_ppc_store_tbu(env, value); +} + +uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env) +{ + return _cpu_ppc_load_tbu(env); +} + +void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value) +{ + cpu_ppc_store_tbl(env, value & 0x3FFFFF80); +} + +uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env) +{ + return cpu_ppc_load_tbl(env) & 0x3FFFFF80; +} + +/*****************************************************************************/ +/* PowerPC 40x timers */ + +/* PIT, FIT & WDT */ +typedef struct ppc40x_timer_t ppc40x_timer_t; +struct ppc40x_timer_t { + uint64_t pit_reload; /* PIT auto-reload value */ + uint64_t fit_next; /* Tick for next FIT interrupt */ + QEMUTimer *fit_timer; + uint64_t wdt_next; /* Tick for next WDT interrupt */ + QEMUTimer *wdt_timer; + + /* 405 have the PIT, 440 have a DECR. 
*/ + unsigned int decr_excp; +}; + +#if 0 +/* Fixed interval timer */ +static void cpu_4xx_fit_cb (void *opaque) +{ + PowerPCCPU *cpu; + CPUPPCState *env; + ppc_tb_t *tb_env; + ppc40x_timer_t *ppc40x_timer; + uint64_t now, next; + + env = opaque; + cpu = env_archcpu(env); + tb_env = env->tb_env; + ppc40x_timer = tb_env->opaque; + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) { + case 0: + next = 1 << 9; + break; + case 1: + next = 1 << 13; + break; + case 2: + next = 1 << 17; + break; + case 3: + next = 1 << 21; + break; + default: + /* Cannot occur, but makes gcc happy */ + return; + } + next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq); + if (next == now) + next++; + timer_mod(ppc40x_timer->fit_timer, next); + env->spr[SPR_40x_TSR] |= 1 << 26; + if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) { + ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1); + } + LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, + (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1), + env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); +} +#endif + +/* Programmable interval timer */ +static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp) +{ +#if 0 + ppc40x_timer_t *ppc40x_timer; + uint64_t now, next; + + ppc40x_timer = tb_env->opaque; + if (ppc40x_timer->pit_reload <= 1 || + !((env->spr[SPR_40x_TCR] >> 26) & 0x1) || + (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) { + /* Stop PIT */ + LOG_TB("%s: stop PIT\n", __func__); + timer_del(tb_env->decr_timer); + } else { + LOG_TB("%s: start PIT %016" PRIx64 "\n", + __func__, ppc40x_timer->pit_reload); + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + next = now + muldiv64(ppc40x_timer->pit_reload, + NANOSECONDS_PER_SECOND, tb_env->decr_freq); + if (is_excp) + next += tb_env->decr_next - now; + if (next == now) + next++; + timer_mod(tb_env->decr_timer, next); + tb_env->decr_next = next; + } +#endif +} + +#if 0 +static void cpu_4xx_pit_cb (void *opaque) +{ + PowerPCCPU *cpu; + CPUPPCState *env; + ppc_tb_t *tb_env; + ppc40x_timer_t *ppc40x_timer; + + env = opaque; + cpu = env_archcpu(env); + tb_env = env->tb_env; + ppc40x_timer = tb_env->opaque; + env->spr[SPR_40x_TSR] |= 1 << 27; + if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) { + ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1); + } + start_stop_pit(env, tb_env, 1); + LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " " + "%016" PRIx64 "\n", __func__, + (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1), + (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1), + env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR], + ppc40x_timer->pit_reload); +} + +/* Watchdog timer */ +static void cpu_4xx_wdt_cb (void *opaque) +{ + PowerPCCPU *cpu; + CPUPPCState *env; + ppc_tb_t *tb_env; + ppc40x_timer_t *ppc40x_timer; + uint64_t now, next; + + env = opaque; + cpu = env_archcpu(env); + tb_env = env->tb_env; + ppc40x_timer = tb_env->opaque; + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) { + case 0: + next = 1 << 17; + break; + case 1: + next = 1 << 21; + break; + case 2: + next = 1 << 25; + break; + case 3: + next = 1 << 29; + break; + default: + /* Cannot occur, but makes gcc happy */ + return; + } + next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq); + if (next == now) + next++; + LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, + env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); + switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) { + case 0x0: + case 0x1: + timer_mod(ppc40x_timer->wdt_timer, 
next); + ppc40x_timer->wdt_next = next; + env->spr[SPR_40x_TSR] |= 1U << 31; + break; + case 0x2: + timer_mod(ppc40x_timer->wdt_timer, next); + ppc40x_timer->wdt_next = next; + env->spr[SPR_40x_TSR] |= 1 << 30; + if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) { + ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1); + } + break; + case 0x3: + env->spr[SPR_40x_TSR] &= ~0x30000000; + env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000; + switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) { + case 0x0: + /* No reset */ + break; + case 0x1: /* Core reset */ + ppc40x_core_reset(cpu); + break; + case 0x2: /* Chip reset */ + ppc40x_chip_reset(cpu); + break; + case 0x3: /* System reset */ + ppc40x_system_reset(cpu); + break; + } + } +} +#endif + +void store_40x_pit (CPUPPCState *env, target_ulong val) +{ + ppc_tb_t *tb_env; + ppc40x_timer_t *ppc40x_timer; + + tb_env = env->tb_env; + ppc40x_timer = tb_env->opaque; + LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val); + ppc40x_timer->pit_reload = val; + start_stop_pit(env, tb_env, 0); +} + +target_ulong load_40x_pit (CPUPPCState *env) +{ + return cpu_ppc_load_decr(env); +} + +static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq) +{ + CPUPPCState *env = opaque; + ppc_tb_t *tb_env = env->tb_env; + + LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__, + freq); + tb_env->tb_freq = freq; + tb_env->decr_freq = freq; + /* XXX: we should also update all timers */ +} + +clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq, + unsigned int decr_excp) +{ +#if 0 + ppc_tb_t *tb_env; + ppc40x_timer_t *ppc40x_timer; + + tb_env = g_malloc0(sizeof(ppc_tb_t)); + env->tb_env = tb_env; + tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; + ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t)); + tb_env->tb_freq = freq; + tb_env->decr_freq = freq; + tb_env->opaque = ppc40x_timer; + LOG_TB("%s freq %" PRIu32 "\n", __func__, freq); + if (ppc40x_timer != NULL) { + /* We use decr timer for PIT */ + tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env); + ppc40x_timer->fit_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env); + ppc40x_timer->wdt_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env); + ppc40x_timer->decr_excp = decr_excp; + } +#endif + + return &ppc_40x_set_tb_clk; +} + +/*****************************************************************************/ +/* Embedded PowerPC Device Control Registers */ +typedef struct ppc_dcrn_t ppc_dcrn_t; +struct ppc_dcrn_t { + dcr_read_cb dcr_read; + dcr_write_cb dcr_write; + void *opaque; +}; + +/* XXX: on 460, DCR addresses are 32 bits wide, + * using DCRIPR to get the 22 upper bits of the DCR address + */ +#define DCRN_NB 1024 +struct ppc_dcr_t { + ppc_dcrn_t dcrn[DCRN_NB]; + int (*read_error)(int dcrn); + int (*write_error)(int dcrn); +}; + +int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp) +{ + ppc_dcrn_t *dcr; + + if (dcrn < 0 || dcrn >= DCRN_NB) + goto error; + dcr = &dcr_env->dcrn[dcrn]; + if (dcr->dcr_read == NULL) + goto error; + *valp = (*dcr->dcr_read)(dcr->opaque, dcrn); + + return 0; + + error: + if (dcr_env->read_error != NULL) + return (*dcr_env->read_error)(dcrn); + + return -1; +} + +int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val) +{ + ppc_dcrn_t *dcr; + + if (dcrn < 0 || dcrn >= DCRN_NB) + goto error; + dcr = &dcr_env->dcrn[dcrn]; + if (dcr->dcr_write == NULL) + goto error; + (*dcr->dcr_write)(dcr->opaque, dcrn, val); + + return 0; + + error: + if (dcr_env->write_error != NULL) + return (*dcr_env->write_error)(dcrn); + + return -1; +} + 
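ppc_dcr_read() and ppc_dcr_write() above, together with ppc_dcr_register() just below, are a plain bounds-checked table-of-callbacks dispatch. A self-contained miniature of the same pattern; the handler name and the 0x100 register number are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define DCRN_NB_DEMO 1024

typedef uint32_t (*dcr_read_fn)(void *opaque, int dcrn);

static dcr_read_fn read_cb[DCRN_NB_DEMO];      // one slot per DCR number
static void       *read_opaque[DCRN_NB_DEMO];

static uint32_t my_device_status(void *opaque, int dcrn)
{
    (void)opaque;
    (void)dcrn;
    return 0xdeadbeef;   // pretend device status register
}

int main(void)
{
    int dcrn = 0x100;    // hypothetical device control register number

    // "register": claim the slot, as ppc_dcr_register() does after its checks.
    read_cb[dcrn]     = my_device_status;
    read_opaque[dcrn] = NULL;

    // "read": bounds-check, reject empty slots, then make the indirect call,
    // as ppc_dcr_read() does.
    uint32_t val = 0;
    if (dcrn >= 0 && dcrn < DCRN_NB_DEMO && read_cb[dcrn] != NULL) {
        val = read_cb[dcrn](read_opaque[dcrn], dcrn);
    }
    printf("DCR[0x%x] = 0x%08x\n", dcrn, val);
    return 0;
}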
+int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque, + dcr_read_cb dcr_read, dcr_write_cb dcr_write) +{ + ppc_dcr_t *dcr_env; + ppc_dcrn_t *dcr; + + dcr_env = env->dcr_env; + if (dcr_env == NULL) + return -1; + if (dcrn < 0 || dcrn >= DCRN_NB) + return -1; + dcr = &dcr_env->dcrn[dcrn]; + if (dcr->opaque != NULL || + dcr->dcr_read != NULL || + dcr->dcr_write != NULL) + return -1; + dcr->opaque = opaque; + dcr->dcr_read = dcr_read; + dcr->dcr_write = dcr_write; + + return 0; +} + +int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn), + int (*write_error)(int dcrn)) +{ + ppc_dcr_t *dcr_env; + + dcr_env = g_malloc0(sizeof(ppc_dcr_t)); + dcr_env->read_error = read_error; + dcr_env->write_error = write_error; + env->dcr_env = dcr_env; + + return 0; +} + +/*****************************************************************************/ + +int ppc_cpu_pir(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + return env->spr_cb[SPR_PIR].default_value; +} + +#if 0 +PowerPCCPU *ppc_get_vcpu_by_pir(int pir) +{ + CPUState *cs; + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + if (ppc_cpu_pir(cpu) == pir) { + return cpu; + } + } + + return NULL; +} +#endif + +void ppc_irq_reset(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + + env->irq_input_state = 0; +// kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0); +} diff --git a/qemu/hw/ppc/ppc_booke.c b/qemu/hw/ppc/ppc_booke.c new file mode 100644 index 00000000..ad780742 --- /dev/null +++ b/qemu/hw/ppc/ppc_booke.c @@ -0,0 +1,373 @@ +/* + * QEMU PowerPC Booke hardware System Emulator + * + * Copyright (c) 2011 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/ppc/ppc.h" +#include "qemu/timer.h" +#include "qemu/log.h" +// #include "kvm_ppc.h" + + +/* Timer Control Register */ + +#define TCR_WP_SHIFT 30 /* Watchdog Timer Period */ +#define TCR_WP_MASK (0x3U << TCR_WP_SHIFT) +#define TCR_WRC_SHIFT 28 /* Watchdog Timer Reset Control */ +#define TCR_WRC_MASK (0x3U << TCR_WRC_SHIFT) +#define TCR_WIE (1U << 27) /* Watchdog Timer Interrupt Enable */ +#define TCR_DIE (1U << 26) /* Decrementer Interrupt Enable */ +#define TCR_FP_SHIFT 24 /* Fixed-Interval Timer Period */ +#define TCR_FP_MASK (0x3U << TCR_FP_SHIFT) +#define TCR_FIE (1U << 23) /* Fixed-Interval Timer Interrupt Enable */ +#define TCR_ARE (1U << 22) /* Auto-Reload Enable */ + +/* Timer Control Register (e500 specific fields) */ + +#define TCR_E500_FPEXT_SHIFT 13 /* Fixed-Interval Timer Period Extension */ +#define TCR_E500_FPEXT_MASK (0xf << TCR_E500_FPEXT_SHIFT) +#define TCR_E500_WPEXT_SHIFT 17 /* Watchdog Timer Period Extension */ +#define TCR_E500_WPEXT_MASK (0xf << TCR_E500_WPEXT_SHIFT) + +/* Timer Status Register */ + +#define TSR_FIS (1U << 26) /* Fixed-Interval Timer Interrupt Status */ +#define TSR_DIS (1U << 27) /* Decrementer Interrupt Status */ +#define TSR_WRS_SHIFT 28 /* Watchdog Timer Reset Status */ +#define TSR_WRS_MASK (0x3U << TSR_WRS_SHIFT) +#define TSR_WIS (1U << 30) /* Watchdog Timer Interrupt Status */ +#define TSR_ENW (1U << 31) /* Enable Next Watchdog Timer */ + +typedef struct booke_timer_t booke_timer_t; +struct booke_timer_t { + + uint64_t fit_next; + QEMUTimer *fit_timer; + + uint64_t wdt_next; + QEMUTimer *wdt_timer; + + uint32_t flags; +}; + +static void booke_update_irq(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + + ppc_set_irq(cpu, PPC_INTERRUPT_DECR, + (env->spr[SPR_BOOKE_TSR] & TSR_DIS + && env->spr[SPR_BOOKE_TCR] & TCR_DIE)); + + ppc_set_irq(cpu, PPC_INTERRUPT_WDT, + (env->spr[SPR_BOOKE_TSR] & TSR_WIS + && env->spr[SPR_BOOKE_TCR] & TCR_WIE)); + + ppc_set_irq(cpu, PPC_INTERRUPT_FIT, + (env->spr[SPR_BOOKE_TSR] & TSR_FIS + && env->spr[SPR_BOOKE_TCR] & TCR_FIE)); +} + +/* Return the location of the bit of time base at which the FIT will raise an + interrupt */ +static uint8_t booke_get_fit_target(CPUPPCState *env, ppc_tb_t *tb_env) +{ + uint8_t fp = (env->spr[SPR_BOOKE_TCR] & TCR_FP_MASK) >> TCR_FP_SHIFT; + + if (tb_env->flags & PPC_TIMER_E500) { + /* e500 Fixed-interval timer period extension */ + uint32_t fpext = (env->spr[SPR_BOOKE_TCR] & TCR_E500_FPEXT_MASK) + >> TCR_E500_FPEXT_SHIFT; + fp = 63 - (fp | fpext << 2); + } else { + fp = env->fit_period[fp]; + } + + return fp; +} + +/* Return the location of the bit of time base at which the WDT will raise an + interrupt */ +static uint8_t booke_get_wdt_target(CPUPPCState *env, ppc_tb_t *tb_env) +{ + uint8_t wp = (env->spr[SPR_BOOKE_TCR] & TCR_WP_MASK) >> TCR_WP_SHIFT; + + if (tb_env->flags & PPC_TIMER_E500) { + /* e500 Watchdog timer period extension */ + uint32_t wpext = (env->spr[SPR_BOOKE_TCR] & TCR_E500_WPEXT_MASK) + >> TCR_E500_WPEXT_SHIFT; + wp = 63 - (wp | wpext << 2); + } else { + wp = env->wdt_period[wp]; + } + + return wp; +} + +static void booke_update_fixed_timer(CPUPPCState *env, + uint8_t target_bit, + uint64_t *next, + QEMUTimer *timer, + int tsr_bit) +{ +#if 0 + ppc_tb_t *tb_env = env->tb_env; + uint64_t delta_tick, ticks = 0; + uint64_t tb; + uint64_t period; + uint64_t now; + + if (!(env->spr[SPR_BOOKE_TSR] & tsr_bit)) { + /* + * Don't arm the timer again when the guest has the current + * interrupt still pending. 
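The target-bit selection in booke_get_fit_target() above packs TCR[FP] and, on e500, TCR[FPEXT] into a single time-base bit index: the FIT fires when that bit of the timebase flips from 0 to 1. A standalone sketch with hypothetical field values:

#include <stdint.h>
#include <stdio.h>

// e500 variant, as in booke_get_fit_target(): combine the 2-bit FP field with
// the 4-bit FPEXT extension, then count the bit position down from 63.
static unsigned fit_target_bit_e500(unsigned fp, unsigned fpext)
{
    return 63 - (fp | (fpext << 2));
}

int main(void)
{
    // FP = 3, FPEXT = 0xF selects time-base bit 0, the fastest rate
    // (an interrupt every other timebase tick).
    printf("bit %u\n", fit_target_bit_e500(3, 0xF));   // prints: bit 0
    // FP = 0, FPEXT = 0 selects bit 63, the slowest possible rate.
    printf("bit %u\n", fit_target_bit_e500(0, 0));     // prints: bit 63
    return 0;
}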
Wait for it to ack it. + */ + return; + } + + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + tb = cpu_ppc_get_tb(tb_env, now, tb_env->tb_offset); + period = 1ULL << target_bit; + delta_tick = period - (tb & (period - 1)); + + /* the timer triggers only when the selected bit toggles from 0 to 1 */ + if (tb & period) { + ticks = period; + } + + if (ticks + delta_tick < ticks) { + /* Overflow, so assume the biggest number we can express. */ + ticks = UINT64_MAX; + } else { + ticks += delta_tick; + } + + *next = now + muldiv64(ticks, NANOSECONDS_PER_SECOND, tb_env->tb_freq); + if ((*next < now) || (*next > INT64_MAX)) { + /* Overflow, so assume the biggest number the qemu timer supports. */ + *next = INT64_MAX; + } + + /* XXX: If expire time is now. We can't run the callback because we don't + * have access to it. So we just set the timer one nanosecond later. + */ + + if (*next == now) { + (*next)++; + } else { + /* + * There's no point to fake any granularity that's more fine grained + * than milliseconds. Anything beyond that just overloads the system. + */ + *next = MAX(*next, now + SCALE_MS); + } + + /* Fire the next timer */ + timer_mod(timer, *next); +#endif +} + +static void booke_decr_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + + env->spr[SPR_BOOKE_TSR] |= TSR_DIS; + booke_update_irq(cpu); + + if (env->spr[SPR_BOOKE_TCR] & TCR_ARE) { + /* Do not reload 0, it is already there. It would just trigger + * the timer again and lead to infinite loop */ + if (env->spr[SPR_BOOKE_DECAR] != 0) { + /* Auto Reload */ + cpu_ppc_store_decr(env, env->spr[SPR_BOOKE_DECAR]); + } + } +} + +static void booke_fit_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + ppc_tb_t *tb_env; + booke_timer_t *booke_timer; + + tb_env = env->tb_env; + booke_timer = tb_env->opaque; + env->spr[SPR_BOOKE_TSR] |= TSR_FIS; + + booke_update_irq(cpu); + + booke_update_fixed_timer(env, + booke_get_fit_target(env, tb_env), + &booke_timer->fit_next, + booke_timer->fit_timer, + TSR_FIS); +} + +static void booke_wdt_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + ppc_tb_t *tb_env; + booke_timer_t *booke_timer; + + tb_env = env->tb_env; + booke_timer = tb_env->opaque; + + /* TODO: There's lots of complicated stuff to do here */ + + booke_update_irq(cpu); + + booke_update_fixed_timer(env, + booke_get_wdt_target(env, tb_env), + &booke_timer->wdt_next, + booke_timer->wdt_timer, + TSR_WIS); +} + +void store_booke_tsr(CPUPPCState *env, target_ulong val) +{ + PowerPCCPU *cpu = env_archcpu(env); + ppc_tb_t *tb_env = env->tb_env; + booke_timer_t *booke_timer = tb_env->opaque; + + env->spr[SPR_BOOKE_TSR] &= ~val; + // kvmppc_clear_tsr_bits(cpu, val); + + if (val & TSR_FIS) { + booke_update_fixed_timer(env, + booke_get_fit_target(env, tb_env), + &booke_timer->fit_next, + booke_timer->fit_timer, + TSR_FIS); + } + + if (val & TSR_WIS) { + booke_update_fixed_timer(env, + booke_get_wdt_target(env, tb_env), + &booke_timer->wdt_next, + booke_timer->wdt_timer, + TSR_WIS); + } + + booke_update_irq(cpu); +} + +void store_booke_tcr(CPUPPCState *env, target_ulong val) +{ + PowerPCCPU *cpu = env_archcpu(env); + ppc_tb_t *tb_env = env->tb_env; + booke_timer_t *booke_timer = tb_env->opaque; + + env->spr[SPR_BOOKE_TCR] = val; + // kvmppc_set_tcr(cpu); + + booke_update_irq(cpu); + + booke_update_fixed_timer(env, + booke_get_fit_target(env, tb_env), + &booke_timer->fit_next, + booke_timer->fit_timer, + TSR_FIS); + + booke_update_fixed_timer(env, + 
booke_get_wdt_target(env, tb_env), + &booke_timer->wdt_next, + booke_timer->wdt_timer, + TSR_WIS); +} + +#if 0 +static void ppc_booke_timer_reset_handle(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + + store_booke_tcr(env, 0); + store_booke_tsr(env, -1); +} + +/* + * This function will be called whenever the CPU state changes. + * CPU states are defined in "typedef enum RunState". + * Regarding timers: when the CPU state changes to running after a debug halt, + * or in similar cases that take time, the final watchdog expiry can happen + * in between. This will cause an exit to QEMU, and the configured watchdog + * action will be taken. To avoid this, we always clear the watchdog state + * when the state changes to running. + */ +static void cpu_state_change_handler(void *opaque, int running, RunState state) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + + if (!running) { + return; + } + + /* + * Clear watchdog interrupt condition by clearing TSR. + */ + store_booke_tsr(env, TSR_ENW | TSR_WIS | TSR_WRS_MASK); +} +#endif + +void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags) +{ + ppc_tb_t *tb_env; + booke_timer_t *booke_timer; + + tb_env = g_malloc0(sizeof(ppc_tb_t)); + booke_timer = g_malloc0(sizeof(booke_timer_t)); + + cpu->env.tb_env = tb_env; + tb_env->flags = flags | PPC_TIMER_BOOKE | PPC_DECR_ZERO_TRIGGERED; + + tb_env->tb_freq = freq; + tb_env->decr_freq = freq; + tb_env->opaque = booke_timer; + tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_decr_cb, cpu); + + booke_timer->fit_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_fit_cb, cpu); + booke_timer->wdt_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_wdt_cb, cpu); + +#if 0 + int ret = 0; + ret = kvmppc_booke_watchdog_enable(cpu); + + if (ret) { + /* TODO: Start the QEMU emulated watchdog if not running on KVM. + * Also start the QEMU emulated watchdog if KVM does not support + * emulated watchdog or somehow it is not enabled (supported but + * not enabled indicates some bug and requires debugging :)). + */ + } + + qemu_add_vm_change_state_handler(cpu_state_change_handler, cpu); + + qemu_register_reset(ppc_booke_timer_reset_handle, cpu); +#endif +} diff --git a/qemu/hw/sparc/Makefile.objs b/qemu/hw/sparc/Makefile.objs deleted file mode 100644 index 20bd9409..00000000 --- a/qemu/hw/sparc/Makefile.objs +++ /dev/null @@ -1 +0,0 @@ -obj-y += leon3.o diff --git a/qemu/hw/sparc/leon3.c b/qemu/hw/sparc/leon3.c deleted file mode 100644 index 150aaed1..00000000 --- a/qemu/hw/sparc/leon3.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * QEMU Leon3 System Emulator - * - * Copyright (c) 2010-2011 AdaCore - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh, 2015 */ - -#include "hw/hw.h" -#include "hw/sparc/sparc.h" -#include "qemu/timer.h" -#include "sysemu/sysemu.h" -#include "hw/boards.h" -#include "exec/address-spaces.h" - - -static int leon3_generic_hw_init(struct uc_struct *uc, MachineState *machine) -{ - const char *cpu_model = machine->cpu_model; - SPARCCPU *cpu; - - /* Init CPU */ - if (!cpu_model) { - cpu_model = "LEON3"; - } - - cpu = cpu_sparc_init(uc, cpu_model); - uc->cpu = CPU(cpu); - if (cpu == NULL) { - fprintf(stderr, "qemu: Unable to find Sparc CPU definition\n"); - return -1; - } - - cpu_sparc_set_id(&cpu->env, 0); - - return 0; -} - -void leon3_machine_init(struct uc_struct *uc) -{ - static QEMUMachine leon3_generic_machine = { - NULL, - "leon3_generic", - leon3_generic_hw_init, - NULL, - 0, - 1, - UC_ARCH_SPARC, - }; - - //printf(">>> leon3_machine_init\n"); - qemu_register_machine(uc, &leon3_generic_machine, TYPE_MACHINE, NULL); -} diff --git a/qemu/hw/sparc64/Makefile.objs b/qemu/hw/sparc64/Makefile.objs deleted file mode 100644 index a84cfe3e..00000000 --- a/qemu/hw/sparc64/Makefile.objs +++ /dev/null @@ -1 +0,0 @@ -obj-y += sun4u.o diff --git a/qemu/include/config.h b/qemu/include/config.h deleted file mode 100644 index e20f7869..00000000 --- a/qemu/include/config.h +++ /dev/null @@ -1,2 +0,0 @@ -#include "config-host.h" -#include "config-target.h" diff --git a/qemu/include/qemu/aes.h b/qemu/include/crypto/aes.h similarity index 84% rename from qemu/include/qemu/aes.h rename to qemu/include/crypto/aes.h index 63438adb..12fb321b 100644 --- a/qemu/include/qemu/aes.h +++ b/qemu/include/crypto/aes.h @@ -10,27 +10,26 @@ struct aes_key_st { }; typedef struct aes_key_st AES_KEY; -/* FreeBSD has its own AES_set_decrypt_key in -lcrypto, avoid conflicts */ -#ifdef __FreeBSD__ +/* FreeBSD/OpenSSL have their own AES functions with the same names in -lcrypto + * (which might be pulled in via curl), so redefine to avoid conflicts. 
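The renaming above is purely a preprocessor trick: every later declaration, definition, and call site of the AES_* names is rewritten before compilation, so the object file only ever exports the QEMU_AES_* symbols and cannot collide with -lcrypto. The same idea in a self-contained miniature; all names here are invented:

#include <stdio.h>

// After this #define, both the definition and the call below really use the
// symbol my_project_hash, so a library exporting its own hash() links fine.
#define hash my_project_hash

static unsigned hash(unsigned x)
{
    return x * 2654435761u;   // toy body standing in for the AES routines
}

int main(void)
{
    printf("%u\n", hash(42));
    return 0;
}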
*/ #define AES_set_encrypt_key QEMU_AES_set_encrypt_key #define AES_set_decrypt_key QEMU_AES_set_decrypt_key #define AES_encrypt QEMU_AES_encrypt #define AES_decrypt QEMU_AES_decrypt #define AES_cbc_encrypt QEMU_AES_cbc_encrypt -#endif int AES_set_encrypt_key(const unsigned char *userKey, const int bits, - AES_KEY *key); + AES_KEY *key); int AES_set_decrypt_key(const unsigned char *userKey, const int bits, - AES_KEY *key); + AES_KEY *key); void AES_encrypt(const unsigned char *in, unsigned char *out, - const AES_KEY *key); + const AES_KEY *key); void AES_decrypt(const unsigned char *in, unsigned char *out, - const AES_KEY *key); + const AES_KEY *key); void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, - const unsigned long length, const AES_KEY *key, - unsigned char *ivec, const int enc); + const unsigned long length, const AES_KEY *key, + unsigned char *ivec, const int enc); extern const uint8_t AES_sbox[256]; extern const uint8_t AES_isbox[256]; diff --git a/qemu/qemu-log.c b/qemu/include/crypto/init.h similarity index 51% rename from qemu/qemu-log.c rename to qemu/include/crypto/init.h index 6198eb61..00e0f637 100644 --- a/qemu/qemu-log.c +++ b/qemu/include/crypto/init.h @@ -1,12 +1,12 @@ /* - * Logging support + * QEMU Crypto initialization * - * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2015 Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -15,33 +15,14 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * */ -#include "qemu-common.h" -#include "qemu/log.h" +#ifndef QCRYPTO_INIT_H +#define QCRYPTO_INIT_H -FILE *qemu_logfile; -int qemu_loglevel; +#include "qapi/error.h" -void qemu_log(const char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - if (qemu_logfile) { - vfprintf(qemu_logfile, fmt, ap); - } - va_end(ap); -} - -void qemu_log_mask(int mask, const char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - if ((qemu_loglevel & mask) && qemu_logfile) { - vfprintf(qemu_logfile, fmt, ap); - } - va_end(ap); -} +int qcrypto_init(Error **errp); +#endif /* QCRYPTO_INIT_H */ diff --git a/qemu/include/crypto/random.h b/qemu/include/crypto/random.h new file mode 100644 index 00000000..d2b7540d --- /dev/null +++ b/qemu/include/crypto/random.h @@ -0,0 +1,33 @@ +/* + * QEMU Crypto random number provider + * + * Copyright (c) 2015-2016 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
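The header below documents an initialize-once, 0-on-success / -1-on-error contract for qcrypto_random_init(). A self-contained stand-in for that contract; it opens /dev/urandom purely for illustration, and the real provider in this tree may be implemented quite differently:

#include <stdio.h>
#include <stdlib.h>

static FILE *rng_handle;   // the "handle" that later random-bytes calls use

// Open an entropy source once at startup; 0 on success, -1 on error.
static int demo_random_init(void)
{
    if (rng_handle != NULL) {
        return 0;                          // already initialized
    }
    rng_handle = fopen("/dev/urandom", "rb");
    return rng_handle != NULL ? 0 : -1;
}

int main(void)
{
    if (demo_random_init() < 0) {
        fprintf(stderr, "no entropy source\n");
        return EXIT_FAILURE;
    }
    puts("random provider ready");
    return EXIT_SUCCESS;
}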
+ * + */ + +#ifndef QCRYPTO_RANDOM_H +#define QCRYPTO_RANDOM_H + +/** + * qcrypto_random_init: + * + * Initializes the handles used by qcrypto_random_bytes + * + * Returns 0 on success, -1 on error + */ +int qcrypto_random_init(void); + +#endif /* QCRYPTO_RANDOM_H */ diff --git a/qemu/include/disas/dis-asm.h b/qemu/include/disas/dis-asm.h new file mode 100644 index 00000000..c5f9fa08 --- /dev/null +++ b/qemu/include/disas/dis-asm.h @@ -0,0 +1,519 @@ +/* Interface between the opcode library and its callers. + Written by Cygnus Support, 1993. + + The opcode library (libopcodes.a) provides instruction decoders for + a large variety of instruction sets, callable with an identical + interface, for making instruction-processing programs more independent + of the instruction set being processed. */ + +#ifndef DISAS_DIS_ASM_H +#define DISAS_DIS_ASM_H + +typedef void *PTR; +typedef uint64_t bfd_vma; +typedef int64_t bfd_signed_vma; +typedef uint8_t bfd_byte; +#define sprintf_vma(s,x) sprintf (s, "%0" PRIx64, x) +#define snprintf_vma(s,ss,x) snprintf (s, ss, "%0" PRIx64, x) + +#define BFD64 + +enum bfd_flavour { + bfd_target_unknown_flavour, + bfd_target_aout_flavour, + bfd_target_coff_flavour, + bfd_target_ecoff_flavour, + bfd_target_elf_flavour, + bfd_target_ieee_flavour, + bfd_target_nlm_flavour, + bfd_target_oasys_flavour, + bfd_target_tekhex_flavour, + bfd_target_srec_flavour, + bfd_target_ihex_flavour, + bfd_target_som_flavour, + bfd_target_os9k_flavour, + bfd_target_versados_flavour, + bfd_target_msdos_flavour, + bfd_target_evax_flavour +}; + +enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN }; + +enum bfd_architecture +{ + bfd_arch_unknown, /* File arch not known */ + bfd_arch_obscure, /* Arch known, not one of these */ + bfd_arch_m68k, /* Motorola 68xxx */ +#define bfd_mach_m68000 1 +#define bfd_mach_m68008 2 +#define bfd_mach_m68010 3 +#define bfd_mach_m68020 4 +#define bfd_mach_m68030 5 +#define bfd_mach_m68040 6 +#define bfd_mach_m68060 7 +#define bfd_mach_cpu32 8 +#define bfd_mach_mcf5200 9 +#define bfd_mach_mcf5206e 10 +#define bfd_mach_mcf5307 11 +#define bfd_mach_mcf5407 12 +#define bfd_mach_mcf528x 13 +#define bfd_mach_mcfv4e 14 +#define bfd_mach_mcf521x 15 +#define bfd_mach_mcf5249 16 +#define bfd_mach_mcf547x 17 +#define bfd_mach_mcf548x 18 + bfd_arch_vax, /* DEC Vax */ + bfd_arch_i960, /* Intel 960 */ + /* The order of the following is important. + lower number indicates a machine type that + only accepts a subset of the instructions + available to machines with higher numbers. + The exception is the "ca", which is + incompatible with all other machines except + "core". */ + +#define bfd_mach_i960_core 1 +#define bfd_mach_i960_ka_sa 2 +#define bfd_mach_i960_kb_sb 3 +#define bfd_mach_i960_mc 4 +#define bfd_mach_i960_xa 5 +#define bfd_mach_i960_ca 6 +#define bfd_mach_i960_jx 7 +#define bfd_mach_i960_hx 8 + + bfd_arch_a29k, /* AMD 29000 */ + bfd_arch_sparc, /* SPARC */ +#define bfd_mach_sparc 1 +/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */ +#define bfd_mach_sparc_sparclet 2 +#define bfd_mach_sparc_sparclite 3 +#define bfd_mach_sparc_v8plus 4 +#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */ +#define bfd_mach_sparc_sparclite_le 6 +#define bfd_mach_sparc_v9 7 +#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */ +#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */ +#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */ +/* Nonzero if MACH has the v9 instruction set. 
*/ +#define bfd_mach_sparc_v9_p(mach) \ + ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \ + && (mach) != bfd_mach_sparc_sparclite_le) + bfd_arch_mips, /* MIPS Rxxxx */ +#define bfd_mach_mips3000 3000 +#define bfd_mach_mips3900 3900 +#define bfd_mach_mips4000 4000 +#define bfd_mach_mips4010 4010 +#define bfd_mach_mips4100 4100 +#define bfd_mach_mips4300 4300 +#define bfd_mach_mips4400 4400 +#define bfd_mach_mips4600 4600 +#define bfd_mach_mips4650 4650 +#define bfd_mach_mips5000 5000 +#define bfd_mach_mips6000 6000 +#define bfd_mach_mips8000 8000 +#define bfd_mach_mips10000 10000 +#define bfd_mach_mips16 16 + bfd_arch_i386, /* Intel 386 */ +#define bfd_mach_i386_i386 0 +#define bfd_mach_i386_i8086 1 +#define bfd_mach_i386_i386_intel_syntax 2 +#define bfd_mach_x86_64 3 +#define bfd_mach_x86_64_intel_syntax 4 + bfd_arch_we32k, /* AT&T WE32xxx */ + bfd_arch_tahoe, /* CCI/Harris Tahoe */ + bfd_arch_i860, /* Intel 860 */ + bfd_arch_romp, /* IBM ROMP PC/RT */ + bfd_arch_alliant, /* Alliant */ + bfd_arch_convex, /* Convex */ + bfd_arch_m88k, /* Motorola 88xxx */ + bfd_arch_pyramid, /* Pyramid Technology */ + bfd_arch_h8300, /* Hitachi H8/300 */ +#define bfd_mach_h8300 1 +#define bfd_mach_h8300h 2 +#define bfd_mach_h8300s 3 + bfd_arch_powerpc, /* PowerPC */ +#define bfd_mach_ppc 0 +#define bfd_mach_ppc64 1 +#define bfd_mach_ppc_403 403 +#define bfd_mach_ppc_403gc 4030 +#define bfd_mach_ppc_e500 500 +#define bfd_mach_ppc_505 505 +#define bfd_mach_ppc_601 601 +#define bfd_mach_ppc_602 602 +#define bfd_mach_ppc_603 603 +#define bfd_mach_ppc_ec603e 6031 +#define bfd_mach_ppc_604 604 +#define bfd_mach_ppc_620 620 +#define bfd_mach_ppc_630 630 +#define bfd_mach_ppc_750 750 +#define bfd_mach_ppc_860 860 +#define bfd_mach_ppc_a35 35 +#define bfd_mach_ppc_rs64ii 642 +#define bfd_mach_ppc_rs64iii 643 +#define bfd_mach_ppc_7400 7400 + bfd_arch_rs6000, /* IBM RS/6000 */ + bfd_arch_hppa, /* HP PA RISC */ +#define bfd_mach_hppa10 10 +#define bfd_mach_hppa11 11 +#define bfd_mach_hppa20 20 +#define bfd_mach_hppa20w 25 + bfd_arch_d10v, /* Mitsubishi D10V */ + bfd_arch_z8k, /* Zilog Z8000 */ +#define bfd_mach_z8001 1 +#define bfd_mach_z8002 2 + bfd_arch_h8500, /* Hitachi H8/500 */ + bfd_arch_sh, /* Hitachi SH */ +#define bfd_mach_sh 1 +#define bfd_mach_sh2 0x20 +#define bfd_mach_sh_dsp 0x2d +#define bfd_mach_sh2a 0x2a +#define bfd_mach_sh2a_nofpu 0x2b +#define bfd_mach_sh2e 0x2e +#define bfd_mach_sh3 0x30 +#define bfd_mach_sh3_nommu 0x31 +#define bfd_mach_sh3_dsp 0x3d +#define bfd_mach_sh3e 0x3e +#define bfd_mach_sh4 0x40 +#define bfd_mach_sh4_nofpu 0x41 +#define bfd_mach_sh4_nommu_nofpu 0x42 +#define bfd_mach_sh4a 0x4a +#define bfd_mach_sh4a_nofpu 0x4b +#define bfd_mach_sh4al_dsp 0x4d +#define bfd_mach_sh5 0x50 + bfd_arch_alpha, /* Dec Alpha */ +#define bfd_mach_alpha 1 +#define bfd_mach_alpha_ev4 0x10 +#define bfd_mach_alpha_ev5 0x20 +#define bfd_mach_alpha_ev6 0x30 + bfd_arch_arm, /* Advanced Risc Machines ARM */ +#define bfd_mach_arm_unknown 0 +#define bfd_mach_arm_2 1 +#define bfd_mach_arm_2a 2 +#define bfd_mach_arm_3 3 +#define bfd_mach_arm_3M 4 +#define bfd_mach_arm_4 5 +#define bfd_mach_arm_4T 6 +#define bfd_mach_arm_5 7 +#define bfd_mach_arm_5T 8 +#define bfd_mach_arm_5TE 9 +#define bfd_mach_arm_XScale 10 +#define bfd_mach_arm_ep9312 11 +#define bfd_mach_arm_iWMMXt 12 +#define bfd_mach_arm_iWMMXt2 13 + bfd_arch_ns32k, /* National Semiconductors ns32000 */ + bfd_arch_w65, /* WDC 65816 */ + bfd_arch_tic30, /* Texas Instruments TMS320C30 */ + bfd_arch_v850, /* NEC V850 */ +#define 
bfd_mach_v850 0 + bfd_arch_arc, /* Argonaut RISC Core */ +#define bfd_mach_arc_base 0 + bfd_arch_m32r, /* Mitsubishi M32R/D */ +#define bfd_mach_m32r 0 /* backwards compatibility */ + bfd_arch_mn10200, /* Matsushita MN10200 */ + bfd_arch_mn10300, /* Matsushita MN10300 */ + bfd_arch_cris, /* Axis CRIS */ +#define bfd_mach_cris_v0_v10 255 +#define bfd_mach_cris_v32 32 +#define bfd_mach_cris_v10_v32 1032 + bfd_arch_microblaze, /* Xilinx MicroBlaze. */ + bfd_arch_moxie, /* The Moxie core. */ + bfd_arch_ia64, /* HP/Intel ia64 */ +#define bfd_mach_ia64_elf64 64 +#define bfd_mach_ia64_elf32 32 + bfd_arch_nios2, /* Nios II */ +#define bfd_mach_nios2 0 +#define bfd_mach_nios2r1 1 +#define bfd_mach_nios2r2 2 + bfd_arch_lm32, /* Lattice Mico32 */ +#define bfd_mach_lm32 1 + bfd_arch_rx, /* Renesas RX */ +#define bfd_mach_rx 0x75 +#define bfd_mach_rx_v2 0x76 +#define bfd_mach_rx_v3 0x77 + bfd_arch_last + }; +#define bfd_mach_s390_31 31 +#define bfd_mach_s390_64 64 + +typedef struct symbol_cache_entry +{ + const char *name; + union + { + PTR p; + bfd_vma i; + } udata; +} asymbol; + +typedef int (*fprintf_function)(FILE *f, const char *fmt, ...) + GCC_FMT_ATTR(2, 3); + +enum dis_insn_type { + dis_noninsn, /* Not a valid instruction */ + dis_nonbranch, /* Not a branch instruction */ + dis_branch, /* Unconditional branch */ + dis_condbranch, /* Conditional branch */ + dis_jsr, /* Jump to subroutine */ + dis_condjsr, /* Conditional jump to subroutine */ + dis_dref, /* Data reference instruction */ + dis_dref2 /* Two data references in instruction */ +}; + +/* This struct is passed into the instruction decoding routine, + and is passed back out into each callback. The various fields are used + for conveying information from your main routine into your callbacks, + for passing information into the instruction decoders (such as the + addresses of the callback functions), or for passing information + back from the instruction decoders to their callers. + + It must be initialized before it is first passed; this can be done + by hand, or using one of the initialization macros below. */ + +typedef struct disassemble_info { + fprintf_function fprintf_func; + FILE *stream; + PTR application_data; + + /* Target description. We could replace this with a pointer to the bfd, + but that would require one. There currently isn't any such requirement + so to avoid introducing one we record these explicitly. */ + /* The bfd_flavour. This can be bfd_target_unknown_flavour. */ + enum bfd_flavour flavour; + /* The bfd_arch value. */ + enum bfd_architecture arch; + /* The bfd_mach value. */ + unsigned long mach; + /* Endianness (for bi-endian cpus). Mono-endian cpus can ignore this. */ + enum bfd_endian endian; + + /* An array of pointers to symbols either at the location being disassembled + or at the start of the function being disassembled. The array is sorted + so that the first symbol is intended to be the one used. The others are + present for any misc. purposes. This is not set reliably, but if it is + not NULL, it is correct. */ + asymbol **symbols; + /* Number of symbols in array. */ + int num_symbols; + + /* For use by the disassembler. + The top 16 bits are reserved for public use (and are documented here). + The bottom 16 bits are for the internal use of the disassembler. */ + unsigned long flags; +#define INSN_HAS_RELOC 0x80000000 +#define INSN_ARM_BE32 0x00010000 + PTR private_data; + + /* Function used to get bytes to disassemble. 
MEMADDR is the + address of the stuff to be disassembled, MYADDR is the address to + put the bytes in, and LENGTH is the number of bytes to read. + INFO is a pointer to this struct. + Returns an errno value or 0 for success. */ + int (*read_memory_func) + (bfd_vma memaddr, bfd_byte *myaddr, int length, + struct disassemble_info *info); + + /* Function which should be called if we get an error that we can't + recover from. STATUS is the errno value from read_memory_func and + MEMADDR is the address that we were trying to read. INFO is a + pointer to this struct. */ + void (*memory_error_func) + (int status, bfd_vma memaddr, struct disassemble_info *info); + + /* Function called to print ADDR. */ + void (*print_address_func) + (bfd_vma addr, struct disassemble_info *info); + + /* Function called to print an instruction. The function is architecture + * specific. + */ + int (*print_insn)(bfd_vma addr, struct disassemble_info *info); + + /* Function called to determine if there is a symbol at the given ADDR. + If there is, the function returns 1, otherwise it returns 0. + This is used by ports which support an overlay manager where + the overlay number is held in the top part of an address. In + some circumstances we want to include the overlay number in the + address, (normally because there is a symbol associated with + that address), but sometimes we want to mask out the overlay bits. */ + int (* symbol_at_address_func) + (bfd_vma addr, struct disassemble_info * info); + + /* These are for buffer_read_memory. */ + bfd_byte *buffer; + bfd_vma buffer_vma; + int buffer_length; + + /* This variable may be set by the instruction decoder. It suggests + the number of bytes objdump should display on a single line. If + the instruction decoder sets this, it should always set it to + the same value in order to get reasonable looking output. */ + int bytes_per_line; + + /* the next two variables control the way objdump displays the raw data */ + /* For example, if bytes_per_line is 8 and bytes_per_chunk is 4, the */ + /* output will look like this: + 00: 00000000 00000000 + with the chunks displayed according to "display_endian". */ + int bytes_per_chunk; + enum bfd_endian display_endian; + + /* Results from instruction decoders. Not all decoders yet support + this information. This info is set each time an instruction is + decoded, and is only valid for the last such instruction. + + To determine whether this decoder supports this information, set + insn_info_valid to 0, decode an instruction, then check it. */ + + char insn_info_valid; /* Branch info has been set. */ + char branch_delay_insns; /* How many sequential insn's will run before + a branch takes effect. (0 = normal) */ + char data_size; /* Size of data reference in insn, in bytes */ + enum dis_insn_type insn_type; /* Type of instruction */ + bfd_vma target; /* Target address of branch or dref, if known; + zero if unknown. */ + bfd_vma target2; /* Second target address for dref2 */ + + /* Command line options specific to the target disassembler. */ + char * disassembler_options; + + /* Field intended to be used by targets in any way they deem suitable. */ + int64_t target_info; + + /* Options for Capstone disassembly. */ + int cap_arch; + int cap_mode; + int cap_insn_unit; + int cap_insn_split; + +} disassemble_info; + + +/* Standard disassemblers. Disassemble one instruction at the given + target address. Return number of bytes processed. 
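The read_memory_func contract spelled out above (return an errno value on failure, 0 on success) is straightforward to satisfy once the code has been copied into a flat buffer; the sketch below is essentially what the buffer_read_memory helper declared later in this header already provides, shown only to make the callback shape concrete:

    #include <errno.h>
    #include <string.h>

    /* Serve bytes out of info->buffer, which the caller has pointed at a
       block of guest code whose first byte lives at info->buffer_vma. */
    static int flat_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
                                struct disassemble_info *info)
    {
        if (memaddr < info->buffer_vma ||
            memaddr + length > info->buffer_vma + info->buffer_length) {
            return EIO; /* out of range: report failure as an errno value */
        }
        memcpy(myaddr, info->buffer + (memaddr - info->buffer_vma), length);
        return 0;
    }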
*/ +typedef int (*disassembler_ftype) (bfd_vma, disassemble_info *); + +int print_insn_tci(bfd_vma, disassemble_info*); +int print_insn_big_mips (bfd_vma, disassemble_info*); +int print_insn_little_mips (bfd_vma, disassemble_info*); +int print_insn_nanomips (bfd_vma, disassemble_info*); +int print_insn_i386 (bfd_vma, disassemble_info*); +int print_insn_m68k (bfd_vma, disassemble_info*); +int print_insn_z8001 (bfd_vma, disassemble_info*); +int print_insn_z8002 (bfd_vma, disassemble_info*); +int print_insn_h8300 (bfd_vma, disassemble_info*); +int print_insn_h8300h (bfd_vma, disassemble_info*); +int print_insn_h8300s (bfd_vma, disassemble_info*); +int print_insn_h8500 (bfd_vma, disassemble_info*); +int print_insn_arm_a64 (bfd_vma, disassemble_info*); +int print_insn_alpha (bfd_vma, disassemble_info*); +disassembler_ftype arc_get_disassembler (int, int); +int print_insn_arm (bfd_vma, disassemble_info*); +int print_insn_sparc (bfd_vma, disassemble_info*); +int print_insn_big_a29k (bfd_vma, disassemble_info*); +int print_insn_little_a29k (bfd_vma, disassemble_info*); +int print_insn_i960 (bfd_vma, disassemble_info*); +int print_insn_sh (bfd_vma, disassemble_info*); +int print_insn_shl (bfd_vma, disassemble_info*); +int print_insn_hppa (bfd_vma, disassemble_info*); +int print_insn_m32r (bfd_vma, disassemble_info*); +int print_insn_m88k (bfd_vma, disassemble_info*); +int print_insn_mn10200 (bfd_vma, disassemble_info*); +int print_insn_mn10300 (bfd_vma, disassemble_info*); +int print_insn_moxie (bfd_vma, disassemble_info*); +int print_insn_ns32k (bfd_vma, disassemble_info*); +int print_insn_big_powerpc (bfd_vma, disassemble_info*); +int print_insn_little_powerpc (bfd_vma, disassemble_info*); +int print_insn_rs6000 (bfd_vma, disassemble_info*); +int print_insn_w65 (bfd_vma, disassemble_info*); +int print_insn_d10v (bfd_vma, disassemble_info*); +int print_insn_v850 (bfd_vma, disassemble_info*); +int print_insn_tic30 (bfd_vma, disassemble_info*); +int print_insn_ppc (bfd_vma, disassemble_info*); +int print_insn_s390 (bfd_vma, disassemble_info*); +int print_insn_crisv32 (bfd_vma, disassemble_info*); +int print_insn_crisv10 (bfd_vma, disassemble_info*); +int print_insn_microblaze (bfd_vma, disassemble_info*); +int print_insn_ia64 (bfd_vma, disassemble_info*); +int print_insn_lm32 (bfd_vma, disassemble_info*); +int print_insn_big_nios2 (bfd_vma, disassemble_info*); +int print_insn_little_nios2 (bfd_vma, disassemble_info*); +int print_insn_xtensa (bfd_vma, disassemble_info*); +int print_insn_riscv32 (bfd_vma, disassemble_info*); +int print_insn_riscv64 (bfd_vma, disassemble_info*); +int print_insn_rx(bfd_vma, disassemble_info *); + +#if 0 +/* Fetch the disassembler for a given BFD, if that support is available. */ +disassembler_ftype disassembler(bfd *); +#endif + + +/* This block of definitions is for particular callers who read instructions + into a buffer before calling the instruction decoder. */ + +/* Here is a function which callers may wish to use for read_memory_func. + It gets bytes from a buffer. */ +int buffer_read_memory(bfd_vma, bfd_byte *, int, struct disassemble_info *); + +/* This function goes with buffer_read_memory. + It prints a message using info->fprintf_func and info->stream. */ +void perror_memory(int, bfd_vma, struct disassemble_info *); + + +/* Just print the address in hex. This is included for completeness even + though both GDB and objdump provide their own (to print symbolic + addresses). */ +void generic_print_address(bfd_vma, struct disassemble_info *); + +/* Always true. 
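Tying these declarations together: a typical caller initializes the struct with the INIT_DISASSEMBLE_INFO macro defined just below (which installs buffer_read_memory, perror_memory and generic_print_address), fills in the target and buffer fields, and invokes one of the print_insn_* entry points. A hedged sketch; the x86-64 arch/mach choice and the calling code are illustrative:

    #include <stdio.h>

    /* Print one instruction from 'code' to stdout and return its size. */
    static int disas_one_x86(bfd_byte *code, int size, bfd_vma pc)
    {
        disassemble_info info;

        INIT_DISASSEMBLE_INFO(info, stdout, fprintf);
        info.arch = bfd_arch_i386;
        info.mach = bfd_mach_x86_64;
        info.buffer = code;
        info.buffer_vma = pc;
        info.buffer_length = size;

        return print_insn_i386(pc, &info); /* bytes consumed */
    }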
*/ +int generic_symbol_at_address(bfd_vma, struct disassemble_info *); + +/* Macro to initialize a disassemble_info struct. This should be called + by all applications creating such a struct. */ +#define INIT_DISASSEMBLE_INFO(INFO, STREAM, FPRINTF_FUNC) \ + (INFO).flavour = bfd_target_unknown_flavour, \ + (INFO).arch = bfd_arch_unknown, \ + (INFO).mach = 0, \ + (INFO).endian = BFD_ENDIAN_UNKNOWN, \ + INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC) + +/* Call this macro to initialize only the internal variables for the + disassembler. Architecture dependent things such as byte order, or machine + variant are not touched by this macro. This makes things much easier for + GDB which must initialize these things separately. */ + +#define INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC) \ + (INFO).fprintf_func = (FPRINTF_FUNC), \ + (INFO).stream = (STREAM), \ + (INFO).symbols = NULL, \ + (INFO).num_symbols = 0, \ + (INFO).private_data = NULL, \ + (INFO).buffer = NULL, \ + (INFO).buffer_vma = 0, \ + (INFO).buffer_length = 0, \ + (INFO).read_memory_func = buffer_read_memory, \ + (INFO).memory_error_func = perror_memory, \ + (INFO).print_address_func = generic_print_address, \ + (INFO).print_insn = NULL, \ + (INFO).symbol_at_address_func = generic_symbol_at_address, \ + (INFO).flags = 0, \ + (INFO).bytes_per_line = 0, \ + (INFO).bytes_per_chunk = 0, \ + (INFO).display_endian = BFD_ENDIAN_UNKNOWN, \ + (INFO).disassembler_options = NULL, \ + (INFO).insn_info_valid = 0 + +#ifndef ATTRIBUTE_UNUSED +#define ATTRIBUTE_UNUSED __attribute__((unused)) +#endif + +/* from libbfd */ + +bfd_vma bfd_getl64 (const bfd_byte *addr); +bfd_vma bfd_getl32 (const bfd_byte *addr); +bfd_vma bfd_getb32 (const bfd_byte *addr); +bfd_vma bfd_getl16 (const bfd_byte *addr); +bfd_vma bfd_getb16 (const bfd_byte *addr); +typedef bool bfd_boolean; + +#endif /* DISAS_DIS_ASM_H */ diff --git a/qemu/include/elf.h b/qemu/include/elf.h index b55aaa6f..8fbfe60e 100644 --- a/qemu/include/elf.h +++ b/qemu/include/elf.h @@ -1,9 +1,363 @@ #ifndef QEMU_ELF_H #define QEMU_ELF_H +/* 32-bit ELF base types. */ +typedef uint32_t Elf32_Addr; +typedef uint16_t Elf32_Half; +typedef uint32_t Elf32_Off; +typedef int32_t Elf32_Sword; +typedef uint32_t Elf32_Word; + +/* 64-bit ELF base types. */ +typedef uint64_t Elf64_Addr; +typedef uint16_t Elf64_Half; +typedef int16_t Elf64_SHalf; +typedef uint64_t Elf64_Off; +typedef int32_t Elf64_Sword; +typedef uint32_t Elf64_Word; +typedef uint64_t Elf64_Xword; +typedef int64_t Elf64_Sxword; + +/* These constants are for the segment types stored in the image headers */ +#define PT_NULL 0 +#define PT_LOAD 1 +#define PT_DYNAMIC 2 +#define PT_INTERP 3 +#define PT_NOTE 4 +#define PT_SHLIB 5 +#define PT_PHDR 6 +#define PT_LOPROC 0x70000000 +#define PT_HIPROC 0x7fffffff + +#define PT_MIPS_REGINFO 0x70000000 +#define PT_MIPS_RTPROC 0x70000001 +#define PT_MIPS_OPTIONS 0x70000002 +#define PT_MIPS_ABIFLAGS 0x70000003 + +/* Flags in the e_flags field of the header */ +/* MIPS architecture level. */ +#define EF_MIPS_ARCH 0xf0000000 + +/* Legal values for MIPS architecture level. */ +#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ +#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ +#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ +#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */ +#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ +#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */ +#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. 
*/ +#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32r2 code. */ +#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64r2 code. */ +#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */ +#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */ + +/* The ABI of a file. */ +#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */ +#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */ + +#define EF_MIPS_NOREORDER 0x00000001 +#define EF_MIPS_PIC 0x00000002 +#define EF_MIPS_CPIC 0x00000004 +#define EF_MIPS_ABI2 0x00000020 +#define EF_MIPS_OPTIONS_FIRST 0x00000080 +#define EF_MIPS_32BITMODE 0x00000100 +#define EF_MIPS_ABI 0x0000f000 +#define EF_MIPS_FP64 0x00000200 +#define EF_MIPS_NAN2008 0x00000400 + +/* MIPS machine variant */ +#define EF_MIPS_MACH_NONE 0x00000000 /* A standard MIPS implementation */ +#define EF_MIPS_MACH_3900 0x00810000 /* Toshiba R3900 */ +#define EF_MIPS_MACH_4010 0x00820000 /* LSI R4010 */ +#define EF_MIPS_MACH_4100 0x00830000 /* NEC VR4100 */ +#define EF_MIPS_MACH_4650 0x00850000 /* MIPS R4650 */ +#define EF_MIPS_MACH_4120 0x00870000 /* NEC VR4120 */ +#define EF_MIPS_MACH_4111 0x00880000 /* NEC VR4111/VR4181 */ +#define EF_MIPS_MACH_SB1 0x008a0000 /* Broadcom SB-1 */ +#define EF_MIPS_MACH_OCTEON 0x008b0000 /* Cavium Networks Octeon */ +#define EF_MIPS_MACH_XLR 0x008c0000 /* RMI Xlr */ +#define EF_MIPS_MACH_OCTEON2 0x008d0000 /* Cavium Networks Octeon2 */ +#define EF_MIPS_MACH_OCTEON3 0x008e0000 /* Cavium Networks Octeon3 */ +#define EF_MIPS_MACH_5400 0x00910000 /* NEC VR5400 */ +#define EF_MIPS_MACH_5900 0x00920000 /* Toshiba/Sony R5900 */ +#define EF_MIPS_MACH_5500 0x00980000 /* NEC VR5500 */ +#define EF_MIPS_MACH_9000 0x00990000 /* PMC-Sierra RM9000 */ +#define EF_MIPS_MACH_LS2E 0x00a00000 /* ST Microelectronics Loongson 2E */ +#define EF_MIPS_MACH_LS2F 0x00a10000 /* ST Microelectronics Loongson 2F */ +#define EF_MIPS_MACH_LS3A 0x00a20000 /* ST Microelectronics Loongson 3A */ +#define EF_MIPS_MACH 0x00ff0000 /* EF_MIPS_MACH_xxx selection mask */ + +#define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (internal) */ + +#define MIPS_ABI_FP_ANY 0x0 /* FP ABI doesn't matter */ +#define MIPS_ABI_FP_DOUBLE 0x1 /* -mdouble-float */ +#define MIPS_ABI_FP_SINGLE 0x2 /* -msingle-float */ +#define MIPS_ABI_FP_SOFT 0x3 /* -msoft-float */ +#define MIPS_ABI_FP_OLD_64 0x4 /* -mips32r2 -mfp64 */ +#define MIPS_ABI_FP_XX 0x5 /* -mfpxx */ +#define MIPS_ABI_FP_64 0x6 /* -mips32r2 -mfp64 */ +#define MIPS_ABI_FP_64A 0x7 /* -mips32r2 -mfp64 -mno-odd-spreg */ + +typedef struct mips_elf_abiflags_v0 { + uint16_t version; /* Version of flags structure */ + uint8_t isa_level; /* The level of the ISA: 1-5, 32, 64 */ + uint8_t isa_rev; /* The revision of ISA: */ + /* - 0 for MIPS V and below, */ + /* - 1-n otherwise. 
*/ + uint8_t gpr_size; /* The size of general purpose registers */ + uint8_t cpr1_size; /* The size of co-processor 1 registers */ + uint8_t cpr2_size; /* The size of co-processor 2 registers */ + uint8_t fp_abi; /* The floating-point ABI */ + uint32_t isa_ext; /* Mask of processor-specific extensions */ + uint32_t ases; /* Mask of ASEs used */ + uint32_t flags1; /* Mask of general flags */ + uint32_t flags2; +} Mips_elf_abiflags_v0; + +/* These constants define the different elf file types */ +#define ET_NONE 0 +#define ET_REL 1 +#define ET_EXEC 2 +#define ET_DYN 3 +#define ET_CORE 4 +#define ET_LOPROC 0xff00 +#define ET_HIPROC 0xffff + +/* These constants define the various ELF target machines */ +#define EM_NONE 0 +#define EM_M32 1 +#define EM_SPARC 2 +#define EM_386 3 +#define EM_68K 4 +#define EM_88K 5 +#define EM_486 6 /* Perhaps disused */ +#define EM_860 7 + +#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */ + +#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */ + +#define EM_PARISC 15 /* HPPA */ + +#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */ + +#define EM_PPC 20 /* PowerPC */ +#define EM_PPC64 21 /* PowerPC64 */ + +#define EM_ARM 40 /* ARM */ + +#define EM_SH 42 /* SuperH */ + +#define EM_SPARCV9 43 /* SPARC v9 64-bit */ + +#define EM_TRICORE 44 /* Infineon TriCore */ + +#define EM_IA_64 50 /* HP/Intel IA-64 */ + +#define EM_X86_64 62 /* AMD x86-64 */ + +#define EM_S390 22 /* IBM S/390 */ + +#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ + +#define EM_V850 87 /* NEC v850 */ + +#define EM_H8_300H 47 /* Hitachi H8/300H */ +#define EM_H8S 48 /* Hitachi H8S */ +#define EM_LATTICEMICO32 138 /* LatticeMico32 */ + +#define EM_OPENRISC 92 /* OpenCores OpenRISC */ + +#define EM_UNICORE32 110 /* UniCore32 */ + +#define EM_RISCV 243 /* RISC-V */ + +#define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */ + /* - * i386 ELF relocation types + * This is an interim value that we will use until the committee comes + * up with a final number. */ +#define EM_ALPHA 0x9026 + +/* Bogus old v850 magic number, used by old tools. */ +#define EM_CYGNUS_V850 0x9080 + +/* + * This is the old interim value for S/390 architecture + */ +#define EM_S390_OLD 0xA390 + +#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ + +#define EM_MICROBLAZE 189 +#define EM_MICROBLAZE_OLD 0xBAAB + +#define EM_XTENSA 94 /* Tensilica Xtensa */ + +#define EM_AARCH64 183 + +#define EM_TILEGX 191 /* TILE-Gx */ + +#define EM_MOXIE 223 /* Moxie processor family */ +#define EM_MOXIE_OLD 0xFEED + +/* This is the info that is needed to parse the dynamic section of the file */ +#define DT_NULL 0 +#define DT_NEEDED 1 +#define DT_PLTRELSZ 2 +#define DT_PLTGOT 3 +#define DT_HASH 4 +#define DT_STRTAB 5 +#define DT_SYMTAB 6 +#define DT_RELA 7 +#define DT_RELASZ 8 +#define DT_RELAENT 9 +#define DT_STRSZ 10 +#define DT_SYMENT 11 +#define DT_INIT 12 +#define DT_FINI 13 +#define DT_SONAME 14 +#define DT_RPATH 15 +#define DT_SYMBOLIC 16 +#define DT_REL 17 +#define DT_RELSZ 18 +#define DT_RELENT 19 +#define DT_PLTREL 20 +#define DT_DEBUG 21 +#define DT_TEXTREL 22 +#define DT_JMPREL 23 +#define DT_BINDNOW 24 +#define DT_INIT_ARRAY 25 +#define DT_FINI_ARRAY 26 +#define DT_INIT_ARRAYSZ 27 +#define DT_FINI_ARRAYSZ 28 +#define DT_RUNPATH 29 +#define DT_FLAGS 30 +#define DT_LOOS 0x6000000d +#define DT_HIOS 0x6ffff000 +#define DT_LOPROC 0x70000000 +#define DT_HIPROC 0x7fffffff + +/* DT_ entries which fall between DT_VALRNGLO and DT_VALRNDHI use + the d_val field of the Elf*_Dyn structure. I.e. 
they contain scalars. */ +#define DT_VALRNGLO 0x6ffffd00 +#define DT_VALRNGHI 0x6ffffdff + +/* DT_ entries which fall between DT_ADDRRNGLO and DT_ADDRRNGHI use + the d_ptr field of the Elf*_Dyn structure. I.e. they contain pointers. */ +#define DT_ADDRRNGLO 0x6ffffe00 +#define DT_ADDRRNGHI 0x6ffffeff + +#define DT_VERSYM 0x6ffffff0 +#define DT_RELACOUNT 0x6ffffff9 +#define DT_RELCOUNT 0x6ffffffa +#define DT_FLAGS_1 0x6ffffffb +#define DT_VERDEF 0x6ffffffc +#define DT_VERDEFNUM 0x6ffffffd +#define DT_VERNEED 0x6ffffffe +#define DT_VERNEEDNUM 0x6fffffff + +#define DT_MIPS_RLD_VERSION 0x70000001 +#define DT_MIPS_TIME_STAMP 0x70000002 +#define DT_MIPS_ICHECKSUM 0x70000003 +#define DT_MIPS_IVERSION 0x70000004 +#define DT_MIPS_FLAGS 0x70000005 + #define RHF_NONE 0 + #define RHF_HARDWAY 1 + #define RHF_NOTPOT 2 +#define DT_MIPS_BASE_ADDRESS 0x70000006 +#define DT_MIPS_CONFLICT 0x70000008 +#define DT_MIPS_LIBLIST 0x70000009 +#define DT_MIPS_LOCAL_GOTNO 0x7000000a +#define DT_MIPS_CONFLICTNO 0x7000000b +#define DT_MIPS_LIBLISTNO 0x70000010 +#define DT_MIPS_SYMTABNO 0x70000011 +#define DT_MIPS_UNREFEXTNO 0x70000012 +#define DT_MIPS_GOTSYM 0x70000013 +#define DT_MIPS_HIPAGENO 0x70000014 +#define DT_MIPS_RLD_MAP 0x70000016 + +/* This info is needed when parsing the symbol table */ +#define STB_LOCAL 0 +#define STB_GLOBAL 1 +#define STB_WEAK 2 + +#define STT_NOTYPE 0 +#define STT_OBJECT 1 +#define STT_FUNC 2 +#define STT_SECTION 3 +#define STT_FILE 4 + +#define ELF_ST_BIND(x) ((x) >> 4) +#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf) +#define ELF_ST_INFO(bind, type) (((bind) << 4) | ((type) & 0xf)) +#define ELF32_ST_BIND(x) ELF_ST_BIND(x) +#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x) +#define ELF64_ST_BIND(x) ELF_ST_BIND(x) +#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x) + +/* Symbolic values for the entries in the auxiliary table + put on the initial stack */ +#define AT_NULL 0 /* end of vector */ +#define AT_IGNORE 1 /* entry should be ignored */ +#define AT_EXECFD 2 /* file descriptor of program */ +#define AT_PHDR 3 /* program headers for program */ +#define AT_PHENT 4 /* size of program header entry */ +#define AT_PHNUM 5 /* number of program headers */ +#define AT_PAGESZ 6 /* system page size */ +#define AT_BASE 7 /* base address of interpreter */ +#define AT_FLAGS 8 /* flags */ +#define AT_ENTRY 9 /* entry point of program */ +#define AT_NOTELF 10 /* program is not ELF */ +#define AT_UID 11 /* real uid */ +#define AT_EUID 12 /* effective uid */ +#define AT_GID 13 /* real gid */ +#define AT_EGID 14 /* effective gid */ +#define AT_PLATFORM 15 /* string identifying CPU for optimizations */ +#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */ +#define AT_CLKTCK 17 /* frequency at which times() increments */ +#define AT_FPUCW 18 /* info about fpu initialization by kernel */ +#define AT_DCACHEBSIZE 19 /* data cache block size */ +#define AT_ICACHEBSIZE 20 /* instruction cache block size */ +#define AT_UCACHEBSIZE 21 /* unified cache block size */ +#define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */ +#define AT_SECURE 23 /* boolean, was exec suid-like? 
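The ELF_ST_* helpers above pack and unpack the single st_info byte shared by the 32- and 64-bit symbol records, with the binding in the high nibble and the type in the low one. For example, inside any function:

    unsigned char st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC); /* 0x12 */
    int bind = ELF_ST_BIND(st_info);  /* 1, i.e. STB_GLOBAL */
    int type = ELF_ST_TYPE(st_info);  /* 2, i.e. STT_FUNC */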
*/ +#define AT_BASE_PLATFORM 24 /* string identifying real platforms */ +#define AT_RANDOM 25 /* address of 16 random bytes */ +#define AT_HWCAP2 26 /* extension of AT_HWCAP */ +#define AT_EXECFN 31 /* filename of the executable */ +#define AT_SYSINFO 32 /* address of kernel entry point */ +#define AT_SYSINFO_EHDR 33 /* address of kernel vdso */ +#define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */ +#define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */ +#define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */ +#define AT_L3_CACHESHAPE 37 /* val&~255: cache size. */ + +typedef struct dynamic{ + Elf32_Sword d_tag; + union{ + Elf32_Sword d_val; + Elf32_Addr d_ptr; + } d_un; +} Elf32_Dyn; + +typedef struct { + Elf64_Sxword d_tag; /* entry tag value */ + union { + Elf64_Xword d_val; + Elf64_Addr d_ptr; + } d_un; +} Elf64_Dyn; + +/* The following are used with relocations */ +#define ELF32_R_SYM(x) ((x) >> 8) +#define ELF32_R_TYPE(x) ((x) & 0xff) + +#define ELF64_R_SYM(i) ((i) >> 32) +#define ELF64_R_TYPE(i) ((i) & 0xffffffff) +#define ELF64_R_TYPE_DATA(i) (((ELF64_R_TYPE(i) >> 8) ^ 0x00800000) - 0x00800000) + #define R_386_NONE 0 #define R_386_32 1 #define R_386_PC32 2 @@ -19,9 +373,6 @@ /* Not a dynamic reloc, so not included in R_386_NUM. Used in TCG. */ #define R_386_PC8 23 -/* - * Mips ELF relocation types - */ #define R_MIPS_NONE 0 #define R_MIPS_16 1 #define R_MIPS_32 2 @@ -70,6 +421,22 @@ #define R_MIPS_LOVENDOR 100 #define R_MIPS_HIVENDOR 127 + +/* SUN SPARC specific definitions. */ + +/* Values for Elf64_Ehdr.e_flags. */ + +#define EF_SPARCV9_MM 3 +#define EF_SPARCV9_TSO 0 +#define EF_SPARCV9_PSO 1 +#define EF_SPARCV9_RMO 2 +#define EF_SPARC_LEDATA 0x800000 /* little endian data */ +#define EF_SPARC_EXT_MASK 0xFFFF00 +#define EF_SPARC_32PLUS 0x000100 /* generic V8+ features */ +#define EF_SPARC_SUN_US1 0x000200 /* Sun UltraSPARC1 extensions */ +#define EF_SPARC_HAL_R1 0x000400 /* HAL R1 extensions */ +#define EF_SPARC_SUN_US3 0x000800 /* Sun UltraSPARCIII extensions */ + /* * Sparc ELF relocation types */ @@ -116,25 +483,150 @@ #define R_SPARC_5 44 #define R_SPARC_6 45 - /* Bits present in AT_HWCAP for ARM. */ + +#define HWCAP_ARM_SWP (1 << 0) +#define HWCAP_ARM_HALF (1 << 1) +#define HWCAP_ARM_THUMB (1 << 2) +#define HWCAP_ARM_26BIT (1 << 3) +#define HWCAP_ARM_FAST_MULT (1 << 4) +#define HWCAP_ARM_FPA (1 << 5) +#define HWCAP_ARM_VFP (1 << 6) +#define HWCAP_ARM_EDSP (1 << 7) +#define HWCAP_ARM_JAVA (1 << 8) +#define HWCAP_ARM_IWMMXT (1 << 9) +#define HWCAP_ARM_CRUNCH (1 << 10) +#define HWCAP_ARM_THUMBEE (1 << 11) +#define HWCAP_ARM_NEON (1 << 12) +#define HWCAP_ARM_VFPv3 (1 << 13) +#define HWCAP_ARM_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */ +#define HWCAP_ARM_TLS (1 << 15) +#define HWCAP_ARM_VFPv4 (1 << 16) #define HWCAP_ARM_IDIVA (1 << 17) - -/* Bits present in AT_HWCAP for s390. */ -#define HWCAP_S390_STFLE 4 - -/* Bits present in AT_HWCAP for Sparc. */ -#define HWCAP_SPARC_VIS3 0x00020000 +#define HWCAP_ARM_IDIVT (1 << 18) +#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) +#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs */ +#define HWCAP_LPAE (1 << 20) /* Bits present in AT_HWCAP for PowerPC. 
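The relocation helpers a little further up split an r_info word into a symbol-table index and a relocation type (upper 24 / lower 8 bits in the 32-bit case, upper 32 / lower 32 bits in the 64-bit case). A quick sketch with an arbitrary, illustrative symbol index:

    Elf32_Word r_info = ((Elf32_Word)42 << 8) | R_386_PC32;
    Elf32_Word sym  = ELF32_R_SYM(r_info);  /* 42, an index into the symtab */
    Elf32_Word type = ELF32_R_TYPE(r_info); /* 2, i.e. R_386_PC32 */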
*/ -#define PPC_FEATURE_ARCH_2_06 0x00000100 -/* Symbolic values for the entries in the auxiliary table - put on the initial stack */ -#define AT_PLATFORM 15 /* string identifying CPU for optimizations */ -#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */ -#define AT_DCACHEBSIZE 19 /* data cache block size */ -#define AT_ICACHEBSIZE 20 /* instruction cache block size */ +#define PPC_FEATURE_32 0x80000000 +#define PPC_FEATURE_64 0x40000000 +#define PPC_FEATURE_601_INSTR 0x20000000 +#define PPC_FEATURE_HAS_ALTIVEC 0x10000000 +#define PPC_FEATURE_HAS_FPU 0x08000000 +#define PPC_FEATURE_HAS_MMU 0x04000000 +#define PPC_FEATURE_HAS_4xxMAC 0x02000000 +#define PPC_FEATURE_UNIFIED_CACHE 0x01000000 +#define PPC_FEATURE_HAS_SPE 0x00800000 +#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000 +#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000 +#define PPC_FEATURE_NO_TB 0x00100000 +#define PPC_FEATURE_POWER4 0x00080000 +#define PPC_FEATURE_POWER5 0x00040000 +#define PPC_FEATURE_POWER5_PLUS 0x00020000 +#define PPC_FEATURE_CELL 0x00010000 +#define PPC_FEATURE_BOOKE 0x00008000 +#define PPC_FEATURE_SMT 0x00004000 +#define PPC_FEATURE_ICACHE_SNOOP 0x00002000 +#define PPC_FEATURE_ARCH_2_05 0x00001000 +#define PPC_FEATURE_PA6T 0x00000800 +#define PPC_FEATURE_HAS_DFP 0x00000400 +#define PPC_FEATURE_POWER6_EXT 0x00000200 +#define PPC_FEATURE_ARCH_2_06 0x00000100 +#define PPC_FEATURE_HAS_VSX 0x00000080 + +#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \ + 0x00000040 + +#define PPC_FEATURE_TRUE_LE 0x00000002 +#define PPC_FEATURE_PPC_LE 0x00000001 + +/* Bits present in AT_HWCAP2 for PowerPC. */ + +#define PPC_FEATURE2_ARCH_2_07 0x80000000 +#define PPC_FEATURE2_HAS_HTM 0x40000000 +#define PPC_FEATURE2_HAS_DSCR 0x20000000 +#define PPC_FEATURE2_HAS_EBB 0x10000000 +#define PPC_FEATURE2_HAS_ISEL 0x08000000 +#define PPC_FEATURE2_HAS_TAR 0x04000000 +#define PPC_FEATURE2_HAS_VEC_CRYPTO 0x02000000 +#define PPC_FEATURE2_HTM_NOSC 0x01000000 +#define PPC_FEATURE2_ARCH_3_00 0x00800000 +#define PPC_FEATURE2_HAS_IEEE128 0x00400000 + +/* Bits present in AT_HWCAP for Sparc. */ + +#define HWCAP_SPARC_FLUSH 0x00000001 +#define HWCAP_SPARC_STBAR 0x00000002 +#define HWCAP_SPARC_SWAP 0x00000004 +#define HWCAP_SPARC_MULDIV 0x00000008 +#define HWCAP_SPARC_V9 0x00000010 +#define HWCAP_SPARC_ULTRA3 0x00000020 +#define HWCAP_SPARC_BLKINIT 0x00000040 +#define HWCAP_SPARC_N2 0x00000080 +#define HWCAP_SPARC_MUL32 0x00000100 +#define HWCAP_SPARC_DIV32 0x00000200 +#define HWCAP_SPARC_FSMULD 0x00000400 +#define HWCAP_SPARC_V8PLUS 0x00000800 +#define HWCAP_SPARC_POPC 0x00001000 +#define HWCAP_SPARC_VIS 0x00002000 +#define HWCAP_SPARC_VIS2 0x00004000 +#define HWCAP_SPARC_ASI_BLK_INIT 0x00008000 +#define HWCAP_SPARC_FMAF 0x00010000 +#define HWCAP_SPARC_VIS3 0x00020000 +#define HWCAP_SPARC_HPC 0x00040000 +#define HWCAP_SPARC_RANDOM 0x00080000 +#define HWCAP_SPARC_TRANS 0x00100000 +#define HWCAP_SPARC_FJFMAU 0x00200000 +#define HWCAP_SPARC_IMA 0x00400000 +#define HWCAP_SPARC_ASI_CACHE_SPARING 0x00800000 +#define HWCAP_SPARC_PAUSE 0x01000000 +#define HWCAP_SPARC_CBCOND 0x02000000 +#define HWCAP_SPARC_CRYPTO 0x04000000 + +/* Bits present in AT_HWCAP for s390. */ + +#define HWCAP_S390_ESAN3 1 +#define HWCAP_S390_ZARCH 2 +#define HWCAP_S390_STFLE 4 +#define HWCAP_S390_MSA 8 +#define HWCAP_S390_LDISP 16 +#define HWCAP_S390_EIMM 32 +#define HWCAP_S390_DFP 64 +#define HWCAP_S390_HPAGE 128 +#define HWCAP_S390_ETF3EH 256 +#define HWCAP_S390_HIGH_GPRS 512 +#define HWCAP_S390_TE 1024 +#define HWCAP_S390_VXRS 2048 + +/* M68K specific definitions. 
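These PPC_FEATURE* and PPC_FEATURE2* bits are what a Linux kernel reports through the AT_HWCAP and AT_HWCAP2 auxiliary-vector entries defined earlier in this header, so consumers normally just mask them. A sketch, assuming Linux with glibc's getauxval accessor:

    #include <stdbool.h>
    #include <sys/auxv.h> /* getauxval() */

    static bool ppc_has_vsx_and_isa_2_07(void)
    {
        unsigned long hwcap  = getauxval(AT_HWCAP);
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        return (hwcap & PPC_FEATURE_HAS_VSX) != 0 &&
               (hwcap2 & PPC_FEATURE2_ARCH_2_07) != 0;
    }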
*/ +/* We use the top 24 bits to encode information about the + architecture variant. */ +#define EF_M68K_CPU32 0x00810000 +#define EF_M68K_M68000 0x01000000 +#define EF_M68K_CFV4E 0x00008000 +#define EF_M68K_FIDO 0x02000000 +#define EF_M68K_ARCH_MASK \ + (EF_M68K_M68000 | EF_M68K_CPU32 | EF_M68K_CFV4E | EF_M68K_FIDO) + +/* We use the bottom 8 bits to encode information about the + coldfire variant. If we use any of these bits, the top 24 bits are + either 0 or EF_M68K_CFV4E. */ +#define EF_M68K_CF_ISA_MASK 0x0F /* Which ISA */ +#define EF_M68K_CF_ISA_A_NODIV 0x01 /* ISA A except for div */ +#define EF_M68K_CF_ISA_A 0x02 +#define EF_M68K_CF_ISA_A_PLUS 0x03 +#define EF_M68K_CF_ISA_B_NOUSP 0x04 /* ISA_B except for USP */ +#define EF_M68K_CF_ISA_B 0x05 +#define EF_M68K_CF_ISA_C 0x06 +#define EF_M68K_CF_ISA_C_NODIV 0x07 /* ISA C except for div */ +#define EF_M68K_CF_MAC_MASK 0x30 +#define EF_M68K_CF_MAC 0x10 /* MAC */ +#define EF_M68K_CF_EMAC 0x20 /* EMAC */ +#define EF_M68K_CF_EMAC_B 0x30 /* EMAC_B */ +#define EF_M68K_CF_FLOAT 0x40 /* Has float insns */ +#define EF_M68K_CF_MASK 0xFF /* * 68k ELF relocation types @@ -163,6 +655,51 @@ #define R_68K_JMP_SLOT 21 #define R_68K_RELATIVE 22 +/* + * Alpha ELF relocation types + */ +#define R_ALPHA_NONE 0 /* No reloc */ +#define R_ALPHA_REFLONG 1 /* Direct 32 bit */ +#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */ +#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */ +#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_ALPHA_GPDISP 6 /* Add displacement to GP */ +#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_ALPHA_SREL16 9 /* PC relative 16 bit */ +#define R_ALPHA_SREL32 10 /* PC relative 32 bit */ +#define R_ALPHA_SREL64 11 /* PC relative 64 bit */ +#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ +#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */ +#define R_ALPHA_COPY 24 /* Copy symbol at runtime */ +#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */ +#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ +#define R_ALPHA_RELATIVE 27 /* Adjust by program base */ +#define R_ALPHA_BRSGP 28 +#define R_ALPHA_TLSGD 29 +#define R_ALPHA_TLS_LDM 30 +#define R_ALPHA_DTPMOD64 31 +#define R_ALPHA_GOTDTPREL 32 +#define R_ALPHA_DTPREL64 33 +#define R_ALPHA_DTPRELHI 34 +#define R_ALPHA_DTPRELLO 35 +#define R_ALPHA_DTPREL16 36 +#define R_ALPHA_GOTTPREL 37 +#define R_ALPHA_TPREL64 38 +#define R_ALPHA_TPRELHI 39 +#define R_ALPHA_TPRELLO 40 +#define R_ALPHA_TPREL16 41 + +#define SHF_ALPHA_GPREL 0x10000000 + + +/* PowerPC specific definitions. */ + +/* Processor specific flags for the ELF header e_flags field. */ +#define EF_PPC64_ABI 0x3 + /* PowerPC relocations defined by the ABIs */ #define R_PPC_NONE 0 #define R_PPC_ADDR32 1 /* 32bit absolute address */ @@ -206,6 +743,52 @@ #define R_PPC_NUM 37 #endif +/* ARM specific declarations */ + +/* Processor specific flags for the ELF header e_flags field. 
*/ +#define EF_ARM_RELEXEC 0x01 +#define EF_ARM_HASENTRY 0x02 +#define EF_ARM_INTERWORK 0x04 +#define EF_ARM_APCS_26 0x08 +#define EF_ARM_APCS_FLOAT 0x10 +#define EF_ARM_PIC 0x20 +#define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */ +#define EF_NEW_ABI 0x80 +#define EF_OLD_ABI 0x100 +#define EF_ARM_SOFT_FLOAT 0x200 +#define EF_ARM_VFP_FLOAT 0x400 +#define EF_ARM_MAVERICK_FLOAT 0x800 + +/* Other constants defined in the ARM ELF spec. version B-01. */ +#define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */ +#define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */ +#define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */ +#define EF_ARM_EABIMASK 0xFF000000 + +/* Constants defined in AAELF. */ +#define EF_ARM_BE8 0x00800000 +#define EF_ARM_LE8 0x00400000 + +#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK) +#define EF_ARM_EABI_UNKNOWN 0x00000000 +#define EF_ARM_EABI_VER1 0x01000000 +#define EF_ARM_EABI_VER2 0x02000000 +#define EF_ARM_EABI_VER3 0x03000000 +#define EF_ARM_EABI_VER4 0x04000000 +#define EF_ARM_EABI_VER5 0x05000000 + +/* Additional symbol types for Thumb */ +#define STT_ARM_TFUNC 0xd + +/* ARM-specific values for sh_flags */ +#define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */ +#define SHF_ARM_COMDEF 0x80000000 /* Section may be multiply defined + in the input to a link step */ + +/* ARM-specific program header flags */ +#define PF_ARM_SB 0x10000000 /* Segment contains the location + addressed by the static base */ + /* ARM relocs. */ #define R_ARM_NONE 0 /* No reloc */ #define R_ARM_PC24 1 /* PC relative 26 bit branch */ @@ -450,8 +1033,9 @@ #define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */ #define R_390_TLS_TPOFF 56 /* Negate offset in static TLS block. */ +#define R_390_20 57 /* Keep this the last entry. */ -#define R_390_NUM 57 +#define R_390_NUM 58 /* x86-64 relocation types */ #define R_X86_64_NONE 0 /* No reloc */ @@ -464,7 +1048,7 @@ #define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ #define R_X86_64_RELATIVE 8 /* Adjust by program base */ #define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative - offset to GOT */ + offset to GOT */ #define R_X86_64_32 10 /* Direct 32 bit zero extended */ #define R_X86_64_32S 11 /* Direct 32 bit sign extended */ #define R_X86_64_16 12 /* Direct 16 bit zero extended */ @@ -474,6 +1058,204 @@ #define R_X86_64_NUM 16 +/* Legal values for e_flags field of Elf64_Ehdr. */ + +#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */ + +/* HPPA specific definitions. */ + +/* Legal values for e_flags field of Elf32_Ehdr. */ + +#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */ +#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */ +#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */ +#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */ +#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch + prediction. */ +#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */ +#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */ + +/* Defined values for `e_flags & EF_PARISC_ARCH' are: */ + +#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */ +#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */ +#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */ + +/* Additional section indeces. */ + +#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tenatively declared + symbols in ANSI C. 
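Stepping back to the ARM block above: EF_ARM_EABI_VERSION masks the top byte of e_flags, which is how tools distinguish the modern AAELF ABI revisions from the legacy bits that share the low flag positions. A small illustrative sketch:

    static const char *arm_eabi_name(Elf32_Word e_flags)
    {
        switch (EF_ARM_EABI_VERSION(e_flags)) {
        case EF_ARM_EABI_VER5:
            return "EABI v5"; /* EF_ARM_BE8 may additionally mark BE8 code */
        case EF_ARM_EABI_UNKNOWN:
            return "legacy GNU ABI"; /* the EF_ARM_APCS_* bits apply instead */
        default:
            return "other EABI revision";
        }
    }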
*/ +#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */ + +/* Legal values for sh_type field of Elf32_Shdr. */ + +#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */ +#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */ +#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */ + +/* Legal values for sh_flags field of Elf32_Shdr. */ + +#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */ +#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */ +#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */ + +/* Legal values for ST_TYPE subfield of st_info (symbol type). */ + +#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */ + +#define STT_HP_OPAQUE (STT_LOOS + 0x1) +#define STT_HP_STUB (STT_LOOS + 0x2) + +/* HPPA relocs. */ + +#define R_PARISC_NONE 0 /* No reloc. */ +#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */ +#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */ +#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */ +#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */ +#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */ +#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */ +#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */ +#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */ +#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */ +#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */ +#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */ +#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */ +#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */ +#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */ +#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */ +#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */ +#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */ +#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */ +#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */ +#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */ +#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */ +#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */ +#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */ +#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */ +#define R_PARISC_FPTR64 64 /* 64 bits function address. */ +#define R_PARISC_PLABEL32 65 /* 32 bits function address. */ +#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */ +#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */ +#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */ +#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */ +#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */ +#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */ +#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */ +#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */ +#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */ +#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */ +#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */ +#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */ +#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */ +#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */ +#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. 
*/ +#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */ +#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */ +#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */ +#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */ +#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */ +#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */ +#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */ +#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */ +#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */ +#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */ +#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */ +#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */ +#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */ +#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */ +#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */ +#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */ +#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */ +#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */ +#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */ +#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */ +#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */ +#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */ +#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */ +#define R_PARISC_LORESERVE 128 +#define R_PARISC_COPY 128 /* Copy relocation. */ +#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */ +#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */ +#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */ +#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */ +#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */ +#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */ +#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/ +#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */ +#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */ +#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */ +#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */ +#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */ +#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */ +#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */ +#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */ +#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/ +#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/ +#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */ +#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */ +#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */ +#define R_PARISC_HIRESERVE 255 + +/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. 
*/ + +#define PT_HP_TLS (PT_LOOS + 0x0) +#define PT_HP_CORE_NONE (PT_LOOS + 0x1) +#define PT_HP_CORE_VERSION (PT_LOOS + 0x2) +#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3) +#define PT_HP_CORE_COMM (PT_LOOS + 0x4) +#define PT_HP_CORE_PROC (PT_LOOS + 0x5) +#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6) +#define PT_HP_CORE_STACK (PT_LOOS + 0x7) +#define PT_HP_CORE_SHM (PT_LOOS + 0x8) +#define PT_HP_CORE_MMF (PT_LOOS + 0x9) +#define PT_HP_PARALLEL (PT_LOOS + 0x10) +#define PT_HP_FASTBIND (PT_LOOS + 0x11) +#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12) +#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13) +#define PT_HP_STACK (PT_LOOS + 0x14) + +#define PT_PARISC_ARCHEXT 0x70000000 +#define PT_PARISC_UNWIND 0x70000001 + +/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */ + +#define PF_PARISC_SBP 0x08000000 + +#define PF_HP_PAGE_SIZE 0x00100000 +#define PF_HP_FAR_SHARED 0x00200000 +#define PF_HP_NEAR_SHARED 0x00400000 +#define PF_HP_CODE 0x01000000 +#define PF_HP_MODIFY 0x02000000 +#define PF_HP_LAZYSWAP 0x04000000 +#define PF_HP_SBP 0x08000000 + +/* IA-64 specific declarations. */ + +/* Processor specific flags for the Ehdr e_flags field. */ +#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */ +#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */ +#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */ + +/* Processor specific values for the Phdr p_type field. */ +#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */ +#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */ + +/* Processor specific flags for the Phdr p_flags field. */ +#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */ + +/* Processor specific values for the Shdr sh_type field. */ +#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */ +#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */ + +/* Processor specific flags for the Shdr sh_flags field. */ +#define SHF_IA_64_SHORT 0x10000000 /* section near gp */ +#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */ + +/* Processor specific values for the Dyn d_tag field. */ +#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0) +#define DT_IA_64_NUM 1 + /* IA-64 relocations. */ #define R_IA64_NONE 0x00 /* none */ #define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */ @@ -557,4 +1339,409 @@ #define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */ #define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */ +/* RISC-V relocations. 
*/ +#define R_RISCV_NONE 0 +#define R_RISCV_32 1 +#define R_RISCV_64 2 +#define R_RISCV_RELATIVE 3 +#define R_RISCV_COPY 4 +#define R_RISCV_JUMP_SLOT 5 +#define R_RISCV_TLS_DTPMOD32 6 +#define R_RISCV_TLS_DTPMOD64 7 +#define R_RISCV_TLS_DTPREL32 8 +#define R_RISCV_TLS_DTPREL64 9 +#define R_RISCV_TLS_TPREL32 10 +#define R_RISCV_TLS_TPREL64 11 +#define R_RISCV_BRANCH 16 +#define R_RISCV_JAL 17 +#define R_RISCV_CALL 18 +#define R_RISCV_CALL_PLT 19 +#define R_RISCV_GOT_HI20 20 +#define R_RISCV_TLS_GOT_HI20 21 +#define R_RISCV_TLS_GD_HI20 22 +#define R_RISCV_PCREL_HI20 23 +#define R_RISCV_PCREL_LO12_I 24 +#define R_RISCV_PCREL_LO12_S 25 +#define R_RISCV_HI20 26 +#define R_RISCV_LO12_I 27 +#define R_RISCV_LO12_S 28 +#define R_RISCV_TPREL_HI20 29 +#define R_RISCV_TPREL_LO12_I 30 +#define R_RISCV_TPREL_LO12_S 31 +#define R_RISCV_TPREL_ADD 32 +#define R_RISCV_ADD8 33 +#define R_RISCV_ADD16 34 +#define R_RISCV_ADD32 35 +#define R_RISCV_ADD64 36 +#define R_RISCV_SUB8 37 +#define R_RISCV_SUB16 38 +#define R_RISCV_SUB32 39 +#define R_RISCV_SUB64 40 +#define R_RISCV_GNU_VTINHERIT 41 +#define R_RISCV_GNU_VTENTRY 42 +#define R_RISCV_ALIGN 43 +#define R_RISCV_RVC_BRANCH 44 +#define R_RISCV_RVC_JUMP 45 +#define R_RISCV_RVC_LUI 46 +#define R_RISCV_GPREL_I 47 +#define R_RISCV_GPREL_S 48 +#define R_RISCV_TPREL_I 49 +#define R_RISCV_TPREL_S 50 +#define R_RISCV_RELAX 51 +#define R_RISCV_SUB6 52 +#define R_RISCV_SET6 53 +#define R_RISCV_SET8 54 +#define R_RISCV_SET16 55 +#define R_RISCV_SET32 56 + +/* RISC-V ELF Flags. */ +#define EF_RISCV_RVC 0x0001 +#define EF_RISCV_FLOAT_ABI 0x0006 +#define EF_RISCV_FLOAT_ABI_SOFT 0x0000 +#define EF_RISCV_FLOAT_ABI_SINGLE 0x0002 +#define EF_RISCV_FLOAT_ABI_DOUBLE 0x0004 +#define EF_RISCV_FLOAT_ABI_QUAD 0x0006 +#define EF_RISCV_RVE 0x0008 +#define EF_RISCV_TSO 0x0010 + +typedef struct elf32_rel { + Elf32_Addr r_offset; + Elf32_Word r_info; +} Elf32_Rel; + +typedef struct elf64_rel { + Elf64_Addr r_offset; /* Location at which to apply the action */ + Elf64_Xword r_info; /* index and type of relocation */ +} Elf64_Rel; + +typedef struct elf32_rela{ + Elf32_Addr r_offset; + Elf32_Word r_info; + Elf32_Sword r_addend; +} Elf32_Rela; + +typedef struct elf64_rela { + Elf64_Addr r_offset; /* Location at which to apply the action */ + Elf64_Xword r_info; /* index and type of relocation */ + Elf64_Sxword r_addend; /* Constant addend used to compute value */ +} Elf64_Rela; + +typedef struct elf32_sym{ + Elf32_Word st_name; + Elf32_Addr st_value; + Elf32_Word st_size; + unsigned char st_info; + unsigned char st_other; + Elf32_Half st_shndx; +} Elf32_Sym; + +typedef struct elf64_sym { + Elf64_Word st_name; /* Symbol name, index in string tbl */ + unsigned char st_info; /* Type and binding attributes */ + unsigned char st_other; /* No defined meaning, 0 */ + Elf64_Half st_shndx; /* Associated section index */ + Elf64_Addr st_value; /* Value of the symbol */ + Elf64_Xword st_size; /* Associated symbol size */ +} Elf64_Sym; + + +#define EI_NIDENT 16 + +/* Special value for e_phnum. This indicates that the real number of + program headers is too large to fit into e_phnum. Instead the real + value is in the field sh_info of section 0. 
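The EF_RISCV_* bits above describe the ISA options a RISC-V object was built with; a loader typically checks the float-ABI field and the compressed-instruction bit before accepting an image. A sketch over an illustrative flags word:

    #include <stdbool.h>

    static void classify_riscv_flags(Elf32_Word e_flags)
    {
        bool has_rvc = (e_flags & EF_RISCV_RVC) != 0; /* C extension used */

        switch (e_flags & EF_RISCV_FLOAT_ABI) {
        case EF_RISCV_FLOAT_ABI_SOFT:   /* FP args passed in integer regs */
            break;
        case EF_RISCV_FLOAT_ABI_SINGLE: /* hard-float, 32-bit FP registers */
            break;
        case EF_RISCV_FLOAT_ABI_DOUBLE: /* hard-float, 64-bit FP registers */
            break;
        case EF_RISCV_FLOAT_ABI_QUAD:   /* hard-float, 128-bit FP registers */
            break;
        }
        (void)has_rvc;
    }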
*/ +#define PN_XNUM 0xffff + +typedef struct elf32_hdr{ + unsigned char e_ident[EI_NIDENT]; + Elf32_Half e_type; + Elf32_Half e_machine; + Elf32_Word e_version; + Elf32_Addr e_entry; /* Entry point */ + Elf32_Off e_phoff; + Elf32_Off e_shoff; + Elf32_Word e_flags; + Elf32_Half e_ehsize; + Elf32_Half e_phentsize; + Elf32_Half e_phnum; + Elf32_Half e_shentsize; + Elf32_Half e_shnum; + Elf32_Half e_shstrndx; +} Elf32_Ehdr; + +typedef struct elf64_hdr { + unsigned char e_ident[16]; /* ELF "magic number" */ + Elf64_Half e_type; + Elf64_Half e_machine; + Elf64_Word e_version; + Elf64_Addr e_entry; /* Entry point virtual address */ + Elf64_Off e_phoff; /* Program header table file offset */ + Elf64_Off e_shoff; /* Section header table file offset */ + Elf64_Word e_flags; + Elf64_Half e_ehsize; + Elf64_Half e_phentsize; + Elf64_Half e_phnum; + Elf64_Half e_shentsize; + Elf64_Half e_shnum; + Elf64_Half e_shstrndx; +} Elf64_Ehdr; + +/* These constants define the permissions on sections in the program + header, p_flags. */ +#define PF_R 0x4 +#define PF_W 0x2 +#define PF_X 0x1 + +typedef struct elf32_phdr{ + Elf32_Word p_type; + Elf32_Off p_offset; + Elf32_Addr p_vaddr; + Elf32_Addr p_paddr; + Elf32_Word p_filesz; + Elf32_Word p_memsz; + Elf32_Word p_flags; + Elf32_Word p_align; +} Elf32_Phdr; + +typedef struct elf64_phdr { + Elf64_Word p_type; + Elf64_Word p_flags; + Elf64_Off p_offset; /* Segment file offset */ + Elf64_Addr p_vaddr; /* Segment virtual address */ + Elf64_Addr p_paddr; /* Segment physical address */ + Elf64_Xword p_filesz; /* Segment size in file */ + Elf64_Xword p_memsz; /* Segment size in memory */ + Elf64_Xword p_align; /* Segment alignment, file & memory */ +} Elf64_Phdr; + +/* sh_type */ +#define SHT_NULL 0 +#define SHT_PROGBITS 1 +#define SHT_SYMTAB 2 +#define SHT_STRTAB 3 +#define SHT_RELA 4 +#define SHT_HASH 5 +#define SHT_DYNAMIC 6 +#define SHT_NOTE 7 +#define SHT_NOBITS 8 +#define SHT_REL 9 +#define SHT_SHLIB 10 +#define SHT_DYNSYM 11 +#define SHT_NUM 12 +#define SHT_LOPROC 0x70000000 +#define SHT_HIPROC 0x7fffffff +#define SHT_LOUSER 0x80000000 +#define SHT_HIUSER 0xffffffff +#define SHT_MIPS_LIST 0x70000000 +#define SHT_MIPS_CONFLICT 0x70000002 +#define SHT_MIPS_GPTAB 0x70000003 +#define SHT_MIPS_UCODE 0x70000004 + +/* sh_flags */ +#define SHF_WRITE 0x1 +#define SHF_ALLOC 0x2 +#define SHF_EXECINSTR 0x4 +#define SHF_MASKPROC 0xf0000000 +#define SHF_MIPS_GPREL 0x10000000 + +/* special section indexes */ +#define SHN_UNDEF 0 +#define SHN_LORESERVE 0xff00 +#define SHN_LOPROC 0xff00 +#define SHN_HIPROC 0xff1f +#define SHN_ABS 0xfff1 +#define SHN_COMMON 0xfff2 +#define SHN_HIRESERVE 0xffff +#define SHN_MIPS_ACCOMON 0xff00 + +typedef struct elf32_shdr { + Elf32_Word sh_name; + Elf32_Word sh_type; + Elf32_Word sh_flags; + Elf32_Addr sh_addr; + Elf32_Off sh_offset; + Elf32_Word sh_size; + Elf32_Word sh_link; + Elf32_Word sh_info; + Elf32_Word sh_addralign; + Elf32_Word sh_entsize; +} Elf32_Shdr; + +typedef struct elf64_shdr { + Elf64_Word sh_name; /* Section name, index in string tbl */ + Elf64_Word sh_type; /* Type of section */ + Elf64_Xword sh_flags; /* Miscellaneous section attributes */ + Elf64_Addr sh_addr; /* Section virtual addr at execution */ + Elf64_Off sh_offset; /* Section file offset */ + Elf64_Xword sh_size; /* Size of section in bytes */ + Elf64_Word sh_link; /* Index of another section */ + Elf64_Word sh_info; /* Additional section information */ + Elf64_Xword sh_addralign; /* Section alignment */ + Elf64_Xword sh_entsize; /* Entry size if section holds table */ +} 
Elf64_Shdr; + +#define EI_MAG0 0 /* e_ident[] indexes */ +#define EI_MAG1 1 +#define EI_MAG2 2 +#define EI_MAG3 3 +#define EI_CLASS 4 +#define EI_DATA 5 +#define EI_VERSION 6 +#define EI_OSABI 7 +#define EI_PAD 8 + +#define ELFOSABI_NONE 0 /* UNIX System V ABI */ +#define ELFOSABI_SYSV 0 /* Alias. */ +#define ELFOSABI_HPUX 1 /* HP-UX */ +#define ELFOSABI_NETBSD 2 /* NetBSD. */ +#define ELFOSABI_LINUX 3 /* Linux. */ +#define ELFOSABI_SOLARIS 6 /* Sun Solaris. */ +#define ELFOSABI_AIX 7 /* IBM AIX. */ +#define ELFOSABI_IRIX 8 /* SGI Irix. */ +#define ELFOSABI_FREEBSD 9 /* FreeBSD. */ +#define ELFOSABI_TRU64 10 /* Compaq TRU64 UNIX. */ +#define ELFOSABI_MODESTO 11 /* Novell Modesto. */ +#define ELFOSABI_OPENBSD 12 /* OpenBSD. */ +#define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC */ +#define ELFOSABI_ARM 97 /* ARM */ +#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */ + +#define ELFMAG0 0x7f /* EI_MAG */ +#define ELFMAG1 'E' +#define ELFMAG2 'L' +#define ELFMAG3 'F' +#define ELFMAG "\177ELF" +#define SELFMAG 4 + +#define ELFCLASSNONE 0 /* EI_CLASS */ +#define ELFCLASS32 1 +#define ELFCLASS64 2 +#define ELFCLASSNUM 3 + +#define ELFDATANONE 0 /* e_ident[EI_DATA] */ +#define ELFDATA2LSB 1 +#define ELFDATA2MSB 2 + +#define EV_NONE 0 /* e_version, EI_VERSION */ +#define EV_CURRENT 1 +#define EV_NUM 2 + +/* Notes used in ET_CORE */ +#define NT_PRSTATUS 1 +#define NT_FPREGSET 2 +#define NT_PRFPREG 2 +#define NT_PRPSINFO 3 +#define NT_TASKSTRUCT 4 +#define NT_AUXV 6 +#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */ +#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */ +#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */ +#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 (lower half) */ +#define NT_S390_PREFIX 0x305 /* s390 prefix register */ +#define NT_S390_CTRS 0x304 /* s390 control registers */ +#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */ +#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */ +#define NT_S390_TIMER 0x301 /* s390 timer register */ +#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ +#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */ +#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ +#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ +#define NT_ARM_TLS 0x401 /* ARM TLS register */ +#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ +#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ +#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ +#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension regs */ + +/* + * Physical entry point into the kernel. + * + * 32bit entry point into the kernel. When requested to launch the + * guest kernel, use this entry point to launch the guest in 32-bit + * protected mode with paging disabled. + * + * [ Corresponding definition in Linux kernel: include/xen/interface/elfnote.h ] + */ +#define XEN_ELFNOTE_PHYS32_ENTRY 18 /* 0x12 */ + +/* Note header in a PT_NOTE section */ +typedef struct elf32_note { + Elf32_Word n_namesz; /* Name size */ + Elf32_Word n_descsz; /* Content size */ + Elf32_Word n_type; /* Content type */ +} Elf32_Nhdr; + +/* Note header in a PT_NOTE section */ +typedef struct elf64_note { + Elf64_Word n_namesz; /* Name size */ + Elf64_Word n_descsz; /* Content size */ + Elf64_Word n_type; /* Content type */ +} Elf64_Nhdr; + + +/* This data structure represents a PT_LOAD segment. 
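Given the e_ident indexes and magic values above, recognizing (say) a little-endian 64-bit ELF image is a handful of byte compares; a minimal sketch:

    #include <stddef.h>
    #include <string.h>

    static int looks_like_elf64_le(const unsigned char *buf, size_t len)
    {
        return len >= EI_NIDENT &&
               memcmp(buf, ELFMAG, SELFMAG) == 0 && /* "\177ELF" */
               buf[EI_CLASS] == ELFCLASS64 &&
               buf[EI_DATA] == ELFDATA2LSB &&
               buf[EI_VERSION] == EV_CURRENT;
    }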
*/ +struct elf32_fdpic_loadseg { + /* Core address to which the segment is mapped. */ + Elf32_Addr addr; + /* VMA recorded in the program header. */ + Elf32_Addr p_vaddr; + /* Size of this segment in memory. */ + Elf32_Word p_memsz; +}; +struct elf32_fdpic_loadmap { + /* Protocol version number, must be zero. */ + Elf32_Half version; + /* Number of segments in this map. */ + Elf32_Half nsegs; + /* The actual memory map. */ + struct elf32_fdpic_loadseg segs[/*nsegs*/]; +}; + +#ifdef ELF_CLASS +#if ELF_CLASS == ELFCLASS32 + +#define elfhdr elf32_hdr +#define elf_phdr elf32_phdr +#define elf_note elf32_note +#define elf_shdr elf32_shdr +#define elf_sym elf32_sym +#define elf_addr_t Elf32_Off +#define elf_rela elf32_rela + +#ifdef ELF_USES_RELOCA +# define ELF_RELOC Elf32_Rela +#else +# define ELF_RELOC Elf32_Rel +#endif + +#else + +#define elfhdr elf64_hdr +#define elf_phdr elf64_phdr +#define elf_note elf64_note +#define elf_shdr elf64_shdr +#define elf_sym elf64_sym +#define elf_addr_t Elf64_Off +#define elf_rela elf64_rela + +#ifdef ELF_USES_RELOCA +# define ELF_RELOC Elf64_Rela +#else +# define ELF_RELOC Elf64_Rel +#endif + +#endif /* ELF_CLASS */ + +#ifndef ElfW +# if ELF_CLASS == ELFCLASS32 +# define ElfW(x) Elf32_ ## x +# define ELFW(x) ELF32_ ## x +# else +# define ElfW(x) Elf64_ ## x +# define ELFW(x) ELF64_ ## x +# endif +#endif + +#endif /* ELF_CLASS */ + + #endif /* QEMU_ELF_H */ diff --git a/qemu/include/exec/address-spaces.h b/qemu/include/exec/address-spaces.h deleted file mode 100644 index 58e825a6..00000000 --- a/qemu/include/exec/address-spaces.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Internal memory management interfaces - * - * Copyright 2011 Red Hat, Inc. and/or its affiliates - * - * Authors: - * Avi Kivity - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - */ - -#ifndef EXEC_MEMORY_H -#define EXEC_MEMORY_H - -/* - * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless - * you're one of them. - */ - -#include "exec/memory.h" - -#ifndef CONFIG_USER_ONLY - -/* Get the root memory region. This interface should only be used temporarily - * until a proper bus interface is available. - */ -MemoryRegion *get_system_memory(struct uc_struct *uc); - -extern AddressSpace address_space_memory; - -#endif - -#endif diff --git a/qemu/include/exec/cpu-all.h b/qemu/include/exec/cpu-all.h index 6d196ba8..ddac7207 100644 --- a/qemu/include/exec/cpu-all.h +++ b/qemu/include/exec/cpu-all.h @@ -19,22 +19,29 @@ #ifndef CPU_ALL_H #define CPU_ALL_H -#include "qemu-common.h" #include "exec/cpu-common.h" #include "exec/memory.h" #include "qemu/thread.h" -#include "qom/cpu.h" +#include "hw/core/cpu.h" + +#include + +#if 0 +#include "qemu/rcu.h" +#endif + +#define EXCP_INTERRUPT 0x10000 /* async interruption */ +#define EXCP_HLT 0x10001 /* hlt instruction reached */ +#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */ +#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */ +#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */ +#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */ /* some important defines: - * - * WORDS_ALIGNED : if defined, the host cpu can only make word aligned - * memory accesses. * * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and * otherwise little endian. 
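The ELF_CLASS dispatch above lets consumers stay width-agnostic: defining ELF_CLASS before including the header selects the whole 32- or 64-bit family at once. A sketch of how a consumer might use it (the include path is assumed):

    /* Hypothetical consumer: with ELF_CLASS == ELFCLASS64, elfhdr expands
     * to elf64_hdr and ElfW(Phdr) to Elf64_Phdr, so the same source
     * compiles for either width. */
    #include <stdint.h>
    #define ELF_CLASS ELFCLASS64
    #include "elf.h"

    static uint64_t first_segment_vaddr(const struct elfhdr *eh,
                                        const ElfW(Phdr) *ph)
    {
        return eh->e_phnum ? ph[0].p_vaddr : 0;
    }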
* - * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet)) - * * TARGET_WORDS_BIGENDIAN : same for target cpu */ @@ -115,43 +122,9 @@ static inline void tswap64s(uint64_t *s) #define bswaptls(s) bswap64s(s) #endif -/* CPU memory access without any memory or io remapping */ - -/* - * the generic syntax for the memory accesses is: - * - * load: ld{type}{sign}{size}{endian}_{access_type}(ptr) - * - * store: st{type}{size}{endian}_{access_type}(ptr, val) - * - * type is: - * (empty): integer access - * f : float access - * - * sign is: - * (empty): for floats or 32 bit size - * u : unsigned - * s : signed - * - * size is: - * b: 8 bits - * w: 16 bits - * l: 32 bits - * q: 64 bits - * - * endian is: - * (empty): target cpu endianness or 8 bit access - * r : reversed target cpu endianness (not implemented yet) - * be : big endian (not implemented yet) - * le : little endian (not implemented yet) - * - * access_type is: - * raw : host memory access - * user : user mode access using soft MMU - * kernel : kernel mode access using soft MMU +/* Target-endianness CPU memory access functions. These fit into the + * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h. */ - -/* target-endianness CPU memory access functions */ #if defined(TARGET_WORDS_BIGENDIAN) #define lduw_p(p) lduw_be_p(p) #define ldsw_p(p) ldsw_be_p(p) @@ -164,6 +137,8 @@ static inline void tswap64s(uint64_t *s) #define stq_p(p, v) stq_be_p(p, v) #define stfl_p(p, v) stfl_be_p(p, v) #define stfq_p(p, v) stfq_be_p(p, v) +#define ldn_p(p, sz) ldn_be_p(p, sz) +#define stn_p(p, sz, v) stn_be_p(p, sz, v) #else #define lduw_p(p) lduw_le_p(p) #define ldsw_p(p) ldsw_le_p(p) @@ -176,39 +151,102 @@ static inline void tswap64s(uint64_t *s) #define stq_p(p, v) stq_le_p(p, v) #define stfl_p(p, v) stfl_le_p(p, v) #define stfq_p(p, v) stfq_le_p(p, v) +#define ldn_p(p, sz) ldn_le_p(p, sz) +#define stn_p(p, sz, v) stn_le_p(p, sz, v) #endif /* MMU memory access macros */ +#include "exec/hwaddr.h" -#if defined(CONFIG_USER_ONLY) -#include -#include "exec/user/abitypes.h" - -/* On some host systems the guest address space is reserved on the host. - * This allows the guest address space to be offset to a convenient location. - */ -#if defined(CONFIG_USE_GUEST_BASE) -extern unsigned long guest_base; -extern int have_guest_base; -extern unsigned long reserved_va; -#define GUEST_BASE guest_base -#define RESERVED_VA reserved_va +#ifdef UNICORN_ARCH_POSTFIX +#define SUFFIX UNICORN_ARCH_POSTFIX #else -#define GUEST_BASE 0ul -#define RESERVED_VA 0ul +#define SUFFIX #endif +#define ARG1 as +#define ARG1_DECL AddressSpace *as +#define TARGET_ENDIANNESS +#include "exec/memory_ldst.inc.h" -#define GUEST_ADDR_MAX (RESERVED_VA ? 
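The repeated SUFFIX/ARG1 includes above stamp out one accessor family per variant from a single template; the mechanism is plain token pasting. A reduced, self-contained model of one expansion:

    #include <stdint.h>

    #define xglue(x, y) x ## y
    #define glue(x, y)  xglue(x, y)

    /* One "template instantiation": with SUFFIX = _le this defines
     * ldl_le(), analogous to how memory_ldst.inc.h is included once per
     * SUFFIX to emit the plain, _cached_slow, etc. accessor families. */
    #define SUFFIX _le
    static inline uint32_t glue(ldl, SUFFIX)(const void *ptr)
    {
        const uint8_t *p = ptr;
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }
    #undef SUFFIX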
RESERVED_VA : \ - (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1) +#ifdef UNICORN_ARCH_POSTFIX +#define SUFFIX glue(_cached_slow, UNICORN_ARCH_POSTFIX) +#else +#define SUFFIX _cached_slow #endif +#define ARG1 cache +#define ARG1_DECL MemoryRegionCache *cache +#define TARGET_ENDIANNESS +#include "exec/memory_ldst.inc.h" + +static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val) +{ +#ifdef UNICORN_ARCH_POSTFIX + glue(address_space_stl_notdirty, UNICORN_ARCH_POSTFIX) + (as->uc, as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); +#else + address_space_stl_notdirty(as->uc, as, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +#endif +} + +#ifdef UNICORN_ARCH_POSTFIX +#define SUFFIX UNICORN_ARCH_POSTFIX +#else +#define SUFFIX +#endif +#define ARG1 as +#define ARG1_DECL AddressSpace *as +#define TARGET_ENDIANNESS +#include "exec/memory_ldst_phys.inc.h" + +/* Inline fast path for direct RAM access. */ +#define ENDIANNESS +#include "exec/memory_ldst_cached.inc.h" + +#ifdef UNICORN_ARCH_POSTFIX +#define SUFFIX glue(_cached, UNICORN_ARCH_POSTFIX) +#else +#define SUFFIX _cached +#endif +#define ARG1 cache +#define ARG1_DECL MemoryRegionCache *cache +#define TARGET_ENDIANNESS +#include "exec/memory_ldst_phys.inc.h" /* page related stuff */ -#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) -#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1) -#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK) +#ifdef TARGET_PAGE_BITS_VARY +typedef struct TargetPageBits { + bool decided; + int bits; + target_long mask; +} TargetPageBits; +#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY) +extern const TargetPageBits target_page; +#else +extern TargetPageBits target_page; +#endif -#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask) +#ifdef CONFIG_DEBUG_TCG +#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; }) +#define TARGET_PAGE_MASK ({ assert(target_page.decided); target_page.mask; }) +#else +#define TARGET_PAGE_BITS uc->init_target_page->bits +#define TARGET_PAGE_MASK uc->init_target_page->mask +#endif +#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK) // qq +#else +#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS +#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) +#define TARGET_PAGE_MASK ((target_ulong)-1 << TARGET_PAGE_BITS) +#endif + +#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE) + +#define HOST_PAGE_ALIGN(uc, addr) ROUND_UP((addr), uc->qemu_host_page_size) +#if 0 +#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), uc->qemu_real_host_page_size) +#endif /* same as PROT_xxx */ #define PAGE_READ 0x0001 @@ -219,16 +257,9 @@ extern unsigned long reserved_va; /* original state of the write flag (used when tracking self-modifying code */ #define PAGE_WRITE_ORG 0x0010 -#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) -/* FIXME: Code that sets/uses this is broken and needs to go away. */ -#define PAGE_RESERVED 0x0020 -#endif - -#if defined(CONFIG_USER_ONLY) -//void page_dump(FILE *f); - -int page_get_flags(target_ulong address); -#endif +/* Invalidate the TLB entry immediately, helpful for s390x + * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */ +#define PAGE_WRITE_INV 0x0040 CPUArchState *cpu_copy(CPUArchState *env); @@ -284,26 +315,131 @@ CPUArchState *cpu_copy(CPUArchState *env); | CPU_INTERRUPT_TGT_EXT_3 \ | CPU_INTERRUPT_TGT_EXT_4) -#if !defined(CONFIG_USER_ONLY) - -/* memory API */ - -/* Flags stored in the low bits of the TLB virtual address. 
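With TARGET_PAGE_BITS_VARY the mask is looked up at runtime, but the arithmetic is unchanged: the page size is recovered from the mask as -(int)TARGET_PAGE_MASK, and TARGET_PAGE_ALIGN is a plain round-up. A quick numeric check, assuming the common 4 KiB page (TARGET_PAGE_BITS == 12):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_BITS 12
    #define PAGE_MASK ((uint64_t)-1 << PAGE_BITS)   /* ...fffff000 */
    #define PAGE_SIZE (-(int)PAGE_MASK)             /* 0x1000 */
    #define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        assert(PAGE_SIZE == 0x1000);
        assert(PAGE_ALIGN(0x1234) == 0x2000);       /* rounds up */
        assert(PAGE_ALIGN(0x2000) == 0x2000);       /* already aligned */
        return 0;
    }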
These are - defined so that fast path ram access is all zeros. */ +/* + * Flags stored in the low bits of the TLB virtual address. + * These are defined so that fast path ram access is all zeros. + * The flags all must be between TARGET_PAGE_BITS and + * maximum address alignment bit. + * + * Use TARGET_PAGE_BITS_MIN so that these bits are constant + * when TARGET_PAGE_BITS_VARY is in effect. + */ /* Zero if TLB entry is valid. */ -#define TLB_INVALID_MASK (1 << 3) +#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1)) /* Set if TLB entry references a clean RAM page. The iotlb entry will contain the page physical address. */ -#define TLB_NOTDIRTY (1 << 4) +#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2)) /* Set if TLB entry is an IO callback. */ -#define TLB_MMIO (1 << 5) +#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3)) +/* Set if TLB entry contains a watchpoint. */ +#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4)) +/* Set if TLB entry requires byte swap. */ +#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5)) +/* Set if TLB entry writes ignored. */ +#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6)) -ram_addr_t last_ram_offset(struct uc_struct *uc); -void qemu_mutex_lock_ramlist(struct uc_struct *uc); -void qemu_mutex_unlock_ramlist(struct uc_struct *uc); -#endif /* !CONFIG_USER_ONLY */ +/* Use this mask to check interception with an alignment mask + * in a TCG backend. + */ +#define TLB_FLAGS_MASK \ + (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \ + | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE) + +/** + * tlb_hit_page: return true if page aligned @addr is a hit against the + * TLB entry @tlb_addr + * + * @addr: virtual address to test (must be page aligned) + * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value) + */ +static inline bool tlb_hit_page(struct uc_struct *uc, target_ulong tlb_addr, target_ulong addr) +{ + return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)); +} + +/** + * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr + * + * @addr: virtual address to test (need not be page aligned) + * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value) + */ +static inline bool tlb_hit(struct uc_struct *uc, target_ulong tlb_addr, target_ulong addr) +{ + return tlb_hit_page(uc, tlb_addr, addr & TARGET_PAGE_MASK); +} int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, - uint8_t *buf, int len, int is_write); + void *ptr, target_ulong len, bool is_write); + +int cpu_exec(struct uc_struct *uc, CPUState *cpu); + +/** + * cpu_set_cpustate_pointers(cpu) + * @cpu: The cpu object + * + * Set the generic pointers in CPUState into the outer object. + */ +static inline void cpu_set_cpustate_pointers(ArchCPU *cpu) +{ + cpu->parent_obj.env_ptr = &cpu->env; + cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr; +} + +/** + * env_archcpu(env) + * @env: The architecture environment + * + * Return the ArchCPU associated with the environment. + */ +static inline ArchCPU *env_archcpu(CPUArchState *env) +{ + return container_of(env, ArchCPU, env); +} + +/** + * env_cpu(env) + * @env: The architecture environment + * + * Return the CPUState associated with the environment. + */ +static inline CPUState *env_cpu(CPUArchState *env) +{ + return &env_archcpu(env)->parent_obj; +} + +/** + * env_neg(env) + * @env: The architecture environment + * + * Return the CPUNegativeOffsetState associated with the environment. 
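tlb_hit_page() above folds several rejections into one compare: because TLB_INVALID_MASK sits below TARGET_PAGE_BITS, masking the stored entry with (TARGET_PAGE_MASK | TLB_INVALID_MASK) keeps the page number plus the invalid bit, so a stale entry can never equal a page-aligned lookup address. A reduced numeric model, assuming 12-bit pages:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_MASK    0xfffffffffffff000ull
    #define INVALID_MASK (1ull << 11)          /* TARGET_PAGE_BITS_MIN - 1 */

    static int hit(uint64_t tlb_addr, uint64_t addr)  /* addr page-aligned */
    {
        return addr == (tlb_addr & (PAGE_MASK | INVALID_MASK));
    }

    int main(void)
    {
        assert(hit(0x4000, 0x4000));                  /* valid entry */
        assert(!hit(0x4000 | INVALID_MASK, 0x4000));  /* invalidated */
        assert(!hit(0x5000, 0x4000));                 /* different page */
        return 0;
    }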
+ */ +static inline CPUNegativeOffsetState *env_neg(CPUArchState *env) +{ + ArchCPU *arch_cpu = container_of(env, ArchCPU, env); + return &arch_cpu->neg; +} + +/** + * cpu_neg(cpu) + * @cpu: The generic CPUState + * + * Return the CPUNegativeOffsetState associated with the cpu. + */ +static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu) +{ + ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj); + return &arch_cpu->neg; +} + +/** + * env_tlb(env) + * @env: The architecture environment + * + * Return the CPUTLB state associated with the environment. + */ +static inline CPUTLB *env_tlb(CPUArchState *env) +{ + return &env_neg(env)->tlb; +} #endif /* CPU_ALL_H */ diff --git a/qemu/include/exec/cpu-common.h b/qemu/include/exec/cpu-common.h index 40664d77..28ba0e0e 100644 --- a/qemu/include/exec/cpu-common.h +++ b/qemu/include/exec/cpu-common.h @@ -1,24 +1,16 @@ #ifndef CPU_COMMON_H -#define CPU_COMMON_H 1 +#define CPU_COMMON_H /* CPU interfaces that are target independent. */ -struct uc_struct; - -#ifndef CONFIG_USER_ONLY #include "exec/hwaddr.h" -#endif -#include "qemu/bswap.h" -#include "qemu/queue.h" +/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */ +void qemu_init_cpu_list(void); +void cpu_list_lock(void); +void cpu_list_unlock(void); -typedef enum MMUAccessType { - MMU_DATA_LOAD = 0, - MMU_DATA_STORE = 1, - MMU_INST_FETCH = 2 -} MMUAccessType; - -#if !defined(CONFIG_USER_ONLY) +void tcg_flush_softmmu_tlb(struct uc_struct *uc); enum device_endian { DEVICE_NATIVE_ENDIAN, @@ -26,95 +18,57 @@ enum device_endian { DEVICE_LITTLE_ENDIAN, }; -/* address in the RAM (different from a physical address) */ -#if defined(CONFIG_XEN_BACKEND) -typedef uint64_t ram_addr_t; -# define RAM_ADDR_MAX UINT64_MAX -# define RAM_ADDR_FMT "%" PRIx64 +#if defined(HOST_WORDS_BIGENDIAN) +#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN #else +#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN +#endif + +/* address in the RAM (different from a physical address) */ typedef uintptr_t ram_addr_t; # define RAM_ADDR_MAX UINTPTR_MAX # define RAM_ADDR_FMT "%" PRIxPTR -#endif - -extern ram_addr_t ram_size; /* memory API */ typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value); typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr); -void qemu_ram_remap(struct uc_struct *uc, ram_addr_t addr, ram_addr_t length); /* This should not be used by devices. 
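env_archcpu(), env_cpu() and friends above rely only on struct layout: env is embedded in ArchCPU, so container_of() subtracts a compile-time offset instead of chasing a pointer. A stand-alone model of the trick (the types here are stand-ins, not the real definitions):

    #include <assert.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    typedef struct { int regs[16]; } EnvModel;  /* stand-in CPUArchState */
    typedef struct {
        int parent_obj;                         /* stand-in CPUState */
        EnvModel env;
    } CPUModel;                                 /* stand-in ArchCPU */

    int main(void)
    {
        CPUModel cpu;
        EnvModel *env = &cpu.env;
        assert(container_of(env, CPUModel, env) == &cpu);
        return 0;
    }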
*/ -MemoryRegion *qemu_ram_addr_from_host(struct uc_struct* uc, void *ptr, ram_addr_t *ram_addr); -void qemu_ram_set_idstr(struct uc_struct *uc, ram_addr_t addr, const char *name, DeviceState *dev); -void qemu_ram_unset_idstr(struct uc_struct *uc, ram_addr_t addr); +ram_addr_t qemu_ram_addr_from_host(struct uc_struct *uc, void *ptr); +RAMBlock *qemu_ram_block_from_host(struct uc_struct *uc, void *ptr, + bool round_offset, ram_addr_t *offset); +ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host); +void *qemu_ram_get_host_addr(RAMBlock *rb); +ram_addr_t qemu_ram_get_offset(RAMBlock *rb); +ram_addr_t qemu_ram_get_used_length(RAMBlock *rb); +bool qemu_ram_is_shared(RAMBlock *rb); -bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, - int len, int is_write); +size_t qemu_ram_pagesize(RAMBlock *block); +size_t qemu_ram_pagesize_largest(void); + +bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, void *buf, + hwaddr len, bool is_write); static inline void cpu_physical_memory_read(AddressSpace *as, hwaddr addr, - void *buf, int len) + void *buf, hwaddr len) { - cpu_physical_memory_rw(as, addr, buf, len, 0); + cpu_physical_memory_rw(as, addr, buf, len, false); } static inline void cpu_physical_memory_write(AddressSpace *as, hwaddr addr, - const void *buf, int len) + const void *buf, hwaddr len) { - cpu_physical_memory_rw(as, addr, (void *)buf, len, 1); + cpu_physical_memory_rw(as, addr, (void *)buf, len, true); } void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr, hwaddr *plen, - int is_write); + bool is_write); void cpu_physical_memory_unmap(AddressSpace *as, void *buffer, hwaddr len, - int is_write, hwaddr access_len); -void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)); + bool is_write, hwaddr access_len); bool cpu_physical_memory_is_io(AddressSpace *as, hwaddr phys_addr); -/* Coalesced MMIO regions are areas where write operations can be reordered. - * This usually implies that write operations are side-effect free. This allows - * batching which can make a major impact on performance when using - * virtualization. 
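The read/write wrappers above are just the two directions of cpu_physical_memory_rw() with the bool spelled out. A hypothetical caller that patches one byte of guest-physical memory (the AddressSpace is assumed to come from elsewhere):

    /* Set bits in a byte of guest physical memory. */
    static void guest_phys_or_byte(AddressSpace *as, hwaddr addr, uint8_t bits)
    {
        uint8_t b;
        cpu_physical_memory_read(as, addr, &b, 1);
        b |= bits;
        cpu_physical_memory_write(as, addr, &b, 1);
    }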
- */ -void qemu_flush_coalesced_mmio_buffer(void); +void cpu_flush_icache_range(AddressSpace *as, hwaddr start, hwaddr len); -uint32_t ldub_phys(AddressSpace *as, hwaddr addr); -uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr); -uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr); -uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr); -uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr); -uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr); -uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr); -void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val); -void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val); -void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val); -void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val); -void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val); -void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val); -void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val); +int ram_block_discard_range(struct uc_struct *uc, RAMBlock *rb, uint64_t start, size_t length); -#ifdef NEED_CPU_H -uint32_t lduw_phys(AddressSpace *as, hwaddr addr); -uint32_t ldl_phys(AddressSpace *as, hwaddr addr); -uint64_t ldq_phys(AddressSpace *as, hwaddr addr); -void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val); -void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val); -void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val); -void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val); -#endif - -void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr, - const uint8_t *buf, int len); -void cpu_flush_icache_range(AddressSpace *as, hwaddr start, int len); - -extern struct MemoryRegion io_mem_rom; -extern struct MemoryRegion io_mem_notdirty; - -typedef void (RAMBlockIterFunc)(void *host_addr, - ram_addr_t offset, ram_addr_t length, void *opaque); - -void qemu_ram_foreach_block(struct uc_struct *uc, RAMBlockIterFunc func, void *opaque); - -#endif - -#endif /* !CPU_COMMON_H */ +#endif /* CPU_COMMON_H */ diff --git a/qemu/include/exec/cpu-defs.h b/qemu/include/exec/cpu-defs.h index 84226560..eceaf157 100644 --- a/qemu/include/exec/cpu-defs.h +++ b/qemu/include/exec/cpu-defs.h @@ -23,16 +23,35 @@ #error cpu.h included from common code #endif -#include "config.h" -#include "unicorn/platform.h" -#include "qemu/osdep.h" -#include "qemu/queue.h" -#ifndef CONFIG_USER_ONLY +#include "qemu/host-utils.h" +#include "qemu/thread.h" +#include "tcg-target.h" #include "exec/hwaddr.h" -#endif +#include "exec/memattrs.h" +#include "hw/core/cpu.h" + +#include "cpu-param.h" #ifndef TARGET_LONG_BITS -#error TARGET_LONG_BITS must be defined before including this header +# error TARGET_LONG_BITS must be defined in cpu-param.h +#endif +#ifndef NB_MMU_MODES +# error NB_MMU_MODES must be defined in cpu-param.h +#endif +#ifndef TARGET_PHYS_ADDR_SPACE_BITS +# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h +#endif +#ifndef TARGET_VIRT_ADDR_SPACE_BITS +# error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h +#endif +#ifndef TARGET_PAGE_BITS +# ifdef TARGET_PAGE_BITS_VARY +# ifndef TARGET_PAGE_BITS_MIN +# error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h +# endif +# else +# error TARGET_PAGE_BITS must be defined in cpu-param.h +# endif #endif #define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) @@ -54,23 +73,6 @@ typedef uint64_t target_ulong; #error TARGET_LONG_SIZE undefined #endif -#define EXCP_INTERRUPT 0x10000 /* async interruption */ -#define EXCP_HLT 0x10001 /* hlt instruction reached */ -#define EXCP_DEBUG 0x10002 
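cpu-defs.h now refuses to build unless the per-target cpu-param.h supplies the geometry up front. A hypothetical cpu-param.h satisfying every check above (the values are illustrative, loosely modeled on a 64-bit target with 4 KiB pages):

    /* cpu-param.h -- hypothetical target parameters */
    #define TARGET_LONG_BITS            64
    #define NB_MMU_MODES                2
    #define TARGET_PHYS_ADDR_SPACE_BITS 48
    #define TARGET_VIRT_ADDR_SPACE_BITS 48
    #define TARGET_PAGE_BITS            12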
/* cpu stopped after a breakpoint or singlestep */ -#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */ -#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */ - -/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for - addresses on the same page. The top bits are the same. This allows - TLB invalidation to quickly clear a subset of the hash table. */ -#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2) -#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS) -#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1) -#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE) - -#if !defined(CONFIG_USER_ONLY) -#define CPU_TLB_BITS 8 -#define CPU_TLB_SIZE (1 << CPU_TLB_BITS) /* use a fully associative victim tlb of 8 entries */ #define CPU_VTLB_SIZE 8 @@ -80,6 +82,24 @@ typedef uint64_t target_ulong; #define CPU_TLB_ENTRY_BITS 5 #endif +#define CPU_TLB_DYN_MIN_BITS 6 +#define CPU_TLB_DYN_DEFAULT_BITS 8 + +# if HOST_LONG_BITS == 32 +/* Make sure we do not require a double-word shift for the TLB load */ +# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS) +# else /* HOST_LONG_BITS == 64 */ +/* + * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) == + * 2**34 == 16G of address space. This is roughly what one would expect a + * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel + * Skylake's Level-2 STLB has 16 1G entries. + * Also, make sure we do not size the TLB past the guest's address space. + */ +# define CPU_TLB_DYN_MAX_BITS \ + MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS) +# endif + typedef struct CPUTLBEntry { /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not @@ -87,65 +107,123 @@ typedef struct CPUTLBEntry { bit 3 : indicates that the entry is invalid bit 2..0 : zero */ - target_ulong addr_read; - target_ulong addr_write; - target_ulong addr_code; - /* Addend to virtual address to get host address. IO accesses - use the corresponding iotlb value. */ - uintptr_t addend; - /* padding to get a power of two size */ - -#ifdef _MSC_VER -# define TARGET_ULONG_SIZE (TARGET_LONG_BITS/8) -# ifdef _WIN64 -# define UINTPTR_SIZE 8 -# else -# define UINTPTR_SIZE 4 -# endif - -#define DUMMY_SIZE (1 << CPU_TLB_ENTRY_BITS) - \ - (TARGET_ULONG_SIZE * 3 + \ - ((-TARGET_ULONG_SIZE * 3) & (UINTPTR_SIZE - 1)) + \ - UINTPTR_SIZE) - -#if DUMMY_SIZE > 0 - uint8_t dummy[DUMMY_SIZE]; -#endif -#else // _MSC_VER - uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) - - (sizeof(target_ulong) * 3 + - ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) + - sizeof(uintptr_t))]; -#endif // _MSC_VER + union { + struct { + target_ulong addr_read; + target_ulong addr_write; + target_ulong addr_code; + /* Addend to virtual address to get host address. IO accesses + use the corresponding iotlb value. */ + uintptr_t addend; + }; + /* padding to get a power of two size */ + uint8_t dummy[1 << CPU_TLB_ENTRY_BITS]; + }; } CPUTLBEntry; QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS)); -#define CPU_COMMON_TLB \ - /* The meaning of the MMU modes is defined in the target code. 
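The anonymous-union rewrite of CPUTLBEntry above replaces the old hand-computed dummy[] arithmetic (including the _MSC_VER special case) with a single padding member, and QEMU_BUILD_BUG_ON pins the invariant. A reduced C11 model of the idiom:

    #include <stdint.h>

    #define ENTRY_BITS 5

    typedef struct {
        union {
            struct {
                uint64_t addr_read, addr_write, addr_code;
                uintptr_t addend;
            };
            uint8_t dummy[1 << ENTRY_BITS];  /* pad to exactly 32 bytes */
        };
    } EntryModel;

    /* A power-of-two size lets tlb_index() mask instead of divide. */
    _Static_assert(sizeof(EntryModel) == (1 << ENTRY_BITS),
                   "TLB entry must keep a power-of-two size");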
*/ \ - CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ - CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \ - hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ - hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ - target_ulong tlb_flush_addr; \ - target_ulong tlb_flush_mask; \ - target_ulong vtlb_index; \ +/* The IOTLB is not accessed directly inline by generated TCG code, + * so the CPUIOTLBEntry layout is not as critical as that of the + * CPUTLBEntry. (This is also why we don't want to combine the two + * structs into one.) + */ +typedef struct CPUIOTLBEntry { + /* + * @addr contains: + * - in the lower TARGET_PAGE_BITS, a physical section number + * - with the lower TARGET_PAGE_BITS masked off, an offset which + * must be added to the virtual address to obtain: + * + the ram_addr_t of the target RAM (if the physical section + * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM) + * + the offset within the target MemoryRegion (otherwise) + */ + hwaddr addr; + MemTxAttrs attrs; +} CPUIOTLBEntry; -#else +/* + * Data elements that are per MMU mode, minus the bits accessed by + * the TCG fast path. + */ +typedef struct CPUTLBDesc { + /* + * Describe a region covering all of the large pages allocated + * into the tlb. When any page within this region is flushed, + * we must flush the entire tlb. The region is matched if + * (addr & large_page_mask) == large_page_addr. + */ + target_ulong large_page_addr; + target_ulong large_page_mask; + /* host time (in ns) at the beginning of the time window */ + int64_t window_begin_ns; + /* maximum number of entries observed in the window */ + size_t window_max_entries; + size_t n_used_entries; + /* The next index to use in the tlb victim table. */ + size_t vindex; + /* The tlb victim table, in two parts. */ + CPUTLBEntry vtable[CPU_VTLB_SIZE]; + CPUIOTLBEntry viotlb[CPU_VTLB_SIZE]; + /* The iotlb. */ + CPUIOTLBEntry *iotlb; +} CPUTLBDesc; -#define CPU_COMMON_TLB +/* + * Data elements that are per MMU mode, accessed by the fast path. + * The structure is aligned to aid loading the pair with one insn. + */ +typedef struct CPUTLBDescFast { + /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */ + uintptr_t mask; + /* The array of tlb entries itself. */ + CPUTLBEntry *table; +} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *)); + +/* + * Data elements that are shared between all MMU modes. + */ +typedef struct CPUTLBCommon { + /* + * Within dirty, for each bit N, modifications have been made to + * mmu_idx N since the last time that mmu_idx was flushed. + * Protected by tlb_c.lock. + */ + uint16_t dirty; + /* + * Statistics. These are not lock protected, but are read and + * written atomically. This allows the monitor to print a snapshot + * of the stats without interfering with the cpu. + */ + size_t full_flush_count; + size_t part_flush_count; + size_t elide_flush_count; +} CPUTLBCommon; + +/* + * The entire softmmu tlb, for all MMU modes. + * The meaning of each of the MMU modes is defined in the target code. + * Since this is placed within CPUNegativeOffsetState, the smallest + * negative offsets are at the end of the struct. + */ + +typedef struct CPUTLB { + CPUTLBCommon c; + CPUTLBDesc d[NB_MMU_MODES]; + CPUTLBDescFast f[NB_MMU_MODES]; +} CPUTLB; + +/* This will be used by TCG backends to compute offsets. */ +#define TLB_MASK_TABLE_OFS(IDX) \ + ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env)) + +/* + * This structure must be placed in ArchCPU immediately + * before CPUArchState, as a field named "neg". 
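TLB_MASK_TABLE_OFS above depends on that placement: with neg laid out immediately before env, the offset of any neg member relative to env is a small negative constant, which a TCG backend can fold into a load off the env pointer it already holds. A reduced model showing the sign of the offset (all types are stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct { long mask; void *table; } FastModel;
    typedef struct {
        int parent;                           /* stand-in CPUState */
        struct { FastModel f[2]; } neg;       /* stand-in CPUNegativeOffsetState */
        struct { int regs[32]; } env;         /* stand-in CPUArchState */
    } ArchCPUModel;

    int main(void)
    {
        long ofs = (long)offsetof(ArchCPUModel, neg.f[0])
                 - (long)offsetof(ArchCPUModel, env);
        printf("f[0] sits %ld bytes before env\n", -ofs);  /* ofs < 0 */
        return 0;
    }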
+ */ +typedef struct CPUNegativeOffsetState { + CPUTLB tlb; + IcountDecr icount_decr; +} CPUNegativeOffsetState; #endif - - -#define CPU_TEMP_BUF_NLONGS 128 - -// Unicorn engine -// @invalid_addr: invalid memory access address -// @invalid_error: error code for memory access (1 = READ, 2 = WRITE) -#define CPU_COMMON \ - /* soft mmu support */ \ - CPU_COMMON_TLB \ - uint64_t invalid_addr; \ - int invalid_error; -#endif diff --git a/qemu/include/exec/cpu_ldst.h b/qemu/include/exec/cpu_ldst.h index 715cee51..b8482bce 100644 --- a/qemu/include/exec/cpu_ldst.h +++ b/qemu/include/exec/cpu_ldst.h @@ -23,325 +23,134 @@ * * Used by target op helpers. * - * MMU mode suffixes are defined in target cpu.h. + * The syntax for the accessors is: + * + * load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr) + * cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr) + * cpu_ld{sign}{size}_mmuidx_ra(env, ptr, mmu_idx, retaddr) + * + * store: cpu_st{size}_{mmusuffix}(env, ptr, val) + * cpu_st{size}_{mmusuffix}_ra(env, ptr, val, retaddr) + * cpu_st{size}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr) + * + * sign is: + * (empty): for 32 and 64 bit sizes + * u : unsigned + * s : signed + * + * size is: + * b: 8 bits + * w: 16 bits + * l: 32 bits + * q: 64 bits + * + * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx". + * The "mmuidx" suffix carries an extra mmu_idx argument that specifies + * the index to use; the "data" and "code" suffixes take the index from + * cpu_mmu_index(). */ #ifndef CPU_LDST_H #define CPU_LDST_H -#if defined(CONFIG_USER_ONLY) -/* All direct uses of g2h and h2g need to go away for usermode softmmu. */ -#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE)) +#include "cpu-defs.h" +#include "cpu.h" -#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS -#define h2g_valid(x) 1 -#else -#define h2g_valid(x) ({ \ - unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \ - (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \ - (!RESERVED_VA || (__guest < RESERVED_VA)); \ -}) +typedef target_ulong abi_ptr; +#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx + +uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr); +uint32_t cpu_lduw_data(CPUArchState *env, abi_ptr ptr); +uint32_t cpu_ldl_data(CPUArchState *env, abi_ptr ptr); +uint64_t cpu_ldq_data(CPUArchState *env, abi_ptr ptr); +int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr); +int cpu_ldsw_data(CPUArchState *env, abi_ptr ptr); + +uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); +uint32_t cpu_lduw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); +uint32_t cpu_ldl_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); +uint64_t cpu_ldq_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); +int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); +int cpu_ldsw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr); + +void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val); +void cpu_stw_data(CPUArchState *env, abi_ptr ptr, uint32_t val); +void cpu_stl_data(CPUArchState *env, abi_ptr ptr, uint32_t val); +void cpu_stq_data(CPUArchState *env, abi_ptr ptr, uint64_t val); + +void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr, + uint32_t val, uintptr_t retaddr); +void cpu_stw_data_ra(CPUArchState *env, abi_ptr ptr, + uint32_t val, uintptr_t retaddr); +void cpu_stl_data_ra(CPUArchState *env, abi_ptr ptr, + uint32_t val, uintptr_t retaddr); +void cpu_stq_data_ra(CPUArchState *env, abi_ptr ptr, + uint64_t val, uintptr_t retaddr); + +/* Needed for 
TCG_OVERSIZED_GUEST */ +#include "tcg/tcg.h" + +static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry) +{ + return entry->addr_write; +} + +/* Find the TLB index corresponding to the mmu_idx + address pair. */ +static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx, + target_ulong addr) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = env->uc; #endif + uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS; -#define h2g_nocheck(x) ({ \ - unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \ - (abi_ulong)__ret; \ -}) + return (addr >> TARGET_PAGE_BITS) & size_mask; +} -#define h2g(x) ({ \ - /* Check if given address fits target address space */ \ - assert(h2g_valid(x)); \ - h2g_nocheck(x); \ -}) +/* Find the TLB entry corresponding to the mmu_idx + address pair. */ +static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, + target_ulong addr) +{ + return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)]; +} -#define saddr(x) g2h(x) -#define laddr(x) g2h(x) +uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra); +uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra); +uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra); +uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra); -#else /* !CONFIG_USER_ONLY */ -/* NOTE: we use double casts if pointers and target_ulong have - different sizes */ -#define saddr(x) (uint8_t *)(intptr_t)(x) -#define laddr(x) (uint8_t *)(intptr_t)(x) -#endif +int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra); +int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra); -#define ldub_raw(p) ldub_p(laddr((p))) -#define ldsb_raw(p) ldsb_p(laddr((p))) -#define lduw_raw(p) lduw_p(laddr((p))) -#define ldsw_raw(p) ldsw_p(laddr((p))) -#define ldl_raw(p) ldl_p(laddr((p))) -#define ldq_raw(p) ldq_p(laddr((p))) -#define ldfl_raw(p) ldfl_p(laddr((p))) -#define ldfq_raw(p) ldfq_p(laddr((p))) -#define stb_raw(p, v) stb_p(saddr((p)), v) -#define stw_raw(p, v) stw_p(saddr((p)), v) -#define stl_raw(p, v) stl_p(saddr((p)), v) -#define stq_raw(p, v) stq_p(saddr((p)), v) -#define stfl_raw(p, v) stfl_p(saddr((p)), v) -#define stfq_raw(p, v) stfq_p(saddr((p)), v) +void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t retaddr); +void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t retaddr); +void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t retaddr); +void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t retaddr); -#if defined(CONFIG_USER_ONLY) +uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr); +uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr); +uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr); +uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr); -/* if user mode, no other memory access functions */ -#define ldub(p) ldub_raw(p) -#define ldsb(p) ldsb_raw(p) -#define lduw(p) lduw_raw(p) -#define ldsw(p) ldsw_raw(p) -#define ldl(p) ldl_raw(p) -#define ldq(p) ldq_raw(p) -#define ldfl(p) ldfl_raw(p) -#define ldfq(p) ldfq_raw(p) -#define stb(p, v) stb_raw(p, v) -#define stw(p, v) stw_raw(p, v) -#define stl(p, v) stl_raw(p, v) -#define stq(p, v) stq_raw(p, v) -#define stfl(p, v) stfl_raw(p, v) -#define stfq(p, v) stfq_raw(p, v) 
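tlb_index() above recovers the table size from the fast-path mask rather than storing it separately: f[mmu_idx].mask holds (n_entries - 1) << CPU_TLB_ENTRY_BITS, so shifting it back down yields the index mask. Worked numbers, assuming 256 entries, CPU_TLB_ENTRY_BITS == 5 and 12-bit pages:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t mask      = (256 - 1) << 5;        /* 0x1fe0, as stored */
        uintptr_t size_mask = mask >> 5;             /* 0xff */
        uintptr_t index     = (0x123456u >> 12) & size_mask;
        assert(index == 0x23);
        return 0;
    }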
+static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr) +{ + return (int8_t)cpu_ldub_code(env, addr); +} -#define cpu_ldub_code(env1, p) ldub_raw(p) -#define cpu_ldsb_code(env1, p) ldsb_raw(p) -#define cpu_lduw_code(env1, p) lduw_raw(p) -#define cpu_ldsw_code(env1, p) ldsw_raw(p) -#define cpu_ldl_code(env1, p) ldl_raw(p) -#define cpu_ldq_code(env1, p) ldq_raw(p) - -#define cpu_ldub_data(env, addr) ldub_raw(addr) -#define cpu_lduw_data(env, addr) lduw_raw(addr) -#define cpu_ldsw_data(env, addr) ldsw_raw(addr) -#define cpu_ldl_data(env, addr) ldl_raw(addr) -#define cpu_ldq_data(env, addr) ldq_raw(addr) - -#define cpu_stb_data(env, addr, data) stb_raw(addr, data) -#define cpu_stw_data(env, addr, data) stw_raw(addr, data) -#define cpu_stl_data(env, addr, data) stl_raw(addr, data) -#define cpu_stq_data(env, addr, data) stq_raw(addr, data) - -#define cpu_ldub_kernel(env, addr) ldub_raw(addr) -#define cpu_lduw_kernel(env, addr) lduw_raw(addr) -#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr) -#define cpu_ldl_kernel(env, addr) ldl_raw(addr) -#define cpu_ldq_kernel(env, addr) ldq_raw(addr) - -#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data) -#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data) -#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data) -#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data) - -#define ldub_kernel(p) ldub_raw(p) -#define ldsb_kernel(p) ldsb_raw(p) -#define lduw_kernel(p) lduw_raw(p) -#define ldsw_kernel(p) ldsw_raw(p) -#define ldl_kernel(p) ldl_raw(p) -#define ldq_kernel(p) ldq_raw(p) -#define ldfl_kernel(p) ldfl_raw(p) -#define ldfq_kernel(p) ldfq_raw(p) -#define stb_kernel(p, v) stb_raw(p, v) -#define stw_kernel(p, v) stw_raw(p, v) -#define stl_kernel(p, v) stl_raw(p, v) -#define stq_kernel(p, v) stq_raw(p, v) -#define stfl_kernel(p, v) stfl_raw(p, v) -#define stfq_kernel(p, vt) stfq_raw(p, v) - -#define cpu_ldub_data(env, addr) ldub_raw(addr) -#define cpu_lduw_data(env, addr) lduw_raw(addr) -#define cpu_ldl_data(env, addr) ldl_raw(addr) - -#define cpu_stb_data(env, addr, data) stb_raw(addr, data) -#define cpu_stw_data(env, addr, data) stw_raw(addr, data) -#define cpu_stl_data(env, addr, data) stl_raw(addr, data) - -#else - -/* XXX: find something cleaner. - * Furthermore, this is false for 64 bits targets - */ -#define ldul_user ldl_user -#define ldul_kernel ldl_kernel -#define ldul_hypv ldl_hypv -#define ldul_executive ldl_executive -#define ldul_supervisor ldl_supervisor - -/* The memory helpers for tcg-generated code need tcg_target_long etc. 
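The _ra variants declared above exist so a helper can pass its own return address down to the unwinder; GETPC() in the outermost helper captures it. A minimal sketch of a target op helper built on these accessors (helper_pop32 and the stack layout are hypothetical):

    /* Pop a 32-bit value from a guest stack; a fault inside the load
     * unwinds to the guest instruction via the retaddr from GETPC(). */
    uint32_t helper_pop32(CPUArchState *env, target_ulong sp)
    {
        return cpu_ldl_data_ra(env, sp, GETPC());
    }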
*/ -#include "tcg.h" - -uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); -uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); -uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); -uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); - -void helper_stb_mmu(CPUArchState *env, target_ulong addr, - uint8_t val, int mmu_idx); -void helper_stw_mmu(CPUArchState *env, target_ulong addr, - uint16_t val, int mmu_idx); -void helper_stl_mmu(CPUArchState *env, target_ulong addr, - uint32_t val, int mmu_idx); -void helper_stq_mmu(CPUArchState *env, target_ulong addr, - uint64_t val, int mmu_idx); - -uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); -uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); - -#define CPU_MMU_INDEX 0 -#define MEMSUFFIX MMU_MODE0_SUFFIX -#define DATA_SIZE 1 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 2 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 4 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 8 -#include "exec/cpu_ldst_template.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX - -#define CPU_MMU_INDEX 1 -#define MEMSUFFIX MMU_MODE1_SUFFIX -#define DATA_SIZE 1 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 2 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 4 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 8 -#include "exec/cpu_ldst_template.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX - -#if (NB_MMU_MODES >= 3) - -#define CPU_MMU_INDEX 2 -#define MEMSUFFIX MMU_MODE2_SUFFIX -#define DATA_SIZE 1 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 2 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 4 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 8 -#include "exec/cpu_ldst_template.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#endif /* (NB_MMU_MODES >= 3) */ - -#if (NB_MMU_MODES >= 4) - -#define CPU_MMU_INDEX 3 -#define MEMSUFFIX MMU_MODE3_SUFFIX -#define DATA_SIZE 1 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 2 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 4 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 8 -#include "exec/cpu_ldst_template.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#endif /* (NB_MMU_MODES >= 4) */ - -#if (NB_MMU_MODES >= 5) - -#define CPU_MMU_INDEX 4 -#define MEMSUFFIX MMU_MODE4_SUFFIX -#define DATA_SIZE 1 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 2 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 4 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 8 -#include "exec/cpu_ldst_template.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#endif /* (NB_MMU_MODES >= 5) */ - -#if (NB_MMU_MODES >= 6) - -#define CPU_MMU_INDEX 5 -#define MEMSUFFIX MMU_MODE5_SUFFIX -#define DATA_SIZE 1 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 2 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 4 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 8 -#include "exec/cpu_ldst_template.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#endif /* (NB_MMU_MODES >= 6) */ - -#if (NB_MMU_MODES > 6) -#error "NB_MMU_MODES > 6 is not supported for now" -#endif /* (NB_MMU_MODES > 6) */ - -/* these access are slower, they must be as rare as possible */ -#define CPU_MMU_INDEX (cpu_mmu_index(env)) -#define MEMSUFFIX _data -#define 
DATA_SIZE 1 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 2 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 4 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 8 -#include "exec/cpu_ldst_template.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX - -#define ldub(p) ldub_data(p) -#define ldsb(p) ldsb_data(p) -#define lduw(p) lduw_data(p) -#define ldsw(p) ldsw_data(p) -#define ldl(p) ldl_data(p) -#define ldq(p) ldq_data(p) - -#define stb(p, v) stb_data(p, v) -#define stw(p, v) stw_data(p, v) -#define stl(p, v) stl_data(p, v) -#define stq(p, v) stq_data(p, v) - -#define CPU_MMU_INDEX (cpu_mmu_index(env)) -#define MEMSUFFIX _code -#define SOFTMMU_CODE_ACCESS - -#define DATA_SIZE 1 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 2 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 4 -#include "exec/cpu_ldst_template.h" - -#define DATA_SIZE 8 -#include "exec/cpu_ldst_template.h" - -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#undef SOFTMMU_CODE_ACCESS +static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_code(env, addr); +} /** * tlb_vaddr_to_host: @@ -351,50 +160,12 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); * @mmu_idx: MMU index to use for lookup * * Look up the specified guest virtual index in the TCG softmmu TLB. - * If the TLB contains a host virtual address suitable for direct RAM - * access, then return it. Otherwise (TLB miss, TLB entry is for an - * I/O access, etc) return NULL. - * - * This is the equivalent of the initial fast-path code used by - * TCG backends for guest load and store accesses. + * If we can translate a host virtual address suitable for direct RAM + * access, without causing a guest exception, then return it. + * Otherwise (TLB entry is for an I/O access, guest software + * TLB fill required, etc) return NULL. */ -static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr, - int access_type, int mmu_idx) -{ - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index]; - target_ulong tlb_addr; - uintptr_t haddr; - - switch (access_type) { - case 0: - tlb_addr = tlbentry->addr_read; - break; - case 1: - tlb_addr = tlbentry->addr_write; - break; - case 2: - tlb_addr = tlbentry->addr_code; - break; - default: - g_assert_not_reached(); - } - - if ((addr & TARGET_PAGE_MASK) - != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - /* TLB entry is for a different page */ - return NULL; - } - - if (tlb_addr & ~TARGET_PAGE_MASK) { - /* IO access */ - return NULL; - } - - haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend); - return (void *)haddr; -} - -#endif /* defined(CONFIG_USER_ONLY) */ +void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, + MMUAccessType access_type, int mmu_idx); #endif /* CPU_LDST_H */ diff --git a/qemu/include/exec/cpu_ldst_template.h b/qemu/include/exec/cpu_ldst_template.h deleted file mode 100644 index fc68ee31..00000000 --- a/qemu/include/exec/cpu_ldst_template.h +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Software MMU support - * - * Generate inline load/store functions for one MMU mode and data - * size. - * - * Generate a store function as well as signed and unsigned loads. For - * 32 and 64 bit cases, also generate floating point functions with - * the same size. - * - * Not used directly but included from cpu_ldst.h. 
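tlb_vaddr_to_host() is the probe half of the old inline fast path: NULL now means "take the slow path" rather than only "TLB miss". A hypothetical caller that prefers direct host access and falls back to the mmuidx accessor (a zero retaddr is assumed to mean "not called from generated code"):

    /* Sketch: fast-path a 32-bit load when the TLB already holds a
     * direct RAM mapping; otherwise go through the full slow path. */
    static uint32_t load32(CPUArchState *env, target_ulong addr, int mmu_idx)
    {
        void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
        if (host) {
            return ldl_p(host);                  /* direct host access */
        }
        return cpu_ldl_mmuidx_ra(env, addr, mmu_idx, 0);
    }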
- * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#if DATA_SIZE == 8 -#define SUFFIX q -#define USUFFIX q -#define DATA_TYPE uint64_t -#elif DATA_SIZE == 4 -#define SUFFIX l -#define USUFFIX l -#define DATA_TYPE uint32_t -#elif DATA_SIZE == 2 -#define SUFFIX w -#define USUFFIX uw -#define DATA_TYPE uint16_t -#define DATA_STYPE int16_t -#elif DATA_SIZE == 1 -#define SUFFIX b -#define USUFFIX ub -#define DATA_TYPE uint8_t -#define DATA_STYPE int8_t -#else -#error unsupported data size -#endif - -#if DATA_SIZE == 8 -#define RES_TYPE uint64_t -#else -#define RES_TYPE uint32_t -#endif - -#ifdef SOFTMMU_CODE_ACCESS -#define ADDR_READ addr_code -#define MMUSUFFIX _cmmu -#else -#define ADDR_READ addr_read -#define MMUSUFFIX _mmu -#endif - -/* generic load/store macros */ - -static inline RES_TYPE -glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr) -{ - int page_index; - RES_TYPE res; - target_ulong addr; - int mmu_idx; - - addr = ptr; - page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - mmu_idx = CPU_MMU_INDEX; - if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != - (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { - res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx); - } else { - uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend); - res = glue(glue(ld, USUFFIX), _raw)(hostaddr); - } - return res; -} - -#if DATA_SIZE <= 2 -static inline int -glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr) -{ - int res, page_index; - target_ulong addr; - int mmu_idx; - - addr = ptr; - page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - mmu_idx = CPU_MMU_INDEX; - if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != - (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { - res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX), - MMUSUFFIX)(env, addr, mmu_idx); - } else { - uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend); - res = glue(glue(lds, SUFFIX), _raw)(hostaddr); - } - return res; -} -#endif - -#ifndef SOFTMMU_CODE_ACCESS - -/* generic store macro */ - -static inline void -glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr, - RES_TYPE v) -{ - int page_index; - target_ulong addr; - int mmu_idx; - - addr = ptr; - page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - mmu_idx = CPU_MMU_INDEX; - if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write != - (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { - glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx); - } else { - uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend); - glue(glue(st, SUFFIX), _raw)(hostaddr, v); - } -} - - - -#if DATA_SIZE == 8 -static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env, - target_ulong ptr) -{ - union { - float64 d; - uint64_t i; - } u; 
- u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr); - return u.d; -} - -static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env, - target_ulong ptr, float64 v) -{ - union { - float64 d; - uint64_t i; - } u; - u.d = v; - glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i); -} -#endif /* DATA_SIZE == 8 */ - -#if DATA_SIZE == 4 -static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env, - target_ulong ptr) -{ - union { - float32 f; - uint32_t i; - } u; - u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr); - return u.f; -} - -static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env, - target_ulong ptr, float32 v) -{ - union { - float32 f; - uint32_t i; - } u; - u.f = v; - glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i); -} -#endif /* DATA_SIZE == 4 */ - -#endif /* !SOFTMMU_CODE_ACCESS */ - -#undef RES_TYPE -#undef DATA_TYPE -#undef DATA_STYPE -#undef SUFFIX -#undef USUFFIX -#undef DATA_SIZE -#undef MMUSUFFIX -#undef ADDR_READ diff --git a/qemu/include/exec/cputlb.h b/qemu/include/exec/cputlb.h index 1a43d325..0a775711 100644 --- a/qemu/include/exec/cputlb.h +++ b/qemu/include/exec/cputlb.h @@ -16,33 +16,13 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ + #ifndef CPUTLB_H #define CPUTLB_H -#if !defined(CONFIG_USER_ONLY) +#include "exec/cpu-common.h" + /* cputlb.c */ void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr); -void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr, - target_ulong vaddr); -void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, - uintptr_t start, uintptr_t length); -void cpu_tlb_reset_dirty_all(struct uc_struct *uc, ram_addr_t start1, ram_addr_t length); -void tlb_set_dirty(CPUArchState *env, target_ulong vaddr); -//extern int tlb_flush_count; - -/* exec.c */ -void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr); - -MemoryRegionSection * -address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat, - hwaddr *plen); -hwaddr memory_region_section_get_iotlb(CPUState *cpu, - MemoryRegionSection *section, - target_ulong vaddr, - hwaddr paddr, hwaddr xlat, - int prot, - target_ulong *address); -bool memory_region_is_unassigned(struct uc_struct* uc, MemoryRegion *mr); - -#endif +void tlb_unprotect_code(struct uc_struct *uc, ram_addr_t ram_addr); #endif diff --git a/qemu/include/exec/exec-all.h b/qemu/include/exec/exec-all.h index e839825f..b72bfc22 100644 --- a/qemu/include/exec/exec-all.h +++ b/qemu/include/exec/exec-all.h @@ -17,10 +17,13 @@ * License along with this library; if not, see . */ -#ifndef _EXEC_ALL_H_ -#define _EXEC_ALL_H_ +#ifndef EXEC_ALL_H +#define EXEC_ALL_H -#include "qemu-common.h" +#include "hw/core/cpu.h" +#include "exec/tb-context.h" +#include "exec/cpu_ldst.h" +#include "sysemu/cpus.h" /* allow to see translation results - the slowdown should be negligible, so we leave it */ #define DEBUG_DISAS @@ -28,289 +31,372 @@ /* Page tracking code uses ram addresses in system mode, and virtual addresses in userspace mode. Define tb_page_addr_t to be an appropriate type. 
*/ -#if defined(CONFIG_USER_ONLY) -typedef abi_ulong tb_page_addr_t; -#else typedef ram_addr_t tb_page_addr_t; -#endif - -/* is_jmp field values */ -#define DISAS_NEXT 0 /* next instruction can be analyzed */ -#define DISAS_JUMP 1 /* only pc was modified dynamically */ -#define DISAS_UPDATE 2 /* cpu state was modified dynamically */ -#define DISAS_TB_JUMP 3 /* only pc was modified statically */ - -struct TranslationBlock; -typedef struct TranslationBlock TranslationBlock; - -/* XXX: make safe guess about sizes */ -#define MAX_OP_PER_INSTR 266 - -#if HOST_LONG_BITS == 32 -#define MAX_OPC_PARAM_PER_ARG 2 -#else -#define MAX_OPC_PARAM_PER_ARG 1 -#endif -#define MAX_OPC_PARAM_IARGS 5 -#define MAX_OPC_PARAM_OARGS 1 -#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS) - -/* A Call op needs up to 4 + 2N parameters on 32-bit archs, - * and up to 4 + N parameters on 64-bit archs - * (N = number of input arguments + output arguments). */ -#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS)) -#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR) - -/* Maximum size a TCG op can expand to. This is complicated because a - single op may require several host instructions and register reloads. - For now take a wild guess at 192 bytes, which should allow at least - a couple of fixup instructions per argument. */ -#define TCG_MAX_OP_SIZE 192 - -#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM) +#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT #include "qemu/log.h" -void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb); -void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb); -void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb, - int pc_pos); -bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc); +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns); +void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb, + target_ulong *data); -void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc); +/** + * cpu_restore_state: + * @cpu: the vCPU state is to be restore to + * @searched_pc: the host PC the fault occurred at + * @will_exit: true if the TB executed will be interrupted after some + cpu adjustments. Required for maintaining the correct + icount valus + * @return: true if state was restored, false otherwise + * + * Attempt to restore the state for a fault occurring in translated + * code. If the searched_pc is not in translated code no state is + * restored and the function returns false. + */ +bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit); +void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu); void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); TranslationBlock *tb_gen_code(CPUState *cpu, - target_ulong pc, target_ulong cs_base, int flags, + target_ulong pc, target_ulong cs_base, + uint32_t flags, int cflags); -void cpu_exec_init(CPUArchState *env, void *opaque); void QEMU_NORETURN cpu_loop_exit(CPUState *cpu); +void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc); +void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc); + +/** + * cpu_loop_exit_requested: + * @cpu: The CPU state to be tested + * + * Indicate if somebody asked for a return of the CPU to the main loop + * (e.g., via cpu_exit() or cpu_interrupt()). + * + * This is helpful for architectures that support interruptible + * instructions. 
After writing back all state to registers/memory, this + * call can be used to check if it makes sense to return to the main loop + * or to continue executing the interruptible instruction. + */ +static inline bool cpu_loop_exit_requested(CPUState *cpu) +{ + return (int32_t)cpu_neg(cpu)->icount_decr.u32 < 0; +} + +void cpu_reloading_memory_map(void); +/** + * cpu_address_space_init: + * @cpu: CPU to add this address space to + * @asidx: integer index of this address space + * @mr: the root memory region of address space + * + * Add the specified address space to the CPU's cpu_ases list. + * The address space added with @asidx 0 is the one used for the + * convenience pointer cpu->as. + * The target-specific code which registers ASes is responsible + * for defining what semantics address space 0, 1, 2, etc have. + * + * Before the first call to this function, the caller must set + * cpu->num_ases to the total number of address spaces it needs + * to support. + */ +void cpu_address_space_init(CPUState *cpu, int asidx, MemoryRegion *mr); -void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end, - int is_cpu_write_access); -void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end, - int is_cpu_write_access); -#if !defined(CONFIG_USER_ONLY) -void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as); /* cputlb.c */ +/** + * tlb_init - initialize a CPU's TLB + * @cpu: CPU whose TLB should be initialized + */ +void tlb_init(CPUState *cpu); +/** + * tlb_flush_page: + * @cpu: CPU whose TLB should be flushed + * @addr: virtual address of page to be flushed + * + * Flush one page from the TLB of the specified CPU, for all + * MMU indexes. + */ void tlb_flush_page(CPUState *cpu, target_ulong addr); -void tlb_flush(CPUState *cpu, int flush_global); +/** + * tlb_flush_page_all_cpus: + * @cpu: src CPU of the flush + * @addr: virtual address of page to be flushed + * + * Flush one page from the TLB of the specified CPU, for all + * MMU indexes. + */ +void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr); +/** + * tlb_flush_page_all_cpus_synced: + * @cpu: src CPU of the flush + * @addr: virtual address of page to be flushed + * + * Flush one page from the TLB of the specified CPU, for all MMU + * indexes like tlb_flush_page_all_cpus except the source vCPUs work + * is scheduled as safe work meaning all flushes will be complete once + * the source vCPUs safe work is complete. This will depend on when + * the guests translation ends the TB. + */ +void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr); +/** + * tlb_flush: + * @cpu: CPU whose TLB should be flushed + * + * Flush the entire TLB for the specified CPU. Most CPU architectures + * allow the implementation to drop entries from the TLB at any time + * so this is generally safe. If more selective flushing is required + * use one of the other functions for efficiency. + */ +void tlb_flush(CPUState *cpu); +/** + * tlb_flush_all_cpus: + * @cpu: src CPU of the flush + */ +void tlb_flush_all_cpus(CPUState *src_cpu); +/** + * tlb_flush_all_cpus_synced: + * @cpu: src CPU of the flush + * + * Like tlb_flush_all_cpus except this except the source vCPUs work is + * scheduled as safe work meaning all flushes will be complete once + * the source vCPUs safe work is complete. This will depend on when + * the guests translation ends the TB. 
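The cpu_loop_exit_requested() test above enables the pattern its comment describes. A hypothetical interruptible block-copy helper might poll it between units (copy_one_unit, env->count and the per-step writeback are all invented for this sketch):

    void helper_block_copy(CPUArchState *env)
    {
        CPUState *cs = env_cpu(env);

        while (env->count > 0) {
            copy_one_unit(env);     /* hypothetical per-unit worker */
            env->count--;           /* state is architectural again */
            if (cpu_loop_exit_requested(cs)) {
                /* Re-enter and resume this same instruction later. */
                cpu_loop_exit_restore(cs, GETPC());
            }
        }
    }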
+ */ +void tlb_flush_all_cpus_synced(CPUState *src_cpu); +/** + * tlb_flush_page_by_mmuidx: + * @cpu: CPU whose TLB should be flushed + * @addr: virtual address of page to be flushed + * @idxmap: bitmap of MMU indexes to flush + * + * Flush one page from the TLB of the specified CPU, for the specified + * MMU indexes. + */ +void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, + uint16_t idxmap); +/** + * tlb_flush_page_by_mmuidx_all_cpus: + * @cpu: Originating CPU of the flush + * @addr: virtual address of page to be flushed + * @idxmap: bitmap of MMU indexes to flush + * + * Flush one page from the TLB of all CPUs, for the specified + * MMU indexes. + */ +void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr, + uint16_t idxmap); +/** + * tlb_flush_page_by_mmuidx_all_cpus_synced: + * @cpu: Originating CPU of the flush + * @addr: virtual address of page to be flushed + * @idxmap: bitmap of MMU indexes to flush + * + * Flush one page from the TLB of all CPUs, for the specified MMU + * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the source + * vCPU's work is scheduled as safe work, meaning all flushes will be + * complete once the source vCPU's safe work is complete. This will + * depend on when the guest's translation ends the TB. + */ +void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr, + uint16_t idxmap); +/** + * tlb_flush_by_mmuidx: + * @cpu: CPU whose TLB should be flushed + * @idxmap: bitmap of MMU indexes to flush + * + * Flush all entries from the TLB of the specified CPU, for the specified + * MMU indexes. + */ +void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap); +/** + * tlb_flush_by_mmuidx_all_cpus: + * @cpu: Originating CPU of the flush + * @idxmap: bitmap of MMU indexes to flush + * + * Flush all entries from all TLBs of all CPUs, for the specified + * MMU indexes. + */ +void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap); +/** + * tlb_flush_by_mmuidx_all_cpus_synced: + * @cpu: Originating CPU of the flush + * @idxmap: bitmap of MMU indexes to flush + * + * Flush all entries from all TLBs of all CPUs, for the specified + * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the source + * vCPU's work is scheduled as safe work, meaning all flushes will be + * complete once the source vCPU's safe work is complete. This will + * depend on when the guest's translation ends the TB. + */ +void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap); +/** + * tlb_set_page_with_attrs: + * @cpu: CPU to add this TLB entry for + * @vaddr: virtual address of page to add entry for + * @paddr: physical address of the page + * @attrs: memory transaction attributes + * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits) + * @mmu_idx: MMU index to insert TLB entry for + * @size: size of the page in bytes + * + * Add an entry to this CPU's TLB (a mapping from virtual address + * @vaddr to physical address @paddr) with the specified memory + * transaction attributes. This is generally called by the target CPU + * specific code after it has been called through the tlb_fill() + * entry point and performed a successful page table walk to find + * the physical address and attributes for the virtual address + * which provoked the TLB miss. + * + * At most one entry for a given virtual address is permitted. Only a + * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only + * used by tlb_flush_page.
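/*
 * Illustrative sketch, not part of the patch: the shape of a target's
 * TLB-miss handler as described above -- page-table walk, then install the
 * translation. walk_page_table() and its outputs are hypothetical;
 * MEMTXATTRS_UNSPECIFIED comes from exec/memattrs.h, added later in this
 * patch, and MMUAccessType matches the probe_access() declaration below.
 */
bool mytarget_tlb_fill(CPUState *cs, target_ulong addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    hwaddr paddr;
    int prot;

    if (!walk_page_table(cs, addr, access_type, mmu_idx, &paddr, &prot)) {
        if (probe) {
            return false;                    /* caller only wanted to know */
        }
        cpu_loop_exit_restore(cs, retaddr);  /* deliver the guest fault */
    }
    tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                            paddr & TARGET_PAGE_MASK, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}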
+ */ +void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, MemTxAttrs attrs, + int prot, int mmu_idx, target_ulong size); +/* tlb_set_page: + * + * This function is equivalent to calling tlb_set_page_with_attrs() + * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided + * as a convenience for CPUs which don't use memory transaction attributes. + */ void tlb_set_page(CPUState *cpu, target_ulong vaddr, hwaddr paddr, int prot, int mmu_idx, target_ulong size); +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); -void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr); - -#else -static inline void tlb_flush_page(CPUState *cpu, target_ulong addr) +static inline void *probe_write(CPUArchState *env, target_ulong addr, int size, + int mmu_idx, uintptr_t retaddr) { + return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); } -static inline void tlb_flush(CPUState *cpu, int flush_global) +static inline void *probe_read(CPUArchState *env, target_ulong addr, int size, + int mmu_idx, uintptr_t retaddr) { + return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); } -#endif #define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */ -#define CODE_GEN_PHYS_HASH_BITS 15 -#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS) +/* Estimated block size for TB allocation. */ +/* ??? The following is based on a 2015 survey of x86_64 host output. Better would seem to be some sort of dynamically sized TB array, adapting to the block sizes actually being produced. */ +#define CODE_GEN_AVG_BLOCK_SIZE 400 -/* estimated block size for TB allocation */ -/* XXX: use a per code average code fragment size and modulate it - according to the host CPU */ -#if defined(CONFIG_SOFTMMU) -#define CODE_GEN_AVG_BLOCK_SIZE 128 -#else -#define CODE_GEN_AVG_BLOCK_SIZE 64 -#endif - -#if defined(__arm__) || defined(_ARCH_PPC) \ - || defined(__x86_64__) || defined(__i386__) \ - || defined(__sparc__) || defined(__aarch64__) \ - || defined(__s390x__) || defined(__mips__) \ - || defined(CONFIG_TCG_INTERPRETER) -#define USE_DIRECT_JUMP -#endif +/* + * Translation Cache-related fields of a TB. + * This struct exists just for convenience; we keep track of TBs in a binary + * search tree, and the only fields needed to compare TBs in the tree are + * @ptr and @size. + * Note: the address of search data can be obtained by adding @size to @ptr. + */ +struct tb_tc { + void *ptr; /* pointer to the translated code */ + size_t size; +}; struct TranslationBlock { target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */ target_ulong cs_base; /* CS base for this block */ - uint64_t flags; /* flags defining in which context the code was generated */ + uint32_t flags; /* flags defining in which context the code was generated */ uint16_t size; /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */ - uint16_t cflags; /* compile flags */ -#define CF_COUNT_MASK 0x7fff -#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */ + uint16_t icount; + uint32_t cflags; /* compile flags */ +#define CF_COUNT_MASK 0x00007fff +#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */ +#define CF_NOCACHE 0x00010000 /* To be freed after execution */ +#define CF_USE_ICOUNT 0x00020000 +#define CF_INVALID 0x00040000 /* TB is stale.
Set with @jmp_lock held */ +#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */ +#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */ +#define CF_CLUSTER_SHIFT 24 +/* cflags' mask for hashing/comparison */ +#define CF_HASH_MASK \ + (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK) - void *tc_ptr; /* pointer to the translated code */ - /* next matching tb for physical address. */ - struct TranslationBlock *phys_hash_next; + /* Per-vCPU dynamic tracing state used to generate this TB */ + uint32_t trace_vcpu_dstate; + + struct tb_tc tc; + + /* original tb when cflags has CF_NOCACHE */ + struct TranslationBlock *orig_tb; /* first and second physical page containing code. The lower bit - of the pointer tells the index in page_next[] */ - struct TranslationBlock *page_next[2]; + of the pointer tells the index in page_next[]. + The list is protected by the TB's page('s) lock(s) */ + uintptr_t page_next[2]; tb_page_addr_t page_addr[2]; - /* the following data are used to directly call another TB from - the code of this one. */ - uint16_t tb_next_offset[2]; /* offset of original jump target */ -#ifdef USE_DIRECT_JUMP - uint16_t tb_jmp_offset[2]; /* offset of jump instruction */ -#else - uintptr_t tb_next[2]; /* address of jump generated code */ -#endif - /* list of TBs jumping to this one. This is a circular list using - the two least significant bits of the pointers to tell what is - the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 = - jmp_first */ - struct TranslationBlock *jmp_next[2]; - struct TranslationBlock *jmp_first; - uint32_t icount; + /* The following data are used to directly call another TB from + * the code of this one. This can be done either by emitting direct or + * indirect native jump instructions. These jumps are reset so that the TB + * just continues its execution. The TB can be linked to another one by + * setting one of the jump targets (or patching the jump instruction). Only + * two of such jumps are supported. + */ + uint16_t jmp_reset_offset[2]; /* offset of original jump target */ +#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */ + uintptr_t jmp_target_arg[2]; /* target address or offset */ + + /* + * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps. + * Each TB can have two outgoing jumps, and therefore can participate + * in two lists. The list entries are kept in jmp_list_next[2]. The least + * significant bit (LSB) of the pointers in these lists is used to encode + * which of the two list entries is to be used in the pointed TB. + * + * List traversals are protected by jmp_lock. The destination TB of each + * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock + * can be acquired from any origin TB. + * + * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is + * being invalidated, so that no further outgoing jumps from it can be set. + * + * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained + * to a destination TB that has CF_INVALID set. 
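/*
 * Illustrative sketch, not part of the patch: decoding the LSB-tagged
 * pointers described above. The helper names are hypothetical; the real
 * list manipulation lives in the translator core.
 */
static inline TranslationBlock *jmp_entry_tb(uintptr_t tagged)
{
    return (TranslationBlock *)(tagged & ~(uintptr_t)1); /* strip tag bit */
}

static inline int jmp_entry_slot(uintptr_t tagged)
{
    return tagged & 1; /* which of the two jump slots in the pointed TB */
}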
+ */ + uintptr_t jmp_list_head; + uintptr_t jmp_list_next[2]; + uintptr_t jmp_dest[2]; + uint32_t hash; // unicorn needs this hash to remove this TB from QHT cache }; -typedef struct TBContext TBContext; +// extern bool parallel_cpus; -struct TBContext { - - TranslationBlock *tbs; - TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; - int nb_tbs; - - /* statistics */ - int tb_flush_count; - int tb_phys_invalidate_count; - - int tb_invalidated_flag; -}; - -static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) +/* Hide the atomic_read to make code a little easier on the eyes */ +static inline uint32_t tb_cflags(const TranslationBlock *tb) { - target_ulong tmp; - tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); - return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK; + return tb->cflags; } -static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) +/* current cflags for hashing/comparison */ +static inline uint32_t curr_cflags(void) { - target_ulong tmp; - tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); - return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK) - | (tmp & TB_JMP_ADDR_MASK)); + return 0; } -static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc) -{ - return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1); -} +/* TranslationBlock invalidate API */ +void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs); +void tb_flush(CPUState *cpu); +void tb_phys_invalidate(TCGContext *tcg_ctx, TranslationBlock *tb, tb_page_addr_t page_addr); +TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, + target_ulong cs_base, uint32_t flags, + uint32_t cf_mask); +void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); +void tb_exec_lock(TCGContext*); +void tb_exec_unlock(TCGContext*); -void tb_free(struct uc_struct *uc, TranslationBlock *tb); -void tb_flush(CPUArchState *env); -void tb_phys_invalidate(struct uc_struct *uc, - TranslationBlock *tb, tb_page_addr_t page_addr); - -#if defined(USE_DIRECT_JUMP) - -#if defined(CONFIG_TCG_INTERPRETER) -static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) -{ - /* patch the branch destination */ - *(uint32_t *)jmp_addr = addr - (jmp_addr + 4); - /* no need to flush icache explicitly */ -} -#elif defined(_ARCH_PPC) -void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr); -#define tb_set_jmp_target1 ppc_tb_set_jmp_target -#elif defined(__i386__) || defined(__x86_64__) -static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) -{ - /* patch the branch destination */ - stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4)); - /* no need to flush icache explicitly */ -} -#elif defined(__s390x__) -static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) -{ - /* patch the branch destination */ - intptr_t disp = addr - (jmp_addr - 2); - stl_be_p((void*)jmp_addr, disp / 2); - /* no need to flush icache explicitly */ -} -#elif defined(__aarch64__) -void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr); -#define tb_set_jmp_target1 aarch64_tb_set_jmp_target -#elif defined(__arm__) -static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) -{ -#if !QEMU_GNUC_PREREQ(4, 1) - register unsigned long _beg __asm ("a1"); - register unsigned long _end __asm ("a2"); - register unsigned long _flg __asm ("a3"); -#endif - - /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */ - *(uint32_t *)jmp_addr = - (*(uint32_t *)jmp_addr & ~0xffffff) - | 
(((addr - (jmp_addr + 8)) >> 2) & 0xffffff); - -#if QEMU_GNUC_PREREQ(4, 1) - __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4); -#else - /* flush icache */ - _beg = jmp_addr; - _end = jmp_addr + 4; - _flg = 0; - __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg)); -#endif -} -#elif defined(__sparc__) || defined(__mips__) -void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr); -#else -#error tb_set_jmp_target1 is missing -#endif - -static inline void tb_set_jmp_target(TranslationBlock *tb, - int n, uintptr_t addr) -{ - uint16_t offset = tb->tb_jmp_offset[n]; - tb_set_jmp_target1((uintptr_t)((char*)tb->tc_ptr + offset), addr); -} - -#else - -/* set the jump target */ -static inline void tb_set_jmp_target(TranslationBlock *tb, - int n, uintptr_t addr) -{ - tb->tb_next[n] = addr; -} - -#endif - -static inline void tb_add_jump(TranslationBlock *tb, int n, - TranslationBlock *tb_next) -{ - /* NOTE: this test is only needed for thread safety */ - if (!tb->jmp_next[n]) { - /* patch the native jump address */ - tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr); - - /* add in TB jmp circular list */ - tb->jmp_next[n] = tb_next->jmp_first; - tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n)); - } -} - -/* GETRA is the true target of the return instruction that we'll execute, - defined here for simplicity of defining the follow-up macros. */ -#if defined(CONFIG_TCG_INTERPRETER) -extern uintptr_t tci_tb_ptr; -# define GETRA() tci_tb_ptr -#elif defined(_MSC_VER) +/* GETPC is the true target of the return instruction that we'll execute. */ +#ifdef _MSC_VER #include -# define GETRA() (uintptr_t)_ReturnAddress() +# define GETPC() (uintptr_t)_ReturnAddress() #else -# define GETRA() \ +# define GETPC() \ ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0))) #endif @@ -321,59 +407,73 @@ extern uintptr_t tci_tb_ptr; to indicate the compressed mode; subtracting two works around that. It is also the case that there are no host isas that contain a call insn smaller than 4 bytes, so we don't worry about special-casing this. */ -#if defined(CONFIG_TCG_INTERPRETER) -# define GETPC_ADJ 0 +#define GETPC_ADJ 2 + +#if defined(CONFIG_DEBUG_TCG) +void assert_no_pages_locked(void); #else -# define GETPC_ADJ 2 -#endif - -#define GETPC() (GETRA() - GETPC_ADJ) - -#if !defined(CONFIG_USER_ONLY) - -void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align)); - -struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index); -bool io_mem_read(struct MemoryRegion *mr, hwaddr addr, - uint64_t *pvalue, unsigned size); -bool io_mem_write(struct MemoryRegion *mr, hwaddr addr, - uint64_t value, unsigned size); - - -void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx, - uintptr_t retaddr); -#endif - -#if defined(CONFIG_USER_ONLY) -static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) +static inline void assert_no_pages_locked(void) { - return addr; } -#else -/* cputlb.c */ -tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr); #endif -/* vl.c */ -extern int singlestep; - -/* cpu-exec.c */ -extern volatile sig_atomic_t exit_request; /** - * cpu_can_do_io: - * @cpu: The CPU for which to check IO. + * iotlb_to_section: + * @cpu: CPU performing the access + * @index: TCG CPU IOTLB entry * - * Deterministic execution requires that IO only be performed on the last - * instruction of a TB so that interrupts take effect immediately. 
- * - * Returns: %true if memory-mapped IO is safe, %false otherwise. + * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that + * it refers to. @index will have been initially created and returned + * by memory_region_section_get_iotlb(). */ -static inline bool cpu_can_do_io(CPUState *cpu) -{ - return true; -} +struct MemoryRegionSection *iotlb_to_section(CPUState *cpu, + hwaddr index, MemTxAttrs attrs); -void phys_mem_clean(struct uc_struct* uc); +static inline void mmap_lock(void) {} +static inline void mmap_unlock(void) {} + +/** + * get_page_addr_code() - full-system version + * @env: CPUArchState + * @addr: guest virtual address of guest code + * + * If we cannot translate and execute from the entire RAM page, or if + * the region is not backed by RAM, returns -1. Otherwise, returns the + * ram_addr_t corresponding to the guest code at @addr. + * + * Note: this function can trigger an exception. + */ +tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr); + +/** + * get_page_addr_code_hostp() - full-system version + * @env: CPUArchState + * @addr: guest virtual address of guest code + * + * See get_page_addr_code() (full-system version) for documentation on the + * return value. + * + * Sets *@hostp (when @hostp is non-NULL) as follows. + * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp + * to the host address where @addr's content is kept. + * + * Note: this function can trigger an exception. + */ +tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, + void **hostp); + +void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length); +void tlb_set_dirty(CPUState *cpu, target_ulong vaddr); + +/* exec.c */ +void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr); + +MemoryRegionSection * +address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, + hwaddr *xlat, hwaddr *plen, + MemTxAttrs attrs, int *prot); +hwaddr memory_region_section_get_iotlb(CPUState *cpu, + MemoryRegionSection *section); #endif diff --git a/qemu/include/exec/gen-icount.h b/qemu/include/exec/gen-icount.h index bbbc5de6..988e801e 100644 --- a/qemu/include/exec/gen-icount.h +++ b/qemu/include/exec/gen-icount.h @@ -1,72 +1,69 @@ #ifndef GEN_ICOUNT_H -#define GEN_ICOUNT_H 1 +#define GEN_ICOUNT_H #include "qemu/timer.h" /* Helpers for instruction counting code generation. */ -//static TCGArg *icount_arg; -//static int icount_label; - -static inline void gen_tb_start(TCGContext *tcg_ctx) +static inline void gen_io_start(TCGContext *tcg_ctx) { - // TCGv_i32 count; - TCGv_i32 flag; + TCGv_i32 tmp = tcg_const_i32(tcg_ctx, 1); + tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, + offsetof(ArchCPU, parent_obj.can_do_io) - + offsetof(ArchCPU, env)); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +/* + * cpu->can_do_io is cleared automatically at the beginning of + * each translation block. The cost is minimal and only paid + * for -icount, plus it would be very easy to forget doing it + * in the translator. Therefore, backends only need to call + * gen_io_start. 
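/*
 * Illustrative sketch, not part of the patch: per the comment above, a
 * translator brackets an I/O instruction only at its start when icount is
 * active; no matching gen_io_end() is required because can_do_io is
 * cleared at the next TB. gen_helper_mmio_read stands in for a helper a
 * real target would have generated via helper-gen.h.
 */
static void translate_mmio_load(TCGContext *tcg_ctx, TranslationBlock *tb,
                                TCGv_i32 dst, TCGv_i32 addr)
{
    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        gen_io_start(tcg_ctx);  /* mark can_do_io for this insn */
    }
    gen_helper_mmio_read(tcg_ctx, dst, tcg_ctx->cpu_env, addr);
}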
+ */ +static inline void gen_io_end(TCGContext *tcg_ctx) +{ + TCGv_i32 tmp = tcg_const_i32(tcg_ctx, 0); + tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, + offsetof(ArchCPU, parent_obj.can_do_io) - + offsetof(ArchCPU, env)); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static inline void gen_tb_start(TCGContext *tcg_ctx, TranslationBlock *tb) +{ + TCGv_i32 count; tcg_ctx->exitreq_label = gen_new_label(tcg_ctx); - flag = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_i32(tcg_ctx, flag, tcg_ctx->cpu_env, - offsetof(CPUState, tcg_exit_req) - ENV_OFFSET); - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, flag, 0, tcg_ctx->exitreq_label); - tcg_temp_free_i32(tcg_ctx, flag); -#if 0 - if (!use_icount) + // first TB ever does not need to check exit request + if (tcg_ctx->uc->first_tb) { + // next TB is not the first anymore + tcg_ctx->uc->first_tb = false; return; + } - icount_label = gen_new_label(); - count = tcg_temp_local_new_i32(); - tcg_gen_ld_i32(count, cpu_env, - -ENV_OFFSET + offsetof(CPUState, icount_decr.u32)); - /* This is a horrid hack to allow fixing up the value later. */ - icount_arg = tcg_ctx.gen_opparam_ptr + 1; - tcg_gen_subi_i32(count, count, 0xdeadbeef); + count = tcg_temp_new_i32(tcg_ctx); - tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label); - tcg_gen_st16_i32(count, cpu_env, - -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low)); - tcg_temp_free_i32(count); -#endif + tcg_gen_ld_i32(tcg_ctx, count, tcg_ctx->cpu_env, + offsetof(ArchCPU, neg.icount_decr.u32) - + offsetof(ArchCPU, env)); + + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, count, 0, tcg_ctx->exitreq_label); + + tcg_temp_free_i32(tcg_ctx, count); } static inline void gen_tb_end(TCGContext *tcg_ctx, TranslationBlock *tb, int num_insns) { - gen_set_label(tcg_ctx, tcg_ctx->exitreq_label); - tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + TB_EXIT_REQUESTED); - -#if 0 - if (use_icount) { - *icount_arg = num_insns; - gen_set_label(icount_label); - tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED); + if (tb_cflags(tb) & CF_USE_ICOUNT) { + /* Update the num_insn immediate parameter now that we know + * the actual insn count. */ + tcg_set_insn_param(tcg_ctx->icount_start_insn, 1, num_insns); } -#endif + + gen_set_label(tcg_ctx, tcg_ctx->exitreq_label); + tcg_gen_exit_tb(tcg_ctx, tb, TB_EXIT_REQUESTED); } -#if 0 -static inline void gen_io_start(void) -{ - TCGv_i32 tmp = tcg_const_i32(1); - tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io)); - tcg_temp_free_i32(tmp); -} - -static inline void gen_io_end(void) -{ - TCGv_i32 tmp = tcg_const_i32(0); - tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io)); - tcg_temp_free_i32(tmp); -} -#endif - #endif diff --git a/qemu/include/exec/helper-gen.h b/qemu/include/exec/helper-gen.h index df3ea61b..c77990f5 100644 --- a/qemu/include/exec/helper-gen.h +++ b/qemu/include/exec/helper-gen.h @@ -2,38 +2,38 @@ This one expands generation functions for tcg opcodes. 
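/*
 * Illustrative sketch, not part of the patch: a hypothetical target
 * declares a helper once in its helper.h, and the macros in this header
 * generate a typed wrapper the translator can call.
 */
#if 0 /* example only */
DEF_HELPER_2(my_clz, i32, env, i32)   /* in the target's helper.h */
/* ...which lets translate-time code emit the call as: */
gen_helper_my_clz(tcg_ctx, dst, tcg_ctx->cpu_env, src);
#endif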
*/ #ifndef HELPER_GEN_H -#define HELPER_GEN_H 1 +#define HELPER_GEN_H -#include +#include "exec/helper-head.h" #define DEF_HELPER_FLAGS_0(name, flags, ret) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl0(ret)) \ { \ - tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 0, NULL); \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 0, NULL); \ } #define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ - dh_arg_decl(t1, 1)) \ + dh_arg_decl(t1, 1)) \ { \ - TCGArg args[1] = { dh_arg(t1, 1) }; \ - tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 1, args); \ + TCGTemp *args[1] = { dh_arg(t1, 1) }; \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 1, args); \ } #define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \ { \ - TCGArg args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \ - tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 2, args); \ + TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 2, args); \ } #define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \ { \ - TCGArg args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \ - tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 3, args); \ + TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 3, args); \ } #define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \ @@ -41,9 +41,9 @@ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(r dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \ dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \ { \ - TCGArg args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \ + TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \ dh_arg(t3, 3), dh_arg(t4, 4) }; \ - tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 4, args); \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 4, args); \ } #define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \ @@ -51,13 +51,35 @@ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(r dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \ { \ - TCGArg args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \ + TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \ dh_arg(t4, 4), dh_arg(t5, 5) }; \ - tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 5, args); \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 5, args); \ +} + +#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \ +static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ + dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \ + dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \ +{ \ + TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \ + dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 6, args); \ +} + +#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\ +static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ + dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \ + 
dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \ + dh_arg_decl(t7, 7)) \ +{ \ + TCGTemp *args[7] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \ + dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \ + dh_arg(t7, 7) }; \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 7, args); \ } #include "helper.h" -#include "tcg-runtime.h" +#include "accel/tcg/tcg-runtime.h" #undef DEF_HELPER_FLAGS_0 #undef DEF_HELPER_FLAGS_1 @@ -65,6 +87,8 @@ static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(r #undef DEF_HELPER_FLAGS_3 #undef DEF_HELPER_FLAGS_4 #undef DEF_HELPER_FLAGS_5 +#undef DEF_HELPER_FLAGS_6 +#undef DEF_HELPER_FLAGS_7 #undef GEN_HELPER #endif /* HELPER_GEN_H */ diff --git a/qemu/include/exec/helper-head.h b/qemu/include/exec/helper-head.h index b009ccb1..9d7add43 100644 --- a/qemu/include/exec/helper-head.h +++ b/qemu/include/exec/helper-head.h @@ -15,36 +15,24 @@ GEN_HELPER 2 to do runtime registration helper functions. */ -#ifndef DEF_HELPER_H -#define DEF_HELPER_H 1 - -#include "qemu/osdep.h" +#ifndef EXEC_HELPER_HEAD_H +#define EXEC_HELPER_HEAD_H #define HELPER(name) glue(helper_, name) -#define GET_TCGV_i32 GET_TCGV_I32 -#define GET_TCGV_i64 GET_TCGV_I64 -#define GET_TCGV_ptr GET_TCGV_PTR - /* Some types that make sense in C, but not for TCG. */ #define dh_alias_i32 i32 #define dh_alias_s32 i32 #define dh_alias_int i32 #define dh_alias_i64 i64 #define dh_alias_s64 i64 +#define dh_alias_f16 i32 #define dh_alias_f32 i32 #define dh_alias_f64 i64 -#ifdef TARGET_LONG_BITS -# if TARGET_LONG_BITS == 32 -# define dh_alias_tl i32 -# else -# define dh_alias_tl i64 -# endif -#endif #define dh_alias_ptr ptr +#define dh_alias_cptr ptr #define dh_alias_void void #define dh_alias_noreturn noreturn -#define dh_alias_env ptr #define dh_alias(t) glue(dh_alias_, t) #define dh_ctype_i32 uint32_t @@ -52,15 +40,28 @@ #define dh_ctype_int int #define dh_ctype_i64 uint64_t #define dh_ctype_s64 int64_t +#define dh_ctype_f16 uint32_t #define dh_ctype_f32 float32 #define dh_ctype_f64 float64 -#define dh_ctype_tl target_ulong #define dh_ctype_ptr void * +#define dh_ctype_cptr const void * #define dh_ctype_void void #define dh_ctype_noreturn void QEMU_NORETURN -#define dh_ctype_env CPUArchState * #define dh_ctype(t) dh_ctype_##t +#ifdef NEED_CPU_H +# ifdef TARGET_LONG_BITS +# if TARGET_LONG_BITS == 32 +# define dh_alias_tl i32 +# else +# define dh_alias_tl i64 +# endif +# endif +# define dh_alias_env ptr +# define dh_ctype_tl target_ulong +# define dh_ctype_env CPUArchState * +#endif + /* We can't use glue() here because it falls foul of C preprocessor recursive expansion rules. 
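/*
 * Worked example, not part of the patch, of the dh_ mappings above: on a
 * target built with NEED_CPU_H and TARGET_LONG_BITS == 64, dh_alias(tl)
 * expands to i64 and dh_ctype(tl) to target_ulong, so an argument
 * declared as 'tl' becomes a TCGv_i64 in the generated wrapper and a
 * target_ulong in the C prototype.
 */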
*/ #define dh_retvar_decl0_void void @@ -77,11 +78,11 @@ #define dh_retvar_decl_ptr TCGv_ptr retval, #define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t)) -#define dh_retvar_void TCG_CALL_DUMMY_ARG -#define dh_retvar_noreturn TCG_CALL_DUMMY_ARG -#define dh_retvar_i32 GET_TCGV_i32(retval) -#define dh_retvar_i64 GET_TCGV_i64(retval) -#define dh_retvar_ptr GET_TCGV_ptr(retval) +#define dh_retvar_void NULL +#define dh_retvar_noreturn NULL +#define dh_retvar_i32 tcgv_i32_temp(tcg_ctx, retval) +#define dh_retvar_i64 tcgv_i64_temp(tcg_ctx, retval) +#define dh_retvar_ptr tcgv_ptr_temp(tcg_ctx, retval) #define dh_retvar(t) glue(dh_retvar_, dh_alias(t)) #define dh_is_64bit_void 0 @@ -89,6 +90,7 @@ #define dh_is_64bit_i32 0 #define dh_is_64bit_i64 1 #define dh_is_64bit_ptr (sizeof(void *) == 8) +#define dh_is_64bit_cptr dh_is_64bit_ptr #define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t)) #define dh_is_signed_void 0 @@ -97,6 +99,7 @@ #define dh_is_signed_s32 1 #define dh_is_signed_i64 0 #define dh_is_signed_s64 1 +#define dh_is_signed_f16 0 #define dh_is_signed_f32 0 #define dh_is_signed_f64 0 #define dh_is_signed_tl 0 @@ -105,14 +108,29 @@ extension instructions that may be required, e.g. ia64's addp4. But for now we don't support any 64-bit targets with 32-bit pointers. */ #define dh_is_signed_ptr 0 +#define dh_is_signed_cptr dh_is_signed_ptr #define dh_is_signed_env dh_is_signed_ptr #define dh_is_signed(t) dh_is_signed_##t +#define dh_callflag_i32 0 +#define dh_callflag_s32 0 +#define dh_callflag_int 0 +#define dh_callflag_i64 0 +#define dh_callflag_s64 0 +#define dh_callflag_f16 0 +#define dh_callflag_f32 0 +#define dh_callflag_f64 0 +#define dh_callflag_ptr 0 +#define dh_callflag_cptr dh_callflag_ptr +#define dh_callflag_void 0 +#define dh_callflag_noreturn TCG_CALL_NO_RETURN +#define dh_callflag(t) glue(dh_callflag_, dh_alias(t)) + #define dh_sizemask(t, n) \ ((dh_is_64bit(t) << (n*2)) | (dh_is_signed(t) << (n*2+1))) #define dh_arg(t, n) \ - glue(GET_TCGV_, dh_alias(t))(glue(arg, n)) + glue(glue(tcgv_, dh_alias(t)), _temp)(tcg_ctx, glue(arg, n)) #define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n) @@ -128,7 +146,11 @@ DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4) #define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \ DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5) +#define DEF_HELPER_6(name, ret, t1, t2, t3, t4, t5, t6) \ + DEF_HELPER_FLAGS_6(name, 0, ret, t1, t2, t3, t4, t5, t6) +#define DEF_HELPER_7(name, ret, t1, t2, t3, t4, t5, t6, t7) \ + DEF_HELPER_FLAGS_7(name, 0, ret, t1, t2, t3, t4, t5, t6, t7) /* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */ -#endif /* DEF_HELPER_H */ +#endif /* EXEC_HELPER_HEAD_H */ diff --git a/qemu/include/exec/helper-proto.h b/qemu/include/exec/helper-proto.h index 828951c6..217d670a 100644 --- a/qemu/include/exec/helper-proto.h +++ b/qemu/include/exec/helper-proto.h @@ -2,9 +2,9 @@ This one expands prototypes for the helper functions. 
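/*
 * Worked example, not part of the patch: for the hypothetical declaration
 * DEF_HELPER_2(my_clz, i32, env, i32), the macros in this header expand
 * to the prototype the compiler sees:
 *
 *   uint32_t helper_my_clz(CPUArchState *, uint32_t);
 *
 * (dh_ctype maps 'env' to CPUArchState * and 'i32' to uint32_t, per
 * helper-head.h above.)
 */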
*/ #ifndef HELPER_PROTO_H -#define HELPER_PROTO_H 1 +#define HELPER_PROTO_H -#include +#include "exec/helper-head.h" #define DEF_HELPER_FLAGS_0(name, flags, ret) \ dh_ctype(ret) HELPER(name) (void); @@ -26,8 +26,17 @@ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \ dh_ctype(t4), dh_ctype(t5)); +#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \ +dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \ + dh_ctype(t4), dh_ctype(t5), dh_ctype(t6)); + +#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \ +dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \ + dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \ + dh_ctype(t7)); + #include "helper.h" -#include "tcg-runtime.h" +#include "accel/tcg/tcg-runtime.h" #undef DEF_HELPER_FLAGS_0 #undef DEF_HELPER_FLAGS_1 @@ -35,5 +44,7 @@ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \ #undef DEF_HELPER_FLAGS_3 #undef DEF_HELPER_FLAGS_4 #undef DEF_HELPER_FLAGS_5 +#undef DEF_HELPER_FLAGS_6 +#undef DEF_HELPER_FLAGS_7 #endif /* HELPER_PROTO_H */ diff --git a/qemu/include/exec/helper-tcg.h b/qemu/include/exec/helper-tcg.h index 5b12f316..42d145b8 100644 --- a/qemu/include/exec/helper-tcg.h +++ b/qemu/include/exec/helper-tcg.h @@ -2,47 +2,73 @@ This one defines data structures private to tcg.c. */ #ifndef HELPER_TCG_H -#define HELPER_TCG_H 1 +#define HELPER_TCG_H -#include +#include "exec/helper-head.h" + +/* Need one more level of indirection before stringification + to get all the macros expanded first. */ +#define str(s) #s #define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \ - { HELPER(NAME), #NAME, FLAGS, \ - dh_sizemask(ret, 0) }, + { .func = HELPER(NAME), .name = str(NAME), \ + .flags = FLAGS | dh_callflag(ret), \ + .sizemask = dh_sizemask(ret, 0) }, #define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \ - { HELPER(NAME), #NAME, FLAGS, \ - dh_sizemask(ret, 0) | dh_sizemask(t1, 1) }, + { .func = HELPER(NAME), .name = str(NAME), \ + .flags = FLAGS | dh_callflag(ret), \ + .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) }, #define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \ - { HELPER(NAME), #NAME, FLAGS, \ - dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ + { .func = HELPER(NAME), .name = str(NAME), \ + .flags = FLAGS | dh_callflag(ret), \ + .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ | dh_sizemask(t2, 2) }, #define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \ - { HELPER(NAME), #NAME, FLAGS, \ - dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ + { .func = HELPER(NAME), .name = str(NAME), \ + .flags = FLAGS | dh_callflag(ret), \ + .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) }, #define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \ - { HELPER(NAME), #NAME, FLAGS, \ - dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ + { .func = HELPER(NAME), .name = str(NAME), \ + .flags = FLAGS | dh_callflag(ret), \ + .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) }, #define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \ - { HELPER(NAME), #NAME, FLAGS, \ - dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ + { .func = HELPER(NAME), .name = str(NAME), \ + .flags = FLAGS | dh_callflag(ret), \ + .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \ | dh_sizemask(t5, 5) }, -#include "helper.h" 
-#include "tcg-runtime.h" +#define DEF_HELPER_FLAGS_6(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6) \ + { .func = HELPER(NAME), .name = str(NAME), \ + .flags = FLAGS | dh_callflag(ret), \ + .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ + | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \ + | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) }, +#define DEF_HELPER_FLAGS_7(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6, t7) \ + { .func = HELPER(NAME), .name = str(NAME), .flags = FLAGS, \ + .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ + | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \ + | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) | dh_sizemask(t7, 7) }, + +#include "helper.h" +#include "accel/tcg/tcg-runtime.h" + +#undef str #undef DEF_HELPER_FLAGS_0 #undef DEF_HELPER_FLAGS_1 #undef DEF_HELPER_FLAGS_2 #undef DEF_HELPER_FLAGS_3 #undef DEF_HELPER_FLAGS_4 #undef DEF_HELPER_FLAGS_5 +#undef DEF_HELPER_FLAGS_6 +#undef DEF_HELPER_FLAGS_7 #endif /* HELPER_TCG_H */ diff --git a/qemu/include/exec/hwaddr.h b/qemu/include/exec/hwaddr.h index 8ac83941..a71c93cc 100644 --- a/qemu/include/exec/hwaddr.h +++ b/qemu/include/exec/hwaddr.h @@ -3,12 +3,11 @@ #ifndef HWADDR_H #define HWADDR_H + #define HWADDR_BITS 64 /* hwaddr is the type of a physical address (its size can be different from 'target_ulong'). */ -#include "unicorn/platform.h" - typedef uint64_t hwaddr; #define HWADDR_MAX UINT64_MAX #define TARGET_FMT_plx "%016" PRIx64 diff --git a/qemu/include/exec/ioport.h b/qemu/include/exec/ioport.h index 0d32c895..e5a5fbce 100644 --- a/qemu/include/exec/ioport.h +++ b/qemu/include/exec/ioport.h @@ -24,13 +24,8 @@ #ifndef IOPORT_H #define IOPORT_H -#include "qemu-common.h" -#include "qom/object.h" #include "exec/memory.h" -typedef uint32_t pio_addr_t; -#define FMT_pioaddr PRIx32 - #define MAX_IOPORTS (64 * 1024) #define IOPORTS_MASK (MAX_IOPORTS - 1) @@ -45,15 +40,31 @@ typedef struct MemoryRegionPortio { #define PORTIO_END_OF_LIST() { } -#ifndef CONFIG_USER_ONLY extern const MemoryRegionOps unassigned_io_ops; -#endif -void cpu_outb(struct uc_struct *uc, pio_addr_t addr, uint8_t val); -void cpu_outw(struct uc_struct *uc, pio_addr_t addr, uint16_t val); -void cpu_outl(struct uc_struct *uc, pio_addr_t addr, uint32_t val); -uint8_t cpu_inb(struct uc_struct *uc, pio_addr_t addr); -uint16_t cpu_inw(struct uc_struct *uc, pio_addr_t addr); -uint32_t cpu_inl(struct uc_struct *uc, pio_addr_t addr); +void cpu_outb(struct uc_struct *uc, uint32_t addr, uint8_t val); +void cpu_outw(struct uc_struct *uc, uint32_t addr, uint16_t val); +void cpu_outl(struct uc_struct *uc, uint32_t addr, uint32_t val); +uint8_t cpu_inb(struct uc_struct *uc, uint32_t addr); +uint16_t cpu_inw(struct uc_struct *uc, uint32_t addr); +uint32_t cpu_inl(struct uc_struct *uc, uint32_t addr); + +typedef struct PortioList { + const struct MemoryRegionPortio *ports; + struct MemoryRegion *address_space; + unsigned nr; + struct MemoryRegion **regions; + void *opaque; + const char *name; +} PortioList; + +void portio_list_init(PortioList *piolist, + const struct MemoryRegionPortio *callbacks, + void *opaque, const char *name); +void portio_list_destroy(PortioList *piolist); +void portio_list_add(PortioList *piolist, + struct MemoryRegion *address_space, + uint32_t addr); +void portio_list_del(PortioList *piolist); #endif /* IOPORT_H */ diff --git a/qemu/include/exec/memattrs.h b/qemu/include/exec/memattrs.h new file mode 100644 index 00000000..95f2d20d --- /dev/null +++ b/qemu/include/exec/memattrs.h @@ -0,0 +1,71 @@ +/* + * Memory 
transaction attributes + * + * Copyright (c) 2015 Linaro Limited. + * + * Authors: + * Peter Maydell + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef MEMATTRS_H +#define MEMATTRS_H + +/* Every memory transaction has associated with it a set of + * attributes. Some of these are generic (such as the ID of + * the bus master); some are specific to a particular kind of + * bus (such as the ARM Secure/NonSecure bit). We define them + * all as non-overlapping bitfields in a single struct to avoid + * confusion if different parts of QEMU used the same bit for + * different semantics. + */ +typedef struct MemTxAttrs { + /* Bus masters which don't specify any attributes will get this + * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can + * distinguish "all attributes deliberately clear" from + * "didn't specify" if necessary. + */ + unsigned int unspecified:1; + /* ARM/AMBA: TrustZone Secure access + * x86: System Management Mode access + */ + unsigned int secure:1; + /* Memory access is usermode (unprivileged) */ + unsigned int user:1; + /* Requester ID (for MSI for example) */ + unsigned int requester_id:16; + /* Invert endianness for this page */ + unsigned int byte_swap:1; + /* + * The following are target-specific page-table bits. These are not + * related to actual memory transactions at all. However, this structure + * is part of the tlb_fill interface, cached in the cputlb structure, + * and has unused bits. These fields will be read by target-specific + * helpers using env->iotlb[mmu_idx][tlb_index()].attrs.target_tlb_bitN. + */ + unsigned int target_tlb_bit0 : 1; + unsigned int target_tlb_bit1 : 1; + unsigned int target_tlb_bit2 : 1; +} MemTxAttrs; + +/* Bus masters which don't specify any attributes will get this, + * which has all attribute bits clear except the topmost one + * (so that we can distinguish "all attributes deliberately clear" + * from "didn't specify" if necessary). + */ +#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 }) + +/* New-style MMIO accessors can indicate that the transaction failed. + * A zero (MEMTX_OK) response means success; anything else is a failure + * of some kind. The memory subsystem will bitwise-OR together results + * if it is synthesizing an operation from multiple smaller accesses. + */ +#define MEMTX_OK 0 +#define MEMTX_ERROR (1U << 0) /* device returned an error */ +#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */ +typedef uint32_t MemTxResult; + +#endif diff --git a/qemu/include/exec/memop.h b/qemu/include/exec/memop.h new file mode 100644 index 00000000..529d07b0 --- /dev/null +++ b/qemu/include/exec/memop.h @@ -0,0 +1,134 @@ +/* + * Constants for memory operations + * + * Authors: + * Richard Henderson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef MEMOP_H +#define MEMOP_H + +#include "qemu/host-utils.h" + +typedef enum MemOp { + MO_8 = 0, + MO_16 = 1, + MO_32 = 2, + MO_64 = 3, + MO_SIZE = 3, /* Mask for the above. */ + + MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ + + MO_BSWAP = 8, /* Host reverse endian. 
*/ +#ifdef HOST_WORDS_BIGENDIAN + MO_LE = MO_BSWAP, + MO_BE = 0, +#else + MO_LE = 0, + MO_BE = MO_BSWAP, +#endif +#ifdef NEED_CPU_H +#ifdef TARGET_WORDS_BIGENDIAN + MO_TE = MO_BE, +#else + MO_TE = MO_LE, +#endif +#endif + + /* + * MO_UNALN accesses are never checked for alignment. + * MO_ALIGN accesses will result in a call to the CPU's + * do_unaligned_access hook if the guest address is not aligned. + * The default depends on whether the target CPU defines + * TARGET_ALIGNED_ONLY. + * + * Some architectures (e.g. ARMv8) need the address which is aligned + * to a size more than the size of the memory access. + * Some architectures (e.g. SPARCv9) need an address which is aligned, + * but less strictly than the natural alignment. + * + * MO_ALIGN supposes the alignment size is the size of a memory access. + * + * There are three options: + * - unaligned access permitted (MO_UNALN). + * - an alignment to the size of an access (MO_ALIGN); + * - an alignment to a specified size, which may be more or less than + * the access size (MO_ALIGN_x where 'x' is a size in bytes); + */ + MO_ASHIFT = 4, + MO_AMASK = 7 << MO_ASHIFT, +#ifdef NEED_CPU_H +#ifdef TARGET_ALIGNED_ONLY + MO_ALIGN = 0, + MO_UNALN = MO_AMASK, +#else + MO_ALIGN = MO_AMASK, + MO_UNALN = 0, +#endif +#endif + MO_ALIGN_2 = 1 << MO_ASHIFT, + MO_ALIGN_4 = 2 << MO_ASHIFT, + MO_ALIGN_8 = 3 << MO_ASHIFT, + MO_ALIGN_16 = 4 << MO_ASHIFT, + MO_ALIGN_32 = 5 << MO_ASHIFT, + MO_ALIGN_64 = 6 << MO_ASHIFT, + + /* Combinations of the above, for ease of use. */ + MO_UB = MO_8, + MO_UW = MO_16, + MO_UL = MO_32, + MO_SB = MO_SIGN | MO_8, + MO_SW = MO_SIGN | MO_16, + MO_SL = MO_SIGN | MO_32, + MO_Q = MO_64, + + MO_LEUW = MO_LE | MO_UW, + MO_LEUL = MO_LE | MO_UL, + MO_LESW = MO_LE | MO_SW, + MO_LESL = MO_LE | MO_SL, + MO_LEQ = MO_LE | MO_Q, + + MO_BEUW = MO_BE | MO_UW, + MO_BEUL = MO_BE | MO_UL, + MO_BESW = MO_BE | MO_SW, + MO_BESL = MO_BE | MO_SL, + MO_BEQ = MO_BE | MO_Q, + +#ifdef NEED_CPU_H + MO_TEUW = MO_TE | MO_UW, + MO_TEUL = MO_TE | MO_UL, + MO_TESW = MO_TE | MO_SW, + MO_TESL = MO_TE | MO_SL, + MO_TEQ = MO_TE | MO_Q, +#endif + + MO_SSIZE = MO_SIZE | MO_SIGN, +} MemOp; + +/* MemOp to size in bytes. */ +static inline unsigned memop_size(MemOp op) +{ + return 1 << (op & MO_SIZE); +} + +/* Size in bytes to MemOp. */ +static inline MemOp size_memop(unsigned size) +{ +#ifdef CONFIG_DEBUG_TCG + /* Power of 2 up to 8. */ + assert((size & (size - 1)) == 0 && size >= 1 && size <= 8); +#endif + return ctz32(size); +} + +/* Big endianness from MemOp. */ +static inline bool memop_big_endian(MemOp op) +{ + return (op & MO_BSWAP) == MO_BE; +} + +#endif diff --git a/qemu/include/exec/memory-internal.h b/qemu/include/exec/memory-internal.h index d3546d29..d8f05c1b 100644 --- a/qemu/include/exec/memory-internal.h +++ b/qemu/include/exec/memory-internal.h @@ -1,5 +1,5 @@ /* - * Declarations for obsolete exec.c functions + * Declarations for functions which are internal to the memory subsystem. * * Copyright 2011 Red Hat, Inc. and/or its affiliates * @@ -12,25 +12,40 @@ */ /* - * This header is for use by exec.c and memory.c ONLY. Do not include it. - * The functions declared here will be removed soon. + * This header is for use by exec.c, memory.c and accel/tcg/cputlb.c ONLY, + * for declarations which are shared between the memory subsystem's + * internals and the TCG TLB code. Do not include it from elsewhere. 
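/*
 * Illustrative sketch, not part of the patch, for the MemOp API in
 * memop.h above: composing and decoding a MemOp. A 32-bit little-endian
 * load with natural alignment is MO_LEUL | MO_ALIGN (MO_ALIGN assuming a
 * target built with NEED_CPU_H and without TARGET_ALIGNED_ONLY).
 */
static void memop_example(void)
{
    MemOp op = MO_LEUL | MO_ALIGN;
    unsigned width = memop_size(op);  /* 4 bytes */
    bool big = memop_big_endian(op);  /* false */
    (void)width; (void)big;
}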
*/ #ifndef MEMORY_INTERNAL_H #define MEMORY_INTERNAL_H -#ifndef CONFIG_USER_ONLY -typedef struct AddressSpaceDispatch AddressSpaceDispatch; +#include "cpu.h" -void address_space_init_dispatch(AddressSpace *as); -void address_space_destroy_dispatch(AddressSpace *as); +static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv) +{ + return fv->dispatch; +} + +static inline AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as) +{ + return flatview_to_dispatch(address_space_to_flatview(as)); +} + +FlatView *address_space_get_flatview(AddressSpace *as); +void flatview_unref(FlatView *view); extern const MemoryRegionOps unassigned_mem_ops; -bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr, - unsigned size, bool is_write); +bool memory_region_access_valid(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs); -void address_space_unregister(AddressSpace *as); +void flatview_add_to_dispatch(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section); +AddressSpaceDispatch *address_space_dispatch_new(struct uc_struct *uc, FlatView *fv); +void address_space_dispatch_compact(AddressSpaceDispatch *d); +void address_space_dispatch_free(AddressSpaceDispatch *d); -#endif +void mtree_print_dispatch(struct AddressSpaceDispatch *d, + MemoryRegion *root); #endif diff --git a/qemu/include/exec/memory.h b/qemu/include/exec/memory.h index ef28b9d0..a4b36e53 100644 --- a/qemu/include/exec/memory.h +++ b/qemu/include/exec/memory.h @@ -14,37 +14,27 @@ #ifndef MEMORY_H #define MEMORY_H -#ifndef CONFIG_USER_ONLY - -#define DIRTY_MEMORY_CODE 0 -#define DIRTY_MEMORY_NUM 1 /* num of dirty bits */ - -#include "unicorn/platform.h" -#include "qemu-common.h" #include "exec/cpu-common.h" #include "exec/hwaddr.h" +#include "exec/memattrs.h" +#include "exec/memop.h" +#include "exec/ramlist.h" +#include "qemu/bswap.h" #include "qemu/queue.h" #include "qemu/int128.h" -#include "qapi/error.h" -#include "qom/object.h" + +#define RAM_ADDR_INVALID (~(ram_addr_t)0) #define MAX_PHYS_ADDR_SPACE_BITS 62 #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1) -#define TYPE_MEMORY_REGION "qemu:memory-region" -#define MEMORY_REGION(uc, obj) \ - OBJECT_CHECK(uc, MemoryRegion, (obj), TYPE_MEMORY_REGION) - typedef struct MemoryRegionOps MemoryRegionOps; -typedef struct MemoryRegionMmio MemoryRegionMmio; - -struct MemoryRegionMmio { - CPUReadMemoryFunc *read[3]; - CPUWriteMemoryFunc *write[3]; -}; typedef struct IOMMUTLBEntry IOMMUTLBEntry; +typedef uint64_t (*uc_cb_mmio_read_t)(struct uc_struct *uc, uint64_t addr, unsigned size, void *user_data); +typedef void (*uc_cb_mmio_write_t)(struct uc_struct *uc, uint64_t addr, unsigned size, uint64_t data, void *user_data); + /* See address_space_translate: bit 0 is read, bit 1 is write. */ typedef enum { IOMMU_NONE = 0, @@ -53,6 +43,8 @@ typedef enum { IOMMU_RW = 3, } IOMMUAccessFlags; +#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0)) + struct IOMMUTLBEntry { AddressSpace *target_as; hwaddr iova; @@ -61,22 +53,100 @@ struct IOMMUTLBEntry { IOMMUAccessFlags perm; }; +/* + * Bitmap for different IOMMUNotifier capabilities. Each notifier can + * register with one or multiple IOMMU Notifier capability bit(s). 
+ */ +typedef enum { + IOMMU_NOTIFIER_NONE = 0, + /* Notify cache invalidations */ + IOMMU_NOTIFIER_UNMAP = 0x1, + /* Notify entry changes (newly created entries) */ + IOMMU_NOTIFIER_MAP = 0x2, +} IOMMUNotifierFlag; + +#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP) + +struct IOMMUNotifier; +typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier, + IOMMUTLBEntry *data); + +struct IOMMUNotifier { + IOMMUNotify notify; + IOMMUNotifierFlag notifier_flags; + /* Notify for address space range start <= addr <= end */ + hwaddr start; + hwaddr end; + int iommu_idx; + QLIST_ENTRY(IOMMUNotifier) node; +}; +typedef struct IOMMUNotifier IOMMUNotifier; + +/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */ +#define RAM_PREALLOC (1 << 0) + +/* RAM is mmap-ed with MAP_SHARED */ +#define RAM_SHARED (1 << 1) + +/* Only a portion of RAM (used_length) is actually used, and migrated. + * This used_length size can change across reboots. + */ +#define RAM_RESIZEABLE (1 << 2) + +/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically + * zero the page and wake waiting processes. + * (Set during postcopy) + */ +#define RAM_UF_ZEROPAGE (1 << 3) + +/* RAM can be migrated */ +#define RAM_MIGRATABLE (1 << 4) + +/* RAM is a persistent kind memory */ +#define RAM_PMEM (1 << 5) + +static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn, + IOMMUNotifierFlag flags, + hwaddr start, hwaddr end, + int iommu_idx) +{ + n->notify = fn; + n->notifier_flags = flags; + n->start = start; + n->end = end; + n->iommu_idx = iommu_idx; +} + /* * Memory region callbacks */ struct MemoryRegionOps { /* Read from the memory region. @addr is relative to @mr; @size is * in bytes. */ - uint64_t (*read)(struct uc_struct* uc, void *opaque, + uint64_t (*read)(struct uc_struct *uc, + void *opaque, hwaddr addr, unsigned size); /* Write to the memory region. @addr is relative to @mr; @size is * in bytes. */ - void (*write)(struct uc_struct* uc, void *opaque, + void (*write)(struct uc_struct *uc, + void *opaque, hwaddr addr, uint64_t data, unsigned size); + MemTxResult (*read_with_attrs)(struct uc_struct *uc, void *opaque, + hwaddr addr, + uint64_t *data, + unsigned size, + MemTxAttrs attrs); + + MemTxResult (*write_with_attrs)(struct uc_struct *, void *opaque, + hwaddr addr, + uint64_t data, + unsigned size, + MemTxAttrs attrs); + enum device_endian endianness; /* Guest-visible constraints: */ struct { @@ -94,8 +164,9 @@ struct MemoryRegionOps { * by the device (and results in machine dependent behaviour such * as a machine check exception). */ - bool (*accepts)(void *opaque, hwaddr addr, - unsigned size, bool is_write); + bool (*accepts)(struct uc_struct *uc, void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs); } valid; /* Internal implementation constraints: */ struct { @@ -112,54 +183,165 @@ struct MemoryRegionOps { */ bool unaligned; } impl; +}; - /* If .read and .write are not present, old_mmio may be used for - * backwards compatibility with old mmio registration +enum IOMMUMemoryRegionAttr { + IOMMU_ATTR_SPAPR_TCE_FD +}; + +/** + * IOMMUMemoryRegionClass: + * + * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION + * and provide an implementation of at least the @translate method here + * to handle requests to the memory region. Other methods are optional. 
+ * + * The IOMMU implementation must use the IOMMU notifier infrastructure + * to report whenever mappings are changed, by calling + * memory_region_notify_iommu() (or, if necessary, by calling + * memory_region_notify_one() for each registered notifier). + * + * Conceptually an IOMMU provides a mapping from input address + * to an output TLB entry. If the IOMMU is aware of memory transaction + * attributes and the output TLB entry depends on the transaction + * attributes, we represent this using IOMMU indexes. Each index + * selects a particular translation table that the IOMMU has: + * @attrs_to_index returns the IOMMU index for a set of transaction attributes + * @translate takes an input address and an IOMMU index + * and the mapping returned can only depend on the input address and the + * IOMMU index. + * + * Most IOMMUs don't care about the transaction attributes and support + * only a single IOMMU index. A more complex IOMMU might have one index + * for secure transactions and one for non-secure transactions. + */ +typedef struct IOMMUMemoryRegionClass { + /* + * Return a TLB entry that contains a given address. + * + * The IOMMUAccessFlags indicated via @flag are optional and may + * be specified as IOMMU_NONE to indicate that the caller needs + * the full translation information for both reads and writes. If + * the access flags are specified then the IOMMU implementation + * may use this as an optimization, to stop doing a page table + * walk as soon as it knows that the requested permissions are not + * allowed. If IOMMU_NONE is passed then the IOMMU must do the + * full page table walk and report the permissions in the returned + * IOMMUTLBEntry. (Note that this implies that an IOMMU may not + * return different mappings for reads and writes.) + * + * The returned information remains valid while the caller is + * holding the big QEMU lock or is inside an RCU critical section; + * if the caller wishes to cache the mapping beyond that it must + * register an IOMMU notifier so it can invalidate its cached + * information when the IOMMU mapping changes. + * + * @iommu: the IOMMUMemoryRegion + * @hwaddr: address to be translated within the memory region + * @flag: requested access permissions + * @iommu_idx: IOMMU index for the translation */ - const MemoryRegionMmio old_mmio; -}; + IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr, + IOMMUAccessFlags flag, int iommu_idx); + /* Returns minimum supported page size in bytes. + * If this method is not provided then the minimum is assumed to + * be TARGET_PAGE_SIZE. + * + * @iommu: the IOMMUMemoryRegion + */ + uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu); -typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps; + /* Get IOMMU misc attributes. This is an optional method that + * can be used to allow users of the IOMMU to get implementation-specific + * information. The IOMMU implements this method to handle calls + * by IOMMU users to memory_region_iommu_get_attr() by filling in + * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that + * the IOMMU supports. If the method is unimplemented then + * memory_region_iommu_get_attr() will always return -EINVAL. + * + * @iommu: the IOMMUMemoryRegion + * @attr: attribute being queried + * @data: memory to fill in with the attribute data + * + * Returns 0 on success, or a negative errno; in particular + * returns -EINVAL for unrecognized or unimplemented attribute types. 
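/*
 * Illustrative sketch, not part of the patch: the one mandatory class
 * method, translate(), filling in a single-page identity mapping. The
 * translated_addr and addr_mask field names are assumed from QEMU's
 * IOMMUTLBEntry; the mapping itself is hypothetical.
 */
static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                        IOMMUAccessFlags flag, int iommu_idx)
{
    IOMMUTLBEntry entry = {
        .iova = addr & TARGET_PAGE_MASK,
        .translated_addr = addr & TARGET_PAGE_MASK, /* identity map */
        .addr_mask = TARGET_PAGE_SIZE - 1,          /* one page */
        .perm = IOMMU_RW,
    };
    return entry;
}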
+ */ + int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr, + void *data); -struct MemoryRegionIOMMUOps { - /* Return a TLB entry that contains a given address. */ - IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write); -}; + /* Return the IOMMU index to use for a given set of transaction attributes. + * + * Optional method: if an IOMMU only supports a single IOMMU index then + * the default implementation of memory_region_iommu_attrs_to_index() + * will return 0. + * + * The indexes supported by an IOMMU must be contiguous, starting at 0. + * + * @iommu: the IOMMUMemoryRegion + * @attrs: memory transaction attributes + */ + int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs); + /* Return the number of IOMMU indexes this IOMMU supports. + * + * Optional method: if this method is not provided, then + * memory_region_iommu_num_indexes() will return 1, indicating that + * only a single IOMMU index is supported. + * + * @iommu: the IOMMUMemoryRegion + */ + int (*num_indexes)(IOMMUMemoryRegion *iommu); +} IOMMUMemoryRegionClass; + +/** MemoryRegion: + * + * A struct representing a memory region. + */ struct MemoryRegion { - Object parent_obj; - /* All fields are private - violators will be prosecuted */ + /* private: */ + + /* The following fields should fit in a cache line */ + bool ram; + bool subpage; + bool readonly; /* For RAM regions */ + bool is_iommu; + RAMBlock *ram_block; + const MemoryRegionOps *ops; - const MemoryRegionIOMMUOps *iommu_ops; void *opaque; MemoryRegion *container; Int128 size; hwaddr addr; void (*destructor)(MemoryRegion *mr); - ram_addr_t ram_addr; uint64_t align; - bool subpage; bool terminates; - bool romd_mode; - bool ram; - bool skip_dump; - bool readonly; /* For RAM regions */ bool enabled; - bool rom_device; - bool warning_printed; /* For reservations */ - MemoryRegion *alias; - hwaddr alias_offset; int32_t priority; - bool may_overlap; - QTAILQ_HEAD(subregions, MemoryRegion) subregions; + QTAILQ_HEAD(, MemoryRegion) subregions; QTAILQ_ENTRY(MemoryRegion) subregions_link; - const char *name; - uint8_t dirty_log_mask; + struct uc_struct *uc; - uint32_t perms; //all perms, partially redundant with readonly - uint64_t end; + uint32_t perms; + hwaddr end; }; +struct IOMMUMemoryRegion { + MemoryRegion parent_obj; + + QLIST_HEAD(, IOMMUNotifier) iommu_notify; + IOMMUNotifierFlag iommu_notify_flags; + + IOMMUMemoryRegionClass cc; +}; + +#define MEMORY_REGION(obj) ((MemoryRegion *)obj) +#define IOMMU_MEMORY_REGION(obj) ((IOMMUMemoryRegion *)obj) +#define IOMMU_MEMORY_REGION_CLASS(klass) ((IOMMUMemoryRegionClass *)klass) +#define IOMMU_MEMORY_REGION_GET_CLASS(obj) (&((IOMMUMemoryRegion *)obj)->cc) + +#define IOMMU_NOTIFIER_FOREACH(n, mr) \ + QLIST_FOREACH((n), &(mr)->iommu_notify, node) + /** * MemoryListener: callbacks structure for updates to the physical memory map * @@ -167,43 +349,115 @@ struct MemoryRegion { * Use with memory_listener_register() and memory_listener_unregister(). */ struct MemoryListener { + /** + * @begin: + * + * Called at the beginning of an address space update transaction. + * Followed by calls to #MemoryListener.region_add(), + * #MemoryListener.region_del(), #MemoryListener.region_nop(), + * #MemoryListener.log_start() and #MemoryListener.log_stop() in + * increasing address order. + * + * @listener: The #MemoryListener. 
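/*
 * Illustrative sketch, not part of the patch: a minimal listener using
 * the transaction callbacks documented here. Per that documentation, the
 * order within one transaction is begin, then the region_* hooks in
 * increasing address order, then commit. The logging payload is
 * arbitrary and needs <stdio.h>/<inttypes.h>.
 */
static void log_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    printf("mapped section at 0x%" PRIx64 "\n",
           (uint64_t)section->offset_within_address_space);
}

static MemoryListener log_listener = {
    .region_add = log_region_add,
    /* registered with memory_listener_register(), per the comment above */
};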
+     */
     void (*begin)(MemoryListener *listener);
+
+    /**
+     * @commit:
+     *
+     * Called at the end of an address space update transaction,
+     * after the last call to #MemoryListener.region_add(),
+     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
+     * #MemoryListener.log_start() and #MemoryListener.log_stop().
+     *
+     * @listener: The #MemoryListener.
+     */
     void (*commit)(MemoryListener *listener);
+
+    /**
+     * @region_add:
+     *
+     * Called during an address space update transaction,
+     * for a section of the address space that is new in this address space
+     * since the last transaction.
+     *
+     * @listener: The #MemoryListener.
+     * @section: The new #MemoryRegionSection.
+     */
     void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
+
+    /**
+     * @region_del:
+     *
+     * Called during an address space update transaction,
+     * for a section of the address space that has disappeared in the address
+     * space since the last transaction.
+     *
+     * @listener: The #MemoryListener.
+     * @section: The old #MemoryRegionSection.
+     */
     void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
+
+    /**
+     * @region_nop:
+     *
+     * Called during an address space update transaction,
+     * for a section of the address space that is in the same place in the
+     * address space as in the last transaction.
+     *
+     * @listener: The #MemoryListener.
+     * @section: The #MemoryRegionSection.
+     */
     void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
-    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
-    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
-    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
-    void (*log_global_start)(MemoryListener *listener);
-    void (*log_global_stop)(MemoryListener *listener);
-    /* Lower = earlier (during add), later (during del) */
-    unsigned priority;
-    AddressSpace *address_space_filter;
+
+    /* private: */
+    AddressSpace *address_space;
     QTAILQ_ENTRY(MemoryListener) link;
+    QTAILQ_ENTRY(MemoryListener) link_as;
 };

 /**
  * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
  */
 struct AddressSpace {
-    /* All fields are private. */
-    char *name;
+    /* private: */
     MemoryRegion *root;
-    struct FlatView *current_map;
-    struct AddressSpaceDispatch *dispatch;
-    struct AddressSpaceDispatch *next_dispatch;
-    MemoryListener dispatch_listener;
-    struct uc_struct* uc;

+    /* Accessed via RCU.  */
+    struct FlatView *current_map;
+
+    QTAILQ_HEAD(, MemoryListener) listeners;
     QTAILQ_ENTRY(AddressSpace) address_spaces_link;
+
+    struct uc_struct *uc;
 };

+typedef struct AddressSpaceDispatch AddressSpaceDispatch;
+typedef struct FlatRange FlatRange;
+
+/* Flattened global view of current active memory hierarchy.  Kept in sorted
+ * order.
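For illustration only (not part of the patch): a MemoryListener is plugged in with the new two-argument register API declared further below. The callback body, the listener name and the addresses are invented; stdio/inttypes are assumed for the diagnostic print.

    #include <stdio.h>
    #include <inttypes.h>

    static void on_region_add(MemoryListener *listener,
                              MemoryRegionSection *section)
    {
        /* Runs between the begin() and commit() callbacks, once for
         * each section that is new in the address space. */
        printf("new section at 0x%" PRIx64 "\n",
               (uint64_t)section->offset_within_address_space);
    }

    static MemoryListener debug_listener = {
        .region_add = on_region_add,
    };

    /* After address_space_init(uc, &as, root):
     *     memory_listener_register(&debug_listener, &as);
     */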
+ */ +struct FlatView { + unsigned ref; + FlatRange *ranges; + unsigned nr; + unsigned nr_allocated; + struct AddressSpaceDispatch *dispatch; + MemoryRegion *root; +}; + +static inline FlatView *address_space_to_flatview(AddressSpace *as) +{ + return as->current_map; +} + + /** * MemoryRegionSection: describes a fragment of a #MemoryRegion * * @mr: the region, or %NULL if empty - * @address_space: the address space the region is mapped in + * @fv: the flat view of the address space the region is mapped in * @offset_within_region: the beginning of the section, relative to @mr's start * @size: the size of the section; will not exceed @mr's boundaries * @offset_within_address_space: the address of the first byte of the section @@ -211,25 +465,23 @@ struct AddressSpace { * @readonly: writes to this section are ignored */ struct MemoryRegionSection { - MemoryRegion *mr; - AddressSpace *address_space; - hwaddr offset_within_region; Int128 size; + MemoryRegion *mr; + FlatView *fv; + hwaddr offset_within_region; hwaddr offset_within_address_space; bool readonly; }; -static inline MemoryRegionSection MemoryRegionSection_make(MemoryRegion *mr, AddressSpace *address_space, - hwaddr offset_within_region, Int128 size, hwaddr offset_within_address_space, bool readonly) +static inline bool MemoryRegionSection_eq(MemoryRegionSection *a, + MemoryRegionSection *b) { - MemoryRegionSection section; - section.mr = mr; - section.address_space = address_space; - section.offset_within_region = offset_within_region; - section.size = size; - section.offset_within_address_space = offset_within_address_space; - section.readonly = readonly; - return section; + return a->mr == b->mr && + a->fv == b->fv && + a->offset_within_region == b->offset_within_region && + a->offset_within_address_space == b->offset_within_address_space && + int128_eq(a->size, b->size) && + a->readonly == b->readonly; } /** @@ -239,13 +491,10 @@ static inline MemoryRegionSection MemoryRegionSection_make(MemoryRegion *mr, Add * memory_region_add_subregion() to add subregions. * * @mr: the #MemoryRegion to be initialized - * @owner: the object that tracks the region's reference count - * @name: used for debugging; not visible to the user or ABI * @size: size of the region; any subregions beyond this size will be clipped */ -void memory_region_init(struct uc_struct *uc, MemoryRegion *mr, - struct Object *owner, - const char *name, +void memory_region_init(struct uc_struct *uc, + MemoryRegion *mr, uint64_t size); /** @@ -265,18 +514,6 @@ void memory_region_init(struct uc_struct *uc, MemoryRegion *mr, */ void memory_region_ref(MemoryRegion *mr); -/** - * memory_region_unref: Remove 1 to a memory region's reference count - * - * Whenever memory regions are accessed outside the BQL, they need to be - * preserved against hot-unplug. MemoryRegions actually do not have their - * own reference count; they piggyback on a QOM object, their "owner". - * This function removes a reference to the owner and possibly destroys it. - * - * @mr: the #MemoryRegion - */ -void memory_region_unref(MemoryRegion *mr); - /** * memory_region_init_io: Initialize an I/O memory region. * @@ -284,129 +521,57 @@ void memory_region_unref(MemoryRegion *mr); * if @size is nonzero, subregions will be clipped to @size. * * @mr: the #MemoryRegion to be initialized. - * @owner: the object that tracks the region's reference count * @ops: a structure containing read and write callbacks to be used when * I/O is performed on the region. 
- * @opaque: passed to to the read and write callbacks of the @ops structure. - * @name: used for debugging; not visible to the user or ABI + * @opaque: passed to the read and write callbacks of the @ops structure. * @size: size of the region. */ -void memory_region_init_io(struct uc_struct *uc, MemoryRegion *mr, - struct Object *owner, +void memory_region_init_io(struct uc_struct *uc, + MemoryRegion *mr, const MemoryRegionOps *ops, void *opaque, - const char *name, uint64_t size); -/** - * memory_region_init_ram: Initialize RAM memory region. Accesses into the - * region will modify memory directly. - * - * @mr: the #MemoryRegion to be initialized. - * @owner: the object that tracks the region's reference count - * @name: the name of the region. - * @size: size of the region. - * @perms: permissions on the region (UC_PROT_READ, UC_PROT_WRITE, UC_PROT_EXEC). - * @errp: pointer to Error*, to store an error if it happens. - */ -void memory_region_init_ram(struct uc_struct *uc, MemoryRegion *mr, - struct Object *owner, - const char *name, - uint64_t size, - uint32_t perms, - Error **errp); - /** * memory_region_init_ram_ptr: Initialize RAM memory region from a * user-provided pointer. Accesses into the * region will modify memory directly. * * @mr: the #MemoryRegion to be initialized. - * @owner: the object that tracks the region's reference count - * @name: the name of the region. * @size: size of the region. * @ptr: memory to be mapped; must contain at least @size bytes. + * + * Note that this function does not do anything to cause the data in the + * RAM memory region to be migrated; that is the responsibility of the caller. */ -void memory_region_init_ram_ptr(struct uc_struct *uc, MemoryRegion *mr, - struct Object *owner, - const char *name, +void memory_region_init_ram_ptr(struct uc_struct *uc, + MemoryRegion *mr, uint64_t size, void *ptr); /** - * memory_region_init_alias: Initialize a memory region that aliases all or a - * part of another memory region. - * - * @mr: the #MemoryRegion to be initialized. - * @owner: the object that tracks the region's reference count - * @name: used for debugging; not visible to the user or ABI - * @orig: the region to be referenced; @mr will be equivalent to - * @orig between @offset and @offset + @size - 1. - * @offset: start of the section in @orig to be referenced. - * @size: size of the region. - */ -void memory_region_init_alias(struct uc_struct *uc, MemoryRegion *mr, - struct Object *owner, - const char *name, - MemoryRegion *orig, - hwaddr offset, - uint64_t size); - -/** - * memory_region_init_rom_device: Initialize a ROM memory region. Writes are - * handled via callbacks. - * - * @mr: the #MemoryRegion to be initialized. - * @owner: the object that tracks the region's reference count - * @ops: callbacks for write access handling. - * @name: the name of the region. - * @size: size of the region. - * @errp: pointer to Error*, to store an error if it happens. - */ -void memory_region_init_rom_device(MemoryRegion *mr, - struct Object *owner, - const MemoryRegionOps *ops, - void *opaque, - const char *name, - uint64_t size, - Error **errp); - -/** - * memory_region_init_reservation: Initialize a memory region that reserves - * I/O space. - * - * A reservation region primariy serves debugging purposes. It claims I/O - * space that is not supposed to be handled by QEMU itself. Any access via - * the memory API will cause an abort(). + * memory_region_init_ram - Initialize RAM memory region. Accesses into the + * region will modify memory directly. 
 *
 * @mr: the #MemoryRegion to be initialized
- * @owner: the object that tracks the region's reference count
- * @name: used for debugging; not visible to the user or ABI
- * @size: size of the region.
- */
-void memory_region_init_reservation(struct uc_struct *uc, MemoryRegion *mr,
-                                    struct Object *owner,
-                                    const char *name,
-                                    uint64_t size);
-
-/**
- * memory_region_init_iommu: Initialize a memory region that translates
- * addresses
+ * @size: size of the region in bytes
+ * @perms: permissions on the region (UC_PROT_READ, UC_PROT_WRITE,
+ *         UC_PROT_EXEC)
 *
- * An IOMMU region translates addresses and forwards accesses to a target
- * memory region.
+ * This function allocates the host memory that backs the region and
+ * applies @perms as the initial access permissions.
 *
- * @mr: the #MemoryRegion to be initialized
- * @owner: the object that tracks the region's reference count
- * @ops: a function that translates addresses into the @target region
- * @name: used for debugging; not visible to the user or ABI
- * @size: size of the region.
+ * Unlike upstream QEMU, this port tracks neither a QOM owner object
+ * nor a region name; callers pass only the emulator instance @uc.
 */
-void memory_region_init_iommu(MemoryRegion *mr,
-                              struct Object *owner,
-                              const MemoryRegionIOMMUOps *ops,
-                              const char *name,
-                              uint64_t size);
+void memory_region_init_ram(struct uc_struct *uc,
+                            MemoryRegion *mr,
+                            uint64_t size,
+                            uint32_t perms);

 /**
  * memory_region_size: get a memory region's size.
@@ -418,110 +583,64 @@ uint64_t memory_region_size(MemoryRegion *mr);

 /**
  * memory_region_is_ram: check whether a memory region is random access
 *
- * Returns %true is a memory region is random access.
+ * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
-bool memory_region_is_ram(MemoryRegion *mr);
-
-/**
- * memory_region_is_skip_dump: check whether a memory region should not be
- *                             dumped
- *
- * Returns %true is a memory region should not be dumped(e.g. VFIO BAR MMAP).
- *
- * @mr: the memory region being queried
- */
-bool memory_region_is_skip_dump(MemoryRegion *mr);
-
-/**
- * memory_region_set_skip_dump: Set skip_dump flag, dump will ignore this memory
- *                              region
- *
- * @mr: the memory region being queried
- */
-void memory_region_set_skip_dump(MemoryRegion *mr);
-
-/**
- * memory_region_is_romd: check whether a memory region is in ROMD mode
- *
- * Returns %true if a memory region is a ROM device and currently set to allow
- * direct reads.
- *
- * @mr: the memory region being queried
- */
-static inline bool memory_region_is_romd(MemoryRegion *mr)
+static inline bool memory_region_is_ram(MemoryRegion *mr)
 {
-    return mr->rom_device && mr->romd_mode;
+    return mr->ram;
 }

 /**
- * memory_region_is_iommu: check whether a memory region is an iommu
+ * memory_region_get_iommu: check whether a memory region is an iommu
 *
- * Returns %true is a memory region is an iommu.
+ * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
+ * otherwise NULL.
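For illustration only (not part of the patch): the slimmed-down memory_region_init_ram() above is most conveniently reached through the memory_map() wrapper declared at the bottom of this header. A minimal sketch, with invented addresses and assuming a NULL return signals failure:

    /* Map 64KiB of guest RAM at 0x1000, readable and writable. */
    MemoryRegion *ram = memory_map(uc, 0x1000, 0x10000,
                                   UC_PROT_READ | UC_PROT_WRITE);
    if (ram == NULL) {
        /* allocation or mapping failed */
    }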
 *
 * @mr: the memory region being queried
 */
-bool memory_region_is_iommu(MemoryRegion *mr);
+static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
+{
+    if (mr->is_iommu) {
+        return (IOMMUMemoryRegion *) mr;
+    }
+    return NULL;
+}

 /**
- * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
+ * memory_region_get_iommu_class_nocheck: return the #IOMMUMemoryRegionClass
+ *   of an IOMMU memory region
 *
- * @mr: the memory region that was changed
- * @entry: the new entry in the IOMMU translation table.  The entry
- *         replaces all old entries for the same virtual I/O address range.
- *         Deleted entries have .@perm == 0.
+ * Returns a pointer to the #IOMMUMemoryRegionClass embedded in @iommu_mr.
+ * This is a fast path that performs no QOM type checking, so the caller
+ * must already know the region is an IOMMU; use with caution.
+ *
+ * @iommu_mr: the memory region being queried
 */
-void memory_region_notify_iommu(MemoryRegion *mr,
-                                IOMMUTLBEntry entry);
+static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
+        IOMMUMemoryRegion *iommu_mr)
+{
+    return &iommu_mr->cc;
+}

 /**
- * memory_region_name: get a memory region's name
+ * memory_region_from_host: Convert a pointer into a RAM memory region
+ * and an offset within it.
 *
- * Returns the string that was used to initialize the memory region.
+ * Given a host pointer inside a RAM memory region (created with
+ * memory_region_init_ram() or memory_region_init_ram_ptr()), return
+ * the MemoryRegion and the offset within it.
 *
- * @mr: the memory region being queried
+ * Use with care; by the time this function returns, the returned pointer is
+ * not protected by RCU anymore.  If the caller is not within an RCU critical
+ * section and does not hold the iothread lock, it must have other means of
+ * protecting the pointer, such as a reference to the region that includes
+ * the incoming ram_addr_t.
+ *
+ * @ptr: the host pointer to be converted
+ * @offset: filled in with the offset within the memory region
 */
-const char *memory_region_name(const MemoryRegion *mr);
-
-/**
- * memory_region_is_logging: return whether a memory region is logging writes
- *
- * Returns %true if the memory region is logging writes
- *
- * @mr: the memory region being queried
- */
-bool memory_region_is_logging(MemoryRegion *mr);
-
-/**
- * memory_region_is_rom: check whether a memory region is ROM
- *
- * Returns %true is a memory region is read-only memory.
- *
- * @mr: the memory region being queried
- */
-bool memory_region_is_rom(MemoryRegion *mr);
-
-/**
- * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
- *
- * Returns a file descriptor backing a file-based RAM memory region,
- * or -1 if the region is not a file-based RAM memory region.
- *
- * @mr: the RAM or alias memory region being queried.
- */
-int memory_region_get_fd(MemoryRegion *mr);
-
-/**
- * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
- *
- * Returns a host pointer to a RAM memory region (created with
- * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
- * care.
- *
- * @mr: the memory region being queried.
- */ -void *memory_region_get_ram_ptr(MemoryRegion *mr); +MemoryRegion *memory_region_from_host(struct uc_struct *uc, void *ptr, ram_addr_t *offset); /** * memory_region_set_readonly: Turn a memory region read-only (or read-write) @@ -535,18 +654,20 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr); void memory_region_set_readonly(MemoryRegion *mr, bool readonly); /** - * memory_region_rom_device_set_romd: enable/disable ROMD mode + * memory_region_get_ram_ptr: Get a pointer into a RAM memory region. * - * Allows a ROM device (initialized with memory_region_init_rom_device() to - * set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the - * device is mapped to guest memory and satisfies read access directly. - * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function. - * Writes are always handled by the #MemoryRegion.write function. + * Returns a host pointer to a RAM memory region (created with + * memory_region_init_ram() or memory_region_init_ram_ptr()). * - * @mr: the memory region to be updated - * @romd_mode: %true to put the region into ROMD mode + * Use with care; by the time this function returns, the returned pointer is + * not protected by RCU anymore. If the caller is not within an RCU critical + * section and does not hold the iothread lock, it must have other means of + * protecting the pointer, such as a reference to the region that includes + * the incoming ram_addr_t. + * + * @mr: the memory region being queried. */ -void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode); +void *memory_region_get_ram_ptr(MemoryRegion *mr); /** * memory_region_add_subregion: Add a subregion to a container. @@ -565,39 +686,15 @@ void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode); void memory_region_add_subregion(MemoryRegion *mr, hwaddr offset, MemoryRegion *subregion); -/** - * memory_region_add_subregion_overlap: Add a subregion to a container - * with overlap. - * - * Adds a subregion at @offset. The subregion may overlap with other - * subregions. Conflicts are resolved by having a higher @priority hide a - * lower @priority. Subregions without priority are taken as @priority 0. - * A region may only be added once as a subregion (unless removed with - * memory_region_del_subregion()); use memory_region_init_alias() if you - * want a region to be a subregion in multiple locations. - * - * @mr: the region to contain the new subregion; must be a container - * initialized with memory_region_init(). - * @offset: the offset relative to @mr where @subregion is added. - * @subregion: the subregion to be added. - * @priority: used for resolving overlaps; highest priority wins. - */ -void memory_region_add_subregion_overlap(MemoryRegion *mr, - hwaddr offset, - MemoryRegion *subregion, - int priority); /** * memory_region_get_ram_addr: Get the ram address associated with a memory * region * - * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen - * code is being reworked. + * @mr: the region to be queried */ ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr); -uint64_t memory_region_get_alignment(const MemoryRegion *mr); - /** * memory_region_del_subregion: Remove a subregion. * @@ -609,64 +706,6 @@ uint64_t memory_region_get_alignment(const MemoryRegion *mr); void memory_region_del_subregion(MemoryRegion *mr, MemoryRegion *subregion); -/* - * memory_region_set_enabled: dynamically enable or disable a region - * - * Enables or disables a memory region. 
A disabled memory region
- * ignores all accesses to itself and its subregions.  It does not
- * obscure sibling subregions with lower priority - it simply behaves as
- * if it was removed from the hierarchy.
- *
- * Regions default to being enabled.
- *
- * @mr: the region to be updated
- * @enabled: whether to enable or disable the region
- */
-void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
-
-/*
- * memory_region_set_address: dynamically update the address of a region
- *
- * Dynamically updates the address of a region, relative to its container.
- * May be used on regions are currently part of a memory hierarchy.
- *
- * @mr: the region to be updated
- * @addr: new address, relative to container region
- */
-void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
-
-/*
- * memory_region_set_alias_offset: dynamically update a memory alias's offset
- *
- * Dynamically updates the offset into the target region that an alias points
- * to, as if the fourth argument to memory_region_init_alias() has changed.
- *
- * @mr: the #MemoryRegion to be updated; should be an alias.
- * @offset: the new offset into the target memory region
- */
-void memory_region_set_alias_offset(MemoryRegion *mr,
-                                    hwaddr offset);
-
-/**
- * memory_region_present: checks if an address relative to a @container
- * translates into #MemoryRegion within @container
- *
- * Answer whether a #MemoryRegion within @container covers the address
- * @addr.
- *
- * @container: a #MemoryRegion within which @addr is a relative address
- * @addr: the area within @container to be searched
- */
-bool memory_region_present(MemoryRegion *container, hwaddr addr);
-
-/**
- * memory_region_is_mapped: returns true if #MemoryRegion is mapped
- * into any address space.
- *
- * @mr: a #MemoryRegion which should be checked if it's mapped
- */
-bool memory_region_is_mapped(MemoryRegion *mr);
-
 /**
  * memory_region_find: translate an address/size relative to a
  * MemoryRegion into a #MemoryRegionSection.
@@ -676,8 +715,8 @@ bool memory_region_is_mapped(MemoryRegion *mr);
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
- *    .@size = 0 iff no overlap was found
- *    .@mr is non-%NULL iff an overlap was found
+ * - @size = 0 iff no overlap was found
+ * - @mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
@@ -688,8 +727,8 @@ bool memory_region_is_mapped(MemoryRegion *mr);
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
- *    .@offset_within_address_space >= @addr
- *    .@offset_within_address_space + .@size <= @addr + @size
+ * - @offset_within_address_space >= @addr
+ * - @offset_within_address_space + @size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @as to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);
-
-/**
- * memory_region_transaction_begin: Start a transaction.
- *
- * During a transaction, changes will be accumulated and made visible
- * only when the transaction ends (is committed).
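For illustration only (not part of the patch): probing a hierarchy with memory_region_find() per the contract above. 'root' and the range are invented, and int128_get64() is assumed available from the Int128 helpers this header relies on.

    MemoryRegionSection sec = memory_region_find(root, 0x1000, 4);
    if (sec.mr != NULL) {
        /* An overlap was found: it starts sec.offset_within_region
         * bytes into sec.mr and covers int128_get64(sec.size) bytes. */
    }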
- */ -void memory_region_transaction_begin(struct uc_struct*); - -/** - * memory_region_transaction_commit: Commit a transaction and make changes - * visible to the guest. - */ -void memory_region_transaction_commit(struct uc_struct*); - /** * memory_listener_register: register callbacks to be called when memory * sections are mapped or unmapped into an address @@ -720,25 +745,55 @@ void memory_region_transaction_commit(struct uc_struct*); * @listener: an object containing the callbacks to be called * @filter: if non-%NULL, only regions in this address space will be observed */ -void memory_listener_register(struct uc_struct* uc, MemoryListener *listener, AddressSpace *filter); +void memory_listener_register(MemoryListener *listener, AddressSpace *filter); /** * memory_listener_unregister: undo the effect of memory_listener_register() * * @listener: an object containing the callbacks to be removed */ -void memory_listener_unregister(struct uc_struct* uc, MemoryListener *listener); +void memory_listener_unregister(MemoryListener *listener); + +/** + * memory_region_dispatch_read: perform a read directly to the specified + * MemoryRegion. + * + * @mr: #MemoryRegion to access + * @addr: address within that region + * @pval: pointer to uint64_t which the data is written to + * @op: size, sign, and endianness of the memory operation + * @attrs: memory transaction attributes to use for the access + */ +MemTxResult memory_region_dispatch_read(struct uc_struct *uc, MemoryRegion *mr, + hwaddr addr, + uint64_t *pval, + MemOp op, + MemTxAttrs attrs); +/** + * memory_region_dispatch_write: perform a write directly to the specified + * MemoryRegion. + * + * @mr: #MemoryRegion to access + * @addr: address within that region + * @data: data to write + * @op: size, sign, and endianness of the memory operation + * @attrs: memory transaction attributes to use for the access + */ +MemTxResult memory_region_dispatch_write(struct uc_struct *uc, MemoryRegion *mr, + hwaddr addr, + uint64_t data, + MemOp op, + MemTxAttrs attrs); /** * address_space_init: initializes an address space * * @as: an uninitialized #AddressSpace - * @root: a #MemoryRegion that routes addesses for the address space - * @name: an address space name. The name is only used for debugging - * output. + * @root: a #MemoryRegion that routes addresses for the address space */ -void address_space_init(struct uc_struct *uc, AddressSpace *as, MemoryRegion *root, const char *name); - +void address_space_init(struct uc_struct *uc, + AddressSpace *as, + MemoryRegion *root); /** * address_space_destroy: destroy an address space @@ -751,58 +806,241 @@ void address_space_init(struct uc_struct *uc, AddressSpace *as, MemoryRegion *ro */ void address_space_destroy(AddressSpace *as); +/** + * address_space_remove_listeners: unregister all listeners of an address space + * + * Removes all callbacks previously registered with memory_listener_register() + * for @as. + * + * @as: an initialized #AddressSpace + */ +void address_space_remove_listeners(AddressSpace *as); + /** * address_space_rw: read from or write to an address space. * - * Return true if the operation hit any unassigned memory or encountered an - * IOMMU fault. + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). 
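For illustration only (not part of the patch): the MemTxResult-returning accessors documented here are used as below. The buffer and address are invented; MEMTXATTRS_UNSPECIFIED is the stock "no particular attributes" value used throughout these headers.

    uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };

    if (address_space_write(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
                            buf, sizeof(buf)) != MEMTX_OK) {
        /* unassigned memory, device error or IOMMU fault */
    }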
* * @as: #AddressSpace to be accessed * @addr: address within that address space + * @attrs: memory transaction attributes * @buf: buffer with the data transferred + * @len: the number of bytes to read or write * @is_write: indicates the transfer direction */ -bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, - int len, bool is_write); +MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, void *buf, + hwaddr len, bool is_write); /** * address_space_write: write to address space. * - * Return true if the operation hit any unassigned memory or encountered an - * IOMMU fault. + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). * * @as: #AddressSpace to be accessed * @addr: address within that address space + * @attrs: memory transaction attributes * @buf: buffer with the data transferred + * @len: the number of bytes to write */ -bool address_space_write(AddressSpace *as, hwaddr addr, - const uint8_t *buf, int len); +MemTxResult address_space_write(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, + const void *buf, hwaddr len); /** - * address_space_read: read from an address space. + * address_space_write_rom: write to address space, including ROM. * - * Return true if the operation hit any unassigned memory or encountered an - * IOMMU fault. + * This function writes to the specified address space, but will + * write data to both ROM and RAM. This is used for non-guest + * writes like writes from the gdb debug stub or initial loading + * of ROM contents. + * + * Note that portions of the write which attempt to write data to + * a device will be silently ignored -- only real RAM and ROM will + * be written to. + * + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). * * @as: #AddressSpace to be accessed * @addr: address within that address space + * @attrs: memory transaction attributes * @buf: buffer with the data transferred + * @len: the number of bytes to write */ -bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len); +MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, + const void *buf, hwaddr len); + +/* address_space_ld*: load from an address space + * address_space_st*: store to an address space + * + * These functions perform a load or store of the byte, word, + * longword or quad to the specified address within the AddressSpace. + * The _le suffixed functions treat the data as little endian; + * _be indicates big endian; no suffix indicates "same endianness + * as guest CPU". + * + * The "guest CPU endianness" accessors are deprecated for use outside + * target-* code; devices should be CPU-agnostic and use either the LE + * or the BE accessors. 
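For illustration only (not part of the patch): when UNICORN_ARCH_POSTFIX is undefined, SUFFIX expands to nothing and the template include below generates plain names such as address_space_ldl_le(). A sketch with an invented address:

    MemTxResult res;
    uint32_t v = address_space_ldl_le(uc, as, 0x1000,
                                      MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        /* the load faulted; v is not meaningful */
    }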
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ *   if NULL, this information is discarded
+ */
+
+#ifdef UNICORN_ARCH_POSTFIX
+#define SUFFIX UNICORN_ARCH_POSTFIX
+#else
+#define SUFFIX
+#endif
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#include "exec/memory_ldst.inc.h"
+
+#ifdef UNICORN_ARCH_POSTFIX
+#define SUFFIX UNICORN_ARCH_POSTFIX
+#else
+#define SUFFIX
+#endif
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#include "exec/memory_ldst_phys.inc.h"
+
+struct MemoryRegionCache {
+    void *ptr;
+    hwaddr xlat;
+    hwaddr len;
+    FlatView *fv;
+    MemoryRegionSection mrs;
+    bool is_write;
+};
+
+#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
+
+
+/* address_space_ld*_cached: load from a cached #MemoryRegion
+ * address_space_st*_cached: store into a cached #MemoryRegion
+ *
+ * These functions perform a load or store of the byte, word,
+ * longword or quad to the specified address.  The address is
+ * a physical address in the AddressSpace, but it must lie within
+ * a #MemoryRegion that was mapped with address_space_cache_init.
+ *
+ * The _le suffixed functions treat the data as little endian;
+ * _be indicates big endian; no suffix indicates "same endianness
+ * as guest CPU".
+ *
+ * The "guest CPU endianness" accessors are deprecated for use outside
+ * target-* code; devices should be CPU-agnostic and use either the LE
+ * or the BE accessors.
+ *
+ * @cache: previously initialized #MemoryRegionCache to be accessed
+ * @addr: address within the address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ *   if NULL, this information is discarded
+ */
+
+#ifdef UNICORN_ARCH_POSTFIX
+#define SUFFIX glue(_cached_slow, UNICORN_ARCH_POSTFIX)
+#else
+#define SUFFIX _cached_slow
+#endif
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#include "exec/memory_ldst.inc.h"
+
+/* Inline fast path for direct RAM access.
*/ +#ifdef UNICORN_ARCH_POSTFIX +static inline uint8_t glue(address_space_ldub_cached, UNICORN_ARCH_POSTFIX)(struct uc_struct *uc, MemoryRegionCache *cache, +#else +static inline uint8_t address_space_ldub_cached(struct uc_struct *uc, MemoryRegionCache *cache, +#endif + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + assert(addr < cache->len); + if (likely(cache->ptr)) { + return ldub_p((char *)cache->ptr + addr); + } else { +#ifdef UNICORN_ARCH_POSTFIX + return glue(address_space_ldub_cached_slow, UNICORN_ARCH_POSTFIX)(uc, cache, addr, attrs, result); +#else + return address_space_ldub_cached_slow(uc, cache, addr, attrs, result); +#endif + } +} + +#ifdef UNICORN_ARCH_POSTFIX +static inline void glue(address_space_stb_cached, UNICORN_ARCH_POSTFIX)(struct uc_struct *uc, MemoryRegionCache *cache, +#else +static inline void address_space_stb_cached(struct uc_struct *uc, MemoryRegionCache *cache, +#endif + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + assert(addr < cache->len); + if (likely(cache->ptr)) { + stb_p((char *)cache->ptr + addr, val); + } else { +#ifdef UNICORN_ARCH_POSTFIX + glue(address_space_stb_cached_slow, UNICORN_ARCH_POSTFIX)(uc, cache, addr, val, attrs, result); +#else + address_space_stb_cached_slow(uc, cache, addr, val, attrs, result); +#endif + } +} + +#define ENDIANNESS _le +#include "exec/memory_ldst_cached.inc.h" + +#define ENDIANNESS _be +#include "exec/memory_ldst_cached.inc.h" + +#ifdef UNICORN_ARCH_POSTFIX +#define SUFFIX glue(_cached, UNICORN_ARCH_POSTFIX) +#else +#define SUFFIX _cached +#endif +#define ARG1 cache +#define ARG1_DECL MemoryRegionCache *cache +#include "exec/memory_ldst_phys.inc.h" /* address_space_translate: translate an address range into an address space - * into a MemoryRegion and an address range into that section + * into a MemoryRegion and an address range into that section. Should be + * called from an RCU critical section, to avoid that the last reference + * to the returned region disappears after address_space_translate returns. * - * @as: #AddressSpace to be accessed + * @fv: #FlatView to be accessed * @addr: address within that address space * @xlat: pointer to address within the returned memory region section's * #MemoryRegion. 
* @len: pointer to length * @is_write: indicates the transfer direction + * @attrs: memory attributes */ -MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, - hwaddr *xlat, hwaddr *len, - bool is_write); +MemoryRegion *flatview_translate(struct uc_struct *uc, FlatView *fv, + hwaddr addr, hwaddr *xlat, + hwaddr *len, bool is_write, + MemTxAttrs attrs); + +static inline MemoryRegion *address_space_translate(AddressSpace *as, + hwaddr addr, hwaddr *xlat, + hwaddr *len, bool is_write, + MemTxAttrs attrs) +{ + return flatview_translate(as->uc, address_space_to_flatview(as), + addr, xlat, len, is_write, attrs); +} /* address_space_access_valid: check for validity of accessing an address * space range @@ -818,8 +1056,10 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, * @addr: address within that address space * @len: length of the area to be checked * @is_write: indicates the transfer direction + * @attrs: memory attributes */ -bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write); +bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len, + bool is_write, MemTxAttrs attrs); /* address_space_map: map a physical memory region into a host virtual address * @@ -833,9 +1073,10 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_ * @addr: address within that address space * @plen: pointer to length of buffer; updated on return * @is_write: indicates the transfer direction + * @attrs: memory attributes */ void *address_space_map(AddressSpace *as, hwaddr addr, - hwaddr *plen, bool is_write); + hwaddr *plen, bool is_write, MemTxAttrs attrs); /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map() * @@ -843,22 +1084,109 @@ void *address_space_map(AddressSpace *as, hwaddr addr, * the amount of memory that was actually read or written by the caller. * * @as: #AddressSpace used - * @addr: address within that address space + * @buffer: host pointer as returned by address_space_map() * @len: buffer length as returned by address_space_map() * @access_len: amount of data actually transferred * @is_write: indicates the transfer direction */ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, - int is_write, hwaddr access_len); + bool is_write, hwaddr access_len); -void memory_register_types(struct uc_struct *uc); +/* Internal functions, part of the implementation of address_space_read. */ +MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, void *buf, hwaddr len); +MemTxResult flatview_read_continue(struct uc_struct *, FlatView *fv, hwaddr addr, + MemTxAttrs attrs, void *buf, + hwaddr len, hwaddr addr1, hwaddr l, + MemoryRegion *mr); +void *qemu_map_ram_ptr(struct uc_struct *uc, RAMBlock *ram_block, ram_addr_t addr); + +static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) +{ + if (is_write) { + return memory_region_is_ram(mr) && !mr->readonly; + } else { + return memory_region_is_ram(mr); + } +} + +/** + * address_space_read: read from an address space. + * + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). Called within RCU critical section. 
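For illustration only (not part of the patch): the map/unmap pair documented above lets callers touch guest memory through a host pointer. The range is invented, and string.h is assumed for memset.

    #include <string.h>

    hwaddr plen = 0x1000;
    void *host = address_space_map(as, 0x1000, &plen, true,
                                   MEMTXATTRS_UNSPECIFIED);
    if (host != NULL) {
        memset(host, 0, plen);    /* plen may have been clamped on return */
        address_space_unmap(as, host, plen, true, plen);
    }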
+ * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @attrs: memory transaction attributes + * @buf: buffer with the data transferred + * @len: length of the data transferred + */ +#ifndef _MSC_VER +static inline __attribute__((__always_inline__)) +#else +static inline +#endif +MemTxResult address_space_read(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, void *buf, + hwaddr len) +{ + MemTxResult result = MEMTX_OK; +#ifndef _MSC_VER + hwaddr l, addr1; + void *ptr; + MemoryRegion *mr; + FlatView *fv; + + if (__builtin_constant_p(len)) { + if (len) { + fv = address_space_to_flatview(as); + l = len; + mr = flatview_translate(as->uc, fv, addr, &addr1, &l, false, attrs); + if (len == l && memory_access_is_direct(mr, false)) { + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); + memcpy(buf, ptr, len); + } else { + result = flatview_read_continue(as->uc, fv, addr, attrs, buf, len, + addr1, l, mr); + } + } + } else { + result = address_space_read_full(as, addr, attrs, buf, len); + } +#else + result = address_space_read_full(as, addr, attrs, buf, len); +#endif + return result; +} + +#ifdef NEED_CPU_H +/* enum device_endian to MemOp. */ +static inline MemOp devend_memop(enum device_endian end) +{ + QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN && + DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN); + +#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) + /* Swap if non-host endianness or native (target) endianness */ + return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP; +#else + const int non_host_endianness = + DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN; + + /* In this case, native (target) endianness needs no swap. */ + return (end == non_host_endianness) ? MO_BSWAP : 0; +#endif +} +#endif MemoryRegion *memory_map(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms); MemoryRegion *memory_map_ptr(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms, void *ptr); + MemoryRegion *memory_map_io(struct uc_struct *uc, ram_addr_t begin, size_t size, uc_cb_mmio_read_t read_cb, + uc_cb_mmio_write_t write_cb, void *user_data_read, void *user_data_write); void memory_unmap(struct uc_struct *uc, MemoryRegion *mr); int memory_free(struct uc_struct *uc); #endif - -#endif diff --git a/qemu/include/exec/memory_ldst.inc.h b/qemu/include/exec/memory_ldst.inc.h new file mode 100644 index 00000000..cbd3b07c --- /dev/null +++ b/qemu/include/exec/memory_ldst.inc.h @@ -0,0 +1,71 @@ +/* + * Physical memory access templates + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2015 Linaro, Inc. + * Copyright (c) 2016 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#ifdef TARGET_ENDIANNESS +extern uint32_t glue(address_space_lduw, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result); +extern uint32_t glue(address_space_ldl, SUFFIX)(struct uc_struct *, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result); +extern uint64_t glue(address_space_ldq, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stl_notdirty, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stw, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stl, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stq, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result); +#else +extern uint32_t glue(address_space_ldub, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result); +extern uint32_t glue(address_space_lduw_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result); +extern uint32_t glue(address_space_lduw_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result); +extern uint32_t glue(address_space_ldl_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result); +extern uint32_t glue(address_space_ldl_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result); +extern uint64_t glue(address_space_ldq_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result); +extern uint64_t glue(address_space_ldq_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stb, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stw_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stw_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stl_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stl_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stq_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result); +extern void glue(address_space_stq_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result); +#endif + +#undef ARG1_DECL +#undef ARG1 +#undef SUFFIX +#undef TARGET_ENDIANNESS diff --git a/qemu/include/exec/memory_ldst_cached.inc.h b/qemu/include/exec/memory_ldst_cached.inc.h new file mode 100644 index 00000000..6e303e0f --- /dev/null +++ b/qemu/include/exec/memory_ldst_cached.inc.h @@ -0,0 +1,126 @@ +/* + * Memory access templates for MemoryRegionCache + * + * Copyright (c) 2018 Red Hat, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifdef UNICORN_ARCH_POSTFIX +#define ADDRESS_SPACE_LD_CACHED(size) \ + glue(glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached)), UNICORN_ARCH_POSTFIX) +#define ADDRESS_SPACE_LD_CACHED_SLOW(size) \ + glue(glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached_slow)), UNICORN_ARCH_POSTFIX) +#define LD_P(size) \ + glue(glue(ld, size), glue(ENDIANNESS, _p)) +#else +#define ADDRESS_SPACE_LD_CACHED(size) \ + glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached)) +#define ADDRESS_SPACE_LD_CACHED_SLOW(size) \ + glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached_slow)) +#define LD_P(size) \ + glue(glue(ld, size), glue(ENDIANNESS, _p)) +#endif + +static inline uint32_t ADDRESS_SPACE_LD_CACHED(l)(struct uc_struct *uc, MemoryRegionCache *cache, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + assert(addr < cache->len && 4 <= cache->len - addr); + if (likely(cache->ptr)) { + return LD_P(l)((char *)cache->ptr + addr); + } else { + return ADDRESS_SPACE_LD_CACHED_SLOW(l)(uc, cache, addr, attrs, result); + } +} + +static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(struct uc_struct *uc, MemoryRegionCache *cache, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + assert(addr < cache->len && 8 <= cache->len - addr); + if (likely(cache->ptr)) { + return LD_P(q)((char *)cache->ptr + addr); + } else { + return ADDRESS_SPACE_LD_CACHED_SLOW(q)(uc, cache, addr, attrs, result); + } +} + +static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(struct uc_struct *uc, MemoryRegionCache *cache, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + assert(addr < cache->len && 2 <= cache->len - addr); + if (likely(cache->ptr)) { + return LD_P(uw)((char *)cache->ptr + addr); + } else { + return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(uc, cache, addr, attrs, result); + } +} + +#undef ADDRESS_SPACE_LD_CACHED +#undef ADDRESS_SPACE_LD_CACHED_SLOW +#undef LD_P + +#ifdef UNICORN_ARCH_POSTFIX +#define ADDRESS_SPACE_ST_CACHED(size) \ + glue(glue(glue(address_space_st, size), glue(ENDIANNESS, _cached)), UNICORN_ARCH_POSTFIX) +#define ADDRESS_SPACE_ST_CACHED_SLOW(size) \ + glue(glue(glue(address_space_st, size), glue(ENDIANNESS, _cached_slow)), UNICORN_ARCH_POSTFIX) +#define ST_P(size) \ + glue(glue(st, size), glue(ENDIANNESS, _p)) +#else +#define ADDRESS_SPACE_ST_CACHED(size) \ + glue(glue(address_space_st, size), glue(ENDIANNESS, _cached)) +#define ADDRESS_SPACE_ST_CACHED_SLOW(size) \ + glue(glue(address_space_st, size), glue(ENDIANNESS, _cached_slow)) +#define ST_P(size) \ + glue(glue(st, size), glue(ENDIANNESS, _p)) +#endif + +static inline void ADDRESS_SPACE_ST_CACHED(l)(struct uc_struct *uc, MemoryRegionCache *cache, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + assert(addr < cache->len && 4 <= cache->len - addr); + if (likely(cache->ptr)) { + ST_P(l)((char *)cache->ptr + addr, val); + } else { + 
ADDRESS_SPACE_ST_CACHED_SLOW(l)(uc, cache, addr, val, attrs, result); + } +} + +static inline void ADDRESS_SPACE_ST_CACHED(w)(struct uc_struct *uc, MemoryRegionCache *cache, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + assert(addr < cache->len && 2 <= cache->len - addr); + if (likely(cache->ptr)) { + ST_P(w)((char *)cache->ptr + addr, val); + } else { + ADDRESS_SPACE_ST_CACHED_SLOW(w)(uc, cache, addr, val, attrs, result); + } +} + +static inline void ADDRESS_SPACE_ST_CACHED(q)(struct uc_struct *uc, MemoryRegionCache *cache, + hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result) +{ + assert(addr < cache->len && 8 <= cache->len - addr); + if (likely(cache->ptr)) { + ST_P(q)((char *)cache->ptr + addr, val); + } else { + ADDRESS_SPACE_ST_CACHED_SLOW(q)(uc, cache, addr, val, attrs, result); + } +} + +#undef ADDRESS_SPACE_ST_CACHED +#undef ADDRESS_SPACE_ST_CACHED_SLOW +#undef ST_P + +#undef ENDIANNESS diff --git a/qemu/include/exec/memory_ldst_phys.inc.h b/qemu/include/exec/memory_ldst_phys.inc.h new file mode 100644 index 00000000..98a46d4a --- /dev/null +++ b/qemu/include/exec/memory_ldst_phys.inc.h @@ -0,0 +1,147 @@ +/* + * Physical memory access templates + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2015 Linaro, Inc. + * Copyright (c) 2016 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#ifdef TARGET_ENDIANNESS +static inline uint32_t glue(ldl_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) +{ + return glue(address_space_ldl, SUFFIX)(uc, ARG1, addr, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline uint64_t glue(ldq_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) +{ + return glue(address_space_ldq, SUFFIX)(uc, ARG1, addr, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline uint32_t glue(lduw_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) +{ + return glue(address_space_lduw, SUFFIX)(uc, ARG1, addr, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline void glue(stl_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) +{ + glue(address_space_stl, SUFFIX)(uc, ARG1, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline void glue(stw_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) +{ + glue(address_space_stw, SUFFIX)(uc, ARG1, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline void glue(stq_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val) +{ + glue(address_space_stq, SUFFIX)(uc, ARG1, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} +#else +static inline uint32_t glue(ldl_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) +{ + return glue(address_space_ldl_le, SUFFIX)(uc, ARG1, addr, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline uint32_t glue(ldl_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) +{ + return glue(address_space_ldl_be, SUFFIX)(uc, ARG1, addr, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline uint64_t glue(ldq_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) +{ + return glue(address_space_ldq_le, SUFFIX)(uc, ARG1, addr, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline uint64_t glue(ldq_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) +{ + return glue(address_space_ldq_be, SUFFIX)(uc, ARG1, addr, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline uint32_t glue(ldub_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) +{ + return glue(address_space_ldub, SUFFIX)(uc, ARG1, addr, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline uint32_t glue(lduw_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) +{ + return glue(address_space_lduw_le, SUFFIX)(uc, ARG1, addr, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline uint32_t glue(lduw_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr) +{ + return glue(address_space_lduw_be, SUFFIX)(uc, ARG1, addr, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline void glue(stl_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) +{ + glue(address_space_stl_le, SUFFIX)(uc, ARG1, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline void glue(stl_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) +{ + glue(address_space_stl_be, SUFFIX)(uc, ARG1, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline void glue(stb_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) +{ + glue(address_space_stb, SUFFIX)(uc, ARG1, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline void glue(stw_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) +{ + glue(address_space_stw_le, SUFFIX)(uc, ARG1, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline void glue(stw_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val) +{ + glue(address_space_stw_be, SUFFIX)(uc, 
ARG1, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline void glue(stq_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val) +{ + glue(address_space_stq_le, SUFFIX)(uc, ARG1, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} + +static inline void glue(stq_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val) +{ + glue(address_space_stq_be, SUFFIX)(uc, ARG1, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} +#endif + +#undef ARG1_DECL +#undef ARG1 +#undef SUFFIX +#undef TARGET_ENDIANNESS diff --git a/qemu/include/exec/poison.h b/qemu/include/exec/poison.h new file mode 100644 index 00000000..7b9ac361 --- /dev/null +++ b/qemu/include/exec/poison.h @@ -0,0 +1,96 @@ +/* Poison identifiers that should not be used when building + target independent device code. */ + +#ifndef HW_POISON_H +#define HW_POISON_H +#ifdef __GNUC__ + +#pragma GCC poison TARGET_I386 +#pragma GCC poison TARGET_X86_64 +#pragma GCC poison TARGET_AARCH64 +#pragma GCC poison TARGET_ALPHA +#pragma GCC poison TARGET_ARM +#pragma GCC poison TARGET_CRIS +#pragma GCC poison TARGET_HPPA +#pragma GCC poison TARGET_LM32 +#pragma GCC poison TARGET_M68K +#pragma GCC poison TARGET_MICROBLAZE +#pragma GCC poison TARGET_MIPS +#pragma GCC poison TARGET_ABI_MIPSN32 +#pragma GCC poison TARGET_ABI_MIPSO32 +#pragma GCC poison TARGET_MIPS64 +#pragma GCC poison TARGET_ABI_MIPSN64 +#pragma GCC poison TARGET_MOXIE +#pragma GCC poison TARGET_NIOS2 +#pragma GCC poison TARGET_OPENRISC +#pragma GCC poison TARGET_PPC +#pragma GCC poison TARGET_PPC64 +#pragma GCC poison TARGET_ABI32 +#pragma GCC poison TARGET_RX +#pragma GCC poison TARGET_S390X +#pragma GCC poison TARGET_SH4 +#pragma GCC poison TARGET_SPARC +#pragma GCC poison TARGET_SPARC64 +#pragma GCC poison TARGET_TILEGX +#pragma GCC poison TARGET_TRICORE +#pragma GCC poison TARGET_UNICORE32 +#pragma GCC poison TARGET_XTENSA + +#pragma GCC poison TARGET_ALIGNED_ONLY +#pragma GCC poison TARGET_HAS_BFLT +#pragma GCC poison TARGET_NAME +#pragma GCC poison TARGET_SUPPORTS_MTTCG +#pragma GCC poison TARGET_WORDS_BIGENDIAN +#pragma GCC poison BSWAP_NEEDED + +#pragma GCC poison TARGET_LONG_BITS +#pragma GCC poison TARGET_FMT_lx +#pragma GCC poison TARGET_FMT_ld +#pragma GCC poison TARGET_FMT_lu + +#pragma GCC poison TARGET_PAGE_SIZE +#pragma GCC poison TARGET_PAGE_MASK +#pragma GCC poison TARGET_PAGE_BITS +#pragma GCC poison TARGET_PAGE_ALIGN + +#pragma GCC poison CPUArchState + +#pragma GCC poison CPU_INTERRUPT_HARD +#pragma GCC poison CPU_INTERRUPT_EXITTB +#pragma GCC poison CPU_INTERRUPT_HALT +#pragma GCC poison CPU_INTERRUPT_DEBUG +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_0 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_1 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_2 + +#pragma GCC poison CONFIG_ALPHA_DIS +#pragma GCC poison CONFIG_ARM_A64_DIS +#pragma GCC poison CONFIG_ARM_DIS +#pragma GCC poison CONFIG_CRIS_DIS +#pragma GCC poison CONFIG_HPPA_DIS +#pragma GCC poison CONFIG_I386_DIS +#pragma GCC poison CONFIG_LM32_DIS +#pragma GCC poison CONFIG_M68K_DIS +#pragma GCC poison CONFIG_MICROBLAZE_DIS +#pragma GCC poison CONFIG_MIPS_DIS +#pragma GCC poison CONFIG_NANOMIPS_DIS +#pragma GCC poison CONFIG_MOXIE_DIS +#pragma GCC poison CONFIG_NIOS2_DIS +#pragma GCC poison CONFIG_PPC_DIS +#pragma GCC poison CONFIG_RISCV_DIS +#pragma GCC poison 
CONFIG_S390_DIS +#pragma GCC poison CONFIG_SH4_DIS +#pragma GCC poison CONFIG_SPARC_DIS +#pragma GCC poison CONFIG_XTENSA_DIS + +#pragma GCC poison CONFIG_LINUX_USER +#pragma GCC poison CONFIG_KVM +#pragma GCC poison CONFIG_SOFTMMU + +#endif +#endif diff --git a/qemu/include/exec/ram_addr.h b/qemu/include/exec/ram_addr.h index 5e614e59..a56832e0 100644 --- a/qemu/include/exec/ram_addr.h +++ b/qemu/include/exec/ram_addr.h @@ -19,145 +19,101 @@ #ifndef RAM_ADDR_H #define RAM_ADDR_H -#include "uc_priv.h" +#include "cpu.h" +#include "sysemu/tcg.h" +#include "exec/ramlist.h" +#include "exec/ramblock.h" -#ifndef CONFIG_USER_ONLY +static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset) +{ + return (b && b->host && offset < b->used_length) ? true : false; +} -ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, - MemoryRegion *mr, Error **errp); -ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp); -int qemu_get_ram_fd(struct uc_struct *uc, ram_addr_t addr); -void *qemu_get_ram_block_host_ptr(struct uc_struct *uc, ram_addr_t addr); -void *qemu_get_ram_ptr(struct uc_struct *uc, ram_addr_t addr); -void qemu_ram_free(struct uc_struct *c, ram_addr_t addr); -void qemu_ram_free_from_ptr(struct uc_struct *uc, ram_addr_t addr); +static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset) +{ + assert(offset_in_ramblock(block, offset)); + return (char *)block->host + offset; +} -static inline bool cpu_physical_memory_get_dirty(struct uc_struct *uc, ram_addr_t start, +static inline unsigned long int ramblock_recv_bitmap_offset(struct uc_struct *uc, void *host_addr, + RAMBlock *rb) +{ + uint64_t host_addr_offset = + (uint64_t)(uintptr_t)((char *)host_addr - (char *)rb->host); + return host_addr_offset >> TARGET_PAGE_BITS; +} + +RAMBlock *qemu_ram_alloc_from_ptr(struct uc_struct *uc, ram_addr_t size, void *host, + MemoryRegion *mr); +RAMBlock *qemu_ram_alloc(struct uc_struct *uc, ram_addr_t size, MemoryRegion *mr); +void qemu_ram_free(struct uc_struct *uc, RAMBlock *block); + +#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1) +#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE)) + +void tb_invalidate_phys_range(struct uc_struct *uc, ram_addr_t start, ram_addr_t end); + +static inline bool cpu_physical_memory_get_dirty(ram_addr_t start, ram_addr_t length, unsigned client) { - unsigned long end, page, next; - - assert(client < DIRTY_MEMORY_NUM); - - end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; - page = start >> TARGET_PAGE_BITS; - next = find_next_bit(uc->ram_list.dirty_memory[client], end, page); - - return next < end; + return false; } -static inline bool cpu_physical_memory_get_clean(struct uc_struct *uc, ram_addr_t start, +static inline bool cpu_physical_memory_all_dirty(ram_addr_t start, ram_addr_t length, unsigned client) { - unsigned long end, page, next; - - assert(client < DIRTY_MEMORY_NUM); - - end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; - page = start >> TARGET_PAGE_BITS; - next = find_next_zero_bit(uc->ram_list.dirty_memory[client], end, page); - - return next < end; + return false; } -static inline bool cpu_physical_memory_get_dirty_flag(struct uc_struct *uc, ram_addr_t addr, +static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client) { - return cpu_physical_memory_get_dirty(uc, addr, 1, client); + return cpu_physical_memory_get_dirty(addr, 1, client); } -static inline bool cpu_physical_memory_is_clean(struct uc_struct *uc, ram_addr_t addr) +static 
inline bool cpu_physical_memory_is_clean(ram_addr_t addr) { - return !cpu_physical_memory_get_dirty_flag(uc, addr, DIRTY_MEMORY_CODE); + return true; } -static inline bool cpu_physical_memory_range_includes_clean(struct uc_struct *uc, ram_addr_t start, - ram_addr_t length) -{ - return cpu_physical_memory_get_clean(uc, start, length, DIRTY_MEMORY_CODE); -} - -static inline void cpu_physical_memory_set_dirty_flag(struct uc_struct *uc, ram_addr_t addr, +static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client) { - assert(client < DIRTY_MEMORY_NUM); - set_bit(addr >> TARGET_PAGE_BITS, uc->ram_list.dirty_memory[client]); } -static inline void cpu_physical_memory_set_dirty_range(struct uc_struct *uc, ram_addr_t start, - ram_addr_t length) +static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start, + ram_addr_t length, + uint8_t mask) { - unsigned long end, page; - - end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; - page = start >> TARGET_PAGE_BITS; - qemu_bitmap_set(uc->ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page); } #if !defined(_WIN32) -static inline void cpu_physical_memory_set_dirty_lebitmap(struct uc_struct *uc, unsigned long *bitmap, +static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, ram_addr_t start, ram_addr_t pages) { - unsigned long i, j; - unsigned long page_number, c; - hwaddr addr; - ram_addr_t ram_addr; - unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS; - unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE; - unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); - - /* start address is aligned at the start of a word? */ - if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) && - (hpratio == 1)) { - long k; - long nr = BITS_TO_LONGS(pages); - - for (k = 0; k < nr; k++) { - if (bitmap[k]) { - unsigned long temp = leul_to_cpu(bitmap[k]); - uc->ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp; - } - } - } else { - /* - * bitmap-traveling is faster than memory-traveling (for addr...) - * especially when most of the memory is not dirty. 
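[Annotation: the dirty-tracking helpers in this hunk are deliberately reduced to constant stubs for Unicorn2; the deleted bodies scanned per-client page bitmaps. A minimal standalone sketch of the page-granularity lookup the old code performed, with an assumed 12-bit page size and a plain uint64_t array standing in for uc->ram_list.dirty_memory:]

#include <stdbool.h>
#include <stdint.h>

#define PAGE_BITS     12   /* assumption for illustration (4 KiB pages) */
#define BITS_PER_WORD 64

/* Mirrors the intent of the deleted cpu_physical_memory_get_dirty(). */
static bool any_page_dirty(const uint64_t *bitmap,
                           uint64_t start, uint64_t length)
{
    uint64_t first = start >> PAGE_BITS;
    uint64_t last  = (start + length - 1) >> PAGE_BITS;

    for (uint64_t page = first; page <= last; page++) {
        if (bitmap[page / BITS_PER_WORD] & (1ULL << (page % BITS_PER_WORD))) {
            return true;   /* the deleted code used find_next_bit() instead */
        }
    }
    return false;
}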
- */ - for (i = 0; i < len; i++) { - if (bitmap[i] != 0) { - c = leul_to_cpu(bitmap[i]); - do { - j = ctzl(c); - c &= ~(1ul << j); - page_number = (i * HOST_LONG_BITS + j) * hpratio; - addr = page_number * TARGET_PAGE_SIZE; - ram_addr = start + addr; - cpu_physical_memory_set_dirty_range(uc, ram_addr, - TARGET_PAGE_SIZE * hpratio); - } while (c != 0); - } - } - } } #endif /* not _WIN32 */ -static inline void cpu_physical_memory_clear_dirty_range(struct uc_struct *uc, ram_addr_t start, - ram_addr_t length, - unsigned client) -{ - unsigned long end, page; +bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, + ram_addr_t length, + unsigned client); - assert(client < DIRTY_MEMORY_NUM); - end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; - page = start >> TARGET_PAGE_BITS; - qemu_bitmap_clear(uc->ram_list.dirty_memory[client], page, end - page); +static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start, + ram_addr_t length) +{ } -void cpu_physical_memory_reset_dirty(struct uc_struct *uc, - ram_addr_t start, ram_addr_t length, unsigned client); -#endif +/* Called with RCU critical section */ +static inline +uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, + ram_addr_t start, + ram_addr_t length, + uint64_t *real_dirty_pages) +{ + return 0; +} #endif diff --git a/qemu/include/exec/ramblock.h b/qemu/include/exec/ramblock.h new file mode 100644 index 00000000..1418a05d --- /dev/null +++ b/qemu/include/exec/ramblock.h @@ -0,0 +1,25 @@ +/* + * Declarations for cpu physical memory functions + * + * Copyright 2011 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Avi Kivity + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. + * + */ + +/* + * This header is for use by exec.c and memory.c ONLY. Do not include it. + * The functions declared here will be removed soon. + */ + +#ifndef QEMU_EXEC_RAMBLOCK_H +#define QEMU_EXEC_RAMBLOCK_H + +#include "cpu-common.h" +#include "qemu.h" + +#endif diff --git a/qemu/include/exec/ramlist.h b/qemu/include/exec/ramlist.h new file mode 100644 index 00000000..0f024318 --- /dev/null +++ b/qemu/include/exec/ramlist.h @@ -0,0 +1,19 @@ +#ifndef RAMLIST_H +#define RAMLIST_H + +#include "qemu/queue.h" +#include "qemu/thread.h" +//#include "qemu/rcu.h" +//#include "qemu/rcu_queue.h" + +#define DIRTY_MEMORY_VGA 0 +#define DIRTY_MEMORY_CODE 1 +#define DIRTY_MEMORY_MIGRATION 2 +#define DIRTY_MEMORY_NUM 3 /* num of dirty bits */ + +#define INTERNAL_RAMBLOCK_FOREACH(block) \ + QLIST_FOREACH(block, &uc->ram_list.blocks, next) +/* Never use the INTERNAL_ version except for defining other macros */ +#define RAMBLOCK_FOREACH(block) INTERNAL_RAMBLOCK_FOREACH(block) + +#endif /* RAMLIST_H */ diff --git a/qemu/include/exec/softmmu-semi.h b/qemu/include/exec/softmmu-semi.h new file mode 100644 index 00000000..fbcae88f --- /dev/null +++ b/qemu/include/exec/softmmu-semi.h @@ -0,0 +1,101 @@ +/* + * Helper routines to provide target memory access for semihosting + * syscalls in system emulation mode. + * + * Copyright (c) 2007 CodeSourcery. 
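[Annotation: the new ramblock helpers above translate a guest RAM offset to a host pointer only after a bounds check. A hedged sketch of that contract, with a pared-down RAMBlock carrying just the two fields the helpers touch; the full struct is declared in qemu.h:]

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t ram_addr_t;

/* Assumed minimal shape, for illustration only. */
typedef struct RAMBlock {
    uint8_t   *host;         /* host mapping of the block */
    ram_addr_t used_length;  /* valid bytes in the mapping */
} RAMBlock;

static bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset)); /* fail fast on bad offsets */
    return (char *)block->host + offset;
}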
+ * + * This code is licensed under the GPL + */ + +#ifndef SOFTMMU_SEMI_H +#define SOFTMMU_SEMI_H + +#include "cpu.h" + +static inline uint64_t softmmu_tget64(CPUArchState *env, target_ulong addr) +{ + uint64_t val; + + cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 0); + return tswap64(val); +} + +static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr) +{ + uint32_t val; + + cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 0); + return tswap32(val); +} + +static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr) +{ + uint8_t val; + + cpu_memory_rw_debug(env_cpu(env), addr, &val, 1, 0); + return val; +} + +#define get_user_u64(arg, p) ({ arg = softmmu_tget64(env, p); 0; }) +#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; }) +#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; }) +#define get_user_ual(arg, p) get_user_u32(arg, p) + +static inline void softmmu_tput64(CPUArchState *env, + target_ulong addr, uint64_t val) +{ + val = tswap64(val); + cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 1); +} + +static inline void softmmu_tput32(CPUArchState *env, + target_ulong addr, uint32_t val) +{ + val = tswap32(val); + cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 1); +} +#define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg) ; 0; }) +#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; }) +#define put_user_ual(arg, p) put_user_u32(arg, p) + +static void *softmmu_lock_user(CPUArchState *env, + target_ulong addr, target_ulong len, int copy) +{ + uint8_t *p; + /* TODO: Make this something that isn't fixed size. */ + p = malloc(len); + if (p && copy) { + cpu_memory_rw_debug(env_cpu(env), addr, p, len, 0); + } + return p; +} +#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy) +static char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr) +{ + char *p; + char *s; + uint8_t c; + /* TODO: Make this something that isn't fixed size. */ + s = p = malloc(1024); + if (!s) { + return NULL; + } + do { + cpu_memory_rw_debug(env_cpu(env), addr, &c, 1, 0); + addr++; + *(p++) = c; + } while (c); + return s; +} +#define lock_user_string(p) softmmu_lock_user_string(env, p) +static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr, + target_ulong len) +{ + if (len) { + cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1); + } + free(p); +} +#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len) + +#endif diff --git a/qemu/include/exec/target_page.h b/qemu/include/exec/target_page.h new file mode 100644 index 00000000..77ca78a4 --- /dev/null +++ b/qemu/include/exec/target_page.h @@ -0,0 +1,23 @@ +/* + * Target page sizes and friends for non target files + * + * Copyright (c) 2017 Red Hat Inc + * + * Authors: + * David Alan Gilbert + * Juan Quintela + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
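[Annotation: the get_user_*/put_user_* macros in softmmu-semi.h hide both the CPUArchState and the error convention (they evaluate to 0), so call sites read like plain assignments. A self-contained sketch of the same pattern against a flat byte array standing in for guest memory; the array and helper names are illustrative, not part of the header, and the statement-expression form is the same GNU extension the header itself relies on:]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t guest_mem[1024];          /* stand-in for target RAM */

static uint32_t tget32(uint32_t addr)    /* mimics softmmu_tget32() */
{
    uint32_t val;
    memcpy(&val, &guest_mem[addr], 4);   /* cpu_memory_rw_debug() in the real code */
    return val;                          /* tswap32() omitted: host-endian demo */
}

#define get_user_u32(arg, p) ({ (arg) = tget32(p); 0; })

int main(void)
{
    uint32_t word;
    guest_mem[8] = 0x2a;                 /* plant a value at guest address 8 */
    if (get_user_u32(word, 8) == 0) {    /* macro "returns" 0 on success */
        printf("read %u\n", (unsigned)word);  /* prints 42 */
    }
    return 0;
}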
+ */ + +#ifndef EXEC_TARGET_PAGE_H +#define EXEC_TARGET_PAGE_H + +struct uc_struct; + +size_t qemu_target_page_size(struct uc_struct *uc); +int qemu_target_page_bits(struct uc_struct *uc); +int qemu_target_page_bits_min(void); + +#endif diff --git a/qemu/translate-all.h b/qemu/include/exec/tb-context.h similarity index 62% rename from qemu/translate-all.h rename to qemu/include/exec/tb-context.h index 8216ad8d..834747d1 100644 --- a/qemu/translate-all.h +++ b/qemu/include/exec/tb-context.h @@ -1,5 +1,5 @@ /* - * Translated block handling + * Internal structs that QEMU exports to TCG * * Copyright (c) 2003 Fabrice Bellard * @@ -16,13 +16,24 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ -#ifndef TRANSLATE_ALL_H -#define TRANSLATE_ALL_H -/* translate-all.c */ -void cpu_unlink_tb(CPUState *cpu); -void tb_check_watchpoint(CPUState *cpu); -void tb_invalidate_phys_page_fast(struct uc_struct* uc, tb_page_addr_t start, int len); -void tb_cleanup(struct uc_struct *uc); +#ifndef QEMU_TB_CONTEXT_H +#define QEMU_TB_CONTEXT_H -#endif /* TRANSLATE_ALL_H */ +#include "qemu/thread.h" +#include "qemu/qht.h" + +#define CODE_GEN_HTABLE_BITS 15 +#define CODE_GEN_HTABLE_SIZE (1 << CODE_GEN_HTABLE_BITS) + +typedef struct TranslationBlock TranslationBlock; +typedef struct TBContext TBContext; + +struct TBContext { + struct qht htable; + + /* statistics */ + unsigned tb_flush_count; +}; + +#endif diff --git a/qemu/include/exec/tb-hash.h b/qemu/include/exec/tb-hash.h new file mode 100644 index 00000000..27dd0418 --- /dev/null +++ b/qemu/include/exec/tb-hash.h @@ -0,0 +1,57 @@ +/* + * internal execution defines for qemu + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef EXEC_TB_HASH_H +#define EXEC_TB_HASH_H + +#include "exec/cpu-defs.h" +#include "exec/exec-all.h" +#include "qemu/xxhash.h" + +/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for + addresses on the same page. The top bits are the same. This allows + TLB invalidation to quickly clear a subset of the hash table. 
*/ +#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2) +#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS) +#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1) +#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE) + +static inline unsigned int tb_jmp_cache_hash_page(struct uc_struct *uc, target_ulong pc) +{ + target_ulong tmp; + tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); + return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK; +} + +static inline unsigned int tb_jmp_cache_hash_func(struct uc_struct *uc, target_ulong pc) +{ + target_ulong tmp; + tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); + return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK) + | (tmp & TB_JMP_ADDR_MASK)); +} + +static inline +uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags, + uint32_t cf_mask, uint32_t trace_vcpu_dstate) +{ + return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate); +} + +#endif diff --git a/qemu/include/exec/tb-lookup.h b/qemu/include/exec/tb-lookup.h new file mode 100644 index 00000000..042423c2 --- /dev/null +++ b/qemu/include/exec/tb-lookup.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2017, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef EXEC_TB_LOOKUP_H +#define EXEC_TB_LOOKUP_H + +#ifdef NEED_CPU_H +#include "cpu.h" +#else +#include "exec/poison.h" +#endif + +#include "exec/exec-all.h" +#include "exec/tb-hash.h" + +/* Might cause an exception, so have a longjmp destination ready */ +static inline TranslationBlock * +tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base, + uint32_t *flags, uint32_t cf_mask) +{ + CPUArchState *env = (CPUArchState *)cpu->env_ptr; + TranslationBlock *tb; + uint32_t hash; + + cpu_get_tb_cpu_state(env, pc, cs_base, flags); + hash = tb_jmp_cache_hash_func(env->uc, *pc); + tb = cpu->tb_jmp_cache[hash]; + + cf_mask &= ~CF_CLUSTER_MASK; + cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT; + + if (likely(tb && + tb->pc == *pc && + tb->cs_base == *cs_base && + tb->flags == *flags && + tb->trace_vcpu_dstate == *cpu->trace_dstate && + (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) { + return tb; + } + tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask); + if (tb == NULL) { + return NULL; + } + cpu->tb_jmp_cache[hash] = tb; + return tb; +} + +#endif /* EXEC_TB_LOOKUP_H */ diff --git a/qemu/include/exec/translator.h b/qemu/include/exec/translator.h new file mode 100644 index 00000000..1b0be080 --- /dev/null +++ b/qemu/include/exec/translator.h @@ -0,0 +1,177 @@ +/* + * Generic intermediate code generation. + * + * Copyright (C) 2016-2017 Lluís Vilanova + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef EXEC__TRANSLATOR_H +#define EXEC__TRANSLATOR_H + +/* + * Include this header from a target-specific file, and add a + * + * DisasContextBase base; + * + * member in your target-specific DisasContext. + */ + + +#include "qemu/bswap.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "tcg/tcg.h" + + +/** + * DisasJumpType: + * @DISAS_NEXT: Next instruction in program order. + * @DISAS_TOO_MANY: Too many instructions translated. + * @DISAS_NORETURN: Following code is dead. + * @DISAS_TARGET_*: Start of target-specific conditions. + * + * What instruction to disassemble next. 
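[Annotation: tb_jmp_cache_hash_func folds the PC into an index whose top half varies per page and bottom half varies within a page, which is what lets tb_jmp_cache_hash_page clear one page's entries cheaply. A standalone rendering of the same formula with assumed sizes (TB_JMP_CACHE_BITS = 12 matches cpu-defs.h; TARGET_PAGE_BITS = 12 is an assumption, it depends on the target):]

#include <stdint.h>

#define TARGET_PAGE_BITS  12                      /* assumed 4 KiB pages */
#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
#define TB_JMP_PAGE_BITS  (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE  (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK  (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK  (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

static unsigned tb_jmp_cache_hash(uint64_t pc)
{
    uint64_t tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    /* high 6 bits depend on the page, low 6 bits on the in-page offset */
    return ((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
           | (tmp & TB_JMP_ADDR_MASK);
}

[Two PCs on the same page share the top bits, so TLB invalidation can sweep TB_JMP_PAGE_SIZE consecutive cache slots rather than the whole table.]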
+ */ +typedef enum DisasJumpType { + DISAS_NEXT, + DISAS_TOO_MANY, + DISAS_NORETURN, + DISAS_TARGET_0, + DISAS_TARGET_1, + DISAS_TARGET_2, + DISAS_TARGET_3, + DISAS_TARGET_4, + DISAS_TARGET_5, + DISAS_TARGET_6, + DISAS_TARGET_7, + DISAS_TARGET_8, + DISAS_TARGET_9, + DISAS_TARGET_10, + DISAS_TARGET_11, +} DisasJumpType; + +/** + * DisasContextBase: + * @tb: Translation block for this disassembly. + * @pc_first: Address of first guest instruction in this TB. + * @pc_next: Address of next guest instruction in this TB (current during + * disassembly). + * @is_jmp: What instruction to disassemble next. + * @num_insns: Number of translated instructions (including current). + * @max_insns: Maximum number of instructions to be translated in this TB. + * @singlestep_enabled: "Hardware" single stepping enabled. + * + * Architecture-agnostic disassembly context. + */ +typedef struct DisasContextBase { + TranslationBlock *tb; + target_ulong pc_first; + target_ulong pc_next; + DisasJumpType is_jmp; + int num_insns; + int max_insns; + bool singlestep_enabled; +} DisasContextBase; + +/** + * TranslatorOps: + * @init_disas_context: + * Initialize the target-specific portions of DisasContext struct. + * The generic DisasContextBase has already been initialized. + * + * @tb_start: + * Emit any code required before the start of the main loop, + * after the generic gen_tb_start(). + * + * @insn_start: + * Emit the tcg_gen_insn_start opcode. + * + * @breakpoint_check: + * When called, the breakpoint has already been checked to match the PC, + * but the target may decide the breakpoint missed the address + * (e.g., due to conditions encoded in their flags). Return true to + * indicate that the breakpoint did hit, in which case no more breakpoints + * are checked. If the breakpoint did hit, emit any code required to + * signal the exception, and set db->is_jmp as necessary to terminate + * the main loop. + * + * @translate_insn: + * Disassemble one instruction and set db->pc_next for the start + * of the following instruction. Set db->is_jmp as necessary to + * terminate the main loop. + * + * @tb_stop: + * Emit any opcodes required to exit the TB, based on db->is_jmp. + */ +typedef struct TranslatorOps { + void (*init_disas_context)(DisasContextBase *db, CPUState *cpu); + void (*tb_start)(DisasContextBase *db, CPUState *cpu); + void (*insn_start)(DisasContextBase *db, CPUState *cpu); + bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu, + const CPUBreakpoint *bp); + void (*translate_insn)(DisasContextBase *db, CPUState *cpu); + void (*tb_stop)(DisasContextBase *db, CPUState *cpu); +} TranslatorOps; + +/** + * translator_loop: + * @ops: Target-specific operations. + * @db: Disassembly context. + * @cpu: Target vCPU. + * @tb: Translation block. + * @max_insns: Maximum number of insns to translate. + * + * Generic translator loop. + * + * Translation will stop in the following cases (in order): + * - When is_jmp set by #TranslatorOps::breakpoint_check. + * - set to DISAS_TOO_MANY exits after translating one more insn + * - set to any other value than DISAS_NEXT exits immediately. + * - When is_jmp set by #TranslatorOps::translate_insn. + * - set to any value other than DISAS_NEXT exits immediately. + * - When the TCG operation buffer is full. + * - When single-stepping is enabled (system-wide or on the current vCPU). + * - When too many instructions have been translated. 
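[Annotation: the TranslatorOps contract above is easiest to see as the loop that drives it. A compressed, hypothetical sketch of translator_loop's control flow, built only from the documented callbacks and DisasJumpType; the real loop in translator.c additionally handles breakpoints, single-stepping, and TCG buffer pressure:]

typedef enum { DISAS_NEXT, DISAS_TOO_MANY, DISAS_NORETURN } JmpType;

typedef struct Ctx { JmpType is_jmp; int num_insns, max_insns; } Ctx;

typedef struct Ops {                   /* pared-down TranslatorOps */
    void (*init_disas_context)(Ctx *db);
    void (*tb_start)(Ctx *db);
    void (*insn_start)(Ctx *db);
    void (*translate_insn)(Ctx *db);   /* must advance pc_next, may set is_jmp */
    void (*tb_stop)(Ctx *db);
} Ops;

static void translator_loop_sketch(const Ops *ops, Ctx *db, int max_insns)
{
    db->is_jmp = DISAS_NEXT;
    db->num_insns = 0;
    db->max_insns = max_insns;
    ops->init_disas_context(db);
    ops->tb_start(db);
    do {
        db->num_insns++;
        ops->insn_start(db);
        ops->translate_insn(db);
        /* Hitting the insn budget behaves like DISAS_TOO_MANY: the current
         * insn is kept, then the block ends; any other non-NEXT value set
         * by translate_insn ends the block immediately. */
        if (db->num_insns == db->max_insns && db->is_jmp == DISAS_NEXT) {
            db->is_jmp = DISAS_TOO_MANY;
        }
    } while (db->is_jmp == DISAS_NEXT);
    ops->tb_stop(db);
}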
+ */ +void translator_loop(const TranslatorOps *ops, DisasContextBase *db, + CPUState *cpu, TranslationBlock *tb, int max_insns); + +void translator_loop_temp_check(DisasContextBase *db); + +/* + * Translator Load Functions + * + * These are intended to replace the direct usage of the cpu_ld*_code + * functions and are mandatory for front-ends that have been migrated + * to the common translator_loop. These functions are only intended + * to be called from the translation stage and should not be called + * from helper functions. Those functions should be converted to encode + * the relevant information at translation time. + */ + +#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \ + static inline type \ + fullname ## _swap(TCGContext *tcg_ctx, CPUArchState *env, abi_ptr pc, bool do_swap) \ + { \ + type ret = load_fn(env, pc); \ + if (do_swap) { \ + ret = swap_fn(ret); \ + } \ + return ret; \ + } \ + \ + static inline type fullname(TCGContext *tcg_ctx, CPUArchState *env, abi_ptr pc) \ + { \ + return fullname ## _swap(tcg_ctx, env, pc, false); \ + } + +GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) +GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) +GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16) +GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32) +GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64) +#undef GEN_TRANSLATOR_LD + +#endif /* EXEC__TRANSLATOR_H */ diff --git a/qemu/include/fpu/softfloat-helpers.h b/qemu/include/fpu/softfloat-helpers.h new file mode 100644 index 00000000..e0baf24c --- /dev/null +++ b/qemu/include/fpu/softfloat-helpers.h @@ -0,0 +1,132 @@ +/* + * QEMU float support - standalone helpers + * + * This is provided for files that don't need the access to the full + * set of softfloat functions. Typically this is cpu initialisation + * code which wants to set default rounding and exceptions modes. + * + * The code in this source file is derived from release 2a of the SoftFloat + * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and + * some later contributions) are provided under that license, as detailed below. + * It has subsequently been modified by contributors to the QEMU Project, + * so some portions are provided under: + * the SoftFloat-2a license + * the BSD license + * GPL-v2-or-later + * + * Any future contributions to this file after December 1st 2014 will be + * taken to be licensed under the Softfloat-2a license unless specifically + * indicated otherwise. + */ + +/* +=============================================================================== +This C header file is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2a. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. 
Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these four paragraphs for those parts of +this code that are retained. + +=============================================================================== +*/ + +#ifndef _SOFTFLOAT_HELPERS_H_ +#define _SOFTFLOAT_HELPERS_H_ + +#include "fpu/softfloat-types.h" + +static inline void set_float_detect_tininess(int val, float_status *status) +{ + status->float_detect_tininess = val; +} + +static inline void set_float_rounding_mode(int val, float_status *status) +{ + status->float_rounding_mode = val; +} + +static inline void set_float_exception_flags(int val, float_status *status) +{ + status->float_exception_flags = val; +} + +static inline void set_floatx80_rounding_precision(int val, + float_status *status) +{ + status->floatx80_rounding_precision = val; +} + +static inline void set_flush_to_zero(flag val, float_status *status) +{ + status->flush_to_zero = val; +} + +static inline void set_flush_inputs_to_zero(flag val, float_status *status) +{ + status->flush_inputs_to_zero = val; +} + +static inline void set_default_nan_mode(flag val, float_status *status) +{ + status->default_nan_mode = val; +} + +static inline void set_snan_bit_is_one(flag val, float_status *status) +{ + status->snan_bit_is_one = val; +} + +static inline int get_float_detect_tininess(float_status *status) +{ + return status->float_detect_tininess; +} + +static inline int get_float_rounding_mode(float_status *status) +{ + return status->float_rounding_mode; +} + +static inline int get_float_exception_flags(float_status *status) +{ + return status->float_exception_flags; +} + +static inline int get_floatx80_rounding_precision(float_status *status) +{ + return status->floatx80_rounding_precision; +} + +static inline flag get_flush_to_zero(float_status *status) +{ + return status->flush_to_zero; +} + +static inline flag get_flush_inputs_to_zero(float_status *status) +{ + return status->flush_inputs_to_zero; +} + +static inline flag get_default_nan_mode(float_status *status) +{ + return status->default_nan_mode; +} + +#endif /* _SOFTFLOAT_HELPERS_H_ */ diff --git a/qemu/fpu/softfloat-macros.h b/qemu/include/fpu/softfloat-macros.h similarity index 74% rename from qemu/fpu/softfloat-macros.h rename to qemu/include/fpu/softfloat-macros.h index 2892b4fe..afae4f74 100644 --- a/qemu/fpu/softfloat-macros.h +++ b/qemu/include/fpu/softfloat-macros.h @@ -1,13 +1,24 @@ /* * QEMU float support macros * - * Derived from SoftFloat. + * The code in this source file is derived from release 2a of the SoftFloat + * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and + * some later contributions) are provided under that license, as detailed below. + * It has subsequently been modified by contributors to the QEMU Project, + * so some portions are provided under: + * the SoftFloat-2a license + * the BSD license + * GPL-v2-or-later + * + * Any future contributions to this file after December 1st 2014 will be + * taken to be licensed under the Softfloat-2a license unless specifically + * indicated otherwise. 
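[Annotation: softfloat-helpers.h exists so CPU-init code can configure a float_status without pulling in the whole softfloat library. A hedged sketch of typical reset-time use; the mirrored struct fields and enum values come from softfloat-types.h further below, and cpu_fp_reset is a hypothetical caller:]

#include <stdint.h>

typedef uint8_t flag;

/* Minimal mirror of float_status (see softfloat-types.h) for illustration. */
typedef struct float_status {
    signed char float_detect_tininess;
    signed char float_rounding_mode;
    uint8_t     float_exception_flags;
    flag        flush_to_zero;
} float_status;

enum { float_tininess_after_rounding = 0 };
enum { float_round_nearest_even = 0 };

static void set_float_rounding_mode(int val, float_status *s)
{
    s->float_rounding_mode = val;
}
static void set_float_detect_tininess(int val, float_status *s)
{
    s->float_detect_tininess = val;
}
static void set_flush_to_zero(flag val, float_status *s)
{
    s->flush_to_zero = val;
}

static void cpu_fp_reset(float_status *s)   /* hypothetical caller */
{
    set_float_rounding_mode(float_round_nearest_even, s);
    set_float_detect_tininess(float_tininess_after_rounding, s);
    set_flush_to_zero(0, s);
}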
*/ -/*============================================================================ - +/* +=============================================================================== This C source fragment is part of the SoftFloat IEC/IEEE Floating-point -Arithmetic Package, Release 2b. +Arithmetic Package, Release 2a. Written by John R. Hauser. This work was made possible in part by the International Computer Science Institute, located at Suite 600, 1947 Center @@ -16,35 +27,62 @@ National Science Foundation under grant MIP-9311980. The original version of this code was written as part of a project to build a fixed-point vector processor in collaboration with the University of California at Berkeley, overseen by Profs. Nelson Morgan and John Wawrzynek. More information -is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ arithmetic/SoftFloat.html'. -THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has -been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES -RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS -AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, -COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE -EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE -INSTITUTE (possibly via similar legal notice) AGAINST ALL LOSSES, COSTS, OR -OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. Derivative works are acceptable, even for commercial purposes, so long as -(1) the source code for the derivative work includes prominent notice that -the work is derivative, and (2) the source code includes prominent notice with -these four paragraphs for those parts of this code that are retained. +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these four paragraphs for those parts of +this code that are retained. -=============================================================================*/ +=============================================================================== +*/ -/*---------------------------------------------------------------------------- -| This macro tests for minimum version of the GNU C compiler. -*----------------------------------------------------------------------------*/ -#if defined(__GNUC__) && defined(__GNUC_MINOR__) -# define SOFTFLOAT_GNUC_PREREQ(maj, min) \ - ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) -#else -# define SOFTFLOAT_GNUC_PREREQ(maj, min) 0 -#endif +/* BSD licensing: + * Copyright (c) 2006, Fabrice Bellard + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +/* Portions of this work are licensed under the terms of the GNU GPL, + * version 2 or later. See the COPYING file in the top-level directory. + */ + +#ifndef FPU_SOFTFLOAT_MACROS_H +#define FPU_SOFTFLOAT_MACROS_H + +#include "fpu/softfloat-types.h" /*---------------------------------------------------------------------------- | Shifts `a' right by the number of bits given in `count'. If any nonzero @@ -55,7 +93,7 @@ these four paragraphs for those parts of this code that are retained. | The result is stored in the location pointed to by `zPtr'. *----------------------------------------------------------------------------*/ -static inline void shift32RightJamming(uint32_t a, int_fast16_t count, uint32_t *zPtr) +static inline void shift32RightJamming(uint32_t a, int count, uint32_t *zPtr) { uint32_t z; @@ -81,7 +119,7 @@ static inline void shift32RightJamming(uint32_t a, int_fast16_t count, uint32_t | The result is stored in the location pointed to by `zPtr'. *----------------------------------------------------------------------------*/ -static inline void shift64RightJamming(uint64_t a, int_fast16_t count, uint64_t *zPtr) +static inline void shift64RightJamming(uint64_t a, int count, uint64_t *zPtr) { uint64_t z; @@ -107,20 +145,20 @@ static inline void shift64RightJamming(uint64_t a, int_fast16_t count, uint64_t | 63 bits of the extra result are all zero if and only if _all_but_the_last_ | bits shifted off were all zero. This extra result is stored in the location | pointed to by `z1Ptr'. The value of `count' can be arbitrarily large. -| (This routine makes more sense if `a0' and `a1' are considered to form -| a fixed-point value with binary point between `a0' and `a1'. This fixed- -| point value is shifted right by the number of bits given in `count', and -| the integer part of the result is returned at the location pointed to by +| (This routine makes more sense if `a0' and `a1' are considered to form a +| fixed-point value with binary point between `a0' and `a1'. This fixed-point +| value is shifted right by the number of bits given in `count', and the +| integer part of the result is returned at the location pointed to by | `z0Ptr'. The fractional part of the result may be slightly corrupted as | described above, and is returned at the location pointed to by `z1Ptr'.) 
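[Annotation: the *RightJamming helpers in this hunk implement the classic "sticky" shift: any 1 bits shifted out collapse into the result's LSB, so later rounding still sees that the value was inexact. shift64RightJamming in full, plus a check that distinguishes it from a plain shift:]

#include <assert.h>
#include <stdint.h>

static void shift64RightJamming(uint64_t a, int count, uint64_t *zPtr)
{
    uint64_t z;

    if (count == 0) {
        z = a;
    } else if (count < 64) {
        z = (a >> count) | ((a << ((-count) & 63)) != 0); /* OR in sticky bit */
    } else {
        z = (a != 0);                 /* everything shifted out: pure sticky */
    }
    *zPtr = z;
}

int main(void)
{
    uint64_t z;
    shift64RightJamming(0x21, 4, &z); /* plain 0x21 >> 4 would give 2 */
    assert(z == 3);                   /* sticky LSB records the lost 1 bit */
    return 0;
}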
*----------------------------------------------------------------------------*/ static inline void shift64ExtraRightJamming( - uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr) + uint64_t a0, uint64_t a1, int count, uint64_t *z0Ptr, uint64_t *z1Ptr) { uint64_t z0, z1; - int8 negCount = ( - count ) & 63; + int8_t negCount = ( - count ) & 63; if ( count == 0 ) { z1 = a1; @@ -154,10 +192,10 @@ static inline void static inline void shift128Right( - uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr) + uint64_t a0, uint64_t a1, int count, uint64_t *z0Ptr, uint64_t *z1Ptr) { uint64_t z0, z1; - int8 negCount = ( - count ) & 63; + int8_t negCount = ( - count ) & 63; if ( count == 0 ) { z1 = a1; @@ -189,10 +227,10 @@ static inline void static inline void shift128RightJamming( - uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr) + uint64_t a0, uint64_t a1, int count, uint64_t *z0Ptr, uint64_t *z1Ptr) { uint64_t z0, z1; - int8 negCount = ( - count ) & 63; + int8_t negCount = ( - count ) & 63; if ( count == 0 ) { z1 = a1; @@ -243,14 +281,14 @@ static inline void uint64_t a0, uint64_t a1, uint64_t a2, - int_fast16_t count, + int count, uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr ) { uint64_t z0, z1, z2; - int8 negCount = ( - count ) & 63; + int8_t negCount = ( - count ) & 63; if ( count == 0 ) { z2 = a2; @@ -296,15 +334,30 @@ static inline void | pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. *----------------------------------------------------------------------------*/ -static inline void - shortShift128Left( - uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr) +static inline void shortShift128Left(uint64_t a0, uint64_t a1, int count, + uint64_t *z0Ptr, uint64_t *z1Ptr) { + *z1Ptr = a1 << count; + *z0Ptr = count == 0 ? a0 : (a0 << count) | (a1 >> (-count & 63)); +} - *z1Ptr = a1<<(count & 0x3f); - *z0Ptr = - ( count == 0 ) ? a0 : ( a0<<(count & 0x3f) ) | ( a1>>( ( - count ) & 63 ) ); +/*---------------------------------------------------------------------------- +| Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the +| number of bits given in `count'. Any bits shifted off are lost. The value +| of `count' may be greater than 64. The result is broken into two 64-bit +| pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. +*----------------------------------------------------------------------------*/ +static inline void shift128Left(uint64_t a0, uint64_t a1, int count, + uint64_t *z0Ptr, uint64_t *z1Ptr) +{ + if (count < 64) { + *z1Ptr = a1 << count; + *z0Ptr = count == 0 ? a0 : (a0 << count) | (a1 >> (-count & 63)); + } else { + *z1Ptr = 0; + *z0Ptr = a1 << (count - 64); + } } /*---------------------------------------------------------------------------- @@ -320,14 +373,14 @@ static inline void uint64_t a0, uint64_t a1, uint64_t a2, - int_fast16_t count, + int count, uint64_t *z0Ptr, uint64_t *z1Ptr, uint64_t *z2Ptr ) { uint64_t z0, z1, z2; - int8 negCount; + int8_t negCount; z2 = a2<>32; - bLow = (uint32_t)b; + bLow = b; bHigh = b>>32; z1 = ( (uint64_t) aLow ) * bLow; zMiddleA = ( (uint64_t) aLow ) * bHigh; @@ -559,19 +612,19 @@ static inline void | unsigned integer is returned. 
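[Annotation: shift128Left is new in this version of the header and, unlike shortShift128Left, tolerates shift counts of 64 or more. A standalone copy with a quick check of both regimes:]

#include <assert.h>
#include <stdint.h>

static void shift128Left(uint64_t a0, uint64_t a1, int count,
                         uint64_t *z0Ptr, uint64_t *z1Ptr)
{
    if (count < 64) {
        *z1Ptr = a1 << count;
        *z0Ptr = count == 0 ? a0 : (a0 << count) | (a1 >> (-count & 63));
    } else {
        *z1Ptr = 0;                      /* low half fully shifted away */
        *z0Ptr = a1 << (count - 64);     /* old low half lands in high half */
    }
}

int main(void)
{
    uint64_t hi, lo;
    shift128Left(0, 1, 64, &hi, &lo);    /* 1 << 64 within a 128-bit value */
    assert(hi == 1 && lo == 0);
    shift128Left(0, 1, 4, &hi, &lo);
    assert(hi == 0 && lo == 16);
    return 0;
}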
*----------------------------------------------------------------------------*/ -static uint64_t estimateDiv128To64( uint64_t a0, uint64_t a1, uint64_t b ) +static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b) { uint64_t b0, b1; uint64_t rem0, rem1, term0, term1; uint64_t z; - if ( b <= a0 ) return LIT64( 0xFFFFFFFFFFFFFFFF ); + if ( b <= a0 ) return UINT64_C(0xFFFFFFFFFFFFFFFF); b0 = b>>32; - z = ( b0<<32 <= a0 ) ? LIT64( 0xFFFFFFFF00000000 ) : ( a0 / b0 )<<32; + z = ( b0<<32 <= a0 ) ? UINT64_C(0xFFFFFFFF00000000) : ( a0 / b0 )<<32; mul64To128( b, z, &term0, &term1 ); sub128( a0, a1, term0, term1, &rem0, &rem1 ); while ( ( (int64_t) rem0 ) < 0 ) { - z -= LIT64( 0x100000000 ); + z -= UINT64_C(0x100000000); b1 = b<<32; add128( rem0, rem1, b0, b1, &rem0, &rem1 ); } @@ -581,6 +634,83 @@ static uint64_t estimateDiv128To64( uint64_t a0, uint64_t a1, uint64_t b ) } +/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd + * (https://gmplib.org/repo/gmp/file/tip/longlong.h) + * + * Licensed under the GPLv2/LGPLv3 + */ +static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1, + uint64_t n0, uint64_t d) +{ +#if defined(__x86_64__) && !defined(_MSC_VER) + uint64_t q; + asm ("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d)); + return q; +#elif defined(__s390x__) && !defined(__clang__) + /* Need to use a TImode type to get an even register pair for DLGR. */ + unsigned __int128 n = (unsigned __int128)n1 << 64 | n0; + asm("dlgr %0, %1" : "+r"(n) : "r"(d)); + *r = n >> 64; + return n; +#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7) + /* From Power ISA 2.06, programming note for divdeu. */ + uint64_t q1, q2, Q, r1, r2, R; + asm("divdeu %0,%2,%4; divdu %1,%3,%4" + : "=&r"(q1), "=r"(q2) + : "r"(n1), "r"(n0), "r"(d)); + r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */ + r2 = n0 - (q2 * d); + Q = q1 + q2; + R = r1 + r2; + if (R >= d || R < r2) { /* overflow implies R > d */ + Q += 1; + R -= d; + } + *r = R; + return Q; +#else + uint64_t d0, d1, q0, q1, r1, r0, m; + + d0 = (uint32_t)d; + d1 = d >> 32; + + r1 = n1 % d1; + q1 = n1 / d1; + m = q1 * d0; + r1 = (r1 << 32) | (n0 >> 32); + if (r1 < m) { + q1 -= 1; + r1 += d; + if (r1 >= d) { + if (r1 < m) { + q1 -= 1; + r1 += d; + } + } + } + r1 -= m; + + r0 = r1 % d1; + q0 = r1 / d1; + m = q0 * d0; + r0 = (r0 << 32) | (uint32_t)n0; + if (r0 < m) { + q0 -= 1; + r0 += d; + if (r0 >= d) { + if (r0 < m) { + q0 -= 1; + r0 += d; + } + } + } + r0 -= m; + + *r = r0; + return (q1 << 32) | q0; +#endif +} + /*---------------------------------------------------------------------------- | Returns an approximation to the square root of the 32-bit significand given | by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of @@ -591,7 +721,7 @@ static uint64_t estimateDiv128To64( uint64_t a0, uint64_t a1, uint64_t b ) | value. 
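[Annotation: udiv_qrnnd above divides the 128-bit value n1:n0 by d, returning the quotient and storing the remainder, with inline-asm fast paths per host ISA and a portable 32-bit-halves fallback. A hypothetical cross-check of that contract using the GCC/Clang unsigned __int128 extension, which the header's fallback deliberately avoids; note the usual precondition n1 < d so the quotient fits in 64 bits:]

#include <assert.h>
#include <stdint.h>

/* Oracle for udiv_qrnnd's contract: q = (n1:n0) / d, *r = (n1:n0) % d. */
static uint64_t udiv_qrnnd_oracle(uint64_t *r, uint64_t n1,
                                  uint64_t n0, uint64_t d)
{
    unsigned __int128 n = ((unsigned __int128)n1 << 64) | n0;
    *r = (uint64_t)(n % d);
    return (uint64_t)(n / d);          /* fits in 64 bits when n1 < d */
}

int main(void)
{
    uint64_t r;
    uint64_t q = udiv_qrnnd_oracle(&r, 1, 0, 3);   /* 2^64 / 3 */
    assert(q == 0x5555555555555555ULL && r == 1);
    return 0;
}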
*----------------------------------------------------------------------------*/ -static uint32_t estimateSqrt32(int_fast16_t aExp, uint32_t a) +static inline uint32_t estimateSqrt32(int aExp, uint32_t a) { static const uint16_t sqrtOddAdjustments[] = { 0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0, @@ -601,7 +731,7 @@ static uint32_t estimateSqrt32(int_fast16_t aExp, uint32_t a) 0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E, 0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002 }; - int8 index; + int8_t index; uint32_t z; index = ( a>>27 ) & 15; @@ -620,82 +750,6 @@ static uint32_t estimateSqrt32(int_fast16_t aExp, uint32_t a) } -/*---------------------------------------------------------------------------- -| Returns the number of leading 0 bits before the most-significant 1 bit of -| `a'. If `a' is zero, 32 is returned. -*----------------------------------------------------------------------------*/ - -static int8 countLeadingZeros32( uint32_t a ) -{ -#if SOFTFLOAT_GNUC_PREREQ(3, 4) - if (a) { - return __builtin_clz(a); - } else { - return 32; - } -#else - static const int8 countLeadingZerosHigh[] = { - 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - int8 shiftCount; - - shiftCount = 0; - if ( a < 0x10000 ) { - shiftCount += 16; - a <<= 16; - } - if ( a < 0x1000000 ) { - shiftCount += 8; - a <<= 8; - } - shiftCount += countLeadingZerosHigh[ a>>24 ]; - return shiftCount; -#endif -} - -/*---------------------------------------------------------------------------- -| Returns the number of leading 0 bits before the most-significant 1 bit of -| `a'. If `a' is zero, 64 is returned. -*----------------------------------------------------------------------------*/ - -static int8 countLeadingZeros64( uint64_t a ) -{ -#if SOFTFLOAT_GNUC_PREREQ(3, 4) - if (a) { - return __builtin_clzll(a); - } else { - return 64; - } -#else - int8 shiftCount; - - shiftCount = 0; - if ( a < ( (uint64_t) 1 )<<32 ) { - shiftCount += 32; - } - else { - a >>= 32; - } - shiftCount += countLeadingZeros32( (uint32_t)a ); - return shiftCount; -#endif -} - /*---------------------------------------------------------------------------- | Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' | is equal to the 128-bit value formed by concatenating `b0' and `b1'. 
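[Annotation: the hand-rolled countLeadingZeros32/64 are dropped by this hunk; their table-free fast path was just the compiler builtin with a guard for zero, since the builtin's result is undefined at zero. The equivalent guarded form, as the deleted GNUC branch wrote it:]

#include <stdint.h>

static int clz32(uint32_t a)
{
    return a ? __builtin_clz(a) : 32;    /* __builtin_clz(0) is undefined */
}

static int clz64(uint64_t a)
{
    return a ? __builtin_clzll(a) : 64;
}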
@@ -747,3 +801,5 @@ static inline flag ne128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 ) return ( a0 != b0 ) || ( a1 != b1 ); } + +#endif diff --git a/qemu/include/fpu/softfloat-types.h b/qemu/include/fpu/softfloat-types.h new file mode 100644 index 00000000..565dced5 --- /dev/null +++ b/qemu/include/fpu/softfloat-types.h @@ -0,0 +1,182 @@ +/* + * QEMU float support + * + * The code in this source file is derived from release 2a of the SoftFloat + * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and + * some later contributions) are provided under that license, as detailed below. + * It has subsequently been modified by contributors to the QEMU Project, + * so some portions are provided under: + * the SoftFloat-2a license + * the BSD license + * GPL-v2-or-later + * + * This header holds definitions for code that might be dealing with + * softfloat types but not need access to the actual library functions. + */ +/* +=============================================================================== +This C header file is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2a. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these four paragraphs for those parts of +this code that are retained. + +=============================================================================== +*/ + +/* BSD licensing: + * Copyright (c) 2006, Fabrice Bellard + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Portions of this work are licensed under the terms of the GNU GPL, + * version 2 or later. See the COPYING file in the top-level directory. + */ + +#ifndef SOFTFLOAT_TYPES_H +#define SOFTFLOAT_TYPES_H + +#include + +/* This 'flag' type must be able to hold at least 0 and 1. It should + * probably be replaced with 'bool' but the uses would need to be audited + * to check that they weren't accidentally relying on it being a larger type. + */ +typedef uint8_t flag; + +/* + * Software IEC/IEEE floating-point types. + */ + +typedef uint16_t float16; +typedef uint32_t float32; +typedef uint64_t float64; +#define float16_val(x) (x) +#define float32_val(x) (x) +#define float64_val(x) (x) +#define make_float16(x) (x) +#define make_float32(x) (x) +#define make_float64(x) (x) +#define const_float16(x) (x) +#define const_float32(x) (x) +#define const_float64(x) (x) +typedef struct { + uint64_t low; + uint16_t high; +} floatx80; +#define make_floatx80(exp, mant) ((floatx80) { mant, exp }) +#define make_floatx80_init(exp, mant) { .low = mant, .high = exp } +typedef struct { +#ifdef HOST_WORDS_BIGENDIAN + uint64_t high, low; +#else + uint64_t low, high; +#endif +} float128; +#define make_float128(high_, low_) ((float128) { .high = high_, .low = low_ }) +#define make_float128_init(high_, low_) { .high = high_, .low = low_ } + +/* + * Software IEC/IEEE floating-point underflow tininess-detection mode. + */ + +enum { + float_tininess_after_rounding = 0, + float_tininess_before_rounding = 1 +}; + +/* + *Software IEC/IEEE floating-point rounding mode. + */ + +enum { + float_round_nearest_even = 0, + float_round_down = 1, + float_round_up = 2, + float_round_to_zero = 3, + float_round_ties_away = 4, + /* Not an IEEE rounding mode: round to the closest odd mantissa value */ + float_round_to_odd = 5, +}; + +/* + * Software IEC/IEEE floating-point exception flags. + */ + +enum { + float_flag_invalid = 1, + float_flag_divbyzero = 4, + float_flag_overflow = 8, + float_flag_underflow = 16, + float_flag_inexact = 32, + float_flag_input_denormal = 64, + float_flag_output_denormal = 128 +}; + + +/* + * Floating Point Status. Individual architectures may maintain + * several versions of float_status for different functions. The + * correct status for the operation is then passed by reference to + * most of the softfloat functions. + */ + +typedef struct float_status { + signed char float_detect_tininess; + signed char float_rounding_mode; + uint8_t float_exception_flags; + signed char floatx80_rounding_precision; + /* should denormalised results go to zero and set the inexact flag? */ + flag flush_to_zero; + /* should denormalised inputs go to zero and set the input_denormal flag? 
*/ + flag flush_inputs_to_zero; + flag default_nan_mode; + /* not always used -- see snan_bit_is_one() in softfloat-specialize.h */ + flag snan_bit_is_one; +} float_status; + +#endif /* SOFTFLOAT_TYPES_H */ diff --git a/qemu/include/fpu/softfloat.h b/qemu/include/fpu/softfloat.h index d15678d5..ecb8ba01 100644 --- a/qemu/include/fpu/softfloat.h +++ b/qemu/include/fpu/softfloat.h @@ -1,13 +1,24 @@ /* * QEMU float support * - * Derived from SoftFloat. + * The code in this source file is derived from release 2a of the SoftFloat + * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and + * some later contributions) are provided under that license, as detailed below. + * It has subsequently been modified by contributors to the QEMU Project, + * so some portions are provided under: + * the SoftFloat-2a license + * the BSD license + * GPL-v2-or-later + * + * Any future contributions to this file after December 1st 2014 will be + * taken to be licensed under the Softfloat-2a license unless specifically + * indicated otherwise. */ -/*============================================================================ - -This C header file is part of the SoftFloat IEC/IEEE Floating-point Arithmetic -Package, Release 2b. +/* +=============================================================================== +This C header file is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2a. Written by John R. Hauser. This work was made possible in part by the International Computer Science Institute, located at Suite 600, 1947 Center @@ -16,58 +27,61 @@ National Science Foundation under grant MIP-9311980. The original version of this code was written as part of a project to build a fixed-point vector processor in collaboration with the University of California at Berkeley, overseen by Profs. Nelson Morgan and John Wawrzynek. More information -is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ arithmetic/SoftFloat.html'. -THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has -been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES -RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS -AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, -COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE -EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE -INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR -OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. Derivative works are acceptable, even for commercial purposes, so long as -(1) the source code for the derivative work includes prominent notice that -the work is derivative, and (2) the source code includes prominent notice with -these four paragraphs for those parts of this code that are retained. +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these four paragraphs for those parts of +this code that are retained. 
-=============================================================================*/ +=============================================================================== +*/ + +/* BSD licensing: + * Copyright (c) 2006, Fabrice Bellard + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Portions of this work are licensed under the terms of the GNU GPL, + * version 2 or later. See the COPYING file in the top-level directory. + */ #ifndef SOFTFLOAT_H #define SOFTFLOAT_H -#if defined(CONFIG_SOLARIS) && defined(CONFIG_NEEDS_LIBSUNMATH) -#include -#endif - -#include "unicorn/platform.h" -#include "config-host.h" -#include "qemu/osdep.h" - -/*---------------------------------------------------------------------------- -| Each of the following `typedef's defines the most convenient type that holds -| integers of at least as many bits as specified. For example, `uint8' should -| be the most convenient type that can hold unsigned integers of as many as -| 8 bits. The `flag' type must be able to hold either a 0 or 1. For most -| implementations of C, `flag', `uint8', and `int8' should all be `typedef'ed -| to the same as `int'. -*----------------------------------------------------------------------------*/ -typedef uint8_t flag; -typedef uint8_t uint8; -typedef int8_t int8; -typedef unsigned int uint32; -typedef signed int int32; -typedef uint64_t uint64; -typedef int64_t int64; - -#define LIT64( a ) a##LL - -#define STATUS_PARAM , float_status *status -#define STATUS(field) status->field -#define STATUS_VAR , status - /*---------------------------------------------------------------------------- | Software IEC/IEEE floating-point ordering relations *----------------------------------------------------------------------------*/ @@ -78,182 +92,22 @@ enum { float_relation_unordered = 2 }; -/*---------------------------------------------------------------------------- -| Software IEC/IEEE floating-point types. 
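[Annotation: this hunk also retires the STATUS_PARAM/STATUS/STATUS_VAR macros, which hid the float_status argument behind token pasting; the new prototypes spell it out. The mechanical effect, shown on one conversion routine whose both spellings appear in this diff (the _old suffix and local typedefs are illustrative only):]

#include <stdint.h>

typedef uint32_t float32;                /* as in softfloat-types.h */
typedef struct float_status float_status;

/* Old style: the macro appended the status parameter at every declaration. */
#define STATUS_PARAM , float_status *status
float32 int32_to_float32_old(int32_t STATUS_PARAM);

/* New style after this patch: the parameter is written out explicitly. */
float32 int32_to_float32(int32_t, float_status *status);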
-*----------------------------------------------------------------------------*/ -/* Use structures for soft-float types. This prevents accidentally mixing - them with native int/float types. A sufficiently clever compiler and - sane ABI should be able to see though these structs. However - x86/gcc 3.x seems to struggle a bit, so leave them disabled by default. */ -//#define USE_SOFTFLOAT_STRUCT_TYPES -#ifdef USE_SOFTFLOAT_STRUCT_TYPES -typedef struct { - uint16_t v; -} float16; -#define float16_val(x) (((float16)(x)).v) -#define make_float16(x) __extension__ ({ float16 f16_val = {x}; f16_val; }) -#define const_float16(x) { x } -typedef struct { - uint32_t v; -} float32; -/* The cast ensures an error if the wrong type is passed. */ -#define float32_val(x) (((float32)(x)).v) -#define make_float32(x) __extension__ ({ float32 f32_val = {x}; f32_val; }) -#define const_float32(x) { x } -typedef struct { - uint64_t v; -} float64; -#define float64_val(x) (((float64)(x)).v) -#define make_float64(x) __extension__ ({ float64 f64_val = {x}; f64_val; }) -#define const_float64(x) { x } -#else -typedef uint16_t float16; -typedef uint32_t float32; -typedef uint64_t float64; -#define float16_val(x) (x) -#define float32_val(x) (x) -#define float64_val(x) (x) -#define make_float16(x) (x) -#define make_float32(x) (x) -#define make_float64(x) (x) -#define const_float16(x) (x) -#define const_float32(x) (x) -#define const_float64(x) (x) -#endif -typedef struct { - uint64_t low; - uint16_t high; -} floatx80; -#define make_floatx80(exp, mant) ((floatx80) { mant, exp }) -#define make_floatx80_init(exp, mant) { mant, exp } -typedef struct { -#ifdef HOST_WORDS_BIGENDIAN - uint64_t high, low; -#else - uint64_t low, high; -#endif -} float128; -#ifdef HOST_WORDS_BIGENDIAN -#define make_float128(high_, low_) ((float128) { high_, low_ }) -#define make_float128_init(high_, low_) { high_, low_ } -#else -#define make_float128(high_, low_) ((float128) { low_, high_ }) -#define make_float128_init(high_, low_) { low_, high_ } -#endif - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE floating-point underflow tininess-detection mode. -*----------------------------------------------------------------------------*/ -enum { - float_tininess_after_rounding = 0, - float_tininess_before_rounding = 1 -}; - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE floating-point rounding mode. -*----------------------------------------------------------------------------*/ -enum { - float_round_nearest_even = 0, - float_round_down = 1, - float_round_up = 2, - float_round_to_zero = 3, - float_round_ties_away = 4, -}; - -/*---------------------------------------------------------------------------- -| Software IEC/IEEE floating-point exception flags. -*----------------------------------------------------------------------------*/ -enum { - float_flag_invalid = 1, - float_flag_divbyzero = 4, - float_flag_overflow = 8, - float_flag_underflow = 16, - float_flag_inexact = 32, - float_flag_input_denormal = 64, - float_flag_output_denormal = 128 -}; - -typedef struct float_status { - signed char float_detect_tininess; - signed char float_rounding_mode; - signed char float_exception_flags; - signed char floatx80_rounding_precision; - /* should denormalised results go to zero and set the inexact flag? */ - flag flush_to_zero; - /* should denormalised inputs go to zero and set the input_denormal flag? 
*/ - flag flush_inputs_to_zero; - flag default_nan_mode; -} float_status; - -static inline void set_float_detect_tininess(int val STATUS_PARAM) -{ - STATUS(float_detect_tininess) = val; -} -static inline void set_float_rounding_mode(int val STATUS_PARAM) -{ - STATUS(float_rounding_mode) = val; -} -static inline void set_float_exception_flags(int val STATUS_PARAM) -{ - STATUS(float_exception_flags) = val; -} -static inline void set_floatx80_rounding_precision(int val STATUS_PARAM) -{ - STATUS(floatx80_rounding_precision) = val; -} -static inline void set_flush_to_zero(flag val STATUS_PARAM) -{ - STATUS(flush_to_zero) = val; -} -static inline void set_flush_inputs_to_zero(flag val STATUS_PARAM) -{ - STATUS(flush_inputs_to_zero) = val; -} -static inline void set_default_nan_mode(flag val STATUS_PARAM) -{ - STATUS(default_nan_mode) = val; -} -static inline int get_float_detect_tininess(float_status *status) -{ - return STATUS(float_detect_tininess); -} -static inline int get_float_rounding_mode(float_status *status) -{ - return STATUS(float_rounding_mode); -} -static inline int get_float_exception_flags(float_status *status) -{ - return STATUS(float_exception_flags); -} -static inline int get_floatx80_rounding_precision(float_status *status) -{ - return STATUS(floatx80_rounding_precision); -} -static inline flag get_flush_to_zero(float_status *status) -{ - return STATUS(flush_to_zero); -} -static inline flag get_flush_inputs_to_zero(float_status *status) -{ - return STATUS(flush_inputs_to_zero); -} -static inline flag get_default_nan_mode(float_status *status) -{ - return STATUS(default_nan_mode); -} +#include "fpu/softfloat-types.h" +#include "fpu/softfloat-helpers.h" /*---------------------------------------------------------------------------- | Routine to raise any or all of the software IEC/IEEE floating-point | exception flags. *----------------------------------------------------------------------------*/ -void float_raise( uint8_t flags STATUS_PARAM); +void float_raise(uint8_t flags, float_status *status); /*---------------------------------------------------------------------------- | If `a' is denormal and we are in flush-to-zero mode then set the | input-denormal exception and return zero. Otherwise just return the value. *----------------------------------------------------------------------------*/ -float32 float32_squash_input_denormal(float32 a STATUS_PARAM); -float64 float64_squash_input_denormal(float64 a STATUS_PARAM); +float16 float16_squash_input_denormal(float16 a, float_status *status); +float32 float32_squash_input_denormal(float32 a, float_status *status); +float64 float64_squash_input_denormal(float64 a, float_status *status); /*---------------------------------------------------------------------------- | Options to indicate which negations to perform in float*_muladd() @@ -273,118 +127,238 @@ enum { /*---------------------------------------------------------------------------- | Software IEC/IEEE integer-to-floating-point conversion routines. 
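The accessor inlines removed above keep their names; they now come from fpu/softfloat-helpers.h, included just above. Assuming those helper names are unchanged, a target that flushes denormal inputs would use the squash routine like this (a sketch, not part of the header):

static float32 read_fp_input(float32 raw, float_status *st)
{
    set_flush_inputs_to_zero(1, st);   /* denormal inputs now read as +/-0 */
    /* e.g. raw == make_float32(0x00000001) returns +0.0f and sets
     * float_flag_input_denormal in st->float_exception_flags */
    return float32_squash_input_denormal(raw, st);
}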
*----------------------------------------------------------------------------*/ -float32 int32_to_float32(int32_t STATUS_PARAM); -float64 int32_to_float64(int32_t STATUS_PARAM); -float32 uint32_to_float32(uint32_t STATUS_PARAM); -float64 uint32_to_float64(uint32_t STATUS_PARAM); -floatx80 int32_to_floatx80(int32_t STATUS_PARAM); -float128 int32_to_float128(int32_t STATUS_PARAM); -float32 int64_to_float32(int64_t STATUS_PARAM); -float32 uint64_to_float32(uint64_t STATUS_PARAM); -float64 int64_to_float64(int64_t STATUS_PARAM); -float64 uint64_to_float64(uint64_t STATUS_PARAM); -floatx80 int64_to_floatx80(int64_t STATUS_PARAM); -float128 int64_to_float128(int64_t STATUS_PARAM); -float128 uint64_to_float128(uint64_t STATUS_PARAM); -/* We provide the int16 versions for symmetry of API with float-to-int */ -static inline float32 int16_to_float32(int16_t v STATUS_PARAM) -{ - return int32_to_float32(v STATUS_VAR); -} +float16 int16_to_float16_scalbn(int16_t a, int, float_status *status); +float16 int32_to_float16_scalbn(int32_t a, int, float_status *status); +float16 int64_to_float16_scalbn(int64_t a, int, float_status *status); +float16 uint16_to_float16_scalbn(uint16_t a, int, float_status *status); +float16 uint32_to_float16_scalbn(uint32_t a, int, float_status *status); +float16 uint64_to_float16_scalbn(uint64_t a, int, float_status *status); -static inline float32 uint16_to_float32(uint16_t v STATUS_PARAM) -{ - return uint32_to_float32(v STATUS_VAR); -} +float16 int16_to_float16(int16_t a, float_status *status); +float16 int32_to_float16(int32_t a, float_status *status); +float16 int64_to_float16(int64_t a, float_status *status); +float16 uint16_to_float16(uint16_t a, float_status *status); +float16 uint32_to_float16(uint32_t a, float_status *status); +float16 uint64_to_float16(uint64_t a, float_status *status); -static inline float64 int16_to_float64(int16_t v STATUS_PARAM) -{ - return int32_to_float64(v STATUS_VAR); -} +float32 int16_to_float32_scalbn(int16_t, int, float_status *status); +float32 int32_to_float32_scalbn(int32_t, int, float_status *status); +float32 int64_to_float32_scalbn(int64_t, int, float_status *status); +float32 uint16_to_float32_scalbn(uint16_t, int, float_status *status); +float32 uint32_to_float32_scalbn(uint32_t, int, float_status *status); +float32 uint64_to_float32_scalbn(uint64_t, int, float_status *status); -static inline float64 uint16_to_float64(uint16_t v STATUS_PARAM) -{ - return uint32_to_float64(v STATUS_VAR); -} +float32 int16_to_float32(int16_t, float_status *status); +float32 int32_to_float32(int32_t, float_status *status); +float32 int64_to_float32(int64_t, float_status *status); +float32 uint16_to_float32(uint16_t, float_status *status); +float32 uint32_to_float32(uint32_t, float_status *status); +float32 uint64_to_float32(uint64_t, float_status *status); + +float64 int16_to_float64_scalbn(int16_t, int, float_status *status); +float64 int32_to_float64_scalbn(int32_t, int, float_status *status); +float64 int64_to_float64_scalbn(int64_t, int, float_status *status); +float64 uint16_to_float64_scalbn(uint16_t, int, float_status *status); +float64 uint32_to_float64_scalbn(uint32_t, int, float_status *status); +float64 uint64_to_float64_scalbn(uint64_t, int, float_status *status); + +float64 int16_to_float64(int16_t, float_status *status); +float64 int32_to_float64(int32_t, float_status *status); +float64 int64_to_float64(int64_t, float_status *status); +float64 uint16_to_float64(uint16_t, float_status *status); +float64 uint32_to_float64(uint32_t, 
float_status *status); +float64 uint64_to_float64(uint64_t, float_status *status); + +floatx80 int32_to_floatx80(int32_t, float_status *status); +floatx80 int64_to_floatx80(int64_t, float_status *status); + +float128 int32_to_float128(int32_t, float_status *status); +float128 int64_to_float128(int64_t, float_status *status); +float128 uint64_to_float128(uint64_t, float_status *status); /*---------------------------------------------------------------------------- | Software half-precision conversion routines. *----------------------------------------------------------------------------*/ -float16 float32_to_float16( float32, flag STATUS_PARAM ); -float32 float16_to_float32( float16, flag STATUS_PARAM ); -float16 float64_to_float16(float64 a, flag ieee STATUS_PARAM); -float64 float16_to_float64(float16 a, flag ieee STATUS_PARAM); + +float16 float32_to_float16(float32, bool ieee, float_status *status); +float32 float16_to_float32(float16, bool ieee, float_status *status); +float16 float64_to_float16(float64 a, bool ieee, float_status *status); +float64 float16_to_float64(float16 a, bool ieee, float_status *status); + +int16_t float16_to_int16_scalbn(float16, int, int, float_status *status); +int32_t float16_to_int32_scalbn(float16, int, int, float_status *status); +int64_t float16_to_int64_scalbn(float16, int, int, float_status *status); + +int16_t float16_to_int16(float16, float_status *status); +int32_t float16_to_int32(float16, float_status *status); +int64_t float16_to_int64(float16, float_status *status); + +int16_t float16_to_int16_round_to_zero(float16, float_status *status); +int32_t float16_to_int32_round_to_zero(float16, float_status *status); +int64_t float16_to_int64_round_to_zero(float16, float_status *status); + +uint16_t float16_to_uint16_scalbn(float16 a, int, int, float_status *status); +uint32_t float16_to_uint32_scalbn(float16 a, int, int, float_status *status); +uint64_t float16_to_uint64_scalbn(float16 a, int, int, float_status *status); + +uint16_t float16_to_uint16(float16 a, float_status *status); +uint32_t float16_to_uint32(float16 a, float_status *status); +uint64_t float16_to_uint64(float16 a, float_status *status); + +uint16_t float16_to_uint16_round_to_zero(float16 a, float_status *status); +uint32_t float16_to_uint32_round_to_zero(float16 a, float_status *status); +uint64_t float16_to_uint64_round_to_zero(float16 a, float_status *status); /*---------------------------------------------------------------------------- | Software half-precision operations. 
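The half-precision converters above keep an explicit ieee flag, now a bool: true selects IEEE 754 binary16, false the ARM alternative format that gives up Inf/NaN encodings for extra range. A round-trip sketch:

static float32 through_half(float32 f, float_status *st)
{
    float16 h = float32_to_float16(f, true, st);  /* may raise inexact/overflow */
    return float16_to_float32(h, true, st);       /* widening back is always exact */
}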
*----------------------------------------------------------------------------*/ -int float16_is_quiet_nan( float16 ); -int float16_is_signaling_nan( float16 ); -float16 float16_maybe_silence_nan( float16 ); + +float16 float16_round_to_int(float16, float_status *status); +float16 float16_add(float16, float16, float_status *status); +float16 float16_sub(float16, float16, float_status *status); +float16 float16_mul(float16, float16, float_status *status); +float16 float16_muladd(float16, float16, float16, int, float_status *status); +float16 float16_div(float16, float16, float_status *status); +float16 float16_scalbn(float16, int, float_status *status); +float16 float16_min(float16, float16, float_status *status); +float16 float16_max(float16, float16, float_status *status); +float16 float16_minnum(float16, float16, float_status *status); +float16 float16_maxnum(float16, float16, float_status *status); +float16 float16_minnummag(float16, float16, float_status *status); +float16 float16_maxnummag(float16, float16, float_status *status); +float16 float16_sqrt(float16, float_status *status); +int float16_compare(float16, float16, float_status *status); +int float16_compare_quiet(float16, float16, float_status *status); + +int float16_is_quiet_nan(float16, float_status *status); +int float16_is_signaling_nan(float16, float_status *status); +float16 float16_silence_nan(float16, float_status *status); static inline int float16_is_any_nan(float16 a) { return ((float16_val(a) & ~0x8000) > 0x7c00); } +static inline int float16_is_neg(float16 a) +{ + return float16_val(a) >> 15; +} + +static inline int float16_is_infinity(float16 a) +{ + return (float16_val(a) & 0x7fff) == 0x7c00; +} + +static inline int float16_is_zero(float16 a) +{ + return (float16_val(a) & 0x7fff) == 0; +} + +static inline int float16_is_zero_or_denormal(float16 a) +{ + return (float16_val(a) & 0x7c00) == 0; +} + +static inline float16 float16_abs(float16 a) +{ + /* Note that abs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. + */ + return make_float16(float16_val(a) & 0x7fff); +} + +static inline float16 float16_chs(float16 a) +{ + /* Note that chs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. + */ + return make_float16(float16_val(a) ^ 0x8000); +} + +static inline float16 float16_set_sign(float16 a, int sign) +{ + return make_float16((float16_val(a) & 0x7fff) | (sign << 15)); +} + +#define float16_zero make_float16(0) +#define float16_half make_float16(0x3800) +#define float16_one make_float16(0x3c00) +#define float16_one_point_five make_float16(0x3e00) +#define float16_two make_float16(0x4000) +#define float16_three make_float16(0x4200) +#define float16_infinity make_float16(0x7c00) + /*---------------------------------------------------------------------------- | The pattern for a default generated half-precision NaN. *----------------------------------------------------------------------------*/ -extern const float16 float16_default_nan; +float16 float16_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE single-precision conversion routines. 
*----------------------------------------------------------------------------*/ -int_fast16_t float32_to_int16(float32 STATUS_PARAM); -uint_fast16_t float32_to_uint16(float32 STATUS_PARAM); -int_fast16_t float32_to_int16_round_to_zero(float32 STATUS_PARAM); -uint_fast16_t float32_to_uint16_round_to_zero(float32 STATUS_PARAM); -int32 float32_to_int32( float32 STATUS_PARAM ); -int32 float32_to_int32_round_to_zero( float32 STATUS_PARAM ); -uint32 float32_to_uint32( float32 STATUS_PARAM ); -uint32 float32_to_uint32_round_to_zero( float32 STATUS_PARAM ); -int64 float32_to_int64( float32 STATUS_PARAM ); -uint64 float32_to_uint64(float32 STATUS_PARAM); -uint64 float32_to_uint64_round_to_zero(float32 STATUS_PARAM); -int64 float32_to_int64_round_to_zero( float32 STATUS_PARAM ); -float64 float32_to_float64( float32 STATUS_PARAM ); -floatx80 float32_to_floatx80( float32 STATUS_PARAM ); -float128 float32_to_float128( float32 STATUS_PARAM ); + +int16_t float32_to_int16_scalbn(float32, int, int, float_status *status); +int32_t float32_to_int32_scalbn(float32, int, int, float_status *status); +int64_t float32_to_int64_scalbn(float32, int, int, float_status *status); + +int16_t float32_to_int16(float32, float_status *status); +int32_t float32_to_int32(float32, float_status *status); +int64_t float32_to_int64(float32, float_status *status); + +int16_t float32_to_int16_round_to_zero(float32, float_status *status); +int32_t float32_to_int32_round_to_zero(float32, float_status *status); +int64_t float32_to_int64_round_to_zero(float32, float_status *status); + +uint16_t float32_to_uint16_scalbn(float32, int, int, float_status *status); +uint32_t float32_to_uint32_scalbn(float32, int, int, float_status *status); +uint64_t float32_to_uint64_scalbn(float32, int, int, float_status *status); + +uint16_t float32_to_uint16(float32, float_status *status); +uint32_t float32_to_uint32(float32, float_status *status); +uint64_t float32_to_uint64(float32, float_status *status); + +uint16_t float32_to_uint16_round_to_zero(float32, float_status *status); +uint32_t float32_to_uint32_round_to_zero(float32, float_status *status); +uint64_t float32_to_uint64_round_to_zero(float32, float_status *status); + +float64 float32_to_float64(float32, float_status *status); +floatx80 float32_to_floatx80(float32, float_status *status); +float128 float32_to_float128(float32, float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE single-precision operations. 
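The _scalbn conversion variants fold a power-of-two scale into the conversion, which is exactly what fixed-point VCVT-style helpers need. Reading the prototypes as (value, rounding mode, scale, status) for float-to-int and (value, scale, status) for int-to-float, the convention in contemporary QEMU, a Q15 fixed-point pair would look roughly like:

static int16_t float_to_q15(float32 f, float_status *st)
{
    /* converts f * 2^15, rounding to nearest even */
    return float32_to_int16_scalbn(f, float_round_nearest_even, 15, st);
}

static float32 q15_to_float(int16_t q, float_status *st)
{
    return int16_to_float32_scalbn(q, -15, st);   /* q * 2^-15 */
}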
*----------------------------------------------------------------------------*/ -float32 float32_round_to_int( float32 STATUS_PARAM ); -float32 float32_add( float32, float32 STATUS_PARAM ); -float32 float32_sub( float32, float32 STATUS_PARAM ); -float32 float32_mul( float32, float32 STATUS_PARAM ); -float32 float32_div( float32, float32 STATUS_PARAM ); -float32 float32_rem( float32, float32 STATUS_PARAM ); -float32 float32_muladd(float32, float32, float32, int STATUS_PARAM); -float32 float32_sqrt( float32 STATUS_PARAM ); -float32 float32_exp2( float32 STATUS_PARAM ); -float32 float32_log2( float32 STATUS_PARAM ); -int float32_eq( float32, float32 STATUS_PARAM ); -int float32_le( float32, float32 STATUS_PARAM ); -int float32_lt( float32, float32 STATUS_PARAM ); -int float32_unordered( float32, float32 STATUS_PARAM ); -int float32_eq_quiet( float32, float32 STATUS_PARAM ); -int float32_le_quiet( float32, float32 STATUS_PARAM ); -int float32_lt_quiet( float32, float32 STATUS_PARAM ); -int float32_unordered_quiet( float32, float32 STATUS_PARAM ); -int float32_compare( float32, float32 STATUS_PARAM ); -int float32_compare_quiet( float32, float32 STATUS_PARAM ); -float32 float32_min(float32, float32 STATUS_PARAM); -float32 float32_max(float32, float32 STATUS_PARAM); -float32 float32_minnum(float32, float32 STATUS_PARAM); -float32 float32_maxnum(float32, float32 STATUS_PARAM); -float32 float32_minnummag(float32, float32 STATUS_PARAM); -float32 float32_maxnummag(float32, float32 STATUS_PARAM); -int float32_is_quiet_nan( float32 ); -int float32_is_signaling_nan( float32 ); -float32 float32_maybe_silence_nan( float32 ); -float32 float32_scalbn( float32, int STATUS_PARAM ); +float32 float32_round_to_int(float32, float_status *status); +float32 float32_add(float32, float32, float_status *status); +float32 float32_sub(float32, float32, float_status *status); +float32 float32_mul(float32, float32, float_status *status); +float32 float32_div(float32, float32, float_status *status); +float32 float32_rem(float32, float32, float_status *status); +float32 float32_muladd(float32, float32, float32, int, float_status *status); +float32 float32_sqrt(float32, float_status *status); +float32 float32_exp2(float32, float_status *status); +float32 float32_log2(float32, float_status *status); +int float32_eq(float32, float32, float_status *status); +int float32_le(float32, float32, float_status *status); +int float32_lt(float32, float32, float_status *status); +int float32_unordered(float32, float32, float_status *status); +int float32_eq_quiet(float32, float32, float_status *status); +int float32_le_quiet(float32, float32, float_status *status); +int float32_lt_quiet(float32, float32, float_status *status); +int float32_unordered_quiet(float32, float32, float_status *status); +int float32_compare(float32, float32, float_status *status); +int float32_compare_quiet(float32, float32, float_status *status); +float32 float32_min(float32, float32, float_status *status); +float32 float32_max(float32, float32, float_status *status); +float32 float32_minnum(float32, float32, float_status *status); +float32 float32_maxnum(float32, float32, float_status *status); +float32 float32_minnummag(float32, float32, float_status *status); +float32 float32_maxnummag(float32, float32, float_status *status); +int float32_is_quiet_nan(float32, float_status *status); +int float32_is_signaling_nan(float32, float_status *status); +float32 float32_silence_nan(float32, float_status *status); +float32 float32_scalbn(float32, int, float_status 
*status); static inline float32 float32_abs(float32 a) { @@ -419,7 +393,7 @@ static inline int float32_is_zero(float32 a) static inline int float32_is_any_nan(float32 a) { - return ((float32_val(a) & ~(1U << 31)) > 0x7f800000UL); + return ((float32_val(a) & ~(1 << 31)) > 0x7f800000UL); } static inline int float32_is_zero_or_denormal(float32 a) @@ -427,76 +401,120 @@ static inline int float32_is_zero_or_denormal(float32 a) return (float32_val(a) & 0x7f800000) == 0; } +static inline bool float32_is_normal(float32 a) +{ + return (((float32_val(a) >> 23) + 1) & 0xff) >= 2; +} + +static inline bool float32_is_denormal(float32 a) +{ + return float32_is_zero_or_denormal(a) && !float32_is_zero(a); +} + +static inline bool float32_is_zero_or_normal(float32 a) +{ + return float32_is_normal(a) || float32_is_zero(a); +} + static inline float32 float32_set_sign(float32 a, int sign) { return make_float32((float32_val(a) & 0x7fffffff) | (sign << 31)); } #define float32_zero make_float32(0) -#define float32_one make_float32(0x3f800000) -#define float32_ln2 make_float32(0x3f317218) -#define float32_pi make_float32(0x40490fdb) #define float32_half make_float32(0x3f000000) +#define float32_one make_float32(0x3f800000) +#define float32_one_point_five make_float32(0x3fc00000) +#define float32_two make_float32(0x40000000) +#define float32_three make_float32(0x40400000) #define float32_infinity make_float32(0x7f800000) +/*---------------------------------------------------------------------------- +| Packs the sign `zSign', exponent `zExp', and significand `zSig' into a +| single-precision floating-point value, returning the result. After being +| shifted into the proper positions, the three fields are simply added +| together to form the result. This means that any integer portion of `zSig' +| will be added into the exponent. Since a properly normalized significand +| will have an integer portion equal to 1, the `zExp' input should be 1 less +| than the desired result exponent whenever `zSig' is a complete, normalized +| significand. +*----------------------------------------------------------------------------*/ + +static inline float32 packFloat32(flag zSign, int zExp, uint32_t zSig) +{ + return make_float32( + (((uint32_t)zSign) << 31) + (((uint32_t)zExp) << 23) + zSig); +} /*---------------------------------------------------------------------------- | The pattern for a default generated single-precision NaN. *----------------------------------------------------------------------------*/ -extern const float32 float32_default_nan; +float32 float32_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE double-precision conversion routines. 
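Because packFloat32 above uses addition rather than OR, an integer bit left in zSig carries into the exponent field. Both of the following therefore encode 1.0f, whose biased exponent is 0x7F:

float32 one_a = packFloat32(0, 0x7F, 0);           /* fraction-only significand */
float32 one_b = packFloat32(0, 0x7E, 0x00800000);  /* integer bit in zSig, so zExp is one less:
                                                      (0x7E << 23) + 0x00800000 == 0x3F800000 */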
*----------------------------------------------------------------------------*/ -int_fast16_t float64_to_int16(float64 STATUS_PARAM); -uint_fast16_t float64_to_uint16(float64 STATUS_PARAM); -int_fast16_t float64_to_int16_round_to_zero(float64 STATUS_PARAM); -uint_fast16_t float64_to_uint16_round_to_zero(float64 STATUS_PARAM); -int32 float64_to_int32( float64 STATUS_PARAM ); -int32 float64_to_int32_round_to_zero( float64 STATUS_PARAM ); -uint32 float64_to_uint32( float64 STATUS_PARAM ); -uint32 float64_to_uint32_round_to_zero( float64 STATUS_PARAM ); -int64 float64_to_int64( float64 STATUS_PARAM ); -int64 float64_to_int64_round_to_zero( float64 STATUS_PARAM ); -uint64 float64_to_uint64 (float64 a STATUS_PARAM); -uint64 float64_to_uint64_round_to_zero (float64 a STATUS_PARAM); -float32 float64_to_float32( float64 STATUS_PARAM ); -floatx80 float64_to_floatx80( float64 STATUS_PARAM ); -float128 float64_to_float128( float64 STATUS_PARAM ); + +int16_t float64_to_int16_scalbn(float64, int, int, float_status *status); +int32_t float64_to_int32_scalbn(float64, int, int, float_status *status); +int64_t float64_to_int64_scalbn(float64, int, int, float_status *status); + +int16_t float64_to_int16(float64, float_status *status); +int32_t float64_to_int32(float64, float_status *status); +int64_t float64_to_int64(float64, float_status *status); + +int16_t float64_to_int16_round_to_zero(float64, float_status *status); +int32_t float64_to_int32_round_to_zero(float64, float_status *status); +int64_t float64_to_int64_round_to_zero(float64, float_status *status); + +uint16_t float64_to_uint16_scalbn(float64, int, int, float_status *status); +uint32_t float64_to_uint32_scalbn(float64, int, int, float_status *status); +uint64_t float64_to_uint64_scalbn(float64, int, int, float_status *status); + +uint16_t float64_to_uint16(float64, float_status *status); +uint32_t float64_to_uint32(float64, float_status *status); +uint64_t float64_to_uint64(float64, float_status *status); + +uint16_t float64_to_uint16_round_to_zero(float64, float_status *status); +uint32_t float64_to_uint32_round_to_zero(float64, float_status *status); +uint64_t float64_to_uint64_round_to_zero(float64, float_status *status); + +float32 float64_to_float32(float64, float_status *status); +floatx80 float64_to_floatx80(float64, float_status *status); +float128 float64_to_float128(float64, float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE double-precision operations. 
*----------------------------------------------------------------------------*/ -float64 float64_round_to_int( float64 STATUS_PARAM ); -float64 float64_trunc_to_int( float64 STATUS_PARAM ); -float64 float64_add( float64, float64 STATUS_PARAM ); -float64 float64_sub( float64, float64 STATUS_PARAM ); -float64 float64_mul( float64, float64 STATUS_PARAM ); -float64 float64_div( float64, float64 STATUS_PARAM ); -float64 float64_rem( float64, float64 STATUS_PARAM ); -float64 float64_muladd(float64, float64, float64, int STATUS_PARAM); -float64 float64_sqrt( float64 STATUS_PARAM ); -float64 float64_log2( float64 STATUS_PARAM ); -int float64_eq( float64, float64 STATUS_PARAM ); -int float64_le( float64, float64 STATUS_PARAM ); -int float64_lt( float64, float64 STATUS_PARAM ); -int float64_unordered( float64, float64 STATUS_PARAM ); -int float64_eq_quiet( float64, float64 STATUS_PARAM ); -int float64_le_quiet( float64, float64 STATUS_PARAM ); -int float64_lt_quiet( float64, float64 STATUS_PARAM ); -int float64_unordered_quiet( float64, float64 STATUS_PARAM ); -int float64_compare( float64, float64 STATUS_PARAM ); -int float64_compare_quiet( float64, float64 STATUS_PARAM ); -float64 float64_min(float64, float64 STATUS_PARAM); -float64 float64_max(float64, float64 STATUS_PARAM); -float64 float64_minnum(float64, float64 STATUS_PARAM); -float64 float64_maxnum(float64, float64 STATUS_PARAM); -float64 float64_minnummag(float64, float64 STATUS_PARAM); -float64 float64_maxnummag(float64, float64 STATUS_PARAM); -int float64_is_quiet_nan( float64 a ); -int float64_is_signaling_nan( float64 ); -float64 float64_maybe_silence_nan( float64 ); -float64 float64_scalbn( float64, int STATUS_PARAM ); +float64 float64_round_to_int(float64, float_status *status); +float64 float64_add(float64, float64, float_status *status); +float64 float64_sub(float64, float64, float_status *status); +float64 float64_mul(float64, float64, float_status *status); +float64 float64_div(float64, float64, float_status *status); +float64 float64_rem(float64, float64, float_status *status); +float64 float64_muladd(float64, float64, float64, int, float_status *status); +float64 float64_sqrt(float64, float_status *status); +float64 float64_log2(float64, float_status *status); +int float64_eq(float64, float64, float_status *status); +int float64_le(float64, float64, float_status *status); +int float64_lt(float64, float64, float_status *status); +int float64_unordered(float64, float64, float_status *status); +int float64_eq_quiet(float64, float64, float_status *status); +int float64_le_quiet(float64, float64, float_status *status); +int float64_lt_quiet(float64, float64, float_status *status); +int float64_unordered_quiet(float64, float64, float_status *status); +int float64_compare(float64, float64, float_status *status); +int float64_compare_quiet(float64, float64, float_status *status); +float64 float64_min(float64, float64, float_status *status); +float64 float64_max(float64, float64, float_status *status); +float64 float64_minnum(float64, float64, float_status *status); +float64 float64_maxnum(float64, float64, float_status *status); +float64 float64_minnummag(float64, float64, float_status *status); +float64 float64_maxnummag(float64, float64, float_status *status); +int float64_is_quiet_nan(float64 a, float_status *status); +int float64_is_signaling_nan(float64, float_status *status); +float64 float64_silence_nan(float64, float_status *status); +float64 float64_scalbn(float64, int, float_status *status); static inline float64 
float64_abs(float64 a) { @@ -539,6 +557,21 @@ static inline int float64_is_zero_or_denormal(float64 a) return (float64_val(a) & 0x7ff0000000000000LL) == 0; } +static inline bool float64_is_normal(float64 a) +{ + return (((float64_val(a) >> 52) + 1) & 0x7ff) >= 2; +} + +static inline bool float64_is_denormal(float64 a) +{ + return float64_is_zero_or_denormal(a) && !float64_is_zero(a); +} + +static inline bool float64_is_zero_or_normal(float64 a) +{ + return float64_is_normal(a) || float64_is_zero(a); +} + static inline float64 float64_set_sign(float64 a, int sign) { return make_float64((float64_val(a) & 0x7fffffffffffffffULL) @@ -546,52 +579,60 @@ static inline float64 float64_set_sign(float64 a, int sign) } #define float64_zero make_float64(0) -#define float64_one make_float64(0x3ff0000000000000LL) -#define float64_ln2 make_float64(0x3fe62e42fefa39efLL) -#define float64_pi make_float64(0x400921fb54442d18LL) #define float64_half make_float64(0x3fe0000000000000LL) +#define float64_one make_float64(0x3ff0000000000000LL) +#define float64_one_point_five make_float64(0x3FF8000000000000ULL) +#define float64_two make_float64(0x4000000000000000ULL) +#define float64_three make_float64(0x4008000000000000ULL) +#define float64_ln2 make_float64(0x3fe62e42fefa39efLL) #define float64_infinity make_float64(0x7ff0000000000000LL) /*---------------------------------------------------------------------------- | The pattern for a default generated double-precision NaN. *----------------------------------------------------------------------------*/ -extern const float64 float64_default_nan; +float64 float64_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE extended double-precision conversion routines. *----------------------------------------------------------------------------*/ -int32 floatx80_to_int32( floatx80 STATUS_PARAM ); -int32 floatx80_to_int32_round_to_zero( floatx80 STATUS_PARAM ); -int64 floatx80_to_int64( floatx80 STATUS_PARAM ); -int64 floatx80_to_int64_round_to_zero( floatx80 STATUS_PARAM ); -float32 floatx80_to_float32( floatx80 STATUS_PARAM ); -float64 floatx80_to_float64( floatx80 STATUS_PARAM ); -float128 floatx80_to_float128( floatx80 STATUS_PARAM ); +int32_t floatx80_to_int32(floatx80, float_status *status); +int32_t floatx80_to_int32_round_to_zero(floatx80, float_status *status); +int64_t floatx80_to_int64(floatx80, float_status *status); +int64_t floatx80_to_int64_round_to_zero(floatx80, float_status *status); +float32 floatx80_to_float32(floatx80, float_status *status); +float64 floatx80_to_float64(floatx80, float_status *status); +float128 floatx80_to_float128(floatx80, float_status *status); + +/*---------------------------------------------------------------------------- +| The pattern for an extended double-precision inf. +*----------------------------------------------------------------------------*/ +extern const floatx80 floatx80_infinity; /*---------------------------------------------------------------------------- | Software IEC/IEEE extended double-precision operations. 
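The float32_is_normal/float64_is_normal tests above compress two comparisons into one: adding 1 to the biased exponent wraps the all-ones Inf/NaN pattern to 0 and maps the all-zeros zero/denormal pattern to 1, so ">= 2" is left meaning exactly "normal". Written long-hand for float64 (a sketch, not part of the header):

#include <stdbool.h>
#include <stdint.h>

static bool is_normal_longhand(uint64_t bits)
{
    uint64_t e = (bits >> 52) & 0x7ff;   /* 11-bit biased exponent */
    return e != 0 && e != 0x7ff;         /* neither zero/denormal nor Inf/NaN */
}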
*----------------------------------------------------------------------------*/ -floatx80 floatx80_round_to_int( floatx80 STATUS_PARAM ); -floatx80 floatx80_add( floatx80, floatx80 STATUS_PARAM ); -floatx80 floatx80_sub( floatx80, floatx80 STATUS_PARAM ); -floatx80 floatx80_mul( floatx80, floatx80 STATUS_PARAM ); -floatx80 floatx80_div( floatx80, floatx80 STATUS_PARAM ); -floatx80 floatx80_rem( floatx80, floatx80 STATUS_PARAM ); -floatx80 floatx80_sqrt( floatx80 STATUS_PARAM ); -int floatx80_eq( floatx80, floatx80 STATUS_PARAM ); -int floatx80_le( floatx80, floatx80 STATUS_PARAM ); -int floatx80_lt( floatx80, floatx80 STATUS_PARAM ); -int floatx80_unordered( floatx80, floatx80 STATUS_PARAM ); -int floatx80_eq_quiet( floatx80, floatx80 STATUS_PARAM ); -int floatx80_le_quiet( floatx80, floatx80 STATUS_PARAM ); -int floatx80_lt_quiet( floatx80, floatx80 STATUS_PARAM ); -int floatx80_unordered_quiet( floatx80, floatx80 STATUS_PARAM ); -int floatx80_compare( floatx80, floatx80 STATUS_PARAM ); -int floatx80_compare_quiet( floatx80, floatx80 STATUS_PARAM ); -int floatx80_is_quiet_nan( floatx80 ); -int floatx80_is_signaling_nan( floatx80 ); -floatx80 floatx80_maybe_silence_nan( floatx80 ); -floatx80 floatx80_scalbn( floatx80, int STATUS_PARAM ); +floatx80 floatx80_round(floatx80 a, float_status *status); +floatx80 floatx80_round_to_int(floatx80, float_status *status); +floatx80 floatx80_add(floatx80, floatx80, float_status *status); +floatx80 floatx80_sub(floatx80, floatx80, float_status *status); +floatx80 floatx80_mul(floatx80, floatx80, float_status *status); +floatx80 floatx80_div(floatx80, floatx80, float_status *status); +floatx80 floatx80_rem(floatx80, floatx80, float_status *status); +floatx80 floatx80_sqrt(floatx80, float_status *status); +int floatx80_eq(floatx80, floatx80, float_status *status); +int floatx80_le(floatx80, floatx80, float_status *status); +int floatx80_lt(floatx80, floatx80, float_status *status); +int floatx80_unordered(floatx80, floatx80, float_status *status); +int floatx80_eq_quiet(floatx80, floatx80, float_status *status); +int floatx80_le_quiet(floatx80, floatx80, float_status *status); +int floatx80_lt_quiet(floatx80, floatx80, float_status *status); +int floatx80_unordered_quiet(floatx80, floatx80, float_status *status); +int floatx80_compare(floatx80, floatx80, float_status *status); +int floatx80_compare_quiet(floatx80, floatx80, float_status *status); +int floatx80_is_quiet_nan(floatx80, float_status *status); +int floatx80_is_signaling_nan(floatx80, float_status *status); +floatx80 floatx80_silence_nan(floatx80, float_status *status); +floatx80 floatx80_scalbn(floatx80, int, float_status *status); static inline floatx80 floatx80_abs(floatx80 a) { @@ -607,7 +648,12 @@ static inline floatx80 floatx80_chs(floatx80 a) static inline int floatx80_is_infinity(floatx80 a) { - return (a.high & 0x7fff) == 0x7fff && a.low == 0x8000000000000000LL; +#if defined(TARGET_M68K) + return (a.high & 0x7fff) == floatx80_infinity.high && !(a.low << 1); +#else + return (a.high & 0x7fff) == floatx80_infinity.high && + a.low == floatx80_infinity.low; +#endif } static inline int floatx80_is_neg(floatx80 a) @@ -642,7 +688,7 @@ static inline int floatx80_is_any_nan(floatx80 a) *----------------------------------------------------------------------------*/ static inline bool floatx80_invalid_encoding(floatx80 a) { - return (a.low & ((uint64_t)1 << 63)) == 0 && (a.high & 0x7FFF) != 0; + return (a.low & (1ULL << 63)) == 0 && (a.high & 0x7FFF) != 0; } #define floatx80_zero 
make_floatx80(0x0000, 0x0000000000000000LL) @@ -650,48 +696,155 @@ static inline bool floatx80_invalid_encoding(floatx80 a) #define floatx80_ln2 make_floatx80(0x3ffe, 0xb17217f7d1cf79acLL) #define floatx80_pi make_floatx80(0x4000, 0xc90fdaa22168c235LL) #define floatx80_half make_floatx80(0x3ffe, 0x8000000000000000LL) -#define floatx80_infinity make_floatx80(0x7fff, 0x8000000000000000LL) + +/*---------------------------------------------------------------------------- +| Returns the fraction bits of the extended double-precision floating-point +| value `a'. +*----------------------------------------------------------------------------*/ + +static inline uint64_t extractFloatx80Frac(floatx80 a) +{ + return a.low; +} + +/*---------------------------------------------------------------------------- +| Returns the exponent bits of the extended double-precision floating-point +| value `a'. +*----------------------------------------------------------------------------*/ + +static inline int32_t extractFloatx80Exp(floatx80 a) +{ + return a.high & 0x7FFF; +} + +/*---------------------------------------------------------------------------- +| Returns the sign bit of the extended double-precision floating-point value +| `a'. +*----------------------------------------------------------------------------*/ + +static inline flag extractFloatx80Sign(floatx80 a) +{ + return a.high >> 15; +} + +/*---------------------------------------------------------------------------- +| Packs the sign `zSign', exponent `zExp', and significand `zSig' into an +| extended double-precision floating-point value, returning the result. +*----------------------------------------------------------------------------*/ + +static inline floatx80 packFloatx80(flag zSign, int32_t zExp, uint64_t zSig) +{ + floatx80 z; + + z.low = zSig; + z.high = (((uint16_t)zSign) << 15) + zExp; + return z; +} + +/*---------------------------------------------------------------------------- +| Normalizes the subnormal extended double-precision floating-point value +| represented by the denormalized significand `aSig'. The normalized exponent +| and significand are stored at the locations pointed to by `zExpPtr' and +| `zSigPtr', respectively. +*----------------------------------------------------------------------------*/ + +void normalizeFloatx80Subnormal(uint64_t aSig, int32_t *zExpPtr, + uint64_t *zSigPtr); + +/*---------------------------------------------------------------------------- +| Takes two extended double-precision floating-point values `a' and `b', one +| of which is a NaN, and returns the appropriate NaN result. If either `a' or +| `b' is a signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ + +floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status); + +/*---------------------------------------------------------------------------- +| Takes an abstract floating-point value having sign `zSign', exponent `zExp', +| and extended significand formed by the concatenation of `zSig0' and `zSig1', +| and returns the proper extended double-precision floating-point value +| corresponding to the abstract input. Ordinarily, the abstract value is +| rounded and packed into the extended double-precision format, with the +| inexact exception raised if the abstract input cannot be represented +| exactly. However, if the abstract value is too large, the overflow and +| inexact exceptions are raised and an infinity or maximal finite value is +| returned. 
If the abstract value is too small, the input value is rounded to +| a subnormal number, and the underflow and inexact exceptions are raised if +| the abstract input cannot be represented exactly as a subnormal extended +| double-precision floating-point number. +| If `roundingPrecision' is 32 or 64, the result is rounded to the same +| number of bits as single or double precision, respectively. Otherwise, the +| result is rounded to the full precision of the extended double-precision +| format. +| The input significand must be normalized or smaller. If the input +| significand is not normalized, `zExp' must be 0; in that case, the result +| returned is a subnormal number, and it must not require rounding. The +| handling of underflow and overflow follows the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +floatx80 roundAndPackFloatx80(int8_t roundingPrecision, flag zSign, + int32_t zExp, uint64_t zSig0, uint64_t zSig1, + float_status *status); + +/*---------------------------------------------------------------------------- +| Takes an abstract floating-point value having sign `zSign', exponent +| `zExp', and significand formed by the concatenation of `zSig0' and `zSig1', +| and returns the proper extended double-precision floating-point value +| corresponding to the abstract input. This routine is just like +| `roundAndPackFloatx80' except that the input significand does not have to be +| normalized. +*----------------------------------------------------------------------------*/ + +floatx80 normalizeRoundAndPackFloatx80(int8_t roundingPrecision, + flag zSign, int32_t zExp, + uint64_t zSig0, uint64_t zSig1, + float_status *status); /*---------------------------------------------------------------------------- | The pattern for a default generated extended double-precision NaN. *----------------------------------------------------------------------------*/ -extern const floatx80 floatx80_default_nan; +floatx80 floatx80_default_nan(float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE quadruple-precision conversion routines. 
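Unlike float32/float64, the 80-bit format keeps its integer bit explicit, so packFloatx80 above takes the full 64-bit significand together with the true biased exponent (bias 0x3FFF). A worked value:

/* 1.0 in extended precision: sign 0, exponent 0x3FFF, integer bit set */
floatx80 one = packFloatx80(0, 0x3FFF, 0x8000000000000000ULL);
/* the accessors invert the packing:
 *   extractFloatx80Sign(one) == 0
 *   extractFloatx80Exp(one)  == 0x3FFF
 *   extractFloatx80Frac(one) == 1ULL << 63
 */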
*----------------------------------------------------------------------------*/ -int32 float128_to_int32( float128 STATUS_PARAM ); -int32 float128_to_int32_round_to_zero( float128 STATUS_PARAM ); -int64 float128_to_int64( float128 STATUS_PARAM ); -int64 float128_to_int64_round_to_zero( float128 STATUS_PARAM ); -float32 float128_to_float32( float128 STATUS_PARAM ); -float64 float128_to_float64( float128 STATUS_PARAM ); -floatx80 float128_to_floatx80( float128 STATUS_PARAM ); +int32_t float128_to_int32(float128, float_status *status); +int32_t float128_to_int32_round_to_zero(float128, float_status *status); +int64_t float128_to_int64(float128, float_status *status); +int64_t float128_to_int64_round_to_zero(float128, float_status *status); +uint64_t float128_to_uint64(float128, float_status *status); +uint64_t float128_to_uint64_round_to_zero(float128, float_status *status); +uint32_t float128_to_uint32(float128, float_status *status); +uint32_t float128_to_uint32_round_to_zero(float128, float_status *status); +float32 float128_to_float32(float128, float_status *status); +float64 float128_to_float64(float128, float_status *status); +floatx80 float128_to_floatx80(float128, float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE quadruple-precision operations. *----------------------------------------------------------------------------*/ -float128 float128_round_to_int( float128 STATUS_PARAM ); -float128 float128_add( float128, float128 STATUS_PARAM ); -float128 float128_sub( float128, float128 STATUS_PARAM ); -float128 float128_mul( float128, float128 STATUS_PARAM ); -float128 float128_div( float128, float128 STATUS_PARAM ); -float128 float128_rem( float128, float128 STATUS_PARAM ); -float128 float128_sqrt( float128 STATUS_PARAM ); -int float128_eq( float128, float128 STATUS_PARAM ); -int float128_le( float128, float128 STATUS_PARAM ); -int float128_lt( float128, float128 STATUS_PARAM ); -int float128_unordered( float128, float128 STATUS_PARAM ); -int float128_eq_quiet( float128, float128 STATUS_PARAM ); -int float128_le_quiet( float128, float128 STATUS_PARAM ); -int float128_lt_quiet( float128, float128 STATUS_PARAM ); -int float128_unordered_quiet( float128, float128 STATUS_PARAM ); -int float128_compare( float128, float128 STATUS_PARAM ); -int float128_compare_quiet( float128, float128 STATUS_PARAM ); -int float128_is_quiet_nan( float128 ); -int float128_is_signaling_nan( float128 ); -float128 float128_maybe_silence_nan( float128 ); -float128 float128_scalbn( float128, int STATUS_PARAM ); +float128 float128_round_to_int(float128, float_status *status); +float128 float128_add(float128, float128, float_status *status); +float128 float128_sub(float128, float128, float_status *status); +float128 float128_mul(float128, float128, float_status *status); +float128 float128_div(float128, float128, float_status *status); +float128 float128_rem(float128, float128, float_status *status); +float128 float128_sqrt(float128, float_status *status); +int float128_eq(float128, float128, float_status *status); +int float128_le(float128, float128, float_status *status); +int float128_lt(float128, float128, float_status *status); +int float128_unordered(float128, float128, float_status *status); +int float128_eq_quiet(float128, float128, float_status *status); +int float128_le_quiet(float128, float128, float_status *status); +int float128_lt_quiet(float128, float128, float_status *status); +int float128_unordered_quiet(float128, float128, 
float_status *status); +int float128_compare(float128, float128, float_status *status); +int float128_compare_quiet(float128, float128, float_status *status); +int float128_is_quiet_nan(float128, float_status *status); +int float128_is_signaling_nan(float128, float_status *status); +float128 float128_silence_nan(float128, float_status *status); +float128 float128_scalbn(float128, int, float_status *status); static inline float128 float128_abs(float128 a) { @@ -725,6 +878,16 @@ static inline int float128_is_zero_or_denormal(float128 a) return (a.high & 0x7fff000000000000LL) == 0; } +static inline bool float128_is_normal(float128 a) +{ + return (((a.high >> 48) + 1) & 0x7fff) >= 2; +} + +static inline bool float128_is_denormal(float128 a) +{ + return float128_is_zero_or_denormal(a) && !float128_is_zero(a); +} + static inline int float128_is_any_nan(float128 a) { return ((a.high >> 48) & 0x7fff) == 0x7fff && @@ -736,6 +899,6 @@ static inline int float128_is_any_nan(float128 a) /*---------------------------------------------------------------------------- | The pattern for a default generated quadruple-precision NaN. *----------------------------------------------------------------------------*/ -extern const float128 float128_default_nan; +float128 float128_default_nan(float_status *status); -#endif /* !SOFTFLOAT_H */ +#endif /* SOFTFLOAT_H */ diff --git a/qemu/include/hw/arm/arm.h b/qemu/include/hw/arm/arm.h deleted file mode 100644 index 159c5cf9..00000000 --- a/qemu/include/hw/arm/arm.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Misc ARM declarations - * - * Copyright (c) 2006 CodeSourcery. - * Written by Paul Brook - * - * This code is licensed under the LGPL. - * - */ - -#ifndef ARM_MISC_H -#define ARM_MISC_H - -#include "exec/memory.h" - -void tosa_machine_init(struct uc_struct *uc); -void machvirt_machine_init(struct uc_struct *uc); // ARM64 - -void arm_cpu_register_types(void *opaque); -void aarch64_cpu_register_types(void *opaque); - -#endif /* !ARM_MISC_H */ diff --git a/qemu/include/hw/boards.h b/qemu/include/hw/boards.h deleted file mode 100644 index e0afde01..00000000 --- a/qemu/include/hw/boards.h +++ /dev/null @@ -1,82 +0,0 @@ -/* Declarations for use by board files for creating devices. 
*/ - -#ifndef HW_BOARDS_H -#define HW_BOARDS_H - -#include "qemu/typedefs.h" -#include "sysemu/accel.h" -#include "hw/qdev.h" -#include "qom/object.h" -#include "uc_priv.h" - -typedef int QEMUMachineInitFunc(struct uc_struct *uc, MachineState *ms); - -typedef void QEMUMachineResetFunc(void); - -struct QEMUMachine { - const char *family; /* NULL iff @name identifies a standalone machtype */ - const char *name; - QEMUMachineInitFunc *init; - QEMUMachineResetFunc *reset; - int max_cpus; - int is_default; - int arch; -}; - -void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner, - const char *name, - uint64_t ram_size); - -void qemu_register_machine(struct uc_struct *uc, QEMUMachine *m, const char *type_machine, - void (*init)(struct uc_struct *uc, ObjectClass *oc, void *data)); - -#define TYPE_MACHINE_SUFFIX "-machine" -#define TYPE_MACHINE "machine" -#undef MACHINE /* BSD defines it and QEMU does not use it */ -#define MACHINE(uc, obj) \ - OBJECT_CHECK(uc, MachineState, (obj), TYPE_MACHINE) -#define MACHINE_GET_CLASS(uc, obj) \ - OBJECT_GET_CLASS(uc, MachineClass, (obj), TYPE_MACHINE) -#define MACHINE_CLASS(uc, klass) \ - OBJECT_CLASS_CHECK(uc, MachineClass, (klass), TYPE_MACHINE) - -MachineClass *find_default_machine(struct uc_struct *uc, int arch); - -/** - * MachineClass: - * @qemu_machine: #QEMUMachine - */ -struct MachineClass { - /*< private >*/ - ObjectClass parent_class; - /*< public >*/ - - const char *family; /* NULL iff @name identifies a standalone machtype */ - const char *name; - - int (*init)(struct uc_struct *uc, MachineState *state); - void (*reset)(void); - - int max_cpus; - int is_default; - int arch; -}; - -/** - * MachineState: - */ -struct MachineState { - /*< private >*/ - Object parent_obj; - - /*< public >*/ - ram_addr_t ram_size; - ram_addr_t maxram_size; - const char *cpu_model; - struct uc_struct *uc; - AccelState *accelerator; -}; - -void machine_register_types(struct uc_struct *uc); - -#endif diff --git a/qemu/include/hw/core/cpu.h b/qemu/include/hw/core/cpu.h new file mode 100644 index 00000000..d1b7c250 --- /dev/null +++ b/qemu/include/hw/core/cpu.h @@ -0,0 +1,615 @@ +/* + * QEMU CPU model + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/> + */ +#ifndef QEMU_CPU_H +#define QEMU_CPU_H + +#include "exec/hwaddr.h" +#include "exec/memattrs.h" +#include "qemu/bitmap.h" +#include "qemu/queue.h" +#include "qemu/thread.h" + +/** + * vaddr: + * Type wide enough to contain any #target_ulong virtual address. 
+ */ +typedef uint64_t vaddr; +#define VADDR_PRId PRId64 +#define VADDR_PRIu PRIu64 +#define VADDR_PRIo PRIo64 +#define VADDR_PRIx PRIx64 +#define VADDR_PRIX PRIX64 +#define VADDR_MAX UINT64_MAX + +typedef enum MMUAccessType { + MMU_DATA_LOAD = 0, + MMU_DATA_STORE = 1, + MMU_INST_FETCH = 2 +} MMUAccessType; + +typedef struct CPUWatchpoint CPUWatchpoint; + +struct TranslationBlock; + +/** + * CPUClass: + * @class_by_name: Callback to map -cpu command line model name to an + * instantiatable CPU type. + * @has_work: Callback for checking if there is work to do. + * @do_interrupt: Callback for interrupt handling. + * @do_unaligned_access: Callback for unaligned access handling, if + * the target defines #TARGET_ALIGNED_ONLY. + * @do_transaction_failed: Callback for handling failed memory transactions + * (ie bus faults or external aborts; not MMU faults) + * @get_arch_id: Callback for getting architecture-dependent CPU ID. + * @get_paging_enabled: Callback for inquiring whether paging is enabled. + * @get_memory_mapping: Callback for obtaining the memory mappings. + * @set_pc: Callback for setting the Program Counter register. This + * should have the semantics used by the target architecture when + * setting the PC from a source such as an ELF file entry point; + * for example on Arm it will also set the Thumb mode bit based + * on the least significant bit of the new PC value. + * If the target behaviour here is anything other than "set + * the PC register to the value passed in" then the target must + * also implement the synchronize_from_tb hook. + * @synchronize_from_tb: Callback for synchronizing state from a TCG + * #TranslationBlock. This is called when we abandon execution + * of a TB before starting it, and must set all parts of the CPU + * state which the previous TB in the chain may not have updated. + * This always includes at least the program counter; some targets + * will need to do more. If this hook is not implemented then the + * default is to call @set_pc(tb->pc). + * @tlb_fill: Callback for handling a softmmu tlb miss or user-only + * address fault. For system mode, if the access is valid, call + * tlb_set_page and return true; if the access is invalid, and + * probe is true, return false; otherwise raise an exception and + * do not return. For user-only mode, always raise an exception + * and do not return. + * @get_phys_page_debug: Callback for obtaining a physical address. + * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the + * associated memory transaction attributes to use for the access. + * CPUs which use memory transaction attributes should implement this + * instead of get_phys_page_debug. + * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for + * a memory access with the specified memory transaction attributes. + * @debug_check_watchpoint: Callback: return true if the architectural + * watchpoint whose address has matched should really fire. + * @debug_excp_handler: Callback for handling debug exceptions. + * @cpu_exec_enter: Callback for cpu_exec preparation. + * @cpu_exec_exit: Callback for cpu_exec cleanup. + * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec. + * @adjust_watchpoint_address: Perform a target-specific adjustment to an + * address before attempting to match it against watchpoints. + * + * Represents a CPU family or model. + */ +typedef struct CPUClass { + /* no DeviceClass->reset(), add here. 
*/ + void (*reset)(CPUState *cpu); + bool (*has_work)(CPUState *cpu); + void (*do_interrupt)(CPUState *cpu); + void (*do_unaligned_access)(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); + int64_t (*get_arch_id)(CPUState *cpu); + bool (*get_paging_enabled)(const CPUState *cpu); + void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list); + void (*set_pc)(CPUState *cpu, vaddr value); + void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb); + bool (*tlb_fill)(CPUState *cpu, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr); + hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr, + MemTxAttrs *attrs); + int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs); + bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp); + void (*debug_excp_handler)(CPUState *cpu); + + void (*cpu_exec_enter)(CPUState *cpu); + void (*cpu_exec_exit)(CPUState *cpu); + bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); + + vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len); + void (*tcg_initialize)(struct uc_struct *uc); +} CPUClass; + +/* + * Low 16 bits: number of cycles left, used only in icount mode. + * High 16 bits: Set to -1 to force TCG to stop executing linked TBs + * for this CPU and return to its top level loop (even in non-icount mode). + * This allows a single read-compare-cbranch-write sequence to test + * for both decrementer underflow and exceptions. + */ +typedef union IcountDecr { + uint32_t u32; + struct { +#ifdef HOST_WORDS_BIGENDIAN + uint16_t high; + uint16_t low; +#else + uint16_t low; + uint16_t high; +#endif + } u16; +} IcountDecr; + +typedef struct CPUBreakpoint { + vaddr pc; + int flags; /* BP_* */ + QTAILQ_ENTRY(CPUBreakpoint) entry; +} CPUBreakpoint; + +struct CPUWatchpoint { + vaddr vaddr; + vaddr len; + vaddr hitaddr; + MemTxAttrs hitattrs; + int flags; /* BP_* */ + QTAILQ_ENTRY(CPUWatchpoint) entry; +}; + +#define TB_JMP_CACHE_BITS 12 +#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) + +/* work queue */ + +/* The union type allows passing of 64 bit target pointers on 32 bit + * hosts in a single parameter + */ +typedef union { + int host_int; + unsigned long host_ulong; + void *host_ptr; + vaddr target_ptr; +} run_on_cpu_data; + +#define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)}) +#define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)}) +#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)}) +#define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)}) +#define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL) + +typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data); + +struct qemu_work_item; + +#define CPU_UNSET_NUMA_NODE_ID -1 +#define CPU_TRACE_DSTATE_MAX_EVENTS 32 + +/** + * CPUState: + * @cpu_index: CPU index (informative). + * @cluster_index: Identifies which cluster this CPU is in. + * For boards which don't define clusters or for "loose" CPUs not assigned + * to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will + * be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER + * QOM parent. + * @nr_cores: Number of cores within this CPU package. + * @nr_threads: Number of threads within this CPU. + * @running: #true if CPU is currently running (lockless). 
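The run_on_cpu_data union above lets a single callback type carry a host int, a host pointer, or a full 64-bit guest address even on a 32-bit host; the RUN_ON_CPU_* macros construct it in place. A hypothetical work item:

static void dump_at(CPUState *cpu, run_on_cpu_data data)   /* a run_on_cpu_func */
{
    vaddr where = data.target_ptr;   /* read back the member the caller stored */
    (void)cpu; (void)where;
}
/* built at the call site with e.g. RUN_ON_CPU_TARGET_PTR(0x8000) */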
+ * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end; + * valid under cpu_list_lock. + * @created: Indicates whether the CPU thread has been successfully created. + * @interrupt_request: Indicates a pending interrupt request. + * @halted: Nonzero if the CPU is in suspended state. + * @stop: Indicates a pending stop request. + * @stopped: Indicates the CPU has been artificially stopped. + * @unplug: Indicates a pending CPU unplug request. + * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU + * @singlestep_enabled: Flags for single-stepping. + * @icount_extra: Instructions until next timer event. + * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution + * requires that IO only be performed on the last instruction of a TB + * so that interrupts take effect immediately. + * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the + * AddressSpaces this CPU has) + * @num_ases: number of CPUAddressSpaces in @cpu_ases + * @as: Pointer to the first AddressSpace, for the convenience of targets which + * only have a single AddressSpace + * @env_ptr: Pointer to subclass-specific CPUArchState field. + * @icount_decr_ptr: Pointer to IcountDecr field within subclass. + * @next_cpu: Next CPU sharing TB cache. + * @opaque: User data. + * @mem_io_pc: Host Program Counter at which the memory was accessed. + * @work_mutex: Lock to prevent multiple access to queued_work_*. + * @queued_work_first: First asynchronous work pending. + * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes + * to @trace_dstate). + * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask). + * @ignore_memory_transaction_failures: Cached copy of the MachineState + * flag of the same name: allows the board to suppress calling of the + * CPU do_transaction_failed hook function. + * + * State of one CPU core or thread. + */ +struct CPUState { + int nr_cores; + int nr_threads; + + struct QemuThread *thread; +#ifdef _WIN32 + HANDLE hThread; +#endif +#if 0 + int thread_id; + bool running, has_waiter; + struct QemuCond *halt_cond; + bool thread_kicked; +#endif + bool created; + bool stop; + bool stopped; + bool unplug; + bool crash_occurred; + bool exit_request; + bool in_exclusive_context; + uint32_t cflags_next_tb; + /* updates protected by BQL */ + uint32_t interrupt_request; + int singlestep_enabled; + int64_t icount_budget; + int64_t icount_extra; + uint64_t random_seed; + sigjmp_buf jmp_env; + + CPUAddressSpace *cpu_ases; + int num_ases; + AddressSpace *as; + MemoryRegion *memory; + + void *env_ptr; /* CPUArchState */ + IcountDecr *icount_decr_ptr; + + /* Accessed in parallel; all accesses must be atomic */ + struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; + + QTAILQ_ENTRY(CPUState) node; + + /* ice debug support */ + QTAILQ_HEAD(, CPUBreakpoint) breakpoints; + + QTAILQ_HEAD(, CPUWatchpoint) watchpoints; + CPUWatchpoint *watchpoint_hit; + + void *opaque; + + /* In order to avoid passing too many arguments to the MMIO helpers, + * we store some rarely used information in the CPU context. + */ + uintptr_t mem_io_pc; + + /* Used for events with 'vcpu' and *without* the 'disabled' properties */ + DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS); + DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS); + + /* TODO Move common fields from CPUArchState here. 
*/ + int cpu_index; + int cluster_index; + uint32_t halted; + uint32_t can_do_io; + int32_t exception_index; + + struct uc_struct* uc; + + /* pointer to CPUArchState.cc */ + struct CPUClass *cc; + + // Set to force TCG to stop executing linked TBs for this + // CPU and return to its top level loop. + volatile sig_atomic_t tcg_exit_req; +}; + +#define CPU(obj) ((CPUState *)(obj)) +#define CPU_CLASS(class) ((CPUClass *)class) +#define CPU_GET_CLASS(obj) (((CPUState *)obj)->cc) + +static inline void cpu_tb_jmp_cache_clear(CPUState *cpu) +{ + unsigned int i; + + for (i = 0; i < TB_JMP_CACHE_SIZE; i++) { + cpu->tb_jmp_cache[i] = NULL; + } +} + +/** + * cpu_paging_enabled: + * @cpu: The CPU whose state is to be inspected. + * + * Returns: %true if paging is enabled, %false otherwise. + */ +bool cpu_paging_enabled(const CPUState *cpu); + +/** + * cpu_get_memory_mapping: + * @cpu: The CPU whose memory mappings are to be obtained. + * @list: Where to write the memory mappings to. + */ +void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list); + +/** + * CPUDumpFlags: + * @CPU_DUMP_CODE: + * @CPU_DUMP_FPU: dump FPU register state, not just integer + * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state + */ +enum CPUDumpFlags { + CPU_DUMP_CODE = 0x00010000, + CPU_DUMP_FPU = 0x00020000, + CPU_DUMP_CCOP = 0x00040000, +}; + +/** + * cpu_get_phys_page_attrs_debug: + * @cpu: The CPU to obtain the physical page address for. + * @addr: The virtual address. + * @attrs: Updated on return with the memory transaction attributes to use + * for this access. + * + * Obtains the physical page corresponding to a virtual one, together + * with the corresponding memory transaction attributes to use for the access. + * Use it only for debugging because no protection checks are done. + * + * Returns: Corresponding physical page address or -1 if no page found. + */ +static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, + MemTxAttrs *attrs) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->get_phys_page_attrs_debug) { + return cc->get_phys_page_attrs_debug(cpu, addr, attrs); + } + /* Fallback for CPUs which don't implement the _attrs_ hook */ + *attrs = MEMTXATTRS_UNSPECIFIED; + return cc->get_phys_page_debug(cpu, addr); +} + +/** + * cpu_get_phys_page_debug: + * @cpu: The CPU to obtain the physical page address for. + * @addr: The virtual address. + * + * Obtains the physical page corresponding to a virtual one. + * Use it only for debugging because no protection checks are done. + * + * Returns: Corresponding physical page address or -1 if no page found. + */ +static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr) +{ + MemTxAttrs attrs = { 0 }; + + return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs); +} + +/** cpu_asidx_from_attrs: + * @cpu: CPU + * @attrs: memory transaction attributes + * + * Returns the address space index specifying the CPU AddressSpace + * to use for a memory access with the given transaction attributes. + */ +static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + int ret = 0; + + if (cc->asidx_from_attrs) { + ret = cc->asidx_from_attrs(cpu, attrs); + assert(ret < cpu->num_ases && ret >= 0); + } + return ret; +} + +/** + * cpu_reset: + * @cpu: The CPU whose state is to be reset. + */ +void cpu_reset(CPUState *cpu); + +/** + * cpu_has_work: + * @cpu: The vCPU to check. + * + * Checks whether the CPU has work to do. 
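+ * (The check is delegated to the target CPU class's has_work hook, as the + * inline implementation below shows.)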
+ * + * Returns: %true if the CPU has work, %false otherwise. + */ +static inline bool cpu_has_work(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + g_assert(cc->has_work); + return cc->has_work(cpu); +} + +/** + * cpu_is_stopped: + * @cpu: The CPU to check. + * + * Checks whether the CPU is stopped. + * + * Returns: %true if run state is not running or if artificially stopped; + * %false otherwise. + */ +bool cpu_is_stopped(CPUState *cpu); + +typedef void (*CPUInterruptHandler)(CPUState *, int); + +extern CPUInterruptHandler cpu_interrupt_handler; + +/** + * cpu_interrupt: + * @cpu: The CPU to set an interrupt on. + * @mask: The interrupts to set. + * + * Invokes the interrupt handler. + */ +static inline void cpu_interrupt(CPUState *cpu, int mask) +{ + cpu_interrupt_handler(cpu, mask); +} + +#ifdef NEED_CPU_H + +static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr); +} + +#endif /* NEED_CPU_H */ + +/** + * cpu_set_pc: + * @cpu: The CPU to set the program counter for. + * @addr: Program counter value. + * + * Sets the program counter for a CPU. + */ +static inline void cpu_set_pc(CPUState *cpu, vaddr addr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + cc->set_pc(cpu, addr); +} + +/** + * cpu_reset_interrupt: + * @cpu: The CPU to clear the interrupt on. + * @mask: The interrupt mask to clear. + * + * Resets interrupts on the vCPU @cpu. + */ +void cpu_reset_interrupt(CPUState *cpu, int mask); + +/** + * cpu_exit: + * @cpu: The CPU to exit. + * + * Requests the CPU @cpu to exit execution. + */ +void cpu_exit(CPUState *cpu); + +/** + * cpu_resume: + * @cpu: The CPU to resume. + * + * Resumes the CPU, i.e. puts the CPU into a runnable state. + */ +void cpu_resume(CPUState *cpu); + +/** + * qemu_init_vcpu: + * @cpu: The vCPU to initialize. + * + * Initializes a vCPU. + */ +void qemu_init_vcpu(CPUState *cpu); + +#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */ +#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */ +#define SSTEP_NOTIMER 0x4 /* Do not use Timers while single stepping */ + +/* Breakpoint/watchpoint flags */ +#define BP_MEM_READ 0x01 +#define BP_MEM_WRITE 0x02 +#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE) +#define BP_STOP_BEFORE_ACCESS 0x04 +/* 0x08 currently unused */ +#define BP_GDB 0x10 +#define BP_CPU 0x20 +#define BP_ANY (BP_GDB | BP_CPU) +#define BP_WATCHPOINT_HIT_READ 0x40 +#define BP_WATCHPOINT_HIT_WRITE 0x80 +#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE) + +int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, + CPUBreakpoint **breakpoint); +int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags); +void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint); +void cpu_breakpoint_remove_all(CPUState *cpu, int mask); + +/* Return true if PC matches an installed breakpoint.
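 * A breakpoint matches when its BP_* flags intersect @mask; for instance, * cpu_breakpoint_test(cpu, pc, BP_ANY) (an illustrative call) tests for both * GDB- and CPU-installed breakpoints.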
*/ +static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask) +{ + CPUBreakpoint *bp; + + if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) { + QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { + if (bp->pc == pc && (bp->flags & mask)) { + return true; + } + } + } + return false; +} + +int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, + int flags, CPUWatchpoint **watchpoint); +int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, + vaddr len, int flags); +void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); +void cpu_watchpoint_remove_all(CPUState *cpu, int mask); +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs attrs, int flags, uintptr_t ra); +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); + +/** + * cpu_get_address_space: + * @cpu: CPU to get address space from + * @asidx: index identifying which address space to get + * + * Return the requested address space of this CPU. @asidx + * specifies which address space to read. + */ +AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx); + +void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...) + GCC_FMT_ATTR(2, 3); +void cpu_exec_initfn(CPUState *cpu); +void cpu_exec_realizefn(CPUState *cpu); +void cpu_exec_unrealizefn(CPUState *cpu); + +/** + * target_words_bigendian: + * Returns true if the (default) endianness of the target is big endian, + * false otherwise. Note that in target-specific code, you can use + * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common + * code should normally never need to know about the endianness of the + * target, so please do *not* use this function unless you know very well + * what you are doing! + */ +bool target_words_bigendian(void); + +/* use original func name. */ +void cpu_class_init(struct uc_struct *uc, CPUClass *k); +void cpu_common_initfn(struct uc_struct *uc, CPUState *cs); + +void cpu_stop(struct uc_struct *uc); + +#define UNASSIGNED_CPU_INDEX -1 +#define UNASSIGNED_CLUSTER_INDEX -1 + +#endif diff --git a/qemu/include/hw/cpu/icc_bus.h b/qemu/include/hw/cpu/icc_bus.h deleted file mode 100644 index cbb9c014..00000000 --- a/qemu/include/hw/cpu/icc_bus.h +++ /dev/null @@ -1,79 +0,0 @@ -/* icc_bus.h - * emulate x86 ICC (Interrupt Controller Communications) bus - * - * Copyright (c) 2013 Red Hat, Inc - * - * Authors: - * Igor Mammedov - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - */ -#ifndef ICC_BUS_H -#define ICC_BUS_H - -#include "exec/memory.h" -#include "hw/qdev-core.h" - -#define TYPE_ICC_BUS "icc-bus" - -#ifndef CONFIG_USER_ONLY - -/** - * ICCBus: - * - * ICC bus - */ -typedef struct ICCBus { - /*< private >*/ - BusState parent_obj; - /*< public >*/ - - MemoryRegion *apic_address_space; -} ICCBus; - -#define ICC_BUS(uc, obj) OBJECT_CHECK(uc, ICCBus, (obj), TYPE_ICC_BUS) - -/** - * ICCDevice: - * - * ICC device - */ -typedef struct ICCDevice { - /*< private >*/ - DeviceState qdev; - /*< public >*/ -} ICCDevice; - -/** - * ICCDeviceClass: - * @init: Initialization callback for derived classes. - * - * ICC device class - */ -typedef struct ICCDeviceClass { - /*< private >*/ - DeviceClass parent_class; - /*< public >*/ - - DeviceRealize realize; -} ICCDeviceClass; - -#define TYPE_ICC_DEVICE "icc-device" -#define ICC_DEVICE_CLASS(uc, klass) \ - OBJECT_CLASS_CHECK(uc, ICCDeviceClass, (klass), TYPE_ICC_DEVICE) - -void icc_bus_register_types(struct uc_struct *uc); - -#endif /* CONFIG_USER_ONLY */ -#endif diff --git a/qemu/include/hw/hw.h b/qemu/include/hw/hw.h deleted file mode 100644 index 54b25b6b..00000000 --- a/qemu/include/hw/hw.h +++ /dev/null @@ -1,41 +0,0 @@ -/* Declarations for use by hardware emulation. */ -#ifndef QEMU_HW_H -#define QEMU_HW_H - -#include "qemu-common.h" - -#if !defined(CONFIG_USER_ONLY) && !defined(NEED_CPU_H) -#include "exec/cpu-common.h" -#endif - -#include "exec/ioport.h" -#include "qemu/log.h" - -#ifdef NEED_CPU_H -#if TARGET_LONG_BITS == 64 -#define qemu_put_betl qemu_put_be64 -#define qemu_get_betl qemu_get_be64 -#define qemu_put_betls qemu_put_be64s -#define qemu_get_betls qemu_get_be64s -#define qemu_put_sbetl qemu_put_sbe64 -#define qemu_get_sbetl qemu_get_sbe64 -#define qemu_put_sbetls qemu_put_sbe64s -#define qemu_get_sbetls qemu_get_sbe64s -#else -#define qemu_put_betl qemu_put_be32 -#define qemu_get_betl qemu_get_be32 -#define qemu_put_betls qemu_put_be32s -#define qemu_get_betls qemu_get_be32s -#define qemu_put_sbetl qemu_put_sbe32 -#define qemu_get_sbetl qemu_get_sbe32 -#define qemu_put_sbetls qemu_put_sbe32s -#define qemu_get_sbetls qemu_get_sbe32s -#endif -#endif - -typedef void QEMUResetHandler(void *opaque); - -void qemu_register_reset(QEMUResetHandler *func, void *opaque); -void qemu_unregister_reset(QEMUResetHandler *func, void *opaque); - -#endif diff --git a/qemu/include/hw/i386/apic.h b/qemu/include/hw/i386/apic.h deleted file mode 100644 index 42b90b94..00000000 --- a/qemu/include/hw/i386/apic.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef APIC_H -#define APIC_H - -#include "qemu-common.h" - -/* apic.c */ -int apic_accept_pic_intr(DeviceState *s); -int apic_get_interrupt(DeviceState *s); -void cpu_set_apic_base(struct uc_struct *uc, DeviceState *s, uint64_t val); -uint64_t cpu_get_apic_base(struct uc_struct *uc, DeviceState *s); -void cpu_set_apic_tpr(struct uc_struct *uc, DeviceState *s, uint8_t val); -uint8_t cpu_get_apic_tpr(struct uc_struct *uc, DeviceState *s); -void apic_init_reset(struct uc_struct *uc, DeviceState *s); -void apic_sipi(DeviceState *s); -void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip, - TPRAccess access); -void apic_poll_irq(DeviceState *d); -void apic_designate_bsp(struct uc_struct *uc, DeviceState *d); - -/* pc.c */ -DeviceState *cpu_get_current_apic(struct uc_struct *uc); - -/* cpu.c */ -bool cpu_is_bsp(X86CPU *cpu); - -void apic_register_types(struct 
uc_struct *uc); -void apic_common_register_types(struct uc_struct *uc); - -#endif diff --git a/qemu/include/hw/i386/apic_internal.h b/qemu/include/hw/i386/apic_internal.h deleted file mode 100644 index b833d147..00000000 --- a/qemu/include/hw/i386/apic_internal.h +++ /dev/null @@ -1,147 +0,0 @@ -/* - * APIC support - internal interfaces - * - * Copyright (c) 2004-2005 Fabrice Bellard - * Copyright (c) 2011 Jan Kiszka, Siemens AG - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - */ -#ifndef QEMU_APIC_INTERNAL_H -#define QEMU_APIC_INTERNAL_H - -#include "exec/memory.h" -#include "hw/cpu/icc_bus.h" -#include "qemu/timer.h" - -/* APIC Local Vector Table */ -#define APIC_LVT_TIMER 0 -#define APIC_LVT_THERMAL 1 -#define APIC_LVT_PERFORM 2 -#define APIC_LVT_LINT0 3 -#define APIC_LVT_LINT1 4 -#define APIC_LVT_ERROR 5 -#define APIC_LVT_NB 6 - -/* APIC delivery modes */ -#define APIC_DM_FIXED 0 -#define APIC_DM_LOWPRI 1 -#define APIC_DM_SMI 2 -#define APIC_DM_NMI 4 -#define APIC_DM_INIT 5 -#define APIC_DM_SIPI 6 -#define APIC_DM_EXTINT 7 - -/* APIC destination mode */ -#define APIC_DESTMODE_FLAT 0xf -#define APIC_DESTMODE_CLUSTER 1 - -#define APIC_TRIGGER_EDGE 0 -#define APIC_TRIGGER_LEVEL 1 - -#define APIC_LVT_TIMER_PERIODIC (1<<17) -#define APIC_LVT_MASKED (1<<16) -#define APIC_LVT_LEVEL_TRIGGER (1<<15) -#define APIC_LVT_REMOTE_IRR (1<<14) -#define APIC_INPUT_POLARITY (1<<13) -#define APIC_SEND_PENDING (1<<12) - -#define ESR_ILLEGAL_ADDRESS (1 << 7) - -#define APIC_SV_DIRECTED_IO (1<<12) -#define APIC_SV_ENABLE (1<<8) - -#define VAPIC_ENABLE_BIT 0 -#define VAPIC_ENABLE_MASK (1 << VAPIC_ENABLE_BIT) - -#define MAX_APICS 255 - -typedef struct APICCommonState APICCommonState; - -#define TYPE_APIC_COMMON "apic-common" -#define APIC_COMMON(uc, obj) \ - OBJECT_CHECK(uc, APICCommonState, (obj), TYPE_APIC_COMMON) -#define APIC_COMMON_CLASS(uc, klass) \ - OBJECT_CLASS_CHECK(uc, APICCommonClass, (klass), TYPE_APIC_COMMON) -#define APIC_COMMON_GET_CLASS(uc, obj) \ - OBJECT_GET_CLASS(uc, APICCommonClass, (obj), TYPE_APIC_COMMON) - -typedef struct APICCommonClass -{ - ICCDeviceClass parent_class; - - DeviceRealize realize; - void (*set_base)(APICCommonState *s, uint64_t val); - void (*set_tpr)(APICCommonState *s, uint8_t val); - uint8_t (*get_tpr)(APICCommonState *s); - void (*enable_tpr_reporting)(APICCommonState *s, bool enable); - void (*vapic_base_update)(APICCommonState *s); - void (*external_nmi)(APICCommonState *s); - void (*pre_save)(APICCommonState *s); - void (*post_load)(APICCommonState *s); - void (*reset)(APICCommonState *s); -} APICCommonClass; - -struct APICCommonState { - ICCDevice busdev; - - MemoryRegion io_memory; - X86CPU *cpu; - uint32_t apicbase; - uint8_t id; - uint8_t version; - uint8_t arb_id; - uint8_t tpr; - uint32_t spurious_vec; - uint8_t log_dest; - uint8_t dest_mode; - uint32_t isr[8]; /* in service register */ - uint32_t tmr[8]; /* trigger mode register */ - uint32_t irr[8]; /* interrupt 
request register */ - uint32_t lvt[APIC_LVT_NB]; - uint32_t esr; /* error register */ - uint32_t icr[2]; - - uint32_t divide_conf; - int count_shift; - uint32_t initial_count; - int64_t initial_count_load_time; - int64_t next_time; - int idx; - QEMUTimer *timer; - int64_t timer_expiry; - int sipi_vector; - int wait_for_sipi; - - uint32_t vapic_control; - DeviceState *vapic; - hwaddr vapic_paddr; /* note: persistence via kvmvapic */ -}; - -QEMU_PACK( typedef struct VAPICState { - uint8_t tpr; - uint8_t isr; - uint8_t zero; - uint8_t irr; - uint8_t enabled; -}) VAPICState; - -extern bool apic_report_tpr_access; - -bool apic_next_timer(APICCommonState *s, int64_t current_time); -void apic_enable_vapic(struct uc_struct *uc, DeviceState *d, hwaddr paddr); - -void vapic_report_tpr_access(DeviceState *dev, CPUState *cpu, target_ulong ip, - TPRAccess access); - -#endif /* !QEMU_APIC_INTERNAL_H */ diff --git a/qemu/include/hw/i386/pc.h b/qemu/include/hw/i386/pc.h deleted file mode 100644 index c149ed7a..00000000 --- a/qemu/include/hw/i386/pc.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef HW_PC_H -#define HW_PC_H - -#include "hw/boards.h" - -/** - * PCMachineState: - */ -struct PCMachineState { - /*< private >*/ - MachineState parent_obj; - - uint64_t max_ram_below_4g; -}; - -#define PC_MACHINE_MAX_RAM_BELOW_4G "max-ram-below-4g" - -/** - * PCMachineClass: - */ -struct PCMachineClass { - /*< private >*/ - MachineClass parent_class; -}; - -typedef struct PCMachineState PCMachineState; -typedef struct PCMachineClass PCMachineClass; - -#define TYPE_PC_MACHINE "generic-pc-machine" -#define PC_MACHINE(uc, obj) \ - OBJECT_CHECK(uc, PCMachineState, (obj), TYPE_PC_MACHINE) -#define PC_MACHINE_GET_CLASS(obj) \ - OBJECT_GET_CLASS(PCMachineClass, (obj), TYPE_PC_MACHINE) -#define PC_MACHINE_CLASS(klass) \ - OBJECT_CLASS_CHECK(PCMachineClass, (klass), TYPE_PC_MACHINE) - -int pc_cpus_init(struct uc_struct *uc, const char *cpu_model); - -FWCfgState *pc_memory_init(MachineState *machine, - MemoryRegion *system_memory, - ram_addr_t begin, - MemoryRegion **ram_memory); -typedef void (*cpu_set_smm_t)(int smm, void *arg); -void cpu_smm_register(cpu_set_smm_t callback, void *arg); - -void pc_machine_register_types(struct uc_struct *uc); -void x86_cpu_register_types(struct uc_struct *uc); - -#define PC_DEFAULT_MACHINE_OPTIONS \ - .max_cpus = 255 - -#endif diff --git a/qemu/include/hw/i386/topology.h b/qemu/include/hw/i386/topology.h new file mode 100644 index 00000000..07239f95 --- /dev/null +++ b/qemu/include/hw/i386/topology.h @@ -0,0 +1,271 @@ +/* + * x86 CPU topology data structures and functions + * + * Copyright (c) 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef HW_I386_TOPOLOGY_H +#define HW_I386_TOPOLOGY_H + +/* This file implements the APIC-ID-based CPU topology enumeration logic, + * described in the following document: + * Intel® 64 Architecture Processor Topology Enumeration + * http://software.intel.com/en-us/articles/intel-64-architecture-processor-topology-enumeration/ + * + * This code should be compatible with AMD's "Extended Method" described at: + * AMD CPUID Specification (Publication #25481) + * Section 3: Multiple Core Calculation + * as long as: + * nr_threads is set to 1; + * OFFSET_IDX is assumed to be 0; + * CPUID Fn8000_0008_ECX[ApicIdCoreIdSize[3:0]] is set to apicid_core_width(). + */ + + +#include "qemu/bitops.h" + +/* APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support + */ +typedef uint32_t apic_id_t; + +typedef struct X86CPUTopoIDs { + unsigned pkg_id; + unsigned node_id; + unsigned die_id; + unsigned core_id; + unsigned smt_id; +} X86CPUTopoIDs; + +typedef struct X86CPUTopoInfo { + unsigned nodes_per_pkg; + unsigned dies_per_pkg; + unsigned cores_per_die; + unsigned threads_per_core; +} X86CPUTopoInfo; + +/* Return the bit width needed for 'count' IDs + */ +static unsigned apicid_bitwidth_for_count(unsigned count) +{ + g_assert(count >= 1); + count -= 1; + return count ? 32 - clz32(count) : 0; +} + +/* Bit width of the SMT_ID (thread ID) field on the APIC ID + */ +static inline unsigned apicid_smt_width(X86CPUTopoInfo *topo_info) +{ + return apicid_bitwidth_for_count(topo_info->threads_per_core); +} + +/* Bit width of the Core_ID field + */ +static inline unsigned apicid_core_width(X86CPUTopoInfo *topo_info) +{ + return apicid_bitwidth_for_count(topo_info->cores_per_die); +} + +/* Bit width of the Die_ID field */ +static inline unsigned apicid_die_width(X86CPUTopoInfo *topo_info) +{ + return apicid_bitwidth_for_count(topo_info->dies_per_pkg); +} + +/* Bit width of the node_id field per socket */ +static inline unsigned apicid_node_width_epyc(X86CPUTopoInfo *topo_info) +{ + return apicid_bitwidth_for_count(MAX(topo_info->nodes_per_pkg, 1)); +} +/* Bit offset of the Core_ID field + */ +static inline unsigned apicid_core_offset(X86CPUTopoInfo *topo_info) +{ + return apicid_smt_width(topo_info); +} + +/* Bit offset of the Die_ID field */ +static inline unsigned apicid_die_offset(X86CPUTopoInfo *topo_info) +{ + return apicid_core_offset(topo_info) + apicid_core_width(topo_info); +} + +/* Bit offset of the Pkg_ID (socket ID) field + */ +static inline unsigned apicid_pkg_offset(X86CPUTopoInfo *topo_info) +{ + return apicid_die_offset(topo_info) + apicid_die_width(topo_info); +} + +#define NODE_ID_OFFSET 3 /* Minimum node_id offset if numa configured */ + +/* + * Bit offset of the node_id field + * + * Callers must set nodes_per_pkg > 0 when NUMA is configured, and 0 otherwise.
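 * (With NUMA configured, the MAX() below keeps the node_id field at bit * NODE_ID_OFFSET or above; without NUMA, the natural offset is used as is.)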
+ */ +static inline unsigned apicid_node_offset_epyc(X86CPUTopoInfo *topo_info) +{ + unsigned offset = apicid_die_offset(topo_info) + + apicid_die_width(topo_info); + + if (topo_info->nodes_per_pkg) { + return MAX(NODE_ID_OFFSET, offset); + } else { + return offset; + } +} + +/* Bit offset of the Pkg_ID (socket ID) field */ +static inline unsigned apicid_pkg_offset_epyc(X86CPUTopoInfo *topo_info) +{ + return apicid_node_offset_epyc(topo_info) + + apicid_node_width_epyc(topo_info); +} + +/* + * Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID + * + * The caller must make sure core_id < nr_cores and smt_id < nr_threads. + */ +static inline apic_id_t +x86_apicid_from_topo_ids_epyc(X86CPUTopoInfo *topo_info, + const X86CPUTopoIDs *topo_ids) +{ + return (topo_ids->pkg_id << apicid_pkg_offset_epyc(topo_info)) | + (topo_ids->node_id << apicid_node_offset_epyc(topo_info)) | + (topo_ids->die_id << apicid_die_offset(topo_info)) | + (topo_ids->core_id << apicid_core_offset(topo_info)) | + topo_ids->smt_id; +} + +static inline void x86_topo_ids_from_idx_epyc(X86CPUTopoInfo *topo_info, + unsigned cpu_index, + X86CPUTopoIDs *topo_ids) +{ + unsigned nr_nodes = MAX(topo_info->nodes_per_pkg, 1); + unsigned nr_dies = topo_info->dies_per_pkg; + unsigned nr_cores = topo_info->cores_per_die; + unsigned nr_threads = topo_info->threads_per_core; + unsigned cores_per_node = DIV_ROUND_UP((nr_dies * nr_cores * nr_threads), + nr_nodes); + + topo_ids->pkg_id = cpu_index / (nr_dies * nr_cores * nr_threads); + topo_ids->node_id = (cpu_index / cores_per_node) % nr_nodes; + topo_ids->die_id = cpu_index / (nr_cores * nr_threads) % nr_dies; + topo_ids->core_id = cpu_index / nr_threads % nr_cores; + topo_ids->smt_id = cpu_index % nr_threads; +} + +/* + * Calculate thread/core/package IDs for a specific topology, + * based on APIC ID + */ +static inline void x86_topo_ids_from_apicid_epyc(apic_id_t apicid, + X86CPUTopoInfo *topo_info, + X86CPUTopoIDs *topo_ids) +{ + topo_ids->smt_id = apicid & + ~(0xFFFFFFFFUL << apicid_smt_width(topo_info)); + topo_ids->core_id = + (apicid >> apicid_core_offset(topo_info)) & + ~(0xFFFFFFFFUL << apicid_core_width(topo_info)); + topo_ids->die_id = + (apicid >> apicid_die_offset(topo_info)) & + ~(0xFFFFFFFFUL << apicid_die_width(topo_info)); + topo_ids->node_id = + (apicid >> apicid_node_offset_epyc(topo_info)) & + ~(0xFFFFFFFFUL << apicid_node_width_epyc(topo_info)); + topo_ids->pkg_id = apicid >> apicid_pkg_offset_epyc(topo_info); +} + +/* + * Make APIC ID for the CPU 'cpu_index' + * + * 'cpu_index' is a sequential, contiguous ID for the CPU. + */ +static inline apic_id_t x86_apicid_from_cpu_idx_epyc(X86CPUTopoInfo *topo_info, + unsigned cpu_index) +{ + X86CPUTopoIDs topo_ids; + x86_topo_ids_from_idx_epyc(topo_info, cpu_index, &topo_ids); + return x86_apicid_from_topo_ids_epyc(topo_info, &topo_ids); +} +/* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID + * + * The caller must make sure core_id < nr_cores and smt_id < nr_threads. 
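+ * + * Worked example (illustrative values, not part of the original header): + * with 2 threads/core, 4 cores/die and 1 die/pkg, apicid_smt_width() is 1, + * apicid_core_width() is 2 and apicid_die_width() is 0, so Pkg_ID starts at + * bit 3; pkg_id=1, die_id=0, core_id=2 and smt_id=1 then yield + * (1 << 3) | (2 << 1) | 1 = 13.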
+ */ +static inline apic_id_t x86_apicid_from_topo_ids(X86CPUTopoInfo *topo_info, + const X86CPUTopoIDs *topo_ids) +{ + return (topo_ids->pkg_id << apicid_pkg_offset(topo_info)) | + (topo_ids->die_id << apicid_die_offset(topo_info)) | + (topo_ids->core_id << apicid_core_offset(topo_info)) | + topo_ids->smt_id; +} + +/* Calculate thread/core/package IDs for a specific topology, + * based on (contiguous) CPU index + */ +static inline void x86_topo_ids_from_idx(X86CPUTopoInfo *topo_info, + unsigned cpu_index, + X86CPUTopoIDs *topo_ids) +{ + unsigned nr_dies = topo_info->dies_per_pkg; + unsigned nr_cores = topo_info->cores_per_die; + unsigned nr_threads = topo_info->threads_per_core; + + topo_ids->pkg_id = cpu_index / (nr_dies * nr_cores * nr_threads); + topo_ids->die_id = cpu_index / (nr_cores * nr_threads) % nr_dies; + topo_ids->core_id = cpu_index / nr_threads % nr_cores; + topo_ids->smt_id = cpu_index % nr_threads; +} + +/* Calculate thread/core/package IDs for a specific topology, + * based on APIC ID + */ +static inline void x86_topo_ids_from_apicid(apic_id_t apicid, + X86CPUTopoInfo *topo_info, + X86CPUTopoIDs *topo_ids) +{ + topo_ids->smt_id = apicid & + ~(0xFFFFFFFFUL << apicid_smt_width(topo_info)); + topo_ids->core_id = + (apicid >> apicid_core_offset(topo_info)) & + ~(0xFFFFFFFFUL << apicid_core_width(topo_info)); + topo_ids->die_id = + (apicid >> apicid_die_offset(topo_info)) & + ~(0xFFFFFFFFUL << apicid_die_width(topo_info)); + topo_ids->pkg_id = apicid >> apicid_pkg_offset(topo_info); +} + +/* Make APIC ID for the CPU 'cpu_index' + * + * 'cpu_index' is a sequential, contiguous ID for the CPU. + */ +static inline apic_id_t x86_apicid_from_cpu_idx(X86CPUTopoInfo *topo_info, + unsigned cpu_index) +{ + X86CPUTopoIDs topo_ids; + x86_topo_ids_from_idx(topo_info, cpu_index, &topo_ids); + return x86_apicid_from_topo_ids(topo_info, &topo_ids); +} + +#endif /* HW_I386_TOPOLOGY_H */ diff --git a/qemu/include/hw/m68k/m68k.h b/qemu/include/hw/m68k/m68k.h deleted file mode 100644 index 893da01e..00000000 --- a/qemu/include/hw/m68k/m68k.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef HW_M68K_H -#define HW_M68K_H - -#include "uc_priv.h" - -void dummy_m68k_machine_init(struct uc_struct *uc); - -void m68k_cpu_register_types(void *opaque); - -#endif diff --git a/qemu/include/hw/mips/cpudevs.h b/qemu/include/hw/mips/cpudevs.h index ce6b4878..291f5928 100644 --- a/qemu/include/hw/mips/cpudevs.h +++ b/qemu/include/hw/mips/cpudevs.h @@ -1,10 +1,21 @@ #ifndef HW_MIPS_CPUDEVS_H #define HW_MIPS_CPUDEVS_H + +#include "target/mips/cpu-qom.h" + /* Definitions for MIPS CPU internal devices. 
*/ -/* mips_addr.c */ +/* addr.c */ uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr); uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr); uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr); +bool mips_um_ksegs_enabled(void); +void mips_um_ksegs_enable(void); + +/* mips_int.c */ +void cpu_mips_irq_init_cpu(MIPSCPU *cpu); + +/* mips_timer.c */ +void cpu_mips_clock_init(MIPSCPU *cpu); #endif diff --git a/qemu/include/hw/mips/mips.h b/qemu/include/hw/mips/mips.h deleted file mode 100644 index 94a57dd4..00000000 --- a/qemu/include/hw/mips/mips.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef HW_MIPS_H -#define HW_MIPS_H - -void mips_machine_init(struct uc_struct *uc); -void mips_cpu_register_types(void *opaque); - -#endif diff --git a/qemu/include/hw/ppc/ppc.h b/qemu/include/hw/ppc/ppc.h new file mode 100644 index 00000000..93e614cf --- /dev/null +++ b/qemu/include/hw/ppc/ppc.h @@ -0,0 +1,115 @@ +#ifndef HW_PPC_H +#define HW_PPC_H + +#include "target/ppc/cpu-qom.h" + +void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level); +PowerPCCPU *ppc_get_vcpu_by_pir(int pir); +int ppc_cpu_pir(PowerPCCPU *cpu); + +/* PowerPC hardware exceptions management helpers */ +typedef void (*clk_setup_cb)(void *opaque, uint32_t freq); +typedef struct clk_setup_t clk_setup_t; +struct clk_setup_t { + clk_setup_cb cb; + void *opaque; +}; +static inline void clk_setup (clk_setup_t *clk, uint32_t freq) +{ + if (clk->cb != NULL) + (*clk->cb)(clk->opaque, freq); +} + +struct ppc_tb_t { + /* Time base management */ + int64_t tb_offset; /* Compensation */ + int64_t atb_offset; /* Compensation */ + int64_t vtb_offset; + uint32_t tb_freq; /* TB frequency */ + /* Decrementer management */ + uint64_t decr_next; /* Tick for next decr interrupt */ + uint32_t decr_freq; /* decrementer frequency */ + QEMUTimer *decr_timer; + /* Hypervisor decrementer management */ + uint64_t hdecr_next; /* Tick for next hdecr interrupt */ + QEMUTimer *hdecr_timer; + int64_t purr_offset; + void *opaque; + uint32_t flags; +}; + +/* PPC Timers flags */ +#define PPC_TIMER_BOOKE (1 << 0) /* Enable Booke support */ +#define PPC_TIMER_E500 (1 << 1) /* Enable e500 support */ +#define PPC_DECR_UNDERFLOW_TRIGGERED (1 << 2) /* Decr interrupt triggered when + * the most significant bit + * changes from 0 to 1. + */ +#define PPC_DECR_ZERO_TRIGGERED (1 << 3) /* Decr interrupt triggered when + * the decrementer reaches zero. + */ +#define PPC_DECR_UNDERFLOW_LEVEL (1 << 4) /* Decr interrupt active when + * the most significant bit is 1. 
+ */ + +uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset); +clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq); +/* Embedded PowerPC DCR management */ +typedef uint32_t (*dcr_read_cb)(void *opaque, int dcrn); +typedef void (*dcr_write_cb)(void *opaque, int dcrn, uint32_t val); +int ppc_dcr_init (CPUPPCState *env, int (*dcr_read_error)(int dcrn), + int (*dcr_write_error)(int dcrn)); +int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque, + dcr_read_cb drc_read, dcr_write_cb dcr_write); +clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq, + unsigned int decr_excp); + +/* Embedded PowerPC reset */ +void ppc40x_core_reset(PowerPCCPU *cpu); +void ppc40x_chip_reset(PowerPCCPU *cpu); +void ppc40x_system_reset(PowerPCCPU *cpu); + +#if defined(CONFIG_USER_ONLY) +static inline void ppc40x_irq_init(PowerPCCPU *cpu) {} +static inline void ppc6xx_irq_init(PowerPCCPU *cpu) {} +static inline void ppc970_irq_init(PowerPCCPU *cpu) {} +static inline void ppcPOWER7_irq_init(PowerPCCPU *cpu) {} +static inline void ppcPOWER9_irq_init(PowerPCCPU *cpu) {} +static inline void ppce500_irq_init(PowerPCCPU *cpu) {} +static inline void ppc_irq_reset(PowerPCCPU *cpu) {} +#else +void ppc40x_irq_init(PowerPCCPU *cpu); +void ppce500_irq_init(PowerPCCPU *cpu); +void ppc6xx_irq_init(PowerPCCPU *cpu); +void ppc970_irq_init(PowerPCCPU *cpu); +void ppcPOWER7_irq_init(PowerPCCPU *cpu); +void ppcPOWER9_irq_init(PowerPCCPU *cpu); +void ppc_irq_reset(PowerPCCPU *cpu); +#endif + +/* PPC machines for OpenBIOS */ +enum { + ARCH_PREP = 0, + ARCH_MAC99, + ARCH_HEATHROW, + ARCH_MAC99_U3, +}; + +#define FW_CFG_PPC_WIDTH (FW_CFG_ARCH_LOCAL + 0x00) +#define FW_CFG_PPC_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01) +#define FW_CFG_PPC_DEPTH (FW_CFG_ARCH_LOCAL + 0x02) +#define FW_CFG_PPC_TBFREQ (FW_CFG_ARCH_LOCAL + 0x03) +#define FW_CFG_PPC_CLOCKFREQ (FW_CFG_ARCH_LOCAL + 0x04) +#define FW_CFG_PPC_IS_KVM (FW_CFG_ARCH_LOCAL + 0x05) +#define FW_CFG_PPC_KVM_HC (FW_CFG_ARCH_LOCAL + 0x06) +#define FW_CFG_PPC_KVM_PID (FW_CFG_ARCH_LOCAL + 0x07) +#define FW_CFG_PPC_NVRAM_ADDR (FW_CFG_ARCH_LOCAL + 0x08) +#define FW_CFG_PPC_BUSFREQ (FW_CFG_ARCH_LOCAL + 0x09) +#define FW_CFG_PPC_NVRAM_FLAT (FW_CFG_ARCH_LOCAL + 0x0a) +#define FW_CFG_PPC_VIACONFIG (FW_CFG_ARCH_LOCAL + 0x0b) + +#define PPC_SERIAL_MM_BAUDBASE 399193 + +/* ppc_booke.c */ +void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags); +#endif diff --git a/qemu/include/hw/qdev-core.h b/qemu/include/hw/qdev-core.h deleted file mode 100644 index 9c9b1e3c..00000000 --- a/qemu/include/hw/qdev-core.h +++ /dev/null @@ -1,353 +0,0 @@ -#ifndef QDEV_CORE_H -#define QDEV_CORE_H - -#include "qemu/queue.h" -#include "qemu/typedefs.h" -#include "qemu/bitmap.h" -#include "qom/object.h" -#include "qapi/error.h" - -enum { - DEV_NVECTORS_UNSPECIFIED = -1, -}; - -#define TYPE_DEVICE "device" -#define DEVICE(uc, obj) OBJECT_CHECK(uc, DeviceState, (obj), TYPE_DEVICE) -#define DEVICE_CLASS(uc, klass) OBJECT_CLASS_CHECK(uc, DeviceClass, (klass), TYPE_DEVICE) -#define DEVICE_GET_CLASS(uc, obj) OBJECT_GET_CLASS(uc, DeviceClass, (obj), TYPE_DEVICE) - -typedef enum DeviceCategory { - DEVICE_CATEGORY_BRIDGE, - DEVICE_CATEGORY_USB, - DEVICE_CATEGORY_STORAGE, - DEVICE_CATEGORY_NETWORK, - DEVICE_CATEGORY_INPUT, - DEVICE_CATEGORY_DISPLAY, - DEVICE_CATEGORY_SOUND, - DEVICE_CATEGORY_MISC, - DEVICE_CATEGORY_MAX -} DeviceCategory; - -typedef int (*qdev_initfn)(DeviceState *dev); -typedef int (*qdev_event)(DeviceState *dev); -typedef void (*qdev_resetfn)(DeviceState 
*dev); -typedef int (*DeviceRealize)(struct uc_struct *uc, DeviceState *dev, Error **errp); -typedef void (*DeviceUnrealize)(DeviceState *dev, Error **errp); -typedef void (*BusRealize)(BusState *bus, Error **errp); -typedef void (*BusUnrealize)(BusState *bus, Error **errp); - -struct VMStateDescription; - -/** - * DeviceClass: - * @props: Properties accessing state fields. - * @realize: Callback function invoked when the #DeviceState:realized - * property is changed to %true. The default invokes @init if not %NULL. - * @unrealize: Callback function invoked when the #DeviceState:realized - * property is changed to %false. - * @init: Callback function invoked when the #DeviceState::realized property - * is changed to %true. Deprecated, new types inheriting directly from - * TYPE_DEVICE should use @realize instead, new leaf types should consult - * their respective parent type. - * @hotpluggable: indicates if #DeviceClass is hotpluggable, available - * as readonly "hotpluggable" property of #DeviceState instance - * - * # Realization # - * Devices are constructed in two stages, - * 1) object instantiation via object_initialize() and - * 2) device realization via #DeviceState:realized property. - * The former may not fail (it might assert or exit), the latter may return - * error information to the caller and must be re-entrant. - * Trivial field initializations should go into #TypeInfo.instance_init. - * Operations depending on @props static properties should go into @realize. - * After successful realization, setting static properties will fail. - * - * As an interim step, the #DeviceState:realized property is set by deprecated - * functions qdev_init() and qdev_init_nofail(). - * In the future, devices will propagate this state change to their children - * and along busses they expose. - * The point in time will be deferred to machine creation, so that values - * set in @realize will not be introspectable beforehand. Therefore devices - * must not create children during @realize; they should initialize them via - * object_initialize() in their own #TypeInfo.instance_init and forward the - * realization events appropriately. - * - * The @init callback is considered private to a particular bus implementation - * (immediate abstract child types of TYPE_DEVICE). Derived leaf types set an - * "init" callback on their parent class instead. - * - * Any type may override the @realize and/or @unrealize callbacks but needs - * to call the parent type's implementation if keeping their functionality - * is desired. Refer to QOM documentation for further discussion and examples. - * - * - * - * If a type derived directly from TYPE_DEVICE implements @realize, it does - * not need to implement @init and therefore does not need to store and call - * #DeviceClass' default @realize callback. - * For other types consult the documentation and implementation of the - * respective parent types. - * - * - */ -typedef struct DeviceClass { - /*< private >*/ - ObjectClass parent_class; - /*< public >*/ - - DECLARE_BITMAP(categories, DEVICE_CATEGORY_MAX); - const char *fw_name; - const char *desc; - Property *props; - - /* - * Shall we hide this device model from -device / device_add? - * All devices should support instantiation with device_add, and - * this flag should not exist. But we're not there, yet. Some - * devices fail to instantiate with cryptic error messages. - * Others instantiate, but don't work. Exposing users to such - * behavior would be cruel; this flag serves to protect them. 
It - * should never be set without a comment explaining why it is set. - * TODO remove once we're there - */ - bool cannot_instantiate_with_device_add_yet; - bool hotpluggable; - - /* callbacks */ - void (*reset)(struct uc_struct *uc, DeviceState *dev); - DeviceRealize realize; - DeviceUnrealize unrealize; - - /* device state */ - const struct VMStateDescription *vmsd; - - /* Private to qdev / bus. */ - qdev_initfn init; /* TODO remove, once users are converted to realize */ - qdev_event exit; /* TODO remove, once users are converted to unrealize */ - const char *bus_type; -} DeviceClass; - -typedef struct NamedGPIOList NamedGPIOList; - -struct NamedGPIOList { - char *name; - int num_in; - int num_out; - QLIST_ENTRY(NamedGPIOList) node; -}; - -/** - * DeviceState: - * @realized: Indicates whether the device has been fully constructed. - * - * This structure should not be accessed directly. We declare it here - * so that it can be embedded in individual device state structures. - */ -struct DeviceState { - /*< private >*/ - Object parent_obj; - /*< public >*/ - - const char *id; - bool realized; - bool pending_deleted_event; - int hotplugged; - BusState *parent_bus; - QLIST_HEAD(, NamedGPIOList) gpios; - QLIST_HEAD(, BusState) child_bus; - int num_child_bus; - int instance_id_alias; - int alias_required_for_version; -}; - -#define TYPE_BUS "bus" -#define BUS(uc, obj) OBJECT_CHECK(uc, BusState, (obj), TYPE_BUS) -#define BUS_CLASS(klass) OBJECT_CLASS_CHECK(BusClass, (klass), TYPE_BUS) -#define BUS_GET_CLASS(obj) OBJECT_GET_CLASS(BusClass, (obj), TYPE_BUS) - -struct BusClass { - ObjectClass parent_class; - - /* FIXME first arg should be BusState */ - char *(*get_dev_path)(DeviceState *dev); - /* - * This callback is used to create Open Firmware device path in accordance - * with OF spec http://forthworks.com/standards/of1275.pdf. Individual bus - * bindings can be found at http://playground.sun.com/1275/bindings/. - */ - char *(*get_fw_dev_path)(DeviceState *dev); - void (*reset)(BusState *bus); - BusRealize realize; - BusUnrealize unrealize; - - /* maximum devices allowed on the bus, 0: no limit. */ - int max_dev; - /* number of automatically allocated bus ids (e.g. ide.0) */ - int automatic_ids; -}; - -typedef struct BusChild { - DeviceState *child; - int index; - QTAILQ_ENTRY(BusChild) sibling; -} BusChild; - -#define QDEV_HOTPLUG_HANDLER_PROPERTY "hotplug-handler" - -/** - * BusState: - * @hotplug_device: link to a hotplug device associated with bus. - */ -struct BusState { - Object obj; - DeviceState *parent; - const char *name; - int max_index; - bool realized; - QTAILQ_HEAD(ChildrenHead, BusChild) children; - QLIST_ENTRY(BusState) sibling; -}; - -struct Property { - const char *name; - PropertyInfo *info; - int offset; - uint8_t bitnr; - uint8_t qtype; - int64_t defval; - int arrayoffset; - PropertyInfo *arrayinfo; - int arrayfieldsize; -}; - -struct PropertyInfo { - const char *name; - const char *description; - const char **enum_table; - int (*print)(DeviceState *dev, Property *prop, char *dest, size_t len); - ObjectPropertyAccessor *get; - ObjectPropertyAccessor *set; - ObjectPropertyRelease *release; -}; - -/** - * GlobalProperty: - * @user_provided: Set to true if property comes from user-provided config - * (command-line or config file). - * @used: Set to true if property was used when initializing a device. 
- */ -typedef struct GlobalProperty { - const char *driver; - const char *property; - const char *value; - bool user_provided; - bool used; - QTAILQ_ENTRY(GlobalProperty) next; -} GlobalProperty; - -/*** Board API. This should go away once we have a machine config file. ***/ - -DeviceState *qdev_create(BusState *bus, const char *name); -DeviceState *qdev_try_create(BusState *bus, const char *name); -int qdev_init(DeviceState *dev) QEMU_WARN_UNUSED_RESULT; -void qdev_init_nofail(DeviceState *dev); -void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id, - int required_for_version); -void qdev_unplug(DeviceState *dev, Error **errp); -void qdev_machine_creation_done(void); -bool qdev_machine_modified(void); - -BusState *qdev_get_child_bus(DeviceState *dev, const char *name); - -/*** Device API. ***/ - -/* Register device properties. */ -/* GPIO inputs also double as IRQ sinks. */ -void qdev_pass_gpios(DeviceState *dev, DeviceState *container, - const char *name); - -BusState *qdev_get_parent_bus(DeviceState *dev); - -/*** BUS API. ***/ - -DeviceState *qdev_find_recursive(BusState *bus, const char *id); - -/* Returns 0 to walk children, > 0 to skip walk, < 0 to terminate walk. */ -typedef int (qbus_walkerfn)(BusState *bus, void *opaque); -typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque); - -void qbus_create_inplace(void *bus, size_t size, const char *typename_, - DeviceState *parent, const char *name); -BusState *qbus_create(const char *typename_, DeviceState *parent, const char *name); -/* Returns > 0 if either devfn or busfn skip walk somewhere in cursion, - * < 0 if either devfn or busfn terminate walk somewhere in cursion, - * 0 otherwise. */ -int qbus_walk_children(BusState *bus, - qdev_walkerfn *pre_devfn, qbus_walkerfn *pre_busfn, - qdev_walkerfn *post_devfn, qbus_walkerfn *post_busfn, - void *opaque); -int qdev_walk_children(DeviceState *dev, - qdev_walkerfn *pre_devfn, qbus_walkerfn *pre_busfn, - qdev_walkerfn *post_devfn, qbus_walkerfn *post_busfn, - void *opaque); - -void qdev_reset_all(DeviceState *dev); - -/** - * @qbus_reset_all: - * @bus: Bus to be reset. - * - * Reset @bus and perform a bus-level ("hard") reset of all devices connected - * to it, including recursive processing of all buses below @bus itself. A - * hard reset means that qbus_reset_all will reset all state of the device. - * For PCI devices, for example, this will include the base address registers - * or configuration space. - */ -void qbus_reset_all(BusState *bus); -void qbus_reset_all_fn(void *opaque); - -/* This should go away once we get rid of the NULL bus hack */ -BusState *sysbus_get_default(void); - -char *qdev_get_fw_dev_path(DeviceState *dev); - -/** - * @qdev_machine_init - * - * Initialize platform devices before machine init. This is a hack until full - * support for composition is added. - */ -void qdev_machine_init(void); - -/** - * @device_reset - * - * Reset a single device (by calling the reset method). 
- */ -void device_reset(DeviceState *dev); - -const struct VMStateDescription *qdev_get_vmsd(DeviceState *dev); - -const char *qdev_fw_name(DeviceState *dev); - -Object *qdev_get_machine(struct uc_struct *); - -/* FIXME: make this a link<> */ -void qdev_set_parent_bus(DeviceState *dev, BusState *bus); - -extern int qdev_hotplug; - -char *qdev_get_dev_path(DeviceState *dev); - -GSList *qdev_build_hotpluggable_device_list(Object *peripheral); - -void qbus_set_hotplug_handler(BusState *bus, DeviceState *handler, - Error **errp); - -void qbus_set_bus_hotplug_handler(BusState *bus, Error **errp); - -void qdev_register_types(struct uc_struct *uc); - -void sysbus_register_types(struct uc_struct *uc); - -#endif diff --git a/qemu/include/hw/qdev.h b/qemu/include/hw/qdev.h deleted file mode 100644 index 85313af0..00000000 --- a/qemu/include/hw/qdev.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef QDEV_H -#define QDEV_H - -#include "hw/hw.h" -#include "hw/qdev-core.h" - -#endif diff --git a/qemu/include/hw/registerfields.h b/qemu/include/hw/registerfields.h new file mode 100644 index 00000000..686aca12 --- /dev/null +++ b/qemu/include/hw/registerfields.h @@ -0,0 +1,99 @@ +/* + * Register Definition API: field macros + * + * Copyright (c) 2016 Xilinx Inc. + * Copyright (c) 2013 Peter Crosthwaite + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#ifndef REGISTERFIELDS_H +#define REGISTERFIELDS_H + +#include "qemu/bitops.h" + +/* Define constants for a 32 bit register */ + +/* This macro will define A_FOO, for the byte address of a register + * as well as R_FOO for the uint32_t[] register number (A_FOO / 4). + */ +#define REG32(reg, addr) \ + enum { A_ ## reg = (addr) }; \ + enum { R_ ## reg = (addr) / 4 }; + +#define REG8(reg, addr) \ + enum { A_ ## reg = (addr) }; \ + enum { R_ ## reg = (addr) }; + +#define REG16(reg, addr) \ + enum { A_ ## reg = (addr) }; \ + enum { R_ ## reg = (addr) / 2 }; + +/* Define SHIFT, LENGTH and MASK constants for a field within a register */ + +/* This macro will define R_FOO_BAR_MASK, R_FOO_BAR_SHIFT and R_FOO_BAR_LENGTH + * constants for field BAR in register FOO. + */ +#define FIELD(reg, field, shift, length) \ + enum { R_ ## reg ## _ ## field ## _SHIFT = (shift)}; \ + enum { R_ ## reg ## _ ## field ## _LENGTH = (length)}; \ + enum { R_ ## reg ## _ ## field ## _MASK = \ + MAKE_64BIT_MASK(shift, length)}; + +/* Extract a field from a register */ +#define FIELD_EX8(storage, reg, field) \ + extract8((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH) +#define FIELD_EX16(storage, reg, field) \ + extract16((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH) +#define FIELD_EX32(storage, reg, field) \ + extract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH) +#define FIELD_EX64(storage, reg, field) \ + extract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH) + +/* Extract a field from an array of registers */ +#define ARRAY_FIELD_EX32(regs, reg, field) \ + FIELD_EX32((regs)[R_ ## reg], reg, field) + +/* Deposit a register field. + * Assigning values larger than the target field will result in + * compilation warnings.
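+ * + * For example (an illustrative sketch; CTRL and EN are hypothetical names): + * after REG32(CTRL, 0x00) and FIELD(CTRL, EN, 0, 1), + * FIELD_DP32(val, CTRL, EN, 1, val) deposits 1 into bit 0 of val, and + * FIELD_EX32(val, CTRL, EN) extracts it again.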
+ */ +#define FIELD_DP8(storage, reg, field, val, d) { \ + struct { \ + unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \ + } v = { .v = val }; \ + d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH, v.v); \ + } +#define FIELD_DP16(storage, reg, field, val, d) { \ + struct { \ + unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \ + } v = { .v = val }; \ + d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH, v.v); \ + } +#define FIELD_DP32(storage, reg, field, val, d) { \ + struct { \ + unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \ + } v = { .v = val }; \ + d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH, v.v); \ + } +#define FIELD_DP64(storage, reg, field, val, d) { \ + struct { \ + unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \ + } v = { .v = val }; \ + d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \ + R_ ## reg ## _ ## field ## _LENGTH, v.v); \ + } + +/* Deposit a field to array of registers. */ +#define ARRAY_FIELD_DP32(regs, reg, field, val) \ + (regs)[R_ ## reg] = FIELD_DP32((regs)[R_ ## reg], reg, field, val); + +#endif diff --git a/qemu/include/hw/sparc/sparc.h b/qemu/include/hw/sparc/sparc.h deleted file mode 100644 index e4789111..00000000 --- a/qemu/include/hw/sparc/sparc.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef HW_SPARC_H -#define HW_SPARC_H - -void sparc_cpu_register_types(void *opaque); -void leon3_machine_init(struct uc_struct *uc); -void sun4u_machine_init(struct uc_struct *uc); - -#endif diff --git a/qemu/include/libdecnumber/dconfig.h b/qemu/include/libdecnumber/dconfig.h new file mode 100644 index 00000000..0f7dccef --- /dev/null +++ b/qemu/include/libdecnumber/dconfig.h @@ -0,0 +1,39 @@ +/* Configure decNumber for either host or target. + Copyright (C) 2008 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file. (The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. 
*/ + + +#if defined(HOST_WORDS_BIGENDIAN) +#define WORDS_BIGENDIAN 1 +#else +#define WORDS_BIGENDIAN 0 +#endif + +#ifndef DECDPUN +#define DECDPUN 3 +#endif diff --git a/qemu/include/libdecnumber/decContext.h b/qemu/include/libdecnumber/decContext.h new file mode 100644 index 00000000..cea6e427 --- /dev/null +++ b/qemu/include/libdecnumber/decContext.h @@ -0,0 +1,255 @@ +/* Decimal context header module for the decNumber C Library. + Copyright (C) 2005, 2007 Free Software Foundation, Inc. + Contributed by IBM Corporation. Author Mike Cowlishaw. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file. (The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. */ + +/* ------------------------------------------------------------------ */ +/* Decimal Context module header */ +/* ------------------------------------------------------------------ */ +/* */ +/* Context variables must always have valid values: */ +/* */ +/* status -- [any bits may be cleared, but not set, by user] */ +/* round -- must be one of the enumerated rounding modes */ +/* */ +/* The following variables are implied for fixed size formats (i.e., */ +/* they are ignored) but should still be set correctly in case used */ +/* with decNumber functions: */ +/* */ +/* clamp -- must be either 0 or 1 */ +/* digits -- must be in the range 1 through 999999999 */ +/* emax -- must be in the range 0 through 999999999 */ +/* emin -- must be in the range 0 through -999999999 */ +/* extended -- must be either 0 or 1 [present only if DECSUBSET] */ +/* traps -- only defined bits may be set */ +/* */ +/* ------------------------------------------------------------------ */ + +#ifndef DECCONTEXT_H +#define DECCONTEXT_H + + #define DECCNAME "decContext" /* Short name */ + #define DECCFULLNAME "Decimal Context Descriptor" /* Verbose name */ + #define DECCAUTHOR "Mike Cowlishaw" /* Who to blame */ + + + /* Extended flags setting -- set this to 0 to use only IEEE flags */ + #define DECEXTFLAG 1 /* 1=enable extended flags */ + + /* Conditional code flag -- set this to 0 for best performance */ + #define DECSUBSET 0 /* 1=enable subset arithmetic */ + + /* Context for operations, with associated constants */ + enum rounding { + DEC_ROUND_CEILING, /* round towards +infinity */ + DEC_ROUND_UP, /* round away from 0 */ + DEC_ROUND_HALF_UP, /* 0.5 rounds up */ + DEC_ROUND_HALF_EVEN, /* 0.5 rounds to nearest even */ + DEC_ROUND_HALF_DOWN, /* 0.5 rounds down */ + DEC_ROUND_DOWN, /* 
round towards 0 (truncate) */
+ DEC_ROUND_FLOOR, /* round towards -infinity */
+ DEC_ROUND_05UP, /* round for reround */
+ DEC_ROUND_MAX /* enum must be less than this */
+ };
+ #define DEC_ROUND_DEFAULT DEC_ROUND_HALF_EVEN;
+
+ typedef struct {
+ int32_t digits; /* working precision */
+ int32_t emax; /* maximum positive exponent */
+ int32_t emin; /* minimum negative exponent */
+ enum rounding round; /* rounding mode */
+ uint32_t traps; /* trap-enabler flags */
+ uint32_t status; /* status flags */
+ uint8_t clamp; /* flag: apply IEEE exponent clamp */
+ #if DECSUBSET
+ uint8_t extended; /* flag: special-values allowed */
+ #endif
+ } decContext;
+
+ /* Maxima and Minima for context settings */
+ #define DEC_MAX_DIGITS 999999999
+ #define DEC_MIN_DIGITS 1
+ #define DEC_MAX_EMAX 999999999
+ #define DEC_MIN_EMAX 0
+ #define DEC_MAX_EMIN 0
+ #define DEC_MIN_EMIN -999999999
+ #define DEC_MAX_MATH 999999 /* max emax, etc., for math funcs. */
+
+ /* Classifications for decimal numbers, aligned with 754r (note */
+ /* that 'normal' and 'subnormal' are meaningful only with a */
+ /* decContext or a fixed size format). */
+ enum decClass {
+ DEC_CLASS_SNAN,
+ DEC_CLASS_QNAN,
+ DEC_CLASS_NEG_INF,
+ DEC_CLASS_NEG_NORMAL,
+ DEC_CLASS_NEG_SUBNORMAL,
+ DEC_CLASS_NEG_ZERO,
+ DEC_CLASS_POS_ZERO,
+ DEC_CLASS_POS_SUBNORMAL,
+ DEC_CLASS_POS_NORMAL,
+ DEC_CLASS_POS_INF
+ };
+ /* Strings for the decClasses */
+ #define DEC_ClassString_SN "sNaN"
+ #define DEC_ClassString_QN "NaN"
+ #define DEC_ClassString_NI "-Infinity"
+ #define DEC_ClassString_NN "-Normal"
+ #define DEC_ClassString_NS "-Subnormal"
+ #define DEC_ClassString_NZ "-Zero"
+ #define DEC_ClassString_PZ "+Zero"
+ #define DEC_ClassString_PS "+Subnormal"
+ #define DEC_ClassString_PN "+Normal"
+ #define DEC_ClassString_PI "+Infinity"
+ #define DEC_ClassString_UN "Invalid"
+
+ /* Trap-enabler and Status flags (exceptional conditions), and */
+ /* their names. The top byte is reserved for internal use */
+ #if DECEXTFLAG
+ /* Extended flags */
+ #define DEC_Conversion_syntax 0x00000001
+ #define DEC_Division_by_zero 0x00000002
+ #define DEC_Division_impossible 0x00000004
+ #define DEC_Division_undefined 0x00000008
+ #define DEC_Insufficient_storage 0x00000010 /* [when malloc fails] */
+ #define DEC_Inexact 0x00000020
+ #define DEC_Invalid_context 0x00000040
+ #define DEC_Invalid_operation 0x00000080
+ #if DECSUBSET
+ #define DEC_Lost_digits 0x00000100
+ #endif
+ #define DEC_Overflow 0x00000200
+ #define DEC_Clamped 0x00000400
+ #define DEC_Rounded 0x00000800
+ #define DEC_Subnormal 0x00001000
+ #define DEC_Underflow 0x00002000
+ #else
+ /* IEEE flags only */
+ #define DEC_Conversion_syntax 0x00000010
+ #define DEC_Division_by_zero 0x00000002
+ #define DEC_Division_impossible 0x00000010
+ #define DEC_Division_undefined 0x00000010
+ #define DEC_Insufficient_storage 0x00000010 /* [when malloc fails] */
+ #define DEC_Inexact 0x00000001
+ #define DEC_Invalid_context 0x00000010
+ #define DEC_Invalid_operation 0x00000010
+ #if DECSUBSET
+ #define DEC_Lost_digits 0x00000000
+ #endif
+ #define DEC_Overflow 0x00000008
+ #define DEC_Clamped 0x00000000
+ #define DEC_Rounded 0x00000000
+ #define DEC_Subnormal 0x00000000
+ #define DEC_Underflow 0x00000004
+ #endif
+
+ /* IEEE 854 groupings for the flags */
+ /* [DEC_Clamped, DEC_Lost_digits, DEC_Rounded, and DEC_Subnormal */
+ /* are not in IEEE 854] */
+ #define DEC_IEEE_854_Division_by_zero (DEC_Division_by_zero)
+ #if DECSUBSET
+ #define DEC_IEEE_854_Inexact (DEC_Inexact | DEC_Lost_digits)
+ #else
+ #define DEC_IEEE_854_Inexact (DEC_Inexact)
+ #endif
+ #define DEC_IEEE_854_Invalid_operation (DEC_Conversion_syntax | \
+ DEC_Division_impossible | \
+ DEC_Division_undefined | \
+ DEC_Insufficient_storage | \
+ DEC_Invalid_context | \
+ DEC_Invalid_operation)
+ #define DEC_IEEE_854_Overflow (DEC_Overflow)
+ #define DEC_IEEE_854_Underflow (DEC_Underflow)
+
+ /* flags which are normally errors (result is qNaN, infinite, or 0) */
+ #define DEC_Errors (DEC_IEEE_854_Division_by_zero | \
+ DEC_IEEE_854_Invalid_operation | \
+ DEC_IEEE_854_Overflow | DEC_IEEE_854_Underflow)
+ /* flags which cause a result to become qNaN */
+ #define DEC_NaNs DEC_IEEE_854_Invalid_operation
+
+ /* flags which are normally for information only (finite results) */
+ #if DECSUBSET
+ #define DEC_Information (DEC_Clamped | DEC_Rounded | DEC_Inexact \
+ | DEC_Lost_digits)
+ #else
+ #define DEC_Information (DEC_Clamped | DEC_Rounded | DEC_Inexact)
+ #endif
+
+ /* Name strings for the exceptional conditions */
+ #define DEC_Condition_CS "Conversion syntax"
+ #define DEC_Condition_DZ "Division by zero"
+ #define DEC_Condition_DI "Division impossible"
+ #define DEC_Condition_DU "Division undefined"
+ #define DEC_Condition_IE "Inexact"
+ #define DEC_Condition_IS "Insufficient storage"
+ #define DEC_Condition_IC "Invalid context"
+ #define DEC_Condition_IO "Invalid operation"
+ #if DECSUBSET
+ #define DEC_Condition_LD "Lost digits"
+ #endif
+ #define DEC_Condition_OV "Overflow"
+ #define DEC_Condition_PA "Clamped"
+ #define DEC_Condition_RO "Rounded"
+ #define DEC_Condition_SU "Subnormal"
+ #define DEC_Condition_UN "Underflow"
+ #define DEC_Condition_ZE "No status"
+ #define DEC_Condition_MU "Multiple status"
+ #define DEC_Condition_Length 21 /* length of the longest string, */
+ /* including terminator */
+
+ /* Initialization descriptors, used by decContextDefault */
+ #define DEC_INIT_BASE 0
+ #define DEC_INIT_DECIMAL32 32
+ #define DEC_INIT_DECIMAL64 64
+ #define DEC_INIT_DECIMAL128 128
+ /* Synonyms */
+ #define DEC_INIT_DECSINGLE DEC_INIT_DECIMAL32
+ #define DEC_INIT_DECDOUBLE DEC_INIT_DECIMAL64
+ #define DEC_INIT_DECQUAD DEC_INIT_DECIMAL128
+
+ /* decContext routines */
+
+
+ extern decContext * decContextClearStatus(decContext *, uint32_t);
+ extern decContext * decContextDefault(decContext *, int32_t);
+ extern enum rounding decContextGetRounding(decContext *);
+ extern uint32_t decContextGetStatus(decContext *);
+ extern decContext * decContextRestoreStatus(decContext *, uint32_t, uint32_t);
+ extern uint32_t decContextSaveStatus(decContext *, uint32_t);
+ extern decContext * decContextSetRounding(decContext *, enum rounding);
+ extern decContext * decContextSetStatus(decContext *, uint32_t);
+ extern decContext * decContextSetStatusFromString(decContext *, const char *);
+ extern decContext * decContextSetStatusFromStringQuiet(decContext *, const char *);
+ extern decContext * decContextSetStatusQuiet(decContext *, uint32_t);
+ extern const char * decContextStatusToString(const decContext *);
+ extern uint32_t decContextTestSavedStatus(uint32_t, uint32_t);
+ extern uint32_t decContextTestStatus(decContext *, uint32_t);
+ extern decContext * decContextZeroStatus(decContext *);
+
+#endif
diff --git a/qemu/include/libdecnumber/decDPD.h b/qemu/include/libdecnumber/decDPD.h
new file mode 100644
index 00000000..26a21ec8
--- /dev/null
+++ b/qemu/include/libdecnumber/decDPD.h
@@ -0,0 +1,1214 @@
+/* Conversion lookup tables for the decNumber C Library.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Contributed by IBM Corporation. Author Mike Cowlishaw.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ In addition to the permissions in the GNU General Public License,
+ the Free Software Foundation gives you unlimited permission to link
+ the compiled version of this file into combinations with other
+ programs, and to distribute those combinations without any
+ restriction coming from the use of this file. (The General Public
+ License restrictions do apply in other respects; for example, they
+ cover modification of the file, and distribution when not linked
+ into a combine executable.)
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+/* ------------------------------------------------------------------------ */
+/* Binary Coded Decimal and Densely Packed Decimal conversion lookup tables */
+/* [Automatically generated -- do not edit.
2007.05.05] */ +/* ------------------------------------------------------------------------ */ +/* ------------------------------------------------------------------------ */ +/* For details, see: http://www2.hursley.ibm.com/decimal/DPDecimal.html */ + + +/* This include file defines several DPD and BCD conversion tables: */ +/* */ +/* uint16_t BCD2DPD[2458]; -- BCD -> DPD (0x999 => 2457) */ +/* uint16_t BIN2DPD[1000]; -- Bin -> DPD (999 => 2457) */ +/* uint8_t BIN2CHAR[4001]; -- Bin -> CHAR (999 => '\3' '9' '9' '9') */ +/* uint8_t BIN2BCD8[4000]; -- Bin -> bytes (999 => 9 9 9 3) */ +/* uint16_t DPD2BCD[1024]; -- DPD -> BCD (0x3FF => 0x999) */ +/* uint16_t DPD2BIN[1024]; -- DPD -> BIN (0x3FF => 999) */ +/* uint32_t DPD2BINK[1024]; -- DPD -> BIN * 1000 (0x3FF => 999000) */ +/* uint32_t DPD2BINM[1024]; -- DPD -> BIN * 1E+6 (0x3FF => 999000000) */ +/* uint8_t DPD2BCD8[4096]; -- DPD -> bytes (x3FF => 9 9 9 3) */ +/* */ +/* In all cases the result (10 bits or 12 bits, or binary) is right-aligned */ +/* in the table entry. BIN2CHAR entries are a single byte length (0 for */ +/* value 0) followed by three digit characters; a trailing terminator is */ +/* included to allow 4-char moves always. BIN2BCD8 and DPD2BCD8 entries */ +/* are similar with the three BCD8 digits followed by a one-byte length */ +/* (again, length=0 for value 0). */ +/* */ +/* To use a table, its name, prefixed with DEC_, must be defined with a */ +/* value of 1 before this header file is included. For example: */ +/* #define DEC_BCD2DPD 1 */ +/* This mechanism allows software to only include tables that are needed. */ +/* ------------------------------------------------------------------------ */ + +#if defined(DEC_BCD2DPD) && DEC_BCD2DPD==1 && !defined(DECBCD2DPD) +#define DECBCD2DPD + +const uint16_t BCD2DPD[2458]={ 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 0, 0, 0, 0, 0, 0, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 0, 0, 0, 0, 0, 0, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 0, 0, 0, 0, 0, + 0, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 0, 0, + 0, 0, 0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 0, 0, 0, 0, 0, 0, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 0, 0, 0, 0, 0, 0, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 0, 0, 0, 0, 0, 0, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 0, 0, 0, + 0, 0, 0, 10, 11, 42, 43, 74, 75, 106, 107, 78, 79, + 0, 0, 0, 0, 0, 0, 26, 27, 58, 59, 90, 91, 122, + 123, 94, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 0, 0, + 0, 0, 0, 0, 144, 145, 146, 147, 148, 149, 150, 151, 152, + 153, 0, 0, 0, 0, 0, 0, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 0, 0, 0, 0, 0, 0, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 0, 0, 0, 0, 0, 0, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 0, 0, 0, + 0, 0, 0, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 0, 0, 0, 0, 0, 0, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 0, 0, 0, 0, 0, 0, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 0, 0, 0, 0, 0, 0, 138, + 139, 170, 171, 202, 203, 234, 235, 206, 207, 0, 0, 0, 0, + 0, 0, 154, 155, 186, 187, 218, 219, 250, 251, 222, 223, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
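
/* ------------------------------------------------------------------ */
/* Editorial aside, not part of the imported sources: a minimal sketch */
/* of the decContext API declared in decContext.h above. The include   */
/* path "libdecnumber/decContext.h" is an assumption. decContextDefault*/
/* initializes a context from an initialization descriptor, and the    */
/* status word accumulates the DEC_ condition flags defined there.     */
/* ------------------------------------------------------------------ */
#include <stdint.h>
#include <stdio.h>
#include "libdecnumber/decContext.h"

int main(void) {
    decContext ctx;
    decContextDefault(&ctx, DEC_INIT_DECIMAL64);   /* decimal64 defaults */
    decContextSetRounding(&ctx, DEC_ROUND_FLOOR);  /* round towards -inf */
    decContextSetStatusQuiet(&ctx, DEC_Inexact);   /* raise, never trap */
    if (!decContextTestStatus(&ctx, DEC_Errors))   /* no error condition */
        printf("%s\n", decContextStatusToString(&ctx)); /* "Inexact" */
    decContextZeroStatus(&ctx);
    return 0;
}
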
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 0, 0, 0, 0, 0, 0, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 0, + 0, 0, 0, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, + 0, 0, 0, 0, 0, 0, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 0, 0, 0, 0, 0, 0, 320, 321, 322, 323, + 324, 325, 326, 327, 328, 329, 0, 0, 0, 0, 0, 0, 336, + 337, 338, 339, 340, 341, 342, 343, 344, 345, 0, 0, 0, 0, + 0, 0, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 0, + 0, 0, 0, 0, 0, 368, 369, 370, 371, 372, 373, 374, 375, + 376, 377, 0, 0, 0, 0, 0, 0, 266, 267, 298, 299, 330, + 331, 362, 363, 334, 335, 0, 0, 0, 0, 0, 0, 282, 283, + 314, 315, 346, 347, 378, 379, 350, 351, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 384, 385, 386, 387, 388, 389, 390, + 391, 392, 393, 0, 0, 0, 0, 0, 0, 400, 401, 402, 403, + 404, 405, 406, 407, 408, 409, 0, 0, 0, 0, 0, 0, 416, + 417, 418, 419, 420, 421, 422, 423, 424, 425, 0, 0, 0, 0, + 0, 0, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 0, + 0, 0, 0, 0, 0, 448, 449, 450, 451, 452, 453, 454, 455, + 456, 457, 0, 0, 0, 0, 0, 0, 464, 465, 466, 467, 468, + 469, 470, 471, 472, 473, 0, 0, 0, 0, 0, 0, 480, 481, + 482, 483, 484, 485, 486, 487, 488, 489, 0, 0, 0, 0, 0, + 0, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 0, 0, + 0, 0, 0, 0, 394, 395, 426, 427, 458, 459, 490, 491, 462, + 463, 0, 0, 0, 0, 0, 0, 410, 411, 442, 443, 474, 475, + 506, 507, 478, 479, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 0, + 0, 0, 0, 0, 0, 528, 529, 530, 531, 532, 533, 534, 535, + 536, 537, 0, 0, 0, 0, 0, 0, 544, 545, 546, 547, 548, + 549, 550, 551, 552, 553, 0, 0, 0, 0, 0, 0, 560, 561, + 562, 563, 564, 565, 566, 567, 568, 569, 0, 0, 0, 0, 0, + 0, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 0, 0, + 0, 0, 0, 0, 592, 593, 594, 595, 596, 597, 598, 599, 600, + 601, 0, 0, 0, 0, 0, 0, 608, 609, 610, 611, 612, 613, + 614, 615, 616, 617, 0, 0, 0, 0, 0, 0, 624, 625, 626, + 627, 628, 629, 630, 631, 632, 633, 0, 0, 0, 0, 0, 0, + 522, 523, 554, 555, 586, 587, 618, 619, 590, 591, 0, 0, 0, + 0, 0, 0, 538, 539, 570, 571, 602, 603, 634, 635, 606, 607, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 640, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 0, 0, 0, 0, 0, + 0, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 0, 0, + 0, 0, 0, 0, 672, 673, 674, 675, 676, 677, 678, 679, 680, + 681, 0, 0, 0, 0, 0, 0, 688, 689, 690, 691, 692, 693, + 694, 695, 696, 697, 0, 0, 0, 0, 0, 0, 704, 705, 706, + 707, 708, 709, 710, 711, 712, 713, 0, 0, 0, 0, 
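
/* ------------------------------------------------------------------ */
/* Editorial aside, not part of the imported sources: the DEC_ define   */
/* mechanism described in the header comments above lets a consumer     */
/* compile in only the tables it needs. A minimal sketch, assuming the  */
/* header is reachable as "libdecnumber/decDPD.h":                      */
/* ------------------------------------------------------------------ */
#include <stdint.h>
#include <stdio.h>

#define DEC_DPD2BIN 1              /* request only the DPD2BIN table */
#include "libdecnumber/decDPD.h"

int main(void) {
    /* decode one 10-bit declet to its 0..999 binary value */
    printf("%d\n", DPD2BIN[0x3FF]);    /* 999, per the table comments */
    return 0;
}
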
0, 0, + 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 0, 0, 0, + 0, 0, 0, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, + 0, 0, 0, 0, 0, 0, 752, 753, 754, 755, 756, 757, 758, + 759, 760, 761, 0, 0, 0, 0, 0, 0, 650, 651, 682, 683, + 714, 715, 746, 747, 718, 719, 0, 0, 0, 0, 0, 0, 666, + 667, 698, 699, 730, 731, 762, 763, 734, 735, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 768, 769, 770, 771, 772, 773, + 774, 775, 776, 777, 0, 0, 0, 0, 0, 0, 784, 785, 786, + 787, 788, 789, 790, 791, 792, 793, 0, 0, 0, 0, 0, 0, + 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 0, 0, 0, + 0, 0, 0, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, + 0, 0, 0, 0, 0, 0, 832, 833, 834, 835, 836, 837, 838, + 839, 840, 841, 0, 0, 0, 0, 0, 0, 848, 849, 850, 851, + 852, 853, 854, 855, 856, 857, 0, 0, 0, 0, 0, 0, 864, + 865, 866, 867, 868, 869, 870, 871, 872, 873, 0, 0, 0, 0, + 0, 0, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 0, + 0, 0, 0, 0, 0, 778, 779, 810, 811, 842, 843, 874, 875, + 846, 847, 0, 0, 0, 0, 0, 0, 794, 795, 826, 827, 858, + 859, 890, 891, 862, 863, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, + 0, 0, 0, 0, 0, 0, 912, 913, 914, 915, 916, 917, 918, + 919, 920, 921, 0, 0, 0, 0, 0, 0, 928, 929, 930, 931, + 932, 933, 934, 935, 936, 937, 0, 0, 0, 0, 0, 0, 944, + 945, 946, 947, 948, 949, 950, 951, 952, 953, 0, 0, 0, 0, + 0, 0, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 0, + 0, 0, 0, 0, 0, 976, 977, 978, 979, 980, 981, 982, 983, + 984, 985, 0, 0, 0, 0, 0, 0, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 1001, 0, 0, 0, 0, 0, 0, 1008, 1009, + 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 0, 0, 0, 0, 0, + 0, 906, 907, 938, 939, 970, 971, 1002, 1003, 974, 975, 0, 0, + 0, 0, 0, 0, 922, 923, 954, 955, 986, 987, 1018, 1019, 990, + 991, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, + 13, 268, 269, 524, 525, 780, 781, 46, 47, 0, 0, 0, 0, + 0, 0, 28, 29, 284, 285, 540, 541, 796, 797, 62, 63, 0, + 0, 0, 0, 0, 0, 44, 45, 300, 301, 556, 557, 812, 813, + 302, 303, 0, 0, 0, 0, 0, 0, 60, 61, 316, 317, 572, + 573, 828, 829, 318, 319, 0, 0, 0, 0, 0, 0, 76, 77, + 332, 333, 588, 589, 844, 845, 558, 559, 0, 0, 0, 0, 0, + 0, 92, 93, 348, 349, 604, 605, 860, 861, 574, 575, 0, 0, + 0, 0, 0, 0, 108, 109, 364, 365, 620, 621, 876, 877, 814, + 815, 0, 0, 0, 0, 0, 0, 124, 125, 380, 381, 636, 637, + 892, 893, 830, 831, 0, 0, 0, 0, 0, 0, 14, 15, 270, + 271, 526, 527, 782, 783, 110, 111, 0, 0, 0, 0, 0, 0, + 30, 31, 286, 287, 542, 543, 798, 799, 126, 127, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 140, 141, 396, 397, 652, + 653, 908, 909, 174, 175, 0, 0, 0, 0, 0, 0, 156, 157, + 412, 413, 668, 669, 924, 925, 190, 191, 0, 0, 0, 0, 0, + 0, 172, 173, 428, 429, 684, 685, 940, 941, 430, 431, 0, 0, + 0, 0, 0, 0, 188, 189, 444, 445, 700, 701, 956, 957, 446, + 447, 0, 0, 0, 0, 0, 0, 204, 205, 460, 461, 716, 717, + 972, 973, 686, 687, 0, 0, 0, 0, 0, 0, 220, 221, 476, + 477, 732, 733, 988, 989, 702, 703, 0, 0, 0, 0, 0, 0, + 236, 237, 492, 493, 748, 749, 1004, 1005, 942, 943, 0, 0, 0, + 0, 0, 0, 252, 253, 508, 509, 764, 765, 1020, 1021, 958, 959, + 0, 0, 0, 0, 0, 0, 142, 143, 398, 399, 654, 655, 910, + 911, 238, 239, 0, 0, 0, 0, 0, 0, 158, 159, 414, 415, + 670, 671, 926, 927, 254, 255}; +#endif + +#if defined(DEC_DPD2BCD) && DEC_DPD2BCD==1 && !defined(DECDPD2BCD) +#define DECDPD2BCD + +const uint16_t DPD2BCD[1024]={ 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 128, 129, 2048, 2049, 2176, 2177, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 144, 145, 2064, 2065, 2192, 2193, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 130, 131, 2080, 2081, 2056, + 2057, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 146, 147, + 2096, 2097, 2072, 2073, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 132, 133, 2112, 2113, 136, 137, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 148, 149, 2128, 2129, 152, 153, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 134, 135, 2144, 2145, 2184, 2185, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 150, 151, 2160, + 2161, 2200, 2201, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 384, 385, 2304, 2305, 2432, 2433, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 400, 401, 2320, 2321, 2448, 2449, 288, 289, 290, 291, + 292, 293, 294, 295, 296, 297, 386, 387, 2336, 2337, 2312, 2313, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 402, 403, 2352, 2353, + 2328, 2329, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 388, + 389, 2368, 2369, 392, 393, 336, 337, 338, 339, 340, 341, 342, 343, + 344, 345, 404, 405, 2384, 2385, 408, 409, 352, 353, 354, 355, 356, + 357, 358, 359, 360, 361, 390, 391, 2400, 2401, 2440, 2441, 368, 369, + 370, 371, 372, 373, 374, 375, 376, 377, 406, 407, 2416, 2417, 2456, + 2457, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 640, 641, + 2050, 2051, 2178, 2179, 528, 529, 530, 531, 532, 533, 534, 535, 536, + 537, 656, 657, 2066, 2067, 2194, 2195, 544, 545, 546, 547, 548, 549, + 550, 551, 552, 553, 642, 643, 2082, 2083, 2088, 2089, 560, 561, 562, + 563, 564, 565, 566, 567, 568, 569, 658, 659, 2098, 2099, 2104, 2105, + 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 644, 645, 2114, + 2115, 648, 649, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, + 660, 661, 2130, 2131, 664, 665, 608, 609, 610, 611, 612, 613, 614, + 615, 616, 617, 646, 647, 2146, 2147, 2184, 2185, 624, 625, 626, 627, + 628, 629, 630, 631, 632, 633, 662, 663, 2162, 2163, 2200, 2201, 768, + 769, 770, 771, 772, 773, 774, 775, 776, 777, 896, 897, 2306, 2307, + 2434, 2435, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 912, + 913, 2322, 2323, 2450, 2451, 800, 801, 802, 803, 804, 805, 806, 807, + 808, 809, 898, 899, 2338, 2339, 2344, 2345, 816, 817, 818, 819, 820, + 821, 822, 823, 824, 825, 914, 915, 2354, 2355, 2360, 2361, 832, 833, + 834, 835, 836, 837, 838, 839, 840, 841, 900, 901, 2370, 2371, 904, + 905, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 916, 917, + 2386, 2387, 920, 921, 864, 865, 866, 867, 868, 869, 870, 871, 872, + 873, 902, 903, 2402, 2403, 2440, 2441, 880, 881, 882, 
883, 884, 885, + 886, 887, 888, 889, 918, 919, 2418, 2419, 2456, 2457, 1024, 1025, 1026, + 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1152, 1153, 2052, 2053, 2180, 2181, + 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1168, 1169, 2068, + 2069, 2196, 2197, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, + 1154, 1155, 2084, 2085, 2120, 2121, 1072, 1073, 1074, 1075, 1076, 1077, 1078, + 1079, 1080, 1081, 1170, 1171, 2100, 2101, 2136, 2137, 1088, 1089, 1090, 1091, + 1092, 1093, 1094, 1095, 1096, 1097, 1156, 1157, 2116, 2117, 1160, 1161, 1104, + 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1172, 1173, 2132, 2133, + 1176, 1177, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1158, + 1159, 2148, 2149, 2184, 2185, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, + 1144, 1145, 1174, 1175, 2164, 2165, 2200, 2201, 1280, 1281, 1282, 1283, 1284, + 1285, 1286, 1287, 1288, 1289, 1408, 1409, 2308, 2309, 2436, 2437, 1296, 1297, + 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1424, 1425, 2324, 2325, 2452, + 2453, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1410, 1411, + 2340, 2341, 2376, 2377, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, + 1337, 1426, 1427, 2356, 2357, 2392, 2393, 1344, 1345, 1346, 1347, 1348, 1349, + 1350, 1351, 1352, 1353, 1412, 1413, 2372, 2373, 1416, 1417, 1360, 1361, 1362, + 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1428, 1429, 2388, 2389, 1432, 1433, + 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1414, 1415, 2404, + 2405, 2440, 2441, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, + 1430, 1431, 2420, 2421, 2456, 2457, 1536, 1537, 1538, 1539, 1540, 1541, 1542, + 1543, 1544, 1545, 1664, 1665, 2054, 2055, 2182, 2183, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1680, 1681, 2070, 2071, 2198, 2199, 1568, + 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1666, 1667, 2086, 2087, + 2152, 2153, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1682, + 1683, 2102, 2103, 2168, 2169, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, + 1608, 1609, 1668, 1669, 2118, 2119, 1672, 1673, 1616, 1617, 1618, 1619, 1620, + 1621, 1622, 1623, 1624, 1625, 1684, 1685, 2134, 2135, 1688, 1689, 1632, 1633, + 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1670, 1671, 2150, 2151, 2184, + 2185, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1686, 1687, + 2166, 2167, 2200, 2201, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, + 1801, 1920, 1921, 2310, 2311, 2438, 2439, 1808, 1809, 1810, 1811, 1812, 1813, + 1814, 1815, 1816, 1817, 1936, 1937, 2326, 2327, 2454, 2455, 1824, 1825, 1826, + 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1922, 1923, 2342, 2343, 2408, 2409, + 1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1938, 1939, 2358, + 2359, 2424, 2425, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, + 1924, 1925, 2374, 2375, 1928, 1929, 1872, 1873, 1874, 1875, 1876, 1877, 1878, + 1879, 1880, 1881, 1940, 1941, 2390, 2391, 1944, 1945, 1888, 1889, 1890, 1891, + 1892, 1893, 1894, 1895, 1896, 1897, 1926, 1927, 2406, 2407, 2440, 2441, 1904, + 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1942, 1943, 2422, 2423, + 2456, 2457}; +#endif + +#if defined(DEC_BIN2DPD) && DEC_BIN2DPD==1 && !defined(DECBIN2DPD) +#define DECBIN2DPD + +const uint16_t BIN2DPD[1000]={ 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 
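
/* ------------------------------------------------------------------ */
/* Editorial aside, not part of the imported sources: the DPD2BCD table */
/* completed just above packs a declet's three digits into a 12-bit BCD */
/* value, one digit per nibble. A sketch of unpacking it:               */
/* ------------------------------------------------------------------ */
#include <stdint.h>
#include <stdio.h>

#define DEC_DPD2BCD 1
#include "libdecnumber/decDPD.h"

int main(void) {
    uint16_t bcd = DPD2BCD[0x3FF];    /* 0x999, per the table comments */
    unsigned hundreds = (bcd >> 8) & 0xF;
    unsigned tens     = (bcd >> 4) & 0xF;
    unsigned units    =  bcd       & 0xF;
    printf("%u%u%u\n", hundreds, tens, units);   /* prints 999 */
    return 0;
}
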
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 10, 11, 42, 43, 74, 75, + 106, 107, 78, 79, 26, 27, 58, 59, 90, 91, 122, 123, 94, + 95, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 138, 139, 170, 171, 202, 203, 234, 235, 206, 207, + 154, 155, 186, 187, 218, 219, 250, 251, 222, 223, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 288, 289, 290, 291, 292, 293, 294, 295, 296, + 297, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 320, 321, + 322, 323, 324, 325, 326, 327, 328, 329, 336, 337, 338, 339, 340, + 341, 342, 343, 344, 345, 352, 353, 354, 355, 356, 357, 358, 359, + 360, 361, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 266, + 267, 298, 299, 330, 331, 362, 363, 334, 335, 282, 283, 314, 315, + 346, 347, 378, 379, 350, 351, 384, 385, 386, 387, 388, 389, 390, + 391, 392, 393, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, + 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 448, 449, 450, 451, 452, 453, + 454, 455, 456, 457, 464, 465, 466, 467, 468, 469, 470, 471, 472, + 473, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 496, 497, + 498, 499, 500, 501, 502, 503, 504, 505, 394, 395, 426, 427, 458, + 459, 490, 491, 462, 463, 410, 411, 442, 443, 474, 475, 506, 507, + 478, 479, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 528, + 529, 530, 531, 532, 533, 534, 535, 536, 537, 544, 545, 546, 547, + 548, 549, 550, 551, 552, 553, 560, 561, 562, 563, 564, 565, 566, + 567, 568, 569, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, + 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 608, 609, 610, + 611, 612, 613, 614, 615, 616, 617, 624, 625, 626, 627, 628, 629, + 630, 631, 632, 633, 522, 523, 554, 555, 586, 587, 618, 619, 590, + 591, 538, 539, 570, 571, 602, 603, 634, 635, 606, 607, 640, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 656, 657, 658, 659, 660, + 661, 662, 663, 664, 665, 672, 673, 674, 675, 676, 677, 678, 679, + 680, 681, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 704, + 705, 706, 707, 708, 709, 710, 711, 712, 713, 720, 721, 722, 723, + 724, 725, 726, 727, 728, 729, 736, 737, 738, 739, 740, 741, 742, + 743, 744, 745, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, + 650, 651, 682, 683, 714, 715, 746, 747, 718, 719, 666, 667, 698, + 699, 730, 731, 762, 763, 734, 735, 768, 769, 770, 771, 772, 773, + 774, 775, 776, 777, 784, 785, 786, 787, 788, 789, 790, 791, 792, + 793, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 816, 817, + 818, 819, 820, 821, 822, 823, 824, 825, 832, 833, 834, 835, 836, + 837, 838, 839, 840, 841, 848, 849, 850, 851, 852, 853, 854, 855, + 856, 857, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 880, + 881, 882, 883, 884, 885, 886, 887, 888, 889, 778, 779, 810, 811, + 842, 843, 874, 875, 846, 847, 794, 795, 826, 827, 858, 859, 890, + 891, 862, 863, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, + 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 928, 929, 930, + 931, 932, 933, 934, 935, 936, 937, 944, 945, 946, 947, 948, 949, + 950, 951, 952, 953, 960, 961, 962, 963, 964, 
965, 966, 967, 968, + 969, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 992, 993, + 994, 995, 996, 997, 998, 999, 1000, 1001, 1008, 1009, 1010, 1011, 1012, + 1013, 1014, 1015, 1016, 1017, 906, 907, 938, 939, 970, 971, 1002, 1003, + 974, 975, 922, 923, 954, 955, 986, 987, 1018, 1019, 990, 991, 12, + 13, 268, 269, 524, 525, 780, 781, 46, 47, 28, 29, 284, 285, + 540, 541, 796, 797, 62, 63, 44, 45, 300, 301, 556, 557, 812, + 813, 302, 303, 60, 61, 316, 317, 572, 573, 828, 829, 318, 319, + 76, 77, 332, 333, 588, 589, 844, 845, 558, 559, 92, 93, 348, + 349, 604, 605, 860, 861, 574, 575, 108, 109, 364, 365, 620, 621, + 876, 877, 814, 815, 124, 125, 380, 381, 636, 637, 892, 893, 830, + 831, 14, 15, 270, 271, 526, 527, 782, 783, 110, 111, 30, 31, + 286, 287, 542, 543, 798, 799, 126, 127, 140, 141, 396, 397, 652, + 653, 908, 909, 174, 175, 156, 157, 412, 413, 668, 669, 924, 925, + 190, 191, 172, 173, 428, 429, 684, 685, 940, 941, 430, 431, 188, + 189, 444, 445, 700, 701, 956, 957, 446, 447, 204, 205, 460, 461, + 716, 717, 972, 973, 686, 687, 220, 221, 476, 477, 732, 733, 988, + 989, 702, 703, 236, 237, 492, 493, 748, 749, 1004, 1005, 942, 943, + 252, 253, 508, 509, 764, 765, 1020, 1021, 958, 959, 142, 143, 398, + 399, 654, 655, 910, 911, 238, 239, 158, 159, 414, 415, 670, 671, + 926, 927, 254, 255}; +#endif + +#if defined(DEC_DPD2BIN) && DEC_DPD2BIN==1 && !defined(DECDPD2BIN) +#define DECDPD2BIN + +const uint16_t DPD2BIN[1024]={ 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 80, 81, 800, 801, 880, 881, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 90, 91, 810, 811, 890, 891, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 82, 83, 820, 821, 808, + 809, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 92, 93, + 830, 831, 818, 819, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 84, 85, 840, 841, 88, 89, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 94, 95, 850, 851, 98, 99, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 86, 87, 860, 861, 888, 889, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 96, 97, 870, + 871, 898, 899, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 180, 181, 900, 901, 980, 981, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 190, 191, 910, 911, 990, 991, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 182, 183, 920, 921, 908, 909, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 192, 193, 930, 931, + 918, 919, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 184, + 185, 940, 941, 188, 189, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 194, 195, 950, 951, 198, 199, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 186, 187, 960, 961, 988, 989, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 196, 197, 970, 971, 998, + 999, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 280, 281, + 802, 803, 882, 883, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 290, 291, 812, 813, 892, 893, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 282, 283, 822, 823, 828, 829, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 292, 293, 832, 833, 838, 839, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 284, 285, 842, + 843, 288, 289, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 294, 295, 852, 853, 298, 299, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 286, 287, 862, 863, 888, 889, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 296, 297, 872, 873, 898, 899, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 380, 381, 902, 903, + 982, 983, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 390, + 391, 912, 913, 992, 993, 320, 321, 322, 323, 324, 325, 326, 327, + 328, 329, 382, 383, 922, 923, 
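
/* ------------------------------------------------------------------ */
/* Editorial aside, not part of the imported sources: BIN2DPD (above)   */
/* and DPD2BIN (being defined here) invert each other over the 1000     */
/* canonical declets, so packing then unpacking round-trips. A sketch:  */
/* ------------------------------------------------------------------ */
#include <assert.h>
#include <stdint.h>

#define DEC_BIN2DPD 1
#define DEC_DPD2BIN 1
#include "libdecnumber/decDPD.h"

int main(void) {
    for (unsigned v = 0; v < 1000; v++) {
        uint16_t declet = BIN2DPD[v];   /* pack 0..999 into 10 bits */
        assert(DPD2BIN[declet] == v);   /* unpacking recovers the value */
    }
    return 0;
}
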
928, 929, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 392, 393, 932, 933, 938, 939, 340, 341, + 342, 343, 344, 345, 346, 347, 348, 349, 384, 385, 942, 943, 388, + 389, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 394, 395, + 952, 953, 398, 399, 360, 361, 362, 363, 364, 365, 366, 367, 368, + 369, 386, 387, 962, 963, 988, 989, 370, 371, 372, 373, 374, 375, + 376, 377, 378, 379, 396, 397, 972, 973, 998, 999, 400, 401, 402, + 403, 404, 405, 406, 407, 408, 409, 480, 481, 804, 805, 884, 885, + 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 490, 491, 814, + 815, 894, 895, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, + 482, 483, 824, 825, 848, 849, 430, 431, 432, 433, 434, 435, 436, + 437, 438, 439, 492, 493, 834, 835, 858, 859, 440, 441, 442, 443, + 444, 445, 446, 447, 448, 449, 484, 485, 844, 845, 488, 489, 450, + 451, 452, 453, 454, 455, 456, 457, 458, 459, 494, 495, 854, 855, + 498, 499, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 486, + 487, 864, 865, 888, 889, 470, 471, 472, 473, 474, 475, 476, 477, + 478, 479, 496, 497, 874, 875, 898, 899, 500, 501, 502, 503, 504, + 505, 506, 507, 508, 509, 580, 581, 904, 905, 984, 985, 510, 511, + 512, 513, 514, 515, 516, 517, 518, 519, 590, 591, 914, 915, 994, + 995, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 582, 583, + 924, 925, 948, 949, 530, 531, 532, 533, 534, 535, 536, 537, 538, + 539, 592, 593, 934, 935, 958, 959, 540, 541, 542, 543, 544, 545, + 546, 547, 548, 549, 584, 585, 944, 945, 588, 589, 550, 551, 552, + 553, 554, 555, 556, 557, 558, 559, 594, 595, 954, 955, 598, 599, + 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 586, 587, 964, + 965, 988, 989, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, + 596, 597, 974, 975, 998, 999, 600, 601, 602, 603, 604, 605, 606, + 607, 608, 609, 680, 681, 806, 807, 886, 887, 610, 611, 612, 613, + 614, 615, 616, 617, 618, 619, 690, 691, 816, 817, 896, 897, 620, + 621, 622, 623, 624, 625, 626, 627, 628, 629, 682, 683, 826, 827, + 868, 869, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 692, + 693, 836, 837, 878, 879, 640, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 684, 685, 846, 847, 688, 689, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 694, 695, 856, 857, 698, 699, 660, 661, + 662, 663, 664, 665, 666, 667, 668, 669, 686, 687, 866, 867, 888, + 889, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 696, 697, + 876, 877, 898, 899, 700, 701, 702, 703, 704, 705, 706, 707, 708, + 709, 780, 781, 906, 907, 986, 987, 710, 711, 712, 713, 714, 715, + 716, 717, 718, 719, 790, 791, 916, 917, 996, 997, 720, 721, 722, + 723, 724, 725, 726, 727, 728, 729, 782, 783, 926, 927, 968, 969, + 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 792, 793, 936, + 937, 978, 979, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, + 784, 785, 946, 947, 788, 789, 750, 751, 752, 753, 754, 755, 756, + 757, 758, 759, 794, 795, 956, 957, 798, 799, 760, 761, 762, 763, + 764, 765, 766, 767, 768, 769, 786, 787, 966, 967, 988, 989, 770, + 771, 772, 773, 774, 775, 776, 777, 778, 779, 796, 797, 976, 977, + 998, 999}; +#endif + +#if defined(DEC_DPD2BINK) && DEC_DPD2BINK==1 && !defined(DECDPD2BINK) +#define DECDPD2BINK + +const uint32_t DPD2BINK[1024]={ 0, 1000, 2000, 3000, 4000, 5000, + 6000, 7000, 8000, 9000, 80000, 81000, 800000, 801000, 880000, 881000, + 10000, 11000, 12000, 13000, 14000, 15000, 16000, 17000, 18000, 19000, + 90000, 91000, 810000, 811000, 890000, 891000, 20000, 21000, 22000, 23000, + 24000, 25000, 26000, 27000, 28000, 29000, 82000, 83000, 820000, 821000, + 808000, 809000, 30000, 
31000, 32000, 33000, 34000, 35000, 36000, 37000, + 38000, 39000, 92000, 93000, 830000, 831000, 818000, 819000, 40000, 41000, + 42000, 43000, 44000, 45000, 46000, 47000, 48000, 49000, 84000, 85000, + 840000, 841000, 88000, 89000, 50000, 51000, 52000, 53000, 54000, 55000, + 56000, 57000, 58000, 59000, 94000, 95000, 850000, 851000, 98000, 99000, + 60000, 61000, 62000, 63000, 64000, 65000, 66000, 67000, 68000, 69000, + 86000, 87000, 860000, 861000, 888000, 889000, 70000, 71000, 72000, 73000, + 74000, 75000, 76000, 77000, 78000, 79000, 96000, 97000, 870000, 871000, + 898000, 899000, 100000, 101000, 102000, 103000, 104000, 105000, 106000, 107000, + 108000, 109000, 180000, 181000, 900000, 901000, 980000, 981000, 110000, 111000, + 112000, 113000, 114000, 115000, 116000, 117000, 118000, 119000, 190000, 191000, + 910000, 911000, 990000, 991000, 120000, 121000, 122000, 123000, 124000, 125000, + 126000, 127000, 128000, 129000, 182000, 183000, 920000, 921000, 908000, 909000, + 130000, 131000, 132000, 133000, 134000, 135000, 136000, 137000, 138000, 139000, + 192000, 193000, 930000, 931000, 918000, 919000, 140000, 141000, 142000, 143000, + 144000, 145000, 146000, 147000, 148000, 149000, 184000, 185000, 940000, 941000, + 188000, 189000, 150000, 151000, 152000, 153000, 154000, 155000, 156000, 157000, + 158000, 159000, 194000, 195000, 950000, 951000, 198000, 199000, 160000, 161000, + 162000, 163000, 164000, 165000, 166000, 167000, 168000, 169000, 186000, 187000, + 960000, 961000, 988000, 989000, 170000, 171000, 172000, 173000, 174000, 175000, + 176000, 177000, 178000, 179000, 196000, 197000, 970000, 971000, 998000, 999000, + 200000, 201000, 202000, 203000, 204000, 205000, 206000, 207000, 208000, 209000, + 280000, 281000, 802000, 803000, 882000, 883000, 210000, 211000, 212000, 213000, + 214000, 215000, 216000, 217000, 218000, 219000, 290000, 291000, 812000, 813000, + 892000, 893000, 220000, 221000, 222000, 223000, 224000, 225000, 226000, 227000, + 228000, 229000, 282000, 283000, 822000, 823000, 828000, 829000, 230000, 231000, + 232000, 233000, 234000, 235000, 236000, 237000, 238000, 239000, 292000, 293000, + 832000, 833000, 838000, 839000, 240000, 241000, 242000, 243000, 244000, 245000, + 246000, 247000, 248000, 249000, 284000, 285000, 842000, 843000, 288000, 289000, + 250000, 251000, 252000, 253000, 254000, 255000, 256000, 257000, 258000, 259000, + 294000, 295000, 852000, 853000, 298000, 299000, 260000, 261000, 262000, 263000, + 264000, 265000, 266000, 267000, 268000, 269000, 286000, 287000, 862000, 863000, + 888000, 889000, 270000, 271000, 272000, 273000, 274000, 275000, 276000, 277000, + 278000, 279000, 296000, 297000, 872000, 873000, 898000, 899000, 300000, 301000, + 302000, 303000, 304000, 305000, 306000, 307000, 308000, 309000, 380000, 381000, + 902000, 903000, 982000, 983000, 310000, 311000, 312000, 313000, 314000, 315000, + 316000, 317000, 318000, 319000, 390000, 391000, 912000, 913000, 992000, 993000, + 320000, 321000, 322000, 323000, 324000, 325000, 326000, 327000, 328000, 329000, + 382000, 383000, 922000, 923000, 928000, 929000, 330000, 331000, 332000, 333000, + 334000, 335000, 336000, 337000, 338000, 339000, 392000, 393000, 932000, 933000, + 938000, 939000, 340000, 341000, 342000, 343000, 344000, 345000, 346000, 347000, + 348000, 349000, 384000, 385000, 942000, 943000, 388000, 389000, 350000, 351000, + 352000, 353000, 354000, 355000, 356000, 357000, 358000, 359000, 394000, 395000, + 952000, 953000, 398000, 399000, 360000, 361000, 362000, 363000, 364000, 365000, + 366000, 367000, 368000, 369000, 
386000, 387000, 962000, 963000, 988000, 989000, + 370000, 371000, 372000, 373000, 374000, 375000, 376000, 377000, 378000, 379000, + 396000, 397000, 972000, 973000, 998000, 999000, 400000, 401000, 402000, 403000, + 404000, 405000, 406000, 407000, 408000, 409000, 480000, 481000, 804000, 805000, + 884000, 885000, 410000, 411000, 412000, 413000, 414000, 415000, 416000, 417000, + 418000, 419000, 490000, 491000, 814000, 815000, 894000, 895000, 420000, 421000, + 422000, 423000, 424000, 425000, 426000, 427000, 428000, 429000, 482000, 483000, + 824000, 825000, 848000, 849000, 430000, 431000, 432000, 433000, 434000, 435000, + 436000, 437000, 438000, 439000, 492000, 493000, 834000, 835000, 858000, 859000, + 440000, 441000, 442000, 443000, 444000, 445000, 446000, 447000, 448000, 449000, + 484000, 485000, 844000, 845000, 488000, 489000, 450000, 451000, 452000, 453000, + 454000, 455000, 456000, 457000, 458000, 459000, 494000, 495000, 854000, 855000, + 498000, 499000, 460000, 461000, 462000, 463000, 464000, 465000, 466000, 467000, + 468000, 469000, 486000, 487000, 864000, 865000, 888000, 889000, 470000, 471000, + 472000, 473000, 474000, 475000, 476000, 477000, 478000, 479000, 496000, 497000, + 874000, 875000, 898000, 899000, 500000, 501000, 502000, 503000, 504000, 505000, + 506000, 507000, 508000, 509000, 580000, 581000, 904000, 905000, 984000, 985000, + 510000, 511000, 512000, 513000, 514000, 515000, 516000, 517000, 518000, 519000, + 590000, 591000, 914000, 915000, 994000, 995000, 520000, 521000, 522000, 523000, + 524000, 525000, 526000, 527000, 528000, 529000, 582000, 583000, 924000, 925000, + 948000, 949000, 530000, 531000, 532000, 533000, 534000, 535000, 536000, 537000, + 538000, 539000, 592000, 593000, 934000, 935000, 958000, 959000, 540000, 541000, + 542000, 543000, 544000, 545000, 546000, 547000, 548000, 549000, 584000, 585000, + 944000, 945000, 588000, 589000, 550000, 551000, 552000, 553000, 554000, 555000, + 556000, 557000, 558000, 559000, 594000, 595000, 954000, 955000, 598000, 599000, + 560000, 561000, 562000, 563000, 564000, 565000, 566000, 567000, 568000, 569000, + 586000, 587000, 964000, 965000, 988000, 989000, 570000, 571000, 572000, 573000, + 574000, 575000, 576000, 577000, 578000, 579000, 596000, 597000, 974000, 975000, + 998000, 999000, 600000, 601000, 602000, 603000, 604000, 605000, 606000, 607000, + 608000, 609000, 680000, 681000, 806000, 807000, 886000, 887000, 610000, 611000, + 612000, 613000, 614000, 615000, 616000, 617000, 618000, 619000, 690000, 691000, + 816000, 817000, 896000, 897000, 620000, 621000, 622000, 623000, 624000, 625000, + 626000, 627000, 628000, 629000, 682000, 683000, 826000, 827000, 868000, 869000, + 630000, 631000, 632000, 633000, 634000, 635000, 636000, 637000, 638000, 639000, + 692000, 693000, 836000, 837000, 878000, 879000, 640000, 641000, 642000, 643000, + 644000, 645000, 646000, 647000, 648000, 649000, 684000, 685000, 846000, 847000, + 688000, 689000, 650000, 651000, 652000, 653000, 654000, 655000, 656000, 657000, + 658000, 659000, 694000, 695000, 856000, 857000, 698000, 699000, 660000, 661000, + 662000, 663000, 664000, 665000, 666000, 667000, 668000, 669000, 686000, 687000, + 866000, 867000, 888000, 889000, 670000, 671000, 672000, 673000, 674000, 675000, + 676000, 677000, 678000, 679000, 696000, 697000, 876000, 877000, 898000, 899000, + 700000, 701000, 702000, 703000, 704000, 705000, 706000, 707000, 708000, 709000, + 780000, 781000, 906000, 907000, 986000, 987000, 710000, 711000, 712000, 713000, + 714000, 715000, 716000, 717000, 718000, 719000, 790000, 
791000, 916000, 917000, + 996000, 997000, 720000, 721000, 722000, 723000, 724000, 725000, 726000, 727000, + 728000, 729000, 782000, 783000, 926000, 927000, 968000, 969000, 730000, 731000, + 732000, 733000, 734000, 735000, 736000, 737000, 738000, 739000, 792000, 793000, + 936000, 937000, 978000, 979000, 740000, 741000, 742000, 743000, 744000, 745000, + 746000, 747000, 748000, 749000, 784000, 785000, 946000, 947000, 788000, 789000, + 750000, 751000, 752000, 753000, 754000, 755000, 756000, 757000, 758000, 759000, + 794000, 795000, 956000, 957000, 798000, 799000, 760000, 761000, 762000, 763000, + 764000, 765000, 766000, 767000, 768000, 769000, 786000, 787000, 966000, 967000, + 988000, 989000, 770000, 771000, 772000, 773000, 774000, 775000, 776000, 777000, + 778000, 779000, 796000, 797000, 976000, 977000, 998000, 999000}; +#endif + +#if defined(DEC_DPD2BINM) && DEC_DPD2BINM==1 && !defined(DECDPD2BINM) +#define DECDPD2BINM + +const uint32_t DPD2BINM[1024]={0, 1000000, 2000000, 3000000, 4000000, + 5000000, 6000000, 7000000, 8000000, 9000000, 80000000, 81000000, + 800000000, 801000000, 880000000, 881000000, 10000000, 11000000, 12000000, + 13000000, 14000000, 15000000, 16000000, 17000000, 18000000, 19000000, + 90000000, 91000000, 810000000, 811000000, 890000000, 891000000, 20000000, + 21000000, 22000000, 23000000, 24000000, 25000000, 26000000, 27000000, + 28000000, 29000000, 82000000, 83000000, 820000000, 821000000, 808000000, + 809000000, 30000000, 31000000, 32000000, 33000000, 34000000, 35000000, + 36000000, 37000000, 38000000, 39000000, 92000000, 93000000, 830000000, + 831000000, 818000000, 819000000, 40000000, 41000000, 42000000, 43000000, + 44000000, 45000000, 46000000, 47000000, 48000000, 49000000, 84000000, + 85000000, 840000000, 841000000, 88000000, 89000000, 50000000, 51000000, + 52000000, 53000000, 54000000, 55000000, 56000000, 57000000, 58000000, + 59000000, 94000000, 95000000, 850000000, 851000000, 98000000, 99000000, + 60000000, 61000000, 62000000, 63000000, 64000000, 65000000, 66000000, + 67000000, 68000000, 69000000, 86000000, 87000000, 860000000, 861000000, + 888000000, 889000000, 70000000, 71000000, 72000000, 73000000, 74000000, + 75000000, 76000000, 77000000, 78000000, 79000000, 96000000, 97000000, + 870000000, 871000000, 898000000, 899000000, 100000000, 101000000, 102000000, + 103000000, 104000000, 105000000, 106000000, 107000000, 108000000, 109000000, + 180000000, 181000000, 900000000, 901000000, 980000000, 981000000, 110000000, + 111000000, 112000000, 113000000, 114000000, 115000000, 116000000, 117000000, + 118000000, 119000000, 190000000, 191000000, 910000000, 911000000, 990000000, + 991000000, 120000000, 121000000, 122000000, 123000000, 124000000, 125000000, + 126000000, 127000000, 128000000, 129000000, 182000000, 183000000, 920000000, + 921000000, 908000000, 909000000, 130000000, 131000000, 132000000, 133000000, + 134000000, 135000000, 136000000, 137000000, 138000000, 139000000, 192000000, + 193000000, 930000000, 931000000, 918000000, 919000000, 140000000, 141000000, + 142000000, 143000000, 144000000, 145000000, 146000000, 147000000, 148000000, + 149000000, 184000000, 185000000, 940000000, 941000000, 188000000, 189000000, + 150000000, 151000000, 152000000, 153000000, 154000000, 155000000, 156000000, + 157000000, 158000000, 159000000, 194000000, 195000000, 950000000, 951000000, + 198000000, 199000000, 160000000, 161000000, 162000000, 163000000, 164000000, + 165000000, 166000000, 167000000, 168000000, 169000000, 186000000, 187000000, + 960000000, 961000000, 988000000, 
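
/* ------------------------------------------------------------------ */
/* Editorial aside, not part of the imported sources: DPD2BINK (x1000,  */
/* above) and DPD2BINM (x1E+6, being defined here) are pre-scaled       */
/* copies of DPD2BIN, so three declets combine into a 0..999999999      */
/* value with two additions and no multiplies. Example declets below:   */
/* ------------------------------------------------------------------ */
#include <stdint.h>
#include <stdio.h>

#define DEC_DPD2BIN  1
#define DEC_DPD2BINK 1
#define DEC_DPD2BINM 1
#include "libdecnumber/decDPD.h"

int main(void) {
    uint32_t hi = 0x001, mid = 0x002, lo = 0x003;  /* declets for 1, 2, 3 */
    uint32_t value = DPD2BINM[hi] + DPD2BINK[mid] + DPD2BIN[lo];
    printf("%u\n", (unsigned)value);               /* prints 1002003 */
    return 0;
}
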
989000000, 170000000, 171000000, 172000000, + 173000000, 174000000, 175000000, 176000000, 177000000, 178000000, 179000000, + 196000000, 197000000, 970000000, 971000000, 998000000, 999000000, 200000000, + 201000000, 202000000, 203000000, 204000000, 205000000, 206000000, 207000000, + 208000000, 209000000, 280000000, 281000000, 802000000, 803000000, 882000000, + 883000000, 210000000, 211000000, 212000000, 213000000, 214000000, 215000000, + 216000000, 217000000, 218000000, 219000000, 290000000, 291000000, 812000000, + 813000000, 892000000, 893000000, 220000000, 221000000, 222000000, 223000000, + 224000000, 225000000, 226000000, 227000000, 228000000, 229000000, 282000000, + 283000000, 822000000, 823000000, 828000000, 829000000, 230000000, 231000000, + 232000000, 233000000, 234000000, 235000000, 236000000, 237000000, 238000000, + 239000000, 292000000, 293000000, 832000000, 833000000, 838000000, 839000000, + 240000000, 241000000, 242000000, 243000000, 244000000, 245000000, 246000000, + 247000000, 248000000, 249000000, 284000000, 285000000, 842000000, 843000000, + 288000000, 289000000, 250000000, 251000000, 252000000, 253000000, 254000000, + 255000000, 256000000, 257000000, 258000000, 259000000, 294000000, 295000000, + 852000000, 853000000, 298000000, 299000000, 260000000, 261000000, 262000000, + 263000000, 264000000, 265000000, 266000000, 267000000, 268000000, 269000000, + 286000000, 287000000, 862000000, 863000000, 888000000, 889000000, 270000000, + 271000000, 272000000, 273000000, 274000000, 275000000, 276000000, 277000000, + 278000000, 279000000, 296000000, 297000000, 872000000, 873000000, 898000000, + 899000000, 300000000, 301000000, 302000000, 303000000, 304000000, 305000000, + 306000000, 307000000, 308000000, 309000000, 380000000, 381000000, 902000000, + 903000000, 982000000, 983000000, 310000000, 311000000, 312000000, 313000000, + 314000000, 315000000, 316000000, 317000000, 318000000, 319000000, 390000000, + 391000000, 912000000, 913000000, 992000000, 993000000, 320000000, 321000000, + 322000000, 323000000, 324000000, 325000000, 326000000, 327000000, 328000000, + 329000000, 382000000, 383000000, 922000000, 923000000, 928000000, 929000000, + 330000000, 331000000, 332000000, 333000000, 334000000, 335000000, 336000000, + 337000000, 338000000, 339000000, 392000000, 393000000, 932000000, 933000000, + 938000000, 939000000, 340000000, 341000000, 342000000, 343000000, 344000000, + 345000000, 346000000, 347000000, 348000000, 349000000, 384000000, 385000000, + 942000000, 943000000, 388000000, 389000000, 350000000, 351000000, 352000000, + 353000000, 354000000, 355000000, 356000000, 357000000, 358000000, 359000000, + 394000000, 395000000, 952000000, 953000000, 398000000, 399000000, 360000000, + 361000000, 362000000, 363000000, 364000000, 365000000, 366000000, 367000000, + 368000000, 369000000, 386000000, 387000000, 962000000, 963000000, 988000000, + 989000000, 370000000, 371000000, 372000000, 373000000, 374000000, 375000000, + 376000000, 377000000, 378000000, 379000000, 396000000, 397000000, 972000000, + 973000000, 998000000, 999000000, 400000000, 401000000, 402000000, 403000000, + 404000000, 405000000, 406000000, 407000000, 408000000, 409000000, 480000000, + 481000000, 804000000, 805000000, 884000000, 885000000, 410000000, 411000000, + 412000000, 413000000, 414000000, 415000000, 416000000, 417000000, 418000000, + 419000000, 490000000, 491000000, 814000000, 815000000, 894000000, 895000000, + 420000000, 421000000, 422000000, 423000000, 424000000, 425000000, 426000000, + 427000000, 428000000, 429000000, 
482000000, 483000000, 824000000, 825000000, + 848000000, 849000000, 430000000, 431000000, 432000000, 433000000, 434000000, + 435000000, 436000000, 437000000, 438000000, 439000000, 492000000, 493000000, + 834000000, 835000000, 858000000, 859000000, 440000000, 441000000, 442000000, + 443000000, 444000000, 445000000, 446000000, 447000000, 448000000, 449000000, + 484000000, 485000000, 844000000, 845000000, 488000000, 489000000, 450000000, + 451000000, 452000000, 453000000, 454000000, 455000000, 456000000, 457000000, + 458000000, 459000000, 494000000, 495000000, 854000000, 855000000, 498000000, + 499000000, 460000000, 461000000, 462000000, 463000000, 464000000, 465000000, + 466000000, 467000000, 468000000, 469000000, 486000000, 487000000, 864000000, + 865000000, 888000000, 889000000, 470000000, 471000000, 472000000, 473000000, + 474000000, 475000000, 476000000, 477000000, 478000000, 479000000, 496000000, + 497000000, 874000000, 875000000, 898000000, 899000000, 500000000, 501000000, + 502000000, 503000000, 504000000, 505000000, 506000000, 507000000, 508000000, + 509000000, 580000000, 581000000, 904000000, 905000000, 984000000, 985000000, + 510000000, 511000000, 512000000, 513000000, 514000000, 515000000, 516000000, + 517000000, 518000000, 519000000, 590000000, 591000000, 914000000, 915000000, + 994000000, 995000000, 520000000, 521000000, 522000000, 523000000, 524000000, + 525000000, 526000000, 527000000, 528000000, 529000000, 582000000, 583000000, + 924000000, 925000000, 948000000, 949000000, 530000000, 531000000, 532000000, + 533000000, 534000000, 535000000, 536000000, 537000000, 538000000, 539000000, + 592000000, 593000000, 934000000, 935000000, 958000000, 959000000, 540000000, + 541000000, 542000000, 543000000, 544000000, 545000000, 546000000, 547000000, + 548000000, 549000000, 584000000, 585000000, 944000000, 945000000, 588000000, + 589000000, 550000000, 551000000, 552000000, 553000000, 554000000, 555000000, + 556000000, 557000000, 558000000, 559000000, 594000000, 595000000, 954000000, + 955000000, 598000000, 599000000, 560000000, 561000000, 562000000, 563000000, + 564000000, 565000000, 566000000, 567000000, 568000000, 569000000, 586000000, + 587000000, 964000000, 965000000, 988000000, 989000000, 570000000, 571000000, + 572000000, 573000000, 574000000, 575000000, 576000000, 577000000, 578000000, + 579000000, 596000000, 597000000, 974000000, 975000000, 998000000, 999000000, + 600000000, 601000000, 602000000, 603000000, 604000000, 605000000, 606000000, + 607000000, 608000000, 609000000, 680000000, 681000000, 806000000, 807000000, + 886000000, 887000000, 610000000, 611000000, 612000000, 613000000, 614000000, + 615000000, 616000000, 617000000, 618000000, 619000000, 690000000, 691000000, + 816000000, 817000000, 896000000, 897000000, 620000000, 621000000, 622000000, + 623000000, 624000000, 625000000, 626000000, 627000000, 628000000, 629000000, + 682000000, 683000000, 826000000, 827000000, 868000000, 869000000, 630000000, + 631000000, 632000000, 633000000, 634000000, 635000000, 636000000, 637000000, + 638000000, 639000000, 692000000, 693000000, 836000000, 837000000, 878000000, + 879000000, 640000000, 641000000, 642000000, 643000000, 644000000, 645000000, + 646000000, 647000000, 648000000, 649000000, 684000000, 685000000, 846000000, + 847000000, 688000000, 689000000, 650000000, 651000000, 652000000, 653000000, + 654000000, 655000000, 656000000, 657000000, 658000000, 659000000, 694000000, + 695000000, 856000000, 857000000, 698000000, 699000000, 660000000, 661000000, + 662000000, 663000000, 664000000, 
665000000, 666000000, 667000000, 668000000, + 669000000, 686000000, 687000000, 866000000, 867000000, 888000000, 889000000, + 670000000, 671000000, 672000000, 673000000, 674000000, 675000000, 676000000, + 677000000, 678000000, 679000000, 696000000, 697000000, 876000000, 877000000, + 898000000, 899000000, 700000000, 701000000, 702000000, 703000000, 704000000, + 705000000, 706000000, 707000000, 708000000, 709000000, 780000000, 781000000, + 906000000, 907000000, 986000000, 987000000, 710000000, 711000000, 712000000, + 713000000, 714000000, 715000000, 716000000, 717000000, 718000000, 719000000, + 790000000, 791000000, 916000000, 917000000, 996000000, 997000000, 720000000, + 721000000, 722000000, 723000000, 724000000, 725000000, 726000000, 727000000, + 728000000, 729000000, 782000000, 783000000, 926000000, 927000000, 968000000, + 969000000, 730000000, 731000000, 732000000, 733000000, 734000000, 735000000, + 736000000, 737000000, 738000000, 739000000, 792000000, 793000000, 936000000, + 937000000, 978000000, 979000000, 740000000, 741000000, 742000000, 743000000, + 744000000, 745000000, 746000000, 747000000, 748000000, 749000000, 784000000, + 785000000, 946000000, 947000000, 788000000, 789000000, 750000000, 751000000, + 752000000, 753000000, 754000000, 755000000, 756000000, 757000000, 758000000, + 759000000, 794000000, 795000000, 956000000, 957000000, 798000000, 799000000, + 760000000, 761000000, 762000000, 763000000, 764000000, 765000000, 766000000, + 767000000, 768000000, 769000000, 786000000, 787000000, 966000000, 967000000, + 988000000, 989000000, 770000000, 771000000, 772000000, 773000000, 774000000, + 775000000, 776000000, 777000000, 778000000, 779000000, 796000000, 797000000, + 976000000, 977000000, 998000000, 999000000}; +#endif + +#if defined(DEC_BIN2CHAR) && DEC_BIN2CHAR==1 && !defined(DECBIN2CHAR) +#define DECBIN2CHAR + +const uint8_t BIN2CHAR[4001]={ + '\0','0','0','0', '\1','0','0','1', '\1','0','0','2', '\1','0','0','3', '\1','0','0','4', + '\1','0','0','5', '\1','0','0','6', '\1','0','0','7', '\1','0','0','8', '\1','0','0','9', + '\2','0','1','0', '\2','0','1','1', '\2','0','1','2', '\2','0','1','3', '\2','0','1','4', + '\2','0','1','5', '\2','0','1','6', '\2','0','1','7', '\2','0','1','8', '\2','0','1','9', + '\2','0','2','0', '\2','0','2','1', '\2','0','2','2', '\2','0','2','3', '\2','0','2','4', + '\2','0','2','5', '\2','0','2','6', '\2','0','2','7', '\2','0','2','8', '\2','0','2','9', + '\2','0','3','0', '\2','0','3','1', '\2','0','3','2', '\2','0','3','3', '\2','0','3','4', + '\2','0','3','5', '\2','0','3','6', '\2','0','3','7', '\2','0','3','8', '\2','0','3','9', + '\2','0','4','0', '\2','0','4','1', '\2','0','4','2', '\2','0','4','3', '\2','0','4','4', + '\2','0','4','5', '\2','0','4','6', '\2','0','4','7', '\2','0','4','8', '\2','0','4','9', + '\2','0','5','0', '\2','0','5','1', '\2','0','5','2', '\2','0','5','3', '\2','0','5','4', + '\2','0','5','5', '\2','0','5','6', '\2','0','5','7', '\2','0','5','8', '\2','0','5','9', + '\2','0','6','0', '\2','0','6','1', '\2','0','6','2', '\2','0','6','3', '\2','0','6','4', + '\2','0','6','5', '\2','0','6','6', '\2','0','6','7', '\2','0','6','8', '\2','0','6','9', + '\2','0','7','0', '\2','0','7','1', '\2','0','7','2', '\2','0','7','3', '\2','0','7','4', + '\2','0','7','5', '\2','0','7','6', '\2','0','7','7', '\2','0','7','8', '\2','0','7','9', + '\2','0','8','0', '\2','0','8','1', '\2','0','8','2', '\2','0','8','3', '\2','0','8','4', + '\2','0','8','5', '\2','0','8','6', '\2','0','8','7', '\2','0','8','8', '\2','0','8','9', + 
'\2','0','9','0', '\2','0','9','1', '\2','0','9','2', '\2','0','9','3', '\2','0','9','4', + '\2','0','9','5', '\2','0','9','6', '\2','0','9','7', '\2','0','9','8', '\2','0','9','9', + '\3','1','0','0', '\3','1','0','1', '\3','1','0','2', '\3','1','0','3', '\3','1','0','4', + '\3','1','0','5', '\3','1','0','6', '\3','1','0','7', '\3','1','0','8', '\3','1','0','9', + '\3','1','1','0', '\3','1','1','1', '\3','1','1','2', '\3','1','1','3', '\3','1','1','4', + '\3','1','1','5', '\3','1','1','6', '\3','1','1','7', '\3','1','1','8', '\3','1','1','9', + '\3','1','2','0', '\3','1','2','1', '\3','1','2','2', '\3','1','2','3', '\3','1','2','4', + '\3','1','2','5', '\3','1','2','6', '\3','1','2','7', '\3','1','2','8', '\3','1','2','9', + '\3','1','3','0', '\3','1','3','1', '\3','1','3','2', '\3','1','3','3', '\3','1','3','4', + '\3','1','3','5', '\3','1','3','6', '\3','1','3','7', '\3','1','3','8', '\3','1','3','9', + '\3','1','4','0', '\3','1','4','1', '\3','1','4','2', '\3','1','4','3', '\3','1','4','4', + '\3','1','4','5', '\3','1','4','6', '\3','1','4','7', '\3','1','4','8', '\3','1','4','9', + '\3','1','5','0', '\3','1','5','1', '\3','1','5','2', '\3','1','5','3', '\3','1','5','4', + '\3','1','5','5', '\3','1','5','6', '\3','1','5','7', '\3','1','5','8', '\3','1','5','9', + '\3','1','6','0', '\3','1','6','1', '\3','1','6','2', '\3','1','6','3', '\3','1','6','4', + '\3','1','6','5', '\3','1','6','6', '\3','1','6','7', '\3','1','6','8', '\3','1','6','9', + '\3','1','7','0', '\3','1','7','1', '\3','1','7','2', '\3','1','7','3', '\3','1','7','4', + '\3','1','7','5', '\3','1','7','6', '\3','1','7','7', '\3','1','7','8', '\3','1','7','9', + '\3','1','8','0', '\3','1','8','1', '\3','1','8','2', '\3','1','8','3', '\3','1','8','4', + '\3','1','8','5', '\3','1','8','6', '\3','1','8','7', '\3','1','8','8', '\3','1','8','9', + '\3','1','9','0', '\3','1','9','1', '\3','1','9','2', '\3','1','9','3', '\3','1','9','4', + '\3','1','9','5', '\3','1','9','6', '\3','1','9','7', '\3','1','9','8', '\3','1','9','9', + '\3','2','0','0', '\3','2','0','1', '\3','2','0','2', '\3','2','0','3', '\3','2','0','4', + '\3','2','0','5', '\3','2','0','6', '\3','2','0','7', '\3','2','0','8', '\3','2','0','9', + '\3','2','1','0', '\3','2','1','1', '\3','2','1','2', '\3','2','1','3', '\3','2','1','4', + '\3','2','1','5', '\3','2','1','6', '\3','2','1','7', '\3','2','1','8', '\3','2','1','9', + '\3','2','2','0', '\3','2','2','1', '\3','2','2','2', '\3','2','2','3', '\3','2','2','4', + '\3','2','2','5', '\3','2','2','6', '\3','2','2','7', '\3','2','2','8', '\3','2','2','9', + '\3','2','3','0', '\3','2','3','1', '\3','2','3','2', '\3','2','3','3', '\3','2','3','4', + '\3','2','3','5', '\3','2','3','6', '\3','2','3','7', '\3','2','3','8', '\3','2','3','9', + '\3','2','4','0', '\3','2','4','1', '\3','2','4','2', '\3','2','4','3', '\3','2','4','4', + '\3','2','4','5', '\3','2','4','6', '\3','2','4','7', '\3','2','4','8', '\3','2','4','9', + '\3','2','5','0', '\3','2','5','1', '\3','2','5','2', '\3','2','5','3', '\3','2','5','4', + '\3','2','5','5', '\3','2','5','6', '\3','2','5','7', '\3','2','5','8', '\3','2','5','9', + '\3','2','6','0', '\3','2','6','1', '\3','2','6','2', '\3','2','6','3', '\3','2','6','4', + '\3','2','6','5', '\3','2','6','6', '\3','2','6','7', '\3','2','6','8', '\3','2','6','9', + '\3','2','7','0', '\3','2','7','1', '\3','2','7','2', '\3','2','7','3', '\3','2','7','4', + '\3','2','7','5', '\3','2','7','6', '\3','2','7','7', '\3','2','7','8', '\3','2','7','9', + '\3','2','8','0', '\3','2','8','1', '\3','2','8','2', 
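
/* ------------------------------------------------------------------ */
/* Editorial aside, not part of the imported sources: per the header    */
/* comments, each 4-byte BIN2CHAR entry is a length byte followed by    */
/* three digit characters, so a 0..999 value can be formatted without   */
/* any division. A sketch (the length byte is 0 for the value 0):       */
/* ------------------------------------------------------------------ */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEC_BIN2CHAR 1
#include "libdecnumber/decDPD.h"

int main(void) {
    unsigned n = 42;
    const uint8_t *entry = &BIN2CHAR[n * 4];
    unsigned len = entry[0];                 /* significant digits: 2 */
    char buf[4];
    memcpy(buf, entry + 1 + (3 - len), len); /* drop leading zeros */
    buf[len] = '\0';
    printf("%s\n", buf);                     /* prints "42" */
    return 0;
}
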
'\3','2','8','3', '\3','2','8','4', + '\3','2','8','5', '\3','2','8','6', '\3','2','8','7', '\3','2','8','8', '\3','2','8','9', + '\3','2','9','0', '\3','2','9','1', '\3','2','9','2', '\3','2','9','3', '\3','2','9','4', + '\3','2','9','5', '\3','2','9','6', '\3','2','9','7', '\3','2','9','8', '\3','2','9','9', + '\3','3','0','0', '\3','3','0','1', '\3','3','0','2', '\3','3','0','3', '\3','3','0','4', + '\3','3','0','5', '\3','3','0','6', '\3','3','0','7', '\3','3','0','8', '\3','3','0','9', + '\3','3','1','0', '\3','3','1','1', '\3','3','1','2', '\3','3','1','3', '\3','3','1','4', + '\3','3','1','5', '\3','3','1','6', '\3','3','1','7', '\3','3','1','8', '\3','3','1','9', + '\3','3','2','0', '\3','3','2','1', '\3','3','2','2', '\3','3','2','3', '\3','3','2','4', + '\3','3','2','5', '\3','3','2','6', '\3','3','2','7', '\3','3','2','8', '\3','3','2','9', + '\3','3','3','0', '\3','3','3','1', '\3','3','3','2', '\3','3','3','3', '\3','3','3','4', + '\3','3','3','5', '\3','3','3','6', '\3','3','3','7', '\3','3','3','8', '\3','3','3','9', + '\3','3','4','0', '\3','3','4','1', '\3','3','4','2', '\3','3','4','3', '\3','3','4','4', + '\3','3','4','5', '\3','3','4','6', '\3','3','4','7', '\3','3','4','8', '\3','3','4','9', + '\3','3','5','0', '\3','3','5','1', '\3','3','5','2', '\3','3','5','3', '\3','3','5','4', + '\3','3','5','5', '\3','3','5','6', '\3','3','5','7', '\3','3','5','8', '\3','3','5','9', + '\3','3','6','0', '\3','3','6','1', '\3','3','6','2', '\3','3','6','3', '\3','3','6','4', + '\3','3','6','5', '\3','3','6','6', '\3','3','6','7', '\3','3','6','8', '\3','3','6','9', + '\3','3','7','0', '\3','3','7','1', '\3','3','7','2', '\3','3','7','3', '\3','3','7','4', + '\3','3','7','5', '\3','3','7','6', '\3','3','7','7', '\3','3','7','8', '\3','3','7','9', + '\3','3','8','0', '\3','3','8','1', '\3','3','8','2', '\3','3','8','3', '\3','3','8','4', + '\3','3','8','5', '\3','3','8','6', '\3','3','8','7', '\3','3','8','8', '\3','3','8','9', + '\3','3','9','0', '\3','3','9','1', '\3','3','9','2', '\3','3','9','3', '\3','3','9','4', + '\3','3','9','5', '\3','3','9','6', '\3','3','9','7', '\3','3','9','8', '\3','3','9','9', + '\3','4','0','0', '\3','4','0','1', '\3','4','0','2', '\3','4','0','3', '\3','4','0','4', + '\3','4','0','5', '\3','4','0','6', '\3','4','0','7', '\3','4','0','8', '\3','4','0','9', + '\3','4','1','0', '\3','4','1','1', '\3','4','1','2', '\3','4','1','3', '\3','4','1','4', + '\3','4','1','5', '\3','4','1','6', '\3','4','1','7', '\3','4','1','8', '\3','4','1','9', + '\3','4','2','0', '\3','4','2','1', '\3','4','2','2', '\3','4','2','3', '\3','4','2','4', + '\3','4','2','5', '\3','4','2','6', '\3','4','2','7', '\3','4','2','8', '\3','4','2','9', + '\3','4','3','0', '\3','4','3','1', '\3','4','3','2', '\3','4','3','3', '\3','4','3','4', + '\3','4','3','5', '\3','4','3','6', '\3','4','3','7', '\3','4','3','8', '\3','4','3','9', + '\3','4','4','0', '\3','4','4','1', '\3','4','4','2', '\3','4','4','3', '\3','4','4','4', + '\3','4','4','5', '\3','4','4','6', '\3','4','4','7', '\3','4','4','8', '\3','4','4','9', + '\3','4','5','0', '\3','4','5','1', '\3','4','5','2', '\3','4','5','3', '\3','4','5','4', + '\3','4','5','5', '\3','4','5','6', '\3','4','5','7', '\3','4','5','8', '\3','4','5','9', + '\3','4','6','0', '\3','4','6','1', '\3','4','6','2', '\3','4','6','3', '\3','4','6','4', + '\3','4','6','5', '\3','4','6','6', '\3','4','6','7', '\3','4','6','8', '\3','4','6','9', + '\3','4','7','0', '\3','4','7','1', '\3','4','7','2', '\3','4','7','3', '\3','4','7','4', + '\3','4','7','5', 
'\3','4','7','6', '\3','4','7','7', '\3','4','7','8', '\3','4','7','9', + '\3','4','8','0', '\3','4','8','1', '\3','4','8','2', '\3','4','8','3', '\3','4','8','4', + '\3','4','8','5', '\3','4','8','6', '\3','4','8','7', '\3','4','8','8', '\3','4','8','9', + '\3','4','9','0', '\3','4','9','1', '\3','4','9','2', '\3','4','9','3', '\3','4','9','4', + '\3','4','9','5', '\3','4','9','6', '\3','4','9','7', '\3','4','9','8', '\3','4','9','9', + '\3','5','0','0', '\3','5','0','1', '\3','5','0','2', '\3','5','0','3', '\3','5','0','4', + '\3','5','0','5', '\3','5','0','6', '\3','5','0','7', '\3','5','0','8', '\3','5','0','9', + '\3','5','1','0', '\3','5','1','1', '\3','5','1','2', '\3','5','1','3', '\3','5','1','4', + '\3','5','1','5', '\3','5','1','6', '\3','5','1','7', '\3','5','1','8', '\3','5','1','9', + '\3','5','2','0', '\3','5','2','1', '\3','5','2','2', '\3','5','2','3', '\3','5','2','4', + '\3','5','2','5', '\3','5','2','6', '\3','5','2','7', '\3','5','2','8', '\3','5','2','9', + '\3','5','3','0', '\3','5','3','1', '\3','5','3','2', '\3','5','3','3', '\3','5','3','4', + '\3','5','3','5', '\3','5','3','6', '\3','5','3','7', '\3','5','3','8', '\3','5','3','9', + '\3','5','4','0', '\3','5','4','1', '\3','5','4','2', '\3','5','4','3', '\3','5','4','4', + '\3','5','4','5', '\3','5','4','6', '\3','5','4','7', '\3','5','4','8', '\3','5','4','9', + '\3','5','5','0', '\3','5','5','1', '\3','5','5','2', '\3','5','5','3', '\3','5','5','4', + '\3','5','5','5', '\3','5','5','6', '\3','5','5','7', '\3','5','5','8', '\3','5','5','9', + '\3','5','6','0', '\3','5','6','1', '\3','5','6','2', '\3','5','6','3', '\3','5','6','4', + '\3','5','6','5', '\3','5','6','6', '\3','5','6','7', '\3','5','6','8', '\3','5','6','9', + '\3','5','7','0', '\3','5','7','1', '\3','5','7','2', '\3','5','7','3', '\3','5','7','4', + '\3','5','7','5', '\3','5','7','6', '\3','5','7','7', '\3','5','7','8', '\3','5','7','9', + '\3','5','8','0', '\3','5','8','1', '\3','5','8','2', '\3','5','8','3', '\3','5','8','4', + '\3','5','8','5', '\3','5','8','6', '\3','5','8','7', '\3','5','8','8', '\3','5','8','9', + '\3','5','9','0', '\3','5','9','1', '\3','5','9','2', '\3','5','9','3', '\3','5','9','4', + '\3','5','9','5', '\3','5','9','6', '\3','5','9','7', '\3','5','9','8', '\3','5','9','9', + '\3','6','0','0', '\3','6','0','1', '\3','6','0','2', '\3','6','0','3', '\3','6','0','4', + '\3','6','0','5', '\3','6','0','6', '\3','6','0','7', '\3','6','0','8', '\3','6','0','9', + '\3','6','1','0', '\3','6','1','1', '\3','6','1','2', '\3','6','1','3', '\3','6','1','4', + '\3','6','1','5', '\3','6','1','6', '\3','6','1','7', '\3','6','1','8', '\3','6','1','9', + '\3','6','2','0', '\3','6','2','1', '\3','6','2','2', '\3','6','2','3', '\3','6','2','4', + '\3','6','2','5', '\3','6','2','6', '\3','6','2','7', '\3','6','2','8', '\3','6','2','9', + '\3','6','3','0', '\3','6','3','1', '\3','6','3','2', '\3','6','3','3', '\3','6','3','4', + '\3','6','3','5', '\3','6','3','6', '\3','6','3','7', '\3','6','3','8', '\3','6','3','9', + '\3','6','4','0', '\3','6','4','1', '\3','6','4','2', '\3','6','4','3', '\3','6','4','4', + '\3','6','4','5', '\3','6','4','6', '\3','6','4','7', '\3','6','4','8', '\3','6','4','9', + '\3','6','5','0', '\3','6','5','1', '\3','6','5','2', '\3','6','5','3', '\3','6','5','4', + '\3','6','5','5', '\3','6','5','6', '\3','6','5','7', '\3','6','5','8', '\3','6','5','9', + '\3','6','6','0', '\3','6','6','1', '\3','6','6','2', '\3','6','6','3', '\3','6','6','4', + '\3','6','6','5', '\3','6','6','6', '\3','6','6','7', '\3','6','6','8', 
'\3','6','6','9', + '\3','6','7','0', '\3','6','7','1', '\3','6','7','2', '\3','6','7','3', '\3','6','7','4', + '\3','6','7','5', '\3','6','7','6', '\3','6','7','7', '\3','6','7','8', '\3','6','7','9', + '\3','6','8','0', '\3','6','8','1', '\3','6','8','2', '\3','6','8','3', '\3','6','8','4', + '\3','6','8','5', '\3','6','8','6', '\3','6','8','7', '\3','6','8','8', '\3','6','8','9', + '\3','6','9','0', '\3','6','9','1', '\3','6','9','2', '\3','6','9','3', '\3','6','9','4', + '\3','6','9','5', '\3','6','9','6', '\3','6','9','7', '\3','6','9','8', '\3','6','9','9', + '\3','7','0','0', '\3','7','0','1', '\3','7','0','2', '\3','7','0','3', '\3','7','0','4', + '\3','7','0','5', '\3','7','0','6', '\3','7','0','7', '\3','7','0','8', '\3','7','0','9', + '\3','7','1','0', '\3','7','1','1', '\3','7','1','2', '\3','7','1','3', '\3','7','1','4', + '\3','7','1','5', '\3','7','1','6', '\3','7','1','7', '\3','7','1','8', '\3','7','1','9', + '\3','7','2','0', '\3','7','2','1', '\3','7','2','2', '\3','7','2','3', '\3','7','2','4', + '\3','7','2','5', '\3','7','2','6', '\3','7','2','7', '\3','7','2','8', '\3','7','2','9', + '\3','7','3','0', '\3','7','3','1', '\3','7','3','2', '\3','7','3','3', '\3','7','3','4', + '\3','7','3','5', '\3','7','3','6', '\3','7','3','7', '\3','7','3','8', '\3','7','3','9', + '\3','7','4','0', '\3','7','4','1', '\3','7','4','2', '\3','7','4','3', '\3','7','4','4', + '\3','7','4','5', '\3','7','4','6', '\3','7','4','7', '\3','7','4','8', '\3','7','4','9', + '\3','7','5','0', '\3','7','5','1', '\3','7','5','2', '\3','7','5','3', '\3','7','5','4', + '\3','7','5','5', '\3','7','5','6', '\3','7','5','7', '\3','7','5','8', '\3','7','5','9', + '\3','7','6','0', '\3','7','6','1', '\3','7','6','2', '\3','7','6','3', '\3','7','6','4', + '\3','7','6','5', '\3','7','6','6', '\3','7','6','7', '\3','7','6','8', '\3','7','6','9', + '\3','7','7','0', '\3','7','7','1', '\3','7','7','2', '\3','7','7','3', '\3','7','7','4', + '\3','7','7','5', '\3','7','7','6', '\3','7','7','7', '\3','7','7','8', '\3','7','7','9', + '\3','7','8','0', '\3','7','8','1', '\3','7','8','2', '\3','7','8','3', '\3','7','8','4', + '\3','7','8','5', '\3','7','8','6', '\3','7','8','7', '\3','7','8','8', '\3','7','8','9', + '\3','7','9','0', '\3','7','9','1', '\3','7','9','2', '\3','7','9','3', '\3','7','9','4', + '\3','7','9','5', '\3','7','9','6', '\3','7','9','7', '\3','7','9','8', '\3','7','9','9', + '\3','8','0','0', '\3','8','0','1', '\3','8','0','2', '\3','8','0','3', '\3','8','0','4', + '\3','8','0','5', '\3','8','0','6', '\3','8','0','7', '\3','8','0','8', '\3','8','0','9', + '\3','8','1','0', '\3','8','1','1', '\3','8','1','2', '\3','8','1','3', '\3','8','1','4', + '\3','8','1','5', '\3','8','1','6', '\3','8','1','7', '\3','8','1','8', '\3','8','1','9', + '\3','8','2','0', '\3','8','2','1', '\3','8','2','2', '\3','8','2','3', '\3','8','2','4', + '\3','8','2','5', '\3','8','2','6', '\3','8','2','7', '\3','8','2','8', '\3','8','2','9', + '\3','8','3','0', '\3','8','3','1', '\3','8','3','2', '\3','8','3','3', '\3','8','3','4', + '\3','8','3','5', '\3','8','3','6', '\3','8','3','7', '\3','8','3','8', '\3','8','3','9', + '\3','8','4','0', '\3','8','4','1', '\3','8','4','2', '\3','8','4','3', '\3','8','4','4', + '\3','8','4','5', '\3','8','4','6', '\3','8','4','7', '\3','8','4','8', '\3','8','4','9', + '\3','8','5','0', '\3','8','5','1', '\3','8','5','2', '\3','8','5','3', '\3','8','5','4', + '\3','8','5','5', '\3','8','5','6', '\3','8','5','7', '\3','8','5','8', '\3','8','5','9', + '\3','8','6','0', '\3','8','6','1', 
'\3','8','6','2', '\3','8','6','3', '\3','8','6','4', + '\3','8','6','5', '\3','8','6','6', '\3','8','6','7', '\3','8','6','8', '\3','8','6','9', + '\3','8','7','0', '\3','8','7','1', '\3','8','7','2', '\3','8','7','3', '\3','8','7','4', + '\3','8','7','5', '\3','8','7','6', '\3','8','7','7', '\3','8','7','8', '\3','8','7','9', + '\3','8','8','0', '\3','8','8','1', '\3','8','8','2', '\3','8','8','3', '\3','8','8','4', + '\3','8','8','5', '\3','8','8','6', '\3','8','8','7', '\3','8','8','8', '\3','8','8','9', + '\3','8','9','0', '\3','8','9','1', '\3','8','9','2', '\3','8','9','3', '\3','8','9','4', + '\3','8','9','5', '\3','8','9','6', '\3','8','9','7', '\3','8','9','8', '\3','8','9','9', + '\3','9','0','0', '\3','9','0','1', '\3','9','0','2', '\3','9','0','3', '\3','9','0','4', + '\3','9','0','5', '\3','9','0','6', '\3','9','0','7', '\3','9','0','8', '\3','9','0','9', + '\3','9','1','0', '\3','9','1','1', '\3','9','1','2', '\3','9','1','3', '\3','9','1','4', + '\3','9','1','5', '\3','9','1','6', '\3','9','1','7', '\3','9','1','8', '\3','9','1','9', + '\3','9','2','0', '\3','9','2','1', '\3','9','2','2', '\3','9','2','3', '\3','9','2','4', + '\3','9','2','5', '\3','9','2','6', '\3','9','2','7', '\3','9','2','8', '\3','9','2','9', + '\3','9','3','0', '\3','9','3','1', '\3','9','3','2', '\3','9','3','3', '\3','9','3','4', + '\3','9','3','5', '\3','9','3','6', '\3','9','3','7', '\3','9','3','8', '\3','9','3','9', + '\3','9','4','0', '\3','9','4','1', '\3','9','4','2', '\3','9','4','3', '\3','9','4','4', + '\3','9','4','5', '\3','9','4','6', '\3','9','4','7', '\3','9','4','8', '\3','9','4','9', + '\3','9','5','0', '\3','9','5','1', '\3','9','5','2', '\3','9','5','3', '\3','9','5','4', + '\3','9','5','5', '\3','9','5','6', '\3','9','5','7', '\3','9','5','8', '\3','9','5','9', + '\3','9','6','0', '\3','9','6','1', '\3','9','6','2', '\3','9','6','3', '\3','9','6','4', + '\3','9','6','5', '\3','9','6','6', '\3','9','6','7', '\3','9','6','8', '\3','9','6','9', + '\3','9','7','0', '\3','9','7','1', '\3','9','7','2', '\3','9','7','3', '\3','9','7','4', + '\3','9','7','5', '\3','9','7','6', '\3','9','7','7', '\3','9','7','8', '\3','9','7','9', + '\3','9','8','0', '\3','9','8','1', '\3','9','8','2', '\3','9','8','3', '\3','9','8','4', + '\3','9','8','5', '\3','9','8','6', '\3','9','8','7', '\3','9','8','8', '\3','9','8','9', + '\3','9','9','0', '\3','9','9','1', '\3','9','9','2', '\3','9','9','3', '\3','9','9','4', + '\3','9','9','5', '\3','9','9','6', '\3','9','9','7', '\3','9','9','8', '\3','9','9','9', '\0'}; +#endif + +#if defined(DEC_DPD2BCD8) && DEC_DPD2BCD8==1 && !defined(DECDPD2BCD8) +#define DECDPD2BCD8 + +const uint8_t DPD2BCD8[4096]={ + 0,0,0,0, 0,0,1,1, 0,0,2,1, 0,0,3,1, 0,0,4,1, 0,0,5,1, 0,0,6,1, 0,0,7,1, 0,0,8,1, + 0,0,9,1, 0,8,0,2, 0,8,1,2, 8,0,0,3, 8,0,1,3, 8,8,0,3, 8,8,1,3, 0,1,0,2, 0,1,1,2, + 0,1,2,2, 0,1,3,2, 0,1,4,2, 0,1,5,2, 0,1,6,2, 0,1,7,2, 0,1,8,2, 0,1,9,2, 0,9,0,2, + 0,9,1,2, 8,1,0,3, 8,1,1,3, 8,9,0,3, 8,9,1,3, 0,2,0,2, 0,2,1,2, 0,2,2,2, 0,2,3,2, + 0,2,4,2, 0,2,5,2, 0,2,6,2, 0,2,7,2, 0,2,8,2, 0,2,9,2, 0,8,2,2, 0,8,3,2, 8,2,0,3, + 8,2,1,3, 8,0,8,3, 8,0,9,3, 0,3,0,2, 0,3,1,2, 0,3,2,2, 0,3,3,2, 0,3,4,2, 0,3,5,2, + 0,3,6,2, 0,3,7,2, 0,3,8,2, 0,3,9,2, 0,9,2,2, 0,9,3,2, 8,3,0,3, 8,3,1,3, 8,1,8,3, + 8,1,9,3, 0,4,0,2, 0,4,1,2, 0,4,2,2, 0,4,3,2, 0,4,4,2, 0,4,5,2, 0,4,6,2, 0,4,7,2, + 0,4,8,2, 0,4,9,2, 0,8,4,2, 0,8,5,2, 8,4,0,3, 8,4,1,3, 0,8,8,2, 0,8,9,2, 0,5,0,2, + 0,5,1,2, 0,5,2,2, 0,5,3,2, 0,5,4,2, 0,5,5,2, 0,5,6,2, 0,5,7,2, 0,5,8,2, 0,5,9,2, + 0,9,4,2, 0,9,5,2, 8,5,0,3, 8,5,1,3, 
0,9,8,2, 0,9,9,2, 0,6,0,2, 0,6,1,2, 0,6,2,2, + 0,6,3,2, 0,6,4,2, 0,6,5,2, 0,6,6,2, 0,6,7,2, 0,6,8,2, 0,6,9,2, 0,8,6,2, 0,8,7,2, + 8,6,0,3, 8,6,1,3, 8,8,8,3, 8,8,9,3, 0,7,0,2, 0,7,1,2, 0,7,2,2, 0,7,3,2, 0,7,4,2, + 0,7,5,2, 0,7,6,2, 0,7,7,2, 0,7,8,2, 0,7,9,2, 0,9,6,2, 0,9,7,2, 8,7,0,3, 8,7,1,3, + 8,9,8,3, 8,9,9,3, 1,0,0,3, 1,0,1,3, 1,0,2,3, 1,0,3,3, 1,0,4,3, 1,0,5,3, 1,0,6,3, + 1,0,7,3, 1,0,8,3, 1,0,9,3, 1,8,0,3, 1,8,1,3, 9,0,0,3, 9,0,1,3, 9,8,0,3, 9,8,1,3, + 1,1,0,3, 1,1,1,3, 1,1,2,3, 1,1,3,3, 1,1,4,3, 1,1,5,3, 1,1,6,3, 1,1,7,3, 1,1,8,3, + 1,1,9,3, 1,9,0,3, 1,9,1,3, 9,1,0,3, 9,1,1,3, 9,9,0,3, 9,9,1,3, 1,2,0,3, 1,2,1,3, + 1,2,2,3, 1,2,3,3, 1,2,4,3, 1,2,5,3, 1,2,6,3, 1,2,7,3, 1,2,8,3, 1,2,9,3, 1,8,2,3, + 1,8,3,3, 9,2,0,3, 9,2,1,3, 9,0,8,3, 9,0,9,3, 1,3,0,3, 1,3,1,3, 1,3,2,3, 1,3,3,3, + 1,3,4,3, 1,3,5,3, 1,3,6,3, 1,3,7,3, 1,3,8,3, 1,3,9,3, 1,9,2,3, 1,9,3,3, 9,3,0,3, + 9,3,1,3, 9,1,8,3, 9,1,9,3, 1,4,0,3, 1,4,1,3, 1,4,2,3, 1,4,3,3, 1,4,4,3, 1,4,5,3, + 1,4,6,3, 1,4,7,3, 1,4,8,3, 1,4,9,3, 1,8,4,3, 1,8,5,3, 9,4,0,3, 9,4,1,3, 1,8,8,3, + 1,8,9,3, 1,5,0,3, 1,5,1,3, 1,5,2,3, 1,5,3,3, 1,5,4,3, 1,5,5,3, 1,5,6,3, 1,5,7,3, + 1,5,8,3, 1,5,9,3, 1,9,4,3, 1,9,5,3, 9,5,0,3, 9,5,1,3, 1,9,8,3, 1,9,9,3, 1,6,0,3, + 1,6,1,3, 1,6,2,3, 1,6,3,3, 1,6,4,3, 1,6,5,3, 1,6,6,3, 1,6,7,3, 1,6,8,3, 1,6,9,3, + 1,8,6,3, 1,8,7,3, 9,6,0,3, 9,6,1,3, 9,8,8,3, 9,8,9,3, 1,7,0,3, 1,7,1,3, 1,7,2,3, + 1,7,3,3, 1,7,4,3, 1,7,5,3, 1,7,6,3, 1,7,7,3, 1,7,8,3, 1,7,9,3, 1,9,6,3, 1,9,7,3, + 9,7,0,3, 9,7,1,3, 9,9,8,3, 9,9,9,3, 2,0,0,3, 2,0,1,3, 2,0,2,3, 2,0,3,3, 2,0,4,3, + 2,0,5,3, 2,0,6,3, 2,0,7,3, 2,0,8,3, 2,0,9,3, 2,8,0,3, 2,8,1,3, 8,0,2,3, 8,0,3,3, + 8,8,2,3, 8,8,3,3, 2,1,0,3, 2,1,1,3, 2,1,2,3, 2,1,3,3, 2,1,4,3, 2,1,5,3, 2,1,6,3, + 2,1,7,3, 2,1,8,3, 2,1,9,3, 2,9,0,3, 2,9,1,3, 8,1,2,3, 8,1,3,3, 8,9,2,3, 8,9,3,3, + 2,2,0,3, 2,2,1,3, 2,2,2,3, 2,2,3,3, 2,2,4,3, 2,2,5,3, 2,2,6,3, 2,2,7,3, 2,2,8,3, + 2,2,9,3, 2,8,2,3, 2,8,3,3, 8,2,2,3, 8,2,3,3, 8,2,8,3, 8,2,9,3, 2,3,0,3, 2,3,1,3, + 2,3,2,3, 2,3,3,3, 2,3,4,3, 2,3,5,3, 2,3,6,3, 2,3,7,3, 2,3,8,3, 2,3,9,3, 2,9,2,3, + 2,9,3,3, 8,3,2,3, 8,3,3,3, 8,3,8,3, 8,3,9,3, 2,4,0,3, 2,4,1,3, 2,4,2,3, 2,4,3,3, + 2,4,4,3, 2,4,5,3, 2,4,6,3, 2,4,7,3, 2,4,8,3, 2,4,9,3, 2,8,4,3, 2,8,5,3, 8,4,2,3, + 8,4,3,3, 2,8,8,3, 2,8,9,3, 2,5,0,3, 2,5,1,3, 2,5,2,3, 2,5,3,3, 2,5,4,3, 2,5,5,3, + 2,5,6,3, 2,5,7,3, 2,5,8,3, 2,5,9,3, 2,9,4,3, 2,9,5,3, 8,5,2,3, 8,5,3,3, 2,9,8,3, + 2,9,9,3, 2,6,0,3, 2,6,1,3, 2,6,2,3, 2,6,3,3, 2,6,4,3, 2,6,5,3, 2,6,6,3, 2,6,7,3, + 2,6,8,3, 2,6,9,3, 2,8,6,3, 2,8,7,3, 8,6,2,3, 8,6,3,3, 8,8,8,3, 8,8,9,3, 2,7,0,3, + 2,7,1,3, 2,7,2,3, 2,7,3,3, 2,7,4,3, 2,7,5,3, 2,7,6,3, 2,7,7,3, 2,7,8,3, 2,7,9,3, + 2,9,6,3, 2,9,7,3, 8,7,2,3, 8,7,3,3, 8,9,8,3, 8,9,9,3, 3,0,0,3, 3,0,1,3, 3,0,2,3, + 3,0,3,3, 3,0,4,3, 3,0,5,3, 3,0,6,3, 3,0,7,3, 3,0,8,3, 3,0,9,3, 3,8,0,3, 3,8,1,3, + 9,0,2,3, 9,0,3,3, 9,8,2,3, 9,8,3,3, 3,1,0,3, 3,1,1,3, 3,1,2,3, 3,1,3,3, 3,1,4,3, + 3,1,5,3, 3,1,6,3, 3,1,7,3, 3,1,8,3, 3,1,9,3, 3,9,0,3, 3,9,1,3, 9,1,2,3, 9,1,3,3, + 9,9,2,3, 9,9,3,3, 3,2,0,3, 3,2,1,3, 3,2,2,3, 3,2,3,3, 3,2,4,3, 3,2,5,3, 3,2,6,3, + 3,2,7,3, 3,2,8,3, 3,2,9,3, 3,8,2,3, 3,8,3,3, 9,2,2,3, 9,2,3,3, 9,2,8,3, 9,2,9,3, + 3,3,0,3, 3,3,1,3, 3,3,2,3, 3,3,3,3, 3,3,4,3, 3,3,5,3, 3,3,6,3, 3,3,7,3, 3,3,8,3, + 3,3,9,3, 3,9,2,3, 3,9,3,3, 9,3,2,3, 9,3,3,3, 9,3,8,3, 9,3,9,3, 3,4,0,3, 3,4,1,3, + 3,4,2,3, 3,4,3,3, 3,4,4,3, 3,4,5,3, 3,4,6,3, 3,4,7,3, 3,4,8,3, 3,4,9,3, 3,8,4,3, + 3,8,5,3, 9,4,2,3, 9,4,3,3, 3,8,8,3, 3,8,9,3, 3,5,0,3, 3,5,1,3, 3,5,2,3, 3,5,3,3, + 3,5,4,3, 3,5,5,3, 3,5,6,3, 3,5,7,3, 3,5,8,3, 3,5,9,3, 3,9,4,3, 3,9,5,3, 9,5,2,3, + 9,5,3,3, 3,9,8,3, 
3,9,9,3, 3,6,0,3, 3,6,1,3, 3,6,2,3, 3,6,3,3, 3,6,4,3, 3,6,5,3, + 3,6,6,3, 3,6,7,3, 3,6,8,3, 3,6,9,3, 3,8,6,3, 3,8,7,3, 9,6,2,3, 9,6,3,3, 9,8,8,3, + 9,8,9,3, 3,7,0,3, 3,7,1,3, 3,7,2,3, 3,7,3,3, 3,7,4,3, 3,7,5,3, 3,7,6,3, 3,7,7,3, + 3,7,8,3, 3,7,9,3, 3,9,6,3, 3,9,7,3, 9,7,2,3, 9,7,3,3, 9,9,8,3, 9,9,9,3, 4,0,0,3, + 4,0,1,3, 4,0,2,3, 4,0,3,3, 4,0,4,3, 4,0,5,3, 4,0,6,3, 4,0,7,3, 4,0,8,3, 4,0,9,3, + 4,8,0,3, 4,8,1,3, 8,0,4,3, 8,0,5,3, 8,8,4,3, 8,8,5,3, 4,1,0,3, 4,1,1,3, 4,1,2,3, + 4,1,3,3, 4,1,4,3, 4,1,5,3, 4,1,6,3, 4,1,7,3, 4,1,8,3, 4,1,9,3, 4,9,0,3, 4,9,1,3, + 8,1,4,3, 8,1,5,3, 8,9,4,3, 8,9,5,3, 4,2,0,3, 4,2,1,3, 4,2,2,3, 4,2,3,3, 4,2,4,3, + 4,2,5,3, 4,2,6,3, 4,2,7,3, 4,2,8,3, 4,2,9,3, 4,8,2,3, 4,8,3,3, 8,2,4,3, 8,2,5,3, + 8,4,8,3, 8,4,9,3, 4,3,0,3, 4,3,1,3, 4,3,2,3, 4,3,3,3, 4,3,4,3, 4,3,5,3, 4,3,6,3, + 4,3,7,3, 4,3,8,3, 4,3,9,3, 4,9,2,3, 4,9,3,3, 8,3,4,3, 8,3,5,3, 8,5,8,3, 8,5,9,3, + 4,4,0,3, 4,4,1,3, 4,4,2,3, 4,4,3,3, 4,4,4,3, 4,4,5,3, 4,4,6,3, 4,4,7,3, 4,4,8,3, + 4,4,9,3, 4,8,4,3, 4,8,5,3, 8,4,4,3, 8,4,5,3, 4,8,8,3, 4,8,9,3, 4,5,0,3, 4,5,1,3, + 4,5,2,3, 4,5,3,3, 4,5,4,3, 4,5,5,3, 4,5,6,3, 4,5,7,3, 4,5,8,3, 4,5,9,3, 4,9,4,3, + 4,9,5,3, 8,5,4,3, 8,5,5,3, 4,9,8,3, 4,9,9,3, 4,6,0,3, 4,6,1,3, 4,6,2,3, 4,6,3,3, + 4,6,4,3, 4,6,5,3, 4,6,6,3, 4,6,7,3, 4,6,8,3, 4,6,9,3, 4,8,6,3, 4,8,7,3, 8,6,4,3, + 8,6,5,3, 8,8,8,3, 8,8,9,3, 4,7,0,3, 4,7,1,3, 4,7,2,3, 4,7,3,3, 4,7,4,3, 4,7,5,3, + 4,7,6,3, 4,7,7,3, 4,7,8,3, 4,7,9,3, 4,9,6,3, 4,9,7,3, 8,7,4,3, 8,7,5,3, 8,9,8,3, + 8,9,9,3, 5,0,0,3, 5,0,1,3, 5,0,2,3, 5,0,3,3, 5,0,4,3, 5,0,5,3, 5,0,6,3, 5,0,7,3, + 5,0,8,3, 5,0,9,3, 5,8,0,3, 5,8,1,3, 9,0,4,3, 9,0,5,3, 9,8,4,3, 9,8,5,3, 5,1,0,3, + 5,1,1,3, 5,1,2,3, 5,1,3,3, 5,1,4,3, 5,1,5,3, 5,1,6,3, 5,1,7,3, 5,1,8,3, 5,1,9,3, + 5,9,0,3, 5,9,1,3, 9,1,4,3, 9,1,5,3, 9,9,4,3, 9,9,5,3, 5,2,0,3, 5,2,1,3, 5,2,2,3, + 5,2,3,3, 5,2,4,3, 5,2,5,3, 5,2,6,3, 5,2,7,3, 5,2,8,3, 5,2,9,3, 5,8,2,3, 5,8,3,3, + 9,2,4,3, 9,2,5,3, 9,4,8,3, 9,4,9,3, 5,3,0,3, 5,3,1,3, 5,3,2,3, 5,3,3,3, 5,3,4,3, + 5,3,5,3, 5,3,6,3, 5,3,7,3, 5,3,8,3, 5,3,9,3, 5,9,2,3, 5,9,3,3, 9,3,4,3, 9,3,5,3, + 9,5,8,3, 9,5,9,3, 5,4,0,3, 5,4,1,3, 5,4,2,3, 5,4,3,3, 5,4,4,3, 5,4,5,3, 5,4,6,3, + 5,4,7,3, 5,4,8,3, 5,4,9,3, 5,8,4,3, 5,8,5,3, 9,4,4,3, 9,4,5,3, 5,8,8,3, 5,8,9,3, + 5,5,0,3, 5,5,1,3, 5,5,2,3, 5,5,3,3, 5,5,4,3, 5,5,5,3, 5,5,6,3, 5,5,7,3, 5,5,8,3, + 5,5,9,3, 5,9,4,3, 5,9,5,3, 9,5,4,3, 9,5,5,3, 5,9,8,3, 5,9,9,3, 5,6,0,3, 5,6,1,3, + 5,6,2,3, 5,6,3,3, 5,6,4,3, 5,6,5,3, 5,6,6,3, 5,6,7,3, 5,6,8,3, 5,6,9,3, 5,8,6,3, + 5,8,7,3, 9,6,4,3, 9,6,5,3, 9,8,8,3, 9,8,9,3, 5,7,0,3, 5,7,1,3, 5,7,2,3, 5,7,3,3, + 5,7,4,3, 5,7,5,3, 5,7,6,3, 5,7,7,3, 5,7,8,3, 5,7,9,3, 5,9,6,3, 5,9,7,3, 9,7,4,3, + 9,7,5,3, 9,9,8,3, 9,9,9,3, 6,0,0,3, 6,0,1,3, 6,0,2,3, 6,0,3,3, 6,0,4,3, 6,0,5,3, + 6,0,6,3, 6,0,7,3, 6,0,8,3, 6,0,9,3, 6,8,0,3, 6,8,1,3, 8,0,6,3, 8,0,7,3, 8,8,6,3, + 8,8,7,3, 6,1,0,3, 6,1,1,3, 6,1,2,3, 6,1,3,3, 6,1,4,3, 6,1,5,3, 6,1,6,3, 6,1,7,3, + 6,1,8,3, 6,1,9,3, 6,9,0,3, 6,9,1,3, 8,1,6,3, 8,1,7,3, 8,9,6,3, 8,9,7,3, 6,2,0,3, + 6,2,1,3, 6,2,2,3, 6,2,3,3, 6,2,4,3, 6,2,5,3, 6,2,6,3, 6,2,7,3, 6,2,8,3, 6,2,9,3, + 6,8,2,3, 6,8,3,3, 8,2,6,3, 8,2,7,3, 8,6,8,3, 8,6,9,3, 6,3,0,3, 6,3,1,3, 6,3,2,3, + 6,3,3,3, 6,3,4,3, 6,3,5,3, 6,3,6,3, 6,3,7,3, 6,3,8,3, 6,3,9,3, 6,9,2,3, 6,9,3,3, + 8,3,6,3, 8,3,7,3, 8,7,8,3, 8,7,9,3, 6,4,0,3, 6,4,1,3, 6,4,2,3, 6,4,3,3, 6,4,4,3, + 6,4,5,3, 6,4,6,3, 6,4,7,3, 6,4,8,3, 6,4,9,3, 6,8,4,3, 6,8,5,3, 8,4,6,3, 8,4,7,3, + 6,8,8,3, 6,8,9,3, 6,5,0,3, 6,5,1,3, 6,5,2,3, 6,5,3,3, 6,5,4,3, 6,5,5,3, 6,5,6,3, + 6,5,7,3, 6,5,8,3, 6,5,9,3, 6,9,4,3, 6,9,5,3, 8,5,6,3, 8,5,7,3, 6,9,8,3, 6,9,9,3, + 
6,6,0,3, 6,6,1,3, 6,6,2,3, 6,6,3,3, 6,6,4,3, 6,6,5,3, 6,6,6,3, 6,6,7,3, 6,6,8,3, + 6,6,9,3, 6,8,6,3, 6,8,7,3, 8,6,6,3, 8,6,7,3, 8,8,8,3, 8,8,9,3, 6,7,0,3, 6,7,1,3, + 6,7,2,3, 6,7,3,3, 6,7,4,3, 6,7,5,3, 6,7,6,3, 6,7,7,3, 6,7,8,3, 6,7,9,3, 6,9,6,3, + 6,9,7,3, 8,7,6,3, 8,7,7,3, 8,9,8,3, 8,9,9,3, 7,0,0,3, 7,0,1,3, 7,0,2,3, 7,0,3,3, + 7,0,4,3, 7,0,5,3, 7,0,6,3, 7,0,7,3, 7,0,8,3, 7,0,9,3, 7,8,0,3, 7,8,1,3, 9,0,6,3, + 9,0,7,3, 9,8,6,3, 9,8,7,3, 7,1,0,3, 7,1,1,3, 7,1,2,3, 7,1,3,3, 7,1,4,3, 7,1,5,3, + 7,1,6,3, 7,1,7,3, 7,1,8,3, 7,1,9,3, 7,9,0,3, 7,9,1,3, 9,1,6,3, 9,1,7,3, 9,9,6,3, + 9,9,7,3, 7,2,0,3, 7,2,1,3, 7,2,2,3, 7,2,3,3, 7,2,4,3, 7,2,5,3, 7,2,6,3, 7,2,7,3, + 7,2,8,3, 7,2,9,3, 7,8,2,3, 7,8,3,3, 9,2,6,3, 9,2,7,3, 9,6,8,3, 9,6,9,3, 7,3,0,3, + 7,3,1,3, 7,3,2,3, 7,3,3,3, 7,3,4,3, 7,3,5,3, 7,3,6,3, 7,3,7,3, 7,3,8,3, 7,3,9,3, + 7,9,2,3, 7,9,3,3, 9,3,6,3, 9,3,7,3, 9,7,8,3, 9,7,9,3, 7,4,0,3, 7,4,1,3, 7,4,2,3, + 7,4,3,3, 7,4,4,3, 7,4,5,3, 7,4,6,3, 7,4,7,3, 7,4,8,3, 7,4,9,3, 7,8,4,3, 7,8,5,3, + 9,4,6,3, 9,4,7,3, 7,8,8,3, 7,8,9,3, 7,5,0,3, 7,5,1,3, 7,5,2,3, 7,5,3,3, 7,5,4,3, + 7,5,5,3, 7,5,6,3, 7,5,7,3, 7,5,8,3, 7,5,9,3, 7,9,4,3, 7,9,5,3, 9,5,6,3, 9,5,7,3, + 7,9,8,3, 7,9,9,3, 7,6,0,3, 7,6,1,3, 7,6,2,3, 7,6,3,3, 7,6,4,3, 7,6,5,3, 7,6,6,3, + 7,6,7,3, 7,6,8,3, 7,6,9,3, 7,8,6,3, 7,8,7,3, 9,6,6,3, 9,6,7,3, 9,8,8,3, 9,8,9,3, + 7,7,0,3, 7,7,1,3, 7,7,2,3, 7,7,3,3, 7,7,4,3, 7,7,5,3, 7,7,6,3, 7,7,7,3, 7,7,8,3, + 7,7,9,3, 7,9,6,3, 7,9,7,3, 9,7,6,3, 9,7,7,3, 9,9,8,3, 9,9,9,3}; +#endif + +#if defined(DEC_BIN2BCD8) && DEC_BIN2BCD8==1 && !defined(DECBIN2BCD8) +#define DECBIN2BCD8 + +const uint8_t BIN2BCD8[4000]={ + 0,0,0,0, 0,0,1,1, 0,0,2,1, 0,0,3,1, 0,0,4,1, 0,0,5,1, 0,0,6,1, 0,0,7,1, 0,0,8,1, + 0,0,9,1, 0,1,0,2, 0,1,1,2, 0,1,2,2, 0,1,3,2, 0,1,4,2, 0,1,5,2, 0,1,6,2, 0,1,7,2, + 0,1,8,2, 0,1,9,2, 0,2,0,2, 0,2,1,2, 0,2,2,2, 0,2,3,2, 0,2,4,2, 0,2,5,2, 0,2,6,2, + 0,2,7,2, 0,2,8,2, 0,2,9,2, 0,3,0,2, 0,3,1,2, 0,3,2,2, 0,3,3,2, 0,3,4,2, 0,3,5,2, + 0,3,6,2, 0,3,7,2, 0,3,8,2, 0,3,9,2, 0,4,0,2, 0,4,1,2, 0,4,2,2, 0,4,3,2, 0,4,4,2, + 0,4,5,2, 0,4,6,2, 0,4,7,2, 0,4,8,2, 0,4,9,2, 0,5,0,2, 0,5,1,2, 0,5,2,2, 0,5,3,2, + 0,5,4,2, 0,5,5,2, 0,5,6,2, 0,5,7,2, 0,5,8,2, 0,5,9,2, 0,6,0,2, 0,6,1,2, 0,6,2,2, + 0,6,3,2, 0,6,4,2, 0,6,5,2, 0,6,6,2, 0,6,7,2, 0,6,8,2, 0,6,9,2, 0,7,0,2, 0,7,1,2, + 0,7,2,2, 0,7,3,2, 0,7,4,2, 0,7,5,2, 0,7,6,2, 0,7,7,2, 0,7,8,2, 0,7,9,2, 0,8,0,2, + 0,8,1,2, 0,8,2,2, 0,8,3,2, 0,8,4,2, 0,8,5,2, 0,8,6,2, 0,8,7,2, 0,8,8,2, 0,8,9,2, + 0,9,0,2, 0,9,1,2, 0,9,2,2, 0,9,3,2, 0,9,4,2, 0,9,5,2, 0,9,6,2, 0,9,7,2, 0,9,8,2, + 0,9,9,2, 1,0,0,3, 1,0,1,3, 1,0,2,3, 1,0,3,3, 1,0,4,3, 1,0,5,3, 1,0,6,3, 1,0,7,3, + 1,0,8,3, 1,0,9,3, 1,1,0,3, 1,1,1,3, 1,1,2,3, 1,1,3,3, 1,1,4,3, 1,1,5,3, 1,1,6,3, + 1,1,7,3, 1,1,8,3, 1,1,9,3, 1,2,0,3, 1,2,1,3, 1,2,2,3, 1,2,3,3, 1,2,4,3, 1,2,5,3, + 1,2,6,3, 1,2,7,3, 1,2,8,3, 1,2,9,3, 1,3,0,3, 1,3,1,3, 1,3,2,3, 1,3,3,3, 1,3,4,3, + 1,3,5,3, 1,3,6,3, 1,3,7,3, 1,3,8,3, 1,3,9,3, 1,4,0,3, 1,4,1,3, 1,4,2,3, 1,4,3,3, + 1,4,4,3, 1,4,5,3, 1,4,6,3, 1,4,7,3, 1,4,8,3, 1,4,9,3, 1,5,0,3, 1,5,1,3, 1,5,2,3, + 1,5,3,3, 1,5,4,3, 1,5,5,3, 1,5,6,3, 1,5,7,3, 1,5,8,3, 1,5,9,3, 1,6,0,3, 1,6,1,3, + 1,6,2,3, 1,6,3,3, 1,6,4,3, 1,6,5,3, 1,6,6,3, 1,6,7,3, 1,6,8,3, 1,6,9,3, 1,7,0,3, + 1,7,1,3, 1,7,2,3, 1,7,3,3, 1,7,4,3, 1,7,5,3, 1,7,6,3, 1,7,7,3, 1,7,8,3, 1,7,9,3, + 1,8,0,3, 1,8,1,3, 1,8,2,3, 1,8,3,3, 1,8,4,3, 1,8,5,3, 1,8,6,3, 1,8,7,3, 1,8,8,3, + 1,8,9,3, 1,9,0,3, 1,9,1,3, 1,9,2,3, 1,9,3,3, 1,9,4,3, 1,9,5,3, 1,9,6,3, 1,9,7,3, + 1,9,8,3, 1,9,9,3, 2,0,0,3, 2,0,1,3, 2,0,2,3, 2,0,3,3, 2,0,4,3, 2,0,5,3, 2,0,6,3, + 2,0,7,3, 2,0,8,3, 2,0,9,3, 
2,1,0,3, 2,1,1,3, 2,1,2,3, 2,1,3,3, 2,1,4,3, 2,1,5,3, + 2,1,6,3, 2,1,7,3, 2,1,8,3, 2,1,9,3, 2,2,0,3, 2,2,1,3, 2,2,2,3, 2,2,3,3, 2,2,4,3, + 2,2,5,3, 2,2,6,3, 2,2,7,3, 2,2,8,3, 2,2,9,3, 2,3,0,3, 2,3,1,3, 2,3,2,3, 2,3,3,3, + 2,3,4,3, 2,3,5,3, 2,3,6,3, 2,3,7,3, 2,3,8,3, 2,3,9,3, 2,4,0,3, 2,4,1,3, 2,4,2,3, + 2,4,3,3, 2,4,4,3, 2,4,5,3, 2,4,6,3, 2,4,7,3, 2,4,8,3, 2,4,9,3, 2,5,0,3, 2,5,1,3, + 2,5,2,3, 2,5,3,3, 2,5,4,3, 2,5,5,3, 2,5,6,3, 2,5,7,3, 2,5,8,3, 2,5,9,3, 2,6,0,3, + 2,6,1,3, 2,6,2,3, 2,6,3,3, 2,6,4,3, 2,6,5,3, 2,6,6,3, 2,6,7,3, 2,6,8,3, 2,6,9,3, + 2,7,0,3, 2,7,1,3, 2,7,2,3, 2,7,3,3, 2,7,4,3, 2,7,5,3, 2,7,6,3, 2,7,7,3, 2,7,8,3, + 2,7,9,3, 2,8,0,3, 2,8,1,3, 2,8,2,3, 2,8,3,3, 2,8,4,3, 2,8,5,3, 2,8,6,3, 2,8,7,3, + 2,8,8,3, 2,8,9,3, 2,9,0,3, 2,9,1,3, 2,9,2,3, 2,9,3,3, 2,9,4,3, 2,9,5,3, 2,9,6,3, + 2,9,7,3, 2,9,8,3, 2,9,9,3, 3,0,0,3, 3,0,1,3, 3,0,2,3, 3,0,3,3, 3,0,4,3, 3,0,5,3, + 3,0,6,3, 3,0,7,3, 3,0,8,3, 3,0,9,3, 3,1,0,3, 3,1,1,3, 3,1,2,3, 3,1,3,3, 3,1,4,3, + 3,1,5,3, 3,1,6,3, 3,1,7,3, 3,1,8,3, 3,1,9,3, 3,2,0,3, 3,2,1,3, 3,2,2,3, 3,2,3,3, + 3,2,4,3, 3,2,5,3, 3,2,6,3, 3,2,7,3, 3,2,8,3, 3,2,9,3, 3,3,0,3, 3,3,1,3, 3,3,2,3, + 3,3,3,3, 3,3,4,3, 3,3,5,3, 3,3,6,3, 3,3,7,3, 3,3,8,3, 3,3,9,3, 3,4,0,3, 3,4,1,3, + 3,4,2,3, 3,4,3,3, 3,4,4,3, 3,4,5,3, 3,4,6,3, 3,4,7,3, 3,4,8,3, 3,4,9,3, 3,5,0,3, + 3,5,1,3, 3,5,2,3, 3,5,3,3, 3,5,4,3, 3,5,5,3, 3,5,6,3, 3,5,7,3, 3,5,8,3, 3,5,9,3, + 3,6,0,3, 3,6,1,3, 3,6,2,3, 3,6,3,3, 3,6,4,3, 3,6,5,3, 3,6,6,3, 3,6,7,3, 3,6,8,3, + 3,6,9,3, 3,7,0,3, 3,7,1,3, 3,7,2,3, 3,7,3,3, 3,7,4,3, 3,7,5,3, 3,7,6,3, 3,7,7,3, + 3,7,8,3, 3,7,9,3, 3,8,0,3, 3,8,1,3, 3,8,2,3, 3,8,3,3, 3,8,4,3, 3,8,5,3, 3,8,6,3, + 3,8,7,3, 3,8,8,3, 3,8,9,3, 3,9,0,3, 3,9,1,3, 3,9,2,3, 3,9,3,3, 3,9,4,3, 3,9,5,3, + 3,9,6,3, 3,9,7,3, 3,9,8,3, 3,9,9,3, 4,0,0,3, 4,0,1,3, 4,0,2,3, 4,0,3,3, 4,0,4,3, + 4,0,5,3, 4,0,6,3, 4,0,7,3, 4,0,8,3, 4,0,9,3, 4,1,0,3, 4,1,1,3, 4,1,2,3, 4,1,3,3, + 4,1,4,3, 4,1,5,3, 4,1,6,3, 4,1,7,3, 4,1,8,3, 4,1,9,3, 4,2,0,3, 4,2,1,3, 4,2,2,3, + 4,2,3,3, 4,2,4,3, 4,2,5,3, 4,2,6,3, 4,2,7,3, 4,2,8,3, 4,2,9,3, 4,3,0,3, 4,3,1,3, + 4,3,2,3, 4,3,3,3, 4,3,4,3, 4,3,5,3, 4,3,6,3, 4,3,7,3, 4,3,8,3, 4,3,9,3, 4,4,0,3, + 4,4,1,3, 4,4,2,3, 4,4,3,3, 4,4,4,3, 4,4,5,3, 4,4,6,3, 4,4,7,3, 4,4,8,3, 4,4,9,3, + 4,5,0,3, 4,5,1,3, 4,5,2,3, 4,5,3,3, 4,5,4,3, 4,5,5,3, 4,5,6,3, 4,5,7,3, 4,5,8,3, + 4,5,9,3, 4,6,0,3, 4,6,1,3, 4,6,2,3, 4,6,3,3, 4,6,4,3, 4,6,5,3, 4,6,6,3, 4,6,7,3, + 4,6,8,3, 4,6,9,3, 4,7,0,3, 4,7,1,3, 4,7,2,3, 4,7,3,3, 4,7,4,3, 4,7,5,3, 4,7,6,3, + 4,7,7,3, 4,7,8,3, 4,7,9,3, 4,8,0,3, 4,8,1,3, 4,8,2,3, 4,8,3,3, 4,8,4,3, 4,8,5,3, + 4,8,6,3, 4,8,7,3, 4,8,8,3, 4,8,9,3, 4,9,0,3, 4,9,1,3, 4,9,2,3, 4,9,3,3, 4,9,4,3, + 4,9,5,3, 4,9,6,3, 4,9,7,3, 4,9,8,3, 4,9,9,3, 5,0,0,3, 5,0,1,3, 5,0,2,3, 5,0,3,3, + 5,0,4,3, 5,0,5,3, 5,0,6,3, 5,0,7,3, 5,0,8,3, 5,0,9,3, 5,1,0,3, 5,1,1,3, 5,1,2,3, + 5,1,3,3, 5,1,4,3, 5,1,5,3, 5,1,6,3, 5,1,7,3, 5,1,8,3, 5,1,9,3, 5,2,0,3, 5,2,1,3, + 5,2,2,3, 5,2,3,3, 5,2,4,3, 5,2,5,3, 5,2,6,3, 5,2,7,3, 5,2,8,3, 5,2,9,3, 5,3,0,3, + 5,3,1,3, 5,3,2,3, 5,3,3,3, 5,3,4,3, 5,3,5,3, 5,3,6,3, 5,3,7,3, 5,3,8,3, 5,3,9,3, + 5,4,0,3, 5,4,1,3, 5,4,2,3, 5,4,3,3, 5,4,4,3, 5,4,5,3, 5,4,6,3, 5,4,7,3, 5,4,8,3, + 5,4,9,3, 5,5,0,3, 5,5,1,3, 5,5,2,3, 5,5,3,3, 5,5,4,3, 5,5,5,3, 5,5,6,3, 5,5,7,3, + 5,5,8,3, 5,5,9,3, 5,6,0,3, 5,6,1,3, 5,6,2,3, 5,6,3,3, 5,6,4,3, 5,6,5,3, 5,6,6,3, + 5,6,7,3, 5,6,8,3, 5,6,9,3, 5,7,0,3, 5,7,1,3, 5,7,2,3, 5,7,3,3, 5,7,4,3, 5,7,5,3, + 5,7,6,3, 5,7,7,3, 5,7,8,3, 5,7,9,3, 5,8,0,3, 5,8,1,3, 5,8,2,3, 5,8,3,3, 5,8,4,3, + 5,8,5,3, 5,8,6,3, 5,8,7,3, 5,8,8,3, 5,8,9,3, 5,9,0,3, 5,9,1,3, 5,9,2,3, 5,9,3,3, + 5,9,4,3, 
5,9,5,3, 5,9,6,3, 5,9,7,3, 5,9,8,3, 5,9,9,3, 6,0,0,3, 6,0,1,3, 6,0,2,3, + 6,0,3,3, 6,0,4,3, 6,0,5,3, 6,0,6,3, 6,0,7,3, 6,0,8,3, 6,0,9,3, 6,1,0,3, 6,1,1,3, + 6,1,2,3, 6,1,3,3, 6,1,4,3, 6,1,5,3, 6,1,6,3, 6,1,7,3, 6,1,8,3, 6,1,9,3, 6,2,0,3, + 6,2,1,3, 6,2,2,3, 6,2,3,3, 6,2,4,3, 6,2,5,3, 6,2,6,3, 6,2,7,3, 6,2,8,3, 6,2,9,3, + 6,3,0,3, 6,3,1,3, 6,3,2,3, 6,3,3,3, 6,3,4,3, 6,3,5,3, 6,3,6,3, 6,3,7,3, 6,3,8,3, + 6,3,9,3, 6,4,0,3, 6,4,1,3, 6,4,2,3, 6,4,3,3, 6,4,4,3, 6,4,5,3, 6,4,6,3, 6,4,7,3, + 6,4,8,3, 6,4,9,3, 6,5,0,3, 6,5,1,3, 6,5,2,3, 6,5,3,3, 6,5,4,3, 6,5,5,3, 6,5,6,3, + 6,5,7,3, 6,5,8,3, 6,5,9,3, 6,6,0,3, 6,6,1,3, 6,6,2,3, 6,6,3,3, 6,6,4,3, 6,6,5,3, + 6,6,6,3, 6,6,7,3, 6,6,8,3, 6,6,9,3, 6,7,0,3, 6,7,1,3, 6,7,2,3, 6,7,3,3, 6,7,4,3, + 6,7,5,3, 6,7,6,3, 6,7,7,3, 6,7,8,3, 6,7,9,3, 6,8,0,3, 6,8,1,3, 6,8,2,3, 6,8,3,3, + 6,8,4,3, 6,8,5,3, 6,8,6,3, 6,8,7,3, 6,8,8,3, 6,8,9,3, 6,9,0,3, 6,9,1,3, 6,9,2,3, + 6,9,3,3, 6,9,4,3, 6,9,5,3, 6,9,6,3, 6,9,7,3, 6,9,8,3, 6,9,9,3, 7,0,0,3, 7,0,1,3, + 7,0,2,3, 7,0,3,3, 7,0,4,3, 7,0,5,3, 7,0,6,3, 7,0,7,3, 7,0,8,3, 7,0,9,3, 7,1,0,3, + 7,1,1,3, 7,1,2,3, 7,1,3,3, 7,1,4,3, 7,1,5,3, 7,1,6,3, 7,1,7,3, 7,1,8,3, 7,1,9,3, + 7,2,0,3, 7,2,1,3, 7,2,2,3, 7,2,3,3, 7,2,4,3, 7,2,5,3, 7,2,6,3, 7,2,7,3, 7,2,8,3, + 7,2,9,3, 7,3,0,3, 7,3,1,3, 7,3,2,3, 7,3,3,3, 7,3,4,3, 7,3,5,3, 7,3,6,3, 7,3,7,3, + 7,3,8,3, 7,3,9,3, 7,4,0,3, 7,4,1,3, 7,4,2,3, 7,4,3,3, 7,4,4,3, 7,4,5,3, 7,4,6,3, + 7,4,7,3, 7,4,8,3, 7,4,9,3, 7,5,0,3, 7,5,1,3, 7,5,2,3, 7,5,3,3, 7,5,4,3, 7,5,5,3, + 7,5,6,3, 7,5,7,3, 7,5,8,3, 7,5,9,3, 7,6,0,3, 7,6,1,3, 7,6,2,3, 7,6,3,3, 7,6,4,3, + 7,6,5,3, 7,6,6,3, 7,6,7,3, 7,6,8,3, 7,6,9,3, 7,7,0,3, 7,7,1,3, 7,7,2,3, 7,7,3,3, + 7,7,4,3, 7,7,5,3, 7,7,6,3, 7,7,7,3, 7,7,8,3, 7,7,9,3, 7,8,0,3, 7,8,1,3, 7,8,2,3, + 7,8,3,3, 7,8,4,3, 7,8,5,3, 7,8,6,3, 7,8,7,3, 7,8,8,3, 7,8,9,3, 7,9,0,3, 7,9,1,3, + 7,9,2,3, 7,9,3,3, 7,9,4,3, 7,9,5,3, 7,9,6,3, 7,9,7,3, 7,9,8,3, 7,9,9,3, 8,0,0,3, + 8,0,1,3, 8,0,2,3, 8,0,3,3, 8,0,4,3, 8,0,5,3, 8,0,6,3, 8,0,7,3, 8,0,8,3, 8,0,9,3, + 8,1,0,3, 8,1,1,3, 8,1,2,3, 8,1,3,3, 8,1,4,3, 8,1,5,3, 8,1,6,3, 8,1,7,3, 8,1,8,3, + 8,1,9,3, 8,2,0,3, 8,2,1,3, 8,2,2,3, 8,2,3,3, 8,2,4,3, 8,2,5,3, 8,2,6,3, 8,2,7,3, + 8,2,8,3, 8,2,9,3, 8,3,0,3, 8,3,1,3, 8,3,2,3, 8,3,3,3, 8,3,4,3, 8,3,5,3, 8,3,6,3, + 8,3,7,3, 8,3,8,3, 8,3,9,3, 8,4,0,3, 8,4,1,3, 8,4,2,3, 8,4,3,3, 8,4,4,3, 8,4,5,3, + 8,4,6,3, 8,4,7,3, 8,4,8,3, 8,4,9,3, 8,5,0,3, 8,5,1,3, 8,5,2,3, 8,5,3,3, 8,5,4,3, + 8,5,5,3, 8,5,6,3, 8,5,7,3, 8,5,8,3, 8,5,9,3, 8,6,0,3, 8,6,1,3, 8,6,2,3, 8,6,3,3, + 8,6,4,3, 8,6,5,3, 8,6,6,3, 8,6,7,3, 8,6,8,3, 8,6,9,3, 8,7,0,3, 8,7,1,3, 8,7,2,3, + 8,7,3,3, 8,7,4,3, 8,7,5,3, 8,7,6,3, 8,7,7,3, 8,7,8,3, 8,7,9,3, 8,8,0,3, 8,8,1,3, + 8,8,2,3, 8,8,3,3, 8,8,4,3, 8,8,5,3, 8,8,6,3, 8,8,7,3, 8,8,8,3, 8,8,9,3, 8,9,0,3, + 8,9,1,3, 8,9,2,3, 8,9,3,3, 8,9,4,3, 8,9,5,3, 8,9,6,3, 8,9,7,3, 8,9,8,3, 8,9,9,3, + 9,0,0,3, 9,0,1,3, 9,0,2,3, 9,0,3,3, 9,0,4,3, 9,0,5,3, 9,0,6,3, 9,0,7,3, 9,0,8,3, + 9,0,9,3, 9,1,0,3, 9,1,1,3, 9,1,2,3, 9,1,3,3, 9,1,4,3, 9,1,5,3, 9,1,6,3, 9,1,7,3, + 9,1,8,3, 9,1,9,3, 9,2,0,3, 9,2,1,3, 9,2,2,3, 9,2,3,3, 9,2,4,3, 9,2,5,3, 9,2,6,3, + 9,2,7,3, 9,2,8,3, 9,2,9,3, 9,3,0,3, 9,3,1,3, 9,3,2,3, 9,3,3,3, 9,3,4,3, 9,3,5,3, + 9,3,6,3, 9,3,7,3, 9,3,8,3, 9,3,9,3, 9,4,0,3, 9,4,1,3, 9,4,2,3, 9,4,3,3, 9,4,4,3, + 9,4,5,3, 9,4,6,3, 9,4,7,3, 9,4,8,3, 9,4,9,3, 9,5,0,3, 9,5,1,3, 9,5,2,3, 9,5,3,3, + 9,5,4,3, 9,5,5,3, 9,5,6,3, 9,5,7,3, 9,5,8,3, 9,5,9,3, 9,6,0,3, 9,6,1,3, 9,6,2,3, + 9,6,3,3, 9,6,4,3, 9,6,5,3, 9,6,6,3, 9,6,7,3, 9,6,8,3, 9,6,9,3, 9,7,0,3, 9,7,1,3, + 9,7,2,3, 9,7,3,3, 9,7,4,3, 9,7,5,3, 9,7,6,3, 9,7,7,3, 9,7,8,3, 9,7,9,3, 
9,8,0,3, + 9,8,1,3, 9,8,2,3, 9,8,3,3, 9,8,4,3, 9,8,5,3, 9,8,6,3, 9,8,7,3, 9,8,8,3, 9,8,9,3, + 9,9,0,3, 9,9,1,3, 9,9,2,3, 9,9,3,3, 9,9,4,3, 9,9,5,3, 9,9,6,3, 9,9,7,3, 9,9,8,3, + 9,9,9,3}; +#endif diff --git a/qemu/include/libdecnumber/decNumber.h b/qemu/include/libdecnumber/decNumber.h new file mode 100644 index 00000000..aa115fed --- /dev/null +++ b/qemu/include/libdecnumber/decNumber.h @@ -0,0 +1,201 @@ +/* Decimal number arithmetic module header for the decNumber C Library. + Copyright (C) 2005, 2007 Free Software Foundation, Inc. + Contributed by IBM Corporation. Author Mike Cowlishaw. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file. (The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. */ + +/* ------------------------------------------------------------------ */ +/* Decimal Number arithmetic module header */ +/* ------------------------------------------------------------------ */ + +#ifndef DECNUMBER_H +#define DECNUMBER_H + + #define DECNAME "decNumber" /* Short name */ + #define DECFULLNAME "Decimal Number Module" /* Verbose name */ + #define DECAUTHOR "Mike Cowlishaw" /* Who to blame */ + + #include "libdecnumber/decContext.h" + + /* Bit settings for decNumber.bits */ + #define DECNEG 0x80 /* Sign; 1=negative, 0=positive or zero */ + #define DECINF 0x40 /* 1=Infinity */ + #define DECNAN 0x20 /* 1=NaN */ + #define DECSNAN 0x10 /* 1=sNaN */ + /* The remaining bits are reserved; they must be 0 */ + #define DECSPECIAL (DECINF|DECNAN|DECSNAN) /* any special value */ + + /* Define the decNumber data structure. The size and shape of the */ + /* units array in the structure is determined by the following */ + /* constant. This must not be changed without recompiling the */ + /* decNumber library modules. */ + + #define DECDPUN 3 /* DECimal Digits Per UNit [must be >0 */ + /* and <10; 3 or powers of 2 are best]. */ + + /* DECNUMDIGITS is the default number of digits that can be held in */ + /* the structure. If undefined, 1 is assumed and it is assumed */ + /* that the structure will be immediately followed by extra space, */ + /* as required. DECNUMDIGITS is always >0. */ + #if !defined(DECNUMDIGITS) + #define DECNUMDIGITS 1 + #endif + + /* The size (integer data type) of each unit is determined by the */ + /* number of digits it will hold. 
*/
+ #if DECDPUN<=2
+ #define decNumberUnit uint8_t
+ #elif DECDPUN<=4
+ #define decNumberUnit uint16_t
+ #else
+ #define decNumberUnit uint32_t
+ #endif
+ /* The number of units needed is ceil(DECNUMDIGITS/DECDPUN) */
+ #define DECNUMUNITS ((DECNUMDIGITS+DECDPUN-1)/DECDPUN)
+
+ /* The data structure... */
+ typedef struct {
+ int32_t digits; /* Count of digits in the coefficient; >0 */
+ int32_t exponent; /* Unadjusted exponent, unbiased, in */
+ /* range: -1999999997 through 999999999 */
+ uint8_t bits; /* Indicator bits (see above) */
+ /* Coefficient, from least significant unit */
+ decNumberUnit lsu[DECNUMUNITS];
+ } decNumber;
+
+ /* Notes: */
+ /* 1. If digits is > DECDPUN then there will be one or more */
+ /* decNumberUnits immediately following the first element of lsu.*/
+ /* These contain the remaining (more significant) digits of the */
+ /* number, and may be in the lsu array, or may be guaranteed by */
+ /* some other mechanism (such as being contained in another */
+ /* structure, or being overlaid on dynamically allocated */
+ /* storage). */
+ /* */
+ /* Each integer of the coefficient (except potentially the last) */
+ /* contains DECDPUN digits (e.g., a value in the range 0 through */
+ /* 99999999 if DECDPUN is 8, or 0 through 999 if DECDPUN is 3). */
+ /* */
+ /* 2. A decNumber converted to a string may need up to digits+14 */
+ /* characters. The worst cases (non-exponential and exponential */
+ /* formats) are -0.00000{9...}# and -9.{9...}E+999999999# */
+ /* (where # is '\0') */
+
+
+ /* ---------------------------------------------------------------- */
+ /* decNumber public functions and macros */
+ /* ---------------------------------------------------------------- */
+
+
+ /* Conversions */
+ decNumber * decNumberFromInt32(decNumber *, int32_t);
+ decNumber * decNumberFromUInt32(decNumber *, uint32_t);
+ decNumber * decNumberFromInt64(decNumber *, int64_t);
+ decNumber * decNumberFromUInt64(decNumber *, uint64_t);
+ decNumber * decNumberFromString(decNumber *, const char *, decContext *);
+ char * decNumberToString(const decNumber *, char *);
+ char * decNumberToEngString(const decNumber *, char *);
+ uint32_t decNumberToUInt32(const decNumber *, decContext *);
+ int32_t decNumberToInt32(const decNumber *, decContext *);
+ int64_t decNumberIntegralToInt64(const decNumber *dn, decContext *set);
+ uint8_t * decNumberGetBCD(const decNumber *, uint8_t *);
+ decNumber * decNumberSetBCD(decNumber *, const uint8_t *, uint32_t);
+
+ /* Operators and elementary functions */
+ decNumber * decNumberAbs(decNumber *, const decNumber *, decContext *);
+ decNumber * decNumberAdd(decNumber *, const decNumber *, const decNumber *, decContext *);
+ decNumber * decNumberAnd(decNumber *, const decNumber *, const decNumber *, decContext *);
+ decNumber * decNumberCompare(decNumber *, const decNumber *, const decNumber *, decContext *);
+ decNumber * decNumberCompareSignal(decNumber *, const decNumber *, const decNumber *, decContext *);
+ decNumber * decNumberCompareTotal(decNumber *, const decNumber *, const decNumber *, decContext *);
+ decNumber * decNumberCompareTotalMag(decNumber *, const decNumber *, const decNumber *, decContext *);
+ decNumber * decNumberDivide(decNumber *, const decNumber *, const decNumber *, decContext *);
+ decNumber * decNumberDivideInteger(decNumber *, const decNumber *, const decNumber *, decContext *);
+ decNumber * decNumberExp(decNumber *, const decNumber *, decContext *);
+ decNumber * decNumberFMA(decNumber *, const decNumber *, const decNumber *,
const decNumber *, decContext *); + decNumber * decNumberInvert(decNumber *, const decNumber *, decContext *); + decNumber * decNumberLn(decNumber *, const decNumber *, decContext *); + decNumber * decNumberLogB(decNumber *, const decNumber *, decContext *); + decNumber * decNumberLog10(decNumber *, const decNumber *, decContext *); + decNumber * decNumberMax(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberMaxMag(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberMin(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberMinMag(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberMinus(decNumber *, const decNumber *, decContext *); + decNumber * decNumberMultiply(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberNormalize(decNumber *, const decNumber *, decContext *); + decNumber * decNumberOr(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberPlus(decNumber *, const decNumber *, decContext *); + decNumber * decNumberPower(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberQuantize(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberReduce(decNumber *, const decNumber *, decContext *); + decNumber * decNumberRemainder(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberRemainderNear(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberRescale(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberRotate(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberSameQuantum(decNumber *, const decNumber *, const decNumber *); + decNumber * decNumberScaleB(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberShift(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberSquareRoot(decNumber *, const decNumber *, decContext *); + decNumber * decNumberSubtract(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberToIntegralExact(decNumber *, const decNumber *, decContext *); + decNumber * decNumberToIntegralValue(decNumber *, const decNumber *, decContext *); + decNumber * decNumberXor(decNumber *, const decNumber *, const decNumber *, decContext *); + + /* Utilities */ + enum decClass decNumberClass(const decNumber *, decContext *); + const char * decNumberClassToString(enum decClass); + decNumber * decNumberCopy(decNumber *, const decNumber *); + decNumber * decNumberCopyAbs(decNumber *, const decNumber *); + decNumber * decNumberCopyNegate(decNumber *, const decNumber *); + decNumber * decNumberCopySign(decNumber *, const decNumber *, const decNumber *); + decNumber * decNumberNextMinus(decNumber *, const decNumber *, decContext *); + decNumber * decNumberNextPlus(decNumber *, const decNumber *, decContext *); + decNumber * decNumberNextToward(decNumber *, const decNumber *, const decNumber *, decContext *); + decNumber * decNumberTrim(decNumber *); + const char * decNumberVersion(void); + decNumber * decNumberZero(decNumber *); + + /* Functions for testing decNumbers (normality depends on context) */ + int32_t decNumberIsNormal(const decNumber *, decContext *); + int32_t decNumberIsSubnormal(const decNumber *, decContext *); + + /* Macros for testing decNumber 
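*dn appear below. First, a usage sketch for the functions above
+ (illustrative only; it assumes DECNUMDIGITS was defined at least as
+ large as the working precision, and uses decContextDefault and
+ DEC_INIT_BASE from decContext.h):
+
+     decNumber a, b, r;
+     decContext ctx;
+     char out[DECNUMDIGITS+14];          // see note 2 above
+
+     decContextDefault(&ctx, DEC_INIT_BASE);
+     ctx.traps=0;                        // no signals on error
+     decNumberFromString(&a, "1.23", &ctx);
+     decNumberFromString(&b, "4.5", &ctx);
+     decNumberAdd(&r, &a, &b, &ctx);     // r = a + b
+     decNumberToString(&r, out);         // out holds "5.73"
+ */
+
+ /* Macros for testing decNumber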
*dn */ + #define decNumberIsCanonical(dn) (1) /* All decNumbers are saintly */ + #define decNumberIsFinite(dn) (((dn)->bits&DECSPECIAL)==0) + #define decNumberIsInfinite(dn) (((dn)->bits&DECINF)!=0) + #define decNumberIsNaN(dn) (((dn)->bits&(DECNAN|DECSNAN))!=0) + #define decNumberIsNegative(dn) (((dn)->bits&DECNEG)!=0) + #define decNumberIsQNaN(dn) (((dn)->bits&(DECNAN))!=0) + #define decNumberIsSNaN(dn) (((dn)->bits&(DECSNAN))!=0) + #define decNumberIsSpecial(dn) (((dn)->bits&DECSPECIAL)!=0) + #define decNumberIsZero(dn) (*(dn)->lsu==0 \ + && (dn)->digits==1 \ + && (((dn)->bits&DECSPECIAL)==0)) + #define decNumberRadix(dn) (10) + +#endif diff --git a/qemu/include/libdecnumber/decNumberLocal.h b/qemu/include/libdecnumber/decNumberLocal.h new file mode 100644 index 00000000..4d53c077 --- /dev/null +++ b/qemu/include/libdecnumber/decNumberLocal.h @@ -0,0 +1,663 @@ +/* Local definitions for the decNumber C Library. + Copyright (C) 2007 Free Software Foundation, Inc. + Contributed by IBM Corporation. Author Mike Cowlishaw. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file. (The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. */ + +/* ------------------------------------------------------------------ */ +/* decNumber package local type, tuning, and macro definitions */ +/* ------------------------------------------------------------------ */ +/* This header file is included by all modules in the decNumber */ +/* library, and contains local type definitions, tuning parameters, */ +/* etc. It should not need to be used by application programs. */ +/* decNumber.h or one of decDouble (etc.) must be included first. */ +/* ------------------------------------------------------------------ */ + +#ifndef DECNUMBERLOCAL_H +#define DECNUMBERLOCAL_H + + #define DECVERSION "decNumber 3.53" /* Package Version [16 max.] 
*/ + #define DECNLAUTHOR "Mike Cowlishaw" /* Who to blame */ + + #include "libdecnumber/dconfig.h" + #include "libdecnumber/decContext.h" + + /* Conditional code flag -- set this to match hardware platform */ + /* 1=little-endian, 0=big-endian */ + #if WORDS_BIGENDIAN + #define DECLITEND 0 + #else + #define DECLITEND 1 + #endif + + /* Conditional code flag -- set this to 1 for best performance */ + #define DECUSE64 1 /* 1=use int64s, 0=int32 & smaller only */ + + /* Conditional check flags -- set these to 0 for best performance */ + #define DECCHECK 0 /* 1 to enable robust checking */ + #define DECALLOC 0 /* 1 to enable memory accounting */ + #define DECTRACE 0 /* 1 to trace certain internals, etc. */ + + /* Tuning parameter for decNumber (arbitrary precision) module */ + #define DECBUFFER 36 /* Size basis for local buffers. This */ + /* should be a common maximum precision */ + /* rounded up to a multiple of 4; must */ + /* be zero or positive. */ + + /* ---------------------------------------------------------------- */ + /* Definitions for all modules (general-purpose) */ + /* ---------------------------------------------------------------- */ + + /* Local names for common types -- for safety, decNumber modules do */ + /* not use int or long directly. */ + #define Flag uint8_t + #define Byte int8_t + #define uByte uint8_t + #define Short int16_t + #define uShort uint16_t + #define Int int32_t + #define uInt uint32_t + #define Unit decNumberUnit + #if DECUSE64 + #define Long int64_t + #define uLong uint64_t + #endif + + /* Development-use definitions */ + typedef long int LI; /* for printf arguments only */ + #define DECNOINT 0 /* 1 to check no internal use of 'int' */ + #if DECNOINT + /* if these interfere with your C includes, do not set DECNOINT */ + #define int ? /* enable to ensure that plain C 'int' */ + #define long ?? /* .. or 'long' types are not used */ + #endif + + /* Shared lookup tables */ + extern const uByte DECSTICKYTAB[10]; /* re-round digits if sticky */ + extern const uLong DECPOWERS[19]; /* powers of ten table */ + /* The following are included from decDPD.h */ + extern const uShort DPD2BIN[1024]; /* DPD -> 0-999 */ + extern const uShort BIN2DPD[1000]; /* 0-999 -> DPD */ + extern const uInt DPD2BINK[1024]; /* DPD -> 0-999000 */ + extern const uInt DPD2BINM[1024]; /* DPD -> 0-999000000 */ + extern const uByte DPD2BCD8[4096]; /* DPD -> ddd + len */ + extern const uByte BIN2BCD8[4000]; /* 0-999 -> ddd + len */ + extern const uShort BCD2DPD[2458]; /* 0-0x999 -> DPD (0x999=2457)*/ + + /* LONGMUL32HI -- set w=(u*v)>>32, where w, u, and v are uInts */ + /* (that is, sets w to be the high-order word of the 64-bit result; */ + /* the low-order word is simply u*v.) */ + /* This version is derived from Knuth via Hacker's Delight; */ + /* it seems to optimize better than some others tried */ + #define LONGMUL32HI(w, u, v) { \ + uInt u0, u1, v0, v1, w0, w1, w2, t; \ + u0=u & 0xffff; u1=u>>16; \ + v0=v & 0xffff; v1=v>>16; \ + w0=u0*v0; \ + t=u1*v0 + (w0>>16); \ + w1=t & 0xffff; w2=t>>16; \ + w1=u0*v1 + w1; \ + (w)=u1*v1 + w2 + (w1>>16);} + + /* ROUNDUP -- round an integer up to a multiple of n */ + #define ROUNDUP(i, n) ((((i)+(n)-1)/n)*n) + + /* ROUNDDOWN -- round an integer down to a multiple of n */ + #define ROUNDDOWN(i, n) (((i)/n)*n) + #define ROUNDDOWN4(i) ((i)&~3) /* special for n=4 */ + + /* References to multi-byte sequences under different sizes */ + /* Refer to a uInt from four bytes starting at a char* or uByte*, */ + /* etc. 
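+
+ A usage sketch for LONGMUL32HI above (illustrative only): it yields
+ the high word of the 64-bit product of two uInts without forming a
+ 64-bit intermediate:
+
+     uInt hi, u=0x89abcdefu, v=0x12345678u;
+     LONGMUL32HI(hi, u, v);   // hi == (uInt)(((uLong)u * v) >> 32)
+
+ The macros below give raw access to multi-byte sequences: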
*/ + #define UINTAT(b) (*((uInt *)(b))) + #define USHORTAT(b) (*((uShort *)(b))) + #define UBYTEAT(b) (*((uByte *)(b))) + + /* X10 and X100 -- multiply integer i by 10 or 100 */ + /* [shifts are usually faster than multiply; could be conditional] */ + #define X10(i) (((i)<<1)+((i)<<3)) + #define X100(i) (((i)<<2)+((i)<<5)+((i)<<6)) + + /* MAXI and MINI -- general max & min (not in ANSI) for integers */ + #define MAXI(x,y) ((x)<(y)?(y):(x)) + #define MINI(x,y) ((x)>(y)?(y):(x)) + + /* Useful constants */ + #define BILLION 1000000000 /* 10**9 */ + /* CHARMASK: 0x30303030 for ASCII/UTF8; 0xF0F0F0F0 for EBCDIC */ + #define CHARMASK ((((((((uInt)'0')<<8)+'0')<<8)+'0')<<8)+'0') + + + /* ---------------------------------------------------------------- */ + /* Definitions for arbitrary-precision modules (only valid after */ + /* decNumber.h has been included) */ + /* ---------------------------------------------------------------- */ + + /* Limits and constants */ + #define DECNUMMAXP 999999999 /* maximum precision code can handle */ + #define DECNUMMAXE 999999999 /* maximum adjusted exponent ditto */ + #define DECNUMMINE -999999999 /* minimum adjusted exponent ditto */ + #if (DECNUMMAXP != DEC_MAX_DIGITS) + #error Maximum digits mismatch + #endif + #if (DECNUMMAXE != DEC_MAX_EMAX) + #error Maximum exponent mismatch + #endif + #if (DECNUMMINE != DEC_MIN_EMIN) + #error Minimum exponent mismatch + #endif + + /* Set DECDPUNMAX -- the maximum integer that fits in DECDPUN */ + /* digits, and D2UTABLE -- the initializer for the D2U table */ + #if DECDPUN==1 + #define DECDPUNMAX 9 + #define D2UTABLE {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17, \ + 18,19,20,21,22,23,24,25,26,27,28,29,30,31,32, \ + 33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, \ + 48,49} + #elif DECDPUN==2 + #define DECDPUNMAX 99 + #define D2UTABLE {0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10, \ + 11,11,12,12,13,13,14,14,15,15,16,16,17,17,18, \ + 18,19,19,20,20,21,21,22,22,23,23,24,24,25} + #elif DECDPUN==3 + #define DECDPUNMAX 999 + #define D2UTABLE {0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7, \ + 8,8,8,9,9,9,10,10,10,11,11,11,12,12,12,13,13, \ + 13,14,14,14,15,15,15,16,16,16,17} + #elif DECDPUN==4 + #define DECDPUNMAX 9999 + #define D2UTABLE {0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,6, \ + 6,6,6,7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10,11, \ + 11,11,11,12,12,12,12,13} + #elif DECDPUN==5 + #define DECDPUNMAX 99999 + #define D2UTABLE {0,1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4,5, \ + 5,5,5,5,6,6,6,6,6,7,7,7,7,7,8,8,8,8,8,9,9,9, \ + 9,9,10,10,10,10} + #elif DECDPUN==6 + #define DECDPUNMAX 999999 + #define D2UTABLE {0,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,4,4,4, \ + 4,4,4,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,7,7,7,8, \ + 8,8,8,8,8,9} + #elif DECDPUN==7 + #define DECDPUNMAX 9999999 + #define D2UTABLE {0,1,1,1,1,1,1,1,2,2,2,2,2,2,2,3,3,3,3,3,3,3, \ + 4,4,4,4,4,4,4,5,5,5,5,5,5,5,6,6,6,6,6,6,6,7, \ + 7,7,7,7,7,7} + #elif DECDPUN==8 + #define DECDPUNMAX 99999999 + #define D2UTABLE {0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3, \ + 3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,6,6,6, \ + 6,6,6,6,6,7} + #elif DECDPUN==9 + #define DECDPUNMAX 999999999 + #define D2UTABLE {0,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,3,3,3, \ + 3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5, \ + 5,5,6,6,6,6} + #elif defined(DECDPUN) + #error DECDPUN must be in the range 1-9 + #endif + + /* ----- Shared data (in decNumber.c) ----- */ + /* Public lookup table used by the D2U macro (see below) */ + #define DECMAXD2U 49 + extern const uByte d2utable[DECMAXD2U+1]; + + /* ----- Macros ----- */ + /* 
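A worked check for the D2U machinery (illustrative only, assuming
+ the default DECDPUN of 3): the D2U macro defined just below must
+ return ceil(d/DECDPUN) Units for d digits, so
+
+     D2U(7)    // == 3 Units for a 7-digit coefficient
+     SD2U(10)  // == 4, usable in compile-time declarations
+
+ matching the DECDPUN==3 row of D2UTABLE above (d2utable[7]==3). */
+
+ /*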
ISZERO -- return true if decNumber dn is a zero */ + /* [performance-critical in some situations] */ + #define ISZERO(dn) decNumberIsZero(dn) /* now just a local name */ + + /* D2U -- return the number of Units needed to hold d digits */ + /* (runtime version, with table lookaside for small d) */ + #if DECDPUN==8 + #define D2U(d) ((unsigned)((d)<=DECMAXD2U?d2utable[d]:((d)+7)>>3)) + #elif DECDPUN==4 + #define D2U(d) ((unsigned)((d)<=DECMAXD2U?d2utable[d]:((d)+3)>>2)) + #else + #define D2U(d) ((d)<=DECMAXD2U?d2utable[d]:((d)+DECDPUN-1)/DECDPUN) + #endif + /* SD2U -- static D2U macro (for compile-time calculation) */ + #define SD2U(d) (((d)+DECDPUN-1)/DECDPUN) + + /* MSUDIGITS -- returns digits in msu, from digits, calculated */ + /* using D2U */ + #define MSUDIGITS(d) ((d)-(D2U(d)-1)*DECDPUN) + + /* D2N -- return the number of decNumber structs that would be */ + /* needed to contain that number of digits (and the initial */ + /* decNumber struct) safely. Note that one Unit is included in the */ + /* initial structure. Used for allocating space that is aligned on */ + /* a decNumber struct boundary. */ + #define D2N(d) \ + ((((SD2U(d)-1)*sizeof(Unit))+sizeof(decNumber)*2-1)/sizeof(decNumber)) + + /* TODIGIT -- macro to remove the leading digit from the unsigned */ + /* integer u at column cut (counting from the right, LSD=0) and */ + /* place it as an ASCII character into the character pointed to by */ + /* c. Note that cut must be <= 9, and the maximum value for u is */ + /* 2,000,000,000 (as is needed for negative exponents of */ + /* subnormals). The unsigned integer pow is used as a temporary */ + /* variable. */ + #define TODIGIT(u, cut, c, pow) { \ + *(c)='0'; \ + pow=DECPOWERS[cut]*2; \ + if ((u)>pow) { \ + pow*=4; \ + if ((u)>=pow) {(u)-=pow; *(c)+=8;} \ + pow/=2; \ + if ((u)>=pow) {(u)-=pow; *(c)+=4;} \ + pow/=2; \ + } \ + if ((u)>=pow) {(u)-=pow; *(c)+=2;} \ + pow/=2; \ + if ((u)>=pow) {(u)-=pow; *(c)+=1;} \ + } + + /* ---------------------------------------------------------------- */ + /* Definitions for fixed-precision modules (only valid after */ + /* decSingle.h, decDouble.h, or decQuad.h has been included) */ + /* ---------------------------------------------------------------- */ + + /* bcdnum -- a structure describing a format-independent finite */ + /* number, whose coefficient is a string of bcd8 uBytes */ + typedef struct { + uByte *msd; /* -> most significant digit */ + uByte *lsd; /* -> least ditto */ + uInt sign; /* 0=positive, DECFLOAT_Sign=negative */ + Int exponent; /* Unadjusted signed exponent (q), or */ + /* DECFLOAT_NaN etc. for a special */ + } bcdnum; + + /* Test if exponent or bcdnum exponent must be a special, etc. 
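*/
+
+ /* Worked example for the TODIGIT macro above (illustrative only):
+ with u=2013 and cut=3, pow starts at DECPOWERS[3]*2 == 2000; the
+ 8000 and 4000 tests fail, 2000 is subtracted once, and the macro
+ leaves *c=='2' with u reduced to 13 (the thousands digit removed):
+
+     uInt u=2013, pow;
+     char d;
+     TODIGIT(u, 3, &d, pow);   // d=='2', u==13
+ */
+
+ /* Exponent tests: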
*/ + #define EXPISSPECIAL(exp) ((exp)>=DECFLOAT_MinSp) + #define EXPISINF(exp) (exp==DECFLOAT_Inf) + #define EXPISNAN(exp) (exp==DECFLOAT_qNaN || exp==DECFLOAT_sNaN) + #define NUMISSPECIAL(num) (EXPISSPECIAL((num)->exponent)) + + /* Refer to a 32-bit word or byte in a decFloat (df) by big-endian */ + /* (array) notation (the 0 word or byte contains the sign bit), */ + /* automatically adjusting for endianness; similarly address a word */ + /* in the next-wider format (decFloatWider, or dfw) */ + #define DECWORDS (DECBYTES/4) + #define DECWWORDS (DECWBYTES/4) + #if DECLITEND + #define DFWORD(df, off) ((df)->words[DECWORDS-1-(off)]) + #define DFBYTE(df, off) ((df)->bytes[DECBYTES-1-(off)]) + #define DFWWORD(dfw, off) ((dfw)->words[DECWWORDS-1-(off)]) + #else + #define DFWORD(df, off) ((df)->words[off]) + #define DFBYTE(df, off) ((df)->bytes[off]) + #define DFWWORD(dfw, off) ((dfw)->words[off]) + #endif + + /* Tests for sign or specials, directly on DECFLOATs */ + #define DFISSIGNED(df) (DFWORD(df, 0)&0x80000000) + #define DFISSPECIAL(df) ((DFWORD(df, 0)&0x78000000)==0x78000000) + #define DFISINF(df) ((DFWORD(df, 0)&0x7c000000)==0x78000000) + #define DFISNAN(df) ((DFWORD(df, 0)&0x7c000000)==0x7c000000) + #define DFISQNAN(df) ((DFWORD(df, 0)&0x7e000000)==0x7c000000) + #define DFISSNAN(df) ((DFWORD(df, 0)&0x7e000000)==0x7e000000) + + /* Shared lookup tables */ + extern const uInt DECCOMBMSD[64]; /* Combination field -> MSD */ + extern const uInt DECCOMBFROM[48]; /* exp+msd -> Combination */ + + /* Private generic (utility) routine */ + #if DECCHECK || DECTRACE + extern void decShowNum(const bcdnum *, const char *); + #endif + + /* Format-dependent macros and constants */ + #if defined(DECPMAX) + + /* Useful constants */ + #define DECPMAX9 (ROUNDUP(DECPMAX, 9)/9) /* 'Pmax' in 10**9s */ + /* Top words for a zero */ + #define SINGLEZERO 0x22500000 + #define DOUBLEZERO 0x22380000 + #define QUADZERO 0x22080000 + /* [ZEROWORD is defined to be one of these in the DFISZERO macro] */ + + /* Format-dependent common tests: */ + /* DFISZERO -- test for (any) zero */ + /* DFISCCZERO -- test for coefficient continuation being zero */ + /* DFISCC01 -- test for coefficient contains only 0s and 1s */ + /* DFISINT -- test for finite and exponent q=0 */ + /* DFISUINT01 -- test for sign=0, finite, exponent q=0, and */ + /* MSD=0 or 1 */ + /* ZEROWORD is also defined here. */ + /* In DFISZERO the first test checks the least-significant word */ + /* (most likely to be non-zero); the penultimate tests MSD and */ + /* DPDs in the signword, and the final test excludes specials and */ + /* MSD>7. DFISINT similarly has to allow for the two forms of */ + /* MSD codes. DFISUINT01 only has to allow for one form of MSD */ + /* code. 
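*/
+
+ /* Sketch (illustrative only; df points to a decFloat of the current
+ format): because DFWORD addresses words big-endian-style, the tests
+ above are endian-neutral, e.g.
+
+     if (DFISSIGNED(df)) ...   // sign: top bit of DFWORD(df, 0)
+     if (DFISNAN(df)) ...      // top five bits after the sign all set
+ */
+
+ /* Per-format zero and integer tests: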
*/ + #if DECPMAX==7 + #define ZEROWORD SINGLEZERO + /* [test macros not needed except for Zero] */ + #define DFISZERO(df) ((DFWORD(df, 0)&0x1c0fffff)==0 \ + && (DFWORD(df, 0)&0x60000000)!=0x60000000) + #elif DECPMAX==16 + #define ZEROWORD DOUBLEZERO + #define DFISZERO(df) ((DFWORD(df, 1)==0 \ + && (DFWORD(df, 0)&0x1c03ffff)==0 \ + && (DFWORD(df, 0)&0x60000000)!=0x60000000)) + #define DFISINT(df) ((DFWORD(df, 0)&0x63fc0000)==0x22380000 \ + ||(DFWORD(df, 0)&0x7bfc0000)==0x6a380000) + #define DFISUINT01(df) ((DFWORD(df, 0)&0xfbfc0000)==0x22380000) + #define DFISCCZERO(df) (DFWORD(df, 1)==0 \ + && (DFWORD(df, 0)&0x0003ffff)==0) + #define DFISCC01(df) ((DFWORD(df, 0)&~0xfffc9124)==0 \ + && (DFWORD(df, 1)&~0x49124491)==0) + #elif DECPMAX==34 + #define ZEROWORD QUADZERO + #define DFISZERO(df) ((DFWORD(df, 3)==0 \ + && DFWORD(df, 2)==0 \ + && DFWORD(df, 1)==0 \ + && (DFWORD(df, 0)&0x1c003fff)==0 \ + && (DFWORD(df, 0)&0x60000000)!=0x60000000)) + #define DFISINT(df) ((DFWORD(df, 0)&0x63ffc000)==0x22080000 \ + ||(DFWORD(df, 0)&0x7bffc000)==0x6a080000) + #define DFISUINT01(df) ((DFWORD(df, 0)&0xfbffc000)==0x22080000) + #define DFISCCZERO(df) (DFWORD(df, 3)==0 \ + && DFWORD(df, 2)==0 \ + && DFWORD(df, 1)==0 \ + && (DFWORD(df, 0)&0x00003fff)==0) + + #define DFISCC01(df) ((DFWORD(df, 0)&~0xffffc912)==0 \ + && (DFWORD(df, 1)&~0x44912449)==0 \ + && (DFWORD(df, 2)&~0x12449124)==0 \ + && (DFWORD(df, 3)&~0x49124491)==0) + #endif + + /* Macros to test if a certain 10 bits of a uInt or pair of uInts */ + /* are a canonical declet [higher or lower bits are ignored]. */ + /* declet is at offset 0 (from the right) in a uInt: */ + #define CANONDPD(dpd) (((dpd)&0x300)==0 || ((dpd)&0x6e)!=0x6e) + /* declet is at offset k (a multiple of 2) in a uInt: */ + #define CANONDPDOFF(dpd, k) (((dpd)&(0x300<<(k)))==0 \ + || ((dpd)&(((uInt)0x6e)<<(k)))!=(((uInt)0x6e)<<(k))) + /* declet is at offset k (a multiple of 2) in a pair of uInts: */ + /* [the top 2 bits will always be in the more-significant uInt] */ + #define CANONDPDTWO(hi, lo, k) (((hi)&(0x300>>(32-(k))))==0 \ + || ((hi)&(0x6e>>(32-(k))))!=(0x6e>>(32-(k))) \ + || ((lo)&(((uInt)0x6e)<<(k)))!=(((uInt)0x6e)<<(k))) + + /* Macro to test whether a full-length (length DECPMAX) BCD8 */ + /* coefficient is zero */ + /* test just the LSWord first, then the remainder */ + #if DECPMAX==7 + #define ISCOEFFZERO(u) (UINTAT((u)+DECPMAX-4)==0 \ + && UINTAT((u)+DECPMAX-7)==0) + #elif DECPMAX==16 + #define ISCOEFFZERO(u) (UINTAT((u)+DECPMAX-4)==0 \ + && (UINTAT((u)+DECPMAX-8)+UINTAT((u)+DECPMAX-12) \ + +UINTAT((u)+DECPMAX-16))==0) + #elif DECPMAX==34 + #define ISCOEFFZERO(u) (UINTAT((u)+DECPMAX-4)==0 \ + && (UINTAT((u)+DECPMAX-8) +UINTAT((u)+DECPMAX-12) \ + +UINTAT((u)+DECPMAX-16)+UINTAT((u)+DECPMAX-20) \ + +UINTAT((u)+DECPMAX-24)+UINTAT((u)+DECPMAX-28) \ + +UINTAT((u)+DECPMAX-32)+USHORTAT((u)+DECPMAX-34))==0) + #endif + + /* Macros and masks for the exponent continuation field and MSD */ + /* Get the exponent continuation from a decFloat *df as an Int */ + #define GETECON(df) ((Int)((DFWORD((df), 0)&0x03ffffff)>>(32-6-DECECONL))) + /* Ditto, from the next-wider format */ + #define GETWECON(df) ((Int)((DFWWORD((df), 0)&0x03ffffff)>>(32-6-DECWECONL))) + /* Get the biased exponent similarly */ + #define GETEXP(df) ((Int)(DECCOMBEXP[DFWORD((df), 0)>>26]+GETECON(df))) + /* Get the unbiased exponent similarly */ + #define GETEXPUN(df) ((Int)GETEXP(df)-DECBIAS) + /* Get the MSD similarly (as uInt) */ + #define GETMSD(df) (DECCOMBMSD[DFWORD((df), 0)>>26]) + + /* Compile-time computes of the 
exponent continuation field masks */ + /* full exponent continuation field: */ + #define ECONMASK ((0x03ffffff>>(32-6-DECECONL))<<(32-6-DECECONL)) + /* same, not including its first digit (the qNaN/sNaN selector): */ + #define ECONNANMASK ((0x01ffffff>>(32-6-DECECONL))<<(32-6-DECECONL)) + + /* Macros to decode the coefficient in a finite decFloat *df into */ + /* a BCD string (uByte *bcdin) of length DECPMAX uBytes */ + + /* In-line sequence to convert 10 bits at right end of uInt dpd */ + /* to three BCD8 digits starting at uByte u. Note that an extra */ + /* byte is written to the right of the three digits because this */ + /* moves four at a time for speed; the alternative macro moves */ + /* exactly three bytes */ + #define dpd2bcd8(u, dpd) { \ + UINTAT(u)=UINTAT(&DPD2BCD8[((dpd)&0x3ff)*4]);} + + #define dpd2bcd83(u, dpd) { \ + *(u)=DPD2BCD8[((dpd)&0x3ff)*4]; \ + *(u+1)=DPD2BCD8[((dpd)&0x3ff)*4+1]; \ + *(u+2)=DPD2BCD8[((dpd)&0x3ff)*4+2];} + + /* Decode the declets. After extracting each one, it is decoded */ + /* to BCD8 using a table lookup (also used for variable-length */ + /* decode). Each DPD decode is 3 bytes BCD8 plus a one-byte */ + /* length which is not used, here). Fixed-length 4-byte moves */ + /* are fast, however, almost everywhere, and so are used except */ + /* for the final three bytes (to avoid overrun). The code below */ + /* is 36 instructions for Doubles and about 70 for Quads, even */ + /* on IA32. */ + + /* Two macros are defined for each format: */ + /* GETCOEFF extracts the coefficient of the current format */ + /* GETWCOEFF extracts the coefficient of the next-wider format. */ + /* The latter is a copy of the next-wider GETCOEFF using DFWWORD. */ + + #if DECPMAX==7 + #define GETCOEFF(df, bcd) { \ + uInt sourhi=DFWORD(df, 0); \ + *(bcd)=(uByte)DECCOMBMSD[sourhi>>26]; \ + dpd2bcd8(bcd+1, sourhi>>10); \ + dpd2bcd83(bcd+4, sourhi);} + #define GETWCOEFF(df, bcd) { \ + uInt sourhi=DFWWORD(df, 0); \ + uInt sourlo=DFWWORD(df, 1); \ + *(bcd)=(uByte)DECCOMBMSD[sourhi>>26]; \ + dpd2bcd8(bcd+1, sourhi>>8); \ + dpd2bcd8(bcd+4, (sourhi<<2) | (sourlo>>30)); \ + dpd2bcd8(bcd+7, sourlo>>20); \ + dpd2bcd8(bcd+10, sourlo>>10); \ + dpd2bcd83(bcd+13, sourlo);} + + #elif DECPMAX==16 + #define GETCOEFF(df, bcd) { \ + uInt sourhi=DFWORD(df, 0); \ + uInt sourlo=DFWORD(df, 1); \ + *(bcd)=(uByte)DECCOMBMSD[sourhi>>26]; \ + dpd2bcd8(bcd+1, sourhi>>8); \ + dpd2bcd8(bcd+4, (sourhi<<2) | (sourlo>>30)); \ + dpd2bcd8(bcd+7, sourlo>>20); \ + dpd2bcd8(bcd+10, sourlo>>10); \ + dpd2bcd83(bcd+13, sourlo);} + #define GETWCOEFF(df, bcd) { \ + uInt sourhi=DFWWORD(df, 0); \ + uInt sourmh=DFWWORD(df, 1); \ + uInt sourml=DFWWORD(df, 2); \ + uInt sourlo=DFWWORD(df, 3); \ + *(bcd)=(uByte)DECCOMBMSD[sourhi>>26]; \ + dpd2bcd8(bcd+1, sourhi>>4); \ + dpd2bcd8(bcd+4, ((sourhi)<<6) | (sourmh>>26)); \ + dpd2bcd8(bcd+7, sourmh>>16); \ + dpd2bcd8(bcd+10, sourmh>>6); \ + dpd2bcd8(bcd+13, ((sourmh)<<4) | (sourml>>28)); \ + dpd2bcd8(bcd+16, sourml>>18); \ + dpd2bcd8(bcd+19, sourml>>8); \ + dpd2bcd8(bcd+22, ((sourml)<<2) | (sourlo>>30)); \ + dpd2bcd8(bcd+25, sourlo>>20); \ + dpd2bcd8(bcd+28, sourlo>>10); \ + dpd2bcd83(bcd+31, sourlo);} + + #elif DECPMAX==34 + #define GETCOEFF(df, bcd) { \ + uInt sourhi=DFWORD(df, 0); \ + uInt sourmh=DFWORD(df, 1); \ + uInt sourml=DFWORD(df, 2); \ + uInt sourlo=DFWORD(df, 3); \ + *(bcd)=(uByte)DECCOMBMSD[sourhi>>26]; \ + dpd2bcd8(bcd+1, sourhi>>4); \ + dpd2bcd8(bcd+4, ((sourhi)<<6) | (sourmh>>26)); \ + dpd2bcd8(bcd+7, sourmh>>16); \ + dpd2bcd8(bcd+10, sourmh>>6); \ + dpd2bcd8(bcd+13, 
((sourmh)<<4) | (sourml>>28)); \ + dpd2bcd8(bcd+16, sourml>>18); \ + dpd2bcd8(bcd+19, sourml>>8); \ + dpd2bcd8(bcd+22, ((sourml)<<2) | (sourlo>>30)); \ + dpd2bcd8(bcd+25, sourlo>>20); \ + dpd2bcd8(bcd+28, sourlo>>10); \ + dpd2bcd83(bcd+31, sourlo);} + + #define GETWCOEFF(df, bcd) {??} /* [should never be used] */ + #endif + + /* Macros to decode the coefficient in a finite decFloat *df into */ + /* a base-billion uInt array, with the least-significant */ + /* 0-999999999 'digit' at offset 0. */ + + /* Decode the declets. After extracting each one, it is decoded */ + /* to binary using a table lookup. Three tables are used; one */ + /* the usual DPD to binary, the other two pre-multiplied by 1000 */ + /* and 1000000 to avoid multiplication during decode. These */ + /* tables can also be used for multiplying up the MSD as the DPD */ + /* code for 0 through 9 is the identity. */ + #define DPD2BIN0 DPD2BIN /* for prettier code */ + + #if DECPMAX==7 + #define GETCOEFFBILL(df, buf) { \ + uInt sourhi=DFWORD(df, 0); \ + (buf)[0]=DPD2BIN0[sourhi&0x3ff] \ + +DPD2BINK[(sourhi>>10)&0x3ff] \ + +DPD2BINM[DECCOMBMSD[sourhi>>26]];} + + #elif DECPMAX==16 + #define GETCOEFFBILL(df, buf) { \ + uInt sourhi, sourlo; \ + sourlo=DFWORD(df, 1); \ + (buf)[0]=DPD2BIN0[sourlo&0x3ff] \ + +DPD2BINK[(sourlo>>10)&0x3ff] \ + +DPD2BINM[(sourlo>>20)&0x3ff]; \ + sourhi=DFWORD(df, 0); \ + (buf)[1]=DPD2BIN0[((sourhi<<2) | (sourlo>>30))&0x3ff] \ + +DPD2BINK[(sourhi>>8)&0x3ff] \ + +DPD2BINM[DECCOMBMSD[sourhi>>26]];} + + #elif DECPMAX==34 + #define GETCOEFFBILL(df, buf) { \ + uInt sourhi, sourmh, sourml, sourlo; \ + sourlo=DFWORD(df, 3); \ + (buf)[0]=DPD2BIN0[sourlo&0x3ff] \ + +DPD2BINK[(sourlo>>10)&0x3ff] \ + +DPD2BINM[(sourlo>>20)&0x3ff]; \ + sourml=DFWORD(df, 2); \ + (buf)[1]=DPD2BIN0[((sourml<<2) | (sourlo>>30))&0x3ff] \ + +DPD2BINK[(sourml>>8)&0x3ff] \ + +DPD2BINM[(sourml>>18)&0x3ff]; \ + sourmh=DFWORD(df, 1); \ + (buf)[2]=DPD2BIN0[((sourmh<<4) | (sourml>>28))&0x3ff] \ + +DPD2BINK[(sourmh>>6)&0x3ff] \ + +DPD2BINM[(sourmh>>16)&0x3ff]; \ + sourhi=DFWORD(df, 0); \ + (buf)[3]=DPD2BIN0[((sourhi<<6) | (sourmh>>26))&0x3ff] \ + +DPD2BINK[(sourhi>>4)&0x3ff] \ + +DPD2BINM[DECCOMBMSD[sourhi>>26]];} + + #endif + + /* Macros to decode the coefficient in a finite decFloat *df into */ + /* a base-thousand uInt array, with the least-significant 0-999 */ + /* 'digit' at offset 0. */ + + /* Decode the declets. After extracting each one, it is decoded */ + /* to binary using a table lookup. 
*/ + #if DECPMAX==7 + #define GETCOEFFTHOU(df, buf) { \ + uInt sourhi=DFWORD(df, 0); \ + (buf)[0]=DPD2BIN[sourhi&0x3ff]; \ + (buf)[1]=DPD2BIN[(sourhi>>10)&0x3ff]; \ + (buf)[2]=DECCOMBMSD[sourhi>>26];} + + #elif DECPMAX==16 + #define GETCOEFFTHOU(df, buf) { \ + uInt sourhi, sourlo; \ + sourlo=DFWORD(df, 1); \ + (buf)[0]=DPD2BIN[sourlo&0x3ff]; \ + (buf)[1]=DPD2BIN[(sourlo>>10)&0x3ff]; \ + (buf)[2]=DPD2BIN[(sourlo>>20)&0x3ff]; \ + sourhi=DFWORD(df, 0); \ + (buf)[3]=DPD2BIN[((sourhi<<2) | (sourlo>>30))&0x3ff]; \ + (buf)[4]=DPD2BIN[(sourhi>>8)&0x3ff]; \ + (buf)[5]=DECCOMBMSD[sourhi>>26];} + + #elif DECPMAX==34 + #define GETCOEFFTHOU(df, buf) { \ + uInt sourhi, sourmh, sourml, sourlo; \ + sourlo=DFWORD(df, 3); \ + (buf)[0]=DPD2BIN[sourlo&0x3ff]; \ + (buf)[1]=DPD2BIN[(sourlo>>10)&0x3ff]; \ + (buf)[2]=DPD2BIN[(sourlo>>20)&0x3ff]; \ + sourml=DFWORD(df, 2); \ + (buf)[3]=DPD2BIN[((sourml<<2) | (sourlo>>30))&0x3ff]; \ + (buf)[4]=DPD2BIN[(sourml>>8)&0x3ff]; \ + (buf)[5]=DPD2BIN[(sourml>>18)&0x3ff]; \ + sourmh=DFWORD(df, 1); \ + (buf)[6]=DPD2BIN[((sourmh<<4) | (sourml>>28))&0x3ff]; \ + (buf)[7]=DPD2BIN[(sourmh>>6)&0x3ff]; \ + (buf)[8]=DPD2BIN[(sourmh>>16)&0x3ff]; \ + sourhi=DFWORD(df, 0); \ + (buf)[9]=DPD2BIN[((sourhi<<6) | (sourmh>>26))&0x3ff]; \ + (buf)[10]=DPD2BIN[(sourhi>>4)&0x3ff]; \ + (buf)[11]=DECCOMBMSD[sourhi>>26];} + + #endif + + /* Set a decFloat to the maximum positive finite number (Nmax) */ + #if DECPMAX==7 + #define DFSETNMAX(df) \ + {DFWORD(df, 0)=0x77f3fcff;} + #elif DECPMAX==16 + #define DFSETNMAX(df) \ + {DFWORD(df, 0)=0x77fcff3f; \ + DFWORD(df, 1)=0xcff3fcff;} + #elif DECPMAX==34 + #define DFSETNMAX(df) \ + {DFWORD(df, 0)=0x77ffcff3; \ + DFWORD(df, 1)=0xfcff3fcf; \ + DFWORD(df, 2)=0xf3fcff3f; \ + DFWORD(df, 3)=0xcff3fcff;} + #endif + + /* [end of format-dependent macros and constants] */ + #endif + +#endif diff --git a/qemu/include/libdecnumber/dpd/decimal128.h b/qemu/include/libdecnumber/dpd/decimal128.h new file mode 100644 index 00000000..aff261e5 --- /dev/null +++ b/qemu/include/libdecnumber/dpd/decimal128.h @@ -0,0 +1,99 @@ +/* Decimal 128-bit format module header for the decNumber C Library. + Copyright (C) 2005, 2007 Free Software Foundation, Inc. + Contributed by IBM Corporation. Author Mike Cowlishaw. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file. (The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. 
*/ + +/* ------------------------------------------------------------------ */ +/* Decimal 128-bit format module header */ +/* ------------------------------------------------------------------ */ + +#ifndef DECIMAL128_H +#define DECIMAL128_H + + #define DEC128NAME "decimal128" /* Short name */ + #define DEC128FULLNAME "Decimal 128-bit Number" /* Verbose name */ + #define DEC128AUTHOR "Mike Cowlishaw" /* Who to blame */ + + /* parameters for decimal128s */ + #define DECIMAL128_Bytes 16 /* length */ + #define DECIMAL128_Pmax 34 /* maximum precision (digits) */ + #define DECIMAL128_Emax 6144 /* maximum adjusted exponent */ + #define DECIMAL128_Emin -6143 /* minimum adjusted exponent */ + #define DECIMAL128_Bias 6176 /* bias for the exponent */ + #define DECIMAL128_String 43 /* maximum string length, +1 */ + #define DECIMAL128_EconL 12 /* exp. continuation length */ + /* highest biased exponent (Elimit-1) */ + #define DECIMAL128_Ehigh (DECIMAL128_Emax+DECIMAL128_Bias-DECIMAL128_Pmax+1) + + /* check enough digits, if pre-defined */ + #if defined(DECNUMDIGITS) + #if (DECNUMDIGITS<DECIMAL128_Pmax) + #error decimal128.h needs pre-defined DECNUMDIGITS>=34 for safe use + #endif + #endif + + #ifndef DECNUMDIGITS + #define DECNUMDIGITS DECIMAL128_Pmax /* size if not already defined*/ + #endif + #include "libdecnumber/decNumber.h" + + /* Decimal 128-bit type, accessible by bytes */ + typedef struct { + uint8_t bytes[DECIMAL128_Bytes]; /* decimal128: 1, 5, 12, 110 bits*/ + } decimal128; + + /* special values [top byte excluding sign bit; last two bits are */ + /* don't-care for Infinity on input, last bit don't-care for NaN] */ + #if !defined(DECIMAL_NaN) + #define DECIMAL_NaN 0x7c /* 0 11111 00 NaN */ + #define DECIMAL_sNaN 0x7e /* 0 11111 10 sNaN */ + #define DECIMAL_Inf 0x78 /* 0 11110 00 Infinity */ + #endif + + #include "decimal128Local.h" + + /* ---------------------------------------------------------------- */ + /* Routines */ + /* ---------------------------------------------------------------- */ + + + /* String conversions */ + decimal128 * decimal128FromString(decimal128 *, const char *, decContext *); + char * decimal128ToString(const decimal128 *, char *); + char * decimal128ToEngString(const decimal128 *, char *); + + /* decNumber conversions */ + decimal128 * decimal128FromNumber(decimal128 *, const decNumber *, + decContext *); + decNumber * decimal128ToNumber(const decimal128 *, decNumber *); + + /* Format-dependent utilities */ + uint32_t decimal128IsCanonical(const decimal128 *); + decimal128 * decimal128Canonical(decimal128 *, const decimal128 *); + +#endif diff --git a/qemu/include/libdecnumber/dpd/decimal128Local.h b/qemu/include/libdecnumber/dpd/decimal128Local.h new file mode 100644 index 00000000..97654277 --- /dev/null +++ b/qemu/include/libdecnumber/dpd/decimal128Local.h @@ -0,0 +1,47 @@ +/* Local definitions for use with the decNumber C Library. + Copyright (C) 2007 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file.
(The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. */ + +#if !defined(DECIMAL128LOCAL) + +/* The compiler needs sign manipulation functions for decimal128 which + are not part of the decNumber package. */ + +/* Set sign; this assumes the sign was previously zero. */ +#define decimal128SetSign(d,b) \ + { (d)->bytes[WORDS_BIGENDIAN ? 0 : 15] |= ((unsigned) (b) << 7); } + +/* Clear sign. */ +#define decimal128ClearSign(d) \ + { (d)->bytes[WORDS_BIGENDIAN ? 0 : 15] &= ~0x80; } + +/* Flip sign. */ +#define decimal128FlipSign(d) \ + { (d)->bytes[WORDS_BIGENDIAN ? 0 : 15] ^= 0x80; } + +#endif diff --git a/qemu/include/libdecnumber/dpd/decimal32.h b/qemu/include/libdecnumber/dpd/decimal32.h new file mode 100644 index 00000000..6cb9e436 --- /dev/null +++ b/qemu/include/libdecnumber/dpd/decimal32.h @@ -0,0 +1,97 @@ +/* Decimal 32-bit format module header for the decNumber C Library. + Copyright (C) 2005, 2007 Free Software Foundation, Inc. + Contributed by IBM Corporation. Author Mike Cowlishaw. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file. (The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. 
*/ + +/* ------------------------------------------------------------------ */ +/* Decimal 32-bit format module header */ +/* ------------------------------------------------------------------ */ + +#ifndef DECIMAL32_H +#define DECIMAL32_H + + #define DEC32NAME "decimal32" /* Short name */ + #define DEC32FULLNAME "Decimal 32-bit Number" /* Verbose name */ + #define DEC32AUTHOR "Mike Cowlishaw" /* Who to blame */ + + /* parameters for decimal32s */ + #define DECIMAL32_Bytes 4 /* length */ + #define DECIMAL32_Pmax 7 /* maximum precision (digits) */ + #define DECIMAL32_Emax 96 /* maximum adjusted exponent */ + #define DECIMAL32_Emin -95 /* minimum adjusted exponent */ + #define DECIMAL32_Bias 101 /* bias for the exponent */ + #define DECIMAL32_String 15 /* maximum string length, +1 */ + #define DECIMAL32_EconL 6 /* exp. continuation length */ + /* highest biased exponent (Elimit-1) */ + #define DECIMAL32_Ehigh (DECIMAL32_Emax+DECIMAL32_Bias-DECIMAL32_Pmax+1) + + /* check enough digits, if pre-defined */ + #if defined(DECNUMDIGITS) + #if (DECNUMDIGITS<DECIMAL32_Pmax) + #error decimal32.h needs pre-defined DECNUMDIGITS>=7 for safe use + #endif + #endif + + #ifndef DECNUMDIGITS + #define DECNUMDIGITS DECIMAL32_Pmax /* size if not already defined*/ + #endif + #include "libdecnumber/decNumber.h" + + /* Decimal 32-bit type, accessible by bytes */ + typedef struct { + uint8_t bytes[DECIMAL32_Bytes]; /* decimal32: 1, 5, 6, 20 bits*/ + } decimal32; + + /* special values [top byte excluding sign bit; last two bits are */ + /* don't-care for Infinity on input, last bit don't-care for NaN] */ + #if !defined(DECIMAL_NaN) + #define DECIMAL_NaN 0x7c /* 0 11111 00 NaN */ + #define DECIMAL_sNaN 0x7e /* 0 11111 10 sNaN */ + #define DECIMAL_Inf 0x78 /* 0 11110 00 Infinity */ + #endif + + /* ---------------------------------------------------------------- */ + /* Routines */ + /* ---------------------------------------------------------------- */ + + + /* String conversions */ + decimal32 * decimal32FromString(decimal32 *, const char *, decContext *); + char * decimal32ToString(const decimal32 *, char *); + char * decimal32ToEngString(const decimal32 *, char *); + + /* decNumber conversions */ + decimal32 * decimal32FromNumber(decimal32 *, const decNumber *, + decContext *); + decNumber * decimal32ToNumber(const decimal32 *, decNumber *); + + /* Format-dependent utilities */ + uint32_t decimal32IsCanonical(const decimal32 *); + decimal32 * decimal32Canonical(decimal32 *, const decimal32 *); + +#endif diff --git a/qemu/include/libdecnumber/dpd/decimal64.h b/qemu/include/libdecnumber/dpd/decimal64.h new file mode 100644 index 00000000..f29e5706 --- /dev/null +++ b/qemu/include/libdecnumber/dpd/decimal64.h @@ -0,0 +1,99 @@ +/* Decimal 64-bit format module header for the decNumber C Library. + Copyright (C) 2005, 2007 Free Software Foundation, Inc. + Contributed by IBM Corporation. Author Mike Cowlishaw. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file.
(The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. */ + +/* ------------------------------------------------------------------ */ +/* Decimal 64-bit format module header */ +/* ------------------------------------------------------------------ */ + +#ifndef DECIMAL64_H +#define DECIMAL64_H + + #define DEC64NAME "decimal64" /* Short name */ + #define DEC64FULLNAME "Decimal 64-bit Number" /* Verbose name */ + #define DEC64AUTHOR "Mike Cowlishaw" /* Who to blame */ + + + /* parameters for decimal64s */ + #define DECIMAL64_Bytes 8 /* length */ + #define DECIMAL64_Pmax 16 /* maximum precision (digits) */ + #define DECIMAL64_Emax 384 /* maximum adjusted exponent */ + #define DECIMAL64_Emin -383 /* minimum adjusted exponent */ + #define DECIMAL64_Bias 398 /* bias for the exponent */ + #define DECIMAL64_String 24 /* maximum string length, +1 */ + #define DECIMAL64_EconL 8 /* exp. continuation length */ + /* highest biased exponent (Elimit-1) */ + #define DECIMAL64_Ehigh (DECIMAL64_Emax+DECIMAL64_Bias-DECIMAL64_Pmax+1) + + /* check enough digits, if pre-defined */ + #if defined(DECNUMDIGITS) + #if (DECNUMDIGITS<DECIMAL64_Pmax) + #error decimal64.h needs pre-defined DECNUMDIGITS>=16 for safe use + #endif + #endif + + + #ifndef DECNUMDIGITS + #define DECNUMDIGITS DECIMAL64_Pmax /* size if not already defined*/ + #endif + #include "libdecnumber/decNumber.h" + + /* Decimal 64-bit type, accessible by bytes */ + typedef struct { + uint8_t bytes[DECIMAL64_Bytes]; /* decimal64: 1, 5, 8, 50 bits*/ + } decimal64; + + /* special values [top byte excluding sign bit; last two bits are */ + /* don't-care for Infinity on input, last bit don't-care for NaN] */ + #if !defined(DECIMAL_NaN) + #define DECIMAL_NaN 0x7c /* 0 11111 00 NaN */ + #define DECIMAL_sNaN 0x7e /* 0 11111 10 sNaN */ + #define DECIMAL_Inf 0x78 /* 0 11110 00 Infinity */ + #endif + + /* ---------------------------------------------------------------- */ + /* Routines */ + /* ---------------------------------------------------------------- */ + + + /* String conversions */ + decimal64 * decimal64FromString(decimal64 *, const char *, decContext *); + char * decimal64ToString(const decimal64 *, char *); + char * decimal64ToEngString(const decimal64 *, char *); + + /* decNumber conversions */ + decimal64 * decimal64FromNumber(decimal64 *, const decNumber *, + decContext *); + decNumber * decimal64ToNumber(const decimal64 *, decNumber *); + + /* Format-dependent utilities */ + uint32_t decimal64IsCanonical(const decimal64 *); + decimal64 * decimal64Canonical(decimal64 *, const decimal64 *); + +#endif diff --git a/qemu/include/qapi/dealloc-visitor.h b/qemu/include/qapi/dealloc-visitor.h deleted file mode 100644 index cf4c36d2..00000000 --- a/qemu/include/qapi/dealloc-visitor.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Dealloc Visitor - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Michael Roth - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory.
- * - */ - -#ifndef QAPI_DEALLOC_VISITOR_H -#define QAPI_DEALLOC_VISITOR_H - -#include "qapi/visitor.h" - -typedef struct QapiDeallocVisitor QapiDeallocVisitor; - -QapiDeallocVisitor *qapi_dealloc_visitor_new(void); -void qapi_dealloc_visitor_cleanup(QapiDeallocVisitor *d); - -Visitor *qapi_dealloc_get_visitor(QapiDeallocVisitor *v); - -#endif diff --git a/qemu/include/qapi/error.h b/qemu/include/qapi/error.h deleted file mode 100644 index fc1cec66..00000000 --- a/qemu/include/qapi/error.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * QEMU Error Objects - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2. See - * the COPYING.LIB file in the top-level directory. - */ -#ifndef ERROR_H -#define ERROR_H - -#include "qemu/compiler.h" -#include "qapi-types.h" -#include "unicorn/platform.h" - -/** - * A class representing internal errors within QEMU. An error has a ErrorClass - * code and a human message. - */ -typedef struct Error Error; - -/** - * Set an indirect pointer to an error given a ErrorClass value and a - * printf-style human message. This function is not meant to be used outside - * of QEMU. - */ -void error_set(Error **errp, ErrorClass err_class, const char *fmt, ...) - GCC_FMT_ATTR(3, 4); - -/** - * Set an indirect pointer to an error given a ErrorClass value and a - * printf-style human message, followed by a strerror() string if - * @os_error is not zero. - */ -void error_set_errno(Error **errp, int os_error, ErrorClass err_class, - const char *fmt, ...) GCC_FMT_ATTR(4, 5); - -/** - * Same as error_set(), but sets a generic error - */ -#define error_setg(errp, fmt, ...) \ - error_set(errp, ERROR_CLASS_GENERIC_ERROR, fmt, ## __VA_ARGS__) -#define error_setg_errno(errp, os_error, fmt, ...) \ - error_set_errno(errp, os_error, ERROR_CLASS_GENERIC_ERROR, \ - fmt, ## __VA_ARGS__) - -/** - * Helper for open() errors - */ -void error_setg_file_open(Error **errp, int os_errno, const char *filename); - -/* - * Get the error class of an error object. - */ -ErrorClass error_get_class(const Error *err); - -/** - * Returns an exact copy of the error passed as an argument. - */ -Error *error_copy(const Error *err); - -/** - * Get a human readable representation of an error object. - */ -const char *error_get_pretty(Error *err); - -/** - * Propagate an error to an indirect pointer to an error. This function will - * always transfer ownership of the error reference and handles the case where - * dst_err is NULL correctly. Errors after the first are discarded. - */ -void error_propagate(Error **dst_errp, Error *local_err); - -/** - * Free an error object. - */ -void error_free(Error *err); - -/** - * If passed to error_set and friends, abort(). - */ - -extern Error *error_abort; - -#endif diff --git a/qemu/include/qapi/qmp-input-visitor.h b/qemu/include/qapi/qmp-input-visitor.h deleted file mode 100644 index 3ed499cc..00000000 --- a/qemu/include/qapi/qmp-input-visitor.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Input Visitor - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
- * - */ - -#ifndef QMP_INPUT_VISITOR_H -#define QMP_INPUT_VISITOR_H - -#include "qapi/visitor.h" -#include "qapi/qmp/qobject.h" - -typedef struct QmpInputVisitor QmpInputVisitor; - -QmpInputVisitor *qmp_input_visitor_new(QObject *obj); -QmpInputVisitor *qmp_input_visitor_new_strict(QObject *obj); - -void qmp_input_visitor_cleanup(QmpInputVisitor *v); - -Visitor *qmp_input_get_visitor(QmpInputVisitor *v); - -#endif diff --git a/qemu/include/qapi/qmp-output-visitor.h b/qemu/include/qapi/qmp-output-visitor.h deleted file mode 100644 index 22667706..00000000 --- a/qemu/include/qapi/qmp-output-visitor.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Output Visitor - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#ifndef QMP_OUTPUT_VISITOR_H -#define QMP_OUTPUT_VISITOR_H - -#include "qapi/visitor.h" -#include "qapi/qmp/qobject.h" - -typedef struct QmpOutputVisitor QmpOutputVisitor; - -QmpOutputVisitor *qmp_output_visitor_new(void); -void qmp_output_visitor_cleanup(QmpOutputVisitor *v); - -QObject *qmp_output_get_qobject(QmpOutputVisitor *v); -Visitor *qmp_output_get_visitor(QmpOutputVisitor *v); - -#endif diff --git a/qemu/include/qapi/qmp/qbool.h b/qemu/include/qapi/qmp/qbool.h deleted file mode 100644 index 5304dc5e..00000000 --- a/qemu/include/qapi/qmp/qbool.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * QBool Module - * - * Copyright IBM, Corp. 2009 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#ifndef QBOOL_H -#define QBOOL_H - -#include "unicorn/platform.h" -#include "qapi/qmp/qobject.h" - -typedef struct QBool { - QObject_HEAD; - int value; -} QBool; - -QBool *qbool_from_int(int value); -int qbool_get_int(const QBool *qb); -QBool *qobject_to_qbool(const QObject *obj); - -#endif /* QBOOL_H */ diff --git a/qemu/include/qapi/qmp/qdict.h b/qemu/include/qapi/qmp/qdict.h deleted file mode 100644 index 567c02f4..00000000 --- a/qemu/include/qapi/qmp/qdict.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * QDict Module - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
- */ - -#ifndef QDICT_H -#define QDICT_H - -#include "qapi/qmp/qobject.h" -#include "qapi/qmp/qlist.h" -#include "qemu/queue.h" -#include "unicorn/platform.h" - -#define QDICT_BUCKET_MAX 512 - -typedef struct QDictEntry { - char *key; - QObject *value; - QLIST_ENTRY(QDictEntry) next; -} QDictEntry; - -typedef struct QDict { - QObject_HEAD; - size_t size; - QLIST_HEAD(,QDictEntry) table[QDICT_BUCKET_MAX]; -} QDict; - -/* Object API */ -QDict *qdict_new(void); -const char *qdict_entry_key(const QDictEntry *entry); -QObject *qdict_entry_value(const QDictEntry *entry); -size_t qdict_size(const QDict *qdict); -void qdict_put_obj(QDict *qdict, const char *key, QObject *value); -void qdict_del(QDict *qdict, const char *key); -int qdict_haskey(const QDict *qdict, const char *key); -QObject *qdict_get(const QDict *qdict, const char *key); -QDict *qobject_to_qdict(const QObject *obj); -void qdict_iter(const QDict *qdict, - void (*iter)(const char *key, QObject *obj, void *opaque), - void *opaque); -const QDictEntry *qdict_first(const QDict *qdict); -const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry); - -/* Helper to qdict_put_obj(), accepts any object */ -#define qdict_put(qdict, key, obj) \ - qdict_put_obj(qdict, key, QOBJECT(obj)) - -/* High level helpers */ -double qdict_get_double(const QDict *qdict, const char *key); -int64_t qdict_get_int(const QDict *qdict, const char *key); -int qdict_get_bool(const QDict *qdict, const char *key); -QList *qdict_get_qlist(const QDict *qdict, const char *key); -QDict *qdict_get_qdict(const QDict *qdict, const char *key); -const char *qdict_get_str(const QDict *qdict, const char *key); -int64_t qdict_get_try_int(const QDict *qdict, const char *key, - int64_t def_value); -int qdict_get_try_bool(const QDict *qdict, const char *key, int def_value); -const char *qdict_get_try_str(const QDict *qdict, const char *key); - -QDict *qdict_clone_shallow(const QDict *src); -void qdict_flatten(QDict *qdict); - -void qdict_extract_subqdict(QDict *src, QDict **dst, const char *start); -void qdict_array_split(QDict *src, QList **dst); - -void qdict_join(QDict *dest, QDict *src, bool overwrite); - -#endif /* QDICT_H */ diff --git a/qemu/include/qapi/qmp/qerror.h b/qemu/include/qapi/qmp/qerror.h deleted file mode 100644 index ed12abeb..00000000 --- a/qemu/include/qapi/qmp/qerror.h +++ /dev/null @@ -1,155 +0,0 @@ -/* - * QError Module - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - */ -#ifndef QERROR_H -#define QERROR_H - -#include "qapi/qmp/qstring.h" -#include "qapi/error.h" -#include "qapi-types.h" -#include - -typedef struct QError { - QObject_HEAD; - char *err_msg; - ErrorClass err_class; -} QError; - -QString *qerror_human(const QError *qerror); -void qerror_report(ErrorClass err_class, const char *fmt, ...) GCC_FMT_ATTR(2, 3); -void qerror_report_err(Error *err); - -/* - * QError class list - * Please keep the definitions in alphabetical order. - * Use scripts/check-qerror.sh to check. 
- */ -#define QERR_BASE_NOT_FOUND \ - ERROR_CLASS_GENERIC_ERROR, "Base '%s' not found" - -#define QERR_BLOCK_JOB_NOT_ACTIVE \ - ERROR_CLASS_DEVICE_NOT_ACTIVE, "No active block job on device '%s'" - -#define QERR_BLOCK_JOB_NOT_READY \ - ERROR_CLASS_GENERIC_ERROR, "The active block job for device '%s' cannot be completed" - -#define QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED \ - ERROR_CLASS_GENERIC_ERROR, "Block format '%s' used by device '%s' does not support feature '%s'" - -#define QERR_BUS_NO_HOTPLUG \ - ERROR_CLASS_GENERIC_ERROR, "Bus '%s' does not support hotplugging" - -#define QERR_BUS_NOT_FOUND \ - ERROR_CLASS_GENERIC_ERROR, "Bus '%s' not found" - -#define QERR_COMMAND_NOT_FOUND \ - ERROR_CLASS_COMMAND_NOT_FOUND, "The command %s has not been found" - -#define QERR_DEVICE_ENCRYPTED \ - ERROR_CLASS_DEVICE_ENCRYPTED, "'%s' (%s) is encrypted" - -#define QERR_DEVICE_HAS_NO_MEDIUM \ - ERROR_CLASS_GENERIC_ERROR, "Device '%s' has no medium" - -#define QERR_DEVICE_INIT_FAILED \ - ERROR_CLASS_GENERIC_ERROR, "Device '%s' could not be initialized" - -#define QERR_DEVICE_IN_USE \ - ERROR_CLASS_GENERIC_ERROR, "Device '%s' is in use" - -#define QERR_DEVICE_IS_READ_ONLY \ - ERROR_CLASS_GENERIC_ERROR, "Device '%s' is read only" - -#define QERR_DEVICE_NO_HOTPLUG \ - ERROR_CLASS_GENERIC_ERROR, "Device '%s' does not support hotplugging" - -#define QERR_DEVICE_NOT_ACTIVE \ - ERROR_CLASS_DEVICE_NOT_ACTIVE, "No %s device has been activated" - -#define QERR_DEVICE_NOT_ENCRYPTED \ - ERROR_CLASS_GENERIC_ERROR, "Device '%s' is not encrypted" - -#define QERR_DEVICE_NOT_FOUND \ - ERROR_CLASS_DEVICE_NOT_FOUND, "Device '%s' not found" - -#define QERR_FD_NOT_FOUND \ - ERROR_CLASS_GENERIC_ERROR, "File descriptor named '%s' not found" - -#define QERR_FD_NOT_SUPPLIED \ - ERROR_CLASS_GENERIC_ERROR, "No file descriptor supplied via SCM_RIGHTS" - -#define QERR_FEATURE_DISABLED \ - ERROR_CLASS_GENERIC_ERROR, "The feature '%s' is not enabled" - -#define QERR_INVALID_BLOCK_FORMAT \ - ERROR_CLASS_GENERIC_ERROR, "Invalid block format '%s'" - -#define QERR_INVALID_PARAMETER \ - ERROR_CLASS_GENERIC_ERROR, "Invalid parameter '%s'" - -#define QERR_INVALID_PARAMETER_TYPE \ - ERROR_CLASS_GENERIC_ERROR, "Invalid parameter type for '%s', expected: %s" - -#define QERR_INVALID_PARAMETER_VALUE \ - ERROR_CLASS_GENERIC_ERROR, "Parameter '%s' expects %s" - -#define QERR_INVALID_PASSWORD \ - ERROR_CLASS_GENERIC_ERROR, "Password incorrect" - -#define QERR_IO_ERROR \ - ERROR_CLASS_GENERIC_ERROR, "An IO error has occurred" - -#define QERR_JSON_PARSING \ - ERROR_CLASS_GENERIC_ERROR, "Invalid JSON syntax" - -#define QERR_KVM_MISSING_CAP \ - ERROR_CLASS_KVM_MISSING_CAP, "Using KVM without %s, %s unavailable" - -#define QERR_MIGRATION_ACTIVE \ - ERROR_CLASS_GENERIC_ERROR, "There's a migration process in progress" - -#define QERR_MISSING_PARAMETER \ - ERROR_CLASS_GENERIC_ERROR, "Parameter '%s' is missing" - -#define QERR_PERMISSION_DENIED \ - ERROR_CLASS_GENERIC_ERROR, "Insufficient permission to perform this operation" - -#define QERR_PROPERTY_VALUE_BAD \ - ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' doesn't take value '%s'" - -#define QERR_PROPERTY_VALUE_OUT_OF_RANGE \ - ERROR_CLASS_GENERIC_ERROR, "Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" - -#define QERR_QGA_COMMAND_FAILED \ - ERROR_CLASS_GENERIC_ERROR, "Guest agent command failed, error was '%s'" - -#define QERR_QMP_BAD_INPUT_OBJECT \ - ERROR_CLASS_GENERIC_ERROR, "Expected '%s' in QMP input" - -#define QERR_QMP_BAD_INPUT_OBJECT_MEMBER \ - 
ERROR_CLASS_GENERIC_ERROR, "QMP input object member '%s' expects '%s'" - -#define QERR_QMP_EXTRA_MEMBER \ - ERROR_CLASS_GENERIC_ERROR, "QMP input object member '%s' is unexpected" - -#define QERR_SET_PASSWD_FAILED \ - ERROR_CLASS_GENERIC_ERROR, "Could not set password" - -#define QERR_UNDEFINED_ERROR \ - ERROR_CLASS_GENERIC_ERROR, "An undefined error has occurred" - -#define QERR_UNKNOWN_BLOCK_FORMAT_FEATURE \ - ERROR_CLASS_GENERIC_ERROR, "'%s' uses a %s feature which is not supported by this qemu version: %s" - -#define QERR_UNSUPPORTED \ - ERROR_CLASS_GENERIC_ERROR, "this feature or command is not currently supported" - -#endif /* QERROR_H */ diff --git a/qemu/include/qapi/qmp/qfloat.h b/qemu/include/qapi/qmp/qfloat.h deleted file mode 100644 index b068ed3f..00000000 --- a/qemu/include/qapi/qmp/qfloat.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * QFloat Module - * - * Copyright IBM, Corp. 2009 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#ifndef QFLOAT_H -#define QFLOAT_H - -#include "unicorn/platform.h" -#include "qapi/qmp/qobject.h" - -typedef struct QFloat { - QObject_HEAD; - double value; -} QFloat; - -QFloat *qfloat_from_double(double value); -double qfloat_get_double(const QFloat *qi); -QFloat *qobject_to_qfloat(const QObject *obj); - -#endif /* QFLOAT_H */ diff --git a/qemu/include/qapi/qmp/qint.h b/qemu/include/qapi/qmp/qint.h deleted file mode 100644 index 0150b7e4..00000000 --- a/qemu/include/qapi/qmp/qint.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * QInt Module - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - */ - -#ifndef QINT_H -#define QINT_H - -#include "unicorn/platform.h" -#include "qapi/qmp/qobject.h" - -typedef struct QInt { - QObject_HEAD; - int64_t value; -} QInt; - -QInt *qint_from_int(int64_t value); -int64_t qint_get_int(const QInt *qi); -QInt *qobject_to_qint(const QObject *obj); - -#endif /* QINT_H */ diff --git a/qemu/include/qapi/qmp/qjson.h b/qemu/include/qapi/qmp/qjson.h deleted file mode 100644 index ee4d31a4..00000000 --- a/qemu/include/qapi/qmp/qjson.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * QObject JSON integration - * - * Copyright IBM, Corp. 2009 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#ifndef QJSON_H -#define QJSON_H - -#include -#include "qemu/compiler.h" -#include "qapi/qmp/qobject.h" -#include "qapi/qmp/qstring.h" - -QObject *qobject_from_json(const char *string); -QObject *qobject_from_jsonf(const char *string, ...) GCC_FMT_ATTR(1, 2); -QObject *qobject_from_jsonv(const char *string, va_list *ap) GCC_FMT_ATTR(1, 0); - -QString *qobject_to_json(const QObject *obj); -QString *qobject_to_json_pretty(const QObject *obj); - -#endif /* QJSON_H */ diff --git a/qemu/include/qapi/qmp/qlist.h b/qemu/include/qapi/qmp/qlist.h deleted file mode 100644 index 6cc4831d..00000000 --- a/qemu/include/qapi/qmp/qlist.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * QList Module - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
- */ - -#ifndef QLIST_H -#define QLIST_H - -#include "qapi/qmp/qobject.h" -#include "qemu/queue.h" - -typedef struct QListEntry { - QObject *value; - QTAILQ_ENTRY(QListEntry) next; -} QListEntry; - -typedef struct QList { - QObject_HEAD; - QTAILQ_HEAD(,QListEntry) head; -} QList; - -#define qlist_append(qlist, obj) \ - qlist_append_obj(qlist, QOBJECT(obj)) - -#define QLIST_FOREACH_ENTRY(qlist, var) \ - for ((var) = ((qlist)->head.tqh_first); \ - (var); \ - (var) = ((var)->next.tqe_next)) - -static inline QObject *qlist_entry_obj(const QListEntry *entry) -{ - return entry->value; -} - -QList *qlist_new(void); -QList *qlist_copy(QList *src); -void qlist_append_obj(QList *qlist, QObject *obj); -void qlist_iter(const QList *qlist, - void (*iter)(QObject *obj, void *opaque), void *opaque); -QObject *qlist_pop(QList *qlist); -QObject *qlist_peek(QList *qlist); -int qlist_empty(const QList *qlist); -size_t qlist_size(const QList *qlist); -QList *qobject_to_qlist(const QObject *obj); - -static inline const QListEntry *qlist_first(const QList *qlist) -{ - return QTAILQ_FIRST(&qlist->head); -} - -static inline const QListEntry *qlist_next(const QListEntry *entry) -{ - return QTAILQ_NEXT(entry, next); -} - -#endif /* QLIST_H */ diff --git a/qemu/include/qapi/qmp/qobject.h b/qemu/include/qapi/qmp/qobject.h deleted file mode 100644 index d0bbc7c4..00000000 --- a/qemu/include/qapi/qmp/qobject.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * QEMU Object Model. - * - * Based on ideas by Avi Kivity - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - * QObject Reference Counts Terminology - * ------------------------------------ - * - * - Returning references: A function that returns an object may - * return it as either a weak or a strong reference. If the reference - * is strong, you are responsible for calling QDECREF() on the reference - * when you are done. - * - * If the reference is weak, the owner of the reference may free it at - * any time in the future. Before storing the reference anywhere, you - * should call QINCREF() to make the reference strong. - * - * - Transferring ownership: when you transfer ownership of a reference - * by calling a function, you are no longer responsible for calling - * QDECREF() when the reference is no longer needed. In other words, - * when the function returns you must behave as if the reference to the - * passed object was weak. - */ -#ifndef QOBJECT_H -#define QOBJECT_H - -#include -#include - -typedef enum { - QTYPE_NONE, - QTYPE_QINT, - QTYPE_QSTRING, - QTYPE_QDICT, - QTYPE_QLIST, - QTYPE_QFLOAT, - QTYPE_QBOOL, - QTYPE_QERROR, - QTYPE_MAX, -} qtype_code; - -struct QObject; - -typedef struct QType { - qtype_code code; - void (*destroy)(struct QObject *); -} QType; - -typedef struct QObject { - const QType *type; - size_t refcnt; -} QObject; - -/* Objects definitions must include this */ -#define QObject_HEAD \ - QObject base - -/* Get the 'base' part of an object */ -#define QOBJECT(obj) (&(obj)->base) - -/* High-level interface for qobject_incref() */ -#define QINCREF(obj) \ - qobject_incref(QOBJECT(obj)) - -/* High-level interface for qobject_decref() */ -#define QDECREF(obj) \ - qobject_decref(obj ? 
QOBJECT(obj) : NULL) - -/* Initialize an object to default values */ -#define QOBJECT_INIT(obj, qtype_type) \ - obj->base.refcnt = 1; \ - obj->base.type = qtype_type - -/** - * qobject_incref(): Increment QObject's reference count - */ -static inline void qobject_incref(QObject *obj) -{ - if (obj) - obj->refcnt++; -} - -/** - * qobject_decref(): Decrement QObject's reference count, deallocate - * when it reaches zero - */ -static inline void qobject_decref(QObject *obj) -{ - if (obj && --obj->refcnt == 0) { - assert(obj->type != NULL); - assert(obj->type->destroy != NULL); - obj->type->destroy(obj); - } -} - -/** - * qobject_type(): Return the QObject's type - */ -static inline qtype_code qobject_type(const QObject *obj) -{ - assert(obj->type != NULL); - return obj->type->code; -} - -#endif /* QOBJECT_H */ diff --git a/qemu/include/qapi/qmp/qstring.h b/qemu/include/qapi/qmp/qstring.h deleted file mode 100644 index 734e9127..00000000 --- a/qemu/include/qapi/qmp/qstring.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * QString Module - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - */ - -#ifndef QSTRING_H -#define QSTRING_H - -#include "unicorn/platform.h" -#include "qapi/qmp/qobject.h" - -typedef struct QString { - QObject_HEAD; - char *string; - size_t length; - size_t capacity; -} QString; - -QString *qstring_new(void); -QString *qstring_from_str(const char *str); -QString *qstring_from_substr(const char *str, int start, int end); -size_t qstring_get_length(const QString *qstring); -const char *qstring_get_str(const QString *qstring); -void qstring_append_int(QString *qstring, int64_t value); -void qstring_append(QString *qstring, const char *str); -void qstring_append_chr(QString *qstring, int c); -QString *qobject_to_qstring(const QObject *obj); - -#endif /* QSTRING_H */ diff --git a/qemu/include/qapi/qmp/types.h b/qemu/include/qapi/qmp/types.h deleted file mode 100644 index 7782ec5a..00000000 --- a/qemu/include/qapi/qmp/types.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Include all QEMU objects. - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - */ - -#ifndef QEMU_OBJECTS_H -#define QEMU_OBJECTS_H - -#include "qapi/qmp/qobject.h" -#include "qapi/qmp/qint.h" -#include "qapi/qmp/qfloat.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qstring.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qjson.h" - -#endif /* QEMU_OBJECTS_H */ diff --git a/qemu/include/qapi/string-input-visitor.h b/qemu/include/qapi/string-input-visitor.h deleted file mode 100644 index 089243c0..00000000 --- a/qemu/include/qapi/string-input-visitor.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * String parsing Visitor - * - * Copyright Red Hat, Inc. 2012 - * - * Author: Paolo Bonzini - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
- * - */ - -#ifndef STRING_INPUT_VISITOR_H -#define STRING_INPUT_VISITOR_H - -#include "qapi/visitor.h" - -typedef struct StringInputVisitor StringInputVisitor; - -StringInputVisitor *string_input_visitor_new(const char *str); -void string_input_visitor_cleanup(StringInputVisitor *v); - -Visitor *string_input_get_visitor(StringInputVisitor *v); - -#endif diff --git a/qemu/include/qapi/visitor-impl.h b/qemu/include/qapi/visitor-impl.h deleted file mode 100644 index 09bb0fd4..00000000 --- a/qemu/include/qapi/visitor-impl.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Core Definitions for QAPI Visitor implementations - * - * Copyright (C) 2012 Red Hat, Inc. - * - * Author: Paolo Bonzini - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ -#ifndef QAPI_VISITOR_IMPL_H -#define QAPI_VISITOR_IMPL_H - -#include "qapi/error.h" -#include "qapi/visitor.h" - -struct Visitor -{ - /* Must be set */ - void (*start_struct)(Visitor *v, void **obj, const char *kind, - const char *name, size_t size, Error **errp); - void (*end_struct)(Visitor *v, Error **errp); - - void (*start_implicit_struct)(Visitor *v, void **obj, size_t size, - Error **errp); - void (*end_implicit_struct)(Visitor *v, Error **errp); - - void (*start_list)(Visitor *v, const char *name, Error **errp); - GenericList *(*next_list)(Visitor *v, GenericList **list, Error **errp); - void (*end_list)(Visitor *v, Error **errp); - - void (*type_enum)(Visitor *v, int *obj, const char *strings[], - const char *kind, const char *name, Error **errp); - void (*get_next_type)(Visitor *v, int *kind, const int *qobjects, - const char *name, Error **errp); - - void (*type_int)(Visitor *v, int64_t *obj, const char *name, Error **errp); - void (*type_bool)(Visitor *v, bool *obj, const char *name, Error **errp); - void (*type_str)(Visitor *v, char **obj, const char *name, Error **errp); - void (*type_number)(Visitor *v, double *obj, const char *name, - Error **errp); - - /* May be NULL */ - void (*optional)(Visitor *v, bool *present, const char *name, - Error **errp); - - void (*type_uint8)(Visitor *v, uint8_t *obj, const char *name, Error **errp); - void (*type_uint16)(Visitor *v, uint16_t *obj, const char *name, Error **errp); - void (*type_uint32)(Visitor *v, uint32_t *obj, const char *name, Error **errp); - void (*type_uint64)(Visitor *v, uint64_t *obj, const char *name, Error **errp); - void (*type_int8)(Visitor *v, int8_t *obj, const char *name, Error **errp); - void (*type_int16)(Visitor *v, int16_t *obj, const char *name, Error **errp); - void (*type_int32)(Visitor *v, int32_t *obj, const char *name, Error **errp); - void (*type_int64)(Visitor *v, int64_t *obj, const char *name, Error **errp); - /* visit_type_size() falls back to (*type_uint64)() if type_size is unset */ - void (*type_size)(Visitor *v, uint64_t *obj, const char *name, Error **errp); - bool (*start_union)(Visitor *v, bool data_present, Error **errp); - void (*end_union)(Visitor *v, bool data_present, Error **errp); -}; - -void input_type_enum(Visitor *v, int *obj, const char *strings[], - const char *kind, const char *name, Error **errp); -void output_type_enum(Visitor *v, int *obj, const char *strings[], - const char *kind, const char *name, Error **errp); - -#endif diff --git a/qemu/include/qapi/visitor.h b/qemu/include/qapi/visitor.h deleted file mode 100644 index 5934f59a..00000000 --- a/qemu/include/qapi/visitor.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Core Definitions for QAPI Visitor
Classes - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ -#ifndef QAPI_VISITOR_CORE_H -#define QAPI_VISITOR_CORE_H - -#include "qemu/typedefs.h" -#include "qapi/qmp/qobject.h" -#include "qapi/error.h" -#include - -typedef struct GenericList -{ - union { - void *value; - uint64_t padding; - }; - struct GenericList *next; -} GenericList; - -void visit_start_handle(Visitor *v, void **obj, const char *kind, - const char *name, Error **errp); -void visit_end_handle(Visitor *v, Error **errp); -void visit_start_struct(Visitor *v, void **obj, const char *kind, - const char *name, size_t size, Error **errp); -void visit_end_struct(Visitor *v, Error **errp); -void visit_start_implicit_struct(Visitor *v, void **obj, size_t size, - Error **errp); -void visit_end_implicit_struct(Visitor *v, Error **errp); -void visit_start_list(Visitor *v, const char *name, Error **errp); -GenericList *visit_next_list(Visitor *v, GenericList **list, Error **errp); -void visit_end_list(Visitor *v, Error **errp); -void visit_optional(Visitor *v, bool *present, const char *name, - Error **errp); -void visit_get_next_type(Visitor *v, int *obj, const int *qtypes, - const char *name, Error **errp); -void visit_type_enum(Visitor *v, int *obj, const char *strings[], - const char *kind, const char *name, Error **errp); -void visit_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp); -void visit_type_uint8(Visitor *v, uint8_t *obj, const char *name, Error **errp); -void visit_type_uint16(Visitor *v, uint16_t *obj, const char *name, Error **errp); -void visit_type_uint32(Visitor *v, uint32_t *obj, const char *name, Error **errp); -void visit_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp); -void visit_type_int8(Visitor *v, int8_t *obj, const char *name, Error **errp); -void visit_type_int16(Visitor *v, int16_t *obj, const char *name, Error **errp); -void visit_type_int32(Visitor *v, int32_t *obj, const char *name, Error **errp); -void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp); -void visit_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp); -void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp); -void visit_type_str(Visitor *v, char **obj, const char *name, Error **errp); -void visit_type_number(Visitor *v, double *obj, const char *name, Error **errp); -bool visit_start_union(Visitor *v, bool data_present, Error **errp); -void visit_end_union(Visitor *v, bool data_present, Error **errp); - -#endif diff --git a/qemu/include/qemu-common.h b/qemu/include/qemu-common.h index d2097dbf..5b6a6cba 100644 --- a/qemu/include/qemu-common.h +++ b/qemu/include/qemu-common.h @@ -1,6 +1,4 @@ - -/* Common header file that is included by all of QEMU. - * +/* * This file is supposed to be included only by .c files. No header file should * depend on qemu-common.h, as this would easily lead to circular header * dependencies. 
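/* Aside: every QMP container removed above (QDict, QList, QString, QBool,
   QInt, QFloat) hangs off the reference-counting contract declared in the
   deleted qobject.h. The following is a minimal, self-contained sketch of
   that contract: QType/QObject and the incref/decref logic mirror the
   deleted header, while demo_type and main() are purely illustrative and
   appear nowhere in this patch. */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct QObject QObject;

typedef struct QType {
    void (*destroy)(QObject *);   /* called when the refcount hits zero */
} QType;

struct QObject {
    const QType *type;
    size_t refcnt;
};

static void qobject_incref(QObject *obj)
{
    if (obj)
        obj->refcnt++;
}

static void qobject_decref(QObject *obj)
{
    if (obj && --obj->refcnt == 0) {
        assert(obj->type != NULL);
        assert(obj->type->destroy != NULL);
        obj->type->destroy(obj);  /* last strong reference dropped */
    }
}

static void demo_destroy(QObject *obj)
{
    printf("refcount reached zero, destroying\n");
    free(obj);
}

static const QType demo_type = { demo_destroy };

int main(void)
{
    QObject *obj = calloc(1, sizeof(*obj));
    if (!obj)
        return 1;
    obj->type = &demo_type;
    obj->refcnt = 1;          /* creation hands back a strong reference */

    qobject_incref(obj);      /* QINCREF: make a borrowed reference strong */
    qobject_decref(obj);      /* QDECREF: release it again */
    qobject_decref(obj);      /* dropping the last reference destroys */
    return 0;
}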
@@ -12,135 +10,26 @@ #ifndef QEMU_COMMON_H #define QEMU_COMMON_H -#include "qemu/compiler.h" -#include "config-host.h" -#include "qemu/typedefs.h" -#include "exec/cpu-common.h" - -#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__) -#define WORDS_ALIGNED -#endif +#include #define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR) -/* we put basic includes here to avoid repeating them in device drivers */ -#include -#include -#include -#include "unicorn/platform.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include "glib_compat.h" +/* Copyright string for -version arguments, About dialogs, etc */ +#define QEMU_COPYRIGHT "Copyright (c) 2003-2020 " \ + "Fabrice Bellard and the QEMU Project developers" -#ifdef _WIN32 -#include "sysemu/os-win32.h" +/* Bug reporting information for --help arguments, About dialogs, etc */ +#define QEMU_HELP_BOTTOM \ + "See <https://qemu.org/contribute/report-a-bug> for how to report bugs.\n" \ + "More information on the QEMU project at <https://qemu.org>." + +/* main function, renamed */ +#if defined(CONFIG_COCOA) +int qemu_main(int argc, char **argv, char **envp); #endif -#ifndef O_LARGEFILE -#define O_LARGEFILE 0 -#endif -#ifndef O_BINARY -#define O_BINARY 0 -#endif -#ifndef MAP_ANONYMOUS -#define MAP_ANONYMOUS MAP_ANON -#endif -#ifndef ENOMEDIUM -#define ENOMEDIUM ENODEV -#endif -#if !defined(ENOTSUP) -#define ENOTSUP 4096 -#endif -#if !defined(ECANCELED) -#define ECANCELED 4097 -#endif -#if !defined(EMEDIUMTYPE) -#define EMEDIUMTYPE 4098 -#endif -#ifndef TIME_MAX -#define TIME_MAX LONG_MAX -#endif - -/* HOST_LONG_BITS is the size of a native pointer in bits. */ -#if UINTPTR_MAX == UINT32_MAX -# define HOST_LONG_BITS 32 -#elif UINTPTR_MAX == UINT64_MAX -# define HOST_LONG_BITS 64 -#else -# error Unknown pointer size -#endif - -typedef int (*fprintf_function)(FILE *f, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); - -#ifdef _WIN32 -#define fsync _commit -#if !defined(lseek) -# define lseek _lseeki64 -#endif -int qemu_ftruncate64(int, int64_t); -#if !defined(ftruncate) -# define ftruncate qemu_ftruncate64 -#endif -#endif - -#include "qemu/osdep.h" -#include "qemu/bswap.h" - -/* FIXME: Remove NEED_CPU_H. */ -#ifdef NEED_CPU_H -#include "cpu.h" -#endif /* !defined(NEED_CPU_H) */ - -/* cutils.c */ -void pstrcpy(char *buf, int buf_size, const char *str); -char *pstrcat(char *buf, int buf_size, const char *s); -int strstart(const char *str, const char *val, const char **ptr); -int qemu_fls(int i); - -/* - * strtosz() suffixes used to specify the default treatment of an - * argument passed to strtosz() without an explicit suffix. - * These should be defined using upper case characters in the range - * A-Z, as strtosz() will use qemu_toupper() on the given argument - * prior to comparison. - */ -#define STRTOSZ_DEFSUFFIX_EB 'E' -#define STRTOSZ_DEFSUFFIX_PB 'P' -#define STRTOSZ_DEFSUFFIX_TB 'T' -#define STRTOSZ_DEFSUFFIX_GB 'G' -#define STRTOSZ_DEFSUFFIX_MB 'M' -#define STRTOSZ_DEFSUFFIX_KB 'K' -#define STRTOSZ_DEFSUFFIX_B 'B' -int64_t strtosz(const char *nptr, char **end); -int64_t strtosz_suffix(const char *nptr, char **end, const char default_suffix); -int64_t strtosz_suffix_unit(const char *nptr, char **end, - const char default_suffix, int64_t unit); - -/* used to print char* safely */ -#define STR_OR_NULL(str) ((str) ?
(str) : "null") - -#define qemu_isalnum(c) isalnum((unsigned char)(c)) -#define qemu_isalpha(c) isalpha((unsigned char)(c)) -#define qemu_iscntrl(c) iscntrl((unsigned char)(c)) -#define qemu_isdigit(c) isdigit((unsigned char)(c)) -#define qemu_isgraph(c) isgraph((unsigned char)(c)) -#define qemu_islower(c) islower((unsigned char)(c)) -#define qemu_isprint(c) isprint((unsigned char)(c)) -#define qemu_ispunct(c) ispunct((unsigned char)(c)) -#define qemu_isspace(c) isspace((unsigned char)(c)) -#define qemu_isupper(c) isupper((unsigned char)(c)) -#define qemu_isxdigit(c) isxdigit((unsigned char)(c)) -#define qemu_tolower(c) tolower((unsigned char)(c)) -#define qemu_toupper(c) toupper((unsigned char)(c)) -#define qemu_isascii(c) isascii((unsigned char)(c)) -#define qemu_toascii(c) toascii((unsigned char)(c)) +void qemu_get_timedate(struct tm *tm, int offset); +int qemu_timedate_diff(struct tm *tm); void *qemu_oom_check(void *ptr); @@ -163,90 +52,30 @@ void *qemu_oom_check(void *ptr); sendto(sockfd, buf, len, flags, destaddr, addrlen) #endif -/* Error handling. */ - -void tcg_exec_init(struct uc_struct *uc, unsigned long tb_size); -bool tcg_enabled(struct uc_struct *uc); - struct uc_struct; void cpu_exec_init_all(struct uc_struct *uc); -/* compute with 96 bit intermediate result: (a*b)/c */ -static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) -{ - union { - uint64_t ll; - struct { -#ifdef HOST_WORDS_BIGENDIAN - uint32_t high, low; -#else - uint32_t low, high; -#endif - } l; - } u, res; - uint64_t rl, rh; - - u.ll = a; - rl = (uint64_t)u.l.low * (uint64_t)b; - rh = (uint64_t)u.l.high * (uint64_t)b; - rh += (rl >> 32); - res.l.high = (uint32_t)(rh / c); - res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c; - return res.ll; -} - -/* Round number down to multiple */ -#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m)) - -/* Round number up to multiple */ -#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m)) - -#include "qemu/module.h" - -/* vector definitions */ -#ifdef __ALTIVEC__ -/* The altivec.h header says we're allowed to undef these for - * C++ compatibility. Here we don't care about C++, but we - * undef them anyway to avoid namespace pollution. +/** + * set_preferred_target_page_bits: + * @bits: number of bits needed to represent an address within the page + * + * Set the preferred target page size (the actual target page + * size may be smaller than any given CPU's preference). + * Returns true on success, false on failure (which can only happen + * if this is called after the system has already finalized its + * choice of page size and the requested page size is smaller than that). */ -#undef vector -#undef pixel -#undef bool -#include -#define VECTYPE __vector unsigned char -#define SPLAT(p) vec_splat(vec_ld(0, p), 0) -#define ALL_EQ(v1, v2) vec_all_eq(v1, v2) -/* altivec.h may redefine the bool macro as vector type. - * Reset it to POSIX semantics. */ -#define bool _Bool -#elif defined __SSE2__ -#include -#define VECTYPE __m128i -#define SPLAT(p) _mm_set1_epi8(*(p)) -#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF) -#else -#define VECTYPE unsigned long -#define SPLAT(p) (*(p) * (~0UL / 255)) -#define ALL_EQ(v1, v2) ((v1) == (v2)) -#endif +bool set_preferred_target_page_bits(struct uc_struct *uc, int bits); -// support for calling functions before main code is executed. 
-#if defined(_MSC_VER) - #pragma section(".CRT$XCU",read) - #define INITIALIZER2_(f,p) \ - static void f(void); \ - __declspec(allocate(".CRT$XCU")) void (*f##_)(void) = f; \ - __pragma(comment(linker,"/include:" p #f "_")) \ - static void f(void) - #ifdef _WIN64 - #define INITIALIZER(f) INITIALIZER2_(f,"") - #else - #define INITIALIZER(f) INITIALIZER2_(f,"_") - #endif -#else - #define INITIALIZER(f) \ - static void f(void) __attribute__((constructor)); \ - static void f(void) -#endif +/** + * finalize_target_page_bits: + * Commit the final value set by set_preferred_target_page_bits. + */ +void finalize_target_page_bits(struct uc_struct *uc); + +/* OS specific functions */ +void os_setup_early_signal_handling(void); + +void page_size_init(struct uc_struct *uc); #endif diff --git a/qemu/include/qemu/atomic.h b/qemu/include/qemu/atomic.h index 79c10efe..b358c95e 100644 --- a/qemu/include/qemu/atomic.h +++ b/qemu/include/qemu/atomic.h @@ -8,227 +8,251 @@ * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * + * See docs/atomics.txt for discussion about the guarantees each + * atomic primitive is meant to provide. */ -#ifndef __QEMU_ATOMIC_H -#define __QEMU_ATOMIC_H 1 +#ifndef QEMU_ATOMIC_H +#define QEMU_ATOMIC_H #include "qemu/compiler.h" +// we do not really support multiple CPUs, so we dont care +#define smp_mb() +#define smp_wmb() +#define smp_rmb() +#define barrier() + +/* The variable that receives the old value of an atomically-accessed + * variable must be non-qualified, because atomic builtins return values + * through a pointer-type argument as in __atomic_load(&var, &old, MODEL). + * + * This macro has to handle types smaller than int manually, because of + * implicit promotion. int and larger types, as well as pointers, can be + * converted to a non-qualified type just by applying a binary operator. 
+ */ +#define typeof_strip_qual(expr) \ + typeof( \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(expr), bool) || \ + __builtin_types_compatible_p(typeof(expr), const bool) || \ + __builtin_types_compatible_p(typeof(expr), volatile bool) || \ + __builtin_types_compatible_p(typeof(expr), const volatile bool), \ + (bool)1, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(expr), signed char) || \ + __builtin_types_compatible_p(typeof(expr), const signed char) || \ + __builtin_types_compatible_p(typeof(expr), volatile signed char) || \ + __builtin_types_compatible_p(typeof(expr), const volatile signed char), \ + (signed char)1, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(expr), unsigned char) || \ + __builtin_types_compatible_p(typeof(expr), const unsigned char) || \ + __builtin_types_compatible_p(typeof(expr), volatile unsigned char) || \ + __builtin_types_compatible_p(typeof(expr), const volatile unsigned char), \ + (unsigned char)1, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(expr), signed short) || \ + __builtin_types_compatible_p(typeof(expr), const signed short) || \ + __builtin_types_compatible_p(typeof(expr), volatile signed short) || \ + __builtin_types_compatible_p(typeof(expr), const volatile signed short), \ + (signed short)1, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(expr), unsigned short) || \ + __builtin_types_compatible_p(typeof(expr), const unsigned short) || \ + __builtin_types_compatible_p(typeof(expr), volatile unsigned short) || \ + __builtin_types_compatible_p(typeof(expr), const volatile unsigned short), \ + (unsigned short)1, \ + (expr)+0)))))) + +#ifdef __ATOMIC_RELAXED /* For C11 atomic ops */ -/* Compiler barrier */ -#ifdef _MSC_VER -void _ReadWriteBarrier(void); -#pragma intrinsic(_ReadWriteBarrier) -#define barrier() do { _ReadWriteBarrier(); } while (0) -#else -#define barrier() ({ asm volatile("" ::: "memory"); (void)0; }) -#endif - -#ifndef __ATOMIC_RELAXED - -/* - * We use GCC builtin if it's available, as that can use mfence on - * 32-bit as well, e.g. if built with -march=pentium-m. However, on - * i386 the spec is buggy, and the implementation followed it until - * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793). +/* Sanity check that the size of an atomic operation isn't "overly large". + * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not + * want to use them because we ought not need them, and this lets us do a + * bit of sanity checking that other 32-bit hosts might build. + * + * That said, we have a problem on 64-bit ILP32 hosts in that in order to + * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS. + * We'd prefer not want to pull in everything else TCG related, so handle + * those few cases by hand. + * + * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for + * Sparc we always force the use of sparcv9 in configure. MIPS n32 (ILP32) & + * n64 (LP64) ABIs are both detected using __mips64. */ -#if defined(__i386__) || defined(__x86_64__) -#if !QEMU_GNUC_PREREQ(4, 4) -#if defined __x86_64__ -# ifdef _MSC_VER -// TODO: fix me!!! -# define smp_mb() //{ __asm volatile("mfence" ::: "memory"); (void)0; } -# else -# define smp_mb() ({ asm volatile("mfence" ::: "memory"); (void)0; }) -# endif +#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64) +# define ATOMIC_REG_SIZE 8 #else -# ifdef _MSC_VER -// TODO: fix me!!! 
-# define smp_mb() //{ __asm volatile("lock; addl $0,0(%esp) " ::: "memory"); (void)0; } -# else -# define smp_mb() ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; }) -# endif -#endif -#endif +# define ATOMIC_REG_SIZE sizeof(void *) #endif +/* Weak atomic operations prevent the compiler moving other + * loads/stores past the atomic operation load/store. However there is + * no explicit memory barrier for the processor. + * + * The C11 memory model says that variables that are accessed from + * different threads should at least be done with __ATOMIC_RELAXED + * primitives or the result is undefined. Generally this has little to + * no effect on the generated code but not using the atomic primitives + * will get flagged by sanitizers as a violation. + */ +#define atomic_read__nocheck(ptr) \ + __atomic_load_n(ptr, __ATOMIC_RELAXED) -#ifdef __alpha__ -#define smp_read_barrier_depends() asm volatile("mb":::"memory") -#endif +#define atomic_read(ptr) \ + ({ \ + QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + atomic_read__nocheck(ptr); \ + }) + +#define atomic_set__nocheck(ptr, i) \ + __atomic_store_n(ptr, i, __ATOMIC_RELAXED) + +#define atomic_set(ptr, i) do { \ + QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + atomic_set__nocheck(ptr, i); \ +} while(0) + +/* All the remaining operations are fully sequentially consistent */ + +#define atomic_xchg__nocheck(ptr, i) ({ \ + __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST); \ +}) + +#define atomic_xchg(ptr, i) ({ \ + QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + atomic_xchg__nocheck(ptr, i); \ +}) + +/* Returns the eventual value, failed or not */ +#define atomic_cmpxchg__nocheck(ptr, old, new) ({ \ + typeof_strip_qual(*ptr) _old = (old); \ + (void)__atomic_compare_exchange_n(ptr, &_old, new, false, \ + __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \ + _old; \ +}) + +#define atomic_cmpxchg(ptr, old, new) ({ \ + QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ + atomic_cmpxchg__nocheck(ptr, old, new); \ +}) + +/* Provide shorter names for GCC atomic builtins, return old value */ +#define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST) +#define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST) +#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST) + +#define atomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST) +#define atomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST) +#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST) + +/* And even shorter names that return void. 
*/ +#define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)) +#define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)) +#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)) +#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)) +#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)) +#define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)) +#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)) + +#else /* __ATOMIC_RELAXED */ #if defined(__i386__) || defined(__x86_64__) || defined(__s390x__) - -/* - * Because of the strongly ordered storage model, wmb() and rmb() are nops - * here (a compiler barrier only). QEMU doesn't do accesses to write-combining - * qemu memory or non-temporal load/stores from C code. - */ -#define smp_wmb() barrier() -#define smp_rmb() barrier() - /* * __sync_lock_test_and_set() is documented to be an acquire barrier only, * but it is a full barrier at the hardware level. Add a compiler barrier * to make it a full barrier also at the compiler level. */ -#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i)) - -/* - * Load/store with Java volatile semantics. - */ -#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i)) - -#elif defined(_ARCH_PPC) - -/* - * We use an eieio() for wmb() on powerpc. This assumes we don't - * need to order cacheable and non-cacheable stores with respect to - * each other. - * - * smp_mb has the same problem as on x86 for not-very-new GCC - * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011). - */ -#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; }) -#if defined(__powerpc64__) -#define smp_rmb() ({ asm volatile("lwsync" ::: "memory"); (void)0; }) -#else -#define smp_rmb() ({ asm volatile("sync" ::: "memory"); (void)0; }) -#endif -#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; }) - -#endif /* _ARCH_PPC */ - -#endif /* C11 atomics */ - -/* - * For (host) platforms we don't have explicit barrier definitions - * for, we use the gcc __sync_synchronize() primitive to generate a - * full barrier. This should be safe on all platforms, though it may - * be overkill for smp_wmb() and smp_rmb(). - */ -#ifndef smp_mb -#define smp_mb() __sync_synchronize() -#endif - -#ifndef smp_wmb -#ifdef __ATOMIC_RELEASE -#define smp_wmb() __atomic_thread_fence(__ATOMIC_RELEASE) -#else -#define smp_wmb() __sync_synchronize() -#endif -#endif - -#ifndef smp_rmb -#ifdef __ATOMIC_ACQUIRE -#define smp_rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE) -#else -#define smp_rmb() __sync_synchronize() -#endif -#endif - -#ifndef smp_read_barrier_depends -#ifdef __ATOMIC_CONSUME -#define smp_read_barrier_depends() __atomic_thread_fence(__ATOMIC_CONSUME) -#else -#define smp_read_barrier_depends() barrier() -#endif -#endif - -#ifndef atomic_read -#define atomic_read(ptr) (*(__typeof__(*ptr) *volatile) (ptr)) -#endif - -#ifndef atomic_set -#define atomic_set(ptr, i) ((*(__typeof__(*ptr) *volatile) (ptr)) = (i)) -#endif - -/* These have the same semantics as Java volatile variables. - * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html: - * "1. Issue a StoreStore barrier (wmb) before each volatile store." - * 2. Issue a StoreLoad barrier after each volatile store. - * Note that you could instead issue one before each volatile load, but - * this would be slower for typical programs using volatiles in which - * reads greatly outnumber writes. 
Alternatively, if available, you - * can implement volatile store as an atomic instruction (for example - * XCHG on x86) and omit the barrier. This may be more efficient if - * atomic instructions are cheaper than StoreLoad barriers. - * 3. Issue LoadLoad and LoadStore barriers after each volatile load." - * - * If you prefer to think in terms of "pairing" of memory barriers, - * an atomic_mb_read pairs with an atomic_mb_set. - * - * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq, - * while an atomic_mb_set is a st.rel followed by a memory barrier. - * - * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST - * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough. - * Just always use the barriers manually by the rules above. - */ -#ifndef atomic_mb_read -#define atomic_mb_read(ptr) ({ \ - typeof(*ptr) _val = atomic_read(ptr); \ - smp_rmb(); \ - _val; \ -}) -#endif - -#ifndef atomic_mb_set -#define atomic_mb_set(ptr, i) do { \ - smp_wmb(); \ - atomic_set(ptr, i); \ - smp_mb(); \ -} while (0) -#endif - -#ifndef atomic_xchg +#ifndef _MSC_VER #if defined(__clang__) #define atomic_xchg(ptr, i) __sync_swap(ptr, i) -#elif defined(__ATOMIC_SEQ_CST) -#define atomic_xchg(ptr, i) ({ \ - typeof(*ptr) _new = (i), _old; \ - __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \ - _old; \ -}) #else -/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */ -#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i)) +#define atomic_xchg(ptr, i) (__sync_lock_test_and_set(ptr, i)) #endif #endif +#endif + +/* These will only be atomic if the processor does the fetch or store + * in a single issue memory operation + */ +#define atomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p)) +#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i)) + +#define atomic_read(ptr) atomic_read__nocheck(ptr) +#define atomic_set(ptr, i) atomic_set__nocheck(ptr,i) + +#define atomic_xchg__nocheck atomic_xchg + /* Provide shorter names for GCC atomic builtins. 
*/ #ifdef _MSC_VER // these return the new value (so we make it return the previous value) -#define atomic_fetch_inc(ptr) ((InterlockedIncrement(ptr))-1) -#define atomic_fetch_dec(ptr) ((InterlockedDecrement(ptr))+1) -#define atomic_fetch_add(ptr, n) ((InterlockedAdd(ptr, n))-n) -#define atomic_fetch_sub(ptr, n) ((InterlockedAdd(ptr, -n))+n) -#else -// these return the previous value -#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1) -#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1) -#define atomic_fetch_add __sync_fetch_and_add -#define atomic_fetch_sub __sync_fetch_and_sub -#define atomic_fetch_and __sync_fetch_and_and -#define atomic_fetch_or __sync_fetch_and_or -#define atomic_cmpxchg __sync_val_compare_and_swap -#endif +#define atomic_fetch_inc(ptr) ((InterlockedIncrement(ptr))-1) +#define atomic_fetch_dec(ptr) ((InterlockedDecrement(ptr))+1) +#define atomic_fetch_add(ptr, n) ((InterlockedAdd(ptr, n))-n) +#define atomic_fetch_sub(ptr, n) ((InterlockedAdd(ptr, -n))+n) +#define atomic_fetch_and(ptr, n) ((InterlockedAnd(ptr, n))) +#define atomic_fetch_or(ptr, n) ((InterlockedOr(ptr, n))) +#define atomic_fetch_xor(ptr, n) ((InterlockedXor(ptr, n))) + +#define atomic_inc_fetch(ptr) (InterlockedIncrement((long*)(ptr))) +#define atomic_dec_fetch(ptr) (InterlockedDecrement((long*)(ptr))) +#define atomic_add_fetch(ptr, n) (InterlockedExchangeAdd((long*)ptr, n) + n) +#define atomic_sub_fetch(ptr, n) (InterlockedExchangeAdd((long*)ptr, n) - n) +#define atomic_and_fetch(ptr, n) (InterlockedAnd((long*)ptr, n) & n) +#define atomic_or_fetch(ptr, n) (InterlockedOr((long*)ptr, n) | n) +#define atomic_xor_fetch(ptr, n) (InterlockedXor((long*)ptr, n) ^ n) + +#define atomic_cmpxchg(ptr, old, new) ((InterlockedCompareExchange(ptr, old, new))) +#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new) -/* And even shorter names that return void. 
*/ -#ifdef _MSC_VER #define atomic_inc(ptr) ((void) InterlockedIncrement(ptr)) #define atomic_dec(ptr) ((void) InterlockedDecrement(ptr)) #define atomic_add(ptr, n) ((void) InterlockedAdd(ptr, n)) #define atomic_sub(ptr, n) ((void) InterlockedAdd(ptr, -n)) -#else +#define atomic_and(ptr, n) ((void) InterlockedAnd(ptr, n)) +#define atomic_or(ptr, n) ((void) InterlockedOr(ptr, n)) +#define atomic_xor(ptr, n) ((void) InterlockedXor(ptr, n)) +#else // GCC/clang +// these return the previous value +#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1) +#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1) +#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n) +#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n) +#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n) +#define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n) +#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n) + +#define atomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1) +#define atomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1) +#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n) +#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n) +#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n) +#define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n) +#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n) + +#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new) +#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new) + #define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1)) #define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1)) #define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n)) #define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n)) #define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n)) #define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n)) +#define atomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n)) #endif -#endif +#endif /* __ATOMIC_RELAXED */ + +#endif /* QEMU_ATOMIC_H */ diff --git a/qemu/include/qemu/atomic128.h b/qemu/include/qemu/atomic128.h new file mode 100644 index 00000000..4183863d --- /dev/null +++ b/qemu/include/qemu/atomic128.h @@ -0,0 +1,175 @@ +/* + * Simple interface for 128-bit atomic operations. + * + * Copyright (C) 2018 Linaro, Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * See docs/devel/atomics.txt for discussion about the guarantees each + * atomic primitive is meant to provide. + */ + +#ifndef QEMU_ATOMIC128_H +#define QEMU_ATOMIC128_H + +#include "int128.h" + +/* + * GCC is a house divided about supporting large atomic operations. + * + * For hosts that only have large compare-and-swap, a legalistic reading + * of the C++ standard means that one cannot implement __atomic_read on + * read-only memory, and thus all atomic operations must synchronize + * through libatomic. + * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878 + * + * This interpretation is not especially helpful for QEMU. + * For softmmu, all RAM is always read/write from the hypervisor. + * For user-only, if the guest doesn't implement such an __atomic_read + * then the host need not worry about it either. + * + * Moreover, using libatomic is not an option, because its interface is + * built for std::atomic, and requires that *all* accesses to such an + * object go through the library. 
In our case we do not have an object + * in the C/C++ sense, but a view of memory as seen by the guest. + * The guest may issue a large atomic operation and then access those + * pieces using word-sized accesses. From the hypervisor, we have no + * way to connect those two actions. + * + * Therefore, special case each platform. + */ + +#if defined(CONFIG_ATOMIC128) +static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) +{ + return atomic_cmpxchg__nocheck(ptr, cmp, new); +} +# define HAVE_CMPXCHG128 1 +#elif defined(CONFIG_CMPXCHG128) +static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) +{ +#ifdef _MSC_VER + /* compare and swap. the same as __sync_val_compare_and_swap(). + if the current value of *ptr is cmp, then write new into *ptr, + return *ptr old value. */ + Int128 save = *ptr; + if (!memcmp(ptr, &cmp, sizeof(cmp))) { + *ptr = new; + } + return save; +#else + return __sync_val_compare_and_swap_16(ptr, cmp, new); +#endif +} +# define HAVE_CMPXCHG128 1 +#elif defined(__aarch64__) +/* Through gcc 8, aarch64 has no support for 128-bit at all. */ +static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) +{ + uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp); + uint64_t newl = int128_getlo(new), newh = int128_gethi(new); + uint64_t oldl, oldh; + uint32_t tmp; + + asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t" + "cmp %[oldl], %[cmpl]\n\t" + "ccmp %[oldh], %[cmph], #0, eq\n\t" + "b.ne 1f\n\t" + "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t" + "cbnz %w[tmp], 0b\n" + "1:" + : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), + [oldl] "=&r"(oldl), [oldh] "=&r"(oldh) + : [cmpl] "r"(cmpl), [cmph] "r"(cmph), + [newl] "r"(newl), [newh] "r"(newh) + : "memory", "cc"); + + return int128_make128(oldl, oldh); +} +# define HAVE_CMPXCHG128 1 +#else +/* Fallback definition that must be optimized away, or error. */ +Int128 QEMU_ERROR("unsupported atomic") + atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new); +# define HAVE_CMPXCHG128 0 +#endif /* Some definition for HAVE_CMPXCHG128 */ + +#if defined(CONFIG_ATOMIC128) +static inline Int128 atomic16_read(Int128 *ptr) +{ + return atomic_read__nocheck(ptr); +} + +static inline void atomic16_set(Int128 *ptr, Int128 val) +{ + atomic_set__nocheck(ptr, val); +} + +# define HAVE_ATOMIC128 1 +#elif defined(__aarch64__) +/* We can do better than cmpxchg for AArch64. */ +static inline Int128 atomic16_read(Int128 *ptr) +{ + uint64_t l, h; + uint32_t tmp; + + /* The load must be paired with the store to guarantee not tearing. */ + asm("0: ldxp %[l], %[h], %[mem]\n\t" + "stxp %w[tmp], %[l], %[h], %[mem]\n\t" + "cbnz %w[tmp], 0b" + : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h)); + + return int128_make128(l, h); +} + +static inline void atomic16_set(Int128 *ptr, Int128 val) +{ + uint64_t l = int128_getlo(val), h = int128_gethi(val); + uint64_t t1, t2; + + /* Load into temporaries to acquire the exclusive access lock. */ + asm("0: ldxp %[t1], %[t2], %[mem]\n\t" + "stxp %w[t1], %[l], %[h], %[mem]\n\t" + "cbnz %w[t1], 0b" + : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2) + : [l] "r"(l), [h] "r"(h)); +} + +# define HAVE_ATOMIC128 1 +#elif HAVE_CMPXCHG128 +static inline Int128 atomic16_read(Int128 *ptr) +{ + /* Maybe replace 0 with 0, returning the old value. 
*/ +#ifdef _MSC_VER + Int128 x = int128_make64(0); + Int128 y = int128_make64(0); + return atomic16_cmpxchg(ptr, x, y); +#else + return atomic16_cmpxchg(ptr, 0, 0); +#endif +} + +static inline void atomic16_set(Int128 *ptr, Int128 val) +{ + Int128 old = *ptr, cmp; + do { + cmp = old; + old = atomic16_cmpxchg(ptr, cmp, val); +#ifdef _MSC_VER + } while (memcmp(&old, &cmp, sizeof(old))); +#else + } while (old != cmp); +#endif +} + +# define HAVE_ATOMIC128 1 +#else +/* Fallback definitions that must be optimized away, or error. */ +Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr); +void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val); +# define HAVE_ATOMIC128 0 +#endif /* Some definition for HAVE_ATOMIC128 */ + +#endif /* QEMU_ATOMIC128_H */ diff --git a/qemu/include/qemu/bitmap.h b/qemu/include/qemu/bitmap.h index b8faee8d..267f04f2 100644 --- a/qemu/include/qemu/bitmap.h +++ b/qemu/include/qemu/bitmap.h @@ -12,11 +12,7 @@ #ifndef BITMAP_H #define BITMAP_H -#include "glib_compat.h" -#include -#include -#include "qemu/osdep.h" #include "qemu/bitops.h" /* @@ -26,8 +22,29 @@ * Note that nbits should be always a compile time evaluable constant. * Otherwise many inlines will generate horrible code. * + * bitmap_zero(dst, nbits) *dst = 0UL + * bitmap_fill(dst, nbits) *dst = ~0UL + * bitmap_copy(dst, src, nbits) *dst = *src + * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 + * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 + * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 + * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) + * bitmap_complement(dst, src, nbits) *dst = ~(*src) + * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? + * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? + * bitmap_empty(src, nbits) Are all bits zero in *src? + * bitmap_full(src, nbits) Are all bits set in *src? * qemu_bitmap_set(dst, pos, nbits) Set specified bit area + * bitmap_set_atomic(dst, pos, nbits) Set specified bit area with atomic ops * qemu_bitmap_clear(dst, pos, nbits) Clear specified bit area + * bitmap_test_and_clear_atomic(dst, pos, nbits) Test and clear area + * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + * bitmap_to_le(dst, src, nbits) Convert bitmap to little endian + * bitmap_from_le(dst, src, nbits) Convert bitmap from little endian + * bitmap_copy_with_src_offset(dst, src, offset, nbits) + * *dst = *src (with an offset into src) + * bitmap_copy_with_dst_offset(dst, src, offset, nbits) + * *dst = *src (with an offset into dst) */ /* @@ -35,19 +52,214 @@ * * set_bit(bit, addr) *addr |= bit * clear_bit(bit, addr) *addr &= ~bit + * change_bit(bit, addr) *addr ^= bit + * test_bit(bit, addr) Is bit set in *addr? + * test_and_set_bit(bit, addr) Set bit and return old value + * test_and_clear_bit(bit, addr) Clear bit and return old value + * test_and_change_bit(bit, addr) Change bit and return old value + * find_first_zero_bit(addr, nbits) Position first zero bit in *addr + * find_first_bit(addr, nbits) Position first set bit in *addr + * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit + * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit */ -#define BITMAP_LAST_WORD_MASK(nbits) \ - ( \ - ((nbits) % BITS_PER_LONG) ? 
\ - (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ - ) +#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) +#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] +#define small_nbits(nbits) \ + ((nbits) <= BITS_PER_LONG) + +int slow_bitmap_empty(const unsigned long *bitmap, long bits); +int slow_bitmap_full(const unsigned long *bitmap, long bits); +int slow_bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits); +void slow_bitmap_complement(unsigned long *dst, const unsigned long *src, + long bits); +int slow_bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits); +void slow_bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits); +void slow_bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits); +int slow_bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits); +int slow_bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits); +long slow_bitmap_count_one(const unsigned long *bitmap, long nbits); + +static inline unsigned long *bitmap_try_new(long nbits) +{ + long len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + return g_try_malloc0(len); +} + +static inline unsigned long *bitmap_new(long nbits) +{ + unsigned long *ptr = bitmap_try_new(nbits); + if (ptr == NULL) { + abort(); + } + return ptr; +} + +static inline void bitmap_zero(unsigned long *dst, long nbits) +{ + if (small_nbits(nbits)) { + *dst = 0UL; + } else { + long len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} + +static inline void bitmap_fill(unsigned long *dst, long nbits) +{ + size_t nlongs = BITS_TO_LONGS(nbits); + if (!small_nbits(nbits)) { + long len = (nlongs - 1) * sizeof(unsigned long); + memset(dst, 0xff, len); + } + dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); +} + +static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, + long nbits) +{ + if (small_nbits(nbits)) { + *dst = *src; + } else { + long len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memcpy(dst, src, len); + } +} + +static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, long nbits) +{ + if (small_nbits(nbits)) { + return (*dst = *src1 & *src2) != 0; + } + return slow_bitmap_and(dst, src1, src2, nbits); +} + +static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, long nbits) +{ + if (small_nbits(nbits)) { + *dst = *src1 | *src2; + } else { + slow_bitmap_or(dst, src1, src2, nbits); + } +} + +static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, long nbits) +{ + if (small_nbits(nbits)) { + *dst = *src1 ^ *src2; + } else { + slow_bitmap_xor(dst, src1, src2, nbits); + } +} + +static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, long nbits) +{ + if (small_nbits(nbits)) { + return (*dst = *src1 & ~(*src2)) != 0; + } + return slow_bitmap_andnot(dst, src1, src2, nbits); +} + +static inline void bitmap_complement(unsigned long *dst, + const unsigned long *src, + long nbits) +{ + if (small_nbits(nbits)) { + *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); + } else { + slow_bitmap_complement(dst, src, nbits); + } +} + 
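/* [editor's note] Illustrative sketch, not part of the patch: minimal use
 * of the fixed-size bitmap helpers defined above. g_free() is assumed to
 * come from the bundled glib_compat layer; the sizes and values are
 * arbitrary examples. */
static inline void bitmap_usage_sketch(void)
{
    unsigned long *a = bitmap_new(256);   /* zero-filled; aborts on OOM */
    unsigned long *b = bitmap_new(256);

    bitmap_fill(a, 256);                  /* all 256 bits set */
    bitmap_complement(b, a, 256);         /* b = ~a, i.e. all bits clear */

    /* bitmap_and() returns nonzero iff the result has any bit set;
     * a & ~a is empty, so this must return 0. */
    if (bitmap_and(a, a, b, 256)) {
        abort();
    }

    g_free(a);
    g_free(b);
}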
+static inline int bitmap_equal(const unsigned long *src1, + const unsigned long *src2, long nbits) +{ + if (small_nbits(nbits)) { + return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); + } else { + return slow_bitmap_equal(src1, src2, nbits); + } +} + +static inline int bitmap_empty(const unsigned long *src, long nbits) +{ + if (small_nbits(nbits)) { + return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); + } else { + return slow_bitmap_empty(src, nbits); + } +} + +static inline int bitmap_full(const unsigned long *src, long nbits) +{ + if (small_nbits(nbits)) { + return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); + } else { + return slow_bitmap_full(src, nbits); + } +} + +static inline int bitmap_intersects(const unsigned long *src1, + const unsigned long *src2, long nbits) +{ + if (small_nbits(nbits)) { + return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; + } else { + return slow_bitmap_intersects(src1, src2, nbits); + } +} + +static inline long bitmap_count_one(const unsigned long *bitmap, long nbits) +{ + if (!nbits) { + return 0; + } + + if (small_nbits(nbits)) { + return ctpopl(*bitmap & BITMAP_LAST_WORD_MASK(nbits)); + } else { + return slow_bitmap_count_one(bitmap, nbits); + } +} + +static inline long bitmap_count_one_with_offset(const unsigned long *bitmap, + long offset, long nbits) +{ + long aligned_offset = QEMU_ALIGN_DOWN(offset, BITS_PER_LONG); + long redundant_bits = offset - aligned_offset; + long bits_to_count = nbits + redundant_bits; + const unsigned long *bitmap_start = bitmap + + aligned_offset / BITS_PER_LONG; + + return bitmap_count_one(bitmap_start, bits_to_count) - + bitmap_count_one(bitmap_start, redundant_bits); +} + void qemu_bitmap_set(unsigned long *map, long i, long len); +void bitmap_set_atomic(unsigned long *map, long i, long len); void qemu_bitmap_clear(unsigned long *map, long start, long nr); +bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr); +void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src, + long nr); +unsigned long bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned long nr, + unsigned long align_mask); static inline unsigned long *bitmap_zero_extend(unsigned long *old, long old_nbits, long new_nbits) @@ -58,4 +270,14 @@ static inline unsigned long *bitmap_zero_extend(unsigned long *old, return new; } +void bitmap_to_le(unsigned long *dst, const unsigned long *src, + long nbits); +void bitmap_from_le(unsigned long *dst, const unsigned long *src, + long nbits); + +void bitmap_copy_with_src_offset(unsigned long *dst, const unsigned long *src, + unsigned long offset, unsigned long nbits); +void bitmap_copy_with_dst_offset(unsigned long *dst, const unsigned long *src, + unsigned long shift, unsigned long nbits); + #endif /* BITMAP_H */ diff --git a/qemu/include/qemu/bitops.h b/qemu/include/qemu/bitops.h index b523df9d..2db3288f 100644 --- a/qemu/include/qemu/bitops.h +++ b/qemu/include/qemu/bitops.h @@ -12,18 +12,21 @@ #ifndef BITOPS_H #define BITOPS_H -#include "unicorn/platform.h" -#include #include "host-utils.h" +#include "atomic.h" #define BITS_PER_BYTE CHAR_BIT #define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE) -#define BIT(nr) (1UL << (nr)) -#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) -#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) -#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) +#define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % 
BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) + +#define MAKE_64BIT_MASK(shift, length) \ + (((~0ULL) >> (64 - (length))) << (shift)) /** * set_bit - Set a bit in memory @@ -33,7 +36,7 @@ static inline void set_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); + unsigned long *p = addr + BIT_WORD(nr); *p |= mask; } @@ -46,7 +49,7 @@ static inline void set_bit(long nr, unsigned long *addr) static inline void clear_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); + unsigned long *p = addr + BIT_WORD(nr); *p &= ~mask; } @@ -59,7 +62,7 @@ static inline void clear_bit(long nr, unsigned long *addr) static inline void change_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); + unsigned long *p = addr + BIT_WORD(nr); *p ^= mask; } @@ -72,7 +75,7 @@ static inline void change_bit(long nr, unsigned long *addr) static inline int test_and_set_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); + unsigned long *p = addr + BIT_WORD(nr); unsigned long old = *p; *p = old | mask; @@ -87,7 +90,7 @@ static inline int test_and_set_bit(long nr, unsigned long *addr) static inline int test_and_clear_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); + unsigned long *p = addr + BIT_WORD(nr); unsigned long old = *p; *p = old & ~mask; @@ -102,7 +105,7 @@ static inline int test_and_clear_bit(long nr, unsigned long *addr) static inline int test_and_change_bit(long nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); - unsigned long *p = addr + BIT_WORD(nr); + unsigned long *p = addr + BIT_WORD(nr); unsigned long old = *p; *p = old ^ mask; @@ -119,6 +122,16 @@ static inline int test_bit(long nr, const unsigned long *addr) return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit, or size. 
+ */ +unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); + /** * find_next_bit - find the next set bit in a memory region * @addr: The address to base the search on @@ -126,7 +139,8 @@ static inline int test_bit(long nr, const unsigned long *addr) * @size: The bitmap size in bits */ unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset); + unsigned long size, + unsigned long offset); /** * find_next_zero_bit - find the next cleared bit in a memory region @@ -175,16 +189,6 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr, return find_next_zero_bit(addr, size, 0); } -static inline unsigned long hweight_long(unsigned long w) -{ - unsigned long count; - - for (count = 0; w; w >>= 1) { - count += w & 1; - } - return count; -} - /** * rol8 - rotate an 8-bit value left * @word: value to rotate @@ -192,7 +196,7 @@ static inline unsigned long hweight_long(unsigned long w) */ static inline uint8_t rol8(uint8_t word, unsigned int shift) { - return (word << shift) | (word >> (8 - shift)); + return (word << shift) | (word >> ((8 - shift) & 7)); } /** @@ -202,7 +206,7 @@ static inline uint8_t rol8(uint8_t word, unsigned int shift) */ static inline uint8_t ror8(uint8_t word, unsigned int shift) { - return (word >> shift) | (word << (8 - shift)); + return (word >> shift) | (word << ((8 - shift) & 7)); } /** @@ -212,7 +216,7 @@ static inline uint8_t ror8(uint8_t word, unsigned int shift) */ static inline uint16_t rol16(uint16_t word, unsigned int shift) { - return (word << shift) | (word >> (16 - shift)); + return (word << shift) | (word >> ((16 - shift) & 15)); } /** @@ -222,7 +226,7 @@ static inline uint16_t rol16(uint16_t word, unsigned int shift) */ static inline uint16_t ror16(uint16_t word, unsigned int shift) { - return (word >> shift) | (word << (16 - shift)); + return (word >> shift) | (word << ((16 - shift) & 15)); } /** @@ -232,7 +236,7 @@ static inline uint16_t ror16(uint16_t word, unsigned int shift) */ static inline uint32_t rol32(uint32_t word, unsigned int shift) { - return (word << shift) | (word >> (32 - shift)); + return (word << shift) | (word >> ((32 - shift) & 31)); } /** @@ -242,7 +246,7 @@ static inline uint32_t rol32(uint32_t word, unsigned int shift) */ static inline uint32_t ror32(uint32_t word, unsigned int shift) { - return (word >> shift) | (word << ((32 - shift) & 0x1f)); + return (word >> shift) | (word << ((32 - shift) & 31)); } /** @@ -252,7 +256,7 @@ static inline uint32_t ror32(uint32_t word, unsigned int shift) */ static inline uint64_t rol64(uint64_t word, unsigned int shift) { - return (word << shift) | (word >> (64 - shift)); + return (word << shift) | (word >> ((64 - shift) & 63)); } /** @@ -262,7 +266,7 @@ static inline uint64_t rol64(uint64_t word, unsigned int shift) */ static inline uint64_t ror64(uint64_t word, unsigned int shift) { - return (word >> shift) | (word << (64 - shift)); + return (word >> shift) | (word << ((64 - shift) & 63)); } /** @@ -284,6 +288,44 @@ static inline uint32_t extract32(uint32_t value, int start, int length) return (value >> start) & (~0U >> (32 - length)); } +/** + * extract8: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 8 bit input @value the bit field specified by the + * @start and @length parameters, and return it. The bit field must + * lie entirely within the 8 bit word. 
It is valid to request that + * all 8 bits are returned (ie @length 8 and @start 0). + * + * Returns: the value of the bit field extracted from the input value. + */ +static inline uint8_t extract8(uint8_t value, int start, int length) +{ + assert(start >= 0 && length > 0 && length <= 8 - start); + return extract32(value, start, length); +} + +/** + * extract16: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 16 bit input @value the bit field specified by the + * @start and @length parameters, and return it. The bit field must + * lie entirely within the 16 bit word. It is valid to request that + * all 16 bits are returned (ie @length 16 and @start 0). + * + * Returns: the value of the bit field extracted from the input value. + */ +static inline uint16_t extract16(uint16_t value, int start, int length) +{ + assert(start >= 0 && length > 0 && length <= 16 - start); + return extract32(value, start, length); +} + /** * extract64: * @value: the value to extract the bit field from @@ -344,7 +386,7 @@ static inline int32_t sextract32(uint32_t value, int start, int length) * Returns: the sign extended value of the bit field extracted from the * input value. */ -static inline uint64_t sextract64(uint64_t value, int start, int length) +static inline int64_t sextract64(uint64_t value, int start, int length) { assert(start >= 0 && length > 0 && length <= 64 - start); /* Note that this implementation relies on right shift of signed @@ -405,4 +447,124 @@ static inline uint64_t deposit64(uint64_t value, int start, int length, return (value & ~mask) | ((fieldval << start) & mask); } +/** + * half_shuffle32: + * @x: 32-bit value (of which only the bottom 16 bits are of interest) + * + * Given an input value:: + * + * xxxx xxxx xxxx xxxx ABCD EFGH IJKL MNOP + * + * return the value where the bottom 16 bits are spread out into + * the odd bits in the word, and the even bits are zeroed:: + * + * 0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N 0O0P + * + * Any bits set in the top half of the input are ignored. + * + * Returns: the shuffled bits. + */ +static inline uint32_t half_shuffle32(uint32_t x) +{ + /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits". + * It ignores any bits set in the top half of the input. + */ + x = ((x & 0xFF00) << 8) | (x & 0x00FF); + x = ((x << 4) | x) & 0x0F0F0F0F; + x = ((x << 2) | x) & 0x33333333; + x = ((x << 1) | x) & 0x55555555; + return x; +} + +/** + * half_shuffle64: + * @x: 64-bit value (of which only the bottom 32 bits are of interest) + * + * Given an input value:: + * + * xxxx xxxx xxxx .... xxxx xxxx ABCD EFGH IJKL MNOP QRST UVWX YZab cdef + * + * return the value where the bottom 32 bits are spread out into + * the odd bits in the word, and the even bits are zeroed:: + * + * 0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N .... 0U0V 0W0X 0Y0Z 0a0b 0c0d 0e0f + * + * Any bits set in the top half of the input are ignored. + * + * Returns: the shuffled bits. + */ +static inline uint64_t half_shuffle64(uint64_t x) +{ + /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits". + * It ignores any bits set in the top half of the input. 
+ */ + x = ((x & 0xFFFF0000ULL) << 16) | (x & 0xFFFF); + x = ((x << 8) | x) & 0x00FF00FF00FF00FFULL; + x = ((x << 4) | x) & 0x0F0F0F0F0F0F0F0FULL; + x = ((x << 2) | x) & 0x3333333333333333ULL; + x = ((x << 1) | x) & 0x5555555555555555ULL; + return x; +} + +/** + * half_unshuffle32: + * @x: 32-bit value (of which only the odd bits are of interest) + * + * Given an input value:: + * + * xAxB xCxD xExF xGxH xIxJ xKxL xMxN xOxP + * + * return the value where all the odd bits are compressed down + * into the low half of the word, and the high half is zeroed:: + * + * 0000 0000 0000 0000 ABCD EFGH IJKL MNOP + * + * Any even bits set in the input are ignored. + * + * Returns: the unshuffled bits. + */ +static inline uint32_t half_unshuffle32(uint32_t x) +{ + /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits". + * where it is called an inverse half shuffle. + */ + x &= 0x55555555; + x = ((x >> 1) | x) & 0x33333333; + x = ((x >> 2) | x) & 0x0F0F0F0F; + x = ((x >> 4) | x) & 0x00FF00FF; + x = ((x >> 8) | x) & 0x0000FFFF; + return x; +} + +/** + * half_unshuffle64: + * @x: 64-bit value (of which only the odd bits are of interest) + * + * Given an input value:: + * + * xAxB xCxD xExF xGxH xIxJ xKxL xMxN .... xUxV xWxX xYxZ xaxb xcxd xexf + * + * return the value where all the odd bits are compressed down + * into the low half of the word, and the high half is zeroed:: + * + * 0000 0000 0000 .... 0000 0000 ABCD EFGH IJKL MNOP QRST UVWX YZab cdef + * + * Any even bits set in the input are ignored. + * + * Returns: the unshuffled bits. + */ +static inline uint64_t half_unshuffle64(uint64_t x) +{ + /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits". + * where it is called an inverse half shuffle. + */ + x &= 0x5555555555555555ULL; + x = ((x >> 1) | x) & 0x3333333333333333ULL; + x = ((x >> 2) | x) & 0x0F0F0F0F0F0F0F0FULL; + x = ((x >> 4) | x) & 0x00FF00FF00FF00FFULL; + x = ((x >> 8) | x) & 0x0000FFFF0000FFFFULL; + x = ((x >> 16) | x) & 0x00000000FFFFFFFFULL; + return x; +} + #endif diff --git a/qemu/include/qemu/bswap.h b/qemu/include/qemu/bswap.h index 9d069d05..7591f6c8 100644 --- a/qemu/include/qemu/bswap.h +++ b/qemu/include/qemu/bswap.h @@ -1,15 +1,11 @@ #ifndef BSWAP_H #define BSWAP_H -#include "config-host.h" -#include "unicorn/platform.h" -#include -#include -#include "fpu/softfloat.h" +#include "osdep.h" +#include "fpu/softfloat-types.h" #ifdef CONFIG_MACHINE_BSWAP_H # include -# include # include #elif defined(__FreeBSD__) # include @@ -85,6 +81,64 @@ static inline void bswap64s(uint64_t *s) #define be_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0) #endif +/** + * Endianness conversion functions between host cpu and specified endianness. + * (We list the complete set of prototypes produced by the macros below + * to assist people who search the headers to find their definitions.) + * + * uint16_t le16_to_cpu(uint16_t v); + * uint32_t le32_to_cpu(uint32_t v); + * uint64_t le64_to_cpu(uint64_t v); + * uint16_t be16_to_cpu(uint16_t v); + * uint32_t be32_to_cpu(uint32_t v); + * uint64_t be64_to_cpu(uint64_t v); + * + * Convert the value @v from the specified format to the native + * endianness of the host CPU by byteswapping if necessary, and + * return the converted value. 
+ * + * uint16_t cpu_to_le16(uint16_t v); + * uint32_t cpu_to_le32(uint32_t v); + * uint64_t cpu_to_le64(uint64_t v); + * uint16_t cpu_to_be16(uint16_t v); + * uint32_t cpu_to_be32(uint32_t v); + * uint64_t cpu_to_be64(uint64_t v); + * + * Convert the value @v from the native endianness of the host CPU to + * the specified format by byteswapping if necessary, and return + * the converted value. + * + * void le16_to_cpus(uint16_t *v); + * void le32_to_cpus(uint32_t *v); + * void le64_to_cpus(uint64_t *v); + * void be16_to_cpus(uint16_t *v); + * void be32_to_cpus(uint32_t *v); + * void be64_to_cpus(uint64_t *v); + * + * Do an in-place conversion of the value pointed to by @v from the + * specified format to the native endianness of the host CPU. + * + * void cpu_to_le16s(uint16_t *v); + * void cpu_to_le32s(uint32_t *v); + * void cpu_to_le64s(uint64_t *v); + * void cpu_to_be16s(uint16_t *v); + * void cpu_to_be32s(uint32_t *v); + * void cpu_to_be64s(uint64_t *v); + * + * Do an in-place conversion of the value pointed to by @v from the + * native endianness of the host CPU to the specified format. + * + * Both X_to_cpu() and cpu_to_X() perform the same operation; you + * should use whichever one is better documenting of the function your + * code is performing. + * + * Do not use these functions for conversion of values which are in guest + * memory, since the data may not be sufficiently aligned for the host CPU's + * load and store instructions. Instead you should use the ld*_p() and + * st*_p() functions, which perform loads and stores of data of any + * required size and endianness and handle possible misalignment. + */ + #define CPU_CONVERT(endian, size, type)\ static inline type endian ## size ## _to_cpu(type v)\ {\ @@ -104,16 +158,6 @@ static inline void endian ## size ## _to_cpus(type *p)\ static inline void cpu_to_ ## endian ## size ## s(type *p)\ {\ glue(endian, _bswaps)(p, size);\ -}\ -\ -static inline type endian ## size ## _to_cpup(const type *p)\ -{\ - return glue(glue(endian, size), _to_cpu)(*p);\ -}\ -\ -static inline void cpu_to_ ## endian ## size ## w(type *p, type v)\ -{\ - *p = glue(glue(cpu_to_, endian), size)(v);\ } CPU_CONVERT(be, 16, uint16_t) @@ -130,6 +174,25 @@ static inline uint32_t qemu_bswap_len(uint32_t value, int len) return bswap32(value) >> (32 - 8 * len); } +/* + * Same as cpu_to_le{16,32}, except that gcc will figure the result is + * a compile-time constant if you pass in a constant. So this can be + * used to initialize static variables. + */ +#if defined(HOST_WORDS_BIGENDIAN) +# define const_le32(_x) \ + ((((_x) & 0x000000ffU) << 24) | \ + (((_x) & 0x0000ff00U) << 8) | \ + (((_x) & 0x00ff0000U) >> 8) | \ + (((_x) & 0xff000000U) >> 24)) +# define const_le16(_x) \ + ((((_x) & 0x00ff) << 8) | \ + (((_x) & 0xff00) >> 8)) +#else +# define const_le32(_x) (_x) +# define const_le16(_x) (_x) +#endif + /* Unions for reinterpreting between floats and integers. */ typedef union { @@ -193,9 +256,9 @@ typedef union { /* * the generic syntax is: * - * load: ld{type}{sign}{size}{endian}_p(ptr) + * load: ld{type}{sign}{size}_{endian}_p(ptr) * - * store: st{type}{size}{endian}_p(ptr, val) + * store: st{type}{size}_{endian}_p(ptr, val) * * Note there are small differences with the softmmu access API! 
* @@ -204,7 +267,7 @@ typedef union { * f : float access * * sign is: - * (empty): for floats or 32 bit size + * (empty): for 32 or 64 bit sizes (including floats and doubles) * u : unsigned * s : signed * @@ -218,7 +281,25 @@ typedef union { * he : host endian * be : big endian * le : little endian + * te : target endian * (except for byte accesses, which have no endian infix). + * + * The target endian accessors are obviously only available to source + * files which are built per-target; they are defined in cpu-all.h. + * + * In all cases these functions take a host pointer. + * For accessors that take a guest address rather than a + * host address, see the cpu_{ld,st}_* accessors defined in + * cpu_ldst.h. + * + * For cases where the size to be used is not fixed at compile time, + * there are + * stn_{endian}_p(ptr, sz, val) + * which stores @val to @ptr as an @endian-order number @sz bytes in size + * and + * ldn_{endian}_p(ptr, sz) + * which loads @sz bytes from @ptr as an unsigned @endian-order number + * and returns it in a uint64_t. */ static inline int ldub_p(const void *ptr) @@ -236,51 +317,85 @@ static inline void stb_p(void *ptr, uint8_t v) *(uint8_t *)ptr = v; } -/* Any compiler worth its salt will turn these memcpy into native unaligned - operations. Thus we don't need to play games with packed attributes, or - inline byte-by-byte stores. */ +/* + * Any compiler worth its salt will turn these memcpy into native unaligned + * operations. Thus we don't need to play games with packed attributes, or + * inline byte-by-byte stores. + * Some compilation environments (eg some fortify-source implementations) + * may intercept memcpy() in a way that defeats the compiler optimization, + * though, so we use __builtin_memcpy() to give ourselves the best chance + * of good performance. + */ static inline int lduw_he_p(const void *ptr) { uint16_t r; +#ifdef _MSC_VER memcpy(&r, ptr, sizeof(r)); +#else + __builtin_memcpy(&r, ptr, sizeof(r)); +#endif return r; } static inline int ldsw_he_p(const void *ptr) { int16_t r; +#ifdef _MSC_VER memcpy(&r, ptr, sizeof(r)); +#else + __builtin_memcpy(&r, ptr, sizeof(r)); +#endif return r; } static inline void stw_he_p(void *ptr, uint16_t v) { +#ifdef _MSC_VER memcpy(ptr, &v, sizeof(v)); +#else + __builtin_memcpy(ptr, &v, sizeof(v)); +#endif } static inline int ldl_he_p(const void *ptr) { int32_t r; +#ifdef _MSC_VER memcpy(&r, ptr, sizeof(r)); +#else + __builtin_memcpy(&r, ptr, sizeof(r)); +#endif return r; } static inline void stl_he_p(void *ptr, uint32_t v) { +#ifdef _MSC_VER memcpy(ptr, &v, sizeof(v)); +#else + __builtin_memcpy(ptr, &v, sizeof(v)); +#endif } static inline uint64_t ldq_he_p(const void *ptr) { uint64_t r; +#ifdef _MSC_VER memcpy(&r, ptr, sizeof(r)); +#else + __builtin_memcpy(&r, ptr, sizeof(r)); +#endif return r; } static inline void stq_he_p(void *ptr, uint64_t v) { +#ifdef _MSC_VER memcpy(ptr, &v, sizeof(v)); +#else + __builtin_memcpy(ptr, &v, sizeof(v)); +#endif } static inline int lduw_le_p(const void *ptr) @@ -415,17 +530,58 @@ static inline void stfq_be_p(void *ptr, float64 v) static inline unsigned long leul_to_cpu(unsigned long v) { - /* In order to break an include loop between here and - qemu-common.h, don't rely on HOST_LONG_BITS. 
*/ -#if ULONG_MAX == UINT32_MAX +#if HOST_LONG_BITS == 32 return le_bswap(v, 32); -#elif ULONG_MAX == UINT64_MAX +#elif HOST_LONG_BITS == 64 return le_bswap(v, 64); #else # error Unknown sizeof long #endif } +/* Store v to p as a sz byte value in host order */ +#define DO_STN_LDN_P(END) \ + static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v) \ + { \ + switch (sz) { \ + case 1: \ + stb_p(ptr, v); \ + break; \ + case 2: \ + stw_ ## END ## _p(ptr, v); \ + break; \ + case 4: \ + stl_ ## END ## _p(ptr, v); \ + break; \ + case 8: \ + stq_ ## END ## _p(ptr, v); \ + break; \ + default: \ + break; /* g_assert_not_reached(); */ \ + } \ + } \ + static inline uint64_t ldn_## END ## _p(const void *ptr, int sz) \ + { \ + switch (sz) { \ + case 1: \ + return ldub_p(ptr); \ + case 2: \ + return lduw_ ## END ## _p(ptr); \ + case 4: \ + return (uint32_t)ldl_ ## END ## _p(ptr); \ + case 8: \ + return ldq_ ## END ## _p(ptr); \ + default: \ + return 0; /* g_assert_not_reached(); */ \ + } \ + } + +DO_STN_LDN_P(he) +DO_STN_LDN_P(le) +DO_STN_LDN_P(be) + +#undef DO_STN_LDN_P + #undef le_bswap #undef be_bswap #undef le_bswaps diff --git a/qemu/include/qemu/compiler.h b/qemu/include/qemu/compiler.h index d0e322fc..66341749 100644 --- a/qemu/include/qemu/compiler.h +++ b/qemu/include/qemu/compiler.h @@ -1,11 +1,21 @@ -/* public domain */ +/* compiler.h: macros to abstract away compiler specifics + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ #ifndef COMPILER_H #define COMPILER_H -#include "config-host.h" #include "unicorn/platform.h" +#ifndef glue +#define xglue(x, y) x ## y +#define glue(x, y) xglue(x, y) +#define stringify(s) tostring(s) +#define tostring(s) #s +#endif + #ifdef _MSC_VER // MSVC support @@ -58,22 +68,36 @@ static union MSVC_FLOAT_HACK __NAN = {{0x00, 0x00, 0xC0, 0x7F}}; #define QEMU_WARN_UNUSED_RESULT #define QEMU_ARTIFICIAL #define QEMU_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop) ) +#define QEMU_NOINLINE __declspec(noinline) #define QEMU_ALIGN(A, B) __declspec(align(A)) B +#define QEMU_ALIGNED(X) #define cat(x,y) x ## y #define cat2(x,y) cat(x,y) -#define QEMU_BUILD_BUG_ON(x) \ - typedef char cat2(qemu_build_bug_on__,__LINE__)[(x)?-1:1] QEMU_UNUSED_VAR; +#define QEMU_BUILD_BUG_ON(x) +#define QEMU_BUILD_BUG_ON_ZERO(x) #define GCC_FMT_ATTR(n, m) -#else +#define likely(x) (x) +#define unlikely(x) (x) + +#define container_of(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member))) + +#define QEMU_FLATTEN +#define QEMU_ALWAYS_INLINE __declspec(inline) + +#else // Unix compilers #ifndef NAN #define NAN (0.0 / 0.0) #endif +#if defined __clang_analyzer__ || defined __COVERITY__ +#define QEMU_STATIC_ANALYSIS 1 +#endif + /*---------------------------------------------------------------------------- | The macro QEMU_GNUC_PREREQ tests for minimum version of the GNU C compiler. | The code is a copy of SOFTFLOAT_GNUC_PREREQ, see softfloat-macros.h. 
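/* [editor's note] Illustrative sketch, not part of the patch: how the
 * glue()/stringify() helpers added to compiler.h above expand. Macro
 * arguments are expanded before pasting or quoting, which is exactly how
 * bswap.h's be_bswaps()/le_bswaps() select bswap16/32/64 via
 * glue(bswap, size). EXAMPLE_BITS is a hypothetical macro. */
#define EXAMPLE_BITS 32
/*
 * glue(bswap, EXAMPLE_BITS)(v)  ->  xglue(bswap, 32)(v)  ->  bswap32(v)
 * stringify(EXAMPLE_BITS)       ->  tostring(32)         ->  "32"
 * tostring(EXAMPLE_BITS)        ->  #EXAMPLE_BITS        ->  "EXAMPLE_BITS"
 */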
@@ -90,30 +114,83 @@ static union MSVC_FLOAT_HACK __NAN = {{0x00, 0x00, 0xC0, 0x7F}}; #define QEMU_UNUSED_VAR __attribute__((unused)) #define QEMU_UNUSED_FUNC __attribute__((unused)) -#if QEMU_GNUC_PREREQ(3, 4) #define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) -#else -#define QEMU_WARN_UNUSED_RESULT -#endif -#if QEMU_GNUC_PREREQ(4, 3) -#define QEMU_ARTIFICIAL __attribute__((always_inline, artificial)) -#else -#define QEMU_ARTIFICIAL -#endif +#define QEMU_SENTINEL __attribute__((sentinel)) -#if defined(_WIN32) +#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__)) +# define QEMU_PACKED __attribute__((gcc_struct, packed)) # define QEMU_PACK( __Declaration__ ) __Declaration__ __attribute__((gcc_struct, packed)) #else +# define QEMU_PACKED __attribute__((packed)) # define QEMU_PACK( __Declaration__ ) __Declaration__ __attribute__((packed)) #endif #define QEMU_ALIGN(A, B) B __attribute__((aligned(A))) -#define cat(x,y) x ## y -#define cat2(x,y) cat(x,y) -#define QEMU_BUILD_BUG_ON(x) \ - typedef char cat2(qemu_build_bug_on__,__LINE__)[(x)?-1:1] __attribute__((unused)); +#define QEMU_ALIGNED(X) __attribute__((aligned(X))) + +#define QEMU_NOINLINE __attribute__((noinline)) + +#ifndef likely +#if __GNUC__ < 3 +#define __builtin_expect(x, n) (x) +#endif + +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +#ifndef container_of +#define container_of(ptr, type, member) ({ \ + const typeof(((type *) 0)->member) *__mptr = (ptr); \ + (type *) ((char *) __mptr - offsetof(type, member));}) +#endif + +#define sizeof_field(type, field) sizeof(((type *)0)->field) + +/* + * Calculate the number of bytes up to and including the given 'field' of + * 'container'. + */ +#define endof(container, field) \ + (offsetof(container, field) + sizeof_field(container, field)) + +/* Convert from a base type to a parent type, with compile time checking. */ +#ifdef __GNUC__ +#define DO_UPCAST(type, field, dev) ( __extension__ ( { \ + char __attribute__((unused)) offset_must_be_zero[ \ + -offsetof(type, field)]; \ + container_of(dev, type, field);})) +#else +#define DO_UPCAST(type, field, dev) container_of(dev, type, field) +#endif + +#define typeof_field(type, field) typeof(((type *)0)->field) +#define type_check(t1,t2) ((t1*)0 - (t2*)0) + +#define QEMU_BUILD_BUG_ON_STRUCT(x) \ + struct { \ + int:(x) ? -1 : 1; \ + } + +/* QEMU_BUILD_BUG_MSG() emits the message given if _Static_assert is + * supported; otherwise, it will be omitted from the compiler error + * message (but as it remains present in the source code, it can still + * be useful when debugging). 
*/ +#if defined(CONFIG_STATIC_ASSERT) +#define QEMU_BUILD_BUG_MSG(x, msg) _Static_assert(!(x), msg) +#elif defined(__COUNTER__) +#define QEMU_BUILD_BUG_MSG(x, msg) typedef QEMU_BUILD_BUG_ON_STRUCT(x) \ + glue(qemu_build_bug_on__, __COUNTER__) __attribute__((unused)) +#else +#define QEMU_BUILD_BUG_MSG(x, msg) +#endif + +#define QEMU_BUILD_BUG_ON(x) QEMU_BUILD_BUG_MSG(x, "not expecting: " #x) + +#define QEMU_BUILD_BUG_ON_ZERO(x) (sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)) - \ + sizeof(QEMU_BUILD_BUG_ON_STRUCT(x))) #if defined __GNUC__ # if !QEMU_GNUC_PREREQ(4, 4) @@ -132,6 +209,128 @@ static union MSVC_FLOAT_HACK __NAN = {{0x00, 0x00, 0xC0, 0x7F}}; #define GCC_FMT_ATTR(n, m) #endif -#endif // _MSC_VER +#ifndef __has_warning +#define __has_warning(x) 0 /* compatibility with non-clang compilers */ +#endif +#ifndef __has_feature +#define __has_feature(x) 0 /* compatibility with non-clang compilers */ +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 /* compatibility with non-clang compilers */ +#endif + +#if __has_builtin(__builtin_assume_aligned) || !defined(__clang__) +#define HAS_ASSUME_ALIGNED +#endif + +#ifndef __has_attribute +#define __has_attribute(x) 0 /* compatibility with older GCC */ +#endif + +/* + * GCC doesn't provide __has_attribute() until GCC 5, but we know all the GCC + * versions we support have the "flatten" attribute. Clang may not have the + * "flatten" attribute but always has __has_attribute() to check for it. + */ +#if __has_attribute(flatten) || !defined(__clang__) +# define QEMU_FLATTEN __attribute__((flatten)) +#else +# define QEMU_FLATTEN +#endif + +/* + * If __attribute__((error)) is present, use it to produce an error at + * compile time. Otherwise, one must wait for the linker to diagnose + * the missing symbol. + */ +#if __has_attribute(error) +# define QEMU_ERROR(X) __attribute__((error(X))) +#else +# define QEMU_ERROR(X) +#endif + +/* + * The nonstring variable attribute specifies that an object or member + * declaration with type array of char or pointer to char is intended + * to store character arrays that do not necessarily contain a terminating + * NUL character. This is useful in detecting uses of such arrays or pointers + * with functions that expect NUL-terminated strings, and to avoid warnings + * when such an array or pointer is used as an argument to a bounded string + * manipulation function such as strncpy. + */ +#if __has_attribute(nonstring) +# define QEMU_NONSTRING __attribute__((nonstring)) +#else +# define QEMU_NONSTRING +#endif + +/* + * Forced inlining may be desired to encourage constant propagation + * of function parameters. However, it can also make debugging harder, + * so disable it for a non-optimizing build. + */ +#if defined(__OPTIMIZE__) +#define QEMU_ALWAYS_INLINE __attribute__((always_inline)) +#else +#define QEMU_ALWAYS_INLINE +#endif + +/* Implement C11 _Generic via GCC builtins. Example: + * + * QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x) + * + * The first argument is the discriminator. The last is the default value. + * The middle ones are tuples in "(type, expansion)" format. + */ + +/* First, find out the number of generic cases. */ +#define QEMU_GENERIC(x, ...) \ + QEMU_GENERIC_(typeof(x), __VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) + +/* There will be extra arguments, but they are not used. */ +#define QEMU_GENERIC_(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, count, ...) 
\
+    QEMU_GENERIC##count(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+
+/* Two more helper macros, this time to extract items from a parenthesized
+ * list.
+ */
+#define QEMU_FIRST_(a, b) a
+#define QEMU_SECOND_(a, b) b
+
+/* ... and a final one for the common part of the "recursion". */
+#define QEMU_GENERIC_IF(x, type_then, else_)                                   \
+    __builtin_choose_expr(__builtin_types_compatible_p(x,                      \
+                                                       QEMU_FIRST_ type_then), \
+                          QEMU_SECOND_ type_then, else_)
+
+/* CPP poor man's "recursion". */
+#define QEMU_GENERIC1(x, a0, ...) (a0)
+#define QEMU_GENERIC2(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC1(x, __VA_ARGS__))
+#define QEMU_GENERIC3(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC2(x, __VA_ARGS__))
+#define QEMU_GENERIC4(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC3(x, __VA_ARGS__))
+#define QEMU_GENERIC5(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC4(x, __VA_ARGS__))
+#define QEMU_GENERIC6(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC5(x, __VA_ARGS__))
+#define QEMU_GENERIC7(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC6(x, __VA_ARGS__))
+#define QEMU_GENERIC8(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC7(x, __VA_ARGS__))
+#define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__))
+#define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__))
+
+/**
+ * qemu_build_not_reached()
+ *
+ * The compiler, during optimization, is expected to prove that a call
+ * to this function cannot be reached and remove it. If the compiler
+ * supports QEMU_ERROR, this will be reported at compile time; otherwise
+ * this will be reported at link time due to the missing symbol.
+ */
+#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__)
+extern void QEMU_NORETURN QEMU_ERROR("code path is reachable")
+    qemu_build_not_reached(void);
+#else
+#define qemu_build_not_reached() g_assert_not_reached()
+#endif
+
+#endif // _MSC_VER
 
 #endif /* COMPILER_H */
diff --git a/qemu/include/qemu/cpuid.h b/qemu/include/qemu/cpuid.h
new file mode 100644
index 00000000..9c09dd1b
--- /dev/null
+++ b/qemu/include/qemu/cpuid.h
@@ -0,0 +1,67 @@
+/* cpuid.h: Macros to identify the properties of an x86 host.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_CPUID_H
+#define QEMU_CPUID_H
+
+#ifndef CONFIG_CPUID_H
+# error "<cpuid.h> is unusable with this compiler"
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#else
+#include <cpuid.h>
+#endif
+
+/* Cover the uses that we have within qemu. */
+/* ??? Irritating that we have the same information in target/i386/. */
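
A quick aside before the feature bits: compiler.h's QEMU_GENERIC above is easiest to read from a call site. A small usage sketch, assuming the QEMU_GENERIC family is in scope (my_abs is an invented name; this is GCC/clang-only, since it leans on __builtin_choose_expr and __builtin_types_compatible_p):

    #include <math.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Assumes the QEMU_GENERIC macros above are in scope.
     * Dispatch on the static type of x: float -> fabsf, double -> fabs,
     * anything else falls through to the default, abs. */
    #define my_abs(x) \
        QEMU_GENERIC(x, (float, fabsf), (double, fabs), abs)(x)

    int main(void)
    {
        float f = -1.5f;
        int i = -3;
        printf("%f %d\n", my_abs(f), my_abs(i)); /* 1.500000 3 */
        return 0;
    }
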
+
+/* Leaf 1, %edx */
+#ifndef bit_CMOV
+#define bit_CMOV (1 << 15)
+#endif
+#ifndef bit_SSE2
+#define bit_SSE2 (1 << 26)
+#endif
+
+/* Leaf 1, %ecx */
+#ifndef bit_SSE4_1
+#define bit_SSE4_1 (1 << 19)
+#endif
+#ifndef bit_MOVBE
+#define bit_MOVBE (1 << 22)
+#endif
+#ifndef bit_POPCNT
+#define bit_POPCNT (1 << 23)
+#endif
+#ifndef bit_OSXSAVE
+#define bit_OSXSAVE (1 << 27)
+#endif
+#ifndef bit_AVX
+#define bit_AVX (1 << 28)
+#endif
+
+/* Leaf 7, %ebx */
+#ifndef bit_BMI
+#define bit_BMI (1 << 3)
+#endif
+#ifndef bit_AVX2
+#define bit_AVX2 (1 << 5)
+#endif
+#ifndef bit_AVX512F
+#define bit_AVX512F (1 << 16)
+#endif
+#ifndef bit_BMI2
+#define bit_BMI2 (1 << 8)
+#endif
+
+/* Leaf 0x80000001, %ecx */
+#ifndef bit_LZCNT
+#define bit_LZCNT (1 << 5)
+#endif
+
+#endif /* QEMU_CPUID_H */
diff --git a/qemu/include/qemu/crc32c.h b/qemu/include/qemu/crc32c.h
index dafb6a1a..1b537024 100644
--- a/qemu/include/qemu/crc32c.h
+++ b/qemu/include/qemu/crc32c.h
@@ -28,8 +28,9 @@
 #ifndef QEMU_CRC32C_H
 #define QEMU_CRC32C_H
 
-#include "qemu-common.h"
 
 uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length);
+uint32_t crc32(uint32_t crc, const uint8_t *data, unsigned int length);
+
 #endif
diff --git a/qemu/include/qemu/ctype.h b/qemu/include/qemu/ctype.h
new file mode 100644
index 00000000..3691f098
--- /dev/null
+++ b/qemu/include/qemu/ctype.h
@@ -0,0 +1,27 @@
+/*
+ * QEMU TCG support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_CTYPE_H
+#define QEMU_CTYPE_H
+
+#define qemu_isalnum(c) isalnum((unsigned char)(c))
+#define qemu_isalpha(c) isalpha((unsigned char)(c))
+#define qemu_iscntrl(c) iscntrl((unsigned char)(c))
+#define qemu_isdigit(c) isdigit((unsigned char)(c))
+#define qemu_isgraph(c) isgraph((unsigned char)(c))
+#define qemu_islower(c) islower((unsigned char)(c))
+#define qemu_isprint(c) isprint((unsigned char)(c))
+#define qemu_ispunct(c) ispunct((unsigned char)(c))
+#define qemu_isspace(c) isspace((unsigned char)(c))
+#define qemu_isupper(c) isupper((unsigned char)(c))
+#define qemu_isxdigit(c) isxdigit((unsigned char)(c))
+#define qemu_tolower(c) tolower((unsigned char)(c))
+#define qemu_toupper(c) toupper((unsigned char)(c))
+#define qemu_isascii(c) isascii((unsigned char)(c))
+#define qemu_toascii(c) toascii((unsigned char)(c))
+
+#endif
diff --git a/qemu/include/qemu/cutils.h b/qemu/include/qemu/cutils.h
new file mode 100644
index 00000000..732c61a9
--- /dev/null
+++ b/qemu/include/qemu/cutils.h
@@ -0,0 +1,41 @@
+#ifndef QEMU_CUTILS_H
+#define QEMU_CUTILS_H
+
+/**
+ * pstrcpy:
+ * @buf: buffer to copy string into
+ * @buf_size: size of @buf in bytes
+ * @str: string to copy
+ *
+ * Copy @str into @buf, including the trailing NUL, but do not
+ * write more than @buf_size bytes. The resulting buffer is
+ * always NUL terminated (even if the source string was too long).
+ * If @buf_size is zero or negative then no bytes are copied.
+ *
+ * This function is similar to strncpy(), but avoids two of that
+ * function's problems:
+ *  * if @str fits in the buffer, pstrcpy() does not zero-fill the
+ *    remaining space at the end of @buf
+ *  * if @str is too long, pstrcpy() will copy the first @buf_size-1
+ *    bytes and then add a NUL
+ */
+void pstrcpy(char *buf, int buf_size, const char *str);
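
A usage sketch of the pstrcpy contract just documented (demo_pstrcpy is an invented stand-in so the fragment is self-contained; the real implementation lives elsewhere in this patch):

    #include <stdio.h>

    /* Illustrative reimplementation matching the contract above. */
    static void demo_pstrcpy(char *buf, int buf_size, const char *str)
    {
        int i = 0;

        if (buf_size <= 0) {
            return;                  /* zero/negative size: nothing written */
        }
        while (i < buf_size - 1 && str[i]) {
            buf[i] = str[i];
            i++;
        }
        buf[i] = '\0';               /* always NUL-terminated */
    }

    int main(void)
    {
        char small[4];

        demo_pstrcpy(small, sizeof(small), "unicorn");
        printf("%s\n", small);       /* "uni": truncated but terminated */
        return 0;
    }
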
+
+/**
+ * pstrcat:
+ * @buf: buffer containing existing string
+ * @buf_size: size of @buf in bytes
+ * @s: string to concatenate to @buf
+ *
+ * Append a copy of @s to the string already in @buf, but do not
+ * allow the buffer to overflow. If the existing contents of @buf
+ * plus @s would total more than @buf_size bytes, then write
+ * as much of @s as will fit followed by a NUL terminator.
+ *
+ * @buf must already contain a NUL-terminated string, or the
+ * behaviour is undefined.
+ *
+ * Returns: @buf.
+ */
+char *pstrcat(char *buf, int buf_size, const char *s);
+
+#endif
diff --git a/qemu/include/qemu/guest-random.h b/qemu/include/qemu/guest-random.h
new file mode 100644
index 00000000..1d720059
--- /dev/null
+++ b/qemu/include/qemu/guest-random.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU guest-visible random functions
+ *
+ * Copyright 2019 Linaro, Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef QEMU_GUEST_RANDOM_H
+#define QEMU_GUEST_RANDOM_H
+
+/**
+ * qemu_guest_random_seed_thread_part1(void)
+ *
+ * If qemu_guest_getrandom is in deterministic mode, returns an
+ * independent seed for the new thread. Otherwise returns 0.
+ */
+uint64_t qemu_guest_random_seed_thread_part1(void);
+
+/**
+ * qemu_guest_random_seed_thread_part2(uint64_t seed)
+ * @seed: a value for the new thread.
+ *
+ * If qemu_guest_getrandom is in deterministic mode, this stores an
+ * independent seed for the new thread. Otherwise a no-op.
+ */
+void qemu_guest_random_seed_thread_part2(uint64_t seed);
+
+/**
+ * qemu_guest_getrandom(void *buf, size_t len)
+ * @buf: a buffer of bytes to be written
+ * @len: the number of bytes in @buf
+ *
+ * Fills @len bytes in @buf with random data. This should only be used
+ * for data presented to the guest. Host-side crypto services should
+ * use qcrypto_random_bytes.
+ *
+ * Returns 0 on success, < 0 on failure.
+ */
+int qemu_guest_getrandom(void *buf, size_t len);
+
+/**
+ * qemu_guest_getrandom_nofail(void *buf, size_t len)
+ * @buf: a buffer of bytes to be written
+ * @len: the number of bytes in @buf
+ *
+ * Like qemu_guest_getrandom, but will assert for failure.
+ * Use this when there is no reasonable recovery.
+ */
+void qemu_guest_getrandom_nofail(void *buf, size_t len);
+
+#endif /* QEMU_GUEST_RANDOM_H */
diff --git a/qemu/include/qemu/host-utils.h b/qemu/include/qemu/host-utils.h
index f6157cfd..2939025f 100644
--- a/qemu/include/qemu/host-utils.h
+++ b/qemu/include/qemu/host-utils.h
@@ -22,11 +22,12 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
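
Stepping back to guest-random.h above, the intended call pattern looks roughly like the sketch below; the prototypes are repeated so the fragment stands alone, and uc_fill_random is an invented caller name:

    #include <stddef.h>
    #include <stdint.h>

    /* Prototypes repeated from the header above. */
    uint64_t qemu_guest_random_seed_thread_part1(void);
    void qemu_guest_random_seed_thread_part2(uint64_t seed);
    void qemu_guest_getrandom_nofail(void *buf, size_t len);

    /* Hypothetical helper: hand a deterministic seed across a thread
     * spawn, then draw guest-visible randomness on the new thread. */
    static void uc_fill_random(uint8_t *buf, size_t len)
    {
        uint64_t seed = qemu_guest_random_seed_thread_part1(); /* creator side */
        qemu_guest_random_seed_thread_part2(seed);             /* new-thread side */
        qemu_guest_getrandom_nofail(buf, len);
    }
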
*/ -#ifndef HOST_UTILS_H -#define HOST_UTILS_H 1 -#include "qemu/compiler.h" /* QEMU_GNUC_PREREQ */ -#include +#ifndef HOST_UTILS_H +#define HOST_UTILS_H + +#include "qemu/bswap.h" +#include "qemu/int128.h" #ifdef CONFIG_INT128 static inline void mulu64(uint64_t *plow, uint64_t *phigh, @@ -45,6 +46,12 @@ static inline void muls64(uint64_t *plow, uint64_t *phigh, *phigh = r >> 64; } +/* compute with 96 bit intermediate result: (a*b)/c */ +static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) +{ + return (__int128_t)a * b / c; +} + static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) { if (divisor == 0) { @@ -75,6 +82,29 @@ void muls64(uint64_t *phigh, uint64_t *plow, int64_t a, int64_t b); void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b); int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); int divs128(int64_t *plow, int64_t *phigh, int64_t divisor); + +static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) +{ + union { + uint64_t ll; + struct { +#ifdef HOST_WORDS_BIGENDIAN + uint32_t high, low; +#else + uint32_t low, high; +#endif + } l; + } u, res; + uint64_t rl, rh; + + u.ll = a; + rl = (uint64_t)u.l.low * (uint64_t)b; + rh = (uint64_t)u.l.high * (uint64_t)b; + rh += (rl >> 32); + res.l.high = rh / c; + res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c; + return res.ll; +} #endif /** @@ -86,7 +116,7 @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor); */ static inline int clz32(uint32_t val) { -#if QEMU_GNUC_PREREQ(3, 4) +#ifndef _MSC_VER return val ? __builtin_clz(val) : 32; #else /* Binary search for the leading one bit. */ @@ -139,7 +169,7 @@ static inline int clo32(uint32_t val) */ static inline int clz64(uint64_t val) { -#if QEMU_GNUC_PREREQ(3, 4) +#ifndef _MSC_VER return val ? __builtin_clzll(val) : 64; #else int cnt = 0; @@ -174,7 +204,7 @@ static inline int clo64(uint64_t val) */ static inline int ctz32(uint32_t val) { -#if QEMU_GNUC_PREREQ(3, 4) +#ifndef _MSC_VER return val ? __builtin_ctz(val) : 32; #else /* Binary search for the trailing one bit. */ @@ -229,7 +259,7 @@ static inline int cto32(uint32_t val) */ static inline int ctz64(uint64_t val) { -#if QEMU_GNUC_PREREQ(3, 4) +#ifndef _MSC_VER return val ? 
__builtin_ctzll(val) : 64; #else int cnt; @@ -264,7 +294,7 @@ static inline int cto64(uint64_t val) */ static inline int clrsb32(uint32_t val) { -#if QEMU_GNUC_PREREQ(4, 7) +#if !defined(_MSC_VER) && !defined(__clang__) return __builtin_clrsb(val); #else return clz32(val ^ ((int32_t)val >> 1)) - 1; @@ -280,7 +310,7 @@ static inline int clrsb32(uint32_t val) */ static inline int clrsb64(uint64_t val) { -#if QEMU_GNUC_PREREQ(4, 7) +#if !defined(_MSC_VER) && !defined(__clang__) return __builtin_clrsbll(val); #else return clz64(val ^ ((int64_t)val >> 1)) - 1; @@ -293,7 +323,7 @@ static inline int clrsb64(uint64_t val) */ static inline int ctpop8(uint8_t val) { -#if QEMU_GNUC_PREREQ(3, 4) +#ifndef _MSC_VER return __builtin_popcount(val); #else val = (val & 0x55) + ((val >> 1) & 0x55); @@ -310,7 +340,7 @@ static inline int ctpop8(uint8_t val) */ static inline int ctpop16(uint16_t val) { -#if QEMU_GNUC_PREREQ(3, 4) +#ifndef _MSC_VER return __builtin_popcount(val); #else val = (val & 0x5555) + ((val >> 1) & 0x5555); @@ -328,7 +358,7 @@ static inline int ctpop16(uint16_t val) */ static inline int ctpop32(uint32_t val) { -#if QEMU_GNUC_PREREQ(3, 4) +#ifndef _MSC_VER return __builtin_popcount(val); #else val = (val & 0x55555555) + ((val >> 1) & 0x55555555); @@ -347,7 +377,7 @@ static inline int ctpop32(uint32_t val) */ static inline int ctpop64(uint64_t val) { -#if QEMU_GNUC_PREREQ(3, 4) +#ifndef _MSC_VER return __builtin_popcountll(val); #else val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); @@ -361,6 +391,80 @@ static inline int ctpop64(uint64_t val) #endif } +/** + * revbit8 - reverse the bits in an 8-bit value. + * @x: The value to modify. + */ +static inline uint8_t revbit8(uint8_t x) +{ + /* Assign the correct nibble position. */ + x = ((x & 0xf0) >> 4) + | ((x & 0x0f) << 4); + /* Assign the correct bit position. */ + x = ((x & 0x88) >> 3) + | ((x & 0x44) >> 1) + | ((x & 0x22) << 1) + | ((x & 0x11) << 3); + return x; +} + +/** + * revbit16 - reverse the bits in a 16-bit value. + * @x: The value to modify. + */ +static inline uint16_t revbit16(uint16_t x) +{ + /* Assign the correct byte position. */ + x = bswap16(x); + /* Assign the correct nibble position. */ + x = ((x & 0xf0f0) >> 4) + | ((x & 0x0f0f) << 4); + /* Assign the correct bit position. */ + x = ((x & 0x8888) >> 3) + | ((x & 0x4444) >> 1) + | ((x & 0x2222) << 1) + | ((x & 0x1111) << 3); + return x; +} + +/** + * revbit32 - reverse the bits in a 32-bit value. + * @x: The value to modify. + */ +static inline uint32_t revbit32(uint32_t x) +{ + /* Assign the correct byte position. */ + x = bswap32(x); + /* Assign the correct nibble position. */ + x = ((x & 0xf0f0f0f0u) >> 4) + | ((x & 0x0f0f0f0fu) << 4); + /* Assign the correct bit position. */ + x = ((x & 0x88888888u) >> 3) + | ((x & 0x44444444u) >> 1) + | ((x & 0x22222222u) << 1) + | ((x & 0x11111111u) << 3); + return x; +} + +/** + * revbit64 - reverse the bits in a 64-bit value. + * @x: The value to modify. + */ +static inline uint64_t revbit64(uint64_t x) +{ + /* Assign the correct byte position. */ + x = bswap64(x); + /* Assign the correct nibble position. */ + x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4) + | ((x & 0x0f0f0f0f0f0f0f0full) << 4); + /* Assign the correct bit position. */ + x = ((x & 0x8888888888888888ull) >> 3) + | ((x & 0x4444444444444444ull) >> 1) + | ((x & 0x2222222222222222ull) << 1) + | ((x & 0x1111111111111111ull) << 3); + return x; +} + /* Host type specific sizes of these routines. 
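
The revbit* routines above reverse progressively smaller groups: bytes (via bswap), then nibbles, then the bits inside each nibble. A quick exhaustive self-check of the 8-bit variant against a naive loop (not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint8_t naive_revbit8(uint8_t x)
    {
        uint8_t r = 0;
        for (int i = 0; i < 8; i++) {
            r = (uint8_t)((r << 1) | ((x >> i) & 1));
        }
        return r;
    }

    /* Same nibble-then-bit swap steps as revbit8 above. */
    static uint8_t swap_revbit8(uint8_t x)
    {
        x = (uint8_t)(((x & 0xf0) >> 4) | ((x & 0x0f) << 4));
        x = (uint8_t)(((x & 0x88) >> 3) | ((x & 0x44) >> 1)
                    | ((x & 0x22) << 1) | ((x & 0x11) << 3));
        return x;
    }

    int main(void)
    {
        for (int v = 0; v < 256; v++) {
            assert(naive_revbit8((uint8_t)v) == swap_revbit8((uint8_t)v));
        }
        return 0;
    }
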
*/
 #if ULONG_MAX == UINT32_MAX
@@ -369,14 +473,93 @@ static inline int ctpop64(uint64_t val)
 # define clzl   clz32
 # define ctzl   ctz32
 # define clol   clo32
 # define ctol   cto32
 # define ctpopl ctpop32
+# define revbitl revbit32
 #elif ULONG_MAX == UINT64_MAX
 # define clzl   clz64
 # define ctzl   ctz64
 # define clol   clo64
 # define ctol   cto64
 # define ctpopl ctpop64
+# define revbitl revbit64
 #else
 # error Unknown sizeof long
 #endif
 
+static inline bool is_power_of_2(uint64_t value)
+{
+    if (!value) {
+        return false;
+    }
+
+    return !(value & (value - 1));
+}
+
+/**
+ * Return @value rounded down to the nearest power of two or zero.
+ */
+static inline uint64_t pow2floor(uint64_t value)
+{
+    if (!value) {
+        /* Avoid undefined shift by 64 */
+        return 0;
+    }
+    return 0x8000000000000000ull >> clz64(value);
+}
+
+/*
+ * Return @value rounded up to the nearest power of two modulo 2^64.
+ * This is *zero* for @value > 2^63, so be careful.
+ */
+static inline uint64_t pow2ceil(uint64_t value)
+{
+    int n = clz64(value - 1);
+
+    if (!n) {
+        /*
+         * @value - 1 has no leading zeroes, thus @value - 1 >= 2^63
+         * Therefore, either @value == 0 or @value > 2^63.
+         * If it's 0, return 1, else return 0.
+         */
+        return !value;
+    }
+    return 0x8000000000000000ull >> (n - 1);
+}
+
+static inline uint32_t pow2roundup32(uint32_t x)
+{
+    x |= (x >> 1);
+    x |= (x >> 2);
+    x |= (x >> 4);
+    x |= (x >> 8);
+    x |= (x >> 16);
+    return x + 1;
+}
+
+/**
+ * urshift - 128-bit Unsigned Right Shift.
+ * @plow: in/out - lower 64-bit integer.
+ * @phigh: in/out - higher 64-bit integer.
+ * @shift: in - bits to shift, between 0 and 127.
+ *
+ * Result is zero-extended and stored in plow/phigh, which are
+ * input/output variables. Shift values outside the range will
+ * be taken modulo 128. In other words, the caller is responsible
+ * for verifying/asserting both the shift range and the plow/phigh
+ * pointers.
+ */
+void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
+
+/**
+ * ulshift - 128-bit Unsigned Left Shift.
+ * @plow: in/out - lower 64-bit integer.
+ * @phigh: in/out - higher 64-bit integer.
+ * @shift: in - bits to shift, between 0 and 127.
+ * @overflow: out - true if any 1-bit is shifted out.
+ *
+ * Result is zero-extended and stored in plow/phigh, which are
+ * input/output variables. Shift values outside the range will
+ * be taken modulo 128. In other words, the caller is responsible
+ * for verifying/asserting both the shift range and the plow/phigh
+ * pointers.
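
Worked values for the power-of-two helpers above, including the documented edge cases; this assumes the inline helpers are in scope:

    #include <assert.h>
    #include <stdint.h>

    /* Assumes is_power_of_2/pow2floor/pow2ceil/pow2roundup32 above
     * are in scope. */
    int main(void)
    {
        assert(is_power_of_2(64) && !is_power_of_2(0) && !is_power_of_2(12));
        assert(pow2floor(12) == 8);
        assert(pow2ceil(12) == 16);
        assert(pow2ceil(16) == 16);     /* already a power of two */
        assert(pow2ceil(0) == 1);       /* the !value special case */
        /* pow2roundup32 yields the next power of two strictly above x: */
        assert(pow2roundup32(12) == 16 && pow2roundup32(16) == 32);
        return 0;
    }
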
+ */ +void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow); + #endif diff --git a/qemu/include/qemu/int128.h b/qemu/include/qemu/int128.h index f4352100..d32511a6 100644 --- a/qemu/include/qemu/int128.h +++ b/qemu/include/qemu/int128.h @@ -1,10 +1,152 @@ #ifndef INT128_H #define INT128_H -//#include -#include "unicorn/platform.h" +#include "qemu/bswap.h" +#ifdef CONFIG_INT128 + +typedef __int128_t Int128; + +static inline Int128 int128_make64(uint64_t a) +{ + return a; +} + +static inline Int128 int128_make128(uint64_t lo, uint64_t hi) +{ + return (__uint128_t)hi << 64 | lo; +} + +static inline uint64_t int128_get64(Int128 a) +{ + uint64_t r = a; + assert(r == a); + return r; +} + +static inline uint64_t int128_getlo(Int128 a) +{ + return a; +} + +static inline int64_t int128_gethi(Int128 a) +{ + return a >> 64; +} + +static inline Int128 int128_zero(void) +{ + return 0; +} + +static inline Int128 int128_one(void) +{ + return 1; +} + +static inline Int128 int128_2_64(void) +{ + return (Int128)1 << 64; +} + +static inline Int128 int128_exts64(int64_t a) +{ + return a; +} + +static inline Int128 int128_and(Int128 a, Int128 b) +{ + return a & b; +} + +static inline Int128 int128_rshift(Int128 a, int n) +{ + return a >> n; +} + +static inline Int128 int128_add(Int128 a, Int128 b) +{ + return a + b; +} + +static inline Int128 int128_neg(Int128 a) +{ + return -a; +} + +static inline Int128 int128_sub(Int128 a, Int128 b) +{ + return a - b; +} + +static inline bool int128_nonneg(Int128 a) +{ + return a >= 0; +} + +static inline bool int128_eq(Int128 a, Int128 b) +{ + return a == b; +} + +static inline bool int128_ne(Int128 a, Int128 b) +{ + return a != b; +} + +static inline bool int128_ge(Int128 a, Int128 b) +{ + return a >= b; +} + +static inline bool int128_lt(Int128 a, Int128 b) +{ + return a < b; +} + +static inline bool int128_le(Int128 a, Int128 b) +{ + return a <= b; +} + +static inline bool int128_gt(Int128 a, Int128 b) +{ + return a > b; +} + +static inline bool int128_nz(Int128 a) +{ + return a != 0; +} + +static inline Int128 int128_min(Int128 a, Int128 b) +{ + return a < b ? a : b; +} + +static inline Int128 int128_max(Int128 a, Int128 b) +{ + return a > b ? a : b; +} + +static inline void int128_addto(Int128 *a, Int128 b) +{ + *a += b; +} + +static inline void int128_subfrom(Int128 *a, Int128 b) +{ + *a -= b; +} + +static inline Int128 bswap128(Int128 a) +{ + return int128_make128(bswap64(int128_gethi(a)), bswap64(int128_getlo(a))); +} + +#else /* !CONFIG_INT128 */ typedef struct Int128 Int128; +typedef Int128 __int128_t; struct Int128 { uint64_t lo; @@ -13,16 +155,30 @@ struct Int128 { static inline Int128 int128_make64(uint64_t a) { - Int128 i128 = { a, 0 }; - return i128; + return (Int128) { a, 0 }; +} + +static inline Int128 int128_make128(uint64_t lo, uint64_t hi) +{ + return (Int128) { lo, hi }; } static inline uint64_t int128_get64(Int128 a) { - //assert(!a.hi); + assert(!a.hi); return a.lo; } +static inline uint64_t int128_getlo(Int128 a) +{ + return a.lo; +} + +static inline int64_t int128_gethi(Int128 a) +{ + return a.hi; +} + static inline Int128 int128_zero(void) { return int128_make64(0); @@ -35,20 +191,17 @@ static inline Int128 int128_one(void) static inline Int128 int128_2_64(void) { - Int128 i128 = { 0, 1 }; - return i128; + return (Int128) { 0, 1 }; } static inline Int128 int128_exts64(int64_t a) { - Int128 i128 = { a, (a < 0) ? -1 : 0 }; - return i128; + return (Int128) { .lo = a, .hi = (a < 0) ? 
-1 : 0 }; } static inline Int128 int128_and(Int128 a, Int128 b) { - Int128 i128 = { a.lo & b.lo, a.hi & b.hi }; - return i128; + return (Int128) { a.lo & b.lo, a.hi & b.hi }; } static inline Int128 int128_rshift(Int128 a, int n) @@ -59,11 +212,9 @@ static inline Int128 int128_rshift(Int128 a, int n) } h = a.hi >> (n & 63); if (n >= 64) { - Int128 i128 = { h, h >> 63 }; - return i128; + return int128_make128(h, h >> 63); } else { - Int128 i128 = { (a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h }; - return i128; + return int128_make128((a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h); } } @@ -77,21 +228,23 @@ static inline Int128 int128_add(Int128 a, Int128 b) * * So the carry is lo < a.lo. */ - Int128 i128 = { lo, (uint64_t)a.hi + b.hi + (lo < a.lo) }; - return i128; + return int128_make128(lo, (uint64_t)a.hi + b.hi + (lo < a.lo)); } static inline Int128 int128_neg(Int128 a) { - uint64_t lo = 0-a.lo; - Int128 i128 = { lo, ~(uint64_t)a.hi + !lo }; - return i128; +#ifdef _MSC_VER + uint64_t lo = a.lo; + lo = 0 - lo; +#else + uint64_t lo = (uint64_t)(-a.lo); +#endif + return int128_make128(lo, ~(uint64_t)a.hi + !lo); } static inline Int128 int128_sub(Int128 a, Int128 b) { - Int128 i128 = { a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo) }; - return i128; + return int128_make128(a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo)); } static inline bool int128_nonneg(Int128 a) @@ -154,4 +307,10 @@ static inline void int128_subfrom(Int128 *a, Int128 b) *a = int128_sub(*a, b); } -#endif +static inline Int128 bswap128(Int128 a) +{ + return int128_make128(bswap64(int128_gethi(a)), bswap64(int128_getlo(a))); +} + +#endif /* CONFIG_INT128 */ +#endif /* INT128_H */ diff --git a/qemu/include/qemu/log.h b/qemu/include/qemu/log.h index 6f9ccdf7..ab8e0f0a 100644 --- a/qemu/include/qemu/log.h +++ b/qemu/include/qemu/log.h @@ -1,29 +1,6 @@ #ifndef QEMU_LOG_H #define QEMU_LOG_H -#include -#include "unicorn/platform.h" -#include "qemu/compiler.h" -#include "qom/cpu.h" - -/* Private global variables, don't use */ -extern FILE *qemu_logfile; -extern int qemu_loglevel; - -/* - * The new API: - * - */ - -/* Log settings checking macros: */ - -/* Returns true if qemu_log() will really write somewhere - */ -static inline bool qemu_log_enabled(void) -{ - return qemu_logfile != NULL; -} - #define CPU_LOG_TB_OUT_ASM (1 << 0) #define CPU_LOG_TB_IN_ASM (1 << 1) #define CPU_LOG_TB_OP (1 << 2) @@ -31,87 +8,42 @@ static inline bool qemu_log_enabled(void) #define CPU_LOG_INT (1 << 4) #define CPU_LOG_EXEC (1 << 5) #define CPU_LOG_PCALL (1 << 6) -#define CPU_LOG_IOPORT (1 << 7) #define CPU_LOG_TB_CPU (1 << 8) #define CPU_LOG_RESET (1 << 9) #define LOG_UNIMP (1 << 10) #define LOG_GUEST_ERROR (1 << 11) +#define CPU_LOG_MMU (1 << 12) +#define CPU_LOG_TB_NOCHAIN (1 << 13) +#define CPU_LOG_PAGE (1 << 14) +/* LOG_TRACE (1 << 15) is defined in log-for-trace.h */ +#define CPU_LOG_TB_OP_IND (1 << 16) +#define CPU_LOG_TB_FPU (1 << 17) +#define CPU_LOG_PLUGIN (1 << 18) +/* LOG_STRACE is used for user-mode strace logging. */ +#define LOG_STRACE (1 << 19) -/* Returns true if a bit is set in the current loglevel mask +/* Lock output for a series of related logs. Since this is not needed + * for a single qemu_log / qemu_log_mask / qemu_log_mask_and_addr, we + * assume that qemu_loglevel_mask has already been tested, and that + * qemu_loglevel is never set when qemu_logfile is unset. 
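
Back on the int128.h hunk a little earlier: with CONFIG_INT128 these operations compile to native __int128 arithmetic, while the struct fallback must propagate the low-half carry by hand. A standalone sketch of int128_add's carry test (the demo_* names are invented):

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } demo_i128;

    /* Mirror of int128_add's carry trick: lo wraps mod 2^64, and it
     * wrapped exactly when the sum is smaller than either input half. */
    static demo_i128 demo_add(demo_i128 a, demo_i128 b)
    {
        uint64_t lo = a.lo + b.lo;
        return (demo_i128){ lo, a.hi + b.hi + (lo < a.lo) };
    }

    int main(void)
    {
        demo_i128 a = { UINT64_MAX, 0 };   /* 2^64 - 1 */
        demo_i128 one = { 1, 0 };
        demo_i128 r = demo_add(a, one);
        assert(r.lo == 0 && r.hi == 1);    /* carried into the high half */
        return 0;
    }
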
*/ -static inline bool qemu_loglevel_mask(int mask) -{ - return (qemu_loglevel & mask) != 0; -} /* Logging functions: */ -/* main logging function +/* log only if a bit is set on the current loglevel mask: + * @mask: bit to check in the mask + * @fmt: printf-style format string + * @args: optional arguments for format string */ -void GCC_FMT_ATTR(1, 2) qemu_log(const char *fmt, ...); - -/* vfprintf-like logging function - */ -static inline void GCC_FMT_ATTR(1, 0) -qemu_log_vprintf(const char *fmt, va_list va) -{ - if (qemu_logfile) { - vfprintf(qemu_logfile, fmt, va); - } -} +#define qemu_log_mask(MASK, FMT, ...) /* log only if a bit is set on the current loglevel mask + * and we are in the address range we care about: + * @mask: bit to check in the mask + * @addr: address to check in dfilter + * @fmt: printf-style format string + * @args: optional arguments for format string */ -void GCC_FMT_ATTR(2, 3) qemu_log_mask(int mask, const char *fmt, ...); - - -/* Special cases: */ - -/* cpu_dump_state() logging functions: */ -/** - * log_cpu_state: - * @cpu: The CPU whose state is to be logged. - * @flags: Flags what to log. - * - * Logs the output of cpu_dump_state(). - */ -static inline void log_cpu_state(CPUState *cpu, int flags) -{ - if (qemu_log_enabled()) { - cpu_dump_state(cpu, qemu_logfile, fprintf, flags); - } -} - -/** - * log_cpu_state_mask: - * @mask: Mask when to log. - * @cpu: The CPU whose state is to be logged. - * @flags: Flags what to log. - * - * Logs the output of cpu_dump_state() if loglevel includes @mask. - */ -static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags) -{ - if (qemu_loglevel & mask) { - log_cpu_state(cpu, flags); - } -} - -/* fflush() the log file */ -static inline void qemu_log_flush(void) -{ - fflush(qemu_logfile); -} - -/* Close the log file */ -static inline void qemu_log_close(void) -{ - if (qemu_logfile) { - if (qemu_logfile != stderr) { - fclose(qemu_logfile); - } - qemu_logfile = NULL; - } -} +#define qemu_log_mask_and_addr(MASK, ADDR, FMT, ...) #endif diff --git a/qemu/include/qemu/module.h b/qemu/include/qemu/module.h deleted file mode 100644 index 01b9fe58..00000000 --- a/qemu/include/qemu/module.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * QEMU Module Infrastructure - * - * Copyright IBM, Corp. 2009 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - */ - -#ifndef QEMU_MODULE_H -#define QEMU_MODULE_H - -#include "qemu/osdep.h" - -typedef enum { - MODULE_INIT_MACHINE, - MODULE_INIT_QOM, - MODULE_INIT_MAX -} module_init_type; - -#define machine_init(function) module_init(function, MODULE_INIT_MACHINE) -#define type_init(function) module_init(function, MODULE_INIT_QOM) - -void module_call_init(struct uc_struct *uc, module_init_type type); - -#endif diff --git a/qemu/include/qemu/osdep.h b/qemu/include/qemu/osdep.h index 5387816d..dc9e655a 100644 --- a/qemu/include/qemu/osdep.h +++ b/qemu/include/qemu/osdep.h @@ -1,11 +1,101 @@ +/* + * OS includes and handling of OS dependencies + * + * This header exists to pull in some common system headers that + * most code in QEMU will want, and to fix up some possible issues with + * it (missing defines, Windows weirdness, and so on). 
+ * + * To avoid getting into possible circular include dependencies, this + * file should not include any other QEMU headers, with the exceptions + * of config-host.h, config-target.h, qemu/compiler.h, + * sysemu/os-posix.h, sysemu/os-win32.h, glib-compat.h and + * qemu/typedefs.h, all of which are doing a similar job to this file + * and are under similar constraints. + * + * This header also contains prototypes for functions defined in + * os-*.c and util/oslib-*.c; those would probably be better split + * out into separate header files. + * + * In an ideal world this header would contain only: + * (1) things which everybody needs + * (2) things without which code would work on most platforms but + * fail to compile or misbehave on a minority of host OSes + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ #ifndef QEMU_OSDEP_H #define QEMU_OSDEP_H #include "config-host.h" +#ifdef NEED_CPU_H +#include "config-target.h" +#else +#include "exec/poison.h" +#endif + +#include "qemu/compiler.h" + +struct uc_struct; + + +/* Older versions of C++ don't get definitions of various macros from + * stdlib.h unless we define these macros before first inclusion of + * that system header. + */ +#ifndef __STDC_CONSTANT_MACROS +#define __STDC_CONSTANT_MACROS +#endif +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS +#endif +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#ifdef _WIN32 +/* as defined in sdkddkver.h */ +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0600 /* Vista */ +#endif +/* reduces the number of implicitly included headers */ +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#endif + +/* enable C99/POSIX format strings (needs mingw32-runtime 3.15 or later) */ +#ifdef __MINGW32__ +#ifndef __USE_MINGW_ANSI_STDIO +#define __USE_MINGW_ANSI_STDIO 1 +#endif // __USE_MINGW_ANSI_STDIO +#endif + #include #include -#include "unicorn/platform.h" +#include +#include #include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +/* setjmp must be declared before sysemu/os-win32.h + * because it is redefined there. */ +#include +#include + #ifdef __OpenBSD__ #include #endif @@ -17,52 +107,153 @@ #define WEXITSTATUS(x) (x) #endif -#if defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10 -/* [u]int_fast*_t not in */ -typedef unsigned char uint_fast8_t; -typedef unsigned int uint_fast16_t; -typedef signed int int_fast16_t; +#ifdef _WIN32 +#include "sysemu/os-win32.h" #endif -#ifndef glue -#define xglue(x, y) x ## y -#define glue(x, y) xglue(x, y) -#define stringify(s) tostring(s) -#define tostring(s) #s +#ifdef CONFIG_POSIX +#include "sys/mman.h" #endif -#ifndef likely -#if __GNUC__ < 3 -#define __builtin_expect(x, n) (x) +/* + * Only allow MAP_JIT for Mojave or later. 
+ * + * Source: https://github.com/moby/hyperkit/pull/259/files#diff-e6b5417230ff2daff9155d9b15aefae12e89410ec2dca1f59d04be511f6737fcR41 + */ +#if defined(__APPLE__) + #if defined(HAVE_PTHREAD_JIT_PROTECT) + #define USE_MAP_JIT + #else + #include + #ifdef __MAC_OS_X_VERSION_MIN_REQUIRED + #if __MAC_OS_X_VERSION_MIN_REQUIRED >= 101400 && defined(MAP_JIT) + #define USE_MAP_JIT + #endif + #endif + #endif #endif -#define likely(x) __builtin_expect(!!(x), 1) -#define unlikely(x) __builtin_expect(!!(x), 0) +#include +#include "qemu/typedefs.h" + + +/* Starting on QEMU 2.5, qemu_hw_version() returns "2.5+" by default + * instead of QEMU_VERSION, so setting hw_version on MachineClass + * is no longer mandatory. + * + * Do NOT change this string, or it will break compatibility on all + * machine classes that don't set hw_version. + */ +#define QEMU_HW_VERSION "2.5+" + + +/* + * For mingw, as of v6.0.0, the function implementing the assert macro is + * not marked as noreturn, so the compiler cannot delete code following an + * assert(false) as unused. We rely on this within the code base to delete + * code that is unreachable when features are disabled. + * All supported versions of Glib's g_assert() satisfy this requirement. + */ +#ifdef __MINGW32__ +#undef assert +#define assert(x) g_assert(x) #endif -#ifndef container_of -#ifndef _MSC_VER -#define container_of(ptr, type, member) ({ \ - const typeof(((type *) 0)->member) *__mptr = (ptr); \ - (type *) ((char *) __mptr - offsetof(type, member));}) +/* + * According to waitpid man page: + * WCOREDUMP + * This macro is not specified in POSIX.1-2001 and is not + * available on some UNIX implementations (e.g., AIX, SunOS). + * Therefore, enclose its use inside #ifdef WCOREDUMP ... #endif. + */ +#ifndef WCOREDUMP +#define WCOREDUMP(status) 0 +#endif +/* + * We have a lot of unaudited code that may fail in strange ways, or + * even be a security risk during migration, if you disable assertions + * at compile-time. You may comment out these safety checks if you + * absolutely want to disable assertion overhead, but it is not + * supported upstream so the risk is all yours. Meanwhile, please + * submit patches to remove any side-effects inside an assertion, or + * fixing error handling that should use Error instead of assert. + */ +#ifdef G_DISABLE_ASSERT +#error building with G_DISABLE_ASSERT is not supported +#endif + +#ifndef O_LARGEFILE +#define O_LARGEFILE 0 +#endif +#ifndef O_BINARY +#define O_BINARY 0 +#endif +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +#ifndef ENOMEDIUM +#define ENOMEDIUM ENODEV +#endif +#if !defined(ENOTSUP) +#define ENOTSUP 4096 +#endif +#if !defined(ECANCELED) +#define ECANCELED 4097 +#endif +#if !defined(EMEDIUMTYPE) +#define EMEDIUMTYPE 4098 +#endif +#if !defined(ESHUTDOWN) +#define ESHUTDOWN 4099 +#endif + +/* time_t may be either 32 or 64 bits depending on the host OS, and + * can be either signed or unsigned, so we can't just hardcode a + * specific maximum value. This is not a C preprocessor constant, + * so you can't use TIME_MAX in an #ifdef, but for our purposes + * this isn't a problem. + */ + +/* The macros TYPE_SIGNED, TYPE_WIDTH, and TYPE_MAXIMUM are from + * Gnulib, and are under the LGPL v2.1 or (at your option) any + * later version. + */ + +/* True if the real type T is signed. */ +#define TYPE_SIGNED(t) (!((t)0 < (t)-1)) + +/* The width in bits of the integer type or expression T. + * Padding bits are not supported. 
+ */ +#define TYPE_WIDTH(t) (sizeof(t) * CHAR_BIT) + +/* The maximum and minimum values for the integer type T. */ +#define TYPE_MAXIMUM(t) \ + ((t) (!TYPE_SIGNED(t) \ + ? (t)-1 \ + : ((((t)1 << (TYPE_WIDTH(t) - 2)) - 1) * 2 + 1))) + +#ifndef TIME_MAX +#define TIME_MAX TYPE_MAXIMUM(time_t) +#endif + +/* HOST_LONG_BITS is the size of a native pointer in bits. */ +#if UINTPTR_MAX == UINT32_MAX +# define HOST_LONG_BITS 32 +#elif UINTPTR_MAX == UINT64_MAX +# define HOST_LONG_BITS 64 #else -#define container_of(ptr, type, member) ((type *)((char *)(ptr) -offsetof(type,member))) -#endif +# error Unknown pointer size #endif -/* Convert from a base type to a parent type, with compile time checking. */ -#ifdef __GNUC__ -#define DO_UPCAST(type, field, dev) ( __extension__ ( { \ - char QEMU_UNUSED_VAR offset_must_be_zero[ \ - -offsetof(type, field)]; \ - container_of(dev, type, field);})) -#else -#define DO_UPCAST(type, field, dev) container_of(dev, type, field) +/* Mac OSX has a bug that incorrectly defines SIZE_MAX with + * the wrong type. Our replacement isn't usable in preprocessor + * expressions, but it is sufficient for our needs. */ +#if defined(HAVE_BROKEN_SIZE_MAX) && HAVE_BROKEN_SIZE_MAX +#undef SIZE_MAX +#define SIZE_MAX ((size_t)-1) #endif -#define typeof_field(type, field) typeof(((type *)0)->field) -#define type_check(t1,t2) ((t1*)0 - (t2*)0) - #ifndef MIN #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #endif @@ -73,40 +264,215 @@ typedef signed int int_fast16_t; /* Minimum function that returns zero only iff both values are zero. * Intended for use with unsigned values only. */ #ifndef MIN_NON_ZERO -#define MIN_NON_ZERO(a, b) (((a) != 0 && (a) < (b)) ? (a) : (b)) +#define MIN_NON_ZERO(a, b) ((a) == 0 ? (b) : \ + ((b) == 0 ? (a) : (MIN(a, b)))) #endif +/* Round number down to multiple */ +#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m)) + +/* Round number up to multiple. Safe when m is not a power of 2 (see + * ROUND_UP for a faster version when a power of 2 is guaranteed) */ +#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m)) + +/* Check if n is a multiple of m */ +#define QEMU_IS_ALIGNED(n, m) (((n) % (m)) == 0) + +/* n-byte align pointer down */ +#ifdef _MSC_VER +#define QEMU_ALIGN_PTR_DOWN(p, n) (QEMU_ALIGN_DOWN((uintptr_t)(p), (n))) +#else +#define QEMU_ALIGN_PTR_DOWN(p, n) ((typeof(p))QEMU_ALIGN_DOWN((uintptr_t)(p), (n))) +#endif + +/* n-byte align pointer up */ +#ifndef _MSC_VER +#define QEMU_ALIGN_PTR_UP(p, n) ((typeof(p))QEMU_ALIGN_UP((uintptr_t)(p), (n))) +#else +#define QEMU_ALIGN_PTR_UP(p, n) QEMU_ALIGN_UP((uintptr_t)(p), (n)) +#endif + +/* Check if pointer p is n-bytes aligned */ +#define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n)) + +/* Round number up to multiple. Requires that d be a power of 2 (see + * QEMU_ALIGN_UP for a safer but slower version on arbitrary + * numbers); works even if d is a smaller type than n. */ #ifndef ROUND_UP -#define ROUND_UP(n,d) (((n) + (d) - 1) & -(d)) +#ifdef _MSC_VER +#define ROUND_UP(n, d) (((n) + (d) - 1) & (0 - (0 ? (n) : (d)))) +#else +#define ROUND_UP(n, d) (((n) + (d) - 1) & -(0 ? (n) : (d))) +#endif #endif #ifndef DIV_ROUND_UP -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #endif +/* + * &(x)[0] is always a pointer - if it's same type as x then the argument is a + * pointer, not an array. 
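
The alignment helpers above in one worked example: QEMU_ALIGN_UP is safe for arbitrary multiples, while ROUND_UP requires a power of two (the odd-looking 0 ? (n) : (d) operand exists purely so d is promoted to n's type before negation). DEMO_* copies are used so the fragment stands alone:

    #include <assert.h>

    /* Standalone copies (DEMO_*) of the macros above. */
    #define DEMO_ALIGN_DOWN(n, m) ((n) / (m) * (m))
    #define DEMO_ALIGN_UP(n, m)   DEMO_ALIGN_DOWN((n) + (m) - 1, (m))
    #define DEMO_ROUND_UP(n, d)   (((n) + (d) - 1) & -(0 ? (n) : (d)))

    int main(void)
    {
        assert(DEMO_ALIGN_DOWN(4097, 4096) == 4096);
        assert(DEMO_ALIGN_UP(4097, 4096) == 8192);
        assert(DEMO_ALIGN_UP(30, 10) == 30);        /* any multiple is fine */
        assert(DEMO_ROUND_UP(4097, 4096) == 8192);  /* d must be a power of 2 */
        return 0;
    }
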
+ */ +#define QEMU_IS_ARRAY(x) (!__builtin_types_compatible_p(typeof(x), \ + typeof(&(x)[0]))) #ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - -#ifndef always_inline -#if !((__GNUC__ < 3) || defined(__APPLE__)) -#ifdef __OPTIMIZE__ -#undef inline -#define inline __attribute__ (( always_inline )) __inline__ -#endif -#endif +#ifndef _MSC_VER +#define ARRAY_SIZE(x) ((sizeof(x) / sizeof((x)[0])) + \ + QEMU_BUILD_BUG_ON_ZERO(!QEMU_IS_ARRAY(x))) #else -#undef inline -#define inline always_inline +#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) +#endif #endif - -#define qemu_printf printf void *qemu_try_memalign(size_t alignment, size_t size); void *qemu_memalign(size_t alignment, size_t size); -void *qemu_anon_ram_alloc(size_t size, uint64_t *align); +void *qemu_anon_ram_alloc(struct uc_struct *uc, size_t size, uint64_t *align); void qemu_vfree(void *ptr); -void qemu_anon_ram_free(void *ptr, size_t size); +void qemu_anon_ram_free(struct uc_struct *uc, void *ptr, size_t size); + +#define QEMU_MADV_INVALID -1 + +#if defined(CONFIG_MADVISE) + +#define QEMU_MADV_WILLNEED MADV_WILLNEED +#define QEMU_MADV_DONTNEED MADV_DONTNEED +#ifdef MADV_DONTFORK +#define QEMU_MADV_DONTFORK MADV_DONTFORK +#else +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#endif +#ifdef MADV_MERGEABLE +#define QEMU_MADV_MERGEABLE MADV_MERGEABLE +#else +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#endif +#ifdef MADV_UNMERGEABLE +#define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE +#else +#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID +#endif +#ifdef MADV_DODUMP +#define QEMU_MADV_DODUMP MADV_DODUMP +#else +#define QEMU_MADV_DODUMP QEMU_MADV_INVALID +#endif +#ifdef MADV_DONTDUMP +#define QEMU_MADV_DONTDUMP MADV_DONTDUMP +#else +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#endif +#ifdef MADV_HUGEPAGE +#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE +#else +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID +#endif +#ifdef MADV_NOHUGEPAGE +#define QEMU_MADV_NOHUGEPAGE MADV_NOHUGEPAGE +#else +#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID +#endif +#ifdef MADV_REMOVE +#define QEMU_MADV_REMOVE MADV_REMOVE +#else +#define QEMU_MADV_REMOVE QEMU_MADV_INVALID +#endif + +#elif defined(CONFIG_POSIX_MADVISE) + +#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED +#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_DODUMP QEMU_MADV_INVALID +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID +#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID +#define QEMU_MADV_REMOVE QEMU_MADV_INVALID + +#else /* no-op */ + +#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID +#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_DODUMP QEMU_MADV_INVALID +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID +#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID +#define QEMU_MADV_REMOVE QEMU_MADV_INVALID + +#endif + +#ifdef _WIN32 +#define HAVE_CHARDEV_SERIAL 1 +#elif defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \ + || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) \ + || defined(__GLIBC__) +#define HAVE_CHARDEV_SERIAL 1 +#endif + +#if defined(__linux__) || defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel__) || defined(__DragonFly__) 
+#define HAVE_CHARDEV_PARPORT 1
+#endif
+
+#if defined(CONFIG_LINUX)
+#ifndef BUS_MCEERR_AR
+#define BUS_MCEERR_AR 4
+#endif
+#ifndef BUS_MCEERR_AO
+#define BUS_MCEERR_AO 5
+#endif
+#endif
+
+#if defined(__linux__) && \
+    (defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) \
+     || defined(__powerpc64__))
+   /* Use 2 MiB alignment so transparent hugepages can be used by KVM.
+      Valgrind does not support alignments larger than 1 MiB,
+      therefore we need special code which handles running on Valgrind. */
+# define QEMU_VMALLOC_ALIGN (512 * 4096)
+#elif defined(__linux__) && defined(__s390x__)
+   /* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */
+# define QEMU_VMALLOC_ALIGN (256 * 4096)
+#elif defined(__linux__) && defined(__sparc__)
+#include <sys/shm.h>
+# define QEMU_VMALLOC_ALIGN MAX(uc->qemu_real_host_page_size, SHMLBA)
+#else
+# define QEMU_VMALLOC_ALIGN uc->qemu_real_host_page_size
+#endif
+
+#ifdef CONFIG_POSIX
+struct qemu_signalfd_siginfo {
+    uint32_t ssi_signo;   /* Signal number */
+    int32_t  ssi_errno;   /* Error number (unused) */
+    int32_t  ssi_code;    /* Signal code */
+    uint32_t ssi_pid;     /* PID of sender */
+    uint32_t ssi_uid;     /* Real UID of sender */
+    int32_t  ssi_fd;      /* File descriptor (SIGIO) */
+    uint32_t ssi_tid;     /* Kernel timer ID (POSIX timers) */
+    uint32_t ssi_band;    /* Band event (SIGIO) */
+    uint32_t ssi_overrun; /* POSIX timer overrun count */
+    uint32_t ssi_trapno;  /* Trap number that caused signal */
+    int32_t  ssi_status;  /* Exit status or signal (SIGCHLD) */
+    int32_t  ssi_int;     /* Integer sent by sigqueue(2) */
+    uint64_t ssi_ptr;     /* Pointer sent by sigqueue(2) */
+    uint64_t ssi_utime;   /* User CPU time consumed (SIGCHLD) */
+    uint64_t ssi_stime;   /* System CPU time consumed (SIGCHLD) */
+    uint64_t ssi_addr;    /* Address that generated signal
+                             (for hardware-generated signals) */
+    uint8_t  pad[48];     /* Pad size to 128 bytes (allow for
+                             additional fields in the future) */
+};
+
+#endif
+
+int qemu_madvise(void *addr, size_t len, int advice);
+int qemu_mprotect_rwx(void *addr, size_t size);
+int qemu_mprotect_none(void *addr, size_t size);
 
 #if defined(__HAIKU__) && defined(__i386__)
 #define FMT_pid "%ld"
@@ -116,6 +482,25 @@ void qemu_anon_ram_free(void *ptr, size_t size);
 #define FMT_pid "%d"
 #endif
 
+int qemu_get_thread_id(void);
+
+#ifdef _WIN32
+static inline void qemu_timersub(const struct timeval *val1,
+                                 const struct timeval *val2,
+                                 struct timeval *res)
+{
+    res->tv_sec = val1->tv_sec - val2->tv_sec;
+    if (val1->tv_usec < val2->tv_usec) {
+        res->tv_sec--;
+        res->tv_usec = val1->tv_usec - val2->tv_usec + 1000 * 1000;
+    } else {
+        res->tv_usec = val1->tv_usec - val2->tv_usec;
+    }
+}
+#else
+#define qemu_timersub timersub
+#endif
+
 /**
  * qemu_getauxval:
  * @type: the auxiliary vector key to lookup
diff --git a/qemu/include/qemu/processor.h b/qemu/include/qemu/processor.h
new file mode 100644
index 00000000..8e16c927
--- /dev/null
+++ b/qemu/include/qemu/processor.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2016, Emilio G. Cota
+ *
+ * License: GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
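
The Win32 qemu_timersub above mirrors BSD timersub(): when the microsecond field would underflow, it borrows a full second. A standalone copy of the logic with a spot check of the borrow path:

    #include <assert.h>
    #include <sys/time.h>   /* struct timeval (winsock provides it on Win32) */

    static void demo_timersub(const struct timeval *val1,
                              const struct timeval *val2,
                              struct timeval *res)
    {
        res->tv_sec = val1->tv_sec - val2->tv_sec;
        if (val1->tv_usec < val2->tv_usec) {
            res->tv_sec--;                 /* borrow one full second */
            res->tv_usec = val1->tv_usec - val2->tv_usec + 1000 * 1000;
        } else {
            res->tv_usec = val1->tv_usec - val2->tv_usec;
        }
    }

    int main(void)
    {
        struct timeval a = { 5, 100 }, b = { 3, 200 }, r;

        demo_timersub(&a, &b, &r);
        assert(r.tv_sec == 1 && r.tv_usec == 999900);
        return 0;
    }
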
+ */ +#ifndef QEMU_PROCESSOR_H +#define QEMU_PROCESSOR_H + +#include "qemu/atomic.h" + +#if defined(__i386__) || defined(__x86_64__) +# define cpu_relax() asm volatile("rep; nop" ::: "memory") + +#elif defined(__aarch64__) +# define cpu_relax() asm volatile("yield" ::: "memory") + +#elif defined(__powerpc64__) +/* set Hardware Multi-Threading (HMT) priority to low; then back to medium */ +# define cpu_relax() asm volatile("or 1, 1, 1;" \ + "or 2, 2, 2;" ::: "memory") + +#else +# define cpu_relax() barrier() +#endif + +#endif /* QEMU_PROCESSOR_H */ diff --git a/qemu/include/qemu/qdist.h b/qemu/include/qemu/qdist.h new file mode 100644 index 00000000..79c1697e --- /dev/null +++ b/qemu/include/qemu/qdist.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2016, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef QEMU_QDIST_H +#define QEMU_QDIST_H + +#include "qemu/bitops.h" + +/* + * Samples with the same 'x value' end up in the same qdist_entry, + * e.g. inc(0.1) and inc(0.1) end up as {x=0.1, count=2}. + * + * Binning happens only at print time, so that we retain the flexibility to + * choose the binning. This might not be ideal for workloads that do not care + * much about precision and insert many samples all with different x values; + * in that case, pre-binning (e.g. entering both 0.115 and 0.097 as 0.1) + * should be considered. + */ +struct qdist_entry { + double x; + unsigned long count; +}; + +struct qdist { + struct qdist_entry *entries; + size_t n; + size_t size; +}; + +#define QDIST_PR_BORDER BIT(0) +#define QDIST_PR_LABELS BIT(1) +/* the remaining options only work if PR_LABELS is set */ +#define QDIST_PR_NODECIMAL BIT(2) +#define QDIST_PR_PERCENT BIT(3) +#define QDIST_PR_100X BIT(4) +#define QDIST_PR_NOBINRANGE BIT(5) + +void qdist_init(struct qdist *dist); +void qdist_destroy(struct qdist *dist); + +void qdist_add(struct qdist *dist, double x, long count); +void qdist_inc(struct qdist *dist, double x); +double qdist_xmin(const struct qdist *dist); +double qdist_xmax(const struct qdist *dist); +double qdist_avg(const struct qdist *dist); +unsigned long qdist_sample_count(const struct qdist *dist); +size_t qdist_unique_entries(const struct qdist *dist); + +/* Only qdist code and test code should ever call this function */ +void qdist_bin__internal(struct qdist *to, const struct qdist *from, size_t n); + +#endif /* QEMU_QDIST_H */ diff --git a/qemu/include/qemu/qht.h b/qemu/include/qemu/qht.h new file mode 100644 index 00000000..7afd069e --- /dev/null +++ b/qemu/include/qemu/qht.h @@ -0,0 +1,225 @@ +/* + * Copyright (C) 2016, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef QEMU_QHT_H +#define QEMU_QHT_H + +// #include "qemu/seqlock.h" +#include "qemu/thread.h" +#include "qemu/qdist.h" + +struct uc_struct; + +typedef bool (*qht_cmp_func_t)(struct uc_struct *uc, const void *a, const void *b); + +struct qht { + struct qht_map *map; + qht_cmp_func_t cmp; + unsigned int mode; +}; + +/** + * struct qht_stats - Statistics of a QHT + * @head_buckets: number of head buckets + * @used_head_buckets: number of non-empty head buckets + * @entries: total number of entries + * @chain: frequency distribution representing the number of buckets in each + * chain, excluding empty chains. + * @occupancy: frequency distribution representing chain occupancy rate. + * Valid range: from 0.0 (empty) to 1.0 (full occupancy). 
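
Intended use of the qdist API above, as a sketch; it assumes the qdist implementation added elsewhere in this patch is linked in:

    #include <stdio.h>
    #include "qemu/qdist.h"

    int main(void)
    {
        struct qdist d;

        qdist_init(&d);
        qdist_inc(&d, 0.1);   /* two samples with the same x value... */
        qdist_inc(&d, 0.1);   /* ...collapse into one {x=0.1, count=2} entry */
        qdist_add(&d, 0.7, 3);
        printf("avg=%f samples=%lu\n", qdist_avg(&d), qdist_sample_count(&d));
        qdist_destroy(&d);
        return 0;
    }
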
+ * + * An entry is a pointer-hash pair. + * Each bucket can host several entries. + * Chains are chains of buckets, whose first link is always a head bucket. + */ +struct qht_stats { + size_t head_buckets; + size_t used_head_buckets; + size_t entries; + struct qdist chain; + struct qdist occupancy; +}; + +typedef bool (*qht_lookup_func_t)(struct uc_struct *uc, const void *obj, const void *userp); +typedef void (*qht_iter_func_t)(struct uc_struct *uc, void *p, uint32_t h, void *up); +typedef bool (*qht_iter_bool_func_t)(void *p, uint32_t h, void *up); + +#define QHT_MODE_AUTO_RESIZE 0x1 /* auto-resize when heavily loaded */ +#define QHT_MODE_RAW_MUTEXES 0x2 /* bypass the profiler (QSP) */ + +/** + * qht_init - Initialize a QHT + * @ht: QHT to be initialized + * @cmp: default comparison function. Cannot be NULL. + * @n_elems: number of entries the hash table should be optimized for. + * @mode: bitmask with OR'ed QHT_MODE_* + */ +void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems, + unsigned int mode); + +/** + * qht_destroy - destroy a previously initialized QHT + * @ht: QHT to be destroyed + * + * Call only when there are no readers/writers left. + */ +void qht_destroy(struct qht *ht); + +/** + * qht_insert - Insert a pointer into the hash table + * @ht: QHT to insert to + * @p: pointer to be inserted + * @hash: hash corresponding to @p + * @existing: address where the pointer to an existing entry can be copied to + * + * Attempting to insert a NULL @p is a bug. + * Inserting the same pointer @p with different @hash values is a bug. + * + * In case of successful operation, smp_wmb() is implied before the pointer is + * inserted into the hash table. + * + * Returns true on success. + * Returns false if there is an existing entry in the table that is equivalent + * (i.e. ht->cmp matches and the hash is the same) to @p-@h. If @existing + * is !NULL, a pointer to this existing entry is copied to it. + */ +bool qht_insert(struct uc_struct *uc, struct qht *ht, void *p, uint32_t hash, void **existing); + +/** + * qht_lookup_custom - Look up a pointer using a custom comparison function. + * @ht: QHT to be looked up + * @userp: pointer to pass to @func + * @hash: hash of the pointer to be looked up + * @func: function to compare existing pointers against @userp + * + * Needs to be called under an RCU read-critical section. + * + * smp_read_barrier_depends() is implied before the call to @func. + * + * The user-provided @func compares pointers in QHT against @userp. + * If the function returns true, a match has been found. + * + * Returns the corresponding pointer when a match is found. + * Returns NULL otherwise. + */ +void *qht_lookup_custom(struct uc_struct *uc, const struct qht *ht, const void *userp, uint32_t hash, + qht_lookup_func_t func); + +/** + * qht_lookup - Look up a pointer in a QHT + * @ht: QHT to be looked up + * @userp: pointer to pass to the comparison function + * @hash: hash of the pointer to be looked up + * + * Calls qht_lookup_custom() using @ht's default comparison function. + */ +void *qht_lookup(struct uc_struct *uc, const struct qht *ht, const void *userp, uint32_t hash); + +/** + * qht_remove - remove a pointer from the hash table + * @ht: QHT to remove from + * @p: pointer to be removed + * @hash: hash corresponding to @p + * + * Attempting to remove a NULL @p is a bug. + * + * Just-removed @p pointers cannot be immediately freed; they need to remain + * valid until the end of the RCU grace period in which qht_remove() is called. 
+ * This guarantees that concurrent lookups will always compare against valid + * data. + * + * Returns true on success. + * Returns false if the @p-@hash pair was not found. + */ +bool qht_remove(struct qht *ht, const void *p, uint32_t hash); + +/** + * qht_reset - reset a QHT + * @ht: QHT to be reset + * + * All entries in the hash table are reset. No resizing is performed. + * + * If concurrent readers may exist, the objects pointed to by the hash table + * must remain valid for the existing RCU grace period -- see qht_remove(). + * See also: qht_reset_size() + */ +void qht_reset(struct qht *ht); + +/** + * qht_reset_size - reset and resize a QHT + * @ht: QHT to be reset and resized + * @n_elems: number of entries the resized hash table should be optimized for. + * + * Returns true if the resize was necessary and therefore performed. + * Returns false otherwise. + * + * If concurrent readers may exist, the objects pointed to by the hash table + * must remain valid for the existing RCU grace period -- see qht_remove(). + * See also: qht_reset(), qht_resize(). + */ +bool qht_reset_size(struct uc_struct *uc, struct qht *ht, size_t n_elems); + +/** + * qht_resize - resize a QHT + * @ht: QHT to be resized + * @n_elems: number of entries the resized hash table should be optimized for + * + * Returns true on success. + * Returns false if the resize was not necessary and therefore not performed. + * See also: qht_reset_size(). + */ +bool qht_resize(struct uc_struct *uc, struct qht *ht, size_t n_elems); + +/** + * qht_iter - Iterate over a QHT + * @ht: QHT to be iterated over + * @func: function to be called for each entry in QHT + * @userp: additional pointer to be passed to @func + * + * Each time it is called, user-provided @func is passed a pointer-hash pair, + * plus @userp. + * + * Note: @ht cannot be accessed from @func + * See also: qht_iter_remove() + */ +void qht_iter(struct uc_struct *uc, struct qht *ht, qht_iter_func_t func, void *userp); + +/** + * qht_iter_remove - Iterate over a QHT, optionally removing entries + * @ht: QHT to be iterated over + * @func: function to be called for each entry in QHT + * @userp: additional pointer to be passed to @func + * + * Each time it is called, user-provided @func is passed a pointer-hash pair, + * plus @userp. If @func returns true, the pointer-hash pair is removed. + * + * Note: @ht cannot be accessed from @func + * See also: qht_iter() + */ +void qht_iter_remove(struct uc_struct *uc, struct qht *ht, qht_iter_bool_func_t func, void *userp); + +/** + * qht_statistics_init - Gather statistics from a QHT + * @ht: QHT to gather statistics from + * @stats: pointer to a &struct qht_stats to be filled in + * + * Does NOT need to be called under an RCU read-critical section, + * since it does not dereference any pointers stored in the hash table. + * + * When done with @stats, pass the struct to qht_statistics_destroy(). + * Failing to do this will leak memory. + */ +void qht_statistics_init(const struct qht *ht, struct qht_stats *stats); + +/** + * qht_statistics_destroy - Destroy a &struct qht_stats + * @stats: &struct qht_stats to be destroyed + * + * See also: qht_statistics_init(). 
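
Putting the QHT API above together in one sketch; the hash value is a stand-in for a real hash function, the payload struct is invented, and the struct uc_struct parameter threading is the Unicorn-specific deviation from upstream QEMU:

    #include <stdbool.h>
    #include <stdint.h>
    #include "qemu/qht.h"

    struct item { uint32_t key; };

    static bool item_cmp(struct uc_struct *uc, const void *a, const void *b)
    {
        return ((const struct item *)a)->key == ((const struct item *)b)->key;
    }

    static void qht_demo(struct uc_struct *uc)
    {
        struct qht ht;
        struct item it = { 42 };
        uint32_t hash = it.key;   /* stand-in for a real hash function */
        void *existing;

        qht_init(&ht, item_cmp, 128, QHT_MODE_AUTO_RESIZE);
        if (qht_insert(uc, &ht, &it, hash, &existing)) {
            struct item *found = qht_lookup(uc, &ht, &it, hash);
            (void)found;          /* == &it on this path */
        }
        qht_remove(&ht, &it, hash);
        qht_destroy(&ht);
    }
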
+ */ +void qht_statistics_destroy(struct qht_stats *stats); + +#endif /* QEMU_QHT_H */ diff --git a/qemu/include/qemu/queue.h b/qemu/include/qemu/queue.h index d433b901..f0108346 100644 --- a/qemu/include/qemu/queue.h +++ b/qemu/include/qemu/queue.h @@ -37,8 +37,8 @@ * @(#)queue.h 8.5 (Berkeley) 8/20/94 */ -#ifndef QEMU_SYS_QUEUE_H_ -#define QEMU_SYS_QUEUE_H_ +#ifndef QEMU_SYS_QUEUE_H +#define QEMU_SYS_QUEUE_H /* * This file defines four types of data structures: singly-linked lists, @@ -78,8 +78,6 @@ * For details on the use of these macros, see the queue(3) manual page. */ -#include "qemu/atomic.h" /* for smp_wmb() */ - /* * List definitions. */ @@ -104,6 +102,19 @@ struct { \ (head)->lh_first = NULL; \ } while (/*CONSTCOND*/0) +#define QLIST_SWAP(dstlist, srclist, field) do { \ + void *tmplist; \ + tmplist = (srclist)->lh_first; \ + (srclist)->lh_first = (dstlist)->lh_first; \ + if ((srclist)->lh_first != NULL) { \ + (srclist)->lh_first->field.le_prev = &(srclist)->lh_first; \ + } \ + (dstlist)->lh_first = tmplist; \ + if ((dstlist)->lh_first != NULL) { \ + (dstlist)->lh_first->field.le_prev = &(dstlist)->lh_first; \ + } \ +} while (/*CONSTCOND*/0) + #define QLIST_INSERT_AFTER(listelm, elm, field) do { \ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ (listelm)->field.le_next->field.le_prev = \ @@ -126,24 +137,32 @@ struct { \ (elm)->field.le_prev = &(head)->lh_first; \ } while (/*CONSTCOND*/0) -#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \ - (elm)->field.le_prev = &(head)->lh_first; \ - (elm)->field.le_next = (head)->lh_first; \ - smp_wmb(); /* fill elm before linking it */ \ - if ((head)->lh_first != NULL) { \ - (head)->lh_first->field.le_prev = &(elm)->field.le_next; \ - } \ - (head)->lh_first = (elm); \ - smp_wmb(); \ -} while (/* CONSTCOND*/0) - #define QLIST_REMOVE(elm, field) do { \ if ((elm)->field.le_next != NULL) \ (elm)->field.le_next->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = (elm)->field.le_next; \ + (elm)->field.le_next = NULL; \ + (elm)->field.le_prev = NULL; \ } while (/*CONSTCOND*/0) +/* + * Like QLIST_REMOVE() but safe to call when elm is not in a list + */ +#define QLIST_SAFE_REMOVE(elm, field) do { \ + if ((elm)->field.le_prev != NULL) { \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ + (elm)->field.le_next = NULL; \ + (elm)->field.le_prev = NULL; \ + } \ +} while (/*CONSTCOND*/0) + +/* Is elm in a list? 
*/ +#define QLIST_IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL) + #define QLIST_FOREACH(var, head, field) \ for ((var) = ((head)->lh_first); \ (var); \ @@ -191,17 +210,44 @@ struct { \ } while (/*CONSTCOND*/0) #define QSLIST_INSERT_HEAD(head, elm, field) do { \ - (elm)->field.sle_next = (head)->slh_first; \ - (head)->slh_first = (elm); \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_INSERT_HEAD_ATOMIC(head, elm, field) do { \ + typeof(elm) save_sle_next; \ + do { \ + save_sle_next = (elm)->field.sle_next = (head)->slh_first; \ + } while (atomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) != \ + save_sle_next); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_MOVE_ATOMIC(dest, src) do { \ + (dest)->slh_first = atomic_xchg(&(src)->slh_first, NULL); \ } while (/*CONSTCOND*/0) #define QSLIST_REMOVE_HEAD(head, field) do { \ - (head)->slh_first = (head)->slh_first->field.sle_next; \ + typeof((head)->slh_first) elm = (head)->slh_first; \ + (head)->slh_first = elm->field.sle_next; \ + elm->field.sle_next = NULL; \ } while (/*CONSTCOND*/0) -#define QSLIST_REMOVE_AFTER(slistelm, field) do { \ - (slistelm)->field.sle_next = \ - QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field); \ +#define QSLIST_REMOVE_AFTER(slistelm, field) do { \ + typeof(slistelm) next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = next->field.sle_next; \ + next->field.sle_next = NULL; \ +} while (/*CONSTCOND*/0) + +#define QSLIST_REMOVE(head, elm, type, field) do { \ + if ((head)->slh_first == (elm)) { \ + QSLIST_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->slh_first; \ + while (curelm->field.sle_next != (elm)) \ + curelm = curelm->field.sle_next; \ + curelm->field.sle_next = curelm->field.sle_next->field.sle_next; \ + (elm)->field.sle_next = NULL; \ + } \ } while (/*CONSTCOND*/0) #define QSLIST_FOREACH(var, head, field) \ @@ -264,8 +310,21 @@ struct { \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_REMOVE_HEAD(head, field) do { \ - if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)\ + typeof((head)->sqh_first) elm = (head)->sqh_first; \ + if (((head)->sqh_first = elm->field.sqe_next) == NULL) \ (head)->sqh_last = &(head)->sqh_first; \ + elm->field.sqe_next = NULL; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_SPLIT_AFTER(head, elm, field, removed) do { \ + QSIMPLEQ_INIT(removed); \ + if (((removed)->sqh_first = (head)->sqh_first) != NULL) { \ + if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) { \ + (head)->sqh_last = &(head)->sqh_first; \ + } \ + (removed)->sqh_last = &(elm)->field.sqe_next; \ + (elm)->field.sqe_next = NULL; \ + } \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_REMOVE(head, elm, type, field) do { \ @@ -278,6 +337,7 @@ struct { \ if ((curelm->field.sqe_next = \ curelm->field.sqe_next->field.sqe_next) == NULL) \ (head)->sqh_last = &(curelm)->field.sqe_next; \ + (elm)->field.sqe_next = NULL; \ } \ } while (/*CONSTCOND*/0) @@ -299,6 +359,14 @@ struct { \ } \ } while (/*CONSTCOND*/0) +#define QSIMPLEQ_PREPEND(head1, head2) do { \ + if (!QSIMPLEQ_EMPTY((head2))) { \ + *(head2)->sqh_last = (head1)->sqh_first; \ + (head1)->sqh_first = (head2)->sqh_first; \ + QSIMPLEQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + #define QSIMPLEQ_LAST(head, type, field) \ (QSIMPLEQ_EMPTY((head)) ? \ NULL : \ @@ -308,82 +376,99 @@ struct { \ /* * Simple queue access methods. 
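The practical effect of the reworked removal macros above is that a removed element's links are NULLed, so list membership becomes testable after the fact. A short sketch under those semantics (QLIST_HEAD_INITIALIZER is assumed to exist as in the BSD-style queue headers; the node type is made up):

    #include <assert.h>

    struct node {
        int value;
        QLIST_ENTRY(node) link;
    };

    static QLIST_HEAD(, node) node_list = QLIST_HEAD_INITIALIZER(node_list);

    static void qlist_example(void)
    {
        struct node a = { .value = 1 };

        QLIST_INSERT_HEAD(&node_list, &a, link);
        assert(QLIST_IS_INSERTED(&a, link));

        QLIST_REMOVE(&a, link);              /* now also NULLs le_next/le_prev */
        assert(!QLIST_IS_INSERTED(&a, link));
        QLIST_SAFE_REMOVE(&a, link);         /* a second removal is a no-op */
    }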
*/ +#define QSIMPLEQ_EMPTY_ATOMIC(head) (atomic_read(&((head)->sqh_first)) == NULL) #define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) #define QSIMPLEQ_FIRST(head) ((head)->sqh_first) #define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) +typedef struct QTailQLink { + void *tql_next; + struct QTailQLink *tql_prev; +} QTailQLink; /* - * Tail queue definitions. + * Tail queue definitions. The union acts as a poor man template, as if + * it were QTailQLink. */ -#define Q_TAILQ_HEAD(name, type, qual) \ -struct name { \ - qual type *tqh_first; /* first element */ \ - qual type *qual *tqh_last; /* addr of last next element */ \ +#define QTAILQ_HEAD(name, type) \ +union name { \ + struct type *tqh_first; /* first element */ \ + QTailQLink tqh_circ; /* link for circular backwards list */ \ } -#define QTAILQ_HEAD(name, type) Q_TAILQ_HEAD(name, struct type,) #define QTAILQ_HEAD_INITIALIZER(head) \ - { NULL, &(head).tqh_first } + { .tqh_circ = { NULL, &(head).tqh_circ } } -#define Q_TAILQ_ENTRY(type, qual) \ -struct { \ - qual type *tqe_next; /* next element */ \ - qual type *qual *tqe_prev; /* address of previous next element */\ +#define QTAILQ_ENTRY(type) \ +union { \ + struct type *tqe_next; /* next element */ \ + QTailQLink tqe_circ; /* link for circular backwards list */ \ } -#define QTAILQ_ENTRY(type) Q_TAILQ_ENTRY(struct type,) /* * Tail queue functions. */ #define QTAILQ_INIT(head) do { \ (head)->tqh_first = NULL; \ - (head)->tqh_last = &(head)->tqh_first; \ + (head)->tqh_circ.tql_prev = &(head)->tqh_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ - (head)->tqh_first->field.tqe_prev = \ - &(elm)->field.tqe_next; \ + (head)->tqh_first->field.tqe_circ.tql_prev = \ + &(elm)->field.tqe_circ; \ else \ - (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ (head)->tqh_first = (elm); \ - (elm)->field.tqe_prev = &(head)->tqh_first; \ + (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.tqe_next = NULL; \ - (elm)->field.tqe_prev = (head)->tqh_last; \ - *(head)->tqh_last = (elm); \ - (head)->tqh_last = &(elm)->field.tqe_next; \ + (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \ + (head)->tqh_circ.tql_prev->tql_next = (elm); \ + (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ - (elm)->field.tqe_next->field.tqe_prev = \ - &(elm)->field.tqe_next; \ + (elm)->field.tqe_next->field.tqe_circ.tql_prev = \ + &(elm)->field.tqe_circ; \ else \ - (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ (listelm)->field.tqe_next = (elm); \ - (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ + (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \ } while (/*CONSTCOND*/0) -#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \ - (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ - (elm)->field.tqe_next = (listelm); \ - *(listelm)->field.tqe_prev = (elm); \ - (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \ + (elm)->field.tqe_next = (listelm); \ + (listelm)->field.tqe_circ.tql_prev->tql_next = (elm); \ + 
(listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \ } while (/*CONSTCOND*/0) #define QTAILQ_REMOVE(head, elm, field) do { \ if (((elm)->field.tqe_next) != NULL) \ - (elm)->field.tqe_next->field.tqe_prev = \ - (elm)->field.tqe_prev; \ + (elm)->field.tqe_next->field.tqe_circ.tql_prev = \ + (elm)->field.tqe_circ.tql_prev; \ else \ - (head)->tqh_last = (elm)->field.tqe_prev; \ - *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ + (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \ + (elm)->field.tqe_circ.tql_prev->tql_next = (elm)->field.tqe_next; \ + (elm)->field.tqe_circ.tql_prev = NULL; \ + (elm)->field.tqe_circ.tql_next = NULL; \ + (elm)->field.tqe_next = NULL; \ } while (/*CONSTCOND*/0) +/* remove @left, @right and all elements in between from @head */ +#define QTAILQ_REMOVE_SEVERAL(head, left, right, field) do { \ + if (((right)->field.tqe_next) != NULL) \ + (right)->field.tqe_next->field.tqe_circ.tql_prev = \ + (left)->field.tqe_circ.tql_prev; \ + else \ + (head)->tqh_circ.tql_prev = (left)->field.tqe_circ.tql_prev; \ + (left)->field.tqe_circ.tql_prev->tql_next = (right)->field.tqe_next; \ + } while (/*CONSTCOND*/0) + #define QTAILQ_FOREACH(var, head, field) \ for ((var) = ((head)->tqh_first); \ (var); \ @@ -394,10 +479,15 @@ struct { \ (var) && ((next_var) = ((var)->field.tqe_next), 1); \ (var) = (next_var)) -#define QTAILQ_FOREACH_REVERSE(var, head, headname, field) \ - for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ +#define QTAILQ_FOREACH_REVERSE(var, head, field) \ + for ((var) = QTAILQ_LAST(head); \ (var); \ - (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) + (var) = QTAILQ_PREV(var, field)) + +#define QTAILQ_FOREACH_REVERSE_SAFE(var, head, field, prev_var) \ + for ((var) = QTAILQ_LAST(head); \ + (var) && ((prev_var) = QTAILQ_PREV(var, field), 1); \ + (var) = (prev_var)) /* * Tail queue access methods. @@ -405,10 +495,88 @@ struct { \ #define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define QTAILQ_FIRST(head) ((head)->tqh_first) #define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next) +#define QTAILQ_IN_USE(elm, field) ((elm)->field.tqe_circ.tql_prev != NULL) -#define QTAILQ_LAST(head, headname) \ - (*(((struct headname *)((head)->tqh_last))->tqh_last)) -#define QTAILQ_PREV(elm, headname, field) \ - (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) +#define QTAILQ_LINK_PREV(link) \ + ((link).tql_prev->tql_prev->tql_next) +#ifndef _MSC_VER +#define QTAILQ_LAST(head) \ + ((typeof((head)->tqh_first)) QTAILQ_LINK_PREV((head)->tqh_circ)) +#define QTAILQ_PREV(elm, field) \ + ((typeof((elm)->field.tqe_next)) QTAILQ_LINK_PREV((elm)->field.tqe_circ)) +#else +#define QTAILQ_LAST(head) \ + (QTAILQ_LINK_PREV((head)->tqh_circ)) +#define QTAILQ_PREV(elm, field) \ + (QTAILQ_LINK_PREV((elm)->field.tqe_circ)) +#endif -#endif /* !QEMU_SYS_QUEUE_H_ */ +#define field_at_offset(base, offset, type) \ + ((type *) (((char *) (base)) + (offset))) + +/* + * Raw access of elements of a tail queue head. Offsets are all zero + * because it's a union. + */ +#define QTAILQ_RAW_FIRST(head) \ + field_at_offset(head, 0, void *) +#define QTAILQ_RAW_TQH_CIRC(head) \ + field_at_offset(head, 0, QTailQLink) + +/* + * Raw access of elements of a tail entry + */ +#define QTAILQ_RAW_NEXT(elm, entry) \ + field_at_offset(elm, entry, void *) +#define QTAILQ_RAW_TQE_CIRC(elm, entry) \ + field_at_offset(elm, entry, QTailQLink) +/* + * Tail queue traversal using pointer arithmetic. 
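Because the head is now a union overlaying a QTailQLink, backwards traversal no longer needs the headname type argument, which changes the macro signature at every call site. A hypothetical before/after sketch:

    #include <assert.h>

    struct item {
        int value;
        QTAILQ_ENTRY(item) entry;
    };

    static QTAILQ_HEAD(, item) items = QTAILQ_HEAD_INITIALIZER(items);

    static void qtailq_example(void)
    {
        struct item a = { .value = 1 }, b = { .value = 2 }, *it;

        QTAILQ_INSERT_TAIL(&items, &a, entry);
        QTAILQ_INSERT_TAIL(&items, &b, entry);

        /* old API: QTAILQ_FOREACH_REVERSE(it, &items, headname, entry) */
        QTAILQ_FOREACH_REVERSE(it, &items, entry) {
            /* visits b, then a */
        }

        QTAILQ_REMOVE(&items, &a, entry);    /* links are NULLed on removal... */
        assert(!QTAILQ_IN_USE(&a, entry));   /* ...so this holds afterwards */
    }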
+ */ +#define QTAILQ_RAW_FOREACH(elm, head, entry) \ + for ((elm) = *QTAILQ_RAW_FIRST(head); \ + (elm); \ + (elm) = *QTAILQ_RAW_NEXT(elm, entry)) +/* + * Tail queue insertion using pointer arithmetic. + */ +#define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do { \ + *QTAILQ_RAW_NEXT(elm, entry) = NULL; \ + QTAILQ_RAW_TQE_CIRC(elm, entry)->tql_prev = QTAILQ_RAW_TQH_CIRC(head)->tql_prev; \ + QTAILQ_RAW_TQH_CIRC(head)->tql_prev->tql_next = (elm); \ + QTAILQ_RAW_TQH_CIRC(head)->tql_prev = QTAILQ_RAW_TQE_CIRC(elm, entry); \ +} while (/*CONSTCOND*/0) + +#define QLIST_RAW_FIRST(head) \ + field_at_offset(head, 0, void *) + +#define QLIST_RAW_NEXT(elm, entry) \ + field_at_offset(elm, entry, void *) + +#define QLIST_RAW_PREVIOUS(elm, entry) \ + field_at_offset(elm, entry + sizeof(void *), void *) + +#define QLIST_RAW_FOREACH(elm, head, entry) \ + for ((elm) = *QLIST_RAW_FIRST(head); \ + (elm); \ + (elm) = *QLIST_RAW_NEXT(elm, entry)) + +#define QLIST_RAW_INSERT_AFTER(head, prev, elem, entry) do { \ + *QLIST_RAW_NEXT(prev, entry) = elem; \ + *QLIST_RAW_PREVIOUS(elem, entry) = QLIST_RAW_NEXT(prev, entry); \ + *QLIST_RAW_NEXT(elem, entry) = NULL; \ +} while (0) + +#define QLIST_RAW_INSERT_HEAD(head, elm, entry) do { \ + void *first = *QLIST_RAW_FIRST(head); \ + *QLIST_RAW_FIRST(head) = elm; \ + *QLIST_RAW_PREVIOUS(elm, entry) = QLIST_RAW_FIRST(head); \ + if (first) { \ + *QLIST_RAW_NEXT(elm, entry) = first; \ + *QLIST_RAW_PREVIOUS(first, entry) = QLIST_RAW_NEXT(elm, entry); \ + } else { \ + *QLIST_RAW_NEXT(elm, entry) = NULL; \ + } \ +} while (0) + +#endif /* QEMU_SYS_QUEUE_H */ diff --git a/qemu/include/qemu/range.h b/qemu/include/qemu/range.h index f5b1c5f3..f62b363e 100644 --- a/qemu/include/qemu/range.h +++ b/qemu/include/qemu/range.h @@ -1,39 +1,195 @@ +/* + * QEMU 64-bit address ranges + * + * Copyright (c) 2015-2016 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + #ifndef QEMU_RANGE_H #define QEMU_RANGE_H -#include "unicorn/platform.h" -#include -#include "qemu/queue.h" - /* * Operations on 64 bit address ranges. * Notes: - * - ranges must not wrap around 0, but can include the last byte ~0x0LL. - * - this can not represent a full 0 to ~0x0LL range. + * - Ranges must not wrap around 0, but can include UINT64_MAX. */ -/* A structure representing a range of addresses. */ struct Range { - uint64_t begin; /* First byte of the range, or 0 if empty. */ - uint64_t end; /* 1 + the last byte. 0 if range empty or ends at ~0x0LL. */ + /* + * Do not access members directly, use the functions! + * A non-empty range has @lob <= @upb. + * An empty range has @lob == @upb + 1. + */ + uint64_t lob; /* inclusive lower bound */ + uint64_t upb; /* inclusive upper bound */ }; +static inline void range_invariant(const Range *range) +{ + assert(range->lob <= range->upb || range->lob == range->upb + 1); +} + +/* Compound literal encoding the empty range */ +#define range_empty ((Range){ .lob = 1, .upb = 0 }) + +/* Is @range empty? 
*/ +static inline bool range_is_empty(const Range *range) +{ + range_invariant(range); + return range->lob > range->upb; +} + +/* Does @range contain @val? */ +static inline bool range_contains(const Range *range, uint64_t val) +{ + return val >= range->lob && val <= range->upb; +} + +/* Initialize @range to the empty range */ +static inline void range_make_empty(Range *range) +{ + *range = range_empty; + assert(range_is_empty(range)); +} + +/* + * Initialize @range to span the interval [@lob,@upb]. + * Both bounds are inclusive. + * The interval must not be empty, i.e. @lob must be less than or + * equal @upb. + */ +static inline void range_set_bounds(Range *range, uint64_t lob, uint64_t upb) +{ + range->lob = lob; + range->upb = upb; + assert(!range_is_empty(range)); +} + +/* + * Initialize @range to span the interval [@lob,@upb_plus1). + * The lower bound is inclusive, the upper bound is exclusive. + * Zero @upb_plus1 is special: if @lob is also zero, set @range to the + * empty range. Else, set @range to [@lob,UINT64_MAX]. + */ +static inline void range_set_bounds1(Range *range, + uint64_t lob, uint64_t upb_plus1) +{ + if (!lob && !upb_plus1) { + *range = range_empty; + } else { + range->lob = lob; + range->upb = upb_plus1 - 1; + } + range_invariant(range); +} + +/* Return @range's lower bound. @range must not be empty. */ +static inline uint64_t range_lob(Range *range) +{ + assert(!range_is_empty(range)); + return range->lob; +} + +/* Return @range's upper bound. @range must not be empty. */ +static inline uint64_t range_upb(Range *range) +{ + assert(!range_is_empty(range)); + return range->upb; +} + +/* + * Initialize @range to span the interval [@lob,@lob + @size - 1]. + * @size may be 0. If the range would overflow, returns -ERANGE, otherwise + * 0. + */ +static inline int QEMU_WARN_UNUSED_RESULT range_init(Range *range, uint64_t lob, + uint64_t size) +{ + if (lob + size < lob) { + return -ERANGE; + } + range->lob = lob; + range->upb = lob + size - 1; + range_invariant(range); + return 0; +} + +/* + * Initialize @range to span the interval [@lob,@lob + @size - 1]. + * @size may be 0. Range must not overflow. + */ +static inline void range_init_nofail(Range *range, uint64_t lob, uint64_t size) +{ + range->lob = lob; + range->upb = lob + size - 1; + range_invariant(range); +} + +/* + * Get the size of @range. + */ +static inline uint64_t range_size(const Range *range) +{ + return range->upb - range->lob + 1; +} + +/* + * Check if @range1 overlaps with @range2. If one of the ranges is empty, + * the result is always "false". + */ +static inline bool range_overlaps_range(const Range *range1, + const Range *range2) +{ + if (range_is_empty(range1) || range_is_empty(range2)) { + return false; + } + return !(range2->upb < range1->lob || range1->upb < range2->lob); +} + +/* + * Check if @range1 contains @range2. If one of the ranges is empty, + * the result is always "false". + */ +static inline bool range_contains_range(const Range *range1, + const Range *range2) +{ + if (range_is_empty(range1) || range_is_empty(range2)) { + return false; + } + return range1->lob <= range2->lob && range1->upb >= range2->upb; +} + +/* + * Extend @range to the smallest interval that includes @extend_by, too. 
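range_extend(), defined next, folds one range into another under these invariants. Before its definition, a minimal sketch of the inclusive-bounds API; the addresses are made up:

    #include <assert.h>

    static void range_example(void)
    {
        Range r, s;

        range_init_nofail(&r, 0x1000, 0x1000);   /* [0x1000, 0x1FFF] */
        range_set_bounds(&s, 0x1800, 0x2FFF);    /* [0x1800, 0x2FFF] */

        assert(range_size(&r) == 0x1000);
        assert(range_contains(&r, 0x1FFF));
        assert(range_overlaps_range(&r, &s));
        assert(!range_contains_range(&r, &s));

        range_extend(&r, &s);                    /* r becomes [0x1000, 0x2FFF] */
        assert(range_lob(&r) == 0x1000 && range_upb(&r) == 0x2FFF);
    }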
+ */ static inline void range_extend(Range *range, Range *extend_by) { - if (!extend_by->begin && !extend_by->end) { + if (range_is_empty(extend_by)) { return; } - if (!range->begin && !range->end) { + if (range_is_empty(range)) { *range = *extend_by; return; } - if (range->begin > extend_by->begin) { - range->begin = extend_by->begin; + if (range->lob > extend_by->lob) { + range->lob = extend_by->lob; } - /* Compare last byte in case region ends at ~0x0LL */ - if (range->end - 1 < extend_by->end - 1) { - range->end = extend_by->end; + if (range->upb < extend_by->upb) { + range->upb = extend_by->upb; } + range_invariant(range); } /* Get last byte of a range from offset + length. @@ -61,75 +217,6 @@ static inline int ranges_overlap(uint64_t first1, uint64_t len1, return !(last2 < first1 || last1 < first2); } -/* 0,1 can merge with 1,2 but don't overlap */ -static inline bool ranges_can_merge(Range *range1, Range *range2) -{ - return !(range1->end < range2->begin || range2->end < range1->begin); -} - -static inline int range_merge(Range *range1, Range *range2) -{ - if (ranges_can_merge(range1, range2)) { - if (range1->end < range2->end) { - range1->end = range2->end; - } - if (range1->begin > range2->begin) { - range1->begin = range2->begin; - } - return 0; - } - - return -1; -} - -static inline GList *g_list_insert_sorted_merged(GList *list, - gpointer data, - GCompareFunc func) -{ - GList *l, *next = NULL; - Range *r, *nextr; - - if (!list) { - list = g_list_insert_sorted(list, data, func); - return list; - } - - nextr = data; - l = list; - while (l && l != next && nextr) { - r = l->data; - if (ranges_can_merge(r, nextr)) { - range_merge(r, nextr); - l = g_list_remove_link(l, next); - next = g_list_next(l); - if (next) { - nextr = next->data; - } else { - nextr = NULL; - } - } else { - l = g_list_next(l); - } - } - - if (!l) { - list = g_list_insert_sorted(list, data, func); - } - - return list; -} - -static inline gint range_compare(gconstpointer a, gconstpointer b) -{ - Range *ra = (Range *)a, *rb = (Range *)b; - if (ra->begin == rb->begin && ra->end == rb->end) { - return 0; - } else if (range_get_last(ra->begin, ra->end) < - range_get_last(rb->begin, rb->end)) { - return -1; - } else { - return 1; - } -} +GList *range_list_insert(GList *list, Range *data); #endif diff --git a/qemu/include/qemu/thread-posix.h b/qemu/include/qemu/thread-posix.h index 36f22721..05777209 100644 --- a/qemu/include/qemu/thread-posix.h +++ b/qemu/include/qemu/thread-posix.h @@ -1,6 +1,7 @@ -#ifndef __QEMU_THREAD_POSIX_H -#define __QEMU_THREAD_POSIX_H 1 -#include "pthread.h" +#ifndef QEMU_THREAD_POSIX_H +#define QEMU_THREAD_POSIX_H + +#include #include struct QemuThread { diff --git a/qemu/include/qemu/thread-win32.h b/qemu/include/qemu/thread-win32.h index 9f82ea23..34bb221e 100644 --- a/qemu/include/qemu/thread-win32.h +++ b/qemu/include/qemu/thread-win32.h @@ -1,6 +1,7 @@ -#ifndef __QEMU_THREAD_WIN32_H -#define __QEMU_THREAD_WIN32_H 1 -#include "windows.h" +#ifndef QEMU_THREAD_WIN32_H +#define QEMU_THREAD_WIN32_H + +#include typedef struct QemuThreadData QemuThreadData; struct QemuThread { @@ -9,6 +10,6 @@ struct QemuThread { }; /* Only valid for joinable threads. 
 */ -HANDLE qemu_thread_get_handle(QemuThread *thread); +HANDLE qemu_thread_get_handle(struct QemuThread *thread); #endif diff --git a/qemu/include/qemu/thread.h b/qemu/include/qemu/thread.h index c5d25f61..be4834d2 100644 --- a/qemu/include/qemu/thread.h +++ b/qemu/include/qemu/thread.h @@ -1,11 +1,13 @@ -#ifndef __QEMU_THREAD_H -#define __QEMU_THREAD_H 1 +#ifndef QEMU_THREAD_H +#define QEMU_THREAD_H #include "unicorn/platform.h" +#include "qemu/processor.h" +struct uc_struct; typedef struct QemuThread QemuThread; -#ifdef _WIN32 +#if defined(_WIN32) && !defined(__MINGW32__) #include "qemu/thread-win32.h" #else #include "qemu/thread-posix.h" @@ -14,8 +16,6 @@ typedef struct QemuThread QemuThread; #define QEMU_THREAD_JOINABLE 0 #define QEMU_THREAD_DETACHED 1 -struct uc_struct; -// return -1 on error, 0 on success int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *name, void *(*start_routine)(void *), void *arg, int mode); diff --git a/qemu/include/qemu/timer.h b/qemu/include/qemu/timer.h index d106e680..2e15948d 100644 --- a/qemu/include/qemu/timer.h +++ b/qemu/include/qemu/timer.h @@ -1,8 +1,10 @@ #ifndef QEMU_TIMER_H #define QEMU_TIMER_H -#include "qemu/typedefs.h" -#include "qemu-common.h" +#include "qemu/bitops.h" +#include "qemu/host-utils.h" + +#define NANOSECONDS_PER_SECOND 1000000000LL /* timers */ @@ -18,32 +20,57 @@ * @QEMU_CLOCK_REALTIME: Real time clock * * The real time clock should be used only for stuff which does not - * change the virtual machine state, as it is run even if the virtual - * machine is stopped. The real time clock has a frequency of 1000 - * Hz. + * change the virtual machine state, as it runs even if the virtual + * machine is stopped. * * @QEMU_CLOCK_VIRTUAL: virtual clock * - * The virtual clock is only run during the emulation. It is stopped - * when the virtual machine is stopped. Virtual timers use a high - * precision clock, usually cpu cycles (use ticks_per_sec). + * The virtual clock only runs during the emulation. It stops + * when the virtual machine is stopped. * * @QEMU_CLOCK_HOST: host clock * - * The host clock should be use for device models that emulate accurate + * The host clock should be used for device models that emulate accurate * real time sources. It will continue to run when the virtual machine * is suspended, and it will reflect system time changes the host may - * undergo (e.g. due to NTP). The host clock has the same precision as - * the virtual clock. + * undergo (e.g. due to NTP). + * + * @QEMU_CLOCK_VIRTUAL_RT: realtime clock used for icount warp + * + * Outside icount mode, this clock is the same as @QEMU_CLOCK_VIRTUAL. + * In icount mode, this clock counts nanoseconds while the virtual + * machine is running. It is used to increase @QEMU_CLOCK_VIRTUAL + * while the CPUs are sleeping and thus not executing instructions. */ typedef enum { QEMU_CLOCK_REALTIME = 0, QEMU_CLOCK_VIRTUAL = 1, QEMU_CLOCK_HOST = 2, + QEMU_CLOCK_VIRTUAL_RT = 3, QEMU_CLOCK_MAX } QEMUClockType; +/** + * QEMU Timer attributes: + * + * An individual timer may be given one or multiple attributes when initialized. + * Each attribute corresponds to one bit. Attributes modify the processing + * of timers when they fire. + * + * The following attributes are available: + * + * QEMU_TIMER_ATTR_EXTERNAL: drives external subsystem + * QEMU_TIMER_ATTR_ALL: mask for all existing attributes + * + * Timers with the EXTERNAL attribute are not recorded in rr mode, and can + * therefore be used for subsystems that operate outside the guest core.
This is applicable only + * to the virtual clock type. + */ + +#define QEMU_TIMER_ATTR_EXTERNAL ((int)BIT(0)) +#define QEMU_TIMER_ATTR_ALL 0xffffffff + typedef struct QEMUTimerList QEMUTimerList; struct QEMUTimerListGroup { @@ -51,7 +78,7 @@ struct QEMUTimerListGroup { }; typedef void QEMUTimerCB(void *opaque); -typedef void QEMUTimerListNotifyCB(void *opaque); +typedef void QEMUTimerListNotifyCB(void *opaque, QEMUClockType type); struct QEMUTimer { int64_t expire_time; /* in nanoseconds */ @@ -59,13 +86,10 @@ struct QEMUTimer { QEMUTimerCB *cb; void *opaque; QEMUTimer *next; + int attributes; int scale; }; -/* - * QEMUClockType - */ - /* * qemu_clock_get_ns; * @type: the clock type @@ -105,6 +129,599 @@ static inline int64_t qemu_clock_get_us(QEMUClockType type) return qemu_clock_get_ns(type) / SCALE_US; } +/** + * qemu_clock_has_timers: + * @type: the clock type + * + * Determines whether a clock's default timer list + * has timers attached + * + * Note that this function should not be used when other threads also access + * the timer list. The return value may be outdated by the time it is acted + * upon. + * + * Returns: true if the clock's default timer list + * has timers attached + */ +bool qemu_clock_has_timers(QEMUClockType type); + +/** + * qemu_clock_expired: + * @type: the clock type + * + * Determines whether a clock's default timer list + * has an expired timer. + * + * Returns: true if the clock's default timer list has + * an expired timer + */ +bool qemu_clock_expired(QEMUClockType type); + +/** + * qemu_clock_use_for_deadline: + * @type: the clock type + * + * Determine whether a clock should be used for deadline + * calculations. Some clocks, for instance vm_clock with + * use_icount set, do not count in nanoseconds. Such clocks + * are not used for deadline calculations, and are presumed + * to interrupt any poll using qemu_notify/aio_notify + * etc. + * + * Returns: true if the clock runs in nanoseconds and + * should be used for a deadline. + */ +bool qemu_clock_use_for_deadline(QEMUClockType type); + +/** + * qemu_clock_deadline_ns_all: + * @type: the clock type + * @attr_mask: mask for the timer attributes that are included + * in deadline calculation + * + * Calculate the deadline across all timer lists associated + * with a clock (as opposed to just the default one) + * in nanoseconds, or -1 if no timer is set to expire. + * + * Returns: time until expiry in nanoseconds or -1 + */ +int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask); + +/** + * qemu_clock_get_main_loop_timerlist: + * @type: the clock type + * + * Return the default timer list associated with a clock. + * + * Returns: the default timer list + */ +QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type); + +/** + * qemu_clock_notify: + * @type: the clock type + * + * Call the notifier callback connected with the default timer + * list linked to the clock, or qemu_notify() if none. + */ +void qemu_clock_notify(QEMUClockType type); + +/** + * qemu_clock_enable: + * @type: the clock type + * @enabled: true to enable, false to disable + * + * Enable or disable a clock. + * Disabling the clock will wait for related timerlists to stop + * executing qemu_run_timers. Thus, this function should not + * be used from the callback of a timer that is based on @clock. + * Doing so would cause a deadlock. + * + * Caller should hold BQL.
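The @attr_mask parameter of qemu_clock_deadline_ns_all() makes it possible to leave whole timer classes out of a deadline computation. A plausible use, mirroring how a main loop would mask external timers out of its poll timeout:

    /* Nanoseconds until the next "core" virtual timer fires, ignoring timers
     * that merely drive external subsystems; -1 means no timer is armed. */
    static int64_t example_poll_timeout_ns(void)
    {
        return qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                          QEMU_TIMER_ATTR_ALL &
                                          ~QEMU_TIMER_ATTR_EXTERNAL);
    }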
+ */ +void qemu_clock_enable(QEMUClockType type, bool enabled); + +/** + * qemu_start_warp_timer: + * + * Starts a timer for virtual clock update + */ +void qemu_start_warp_timer(void); + +/** + * qemu_clock_run_timers: + * @type: clock on which to operate + * + * Run all the timers associated with the default timer list + * of a clock. + * + * Returns: true if any timer ran. + */ +bool qemu_clock_run_timers(QEMUClockType type); + +/** + * qemu_clock_run_all_timers: + * + * Run all the timers associated with the default timer list + * of every clock. + * + * Returns: true if any timer ran. + */ +bool qemu_clock_run_all_timers(void); + + +/* + * QEMUTimerList + */ + +/** + * timerlist_new: + * @type: the clock type to associate with the timerlist + * @cb: the callback to call on notification + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timerlist associated with the clock of + * type @type. + * + * Returns: a pointer to the QEMUTimerList created + */ +QEMUTimerList *timerlist_new(QEMUClockType type, + QEMUTimerListNotifyCB *cb, void *opaque); + +/** + * timerlist_free: + * @timer_list: the timer list to free + * + * Frees a timer_list. It must have no active timers. + */ +void timerlist_free(QEMUTimerList *timer_list); + +/** + * timerlist_has_timers: + * @timer_list: the timer list to operate on + * + * Determine whether a timer list has active timers + * + * Note that this function should not be used when other threads also access + * the timer list. The return value may be outdated by the time it is acted + * upon. + * + * Returns: true if the timer list has timers. + */ +bool timerlist_has_timers(QEMUTimerList *timer_list); + +/** + * timerlist_expired: + * @timer_list: the timer list to operate on + * + * Determine whether a timer list has any timers which + * are expired. + * + * Returns: true if the timer list has timers which + * have expired. + */ +bool timerlist_expired(QEMUTimerList *timer_list); + +/** + * timerlist_deadline_ns: + * @timer_list: the timer list to operate on + * + * Determine the deadline for a timer_list, i.e. + * the number of nanoseconds until the first timer + * expires. Return -1 if there are no timers. + * + * Returns: the number of nanoseconds until the earliest + * timer expires -1 if none + */ +int64_t timerlist_deadline_ns(QEMUTimerList *timer_list); + +/** + * timerlist_get_clock: + * @timer_list: the timer list to operate on + * + * Determine the clock type associated with a timer list. + * + * Returns: the clock type associated with the + * timer list. + */ +QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list); + +/** + * timerlist_run_timers: + * @timer_list: the timer list to use + * + * Call all expired timers associated with the timer list. + * + * Returns: true if any timer expired + */ +bool timerlist_run_timers(QEMUTimerList *timer_list); + +/** + * timerlist_notify: + * @timer_list: the timer list to use + * + * call the notifier callback associated with the timer list. + */ +void timerlist_notify(QEMUTimerList *timer_list); + +/* + * QEMUTimerListGroup + */ + +/** + * timerlistgroup_init: + * @tlg: the timer list group + * @cb: the callback to call when a notify is required + * @opaque: the opaque pointer to be passed to the callback. + * + * Initialise a timer list group. This must already be + * allocated in memory and zeroed. The notifier callback is + * called whenever a clock in the timer list group is + * reenabled or whenever a timer associated with any timer + * list is modified. 
If @cb is specified as null, qemu_notify() + * is used instead. + */ +void timerlistgroup_init(QEMUTimerListGroup *tlg, + QEMUTimerListNotifyCB *cb, void *opaque); + +/** + * timerlistgroup_deinit: + * @tlg: the timer list group + * + * Deinitialise a timer list group. This must already be + * initialised. Note the memory is not freed. + */ +void timerlistgroup_deinit(QEMUTimerListGroup *tlg); + +/** + * timerlistgroup_run_timers: + * @tlg: the timer list group + * + * Run the timers associated with a timer list group. + * This will run timers on multiple clocks. + * + * Returns: true if any timer callback ran + */ +bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg); + +/** + * timerlistgroup_deadline_ns: + * @tlg: the timer list group + * + * Determine the deadline of the soonest timer to + * expire associated with any timer list linked to + * the timer list group. Only clocks suitable for + * deadline calculation are included. + * + * Returns: the deadline in nanoseconds or -1 if no + * timers are to expire. + */ +int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg); + +/* + * QEMUTimer + */ + +/** + * timer_init_full: + * @ts: the timer to be initialised + * @timer_list_group: (optional) the timer list group to attach the timer to + * @type: the clock type to use + * @scale: the scale value for the timer + * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_ values + * @cb: the callback to be called when the timer expires + * @opaque: the opaque pointer to be passed to the callback + * + * Initialise a timer with the given scale and attributes, + * and associate it with timer list for given clock @type in @timer_list_group + * (or default timer list group, if NULL). + * The caller is responsible for allocating the memory. + * + * You need not call an explicit deinit call. Simply make + * sure it is not on a list with timer_del. + */ +void timer_init_full(QEMUTimer *ts, + QEMUTimerListGroup *timer_list_group, QEMUClockType type, + int scale, int attributes, + QEMUTimerCB *cb, void *opaque); + +/** + * timer_init: + * @ts: the timer to be initialised + * @type: the clock to associate with the timer + * @scale: the scale value for the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Initialize a timer with the given scale on the default timer list + * associated with the clock. + * See timer_init_full for details. + */ +static inline void timer_init(QEMUTimer *ts, QEMUClockType type, int scale, + QEMUTimerCB *cb, void *opaque) +{ + // timer_init_full(ts, NULL, type, scale, 0, cb, opaque); +} + +/** + * timer_init_ns: + * @ts: the timer to be initialised + * @type: the clock to associate with the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Initialize a timer with nanosecond scale on the default timer list + * associated with the clock. + * See timer_init_full for details. + */ +static inline void timer_init_ns(QEMUTimer *ts, QEMUClockType type, + QEMUTimerCB *cb, void *opaque) +{ + timer_init(ts, type, SCALE_NS, cb, opaque); +} + +/** + * timer_init_us: + * @ts: the timer to be initialised + * @type: the clock to associate with the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Initialize a timer with microsecond scale on the default timer list + * associated with the clock. + * See timer_init_full for details. 
+ */ +static inline void timer_init_us(QEMUTimer *ts, QEMUClockType type, + QEMUTimerCB *cb, void *opaque) +{ + timer_init(ts, type, SCALE_US, cb, opaque); +} + +/** + * timer_init_ms: + * @ts: the timer to be initialised + * @type: the clock to associate with the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Initialize a timer with millisecond scale on the default timer list + * associated with the clock. + * See timer_init_full for details. + */ +static inline void timer_init_ms(QEMUTimer *ts, QEMUClockType type, + QEMUTimerCB *cb, void *opaque) +{ + timer_init(ts, type, SCALE_MS, cb, opaque); +} + +/** + * timer_new_full: + * @timer_list_group: (optional) the timer list group to attach the timer to + * @type: the clock type to use + * @scale: the scale value for the timer + * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_ values + * @cb: the callback to be called when the timer expires + * @opaque: the opaque pointer to be passed to the callback + * + * Create a new timer with the given scale and attributes, + * and associate it with timer list for given clock @type in @timer_list_group + * (or default timer list group, if NULL). + * The memory is allocated by the function. + * + * This is not the preferred interface unless you know you + * are going to call timer_free. Use timer_init or timer_init_full instead. + * + * The default timer list has one special feature: in icount mode, + * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is + * not true of other timer lists, which are typically associated + * with an AioContext---each of them runs its timer callbacks in its own + * AioContext thread. + * + * Returns: a pointer to the timer + */ +static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group, + QEMUClockType type, + int scale, int attributes, + QEMUTimerCB *cb, void *opaque) +{ + QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer)); + // timer_init_full(ts, timer_list_group, type, scale, attributes, cb, opaque); + return ts; +} + +/** + * timer_new: + * @type: the clock type to use + * @scale: the scale value for the timer + * @cb: the callback to be called when the timer expires + * @opaque: the opaque pointer to be passed to the callback + * + * Create a new timer with the given scale, + * and associate it with the default timer list for the clock type @type. + * See timer_new_full for details. + * + * Returns: a pointer to the timer + */ +static inline QEMUTimer *timer_new(QEMUClockType type, int scale, + QEMUTimerCB *cb, void *opaque) +{ + return timer_new_full(NULL, type, scale, 0, cb, opaque); +} + +/** + * timer_new_ns: + * @type: the clock type to associate with the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timer with nanosecond scale on the default timer list + * associated with the clock. + * See timer_new_full for details. + * + * Returns: a pointer to the newly created timer + */ +static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb, + void *opaque) +{ + return timer_new(type, SCALE_NS, cb, opaque); +} + +/** + * timer_new_us: + * @type: the clock type to associate with the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timer with microsecond scale on the default timer list + * associated with the clock. + * See timer_new_full for details. 
+ * + * Returns: a pointer to the newly created timer + */ +static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb, + void *opaque) +{ + return timer_new(type, SCALE_US, cb, opaque); +} + +/** + * timer_new_ms: + * @type: the clock type to associate with the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timer with millisecond scale on the default timer list + * associated with the clock. + * See timer_new_full for details. + * + * Returns: a pointer to the newly created timer + */ +static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb, + void *opaque) +{ + return timer_new(type, SCALE_MS, cb, opaque); +} + +/** + * timer_deinit: + * @ts: the timer to be de-initialised + * + * Deassociate the timer from any timerlist. You should + * call timer_del before. After this call, any further + * timer_del call cannot cause dangling pointer accesses + * even if the previously used timerlist is freed. + */ +void timer_deinit(QEMUTimer *ts); + +/** + * timer_free: + * @ts: the timer + * + * Free a timer (it must not be on the active list) + */ +static inline void timer_free(QEMUTimer *ts) +{ + g_free(ts); +} + +/** + * timer_del: + * @ts: the timer + * + * Delete a timer from the active list. + * + * This function is thread-safe but the timer and its timer list must not be + * freed while this function is running. + */ +void timer_del(QEMUTimer *ts); + +/** + * timer_mod_ns: + * @ts: the timer + * @expire_time: the expiry time in nanoseconds + * + * Modify a timer to expire at @expire_time + * + * This function is thread-safe but the timer and its timer list must not be + * freed while this function is running. + */ +void timer_mod_ns(QEMUTimer *ts, int64_t expire_time); + +/** + * timer_mod_anticipate_ns: + * @ts: the timer + * @expire_time: the expiry time in nanoseconds + * + * Modify a timer to expire at @expire_time or the current time, + * whichever comes earlier. + * + * This function is thread-safe but the timer and its timer list must not be + * freed while this function is running. + */ +void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time); + +/** + * timer_mod: + * @ts: the timer + * @expire_time: the expiry time in the units associated with the timer + * + * Modify a timer to expire at @expire_time, taking into + * account the scale associated with the timer. + * + * This function is thread-safe but the timer and its timer list must not be + * freed while this function is running. + */ +void timer_mod(QEMUTimer *ts, int64_t expire_time); + +/** + * timer_mod_anticipate: + * @ts: the timer + * @expire_time: the expiry time in nanoseconds + * + * Modify a timer to expire at @expire_time or the current time, whichever + * comes earlier, taking into account the scale associated with the timer. + * + * This function is thread-safe but the timer and its timer list must not be + * freed while this function is running. + */ +void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time); + +/** + * timer_pending: + * @ts: the timer + * + * Determines whether a timer is pending (i.e. is on the + * active list of timers, whether or not it has expired yet). + * + * Returns: true if the timer is pending + */ +bool timer_pending(QEMUTimer *ts); + +/** + * timer_expired: + * @ts: the timer + * @current_time: the current time + * + * Determines whether a timer has expired.
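Putting the allocation, arming and teardown calls together, a one-shot timer would look roughly like this. Note that timer_init() and timer_new_full() above have their timer_init_full() calls commented out in this port, so the sketch reflects the documented upstream contract rather than verified Unicorn behaviour:

    static void fire(void *opaque)
    {
        /* runs when the deadline passes */
    }

    static void one_shot_example(void)
    {
        QEMUTimer *t = timer_new_ns(QEMU_CLOCK_VIRTUAL, fire, NULL);

        /* arm with an absolute deadline: 10 ms of virtual time from now */
        timer_mod_ns(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 10 * SCALE_MS);

        /* tear down: take it off the active list before freeing */
        timer_del(t);
        timer_free(t);
    }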
+ * + * Returns: true if the timer has expired + */ +bool timer_expired(QEMUTimer *timer_head, int64_t current_time); + +/** + * timer_expire_time_ns: + * @ts: the timer + * + * Determine the expiry time of a timer + * + * Returns: the expiry time in nanoseconds + */ +uint64_t timer_expire_time_ns(QEMUTimer *ts); + +/* + * General utility functions + */ + /** * qemu_timeout_ns_to_ms: * @ns: nanosecond timeout value @@ -140,7 +757,7 @@ static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2) * * Initialise the clock & timer infrastructure */ -void init_clocks(void); +void init_clocks(QEMUTimerListNotifyCB *notify_cb); int64_t cpu_get_ticks(void); /* Caller must hold BQL */ @@ -148,16 +765,20 @@ void cpu_enable_ticks(void); /* Caller must hold BQL */ void cpu_disable_ticks(void); -static inline int64_t get_ticks_per_sec(void) +static inline int64_t get_max_clock_jump(void) { - return 1000000000LL; + /* This should be small enough to prevent excessive interrupts from being + * generated by the RTC on clock jumps, but large enough to avoid frequent + * unnecessary resets in idle VMs. + */ + return 60 * NANOSECONDS_PER_SECOND; } /* * Low level clock functions */ -/* real time host monotonic timer */ +/* get host real time in nanosecond */ static inline int64_t get_clock_realtime(void) { struct timeval tv; @@ -176,29 +797,40 @@ static inline int64_t get_clock(void) { LARGE_INTEGER ti; QueryPerformanceCounter(&ti); - return muldiv64(ti.QuadPart, (uint32_t)get_ticks_per_sec(), (uint32_t)clock_freq); + return muldiv64(ti.QuadPart, NANOSECONDS_PER_SECOND, clock_freq); } #else +extern int use_rt_clock; + static inline int64_t get_clock(void) { - return get_clock_realtime(); + if (use_rt_clock) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec * 1000000000LL + ts.tv_nsec; + } else { + /* XXX: using gettimeofday leads to problems if the date + changes, so it should be avoided. 
*/ + return get_clock_realtime(); + } } #endif /* icount */ +int64_t cpu_get_icount_raw(void); int64_t cpu_get_icount(void); int64_t cpu_get_clock(void); -int64_t cpu_get_clock_offset(void); int64_t cpu_icount_to_ns(int64_t icount); +void cpu_update_icount(CPUState *cpu); /*******************************************/ /* host CPU ticks (if available) */ #if defined(_ARCH_PPC) -static inline int64_t cpu_get_real_ticks(void) +static inline int64_t cpu_get_host_ticks(void) { int64_t retval; #ifdef _ARCH_PPC64 @@ -224,7 +856,7 @@ static inline int64_t cpu_get_real_ticks(void) #elif defined(__i386__) -static inline int64_t cpu_get_real_ticks(void) +static inline int64_t cpu_get_host_ticks(void) { #ifdef _MSC_VER return __rdtsc(); @@ -237,7 +869,7 @@ static inline int64_t cpu_get_real_ticks(void) #elif defined(__x86_64__) -static inline int64_t cpu_get_real_ticks(void) +static inline int64_t cpu_get_host_ticks(void) { #ifdef _MSC_VER return __rdtsc(); @@ -254,25 +886,16 @@ static inline int64_t cpu_get_real_ticks(void) #elif defined(__hppa__) -static inline int64_t cpu_get_real_ticks(void) +static inline int64_t cpu_get_host_ticks(void) { int val; asm volatile ("mfctl %%cr16, %0" : "=r"(val)); return val; } -#elif defined(__ia64) - -static inline int64_t cpu_get_real_ticks(void) -{ - int64_t val; - asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory"); - return val; -} - #elif defined(__s390__) -static inline int64_t cpu_get_real_ticks(void) +static inline int64_t cpu_get_host_ticks(void) { int64_t val; asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc"); @@ -281,7 +904,7 @@ static inline int64_t cpu_get_real_ticks(void) #elif defined(__sparc__) -static inline int64_t cpu_get_real_ticks (void) +static inline int64_t cpu_get_host_ticks (void) { #if defined(_LP64) uint64_t rval; @@ -319,7 +942,7 @@ static inline int64_t cpu_get_real_ticks (void) : "=r" (value)); \ } -static inline int64_t cpu_get_real_ticks(void) +static inline int64_t cpu_get_host_ticks(void) { /* On kernels >= 2.6.25 rdhwr , $2 and $3 are emulated */ uint32_t count; @@ -335,7 +958,7 @@ static inline int64_t cpu_get_real_ticks(void) #elif defined(__alpha__) -static inline int64_t cpu_get_real_ticks(void) +static inline int64_t cpu_get_host_ticks(void) { uint64_t cc; uint32_t cur, ofs; @@ -350,22 +973,12 @@ static inline int64_t cpu_get_real_ticks(void) /* The host CPU doesn't have an easily accessible cycle counter. Just return a monotonically increasing value. This will be totally wrong, but hopefully better than nothing. */ -static inline int64_t cpu_get_real_ticks (void) +static inline int64_t cpu_get_host_ticks(void) { - static int64_t ticks = 0; - return ticks++; + return get_clock(); } #endif -#ifdef CONFIG_PROFILER -static inline int64_t profile_getclock(void) -{ - return cpu_get_real_ticks(); -} - -extern int64_t qemu_time, qemu_time_start; -extern int64_t tlb_flush_time; -extern int64_t dev_time; -#endif +void init_get_clock(void); #endif diff --git a/qemu/include/qemu/typedefs.h b/qemu/include/qemu/typedefs.h index d9759fc0..59e20a3d 100644 --- a/qemu/include/qemu/typedefs.h +++ b/qemu/include/qemu/typedefs.h @@ -1,80 +1,47 @@ #ifndef QEMU_TYPEDEFS_H #define QEMU_TYPEDEFS_H -/* A load of opaque types so that device init declarations don't have to - pull in all the real definitions. */ +/* + * This header is for selectively avoiding #include just to get a + * typedef name. 
+ * + * Declaring a typedef name in its "obvious" place can result in + * inclusion cycles, in particular for complete struct and union + * types that need more types for their members. It can also result + * in headers pulling in many more headers, slowing down builds. + * + * You can break such cycles and unwanted dependencies by declaring + * the typedef name here. + * + * For struct types used in only a few headers, judicious use of the + * struct tag instead of the typedef name is commonly preferable. + */ + +/* + * Incomplete struct types + * Please keep this list in case-insensitive alphabetical order. + */ +typedef struct AddressSpace AddressSpace; +typedef struct CPUAddressSpace CPUAddressSpace; +typedef struct CPUState CPUState; +typedef struct FlatView FlatView; +typedef struct IOMMUMemoryRegion IOMMUMemoryRegion; +typedef struct MemoryListener MemoryListener; +typedef struct MemoryMappingList MemoryMappingList; +typedef struct MemoryRegion MemoryRegion; +typedef struct MemoryRegionCache MemoryRegionCache; +typedef struct MemoryRegionSection MemoryRegionSection; typedef struct QEMUTimer QEMUTimer; typedef struct QEMUTimerListGroup QEMUTimerListGroup; -typedef struct QEMUFile QEMUFile; -typedef struct QEMUBH QEMUBH; - -typedef struct AioContext AioContext; - -typedef struct Visitor Visitor; - -typedef struct MigrationParams MigrationParams; - -typedef struct Property Property; -typedef struct PropertyInfo PropertyInfo; -typedef struct CompatProperty CompatProperty; -typedef struct DeviceState DeviceState; -typedef struct BusState BusState; -typedef struct BusClass BusClass; - -typedef struct AddressSpace AddressSpace; -typedef struct MemoryRegion MemoryRegion; -typedef struct MemoryRegionSection MemoryRegionSection; -typedef struct MemoryListener MemoryListener; - -typedef struct MemoryMappingList MemoryMappingList; - -typedef struct QEMUMachine QEMUMachine; -typedef struct MachineClass MachineClass; -typedef struct MachineState MachineState; -typedef struct NICInfo NICInfo; -typedef struct HCIInfo HCIInfo; -typedef struct AudioState AudioState; -typedef struct BlockBackend BlockBackend; -typedef struct BlockDriverState BlockDriverState; -typedef struct DriveInfo DriveInfo; -typedef struct DisplayState DisplayState; -typedef struct DisplayChangeListener DisplayChangeListener; -typedef struct DisplaySurface DisplaySurface; -typedef struct PixelFormat PixelFormat; -typedef struct QemuConsole QemuConsole; -typedef struct CharDriverState CharDriverState; -typedef struct MACAddr MACAddr; -typedef struct NetClientState NetClientState; -typedef struct I2CBus I2CBus; -typedef struct ISABus ISABus; -typedef struct ISADevice ISADevice; -typedef struct SMBusDevice SMBusDevice; -typedef struct PCIHostState PCIHostState; -typedef struct PCIExpressHost PCIExpressHost; -typedef struct PCIBus PCIBus; -typedef struct PCIDevice PCIDevice; -typedef struct PCIExpressDevice PCIExpressDevice; -typedef struct PCIBridge PCIBridge; -typedef struct PCIEAERMsg PCIEAERMsg; -typedef struct PCIEAERLog PCIEAERLog; -typedef struct PCIEAERErr PCIEAERErr; -typedef struct PCIEPort PCIEPort; -typedef struct PCIESlot PCIESlot; -typedef struct MSIMessage MSIMessage; -typedef struct SerialState SerialState; -typedef struct PCMCIACardState PCMCIACardState; -typedef struct MouseTransformInfo MouseTransformInfo; -typedef struct uWireSlave uWireSlave; -typedef struct I2SCodec I2SCodec; -typedef struct SSIBus SSIBus; -typedef struct EventNotifier EventNotifier; -typedef struct VirtIODevice VirtIODevice; -typedef struct 
QEMUSGList QEMUSGList; -typedef struct QEMUSizedBuffer QEMUSizedBuffer; -typedef struct SHPCDevice SHPCDevice; -typedef struct FWCfgState FWCfgState; -typedef struct PcGuestInfo PcGuestInfo; +typedef struct RAMBlock RAMBlock; typedef struct Range Range; -typedef struct AdapterInfo AdapterInfo; + +/* + * Pointer types + * Such typedefs should be limited to cases where the typedef's users + * are oblivious of its "pointer-ness". + * Please keep this list in case-insensitive alphabetical order. + */ +typedef struct IRQState *qemu_irq; #endif /* QEMU_TYPEDEFS_H */ diff --git a/qemu/include/qemu/units.h b/qemu/include/qemu/units.h new file mode 100644 index 00000000..692db3fb --- /dev/null +++ b/qemu/include/qemu/units.h @@ -0,0 +1,20 @@ +/* + * IEC binary prefixes definitions + * + * Copyright (C) 2015 Nikunj A Dadhania, IBM Corporation + * Copyright (C) 2018 Philippe Mathieu-Daudé + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef QEMU_UNITS_H +#define QEMU_UNITS_H + +#define KiB (INT64_C(1) << 10) +#define MiB (INT64_C(1) << 20) +#define GiB (INT64_C(1) << 30) +#define TiB (INT64_C(1) << 40) +#define PiB (INT64_C(1) << 50) +#define EiB (INT64_C(1) << 60) + +#endif diff --git a/qemu/include/qemu/xxhash.h b/qemu/include/qemu/xxhash.h new file mode 100644 index 00000000..f26fa947 --- /dev/null +++ b/qemu/include/qemu/xxhash.h @@ -0,0 +1,129 @@ +/* + * xxHash - Fast Hash algorithm + * Copyright (C) 2012-2016, Yann Collet + * + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * + Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You can contact the author at : + * - xxHash source repository : https://github.com/Cyan4973/xxHash + */ + +#ifndef QEMU_XXHASH_H +#define QEMU_XXHASH_H + +#include "qemu/bitops.h" + +#define PRIME32_1 2654435761U +#define PRIME32_2 2246822519U +#define PRIME32_3 3266489917U +#define PRIME32_4 668265263U +#define PRIME32_5 374761393U + +#define QEMU_XXHASH_SEED 1 + +/* + * xxhash32, customized for input variables that are not guaranteed to be + * contiguous in memory. 
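A sketch of how the fixed-width helpers below are meant to be used: hashing a 64-bit guest address plus two 32-bit discriminators in one call, without first marshalling the fields into a contiguous buffer. The field meanings are illustrative only.

    /* Hypothetical translation-block style hash over non-contiguous inputs,
     * built on qemu_xxhash6() defined below. */
    static inline uint32_t example_tb_hash(uint64_t pc, uint32_t flags,
                                           uint32_t cf_mask)
    {
        return qemu_xxhash6(pc, 0 /* unused 64-bit lane */, flags, cf_mask);
    }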
+ */ +static inline uint32_t +qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g) +{ +#ifdef _WIN32 + uint64_t v1x = QEMU_XXHASH_SEED; + v1x += PRIME32_1; + v1x += PRIME32_2; + uint32_t v1 = v1x; +#else + uint32_t v1 = QEMU_XXHASH_SEED + PRIME32_1 + PRIME32_2; +#endif + uint32_t v2 = QEMU_XXHASH_SEED + PRIME32_2; + uint32_t v3 = QEMU_XXHASH_SEED + 0; + uint32_t v4 = QEMU_XXHASH_SEED - PRIME32_1; + uint32_t a = ab; + uint32_t b = ab >> 32; + uint32_t c = cd; + uint32_t d = cd >> 32; + uint32_t h32; + + v1 += a * PRIME32_2; + v1 = rol32(v1, 13); + v1 *= PRIME32_1; + + v2 += b * PRIME32_2; + v2 = rol32(v2, 13); + v2 *= PRIME32_1; + + v3 += c * PRIME32_2; + v3 = rol32(v3, 13); + v3 *= PRIME32_1; + + v4 += d * PRIME32_2; + v4 = rol32(v4, 13); + v4 *= PRIME32_1; + + h32 = rol32(v1, 1) + rol32(v2, 7) + rol32(v3, 12) + rol32(v4, 18); + h32 += 28; + + h32 += e * PRIME32_3; + h32 = rol32(h32, 17) * PRIME32_4; + + h32 += f * PRIME32_3; + h32 = rol32(h32, 17) * PRIME32_4; + + h32 += g * PRIME32_3; + h32 = rol32(h32, 17) * PRIME32_4; + + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + +static inline uint32_t qemu_xxhash2(uint64_t ab) +{ + return qemu_xxhash7(ab, 0, 0, 0, 0); +} + +static inline uint32_t qemu_xxhash4(uint64_t ab, uint64_t cd) +{ + return qemu_xxhash7(ab, cd, 0, 0, 0); +} + +static inline uint32_t qemu_xxhash5(uint64_t ab, uint64_t cd, uint32_t e) +{ + return qemu_xxhash7(ab, cd, e, 0, 0); +} + +static inline uint32_t qemu_xxhash6(uint64_t ab, uint64_t cd, uint32_t e, + uint32_t f) +{ + return qemu_xxhash7(ab, cd, e, f, 0); +} + +#endif /* QEMU_XXHASH_H */ diff --git a/qemu/include/qom/cpu.h b/qemu/include/qom/cpu.h deleted file mode 100644 index 3a24f78c..00000000 --- a/qemu/include/qom/cpu.h +++ /dev/null @@ -1,629 +0,0 @@ -/* - * QEMU CPU model - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see - * - */ -#ifndef QEMU_CPU_H -#define QEMU_CPU_H - -#include -#include -#include "hw/qdev-core.h" -#include "exec/hwaddr.h" -#include "qemu/queue.h" -#include "qemu/thread.h" -#include "qemu/typedefs.h" - -typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size, - void *opaque); - -/** - * vaddr: - * Type wide enough to contain any #target_ulong virtual address. - */ -typedef uint64_t vaddr; -#define VADDR_PRId PRId64 -#define VADDR_PRIu PRIu64 -#define VADDR_PRIo PRIo64 -#define VADDR_PRIx PRIx64 -#define VADDR_PRIX PRIX64 -#define VADDR_MAX UINT64_MAX - -/** - * SECTION:cpu - * @section_id: QEMU-cpu - * @title: CPU Class - * @short_description: Base class for all CPUs - */ - -#define TYPE_CPU "cpu" - -/* Since this macro is used a lot in hot code paths and in conjunction with - * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using - * an unchecked cast. 
- */ -#define CPU(obj) ((CPUState *)(obj)) - -#define CPU_CLASS(uc, class) OBJECT_CLASS_CHECK(uc, CPUClass, (class), TYPE_CPU) -#define CPU_GET_CLASS(uc, obj) OBJECT_GET_CLASS(uc, CPUClass, (obj), TYPE_CPU) - -typedef struct CPUState CPUState; - -typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr, - bool is_write, bool is_exec, int opaque, - unsigned size); - -struct TranslationBlock; - -/** - * CPUClass: - * @class_by_name: Callback to map -cpu command line model name to an - * instantiatable CPU type. - * @parse_features: Callback to parse command line arguments. - * @reset: Callback to reset the #CPUState to its initial state. - * @reset_dump_flags: #CPUDumpFlags to use for reset logging. - * @has_work: Callback for checking if there is work to do. - * @do_interrupt: Callback for interrupt handling. - * @do_unassigned_access: Callback for unassigned access handling. - * @do_unaligned_access: Callback for unaligned access handling, if - * the target defines #ALIGNED_ONLY. - * @memory_rw_debug: Callback for GDB memory access. - * @dump_state: Callback for dumping state. - * @dump_statistics: Callback for dumping statistics. - * @get_arch_id: Callback for getting architecture-dependent CPU ID. - * @get_paging_enabled: Callback for inquiring whether paging is enabled. - * @get_memory_mapping: Callback for obtaining the memory mappings. - * @set_pc: Callback for setting the Program Counter register. - * @synchronize_from_tb: Callback for synchronizing state from a TCG - * #TranslationBlock. - * @handle_mmu_fault: Callback for handling an MMU fault. - * @get_phys_page_debug: Callback for obtaining a physical address. - * @debug_excp_handler: Callback for handling debug exceptions. - * @vmsd: State description for migration. - * @cpu_exec_enter: Callback for cpu_exec preparation. - * @cpu_exec_exit: Callback for cpu_exec cleanup. - * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec. - * - * Represents a CPU family or model. 
- */ -typedef struct CPUClass { - /*< private >*/ - DeviceClass parent_class; - /*< public >*/ - - ObjectClass *(*class_by_name)(struct uc_struct *uc, const char *cpu_model); - void (*parse_features)(CPUState *cpu, char *str, Error **errp); - - void (*reset)(CPUState *cpu); - int reset_dump_flags; - bool (*has_work)(CPUState *cpu); - void (*do_interrupt)(CPUState *cpu); - CPUUnassignedAccess do_unassigned_access; - void (*do_unaligned_access)(CPUState *cpu, vaddr addr, - int is_write, int is_user, uintptr_t retaddr); - int (*memory_rw_debug)(CPUState *cpu, vaddr addr, - uint8_t *buf, int len, bool is_write); - void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, - int flags); - void (*dump_statistics)(CPUState *cpu, FILE *f, - fprintf_function cpu_fprintf, int flags); - int64_t (*get_arch_id)(CPUState *cpu); - bool (*get_paging_enabled)(const CPUState *cpu); - void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list, - Error **errp); - void (*set_pc)(CPUState *cpu, vaddr value); - void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb); - int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw, - int mmu_index); - hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr); - void (*debug_excp_handler)(CPUState *cpu); - - const struct VMStateDescription *vmsd; - - void (*cpu_exec_enter)(CPUState *cpu); - void (*cpu_exec_exit)(CPUState *cpu); - bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); -} CPUClass; - -#ifdef HOST_WORDS_BIGENDIAN -typedef struct icount_decr_u16 { - uint16_t high; - uint16_t low; -} icount_decr_u16; -#else -typedef struct icount_decr_u16 { - uint16_t low; - uint16_t high; -} icount_decr_u16; -#endif - -typedef struct CPUBreakpoint { - vaddr pc; - int flags; /* BP_* */ - QTAILQ_ENTRY(CPUBreakpoint) entry; -} CPUBreakpoint; - -typedef struct CPUWatchpoint { - vaddr vaddr; - vaddr len; - vaddr hitaddr; - int flags; /* BP_* */ - QTAILQ_ENTRY(CPUWatchpoint) entry; -} CPUWatchpoint; - -struct KVMState; -struct kvm_run; - -#define TB_JMP_CACHE_BITS 12 -#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) - -/** - * CPUState: - * @cpu_index: CPU index (informative). - * @nr_cores: Number of cores within this CPU package. - * @nr_threads: Number of threads within this CPU. - * @numa_node: NUMA node this CPU belongs to. - * @host_tid: Host thread ID. - * @running: #true if CPU is currently running (usermode). - * @created: Indicates whether the CPU thread has been successfully created. - * @interrupt_request: Indicates a pending interrupt request. - * @halted: Nonzero if the CPU is in a suspended state. - * @stop: Indicates a pending stop request. - * @stopped: Indicates the CPU has been artificially stopped. - * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this - * CPU and return to its top level loop. - * @singlestep_enabled: Flags for single-stepping. - * @icount_extra: Instructions until next timer event. - * @icount_decr: Number of cycles left, with interrupt flag in high bit. - * This allows a single read-compare-cbranch-write sequence to test - * for both decrementer underflow and exceptions. - * @can_do_io: Nonzero if memory-mapped IO is safe. - * @env_ptr: Pointer to subclass-specific CPUArchState field. - * @current_tb: Currently executing TB. - * @next_cpu: Next CPU sharing TB cache. - * @opaque: User data. - * @mem_io_pc: Host Program Counter at which the memory was accessed. - * @mem_io_vaddr: Target virtual address at which the memory was accessed.
- * @kvm_fd: vCPU file descriptor for KVM. - * - * State of one CPU core or thread. - */ -struct CPUState { - /*< private >*/ - DeviceState parent_obj; - /*< public >*/ - - int nr_cores; - int nr_threads; - int numa_node; - - struct QemuThread *thread; -#ifdef _WIN32 - HANDLE hThread; -#endif - int thread_id; - uint32_t host_tid; - bool running; - struct qemu_work_item *queued_work_first, *queued_work_last; - bool thread_kicked; - bool created; - bool stop; - bool stopped; - volatile sig_atomic_t exit_request; - uint32_t interrupt_request; - int singlestep_enabled; - int64_t icount_extra; - sigjmp_buf jmp_env; - - AddressSpace *as; - MemoryListener *tcg_as_listener; - - void *env_ptr; /* CPUArchState */ - struct TranslationBlock *current_tb; - struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; - QTAILQ_ENTRY(CPUState) node; - - /* ice debug support */ - QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; - - QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; - CPUWatchpoint *watchpoint_hit; - - void *opaque; - - /* In order to avoid passing too many arguments to the MMIO helpers, - * we store some rarely used information in the CPU context. - */ - uintptr_t mem_io_pc; - vaddr mem_io_vaddr; - - int kvm_fd; - bool kvm_vcpu_dirty; - struct KVMState *kvm_state; - struct kvm_run *kvm_run; - - /* TODO Move common fields from CPUArchState here. */ - int cpu_index; /* used by alpha TCG */ - uint32_t halted; /* used by alpha, cris, ppc TCG */ - union { - uint32_t u32; - icount_decr_u16 u16; - } icount_decr; - uint32_t can_do_io; - int32_t exception_index; /* used by m68k TCG */ - - /* Note that this is accessed at the start of every TB via a negative - offset from AREG0. Leave this field at the end so as to make the - (absolute value) offset as small as possible. This reduces code - size, especially for hosts without large memory offsets. */ - volatile sig_atomic_t tcg_exit_req; - struct uc_struct* uc; -}; - - -/** - * cpu_paging_enabled: - * @cpu: The CPU whose state is to be inspected. - * - * Returns: %true if paging is enabled, %false otherwise. - */ -bool cpu_paging_enabled(const CPUState *cpu); - -/** - * cpu_get_memory_mapping: - * @cpu: The CPU whose memory mappings are to be obtained. - * @list: Where to write the memory mappings to. - * @errp: Pointer for reporting an #Error. 
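The @icount_decr union documented above encodes a neat trick: the field is read as one signed 32-bit value, so a single comparison catches both "instruction budget exhausted" (the low half underflowed) and "exit requested" (another thread made the high half nonzero). A self-contained sketch of the idea, with a union mirroring the little-endian icount_decr_u16 layout; the names here are illustrative, not the engine's API:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef union {
    uint32_t u32;
    struct { uint16_t low; uint16_t high; } u16;  /* little-endian layout */
} IcountDecr;

/* Request an exit from another thread: a nonzero high half makes the
 * 32-bit value negative when read as signed. */
static void request_exit(IcountDecr *d) { d->u16.high = 0xffff; }

/* Charge the budget for a block of instructions; one signed test covers
 * both underflow and a pending exit request. */
static bool charge(IcountDecr *d, uint16_t insns)
{
    int32_t left = (int32_t)d->u32 - insns;  /* read-compare-... */
    d->u16.low = (uint16_t)left;             /* ...write */
    return left < 0;                         /* true: leave the exec loop */
}

int main(void)
{
    IcountDecr d = { .u32 = 100 };
    printf("stop? %d\n", charge(&d, 30));    /* 0: budget left */
    request_exit(&d);
    printf("stop? %d\n", charge(&d, 30));    /* 1: exit requested */
    return 0;
}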
- */ -void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, - Error **errp); - -/** - * cpu_write_elf64_note: - * @f: pointer to a function that writes memory to a file - * @cpu: The CPU whose memory is to be dumped - * @cpuid: ID number of the CPU - * @opaque: pointer to the CPUState struct - */ -int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, - int cpuid, void *opaque); - -/** - * cpu_write_elf64_qemunote: - * @f: pointer to a function that writes memory to a file - * @cpu: The CPU whose memory is to be dumped - * @cpuid: ID number of the CPU - * @opaque: pointer to the CPUState struct - */ -int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, - void *opaque); - -/** - * cpu_write_elf32_note: - * @f: pointer to a function that writes memory to a file - * @cpu: The CPU whose memory is to be dumped - * @cpuid: ID number of the CPU - * @opaque: pointer to the CPUState struct - */ -int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, - int cpuid, void *opaque); - -/** - * cpu_write_elf32_qemunote: - * @f: pointer to a function that writes memory to a file - * @cpu: The CPU whose memory is to be dumped - * @cpuid: ID number of the CPU - * @opaque: pointer to the CPUState struct - */ -int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, - void *opaque); - -/** - * CPUDumpFlags: - * @CPU_DUMP_CODE: - * @CPU_DUMP_FPU: dump FPU register state, not just integer - * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state - */ -enum CPUDumpFlags { - CPU_DUMP_CODE = 0x00010000, - CPU_DUMP_FPU = 0x00020000, - CPU_DUMP_CCOP = 0x00040000, -}; - -/** - * cpu_dump_state: - * @cpu: The CPU whose state is to be dumped. - * @f: File to dump to. - * @cpu_fprintf: Function to dump with. - * @flags: Flags specifying what to dump. - * - * Dumps CPU state. - */ -void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, - int flags); - -/** - * cpu_dump_statistics: - * @cpu: The CPU whose state is to be dumped. - * @f: File to dump to. - * @cpu_fprintf: Function to dump with. - * @flags: Flags specifying what to dump. - * - * Dumps CPU statistics. - */ -void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, - int flags); - -#ifndef CONFIG_USER_ONLY -/** - * cpu_get_phys_page_debug: - * @cpu: The CPU to obtain the physical page address for. - * @addr: The virtual address. - * - * Obtains the physical page corresponding to a virtual one. - * Use it only for debugging because no protection checks are done. - * - * Returns: Corresponding physical page address or -1 if no page found. - */ -static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr) -{ - CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); - - return cc->get_phys_page_debug(cpu, addr); -} -#endif - -/** - * cpu_reset: - * @cpu: The CPU whose state is to be reset. - */ -void cpu_reset(CPUState *cpu); - -/** - * cpu_class_by_name: - * @typename: The CPU base type. - * @cpu_model: The model string without any parameters. - * - * Looks up a CPU #ObjectClass matching name @cpu_model. - * - * Returns: A #CPUClass or %NULL if no matching class is found. - */ -ObjectClass *cpu_class_by_name(struct uc_struct *uc, const char *typename_, const char *cpu_model); - -/** - * cpu_generic_init: - * @typename: The CPU base type. - * @cpu_model: The model string including optional parameters. - * - * Instantiates a CPU, processes optional parameters and realizes the CPU. - * - * Returns: A #CPUState or %NULL if an error occurred.
- */ -CPUState *cpu_generic_init(struct uc_struct *uc, const char *typename_, const char *cpu_model); - -/** - * cpu_has_work: - * @cpu: The vCPU to check. - * - * Checks whether the CPU has work to do. - * - * Returns: %true if the CPU has work, %false otherwise. - */ -static inline bool cpu_has_work(CPUState *cpu) -{ - CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); - - g_assert(cc->has_work); - return cc->has_work(cpu); -} - -/** - * qemu_cpu_kick: - * @cpu: The vCPU to kick. - * - * Kicks @cpu's thread. - */ -void qemu_cpu_kick(CPUState *cpu); - -/** - * cpu_is_stopped: - * @cpu: The CPU to check. - * - * Checks whether the CPU is stopped. - * - * Returns: %true if run state is not running or if artificially stopped; - * %false otherwise. - */ -bool cpu_is_stopped(CPUState *cpu); - -/** - * run_on_cpu: - * @cpu: The vCPU to run on. - * @func: The function to be executed. - * @data: Data to pass to the function. - * - * Schedules the function @func for execution on the vCPU @cpu. - */ -void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data); - -/** - * async_run_on_cpu: - * @cpu: The vCPU to run on. - * @func: The function to be executed. - * @data: Data to pass to the function. - * - * Schedules the function @func for execution on the vCPU @cpu asynchronously. - */ -void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data); - -/** - * qemu_get_cpu: - * @index: The CPUState@cpu_index value of the CPU to obtain. - * - * Gets a CPU matching @index. - * - * Returns: The CPU or %NULL if there is no matching CPU. - */ -CPUState *qemu_get_cpu(struct uc_struct *uc, int index); - -/** - * cpu_exists: - * @id: Guest-exposed CPU ID to lookup. - * - * Search for CPU with specified ID. - * - * Returns: %true - CPU is found, %false - CPU isn't found. - */ -bool cpu_exists(struct uc_struct* uc, int64_t id); - -#ifndef CONFIG_USER_ONLY - -typedef void (*CPUInterruptHandler)(CPUState *, int); - -extern CPUInterruptHandler cpu_interrupt_handler; - -/** - * cpu_interrupt: - * @cpu: The CPU to set an interrupt on. - * @mask: The interrupts to set. - * - * Invokes the interrupt handler. - */ -static inline void cpu_interrupt(CPUState *cpu, int mask) -{ - cpu_interrupt_handler(cpu, mask); -} - -#else /* USER_ONLY */ - -void cpu_interrupt(CPUState *cpu, int mask); - -#endif /* USER_ONLY */ - -#ifdef CONFIG_SOFTMMU -static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr, - bool is_write, bool is_exec, - int opaque, unsigned size) -{ - CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); - - if (cc->do_unassigned_access) { - cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size); - } -} - -static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, - int is_write, int is_user, - uintptr_t retaddr) -{ - CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); - - cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr); -} -#endif - -/** - * cpu_reset_interrupt: - * @cpu: The CPU to clear the interrupt on. - * @mask: The interrupt mask to clear. - * - * Resets interrupts on the vCPU @cpu. - */ -void cpu_reset_interrupt(CPUState *cpu, int mask); - -/** - * cpu_exit: - * @cpu: The CPU to exit. - * - * Requests the CPU @cpu to exit execution. - */ -void cpu_exit(CPUState *cpu); - -/** - * cpu_resume: - * @cpu: The CPU to resume. - * - * Resumes CPU, i.e. puts CPU into runnable state. - */ -void cpu_resume(CPUState *cpu); - -/** - * qemu_init_vcpu: - * @cpu: The vCPU to initialize. - * - * Initializes a vCPU.
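The cpu_interrupt_handler indirection above is the hook an embedder such as Unicorn uses to take over interrupt delivery: cpu_interrupt() is just a call through a writable function pointer. A self-contained sketch of the pattern; the struct layout and handler bodies are placeholders, not the real implementations:

#include <stdio.h>

typedef struct CPUState { int interrupt_request; } CPUState;  /* stand-in */
typedef void (*CPUInterruptHandler)(CPUState *, int);

static void generic_handler(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;        /* latch the pending mask */
}

static void embedder_handler(CPUState *cpu, int mask)
{
    printf("intercepted mask 0x%x\n", mask);
    generic_handler(cpu, mask);            /* then take the generic path */
}

static CPUInterruptHandler cpu_interrupt_handler = generic_handler;

static void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu_interrupt_handler(cpu, mask);      /* one indirection, swappable */
}

int main(void)
{
    CPUState cpu = { 0 };
    cpu_interrupt_handler = embedder_handler;  /* install a custom handler */
    cpu_interrupt(&cpu, 0x2);
    printf("pending: 0x%x\n", cpu.interrupt_request);
    return 0;
}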
- */ -int qemu_init_vcpu(CPUState *cpu); - -#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */ -#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */ -#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */ - -/** - * cpu_single_step: - * @cpu: The CPU to set the flags for. - * @enabled: Flags to enable. - * - * Enables or disables single-stepping for @cpu. - */ -void cpu_single_step(CPUState *cpu, int enabled); - -/* Breakpoint/watchpoint flags */ -#define BP_MEM_READ 0x01 -#define BP_MEM_WRITE 0x02 -#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE) -#define BP_STOP_BEFORE_ACCESS 0x04 -/* 0x08 currently unused */ -#define BP_GDB 0x10 -#define BP_CPU 0x20 -#define BP_WATCHPOINT_HIT_READ 0x40 -#define BP_WATCHPOINT_HIT_WRITE 0x80 -#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE) - -int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, - CPUBreakpoint **breakpoint); -int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags); -void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint); -void cpu_breakpoint_remove_all(CPUState *cpu, int mask); - -int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, - int flags, CPUWatchpoint **watchpoint); -int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, - vaddr len, int flags); -void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); -void cpu_watchpoint_remove_all(CPUState *cpu, int mask); - -void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...) - GCC_FMT_ATTR(2, 3); - -void cpu_register_types(struct uc_struct *uc); - -#ifdef CONFIG_SOFTMMU -extern const struct VMStateDescription vmstate_cpu_common; -#else -#define vmstate_cpu_common vmstate_dummy -#endif - -#define VMSTATE_CPU() { \ - .name = "parent_obj", \ - .size = sizeof(CPUState), \ - .vmsd = &vmstate_cpu_common, \ - .flags = VMS_STRUCT, \ - .offset = 0, \ -} - -#endif diff --git a/qemu/include/qom/object.h b/qemu/include/qom/object.h deleted file mode 100644 index c3a1ffe0..00000000 --- a/qemu/include/qom/object.h +++ /dev/null @@ -1,1270 +0,0 @@ -/* - * QEMU Object Model - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori <aliguori@us.ibm.com> - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#ifndef QEMU_OBJECT_H -#define QEMU_OBJECT_H - -#include "glib_compat.h" -#include "unicorn/platform.h" -#include "qemu/queue.h" -#include "qapi/error.h" - -struct Visitor; - -struct TypeImpl; -typedef struct TypeImpl *Type; - -typedef struct ObjectClass ObjectClass; -typedef struct Object Object; - -typedef struct TypeInfo TypeInfo; - -typedef struct InterfaceClass InterfaceClass; -typedef struct InterfaceInfo InterfaceInfo; - -struct uc_struct; - -#define TYPE_OBJECT "object" - -/** - * SECTION:object.h - * @title:Base Object Type System - * @short_description: interfaces for creating new types and objects - * - * The QEMU Object Model provides a framework for registering user creatable - * types and instantiating objects from those types. QOM provides the following - * features: - * - * - System for dynamically registering types - * - Support for single-inheritance of types - * - Multiple inheritance of stateless interfaces - * - * - * Creating a minimal type - * - * #include "qdev.h" - * - * #define TYPE_MY_DEVICE "my-device" - * - * // No new virtual functions: we can reuse the typedef for the
- * typedef DeviceClass MyDeviceClass; - * typedef struct MyDevice - * { - * DeviceState parent; - * - * int reg0, reg1, reg2; - * } MyDevice; - * - * static const TypeInfo my_device_info = { - * .name = TYPE_MY_DEVICE, - * .parent = TYPE_DEVICE, - * .instance_size = sizeof(MyDevice), - * }; - * - * static void my_device_register_types(void) - * { - * type_register_static(&my_device_info); - * } - * - * type_init(my_device_register_types) - * - * - * - * In the above example, we create a simple type that is described by #TypeInfo. - * #TypeInfo describes information about the type including what it inherits - * from, the instance and class size, and constructor/destructor hooks. - * - * Every type has an #ObjectClass associated with it. #ObjectClass derivatives - * are instantiated dynamically but there is only ever one instance for any - * given type. The #ObjectClass typically holds a table of function pointers - * for the virtual methods implemented by this type. - * - * Using object_new(), a new #Object derivative will be instantiated. You can - * cast an #Object to a subclass (or base-class) type using - * object_dynamic_cast(). You typically want to define macro wrappers around - * OBJECT_CHECK() and OBJECT_CLASS_CHECK() to make it easier to convert to a - * specific type: - * - * - * Typecasting macros - * - * #define MY_DEVICE_GET_CLASS(obj) \ - * OBJECT_GET_CLASS(MyDeviceClass, obj, TYPE_MY_DEVICE) - * #define MY_DEVICE_CLASS(klass) \ - * OBJECT_CLASS_CHECK(MyDeviceClass, klass, TYPE_MY_DEVICE) - * #define MY_DEVICE(obj) \ - * OBJECT_CHECK(MyDevice, obj, TYPE_MY_DEVICE) - * - * - * - * # Class Initialization # - * - * Before an object is initialized, the class for the object must be - * initialized. There is only one class object for all instance objects - * that is created lazily. - * - * Classes are initialized by first initializing any parent classes (if - * necessary). After the parent class object has initialized, it will be - * copied into the current class object and any additional storage in the - * class object is zero filled. - * - * The effect of this is that classes automatically inherit any virtual - * function pointers that the parent class has already initialized. All - * other fields will be zero filled. - * - * Once all of the parent classes have been initialized, #TypeInfo::class_init - * is called to let the class being instantiated provide default initialize for - * its virtual functions. Here is how the above example might be modified - * to introduce an overridden virtual function: - * - * - * Overriding a virtual function - * - * #include "qdev.h" - * - * void my_device_class_init(ObjectClass *klass, void *class_data) - * { - * DeviceClass *dc = DEVICE_CLASS(klass); - * dc->reset = my_device_reset; - * } - * - * static const TypeInfo my_device_info = { - * .name = TYPE_MY_DEVICE, - * .parent = TYPE_DEVICE, - * .instance_size = sizeof(MyDevice), - * .class_init = my_device_class_init, - * }; - * - * - * - * Introducing new virtual methods requires a class to define its own - * struct and to add a .class_size member to the #TypeInfo. 
Each method - * will also have a wrapper function to call it easily: - * - * - * Defining an abstract class - * - * #include "qdev.h" - * - * typedef struct MyDeviceClass - * { - * DeviceClass parent; - * - * void (*frobnicate) (MyDevice *obj); - * } MyDeviceClass; - * - * static const TypeInfo my_device_info = { - * .name = TYPE_MY_DEVICE, - * .parent = TYPE_DEVICE, - * .instance_size = sizeof(MyDevice), - * .abstract = true, // or set a default in my_device_class_init - * .class_size = sizeof(MyDeviceClass), - * }; - * - * void my_device_frobnicate(MyDevice *obj) - * { - * MyDeviceClass *klass = MY_DEVICE_GET_CLASS(obj); - * - * klass->frobnicate(obj); - * } - * - * - * - * # Interfaces # - * - * Interfaces allow a limited form of multiple inheritance. Instances are - * similar to normal types except for the fact that they are only defined by - * their classes and never carry any state. You can dynamically cast an object - * to one of its #Interface types and vice versa. - * - * # Methods # - * - * A method is a function within the namespace scope of - * a class. It usually operates on the object instance by passing it as a - * strongly-typed first argument. - * If it does not operate on an object instance, it is dubbed - * class method. - * - * Methods cannot be overloaded. That is, the #ObjectClass and method name - * uniquely identify the function to be called; the signature does not vary - * except for trailing varargs. - * - * Methods are always virtual. Overriding a method in - * #TypeInfo.class_init of a subclass leads to any user of the class obtained - * via OBJECT_GET_CLASS() accessing the overridden function. - * The original function is not automatically invoked. It is the responsibility - * of the overriding class to determine whether and when to invoke the method - * being overridden. - * - * To invoke the method being overridden, the preferred solution is to store - * the original value in the overriding class before overriding the method. - * This corresponds to |[ {super,base}.method(...) ]| in Java and C# - * respectively; this frees the overriding class from hardcoding its parent - * class, which someone might choose to change at some point.
- * - * - * Overriding a virtual method - * - * typedef struct MyState MyState; - * - * typedef void (*MyDoSomething)(MyState *obj); - * - * typedef struct MyClass { - * ObjectClass parent_class; - * - * MyDoSomething do_something; - * } MyClass; - * - * static void my_do_something(MyState *obj) - * { - * // do something - * } - * - * static void my_class_init(ObjectClass *oc, void *data) - * { - * MyClass *mc = MY_CLASS(oc); - * - * mc->do_something = my_do_something; - * } - * - * static const TypeInfo my_type_info = { - * .name = TYPE_MY, - * .parent = TYPE_OBJECT, - * .instance_size = sizeof(MyState), - * .class_size = sizeof(MyClass), - * .class_init = my_class_init, - * }; - * - * typedef struct DerivedClass { - * MyClass parent_class, - * - * MyDoSomething parent_do_something; - * } DerivedClass; - * - * static void derived_do_something(MyState *obj) - * { - * DerivedClass *dc = DERIVED_GET_CLASS(obj); - * - * // do something here - * dc->parent_do_something(obj); - * // do something else here - * } - * - * static void derived_class_init(ObjectClass *oc, void *data) - * { - * MyClass *mc = MY_CLASS(oc); - * DerivedClass *dc = DERIVED_CLASS(oc); - * - * dc->parent_do_something = mc->do_something; - * mc->do_something = derived_do_something; - * } - * - * static const TypeInfo derived_type_info = { - * .name = TYPE_DERIVED, - * .parent = TYPE_MY, - * .class_size = sizeof(DerivedClass), - * .class_init = derived_class_init, - * }; - * - * - * - * Alternatively, object_class_by_name() can be used to obtain the class and - * its non-overridden methods for a specific type. This would correspond to - * |[ MyClass::method(...) ]| in C++. - * - * The first example of such a QOM method was #CPUClass.reset, - * another example is #DeviceClass.realize. - */ - - -/** - * ObjectPropertyAccessor: - * @obj: the object that owns the property - * @v: the visitor that contains the property data - * @opaque: the object property opaque - * @name: the name of the property - * @errp: a pointer to an Error that is filled if getting/setting fails. - * - * Called when trying to get/set a property. - */ -typedef void (ObjectPropertyAccessor)(struct uc_struct *uc, Object *obj, - struct Visitor *v, - void *opaque, - const char *name, - Error **errp); -typedef int (ObjectPropertySetAccessor)(struct uc_struct *uc, Object *obj, - struct Visitor *v, - void *opaque, - const char *name, - Error **errp); - -/** - * ObjectPropertyResolve: - * @obj: the object that owns the property - * @opaque: the opaque registered with the property - * @part: the name of the property - * - * Resolves the #Object corresponding to property @part. - * - * The returned object can also be used as a starting point - * to resolve a relative path starting with "@part". - * - * Returns: If @path is the path that led to @obj, the function - * returns the #Object corresponding to "@path/@part". - * If "@path/@part" is not a valid object path, it returns #NULL. - */ -typedef Object *(ObjectPropertyResolve)(struct uc_struct *uc, Object *obj, - void *opaque, - const char *part); - -/** - * ObjectPropertyRelease: - * @obj: the object that owns the property - * @name: the name of the property - * @opaque: the opaque registered with the property - * - * Called when a property is removed from an object.
- */ -typedef void (ObjectPropertyRelease)(struct uc_struct *uc, Object *obj, - const char *name, - void *opaque); - -typedef struct ObjectProperty -{ - gchar *name; - gchar *type; - gchar *description; - ObjectPropertyAccessor *get; - ObjectPropertySetAccessor *set; - ObjectPropertyResolve *resolve; - ObjectPropertyRelease *release; - void *opaque; - - QTAILQ_ENTRY(ObjectProperty) node; -} ObjectProperty; - -/** - * ObjectUnparent: - * @obj: the object that is being removed from the composition tree - * - * Called when an object is being removed from the QOM composition tree. - * The function should remove any backlinks from children objects to @obj. - */ -typedef void (ObjectUnparent)(struct uc_struct *uc, Object *obj); - -/** - * ObjectFree: - * @obj: the object being freed - * - * Called when an object's last reference is removed. - */ -typedef void (ObjectFree)(void *obj); - -#define OBJECT_CLASS_CAST_CACHE 4 - -/** - * ObjectClass: - * - * The base for all classes. The only thing that #ObjectClass contains is an - * integer type handle. - */ -struct ObjectClass -{ - /*< private >*/ - Type type; - GSList *interfaces; - - const char *object_cast_cache[OBJECT_CLASS_CAST_CACHE]; - const char *class_cast_cache[OBJECT_CLASS_CAST_CACHE]; - - ObjectUnparent *unparent; -}; - -/** - * Object: - * - * The base for all objects. The first member of this object is a pointer to - * a #ObjectClass. Since C guarantees that the first member of a structure - * always begins at byte 0 of that structure, as long as any sub-object places - * its parent as the first member, we can cast directly to a #Object. - * - * As a result, #Object contains a reference to the object's type as its - * first member. This allows identification of the real type of the object at - * run time. - * - * #Object also contains a list of #Interfaces that this object - * implements. - */ -struct Object -{ - /*< private >*/ - ObjectClass *class_; - ObjectFree *free; - QTAILQ_HEAD(, ObjectProperty) properties; - uint32_t ref; - Object *parent; -}; - -/** - * TypeInfo: - * @name: The name of the type. - * @parent: The name of the parent type. - * @instance_size: The size of the object (derivative of #Object). If - * @instance_size is 0, then the size of the object will be the size of the - * parent object. - * @instance_init: This function is called to initialize an object. The parent - * class will have already been initialized so the type is only responsible - * for initializing its own members. - * @instance_post_init: This function is called to finish initialization of - * an object, after all @instance_init functions were called. - * @instance_finalize: This function is called during object destruction. This - * is called before the parent @instance_finalize function has been called. - * An object should only free the members that are unique to its type in this - * function. - * @abstract: If this field is true, then the class is considered abstract and - * cannot be directly instantiated. - * @class_size: The size of the class object (derivative of #ObjectClass) - * for this object. If @class_size is 0, then the size of the class will be - * assumed to be the size of the parent class. This allows a type to avoid - * implementing an explicit class type if they are not adding additional - * virtual functions. - * @class_init: This function is called after all parent class initialization - * has occurred to allow a class to set its default virtual method pointers.
- * This is also the function to use to override virtual methods from a parent - * class. - * @class_base_init: This function is called for all base classes after all - * parent class initialization has occurred, but before the class itself - * is initialized. This is the function to use to undo the effects of - * memcpy from the parent class to the descendants. - * @class_finalize: This function is called during class destruction and is - * meant to release any dynamic parameters allocated by @class_init. - * @class_data: Data to pass to the @class_init, @class_base_init and - * @class_finalize functions. This can be useful when building dynamic - * classes. - * @interfaces: The list of interfaces associated with this type. This - * should point to a static array that's terminated with a zero filled - * element. - */ -struct TypeInfo -{ - const char *name; - const char *parent; - - size_t class_size; - size_t instance_size; - void *instance_userdata; - - void (*instance_init)(struct uc_struct *uc, Object *obj, void *opaque); - void (*instance_post_init)(struct uc_struct *uc, Object *obj); - void (*instance_finalize)(struct uc_struct *uc, Object *obj, void *opaque); - - void *class_data; - - void (*class_init)(struct uc_struct *uc, ObjectClass *klass, void *data); - void (*class_base_init)(ObjectClass *klass, void *data); - void (*class_finalize)(ObjectClass *klass, void *data); - - bool abstract; - - void *parent_type; - ObjectClass *class_; - - InterfaceInfo *interfaces; -}; - -/** - * OBJECT: - * @obj: A derivative of #Object - * - * Converts an object to a #Object. Since all objects are #Objects, - * this function will always succeed. - */ -#define OBJECT(obj) \ - ((Object *)(obj)) - -/** - * OBJECT_CLASS: - * @class: A derivative of #ObjectClass. - * - * Converts a class to an #ObjectClass. Since all objects are #Objects, - * this function will always succeed. - */ -#define OBJECT_CLASS(class) \ - ((ObjectClass *)(class)) - -/** - * OBJECT_CHECK: - * @type: The C type to use for the return value. - * @obj: A derivative of @type to cast. - * @name: The QOM typename of @type - * - * A type safe version of @object_dynamic_cast_assert. Typically each class - * will define a macro based on this type to perform type safe dynamic_casts to - * this object type. - * - * If an invalid object is passed to this function, a run time assert will be - * generated. - */ -#define OBJECT_CHECK(uc, type, obj, name) \ - ((type *)object_dynamic_cast_assert(uc, OBJECT(obj), (name), \ - __FILE__, __LINE__, __func__)) - -/** - * OBJECT_CLASS_CHECK: - * @class: The C type to use for the return value. - * @obj: A derivative of @type to cast. - * @name: the QOM typename of @class. - * - * A type safe version of @object_class_dynamic_cast_assert. This macro is - * typically wrapped by each type to perform type safe casts of a class to a - * specific class type. - */ -#define OBJECT_CLASS_CHECK(uc, class, obj, name) \ - ((class *)object_class_dynamic_cast_assert(uc, OBJECT_CLASS(obj), (name), \ - __FILE__, __LINE__, __func__)) - -/** - * OBJECT_GET_CLASS: - * @class: The C type to use for the return value. - * @obj: The object to obtain the class for. - * @name: The QOM typename of @obj. - * - * This function will return a specific class for a given object. It is generally - * used by each type to provide a type safe macro to get a specific class type - * from an object.
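The unchecked upcasts behind OBJECT() and OBJECT_CHECK() lean on the first-member guarantee spelled out in the #Object comment above, and that trick is plain ISO C that can be demonstrated outside QOM. A standalone sketch; Base and Child are invented names, not part of this API:

#include <stdio.h>

/* The parent is the first member, so a Child pointer is also a valid
 * Base pointer: C guarantees the first member sits at offset 0. */
typedef struct Base  { const char *type_name; } Base;
typedef struct Child { Base parent; int reg0; } Child;

static void print_type(const Base *obj) { printf("%s\n", obj->type_name); }

int main(void)
{
    Child c = { .parent = { .type_name = "my-child" }, .reg0 = 42 };
    print_type((const Base *)&c);   /* the OBJECT()-style upcast */
    return 0;
}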
- */ -#define OBJECT_GET_CLASS(uc, class, obj, name) \ - OBJECT_CLASS_CHECK(uc, class, object_get_class(OBJECT(obj)), name) - -/** - * InterfaceInfo: - * @type: The name of the interface. - * - * The information associated with an interface. - */ -struct InterfaceInfo { - const char *type; -}; - -/** - * InterfaceClass: - * @parent_class: the base class - * - * The class for all interfaces. Subclasses of this class should only add - * virtual methods. - */ -struct InterfaceClass -{ - ObjectClass parent_class; - /*< private >*/ - ObjectClass *concrete_class; - Type interface_type; -}; - -#define TYPE_INTERFACE "interface" - -/** - * INTERFACE_CLASS: - * @klass: class to cast from - * Returns: An #InterfaceClass or raises an error if the cast is invalid - */ -#define INTERFACE_CLASS(uc, klass) \ - OBJECT_CLASS_CHECK(uc, InterfaceClass, klass, TYPE_INTERFACE) - -/** - * INTERFACE_CHECK: - * @interface: the type to return - * @obj: the object to convert to an interface - * @name: the interface type name - * - * Returns: @obj cast to @interface if the cast is valid, otherwise raises an error. - */ -#define INTERFACE_CHECK(uc, interface, obj, name) \ - ((interface *)object_dynamic_cast_assert(uc, OBJECT((obj)), (name), \ - __FILE__, __LINE__, __func__)) - -/** - * object_new: - * @typename: The name of the type of the object to instantiate. - * - * This function will initialize a new object using heap allocated memory. - * The returned object has a reference count of 1, and will be freed when - * the last reference is dropped. - * - * Returns: The newly allocated and instantiated object. - */ -Object *object_new(struct uc_struct *, const char *typename_); - -/** - * object_initialize: - * @obj: A pointer to the memory to be used for the object. - * @size: The maximum size available at @obj for the object. - * @typename: The name of the type of the object to instantiate. - * - * This function will initialize an object. The memory for the object should - * have already been allocated. The returned object has a reference count of 1, - * and will be finalized when the last reference is dropped. - */ -void object_initialize(struct uc_struct *uc, void *obj, size_t size, const char *typename_); - -/** - * object_dynamic_cast: - * @obj: The object to cast. - * @typename: The @typename to cast to. - * - * This function will determine if @obj is-a @typename. @obj can refer to an - * object or an interface associated with an object. - * - * Returns: This function returns @obj on success or #NULL on failure. - */ -Object *object_dynamic_cast(struct uc_struct *uc, Object *obj, const char *typename_); - -/** - * object_dynamic_cast_assert: - * - * See object_dynamic_cast() for a description of the parameters of this - * function. The only difference in behavior is that this function asserts - * instead of returning #NULL on failure if QOM cast debugging is enabled. - * This function is not meant to be called directly, but only through - * the wrapper macro OBJECT_CHECK. - */ -Object *object_dynamic_cast_assert(struct uc_struct *uc, Object *obj, const char *typename_, - const char *file, int line, const char *func); - -/** - * object_get_class: - * @obj: A derivative of #Object - * - * Returns: The #ObjectClass of the type associated with @obj. - */ -ObjectClass *object_get_class(Object *obj); - -/** - * object_get_typename: - * @obj: A derivative of #Object. - * - * Returns: The QOM typename of @obj.
- */ -const char *object_get_typename(Object *obj); - -/** - * type_register_static: - * @info: The #TypeInfo of the new type. - * - * @info and all of the strings it points to should exist for the life time - * that the type is registered. - * - * Returns: 0 on failure, the new #Type on success. - */ -Type type_register_static(struct uc_struct *uc, const TypeInfo *info); - -/** - * type_register: - * @info: The #TypeInfo of the new type - * - * Unlike type_register_static(), this call does not require @info or its - * string members to continue to exist after the call returns. - * - * Returns: 0 on failure, the new #Type on success. - */ -Type type_register(struct uc_struct *uc, const TypeInfo *info); - -/** - * object_class_dynamic_cast_assert: - * @klass: The #ObjectClass to attempt to cast. - * @typename: The QOM typename of the class to cast to. - * - * See object_class_dynamic_cast() for a description of the parameters - * of this function. The only difference in behavior is that this function - * asserts instead of returning #NULL on failure if QOM cast debugging is - * enabled. This function is not meant to be called directly, but only through - * the wrapper macros OBJECT_CLASS_CHECK and INTERFACE_CHECK. - */ -ObjectClass *object_class_dynamic_cast_assert(struct uc_struct *uc, ObjectClass *klass, - const char *typename_, - const char *file, int line, - const char *func); - -/** - * object_class_dynamic_cast: - * @klass: The #ObjectClass to attempt to cast. - * @typename: The QOM typename of the class to cast to. - * - * Returns: If @typename is a class, this function returns @klass if - * @typename is a subtype of @klass, else returns #NULL. - * - * If @typename is an interface, this function returns the interface - * definition for @klass if @klass implements it unambiguously; #NULL - * is returned if @klass does not implement the interface or if multiple - * classes or interfaces on the hierarchy leading to @klass implement - * it. (FIXME: perhaps this can be detected at type definition time?) - */ -ObjectClass *object_class_dynamic_cast(struct uc_struct *uc, ObjectClass *klass, - const char *typename_); - -/** - * object_class_get_parent: - * @klass: The class to obtain the parent for. - * - * Returns: The parent for @klass or %NULL if none. - */ -ObjectClass *object_class_get_parent(struct uc_struct *uc, ObjectClass *klass); - -/** - * object_class_get_name: - * @klass: The class to obtain the QOM typename for. - * - * Returns: The QOM typename for @klass. - */ -const char *object_class_get_name(ObjectClass *klass); - -/** - * object_class_is_abstract: - * @klass: The class to obtain the abstractness for. - * - * Returns: %true if @klass is abstract, %false otherwise. - */ -bool object_class_is_abstract(ObjectClass *klass); - -/** - * object_class_by_name: - * @typename: The QOM typename to obtain the class for. - * - * Returns: The class for @typename or %NULL if not found. - */ -ObjectClass *object_class_by_name(struct uc_struct *uc, const char *typename_); - -void object_class_foreach(struct uc_struct *uc, void (*fn)(ObjectClass *klass, void *opaque), - const char *implements_type, bool include_abstract, - void *opaque); - -/** - * object_class_get_list: - * @implements_type: The type to filter for, including its derivatives. - * @include_abstract: Whether to include abstract classes. - * - * Returns: A singly-linked list of the classes in reverse hashtable order. 
- */ -GSList *object_class_get_list(struct uc_struct *uc, const char *implements_type, - bool include_abstract); - -/** - * object_ref: - * @obj: the object - * - * Increase the reference count of an object. An object cannot be freed as long - * as its reference count is greater than zero. - */ -void object_ref(Object *obj); - -/** - * object_unref: - * @obj: the object - * - * Decrease the reference count of an object. An object cannot be freed as long - * as its reference count is greater than zero. - */ -void object_unref(struct uc_struct *uc, Object *obj); - -/** - * object_property_add: - * @obj: the object to add a property to - * @name: the name of the property. This can contain any character except for - * a forward slash. In general, you should use hyphens '-' instead of - * underscores '_' when naming properties. - * @type: the type name of the property. This namespace is pretty loosely - * defined. Sub namespaces are constructed by using a prefix and then - * angle brackets. For instance, the type 'virtio-net-pci' in the - * 'link' namespace would be 'link<virtio-net-pci>'. - * @get: The getter to be called to read a property. If this is NULL, then - * the property cannot be read. - * @set: the setter to be called to write a property. If this is NULL, - * then the property cannot be written. - * @release: called when the property is removed from the object. This is - * meant to allow a property to free its opaque upon object - * destruction. This may be NULL. - * @opaque: an opaque pointer to pass to the callbacks for the property - * @errp: returns an error if this function fails - * - * Returns: The #ObjectProperty; this can be used to set the @resolve - * callback for child and link properties. - */ -ObjectProperty *object_property_add(Object *obj, const char *name, - const char *type, - ObjectPropertyAccessor *get, - ObjectPropertySetAccessor *set, - ObjectPropertyRelease *release, - void *opaque, Error **errp); - -void object_property_del(struct uc_struct *uc, Object *obj, const char *name, Error **errp); - -void object_property_del_child(struct uc_struct *uc, Object *obj, Object *child, Error **errp); - -/** - * object_property_find: - * @obj: the object - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Look up a property for an object and return its #ObjectProperty if found. - */ -ObjectProperty *object_property_find(Object *obj, const char *name, - Error **errp); - -void object_unparent(struct uc_struct *uc, Object *obj); - -/** - * object_property_get: - * @obj: the object - * @v: the visitor that will receive the property value. This should be an - * Output visitor and the data will be written with @name as the name. - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Reads a property from an object. - */ -void object_property_get(struct uc_struct *uc, Object *obj, struct Visitor *v, const char *name, - Error **errp); - -/** - * object_property_set_str: - * @value: the value to be written to the property - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Writes a string value to a property.
- */ -void object_property_set_str(struct uc_struct *uc, Object *obj, const char *value, - const char *name, Error **errp); - -/** - * object_property_get_str: - * @obj: the object - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Returns: the value of the property, converted to a C string, or NULL if - * an error occurs (including when the property value is not a string). - * The caller should free the string. - */ -char *object_property_get_str(struct uc_struct *uc, Object *obj, const char *name, - Error **errp); - -/** - * object_property_set_link: - * @value: the value to be written to the property - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Writes an object's canonical path to a property. - */ -void object_property_set_link(struct uc_struct *uc, Object *obj, Object *value, - const char *name, Error **errp); - -/** - * object_property_get_link: - * @obj: the object - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Returns: the value of the property, resolved from a path to an Object, - * or NULL if an error occurs (including when the property value is not a - * string or not a valid object path). - */ -Object *object_property_get_link(struct uc_struct *uc, Object *obj, const char *name, - Error **errp); - -/** - * object_property_set_bool: - * @value: the value to be written to the property - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Writes a bool value to a property. - */ -void object_property_set_bool(struct uc_struct *uc, Object *obj, bool value, - const char *name, Error **errp); - -/** - * object_property_get_bool: - * @obj: the object - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Returns: the value of the property, converted to a boolean, or %false if - * an error occurs (including when the property value is not a bool). - */ -bool object_property_get_bool(struct uc_struct *uc, Object *obj, const char *name, - Error **errp); - -/** - * object_property_set_int: - * @value: the value to be written to the property - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Writes an integer value to a property. - */ -void object_property_set_int(struct uc_struct *uc, Object *obj, int64_t value, - const char *name, Error **errp); - -/** - * object_property_get_int: - * @obj: the object - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Returns: the value of the property, converted to an integer, or -1 if - * an error occurs (including when the property value is not an integer). - */ -int64_t object_property_get_int(struct uc_struct *uc, Object *obj, const char *name, - Error **errp); - -/** - * object_property_set: - * @obj: the object - * @v: the visitor that will be used to write the property value. This should - * be an Input visitor and the data will be first read with @name as the - * name and then written as the property value. - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Writes a property to an object. - */ -void object_property_set(struct uc_struct *uc, Object *obj, struct Visitor *v, const char *name, - Error **errp); - -/** - * object_property_parse: - * @obj: the object - * @string: the string that will be used to parse the property value.
- * @name: the name of the property - * @errp: returns an error if this function fails - * - * Parses a string and writes the result into a property of an object. - */ -void object_property_parse(struct uc_struct *uc, Object *obj, const char *string, - const char *name, Error **errp); - -/** - * object_property_get_type: - * @obj: the object - * @name: the name of the property - * @errp: returns an error if this function fails - * - * Returns: The type name of the property. - */ -const char *object_property_get_type(Object *obj, const char *name, - Error **errp); - -/** - * object_get_root: - * - * Returns: the root object of the composition tree - */ -Object *object_get_root(struct uc_struct *uc); - -/** - * object_get_canonical_path_component: - * - * Returns: The final component in the object's canonical path. The canonical - * path is the path within the composition tree starting from the root. - */ -gchar *object_get_canonical_path_component(Object *obj); - -/** - * object_get_canonical_path: - * - * Returns: The canonical path for a object. This is the path within the - * composition tree starting from the root. - */ -gchar *object_get_canonical_path(Object *obj); - -/** - * object_resolve_path: - * @path: the path to resolve - * @ambiguous: returns true if the path resolution failed because of an - * ambiguous match - * - * There are two types of supported paths--absolute paths and partial paths. - * - * Absolute paths are derived from the root object and can follow child<> or - * link<> properties. Since they can follow link<> properties, they can be - * arbitrarily long. Absolute paths look like absolute filenames and are - * prefixed with a leading slash. - * - * Partial paths look like relative filenames. They do not begin with a - * prefix. The matching rules for partial paths are subtle but designed to make - * specifying objects easy. At each level of the composition tree, the partial - * path is matched as an absolute path. The first match is not returned. At - * least two matches are searched for. A successful result is only returned if - * only one match is found. If more than one match is found, a flag is - * returned to indicate that the match was ambiguous. - * - * Returns: The matched object or NULL on path lookup failure. - */ -Object *object_resolve_path(struct uc_struct *uc, const char *path, bool *ambiguous); - -/** - * object_resolve_path_type: - * @path: the path to resolve - * @typename: the type to look for. - * @ambiguous: returns true if the path resolution failed because of an - * ambiguous match - * - * This is similar to object_resolve_path. However, when looking for a - * partial path only matches that implement the given type are considered. - * This restricts the search and avoids spuriously flagging matches as - * ambiguous. - * - * For both partial and absolute paths, the return value goes through - * a dynamic cast to @typename. This is important if either the link, - * or the typename itself are of interface types. - * - * Returns: The matched object or NULL on path lookup failure. - */ -Object *object_resolve_path_type(struct uc_struct *uc, const char *path, const char *typename_, - bool *ambiguous); - -/** - * object_resolve_path_component: - * @parent: the object in which to resolve the path - * @part: the component to resolve. - * - * This is similar to object_resolve_path with an absolute path, but it - * only resolves one element (@part) and takes the others from @parent. - * - * Returns: The resolved object or NULL on path lookup failure. 
- */ -Object *object_resolve_path_component(struct uc_struct *uc, Object *parent, const gchar *part); - -/** - * object_property_add_child: - * @obj: the object to add a property to - * @name: the name of the property - * @child: the child object - * @errp: if an error occurs, a pointer to an area to store the error - * - * Child properties form the composition tree. All objects need to be a child - * of another object. Objects can only be a child of one object. - * - * There is no way for a child to determine what its parent is. It is not - * a bidirectional relationship. This is by design. - * - * The value of a child property as a C string will be the child object's - * canonical path. It can be retrieved using object_property_get_str(). - * The child object itself can be retrieved using object_property_get_link(). - */ -void object_property_add_child(Object *obj, const char *name, - Object *child, Error **errp); - -typedef enum { - /* Unref the link pointer when the property is deleted */ - OBJ_PROP_LINK_UNREF_ON_RELEASE = 0x1, -} ObjectPropertyLinkFlags; - -/** - * object_property_allow_set_link: - * - * The default implementation of the object_property_add_link() check() - * callback function. It allows the link property to be set and never returns - * an error. - */ -void object_property_allow_set_link(Object *, const char *, - Object *, Error **); - -/** - * object_property_add_link: - * @obj: the object to add a property to - * @name: the name of the property - * @type: the qobj type of the link - * @child: a pointer to where the link object reference is stored - * @check: callback to veto setting or NULL if the property is read-only - * @flags: additional options for the link - * @errp: if an error occurs, a pointer to an area to store the error - * - * Links establish relationships between objects. Links are unidirectional - * although two links can be combined to form a bidirectional relationship - * between objects. - * - * Links form the graph in the object model. - * - * The @check() callback is invoked when - * object_property_set_link() is called and can raise an error to prevent the - * link being set. If @check is NULL, the property is read-only - * and cannot be set. - * - * Ownership of the pointer that @child points to is transferred to the - * link property. The reference count for *@child is - * managed by the property from after the function returns until the - * property is deleted with object_property_del(). If the - * @flags OBJ_PROP_LINK_UNREF_ON_RELEASE bit is set, - * the reference count is decremented when the property is deleted. - */ -void object_property_add_link(Object *obj, const char *name, - const char *type, Object **child, - void (*check)(Object *obj, const char *name, - Object *val, Error **errp), - ObjectPropertyLinkFlags flags, - Error **errp); - -/** - * object_property_add_str: - * @obj: the object to add a property to - * @name: the name of the property - * @get: the getter or NULL if the property is write-only. This function must - * return a string to be freed by g_free(). - * @set: the setter or NULL if the property is read-only - * @errp: if an error occurs, a pointer to an area to store the error - * - * Add a string property using getters/setters. This function will add a - * property of type 'string'.
- */ -void object_property_add_str(Object *obj, const char *name, - char *(*get)(struct uc_struct *uc, Object *, Error **), - int (*set)(struct uc_struct *uc, Object *, const char *, Error **), - Error **errp); - -/** - * object_property_add_bool: - * @obj: the object to add a property to - * @name: the name of the property - * @get: the getter or NULL if the property is write-only. - * @set: the setter or NULL if the property is read-only - * @errp: if an error occurs, a pointer to an area to store the error - * - * Add a bool property using getters/setters. This function will add a - * property of type 'bool'. - */ -void object_property_add_bool(struct uc_struct *uc, Object *obj, const char *name, - bool (*get)(struct uc_struct *uc, Object *, Error **), - int (*set)(struct uc_struct *uc, Object *, bool, Error **), - Error **errp); - -/** - * object_property_add_uint8_ptr: - * @obj: the object to add a property to - * @name: the name of the property - * @v: pointer to value - * @errp: if an error occurs, a pointer to an area to store the error - * - * Add an integer property in memory. This function will add a - * property of type 'uint8'. - */ -void object_property_add_uint8_ptr(Object *obj, const char *name, - const uint8_t *v, Error **errp); - -/** - * object_property_add_uint16_ptr: - * @obj: the object to add a property to - * @name: the name of the property - * @v: pointer to value - * @errp: if an error occurs, a pointer to an area to store the error - * - * Add an integer property in memory. This function will add a - * property of type 'uint16'. - */ -void object_property_add_uint16_ptr(Object *obj, const char *name, - const uint16_t *v, Error **errp); - -/** - * object_property_add_uint32_ptr: - * @obj: the object to add a property to - * @name: the name of the property - * @v: pointer to value - * @errp: if an error occurs, a pointer to an area to store the error - * - * Add an integer property in memory. This function will add a - * property of type 'uint32'. - */ -void object_property_add_uint32_ptr(Object *obj, const char *name, - const uint32_t *v, Error **errp); - -/** - * object_property_add_uint64_ptr: - * @obj: the object to add a property to - * @name: the name of the property - * @v: pointer to value - * @errp: if an error occurs, a pointer to an area to store the error - * - * Add an integer property in memory. This function will add a - * property of type 'uint64'. - */ -void object_property_add_uint64_ptr(Object *obj, const char *name, - const uint64_t *v, Error **errp); - -/** - * object_property_add_alias: - * @obj: the object to add a property to - * @name: the name of the property - * @target_obj: the object to forward property access to - * @target_name: the name of the property on the forwarded object - * @errp: if an error occurs, a pointer to an area to store the error - * - * Add an alias for a property on an object. This function will add a property - * of the same type as the forwarded property. - * - * The caller must ensure that @target_obj stays alive as long as - * this property exists. In the case of a child object or an alias on the same - * object this will be the case. For aliases to other objects the caller is - * responsible for taking a reference.
- */
-void object_property_add_alias(Object *obj, const char *name,
-                               Object *target_obj, const char *target_name,
-                               Error **errp);
-
-/**
- * object_property_set_description:
- * @obj: the object owning the property
- * @name: the name of the property
- * @description: the description of the property on the object
- * @errp: if an error occurs, a pointer to an area to store the error
- *
- * Set an object property's description.
- *
- */
-void object_property_set_description(Object *obj, const char *name,
-                                     const char *description, Error **errp);
-
-/**
- * object_child_foreach:
- * @obj: the object whose children will be navigated
- * @fn: the iterator function to be called
- * @opaque: an opaque value that will be passed to the iterator
- *
- * Call @fn passing each child of @obj and @opaque to it, until @fn returns
- * non-zero.
- *
- * Returns: The last value returned by @fn, or 0 if there is no child.
- */
-int object_child_foreach(Object *obj, int (*fn)(Object *child, void *opaque),
-                         void *opaque);
-
-/**
- * container_get:
- * @root: root of the #path, e.g., object_get_root()
- * @path: path to the container
- *
- * Return a container object whose path is @path. Create more containers
- * along the path if necessary.
- *
- * Returns: the container object.
- */
-Object *container_get(struct uc_struct *uc, Object *root, const char *path);
-
-void container_register_types(struct uc_struct *uc);
-
-void register_types_object(struct uc_struct *uc);
-
-#endif
diff --git a/qemu/include/qom/qom-qobject.h b/qemu/include/qom/qom-qobject.h
deleted file mode 100644
index e22e1642..00000000
--- a/qemu/include/qom/qom-qobject.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * QEMU Object Model - QObject wrappers
- *
- * Copyright (C) 2012 Red Hat, Inc.
- *
- * Author: Paolo Bonzini
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMU_QOM_QOBJECT_H
-#define QEMU_QOM_QOBJECT_H
-
-#include "qom/object.h"
-
-/*
- * object_property_get_qobject:
- * @obj: the object
- * @name: the name of the property
- * @errp: returns an error if this function fails
- *
- * Returns: the value of the property, converted to QObject, or NULL if
- * an error occurs.
- */
-struct QObject *object_property_get_qobject(struct uc_struct *uc, Object *obj, const char *name,
-                                            struct Error **errp);
-
-/**
- * object_property_set_qobject:
- * @obj: the object
- * @ret: The value that will be written to the property.
- * @name: the name of the property
- * @errp: returns an error if this function fails
- *
- * Writes a property to an object.
- */ -void object_property_set_qobject(struct uc_struct *uc, Object *obj, struct QObject *qobj, - const char *name, struct Error **errp); - -#endif diff --git a/qemu/include/sysemu/cpus.h b/qemu/include/sysemu/cpus.h index 8a55e0b9..ee8fb60d 100644 --- a/qemu/include/sysemu/cpus.h +++ b/qemu/include/sysemu/cpus.h @@ -1,20 +1,25 @@ #ifndef QEMU_CPUS_H #define QEMU_CPUS_H -struct uc_struct; +#include "qemu/timer.h" /* cpus.c */ -int resume_all_vcpus(struct uc_struct*); -void cpu_stop_current(struct uc_struct*); +bool qemu_in_vcpu_thread(void); +void qemu_init_cpu_loop(void); +void resume_all_vcpus(struct uc_struct* uc); +void cpu_stop_current(struct uc_struct* uc); +void cpu_ticks_init(void); -#ifndef CONFIG_USER_ONLY -/* vl.c */ -extern int smp_cores; -extern int smp_threads; -#else -/* *-user doesn't have configurable SMP topology */ -#define smp_cores 1 -#define smp_threads 1 -#endif +/* Unblock cpu */ +void qemu_cpu_kick_self(void); + +void cpu_synchronize_all_states(void); +void cpu_synchronize_all_post_reset(void); +void cpu_synchronize_all_post_init(void); +void cpu_synchronize_all_pre_loadvm(void); + +void qtest_clock_warp(int64_t dest); + +void list_cpus(const char *optarg); #endif diff --git a/qemu/include/sysemu/memory_mapping.h b/qemu/include/sysemu/memory_mapping.h index dcf35987..4fec2736 100644 --- a/qemu/include/sysemu/memory_mapping.h +++ b/qemu/include/sysemu/memory_mapping.h @@ -15,28 +15,10 @@ #define MEMORY_MAPPING_H #include "qemu/queue.h" -#include "qemu/typedefs.h" +#include "exec/cpu-defs.h" +#include "exec/memory.h" -typedef struct GuestPhysBlock { - /* visible to guest, reflects PCI hole, etc */ - hwaddr target_start; - - /* implies size */ - hwaddr target_end; - - /* points into host memory */ - uint8_t *host_addr; - - QTAILQ_ENTRY(GuestPhysBlock) next; -} GuestPhysBlock; - -/* point-in-time snapshot of guest-visible physical mappings */ -typedef struct GuestPhysBlockList { - unsigned num; - QTAILQ_HEAD(GuestPhysBlockHead, GuestPhysBlock) head; -} GuestPhysBlockList; - -/* The physical and virtual address in the memory mapping are contiguous. */ + /* The physical and virtual address in the memory mapping are contiguous. */ typedef struct MemoryMapping { hwaddr phys_addr; target_ulong virt_addr; @@ -60,24 +42,4 @@ void memory_mapping_list_add_merge_sorted(MemoryMappingList *list, hwaddr virt_addr, ram_addr_t length); -void memory_mapping_list_free(MemoryMappingList *list); - -void memory_mapping_list_init(MemoryMappingList *list); - -void guest_phys_blocks_free(GuestPhysBlockList *list); -void guest_phys_blocks_init(GuestPhysBlockList *list); -//void guest_phys_blocks_append(GuestPhysBlockList *list); - -void qemu_get_guest_memory_mapping(struct uc_struct *uc, - MemoryMappingList *list, - const GuestPhysBlockList *guest_phys_blocks, - Error **errp); - -/* get guest's memory mapping without do paging(virtual address is 0). */ -void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list, - const GuestPhysBlockList *guest_phys_blocks); - -void memory_mapping_filter(MemoryMappingList *list, int64_t begin, - int64_t length); - #endif diff --git a/qemu/include/sysemu/os-win32.h b/qemu/include/sysemu/os-win32.h index 7825c310..44e37d7e 100644 --- a/qemu/include/sysemu/os-win32.h +++ b/qemu/include/sysemu/os-win32.h @@ -28,56 +28,27 @@ #include #include -#include - -/* Workaround for older versions of MinGW. 
 */
-#ifndef ECONNREFUSED
-# define ECONNREFUSED WSAECONNREFUSED
-#endif
-#ifndef EINPROGRESS
-# define EINPROGRESS WSAEINPROGRESS
-#endif
-#ifndef EHOSTUNREACH
-# define EHOSTUNREACH WSAEHOSTUNREACH
-#endif
-#ifndef EINTR
-# define EINTR WSAEINTR
-#endif
-#ifndef EINPROGRESS
-# define EINPROGRESS WSAEINPROGRESS
-#endif
-#ifndef ENETUNREACH
-# define ENETUNREACH WSAENETUNREACH
-#endif
-#ifndef ENOTCONN
-# define ENOTCONN WSAENOTCONN
-#endif
-#ifndef EWOULDBLOCK
-# define EWOULDBLOCK WSAEWOULDBLOCK
-#endif
+#include 
 
 #if defined(_WIN64)
 /* On w64, setjmp is implemented by _setjmp which needs a second parameter.
  * If this parameter is NULL, longjmp does no stack unwinding.
  * That is what we need for QEMU. Passing the value of register rsp (default)
  * lets longjmp try a stack unwinding which will crash with generated code. */
-
-#if defined(_MSC_VER) // MSVC
-
+#if defined(_MSC_VER) // MSVC
 // See qemu/include/utils/setjmp-wrapper-win32.asm for details.
 extern int _setjmp_wrapper(jmp_buf);
+#undef setjmp
+#define setjmp(env) _setjmp_wrapper(env)
 
-# undef setjmp
-# define setjmp(env) _setjmp_wrapper(env)
+#else // MinGW
 
-#else // MinGW
-
-// Original QEMU patch.
-# undef setjmp
-# define setjmp(env) _setjmp(env, NULL)
+#undef setjmp
+#define setjmp(env) _setjmp(env, NULL)
 
 #endif
 #endif
+
 /* QEMU uses sigsetjmp()/siglongjmp() as the portable way to specify
  * "longjmp and don't touch the signal masks". Since we know that the
  * savemask parameter will always be zero we can safely define these
  *
@@ -87,10 +58,6 @@ extern int _setjmp_wrapper(jmp_buf);
 #define sigsetjmp(env, savemask) setjmp(env)
 #define siglongjmp(env, val) longjmp(env, val)
 
-size_t getpagesize(void);
-
-#if !defined(EPROTONOSUPPORT)
-# define EPROTONOSUPPORT EINVAL
-#endif
+int getpagesize(void);
 
 #endif
diff --git a/qemu/include/sysemu/sysemu.h b/qemu/include/sysemu/sysemu.h
index e5c93292..25b1c1b9 100644
--- a/qemu/include/sysemu/sysemu.h
+++ b/qemu/include/sysemu/sysemu.h
@@ -1,27 +1,9 @@
 #ifndef SYSEMU_H
 #define SYSEMU_H
 
-/* Misc. things related to the system emulator. */
-
-#include "qemu/timer.h"
-#include "qapi/error.h"
-
-/* vl.c */
 struct uc_struct;
 
-int runstate_is_running(void);
-typedef struct vm_change_state_entry VMChangeStateEntry;
-
-#define VMRESET_SILENT false
-#define VMRESET_REPORT true
-
-int vm_start(struct uc_struct*);
-
 void qemu_system_reset_request(struct uc_struct*);
-void qemu_system_shutdown_request(void);
-void qemu_system_powerdown_request(void);
-void qemu_system_reset(bool report);
-
-extern int smp_cpus;
+void qemu_system_shutdown_request(struct uc_struct*);
 
 #endif
diff --git a/qemu/include/sysemu/tcg.h b/qemu/include/sysemu/tcg.h
new file mode 100644
index 00000000..ea267831
--- /dev/null
+++ b/qemu/include/sysemu/tcg.h
@@ -0,0 +1,19 @@
+/*
+ * QEMU TCG support
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SYSEMU_TCG_H
+#define SYSEMU_TCG_H
+
+#include 
+
+//#include "uc_priv.h"
+
+struct uc_struct;
+
+void tcg_exec_init(struct uc_struct *uc, unsigned long tb_size);
+
+#endif
diff --git a/qemu/include/tcg/tcg-apple-jit.h b/qemu/include/tcg/tcg-apple-jit.h
new file mode 100644
index 00000000..4ba6008d
--- /dev/null
+++ b/qemu/include/tcg/tcg-apple-jit.h
@@ -0,0 +1,45 @@
+/*
+ * Apple Silicon APRR functions for JIT handling
+ *
+ * Copyright (c) 2020 osy
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Credits to: https://siguza.github.io/APRR/
+ * Reversed from /usr/lib/system/libsystem_pthread.dylib
+ */
+
+#ifndef TCG_APPLE_JIT_H
+#define TCG_APPLE_JIT_H
+
+#ifdef HAVE_PTHREAD_JIT_PROTECT
+
+/* write protect enable = write disable */
+static inline void jit_write_protect(int enabled)
+{
+    return pthread_jit_write_protect_np(enabled);
+}
+
+#else /* !HAVE_PTHREAD_JIT_PROTECT */
+
+static inline void jit_write_protect(int enabled)
+{
+    return;
+}
+
+#endif
+
+#endif /* TCG_APPLE_JIT_H */
\ No newline at end of file
diff --git a/qemu/include/tcg/tcg-gvec-desc.h b/qemu/include/tcg/tcg-gvec-desc.h
new file mode 100644
index 00000000..0224ac3e
--- /dev/null
+++ b/qemu/include/tcg/tcg-gvec-desc.h
@@ -0,0 +1,54 @@
+/*
+ * Generic vector operation descriptor
+ *
+ * Copyright (c) 2018 Linaro
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TCG_TCG_GVEC_DESC_H
+#define TCG_TCG_GVEC_DESC_H
+
+/* ??? These bit widths are set for ARM SVE, maxing out at 256 byte vectors. */
+#define SIMD_OPRSZ_SHIFT 0
+#define SIMD_OPRSZ_BITS 5
+
+#define SIMD_MAXSZ_SHIFT (SIMD_OPRSZ_SHIFT + SIMD_OPRSZ_BITS)
+#define SIMD_MAXSZ_BITS 5
+
+#define SIMD_DATA_SHIFT (SIMD_MAXSZ_SHIFT + SIMD_MAXSZ_BITS)
+#define SIMD_DATA_BITS (32 - SIMD_DATA_SHIFT)
+
+/* Create a descriptor from components. */
+uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data);
+
+/* Extract the operation size from a descriptor. */
+static inline intptr_t simd_oprsz(uint32_t desc)
+{
+    return (extract32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS) + 1) * 8;
+}
+
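Both size fields above store (bytes / 8) - 1, so a 5-bit field spans 8 through 256
bytes. A worked round trip, as an illustration only (simd_desc() itself is implemented
in tcg-op-gvec.c, and assert.h is pulled in purely for the check; simd_maxsz() and
simd_data() are the accessors defined just below):

    #include <assert.h>

    static void check_desc_roundtrip(void)
    {
        /* Pack oprsz = maxsz = 32 bytes with 7 as opaque operation data... */
        uint32_t desc = simd_desc(32, 32, 7);

        /* ...and recover each component with the accessors in this header. */
        assert(simd_oprsz(desc) == 32);
        assert(simd_maxsz(desc) == 32);
        assert(simd_data(desc) == 7);
    }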
+/* Extract the max vector size from a descriptor. */
+static inline intptr_t simd_maxsz(uint32_t desc)
+{
+    return (extract32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS) + 1) * 8;
+}
+
+/* Extract the operation-specific data from a descriptor. */
+static inline int32_t simd_data(uint32_t desc)
+{
+    return sextract32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS);
+}
+
+#endif
diff --git a/qemu/include/sysemu/accel.h b/qemu/include/tcg/tcg-mo.h
similarity index 52%
rename from qemu/include/sysemu/accel.h
rename to qemu/include/tcg/tcg-mo.h
index 3abba241..c2c55704 100644
--- a/qemu/include/sysemu/accel.h
+++ b/qemu/include/tcg/tcg-mo.h
@@ -1,6 +1,7 @@
-/* QEMU accelerator interfaces
+/*
+ * Tiny Code Generator for QEMU
 *
- * Copyright (c) 2014 Red Hat Inc
+ * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
@@ -20,43 +21,28 @@
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
-#ifndef HW_ACCEL_H
-#define HW_ACCEL_H
 
-#include "qemu/typedefs.h"
-#include "qom/object.h"
+#ifndef TCG_MO_H
+#define TCG_MO_H
 
-typedef struct AccelState {
-    /*< private >*/
-    Object parent_obj;
-} AccelState;
+typedef enum {
+    /* Used to indicate the type of accesses on which ordering
+       is to be ensured. Modeled after SPARC barriers.
 
-typedef struct AccelClass {
-    /*< private >*/
-    ObjectClass parent_class;
-    /*< public >*/
+       This is of the form TCG_MO_A_B where A is before B in program order.
+    */
+    TCG_MO_LD_LD = 0x01,
+    TCG_MO_ST_LD = 0x02,
+    TCG_MO_LD_ST = 0x04,
+    TCG_MO_ST_ST = 0x08,
+    TCG_MO_ALL   = 0x0F, /* OR of the above */
 
-    const char *opt_name;
-    const char *name;
-    int (*available)(void);
-    int (*init_machine)(MachineState *ms);
-    bool *allowed;
-} AccelClass;
+    /* Used to indicate the kind of ordering which is to be ensured by the
+       instruction. These types are derived from x86/aarch64 instructions.
+       It should be noted that these are different from C11 semantics. */
+    TCG_BAR_LDAQ = 0x10, /* Following ops will not come forward */
+    TCG_BAR_STRL = 0x20, /* Previous ops will not be delayed */
+    TCG_BAR_SC   = 0x30, /* No ops cross barrier; OR of the above */
+} TCGBar;
 
-#define TYPE_ACCEL "accel"
-
-#define ACCEL_CLASS_SUFFIX "-" TYPE_ACCEL
-#define ACCEL_CLASS_NAME(a) (a ACCEL_CLASS_SUFFIX)
-
-#define ACCEL_CLASS(uc, klass) \
-    OBJECT_CLASS_CHECK(uc, AccelClass, (klass), TYPE_ACCEL)
-#define ACCEL(uc, obj) \
-    OBJECT_CHECK(uc, AccelState, (obj), TYPE_ACCEL)
-#define ACCEL_GET_CLASS(uc, obj) \
-    OBJECT_GET_CLASS(uc, AccelClass, (obj), TYPE_ACCEL)
-
-int configure_accelerator(MachineState *ms);
-
-void register_accel_types(struct uc_struct *uc);
-
-#endif
+#endif /* TCG_MO_H */
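A TCG_MO_* mask picks which load/store pairs are ordered, and the TCG_BAR_* part says
how strongly; the two halves are OR'd into the single argument of tcg_gen_mb(),
declared in tcg-op.h later in this patch. A minimal sketch in this fork's
context-explicit style:

    /* Sketch: a sequentially consistent full fence - no load or store
     * may be reordered across it in either direction. */
    static void gen_full_fence(TCGContext *tcg_ctx)
    {
        tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC);
    }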
diff --git a/qemu/include/tcg/tcg-op-gvec.h b/qemu/include/tcg/tcg-op-gvec.h
new file mode 100644
index 00000000..dd414fc7
--- /dev/null
+++ b/qemu/include/tcg/tcg-op-gvec.h
@@ -0,0 +1,387 @@
+/*
+ * Generic vector operation expansion
+ *
+ * Copyright (c) 2018 Linaro
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TCG_TCG_OP_GVEC_H
+#define TCG_TCG_OP_GVEC_H
+
+/*
+ * "Generic" vectors. All operands are given as offsets from ENV,
+ * and therefore cannot also be allocated via tcg_global_mem_new_*.
+ * OPRSZ is the byte size of the vector upon which the operation is performed.
+ * MAXSZ is the byte size of the full vector; bytes beyond OPRSZ are cleared.
+ *
+ * All sizes must be 8 or any multiple of 16.
+ * When OPRSZ is 8, the alignment may be 8, otherwise must be 16.
+ * Operands may completely, but not partially, overlap.
+ */
+
+/* Expand a call to a gvec-style helper, with pointers to two vector
+   operands, and a descriptor (see tcg-gvec-desc.h). */
+typedef void gen_helper_gvec_2(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_2_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
+                        uint32_t oprsz, uint32_t maxsz, int32_t data,
+                        gen_helper_gvec_2 *fn);
+
+/* Similarly, passing an extra data value. */
+typedef void gen_helper_gvec_2i(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
+void tcg_gen_gvec_2i_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, TCGv_i64 c,
+                         uint32_t oprsz, uint32_t maxsz, int32_t data,
+                         gen_helper_gvec_2i *fn);
+
+/* Similarly, passing an extra pointer (e.g. env or float_status). */
+typedef void gen_helper_gvec_2_ptr(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_2_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
+                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
+                        int32_t data, gen_helper_gvec_2_ptr *fn);
+
+/* Similarly, with three vector operands. */
+typedef void gen_helper_gvec_3(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_3_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                        uint32_t oprsz, uint32_t maxsz, int32_t data,
+                        gen_helper_gvec_3 *fn);
+
+/* Similarly, with four vector operands. */
+typedef void gen_helper_gvec_4(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr,
+                               TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_4_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                        uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
+                        int32_t data, gen_helper_gvec_4 *fn);
+
+/* Similarly, with five vector operands.
*/ +typedef void gen_helper_gvec_5(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, + TCGv_ptr, TCGv_i32); +void tcg_gen_gvec_5_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t cofs, uint32_t xofs, uint32_t oprsz, + uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn); + +typedef void gen_helper_gvec_3_ptr(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, + TCGv_ptr, TCGv_i32); +void tcg_gen_gvec_3_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz, + int32_t data, gen_helper_gvec_3_ptr *fn); + +typedef void gen_helper_gvec_4_ptr(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, + TCGv_ptr, TCGv_ptr, TCGv_i32); +void tcg_gen_gvec_4_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz, + uint32_t maxsz, int32_t data, + gen_helper_gvec_4_ptr *fn); + +typedef void gen_helper_gvec_5_ptr(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, + TCGv_ptr, TCGv_ptr, TCGv_i32); +void tcg_gen_gvec_5_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t cofs, uint32_t eofs, TCGv_ptr ptr, + uint32_t oprsz, uint32_t maxsz, int32_t data, + gen_helper_gvec_5_ptr *fn); + +/* Expand a gvec operation. Either inline or out-of-line depending on + the actual vector size and the operations supported by the host. */ +typedef struct { + /* Expand inline as a 64-bit or 32-bit integer. + Only one of these will be non-NULL. */ + void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64); + void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32); + /* Expand inline with a host vector type. */ + void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec); + /* Expand out-of-line helper w/descriptor. */ + gen_helper_gvec_2 *fno; + /* The optional opcodes, if any, utilized by .fniv. */ + const TCGOpcode *opt_opc; + /* The data argument to the out-of-line helper. */ + int32_t data; + /* The vector element size, if applicable. */ + uint8_t vece; + /* Prefer i64 to v64. */ + bool prefer_i64; +} GVecGen2; + +typedef struct { + /* Expand inline as a 64-bit or 32-bit integer. + Only one of these will be non-NULL. */ + void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, int64_t); + void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, int32_t); + /* Expand inline with a host vector type. */ + void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, int64_t); + /* Expand out-of-line helper w/descriptor, data in descriptor. */ + gen_helper_gvec_2 *fno; + /* Expand out-of-line helper w/descriptor, data as argument. */ + gen_helper_gvec_2i *fnoi; + /* The optional opcodes, if any, utilized by .fniv. */ + const TCGOpcode *opt_opc; + /* The vector element size, if applicable. */ + uint8_t vece; + /* Prefer i64 to v64. */ + bool prefer_i64; + /* Load dest as a 3rd source operand. */ + bool load_dest; +} GVecGen2i; + +typedef struct { + /* Expand inline as a 64-bit or 32-bit integer. + Only one of these will be non-NULL. */ + void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64); + void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32); + /* Expand inline with a host vector type. */ + void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec); + /* Expand out-of-line helper w/descriptor. */ + gen_helper_gvec_2i *fno; + /* The optional opcodes, if any, utilized by .fniv. */ + const TCGOpcode *opt_opc; + /* The data argument to the out-of-line helper. */ + uint32_t data; + /* The vector element size, if applicable. */ + uint8_t vece; + /* Prefer i64 to v64. 
 */
+    bool prefer_i64;
+    /* Load scalar as 1st source operand. */
+    bool scalar_first;
+} GVecGen2s;
+
+typedef struct {
+    /* Expand inline as a 64-bit or 32-bit integer.
+       Only one of these will be non-NULL. */
+    void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64);
+    void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32);
+    /* Expand inline with a host vector type. */
+    void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
+    /* Expand out-of-line helper w/descriptor. */
+    gen_helper_gvec_3 *fno;
+    /* The optional opcodes, if any, utilized by .fniv. */
+    const TCGOpcode *opt_opc;
+    /* The data argument to the out-of-line helper. */
+    int32_t data;
+    /* The vector element size, if applicable. */
+    uint8_t vece;
+    /* Prefer i64 to v64. */
+    bool prefer_i64;
+    /* Load dest as a 3rd source operand. */
+    bool load_dest;
+} GVecGen3;
+
+typedef struct {
+    /*
+     * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
+     * non-NULL.
+     */
+    void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
+    void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
+    /* Expand inline with a host vector type. */
+    void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
+    /* Expand out-of-line helper w/descriptor, data in descriptor. */
+    gen_helper_gvec_3 *fno;
+    /* The optional opcodes, if any, utilized by .fniv. */
+    const TCGOpcode *opt_opc;
+    /* The vector element size, if applicable. */
+    uint8_t vece;
+    /* Prefer i64 to v64. */
+    bool prefer_i64;
+    /* Load dest as a 3rd source operand. */
+    bool load_dest;
+} GVecGen3i;
+
+typedef struct {
+    /* Expand inline as a 64-bit or 32-bit integer.
+       Only one of these will be non-NULL. */
+    void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64);
+    void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+    /* Expand inline with a host vector type. */
+    void (*fniv)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec);
+    /* Expand out-of-line helper w/descriptor. */
+    gen_helper_gvec_4 *fno;
+    /* The optional opcodes, if any, utilized by .fniv. */
+    const TCGOpcode *opt_opc;
+    /* The data argument to the out-of-line helper. */
+    int32_t data;
+    /* The vector element size, if applicable. */
+    uint8_t vece;
+    /* Prefer i64 to v64. */
+    bool prefer_i64;
+    /* Write aofs as a 2nd dest operand. */
+    bool write_aofs;
+} GVecGen4;
+
+void tcg_gen_gvec_2(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
+                    uint32_t oprsz, uint32_t maxsz, const GVecGen2 *);
+void tcg_gen_gvec_2i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz,
+                     uint32_t maxsz, int64_t c, const GVecGen2i *);
+void tcg_gen_gvec_2s(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz,
+                     uint32_t maxsz, TCGv_i64 c, const GVecGen2s *);
+void tcg_gen_gvec_3(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                    uint32_t oprsz, uint32_t maxsz, const GVecGen3 *);
+void tcg_gen_gvec_3i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                     uint32_t oprsz, uint32_t maxsz, int64_t c,
+                     const GVecGen3i *);
+void tcg_gen_gvec_4(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
+                    uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
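To show how these descriptors are meant to be filled in, a hedged sketch of a
three-operand expansion: the GVecGen3 below describes an element-wise 16-bit add once,
and tcg_gen_gvec_3() then picks the best strategy for the host.
gen_helper_sample_add16 is a hypothetical out-of-line helper; tcg_gen_add_vec and
INDEX_op_add_vec are assumed from the vector half of tcg-op.h/tcg-opc.h.

    /* Hypothetical out-of-line fallback, matching gen_helper_gvec_3. */
    void gen_helper_sample_add16(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);

    static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 };

    static const GVecGen3 g_sample_add16 = {
        .fni8 = tcg_gen_vec_add16_i64,   /* 64-bit integer expansion, declared below */
        .fniv = tcg_gen_add_vec,         /* host vector expansion (tcg-op.h, assumed) */
        .fno = gen_helper_sample_add16,  /* hypothetical helper fallback */
        .opt_opc = vecop_list_add,
        .vece = MO_16,
    };

    /* Emit d = a + b on 16-bit lanes over oprsz bytes; lanes between
     * oprsz and maxsz are cleared by the expander. */
    static void gen_sample_add16(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
                                 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
    {
        tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g_sample_add16);
    }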
+
+/* Expand a specific vector operation. */
+
+void tcg_gen_gvec_mov(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_not(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_neg(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_abs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_add(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_mul(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_addi(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                       int64_t c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_muli(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                       int64_t c, uint32_t oprsz, uint32_t maxsz);
+
+void tcg_gen_gvec_adds(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_subs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_muls(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
+
+/* Saturated arithmetic. */
+void tcg_gen_gvec_ssadd(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sssub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_usadd(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_ussub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
+/* Min/max.
*/ +void tcg_gen_gvec_smin(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_umin(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_smax(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_umax(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); + +void tcg_gen_gvec_and(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_or(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_xor(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_andc(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_orc(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_nand(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_nor(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_eqv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); + +void tcg_gen_gvec_andi(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_xori(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_ori(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz); + +void tcg_gen_gvec_ands(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_xors(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_ors(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz); + +void tcg_gen_gvec_dup_mem(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t s, uint32_t m); +void tcg_gen_gvec_dup_i32(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t s, + uint32_t m, TCGv_i32); +void tcg_gen_gvec_dup_i64(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t s, + uint32_t m, TCGv_i64); + +void tcg_gen_gvec_dup8i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t s, uint32_t m, uint8_t x); +void tcg_gen_gvec_dup16i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t s, uint32_t m, uint16_t x); +void tcg_gen_gvec_dup32i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t s, uint32_t m, uint32_t x); +void tcg_gen_gvec_dup64i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t s, uint32_t m, uint64_t x); + +void tcg_gen_gvec_shli(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t shift, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_shri(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t shift, uint32_t oprsz, uint32_t maxsz); +void 
tcg_gen_gvec_sari(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t shift, uint32_t oprsz, uint32_t maxsz); + +void tcg_gen_gvec_shls(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_shrs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_sars(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz); + +/* + * Perform vector shift by vector element, modulo the element size. + * E.g. D[i] = A[i] << (B[i] % (8 << vece)). + */ +void tcg_gen_gvec_shlv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_shrv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_sarv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz); + +void tcg_gen_gvec_cmp(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, uint32_t dofs, + uint32_t aofs, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz); + +/* + * Perform vector bit select: d = (b & a) | (c & ~a). + */ +void tcg_gen_gvec_bitsel(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t cofs, + uint32_t oprsz, uint32_t maxsz); + +/* + * 64-bit vector operations. Use these when the register has been allocated + * with tcg_global_mem_new_i64, and so we cannot also address it via pointer. + * OPRSZ = MAXSZ = 8. + */ + +void tcg_gen_vec_neg8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a); +void tcg_gen_vec_neg16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a); +void tcg_gen_vec_neg32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a); + +void tcg_gen_vec_add8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); +void tcg_gen_vec_add16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); +void tcg_gen_vec_add32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); + +void tcg_gen_vec_sub8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); +void tcg_gen_vec_sub16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); +void tcg_gen_vec_sub32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); + +void tcg_gen_vec_shl8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); +void tcg_gen_vec_shl16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); +void tcg_gen_vec_shr8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); +void tcg_gen_vec_shr16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); +void tcg_gen_vec_sar8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); +void tcg_gen_vec_sar16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t); + +#endif diff --git a/qemu/include/tcg/tcg-op.h b/qemu/include/tcg/tcg-op.h new file mode 100644 index 00000000..73d1b93f --- /dev/null +++ b/qemu/include/tcg/tcg-op.h @@ -0,0 +1,1330 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to 
permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef TCG_TCG_OP_H +#define TCG_TCG_OP_H + +#include "tcg.h" +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +static inline void gen_uc_tracecode(TCGContext *tcg_ctx, int32_t size, int32_t type, void *uc, uint64_t pc) +{ + TCGv_i32 tsize = tcg_const_i32(tcg_ctx, size); + TCGv_i32 ttype = tcg_const_i32(tcg_ctx, type); + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, uc); + TCGv_i64 tpc = tcg_const_i64(tcg_ctx, pc); + gen_helper_uc_tracecode(tcg_ctx, tsize, ttype, tuc, tpc); + tcg_temp_free_i64(tcg_ctx, tpc); + tcg_temp_free_ptr(tcg_ctx, tuc); + tcg_temp_free_i32(tcg_ctx, ttype); + tcg_temp_free_i32(tcg_ctx, tsize); +} + +/* Basic output routines. Not for general consumption. */ + +void tcg_gen_op1(TCGContext *tcg_ctx, TCGOpcode, TCGArg); +void tcg_gen_op2(TCGContext *tcg_ctx, TCGOpcode, TCGArg, TCGArg); +void tcg_gen_op3(TCGContext *tcg_ctx, TCGOpcode, TCGArg, TCGArg, TCGArg); +void tcg_gen_op4(TCGContext *tcg_ctx, TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg); +void tcg_gen_op5(TCGContext *tcg_ctx, TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg); +void tcg_gen_op6(TCGContext *tcg_ctx, TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg); + +void vec_gen_2(TCGContext *tcg_ctx, TCGOpcode, TCGType, unsigned, TCGArg, TCGArg); +void vec_gen_3(TCGContext *tcg_ctx, TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg); +void vec_gen_4(TCGContext *tcg_ctx, TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg, TCGArg); + +static inline void tcg_gen_op1_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1) +{ + tcg_gen_op1(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1)); +} + +static inline void tcg_gen_op1_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1) +{ + tcg_gen_op1(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1)); +} + +static inline void tcg_gen_op1i(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1) +{ + tcg_gen_op1(tcg_ctx, opc, a1); +} + +static inline void tcg_gen_op2_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2) +{ + tcg_gen_op2(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2)); +} + +static inline void tcg_gen_op2_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2) +{ + tcg_gen_op2(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2)); +} + +static inline void tcg_gen_op2i_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGArg a2) +{ + tcg_gen_op2(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), a2); +} + +static inline void tcg_gen_op2i_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGArg a2) +{ + tcg_gen_op2(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), a2); +} + +static inline void tcg_gen_op2ii(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2) +{ + tcg_gen_op2(tcg_ctx, opc, a1, a2); +} + +static inline void tcg_gen_op3_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, + TCGv_i32 a2, 
TCGv_i32 a3) +{ + tcg_gen_op3(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), tcgv_i32_arg(tcg_ctx, a3)); +} + +static inline void tcg_gen_op3_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, + TCGv_i64 a2, TCGv_i64 a3) +{ + tcg_gen_op3(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), tcgv_i64_arg(tcg_ctx, a3)); +} + +static inline void tcg_gen_op3i_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, + TCGv_i32 a2, TCGArg a3) +{ + tcg_gen_op3(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), a3); +} + +static inline void tcg_gen_op3i_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, + TCGv_i64 a2, TCGArg a3) +{ + tcg_gen_op3(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), a3); +} + +static inline void tcg_gen_ldst_op_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 val, + TCGv_ptr base, TCGArg offset) +{ + tcg_gen_op3(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, val), tcgv_ptr_arg(tcg_ctx, base), offset); +} + +static inline void tcg_gen_ldst_op_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 val, + TCGv_ptr base, TCGArg offset) +{ + tcg_gen_op3(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, val), tcgv_ptr_arg(tcg_ctx, base), offset); +} + +static inline void tcg_gen_op4_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGv_i32 a3, TCGv_i32 a4) +{ + tcg_gen_op4(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), + tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4)); +} + +static inline void tcg_gen_op4_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, + TCGv_i64 a3, TCGv_i64 a4) +{ + tcg_gen_op4(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), + tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4)); +} + +static inline void tcg_gen_op4i_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGv_i32 a3, TCGArg a4) +{ + tcg_gen_op4(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), + tcgv_i32_arg(tcg_ctx, a3), a4); +} + +static inline void tcg_gen_op4i_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, + TCGv_i64 a3, TCGArg a4) +{ + tcg_gen_op4(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), + tcgv_i64_arg(tcg_ctx, a3), a4); +} + +static inline void tcg_gen_op4ii_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGArg a3, TCGArg a4) +{ + tcg_gen_op4(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), a3, a4); +} + +static inline void tcg_gen_op4ii_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, + TCGArg a3, TCGArg a4) +{ + tcg_gen_op4(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), a3, a4); +} + +static inline void tcg_gen_op5_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5) +{ + tcg_gen_op5(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), + tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4), tcgv_i32_arg(tcg_ctx, a5)); +} + +static inline void tcg_gen_op5_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, + TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5) +{ + tcg_gen_op5(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), + tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4), tcgv_i64_arg(tcg_ctx, a5)); +} + +static inline void tcg_gen_op5i_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGv_i32 a3, TCGv_i32 a4, TCGArg a5) +{ + tcg_gen_op5(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), 
tcgv_i32_arg(tcg_ctx, a2), + tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4), a5); +} + +static inline void tcg_gen_op5i_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, + TCGv_i64 a3, TCGv_i64 a4, TCGArg a5) +{ + tcg_gen_op5(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), + tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4), a5); +} + +static inline void tcg_gen_op5ii_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGv_i32 a3, TCGArg a4, TCGArg a5) +{ + tcg_gen_op5(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), + tcgv_i32_arg(tcg_ctx, a3), a4, a5); +} + +static inline void tcg_gen_op5ii_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, + TCGv_i64 a3, TCGArg a4, TCGArg a5) +{ + tcg_gen_op5(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), + tcgv_i64_arg(tcg_ctx, a3), a4, a5); +} + +static inline void tcg_gen_op6_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGv_i32 a3, TCGv_i32 a4, + TCGv_i32 a5, TCGv_i32 a6) +{ + tcg_gen_op6(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), + tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4), tcgv_i32_arg(tcg_ctx, a5), + tcgv_i32_arg(tcg_ctx, a6)); +} + +static inline void tcg_gen_op6_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, + TCGv_i64 a3, TCGv_i64 a4, + TCGv_i64 a5, TCGv_i64 a6) +{ + tcg_gen_op6(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), + tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4), tcgv_i64_arg(tcg_ctx, a5), + tcgv_i64_arg(tcg_ctx, a6)); +} + +static inline void tcg_gen_op6i_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGv_i32 a3, TCGv_i32 a4, + TCGv_i32 a5, TCGArg a6) +{ + tcg_gen_op6(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), + tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4), tcgv_i32_arg(tcg_ctx, a5), a6); +} + +static inline void tcg_gen_op6i_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, + TCGv_i64 a3, TCGv_i64 a4, + TCGv_i64 a5, TCGArg a6) +{ + tcg_gen_op6(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), + tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4), tcgv_i64_arg(tcg_ctx, a5), a6); +} + +static inline void tcg_gen_op6ii_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGv_i32 a3, TCGv_i32 a4, + TCGArg a5, TCGArg a6) +{ + tcg_gen_op6(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, a1), tcgv_i32_arg(tcg_ctx, a2), + tcgv_i32_arg(tcg_ctx, a3), tcgv_i32_arg(tcg_ctx, a4), a5, a6); +} + +static inline void tcg_gen_op6ii_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, + TCGv_i64 a3, TCGv_i64 a4, + TCGArg a5, TCGArg a6) +{ + tcg_gen_op6(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, a1), tcgv_i64_arg(tcg_ctx, a2), + tcgv_i64_arg(tcg_ctx, a3), tcgv_i64_arg(tcg_ctx, a4), a5, a6); +} + + +/* Generic ops. */ + +static inline void gen_set_label(TCGContext *tcg_ctx, TCGLabel *l) +{ + l->present = 1; + tcg_gen_op1(tcg_ctx, INDEX_op_set_label, label_arg(l)); +} + +static inline void tcg_gen_br(TCGContext *tcg_ctx, TCGLabel *l) +{ + l->refs++; + tcg_gen_op1(tcg_ctx, INDEX_op_br, label_arg(l)); +} + +void tcg_gen_mb(TCGContext *tcg_ctx, TCGBar); + +/* Helper calls. 
*/ + +/* 32 bit ops */ + +void tcg_gen_addi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); +void tcg_gen_subfi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2); +void tcg_gen_subi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); +void tcg_gen_andi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); +void tcg_gen_ori_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); +void tcg_gen_xori_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); +void tcg_gen_shli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); +void tcg_gen_shri_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); +void tcg_gen_sari_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); +void tcg_gen_muli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); +void tcg_gen_div_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_rem_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_divu_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_remu_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_andc_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_eqv_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_nand_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_nor_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_orc_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_clz_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_ctz_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_clzi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2); +void tcg_gen_ctzi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2); +void tcg_gen_clrsb_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); +void tcg_gen_ctpop_i32(TCGContext *tcg_ctx, TCGv_i32 a1, TCGv_i32 a2); +void tcg_gen_rotl_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_rotli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2); +void tcg_gen_rotr_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_rotri_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2); +void tcg_gen_deposit_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, + unsigned int ofs, unsigned int len); +void tcg_gen_deposit_z_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, + unsigned int ofs, unsigned int len); +void tcg_gen_extract_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, + unsigned int ofs, unsigned int len); +void tcg_gen_sextract_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, + unsigned int ofs, unsigned int len); +void tcg_gen_extract2_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah, + unsigned int ofs); +void tcg_gen_brcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *); +void tcg_gen_brcondi_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *); +void tcg_gen_setcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_setcondi_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, + TCGv_i32 arg1, 
int32_t arg2); +void tcg_gen_movcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, TCGv_i32 c1, + TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2); +void tcg_gen_add2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, + TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh); +void tcg_gen_sub2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, + TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh); +void tcg_gen_mulu2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_muls2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_mulsu2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_ext8s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); +void tcg_gen_ext16s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); +void tcg_gen_ext8u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); +void tcg_gen_ext16u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); +void tcg_gen_bswap16_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); +void tcg_gen_bswap32_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg); +void tcg_gen_smin_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_smax_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_umin_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_umax_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2); +void tcg_gen_abs_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32); + +static inline void tcg_gen_discard_i32(TCGContext *tcg_ctx, TCGv_i32 arg) +{ + tcg_gen_op1_i32(tcg_ctx, INDEX_op_discard, arg); +} + +static inline void tcg_gen_mov_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) +{ + if (ret != arg) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_mov_i32, ret, arg); + } +} + +static inline void tcg_gen_movi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, int32_t arg) +{ + tcg_gen_op2i_i32(tcg_ctx, INDEX_op_movi_i32, ret, arg); +} + +static inline void tcg_gen_ld8u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_ld8u_i32, ret, arg2, offset); +} + +static inline void tcg_gen_ld8s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_ld8s_i32, ret, arg2, offset); +} + +static inline void tcg_gen_ld16u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_ld16u_i32, ret, arg2, offset); +} + +static inline void tcg_gen_ld16s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_ld16s_i32, ret, arg2, offset); +} + +static inline void tcg_gen_ld_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_ld_i32, ret, arg2, offset); +} + +static inline void tcg_gen_st8_i32(TCGContext *tcg_ctx, TCGv_i32 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_st8_i32, arg1, arg2, offset); +} + +static inline void tcg_gen_st16_i32(TCGContext *tcg_ctx, TCGv_i32 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_st16_i32, arg1, arg2, offset); +} + +static inline void tcg_gen_st_i32(TCGContext *tcg_ctx, TCGv_i32 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(tcg_ctx, INDEX_op_st_i32, arg1, arg2, offset); +} + +static 
inline void tcg_gen_add_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(tcg_ctx, INDEX_op_add_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_sub_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(tcg_ctx, INDEX_op_sub_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_and_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(tcg_ctx, INDEX_op_and_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_or_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(tcg_ctx, INDEX_op_or_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_xor_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(tcg_ctx, INDEX_op_xor_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_shl_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(tcg_ctx, INDEX_op_shl_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_shr_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(tcg_ctx, INDEX_op_shr_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_sar_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(tcg_ctx, INDEX_op_sar_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_mul_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(tcg_ctx, INDEX_op_mul_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_neg_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_neg_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_neg_i32, ret, arg); + } else { + tcg_gen_subfi_i32(tcg_ctx, ret, 0, arg); + } +} + +static inline void tcg_gen_not_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_not_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_not_i32, ret, arg); + } else { + tcg_gen_xori_i32(tcg_ctx, ret, arg, -1); + } +} + +/* 64 bit ops */ + +void tcg_gen_addi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); +void tcg_gen_subfi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2); +void tcg_gen_subi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); +void tcg_gen_andi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); +void tcg_gen_ori_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); +void tcg_gen_xori_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); +void tcg_gen_shli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); +void tcg_gen_shri_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); +void tcg_gen_sari_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); +void tcg_gen_muli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2); +void tcg_gen_div_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_rem_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_divu_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_remu_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_andc_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_eqv_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_nand_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void 
tcg_gen_nor_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_orc_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_clz_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_ctz_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_clzi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
+void tcg_gen_ctzi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
+void tcg_gen_clrsb_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ctpop_i64(TCGContext *tcg_ctx, TCGv_i64 a1, TCGv_i64 a2);
+void tcg_gen_rotl_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rotli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
+void tcg_gen_rotr_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_rotri_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
+void tcg_gen_deposit_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
+                         unsigned int ofs, unsigned int len);
+void tcg_gen_deposit_z_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg,
+                           unsigned int ofs, unsigned int len);
+void tcg_gen_extract_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg,
+                         unsigned int ofs, unsigned int len);
+void tcg_gen_sextract_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg,
+                          unsigned int ofs, unsigned int len);
+void tcg_gen_extract2_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
+                          unsigned int ofs);
+void tcg_gen_brcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *);
+void tcg_gen_brcondi_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *);
+void tcg_gen_setcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret,
+                         TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_setcondi_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret,
+                          TCGv_i64 arg1, int64_t arg2);
+void tcg_gen_movcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
+                         TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2);
+void tcg_gen_add2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
+                      TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
+void tcg_gen_sub2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
+                      TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
+void tcg_gen_mulu2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_muls2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_mulsu2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_not_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext8s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext16s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext32s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext8u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext16u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext32u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_bswap16_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_bswap32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_bswap64_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_smin_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_smax_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_umin_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_umax_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_abs_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64);
+
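Every emitter here threads the TCGContext explicitly, so front-end code simply chains
calls through scratch temporaries. A short composition sketch
(tcg_temp_new_i64()/tcg_temp_free_i64() are assumed from tcg.h, following the same
pattern as gen_uc_tracecode() at the top of this header):

    /* Sketch: emit TCG ops computing ret = (a + b) << 1. */
    static void gen_sum_doubled_i64(TCGContext *tcg_ctx, TCGv_i64 ret,
                                    TCGv_i64 a, TCGv_i64 b)
    {
        TCGv_i64 t = tcg_temp_new_i64(tcg_ctx);  /* scratch temporary */
        tcg_gen_add_i64(tcg_ctx, t, a, b);       /* t = a + b */
        tcg_gen_shli_i64(tcg_ctx, ret, t, 1);    /* ret = t << 1 */
        tcg_temp_free_i64(tcg_ctx, t);
    }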
+void tcg_gen_smax_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_umin_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_umax_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_abs_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64); + +#if TCG_TARGET_REG_BITS == 64 +static inline void tcg_gen_discard_i64(TCGContext *tcg_ctx, TCGv_i64 arg) +{ + tcg_gen_op1_i64(tcg_ctx, INDEX_op_discard, arg); +} + +static inline void tcg_gen_mov_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ + if (ret != arg) { + tcg_gen_op2_i64(tcg_ctx, INDEX_op_mov_i64, ret, arg); + } +} + +static inline void tcg_gen_movi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, int64_t arg) +{ + tcg_gen_op2i_i64(tcg_ctx, INDEX_op_movi_i64, ret, arg); +} + +static inline void tcg_gen_ld8u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld8u_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld8s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld8s_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld16u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld16u_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld16s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld16s_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld32u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld32u_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld32s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld32s_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_ld_i64, ret, arg2, offset); +} + +static inline void tcg_gen_st8_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_st8_i64, arg1, arg2, offset); +} + +static inline void tcg_gen_st16_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_st16_i64, arg1, arg2, offset); +} + +static inline void tcg_gen_st32_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_st32_i64, arg1, arg2, offset); +} + +static inline void tcg_gen_st_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(tcg_ctx, INDEX_op_st_i64, arg1, arg2, offset); +} + +static inline void tcg_gen_add_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(tcg_ctx, INDEX_op_add_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_sub_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(tcg_ctx, INDEX_op_sub_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_and_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(tcg_ctx, INDEX_op_and_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_or_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + 
tcg_gen_op3_i64(tcg_ctx, INDEX_op_or_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_xor_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(tcg_ctx, INDEX_op_xor_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_shl_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(tcg_ctx, INDEX_op_shl_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_shr_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(tcg_ctx, INDEX_op_shr_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_sar_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(tcg_ctx, INDEX_op_sar_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_mul_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(tcg_ctx, INDEX_op_mul_i64, ret, arg1, arg2); +} +#else /* TCG_TARGET_REG_BITS == 32 */ +static inline void tcg_gen_st8_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_st8_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg1), arg2, offset); +} + +static inline void tcg_gen_st16_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_st16_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg1), arg2, offset); +} + +static inline void tcg_gen_st32_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_st_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg1), arg2, offset); +} + +static inline void tcg_gen_add_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_add2_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), + TCGV_HIGH(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2), TCGV_HIGH(tcg_ctx, arg2)); +} + +static inline void tcg_gen_sub_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_sub2_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), + TCGV_HIGH(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2), TCGV_HIGH(tcg_ctx, arg2)); +} + +void tcg_gen_discard_i64(TCGContext *tcg_ctx, TCGv_i64 arg); +void tcg_gen_mov_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg); +void tcg_gen_movi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, int64_t arg); +void tcg_gen_ld8u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); +void tcg_gen_ld8s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); +void tcg_gen_ld16u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); +void tcg_gen_ld16s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); +void tcg_gen_ld32u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); +void tcg_gen_ld32s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); +void tcg_gen_ld_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset); +void tcg_gen_st_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset); +void tcg_gen_and_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_or_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_xor_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_shl_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_shr_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void 
tcg_gen_sar_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +void tcg_gen_mul_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2); +#endif /* TCG_TARGET_REG_BITS */ + +static inline void tcg_gen_neg_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_neg_i64) { + tcg_gen_op2_i64(tcg_ctx, INDEX_op_neg_i64, ret, arg); + } else { + tcg_gen_subfi_i64(tcg_ctx, ret, 0, arg); + } +} + +/* Size changing operations. */ + +void tcg_gen_extu_i32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i32 arg); +void tcg_gen_ext_i32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i32 arg); +void tcg_gen_concat_i32_i64(TCGContext *tcg_ctx, TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high); +void tcg_gen_extrl_i64_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i64 arg); +void tcg_gen_extrh_i64_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i64 arg); +void tcg_gen_extr_i64_i32(TCGContext *tcg_ctx, TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg); +void tcg_gen_extr32_i64(TCGContext *tcg_ctx, TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg); + +static inline void tcg_gen_concat32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi) +{ + tcg_gen_deposit_i64(tcg_ctx, ret, lo, hi, 32, 32); +} + +/* QEMU specific operations. */ + +#ifndef TARGET_LONG_BITS +#error must include QEMU headers +#endif + +#if TARGET_INSN_START_WORDS == 1 +# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS +static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc) +{ + tcg_gen_op1(tcg_ctx, INDEX_op_insn_start, pc); +} +# else +static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc) +{ + tcg_gen_op2(tcg_ctx, INDEX_op_insn_start, (uint32_t)pc, (uint32_t)(pc >> 32)); +} +# endif +#elif TARGET_INSN_START_WORDS == 2 +# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS +static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc, target_ulong a1) +{ + tcg_gen_op2(tcg_ctx, INDEX_op_insn_start, pc, a1); +} +# else +static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc, target_ulong a1) +{ + tcg_gen_op4(tcg_ctx, INDEX_op_insn_start, + (uint32_t)pc, (uint32_t)(pc >> 32), + (uint32_t)a1, (uint32_t)(a1 >> 32)); +} +# endif +#elif TARGET_INSN_START_WORDS == 3 +# if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS +static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc, target_ulong a1, + target_ulong a2) +{ + tcg_gen_op3(tcg_ctx, INDEX_op_insn_start, pc, a1, a2); +} +# else +static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc, target_ulong a1, + target_ulong a2) +{ + tcg_gen_op6(tcg_ctx, INDEX_op_insn_start, + (uint32_t)pc, (uint32_t)(pc >> 32), + (uint32_t)a1, (uint32_t)(a1 >> 32), + (uint32_t)a2, (uint32_t)(a2 >> 32)); +} +# endif +#else +# error "Unhandled number of operands to insn_start" +#endif + +/** + * tcg_gen_exit_tb() - output exit_tb TCG operation + * @tb: The TranslationBlock from which we are exiting + * @idx: Direct jump slot index, or exit request + * + * See tcg/README for more info about this TCG operation. + * See also tcg.h and the block comment above TB_EXIT_MASK. + * + * For a normal exit from the TB, back to the main loop, @tb should + * be NULL and @idx should be 0. Otherwise, @tb should be valid and + * @idx should be one of the TB_EXIT_ values. + */ +void tcg_gen_exit_tb(TCGContext *tcg_ctx, TranslationBlock *tb, unsigned idx); + +/** + * tcg_gen_goto_tb() - output goto_tb TCG operation + * @idx: Direct jump slot index (0 or 1) + * + * See tcg/README for more info about this TCG operation. 
+ * + * NOTE: In softmmu emulation, direct jumps with goto_tb are only safe within + * the pages this TB resides in because we don't take care of direct jumps when + * address mapping changes, e.g. in tlb_flush(). In user mode, there's only a + * static address translation, so the destination address is always valid, TBs + * are always invalidated properly, and direct jumps are reset when mapping + * changes. + */ +void tcg_gen_goto_tb(TCGContext *tcg_ctx, unsigned idx); + +/** + * tcg_gen_lookup_and_goto_ptr() - look up the current TB, jump to it if valid + * @addr: Guest address of the target TB + * + * If the TB is not valid, jump to the epilogue. + * + * This operation is optional. If the TCG backend does not implement goto_ptr, + * this op is equivalent to calling tcg_gen_exit_tb() with 0 as the argument. + */ +void tcg_gen_lookup_and_goto_ptr(TCGContext *tcg_ctx); + +#if TARGET_LONG_BITS == 32 +#define tcg_temp_new tcg_temp_new_i32 +#define tcg_global_reg_new tcg_global_reg_new_i32 +#define tcg_global_mem_new tcg_global_mem_new_i32 +#define tcg_temp_local_new tcg_temp_local_new_i32 +#define tcg_temp_free tcg_temp_free_i32 +#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32 +#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32 +#else +#define tcg_temp_new tcg_temp_new_i64 +#define tcg_global_reg_new tcg_global_reg_new_i64 +#define tcg_global_mem_new tcg_global_mem_new_i64 +#define tcg_temp_local_new tcg_temp_local_new_i64 +#define tcg_temp_free tcg_temp_free_i64 +#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64 +#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 +#endif + +void tcg_gen_qemu_ld_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_st_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_ld_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGArg, MemOp); +void tcg_gen_qemu_st_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGArg, MemOp); + +static inline void tcg_gen_qemu_ld8u(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_UB); +} + +static inline void tcg_gen_qemu_ld8s(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_SB); +} + +static inline void tcg_gen_qemu_ld16u(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_TEUW); +} + +static inline void tcg_gen_qemu_ld16s(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_TESW); +} + +static inline void tcg_gen_qemu_ld32u(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_TEUL); +} + +static inline void tcg_gen_qemu_ld32s(TCGContext *tcg_ctx, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(tcg_ctx, ret, addr, mem_index, MO_TESL); +} + +static inline void tcg_gen_qemu_ld64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_i64(tcg_ctx, ret, addr, mem_index, MO_TEQ); +} + +static inline void tcg_gen_qemu_st8(TCGContext *tcg_ctx, TCGv arg, TCGv addr, int mem_index) +{ + tcg_gen_qemu_st_tl(tcg_ctx, arg, addr, mem_index, MO_UB); +} + +static inline void tcg_gen_qemu_st16(TCGContext *tcg_ctx, TCGv arg, TCGv addr, int mem_index) +{ + tcg_gen_qemu_st_tl(tcg_ctx, arg, addr, mem_index, MO_TEUW); +} + +static inline void tcg_gen_qemu_st32(TCGContext *tcg_ctx, TCGv arg, TCGv addr, int mem_index) +{ + tcg_gen_qemu_st_tl(tcg_ctx, arg, addr, mem_index, MO_TEUL); +} 
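Each of the fixed-size wrappers above pins a particular MemOp: that one operand packs the access size, signedness, and target endianness, which is why the 16-bit load comes in MO_TEUW (target-endian unsigned) and MO_TESW (target-endian signed) flavors. A stand-alone sketch of the size/sign part of that encoding; the MODEL_* names mirror the real MemOp flags from exec/memop.h but are local to this example, and the endianness/alignment bits are omitted:

    #include <stdint.h>

    /* Assumed layout, mirroring MemOp: low two bits = log2(size),
     * next bit = sign-extend. */
    enum {
        MODEL_MO_8    = 0,
        MODEL_MO_16   = 1,
        MODEL_MO_32   = 2,
        MODEL_MO_64   = 3,
        MODEL_MO_SIZE = 3,   /* mask for the size field */
        MODEL_MO_SIGN = 4,   /* sign-extend the loaded value */
    };

    /* Extend a raw loaded value to 64 bits according to the memop. */
    static inline uint64_t model_extend(uint64_t raw, int memop)
    {
        unsigned bits = 8u << (memop & MODEL_MO_SIZE);  /* 8/16/32/64 */
        if (bits == 64) {
            return raw;
        }
        raw &= (1ULL << bits) - 1;                      /* keep `bits` bits */
        if (memop & MODEL_MO_SIGN) {
            uint64_t sign = 1ULL << (bits - 1);
            return (raw ^ sign) - sign;                 /* sign-extend */
        }
        return raw;                                     /* zero-extend */
    }

    /* e.g. model_extend(0xFFFF, MODEL_MO_16 | MODEL_MO_SIGN) == (uint64_t)-1,
     *      model_extend(0xFFFF, MODEL_MO_16)                 == 0xFFFF      */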
+ +static inline void tcg_gen_qemu_st64(TCGContext *tcg_ctx, TCGv_i64 arg, TCGv addr, int mem_index) +{ + tcg_gen_qemu_st_i64(tcg_ctx, arg, addr, mem_index, MO_TEQ); +} + +void tcg_gen_atomic_cmpxchg_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGv_i32, + TCGArg, MemOp); +void tcg_gen_atomic_cmpxchg_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGv_i64, + TCGArg, MemOp); + +void tcg_gen_atomic_xchg_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_xchg_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); + +void tcg_gen_atomic_fetch_add_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_add_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_and_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_and_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_or_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_or_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_xor_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_xor_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smin_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smin_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umin_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umin_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smax_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_smax_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umax_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_fetch_umax_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); + +void tcg_gen_atomic_add_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_add_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_and_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_and_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_or_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_or_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_xor_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_xor_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_smin_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_smin_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_umin_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_umin_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void tcg_gen_atomic_smax_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_smax_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); +void 
tcg_gen_atomic_umax_fetch_i32(TCGContext *tcg_ctx, TCGv_i32, TCGv, TCGv_i32, TCGArg, MemOp); +void tcg_gen_atomic_umax_fetch_i64(TCGContext *tcg_ctx, TCGv_i64, TCGv, TCGv_i64, TCGArg, MemOp); + +void tcg_gen_mov_vec(TCGContext *tcg_ctx, TCGv_vec, TCGv_vec); +void tcg_gen_dup_i32_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec, TCGv_i32); +void tcg_gen_dup_i64_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec, TCGv_i64); +void tcg_gen_dup_mem_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec, TCGv_ptr, tcg_target_long); +void tcg_gen_dup8i_vec(TCGContext *tcg_ctx, TCGv_vec, uint32_t); +void tcg_gen_dup16i_vec(TCGContext *tcg_ctx, TCGv_vec, uint32_t); +void tcg_gen_dup32i_vec(TCGContext *tcg_ctx, TCGv_vec, uint32_t); +void tcg_gen_dup64i_vec(TCGContext *tcg_ctx, TCGv_vec, uint64_t); +void tcg_gen_dupi_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec, uint64_t); +void tcg_gen_add_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_sub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_mul_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_and_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_or_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_xor_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_andc_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_orc_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_nand_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_nor_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_eqv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_not_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a); +void tcg_gen_neg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a); +void tcg_gen_abs_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a); +void tcg_gen_ssadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_usadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_sssub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_ussub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_smin_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_umin_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_smax_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); +void tcg_gen_umax_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b); + +void tcg_gen_shli_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i); +void tcg_gen_shri_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i); +void tcg_gen_sari_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i); + +void tcg_gen_shls_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s); +void tcg_gen_shrs_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s); +void tcg_gen_sars_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s); + +void tcg_gen_shlv_vec(TCGContext 
*tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s); +void tcg_gen_shrv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s); +void tcg_gen_sarv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s); + +void tcg_gen_cmp_vec(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, TCGv_vec r, + TCGv_vec a, TCGv_vec b); + +void tcg_gen_bitsel_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, + TCGv_vec b, TCGv_vec c); +void tcg_gen_cmpsel_vec(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, TCGv_vec r, + TCGv_vec a, TCGv_vec b, TCGv_vec c, TCGv_vec d); + +void tcg_gen_ld_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr base, TCGArg offset); +void tcg_gen_st_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr base, TCGArg offset); +void tcg_gen_stl_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t); + +#if TARGET_LONG_BITS == 64 +#define tcg_gen_movi_tl tcg_gen_movi_i64 +#define tcg_gen_mov_tl tcg_gen_mov_i64 +#define tcg_gen_ld8u_tl tcg_gen_ld8u_i64 +#define tcg_gen_ld8s_tl tcg_gen_ld8s_i64 +#define tcg_gen_ld16u_tl tcg_gen_ld16u_i64 +#define tcg_gen_ld16s_tl tcg_gen_ld16s_i64 +#define tcg_gen_ld32u_tl tcg_gen_ld32u_i64 +#define tcg_gen_ld32s_tl tcg_gen_ld32s_i64 +#define tcg_gen_ld_tl tcg_gen_ld_i64 +#define tcg_gen_st8_tl tcg_gen_st8_i64 +#define tcg_gen_st16_tl tcg_gen_st16_i64 +#define tcg_gen_st32_tl tcg_gen_st32_i64 +#define tcg_gen_st_tl tcg_gen_st_i64 +#define tcg_gen_add_tl tcg_gen_add_i64 +#define tcg_gen_addi_tl tcg_gen_addi_i64 +#define tcg_gen_sub_tl tcg_gen_sub_i64 +#define tcg_gen_neg_tl tcg_gen_neg_i64 +#define tcg_gen_abs_tl tcg_gen_abs_i64 +#define tcg_gen_subfi_tl tcg_gen_subfi_i64 +#define tcg_gen_subi_tl tcg_gen_subi_i64 +#define tcg_gen_and_tl tcg_gen_and_i64 +#define tcg_gen_andi_tl tcg_gen_andi_i64 +#define tcg_gen_or_tl tcg_gen_or_i64 +#define tcg_gen_ori_tl tcg_gen_ori_i64 +#define tcg_gen_xor_tl tcg_gen_xor_i64 +#define tcg_gen_xori_tl tcg_gen_xori_i64 +#define tcg_gen_not_tl tcg_gen_not_i64 +#define tcg_gen_shl_tl tcg_gen_shl_i64 +#define tcg_gen_shli_tl tcg_gen_shli_i64 +#define tcg_gen_shr_tl tcg_gen_shr_i64 +#define tcg_gen_shri_tl tcg_gen_shri_i64 +#define tcg_gen_sar_tl tcg_gen_sar_i64 +#define tcg_gen_sari_tl tcg_gen_sari_i64 +#define tcg_gen_brcond_tl tcg_gen_brcond_i64 +#define tcg_gen_brcondi_tl tcg_gen_brcondi_i64 +#define tcg_gen_setcond_tl tcg_gen_setcond_i64 +#define tcg_gen_setcondi_tl tcg_gen_setcondi_i64 +#define tcg_gen_mul_tl tcg_gen_mul_i64 +#define tcg_gen_muli_tl tcg_gen_muli_i64 +#define tcg_gen_div_tl tcg_gen_div_i64 +#define tcg_gen_rem_tl tcg_gen_rem_i64 +#define tcg_gen_divu_tl tcg_gen_divu_i64 +#define tcg_gen_remu_tl tcg_gen_remu_i64 +#define tcg_gen_discard_tl tcg_gen_discard_i64 +#define tcg_gen_trunc_tl_i32 tcg_gen_extrl_i64_i32 +#define tcg_gen_trunc_i64_tl tcg_gen_mov_i64 +#define tcg_gen_extu_i32_tl tcg_gen_extu_i32_i64 +#define tcg_gen_ext_i32_tl tcg_gen_ext_i32_i64 +#define tcg_gen_extu_tl_i64 tcg_gen_mov_i64 +#define tcg_gen_ext_tl_i64 tcg_gen_mov_i64 +#define tcg_gen_ext8u_tl tcg_gen_ext8u_i64 +#define tcg_gen_ext8s_tl tcg_gen_ext8s_i64 +#define tcg_gen_ext16u_tl tcg_gen_ext16u_i64 +#define tcg_gen_ext16s_tl tcg_gen_ext16s_i64 +#define tcg_gen_ext32u_tl tcg_gen_ext32u_i64 +#define tcg_gen_ext32s_tl tcg_gen_ext32s_i64 +#define tcg_gen_bswap16_tl tcg_gen_bswap16_i64 +#define tcg_gen_bswap32_tl tcg_gen_bswap32_i64 +#define tcg_gen_bswap64_tl tcg_gen_bswap64_i64 +#define tcg_gen_concat_tl_i64 tcg_gen_concat32_i64 +#define tcg_gen_extr_i64_tl 
tcg_gen_extr32_i64 +#define tcg_gen_andc_tl tcg_gen_andc_i64 +#define tcg_gen_eqv_tl tcg_gen_eqv_i64 +#define tcg_gen_nand_tl tcg_gen_nand_i64 +#define tcg_gen_nor_tl tcg_gen_nor_i64 +#define tcg_gen_orc_tl tcg_gen_orc_i64 +#define tcg_gen_clz_tl tcg_gen_clz_i64 +#define tcg_gen_ctz_tl tcg_gen_ctz_i64 +#define tcg_gen_clzi_tl tcg_gen_clzi_i64 +#define tcg_gen_ctzi_tl tcg_gen_ctzi_i64 +#define tcg_gen_clrsb_tl tcg_gen_clrsb_i64 +#define tcg_gen_ctpop_tl tcg_gen_ctpop_i64 +#define tcg_gen_rotl_tl tcg_gen_rotl_i64 +#define tcg_gen_rotli_tl tcg_gen_rotli_i64 +#define tcg_gen_rotr_tl tcg_gen_rotr_i64 +#define tcg_gen_rotri_tl tcg_gen_rotri_i64 +#define tcg_gen_deposit_tl tcg_gen_deposit_i64 +#define tcg_gen_deposit_z_tl tcg_gen_deposit_z_i64 +#define tcg_gen_extract_tl tcg_gen_extract_i64 +#define tcg_gen_sextract_tl tcg_gen_sextract_i64 +#define tcg_gen_extract2_tl tcg_gen_extract2_i64 +#define tcg_const_tl tcg_const_i64 +#define tcg_const_local_tl tcg_const_local_i64 +#define tcg_gen_movcond_tl tcg_gen_movcond_i64 +#define tcg_gen_add2_tl tcg_gen_add2_i64 +#define tcg_gen_sub2_tl tcg_gen_sub2_i64 +#define tcg_gen_mulu2_tl tcg_gen_mulu2_i64 +#define tcg_gen_muls2_tl tcg_gen_muls2_i64 +#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i64 +#define tcg_gen_smin_tl tcg_gen_smin_i64 +#define tcg_gen_umin_tl tcg_gen_umin_i64 +#define tcg_gen_smax_tl tcg_gen_smax_i64 +#define tcg_gen_umax_tl tcg_gen_umax_i64 +#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i64 +#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i64 +#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i64 +#define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i64 +#define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i64 +#define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i64 +#define tcg_gen_atomic_fetch_smin_tl tcg_gen_atomic_fetch_smin_i64 +#define tcg_gen_atomic_fetch_umin_tl tcg_gen_atomic_fetch_umin_i64 +#define tcg_gen_atomic_fetch_smax_tl tcg_gen_atomic_fetch_smax_i64 +#define tcg_gen_atomic_fetch_umax_tl tcg_gen_atomic_fetch_umax_i64 +#define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i64 +#define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i64 +#define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i64 +#define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i64 +#define tcg_gen_atomic_smin_fetch_tl tcg_gen_atomic_smin_fetch_i64 +#define tcg_gen_atomic_umin_fetch_tl tcg_gen_atomic_umin_fetch_i64 +#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i64 +#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i64 +#define tcg_gen_dup_tl_vec tcg_gen_dup_i64_vec +#else +#define tcg_gen_movi_tl tcg_gen_movi_i32 +#define tcg_gen_mov_tl tcg_gen_mov_i32 +#define tcg_gen_ld8u_tl tcg_gen_ld8u_i32 +#define tcg_gen_ld8s_tl tcg_gen_ld8s_i32 +#define tcg_gen_ld16u_tl tcg_gen_ld16u_i32 +#define tcg_gen_ld16s_tl tcg_gen_ld16s_i32 +#define tcg_gen_ld32u_tl tcg_gen_ld_i32 +#define tcg_gen_ld32s_tl tcg_gen_ld_i32 +#define tcg_gen_ld_tl tcg_gen_ld_i32 +#define tcg_gen_st8_tl tcg_gen_st8_i32 +#define tcg_gen_st16_tl tcg_gen_st16_i32 +#define tcg_gen_st32_tl tcg_gen_st_i32 +#define tcg_gen_st_tl tcg_gen_st_i32 +#define tcg_gen_add_tl tcg_gen_add_i32 +#define tcg_gen_addi_tl tcg_gen_addi_i32 +#define tcg_gen_sub_tl tcg_gen_sub_i32 +#define tcg_gen_neg_tl tcg_gen_neg_i32 +#define tcg_gen_abs_tl tcg_gen_abs_i32 +#define tcg_gen_subfi_tl tcg_gen_subfi_i32 +#define tcg_gen_subi_tl tcg_gen_subi_i32 +#define tcg_gen_and_tl tcg_gen_and_i32 +#define tcg_gen_andi_tl 
tcg_gen_andi_i32 +#define tcg_gen_or_tl tcg_gen_or_i32 +#define tcg_gen_ori_tl tcg_gen_ori_i32 +#define tcg_gen_xor_tl tcg_gen_xor_i32 +#define tcg_gen_xori_tl tcg_gen_xori_i32 +#define tcg_gen_not_tl tcg_gen_not_i32 +#define tcg_gen_shl_tl tcg_gen_shl_i32 +#define tcg_gen_shli_tl tcg_gen_shli_i32 +#define tcg_gen_shr_tl tcg_gen_shr_i32 +#define tcg_gen_shri_tl tcg_gen_shri_i32 +#define tcg_gen_sar_tl tcg_gen_sar_i32 +#define tcg_gen_sari_tl tcg_gen_sari_i32 +#define tcg_gen_brcond_tl tcg_gen_brcond_i32 +#define tcg_gen_brcondi_tl tcg_gen_brcondi_i32 +#define tcg_gen_setcond_tl tcg_gen_setcond_i32 +#define tcg_gen_setcondi_tl tcg_gen_setcondi_i32 +#define tcg_gen_mul_tl tcg_gen_mul_i32 +#define tcg_gen_muli_tl tcg_gen_muli_i32 +#define tcg_gen_div_tl tcg_gen_div_i32 +#define tcg_gen_rem_tl tcg_gen_rem_i32 +#define tcg_gen_divu_tl tcg_gen_divu_i32 +#define tcg_gen_remu_tl tcg_gen_remu_i32 +#define tcg_gen_discard_tl tcg_gen_discard_i32 +#define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32 +#define tcg_gen_trunc_i64_tl tcg_gen_extrl_i64_i32 +#define tcg_gen_extu_i32_tl tcg_gen_mov_i32 +#define tcg_gen_ext_i32_tl tcg_gen_mov_i32 +#define tcg_gen_extu_tl_i64 tcg_gen_extu_i32_i64 +#define tcg_gen_ext_tl_i64 tcg_gen_ext_i32_i64 +#define tcg_gen_ext8u_tl tcg_gen_ext8u_i32 +#define tcg_gen_ext8s_tl tcg_gen_ext8s_i32 +#define tcg_gen_ext16u_tl tcg_gen_ext16u_i32 +#define tcg_gen_ext16s_tl tcg_gen_ext16s_i32 +#define tcg_gen_ext32u_tl tcg_gen_mov_i32 +#define tcg_gen_ext32s_tl tcg_gen_mov_i32 +#define tcg_gen_bswap16_tl tcg_gen_bswap16_i32 +#define tcg_gen_bswap32_tl tcg_gen_bswap32_i32 +#define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64 +#define tcg_gen_extr_i64_tl tcg_gen_extr_i64_i32 +#define tcg_gen_andc_tl tcg_gen_andc_i32 +#define tcg_gen_eqv_tl tcg_gen_eqv_i32 +#define tcg_gen_nand_tl tcg_gen_nand_i32 +#define tcg_gen_nor_tl tcg_gen_nor_i32 +#define tcg_gen_orc_tl tcg_gen_orc_i32 +#define tcg_gen_clz_tl tcg_gen_clz_i32 +#define tcg_gen_ctz_tl tcg_gen_ctz_i32 +#define tcg_gen_clzi_tl tcg_gen_clzi_i32 +#define tcg_gen_ctzi_tl tcg_gen_ctzi_i32 +#define tcg_gen_clrsb_tl tcg_gen_clrsb_i32 +#define tcg_gen_ctpop_tl tcg_gen_ctpop_i32 +#define tcg_gen_rotl_tl tcg_gen_rotl_i32 +#define tcg_gen_rotli_tl tcg_gen_rotli_i32 +#define tcg_gen_rotr_tl tcg_gen_rotr_i32 +#define tcg_gen_rotri_tl tcg_gen_rotri_i32 +#define tcg_gen_deposit_tl tcg_gen_deposit_i32 +#define tcg_gen_deposit_z_tl tcg_gen_deposit_z_i32 +#define tcg_gen_extract_tl tcg_gen_extract_i32 +#define tcg_gen_sextract_tl tcg_gen_sextract_i32 +#define tcg_gen_extract2_tl tcg_gen_extract2_i32 +#define tcg_const_tl tcg_const_i32 +#define tcg_const_local_tl tcg_const_local_i32 +#define tcg_gen_movcond_tl tcg_gen_movcond_i32 +#define tcg_gen_add2_tl tcg_gen_add2_i32 +#define tcg_gen_sub2_tl tcg_gen_sub2_i32 +#define tcg_gen_mulu2_tl tcg_gen_mulu2_i32 +#define tcg_gen_muls2_tl tcg_gen_muls2_i32 +#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i32 +#define tcg_gen_smin_tl tcg_gen_smin_i32 +#define tcg_gen_umin_tl tcg_gen_umin_i32 +#define tcg_gen_smax_tl tcg_gen_smax_i32 +#define tcg_gen_umax_tl tcg_gen_umax_i32 +#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i32 +#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i32 +#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i32 +#define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i32 +#define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i32 +#define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i32 +#define tcg_gen_atomic_fetch_smin_tl tcg_gen_atomic_fetch_smin_i32 +#define 
tcg_gen_atomic_fetch_umin_tl tcg_gen_atomic_fetch_umin_i32 +#define tcg_gen_atomic_fetch_smax_tl tcg_gen_atomic_fetch_smax_i32 +#define tcg_gen_atomic_fetch_umax_tl tcg_gen_atomic_fetch_umax_i32 +#define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i32 +#define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i32 +#define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i32 +#define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i32 +#define tcg_gen_atomic_smin_fetch_tl tcg_gen_atomic_smin_fetch_i32 +#define tcg_gen_atomic_umin_fetch_tl tcg_gen_atomic_umin_fetch_i32 +#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i32 +#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i32 +#define tcg_gen_dup_tl_vec tcg_gen_dup_i32_vec +#endif + +#if UINTPTR_MAX == UINT32_MAX +# define PTR i32 +# define NAT TCGv_i32 +#else +# define PTR i64 +# define NAT TCGv_i64 +#endif + +static inline void tcg_gen_ld_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_ptr a, intptr_t o) +{ + glue(tcg_gen_ld_,PTR)(tcg_ctx, (NAT)r, a, o); +} + +static inline void tcg_gen_st_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_ptr a, intptr_t o) +{ + glue(tcg_gen_st_, PTR)(tcg_ctx, (NAT)r, a, o); +} + +static inline void tcg_gen_discard_ptr(TCGContext *tcg_ctx, TCGv_ptr a) +{ + glue(tcg_gen_discard_,PTR)(tcg_ctx, (NAT)a); +} + +static inline void tcg_gen_add_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_ptr a, TCGv_ptr b) +{ + glue(tcg_gen_add_,PTR)(tcg_ctx, (NAT)r, (NAT)a, (NAT)b); +} + +static inline void tcg_gen_addi_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_ptr a, intptr_t b) +{ + glue(tcg_gen_addi_,PTR)(tcg_ctx, (NAT)r, (NAT)a, b); +} + +static inline void tcg_gen_brcondi_ptr(TCGContext *tcg_ctx, TCGCond cond, TCGv_ptr a, + intptr_t b, TCGLabel *label) +{ + glue(tcg_gen_brcondi_,PTR)(tcg_ctx, cond, (NAT)a, b, label); +} + +static inline void tcg_gen_ext_i32_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_i32 a) +{ +#if UINTPTR_MAX == UINT32_MAX + tcg_gen_mov_i32(tcg_ctx, (NAT)r, a); +#else + tcg_gen_ext_i32_i64(tcg_ctx, (NAT)r, a); +#endif +} + +static inline void tcg_gen_trunc_i64_ptr(TCGContext *tcg_ctx, TCGv_ptr r, TCGv_i64 a) +{ +#if UINTPTR_MAX == UINT32_MAX + tcg_gen_extrl_i64_i32(tcg_ctx, (NAT)r, a); +#else + tcg_gen_mov_i64(tcg_ctx, (NAT)r, a); +#endif +} + +static inline void tcg_gen_extu_ptr_i64(TCGContext *tcg_ctx, TCGv_i64 r, TCGv_ptr a) +{ +#if UINTPTR_MAX == UINT32_MAX + tcg_gen_extu_i32_i64(tcg_ctx, r, (NAT)a); +#else + tcg_gen_mov_i64(tcg_ctx, r, (NAT)a); +#endif +} + +static inline void tcg_gen_trunc_ptr_i32(TCGContext *tcg_ctx, TCGv_i32 r, TCGv_ptr a) +{ +#if UINTPTR_MAX == UINT32_MAX + tcg_gen_mov_i32(tcg_ctx, r, (NAT)a); +#else + tcg_gen_extrl_i64_i32(tcg_ctx, r, (NAT)a); +#endif +} + +#undef PTR +#undef NAT + +#endif /* TCG_TCG_OP_H */ diff --git a/qemu/tcg/tcg-opc.h b/qemu/include/tcg/tcg-opc.h similarity index 61% rename from qemu/tcg/tcg-opc.h rename to qemu/include/tcg/tcg-opc.h index 9df65bc7..22033870 100644 --- a/qemu/tcg/tcg-opc.h +++ b/qemu/include/tcg/tcg-opc.h @@ -27,15 +27,6 @@ */ /* predefined ops */ -DEF(end, 0, 0, 0, TCG_OPF_NOT_PRESENT) /* must be kept first */ -DEF(nop, 0, 0, 0, TCG_OPF_NOT_PRESENT) -DEF(nop1, 0, 0, 1, TCG_OPF_NOT_PRESENT) -DEF(nop2, 0, 0, 2, TCG_OPF_NOT_PRESENT) -DEF(nop3, 0, 0, 3, TCG_OPF_NOT_PRESENT) - -/* variable number of parameters */ -DEF(nopn, 0, 0, 1, TCG_OPF_NOT_PRESENT) - DEF(discard, 1, 0, 0, TCG_OPF_NOT_PRESENT) DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT) @@ -44,18 +35,19 @@ DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | 
TCG_OPF_NOT_PRESENT) DEF(br, 0, 0, 1, TCG_OPF_BB_END) -#ifdef _MSC_VER -#define IMPL(X) ((0 && !(X)) ? TCG_OPF_NOT_PRESENT : 0) +#ifndef _MSC_VER +#define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? TCG_OPF_NOT_PRESENT : 0) #else -#define IMPL(X) (__builtin_constant_p(X) && !(X) ? TCG_OPF_NOT_PRESENT : 0) +#define IMPL(X) ((X) <= 0 ? TCG_OPF_NOT_PRESENT : 0) #endif - #if TCG_TARGET_REG_BITS == 32 # define IMPL64 TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT #else # define IMPL64 TCG_OPF_64BIT #endif +DEF(mb, 0, 0, 1, 0) + DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT) DEF(movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT) DEF(setcond_i32, 1, 2, 1, 0) @@ -89,6 +81,9 @@ DEF(sar_i32, 1, 2, 0, 0) DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32)) DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32)) DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32)) +DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32)) +DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32)) +DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32)) DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END) @@ -109,11 +104,24 @@ DEF(bswap16_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap16_i32)) DEF(bswap32_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap32_i32)) DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32)) DEF(neg_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_neg_i32)) +#ifdef _MSC_VER +DEF(andc_i32, 1, 2, 0, 0) +#else DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32)) +#endif + DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32)) DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32)) DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32)) DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32)) +DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32)) +DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32)) + +#ifdef _MSC_VER +DEF(ctpop_i32, 1, 1, 0, 0) +#else +DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32)) +#endif DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) DEF(movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) @@ -151,9 +159,18 @@ DEF(sar_i64, 1, 2, 0, IMPL64) DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64)) DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64)) DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64)) +DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64)) +DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64)) +DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64)) -DEF(trunc_shr_i32, 1, 1, 1, - IMPL(TCG_TARGET_HAS_trunc_shr_i32) +/* size changing ops */ +DEF(ext_i32_i64, 1, 1, 0, IMPL64) +DEF(extu_i32_i64, 1, 1, 0, IMPL64) +DEF(extrl_i64_i32, 1, 1, 0, + IMPL(TCG_TARGET_HAS_extrl_i64_i32) + | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0)) +DEF(extrh_i64_i32, 1, 1, 0, + IMPL(TCG_TARGET_HAS_extrh_i64_i32) | (TCG_TARGET_REG_BITS == 32 ? 
TCG_OPF_NOT_PRESENT : 0)) DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | IMPL64) @@ -168,42 +185,120 @@ DEF(bswap32_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64)) DEF(bswap64_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64)) DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64)) DEF(neg_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_neg_i64)) + +#ifdef _MSC_VER +DEF(andc_i64, 1, 2, 0, IMPL64) +#else DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64)) +#endif + DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64)) DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64)) DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64)) DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64)) +DEF(clz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_clz_i64)) +DEF(ctz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctz_i64)) + +#ifdef _MSC_VER +DEF(ctpop_i64, 1, 1, 0, IMPL64) +#else +DEF(ctpop_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctpop_i64)) +#endif DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64)) DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64)) DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64)) DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64)) -DEF(muluh_i64, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i64)) -DEF(mulsh_i64, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i64)) +DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64)) +DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64)) -/* QEMU specific */ -#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS -DEF(debug_insn_start, 0, 0, 2, TCG_OPF_NOT_PRESENT) -#else -DEF(debug_insn_start, 0, 0, 1, TCG_OPF_NOT_PRESENT) -#endif -DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_END) -DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END) - -#define TLADDR_ARGS (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2) +#define TLADDR_ARGS (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2) #define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2) -DEF(qemu_ld_i32, 1, TLADDR_ARGS, 2, +/* QEMU specific */ +DEF(insn_start, 0, 0, TLADDR_ARGS * TARGET_INSN_START_WORDS, + TCG_OPF_NOT_PRESENT) +DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) +DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) +DEF(goto_ptr, 0, 1, 0, + TCG_OPF_BB_EXIT | TCG_OPF_BB_END | IMPL(TCG_TARGET_HAS_goto_ptr)) + +DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) -DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 2, +DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) -DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 2, +DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) -DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 2, +DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) +/* Host vector support. 
*/ + +#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec) + +DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT) +DEF(dupi_vec, 1, 0, 1, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT) + +DEF(dup_vec, 1, 1, 0, IMPLVEC) +DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32)) + +DEF(ld_vec, 1, 1, 1, IMPLVEC) +DEF(st_vec, 0, 2, 1, IMPLVEC) +DEF(dupm_vec, 1, 1, 1, IMPLVEC) + +DEF(add_vec, 1, 2, 0, IMPLVEC) +DEF(sub_vec, 1, 2, 0, IMPLVEC) +DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec)) +DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec)) +DEF(abs_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_abs_vec)) +DEF(ssadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) +DEF(usadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) +DEF(sssub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) +DEF(ussub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) +DEF(smin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) +DEF(umin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) +DEF(smax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) +DEF(umax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) + +DEF(and_vec, 1, 2, 0, IMPLVEC) +DEF(or_vec, 1, 2, 0, IMPLVEC) +DEF(xor_vec, 1, 2, 0, IMPLVEC) +DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec)) +DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec)) +DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec)) + +DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec)) +DEF(shri_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec)) +DEF(sari_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec)) + +DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec)) +DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec)) +DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec)) + +#ifdef _MSC_VER +DEF(shlv_vec, 1, 2, 0, IMPLVEC) +DEF(shrv_vec, 1, 2, 0, IMPLVEC) +DEF(sarv_vec, 1, 2, 0, IMPLVEC) +#else +DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec)) +DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec)) +DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec)) +#endif + +DEF(cmp_vec, 1, 2, 1, IMPLVEC) + +DEF(bitsel_vec, 1, 3, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_bitsel_vec)) +DEF(cmpsel_vec, 1, 4, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_cmpsel_vec)) + +DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT) + +#if TCG_TARGET_MAYBE_vec +#include "tcg-target.opc.h" +#endif + #undef TLADDR_ARGS #undef DATA64_ARGS #undef IMPL #undef IMPL64 +#undef IMPLVEC #undef DEF diff --git a/qemu/include/tcg/tcg.h b/qemu/include/tcg/tcg.h new file mode 100644 index 00000000..f9b87452 --- /dev/null +++ b/qemu/include/tcg/tcg.h @@ -0,0 +1,1561 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef TCG_H +#define TCG_H + +#include "cpu.h" +#include "exec/memop.h" +#include "exec/tb-context.h" +#include "qemu/bitops.h" +#include "qemu/queue.h" +#include "tcg/tcg-mo.h" +#include "tcg-target.h" +#include "tcg-apple-jit.h" +#include "qemu/int128.h" + +/* XXX: make safe guess about sizes */ +#define MAX_OP_PER_INSTR 266 + +#if HOST_LONG_BITS == 32 +#define MAX_OPC_PARAM_PER_ARG 2 +#else +#define MAX_OPC_PARAM_PER_ARG 1 +#endif +#define MAX_OPC_PARAM_IARGS 6 +#define MAX_OPC_PARAM_OARGS 1 +#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS) + +/* A Call op needs up to 4 + 2N parameters on 32-bit archs, + * and up to 4 + N parameters on 64-bit archs + * (N = number of input arguments + output arguments). */ +#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS)) + +#define CPU_TEMP_BUF_NLONGS 128 + +/* Default target word size to pointer size. */ +#ifndef TCG_TARGET_REG_BITS +# if UINTPTR_MAX == UINT32_MAX +# define TCG_TARGET_REG_BITS 32 +# elif UINTPTR_MAX == UINT64_MAX +# define TCG_TARGET_REG_BITS 64 +# else +# error Unknown pointer size for tcg target +# endif +#endif + +#if TCG_TARGET_REG_BITS == 32 +typedef int32_t tcg_target_long; +typedef uint32_t tcg_target_ulong; +#define TCG_PRIlx PRIx32 +#define TCG_PRIld PRId32 +#elif TCG_TARGET_REG_BITS == 64 +typedef int64_t tcg_target_long; +typedef uint64_t tcg_target_ulong; +#define TCG_PRIlx PRIx64 +#define TCG_PRIld PRId64 +#else +#error unsupported +#endif + +/* Oversized TCG guests make things like MTTCG hard + * as we can't use atomics for cputlb updates. + */ +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS +#define TCG_OVERSIZED_GUEST 1 +#else +#define TCG_OVERSIZED_GUEST 0 +#endif + +#if TCG_TARGET_NB_REGS <= 32 +typedef uint32_t TCGRegSet; +#elif TCG_TARGET_NB_REGS <= 64 +typedef uint64_t TCGRegSet; +#else +#error unsupported +#endif + +#if TCG_TARGET_REG_BITS == 32 +/* Turn some undef macros into false macros. 
*/ +#define TCG_TARGET_HAS_extrl_i64_i32 0 +#define TCG_TARGET_HAS_extrh_i64_i32 0 +#define TCG_TARGET_HAS_div_i64 0 +#define TCG_TARGET_HAS_rem_i64 0 +#define TCG_TARGET_HAS_div2_i64 0 +#define TCG_TARGET_HAS_rot_i64 0 +#define TCG_TARGET_HAS_ext8s_i64 0 +#define TCG_TARGET_HAS_ext16s_i64 0 +#define TCG_TARGET_HAS_ext32s_i64 0 +#define TCG_TARGET_HAS_ext8u_i64 0 +#define TCG_TARGET_HAS_ext16u_i64 0 +#define TCG_TARGET_HAS_ext32u_i64 0 +#define TCG_TARGET_HAS_bswap16_i64 0 +#define TCG_TARGET_HAS_bswap32_i64 0 +#define TCG_TARGET_HAS_bswap64_i64 0 +#define TCG_TARGET_HAS_neg_i64 0 +#define TCG_TARGET_HAS_not_i64 0 +#define TCG_TARGET_HAS_andc_i64 0 +#define TCG_TARGET_HAS_orc_i64 0 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_clz_i64 0 +#define TCG_TARGET_HAS_ctz_i64 0 +#define TCG_TARGET_HAS_ctpop_i64 0 +#define TCG_TARGET_HAS_deposit_i64 0 +#define TCG_TARGET_HAS_extract_i64 0 +#define TCG_TARGET_HAS_sextract_i64 0 +#define TCG_TARGET_HAS_extract2_i64 0 +#define TCG_TARGET_HAS_movcond_i64 0 +#define TCG_TARGET_HAS_add2_i64 0 +#define TCG_TARGET_HAS_sub2_i64 0 +#define TCG_TARGET_HAS_mulu2_i64 0 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 0 +#define TCG_TARGET_HAS_mulsh_i64 0 +/* Turn some undef macros into true macros. */ +#define TCG_TARGET_HAS_add2_i32 1 +#define TCG_TARGET_HAS_sub2_i32 1 +#endif + +#ifndef TCG_TARGET_deposit_i32_valid +#define TCG_TARGET_deposit_i32_valid(ofs, len) 1 +#endif +#ifndef TCG_TARGET_deposit_i64_valid +#define TCG_TARGET_deposit_i64_valid(ofs, len) 1 +#endif +#ifndef TCG_TARGET_extract_i32_valid +#define TCG_TARGET_extract_i32_valid(ofs, len) 1 +#endif +#ifndef TCG_TARGET_extract_i64_valid +#define TCG_TARGET_extract_i64_valid(ofs, len) 1 +#endif + +/* Only one of DIV or DIV2 should be defined. */ +#if defined(TCG_TARGET_HAS_div_i32) +#define TCG_TARGET_HAS_div2_i32 0 +#elif defined(TCG_TARGET_HAS_div2_i32) +#define TCG_TARGET_HAS_div_i32 0 +#define TCG_TARGET_HAS_rem_i32 0 +#endif +#if defined(TCG_TARGET_HAS_div_i64) +#define TCG_TARGET_HAS_div2_i64 0 +#elif defined(TCG_TARGET_HAS_div2_i64) +#define TCG_TARGET_HAS_div_i64 0 +#define TCG_TARGET_HAS_rem_i64 0 +#endif + +/* For 32-bit targets, some sort of unsigned widening multiply is required. 
*/ +#if TCG_TARGET_REG_BITS == 32 \ + && !(defined(TCG_TARGET_HAS_mulu2_i32) \ + || defined(TCG_TARGET_HAS_muluh_i32)) +# error "Missing unsigned widening multiply" +#endif + +#if !defined(TCG_TARGET_HAS_v64) \ + && !defined(TCG_TARGET_HAS_v128) \ + && !defined(TCG_TARGET_HAS_v256) +#define TCG_TARGET_MAYBE_vec 0 +#define TCG_TARGET_HAS_abs_vec 0 +#define TCG_TARGET_HAS_neg_vec 0 +#define TCG_TARGET_HAS_not_vec 0 +#define TCG_TARGET_HAS_andc_vec 0 +#define TCG_TARGET_HAS_orc_vec 0 +#define TCG_TARGET_HAS_shi_vec 0 +#define TCG_TARGET_HAS_shs_vec 0 +#define TCG_TARGET_HAS_shv_vec 0 +#define TCG_TARGET_HAS_mul_vec 0 +#define TCG_TARGET_HAS_sat_vec 0 +#define TCG_TARGET_HAS_minmax_vec 0 +#define TCG_TARGET_HAS_bitsel_vec 0 +#define TCG_TARGET_HAS_cmpsel_vec 0 +#else +#define TCG_TARGET_MAYBE_vec 1 +#endif +#ifndef TCG_TARGET_HAS_v64 +#define TCG_TARGET_HAS_v64 0 +#endif +#ifndef TCG_TARGET_HAS_v128 +#define TCG_TARGET_HAS_v128 0 +#endif +#ifndef TCG_TARGET_HAS_v256 +#define TCG_TARGET_HAS_v256 0 +#endif + +#ifndef TARGET_INSN_START_EXTRA_WORDS +# define TARGET_INSN_START_WORDS 1 +#else +# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS) +#endif + +typedef enum TCGOpcode { +#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name, +#include "tcg/tcg-opc.h" +#undef DEF + NB_OPS, +} TCGOpcode; + +#define tcg_regset_set_reg(d, r) ((d) |= (TCGRegSet)1 << (r)) +#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r))) +#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1) + +#ifndef TCG_TARGET_INSN_UNIT_SIZE +# error "Missing TCG_TARGET_INSN_UNIT_SIZE" +#elif TCG_TARGET_INSN_UNIT_SIZE == 1 +typedef uint8_t tcg_insn_unit; +#elif TCG_TARGET_INSN_UNIT_SIZE == 2 +typedef uint16_t tcg_insn_unit; +#elif TCG_TARGET_INSN_UNIT_SIZE == 4 +typedef uint32_t tcg_insn_unit; +#elif TCG_TARGET_INSN_UNIT_SIZE == 8 +typedef uint64_t tcg_insn_unit; +#else +/* The port better have done this. */ +#endif + + +#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS +# define tcg_debug_assert(X) do { assert(X); } while (0) +#else +#ifndef _MSC_VER +# define tcg_debug_assert(X) \ + do { if (!(X)) { __builtin_unreachable(); } } while (0) +#else +# define tcg_debug_assert(X) +#endif +#endif + +typedef struct TCGRelocation TCGRelocation; +struct TCGRelocation { + QSIMPLEQ_ENTRY(TCGRelocation) next; + tcg_insn_unit *ptr; + intptr_t addend; + int type; +}; + +typedef struct TCGLabel TCGLabel; +struct TCGLabel { + unsigned present : 1; + unsigned has_value : 1; + unsigned id : 14; + unsigned refs : 16; + union { + uintptr_t value; + tcg_insn_unit *value_ptr; + } u; + QSIMPLEQ_HEAD(, TCGRelocation) relocs; + QSIMPLEQ_ENTRY(TCGLabel) next; +}; + +typedef struct TCGPool { + struct TCGPool *next; + int size; + uint8_t QEMU_ALIGN(8, data[0]); +} TCGPool; + +#define TCG_POOL_CHUNK_SIZE 32768 + +#define TCG_MAX_TEMPS 512 +#define TCG_MAX_INSNS 512 + +/* when the size of the arguments of a called function is smaller than + this value, they are statically allocated in the TB stack frame */ +#define TCG_STATIC_CALL_ARGS_SIZE 128 + +typedef enum TCGType { + TCG_TYPE_I32, + TCG_TYPE_I64, + + TCG_TYPE_V64, + TCG_TYPE_V128, + TCG_TYPE_V256, + + TCG_TYPE_COUNT, /* number of different types */ + + /* An alias for the size of the host register. */ +#if TCG_TARGET_REG_BITS == 32 + TCG_TYPE_REG = TCG_TYPE_I32, +#else + TCG_TYPE_REG = TCG_TYPE_I64, +#endif + + /* An alias for the size of the native pointer. 
*/ +#if UINTPTR_MAX == UINT32_MAX + TCG_TYPE_PTR = TCG_TYPE_I32, +#else + TCG_TYPE_PTR = TCG_TYPE_I64, +#endif + + /* An alias for the size of the target "long", aka register. */ +#if TARGET_LONG_BITS == 64 + TCG_TYPE_TL = TCG_TYPE_I64, +#else + TCG_TYPE_TL = TCG_TYPE_I32, +#endif +} TCGType; + +/** + * get_alignment_bits + * @memop: MemOp value + * + * Extract the alignment size from the memop. + */ +static inline unsigned get_alignment_bits(MemOp memop) +{ + unsigned a = memop & MO_AMASK; + + if (a == MO_UNALN) { + /* No alignment required. */ + a = 0; + } else if (a == MO_ALIGN) { + /* A natural alignment requirement. */ + a = memop & MO_SIZE; + } else { + /* A specific alignment requirement. */ + a = a >> MO_ASHIFT; + } + + /* The requested alignment cannot overlap the TLB flags. */ + tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0); + + return a; +} + +typedef tcg_target_ulong TCGArg; + +/* Define type and accessor macros for TCG variables. + + TCG variables are the inputs and outputs of TCG ops, as described + in tcg/README. Target CPU front-end code uses these types to deal + with TCG variables as it emits TCG code via the tcg_gen_* functions. + They come in several flavours: + * TCGv_i32 : 32 bit integer type + * TCGv_i64 : 64 bit integer type + * TCGv_ptr : a host pointer type + * TCGv_vec : a host vector type; the exact size is not exposed + to the CPU front-end code. + * TCGv : an integer type the same size as target_ulong + (an alias for either TCGv_i32 or TCGv_i64) + The compiler's type checking will complain if you mix them + up and pass the wrong sized TCGv to a function. + + Users of tcg_gen_* don't need to know about any of the internal + details of these, and should treat them as opaque types. + You won't be able to look inside them in a debugger either. + + Internal implementation details follow: + + Note that there is no definition of the structs TCGv_i32_d etc anywhere. + This is deliberate, because the values we store in variables of type + TCGv_i32 are not really pointers-to-structures. They're just small + integers, but keeping them in pointer types like this means that the + compiler will complain if you accidentally pass a TCGv_i32 to a + function which takes a TCGv_i64, and so on. Only the internals of + TCG need to care about the actual contents of the types. */ + +typedef struct TCGv_i32_d *TCGv_i32; +typedef struct TCGv_i64_d *TCGv_i64; +typedef struct TCGv_ptr_d *TCGv_ptr; +typedef struct TCGv_vec_d *TCGv_vec; +typedef TCGv_ptr TCGv_env; +#if TARGET_LONG_BITS == 32 +#define TCGv TCGv_i32 +#elif TARGET_LONG_BITS == 64 +#define TCGv TCGv_i64 +#else +#error Unhandled TARGET_LONG_BITS value +#endif + +/* call flags */ +/* Helper does not read globals (either directly or through an exception). It + implies TCG_CALL_NO_WRITE_GLOBALS. */ +#define TCG_CALL_NO_READ_GLOBALS 0x0001 +/* Helper does not write globals */ +#define TCG_CALL_NO_WRITE_GLOBALS 0x0002 +/* Helper can be safely suppressed if the return value is not used. */ +#define TCG_CALL_NO_SIDE_EFFECTS 0x0004 +/* Helper is QEMU_NORETURN. */ +#define TCG_CALL_NO_RETURN 0x0008 + +/* convenience version of most used call flags */ +#define TCG_CALL_NO_RWG TCG_CALL_NO_READ_GLOBALS +#define TCG_CALL_NO_WG TCG_CALL_NO_WRITE_GLOBALS +#define TCG_CALL_NO_SE TCG_CALL_NO_SIDE_EFFECTS +#define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE) +#define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE) + +/* Used to align parameters. See the comment before tcgv_i32_temp. 
*/
+#define TCG_CALL_DUMMY_ARG ((TCGArg)0)
+
+/* Conditions.  Note that these are laid out for easy manipulation by
+   the functions below:
+     bit 0 is used for inverting;
+     bit 1 is signed,
+     bit 2 is unsigned,
+     bit 3 is used with bit 0 for swapping signed/unsigned.  */
+typedef enum {
+    /* non-signed */
+    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
+    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
+    TCG_COND_EQ     = 8 | 0 | 0 | 0,
+    TCG_COND_NE     = 8 | 0 | 0 | 1,
+    /* signed */
+    TCG_COND_LT     = 0 | 0 | 2 | 0,
+    TCG_COND_GE     = 0 | 0 | 2 | 1,
+    TCG_COND_LE     = 8 | 0 | 2 | 0,
+    TCG_COND_GT     = 8 | 0 | 2 | 1,
+    /* unsigned */
+    TCG_COND_LTU    = 0 | 4 | 0 | 0,
+    TCG_COND_GEU    = 0 | 4 | 0 | 1,
+    TCG_COND_LEU    = 8 | 4 | 0 | 0,
+    TCG_COND_GTU    = 8 | 4 | 0 | 1,
+} TCGCond;
+
+/* Invert the sense of the comparison.  */
+static inline TCGCond tcg_invert_cond(TCGCond c)
+{
+    return (TCGCond)(c ^ 1);
+}
+
+/* Swap the operands in a comparison.  */
+static inline TCGCond tcg_swap_cond(TCGCond c)
+{
+    return c & 6 ? (TCGCond)(c ^ 9) : c;
+}
+
+/* Create an "unsigned" version of a "signed" comparison.  */
+static inline TCGCond tcg_unsigned_cond(TCGCond c)
+{
+    return c & 2 ? (TCGCond)(c ^ 6) : c;
+}
+
+/* Create a "signed" version of an "unsigned" comparison.  */
+static inline TCGCond tcg_signed_cond(TCGCond c)
+{
+    return c & 4 ? (TCGCond)(c ^ 6) : c;
+}
+
+/* Must a comparison be considered unsigned?  */
+static inline bool is_unsigned_cond(TCGCond c)
+{
+    return (c & 4) != 0;
+}
+
+/* Create a "high" version of a double-word comparison.
+   This removes equality from a LTE or GTE comparison.  */
+static inline TCGCond tcg_high_cond(TCGCond c)
+{
+    switch (c) {
+    case TCG_COND_GE:
+    case TCG_COND_LE:
+    case TCG_COND_GEU:
+    case TCG_COND_LEU:
+        return (TCGCond)(c ^ 8);
+    default:
+        return c;
+    }
+}
+
+typedef enum TCGTempVal {
+    TEMP_VAL_DEAD,
+    TEMP_VAL_REG,
+    TEMP_VAL_MEM,
+    TEMP_VAL_CONST,
+} TCGTempVal;
+
+typedef struct TCGTemp {
+    TCGReg reg:8;
+    TCGTempVal val_type:8;
+    TCGType base_type:8;
+    TCGType type:8;
+    unsigned int fixed_reg:1;
+    unsigned int indirect_reg:1;
+    unsigned int indirect_base:1;
+    unsigned int mem_coherent:1;
+    unsigned int mem_allocated:1;
+    /* If true, the temp is saved across both basic blocks and
+       translation blocks.  */
+    unsigned int temp_global:1;
+    /* If true, the temp is saved across basic blocks but dead
+       at the end of translation blocks.  If false, the temp is
+       dead at the end of basic blocks.  */
+    unsigned int temp_local:1;
+    unsigned int temp_allocated:1;
+
+    tcg_target_long val;
+    struct TCGTemp *mem_base;
+    intptr_t mem_offset;
+    const char *name;
+
+    /* Pass-specific information that can be stored for a temporary.
+       One word worth of integer data, and one pointer to data
+       allocated separately.  */
+    uintptr_t state;
+    void *state_ptr;
+} TCGTemp;
+
+typedef struct TCGContext TCGContext;
+
+typedef struct TCGTempSet {
+    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
+} TCGTempSet;
+
+/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
+   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
+   There are never more than 2 outputs, which means that we can store all
+   dead + sync data within 16 bits.  */
+#define DEAD_ARG  4
+#define SYNC_ARG  1
+typedef uint16_t TCGLifeData;
+
+/* The layout here is designed to avoid a bitfield crossing of
+   a 32-bit boundary, which would cause GCC to add extra padding.  */
+typedef struct TCGOp {
+#ifdef _MSC_VER
+    uint32_t opc      : 8;        /*  8 */
+#else
+    TCGOpcode opc     : 8;        /*  8 */
+#endif
+
+    /* Parameters for this opcode.  See below. 
*/ + unsigned param1 : 4; /* 12 */ + unsigned param2 : 4; /* 16 */ + + /* Lifetime data of the operands. */ + unsigned life : 16; /* 32 */ + + /* Next and previous opcodes. */ + QTAILQ_ENTRY(TCGOp) link; + + /* Arguments for the opcode. */ + TCGArg args[MAX_OPC_PARAM]; + + /* Register preferences for the output(s). */ + TCGRegSet output_pref[2]; +} TCGOp; + +#define TCGOP_CALLI(X) (X)->param1 +#define TCGOP_CALLO(X) (X)->param2 + +#define TCGOP_VECL(X) (X)->param1 +#define TCGOP_VECE(X) (X)->param2 + +/* Make sure operands fit in the bitfields above. */ +QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8)); + +typedef struct TCGProfile { + int64_t cpu_exec_time; + int64_t tb_count1; + int64_t tb_count; + int64_t op_count; /* total insn count */ + int op_count_max; /* max insn per TB */ + int temp_count_max; + int64_t temp_count; + int64_t del_op_count; + int64_t code_in_len; + int64_t code_out_len; + int64_t search_out_len; + int64_t interm_time; + int64_t code_time; + int64_t la_time; + int64_t opt_time; + int64_t restore_count; + int64_t restore_time; + int64_t table_op_count[NB_OPS]; +} TCGProfile; + +/* + * We divide code_gen_buffer into equally-sized "regions" that TCG threads + * dynamically allocate from as demand dictates. Given appropriate region + * sizing, this minimizes flushes even when some TCG threads generate a lot + * more code than others. + */ +typedef struct TCGOpDef TCGOpDef; +struct tcg_region_state { + /* fields set at init time */ + void *start; + void *start_aligned; + void *end; + size_t n; + size_t size; /* size of one region */ + size_t stride; /* .size + guard size */ + + size_t current; /* current region index */ + size_t agg_size_full; /* aggregate size of full regions */ +}; + +struct TCGContext { + uint8_t *pool_cur, *pool_end; + TCGPool *pool_first, *pool_current, *pool_first_large; + int nb_labels; + int nb_globals; + int nb_temps; + int nb_indirects; + int nb_ops; + + /* goto_tb support */ + tcg_insn_unit *code_buf; + uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */ + uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */ + uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */ + + TCGRegSet reserved_regs; + uint32_t tb_cflags; /* cflags of the current TB */ + intptr_t current_frame_offset; + intptr_t frame_start; + intptr_t frame_end; + TCGTemp *frame_temp; + + tcg_insn_unit *code_ptr; + +#ifdef CONFIG_DEBUG_TCG + int temps_in_use; + int goto_tb_issue_mask; + const TCGOpcode *vecop_list; +#endif + + /* Code generation. Note that we specifically do not use tcg_insn_unit + here, because there's too much arithmetic throughout that relies + on addition and subtraction working on bytes. Rely on the GCC + extension that allows arithmetic on void*. */ + void *code_gen_prologue; + void *code_gen_epilogue; + void *code_gen_buffer; + size_t code_gen_buffer_size; + void *code_gen_ptr; + void *data_gen_ptr; + + /* Threshold to flush the translated code buffer. */ + void *code_gen_highwater; + +#ifdef HAVE_PTHREAD_JIT_PROTECT + /* + * True for X, False for W. + * + * Source: https://developer.apple.com/documentation/apple_silicon/porting_just-in-time_compilers_to_apple_silicon?language=objc + */ + bool code_gen_locked; +#endif + + size_t tb_phys_invalidate_count; + + /* Track which vCPU triggers events */ + CPUState *cpu; /* *_trans */ + + /* These structures are private to tcg-target.inc.c. 
*/ +#ifdef TCG_TARGET_NEED_LDST_LABELS + QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels; +#endif +#ifdef TCG_TARGET_NEED_POOL_LABELS + struct TCGLabelPoolData *pool_labels; +#endif + + TCGLabel *exitreq_label; + + TCGTempSet free_temps[TCG_TYPE_COUNT * 2]; + TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ + + QTAILQ_HEAD(, TCGOp) ops, free_ops; + QSIMPLEQ_HEAD(, TCGLabel) labels; + + /* Tells which temporary holds a given register. + It does not take into account fixed registers */ + TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS]; + + uint16_t gen_insn_end_off[TCG_MAX_INSNS]; + target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS]; + + /* qemu/accel/tcg/translate-all.c */ + TBContext tb_ctx; + /* qemu/include/exec/gen-icount.h */ + TCGOp *icount_start_insn; + /* qemu/tcg/tcg.c */ + GHashTable *helper_table; + TCGv_ptr cpu_env; + struct tcg_region_state region; + GTree *tree; + TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT]; + TCGRegSet tcg_target_call_clobber_regs; + int *indirect_reg_alloc_order; + struct jit_code_entry *one_entry; + /* qemu/tcg/tcg-common.c */ + TCGOpDef *tcg_op_defs; + + // Unicorn engine variables + struct uc_struct *uc; + + /* qemu/target/i386/translate.c: global register indexes */ + TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2; + TCGv_i32 cpu_cc_op; + TCGv cpu_regs[56]; // 16 GRP for x64 + /* only x86 need cpu_seg_base[]. */ + TCGv cpu_seg_base[6]; + TCGv_i64 cpu_bndl[4]; + TCGv_i64 cpu_bndu[4]; + + /* qemu/tcg/i386/tcg-target.inc.c */ + void *tb_ret_addr; + + /* target/riscv/translate.c */ + TCGv cpu_gpr[32], cpu_pc; // also target/mips/translate.c + TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */ + TCGv load_res; + TCGv load_val; + + // target/arm/translate.c + /* We reuse the same 64-bit temporaries for efficiency. 
*/ + TCGv_i64 cpu_V0, cpu_V1, cpu_M0; + TCGv_i32 cpu_R[16]; + TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF; + TCGv_i64 cpu_exclusive_addr; + TCGv_i64 cpu_exclusive_val; + + // target/arm/translate-a64.c + TCGv_i64 cpu_X[32]; + TCGv_i64 cpu_pc_arm64; + /* Load/store exclusive handling */ + TCGv_i64 cpu_exclusive_high; + + // target/mips/translate.c + // #define MIPS_DSP_ACC 4 + // TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC]; + TCGv cpu_HI[4], cpu_LO[4]; + TCGv cpu_dspctrl, btarget, bcond; + TCGv cpu_lladdr, cpu_llval; + TCGv_i32 hflags; + TCGv_i32 fpu_fcr0, fpu_fcr31; + TCGv_i64 fpu_f64[32]; + TCGv_i64 msa_wr_d[64]; +#if defined(TARGET_MIPS64) + /* Upper halves of R5900's 128-bit registers: MMRs (multimedia registers) */ + TCGv_i64 cpu_mmr[32]; +#endif +#if !defined(TARGET_MIPS64) + /* MXU registers */ + // #define NUMBER_OF_MXU_REGISTERS 16 + // TCGv mxu_gpr[NUMBER_OF_MXU_REGISTERS - 1]; + TCGv mxu_gpr[16 - 1]; + TCGv mxu_CR; +#endif + + // target/sparc/translate.c + /* global register indexes */ + TCGv_ptr cpu_regwptr; + // TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst; + // TCGv_i32 cpu_cc_op; + TCGv_i32 cpu_psr; + TCGv cpu_fsr, cpu_npc; + // TCGv cpu_regs[32]; + TCGv cpu_y; + TCGv cpu_tbr; + TCGv cpu_cond; +#ifdef TARGET_SPARC64 + TCGv_i32 cpu_xcc, cpu_fprs; + TCGv cpu_gsr; + TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr; + TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver; +#else + TCGv cpu_wim; +#endif + /* Floating point registers */ + // TCGv_i64 cpu_fpr[TARGET_DPREGS]; + + // target/m68k/translate.c + TCGv_i32 cpu_halted; + TCGv_i32 cpu_exception_index; + char cpu_reg_names[2 * 8 * 3 + 5 * 4]; + TCGv cpu_dregs[8]; + TCGv cpu_aregs[8]; + TCGv_i64 cpu_macc[4]; + TCGv NULL_QREG; + /* Used to distinguish stores from bad addressing modes. */ + TCGv store_dummy; +}; + +static inline size_t temp_idx(TCGContext *tcg_ctx, TCGTemp *ts) +{ + ptrdiff_t n = ts - tcg_ctx->temps; + tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps); + return n; +} + +static inline TCGArg temp_arg(TCGTemp *ts) +{ + return (uintptr_t)ts; +} + +static inline TCGTemp *arg_temp(TCGArg a) +{ + return (TCGTemp *)(uintptr_t)a; +} + +/* Using the offset of a temporary, relative to TCGContext, rather than + its index means that we don't use 0. That leaves offset 0 free for + a NULL representation without having to leave index 0 unused. 
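   A hand-rolled sketch of that encoding (the helper name is hypothetical):
*/

static inline TCGv_i32 example_temp_handle(TCGContext *tcg_ctx)
{
    /* temps[3] becomes its byte offset within TCGContext, which is
       never 0, so 0 stays available as a NULL handle */
    uintptr_t o = (uintptr_t)((char *)&tcg_ctx->temps[3] - (char *)tcg_ctx);
    return (TCGv_i32)o;
}

/*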
*/ +static inline TCGTemp *tcgv_i32_temp(TCGContext *tcg_ctx, TCGv_i32 v) +{ + uintptr_t o = (uintptr_t)v; + TCGTemp *t = (TCGTemp *)((char *)tcg_ctx + o); + tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(tcg_ctx, t)]) == o); + return t; +} + +static inline TCGTemp *tcgv_i64_temp(TCGContext *tcg_ctx, TCGv_i64 v) +{ + return tcgv_i32_temp(tcg_ctx, (TCGv_i32)v); +} + +static inline TCGTemp *tcgv_ptr_temp(TCGContext *tcg_ctx, TCGv_ptr v) +{ + return tcgv_i32_temp(tcg_ctx, (TCGv_i32)v); +} + +static inline TCGTemp *tcgv_vec_temp(TCGContext *tcg_ctx, TCGv_vec v) +{ + return tcgv_i32_temp(tcg_ctx, (TCGv_i32)v); +} + +static inline TCGArg tcgv_i32_arg(TCGContext *tcg_ctx, TCGv_i32 v) +{ + return temp_arg(tcgv_i32_temp(tcg_ctx, v)); +} + +static inline TCGArg tcgv_i64_arg(TCGContext *tcg_ctx, TCGv_i64 v) +{ + return temp_arg(tcgv_i64_temp(tcg_ctx, v)); +} + +static inline TCGArg tcgv_ptr_arg(TCGContext *tcg_ctx, TCGv_ptr v) +{ + return temp_arg(tcgv_ptr_temp(tcg_ctx, v)); +} + +static inline TCGArg tcgv_vec_arg(TCGContext *tcg_ctx, TCGv_vec v) +{ + return temp_arg(tcgv_vec_temp(tcg_ctx, v)); +} + +static inline TCGv_i32 temp_tcgv_i32(TCGContext *tcg_ctx, TCGTemp *t) +{ + (void)temp_idx(tcg_ctx, t); /* trigger embedded assert */ + return (TCGv_i32)((char *)t - (char *)tcg_ctx); +} + +static inline TCGv_i64 temp_tcgv_i64(TCGContext *tcg_ctx, TCGTemp *t) +{ + return (TCGv_i64)temp_tcgv_i32(tcg_ctx, t); +} + +static inline TCGv_ptr temp_tcgv_ptr(TCGContext *tcg_ctx, TCGTemp *t) +{ + return (TCGv_ptr)temp_tcgv_i32(tcg_ctx, t); +} + +static inline TCGv_vec temp_tcgv_vec(TCGContext *tcg_ctx, TCGTemp *t) +{ + return (TCGv_vec)temp_tcgv_i32(tcg_ctx, t); +} + +#if TCG_TARGET_REG_BITS == 32 +static inline TCGv_i32 TCGV_LOW(TCGContext *tcg_ctx, TCGv_i64 t) +{ + return temp_tcgv_i32(tcg_ctx, tcgv_i64_temp(tcg_ctx, t)); +} + +static inline TCGv_i32 TCGV_HIGH(TCGContext *tcg_ctx, TCGv_i64 t) +{ + return temp_tcgv_i32(tcg_ctx, tcgv_i64_temp(tcg_ctx, t) + 1); +} +#endif + +static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v) +{ + op->args[arg] = v; +} + +static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v) +{ +#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS + tcg_set_insn_param(op, arg, v); +#else + tcg_set_insn_param(op, arg * 2, v); + tcg_set_insn_param(op, arg * 2 + 1, v >> 32); +#endif +} + +/* The last op that was emitted. */ +static inline TCGOp *tcg_last_op(TCGContext *tcg_ctx) +{ + return QTAILQ_LAST(&tcg_ctx->ops); +} + +/* Test for whether to terminate the TB for using too many opcodes. */ +static inline bool tcg_op_buf_full(TCGContext *tcg_ctx) +{ + /* This is not a hard limit, it merely stops translation when + * we have produced "enough" opcodes. We want to limit TB size + * such that a RISC host can reasonably use a 16-bit signed + * branch within the TB. We also need to be mindful of the + * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[] + * and TCGContext.gen_insn_end_off[]. + */ + return tcg_ctx->nb_ops >= 4000; +} + +/* pool based memory allocation */ + +/* user-mode: mmap_lock must be held for tcg_malloc_internal. 
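   Separately, a sketch of how the tcg_op_buf_full() heuristic above is
   consumed by a translator loop; the helper name is hypothetical:
*/

static inline bool example_should_end_tb(TCGContext *tcg_ctx, int num_insns)
{
    /* end the TB once "enough" ops exist or the per-TB instruction
       budget (capped at TCG_MAX_INSNS) is used up */
    return tcg_op_buf_full(tcg_ctx) || num_insns >= TCG_MAX_INSNS;
}

/*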
*/ +void *tcg_malloc_internal(TCGContext *s, int size); +void tcg_pool_reset(TCGContext *s); +TranslationBlock *tcg_tb_alloc(TCGContext *s); + +void tcg_region_init(TCGContext *tcg_ctx); +void tcg_region_reset_all(TCGContext *tcg_ctx); + +size_t tcg_code_size(TCGContext *tcg_ctx); +size_t tcg_code_capacity(TCGContext *tcg_ctx); + +void tcg_tb_insert(TCGContext *tcg_ctx, TranslationBlock *tb); +void tcg_tb_remove(TCGContext *tcg_ctx, TranslationBlock *tb); +size_t tcg_tb_phys_invalidate_count(TCGContext *tcg_ctx); +TranslationBlock *tcg_tb_lookup(TCGContext *tcg_ctx, uintptr_t tc_ptr); +/* glib gtree: + * gboolean (*GTraverseFunc) (gpointer key, gpointer value, gpointer data); +*/ +typedef int (*GTraverseFunc) (void *key, void *value, void *data); +void tcg_tb_foreach(TCGContext *tcg_ctx, GTraverseFunc func, gpointer user_data); +size_t tcg_nb_tbs(TCGContext *tcg_ctx); + +/* user-mode: Called with mmap_lock held. */ +static inline void *tcg_malloc(TCGContext *tcg_ctx, int size) +{ + TCGContext *s = tcg_ctx; + uint8_t *ptr, *ptr_end; + + /* ??? This is a weak placeholder for minimum malloc alignment. */ + size = QEMU_ALIGN_UP(size, 8); + + ptr = s->pool_cur; + ptr_end = ptr + size; + if (unlikely(ptr_end > s->pool_end)) { + return tcg_malloc_internal(tcg_ctx, size); + } else { + s->pool_cur = ptr_end; + return ptr; + } +} + +void tcg_context_init(TCGContext *s); +void tcg_register_thread(void); +void tcg_prologue_init(TCGContext *s); +void tcg_func_start(TCGContext *s); + +int tcg_gen_code(TCGContext *s, TranslationBlock *tb); + +void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size); + +TCGTemp *tcg_global_mem_new_internal(TCGContext *tcg_ctx, TCGType, TCGv_ptr, + intptr_t, const char *); +TCGTemp *tcg_temp_new_internal(TCGContext *tcg_ctx, TCGType, bool); +void tcg_temp_free_internal(TCGContext *tcg_ctx, TCGTemp *); +TCGv_vec tcg_temp_new_vec(TCGContext *tcg_ctx, TCGType type); +TCGv_vec tcg_temp_new_vec_matching(TCGContext *tcg_ctx, TCGv_vec match); + +static inline void tcg_temp_free_i32(TCGContext *tcg_ctx, TCGv_i32 arg) +{ + tcg_temp_free_internal(tcg_ctx, tcgv_i32_temp(tcg_ctx, arg)); +} + +static inline void tcg_temp_free_i64(TCGContext *tcg_ctx, TCGv_i64 arg) +{ + tcg_temp_free_internal(tcg_ctx, tcgv_i64_temp(tcg_ctx, arg)); +} + +static inline void tcg_temp_free_ptr(TCGContext *tcg_ctx, TCGv_ptr arg) +{ + tcg_temp_free_internal(tcg_ctx, tcgv_ptr_temp(tcg_ctx, arg)); +} + +static inline void tcg_temp_free_vec(TCGContext *tcg_ctx, TCGv_vec arg) +{ + tcg_temp_free_internal(tcg_ctx, tcgv_vec_temp(tcg_ctx, arg)); +} + +static inline TCGv_i32 tcg_global_mem_new_i32(TCGContext *tcg_ctx, TCGv_ptr reg, intptr_t offset, + const char *name) +{ + TCGTemp *t = tcg_global_mem_new_internal(tcg_ctx, TCG_TYPE_I32, reg, offset, name); + return temp_tcgv_i32(tcg_ctx, t); +} + +static inline TCGv_i32 tcg_temp_new_i32(TCGContext *tcg_ctx) +{ + TCGTemp *t = tcg_temp_new_internal(tcg_ctx, TCG_TYPE_I32, false); + return temp_tcgv_i32(tcg_ctx, t); +} + +static inline TCGv_i32 tcg_temp_local_new_i32(TCGContext *tcg_ctx) +{ + TCGTemp *t = tcg_temp_new_internal(tcg_ctx, TCG_TYPE_I32, true); + return temp_tcgv_i32(tcg_ctx, t); +} + +static inline TCGv_i64 tcg_global_mem_new_i64(TCGContext *tcg_ctx, TCGv_ptr reg, intptr_t offset, + const char *name) +{ + TCGTemp *t = tcg_global_mem_new_internal(tcg_ctx, TCG_TYPE_I64, reg, offset, name); + return temp_tcgv_i64(tcg_ctx, t); +} + +static inline TCGv_i64 tcg_temp_new_i64(TCGContext *tcg_ctx) +{ + TCGTemp *t = tcg_temp_new_internal(tcg_ctx, 
TCG_TYPE_I64, false); + return temp_tcgv_i64(tcg_ctx, t); +} + +static inline TCGv_i64 tcg_temp_local_new_i64(TCGContext *tcg_ctx) +{ + TCGTemp *t = tcg_temp_new_internal(tcg_ctx, TCG_TYPE_I64, true); + return temp_tcgv_i64(tcg_ctx, t); +} + +static inline TCGv_ptr tcg_global_mem_new_ptr(TCGContext *tcg_ctx, TCGv_ptr reg, intptr_t offset, + const char *name) +{ + TCGTemp *t = tcg_global_mem_new_internal(tcg_ctx, TCG_TYPE_PTR, reg, offset, name); + return temp_tcgv_ptr(tcg_ctx, t); +} + +static inline TCGv_ptr tcg_temp_new_ptr(TCGContext *tcg_ctx) +{ + TCGTemp *t = tcg_temp_new_internal(tcg_ctx, TCG_TYPE_PTR, false); + return temp_tcgv_ptr(tcg_ctx, t); +} + +static inline TCGv_ptr tcg_temp_local_new_ptr(TCGContext *tcg_ctx) +{ + TCGTemp *t = tcg_temp_new_internal(tcg_ctx, TCG_TYPE_PTR, true); + return temp_tcgv_ptr(tcg_ctx, t); +} + +#if defined(CONFIG_DEBUG_TCG) +/* If you call tcg_clear_temp_count() at the start of a section of + * code which is not supposed to leak any TCG temporaries, then + * calling tcg_check_temp_count() at the end of the section will + * return 1 if the section did in fact leak a temporary. + */ +void tcg_clear_temp_count(void); +int tcg_check_temp_count(void); +#else +#define tcg_clear_temp_count() do { } while (0) +#define tcg_check_temp_count() 0 +#endif + +int64_t tcg_cpu_exec_time(void); + +#define TCG_CT_ALIAS 0x80 +#define TCG_CT_IALIAS 0x40 +#define TCG_CT_NEWREG 0x20 /* output requires a new register */ +#define TCG_CT_REG 0x01 +#define TCG_CT_CONST 0x02 /* any constant of register size */ + +typedef struct TCGArgConstraint { + uint16_t ct; + uint8_t alias_index; + union { + TCGRegSet regs; + } u; +} TCGArgConstraint; + +#define TCG_MAX_OP_ARGS 16 + +/* Bits for TCGOpDef->flags, 8 bits available. */ +enum { + /* Instruction exits the translation block. */ + TCG_OPF_BB_EXIT = 0x01, + /* Instruction defines the end of a basic block. */ + TCG_OPF_BB_END = 0x02, + /* Instruction clobbers call registers and potentially updates globals. */ + TCG_OPF_CALL_CLOBBER = 0x04, + /* Instruction has side effects: it cannot be removed if its outputs + are not used, and might trigger exceptions. */ + TCG_OPF_SIDE_EFFECTS = 0x08, + /* Instruction operands are 64-bits (otherwise 32-bits). */ + TCG_OPF_64BIT = 0x10, + /* Instruction is optional and not implemented by the host, or insn + is generic and should not be implemented by the host. */ + TCG_OPF_NOT_PRESENT = 0x20, + /* Instruction operands are vectors.
*/ + TCG_OPF_VECTOR = 0x40, +}; + +typedef struct TCGOpDef { + const char *name; + uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args; + uint8_t flags; + TCGArgConstraint *args_ct; + int *sorted_args; +#if defined(CONFIG_DEBUG_TCG) + int used; +#endif +} TCGOpDef; + +typedef struct TCGTargetOpDef { + TCGOpcode op; + const char *args_ct_str[TCG_MAX_OP_ARGS]; +} TCGTargetOpDef; + +#ifndef NDEBUG +#define tcg_abort() \ +do {\ + fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\ + abort();\ +} while (0) +#else +#define tcg_abort() abort() +#endif + +bool tcg_op_supported(TCGOpcode op); + +void tcg_gen_callN(TCGContext *tcg_ctx, void *func, TCGTemp *ret, int nargs, TCGTemp **args); + +TCGOp *tcg_emit_op(TCGContext *tcg_ctx, TCGOpcode opc); +void tcg_op_remove(TCGContext *s, TCGOp *op); +TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc); +TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc); + +void tcg_optimize(TCGContext *s); + +TCGv_i32 tcg_const_i32(TCGContext *tcg_ctx, int32_t val); +TCGv_i64 tcg_const_i64(TCGContext *tcg_ctx, int64_t val); +TCGv_i32 tcg_const_local_i32(TCGContext *tcg_ctx, int32_t val); +TCGv_i64 tcg_const_local_i64(TCGContext *tcg_ctx, int64_t val); +TCGv_vec tcg_const_zeros_vec(TCGContext *tcg_ctx, TCGType); +TCGv_vec tcg_const_ones_vec(TCGContext *tcg_ctx, TCGType); +TCGv_vec tcg_const_zeros_vec_matching(TCGContext *tcg_ctx, TCGv_vec); +TCGv_vec tcg_const_ones_vec_matching(TCGContext *tcg_ctx, TCGv_vec); + +#if UINTPTR_MAX == UINT32_MAX +# define tcg_const_ptr(tcg_ctx, x) ((TCGv_ptr)tcg_const_i32(tcg_ctx, (intptr_t)(x))) +# define tcg_const_local_ptr(tcg_ctx, x) ((TCGv_ptr)tcg_const_local_i32(tcg_ctx, (intptr_t)(x))) +#else +# define tcg_const_ptr(tcg_ctx, x) ((TCGv_ptr)tcg_const_i64(tcg_ctx, (intptr_t)(x))) +# define tcg_const_local_ptr(tcg_ctx, x) ((TCGv_ptr)tcg_const_local_i64(tcg_ctx, (intptr_t)(x))) +#endif + +TCGLabel *gen_new_label(TCGContext *tcg_ctx); + +/** + * label_arg + * @l: label + * + * Encode a label for storage in the TCG opcode stream. + */ + +static inline TCGArg label_arg(TCGLabel *l) +{ + return (uintptr_t)l; +} + +/** + * arg_label + * @i: value + * + * The opposite of label_arg. Retrieve a label from the + * encoding of the TCG opcode stream. + */ + +static inline TCGLabel *arg_label(TCGArg i) +{ + return (TCGLabel *)(uintptr_t)i; +} + +/** + * tcg_ptr_byte_diff + * @a, @b: addresses to be differenced + * + * There are many places within the TCG backends where we need a byte + * difference between two pointers. While this can be accomplished + * with local casting, it's easy to get wrong -- especially if one is + * concerned with the signedness of the result. + * + * This version relies on GCC's void pointer arithmetic to get the + * correct result. + */ + +static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b) +{ + return (char *)a - (char *)b; +} + +/** + * tcg_pcrel_diff + * @s: the tcg context + * @target: address of the target + * + * Produce a pc-relative difference, from the current code_ptr + * to the destination address. + */ + +static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target) +{ + return tcg_ptr_byte_diff(target, s->code_ptr); +} + +/** + * tcg_current_code_size + * @s: the tcg context + * + * Compute the current code size within the translation block. + * This is used to fill in qemu's data structures for goto_tb. 
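 * As an aside, label_arg() and arg_label() above are exact inverses;
 * an illustrative round trip (the helper name is hypothetical):
 */

static inline bool example_label_roundtrip(TCGContext *tcg_ctx)
{
    /* labels travel through the opcode stream as pointer-sized args */
    TCGLabel *l = gen_new_label(tcg_ctx);
    return arg_label(label_arg(l)) == l;   /* always true */
}

/*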
+ */ + +static inline size_t tcg_current_code_size(TCGContext *s) +{ + return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); +} + +/* Combine the MemOp and mmu_idx parameters into a single value. */ +typedef uint32_t TCGMemOpIdx; + +/** + * make_memop_idx + * @op: memory operation + * @idx: mmu index + * + * Encode these values into a single parameter. + */ +static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx) +{ + tcg_debug_assert(idx <= 15); + return (op << 4) | idx; +} + +/** + * get_memop + * @oi: combined op/idx parameter + * + * Extract the memory operation from the combined value. + */ +static inline MemOp get_memop(TCGMemOpIdx oi) +{ + return oi >> 4; +} + +/** + * get_mmuidx + * @oi: combined op/idx parameter + * + * Extract the mmu index from the combined value. + */ +static inline unsigned get_mmuidx(TCGMemOpIdx oi) +{ + return oi & 15; +} + +/** + * tcg_qemu_tb_exec: + * @env: pointer to CPUArchState for the CPU + * @tb_ptr: address of generated code for the TB to execute + * + * Start executing code from a given translation block. + * Where translation blocks have been linked, execution + * may proceed from the given TB into successive ones. + * Control eventually returns only when some action is needed + * from the top-level loop: either control must pass to a TB + * which has not yet been directly linked, or an asynchronous + * event such as an interrupt needs handling. + * + * Return: The return value is the value passed to the corresponding + * tcg_gen_exit_tb() at translation time of the last TB attempted to execute. + * The value is either zero or a 4-byte aligned pointer to that TB combined + * with additional information in its two least significant bits. The + * additional information is encoded as follows: + * 0, 1: the link between this TB and the next is via the specified + * TB index (0 or 1). That is, we left the TB via (the equivalent + * of) "goto_tb <index>". The main loop uses this to determine + * how to link the TB just executed to the next. + * 2: we are using instruction counting code generation, and we + * did not start executing this TB because the instruction counter + * would hit zero midway through it. In this case the pointer + * returned is the TB we were about to execute, and the caller must + * arrange to execute the remaining count of instructions. + * 3: we stopped because the CPU's exit_request flag was set + * (usually meaning that there is an interrupt that needs to be + * handled). The pointer returned is the TB we were about to execute + * when we noticed the pending exit request. + * + * If the bottom two bits indicate an exit-via-index then the CPU + * state is correctly synchronised and ready for execution of the next + * TB (and in particular the guest PC is the address to execute next). + * Otherwise, we gave up on execution of this TB before it started, and + * the caller must fix up the CPU state by calling the CPU's + * synchronize_from_tb() method with the TB pointer we return (falling + * back to calling the CPU's set_pc method with tb->pc if no + * synchronize_from_tb() method exists). + * + * Note that TCG targets may use a different definition of tcg_qemu_tb_exec + * to this default (which just calls the prologue.code emitted by + * tcg_target_qemu_prologue()).
+ */ +#define TB_EXIT_MASK 3 +#define TB_EXIT_IDX0 0 +#define TB_EXIT_IDX1 1 +#define TB_EXIT_IDXMAX 1 +#define TB_EXIT_REQUESTED 3 + +#ifdef HAVE_TCG_QEMU_TB_EXEC +uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr); +#else +# define tcg_qemu_tb_exec(env, tb_ptr) \ + ((uintptr_t (*)(void *, void *))env->uc->tcg_ctx->code_gen_prologue)(env, tb_ptr) +#endif + +void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size); + +#if TCG_TARGET_MAYBE_vec +/* Return zero if the tuple (opc, type, vece) is unsupportable; + return > 0 if it is directly supportable; + return < 0 if we must call tcg_expand_vec_op. */ +int tcg_can_emit_vec_op(TCGContext *tcg_ctx, TCGOpcode, TCGType, unsigned); +#else +static inline int tcg_can_emit_vec_op(TCGContext *tcg_ctx, TCGOpcode o, TCGType t, unsigned ve) +{ + return 0; +} +#endif + +/* Expand the tuple (opc, type, vece) on the given arguments. */ +void tcg_expand_vec_op(TCGContext *tcg_ctx, TCGOpcode, TCGType, unsigned, TCGArg, ...); + +/* Replicate a constant C according to the log2 of the element size. */ +uint64_t dup_const_func(unsigned vece, uint64_t c); + +#ifndef _MSC_VER +#define dup_const(VECE, C) \ + (__builtin_constant_p(VECE) \ + ? ( (VECE) == MO_8 ? 0x0101010101010101ull * (uint8_t)(C) \ + : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C) \ + : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C) \ + : dup_const_func(VECE, C)) \ + : dup_const_func(VECE, C)) +#else +#define dup_const(VECE, C) dup_const_func(VECE, C) +#endif + + +/* + * Memory helpers that will be used by TCG generated code. + */ +/* Value zero-extended to tcg register size. */ +tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); + +/* Value sign-extended to tcg register size.
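   Separately, to make the TB_EXIT_* encoding above concrete, a sketch
   of how a caller could decode the value returned by
   tcg_qemu_tb_exec(); the helper name is hypothetical:
*/

static inline TranslationBlock *example_decode_tb_exit(uintptr_t ret,
                                                       int *exit_kind)
{
    /* low two bits: 0/1 = goto_tb index, 2 = icount stop, 3 = exit request */
    *exit_kind = (int)(ret & TB_EXIT_MASK);
    return (TranslationBlock *)(ret & ~(uintptr_t)TB_EXIT_MASK);
}

/*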
*/ +tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); + +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + TCGMemOpIdx oi, uintptr_t retaddr); +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + TCGMemOpIdx oi, uintptr_t retaddr); +void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + TCGMemOpIdx oi, uintptr_t retaddr); +void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + TCGMemOpIdx oi, uintptr_t retaddr); +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + TCGMemOpIdx oi, uintptr_t retaddr); +void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + TCGMemOpIdx oi, uintptr_t retaddr); +void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + TCGMemOpIdx oi, uintptr_t retaddr); + +/* Temporary aliases until backends are converted. */ +#ifdef TARGET_WORDS_BIGENDIAN +# define helper_ret_ldsw_mmu helper_be_ldsw_mmu +# define helper_ret_lduw_mmu helper_be_lduw_mmu +# define helper_ret_ldsl_mmu helper_be_ldsl_mmu +# define helper_ret_ldul_mmu helper_be_ldul_mmu +# define helper_ret_ldl_mmu helper_be_ldul_mmu +# define helper_ret_ldq_mmu helper_be_ldq_mmu +# define helper_ret_stw_mmu helper_be_stw_mmu +# define helper_ret_stl_mmu helper_be_stl_mmu +# define helper_ret_stq_mmu helper_be_stq_mmu +#else +# define helper_ret_ldsw_mmu helper_le_ldsw_mmu +# define helper_ret_lduw_mmu helper_le_lduw_mmu +# define helper_ret_ldsl_mmu helper_le_ldsl_mmu +# define helper_ret_ldul_mmu helper_le_ldul_mmu +# define helper_ret_ldl_mmu helper_le_ldul_mmu +# define helper_ret_ldq_mmu helper_le_ldq_mmu +# define helper_ret_stw_mmu helper_le_stw_mmu +# define helper_ret_stl_mmu helper_le_stl_mmu +# define helper_ret_stq_mmu helper_le_stq_mmu +#endif + +uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + TCGMemOpIdx oi, uintptr_t retaddr); +uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + TCGMemOpIdx oi, uintptr_t retaddr); +uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + TCGMemOpIdx oi, uintptr_t retaddr); +uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr, + uint64_t cmpv, uint64_t newv, + TCGMemOpIdx oi, uintptr_t retaddr); +uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + TCGMemOpIdx oi, uintptr_t retaddr); +uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + TCGMemOpIdx oi, uintptr_t retaddr); +uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr, + uint64_t cmpv, uint64_t newv, + TCGMemOpIdx oi, uintptr_t retaddr); + +#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \ +TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu \ + (CPUArchState *env, target_ulong addr, TYPE val, \ + TCGMemOpIdx oi, uintptr_t 
retaddr); + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPER_ALL(NAME) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \ + GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \ + GEN_ATOMIC_HELPER(NAME, uint64_t, q_be) +#else +#define GEN_ATOMIC_HELPER_ALL(NAME) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) +#endif + +GEN_ATOMIC_HELPER_ALL(fetch_add) +GEN_ATOMIC_HELPER_ALL(fetch_sub) +GEN_ATOMIC_HELPER_ALL(fetch_and) +GEN_ATOMIC_HELPER_ALL(fetch_or) +GEN_ATOMIC_HELPER_ALL(fetch_xor) +GEN_ATOMIC_HELPER_ALL(fetch_smin) +GEN_ATOMIC_HELPER_ALL(fetch_umin) +GEN_ATOMIC_HELPER_ALL(fetch_smax) +GEN_ATOMIC_HELPER_ALL(fetch_umax) + +GEN_ATOMIC_HELPER_ALL(add_fetch) +GEN_ATOMIC_HELPER_ALL(sub_fetch) +GEN_ATOMIC_HELPER_ALL(and_fetch) +GEN_ATOMIC_HELPER_ALL(or_fetch) +GEN_ATOMIC_HELPER_ALL(xor_fetch) +GEN_ATOMIC_HELPER_ALL(smin_fetch) +GEN_ATOMIC_HELPER_ALL(umin_fetch) +GEN_ATOMIC_HELPER_ALL(smax_fetch) +GEN_ATOMIC_HELPER_ALL(umax_fetch) + +GEN_ATOMIC_HELPER_ALL(xchg) + +#undef GEN_ATOMIC_HELPER_ALL +#undef GEN_ATOMIC_HELPER + +/* + * These aren't really "proper" helpers because TCG cannot manage Int128. + * However, use the same format as the others, for use by the backends. + * + * The cmpxchg functions are only defined if HAVE_CMPXCHG128; + * the ld/st functions are only defined if HAVE_ATOMIC128, + * as defined by <qemu/atomic128.h>. + */ +Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr, + Int128 cmpv, Int128 newv, + TCGMemOpIdx oi, uintptr_t retaddr); +Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr, + Int128 cmpv, Int128 newv, + TCGMemOpIdx oi, uintptr_t retaddr); + +Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val, + TCGMemOpIdx oi, uintptr_t retaddr); +void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val, + TCGMemOpIdx oi, uintptr_t retaddr); + +#ifdef CONFIG_DEBUG_TCG +void tcg_assert_listed_vecop(TCGOpcode); +#else +static inline void tcg_assert_listed_vecop(TCGOpcode op) { } +#endif + +static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n) +{ +#ifdef CONFIG_DEBUG_TCG + const TCGOpcode *o = tcg_ctx->vecop_list; + tcg_ctx->vecop_list = n; + return o; +#else + return NULL; +#endif +} + +bool tcg_can_emit_vecop_list(TCGContext *tcg_ctx, const TCGOpcode *, TCGType, unsigned); + +void check_exit_request(TCGContext *tcg_ctx); + +void tcg_dump_ops(TCGContext *s, bool have_prefs, const char *headline); + +struct jit_code_entry { + struct jit_code_entry *next_entry; + struct jit_code_entry *prev_entry; + const void *symfile_addr; + uint64_t symfile_size; +}; + +#endif /* TCG_H */ diff --git a/qemu/libdecnumber/decContext.c b/qemu/libdecnumber/decContext.c new file mode 100644 index 00000000..7d97a65a --- /dev/null +++ b/qemu/libdecnumber/decContext.c @@ -0,0 +1,432 @@ +/* Decimal context module for the decNumber C Library. + Copyright (C) 2005, 2007 Free Software Foundation, Inc. + Contributed by IBM Corporation. Author Mike Cowlishaw.
+ + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file. (The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. */ + +/* ------------------------------------------------------------------ */ +/* Decimal Context module */ +/* ------------------------------------------------------------------ */ +/* This module comprises the routines for handling arithmetic */ +/* context structures. */ +/* ------------------------------------------------------------------ */ + +#include "qemu/osdep.h" +#include "libdecnumber/dconfig.h" +#include "libdecnumber/decContext.h" +#include "libdecnumber/decNumberLocal.h" + +#if DECCHECK +/* compile-time endian tester [assumes sizeof(Int)>1] */ +static const Int mfcone=1; /* constant 1 */ +static const Flag *mfctop=(Flag *)&mfcone; /* -> top byte */ +#define LITEND *mfctop /* named flag; 1=little-endian */ +#endif + +/* ------------------------------------------------------------------ */ +/* round-for-reround digits */ +/* ------------------------------------------------------------------ */ +const uByte DECSTICKYTAB[10]={1,1,2,3,4,6,6,7,8,9}; /* used if sticky */ + +/* ------------------------------------------------------------------ */ +/* Powers of ten (powers[n]==10**n, 0<=n<=9) */ +/* ------------------------------------------------------------------ */ +const uLong DECPOWERS[19] = {1, 10, 100, 1000, 10000, 100000, 1000000, + 10000000, 100000000, 1000000000, 10000000000ULL, 100000000000ULL, + 1000000000000ULL, 10000000000000ULL, 100000000000000ULL, 1000000000000000ULL, + 10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL, }; + +/* ------------------------------------------------------------------ */ +/* decContextClearStatus -- clear bits in current status */ +/* */ +/* context is the context structure to be queried */ +/* mask indicates the bits to be cleared (the status bit that */ +/* corresponds to each 1 bit in the mask is cleared) */ +/* returns context */ +/* */ +/* No error is possible. 
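   A short usage sketch (the function name is hypothetical): set two
   flags quietly, then clear just one of them.
*/

static void example_clear_rounded(decContext *ctx) {
    decContextSetStatusQuiet(ctx, DEC_Inexact | DEC_Rounded);
    decContextClearStatus(ctx, DEC_Rounded); /* DEC_Inexact is still set */
    } /* example_clear_rounded */

/*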
*/ +/* ------------------------------------------------------------------ */ +decContext *decContextClearStatus(decContext *context, uInt mask) { + context->status&=~mask; + return context; + } /* decContextClearStatus */ + +/* ------------------------------------------------------------------ */ +/* decContextDefault -- initialize a context structure */ +/* */ +/* context is the structure to be initialized */ +/* kind selects the required set of default values, one of: */ +/* DEC_INIT_BASE -- select ANSI X3-274 defaults */ +/* DEC_INIT_DECIMAL32 -- select IEEE 754r defaults, 32-bit */ +/* DEC_INIT_DECIMAL64 -- select IEEE 754r defaults, 64-bit */ +/* DEC_INIT_DECIMAL128 -- select IEEE 754r defaults, 128-bit */ +/* For any other value a valid context is returned, but with */ +/* Invalid_operation set in the status field. */ +/* returns a context structure with the appropriate initial values. */ +/* ------------------------------------------------------------------ */ +decContext * decContextDefault(decContext *context, Int kind) { + /* set defaults... */ + context->digits=9; /* 9 digits */ + context->emax=DEC_MAX_EMAX; /* 9-digit exponents */ + context->emin=DEC_MIN_EMIN; /* .. balanced */ + context->round=DEC_ROUND_HALF_UP; /* 0.5 rises */ + context->traps=DEC_Errors; /* all but informational */ + context->status=0; /* cleared */ + context->clamp=0; /* no clamping */ + #if DECSUBSET + context->extended=0; /* cleared */ + #endif + switch (kind) { + case DEC_INIT_BASE: + /* [use defaults] */ + break; + case DEC_INIT_DECIMAL32: + context->digits=7; /* digits */ + context->emax=96; /* Emax */ + context->emin=-95; /* Emin */ + context->round=DEC_ROUND_HALF_EVEN; /* 0.5 to nearest even */ + context->traps=0; /* no traps set */ + context->clamp=1; /* clamp exponents */ + #if DECSUBSET + context->extended=1; /* set */ + #endif + break; + case DEC_INIT_DECIMAL64: + context->digits=16; /* digits */ + context->emax=384; /* Emax */ + context->emin=-383; /* Emin */ + context->round=DEC_ROUND_HALF_EVEN; /* 0.5 to nearest even */ + context->traps=0; /* no traps set */ + context->clamp=1; /* clamp exponents */ + #if DECSUBSET + context->extended=1; /* set */ + #endif + break; + case DEC_INIT_DECIMAL128: + context->digits=34; /* digits */ + context->emax=6144; /* Emax */ + context->emin=-6143; /* Emin */ + context->round=DEC_ROUND_HALF_EVEN; /* 0.5 to nearest even */ + context->traps=0; /* no traps set */ + context->clamp=1; /* clamp exponents */ + #if DECSUBSET + context->extended=1; /* set */ + #endif + break; + + default: /* invalid Kind */ + /* use defaults, and .. */ + decContextSetStatus(context, DEC_Invalid_operation); /* trap */ + } + + #if DECCHECK + if (LITEND!=DECLITEND) { + const char *adj; + if (LITEND) adj="little"; + else adj="big"; + printf("Warning: DECLITEND is set to %d, but this computer appears to be %s-endian\n", + DECLITEND, adj); + } + #endif + return context;} /* decContextDefault */ + +/* ------------------------------------------------------------------ */ +/* decContextGetRounding -- return current rounding mode */ +/* */ +/* context is the context structure to be queried */ +/* returns the rounding mode */ +/* */ +/* No error is possible. 
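   A usage sketch of decContextDefault above (the function name is
   hypothetical): request the IEEE 754r decimal64 defaults, which give
   16 digits, emax=384, emin=-383, round-half-even, no traps, and
   clamping enabled.
*/

static void example_init_decimal64(decContext *ctx) {
    decContextDefault(ctx, DEC_INIT_DECIMAL64);
    /* decContextGetRounding(ctx) now returns DEC_ROUND_HALF_EVEN */
    } /* example_init_decimal64 */

/*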
*/ +/* ------------------------------------------------------------------ */ +enum rounding decContextGetRounding(decContext *context) { + return context->round; + } /* decContextGetRounding */ + +/* ------------------------------------------------------------------ */ +/* decContextGetStatus -- return current status */ +/* */ +/* context is the context structure to be queried */ +/* returns status */ +/* */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +uInt decContextGetStatus(decContext *context) { + return context->status; + } /* decContextGetStatus */ + +/* ------------------------------------------------------------------ */ +/* decContextRestoreStatus -- restore bits in current status */ +/* */ +/* context is the context structure to be updated */ +/* newstatus is the source for the bits to be restored */ +/* mask indicates the bits to be restored (the status bit that */ +/* corresponds to each 1 bit in the mask is set to the value of */ +/* the corresponding bit in newstatus) */ +/* returns context */ +/* */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +decContext *decContextRestoreStatus(decContext *context, + uInt newstatus, uInt mask) { + context->status&=~mask; /* clear the selected bits */ + context->status|=(mask&newstatus); /* or in the new bits */ + return context; + } /* decContextRestoreStatus */ + +/* ------------------------------------------------------------------ */ +/* decContextSaveStatus -- save bits in current status */ +/* */ +/* context is the context structure to be queried */ +/* mask indicates the bits to be saved (the status bits that */ +/* correspond to each 1 bit in the mask are saved) */ +/* returns the AND of the mask and the current status */ +/* */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +uInt decContextSaveStatus(decContext *context, uInt mask) { + return context->status&mask; + } /* decContextSaveStatus */ + +/* ------------------------------------------------------------------ */ +/* decContextSetRounding -- set current rounding mode */ +/* */ +/* context is the context structure to be updated */ +/* newround is the value which will replace the current mode */ +/* returns context */ +/* */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +decContext *decContextSetRounding(decContext *context, + enum rounding newround) { + context->round=newround; + return context; + } /* decContextSetRounding */ + +/* ------------------------------------------------------------------ */ +/* decContextSetStatus -- set status and raise trap if appropriate */ +/* */ +/* context is the context structure to be updated */ +/* status is the DEC_ exception code */ +/* returns the context structure */ +/* */ +/* Control may never return from this routine, if there is a signal */ +/* handler and it takes a long jump. 
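   Together these two routines support a save/mask/restore idiom; an
   illustrative sketch (the function name is hypothetical):
*/

static void example_ignore_inexact(decContext *ctx) {
    uInt saved = decContextSaveStatus(ctx, DEC_Inexact); /* remember */
    decContextClearStatus(ctx, DEC_Inexact);             /* hide */
    /* ... an operation whose Inexact result should be discarded ... */
    decContextRestoreStatus(ctx, saved, DEC_Inexact);    /* put back */
    } /* example_ignore_inexact */

/*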
*/ +/* ------------------------------------------------------------------ */ +decContext * decContextSetStatus(decContext *context, uInt status) { + context->status|=status; + if (status & context->traps) raise(SIGFPE); + return context;} /* decContextSetStatus */ + +/* ------------------------------------------------------------------ */ +/* decContextSetStatusFromString -- set status from a string + trap */ +/* */ +/* context is the context structure to be updated */ +/* string is a string exactly equal to one that might be returned */ +/* by decContextStatusToString */ +/* */ +/* The status bit corresponding to the string is set, and a trap */ +/* is raised if appropriate. */ +/* */ +/* returns the context structure, unless the string is equal to */ +/* DEC_Condition_MU or is not recognized. In these cases NULL is */ +/* returned. */ +/* ------------------------------------------------------------------ */ +decContext * decContextSetStatusFromString(decContext *context, + const char *string) { + if (strcmp(string, DEC_Condition_CS)==0) + return decContextSetStatus(context, DEC_Conversion_syntax); + if (strcmp(string, DEC_Condition_DZ)==0) + return decContextSetStatus(context, DEC_Division_by_zero); + if (strcmp(string, DEC_Condition_DI)==0) + return decContextSetStatus(context, DEC_Division_impossible); + if (strcmp(string, DEC_Condition_DU)==0) + return decContextSetStatus(context, DEC_Division_undefined); + if (strcmp(string, DEC_Condition_IE)==0) + return decContextSetStatus(context, DEC_Inexact); + if (strcmp(string, DEC_Condition_IS)==0) + return decContextSetStatus(context, DEC_Insufficient_storage); + if (strcmp(string, DEC_Condition_IC)==0) + return decContextSetStatus(context, DEC_Invalid_context); + if (strcmp(string, DEC_Condition_IO)==0) + return decContextSetStatus(context, DEC_Invalid_operation); + #if DECSUBSET + if (strcmp(string, DEC_Condition_LD)==0) + return decContextSetStatus(context, DEC_Lost_digits); + #endif + if (strcmp(string, DEC_Condition_OV)==0) + return decContextSetStatus(context, DEC_Overflow); + if (strcmp(string, DEC_Condition_PA)==0) + return decContextSetStatus(context, DEC_Clamped); + if (strcmp(string, DEC_Condition_RO)==0) + return decContextSetStatus(context, DEC_Rounded); + if (strcmp(string, DEC_Condition_SU)==0) + return decContextSetStatus(context, DEC_Subnormal); + if (strcmp(string, DEC_Condition_UN)==0) + return decContextSetStatus(context, DEC_Underflow); + if (strcmp(string, DEC_Condition_ZE)==0) + return context; + return NULL; /* Multiple status, or unknown */ + } /* decContextSetStatusFromString */ + +/* ------------------------------------------------------------------ */ +/* decContextSetStatusFromStringQuiet -- set status from a string */ +/* */ +/* context is the context structure to be updated */ +/* string is a string exactly equal to one that might be returned */ +/* by decContextStatusToString */ +/* */ +/* The status bit corresponding to the string is set; no trap is */ +/* raised. */ +/* */ +/* returns the context structure, unless the string is equal to */ +/* DEC_Condition_MU or is not recognized. In these cases NULL is */ +/* returned. 
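   A minimal sketch of the trap behaviour of decContextSetStatus above
   (the function name is hypothetical):
*/

static void example_trap_divzero(decContext *ctx) {
    ctx->traps|=DEC_Division_by_zero;               /* enable the trap */
    decContextSetStatus(ctx, DEC_Division_by_zero); /* raises SIGFPE */
    } /* example_trap_divzero */

/*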
*/ +/* ------------------------------------------------------------------ */ +decContext * decContextSetStatusFromStringQuiet(decContext *context, + const char *string) { + if (strcmp(string, DEC_Condition_CS)==0) + return decContextSetStatusQuiet(context, DEC_Conversion_syntax); + if (strcmp(string, DEC_Condition_DZ)==0) + return decContextSetStatusQuiet(context, DEC_Division_by_zero); + if (strcmp(string, DEC_Condition_DI)==0) + return decContextSetStatusQuiet(context, DEC_Division_impossible); + if (strcmp(string, DEC_Condition_DU)==0) + return decContextSetStatusQuiet(context, DEC_Division_undefined); + if (strcmp(string, DEC_Condition_IE)==0) + return decContextSetStatusQuiet(context, DEC_Inexact); + if (strcmp(string, DEC_Condition_IS)==0) + return decContextSetStatusQuiet(context, DEC_Insufficient_storage); + if (strcmp(string, DEC_Condition_IC)==0) + return decContextSetStatusQuiet(context, DEC_Invalid_context); + if (strcmp(string, DEC_Condition_IO)==0) + return decContextSetStatusQuiet(context, DEC_Invalid_operation); + #if DECSUBSET + if (strcmp(string, DEC_Condition_LD)==0) + return decContextSetStatusQuiet(context, DEC_Lost_digits); + #endif + if (strcmp(string, DEC_Condition_OV)==0) + return decContextSetStatusQuiet(context, DEC_Overflow); + if (strcmp(string, DEC_Condition_PA)==0) + return decContextSetStatusQuiet(context, DEC_Clamped); + if (strcmp(string, DEC_Condition_RO)==0) + return decContextSetStatusQuiet(context, DEC_Rounded); + if (strcmp(string, DEC_Condition_SU)==0) + return decContextSetStatusQuiet(context, DEC_Subnormal); + if (strcmp(string, DEC_Condition_UN)==0) + return decContextSetStatusQuiet(context, DEC_Underflow); + if (strcmp(string, DEC_Condition_ZE)==0) + return context; + return NULL; /* Multiple status, or unknown */ + } /* decContextSetStatusFromStringQuiet */ + +/* ------------------------------------------------------------------ */ +/* decContextSetStatusQuiet -- set status without trap */ +/* */ +/* context is the context structure to be updated */ +/* status is the DEC_ exception code */ +/* returns the context structure */ +/* */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +decContext * decContextSetStatusQuiet(decContext *context, uInt status) { + context->status|=status; + return context;} /* decContextSetStatusQuiet */ + +/* ------------------------------------------------------------------ */ +/* decContextStatusToString -- convert status flags to a string */ +/* */ +/* context is a context with valid status field */ +/* */ +/* returns a constant string describing the condition. If multiple */ +/* (or no) flags are set, a generic constant message is returned. 
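   An illustrative round trip (the function name is hypothetical): a
   single flag maps to its DEC_Condition_* string, and that string maps
   back to the same flag via decContextSetStatusFromStringQuiet above.
*/

static const char *example_status_string(decContext *ctx) {
    decContextZeroStatus(ctx);                   /* no flags set */
    decContextSetStatusQuiet(ctx, DEC_Overflow); /* exactly one flag */
    return decContextStatusToString(ctx);        /* DEC_Condition_OV */
    } /* example_status_string */

/*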
*/ +/* ------------------------------------------------------------------ */ +const char *decContextStatusToString(const decContext *context) { + Int status=context->status; + + /* test the five IEEE first, as some of the others are ambiguous when */ + /* DECEXTFLAG=0 */ + if (status==DEC_Invalid_operation ) return DEC_Condition_IO; + if (status==DEC_Division_by_zero ) return DEC_Condition_DZ; + if (status==DEC_Overflow ) return DEC_Condition_OV; + if (status==DEC_Underflow ) return DEC_Condition_UN; + if (status==DEC_Inexact ) return DEC_Condition_IE; + + if (status==DEC_Division_impossible ) return DEC_Condition_DI; + if (status==DEC_Division_undefined ) return DEC_Condition_DU; + if (status==DEC_Rounded ) return DEC_Condition_RO; + if (status==DEC_Clamped ) return DEC_Condition_PA; + if (status==DEC_Subnormal ) return DEC_Condition_SU; + if (status==DEC_Conversion_syntax ) return DEC_Condition_CS; + if (status==DEC_Insufficient_storage ) return DEC_Condition_IS; + if (status==DEC_Invalid_context ) return DEC_Condition_IC; + #if DECSUBSET + if (status==DEC_Lost_digits ) return DEC_Condition_LD; + #endif + if (status==0 ) return DEC_Condition_ZE; + return DEC_Condition_MU; /* Multiple errors */ + } /* decContextStatusToString */ + +/* ------------------------------------------------------------------ */ +/* decContextTestSavedStatus -- test bits in saved status */ +/* */ +/* oldstatus is the status word to be tested */ +/* mask indicates the bits to be tested (the oldstatus bits that */ +/* correspond to each 1 bit in the mask are tested) */ +/* returns 1 if any of the tested bits are 1, or 0 otherwise */ +/* */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +uInt decContextTestSavedStatus(uInt oldstatus, uInt mask) { + return (oldstatus&mask)!=0; + } /* decContextTestSavedStatus */ + +/* ------------------------------------------------------------------ */ +/* decContextTestStatus -- test bits in current status */ +/* */ +/* context is the context structure to be updated */ +/* mask indicates the bits to be tested (the status bits that */ +/* correspond to each 1 bit in the mask are tested) */ +/* returns 1 if any of the tested bits are 1, or 0 otherwise */ +/* */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +uInt decContextTestStatus(decContext *context, uInt mask) { + return (context->status&mask)!=0; + } /* decContextTestStatus */ + +/* ------------------------------------------------------------------ */ +/* decContextZeroStatus -- clear all status bits */ +/* */ +/* context is the context structure to be updated */ +/* returns context */ +/* */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +decContext *decContextZeroStatus(decContext *context) { + context->status=0; + return context; + } /* decContextZeroStatus */ diff --git a/qemu/libdecnumber/decNumber.c b/qemu/libdecnumber/decNumber.c new file mode 100644 index 00000000..1e84e9b8 --- /dev/null +++ b/qemu/libdecnumber/decNumber.c @@ -0,0 +1,8196 @@ +/* Decimal number arithmetic module for the decNumber C Library. + Copyright (C) 2005, 2007 Free Software Foundation, Inc. + Contributed by IBM Corporation. Author Mike Cowlishaw. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2, or (at your option) any later + version. + + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file. (The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. */ + +/* ------------------------------------------------------------------ */ +/* Decimal Number arithmetic module */ +/* ------------------------------------------------------------------ */ +/* This module comprises the routines for General Decimal Arithmetic */ +/* as defined in the specification which may be found on the */ +/* http://www2.hursley.ibm.com/decimal web pages. It implements both */ +/* the full ('extended') arithmetic and the simpler ('subset') */ +/* arithmetic. */ +/* */ +/* Usage notes: */ +/* */ +/* 1. This code is ANSI C89 except: */ +/* */ +/* If DECDPUN>4 or DECUSE64=1, the C99 64-bit int64_t and */ +/* uint64_t types may be used. To avoid these, set DECUSE64=0 */ +/* and DECDPUN<=4 (see documentation). */ +/* */ +/* 2. The decNumber format which this library uses is optimized for */ +/* efficient processing of relatively short numbers; in particular */ +/* it allows the use of fixed sized structures and minimizes copy */ +/* and move operations. It does, however, support arbitrary */ +/* precision (up to 999,999,999 digits) and arbitrary exponent */ +/* range (Emax in the range 0 through 999,999,999 and Emin in the */ +/* range -999,999,999 through 0). Mathematical functions (for */ +/* example decNumberExp) as identified below are restricted more */ +/* tightly: digits, emax, and -emin in the context must be <= */ +/* DEC_MAX_MATH (999999), and their operand(s) must be within */ +/* these bounds. */ +/* */ +/* 3. Logical functions are further restricted; their operands must */ +/* be finite, positive, have an exponent of zero, and all digits */ +/* must be either 0 or 1. The result will only contain digits */ +/* which are 0 or 1 (and will have exponent=0 and a sign of 0). */ +/* */ +/* 4. Operands to operator functions are never modified unless they */ +/* are also specified to be the result number (which is always */ +/* permitted). Other than that case, operands must not overlap. */ +/* */ +/* 5. Error handling: the type of the error is ORed into the status */ +/* flags in the current context (decContext structure). The */ +/* SIGFPE signal is then raised if the corresponding trap-enabler */ +/* flag in the decContext is set (is 1). */ +/* */ +/* It is the responsibility of the caller to clear the status */ +/* flags as required. 
*/ +/* */ +/* The result of any routine which returns a number will always */ +/* be a valid number (which may be a special value, such as an */ +/* Infinity or NaN). */ +/* */ +/* 6. The decNumber format is not an exchangeable concrete */ +/* representation as it comprises fields which may be machine- */ +/* dependent (packed or unpacked, or special length, for example). */ +/* Canonical conversions to and from strings are provided; other */ +/* conversions are available in separate modules. */ +/* */ +/* 7. Normally, input operands are assumed to be valid. Set DECCHECK */ +/* to 1 for extended operand checking (including NULL operands). */ +/* Results are undefined if a badly-formed structure (or a NULL */ +/* pointer to a structure) is provided, though with DECCHECK */ +/* enabled the operator routines are protected against exceptions. */ +/* (Except if the result pointer is NULL, which is unrecoverable.) */ +/* */ +/* However, the routines will never cause exceptions if they are */ +/* given well-formed operands, even if the value of the operands */ +/* is inappropriate for the operation and DECCHECK is not set. */ +/* (Except for SIGFPE, as and where documented.) */ +/* */ +/* 8. Subset arithmetic is available only if DECSUBSET is set to 1. */ +/* ------------------------------------------------------------------ */ +/* Implementation notes for maintenance of this module: */ +/* */ +/* 1. Storage leak protection: Routines which use malloc are not */ +/* permitted to use return for fastpath or error exits (i.e., */ +/* they follow strict structured programming conventions). */ +/* Instead they have a do{}while(0); construct surrounding the */ +/* code which is protected -- break may be used to exit this. */ +/* Other routines can safely use the return statement inline. */ +/* */ +/* Storage leak accounting can be enabled using DECALLOC. */ +/* */ +/* 2. All loops use the for(;;) construct. Any do construct does */ +/* not loop; it is for allocation protection as just described. */ +/* */ +/* 3. Setting status in the context must always be the very last */ +/* action in a routine, as non-0 status may raise a trap and hence */ +/* the call to set status may not return (if the handler uses long */ +/* jump). Therefore all cleanup must be done first. In general, */ +/* to achieve this status is accumulated and is only applied just */ +/* before return by calling decContextSetStatus (via decStatus). */ +/* */ +/* Routines which allocate storage cannot, in general, use the */ +/* 'top level' routines which could cause a non-returning */ +/* transfer of control. The decXxxxOp routines are safe (do not */ +/* call decStatus even if traps are set in the context) and should */ +/* be used instead (they are also a little faster). */ +/* */ +/* 4. Exponent checking is minimized by allowing the exponent to */ +/* grow outside its limits during calculations, provided that */ +/* the decFinalize function is called later. Multiplication and */ +/* division, and intermediate calculations in exponentiation, */ +/* require more careful checks because of the risk of 31-bit */ +/* overflow (the most negative valid exponent is -1999999997, for */ +/* a 999999999-digit number with adjusted exponent of -999999999). */ +/* */ +/* 5. Rounding is deferred until finalization of results, with any */ +/* 'off to the right' data being represented as a single digit */ +/* residue (in the range -1 through 9). 
This avoids any double- */ +/* rounding when more than one shortening takes place (for */ +/* example, when a result is subnormal). */ +/* */ +/* 6. The digits count is allowed to rise to a multiple of DECDPUN */ +/* during many operations, so whole Units are handled and exact */ +/* accounting of digits is not needed. The correct digits value */ +/* is found by decGetDigits, which accounts for leading zeros. */ +/* This must be called before any rounding if the number of digits */ +/* is not known exactly. */ +/* */ +/* 7. The multiply-by-reciprocal 'trick' is used for partitioning */ +/* numbers up to four digits, using appropriate constants. This */ +/* is not useful for longer numbers because overflow of 32 bits */ +/* would lead to 4 multiplies, which is almost as expensive as */ +/* a divide (unless a floating-point or 64-bit multiply is */ +/* assumed to be available). */ +/* */ +/* 8. Unusual abbreviations that may be used in the commentary: */ +/* lhs -- left hand side (operand, of an operation) */ +/* lsd -- least significant digit (of coefficient) */ +/* lsu -- least significant Unit (of coefficient) */ +/* msd -- most significant digit (of coefficient) */ +/* msi -- most significant item (in an array) */ +/* msu -- most significant Unit (of coefficient) */ +/* rhs -- right hand side (operand, of an operation) */ +/* +ve -- positive */ +/* -ve -- negative */ +/* ** -- raise to the power */ +/* ------------------------------------------------------------------ */ + +#include "qemu/osdep.h" +#include "libdecnumber/dconfig.h" +#include "libdecnumber/decNumber.h" +#include "libdecnumber/decNumberLocal.h" + +/* Constants */ +/* Public lookup table used by the D2U macro */ +const uByte d2utable[DECMAXD2U+1]=D2UTABLE; + +#define DECVERB 1 /* set to 1 for verbose DECCHECK */ +#define powers DECPOWERS /* old internal name */ + +/* Local constants */ +#define DIVIDE 0x80 /* Divide operators */ +#define REMAINDER 0x40 /* .. */ +#define DIVIDEINT 0x20 /* .. */ +#define REMNEAR 0x10 /* .. */ +#define COMPARE 0x01 /* Compare operators */ +#define COMPMAX 0x02 /* .. */ +#define COMPMIN 0x03 /* .. */ +#define COMPTOTAL 0x04 /* .. */ +#define COMPNAN 0x05 /* .. [NaN processing] */ +#define COMPSIG 0x06 /* .. [signaling COMPARE] */ +#define COMPMAXMAG 0x07 /* .. */ +#define COMPMINMAG 0x08 /* .. */ + +#define DEC_sNaN 0x40000000 /* local status: sNaN signal */ +#define BADINT (Int)0x80000000 /* most-negative Int; error indicator */ +/* Next two indicate an integer >= 10**6, and its parity (bottom bit) */ +#define BIGEVEN (Int)0x80000002 +#define BIGODD (Int)0x80000003 + +static Unit uarrone[1]={1}; /* Unit array of 1, used for incrementing */ + +/* Granularity-dependent code */ +#if DECDPUN<=4 + #define eInt Int /* extended integer */ + #define ueInt uInt /* unsigned extended integer */ + /* Constant multipliers for divide-by-power-of five using reciprocal */ + /* multiply, after removing powers of 2 by shifting, and final shift */ + /* of 17 [we only need up to **4] */ + static const uInt multies[]={131073, 26215, 5243, 1049, 210}; + /* QUOT10 -- macro to return the quotient of unit u divided by 10**n */ + #define QUOT10(u, n) ((((uInt)(u)>>(n))*multies[n])>>17) +#else + /* For DECDPUN>4 non-ANSI-89 64-bit types are needed. 
*/ + #if !DECUSE64 + #error decNumber.c: DECUSE64 must be 1 when DECDPUN>4 + #endif + #define eInt Long /* extended integer */ + #define ueInt uLong /* unsigned extended integer */ +#endif + +/* Local routines */ +static decNumber * decAddOp(decNumber *, const decNumber *, const decNumber *, + decContext *, uByte, uInt *); +static Flag decBiStr(const char *, const char *, const char *); +static uInt decCheckMath(const decNumber *, decContext *, uInt *); +static void decApplyRound(decNumber *, decContext *, Int, uInt *); +static Int decCompare(const decNumber *lhs, const decNumber *rhs, Flag); +static decNumber * decCompareOp(decNumber *, const decNumber *, + const decNumber *, decContext *, + Flag, uInt *); +static void decCopyFit(decNumber *, const decNumber *, decContext *, + Int *, uInt *); +static decNumber * decDecap(decNumber *, Int); +static decNumber * decDivideOp(decNumber *, const decNumber *, + const decNumber *, decContext *, Flag, uInt *); +static decNumber * decExpOp(decNumber *, const decNumber *, + decContext *, uInt *); +static void decFinalize(decNumber *, decContext *, Int *, uInt *); +static Int decGetDigits(Unit *, Int); +static Int decGetInt(const decNumber *); +static decNumber * decLnOp(decNumber *, const decNumber *, + decContext *, uInt *); +static decNumber * decMultiplyOp(decNumber *, const decNumber *, + const decNumber *, decContext *, + uInt *); +static decNumber * decNaNs(decNumber *, const decNumber *, + const decNumber *, decContext *, uInt *); +static decNumber * decQuantizeOp(decNumber *, const decNumber *, + const decNumber *, decContext *, Flag, + uInt *); +static void decReverse(Unit *, Unit *); +static void decSetCoeff(decNumber *, decContext *, const Unit *, + Int, Int *, uInt *); +static void decSetMaxValue(decNumber *, decContext *); +static void decSetOverflow(decNumber *, decContext *, uInt *); +static void decSetSubnormal(decNumber *, decContext *, Int *, uInt *); +static Int decShiftToLeast(Unit *, Int, Int); +static Int decShiftToMost(Unit *, Int, Int); +static void decStatus(decNumber *, uInt, decContext *); +static void decToString(const decNumber *, char[], Flag); +static decNumber * decTrim(decNumber *, decContext *, Flag, Int *); +static Int decUnitAddSub(const Unit *, Int, const Unit *, Int, Int, + Unit *, Int); +static Int decUnitCompare(const Unit *, Int, const Unit *, Int, Int); + +#if !DECSUBSET +/* decFinish == decFinalize when no subset arithmetic needed */ +#define decFinish(a,b,c,d) decFinalize(a,b,c,d) +#else +static void decFinish(decNumber *, decContext *, Int *, uInt *); +static decNumber * decRoundOperand(const decNumber *, decContext *, uInt *); +#endif + +/* Local macros */ +/* masked special-values bits */ +#define SPECIALARG (rhs->bits & DECSPECIAL) +#define SPECIALARGS ((lhs->bits | rhs->bits) & DECSPECIAL) + +/* Diagnostic macros, etc. */ +#if DECALLOC +/* Handle malloc/free accounting. If enabled, our accountable routines */ +/* are used; otherwise the code just goes straight to the system malloc */ +/* and free routines. */ +#define malloc(a) decMalloc(a) +#define free(a) decFree(a) +#define DECFENCE 0x5a /* corruption detector */ +/* 'Our' malloc and free: */ +static void *decMalloc(size_t); +static void decFree(void *); +uInt decAllocBytes=0; /* count of bytes allocated */ +/* Note that DECALLOC code only checks for storage buffer overflow. */ +/* To check for memory leaks, the decAllocBytes variable must be */ +/* checked to be 0 at appropriate times (e.g., after the test */ +/* harness completes a set of tests). 
This checking may be unreliable */ +/* if the testing is done in a multi-thread environment. */ +#endif + +#if DECCHECK +/* Optional checking routines. Enabling these means that decNumber */ +/* and decContext operands to operator routines are checked for */ +/* correctness. This roughly doubles the execution time of the */ +/* fastest routines (and adds 600+ bytes), so should not normally be */ +/* used in 'production'. */ +/* decCheckInexact is used to check that inexact results have a full */ +/* complement of digits (where appropriate -- this is not the case */ +/* for Quantize, for example) */ +#define DECUNRESU ((decNumber *)(void *)0xffffffff) +#define DECUNUSED ((const decNumber *)(void *)0xffffffff) +#define DECUNCONT ((decContext *)(void *)(0xffffffff)) +static Flag decCheckOperands(decNumber *, const decNumber *, + const decNumber *, decContext *); +static Flag decCheckNumber(const decNumber *); +static void decCheckInexact(const decNumber *, decContext *); +#endif + +#if DECTRACE || DECCHECK +/* Optional trace/debugging routines (may or may not be used) */ +void decNumberShow(const decNumber *); /* displays the components of a number */ +static void decDumpAr(char, const Unit *, Int); +#endif + +/* ================================================================== */ +/* Conversions */ +/* ================================================================== */ + +/* ------------------------------------------------------------------ */ +/* from-int32 -- conversion from Int or uInt */ +/* */ +/* dn is the decNumber to receive the integer */ +/* in or uin is the integer to be converted */ +/* returns dn */ +/* */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberFromInt32(decNumber *dn, Int in) { + uInt unsig; + if (in>=0) unsig=in; + else { /* negative (possibly BADINT) */ + if (in==BADINT) unsig=(uInt)1073741824*2; /* special case */ + else unsig=-in; /* invert */ + } + /* in is now positive */ + decNumberFromUInt32(dn, unsig); + if (in<0) dn->bits=DECNEG; /* sign needed */ + return dn; + } /* decNumberFromInt32 */ + +decNumber * decNumberFromUInt32(decNumber *dn, uInt uin) { + Unit *up; /* work pointer */ + decNumberZero(dn); /* clean */ + if (uin==0) return dn; /* [or decGetDigits bad call] */ + for (up=dn->lsu; uin>0; up++) { + *up=(Unit)(uin%(DECDPUNMAX+1)); + uin=uin/(DECDPUNMAX+1); + } + dn->digits=decGetDigits(dn->lsu, up-dn->lsu); + return dn; + } /* decNumberFromUInt32 */ + +/* ------------------------------------------------------------------ */ +/* to-int32 -- conversion to Int or uInt */ +/* */ +/* dn is the decNumber to convert */ +/* set is the context for reporting errors */ +/* returns the converted decNumber, or 0 if Invalid is set */ +/* */ +/* Invalid is set if the decNumber does not have exponent==0 or if */ +/* it is a NaN, Infinite, or out-of-range. */ +/* ------------------------------------------------------------------ */ +Int decNumberToInt32(const decNumber *dn, decContext *set) { + #if DECCHECK + if (decCheckOperands(DECUNRESU, DECUNUSED, dn, set)) return 0; + #endif + + /* special or too many digits, or bad exponent */ + if (dn->bits&DECSPECIAL || dn->digits>10 || dn->exponent!=0) ; /* bad */ + else { /* is a finite integer with 10 or fewer digits */ + Int d; /* work */ + const Unit *up; /* .. */ + uInt hi=0, lo; /* .. 
*/
+    up=dn->lsu;                  /* -> lsu */
+    lo=*up;                      /* get 1 to 9 digits */
+    #if DECDPUN>1                /* split to higher */
+      hi=lo/10;
+      lo=lo%10;
+    #endif
+    up++;
+    /* collect remaining Units, if any, into hi */
+    for (d=DECDPUN; d<dn->digits; up++, d+=DECDPUN) hi+=*up*powers[d-1];
+    /* now low has the lsd, hi the remainder */
+    if (hi>214748364 || (hi==214748364 && lo>7)) { /* out of range? */
+      /* most-negative is a reprieve */
+      if (dn->bits&DECNEG && hi==214748364 && lo==8) return 0x80000000;
+      /* bad -- drop through */
+      }
+     else { /* in-range always */
+      Int i=X10(hi)+lo;
+      if (dn->bits&DECNEG) return -i;
+      return i;
+      }
+    } /* integer */
+  decContextSetStatus(set, DEC_Invalid_operation); /* [may not return] */
+  return 0;
+  } /* decNumberToInt32 */
+
+uInt decNumberToUInt32(const decNumber *dn, decContext *set) {
+  #if DECCHECK
+  if (decCheckOperands(DECUNRESU, DECUNUSED, dn, set)) return 0;
+  #endif
+  /* special or too many digits, or bad exponent, or negative (<0) */
+  if (dn->bits&DECSPECIAL || dn->digits>10 || dn->exponent!=0
+    || (dn->bits&DECNEG && !ISZERO(dn)));  /* bad */
+   else { /* is a finite integer with 10 or fewer digits */
+    Int d;                       /* work */
+    const Unit *up;              /* .. */
+    uInt hi=0, lo;               /* .. */
+    up=dn->lsu;                  /* -> lsu */
+    lo=*up;                      /* get 1 to 9 digits */
+    #if DECDPUN>1                /* split to higher */
+      hi=lo/10;
+      lo=lo%10;
+    #endif
+    up++;
+    /* collect remaining Units, if any, into hi */
+    for (d=DECDPUN; d<dn->digits; up++, d+=DECDPUN) hi+=*up*powers[d-1];
+
+    /* now low has the lsd, hi the remainder */
+    if (hi>429496729 || (hi==429496729 && lo>5)) ; /* no reprieve possible */
+     else return X10(hi)+lo;
+    } /* integer */
+  decContextSetStatus(set, DEC_Invalid_operation); /* [may not return] */
+  return 0;
+  } /* decNumberToUInt32 */
+
+decNumber *decNumberFromInt64(decNumber *dn, int64_t in)
+{
+    uint64_t unsig = in;
+    if (in < 0) {
+#ifdef _MSC_VER
+        unsig = 0 - unsig;
+#else
+        unsig = -unsig;
+#endif
+    }
+
+    decNumberFromUInt64(dn, unsig);
+    if (in < 0) {
+        dn->bits = DECNEG;       /* sign needed */
+    }
+    return dn;
+} /* decNumberFromInt64 */
+
+decNumber *decNumberFromUInt64(decNumber *dn, uint64_t uin)
+{
+    Unit *up;                    /* work pointer */
+    decNumberZero(dn);           /* clean */
+    if (uin == 0) {
+        return dn;               /* [or decGetDigits bad call] */
+    }
+    for (up = dn->lsu; uin > 0; up++) {
+        *up = (Unit)(uin % (DECDPUNMAX + 1));
+        uin = uin / (DECDPUNMAX + 1);
+    }
+    dn->digits = decGetDigits(dn->lsu, up-dn->lsu);
+    return dn;
+} /* decNumberFromUInt64 */
+
+/* ------------------------------------------------------------------ */
+/* to-int64 -- conversion to int64 */
+/* */
+/* dn is the decNumber to convert. dn is assumed to have been */
+/* rounded to a floating point integer value. */
+/* set is the context for reporting errors */
+/* returns the converted decNumber, or 0 if Invalid is set */
+/* */
+/* Invalid is set if the decNumber is a NaN, Infinite or is out of */
+/* range for a signed 64 bit integer. */
+/* ------------------------------------------------------------------ */
+
+int64_t decNumberIntegralToInt64(const decNumber *dn, decContext *set)
+{
+    if (decNumberIsSpecial(dn) || (dn->exponent < 0) ||
+        (dn->digits + dn->exponent > 19)) {
+        goto Invalid;
+    } else {
+        int64_t d;               /* work */
+        const Unit *up;          /* ..
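+
+       [Editorial note -- a minimal usage sketch, not part of the
+        original source: round-tripping a host integer through the
+        conversions above, with traps disabled so errors arrive only
+        in the context status:
+            decContext ctx;
+            decNumber n;
+            decContextDefault(&ctx, DEC_INIT_BASE);
+            ctx.traps=0;
+            decNumberFromInt32(&n, -42);
+            Int i=decNumberToInt32(&n, &ctx);   -- i is -42
+        decNumberToInt32 requires exponent==0 and at most ten digits;
+        out-of-range values set DEC_Invalid_operation and return 0.]
+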
*/ + uint64_t hi = 0; + up = dn->lsu; /* -> lsu */ + + for (d = 1; d <= dn->digits; up++, d += DECDPUN) { + uint64_t prev = hi; + hi += *up * powers[d-1]; + if ((hi < prev) || (hi > INT64_MAX)) { + goto Invalid; + } + } + + uint64_t prev = hi; + hi *= (uint64_t)powers[dn->exponent]; + if ((hi < prev) || (hi > INT64_MAX)) { + goto Invalid; + } + return (decNumberIsNegative(dn)) ? -((int64_t)hi) : (int64_t)hi; + } + +Invalid: + decContextSetStatus(set, DEC_Invalid_operation); + return 0; +} /* decNumberIntegralToInt64 */ + + +/* ------------------------------------------------------------------ */ +/* to-scientific-string -- conversion to numeric string */ +/* to-engineering-string -- conversion to numeric string */ +/* */ +/* decNumberToString(dn, string); */ +/* decNumberToEngString(dn, string); */ +/* */ +/* dn is the decNumber to convert */ +/* string is the string where the result will be laid out */ +/* */ +/* string must be at least dn->digits+14 characters long */ +/* */ +/* No error is possible, and no status can be set. */ +/* ------------------------------------------------------------------ */ +char * decNumberToString(const decNumber *dn, char *string){ + decToString(dn, string, 0); + return string; + } /* DecNumberToString */ + +char * decNumberToEngString(const decNumber *dn, char *string){ + decToString(dn, string, 1); + return string; + } /* DecNumberToEngString */ + +/* ------------------------------------------------------------------ */ +/* to-number -- conversion from numeric string */ +/* */ +/* decNumberFromString -- convert string to decNumber */ +/* dn -- the number structure to fill */ +/* chars[] -- the string to convert ('\0' terminated) */ +/* set -- the context used for processing any error, */ +/* determining the maximum precision available */ +/* (set.digits), determining the maximum and minimum */ +/* exponent (set.emax and set.emin), determining if */ +/* extended values are allowed, and checking the */ +/* rounding mode if overflow occurs or rounding is */ +/* needed. */ +/* */ +/* The length of the coefficient and the size of the exponent are */ +/* checked by this routine, so the correct error (Underflow or */ +/* Overflow) can be reported or rounding applied, as necessary. */ +/* */ +/* If bad syntax is detected, the result will be a quiet NaN. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberFromString(decNumber *dn, const char chars[], + decContext *set) { + Int exponent=0; /* working exponent [assume 0] */ + uByte bits=0; /* working flags [assume +ve] */ + Unit *res; /* where result will be built */ + Unit resbuff[SD2U(DECBUFFER+9)];/* local buffer in case need temporary */ + /* [+9 allows for ln() constants] */ + Unit *allocres=NULL; /* -> allocated result, iff allocated */ + Int d=0; /* count of digits found in decimal part */ + const char *dotchar=NULL; /* where dot was found */ + const char *cfirst=chars; /* -> first character of decimal part */ + const char *last=NULL; /* -> last digit of decimal part */ + const char *c; /* work */ + Unit *up; /* .. */ + #if DECDPUN>1 + Int cut, out; /* .. 
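+
+       [Editorial note -- an illustration, not part of the original
+        source: with DECDPUN=3 each Unit holds three decimal digits,
+        least-significant unit first, so after
+            decNumberFromString(&n, "1234567", &ctx);
+        the coefficient is stored as n.lsu[0]=567, n.lsu[1]=234,
+        n.lsu[2]=1, with n.digits=7 and n.exponent=0.]
+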
*/ + #endif + Int residue; /* rounding residue */ + uInt status=0; /* error code */ + + #if DECCHECK + if (decCheckOperands(DECUNRESU, DECUNUSED, DECUNUSED, set)) + return decNumberZero(dn); + #endif + + do { /* status & malloc protection */ + for (c=chars;; c++) { /* -> input character */ + if (*c>='0' && *c<='9') { /* test for Arabic digit */ + last=c; + d++; /* count of real digits */ + continue; /* still in decimal part */ + } + if (*c=='.' && dotchar==NULL) { /* first '.' */ + dotchar=c; /* record offset into decimal part */ + if (c==cfirst) cfirst++; /* first digit must follow */ + continue;} + if (c==chars) { /* first in string... */ + if (*c=='-') { /* valid - sign */ + cfirst++; + bits=DECNEG; + continue;} + if (*c=='+') { /* valid + sign */ + cfirst++; + continue;} + } + /* *c is not a digit, or a valid +, -, or '.' */ + break; + } /* c */ + + if (last==NULL) { /* no digits yet */ + status=DEC_Conversion_syntax;/* assume the worst */ + if (*c=='\0') break; /* and no more to come... */ + #if DECSUBSET + /* if subset then infinities and NaNs are not allowed */ + if (!set->extended) break; /* hopeless */ + #endif + /* Infinities and NaNs are possible, here */ + if (dotchar!=NULL) break; /* .. unless had a dot */ + decNumberZero(dn); /* be optimistic */ + if (decBiStr(c, "infinity", "INFINITY") + || decBiStr(c, "inf", "INF")) { + dn->bits=bits | DECINF; + status=0; /* is OK */ + break; /* all done */ + } + /* a NaN expected */ + /* 2003.09.10 NaNs are now permitted to have a sign */ + dn->bits=bits | DECNAN; /* assume simple NaN */ + if (*c=='s' || *c=='S') { /* looks like an sNaN */ + c++; + dn->bits=bits | DECSNAN; + } + if (*c!='n' && *c!='N') break; /* check caseless "NaN" */ + c++; + if (*c!='a' && *c!='A') break; /* .. */ + c++; + if (*c!='n' && *c!='N') break; /* .. */ + c++; + /* now either nothing, or nnnn payload, expected */ + /* -> start of integer and skip leading 0s [including plain 0] */ + for (cfirst=c; *cfirst=='0';) cfirst++; + if (*cfirst=='\0') { /* "NaN" or "sNaN", maybe with all 0s */ + status=0; /* it's good */ + break; /* .. */ + } + /* something other than 0s; setup last and d as usual [no dots] */ + for (c=cfirst;; c++, d++) { + if (*c<'0' || *c>'9') break; /* test for Arabic digit */ + last=c; + } + if (*c!='\0') break; /* not all digits */ + if (d>set->digits-1) { + /* [NB: payload in a decNumber can be full length unless */ + /* clamped, in which case can only be digits-1] */ + if (set->clamp) break; + if (d>set->digits) break; + } /* too many digits? */ + /* good; drop through to convert the integer to coefficient */ + status=0; /* syntax is OK */ + bits=dn->bits; /* for copy-back */ + } /* last==NULL */ + + else if (*c!='\0') { /* more to process... 
*/
+      /* had some digits; exponent is only valid sequence now */
+      Flag nege;                 /* 1=negative exponent */
+      const char *firstexp;      /* -> first significant exponent digit */
+      status=DEC_Conversion_syntax; /* assume the worst */
+      if (*c!='e' && *c!='E') break;
+      /* Found 'e' or 'E' -- now process explicit exponent */
+      /* 1998.07.11: sign no longer required */
+      nege=0;
+      c++;                       /* to (possible) sign */
+      if (*c=='-') {nege=1; c++;}
+       else if (*c=='+') c++;
+      if (*c=='\0') break;
+
+      for (; *c=='0' && *(c+1)!='\0';) c++; /* strip insignificant zeros */
+      firstexp=c;                /* save exponent digit place */
+      for (; ;c++) {
+        if (*c<'0' || *c>'9') break; /* not a digit */
+        exponent=X10(exponent)+(Int)*c-(Int)'0';
+        } /* c */
+      /* if not now on a '\0', *c must not be a digit */
+      if (*c!='\0') break;
+
+      /* (this next test must be after the syntax checks) */
+      /* if it was too long the exponent may have wrapped, so check */
+      /* carefully and set it to a certain overflow if wrap possible */
+      if (c>=firstexp+9+1) {
+        if (c>firstexp+9+1 || *firstexp>'1') exponent=DECNUMMAXE*2;
+        /* [up to 1999999999 is OK, for example 1E-1000000998] */
+        }
+      if (nege) exponent=-exponent; /* was negative */
+      status=0;                  /* is OK */
+      } /* stuff after digits */
+
+    /* Here when whole string has been inspected; syntax is good */
+    /* cfirst->first digit (never dot), last->last digit (ditto) */
+
+    /* strip leading zeros/dot [leave final 0 if all 0's] */
+    if (*cfirst=='0') {          /* [cfirst has stepped over .] */
+      for (c=cfirst; c<last; c++, cfirst++) {
+        if (*c=='.') continue;   /* ignore dots */
+        if (*c!='0') break;      /* non-zero found */
+        d--;                     /* 0 stripped */
+        } /* c */
+      #if DECSUBSET
+      /* make a rapid exit for easy zeros if !extended */
+      if (*cfirst=='0' && !set->extended) {
+        decNumberZero(dn);       /* clean result */
+        break;                   /* [could be return] */
+        }
+      #endif
+      } /* at least one leading 0 */
+
+    /* Handle decimal point... */
+    if (dotchar!=NULL && dotchar<last) /* non-trailing '.' found? */
+      exponent-=(last-dotchar);  /* adjust exponent */
+    /* [we can now ignore the .] */
+
+    /* OK, the digits string is good.  Copy to the decNumber, or to */
+    /* a temporary units array if rounding is needed */
+    if (d<=set->digits) res=dn->lsu; /* fits into supplied decNumber */
+     else {                      /* rounding needed */
+      Int needbytes=D2U(d)*sizeof(Unit); /* bytes needed */
+      res=resbuff;               /* assume use local buffer */
+      if (needbytes>(Int)sizeof(resbuff)) { /* too big for local */
+        allocres=(Unit *)malloc(needbytes);
+        if (allocres==NULL) {status|=DEC_Insufficient_storage; break;}
+        res=allocres;
+        }
+      }
+    /* res now -> number lsu, buffer, or allocated storage for Unit array */
+
+    /* Place the coefficient into the selected Unit array */
+    /* [this is often 70% of the cost of this function when DECDPUN>1] */
+    #if DECDPUN>1
+    out=0;                       /* accumulator */
+    up=res+D2U(d)-1;             /* -> msu */
+    cut=d-(up-res)*DECDPUN;      /* digits in top unit */
+    for (c=cfirst;; c++) {       /* along the digits */
+      if (*c=='.') continue;     /* ignore '.' [don't decrement cut] */
+      out=X10(out)+(Int)*c-(Int)'0';
+      if (c==last) break;        /* done [never get to trailing '.'] */
+      cut--;
+      if (cut>0) continue;       /* more for this unit */
+      *up=(Unit)out;             /* write unit */
+      up--;                      /* prepare for unit below.. */
+      cut=DECDPUN;               /* .. */
+      out=0;                     /* .. */
+      } /* c */
+    *up=(Unit)out;               /* write lsu */
+
+    #else
+    /* DECDPUN==1 */
+    up=res;                      /* -> lsu */
+    for (c=last; c>=cfirst; c--) { /* over each character, from least */
+      if (*c=='.') continue;     /* ignore . [don't step up] */
+      *up=(Unit)((Int)*c-(Int)'0');
+      up++;
+      } /* c */
+    #endif
+
+    dn->bits=bits;
+    dn->exponent=exponent;
+    dn->digits=d;
+
+    /* if not in number (too long) shorten into the number */
+    if (d>set->digits) {
+      residue=0;
+      decSetCoeff(dn, set, res, d, &residue, &status);
+      /* always check for overflow or subnormal and round as needed */
+      decFinalize(dn, set, &residue, &status);
+      }
+     else { /* no rounding, but may still have overflow or subnormal */
+      /* [these tests are just for performance; finalize repeats them] */
+      if ((dn->exponent-1<set->emin-dn->digits)
+       || (dn->exponent-1>set->emax-set->digits)) {
+        residue=0;
+        decFinalize(dn, set, &residue, &status);
+        }
+      }
+    /* decNumberShow(dn); */
+    } while(0);                  /* [for break] */
+
+  if (allocres!=NULL) free(allocres); /* drop any storage used */
+  if (status!=0) decStatus(dn, status, set);
+  return dn;
+  } /* decNumberFromString */
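+
+/* [Editorial example -- not part of the original source. Typical use */
+/*  of the conversion above, with errors reported via status rather */
+/*  than SIGFPE: */
+/* */
+/*    decContext ctx; */
+/*    decNumber n; */
+/*    decContextDefault(&ctx, DEC_INIT_BASE); */
+/*    ctx.traps=0; */
+/*    decNumberFromString(&n, "-12.70E+3", &ctx); */
+/*    decNumberFromString(&n, "banana", &ctx);  -- quiet NaN, and */
+/*    ctx.status has DEC_Conversion_syntax set.] */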
+
+/* ================================================================== */
+/* Operators */
+/* ================================================================== */
+
+/* ------------------------------------------------------------------ */
+/* decNumberAbs -- absolute value operator */
+/* */
+/* This computes C = abs(A) */
+/* */
+/* res is C, the result. C may be A */
+/* rhs is A */
+/* set is the context */
+/* */
+/* See also decNumberCopyAbs for a quiet bitwise version of this. */
+/* C must have space for set->digits digits. */
+/* ------------------------------------------------------------------ */
+/* This has the same effect as decNumberPlus unless A is negative, */
+/* in which case it has the same effect as decNumberMinus. */
+/* ------------------------------------------------------------------ */
+decNumber * decNumberAbs(decNumber *res, const decNumber *rhs,
+                         decContext *set) {
+  decNumber dzero;               /* for 0 */
+  uInt status=0;                 /* accumulator */
+
+  #if DECCHECK
+  if (decCheckOperands(res, DECUNUSED, rhs, set)) return res;
+  #endif
+
+  decNumberZero(&dzero);         /* set 0 */
+  dzero.exponent=rhs->exponent;  /* [no coefficient expansion] */
+  decAddOp(res, &dzero, rhs, set, (uByte)(rhs->bits & DECNEG), &status);
+  if (status!=0) decStatus(res, status, set);
+  #if DECCHECK
+  decCheckInexact(res, set);
+  #endif
+  return res;
+  } /* decNumberAbs */
+
+/* ------------------------------------------------------------------ */
+/* decNumberAdd -- add two Numbers */
+/* */
+/* This computes C = A + B */
+/* */
+/* res is C, the result. C may be A and/or B (e.g., X=X+X) */
+/* lhs is A */
+/* rhs is B */
+/* set is the context */
+/* */
+/* C must have space for set->digits digits. */
+/* ------------------------------------------------------------------ */
+/* This just calls the routine shared with Subtract */
+decNumber * decNumberAdd(decNumber *res, const decNumber *lhs,
+                         const decNumber *rhs, decContext *set) {
+  uInt status=0;                 /* accumulator */
+  decAddOp(res, lhs, rhs, set, 0, &status);
+  if (status!=0) decStatus(res, status, set);
+  #if DECCHECK
+  decCheckInexact(res, set);
+  #endif
+  return res;
+  } /* decNumberAdd */
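+
+/* [Editorial example -- not part of the original source. The */
+/*  digitwise logical operations below (And, Or, Invert) take */
+/*  operands whose digits are all 0 or 1, with exponent 0 and sign 0, */
+/*  as described in usage note 3 above. For instance, with */
+/*  ctx.digits>=4: */
+/* */
+/*    decNumberFromString(&a, "1101", &ctx); */
+/*    decNumberFromString(&b, "1011", &ctx); */
+/*    decNumberAnd(&r, &a, &b, &ctx);   -- r is 1001 */
+/* */
+/*  Any other operand (e.g. "21" or "-1") sets Invalid_operation and */
+/*  returns a quiet NaN.] */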
+
+/* ------------------------------------------------------------------ */
+/* decNumberAnd -- AND two Numbers, digitwise */
+/* */
+/* This computes C = A & B */
+/* */
+/* res is C, the result. C may be A and/or B (e.g., X=X&X) */
+/* lhs is A */
+/* rhs is B */
+/* set is the context (used for result length and error report) */
+/* */
+/* C must have space for set->digits digits. */
+/* */
+/* Logical function restrictions apply (see above); a NaN is */
+/* returned with Invalid_operation if a restriction is violated. */
+/* ------------------------------------------------------------------ */
+decNumber * decNumberAnd(decNumber *res, const decNumber *lhs,
+                         const decNumber *rhs, decContext *set) {
+  const Unit *ua, *ub;           /* -> operands */
+  const Unit *msua, *msub;       /* -> operand msus */
+  Unit *uc, *msuc;               /* -> result and its msu */
+  Int msudigs;                   /* digits in res msu */
+  #if DECCHECK
+  if (decCheckOperands(res, lhs, rhs, set)) return res;
+  #endif
+
+  if (lhs->exponent!=0 || decNumberIsSpecial(lhs) || decNumberIsNegative(lhs)
+   || rhs->exponent!=0 || decNumberIsSpecial(rhs) || decNumberIsNegative(rhs)) {
+    decStatus(res, DEC_Invalid_operation, set);
+    return res;
+    }
+
+  /* operands are valid */
+  ua=lhs->lsu;                   /* bottom-up */
+  ub=rhs->lsu;                   /* .. */
+  uc=res->lsu;                   /* .. */
+  msua=ua+D2U(lhs->digits)-1;    /* -> msu of lhs */
+  msub=ub+D2U(rhs->digits)-1;    /* -> msu of rhs */
+  msuc=uc+D2U(set->digits)-1;    /* -> msu of result */
+  msudigs=MSUDIGITS(set->digits); /* [faster than remainder] */
+  for (; uc<=msuc; ua++, ub++, uc++) { /* Unit loop */
+    Unit a, b;                   /* extract units */
+    if (ua>msua) a=0;
+     else a=*ua;
+    if (ub>msub) b=0;
+     else b=*ub;
+    *uc=0;                       /* can now write back */
+    if (a|b) {                   /* maybe 1 bits to examine */
+      Int i, j;
+      *uc=0;                     /* can now write back */
+      /* This loop could be unrolled and/or use BIN2BCD tables */
+      for (i=0; i<msudigs; i++) {
+        if (a&b&1) *uc=*uc+(Unit)powers[i]; /* effect AND */
+        j=a%10;
+        a=a/10;
+        j|=b%10;
+        b=b/10;
+        if (j>1) {
+          decStatus(res, DEC_Invalid_operation, set);
+          return res;
+          }
+        if (uc==msuc && i==msudigs-1) break; /* just did final digit */
+        } /* each digit */
+      } /* both OK */
+    } /* each unit */
+  /* [here uc-1 is the msu of the result] */
+  res->digits=decGetDigits(res->lsu, uc-res->lsu);
+  res->exponent=0;               /* integer */
+  res->bits=0;                   /* sign=0 */
+  return res;  /* [no status to set] */
+  } /* decNumberAnd */
+
+/* ------------------------------------------------------------------ */
+/* decNumberCompare -- compare two Numbers */
+/* */
+/* This computes C = A ? B */
+/* */
+/* res is C, the result. C may be A and/or B (e.g., X=X?X) */
+/* lhs is A */
+/* rhs is B */
+/* set is the context */
+/* */
+/* C must have space for one digit (or NaN). */
+/* ------------------------------------------------------------------ */
+decNumber * decNumberCompare(decNumber *res, const decNumber *lhs,
+                             const decNumber *rhs, decContext *set) {
+  uInt status=0;                 /* accumulator */
+  decCompareOp(res, lhs, rhs, set, COMPARE, &status);
+  if (status!=0) decStatus(res, status, set);
+  return res;
+  } /* decNumberCompare */
+
+/* ------------------------------------------------------------------ */
+/* decNumberCompareSignal -- compare, signalling on all NaNs */
+/* */
+/* This computes C = A ? B */
+/* */
+/* res is C, the result. C may be A and/or B (e.g., X=X?X) */
+/* lhs is A */
+/* rhs is B */
+/* set is the context */
+/* */
+/* C must have space for one digit (or NaN). */
+/* ------------------------------------------------------------------ */
+decNumber * decNumberCompareSignal(decNumber *res, const decNumber *lhs,
+                                   const decNumber *rhs, decContext *set) {
+  uInt status=0;                 /* accumulator */
+  decCompareOp(res, lhs, rhs, set, COMPSIG, &status);
+  if (status!=0) decStatus(res, status, set);
+  return res;
+  } /* decNumberCompareSignal */
+
+/* ------------------------------------------------------------------ */
+/* decNumberCompareTotal -- compare two Numbers, using total ordering */
+/* */
+/* This computes C = A ?
B, under total ordering */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X?X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for one digit; the result will always be one of */ +/* -1, 0, or 1. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberCompareTotal(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decCompareOp(res, lhs, rhs, set, COMPTOTAL, &status); + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberCompareTotal */ + +/* ------------------------------------------------------------------ */ +/* decNumberCompareTotalMag -- compare, total ordering of magnitudes */ +/* */ +/* This computes C = |A| ? |B|, under total ordering */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X?X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for one digit; the result will always be one of */ +/* -1, 0, or 1. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberCompareTotalMag(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + uInt needbytes; /* for space calculations */ + decNumber bufa[D2N(DECBUFFER+1)];/* +1 in case DECBUFFER=0 */ + decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */ + decNumber bufb[D2N(DECBUFFER+1)]; + decNumber *allocbufb=NULL; /* -> allocated bufb, iff allocated */ + decNumber *a, *b; /* temporary pointers */ + + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + #endif + + do { /* protect allocated storage */ + /* if either is negative, take a copy and absolute */ + if (decNumberIsNegative(lhs)) { /* lhs<0 */ + a=bufa; + needbytes=sizeof(decNumber)+(D2U(lhs->digits)-1)*sizeof(Unit); + if (needbytes>sizeof(bufa)) { /* need malloc space */ + allocbufa=(decNumber *)malloc(needbytes); + if (allocbufa==NULL) { /* hopeless -- abandon */ + status|=DEC_Insufficient_storage; + break;} + a=allocbufa; /* use the allocated space */ + } + decNumberCopy(a, lhs); /* copy content */ + a->bits&=~DECNEG; /* .. and clear the sign */ + lhs=a; /* use copy from here on */ + } + if (decNumberIsNegative(rhs)) { /* rhs<0 */ + b=bufb; + needbytes=sizeof(decNumber)+(D2U(rhs->digits)-1)*sizeof(Unit); + if (needbytes>sizeof(bufb)) { /* need malloc space */ + allocbufb=(decNumber *)malloc(needbytes); + if (allocbufb==NULL) { /* hopeless -- abandon */ + status|=DEC_Insufficient_storage; + break;} + b=allocbufb; /* use the allocated space */ + } + decNumberCopy(b, rhs); /* copy content */ + b->bits&=~DECNEG; /* .. and clear the sign */ + rhs=b; /* use copy from here on */ + } + decCompareOp(res, lhs, rhs, set, COMPTOTAL, &status); + } while(0); /* end protected */ + + if (allocbufa!=NULL) free(allocbufa); /* drop any storage used */ + if (allocbufb!=NULL) free(allocbufb); /* .. */ + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberCompareTotalMag */ + +/* ------------------------------------------------------------------ */ +/* decNumberDivide -- divide one number by another */ +/* */ +/* This computes C = A / B */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X/X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. 
*/ +/* ------------------------------------------------------------------ */ +decNumber * decNumberDivide(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decDivideOp(res, lhs, rhs, set, DIVIDE, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberDivide */ + +/* ------------------------------------------------------------------ */ +/* decNumberDivideInteger -- divide and return integer quotient */ +/* */ +/* This computes C = A # B, where # is the integer divide operator */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X#X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberDivideInteger(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decDivideOp(res, lhs, rhs, set, DIVIDEINT, &status); + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberDivideInteger */ + +/* ------------------------------------------------------------------ */ +/* decNumberExp -- exponentiation */ +/* */ +/* This computes C = exp(A) */ +/* */ +/* res is C, the result. C may be A */ +/* rhs is A */ +/* set is the context; note that rounding mode has no effect */ +/* */ +/* C must have space for set->digits digits. */ +/* */ +/* Mathematical function restrictions apply (see above); a NaN is */ +/* returned with Invalid_operation if a restriction is violated. */ +/* */ +/* Finite results will always be full precision and Inexact, except */ +/* when A is a zero or -Infinity (giving 1 or 0 respectively). */ +/* */ +/* An Inexact result is rounded using DEC_ROUND_HALF_EVEN; it will */ +/* almost always be correctly rounded, but may be up to 1 ulp in */ +/* error in rare cases. */ +/* ------------------------------------------------------------------ */ +/* This is a wrapper for decExpOp which can handle the slightly wider */ +/* (double) range needed by Ln (which has to be able to calculate */ +/* exp(-a) where a can be the tiniest number (Ntiny). */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberExp(decNumber *res, const decNumber *rhs, + decContext *set) { + uInt status=0; /* accumulator */ + #if DECSUBSET + decNumber *allocrhs=NULL; /* non-NULL if rounded rhs allocated */ + #endif + + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; + #endif + + /* Check restrictions; these restrictions ensure that if h=8 (see */ + /* decExpOp) then the result will either overflow or underflow to 0. */ + /* Other math functions restrict the input range, too, for inverses. */ + /* If not violated then carry out the operation. 
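+
+     [Editorial note -- an illustration, not part of the original
+      source: decCheckMath enforces the DEC_MAX_MATH (999999) bounds
+      from usage note 2, so with a context initialised via
+      DEC_INIT_DECIMAL64
+          decNumberFromString(&x, "1E+1000000", &ctx);
+          decNumberExp(&r, &x, &ctx);
+      returns a quiet NaN with DEC_Invalid_operation in ctx.status,
+      before any computation is attempted.]
+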
*/ + if (!decCheckMath(rhs, set, &status)) do { /* protect allocation */ + #if DECSUBSET + if (!set->extended) { + /* reduce operand and set lostDigits status, as needed */ + if (rhs->digits>set->digits) { + allocrhs=decRoundOperand(rhs, set, &status); + if (allocrhs==NULL) break; + rhs=allocrhs; + } + } + #endif + decExpOp(res, rhs, set, &status); + } while(0); /* end protected */ + + #if DECSUBSET + if (allocrhs !=NULL) free(allocrhs); /* drop any storage used */ + #endif + /* apply significant status */ + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberExp */ + +/* ------------------------------------------------------------------ */ +/* decNumberFMA -- fused multiply add */ +/* */ +/* This computes D = (A * B) + C with only one rounding */ +/* */ +/* res is D, the result. D may be A or B or C (e.g., X=FMA(X,X,X)) */ +/* lhs is A */ +/* rhs is B */ +/* fhs is C [far hand side] */ +/* set is the context */ +/* */ +/* Mathematical function restrictions apply (see above); a NaN is */ +/* returned with Invalid_operation if a restriction is violated. */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberFMA(decNumber *res, const decNumber *lhs, + const decNumber *rhs, const decNumber *fhs, + decContext *set) { + uInt status=0; /* accumulator */ + decContext dcmul; /* context for the multiplication */ + uInt needbytes; /* for space calculations */ + decNumber bufa[D2N(DECBUFFER*2+1)]; + decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */ + decNumber *acc; /* accumulator pointer */ + decNumber dzero; /* work */ + + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + if (decCheckOperands(res, fhs, DECUNUSED, set)) return res; + #endif + + do { /* protect allocated storage */ + #if DECSUBSET + if (!set->extended) { /* [undefined if subset] */ + status|=DEC_Invalid_operation; + break;} + #endif + /* Check math restrictions [these ensure no overflow or underflow] */ + if ((!decNumberIsSpecial(lhs) && decCheckMath(lhs, set, &status)) + || (!decNumberIsSpecial(rhs) && decCheckMath(rhs, set, &status)) + || (!decNumberIsSpecial(fhs) && decCheckMath(fhs, set, &status))) break; + /* set up context for multiply */ + dcmul=*set; + dcmul.digits=lhs->digits+rhs->digits; /* just enough */ + /* [The above may be an over-estimate for subset arithmetic, but that's OK] */ + dcmul.emax=DEC_MAX_EMAX; /* effectively unbounded .. 
*/ + dcmul.emin=DEC_MIN_EMIN; /* [thanks to Math restrictions] */ + /* set up decNumber space to receive the result of the multiply */ + acc=bufa; /* may fit */ + needbytes=sizeof(decNumber)+(D2U(dcmul.digits)-1)*sizeof(Unit); + if (needbytes>sizeof(bufa)) { /* need malloc space */ + allocbufa=(decNumber *)malloc(needbytes); + if (allocbufa==NULL) { /* hopeless -- abandon */ + status|=DEC_Insufficient_storage; + break;} + acc=allocbufa; /* use the allocated space */ + } + /* multiply with extended range and necessary precision */ + /*printf("emin=%ld\n", dcmul.emin); */ + decMultiplyOp(acc, lhs, rhs, &dcmul, &status); + /* Only Invalid operation (from sNaN or Inf * 0) is possible in */ + /* status; if either is seen than ignore fhs (in case it is */ + /* another sNaN) and set acc to NaN unless we had an sNaN */ + /* [decMultiplyOp leaves that to caller] */ + /* Note sNaN has to go through addOp to shorten payload if */ + /* necessary */ + if ((status&DEC_Invalid_operation)!=0) { + if (!(status&DEC_sNaN)) { /* but be true invalid */ + decNumberZero(res); /* acc not yet set */ + res->bits=DECNAN; + break; + } + decNumberZero(&dzero); /* make 0 (any non-NaN would do) */ + fhs=&dzero; /* use that */ + } + #if DECCHECK + else { /* multiply was OK */ + if (status!=0) printf("Status=%08lx after FMA multiply\n", status); + } + #endif + /* add the third operand and result -> res, and all is done */ + decAddOp(res, acc, fhs, set, 0, &status); + } while(0); /* end protected */ + + if (allocbufa!=NULL) free(allocbufa); /* drop any storage used */ + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberFMA */ + +/* ------------------------------------------------------------------ */ +/* decNumberInvert -- invert a Number, digitwise */ +/* */ +/* This computes C = ~A */ +/* */ +/* res is C, the result. C may be A (e.g., X=~X) */ +/* rhs is A */ +/* set is the context (used for result length and error report) */ +/* */ +/* C must have space for set->digits digits. */ +/* */ +/* Logical function restrictions apply (see above); a NaN is */ +/* returned with Invalid_operation if a restriction is violated. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberInvert(decNumber *res, const decNumber *rhs, + decContext *set) { + const Unit *ua, *msua; /* -> operand and its msu */ + Unit *uc, *msuc; /* -> result and its msu */ + Int msudigs; /* digits in res msu */ + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; + #endif + + if (rhs->exponent!=0 || decNumberIsSpecial(rhs) || decNumberIsNegative(rhs)) { + decStatus(res, DEC_Invalid_operation, set); + return res; + } + /* operand is valid */ + ua=rhs->lsu; /* bottom-up */ + uc=res->lsu; /* .. 
*/
+  msua=ua+D2U(rhs->digits)-1;    /* -> msu of rhs */
+  msuc=uc+D2U(set->digits)-1;    /* -> msu of result */
+  msudigs=MSUDIGITS(set->digits); /* [faster than remainder] */
+  for (; uc<=msuc; ua++, uc++) { /* Unit loop */
+    Unit a;                      /* extract unit */
+    Int i, j;                    /* work */
+    if (ua>msua) a=0;
+     else a=*ua;
+    *uc=0;                       /* can now write back */
+    /* always need to examine all bits in rhs */
+    /* This loop could be unrolled and/or use BIN2BCD tables */
+    for (i=0; i<msudigs; i++) {
+      if ((~a)&1) *uc=*uc+(Unit)powers[i]; /* effect INVERT */
+      j=a%10;
+      a=a/10;
+      if (j>1) {
+        decStatus(res, DEC_Invalid_operation, set);
+        return res;
+        }
+      if (uc==msuc && i==msudigs-1) break; /* just did final digit */
+      } /* each digit */
+    } /* each unit */
+  /* [here uc-1 is the msu of the result] */
+  res->digits=decGetDigits(res->lsu, uc-res->lsu);
+  res->exponent=0;               /* integer */
+  res->bits=0;                   /* sign=0 */
+  return res;  /* [no status to set] */
+  } /* decNumberInvert */
+
+/* ------------------------------------------------------------------ */
+/* decNumberLn -- natural logarithm */
+/* */
+/* This computes C = ln(A) */
+/* */
+/* res is C, the result. C may be A */
+/* rhs is A */
+/* set is the context; note that rounding mode has no effect */
+/* */
+/* C must have space for set->digits digits. */
+/* */
+/* Notable cases: */
+/* A<0 -> Invalid */
+/* A=0 -> -Infinity (Exact) */
+/* A=+Infinity -> +Infinity (Exact) */
+/* A=1 exactly -> 0 (Exact) */
+/* */
+/* Mathematical function restrictions apply (see above); a NaN is */
+/* returned with Invalid_operation if a restriction is violated. */
+/* */
+/* An Inexact result is rounded using DEC_ROUND_HALF_EVEN; it will */
+/* almost always be correctly rounded, but may be up to 1 ulp in */
+/* error in rare cases. */
+/* ------------------------------------------------------------------ */
+/* This is a wrapper for decLnOp which can handle the slightly wider */
+/* (+11) range needed by Ln, Log10, etc. (which may have to be able */
+/* to calculate at p+e+2). */
+/* ------------------------------------------------------------------ */
+decNumber * decNumberLn(decNumber *res, const decNumber *rhs,
+                        decContext *set) {
+  uInt status=0;                 /* accumulator */
+  #if DECSUBSET
+  decNumber *allocrhs=NULL;      /* non-NULL if rounded rhs allocated */
+  #endif
+
+  #if DECCHECK
+  if (decCheckOperands(res, DECUNUSED, rhs, set)) return res;
+  #endif
+
+  /* Check restrictions; this is a math function; if not violated */
+  /* then carry out the operation. */
+  if (!decCheckMath(rhs, set, &status)) do { /* protect allocation */
+    #if DECSUBSET
+    if (!set->extended) {
+      /* reduce operand and set lostDigits status, as needed */
+      if (rhs->digits>set->digits) {
+        allocrhs=decRoundOperand(rhs, set, &status);
+        if (allocrhs==NULL) break;
+        rhs=allocrhs;
+        }
+      /* special check in subset for rhs=0 */
+      if (ISZERO(rhs)) {         /* +/- zeros -> error */
+        status|=DEC_Invalid_operation;
+        break;}
+      } /* extended=0 */
+    #endif
+    decLnOp(res, rhs, set, &status);
+    } while(0);                  /* end protected */
+
+  #if DECSUBSET
+  if (allocrhs !=NULL) free(allocrhs); /* drop any storage used */
+  #endif
+  /* apply significant status */
+  if (status!=0) decStatus(res, status, set);
+  #if DECCHECK
+  decCheckInexact(res, set);
+  #endif
+  return res;
+  } /* decNumberLn */
+
+/* ------------------------------------------------------------------ */
+/* decNumberLogB - get adjusted exponent, by 754r rules */
+/* */
+/* This computes C = adjustedexponent(A) */
+/* */
+/* res is C, the result.
C may be A */ +/* rhs is A */ +/* set is the context, used only for digits and status */ +/* */ +/* C must have space for 10 digits (A might have 10**9 digits and */ +/* an exponent of +999999999, or one digit and an exponent of */ +/* -1999999999). */ +/* */ +/* This returns the adjusted exponent of A after (in theory) padding */ +/* with zeros on the right to set->digits digits while keeping the */ +/* same value. The exponent is not limited by emin/emax. */ +/* */ +/* Notable cases: */ +/* A<0 -> Use |A| */ +/* A=0 -> -Infinity (Division by zero) */ +/* A=Infinite -> +Infinity (Exact) */ +/* A=1 exactly -> 0 (Exact) */ +/* NaNs are propagated as usual */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberLogB(decNumber *res, const decNumber *rhs, + decContext *set) { + uInt status=0; /* accumulator */ + + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; + #endif + + /* NaNs as usual; Infinities return +Infinity; 0->oops */ + if (decNumberIsNaN(rhs)) decNaNs(res, rhs, NULL, set, &status); + else if (decNumberIsInfinite(rhs)) decNumberCopyAbs(res, rhs); + else if (decNumberIsZero(rhs)) { + decNumberZero(res); /* prepare for Infinity */ + res->bits=DECNEG|DECINF; /* -Infinity */ + status|=DEC_Division_by_zero; /* as per 754r */ + } + else { /* finite non-zero */ + Int ae=rhs->exponent+rhs->digits-1; /* adjusted exponent */ + decNumberFromInt32(res, ae); /* lay it out */ + } + + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberLogB */ + +/* ------------------------------------------------------------------ */ +/* decNumberLog10 -- logarithm in base 10 */ +/* */ +/* This computes C = log10(A) */ +/* */ +/* res is C, the result. C may be A */ +/* rhs is A */ +/* set is the context; note that rounding mode has no effect */ +/* */ +/* C must have space for set->digits digits. */ +/* */ +/* Notable cases: */ +/* A<0 -> Invalid */ +/* A=0 -> -Infinity (Exact) */ +/* A=+Infinity -> +Infinity (Exact) */ +/* A=10**n (if n is an integer) -> n (Exact) */ +/* */ +/* Mathematical function restrictions apply (see above); a NaN is */ +/* returned with Invalid_operation if a restriction is violated. */ +/* */ +/* An Inexact result is rounded using DEC_ROUND_HALF_EVEN; it will */ +/* almost always be correctly rounded, but may be up to 1 ulp in */ +/* error in rare cases. */ +/* ------------------------------------------------------------------ */ +/* This calculates ln(A)/ln(10) using appropriate precision. For */ +/* ln(A) this is the max(p, rhs->digits + t) + 3, where p is the */ +/* requested digits and t is the number of digits in the exponent */ +/* (maximum 6). For ln(10) it is p + 3; this is often handled by the */ +/* fastpath in decLnOp. The final division is done to the requested */ +/* precision. 
*/
+/* ------------------------------------------------------------------ */
+decNumber * decNumberLog10(decNumber *res, const decNumber *rhs,
+                           decContext *set) {
+  uInt status=0, ignore=0;       /* status accumulators */
+  uInt needbytes;                /* for space calculations */
+  Int p;                         /* working precision */
+  Int t;                         /* digits in exponent of A */
+
+  /* buffers for a and b working decimals */
+  /* (adjustment calculator, same size) */
+  decNumber bufa[D2N(DECBUFFER+2)];
+  decNumber *allocbufa=NULL;     /* -> allocated bufa, iff allocated */
+  decNumber *a=bufa;             /* temporary a */
+  decNumber bufb[D2N(DECBUFFER+2)];
+  decNumber *allocbufb=NULL;     /* -> allocated bufb, iff allocated */
+  decNumber *b=bufb;             /* temporary b */
+  decNumber bufw[D2N(10)];       /* working 2-10 digit number */
+  decNumber *w=bufw;             /* .. */
+  #if DECSUBSET
+  decNumber *allocrhs=NULL;      /* non-NULL if rounded rhs allocated */
+  #endif
+
+  decContext aset;               /* working context */
+
+  #if DECCHECK
+  if (decCheckOperands(res, DECUNUSED, rhs, set)) return res;
+  #endif
+
+  /* Check restrictions; this is a math function; if not violated */
+  /* then carry out the operation. */
+  if (!decCheckMath(rhs, set, &status)) do { /* protect malloc */
+    #if DECSUBSET
+    if (!set->extended) {
+      /* reduce operand and set lostDigits status, as needed */
+      if (rhs->digits>set->digits) {
+        allocrhs=decRoundOperand(rhs, set, &status);
+        if (allocrhs==NULL) break;
+        rhs=allocrhs;
+        }
+      /* special check in subset for rhs=0 */
+      if (ISZERO(rhs)) {         /* +/- zeros -> error */
+        status|=DEC_Invalid_operation;
+        break;}
+      } /* extended=0 */
+    #endif
+
+    decContextDefault(&aset, DEC_INIT_DECIMAL64); /* clean context */
+
+    /* handle exact powers of 10; only check if +ve finite */
+    if (!(rhs->bits&(DECNEG|DECSPECIAL)) && !ISZERO(rhs)) {
+      Int residue=0;             /* (no residue) */
+      uInt copystat=0;           /* clean status */
+
+      /* round to a single digit... */
+      aset.digits=1;
+      decCopyFit(w, rhs, &aset, &residue, &copystat); /* copy & shorten */
+      /* if exact and the digit is 1, rhs is a power of 10 */
+      if (!(copystat&DEC_Inexact) && w->lsu[0]==1) {
+        /* the exponent, conveniently, is the power of 10; making */
+        /* this the result needs a little care as it might not fit, */
+        /* so first convert it into the working number, and then move */
+        /* to res */
+        decNumberFromInt32(w, w->exponent);
+        residue=0;
+        decCopyFit(res, w, set, &residue, &status); /* copy & round */
+        decFinish(res, set, &residue, &status);     /* cleanup/set flags */
+        break;
+        } /* not a power of 10 */
+      } /* not a candidate for exact */
+
+    /* simplify the information-content calculation to use 'total */
+    /* number of digits in a, including exponent' as compared to the */
+    /* requested digits, as increasing this will only rarely cost an */
+    /* iteration in ln(a) anyway */
+    t=6;                         /* it can never be >6 */
+
+    /* allocate space when needed... */
+    p=(rhs->digits+t>set->digits?rhs->digits+t:set->digits)+3;
+    needbytes=sizeof(decNumber)+(D2U(p)-1)*sizeof(Unit);
+    if (needbytes>sizeof(bufa)) { /* need malloc space */
+      allocbufa=(decNumber *)malloc(needbytes);
+      if (allocbufa==NULL) {     /* hopeless -- abandon */
+        status|=DEC_Insufficient_storage;
+        break;}
+      a=allocbufa;               /* use the allocated space */
+      }
+    aset.digits=p;               /* as calculated */
+    aset.emax=DEC_MAX_MATH;      /* usual bounds */
+    aset.emin=-DEC_MAX_MATH;     /* ..
*/ + aset.clamp=0; /* and no concrete format */ + decLnOp(a, rhs, &aset, &status); /* a=ln(rhs) */ + + /* skip the division if the result so far is infinite, NaN, or */ + /* zero, or there was an error; note NaN from sNaN needs copy */ + if (status&DEC_NaNs && !(status&DEC_sNaN)) break; + if (a->bits&DECSPECIAL || ISZERO(a)) { + decNumberCopy(res, a); /* [will fit] */ + break;} + + /* for ln(10) an extra 3 digits of precision are needed */ + p=set->digits+3; + needbytes=sizeof(decNumber)+(D2U(p)-1)*sizeof(Unit); + if (needbytes>sizeof(bufb)) { /* need malloc space */ + allocbufb=(decNumber *)malloc(needbytes); + if (allocbufb==NULL) { /* hopeless -- abandon */ + status|=DEC_Insufficient_storage; + break;} + b=allocbufb; /* use the allocated space */ + } + decNumberZero(w); /* set up 10... */ + #if DECDPUN==1 + w->lsu[1]=1; w->lsu[0]=0; /* .. */ + #else + w->lsu[0]=10; /* .. */ + #endif + w->digits=2; /* .. */ + + aset.digits=p; + decLnOp(b, w, &aset, &ignore); /* b=ln(10) */ + + aset.digits=set->digits; /* for final divide */ + decDivideOp(res, a, b, &aset, DIVIDE, &status); /* into result */ + } while(0); /* [for break] */ + + if (allocbufa!=NULL) free(allocbufa); /* drop any storage used */ + if (allocbufb!=NULL) free(allocbufb); /* .. */ + #if DECSUBSET + if (allocrhs !=NULL) free(allocrhs); /* .. */ + #endif + /* apply significant status */ + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberLog10 */ + +/* ------------------------------------------------------------------ */ +/* decNumberMax -- compare two Numbers and return the maximum */ +/* */ +/* This computes C = A ? B, returning the maximum by 754R rules */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X?X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberMax(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decCompareOp(res, lhs, rhs, set, COMPMAX, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberMax */ + +/* ------------------------------------------------------------------ */ +/* decNumberMaxMag -- compare and return the maximum by magnitude */ +/* */ +/* This computes C = A ? B, returning the maximum by 754R rules */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X?X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberMaxMag(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decCompareOp(res, lhs, rhs, set, COMPMAXMAG, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberMaxMag */ + +/* ------------------------------------------------------------------ */ +/* decNumberMin -- compare two Numbers and return the minimum */ +/* */ +/* This computes C = A ? B, returning the minimum by 754R rules */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X?X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. 
*/ +/* ------------------------------------------------------------------ */ +decNumber * decNumberMin(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decCompareOp(res, lhs, rhs, set, COMPMIN, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberMin */ + +/* ------------------------------------------------------------------ */ +/* decNumberMinMag -- compare and return the minimum by magnitude */ +/* */ +/* This computes C = A ? B, returning the minimum by 754R rules */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X?X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberMinMag(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decCompareOp(res, lhs, rhs, set, COMPMINMAG, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberMinMag */ + +/* ------------------------------------------------------------------ */ +/* decNumberMinus -- prefix minus operator */ +/* */ +/* This computes C = 0 - A */ +/* */ +/* res is C, the result. C may be A */ +/* rhs is A */ +/* set is the context */ +/* */ +/* See also decNumberCopyNegate for a quiet bitwise version of this. */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +/* Simply use AddOp for the subtract, which will do the necessary. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberMinus(decNumber *res, const decNumber *rhs, + decContext *set) { + decNumber dzero; + uInt status=0; /* accumulator */ + + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; + #endif + + decNumberZero(&dzero); /* make 0 */ + dzero.exponent=rhs->exponent; /* [no coefficient expansion] */ + decAddOp(res, &dzero, rhs, set, DECNEG, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberMinus */ + +/* ------------------------------------------------------------------ */ +/* decNumberNextMinus -- next towards -Infinity */ +/* */ +/* This computes C = A - infinitesimal, rounded towards -Infinity */ +/* */ +/* res is C, the result. C may be A */ +/* rhs is A */ +/* set is the context */ +/* */ +/* This is a generalization of 754r NextDown. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberNextMinus(decNumber *res, const decNumber *rhs, + decContext *set) { + decNumber dtiny; /* constant */ + decContext workset=*set; /* work */ + uInt status=0; /* accumulator */ + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; + #endif + + /* +Infinity is the special case */ + if ((rhs->bits&(DECINF|DECNEG))==DECINF) { + decSetMaxValue(res, set); /* is +ve */ + /* there is no status to set */ + return res; + } + decNumberZero(&dtiny); /* start with 0 */ + dtiny.lsu[0]=1; /* make number that is .. */ + dtiny.exponent=DEC_MIN_EMIN-1; /* .. 
smaller than tiniest */ + workset.round=DEC_ROUND_FLOOR; + decAddOp(res, rhs, &dtiny, &workset, DECNEG, &status); + status&=DEC_Invalid_operation|DEC_sNaN; /* only sNaN Invalid please */ + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberNextMinus */ + +/* ------------------------------------------------------------------ */ +/* decNumberNextPlus -- next towards +Infinity */ +/* */ +/* This computes C = A + infinitesimal, rounded towards +Infinity */ +/* */ +/* res is C, the result. C may be A */ +/* rhs is A */ +/* set is the context */ +/* */ +/* This is a generalization of 754r NextUp. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberNextPlus(decNumber *res, const decNumber *rhs, + decContext *set) { + decNumber dtiny; /* constant */ + decContext workset=*set; /* work */ + uInt status=0; /* accumulator */ + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; + #endif + + /* -Infinity is the special case */ + if ((rhs->bits&(DECINF|DECNEG))==(DECINF|DECNEG)) { + decSetMaxValue(res, set); + res->bits=DECNEG; /* negative */ + /* there is no status to set */ + return res; + } + decNumberZero(&dtiny); /* start with 0 */ + dtiny.lsu[0]=1; /* make number that is .. */ + dtiny.exponent=DEC_MIN_EMIN-1; /* .. smaller than tiniest */ + workset.round=DEC_ROUND_CEILING; + decAddOp(res, rhs, &dtiny, &workset, 0, &status); + status&=DEC_Invalid_operation|DEC_sNaN; /* only sNaN Invalid please */ + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberNextPlus */ + +/* ------------------------------------------------------------------ */ +/* decNumberNextToward -- next towards rhs */ +/* */ +/* This computes C = A +/- infinitesimal, rounded towards */ +/* +/-Infinity in the direction of B, as per 754r nextafter rules */ +/* */ +/* res is C, the result. C may be A or B. */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* This is a generalization of 754r NextAfter. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberNextToward(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + decNumber dtiny; /* constant */ + decContext workset=*set; /* work */ + Int result; /* .. */ + uInt status=0; /* accumulator */ + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + #endif + + if (decNumberIsNaN(lhs) || decNumberIsNaN(rhs)) { + decNaNs(res, lhs, rhs, set, &status); + } + else { /* Is numeric, so no chance of sNaN Invalid, etc. */ + result=decCompare(lhs, rhs, 0); /* sign matters */ + if (result==BADINT) status|=DEC_Insufficient_storage; /* rare */ + else { /* valid compare */ + if (result==0) decNumberCopySign(res, lhs, rhs); /* easy */ + else { /* differ: need NextPlus or NextMinus */ + uByte sub; /* add or subtract */ + if (result<0) { /* lhs<rhs, do nextplus */ + /* -Infinity is the special case */ + if ((lhs->bits&(DECINF|DECNEG))==(DECINF|DECNEG)) { + decSetMaxValue(res, set); + res->bits=DECNEG; /* negative */ + return res; /* there is no status to set */ + } + workset.round=DEC_ROUND_CEILING; + sub=0; /* add, please */ + } /* plus */ + else { /* lhs>rhs, do nextminus */ + /* +Infinity is the special case */ + if ((lhs->bits&(DECINF|DECNEG))==DECINF) { + decSetMaxValue(res, set); + return res; /* there is no status to set */ + } + workset.round=DEC_ROUND_FLOOR; + sub=DECNEG; /* subtract, please */ + } /* minus */ + decNumberZero(&dtiny); /* start with 0 */ + dtiny.lsu[0]=1; /* make number that is .. */ + dtiny.exponent=DEC_MIN_EMIN-1; /* ..
smaller than tiniest */ + decAddOp(res, lhs, &dtiny, &workset, sub, &status); /* + or - */ + /* turn off exceptions if the result is a normal number */ + /* (including Nmin), otherwise let all status through */ + if (decNumberIsNormal(res, set)) status=0; + } /* unequal */ + } /* compare OK */ + } /* numeric */ + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberNextToward */ + +/* ------------------------------------------------------------------ */ +/* decNumberOr -- OR two Numbers, digitwise */ +/* */ +/* This computes C = A | B */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X|X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context (used for result length and error report) */ +/* */ +/* C must have space for set->digits digits. */ +/* */ +/* Logical function restrictions apply (see above); a NaN is */ +/* returned with Invalid_operation if a restriction is violated. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberOr(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + const Unit *ua, *ub; /* -> operands */ + const Unit *msua, *msub; /* -> operand msus */ + Unit *uc, *msuc; /* -> result and its msu */ + Int msudigs; /* digits in res msu */ + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + #endif + + if (lhs->exponent!=0 || decNumberIsSpecial(lhs) || decNumberIsNegative(lhs) + || rhs->exponent!=0 || decNumberIsSpecial(rhs) || decNumberIsNegative(rhs)) { + decStatus(res, DEC_Invalid_operation, set); + return res; + } + /* operands are valid */ + ua=lhs->lsu; /* bottom-up */ + ub=rhs->lsu; /* .. */ + uc=res->lsu; /* .. */ + msua=ua+D2U(lhs->digits)-1; /* -> msu of lhs */ + msub=ub+D2U(rhs->digits)-1; /* -> msu of rhs */ + msuc=uc+D2U(set->digits)-1; /* -> msu of result */ + msudigs=MSUDIGITS(set->digits); /* [faster than remainder] */ + for (; uc<=msuc; ua++, ub++, uc++) { /* Unit loop */ + Unit a, b; /* extract units */ + if (ua>msua) a=0; + else a=*ua; + if (ub>msub) b=0; + else b=*ub; + *uc=0; /* can now write back */ + if (a|b) { /* maybe 1 bits to examine */ + Int i, j; + /* This loop could be unrolled and/or use BIN2BCD tables */ + for (i=0; i<DECDPUN; i++) { + if ((a|b)&1) *uc=*uc+(Unit)powers[i]; /* effect OR */ + j=a%10; + a=a/10; + j|=b%10; + b=b/10; + if (j>1) { + decStatus(res, DEC_Invalid_operation, set); + return res; + } + if (uc==msuc && i==msudigs-1) break; /* just did final digit */ + } /* each digit */ + } /* non-zero */ + } /* each unit */ + /* [here uc-1 is the msu of the result] */ + res->digits=decGetDigits(res->lsu, uc-res->lsu); + res->exponent=0; /* integer */ + res->bits=0; /* sign=0 */ + return res; /* [no status to set] */ + } /* decNumberOr */ + +/* ------------------------------------------------------------------ */ +/* decNumberPlus -- prefix plus operator */ +/* */ +/* This computes C = 0 + A */ +/* */ +/* res is C, the result. C may be A */ +/* rhs is A */ +/* set is the context */ +/* */ +/* See also decNumberCopy for a quiet bitwise version of this. */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +/* This simply uses AddOp; Add will take fast path after preparing A. */ +/* Performance is a concern here, as this routine is often used to */ +/* check operands and apply rounding and overflow/underflow testing.
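
Because of that note, decNumberPlus is the idiomatic way to force an operand into a context, rounding it and raising Inexact/Rounded as needed. A sketch of the idiom, reusing the DECNUMDIGITS/include scaffold and assumptions of the earlier compare sketch; the driver name is hypothetical:

    static void plus_round_sketch(void) {         /* hypothetical driver */
      decContext wide, narrow;
      decNumber x, r;
      char out[DECNUMDIGITS+14];
      decContextDefault(&wide, DEC_INIT_BASE);
      wide.traps=0; wide.digits=9;
      decContextDefault(&narrow, DEC_INIT_BASE);
      narrow.traps=0; narrow.digits=5;            /* narrower target format */
      decNumberFromString(&x, "1.2345678", &wide);
      decNumberPlus(&r, &x, &narrow);             /* +x, rounded to 5 digits */
      /* r is now 1.2346; narrow.status has DEC_Inexact|DEC_Rounded set */
      printf("plus: %s\n", decNumberToString(&r, out));
    }
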
*/ +/* ------------------------------------------------------------------ */ +decNumber * decNumberPlus(decNumber *res, const decNumber *rhs, + decContext *set) { + decNumber dzero; + uInt status=0; /* accumulator */ + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; + #endif + + decNumberZero(&dzero); /* make 0 */ + dzero.exponent=rhs->exponent; /* [no coefficient expansion] */ + decAddOp(res, &dzero, rhs, set, 0, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberPlus */ + +/* ------------------------------------------------------------------ */ +/* decNumberMultiply -- multiply two Numbers */ +/* */ +/* This computes C = A x B */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X*X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberMultiply(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decMultiplyOp(res, lhs, rhs, set, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberMultiply */ + +/* ------------------------------------------------------------------ */ +/* decNumberPower -- raise a number to a power */ +/* */ +/* This computes C = A ** B */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X**X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* */ +/* Mathematical function restrictions apply (see above); a NaN is */ +/* returned with Invalid_operation if a restriction is violated. */ +/* */ +/* However, if -1999999997<=B<=999999999 and B is an integer then the */ +/* restrictions on A and the context are relaxed to the usual bounds, */ +/* for compatibility with the earlier (integer power only) version */ +/* of this function. */ +/* */ +/* When B is an integer, the result may be exact, even if rounded. */ +/* */ +/* The final result is rounded according to the context; it will */ +/* almost always be correctly rounded, but may be up to 1 ulp in */ +/* error in rare cases. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberPower(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + #if DECSUBSET + decNumber *alloclhs=NULL; /* non-NULL if rounded lhs allocated */ + decNumber *allocrhs=NULL; /* .., rhs */ + #endif + decNumber *allocdac=NULL; /* -> allocated acc buffer, iff used */ + decNumber *allocinv=NULL; /* -> allocated 1/x buffer, iff used */ + Int reqdigits=set->digits; /* requested DIGITS */ + Int n; /* rhs in binary */ + Flag rhsint=0; /* 1 if rhs is an integer */ + Flag useint=0; /* 1 if can use integer calculation */ + Flag isoddint=0; /* 1 if rhs is an integer and odd */ + Int i; /* work */ + #if DECSUBSET + Int dropped; /* .. */ + #endif + uInt needbytes; /* buffer size needed */ + Flag seenbit; /* seen a bit while powering */ + Int residue=0; /* rounding residue */ + uInt status=0; /* accumulators */ + uByte bits=0; /* result sign if errors */ + decContext aset; /* working context */ + decNumber dnOne; /* work value 1...
*/ + /* local accumulator buffer [a decNumber, with digits+elength+1 digits] */ + decNumber dacbuff[D2N(DECBUFFER+9)]; + decNumber *dac=dacbuff; /* -> result accumulator */ + /* same again for possible 1/lhs calculation */ + decNumber invbuff[D2N(DECBUFFER+9)]; + + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + #endif + + do { /* protect allocated storage */ + #if DECSUBSET + if (!set->extended) { /* reduce operands and set status, as needed */ + if (lhs->digits>reqdigits) { + alloclhs=decRoundOperand(lhs, set, &status); + if (alloclhs==NULL) break; + lhs=alloclhs; + } + if (rhs->digits>reqdigits) { + allocrhs=decRoundOperand(rhs, set, &status); + if (allocrhs==NULL) break; + rhs=allocrhs; + } + } + #endif + /* [following code does not require input rounding] */ + + /* handle NaNs and rhs Infinity (lhs infinity is harder) */ + if (SPECIALARGS) { + if (decNumberIsNaN(lhs) || decNumberIsNaN(rhs)) { /* NaNs */ + decNaNs(res, lhs, rhs, set, &status); + break;} + if (decNumberIsInfinite(rhs)) { /* rhs Infinity */ + Flag rhsneg=rhs->bits&DECNEG; /* save rhs sign */ + if (decNumberIsNegative(lhs) /* lhs<0 */ + && !decNumberIsZero(lhs)) /* .. */ + status|=DEC_Invalid_operation; + else { /* lhs >=0 */ + decNumberZero(&dnOne); /* set up 1 */ + dnOne.lsu[0]=1; + decNumberCompare(dac, lhs, &dnOne, set); /* lhs ? 1 */ + decNumberZero(res); /* prepare for 0/1/Infinity */ + if (decNumberIsNegative(dac)) { /* lhs<1 */ + if (rhsneg) res->bits|=DECINF; /* +Infinity [else is +0] */ + } + else if (dac->lsu[0]==0) { /* lhs=1 */ + /* 1**Infinity is inexact, so return fully-padded 1.0000 */ + Int shift=set->digits-1; + *res->lsu=1; /* was 0, make int 1 */ + res->digits=decShiftToMost(res->lsu, 1, shift); + res->exponent=-shift; /* make 1.0000... */ + status|=DEC_Inexact|DEC_Rounded; /* deemed inexact */ + } + else { /* lhs>1 */ + if (!rhsneg) res->bits|=DECINF; /* +Infinity [else is +0] */ + } + } /* lhs>=0 */ + break;} + /* [lhs infinity drops through] */ + } /* specials */ + + /* Original rhs may be an integer that fits and is in range */ + n=decGetInt(rhs); + if (n!=BADINT) { /* it is an integer */ + rhsint=1; /* record the fact for 1**n */ + isoddint=(Flag)n&1; /* [works even if big] */ + if (n!=BIGEVEN && n!=BIGODD) /* can use integer path? */ + useint=1; /* looks good */ + } + + if (decNumberIsNegative(lhs) /* -x .. */ + && isoddint) bits=DECNEG; /* .. 
to an odd power */ + + /* handle LHS infinity */ + if (decNumberIsInfinite(lhs)) { /* [NaNs already handled] */ + uByte rbits=rhs->bits; /* save */ + decNumberZero(res); /* prepare */ + if (n==0) *res->lsu=1; /* [-]Inf**0 => 1 */ + else { + /* -Inf**nonint -> error */ + if (!rhsint && decNumberIsNegative(lhs)) { + status|=DEC_Invalid_operation; /* -Inf**nonint is error */ + break;} + if (!(rbits & DECNEG)) bits|=DECINF; /* was not a **-n */ + /* [otherwise will be 0 or -0] */ + res->bits=bits; + } + break;} + + /* similarly handle LHS zero */ + if (decNumberIsZero(lhs)) { + if (n==0) { /* 0**0 => Error */ + #if DECSUBSET + if (!set->extended) { /* [unless subset] */ + decNumberZero(res); + *res->lsu=1; /* return 1 */ + break;} + #endif + status|=DEC_Invalid_operation; + } + else { /* 0**x */ + uByte rbits=rhs->bits; /* save */ + if (rbits & DECNEG) { /* was a 0**(-n) */ + #if DECSUBSET + if (!set->extended) { /* [bad if subset] */ + status|=DEC_Invalid_operation; + break;} + #endif + bits|=DECINF; + } + decNumberZero(res); /* prepare */ + /* [otherwise will be 0 or -0] */ + res->bits=bits; + } + break;} + + /* here both lhs and rhs are finite; rhs==0 is handled in the */ + /* integer path. Next handle the non-integer cases */ + if (!useint) { /* non-integral rhs */ + /* any -ve lhs is bad, as is either operand or context out of */ + /* bounds */ + if (decNumberIsNegative(lhs)) { + status|=DEC_Invalid_operation; + break;} + if (decCheckMath(lhs, set, &status) + || decCheckMath(rhs, set, &status)) break; /* variable status */ + + decContextDefault(&aset, DEC_INIT_DECIMAL64); /* clean context */ + aset.emax=DEC_MAX_MATH; /* usual bounds */ + aset.emin=-DEC_MAX_MATH; /* .. */ + aset.clamp=0; /* and no concrete format */ + + /* calculate the result using exp(ln(lhs)*rhs), which can */ + /* all be done into the accumulator, dac. The precision needed */ + /* is enough to contain the full information in the lhs (which */ + /* is the total digits, including exponent), or the requested */ + /* precision, if larger, + 4; 6 is used for the exponent */ + /* maximum length, and this is also used when it is shorter */ + /* than the requested digits as it greatly reduces the >0.5 ulp */ + /* cases at little cost (because Ln doubles digits each */ + /* iteration so a few extra digits rarely causes an extra */ + /* iteration) */ + aset.digits=MAXI(lhs->digits, set->digits)+6+4; + } /* non-integer rhs */ + + else { /* rhs is in-range integer */ + if (n==0) { /* x**0 = 1 */ + /* (0**0 was handled above) */ + decNumberZero(res); /* result=1 */ + *res->lsu=1; /* .. 
*/ + break;} + /* rhs is a non-zero integer */ + if (n<0) n=-n; /* use abs(n) */ + + aset=*set; /* clone the context */ + aset.round=DEC_ROUND_HALF_EVEN; /* internally use balanced */ + /* calculate the working DIGITS */ + aset.digits=reqdigits+(rhs->digits+rhs->exponent)+2; + #if DECSUBSET + if (!set->extended) aset.digits--; /* use classic precision */ + #endif + /* it's an error if this is more than can be handled */ + if (aset.digits>DECNUMMAXP) {status|=DEC_Invalid_operation; break;} + } /* integer path */ + + /* aset.digits is the count of digits for the accumulator needed */ + /* if accumulator is too long for local storage, then allocate */ + needbytes=sizeof(decNumber)+(D2U(aset.digits)-1)*sizeof(Unit); + /* [needbytes also used below if 1/lhs needed] */ + if (needbytes>sizeof(dacbuff)) { + allocdac=(decNumber *)malloc(needbytes); + if (allocdac==NULL) { /* hopeless -- abandon */ + status|=DEC_Insufficient_storage; + break;} + dac=allocdac; /* use the allocated space */ + } + /* here, aset is set up and accumulator is ready for use */ + + if (!useint) { /* non-integral rhs */ + /* x ** y; special-case x=1 here as it will otherwise always */ + /* reduce to integer 1; decLnOp has a fastpath which detects */ + /* the case of x=1 */ + decLnOp(dac, lhs, &aset, &status); /* dac=ln(lhs) */ + /* [no error possible, as lhs 0 already handled] */ + if (ISZERO(dac)) { /* x==1, 1.0, etc. */ + /* need to return fully-padded 1.0000 etc., but rhsint->1 */ + *dac->lsu=1; /* was 0, make int 1 */ + if (!rhsint) { /* add padding */ + Int shift=set->digits-1; + dac->digits=decShiftToMost(dac->lsu, 1, shift); + dac->exponent=-shift; /* make 1.0000... */ + status|=DEC_Inexact|DEC_Rounded; /* deemed inexact */ + } + } + else { + decMultiplyOp(dac, dac, rhs, &aset, &status); /* dac=dac*rhs */ + decExpOp(dac, dac, &aset, &status); /* dac=exp(dac) */ + } + /* and drop through for final rounding */ + } /* non-integer rhs */ + + else { /* carry on with integer */ + decNumberZero(dac); /* acc=1 */ + *dac->lsu=1; /* .. */ + + /* if a negative power the constant 1 is needed, and if not subset */ + /* invert the lhs now rather than inverting the result later */ + if (decNumberIsNegative(rhs)) { /* was a **-n [hence digits>0] */ + decNumber *inv=invbuff; /* assume use fixed buffer */ + decNumberCopy(&dnOne, dac); /* dnOne=1; [needed now or later] */ + #if DECSUBSET + if (set->extended) { /* need to calculate 1/lhs */ + #endif + /* divide lhs into 1, putting result in dac [dac=1/dac] */ + decDivideOp(dac, &dnOne, lhs, &aset, DIVIDE, &status); + /* now locate or allocate space for the inverted lhs */ + if (needbytes>sizeof(invbuff)) { + allocinv=(decNumber *)malloc(needbytes); + if (allocinv==NULL) { /* hopeless -- abandon */ + status|=DEC_Insufficient_storage; + break;} + inv=allocinv; /* use the allocated space */ + } + /* [inv now points to big-enough buffer or allocated storage] */ + decNumberCopy(inv, dac); /* copy the 1/lhs */ + decNumberCopy(dac, &dnOne); /* restore acc=1 */ + lhs=inv; /* .. and go forward with new lhs */ + #if DECSUBSET + } + #endif + } + + /* Raise-to-the-power loop... */ + seenbit=0; /* set once a 1-bit is encountered */ + for (i=1;;i++){ /* for each bit [top bit ignored] */ + /* abandon if had overflow or terminal underflow */ + if (status & (DEC_Overflow|DEC_Underflow)) { /* interesting? 
*/ + if (status&DEC_Overflow || ISZERO(dac)) break; + } + /* [the following two lines revealed an optimizer bug in a C++ */ + /* compiler, with symptom: 5**3 -> 25, when n=n+n was used] */ + n=n<<1; /* move next bit to testable position */ + if (n<0) { /* top bit is set */ + seenbit=1; /* OK, significant bit seen */ + decMultiplyOp(dac, dac, lhs, &aset, &status); /* dac=dac*x */ + } + if (i==31) break; /* that was the last bit */ + if (!seenbit) continue; /* no need to square 1 */ + decMultiplyOp(dac, dac, dac, &aset, &status); /* dac=dac*dac [square] */ + } /*i*/ /* 32 bits */ + + /* complete internal overflow or underflow processing */ + if (status & (DEC_Overflow|DEC_Underflow)) { + #if DECSUBSET + /* If subset, and power was negative, reverse the kind of -erflow */ + /* [1/x not yet done] */ + if (!set->extended && decNumberIsNegative(rhs)) { + if (status & DEC_Overflow) + status^=DEC_Overflow | DEC_Underflow | DEC_Subnormal; + else { /* trickier -- Underflow may or may not be set */ + status&=~(DEC_Underflow | DEC_Subnormal); /* [one or both] */ + status|=DEC_Overflow; + } + } + #endif + dac->bits=(dac->bits & ~DECNEG) | bits; /* force correct sign */ + /* round subnormals [to set.digits rather than aset.digits] */ + /* or set overflow result similarly as required */ + decFinalize(dac, set, &residue, &status); + decNumberCopy(res, dac); /* copy to result (is now OK length) */ + break; + } + + #if DECSUBSET + if (!set->extended && /* subset math */ + decNumberIsNegative(rhs)) { /* was a **-n [hence digits>0] */ + /* so divide result into 1 [dac=1/dac] */ + decDivideOp(dac, &dnOne, dac, &aset, DIVIDE, &status); + } + #endif + } /* rhs integer path */ + + /* reduce result to the requested length and copy to result */ + decCopyFit(res, dac, set, &residue, &status); + decFinish(res, set, &residue, &status); /* final cleanup */ + #if DECSUBSET + if (!set->extended) decTrim(res, set, 0, &dropped); /* trailing zeros */ + #endif + } while(0); /* end protected */ + + if (allocdac!=NULL) free(allocdac); /* drop any storage used */ + if (allocinv!=NULL) free(allocinv); /* .. */ + #if DECSUBSET + if (alloclhs!=NULL) free(alloclhs); /* .. */ + if (allocrhs!=NULL) free(allocrhs); /* .. */ + #endif + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberPower */ + +/* ------------------------------------------------------------------ */ +/* decNumberQuantize -- force exponent to requested value */ +/* */ +/* This computes C = op(A, B), where op adjusts the coefficient */ +/* of C (by rounding or shifting) such that the exponent (-scale) */ +/* of C has exponent of B. The numerical value of C will equal A, */ +/* except for the effects of any rounding that occurred. */ +/* */ +/* res is C, the result. C may be A or B */ +/* lhs is A, the number to adjust */ +/* rhs is B, the number with exponent to match */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* */ +/* Unless there is an error or the result is infinite, the exponent */ +/* after the operation is guaranteed to be equal to that of B. 
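
A worked example may help fix the direction of the Quantize operands (B supplies only its exponent, not its value). A sketch under the same assumptions and scaffold as the earlier ones; the driver name is hypothetical:

    static void quantize_sketch(void) {           /* hypothetical driver */
      decContext set;
      decNumber num, pattern, r;
      char out[DECNUMDIGITS+14];
      decContextDefault(&set, DEC_INIT_BASE);
      set.traps=0; set.digits=DECNUMDIGITS;
      decNumberFromString(&num, "217", &set);
      decNumberFromString(&pattern, "1E+1", &set); /* only the exponent matters */
      decNumberQuantize(&r, &num, &pattern, &set);
      printf("quantized: %s\n", decNumberToString(&r, out)); /* 2.2E+2 */
      /* set.status now includes DEC_Inexact|DEC_Rounded */
    }
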
*/ +/* ------------------------------------------------------------------ */ +decNumber * decNumberQuantize(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decQuantizeOp(res, lhs, rhs, set, 1, &status); + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberQuantize */ + +/* ------------------------------------------------------------------ */ +/* decNumberReduce -- remove trailing zeros */ +/* */ +/* This computes C = 0 + A, and normalizes the result */ +/* */ +/* res is C, the result. C may be A */ +/* rhs is A */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +/* Previously known as Normalize */ +decNumber * decNumberNormalize(decNumber *res, const decNumber *rhs, + decContext *set) { + return decNumberReduce(res, rhs, set); + } /* decNumberNormalize */ + +decNumber * decNumberReduce(decNumber *res, const decNumber *rhs, + decContext *set) { + #if DECSUBSET + decNumber *allocrhs=NULL; /* non-NULL if rounded rhs allocated */ + #endif + uInt status=0; /* as usual */ + Int residue=0; /* as usual */ + Int dropped; /* work */ + + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; + #endif + + do { /* protect allocated storage */ + #if DECSUBSET + if (!set->extended) { + /* reduce operand and set lostDigits status, as needed */ + if (rhs->digits>set->digits) { + allocrhs=decRoundOperand(rhs, set, &status); + if (allocrhs==NULL) break; + rhs=allocrhs; + } + } + #endif + /* [following code does not require input rounding] */ + + /* Infinities copy through; NaNs need usual treatment */ + if (decNumberIsNaN(rhs)) { + decNaNs(res, rhs, NULL, set, &status); + break; + } + + /* reduce result to the requested length and copy to result */ + decCopyFit(res, rhs, set, &residue, &status); /* copy & round */ + decFinish(res, set, &residue, &status); /* cleanup/set flags */ + decTrim(res, set, 1, &dropped); /* normalize in place */ + } while(0); /* end protected */ + + #if DECSUBSET + if (allocrhs !=NULL) free(allocrhs); /* .. */ + #endif + if (status!=0) decStatus(res, status, set);/* then report status */ + return res; + } /* decNumberReduce */ + +/* ------------------------------------------------------------------ */ +/* decNumberRescale -- force exponent to requested value */ +/* */ +/* This computes C = op(A, B), where op adjusts the coefficient */ +/* of C (by rounding or shifting) such that the exponent (-scale) */ +/* of C has the value B. The numerical value of C will equal A, */ +/* except for the effects of any rounding that occurred. */ +/* */ +/* res is C, the result. C may be A or B */ +/* lhs is A, the number to adjust */ +/* rhs is B, the requested exponent */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* */ +/* Unless there is an error or the result is infinite, the exponent */ +/* after the operation is guaranteed to be equal to B. 
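
Rescale differs from Quantize only in how B is read: here B's numerical value, not its exponent, is the requested exponent. A short sketch (same assumptions and scaffold as above; hypothetical driver):

    static void rescale_sketch(void) {            /* hypothetical driver */
      decContext set;
      decNumber num, exp, r;
      char out[DECNUMDIGITS+14];
      decContextDefault(&set, DEC_INIT_BASE);
      set.traps=0; set.digits=DECNUMDIGITS;
      decNumberFromString(&num, "1234", &set);
      decNumberFromString(&exp, "1", &set);       /* requested exponent is 1 */
      decNumberRescale(&r, &num, &exp, &set);     /* coefficient 123, exponent 1 */
      printf("rescaled: %s\n", decNumberToString(&r, out)); /* 1.23E+3 */
    }
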
*/ +/* ------------------------------------------------------------------ */ +decNumber * decNumberRescale(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decQuantizeOp(res, lhs, rhs, set, 0, &status); + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberRescale */ + +/* ------------------------------------------------------------------ */ +/* decNumberRemainder -- divide and return remainder */ +/* */ +/* This computes C = A % B */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X%X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberRemainder(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decDivideOp(res, lhs, rhs, set, REMAINDER, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberRemainder */ + +/* ------------------------------------------------------------------ */ +/* decNumberRemainderNear -- divide and return remainder from nearest */ +/* */ +/* This computes C = A % B, where % is the IEEE remainder operator */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X%X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberRemainderNear(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + decDivideOp(res, lhs, rhs, set, REMNEAR, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberRemainderNear */ + +/* ------------------------------------------------------------------ */ +/* decNumberRotate -- rotate the coefficient of a Number left/right */ +/* */ +/* This computes C = A rot B (in base ten and rotating set->digits */ +/* digits). */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=XrotX) */ +/* lhs is A */ +/* rhs is B, the number of digits to rotate (-ve to right) */ +/* set is the context */ +/* */ +/* The digits of the coefficient of A are rotated to the left (if B */ +/* is positive) or to the right (if B is negative) without adjusting */ +/* the exponent or the sign of A. If lhs->digits is less than */ +/* set->digits the coefficient is padded with zeros on the left */ +/* before the rotate. Any leading zeros in the result are removed */ +/* as usual. */ +/* */ +/* B must be an integer (q=0) and in the range -set->digits through */ +/* +set->digits. */ +/* C must have space for set->digits digits. */ +/* NaNs are propagated as usual. Infinities are unaffected (but */ +/* B must be valid). No status is set unless B is invalid or an */ +/* operand is an sNaN. 
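
Since the rotation width is set->digits rather than lhs->digits, an example makes the zero-padding behaviour concrete (same assumptions and scaffold as the earlier sketches; hypothetical driver):

    static void rotate_sketch(void) {             /* hypothetical driver */
      decContext set;
      decNumber num, by, r;
      char out[DECNUMDIGITS+14];
      decContextDefault(&set, DEC_INIT_BASE);
      set.traps=0;
      set.digits=9;                               /* rotation is over 9 digits */
      decNumberFromString(&num, "123456789", &set);
      decNumberFromString(&by, "2", &set);        /* rotate left two digits */
      decNumberRotate(&r, &num, &by, &set);
      printf("rotated: %s\n", decNumberToString(&r, out)); /* 345678912 */
    }
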
*/ +/* ------------------------------------------------------------------ */ +decNumber * decNumberRotate(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + Int rotate; /* rhs as an Int */ + + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + #endif + + /* NaNs propagate as normal */ + if (decNumberIsNaN(lhs) || decNumberIsNaN(rhs)) + decNaNs(res, lhs, rhs, set, &status); + /* rhs must be an integer */ + else if (decNumberIsInfinite(rhs) || rhs->exponent!=0) + status=DEC_Invalid_operation; + else { /* both numeric, rhs is an integer */ + rotate=decGetInt(rhs); /* [cannot fail] */ + if (rotate==BADINT /* something bad .. */ + || rotate==BIGODD || rotate==BIGEVEN /* .. very big .. */ + || abs(rotate)>set->digits) /* .. or out of range */ + status=DEC_Invalid_operation; + else { /* rhs is OK */ + decNumberCopy(res, lhs); + /* convert -ve rotate to equivalent positive rotation */ + if (rotate<0) rotate=set->digits+rotate; + if (rotate!=0 && rotate!=set->digits /* zero or full rotation */ + && !decNumberIsInfinite(res)) { /* lhs was infinite */ + /* left-rotate to do; 0 < rotate < set->digits */ + uInt units, shift; /* work */ + uInt msudigits; /* digits in result msu */ + Unit *msu=res->lsu+D2U(res->digits)-1; /* current msu */ + Unit *msumax=res->lsu+D2U(set->digits)-1; /* rotation msu */ + for (msu++; msu<=msumax; msu++) *msu=0; /* ensure high units=0 */ + res->digits=set->digits; /* now full-length */ + msudigits=MSUDIGITS(res->digits); /* actual digits in msu */ + + /* rotation here is done in-place, in three steps */ + /* 1. shift all to least up to one unit to unit-align final */ + /* lsd [any digits shifted out are rotated to the left, */ + /* abutted to the original msd (which may require split)] */ + /* */ + /* [if there are no whole units left to rotate, the */ + /* rotation is now complete] */ + /* */ + /* 2. shift to least, from below the split point only, so that */ + /* the final msd is in the right place in its Unit [any */ + /* digits shifted out will fit exactly in the current msu, */ + /* left aligned, no split required] */ + /* */ + /* 3. rotate all the units by reversing left part, right */ + /* part, and then whole */ + /* */ + /* example: rotate right 8 digits (2 units + 2), DECDPUN=3. */ + /* */ + /* start: 00a bcd efg hij klm npq */ + /* */ + /* 1a 000 0ab cde fgh|ijk lmn [pq saved] */ + /* 1b 00p qab cde fgh|ijk lmn */ + /* */ + /* 2a 00p qab cde fgh|00i jkl [mn saved] */ + /* 2b mnp qab cde fgh|00i jkl */ + /* */ + /* 3a fgh cde qab mnp|00i jkl */ + /* 3b fgh cde qab mnp|jkl 00i */ + /* 3c 00i jkl mnp qab cde fgh */ + + /* Step 1: amount to shift is the partial right-rotate count */ + rotate=set->digits-rotate; /* make it right-rotate */ + units=rotate/DECDPUN; /* whole units to rotate */ + shift=rotate%DECDPUN; /* left-over digits count */ + if (shift>0) { /* not an exact number of units */ + uInt save=res->lsu[0]%powers[shift]; /* save low digit(s) */ + decShiftToLeast(res->lsu, D2U(res->digits), shift); + if (shift>msudigits) { /* msumax-1 needs >0 digits */ + uInt rem=save%powers[shift-msudigits];/* split save */ + *msumax=(Unit)(save/powers[shift-msudigits]); /* and insert */ + *(msumax-1)=*(msumax-1) + +(Unit)(rem*powers[DECDPUN-(shift-msudigits)]); /* .. */ + } + else { /* all fits in msumax */ + *msumax=*msumax+(Unit)(save*powers[msudigits-shift]); /* [maybe *1] */ + } + } /* digits shift needed */ + + /* If whole units to rotate... 
*/ + if (units>0) { /* some to do */ + /* Step 2: the units to touch are the whole ones in rotate, */ + /* if any, and the shift is DECDPUN-msudigits (which may be */ + /* 0, again) */ + shift=DECDPUN-msudigits; + if (shift>0) { /* not an exact number of units */ + uInt save=res->lsu[0]%powers[shift]; /* save low digit(s) */ + decShiftToLeast(res->lsu, units, shift); + *msumax=*msumax+(Unit)(save*powers[msudigits]); + } /* partial shift needed */ + + /* Step 3: rotate the units array using triple reverse */ + /* (reversing is easy and fast) */ + decReverse(res->lsu+units, msumax); /* left part */ + decReverse(res->lsu, res->lsu+units-1); /* right part */ + decReverse(res->lsu, msumax); /* whole */ + } /* whole units to rotate */ + /* the rotation may have left an undetermined number of zeros */ + /* on the left, so true length needs to be calculated */ + res->digits=decGetDigits(res->lsu, msumax-res->lsu+1); + } /* rotate needed */ + } /* rhs OK */ + } /* numerics */ + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberRotate */ + +/* ------------------------------------------------------------------ */ +/* decNumberSameQuantum -- test for equal exponents */ +/* */ +/* res is the result number, which will contain either 0 or 1 */ +/* lhs is a number to test */ +/* rhs is the second (usually a pattern) */ +/* */ +/* No errors are possible and no context is needed. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberSameQuantum(decNumber *res, const decNumber *lhs, + const decNumber *rhs) { + Unit ret=0; /* return value */ + + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, DECUNCONT)) return res; + #endif + + if (SPECIALARGS) { + if (decNumberIsNaN(lhs) && decNumberIsNaN(rhs)) ret=1; + else if (decNumberIsInfinite(lhs) && decNumberIsInfinite(rhs)) ret=1; + /* [anything else with a special gives 0] */ + } + else if (lhs->exponent==rhs->exponent) ret=1; + + decNumberZero(res); /* OK to overwrite an operand now */ + *res->lsu=ret; + return res; + } /* decNumberSameQuantum */ + +/* ------------------------------------------------------------------ */ +/* decNumberScaleB -- multiply by a power of 10 */ +/* */ +/* This computes C = A x 10**B where B is an integer (q=0) with */ +/* maximum magnitude 2*(emax+digits) */ +/* */ +/* res is C, the result. C may be A or B */ +/* lhs is A, the number to adjust */ +/* rhs is B, the requested power of ten to use */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* */ +/* The result may underflow or overflow. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberScaleB(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + Int reqexp; /* requested exponent change [B] */ + uInt status=0; /* accumulator */ + Int residue; /* work */ + + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + #endif + + /* Handle special values except lhs infinite */ + if (decNumberIsNaN(lhs) || decNumberIsNaN(rhs)) + decNaNs(res, lhs, rhs, set, &status); + /* rhs must be an integer */ + else if (decNumberIsInfinite(rhs) || rhs->exponent!=0) + status=DEC_Invalid_operation; + else { + /* lhs is a number; rhs is a finite with q==0 */ + reqexp=decGetInt(rhs); /* [cannot fail] */ + if (reqexp==BADINT /* something bad .. */ + || reqexp==BIGODD || reqexp==BIGEVEN /* .. very big .. */ + || abs(reqexp)>(2*(set->digits+set->emax))) /* .. 
or out of range */ + status=DEC_Invalid_operation; + else { /* rhs is OK */ + decNumberCopy(res, lhs); /* all done if infinite lhs */ + if (!decNumberIsInfinite(res)) { /* prepare to scale */ + res->exponent+=reqexp; /* adjust the exponent */ + residue=0; + decFinalize(res, set, &residue, &status); /* .. and check */ + } /* finite LHS */ + } /* rhs OK */ + } /* rhs finite */ + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberScaleB */ + +/* ------------------------------------------------------------------ */ +/* decNumberShift -- shift the coefficient of a Number left or right */ +/* */ +/* This computes C = A << B or C = A >> -B (in base ten). */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X<<X) */ +/* lhs is A */ +/* rhs is B, the number of digits to shift (-ve to right) */ +/* set is the context */ +/* */ +/* The digits of the coefficient of A are shifted to the left (if */ +/* B is positive) or to the right (if B is negative) without */ +/* adjusting the exponent or the sign of A. */ +/* */ +/* B must be an integer (q=0) and in the range -set->digits through */ +/* +set->digits. */ +/* C must have space for set->digits digits. */ +/* NaNs are propagated as usual. Infinities are unaffected (but */ +/* B must be valid). No status is set unless B is invalid or an */ +/* operand is an sNaN. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberShift(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + Int shift; /* rhs as an Int */ + + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + #endif + + /* NaNs propagate as normal */ + if (decNumberIsNaN(lhs) || decNumberIsNaN(rhs)) + decNaNs(res, lhs, rhs, set, &status); + /* rhs must be an integer */ + else if (decNumberIsInfinite(rhs) || rhs->exponent!=0) + status=DEC_Invalid_operation; + else { /* both numeric, rhs is an integer */ + shift=decGetInt(rhs); /* [cannot fail] */ + if (shift==BADINT /* something bad .. */ + || shift==BIGODD || shift==BIGEVEN /* .. very big .. */ + || abs(shift)>set->digits) /* .. or out of range */ + status=DEC_Invalid_operation; + else { /* rhs is OK */ + decNumberCopy(res, lhs); + if (shift!=0 && !decNumberIsInfinite(res)) { /* something to do */ + if (shift>0) { /* to left */ + if (shift==set->digits) { /* removing all */ + *res->lsu=0; /* so place 0 */ + res->digits=1; /* .. */ + } + else { /* */ + /* first remove leading digits if necessary */ + if (res->digits+shift>set->digits) { + decDecap(res, res->digits+shift-set->digits); + /* that updated res->digits; may have gone to 1 (for a */ + /* single digit or for zero) */ + } + if (res->digits>1 || *res->lsu) /* if non-zero.. */ + res->digits=decShiftToMost(res->lsu, res->digits, shift); + } /* partial left */ + } /* left */ + else { /* to right */ + if (-shift>=res->digits) { /* discarding all */ + *res->lsu=0; /* so place 0 */ + res->digits=1; /* .. */ + } + else { + decShiftToLeast(res->lsu, D2U(res->digits), -shift); + res->digits-=(-shift); + } + } /* to right */ + } /* non-0 non-Inf shift */ + } /* rhs OK */ + } /* numerics */ + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberShift */ + +/* ------------------------------------------------------------------ */ +/* decNumberSquareRoot -- square root operator */ +/* */ +/* This computes C = squareroot(A) */ +/* */ +/* res is C, the result. C may be A */ +/* rhs is A */ +/* set is the context; note that rounding mode has no effect */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +/* This uses the following varying-precision algorithm in: */ +/* */ +/* Properly Rounded Variable Precision Square Root, T. E. Hull and */ +/* A.
Abrham, ACM Transactions on Mathematical Software, Vol 11 #3, */ +/* pp229-237, ACM, September 1985. */ +/* */ +/* The square-root is calculated using Newton's method, after which */ +/* a check is made to ensure the result is correctly rounded. */ +/* */ +/* % [Reformatted original Numerical Turing source code follows.] */ +/* function sqrt(x : real) : real */ +/* % sqrt(x) returns the properly rounded approximation to the square */ +/* % root of x, in the precision of the calling environment, or it */ +/* % fails if x < 0. */ +/* % t e hull and a abrham, august, 1984 */ +/* if x <= 0 then */ +/* if x < 0 then */ +/* assert false */ +/* else */ +/* result 0 */ +/* end if */ +/* end if */ +/* var f := setexp(x, 0) % fraction part of x [0.1 <= x < 1] */ +/* var e := getexp(x) % exponent part of x */ +/* var approx : real */ +/* if e mod 2 = 0 then */ +/* approx := .259 + .819 * f % approx to root of f */ +/* else */ +/* f := f/10 % adjustments */ +/* e := e + 1 % for odd */ +/* approx := .0819 + 2.59 * f % exponent */ +/* end if */ +/* */ +/* var p:= 3 */ +/* const maxp := currentprecision + 2 */ +/* loop */ +/* p := min(2*p - 2, maxp) % p = 4,6,10, . . . , maxp */ +/* precision p */ +/* approx := .5 * (approx + f/approx) */ +/* exit when p = maxp */ +/* end loop */ +/* */ +/* % approx is now within 1 ulp of the properly rounded square root */ +/* % of f; to ensure proper rounding, compare squares of (approx - */ +/* % 1/2 ulp) and (approx + 1/2 ulp) with f. */ +/* p := currentprecision */ +/* begin */ +/* precision p + 2 */ +/* const approxsubhalf := approx - setexp(.5, -p) */ +/* if mulru(approxsubhalf, approxsubhalf) > f then */ +/* approx := approx - setexp(.1, -p + 1) */ +/* else */ +/* const approxaddhalf := approx + setexp(.5, -p) */ +/* if mulrd(approxaddhalf, approxaddhalf) < f then */ +/* approx := approx + setexp(.1, -p + 1) */ +/* end if */ +/* end if */ +/* end */ +/* result setexp(approx, e div 2) % fix exponent */ +/* end sqrt */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberSquareRoot(decNumber *res, const decNumber *rhs, + decContext *set) { + decContext workset, approxset; /* work contexts */ + decNumber dzero; /* used for constant zero */ + Int maxp; /* largest working precision */ + Int workp; /* working precision */ + Int residue=0; /* rounding residue */ + uInt status=0, ignore=0; /* status accumulators */ + uInt rstatus; /* .. */ + Int exp; /* working exponent */ + Int ideal; /* ideal (preferred) exponent */ + Int needbytes; /* work */ + Int dropped; /* ..
*/ + + #if DECSUBSET + decNumber *allocrhs=NULL; /* non-NULL if rounded rhs allocated */ + #endif + /* buffer for f [needs +1 in case DECBUFFER 0] */ + decNumber buff[D2N(DECBUFFER+1)]; + /* buffer for a [needs +2 to match likely maxp] */ + decNumber bufa[D2N(DECBUFFER+2)]; + /* buffer for temporary, b [must be same size as a] */ + decNumber bufb[D2N(DECBUFFER+2)]; + decNumber *allocbuff=NULL; /* -> allocated buff, iff allocated */ + decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */ + decNumber *allocbufb=NULL; /* -> allocated bufb, iff allocated */ + decNumber *f=buff; /* reduced fraction */ + decNumber *a=bufa; /* approximation to result */ + decNumber *b=bufb; /* intermediate result */ + /* buffer for temporary variable, up to 3 digits */ + decNumber buft[D2N(3)]; + decNumber *t=buft; /* up-to-3-digit constant or work */ + + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; + #endif + + do { /* protect allocated storage */ + #if DECSUBSET + if (!set->extended) { + /* reduce operand and set lostDigits status, as needed */ + if (rhs->digits>set->digits) { + allocrhs=decRoundOperand(rhs, set, &status); + if (allocrhs==NULL) break; + /* [Note: 'f' allocation below could reuse this buffer if */ + /* used, but as this is rare they are kept separate for clarity.] */ + rhs=allocrhs; + } + } + #endif + /* [following code does not require input rounding] */ + + /* handle infinities and NaNs */ + if (SPECIALARG) { + if (decNumberIsInfinite(rhs)) { /* an infinity */ + if (decNumberIsNegative(rhs)) status|=DEC_Invalid_operation; + else decNumberCopy(res, rhs); /* +Infinity */ + } + else decNaNs(res, rhs, NULL, set, &status); /* a NaN */ + break; + } + + /* calculate the ideal (preferred) exponent [floor(exp/2)] */ + /* [We would like to write: ideal=rhs->exponent>>1, but this */ + /* generates a compiler warning. Generated code is the same.] */ + ideal=(rhs->exponent&~1)/2; /* target */ + + /* handle zeros */ + if (ISZERO(rhs)) { + decNumberCopy(res, rhs); /* could be 0 or -0 */ + res->exponent=ideal; /* use the ideal [safe] */ + /* use decFinish to clamp any out-of-range exponent, etc. */ + decFinish(res, set, &residue, &status); + break; + } + + /* any other -x is an oops */ + if (decNumberIsNegative(rhs)) { + status|=DEC_Invalid_operation; + break; + } + + /* space is needed for three working variables */ + /* f -- the same precision as the RHS, reduced to 0.01->0.99... 
*/ + /* a -- Hull's approximation -- precision, when assigned, is */ + /* currentprecision+1 or the input argument precision, */ + /* whichever is larger (+2 for use as temporary) */ + /* b -- intermediate temporary result (same size as a) */ + /* if any is too long for local storage, then allocate */ + workp=MAXI(set->digits+1, rhs->digits); /* actual rounding precision */ + maxp=workp+2; /* largest working precision */ + + needbytes=sizeof(decNumber)+(D2U(rhs->digits)-1)*sizeof(Unit); + if (needbytes>(Int)sizeof(buff)) { + allocbuff=(decNumber *)malloc(needbytes); + if (allocbuff==NULL) { /* hopeless -- abandon */ + status|=DEC_Insufficient_storage; + break;} + f=allocbuff; /* use the allocated space */ + } + /* a and b both need to be able to hold a maxp-length number */ + needbytes=sizeof(decNumber)+(D2U(maxp)-1)*sizeof(Unit); + if (needbytes>(Int)sizeof(bufa)) { /* [same applies to b] */ + allocbufa=(decNumber *)malloc(needbytes); + allocbufb=(decNumber *)malloc(needbytes); + if (allocbufa==NULL || allocbufb==NULL) { /* hopeless */ + status|=DEC_Insufficient_storage; + break;} + a=allocbufa; /* use the allocated spaces */ + b=allocbufb; /* .. */ + } + + /* copy rhs -> f, save exponent, and reduce so 0.1 <= f < 1 */ + decNumberCopy(f, rhs); + exp=f->exponent+f->digits; /* adjusted to Hull rules */ + f->exponent=-(f->digits); /* to range */ + + /* set up working context */ + decContextDefault(&workset, DEC_INIT_DECIMAL64); + + /* [Until further notice, no error is possible and status bits */ + /* (Rounded, etc.) should be ignored, not accumulated.] */ + + /* Calculate initial approximation, and allow for odd exponent */ + workset.digits=workp; /* p for initial calculation */ + t->bits=0; t->digits=3; + a->bits=0; a->digits=3; + if ((exp & 1)==0) { /* even exponent */ + /* Set t=0.259, a=0.819 */ + t->exponent=-3; + a->exponent=-3; + #if DECDPUN>=3 + t->lsu[0]=259; + a->lsu[0]=819; + #elif DECDPUN==2 + t->lsu[0]=59; t->lsu[1]=2; + a->lsu[0]=19; a->lsu[1]=8; + #else + t->lsu[0]=9; t->lsu[1]=5; t->lsu[2]=2; + a->lsu[0]=9; a->lsu[1]=1; a->lsu[2]=8; + #endif + } + else { /* odd exponent */ + /* Set t=0.0819, a=2.59 */ + f->exponent--; /* f=f/10 */ + exp++; /* e=e+1 */ + t->exponent=-4; + a->exponent=-2; + #if DECDPUN>=3 + t->lsu[0]=819; + a->lsu[0]=259; + #elif DECDPUN==2 + t->lsu[0]=19; t->lsu[1]=8; + a->lsu[0]=59; a->lsu[1]=2; + #else + t->lsu[0]=9; t->lsu[1]=1; t->lsu[2]=8; + a->lsu[0]=9; a->lsu[1]=5; a->lsu[2]=2; + #endif + } + decMultiplyOp(a, a, f, &workset, &ignore); /* a=a*f */ + decAddOp(a, a, t, &workset, 0, &ignore); /* ..+t */ + /* [a is now the initial approximation for sqrt(f), calculated with */ + /* currentprecision, which is also a's precision.] */ + + /* the main calculation loop */ + decNumberZero(&dzero); /* make 0 */ + decNumberZero(t); /* set t = 0.5 */ + t->lsu[0]=5; /* .. */ + t->exponent=-1; /* .. */ + workset.digits=3; /* initial p */ + for (;;) { + /* set p to min(2*p - 2, maxp) [hence 3; or: 4, 6, 10, ... 
, maxp] */ + workset.digits=workset.digits*2-2; + if (workset.digits>maxp) workset.digits=maxp; + /* a = 0.5 * (a + f/a) */ + /* [calculated at p then rounded to currentprecision] */ + decDivideOp(b, f, a, &workset, DIVIDE, &ignore); /* b=f/a */ + decAddOp(b, b, a, &workset, 0, &ignore); /* b=b+a */ + decMultiplyOp(a, b, t, &workset, &ignore); /* a=b*0.5 */ + if (a->digits==maxp) break; /* have required digits */ + } /* loop */ + + /* Here, 0.1 <= a < 1 [Hull], and a has maxp digits */ + /* now reduce to length, etc.; this needs to be done with a */ + /* having the correct exponent so as to handle subnormals */ + /* correctly */ + approxset=*set; /* get emin, emax, etc. */ + approxset.round=DEC_ROUND_HALF_EVEN; + a->exponent+=exp/2; /* set correct exponent */ + + rstatus=0; /* clear status */ + residue=0; /* .. and accumulator */ + decCopyFit(a, a, &approxset, &residue, &rstatus); /* reduce (if needed) */ + decFinish(a, &approxset, &residue, &rstatus); /* clean and finalize */ + + /* Overflow was possible if the input exponent was out-of-range, */ + /* in which case quit */ + if (rstatus&DEC_Overflow) { + status=rstatus; /* use the status as-is */ + decNumberCopy(res, a); /* copy to result */ + break; + } + + /* Preserve status except Inexact/Rounded */ + status|=(rstatus & ~(DEC_Rounded|DEC_Inexact)); + + /* Carry out the Hull correction */ + a->exponent-=exp/2; /* back to 0.1->1 */ + + /* a is now at final precision and within 1 ulp of the properly */ + /* rounded square root of f; to ensure proper rounding, compare */ + /* squares of (a - 1/2 ulp) and (a + 1/2 ulp) with f. */ + /* Here workset.digits=maxp and t=0.5, and a->digits determines */ + /* the ulp */ + workset.digits--; /* maxp-1 is OK now */ + t->exponent=-a->digits-1; /* make 0.5 ulp */ + decAddOp(b, a, t, &workset, DECNEG, &ignore); /* b = a - 0.5 ulp */ + workset.round=DEC_ROUND_UP; + decMultiplyOp(b, b, b, &workset, &ignore); /* b = mulru(b, b) */ + decCompareOp(b, f, b, &workset, COMPARE, &ignore); /* b ? f, reversed */ + if (decNumberIsNegative(b)) { /* f < b [i.e., b > f] */ + /* this is the more common adjustment, though both are rare */ + t->exponent++; /* make 1.0 ulp */ + t->lsu[0]=1; /* .. */ + decAddOp(a, a, t, &workset, DECNEG, &ignore); /* a = a - 1 ulp */ + /* assign to approx [round to length] */ + approxset.emin-=exp/2; /* adjust to match a */ + approxset.emax-=exp/2; + decAddOp(a, &dzero, a, &approxset, 0, &ignore); + } + else { + decAddOp(b, a, t, &workset, 0, &ignore); /* b = a + 0.5 ulp */ + workset.round=DEC_ROUND_DOWN; + decMultiplyOp(b, b, b, &workset, &ignore); /* b = mulrd(b, b) */ + decCompareOp(b, b, f, &workset, COMPARE, &ignore); /* b ? f */ + if (decNumberIsNegative(b)) { /* b < f */ + t->exponent++; /* make 1.0 ulp */ + t->lsu[0]=1; /* .. */ + decAddOp(a, a, t, &workset, 0, &ignore); /* a = a + 1 ulp */ + /* assign to approx [round to length] */ + approxset.emin-=exp/2; /* adjust to match a */ + approxset.emax-=exp/2; + decAddOp(a, &dzero, a, &approxset, 0, &ignore); + } + } + /* [no errors are possible in the above, and rounding/inexact during */ + /* estimation are irrelevant, so status was not accumulated] */ + + /* Here, 0.1 <= a < 1 (still), so adjust back */ + a->exponent+=exp/2; /* set correct exponent */ + + /* count droppable zeros [after any subnormal rounding] by */ + /* trimming a copy */ + decNumberCopy(b, a); + decTrim(b, set, 1, &dropped); /* [drops trailing zeros] */ + + /* Set Inexact and Rounded.
The answer can only be exact if */ + /* it is short enough so that squaring it could fit in workp digits, */ + /* and it cannot have trailing zeros due to clamping, so these are */ + /* the only (relatively rare) conditions where a careful check is needed */ + if (b->digits*2-1 > workp && !set->clamp) { /* cannot fit */ + status|=DEC_Inexact|DEC_Rounded; + } + else { /* could be exact/unrounded */ + uInt mstatus=0; /* local status */ + decMultiplyOp(b, b, b, &workset, &mstatus); /* try the multiply */ + if (mstatus&DEC_Overflow) { /* result just won't fit */ + status|=DEC_Inexact|DEC_Rounded; + } + else { /* plausible */ + decCompareOp(t, b, rhs, &workset, COMPARE, &mstatus); /* b ? rhs */ + if (!ISZERO(t)) status|=DEC_Inexact|DEC_Rounded; /* not equal */ + else { /* is Exact */ + /* here, dropped is the count of trailing zeros in 'a' */ + /* use closest exponent to ideal... */ + Int todrop=ideal-a->exponent; /* most that can be dropped */ + if (todrop<0) status|=DEC_Rounded; /* ideally would add 0s */ + else { /* unrounded */ + if (dropped<todrop) todrop=dropped; /* clamp to those available */ + if (todrop>0) { /* have some to drop */ + decShiftToLeast(a->lsu, D2U(a->digits), todrop); + a->exponent+=todrop; /* maintain numerical value */ + a->digits-=todrop; /* new length */ + } + } + } + } + } + + /* double-check Underflow, as perhaps the result could not have */ + /* been subnormal (initial argument too big), or it is now Exact */ + if (status&DEC_Underflow) { + Int ae=rhs->exponent+rhs->digits-1; /* adjusted exponent */ + /* check if truly subnormal */ + #if DECEXTFLAG /* DEC_Subnormal too */ + if (ae>=set->emin*2) status&=~(DEC_Subnormal|DEC_Underflow); + #else + if (ae>=set->emin*2) status&=~DEC_Underflow; + #endif + /* check if truly inexact */ + if (!(status&DEC_Inexact)) status&=~DEC_Underflow; + } + + decNumberCopy(res, a); /* a is now the result */ + } while(0); /* end protected */ + + if (allocbuff!=NULL) free(allocbuff); /* drop any storage used */ + if (allocbufa!=NULL) free(allocbufa); /* .. */ + if (allocbufb!=NULL) free(allocbufb); /* .. */ + #if DECSUBSET + if (allocrhs !=NULL) free(allocrhs); /* .. */ + #endif + if (status!=0) decStatus(res, status, set);/* then report status */ + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberSquareRoot */ + +/* ------------------------------------------------------------------ */ +/* decNumberSubtract -- subtract two Numbers */ +/* */ +/* This computes C = A - B */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X-X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context */ +/* */ +/* C must have space for set->digits digits. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberSubtract(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + uInt status=0; /* accumulator */ + + decAddOp(res, lhs, rhs, set, DECNEG, &status); + if (status!=0) decStatus(res, status, set); + #if DECCHECK + decCheckInexact(res, set); + #endif + return res; + } /* decNumberSubtract */ + +/* ------------------------------------------------------------------ */ +/* decNumberToIntegralExact -- round-to-integral-value with InExact */ +/* decNumberToIntegralValue -- round-to-integral-value */ +/* */ +/* res is the result */ +/* rhs is input number */ +/* set is the context */ +/* */ +/* res must have space for any value of rhs. */ +/* */ +/* This implements the IEEE special operators and therefore treats */ +/* special values as valid. For finite numbers it returns */ +/* rescale(rhs, 0) if rhs->exponent is <0.
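
As an aside on the two entry points just named: they differ only in the Inexact signal. A sketch (same assumptions and scaffold as the earlier ones; hypothetical driver):

    static void tointegral_sketch(void) {         /* hypothetical driver */
      decContext set;
      decNumber x, r;
      char out[DECNUMDIGITS+14];
      decContextDefault(&set, DEC_INIT_BASE);
      set.traps=0; set.digits=DECNUMDIGITS;
      set.round=DEC_ROUND_HALF_EVEN;              /* bankers' rounding */
      decNumberFromString(&x, "2.5", &set);
      decNumberToIntegralValue(&r, &x, &set);     /* 2; never signals Inexact */
      printf("value: %s\n", decNumberToString(&r, out));
      decNumberToIntegralExact(&r, &x, &set);     /* 2; raises DEC_Inexact */
      printf("exact: %s\n", decNumberToString(&r, out));
    }
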
*/ +/* Otherwise the result is rhs (so no error is possible, except for */ +/* sNaN). */ +/* */ +/* The context is used for rounding mode and status after sNaN, but */ +/* the digits setting is ignored. The Exact version will signal */ +/* Inexact if the result differs numerically from rhs; the other */ +/* never signals Inexact. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberToIntegralExact(decNumber *res, const decNumber *rhs, + decContext *set) { + decNumber dn; + decContext workset; /* working context */ + uInt status=0; /* accumulator */ + + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, set)) return res; + #endif + + /* handle infinities and NaNs */ + if (SPECIALARG) { + if (decNumberIsInfinite(rhs)) decNumberCopy(res, rhs); /* an Infinity */ + else decNaNs(res, rhs, NULL, set, &status); /* a NaN */ + } + else { /* finite */ + /* have a finite number; no error possible (res must be big enough) */ + if (rhs->exponent>=0) return decNumberCopy(res, rhs); + /* that was easy, but if negative exponent there is work to do... */ + workset=*set; /* clone rounding, etc. */ + workset.digits=rhs->digits; /* no length rounding */ + workset.traps=0; /* no traps */ + decNumberZero(&dn); /* make a number with exponent 0 */ + decNumberQuantize(res, rhs, &dn, &workset); + status|=workset.status; + } + if (status!=0) decStatus(res, status, set); + return res; + } /* decNumberToIntegralExact */ + +decNumber * decNumberToIntegralValue(decNumber *res, const decNumber *rhs, + decContext *set) { + decContext workset=*set; /* working context */ + workset.traps=0; /* no traps */ + decNumberToIntegralExact(res, rhs, &workset); + /* this never affects set, except for sNaNs; NaN will have been set */ + /* or propagated already, so no need to call decStatus */ + set->status|=workset.status&DEC_Invalid_operation; + return res; + } /* decNumberToIntegralValue */ + +/* ------------------------------------------------------------------ */ +/* decNumberXor -- XOR two Numbers, digitwise */ +/* */ +/* This computes C = A ^ B */ +/* */ +/* res is C, the result. C may be A and/or B (e.g., X=X^X) */ +/* lhs is A */ +/* rhs is B */ +/* set is the context (used for result length and error report) */ +/* */ +/* C must have space for set->digits digits. */ +/* */ +/* Logical function restrictions apply (see above); a NaN is */ +/* returned with Invalid_operation if a restriction is violated. */ +/* ------------------------------------------------------------------ */ +decNumber * decNumberXor(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + const Unit *ua, *ub; /* -> operands */ + const Unit *msua, *msub; /* -> operand msus */ + Unit *uc, *msuc; /* -> result and its msu */ + Int msudigs; /* digits in res msu */ + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + #endif + + if (lhs->exponent!=0 || decNumberIsSpecial(lhs) || decNumberIsNegative(lhs) + || rhs->exponent!=0 || decNumberIsSpecial(rhs) || decNumberIsNegative(rhs)) { + decStatus(res, DEC_Invalid_operation, set); + return res; + } + /* operands are valid */ + ua=lhs->lsu; /* bottom-up */ + ub=rhs->lsu; /* .. */ + uc=res->lsu; /* .. 
*/ + msua=ua+D2U(lhs->digits)-1; /* -> msu of lhs */ + msub=ub+D2U(rhs->digits)-1; /* -> msu of rhs */ + msuc=uc+D2U(set->digits)-1; /* -> msu of result */ + msudigs=MSUDIGITS(set->digits); /* [faster than remainder] */ + for (; uc<=msuc; ua++, ub++, uc++) { /* Unit loop */ + Unit a, b; /* extract units */ + if (ua>msua) a=0; + else a=*ua; + if (ub>msub) b=0; + else b=*ub; + *uc=0; /* can now write back */ + if (a|b) { /* maybe 1 bits to examine */ + Int i, j; + /* This loop could be unrolled and/or use BIN2BCD tables */ + for (i=0; i<DECDPUN; i++) { + if ((a^b)&1) *uc=*uc+(Unit)powers[i]; /* effect XOR */ + j=a%10; + a=a/10; + j^=b%10; + b=b/10; + if (j>1) { + decStatus(res, DEC_Invalid_operation, set); + return res; + } + if (uc==msuc && i==msudigs-1) break; /* just did final digit */ + } /* each digit */ + } /* non-zero */ + } /* each unit */ + /* [here uc-1 is the msu of the result] */ + res->digits=decGetDigits(res->lsu, uc-res->lsu); + res->exponent=0; /* integer */ + res->bits=0; /* sign=0 */ + return res; /* [no status to set] */ + } /* decNumberXor */ + + +/* ================================================================== */ +/* Utility routines */ +/* ================================================================== */ + +/* ------------------------------------------------------------------ */ +/* decNumberClass -- return the decClass of a decNumber */ +/* dn -- the decNumber to test */ +/* set -- the context to use for Emin */ +/* returns the decClass enum */ +/* ------------------------------------------------------------------ */ +enum decClass decNumberClass(const decNumber *dn, decContext *set) { + if (decNumberIsSpecial(dn)) { + if (decNumberIsQNaN(dn)) return DEC_CLASS_QNAN; + if (decNumberIsSNaN(dn)) return DEC_CLASS_SNAN; + /* must be an infinity */ + if (decNumberIsNegative(dn)) return DEC_CLASS_NEG_INF; + return DEC_CLASS_POS_INF; + } + /* is finite */ + if (decNumberIsNormal(dn, set)) { /* most common */ + if (decNumberIsNegative(dn)) return DEC_CLASS_NEG_NORMAL; + return DEC_CLASS_POS_NORMAL; + } + /* is subnormal or zero */ + if (decNumberIsZero(dn)) { /* most common */ + if (decNumberIsNegative(dn)) return DEC_CLASS_NEG_ZERO; + return DEC_CLASS_POS_ZERO; + } + if (decNumberIsNegative(dn)) return DEC_CLASS_NEG_SUBNORMAL; + return DEC_CLASS_POS_SUBNORMAL; + } /* decNumberClass */ + +/* ------------------------------------------------------------------ */ +/* decNumberClassToString -- convert decClass to a string */ +/* */ +/* eclass is a valid decClass */ +/* returns a constant string describing the class (max 13+1 chars) */ +/* ------------------------------------------------------------------ */ +const char *decNumberClassToString(enum decClass eclass) { + if (eclass==DEC_CLASS_POS_NORMAL) return DEC_ClassString_PN; + if (eclass==DEC_CLASS_NEG_NORMAL) return DEC_ClassString_NN; + if (eclass==DEC_CLASS_POS_ZERO) return DEC_ClassString_PZ; + if (eclass==DEC_CLASS_NEG_ZERO) return DEC_ClassString_NZ; + if (eclass==DEC_CLASS_POS_SUBNORMAL) return DEC_ClassString_PS; + if (eclass==DEC_CLASS_NEG_SUBNORMAL) return DEC_ClassString_NS; + if (eclass==DEC_CLASS_POS_INF) return DEC_ClassString_PI; + if (eclass==DEC_CLASS_NEG_INF) return DEC_ClassString_NI; + if (eclass==DEC_CLASS_QNAN) return DEC_ClassString_QN; + if (eclass==DEC_CLASS_SNAN) return DEC_ClassString_SN; + return DEC_ClassString_UN; /* Unknown */ + } /* decNumberClassToString */ + +/* ------------------------------------------------------------------ */ +/* decNumberCopy -- copy a number */ +/* */ +/* dest is the target decNumber */ +/* src is the source decNumber */ +/* returns dest */ +/* */ +/* (dest==src is allowed and is a
+/* ------------------------------------------------------------------ */
+/* decNumberCopy -- copy a number                                     */
+/*                                                                    */
+/*   dest is the target decNumber                                     */
+/*   src  is the source decNumber                                     */
+/*   returns dest                                                     */
+/*                                                                    */
+/* (dest==src is allowed and is a no-op)                              */
+/* All fields are updated as required.  This is a utility operation,  */
+/* so special values are unchanged and no error is possible.          */
+/* ------------------------------------------------------------------ */
+decNumber * decNumberCopy(decNumber *dest, const decNumber *src) {
+
+  #if DECCHECK
+  if (src==NULL) return decNumberZero(dest);
+  #endif
+
+  if (dest==src) return dest;           /* no copy required */
+
+  /* Use explicit assignments here as structure assignment could copy */
+  /* more than just the lsu (for small DECDPUN).  This would not affect */
+  /* the value of the results, but could disturb test harness spill */
+  /* checking. */
+  dest->bits=src->bits;
+  dest->exponent=src->exponent;
+  dest->digits=src->digits;
+  dest->lsu[0]=src->lsu[0];
+  if (src->digits>DECDPUN) {            /* more Units to come */
+    const Unit *smsup, *s;              /* work */
+    Unit *d;                            /* .. */
+    /* memcpy for the remaining Units would be safe as they cannot */
+    /* overlap.  However, this explicit loop is faster in short cases. */
+    d=dest->lsu+1;                      /* -> first destination */
+    smsup=src->lsu+D2U(src->digits);    /* -> source msu+1 */
+    for (s=src->lsu+1; s<smsup; s++, d++) *d=*s;
+    }
+  return dest;
+  } /* decNumberCopy */
+
+/* ------------------------------------------------------------------ */
+/* decNumberCopyAbs -- quiet absolute value operator                  */
+/*                                                                    */
+/*   This sets C = abs(A)                                             */
+/*                                                                    */
+/*   res is C, the result.  C may be A                                */
+/*   rhs is A                                                         */
+/*                                                                    */
+/* C must have space for set->digits digits.                          */
+/* No exception or error can occur; this is a quiet bitwise operation.*/
+/* See also decNumberAbs for a checking version of this.              */
+/* ------------------------------------------------------------------ */
+decNumber * decNumberCopyAbs(decNumber *res, const decNumber *rhs) {
+  #if DECCHECK
+  if (decCheckOperands(res, DECUNUSED, rhs, DECUNCONT)) return res;
+  #endif
+  decNumberCopy(res, rhs);
+  res->bits&=~DECNEG;                   /* turn off sign */
+  return res;
+  } /* decNumberCopyAbs */
+
+/* ------------------------------------------------------------------ */
+/* decNumberCopyNegate -- quiet negate value operator                 */
+/*                                                                    */
+/*   This sets C = negate(A)                                          */
+/*                                                                    */
+/*   res is C, the result.  C may be A                                */
+/*   rhs is A                                                         */
+/*                                                                    */
+/* C must have space for set->digits digits.                          */
+/* No exception or error can occur; this is a quiet bitwise operation.*/
+/* See also decNumberMinus for a checking version of this.            */
+/* ------------------------------------------------------------------ */
+decNumber * decNumberCopyNegate(decNumber *res, const decNumber *rhs) {
+  #if DECCHECK
+  if (decCheckOperands(res, DECUNUSED, rhs, DECUNCONT)) return res;
+  #endif
+  decNumberCopy(res, rhs);
+  res->bits^=DECNEG;                    /* invert the sign */
+  return res;
+  } /* decNumberCopyNegate */
+
+/* ------------------------------------------------------------------ */
+/* decNumberCopySign -- quiet copy and set sign operator              */
+/*                                                                    */
+/*   This sets C = A with the sign of B                               */
+/*                                                                    */
+/*   res is C, the result.  C may be A                                */
+/*   lhs is A                                                         */
+/*   rhs is B                                                         */
+/*                                                                    */
+/* C must have space for set->digits digits.
*/ +/* No exception or error can occur; this is a quiet bitwise operation.*/ +/* ------------------------------------------------------------------ */ +decNumber * decNumberCopySign(decNumber *res, const decNumber *lhs, + const decNumber *rhs) { + uByte sign; /* rhs sign */ + #if DECCHECK + if (decCheckOperands(res, DECUNUSED, rhs, DECUNCONT)) return res; + #endif + sign=rhs->bits & DECNEG; /* save sign bit */ + decNumberCopy(res, lhs); + res->bits&=~DECNEG; /* clear the sign */ + res->bits|=sign; /* set from rhs */ + return res; + } /* decNumberCopySign */ + +/* ------------------------------------------------------------------ */ +/* decNumberGetBCD -- get the coefficient in BCD8 */ +/* dn is the source decNumber */ +/* bcd is the uInt array that will receive dn->digits BCD bytes, */ +/* most-significant at offset 0 */ +/* returns bcd */ +/* */ +/* bcd must have at least dn->digits bytes. No error is possible; if */ +/* dn is a NaN or Infinite, digits must be 1 and the coefficient 0. */ +/* ------------------------------------------------------------------ */ +uByte * decNumberGetBCD(const decNumber *dn, uint8_t *bcd) { + uByte *ub=bcd+dn->digits-1; /* -> lsd */ + const Unit *up=dn->lsu; /* Unit pointer, -> lsu */ + + #if DECDPUN==1 /* trivial simple copy */ + for (; ub>=bcd; ub--, up++) *ub=*up; + #else /* chopping needed */ + uInt u=*up; /* work */ + uInt cut=DECDPUN; /* downcounter through unit */ + for (; ub>=bcd; ub--) { + *ub=(uByte)(u%10); /* [*6554 trick inhibits, here] */ + u=u/10; + cut--; + if (cut>0) continue; /* more in this unit */ + up++; + u=*up; + cut=DECDPUN; + } + #endif + return bcd; + } /* decNumberGetBCD */ + +/* ------------------------------------------------------------------ */ +/* decNumberSetBCD -- set (replace) the coefficient from BCD8 */ +/* dn is the target decNumber */ +/* bcd is the uInt array that will source n BCD bytes, most- */ +/* significant at offset 0 */ +/* n is the number of digits in the source BCD array (bcd) */ +/* returns dn */ +/* */ +/* dn must have space for at least n digits. No error is possible; */ +/* if dn is a NaN, or Infinite, or is to become a zero, n must be 1 */ +/* and bcd[0] zero. 
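A round-trip sketch for the two BCD8 routines (editorial; assumes decNumberFromString as in the earlier examples):

    #define DECNUMDIGITS 8
    #include "decNumber.h"
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      decContext ctx;
      decNumber d;
      uint8_t bcd[DECNUMDIGITS];
      int i;
      decContextDefault(&ctx, DEC_INIT_DECIMAL64);
      ctx.digits=DECNUMDIGITS;             /* keep within DECNUMDIGITS */
      decNumberFromString(&d, "90210", &ctx);
      decNumberGetBCD(&d, bcd);            /* most-significant digit first */
      for (i=0; i<d.digits; i++) printf("%d", bcd[i]);  /* prints 90210 */
      printf("\n");
      decNumberSetBCD(&d, bcd, d.digits);  /* replace the coefficient */
      return 0;
    }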
*/
+/* ------------------------------------------------------------------ */
+decNumber * decNumberSetBCD(decNumber *dn, const uByte *bcd, uInt n) {
+  Unit *up = dn->lsu + D2U(n) - 1;      /* -> msu [target pointer] */
+  const uByte *ub=bcd;                  /* -> source msd */
+
+  #if DECDPUN==1                        /* trivial simple copy */
+  for (; ub<bcd+n; ub++, up--) *up=*ub;
+  #else                                 /* some assembly needed... */
+  /* calculate how many digits in msu, and hence first cut */
+  Int cut=MSUDIGITS(n);                 /* [faster than remainder] */
+  for (; up>=dn->lsu; up--) {           /* each Unit from msu */
+    *up=0;                              /* will take <=DECDPUN digits */
+    for (; cut>0; ub++, cut--) *up=X10(*up)+*ub;
+    cut=DECDPUN;                        /* next Unit has all digits */
+    }
+  #endif
+  dn->digits=n;                         /* set digit count */
+  return dn;
+  } /* decNumberSetBCD */
+
+/* ------------------------------------------------------------------ */
+/* decNumberIsNormal -- test normality of a decNumber                 */
+/*   dn is the decNumber to test                                      */
+/*   set is the context to use for Emin                               */
+/*   returns 1 if |dn| is finite and >=Nmin, 0 otherwise              */
+/* ------------------------------------------------------------------ */
+Int decNumberIsNormal(const decNumber *dn, decContext *set) {
+  Int ae;                               /* adjusted exponent */
+  #if DECCHECK
+  if (decCheckOperands(DECUNRESU, DECUNUSED, dn, set)) return 0;
+  #endif
+
+  if (decNumberIsSpecial(dn)) return 0; /* not finite */
+  if (decNumberIsZero(dn)) return 0;    /* not non-zero */
+
+  ae=dn->exponent+dn->digits-1;         /* adjusted exponent */
+  if (ae<set->emin) return 0;           /* is subnormal */
+  return 1;
+  } /* decNumberIsNormal */
+
+/* ------------------------------------------------------------------ */
+/* decNumberIsSubnormal -- test subnormality of a decNumber           */
+/*   dn is the decNumber to test                                      */
+/*   set is the context to use for Emin                               */
+/*   returns 1 if |dn| is finite, non-zero, and <Nmin, 0 otherwise    */
+/* ------------------------------------------------------------------ */
+Int decNumberIsSubnormal(const decNumber *dn, decContext *set) {
+  Int ae;                               /* adjusted exponent */
+  #if DECCHECK
+  if (decCheckOperands(DECUNRESU, DECUNUSED, dn, set)) return 0;
+  #endif
+
+  if (decNumberIsSpecial(dn)) return 0; /* not finite */
+  if (decNumberIsZero(dn)) return 0;    /* not non-zero */
+
+  ae=dn->exponent+dn->digits-1;         /* adjusted exponent */
+  if (ae<set->emin) return 1;           /* is subnormal */
+  return 0;
+  } /* decNumberIsSubnormal */
+
+/* ------------------------------------------------------------------ */
+/* decNumberTrim -- remove insignificant zeros                        */
+/*                                                                    */
+/*   dn is the number to trim                                         */
+/*   returns dn                                                       */
+/*                                                                    */
+/* All fields are updated as required.  This is a utility operation,  */
+/* so special values are unchanged and no error is possible.          */
+/* ------------------------------------------------------------------ */
+decNumber * decNumberTrim(decNumber *dn) {
+  Int dropped;                          /* work */
+  decContext set;                       /* .. */
+  #if DECCHECK
+  if (decCheckOperands(DECUNRESU, DECUNUSED, dn, DECUNCONT)) return dn;
+  #endif
+  decContextDefault(&set, DEC_INIT_BASE); /* clamp=0 */
+  return decTrim(dn, &set, 0, &dropped);
+  } /* decNumberTrim */
+
+/* ------------------------------------------------------------------ */
+/* decNumberVersion -- return the name and version of this module     */
+/*                                                                    */
+/* No error is possible.                                              */
+/* ------------------------------------------------------------------ */
+const char * decNumberVersion(void) {
+  return DECVERSION;
+  } /* decNumberVersion */
+
+/* ------------------------------------------------------------------ */
+/* decNumberZero -- set a number to 0                                 */
+/*                                                                    */
+/*   dn is the number to set, with space for one digit                */
+/*   returns dn                                                       */
+/*                                                                    */
+/* No error is possible.                                              */
+/* ------------------------------------------------------------------ */
+/* Memset is not used as it is much slower in some environments.
*/ +decNumber * decNumberZero(decNumber *dn) { + + #if DECCHECK + if (decCheckOperands(dn, DECUNUSED, DECUNUSED, DECUNCONT)) return dn; + #endif + + dn->bits=0; + dn->exponent=0; + dn->digits=1; + dn->lsu[0]=0; + return dn; + } /* decNumberZero */ + +/* ================================================================== */ +/* Local routines */ +/* ================================================================== */ + +/* ------------------------------------------------------------------ */ +/* decToString -- lay out a number into a string */ +/* */ +/* dn is the number to lay out */ +/* string is where to lay out the number */ +/* eng is 1 if Engineering, 0 if Scientific */ +/* */ +/* string must be at least dn->digits+14 characters long */ +/* No error is possible. */ +/* */ +/* Note that this routine can generate a -0 or 0.000. These are */ +/* never generated in subset to-number or arithmetic, but can occur */ +/* in non-subset arithmetic (e.g., -1*0 or 1.234-1.234). */ +/* ------------------------------------------------------------------ */ +/* If DECCHECK is enabled the string "?" is returned if a number is */ +/* invalid. */ +static void decToString(const decNumber *dn, char *string, Flag eng) { + Int exp=dn->exponent; /* local copy */ + Int e; /* E-part value */ + Int pre; /* digits before the '.' */ + Int cut; /* for counting digits in a Unit */ + char *c=string; /* work [output pointer] */ + const Unit *up=dn->lsu+D2U(dn->digits)-1; /* -> msu [input pointer] */ + uInt u, pow; /* work */ + + #if DECCHECK + if (decCheckOperands(DECUNRESU, dn, DECUNUSED, DECUNCONT)) { + strcpy(string, "?"); + return;} + #endif + + if (decNumberIsNegative(dn)) { /* Negatives get a minus */ + *c='-'; + c++; + } + if (dn->bits&DECSPECIAL) { /* Is a special value */ + if (decNumberIsInfinite(dn)) { + strcpy(c, "Inf"); + strcpy(c+3, "inity"); + return;} + /* a NaN */ + if (dn->bits&DECSNAN) { /* signalling NaN */ + *c='s'; + c++; + } + strcpy(c, "NaN"); + c+=3; /* step past */ + /* if not a clean non-zero coefficient, that's all there is in a */ + /* NaN string */ + if (exp!=0 || (*dn->lsu==0 && dn->digits==1)) return; + /* [drop through to add integer] */ + } + + /* calculate how many digits in msu, and hence first cut */ + cut=MSUDIGITS(dn->digits); /* [faster than remainder] */ + cut--; /* power of ten for digit */ + + if (exp==0) { /* simple integer [common fastpath] */ + for (;up>=dn->lsu; up--) { /* each Unit from msu */ + u=*up; /* contains DECDPUN digits to lay out */ + for (; cut>=0; c++, cut--) TODIGIT(u, cut, c, pow); + cut=DECDPUN-1; /* next Unit has all digits */ + } + *c='\0'; /* terminate the string */ + return;} + + /* non-0 exponent -- assume plain form */ + pre=dn->digits+exp; /* digits before '.' */ + e=0; /* no E */ + if ((exp>0) || (pre<-5)) { /* need exponential form */ + e=exp+dn->digits-1; /* calculate E value */ + pre=1; /* assume one digit before '.' */ + if (eng && (e!=0)) { /* engineering: may need to adjust */ + Int adj; /* adjustment */ + /* The C remainder operator is undefined for negative numbers, so */ + /* a positive remainder calculation must be used here */ + if (e<0) { + adj=(-e)%3; + if (adj!=0) adj=3-adj; + } + else { /* e>0 */ + adj=e%3; + } + e=e-adj; + /* if dealing with zero still produce an exponent which is a */ + /* multiple of three, as expected, but there will only be the */ + /* one zero before the E, still. Otherwise note the padding. 
*/
+        if (!ISZERO(dn)) pre+=adj;
+         else {                        /* is zero */
+          if (adj!=0) {                /* 0.00Esnn needed */
+            e=e+3;
+            pre=-(2-adj);
+            }
+          } /* zero */
+        } /* eng */
+      } /* need exponent */
+
+    /* lay out the digits of the coefficient, adding 0s and . as needed */
+    u=*up;
+    if (pre>0) {                       /* xxx.xxx or xx00 (engineering) form */
+      Int n=pre;
+      for (; pre>0; pre--, c++, cut--) {
+        if (cut<0) {                   /* need new Unit */
+          if (up==dn->lsu) break;      /* out of input digits (pre>digits) */
+          up--;
+          cut=DECDPUN-1;
+          u=*up;
+          }
+        TODIGIT(u, cut, c, pow);
+        }
+      if (n<dn->digits) {              /* more to come, after '.' */
+        *c='.'; c++;
+        for (;; c++, cut--) {
+          if (cut<0) {                 /* need new Unit */
+            if (up==dn->lsu) break;    /* out of input digits */
+            up--;
+            cut=DECDPUN-1;
+            u=*up;
+            }
+          TODIGIT(u, cut, c, pow);
+          }
+        }
+       else for (; pre>0; pre--, c++) *c='0'; /* 0 padding (for engineering) needed */
+      }
+     else {                            /* 0.xxx or 0.000xxx form */
+      *c='0'; c++;
+      *c='.'; c++;
+      for (; pre<0; pre++, c++) *c='0';  /* add any 0's after '.' */
+      for (; ; c++, cut--) {
+        if (cut<0) {                   /* need new Unit */
+          if (up==dn->lsu) break;      /* out of input digits */
+          up--;
+          cut=DECDPUN-1;
+          u=*up;
+          }
+        TODIGIT(u, cut, c, pow);
+        }
+      }
+
+    /* Finally add the E-part, if needed.  It will never be 0, has a
+       base maximum and minimum of +999999999 through -999999999, but
+       could range down to -1999999998 for anormal numbers */
+    if (e!=0) {
+      Flag had=0;                      /* 1=had non-zero */
+      *c='E'; c++;
+      *c='+'; c++;                     /* assume positive */
+      u=e;                             /* .. */
+      if (e<0) {
+        *(c-1)='-';                    /* oops, need - */
+        u=-e;                          /* uInt, please */
+        }
+      /* lay out the exponent [_itoa or equivalent is not ANSI C] */
+      for (cut=9; cut>=0; cut--) {
+        TODIGIT(u, cut, c, pow);
+        if (*c=='0' && !had) continue; /* skip leading zeros */
+        had=1;                         /* had non-0 */
+        c++;                           /* step for next */
+        } /* cut */
+      }
+    *c='\0';                           /* terminate the string (all paths) */
+    return;
+    } /* decToString */
+
+/* ------------------------------------------------------------------ */
+/* decAddOp -- add/subtract operation                                  */
+/*                                                                     */
+/*   This computes C = A + B                                           */
+/*                                                                     */
+/*   res is C, the result.  C may be A and/or B (e.g., X=X+X)          */
+/*   lhs is A                                                          */
+/*   rhs is B                                                          */
+/*   set is the context                                                */
+/*   negate is DECNEG if rhs should be negated, or 0 otherwise         */
+/*   status accumulates status for the caller                          */
+/*                                                                     */
+/* C must have space for set->digits digits.                           */
+/* Inexact in status must be 0 for correct Exact zero sign in result   */
+/* ------------------------------------------------------------------ */
+/* If possible, the coefficient is calculated directly into C.         */
+/* However, if:                                                        */
+/*   -- a digits+1 calculation is needed because the numbers are       */
+/*      unaligned and span more than set->digits digits                */
+/*   -- a carry to digits+1 digits looks possible                      */
+/*   -- C is the same as A or B, and the result would destructively    */
+/*      overlap the A or B coefficient                                 */
+/* then the result must be calculated into a temporary buffer.  In     */
+/* this case a local (stack) buffer is used if possible, and only if   */
+/* too long for that does malloc become the final resort.              */
+/*                                                                     */
+/* Misalignment is handled as follows:                                 */
+/*   Apad: (AExp>BExp) Swap operands and proceed as for BExp>AExp.     */
+/*   BPad: Apply the padding by a combination of shifting (whole       */
+/*         units) and multiplication (part units).                     */
+/*                                                                     */
+/* Addition, especially x=x+1, is speed-critical.                      */
+/* The static buffer is larger than might be expected to allow for     */
+/* calls from higher-level functions (notably exp).
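The Apad/BPad note above is the whole trick: operands are brought to a common exponent before the unit arithmetic begins. A toy editorial sketch of that idea, with machine integers standing in for the Unit arrays:

    #include <stdio.h>

    static long long ipow10(int n) {      /* small power-of-ten helper */
      long long p=1;
      while (n-->0) p*=10;
      return p;
    }

    int main(void) {
      long long lcoeff=123; int lexp=0;   /* 123E+0 */
      long long rcoeff=4;   int rexp=-1;  /* 4E-1, i.e. 0.4 */
      int padding=lexp-rexp;              /* decAddOp's 'padding', here 1 */
      /* pad the operand with the larger exponent down to the smaller one */
      long long sum=lcoeff*ipow10(padding)+rcoeff;
      printf("%lldE%+d\n", sum, rexp);    /* 1234E-1, i.e. 123.4 */
      return 0;
    }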
*/ +/* ------------------------------------------------------------------ */ +static decNumber * decAddOp(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set, + uByte negate, uInt *status) { + #if DECSUBSET + decNumber *alloclhs=NULL; /* non-NULL if rounded lhs allocated */ + decNumber *allocrhs=NULL; /* .., rhs */ + #endif + Int rhsshift; /* working shift (in Units) */ + Int maxdigits; /* longest logical length */ + Int mult; /* multiplier */ + Int residue; /* rounding accumulator */ + uByte bits; /* result bits */ + Flag diffsign; /* non-0 if arguments have different sign */ + Unit *acc; /* accumulator for result */ + Unit accbuff[SD2U(DECBUFFER*2+20)]; /* local buffer [*2+20 reduces many */ + /* allocations when called from */ + /* other operations, notable exp] */ + Unit *allocacc=NULL; /* -> allocated acc buffer, iff allocated */ + Int reqdigits=set->digits; /* local copy; requested DIGITS */ + Int padding; /* work */ + + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + #endif + + do { /* protect allocated storage */ + #if DECSUBSET + if (!set->extended) { + /* reduce operands and set lostDigits status, as needed */ + if (lhs->digits>reqdigits) { + alloclhs=decRoundOperand(lhs, set, status); + if (alloclhs==NULL) break; + lhs=alloclhs; + } + if (rhs->digits>reqdigits) { + allocrhs=decRoundOperand(rhs, set, status); + if (allocrhs==NULL) break; + rhs=allocrhs; + } + } + #endif + /* [following code does not require input rounding] */ + + /* note whether signs differ [used all paths] */ + diffsign=(Flag)((lhs->bits^rhs->bits^negate)&DECNEG); + + /* handle infinities and NaNs */ + if (SPECIALARGS) { /* a special bit set */ + if (SPECIALARGS & (DECSNAN | DECNAN)) /* a NaN */ + decNaNs(res, lhs, rhs, set, status); + else { /* one or two infinities */ + if (decNumberIsInfinite(lhs)) { /* LHS is infinity */ + /* two infinities with different signs is invalid */ + if (decNumberIsInfinite(rhs) && diffsign) { + *status|=DEC_Invalid_operation; + break; + } + bits=lhs->bits & DECNEG; /* get sign from LHS */ + } + else bits=(rhs->bits^negate) & DECNEG;/* RHS must be Infinity */ + bits|=DECINF; + decNumberZero(res); + res->bits=bits; /* set +/- infinity */ + } /* an infinity */ + break; + } + + /* Quick exit for add 0s; return the non-0, modified as need be */ + if (ISZERO(lhs)) { + Int adjust; /* work */ + Int lexp=lhs->exponent; /* save in case LHS==RES */ + bits=lhs->bits; /* .. */ + residue=0; /* clear accumulator */ + decCopyFit(res, rhs, set, &residue, status); /* copy (as needed) */ + res->bits^=negate; /* flip if rhs was negated */ + #if DECSUBSET + if (set->extended) { /* exponents on zeros count */ + #endif + /* exponent will be the lower of the two */ + adjust=lexp-res->exponent; /* adjustment needed [if -ve] */ + if (ISZERO(res)) { /* both 0: special IEEE 854 rules */ + if (adjust<0) res->exponent=lexp; /* set exponent */ + /* 0-0 gives +0 unless rounding to -infinity, and -0-0 gives -0 */ + if (diffsign) { + if (set->round!=DEC_ROUND_FLOOR) res->bits=0; + else res->bits=DECNEG; /* preserve 0 sign */ + } + } + else { /* non-0 res */ + if (adjust<0) { /* 0-padding needed */ + if ((res->digits-adjust)>set->digits) { + adjust=res->digits-set->digits; /* to fit exactly */ + *status|=DEC_Rounded; /* [but exact] */ + } + res->digits=decShiftToMost(res->lsu, res->digits, -adjust); + res->exponent+=adjust; /* set the exponent. 
*/ + } + } /* non-0 res */ + #if DECSUBSET + } /* extended */ + #endif + decFinish(res, set, &residue, status); /* clean and finalize */ + break;} + + if (ISZERO(rhs)) { /* [lhs is non-zero] */ + Int adjust; /* work */ + Int rexp=rhs->exponent; /* save in case RHS==RES */ + bits=rhs->bits; /* be clean */ + residue=0; /* clear accumulator */ + decCopyFit(res, lhs, set, &residue, status); /* copy (as needed) */ + #if DECSUBSET + if (set->extended) { /* exponents on zeros count */ + #endif + /* exponent will be the lower of the two */ + /* [0-0 case handled above] */ + adjust=rexp-res->exponent; /* adjustment needed [if -ve] */ + if (adjust<0) { /* 0-padding needed */ + if ((res->digits-adjust)>set->digits) { + adjust=res->digits-set->digits; /* to fit exactly */ + *status|=DEC_Rounded; /* [but exact] */ + } + res->digits=decShiftToMost(res->lsu, res->digits, -adjust); + res->exponent+=adjust; /* set the exponent. */ + } + #if DECSUBSET + } /* extended */ + #endif + decFinish(res, set, &residue, status); /* clean and finalize */ + break;} + + /* [NB: both fastpath and mainpath code below assume these cases */ + /* (notably 0-0) have already been handled] */ + + /* calculate the padding needed to align the operands */ + padding=rhs->exponent-lhs->exponent; + + /* Fastpath cases where the numbers are aligned and normal, the RHS */ + /* is all in one unit, no operand rounding is needed, and no carry, */ + /* lengthening, or borrow is needed */ + if (padding==0 + && rhs->digits<=DECDPUN + && rhs->exponent>=set->emin /* [some normals drop through] */ + && rhs->exponent<=set->emax-set->digits+1 /* [could clamp] */ + && rhs->digits<=reqdigits + && lhs->digits<=reqdigits) { + Int partial=*lhs->lsu; + if (!diffsign) { /* adding */ + partial+=*rhs->lsu; + if ((partial<=DECDPUNMAX) /* result fits in unit */ + && (lhs->digits>=DECDPUN || /* .. and no digits-count change */ + partial<(Int)powers[lhs->digits])) { /* .. */ + if (res!=lhs) decNumberCopy(res, lhs); /* not in place */ + *res->lsu=(Unit)partial; /* [copy could have overwritten RHS] */ + break; + } + /* else drop out for careful add */ + } + else { /* signs differ */ + partial-=*rhs->lsu; + if (partial>0) { /* no borrow needed, and non-0 result */ + if (res!=lhs) decNumberCopy(res, lhs); /* not in place */ + *res->lsu=(Unit)partial; + /* this could have reduced digits [but result>0] */ + res->digits=decGetDigits(res->lsu, D2U(res->digits)); + break; + } + /* else drop out for careful subtract */ + } + } + + /* Now align (pad) the lhs or rhs so they can be added or */ + /* subtracted, as necessary. If one number is much larger than */ + /* the other (that is, if in plain form there is a least one */ + /* digit between the lowest digit of one and the highest of the */ + /* other) padding with up to DIGITS-1 trailing zeros may be */ + /* needed; then apply rounding (as exotic rounding modes may be */ + /* affected by the residue). 
*/ + rhsshift=0; /* rhs shift to left (padding) in Units */ + bits=lhs->bits; /* assume sign is that of LHS */ + mult=1; /* likely multiplier */ + + /* [if padding==0 the operands are aligned; no padding is needed] */ + if (padding!=0) { + /* some padding needed; always pad the RHS, as any required */ + /* padding can then be effected by a simple combination of */ + /* shifts and a multiply */ + Flag swapped=0; + if (padding<0) { /* LHS needs the padding */ + const decNumber *t; + padding=-padding; /* will be +ve */ + bits=(uByte)(rhs->bits^negate); /* assumed sign is now that of RHS */ + t=lhs; lhs=rhs; rhs=t; + swapped=1; + } + + /* If, after pad, rhs would be longer than lhs by digits+1 or */ + /* more then lhs cannot affect the answer, except as a residue, */ + /* so only need to pad up to a length of DIGITS+1. */ + if (rhs->digits+padding > lhs->digits+reqdigits+1) { + /* The RHS is sufficient */ + /* for residue use the relative sign indication... */ + Int shift=reqdigits-rhs->digits; /* left shift needed */ + residue=1; /* residue for rounding */ + if (diffsign) residue=-residue; /* signs differ */ + /* copy, shortening if necessary */ + decCopyFit(res, rhs, set, &residue, status); + /* if it was already shorter, then need to pad with zeros */ + if (shift>0) { + res->digits=decShiftToMost(res->lsu, res->digits, shift); + res->exponent-=shift; /* adjust the exponent. */ + } + /* flip the result sign if unswapped and rhs was negated */ + if (!swapped) res->bits^=negate; + decFinish(res, set, &residue, status); /* done */ + break;} + + /* LHS digits may affect result */ + rhsshift=D2U(padding+1)-1; /* this much by Unit shift .. */ + mult=powers[padding-(rhsshift*DECDPUN)]; /* .. this by multiplication */ + } /* padding needed */ + + if (diffsign) mult=-mult; /* signs differ */ + + /* determine the longer operand */ + maxdigits=rhs->digits+padding; /* virtual length of RHS */ + if (lhs->digits>maxdigits) maxdigits=lhs->digits; + + /* Decide on the result buffer to use; if possible place directly */ + /* into result. */ + acc=res->lsu; /* assume add direct to result */ + /* If destructive overlap, or the number is too long, or a carry or */ + /* borrow to DIGITS+1 might be possible, a buffer must be used. */ + /* [Might be worth more sophisticated tests when maxdigits==reqdigits] */ + if ((maxdigits>=reqdigits) /* is, or could be, too large */ + || (res==rhs && rhsshift>0)) { /* destructive overlap */ + /* buffer needed, choose it; units for maxdigits digits will be */ + /* needed, +1 Unit for carry or borrow */ + Int need=D2U(maxdigits)+1; + acc=accbuff; /* assume use local buffer */ + if (need*sizeof(Unit)>sizeof(accbuff)) { + /* printf("malloc add %ld %ld\n", need, sizeof(accbuff)); */ + allocacc=(Unit *)malloc(need*sizeof(Unit)); + if (allocacc==NULL) { /* hopeless -- abandon */ + *status|=DEC_Insufficient_storage; + break;} + acc=allocacc; + } + } + + res->bits=(uByte)(bits&DECNEG); /* it's now safe to overwrite.. */ + res->exponent=lhs->exponent; /* .. operands (even if aliased) */ + + #if DECTRACE + decDumpAr('A', lhs->lsu, D2U(lhs->digits)); + decDumpAr('B', rhs->lsu, D2U(rhs->digits)); + printf(" :h: %ld %ld\n", rhsshift, mult); + #endif + + /* add [A+B*m] or subtract [A+B*(-m)] */ + res->digits=decUnitAddSub(lhs->lsu, D2U(lhs->digits), + rhs->lsu, D2U(rhs->digits), + rhsshift, acc, mult) + *DECDPUN; /* [units -> digits] */ + if (res->digits<0) { /* borrowed... 
*/
+    res->digits=-res->digits;
+    res->bits^=DECNEG;                   /* flip the sign */
+    }
+  #if DECTRACE
+    decDumpAr('+', acc, D2U(res->digits));
+  #endif
+
+  /* If a buffer was used the result must be copied back, possibly */
+  /* shortening.  (If no buffer was used then the result must have */
+  /* fit, so can't need rounding and residue must be 0.) */
+  residue=0;                             /* clear accumulator */
+  if (acc!=res->lsu) {
+    #if DECSUBSET
+    if (set->extended) {                 /* round from first significant digit */
+    #endif
+      /* remove leading zeros that were added due to rounding up to */
+      /* integral Units -- before the test for rounding. */
+      if (res->digits>reqdigits)
+        res->digits=decGetDigits(acc, D2U(res->digits));
+      decSetCoeff(res, set, acc, res->digits, &residue, status);
+    #if DECSUBSET
+      }
+     else { /* subset arithmetic rounds from original significant digit */
+      /* May have an underestimate.  This only occurs when both */
+      /* numbers fit in DECDPUN digits and are padding with a */
+      /* negative multiple (-10, -100...) and the top digit(s) become */
+      /* 0.  (This only matters when using X3.274 rules where the */
+      /* leading zero could be included in the rounding.) */
+      if (res->digits<maxdigits) {
+        *(acc+D2U(res->digits))=0;       /* ensure leading 0 is there */
+        res->digits=maxdigits;
+        }
+       else {
+        /* remove leading zeros that were added due to rounding up to */
+        /* integral Units (but only those in excess of the original */
+        /* maxdigits length, unless extended) before test for rounding. */
+        if (res->digits>reqdigits) {
+          res->digits=decGetDigits(acc, D2U(res->digits));
+          if (res->digits<maxdigits) res->digits=maxdigits;
+          }
+        }
+      decSetCoeff(res, set, acc, res->digits, &residue, status);
+      /* Now apply rounding if needed before removing leading zeros. */
+      /* This is safe because subnormals are not a possibility */
+      if (residue!=0) {
+        decApplyRound(res, set, residue, status);
+        residue=0;                       /* did what needed to be done */
+        }
+      } /* subset */
+    #endif
+    } /* used buffer */
+
+  /* strip leading zeros [these were left on in case of subset subtract] */
+  res->digits=decGetDigits(res->lsu, D2U(res->digits));
+
+  /* apply checks and rounding */
+  decFinish(res, set, &residue, status);
+
+  /* "When the sum of two operands with opposite signs is exactly */
+  /* zero, the sign of that sum shall be '+' in all rounding modes */
+  /* except round toward -Infinity, in which mode that sign shall be */
+  /* '-'."  [Subset zeros also never have '-', set by decFinish.] */
+  if (ISZERO(res) && diffsign
+   #if DECSUBSET
+   && set->extended
+   #endif
+   && (*status&DEC_Inexact)==0) {
+    if (set->round==DEC_ROUND_FLOOR) res->bits|=DECNEG;   /* sign - */
+     else res->bits&=~DECNEG;                             /* sign + */
+    }
+  } while(0);                            /* end protected */
+
+  if (allocacc!=NULL) free(allocacc);    /* drop any storage used */
+  #if DECSUBSET
+  if (allocrhs!=NULL) free(allocrhs);    /* .. */
+  if (alloclhs!=NULL) free(alloclhs);    /* .. */
+  #endif
+  return res;
+  } /* decAddOp */
+
+/* ------------------------------------------------------------------ */
+/* decDivideOp -- division operation                                   */
+/*                                                                     */
+/*  This routine performs the calculations for all four division       */
+/*  operators (divide, divideInteger, remainder, remainderNear).       */
+/*                                                                     */
+/*  C=A op B                                                           */
+/*                                                                     */
+/*   res is C, the result.  C may be A and/or B (e.g., X=X/X)          */
+/*   lhs is A                                                          */
+/*   rhs is B                                                          */
+/*   set is the context                                                */
+/*   op  is DIVIDE, DIVIDEINT, REMAINDER, or REMNEAR respectively.     */
+/*   status is the usual accumulator                                   */
+/*                                                                     */
+/* C must have space for set->digits digits.
*/ +/* */ +/* ------------------------------------------------------------------ */ +/* The underlying algorithm of this routine is the same as in the */ +/* 1981 S/370 implementation, that is, non-restoring long division */ +/* with bi-unit (rather than bi-digit) estimation for each unit */ +/* multiplier. In this pseudocode overview, complications for the */ +/* Remainder operators and division residues for exact rounding are */ +/* omitted for clarity. */ +/* */ +/* Prepare operands and handle special values */ +/* Test for x/0 and then 0/x */ +/* Exp =Exp1 - Exp2 */ +/* Exp =Exp +len(var1) -len(var2) */ +/* Sign=Sign1 * Sign2 */ +/* Pad accumulator (Var1) to double-length with 0's (pad1) */ +/* Pad Var2 to same length as Var1 */ +/* msu2pair/plus=1st 2 or 1 units of var2, +1 to allow for round */ +/* have=0 */ +/* Do until (have=digits+1 OR residue=0) */ +/* if exp<0 then if integer divide/residue then leave */ +/* this_unit=0 */ +/* Do forever */ +/* compare numbers */ +/* if <0 then leave inner_loop */ +/* if =0 then (* quick exit without subtract *) do */ +/* this_unit=this_unit+1; output this_unit */ +/* leave outer_loop; end */ +/* Compare lengths of numbers (mantissae): */ +/* If same then tops2=msu2pair -- {units 1&2 of var2} */ +/* else tops2=msu2plus -- {0, unit 1 of var2} */ +/* tops1=first_unit_of_Var1*10**DECDPUN +second_unit_of_var1 */ +/* mult=tops1/tops2 -- Good and safe guess at divisor */ +/* if mult=0 then mult=1 */ +/* this_unit=this_unit+mult */ +/* subtract */ +/* end inner_loop */ +/* if have\=0 | this_unit\=0 then do */ +/* output this_unit */ +/* have=have+1; end */ +/* var2=var2/10 */ +/* exp=exp-1 */ +/* end outer_loop */ +/* exp=exp+1 -- set the proper exponent */ +/* if have=0 then generate answer=0 */ +/* Return (Result is defined by Var1) */ +/* */ +/* ------------------------------------------------------------------ */ +/* Two working buffers are needed during the division; one (digits+ */ +/* 1) to accumulate the result, and the other (up to 2*digits+1) for */ +/* long subtractions. These are acc and var1 respectively. */ +/* var1 is a copy of the lhs coefficient, var2 is the rhs coefficient.*/ +/* The static buffers may be larger than might be expected to allow */ +/* for calls from higher-level functions (notably exp). 
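One detail of the pseudocode worth spelling out is the msu2plus/msu2pair guess: dividing var1's leading digits by var2's leading unit(s) plus one may underestimate, but never overestimates, the unit multiplier, so the non-restoring subtraction loop simply goes around again. A toy editorial illustration with made-up 4-digit units:

    #include <stdio.h>

    int main(void) {
      /* var1 = units {1234,5678}; var2 = a single unit {4100} */
      long tops1=12345678;          /* top two units of var1 */
      long msu2plus=4100+1;         /* var2 msu, +1 so the guess is safe */
      long mult=tops1/msu2plus;     /* 3010: may be one low ...          */
      long truediv=12345678/4100;   /* 3011: ... but never high          */
      printf("guess=%ld true=%ld\n", mult, truediv);
      return 0;
    }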
*/ +/* ------------------------------------------------------------------ */ +static decNumber * decDivideOp(decNumber *res, + const decNumber *lhs, const decNumber *rhs, + decContext *set, Flag op, uInt *status) { + #if DECSUBSET + decNumber *alloclhs=NULL; /* non-NULL if rounded lhs allocated */ + decNumber *allocrhs=NULL; /* .., rhs */ + #endif + Unit accbuff[SD2U(DECBUFFER+DECDPUN+10)]; /* local buffer */ + Unit *acc=accbuff; /* -> accumulator array for result */ + Unit *allocacc=NULL; /* -> allocated buffer, iff allocated */ + Unit *accnext; /* -> where next digit will go */ + Int acclength; /* length of acc needed [Units] */ + Int accunits; /* count of units accumulated */ + Int accdigits; /* count of digits accumulated */ + + Unit varbuff[SD2U(DECBUFFER*2+DECDPUN)*sizeof(Unit)]; /* buffer for var1 */ + Unit *var1=varbuff; /* -> var1 array for long subtraction */ + Unit *varalloc=NULL; /* -> allocated buffer, iff used */ + Unit *msu1; /* -> msu of var1 */ + + const Unit *var2; /* -> var2 array */ + const Unit *msu2; /* -> msu of var2 */ + Int msu2plus; /* msu2 plus one [does not vary] */ + eInt msu2pair; /* msu2 pair plus one [does not vary] */ + + Int var1units, var2units; /* actual lengths */ + Int var2ulen; /* logical length (units) */ + Int var1initpad=0; /* var1 initial padding (digits) */ + Int maxdigits; /* longest LHS or required acc length */ + Int mult; /* multiplier for subtraction */ + Unit thisunit; /* current unit being accumulated */ + Int residue; /* for rounding */ + Int reqdigits=set->digits; /* requested DIGITS */ + Int exponent; /* working exponent */ + Int maxexponent=0; /* DIVIDE maximum exponent if unrounded */ + uByte bits; /* working sign */ + Unit *target; /* work */ + const Unit *source; /* .. */ + uLong const *pow; /* .. */ + Int shift, cut; /* .. */ + #if DECSUBSET + Int dropped; /* work */ + #endif + + #if DECCHECK + if (decCheckOperands(res, lhs, rhs, set)) return res; + #endif + + do { /* protect allocated storage */ + #if DECSUBSET + if (!set->extended) { + /* reduce operands and set lostDigits status, as needed */ + if (lhs->digits>reqdigits) { + alloclhs=decRoundOperand(lhs, set, status); + if (alloclhs==NULL) break; + lhs=alloclhs; + } + if (rhs->digits>reqdigits) { + allocrhs=decRoundOperand(rhs, set, status); + if (allocrhs==NULL) break; + rhs=allocrhs; + } + } + #endif + /* [following code does not require input rounding] */ + + bits=(lhs->bits^rhs->bits)&DECNEG; /* assumed sign for divisions */ + + /* handle infinities and NaNs */ + if (SPECIALARGS) { /* a special bit set */ + if (SPECIALARGS & (DECSNAN | DECNAN)) { /* one or two NaNs */ + decNaNs(res, lhs, rhs, set, status); + break; + } + /* one or two infinities */ + if (decNumberIsInfinite(lhs)) { /* LHS (dividend) is infinite */ + if (decNumberIsInfinite(rhs) || /* two infinities are invalid .. */ + op & (REMAINDER | REMNEAR)) { /* as is remainder of infinity */ + *status|=DEC_Invalid_operation; + break; + } + /* [Note that infinity/0 raises no exceptions] */ + decNumberZero(res); + res->bits=bits|DECINF; /* set +/- infinity */ + break; + } + else { /* RHS (divisor) is infinite */ + residue=0; + if (op&(REMAINDER|REMNEAR)) { + /* result is [finished clone of] lhs */ + decCopyFit(res, lhs, set, &residue, status); + } + else { /* a division */ + decNumberZero(res); + res->bits=bits; /* set +/- zero */ + /* for DIVIDEINT the exponent is always 0. 
For DIVIDE, result */
+          /* is a 0 with infinitely negative exponent, clamped to minimum */
+          if (op&DIVIDE) {
+            res->exponent=set->emin-set->digits+1;
+            *status|=DEC_Clamped;
+            }
+          }
+        decFinish(res, set, &residue, status);
+        break;
+        }
+      }
+
+    /* handle 0 rhs (x/0) */
+    if (ISZERO(rhs)) {                   /* x/0 is always exceptional */
+      if (ISZERO(lhs)) {
+        decNumberZero(res);              /* [after lhs test] */
+        *status|=DEC_Division_undefined; /* 0/0 will become NaN */
+        }
+       else {
+        decNumberZero(res);
+        if (op&(REMAINDER|REMNEAR)) *status|=DEC_Invalid_operation;
+         else {
+          *status|=DEC_Division_by_zero; /* x/0 */
+          res->bits=bits|DECINF;         /* .. is +/- Infinity */
+          }
+        }
+      break;}
+
+    /* handle 0 lhs (0/x) */
+    if (ISZERO(lhs)) {                   /* 0/x [x!=0] */
+      #if DECSUBSET
+      if (!set->extended) decNumberZero(res);
+       else {
+      #endif
+        if (op&DIVIDE) {
+          residue=0;
+          exponent=lhs->exponent-rhs->exponent; /* ideal exponent */
+          decNumberCopy(res, lhs);       /* [zeros always fit] */
+          res->bits=bits;                /* sign as computed */
+          res->exponent=exponent;        /* exponent, too */
+          decFinalize(res, set, &residue, status); /* check exponent */
+          }
+         else if (op&DIVIDEINT) {
+          decNumberZero(res);            /* integer 0 */
+          res->bits=bits;                /* sign as computed */
+          }
+         else {                          /* a remainder */
+          exponent=rhs->exponent;        /* [save in case overwrite] */
+          decNumberCopy(res, lhs);       /* [zeros always fit] */
+          if (exponent<res->exponent) res->exponent=exponent; /* use lower */
+          }
+      #if DECSUBSET
+        }
+      #endif
+      break;}
+
+    /* Precalculate exponent.  This starts off adjusted (and hence fits */
+    /* in 31 bits) and becomes the usual unadjusted exponent as the */
+    /* division proceeds.  The order of evaluation is important, here, */
+    /* to avoid wrap. */
+    exponent=(lhs->exponent+lhs->digits)-(rhs->exponent+rhs->digits);
+
+    /* If the working exponent is -ve, then some quick exits are */
+    /* possible because the quotient is known to be <1 */
+    /* [for REMNEAR, it needs to be < -1, as -0.5 could need work] */
+    if (exponent<0 && !(op==DIVIDE)) {
+      if (op&DIVIDEINT) {
+        decNumberZero(res);              /* integer part is 0 */
+        #if DECSUBSET
+        if (set->extended)
+        #endif
+          res->bits=bits;                /* set +/- zero */
+        break;}
+      /* fastpath remainders so long as the lhs has the smaller */
+      /* (or equal) exponent */
+      if (lhs->exponent<=rhs->exponent) {
+        if (op&REMAINDER || exponent<-1) {
+          /* It is REMAINDER or safe REMNEAR; result is [finished */
+          /* clone of] lhs  (r = x - 0*y) */
+          residue=0;
+          decCopyFit(res, lhs, set, &residue, status);
+          decFinish(res, set, &residue, status);
+          break;
+          }
+        /* [unsafe REMNEAR drops through] */
+        }
+      } /* fastpaths */
+
+    /* Long (slow) division is needed; roll up the sleeves... */
+
+    /* The accumulator will hold the quotient of the division. */
+    /* If it needs to be too long for stack storage, then allocate. */
+    acclength=D2U(reqdigits+DECDPUN);    /* in Units */
+    if (acclength*sizeof(Unit)>sizeof(accbuff)) {
+      /* printf("malloc dvacc %ld units\n", acclength); */
+      allocacc=(Unit *)malloc(acclength*sizeof(Unit));
+      if (allocacc==NULL) {              /* hopeless -- abandon */
+        *status|=DEC_Insufficient_storage;
+        break;}
+      acc=allocacc;                      /* use the allocated space */
+      }
+
+    /* var1 is the padded LHS ready for subtractions. */
+    /* If it needs to be too long for stack storage, then allocate.
*/ + /* The maximum units needed for var1 (long subtraction) is: */ + /* Enough for */ + /* (rhs->digits+reqdigits-1) -- to allow full slide to right */ + /* or (lhs->digits) -- to allow for long lhs */ + /* whichever is larger */ + /* +1 -- for rounding of slide to right */ + /* +1 -- for leading 0s */ + /* +1 -- for pre-adjust if a remainder or DIVIDEINT */ + /* [Note: unused units do not participate in decUnitAddSub data] */ + maxdigits=rhs->digits+reqdigits-1; + if (lhs->digits>maxdigits) maxdigits=lhs->digits; + var1units=D2U(maxdigits)+2; + /* allocate a guard unit above msu1 for REMAINDERNEAR */ + if (!(op&DIVIDE)) var1units++; + if ((var1units+1)*sizeof(Unit)>sizeof(varbuff)) { + /* printf("malloc dvvar %ld units\n", var1units+1); */ + varalloc=(Unit *)malloc((var1units+1)*sizeof(Unit)); + if (varalloc==NULL) { /* hopeless -- abandon */ + *status|=DEC_Insufficient_storage; + break;} + var1=varalloc; /* use the allocated space */ + } + + /* Extend the lhs and rhs to full long subtraction length. The lhs */ + /* is truly extended into the var1 buffer, with 0 padding, so a */ + /* subtract in place is always possible. The rhs (var2) has */ + /* virtual padding (implemented by decUnitAddSub). */ + /* One guard unit was allocated above msu1 for rem=rem+rem in */ + /* REMAINDERNEAR. */ + msu1=var1+var1units-1; /* msu of var1 */ + source=lhs->lsu+D2U(lhs->digits)-1; /* msu of input array */ + for (target=msu1; source>=lhs->lsu; source--, target--) *target=*source; + for (; target>=var1; target--) *target=0; + + /* rhs (var2) is left-aligned with var1 at the start */ + var2ulen=var1units; /* rhs logical length (units) */ + var2units=D2U(rhs->digits); /* rhs actual length (units) */ + var2=rhs->lsu; /* -> rhs array */ + msu2=var2+var2units-1; /* -> msu of var2 [never changes] */ + /* now set up the variables which will be used for estimating the */ + /* multiplication factor. If these variables are not exact, add */ + /* 1 to make sure that the multiplier is never overestimated. */ + msu2plus=*msu2; /* it's value .. */ + if (var2units>1) msu2plus++; /* .. +1 if any more */ + msu2pair=(eInt)*msu2*(DECDPUNMAX+1);/* top two pair .. */ + if (var2units>1) { /* .. [else treat 2nd as 0] */ + msu2pair+=*(msu2-1); /* .. */ + if (var2units>2) msu2pair++; /* .. +1 if any more */ + } + + /* The calculation is working in units, which may have leading zeros, */ + /* but the exponent was calculated on the assumption that they are */ + /* both left-aligned. Adjust the exponent to compensate: add the */ + /* number of leading zeros in var1 msu and subtract those in var2 msu. */ + /* [This is actually done by counting the digits and negating, as */ + /* lead1=DECDPUN-digits1, and similarly for lead2.] */ + for (pow=&powers[1]; *msu1>=*pow; pow++) exponent--; + for (pow=&powers[1]; *msu2>=*pow; pow++) exponent++; + + /* Now, if doing an integer divide or remainder, ensure that */ + /* the result will be Unit-aligned. To do this, shift the var1 */ + /* accumulator towards least if need be. (It's much easier to */ + /* do this now than to reassemble the residue afterwards, if */ + /* doing a remainder.) Also ensure the exponent is not negative. */ + if (!(op&DIVIDE)) { + Unit *u; /* work */ + /* save the initial 'false' padding of var1, in digits */ + var1initpad=(var1units-D2U(lhs->digits))*DECDPUN; + /* Determine the shift to do. 
*/
+      if (exponent<0) cut=-exponent;
+       else cut=DECDPUN-exponent%DECDPUN;
+      decShiftToLeast(var1, var1units, cut);
+      exponent+=cut;                     /* maintain numerical value */
+      var1initpad-=cut;                  /* .. and reduce padding */
+      /* clean any most-significant units which were just emptied */
+      for (u=msu1; cut>=DECDPUN; cut-=DECDPUN, u--) *u=0;
+      } /* align */
+     else {                              /* is DIVIDE */
+      maxexponent=lhs->exponent-rhs->exponent; /* save */
+      /* optimization: if the first iteration will just produce 0, */
+      /* preadjust to skip it [valid for DIVIDE only] */
+      if (*msu1<*msu2) {
+        var2ulen--;                      /* shift down */
+        exponent-=DECDPUN;               /* update the exponent */
+        }
+      }
+
+    /* ---- start the long-division loops ------------------------------ */
+    accunits=0;                          /* no units accumulated yet */
+    accdigits=0;                         /* .. or digits */
+    accnext=acc+acclength-1;             /* -> msu of acc [NB: allows digits+1] */
+    for (;;) {                           /* outer forever loop */
+      thisunit=0;                        /* current unit assumed 0 */
+      /* find the next unit */
+      for (;;) {                         /* inner forever loop */
+        /* strip leading zero units [from either pre-adjust or from */
+        /* subtract last time around].  Leave at least one unit. */
+        for (; *msu1==0 && msu1>var1; msu1--) var1units--;
+
+        if (var1units<var2ulen) break;   /* var1 too low for subtract */
+        if (var1units==var2ulen) {       /* unit-by-unit compare needed */
+          /* compare the two numbers, from msu */
+          const Unit *pv1, *pv2;
+          Unit v2;                       /* units to compare */
+          pv2=msu2;                      /* -> msu */
+          for (pv1=msu1; ; pv1--, pv2--) {
+            /* v1=*pv1 -- always OK */
+            v2=0;                        /* assume in padding */
+            if (pv2>=var2) v2=*pv2;      /* in range */
+            if (*pv1!=v2) break;         /* no longer the same */
+            if (pv1==var1) break;        /* done; leave pv1 as is */
+            }
+          /* here when all inspected or a difference seen */
+          if (*pv1<v2) break;            /* var1 too low to subtract */
+          if (*pv1==v2) {                /* var1 == var2 */
+            /* reach here if var1 and var2 are identical; subtraction */
+            /* would increase digit by one, and the residue will be 0 so */
+            /* the calculation is done; leave the loop with residue=0. */
+            thisunit++;                  /* as though subtracted */
+            *var1=0;                     /* set var1 to 0 */
+            var1units=1;                 /* .. */
+            break;                       /* from inner */
+            } /* var1 == var2 */
+          /* *pv1>v2.  Prepare for real subtraction; the lengths are equal */
+          /* Estimate the multiplier (there's always a msu1-1)... */
+          /* Bring in two units of var2 to provide a good estimate. */
+          mult=(Int)(((eInt)*msu1*(DECDPUNMAX+1)+*(msu1-1))/msu2pair);
+          } /* lengths the same */
+         else { /* var1units > var2ulen, so subtraction is safe */
+          /* The var2 msu is one unit towards the lsu of the var1 msu, */
+          /* so only one unit for var2 can be used. */
+          mult=(Int)(((eInt)*msu1*(DECDPUNMAX+1)+*(msu1-1))/msu2plus);
+          }
+        if (mult==0) mult=1;             /* must always be at least 1 */
+        /* subtraction needed; var1 is > var2 */
+        thisunit=(Unit)(thisunit+mult);  /* accumulate */
+        /* subtract var1-var2, into var1; only the overlap needs */
+        /* processing, as this is an in-place calculation */
+        shift=var2ulen-var2units;
+        #if DECTRACE
+          decDumpAr('1', &var1[shift], var1units-shift);
+          decDumpAr('2', var2, var2units);
+          printf("m=%ld\n", -mult);
+        #endif
+        decUnitAddSub(&var1[shift], var1units-shift,
+                      var2, var2units, 0,
+                      &var1[shift], -mult);
+        #if DECTRACE
+          decDumpAr('#', &var1[shift], var1units-shift);
+        #endif
+        /* var1 now probably has leading zeros; these are removed at the */
+        /* top of the inner loop.
*/ + } /* inner loop */ + + /* The next unit has been calculated in full; unless it's a */ + /* leading zero, add to acc */ + if (accunits!=0 || thisunit!=0) { /* is first or non-zero */ + *accnext=thisunit; /* store in accumulator */ + /* account exactly for the new digits */ + if (accunits==0) { + accdigits++; /* at least one */ + for (pow=&powers[1]; thisunit>=*pow; pow++) accdigits++; + } + else accdigits+=DECDPUN; + accunits++; /* update count */ + accnext--; /* ready for next */ + if (accdigits>reqdigits) break; /* have enough digits */ + } + + /* if the residue is zero, the operation is done (unless divide */ + /* or divideInteger and still not enough digits yet) */ + if (*var1==0 && var1units==1) { /* residue is 0 */ + if (op&(REMAINDER|REMNEAR)) break; + if ((op&DIVIDE) && (exponent<=maxexponent)) break; + /* [drop through if divideInteger] */ + } + /* also done enough if calculating remainder or integer */ + /* divide and just did the last ('units') unit */ + if (exponent==0 && !(op&DIVIDE)) break; + + /* to get here, var1 is less than var2, so divide var2 by the per- */ + /* Unit power of ten and go for the next digit */ + var2ulen--; /* shift down */ + exponent-=DECDPUN; /* update the exponent */ + } /* outer loop */ + + /* ---- division is complete --------------------------------------- */ + /* here: acc has at least reqdigits+1 of good results (or fewer */ + /* if early stop), starting at accnext+1 (its lsu) */ + /* var1 has any residue at the stopping point */ + /* accunits is the number of digits collected in acc */ + if (accunits==0) { /* acc is 0 */ + accunits=1; /* show have a unit .. */ + accdigits=1; /* .. */ + *accnext=0; /* .. whose value is 0 */ + } + else accnext++; /* back to last placed */ + /* accnext now -> lowest unit of result */ + + residue=0; /* assume no residue */ + if (op&DIVIDE) { + /* record the presence of any residue, for rounding */ + if (*var1!=0 || var1units>1) residue=1; + else { /* no residue */ + /* Had an exact division; clean up spurious trailing 0s. */ + /* There will be at most DECDPUN-1, from the final multiply, */ + /* and then only if the result is non-0 (and even) and the */ + /* exponent is 'loose'. */ + #if DECDPUN>1 + Unit lsu=*accnext; + if (!(lsu&0x01) && (lsu!=0)) { + /* count the trailing zeros */ + Int drop=0; + for (;; drop++) { /* [will terminate because lsu!=0] */ + if (exponent>=maxexponent) break; /* don't chop real 0s */ + #if DECDPUN<=4 + if ((lsu-QUOT10(lsu, drop+1) + *powers[drop+1])!=0) break; /* found non-0 digit */ + #else + if (lsu%powers[drop+1]!=0) break; /* found non-0 digit */ + #endif + exponent++; + } + if (drop>0) { + accunits=decShiftToLeast(accnext, accunits, drop); + accdigits=decGetDigits(accnext, accunits); + accunits=D2U(accdigits); + /* [exponent was adjusted in the loop] */ + } + } /* neither odd nor 0 */ + #endif + } /* exact divide */ + } /* divide */ + else /* op!=DIVIDE */ { + /* check for coefficient overflow */ + if (accdigits+exponent>reqdigits) { + *status|=DEC_Division_impossible; + break; + } + if (op & (REMAINDER|REMNEAR)) { + /* [Here, the exponent will be 0, because var1 was adjusted */ + /* appropriately.] */ + Int postshift; /* work */ + Flag wasodd=0; /* integer was odd */ + Unit *quotlsu; /* for save */ + Int quotdigits; /* .. 
*/
+
+        bits=lhs->bits;                  /* remainder sign is always as lhs */
+
+        /* Fastpath when residue is truly 0 is worthwhile [and */
+        /* simplifies the code below] */
+        if (*var1==0 && var1units==1) {  /* residue is 0 */
+          Int exp=lhs->exponent;         /* save min(exponents) */
+          if (rhs->exponent<exp) exp=rhs->exponent;
+          decNumberZero(res);            /* 0 coefficient */
+          #if DECSUBSET
+          if (set->extended)
+          #endif
+          res->exponent=exp;             /* .. with proper exponent */
+          res->bits=(uByte)(bits&DECNEG); /* [cleaned] */
+          decFinish(res, set, &residue, status); /* might clamp */
+          break;
+          }
+        /* note if the quotient was odd */
+        if (*accnext & 0x01) wasodd=1;   /* acc is odd */
+        quotlsu=accnext;                 /* save in case need to reinspect */
+        quotdigits=accdigits;            /* .. */
+
+        /* treat the residue, in var1, as the value to return, via acc */
+        /* calculate the unused zero digits.  This is the smaller of: */
+        /*   var1 initial padding (saved above) */
+        /*   var2 residual padding, which happens to be given by: */
+        postshift=var1initpad+exponent-lhs->exponent+rhs->exponent;
+        /* [the 'exponent' term accounts for the shifts during divide] */
+        if (var1initpad<postshift) postshift=var1initpad;
+
+        /* shift var1 the requested amount, and adjust its digits */
+        var1units=decShiftToLeast(var1, var1units, postshift);
+        accnext=var1;
+        accdigits=decGetDigits(var1, var1units);
+        accunits=D2U(accdigits);
+
+        exponent=lhs->exponent;          /* exponent is smaller of lhs & rhs */
+        if (rhs->exponent<exponent) exponent=rhs->exponent;
+
+        /* Now correct the result if doing remainderNear; if it */
+        /* (looking just at coefficients) is > rhs/2, or == rhs/2 and */
+        /* the integer was odd then the result should be rem-rhs. */
+        if (op&REMNEAR) {
+          Int compare, tarunits;         /* work */
+          Unit *up;                      /* .. */
+          /* calculate remainder*2 into the var1 buffer (which has */
+          /* 'headroom' of an extra unit and hence enough space) */
+          /* [a dedicated 'double' loop would be faster, here] */
+          tarunits=decUnitAddSub(accnext, accunits, accnext, accunits,
+                                 0, accnext, 1);
+          /* decDumpAr('r', accnext, tarunits); */
+
+          /* Here, accnext (var1) holds tarunits Units with twice the */
+          /* remainder's coefficient, which must now be compared to the */
+          /* RHS.  The remainder's exponent may be smaller than the RHS's. */
+          compare=decUnitCompare(accnext, tarunits, rhs->lsu, D2U(rhs->digits),
+                                 rhs->exponent-exponent);
+          if (compare==BADINT) {         /* deep trouble */
+            *status|=DEC_Insufficient_storage;
+            break;}
+
+          /* now restore the remainder by dividing by two; the lsu */
+          /* is known to be even. */
+          for (up=accnext; up<accnext+tarunits; up++) {
+            Int half;                    /* half to add to lower unit */
+            half=*up & 0x01;
+            *up/=2;                      /* [shift] */
+            if (!half) continue;
+            *(up-1)+=(DECDPUNMAX+1)/2;
+            }
+          /* [accunits still describes the original remainder length] */
+
+          if (compare>0 || (compare==0 && wasodd)) { /* adjustment needed */
+            Int exp, expunits, exprem;   /* work */
+            /* This is effectively causing round-up of the quotient, */
+            /* so if it was the rare case where it was full and all */
+            /* nines, it would overflow and hence division-impossible */
+            /* should be raised */
+            Flag allnines=0;             /* 1 if quotient all nines */
+            if (quotdigits==reqdigits) { /* could be borderline */
+              for (up=quotlsu; ; up++) {
+                if (quotdigits>DECDPUN) {
+                  if (*up!=DECDPUNMAX) break;/* non-nines */
+                  }
+                 else {                  /* this is the last Unit */
+                  if (*up==powers[quotdigits]-1) allnines=1;
+                  break;
+                  }
+                quotdigits-=DECDPUN;     /* checked those digits */
+                } /* up */
+              } /* borderline check */
+            if (allnines) {
+              *status|=DEC_Division_impossible;
+              break;}
+
+            /* rem-rhs is needed; the sign will invert.  Again, var1 */
+            /* can safely be used for the working Units array. */
+            exp=rhs->exponent-exponent;  /* RHS padding needed */
+            /* Calculate units and remainder from exponent. */
+            expunits=exp/DECDPUN;
+            exprem=exp%DECDPUN;
+            /* subtract [A+B*(-m)]; the result will always be negative */
+            accunits=-decUnitAddSub(accnext, accunits,
+                                    rhs->lsu, D2U(rhs->digits),
+                                    expunits, accnext, -(Int)powers[exprem]);
+            accdigits=decGetDigits(accnext, accunits); /* count digits exactly */
+            accunits=D2U(accdigits);     /* and recalculate the units for copy */
+            /* [exponent is as for original remainder] */
+            bits^=DECNEG;                /* flip the sign */
+            }
+          } /* REMNEAR */
+        } /* REMAINDER or REMNEAR */
+      } /* not DIVIDE */
+
+    /* Set exponent and bits */
+    res->exponent=exponent;
+    res->bits=(uByte)(bits&DECNEG);      /* [cleaned] */
+
+    /* Now the coefficient. */
+    decSetCoeff(res, set, accnext, accdigits, &residue, status);
+
+    decFinish(res, set, &residue, status); /* final cleanup */
+
+    #if DECSUBSET
+    /* If a divide then strip trailing zeros if subset [after round] */
+    if (!set->extended && (op==DIVIDE)) decTrim(res, set, 0, &dropped);
+    #endif
+    } while(0);                          /* end protected */
+
+  if (varalloc!=NULL) free(varalloc);    /* drop any storage used */
+  if (allocacc!=NULL) free(allocacc);    /* .. */
+  #if DECSUBSET
+  if (allocrhs!=NULL) free(allocrhs);    /* .. */
+  if (alloclhs!=NULL) free(alloclhs);    /* .. */
+  #endif
+  return res;
+  } /* decDivideOp */
+
+/* ------------------------------------------------------------------ */
+/* decMultiplyOp -- multiplication operation                           */
+/*                                                                     */
+/*  This routine performs the multiplication C=A x B.                  */
+/*                                                                     */
+/*   res is C, the result.  C may be A and/or B (e.g., X=X*X)          */
+/*   lhs is A                                                          */
+/*   rhs is B                                                          */
+/*   set is the context                                                */
+/*   status is the usual accumulator                                   */
+/*                                                                     */
+/* C must have space for set->digits digits.                           */
+/*                                                                     */
+/* ------------------------------------------------------------------ */
+/* 'Classic' multiplication is used rather than Karatsuba, as the      */
+/* latter would give only a minor improvement for the short numbers    */
+/* expected to be handled most (and uses much more memory).            */
+/*                                                                     */
+/* There are two major paths here: the general-purpose ('old code')    */
+/* path which handles all DECDPUN values, and a fastpath version       */
+/* which is used if 64-bit ints are available, DECDPUN<=4, and more    */
+/* than two calls to decUnitAddSub would be made.                      */
+/*                                                                     */
+/* The fastpath version lumps units together into 8-digit or 9-digit   */
+/* chunks, and also uses a lazy carry strategy to minimise expensive   */
+/* 64-bit divisions.  The chunks are then broken apart again into      */
+/* units for continuing processing.  Despite this overhead, the        */
+/* fastpath can speed up some 16-digit operations by 10x (and much     */
+/* more for higher-precision calculations).                            */
+/*                                                                     */
+/* A buffer always has to be used for the accumulator; in the          */
+/* fastpath, buffers are also always needed for the chunked copies     */
+/* of the operand coefficients.                                        */
+/* Static buffers are larger than needed just for multiply, to allow   */
+/* for calls from other operations (notably exp).                      */
+/* ------------------------------------------------------------------ */
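The lazy-carry strategy described above can be shown in isolation. A self-contained editorial sketch with fixed two-chunk operands rather than the variable-length arrays used here; a partial product is at most (10**9-1)**2, which is about 10**18, so roughly 18 of them fit in a 64-bit slot before any carry must be resolved:

    #include <stdio.h>
    #include <stdint.h>

    #define CHUNKBASE 1000000000U           /* base 10**9 chunks */

    int main(void) {
      uint32_t a[2]={999999999U, 1U};       /* 1999999999, lsd chunk first */
      uint32_t b[2]={999999999U, 1U};
      uint64_t acc[4]={0};                  /* lazy 64-bit accumulator */
      int i, j;
      for (i=0; i<2; i++)                   /* all partial products, no carries */
        for (j=0; j<2; j++)
          acc[i+j]+=(uint64_t)a[i]*b[j];
      for (i=0; i<3; i++) {                 /* single final carry resolution */
        acc[i+1]+=acc[i]/CHUNKBASE;
        acc[i]%=CHUNKBASE;
      }
      /* prints 3 999999996 000000001, i.e. 1999999999 squared */
      printf("%llu %09llu %09llu\n", (unsigned long long)acc[2],
             (unsigned long long)acc[1], (unsigned long long)acc[0]);
      return 0;
    }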
+#define FASTMUL (DECUSE64 && DECDPUN<5)
+static decNumber * decMultiplyOp(decNumber *res, const decNumber *lhs,
+                                 const decNumber *rhs, decContext *set,
+                                 uInt *status) {
+  Int   accunits;                  /* Units of accumulator in use */
+  Int   exponent;                  /* work */
+  Int   residue=0;                 /* rounding residue */
+  uByte bits;                      /* result sign */
+  Unit *acc;                       /* -> accumulator Unit array */
+  Int   needbytes;                 /* size calculator */
+  void *allocacc=NULL;             /* -> allocated accumulator, iff allocated */
+  Unit  accbuff[SD2U(DECBUFFER*4+1)]; /* buffer (+1 for DECBUFFER==0, */
+                                   /* *4 for calls from other operations) */
+  const Unit *mer, *mermsup;       /* work */
+  Int   madlength;                 /* Units in multiplicand */
+  Int   shift;                     /* Units to shift multiplicand by */
+
+  #if FASTMUL
+    /* if DECDPUN is 1 or 3 work in base 10**9, otherwise */
+    /* (DECDPUN is 2 or 4) then work in base 10**8 */
+    #if DECDPUN & 1                /* odd */
+      #define FASTBASE 1000000000  /* base */
+      #define FASTDIGS 9           /* digits in base */
+      #define FASTLAZY 18          /* carry resolution point [1->18] */
+    #else
+      #define FASTBASE  100000000
+      #define FASTDIGS  8
+      #define FASTLAZY  1844       /* carry resolution point [1->1844] */
+    #endif
+    /* three buffers are used, two for chunked copies of the operands */
+    /* (base 10**8 or base 10**9) and one base 2**64 accumulator with */
+    /* lazy carry evaluation */
+    uInt  zlhibuff[(DECBUFFER*2+1)/8+1]; /* buffer (+1 for DECBUFFER==0) */
+    uInt *zlhi=zlhibuff;                 /* -> lhs array */
+    uInt *alloclhi=NULL;                 /* -> allocated buffer, iff allocated */
+    uInt  zrhibuff[(DECBUFFER*2+1)/8+1]; /* buffer (+1 for DECBUFFER==0) */
+    uInt *zrhi=zrhibuff;                 /* -> rhs array */
+    uInt *allocrhi=NULL;                 /* -> allocated buffer, iff allocated */
+    uLong zaccbuff[(DECBUFFER*2+1)/4+2]; /* buffer (+1 for DECBUFFER==0) */
+    /* [allocacc is shared for both paths, as only one will run] */
+    uLong *zacc=zaccbuff;          /* -> accumulator array for exact result */
+    #if DECDPUN==1
+    Int    zoff;                   /* accumulator offset */
+    #endif
+    uInt  *lip, *rip;              /* item pointers */
+    uInt  *lmsi, *rmsi;            /* most significant items */
+    Int    ilhs, irhs, iacc;       /* item counts in the arrays */
+    Int    lazy;                   /* lazy carry counter */
+    uLong  lcarry;                 /* uLong carry */
+    uInt   carry;                  /* carry (NB not uLong) */
+    Int    count;                  /* work */
+    const  Unit *cup;              /* .. */
+    Unit  *up;                     /* .. */
+    uLong *lp;                     /* .. */
+    Int    p;                      /* .. */
+  #endif
+
+  #if DECSUBSET
+    decNumber *alloclhs=NULL;      /* -> allocated buffer, iff allocated */
+    decNumber *allocrhs=NULL;      /* -> allocated buffer, iff allocated */
+  #endif
+
+  #if DECCHECK
+  if (decCheckOperands(res, lhs, rhs, set)) return res;
+  #endif
+
+  /* precalculate result sign */
+  bits=(uByte)((lhs->bits^rhs->bits)&DECNEG);
+
+  /* handle infinities and NaNs */
+  if (SPECIALARGS) {               /* a special bit set */
+    if (SPECIALARGS & (DECSNAN | DECNAN)) { /* one or two NaNs */
+      decNaNs(res, lhs, rhs, set, status);
+      return res;}
+    /* one or two infinities; Infinity * 0 is invalid */
+    if (((lhs->bits & DECINF)==0 && ISZERO(lhs))
+      ||((rhs->bits & DECINF)==0 && ISZERO(rhs))) {
+      *status|=DEC_Invalid_operation;
+      return res;}
+    decNumberZero(res);
+    res->bits=bits|DECINF;         /* infinity */
+    return res;}
+
+  /* For best speed, as in DMSRCN [the original Rexx numerics */
+  /* module], use the shorter number as the multiplier (rhs) and */
+  /* the longer as the multiplicand (lhs) to minimise the number of */
+  /* adds (partial products) */
+  if (lhs->digits<rhs->digits) {   /* swap... */
+    const decNumber *hold=lhs;
+    lhs=rhs;
+    rhs=hold;
+    }
+
+  do {                             /* protect allocated storage */
+    #if DECSUBSET
+    if (!set->extended) {
+      /* reduce operands and set lostDigits status, as needed */
+      if (lhs->digits>set->digits) {
+        alloclhs=decRoundOperand(lhs, set, status);
+        if (alloclhs==NULL) break;
+        lhs=alloclhs;
+        }
+      if (rhs->digits>set->digits) {
+        allocrhs=decRoundOperand(rhs, set, status);
+        if (allocrhs==NULL) break;
+        rhs=allocrhs;
+        }
+      }
+    #endif
+    /* [following code does not require input rounding] */
+
+    #if FASTMUL                    /* fastpath can be used */
+    /* use the fast path if there are enough digits in the shorter */
+    /* operand to make the setup and takedown worthwhile */
+    #define NEEDTWO (DECDPUN*2)    /* within two decUnitAddSub calls */
+    if (rhs->digits>NEEDTWO) {     /* use fastpath... */
+      /* calculate the number of elements in each array */
+      ilhs=(lhs->digits+FASTDIGS-1)/FASTDIGS; /* [ceiling] */
+      irhs=(rhs->digits+FASTDIGS-1)/FASTDIGS; /* .. */
+      iacc=ilhs+irhs;
+
+      /* allocate buffers if required, as usual */
+      needbytes=ilhs*sizeof(uInt);
+      if (needbytes>(Int)sizeof(zlhibuff)) {
+        alloclhi=(uInt *)malloc(needbytes);
+        zlhi=alloclhi;}
+      needbytes=irhs*sizeof(uInt);
+      if (needbytes>(Int)sizeof(zrhibuff)) {
+        allocrhi=(uInt *)malloc(needbytes);
+        zrhi=allocrhi;}
+
+      /* Allocating the accumulator space needs a special case when */
+      /* DECDPUN=1 because when converting the accumulator to Units */
+      /* after the multiplication each 8-byte item becomes 9 1-byte */
+      /* units.  Therefore iacc extra bytes are needed at the front */
+      /* (rounded up to a multiple of 8 bytes), and the uLong */
+      /* accumulator starts offset the appropriate number of units */
+      /* to the right to avoid overwrite during the unchunking. */
+      needbytes=iacc*sizeof(uLong);
+      #if DECDPUN==1
+      zoff=(iacc+7)/8;             /* items to offset by */
+      needbytes+=zoff*8;
+      #endif
+      if (needbytes>(Int)sizeof(zaccbuff)) {
+        allocacc=(uLong *)malloc(needbytes);
+        zacc=(uLong *)allocacc;}
+      if (zlhi==NULL||zrhi==NULL||zacc==NULL) {
+        *status|=DEC_Insufficient_storage;
+        break;}
+
+      acc=(Unit *)zacc;            /* -> target Unit array */
+      #if DECDPUN==1
+      zacc+=zoff;                  /* start uLong accumulator to right */
+      #endif
+
+      /* assemble the chunked copies of the left and right sides */
+      for (count=lhs->digits, cup=lhs->lsu, lip=zlhi; count>0; lip++)
+        for (p=0, *lip=0; p<FASTDIGS && count>0;
+             p+=DECDPUN, cup++, count-=DECDPUN)
+          *lip+=*cup*powers[p];
+      lmsi=lip-1;                  /* save -> msi */
+      for (count=rhs->digits, cup=rhs->lsu, rip=zrhi; count>0; rip++)
+        for (p=0, *rip=0; p<FASTDIGS && count>0;
+             p+=DECDPUN, cup++, count-=DECDPUN)
+          *rip+=*cup*powers[p];
+      rmsi=rip-1;                  /* save -> msi */
+
+      /* zero the accumulator */
+      for (lp=zacc; lp<zacc+iacc; lp++) *lp=0;
+
+      /* Start the multiplication; resolving carries can dominate the */
+      /* cost of accumulating the partial products, so it is done */
+      /* lazily, every FASTLAZY adds, which is as late as is safe for */
+      /* the uLong accumulator items. */
+      lazy=FASTLAZY;                         /* carry delay count */
+      for (rip=zrhi; rip<=rmsi; rip++) {     /* over each item in rhs */
+        lip=zlhi;                            /* reset lhs item pointer */
+        for (; lip<=lmsi; lip++) {           /* over each item in lhs */
+          uLong *lpc;                        /* pointer to accumulator item */
+          lpc=zacc+(rip-zrhi)+(lip-zlhi);    /* where to accumulate */
+          *lpc+=(uLong)(*lip)*(*rip);        /* [this should in-line] */
+          }
+        lazy--;
+        if (lazy>0 && rip!=rmsi) continue;
+        lazy=FASTLAZY;                       /* reset delay count */
+        /* spin up the accumulator resolving overflows */
+        for (lp=zacc; lp<zacc+iacc; lp++) {
+          if (*lp<FASTBASE) continue;        /* it fits */
+          lcarry=*lp/FASTBASE;               /* top part [slow divide] */
+          /* lcarry can exceed 2**32-1, so check again; the */
+          /* occasional extra divide (slow) is well worth it as it */
+          /* allows a larger FASTLAZY */
+          if (lcarry<FASTBASE) carry=(uInt)lcarry;  /* [usual] */
+           else { /* two-place carry [fairly rare] */
+            uInt carry2=(uInt)(lcarry/FASTBASE);    /* top top part */
+            *(lp+2)+=carry2;                        /* add to item+2 */
+            *lp-=((uLong)FASTBASE*FASTBASE*carry2); /* [slow] */
+            carry=(uInt)(lcarry-((uLong)FASTBASE*carry2)); /* [inline] */
+            }
+          *(lp+1)+=carry;                    /* add to item above [inline] */
+          *lp-=((uLong)FASTBASE*carry);      /* [inline] */
+          } /* carry resolution */
+        } /* rip loop */
+
+      /* The multiplication is complete; convert back into units. */
+      /* This can be done in-place in the accumulator and in 32-bit */
+      /* operations, because carries were resolved after the final add. */
+      for (lp=zacc, up=acc; lp<zacc+iacc; lp++) {
+        uInt item=(uInt)*lp;                 /* decapitate to uInt */
+        for (p=0; p<FASTDIGS-DECDPUN; p+=DECDPUN, up++) {
+          uInt part=item/(DECDPUNMAX+1);
+          *up=(Unit)(item-(part*(DECDPUNMAX+1)));
+          item=part;
+          } /* p */
+        *up=(Unit)item; up++;                /* [final needs no division] */
+        } /* lp */
+      accunits=up-acc;                       /* count of units */
+      }
+     else { /* here to use units directly, without chunking ['old code'] */
+    #endif
+
+      /* if accumulator will be too long for local storage, then allocate */
+      acc=accbuff;                 /* -> assume buffer for accumulator */
+      needbytes=(D2U(lhs->digits)+D2U(rhs->digits))*sizeof(Unit);
+      if (needbytes>(Int)sizeof(accbuff)) {
+        allocacc=(Unit *)malloc(needbytes);
+        if (allocacc==NULL) {*status|=DEC_Insufficient_storage; break;}
+        acc=(Unit *)allocacc;      /* use the allocated space */
+        }
+
+      /* Now the main long multiplication loop */
+      /* Unlike the equivalent in the IBM Java implementation, there */
+      /* is no advantage in calculating from msu to lsu.  So, do it */
+      /* by the book, as it were. */
+      /* Each iteration calculates ACC=ACC+MULTAND*MULT */
+      accunits=1;                  /* accumulator starts at '0' */
+      *acc=0;                      /* .. (lsu=0) */
+      shift=0; /* no multiplicand shift at first */
+      madlength=D2U(lhs->digits); /* this won't change */
+      mermsup=rhs->lsu+D2U(rhs->digits); /* -> msu+1 of multiplier */
+
+      for (mer=rhs->lsu; mer<mermsup; mer++) {
+        /* Here, *mer is the next Unit in the multiplier to use */
+        /* If non-zero [optimization] add it... */
+        if (*mer!=0) accunits=decUnitAddSub(&acc[shift], accunits-shift,
+                                            lhs->lsu, madlength, 0,
+                                            &acc[shift], *mer)
+                                            + shift;
+         else { /* extend acc with a 0; it will be used shortly */
+          *(acc+accunits)=0; /* [this avoids length of <=0 later] */
+          accunits++;
+          }
+        /* multiply multiplicand by 10**DECDPUN for next Unit to left */
+        shift++; /* add this for 'logical length' */
+        } /* n */
+    #if FASTMUL
+      } /* unchunked units */
+    #endif
+    /* common end-path */
+    #if DECTRACE
+    decDumpAr('*', acc, accunits); /* Show exact result */
+    #endif
+
+    /* acc now contains the exact result of the multiplication, */
+    /* possibly with a leading zero unit; build the decNumber from */
+    /* it, noting if any residue */
+    res->bits=bits; /* set sign */
+    res->digits=decGetDigits(acc, accunits); /* count digits exactly */
+
+    /* There can be a 31-bit wrap in calculating the exponent. */
+    /* This can only happen if both input exponents are negative and */
+    /* both their magnitudes are large. If there was a wrap, set a */
+    /* safe very negative exponent, from which decFinalize() will */
+    /* raise a hard underflow shortly. */
+    exponent=lhs->exponent+rhs->exponent; /* calculate exponent */
+    if (lhs->exponent<0 && rhs->exponent<0 && exponent>0)
+      exponent=-2*DECNUMMAXE; /* force underflow */
+    res->exponent=exponent; /* OK to overwrite now */
+
+    /* Set the coefficient. If any rounding, residue records */
+    decSetCoeff(res, set, acc, res->digits, &residue, status);
+    decFinish(res, set, &residue, status); /* final cleanup */
+    } while(0); /* end protected */
+
+  if (allocacc!=NULL) free(allocacc); /* drop any storage used */
+  #if DECSUBSET
+  if (allocrhs!=NULL) free(allocrhs); /* .. */
+  if (alloclhs!=NULL) free(alloclhs); /* .. */
+  #endif
+  #if FASTMUL
+  if (allocrhi!=NULL) free(allocrhi); /* .. */
+  if (alloclhi!=NULL) free(alloclhi); /* .. */
+  #endif
+  return res;
+  } /* decMultiplyOp */
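+
+/* [A worked example of the wrap guard above, assuming the usual */
+/* two's-complement 32-bit Int: exponents of -1500000000 on each */
+/* operand sum to -3000000000, which wraps to +1294967296; the guard */
+/* sees a positive sum from two negative inputs and substitutes */
+/* -2*DECNUMMAXE (-1999999998), from which decFinalize raises a */
+/* hard underflow.] */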
+
+/* ------------------------------------------------------------------ */
+/* decExpOp -- effect exponentiation */
+/* */
+/* This computes C = exp(A) */
+/* */
+/* res is C, the result. C may be A */
+/* rhs is A */
+/* set is the context; note that rounding mode has no effect */
+/* */
+/* C must have space for set->digits digits. status is updated but */
+/* not set. */
+/* */
+/* Restrictions: */
+/* */
+/* digits, emax, and -emin in the context must be less than */
+/* 2*DEC_MAX_MATH (1999998), and the rhs must be within these */
+/* bounds or a zero. This is an internal routine, so these */
+/* restrictions are contractual and not enforced. */
+/* */
+/* A finite result is rounded using DEC_ROUND_HALF_EVEN; it will */
+/* almost always be correctly rounded, but may be up to 1 ulp in */
+/* error in rare cases. */
+/* */
+/* Finite results will always be full precision and Inexact, except */
+/* when A is a zero or -Infinity (giving 1 or 0 respectively). */
+/* ------------------------------------------------------------------ */
+/* The approach used here is similar to the algorithm described in */
+/* */
+/* Variable Precision Exponential Function, T. E. Hull and */
+/* A. Abrham, ACM Transactions on Mathematical Software, Vol 12 #2, */
+/* pp79-91, ACM, June 1986. */
+/* */
+/* with the main difference being that the iterations in the series */
+/* evaluation are terminated dynamically (which does not require the */
+/* extra variable-precision variables which are expensive in this */
+/* context). */
+/* */
+/* The error analysis in Hull & Abrham's paper applies except for the */
+/* round-off error accumulation during the series evaluation. This */
+/* code does not precalculate the number of iterations and so cannot */
+/* use Horner's scheme. Instead, the accumulation is done at double- */
+/* precision, which ensures that the additions of the terms are exact */
+/* and do not accumulate round-off (and any round-off errors in the */
+/* terms themselves move 'to the right' faster than they can */
+/* accumulate). This code also extends the calculation by allowing, */
+/* in the spirit of other decNumber operators, the input to be more */
+/* precise than the result (the precision used is based on the more */
+/* precise of the input or requested result). */
+/* */
+/* Implementation notes: */
+/* */
+/* 1. This is separated out as decExpOp so it can be called from */
+/* other mathematical functions (notably Ln) with a wider range */
+/* than normal. In particular, it can handle the slightly wider */
+/* (double) range needed by Ln, which has to be able to calculate */
+/* exp(-x) where x can be the tiniest number (Ntiny). */
+/* */
+/* 2. Normalizing x to be <=0.1 (instead of <=1) reduces loop */
+/* iterations by approximately a third with additional (although */
+/* diminishing) returns as the range is reduced to even smaller */
+/* fractions. However, h (the power of 10 used to correct the */
+/* result at the end, see below) must be kept <=8 as otherwise */
+/* the final result cannot be computed. Hence the leverage is a */
+/* sliding value (8-h), where potentially the range is reduced */
+/* more for smaller values. */
+/* */
+/* The leverage that can be applied in this way is severely */
+/* limited by the cost of the raise-to-the-power at the end, */
+/* which dominates when the number of iterations is small (less */
+/* than ten) or when rhs is short. As an example, the adjustment */
+/* x**10,000,000 needs 31 multiplications, all but one full-width. */
+/* */
+/* 3. The restrictions (especially precision) could be raised with */
+/* care, but the full decNumber range seems very hard within the */
+/* 32-bit limits. */
+/* */
+/* 4. The working precisions for the static buffers are twice the */
+/* obvious size to allow for calls from decNumberPower. */
+/* ------------------------------------------------------------------ */
+static decNumber *decExpOp(decNumber *res, const decNumber *rhs,
+                           decContext *set, uInt *status) {
+  uInt ignore=0; /* working status */
+  Int h; /* adjusted exponent for 0.xxxx */
+  Int p; /* working precision */
+  Int residue; /* rounding residue */
+  uInt needbytes; /* for space calculations */
+  const decNumber *x=rhs; /* (may point to safe copy later) */
+  decContext aset, tset, dset; /* working contexts */
+  Int comp; /* work */
+
+  /* the argument is often copied to normalize it, so (unusually) it */
+  /* is treated like other buffers, using DECBUFFER, +1 in case */
+  /* DECBUFFER is 0 */
+  decNumber bufr[D2N(DECBUFFER*2+1)];
+  decNumber *allocrhs=NULL; /* non-NULL if rhs buffer allocated */
+
+  /* the working precision will be no more than set->digits+8+1 */
+  /* so for on-stack buffers DECBUFFER+9 is used, +1 in case DECBUFFER */
+  /* is 0 (and twice that for the accumulator) */
+
+  /* buffer for t, term (working precision plus) */
+  decNumber buft[D2N(DECBUFFER*2+9+1)];
+  decNumber *allocbuft=NULL; /* -> allocated buft, iff allocated */
+  decNumber *t=buft; /* term */
+  /* buffer for a, accumulator (working precision * 2), at least 9 */
+  decNumber bufa[D2N(DECBUFFER*4+18+1)];
+  decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */
+  decNumber *a=bufa; /* accumulator */
+  /* decNumber for the divisor term; this needs at most 9 digits */
+  /* and so can be fixed size [16 so can use standard context] */
+  decNumber bufd[D2N(16)];
+  decNumber *d=bufd; /* divisor */
+  decNumber numone; /* constant 1 */
+
+  #if DECCHECK
+  Int iterations=0; /* for later sanity check */
+  if (decCheckOperands(res, DECUNUSED, rhs, set)) return res;
+  #endif
+
+  do { /* protect allocated storage */
+    if (SPECIALARG) { /* handle infinities and NaNs */
+      if (decNumberIsInfinite(rhs)) { /* an infinity */
+        if (decNumberIsNegative(rhs)) /* -Infinity -> +0 */
+          decNumberZero(res);
+         else decNumberCopy(res, rhs); /* +Infinity -> self */
+        }
+       else decNaNs(res, rhs, NULL, set, status); /* a NaN */
+      break;}
+
+    if (ISZERO(rhs)) { /* zeros -> exact 1 */
+      decNumberZero(res); /* make clean 1 */
+      *res->lsu=1; /* .. */
+      break;} /* [no status to set] */
+
+    /* e**x when 0 < x < 0.66 is < 1+3x/2, hence can fast-path */
+    /* positive and negative tiny cases which will result in inexact */
+    /* 1. This also allows the later add-accumulate to always be */
+    /* exact (because its length will never be more than twice the */
+    /* working precision). */
+    /* The comparator (tiny) needs just one digit, so use the */
+    /* decNumber d for it (reused as the divisor, etc., below); its */
+    /* exponent is such that if x is positive it will have */
+    /* set->digits-1 zeros between the decimal point and the digit, */
+    /* which is 4, and if x is negative one more zero there as the */
+    /* more precise result will be of the form 0.9999999 rather than */
+    /* 1.0000001. Hence, tiny will be 0.0000004 if digits=7 and x>0 */
+    /* or 0.00000004 if digits=7 and x<0. If RHS is not larger than */
+    /* this then the result will be 1.000000 */
+    decNumberZero(d); /* clean */
+    *d->lsu=4; /* set 4 .. */
+    d->exponent=-set->digits; /* * 10**(-d) */
+    if (decNumberIsNegative(rhs)) d->exponent--; /* negative case */
+    comp=decCompare(d, rhs, 1); /* signless compare */
+    if (comp==BADINT) {
+      *status|=DEC_Insufficient_storage;
+      break;}
+    if (comp>=0) { /* rhs < d */
+      Int shift=set->digits-1;
+      decNumberZero(res); /* set 1 */
+      *res->lsu=1; /* .. */
+      res->digits=decShiftToMost(res->lsu, 1, shift);
+      res->exponent=-shift; /* make 1.0000... */
+      *status|=DEC_Inexact | DEC_Rounded; /* .. inexactly */
+      break;} /* tiny */
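+
+    /* [A worked example of the fast path above, assuming */
+    /* set->digits=7 and rhs=3E-8: tiny is built as d=4E-7, and */
+    /* |rhs| <= d, so the result is coefficient 1000000 with */
+    /* exponent -6, i.e. 1.000000, with Inexact and Rounded set.] */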
+
+    /* set up the context to be used for calculating a, as this is */
+    /* used on both paths below */
+    decContextDefault(&aset, DEC_INIT_DECIMAL64);
+    /* accumulator bounds are as requested (could underflow) */
+    aset.emax=set->emax; /* usual bounds */
+    aset.emin=set->emin; /* .. */
+    aset.clamp=0; /* and no concrete format */
+
+    /* calculate the adjusted (Hull & Abrham) exponent (where the */
+    /* decimal point is just to the left of the coefficient msd) */
+    h=rhs->exponent+rhs->digits;
+    /* if h>8 then 10**h cannot be calculated safely; however, when */
+    /* h=8 then exp(|rhs|) will be at least exp(1E+7) which is at */
+    /* least 6.59E+4342944, so (due to the restriction on Emax/Emin) */
+    /* overflow (or underflow to 0) is guaranteed -- so this case can */
+    /* be handled by simply forcing the appropriate excess */
+    if (h>8) { /* overflow/underflow */
+      /* set up here so Power call below will over or underflow to */
+      /* zero; set accumulator to either 2 or 0.02 */
+      /* [stack buffer for a is always big enough for this] */
+      decNumberZero(a);
+      *a->lsu=2; /* not 1 but < exp(1) */
+      if (decNumberIsNegative(rhs)) a->exponent=-2; /* make 0.02 */
+      h=8; /* clamp so 10**h computable */
+      p=9; /* set a working precision */
+      }
+     else { /* h<=8 */
+      Int maxlever=(rhs->digits>8?1:0);
+      /* [could/should increase this for precisions >40 or so, too] */
+
+      /* if h is 8, cannot normalize to a lower upper limit because */
+      /* the final result will not be computable (see notes above), */
+      /* but leverage can be applied whenever h is less than 8. */
+      /* Apply as much as possible, up to a maximum of MAXLEVER */
+      /* digits, which sets the tradeoff against the cost of the */
+      /* later a**(10**h). */
+      /* As h is increased, the working precision below also */
+      /* increases to compensate for the "constant digits at the */
+      /* front" effect. */
+      Int lever=MINI(8-h, maxlever); /* leverage attainable */
+      Int use=-rhs->digits-lever; /* exponent to use for RHS */
+      h+=lever; /* apply leverage selected */
+      if (h<0) { /* clamp */
+        use+=h; /* [may end up subnormal] */
+        h=0;
+        }
+      /* Take a copy of RHS if it needs normalization (true whenever x>=1) */
+      if (rhs->exponent!=use) {
+        decNumber *newrhs=bufr; /* assume will fit on stack */
+        needbytes=sizeof(decNumber)+(D2U(rhs->digits)-1)*sizeof(Unit);
+        if (needbytes>sizeof(bufr)) { /* need malloc space */
+          allocrhs=(decNumber *)malloc(needbytes);
+          if (allocrhs==NULL) { /* hopeless -- abandon */
+            *status|=DEC_Insufficient_storage;
+            break;}
+          newrhs=allocrhs; /* use the allocated space */
+          }
+        decNumberCopy(newrhs, rhs); /* copy to safe space */
+        newrhs->exponent=use; /* normalize; now <1 */
+        x=newrhs; /* ready for use */
+        /* decNumberShow(x); */
+        }
+
+      /* Now use the usual power series to evaluate exp(x). The */
+      /* series starts as 1 + x + x^2/2 ... so prime ready for the */
+      /* third term by setting the term variable t=x, the accumulator */
+      /* a=1, and the divisor d=2. */
+
+      /* First determine the working precision. From Hull & Abrham */
+      /* this is set->digits+h+2. However, if x is 'over-precise' we */
+      /* need to allow for all its digits to potentially participate */
+      /* (consider an x where all the excess digits are 9s) so in */
+      /* this case use x->digits+h+2 */
+      p=MAXI(x->digits, set->digits)+h+2; /* [h<=8] */
+
+      /* a and t are variable precision, and depend on p, so space */
+      /* must be allocated for them if necessary */
+
+      /* the accumulator needs to be able to hold 2p digits so that */
+      /* the additions on the second and subsequent iterations are */
+      /* sufficiently exact. */
+      needbytes=sizeof(decNumber)+(D2U(p*2)-1)*sizeof(Unit);
+      if (needbytes>sizeof(bufa)) { /* need malloc space */
+        allocbufa=(decNumber *)malloc(needbytes);
+        if (allocbufa==NULL) { /* hopeless -- abandon */
+          *status|=DEC_Insufficient_storage;
+          break;}
+        a=allocbufa; /* use the allocated space */
+        }
+      /* the term needs to be able to hold p digits (which is */
+      /* guaranteed to be larger than x->digits, so the initial copy */
+      /* is safe); it may also be used for the raise-to-power */
+      /* calculation below, which needs an extra two digits */
+      needbytes=sizeof(decNumber)+(D2U(p+2)-1)*sizeof(Unit);
+      if (needbytes>sizeof(buft)) { /* need malloc space */
+        allocbuft=(decNumber *)malloc(needbytes);
+        if (allocbuft==NULL) { /* hopeless -- abandon */
+          *status|=DEC_Insufficient_storage;
+          break;}
+        t=allocbuft; /* use the allocated space */
+        }
+
+      decNumberCopy(t, x); /* term=x */
+      decNumberZero(a); *a->lsu=1; /* accumulator=1 */
+      decNumberZero(d); *d->lsu=2; /* divisor=2 */
+      decNumberZero(&numone); *numone.lsu=1; /* constant 1 for increment */
+
+      /* set up the contexts for calculating a, t, and d */
+      decContextDefault(&tset, DEC_INIT_DECIMAL64);
+      dset=tset;
+      /* accumulator bounds are set above, set precision now */
+      aset.digits=p*2; /* double */
+      /* term bounds avoid any underflow or overflow */
+      tset.digits=p;
+      tset.emin=DEC_MIN_EMIN; /* [emax is plenty] */
+      /* [dset.digits=16, etc., are sufficient] */
+
+      /* finally ready to roll */
+      for (;;) {
+        #if DECCHECK
+        iterations++;
+        #endif
+        /* only the status from the accumulation is interesting */
+        /* [but it should remain unchanged after first add] */
+        decAddOp(a, a, t, &aset, 0, status); /* a=a+t */
+        decMultiplyOp(t, t, x, &tset, &ignore); /* t=t*x */
+        decDivideOp(t, t, d, &tset, DIVIDE, &ignore); /* t=t/d */
+        /* the iteration ends when the term cannot affect the result, */
+        /* if rounded to p digits, which is when its value is smaller */
+        /* than the accumulator by p+1 digits. There must also be */
+        /* full precision in a. */
+        if (((a->digits+a->exponent)>=(t->digits+t->exponent+p+1))
+         && (a->digits>=p)) break;
+        decAddOp(d, d, &numone, &dset, 0, &ignore); /* d=d+1 */
+        } /* iterate */
+
+      #if DECCHECK
+      /* just a sanity check; comment out test to show always */
+      if (iterations>p+3)
+        printf("Exp iterations=%ld, status=%08lx, p=%ld, d=%ld\n",
+               iterations, *status, p, x->digits);
+      #endif
+      } /* h<=8 */
+
+    /* apply postconditioning: a=a**(10**h) -- this is calculated */
+    /* at a slightly higher precision than Hull & Abrham suggest */
+    if (h>0) {
+      Int seenbit=0; /* set once a 1-bit is seen */
+      Int i; /* counter */
+      Int n=powers[h]; /* always positive */
+      aset.digits=p+2; /* sufficient precision */
+      /* avoid the overhead and many extra digits of decNumberPower */
+      /* as all that is needed is the short 'multipliers' loop; here */
+      /* accumulate the answer into t */
+      decNumberZero(t); *t->lsu=1; /* acc=1 */
+      for (i=1;;i++){ /* for each bit [top bit ignored] */
+        /* abandon if there has been overflow or terminal underflow */
+        if (*status & (DEC_Overflow|DEC_Underflow)) { /* interesting? */
+          if (*status&DEC_Overflow || ISZERO(t)) break;}
+        n=n<<1; /* move next bit to testable position */
+        if (n<0) { /* top bit is set */
+          seenbit=1; /* OK, have a significant bit */
+          decMultiplyOp(t, t, a, &aset, status); /* acc=acc*x */
+          }
+        if (i==31) break; /* that was the last bit */
+        if (!seenbit) continue; /* no need to square 1 */
+        decMultiplyOp(t, t, t, &aset, status); /* acc=acc*acc [square] */
+        } /*i*/ /* 32 bits */
+      /* decNumberShow(t); */
+      a=t; /* and carry on using t instead of a */
+      }
+
+    /* Copy and round the result to res */
+    residue=1; /* indicate dirt to right .. */
+    if (ISZERO(a)) residue=0; /* .. unless underflowed to 0 */
+    aset.digits=set->digits; /* [use default rounding] */
+    decCopyFit(res, a, &aset, &residue, status); /* copy & shorten */
+    decFinish(res, set, &residue, status); /* cleanup/set flags */
+    } while(0); /* end protected */
+
+  if (allocrhs !=NULL) free(allocrhs); /* drop any storage used */
+  if (allocbufa!=NULL) free(allocbufa); /* .. */
+  if (allocbuft!=NULL) free(allocbuft); /* .. */
+  /* [status is handled by caller] */
+  return res;
+  } /* decExpOp */
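+
+/* [A worked example of the scheme above, assuming set->digits=7 and */
+/* A=2.7: h=1, so x is normalized to 0.27 and the series is summed */
+/* at that argument, giving a ~= 1.3099645; postconditioning then */
+/* raises a to the power 10**h=10, and 1.3099645**10 ~= 14.87973, */
+/* which is exp(2.7) rounded to the requested seven digits.] */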
+
+/* ------------------------------------------------------------------ */
+/* Initial-estimate natural logarithm table */
+/* */
+/* LNnn -- 90-entry 16-bit table for values from .10 through .99. */
+/* The result is a 4-digit encode of the coefficient (c=the */
+/* top 14 bits encoding 0-9999) and a 2-digit encode of the */
+/* exponent (e=the bottom 2 bits encoding 0-3) */
+/* */
+/* The resulting value is given by: */
+/* */
+/* v = -c * 10**(-e-3) */
+/* */
+/* where e and c are extracted from entry k = LNnn[x-10] */
+/* where x is truncated (NB) into the range 10 through 99, */
+/* and then c = k>>2 and e = k&3. */
+/* ------------------------------------------------------------------ */
+static const uShort LNnn[90] = {
+  9016,  8652,  8316,  8008,  7724,  7456,  7208,
+  6972,  6748,  6540,  6340,  6148,  5968,  5792,  5628,  5464,  5312,
+  5164,  5020,  4884,  4748,  4620,  4496,  4376,  4256,  4144,  4032,
+ 39233, 38181, 37157, 36157, 35181, 34229, 33297, 32389, 31501, 30629,
+ 29777, 28945, 28129, 27329, 26545, 25777, 25021, 24281, 23553, 22837,
+ 22137, 21445, 20769, 20101, 19445, 18801, 18165, 17541, 16925, 16321,
+ 15721, 15133, 14553, 13985, 13421, 12865, 12317, 11777, 11241, 10717,
+ 10197,  9685,  9177,  8677,  8185,  7697,  7213,  6737,  6269,  5801,
+  5341,  4889,  4437, 39930, 35534, 31186, 26886, 22630, 18418, 14254,
+ 10130,  6046, 20055};
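+
+/* [Decoding examples for the table above, as a sanity check: the */
+/* first entry, for x=10, is 9016, so c=9016>>2=2254 and e=9016&3=0, */
+/* giving v=-2254E-3=-2.254, which is ln(0.105) to four digits (the */
+/* middle of the 0.10 bucket); the last entry, for x=99, is 20055, */
+/* so c=5013 and e=3, giving v=-5013E-6=-0.005013 ~= ln(0.995).] */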
+
+/* ------------------------------------------------------------------ */
+/* decLnOp -- effect natural logarithm */
+/* */
+/* This computes C = ln(A) */
+/* */
+/* res is C, the result. C may be A */
+/* rhs is A */
+/* set is the context; note that rounding mode has no effect */
+/* */
+/* C must have space for set->digits digits. */
+/* */
+/* Notable cases: */
+/* A<0 -> Invalid */
+/* A=0 -> -Infinity (Exact) */
+/* A=+Infinity -> +Infinity (Exact) */
+/* A=1 exactly -> 0 (Exact) */
+/* */
+/* Restrictions (as for Exp): */
+/* */
+/* digits, emax, and -emin in the context must be less than */
+/* DEC_MAX_MATH+11 (1000010), and the rhs must be within these */
+/* bounds or a zero. This is an internal routine, so these */
+/* restrictions are contractual and not enforced. */
+/* */
+/* A finite result is rounded using DEC_ROUND_HALF_EVEN; it will */
+/* almost always be correctly rounded, but may be up to 1 ulp in */
+/* error in rare cases. */
+/* ------------------------------------------------------------------ */
+/* The result is calculated using Newton's method, with each */
+/* iteration calculating a' = a + x * exp(-a) - 1. See, for example, */
+/* Epperson 1989. */
+/* */
+/* The iteration ends when the adjustment x*exp(-a)-1 is tiny enough. */
+/* This has to be calculated at the sum of the precision of x and the */
+/* working precision. */
+/* */
+/* Implementation notes: */
+/* */
+/* 1. This is separated out as decLnOp so it can be called from */
+/* other mathematical functions (e.g., Log 10) with a wider range */
+/* than normal. In particular, it can handle the slightly wider */
+/* (+9+2) range needed by a power function. */
+/* */
+/* 2. This function is about 10x slower than exp, as it typically */
+/* needs 4-6 iterations for short numbers, and the extra */
+/* precision needed adds a squaring effect, twice. */
+/* */
+/* 3. Fastpaths are included for ln(10) and ln(2), up to length 40, */
+/* as these are common requests. ln(10) is used by log10(x). */
+/* */
+/* 4. An iteration might be saved by widening the LNnn table, and */
+/* would certainly save at least one if it were made ten times */
+/* bigger, too (for truncated fractions 0.100 through 0.999). */
+/* However, for most practical evaluations, at least four or five */
+/* iterations will be needed -- so this would only speed up by */
+/* 20-25% and that probably does not justify increasing the table */
+/* size. */
+/* */
+/* 5. The static buffers are larger than might be expected to allow */
+/* for calls from decNumberPower. */
+/* ------------------------------------------------------------------ */
+static decNumber *decLnOp(decNumber *res, const decNumber *rhs,
+                          decContext *set, uInt *status) {
+  uInt ignore=0; /* working status accumulator */
+  uInt needbytes; /* for space calculations */
+  Int residue; /* rounding residue */
+  Int r; /* rhs=f*10**r [see below] */
+  Int p; /* working precision */
+  Int pp; /* precision for iteration */
+  Int t; /* work */
+
+  /* buffers for a (accumulator, typically precision+2) and b */
+  /* (adjustment calculator, same size) */
+  decNumber bufa[D2N(DECBUFFER+12)];
+  decNumber *allocbufa=NULL; /* -> allocated bufa, iff allocated */
+  decNumber *a=bufa; /* accumulator/work */
+  decNumber bufb[D2N(DECBUFFER*2+2)];
+  decNumber *allocbufb=NULL; /* -> allocated bufb, iff allocated */
+  decNumber *b=bufb; /* adjustment/work */
+
+  decNumber numone; /* constant 1 */
+  decNumber cmp; /* work */
+  decContext aset, bset; /* working contexts */
+
+  #if DECCHECK
+  Int iterations=0; /* for later sanity check */
+  if (decCheckOperands(res, DECUNUSED, rhs, set)) return res;
+  #endif
+
+  do { /* protect allocated storage */
+    if (SPECIALARG) { /* handle infinities and NaNs */
+      if (decNumberIsInfinite(rhs)) { /* an infinity */
+        if (decNumberIsNegative(rhs)) /* -Infinity -> error */
+          *status|=DEC_Invalid_operation;
+         else decNumberCopy(res, rhs); /* +Infinity -> self */
+        }
+       else decNaNs(res, rhs, NULL, set, status); /* a NaN */
+      break;}
+
+    if (ISZERO(rhs)) { /* +/- zeros -> -Infinity */
+      decNumberZero(res); /* make clean */
+      res->bits=DECINF|DECNEG; /* set - infinity */
+      break;} /* [no status to set] */
+
+    /* Non-zero negatives are bad... */
+    if (decNumberIsNegative(rhs)) { /* -x -> error */
+      *status|=DEC_Invalid_operation;
+      break;}
+
+    /* Here, rhs is positive, finite, and in range */
+
+    /* lookaside fastpath code for ln(2) and ln(10) at common lengths */
+    if (rhs->exponent==0 && set->digits<=40) {
+      #if DECDPUN==1
+      if (rhs->lsu[0]==0 && rhs->lsu[1]==1 && rhs->digits==2) { /* ln(10) */
+      #else
+      if (rhs->lsu[0]==10 && rhs->digits==2) { /* ln(10) */
+      #endif
+        aset=*set; aset.round=DEC_ROUND_HALF_EVEN;
+        #define LN10 "2.302585092994045684017991454684364207601"
+        decNumberFromString(res, LN10, &aset);
+        *status|=(DEC_Inexact | DEC_Rounded); /* is inexact */
+        break;}
+      if (rhs->lsu[0]==2 && rhs->digits==1) { /* ln(2) */
+        aset=*set; aset.round=DEC_ROUND_HALF_EVEN;
+        #define LN2 "0.6931471805599453094172321214581765680755"
+        decNumberFromString(res, LN2, &aset);
+        *status|=(DEC_Inexact | DEC_Rounded);
+        break;}
+      } /* integer and short */
+
+    /* Determine the working precision. This is normally the */
+    /* requested precision + 2, with a minimum of 9. However, if */
+    /* the rhs is 'over-precise' then allow for all its digits to */
+    /* potentially participate (consider an rhs where all the excess */
+    /* digits are 9s) so in this case use rhs->digits+2. */
+    p=MAXI(rhs->digits, MAXI(set->digits, 7))+2;
+
+    /* Allocate space for the accumulator and the high-precision */
+    /* adjustment calculator, if necessary. The accumulator must */
+    /* be able to hold p digits, and the adjustment up to */
+    /* rhs->digits+p digits. They are also made big enough for 16 */
+    /* digits so that they can be used for calculating the initial */
+    /* estimate. */
+    needbytes=sizeof(decNumber)+(D2U(MAXI(p,16))-1)*sizeof(Unit);
+    if (needbytes>sizeof(bufa)) { /* need malloc space */
+      allocbufa=(decNumber *)malloc(needbytes);
+      if (allocbufa==NULL) { /* hopeless -- abandon */
+        *status|=DEC_Insufficient_storage;
+        break;}
+      a=allocbufa; /* use the allocated space */
+      }
+    pp=p+rhs->digits;
+    needbytes=sizeof(decNumber)+(D2U(MAXI(pp,16))-1)*sizeof(Unit);
+    if (needbytes>sizeof(bufb)) { /* need malloc space */
+      allocbufb=(decNumber *)malloc(needbytes);
+      if (allocbufb==NULL) { /* hopeless -- abandon */
+        *status|=DEC_Insufficient_storage;
+        break;}
+      b=allocbufb; /* use the allocated space */
+      }
+
+    /* Prepare an initial estimate in acc. Calculate this by */
+    /* considering the coefficient of x to be a normalized fraction, */
+    /* f, with the decimal point at far left and multiplied by */
+    /* 10**r. Then, rhs=f*10**r and 0.1<=f<1, and */
+    /* ln(x) = ln(f) + ln(10)*r */
+    /* Get the initial estimate for ln(f) from a small lookup */
+    /* table (see above) indexed by the first two digits of f, */
+    /* truncated. */
+
+    decContextDefault(&aset, DEC_INIT_DECIMAL64); /* 16-digit extended */
+    r=rhs->exponent+rhs->digits; /* 'normalised' exponent */
+    decNumberFromInt32(a, r); /* a=r */
+    decNumberFromInt32(b, 2302585); /* b=ln(10) (2.302585) */
+    b->exponent=-6; /* .. */
+    decMultiplyOp(a, a, b, &aset, &ignore); /* a=a*b */
+    /* now get top two digits of rhs into b by simple truncate and */
+    /* force to integer */
+    residue=0; /* (no residue) */
+    aset.digits=2; aset.round=DEC_ROUND_DOWN;
+    decCopyFit(b, rhs, &aset, &residue, &ignore); /* copy & shorten */
+    b->exponent=0; /* make integer */
+    t=decGetInt(b); /* [cannot fail] */
+    if (t<10) t=X10(t); /* adjust single-digit b */
+    t=LNnn[t-10]; /* look up ln(b) */
+    decNumberFromInt32(b, t>>2); /* b=ln(b) coefficient */
+    b->exponent=-(t&3)-3; /* set exponent */
+    b->bits=DECNEG; /* ln(0.10)->ln(0.99) always -ve */
+    aset.digits=16; aset.round=DEC_ROUND_HALF_EVEN; /* restore */
+    decAddOp(a, a, b, &aset, 0, &ignore); /* acc=a+b */
+    /* the initial estimate is now in a, with up to 4 digits correct. */
+    /* When rhs is at or near Nmax the estimate will be low, so we */
+    /* will approach it from below, avoiding overflow when calling exp. */
+
+    decNumberZero(&numone); *numone.lsu=1; /* constant 1 for adjustment */
+
+    /* accumulator bounds are as requested (could underflow, but */
+    /* cannot overflow) */
+    aset.emax=set->emax;
+    aset.emin=set->emin;
+    aset.clamp=0; /* no concrete format */
+    /* set up a context to be used for the multiply and subtract */
+    bset=aset;
+    bset.emax=DEC_MAX_MATH*2; /* use double bounds for the */
+    bset.emin=-DEC_MAX_MATH*2; /* adjustment calculation */
+    /* [see decExpOp call below] */
+    /* for each iteration double the number of digits to calculate, */
+    /* up to a maximum of p */
+    pp=9; /* initial precision */
+    /* [initially 9 as then the sequence starts 7+2, 16+2, and */
+    /* 34+2, which is ideal for standard-sized numbers] */
+    aset.digits=pp; /* working context */
+    bset.digits=pp+rhs->digits; /* wider context */
+    for (;;) { /* iterate */
+      #if DECCHECK
+      iterations++;
+      if (iterations>24) break; /* consider 9 * 2**24 */
+      #endif
+      /* calculate the adjustment (exp(-a)*x-1) into b. This is a */
+      /* catastrophic subtraction but it really is the difference */
+      /* from 1 that is of interest. */
+      /* Use the internal entry point to Exp as it allows the double */
+      /* range for calculating exp(-a) when a is the tiniest subnormal. */
+      a->bits^=DECNEG; /* make -a */
+      decExpOp(b, a, &bset, &ignore); /* b=exp(-a) */
+      a->bits^=DECNEG; /* restore sign of a */
+      /* now multiply by rhs and subtract 1, at the wider precision */
+      decMultiplyOp(b, b, rhs, &bset, &ignore); /* b=b*rhs */
+      decAddOp(b, b, &numone, &bset, DECNEG, &ignore); /* b=b-1 */
+
+      /* the iteration ends when the adjustment cannot affect the */
+      /* result by >=0.5 ulp (at the requested digits), which */
+      /* is when its value is smaller than the accumulator by */
+      /* set->digits+1 digits (or it is zero) -- this is a looser */
+      /* requirement than for Exp because all that happens to the */
+      /* accumulator after this is the final rounding (but note that */
+      /* there must also be full precision in a, or a=0). */
+
+      if (decNumberIsZero(b) ||
+         (a->digits+a->exponent)>=(b->digits+b->exponent+set->digits+1)) {
+        if (a->digits==p) break;
+        if (decNumberIsZero(a)) {
+          decCompareOp(&cmp, rhs, &numone, &aset, COMPARE, &ignore); /* rhs=1 ? */
+          if (cmp.lsu[0]==0) a->exponent=0; /* yes, exact 0 */
+           else *status|=(DEC_Inexact | DEC_Rounded); /* no, inexact */
+          break;
+          }
+        /* force padding if adjustment has gone to 0 before full length */
+        if (decNumberIsZero(b)) b->exponent=a->exponent-p;
+        }
+
+      /* not done yet ... */
+      decAddOp(a, a, b, &aset, 0, &ignore); /* a=a+b for next estimate */
+      if (pp==p) continue; /* precision is at maximum */
+      /* lengthen the next calculation */
+      pp=pp*2; /* double precision */
+      if (pp>p) pp=p; /* clamp to maximum */
+      aset.digits=pp; /* working context */
+      bset.digits=pp+rhs->digits; /* wider context */
+      } /* Newton's iteration */
+
+    #if DECCHECK
+    /* just a sanity check; remove the test to show always */
+    if (iterations>24)
+      printf("Ln iterations=%ld, status=%08lx, p=%ld, d=%ld\n",
+             iterations, *status, p, rhs->digits);
+    #endif
+
+    /* Copy and round the result to res */
+    residue=1; /* indicate dirt to right */
+    if (ISZERO(a)) residue=0; /* .. unless underflowed to 0 */
+    aset.digits=set->digits; /* [use default rounding] */
+    decCopyFit(res, a, &aset, &residue, status); /* copy & shorten */
+    decFinish(res, set, &residue, status); /* cleanup/set flags */
+    } while(0); /* end protected */
+
+  if (allocbufa!=NULL) free(allocbufa); /* drop any storage used */
+  if (allocbufb!=NULL) free(allocbufb); /* .. */
+  /* [status is handled by caller] */
+  return res;
+  } /* decLnOp */
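+
+/* [A worked example of the Newton iteration above, assuming x=2 and */
+/* an initial estimate a=0.6931: the adjustment is */
+/* b = 2*exp(-0.6931) - 1 ~= 0.0000472, giving a' ~= 0.6931472; the */
+/* next pass refines this to 0.69314718..., the digits of ln(2), */
+/* with the error roughly squaring on each iteration.] */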
+
+/* ------------------------------------------------------------------ */
+/* decQuantizeOp -- force exponent to requested value */
+/* */
+/* This computes C = op(A, B), where op adjusts the coefficient */
+/* of C (by rounding or shifting) such that the exponent (-scale) */
+/* of C has the value B or matches the exponent of B. */
+/* The numerical value of C will equal A, except for the effects of */
+/* any rounding that occurred. */
+/* */
+/* res is C, the result. C may be A or B */
+/* lhs is A, the number to adjust */
+/* rhs is B, the requested exponent */
+/* set is the context */
+/* quant is 1 for quantize or 0 for rescale */
+/* status is the status accumulator (this can be called without */
+/* risk of control loss) */
+/* */
+/* C must have space for set->digits digits. */
+/* */
+/* Unless there is an error or the result is infinite, the exponent */
+/* after the operation is guaranteed to be that requested. */
+/* ------------------------------------------------------------------ */
+static decNumber * decQuantizeOp(decNumber *res, const decNumber *lhs,
+                                 const decNumber *rhs, decContext *set,
+                                 Flag quant, uInt *status) {
+  #if DECSUBSET
+  decNumber *alloclhs=NULL; /* non-NULL if rounded lhs allocated */
+  decNumber *allocrhs=NULL; /* .., rhs */
+  #endif
+  const decNumber *inrhs=rhs; /* save original rhs */
+  Int reqdigits=set->digits; /* requested DIGITS */
+  Int reqexp; /* requested exponent [-scale] */
+  Int residue=0; /* rounding residue */
+  Int etiny=set->emin-(reqdigits-1);
+
+  #if DECCHECK
+  if (decCheckOperands(res, lhs, rhs, set)) return res;
+  #endif
+
+  do { /* protect allocated storage */
+    #if DECSUBSET
+    if (!set->extended) {
+      /* reduce operands and set lostDigits status, as needed */
+      if (lhs->digits>reqdigits) {
+        alloclhs=decRoundOperand(lhs, set, status);
+        if (alloclhs==NULL) break;
+        lhs=alloclhs;
+        }
+      if (rhs->digits>reqdigits) { /* [this only checks lostDigits] */
+        allocrhs=decRoundOperand(rhs, set, status);
+        if (allocrhs==NULL) break;
+        rhs=allocrhs;
+        }
+      }
+    #endif
+    /* [following code does not require input rounding] */
+
+    /* Handle special values */
+    if (SPECIALARGS) {
+      /* NaNs get usual processing */
+      if (SPECIALARGS & (DECSNAN | DECNAN))
+        decNaNs(res, lhs, rhs, set, status);
+      /* one infinity but not both is bad */
+       else if ((lhs->bits ^ rhs->bits) & DECINF)
+        *status|=DEC_Invalid_operation;
+      /* both infinity: return lhs */
+       else decNumberCopy(res, lhs); /* [nop if in place] */
+      break;
+      }
+
+    /* set requested exponent */
+    if (quant) reqexp=inrhs->exponent; /* quantize -- match exponents */
+     else { /* rescale -- use value of rhs */
+      /* Original rhs must be an integer that fits and is in range, */
+      /* which could be from -1999999997 to +999999999, thanks to */
+      /* subnormals */
+      reqexp=decGetInt(inrhs); /* [cannot fail] */
+      }
+
+    #if DECSUBSET
+    if (!set->extended) etiny=set->emin; /* no subnormals */
+    #endif
+
+    if (reqexp==BADINT /* bad (rescale only) or .. */
+     || reqexp==BIGODD || reqexp==BIGEVEN /* very big (ditto) or .. */
+     || (reqexp<etiny) /* < lowest */
+     || (reqexp>set->emax)) { /* > emax */
+      *status|=DEC_Invalid_operation;
+      break;}
+
+    /* the RHS has been processed, so it can be overwritten now if necessary */
+    if (ISZERO(lhs)) { /* zero coefficient unchanged */
+      decNumberCopy(res, lhs); /* [nop if in place] */
+      res->exponent=reqexp; /* .. just set exponent */
+      #if DECSUBSET
+      if (!set->extended) res->bits=0; /* subset specification; no -0 */
+      #endif
+      }
+     else { /* non-zero lhs */
+      Int adjust=reqexp-lhs->exponent; /* digit adjustment needed */
+      /* if adjusted coefficient will definitely not fit, give up now */
+      if ((lhs->digits-adjust)>reqdigits) {
+        *status|=DEC_Invalid_operation;
+        break;
+        }
+
+      if (adjust>0) { /* increasing exponent */
+        /* this will decrease the length of the coefficient by adjust */
+        /* digits, and must round as it does so */
+        decContext workset; /* work */
+        workset=*set; /* clone rounding, etc. */
+        workset.digits=lhs->digits-adjust; /* set requested length */
+        /* [note that the latter can be <1, here] */
+        decCopyFit(res, lhs, &workset, &residue, status); /* fit to result */
+        decApplyRound(res, &workset, residue, status); /* .. and round */
+        residue=0; /* [used] */
+        /* If just rounded a 999s case, exponent will be off by one; */
+        /* adjust back (after checking space), if so. */
+        if (res->exponent>reqexp) {
+          /* re-check needed, e.g., for quantize(0.9999, 0.001) under */
+          /* set->digits==3 */
+          if (res->digits==reqdigits) { /* cannot shift by 1 */
+            *status&=~(DEC_Inexact | DEC_Rounded); /* [clean these] */
+            *status|=DEC_Invalid_operation;
+            break;
+            }
+          res->digits=decShiftToMost(res->lsu, res->digits, 1); /* shift */
+          res->exponent--; /* (re)adjust the exponent */
+          }
+        #if DECSUBSET
+        if (ISZERO(res) && !set->extended) res->bits=0; /* subset; no -0 */
+        #endif
+        } /* increase */
+       else /* adjust<=0 */ { /* decreasing or = exponent */
+        /* this will increase the length of the coefficient by -adjust */
+        /* digits, by adding zero or more trailing zeros; this is */
+        /* already checked for fit, above */
+        decNumberCopy(res, lhs); /* [it will fit] */
+        /* if padding needed (adjust<0), add it now... */
+        if (adjust<0) {
+          res->digits=decShiftToMost(res->lsu, res->digits, -adjust);
+          res->exponent+=adjust; /* adjust the exponent */
+          }
+        } /* decrease */
+      } /* non-zero */
+
+    /* Check for overflow [do not use Finalize in this case, as an */
+    /* overflow here is a "don't fit" situation] */
+    if (res->exponent>set->emax-res->digits+1) { /* too big */
+      *status|=DEC_Invalid_operation;
+      break;
+      }
+     else {
+      decFinalize(res, set, &residue, status); /* set subnormal flags */
+      *status&=~DEC_Underflow; /* suppress Underflow [754r] */
+      }
+    } while(0); /* end protected */
+
+  #if DECSUBSET
+  if (allocrhs!=NULL) free(allocrhs); /* drop any storage used */
+  if (alloclhs!=NULL) free(alloclhs); /* .. */
+  #endif
+  return res;
+  } /* decQuantizeOp */
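+
+/* [Worked examples of the paths above: with set->digits=5, */
+/* quantize(2.17, 0.001) has adjust=-1 and pads the coefficient to */
+/* give 2.170 exactly; with set->digits=3, quantize(0.9999, 0.001) */
+/* rounds up to coefficient 100 with exponent -2, one above the */
+/* requested -3, and since the coefficient is already reqdigits */
+/* long it cannot be shifted, so the operation is Invalid.] */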
+
+/* ------------------------------------------------------------------ */
+/* decCompareOp -- compare, min, or max two Numbers */
+/* */
+/* This computes C = A ? B and carries out one of several operations: */
+/* COMPARE -- returns the signum (as a number) giving the */
+/* result of a comparison unless one or both */
+/* operands is a NaN (in which case a NaN results) */
+/* COMPSIG -- as COMPARE except that a quiet NaN raises */
+/* Invalid operation. */
+/* COMPMAX -- returns the larger of the operands, using the */
+/* 754r maxnum operation */
+/* COMPMAXMAG -- ditto, comparing absolute values */
+/* COMPMIN -- the 754r minnum operation */
+/* COMPMINMAG -- ditto, comparing absolute values */
+/* COMPTOTAL -- returns the signum (as a number) giving the */
+/* result of a comparison using 754r total ordering */
+/* */
+/* res is C, the result. C may be A and/or B (e.g., X=X?X) */
+/* lhs is A */
+/* rhs is B */
+/* set is the context */
+/* op is the operation flag */
+/* status is the usual accumulator */
+/* */
+/* C must have space for one digit for COMPARE or set->digits for */
+/* COMPMAX, COMPMIN, COMPMAXMAG, or COMPMINMAG. */
+/* ------------------------------------------------------------------ */
+/* The emphasis here is on speed for common cases, and avoiding */
+/* coefficient comparison if possible. */
+/* ------------------------------------------------------------------ */
+static decNumber *decCompareOp(decNumber *res, const decNumber *lhs,
+                               const decNumber *rhs, decContext *set,
+                               Flag op, uInt *status) {
+  #if DECSUBSET
+  decNumber *alloclhs=NULL; /* non-NULL if rounded lhs allocated */
+  decNumber *allocrhs=NULL; /* .., rhs */
+  #endif
+  Int result=0; /* default result value */
+  uByte merged; /* work */
+
+  #if DECCHECK
+  if (decCheckOperands(res, lhs, rhs, set)) return res;
+  #endif
+
+  do { /* protect allocated storage */
+    #if DECSUBSET
+    if (!set->extended) {
+      /* reduce operands and set lostDigits status, as needed */
+      if (lhs->digits>set->digits) {
+        alloclhs=decRoundOperand(lhs, set, status);
+        if (alloclhs==NULL) {result=BADINT; break;}
+        lhs=alloclhs;
+        }
+      if (rhs->digits>set->digits) {
+        allocrhs=decRoundOperand(rhs, set, status);
+        if (allocrhs==NULL) {result=BADINT; break;}
+        rhs=allocrhs;
+        }
+      }
+    #endif
+    /* [following code does not require input rounding] */
+
+    /* If total ordering then handle differing signs 'up front' */
+    if (op==COMPTOTAL) { /* total ordering */
+      if (decNumberIsNegative(lhs) && !decNumberIsNegative(rhs)) {
+        result=-1;
+        break;
+        }
+      if (!decNumberIsNegative(lhs) && decNumberIsNegative(rhs)) {
+        result=+1;
+        break;
+        }
+      }
+
+    /* handle NaNs specially; let infinities drop through */
+    /* This assumes sNaN (even just one) leads to NaN. */
+    merged=(lhs->bits | rhs->bits) & (DECSNAN | DECNAN);
+    if (merged) { /* a NaN bit set */
+      if (op==COMPARE); /* result will be NaN */
+       else if (op==COMPSIG) /* treat qNaN as sNaN */
+        *status|=DEC_Invalid_operation | DEC_sNaN;
+       else if (op==COMPTOTAL) { /* total ordering, always finite */
+        /* signs are known to be the same; compute the ordering here */
+        /* as if the signs are both positive, then invert for negatives */
+        if (!decNumberIsNaN(lhs)) result=-1;
+         else if (!decNumberIsNaN(rhs)) result=+1;
+        /* here if both NaNs */
+         else if (decNumberIsSNaN(lhs) && decNumberIsQNaN(rhs)) result=-1;
+         else if (decNumberIsQNaN(lhs) && decNumberIsSNaN(rhs)) result=+1;
+         else { /* both NaN or both sNaN */
+          /* now it just depends on the payload */
+          result=decUnitCompare(lhs->lsu, D2U(lhs->digits),
+                                rhs->lsu, D2U(rhs->digits), 0);
+          /* [Error not possible, as these are 'aligned'] */
+          } /* both same NaNs */
+        if (decNumberIsNegative(lhs)) result=-result;
+        break;
+        } /* total order */
+
+       else if (merged & DECSNAN); /* sNaN -> qNaN */
+       else { /* here if MIN or MAX and one or two quiet NaNs */
+        /* min or max -- 754r rules ignore single NaN */
+        if (!decNumberIsNaN(lhs) || !decNumberIsNaN(rhs)) {
+          /* just one NaN; force choice to be the non-NaN operand */
+          op=COMPMAX;
+          if (lhs->bits & DECNAN) result=-1; /* pick rhs */
+           else result=+1; /* pick lhs */
+          break;
+          }
+        } /* max or min */
+      op=COMPNAN; /* use special path */
+      decNaNs(res, lhs, rhs, set, status); /* propagate NaN */
+      break;
+      }
+    /* have numbers */
+    if (op==COMPMAXMAG || op==COMPMINMAG) result=decCompare(lhs, rhs, 1);
+     else result=decCompare(lhs, rhs, 0); /* sign matters */
+    } while(0); /* end protected */
+
+  if (result==BADINT) *status|=DEC_Insufficient_storage; /* rare */
+   else {
+    if (op==COMPARE || op==COMPSIG || op==COMPTOTAL) { /* returning signum */
+      if (op==COMPTOTAL && result==0) {
+        /* operands are numerically equal or same NaN (and same sign, */
+        /* tested first); if identical, leave result 0 */
+        if (lhs->exponent!=rhs->exponent) {
+          if (lhs->exponent<rhs->exponent) result=-1;
+           else result=+1;
+          if (decNumberIsNegative(lhs)) result=-result;
+          } /* lexp!=rexp */
+        } /* total-order by exponent */
+      decNumberZero(res); /* [always a valid result] */
+      if (result!=0) { /* must be -1 or +1 */
+        *res->lsu=1;
+        if (result<0) res->bits=DECNEG;
+        }
+      }
+     else if (op==COMPNAN); /* special, drop through */
+     else { /* MAX or MIN, non-NaN result */
+      Int residue=0; /* rounding accumulator */
+      /* choose the operand for the result */
+      const decNumber *choice;
+      if (result==0) { /* operands are numerically equal */
+        /* choose according to sign then exponent (see 754r) */
+        uByte slhs=(lhs->bits & DECNEG);
+        uByte srhs=(rhs->bits & DECNEG);
+        #if DECSUBSET
+        if (!set->extended) { /* subset: force left-hand */
+          op=COMPMAX;
+          result=+1;
+          }
+         else
+        #endif
+        if (slhs!=srhs) { /* signs differ */
+          if (slhs) result=-1; /* rhs is max */
+           else result=+1; /* lhs is max */
+          }
+         else if (slhs && srhs) { /* both negative */
+          if (lhs->exponent<rhs->exponent) result=+1;
+           else result=-1;
+          /* [if equal, use lhs, technically identical] */
+          }
+         else { /* both positive */
+          if (lhs->exponent>rhs->exponent) result=+1;
+           else result=-1;
+          /* [ditto] */
+          }
+        } /* numerically equal */
+      /* here result will be non-0; reverse if looking for MIN */
+      if (op==COMPMIN || op==COMPMINMAG) result=-result;
+      choice=(result>0 ? lhs : rhs); /* choose */
+      /* copy chosen to result, rounding if need be */
+      decCopyFit(res, choice, set, &residue, status);
+      decFinish(res, set, &residue, status);
+      }
+    }
+  #if DECSUBSET
+  if (allocrhs!=NULL) free(allocrhs); /* free any storage used */
+  if (alloclhs!=NULL) free(alloclhs); /* .. */
+  #endif
+  return res;
+  } /* decCompareOp */
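+
+/* [An illustration of the COMPTOTAL tie-breaking above: equal values */
+/* with different exponents are ordered by exponent, inverted for */
+/* negatives, so under total ordering -1 < -1.00 < -0 < 0 < 1.00 < 1; */
+/* for NaNs the full order runs -NaN < -sNaN < -Infinity < ... < */
+/* +Infinity < +sNaN < +NaN.] */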
+
+/* ------------------------------------------------------------------ */
+/* decCompare -- compare two decNumbers by numerical value */
+/* */
+/* This routine compares A ? B without altering them. */
+/* */
+/* Arg1 is A, a decNumber which is not a NaN */
+/* Arg2 is B, a decNumber which is not a NaN */
+/* Arg3 is 1 for a sign-independent compare, 0 otherwise */
+/* */
+/* returns -1, 0, or 1 for A<B, A==B, or A>B, or BADINT if failure */
+/* (the only possible failure is an allocation error) */
+/* ------------------------------------------------------------------ */
+static Int decCompare(const decNumber *lhs, const decNumber *rhs,
+                      Flag abs) {
+  Int result; /* result value */
+  Int sigr; /* rhs signum */
+  Int compare; /* work */
+
+  result=1; /* assume signum(lhs) */
+  if (ISZERO(lhs)) result=0;
+  if (abs) {
+    if (ISZERO(rhs)) return result; /* LHS wins or both 0 */
+    /* RHS is non-zero */
+    if (result==0) return -1; /* LHS is 0; RHS wins */
+    /* [here, both non-zero, result=1] */
+    }
+   else { /* signs matter */
+    if (result && decNumberIsNegative(lhs)) result=-1;
+    sigr=1; /* compute signum(rhs) */
+    if (ISZERO(rhs)) sigr=0;
+     else if (decNumberIsNegative(rhs)) sigr=-1;
+    if (result > sigr) return +1; /* L > R, return 1 */
+    if (result < sigr) return -1; /* L < R, return -1 */
+    if (result==0) return 0; /* both 0 */
+    }
+
+  /* signums are the same; both are non-zero */
+  if ((lhs->bits | rhs->bits) & DECINF) { /* one or more infinities */
+    if (decNumberIsInfinite(rhs)) {
+      if (decNumberIsInfinite(lhs)) result=0; /* both infinite */
+       else result=-result; /* only rhs infinite */
+      }
+    return result;
+    }
+  /* must compare the coefficients, allowing for exponents */
+  if (lhs->exponent>rhs->exponent) { /* LHS exponent larger */
+    /* swap sides, and sign */
+    const decNumber *temp=lhs;
+    lhs=rhs;
+    rhs=temp;
+    result=-result;
+    }
+  compare=decUnitCompare(lhs->lsu, D2U(lhs->digits),
+                         rhs->lsu, D2U(rhs->digits),
+                         rhs->exponent-lhs->exponent);
+  if (compare!=BADINT) compare*=result; /* comparison succeeded */
+  return compare;
+  } /* decCompare */
+
+/* ------------------------------------------------------------------ */
+/* decUnitCompare -- compare two >=0 integers in Unit arrays */
+/* */
+/* This routine compares A ? B*10**E where A and B are unit arrays */
+/* A is a plain integer */
+/* B has an exponent of E (which must be non-negative) */
+/* */
+/* Arg1 is A first Unit (lsu) */
+/* Arg2 is A length in Units */
+/* Arg3 is B first Unit (lsu) */
+/* Arg4 is B length in Units */
+/* Arg5 is E (0 if the units are aligned) */
+/* */
+/* returns -1, 0, or 1 for A<B, A==B, or A>B, or BADINT if failure */
+/* (the only possible failure is an allocation error, which can */
+/* only occur if E!=0) */
+/* ------------------------------------------------------------------ */
+static Int decUnitCompare(const Unit *a, Int alength,
+                          const Unit *b, Int blength, Int exp) {
+  Unit *acc; /* accumulator for result */
+  Unit accbuff[SD2U(DECBUFFER*2+1)]; /* local buffer */
+  Unit *allocacc=NULL; /* -> allocated acc buffer, iff allocated */
+  Int accunits, need; /* units in use or needed for acc */
+  const Unit *l, *r, *u; /* work */
+  Int expunits, exprem, result; /* .. */
+
+  if (exp==0) { /* aligned; fastpath */
+    if (alength>blength) return 1;
+    if (alength<blength) return -1;
+    /* same number of units in both -- need unit-by-unit compare */
+    l=a+alength-1;
+    r=b+alength-1;
+    for (; l>=a; l--, r--) {
+      if (*l>*r) return 1;
+      if (*l<*r) return -1;
+      }
+    return 0; /* all units match */
+    } /* aligned */
+
+  /* Unaligned. If one is >1 unit longer than the other, padded */
+  /* approximately, then can return easily */
+  if (alength>blength+(Int)D2U(exp)) return 1;
+  if (alength+1<blength+(Int)D2U(exp)) return -1;
+
+  /* Need to do a real subtract. For this, a result buffer is */
+  /* needed even though only the sign is of interest. Its length */
+  /* needs to be the larger of alength and padded blength, +1 */
+  need=blength+D2U(exp); /* maximum real length of B */
+  if (need<alength) need=alength;
+  need+=1;
+  acc=accbuff; /* assume use local buffer */
+  if (need*sizeof(Unit)>sizeof(accbuff)) {
+    allocacc=(Unit *)malloc(need*sizeof(Unit));
+    if (allocacc==NULL) return BADINT; /* hopeless -- abandon */
+    acc=allocacc;
+    }
+  /* Calculate units and remainder from exponent. */
+  expunits=exp/DECDPUN;
+  exprem=exp%DECDPUN;
+  /* subtract [A+B*(-m)] */
+  accunits=decUnitAddSub(a, alength, b, blength, expunits, acc,
+                         -(Int)powers[exprem]);
+  /* [UnitAddSub result may have leading zeros, even on zero] */
+  if (accunits<0) result=-1; /* negative result */
+   else { /* non-negative result */
+    /* check units of the result before freeing any storage */
+    for (u=acc; u<acc+accunits-1 && *u==0;) u++;
+    result=(*u==0 ? 0 : +1);
+    }
+  /* clean up and return the result */
+  if (allocacc!=NULL) free(allocacc); /* drop any storage used */
+  return result;
+  } /* decUnitCompare */
+
+/* ------------------------------------------------------------------ */
+/* decUnitAddSub -- add or subtract two >=0 integers in Unit arrays */
+/* */
+/* This routine performs the calculation: */
+/* */
+/* C=A+(B*M) */
+/* */
+/* Where M is in the range -DECDPUNMAX through +DECDPUNMAX. */
+/* */
+/* A may be shorter or longer than B. */
+/* */
+/* Leading zeros are not removed after a calculation. The result is */
+/* either the same length as the longer of A and B (adding any */
+/* shift), or one Unit longer than that (if a Unit carry occurred). */
+/* */
+/* A and B content are not altered unless C is also A or B. */
+/* C may be the same array as A or B, but only if no zero padding is */
+/* requested (that is, C may be B only if bshift==0). */
+/* C is filled from the lsu; only those units necessary to complete */
+/* the calculation are referenced. */
+/* */
+/* Arg1 is A first Unit (lsu) */
+/* Arg2 is A length in Units */
+/* Arg3 is B first Unit (lsu) */
+/* Arg4 is B length in Units */
+/* Arg5 is B shift in Units (>=0; pads with 0 units if positive) */
+/* Arg6 is C first Unit (lsu) */
+/* Arg7 is M, the multiplier */
+/* */
+/* returns the count of Units written to C, which will be non-zero */
+/* and negated if the result is negative. That is, the sign of the */
+/* returned Int is the sign of the result (positive for zero) and */
+/* the absolute value of the Int is the count of Units. */
+/* */
+/* It is the caller's responsibility to make sure that C size is */
+/* safe, allowing space if necessary for a one-Unit carry. */
+/* */
+/* This routine is severely performance-critical; *any* change here */
+/* must be measured (timed) to assure no performance degradation. */
+/* In particular, trickery here tends to be counter-productive, as */
+/* increased complexity of code hurts register optimizations on */
+/* register-poor architectures. Avoiding divisions is nearly */
+/* always a Good Idea, however. */
+/* */
+/* Special thanks to Rick McGuire (IBM Cambridge, MA) and Dave Clark */
+/* (IBM Warwick, UK) for some of the ideas used in this routine. */
+/* ------------------------------------------------------------------ */
+static Int decUnitAddSub(const Unit *a, Int alength,
+                         const Unit *b, Int blength, Int bshift,
+                         Unit *c, Int m) {
+  const Unit *alsu=a; /* A lsu [need to remember it] */
+  Unit *clsu=c; /* C ditto */
+  Unit *minC; /* low water mark for C */
+  Unit *maxC; /* high water mark for C */
+  eInt carry=0; /* carry integer (could be Long) */
+  Int add; /* work */
+  #if DECDPUN<=4 /* myriadal, millenary, etc. */
+  Int est; /* estimated quotient */
+  #endif
+
+  #if DECTRACE
+  if (alength<1 || blength<1)
+    printf("decUnitAddSub: alen blen m %ld %ld [%ld]\n", alength, blength, m);
+  #endif
+
+  maxC=c+alength; /* A is usually the longer */
+  minC=c+blength; /* .. and B the shorter */
+  if (bshift!=0) { /* B is shifted; low As copy across */
+    minC+=bshift;
+    /* if in place [common], skip copy unless there's a gap [rare] */
+    if (a==c && bshift<=alength) {
+      c+=bshift;
+      a+=bshift;
+      }
+     else for (; c<clsu+bshift; a++, c++) { /* copy needed */
+      if (a<alsu+alength) *c=*a;
+       else *c=0;
+      }
+    }
+  if (minC>maxC) { /* swap */
+    Unit *hold=minC;
+    minC=maxC;
+    maxC=hold;
+    }
+
+  /* For speed, do the addition as two loops; the first where both A */
+  /* and B contribute, and the second (if necessary) where only one or */
+  /* other of the numbers contribute. */
+  /* Carry handling is the same (i.e., duplicated) in each case. */
+  for (; c<minC; c++) {
+    carry+=*a;
+    a++;
+    carry+=((eInt)*b)*m; /* [special-casing m=1/-1 */
+    b++; /* here is not a win] */
+    /* here carry is new Unit of digits; it could be +ve or -ve and */
+    /* magnitude up to DECDPUNMAX squared */
+    if (carry>=0 && carry<=DECDPUNMAX) { /* fastpath 0-DECDPUNMAX */
+      *c=(Unit)carry;
+      carry=0;
+      continue;
+      }
+    /* result for this unit is negative or >DECDPUNMAX */
+    #if DECDPUN==4 /* use divide-by-multiply */
+      if (carry>=0) {
+        est=(((ueInt)carry>>11)*53687)>>18;
+        *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */
+        carry=est; /* likely quotient [89%] */
+        if (*c<DECDPUNMAX+1) continue; /* was OK */
+        carry++;
+        *c-=DECDPUNMAX+1;
+        continue;
+        }
+      /* negative case */
+      carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */
+      est=(((ueInt)carry>>11)*53687)>>18;
+      *c=(Unit)(carry-est*(DECDPUNMAX+1));
+      carry=est-(DECDPUNMAX+1); /* correctly negative */
+      if (*c<DECDPUNMAX+1) continue; /* was OK */
+      carry++;
+      *c-=DECDPUNMAX+1;
+    #elif DECDPUN==3
+      if (carry>=0) {
+        est=(((ueInt)carry>>3)*16777)>>21;
+        *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */
+        carry=est; /* likely quotient [99%] */
+        if (*c<DECDPUNMAX+1) continue; /* was OK */
+        carry++;
+        *c-=DECDPUNMAX+1;
+        continue;
+        }
+      /* negative case */
+      carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */
+      est=(((ueInt)carry>>3)*16777)>>21;
+      *c=(Unit)(carry-est*(DECDPUNMAX+1));
+      carry=est-(DECDPUNMAX+1); /* correctly negative */
+      if (*c<DECDPUNMAX+1) continue; /* was OK */
+      carry++;
+      *c-=DECDPUNMAX+1;
+    #elif DECDPUN<=2
+      if (carry>=0) {
+        est=QUOT10(carry, DECDPUN);
+        *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */
+        carry=est; /* quotient */
+        continue;
+        }
+      /* negative case */
+      carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */
+      est=QUOT10(carry, DECDPUN);
+      *c=(Unit)(carry-est*(DECDPUNMAX+1));
+      carry=est-(DECDPUNMAX+1); /* correctly negative */
+    #else
+      /* remainder operator is undefined if negative, so must test */
+      if ((ueInt)carry<(DECDPUNMAX+1)*2) { /* fastpath carry +1 */
+        *c=(Unit)(carry-(DECDPUNMAX+1)); /* [helps additions] */
+        carry=1;
+        continue;
+        }
+      if (carry>=0) {
+        *c=(Unit)(carry%(DECDPUNMAX+1));
+        carry=carry/(DECDPUNMAX+1);
+        continue;
+        }
+      /* negative case */
+      carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */
+      *c=(Unit)(carry%(DECDPUNMAX+1));
+      carry=carry/(DECDPUNMAX+1)-(DECDPUNMAX+1);
+    #endif
+    } /* c */
+
+  /* now may have one or other to complete */
+  /* [pretest to avoid loop setup/shutdown] */
+  if (c<maxC) for (; c<maxC; c++) {
+    if (a<alsu+alength) { /* still in A */
+      carry+=*a;
+      a++;
+      }
+     else { /* inside B */
+      carry+=((eInt)*b)*m;
+      b++;
+      }
+    /* here carry is new Unit of digits; it could be +ve or -ve and */
+    /* magnitude up to DECDPUNMAX squared */
+    if (carry>=0 && carry<=DECDPUNMAX) { /* fastpath 0-DECDPUNMAX */
+      *c=(Unit)carry;
+      carry=0;
+      continue;
+      }
+    /* result for this unit is negative or >DECDPUNMAX */
+    #if DECDPUN==4 /* use divide-by-multiply */
+      if (carry>=0) {
+        est=(((ueInt)carry>>11)*53687)>>18;
+        *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */
+        carry=est; /* likely quotient [79.7%] */
+        if (*c<DECDPUNMAX+1) continue; /* was OK */
+        carry++;
+        *c-=DECDPUNMAX+1;
+        continue;
+        }
+      /* negative case */
+      carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */
+      est=(((ueInt)carry>>11)*53687)>>18;
+      *c=(Unit)(carry-est*(DECDPUNMAX+1));
+      carry=est-(DECDPUNMAX+1); /* correctly negative */
+      if (*c<DECDPUNMAX+1) continue; /* was OK */
+      carry++;
+      *c-=DECDPUNMAX+1;
+    #elif DECDPUN==3
+      if (carry>=0) {
+        est=(((ueInt)carry>>3)*16777)>>21;
+        *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */
+        carry=est; /* likely quotient [99%] */
+        if (*c<DECDPUNMAX+1) continue; /* was OK */
+        carry++;
+        *c-=DECDPUNMAX+1;
+        continue;
+        }
+      /* negative case */
+      carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */
+      est=(((ueInt)carry>>3)*16777)>>21;
+      *c=(Unit)(carry-est*(DECDPUNMAX+1));
+      carry=est-(DECDPUNMAX+1); /* correctly negative */
+      if (*c<DECDPUNMAX+1) continue; /* was OK */
+      carry++;
+      *c-=DECDPUNMAX+1;
+    #elif DECDPUN<=2
+      if (carry>=0) {
+        est=QUOT10(carry, DECDPUN);
+        *c=(Unit)(carry-est*(DECDPUNMAX+1)); /* remainder */
+        carry=est; /* quotient */
+        continue;
+        }
+      /* negative case */
+      carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */
+      est=QUOT10(carry, DECDPUN);
+      *c=(Unit)(carry-est*(DECDPUNMAX+1));
+      carry=est-(DECDPUNMAX+1); /* correctly negative */
+    #else
+      if ((ueInt)carry<(DECDPUNMAX+1)*2){ /* fastpath carry 1 */
+        *c=(Unit)(carry-(DECDPUNMAX+1));
+        carry=1;
+        continue;
+        }
+      /* remainder operator is undefined if negative, so must test */
+      if (carry>=0) {
+        *c=(Unit)(carry%(DECDPUNMAX+1));
+        carry=carry/(DECDPUNMAX+1);
+        continue;
+        }
+      /* negative case */
+      carry=carry+(eInt)(DECDPUNMAX+1)*(DECDPUNMAX+1); /* make positive */
+      *c=(Unit)(carry%(DECDPUNMAX+1));
+      carry=carry/(DECDPUNMAX+1)-(DECDPUNMAX+1);
+    #endif
+    } /* c */
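+
+  /* [On the divide-by-multiply constants used above: 53687/2**29 is */
+  /* just under 1/10000 (53687*10000 = 536870000 vs 2**29 = */
+  /* 536870912), and 16777/2**24 is just under 1/1000, so each 'est' */
+  /* is either the true quotient or one too small; the */
+  /* compare-and-increment after each estimate corrects the latter */
+  /* case.] */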
+
+  /* OK, all A and B processed; might still have carry or borrow */
+  /* return number of Units in the result, negated if a borrow */
+  if (carry==0) return c-clsu; /* no carry, so no more to do */
+  if (carry>0) { /* positive carry */
+    *c=(Unit)carry; /* place as new unit */
+    c++; /* .. */
+    return c-clsu;
+    }
+  /* -ve carry: it's a borrow; complement needed */
+  add=1; /* temporary carry... */
+  for (c=clsu; c<maxC; c++) { /* complement each unit */
+    add+=DECDPUNMAX-*c;
+    *c=(Unit)(add%(DECDPUNMAX+1));
+    add=add/(DECDPUNMAX+1);
+    }
+  /* add an extra unit iff it would be non-zero */
+  #if DECTRACE
+  printf("UAS borrow: add %ld, carry %ld\n", add, carry);
+  #endif
+  if ((add-carry-1)!=0) { /* borrow out of the top of C */
+    *c=(Unit)(add-carry-1);
+    c++; /* interesting, include it */
+    }
+  return clsu-c; /* -ve result indicates borrowed */
+  } /* decUnitAddSub */
+
+/* ------------------------------------------------------------------ */
+/* decTrim -- remove insignificant zeros from a number, unless all */
+/* are significant */
+/* */
+/* dn is the number to trim or normalize */
+/* set is the context to use to check for clamp */
+/* all is 1 to remove all trailing zeros, 0 for just fraction ones */
+/* dropped returns the number of discarded trailing zeros */
+/* returns dn */
+/* */
+/* If clamp is set in the context then the number of zeros trimmed */
+/* may be limited if the exponent is high. */
+/* All fields are updated as required. This is a utility operation, */
+/* so special values are unchanged and no error is possible. */
+/* ------------------------------------------------------------------ */
+static decNumber *decTrim(decNumber *dn, decContext *set, Flag all,
+                          Int *dropped) {
+  Int d, exp; /* work */
+  uInt cut; /* .. */
+  Unit *up; /* -> current Unit */
+
+  #if DECCHECK
+  if (decCheckOperands(dn, DECUNUSED, DECUNUSED, DECUNCONT)) return dn;
+  #endif
+
+  *dropped=0; /* assume no zeros dropped */
+  if ((dn->bits & DECSPECIAL) /* fast exit if special .. */
+   || (*dn->lsu & 0x01)) return dn; /* .. or odd */
+  if (ISZERO(dn)) { /* .. or 0 */
+    dn->exponent=0; /* (sign is preserved) */
+    return dn;
+    }
+
+  /* have a finite number which is even */
+  exp=dn->exponent;
+  cut=1; /* digit (1-DECDPUN) in Unit */
+  up=dn->lsu; /* -> current Unit */
+  for (d=0; d<dn->digits-1; d++) { /* [don't strip the final digit] */
+    /* slice by powers */
+    #if DECDPUN<=4
+      uInt quot=QUOT10(*up, cut);
+      if ((*up-quot*powers[cut])!=0) break; /* found non-0 digit */
+    #else
+      if (*up%powers[cut]!=0) break; /* found non-0 digit */
+    #endif
+    /* have a trailing 0 */
+    if (!all) { /* trimming */
+      /* [if exp>0 then all trailing 0s are significant for trim] */
+      if (exp<=0) { /* if digit might be significant */
+        if (exp==0) break; /* then quit */
+        exp++; /* next digit might be significant */
+        }
+      }
+    cut++; /* next power */
+    if (cut>DECDPUN) { /* need new Unit */
+      up++;
+      cut=1;
+      }
+    } /* d */
+  if (d==0) return dn; /* none to drop */
+
+  /* may need to limit drop if clamping */
+  if (set->clamp) {
+    Int maxd=set->emax-set->digits+1-dn->exponent;
+    if (maxd<=0) return dn; /* nothing possible */
+    if (d>maxd) d=maxd;
+    }
+
+  /* effect the drop */
+  decShiftToLeast(dn->lsu, D2U(dn->digits), d);
+  dn->exponent+=d; /* maintain numerical value */
+  dn->digits-=d; /* new length */
+  *dropped=d; /* report the count */
+  return dn;
+  } /* decTrim */
+
+/* ------------------------------------------------------------------ */
+/* decReverse -- reverse a Unit array in place */
+/* */
+/* ulo is the start of the array */
+/* uhi is the end of the array (highest Unit to include) */
+/* */
+/* The units ulo through uhi are reversed in place (if the number */
+/* of units is odd, the middle one is untouched). Note that the */
+/* digit(s) in each unit are unaffected. */
+/* ------------------------------------------------------------------ */
+static void decReverse(Unit *ulo, Unit *uhi) {
+  Unit temp;
+  for (; ulo<uhi; ulo++, uhi--) {
+    temp=*ulo;
+    *ulo=*uhi;
+    *uhi=temp;
+    }
+  return;
+  } /* decReverse */
+
+/* ------------------------------------------------------------------ */
+/* decShiftToMost -- shift digits in array towards most significant */
+/* */
+/* uar is the array */
+/* digits is the count of digits in use in the array */
+/* shift is the number of zeros to pad with (least significant); */
+/* it must be zero or positive */
+/* */
+/* returns the new length of the integer in the array, in digits */
+/* */
+/* No overflow is permitted (that is, the uar array must be known to */
+/* be large enough to hold the result, after shifting). */
+/* ------------------------------------------------------------------ */
+static Int decShiftToMost(Unit *uar, Int digits, Int shift) {
+  Unit *target, *source, *first; /* work */
+  Int cut; /* odd 0's to add */
+  uInt next; /* work */
+
+  if (shift==0) return digits; /* [fastpath] nothing to do */
+  if ((digits+shift)<=DECDPUN) { /* [fastpath] single-unit case */
+    *uar=(Unit)(*uar*powers[shift]);
+    return digits+shift;
+    }
+
+  next=0; /* all paths */
+  source=uar+D2U(digits)-1; /* where msu comes from */
+  target=source+D2U(shift); /* where upper part of first cut goes */
+  cut=DECDPUN-MSUDIGITS(shift); /* where to slice */
+  if (cut==0) { /* unit-boundary case */
+    for (; source>=uar; source--, target--) *target=*source;
+    }
+   else {
+    first=uar+D2U(digits+shift)-1; /* where msu of source will end up */
+    for (; source>=uar; source--, target--) {
+      /* split the source Unit and accumulate remainder for next */
+      #if DECDPUN<=4
+        uInt quot=QUOT10(*source, cut);
+        uInt rem=*source-quot*powers[cut];
+        next+=quot;
+      #else
+        uInt rem=*source%powers[cut];
+        next+=*source/powers[cut];
+      #endif
+      if (target<=first) *target=(Unit)next; /* write to target iff valid */
+      next=rem*powers[DECDPUN-cut]; /* save remainder for next Unit */
+      }
+    } /* shift-move */
+
+  /* propagate any partial unit to one below and clear the rest */
+  for (; target>=uar; target--) {
+    *target=(Unit)next;
+    next=0;
+    }
+  return digits+shift;
+  } /* decShiftToMost */
+
+/* ------------------------------------------------------------------ */
+/* decShiftToLeast -- shift digits in array towards least significant */
+/* */
+/* uar is the array */
+/* units is length of the array, in units */
+/* shift is the number of digits to remove from the lsu end; it */
+/* must be zero or positive and <= units*DECDPUN. */
+/* */
+/* returns the new length of the integer in the array, in units */
+/* */
+/* Removed digits are discarded (lost). Units not required to hold */
+/* the final result are unchanged. */
+/* ------------------------------------------------------------------ */
+static Int decShiftToLeast(Unit *uar, Int units, Int shift) {
+  Unit *target, *up; /* work */
+  Int cut, count; /* work */
+  Int quot, rem; /* for division */
+
+  if (shift==0) return units; /* [fastpath] nothing to do */
+  if (shift==units*DECDPUN) { /* [fastpath] little to do */
+    *uar=0; /* all digits cleared gives zero */
+    return 1; /* leaves just the one */
+    }
+
+  target=uar; /* both paths */
+  cut=MSUDIGITS(shift);
+  if (cut==DECDPUN) { /* unit-boundary case; easy */
+    up=uar+D2U(shift);
+    for (; up<uar+units; target++, up++) *target=*up;
+    return target-uar;
+    }
+
+  /* messier */
+  up=uar+D2U(shift-cut); /* source; correct to whole Units */
+  count=units*DECDPUN-shift; /* the maximum new length */
+  #if DECDPUN<=4
+    quot=QUOT10(*up, cut);
+  #else
+    quot=*up/powers[cut];
+  #endif
+  for (; ; target++) {
+    *target=(Unit)quot;
+    count-=(DECDPUN-cut);
+    if (count<=0) break; /* depleted */
+    up++;
+    quot=*up;
+    #if DECDPUN<=4
+      quot=QUOT10(quot, cut);
+      rem=*up-quot*powers[cut];
+    #else
+      rem=quot%powers[cut];
+      quot=quot/powers[cut];
+    #endif
+    *target=(Unit)(*target+rem*powers[DECDPUN-cut]);
+    count-=cut;
+    if (count<=0) break;
+    }
+  return target-uar+1;
+  } /* decShiftToLeast */
+
+#if DECSUBSET
+/* ------------------------------------------------------------------ */
+/* decRoundOperand -- round an operand [used for subset only] */
+/* */
+/* dn is the number to round (dn->digits is > set->digits) */
+/* set is the relevant context */
+/* status is the status accumulator */
+/* */
+/* returns an allocated decNumber with the rounded result. */
+/* */
+/* lostDigits and other status may be set by this. */
+/* */
+/* Since the input is an operand, it must not be modified. */
+/* Instead, return an allocated decNumber, rounded as required. */
+/* It is the caller's responsibility to free the allocated storage. */
+/* */
+/* If no storage is available then the result cannot be used, so NULL */
+/* is returned. */
+/* ------------------------------------------------------------------ */
+static decNumber *decRoundOperand(const decNumber *dn, decContext *set,
+                                  uInt *status) {
+  decNumber *res; /* result structure */
+  uInt newstatus=0; /* status from round */
+  Int residue=0; /* rounding accumulator */
+
+  /* Allocate storage for the returned decNumber, big enough for the */
+  /* length specified by the context */
+  res=(decNumber *)malloc(sizeof(decNumber)
+                          +(D2U(set->digits)-1)*sizeof(Unit));
+  if (res==NULL) {
+    *status|=DEC_Insufficient_storage;
+    return NULL;
+    }
+  decCopyFit(res, dn, set, &residue, &newstatus);
+  decApplyRound(res, set, residue, &newstatus);
*/ + if (newstatus & DEC_Inexact) newstatus|=DEC_Lost_digits; + *status|=newstatus; + return res; + } /* decRoundOperand */ +#endif + +/* ------------------------------------------------------------------ */ +/* decCopyFit -- copy a number, truncating the coefficient if needed */ +/* */ +/* dest is the target decNumber */ +/* src is the source decNumber */ +/* set is the context [used for length (digits) and rounding mode] */ +/* residue is the residue accumulator */ +/* status contains the current status to be updated */ +/* */ +/* (dest==src is allowed and will be a no-op if fits) */ +/* All fields are updated as required. */ +/* ------------------------------------------------------------------ */ +static void decCopyFit(decNumber *dest, const decNumber *src, + decContext *set, Int *residue, uInt *status) { + dest->bits=src->bits; + dest->exponent=src->exponent; + decSetCoeff(dest, set, src->lsu, src->digits, residue, status); + } /* decCopyFit */ + +/* ------------------------------------------------------------------ */ +/* decSetCoeff -- set the coefficient of a number */ +/* */ +/* dn is the number whose coefficient array is to be set. */ +/* It must have space for set->digits digits */ +/* set is the context [for size] */ +/* lsu -> lsu of the source coefficient [may be dn->lsu] */ +/* len is digits in the source coefficient [may be dn->digits] */ +/* residue is the residue accumulator. This has values as in */ +/* decApplyRound, and will be unchanged unless the */ +/* target size is less than len. In this case, the */ +/* coefficient is truncated and the residue is updated to */ +/* reflect the previous residue and the dropped digits. */ +/* status is the status accumulator, as usual */ +/* */ +/* The coefficient may already be in the number, or it can be an */ +/* external intermediate array. If it is in the number, lsu must == */ +/* dn->lsu and len must == dn->digits. */ +/* */ +/* Note that the coefficient length (len) may be < set->digits, and */ +/* in this case this merely copies the coefficient (or is a no-op */ +/* if dn->lsu==lsu). */ +/* */ +/* Note also that (only internally, from decQuantizeOp and */ +/* decSetSubnormal) the value of set->digits may be less than one, */ +/* indicating a round to left. This routine handles that case */ +/* correctly; caller ensures space. */ +/* */ +/* dn->digits, dn->lsu (and as required), and dn->exponent are */ +/* updated as necessary. dn->bits (sign) is unchanged. */ +/* */ +/* DEC_Rounded status is set if any digits are discarded. */ +/* DEC_Inexact status is set if any non-zero digits are discarded, or */ +/* incoming residue was non-0 (implies rounded) */ +/* ------------------------------------------------------------------ */ +/* mapping array: maps 0-9 to canonical residues, so that a residue */ +/* can be adjusted in the range [-1, +1] and achieve correct rounding */ +/* 0 1 2 3 4 5 6 7 8 9 */ +static const uByte resmap[10]={0, 3, 3, 3, 3, 5, 7, 7, 7, 7}; +static void decSetCoeff(decNumber *dn, decContext *set, const Unit *lsu, + Int len, Int *residue, uInt *status) { + Int discard; /* number of digits to discard */ + uInt cut; /* cut point in Unit */ + const Unit *up; /* work */ + Unit *target; /* .. */ + Int count; /* .. */ + #if DECDPUN<=4 + uInt temp; /* .. 
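+/* ------------------------------------------------------------------ */
+/* [Editorial sketch -- not part of this patch] The resmap table      */
+/* above folds the guard digit into {0,3,5,7} so that adding a prior  */
+/* sticky bit of 1 can never cross a rounding threshold (3+1=4 stays  */
+/* below half, 7+1=8 stays above half):                               */
+/*
+   // guard: first discarded digit; sticky: 1 if any later discarded
+   // digit was non-zero; result has the decApplyRound residue meaning
+   static int canonical_residue(int guard, int sticky) {
+     static const int resmap[10] = {0, 3, 3, 3, 3, 5, 7, 7, 7, 7};
+     return resmap[guard] + (sticky ? 1 : 0);
+     }
+*/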
*/ + #endif + + discard=len-set->digits; /* digits to discard */ + if (discard<=0) { /* no digits are being discarded */ + if (dn->lsu!=lsu) { /* copy needed */ + /* copy the coefficient array to the result number; no shift needed */ + count=len; /* avoids D2U */ + up=lsu; + for (target=dn->lsu; count>0; target++, up++, count-=DECDPUN) + *target=*up; + dn->digits=len; /* set the new length */ + } + /* dn->exponent and residue are unchanged, record any inexactitude */ + if (*residue!=0) *status|=(DEC_Inexact | DEC_Rounded); + return; + } + + /* some digits must be discarded ... */ + dn->exponent+=discard; /* maintain numerical value */ + *status|=DEC_Rounded; /* accumulate Rounded status */ + if (*residue>1) *residue=1; /* previous residue now to right, so reduce */ + + if (discard>len) { /* everything, +1, is being discarded */ + /* guard digit is 0 */ + /* residue is all the number [NB could be all 0s] */ + if (*residue<=0) { /* not already positive */ + count=len; /* avoids D2U */ + for (up=lsu; count>0; up++, count-=DECDPUN) if (*up!=0) { /* found non-0 */ + *residue=1; + break; /* no need to check any others */ + } + } + if (*residue!=0) *status|=DEC_Inexact; /* record inexactitude */ + *dn->lsu=0; /* coefficient will now be 0 */ + dn->digits=1; /* .. */ + return; + } /* total discard */ + + /* partial discard [most common case] */ + /* here, at least the first (most significant) discarded digit exists */ + + /* spin up the number, noting residue during the spin, until get to */ + /* the Unit with the first discarded digit. When reach it, extract */ + /* it and remember its position */ + count=0; + for (up=lsu;; up++) { + count+=DECDPUN; + if (count>=discard) break; /* full ones all checked */ + if (*up!=0) *residue=1; + } /* up */ + + /* here up -> Unit with first discarded digit */ + cut=discard-(count-DECDPUN)-1; + if (cut==DECDPUN-1) { /* unit-boundary case (fast) */ + Unit half=(Unit)powers[DECDPUN]>>1; + /* set residue directly */ + if (*up>=half) { + if (*up>half) *residue=7; + else *residue+=5; /* add sticky bit */ + } + else { /* digits<=0) { /* special for Quantize/Subnormal :-( */ + *dn->lsu=0; /* .. result is 0 */ + dn->digits=1; /* .. 
*/ + } + else { /* shift to least */ + count=set->digits; /* now digits to end up with */ + dn->digits=count; /* set the new length */ + up++; /* move to next */ + /* on unit boundary, so shift-down copy loop is simple */ + for (target=dn->lsu; count>0; target++, up++, count-=DECDPUN) + *target=*up; + } + } /* unit-boundary case */ + + else { /* discard digit is in low digit(s), and not top digit */ + uInt discard1; /* first discarded digit */ + uInt quot, rem; /* for divisions */ + if (cut==0) quot=*up; /* is at bottom of unit */ + else /* cut>0 */ { /* it's not at bottom of unit */ + #if DECDPUN<=4 + quot=QUOT10(*up, cut); + rem=*up-quot*powers[cut]; + #else + rem=*up%powers[cut]; + quot=*up/powers[cut]; + #endif + if (rem!=0) *residue=1; + } + /* discard digit is now at bottom of quot */ + #if DECDPUN<=4 + temp=(quot*6554)>>16; /* fast /10 */ + /* Vowels algorithm here not a win (9 instructions) */ + discard1=quot-X10(temp); + quot=temp; + #else + discard1=quot%10; + quot=quot/10; + #endif + /* here, discard1 is the guard digit, and residue is everything */ + /* else [use mapping array to accumulate residue safely] */ + *residue+=resmap[discard1]; + cut++; /* update cut */ + /* here: up -> Unit of the array with bottom digit */ + /* cut is the division point for each Unit */ + /* quot holds the uncut high-order digits for the current unit */ + if (set->digits<=0) { /* special for Quantize/Subnormal :-( */ + *dn->lsu=0; /* .. result is 0 */ + dn->digits=1; /* .. */ + } + else { /* shift to least needed */ + count=set->digits; /* now digits to end up with */ + dn->digits=count; /* set the new length */ + /* shift-copy the coefficient array to the result number */ + for (target=dn->lsu; ; target++) { + *target=(Unit)quot; + count-=(DECDPUN-cut); + if (count<=0) break; + up++; + quot=*up; + #if DECDPUN<=4 + quot=QUOT10(quot, cut); + rem=*up-quot*powers[cut]; + #else + rem=quot%powers[cut]; + quot=quot/powers[cut]; + #endif + *target=(Unit)(*target+rem*powers[DECDPUN-cut]); + count-=cut; + if (count<=0) break; + } /* shift-copy loop */ + } /* shift to least */ + } /* not unit boundary */ + + if (*residue!=0) *status|=DEC_Inexact; /* record inexactitude */ + return; + } /* decSetCoeff */ + +/* ------------------------------------------------------------------ */ +/* decApplyRound -- apply pending rounding to a number */ +/* */ +/* dn is the number, with space for set->digits digits */ +/* set is the context [for size and rounding mode] */ +/* residue indicates pending rounding, being any accumulated */ +/* guard and sticky information. It may be: */ +/* 6-9: rounding digit is >5 */ +/* 5: rounding digit is exactly half-way */ +/* 1-4: rounding digit is <5 and >0 */ +/* 0: the coefficient is exact */ +/* -1: as 1, but the hidden digits are subtractive, that */ +/* is, of the opposite sign to dn. In this case the */ +/* coefficient must be non-0. This case occurs when */ +/* subtracting a small number (which can be reduced to */ +/* a sticky bit); see decAddOp. */ +/* status is the status accumulator, as usual */ +/* */ +/* This routine applies rounding while keeping the length of the */ +/* coefficient constant. 
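+/* ------------------------------------------------------------------ */
+/* [Editorial sketch -- not part of this patch] The residue encoding  */
+/* above reduces each rounding mode to comparisons against 5; for     */
+/* example round-half-even in isolation (negative residues are only   */
+/* acted on by the directed modes in the switch below):               */
+/*
+   // residue: 0 exact, 1-4 below half, 5 exactly half, 6-9 above half
+   // lsd_odd: least significant kept digit is odd; returns 1 to bump
+   static int bump_half_even(int residue, int lsd_odd) {
+     if (residue > 5) return 1;              // above half: round up
+     if (residue == 5 && lsd_odd) return 1;  // half: round to even
+     return 0;                               // otherwise truncate
+     }
+*/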
The exponent and status are unchanged */ +/* except if: */ +/* */ +/* -- the coefficient was increased and is all nines (in which */ +/* case Overflow could occur, and is handled directly here so */ +/* the caller does not need to re-test for overflow) */ +/* */ +/* -- the coefficient was decreased and becomes all nines (in which */ +/* case Underflow could occur, and is also handled directly). */ +/* */ +/* All fields in dn are updated as required. */ +/* */ +/* ------------------------------------------------------------------ */ +static void decApplyRound(decNumber *dn, decContext *set, Int residue, + uInt *status) { + Int bump; /* 1 if coefficient needs to be incremented */ + /* -1 if coefficient needs to be decremented */ + + if (residue==0) return; /* nothing to apply */ + + bump=0; /* assume a smooth ride */ + + /* now decide whether, and how, to round, depending on mode */ + switch (set->round) { + case DEC_ROUND_05UP: { /* round zero or five up (for reround) */ + /* This is the same as DEC_ROUND_DOWN unless there is a */ + /* positive residue and the lsd of dn is 0 or 5, in which case */ + /* it is bumped; when residue is <0, the number is therefore */ + /* bumped down unless the final digit was 1 or 6 (in which */ + /* case it is bumped down and then up -- a no-op) */ + Int lsd5=*dn->lsu%5; /* get lsd and quintate */ + if (residue<0 && lsd5!=1) bump=-1; + else if (residue>0 && lsd5==0) bump=1; + /* [bump==1 could be applied directly; use common path for clarity] */ + break;} /* r-05 */ + + case DEC_ROUND_DOWN: { + /* no change, except if negative residue */ + if (residue<0) bump=-1; + break;} /* r-d */ + + case DEC_ROUND_HALF_DOWN: { + if (residue>5) bump=1; + break;} /* r-h-d */ + + case DEC_ROUND_HALF_EVEN: { + if (residue>5) bump=1; /* >0.5 goes up */ + else if (residue==5) { /* exactly 0.5000... */ + /* 0.5 goes up iff [new] lsd is odd */ + if (*dn->lsu & 0x01) bump=1; + } + break;} /* r-h-e */ + + case DEC_ROUND_HALF_UP: { + if (residue>=5) bump=1; + break;} /* r-h-u */ + + case DEC_ROUND_UP: { + if (residue>0) bump=1; + break;} /* r-u */ + + case DEC_ROUND_CEILING: { + /* same as _UP for positive numbers, and as _DOWN for negatives */ + /* [negative residue cannot occur on 0] */ + if (decNumberIsNegative(dn)) { + if (residue<0) bump=-1; + } + else { + if (residue>0) bump=1; + } + break;} /* r-c */ + + case DEC_ROUND_FLOOR: { + /* same as _UP for negative numbers, and as _DOWN for positive */ + /* [negative residue cannot occur on 0] */ + if (!decNumberIsNegative(dn)) { + if (residue<0) bump=-1; + } + else { + if (residue>0) bump=1; + } + break;} /* r-f */ + + default: { /* e.g., DEC_ROUND_MAX */ + *status|=DEC_Invalid_context; + #if DECTRACE || (DECCHECK && DECVERB) + printf("Unknown rounding mode: %d\n", set->round); + #endif + break;} + } /* switch */ + + /* now bump the number, up or down, if need be */ + if (bump==0) return; /* no action required */ + + /* Simply use decUnitAddSub unless bumping up and the number is */ + /* all nines. In this special case set to 100... explicitly */ + /* and adjust the exponent by one (as otherwise could overflow */ + /* the array) */ + /* Similarly handle all-nines result if bumping down. */ + if (bump>0) { + Unit *up; /* work */ + uInt count=dn->digits; /* digits to be checked */ + for (up=dn->lsu; ; up++) { + if (count<=DECDPUN) { + /* this is the last Unit (the msu) */ + if (*up!=powers[count]-1) break; /* not still 9s */ + /* here if it, too, is all nines */ + *up=(Unit)powers[count-1]; /* here 999 -> 100 etc. 
*/ + for (up=up-1; up>=dn->lsu; up--) *up=0; /* others all to 0 */ + dn->exponent++; /* and bump exponent */ + /* [which, very rarely, could cause Overflow...] */ + if ((dn->exponent+dn->digits)>set->emax+1) { + decSetOverflow(dn, set, status); + } + return; /* done */ + } + /* a full unit to check, with more to come */ + if (*up!=DECDPUNMAX) break; /* not still 9s */ + count-=DECDPUN; + } /* up */ + } /* bump>0 */ + else { /* -1 */ + /* here checking for a pre-bump of 1000... (leading 1, all */ + /* other digits zero) */ + Unit *up, *sup; /* work */ + uInt count=dn->digits; /* digits to be checked */ + for (up=dn->lsu; ; up++) { + if (count<=DECDPUN) { + /* this is the last Unit (the msu) */ + if (*up!=powers[count-1]) break; /* not 100.. */ + /* here if have the 1000... case */ + sup=up; /* save msu pointer */ + *up=(Unit)powers[count]-1; /* here 100 in msu -> 999 */ + /* others all to all-nines, too */ + for (up=up-1; up>=dn->lsu; up--) *up=(Unit)powers[DECDPUN]-1; + dn->exponent--; /* and bump exponent */ + + /* iff the number was at the subnormal boundary (exponent=etiny) */ + /* then the exponent is now out of range, so it will in fact get */ + /* clamped to etiny and the final 9 dropped. */ + /* printf(">> emin=%d exp=%d sdig=%d\n", set->emin, */ + /* dn->exponent, set->digits); */ + if (dn->exponent+1==set->emin-set->digits+1) { + if (count==1 && dn->digits==1) *sup=0; /* here 9 -> 0[.9] */ + else { + *sup=(Unit)powers[count-1]-1; /* here 999.. in msu -> 99.. */ + dn->digits--; + } + dn->exponent++; + *status|=DEC_Underflow | DEC_Subnormal | DEC_Inexact | DEC_Rounded; + } + return; /* done */ + } + + /* a full unit to check, with more to come */ + if (*up!=0) break; /* not still 0s */ + count-=DECDPUN; + } /* up */ + + } /* bump<0 */ + + /* Actual bump needed. Do it. */ + decUnitAddSub(dn->lsu, D2U(dn->digits), uarrone, 1, 0, dn->lsu, bump); + } /* decApplyRound */ + +#if DECSUBSET +/* ------------------------------------------------------------------ */ +/* decFinish -- finish processing a number */ +/* */ +/* dn is the number */ +/* set is the context */ +/* residue is the rounding accumulator (as in decApplyRound) */ +/* status is the accumulator */ +/* */ +/* This finishes off the current number by: */ +/* 1. If not extended: */ +/* a. Converting a zero result to clean '0' */ +/* b. Reducing positive exponents to 0, if would fit in digits */ +/* 2. Checking for overflow and subnormals (always) */ +/* Note this is just Finalize when no subset arithmetic. */ +/* All fields are updated as required. */ +/* ------------------------------------------------------------------ */ +static void decFinish(decNumber *dn, decContext *set, Int *residue, + uInt *status) { + if (!set->extended) { + if ISZERO(dn) { /* value is zero */ + dn->exponent=0; /* clean exponent .. */ + dn->bits=0; /* .. 
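+/* ------------------------------------------------------------------ */
+/* [Editorial sketch -- not part of this patch] The all-nines special */
+/* case in decApplyRound above keeps the coefficient length constant: */
+/* at three digits, bumping 999E+0 up would need four digits (1000),  */
+/* so the coefficient is rewritten and the exponent bumped instead:   */
+/*
+   // 999E+0 + bump  ->  100E+1  (both denote 1000; length is still 3)
+   static void bump_up_all_nines(long *coeff, int *exp, int digits) {
+     long pow = 1;
+     int i;
+     for (i = 1; i < digits; i++) pow *= 10;  // 10^(digits-1)
+     *coeff = pow;                            // 999... -> 100...
+     (*exp)++;                                // value is preserved
+     }
+*/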
and sign */
+    return;                         /* no error possible */
+    }
+   if (dn->exponent>=0) {           /* non-negative exponent */
+    /* >0; reduce to integer if possible */
+    if (set->digits >= (dn->exponent+dn->digits)) {
+      dn->digits=decShiftToMost(dn->lsu, dn->digits, dn->exponent);
+      dn->exponent=0;
+      }
+    }
+   } /* !extended */
+
+  decFinalize(dn, set, residue, status);
+  } /* decFinish */
+#endif
+
+/* ------------------------------------------------------------------ */
+/* decFinalize -- final check, clamp, and round of a number           */
+/*                                                                    */
+/*   dn is the number                                                 */
+/*   set is the context                                               */
+/*   residue is the rounding accumulator (as in decApplyRound)        */
+/*   status is the status accumulator                                 */
+/*                                                                    */
+/* This finishes off the current number by checking for subnormal     */
+/* results, applying any pending rounding, checking for overflow,     */
+/* and applying any clamping.                                         */
+/* Underflow and overflow conditions are raised as appropriate.       */
+/* All fields are updated as required.                                */
+/* ------------------------------------------------------------------ */
+static void decFinalize(decNumber *dn, decContext *set, Int *residue,
+                        uInt *status) {
+  Int shift;                            /* shift needed if clamping */
+  Int tinyexp=set->emin-dn->digits+1;   /* precalculate subnormal boundary */
+
+  /* Must be careful, here, when checking the exponent as the */
+  /* adjusted exponent could overflow 31 bits [because it may already */
+  /* be up to twice the expected]. */
+
+  /* First test for subnormal.  This must be done before any final */
+  /* round as the result could be rounded to Nmin or 0. */
+  if (dn->exponent<=tinyexp) {          /* prefilter */
+    Int comp;
+    decNumber nmin;
+    /* A very nasty case here is dn == Nmin and residue<0 */
+    if (dn->exponent<tinyexp) {
+      /* strictly below Nmin's exponent: certainly subnormal */
+      decSetSubnormal(dn, set, residue, status);
+      return;
+      }
+    /* Equals case: only subnormal if dn=Nmin and negative residue */
+    decNumberZero(&nmin);
+    nmin.lsu[0]=1;
+    nmin.exponent=set->emin;
+    comp=decCompare(dn, &nmin, 1);      /* (signless compare) */
+    if (comp==BADINT) {                 /* oops */
+      *status|=DEC_Insufficient_storage;   /* abandon... */
+      return;
+      }
+    if (*residue<0 && comp==0) {        /* neg residue and dn==Nmin */
+      decApplyRound(dn, set, *residue, status);   /* might force down */
+      decSetSubnormal(dn, set, residue, status);
+      return;
+      }
+    }
+
+  /* now apply any pending round (this could raise overflow). */
+  if (*residue!=0) decApplyRound(dn, set, *residue, status);
+
+  /* Check for overflow [redundant in the 'rare' case] or clamp */
+  if (dn->exponent<=set->emax-set->digits+1) return;   /* neither needed */
+
+  /* here when might have an overflow or clamp to do */
+  if (dn->exponent>set->emax-dn->digits+1) {   /* too big */
+    decSetOverflow(dn, set, status);
+    return;
+    }
+  /* here when the result is normal but in clamp range */
+  if (!set->clamp) return;
+
+  /* here when need to apply the IEEE exponent clamp (fold-down) */
+  shift=dn->exponent-(set->emax-set->digits+1);
+
+  /* shift coefficient (if non-zero) */
+  if (!ISZERO(dn)) {
+    dn->digits=decShiftToMost(dn->lsu, dn->digits, shift);
+    }
+  dn->exponent-=shift;                  /* adjust the exponent to match */
+  *status|=DEC_Clamped;                 /* and record the dirty deed */
+  return;
+  } /* decFinalize */
+
+/* ------------------------------------------------------------------ */
+/* decSetOverflow -- set number to proper overflow value              */
+/*                                                                    */
+/*   dn is the number (used for sign [only] and result)               */
+/*   set is the context [used for the rounding mode, etc.]            */
+/*   status contains the current status to be updated                 */
+/*                                                                    */
+/* This sets the sign of a number and sets its value to either        */
+/* Infinity or the maximum finite value, depending on the sign of     */
+/* dn and the rounding mode, following IEEE 854 rules.
*/ +/* ------------------------------------------------------------------ */ +static void decSetOverflow(decNumber *dn, decContext *set, uInt *status) { + Flag needmax=0; /* result is maximum finite value */ + uByte sign=dn->bits&DECNEG; /* clean and save sign bit */ + + if (ISZERO(dn)) { /* zero does not overflow magnitude */ + Int emax=set->emax; /* limit value */ + if (set->clamp) emax-=set->digits-1; /* lower if clamping */ + if (dn->exponent>emax) { /* clamp required */ + dn->exponent=emax; + *status|=DEC_Clamped; + } + return; + } + + decNumberZero(dn); + switch (set->round) { + case DEC_ROUND_DOWN: { + needmax=1; /* never Infinity */ + break;} /* r-d */ + case DEC_ROUND_05UP: { + needmax=1; /* never Infinity */ + break;} /* r-05 */ + case DEC_ROUND_CEILING: { + if (sign) needmax=1; /* Infinity if non-negative */ + break;} /* r-c */ + case DEC_ROUND_FLOOR: { + if (!sign) needmax=1; /* Infinity if negative */ + break;} /* r-f */ + default: break; /* Infinity in all other cases */ + } + if (needmax) { + decSetMaxValue(dn, set); + dn->bits=sign; /* set sign */ + } + else dn->bits=sign|DECINF; /* Value is +/-Infinity */ + *status|=DEC_Overflow | DEC_Inexact | DEC_Rounded; + } /* decSetOverflow */ + +/* ------------------------------------------------------------------ */ +/* decSetMaxValue -- set number to +Nmax (maximum normal value) */ +/* */ +/* dn is the number to set */ +/* set is the context [used for digits and emax] */ +/* */ +/* This sets the number to the maximum positive value. */ +/* ------------------------------------------------------------------ */ +static void decSetMaxValue(decNumber *dn, decContext *set) { + Unit *up; /* work */ + Int count=set->digits; /* nines to add */ + dn->digits=count; + /* fill in all nines to set maximum value */ + for (up=dn->lsu; ; up++) { + if (count>DECDPUN) *up=DECDPUNMAX; /* unit full o'nines */ + else { /* this is the msu */ + *up=(Unit)(powers[count]-1); + break; + } + count-=DECDPUN; /* filled those digits */ + } /* up */ + dn->bits=0; /* + sign */ + dn->exponent=set->emax-set->digits+1; + } /* decSetMaxValue */ + +/* ------------------------------------------------------------------ */ +/* decSetSubnormal -- process value whose exponent is extended) { + decNumberZero(dn); + /* always full overflow */ + *status|=DEC_Underflow | DEC_Subnormal | DEC_Inexact | DEC_Rounded; + return; + } + #endif + + /* Full arithmetic -- allow subnormals, rounded to minimum exponent */ + /* (Etiny) if needed */ + etiny=set->emin-(set->digits-1); /* smallest allowed exponent */ + + if ISZERO(dn) { /* value is zero */ + /* residue can never be non-zero here */ + #if DECCHECK + if (*residue!=0) { + printf("++ Subnormal 0 residue %ld\n", (LI)*residue); + *status|=DEC_Invalid_operation; + } + #endif + if (dn->exponentexponent=etiny; + *status|=DEC_Clamped; + } + return; + } + + *status|=DEC_Subnormal; /* have a non-zero subnormal */ + adjust=etiny-dn->exponent; /* calculate digits to remove */ + if (adjust<=0) { /* not out of range; unrounded */ + /* residue can never be non-zero here, except in the Nmin-residue */ + /* case (which is a subnormal result), so can take fast-path here */ + /* it may already be inexact (from setting the coefficient) */ + if (*status&DEC_Inexact) *status|=DEC_Underflow; + return; + } + + /* adjust>0, so need to rescale the result so exponent becomes Etiny */ + /* [this code is similar to that in rescale] */ + workset=*set; /* clone rounding, etc. 
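+/* ------------------------------------------------------------------ */
+/* [Editorial sketch -- not part of this patch] decSetOverflow above  */
+/* picks between +/-Nmax and +/-Infinity per IEEE 854; the decision   */
+/* as a standalone predicate (enum rounding is from decContext.h):    */
+/*
+   // returns 1 when overflow saturates to Nmax, 0 when it gives Inf
+   static int overflow_gives_nmax(enum rounding round, int negative) {
+     switch (round) {
+       case DEC_ROUND_DOWN:
+       case DEC_ROUND_05UP:    return 1;          // never Infinity
+       case DEC_ROUND_CEILING: return negative;   // -Nmax, +Infinity
+       case DEC_ROUND_FLOOR:   return !negative;  // +Nmax, -Infinity
+       default:                return 0;          // Infinity otherwise
+       }
+     }
+*/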
*/ + workset.digits=dn->digits-adjust; /* set requested length */ + workset.emin-=adjust; /* and adjust emin to match */ + /* [note that the latter can be <1, here, similar to Rescale case] */ + decSetCoeff(dn, &workset, dn->lsu, dn->digits, residue, status); + decApplyRound(dn, &workset, *residue, status); + + /* Use 754R/854 default rule: Underflow is set iff Inexact */ + /* [independent of whether trapped] */ + if (*status&DEC_Inexact) *status|=DEC_Underflow; + + /* if rounded up a 999s case, exponent will be off by one; adjust */ + /* back if so [it will fit, because it was shortened earlier] */ + if (dn->exponent>etiny) { + dn->digits=decShiftToMost(dn->lsu, dn->digits, 1); + dn->exponent--; /* (re)adjust the exponent. */ + } + + /* if rounded to zero, it is by definition clamped... */ + if (ISZERO(dn)) *status|=DEC_Clamped; + } /* decSetSubnormal */ + +/* ------------------------------------------------------------------ */ +/* decCheckMath - check entry conditions for a math function */ +/* */ +/* This checks the context and the operand */ +/* */ +/* rhs is the operand to check */ +/* set is the context to check */ +/* status is unchanged if both are good */ +/* */ +/* returns non-zero if status is changed, 0 otherwise */ +/* */ +/* Restrictions enforced: */ +/* */ +/* digits, emax, and -emin in the context must be less than */ +/* DEC_MAX_MATH (999999), and A must be within these bounds if */ +/* non-zero. Invalid_operation is set in the status if a */ +/* restriction is violated. */ +/* ------------------------------------------------------------------ */ +static uInt decCheckMath(const decNumber *rhs, decContext *set, + uInt *status) { + uInt save=*status; /* record */ + if (set->digits>DEC_MAX_MATH + || set->emax>DEC_MAX_MATH + || -set->emin>DEC_MAX_MATH) *status|=DEC_Invalid_context; + else if ((rhs->digits>DEC_MAX_MATH + || rhs->exponent+rhs->digits>DEC_MAX_MATH+1 + || rhs->exponent+rhs->digits<2*(1-DEC_MAX_MATH)) + && !ISZERO(rhs)) *status|=DEC_Invalid_operation; + return (*status!=save); + } /* decCheckMath */ + +/* ------------------------------------------------------------------ */ +/* decGetInt -- get integer from a number */ +/* */ +/* dn is the number [which will not be altered] */ +/* */ +/* returns one of: */ +/* BADINT if there is a non-zero fraction */ +/* the converted integer */ +/* BIGEVEN if the integer is even and magnitude > 2*10**9 */ +/* BIGODD if the integer is odd and magnitude > 2*10**9 */ +/* */ +/* This checks and gets a whole number from the input decNumber. */ +/* The sign can be determined from dn by the caller when BIGEVEN or */ +/* BIGODD is returned. 
*/ +/* ------------------------------------------------------------------ */ +static Int decGetInt(const decNumber *dn) { + Int theInt; /* result accumulator */ + const Unit *up; /* work */ + Int got; /* digits (real or not) processed */ + Int ilength=dn->digits+dn->exponent; /* integral length */ + Flag neg=decNumberIsNegative(dn); /* 1 if -ve */ + + /* The number must be an integer that fits in 10 digits */ + /* Assert, here, that 10 is enough for any rescale Etiny */ + #if DEC_MAX_EMAX > 999999999 + #error GetInt may need updating [for Emax] + #endif + #if DEC_MIN_EMIN < -999999999 + #error GetInt may need updating [for Emin] + #endif + if (ISZERO(dn)) return 0; /* zeros are OK, with any exponent */ + + up=dn->lsu; /* ready for lsu */ + theInt=0; /* ready to accumulate */ + if (dn->exponent>=0) { /* relatively easy */ + /* no fractional part [usual]; allow for positive exponent */ + got=dn->exponent; + } + else { /* -ve exponent; some fractional part to check and discard */ + Int count=-dn->exponent; /* digits to discard */ + /* spin up whole units until reach the Unit with the unit digit */ + for (; count>=DECDPUN; up++) { + if (*up!=0) return BADINT; /* non-zero Unit to discard */ + count-=DECDPUN; + } + if (count==0) got=0; /* [a multiple of DECDPUN] */ + else { /* [not multiple of DECDPUN] */ + Int rem; /* work */ + /* slice off fraction digits and check for non-zero */ + #if DECDPUN<=4 + theInt=QUOT10(*up, count); + rem=*up-theInt*powers[count]; + #else + rem=*up%powers[count]; /* slice off discards */ + theInt=*up/powers[count]; + #endif + if (rem!=0) return BADINT; /* non-zero fraction */ + /* it looks good */ + got=DECDPUN-count; /* number of digits so far */ + up++; /* ready for next */ + } + } + /* now it's known there's no fractional part */ + + /* tricky code now, to accumulate up to 9.3 digits */ + if (got==0) {theInt=*up; got+=DECDPUN; up++;} /* ensure lsu is there */ + + if (ilength<11) { + Int save=theInt; + /* collect any remaining unit(s) */ + for (; got1999999997) ilength=11; + else if (!neg && theInt>999999999) ilength=11; + if (ilength==11) theInt=save; /* restore correct low bit */ + } + } + + if (ilength>10) { /* too big */ + if (theInt&1) return BIGODD; /* bottom bit 1 */ + return BIGEVEN; /* bottom bit 0 */ + } + + if (neg) theInt=-theInt; /* apply sign */ + return theInt; + } /* decGetInt */ + +/* ------------------------------------------------------------------ */ +/* decDecap -- decapitate the coefficient of a number */ +/* */ +/* dn is the number to be decapitated */ +/* drop is the number of digits to be removed from the left of dn; */ +/* this must be <= dn->digits (if equal, the coefficient is */ +/* set to 0) */ +/* */ +/* Returns dn; dn->digits will be <= the initial digits less drop */ +/* (after removing drop digits there may be leading zero digits */ +/* which will also be removed). Only dn->lsu and dn->digits change. 
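+/* ------------------------------------------------------------------ */
+/* [Editorial sketch -- not part of this patch] decGetInt above in    */
+/* miniature, on a binary integer: value = coeff * 10^exp is usable   */
+/* as an integer only if no non-zero fraction digit is discarded:     */
+/*
+   #include <stdint.h>
+   // returns 0 and sets *out for a small exact integer; -1 on a
+   // non-zero fraction (the BADINT analogue); overflow is not handled
+   // here (decGetInt reports such magnitudes as BIGEVEN or BIGODD)
+   static int get_small_int(int64_t coeff, int exp, int64_t *out) {
+     while (exp < 0) {
+       if (coeff % 10 != 0) return -1;   // non-zero fractional digit
+       coeff /= 10; exp++;               // discard a trailing zero
+       }
+     while (exp > 0) { coeff *= 10; exp--; }   // scale up to the int
+     *out = coeff;
+     return 0;
+     }
+*/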
*/ +/* ------------------------------------------------------------------ */ +static decNumber *decDecap(decNumber *dn, Int drop) { + Unit *msu; /* -> target cut point */ + Int cut; /* work */ + if (drop>=dn->digits) { /* losing the whole thing */ + #if DECCHECK + if (drop>dn->digits) + printf("decDecap called with drop>digits [%ld>%ld]\n", + (LI)drop, (LI)dn->digits); + #endif + dn->lsu[0]=0; + dn->digits=1; + return dn; + } + msu=dn->lsu+D2U(dn->digits-drop)-1; /* -> likely msu */ + cut=MSUDIGITS(dn->digits-drop); /* digits to be in use in msu */ + if (cut!=DECDPUN) *msu%=powers[cut]; /* clear left digits */ + /* that may have left leading zero digits, so do a proper count... */ + dn->digits=decGetDigits(dn->lsu, msu-dn->lsu+1); + return dn; + } /* decDecap */ + +/* ------------------------------------------------------------------ */ +/* decBiStr -- compare string with pairwise options */ +/* */ +/* targ is the string to compare */ +/* str1 is one of the strings to compare against (length may be 0) */ +/* str2 is the other; it must be the same length as str1 */ +/* */ +/* returns 1 if strings compare equal, (that is, it is the same */ +/* length as str1 and str2, and each character of targ is in either */ +/* str1 or str2 in the corresponding position), or 0 otherwise */ +/* */ +/* This is used for generic caseless compare, including the awkward */ +/* case of the Turkish dotted and dotless Is. Use as (for example): */ +/* if (decBiStr(test, "mike", "MIKE")) ... */ +/* ------------------------------------------------------------------ */ +static Flag decBiStr(const char *targ, const char *str1, const char *str2) { + for (;;targ++, str1++, str2++) { + if (*targ!=*str1 && *targ!=*str2) return 0; + /* *targ has a match in one (or both, if terminator) */ + if (*targ=='\0') break; + } /* forever */ + return 1; + } /* decBiStr */ + +/* ------------------------------------------------------------------ */ +/* decNaNs -- handle NaN operand or operands */ +/* */ +/* res is the result number */ +/* lhs is the first operand */ +/* rhs is the second operand, or NULL if none */ +/* context is used to limit payload length */ +/* status contains the current status */ +/* returns res in case convenient */ +/* */ +/* Called when one or both operands is a NaN, and propagates the */ +/* appropriate result to res. When an sNaN is found, it is changed */ +/* to a qNaN and Invalid operation is set. */ +/* ------------------------------------------------------------------ */ +static decNumber * decNaNs(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set, + uInt *status) { + /* This decision tree ends up with LHS being the source pointer, */ + /* and status updated if need be */ + if (lhs->bits & DECSNAN) + *status|=DEC_Invalid_operation | DEC_sNaN; + else if (rhs==NULL); + else if (rhs->bits & DECSNAN) { + lhs=rhs; + *status|=DEC_Invalid_operation | DEC_sNaN; + } + else if (lhs->bits & DECNAN); + else lhs=rhs; + + /* propagate the payload */ + if (lhs->digits<=set->digits) decNumberCopy(res, lhs); /* easy */ + else { /* too long */ + const Unit *ul; + Unit *ur, *uresp1; + /* copy safe number of units, then decapitate */ + res->bits=lhs->bits; /* need sign etc. */ + uresp1=res->lsu+D2U(set->digits); + for (ur=res->lsu, ul=lhs->lsu; urdigits=D2U(set->digits)*DECDPUN; + /* maybe still too long */ + if (res->digits>set->digits) decDecap(res, res->digits-set->digits); + } + + res->bits&=~DECSNAN; /* convert any sNaN to NaN, while */ + res->bits|=DECNAN; /* .. 
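+/* ------------------------------------------------------------------ */
+/* [Editorial usage note -- not from this patch] decBiStr above gives */
+/* a caseless keyword match without tolower(), which misbehaves in    */
+/* Turkish dotted/dotless-I locales; a hypothetical caller:           */
+/*
+   if (decBiStr(tok, "infinity", "INFINITY")) ...  // matches "InFiNiTy"
+   // length must match too: decBiStr("NaNx", "nan", "NAN") returns 0
+*/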
preserving sign */ + res->exponent=0; /* clean exponent */ + /* [coefficient was copied/decapitated] */ + return res; + } /* decNaNs */ + +/* ------------------------------------------------------------------ */ +/* decStatus -- apply non-zero status */ +/* */ +/* dn is the number to set if error */ +/* status contains the current status (not yet in context) */ +/* set is the context */ +/* */ +/* If the status is an error status, the number is set to a NaN, */ +/* unless the error was an overflow, divide-by-zero, or underflow, */ +/* in which case the number will have already been set. */ +/* */ +/* The context status is then updated with the new status. Note that */ +/* this may raise a signal, so control may never return from this */ +/* routine (hence resources must be recovered before it is called). */ +/* ------------------------------------------------------------------ */ +static void decStatus(decNumber *dn, uInt status, decContext *set) { + if (status & DEC_NaNs) { /* error status -> NaN */ + /* if cause was an sNaN, clear and propagate [NaN is already set up] */ + if (status & DEC_sNaN) status&=~DEC_sNaN; + else { + decNumberZero(dn); /* other error: clean throughout */ + dn->bits=DECNAN; /* and make a quiet NaN */ + } + } + decContextSetStatus(set, status); /* [may not return] */ + return; + } /* decStatus */ + +/* ------------------------------------------------------------------ */ +/* decGetDigits -- count digits in a Units array */ +/* */ +/* uar is the Unit array holding the number (this is often an */ +/* accumulator of some sort) */ +/* len is the length of the array in units [>=1] */ +/* */ +/* returns the number of (significant) digits in the array */ +/* */ +/* All leading zeros are excluded, except the last if the array has */ +/* only zero Units. */ +/* ------------------------------------------------------------------ */ +/* This may be called twice during some operations. */ +static Int decGetDigits(Unit *uar, Int len) { + Unit *up=uar+(len-1); /* -> msu */ + Int digits=(len-1)*DECDPUN+1; /* possible digits excluding msu */ + #if DECDPUN>4 + uInt const *pow; /* work */ + #endif + /* (at least 1 in final msu) */ + #if DECCHECK + if (len<1) printf("decGetDigits called with len<1 [%ld]\n", (LI)len); + #endif + + for (; up>=uar; up--) { + if (*up==0) { /* unit is all 0s */ + if (digits==1) break; /* a zero has one digit */ + digits-=DECDPUN; /* adjust for 0 unit */ + continue;} + /* found the first (most significant) non-zero Unit */ + #if DECDPUN>1 /* not done yet */ + if (*up<10) break; /* is 1-9 */ + digits++; + #if DECDPUN>2 /* not done yet */ + if (*up<100) break; /* is 10-99 */ + digits++; + #if DECDPUN>3 /* not done yet */ + if (*up<1000) break; /* is 100-999 */ + digits++; + #if DECDPUN>4 /* count the rest ... */ + for (pow=&powers[4]; *up>=*pow; pow++) digits++; + #endif + #endif + #endif + #endif + break; + } /* up */ + return digits; + } /* decGetDigits */ + +#if DECTRACE | DECCHECK +/* ------------------------------------------------------------------ */ +/* decNumberShow -- display a number [debug aid] */ +/* dn is the number to show */ +/* */ +/* Shows: sign, exponent, coefficient (msu first), digits */ +/* or: sign, special-value */ +/* ------------------------------------------------------------------ */ +/* this is public so other modules can use it */ +void decNumberShow(const decNumber *dn) { + const Unit *up; /* work */ + uInt u, d; /* .. */ + Int cut; /* .. 
*/ + char isign='+'; /* main sign */ + if (dn==NULL) { + printf("NULL\n"); + return;} + if (decNumberIsNegative(dn)) isign='-'; + printf(" >> %c ", isign); + if (dn->bits&DECSPECIAL) { /* Is a special value */ + if (decNumberIsInfinite(dn)) printf("Infinity"); + else { /* a NaN */ + if (dn->bits&DECSNAN) printf("sNaN"); /* signalling NaN */ + else printf("NaN"); + } + /* if coefficient and exponent are 0, no more to do */ + if (dn->exponent==0 && dn->digits==1 && *dn->lsu==0) { + printf("\n"); + return;} + /* drop through to report other information */ + printf(" "); + } + + /* now carefully display the coefficient */ + up=dn->lsu+D2U(dn->digits)-1; /* msu */ + printf("%ld", (LI)*up); + for (up=up-1; up>=dn->lsu; up--) { + u=*up; + printf(":"); + for (cut=DECDPUN-1; cut>=0; cut--) { + d=u/powers[cut]; + u-=d*powers[cut]; + printf("%ld", (LI)d); + } /* cut */ + } /* up */ + if (dn->exponent!=0) { + char esign='+'; + if (dn->exponent<0) esign='-'; + printf(" E%c%ld", esign, (LI)abs(dn->exponent)); + } + printf(" [%ld]\n", (LI)dn->digits); + } /* decNumberShow */ +#endif + +#if DECTRACE || DECCHECK +/* ------------------------------------------------------------------ */ +/* decDumpAr -- display a unit array [debug/check aid] */ +/* name is a single-character tag name */ +/* ar is the array to display */ +/* len is the length of the array in Units */ +/* ------------------------------------------------------------------ */ +static void decDumpAr(char name, const Unit *ar, Int len) { + Int i; + const char *spec; + #if DECDPUN==9 + spec="%09d "; + #elif DECDPUN==8 + spec="%08d "; + #elif DECDPUN==7 + spec="%07d "; + #elif DECDPUN==6 + spec="%06d "; + #elif DECDPUN==5 + spec="%05d "; + #elif DECDPUN==4 + spec="%04d "; + #elif DECDPUN==3 + spec="%03d "; + #elif DECDPUN==2 + spec="%02d "; + #else + spec="%d "; + #endif + printf(" :%c: ", name); + for (i=len-1; i>=0; i--) { + if (i==len-1) printf("%ld ", (LI)ar[i]); + else printf(spec, ar[i]); + } + printf("\n"); + return;} +#endif + +#if DECCHECK +/* ------------------------------------------------------------------ */ +/* decCheckOperands -- check operand(s) to a routine */ +/* res is the result structure (not checked; it will be set to */ +/* quiet NaN if error found (and it is not NULL)) */ +/* lhs is the first operand (may be DECUNRESU) */ +/* rhs is the second (may be DECUNUSED) */ +/* set is the context (may be DECUNCONT) */ +/* returns 0 if both operands, and the context are clean, or 1 */ +/* otherwise (in which case the context will show an error, */ +/* unless NULL). Note that res is not cleaned; caller should */ +/* handle this so res=NULL case is safe. */ +/* The caller is expected to abandon immediately if 1 is returned. 
*/ +/* ------------------------------------------------------------------ */ +static Flag decCheckOperands(decNumber *res, const decNumber *lhs, + const decNumber *rhs, decContext *set) { + Flag bad=0; + if (set==NULL) { /* oops; hopeless */ + #if DECTRACE || DECVERB + printf("Reference to context is NULL.\n"); + #endif + bad=1; + return 1;} + else if (set!=DECUNCONT + && (set->digits<1 || set->round>=DEC_ROUND_MAX)) { + bad=1; + #if DECTRACE || DECVERB + printf("Bad context [digits=%ld round=%ld].\n", + (LI)set->digits, (LI)set->round); + #endif + } + else { + if (res==NULL) { + bad=1; + #if DECTRACE + /* this one not DECVERB as standard tests include NULL */ + printf("Reference to result is NULL.\n"); + #endif + } + if (!bad && lhs!=DECUNUSED) bad=(decCheckNumber(lhs)); + if (!bad && rhs!=DECUNUSED) bad=(decCheckNumber(rhs)); + } + if (bad) { + if (set!=DECUNCONT) decContextSetStatus(set, DEC_Invalid_operation); + if (res!=DECUNRESU && res!=NULL) { + decNumberZero(res); + res->bits=DECNAN; /* qNaN */ + } + } + return bad; + } /* decCheckOperands */ + +/* ------------------------------------------------------------------ */ +/* decCheckNumber -- check a number */ +/* dn is the number to check */ +/* returns 0 if the number is clean, or 1 otherwise */ +/* */ +/* The number is considered valid if it could be a result from some */ +/* operation in some valid context. */ +/* ------------------------------------------------------------------ */ +static Flag decCheckNumber(const decNumber *dn) { + const Unit *up; /* work */ + uInt maxuint; /* .. */ + Int ae, d, digits; /* .. */ + Int emin, emax; /* .. */ + + if (dn==NULL) { /* hopeless */ + #if DECTRACE + /* this one not DECVERB as standard tests include NULL */ + printf("Reference to decNumber is NULL.\n"); + #endif + return 1;} + + /* check special values */ + if (dn->bits & DECSPECIAL) { + if (dn->exponent!=0) { + #if DECTRACE || DECVERB + printf("Exponent %ld (not 0) for a special value [%02x].\n", + (LI)dn->exponent, dn->bits); + #endif + return 1;} + + /* 2003.09.08: NaNs may now have coefficients, so next tests Inf only */ + if (decNumberIsInfinite(dn)) { + if (dn->digits!=1) { + #if DECTRACE || DECVERB + printf("Digits %ld (not 1) for an infinity.\n", (LI)dn->digits); + #endif + return 1;} + if (*dn->lsu!=0) { + #if DECTRACE || DECVERB + printf("LSU %ld (not 0) for an infinity.\n", (LI)*dn->lsu); + #endif + decDumpAr('I', dn->lsu, D2U(dn->digits)); + return 1;} + } /* Inf */ + /* 2002.12.26: negative NaNs can now appear through proposed IEEE */ + /* concrete formats (decimal64, etc.). */ + return 0; + } + + /* check the coefficient */ + if (dn->digits<1 || dn->digits>DECNUMMAXP) { + #if DECTRACE || DECVERB + printf("Digits %ld in number.\n", (LI)dn->digits); + #endif + return 1;} + + d=dn->digits; + + for (up=dn->lsu; d>0; up++) { + if (d>DECDPUN) maxuint=DECDPUNMAX; + else { /* reached the msu */ + maxuint=powers[d]-1; + if (dn->digits>1 && *upmaxuint) { + #if DECTRACE || DECVERB + printf("Bad Unit [%08lx] in %ld-digit number at offset %ld [maxuint %ld].\n", + (LI)*up, (LI)dn->digits, (LI)(up-dn->lsu), (LI)maxuint); + #endif + return 1;} + d-=DECDPUN; + } + + /* check the exponent. Note that input operands can have exponents */ + /* which are out of the set->emin/set->emax and set->digits range */ + /* (just as they can have more digits than set->digits). 
*/
+  ae=dn->exponent+dn->digits-1;         /* adjusted exponent */
+  emax=DECNUMMAXE;
+  emin=DECNUMMINE;
+  digits=DECNUMMAXP;
+  if (ae<emin-(digits-1)) {
+    #if DECTRACE || DECVERB
+    printf("Adjusted exponent underflow [%ld].\n", (LI)ae);
+    decNumberShow(dn);
+    #endif
+    return 1;}
+
+  if (ae>+emax) {
+    #if DECTRACE || DECVERB
+    printf("Adjusted exponent overflow [%ld].\n", (LI)ae);
+    decNumberShow(dn);
+    #endif
+    return 1;}
+
+  return 0;              /* it's OK */
+  } /* decCheckNumber */
+
+/* ------------------------------------------------------------------ */
+/* decCheckInexact -- check a normal finite inexact result has digits */
+/*   dn is the number to check                                        */
+/*   set is the context (for status and precision)                    */
+/*   sets Invalid operation, etc., if some digits are missing         */
+/* [this check is not made for DECSUBSET compilation or when          */
+/* subnormal is not set]                                              */
+/* ------------------------------------------------------------------ */
+static void decCheckInexact(const decNumber *dn, decContext *set) {
+  #if !DECSUBSET && DECEXTFLAG
+  if ((set->status & (DEC_Inexact|DEC_Subnormal))==DEC_Inexact
+   && (set->digits!=dn->digits) && !(dn->bits & DECSPECIAL)) {
+    #if DECTRACE || DECVERB
+    printf("Insufficient digits [%ld] on normal Inexact result.\n",
+           (LI)dn->digits);
+    decNumberShow(dn);
+    #endif
+    decContextSetStatus(set, DEC_Invalid_operation);
+    }
+  #else
+  /* next is a noop for quiet compiler */
+  if (dn!=NULL && dn->digits==0) set->status|=DEC_Invalid_operation;
+  #endif
+  return;
+  } /* decCheckInexact */
+#endif
+
+#if DECALLOC
+#undef malloc
+#undef free
+/* ------------------------------------------------------------------ */
+/* decMalloc -- accountable allocation routine                        */
+/*   n is the number of bytes to allocate                             */
+/*                                                                    */
+/* Semantics is the same as the stdlib malloc routine, but bytes      */
+/* allocated are accounted for globally, and corruption fences are    */
+/* added before and after the 'actual' storage.                       */
+/* ------------------------------------------------------------------ */
+/* This routine allocates storage with an extra twelve bytes; 8 are   */
+/* at the start and hold:                                             */
+/*   0-3 the original length requested                                */
+/*   4-7 buffer corruption detection fence (DECFENCE, x4)             */
+/* The 4 bytes at the end also hold a corruption fence (DECFENCE, x4) */
+/* ------------------------------------------------------------------ */
+static void *decMalloc(size_t n) {
+  uInt  size=n+12;                 /* true size */
+  void  *alloc;                    /* -> allocated storage */
+  uInt  *j;                        /* work */
+  uByte *b, *b0;                   /* .. */
+
+  alloc=malloc(size);              /* -> allocated storage */
+  if (alloc==NULL) return NULL;    /* out of storage */
+  b0=(uByte *)alloc;               /* as bytes */
+  decAllocBytes+=n;                /* account for storage */
+  j=(uInt *)alloc;                 /* -> first four bytes */
+  *j=n;                            /* save n */
+  /* printf(" alloc ++ dAB: %ld (%d)\n", decAllocBytes, n); */
+  for (b=b0+4; b<b0+8; b++) *b=DECFENCE;     /* fence at the front */
+  b=b0+n+8;                        /* -> first byte of end fence */
+  for (; b<b0+n+12; b++) *b=DECFENCE;        /* fence at the end */
+  return b0+8;                     /* -> play area */
+  } /* decMalloc */
+
+/* ------------------------------------------------------------------ */
+/* decFree -- accountable free routine                                */
+/*   alloc is the storage to free                                     */
+/*                                                                    */
+/* Semantics is the same as the stdlib malloc routine, except that    */
+/* the global storage accounting is updated and the fences are        */
+/* checked to ensure that no routine has written 'out of bounds'.     */
+/* ------------------------------------------------------------------ */
+/* This routine first checks that the fences have not been corrupted. */
+/* It then frees the storage using the 'true' storage address (that   */
+/* is, offset by 8).
*/ +/* ------------------------------------------------------------------ */ +static void decFree(void *alloc) { + uInt *j, n; /* pointer, original length */ + uByte *b, *b0; /* work */ + + if (alloc==NULL) return; /* allowed; it's a nop */ + b0=(uByte *)alloc; /* as bytes */ + b0-=8; /* -> true start of storage */ + j=(uInt *)b0; /* -> first four bytes */ + n=*j; /* lift */ + for (b=b0+4; bexponent+dn->digits-1; /* [0 if special] */ + if (dn->digits>DECIMAL128_Pmax /* too many digits */ + || ae>DECIMAL128_Emax /* likely overflow */ + || aeround; /* use supplied rounding */ + decNumberPlus(&dw, dn, &dc); /* (round and check) */ + /* [this changes -0 to 0, so enforce the sign...] */ + dw.bits|=dn->bits&DECNEG; + status=dc.status; /* save status */ + dn=&dw; /* use the work number */ + } /* maybe out of range */ + + if (dn->bits&DECSPECIAL) { /* a special value */ + if (dn->bits&DECINF) targhi=DECIMAL_Inf<<24; + else { /* sNaN or qNaN */ + if ((*dn->lsu!=0 || dn->digits>1) /* non-zero coefficient */ + && (dn->digitsbits&DECNAN) targhi|=DECIMAL_NaN<<24; + else targhi|=DECIMAL_sNaN<<24; + } /* a NaN */ + } /* special */ + + else { /* is finite */ + if (decNumberIsZero(dn)) { /* is a zero */ + /* set and clamp exponent */ + if (dn->exponent<-DECIMAL128_Bias) { + exp=0; /* low clamp */ + status|=DEC_Clamped; + } + else { + exp=dn->exponent+DECIMAL128_Bias; /* bias exponent */ + if (exp>DECIMAL128_Ehigh) { /* top clamp */ + exp=DECIMAL128_Ehigh; + status|=DEC_Clamped; + } + } + comb=(exp>>9) & 0x18; /* msd=0, exp top 2 bits .. */ + } + else { /* non-zero finite number */ + uInt msd; /* work */ + Int pad=0; /* coefficient pad digits */ + + /* the dn is known to fit, but it may need to be padded */ + exp=(uInt)(dn->exponent+DECIMAL128_Bias); /* bias exponent */ + if (exp>DECIMAL128_Ehigh) { /* fold-down case */ + pad=exp-DECIMAL128_Ehigh; + exp=DECIMAL128_Ehigh; /* [to maximum] */ + status|=DEC_Clamped; + } + + /* [fastpath for common case is not a win, here] */ + decDigitsToDPD(dn, targar, pad); + /* save and clear the top digit */ + msd=targhi>>14; + targhi&=0x00003fff; + + /* create the combination field */ + if (msd>=8) comb=0x18 | ((exp>>11) & 0x06) | (msd & 0x01); + else comb=((exp>>9) & 0x18) | msd; + } + targhi|=comb<<26; /* add combination field .. */ + targhi|=(exp&0xfff)<<14; /* .. and exponent continuation */ + } /* finite */ + + if (dn->bits&DECNEG) targhi|=0x80000000; /* add sign bit */ + + /* now write to storage; this is endian */ + pu=(uInt *)d128->bytes; /* overlay */ + if (DECLITEND) { + pu[0]=targlo; /* directly store the low int */ + pu[1]=targml; /* then the mid-low */ + pu[2]=targmh; /* then the mid-high */ + pu[3]=targhi; /* then the high int */ + } + else { + pu[0]=targhi; /* directly store the high int */ + pu[1]=targmh; /* then the mid-high */ + pu[2]=targml; /* then the mid-low */ + pu[3]=targlo; /* then the low int */ + } + + if (status!=0) decContextSetStatus(set, status); /* pass on status */ + /* decimal128Show(d128); */ + return d128; + } /* decimal128FromNumber */ + +/* ------------------------------------------------------------------ */ +/* decimal128ToNumber -- convert decimal128 to decNumber */ +/* d128 is the source decimal128 */ +/* dn is the target number, with appropriate space */ +/* No error is possible. 
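+/* ------------------------------------------------------------------ */
+/* [Editorial sketch -- not part of this patch] The combination field */
+/* built by decimal128FromNumber above is decoded via the COMBMSD and */
+/* COMBEXP tables in the code below; the same decoding written out    */
+/* (exp2 is the top two exponent bits; 3 marks a special value):      */
+/*
+   static void decode_comb(unsigned comb, unsigned *msd, unsigned *exp2) {
+     if ((comb & 0x18) != 0x18) {       // 00/01/10 prefix: msd 0-7
+       *exp2 = comb >> 3;
+       *msd = comb & 7;
+       }
+     else if ((comb & 0x1e) != 0x1e) {  // 11eem form: msd 8-9
+       *exp2 = (comb >> 1) & 3;
+       *msd = 8 + (comb & 1);
+       }
+     else {                             // 11110 Infinity, 11111 NaN
+       *exp2 = 3;
+       *msd = comb & 1;                 // 0 for Infinity, 1 for NaN
+       }
+     }
+*/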
*/ +/* ------------------------------------------------------------------ */ +decNumber * decimal128ToNumber(const decimal128 *d128, decNumber *dn) { + uInt msd; /* coefficient MSD */ + uInt exp; /* exponent top two bits */ + uInt comb; /* combination field */ + const uInt *pu; /* work */ + Int need; /* .. */ + uInt sourar[4]; /* source 128-bit */ + #define sourhi sourar[3] /* name the word with the sign */ + #define sourmh sourar[2] /* and the mid-high word */ + #define sourml sourar[1] /* and the mod-low word */ + #define sourlo sourar[0] /* and the lowest word */ + + /* load source from storage; this is endian */ + pu=(const uInt *)d128->bytes; /* overlay */ + if (DECLITEND) { + sourlo=pu[0]; /* directly load the low int */ + sourml=pu[1]; /* then the mid-low */ + sourmh=pu[2]; /* then the mid-high */ + sourhi=pu[3]; /* then the high int */ + } + else { + sourhi=pu[0]; /* directly load the high int */ + sourmh=pu[1]; /* then the mid-high */ + sourml=pu[2]; /* then the mid-low */ + sourlo=pu[3]; /* then the low int */ + } + + comb=(sourhi>>26)&0x1f; /* combination field */ + + decNumberZero(dn); /* clean number */ + if (sourhi&0x80000000) dn->bits=DECNEG; /* set sign if negative */ + + msd=COMBMSD[comb]; /* decode the combination field */ + exp=COMBEXP[comb]; /* .. */ + + if (exp==3) { /* is a special */ + if (msd==0) { + dn->bits|=DECINF; + return dn; /* no coefficient needed */ + } + else if (sourhi&0x02000000) dn->bits|=DECSNAN; + else dn->bits|=DECNAN; + msd=0; /* no top digit */ + } + else { /* is a finite number */ + dn->exponent=(exp<<12)+((sourhi>>14)&0xfff)-DECIMAL128_Bias; /* unbiased */ + } + + /* get the coefficient */ + sourhi&=0x00003fff; /* clean coefficient continuation */ + if (msd) { /* non-zero msd */ + sourhi|=msd<<14; /* prefix to coefficient */ + need=12; /* process 12 declets */ + } + else { /* msd=0 */ + if (sourhi) need=11; /* declets to process */ + else if (sourmh) need=10; + else if (sourml) need=7; + else if (sourlo) need=4; + else return dn; /* easy: coefficient is 0 */ + } /*msd=0 */ + + decDigitsFromDPD(dn, sourar, need); /* process declets */ + /* decNumberShow(dn); */ + return dn; + } /* decimal128ToNumber */ + +/* ------------------------------------------------------------------ */ +/* to-scientific-string -- conversion to numeric string */ +/* to-engineering-string -- conversion to numeric string */ +/* */ +/* decimal128ToString(d128, string); */ +/* decimal128ToEngString(d128, string); */ +/* */ +/* d128 is the decimal128 format number to convert */ +/* string is the string where the result will be laid out */ +/* */ +/* string must be at least 24 characters */ +/* */ +/* No error is possible, and no status can be set. */ +/* ------------------------------------------------------------------ */ +char * decimal128ToEngString(const decimal128 *d128, char *string){ + decNumber dn; /* work */ + decimal128ToNumber(d128, &dn); + decNumberToEngString(&dn, string); + return string; + } /* decimal128ToEngString */ + +char * decimal128ToString(const decimal128 *d128, char *string){ + uInt msd; /* coefficient MSD */ + Int exp; /* exponent top two bits or full */ + uInt comb; /* combination field */ + char *cstart; /* coefficient start */ + char *c; /* output pointer in string */ + const uInt *pu; /* work */ + char *s, *t; /* .. (source, target) */ + Int dpd; /* .. */ + Int pre, e; /* .. */ + const uByte *u; /* .. 
*/ + + uInt sourar[4]; /* source 128-bit */ + #define sourhi sourar[3] /* name the word with the sign */ + #define sourmh sourar[2] /* and the mid-high word */ + #define sourml sourar[1] /* and the mod-low word */ + #define sourlo sourar[0] /* and the lowest word */ + + /* load source from storage; this is endian */ + pu=(const uInt *)d128->bytes; /* overlay */ + if (DECLITEND) { + sourlo=pu[0]; /* directly load the low int */ + sourml=pu[1]; /* then the mid-low */ + sourmh=pu[2]; /* then the mid-high */ + sourhi=pu[3]; /* then the high int */ + } + else { + sourhi=pu[0]; /* directly load the high int */ + sourmh=pu[1]; /* then the mid-high */ + sourml=pu[2]; /* then the mid-low */ + sourlo=pu[3]; /* then the low int */ + } + + c=string; /* where result will go */ + if (((Int)sourhi)<0) *c++='-'; /* handle sign */ + + comb=(sourhi>>26)&0x1f; /* combination field */ + msd=COMBMSD[comb]; /* decode the combination field */ + exp=COMBEXP[comb]; /* .. */ + + if (exp==3) { + if (msd==0) { /* infinity */ + strcpy(c, "Inf"); + strcpy(c+3, "inity"); + return string; /* easy */ + } + if (sourhi&0x02000000) *c++='s'; /* sNaN */ + strcpy(c, "NaN"); /* complete word */ + c+=3; /* step past */ + if (sourlo==0 && sourml==0 && sourmh==0 + && (sourhi&0x0003ffff)==0) return string; /* zero payload */ + /* otherwise drop through to add integer; set correct exp */ + exp=0; msd=0; /* setup for following code */ + } + else exp=(exp<<12)+((sourhi>>14)&0xfff)-DECIMAL128_Bias; /* unbiased */ + + /* convert 34 digits of significand to characters */ + cstart=c; /* save start of coefficient */ + if (msd) *c++='0'+(char)msd; /* non-zero most significant digit */ + + /* Now decode the declets. After extracting each one, it is */ + /* decoded to binary and then to a 4-char sequence by table lookup; */ + /* the 4-chars are a 1-char length (significant digits, except 000 */ + /* has length 0). This allows us to left-align the first declet */ + /* with non-zero content, then remaining ones are full 3-char */ + /* length. We use fixed-length memcpys because variable-length */ + /* causes a subroutine call in GCC. (These are length 4 for speed */ + /* and are safe because the array has an extra terminator byte.) */ + #define dpd2char u=&BIN2CHAR[DPD2BIN[dpd]*4]; \ + if (c!=cstart) {memcpy(c, u+1, 4); c+=3;} \ + else if (*u) {memcpy(c, u+4-*u, 4); c+=*u;} + dpd=(sourhi>>4)&0x3ff; /* declet 1 */ + dpd2char; + dpd=((sourhi&0xf)<<6) | (sourmh>>26); /* declet 2 */ + dpd2char; + dpd=(sourmh>>16)&0x3ff; /* declet 3 */ + dpd2char; + dpd=(sourmh>>6)&0x3ff; /* declet 4 */ + dpd2char; + dpd=((sourmh&0x3f)<<4) | (sourml>>28); /* declet 5 */ + dpd2char; + dpd=(sourml>>18)&0x3ff; /* declet 6 */ + dpd2char; + dpd=(sourml>>8)&0x3ff; /* declet 7 */ + dpd2char; + dpd=((sourml&0xff)<<2) | (sourlo>>30); /* declet 8 */ + dpd2char; + dpd=(sourlo>>20)&0x3ff; /* declet 9 */ + dpd2char; + dpd=(sourlo>>10)&0x3ff; /* declet 10 */ + dpd2char; + dpd=(sourlo)&0x3ff; /* declet 11 */ + dpd2char; + + if (c==cstart) *c++='0'; /* all zeros -- make 0 */ + + if (exp==0) { /* integer or NaN case -- easy */ + *c='\0'; /* terminate */ + return string; + } + + /* non-0 exponent */ + e=0; /* assume no E */ + pre=c-cstart+exp; + /* [here, pre-exp is the digits count (==1 for zero)] */ + if (exp>0 || pre<-5) { /* need exponential form */ + e=pre-1; /* calculate E value */ + pre=1; /* assume one digit before '.' 
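+/* ------------------------------------------------------------------ */
+/* [Editorial sketch -- not part of this patch] The layout decision   */
+/* above, isolated: pre = digits + exp counts places before the '.';  */
+/* exponential form is chosen when exp > 0 or pre < -5:               */
+/*
+   static void sci_layout(int digits, int exp, int *e, int *pre) {
+     *e = 0;                        // assume plain form
+     *pre = digits + exp;
+     if (exp > 0 || *pre < -5) {    // need exponential form
+       *e = *pre - 1;               // E value
+       *pre = 1;                    // one digit before the '.'
+       }
+     }
+   // "123", exp=-2: pre=1        -> "1.23"
+   // "123", exp=+2: e=4,  pre=1  -> "1.23E+4"
+   // "123", exp=-9: e=-7, pre=1  -> "1.23E-7"
+*/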
*/ + } /* exponential form */ + + /* modify the coefficient, adding 0s, '.', and E+nn as needed */ + s=c-1; /* source (LSD) */ + if (pre>0) { /* ddd.ddd (plain), perhaps with E */ + char *dotat=cstart+pre; + if (dotat=dotat; s--, t--) *t=*s; /* open the gap; leave t at gap */ + *t='.'; /* insert the dot */ + c++; /* length increased by one */ + } + + /* finally add the E-part, if needed; it will never be 0, and has */ + /* a maximum length of 4 digits */ + if (e!=0) { + *c++='E'; /* starts with E */ + *c++='+'; /* assume positive */ + if (e<0) { + *(c-1)='-'; /* oops, need '-' */ + e=-e; /* uInt, please */ + } + if (e<1000) { /* 3 (or fewer) digits case */ + u=&BIN2CHAR[e*4]; /* -> length byte */ + memcpy(c, u+4-*u, 4); /* copy fixed 4 characters [is safe] */ + c+=*u; /* bump pointer appropriately */ + } + else { /* 4-digits */ + Int thou=((e>>3)*1049)>>17; /* e/1000 */ + Int rem=e-(1000*thou); /* e%1000 */ + *c++='0'+(char)thou; + u=&BIN2CHAR[rem*4]; /* -> length byte */ + memcpy(c, u+1, 4); /* copy fixed 3+1 characters [is safe] */ + c+=3; /* bump pointer, always 3 digits */ + } + } + *c='\0'; /* add terminator */ + /*printf("res %s\n", string); */ + return string; + } /* pre>0 */ + + /* -5<=pre<=0: here for plain 0.ddd or 0.000ddd forms (can never have E) */ + t=c+1-pre; + *(t+1)='\0'; /* can add terminator now */ + for (; s>=cstart; s--, t--) *t=*s; /* shift whole coefficient right */ + c=cstart; + *c++='0'; /* always starts with 0. */ + *c++='.'; + for (; pre<0; pre++) *c++='0'; /* add any 0's after '.' */ + /*printf("res %s\n", string); */ + return string; + } /* decimal128ToString */ + +/* ------------------------------------------------------------------ */ +/* to-number -- conversion from numeric string */ +/* */ +/* decimal128FromString(result, string, set); */ +/* */ +/* result is the decimal128 format number which gets the result of */ +/* the conversion */ +/* *string is the character string which should contain a valid */ +/* number (which may be a special value) */ +/* set is the context */ +/* */ +/* The context is supplied to this routine is used for error handling */ +/* (setting of status and traps) and for the rounding mode, only. */ +/* If an error occurs, the result will be a valid decimal128 NaN. */ +/* ------------------------------------------------------------------ */ +decimal128 * decimal128FromString(decimal128 *result, const char *string, + decContext *set) { + decContext dc; /* work */ + decNumber dn; /* .. */ + + decContextDefault(&dc, DEC_INIT_DECIMAL128); /* no traps, please */ + dc.round=set->round; /* use supplied rounding */ + + decNumberFromString(&dn, string, &dc); /* will round if needed */ + decimal128FromNumber(result, &dn, &dc); + if (dc.status!=0) { /* something happened */ + decContextSetStatus(set, dc.status); /* .. pass it on */ + } + return result; + } /* decimal128FromString */ + +/* ------------------------------------------------------------------ */ +/* decimal128IsCanonical -- test whether encoding is canonical */ +/* d128 is the source decimal128 */ +/* returns 1 if the encoding of d128 is canonical, 0 otherwise */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +uint32_t decimal128IsCanonical(const decimal128 *d128) { + decNumber dn; /* work */ + decimal128 canon; /* .. */ + decContext dc; /* .. 
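+/* ------------------------------------------------------------------ */
+/* [Editorial usage note -- not from this patch] A typical round trip */
+/* through the two public conversions above (buffer size taken from   */
+/* the DECIMAL128_String constant declared in decimal128.h):          */
+/*
+   decimal128 d;
+   decContext ctx;
+   char buf[DECIMAL128_String];
+   decContextDefault(&ctx, DEC_INIT_DECIMAL128);
+   decimal128FromString(&d, "1.23E+4", &ctx);   // parse (may set status)
+   decimal128ToString(&d, buf);                 // buf now "1.23E+4"
+*/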
*/
+ decContextDefault(&dc, DEC_INIT_DECIMAL128);
+ decimal128ToNumber(d128, &dn);
+ decimal128FromNumber(&canon, &dn, &dc);/* canon will now be canonical */
+ return memcmp(d128, &canon, DECIMAL128_Bytes)==0;
+ } /* decimal128IsCanonical */
+
+/* ------------------------------------------------------------------ */
+/* decimal128Canonical -- copy an encoding, ensuring it is canonical */
+/* d128 is the source decimal128 */
+/* result is the target (may be the same decimal128) */
+/* returns result */
+/* No error is possible. */
+/* ------------------------------------------------------------------ */
+decimal128 * decimal128Canonical(decimal128 *result, const decimal128 *d128) {
+ decNumber dn; /* work */
+ decContext dc; /* .. */
+ decContextDefault(&dc, DEC_INIT_DECIMAL128);
+ decimal128ToNumber(d128, &dn);
+ decimal128FromNumber(result, &dn, &dc);/* result will now be canonical */
+ return result;
+ } /* decimal128Canonical */
+
+#if DECTRACE || DECCHECK
+/* Macros for accessing decimal128 fields. These assume the argument
+ is a reference (pointer) to the decimal128 structure, and the
+ decimal128 is in network byte order (big-endian) */
+/* Get sign */
+#define decimal128Sign(d) ((unsigned)(d)->bytes[0]>>7)
+
+/* Get combination field */
+#define decimal128Comb(d) (((d)->bytes[0] & 0x7c)>>2)
+
+/* Get exponent continuation [does not remove bias] */
+#define decimal128ExpCon(d) ((((d)->bytes[0] & 0x03)<<10) \
+ | ((unsigned)(d)->bytes[1]<<2) \
+ | ((unsigned)(d)->bytes[2]>>6))
+
+/* Set sign [this assumes sign previously 0] */
+#define decimal128SetSign(d, b) { \
+ (d)->bytes[0]|=((unsigned)(b)<<7);}
+
+/* Set exponent continuation [does not apply bias] */
+/* This assumes range has been checked and exponent previously 0; */
+/* type of exponent must be unsigned */
+#define decimal128SetExpCon(d, e) { \
+ (d)->bytes[0]|=(uint8_t)((e)>>10); \
+ (d)->bytes[1] =(uint8_t)(((e)&0x3fc)>>2); \
+ (d)->bytes[2]|=(uint8_t)(((e)&0x03)<<6);}
+
+/* ------------------------------------------------------------------ */
+/* decimal128Show -- display a decimal128 in hexadecimal [debug aid] */
+/* d128 -- the number to show */
+/* ------------------------------------------------------------------ */
+/* Also shows sign/cob/expcon fields extracted */
+void decimal128Show(const decimal128 *d128) {
+ char buf[DECIMAL128_Bytes*2+1];
+ Int i, j=0;
+
+ if (DECLITEND) {
+ for (i=0; i<DECIMAL128_Bytes; i++, j+=2) {
+ sprintf(&buf[j], "%02x", d128->bytes[15-i]);
+ }
+ printf(" D128> %s [S:%d Cb:%02x Ec:%02x] LittleEndian\n", buf,
+ d128->bytes[15]>>7, (d128->bytes[15]>>2)&0x1f,
+ ((d128->bytes[15]&0x3)<<10)|(d128->bytes[14]<<2)|
+ (d128->bytes[13]>>6));
+ }
+ else {
+ for (i=0; i<DECIMAL128_Bytes; i++, j+=2) {
+ sprintf(&buf[j], "%02x", d128->bytes[i]);
+ }
+ printf(" D128> %s [S:%d Cb:%02x Ec:%02x] BigEndian\n", buf,
+ decimal128Sign(d128), decimal128Comb(d128),
+ decimal128ExpCon(d128));
+ }
+ } /* decimal128Show */
+#endif
diff --git a/qemu/libdecnumber/dpd/decimal32.c b/qemu/libdecnumber/dpd/decimal32.c
new file mode 100644
index 00000000..53f29789
--- /dev/null
+++ b/qemu/libdecnumber/dpd/decimal32.c
@@ -0,0 +1,488 @@
+/* Decimal 32-bit format module for the decNumber C Library.
+ Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+ Contributed by IBM Corporation. Author Mike Cowlishaw.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2, or (at your option) any later
+ version.
+ + In addition to the permissions in the GNU General Public License, + the Free Software Foundation gives you unlimited permission to link + the compiled version of this file into combinations with other + programs, and to distribute those combinations without any + restriction coming from the use of this file. (The General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into a combine executable.) + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING. If not, write to the Free + Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. */ + +/* ------------------------------------------------------------------ */ +/* Decimal 32-bit format module */ +/* ------------------------------------------------------------------ */ +/* This module comprises the routines for decimal32 format numbers. */ +/* Conversions are supplied to and from decNumber and String. */ +/* */ +/* This is used when decNumber provides operations, either for all */ +/* operations or as a proxy between decNumber and decSingle. */ +/* */ +/* Error handling is the same as decNumber (qv.). */ +/* ------------------------------------------------------------------ */ +#include "qemu/osdep.h" + +#include "libdecnumber/dconfig.h" +#define DECNUMDIGITS 7 /* make decNumbers with space for 7 */ +#include "libdecnumber/decNumber.h" +#include "libdecnumber/decNumberLocal.h" +#include "libdecnumber/dpd/decimal32.h" + +/* Utility tables and routines [in decimal64.c] */ +extern const uInt COMBEXP[32], COMBMSD[32]; +extern const uByte BIN2CHAR[4001]; + +extern void decDigitsToDPD(const decNumber *, uInt *, Int); +extern void decDigitsFromDPD(decNumber *, const uInt *, Int); + +#if DECTRACE || DECCHECK +void decimal32Show(const decimal32 *); /* for debug */ +extern void decNumberShow(const decNumber *); /* .. */ +#endif + +/* Useful macro */ +/* Clear a structure (e.g., a decNumber) */ +#define DEC_clear(d) memset(d, 0, sizeof(*d)) + +/* ------------------------------------------------------------------ */ +/* decimal32FromNumber -- convert decNumber to decimal32 */ +/* */ +/* ds is the target decimal32 */ +/* dn is the source number (assumed valid) */ +/* set is the context, used only for reporting errors */ +/* */ +/* The set argument is used only for status reporting and for the */ +/* rounding mode (used if the coefficient is more than DECIMAL32_Pmax */ +/* digits or an overflow is detected). If the exponent is out of the */ +/* valid range then Overflow or Underflow will be raised. */ +/* After Underflow a subnormal result is possible. */ +/* */ +/* DEC_Clamped is set if the number has to be 'folded down' to fit, */ +/* by reducing its exponent and multiplying the coefficient by a */ +/* power of ten, or if the exponent on a zero had to be clamped. */ +/* ------------------------------------------------------------------ */ +decimal32 * decimal32FromNumber(decimal32 *d32, const decNumber *dn, + decContext *set) { + uInt status=0; /* status accumulator */ + Int ae; /* adjusted exponent */ + decNumber dw; /* work */ + decContext dc; /* .. */ + uInt *pu; /* .. */ + uInt comb, exp; /* .. 
*/
+ uInt targ=0; /* target 32-bit */
+
+ /* If the number has too many digits, or the exponent could be */
+ /* out of range then reduce the number under the appropriate */
+ /* constraints. This could push the number to Infinity or zero, */
+ /* so this check and rounding must be done before generating the */
+ /* decimal32] */
+ ae=dn->exponent+dn->digits-1; /* [0 if special] */
+ if (dn->digits>DECIMAL32_Pmax /* too many digits */
+ || ae>DECIMAL32_Emax /* likely overflow */
+ || ae<DECIMAL32_Emin) { /* likely underflow */
+ decContextDefault(&dc, DEC_INIT_DECIMAL32); /* [no traps] */
+ dc.round=set->round; /* use supplied rounding */
+ decNumberPlus(&dw, dn, &dc); /* (round and check) */
+ /* [this changes -0 to 0, so enforce the sign...] */
+ dw.bits|=dn->bits&DECNEG;
+ status=dc.status; /* save status */
+ dn=&dw; /* use the work number */
+ } /* maybe out of range */
+
+ if (dn->bits&DECSPECIAL) { /* a special value */
+ if (dn->bits&DECINF) targ=DECIMAL_Inf<<24;
+ else { /* sNaN or qNaN */
+ if ((*dn->lsu!=0 || dn->digits>1) /* non-zero coefficient */
+ && (dn->digits<DECIMAL32_Pmax)) { /* coefficient fits */
+ decDigitsToDPD(dn, &targ, 0);
+ }
+ if (dn->bits&DECNAN) targ|=DECIMAL_NaN<<24;
+ else targ|=DECIMAL_sNaN<<24;
+ } /* a NaN */
+ } /* special */
+
+ else { /* is finite */
+ if (decNumberIsZero(dn)) { /* is a zero */
+ /* set and clamp exponent */
+ if (dn->exponent<-DECIMAL32_Bias) {
+ exp=0; /* low clamp */
+ status|=DEC_Clamped;
+ }
+ else {
+ exp=dn->exponent+DECIMAL32_Bias; /* bias exponent */
+ if (exp>DECIMAL32_Ehigh) { /* top clamp */
+ exp=DECIMAL32_Ehigh;
+ status|=DEC_Clamped;
+ }
+ }
+ comb=(exp>>3) & 0x18; /* msd=0, exp top 2 bits .. */
+ }
+ else { /* non-zero finite number */
+ uInt msd; /* work */
+ Int pad=0; /* coefficient pad digits */
+
+ /* the dn is known to fit, but it may need to be padded */
+ exp=(uInt)(dn->exponent+DECIMAL32_Bias); /* bias exponent */
+ if (exp>DECIMAL32_Ehigh) { /* fold-down case */
+ pad=exp-DECIMAL32_Ehigh;
+ exp=DECIMAL32_Ehigh; /* [to maximum] */
+ status|=DEC_Clamped;
+ }
+
+ /* fastpath common case */
+ if (DECDPUN==3 && pad==0) {
+ targ=BIN2DPD[dn->lsu[0]];
+ if (dn->digits>3) targ|=(uInt)(BIN2DPD[dn->lsu[1]])<<10;
+ msd=(dn->digits==7 ? dn->lsu[2] : 0);
+ }
+ else { /* general case */
+ decDigitsToDPD(dn, &targ, pad);
+ /* save and clear the top digit */
+ msd=targ>>20;
+ targ&=0x000fffff;
+ }
+
+ /* create the combination field */
+ if (msd>=8) comb=0x18 | ((exp>>5) & 0x06) | (msd & 0x01);
+ else comb=((exp>>3) & 0x18) | msd;
+ }
+ targ|=comb<<26; /* add combination field .. */
+ targ|=(exp&0x3f)<<20; /* .. and exponent continuation */
+ } /* finite */
+
+ if (dn->bits&DECNEG) targ|=0x80000000; /* add sign bit */
+
+ /* now write to storage; this is endian */
+ pu=(uInt *)d32->bytes; /* overlay */
+ *pu=targ; /* directly store the int */
+
+ if (status!=0) decContextSetStatus(set, status); /* pass on status */
+ /* decimal32Show(d32); */
+ return d32;
+ } /* decimal32FromNumber */
+
+/* ------------------------------------------------------------------ */
+/* decimal32ToNumber -- convert decimal32 to decNumber */
+/* d32 is the source decimal32 */
+/* dn is the target number, with appropriate space */
+/* No error is possible.
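+
+ A minimal usage sketch follows (illustrative only, not part of
+ the library; it relies solely on declarations already included
+ by this module):
+
+ decContext set;
+ decimal32 d32;
+ decNumber dn;
+ char buf[DECIMAL32_String];
+
+ decContextDefault(&set, DEC_INIT_DECIMAL32);
+ decimal32FromString(&d32, "1.23", &set); -- round and encode
+ decimal32ToNumber(&d32, &dn); -- unpack for arithmetic
+ decNumberToString(&dn, buf); -- buf now holds "1.23"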
*/ +/* ------------------------------------------------------------------ */ +decNumber * decimal32ToNumber(const decimal32 *d32, decNumber *dn) { + uInt msd; /* coefficient MSD */ + uInt exp; /* exponent top two bits */ + uInt comb; /* combination field */ + uInt sour; /* source 32-bit */ + const uInt *pu; /* work */ + + /* load source from storage; this is endian */ + pu=(const uInt *)d32->bytes; /* overlay */ + sour=*pu; /* directly load the int */ + + comb=(sour>>26)&0x1f; /* combination field */ + + decNumberZero(dn); /* clean number */ + if (sour&0x80000000) dn->bits=DECNEG; /* set sign if negative */ + + msd=COMBMSD[comb]; /* decode the combination field */ + exp=COMBEXP[comb]; /* .. */ + + if (exp==3) { /* is a special */ + if (msd==0) { + dn->bits|=DECINF; + return dn; /* no coefficient needed */ + } + else if (sour&0x02000000) dn->bits|=DECSNAN; + else dn->bits|=DECNAN; + msd=0; /* no top digit */ + } + else { /* is a finite number */ + dn->exponent=(exp<<6)+((sour>>20)&0x3f)-DECIMAL32_Bias; /* unbiased */ + } + + /* get the coefficient */ + sour&=0x000fffff; /* clean coefficient continuation */ + if (msd) { /* non-zero msd */ + sour|=msd<<20; /* prefix to coefficient */ + decDigitsFromDPD(dn, &sour, 3); /* process 3 declets */ + return dn; + } + /* msd=0 */ + if (!sour) return dn; /* easy: coefficient is 0 */ + if (sour&0x000ffc00) /* need 2 declets? */ + decDigitsFromDPD(dn, &sour, 2); /* process 2 declets */ + else + decDigitsFromDPD(dn, &sour, 1); /* process 1 declet */ + return dn; + } /* decimal32ToNumber */ + +/* ------------------------------------------------------------------ */ +/* to-scientific-string -- conversion to numeric string */ +/* to-engineering-string -- conversion to numeric string */ +/* */ +/* decimal32ToString(d32, string); */ +/* decimal32ToEngString(d32, string); */ +/* */ +/* d32 is the decimal32 format number to convert */ +/* string is the string where the result will be laid out */ +/* */ +/* string must be at least 24 characters */ +/* */ +/* No error is possible, and no status can be set. */ +/* ------------------------------------------------------------------ */ +char * decimal32ToEngString(const decimal32 *d32, char *string){ + decNumber dn; /* work */ + decimal32ToNumber(d32, &dn); + decNumberToEngString(&dn, string); + return string; + } /* decimal32ToEngString */ + +char * decimal32ToString(const decimal32 *d32, char *string){ + uInt msd; /* coefficient MSD */ + Int exp; /* exponent top two bits or full */ + uInt comb; /* combination field */ + char *cstart; /* coefficient start */ + char *c; /* output pointer in string */ + const uInt *pu; /* work */ + const uByte *u; /* .. */ + char *s, *t; /* .. (source, target) */ + Int dpd; /* .. */ + Int pre, e; /* .. */ + uInt sour; /* source 32-bit */ + + /* load source from storage; this is endian */ + pu=(const uInt *)d32->bytes; /* overlay */ + sour=*pu; /* directly load the int */ + + c=string; /* where result will go */ + if (((Int)sour)<0) *c++='-'; /* handle sign */ + + comb=(sour>>26)&0x1f; /* combination field */ + msd=COMBMSD[comb]; /* decode the combination field */ + exp=COMBEXP[comb]; /* .. 
*/
+
+ if (exp==3) {
+ if (msd==0) { /* infinity */
+ strcpy(c, "Inf");
+ strcpy(c+3, "inity");
+ return string; /* easy */
+ }
+ if (sour&0x02000000) *c++='s'; /* sNaN */
+ strcpy(c, "NaN"); /* complete word */
+ c+=3; /* step past */
+ if ((sour&0x000fffff)==0) return string; /* zero payload */
+ /* otherwise drop through to add integer; set correct exp */
+ exp=0; msd=0; /* setup for following code */
+ }
+ else exp=(exp<<6)+((sour>>20)&0x3f)-DECIMAL32_Bias; /* unbiased */
+
+ /* convert 7 digits of significand to characters */
+ cstart=c; /* save start of coefficient */
+ if (msd) *c++='0'+(char)msd; /* non-zero most significant digit */
+
+ /* Now decode the declets. After extracting each one, it is */
+ /* decoded to binary and then to a 4-char sequence by table lookup; */
+ /* the 4-chars are a 1-char length (significant digits, except 000 */
+ /* has length 0). This allows us to left-align the first declet */
+ /* with non-zero content, then remaining ones are full 3-char */
+ /* length. We use fixed-length memcpys because variable-length */
+ /* causes a subroutine call in GCC. (These are length 4 for speed */
+ /* and are safe because the array has an extra terminator byte.) */
+ #define dpd2char u=&BIN2CHAR[DPD2BIN[dpd]*4]; \
+ if (c!=cstart) {memcpy(c, u+1, 4); c+=3;} \
+ else if (*u) {memcpy(c, u+4-*u, 4); c+=*u;}
+
+ dpd=(sour>>10)&0x3ff; /* declet 1 */
+ dpd2char;
+ dpd=(sour)&0x3ff; /* declet 2 */
+ dpd2char;
+
+ if (c==cstart) *c++='0'; /* all zeros -- make 0 */
+
+ if (exp==0) { /* integer or NaN case -- easy */
+ *c='\0'; /* terminate */
+ return string;
+ }
+
+ /* non-0 exponent */
+ e=0; /* assume no E */
+ pre=c-cstart+exp;
+ /* [here, pre-exp is the digits count (==1 for zero)] */
+ if (exp>0 || pre<-5) { /* need exponential form */
+ e=pre-1; /* calculate E value */
+ pre=1; /* assume one digit before '.' */
+ } /* exponential form */
+
+ /* modify the coefficient, adding 0s, '.', and E+nn as needed */
+ s=c-1; /* source (LSD) */
+ if (pre>0) { /* ddd.ddd (plain), perhaps with E */
+ char *dotat=cstart+pre;
+ if (dotat<c) { /* if embedded dot needed... */
+ t=c; /* target */
+ for (; s>=dotat; s--, t--) *t=*s; /* open the gap; leave t at gap */
+ *t='.'; /* insert the dot */
+ c++; /* length increased by one */
+ }
+
+ /* finally add the E-part, if needed; it will never be 0, and has */
+ /* a maximum length of 3 digits (E-101 case) */
+ if (e!=0) {
+ *c++='E'; /* starts with E */
+ *c++='+'; /* assume positive */
+ if (e<0) {
+ *(c-1)='-'; /* oops, need '-' */
+ e=-e; /* uInt, please */
+ }
+ u=&BIN2CHAR[e*4]; /* -> length byte */
+ memcpy(c, u+4-*u, 4); /* copy fixed 4 characters [is safe] */
+ c+=*u; /* bump pointer appropriately */
+ }
+ *c='\0'; /* add terminator */
+ /*printf("res %s\n", string); */
+ return string;
+ } /* pre>0 */
+
+ /* -5<=pre<=0: here for plain 0.ddd or 0.000ddd forms (can never have E) */
+ t=c+1-pre;
+ *(t+1)='\0'; /* can add terminator now */
+ for (; s>=cstart; s--, t--) *t=*s; /* shift whole coefficient right */
+ c=cstart;
+ *c++='0'; /* always starts with 0. */
+ *c++='.';
+ for (; pre<0; pre++) *c++='0'; /* add any 0's after '.'
*/ + /*printf("res %s\n", string); */ + return string; + } /* decimal32ToString */ + +/* ------------------------------------------------------------------ */ +/* to-number -- conversion from numeric string */ +/* */ +/* decimal32FromString(result, string, set); */ +/* */ +/* result is the decimal32 format number which gets the result of */ +/* the conversion */ +/* *string is the character string which should contain a valid */ +/* number (which may be a special value) */ +/* set is the context */ +/* */ +/* The context is supplied to this routine is used for error handling */ +/* (setting of status and traps) and for the rounding mode, only. */ +/* If an error occurs, the result will be a valid decimal32 NaN. */ +/* ------------------------------------------------------------------ */ +decimal32 * decimal32FromString(decimal32 *result, const char *string, + decContext *set) { + decContext dc; /* work */ + decNumber dn; /* .. */ + + decContextDefault(&dc, DEC_INIT_DECIMAL32); /* no traps, please */ + dc.round=set->round; /* use supplied rounding */ + + decNumberFromString(&dn, string, &dc); /* will round if needed */ + decimal32FromNumber(result, &dn, &dc); + if (dc.status!=0) { /* something happened */ + decContextSetStatus(set, dc.status); /* .. pass it on */ + } + return result; + } /* decimal32FromString */ + +/* ------------------------------------------------------------------ */ +/* decimal32IsCanonical -- test whether encoding is canonical */ +/* d32 is the source decimal32 */ +/* returns 1 if the encoding of d32 is canonical, 0 otherwise */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +uint32_t decimal32IsCanonical(const decimal32 *d32) { + decNumber dn; /* work */ + decimal32 canon; /* .. */ + decContext dc; /* .. */ + decContextDefault(&dc, DEC_INIT_DECIMAL32); + decimal32ToNumber(d32, &dn); + decimal32FromNumber(&canon, &dn, &dc);/* canon will now be canonical */ + return memcmp(d32, &canon, DECIMAL32_Bytes)==0; + } /* decimal32IsCanonical */ + +/* ------------------------------------------------------------------ */ +/* decimal32Canonical -- copy an encoding, ensuring it is canonical */ +/* d32 is the source decimal32 */ +/* result is the target (may be the same decimal32) */ +/* returns result */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +decimal32 * decimal32Canonical(decimal32 *result, const decimal32 *d32) { + decNumber dn; /* work */ + decContext dc; /* .. */ + decContextDefault(&dc, DEC_INIT_DECIMAL32); + decimal32ToNumber(d32, &dn); + decimal32FromNumber(result, &dn, &dc);/* result will now be canonical */ + return result; + } /* decimal32Canonical */ + +#if DECTRACE || DECCHECK +/* Macros for accessing decimal32 fields. 
These assume the argument
+ is a reference (pointer) to the decimal32 structure, and the
+ decimal32 is in network byte order (big-endian) */
+/* Get sign */
+#define decimal32Sign(d) ((unsigned)(d)->bytes[0]>>7)
+
+/* Get combination field */
+#define decimal32Comb(d) (((d)->bytes[0] & 0x7c)>>2)
+
+/* Get exponent continuation [does not remove bias] */
+#define decimal32ExpCon(d) ((((d)->bytes[0] & 0x03)<<4) \
+ | ((unsigned)(d)->bytes[1]>>4))
+
+/* Set sign [this assumes sign previously 0] */
+#define decimal32SetSign(d, b) { \
+ (d)->bytes[0]|=((unsigned)(b)<<7);}
+
+/* Set exponent continuation [does not apply bias] */
+/* This assumes range has been checked and exponent previously 0; */
+/* type of exponent must be unsigned */
+#define decimal32SetExpCon(d, e) { \
+ (d)->bytes[0]|=(uint8_t)((e)>>4); \
+ (d)->bytes[1]|=(uint8_t)(((e)&0x0F)<<4);}
+
+/* ------------------------------------------------------------------ */
+/* decimal32Show -- display a decimal32 in hexadecimal [debug aid] */
+/* d32 -- the number to show */
+/* ------------------------------------------------------------------ */
+/* Also shows sign/cob/expcon fields extracted - valid bigendian only */
+void decimal32Show(const decimal32 *d32) {
+ char buf[DECIMAL32_Bytes*2+1];
+ Int i, j=0;
+
+ if (DECLITEND) {
+ for (i=0; i<DECIMAL32_Bytes; i++, j+=2) {
+ sprintf(&buf[j], "%02x", d32->bytes[3-i]);
+ }
+ printf(" D32> %s [S:%d Cb:%02x Ec:%02x] LittleEndian\n", buf,
+ d32->bytes[3]>>7, (d32->bytes[3]>>2)&0x1f,
+ ((d32->bytes[3]&0x3)<<4)| (d32->bytes[2]>>4));
+ }
+ else {
+ for (i=0; i<DECIMAL32_Bytes; i++, j+=2) {
+ sprintf(&buf[j], "%02x", d32->bytes[i]);
+ }
+ printf(" D32> %s [S:%d Cb:%02x Ec:%02x] BigEndian\n", buf,
+ decimal32Sign(d32), decimal32Comb(d32), decimal32ExpCon(d32));
+ }
+ } /* decimal32Show */
+#endif
diff --git a/qemu/libdecnumber/dpd/decimal64.c b/qemu/libdecnumber/dpd/decimal64.c
new file mode 100644
index 00000000..48161764
--- /dev/null
+++ b/qemu/libdecnumber/dpd/decimal64.c
@@ -0,0 +1,849 @@
+/* Decimal 64-bit format module for the decNumber C Library.
+ Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+ Contributed by IBM Corporation. Author Mike Cowlishaw.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ In addition to the permissions in the GNU General Public License,
+ the Free Software Foundation gives you unlimited permission to link
+ the compiled version of this file into combinations with other
+ programs, and to distribute those combinations without any
+ restriction coming from the use of this file. (The General Public
+ License restrictions do apply in other respects; for example, they
+ cover modification of the file, and distribution when not linked
+ into a combine executable.)
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+/* ------------------------------------------------------------------ */
+/* Decimal 64-bit format module */
+/* ------------------------------------------------------------------ */
+/* This module comprises the routines for decimal64 format numbers.
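+
+ For orientation, a round trip through this module looks like the
+ following (an illustrative sketch, not part of the module; it
+ uses only names declared in decimal64.h and decContext.h):
+
+ decContext set;
+ decimal64 d64;
+ char buf[DECIMAL64_String];
+
+ decContextDefault(&set, DEC_INIT_DECIMAL64);
+ decimal64FromString(&d64, "-7.50E+3", &set);
+ decimal64ToString(&d64, buf); -- buf: "-7.50E+3"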
*/
+/* Conversions are supplied to and from decNumber and String. */
+/* */
+/* This is used when decNumber provides operations, either for all */
+/* operations or as a proxy between decNumber and decSingle. */
+/* */
+/* Error handling is the same as decNumber (qv.). */
+/* ------------------------------------------------------------------ */
+#include "qemu/osdep.h"
+
+#include "libdecnumber/dconfig.h"
+#define DECNUMDIGITS 16 /* make decNumbers with space for 16 */
+#include "libdecnumber/decNumber.h"
+#include "libdecnumber/decNumberLocal.h"
+#include "libdecnumber/dpd/decimal64.h"
+
+/* Utility routines and tables [in decimal64.c]; externs for C++ */
+extern const uInt COMBEXP[32], COMBMSD[32];
+extern const uByte BIN2CHAR[4001];
+
+extern void decDigitsFromDPD(decNumber *, const uInt *, Int);
+extern void decDigitsToDPD(const decNumber *, uInt *, Int);
+
+#if DECTRACE || DECCHECK
+void decimal64Show(const decimal64 *); /* for debug */
+extern void decNumberShow(const decNumber *); /* .. */
+#endif
+
+/* Useful macro */
+/* Clear a structure (e.g., a decNumber) */
+#define DEC_clear(d) memset(d, 0, sizeof(*d))
+
+/* define and include the tables to use for conversions */
+#define DEC_BIN2CHAR 1
+#define DEC_DPD2BIN 1
+#define DEC_BIN2DPD 1 /* used for all sizes */
+#include "libdecnumber/decDPD.h"
+
+/* ------------------------------------------------------------------ */
+/* decimal64FromNumber -- convert decNumber to decimal64 */
+/* */
+/* ds is the target decimal64 */
+/* dn is the source number (assumed valid) */
+/* set is the context, used only for reporting errors */
+/* */
+/* The set argument is used only for status reporting and for the */
+/* rounding mode (used if the coefficient is more than DECIMAL64_Pmax */
+/* digits or an overflow is detected). If the exponent is out of the */
+/* valid range then Overflow or Underflow will be raised. */
+/* After Underflow a subnormal result is possible. */
+/* */
+/* DEC_Clamped is set if the number has to be 'folded down' to fit, */
+/* by reducing its exponent and multiplying the coefficient by a */
+/* power of ten, or if the exponent on a zero had to be clamped. */
+/* ------------------------------------------------------------------ */
+decimal64 * decimal64FromNumber(decimal64 *d64, const decNumber *dn,
+ decContext *set) {
+ uInt status=0; /* status accumulator */
+ Int ae; /* adjusted exponent */
+ decNumber dw; /* work */
+ decContext dc; /* .. */
+ uInt *pu; /* .. */
+ uInt comb, exp; /* .. */
+ uInt targar[2]={0, 0}; /* target 64-bit */
+ #define targhi targar[1] /* name the word with the sign */
+ #define targlo targar[0] /* and the other */
+
+ /* If the number has too many digits, or the exponent could be */
+ /* out of range then reduce the number under the appropriate */
+ /* constraints. This could push the number to Infinity or zero, */
+ /* so this check and rounding must be done before generating the */
+ /* decimal64] */
+ ae=dn->exponent+dn->digits-1; /* [0 if special] */
+ if (dn->digits>DECIMAL64_Pmax /* too many digits */
+ || ae>DECIMAL64_Emax /* likely overflow */
+ || ae<DECIMAL64_Emin) { /* likely underflow */
+ decContextDefault(&dc, DEC_INIT_DECIMAL64); /* [no traps] */
+ dc.round=set->round; /* use supplied rounding */
+ decNumberPlus(&dw, dn, &dc); /* (round and check) */
+ /* [this changes -0 to 0, so enforce the sign...]
*/
+ dw.bits|=dn->bits&DECNEG;
+ status=dc.status; /* save status */
+ dn=&dw; /* use the work number */
+ } /* maybe out of range */
+
+ if (dn->bits&DECSPECIAL) { /* a special value */
+ if (dn->bits&DECINF) targhi=DECIMAL_Inf<<24;
+ else { /* sNaN or qNaN */
+ if ((*dn->lsu!=0 || dn->digits>1) /* non-zero coefficient */
+ && (dn->digits<DECIMAL64_Pmax)) { /* coefficient fits */
+ decDigitsToDPD(dn, targar, 0);
+ }
+ if (dn->bits&DECNAN) targhi|=DECIMAL_NaN<<24;
+ else targhi|=DECIMAL_sNaN<<24;
+ } /* a NaN */
+ } /* special */
+
+ else { /* is finite */
+ if (decNumberIsZero(dn)) { /* is a zero */
+ /* set and clamp exponent */
+ if (dn->exponent<-DECIMAL64_Bias) {
+ exp=0; /* low clamp */
+ status|=DEC_Clamped;
+ }
+ else {
+ exp=dn->exponent+DECIMAL64_Bias; /* bias exponent */
+ if (exp>DECIMAL64_Ehigh) { /* top clamp */
+ exp=DECIMAL64_Ehigh;
+ status|=DEC_Clamped;
+ }
+ }
+ comb=(exp>>5) & 0x18; /* msd=0, exp top 2 bits .. */
+ }
+ else { /* non-zero finite number */
+ uInt msd; /* work */
+ Int pad=0; /* coefficient pad digits */
+
+ /* the dn is known to fit, but it may need to be padded */
+ exp=(uInt)(dn->exponent+DECIMAL64_Bias); /* bias exponent */
+ if (exp>DECIMAL64_Ehigh) { /* fold-down case */
+ pad=exp-DECIMAL64_Ehigh;
+ exp=DECIMAL64_Ehigh; /* [to maximum] */
+ status|=DEC_Clamped;
+ }
+
+ /* fastpath common case */
+ if (DECDPUN==3 && pad==0) {
+ uInt dpd[6]={0,0,0,0,0,0};
+ uInt i;
+ Int d=dn->digits;
+ for (i=0; d>0; i++, d-=3) dpd[i]=BIN2DPD[dn->lsu[i]];
+ targlo =dpd[0];
+ targlo|=dpd[1]<<10;
+ targlo|=dpd[2]<<20;
+ if (dn->digits>6) {
+ targlo|=dpd[3]<<30;
+ targhi =dpd[3]>>2;
+ targhi|=dpd[4]<<8;
+ }
+ msd=dpd[5]; /* [did not really need conversion] */
+ }
+ else { /* general case */
+ decDigitsToDPD(dn, targar, pad);
+ /* save and clear the top digit */
+ msd=targhi>>18;
+ targhi&=0x0003ffff;
+ }
+
+ /* create the combination field */
+ if (msd>=8) comb=0x18 | ((exp>>7) & 0x06) | (msd & 0x01);
+ else comb=((exp>>5) & 0x18) | msd;
+ }
+ targhi|=comb<<26; /* add combination field .. */
+ targhi|=(exp&0xff)<<18; /* .. and exponent continuation */
+ } /* finite */
+
+ if (dn->bits&DECNEG) targhi|=0x80000000; /* add sign bit */
+
+ /* now write to storage; this is now always endian */
+ pu=(uInt *)d64->bytes; /* overlay */
+ if (DECLITEND) {
+ pu[0]=targar[0]; /* directly store the low int */
+ pu[1]=targar[1]; /* then the high int */
+ }
+ else {
+ pu[0]=targar[1]; /* directly store the high int */
+ pu[1]=targar[0]; /* then the low int */
+ }
+
+ if (status!=0) decContextSetStatus(set, status); /* pass on status */
+ /* decimal64Show(d64); */
+ return d64;
+ } /* decimal64FromNumber */
+
+/* ------------------------------------------------------------------ */
+/* decimal64ToNumber -- convert decimal64 to decNumber */
+/* d64 is the source decimal64 */
+/* dn is the target number, with appropriate space */
+/* No error is possible. */
+/* ------------------------------------------------------------------ */
+decNumber * decimal64ToNumber(const decimal64 *d64, decNumber *dn) {
+ uInt msd; /* coefficient MSD */
+ uInt exp; /* exponent top two bits */
+ uInt comb; /* combination field */
+ const uInt *pu; /* work */
+ Int need; /* ..
*/ + uInt sourar[2]; /* source 64-bit */ + #define sourhi sourar[1] /* name the word with the sign */ + #define sourlo sourar[0] /* and the lower word */ + + /* load source from storage; this is endian */ + pu=(const uInt *)d64->bytes; /* overlay */ + if (DECLITEND) { + sourlo=pu[0]; /* directly load the low int */ + sourhi=pu[1]; /* then the high int */ + } + else { + sourhi=pu[0]; /* directly load the high int */ + sourlo=pu[1]; /* then the low int */ + } + + comb=(sourhi>>26)&0x1f; /* combination field */ + + decNumberZero(dn); /* clean number */ + if (sourhi&0x80000000) dn->bits=DECNEG; /* set sign if negative */ + + msd=COMBMSD[comb]; /* decode the combination field */ + exp=COMBEXP[comb]; /* .. */ + + if (exp==3) { /* is a special */ + if (msd==0) { + dn->bits|=DECINF; + return dn; /* no coefficient needed */ + } + else if (sourhi&0x02000000) dn->bits|=DECSNAN; + else dn->bits|=DECNAN; + msd=0; /* no top digit */ + } + else { /* is a finite number */ + dn->exponent=(exp<<8)+((sourhi>>18)&0xff)-DECIMAL64_Bias; /* unbiased */ + } + + /* get the coefficient */ + sourhi&=0x0003ffff; /* clean coefficient continuation */ + if (msd) { /* non-zero msd */ + sourhi|=msd<<18; /* prefix to coefficient */ + need=6; /* process 6 declets */ + } + else { /* msd=0 */ + if (!sourhi) { /* top word 0 */ + if (!sourlo) return dn; /* easy: coefficient is 0 */ + need=3; /* process at least 3 declets */ + if (sourlo&0xc0000000) need++; /* process 4 declets */ + /* [could reduce some more, here] */ + } + else { /* some bits in top word, msd=0 */ + need=4; /* process at least 4 declets */ + if (sourhi&0x0003ff00) need++; /* top declet!=0, process 5 */ + } + } /*msd=0 */ + + decDigitsFromDPD(dn, sourar, need); /* process declets */ + return dn; + } /* decimal64ToNumber */ + + +/* ------------------------------------------------------------------ */ +/* to-scientific-string -- conversion to numeric string */ +/* to-engineering-string -- conversion to numeric string */ +/* */ +/* decimal64ToString(d64, string); */ +/* decimal64ToEngString(d64, string); */ +/* */ +/* d64 is the decimal64 format number to convert */ +/* string is the string where the result will be laid out */ +/* */ +/* string must be at least 24 characters */ +/* */ +/* No error is possible, and no status can be set. */ +/* ------------------------------------------------------------------ */ +char * decimal64ToEngString(const decimal64 *d64, char *string){ + decNumber dn; /* work */ + decimal64ToNumber(d64, &dn); + decNumberToEngString(&dn, string); + return string; + } /* decimal64ToEngString */ + +char * decimal64ToString(const decimal64 *d64, char *string){ + uInt msd; /* coefficient MSD */ + Int exp; /* exponent top two bits or full */ + uInt comb; /* combination field */ + char *cstart; /* coefficient start */ + char *c; /* output pointer in string */ + const uInt *pu; /* work */ + char *s, *t; /* .. (source, target) */ + Int dpd; /* .. */ + Int pre, e; /* .. */ + const uByte *u; /* .. 
*/ + + uInt sourar[2]; /* source 64-bit */ + #define sourhi sourar[1] /* name the word with the sign */ + #define sourlo sourar[0] /* and the lower word */ + + /* load source from storage; this is endian */ + pu=(const uInt *)d64->bytes; /* overlay */ + if (DECLITEND) { + sourlo=pu[0]; /* directly load the low int */ + sourhi=pu[1]; /* then the high int */ + } + else { + sourhi=pu[0]; /* directly load the high int */ + sourlo=pu[1]; /* then the low int */ + } + + c=string; /* where result will go */ + if (((Int)sourhi)<0) *c++='-'; /* handle sign */ + + comb=(sourhi>>26)&0x1f; /* combination field */ + msd=COMBMSD[comb]; /* decode the combination field */ + exp=COMBEXP[comb]; /* .. */ + + if (exp==3) { + if (msd==0) { /* infinity */ + strcpy(c, "Inf"); + strcpy(c+3, "inity"); + return string; /* easy */ + } + if (sourhi&0x02000000) *c++='s'; /* sNaN */ + strcpy(c, "NaN"); /* complete word */ + c+=3; /* step past */ + if (sourlo==0 && (sourhi&0x0003ffff)==0) return string; /* zero payload */ + /* otherwise drop through to add integer; set correct exp */ + exp=0; msd=0; /* setup for following code */ + } + else exp=(exp<<8)+((sourhi>>18)&0xff)-DECIMAL64_Bias; + + /* convert 16 digits of significand to characters */ + cstart=c; /* save start of coefficient */ + if (msd) *c++='0'+(char)msd; /* non-zero most significant digit */ + + /* Now decode the declets. After extracting each one, it is */ + /* decoded to binary and then to a 4-char sequence by table lookup; */ + /* the 4-chars are a 1-char length (significant digits, except 000 */ + /* has length 0). This allows us to left-align the first declet */ + /* with non-zero content, then remaining ones are full 3-char */ + /* length. We use fixed-length memcpys because variable-length */ + /* causes a subroutine call in GCC. (These are length 4 for speed */ + /* and are safe because the array has an extra terminator byte.) */ + #define dpd2char u=&BIN2CHAR[DPD2BIN[dpd]*4]; \ + if (c!=cstart) {memcpy(c, u+1, 4); c+=3;} \ + else if (*u) {memcpy(c, u+4-*u, 4); c+=*u;} + + dpd=(sourhi>>8)&0x3ff; /* declet 1 */ + dpd2char; + dpd=((sourhi&0xff)<<2) | (sourlo>>30); /* declet 2 */ + dpd2char; + dpd=(sourlo>>20)&0x3ff; /* declet 3 */ + dpd2char; + dpd=(sourlo>>10)&0x3ff; /* declet 4 */ + dpd2char; + dpd=(sourlo)&0x3ff; /* declet 5 */ + dpd2char; + + if (c==cstart) *c++='0'; /* all zeros -- make 0 */ + + if (exp==0) { /* integer or NaN case -- easy */ + *c='\0'; /* terminate */ + return string; + } + + /* non-0 exponent */ + e=0; /* assume no E */ + pre=c-cstart+exp; + /* [here, pre-exp is the digits count (==1 for zero)] */ + if (exp>0 || pre<-5) { /* need exponential form */ + e=pre-1; /* calculate E value */ + pre=1; /* assume one digit before '.' 
*/
+ } /* exponential form */
+
+ /* modify the coefficient, adding 0s, '.', and E+nn as needed */
+ s=c-1; /* source (LSD) */
+ if (pre>0) { /* ddd.ddd (plain), perhaps with E */
+ char *dotat=cstart+pre;
+ if (dotat<c) { /* if embedded dot needed... */
+ t=c; /* target */
+ for (; s>=dotat; s--, t--) *t=*s; /* open the gap; leave t at gap */
+ *t='.'; /* insert the dot */
+ c++; /* length increased by one */
+ }
+
+ /* finally add the E-part, if needed; it will never be 0, and has */
+ /* a maximum length of 3 digits */
+ if (e!=0) {
+ *c++='E'; /* starts with E */
+ *c++='+'; /* assume positive */
+ if (e<0) {
+ *(c-1)='-'; /* oops, need '-' */
+ e=-e; /* uInt, please */
+ }
+ u=&BIN2CHAR[e*4]; /* -> length byte */
+ memcpy(c, u+4-*u, 4); /* copy fixed 4 characters [is safe] */
+ c+=*u; /* bump pointer appropriately */
+ }
+ *c='\0'; /* add terminator */
+ /*printf("res %s\n", string); */
+ return string;
+ } /* pre>0 */
+
+ /* -5<=pre<=0: here for plain 0.ddd or 0.000ddd forms (can never have E) */
+ t=c+1-pre;
+ *(t+1)='\0'; /* can add terminator now */
+ for (; s>=cstart; s--, t--) *t=*s; /* shift whole coefficient right */
+ c=cstart;
+ *c++='0'; /* always starts with 0. */
+ *c++='.';
+ for (; pre<0; pre++) *c++='0'; /* add any 0's after '.' */
+ /*printf("res %s\n", string); */
+ return string;
+ } /* decimal64ToString */
+
+/* ------------------------------------------------------------------ */
+/* to-number -- conversion from numeric string */
+/* */
+/* decimal64FromString(result, string, set); */
+/* */
+/* result is the decimal64 format number which gets the result of */
+/* the conversion */
+/* *string is the character string which should contain a valid */
+/* number (which may be a special value) */
+/* set is the context */
+/* */
+/* The context supplied to this routine is used for error handling */
+/* (setting of status and traps) and for the rounding mode, only. */
+/* If an error occurs, the result will be a valid decimal64 NaN. */
+/* ------------------------------------------------------------------ */
+decimal64 * decimal64FromString(decimal64 *result, const char *string,
+ decContext *set) {
+ decContext dc; /* work */
+ decNumber dn; /* .. */
+
+ decContextDefault(&dc, DEC_INIT_DECIMAL64); /* no traps, please */
+ dc.round=set->round; /* use supplied rounding */
+
+ decNumberFromString(&dn, string, &dc); /* will round if needed */
+
+ decimal64FromNumber(result, &dn, &dc);
+ if (dc.status!=0) { /* something happened */
+ decContextSetStatus(set, dc.status); /* .. pass it on */
+ }
+ return result;
+ } /* decimal64FromString */
+
+/* ------------------------------------------------------------------ */
+/* decimal64IsCanonical -- test whether encoding is canonical */
+/* d64 is the source decimal64 */
+/* returns 1 if the encoding of d64 is canonical, 0 otherwise */
+/* No error is possible. */
+/* ------------------------------------------------------------------ */
+uint32_t decimal64IsCanonical(const decimal64 *d64) {
+ decNumber dn; /* work */
+ decimal64 canon; /* .. */
+ decContext dc; /* .. */
+ decContextDefault(&dc, DEC_INIT_DECIMAL64);
+ decimal64ToNumber(d64, &dn);
+ decimal64FromNumber(&canon, &dn, &dc);/* canon will now be canonical */
+ return memcmp(d64, &canon, DECIMAL64_Bytes)==0;
+ } /* decimal64IsCanonical */
+
+/* ------------------------------------------------------------------ */
+/* decimal64Canonical -- copy an encoding, ensuring it is canonical */
+/* d64 is the source decimal64 */
+/* result is the target (may be the same decimal64) */
+/* returns result */
+/* No error is possible.
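+
+ A sketch of the intended use (illustrative only; any byte
+ pattern is a structurally valid decimal64, so canonicalization
+ itself cannot fail):
+
+ decimal64 in;
+ ... fill in.bytes, e.g. from an external source ...
+ if (!decimal64IsCanonical(&in))
+ decimal64Canonical(&in, &in); -- in-place rewrite is allowed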
*/
+/* ------------------------------------------------------------------ */
+decimal64 * decimal64Canonical(decimal64 *result, const decimal64 *d64) {
+ decNumber dn; /* work */
+ decContext dc; /* .. */
+ decContextDefault(&dc, DEC_INIT_DECIMAL64);
+ decimal64ToNumber(d64, &dn);
+ decimal64FromNumber(result, &dn, &dc);/* result will now be canonical */
+ return result;
+ } /* decimal64Canonical */
+
+#if DECTRACE || DECCHECK
+/* Macros for accessing decimal64 fields. These assume the
+ argument is a reference (pointer) to the decimal64 structure,
+ and the decimal64 is in network byte order (big-endian) */
+/* Get sign */
+#define decimal64Sign(d) ((unsigned)(d)->bytes[0]>>7)
+
+/* Get combination field */
+#define decimal64Comb(d) (((d)->bytes[0] & 0x7c)>>2)
+
+/* Get exponent continuation [does not remove bias] */
+#define decimal64ExpCon(d) ((((d)->bytes[0] & 0x03)<<6) \
+ | ((unsigned)(d)->bytes[1]>>2))
+
+/* Set sign [this assumes sign previously 0] */
+#define decimal64SetSign(d, b) { \
+ (d)->bytes[0]|=((unsigned)(b)<<7);}
+
+/* Set exponent continuation [does not apply bias] */
+/* This assumes range has been checked and exponent previously 0; */
+/* type of exponent must be unsigned */
+#define decimal64SetExpCon(d, e) { \
+ (d)->bytes[0]|=(uint8_t)((e)>>6); \
+ (d)->bytes[1]|=(uint8_t)(((e)&0x3F)<<2);}
+
+/* ------------------------------------------------------------------ */
+/* decimal64Show -- display a decimal64 in hexadecimal [debug aid] */
+/* d64 -- the number to show */
+/* ------------------------------------------------------------------ */
+/* Also shows sign/cob/expcon fields extracted */
+void decimal64Show(const decimal64 *d64) {
+ char buf[DECIMAL64_Bytes*2+1];
+ Int i, j=0;
+
+ if (DECLITEND) {
+ for (i=0; i<DECIMAL64_Bytes; i++, j+=2) {
+ sprintf(&buf[j], "%02x", d64->bytes[7-i]);
+ }
+ printf(" D64> %s [S:%d Cb:%02x Ec:%02x] LittleEndian\n", buf,
+ d64->bytes[7]>>7, (d64->bytes[7]>>2)&0x1f,
+ ((d64->bytes[7]&0x3)<<6)| (d64->bytes[6]>>2));
+ }
+ else { /* big-endian */
+ for (i=0; i<DECIMAL64_Bytes; i++, j+=2) {
+ sprintf(&buf[j], "%02x", d64->bytes[i]);
+ }
+ printf(" D64> %s [S:%d Cb:%02x Ec:%02x] BigEndian\n", buf,
+ decimal64Sign(d64), decimal64Comb(d64), decimal64ExpCon(d64));
+ }
+ } /* decimal64Show */
+#endif
+
+/* ================================================================== */
+/* Shared utility routines and tables */
+/* ================================================================== */
+/* define and include the conversion tables to use for shared code */
+#if DECDPUN==3
+ #define DEC_DPD2BIN 1
+#else
+ #define DEC_DPD2BCD 1
+#endif
+#include "libdecnumber/decDPD.h"
+
+/* The maximum number of decNumberUnits needed for a working copy of */
+/* the units array is the ceiling of digits/DECDPUN, where digits is */
+/* the maximum number of digits in any of the formats for which this */
+/* is used. decimal128.h must not be included in this module, so, as */
+/* a very special case, that number is defined as a literal here.
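+ For example, DECMAX754 is 34 (the decimal128 precision), so with
+ DECDPUN==3 the macro below evaluates to (34+3-1)/3 == 12 working
+ Units, and with DECDPUN==1 to 34.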
*/ +#define DECMAX754 34 +#define DECMAXUNITS ((DECMAX754+DECDPUN-1)/DECDPUN) + +/* ------------------------------------------------------------------ */ +/* Combination field lookup tables (uInts to save measurable work) */ +/* */ +/* COMBEXP - 2-bit most-significant-bits of exponent */ +/* [11 if an Infinity or NaN] */ +/* COMBMSD - 4-bit most-significant-digit */ +/* [0=Infinity, 1=NaN if COMBEXP=11] */ +/* */ +/* Both are indexed by the 5-bit combination field (0-31) */ +/* ------------------------------------------------------------------ */ +const uInt COMBEXP[32]={0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, + 0, 0, 1, 1, 2, 2, 3, 3}; +const uInt COMBMSD[32]={0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 8, 9, 8, 9, 0, 1}; + +/* ------------------------------------------------------------------ */ +/* decDigitsToDPD -- pack coefficient into DPD form */ +/* */ +/* dn is the source number (assumed valid, max DECMAX754 digits) */ +/* targ is 1, 2, or 4-element uInt array, which the caller must */ +/* have cleared to zeros */ +/* shift is the number of 0 digits to add on the right (normally 0) */ +/* */ +/* The coefficient must be known small enough to fit. The full */ +/* coefficient is copied, including the leading 'odd' digit. This */ +/* digit is retrieved and packed into the combination field by the */ +/* caller. */ +/* */ +/* The target uInts are altered only as necessary to receive the */ +/* digits of the decNumber. When more than one uInt is needed, they */ +/* are filled from left to right (that is, the uInt at offset 0 will */ +/* end up with the least-significant digits). */ +/* */ +/* shift is used for 'fold-down' padding. */ +/* */ +/* No error is possible. */ +/* ------------------------------------------------------------------ */ +#if DECDPUN<=4 +/* Constant multipliers for divide-by-power-of five using reciprocal */ +/* multiply, after removing powers of 2 by shifting, and final shift */ +/* of 17 [we only need up to **4] */ +static const uInt multies[]={131073, 26215, 5243, 1049, 210}; +/* QUOT10 -- macro to return the quotient of unit u divided by 10**n */ +#define QUOT10(u, n) ((((uInt)(u)>>(n))*multies[n])>>17) +#endif +void decDigitsToDPD(const decNumber *dn, uInt *targ, Int shift) { + Int cut; /* work */ + Int n; /* output bunch counter */ + Int digits=dn->digits; /* digit countdown */ + uInt dpd; /* densely packed decimal value */ + uInt bin; /* binary value 0-999 */ + uInt *uout=targ; /* -> current output uInt */ + uInt uoff=0; /* -> current output offset [from right] */ + const Unit *inu=dn->lsu; /* -> current input unit */ + Unit uar[DECMAXUNITS]; /* working copy of units, iff shifted */ + #if DECDPUN!=3 /* not fast path */ + Unit in; /* current unit */ + #endif + + if (shift!=0) { /* shift towards most significant required */ + /* shift the units array to the left by pad digits and copy */ + /* [this code is a special case of decShiftToMost, which could */ + /* be used instead if exposed and the array were copied first] */ + const Unit *source; /* .. */ + Unit *target, *first; /* .. 
*/
+ uInt next=0; /* work */
+
+ source=dn->lsu+D2U(digits)-1; /* where msu comes from */
+ target=uar+D2U(digits)-1+D2U(shift);/* where upper part of first cut goes */
+ cut=DECDPUN-MSUDIGITS(shift); /* where to slice */
+ if (cut==0) { /* unit-boundary case */
+ for (; source>=dn->lsu; source--, target--) *target=*source;
+ }
+ else {
+ first=uar+D2U(digits+shift)-1; /* where msu will end up */
+ for (; source>=dn->lsu; source--, target--) {
+ /* split the source Unit and accumulate remainder for next */
+ #if DECDPUN<=4
+ uInt quot=QUOT10(*source, cut);
+ uInt rem=*source-quot*DECPOWERS[cut];
+ next+=quot;
+ #else
+ uInt rem=*source%DECPOWERS[cut];
+ next+=*source/DECPOWERS[cut];
+ #endif
+ if (target<=first) *target=(Unit)next; /* write to target iff valid */
+ next=rem*DECPOWERS[DECDPUN-cut]; /* save remainder for next Unit */
+ }
+ } /* shift-move */
+ /* propagate remainder to one below and clear the rest */
+ for (; target>=uar; target--) {
+ *target=(Unit)next;
+ next=0;
+ }
+ digits+=shift; /* add count (shift) of zeros added */
+ inu=uar; /* use units in working array */
+ }
+
+ /* now densely pack the coefficient into DPD declets */
+
+ #if DECDPUN!=3 /* not fast path */
+ in=*inu; /* current unit */
+ cut=0; /* at lowest digit */
+ bin=0; /* [keep compiler quiet] */
+ #endif
+
+ for(n=0; digits>0; n++) { /* each output bunch */
+ #if DECDPUN==3 /* fast path, 3-at-a-time */
+ bin=*inu; /* 3 digits ready for convert */
+ digits-=3; /* [may go negative] */
+ inu++; /* may need another */
+
+ #else /* must collect digit-by-digit */
+ Unit dig; /* current digit */
+ Int j; /* digit-in-declet count */
+ for (j=0; j<3; j++) {
+ #if DECDPUN<=4
+ Unit temp=(Unit)((uInt)(in*6554)>>16);
+ dig=(Unit)(in-X10(temp));
+ in=temp;
+ #else
+ dig=in%10;
+ in=in/10;
+ #endif
+ if (j==0) bin=dig;
+ else if (j==1) bin+=X10(dig);
+ else /* j==2 */ bin+=X100(dig);
+ digits--;
+ if (digits==0) break; /* [also protects *inu below] */
+ cut++;
+ if (cut==DECDPUN) {inu++; in=*inu; cut=0;}
+ }
+ #endif
+ /* here there are 3 digits in bin, or have used all input digits */
+
+ dpd=BIN2DPD[bin];
+
+ /* write declet to uInt array */
+ *uout|=dpd<<uoff;
+ uoff+=10;
+ if (uoff<32) continue; /* no uInt boundary cross */
+ uout++;
+ uoff-=32;
+ *uout|=dpd>>(10-uoff); /* collect top bits */
+ } /* n declets */
+ return;
+ } /* decDigitsToDPD */
+
+/* ------------------------------------------------------------------ */
+/* decDigitsFromDPD -- unpack a format's coefficient */
+/* */
+/* dn is the target number, with 7, 16, or 34-digit space. */
+/* sour is a 1, 2, or 4-element uInt array containing only declets */
+/* declets is the number of (right-aligned) declets in sour to */
+/* be processed. This may be 1 more than the obvious number in */
+/* a format, as any top digit is prefixed to the coefficient */
+/* continuation field. It also may be as small as 1, as the */
+/* caller may pre-process leading zero declets. */
+/* */
+/* When doing the 'extra declet' case care is taken to avoid writing */
+/* extra digits when there are leading zeros, as these could overflow */
+/* the units array when DECDPUN is not 3. */
+/* */
+/* The target uInts are used only as necessary to process declets */
+/* into the decNumber. When more than one uInt is needed, they */
+/* are used from left to right (that is, the uInt at offset 0 */
+/* provides the least-significant digits). */
+/* */
+/* dn->digits is set, but not the sign or exponent. */
+/* No error is possible [the redundant 888 codes are allowed].
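+
+ A concrete illustration (a sketch, assuming DECDPUN==3): the
+ declet 0x3ff is one of the redundant encodings of 999, and
+
+ decNumber dn;
+ uInt sour=0x3ff; -- one right-aligned declet
+ decNumberZero(&dn); -- clear sign and exponent first
+ decDigitsFromDPD(&dn, &sour, 1);
+
+ leaves dn.lsu[0]==999 and dn.digits==3.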
*/ +/* ------------------------------------------------------------------ */ +void decDigitsFromDPD(decNumber *dn, const uInt *sour, Int declets) { + + uInt dpd; /* collector for 10 bits */ + Int n; /* counter */ + Unit *uout=dn->lsu; /* -> current output unit */ + Unit *last=uout; /* will be unit containing msd */ + const uInt *uin=sour; /* -> current input uInt */ + uInt uoff=0; /* -> current input offset [from right] */ + + #if DECDPUN!=3 + uInt bcd; /* BCD result */ + uInt nibble; /* work */ + Unit out=0; /* accumulator */ + Int cut=0; /* power of ten in current unit */ + #endif + #if DECDPUN>4 + uInt const *pow; /* work */ + #endif + + /* Expand the densely-packed integer, right to left */ + for (n=declets-1; n>=0; n--) { /* count down declets of 10 bits */ + dpd=*uin>>uoff; + uoff+=10; + if (uoff>32) { /* crossed uInt boundary */ + uin++; + uoff-=32; + dpd|=*uin<<(10-uoff); /* get waiting bits */ + } + dpd&=0x3ff; /* clear uninteresting bits */ + + #if DECDPUN==3 + if (dpd==0) *uout=0; + else { + *uout=DPD2BIN[dpd]; /* convert 10 bits to binary 0-999 */ + last=uout; /* record most significant unit */ + } + uout++; + } /* n */ + + #else /* DECDPUN!=3 */ + if (dpd==0) { /* fastpath [e.g., leading zeros] */ + /* write out three 0 digits (nibbles); out may have digit(s) */ + cut++; + if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} + if (n==0) break; /* [as below, works even if MSD=0] */ + cut++; + if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} + cut++; + if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} + continue; + } + + bcd=DPD2BCD[dpd]; /* convert 10 bits to 12 bits BCD */ + + /* now accumulate the 3 BCD nibbles into units */ + nibble=bcd & 0x00f; + if (nibble) out=(Unit)(out+nibble*DECPOWERS[cut]); + cut++; + if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} + bcd>>=4; + + /* if this is the last declet and the remaining nibbles in bcd */ + /* are 00 then process no more nibbles, because this could be */ + /* the 'odd' MSD declet and writing any more Units would then */ + /* overflow the unit array */ + if (n==0 && !bcd) break; + + nibble=bcd & 0x00f; + if (nibble) out=(Unit)(out+nibble*DECPOWERS[cut]); + cut++; + if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} + bcd>>=4; + + nibble=bcd & 0x00f; + if (nibble) out=(Unit)(out+nibble*DECPOWERS[cut]); + cut++; + if (cut==DECDPUN) {*uout=out; if (out) {last=uout; out=0;} uout++; cut=0;} + } /* n */ + if (cut!=0) { /* some more left over */ + *uout=out; /* write out final unit */ + if (out) last=uout; /* and note if non-zero */ + } + #endif + + /* here, last points to the most significant unit with digits; */ + /* inspect it to get the final digits count -- this is essentially */ + /* the same code as decGetDigits in decNumber.c */ + dn->digits=(last-dn->lsu)*DECDPUN+1; /* floor of digits, plus */ + /* must be at least 1 digit */ + #if DECDPUN>1 + if (*last<10) return; /* common odd digit or 0 */ + dn->digits++; /* must be 2 at least */ + #if DECDPUN>2 + if (*last<100) return; /* 10-99 */ + dn->digits++; /* must be 3 at least */ + #if DECDPUN>3 + if (*last<1000) return; /* 100-999 */ + dn->digits++; /* must be 4 at least */ + #if DECDPUN>4 + for (pow=&DECPOWERS[4]; *last>=*pow; pow++) dn->digits++; + #endif + #endif + #endif + #endif + return; + } /*decDigitsFromDPD */ diff --git a/qemu/m68k.h b/qemu/m68k.h index dffdf7e7..ada19e15 100644 --- a/qemu/m68k.h +++ b/qemu/m68k.h @@ -1,3020 +1,1434 @@ /* Autogen 
header for Unicorn Engine - DONOT MODIFY */ -#ifndef UNICORN_AUTOGEN_M68K_H -#define UNICORN_AUTOGEN_M68K_H -#define arm_release arm_release_m68k -#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_m68k -#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_m68k -#define use_idiv_instructions_rt use_idiv_instructions_rt_m68k -#define tcg_target_deposit_valid tcg_target_deposit_valid_m68k -#define helper_power_down helper_power_down_m68k -#define check_exit_request check_exit_request_m68k -#define address_space_unregister address_space_unregister_m68k -#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_m68k -#define phys_mem_clean phys_mem_clean_m68k -#define tb_cleanup tb_cleanup_m68k +#ifndef UNICORN_AUTOGEN_m68k_H +#define UNICORN_AUTOGEN_m68k_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _m68k +#endif +#define arm_arch arm_arch_m68k +#define tb_target_set_jmp_target tb_target_set_jmp_target_m68k +#define have_bmi1 have_bmi1_m68k +#define have_popcnt have_popcnt_m68k +#define have_avx1 have_avx1_m68k +#define have_avx2 have_avx2_m68k +#define have_isa have_isa_m68k +#define have_altivec have_altivec_m68k +#define have_vsx have_vsx_m68k +#define flush_icache_range flush_icache_range_m68k +#define s390_facilities s390_facilities_m68k +#define tcg_dump_op tcg_dump_op_m68k +#define tcg_dump_ops tcg_dump_ops_m68k +#define tcg_gen_and_i64 tcg_gen_and_i64_m68k +#define tcg_gen_discard_i64 tcg_gen_discard_i64_m68k +#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_m68k +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_m68k +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_m68k +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_m68k +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_m68k +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_m68k +#define tcg_gen_ld_i64 tcg_gen_ld_i64_m68k +#define tcg_gen_mov_i64 tcg_gen_mov_i64_m68k +#define tcg_gen_movi_i64 tcg_gen_movi_i64_m68k +#define tcg_gen_mul_i64 tcg_gen_mul_i64_m68k +#define tcg_gen_or_i64 tcg_gen_or_i64_m68k +#define tcg_gen_sar_i64 tcg_gen_sar_i64_m68k +#define tcg_gen_shl_i64 tcg_gen_shl_i64_m68k +#define tcg_gen_shr_i64 tcg_gen_shr_i64_m68k +#define tcg_gen_st_i64 tcg_gen_st_i64_m68k +#define tcg_gen_xor_i64 tcg_gen_xor_i64_m68k +#define cpu_icount_to_ns cpu_icount_to_ns_m68k +#define cpu_is_stopped cpu_is_stopped_m68k +#define cpu_get_ticks cpu_get_ticks_m68k +#define cpu_get_clock cpu_get_clock_m68k +#define cpu_resume cpu_resume_m68k +#define qemu_init_vcpu qemu_init_vcpu_m68k +#define cpu_stop_current cpu_stop_current_m68k +#define resume_all_vcpus resume_all_vcpus_m68k +#define vm_start vm_start_m68k +#define address_space_dispatch_compact address_space_dispatch_compact_m68k +#define flatview_translate flatview_translate_m68k +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_m68k +#define qemu_get_cpu qemu_get_cpu_m68k +#define cpu_address_space_init cpu_address_space_init_m68k +#define cpu_get_address_space cpu_get_address_space_m68k +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_m68k +#define cpu_exec_initfn cpu_exec_initfn_m68k +#define cpu_exec_realizefn cpu_exec_realizefn_m68k +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_m68k +#define cpu_watchpoint_insert cpu_watchpoint_insert_m68k +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_m68k +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_m68k +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_m68k +#define cpu_breakpoint_insert cpu_breakpoint_insert_m68k +#define cpu_breakpoint_remove 
cpu_breakpoint_remove_m68k
+#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_m68k
+#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_m68k
+#define cpu_abort cpu_abort_m68k
+#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_m68k
+#define memory_region_section_get_iotlb memory_region_section_get_iotlb_m68k
+#define flatview_add_to_dispatch flatview_add_to_dispatch_m68k
+#define qemu_ram_get_host_addr qemu_ram_get_host_addr_m68k
+#define qemu_ram_get_offset qemu_ram_get_offset_m68k
+#define qemu_ram_get_used_length qemu_ram_get_used_length_m68k
+#define qemu_ram_is_shared qemu_ram_is_shared_m68k
+#define qemu_ram_pagesize qemu_ram_pagesize_m68k
+#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_m68k
+#define qemu_ram_alloc qemu_ram_alloc_m68k
+#define qemu_ram_free qemu_ram_free_m68k
+#define qemu_map_ram_ptr qemu_map_ram_ptr_m68k
+#define qemu_ram_block_host_offset qemu_ram_block_host_offset_m68k
+#define qemu_ram_block_from_host qemu_ram_block_from_host_m68k
+#define qemu_ram_addr_from_host qemu_ram_addr_from_host_m68k
+#define cpu_check_watchpoint cpu_check_watchpoint_m68k
+#define iotlb_to_section iotlb_to_section_m68k
+#define address_space_dispatch_new address_space_dispatch_new_m68k
+#define address_space_dispatch_free address_space_dispatch_free_m68k
+#define flatview_read_continue flatview_read_continue_m68k
+#define address_space_read_full address_space_read_full_m68k
+#define address_space_write address_space_write_m68k
+#define address_space_rw address_space_rw_m68k
+#define cpu_physical_memory_rw cpu_physical_memory_rw_m68k
+#define address_space_write_rom address_space_write_rom_m68k
+#define cpu_flush_icache_range cpu_flush_icache_range_m68k
+#define cpu_exec_init_all cpu_exec_init_all_m68k
+#define address_space_access_valid address_space_access_valid_m68k
+#define address_space_map address_space_map_m68k
+#define address_space_unmap address_space_unmap_m68k
+#define cpu_physical_memory_map cpu_physical_memory_map_m68k
+#define cpu_physical_memory_unmap cpu_physical_memory_unmap_m68k
+#define cpu_memory_rw_debug cpu_memory_rw_debug_m68k
+#define qemu_target_page_size qemu_target_page_size_m68k
+#define qemu_target_page_bits qemu_target_page_bits_m68k
+#define qemu_target_page_bits_min qemu_target_page_bits_min_m68k
+#define target_words_bigendian target_words_bigendian_m68k
+#define cpu_physical_memory_is_io cpu_physical_memory_is_io_m68k
+#define ram_block_discard_range ram_block_discard_range_m68k
+#define ramblock_is_pmem ramblock_is_pmem_m68k
+#define page_size_init page_size_init_m68k
+#define set_preferred_target_page_bits set_preferred_target_page_bits_m68k
+#define finalize_target_page_bits finalize_target_page_bits_m68k
+#define cpu_outb cpu_outb_m68k
+#define cpu_outw cpu_outw_m68k
+#define cpu_outl cpu_outl_m68k
+#define cpu_inb cpu_inb_m68k
+#define cpu_inw cpu_inw_m68k
+#define cpu_inl cpu_inl_m68k
 #define memory_map memory_map_m68k
+#define memory_map_io memory_map_io_m68k
 #define memory_map_ptr memory_map_ptr_m68k
 #define memory_unmap memory_unmap_m68k
 #define memory_free memory_free_m68k
-#define free_code_gen_buffer free_code_gen_buffer_m68k
-#define helper_raise_exception helper_raise_exception_m68k
-#define tcg_enabled tcg_enabled_m68k
-#define tcg_exec_init tcg_exec_init_m68k
-#define memory_register_types memory_register_types_m68k
-#define cpu_exec_init_all cpu_exec_init_all_m68k
-#define vm_start vm_start_m68k
-#define resume_all_vcpus resume_all_vcpus_m68k
-#define a15_l2ctlr_read a15_l2ctlr_read_m68k
-#define a64_translate_init a64_translate_init_m68k
-#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_m68k
-#define aa64_cacheop_access aa64_cacheop_access_m68k
-#define aa64_daif_access aa64_daif_access_m68k
-#define aa64_daif_write aa64_daif_write_m68k
-#define aa64_dczid_read aa64_dczid_read_m68k
-#define aa64_fpcr_read aa64_fpcr_read_m68k
-#define aa64_fpcr_write aa64_fpcr_write_m68k
-#define aa64_fpsr_read aa64_fpsr_read_m68k
-#define aa64_fpsr_write aa64_fpsr_write_m68k
-#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_m68k
-#define aa64_zva_access aa64_zva_access_m68k
-#define aarch64_banked_spsr_index aarch64_banked_spsr_index_m68k
-#define aarch64_restore_sp aarch64_restore_sp_m68k
-#define aarch64_save_sp aarch64_save_sp_m68k
-#define accel_find accel_find_m68k
-#define accel_init_machine accel_init_machine_m68k
-#define accel_type accel_type_m68k
-#define access_with_adjusted_size access_with_adjusted_size_m68k
-#define add128 add128_m68k
-#define add16_sat add16_sat_m68k
-#define add16_usat add16_usat_m68k
-#define add192 add192_m68k
-#define add8_sat add8_sat_m68k
-#define add8_usat add8_usat_m68k
-#define add_cpreg_to_hashtable add_cpreg_to_hashtable_m68k
-#define add_cpreg_to_list add_cpreg_to_list_m68k
-#define addFloat128Sigs addFloat128Sigs_m68k
-#define addFloat32Sigs addFloat32Sigs_m68k
-#define addFloat64Sigs addFloat64Sigs_m68k
-#define addFloatx80Sigs addFloatx80Sigs_m68k
-#define add_qemu_ldst_label add_qemu_ldst_label_m68k
-#define address_space_access_valid address_space_access_valid_m68k
-#define address_space_destroy address_space_destroy_m68k
-#define address_space_destroy_dispatch address_space_destroy_dispatch_m68k
-#define address_space_get_flatview address_space_get_flatview_m68k
-#define address_space_init address_space_init_m68k
-#define address_space_init_dispatch address_space_init_dispatch_m68k
-#define address_space_lookup_region address_space_lookup_region_m68k
-#define address_space_map address_space_map_m68k
-#define address_space_read address_space_read_m68k
-#define address_space_rw address_space_rw_m68k
-#define address_space_translate address_space_translate_m68k
-#define address_space_translate_for_iotlb address_space_translate_for_iotlb_m68k
-#define address_space_translate_internal address_space_translate_internal_m68k
-#define address_space_unmap address_space_unmap_m68k
-#define address_space_update_topology address_space_update_topology_m68k
-#define address_space_update_topology_pass address_space_update_topology_pass_m68k
-#define address_space_write address_space_write_m68k
-#define addrrange_contains addrrange_contains_m68k
-#define addrrange_end addrrange_end_m68k
-#define addrrange_equal addrrange_equal_m68k
-#define addrrange_intersection addrrange_intersection_m68k
-#define addrrange_intersects addrrange_intersects_m68k
-#define addrrange_make addrrange_make_m68k
-#define adjust_endianness adjust_endianness_m68k
-#define all_helpers all_helpers_m68k
-#define alloc_code_gen_buffer alloc_code_gen_buffer_m68k
-#define alloc_entry alloc_entry_m68k
-#define always_true always_true_m68k
-#define arm1026_initfn arm1026_initfn_m68k
-#define arm1136_initfn arm1136_initfn_m68k
-#define arm1136_r2_initfn arm1136_r2_initfn_m68k
-#define arm1176_initfn arm1176_initfn_m68k
-#define arm11mpcore_initfn arm11mpcore_initfn_m68k
-#define arm926_initfn arm926_initfn_m68k
-#define arm946_initfn arm946_initfn_m68k
-#define arm_ccnt_enabled arm_ccnt_enabled_m68k
-#define arm_cp_read_zero arm_cp_read_zero_m68k
-#define arm_cp_reset_ignore arm_cp_reset_ignore_m68k
-#define arm_cpu_do_interrupt arm_cpu_do_interrupt_m68k
-#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_m68k
-#define arm_cpu_finalizefn arm_cpu_finalizefn_m68k
-#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_m68k
-#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_m68k
-#define arm_cpu_initfn arm_cpu_initfn_m68k
-#define arm_cpu_list arm_cpu_list_m68k
-#define cpu_loop_exit cpu_loop_exit_m68k
-#define arm_cpu_post_init arm_cpu_post_init_m68k
-#define arm_cpu_realizefn arm_cpu_realizefn_m68k
-#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_m68k
-#define arm_cpu_register_types arm_cpu_register_types_m68k
-#define cpu_resume_from_signal cpu_resume_from_signal_m68k
-#define arm_cpus arm_cpus_m68k
-#define arm_cpu_set_pc arm_cpu_set_pc_m68k
-#define arm_cp_write_ignore arm_cp_write_ignore_m68k
-#define arm_current_el arm_current_el_m68k
-#define arm_dc_feature arm_dc_feature_m68k
-#define arm_debug_excp_handler arm_debug_excp_handler_m68k
-#define arm_debug_target_el arm_debug_target_el_m68k
-#define arm_el_is_aa64 arm_el_is_aa64_m68k
-#define arm_env_get_cpu arm_env_get_cpu_m68k
-#define arm_excp_target_el arm_excp_target_el_m68k
-#define arm_excp_unmasked arm_excp_unmasked_m68k
-#define arm_feature arm_feature_m68k
-#define arm_generate_debug_exceptions arm_generate_debug_exceptions_m68k
-#define gen_intermediate_code gen_intermediate_code_m68k
-#define gen_intermediate_code_pc gen_intermediate_code_pc_m68k
-#define arm_gen_test_cc arm_gen_test_cc_m68k
-#define arm_gt_ptimer_cb arm_gt_ptimer_cb_m68k
-#define arm_gt_vtimer_cb arm_gt_vtimer_cb_m68k
-#define arm_handle_psci_call arm_handle_psci_call_m68k
-#define arm_is_psci_call arm_is_psci_call_m68k
-#define arm_is_secure arm_is_secure_m68k
-#define arm_is_secure_below_el3 arm_is_secure_below_el3_m68k
-#define arm_ldl_code arm_ldl_code_m68k
-#define arm_lduw_code arm_lduw_code_m68k
-#define arm_log_exception arm_log_exception_m68k
-#define arm_reg_read arm_reg_read_m68k
-#define arm_reg_reset arm_reg_reset_m68k
-#define arm_reg_write arm_reg_write_m68k
-#define restore_state_to_opc restore_state_to_opc_m68k
-#define arm_rmode_to_sf arm_rmode_to_sf_m68k
-#define arm_singlestep_active arm_singlestep_active_m68k
-#define tlb_fill tlb_fill_m68k
-#define tlb_flush tlb_flush_m68k
-#define tlb_flush_page tlb_flush_page_m68k
-#define tlb_set_page tlb_set_page_m68k
-#define arm_translate_init arm_translate_init_m68k
-#define arm_v7m_class_init arm_v7m_class_init_m68k
-#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_m68k
-#define ats_access ats_access_m68k
-#define ats_write ats_write_m68k
-#define bad_mode_switch bad_mode_switch_m68k
-#define bank_number bank_number_m68k
-#define bitmap_zero_extend bitmap_zero_extend_m68k
-#define bp_wp_matches bp_wp_matches_m68k
-#define breakpoint_invalidate breakpoint_invalidate_m68k
-#define build_page_bitmap build_page_bitmap_m68k
-#define bus_add_child bus_add_child_m68k
-#define bus_class_init bus_class_init_m68k
-#define bus_info bus_info_m68k
-#define bus_unparent bus_unparent_m68k
-#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_m68k
-#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_m68k
-#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_m68k
-#define call_recip_estimate call_recip_estimate_m68k
-#define can_merge can_merge_m68k
-#define capacity_increase capacity_increase_m68k
-#define ccsidr_read ccsidr_read_m68k
-#define check_ap check_ap_m68k
-#define check_breakpoints check_breakpoints_m68k
-#define check_watchpoints check_watchpoints_m68k
-#define cho cho_m68k
-#define clear_bit clear_bit_m68k
-#define clz32 clz32_m68k
-#define clz64 clz64_m68k
-#define cmp_flatrange_addr cmp_flatrange_addr_m68k
-#define code_gen_alloc code_gen_alloc_m68k
-#define commonNaNToFloat128 commonNaNToFloat128_m68k
-#define commonNaNToFloat16 commonNaNToFloat16_m68k
-#define commonNaNToFloat32 commonNaNToFloat32_m68k
-#define commonNaNToFloat64 commonNaNToFloat64_m68k
-#define commonNaNToFloatx80 commonNaNToFloatx80_m68k
-#define compute_abs_deadline compute_abs_deadline_m68k
-#define cond_name cond_name_m68k
-#define configure_accelerator configure_accelerator_m68k
-#define container_get container_get_m68k
-#define container_info container_info_m68k
-#define container_register_types container_register_types_m68k
-#define contextidr_write contextidr_write_m68k
-#define core_log_global_start core_log_global_start_m68k
-#define core_log_global_stop core_log_global_stop_m68k
-#define core_memory_listener core_memory_listener_m68k
-#define cortexa15_cp_reginfo cortexa15_cp_reginfo_m68k
-#define cortex_a15_initfn cortex_a15_initfn_m68k
-#define cortexa8_cp_reginfo cortexa8_cp_reginfo_m68k
-#define cortex_a8_initfn cortex_a8_initfn_m68k
-#define cortexa9_cp_reginfo cortexa9_cp_reginfo_m68k
-#define cortex_a9_initfn cortex_a9_initfn_m68k
-#define cortex_m3_initfn cortex_m3_initfn_m68k
-#define count_cpreg count_cpreg_m68k
-#define countLeadingZeros32 countLeadingZeros32_m68k
-#define countLeadingZeros64 countLeadingZeros64_m68k
-#define cp_access_ok cp_access_ok_m68k
-#define cpacr_write cpacr_write_m68k
-#define cpreg_field_is_64bit cpreg_field_is_64bit_m68k
-#define cp_reginfo cp_reginfo_m68k
-#define cpreg_key_compare cpreg_key_compare_m68k
-#define cpreg_make_keylist cpreg_make_keylist_m68k
-#define cp_reg_reset cp_reg_reset_m68k
-#define cpreg_to_kvm_id cpreg_to_kvm_id_m68k
-#define cpsr_read cpsr_read_m68k
-#define cpsr_write cpsr_write_m68k
-#define cptype_valid cptype_valid_m68k
-#define cpu_abort cpu_abort_m68k
-#define cpu_arm_exec cpu_arm_exec_m68k
-#define cpu_arm_gen_code cpu_arm_gen_code_m68k
-#define cpu_arm_init cpu_arm_init_m68k
-#define cpu_breakpoint_insert cpu_breakpoint_insert_m68k
-#define cpu_breakpoint_remove cpu_breakpoint_remove_m68k
-#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_m68k
-#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_m68k
-#define cpu_can_do_io cpu_can_do_io_m68k
-#define cpu_can_run cpu_can_run_m68k
-#define cpu_class_init cpu_class_init_m68k
-#define cpu_common_class_by_name cpu_common_class_by_name_m68k
-#define cpu_common_exec_interrupt cpu_common_exec_interrupt_m68k
-#define cpu_common_get_arch_id cpu_common_get_arch_id_m68k
-#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_m68k
-#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_m68k
-#define cpu_common_has_work cpu_common_has_work_m68k
-#define cpu_common_initfn cpu_common_initfn_m68k
-#define cpu_common_noop cpu_common_noop_m68k
-#define cpu_common_parse_features cpu_common_parse_features_m68k
-#define cpu_common_realizefn cpu_common_realizefn_m68k
-#define cpu_common_reset cpu_common_reset_m68k
-#define cpu_dump_statistics cpu_dump_statistics_m68k
-#define cpu_exec_init cpu_exec_init_m68k
-#define cpu_flush_icache_range cpu_flush_icache_range_m68k
-#define cpu_gen_init cpu_gen_init_m68k
-#define cpu_get_clock cpu_get_clock_m68k
-#define cpu_get_real_ticks cpu_get_real_ticks_m68k
-#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_m68k
-#define cpu_handle_debug_exception cpu_handle_debug_exception_m68k
-#define cpu_handle_guest_debug cpu_handle_guest_debug_m68k
-#define cpu_inb cpu_inb_m68k
-#define cpu_inl cpu_inl_m68k
-#define cpu_interrupt cpu_interrupt_m68k
-#define cpu_interrupt_handler cpu_interrupt_handler_m68k
-#define cpu_inw cpu_inw_m68k
-#define cpu_io_recompile cpu_io_recompile_m68k
-#define cpu_is_stopped cpu_is_stopped_m68k
-#define cpu_ldl_code cpu_ldl_code_m68k
-#define cpu_ldub_code cpu_ldub_code_m68k
-#define cpu_lduw_code cpu_lduw_code_m68k
-#define cpu_memory_rw_debug cpu_memory_rw_debug_m68k
-#define cpu_mmu_index cpu_mmu_index_m68k
-#define cpu_outb cpu_outb_m68k
-#define cpu_outl cpu_outl_m68k
-#define cpu_outw cpu_outw_m68k
-#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_m68k
-#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_m68k
-#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_m68k
-#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_m68k
-#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_m68k
-#define cpu_physical_memory_is_io cpu_physical_memory_is_io_m68k
-#define cpu_physical_memory_map cpu_physical_memory_map_m68k
-#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_m68k
-#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_m68k
-#define cpu_physical_memory_rw cpu_physical_memory_rw_m68k
-#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_m68k
-#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_m68k
-#define cpu_physical_memory_unmap cpu_physical_memory_unmap_m68k
-#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_m68k
-#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_m68k
-#define cpu_register cpu_register_m68k
-#define cpu_register_types cpu_register_types_m68k
-#define cpu_restore_state cpu_restore_state_m68k
-#define cpu_restore_state_from_tb cpu_restore_state_from_tb_m68k
-#define cpu_single_step cpu_single_step_m68k
-#define cpu_tb_exec cpu_tb_exec_m68k
-#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_m68k
-#define cpu_to_be64 cpu_to_be64_m68k
-#define cpu_to_le32 cpu_to_le32_m68k
-#define cpu_to_le64 cpu_to_le64_m68k
-#define cpu_type_info cpu_type_info_m68k
-#define cpu_unassigned_access cpu_unassigned_access_m68k
-#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_m68k
-#define cpu_watchpoint_insert cpu_watchpoint_insert_m68k
-#define cpu_watchpoint_remove cpu_watchpoint_remove_m68k
-#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_m68k
-#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_m68k
-#define crc32c_table crc32c_table_m68k
-#define create_new_memory_mapping create_new_memory_mapping_m68k
-#define csselr_write csselr_write_m68k
-#define cto32 cto32_m68k
-#define ctr_el0_access ctr_el0_access_m68k
-#define ctz32 ctz32_m68k
-#define ctz64 ctz64_m68k
-#define dacr_write dacr_write_m68k
-#define dbgbcr_write dbgbcr_write_m68k
-#define dbgbvr_write dbgbvr_write_m68k
-#define dbgwcr_write dbgwcr_write_m68k
-#define dbgwvr_write dbgwvr_write_m68k
-#define debug_cp_reginfo debug_cp_reginfo_m68k
-#define debug_frame debug_frame_m68k
-#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_m68k
-#define define_arm_cp_regs define_arm_cp_regs_m68k
-#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_m68k
-#define define_debug_regs define_debug_regs_m68k
-#define define_one_arm_cp_reg define_one_arm_cp_reg_m68k
-#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_m68k
-#define deposit32 deposit32_m68k
-#define deposit64 deposit64_m68k
-#define deregister_tm_clones deregister_tm_clones_m68k
-#define device_class_base_init device_class_base_init_m68k
-#define device_class_init device_class_init_m68k
-#define device_finalize device_finalize_m68k
-#define device_get_realized device_get_realized_m68k
-#define device_initfn device_initfn_m68k
-#define device_post_init device_post_init_m68k
-#define device_reset device_reset_m68k
-#define device_set_realized device_set_realized_m68k
-#define device_type_info device_type_info_m68k
-#define disas_arm_insn disas_arm_insn_m68k
-#define disas_coproc_insn disas_coproc_insn_m68k
-#define disas_dsp_insn disas_dsp_insn_m68k
-#define disas_iwmmxt_insn disas_iwmmxt_insn_m68k
-#define disas_neon_data_insn disas_neon_data_insn_m68k
-#define disas_neon_ls_insn disas_neon_ls_insn_m68k
-#define disas_thumb2_insn disas_thumb2_insn_m68k
-#define disas_thumb_insn disas_thumb_insn_m68k
-#define disas_vfp_insn disas_vfp_insn_m68k
-#define disas_vfp_v8_insn disas_vfp_v8_insn_m68k
-#define do_arm_semihosting do_arm_semihosting_m68k
-#define do_clz16 do_clz16_m68k
-#define do_clz8 do_clz8_m68k
-#define do_constant_folding do_constant_folding_m68k
-#define do_constant_folding_2 do_constant_folding_2_m68k
-#define do_constant_folding_cond do_constant_folding_cond_m68k
-#define do_constant_folding_cond2 do_constant_folding_cond2_m68k
-#define do_constant_folding_cond_32 do_constant_folding_cond_32_m68k
-#define do_constant_folding_cond_64 do_constant_folding_cond_64_m68k
-#define do_constant_folding_cond_eq do_constant_folding_cond_eq_m68k
-#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_m68k
-#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_m68k
-#define do_ssat do_ssat_m68k
-#define do_usad do_usad_m68k
-#define do_usat do_usat_m68k
-#define do_v7m_exception_exit do_v7m_exception_exit_m68k
-#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_m68k
-#define dummy_func dummy_func_m68k
-#define dummy_section dummy_section_m68k
-#define _DYNAMIC _DYNAMIC_m68k
-#define _edata _edata_m68k
-#define _end _end_m68k
-#define end_list end_list_m68k
-#define eq128 eq128_m68k
-#define ErrorClass_lookup ErrorClass_lookup_m68k
-#define error_copy error_copy_m68k
-#define error_exit error_exit_m68k
-#define error_get_class error_get_class_m68k
-#define error_get_pretty error_get_pretty_m68k
-#define error_setg_file_open error_setg_file_open_m68k
-#define estimateDiv128To64 estimateDiv128To64_m68k
-#define estimateSqrt32 estimateSqrt32_m68k
-#define excnames excnames_m68k
-#define excp_is_internal excp_is_internal_m68k
-#define extended_addresses_enabled extended_addresses_enabled_m68k
-#define extended_mpu_ap_bits extended_mpu_ap_bits_m68k
-#define extract32 extract32_m68k
-#define extract64 extract64_m68k
-#define extractFloat128Exp extractFloat128Exp_m68k
-#define extractFloat128Frac0 extractFloat128Frac0_m68k
-#define extractFloat128Frac1 extractFloat128Frac1_m68k
-#define extractFloat128Sign extractFloat128Sign_m68k
-#define extractFloat16Exp extractFloat16Exp_m68k
-#define extractFloat16Frac extractFloat16Frac_m68k
-#define extractFloat16Sign extractFloat16Sign_m68k
-#define extractFloat32Exp extractFloat32Exp_m68k
-#define extractFloat32Frac extractFloat32Frac_m68k
-#define extractFloat32Sign extractFloat32Sign_m68k
-#define extractFloat64Exp extractFloat64Exp_m68k
-#define extractFloat64Frac extractFloat64Frac_m68k
-#define extractFloat64Sign extractFloat64Sign_m68k
-#define extractFloatx80Exp extractFloatx80Exp_m68k
-#define extractFloatx80Frac extractFloatx80Frac_m68k
-#define extractFloatx80Sign extractFloatx80Sign_m68k
-#define fcse_write fcse_write_m68k
-#define find_better_copy find_better_copy_m68k
-#define find_default_machine find_default_machine_m68k
-#define find_desc_by_name find_desc_by_name_m68k
-#define find_first_bit find_first_bit_m68k
-#define find_paging_enabled_cpu find_paging_enabled_cpu_m68k
-#define find_ram_block find_ram_block_m68k
-#define find_ram_offset find_ram_offset_m68k
-#define find_string find_string_m68k
-#define find_type find_type_m68k
-#define _fini _fini_m68k
-#define flatrange_equal flatrange_equal_m68k
-#define flatview_destroy flatview_destroy_m68k
-#define flatview_init flatview_init_m68k
-#define flatview_insert flatview_insert_m68k
-#define flatview_lookup flatview_lookup_m68k
-#define flatview_ref flatview_ref_m68k
-#define flatview_simplify flatview_simplify_m68k
 #define flatview_unref flatview_unref_m68k
-#define float128_add float128_add_m68k
-#define float128_compare float128_compare_m68k
-#define float128_compare_internal float128_compare_internal_m68k
-#define float128_compare_quiet float128_compare_quiet_m68k
-#define float128_default_nan float128_default_nan_m68k
-#define float128_div float128_div_m68k
-#define float128_eq float128_eq_m68k
-#define float128_eq_quiet float128_eq_quiet_m68k
-#define float128_is_quiet_nan float128_is_quiet_nan_m68k
-#define float128_is_signaling_nan float128_is_signaling_nan_m68k
-#define float128_le float128_le_m68k
-#define float128_le_quiet float128_le_quiet_m68k
-#define float128_lt float128_lt_m68k
-#define float128_lt_quiet float128_lt_quiet_m68k
-#define float128_maybe_silence_nan float128_maybe_silence_nan_m68k
-#define float128_mul float128_mul_m68k
-#define float128_rem float128_rem_m68k
-#define float128_round_to_int float128_round_to_int_m68k
-#define float128_scalbn float128_scalbn_m68k
-#define float128_sqrt float128_sqrt_m68k
-#define float128_sub float128_sub_m68k
-#define float128ToCommonNaN float128ToCommonNaN_m68k
-#define float128_to_float32 float128_to_float32_m68k
-#define float128_to_float64 float128_to_float64_m68k
-#define float128_to_floatx80 float128_to_floatx80_m68k
-#define float128_to_int32 float128_to_int32_m68k
-#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_m68k
-#define float128_to_int64 float128_to_int64_m68k
-#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_m68k
-#define float128_unordered float128_unordered_m68k
-#define float128_unordered_quiet float128_unordered_quiet_m68k
-#define float16_default_nan float16_default_nan_m68k
+#define address_space_get_flatview address_space_get_flatview_m68k
+#define memory_region_transaction_begin memory_region_transaction_begin_m68k
+#define memory_region_transaction_commit memory_region_transaction_commit_m68k
+#define memory_region_init memory_region_init_m68k
+#define memory_region_access_valid memory_region_access_valid_m68k
+#define memory_region_dispatch_read memory_region_dispatch_read_m68k
+#define memory_region_dispatch_write memory_region_dispatch_write_m68k
+#define memory_region_init_io memory_region_init_io_m68k
+#define memory_region_init_ram_ptr memory_region_init_ram_ptr_m68k
+#define memory_region_size memory_region_size_m68k
+#define memory_region_set_readonly memory_region_set_readonly_m68k
+#define memory_region_get_ram_ptr memory_region_get_ram_ptr_m68k
+#define memory_region_from_host memory_region_from_host_m68k
+#define memory_region_get_ram_addr memory_region_get_ram_addr_m68k
+#define memory_region_add_subregion memory_region_add_subregion_m68k
+#define memory_region_del_subregion memory_region_del_subregion_m68k
+#define memory_region_find memory_region_find_m68k
+#define memory_listener_register memory_listener_register_m68k
+#define memory_listener_unregister memory_listener_unregister_m68k
+#define address_space_remove_listeners address_space_remove_listeners_m68k
+#define address_space_init address_space_init_m68k
+#define address_space_destroy address_space_destroy_m68k
+#define memory_region_init_ram memory_region_init_ram_m68k
+#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_m68k
+#define exec_inline_op exec_inline_op_m68k
+#define floatx80_default_nan floatx80_default_nan_m68k
+#define float_raise float_raise_m68k
 #define float16_is_quiet_nan float16_is_quiet_nan_m68k
 #define float16_is_signaling_nan float16_is_signaling_nan_m68k
-#define float16_maybe_silence_nan float16_maybe_silence_nan_m68k
-#define float16ToCommonNaN float16ToCommonNaN_m68k
-#define float16_to_float32 float16_to_float32_m68k
-#define float16_to_float64 float16_to_float64_m68k
-#define float32_abs float32_abs_m68k
-#define float32_add float32_add_m68k
-#define float32_chs float32_chs_m68k
-#define float32_compare float32_compare_m68k
-#define float32_compare_internal float32_compare_internal_m68k
-#define float32_compare_quiet float32_compare_quiet_m68k
-#define float32_default_nan float32_default_nan_m68k
-#define float32_div float32_div_m68k
-#define float32_eq float32_eq_m68k
-#define float32_eq_quiet float32_eq_quiet_m68k
-#define float32_exp2 float32_exp2_m68k
-#define float32_exp2_coefficients float32_exp2_coefficients_m68k
-#define float32_is_any_nan float32_is_any_nan_m68k
-#define float32_is_infinity float32_is_infinity_m68k
-#define float32_is_neg float32_is_neg_m68k
 #define float32_is_quiet_nan float32_is_quiet_nan_m68k
 #define float32_is_signaling_nan float32_is_signaling_nan_m68k
-#define float32_is_zero float32_is_zero_m68k
-#define float32_is_zero_or_denormal float32_is_zero_or_denormal_m68k
-#define float32_le float32_le_m68k
-#define float32_le_quiet float32_le_quiet_m68k
-#define float32_log2 float32_log2_m68k
-#define float32_lt float32_lt_m68k
-#define float32_lt_quiet float32_lt_quiet_m68k
+#define float64_is_quiet_nan float64_is_quiet_nan_m68k
+#define float64_is_signaling_nan float64_is_signaling_nan_m68k
+#define floatx80_is_quiet_nan floatx80_is_quiet_nan_m68k
+#define floatx80_is_signaling_nan floatx80_is_signaling_nan_m68k
+#define floatx80_silence_nan floatx80_silence_nan_m68k
+#define propagateFloatx80NaN propagateFloatx80NaN_m68k
+#define float128_is_quiet_nan float128_is_quiet_nan_m68k
+#define float128_is_signaling_nan float128_is_signaling_nan_m68k
+#define float128_silence_nan float128_silence_nan_m68k
+#define float16_add float16_add_m68k
+#define float16_sub float16_sub_m68k
+#define float32_add float32_add_m68k
+#define float32_sub float32_sub_m68k
+#define float64_add float64_add_m68k
+#define float64_sub float64_sub_m68k
+#define float16_mul float16_mul_m68k
+#define float32_mul float32_mul_m68k
+#define float64_mul float64_mul_m68k
+#define float16_muladd float16_muladd_m68k
+#define float32_muladd float32_muladd_m68k
+#define float64_muladd float64_muladd_m68k
+#define float16_div float16_div_m68k
+#define float32_div float32_div_m68k
+#define float64_div float64_div_m68k
+#define float16_to_float32 float16_to_float32_m68k
+#define float16_to_float64 float16_to_float64_m68k
+#define float32_to_float16 float32_to_float16_m68k
+#define float32_to_float64 float32_to_float64_m68k
+#define float64_to_float16 float64_to_float16_m68k
+#define float64_to_float32 float64_to_float32_m68k
+#define float16_round_to_int float16_round_to_int_m68k
+#define float32_round_to_int float32_round_to_int_m68k
+#define float64_round_to_int float64_round_to_int_m68k
+#define float16_to_int16_scalbn float16_to_int16_scalbn_m68k
+#define float16_to_int32_scalbn float16_to_int32_scalbn_m68k
+#define float16_to_int64_scalbn float16_to_int64_scalbn_m68k
+#define float32_to_int16_scalbn float32_to_int16_scalbn_m68k
+#define float32_to_int32_scalbn float32_to_int32_scalbn_m68k
+#define float32_to_int64_scalbn float32_to_int64_scalbn_m68k
+#define float64_to_int16_scalbn float64_to_int16_scalbn_m68k
+#define float64_to_int32_scalbn float64_to_int32_scalbn_m68k
+#define float64_to_int64_scalbn float64_to_int64_scalbn_m68k
+#define float16_to_int16 float16_to_int16_m68k
+#define float16_to_int32 float16_to_int32_m68k
+#define float16_to_int64 float16_to_int64_m68k
+#define float32_to_int16 float32_to_int16_m68k
+#define float32_to_int32 float32_to_int32_m68k
+#define float32_to_int64 float32_to_int64_m68k
+#define float64_to_int16 float64_to_int16_m68k
+#define float64_to_int32 float64_to_int32_m68k
+#define float64_to_int64 float64_to_int64_m68k
+#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_m68k
+#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_m68k
+#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_m68k
+#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_m68k
+#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_m68k
+#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_m68k
+#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_m68k
+#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_m68k
+#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_m68k
+#define float16_to_uint16_scalbn float16_to_uint16_scalbn_m68k
+#define float16_to_uint32_scalbn float16_to_uint32_scalbn_m68k
+#define float16_to_uint64_scalbn float16_to_uint64_scalbn_m68k
+#define float32_to_uint16_scalbn float32_to_uint16_scalbn_m68k
+#define float32_to_uint32_scalbn float32_to_uint32_scalbn_m68k
+#define float32_to_uint64_scalbn float32_to_uint64_scalbn_m68k
+#define float64_to_uint16_scalbn float64_to_uint16_scalbn_m68k
+#define float64_to_uint32_scalbn float64_to_uint32_scalbn_m68k
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_m68k
+#define float16_to_uint16 float16_to_uint16_m68k
+#define float16_to_uint32 float16_to_uint32_m68k
+#define float16_to_uint64 float16_to_uint64_m68k
+#define float32_to_uint16 float32_to_uint16_m68k
+#define float32_to_uint32 float32_to_uint32_m68k
+#define float32_to_uint64 float32_to_uint64_m68k
+#define float64_to_uint16 float64_to_uint16_m68k
+#define float64_to_uint32 float64_to_uint32_m68k
+#define float64_to_uint64 float64_to_uint64_m68k
+#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_m68k
+#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_m68k
+#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_m68k
+#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_m68k
+#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_m68k
+#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_m68k
+#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_m68k
+#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_m68k
+#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_m68k
+#define int64_to_float16_scalbn int64_to_float16_scalbn_m68k
+#define int32_to_float16_scalbn int32_to_float16_scalbn_m68k
+#define int16_to_float16_scalbn int16_to_float16_scalbn_m68k
+#define int64_to_float16 int64_to_float16_m68k
+#define int32_to_float16 int32_to_float16_m68k
+#define int16_to_float16 int16_to_float16_m68k
+#define int64_to_float32_scalbn int64_to_float32_scalbn_m68k
+#define int32_to_float32_scalbn int32_to_float32_scalbn_m68k
+#define int16_to_float32_scalbn int16_to_float32_scalbn_m68k
+#define int64_to_float32 int64_to_float32_m68k
+#define int32_to_float32 int32_to_float32_m68k
+#define int16_to_float32 int16_to_float32_m68k
+#define int64_to_float64_scalbn int64_to_float64_scalbn_m68k
+#define int32_to_float64_scalbn int32_to_float64_scalbn_m68k
+#define int16_to_float64_scalbn int16_to_float64_scalbn_m68k
+#define int64_to_float64 int64_to_float64_m68k
+#define int32_to_float64 int32_to_float64_m68k
+#define int16_to_float64 int16_to_float64_m68k
+#define uint64_to_float16_scalbn uint64_to_float16_scalbn_m68k
+#define uint32_to_float16_scalbn uint32_to_float16_scalbn_m68k
+#define uint16_to_float16_scalbn uint16_to_float16_scalbn_m68k
+#define uint64_to_float16 uint64_to_float16_m68k
+#define uint32_to_float16 uint32_to_float16_m68k
+#define uint16_to_float16 uint16_to_float16_m68k
+#define uint64_to_float32_scalbn uint64_to_float32_scalbn_m68k
+#define uint32_to_float32_scalbn uint32_to_float32_scalbn_m68k
+#define uint16_to_float32_scalbn uint16_to_float32_scalbn_m68k
+#define uint64_to_float32 uint64_to_float32_m68k
+#define uint32_to_float32 uint32_to_float32_m68k
+#define uint16_to_float32 uint16_to_float32_m68k
+#define uint64_to_float64_scalbn uint64_to_float64_scalbn_m68k
+#define uint32_to_float64_scalbn uint32_to_float64_scalbn_m68k
+#define uint16_to_float64_scalbn uint16_to_float64_scalbn_m68k
+#define uint64_to_float64 uint64_to_float64_m68k
+#define uint32_to_float64 uint32_to_float64_m68k
+#define uint16_to_float64 uint16_to_float64_m68k
+#define float16_min float16_min_m68k
+#define float16_minnum float16_minnum_m68k
+#define float16_minnummag float16_minnummag_m68k
+#define float16_max float16_max_m68k
+#define float16_maxnum float16_maxnum_m68k
+#define float16_maxnummag float16_maxnummag_m68k
+#define float32_min float32_min_m68k
+#define float32_minnum float32_minnum_m68k
+#define float32_minnummag float32_minnummag_m68k
 #define float32_max float32_max_m68k
 #define float32_maxnum float32_maxnum_m68k
 #define float32_maxnummag float32_maxnummag_m68k
-#define float32_maybe_silence_nan float32_maybe_silence_nan_m68k
-#define float32_min float32_min_m68k
-#define float32_minmax float32_minmax_m68k
-#define float32_minnum float32_minnum_m68k
-#define float32_minnummag float32_minnummag_m68k
-#define float32_mul float32_mul_m68k
-#define float32_muladd float32_muladd_m68k
-#define float32_rem float32_rem_m68k
-#define float32_round_to_int float32_round_to_int_m68k
-#define float32_scalbn float32_scalbn_m68k
-#define float32_set_sign float32_set_sign_m68k
-#define float32_sqrt float32_sqrt_m68k
-#define float32_squash_input_denormal float32_squash_input_denormal_m68k
-#define float32_sub float32_sub_m68k
-#define float32ToCommonNaN float32ToCommonNaN_m68k
-#define float32_to_float128 float32_to_float128_m68k
-#define float32_to_float16 float32_to_float16_m68k
-#define float32_to_float64 float32_to_float64_m68k
-#define float32_to_floatx80 float32_to_floatx80_m68k
-#define float32_to_int16 float32_to_int16_m68k
-#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_m68k
-#define float32_to_int32 float32_to_int32_m68k
-#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_m68k
-#define float32_to_int64 float32_to_int64_m68k
-#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_m68k
-#define float32_to_uint16 float32_to_uint16_m68k
-#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_m68k
-#define float32_to_uint32 float32_to_uint32_m68k
-#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_m68k
-#define float32_to_uint64 float32_to_uint64_m68k
-#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_m68k
-#define float32_unordered float32_unordered_m68k
-#define float32_unordered_quiet float32_unordered_quiet_m68k
-#define float64_abs float64_abs_m68k
-#define float64_add float64_add_m68k
-#define float64_chs float64_chs_m68k
-#define float64_compare float64_compare_m68k
-#define float64_compare_internal float64_compare_internal_m68k
-#define float64_compare_quiet float64_compare_quiet_m68k
-#define float64_default_nan float64_default_nan_m68k
-#define float64_div float64_div_m68k
-#define float64_eq float64_eq_m68k
-#define float64_eq_quiet float64_eq_quiet_m68k
-#define float64_is_any_nan float64_is_any_nan_m68k
-#define float64_is_infinity float64_is_infinity_m68k
-#define float64_is_neg float64_is_neg_m68k
-#define float64_is_quiet_nan float64_is_quiet_nan_m68k
-#define float64_is_signaling_nan float64_is_signaling_nan_m68k
-#define float64_is_zero float64_is_zero_m68k
-#define float64_le float64_le_m68k
-#define float64_le_quiet float64_le_quiet_m68k
-#define float64_log2 float64_log2_m68k
-#define float64_lt float64_lt_m68k
-#define float64_lt_quiet float64_lt_quiet_m68k
+#define float64_min float64_min_m68k
+#define float64_minnum float64_minnum_m68k
+#define float64_minnummag float64_minnummag_m68k
 #define float64_max float64_max_m68k
 #define float64_maxnum float64_maxnum_m68k
 #define float64_maxnummag float64_maxnummag_m68k
-#define float64_maybe_silence_nan float64_maybe_silence_nan_m68k
-#define float64_min float64_min_m68k
-#define float64_minmax float64_minmax_m68k
-#define float64_minnum float64_minnum_m68k
-#define float64_minnummag float64_minnummag_m68k
-#define float64_mul float64_mul_m68k
-#define float64_muladd float64_muladd_m68k
-#define float64_rem float64_rem_m68k
-#define float64_round_to_int float64_round_to_int_m68k
+#define float16_compare float16_compare_m68k
+#define float16_compare_quiet float16_compare_quiet_m68k
+#define float32_compare float32_compare_m68k
+#define float32_compare_quiet float32_compare_quiet_m68k
+#define float64_compare float64_compare_m68k
+#define float64_compare_quiet float64_compare_quiet_m68k
+#define float16_scalbn float16_scalbn_m68k
+#define float32_scalbn float32_scalbn_m68k
 #define float64_scalbn float64_scalbn_m68k
-#define float64_set_sign float64_set_sign_m68k
+#define float16_sqrt float16_sqrt_m68k
+#define float32_sqrt float32_sqrt_m68k
 #define float64_sqrt float64_sqrt_m68k
+#define float16_default_nan float16_default_nan_m68k
+#define float32_default_nan float32_default_nan_m68k
+#define float64_default_nan float64_default_nan_m68k
+#define float128_default_nan float128_default_nan_m68k
+#define float16_silence_nan float16_silence_nan_m68k
+#define float32_silence_nan float32_silence_nan_m68k
+#define float64_silence_nan float64_silence_nan_m68k
+#define float16_squash_input_denormal float16_squash_input_denormal_m68k
+#define float32_squash_input_denormal float32_squash_input_denormal_m68k
 #define float64_squash_input_denormal float64_squash_input_denormal_m68k
-#define float64_sub float64_sub_m68k
-#define float64ToCommonNaN float64ToCommonNaN_m68k
-#define float64_to_float128 float64_to_float128_m68k
-#define float64_to_float16 float64_to_float16_m68k
-#define float64_to_float32 float64_to_float32_m68k
+#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_m68k
+#define roundAndPackFloatx80 roundAndPackFloatx80_m68k
+#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_m68k
+#define int32_to_floatx80 int32_to_floatx80_m68k
+#define int32_to_float128 int32_to_float128_m68k
+#define int64_to_floatx80 int64_to_floatx80_m68k
+#define int64_to_float128 int64_to_float128_m68k
+#define uint64_to_float128 uint64_to_float128_m68k
+#define float32_to_floatx80 float32_to_floatx80_m68k
+#define float32_to_float128 float32_to_float128_m68k
+#define float32_rem float32_rem_m68k
+#define float32_exp2 float32_exp2_m68k
+#define float32_log2 float32_log2_m68k
+#define float32_eq float32_eq_m68k
+#define float32_le float32_le_m68k
+#define float32_lt float32_lt_m68k
+#define float32_unordered float32_unordered_m68k
+#define float32_eq_quiet float32_eq_quiet_m68k
+#define float32_le_quiet float32_le_quiet_m68k
+#define float32_lt_quiet float32_lt_quiet_m68k
+#define float32_unordered_quiet float32_unordered_quiet_m68k
 #define float64_to_floatx80 float64_to_floatx80_m68k
-#define float64_to_int16 float64_to_int16_m68k
-#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_m68k
-#define float64_to_int32 float64_to_int32_m68k
-#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_m68k
-#define float64_to_int64 float64_to_int64_m68k
-#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_m68k
-#define float64_to_uint16 float64_to_uint16_m68k
-#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_m68k
-#define float64_to_uint32 float64_to_uint32_m68k
-#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_m68k
-#define float64_to_uint64 float64_to_uint64_m68k
-#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_m68k
-#define float64_trunc_to_int float64_trunc_to_int_m68k
+#define float64_to_float128 float64_to_float128_m68k
+#define float64_rem float64_rem_m68k
+#define float64_log2 float64_log2_m68k
+#define float64_eq float64_eq_m68k
+#define float64_le float64_le_m68k
+#define float64_lt float64_lt_m68k
 #define float64_unordered float64_unordered_m68k
+#define float64_eq_quiet float64_eq_quiet_m68k
+#define float64_le_quiet float64_le_quiet_m68k
+#define float64_lt_quiet float64_lt_quiet_m68k
 #define float64_unordered_quiet float64_unordered_quiet_m68k
-#define float_raise float_raise_m68k
-#define floatx80_add floatx80_add_m68k
-#define floatx80_compare floatx80_compare_m68k
-#define floatx80_compare_internal floatx80_compare_internal_m68k
-#define floatx80_compare_quiet floatx80_compare_quiet_m68k
-#define floatx80_default_nan floatx80_default_nan_m68k
-#define floatx80_div floatx80_div_m68k
-#define floatx80_eq floatx80_eq_m68k
-#define floatx80_eq_quiet floatx80_eq_quiet_m68k
-#define floatx80_is_quiet_nan floatx80_is_quiet_nan_m68k
-#define floatx80_is_signaling_nan floatx80_is_signaling_nan_m68k
-#define floatx80_le floatx80_le_m68k
-#define floatx80_le_quiet floatx80_le_quiet_m68k
-#define floatx80_lt floatx80_lt_m68k
-#define floatx80_lt_quiet floatx80_lt_quiet_m68k
-#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_m68k
-#define floatx80_mul floatx80_mul_m68k
-#define floatx80_rem floatx80_rem_m68k
-#define floatx80_round_to_int floatx80_round_to_int_m68k
-#define floatx80_scalbn floatx80_scalbn_m68k
-#define floatx80_sqrt floatx80_sqrt_m68k
-#define floatx80_sub floatx80_sub_m68k
-#define floatx80ToCommonNaN floatx80ToCommonNaN_m68k
-#define floatx80_to_float128 floatx80_to_float128_m68k
-#define floatx80_to_float32 floatx80_to_float32_m68k
-#define floatx80_to_float64 floatx80_to_float64_m68k
 #define floatx80_to_int32 floatx80_to_int32_m68k
 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_m68k
 #define floatx80_to_int64 floatx80_to_int64_m68k
 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_m68k
+#define floatx80_to_float32 floatx80_to_float32_m68k
+#define floatx80_to_float64 floatx80_to_float64_m68k
+#define floatx80_to_float128 floatx80_to_float128_m68k
+#define floatx80_round floatx80_round_m68k
+#define floatx80_round_to_int floatx80_round_to_int_m68k
+#define floatx80_add floatx80_add_m68k
+#define floatx80_sub floatx80_sub_m68k
+#define floatx80_mul floatx80_mul_m68k
+#define floatx80_div floatx80_div_m68k
+#define floatx80_rem floatx80_rem_m68k
+#define floatx80_sqrt floatx80_sqrt_m68k
+#define floatx80_eq floatx80_eq_m68k
+#define floatx80_le floatx80_le_m68k
+#define floatx80_lt floatx80_lt_m68k
 #define floatx80_unordered floatx80_unordered_m68k
+#define floatx80_eq_quiet floatx80_eq_quiet_m68k
+#define floatx80_le_quiet floatx80_le_quiet_m68k
+#define floatx80_lt_quiet floatx80_lt_quiet_m68k
 #define floatx80_unordered_quiet floatx80_unordered_quiet_m68k
-#define flush_icache_range flush_icache_range_m68k
-#define format_string format_string_m68k
-#define fp_decode_rm fp_decode_rm_m68k
-#define frame_dummy frame_dummy_m68k
-#define free_range free_range_m68k
-#define fstat64 fstat64_m68k
-#define futex_wait futex_wait_m68k
-#define futex_wake futex_wake_m68k
-#define gen_aa32_ld16s gen_aa32_ld16s_m68k
-#define gen_aa32_ld16u gen_aa32_ld16u_m68k
-#define gen_aa32_ld32u gen_aa32_ld32u_m68k
-#define gen_aa32_ld64 gen_aa32_ld64_m68k
-#define gen_aa32_ld8s gen_aa32_ld8s_m68k
-#define gen_aa32_ld8u gen_aa32_ld8u_m68k
-#define gen_aa32_st16 gen_aa32_st16_m68k
-#define gen_aa32_st32 gen_aa32_st32_m68k
-#define gen_aa32_st64 gen_aa32_st64_m68k
-#define gen_aa32_st8 gen_aa32_st8_m68k
-#define gen_adc gen_adc_m68k
-#define gen_adc_CC gen_adc_CC_m68k
-#define gen_add16 gen_add16_m68k
-#define gen_add_carry gen_add_carry_m68k
-#define gen_add_CC gen_add_CC_m68k
-#define gen_add_datah_offset gen_add_datah_offset_m68k
-#define gen_add_data_offset gen_add_data_offset_m68k
-#define gen_addq gen_addq_m68k
-#define gen_addq_lo gen_addq_lo_m68k
-#define gen_addq_msw gen_addq_msw_m68k
-#define gen_arm_parallel_addsub gen_arm_parallel_addsub_m68k
-#define gen_arm_shift_im gen_arm_shift_im_m68k
-#define gen_arm_shift_reg gen_arm_shift_reg_m68k
-#define gen_bx gen_bx_m68k
-#define gen_bx_im gen_bx_im_m68k
-#define gen_clrex gen_clrex_m68k
-#define generate_memory_topology generate_memory_topology_m68k
-#define generic_timer_cp_reginfo generic_timer_cp_reginfo_m68k
-#define gen_exception gen_exception_m68k
-#define gen_exception_insn gen_exception_insn_m68k
-#define gen_exception_internal gen_exception_internal_m68k
-#define gen_exception_internal_insn gen_exception_internal_insn_m68k
-#define gen_exception_return gen_exception_return_m68k
-#define gen_goto_tb gen_goto_tb_m68k
-#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_m68k
-#define gen_helper_add_saturate gen_helper_add_saturate_m68k
-#define gen_helper_add_setq gen_helper_add_setq_m68k
-#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_m68k
-#define gen_helper_clz32 gen_helper_clz32_m68k
-#define gen_helper_clz64 gen_helper_clz64_m68k
-#define gen_helper_clz_arm gen_helper_clz_arm_m68k
-#define gen_helper_cpsr_read gen_helper_cpsr_read_m68k
-#define gen_helper_cpsr_write gen_helper_cpsr_write_m68k
-#define gen_helper_crc32_arm gen_helper_crc32_arm_m68k
-#define gen_helper_crc32c gen_helper_crc32c_m68k
-#define gen_helper_crypto_aese gen_helper_crypto_aese_m68k
-#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_m68k
-#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_m68k
-#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_m68k
-#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_m68k
-#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_m68k
-#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_m68k
-#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_m68k
-#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_m68k
-#define gen_helper_double_saturate gen_helper_double_saturate_m68k
-#define gen_helper_exception_internal gen_helper_exception_internal_m68k
-#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_m68k
-#define gen_helper_get_cp_reg gen_helper_get_cp_reg_m68k
-#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_m68k
-#define gen_helper_get_r13_banked gen_helper_get_r13_banked_m68k
-#define gen_helper_get_user_reg gen_helper_get_user_reg_m68k
-#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_m68k
-#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_m68k
-#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_m68k
-#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_m68k
-#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_m68k
-#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_m68k
-#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_m68k
-#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_m68k
-#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_m68k
-#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_m68k
-#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_m68k
-#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_m68k
-#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_m68k
-#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_m68k
-#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_m68k
-#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_m68k
-#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_m68k
-#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_m68k
-#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_m68k
-#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_m68k
-#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_m68k
-#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_m68k
-#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_m68k
-#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_m68k
-#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_m68k
-#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_m68k
-#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_m68k
-#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_m68k
-#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_m68k
-#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_m68k
-#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_m68k
-#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_m68k
-#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_m68k
-#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_m68k
-#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_m68k
-#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_m68k
-#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_m68k
-#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_m68k
-#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_m68k
-#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_m68k
-#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_m68k
-#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_m68k
-#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_m68k
-#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_m68k
-#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_m68k
-#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_m68k
-#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_m68k
-#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_m68k
-#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_m68k
-#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_m68k
-#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_m68k
-#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_m68k
-#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_m68k
-#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_m68k
-#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_m68k
-#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_m68k
-#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_m68k
-#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_m68k
-#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_m68k
-#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_m68k
-#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_m68k
-#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_m68k
-#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_m68k
-#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_m68k
-#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_m68k
-#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_m68k
-#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_m68k
-#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_m68k
-#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_m68k
-#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_m68k
-#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_m68k
-#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_m68k
-#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_m68k
-#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_m68k
-#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_m68k
-#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_m68k
-#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_m68k
-#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_m68k
-#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_m68k
-#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_m68k
-#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_m68k
-#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_m68k
-#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_m68k
-#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_m68k
-#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_m68k
-#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_m68k
-#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_m68k
-#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_m68k
-#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_m68k
-#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_m68k
-#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_m68k
-#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_m68k
-#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_m68k
-#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_m68k
-#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_m68k
-#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_m68k
-#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_m68k
-#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_m68k
-#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_m68k
-#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_m68k
-#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_m68k
-#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_m68k
-#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_m68k
-#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_m68k
-#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_m68k
-#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_m68k
-#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_m68k
-#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_m68k
-#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_m68k
-#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_m68k
-#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_m68k
-#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_m68k
-#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_m68k
-#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_m68k
-#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_m68k
-#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_m68k
-#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_m68k
-#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_m68k
-#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_m68k
-#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_m68k
-#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_m68k
-#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_m68k
-#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_m68k
-#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_m68k
-#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_m68k
-#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_m68k
-#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_m68k
-#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_m68k
-#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_m68k
-#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_m68k
-#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_m68k
-#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_m68k
-#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_m68k
-#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_m68k
-#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_m68k
-#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_m68k
-#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_m68k
-#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_m68k
-#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_m68k
-#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_m68k
-#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_m68k
-#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_m68k
-#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_m68k
-#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_m68k
-#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_m68k
-#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_m68k
-#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_m68k
-#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_m68k
-#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_m68k
-#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_m68k
-#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_m68k
-#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_m68k
-#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_m68k
-#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_m68k
-#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_m68k
-#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_m68k
-#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_m68k
-#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_m68k
-#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_m68k
-#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_m68k
-#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_m68k
-#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_m68k
-#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_m68k
-#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_m68k
-#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_m68k
-#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_m68k
-#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_m68k
-#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_m68k
-#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_m68k
-#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_m68k
-#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_m68k
-#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_m68k
-#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_m68k
-#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_m68k
-#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_m68k
-#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_m68k
-#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_m68k
-#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_m68k
-#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_m68k
-#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_m68k
-#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_m68k
-#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_m68k
-#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_m68k
-#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_m68k
-#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_m68k
-#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_m68k
-#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_m68k
-#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_m68k
-#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_m68k
-#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_m68k
-#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_m68k
-#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_m68k
-#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_m68k
-#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_m68k
-#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_m68k
-#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_m68k
-#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_m68k
-#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_m68k
-#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_m68k
-#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_m68k
-#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_m68k
-#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_m68k
-#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_m68k
-#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_m68k
-#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_m68k
-#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_m68k
-#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_m68k
-#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_m68k
-#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_m68k
-#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_m68k
-#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_m68k
-#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_m68k
-#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_m68k
-#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_m68k
-#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_m68k
-#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_m68k
-#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_m68k
-#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_m68k
-#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_m68k
-#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_m68k
-#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_m68k
-#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_m68k
-#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_m68k
-#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_m68k
-#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_m68k
-#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_m68k
-#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_m68k
-#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_m68k
-#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_m68k
-#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_m68k
-#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_m68k
-#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_m68k
-#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_m68k
-#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_m68k
-#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_m68k
-#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_m68k
-#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_m68k
-#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_m68k
-#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_m68k
-#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_m68k
-#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_m68k
-#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_m68k
-#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_m68k
-#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_m68k
-#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_m68k
-#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_m68k
-#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_m68k
-#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_m68k
-#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_m68k
-#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_m68k
-#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_m68k
-#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_m68k
-#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_m68k
-#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_m68k
-#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_m68k
-#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_m68k
-#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_m68k
-#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_m68k
-#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_m68k
-#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_m68k
-#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_m68k
-#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_m68k
-#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_m68k
-#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_m68k
-#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_m68k
-#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_m68k
-#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_m68k
-#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_m68k
-#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_m68k
-#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_m68k
-#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_m68k
-#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_m68k
-#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_m68k
-#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_m68k
-#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_m68k
-#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_m68k
-#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_m68k
-#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_m68k
-#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_m68k
-#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_m68k
-#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_m68k
-#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_m68k
-#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_m68k
-#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_m68k
-#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_m68k
-#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_m68k
-#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_m68k
-#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_m68k
-#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_m68k
-#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_m68k
-#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_m68k
-#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_m68k
-#define gen_helper_neon_tbl gen_helper_neon_tbl_m68k
-#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_m68k
-#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_m68k
-#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_m68k
-#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_m68k
-#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_m68k
-#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_m68k
-#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_m68k
-#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_m68k
-#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_m68k
-#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_m68k
-#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_m68k
-#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_m68k
-#define gen_helper_neon_zip16 gen_helper_neon_zip16_m68k
-#define gen_helper_neon_zip8 gen_helper_neon_zip8_m68k
-#define gen_helper_pre_hvc gen_helper_pre_hvc_m68k
-#define gen_helper_pre_smc gen_helper_pre_smc_m68k
-#define gen_helper_qadd16 gen_helper_qadd16_m68k
-#define gen_helper_qadd8 gen_helper_qadd8_m68k
-#define gen_helper_qaddsubx gen_helper_qaddsubx_m68k
-#define gen_helper_qsub16 gen_helper_qsub16_m68k
-#define gen_helper_qsub8 gen_helper_qsub8_m68k
-#define gen_helper_qsubaddx gen_helper_qsubaddx_m68k
-#define gen_helper_rbit gen_helper_rbit_m68k
-#define gen_helper_recpe_f32 gen_helper_recpe_f32_m68k
-#define gen_helper_recpe_u32 gen_helper_recpe_u32_m68k
-#define gen_helper_recps_f32 gen_helper_recps_f32_m68k
-#define gen_helper_rintd gen_helper_rintd_m68k
-#define gen_helper_rintd_exact gen_helper_rintd_exact_m68k
-#define gen_helper_rints gen_helper_rints_m68k
-#define gen_helper_rints_exact gen_helper_rints_exact_m68k
-#define gen_helper_ror_cc gen_helper_ror_cc_m68k
-#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_m68k
-#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_m68k
-#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_m68k
-#define gen_helper_sadd16 gen_helper_sadd16_m68k
-#define gen_helper_sadd8 gen_helper_sadd8_m68k
-#define gen_helper_saddsubx gen_helper_saddsubx_m68k
-#define gen_helper_sar_cc gen_helper_sar_cc_m68k
-#define gen_helper_sdiv gen_helper_sdiv_m68k
-#define gen_helper_sel_flags gen_helper_sel_flags_m68k
-#define gen_helper_set_cp_reg gen_helper_set_cp_reg_m68k
-#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_m68k
-#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_m68k
-#define gen_helper_set_r13_banked gen_helper_set_r13_banked_m68k
-#define gen_helper_set_rmode gen_helper_set_rmode_m68k
-#define gen_helper_set_user_reg gen_helper_set_user_reg_m68k
-#define gen_helper_shadd16 gen_helper_shadd16_m68k
-#define gen_helper_shadd8 gen_helper_shadd8_m68k
-#define gen_helper_shaddsubx gen_helper_shaddsubx_m68k
-#define gen_helper_shl_cc gen_helper_shl_cc_m68k
-#define gen_helper_shr_cc gen_helper_shr_cc_m68k
-#define gen_helper_shsub16 gen_helper_shsub16_m68k
-#define gen_helper_shsub8 gen_helper_shsub8_m68k
-#define gen_helper_shsubaddx gen_helper_shsubaddx_m68k
-#define gen_helper_ssat gen_helper_ssat_m68k
-#define gen_helper_ssat16 gen_helper_ssat16_m68k
-#define gen_helper_ssub16 gen_helper_ssub16_m68k
-#define gen_helper_ssub8 gen_helper_ssub8_m68k
-#define gen_helper_ssubaddx gen_helper_ssubaddx_m68k
-#define gen_helper_sub_saturate gen_helper_sub_saturate_m68k
-#define gen_helper_sxtb16 gen_helper_sxtb16_m68k
-#define gen_helper_uadd16 gen_helper_uadd16_m68k
-#define gen_helper_uadd8 gen_helper_uadd8_m68k
-#define gen_helper_uaddsubx gen_helper_uaddsubx_m68k
-#define gen_helper_udiv gen_helper_udiv_m68k
-#define gen_helper_uhadd16 gen_helper_uhadd16_m68k
-#define gen_helper_uhadd8 gen_helper_uhadd8_m68k
-#define gen_helper_uhaddsubx gen_helper_uhaddsubx_m68k
-#define gen_helper_uhsub16 gen_helper_uhsub16_m68k
-#define gen_helper_uhsub8 gen_helper_uhsub8_m68k
-#define gen_helper_uhsubaddx
gen_helper_uhsubaddx_m68k -#define gen_helper_uqadd16 gen_helper_uqadd16_m68k -#define gen_helper_uqadd8 gen_helper_uqadd8_m68k -#define gen_helper_uqaddsubx gen_helper_uqaddsubx_m68k -#define gen_helper_uqsub16 gen_helper_uqsub16_m68k -#define gen_helper_uqsub8 gen_helper_uqsub8_m68k -#define gen_helper_uqsubaddx gen_helper_uqsubaddx_m68k -#define gen_helper_usad8 gen_helper_usad8_m68k -#define gen_helper_usat gen_helper_usat_m68k -#define gen_helper_usat16 gen_helper_usat16_m68k -#define gen_helper_usub16 gen_helper_usub16_m68k -#define gen_helper_usub8 gen_helper_usub8_m68k -#define gen_helper_usubaddx gen_helper_usubaddx_m68k -#define gen_helper_uxtb16 gen_helper_uxtb16_m68k -#define gen_helper_v7m_mrs gen_helper_v7m_mrs_m68k -#define gen_helper_v7m_msr gen_helper_v7m_msr_m68k -#define gen_helper_vfp_absd gen_helper_vfp_absd_m68k -#define gen_helper_vfp_abss gen_helper_vfp_abss_m68k -#define gen_helper_vfp_addd gen_helper_vfp_addd_m68k -#define gen_helper_vfp_adds gen_helper_vfp_adds_m68k -#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_m68k -#define gen_helper_vfp_cmped gen_helper_vfp_cmped_m68k -#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_m68k -#define gen_helper_vfp_cmps gen_helper_vfp_cmps_m68k -#define gen_helper_vfp_divd gen_helper_vfp_divd_m68k -#define gen_helper_vfp_divs gen_helper_vfp_divs_m68k -#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_m68k -#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_m68k -#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_m68k -#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_m68k -#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_m68k -#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_m68k -#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_m68k -#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_m68k -#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_m68k -#define gen_helper_vfp_maxs gen_helper_vfp_maxs_m68k -#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_m68k -#define gen_helper_vfp_minnums gen_helper_vfp_minnums_m68k -#define gen_helper_vfp_mins gen_helper_vfp_mins_m68k -#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_m68k -#define gen_helper_vfp_muladds gen_helper_vfp_muladds_m68k -#define gen_helper_vfp_muld gen_helper_vfp_muld_m68k -#define gen_helper_vfp_muls gen_helper_vfp_muls_m68k -#define gen_helper_vfp_negd gen_helper_vfp_negd_m68k -#define gen_helper_vfp_negs gen_helper_vfp_negs_m68k -#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_m68k -#define gen_helper_vfp_shtod gen_helper_vfp_shtod_m68k -#define gen_helper_vfp_shtos gen_helper_vfp_shtos_m68k -#define gen_helper_vfp_sitod gen_helper_vfp_sitod_m68k -#define gen_helper_vfp_sitos gen_helper_vfp_sitos_m68k -#define gen_helper_vfp_sltod gen_helper_vfp_sltod_m68k -#define gen_helper_vfp_sltos gen_helper_vfp_sltos_m68k -#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_m68k -#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_m68k -#define gen_helper_vfp_subd gen_helper_vfp_subd_m68k -#define gen_helper_vfp_subs gen_helper_vfp_subs_m68k -#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_m68k -#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_m68k -#define gen_helper_vfp_tosid gen_helper_vfp_tosid_m68k -#define gen_helper_vfp_tosis gen_helper_vfp_tosis_m68k -#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_m68k -#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_m68k -#define gen_helper_vfp_tosld 
gen_helper_vfp_tosld_m68k -#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_m68k -#define gen_helper_vfp_tosls gen_helper_vfp_tosls_m68k -#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_m68k -#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_m68k -#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_m68k -#define gen_helper_vfp_touid gen_helper_vfp_touid_m68k -#define gen_helper_vfp_touis gen_helper_vfp_touis_m68k -#define gen_helper_vfp_touizd gen_helper_vfp_touizd_m68k -#define gen_helper_vfp_touizs gen_helper_vfp_touizs_m68k -#define gen_helper_vfp_tould gen_helper_vfp_tould_m68k -#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_m68k -#define gen_helper_vfp_touls gen_helper_vfp_touls_m68k -#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_m68k -#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_m68k -#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_m68k -#define gen_helper_vfp_uitod gen_helper_vfp_uitod_m68k -#define gen_helper_vfp_uitos gen_helper_vfp_uitos_m68k -#define gen_helper_vfp_ultod gen_helper_vfp_ultod_m68k -#define gen_helper_vfp_ultos gen_helper_vfp_ultos_m68k -#define gen_helper_wfe gen_helper_wfe_m68k -#define gen_helper_wfi gen_helper_wfi_m68k -#define gen_hvc gen_hvc_m68k -#define gen_intermediate_code_internal gen_intermediate_code_internal_m68k -#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_m68k -#define gen_iwmmxt_address gen_iwmmxt_address_m68k -#define gen_iwmmxt_shift gen_iwmmxt_shift_m68k -#define gen_jmp gen_jmp_m68k -#define gen_load_and_replicate gen_load_and_replicate_m68k -#define gen_load_exclusive gen_load_exclusive_m68k -#define gen_logic_CC gen_logic_CC_m68k -#define gen_logicq_cc gen_logicq_cc_m68k -#define gen_lookup_tb gen_lookup_tb_m68k -#define gen_mov_F0_vreg gen_mov_F0_vreg_m68k -#define gen_mov_F1_vreg gen_mov_F1_vreg_m68k -#define gen_mov_vreg_F0 gen_mov_vreg_F0_m68k -#define gen_muls_i64_i32 gen_muls_i64_i32_m68k -#define gen_mulu_i64_i32 gen_mulu_i64_i32_m68k -#define gen_mulxy gen_mulxy_m68k -#define gen_neon_add gen_neon_add_m68k -#define gen_neon_addl gen_neon_addl_m68k -#define gen_neon_addl_saturate gen_neon_addl_saturate_m68k -#define gen_neon_bsl gen_neon_bsl_m68k -#define gen_neon_dup_high16 gen_neon_dup_high16_m68k -#define gen_neon_dup_low16 gen_neon_dup_low16_m68k -#define gen_neon_dup_u8 gen_neon_dup_u8_m68k -#define gen_neon_mull gen_neon_mull_m68k -#define gen_neon_narrow gen_neon_narrow_m68k -#define gen_neon_narrow_op gen_neon_narrow_op_m68k -#define gen_neon_narrow_sats gen_neon_narrow_sats_m68k -#define gen_neon_narrow_satu gen_neon_narrow_satu_m68k -#define gen_neon_negl gen_neon_negl_m68k -#define gen_neon_rsb gen_neon_rsb_m68k -#define gen_neon_shift_narrow gen_neon_shift_narrow_m68k -#define gen_neon_subl gen_neon_subl_m68k -#define gen_neon_trn_u16 gen_neon_trn_u16_m68k -#define gen_neon_trn_u8 gen_neon_trn_u8_m68k -#define gen_neon_unarrow_sats gen_neon_unarrow_sats_m68k -#define gen_neon_unzip gen_neon_unzip_m68k -#define gen_neon_widen gen_neon_widen_m68k -#define gen_neon_zip gen_neon_zip_m68k +#define float128_to_int32 float128_to_int32_m68k +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_m68k +#define float128_to_int64 float128_to_int64_m68k +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_m68k +#define float128_to_uint64 float128_to_uint64_m68k +#define 
float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_m68k +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_m68k +#define float128_to_uint32 float128_to_uint32_m68k +#define float128_to_float32 float128_to_float32_m68k +#define float128_to_float64 float128_to_float64_m68k +#define float128_to_floatx80 float128_to_floatx80_m68k +#define float128_round_to_int float128_round_to_int_m68k +#define float128_add float128_add_m68k +#define float128_sub float128_sub_m68k +#define float128_mul float128_mul_m68k +#define float128_div float128_div_m68k +#define float128_rem float128_rem_m68k +#define float128_sqrt float128_sqrt_m68k +#define float128_eq float128_eq_m68k +#define float128_le float128_le_m68k +#define float128_lt float128_lt_m68k +#define float128_unordered float128_unordered_m68k +#define float128_eq_quiet float128_eq_quiet_m68k +#define float128_le_quiet float128_le_quiet_m68k +#define float128_lt_quiet float128_lt_quiet_m68k +#define float128_unordered_quiet float128_unordered_quiet_m68k +#define floatx80_compare floatx80_compare_m68k +#define floatx80_compare_quiet floatx80_compare_quiet_m68k +#define float128_compare float128_compare_m68k +#define float128_compare_quiet float128_compare_quiet_m68k +#define floatx80_scalbn floatx80_scalbn_m68k +#define float128_scalbn float128_scalbn_m68k +#define softfloat_init softfloat_init_m68k +#define tcg_optimize tcg_optimize_m68k #define gen_new_label gen_new_label_m68k -#define gen_nop_hint gen_nop_hint_m68k -#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_m68k -#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_m68k -#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_m68k -#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_m68k -#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_m68k -#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_m68k -#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_m68k -#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_m68k -#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_m68k -#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_m68k -#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_m68k -#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_m68k -#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_m68k -#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_m68k -#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_m68k -#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_m68k -#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_m68k -#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_m68k -#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_m68k -#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_m68k -#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_m68k -#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_m68k -#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_m68k -#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_m68k -#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_m68k -#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_m68k -#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_m68k -#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_m68k -#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_m68k -#define 
gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_m68k -#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_m68k -#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_m68k -#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_m68k -#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_m68k -#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_m68k -#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_m68k -#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_m68k -#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_m68k -#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_m68k -#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_m68k -#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_m68k -#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_m68k -#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_m68k -#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_m68k -#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_m68k -#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_m68k -#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_m68k -#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_m68k -#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_m68k -#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_m68k -#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_m68k -#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_m68k -#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_m68k -#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_m68k -#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_m68k -#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_m68k -#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_m68k -#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_m68k -#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_m68k -#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_m68k -#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_m68k -#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_m68k -#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_m68k -#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_m68k -#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_m68k -#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_m68k -#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_m68k -#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_m68k -#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_m68k -#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_m68k -#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_m68k -#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_m68k -#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_m68k -#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_m68k -#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_m68k -#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_m68k -#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_m68k -#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_m68k -#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_m68k -#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_m68k -#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_m68k -#define 
gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_m68k -#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_m68k -#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_m68k -#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_m68k -#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_m68k -#define gen_rev16 gen_rev16_m68k -#define gen_revsh gen_revsh_m68k -#define gen_rfe gen_rfe_m68k -#define gen_sar gen_sar_m68k -#define gen_sbc_CC gen_sbc_CC_m68k -#define gen_sbfx gen_sbfx_m68k -#define gen_set_CF_bit31 gen_set_CF_bit31_m68k -#define gen_set_condexec gen_set_condexec_m68k -#define gen_set_cpsr gen_set_cpsr_m68k -#define gen_set_label gen_set_label_m68k -#define gen_set_pc_im gen_set_pc_im_m68k -#define gen_set_psr gen_set_psr_m68k -#define gen_set_psr_im gen_set_psr_im_m68k -#define gen_shl gen_shl_m68k -#define gen_shr gen_shr_m68k -#define gen_smc gen_smc_m68k -#define gen_smul_dual gen_smul_dual_m68k -#define gen_srs gen_srs_m68k -#define gen_ss_advance gen_ss_advance_m68k -#define gen_step_complete_exception gen_step_complete_exception_m68k -#define gen_store_exclusive gen_store_exclusive_m68k -#define gen_storeq_reg gen_storeq_reg_m68k -#define gen_sub_carry gen_sub_carry_m68k -#define gen_sub_CC gen_sub_CC_m68k -#define gen_subq_msw gen_subq_msw_m68k -#define gen_swap_half gen_swap_half_m68k -#define gen_thumb2_data_op gen_thumb2_data_op_m68k -#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_m68k -#define gen_ubfx gen_ubfx_m68k -#define gen_vfp_abs gen_vfp_abs_m68k -#define gen_vfp_add gen_vfp_add_m68k -#define gen_vfp_cmp gen_vfp_cmp_m68k -#define gen_vfp_cmpe gen_vfp_cmpe_m68k -#define gen_vfp_div gen_vfp_div_m68k -#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_m68k -#define gen_vfp_F1_mul gen_vfp_F1_mul_m68k -#define gen_vfp_F1_neg gen_vfp_F1_neg_m68k -#define gen_vfp_ld gen_vfp_ld_m68k -#define gen_vfp_mrs gen_vfp_mrs_m68k -#define gen_vfp_msr gen_vfp_msr_m68k -#define gen_vfp_mul gen_vfp_mul_m68k -#define gen_vfp_neg gen_vfp_neg_m68k -#define gen_vfp_shto gen_vfp_shto_m68k -#define gen_vfp_sito gen_vfp_sito_m68k -#define gen_vfp_slto gen_vfp_slto_m68k -#define gen_vfp_sqrt gen_vfp_sqrt_m68k -#define gen_vfp_st gen_vfp_st_m68k -#define gen_vfp_sub gen_vfp_sub_m68k -#define gen_vfp_tosh gen_vfp_tosh_m68k -#define gen_vfp_tosi gen_vfp_tosi_m68k -#define gen_vfp_tosiz gen_vfp_tosiz_m68k -#define gen_vfp_tosl gen_vfp_tosl_m68k -#define gen_vfp_touh gen_vfp_touh_m68k -#define gen_vfp_toui gen_vfp_toui_m68k -#define gen_vfp_touiz gen_vfp_touiz_m68k -#define gen_vfp_toul gen_vfp_toul_m68k -#define gen_vfp_uhto gen_vfp_uhto_m68k -#define gen_vfp_uito gen_vfp_uito_m68k -#define gen_vfp_ulto gen_vfp_ulto_m68k -#define get_arm_cp_reginfo get_arm_cp_reginfo_m68k -#define get_clock get_clock_m68k -#define get_clock_realtime get_clock_realtime_m68k -#define get_constraint_priority get_constraint_priority_m68k -#define get_float_exception_flags get_float_exception_flags_m68k -#define get_float_rounding_mode get_float_rounding_mode_m68k -#define get_fpstatus_ptr get_fpstatus_ptr_m68k -#define get_level1_table_address get_level1_table_address_m68k -#define get_mem_index get_mem_index_m68k -#define get_next_param_value get_next_param_value_m68k -#define get_opt_name get_opt_name_m68k -#define get_opt_value get_opt_value_m68k -#define get_page_addr_code get_page_addr_code_m68k -#define get_param_value get_param_value_m68k -#define get_phys_addr get_phys_addr_m68k -#define get_phys_addr_lpae get_phys_addr_lpae_m68k -#define get_phys_addr_mpu 
get_phys_addr_mpu_m68k -#define get_phys_addr_v5 get_phys_addr_v5_m68k -#define get_phys_addr_v6 get_phys_addr_v6_m68k -#define get_system_memory get_system_memory_m68k -#define get_ticks_per_sec get_ticks_per_sec_m68k -#define g_list_insert_sorted_merged g_list_insert_sorted_merged_m68k -#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__m68k -#define gt_cntfrq_access gt_cntfrq_access_m68k -#define gt_cnt_read gt_cnt_read_m68k -#define gt_cnt_reset gt_cnt_reset_m68k -#define gt_counter_access gt_counter_access_m68k -#define gt_ctl_write gt_ctl_write_m68k -#define gt_cval_write gt_cval_write_m68k -#define gt_get_countervalue gt_get_countervalue_m68k -#define gt_pct_access gt_pct_access_m68k -#define gt_ptimer_access gt_ptimer_access_m68k -#define gt_recalc_timer gt_recalc_timer_m68k -#define gt_timer_access gt_timer_access_m68k -#define gt_tval_read gt_tval_read_m68k -#define gt_tval_write gt_tval_write_m68k -#define gt_vct_access gt_vct_access_m68k -#define gt_vtimer_access gt_vtimer_access_m68k -#define guest_phys_blocks_free guest_phys_blocks_free_m68k -#define guest_phys_blocks_init guest_phys_blocks_init_m68k -#define handle_vcvt handle_vcvt_m68k -#define handle_vminmaxnm handle_vminmaxnm_m68k -#define handle_vrint handle_vrint_m68k -#define handle_vsel handle_vsel_m68k -#define has_help_option has_help_option_m68k -#define have_bmi1 have_bmi1_m68k -#define have_bmi2 have_bmi2_m68k -#define hcr_write hcr_write_m68k -#define helper_access_check_cp_reg helper_access_check_cp_reg_m68k -#define helper_add_saturate helper_add_saturate_m68k -#define helper_add_setq helper_add_setq_m68k -#define helper_add_usaturate helper_add_usaturate_m68k -#define helper_be_ldl_cmmu helper_be_ldl_cmmu_m68k -#define helper_be_ldq_cmmu helper_be_ldq_cmmu_m68k -#define helper_be_ldq_mmu helper_be_ldq_mmu_m68k -#define helper_be_ldsl_mmu helper_be_ldsl_mmu_m68k -#define helper_be_ldsw_mmu helper_be_ldsw_mmu_m68k -#define helper_be_ldul_mmu helper_be_ldul_mmu_m68k -#define helper_be_lduw_mmu helper_be_lduw_mmu_m68k -#define helper_be_ldw_cmmu helper_be_ldw_cmmu_m68k -#define helper_be_stl_mmu helper_be_stl_mmu_m68k -#define helper_be_stq_mmu helper_be_stq_mmu_m68k -#define helper_be_stw_mmu helper_be_stw_mmu_m68k -#define helper_clear_pstate_ss helper_clear_pstate_ss_m68k -#define helper_clz_arm helper_clz_arm_m68k -#define helper_cpsr_read helper_cpsr_read_m68k -#define helper_cpsr_write helper_cpsr_write_m68k -#define helper_crc32_arm helper_crc32_arm_m68k -#define helper_crc32c helper_crc32c_m68k -#define helper_crypto_aese helper_crypto_aese_m68k -#define helper_crypto_aesmc helper_crypto_aesmc_m68k -#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_m68k -#define helper_crypto_sha1h helper_crypto_sha1h_m68k -#define helper_crypto_sha1su1 helper_crypto_sha1su1_m68k -#define helper_crypto_sha256h helper_crypto_sha256h_m68k -#define helper_crypto_sha256h2 helper_crypto_sha256h2_m68k -#define helper_crypto_sha256su0 helper_crypto_sha256su0_m68k -#define helper_crypto_sha256su1 helper_crypto_sha256su1_m68k -#define helper_dc_zva helper_dc_zva_m68k -#define helper_double_saturate helper_double_saturate_m68k -#define helper_exception_internal helper_exception_internal_m68k -#define helper_exception_return helper_exception_return_m68k -#define helper_exception_with_syndrome helper_exception_with_syndrome_m68k -#define helper_get_cp_reg helper_get_cp_reg_m68k -#define helper_get_cp_reg64 helper_get_cp_reg64_m68k -#define helper_get_r13_banked helper_get_r13_banked_m68k -#define helper_get_user_reg 
helper_get_user_reg_m68k -#define helper_iwmmxt_addcb helper_iwmmxt_addcb_m68k -#define helper_iwmmxt_addcl helper_iwmmxt_addcl_m68k -#define helper_iwmmxt_addcw helper_iwmmxt_addcw_m68k -#define helper_iwmmxt_addnb helper_iwmmxt_addnb_m68k -#define helper_iwmmxt_addnl helper_iwmmxt_addnl_m68k -#define helper_iwmmxt_addnw helper_iwmmxt_addnw_m68k -#define helper_iwmmxt_addsb helper_iwmmxt_addsb_m68k -#define helper_iwmmxt_addsl helper_iwmmxt_addsl_m68k -#define helper_iwmmxt_addsw helper_iwmmxt_addsw_m68k -#define helper_iwmmxt_addub helper_iwmmxt_addub_m68k -#define helper_iwmmxt_addul helper_iwmmxt_addul_m68k -#define helper_iwmmxt_adduw helper_iwmmxt_adduw_m68k -#define helper_iwmmxt_align helper_iwmmxt_align_m68k -#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_m68k -#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_m68k -#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_m68k -#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_m68k -#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_m68k -#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_m68k -#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_m68k -#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_m68k -#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_m68k -#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_m68k -#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_m68k -#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_m68k -#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_m68k -#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_m68k -#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_m68k -#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_m68k -#define helper_iwmmxt_insr helper_iwmmxt_insr_m68k -#define helper_iwmmxt_macsw helper_iwmmxt_macsw_m68k -#define helper_iwmmxt_macuw helper_iwmmxt_macuw_m68k -#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_m68k -#define helper_iwmmxt_madduq helper_iwmmxt_madduq_m68k -#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_m68k -#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_m68k -#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_m68k -#define helper_iwmmxt_maxub helper_iwmmxt_maxub_m68k -#define helper_iwmmxt_maxul helper_iwmmxt_maxul_m68k -#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_m68k -#define helper_iwmmxt_minsb helper_iwmmxt_minsb_m68k -#define helper_iwmmxt_minsl helper_iwmmxt_minsl_m68k -#define helper_iwmmxt_minsw helper_iwmmxt_minsw_m68k -#define helper_iwmmxt_minub helper_iwmmxt_minub_m68k -#define helper_iwmmxt_minul helper_iwmmxt_minul_m68k -#define helper_iwmmxt_minuw helper_iwmmxt_minuw_m68k -#define helper_iwmmxt_msbb helper_iwmmxt_msbb_m68k -#define helper_iwmmxt_msbl helper_iwmmxt_msbl_m68k -#define helper_iwmmxt_msbw helper_iwmmxt_msbw_m68k -#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_m68k -#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_m68k -#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_m68k -#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_m68k -#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_m68k -#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_m68k -#define helper_iwmmxt_mululw helper_iwmmxt_mululw_m68k -#define helper_iwmmxt_packsl helper_iwmmxt_packsl_m68k -#define helper_iwmmxt_packsq helper_iwmmxt_packsq_m68k -#define helper_iwmmxt_packsw helper_iwmmxt_packsw_m68k -#define helper_iwmmxt_packul helper_iwmmxt_packul_m68k -#define helper_iwmmxt_packuq helper_iwmmxt_packuq_m68k -#define helper_iwmmxt_packuw helper_iwmmxt_packuw_m68k -#define helper_iwmmxt_rorl helper_iwmmxt_rorl_m68k -#define helper_iwmmxt_rorq helper_iwmmxt_rorq_m68k -#define 
helper_iwmmxt_rorw helper_iwmmxt_rorw_m68k -#define helper_iwmmxt_sadb helper_iwmmxt_sadb_m68k -#define helper_iwmmxt_sadw helper_iwmmxt_sadw_m68k -#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_m68k -#define helper_iwmmxt_shufh helper_iwmmxt_shufh_m68k -#define helper_iwmmxt_slll helper_iwmmxt_slll_m68k -#define helper_iwmmxt_sllq helper_iwmmxt_sllq_m68k -#define helper_iwmmxt_sllw helper_iwmmxt_sllw_m68k -#define helper_iwmmxt_sral helper_iwmmxt_sral_m68k -#define helper_iwmmxt_sraq helper_iwmmxt_sraq_m68k -#define helper_iwmmxt_sraw helper_iwmmxt_sraw_m68k -#define helper_iwmmxt_srll helper_iwmmxt_srll_m68k -#define helper_iwmmxt_srlq helper_iwmmxt_srlq_m68k -#define helper_iwmmxt_srlw helper_iwmmxt_srlw_m68k -#define helper_iwmmxt_subnb helper_iwmmxt_subnb_m68k -#define helper_iwmmxt_subnl helper_iwmmxt_subnl_m68k -#define helper_iwmmxt_subnw helper_iwmmxt_subnw_m68k -#define helper_iwmmxt_subsb helper_iwmmxt_subsb_m68k -#define helper_iwmmxt_subsl helper_iwmmxt_subsl_m68k -#define helper_iwmmxt_subsw helper_iwmmxt_subsw_m68k -#define helper_iwmmxt_subub helper_iwmmxt_subub_m68k -#define helper_iwmmxt_subul helper_iwmmxt_subul_m68k -#define helper_iwmmxt_subuw helper_iwmmxt_subuw_m68k -#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_m68k -#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_m68k -#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_m68k -#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_m68k -#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_m68k -#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_m68k -#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_m68k -#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_m68k -#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_m68k -#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_m68k -#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_m68k -#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_m68k -#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_m68k -#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_m68k -#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_m68k -#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_m68k -#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_m68k -#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_m68k -#define helper_ldb_cmmu helper_ldb_cmmu_m68k -#define helper_ldb_mmu helper_ldb_mmu_m68k -#define helper_ldl_cmmu helper_ldl_cmmu_m68k -#define helper_ldl_mmu helper_ldl_mmu_m68k -#define helper_ldq_cmmu helper_ldq_cmmu_m68k -#define helper_ldq_mmu helper_ldq_mmu_m68k -#define helper_ldw_cmmu helper_ldw_cmmu_m68k -#define helper_ldw_mmu helper_ldw_mmu_m68k -#define helper_le_ldl_cmmu helper_le_ldl_cmmu_m68k -#define helper_le_ldq_cmmu helper_le_ldq_cmmu_m68k -#define helper_le_ldq_mmu helper_le_ldq_mmu_m68k -#define helper_le_ldsl_mmu helper_le_ldsl_mmu_m68k -#define helper_le_ldsw_mmu helper_le_ldsw_mmu_m68k -#define helper_le_ldul_mmu helper_le_ldul_mmu_m68k -#define helper_le_lduw_mmu helper_le_lduw_mmu_m68k -#define helper_le_ldw_cmmu helper_le_ldw_cmmu_m68k -#define helper_le_stl_mmu helper_le_stl_mmu_m68k -#define helper_le_stq_mmu helper_le_stq_mmu_m68k -#define helper_le_stw_mmu helper_le_stw_mmu_m68k -#define helper_msr_i_pstate helper_msr_i_pstate_m68k -#define helper_neon_abd_f32 helper_neon_abd_f32_m68k -#define helper_neon_abdl_s16 helper_neon_abdl_s16_m68k -#define helper_neon_abdl_s32 helper_neon_abdl_s32_m68k -#define helper_neon_abdl_s64 helper_neon_abdl_s64_m68k -#define helper_neon_abdl_u16 
helper_neon_abdl_u16_m68k -#define helper_neon_abdl_u32 helper_neon_abdl_u32_m68k -#define helper_neon_abdl_u64 helper_neon_abdl_u64_m68k -#define helper_neon_abd_s16 helper_neon_abd_s16_m68k -#define helper_neon_abd_s32 helper_neon_abd_s32_m68k -#define helper_neon_abd_s8 helper_neon_abd_s8_m68k -#define helper_neon_abd_u16 helper_neon_abd_u16_m68k -#define helper_neon_abd_u32 helper_neon_abd_u32_m68k -#define helper_neon_abd_u8 helper_neon_abd_u8_m68k -#define helper_neon_abs_s16 helper_neon_abs_s16_m68k -#define helper_neon_abs_s8 helper_neon_abs_s8_m68k -#define helper_neon_acge_f32 helper_neon_acge_f32_m68k -#define helper_neon_acge_f64 helper_neon_acge_f64_m68k -#define helper_neon_acgt_f32 helper_neon_acgt_f32_m68k -#define helper_neon_acgt_f64 helper_neon_acgt_f64_m68k -#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_m68k -#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_m68k -#define helper_neon_addl_u16 helper_neon_addl_u16_m68k -#define helper_neon_addl_u32 helper_neon_addl_u32_m68k -#define helper_neon_add_u16 helper_neon_add_u16_m68k -#define helper_neon_add_u8 helper_neon_add_u8_m68k -#define helper_neon_ceq_f32 helper_neon_ceq_f32_m68k -#define helper_neon_ceq_u16 helper_neon_ceq_u16_m68k -#define helper_neon_ceq_u32 helper_neon_ceq_u32_m68k -#define helper_neon_ceq_u8 helper_neon_ceq_u8_m68k -#define helper_neon_cge_f32 helper_neon_cge_f32_m68k -#define helper_neon_cge_s16 helper_neon_cge_s16_m68k -#define helper_neon_cge_s32 helper_neon_cge_s32_m68k -#define helper_neon_cge_s8 helper_neon_cge_s8_m68k -#define helper_neon_cge_u16 helper_neon_cge_u16_m68k -#define helper_neon_cge_u32 helper_neon_cge_u32_m68k -#define helper_neon_cge_u8 helper_neon_cge_u8_m68k -#define helper_neon_cgt_f32 helper_neon_cgt_f32_m68k -#define helper_neon_cgt_s16 helper_neon_cgt_s16_m68k -#define helper_neon_cgt_s32 helper_neon_cgt_s32_m68k -#define helper_neon_cgt_s8 helper_neon_cgt_s8_m68k -#define helper_neon_cgt_u16 helper_neon_cgt_u16_m68k -#define helper_neon_cgt_u32 helper_neon_cgt_u32_m68k -#define helper_neon_cgt_u8 helper_neon_cgt_u8_m68k -#define helper_neon_cls_s16 helper_neon_cls_s16_m68k -#define helper_neon_cls_s32 helper_neon_cls_s32_m68k -#define helper_neon_cls_s8 helper_neon_cls_s8_m68k -#define helper_neon_clz_u16 helper_neon_clz_u16_m68k -#define helper_neon_clz_u8 helper_neon_clz_u8_m68k -#define helper_neon_cnt_u8 helper_neon_cnt_u8_m68k -#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_m68k -#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_m68k -#define helper_neon_hadd_s16 helper_neon_hadd_s16_m68k -#define helper_neon_hadd_s32 helper_neon_hadd_s32_m68k -#define helper_neon_hadd_s8 helper_neon_hadd_s8_m68k -#define helper_neon_hadd_u16 helper_neon_hadd_u16_m68k -#define helper_neon_hadd_u32 helper_neon_hadd_u32_m68k -#define helper_neon_hadd_u8 helper_neon_hadd_u8_m68k -#define helper_neon_hsub_s16 helper_neon_hsub_s16_m68k -#define helper_neon_hsub_s32 helper_neon_hsub_s32_m68k -#define helper_neon_hsub_s8 helper_neon_hsub_s8_m68k -#define helper_neon_hsub_u16 helper_neon_hsub_u16_m68k -#define helper_neon_hsub_u32 helper_neon_hsub_u32_m68k -#define helper_neon_hsub_u8 helper_neon_hsub_u8_m68k -#define helper_neon_max_s16 helper_neon_max_s16_m68k -#define helper_neon_max_s32 helper_neon_max_s32_m68k -#define helper_neon_max_s8 helper_neon_max_s8_m68k -#define helper_neon_max_u16 helper_neon_max_u16_m68k -#define helper_neon_max_u32 helper_neon_max_u32_m68k -#define helper_neon_max_u8 
helper_neon_max_u8_m68k -#define helper_neon_min_s16 helper_neon_min_s16_m68k -#define helper_neon_min_s32 helper_neon_min_s32_m68k -#define helper_neon_min_s8 helper_neon_min_s8_m68k -#define helper_neon_min_u16 helper_neon_min_u16_m68k -#define helper_neon_min_u32 helper_neon_min_u32_m68k -#define helper_neon_min_u8 helper_neon_min_u8_m68k -#define helper_neon_mull_p8 helper_neon_mull_p8_m68k -#define helper_neon_mull_s16 helper_neon_mull_s16_m68k -#define helper_neon_mull_s8 helper_neon_mull_s8_m68k -#define helper_neon_mull_u16 helper_neon_mull_u16_m68k -#define helper_neon_mull_u8 helper_neon_mull_u8_m68k -#define helper_neon_mul_p8 helper_neon_mul_p8_m68k -#define helper_neon_mul_u16 helper_neon_mul_u16_m68k -#define helper_neon_mul_u8 helper_neon_mul_u8_m68k -#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_m68k -#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_m68k -#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_m68k -#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_m68k -#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_m68k -#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_m68k -#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_m68k -#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_m68k -#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_m68k -#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_m68k -#define helper_neon_narrow_u16 helper_neon_narrow_u16_m68k -#define helper_neon_narrow_u8 helper_neon_narrow_u8_m68k -#define helper_neon_negl_u16 helper_neon_negl_u16_m68k -#define helper_neon_negl_u32 helper_neon_negl_u32_m68k -#define helper_neon_paddl_u16 helper_neon_paddl_u16_m68k -#define helper_neon_paddl_u32 helper_neon_paddl_u32_m68k -#define helper_neon_padd_u16 helper_neon_padd_u16_m68k -#define helper_neon_padd_u8 helper_neon_padd_u8_m68k -#define helper_neon_pmax_s16 helper_neon_pmax_s16_m68k -#define helper_neon_pmax_s8 helper_neon_pmax_s8_m68k -#define helper_neon_pmax_u16 helper_neon_pmax_u16_m68k -#define helper_neon_pmax_u8 helper_neon_pmax_u8_m68k -#define helper_neon_pmin_s16 helper_neon_pmin_s16_m68k -#define helper_neon_pmin_s8 helper_neon_pmin_s8_m68k -#define helper_neon_pmin_u16 helper_neon_pmin_u16_m68k -#define helper_neon_pmin_u8 helper_neon_pmin_u8_m68k -#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_m68k -#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_m68k -#define helper_neon_qabs_s16 helper_neon_qabs_s16_m68k -#define helper_neon_qabs_s32 helper_neon_qabs_s32_m68k -#define helper_neon_qabs_s64 helper_neon_qabs_s64_m68k -#define helper_neon_qabs_s8 helper_neon_qabs_s8_m68k -#define helper_neon_qadd_s16 helper_neon_qadd_s16_m68k -#define helper_neon_qadd_s32 helper_neon_qadd_s32_m68k -#define helper_neon_qadd_s64 helper_neon_qadd_s64_m68k -#define helper_neon_qadd_s8 helper_neon_qadd_s8_m68k -#define helper_neon_qadd_u16 helper_neon_qadd_u16_m68k -#define helper_neon_qadd_u32 helper_neon_qadd_u32_m68k -#define helper_neon_qadd_u64 helper_neon_qadd_u64_m68k -#define helper_neon_qadd_u8 helper_neon_qadd_u8_m68k -#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_m68k -#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_m68k -#define helper_neon_qneg_s16 helper_neon_qneg_s16_m68k -#define helper_neon_qneg_s32 helper_neon_qneg_s32_m68k -#define helper_neon_qneg_s64 helper_neon_qneg_s64_m68k -#define helper_neon_qneg_s8 helper_neon_qneg_s8_m68k -#define helper_neon_qrdmulh_s16 
helper_neon_qrdmulh_s16_m68k -#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_m68k -#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_m68k -#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_m68k -#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_m68k -#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_m68k -#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_m68k -#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_m68k -#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_m68k -#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_m68k -#define helper_neon_qshl_s16 helper_neon_qshl_s16_m68k -#define helper_neon_qshl_s32 helper_neon_qshl_s32_m68k -#define helper_neon_qshl_s64 helper_neon_qshl_s64_m68k -#define helper_neon_qshl_s8 helper_neon_qshl_s8_m68k -#define helper_neon_qshl_u16 helper_neon_qshl_u16_m68k -#define helper_neon_qshl_u32 helper_neon_qshl_u32_m68k -#define helper_neon_qshl_u64 helper_neon_qshl_u64_m68k -#define helper_neon_qshl_u8 helper_neon_qshl_u8_m68k -#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_m68k -#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_m68k -#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_m68k -#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_m68k -#define helper_neon_qsub_s16 helper_neon_qsub_s16_m68k -#define helper_neon_qsub_s32 helper_neon_qsub_s32_m68k -#define helper_neon_qsub_s64 helper_neon_qsub_s64_m68k -#define helper_neon_qsub_s8 helper_neon_qsub_s8_m68k -#define helper_neon_qsub_u16 helper_neon_qsub_u16_m68k -#define helper_neon_qsub_u32 helper_neon_qsub_u32_m68k -#define helper_neon_qsub_u64 helper_neon_qsub_u64_m68k -#define helper_neon_qsub_u8 helper_neon_qsub_u8_m68k -#define helper_neon_qunzip16 helper_neon_qunzip16_m68k -#define helper_neon_qunzip32 helper_neon_qunzip32_m68k -#define helper_neon_qunzip8 helper_neon_qunzip8_m68k -#define helper_neon_qzip16 helper_neon_qzip16_m68k -#define helper_neon_qzip32 helper_neon_qzip32_m68k -#define helper_neon_qzip8 helper_neon_qzip8_m68k -#define helper_neon_rbit_u8 helper_neon_rbit_u8_m68k -#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_m68k -#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_m68k -#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_m68k -#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_m68k -#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_m68k -#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_m68k -#define helper_neon_rshl_s16 helper_neon_rshl_s16_m68k -#define helper_neon_rshl_s32 helper_neon_rshl_s32_m68k -#define helper_neon_rshl_s64 helper_neon_rshl_s64_m68k -#define helper_neon_rshl_s8 helper_neon_rshl_s8_m68k -#define helper_neon_rshl_u16 helper_neon_rshl_u16_m68k -#define helper_neon_rshl_u32 helper_neon_rshl_u32_m68k -#define helper_neon_rshl_u64 helper_neon_rshl_u64_m68k -#define helper_neon_rshl_u8 helper_neon_rshl_u8_m68k -#define helper_neon_shl_s16 helper_neon_shl_s16_m68k -#define helper_neon_shl_s32 helper_neon_shl_s32_m68k -#define helper_neon_shl_s64 helper_neon_shl_s64_m68k -#define helper_neon_shl_s8 helper_neon_shl_s8_m68k -#define helper_neon_shl_u16 helper_neon_shl_u16_m68k -#define helper_neon_shl_u32 helper_neon_shl_u32_m68k -#define helper_neon_shl_u64 helper_neon_shl_u64_m68k -#define helper_neon_shl_u8 helper_neon_shl_u8_m68k -#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_m68k -#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_m68k -#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_m68k -#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_m68k -#define helper_neon_subl_u16 helper_neon_subl_u16_m68k -#define 
helper_neon_subl_u32 helper_neon_subl_u32_m68k -#define helper_neon_sub_u16 helper_neon_sub_u16_m68k -#define helper_neon_sub_u8 helper_neon_sub_u8_m68k -#define helper_neon_tbl helper_neon_tbl_m68k -#define helper_neon_tst_u16 helper_neon_tst_u16_m68k -#define helper_neon_tst_u32 helper_neon_tst_u32_m68k -#define helper_neon_tst_u8 helper_neon_tst_u8_m68k -#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_m68k -#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_m68k -#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_m68k -#define helper_neon_unzip16 helper_neon_unzip16_m68k -#define helper_neon_unzip8 helper_neon_unzip8_m68k -#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_m68k -#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_m68k -#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_m68k -#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_m68k -#define helper_neon_widen_s16 helper_neon_widen_s16_m68k -#define helper_neon_widen_s8 helper_neon_widen_s8_m68k -#define helper_neon_widen_u16 helper_neon_widen_u16_m68k -#define helper_neon_widen_u8 helper_neon_widen_u8_m68k -#define helper_neon_zip16 helper_neon_zip16_m68k -#define helper_neon_zip8 helper_neon_zip8_m68k -#define helper_pre_hvc helper_pre_hvc_m68k -#define helper_pre_smc helper_pre_smc_m68k -#define helper_qadd16 helper_qadd16_m68k -#define helper_qadd8 helper_qadd8_m68k -#define helper_qaddsubx helper_qaddsubx_m68k -#define helper_qsub16 helper_qsub16_m68k -#define helper_qsub8 helper_qsub8_m68k -#define helper_qsubaddx helper_qsubaddx_m68k -#define helper_rbit helper_rbit_m68k -#define helper_recpe_f32 helper_recpe_f32_m68k -#define helper_recpe_f64 helper_recpe_f64_m68k -#define helper_recpe_u32 helper_recpe_u32_m68k -#define helper_recps_f32 helper_recps_f32_m68k -#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_m68k -#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_m68k -#define helper_ret_ldub_mmu helper_ret_ldub_mmu_m68k -#define helper_ret_stb_mmu helper_ret_stb_mmu_m68k -#define helper_rintd helper_rintd_m68k -#define helper_rintd_exact helper_rintd_exact_m68k -#define helper_rints helper_rints_m68k -#define helper_rints_exact helper_rints_exact_m68k -#define helper_ror_cc helper_ror_cc_m68k -#define helper_rsqrte_f32 helper_rsqrte_f32_m68k -#define helper_rsqrte_f64 helper_rsqrte_f64_m68k -#define helper_rsqrte_u32 helper_rsqrte_u32_m68k -#define helper_rsqrts_f32 helper_rsqrts_f32_m68k -#define helper_sadd16 helper_sadd16_m68k -#define helper_sadd8 helper_sadd8_m68k -#define helper_saddsubx helper_saddsubx_m68k -#define helper_sar_cc helper_sar_cc_m68k -#define helper_sdiv helper_sdiv_m68k -#define helper_sel_flags helper_sel_flags_m68k -#define helper_set_cp_reg helper_set_cp_reg_m68k -#define helper_set_cp_reg64 helper_set_cp_reg64_m68k -#define helper_set_neon_rmode helper_set_neon_rmode_m68k -#define helper_set_r13_banked helper_set_r13_banked_m68k -#define helper_set_rmode helper_set_rmode_m68k -#define helper_set_user_reg helper_set_user_reg_m68k -#define helper_shadd16 helper_shadd16_m68k -#define helper_shadd8 helper_shadd8_m68k -#define helper_shaddsubx helper_shaddsubx_m68k -#define helper_shl_cc helper_shl_cc_m68k -#define helper_shr_cc helper_shr_cc_m68k -#define helper_shsub16 helper_shsub16_m68k -#define helper_shsub8 helper_shsub8_m68k -#define helper_shsubaddx helper_shsubaddx_m68k -#define helper_ssat helper_ssat_m68k -#define helper_ssat16 helper_ssat16_m68k -#define helper_ssub16 helper_ssub16_m68k -#define helper_ssub8 helper_ssub8_m68k -#define helper_ssubaddx 
helper_ssubaddx_m68k -#define helper_stb_mmu helper_stb_mmu_m68k -#define helper_stl_mmu helper_stl_mmu_m68k -#define helper_stq_mmu helper_stq_mmu_m68k -#define helper_stw_mmu helper_stw_mmu_m68k -#define helper_sub_saturate helper_sub_saturate_m68k -#define helper_sub_usaturate helper_sub_usaturate_m68k -#define helper_sxtb16 helper_sxtb16_m68k -#define helper_uadd16 helper_uadd16_m68k -#define helper_uadd8 helper_uadd8_m68k -#define helper_uaddsubx helper_uaddsubx_m68k -#define helper_udiv helper_udiv_m68k -#define helper_uhadd16 helper_uhadd16_m68k -#define helper_uhadd8 helper_uhadd8_m68k -#define helper_uhaddsubx helper_uhaddsubx_m68k -#define helper_uhsub16 helper_uhsub16_m68k -#define helper_uhsub8 helper_uhsub8_m68k -#define helper_uhsubaddx helper_uhsubaddx_m68k -#define helper_uqadd16 helper_uqadd16_m68k -#define helper_uqadd8 helper_uqadd8_m68k -#define helper_uqaddsubx helper_uqaddsubx_m68k -#define helper_uqsub16 helper_uqsub16_m68k -#define helper_uqsub8 helper_uqsub8_m68k -#define helper_uqsubaddx helper_uqsubaddx_m68k -#define helper_usad8 helper_usad8_m68k -#define helper_usat helper_usat_m68k -#define helper_usat16 helper_usat16_m68k -#define helper_usub16 helper_usub16_m68k -#define helper_usub8 helper_usub8_m68k -#define helper_usubaddx helper_usubaddx_m68k -#define helper_uxtb16 helper_uxtb16_m68k -#define helper_v7m_mrs helper_v7m_mrs_m68k -#define helper_v7m_msr helper_v7m_msr_m68k -#define helper_vfp_absd helper_vfp_absd_m68k -#define helper_vfp_abss helper_vfp_abss_m68k -#define helper_vfp_addd helper_vfp_addd_m68k -#define helper_vfp_adds helper_vfp_adds_m68k -#define helper_vfp_cmpd helper_vfp_cmpd_m68k -#define helper_vfp_cmped helper_vfp_cmped_m68k -#define helper_vfp_cmpes helper_vfp_cmpes_m68k -#define helper_vfp_cmps helper_vfp_cmps_m68k -#define helper_vfp_divd helper_vfp_divd_m68k -#define helper_vfp_divs helper_vfp_divs_m68k -#define helper_vfp_fcvtds helper_vfp_fcvtds_m68k -#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_m68k -#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_m68k -#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_m68k -#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_m68k -#define helper_vfp_fcvtsd helper_vfp_fcvtsd_m68k -#define helper_vfp_get_fpscr helper_vfp_get_fpscr_m68k -#define helper_vfp_maxd helper_vfp_maxd_m68k -#define helper_vfp_maxnumd helper_vfp_maxnumd_m68k -#define helper_vfp_maxnums helper_vfp_maxnums_m68k -#define helper_vfp_maxs helper_vfp_maxs_m68k -#define helper_vfp_mind helper_vfp_mind_m68k -#define helper_vfp_minnumd helper_vfp_minnumd_m68k -#define helper_vfp_minnums helper_vfp_minnums_m68k -#define helper_vfp_mins helper_vfp_mins_m68k -#define helper_vfp_muladdd helper_vfp_muladdd_m68k -#define helper_vfp_muladds helper_vfp_muladds_m68k -#define helper_vfp_muld helper_vfp_muld_m68k -#define helper_vfp_muls helper_vfp_muls_m68k -#define helper_vfp_negd helper_vfp_negd_m68k -#define helper_vfp_negs helper_vfp_negs_m68k -#define helper_vfp_set_fpscr helper_vfp_set_fpscr_m68k -#define helper_vfp_shtod helper_vfp_shtod_m68k -#define helper_vfp_shtos helper_vfp_shtos_m68k -#define helper_vfp_sitod helper_vfp_sitod_m68k -#define helper_vfp_sitos helper_vfp_sitos_m68k -#define helper_vfp_sltod helper_vfp_sltod_m68k -#define helper_vfp_sltos helper_vfp_sltos_m68k -#define helper_vfp_sqrtd helper_vfp_sqrtd_m68k -#define helper_vfp_sqrts helper_vfp_sqrts_m68k -#define helper_vfp_sqtod helper_vfp_sqtod_m68k -#define helper_vfp_sqtos helper_vfp_sqtos_m68k -#define 
helper_vfp_subd helper_vfp_subd_m68k -#define helper_vfp_subs helper_vfp_subs_m68k -#define helper_vfp_toshd helper_vfp_toshd_m68k -#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_m68k -#define helper_vfp_toshs helper_vfp_toshs_m68k -#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_m68k -#define helper_vfp_tosid helper_vfp_tosid_m68k -#define helper_vfp_tosis helper_vfp_tosis_m68k -#define helper_vfp_tosizd helper_vfp_tosizd_m68k -#define helper_vfp_tosizs helper_vfp_tosizs_m68k -#define helper_vfp_tosld helper_vfp_tosld_m68k -#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_m68k -#define helper_vfp_tosls helper_vfp_tosls_m68k -#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_m68k -#define helper_vfp_tosqd helper_vfp_tosqd_m68k -#define helper_vfp_tosqs helper_vfp_tosqs_m68k -#define helper_vfp_touhd helper_vfp_touhd_m68k -#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_m68k -#define helper_vfp_touhs helper_vfp_touhs_m68k -#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_m68k -#define helper_vfp_touid helper_vfp_touid_m68k -#define helper_vfp_touis helper_vfp_touis_m68k -#define helper_vfp_touizd helper_vfp_touizd_m68k -#define helper_vfp_touizs helper_vfp_touizs_m68k -#define helper_vfp_tould helper_vfp_tould_m68k -#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_m68k -#define helper_vfp_touls helper_vfp_touls_m68k -#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_m68k -#define helper_vfp_touqd helper_vfp_touqd_m68k -#define helper_vfp_touqs helper_vfp_touqs_m68k -#define helper_vfp_uhtod helper_vfp_uhtod_m68k -#define helper_vfp_uhtos helper_vfp_uhtos_m68k -#define helper_vfp_uitod helper_vfp_uitod_m68k -#define helper_vfp_uitos helper_vfp_uitos_m68k -#define helper_vfp_ultod helper_vfp_ultod_m68k -#define helper_vfp_ultos helper_vfp_ultos_m68k -#define helper_vfp_uqtod helper_vfp_uqtod_m68k -#define helper_vfp_uqtos helper_vfp_uqtos_m68k -#define helper_wfe helper_wfe_m68k -#define helper_wfi helper_wfi_m68k -#define hex2decimal hex2decimal_m68k -#define hw_breakpoint_update hw_breakpoint_update_m68k -#define hw_breakpoint_update_all hw_breakpoint_update_all_m68k -#define hw_watchpoint_update hw_watchpoint_update_m68k -#define hw_watchpoint_update_all hw_watchpoint_update_all_m68k -#define _init _init_m68k -#define init_cpreg_list init_cpreg_list_m68k -#define init_lists init_lists_m68k -#define input_type_enum input_type_enum_m68k -#define int128_2_64 int128_2_64_m68k -#define int128_add int128_add_m68k -#define int128_addto int128_addto_m68k -#define int128_and int128_and_m68k -#define int128_eq int128_eq_m68k -#define int128_ge int128_ge_m68k -#define int128_get64 int128_get64_m68k -#define int128_gt int128_gt_m68k -#define int128_le int128_le_m68k -#define int128_lt int128_lt_m68k -#define int128_make64 int128_make64_m68k -#define int128_max int128_max_m68k -#define int128_min int128_min_m68k -#define int128_ne int128_ne_m68k -#define int128_neg int128_neg_m68k -#define int128_nz int128_nz_m68k -#define int128_rshift int128_rshift_m68k -#define int128_sub int128_sub_m68k -#define int128_subfrom int128_subfrom_m68k -#define int128_zero int128_zero_m68k -#define int16_to_float32 int16_to_float32_m68k -#define int16_to_float64 int16_to_float64_m68k -#define int32_to_float128 int32_to_float128_m68k -#define int32_to_float32 int32_to_float32_m68k -#define int32_to_float64 int32_to_float64_m68k -#define int32_to_floatx80 
int32_to_floatx80_m68k -#define int64_to_float128 int64_to_float128_m68k -#define int64_to_float32 int64_to_float32_m68k -#define int64_to_float64 int64_to_float64_m68k -#define int64_to_floatx80 int64_to_floatx80_m68k -#define invalidate_and_set_dirty invalidate_and_set_dirty_m68k -#define invalidate_page_bitmap invalidate_page_bitmap_m68k -#define io_mem_read io_mem_read_m68k -#define io_mem_write io_mem_write_m68k -#define io_readb io_readb_m68k -#define io_readl io_readl_m68k -#define io_readq io_readq_m68k -#define io_readw io_readw_m68k -#define iotlb_to_region iotlb_to_region_m68k -#define io_writeb io_writeb_m68k -#define io_writel io_writel_m68k -#define io_writeq io_writeq_m68k -#define io_writew io_writew_m68k -#define is_a64 is_a64_m68k -#define is_help_option is_help_option_m68k -#define isr_read isr_read_m68k -#define is_valid_option_list is_valid_option_list_m68k -#define iwmmxt_load_creg iwmmxt_load_creg_m68k -#define iwmmxt_load_reg iwmmxt_load_reg_m68k -#define iwmmxt_store_creg iwmmxt_store_creg_m68k -#define iwmmxt_store_reg iwmmxt_store_reg_m68k -#define __jit_debug_descriptor __jit_debug_descriptor_m68k -#define __jit_debug_register_code __jit_debug_register_code_m68k -#define kvm_to_cpreg_id kvm_to_cpreg_id_m68k -#define last_ram_offset last_ram_offset_m68k -#define ldl_be_p ldl_be_p_m68k -#define ldl_be_phys ldl_be_phys_m68k -#define ldl_he_p ldl_he_p_m68k -#define ldl_le_p ldl_le_p_m68k -#define ldl_le_phys ldl_le_phys_m68k -#define ldl_phys ldl_phys_m68k -#define ldl_phys_internal ldl_phys_internal_m68k -#define ldq_be_p ldq_be_p_m68k -#define ldq_be_phys ldq_be_phys_m68k -#define ldq_he_p ldq_he_p_m68k -#define ldq_le_p ldq_le_p_m68k -#define ldq_le_phys ldq_le_phys_m68k -#define ldq_phys ldq_phys_m68k -#define ldq_phys_internal ldq_phys_internal_m68k -#define ldst_name ldst_name_m68k -#define ldub_p ldub_p_m68k -#define ldub_phys ldub_phys_m68k -#define lduw_be_p lduw_be_p_m68k -#define lduw_be_phys lduw_be_phys_m68k -#define lduw_he_p lduw_he_p_m68k -#define lduw_le_p lduw_le_p_m68k -#define lduw_le_phys lduw_le_phys_m68k -#define lduw_phys lduw_phys_m68k -#define lduw_phys_internal lduw_phys_internal_m68k -#define le128 le128_m68k -#define linked_bp_matches linked_bp_matches_m68k -#define listener_add_address_space listener_add_address_space_m68k -#define load_cpu_offset load_cpu_offset_m68k -#define load_reg load_reg_m68k -#define load_reg_var load_reg_var_m68k -#define log_cpu_state log_cpu_state_m68k -#define lpae_cp_reginfo lpae_cp_reginfo_m68k -#define lt128 lt128_m68k -#define machine_class_init machine_class_init_m68k -#define machine_finalize machine_finalize_m68k -#define machine_info machine_info_m68k -#define machine_initfn machine_initfn_m68k -#define machine_register_types machine_register_types_m68k -#define machvirt_init machvirt_init_m68k -#define machvirt_machine_init machvirt_machine_init_m68k -#define maj maj_m68k -#define mapping_conflict mapping_conflict_m68k -#define mapping_contiguous mapping_contiguous_m68k -#define mapping_have_same_region mapping_have_same_region_m68k -#define mapping_merge mapping_merge_m68k -#define mem_add mem_add_m68k -#define mem_begin mem_begin_m68k -#define mem_commit mem_commit_m68k -#define memory_access_is_direct memory_access_is_direct_m68k -#define memory_access_size memory_access_size_m68k -#define memory_init memory_init_m68k -#define memory_listener_match memory_listener_match_m68k -#define memory_listener_register memory_listener_register_m68k -#define memory_listener_unregister 
memory_listener_unregister_m68k -#define memory_map_init memory_map_init_m68k -#define memory_mapping_filter memory_mapping_filter_m68k -#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_m68k -#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_m68k -#define memory_mapping_list_free memory_mapping_list_free_m68k -#define memory_mapping_list_init memory_mapping_list_init_m68k -#define memory_region_access_valid memory_region_access_valid_m68k -#define memory_region_add_subregion memory_region_add_subregion_m68k -#define memory_region_add_subregion_common memory_region_add_subregion_common_m68k -#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_m68k -#define memory_region_big_endian memory_region_big_endian_m68k -#define memory_region_clear_pending memory_region_clear_pending_m68k -#define memory_region_del_subregion memory_region_del_subregion_m68k -#define memory_region_destructor_alias memory_region_destructor_alias_m68k -#define memory_region_destructor_none memory_region_destructor_none_m68k -#define memory_region_destructor_ram memory_region_destructor_ram_m68k -#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_m68k -#define memory_region_dispatch_read memory_region_dispatch_read_m68k -#define memory_region_dispatch_read1 memory_region_dispatch_read1_m68k -#define memory_region_dispatch_write memory_region_dispatch_write_m68k -#define memory_region_escape_name memory_region_escape_name_m68k -#define memory_region_finalize memory_region_finalize_m68k -#define memory_region_find memory_region_find_m68k -#define memory_region_get_addr memory_region_get_addr_m68k -#define memory_region_get_alignment memory_region_get_alignment_m68k -#define memory_region_get_container memory_region_get_container_m68k -#define memory_region_get_fd memory_region_get_fd_m68k -#define memory_region_get_may_overlap memory_region_get_may_overlap_m68k -#define memory_region_get_priority memory_region_get_priority_m68k -#define memory_region_get_ram_addr memory_region_get_ram_addr_m68k -#define memory_region_get_ram_ptr memory_region_get_ram_ptr_m68k -#define memory_region_get_size memory_region_get_size_m68k -#define memory_region_info memory_region_info_m68k -#define memory_region_init memory_region_init_m68k -#define memory_region_init_alias memory_region_init_alias_m68k -#define memory_region_initfn memory_region_initfn_m68k -#define memory_region_init_io memory_region_init_io_m68k -#define memory_region_init_ram memory_region_init_ram_m68k -#define memory_region_init_ram_ptr memory_region_init_ram_ptr_m68k -#define memory_region_init_reservation memory_region_init_reservation_m68k -#define memory_region_is_iommu memory_region_is_iommu_m68k -#define memory_region_is_logging memory_region_is_logging_m68k -#define memory_region_is_mapped memory_region_is_mapped_m68k -#define memory_region_is_ram memory_region_is_ram_m68k -#define memory_region_is_rom memory_region_is_rom_m68k -#define memory_region_is_romd memory_region_is_romd_m68k -#define memory_region_is_skip_dump memory_region_is_skip_dump_m68k -#define memory_region_is_unassigned memory_region_is_unassigned_m68k -#define memory_region_name memory_region_name_m68k -#define memory_region_need_escape memory_region_need_escape_m68k -#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_m68k -#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_m68k -#define memory_region_present 
memory_region_present_m68k -#define memory_region_read_accessor memory_region_read_accessor_m68k -#define memory_region_readd_subregion memory_region_readd_subregion_m68k -#define memory_region_ref memory_region_ref_m68k -#define memory_region_resolve_container memory_region_resolve_container_m68k -#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_m68k -#define memory_region_section_get_iotlb memory_region_section_get_iotlb_m68k -#define memory_region_set_address memory_region_set_address_m68k -#define memory_region_set_alias_offset memory_region_set_alias_offset_m68k -#define memory_region_set_enabled memory_region_set_enabled_m68k -#define memory_region_set_readonly memory_region_set_readonly_m68k -#define memory_region_set_skip_dump memory_region_set_skip_dump_m68k -#define memory_region_size memory_region_size_m68k -#define memory_region_to_address_space memory_region_to_address_space_m68k -#define memory_region_transaction_begin memory_region_transaction_begin_m68k -#define memory_region_transaction_commit memory_region_transaction_commit_m68k -#define memory_region_unref memory_region_unref_m68k -#define memory_region_update_container_subregions memory_region_update_container_subregions_m68k -#define memory_region_write_accessor memory_region_write_accessor_m68k -#define memory_region_wrong_endianness memory_region_wrong_endianness_m68k -#define memory_try_enable_merging memory_try_enable_merging_m68k -#define module_call_init module_call_init_m68k -#define module_load module_load_m68k -#define mpidr_cp_reginfo mpidr_cp_reginfo_m68k -#define mpidr_read mpidr_read_m68k -#define msr_mask msr_mask_m68k -#define mul128By64To192 mul128By64To192_m68k -#define mul128To256 mul128To256_m68k -#define mul64To128 mul64To128_m68k -#define muldiv64 muldiv64_m68k -#define neon_2rm_is_float_op neon_2rm_is_float_op_m68k -#define neon_2rm_sizes neon_2rm_sizes_m68k -#define neon_3r_sizes neon_3r_sizes_m68k -#define neon_get_scalar neon_get_scalar_m68k -#define neon_load_reg neon_load_reg_m68k -#define neon_load_reg64 neon_load_reg64_m68k -#define neon_load_scratch neon_load_scratch_m68k -#define neon_ls_element_type neon_ls_element_type_m68k -#define neon_reg_offset neon_reg_offset_m68k -#define neon_store_reg neon_store_reg_m68k -#define neon_store_reg64 neon_store_reg64_m68k -#define neon_store_scratch neon_store_scratch_m68k -#define new_ldst_label new_ldst_label_m68k -#define next_list next_list_m68k -#define normalizeFloat128Subnormal normalizeFloat128Subnormal_m68k -#define normalizeFloat16Subnormal normalizeFloat16Subnormal_m68k -#define normalizeFloat32Subnormal normalizeFloat32Subnormal_m68k -#define normalizeFloat64Subnormal normalizeFloat64Subnormal_m68k -#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_m68k -#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_m68k -#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_m68k -#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_m68k -#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_m68k -#define not_v6_cp_reginfo not_v6_cp_reginfo_m68k -#define not_v7_cp_reginfo not_v7_cp_reginfo_m68k -#define not_v8_cp_reginfo not_v8_cp_reginfo_m68k -#define object_child_foreach object_child_foreach_m68k -#define object_class_foreach object_class_foreach_m68k -#define object_class_foreach_tramp object_class_foreach_tramp_m68k -#define object_class_get_list object_class_get_list_m68k -#define object_class_get_list_tramp object_class_get_list_tramp_m68k -#define 
object_class_get_parent object_class_get_parent_m68k -#define object_deinit object_deinit_m68k -#define object_dynamic_cast object_dynamic_cast_m68k -#define object_finalize object_finalize_m68k -#define object_finalize_child_property object_finalize_child_property_m68k -#define object_get_child_property object_get_child_property_m68k -#define object_get_link_property object_get_link_property_m68k -#define object_get_root object_get_root_m68k -#define object_initialize_with_type object_initialize_with_type_m68k -#define object_init_with_type object_init_with_type_m68k -#define object_instance_init object_instance_init_m68k -#define object_new_with_type object_new_with_type_m68k -#define object_post_init_with_type object_post_init_with_type_m68k -#define object_property_add_alias object_property_add_alias_m68k -#define object_property_add_link object_property_add_link_m68k -#define object_property_add_uint16_ptr object_property_add_uint16_ptr_m68k -#define object_property_add_uint32_ptr object_property_add_uint32_ptr_m68k -#define object_property_add_uint64_ptr object_property_add_uint64_ptr_m68k -#define object_property_add_uint8_ptr object_property_add_uint8_ptr_m68k -#define object_property_allow_set_link object_property_allow_set_link_m68k -#define object_property_del object_property_del_m68k -#define object_property_del_all object_property_del_all_m68k -#define object_property_find object_property_find_m68k -#define object_property_get object_property_get_m68k -#define object_property_get_bool object_property_get_bool_m68k -#define object_property_get_int object_property_get_int_m68k -#define object_property_get_link object_property_get_link_m68k -#define object_property_get_qobject object_property_get_qobject_m68k -#define object_property_get_str object_property_get_str_m68k -#define object_property_get_type object_property_get_type_m68k -#define object_property_is_child object_property_is_child_m68k -#define object_property_set object_property_set_m68k -#define object_property_set_description object_property_set_description_m68k -#define object_property_set_link object_property_set_link_m68k -#define object_property_set_qobject object_property_set_qobject_m68k -#define object_release_link_property object_release_link_property_m68k -#define object_resolve_abs_path object_resolve_abs_path_m68k -#define object_resolve_child_property object_resolve_child_property_m68k -#define object_resolve_link object_resolve_link_m68k -#define object_resolve_link_property object_resolve_link_property_m68k -#define object_resolve_partial_path object_resolve_partial_path_m68k -#define object_resolve_path object_resolve_path_m68k -#define object_resolve_path_component object_resolve_path_component_m68k -#define object_resolve_path_type object_resolve_path_type_m68k -#define object_set_link_property object_set_link_property_m68k -#define object_unparent object_unparent_m68k -#define omap_cachemaint_write omap_cachemaint_write_m68k -#define omap_cp_reginfo omap_cp_reginfo_m68k -#define omap_threadid_write omap_threadid_write_m68k -#define omap_ticonfig_write omap_ticonfig_write_m68k -#define omap_wfi_write omap_wfi_write_m68k -#define op_bits op_bits_m68k -#define open_modeflags open_modeflags_m68k -#define op_to_mov op_to_mov_m68k -#define op_to_movi op_to_movi_m68k -#define output_type_enum output_type_enum_m68k -#define packFloat128 packFloat128_m68k -#define packFloat16 packFloat16_m68k -#define packFloat32 packFloat32_m68k -#define packFloat64 packFloat64_m68k -#define packFloatx80 packFloatx80_m68k 
-#define page_find page_find_m68k -#define page_find_alloc page_find_alloc_m68k -#define page_flush_tb page_flush_tb_m68k -#define page_flush_tb_1 page_flush_tb_1_m68k -#define page_init page_init_m68k -#define page_size_init page_size_init_m68k -#define par par_m68k -#define parse_array parse_array_m68k -#define parse_error parse_error_m68k -#define parse_escape parse_escape_m68k -#define parse_keyword parse_keyword_m68k -#define parse_literal parse_literal_m68k -#define parse_object parse_object_m68k -#define parse_optional parse_optional_m68k -#define parse_option_bool parse_option_bool_m68k -#define parse_option_number parse_option_number_m68k -#define parse_option_size parse_option_size_m68k -#define parse_pair parse_pair_m68k -#define parser_context_free parser_context_free_m68k -#define parser_context_new parser_context_new_m68k -#define parser_context_peek_token parser_context_peek_token_m68k -#define parser_context_pop_token parser_context_pop_token_m68k -#define parser_context_restore parser_context_restore_m68k -#define parser_context_save parser_context_save_m68k -#define parse_str parse_str_m68k -#define parse_type_bool parse_type_bool_m68k -#define parse_type_int parse_type_int_m68k -#define parse_type_number parse_type_number_m68k -#define parse_type_size parse_type_size_m68k -#define parse_type_str parse_type_str_m68k -#define parse_value parse_value_m68k -#define par_write par_write_m68k -#define patch_reloc patch_reloc_m68k -#define phys_map_node_alloc phys_map_node_alloc_m68k -#define phys_map_node_reserve phys_map_node_reserve_m68k -#define phys_mem_alloc phys_mem_alloc_m68k -#define phys_mem_set_alloc phys_mem_set_alloc_m68k -#define phys_page_compact phys_page_compact_m68k -#define phys_page_compact_all phys_page_compact_all_m68k -#define phys_page_find phys_page_find_m68k -#define phys_page_set phys_page_set_m68k -#define phys_page_set_level phys_page_set_level_m68k -#define phys_section_add phys_section_add_m68k -#define phys_section_destroy phys_section_destroy_m68k -#define phys_sections_free phys_sections_free_m68k -#define pickNaN pickNaN_m68k -#define pickNaNMulAdd pickNaNMulAdd_m68k -#define pmccfiltr_write pmccfiltr_write_m68k -#define pmccntr_read pmccntr_read_m68k -#define pmccntr_sync pmccntr_sync_m68k -#define pmccntr_write pmccntr_write_m68k -#define pmccntr_write32 pmccntr_write32_m68k -#define pmcntenclr_write pmcntenclr_write_m68k -#define pmcntenset_write pmcntenset_write_m68k -#define pmcr_write pmcr_write_m68k -#define pmintenclr_write pmintenclr_write_m68k -#define pmintenset_write pmintenset_write_m68k -#define pmovsr_write pmovsr_write_m68k -#define pmreg_access pmreg_access_m68k -#define pmsav5_cp_reginfo pmsav5_cp_reginfo_m68k -#define pmsav5_data_ap_read pmsav5_data_ap_read_m68k -#define pmsav5_data_ap_write pmsav5_data_ap_write_m68k -#define pmsav5_insn_ap_read pmsav5_insn_ap_read_m68k -#define pmsav5_insn_ap_write pmsav5_insn_ap_write_m68k -#define pmuserenr_write pmuserenr_write_m68k -#define pmxevtyper_write pmxevtyper_write_m68k -#define print_type_bool print_type_bool_m68k -#define print_type_int print_type_int_m68k -#define print_type_number print_type_number_m68k -#define print_type_size print_type_size_m68k -#define print_type_str print_type_str_m68k -#define propagateFloat128NaN propagateFloat128NaN_m68k -#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_m68k -#define propagateFloat32NaN propagateFloat32NaN_m68k -#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_m68k -#define propagateFloat64NaN 
propagateFloat64NaN_m68k -#define propagateFloatx80NaN propagateFloatx80NaN_m68k -#define property_get_alias property_get_alias_m68k -#define property_get_bool property_get_bool_m68k -#define property_get_str property_get_str_m68k -#define property_get_uint16_ptr property_get_uint16_ptr_m68k -#define property_get_uint32_ptr property_get_uint32_ptr_m68k -#define property_get_uint64_ptr property_get_uint64_ptr_m68k -#define property_get_uint8_ptr property_get_uint8_ptr_m68k -#define property_release_alias property_release_alias_m68k -#define property_release_bool property_release_bool_m68k -#define property_release_str property_release_str_m68k -#define property_resolve_alias property_resolve_alias_m68k -#define property_set_alias property_set_alias_m68k -#define property_set_bool property_set_bool_m68k -#define property_set_str property_set_str_m68k -#define pstate_read pstate_read_m68k -#define pstate_write pstate_write_m68k -#define pxa250_initfn pxa250_initfn_m68k -#define pxa255_initfn pxa255_initfn_m68k -#define pxa260_initfn pxa260_initfn_m68k -#define pxa261_initfn pxa261_initfn_m68k -#define pxa262_initfn pxa262_initfn_m68k -#define pxa270a0_initfn pxa270a0_initfn_m68k -#define pxa270a1_initfn pxa270a1_initfn_m68k -#define pxa270b0_initfn pxa270b0_initfn_m68k -#define pxa270b1_initfn pxa270b1_initfn_m68k -#define pxa270c0_initfn pxa270c0_initfn_m68k -#define pxa270c5_initfn pxa270c5_initfn_m68k -#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_m68k -#define qapi_dealloc_end_list qapi_dealloc_end_list_m68k -#define qapi_dealloc_end_struct qapi_dealloc_end_struct_m68k -#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_m68k -#define qapi_dealloc_next_list qapi_dealloc_next_list_m68k -#define qapi_dealloc_pop qapi_dealloc_pop_m68k -#define qapi_dealloc_push qapi_dealloc_push_m68k -#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_m68k -#define qapi_dealloc_start_list qapi_dealloc_start_list_m68k -#define qapi_dealloc_start_struct qapi_dealloc_start_struct_m68k -#define qapi_dealloc_start_union qapi_dealloc_start_union_m68k -#define qapi_dealloc_type_bool qapi_dealloc_type_bool_m68k -#define qapi_dealloc_type_enum qapi_dealloc_type_enum_m68k -#define qapi_dealloc_type_int qapi_dealloc_type_int_m68k -#define qapi_dealloc_type_number qapi_dealloc_type_number_m68k -#define qapi_dealloc_type_size qapi_dealloc_type_size_m68k -#define qapi_dealloc_type_str qapi_dealloc_type_str_m68k -#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_m68k -#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_m68k -#define qapi_free_boolList qapi_free_boolList_m68k -#define qapi_free_ErrorClassList qapi_free_ErrorClassList_m68k -#define qapi_free_int16List qapi_free_int16List_m68k -#define qapi_free_int32List qapi_free_int32List_m68k -#define qapi_free_int64List qapi_free_int64List_m68k -#define qapi_free_int8List qapi_free_int8List_m68k -#define qapi_free_intList qapi_free_intList_m68k -#define qapi_free_numberList qapi_free_numberList_m68k -#define qapi_free_strList qapi_free_strList_m68k -#define qapi_free_uint16List qapi_free_uint16List_m68k -#define qapi_free_uint32List qapi_free_uint32List_m68k -#define qapi_free_uint64List qapi_free_uint64List_m68k -#define qapi_free_uint8List qapi_free_uint8List_m68k -#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_m68k -#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_m68k -#define qapi_free_X86CPURegister32List 
qapi_free_X86CPURegister32List_m68k -#define qbool_destroy_obj qbool_destroy_obj_m68k -#define qbool_from_int qbool_from_int_m68k -#define qbool_get_int qbool_get_int_m68k -#define qbool_type qbool_type_m68k -#define qbus_create qbus_create_m68k -#define qbus_create_inplace qbus_create_inplace_m68k -#define qbus_finalize qbus_finalize_m68k -#define qbus_initfn qbus_initfn_m68k -#define qbus_realize qbus_realize_m68k -#define qdev_create qdev_create_m68k -#define qdev_get_type qdev_get_type_m68k -#define qdev_register_types qdev_register_types_m68k -#define qdev_set_parent_bus qdev_set_parent_bus_m68k -#define qdev_try_create qdev_try_create_m68k -#define qdict_add_key qdict_add_key_m68k -#define qdict_array_split qdict_array_split_m68k -#define qdict_clone_shallow qdict_clone_shallow_m68k -#define qdict_del qdict_del_m68k -#define qdict_destroy_obj qdict_destroy_obj_m68k -#define qdict_entry_key qdict_entry_key_m68k -#define qdict_entry_value qdict_entry_value_m68k -#define qdict_extract_subqdict qdict_extract_subqdict_m68k -#define qdict_find qdict_find_m68k -#define qdict_first qdict_first_m68k -#define qdict_flatten qdict_flatten_m68k -#define qdict_flatten_qdict qdict_flatten_qdict_m68k -#define qdict_flatten_qlist qdict_flatten_qlist_m68k -#define qdict_get qdict_get_m68k -#define qdict_get_bool qdict_get_bool_m68k -#define qdict_get_double qdict_get_double_m68k -#define qdict_get_int qdict_get_int_m68k -#define qdict_get_obj qdict_get_obj_m68k -#define qdict_get_qdict qdict_get_qdict_m68k -#define qdict_get_qlist qdict_get_qlist_m68k -#define qdict_get_str qdict_get_str_m68k -#define qdict_get_try_bool qdict_get_try_bool_m68k -#define qdict_get_try_int qdict_get_try_int_m68k -#define qdict_get_try_str qdict_get_try_str_m68k -#define qdict_haskey qdict_haskey_m68k -#define qdict_has_prefixed_entries qdict_has_prefixed_entries_m68k -#define qdict_iter qdict_iter_m68k -#define qdict_join qdict_join_m68k -#define qdict_new qdict_new_m68k -#define qdict_next qdict_next_m68k -#define qdict_next_entry qdict_next_entry_m68k -#define qdict_put_obj qdict_put_obj_m68k -#define qdict_size qdict_size_m68k -#define qdict_type qdict_type_m68k -#define qemu_clock_get_us qemu_clock_get_us_m68k -#define qemu_clock_ptr qemu_clock_ptr_m68k -#define qemu_clocks qemu_clocks_m68k -#define qemu_get_cpu qemu_get_cpu_m68k -#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_m68k -#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_m68k -#define qemu_get_ram_block qemu_get_ram_block_m68k -#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_m68k -#define qemu_get_ram_fd qemu_get_ram_fd_m68k -#define qemu_get_ram_ptr qemu_get_ram_ptr_m68k -#define qemu_host_page_mask qemu_host_page_mask_m68k -#define qemu_host_page_size qemu_host_page_size_m68k -#define qemu_init_vcpu qemu_init_vcpu_m68k -#define qemu_ld_helpers qemu_ld_helpers_m68k -#define qemu_log_close qemu_log_close_m68k -#define qemu_log_enabled qemu_log_enabled_m68k -#define qemu_log_flush qemu_log_flush_m68k -#define qemu_loglevel_mask qemu_loglevel_mask_m68k -#define qemu_log_vprintf qemu_log_vprintf_m68k -#define qemu_oom_check qemu_oom_check_m68k -#define qemu_parse_fd qemu_parse_fd_m68k -#define qemu_ram_addr_from_host qemu_ram_addr_from_host_m68k -#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_m68k -#define qemu_ram_alloc qemu_ram_alloc_m68k -#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_m68k -#define qemu_ram_foreach_block qemu_ram_foreach_block_m68k -#define 
qemu_ram_free qemu_ram_free_m68k -#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_m68k -#define qemu_ram_ptr_length qemu_ram_ptr_length_m68k -#define qemu_ram_remap qemu_ram_remap_m68k -#define qemu_ram_setup_dump qemu_ram_setup_dump_m68k -#define qemu_ram_unset_idstr qemu_ram_unset_idstr_m68k -#define qemu_real_host_page_size qemu_real_host_page_size_m68k -#define qemu_st_helpers qemu_st_helpers_m68k -#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_m68k -#define qemu_try_memalign qemu_try_memalign_m68k -#define qentry_destroy qentry_destroy_m68k -#define qerror_human qerror_human_m68k -#define qerror_report qerror_report_m68k -#define qerror_report_err qerror_report_err_m68k -#define qfloat_destroy_obj qfloat_destroy_obj_m68k -#define qfloat_from_double qfloat_from_double_m68k -#define qfloat_get_double qfloat_get_double_m68k -#define qfloat_type qfloat_type_m68k -#define qint_destroy_obj qint_destroy_obj_m68k -#define qint_from_int qint_from_int_m68k -#define qint_get_int qint_get_int_m68k -#define qint_type qint_type_m68k -#define qlist_append_obj qlist_append_obj_m68k -#define qlist_copy qlist_copy_m68k -#define qlist_copy_elem qlist_copy_elem_m68k -#define qlist_destroy_obj qlist_destroy_obj_m68k -#define qlist_empty qlist_empty_m68k -#define qlist_entry_obj qlist_entry_obj_m68k -#define qlist_first qlist_first_m68k -#define qlist_iter qlist_iter_m68k -#define qlist_new qlist_new_m68k -#define qlist_next qlist_next_m68k -#define qlist_peek qlist_peek_m68k -#define qlist_pop qlist_pop_m68k -#define qlist_size qlist_size_m68k -#define qlist_size_iter qlist_size_iter_m68k -#define qlist_type qlist_type_m68k -#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_m68k -#define qmp_input_end_list qmp_input_end_list_m68k -#define qmp_input_end_struct qmp_input_end_struct_m68k -#define qmp_input_get_next_type qmp_input_get_next_type_m68k -#define qmp_input_get_object qmp_input_get_object_m68k -#define qmp_input_get_visitor qmp_input_get_visitor_m68k -#define qmp_input_next_list qmp_input_next_list_m68k -#define qmp_input_optional qmp_input_optional_m68k -#define qmp_input_pop qmp_input_pop_m68k -#define qmp_input_push qmp_input_push_m68k -#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_m68k -#define qmp_input_start_list qmp_input_start_list_m68k -#define qmp_input_start_struct qmp_input_start_struct_m68k -#define qmp_input_type_bool qmp_input_type_bool_m68k -#define qmp_input_type_int qmp_input_type_int_m68k -#define qmp_input_type_number qmp_input_type_number_m68k -#define qmp_input_type_str qmp_input_type_str_m68k -#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_m68k -#define qmp_input_visitor_new qmp_input_visitor_new_m68k -#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_m68k -#define qmp_output_add_obj qmp_output_add_obj_m68k -#define qmp_output_end_list qmp_output_end_list_m68k -#define qmp_output_end_struct qmp_output_end_struct_m68k -#define qmp_output_first qmp_output_first_m68k -#define qmp_output_get_qobject qmp_output_get_qobject_m68k -#define qmp_output_get_visitor qmp_output_get_visitor_m68k -#define qmp_output_last qmp_output_last_m68k -#define qmp_output_next_list qmp_output_next_list_m68k -#define qmp_output_pop qmp_output_pop_m68k -#define qmp_output_push_obj qmp_output_push_obj_m68k -#define qmp_output_start_list qmp_output_start_list_m68k -#define qmp_output_start_struct qmp_output_start_struct_m68k -#define qmp_output_type_bool qmp_output_type_bool_m68k -#define qmp_output_type_int 
qmp_output_type_int_m68k -#define qmp_output_type_number qmp_output_type_number_m68k -#define qmp_output_type_str qmp_output_type_str_m68k -#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_m68k -#define qmp_output_visitor_new qmp_output_visitor_new_m68k -#define qobject_decref qobject_decref_m68k -#define qobject_to_qbool qobject_to_qbool_m68k -#define qobject_to_qdict qobject_to_qdict_m68k -#define qobject_to_qfloat qobject_to_qfloat_m68k -#define qobject_to_qint qobject_to_qint_m68k -#define qobject_to_qlist qobject_to_qlist_m68k -#define qobject_to_qstring qobject_to_qstring_m68k -#define qobject_type qobject_type_m68k -#define qstring_append qstring_append_m68k -#define qstring_append_chr qstring_append_chr_m68k -#define qstring_append_int qstring_append_int_m68k -#define qstring_destroy_obj qstring_destroy_obj_m68k -#define qstring_from_escaped_str qstring_from_escaped_str_m68k -#define qstring_from_str qstring_from_str_m68k -#define qstring_from_substr qstring_from_substr_m68k -#define qstring_get_length qstring_get_length_m68k -#define qstring_get_str qstring_get_str_m68k -#define qstring_new qstring_new_m68k -#define qstring_type qstring_type_m68k -#define ram_block_add ram_block_add_m68k -#define ram_size ram_size_m68k -#define range_compare range_compare_m68k -#define range_covers_byte range_covers_byte_m68k -#define range_get_last range_get_last_m68k -#define range_merge range_merge_m68k -#define ranges_can_merge ranges_can_merge_m68k -#define raw_read raw_read_m68k -#define raw_write raw_write_m68k -#define rcon rcon_m68k -#define read_raw_cp_reg read_raw_cp_reg_m68k -#define recip_estimate recip_estimate_m68k -#define recip_sqrt_estimate recip_sqrt_estimate_m68k -#define register_cp_regs_for_features register_cp_regs_for_features_m68k -#define register_multipage register_multipage_m68k -#define register_subpage register_subpage_m68k -#define register_tm_clones register_tm_clones_m68k -#define register_types_object register_types_object_m68k -#define regnames regnames_m68k -#define render_memory_region render_memory_region_m68k -#define reset_all_temps reset_all_temps_m68k -#define reset_temp reset_temp_m68k -#define rol32 rol32_m68k -#define rol64 rol64_m68k -#define ror32 ror32_m68k -#define ror64 ror64_m68k -#define roundAndPackFloat128 roundAndPackFloat128_m68k -#define roundAndPackFloat16 roundAndPackFloat16_m68k -#define roundAndPackFloat32 roundAndPackFloat32_m68k -#define roundAndPackFloat64 roundAndPackFloat64_m68k -#define roundAndPackFloatx80 roundAndPackFloatx80_m68k -#define roundAndPackInt32 roundAndPackInt32_m68k -#define roundAndPackInt64 roundAndPackInt64_m68k -#define roundAndPackUint64 roundAndPackUint64_m68k -#define round_to_inf round_to_inf_m68k -#define run_on_cpu run_on_cpu_m68k -#define s0 s0_m68k -#define S0 S0_m68k -#define s1 s1_m68k -#define S1 S1_m68k -#define sa1100_initfn sa1100_initfn_m68k -#define sa1110_initfn sa1110_initfn_m68k -#define save_globals save_globals_m68k -#define scr_write scr_write_m68k -#define sctlr_write sctlr_write_m68k -#define set_bit set_bit_m68k -#define set_bits set_bits_m68k -#define set_default_nan_mode set_default_nan_mode_m68k -#define set_feature set_feature_m68k -#define set_float_detect_tininess set_float_detect_tininess_m68k -#define set_float_exception_flags set_float_exception_flags_m68k -#define set_float_rounding_mode set_float_rounding_mode_m68k -#define set_flush_inputs_to_zero set_flush_inputs_to_zero_m68k -#define set_flush_to_zero set_flush_to_zero_m68k -#define set_swi_errno 
set_swi_errno_m68k -#define sextract32 sextract32_m68k -#define sextract64 sextract64_m68k -#define shift128ExtraRightJamming shift128ExtraRightJamming_m68k -#define shift128Right shift128Right_m68k -#define shift128RightJamming shift128RightJamming_m68k -#define shift32RightJamming shift32RightJamming_m68k -#define shift64ExtraRightJamming shift64ExtraRightJamming_m68k -#define shift64RightJamming shift64RightJamming_m68k -#define shifter_out_im shifter_out_im_m68k -#define shortShift128Left shortShift128Left_m68k -#define shortShift192Left shortShift192Left_m68k -#define simple_mpu_ap_bits simple_mpu_ap_bits_m68k -#define size_code_gen_buffer size_code_gen_buffer_m68k -#define softmmu_lock_user softmmu_lock_user_m68k -#define softmmu_lock_user_string softmmu_lock_user_string_m68k -#define softmmu_tget32 softmmu_tget32_m68k -#define softmmu_tget8 softmmu_tget8_m68k -#define softmmu_tput32 softmmu_tput32_m68k -#define softmmu_unlock_user softmmu_unlock_user_m68k -#define sort_constraints sort_constraints_m68k -#define sp_el0_access sp_el0_access_m68k -#define spsel_read spsel_read_m68k -#define spsel_write spsel_write_m68k -#define start_list start_list_m68k -#define stb_p stb_p_m68k -#define stb_phys stb_phys_m68k -#define stl_be_p stl_be_p_m68k -#define stl_be_phys stl_be_phys_m68k -#define stl_he_p stl_he_p_m68k -#define stl_le_p stl_le_p_m68k -#define stl_le_phys stl_le_phys_m68k -#define stl_phys stl_phys_m68k -#define stl_phys_internal stl_phys_internal_m68k -#define stl_phys_notdirty stl_phys_notdirty_m68k -#define store_cpu_offset store_cpu_offset_m68k -#define store_reg store_reg_m68k -#define store_reg_bx store_reg_bx_m68k -#define store_reg_from_load store_reg_from_load_m68k -#define stq_be_p stq_be_p_m68k -#define stq_be_phys stq_be_phys_m68k -#define stq_he_p stq_he_p_m68k -#define stq_le_p stq_le_p_m68k -#define stq_le_phys stq_le_phys_m68k -#define stq_phys stq_phys_m68k -#define string_input_get_visitor string_input_get_visitor_m68k -#define string_input_visitor_cleanup string_input_visitor_cleanup_m68k -#define string_input_visitor_new string_input_visitor_new_m68k -#define strongarm_cp_reginfo strongarm_cp_reginfo_m68k -#define strstart strstart_m68k -#define strtosz strtosz_m68k -#define strtosz_suffix strtosz_suffix_m68k -#define stw_be_p stw_be_p_m68k -#define stw_be_phys stw_be_phys_m68k -#define stw_he_p stw_he_p_m68k -#define stw_le_p stw_le_p_m68k -#define stw_le_phys stw_le_phys_m68k -#define stw_phys stw_phys_m68k -#define stw_phys_internal stw_phys_internal_m68k -#define sub128 sub128_m68k -#define sub16_sat sub16_sat_m68k -#define sub16_usat sub16_usat_m68k -#define sub192 sub192_m68k -#define sub8_sat sub8_sat_m68k -#define sub8_usat sub8_usat_m68k -#define subFloat128Sigs subFloat128Sigs_m68k -#define subFloat32Sigs subFloat32Sigs_m68k -#define subFloat64Sigs subFloat64Sigs_m68k -#define subFloatx80Sigs subFloatx80Sigs_m68k -#define subpage_accepts subpage_accepts_m68k -#define subpage_init subpage_init_m68k -#define subpage_ops subpage_ops_m68k -#define subpage_read subpage_read_m68k -#define subpage_register subpage_register_m68k -#define subpage_write subpage_write_m68k -#define suffix_mul suffix_mul_m68k -#define swap_commutative swap_commutative_m68k -#define swap_commutative2 swap_commutative2_m68k -#define switch_mode switch_mode_m68k -#define switch_v7m_sp switch_v7m_sp_m68k -#define syn_aa32_bkpt syn_aa32_bkpt_m68k -#define syn_aa32_hvc syn_aa32_hvc_m68k -#define syn_aa32_smc syn_aa32_smc_m68k -#define syn_aa32_svc syn_aa32_svc_m68k -#define 
syn_breakpoint syn_breakpoint_m68k -#define sync_globals sync_globals_m68k -#define syn_cp14_rrt_trap syn_cp14_rrt_trap_m68k -#define syn_cp14_rt_trap syn_cp14_rt_trap_m68k -#define syn_cp15_rrt_trap syn_cp15_rrt_trap_m68k -#define syn_cp15_rt_trap syn_cp15_rt_trap_m68k -#define syn_data_abort syn_data_abort_m68k -#define syn_fp_access_trap syn_fp_access_trap_m68k -#define syn_insn_abort syn_insn_abort_m68k -#define syn_swstep syn_swstep_m68k -#define syn_uncategorized syn_uncategorized_m68k -#define syn_watchpoint syn_watchpoint_m68k -#define syscall_err syscall_err_m68k -#define system_bus_class_init system_bus_class_init_m68k -#define system_bus_info system_bus_info_m68k -#define t2ee_cp_reginfo t2ee_cp_reginfo_m68k -#define table_logic_cc table_logic_cc_m68k -#define target_parse_constraint target_parse_constraint_m68k -#define target_words_bigendian target_words_bigendian_m68k -#define tb_add_jump tb_add_jump_m68k -#define tb_alloc tb_alloc_m68k -#define tb_alloc_page tb_alloc_page_m68k -#define tb_check_watchpoint tb_check_watchpoint_m68k -#define tb_find_fast tb_find_fast_m68k -#define tb_find_pc tb_find_pc_m68k -#define tb_find_slow tb_find_slow_m68k -#define tb_flush tb_flush_m68k -#define tb_flush_jmp_cache tb_flush_jmp_cache_m68k -#define tb_free tb_free_m68k -#define tb_gen_code tb_gen_code_m68k -#define tb_hash_remove tb_hash_remove_m68k -#define tb_invalidate_phys_addr tb_invalidate_phys_addr_m68k -#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_m68k -#define tb_invalidate_phys_range tb_invalidate_phys_range_m68k -#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_m68k -#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_m68k -#define tb_jmp_remove tb_jmp_remove_m68k -#define tb_link_page tb_link_page_m68k -#define tb_page_remove tb_page_remove_m68k -#define tb_phys_hash_func tb_phys_hash_func_m68k -#define tb_phys_invalidate tb_phys_invalidate_m68k -#define tb_reset_jump tb_reset_jump_m68k -#define tb_set_jmp_target tb_set_jmp_target_m68k -#define tcg_accel_class_init tcg_accel_class_init_m68k -#define tcg_accel_type tcg_accel_type_m68k -#define tcg_add_param_i32 tcg_add_param_i32_m68k -#define tcg_add_param_i64 tcg_add_param_i64_m68k -#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_m68k -#define tcg_allowed tcg_allowed_m68k -#define tcg_canonicalize_memop tcg_canonicalize_memop_m68k -#define tcg_commit tcg_commit_m68k -#define tcg_cond_to_jcc tcg_cond_to_jcc_m68k -#define tcg_constant_folding tcg_constant_folding_m68k +#define tcg_can_emit_vec_op tcg_can_emit_vec_op_m68k +#define tcg_expand_vec_op tcg_expand_vec_op_m68k +#define tcg_register_jit tcg_register_jit_m68k +#define tcg_tb_insert tcg_tb_insert_m68k +#define tcg_tb_remove tcg_tb_remove_m68k +#define tcg_tb_lookup tcg_tb_lookup_m68k +#define tcg_tb_foreach tcg_tb_foreach_m68k +#define tcg_nb_tbs tcg_nb_tbs_m68k +#define tcg_region_reset_all tcg_region_reset_all_m68k +#define tcg_region_init tcg_region_init_m68k +#define tcg_code_size tcg_code_size_m68k +#define tcg_code_capacity tcg_code_capacity_m68k +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_m68k +#define tcg_malloc_internal tcg_malloc_internal_m68k +#define tcg_pool_reset tcg_pool_reset_m68k +#define tcg_context_init tcg_context_init_m68k +#define tcg_tb_alloc tcg_tb_alloc_m68k +#define tcg_prologue_init tcg_prologue_init_m68k +#define tcg_func_start tcg_func_start_m68k +#define tcg_set_frame tcg_set_frame_m68k +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_m68k +#define 
tcg_temp_new_internal tcg_temp_new_internal_m68k +#define tcg_temp_new_vec tcg_temp_new_vec_m68k +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_m68k +#define tcg_temp_free_internal tcg_temp_free_internal_m68k #define tcg_const_i32 tcg_const_i32_m68k #define tcg_const_i64 tcg_const_i64_m68k #define tcg_const_local_i32 tcg_const_local_i32_m68k #define tcg_const_local_i64 tcg_const_local_i64_m68k -#define tcg_context_init tcg_context_init_m68k -#define tcg_cpu_address_space_init tcg_cpu_address_space_init_m68k -#define tcg_cpu_exec tcg_cpu_exec_m68k -#define tcg_current_code_size tcg_current_code_size_m68k -#define tcg_dump_info tcg_dump_info_m68k -#define tcg_dump_ops tcg_dump_ops_m68k -#define tcg_exec_all tcg_exec_all_m68k -#define tcg_find_helper tcg_find_helper_m68k -#define tcg_func_start tcg_func_start_m68k -#define tcg_gen_abs_i32 tcg_gen_abs_i32_m68k -#define tcg_gen_add2_i32 tcg_gen_add2_i32_m68k -#define tcg_gen_add_i32 tcg_gen_add_i32_m68k -#define tcg_gen_add_i64 tcg_gen_add_i64_m68k -#define tcg_gen_addi_i32 tcg_gen_addi_i32_m68k -#define tcg_gen_addi_i64 tcg_gen_addi_i64_m68k -#define tcg_gen_andc_i32 tcg_gen_andc_i32_m68k -#define tcg_gen_and_i32 tcg_gen_and_i32_m68k -#define tcg_gen_and_i64 tcg_gen_and_i64_m68k -#define tcg_gen_andi_i32 tcg_gen_andi_i32_m68k -#define tcg_gen_andi_i64 tcg_gen_andi_i64_m68k -#define tcg_gen_br tcg_gen_br_m68k -#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_m68k -#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_m68k -#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_m68k -#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_m68k -#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_m68k +#define tcg_op_supported tcg_op_supported_m68k #define tcg_gen_callN tcg_gen_callN_m68k +#define tcg_op_remove tcg_op_remove_m68k +#define tcg_emit_op tcg_emit_op_m68k +#define tcg_op_insert_before tcg_op_insert_before_m68k +#define tcg_op_insert_after tcg_op_insert_after_m68k +#define tcg_cpu_exec_time tcg_cpu_exec_time_m68k #define tcg_gen_code tcg_gen_code_m68k -#define tcg_gen_code_common tcg_gen_code_common_m68k -#define tcg_gen_code_search_pc tcg_gen_code_search_pc_m68k -#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_m68k -#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_m68k -#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_m68k -#define tcg_gen_exit_tb tcg_gen_exit_tb_m68k -#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_m68k -#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_m68k -#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_m68k -#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_m68k -#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_m68k -#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_m68k -#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_m68k -#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_m68k -#define tcg_gen_goto_tb tcg_gen_goto_tb_m68k -#define tcg_gen_ld_i32 tcg_gen_ld_i32_m68k -#define tcg_gen_ld_i64 tcg_gen_ld_i64_m68k -#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_m68k -#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_m68k -#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_m68k -#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_m68k -#define tcg_gen_mov_i32 tcg_gen_mov_i32_m68k -#define tcg_gen_mov_i64 tcg_gen_mov_i64_m68k -#define tcg_gen_movi_i32 tcg_gen_movi_i32_m68k -#define tcg_gen_movi_i64 tcg_gen_movi_i64_m68k -#define tcg_gen_mul_i32 tcg_gen_mul_i32_m68k -#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_m68k -#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_m68k -#define tcg_gen_neg_i32 tcg_gen_neg_i32_m68k -#define tcg_gen_neg_i64 
tcg_gen_neg_i64_m68k -#define tcg_gen_not_i32 tcg_gen_not_i32_m68k -#define tcg_gen_op0 tcg_gen_op0_m68k -#define tcg_gen_op1i tcg_gen_op1i_m68k -#define tcg_gen_op2_i32 tcg_gen_op2_i32_m68k -#define tcg_gen_op2_i64 tcg_gen_op2_i64_m68k -#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_m68k -#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_m68k -#define tcg_gen_op3_i32 tcg_gen_op3_i32_m68k -#define tcg_gen_op3_i64 tcg_gen_op3_i64_m68k -#define tcg_gen_op4_i32 tcg_gen_op4_i32_m68k -#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_m68k -#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_m68k -#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_m68k -#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_m68k -#define tcg_gen_op6_i32 tcg_gen_op6_i32_m68k -#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_m68k -#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_m68k -#define tcg_gen_orc_i32 tcg_gen_orc_i32_m68k -#define tcg_gen_or_i32 tcg_gen_or_i32_m68k -#define tcg_gen_or_i64 tcg_gen_or_i64_m68k +#define tcg_gen_op1 tcg_gen_op1_m68k +#define tcg_gen_op2 tcg_gen_op2_m68k +#define tcg_gen_op3 tcg_gen_op3_m68k +#define tcg_gen_op4 tcg_gen_op4_m68k +#define tcg_gen_op5 tcg_gen_op5_m68k +#define tcg_gen_op6 tcg_gen_op6_m68k +#define tcg_gen_mb tcg_gen_mb_m68k +#define tcg_gen_addi_i32 tcg_gen_addi_i32_m68k +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_m68k +#define tcg_gen_subi_i32 tcg_gen_subi_i32_m68k +#define tcg_gen_andi_i32 tcg_gen_andi_i32_m68k #define tcg_gen_ori_i32 tcg_gen_ori_i32_m68k -#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_m68k -#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_m68k -#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_m68k -#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_m68k +#define tcg_gen_xori_i32 tcg_gen_xori_i32_m68k +#define tcg_gen_shli_i32 tcg_gen_shli_i32_m68k +#define tcg_gen_shri_i32 tcg_gen_shri_i32_m68k +#define tcg_gen_sari_i32 tcg_gen_sari_i32_m68k +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_m68k +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_m68k +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_m68k +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_m68k +#define tcg_gen_muli_i32 tcg_gen_muli_i32_m68k +#define tcg_gen_div_i32 tcg_gen_div_i32_m68k +#define tcg_gen_rem_i32 tcg_gen_rem_i32_m68k +#define tcg_gen_divu_i32 tcg_gen_divu_i32_m68k +#define tcg_gen_remu_i32 tcg_gen_remu_i32_m68k +#define tcg_gen_andc_i32 tcg_gen_andc_i32_m68k +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_m68k +#define tcg_gen_nand_i32 tcg_gen_nand_i32_m68k +#define tcg_gen_nor_i32 tcg_gen_nor_i32_m68k +#define tcg_gen_orc_i32 tcg_gen_orc_i32_m68k +#define tcg_gen_clz_i32 tcg_gen_clz_i32_m68k +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_m68k +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_m68k +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_m68k +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_m68k +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_m68k #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_m68k #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_m68k #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_m68k #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_m68k -#define tcg_gen_sar_i32 tcg_gen_sar_i32_m68k -#define tcg_gen_sari_i32 tcg_gen_sari_i32_m68k -#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_m68k -#define tcg_gen_shl_i32 tcg_gen_shl_i32_m68k -#define tcg_gen_shl_i64 tcg_gen_shl_i64_m68k -#define tcg_gen_shli_i32 tcg_gen_shli_i32_m68k +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_m68k +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_m68k +#define tcg_gen_extract_i32 tcg_gen_extract_i32_m68k +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_m68k +#define 
tcg_gen_extract2_i32 tcg_gen_extract2_i32_m68k +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_m68k +#define tcg_gen_add2_i32 tcg_gen_add2_i32_m68k +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_m68k +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_m68k +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_m68k +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_m68k +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_m68k +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_m68k +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_m68k +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_m68k +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_m68k +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_m68k +#define tcg_gen_smin_i32 tcg_gen_smin_i32_m68k +#define tcg_gen_umin_i32 tcg_gen_umin_i32_m68k +#define tcg_gen_smax_i32 tcg_gen_smax_i32_m68k +#define tcg_gen_umax_i32 tcg_gen_umax_i32_m68k +#define tcg_gen_abs_i32 tcg_gen_abs_i32_m68k +#define tcg_gen_addi_i64 tcg_gen_addi_i64_m68k +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_m68k +#define tcg_gen_subi_i64 tcg_gen_subi_i64_m68k +#define tcg_gen_andi_i64 tcg_gen_andi_i64_m68k +#define tcg_gen_ori_i64 tcg_gen_ori_i64_m68k +#define tcg_gen_xori_i64 tcg_gen_xori_i64_m68k #define tcg_gen_shli_i64 tcg_gen_shli_i64_m68k -#define tcg_gen_shr_i32 tcg_gen_shr_i32_m68k -#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_m68k -#define tcg_gen_shr_i64 tcg_gen_shr_i64_m68k -#define tcg_gen_shri_i32 tcg_gen_shri_i32_m68k #define tcg_gen_shri_i64 tcg_gen_shri_i64_m68k -#define tcg_gen_st_i32 tcg_gen_st_i32_m68k -#define tcg_gen_st_i64 tcg_gen_st_i64_m68k -#define tcg_gen_sub_i32 tcg_gen_sub_i32_m68k -#define tcg_gen_sub_i64 tcg_gen_sub_i64_m68k -#define tcg_gen_subi_i32 tcg_gen_subi_i32_m68k -#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_m68k -#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_m68k -#define tcg_gen_xor_i32 tcg_gen_xor_i32_m68k -#define tcg_gen_xor_i64 tcg_gen_xor_i64_m68k -#define tcg_gen_xori_i32 tcg_gen_xori_i32_m68k -#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_m68k -#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_m68k -#define tcg_get_arg_str_idx tcg_get_arg_str_idx_m68k -#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_m68k -#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_m68k -#define tcg_global_mem_new_internal tcg_global_mem_new_internal_m68k -#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_m68k -#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_m68k -#define tcg_global_reg_new_internal tcg_global_reg_new_internal_m68k -#define tcg_handle_interrupt tcg_handle_interrupt_m68k -#define tcg_init tcg_init_m68k -#define tcg_invert_cond tcg_invert_cond_m68k -#define tcg_la_bb_end tcg_la_bb_end_m68k -#define tcg_la_br_end tcg_la_br_end_m68k -#define tcg_la_func_end tcg_la_func_end_m68k -#define tcg_liveness_analysis tcg_liveness_analysis_m68k -#define tcg_malloc tcg_malloc_m68k -#define tcg_malloc_internal tcg_malloc_internal_m68k -#define tcg_op_defs_org tcg_op_defs_org_m68k -#define tcg_opt_gen_mov tcg_opt_gen_mov_m68k -#define tcg_opt_gen_movi tcg_opt_gen_movi_m68k -#define tcg_optimize tcg_optimize_m68k -#define tcg_out16 tcg_out16_m68k -#define tcg_out32 tcg_out32_m68k -#define tcg_out64 tcg_out64_m68k -#define tcg_out8 tcg_out8_m68k -#define tcg_out_addi tcg_out_addi_m68k -#define tcg_out_branch tcg_out_branch_m68k -#define tcg_out_brcond32 tcg_out_brcond32_m68k -#define tcg_out_brcond64 tcg_out_brcond64_m68k -#define tcg_out_bswap32 tcg_out_bswap32_m68k -#define tcg_out_bswap64 tcg_out_bswap64_m68k -#define tcg_out_call tcg_out_call_m68k -#define 
tcg_out_cmp tcg_out_cmp_m68k -#define tcg_out_ext16s tcg_out_ext16s_m68k -#define tcg_out_ext16u tcg_out_ext16u_m68k -#define tcg_out_ext32s tcg_out_ext32s_m68k -#define tcg_out_ext32u tcg_out_ext32u_m68k -#define tcg_out_ext8s tcg_out_ext8s_m68k -#define tcg_out_ext8u tcg_out_ext8u_m68k -#define tcg_out_jmp tcg_out_jmp_m68k -#define tcg_out_jxx tcg_out_jxx_m68k -#define tcg_out_label tcg_out_label_m68k -#define tcg_out_ld tcg_out_ld_m68k -#define tcg_out_modrm tcg_out_modrm_m68k -#define tcg_out_modrm_offset tcg_out_modrm_offset_m68k -#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_m68k -#define tcg_out_mov tcg_out_mov_m68k -#define tcg_out_movcond32 tcg_out_movcond32_m68k -#define tcg_out_movcond64 tcg_out_movcond64_m68k -#define tcg_out_movi tcg_out_movi_m68k -#define tcg_out_op tcg_out_op_m68k -#define tcg_out_pop tcg_out_pop_m68k -#define tcg_out_push tcg_out_push_m68k -#define tcg_out_qemu_ld tcg_out_qemu_ld_m68k -#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_m68k -#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_m68k -#define tcg_out_qemu_st tcg_out_qemu_st_m68k -#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_m68k -#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_m68k -#define tcg_out_reloc tcg_out_reloc_m68k -#define tcg_out_rolw_8 tcg_out_rolw_8_m68k -#define tcg_out_setcond32 tcg_out_setcond32_m68k -#define tcg_out_setcond64 tcg_out_setcond64_m68k -#define tcg_out_shifti tcg_out_shifti_m68k -#define tcg_out_st tcg_out_st_m68k -#define tcg_out_tb_finalize tcg_out_tb_finalize_m68k -#define tcg_out_tb_init tcg_out_tb_init_m68k -#define tcg_out_tlb_load tcg_out_tlb_load_m68k -#define tcg_out_vex_modrm tcg_out_vex_modrm_m68k -#define tcg_patch32 tcg_patch32_m68k -#define tcg_patch8 tcg_patch8_m68k -#define tcg_pcrel_diff tcg_pcrel_diff_m68k -#define tcg_pool_reset tcg_pool_reset_m68k -#define tcg_prologue_init tcg_prologue_init_m68k -#define tcg_ptr_byte_diff tcg_ptr_byte_diff_m68k -#define tcg_reg_alloc tcg_reg_alloc_m68k -#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_m68k -#define tcg_reg_alloc_call tcg_reg_alloc_call_m68k -#define tcg_reg_alloc_mov tcg_reg_alloc_mov_m68k -#define tcg_reg_alloc_movi tcg_reg_alloc_movi_m68k -#define tcg_reg_alloc_op tcg_reg_alloc_op_m68k -#define tcg_reg_alloc_start tcg_reg_alloc_start_m68k -#define tcg_reg_free tcg_reg_free_m68k -#define tcg_reg_sync tcg_reg_sync_m68k -#define tcg_set_frame tcg_set_frame_m68k -#define tcg_set_nop tcg_set_nop_m68k -#define tcg_swap_cond tcg_swap_cond_m68k -#define tcg_target_callee_save_regs tcg_target_callee_save_regs_m68k -#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_m68k -#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_m68k -#define tcg_target_const_match tcg_target_const_match_m68k -#define tcg_target_init tcg_target_init_m68k -#define tcg_target_qemu_prologue tcg_target_qemu_prologue_m68k -#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_m68k -#define tcg_temp_alloc tcg_temp_alloc_m68k -#define tcg_temp_free_i32 tcg_temp_free_i32_m68k -#define tcg_temp_free_i64 tcg_temp_free_i64_m68k -#define tcg_temp_free_internal tcg_temp_free_internal_m68k -#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_m68k -#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_m68k -#define tcg_temp_new_i32 tcg_temp_new_i32_m68k -#define tcg_temp_new_i64 tcg_temp_new_i64_m68k -#define tcg_temp_new_internal tcg_temp_new_internal_m68k -#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_m68k -#define tcg_temp_new_internal_i64 
tcg_temp_new_internal_i64_m68k -#define tdb_hash tdb_hash_m68k -#define teecr_write teecr_write_m68k -#define teehbr_access teehbr_access_m68k -#define temp_allocate_frame temp_allocate_frame_m68k -#define temp_dead temp_dead_m68k -#define temps_are_copies temps_are_copies_m68k -#define temp_save temp_save_m68k -#define temp_sync temp_sync_m68k -#define tgen_arithi tgen_arithi_m68k -#define tgen_arithr tgen_arithr_m68k -#define thumb2_logic_op thumb2_logic_op_m68k -#define ti925t_initfn ti925t_initfn_m68k -#define tlb_add_large_page tlb_add_large_page_m68k -#define tlb_flush_entry tlb_flush_entry_m68k -#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_m68k -#define tlbi_aa64_asid_write tlbi_aa64_asid_write_m68k -#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_m68k -#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_m68k -#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_m68k -#define tlbi_aa64_va_write tlbi_aa64_va_write_m68k -#define tlbiall_is_write tlbiall_is_write_m68k -#define tlbiall_write tlbiall_write_m68k -#define tlbiasid_is_write tlbiasid_is_write_m68k -#define tlbiasid_write tlbiasid_write_m68k -#define tlbimvaa_is_write tlbimvaa_is_write_m68k -#define tlbimvaa_write tlbimvaa_write_m68k -#define tlbimva_is_write tlbimva_is_write_m68k -#define tlbimva_write tlbimva_write_m68k -#define tlb_is_dirty_ram tlb_is_dirty_ram_m68k +#define tcg_gen_sari_i64 tcg_gen_sari_i64_m68k +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_m68k +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_m68k +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_m68k +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_m68k +#define tcg_gen_muli_i64 tcg_gen_muli_i64_m68k +#define tcg_gen_div_i64 tcg_gen_div_i64_m68k +#define tcg_gen_rem_i64 tcg_gen_rem_i64_m68k +#define tcg_gen_divu_i64 tcg_gen_divu_i64_m68k +#define tcg_gen_remu_i64 tcg_gen_remu_i64_m68k +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_m68k +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_m68k +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_m68k +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_m68k +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_m68k +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_m68k +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_m68k +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_m68k +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_m68k +#define tcg_gen_not_i64 tcg_gen_not_i64_m68k +#define tcg_gen_andc_i64 tcg_gen_andc_i64_m68k +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_m68k +#define tcg_gen_nand_i64 tcg_gen_nand_i64_m68k +#define tcg_gen_nor_i64 tcg_gen_nor_i64_m68k +#define tcg_gen_orc_i64 tcg_gen_orc_i64_m68k +#define tcg_gen_clz_i64 tcg_gen_clz_i64_m68k +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_m68k +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_m68k +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_m68k +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_m68k +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_m68k +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_m68k +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_m68k +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_m68k +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_m68k +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_m68k +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_m68k +#define tcg_gen_extract_i64 tcg_gen_extract_i64_m68k +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_m68k +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_m68k +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_m68k +#define tcg_gen_add2_i64 tcg_gen_add2_i64_m68k +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_m68k +#define 
tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_m68k +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_m68k +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_m68k +#define tcg_gen_smin_i64 tcg_gen_smin_i64_m68k +#define tcg_gen_umin_i64 tcg_gen_umin_i64_m68k +#define tcg_gen_smax_i64 tcg_gen_smax_i64_m68k +#define tcg_gen_umax_i64 tcg_gen_umax_i64_m68k +#define tcg_gen_abs_i64 tcg_gen_abs_i64_m68k +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_m68k +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_m68k +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_m68k +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_m68k +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_m68k +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_m68k +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_m68k +#define tcg_gen_exit_tb tcg_gen_exit_tb_m68k +#define tcg_gen_goto_tb tcg_gen_goto_tb_m68k +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_m68k +#define check_exit_request check_exit_request_m68k +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_m68k +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_m68k +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_m68k +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_m68k +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_m68k +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_m68k +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_m68k +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_m68k +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_m68k +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_m68k +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_m68k +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_m68k +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_m68k +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_m68k +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_m68k +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_m68k +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_m68k +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_m68k +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_m68k +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_m68k +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_m68k +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_m68k +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_m68k +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_m68k +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_m68k +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_m68k +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_m68k +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_m68k +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_m68k +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_m68k +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_m68k +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_m68k +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_m68k +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_m68k +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_m68k +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_m68k +#define 
tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_m68k +#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_m68k +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_m68k +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_m68k +#define simd_desc simd_desc_m68k +#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_m68k +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_m68k +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_m68k +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_m68k +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_m68k +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_m68k +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_m68k +#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_m68k +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_m68k +#define tcg_gen_gvec_2 tcg_gen_gvec_2_m68k +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_m68k +#define tcg_gen_gvec_2s tcg_gen_gvec_2s_m68k +#define tcg_gen_gvec_3 tcg_gen_gvec_3_m68k +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_m68k +#define tcg_gen_gvec_4 tcg_gen_gvec_4_m68k +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_m68k +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_m68k +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_m68k +#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_m68k +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_m68k +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_m68k +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_m68k +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_m68k +#define tcg_gen_gvec_not tcg_gen_gvec_not_m68k +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_m68k +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_m68k +#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_m68k +#define tcg_gen_gvec_add tcg_gen_gvec_add_m68k +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_m68k +#define tcg_gen_gvec_addi tcg_gen_gvec_addi_m68k +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_m68k +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_m68k +#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_m68k +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_m68k +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_m68k +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_m68k +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_m68k +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_m68k +#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_m68k +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_m68k +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_m68k +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_m68k +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_m68k +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_m68k +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_m68k +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_m68k +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_m68k +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_m68k +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_m68k +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_m68k +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_m68k +#define tcg_gen_gvec_and tcg_gen_gvec_and_m68k +#define tcg_gen_gvec_or tcg_gen_gvec_or_m68k +#define tcg_gen_gvec_xor tcg_gen_gvec_xor_m68k +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_m68k +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_m68k +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_m68k +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_m68k +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_m68k +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_m68k +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_m68k +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_m68k +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_m68k +#define tcg_gen_gvec_ors 
tcg_gen_gvec_ors_m68k +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_m68k +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_m68k +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_m68k +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_m68k +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_m68k +#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_m68k +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_m68k +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_m68k +#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_m68k +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_m68k +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_m68k +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_m68k +#define tcg_gen_gvec_sars tcg_gen_gvec_sars_m68k +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_m68k +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_m68k +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_m68k +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_m68k +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_m68k +#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_m68k +#define vec_gen_2 vec_gen_2_m68k +#define vec_gen_3 vec_gen_3_m68k +#define vec_gen_4 vec_gen_4_m68k +#define tcg_gen_mov_vec tcg_gen_mov_vec_m68k +#define tcg_const_zeros_vec tcg_const_zeros_vec_m68k +#define tcg_const_ones_vec tcg_const_ones_vec_m68k +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_m68k +#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_m68k +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_m68k +#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_m68k +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_m68k +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_m68k +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_m68k +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_m68k +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_m68k +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_m68k +#define tcg_gen_ld_vec tcg_gen_ld_vec_m68k +#define tcg_gen_st_vec tcg_gen_st_vec_m68k +#define tcg_gen_stl_vec tcg_gen_stl_vec_m68k +#define tcg_gen_and_vec tcg_gen_and_vec_m68k +#define tcg_gen_or_vec tcg_gen_or_vec_m68k +#define tcg_gen_xor_vec tcg_gen_xor_vec_m68k +#define tcg_gen_andc_vec tcg_gen_andc_vec_m68k +#define tcg_gen_orc_vec tcg_gen_orc_vec_m68k +#define tcg_gen_nand_vec tcg_gen_nand_vec_m68k +#define tcg_gen_nor_vec tcg_gen_nor_vec_m68k +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_m68k +#define tcg_gen_not_vec tcg_gen_not_vec_m68k +#define tcg_gen_neg_vec tcg_gen_neg_vec_m68k +#define tcg_gen_abs_vec tcg_gen_abs_vec_m68k +#define tcg_gen_shli_vec tcg_gen_shli_vec_m68k +#define tcg_gen_shri_vec tcg_gen_shri_vec_m68k +#define tcg_gen_sari_vec tcg_gen_sari_vec_m68k +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_m68k +#define tcg_gen_add_vec tcg_gen_add_vec_m68k +#define tcg_gen_sub_vec tcg_gen_sub_vec_m68k +#define tcg_gen_mul_vec tcg_gen_mul_vec_m68k +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_m68k +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_m68k +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_m68k +#define tcg_gen_ussub_vec tcg_gen_ussub_vec_m68k +#define tcg_gen_smin_vec tcg_gen_smin_vec_m68k +#define tcg_gen_umin_vec tcg_gen_umin_vec_m68k +#define tcg_gen_smax_vec tcg_gen_smax_vec_m68k +#define tcg_gen_umax_vec tcg_gen_umax_vec_m68k +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_m68k +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_m68k +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_m68k +#define tcg_gen_shls_vec tcg_gen_shls_vec_m68k +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_m68k +#define tcg_gen_sars_vec tcg_gen_sars_vec_m68k +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_m68k 
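The run of defines above (and below) implements Unicorn's multi-target symbol namespacing: every QEMU global is aliased to a target-suffixed name, so the same source files can be compiled once per guest architecture and linked into one libunicorn without duplicate-symbol clashes. A minimal standalone sketch of the pattern (hypothetical demo code, not part of this patch):

    /* sketch.c: the renaming pattern in isolation */
    #include <stdio.h>

    #define cpu_exec cpu_exec_m68k      /* same shape as the defines above */

    /* compiles as 'int cpu_exec_m68k(int)'; an ARM build carrying
       '#define cpu_exec cpu_exec_arm' could link into the same binary */
    static int cpu_exec(int steps) { return steps - 1; }

    int main(void)
    {
        printf("%d\n", cpu_exec(3));    /* the call site is renamed too */
        return 0;
    }
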
+#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_m68k +#define tb_htable_lookup tb_htable_lookup_m68k +#define tb_set_jmp_target tb_set_jmp_target_m68k +#define cpu_exec cpu_exec_m68k +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_m68k +#define cpu_reloading_memory_map cpu_reloading_memory_map_m68k +#define cpu_loop_exit cpu_loop_exit_m68k +#define cpu_loop_exit_restore cpu_loop_exit_restore_m68k +#define cpu_loop_exit_atomic cpu_loop_exit_atomic_m68k +#define tlb_init tlb_init_m68k +#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_m68k +#define tlb_flush tlb_flush_m68k +#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_m68k +#define tlb_flush_all_cpus tlb_flush_all_cpus_m68k +#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_m68k +#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_m68k +#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_m68k +#define tlb_flush_page tlb_flush_page_m68k +#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_m68k +#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_m68k +#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_m68k +#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_m68k #define tlb_protect_code tlb_protect_code_m68k -#define tlb_reset_dirty_range tlb_reset_dirty_range_m68k -#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_m68k +#define tlb_unprotect_code tlb_unprotect_code_m68k +#define tlb_reset_dirty tlb_reset_dirty_m68k #define tlb_set_dirty tlb_set_dirty_m68k -#define tlb_set_dirty1 tlb_set_dirty1_m68k -#define tlb_unprotect_code_phys tlb_unprotect_code_phys_m68k +#define tlb_set_page_with_attrs tlb_set_page_with_attrs_m68k +#define tlb_set_page tlb_set_page_m68k +#define get_page_addr_code_hostp get_page_addr_code_hostp_m68k +#define get_page_addr_code get_page_addr_code_m68k +#define probe_access probe_access_m68k #define tlb_vaddr_to_host tlb_vaddr_to_host_m68k -#define token_get_type token_get_type_m68k -#define token_get_value token_get_value_m68k -#define token_is_escape token_is_escape_m68k -#define token_is_keyword token_is_keyword_m68k -#define token_is_operator token_is_operator_m68k -#define tokens_append_from_iter tokens_append_from_iter_m68k -#define to_qiv to_qiv_m68k -#define to_qov to_qov_m68k -#define tosa_init tosa_init_m68k -#define tosa_machine_init tosa_machine_init_m68k -#define tswap32 tswap32_m68k -#define tswap64 tswap64_m68k -#define type_class_get_size type_class_get_size_m68k -#define type_get_by_name type_get_by_name_m68k -#define type_get_parent type_get_parent_m68k -#define type_has_parent type_has_parent_m68k -#define type_initialize type_initialize_m68k -#define type_initialize_interface type_initialize_interface_m68k -#define type_is_ancestor type_is_ancestor_m68k -#define type_new type_new_m68k -#define type_object_get_size type_object_get_size_m68k -#define type_register_internal type_register_internal_m68k -#define type_table_add type_table_add_m68k -#define type_table_get type_table_get_m68k -#define type_table_lookup type_table_lookup_m68k -#define uint16_to_float32 uint16_to_float32_m68k -#define uint16_to_float64 uint16_to_float64_m68k -#define uint32_to_float32 uint32_to_float32_m68k -#define uint32_to_float64 uint32_to_float64_m68k -#define uint64_to_float128 uint64_to_float128_m68k -#define uint64_to_float32 uint64_to_float32_m68k -#define uint64_to_float64 uint64_to_float64_m68k -#define unassigned_io_ops unassigned_io_ops_m68k -#define 
unassigned_io_read unassigned_io_read_m68k -#define unassigned_io_write unassigned_io_write_m68k -#define unassigned_mem_accepts unassigned_mem_accepts_m68k +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_m68k +#define helper_le_lduw_mmu helper_le_lduw_mmu_m68k +#define helper_be_lduw_mmu helper_be_lduw_mmu_m68k +#define helper_le_ldul_mmu helper_le_ldul_mmu_m68k +#define helper_be_ldul_mmu helper_be_ldul_mmu_m68k +#define helper_le_ldq_mmu helper_le_ldq_mmu_m68k +#define helper_be_ldq_mmu helper_be_ldq_mmu_m68k +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_m68k +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_m68k +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_m68k +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_m68k +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_m68k +#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_m68k +#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_m68k +#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_m68k +#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_m68k +#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_m68k +#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_m68k +#define cpu_ldub_data_ra cpu_ldub_data_ra_m68k +#define cpu_ldsb_data_ra cpu_ldsb_data_ra_m68k +#define cpu_lduw_data_ra cpu_lduw_data_ra_m68k +#define cpu_ldsw_data_ra cpu_ldsw_data_ra_m68k +#define cpu_ldl_data_ra cpu_ldl_data_ra_m68k +#define cpu_ldq_data_ra cpu_ldq_data_ra_m68k +#define cpu_ldub_data cpu_ldub_data_m68k +#define cpu_ldsb_data cpu_ldsb_data_m68k +#define cpu_lduw_data cpu_lduw_data_m68k +#define cpu_ldsw_data cpu_ldsw_data_m68k +#define cpu_ldl_data cpu_ldl_data_m68k +#define cpu_ldq_data cpu_ldq_data_m68k +#define helper_ret_stb_mmu helper_ret_stb_mmu_m68k +#define helper_le_stw_mmu helper_le_stw_mmu_m68k +#define helper_be_stw_mmu helper_be_stw_mmu_m68k +#define helper_le_stl_mmu helper_le_stl_mmu_m68k +#define helper_be_stl_mmu helper_be_stl_mmu_m68k +#define helper_le_stq_mmu helper_le_stq_mmu_m68k +#define helper_be_stq_mmu helper_be_stq_mmu_m68k +#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_m68k +#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_m68k +#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_m68k +#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_m68k +#define cpu_stb_data_ra cpu_stb_data_ra_m68k +#define cpu_stw_data_ra cpu_stw_data_ra_m68k +#define cpu_stl_data_ra cpu_stl_data_ra_m68k +#define cpu_stq_data_ra cpu_stq_data_ra_m68k +#define cpu_stb_data cpu_stb_data_m68k +#define cpu_stw_data cpu_stw_data_m68k +#define cpu_stl_data cpu_stl_data_m68k +#define cpu_stq_data cpu_stq_data_m68k +#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_m68k +#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_m68k +#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_m68k +#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_m68k +#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_m68k +#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_m68k +#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_m68k +#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_m68k +#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_m68k +#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_m68k +#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_m68k +#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_m68k +#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_m68k +#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_m68k +#define helper_atomic_smin_fetchb_mmu 
helper_atomic_smin_fetchb_mmu_m68k +#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_m68k +#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_m68k +#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_m68k +#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_m68k +#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_m68k +#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_m68k +#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_m68k +#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_m68k +#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_m68k +#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_m68k +#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_m68k +#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_m68k +#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_m68k +#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_m68k +#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_m68k +#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_m68k +#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_m68k +#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_m68k +#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_m68k +#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_m68k +#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_m68k +#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_m68k +#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_m68k +#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_m68k +#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_m68k +#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_m68k +#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_m68k +#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_m68k +#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_m68k +#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_m68k +#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_m68k +#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_m68k +#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_m68k +#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_m68k +#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_m68k +#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_m68k +#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_m68k +#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_m68k +#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_m68k +#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_m68k +#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_m68k +#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_m68k +#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_m68k +#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_m68k +#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_m68k +#define helper_atomic_add_fetchl_le_mmu 
helper_atomic_add_fetchl_le_mmu_m68k +#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_m68k +#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_m68k +#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_m68k +#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_m68k +#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_m68k +#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_m68k +#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_m68k +#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_m68k +#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_m68k +#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_m68k +#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_m68k +#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_m68k +#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_m68k +#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_m68k +#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_m68k +#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_m68k +#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_m68k +#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_m68k +#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_m68k +#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_m68k +#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_m68k +#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_m68k +#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_m68k +#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_m68k +#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_m68k +#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_m68k +#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_m68k +#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_m68k +#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_m68k +#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_m68k +#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_m68k +#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_m68k +#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_m68k +#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_m68k +#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_m68k +#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_m68k +#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_m68k +#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_m68k +#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_m68k +#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_m68k +#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_m68k +#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_m68k +#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_m68k +#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_m68k +#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_m68k 
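The atomic helper names above follow a fixed grammar: the size suffix b/w/l/q means 8/16/32/64 bits, _le/_be is guest endianness, _mmu marks the softmmu variant, and fetch_<op> returns the old value while <op>_fetch returns the new one. A sketch of that result convention only (plain C; the real helpers use host atomics):

    #include <stdint.h>

    static uint32_t fetch_addl(uint32_t *p, uint32_t v)
    {
        uint32_t old = *p;   /* fetch-then-op: caller sees the old value */
        *p = old + v;
        return old;
    }

    static uint32_t add_fetchl(uint32_t *p, uint32_t v)
    {
        *p += v;             /* op-then-fetch: caller sees the new value */
        return *p;
    }
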
+#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_m68k +#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_m68k +#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_m68k +#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_m68k +#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_m68k +#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_m68k +#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_m68k +#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_m68k +#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_m68k +#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_m68k +#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_m68k +#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_m68k +#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_m68k +#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_m68k +#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_m68k +#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_m68k +#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_m68k +#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_m68k +#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_m68k +#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_m68k +#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_m68k +#define helper_atomic_xchgb helper_atomic_xchgb_m68k +#define helper_atomic_fetch_addb helper_atomic_fetch_addb_m68k +#define helper_atomic_fetch_andb helper_atomic_fetch_andb_m68k +#define helper_atomic_fetch_orb helper_atomic_fetch_orb_m68k +#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_m68k +#define helper_atomic_add_fetchb helper_atomic_add_fetchb_m68k +#define helper_atomic_and_fetchb helper_atomic_and_fetchb_m68k +#define helper_atomic_or_fetchb helper_atomic_or_fetchb_m68k +#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_m68k +#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_m68k +#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_m68k +#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_m68k +#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_m68k +#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_m68k +#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_m68k +#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_m68k +#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_m68k +#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_m68k +#define helper_atomic_xchgw_le helper_atomic_xchgw_le_m68k +#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_m68k +#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_m68k +#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_m68k +#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_m68k +#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_m68k +#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_m68k +#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_m68k +#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_m68k +#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_m68k +#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_m68k +#define 
helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_m68k +#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_m68k +#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_m68k +#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_m68k +#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_m68k +#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_m68k +#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_m68k +#define helper_atomic_xchgw_be helper_atomic_xchgw_be_m68k +#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_m68k +#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_m68k +#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_m68k +#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_m68k +#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_m68k +#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_m68k +#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_m68k +#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_m68k +#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_m68k +#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_m68k +#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_m68k +#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_m68k +#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_m68k +#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_m68k +#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_m68k +#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_m68k +#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_m68k +#define helper_atomic_xchgl_le helper_atomic_xchgl_le_m68k +#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_m68k +#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_m68k +#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_m68k +#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_m68k +#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_m68k +#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_m68k +#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_m68k +#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_m68k +#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_m68k +#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_m68k +#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_m68k +#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_m68k +#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_m68k +#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_m68k +#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_m68k +#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_m68k +#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_m68k +#define helper_atomic_xchgl_be helper_atomic_xchgl_be_m68k +#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_m68k +#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_m68k +#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_m68k +#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_m68k +#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_m68k +#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_m68k +#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_m68k 
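Each atomic op also appears above without the _mmu suffix. As a rough sketch (argument names and types are assumptions, not QEMU's exact signatures), the plain spelling is the same operation with the TLB context supplied implicitly:

    /* softmmu variant: explicit memop index and host return address */
    uint32_t helper_atomic_xchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                        uint32_t val, TCGMemOpIdx oi,
                                        uintptr_t retaddr);

    /* convenience variant: same op, return address taken at the call site */
    static inline uint32_t helper_atomic_xchgl_le(CPUArchState *env,
                                                  target_ulong addr,
                                                  uint32_t val, TCGMemOpIdx oi)
    {
        return helper_atomic_xchgl_le_mmu(env, addr, val, oi, GETPC());
    }
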
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_m68k +#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_m68k +#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_m68k +#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_m68k +#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_m68k +#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_m68k +#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_m68k +#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_m68k +#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_m68k +#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_m68k +#define helper_atomic_xchgq_le helper_atomic_xchgq_le_m68k +#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_m68k +#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_m68k +#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_m68k +#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_m68k +#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_m68k +#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_m68k +#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_m68k +#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_m68k +#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_m68k +#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_m68k +#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_m68k +#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_m68k +#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_m68k +#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_m68k +#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_m68k +#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_m68k +#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_m68k +#define helper_atomic_xchgq_be helper_atomic_xchgq_be_m68k +#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_m68k +#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_m68k +#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_m68k +#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_m68k +#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_m68k +#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_m68k +#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_m68k +#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_m68k +#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_m68k +#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_m68k +#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_m68k +#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_m68k +#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_m68k +#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_m68k +#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_m68k +#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_m68k +#define cpu_ldub_code cpu_ldub_code_m68k +#define cpu_lduw_code cpu_lduw_code_m68k +#define cpu_ldl_code cpu_ldl_code_m68k +#define cpu_ldq_code cpu_ldq_code_m68k +#define helper_div_i32 helper_div_i32_m68k +#define helper_rem_i32 helper_rem_i32_m68k +#define helper_divu_i32 helper_divu_i32_m68k +#define helper_remu_i32 helper_remu_i32_m68k +#define helper_shl_i64 helper_shl_i64_m68k 
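helper_div_i32 through helper_ctpop_i64 in this stretch are TCG's C fallbacks, called when the host has no native instruction for an IR op. The bodies are one-liners; representative examples (signatures assumed from the names):

    #include <stdint.h>

    int32_t helper_div_i32(int32_t a, int32_t b)
    {
        return a / b;   /* front ends guard zero/overflow before calling */
    }

    uint64_t helper_muluh_i64(uint64_t a, uint64_t b)
    {
        /* high 64 bits of the unsigned 128-bit product (GCC/Clang) */
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }
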
+#define helper_shr_i64 helper_shr_i64_m68k +#define helper_sar_i64 helper_sar_i64_m68k +#define helper_div_i64 helper_div_i64_m68k +#define helper_rem_i64 helper_rem_i64_m68k +#define helper_divu_i64 helper_divu_i64_m68k +#define helper_remu_i64 helper_remu_i64_m68k +#define helper_muluh_i64 helper_muluh_i64_m68k +#define helper_mulsh_i64 helper_mulsh_i64_m68k +#define helper_clz_i32 helper_clz_i32_m68k +#define helper_ctz_i32 helper_ctz_i32_m68k +#define helper_clz_i64 helper_clz_i64_m68k +#define helper_ctz_i64 helper_ctz_i64_m68k +#define helper_clrsb_i32 helper_clrsb_i32_m68k +#define helper_clrsb_i64 helper_clrsb_i64_m68k +#define helper_ctpop_i32 helper_ctpop_i32_m68k +#define helper_ctpop_i64 helper_ctpop_i64_m68k +#define helper_lookup_tb_ptr helper_lookup_tb_ptr_m68k +#define helper_exit_atomic helper_exit_atomic_m68k +#define helper_gvec_add8 helper_gvec_add8_m68k +#define helper_gvec_add16 helper_gvec_add16_m68k +#define helper_gvec_add32 helper_gvec_add32_m68k +#define helper_gvec_add64 helper_gvec_add64_m68k +#define helper_gvec_adds8 helper_gvec_adds8_m68k +#define helper_gvec_adds16 helper_gvec_adds16_m68k +#define helper_gvec_adds32 helper_gvec_adds32_m68k +#define helper_gvec_adds64 helper_gvec_adds64_m68k +#define helper_gvec_sub8 helper_gvec_sub8_m68k +#define helper_gvec_sub16 helper_gvec_sub16_m68k +#define helper_gvec_sub32 helper_gvec_sub32_m68k +#define helper_gvec_sub64 helper_gvec_sub64_m68k +#define helper_gvec_subs8 helper_gvec_subs8_m68k +#define helper_gvec_subs16 helper_gvec_subs16_m68k +#define helper_gvec_subs32 helper_gvec_subs32_m68k +#define helper_gvec_subs64 helper_gvec_subs64_m68k +#define helper_gvec_mul8 helper_gvec_mul8_m68k +#define helper_gvec_mul16 helper_gvec_mul16_m68k +#define helper_gvec_mul32 helper_gvec_mul32_m68k +#define helper_gvec_mul64 helper_gvec_mul64_m68k +#define helper_gvec_muls8 helper_gvec_muls8_m68k +#define helper_gvec_muls16 helper_gvec_muls16_m68k +#define helper_gvec_muls32 helper_gvec_muls32_m68k +#define helper_gvec_muls64 helper_gvec_muls64_m68k +#define helper_gvec_neg8 helper_gvec_neg8_m68k +#define helper_gvec_neg16 helper_gvec_neg16_m68k +#define helper_gvec_neg32 helper_gvec_neg32_m68k +#define helper_gvec_neg64 helper_gvec_neg64_m68k +#define helper_gvec_abs8 helper_gvec_abs8_m68k +#define helper_gvec_abs16 helper_gvec_abs16_m68k +#define helper_gvec_abs32 helper_gvec_abs32_m68k +#define helper_gvec_abs64 helper_gvec_abs64_m68k +#define helper_gvec_mov helper_gvec_mov_m68k +#define helper_gvec_dup64 helper_gvec_dup64_m68k +#define helper_gvec_dup32 helper_gvec_dup32_m68k +#define helper_gvec_dup16 helper_gvec_dup16_m68k +#define helper_gvec_dup8 helper_gvec_dup8_m68k +#define helper_gvec_not helper_gvec_not_m68k +#define helper_gvec_and helper_gvec_and_m68k +#define helper_gvec_or helper_gvec_or_m68k +#define helper_gvec_xor helper_gvec_xor_m68k +#define helper_gvec_andc helper_gvec_andc_m68k +#define helper_gvec_orc helper_gvec_orc_m68k +#define helper_gvec_nand helper_gvec_nand_m68k +#define helper_gvec_nor helper_gvec_nor_m68k +#define helper_gvec_eqv helper_gvec_eqv_m68k +#define helper_gvec_ands helper_gvec_ands_m68k +#define helper_gvec_xors helper_gvec_xors_m68k +#define helper_gvec_ors helper_gvec_ors_m68k +#define helper_gvec_shl8i helper_gvec_shl8i_m68k +#define helper_gvec_shl16i helper_gvec_shl16i_m68k +#define helper_gvec_shl32i helper_gvec_shl32i_m68k +#define helper_gvec_shl64i helper_gvec_shl64i_m68k +#define helper_gvec_shr8i helper_gvec_shr8i_m68k +#define helper_gvec_shr16i 
helper_gvec_shr16i_m68k +#define helper_gvec_shr32i helper_gvec_shr32i_m68k +#define helper_gvec_shr64i helper_gvec_shr64i_m68k +#define helper_gvec_sar8i helper_gvec_sar8i_m68k +#define helper_gvec_sar16i helper_gvec_sar16i_m68k +#define helper_gvec_sar32i helper_gvec_sar32i_m68k +#define helper_gvec_sar64i helper_gvec_sar64i_m68k +#define helper_gvec_shl8v helper_gvec_shl8v_m68k +#define helper_gvec_shl16v helper_gvec_shl16v_m68k +#define helper_gvec_shl32v helper_gvec_shl32v_m68k +#define helper_gvec_shl64v helper_gvec_shl64v_m68k +#define helper_gvec_shr8v helper_gvec_shr8v_m68k +#define helper_gvec_shr16v helper_gvec_shr16v_m68k +#define helper_gvec_shr32v helper_gvec_shr32v_m68k +#define helper_gvec_shr64v helper_gvec_shr64v_m68k +#define helper_gvec_sar8v helper_gvec_sar8v_m68k +#define helper_gvec_sar16v helper_gvec_sar16v_m68k +#define helper_gvec_sar32v helper_gvec_sar32v_m68k +#define helper_gvec_sar64v helper_gvec_sar64v_m68k +#define helper_gvec_eq8 helper_gvec_eq8_m68k +#define helper_gvec_ne8 helper_gvec_ne8_m68k +#define helper_gvec_lt8 helper_gvec_lt8_m68k +#define helper_gvec_le8 helper_gvec_le8_m68k +#define helper_gvec_ltu8 helper_gvec_ltu8_m68k +#define helper_gvec_leu8 helper_gvec_leu8_m68k +#define helper_gvec_eq16 helper_gvec_eq16_m68k +#define helper_gvec_ne16 helper_gvec_ne16_m68k +#define helper_gvec_lt16 helper_gvec_lt16_m68k +#define helper_gvec_le16 helper_gvec_le16_m68k +#define helper_gvec_ltu16 helper_gvec_ltu16_m68k +#define helper_gvec_leu16 helper_gvec_leu16_m68k +#define helper_gvec_eq32 helper_gvec_eq32_m68k +#define helper_gvec_ne32 helper_gvec_ne32_m68k +#define helper_gvec_lt32 helper_gvec_lt32_m68k +#define helper_gvec_le32 helper_gvec_le32_m68k +#define helper_gvec_ltu32 helper_gvec_ltu32_m68k +#define helper_gvec_leu32 helper_gvec_leu32_m68k +#define helper_gvec_eq64 helper_gvec_eq64_m68k +#define helper_gvec_ne64 helper_gvec_ne64_m68k +#define helper_gvec_lt64 helper_gvec_lt64_m68k +#define helper_gvec_le64 helper_gvec_le64_m68k +#define helper_gvec_ltu64 helper_gvec_ltu64_m68k +#define helper_gvec_leu64 helper_gvec_leu64_m68k +#define helper_gvec_ssadd8 helper_gvec_ssadd8_m68k +#define helper_gvec_ssadd16 helper_gvec_ssadd16_m68k +#define helper_gvec_ssadd32 helper_gvec_ssadd32_m68k +#define helper_gvec_ssadd64 helper_gvec_ssadd64_m68k +#define helper_gvec_sssub8 helper_gvec_sssub8_m68k +#define helper_gvec_sssub16 helper_gvec_sssub16_m68k +#define helper_gvec_sssub32 helper_gvec_sssub32_m68k +#define helper_gvec_sssub64 helper_gvec_sssub64_m68k +#define helper_gvec_usadd8 helper_gvec_usadd8_m68k +#define helper_gvec_usadd16 helper_gvec_usadd16_m68k +#define helper_gvec_usadd32 helper_gvec_usadd32_m68k +#define helper_gvec_usadd64 helper_gvec_usadd64_m68k +#define helper_gvec_ussub8 helper_gvec_ussub8_m68k +#define helper_gvec_ussub16 helper_gvec_ussub16_m68k +#define helper_gvec_ussub32 helper_gvec_ussub32_m68k +#define helper_gvec_ussub64 helper_gvec_ussub64_m68k +#define helper_gvec_smin8 helper_gvec_smin8_m68k +#define helper_gvec_smin16 helper_gvec_smin16_m68k +#define helper_gvec_smin32 helper_gvec_smin32_m68k +#define helper_gvec_smin64 helper_gvec_smin64_m68k +#define helper_gvec_smax8 helper_gvec_smax8_m68k +#define helper_gvec_smax16 helper_gvec_smax16_m68k +#define helper_gvec_smax32 helper_gvec_smax32_m68k +#define helper_gvec_smax64 helper_gvec_smax64_m68k +#define helper_gvec_umin8 helper_gvec_umin8_m68k +#define helper_gvec_umin16 helper_gvec_umin16_m68k +#define helper_gvec_umin32 helper_gvec_umin32_m68k +#define 
helper_gvec_umin64 helper_gvec_umin64_m68k +#define helper_gvec_umax8 helper_gvec_umax8_m68k +#define helper_gvec_umax16 helper_gvec_umax16_m68k +#define helper_gvec_umax32 helper_gvec_umax32_m68k +#define helper_gvec_umax64 helper_gvec_umax64_m68k +#define helper_gvec_bitsel helper_gvec_bitsel_m68k +#define cpu_restore_state cpu_restore_state_m68k +#define page_collection_lock page_collection_lock_m68k +#define page_collection_unlock page_collection_unlock_m68k +#define free_code_gen_buffer free_code_gen_buffer_m68k +#define tcg_exec_init tcg_exec_init_m68k +#define tb_cleanup tb_cleanup_m68k +#define tb_flush tb_flush_m68k +#define tb_phys_invalidate tb_phys_invalidate_m68k +#define tb_gen_code tb_gen_code_m68k +#define tb_exec_lock tb_exec_lock_m68k +#define tb_exec_unlock tb_exec_unlock_m68k +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_m68k +#define tb_invalidate_phys_range tb_invalidate_phys_range_m68k +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_m68k +#define tb_check_watchpoint tb_check_watchpoint_m68k +#define cpu_io_recompile cpu_io_recompile_m68k +#define tb_flush_jmp_cache tb_flush_jmp_cache_m68k +#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_m68k +#define translator_loop_temp_check translator_loop_temp_check_m68k +#define translator_loop translator_loop_m68k +#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_m68k +#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_m68k +#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_m68k +#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_m68k +#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_m68k +#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_m68k #define unassigned_mem_ops unassigned_mem_ops_m68k -#define unassigned_mem_read unassigned_mem_read_m68k -#define unassigned_mem_write unassigned_mem_write_m68k -#define update_spsel update_spsel_m68k -#define v6_cp_reginfo v6_cp_reginfo_m68k -#define v6k_cp_reginfo v6k_cp_reginfo_m68k -#define v7_cp_reginfo v7_cp_reginfo_m68k -#define v7mp_cp_reginfo v7mp_cp_reginfo_m68k -#define v7m_pop v7m_pop_m68k -#define v7m_push v7m_push_m68k -#define v8_cp_reginfo v8_cp_reginfo_m68k -#define v8_el2_cp_reginfo v8_el2_cp_reginfo_m68k -#define v8_el3_cp_reginfo v8_el3_cp_reginfo_m68k -#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_m68k -#define vapa_cp_reginfo vapa_cp_reginfo_m68k -#define vbar_write vbar_write_m68k -#define vfp_exceptbits_from_host vfp_exceptbits_from_host_m68k -#define vfp_exceptbits_to_host vfp_exceptbits_to_host_m68k -#define vfp_get_fpcr vfp_get_fpcr_m68k -#define vfp_get_fpscr vfp_get_fpscr_m68k -#define vfp_get_fpsr vfp_get_fpsr_m68k -#define vfp_reg_offset vfp_reg_offset_m68k -#define vfp_set_fpcr vfp_set_fpcr_m68k -#define vfp_set_fpscr vfp_set_fpscr_m68k -#define vfp_set_fpsr vfp_set_fpsr_m68k -#define visit_end_implicit_struct visit_end_implicit_struct_m68k -#define visit_end_list visit_end_list_m68k -#define visit_end_struct visit_end_struct_m68k -#define visit_end_union visit_end_union_m68k -#define visit_get_next_type visit_get_next_type_m68k -#define visit_next_list visit_next_list_m68k -#define visit_optional visit_optional_m68k -#define visit_start_implicit_struct visit_start_implicit_struct_m68k -#define visit_start_list visit_start_list_m68k -#define visit_start_struct visit_start_struct_m68k -#define visit_start_union visit_start_union_m68k -#define vmsa_cp_reginfo vmsa_cp_reginfo_m68k -#define vmsa_tcr_el1_write vmsa_tcr_el1_write_m68k 
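The tb_* renames above cover the translation-block cache. Hedged, heavily simplified pseudocode for the loop they serve:

    /* pseudocode only; the real lookups also take cs_base/flags/cf_mask */
    for (;;) {
        TranslationBlock *tb = tb_htable_lookup(cpu, pc, ...);
        if (tb == NULL) {
            tb = tb_gen_code(cpu, pc, ...);   /* translate once, cache */
        }
        execute_host_code(cpu, tb);
        /* guest writes to a translated page reach
           tb_invalidate_phys_page_range(), which evicts stale TBs so
           the page is retranslated on its next execution */
    }
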
-#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_m68k -#define vmsa_ttbcr_reset vmsa_ttbcr_reset_m68k -#define vmsa_ttbcr_write vmsa_ttbcr_write_m68k -#define vmsa_ttbr_write vmsa_ttbr_write_m68k -#define write_cpustate_to_list write_cpustate_to_list_m68k -#define write_list_to_cpustate write_list_to_cpustate_m68k -#define write_raw_cp_reg write_raw_cp_reg_m68k -#define X86CPURegister32_lookup X86CPURegister32_lookup_m68k -#define x86_op_defs x86_op_defs_m68k -#define xpsr_read xpsr_read_m68k -#define xpsr_write xpsr_write_m68k -#define xscale_cpar_write xscale_cpar_write_m68k -#define xscale_cp_reginfo xscale_cp_reginfo_m68k +#define floatx80_infinity floatx80_infinity_m68k +#define dup_const_func dup_const_func_m68k +#define gen_helper_raise_exception gen_helper_raise_exception_m68k +#define gen_helper_raise_interrupt gen_helper_raise_interrupt_m68k +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_m68k +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_m68k +#define gen_helper_cpsr_read gen_helper_cpsr_read_m68k +#define gen_helper_cpsr_write gen_helper_cpsr_write_m68k +#define cpu_m68k_init cpu_m68k_init_m68k +#define helper_reds32 helper_reds32_m68k +#define helper_redf32 helper_redf32_m68k +#define helper_exts32 helper_exts32_m68k +#define helper_extf32 helper_extf32_m68k +#define helper_extf64 helper_extf64_m68k +#define helper_redf64 helper_redf64_m68k +#define helper_firound helper_firound_m68k +#define cpu_m68k_set_fpcr cpu_m68k_set_fpcr_m68k +#define helper_fitrunc helper_fitrunc_m68k +#define helper_set_fpcr helper_set_fpcr_m68k +#define helper_fsround helper_fsround_m68k +#define helper_fdround helper_fdround_m68k +#define helper_fsqrt helper_fsqrt_m68k +#define helper_fssqrt helper_fssqrt_m68k +#define helper_fdsqrt helper_fdsqrt_m68k +#define helper_fabs helper_fabs_m68k +#define helper_fsabs helper_fsabs_m68k +#define helper_fdabs helper_fdabs_m68k +#define helper_fneg helper_fneg_m68k +#define helper_fsneg helper_fsneg_m68k +#define helper_fdneg helper_fdneg_m68k +#define helper_fadd helper_fadd_m68k +#define helper_fsadd helper_fsadd_m68k +#define helper_fdadd helper_fdadd_m68k +#define helper_fsub helper_fsub_m68k +#define helper_fssub helper_fssub_m68k +#define helper_fdsub helper_fdsub_m68k +#define helper_fmul helper_fmul_m68k +#define helper_fsmul helper_fsmul_m68k +#define helper_fdmul helper_fdmul_m68k +#define helper_fsglmul helper_fsglmul_m68k +#define helper_fdiv helper_fdiv_m68k +#define helper_fsdiv helper_fsdiv_m68k +#define helper_fddiv helper_fddiv_m68k +#define helper_fsgldiv helper_fsgldiv_m68k +#define helper_fcmp helper_fcmp_m68k +#define helper_ftst helper_ftst_m68k +#define helper_fconst helper_fconst_m68k +#define helper_fmovemx_st_predec helper_fmovemx_st_predec_m68k +#define helper_fmovemx_st_postinc helper_fmovemx_st_postinc_m68k +#define helper_fmovemx_ld_postinc helper_fmovemx_ld_postinc_m68k +#define helper_fmovemd_st_predec helper_fmovemd_st_predec_m68k +#define helper_fmovemd_st_postinc helper_fmovemd_st_postinc_m68k +#define helper_fmovemd_ld_postinc helper_fmovemd_ld_postinc_m68k +#define helper_fmod helper_fmod_m68k +#define helper_frem helper_frem_m68k +#define helper_fgetexp helper_fgetexp_m68k +#define helper_fgetman helper_fgetman_m68k +#define helper_fscale helper_fscale_m68k +#define helper_flognp1 helper_flognp1_m68k +#define helper_flogn helper_flogn_m68k +#define helper_flog10 helper_flog10_m68k +#define helper_flog2 helper_flog2_m68k +#define helper_fetox helper_fetox_m68k +#define helper_ftwotox 
helper_ftwotox_m68k +#define helper_ftentox helper_ftentox_m68k +#define helper_ftan helper_ftan_m68k +#define helper_fsin helper_fsin_m68k +#define helper_fcos helper_fcos_m68k +#define helper_fsincos helper_fsincos_m68k +#define helper_fatan helper_fatan_m68k +#define helper_fasin helper_fasin_m68k +#define helper_facos helper_facos_m68k +#define helper_fatanh helper_fatanh_m68k +#define helper_ftanh helper_ftanh_m68k +#define helper_fsinh helper_fsinh_m68k +#define helper_fcosh helper_fcosh_m68k +#define helper_cf_movec_to helper_cf_movec_to_m68k +#define helper_m68k_movec_to helper_m68k_movec_to_m68k +#define helper_m68k_movec_from helper_m68k_movec_from_m68k +#define helper_set_macsr helper_set_macsr_m68k +#define m68k_switch_sp m68k_switch_sp_m68k +#define m68k_cpu_get_phys_page_debug m68k_cpu_get_phys_page_debug_m68k +#define m68k_set_irq_level m68k_set_irq_level_m68k +#define m68k_cpu_tlb_fill m68k_cpu_tlb_fill_m68k +#define helper_bitrev helper_bitrev_m68k +#define helper_ff1 helper_ff1_m68k +#define helper_sats helper_sats_m68k +#define cpu_m68k_set_sr cpu_m68k_set_sr_m68k +#define helper_set_sr helper_set_sr_m68k +#define helper_mac_move helper_mac_move_m68k +#define helper_macmuls helper_macmuls_m68k +#define helper_macmulu helper_macmulu_m68k +#define helper_macmulf helper_macmulf_m68k +#define helper_macsats helper_macsats_m68k +#define helper_macsatu helper_macsatu_m68k +#define helper_macsatf helper_macsatf_m68k +#define helper_mac_set_flags helper_mac_set_flags_m68k +#define cpu_m68k_get_ccr cpu_m68k_get_ccr_m68k +#define helper_get_ccr helper_get_ccr_m68k +#define cpu_m68k_set_ccr cpu_m68k_set_ccr_m68k +#define helper_set_ccr helper_set_ccr_m68k +#define helper_flush_flags helper_flush_flags_m68k +#define helper_get_macf helper_get_macf_m68k +#define helper_get_macs helper_get_macs_m68k +#define helper_get_macu helper_get_macu_m68k +#define helper_get_mac_extf helper_get_mac_extf_m68k +#define helper_get_mac_exti helper_get_mac_exti_m68k +#define helper_set_mac_extf helper_set_mac_extf_m68k +#define helper_set_mac_exts helper_set_mac_exts_m68k +#define helper_set_mac_extu helper_set_mac_extu_m68k +#define helper_ptest helper_ptest_m68k +#define helper_pflush helper_pflush_m68k +#define helper_reset helper_reset_m68k +#define m68k_cpu_do_interrupt m68k_cpu_do_interrupt_m68k +#define m68k_cpu_transaction_failed m68k_cpu_transaction_failed_m68k +#define m68k_cpu_exec_interrupt m68k_cpu_exec_interrupt_m68k +#define helper_raise_exception helper_raise_exception_m68k +#define helper_divuw helper_divuw_m68k +#define helper_divsw helper_divsw_m68k +#define helper_divul helper_divul_m68k +#define helper_divsl helper_divsl_m68k +#define helper_divull helper_divull_m68k +#define helper_divsll helper_divsll_m68k +#define helper_cas2w helper_cas2w_m68k +#define helper_cas2l helper_cas2l_m68k +#define helper_cas2l_parallel helper_cas2l_parallel_m68k +#define helper_bfexts_mem helper_bfexts_mem_m68k +#define helper_bfextu_mem helper_bfextu_mem_m68k +#define helper_bfins_mem helper_bfins_mem_m68k +#define helper_bfchg_mem helper_bfchg_mem_m68k +#define helper_bfclr_mem helper_bfclr_mem_m68k +#define helper_bfset_mem helper_bfset_mem_m68k +#define helper_bfffo_reg helper_bfffo_reg_m68k +#define helper_bfffo_mem helper_bfffo_mem_m68k +#define helper_chk helper_chk_m68k +#define helper_chk2 helper_chk2_m68k +#define floatx80_mod floatx80_mod_m68k +#define floatx80_getman floatx80_getman_m68k +#define floatx80_getexp floatx80_getexp_m68k +#define floatx80_scale floatx80_scale_m68k +#define 
floatx80_move floatx80_move_m68k +#define floatx80_lognp1 floatx80_lognp1_m68k +#define floatx80_logn floatx80_logn_m68k +#define floatx80_log10 floatx80_log10_m68k +#define floatx80_log2 floatx80_log2_m68k +#define floatx80_etox floatx80_etox_m68k +#define floatx80_twotox floatx80_twotox_m68k +#define floatx80_tentox floatx80_tentox_m68k +#define floatx80_tan floatx80_tan_m68k +#define floatx80_sin floatx80_sin_m68k +#define floatx80_cos floatx80_cos_m68k +#define floatx80_atan floatx80_atan_m68k +#define floatx80_asin floatx80_asin_m68k +#define floatx80_acos floatx80_acos_m68k +#define floatx80_atanh floatx80_atanh_m68k +#define floatx80_etoxm1 floatx80_etoxm1_m68k +#define floatx80_tanh floatx80_tanh_m68k +#define floatx80_sinh floatx80_sinh_m68k +#define floatx80_cosh floatx80_cosh_m68k +#define m68k_tcg_init m68k_tcg_init_m68k +#define register_m68k_insns register_m68k_insns_m68k +#define gen_intermediate_code gen_intermediate_code_m68k +#define restore_state_to_opc restore_state_to_opc_m68k +#define m68k_reg_reset m68k_reg_reset_m68k +#define m68k_reg_read m68k_reg_read_m68k +#define m68k_reg_write m68k_reg_write_m68k #endif diff --git a/qemu/memory.c b/qemu/memory.c deleted file mode 100644 index 50ebefd1..00000000 --- a/qemu/memory.c +++ /dev/null @@ -1,1616 +0,0 @@ -/* - * Physical memory management - * - * Copyright 2011 Red Hat, Inc. and/or its affiliates - * - * Authors: - * Avi Kivity - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ - -#include "exec/memory.h" -#include "exec/address-spaces.h" -#include "exec/ioport.h" -#include "qapi/visitor.h" -#include "qemu/bitops.h" -#include "qom/object.h" -#include - -#include "exec/memory-internal.h" -#include "exec/ram_addr.h" -#include "sysemu/sysemu.h" - -//#define DEBUG_UNASSIGNED - - -// Unicorn engine -MemoryRegion *memory_map(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms) -{ - MemoryRegion *ram = g_new(MemoryRegion, 1); - - memory_region_init_ram(uc, ram, NULL, "pc.ram", size, perms, &error_abort); - if (ram->ram_addr == -1) - // out of memory - return NULL; - - memory_region_add_subregion(get_system_memory(uc), begin, ram); - - if (uc->current_cpu) - tlb_flush(uc->current_cpu, 1); - - return ram; -} - -MemoryRegion *memory_map_ptr(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms, void *ptr) -{ - MemoryRegion *ram = g_new(MemoryRegion, 1); - - memory_region_init_ram_ptr(uc, ram, NULL, "pc.ram", size, ptr); - ram->perms = perms; - if (ram->ram_addr == -1) - // out of memory - return NULL; - - memory_region_add_subregion(get_system_memory(uc), begin, ram); - - if (uc->current_cpu) - tlb_flush(uc->current_cpu, 1); - - return ram; -} - -static void memory_region_update_container_subregions(MemoryRegion *subregion); - -void memory_unmap(struct uc_struct *uc, MemoryRegion *mr) -{ - int i; - target_ulong addr; - Object *obj; - - // Make sure all pages associated with the MemoryRegion are flushed - // Only need to do this if we are in a running state - if (uc->current_cpu) { - for (addr = mr->addr; addr < mr->end; addr += uc->target_page_size) { - tlb_flush_page(uc->current_cpu, addr); - } - } - memory_region_del_subregion(get_system_memory(uc), mr); - - for (i = 0; i < uc->mapped_block_count; i++) { - if (uc->mapped_blocks[i] 
== mr) { - uc->mapped_block_count--; - //shift remainder of array down over deleted pointer - memmove(&uc->mapped_blocks[i], &uc->mapped_blocks[i + 1], sizeof(MemoryRegion*) * (uc->mapped_block_count - i)); - mr->destructor(mr); - obj = OBJECT(mr); - obj->ref = 1; - obj->free = g_free; - g_free((char *)mr->name); - mr->name = NULL; - object_property_del_child(mr->uc, qdev_get_machine(mr->uc), obj, &error_abort); - break; - } - } -} - -int memory_free(struct uc_struct *uc) -{ - MemoryRegion *mr; - Object *obj; - int i; - - for (i = 0; i < uc->mapped_block_count; i++) { - mr = uc->mapped_blocks[i]; - mr->enabled = false; - memory_region_del_subregion(get_system_memory(uc), mr); - mr->destructor(mr); - obj = OBJECT(mr); - obj->ref = 1; - obj->free = g_free; - object_property_del_child(mr->uc, qdev_get_machine(mr->uc), obj, &error_abort); - } - - return 0; -} - -static void memory_init(struct uc_struct *uc) -{ -} - -typedef struct AddrRange AddrRange; - -/* - * Note that signed integers are needed for negative offsetting in aliases - * (large MemoryRegion::alias_offset). - */ -struct AddrRange { - Int128 start; - Int128 size; -}; - -static AddrRange addrrange_make(Int128 start, Int128 size) -{ - AddrRange ar; - ar.start = start; - ar.size = size; - return ar; -} - -static bool addrrange_equal(AddrRange r1, AddrRange r2) -{ - return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size); -} - -static Int128 addrrange_end(AddrRange r) -{ - return int128_add(r.start, r.size); -} - -static bool addrrange_contains(AddrRange range, Int128 addr) -{ - return int128_ge(addr, range.start) - && int128_lt(addr, addrrange_end(range)); -} - -static bool addrrange_intersects(AddrRange r1, AddrRange r2) -{ - return addrrange_contains(r1, r2.start) - || addrrange_contains(r2, r1.start); -} - -static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) -{ - Int128 start = int128_max(r1.start, r2.start); - Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2)); - return addrrange_make(start, int128_sub(end, start)); -} - -enum ListenerDirection { Forward, Reverse }; - -static bool memory_listener_match(MemoryListener *listener, - MemoryRegionSection *section) -{ - return !listener->address_space_filter - || listener->address_space_filter == section->address_space; -} - -#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, ...) \ - do { \ - MemoryListener *_listener; \ - \ - switch (_direction) { \ - case Forward: \ - QTAILQ_FOREACH(_listener, &uc->memory_listeners, link) { \ - if (_listener->_callback) { \ - _listener->_callback(_listener, ##__VA_ARGS__); \ - } \ - } \ - break; \ - case Reverse: \ - QTAILQ_FOREACH_REVERSE(_listener, &uc->memory_listeners, \ - memory_listeners, link) { \ - if (_listener->_callback) { \ - _listener->_callback(_listener, ##__VA_ARGS__); \ - } \ - } \ - break; \ - default: \ - abort(); \ - } \ - } while (0) - -#define MEMORY_LISTENER_CALL(_callback, _direction, _section, ...) 
\ - do { \ - MemoryListener *_listener; \ - \ - switch (_direction) { \ - case Forward: \ - QTAILQ_FOREACH(_listener, &uc->memory_listeners, link) { \ - if (_listener->_callback \ - && memory_listener_match(_listener, _section)) { \ - _listener->_callback(_listener, _section, ##__VA_ARGS__); \ - } \ - } \ - break; \ - case Reverse: \ - QTAILQ_FOREACH_REVERSE(_listener, &uc->memory_listeners, \ - memory_listeners, link) { \ - if (_listener->_callback \ - && memory_listener_match(_listener, _section)) { \ - _listener->_callback(_listener, _section, ##__VA_ARGS__); \ - } \ - } \ - break; \ - default: \ - abort(); \ - } \ - } while (0) - -/* No need to ref/unref .mr, the FlatRange keeps it alive. */ -#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback) \ - do { MemoryRegionSection _mrs = MemoryRegionSection_make((fr)->mr, as, (fr)->offset_in_region, \ - (fr)->addr.size, int128_get64((fr)->addr.start), (fr)->readonly); \ - MEMORY_LISTENER_CALL(callback, dir, &_mrs); } while(0); - -/* - MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) { \ - .mr = (fr)->mr, \ - .address_space = (as), \ - .offset_within_region = (fr)->offset_in_region, \ - .size = (fr)->addr.size, \ - .offset_within_address_space = int128_get64((fr)->addr.start), \ - .readonly = (fr)->readonly, \ - })) -*/ - -typedef struct FlatRange FlatRange; -typedef struct FlatView FlatView; - -/* Range of memory in the global map. Addresses are absolute. */ -struct FlatRange { - MemoryRegion *mr; - hwaddr offset_in_region; - AddrRange addr; - uint8_t dirty_log_mask; - bool romd_mode; - bool readonly; -}; - -/* Flattened global view of current active memory hierarchy. Kept in sorted - * order. - */ -struct FlatView { - unsigned ref; - FlatRange *ranges; - unsigned nr; - unsigned nr_allocated; -}; - -typedef struct AddressSpaceOps AddressSpaceOps; - -#define FOR_EACH_FLAT_RANGE(var, view) \ - for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) - -static bool flatrange_equal(FlatRange *a, FlatRange *b) -{ - return a->mr == b->mr - && addrrange_equal(a->addr, b->addr) - && a->offset_in_region == b->offset_in_region - && a->romd_mode == b->romd_mode - && a->readonly == b->readonly; -} - -static void flatview_init(FlatView *view) -{ - view->ref = 1; - view->ranges = NULL; - view->nr = 0; - view->nr_allocated = 0; -} - -/* Insert a range into a given position. Caller is responsible for maintaining - * sorting order. 
- */ -static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) -{ - if (view->nr == view->nr_allocated) { - view->nr_allocated = MAX(2 * view->nr, 10); - view->ranges = g_realloc(view->ranges, - view->nr_allocated * sizeof(*view->ranges)); - } - memmove(view->ranges + pos + 1, view->ranges + pos, - (view->nr - pos) * sizeof(FlatRange)); - view->ranges[pos] = *range; - memory_region_ref(range->mr); - ++view->nr; -} - -static void flatview_destroy(FlatView *view) -{ - int i; - - for (i = 0; i < view->nr; i++) { - memory_region_unref(view->ranges[i].mr); - } - g_free(view->ranges); - g_free(view); -} - -static void flatview_ref(FlatView *view) -{ - atomic_inc(&view->ref); -} - -static void flatview_unref(FlatView *view) -{ - if (atomic_fetch_dec(&view->ref) == 1) { - flatview_destroy(view); - } -} - -static bool can_merge(FlatRange *r1, FlatRange *r2) -{ - return int128_eq(addrrange_end(r1->addr), r2->addr.start) - && r1->mr == r2->mr - && int128_eq(int128_add(int128_make64(r1->offset_in_region), - r1->addr.size), - int128_make64(r2->offset_in_region)) - && r1->dirty_log_mask == r2->dirty_log_mask - && r1->romd_mode == r2->romd_mode - && r1->readonly == r2->readonly; -} - -/* Attempt to simplify a view by merging adjacent ranges */ -static void flatview_simplify(FlatView *view) -{ - unsigned i, j; - - i = 0; - while (i < view->nr) { - j = i + 1; - while (j < view->nr - && can_merge(&view->ranges[j-1], &view->ranges[j])) { - int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size); - ++j; - } - ++i; - memmove(&view->ranges[i], &view->ranges[j], - (view->nr - j) * sizeof(view->ranges[j])); - view->nr -= j - i; - } -} - -static bool memory_region_big_endian(MemoryRegion *mr) -{ -#ifdef TARGET_WORDS_BIGENDIAN - return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; -#else - return mr->ops->endianness == DEVICE_BIG_ENDIAN; -#endif -} - -static bool memory_region_wrong_endianness(MemoryRegion *mr) -{ -#ifdef TARGET_WORDS_BIGENDIAN - return mr->ops->endianness == DEVICE_LITTLE_ENDIAN; -#else - return mr->ops->endianness == DEVICE_BIG_ENDIAN; -#endif -} - -static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size) -{ - if (memory_region_wrong_endianness(mr)) { - switch (size) { - case 1: - break; - case 2: - *data = bswap16(*data); - break; - case 4: - *data = bswap32(*data); - break; - case 8: - *data = bswap64(*data); - break; - default: - abort(); - } - } -} - -static void memory_region_oldmmio_read_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask) -{ - uint64_t tmp; - - tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); - *value |= (tmp & mask) << shift; -} - -static void memory_region_read_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask) -{ - uint64_t tmp; - - tmp = mr->ops->read(mr->uc, mr->opaque, addr, size); - *value |= (tmp & mask) << shift; -} - -static void memory_region_oldmmio_write_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask) -{ - uint64_t tmp; - - tmp = (*value >> shift) & mask; - mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp); -} - -static void memory_region_write_accessor(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask) -{ - uint64_t tmp; - - tmp = (*value >> shift) & mask; - mr->ops->write(mr->uc, mr->opaque, addr, tmp, size); -} - -static void 
access_with_adjusted_size(hwaddr addr, - uint64_t *value, - unsigned size, - unsigned access_size_min, - unsigned access_size_max, - void (*access)(MemoryRegion *mr, - hwaddr addr, - uint64_t *value, - unsigned size, - unsigned shift, - uint64_t mask), - MemoryRegion *mr) -{ - uint64_t access_mask; - unsigned access_size; - unsigned i; - - if (!access_size_min) { - access_size_min = 1; - } - if (!access_size_max) { - access_size_max = 4; - } - - /* FIXME: support unaligned access? */ - access_size = MAX(MIN(size, access_size_max), access_size_min); - access_mask = (0-1ULL) >> (64 - access_size * 8); - if (memory_region_big_endian(mr)) { - for (i = 0; i < size; i += access_size) { - access(mr, addr + i, value, access_size, - (size - access_size - i) * 8, access_mask); - } - } else { - for (i = 0; i < size; i += access_size) { - access(mr, addr + i, value, access_size, i * 8, access_mask); - } - } -} - -static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) -{ - AddressSpace *as; - - while (mr->container) { - mr = mr->container; - } - QTAILQ_FOREACH(as, &mr->uc->address_spaces, address_spaces_link) { - if (mr == as->root) { - return as; - } - } - return NULL; -} - -/* Render a memory region into the global view. Ranges in @view obscure - * ranges in @mr. - */ -static void render_memory_region(FlatView *view, - MemoryRegion *mr, - Int128 base, - AddrRange clip, - bool readonly) -{ - MemoryRegion *subregion; - unsigned i; - hwaddr offset_in_region; - Int128 remain; - Int128 now; - FlatRange fr; - AddrRange tmp; - - if (!mr->enabled) { - return; - } - - int128_addto(&base, int128_make64(mr->addr)); - readonly |= mr->readonly; - - tmp = addrrange_make(base, mr->size); - - if (!addrrange_intersects(tmp, clip)) { - return; - } - - clip = addrrange_intersection(tmp, clip); - - if (mr->alias) { - int128_subfrom(&base, int128_make64(mr->alias->addr)); - int128_subfrom(&base, int128_make64(mr->alias_offset)); - render_memory_region(view, mr->alias, base, clip, readonly); - return; - } - - /* Render subregions in priority order. */ - QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { - render_memory_region(view, subregion, base, clip, readonly); - } - - if (!mr->terminates) { - return; - } - - offset_in_region = int128_get64(int128_sub(clip.start, base)); - base = clip.start; - remain = clip.size; - - fr.mr = mr; - fr.dirty_log_mask = mr->dirty_log_mask; - fr.romd_mode = mr->romd_mode; - fr.readonly = readonly; - - /* Render the region itself into any gaps left by the current view. */ - for (i = 0; i < view->nr && int128_nz(remain); ++i) { - if (int128_ge(base, addrrange_end(view->ranges[i].addr))) { - continue; - } - if (int128_lt(base, view->ranges[i].addr.start)) { - now = int128_min(remain, - int128_sub(view->ranges[i].addr.start, base)); - fr.offset_in_region = offset_in_region; - fr.addr = addrrange_make(base, now); - flatview_insert(view, i, &fr); - ++i; - int128_addto(&base, now); - offset_in_region += int128_get64(now); - int128_subfrom(&remain, now); - } - now = int128_sub(int128_min(int128_add(base, remain), - addrrange_end(view->ranges[i].addr)), - base); - int128_addto(&base, now); - offset_in_region += int128_get64(now); - int128_subfrom(&remain, now); - } - if (int128_nz(remain)) { - fr.offset_in_region = offset_in_region; - fr.addr = addrrange_make(base, remain); - flatview_insert(view, i, &fr); - } -} - -/* Render a memory topology into a list of disjoint absolute ranges. 
*/ -static FlatView *generate_memory_topology(MemoryRegion *mr) -{ - FlatView *view; - - view = g_new(FlatView, 1); - flatview_init(view); - - if (mr) { - render_memory_region(view, mr, int128_zero(), - addrrange_make(int128_zero(), int128_2_64()), false); - } - flatview_simplify(view); - - return view; -} - -static FlatView *address_space_get_flatview(AddressSpace *as) -{ - FlatView *view; - - view = as->current_map; - flatview_ref(view); - return view; -} - -static void address_space_update_topology_pass(AddressSpace *as, - const FlatView *old_view, - const FlatView *new_view, - bool adding) -{ - unsigned iold, inew; - FlatRange *frold, *frnew; - struct uc_struct *uc = as->uc; - - /* Generate a symmetric difference of the old and new memory maps. - * Kill ranges in the old map, and instantiate ranges in the new map. - */ - iold = inew = 0; - while (iold < old_view->nr || inew < new_view->nr) { - if (iold < old_view->nr) { - frold = &old_view->ranges[iold]; - } else { - frold = NULL; - } - if (inew < new_view->nr) { - frnew = &new_view->ranges[inew]; - } else { - frnew = NULL; - } - - if (frold - && (!frnew - || int128_lt(frold->addr.start, frnew->addr.start) - || (int128_eq(frold->addr.start, frnew->addr.start) - && !flatrange_equal(frold, frnew)))) { - /* In old but not in new, or in both but attributes changed. */ - - if (!adding) { - MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del); - } - - ++iold; - } else if (frold && frnew && flatrange_equal(frold, frnew)) { - /* In both and unchanged (except logging may have changed) */ - - if (adding) { - MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop); - if (frold->dirty_log_mask && !frnew->dirty_log_mask) { - MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop); - } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) { - MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start); - } - } - - ++iold; - ++inew; - } else { - /* In new */ - - if (adding) { - MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add); - } - - ++inew; - } - } -} - - -static void address_space_update_topology(AddressSpace *as) -{ - FlatView *old_view = address_space_get_flatview(as); - FlatView *new_view = generate_memory_topology(as->root); - - address_space_update_topology_pass(as, old_view, new_view, false); - address_space_update_topology_pass(as, old_view, new_view, true); - - flatview_unref(as->current_map); - as->current_map = new_view; - - /* Note that all the old MemoryRegions are still alive up to this - * point. This relieves most MemoryListeners from the need to - * ref/unref the MemoryRegions they get---unless they use them - * outside the iothread mutex, in which case precise reference - * counting is necessary. 
- */ - flatview_unref(old_view); -} - -void memory_region_transaction_begin(struct uc_struct *uc) -{ - ++uc->memory_region_transaction_depth; -} - -static void memory_region_clear_pending(struct uc_struct *uc) -{ - uc->memory_region_update_pending = false; -} - -void memory_region_transaction_commit(struct uc_struct *uc) -{ - AddressSpace *as; - - assert(uc->memory_region_transaction_depth); - --uc->memory_region_transaction_depth; - if (!uc->memory_region_transaction_depth) { - if (uc->memory_region_update_pending) { - MEMORY_LISTENER_CALL_GLOBAL(begin, Forward); - - QTAILQ_FOREACH(as, &uc->address_spaces, address_spaces_link) { - address_space_update_topology(as); - } - - MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); - } - memory_region_clear_pending(uc); - } -} - -static void memory_region_destructor_none(MemoryRegion *mr) -{ -} - -static void memory_region_destructor_ram(MemoryRegion *mr) -{ - qemu_ram_free(mr->uc, mr->ram_addr); -} - -static void memory_region_destructor_alias(MemoryRegion *mr) -{ - memory_region_unref(mr->alias); -} - -static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr) -{ - qemu_ram_free_from_ptr(mr->uc, mr->ram_addr); -} - -static bool memory_region_need_escape(char c) -{ - return c == '/' || c == '[' || c == '\\' || c == ']'; -} - -static char *memory_region_escape_name(const char *name) -{ - const char *p; - char *escaped, *q; - uint8_t c; - size_t bytes = 0; - - for (p = name; *p; p++) { - bytes += memory_region_need_escape(*p) ? 4 : 1; - } - if (bytes == p - name) { - return g_memdup(name, bytes + 1); - } - - escaped = g_malloc(bytes + 1); - for (p = name, q = escaped; *p; p++) { - c = *p; - if (unlikely(memory_region_need_escape(c))) { - *q++ = '\\'; - *q++ = 'x'; - *q++ = "0123456789abcdef"[c >> 4]; - c = "0123456789abcdef"[c & 15]; - } - *q++ = c; - } - *q = 0; - return escaped; -} - -void memory_region_init(struct uc_struct *uc, MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size) -{ - if (!owner) { - owner = qdev_get_machine(uc); - uc->owner = owner; - } - - object_initialize(uc, mr, sizeof(*mr), TYPE_MEMORY_REGION); - mr->uc = uc; - mr->size = int128_make64(size); - if (size == UINT64_MAX) { - mr->size = int128_2_64(); - } - mr->name = g_strdup(name); - - if (name) { - char *escaped_name = memory_region_escape_name(name); - char *name_array = g_strdup_printf("%s[*]", escaped_name); - object_property_add_child(owner, name_array, OBJECT(mr), &error_abort); - object_unref(uc, OBJECT(mr)); - g_free(name_array); - g_free(escaped_name); - } -} - -static void memory_region_get_addr(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - MemoryRegion *mr = MEMORY_REGION(uc, obj); - uint64_t value = mr->addr; - - visit_type_uint64(v, &value, name, errp); -} - -static void memory_region_get_container(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - MemoryRegion *mr = MEMORY_REGION(uc, obj); - gchar *path = (gchar *)""; - - if (mr->container) { - path = object_get_canonical_path(OBJECT(mr->container)); - } - visit_type_str(v, &path, name, errp); - if (mr->container) { - g_free(path); - } -} - -static Object *memory_region_resolve_container(struct uc_struct *uc, Object *obj, void *opaque, - const char *part) -{ - MemoryRegion *mr = MEMORY_REGION(uc, obj); - - return OBJECT(mr->container); -} - -static void memory_region_get_priority(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - 
MemoryRegion *mr = MEMORY_REGION(uc, obj); - int32_t value = mr->priority; - - visit_type_int32(v, &value, name, errp); -} - -static bool memory_region_get_may_overlap(struct uc_struct *uc, Object *obj, Error **errp) -{ - MemoryRegion *mr = MEMORY_REGION(uc, obj); - - return mr->may_overlap; -} - -static void memory_region_get_size(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - MemoryRegion *mr = MEMORY_REGION(uc, obj); - uint64_t value = memory_region_size(mr); - - visit_type_uint64(v, &value, name, errp); -} - -static void memory_region_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - MemoryRegion *mr = MEMORY_REGION(uc, obj); - ObjectProperty *op; - - mr->ops = &unassigned_mem_ops; - mr->enabled = true; - mr->romd_mode = true; - mr->destructor = memory_region_destructor_none; - QTAILQ_INIT(&mr->subregions); - - op = object_property_add(OBJECT(mr), "container", - "link<" TYPE_MEMORY_REGION ">", - memory_region_get_container, - NULL, /* memory_region_set_container */ - NULL, NULL, &error_abort); - op->resolve = memory_region_resolve_container; - - object_property_add(OBJECT(mr), "addr", "uint64", - memory_region_get_addr, - NULL, /* memory_region_set_addr */ - NULL, NULL, &error_abort); - object_property_add(OBJECT(mr), "priority", "uint32", - memory_region_get_priority, - NULL, /* memory_region_set_priority */ - NULL, NULL, &error_abort); - object_property_add_bool(mr->uc, OBJECT(mr), "may-overlap", - memory_region_get_may_overlap, - NULL, /* memory_region_set_may_overlap */ - &error_abort); - object_property_add(OBJECT(mr), "size", "uint64", - memory_region_get_size, - NULL, /* memory_region_set_size, */ - NULL, NULL, &error_abort); -} - -static uint64_t unassigned_mem_read(struct uc_struct* uc, hwaddr addr, unsigned size) -{ -#ifdef DEBUG_UNASSIGNED - printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); -#endif - if (uc->current_cpu != NULL) { - cpu_unassigned_access(uc->current_cpu, addr, false, false, 0, size); - } - return 0; -} - -static void unassigned_mem_write(struct uc_struct* uc, hwaddr addr, - uint64_t val, unsigned size) -{ -#ifdef DEBUG_UNASSIGNED - printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val); -#endif - if (uc->current_cpu != NULL) { - cpu_unassigned_access(uc->current_cpu, addr, true, false, 0, size); - } -} - -static bool unassigned_mem_accepts(void *opaque, hwaddr addr, - unsigned size, bool is_write) -{ - return false; -} - -const MemoryRegionOps unassigned_mem_ops = { - NULL, - NULL, - - DEVICE_NATIVE_ENDIAN, - - {0,0,false,unassigned_mem_accepts}, -}; - -bool memory_region_access_valid(MemoryRegion *mr, - hwaddr addr, - unsigned size, - bool is_write) -{ - int access_size_min, access_size_max; - int access_size, i; - - if (!mr->ops->valid.unaligned && (addr & (size - 1))) { - return false; - } - - if (!mr->ops->valid.accepts) { - return true; - } - - access_size_min = mr->ops->valid.min_access_size; - if (!mr->ops->valid.min_access_size) { - access_size_min = 1; - } - - access_size_max = mr->ops->valid.max_access_size; - if (!mr->ops->valid.max_access_size) { - access_size_max = 4; - } - - access_size = MAX(MIN(size, access_size_max), access_size_min); - for (i = 0; i < size; i += access_size) { - if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size, - is_write)) { - return false; - } - } - - return true; -} - -static uint64_t memory_region_dispatch_read1(MemoryRegion *mr, - hwaddr addr, - unsigned size) -{ - uint64_t data = 0; - - if (mr->ops->read) { - 
access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_read_accessor, mr); - } else { - access_with_adjusted_size(addr, &data, size, 1, 4, - memory_region_oldmmio_read_accessor, mr); - } - - return data; -} - -static bool memory_region_dispatch_read(MemoryRegion *mr, - hwaddr addr, - uint64_t *pval, - unsigned size) -{ - if (!memory_region_access_valid(mr, addr, size, false)) { - *pval = unassigned_mem_read(mr->uc, addr, size); - return true; - } - - *pval = memory_region_dispatch_read1(mr, addr, size); - adjust_endianness(mr, pval, size); - return false; -} - -static bool memory_region_dispatch_write(MemoryRegion *mr, - hwaddr addr, - uint64_t data, - unsigned size) -{ - if (!memory_region_access_valid(mr, addr, size, true)) { - unassigned_mem_write(mr->uc, addr, data, size); - return true; - } - - adjust_endianness(mr, &data, size); - - if (mr->ops->write) { - access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_write_accessor, mr); - } else { - access_with_adjusted_size(addr, &data, size, 1, 4, - memory_region_oldmmio_write_accessor, mr); - } - return false; -} - -void memory_region_init_io(struct uc_struct *uc, MemoryRegion *mr, - Object *owner, - const MemoryRegionOps *ops, - void *opaque, - const char *name, - uint64_t size) -{ - memory_region_init(uc, mr, owner, name, size); - mr->ops = ops; - mr->opaque = opaque; - mr->terminates = true; - mr->ram_addr = ~(ram_addr_t)0; -} - -void memory_region_init_ram(struct uc_struct *uc, MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size, - uint32_t perms, - Error **errp) -{ - memory_region_init(uc, mr, owner, name, size); - mr->ram = true; - if (!(perms & UC_PROT_WRITE)) { - mr->readonly = true; - } - mr->perms = perms; - mr->terminates = true; - mr->destructor = memory_region_destructor_ram; - mr->ram_addr = qemu_ram_alloc(size, mr, errp); -} - -void memory_region_init_ram_ptr(struct uc_struct *uc, MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size, - void *ptr) -{ - memory_region_init(uc, mr, owner, name, size); - mr->ram = true; - mr->terminates = true; - mr->destructor = memory_region_destructor_ram_from_ptr; - - /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. 
*/ - assert(ptr != NULL); - mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort); -} - -void memory_region_set_skip_dump(MemoryRegion *mr) -{ - mr->skip_dump = true; -} - -void memory_region_init_alias(struct uc_struct *uc, MemoryRegion *mr, - Object *owner, - const char *name, - MemoryRegion *orig, - hwaddr offset, - uint64_t size) -{ - memory_region_init(uc, mr, owner, name, size); - memory_region_ref(orig); - mr->destructor = memory_region_destructor_alias; - mr->alias = orig; - mr->alias_offset = offset; -} - -void memory_region_init_reservation(struct uc_struct *uc, MemoryRegion *mr, - Object *owner, - const char *name, - uint64_t size) -{ - memory_region_init_io(uc, mr, owner, &unassigned_mem_ops, mr, name, size); -} - -static void memory_region_finalize(struct uc_struct *uc, Object *obj, void *opaque) -{ - MemoryRegion *mr = MEMORY_REGION(uc, obj); - - assert(QTAILQ_EMPTY(&mr->subregions)); - // assert(memory_region_transaction_depth == 0); - mr->destructor(mr); - g_free((char *)mr->name); -} - -void memory_region_ref(MemoryRegion *mr) -{ - /* MMIO callbacks most likely will access data that belongs - * to the owner, hence the need to ref/unref the owner whenever - * the memory region is in use. - * - * The memory region is a child of its owner. As long as the - * owner doesn't call unparent itself on the memory region, - * ref-ing the owner will also keep the memory region alive. - * Memory regions without an owner are supposed to never go away, - * but we still ref/unref them for debugging purposes. - */ - Object *obj = OBJECT(mr); - if (obj && obj->parent) { - object_ref(obj->parent); - } else { - object_ref(obj); - } -} - -void memory_region_unref(MemoryRegion *mr) -{ - Object *obj = OBJECT(mr); - if (obj && obj->parent) { - object_unref(mr->uc, obj->parent); - } else { - object_unref(mr->uc, obj); - } -} - -uint64_t memory_region_size(MemoryRegion *mr) -{ - if (int128_eq(mr->size, int128_2_64())) { - return UINT64_MAX; - } - return int128_get64(mr->size); -} - -const char *memory_region_name(const MemoryRegion *mr) -{ - if (!mr->name) { - ((MemoryRegion *)mr)->name = - object_get_canonical_path_component(OBJECT(mr)); - } - return mr->name; -} - -bool memory_region_is_ram(MemoryRegion *mr) -{ - return mr->ram; -} - -bool memory_region_is_skip_dump(MemoryRegion *mr) -{ - return mr->skip_dump; -} - -bool memory_region_is_logging(MemoryRegion *mr) -{ - return mr->dirty_log_mask; -} - -bool memory_region_is_rom(MemoryRegion *mr) -{ - return mr->ram && mr->readonly; -} - -bool memory_region_is_iommu(MemoryRegion *mr) -{ - return mr->iommu_ops != 0; -} - -void memory_region_set_readonly(MemoryRegion *mr, bool readonly) -{ - if (mr->readonly != readonly) { - memory_region_transaction_begin(mr->uc); - mr->readonly = readonly; - if (readonly) { - mr->perms &= ~UC_PROT_WRITE; - } - else { - mr->perms |= UC_PROT_WRITE; - } - mr->uc->memory_region_update_pending |= mr->enabled; - memory_region_transaction_commit(mr->uc); - } -} - -void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) -{ - if (mr->romd_mode != romd_mode) { - memory_region_transaction_begin(mr->uc); - mr->romd_mode = romd_mode; - mr->uc->memory_region_update_pending |= mr->enabled; - memory_region_transaction_commit(mr->uc); - } -} - -int memory_region_get_fd(MemoryRegion *mr) -{ - if (mr->alias) { - return memory_region_get_fd(mr->alias); - } - - assert(mr->terminates); - - return qemu_get_ram_fd(mr->uc, mr->ram_addr & TARGET_PAGE_MASK); -} - -void *memory_region_get_ram_ptr(MemoryRegion 
*mr) -{ - if (mr->alias) { - return (char*)memory_region_get_ram_ptr(mr->alias) + mr->alias_offset; - } - - assert(mr->terminates); - - return qemu_get_ram_ptr(mr->uc, mr->ram_addr & TARGET_PAGE_MASK); -} - -static void memory_region_update_container_subregions(MemoryRegion *subregion) -{ - hwaddr offset = subregion->addr; - MemoryRegion *mr = subregion->container; - MemoryRegion *other; - - memory_region_transaction_begin(mr->uc); - - memory_region_ref(subregion); - QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { - if (subregion->may_overlap || other->may_overlap) { - continue; - } - if (int128_ge(int128_make64(offset), - int128_add(int128_make64(other->addr), other->size)) - || int128_le(int128_add(int128_make64(offset), subregion->size), - int128_make64(other->addr))) { - continue; - } -#if 0 - printf("warning: subregion collision %llx/%llx (%s) " - "vs %llx/%llx (%s)\n", - (unsigned long long)offset, - (unsigned long long)int128_get64(subregion->size), - subregion->name, - (unsigned long long)other->addr, - (unsigned long long)int128_get64(other->size), - other->name); -#endif - } - QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { - if (subregion->priority >= other->priority) { - QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); - goto done; - } - } - QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); -done: - mr->uc->memory_region_update_pending |= mr->enabled && subregion->enabled; - memory_region_transaction_commit(mr->uc); -} - -static void memory_region_add_subregion_common(MemoryRegion *mr, - hwaddr offset, - MemoryRegion *subregion) -{ - assert(!subregion->container); - subregion->container = mr; - subregion->addr = offset; - subregion->end = offset + int128_get64(subregion->size); - memory_region_update_container_subregions(subregion); -} - -void memory_region_add_subregion(MemoryRegion *mr, - hwaddr offset, - MemoryRegion *subregion) -{ - subregion->may_overlap = false; - subregion->priority = 0; - memory_region_add_subregion_common(mr, offset, subregion); -} - -void memory_region_add_subregion_overlap(MemoryRegion *mr, - hwaddr offset, - MemoryRegion *subregion, - int priority) -{ - subregion->may_overlap = true; - subregion->priority = priority; - memory_region_add_subregion_common(mr, offset, subregion); -} - -void memory_region_del_subregion(MemoryRegion *mr, - MemoryRegion *subregion) -{ - memory_region_transaction_begin(mr->uc); - assert(subregion->container == mr); - subregion->container = NULL; - QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); - memory_region_unref(subregion); - mr->uc->memory_region_update_pending |= mr->enabled && subregion->enabled; - memory_region_transaction_commit(mr->uc); -} - -void memory_region_set_enabled(MemoryRegion *mr, bool enabled) -{ - if (enabled == mr->enabled) { - return; - } - memory_region_transaction_begin(mr->uc); - mr->enabled = enabled; - mr->uc->memory_region_update_pending = true; - memory_region_transaction_commit(mr->uc); -} - -static void memory_region_readd_subregion(MemoryRegion *mr) -{ - MemoryRegion *container = mr->container; - - if (container) { - memory_region_transaction_begin(mr->uc); - memory_region_ref(mr); - memory_region_del_subregion(container, mr); - mr->container = container; - memory_region_update_container_subregions(mr); - memory_region_unref(mr); - memory_region_transaction_commit(mr->uc); - } -} - -void memory_region_set_address(MemoryRegion *mr, hwaddr addr) -{ - if (addr != mr->addr) { - mr->addr = addr; - memory_region_readd_subregion(mr); - } -} - 
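The subregion plumbing above turns on a single ordering rule: memory_region_update_container_subregions() scans the container's list and inserts the new subregion in front of the first entry whose priority it meets or exceeds, so higher-priority regions come first and obscure lower-priority overlaps when the view is rendered. Below is a minimal, self-contained sketch of that insertion order (not part of the patch; the struct and names are illustrative), written against the BSD <sys/queue.h> macros that QEMU's QTAILQ wrappers mirror:

#include <stdio.h>
#include <sys/queue.h>

struct region {
    const char *name;
    int priority;
    TAILQ_ENTRY(region) link;
};

TAILQ_HEAD(region_list, region);

/* Mirror of the patch's test: insert before the first entry satisfying
 * subregion->priority >= other->priority, else append at the tail. */
static void insert_by_priority(struct region_list *head, struct region *r)
{
    struct region *other;
    TAILQ_FOREACH(other, head, link) {
        if (r->priority >= other->priority) {
            TAILQ_INSERT_BEFORE(other, r, link);
            return;
        }
    }
    TAILQ_INSERT_TAIL(head, r, link);
}

int main(void)
{
    struct region_list head = TAILQ_HEAD_INITIALIZER(head);
    struct region ram = { "ram", 0 }, mmio = { "mmio", 10 }, rom = { "rom", 5 };
    struct region *it;

    insert_by_priority(&head, &ram);
    insert_by_priority(&head, &mmio);
    insert_by_priority(&head, &rom);

    /* Prints mmio (10), rom (5), ram (0): highest priority renders first. */
    TAILQ_FOREACH(it, &head, link) {
        printf("%s (prio %d)\n", it->name, it->priority);
    }
    return 0;
}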
-void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) -{ - assert(mr->alias); - - if (offset == mr->alias_offset) { - return; - } - - memory_region_transaction_begin(mr->uc); - mr->alias_offset = offset; - mr->uc->memory_region_update_pending |= mr->enabled; - memory_region_transaction_commit(mr->uc); -} - -ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) -{ - return mr->ram_addr; -} - -uint64_t memory_region_get_alignment(const MemoryRegion *mr) -{ - return mr->align; -} - -static int cmp_flatrange_addr(const void *addr_, const void *fr_) -{ - const AddrRange *addr = addr_; - const FlatRange *fr = fr_; - - if (int128_le(addrrange_end(*addr), fr->addr.start)) { - return -1; - } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { - return 1; - } - return 0; -} - -static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) -{ - return bsearch(&addr, view->ranges, view->nr, - sizeof(FlatRange), cmp_flatrange_addr); -} - -bool memory_region_present(MemoryRegion *container, hwaddr addr) -{ - MemoryRegion *mr = memory_region_find(container, addr, 1).mr; - if (!mr || (mr == container)) { - return false; - } - memory_region_unref(mr); - return true; -} - -bool memory_region_is_mapped(MemoryRegion *mr) -{ - return mr->container ? true : false; -} - -MemoryRegionSection memory_region_find(MemoryRegion *mr, - hwaddr addr, uint64_t size) -{ - MemoryRegionSection ret = { NULL }; - MemoryRegion *root; - AddressSpace *as; - AddrRange range; - FlatView *view; - FlatRange *fr; - - addr += mr->addr; - for (root = mr; root->container; ) { - root = root->container; - addr += root->addr; - } - - as = memory_region_to_address_space(root); - if (!as) { - return ret; - } - range = addrrange_make(int128_make64(addr), int128_make64(size)); - - view = address_space_get_flatview(as); - fr = flatview_lookup(view, range); - if (!fr) { - flatview_unref(view); - return ret; - } - - while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { - --fr; - } - - ret.mr = fr->mr; - ret.address_space = as; - range = addrrange_intersection(range, fr->addr); - ret.offset_within_region = fr->offset_in_region; - ret.offset_within_region += int128_get64(int128_sub(range.start, - fr->addr.start)); - ret.size = range.size; - ret.offset_within_address_space = int128_get64(range.start); - ret.readonly = fr->readonly; - memory_region_ref(ret.mr); - - flatview_unref(view); - return ret; -} - -static void listener_add_address_space(MemoryListener *listener, - AddressSpace *as) -{ - FlatView *view; - FlatRange *fr; - - if (listener->address_space_filter - && listener->address_space_filter != as) { - return; - } - - if (listener->address_space_filter->uc->global_dirty_log) { - if (listener->log_global_start) { - listener->log_global_start(listener); - } - } - - view = address_space_get_flatview(as); - FOR_EACH_FLAT_RANGE(fr, view) { - MemoryRegionSection section = MemoryRegionSection_make( - fr->mr, - as, - fr->offset_in_region, - fr->addr.size, - int128_get64(fr->addr.start), - fr->readonly); - if (listener->region_add) { - listener->region_add(listener, &section); - } - } - flatview_unref(view); -} - -void memory_listener_register(struct uc_struct* uc, MemoryListener *listener, AddressSpace *filter) -{ - MemoryListener *other = NULL; - AddressSpace *as; - - listener->address_space_filter = filter; - if (QTAILQ_EMPTY(&uc->memory_listeners) - || listener->priority >= QTAILQ_LAST(&uc->memory_listeners, - memory_listeners)->priority) { - QTAILQ_INSERT_TAIL(&uc->memory_listeners, listener, link); - } 
else { - QTAILQ_FOREACH(other, &uc->memory_listeners, link) { - if (listener->priority < other->priority) { - break; - } - } - QTAILQ_INSERT_BEFORE(other, listener, link); - } - - QTAILQ_FOREACH(as, &uc->address_spaces, address_spaces_link) { - listener_add_address_space(listener, as); - } -} - -void memory_listener_unregister(struct uc_struct *uc, MemoryListener *listener) -{ - QTAILQ_REMOVE(&uc->memory_listeners, listener, link); -} - -void address_space_init(struct uc_struct *uc, AddressSpace *as, MemoryRegion *root, const char *name) -{ - if (QTAILQ_EMPTY(&uc->address_spaces)) { - memory_init(uc); - } - - memory_region_transaction_begin(uc); - as->uc = uc; - as->root = root; - as->current_map = g_new(FlatView, 1); - flatview_init(as->current_map); - QTAILQ_INSERT_TAIL(&uc->address_spaces, as, address_spaces_link); - as->name = g_strdup(name ? name : "anonymous"); - address_space_init_dispatch(as); - uc->memory_region_update_pending |= root->enabled; - memory_region_transaction_commit(uc); -} - -void address_space_destroy(AddressSpace *as) -{ - MemoryListener *listener; - - /* Flush out anything from MemoryListeners listening in on this */ - memory_region_transaction_begin(as->uc); - as->root = NULL; - memory_region_transaction_commit(as->uc); - QTAILQ_REMOVE(&as->uc->address_spaces, as, address_spaces_link); - address_space_unregister(as); - - address_space_destroy_dispatch(as); - - // TODO(danghvu): why assert fail here? - QTAILQ_FOREACH(listener, &as->uc->memory_listeners, link) { - // assert(listener->address_space_filter != as); - } - - flatview_unref(as->current_map); - g_free(as->name); -} - -bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size) -{ - return memory_region_dispatch_read(mr, addr, pval, size); -} - -bool io_mem_write(MemoryRegion *mr, hwaddr addr, - uint64_t val, unsigned size) -{ - return memory_region_dispatch_write(mr, addr, val, size); -} - -typedef struct MemoryRegionList MemoryRegionList; - -struct MemoryRegionList { - const MemoryRegion *mr; - QTAILQ_ENTRY(MemoryRegionList) queue; -}; - -typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead; - -static const TypeInfo memory_region_info = { - TYPE_MEMORY_REGION, - TYPE_OBJECT, - - 0, - sizeof(MemoryRegion), - NULL, - - memory_region_initfn, - NULL, - memory_region_finalize, -}; - -void memory_register_types(struct uc_struct *uc) -{ - type_register_static(uc, &memory_region_info); -} - diff --git a/qemu/memory_ldst.inc.c b/qemu/memory_ldst.inc.c new file mode 100644 index 00000000..8301bcca --- /dev/null +++ b/qemu/memory_ldst.inc.c @@ -0,0 +1,497 @@ +/* + * Physical memory access templates + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2015 Linaro, Inc. + * Copyright (c) 2016 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +/* warning: addr must be aligned */ +static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result, + enum device_endian endian) +{ + uint8_t *ptr; + uint64_t val; + MemoryRegion *mr; + hwaddr l = 4; + hwaddr addr1; + MemTxResult r; + bool release_lock = false; + + //RCU_READ_LOCK(); + mr = TRANSLATE(addr, &addr1, &l, false, attrs); + if (l < 4 || !memory_access_is_direct(mr, false)) { + release_lock |= prepare_mmio_access(mr); + + /* I/O case */ + r = memory_region_dispatch_read(uc, mr, addr1, &val, + MO_32 | devend_memop(endian), attrs); + } else { + /* RAM case */ + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + val = ldl_le_p(ptr); + break; + case DEVICE_BIG_ENDIAN: + val = ldl_be_p(ptr); + break; + default: + val = ldl_p(ptr); + break; + } + r = MEMTX_OK; + } + if (result) { + *result = r; + } + //RCU_READ_UNLOCK(); + return val; +} + +uint32_t glue(address_space_ldl, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + return glue(address_space_ldl_internal, SUFFIX)(uc, ARG1, addr, attrs, result, + DEVICE_NATIVE_ENDIAN); +} + +uint32_t glue(address_space_ldl_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + return glue(address_space_ldl_internal, SUFFIX)(uc, ARG1, addr, attrs, result, + DEVICE_LITTLE_ENDIAN); +} + +uint32_t glue(address_space_ldl_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + return glue(address_space_ldl_internal, SUFFIX)(uc, ARG1, addr, attrs, result, + DEVICE_BIG_ENDIAN); +} + +/* warning: addr must be aligned */ +static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result, + enum device_endian endian) +{ + uint8_t *ptr; + uint64_t val; + MemoryRegion *mr; + hwaddr l = 8; + hwaddr addr1; + MemTxResult r; + bool release_lock = false; + + //RCU_READ_LOCK(); + mr = TRANSLATE(addr, &addr1, &l, false, attrs); + if (l < 8 || !memory_access_is_direct(mr, false)) { + release_lock |= prepare_mmio_access(mr); + + /* I/O case */ + r = memory_region_dispatch_read(uc, mr, addr1, &val, + MO_64 | devend_memop(endian), attrs); + } else { + /* RAM case */ + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + val = ldq_le_p(ptr); + break; + case DEVICE_BIG_ENDIAN: + val = ldq_be_p(ptr); + break; + default: + val = ldq_p(ptr); + break; + } + r = MEMTX_OK; + } + if (result) { + *result = r; + } + //RCU_READ_UNLOCK(); + return val; +} + +uint64_t glue(address_space_ldq, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + return glue(address_space_ldq_internal, SUFFIX)(uc, ARG1, addr, attrs, result, + DEVICE_NATIVE_ENDIAN); +} + +uint64_t glue(address_space_ldq_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + return glue(address_space_ldq_internal, SUFFIX)(uc, ARG1, addr, attrs, result, + DEVICE_LITTLE_ENDIAN); +} + +uint64_t glue(address_space_ldq_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + return glue(address_space_ldq_internal, SUFFIX)(uc, ARG1, addr, attrs, result, + DEVICE_BIG_ENDIAN); +} + +uint32_t glue(address_space_ldub, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr 
addr, MemTxAttrs attrs, MemTxResult *result) +{ + uint8_t *ptr; + uint64_t val; + MemoryRegion *mr; + hwaddr l = 1; + hwaddr addr1; + MemTxResult r; + bool release_lock = false; + + //RCU_READ_LOCK(); + mr = TRANSLATE(addr, &addr1, &l, false, attrs); + if (!memory_access_is_direct(mr, false)) { + release_lock |= prepare_mmio_access(mr); + + /* I/O case */ + r = memory_region_dispatch_read(uc, mr, addr1, &val, MO_8, attrs); + } else { + /* RAM case */ + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); + val = ldub_p(ptr); + r = MEMTX_OK; + } + if (result) { + *result = r; + } + //RCU_READ_UNLOCK(); + return val; +} + +/* warning: addr must be aligned */ +static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result, + enum device_endian endian) +{ + uint8_t *ptr; + uint64_t val; + MemoryRegion *mr; + hwaddr l = 2; + hwaddr addr1; + MemTxResult r; + bool release_lock = false; + + //RCU_READ_LOCK(); + mr = TRANSLATE(addr, &addr1, &l, false, attrs); + if (l < 2 || !memory_access_is_direct(mr, false)) { + release_lock |= prepare_mmio_access(mr); + + /* I/O case */ + r = memory_region_dispatch_read(uc, mr, addr1, &val, + MO_16 | devend_memop(endian), attrs); + } else { + /* RAM case */ + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + val = lduw_le_p(ptr); + break; + case DEVICE_BIG_ENDIAN: + val = lduw_be_p(ptr); + break; + default: + val = lduw_p(ptr); + break; + } + r = MEMTX_OK; + } + if (result) { + *result = r; + } + //RCU_READ_UNLOCK(); + return val; +} + +uint32_t glue(address_space_lduw, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + return glue(address_space_lduw_internal, SUFFIX)(uc, ARG1, addr, attrs, result, + DEVICE_NATIVE_ENDIAN); +} + +uint32_t glue(address_space_lduw_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + return glue(address_space_lduw_internal, SUFFIX)(uc, ARG1, addr, attrs, result, + DEVICE_LITTLE_ENDIAN); +} + +uint32_t glue(address_space_lduw_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, MemTxAttrs attrs, MemTxResult *result) +{ + return glue(address_space_lduw_internal, SUFFIX)(uc, ARG1, addr, attrs, result, + DEVICE_BIG_ENDIAN); +} + +/* warning: addr must be aligned. The ram page is not masked as dirty + and the code inside is not invalidated. 
It is useful if the dirty + bits are used to track modified PTEs */ +void glue(address_space_stl_notdirty, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + uint8_t *ptr; + MemoryRegion *mr; + hwaddr l = 4; + hwaddr addr1; + MemTxResult r; + bool release_lock = false; + + //RCU_READ_LOCK(); + mr = TRANSLATE(addr, &addr1, &l, true, attrs); + if (l < 4 || !memory_access_is_direct(mr, true)) { + release_lock |= prepare_mmio_access(mr); + + r = memory_region_dispatch_write(uc, mr, addr1, val, MO_32, attrs); + } else { + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); + stl_p(ptr, val); + + r = MEMTX_OK; + } + if (result) { + *result = r; + } + //RCU_READ_UNLOCK(); +} + +/* warning: addr must be aligned */ +static inline void glue(address_space_stl_internal, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, + MemTxResult *result, enum device_endian endian) +{ + uint8_t *ptr; + MemoryRegion *mr; + hwaddr l = 4; + hwaddr addr1; + MemTxResult r; + bool release_lock = false; + + //RCU_READ_LOCK(); + mr = TRANSLATE(addr, &addr1, &l, true, attrs); + if (l < 4 || !memory_access_is_direct(mr, true)) { + release_lock |= prepare_mmio_access(mr); + r = memory_region_dispatch_write(uc, mr, addr1, val, + MO_32 | devend_memop(endian), attrs); + } else { + /* RAM case */ + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + stl_le_p(ptr, val); + break; + case DEVICE_BIG_ENDIAN: + stl_be_p(ptr, val); + break; + default: + stl_p(ptr, val); + break; + } + invalidate_and_set_dirty(mr, addr1, 4); + r = MEMTX_OK; + } + if (result) { + *result = r; + } + //RCU_READ_UNLOCK(); +} + +void glue(address_space_stl, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + glue(address_space_stl_internal, SUFFIX)(uc, ARG1, addr, val, attrs, + result, DEVICE_NATIVE_ENDIAN); +} + +void glue(address_space_stl_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + glue(address_space_stl_internal, SUFFIX)(uc, ARG1, addr, val, attrs, + result, DEVICE_LITTLE_ENDIAN); +} + +void glue(address_space_stl_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + glue(address_space_stl_internal, SUFFIX)(uc, ARG1, addr, val, attrs, + result, DEVICE_BIG_ENDIAN); +} + +void glue(address_space_stb, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + uint8_t *ptr; + MemoryRegion *mr; + hwaddr l = 1; + hwaddr addr1; + MemTxResult r; + bool release_lock = false; + + //RCU_READ_LOCK(); + mr = TRANSLATE(addr, &addr1, &l, true, attrs); + if (!memory_access_is_direct(mr, true)) { + release_lock |= prepare_mmio_access(mr); + r = memory_region_dispatch_write(uc, mr, addr1, val, MO_8, attrs); + } else { + /* RAM case */ + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); + stb_p(ptr, val); + invalidate_and_set_dirty(mr, addr1, 1); + r = MEMTX_OK; + } + if (result) { + *result = r; + } + //RCU_READ_UNLOCK(); +} + +/* warning: addr must be aligned */ +static inline void glue(address_space_stw_internal, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, + MemTxResult *result, enum device_endian endian) +{ + uint8_t *ptr; + MemoryRegion *mr; + hwaddr l = 2; + hwaddr addr1; + MemTxResult r; + bool release_lock = false; 
+ + //RCU_READ_LOCK(); + mr = TRANSLATE(addr, &addr1, &l, true, attrs); + if (l < 2 || !memory_access_is_direct(mr, true)) { + release_lock |= prepare_mmio_access(mr); + r = memory_region_dispatch_write(uc, mr, addr1, val, + MO_16 | devend_memop(endian), attrs); + } else { + /* RAM case */ + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + stw_le_p(ptr, val); + break; + case DEVICE_BIG_ENDIAN: + stw_be_p(ptr, val); + break; + default: + stw_p(ptr, val); + break; + } + invalidate_and_set_dirty(mr, addr1, 2); + r = MEMTX_OK; + } + if (result) { + *result = r; + } + //RCU_READ_UNLOCK(); +} + +void glue(address_space_stw, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + glue(address_space_stw_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result, + DEVICE_NATIVE_ENDIAN); +} + +void glue(address_space_stw_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + glue(address_space_stw_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result, + DEVICE_LITTLE_ENDIAN); +} + +void glue(address_space_stw_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) +{ + glue(address_space_stw_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result, + DEVICE_BIG_ENDIAN); +} + +static void glue(address_space_stq_internal, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint64_t val, MemTxAttrs attrs, + MemTxResult *result, enum device_endian endian) +{ + uint8_t *ptr; + MemoryRegion *mr; + hwaddr l = 8; + hwaddr addr1; + MemTxResult r; + bool release_lock = false; + + //RCU_READ_LOCK(); + mr = TRANSLATE(addr, &addr1, &l, true, attrs); + if (l < 8 || !memory_access_is_direct(mr, true)) { + release_lock |= prepare_mmio_access(mr); + r = memory_region_dispatch_write(uc, mr, addr1, val, + MO_64 | devend_memop(endian), attrs); + } else { + /* RAM case */ + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + stq_le_p(ptr, val); + break; + case DEVICE_BIG_ENDIAN: + stq_be_p(ptr, val); + break; + default: + stq_p(ptr, val); + break; + } + invalidate_and_set_dirty(mr, addr1, 8); + r = MEMTX_OK; + } + if (result) { + *result = r; + } + //RCU_READ_UNLOCK(); +} + +void glue(address_space_stq, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result) +{ + glue(address_space_stq_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result, + DEVICE_NATIVE_ENDIAN); +} + +void glue(address_space_stq_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result) +{ + glue(address_space_stq_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result, + DEVICE_LITTLE_ENDIAN); +} + +void glue(address_space_stq_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL, + hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result) +{ + glue(address_space_stq_internal, SUFFIX)(uc, ARG1, addr, val, attrs, result, + DEVICE_BIG_ENDIAN); +} + +#undef ARG1_DECL +#undef ARG1 +#undef SUFFIX +#undef TRANSLATE +#undef RCU_READ_LOCK +#undef RCU_READ_UNLOCK diff --git a/qemu/mips.h b/qemu/mips.h index 36ad9a4d..c543784e 100644 --- a/qemu/mips.h +++ b/qemu/mips.h @@ -1,3260 +1,1431 @@ /* Autogen header for Unicorn Engine - DONOT MODIFY */ -#ifndef UNICORN_AUTOGEN_MIPS_H -#define UNICORN_AUTOGEN_MIPS_H -#define arm_release arm_release_mips -#define aarch64_tb_set_jmp_target 
aarch64_tb_set_jmp_target_mips -#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_mips -#define use_idiv_instructions_rt use_idiv_instructions_rt_mips -#define tcg_target_deposit_valid tcg_target_deposit_valid_mips -#define helper_power_down helper_power_down_mips -#define check_exit_request check_exit_request_mips -#define address_space_unregister address_space_unregister_mips -#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips -#define phys_mem_clean phys_mem_clean_mips -#define tb_cleanup tb_cleanup_mips +#ifndef UNICORN_AUTOGEN_mips_H +#define UNICORN_AUTOGEN_mips_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _mips +#endif +#define arm_arch arm_arch_mips +#define tb_target_set_jmp_target tb_target_set_jmp_target_mips +#define have_bmi1 have_bmi1_mips +#define have_popcnt have_popcnt_mips +#define have_avx1 have_avx1_mips +#define have_avx2 have_avx2_mips +#define have_isa have_isa_mips +#define have_altivec have_altivec_mips +#define have_vsx have_vsx_mips +#define flush_icache_range flush_icache_range_mips +#define s390_facilities s390_facilities_mips +#define tcg_dump_op tcg_dump_op_mips +#define tcg_dump_ops tcg_dump_ops_mips +#define tcg_gen_and_i64 tcg_gen_and_i64_mips +#define tcg_gen_discard_i64 tcg_gen_discard_i64_mips +#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_mips +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_mips +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_mips +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_mips +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_mips +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_mips +#define tcg_gen_ld_i64 tcg_gen_ld_i64_mips +#define tcg_gen_mov_i64 tcg_gen_mov_i64_mips +#define tcg_gen_movi_i64 tcg_gen_movi_i64_mips +#define tcg_gen_mul_i64 tcg_gen_mul_i64_mips +#define tcg_gen_or_i64 tcg_gen_or_i64_mips +#define tcg_gen_sar_i64 tcg_gen_sar_i64_mips +#define tcg_gen_shl_i64 tcg_gen_shl_i64_mips +#define tcg_gen_shr_i64 tcg_gen_shr_i64_mips +#define tcg_gen_st_i64 tcg_gen_st_i64_mips +#define tcg_gen_xor_i64 tcg_gen_xor_i64_mips +#define cpu_icount_to_ns cpu_icount_to_ns_mips +#define cpu_is_stopped cpu_is_stopped_mips +#define cpu_get_ticks cpu_get_ticks_mips +#define cpu_get_clock cpu_get_clock_mips +#define cpu_resume cpu_resume_mips +#define qemu_init_vcpu qemu_init_vcpu_mips +#define cpu_stop_current cpu_stop_current_mips +#define resume_all_vcpus resume_all_vcpus_mips +#define vm_start vm_start_mips +#define address_space_dispatch_compact address_space_dispatch_compact_mips +#define flatview_translate flatview_translate_mips +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips +#define qemu_get_cpu qemu_get_cpu_mips +#define cpu_address_space_init cpu_address_space_init_mips +#define cpu_get_address_space cpu_get_address_space_mips +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_mips +#define cpu_exec_initfn cpu_exec_initfn_mips +#define cpu_exec_realizefn cpu_exec_realizefn_mips +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips +#define cpu_watchpoint_insert cpu_watchpoint_insert_mips +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips +#define cpu_breakpoint_insert cpu_breakpoint_insert_mips +#define cpu_breakpoint_remove cpu_breakpoint_remove_mips +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips +#define cpu_abort 
cpu_abort_mips +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mips +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips +#define flatview_add_to_dispatch flatview_add_to_dispatch_mips +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_mips +#define qemu_ram_get_offset qemu_ram_get_offset_mips +#define qemu_ram_get_used_length qemu_ram_get_used_length_mips +#define qemu_ram_is_shared qemu_ram_is_shared_mips +#define qemu_ram_pagesize qemu_ram_pagesize_mips +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips +#define qemu_ram_alloc qemu_ram_alloc_mips +#define qemu_ram_free qemu_ram_free_mips +#define qemu_map_ram_ptr qemu_map_ram_ptr_mips +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_mips +#define qemu_ram_block_from_host qemu_ram_block_from_host_mips +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips +#define cpu_check_watchpoint cpu_check_watchpoint_mips +#define iotlb_to_section iotlb_to_section_mips +#define address_space_dispatch_new address_space_dispatch_new_mips +#define address_space_dispatch_free address_space_dispatch_free_mips +#define flatview_read_continue flatview_read_continue_mips +#define address_space_read_full address_space_read_full_mips +#define address_space_write address_space_write_mips +#define address_space_rw address_space_rw_mips +#define cpu_physical_memory_rw cpu_physical_memory_rw_mips +#define address_space_write_rom address_space_write_rom_mips +#define cpu_flush_icache_range cpu_flush_icache_range_mips +#define cpu_exec_init_all cpu_exec_init_all_mips +#define address_space_access_valid address_space_access_valid_mips +#define address_space_map address_space_map_mips +#define address_space_unmap address_space_unmap_mips +#define cpu_physical_memory_map cpu_physical_memory_map_mips +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips +#define cpu_memory_rw_debug cpu_memory_rw_debug_mips +#define qemu_target_page_size qemu_target_page_size_mips +#define qemu_target_page_bits qemu_target_page_bits_mips +#define qemu_target_page_bits_min qemu_target_page_bits_min_mips +#define target_words_bigendian target_words_bigendian_mips +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips +#define ram_block_discard_range ram_block_discard_range_mips +#define ramblock_is_pmem ramblock_is_pmem_mips +#define page_size_init page_size_init_mips +#define set_preferred_target_page_bits set_preferred_target_page_bits_mips +#define finalize_target_page_bits finalize_target_page_bits_mips +#define cpu_outb cpu_outb_mips +#define cpu_outw cpu_outw_mips +#define cpu_outl cpu_outl_mips +#define cpu_inb cpu_inb_mips +#define cpu_inw cpu_inw_mips +#define cpu_inl cpu_inl_mips #define memory_map memory_map_mips +#define memory_map_io memory_map_io_mips #define memory_map_ptr memory_map_ptr_mips #define memory_unmap memory_unmap_mips #define memory_free memory_free_mips -#define free_code_gen_buffer free_code_gen_buffer_mips -#define helper_raise_exception helper_raise_exception_mips -#define tcg_enabled tcg_enabled_mips -#define tcg_exec_init tcg_exec_init_mips -#define memory_register_types memory_register_types_mips -#define cpu_exec_init_all cpu_exec_init_all_mips -#define vm_start vm_start_mips -#define resume_all_vcpus resume_all_vcpus_mips -#define a15_l2ctlr_read a15_l2ctlr_read_mips -#define a64_translate_init a64_translate_init_mips -#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_mips -#define aa64_cacheop_access 
aa64_cacheop_access_mips -#define aa64_daif_access aa64_daif_access_mips -#define aa64_daif_write aa64_daif_write_mips -#define aa64_dczid_read aa64_dczid_read_mips -#define aa64_fpcr_read aa64_fpcr_read_mips -#define aa64_fpcr_write aa64_fpcr_write_mips -#define aa64_fpsr_read aa64_fpsr_read_mips -#define aa64_fpsr_write aa64_fpsr_write_mips -#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_mips -#define aa64_zva_access aa64_zva_access_mips -#define aarch64_banked_spsr_index aarch64_banked_spsr_index_mips -#define aarch64_restore_sp aarch64_restore_sp_mips -#define aarch64_save_sp aarch64_save_sp_mips -#define accel_find accel_find_mips -#define accel_init_machine accel_init_machine_mips -#define accel_type accel_type_mips -#define access_with_adjusted_size access_with_adjusted_size_mips -#define add128 add128_mips -#define add16_sat add16_sat_mips -#define add16_usat add16_usat_mips -#define add192 add192_mips -#define add8_sat add8_sat_mips -#define add8_usat add8_usat_mips -#define add_cpreg_to_hashtable add_cpreg_to_hashtable_mips -#define add_cpreg_to_list add_cpreg_to_list_mips -#define addFloat128Sigs addFloat128Sigs_mips -#define addFloat32Sigs addFloat32Sigs_mips -#define addFloat64Sigs addFloat64Sigs_mips -#define addFloatx80Sigs addFloatx80Sigs_mips -#define add_qemu_ldst_label add_qemu_ldst_label_mips -#define address_space_access_valid address_space_access_valid_mips -#define address_space_destroy address_space_destroy_mips -#define address_space_destroy_dispatch address_space_destroy_dispatch_mips -#define address_space_get_flatview address_space_get_flatview_mips -#define address_space_init address_space_init_mips -#define address_space_init_dispatch address_space_init_dispatch_mips -#define address_space_lookup_region address_space_lookup_region_mips -#define address_space_map address_space_map_mips -#define address_space_read address_space_read_mips -#define address_space_rw address_space_rw_mips -#define address_space_translate address_space_translate_mips -#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips -#define address_space_translate_internal address_space_translate_internal_mips -#define address_space_unmap address_space_unmap_mips -#define address_space_update_topology address_space_update_topology_mips -#define address_space_update_topology_pass address_space_update_topology_pass_mips -#define address_space_write address_space_write_mips -#define addrrange_contains addrrange_contains_mips -#define addrrange_end addrrange_end_mips -#define addrrange_equal addrrange_equal_mips -#define addrrange_intersection addrrange_intersection_mips -#define addrrange_intersects addrrange_intersects_mips -#define addrrange_make addrrange_make_mips -#define adjust_endianness adjust_endianness_mips -#define all_helpers all_helpers_mips -#define alloc_code_gen_buffer alloc_code_gen_buffer_mips -#define alloc_entry alloc_entry_mips -#define always_true always_true_mips -#define arm1026_initfn arm1026_initfn_mips -#define arm1136_initfn arm1136_initfn_mips -#define arm1136_r2_initfn arm1136_r2_initfn_mips -#define arm1176_initfn arm1176_initfn_mips -#define arm11mpcore_initfn arm11mpcore_initfn_mips -#define arm926_initfn arm926_initfn_mips -#define arm946_initfn arm946_initfn_mips -#define arm_ccnt_enabled arm_ccnt_enabled_mips -#define arm_cp_read_zero arm_cp_read_zero_mips -#define arm_cp_reset_ignore arm_cp_reset_ignore_mips -#define arm_cpu_do_interrupt arm_cpu_do_interrupt_mips -#define arm_cpu_exec_interrupt 
arm_cpu_exec_interrupt_mips -#define arm_cpu_finalizefn arm_cpu_finalizefn_mips -#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_mips -#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_mips -#define arm_cpu_initfn arm_cpu_initfn_mips -#define arm_cpu_list arm_cpu_list_mips -#define cpu_loop_exit cpu_loop_exit_mips -#define arm_cpu_post_init arm_cpu_post_init_mips -#define arm_cpu_realizefn arm_cpu_realizefn_mips -#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_mips -#define arm_cpu_register_types arm_cpu_register_types_mips -#define cpu_resume_from_signal cpu_resume_from_signal_mips -#define arm_cpus arm_cpus_mips -#define arm_cpu_set_pc arm_cpu_set_pc_mips -#define arm_cp_write_ignore arm_cp_write_ignore_mips -#define arm_current_el arm_current_el_mips -#define arm_dc_feature arm_dc_feature_mips -#define arm_debug_excp_handler arm_debug_excp_handler_mips -#define arm_debug_target_el arm_debug_target_el_mips -#define arm_el_is_aa64 arm_el_is_aa64_mips -#define arm_env_get_cpu arm_env_get_cpu_mips -#define arm_excp_target_el arm_excp_target_el_mips -#define arm_excp_unmasked arm_excp_unmasked_mips -#define arm_feature arm_feature_mips -#define arm_generate_debug_exceptions arm_generate_debug_exceptions_mips -#define gen_intermediate_code gen_intermediate_code_mips -#define gen_intermediate_code_pc gen_intermediate_code_pc_mips -#define arm_gen_test_cc arm_gen_test_cc_mips -#define arm_gt_ptimer_cb arm_gt_ptimer_cb_mips -#define arm_gt_vtimer_cb arm_gt_vtimer_cb_mips -#define arm_handle_psci_call arm_handle_psci_call_mips -#define arm_is_psci_call arm_is_psci_call_mips -#define arm_is_secure arm_is_secure_mips -#define arm_is_secure_below_el3 arm_is_secure_below_el3_mips -#define arm_ldl_code arm_ldl_code_mips -#define arm_lduw_code arm_lduw_code_mips -#define arm_log_exception arm_log_exception_mips -#define arm_reg_read arm_reg_read_mips -#define arm_reg_reset arm_reg_reset_mips -#define arm_reg_write arm_reg_write_mips -#define restore_state_to_opc restore_state_to_opc_mips -#define arm_rmode_to_sf arm_rmode_to_sf_mips -#define arm_singlestep_active arm_singlestep_active_mips -#define tlb_fill tlb_fill_mips -#define tlb_flush tlb_flush_mips -#define tlb_flush_page tlb_flush_page_mips -#define tlb_set_page tlb_set_page_mips -#define arm_translate_init arm_translate_init_mips -#define arm_v7m_class_init arm_v7m_class_init_mips -#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_mips -#define ats_access ats_access_mips -#define ats_write ats_write_mips -#define bad_mode_switch bad_mode_switch_mips -#define bank_number bank_number_mips -#define bitmap_zero_extend bitmap_zero_extend_mips -#define bp_wp_matches bp_wp_matches_mips -#define breakpoint_invalidate breakpoint_invalidate_mips -#define build_page_bitmap build_page_bitmap_mips -#define bus_add_child bus_add_child_mips -#define bus_class_init bus_class_init_mips -#define bus_info bus_info_mips -#define bus_unparent bus_unparent_mips -#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_mips -#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_mips -#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_mips -#define call_recip_estimate call_recip_estimate_mips -#define can_merge can_merge_mips -#define capacity_increase capacity_increase_mips -#define ccsidr_read ccsidr_read_mips -#define check_ap check_ap_mips -#define check_breakpoints check_breakpoints_mips -#define check_watchpoints check_watchpoints_mips -#define cho cho_mips 
-#define clear_bit clear_bit_mips
-#define clz32 clz32_mips
-#define clz64 clz64_mips
-#define cmp_flatrange_addr cmp_flatrange_addr_mips
-#define code_gen_alloc code_gen_alloc_mips
-#define commonNaNToFloat128 commonNaNToFloat128_mips
-#define commonNaNToFloat16 commonNaNToFloat16_mips
-#define commonNaNToFloat32 commonNaNToFloat32_mips
-#define commonNaNToFloat64 commonNaNToFloat64_mips
-#define commonNaNToFloatx80 commonNaNToFloatx80_mips
-#define compute_abs_deadline compute_abs_deadline_mips
-#define cond_name cond_name_mips
-#define configure_accelerator configure_accelerator_mips
-#define container_get container_get_mips
-#define container_info container_info_mips
-#define container_register_types container_register_types_mips
-#define contextidr_write contextidr_write_mips
-#define core_log_global_start core_log_global_start_mips
-#define core_log_global_stop core_log_global_stop_mips
-#define core_memory_listener core_memory_listener_mips
-#define cortexa15_cp_reginfo cortexa15_cp_reginfo_mips
-#define cortex_a15_initfn cortex_a15_initfn_mips
-#define cortexa8_cp_reginfo cortexa8_cp_reginfo_mips
-#define cortex_a8_initfn cortex_a8_initfn_mips
-#define cortexa9_cp_reginfo cortexa9_cp_reginfo_mips
-#define cortex_a9_initfn cortex_a9_initfn_mips
-#define cortex_m3_initfn cortex_m3_initfn_mips
-#define count_cpreg count_cpreg_mips
-#define countLeadingZeros32 countLeadingZeros32_mips
-#define countLeadingZeros64 countLeadingZeros64_mips
-#define cp_access_ok cp_access_ok_mips
-#define cpacr_write cpacr_write_mips
-#define cpreg_field_is_64bit cpreg_field_is_64bit_mips
-#define cp_reginfo cp_reginfo_mips
-#define cpreg_key_compare cpreg_key_compare_mips
-#define cpreg_make_keylist cpreg_make_keylist_mips
-#define cp_reg_reset cp_reg_reset_mips
-#define cpreg_to_kvm_id cpreg_to_kvm_id_mips
-#define cpsr_read cpsr_read_mips
-#define cpsr_write cpsr_write_mips
-#define cptype_valid cptype_valid_mips
-#define cpu_abort cpu_abort_mips
-#define cpu_arm_exec cpu_arm_exec_mips
-#define cpu_arm_gen_code cpu_arm_gen_code_mips
-#define cpu_arm_init cpu_arm_init_mips
-#define cpu_breakpoint_insert cpu_breakpoint_insert_mips
-#define cpu_breakpoint_remove cpu_breakpoint_remove_mips
-#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips
-#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips
-#define cpu_can_do_io cpu_can_do_io_mips
-#define cpu_can_run cpu_can_run_mips
-#define cpu_class_init cpu_class_init_mips
-#define cpu_common_class_by_name cpu_common_class_by_name_mips
-#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mips
-#define cpu_common_get_arch_id cpu_common_get_arch_id_mips
-#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_mips
-#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_mips
-#define cpu_common_has_work cpu_common_has_work_mips
-#define cpu_common_initfn cpu_common_initfn_mips
-#define cpu_common_noop cpu_common_noop_mips
-#define cpu_common_parse_features cpu_common_parse_features_mips
-#define cpu_common_realizefn cpu_common_realizefn_mips
-#define cpu_common_reset cpu_common_reset_mips
-#define cpu_dump_statistics cpu_dump_statistics_mips
-#define cpu_exec_init cpu_exec_init_mips
-#define cpu_flush_icache_range cpu_flush_icache_range_mips
-#define cpu_gen_init cpu_gen_init_mips
-#define cpu_get_clock cpu_get_clock_mips
-#define cpu_get_real_ticks cpu_get_real_ticks_mips
-#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_mips
-#define cpu_handle_debug_exception cpu_handle_debug_exception_mips
-#define cpu_handle_guest_debug cpu_handle_guest_debug_mips
-#define cpu_inb cpu_inb_mips
-#define cpu_inl cpu_inl_mips
-#define cpu_interrupt cpu_interrupt_mips
-#define cpu_interrupt_handler cpu_interrupt_handler_mips
-#define cpu_inw cpu_inw_mips
-#define cpu_io_recompile cpu_io_recompile_mips
-#define cpu_is_stopped cpu_is_stopped_mips
-#define cpu_ldl_code cpu_ldl_code_mips
-#define cpu_ldub_code cpu_ldub_code_mips
-#define cpu_lduw_code cpu_lduw_code_mips
-#define cpu_memory_rw_debug cpu_memory_rw_debug_mips
-#define cpu_mmu_index cpu_mmu_index_mips
-#define cpu_outb cpu_outb_mips
-#define cpu_outl cpu_outl_mips
-#define cpu_outw cpu_outw_mips
-#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mips
-#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_mips
-#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mips
-#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mips
-#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mips
-#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips
-#define cpu_physical_memory_map cpu_physical_memory_map_mips
-#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mips
-#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mips
-#define cpu_physical_memory_rw cpu_physical_memory_rw_mips
-#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mips
-#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mips
-#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips
-#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mips
-#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mips
-#define cpu_register cpu_register_mips
-#define cpu_register_types cpu_register_types_mips
-#define cpu_restore_state cpu_restore_state_mips
-#define cpu_restore_state_from_tb cpu_restore_state_from_tb_mips
-#define cpu_single_step cpu_single_step_mips
-#define cpu_tb_exec cpu_tb_exec_mips
-#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_mips
-#define cpu_to_be64 cpu_to_be64_mips
-#define cpu_to_le32 cpu_to_le32_mips
-#define cpu_to_le64 cpu_to_le64_mips
-#define cpu_type_info cpu_type_info_mips
-#define cpu_unassigned_access cpu_unassigned_access_mips
-#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips
-#define cpu_watchpoint_insert cpu_watchpoint_insert_mips
-#define cpu_watchpoint_remove cpu_watchpoint_remove_mips
-#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips
-#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips
-#define crc32c_table crc32c_table_mips
-#define create_new_memory_mapping create_new_memory_mapping_mips
-#define csselr_write csselr_write_mips
-#define cto32 cto32_mips
-#define ctr_el0_access ctr_el0_access_mips
-#define ctz32 ctz32_mips
-#define ctz64 ctz64_mips
-#define dacr_write dacr_write_mips
-#define dbgbcr_write dbgbcr_write_mips
-#define dbgbvr_write dbgbvr_write_mips
-#define dbgwcr_write dbgwcr_write_mips
-#define dbgwvr_write dbgwvr_write_mips
-#define debug_cp_reginfo debug_cp_reginfo_mips
-#define debug_frame debug_frame_mips
-#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_mips
-#define define_arm_cp_regs define_arm_cp_regs_mips
-#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_mips
-#define define_debug_regs define_debug_regs_mips
-#define define_one_arm_cp_reg define_one_arm_cp_reg_mips
-#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_mips
-#define deposit32 deposit32_mips
-#define deposit64 deposit64_mips
-#define deregister_tm_clones deregister_tm_clones_mips
-#define device_class_base_init device_class_base_init_mips
-#define device_class_init device_class_init_mips
-#define device_finalize device_finalize_mips
-#define device_get_realized device_get_realized_mips
-#define device_initfn device_initfn_mips
-#define device_post_init device_post_init_mips
-#define device_reset device_reset_mips
-#define device_set_realized device_set_realized_mips
-#define device_type_info device_type_info_mips
-#define disas_arm_insn disas_arm_insn_mips
-#define disas_coproc_insn disas_coproc_insn_mips
-#define disas_dsp_insn disas_dsp_insn_mips
-#define disas_iwmmxt_insn disas_iwmmxt_insn_mips
-#define disas_neon_data_insn disas_neon_data_insn_mips
-#define disas_neon_ls_insn disas_neon_ls_insn_mips
-#define disas_thumb2_insn disas_thumb2_insn_mips
-#define disas_thumb_insn disas_thumb_insn_mips
-#define disas_vfp_insn disas_vfp_insn_mips
-#define disas_vfp_v8_insn disas_vfp_v8_insn_mips
-#define do_arm_semihosting do_arm_semihosting_mips
-#define do_clz16 do_clz16_mips
-#define do_clz8 do_clz8_mips
-#define do_constant_folding do_constant_folding_mips
-#define do_constant_folding_2 do_constant_folding_2_mips
-#define do_constant_folding_cond do_constant_folding_cond_mips
-#define do_constant_folding_cond2 do_constant_folding_cond2_mips
-#define do_constant_folding_cond_32 do_constant_folding_cond_32_mips
-#define do_constant_folding_cond_64 do_constant_folding_cond_64_mips
-#define do_constant_folding_cond_eq do_constant_folding_cond_eq_mips
-#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_mips
-#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_mips
-#define do_ssat do_ssat_mips
-#define do_usad do_usad_mips
-#define do_usat do_usat_mips
-#define do_v7m_exception_exit do_v7m_exception_exit_mips
-#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_mips
-#define dummy_func dummy_func_mips
-#define dummy_section dummy_section_mips
-#define _DYNAMIC _DYNAMIC_mips
-#define _edata _edata_mips
-#define _end _end_mips
-#define end_list end_list_mips
-#define eq128 eq128_mips
-#define ErrorClass_lookup ErrorClass_lookup_mips
-#define error_copy error_copy_mips
-#define error_exit error_exit_mips
-#define error_get_class error_get_class_mips
-#define error_get_pretty error_get_pretty_mips
-#define error_setg_file_open error_setg_file_open_mips
-#define estimateDiv128To64 estimateDiv128To64_mips
-#define estimateSqrt32 estimateSqrt32_mips
-#define excnames excnames_mips
-#define excp_is_internal excp_is_internal_mips
-#define extended_addresses_enabled extended_addresses_enabled_mips
-#define extended_mpu_ap_bits extended_mpu_ap_bits_mips
-#define extract32 extract32_mips
-#define extract64 extract64_mips
-#define extractFloat128Exp extractFloat128Exp_mips
-#define extractFloat128Frac0 extractFloat128Frac0_mips
-#define extractFloat128Frac1 extractFloat128Frac1_mips
-#define extractFloat128Sign extractFloat128Sign_mips
-#define extractFloat16Exp extractFloat16Exp_mips
-#define extractFloat16Frac extractFloat16Frac_mips
-#define extractFloat16Sign extractFloat16Sign_mips
-#define extractFloat32Exp extractFloat32Exp_mips
-#define extractFloat32Frac extractFloat32Frac_mips
-#define extractFloat32Sign extractFloat32Sign_mips
-#define extractFloat64Exp extractFloat64Exp_mips
-#define extractFloat64Frac extractFloat64Frac_mips
-#define extractFloat64Sign extractFloat64Sign_mips
-#define extractFloatx80Exp extractFloatx80Exp_mips
-#define extractFloatx80Frac extractFloatx80Frac_mips
-#define extractFloatx80Sign extractFloatx80Sign_mips
-#define fcse_write fcse_write_mips
-#define find_better_copy find_better_copy_mips
-#define find_default_machine find_default_machine_mips
-#define find_desc_by_name find_desc_by_name_mips
-#define find_first_bit find_first_bit_mips
-#define find_paging_enabled_cpu find_paging_enabled_cpu_mips
-#define find_ram_block find_ram_block_mips
-#define find_ram_offset find_ram_offset_mips
-#define find_string find_string_mips
-#define find_type find_type_mips
-#define _fini _fini_mips
-#define flatrange_equal flatrange_equal_mips
-#define flatview_destroy flatview_destroy_mips
-#define flatview_init flatview_init_mips
-#define flatview_insert flatview_insert_mips
-#define flatview_lookup flatview_lookup_mips
-#define flatview_ref flatview_ref_mips
-#define flatview_simplify flatview_simplify_mips
 #define flatview_unref flatview_unref_mips
-#define float128_add float128_add_mips
-#define float128_compare float128_compare_mips
-#define float128_compare_internal float128_compare_internal_mips
-#define float128_compare_quiet float128_compare_quiet_mips
-#define float128_default_nan float128_default_nan_mips
-#define float128_div float128_div_mips
-#define float128_eq float128_eq_mips
-#define float128_eq_quiet float128_eq_quiet_mips
-#define float128_is_quiet_nan float128_is_quiet_nan_mips
-#define float128_is_signaling_nan float128_is_signaling_nan_mips
-#define float128_le float128_le_mips
-#define float128_le_quiet float128_le_quiet_mips
-#define float128_lt float128_lt_mips
-#define float128_lt_quiet float128_lt_quiet_mips
-#define float128_maybe_silence_nan float128_maybe_silence_nan_mips
-#define float128_mul float128_mul_mips
-#define float128_rem float128_rem_mips
-#define float128_round_to_int float128_round_to_int_mips
-#define float128_scalbn float128_scalbn_mips
-#define float128_sqrt float128_sqrt_mips
-#define float128_sub float128_sub_mips
-#define float128ToCommonNaN float128ToCommonNaN_mips
-#define float128_to_float32 float128_to_float32_mips
-#define float128_to_float64 float128_to_float64_mips
-#define float128_to_floatx80 float128_to_floatx80_mips
-#define float128_to_int32 float128_to_int32_mips
-#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips
-#define float128_to_int64 float128_to_int64_mips
-#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips
-#define float128_unordered float128_unordered_mips
-#define float128_unordered_quiet float128_unordered_quiet_mips
-#define float16_default_nan float16_default_nan_mips
+#define address_space_get_flatview address_space_get_flatview_mips
+#define memory_region_transaction_begin memory_region_transaction_begin_mips
+#define memory_region_transaction_commit memory_region_transaction_commit_mips
+#define memory_region_init memory_region_init_mips
+#define memory_region_access_valid memory_region_access_valid_mips
+#define memory_region_dispatch_read memory_region_dispatch_read_mips
+#define memory_region_dispatch_write memory_region_dispatch_write_mips
+#define memory_region_init_io memory_region_init_io_mips
+#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips
+#define memory_region_size memory_region_size_mips
+#define memory_region_set_readonly memory_region_set_readonly_mips
+#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips
+#define memory_region_from_host memory_region_from_host_mips
+#define memory_region_get_ram_addr memory_region_get_ram_addr_mips
+#define memory_region_add_subregion memory_region_add_subregion_mips
+#define memory_region_del_subregion memory_region_del_subregion_mips
+#define memory_region_find memory_region_find_mips
+#define memory_listener_register memory_listener_register_mips
+#define memory_listener_unregister memory_listener_unregister_mips
+#define address_space_remove_listeners address_space_remove_listeners_mips
+#define address_space_init address_space_init_mips
+#define address_space_destroy address_space_destroy_mips
+#define memory_region_init_ram memory_region_init_ram_mips
+#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips
+#define exec_inline_op exec_inline_op_mips
+#define floatx80_default_nan floatx80_default_nan_mips
+#define float_raise float_raise_mips
 #define float16_is_quiet_nan float16_is_quiet_nan_mips
 #define float16_is_signaling_nan float16_is_signaling_nan_mips
-#define float16_maybe_silence_nan float16_maybe_silence_nan_mips
-#define float16ToCommonNaN float16ToCommonNaN_mips
-#define float16_to_float32 float16_to_float32_mips
-#define float16_to_float64 float16_to_float64_mips
-#define float32_abs float32_abs_mips
-#define float32_add float32_add_mips
-#define float32_chs float32_chs_mips
-#define float32_compare float32_compare_mips
-#define float32_compare_internal float32_compare_internal_mips
-#define float32_compare_quiet float32_compare_quiet_mips
-#define float32_default_nan float32_default_nan_mips
-#define float32_div float32_div_mips
-#define float32_eq float32_eq_mips
-#define float32_eq_quiet float32_eq_quiet_mips
-#define float32_exp2 float32_exp2_mips
-#define float32_exp2_coefficients float32_exp2_coefficients_mips
-#define float32_is_any_nan float32_is_any_nan_mips
-#define float32_is_infinity float32_is_infinity_mips
-#define float32_is_neg float32_is_neg_mips
 #define float32_is_quiet_nan float32_is_quiet_nan_mips
 #define float32_is_signaling_nan float32_is_signaling_nan_mips
-#define float32_is_zero float32_is_zero_mips
-#define float32_is_zero_or_denormal float32_is_zero_or_denormal_mips
-#define float32_le float32_le_mips
-#define float32_le_quiet float32_le_quiet_mips
-#define float32_log2 float32_log2_mips
-#define float32_lt float32_lt_mips
-#define float32_lt_quiet float32_lt_quiet_mips
+#define float64_is_quiet_nan float64_is_quiet_nan_mips
+#define float64_is_signaling_nan float64_is_signaling_nan_mips
+#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips
+#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips
+#define floatx80_silence_nan floatx80_silence_nan_mips
+#define propagateFloatx80NaN propagateFloatx80NaN_mips
+#define float128_is_quiet_nan float128_is_quiet_nan_mips
+#define float128_is_signaling_nan float128_is_signaling_nan_mips
+#define float128_silence_nan float128_silence_nan_mips
+#define float16_add float16_add_mips
+#define float16_sub float16_sub_mips
+#define float32_add float32_add_mips
+#define float32_sub float32_sub_mips
+#define float64_add float64_add_mips
+#define float64_sub float64_sub_mips
+#define float16_mul float16_mul_mips
+#define float32_mul float32_mul_mips
+#define float64_mul float64_mul_mips
+#define float16_muladd float16_muladd_mips
+#define float32_muladd float32_muladd_mips
+#define float64_muladd float64_muladd_mips
+#define float16_div float16_div_mips
+#define float32_div float32_div_mips
+#define float64_div float64_div_mips
+#define float16_to_float32 float16_to_float32_mips
+#define float16_to_float64 float16_to_float64_mips
+#define float32_to_float16 float32_to_float16_mips
+#define float32_to_float64 float32_to_float64_mips
+#define float64_to_float16 float64_to_float16_mips
+#define float64_to_float32 float64_to_float32_mips
+#define float16_round_to_int float16_round_to_int_mips
+#define float32_round_to_int float32_round_to_int_mips
+#define float64_round_to_int float64_round_to_int_mips
+#define float16_to_int16_scalbn float16_to_int16_scalbn_mips
+#define float16_to_int32_scalbn float16_to_int32_scalbn_mips
+#define float16_to_int64_scalbn float16_to_int64_scalbn_mips
+#define float32_to_int16_scalbn float32_to_int16_scalbn_mips
+#define float32_to_int32_scalbn float32_to_int32_scalbn_mips
+#define float32_to_int64_scalbn float32_to_int64_scalbn_mips
+#define float64_to_int16_scalbn float64_to_int16_scalbn_mips
+#define float64_to_int32_scalbn float64_to_int32_scalbn_mips
+#define float64_to_int64_scalbn float64_to_int64_scalbn_mips
+#define float16_to_int16 float16_to_int16_mips
+#define float16_to_int32 float16_to_int32_mips
+#define float16_to_int64 float16_to_int64_mips
+#define float32_to_int16 float32_to_int16_mips
+#define float32_to_int32 float32_to_int32_mips
+#define float32_to_int64 float32_to_int64_mips
+#define float64_to_int16 float64_to_int16_mips
+#define float64_to_int32 float64_to_int32_mips
+#define float64_to_int64 float64_to_int64_mips
+#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_mips
+#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_mips
+#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_mips
+#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips
+#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips
+#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips
+#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips
+#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips
+#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips
+#define float16_to_uint16_scalbn float16_to_uint16_scalbn_mips
+#define float16_to_uint32_scalbn float16_to_uint32_scalbn_mips
+#define float16_to_uint64_scalbn float16_to_uint64_scalbn_mips
+#define float32_to_uint16_scalbn float32_to_uint16_scalbn_mips
+#define float32_to_uint32_scalbn float32_to_uint32_scalbn_mips
+#define float32_to_uint64_scalbn float32_to_uint64_scalbn_mips
+#define float64_to_uint16_scalbn float64_to_uint16_scalbn_mips
+#define float64_to_uint32_scalbn float64_to_uint32_scalbn_mips
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_mips
+#define float16_to_uint16 float16_to_uint16_mips
+#define float16_to_uint32 float16_to_uint32_mips
+#define float16_to_uint64 float16_to_uint64_mips
+#define float32_to_uint16 float32_to_uint16_mips
+#define float32_to_uint32 float32_to_uint32_mips
+#define float32_to_uint64 float32_to_uint64_mips
+#define float64_to_uint16 float64_to_uint16_mips
+#define float64_to_uint32 float64_to_uint32_mips
+#define float64_to_uint64 float64_to_uint64_mips
+#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_mips
+#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_mips
+#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_mips
+#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips
+#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips
+#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips
+#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips
+#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips
+#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips
+#define int64_to_float16_scalbn int64_to_float16_scalbn_mips
+#define int32_to_float16_scalbn int32_to_float16_scalbn_mips
+#define int16_to_float16_scalbn int16_to_float16_scalbn_mips
+#define int64_to_float16 int64_to_float16_mips
+#define int32_to_float16 int32_to_float16_mips
+#define int16_to_float16 int16_to_float16_mips
+#define int64_to_float32_scalbn int64_to_float32_scalbn_mips
+#define int32_to_float32_scalbn int32_to_float32_scalbn_mips
+#define int16_to_float32_scalbn int16_to_float32_scalbn_mips
+#define int64_to_float32 int64_to_float32_mips
+#define int32_to_float32 int32_to_float32_mips
+#define int16_to_float32 int16_to_float32_mips
+#define int64_to_float64_scalbn int64_to_float64_scalbn_mips
+#define int32_to_float64_scalbn int32_to_float64_scalbn_mips
+#define int16_to_float64_scalbn int16_to_float64_scalbn_mips
+#define int64_to_float64 int64_to_float64_mips
+#define int32_to_float64 int32_to_float64_mips
+#define int16_to_float64 int16_to_float64_mips
+#define uint64_to_float16_scalbn uint64_to_float16_scalbn_mips
+#define uint32_to_float16_scalbn uint32_to_float16_scalbn_mips
+#define uint16_to_float16_scalbn uint16_to_float16_scalbn_mips
+#define uint64_to_float16 uint64_to_float16_mips
+#define uint32_to_float16 uint32_to_float16_mips
+#define uint16_to_float16 uint16_to_float16_mips
+#define uint64_to_float32_scalbn uint64_to_float32_scalbn_mips
+#define uint32_to_float32_scalbn uint32_to_float32_scalbn_mips
+#define uint16_to_float32_scalbn uint16_to_float32_scalbn_mips
+#define uint64_to_float32 uint64_to_float32_mips
+#define uint32_to_float32 uint32_to_float32_mips
+#define uint16_to_float32 uint16_to_float32_mips
+#define uint64_to_float64_scalbn uint64_to_float64_scalbn_mips
+#define uint32_to_float64_scalbn uint32_to_float64_scalbn_mips
+#define uint16_to_float64_scalbn uint16_to_float64_scalbn_mips
+#define uint64_to_float64 uint64_to_float64_mips
+#define uint32_to_float64 uint32_to_float64_mips
+#define uint16_to_float64 uint16_to_float64_mips
+#define float16_min float16_min_mips
+#define float16_minnum float16_minnum_mips
+#define float16_minnummag float16_minnummag_mips
+#define float16_max float16_max_mips
+#define float16_maxnum float16_maxnum_mips
+#define float16_maxnummag float16_maxnummag_mips
+#define float32_min float32_min_mips
+#define float32_minnum float32_minnum_mips
+#define float32_minnummag float32_minnummag_mips
 #define float32_max float32_max_mips
 #define float32_maxnum float32_maxnum_mips
 #define float32_maxnummag float32_maxnummag_mips
-#define float32_maybe_silence_nan float32_maybe_silence_nan_mips
-#define float32_min float32_min_mips
-#define float32_minmax float32_minmax_mips
-#define float32_minnum float32_minnum_mips
-#define float32_minnummag float32_minnummag_mips
-#define float32_mul float32_mul_mips
-#define float32_muladd float32_muladd_mips
-#define float32_rem float32_rem_mips
-#define float32_round_to_int float32_round_to_int_mips
-#define float32_scalbn float32_scalbn_mips
-#define float32_set_sign float32_set_sign_mips
-#define float32_sqrt float32_sqrt_mips
-#define float32_squash_input_denormal float32_squash_input_denormal_mips
-#define float32_sub float32_sub_mips
-#define float32ToCommonNaN float32ToCommonNaN_mips
-#define float32_to_float128 float32_to_float128_mips
-#define float32_to_float16 float32_to_float16_mips
-#define float32_to_float64 float32_to_float64_mips
-#define float32_to_floatx80 float32_to_floatx80_mips
-#define float32_to_int16 float32_to_int16_mips
-#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips
-#define float32_to_int32 float32_to_int32_mips
-#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips
-#define float32_to_int64 float32_to_int64_mips
-#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips
-#define float32_to_uint16 float32_to_uint16_mips
-#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips
-#define float32_to_uint32 float32_to_uint32_mips
-#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips
-#define float32_to_uint64 float32_to_uint64_mips
-#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips
-#define float32_unordered float32_unordered_mips
-#define float32_unordered_quiet float32_unordered_quiet_mips
-#define float64_abs float64_abs_mips
-#define float64_add float64_add_mips
-#define float64_chs float64_chs_mips
-#define float64_compare float64_compare_mips
-#define float64_compare_internal float64_compare_internal_mips
-#define float64_compare_quiet float64_compare_quiet_mips
-#define float64_default_nan float64_default_nan_mips
-#define float64_div float64_div_mips
-#define float64_eq float64_eq_mips
-#define float64_eq_quiet float64_eq_quiet_mips
-#define float64_is_any_nan float64_is_any_nan_mips
-#define float64_is_infinity float64_is_infinity_mips
-#define float64_is_neg float64_is_neg_mips
-#define float64_is_quiet_nan float64_is_quiet_nan_mips
-#define float64_is_signaling_nan float64_is_signaling_nan_mips
-#define float64_is_zero float64_is_zero_mips
-#define float64_le float64_le_mips
-#define float64_le_quiet float64_le_quiet_mips
-#define float64_log2 float64_log2_mips
-#define float64_lt float64_lt_mips
-#define float64_lt_quiet float64_lt_quiet_mips
+#define float64_min float64_min_mips
+#define float64_minnum float64_minnum_mips
+#define float64_minnummag float64_minnummag_mips
 #define float64_max float64_max_mips
 #define float64_maxnum float64_maxnum_mips
 #define float64_maxnummag float64_maxnummag_mips
-#define float64_maybe_silence_nan float64_maybe_silence_nan_mips
-#define float64_min float64_min_mips
-#define float64_minmax float64_minmax_mips
-#define float64_minnum float64_minnum_mips
-#define float64_minnummag float64_minnummag_mips
-#define float64_mul float64_mul_mips
-#define float64_muladd float64_muladd_mips
-#define float64_rem float64_rem_mips
-#define float64_round_to_int float64_round_to_int_mips
+#define float16_compare float16_compare_mips
+#define float16_compare_quiet float16_compare_quiet_mips
+#define float32_compare float32_compare_mips
+#define float32_compare_quiet float32_compare_quiet_mips
+#define float64_compare float64_compare_mips
+#define float64_compare_quiet float64_compare_quiet_mips
+#define float16_scalbn float16_scalbn_mips
+#define float32_scalbn float32_scalbn_mips
 #define float64_scalbn float64_scalbn_mips
-#define float64_set_sign float64_set_sign_mips
+#define float16_sqrt float16_sqrt_mips
+#define float32_sqrt float32_sqrt_mips
 #define float64_sqrt float64_sqrt_mips
+#define float16_default_nan float16_default_nan_mips
+#define float32_default_nan float32_default_nan_mips
+#define float64_default_nan float64_default_nan_mips
+#define float128_default_nan float128_default_nan_mips
+#define float16_silence_nan float16_silence_nan_mips
+#define float32_silence_nan float32_silence_nan_mips
+#define float64_silence_nan float64_silence_nan_mips
+#define float16_squash_input_denormal float16_squash_input_denormal_mips
+#define float32_squash_input_denormal float32_squash_input_denormal_mips
 #define float64_squash_input_denormal float64_squash_input_denormal_mips
-#define float64_sub float64_sub_mips
-#define float64ToCommonNaN float64ToCommonNaN_mips
-#define float64_to_float128 float64_to_float128_mips
-#define float64_to_float16 float64_to_float16_mips
-#define float64_to_float32 float64_to_float32_mips
+#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips
+#define roundAndPackFloatx80 roundAndPackFloatx80_mips
+#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips
+#define int32_to_floatx80 int32_to_floatx80_mips
+#define int32_to_float128 int32_to_float128_mips
+#define int64_to_floatx80 int64_to_floatx80_mips
+#define int64_to_float128 int64_to_float128_mips
+#define uint64_to_float128 uint64_to_float128_mips
+#define float32_to_floatx80 float32_to_floatx80_mips
+#define float32_to_float128 float32_to_float128_mips
+#define float32_rem float32_rem_mips
+#define float32_exp2 float32_exp2_mips
+#define float32_log2 float32_log2_mips
+#define float32_eq float32_eq_mips
+#define float32_le float32_le_mips
+#define float32_lt float32_lt_mips
+#define float32_unordered float32_unordered_mips
+#define float32_eq_quiet float32_eq_quiet_mips
+#define float32_le_quiet float32_le_quiet_mips
+#define float32_lt_quiet float32_lt_quiet_mips
+#define float32_unordered_quiet float32_unordered_quiet_mips
 #define float64_to_floatx80 float64_to_floatx80_mips
-#define float64_to_int16 float64_to_int16_mips
-#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips
-#define float64_to_int32 float64_to_int32_mips
-#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips
-#define float64_to_int64 float64_to_int64_mips
-#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips
-#define float64_to_uint16 float64_to_uint16_mips
-#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips
-#define float64_to_uint32 float64_to_uint32_mips
-#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips
-#define float64_to_uint64 float64_to_uint64_mips
-#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips
-#define float64_trunc_to_int float64_trunc_to_int_mips
+#define float64_to_float128 float64_to_float128_mips
+#define float64_rem float64_rem_mips
+#define float64_log2 float64_log2_mips
+#define float64_eq float64_eq_mips
+#define float64_le float64_le_mips
+#define float64_lt float64_lt_mips
 #define float64_unordered float64_unordered_mips
+#define float64_eq_quiet float64_eq_quiet_mips
+#define float64_le_quiet float64_le_quiet_mips
+#define float64_lt_quiet float64_lt_quiet_mips
 #define float64_unordered_quiet float64_unordered_quiet_mips
-#define float_raise float_raise_mips
-#define floatx80_add floatx80_add_mips
-#define floatx80_compare floatx80_compare_mips
-#define floatx80_compare_internal floatx80_compare_internal_mips
-#define floatx80_compare_quiet floatx80_compare_quiet_mips
-#define floatx80_default_nan floatx80_default_nan_mips
-#define floatx80_div floatx80_div_mips
-#define floatx80_eq floatx80_eq_mips
-#define floatx80_eq_quiet floatx80_eq_quiet_mips
-#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips
-#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips
-#define floatx80_le floatx80_le_mips
-#define floatx80_le_quiet floatx80_le_quiet_mips
-#define floatx80_lt floatx80_lt_mips
-#define floatx80_lt_quiet floatx80_lt_quiet_mips
-#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_mips
-#define floatx80_mul floatx80_mul_mips
-#define floatx80_rem floatx80_rem_mips
-#define floatx80_round_to_int floatx80_round_to_int_mips
-#define floatx80_scalbn floatx80_scalbn_mips
-#define floatx80_sqrt floatx80_sqrt_mips
-#define floatx80_sub floatx80_sub_mips
-#define floatx80ToCommonNaN floatx80ToCommonNaN_mips
-#define floatx80_to_float128 floatx80_to_float128_mips
-#define floatx80_to_float32 floatx80_to_float32_mips
-#define floatx80_to_float64 floatx80_to_float64_mips
 #define floatx80_to_int32 floatx80_to_int32_mips
 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mips
 #define floatx80_to_int64 floatx80_to_int64_mips
 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mips
+#define floatx80_to_float32 floatx80_to_float32_mips
+#define floatx80_to_float64 floatx80_to_float64_mips
+#define floatx80_to_float128 floatx80_to_float128_mips
+#define floatx80_round floatx80_round_mips
+#define floatx80_round_to_int floatx80_round_to_int_mips
+#define floatx80_add floatx80_add_mips
+#define floatx80_sub floatx80_sub_mips
+#define floatx80_mul floatx80_mul_mips
+#define floatx80_div floatx80_div_mips
+#define floatx80_rem floatx80_rem_mips
+#define floatx80_sqrt floatx80_sqrt_mips
+#define floatx80_eq floatx80_eq_mips
+#define floatx80_le floatx80_le_mips
+#define floatx80_lt floatx80_lt_mips
 #define floatx80_unordered floatx80_unordered_mips
+#define floatx80_eq_quiet floatx80_eq_quiet_mips
+#define floatx80_le_quiet floatx80_le_quiet_mips
+#define floatx80_lt_quiet floatx80_lt_quiet_mips
 #define floatx80_unordered_quiet floatx80_unordered_quiet_mips
-#define flush_icache_range flush_icache_range_mips
-#define format_string format_string_mips
-#define fp_decode_rm fp_decode_rm_mips
-#define frame_dummy frame_dummy_mips
-#define free_range free_range_mips
-#define fstat64 fstat64_mips
-#define futex_wait futex_wait_mips
-#define futex_wake futex_wake_mips
-#define gen_aa32_ld16s gen_aa32_ld16s_mips
-#define gen_aa32_ld16u gen_aa32_ld16u_mips
-#define gen_aa32_ld32u gen_aa32_ld32u_mips
-#define gen_aa32_ld64 gen_aa32_ld64_mips
-#define gen_aa32_ld8s gen_aa32_ld8s_mips
-#define gen_aa32_ld8u gen_aa32_ld8u_mips
-#define gen_aa32_st16 gen_aa32_st16_mips
-#define gen_aa32_st32 gen_aa32_st32_mips
-#define gen_aa32_st64 gen_aa32_st64_mips
-#define gen_aa32_st8 gen_aa32_st8_mips
-#define gen_adc gen_adc_mips
-#define gen_adc_CC gen_adc_CC_mips
-#define gen_add16 gen_add16_mips
-#define gen_add_carry gen_add_carry_mips
-#define gen_add_CC gen_add_CC_mips
-#define gen_add_datah_offset gen_add_datah_offset_mips
-#define gen_add_data_offset gen_add_data_offset_mips
-#define gen_addq gen_addq_mips
-#define gen_addq_lo gen_addq_lo_mips
-#define gen_addq_msw gen_addq_msw_mips
-#define gen_arm_parallel_addsub gen_arm_parallel_addsub_mips
-#define gen_arm_shift_im gen_arm_shift_im_mips
-#define gen_arm_shift_reg gen_arm_shift_reg_mips
-#define gen_bx gen_bx_mips
-#define gen_bx_im gen_bx_im_mips
-#define gen_clrex gen_clrex_mips
-#define generate_memory_topology generate_memory_topology_mips
-#define generic_timer_cp_reginfo generic_timer_cp_reginfo_mips
-#define gen_exception gen_exception_mips
-#define gen_exception_insn gen_exception_insn_mips
-#define gen_exception_internal gen_exception_internal_mips
-#define gen_exception_internal_insn gen_exception_internal_insn_mips
-#define gen_exception_return gen_exception_return_mips
-#define gen_goto_tb gen_goto_tb_mips
-#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_mips
-#define gen_helper_add_saturate gen_helper_add_saturate_mips
-#define gen_helper_add_setq gen_helper_add_setq_mips
-#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_mips
-#define gen_helper_clz32 gen_helper_clz32_mips
-#define gen_helper_clz64 gen_helper_clz64_mips
-#define gen_helper_clz_arm gen_helper_clz_arm_mips
-#define gen_helper_cpsr_read gen_helper_cpsr_read_mips
-#define gen_helper_cpsr_write gen_helper_cpsr_write_mips
-#define gen_helper_crc32_arm gen_helper_crc32_arm_mips
-#define gen_helper_crc32c gen_helper_crc32c_mips
-#define gen_helper_crypto_aese gen_helper_crypto_aese_mips
-#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_mips
-#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_mips
-#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_mips
-#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_mips
-#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_mips
-#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_mips
-#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_mips
-#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_mips
-#define gen_helper_double_saturate gen_helper_double_saturate_mips
-#define gen_helper_exception_internal gen_helper_exception_internal_mips
-#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_mips
-#define gen_helper_get_cp_reg gen_helper_get_cp_reg_mips
-#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_mips
-#define gen_helper_get_r13_banked gen_helper_get_r13_banked_mips
-#define gen_helper_get_user_reg gen_helper_get_user_reg_mips
-#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_mips
-#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_mips
-#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_mips
-#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_mips
-#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_mips
-#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_mips
-#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_mips
-#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_mips
-#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_mips
-#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_mips
-#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_mips
-#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_mips
-#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_mips
-#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_mips
-#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_mips
-#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_mips
-#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_mips
-#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_mips
-#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_mips
-#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_mips
-#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_mips
-#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_mips
-#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_mips
-#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_mips
-#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_mips
-#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_mips
-#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_mips
-#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_mips
-#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_mips
-#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_mips
-#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_mips
-#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_mips
-#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_mips
-#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_mips
-#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_mips
-#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_mips
-#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_mips
-#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_mips
-#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_mips
-#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_mips
-#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_mips
-#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_mips
-#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_mips
-#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_mips
-#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_mips
-#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_mips
-#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_mips
-#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_mips
-#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_mips
-#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_mips
-#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_mips
-#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_mips
-#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_mips
-#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_mips
-#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_mips
-#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_mips
-#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_mips
-#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_mips
-#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_mips
-#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_mips
-#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_mips
-#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_mips
-#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_mips
-#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_mips
-#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_mips
-#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_mips
-#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_mips
-#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_mips
-#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_mips
-#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_mips
-#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_mips
-#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_mips
-#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_mips
-#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_mips
-#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_mips
-#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_mips
-#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_mips
-#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_mips
-#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_mips
-#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_mips
-#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_mips
-#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_mips
-#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_mips
-#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_mips
-#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_mips
-#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_mips
-#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_mips
-#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_mips
-#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_mips
-#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_mips
-#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_mips
-#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_mips
-#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_mips
-#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_mips
-#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_mips
-#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_mips
-#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_mips
-#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_mips
-#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_mips
-#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_mips
-#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_mips
-#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_mips
-#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_mips
-#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_mips
-#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_mips
-#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_mips
-#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_mips
-#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_mips
-#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_mips
-#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_mips
-#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_mips
-#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_mips
-#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_mips
-#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_mips
-#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_mips
-#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_mips
-#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_mips
-#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_mips
-#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_mips
-#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_mips
-#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_mips
-#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_mips
-#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_mips
-#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_mips
-#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_mips
-#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_mips
-#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_mips
-#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_mips
-#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_mips
-#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_mips
-#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_mips
-#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_mips
-#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_mips
-#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_mips
-#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_mips
-#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_mips
-#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_mips
-#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_mips
-#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_mips
-#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_mips
-#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_mips
-#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_mips
-#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_mips
-#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_mips
-#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_mips
-#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_mips
-#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_mips
-#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_mips
-#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_mips
-#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_mips
-#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_mips
-#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_mips
-#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_mips
-#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_mips
-#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_mips
-#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_mips
-#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_mips
-#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_mips
-#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_mips
-#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_mips
-#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_mips
-#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_mips
-#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_mips
-#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_mips
-#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_mips
-#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_mips
-#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_mips
-#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_mips
-#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_mips
-#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_mips
-#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_mips
-#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_mips
-#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_mips
-#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_mips
-#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_mips
-#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_mips
-#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_mips
-#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_mips
-#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_mips
-#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_mips
-#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_mips
-#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_mips
-#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_mips
-#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_mips
-#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_mips
-#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_mips
-#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_mips
-#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_mips
-#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_mips
-#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_mips
-#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_mips
-#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_mips
-#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_mips
-#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_mips
-#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_mips
-#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_mips
-#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_mips
-#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_mips
-#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_mips
-#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_mips
-#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_mips
-#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_mips
-#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_mips
-#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_mips
-#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_mips
-#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_mips
-#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_mips
-#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_mips
-#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_mips
-#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_mips
-#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_mips
-#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_mips
-#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_mips
-#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_mips
-#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_mips
-#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_mips
-#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_mips
-#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_mips
-#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_mips
-#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_mips
-#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_mips
-#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_mips
-#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_mips
-#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_mips
-#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_mips
-#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_mips
-#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_mips
-#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_mips
-#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_mips
-#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_mips
-#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_mips
-#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_mips
-#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_mips
-#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_mips
-#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_mips
-#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_mips
-#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_mips
-#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_mips
-#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_mips
-#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_mips
-#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_mips
-#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_mips
-#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_mips
-#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_mips
-#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_mips
-#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_mips
-#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_mips
-#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_mips
-#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_mips
-#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_mips
-#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_mips
-#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_mips
-#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_mips
-#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_mips
-#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_mips
-#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_mips
-#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_mips
-#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_mips
-#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_mips
-#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_mips
-#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_mips
-#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_mips
-#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_mips
-#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_mips
-#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_mips
-#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_mips
-#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_mips
-#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_mips
-#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_mips
-#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_mips
-#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_mips
-#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_mips
-#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_mips
-#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_mips
-#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_mips
-#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_mips
-#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_mips
-#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_mips
-#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_mips
-#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_mips
-#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_mips
-#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_mips
-#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_mips
-#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_mips
-#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_mips
-#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_mips
-#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_mips
-#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_mips
-#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_mips
-#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_mips
-#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_mips
-#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_mips
-#define gen_helper_neon_tbl gen_helper_neon_tbl_mips
-#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_mips
-#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_mips
-#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_mips
-#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_mips
-#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_mips
-#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_mips
-#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_mips
-#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_mips
-#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_mips
-#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_mips
-#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_mips
-#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_mips
-#define gen_helper_neon_zip16 gen_helper_neon_zip16_mips
-#define gen_helper_neon_zip8 gen_helper_neon_zip8_mips
-#define gen_helper_pre_hvc gen_helper_pre_hvc_mips
-#define gen_helper_pre_smc gen_helper_pre_smc_mips
-#define gen_helper_qadd16 gen_helper_qadd16_mips
-#define gen_helper_qadd8 gen_helper_qadd8_mips
-#define gen_helper_qaddsubx gen_helper_qaddsubx_mips
-#define gen_helper_qsub16 gen_helper_qsub16_mips
-#define gen_helper_qsub8 gen_helper_qsub8_mips
-#define gen_helper_qsubaddx gen_helper_qsubaddx_mips
-#define gen_helper_rbit gen_helper_rbit_mips
-#define gen_helper_recpe_f32 gen_helper_recpe_f32_mips
-#define gen_helper_recpe_u32 gen_helper_recpe_u32_mips
-#define gen_helper_recps_f32 gen_helper_recps_f32_mips
-#define gen_helper_rintd gen_helper_rintd_mips
-#define gen_helper_rintd_exact gen_helper_rintd_exact_mips
-#define gen_helper_rints gen_helper_rints_mips
-#define gen_helper_rints_exact gen_helper_rints_exact_mips
-#define gen_helper_ror_cc gen_helper_ror_cc_mips
-#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_mips
-#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_mips
-#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_mips
-#define gen_helper_sadd16 gen_helper_sadd16_mips
-#define gen_helper_sadd8 gen_helper_sadd8_mips
-#define gen_helper_saddsubx gen_helper_saddsubx_mips
-#define gen_helper_sar_cc gen_helper_sar_cc_mips
-#define gen_helper_sdiv gen_helper_sdiv_mips
-#define gen_helper_sel_flags gen_helper_sel_flags_mips
-#define gen_helper_set_cp_reg gen_helper_set_cp_reg_mips
-#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_mips
-#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_mips
-#define gen_helper_set_r13_banked gen_helper_set_r13_banked_mips
-#define gen_helper_set_rmode gen_helper_set_rmode_mips
-#define gen_helper_set_user_reg gen_helper_set_user_reg_mips
-#define gen_helper_shadd16 gen_helper_shadd16_mips
-#define gen_helper_shadd8 gen_helper_shadd8_mips
-#define gen_helper_shaddsubx gen_helper_shaddsubx_mips
-#define gen_helper_shl_cc gen_helper_shl_cc_mips
-#define gen_helper_shr_cc gen_helper_shr_cc_mips
-#define gen_helper_shsub16 gen_helper_shsub16_mips
-#define gen_helper_shsub8 gen_helper_shsub8_mips
-#define gen_helper_shsubaddx gen_helper_shsubaddx_mips
-#define gen_helper_ssat gen_helper_ssat_mips
-#define gen_helper_ssat16 gen_helper_ssat16_mips
-#define gen_helper_ssub16 gen_helper_ssub16_mips
-#define gen_helper_ssub8 gen_helper_ssub8_mips
-#define gen_helper_ssubaddx gen_helper_ssubaddx_mips
-#define gen_helper_sub_saturate gen_helper_sub_saturate_mips
-#define gen_helper_sxtb16 gen_helper_sxtb16_mips
-#define gen_helper_uadd16 gen_helper_uadd16_mips
-#define gen_helper_uadd8 gen_helper_uadd8_mips
-#define gen_helper_uaddsubx gen_helper_uaddsubx_mips
-#define gen_helper_udiv gen_helper_udiv_mips
-#define gen_helper_uhadd16 gen_helper_uhadd16_mips
-#define gen_helper_uhadd8 gen_helper_uhadd8_mips
-#define gen_helper_uhaddsubx gen_helper_uhaddsubx_mips
-#define gen_helper_uhsub16 gen_helper_uhsub16_mips
-#define gen_helper_uhsub8 gen_helper_uhsub8_mips
-#define gen_helper_uhsubaddx gen_helper_uhsubaddx_mips
-#define gen_helper_uqadd16 gen_helper_uqadd16_mips
-#define gen_helper_uqadd8 gen_helper_uqadd8_mips
-#define gen_helper_uqaddsubx gen_helper_uqaddsubx_mips
-#define gen_helper_uqsub16 gen_helper_uqsub16_mips
-#define gen_helper_uqsub8 gen_helper_uqsub8_mips
-#define gen_helper_uqsubaddx gen_helper_uqsubaddx_mips
-#define gen_helper_usad8 gen_helper_usad8_mips
-#define gen_helper_usat gen_helper_usat_mips
-#define gen_helper_usat16 gen_helper_usat16_mips
-#define gen_helper_usub16 gen_helper_usub16_mips
-#define gen_helper_usub8 gen_helper_usub8_mips
-#define gen_helper_usubaddx gen_helper_usubaddx_mips
-#define gen_helper_uxtb16 gen_helper_uxtb16_mips
-#define gen_helper_v7m_mrs gen_helper_v7m_mrs_mips
-#define gen_helper_v7m_msr gen_helper_v7m_msr_mips
-#define gen_helper_vfp_absd gen_helper_vfp_absd_mips
-#define gen_helper_vfp_abss gen_helper_vfp_abss_mips
-#define gen_helper_vfp_addd gen_helper_vfp_addd_mips
-#define gen_helper_vfp_adds gen_helper_vfp_adds_mips
-#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_mips
-#define gen_helper_vfp_cmped gen_helper_vfp_cmped_mips
-#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_mips
-#define gen_helper_vfp_cmps gen_helper_vfp_cmps_mips
-#define gen_helper_vfp_divd gen_helper_vfp_divd_mips
-#define gen_helper_vfp_divs gen_helper_vfp_divs_mips
-#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_mips
-#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_mips
-#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_mips
-#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_mips
-#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_mips
-#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_mips
-#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips
-#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_mips
-#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_mips
-#define gen_helper_vfp_maxs gen_helper_vfp_maxs_mips
-#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_mips
-#define gen_helper_vfp_minnums gen_helper_vfp_minnums_mips
-#define gen_helper_vfp_mins gen_helper_vfp_mins_mips
-#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_mips
-#define gen_helper_vfp_muladds gen_helper_vfp_muladds_mips
-#define gen_helper_vfp_muld gen_helper_vfp_muld_mips
-#define gen_helper_vfp_muls gen_helper_vfp_muls_mips
-#define gen_helper_vfp_negd gen_helper_vfp_negd_mips
-#define gen_helper_vfp_negs gen_helper_vfp_negs_mips
-#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips
-#define gen_helper_vfp_shtod gen_helper_vfp_shtod_mips
-#define gen_helper_vfp_shtos gen_helper_vfp_shtos_mips
-#define gen_helper_vfp_sitod gen_helper_vfp_sitod_mips
-#define gen_helper_vfp_sitos gen_helper_vfp_sitos_mips
-#define gen_helper_vfp_sltod gen_helper_vfp_sltod_mips
-#define gen_helper_vfp_sltos gen_helper_vfp_sltos_mips
-#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_mips
-#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_mips
-#define gen_helper_vfp_subd gen_helper_vfp_subd_mips
-#define gen_helper_vfp_subs gen_helper_vfp_subs_mips
-#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_mips
-#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_mips
-#define gen_helper_vfp_tosid gen_helper_vfp_tosid_mips
-#define gen_helper_vfp_tosis gen_helper_vfp_tosis_mips
-#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_mips
-#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_mips
-#define gen_helper_vfp_tosld gen_helper_vfp_tosld_mips
-#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_mips
-#define gen_helper_vfp_tosls gen_helper_vfp_tosls_mips
-#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_mips
-#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_mips -#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_mips -#define gen_helper_vfp_touid gen_helper_vfp_touid_mips -#define gen_helper_vfp_touis gen_helper_vfp_touis_mips -#define gen_helper_vfp_touizd gen_helper_vfp_touizd_mips -#define gen_helper_vfp_touizs gen_helper_vfp_touizs_mips -#define gen_helper_vfp_tould gen_helper_vfp_tould_mips -#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_mips -#define gen_helper_vfp_touls gen_helper_vfp_touls_mips -#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_mips -#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_mips -#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_mips -#define gen_helper_vfp_uitod gen_helper_vfp_uitod_mips -#define gen_helper_vfp_uitos gen_helper_vfp_uitos_mips -#define gen_helper_vfp_ultod gen_helper_vfp_ultod_mips -#define gen_helper_vfp_ultos gen_helper_vfp_ultos_mips -#define gen_helper_wfe gen_helper_wfe_mips -#define gen_helper_wfi gen_helper_wfi_mips -#define gen_hvc gen_hvc_mips -#define gen_intermediate_code_internal gen_intermediate_code_internal_mips -#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_mips -#define gen_iwmmxt_address gen_iwmmxt_address_mips -#define gen_iwmmxt_shift gen_iwmmxt_shift_mips -#define gen_jmp gen_jmp_mips -#define gen_load_and_replicate gen_load_and_replicate_mips -#define gen_load_exclusive gen_load_exclusive_mips -#define gen_logic_CC gen_logic_CC_mips -#define gen_logicq_cc gen_logicq_cc_mips -#define gen_lookup_tb gen_lookup_tb_mips -#define gen_mov_F0_vreg gen_mov_F0_vreg_mips -#define gen_mov_F1_vreg gen_mov_F1_vreg_mips -#define gen_mov_vreg_F0 gen_mov_vreg_F0_mips -#define gen_muls_i64_i32 gen_muls_i64_i32_mips -#define gen_mulu_i64_i32 gen_mulu_i64_i32_mips -#define gen_mulxy gen_mulxy_mips -#define gen_neon_add gen_neon_add_mips -#define gen_neon_addl gen_neon_addl_mips -#define gen_neon_addl_saturate gen_neon_addl_saturate_mips -#define gen_neon_bsl gen_neon_bsl_mips -#define gen_neon_dup_high16 gen_neon_dup_high16_mips -#define gen_neon_dup_low16 gen_neon_dup_low16_mips -#define gen_neon_dup_u8 gen_neon_dup_u8_mips -#define gen_neon_mull gen_neon_mull_mips -#define gen_neon_narrow gen_neon_narrow_mips -#define gen_neon_narrow_op gen_neon_narrow_op_mips -#define gen_neon_narrow_sats gen_neon_narrow_sats_mips -#define gen_neon_narrow_satu gen_neon_narrow_satu_mips -#define gen_neon_negl gen_neon_negl_mips -#define gen_neon_rsb gen_neon_rsb_mips -#define gen_neon_shift_narrow gen_neon_shift_narrow_mips -#define gen_neon_subl gen_neon_subl_mips -#define gen_neon_trn_u16 gen_neon_trn_u16_mips -#define gen_neon_trn_u8 gen_neon_trn_u8_mips -#define gen_neon_unarrow_sats gen_neon_unarrow_sats_mips -#define gen_neon_unzip gen_neon_unzip_mips -#define gen_neon_widen gen_neon_widen_mips -#define gen_neon_zip gen_neon_zip_mips +#define float128_to_int32 float128_to_int32_mips +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips +#define float128_to_int64 float128_to_int64_mips +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips +#define float128_to_uint64 float128_to_uint64_mips +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_mips +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_mips +#define float128_to_uint32 float128_to_uint32_mips +#define float128_to_float32 float128_to_float32_mips +#define 
float128_to_float64 float128_to_float64_mips +#define float128_to_floatx80 float128_to_floatx80_mips +#define float128_round_to_int float128_round_to_int_mips +#define float128_add float128_add_mips +#define float128_sub float128_sub_mips +#define float128_mul float128_mul_mips +#define float128_div float128_div_mips +#define float128_rem float128_rem_mips +#define float128_sqrt float128_sqrt_mips +#define float128_eq float128_eq_mips +#define float128_le float128_le_mips +#define float128_lt float128_lt_mips +#define float128_unordered float128_unordered_mips +#define float128_eq_quiet float128_eq_quiet_mips +#define float128_le_quiet float128_le_quiet_mips +#define float128_lt_quiet float128_lt_quiet_mips +#define float128_unordered_quiet float128_unordered_quiet_mips +#define floatx80_compare floatx80_compare_mips +#define floatx80_compare_quiet floatx80_compare_quiet_mips +#define float128_compare float128_compare_mips +#define float128_compare_quiet float128_compare_quiet_mips +#define floatx80_scalbn floatx80_scalbn_mips +#define float128_scalbn float128_scalbn_mips +#define softfloat_init softfloat_init_mips +#define tcg_optimize tcg_optimize_mips #define gen_new_label gen_new_label_mips -#define gen_nop_hint gen_nop_hint_mips -#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_mips -#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_mips -#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_mips -#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_mips -#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_mips -#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_mips -#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_mips -#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_mips -#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_mips -#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_mips -#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_mips -#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_mips -#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_mips -#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_mips -#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_mips -#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_mips -#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_mips -#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_mips -#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_mips -#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_mips -#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_mips -#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_mips -#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_mips -#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_mips -#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_mips -#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_mips -#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_mips -#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_mips -#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_mips -#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_mips -#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_mips -#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_mips -#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_mips -#define 
gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_mips -#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_mips -#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_mips -#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_mips -#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_mips -#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_mips -#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_mips -#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_mips -#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_mips -#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_mips -#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_mips -#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_mips -#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_mips -#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_mips -#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_mips -#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_mips -#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_mips -#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_mips -#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_mips -#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_mips -#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_mips -#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_mips -#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_mips -#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_mips -#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_mips -#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_mips -#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_mips -#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_mips -#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_mips -#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_mips -#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_mips -#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_mips -#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_mips -#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_mips -#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_mips -#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_mips -#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_mips -#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_mips -#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_mips -#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_mips -#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_mips -#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_mips -#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_mips -#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_mips -#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_mips -#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_mips -#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_mips -#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_mips -#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_mips -#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_mips -#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_mips -#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_mips -#define 
gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_mips -#define gen_rev16 gen_rev16_mips -#define gen_revsh gen_revsh_mips -#define gen_rfe gen_rfe_mips -#define gen_sar gen_sar_mips -#define gen_sbc_CC gen_sbc_CC_mips -#define gen_sbfx gen_sbfx_mips -#define gen_set_CF_bit31 gen_set_CF_bit31_mips -#define gen_set_condexec gen_set_condexec_mips -#define gen_set_cpsr gen_set_cpsr_mips -#define gen_set_label gen_set_label_mips -#define gen_set_pc_im gen_set_pc_im_mips -#define gen_set_psr gen_set_psr_mips -#define gen_set_psr_im gen_set_psr_im_mips -#define gen_shl gen_shl_mips -#define gen_shr gen_shr_mips -#define gen_smc gen_smc_mips -#define gen_smul_dual gen_smul_dual_mips -#define gen_srs gen_srs_mips -#define gen_ss_advance gen_ss_advance_mips -#define gen_step_complete_exception gen_step_complete_exception_mips -#define gen_store_exclusive gen_store_exclusive_mips -#define gen_storeq_reg gen_storeq_reg_mips -#define gen_sub_carry gen_sub_carry_mips -#define gen_sub_CC gen_sub_CC_mips -#define gen_subq_msw gen_subq_msw_mips -#define gen_swap_half gen_swap_half_mips -#define gen_thumb2_data_op gen_thumb2_data_op_mips -#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_mips -#define gen_ubfx gen_ubfx_mips -#define gen_vfp_abs gen_vfp_abs_mips -#define gen_vfp_add gen_vfp_add_mips -#define gen_vfp_cmp gen_vfp_cmp_mips -#define gen_vfp_cmpe gen_vfp_cmpe_mips -#define gen_vfp_div gen_vfp_div_mips -#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_mips -#define gen_vfp_F1_mul gen_vfp_F1_mul_mips -#define gen_vfp_F1_neg gen_vfp_F1_neg_mips -#define gen_vfp_ld gen_vfp_ld_mips -#define gen_vfp_mrs gen_vfp_mrs_mips -#define gen_vfp_msr gen_vfp_msr_mips -#define gen_vfp_mul gen_vfp_mul_mips -#define gen_vfp_neg gen_vfp_neg_mips -#define gen_vfp_shto gen_vfp_shto_mips -#define gen_vfp_sito gen_vfp_sito_mips -#define gen_vfp_slto gen_vfp_slto_mips -#define gen_vfp_sqrt gen_vfp_sqrt_mips -#define gen_vfp_st gen_vfp_st_mips -#define gen_vfp_sub gen_vfp_sub_mips -#define gen_vfp_tosh gen_vfp_tosh_mips -#define gen_vfp_tosi gen_vfp_tosi_mips -#define gen_vfp_tosiz gen_vfp_tosiz_mips -#define gen_vfp_tosl gen_vfp_tosl_mips -#define gen_vfp_touh gen_vfp_touh_mips -#define gen_vfp_toui gen_vfp_toui_mips -#define gen_vfp_touiz gen_vfp_touiz_mips -#define gen_vfp_toul gen_vfp_toul_mips -#define gen_vfp_uhto gen_vfp_uhto_mips -#define gen_vfp_uito gen_vfp_uito_mips -#define gen_vfp_ulto gen_vfp_ulto_mips -#define get_arm_cp_reginfo get_arm_cp_reginfo_mips -#define get_clock get_clock_mips -#define get_clock_realtime get_clock_realtime_mips -#define get_constraint_priority get_constraint_priority_mips -#define get_float_exception_flags get_float_exception_flags_mips -#define get_float_rounding_mode get_float_rounding_mode_mips -#define get_fpstatus_ptr get_fpstatus_ptr_mips -#define get_level1_table_address get_level1_table_address_mips -#define get_mem_index get_mem_index_mips -#define get_next_param_value get_next_param_value_mips -#define get_opt_name get_opt_name_mips -#define get_opt_value get_opt_value_mips -#define get_page_addr_code get_page_addr_code_mips -#define get_param_value get_param_value_mips -#define get_phys_addr get_phys_addr_mips -#define get_phys_addr_lpae get_phys_addr_lpae_mips -#define get_phys_addr_mpu get_phys_addr_mpu_mips -#define get_phys_addr_v5 get_phys_addr_v5_mips -#define get_phys_addr_v6 get_phys_addr_v6_mips -#define get_system_memory get_system_memory_mips -#define get_ticks_per_sec get_ticks_per_sec_mips -#define g_list_insert_sorted_merged 
g_list_insert_sorted_merged_mips -#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__mips -#define gt_cntfrq_access gt_cntfrq_access_mips -#define gt_cnt_read gt_cnt_read_mips -#define gt_cnt_reset gt_cnt_reset_mips -#define gt_counter_access gt_counter_access_mips -#define gt_ctl_write gt_ctl_write_mips -#define gt_cval_write gt_cval_write_mips -#define gt_get_countervalue gt_get_countervalue_mips -#define gt_pct_access gt_pct_access_mips -#define gt_ptimer_access gt_ptimer_access_mips -#define gt_recalc_timer gt_recalc_timer_mips -#define gt_timer_access gt_timer_access_mips -#define gt_tval_read gt_tval_read_mips -#define gt_tval_write gt_tval_write_mips -#define gt_vct_access gt_vct_access_mips -#define gt_vtimer_access gt_vtimer_access_mips -#define guest_phys_blocks_free guest_phys_blocks_free_mips -#define guest_phys_blocks_init guest_phys_blocks_init_mips -#define handle_vcvt handle_vcvt_mips -#define handle_vminmaxnm handle_vminmaxnm_mips -#define handle_vrint handle_vrint_mips -#define handle_vsel handle_vsel_mips -#define has_help_option has_help_option_mips -#define have_bmi1 have_bmi1_mips -#define have_bmi2 have_bmi2_mips -#define hcr_write hcr_write_mips -#define helper_access_check_cp_reg helper_access_check_cp_reg_mips -#define helper_add_saturate helper_add_saturate_mips -#define helper_add_setq helper_add_setq_mips -#define helper_add_usaturate helper_add_usaturate_mips -#define helper_be_ldl_cmmu helper_be_ldl_cmmu_mips -#define helper_be_ldq_cmmu helper_be_ldq_cmmu_mips -#define helper_be_ldq_mmu helper_be_ldq_mmu_mips -#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips -#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips -#define helper_be_ldul_mmu helper_be_ldul_mmu_mips -#define helper_be_lduw_mmu helper_be_lduw_mmu_mips -#define helper_be_ldw_cmmu helper_be_ldw_cmmu_mips -#define helper_be_stl_mmu helper_be_stl_mmu_mips -#define helper_be_stq_mmu helper_be_stq_mmu_mips -#define helper_be_stw_mmu helper_be_stw_mmu_mips -#define helper_clear_pstate_ss helper_clear_pstate_ss_mips -#define helper_clz_arm helper_clz_arm_mips -#define helper_cpsr_read helper_cpsr_read_mips -#define helper_cpsr_write helper_cpsr_write_mips -#define helper_crc32_arm helper_crc32_arm_mips -#define helper_crc32c helper_crc32c_mips -#define helper_crypto_aese helper_crypto_aese_mips -#define helper_crypto_aesmc helper_crypto_aesmc_mips -#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_mips -#define helper_crypto_sha1h helper_crypto_sha1h_mips -#define helper_crypto_sha1su1 helper_crypto_sha1su1_mips -#define helper_crypto_sha256h helper_crypto_sha256h_mips -#define helper_crypto_sha256h2 helper_crypto_sha256h2_mips -#define helper_crypto_sha256su0 helper_crypto_sha256su0_mips -#define helper_crypto_sha256su1 helper_crypto_sha256su1_mips -#define helper_dc_zva helper_dc_zva_mips -#define helper_double_saturate helper_double_saturate_mips -#define helper_exception_internal helper_exception_internal_mips -#define helper_exception_return helper_exception_return_mips -#define helper_exception_with_syndrome helper_exception_with_syndrome_mips -#define helper_get_cp_reg helper_get_cp_reg_mips -#define helper_get_cp_reg64 helper_get_cp_reg64_mips -#define helper_get_r13_banked helper_get_r13_banked_mips -#define helper_get_user_reg helper_get_user_reg_mips -#define helper_iwmmxt_addcb helper_iwmmxt_addcb_mips -#define helper_iwmmxt_addcl helper_iwmmxt_addcl_mips -#define helper_iwmmxt_addcw helper_iwmmxt_addcw_mips -#define helper_iwmmxt_addnb helper_iwmmxt_addnb_mips -#define 
helper_iwmmxt_addnl helper_iwmmxt_addnl_mips -#define helper_iwmmxt_addnw helper_iwmmxt_addnw_mips -#define helper_iwmmxt_addsb helper_iwmmxt_addsb_mips -#define helper_iwmmxt_addsl helper_iwmmxt_addsl_mips -#define helper_iwmmxt_addsw helper_iwmmxt_addsw_mips -#define helper_iwmmxt_addub helper_iwmmxt_addub_mips -#define helper_iwmmxt_addul helper_iwmmxt_addul_mips -#define helper_iwmmxt_adduw helper_iwmmxt_adduw_mips -#define helper_iwmmxt_align helper_iwmmxt_align_mips -#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_mips -#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_mips -#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_mips -#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_mips -#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_mips -#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_mips -#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_mips -#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_mips -#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_mips -#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_mips -#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_mips -#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_mips -#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_mips -#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_mips -#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_mips -#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_mips -#define helper_iwmmxt_insr helper_iwmmxt_insr_mips -#define helper_iwmmxt_macsw helper_iwmmxt_macsw_mips -#define helper_iwmmxt_macuw helper_iwmmxt_macuw_mips -#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_mips -#define helper_iwmmxt_madduq helper_iwmmxt_madduq_mips -#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_mips -#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_mips -#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_mips -#define helper_iwmmxt_maxub helper_iwmmxt_maxub_mips -#define helper_iwmmxt_maxul helper_iwmmxt_maxul_mips -#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_mips -#define helper_iwmmxt_minsb helper_iwmmxt_minsb_mips -#define helper_iwmmxt_minsl helper_iwmmxt_minsl_mips -#define helper_iwmmxt_minsw helper_iwmmxt_minsw_mips -#define helper_iwmmxt_minub helper_iwmmxt_minub_mips -#define helper_iwmmxt_minul helper_iwmmxt_minul_mips -#define helper_iwmmxt_minuw helper_iwmmxt_minuw_mips -#define helper_iwmmxt_msbb helper_iwmmxt_msbb_mips -#define helper_iwmmxt_msbl helper_iwmmxt_msbl_mips -#define helper_iwmmxt_msbw helper_iwmmxt_msbw_mips -#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_mips -#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_mips -#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_mips -#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_mips -#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_mips -#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_mips -#define helper_iwmmxt_mululw helper_iwmmxt_mululw_mips -#define helper_iwmmxt_packsl helper_iwmmxt_packsl_mips -#define helper_iwmmxt_packsq helper_iwmmxt_packsq_mips -#define helper_iwmmxt_packsw helper_iwmmxt_packsw_mips -#define helper_iwmmxt_packul helper_iwmmxt_packul_mips -#define helper_iwmmxt_packuq helper_iwmmxt_packuq_mips -#define helper_iwmmxt_packuw helper_iwmmxt_packuw_mips -#define helper_iwmmxt_rorl helper_iwmmxt_rorl_mips -#define helper_iwmmxt_rorq helper_iwmmxt_rorq_mips -#define helper_iwmmxt_rorw helper_iwmmxt_rorw_mips -#define helper_iwmmxt_sadb helper_iwmmxt_sadb_mips -#define helper_iwmmxt_sadw helper_iwmmxt_sadw_mips -#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_mips -#define helper_iwmmxt_shufh 
helper_iwmmxt_shufh_mips -#define helper_iwmmxt_slll helper_iwmmxt_slll_mips -#define helper_iwmmxt_sllq helper_iwmmxt_sllq_mips -#define helper_iwmmxt_sllw helper_iwmmxt_sllw_mips -#define helper_iwmmxt_sral helper_iwmmxt_sral_mips -#define helper_iwmmxt_sraq helper_iwmmxt_sraq_mips -#define helper_iwmmxt_sraw helper_iwmmxt_sraw_mips -#define helper_iwmmxt_srll helper_iwmmxt_srll_mips -#define helper_iwmmxt_srlq helper_iwmmxt_srlq_mips -#define helper_iwmmxt_srlw helper_iwmmxt_srlw_mips -#define helper_iwmmxt_subnb helper_iwmmxt_subnb_mips -#define helper_iwmmxt_subnl helper_iwmmxt_subnl_mips -#define helper_iwmmxt_subnw helper_iwmmxt_subnw_mips -#define helper_iwmmxt_subsb helper_iwmmxt_subsb_mips -#define helper_iwmmxt_subsl helper_iwmmxt_subsl_mips -#define helper_iwmmxt_subsw helper_iwmmxt_subsw_mips -#define helper_iwmmxt_subub helper_iwmmxt_subub_mips -#define helper_iwmmxt_subul helper_iwmmxt_subul_mips -#define helper_iwmmxt_subuw helper_iwmmxt_subuw_mips -#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_mips -#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_mips -#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_mips -#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_mips -#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_mips -#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_mips -#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_mips -#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_mips -#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_mips -#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_mips -#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_mips -#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_mips -#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_mips -#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_mips -#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_mips -#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_mips -#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_mips -#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_mips -#define helper_ldb_cmmu helper_ldb_cmmu_mips -#define helper_ldb_mmu helper_ldb_mmu_mips -#define helper_ldl_cmmu helper_ldl_cmmu_mips -#define helper_ldl_mmu helper_ldl_mmu_mips -#define helper_ldq_cmmu helper_ldq_cmmu_mips -#define helper_ldq_mmu helper_ldq_mmu_mips -#define helper_ldw_cmmu helper_ldw_cmmu_mips -#define helper_ldw_mmu helper_ldw_mmu_mips -#define helper_le_ldl_cmmu helper_le_ldl_cmmu_mips -#define helper_le_ldq_cmmu helper_le_ldq_cmmu_mips -#define helper_le_ldq_mmu helper_le_ldq_mmu_mips -#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips -#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips -#define helper_le_ldul_mmu helper_le_ldul_mmu_mips -#define helper_le_lduw_mmu helper_le_lduw_mmu_mips -#define helper_le_ldw_cmmu helper_le_ldw_cmmu_mips -#define helper_le_stl_mmu helper_le_stl_mmu_mips -#define helper_le_stq_mmu helper_le_stq_mmu_mips -#define helper_le_stw_mmu helper_le_stw_mmu_mips -#define helper_msr_i_pstate helper_msr_i_pstate_mips -#define helper_neon_abd_f32 helper_neon_abd_f32_mips -#define helper_neon_abdl_s16 helper_neon_abdl_s16_mips -#define helper_neon_abdl_s32 helper_neon_abdl_s32_mips -#define helper_neon_abdl_s64 helper_neon_abdl_s64_mips -#define helper_neon_abdl_u16 helper_neon_abdl_u16_mips -#define helper_neon_abdl_u32 helper_neon_abdl_u32_mips -#define helper_neon_abdl_u64 helper_neon_abdl_u64_mips -#define helper_neon_abd_s16 helper_neon_abd_s16_mips -#define helper_neon_abd_s32 
helper_neon_abd_s32_mips -#define helper_neon_abd_s8 helper_neon_abd_s8_mips -#define helper_neon_abd_u16 helper_neon_abd_u16_mips -#define helper_neon_abd_u32 helper_neon_abd_u32_mips -#define helper_neon_abd_u8 helper_neon_abd_u8_mips -#define helper_neon_abs_s16 helper_neon_abs_s16_mips -#define helper_neon_abs_s8 helper_neon_abs_s8_mips -#define helper_neon_acge_f32 helper_neon_acge_f32_mips -#define helper_neon_acge_f64 helper_neon_acge_f64_mips -#define helper_neon_acgt_f32 helper_neon_acgt_f32_mips -#define helper_neon_acgt_f64 helper_neon_acgt_f64_mips -#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_mips -#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_mips -#define helper_neon_addl_u16 helper_neon_addl_u16_mips -#define helper_neon_addl_u32 helper_neon_addl_u32_mips -#define helper_neon_add_u16 helper_neon_add_u16_mips -#define helper_neon_add_u8 helper_neon_add_u8_mips -#define helper_neon_ceq_f32 helper_neon_ceq_f32_mips -#define helper_neon_ceq_u16 helper_neon_ceq_u16_mips -#define helper_neon_ceq_u32 helper_neon_ceq_u32_mips -#define helper_neon_ceq_u8 helper_neon_ceq_u8_mips -#define helper_neon_cge_f32 helper_neon_cge_f32_mips -#define helper_neon_cge_s16 helper_neon_cge_s16_mips -#define helper_neon_cge_s32 helper_neon_cge_s32_mips -#define helper_neon_cge_s8 helper_neon_cge_s8_mips -#define helper_neon_cge_u16 helper_neon_cge_u16_mips -#define helper_neon_cge_u32 helper_neon_cge_u32_mips -#define helper_neon_cge_u8 helper_neon_cge_u8_mips -#define helper_neon_cgt_f32 helper_neon_cgt_f32_mips -#define helper_neon_cgt_s16 helper_neon_cgt_s16_mips -#define helper_neon_cgt_s32 helper_neon_cgt_s32_mips -#define helper_neon_cgt_s8 helper_neon_cgt_s8_mips -#define helper_neon_cgt_u16 helper_neon_cgt_u16_mips -#define helper_neon_cgt_u32 helper_neon_cgt_u32_mips -#define helper_neon_cgt_u8 helper_neon_cgt_u8_mips -#define helper_neon_cls_s16 helper_neon_cls_s16_mips -#define helper_neon_cls_s32 helper_neon_cls_s32_mips -#define helper_neon_cls_s8 helper_neon_cls_s8_mips -#define helper_neon_clz_u16 helper_neon_clz_u16_mips -#define helper_neon_clz_u8 helper_neon_clz_u8_mips -#define helper_neon_cnt_u8 helper_neon_cnt_u8_mips -#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_mips -#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_mips -#define helper_neon_hadd_s16 helper_neon_hadd_s16_mips -#define helper_neon_hadd_s32 helper_neon_hadd_s32_mips -#define helper_neon_hadd_s8 helper_neon_hadd_s8_mips -#define helper_neon_hadd_u16 helper_neon_hadd_u16_mips -#define helper_neon_hadd_u32 helper_neon_hadd_u32_mips -#define helper_neon_hadd_u8 helper_neon_hadd_u8_mips -#define helper_neon_hsub_s16 helper_neon_hsub_s16_mips -#define helper_neon_hsub_s32 helper_neon_hsub_s32_mips -#define helper_neon_hsub_s8 helper_neon_hsub_s8_mips -#define helper_neon_hsub_u16 helper_neon_hsub_u16_mips -#define helper_neon_hsub_u32 helper_neon_hsub_u32_mips -#define helper_neon_hsub_u8 helper_neon_hsub_u8_mips -#define helper_neon_max_s16 helper_neon_max_s16_mips -#define helper_neon_max_s32 helper_neon_max_s32_mips -#define helper_neon_max_s8 helper_neon_max_s8_mips -#define helper_neon_max_u16 helper_neon_max_u16_mips -#define helper_neon_max_u32 helper_neon_max_u32_mips -#define helper_neon_max_u8 helper_neon_max_u8_mips -#define helper_neon_min_s16 helper_neon_min_s16_mips -#define helper_neon_min_s32 helper_neon_min_s32_mips -#define helper_neon_min_s8 helper_neon_min_s8_mips -#define helper_neon_min_u16 helper_neon_min_u16_mips 
-#define helper_neon_min_u32 helper_neon_min_u32_mips -#define helper_neon_min_u8 helper_neon_min_u8_mips -#define helper_neon_mull_p8 helper_neon_mull_p8_mips -#define helper_neon_mull_s16 helper_neon_mull_s16_mips -#define helper_neon_mull_s8 helper_neon_mull_s8_mips -#define helper_neon_mull_u16 helper_neon_mull_u16_mips -#define helper_neon_mull_u8 helper_neon_mull_u8_mips -#define helper_neon_mul_p8 helper_neon_mul_p8_mips -#define helper_neon_mul_u16 helper_neon_mul_u16_mips -#define helper_neon_mul_u8 helper_neon_mul_u8_mips -#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_mips -#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_mips -#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_mips -#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_mips -#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_mips -#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_mips -#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_mips -#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_mips -#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_mips -#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_mips -#define helper_neon_narrow_u16 helper_neon_narrow_u16_mips -#define helper_neon_narrow_u8 helper_neon_narrow_u8_mips -#define helper_neon_negl_u16 helper_neon_negl_u16_mips -#define helper_neon_negl_u32 helper_neon_negl_u32_mips -#define helper_neon_paddl_u16 helper_neon_paddl_u16_mips -#define helper_neon_paddl_u32 helper_neon_paddl_u32_mips -#define helper_neon_padd_u16 helper_neon_padd_u16_mips -#define helper_neon_padd_u8 helper_neon_padd_u8_mips -#define helper_neon_pmax_s16 helper_neon_pmax_s16_mips -#define helper_neon_pmax_s8 helper_neon_pmax_s8_mips -#define helper_neon_pmax_u16 helper_neon_pmax_u16_mips -#define helper_neon_pmax_u8 helper_neon_pmax_u8_mips -#define helper_neon_pmin_s16 helper_neon_pmin_s16_mips -#define helper_neon_pmin_s8 helper_neon_pmin_s8_mips -#define helper_neon_pmin_u16 helper_neon_pmin_u16_mips -#define helper_neon_pmin_u8 helper_neon_pmin_u8_mips -#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_mips -#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_mips -#define helper_neon_qabs_s16 helper_neon_qabs_s16_mips -#define helper_neon_qabs_s32 helper_neon_qabs_s32_mips -#define helper_neon_qabs_s64 helper_neon_qabs_s64_mips -#define helper_neon_qabs_s8 helper_neon_qabs_s8_mips -#define helper_neon_qadd_s16 helper_neon_qadd_s16_mips -#define helper_neon_qadd_s32 helper_neon_qadd_s32_mips -#define helper_neon_qadd_s64 helper_neon_qadd_s64_mips -#define helper_neon_qadd_s8 helper_neon_qadd_s8_mips -#define helper_neon_qadd_u16 helper_neon_qadd_u16_mips -#define helper_neon_qadd_u32 helper_neon_qadd_u32_mips -#define helper_neon_qadd_u64 helper_neon_qadd_u64_mips -#define helper_neon_qadd_u8 helper_neon_qadd_u8_mips -#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_mips -#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_mips -#define helper_neon_qneg_s16 helper_neon_qneg_s16_mips -#define helper_neon_qneg_s32 helper_neon_qneg_s32_mips -#define helper_neon_qneg_s64 helper_neon_qneg_s64_mips -#define helper_neon_qneg_s8 helper_neon_qneg_s8_mips -#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_mips -#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_mips -#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_mips -#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_mips -#define helper_neon_qrshl_s64 
helper_neon_qrshl_s64_mips -#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_mips -#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_mips -#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_mips -#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_mips -#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_mips -#define helper_neon_qshl_s16 helper_neon_qshl_s16_mips -#define helper_neon_qshl_s32 helper_neon_qshl_s32_mips -#define helper_neon_qshl_s64 helper_neon_qshl_s64_mips -#define helper_neon_qshl_s8 helper_neon_qshl_s8_mips -#define helper_neon_qshl_u16 helper_neon_qshl_u16_mips -#define helper_neon_qshl_u32 helper_neon_qshl_u32_mips -#define helper_neon_qshl_u64 helper_neon_qshl_u64_mips -#define helper_neon_qshl_u8 helper_neon_qshl_u8_mips -#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_mips -#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_mips -#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_mips -#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_mips -#define helper_neon_qsub_s16 helper_neon_qsub_s16_mips -#define helper_neon_qsub_s32 helper_neon_qsub_s32_mips -#define helper_neon_qsub_s64 helper_neon_qsub_s64_mips -#define helper_neon_qsub_s8 helper_neon_qsub_s8_mips -#define helper_neon_qsub_u16 helper_neon_qsub_u16_mips -#define helper_neon_qsub_u32 helper_neon_qsub_u32_mips -#define helper_neon_qsub_u64 helper_neon_qsub_u64_mips -#define helper_neon_qsub_u8 helper_neon_qsub_u8_mips -#define helper_neon_qunzip16 helper_neon_qunzip16_mips -#define helper_neon_qunzip32 helper_neon_qunzip32_mips -#define helper_neon_qunzip8 helper_neon_qunzip8_mips -#define helper_neon_qzip16 helper_neon_qzip16_mips -#define helper_neon_qzip32 helper_neon_qzip32_mips -#define helper_neon_qzip8 helper_neon_qzip8_mips -#define helper_neon_rbit_u8 helper_neon_rbit_u8_mips -#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_mips -#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_mips -#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_mips -#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_mips -#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_mips -#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_mips -#define helper_neon_rshl_s16 helper_neon_rshl_s16_mips -#define helper_neon_rshl_s32 helper_neon_rshl_s32_mips -#define helper_neon_rshl_s64 helper_neon_rshl_s64_mips -#define helper_neon_rshl_s8 helper_neon_rshl_s8_mips -#define helper_neon_rshl_u16 helper_neon_rshl_u16_mips -#define helper_neon_rshl_u32 helper_neon_rshl_u32_mips -#define helper_neon_rshl_u64 helper_neon_rshl_u64_mips -#define helper_neon_rshl_u8 helper_neon_rshl_u8_mips -#define helper_neon_shl_s16 helper_neon_shl_s16_mips -#define helper_neon_shl_s32 helper_neon_shl_s32_mips -#define helper_neon_shl_s64 helper_neon_shl_s64_mips -#define helper_neon_shl_s8 helper_neon_shl_s8_mips -#define helper_neon_shl_u16 helper_neon_shl_u16_mips -#define helper_neon_shl_u32 helper_neon_shl_u32_mips -#define helper_neon_shl_u64 helper_neon_shl_u64_mips -#define helper_neon_shl_u8 helper_neon_shl_u8_mips -#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_mips -#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_mips -#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_mips -#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_mips -#define helper_neon_subl_u16 helper_neon_subl_u16_mips -#define helper_neon_subl_u32 helper_neon_subl_u32_mips -#define helper_neon_sub_u16 helper_neon_sub_u16_mips -#define helper_neon_sub_u8 helper_neon_sub_u8_mips -#define helper_neon_tbl helper_neon_tbl_mips -#define helper_neon_tst_u16 
helper_neon_tst_u16_mips -#define helper_neon_tst_u32 helper_neon_tst_u32_mips -#define helper_neon_tst_u8 helper_neon_tst_u8_mips -#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_mips -#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_mips -#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_mips -#define helper_neon_unzip16 helper_neon_unzip16_mips -#define helper_neon_unzip8 helper_neon_unzip8_mips -#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_mips -#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_mips -#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_mips -#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_mips -#define helper_neon_widen_s16 helper_neon_widen_s16_mips -#define helper_neon_widen_s8 helper_neon_widen_s8_mips -#define helper_neon_widen_u16 helper_neon_widen_u16_mips -#define helper_neon_widen_u8 helper_neon_widen_u8_mips -#define helper_neon_zip16 helper_neon_zip16_mips -#define helper_neon_zip8 helper_neon_zip8_mips -#define helper_pre_hvc helper_pre_hvc_mips -#define helper_pre_smc helper_pre_smc_mips -#define helper_qadd16 helper_qadd16_mips -#define helper_qadd8 helper_qadd8_mips -#define helper_qaddsubx helper_qaddsubx_mips -#define helper_qsub16 helper_qsub16_mips -#define helper_qsub8 helper_qsub8_mips -#define helper_qsubaddx helper_qsubaddx_mips -#define helper_rbit helper_rbit_mips -#define helper_recpe_f32 helper_recpe_f32_mips -#define helper_recpe_f64 helper_recpe_f64_mips -#define helper_recpe_u32 helper_recpe_u32_mips -#define helper_recps_f32 helper_recps_f32_mips -#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_mips -#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips -#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips -#define helper_ret_stb_mmu helper_ret_stb_mmu_mips -#define helper_rintd helper_rintd_mips -#define helper_rintd_exact helper_rintd_exact_mips -#define helper_rints helper_rints_mips -#define helper_rints_exact helper_rints_exact_mips -#define helper_ror_cc helper_ror_cc_mips -#define helper_rsqrte_f32 helper_rsqrte_f32_mips -#define helper_rsqrte_f64 helper_rsqrte_f64_mips -#define helper_rsqrte_u32 helper_rsqrte_u32_mips -#define helper_rsqrts_f32 helper_rsqrts_f32_mips -#define helper_sadd16 helper_sadd16_mips -#define helper_sadd8 helper_sadd8_mips -#define helper_saddsubx helper_saddsubx_mips -#define helper_sar_cc helper_sar_cc_mips -#define helper_sdiv helper_sdiv_mips -#define helper_sel_flags helper_sel_flags_mips -#define helper_set_cp_reg helper_set_cp_reg_mips -#define helper_set_cp_reg64 helper_set_cp_reg64_mips -#define helper_set_neon_rmode helper_set_neon_rmode_mips -#define helper_set_r13_banked helper_set_r13_banked_mips -#define helper_set_rmode helper_set_rmode_mips -#define helper_set_user_reg helper_set_user_reg_mips -#define helper_shadd16 helper_shadd16_mips -#define helper_shadd8 helper_shadd8_mips -#define helper_shaddsubx helper_shaddsubx_mips -#define helper_shl_cc helper_shl_cc_mips -#define helper_shr_cc helper_shr_cc_mips -#define helper_shsub16 helper_shsub16_mips -#define helper_shsub8 helper_shsub8_mips -#define helper_shsubaddx helper_shsubaddx_mips -#define helper_ssat helper_ssat_mips -#define helper_ssat16 helper_ssat16_mips -#define helper_ssub16 helper_ssub16_mips -#define helper_ssub8 helper_ssub8_mips -#define helper_ssubaddx helper_ssubaddx_mips -#define helper_stb_mmu helper_stb_mmu_mips -#define helper_stl_mmu helper_stl_mmu_mips -#define helper_stq_mmu helper_stq_mmu_mips -#define helper_stw_mmu helper_stw_mmu_mips -#define helper_sub_saturate 
helper_sub_saturate_mips -#define helper_sub_usaturate helper_sub_usaturate_mips -#define helper_sxtb16 helper_sxtb16_mips -#define helper_uadd16 helper_uadd16_mips -#define helper_uadd8 helper_uadd8_mips -#define helper_uaddsubx helper_uaddsubx_mips -#define helper_udiv helper_udiv_mips -#define helper_uhadd16 helper_uhadd16_mips -#define helper_uhadd8 helper_uhadd8_mips -#define helper_uhaddsubx helper_uhaddsubx_mips -#define helper_uhsub16 helper_uhsub16_mips -#define helper_uhsub8 helper_uhsub8_mips -#define helper_uhsubaddx helper_uhsubaddx_mips -#define helper_uqadd16 helper_uqadd16_mips -#define helper_uqadd8 helper_uqadd8_mips -#define helper_uqaddsubx helper_uqaddsubx_mips -#define helper_uqsub16 helper_uqsub16_mips -#define helper_uqsub8 helper_uqsub8_mips -#define helper_uqsubaddx helper_uqsubaddx_mips -#define helper_usad8 helper_usad8_mips -#define helper_usat helper_usat_mips -#define helper_usat16 helper_usat16_mips -#define helper_usub16 helper_usub16_mips -#define helper_usub8 helper_usub8_mips -#define helper_usubaddx helper_usubaddx_mips -#define helper_uxtb16 helper_uxtb16_mips -#define helper_v7m_mrs helper_v7m_mrs_mips -#define helper_v7m_msr helper_v7m_msr_mips -#define helper_vfp_absd helper_vfp_absd_mips -#define helper_vfp_abss helper_vfp_abss_mips -#define helper_vfp_addd helper_vfp_addd_mips -#define helper_vfp_adds helper_vfp_adds_mips -#define helper_vfp_cmpd helper_vfp_cmpd_mips -#define helper_vfp_cmped helper_vfp_cmped_mips -#define helper_vfp_cmpes helper_vfp_cmpes_mips -#define helper_vfp_cmps helper_vfp_cmps_mips -#define helper_vfp_divd helper_vfp_divd_mips -#define helper_vfp_divs helper_vfp_divs_mips -#define helper_vfp_fcvtds helper_vfp_fcvtds_mips -#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_mips -#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_mips -#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_mips -#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_mips -#define helper_vfp_fcvtsd helper_vfp_fcvtsd_mips -#define helper_vfp_get_fpscr helper_vfp_get_fpscr_mips -#define helper_vfp_maxd helper_vfp_maxd_mips -#define helper_vfp_maxnumd helper_vfp_maxnumd_mips -#define helper_vfp_maxnums helper_vfp_maxnums_mips -#define helper_vfp_maxs helper_vfp_maxs_mips -#define helper_vfp_mind helper_vfp_mind_mips -#define helper_vfp_minnumd helper_vfp_minnumd_mips -#define helper_vfp_minnums helper_vfp_minnums_mips -#define helper_vfp_mins helper_vfp_mins_mips -#define helper_vfp_muladdd helper_vfp_muladdd_mips -#define helper_vfp_muladds helper_vfp_muladds_mips -#define helper_vfp_muld helper_vfp_muld_mips -#define helper_vfp_muls helper_vfp_muls_mips -#define helper_vfp_negd helper_vfp_negd_mips -#define helper_vfp_negs helper_vfp_negs_mips -#define helper_vfp_set_fpscr helper_vfp_set_fpscr_mips -#define helper_vfp_shtod helper_vfp_shtod_mips -#define helper_vfp_shtos helper_vfp_shtos_mips -#define helper_vfp_sitod helper_vfp_sitod_mips -#define helper_vfp_sitos helper_vfp_sitos_mips -#define helper_vfp_sltod helper_vfp_sltod_mips -#define helper_vfp_sltos helper_vfp_sltos_mips -#define helper_vfp_sqrtd helper_vfp_sqrtd_mips -#define helper_vfp_sqrts helper_vfp_sqrts_mips -#define helper_vfp_sqtod helper_vfp_sqtod_mips -#define helper_vfp_sqtos helper_vfp_sqtos_mips -#define helper_vfp_subd helper_vfp_subd_mips -#define helper_vfp_subs helper_vfp_subs_mips -#define helper_vfp_toshd helper_vfp_toshd_mips -#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_mips -#define helper_vfp_toshs 
helper_vfp_toshs_mips -#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_mips -#define helper_vfp_tosid helper_vfp_tosid_mips -#define helper_vfp_tosis helper_vfp_tosis_mips -#define helper_vfp_tosizd helper_vfp_tosizd_mips -#define helper_vfp_tosizs helper_vfp_tosizs_mips -#define helper_vfp_tosld helper_vfp_tosld_mips -#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_mips -#define helper_vfp_tosls helper_vfp_tosls_mips -#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_mips -#define helper_vfp_tosqd helper_vfp_tosqd_mips -#define helper_vfp_tosqs helper_vfp_tosqs_mips -#define helper_vfp_touhd helper_vfp_touhd_mips -#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_mips -#define helper_vfp_touhs helper_vfp_touhs_mips -#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_mips -#define helper_vfp_touid helper_vfp_touid_mips -#define helper_vfp_touis helper_vfp_touis_mips -#define helper_vfp_touizd helper_vfp_touizd_mips -#define helper_vfp_touizs helper_vfp_touizs_mips -#define helper_vfp_tould helper_vfp_tould_mips -#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_mips -#define helper_vfp_touls helper_vfp_touls_mips -#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_mips -#define helper_vfp_touqd helper_vfp_touqd_mips -#define helper_vfp_touqs helper_vfp_touqs_mips -#define helper_vfp_uhtod helper_vfp_uhtod_mips -#define helper_vfp_uhtos helper_vfp_uhtos_mips -#define helper_vfp_uitod helper_vfp_uitod_mips -#define helper_vfp_uitos helper_vfp_uitos_mips -#define helper_vfp_ultod helper_vfp_ultod_mips -#define helper_vfp_ultos helper_vfp_ultos_mips -#define helper_vfp_uqtod helper_vfp_uqtod_mips -#define helper_vfp_uqtos helper_vfp_uqtos_mips -#define helper_wfe helper_wfe_mips -#define helper_wfi helper_wfi_mips -#define hex2decimal hex2decimal_mips -#define hw_breakpoint_update hw_breakpoint_update_mips -#define hw_breakpoint_update_all hw_breakpoint_update_all_mips -#define hw_watchpoint_update hw_watchpoint_update_mips -#define hw_watchpoint_update_all hw_watchpoint_update_all_mips -#define _init _init_mips -#define init_cpreg_list init_cpreg_list_mips -#define init_lists init_lists_mips -#define input_type_enum input_type_enum_mips -#define int128_2_64 int128_2_64_mips -#define int128_add int128_add_mips -#define int128_addto int128_addto_mips -#define int128_and int128_and_mips -#define int128_eq int128_eq_mips -#define int128_ge int128_ge_mips -#define int128_get64 int128_get64_mips -#define int128_gt int128_gt_mips -#define int128_le int128_le_mips -#define int128_lt int128_lt_mips -#define int128_make64 int128_make64_mips -#define int128_max int128_max_mips -#define int128_min int128_min_mips -#define int128_ne int128_ne_mips -#define int128_neg int128_neg_mips -#define int128_nz int128_nz_mips -#define int128_rshift int128_rshift_mips -#define int128_sub int128_sub_mips -#define int128_subfrom int128_subfrom_mips -#define int128_zero int128_zero_mips -#define int16_to_float32 int16_to_float32_mips -#define int16_to_float64 int16_to_float64_mips -#define int32_to_float128 int32_to_float128_mips -#define int32_to_float32 int32_to_float32_mips -#define int32_to_float64 int32_to_float64_mips -#define int32_to_floatx80 int32_to_floatx80_mips -#define int64_to_float128 int64_to_float128_mips -#define int64_to_float32 int64_to_float32_mips -#define int64_to_float64 int64_to_float64_mips -#define int64_to_floatx80 int64_to_floatx80_mips -#define 
invalidate_and_set_dirty invalidate_and_set_dirty_mips -#define invalidate_page_bitmap invalidate_page_bitmap_mips -#define io_mem_read io_mem_read_mips -#define io_mem_write io_mem_write_mips -#define io_readb io_readb_mips -#define io_readl io_readl_mips -#define io_readq io_readq_mips -#define io_readw io_readw_mips -#define iotlb_to_region iotlb_to_region_mips -#define io_writeb io_writeb_mips -#define io_writel io_writel_mips -#define io_writeq io_writeq_mips -#define io_writew io_writew_mips -#define is_a64 is_a64_mips -#define is_help_option is_help_option_mips -#define isr_read isr_read_mips -#define is_valid_option_list is_valid_option_list_mips -#define iwmmxt_load_creg iwmmxt_load_creg_mips -#define iwmmxt_load_reg iwmmxt_load_reg_mips -#define iwmmxt_store_creg iwmmxt_store_creg_mips -#define iwmmxt_store_reg iwmmxt_store_reg_mips -#define __jit_debug_descriptor __jit_debug_descriptor_mips -#define __jit_debug_register_code __jit_debug_register_code_mips -#define kvm_to_cpreg_id kvm_to_cpreg_id_mips -#define last_ram_offset last_ram_offset_mips -#define ldl_be_p ldl_be_p_mips -#define ldl_be_phys ldl_be_phys_mips -#define ldl_he_p ldl_he_p_mips -#define ldl_le_p ldl_le_p_mips -#define ldl_le_phys ldl_le_phys_mips -#define ldl_phys ldl_phys_mips -#define ldl_phys_internal ldl_phys_internal_mips -#define ldq_be_p ldq_be_p_mips -#define ldq_be_phys ldq_be_phys_mips -#define ldq_he_p ldq_he_p_mips -#define ldq_le_p ldq_le_p_mips -#define ldq_le_phys ldq_le_phys_mips -#define ldq_phys ldq_phys_mips -#define ldq_phys_internal ldq_phys_internal_mips -#define ldst_name ldst_name_mips -#define ldub_p ldub_p_mips -#define ldub_phys ldub_phys_mips -#define lduw_be_p lduw_be_p_mips -#define lduw_be_phys lduw_be_phys_mips -#define lduw_he_p lduw_he_p_mips -#define lduw_le_p lduw_le_p_mips -#define lduw_le_phys lduw_le_phys_mips -#define lduw_phys lduw_phys_mips -#define lduw_phys_internal lduw_phys_internal_mips -#define le128 le128_mips -#define linked_bp_matches linked_bp_matches_mips -#define listener_add_address_space listener_add_address_space_mips -#define load_cpu_offset load_cpu_offset_mips -#define load_reg load_reg_mips -#define load_reg_var load_reg_var_mips -#define log_cpu_state log_cpu_state_mips -#define lpae_cp_reginfo lpae_cp_reginfo_mips -#define lt128 lt128_mips -#define machine_class_init machine_class_init_mips -#define machine_finalize machine_finalize_mips -#define machine_info machine_info_mips -#define machine_initfn machine_initfn_mips -#define machine_register_types machine_register_types_mips -#define machvirt_init machvirt_init_mips -#define machvirt_machine_init machvirt_machine_init_mips -#define maj maj_mips -#define mapping_conflict mapping_conflict_mips -#define mapping_contiguous mapping_contiguous_mips -#define mapping_have_same_region mapping_have_same_region_mips -#define mapping_merge mapping_merge_mips -#define mem_add mem_add_mips -#define mem_begin mem_begin_mips -#define mem_commit mem_commit_mips -#define memory_access_is_direct memory_access_is_direct_mips -#define memory_access_size memory_access_size_mips -#define memory_init memory_init_mips -#define memory_listener_match memory_listener_match_mips -#define memory_listener_register memory_listener_register_mips -#define memory_listener_unregister memory_listener_unregister_mips -#define memory_map_init memory_map_init_mips -#define memory_mapping_filter memory_mapping_filter_mips -#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_mips -#define 
memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips -#define memory_mapping_list_free memory_mapping_list_free_mips -#define memory_mapping_list_init memory_mapping_list_init_mips -#define memory_region_access_valid memory_region_access_valid_mips -#define memory_region_add_subregion memory_region_add_subregion_mips -#define memory_region_add_subregion_common memory_region_add_subregion_common_mips -#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mips -#define memory_region_big_endian memory_region_big_endian_mips -#define memory_region_clear_pending memory_region_clear_pending_mips -#define memory_region_del_subregion memory_region_del_subregion_mips -#define memory_region_destructor_alias memory_region_destructor_alias_mips -#define memory_region_destructor_none memory_region_destructor_none_mips -#define memory_region_destructor_ram memory_region_destructor_ram_mips -#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_mips -#define memory_region_dispatch_read memory_region_dispatch_read_mips -#define memory_region_dispatch_read1 memory_region_dispatch_read1_mips -#define memory_region_dispatch_write memory_region_dispatch_write_mips -#define memory_region_escape_name memory_region_escape_name_mips -#define memory_region_finalize memory_region_finalize_mips -#define memory_region_find memory_region_find_mips -#define memory_region_get_addr memory_region_get_addr_mips -#define memory_region_get_alignment memory_region_get_alignment_mips -#define memory_region_get_container memory_region_get_container_mips -#define memory_region_get_fd memory_region_get_fd_mips -#define memory_region_get_may_overlap memory_region_get_may_overlap_mips -#define memory_region_get_priority memory_region_get_priority_mips -#define memory_region_get_ram_addr memory_region_get_ram_addr_mips -#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips -#define memory_region_get_size memory_region_get_size_mips -#define memory_region_info memory_region_info_mips -#define memory_region_init memory_region_init_mips -#define memory_region_init_alias memory_region_init_alias_mips -#define memory_region_initfn memory_region_initfn_mips -#define memory_region_init_io memory_region_init_io_mips -#define memory_region_init_ram memory_region_init_ram_mips -#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips -#define memory_region_init_reservation memory_region_init_reservation_mips -#define memory_region_is_iommu memory_region_is_iommu_mips -#define memory_region_is_logging memory_region_is_logging_mips -#define memory_region_is_mapped memory_region_is_mapped_mips -#define memory_region_is_ram memory_region_is_ram_mips -#define memory_region_is_rom memory_region_is_rom_mips -#define memory_region_is_romd memory_region_is_romd_mips -#define memory_region_is_skip_dump memory_region_is_skip_dump_mips -#define memory_region_is_unassigned memory_region_is_unassigned_mips -#define memory_region_name memory_region_name_mips -#define memory_region_need_escape memory_region_need_escape_mips -#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_mips -#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_mips -#define memory_region_present memory_region_present_mips -#define memory_region_read_accessor memory_region_read_accessor_mips -#define memory_region_readd_subregion memory_region_readd_subregion_mips -#define memory_region_ref memory_region_ref_mips -#define 
memory_region_resolve_container memory_region_resolve_container_mips -#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_mips -#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips -#define memory_region_set_address memory_region_set_address_mips -#define memory_region_set_alias_offset memory_region_set_alias_offset_mips -#define memory_region_set_enabled memory_region_set_enabled_mips -#define memory_region_set_readonly memory_region_set_readonly_mips -#define memory_region_set_skip_dump memory_region_set_skip_dump_mips -#define memory_region_size memory_region_size_mips -#define memory_region_to_address_space memory_region_to_address_space_mips -#define memory_region_transaction_begin memory_region_transaction_begin_mips -#define memory_region_transaction_commit memory_region_transaction_commit_mips -#define memory_region_unref memory_region_unref_mips -#define memory_region_update_container_subregions memory_region_update_container_subregions_mips -#define memory_region_write_accessor memory_region_write_accessor_mips -#define memory_region_wrong_endianness memory_region_wrong_endianness_mips -#define memory_try_enable_merging memory_try_enable_merging_mips -#define module_call_init module_call_init_mips -#define module_load module_load_mips -#define mpidr_cp_reginfo mpidr_cp_reginfo_mips -#define mpidr_read mpidr_read_mips -#define msr_mask msr_mask_mips -#define mul128By64To192 mul128By64To192_mips -#define mul128To256 mul128To256_mips -#define mul64To128 mul64To128_mips -#define muldiv64 muldiv64_mips -#define neon_2rm_is_float_op neon_2rm_is_float_op_mips -#define neon_2rm_sizes neon_2rm_sizes_mips -#define neon_3r_sizes neon_3r_sizes_mips -#define neon_get_scalar neon_get_scalar_mips -#define neon_load_reg neon_load_reg_mips -#define neon_load_reg64 neon_load_reg64_mips -#define neon_load_scratch neon_load_scratch_mips -#define neon_ls_element_type neon_ls_element_type_mips -#define neon_reg_offset neon_reg_offset_mips -#define neon_store_reg neon_store_reg_mips -#define neon_store_reg64 neon_store_reg64_mips -#define neon_store_scratch neon_store_scratch_mips -#define new_ldst_label new_ldst_label_mips -#define next_list next_list_mips -#define normalizeFloat128Subnormal normalizeFloat128Subnormal_mips -#define normalizeFloat16Subnormal normalizeFloat16Subnormal_mips -#define normalizeFloat32Subnormal normalizeFloat32Subnormal_mips -#define normalizeFloat64Subnormal normalizeFloat64Subnormal_mips -#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips -#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_mips -#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_mips -#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_mips -#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips -#define not_v6_cp_reginfo not_v6_cp_reginfo_mips -#define not_v7_cp_reginfo not_v7_cp_reginfo_mips -#define not_v8_cp_reginfo not_v8_cp_reginfo_mips -#define object_child_foreach object_child_foreach_mips -#define object_class_foreach object_class_foreach_mips -#define object_class_foreach_tramp object_class_foreach_tramp_mips -#define object_class_get_list object_class_get_list_mips -#define object_class_get_list_tramp object_class_get_list_tramp_mips -#define object_class_get_parent object_class_get_parent_mips -#define object_deinit object_deinit_mips -#define object_dynamic_cast object_dynamic_cast_mips -#define object_finalize object_finalize_mips -#define 
object_finalize_child_property object_finalize_child_property_mips -#define object_get_child_property object_get_child_property_mips -#define object_get_link_property object_get_link_property_mips -#define object_get_root object_get_root_mips -#define object_initialize_with_type object_initialize_with_type_mips -#define object_init_with_type object_init_with_type_mips -#define object_instance_init object_instance_init_mips -#define object_new_with_type object_new_with_type_mips -#define object_post_init_with_type object_post_init_with_type_mips -#define object_property_add_alias object_property_add_alias_mips -#define object_property_add_link object_property_add_link_mips -#define object_property_add_uint16_ptr object_property_add_uint16_ptr_mips -#define object_property_add_uint32_ptr object_property_add_uint32_ptr_mips -#define object_property_add_uint64_ptr object_property_add_uint64_ptr_mips -#define object_property_add_uint8_ptr object_property_add_uint8_ptr_mips -#define object_property_allow_set_link object_property_allow_set_link_mips -#define object_property_del object_property_del_mips -#define object_property_del_all object_property_del_all_mips -#define object_property_find object_property_find_mips -#define object_property_get object_property_get_mips -#define object_property_get_bool object_property_get_bool_mips -#define object_property_get_int object_property_get_int_mips -#define object_property_get_link object_property_get_link_mips -#define object_property_get_qobject object_property_get_qobject_mips -#define object_property_get_str object_property_get_str_mips -#define object_property_get_type object_property_get_type_mips -#define object_property_is_child object_property_is_child_mips -#define object_property_set object_property_set_mips -#define object_property_set_description object_property_set_description_mips -#define object_property_set_link object_property_set_link_mips -#define object_property_set_qobject object_property_set_qobject_mips -#define object_release_link_property object_release_link_property_mips -#define object_resolve_abs_path object_resolve_abs_path_mips -#define object_resolve_child_property object_resolve_child_property_mips -#define object_resolve_link object_resolve_link_mips -#define object_resolve_link_property object_resolve_link_property_mips -#define object_resolve_partial_path object_resolve_partial_path_mips -#define object_resolve_path object_resolve_path_mips -#define object_resolve_path_component object_resolve_path_component_mips -#define object_resolve_path_type object_resolve_path_type_mips -#define object_set_link_property object_set_link_property_mips -#define object_unparent object_unparent_mips -#define omap_cachemaint_write omap_cachemaint_write_mips -#define omap_cp_reginfo omap_cp_reginfo_mips -#define omap_threadid_write omap_threadid_write_mips -#define omap_ticonfig_write omap_ticonfig_write_mips -#define omap_wfi_write omap_wfi_write_mips -#define op_bits op_bits_mips -#define open_modeflags open_modeflags_mips -#define op_to_mov op_to_mov_mips -#define op_to_movi op_to_movi_mips -#define output_type_enum output_type_enum_mips -#define packFloat128 packFloat128_mips -#define packFloat16 packFloat16_mips -#define packFloat32 packFloat32_mips -#define packFloat64 packFloat64_mips -#define packFloatx80 packFloatx80_mips -#define page_find page_find_mips -#define page_find_alloc page_find_alloc_mips -#define page_flush_tb page_flush_tb_mips -#define page_flush_tb_1 page_flush_tb_1_mips -#define page_init page_init_mips 
-#define page_size_init page_size_init_mips -#define par par_mips -#define parse_array parse_array_mips -#define parse_error parse_error_mips -#define parse_escape parse_escape_mips -#define parse_keyword parse_keyword_mips -#define parse_literal parse_literal_mips -#define parse_object parse_object_mips -#define parse_optional parse_optional_mips -#define parse_option_bool parse_option_bool_mips -#define parse_option_number parse_option_number_mips -#define parse_option_size parse_option_size_mips -#define parse_pair parse_pair_mips -#define parser_context_free parser_context_free_mips -#define parser_context_new parser_context_new_mips -#define parser_context_peek_token parser_context_peek_token_mips -#define parser_context_pop_token parser_context_pop_token_mips -#define parser_context_restore parser_context_restore_mips -#define parser_context_save parser_context_save_mips -#define parse_str parse_str_mips -#define parse_type_bool parse_type_bool_mips -#define parse_type_int parse_type_int_mips -#define parse_type_number parse_type_number_mips -#define parse_type_size parse_type_size_mips -#define parse_type_str parse_type_str_mips -#define parse_value parse_value_mips -#define par_write par_write_mips -#define patch_reloc patch_reloc_mips -#define phys_map_node_alloc phys_map_node_alloc_mips -#define phys_map_node_reserve phys_map_node_reserve_mips -#define phys_mem_alloc phys_mem_alloc_mips -#define phys_mem_set_alloc phys_mem_set_alloc_mips -#define phys_page_compact phys_page_compact_mips -#define phys_page_compact_all phys_page_compact_all_mips -#define phys_page_find phys_page_find_mips -#define phys_page_set phys_page_set_mips -#define phys_page_set_level phys_page_set_level_mips -#define phys_section_add phys_section_add_mips -#define phys_section_destroy phys_section_destroy_mips -#define phys_sections_free phys_sections_free_mips -#define pickNaN pickNaN_mips -#define pickNaNMulAdd pickNaNMulAdd_mips -#define pmccfiltr_write pmccfiltr_write_mips -#define pmccntr_read pmccntr_read_mips -#define pmccntr_sync pmccntr_sync_mips -#define pmccntr_write pmccntr_write_mips -#define pmccntr_write32 pmccntr_write32_mips -#define pmcntenclr_write pmcntenclr_write_mips -#define pmcntenset_write pmcntenset_write_mips -#define pmcr_write pmcr_write_mips -#define pmintenclr_write pmintenclr_write_mips -#define pmintenset_write pmintenset_write_mips -#define pmovsr_write pmovsr_write_mips -#define pmreg_access pmreg_access_mips -#define pmsav5_cp_reginfo pmsav5_cp_reginfo_mips -#define pmsav5_data_ap_read pmsav5_data_ap_read_mips -#define pmsav5_data_ap_write pmsav5_data_ap_write_mips -#define pmsav5_insn_ap_read pmsav5_insn_ap_read_mips -#define pmsav5_insn_ap_write pmsav5_insn_ap_write_mips -#define pmuserenr_write pmuserenr_write_mips -#define pmxevtyper_write pmxevtyper_write_mips -#define print_type_bool print_type_bool_mips -#define print_type_int print_type_int_mips -#define print_type_number print_type_number_mips -#define print_type_size print_type_size_mips -#define print_type_str print_type_str_mips -#define propagateFloat128NaN propagateFloat128NaN_mips -#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_mips -#define propagateFloat32NaN propagateFloat32NaN_mips -#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_mips -#define propagateFloat64NaN propagateFloat64NaN_mips -#define propagateFloatx80NaN propagateFloatx80NaN_mips -#define property_get_alias property_get_alias_mips -#define property_get_bool property_get_bool_mips -#define property_get_str 
property_get_str_mips -#define property_get_uint16_ptr property_get_uint16_ptr_mips -#define property_get_uint32_ptr property_get_uint32_ptr_mips -#define property_get_uint64_ptr property_get_uint64_ptr_mips -#define property_get_uint8_ptr property_get_uint8_ptr_mips -#define property_release_alias property_release_alias_mips -#define property_release_bool property_release_bool_mips -#define property_release_str property_release_str_mips -#define property_resolve_alias property_resolve_alias_mips -#define property_set_alias property_set_alias_mips -#define property_set_bool property_set_bool_mips -#define property_set_str property_set_str_mips -#define pstate_read pstate_read_mips -#define pstate_write pstate_write_mips -#define pxa250_initfn pxa250_initfn_mips -#define pxa255_initfn pxa255_initfn_mips -#define pxa260_initfn pxa260_initfn_mips -#define pxa261_initfn pxa261_initfn_mips -#define pxa262_initfn pxa262_initfn_mips -#define pxa270a0_initfn pxa270a0_initfn_mips -#define pxa270a1_initfn pxa270a1_initfn_mips -#define pxa270b0_initfn pxa270b0_initfn_mips -#define pxa270b1_initfn pxa270b1_initfn_mips -#define pxa270c0_initfn pxa270c0_initfn_mips -#define pxa270c5_initfn pxa270c5_initfn_mips -#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_mips -#define qapi_dealloc_end_list qapi_dealloc_end_list_mips -#define qapi_dealloc_end_struct qapi_dealloc_end_struct_mips -#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_mips -#define qapi_dealloc_next_list qapi_dealloc_next_list_mips -#define qapi_dealloc_pop qapi_dealloc_pop_mips -#define qapi_dealloc_push qapi_dealloc_push_mips -#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_mips -#define qapi_dealloc_start_list qapi_dealloc_start_list_mips -#define qapi_dealloc_start_struct qapi_dealloc_start_struct_mips -#define qapi_dealloc_start_union qapi_dealloc_start_union_mips -#define qapi_dealloc_type_bool qapi_dealloc_type_bool_mips -#define qapi_dealloc_type_enum qapi_dealloc_type_enum_mips -#define qapi_dealloc_type_int qapi_dealloc_type_int_mips -#define qapi_dealloc_type_number qapi_dealloc_type_number_mips -#define qapi_dealloc_type_size qapi_dealloc_type_size_mips -#define qapi_dealloc_type_str qapi_dealloc_type_str_mips -#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_mips -#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_mips -#define qapi_free_boolList qapi_free_boolList_mips -#define qapi_free_ErrorClassList qapi_free_ErrorClassList_mips -#define qapi_free_int16List qapi_free_int16List_mips -#define qapi_free_int32List qapi_free_int32List_mips -#define qapi_free_int64List qapi_free_int64List_mips -#define qapi_free_int8List qapi_free_int8List_mips -#define qapi_free_intList qapi_free_intList_mips -#define qapi_free_numberList qapi_free_numberList_mips -#define qapi_free_strList qapi_free_strList_mips -#define qapi_free_uint16List qapi_free_uint16List_mips -#define qapi_free_uint32List qapi_free_uint32List_mips -#define qapi_free_uint64List qapi_free_uint64List_mips -#define qapi_free_uint8List qapi_free_uint8List_mips -#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_mips -#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_mips -#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_mips -#define qbool_destroy_obj qbool_destroy_obj_mips -#define qbool_from_int qbool_from_int_mips -#define qbool_get_int qbool_get_int_mips -#define qbool_type qbool_type_mips -#define qbus_create 
qbus_create_mips -#define qbus_create_inplace qbus_create_inplace_mips -#define qbus_finalize qbus_finalize_mips -#define qbus_initfn qbus_initfn_mips -#define qbus_realize qbus_realize_mips -#define qdev_create qdev_create_mips -#define qdev_get_type qdev_get_type_mips -#define qdev_register_types qdev_register_types_mips -#define qdev_set_parent_bus qdev_set_parent_bus_mips -#define qdev_try_create qdev_try_create_mips -#define qdict_add_key qdict_add_key_mips -#define qdict_array_split qdict_array_split_mips -#define qdict_clone_shallow qdict_clone_shallow_mips -#define qdict_del qdict_del_mips -#define qdict_destroy_obj qdict_destroy_obj_mips -#define qdict_entry_key qdict_entry_key_mips -#define qdict_entry_value qdict_entry_value_mips -#define qdict_extract_subqdict qdict_extract_subqdict_mips -#define qdict_find qdict_find_mips -#define qdict_first qdict_first_mips -#define qdict_flatten qdict_flatten_mips -#define qdict_flatten_qdict qdict_flatten_qdict_mips -#define qdict_flatten_qlist qdict_flatten_qlist_mips -#define qdict_get qdict_get_mips -#define qdict_get_bool qdict_get_bool_mips -#define qdict_get_double qdict_get_double_mips -#define qdict_get_int qdict_get_int_mips -#define qdict_get_obj qdict_get_obj_mips -#define qdict_get_qdict qdict_get_qdict_mips -#define qdict_get_qlist qdict_get_qlist_mips -#define qdict_get_str qdict_get_str_mips -#define qdict_get_try_bool qdict_get_try_bool_mips -#define qdict_get_try_int qdict_get_try_int_mips -#define qdict_get_try_str qdict_get_try_str_mips -#define qdict_haskey qdict_haskey_mips -#define qdict_has_prefixed_entries qdict_has_prefixed_entries_mips -#define qdict_iter qdict_iter_mips -#define qdict_join qdict_join_mips -#define qdict_new qdict_new_mips -#define qdict_next qdict_next_mips -#define qdict_next_entry qdict_next_entry_mips -#define qdict_put_obj qdict_put_obj_mips -#define qdict_size qdict_size_mips -#define qdict_type qdict_type_mips -#define qemu_clock_get_us qemu_clock_get_us_mips -#define qemu_clock_ptr qemu_clock_ptr_mips -#define qemu_clocks qemu_clocks_mips -#define qemu_get_cpu qemu_get_cpu_mips -#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_mips -#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_mips -#define qemu_get_ram_block qemu_get_ram_block_mips -#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_mips -#define qemu_get_ram_fd qemu_get_ram_fd_mips -#define qemu_get_ram_ptr qemu_get_ram_ptr_mips -#define qemu_host_page_mask qemu_host_page_mask_mips -#define qemu_host_page_size qemu_host_page_size_mips -#define qemu_init_vcpu qemu_init_vcpu_mips -#define qemu_ld_helpers qemu_ld_helpers_mips -#define qemu_log_close qemu_log_close_mips -#define qemu_log_enabled qemu_log_enabled_mips -#define qemu_log_flush qemu_log_flush_mips -#define qemu_loglevel_mask qemu_loglevel_mask_mips -#define qemu_log_vprintf qemu_log_vprintf_mips -#define qemu_oom_check qemu_oom_check_mips -#define qemu_parse_fd qemu_parse_fd_mips -#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips -#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_mips -#define qemu_ram_alloc qemu_ram_alloc_mips -#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips -#define qemu_ram_foreach_block qemu_ram_foreach_block_mips -#define qemu_ram_free qemu_ram_free_mips -#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_mips -#define qemu_ram_ptr_length qemu_ram_ptr_length_mips -#define qemu_ram_remap qemu_ram_remap_mips -#define qemu_ram_setup_dump 
qemu_ram_setup_dump_mips -#define qemu_ram_unset_idstr qemu_ram_unset_idstr_mips -#define qemu_real_host_page_size qemu_real_host_page_size_mips -#define qemu_st_helpers qemu_st_helpers_mips -#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_mips -#define qemu_try_memalign qemu_try_memalign_mips -#define qentry_destroy qentry_destroy_mips -#define qerror_human qerror_human_mips -#define qerror_report qerror_report_mips -#define qerror_report_err qerror_report_err_mips -#define qfloat_destroy_obj qfloat_destroy_obj_mips -#define qfloat_from_double qfloat_from_double_mips -#define qfloat_get_double qfloat_get_double_mips -#define qfloat_type qfloat_type_mips -#define qint_destroy_obj qint_destroy_obj_mips -#define qint_from_int qint_from_int_mips -#define qint_get_int qint_get_int_mips -#define qint_type qint_type_mips -#define qlist_append_obj qlist_append_obj_mips -#define qlist_copy qlist_copy_mips -#define qlist_copy_elem qlist_copy_elem_mips -#define qlist_destroy_obj qlist_destroy_obj_mips -#define qlist_empty qlist_empty_mips -#define qlist_entry_obj qlist_entry_obj_mips -#define qlist_first qlist_first_mips -#define qlist_iter qlist_iter_mips -#define qlist_new qlist_new_mips -#define qlist_next qlist_next_mips -#define qlist_peek qlist_peek_mips -#define qlist_pop qlist_pop_mips -#define qlist_size qlist_size_mips -#define qlist_size_iter qlist_size_iter_mips -#define qlist_type qlist_type_mips -#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_mips -#define qmp_input_end_list qmp_input_end_list_mips -#define qmp_input_end_struct qmp_input_end_struct_mips -#define qmp_input_get_next_type qmp_input_get_next_type_mips -#define qmp_input_get_object qmp_input_get_object_mips -#define qmp_input_get_visitor qmp_input_get_visitor_mips -#define qmp_input_next_list qmp_input_next_list_mips -#define qmp_input_optional qmp_input_optional_mips -#define qmp_input_pop qmp_input_pop_mips -#define qmp_input_push qmp_input_push_mips -#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_mips -#define qmp_input_start_list qmp_input_start_list_mips -#define qmp_input_start_struct qmp_input_start_struct_mips -#define qmp_input_type_bool qmp_input_type_bool_mips -#define qmp_input_type_int qmp_input_type_int_mips -#define qmp_input_type_number qmp_input_type_number_mips -#define qmp_input_type_str qmp_input_type_str_mips -#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_mips -#define qmp_input_visitor_new qmp_input_visitor_new_mips -#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_mips -#define qmp_output_add_obj qmp_output_add_obj_mips -#define qmp_output_end_list qmp_output_end_list_mips -#define qmp_output_end_struct qmp_output_end_struct_mips -#define qmp_output_first qmp_output_first_mips -#define qmp_output_get_qobject qmp_output_get_qobject_mips -#define qmp_output_get_visitor qmp_output_get_visitor_mips -#define qmp_output_last qmp_output_last_mips -#define qmp_output_next_list qmp_output_next_list_mips -#define qmp_output_pop qmp_output_pop_mips -#define qmp_output_push_obj qmp_output_push_obj_mips -#define qmp_output_start_list qmp_output_start_list_mips -#define qmp_output_start_struct qmp_output_start_struct_mips -#define qmp_output_type_bool qmp_output_type_bool_mips -#define qmp_output_type_int qmp_output_type_int_mips -#define qmp_output_type_number qmp_output_type_number_mips -#define qmp_output_type_str qmp_output_type_str_mips -#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_mips -#define qmp_output_visitor_new 
qmp_output_visitor_new_mips -#define qobject_decref qobject_decref_mips -#define qobject_to_qbool qobject_to_qbool_mips -#define qobject_to_qdict qobject_to_qdict_mips -#define qobject_to_qfloat qobject_to_qfloat_mips -#define qobject_to_qint qobject_to_qint_mips -#define qobject_to_qlist qobject_to_qlist_mips -#define qobject_to_qstring qobject_to_qstring_mips -#define qobject_type qobject_type_mips -#define qstring_append qstring_append_mips -#define qstring_append_chr qstring_append_chr_mips -#define qstring_append_int qstring_append_int_mips -#define qstring_destroy_obj qstring_destroy_obj_mips -#define qstring_from_escaped_str qstring_from_escaped_str_mips -#define qstring_from_str qstring_from_str_mips -#define qstring_from_substr qstring_from_substr_mips -#define qstring_get_length qstring_get_length_mips -#define qstring_get_str qstring_get_str_mips -#define qstring_new qstring_new_mips -#define qstring_type qstring_type_mips -#define ram_block_add ram_block_add_mips -#define ram_size ram_size_mips -#define range_compare range_compare_mips -#define range_covers_byte range_covers_byte_mips -#define range_get_last range_get_last_mips -#define range_merge range_merge_mips -#define ranges_can_merge ranges_can_merge_mips -#define raw_read raw_read_mips -#define raw_write raw_write_mips -#define rcon rcon_mips -#define read_raw_cp_reg read_raw_cp_reg_mips -#define recip_estimate recip_estimate_mips -#define recip_sqrt_estimate recip_sqrt_estimate_mips -#define register_cp_regs_for_features register_cp_regs_for_features_mips -#define register_multipage register_multipage_mips -#define register_subpage register_subpage_mips -#define register_tm_clones register_tm_clones_mips -#define register_types_object register_types_object_mips -#define regnames regnames_mips -#define render_memory_region render_memory_region_mips -#define reset_all_temps reset_all_temps_mips -#define reset_temp reset_temp_mips -#define rol32 rol32_mips -#define rol64 rol64_mips -#define ror32 ror32_mips -#define ror64 ror64_mips -#define roundAndPackFloat128 roundAndPackFloat128_mips -#define roundAndPackFloat16 roundAndPackFloat16_mips -#define roundAndPackFloat32 roundAndPackFloat32_mips -#define roundAndPackFloat64 roundAndPackFloat64_mips -#define roundAndPackFloatx80 roundAndPackFloatx80_mips -#define roundAndPackInt32 roundAndPackInt32_mips -#define roundAndPackInt64 roundAndPackInt64_mips -#define roundAndPackUint64 roundAndPackUint64_mips -#define round_to_inf round_to_inf_mips -#define run_on_cpu run_on_cpu_mips -#define s0 s0_mips -#define S0 S0_mips -#define s1 s1_mips -#define S1 S1_mips -#define sa1100_initfn sa1100_initfn_mips -#define sa1110_initfn sa1110_initfn_mips -#define save_globals save_globals_mips -#define scr_write scr_write_mips -#define sctlr_write sctlr_write_mips -#define set_bit set_bit_mips -#define set_bits set_bits_mips -#define set_default_nan_mode set_default_nan_mode_mips -#define set_feature set_feature_mips -#define set_float_detect_tininess set_float_detect_tininess_mips -#define set_float_exception_flags set_float_exception_flags_mips -#define set_float_rounding_mode set_float_rounding_mode_mips -#define set_flush_inputs_to_zero set_flush_inputs_to_zero_mips -#define set_flush_to_zero set_flush_to_zero_mips -#define set_swi_errno set_swi_errno_mips -#define sextract32 sextract32_mips -#define sextract64 sextract64_mips -#define shift128ExtraRightJamming shift128ExtraRightJamming_mips -#define shift128Right shift128Right_mips -#define shift128RightJamming 
shift128RightJamming_mips -#define shift32RightJamming shift32RightJamming_mips -#define shift64ExtraRightJamming shift64ExtraRightJamming_mips -#define shift64RightJamming shift64RightJamming_mips -#define shifter_out_im shifter_out_im_mips -#define shortShift128Left shortShift128Left_mips -#define shortShift192Left shortShift192Left_mips -#define simple_mpu_ap_bits simple_mpu_ap_bits_mips -#define size_code_gen_buffer size_code_gen_buffer_mips -#define softmmu_lock_user softmmu_lock_user_mips -#define softmmu_lock_user_string softmmu_lock_user_string_mips -#define softmmu_tget32 softmmu_tget32_mips -#define softmmu_tget8 softmmu_tget8_mips -#define softmmu_tput32 softmmu_tput32_mips -#define softmmu_unlock_user softmmu_unlock_user_mips -#define sort_constraints sort_constraints_mips -#define sp_el0_access sp_el0_access_mips -#define spsel_read spsel_read_mips -#define spsel_write spsel_write_mips -#define start_list start_list_mips -#define stb_p stb_p_mips -#define stb_phys stb_phys_mips -#define stl_be_p stl_be_p_mips -#define stl_be_phys stl_be_phys_mips -#define stl_he_p stl_he_p_mips -#define stl_le_p stl_le_p_mips -#define stl_le_phys stl_le_phys_mips -#define stl_phys stl_phys_mips -#define stl_phys_internal stl_phys_internal_mips -#define stl_phys_notdirty stl_phys_notdirty_mips -#define store_cpu_offset store_cpu_offset_mips -#define store_reg store_reg_mips -#define store_reg_bx store_reg_bx_mips -#define store_reg_from_load store_reg_from_load_mips -#define stq_be_p stq_be_p_mips -#define stq_be_phys stq_be_phys_mips -#define stq_he_p stq_he_p_mips -#define stq_le_p stq_le_p_mips -#define stq_le_phys stq_le_phys_mips -#define stq_phys stq_phys_mips -#define string_input_get_visitor string_input_get_visitor_mips -#define string_input_visitor_cleanup string_input_visitor_cleanup_mips -#define string_input_visitor_new string_input_visitor_new_mips -#define strongarm_cp_reginfo strongarm_cp_reginfo_mips -#define strstart strstart_mips -#define strtosz strtosz_mips -#define strtosz_suffix strtosz_suffix_mips -#define stw_be_p stw_be_p_mips -#define stw_be_phys stw_be_phys_mips -#define stw_he_p stw_he_p_mips -#define stw_le_p stw_le_p_mips -#define stw_le_phys stw_le_phys_mips -#define stw_phys stw_phys_mips -#define stw_phys_internal stw_phys_internal_mips -#define sub128 sub128_mips -#define sub16_sat sub16_sat_mips -#define sub16_usat sub16_usat_mips -#define sub192 sub192_mips -#define sub8_sat sub8_sat_mips -#define sub8_usat sub8_usat_mips -#define subFloat128Sigs subFloat128Sigs_mips -#define subFloat32Sigs subFloat32Sigs_mips -#define subFloat64Sigs subFloat64Sigs_mips -#define subFloatx80Sigs subFloatx80Sigs_mips -#define subpage_accepts subpage_accepts_mips -#define subpage_init subpage_init_mips -#define subpage_ops subpage_ops_mips -#define subpage_read subpage_read_mips -#define subpage_register subpage_register_mips -#define subpage_write subpage_write_mips -#define suffix_mul suffix_mul_mips -#define swap_commutative swap_commutative_mips -#define swap_commutative2 swap_commutative2_mips -#define switch_mode switch_mode_mips -#define switch_v7m_sp switch_v7m_sp_mips -#define syn_aa32_bkpt syn_aa32_bkpt_mips -#define syn_aa32_hvc syn_aa32_hvc_mips -#define syn_aa32_smc syn_aa32_smc_mips -#define syn_aa32_svc syn_aa32_svc_mips -#define syn_breakpoint syn_breakpoint_mips -#define sync_globals sync_globals_mips -#define syn_cp14_rrt_trap syn_cp14_rrt_trap_mips -#define syn_cp14_rt_trap syn_cp14_rt_trap_mips -#define syn_cp15_rrt_trap syn_cp15_rrt_trap_mips -#define 
syn_cp15_rt_trap syn_cp15_rt_trap_mips -#define syn_data_abort syn_data_abort_mips -#define syn_fp_access_trap syn_fp_access_trap_mips -#define syn_insn_abort syn_insn_abort_mips -#define syn_swstep syn_swstep_mips -#define syn_uncategorized syn_uncategorized_mips -#define syn_watchpoint syn_watchpoint_mips -#define syscall_err syscall_err_mips -#define system_bus_class_init system_bus_class_init_mips -#define system_bus_info system_bus_info_mips -#define t2ee_cp_reginfo t2ee_cp_reginfo_mips -#define table_logic_cc table_logic_cc_mips -#define target_parse_constraint target_parse_constraint_mips -#define target_words_bigendian target_words_bigendian_mips -#define tb_add_jump tb_add_jump_mips -#define tb_alloc tb_alloc_mips -#define tb_alloc_page tb_alloc_page_mips -#define tb_check_watchpoint tb_check_watchpoint_mips -#define tb_find_fast tb_find_fast_mips -#define tb_find_pc tb_find_pc_mips -#define tb_find_slow tb_find_slow_mips -#define tb_flush tb_flush_mips -#define tb_flush_jmp_cache tb_flush_jmp_cache_mips -#define tb_free tb_free_mips -#define tb_gen_code tb_gen_code_mips -#define tb_hash_remove tb_hash_remove_mips -#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips -#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips -#define tb_invalidate_phys_range tb_invalidate_phys_range_mips -#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_mips -#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_mips -#define tb_jmp_remove tb_jmp_remove_mips -#define tb_link_page tb_link_page_mips -#define tb_page_remove tb_page_remove_mips -#define tb_phys_hash_func tb_phys_hash_func_mips -#define tb_phys_invalidate tb_phys_invalidate_mips -#define tb_reset_jump tb_reset_jump_mips -#define tb_set_jmp_target tb_set_jmp_target_mips -#define tcg_accel_class_init tcg_accel_class_init_mips -#define tcg_accel_type tcg_accel_type_mips -#define tcg_add_param_i32 tcg_add_param_i32_mips -#define tcg_add_param_i64 tcg_add_param_i64_mips -#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_mips -#define tcg_allowed tcg_allowed_mips -#define tcg_canonicalize_memop tcg_canonicalize_memop_mips -#define tcg_commit tcg_commit_mips -#define tcg_cond_to_jcc tcg_cond_to_jcc_mips -#define tcg_constant_folding tcg_constant_folding_mips +#define tcg_can_emit_vec_op tcg_can_emit_vec_op_mips +#define tcg_expand_vec_op tcg_expand_vec_op_mips +#define tcg_register_jit tcg_register_jit_mips +#define tcg_tb_insert tcg_tb_insert_mips +#define tcg_tb_remove tcg_tb_remove_mips +#define tcg_tb_lookup tcg_tb_lookup_mips +#define tcg_tb_foreach tcg_tb_foreach_mips +#define tcg_nb_tbs tcg_nb_tbs_mips +#define tcg_region_reset_all tcg_region_reset_all_mips +#define tcg_region_init tcg_region_init_mips +#define tcg_code_size tcg_code_size_mips +#define tcg_code_capacity tcg_code_capacity_mips +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_mips +#define tcg_malloc_internal tcg_malloc_internal_mips +#define tcg_pool_reset tcg_pool_reset_mips +#define tcg_context_init tcg_context_init_mips +#define tcg_tb_alloc tcg_tb_alloc_mips +#define tcg_prologue_init tcg_prologue_init_mips +#define tcg_func_start tcg_func_start_mips +#define tcg_set_frame tcg_set_frame_mips +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips +#define tcg_temp_new_internal tcg_temp_new_internal_mips +#define tcg_temp_new_vec tcg_temp_new_vec_mips +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_mips +#define tcg_temp_free_internal tcg_temp_free_internal_mips #define tcg_const_i32 
tcg_const_i32_mips #define tcg_const_i64 tcg_const_i64_mips #define tcg_const_local_i32 tcg_const_local_i32_mips #define tcg_const_local_i64 tcg_const_local_i64_mips -#define tcg_context_init tcg_context_init_mips -#define tcg_cpu_address_space_init tcg_cpu_address_space_init_mips -#define tcg_cpu_exec tcg_cpu_exec_mips -#define tcg_current_code_size tcg_current_code_size_mips -#define tcg_dump_info tcg_dump_info_mips -#define tcg_dump_ops tcg_dump_ops_mips -#define tcg_exec_all tcg_exec_all_mips -#define tcg_find_helper tcg_find_helper_mips -#define tcg_func_start tcg_func_start_mips -#define tcg_gen_abs_i32 tcg_gen_abs_i32_mips -#define tcg_gen_add2_i32 tcg_gen_add2_i32_mips -#define tcg_gen_add_i32 tcg_gen_add_i32_mips -#define tcg_gen_add_i64 tcg_gen_add_i64_mips -#define tcg_gen_addi_i32 tcg_gen_addi_i32_mips -#define tcg_gen_addi_i64 tcg_gen_addi_i64_mips -#define tcg_gen_andc_i32 tcg_gen_andc_i32_mips -#define tcg_gen_and_i32 tcg_gen_and_i32_mips -#define tcg_gen_and_i64 tcg_gen_and_i64_mips -#define tcg_gen_andi_i32 tcg_gen_andi_i32_mips -#define tcg_gen_andi_i64 tcg_gen_andi_i64_mips -#define tcg_gen_br tcg_gen_br_mips -#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips -#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips -#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips -#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips -#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips +#define tcg_op_supported tcg_op_supported_mips #define tcg_gen_callN tcg_gen_callN_mips +#define tcg_op_remove tcg_op_remove_mips +#define tcg_emit_op tcg_emit_op_mips +#define tcg_op_insert_before tcg_op_insert_before_mips +#define tcg_op_insert_after tcg_op_insert_after_mips +#define tcg_cpu_exec_time tcg_cpu_exec_time_mips #define tcg_gen_code tcg_gen_code_mips -#define tcg_gen_code_common tcg_gen_code_common_mips -#define tcg_gen_code_search_pc tcg_gen_code_search_pc_mips -#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips -#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_mips -#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips -#define tcg_gen_exit_tb tcg_gen_exit_tb_mips -#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips -#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips -#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips -#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips -#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips -#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips -#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips -#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips -#define tcg_gen_goto_tb tcg_gen_goto_tb_mips -#define tcg_gen_ld_i32 tcg_gen_ld_i32_mips -#define tcg_gen_ld_i64 tcg_gen_ld_i64_mips -#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_mips -#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_mips -#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips -#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips -#define tcg_gen_mov_i32 tcg_gen_mov_i32_mips -#define tcg_gen_mov_i64 tcg_gen_mov_i64_mips -#define tcg_gen_movi_i32 tcg_gen_movi_i32_mips -#define tcg_gen_movi_i64 tcg_gen_movi_i64_mips -#define tcg_gen_mul_i32 tcg_gen_mul_i32_mips -#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips -#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips -#define tcg_gen_neg_i32 tcg_gen_neg_i32_mips -#define tcg_gen_neg_i64 tcg_gen_neg_i64_mips -#define tcg_gen_not_i32 tcg_gen_not_i32_mips -#define tcg_gen_op0 tcg_gen_op0_mips -#define tcg_gen_op1i tcg_gen_op1i_mips -#define tcg_gen_op2_i32 tcg_gen_op2_i32_mips -#define tcg_gen_op2_i64 tcg_gen_op2_i64_mips -#define tcg_gen_op2i_i32 
tcg_gen_op2i_i32_mips -#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_mips -#define tcg_gen_op3_i32 tcg_gen_op3_i32_mips -#define tcg_gen_op3_i64 tcg_gen_op3_i64_mips -#define tcg_gen_op4_i32 tcg_gen_op4_i32_mips -#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_mips -#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_mips -#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_mips -#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_mips -#define tcg_gen_op6_i32 tcg_gen_op6_i32_mips -#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_mips -#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_mips -#define tcg_gen_orc_i32 tcg_gen_orc_i32_mips -#define tcg_gen_or_i32 tcg_gen_or_i32_mips -#define tcg_gen_or_i64 tcg_gen_or_i64_mips +#define tcg_gen_op1 tcg_gen_op1_mips +#define tcg_gen_op2 tcg_gen_op2_mips +#define tcg_gen_op3 tcg_gen_op3_mips +#define tcg_gen_op4 tcg_gen_op4_mips +#define tcg_gen_op5 tcg_gen_op5_mips +#define tcg_gen_op6 tcg_gen_op6_mips +#define tcg_gen_mb tcg_gen_mb_mips +#define tcg_gen_addi_i32 tcg_gen_addi_i32_mips +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_mips +#define tcg_gen_subi_i32 tcg_gen_subi_i32_mips +#define tcg_gen_andi_i32 tcg_gen_andi_i32_mips #define tcg_gen_ori_i32 tcg_gen_ori_i32_mips -#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips -#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips -#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips -#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips +#define tcg_gen_xori_i32 tcg_gen_xori_i32_mips +#define tcg_gen_shli_i32 tcg_gen_shli_i32_mips +#define tcg_gen_shri_i32 tcg_gen_shri_i32_mips +#define tcg_gen_sari_i32 tcg_gen_sari_i32_mips +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_mips +#define tcg_gen_muli_i32 tcg_gen_muli_i32_mips +#define tcg_gen_div_i32 tcg_gen_div_i32_mips +#define tcg_gen_rem_i32 tcg_gen_rem_i32_mips +#define tcg_gen_divu_i32 tcg_gen_divu_i32_mips +#define tcg_gen_remu_i32 tcg_gen_remu_i32_mips +#define tcg_gen_andc_i32 tcg_gen_andc_i32_mips +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_mips +#define tcg_gen_nand_i32 tcg_gen_nand_i32_mips +#define tcg_gen_nor_i32 tcg_gen_nor_i32_mips +#define tcg_gen_orc_i32 tcg_gen_orc_i32_mips +#define tcg_gen_clz_i32 tcg_gen_clz_i32_mips +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_mips +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_mips +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_mips +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_mips +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_mips #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_mips #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mips #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mips #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mips -#define tcg_gen_sar_i32 tcg_gen_sar_i32_mips -#define tcg_gen_sari_i32 tcg_gen_sari_i32_mips -#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips -#define tcg_gen_shl_i32 tcg_gen_shl_i32_mips -#define tcg_gen_shl_i64 tcg_gen_shl_i64_mips -#define tcg_gen_shli_i32 tcg_gen_shli_i32_mips +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_mips +#define tcg_gen_extract_i32 tcg_gen_extract_i32_mips +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_mips +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_mips +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips +#define tcg_gen_add2_i32 tcg_gen_add2_i32_mips +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_mips +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips +#define tcg_gen_muls2_i32 
tcg_gen_muls2_i32_mips +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_mips +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips +#define tcg_gen_smin_i32 tcg_gen_smin_i32_mips +#define tcg_gen_umin_i32 tcg_gen_umin_i32_mips +#define tcg_gen_smax_i32 tcg_gen_smax_i32_mips +#define tcg_gen_umax_i32 tcg_gen_umax_i32_mips +#define tcg_gen_abs_i32 tcg_gen_abs_i32_mips +#define tcg_gen_addi_i64 tcg_gen_addi_i64_mips +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_mips +#define tcg_gen_subi_i64 tcg_gen_subi_i64_mips +#define tcg_gen_andi_i64 tcg_gen_andi_i64_mips +#define tcg_gen_ori_i64 tcg_gen_ori_i64_mips +#define tcg_gen_xori_i64 tcg_gen_xori_i64_mips #define tcg_gen_shli_i64 tcg_gen_shli_i64_mips -#define tcg_gen_shr_i32 tcg_gen_shr_i32_mips -#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_mips -#define tcg_gen_shr_i64 tcg_gen_shr_i64_mips -#define tcg_gen_shri_i32 tcg_gen_shri_i32_mips #define tcg_gen_shri_i64 tcg_gen_shri_i64_mips -#define tcg_gen_st_i32 tcg_gen_st_i32_mips -#define tcg_gen_st_i64 tcg_gen_st_i64_mips -#define tcg_gen_sub_i32 tcg_gen_sub_i32_mips -#define tcg_gen_sub_i64 tcg_gen_sub_i64_mips -#define tcg_gen_subi_i32 tcg_gen_subi_i32_mips -#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_mips -#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_mips -#define tcg_gen_xor_i32 tcg_gen_xor_i32_mips -#define tcg_gen_xor_i64 tcg_gen_xor_i64_mips -#define tcg_gen_xori_i32 tcg_gen_xori_i32_mips -#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_mips -#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_mips -#define tcg_get_arg_str_idx tcg_get_arg_str_idx_mips -#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_mips -#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_mips -#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips -#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_mips -#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_mips -#define tcg_global_reg_new_internal tcg_global_reg_new_internal_mips -#define tcg_handle_interrupt tcg_handle_interrupt_mips -#define tcg_init tcg_init_mips -#define tcg_invert_cond tcg_invert_cond_mips -#define tcg_la_bb_end tcg_la_bb_end_mips -#define tcg_la_br_end tcg_la_br_end_mips -#define tcg_la_func_end tcg_la_func_end_mips -#define tcg_liveness_analysis tcg_liveness_analysis_mips -#define tcg_malloc tcg_malloc_mips -#define tcg_malloc_internal tcg_malloc_internal_mips -#define tcg_op_defs_org tcg_op_defs_org_mips -#define tcg_opt_gen_mov tcg_opt_gen_mov_mips -#define tcg_opt_gen_movi tcg_opt_gen_movi_mips -#define tcg_optimize tcg_optimize_mips -#define tcg_out16 tcg_out16_mips -#define tcg_out32 tcg_out32_mips -#define tcg_out64 tcg_out64_mips -#define tcg_out8 tcg_out8_mips -#define tcg_out_addi tcg_out_addi_mips -#define tcg_out_branch tcg_out_branch_mips -#define tcg_out_brcond32 tcg_out_brcond32_mips -#define tcg_out_brcond64 tcg_out_brcond64_mips -#define tcg_out_bswap32 tcg_out_bswap32_mips -#define tcg_out_bswap64 tcg_out_bswap64_mips -#define tcg_out_call tcg_out_call_mips -#define tcg_out_cmp tcg_out_cmp_mips -#define tcg_out_ext16s tcg_out_ext16s_mips -#define tcg_out_ext16u tcg_out_ext16u_mips -#define tcg_out_ext32s tcg_out_ext32s_mips -#define tcg_out_ext32u tcg_out_ext32u_mips -#define tcg_out_ext8s tcg_out_ext8s_mips -#define tcg_out_ext8u 
tcg_out_ext8u_mips -#define tcg_out_jmp tcg_out_jmp_mips -#define tcg_out_jxx tcg_out_jxx_mips -#define tcg_out_label tcg_out_label_mips -#define tcg_out_ld tcg_out_ld_mips -#define tcg_out_modrm tcg_out_modrm_mips -#define tcg_out_modrm_offset tcg_out_modrm_offset_mips -#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_mips -#define tcg_out_mov tcg_out_mov_mips -#define tcg_out_movcond32 tcg_out_movcond32_mips -#define tcg_out_movcond64 tcg_out_movcond64_mips -#define tcg_out_movi tcg_out_movi_mips -#define tcg_out_op tcg_out_op_mips -#define tcg_out_pop tcg_out_pop_mips -#define tcg_out_push tcg_out_push_mips -#define tcg_out_qemu_ld tcg_out_qemu_ld_mips -#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_mips -#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_mips -#define tcg_out_qemu_st tcg_out_qemu_st_mips -#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_mips -#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_mips -#define tcg_out_reloc tcg_out_reloc_mips -#define tcg_out_rolw_8 tcg_out_rolw_8_mips -#define tcg_out_setcond32 tcg_out_setcond32_mips -#define tcg_out_setcond64 tcg_out_setcond64_mips -#define tcg_out_shifti tcg_out_shifti_mips -#define tcg_out_st tcg_out_st_mips -#define tcg_out_tb_finalize tcg_out_tb_finalize_mips -#define tcg_out_tb_init tcg_out_tb_init_mips -#define tcg_out_tlb_load tcg_out_tlb_load_mips -#define tcg_out_vex_modrm tcg_out_vex_modrm_mips -#define tcg_patch32 tcg_patch32_mips -#define tcg_patch8 tcg_patch8_mips -#define tcg_pcrel_diff tcg_pcrel_diff_mips -#define tcg_pool_reset tcg_pool_reset_mips -#define tcg_prologue_init tcg_prologue_init_mips -#define tcg_ptr_byte_diff tcg_ptr_byte_diff_mips -#define tcg_reg_alloc tcg_reg_alloc_mips -#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_mips -#define tcg_reg_alloc_call tcg_reg_alloc_call_mips -#define tcg_reg_alloc_mov tcg_reg_alloc_mov_mips -#define tcg_reg_alloc_movi tcg_reg_alloc_movi_mips -#define tcg_reg_alloc_op tcg_reg_alloc_op_mips -#define tcg_reg_alloc_start tcg_reg_alloc_start_mips -#define tcg_reg_free tcg_reg_free_mips -#define tcg_reg_sync tcg_reg_sync_mips -#define tcg_set_frame tcg_set_frame_mips -#define tcg_set_nop tcg_set_nop_mips -#define tcg_swap_cond tcg_swap_cond_mips -#define tcg_target_callee_save_regs tcg_target_callee_save_regs_mips -#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_mips -#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_mips -#define tcg_target_const_match tcg_target_const_match_mips -#define tcg_target_init tcg_target_init_mips -#define tcg_target_qemu_prologue tcg_target_qemu_prologue_mips -#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_mips -#define tcg_temp_alloc tcg_temp_alloc_mips -#define tcg_temp_free_i32 tcg_temp_free_i32_mips -#define tcg_temp_free_i64 tcg_temp_free_i64_mips -#define tcg_temp_free_internal tcg_temp_free_internal_mips -#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_mips -#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_mips -#define tcg_temp_new_i32 tcg_temp_new_i32_mips -#define tcg_temp_new_i64 tcg_temp_new_i64_mips -#define tcg_temp_new_internal tcg_temp_new_internal_mips -#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_mips -#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_mips -#define tdb_hash tdb_hash_mips -#define teecr_write teecr_write_mips -#define teehbr_access teehbr_access_mips -#define temp_allocate_frame temp_allocate_frame_mips -#define temp_dead temp_dead_mips -#define temps_are_copies temps_are_copies_mips 
-#define temp_save temp_save_mips -#define temp_sync temp_sync_mips -#define tgen_arithi tgen_arithi_mips -#define tgen_arithr tgen_arithr_mips -#define thumb2_logic_op thumb2_logic_op_mips -#define ti925t_initfn ti925t_initfn_mips -#define tlb_add_large_page tlb_add_large_page_mips -#define tlb_flush_entry tlb_flush_entry_mips -#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips -#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips -#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mips -#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_mips -#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_mips -#define tlbi_aa64_va_write tlbi_aa64_va_write_mips -#define tlbiall_is_write tlbiall_is_write_mips -#define tlbiall_write tlbiall_write_mips -#define tlbiasid_is_write tlbiasid_is_write_mips -#define tlbiasid_write tlbiasid_write_mips -#define tlbimvaa_is_write tlbimvaa_is_write_mips -#define tlbimvaa_write tlbimvaa_write_mips -#define tlbimva_is_write tlbimva_is_write_mips -#define tlbimva_write tlbimva_write_mips -#define tlb_is_dirty_ram tlb_is_dirty_ram_mips +#define tcg_gen_sari_i64 tcg_gen_sari_i64_mips +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_mips +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_mips +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_mips +#define tcg_gen_muli_i64 tcg_gen_muli_i64_mips +#define tcg_gen_div_i64 tcg_gen_div_i64_mips +#define tcg_gen_rem_i64 tcg_gen_rem_i64_mips +#define tcg_gen_divu_i64 tcg_gen_divu_i64_mips +#define tcg_gen_remu_i64 tcg_gen_remu_i64_mips +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_mips +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_mips +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_mips +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_mips +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_mips +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_mips +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_mips +#define tcg_gen_not_i64 tcg_gen_not_i64_mips +#define tcg_gen_andc_i64 tcg_gen_andc_i64_mips +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_mips +#define tcg_gen_nand_i64 tcg_gen_nand_i64_mips +#define tcg_gen_nor_i64 tcg_gen_nor_i64_mips +#define tcg_gen_orc_i64 tcg_gen_orc_i64_mips +#define tcg_gen_clz_i64 tcg_gen_clz_i64_mips +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_mips +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_mips +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_mips +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_mips +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_mips +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_mips +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_mips +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_mips +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_mips +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_mips +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_mips +#define tcg_gen_extract_i64 tcg_gen_extract_i64_mips +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_mips +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_mips +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips +#define tcg_gen_add2_i64 tcg_gen_add2_i64_mips +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_mips +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_mips +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_mips +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_mips +#define tcg_gen_smin_i64 tcg_gen_smin_i64_mips +#define tcg_gen_umin_i64 tcg_gen_umin_i64_mips +#define tcg_gen_smax_i64 tcg_gen_smax_i64_mips +#define 
tcg_gen_umax_i64 tcg_gen_umax_i64_mips +#define tcg_gen_abs_i64 tcg_gen_abs_i64_mips +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_mips +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_mips +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_mips +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_mips +#define tcg_gen_exit_tb tcg_gen_exit_tb_mips +#define tcg_gen_goto_tb tcg_gen_goto_tb_mips +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_mips +#define check_exit_request check_exit_request_mips +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_mips +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_mips +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_mips +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_mips +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_mips +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_mips +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_mips +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_mips +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_mips +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_mips +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_mips +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_mips +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_mips +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_mips +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_mips +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_mips +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_mips +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_mips +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_mips +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_mips +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_mips +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_mips +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_mips +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_mips +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_mips +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_mips +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_mips +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_mips +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_mips +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_mips +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_mips +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_mips +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_mips +#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_mips +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_mips +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_mips +#define simd_desc simd_desc_mips +#define 
tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_mips +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_mips +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_mips +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_mips +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_mips +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_mips +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_mips +#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_mips +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_mips +#define tcg_gen_gvec_2 tcg_gen_gvec_2_mips +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_mips +#define tcg_gen_gvec_2s tcg_gen_gvec_2s_mips +#define tcg_gen_gvec_3 tcg_gen_gvec_3_mips +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_mips +#define tcg_gen_gvec_4 tcg_gen_gvec_4_mips +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_mips +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mips +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mips +#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_mips +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mips +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mips +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mips +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mips +#define tcg_gen_gvec_not tcg_gen_gvec_not_mips +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_mips +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_mips +#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_mips +#define tcg_gen_gvec_add tcg_gen_gvec_add_mips +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_mips +#define tcg_gen_gvec_addi tcg_gen_gvec_addi_mips +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_mips +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_mips +#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_mips +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_mips +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_mips +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_mips +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_mips +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_mips +#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_mips +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_mips +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_mips +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_mips +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_mips +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_mips +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_mips +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_mips +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_mips +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_mips +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_mips +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_mips +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_mips +#define tcg_gen_gvec_and tcg_gen_gvec_and_mips +#define tcg_gen_gvec_or tcg_gen_gvec_or_mips +#define tcg_gen_gvec_xor tcg_gen_gvec_xor_mips +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_mips +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_mips +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_mips +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_mips +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_mips +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_mips +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_mips +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_mips +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_mips +#define tcg_gen_gvec_ors tcg_gen_gvec_ors_mips +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_mips +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_mips +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_mips +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_mips +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_mips +#define 
tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_mips +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_mips +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_mips +#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_mips +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_mips +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_mips +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_mips +#define tcg_gen_gvec_sars tcg_gen_gvec_sars_mips +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_mips +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_mips +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_mips +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mips +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mips +#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_mips +#define vec_gen_2 vec_gen_2_mips +#define vec_gen_3 vec_gen_3_mips +#define vec_gen_4 vec_gen_4_mips +#define tcg_gen_mov_vec tcg_gen_mov_vec_mips +#define tcg_const_zeros_vec tcg_const_zeros_vec_mips +#define tcg_const_ones_vec tcg_const_ones_vec_mips +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_mips +#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_mips +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_mips +#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_mips +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_mips +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_mips +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_mips +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_mips +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_mips +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_mips +#define tcg_gen_ld_vec tcg_gen_ld_vec_mips +#define tcg_gen_st_vec tcg_gen_st_vec_mips +#define tcg_gen_stl_vec tcg_gen_stl_vec_mips +#define tcg_gen_and_vec tcg_gen_and_vec_mips +#define tcg_gen_or_vec tcg_gen_or_vec_mips +#define tcg_gen_xor_vec tcg_gen_xor_vec_mips +#define tcg_gen_andc_vec tcg_gen_andc_vec_mips +#define tcg_gen_orc_vec tcg_gen_orc_vec_mips +#define tcg_gen_nand_vec tcg_gen_nand_vec_mips +#define tcg_gen_nor_vec tcg_gen_nor_vec_mips +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_mips +#define tcg_gen_not_vec tcg_gen_not_vec_mips +#define tcg_gen_neg_vec tcg_gen_neg_vec_mips +#define tcg_gen_abs_vec tcg_gen_abs_vec_mips +#define tcg_gen_shli_vec tcg_gen_shli_vec_mips +#define tcg_gen_shri_vec tcg_gen_shri_vec_mips +#define tcg_gen_sari_vec tcg_gen_sari_vec_mips +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_mips +#define tcg_gen_add_vec tcg_gen_add_vec_mips +#define tcg_gen_sub_vec tcg_gen_sub_vec_mips +#define tcg_gen_mul_vec tcg_gen_mul_vec_mips +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_mips +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_mips +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_mips +#define tcg_gen_ussub_vec tcg_gen_ussub_vec_mips +#define tcg_gen_smin_vec tcg_gen_smin_vec_mips +#define tcg_gen_umin_vec tcg_gen_umin_vec_mips +#define tcg_gen_smax_vec tcg_gen_smax_vec_mips +#define tcg_gen_umax_vec tcg_gen_umax_vec_mips +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_mips +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_mips +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_mips +#define tcg_gen_shls_vec tcg_gen_shls_vec_mips +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_mips +#define tcg_gen_sars_vec tcg_gen_sars_vec_mips +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_mips +#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_mips +#define tb_htable_lookup tb_htable_lookup_mips +#define tb_set_jmp_target tb_set_jmp_target_mips +#define cpu_exec cpu_exec_mips +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips +#define cpu_reloading_memory_map cpu_reloading_memory_map_mips 
+#define cpu_loop_exit cpu_loop_exit_mips
+#define cpu_loop_exit_restore cpu_loop_exit_restore_mips
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips
+#define tlb_init tlb_init_mips
+#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mips
+#define tlb_flush tlb_flush_mips
+#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_mips
+#define tlb_flush_all_cpus tlb_flush_all_cpus_mips
+#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_mips
+#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_mips
+#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips
+#define tlb_flush_page tlb_flush_page_mips
+#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_mips
+#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_mips
+#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_mips
+#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_mips
 #define tlb_protect_code tlb_protect_code_mips
-#define tlb_reset_dirty_range tlb_reset_dirty_range_mips
-#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_mips
+#define tlb_unprotect_code tlb_unprotect_code_mips
+#define tlb_reset_dirty tlb_reset_dirty_mips
 #define tlb_set_dirty tlb_set_dirty_mips
-#define tlb_set_dirty1 tlb_set_dirty1_mips
-#define tlb_unprotect_code_phys tlb_unprotect_code_phys_mips
+#define tlb_set_page_with_attrs tlb_set_page_with_attrs_mips
+#define tlb_set_page tlb_set_page_mips
+#define get_page_addr_code_hostp get_page_addr_code_hostp_mips
+#define get_page_addr_code get_page_addr_code_mips
+#define probe_access probe_access_mips
 #define tlb_vaddr_to_host tlb_vaddr_to_host_mips
-#define token_get_type token_get_type_mips
-#define token_get_value token_get_value_mips
-#define token_is_escape token_is_escape_mips
-#define token_is_keyword token_is_keyword_mips
-#define token_is_operator token_is_operator_mips
-#define tokens_append_from_iter tokens_append_from_iter_mips
-#define to_qiv to_qiv_mips
-#define to_qov to_qov_mips
-#define tosa_init tosa_init_mips
-#define tosa_machine_init tosa_machine_init_mips
-#define tswap32 tswap32_mips
-#define tswap64 tswap64_mips
-#define type_class_get_size type_class_get_size_mips
-#define type_get_by_name type_get_by_name_mips
-#define type_get_parent type_get_parent_mips
-#define type_has_parent type_has_parent_mips
-#define type_initialize type_initialize_mips
-#define type_initialize_interface type_initialize_interface_mips
-#define type_is_ancestor type_is_ancestor_mips
-#define type_new type_new_mips
-#define type_object_get_size type_object_get_size_mips
-#define type_register_internal type_register_internal_mips
-#define type_table_add type_table_add_mips
-#define type_table_get type_table_get_mips
-#define type_table_lookup type_table_lookup_mips
-#define uint16_to_float32 uint16_to_float32_mips
-#define uint16_to_float64 uint16_to_float64_mips
-#define uint32_to_float32 uint32_to_float32_mips
-#define uint32_to_float64 uint32_to_float64_mips
-#define uint64_to_float128 uint64_to_float128_mips
-#define uint64_to_float32 uint64_to_float32_mips
-#define uint64_to_float64 uint64_to_float64_mips
-#define unassigned_io_ops unassigned_io_ops_mips
-#define unassigned_io_read unassigned_io_read_mips
-#define unassigned_io_write unassigned_io_write_mips
-#define unassigned_mem_accepts unassigned_mem_accepts_mips
+#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips
+#define helper_le_lduw_mmu helper_le_lduw_mmu_mips
+#define helper_be_lduw_mmu helper_be_lduw_mmu_mips
+#define helper_le_ldul_mmu helper_le_ldul_mmu_mips
+#define helper_be_ldul_mmu helper_be_ldul_mmu_mips
+#define helper_le_ldq_mmu helper_le_ldq_mmu_mips
+#define helper_be_ldq_mmu helper_be_ldq_mmu_mips
+#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips
+#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips
+#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips
+#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips
+#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips
+#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_mips
+#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_mips
+#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_mips
+#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_mips
+#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_mips
+#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_mips
+#define cpu_ldub_data_ra cpu_ldub_data_ra_mips
+#define cpu_ldsb_data_ra cpu_ldsb_data_ra_mips
+#define cpu_lduw_data_ra cpu_lduw_data_ra_mips
+#define cpu_ldsw_data_ra cpu_ldsw_data_ra_mips
+#define cpu_ldl_data_ra cpu_ldl_data_ra_mips
+#define cpu_ldq_data_ra cpu_ldq_data_ra_mips
+#define cpu_ldub_data cpu_ldub_data_mips
+#define cpu_ldsb_data cpu_ldsb_data_mips
+#define cpu_lduw_data cpu_lduw_data_mips
+#define cpu_ldsw_data cpu_ldsw_data_mips
+#define cpu_ldl_data cpu_ldl_data_mips
+#define cpu_ldq_data cpu_ldq_data_mips
+#define helper_ret_stb_mmu helper_ret_stb_mmu_mips
+#define helper_le_stw_mmu helper_le_stw_mmu_mips
+#define helper_be_stw_mmu helper_be_stw_mmu_mips
+#define helper_le_stl_mmu helper_le_stl_mmu_mips
+#define helper_be_stl_mmu helper_be_stl_mmu_mips
+#define helper_le_stq_mmu helper_le_stq_mmu_mips
+#define helper_be_stq_mmu helper_be_stq_mmu_mips
+#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_mips
+#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_mips
+#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_mips
+#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_mips
+#define cpu_stb_data_ra cpu_stb_data_ra_mips
+#define cpu_stw_data_ra cpu_stw_data_ra_mips
+#define cpu_stl_data_ra cpu_stl_data_ra_mips
+#define cpu_stq_data_ra cpu_stq_data_ra_mips
+#define cpu_stb_data cpu_stb_data_mips
+#define cpu_stw_data cpu_stw_data_mips
+#define cpu_stl_data cpu_stl_data_mips
+#define cpu_stq_data cpu_stq_data_mips
+#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_mips
+#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_mips
+#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_mips
+#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_mips
+#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_mips
+#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_mips
+#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_mips
+#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_mips
+#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_mips
+#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_mips
+#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_mips
+#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_mips
+#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_mips
+#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_mips
+#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_mips
+#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_mips
+#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_mips
+#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_mips
+#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_mips
+#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_mips
+#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_mips
+#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_mips
+#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_mips
+#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_mips
+#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_mips
+#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_mips
+#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_mips
+#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_mips
+#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_mips
+#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_mips
+#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_mips
+#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_mips
+#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_mips
+#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_mips
+#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_mips
+#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_mips
+#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_mips
+#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_mips
+#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_mips
+#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_mips
+#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_mips
+#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_mips
+#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_mips
+#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_mips
+#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_mips
+#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_mips
+#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_mips
+#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_mips
+#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_mips
+#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_mips
+#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_mips
+#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_mips
+#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_mips
+#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_mips
+#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_mips
+#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_mips
+#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_mips
+#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_mips
+#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_mips
+#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_mips
+#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_mips
+#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_mips
+#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_mips
+#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_mips
+#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_mips
+#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_mips
+#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_mips
+#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_mips
+#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_mips
+#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_mips
+#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_mips
+#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_mips
+#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_mips
+#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_mips
+#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_mips
+#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_mips
+#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_mips
+#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_mips
+#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_mips
+#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_mips
+#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_mips
+#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_mips
+#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_mips
+#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_mips
+#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_mips
+#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_mips
+#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_mips
+#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_mips
+#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_mips
+#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_mips
+#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_mips
+#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_mips
+#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_mips
+#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_mips
+#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_mips
+#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_mips
+#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_mips
+#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_mips
+#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_mips
+#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_mips
+#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_mips
+#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_mips
+#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_mips
+#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_mips
+#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_mips
+#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_mips
+#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_mips
+#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_mips
+#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_mips
+#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_mips
+#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_mips
+#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_mips
+#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_mips
+#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_mips
+#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_mips
+#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_mips
+#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_mips
+#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_mips
+#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_mips
+#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_mips
+#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_mips
+#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_mips
+#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_mips
+#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_mips
+#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_mips
+#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_mips
+#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_mips
+#define helper_atomic_xchgb helper_atomic_xchgb_mips
+#define helper_atomic_fetch_addb helper_atomic_fetch_addb_mips
+#define helper_atomic_fetch_andb helper_atomic_fetch_andb_mips
+#define helper_atomic_fetch_orb helper_atomic_fetch_orb_mips
+#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_mips
+#define helper_atomic_add_fetchb helper_atomic_add_fetchb_mips
+#define helper_atomic_and_fetchb helper_atomic_and_fetchb_mips
+#define helper_atomic_or_fetchb helper_atomic_or_fetchb_mips
+#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_mips
+#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_mips
+#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_mips
+#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_mips
+#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_mips
+#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_mips
+#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_mips
+#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_mips
+#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_mips
+#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_mips
+#define helper_atomic_xchgw_le helper_atomic_xchgw_le_mips
+#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_mips
+#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_mips
+#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_mips
+#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_mips
+#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_mips
+#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_mips
+#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_mips
+#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_mips
+#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_mips
+#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_mips
+#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_mips
+#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_mips
+#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_mips
+#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_mips
+#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_mips
+#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_mips
+#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_mips
+#define helper_atomic_xchgw_be helper_atomic_xchgw_be_mips
+#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_mips
+#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_mips
+#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_mips
+#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_mips
+#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_mips
+#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_mips
+#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_mips
+#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_mips
+#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_mips
+#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_mips
+#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_mips
+#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_mips
+#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_mips
+#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_mips
+#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_mips
+#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_mips
+#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_mips
+#define helper_atomic_xchgl_le helper_atomic_xchgl_le_mips
+#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_mips
+#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_mips
+#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_mips
+#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_mips
+#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_mips
+#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_mips
+#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_mips
+#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_mips
+#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_mips
+#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_mips
+#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_mips
+#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_mips
+#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_mips
+#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_mips
+#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_mips
+#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_mips
+#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_mips
+#define helper_atomic_xchgl_be helper_atomic_xchgl_be_mips
+#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_mips
+#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_mips
+#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_mips
+#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_mips
+#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_mips
+#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_mips
+#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_mips
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_mips
+#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_mips
+#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_mips
+#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_mips
+#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_mips
+#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_mips
+#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_mips
+#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_mips
+#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_mips
+#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_mips
+#define helper_atomic_xchgq_le helper_atomic_xchgq_le_mips
+#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_mips
+#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_mips
+#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_mips
+#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_mips
+#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_mips
+#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_mips
+#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_mips
+#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_mips
+#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_mips
+#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_mips
+#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_mips
+#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_mips
+#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_mips
+#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_mips
+#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_mips
+#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_mips
+#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_mips
+#define helper_atomic_xchgq_be helper_atomic_xchgq_be_mips
+#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_mips
+#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_mips
+#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_mips
+#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_mips
+#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_mips
+#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_mips
+#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_mips
+#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_mips
+#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_mips
+#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_mips
+#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_mips
+#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_mips
+#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_mips
+#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_mips
+#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_mips
+#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_mips
+#define cpu_ldub_code cpu_ldub_code_mips
+#define cpu_lduw_code cpu_lduw_code_mips
+#define cpu_ldl_code cpu_ldl_code_mips
+#define cpu_ldq_code cpu_ldq_code_mips
+#define helper_div_i32 helper_div_i32_mips
+#define helper_rem_i32 helper_rem_i32_mips
+#define helper_divu_i32 helper_divu_i32_mips
+#define helper_remu_i32 helper_remu_i32_mips
+#define helper_shl_i64 helper_shl_i64_mips
+#define helper_shr_i64 helper_shr_i64_mips
+#define helper_sar_i64 helper_sar_i64_mips
+#define helper_div_i64 helper_div_i64_mips
+#define helper_rem_i64 helper_rem_i64_mips
+#define helper_divu_i64 helper_divu_i64_mips
+#define helper_remu_i64 helper_remu_i64_mips
+#define helper_muluh_i64 helper_muluh_i64_mips
+#define helper_mulsh_i64 helper_mulsh_i64_mips
+#define helper_clz_i32 helper_clz_i32_mips
+#define helper_ctz_i32 helper_ctz_i32_mips
+#define helper_clz_i64 helper_clz_i64_mips
+#define helper_ctz_i64 helper_ctz_i64_mips
+#define helper_clrsb_i32 helper_clrsb_i32_mips
+#define helper_clrsb_i64 helper_clrsb_i64_mips
+#define helper_ctpop_i32 helper_ctpop_i32_mips
+#define helper_ctpop_i64 helper_ctpop_i64_mips
+#define helper_lookup_tb_ptr helper_lookup_tb_ptr_mips
+#define helper_exit_atomic helper_exit_atomic_mips
+#define helper_gvec_add8 helper_gvec_add8_mips
+#define helper_gvec_add16 helper_gvec_add16_mips
+#define helper_gvec_add32 helper_gvec_add32_mips
+#define helper_gvec_add64 helper_gvec_add64_mips
+#define helper_gvec_adds8 helper_gvec_adds8_mips
+#define helper_gvec_adds16 helper_gvec_adds16_mips
+#define helper_gvec_adds32 helper_gvec_adds32_mips
+#define helper_gvec_adds64 helper_gvec_adds64_mips
+#define helper_gvec_sub8 helper_gvec_sub8_mips
+#define helper_gvec_sub16 helper_gvec_sub16_mips
+#define helper_gvec_sub32 helper_gvec_sub32_mips
+#define helper_gvec_sub64 helper_gvec_sub64_mips
+#define helper_gvec_subs8 helper_gvec_subs8_mips
+#define helper_gvec_subs16 helper_gvec_subs16_mips
+#define helper_gvec_subs32 helper_gvec_subs32_mips
+#define helper_gvec_subs64 helper_gvec_subs64_mips
+#define helper_gvec_mul8 helper_gvec_mul8_mips
+#define helper_gvec_mul16 helper_gvec_mul16_mips
+#define helper_gvec_mul32 helper_gvec_mul32_mips
+#define helper_gvec_mul64 helper_gvec_mul64_mips
+#define helper_gvec_muls8 helper_gvec_muls8_mips
+#define helper_gvec_muls16 helper_gvec_muls16_mips
+#define helper_gvec_muls32 helper_gvec_muls32_mips
+#define helper_gvec_muls64 helper_gvec_muls64_mips
+#define helper_gvec_neg8 helper_gvec_neg8_mips
+#define helper_gvec_neg16 helper_gvec_neg16_mips
+#define helper_gvec_neg32 helper_gvec_neg32_mips
+#define helper_gvec_neg64 helper_gvec_neg64_mips
+#define helper_gvec_abs8 helper_gvec_abs8_mips
+#define helper_gvec_abs16 helper_gvec_abs16_mips
+#define helper_gvec_abs32 helper_gvec_abs32_mips
+#define helper_gvec_abs64 helper_gvec_abs64_mips
+#define helper_gvec_mov helper_gvec_mov_mips
+#define helper_gvec_dup64 helper_gvec_dup64_mips
+#define helper_gvec_dup32 helper_gvec_dup32_mips
+#define helper_gvec_dup16 helper_gvec_dup16_mips
+#define helper_gvec_dup8 helper_gvec_dup8_mips
+#define helper_gvec_not helper_gvec_not_mips
+#define helper_gvec_and helper_gvec_and_mips
+#define helper_gvec_or helper_gvec_or_mips
+#define helper_gvec_xor helper_gvec_xor_mips
+#define helper_gvec_andc helper_gvec_andc_mips
+#define helper_gvec_orc helper_gvec_orc_mips
+#define helper_gvec_nand helper_gvec_nand_mips
+#define helper_gvec_nor helper_gvec_nor_mips
+#define helper_gvec_eqv helper_gvec_eqv_mips
+#define helper_gvec_ands helper_gvec_ands_mips
+#define helper_gvec_xors helper_gvec_xors_mips
+#define helper_gvec_ors helper_gvec_ors_mips
+#define helper_gvec_shl8i helper_gvec_shl8i_mips
+#define helper_gvec_shl16i helper_gvec_shl16i_mips
+#define helper_gvec_shl32i helper_gvec_shl32i_mips
+#define helper_gvec_shl64i helper_gvec_shl64i_mips
+#define helper_gvec_shr8i helper_gvec_shr8i_mips
+#define helper_gvec_shr16i helper_gvec_shr16i_mips
+#define helper_gvec_shr32i helper_gvec_shr32i_mips
+#define helper_gvec_shr64i helper_gvec_shr64i_mips
+#define helper_gvec_sar8i helper_gvec_sar8i_mips
+#define helper_gvec_sar16i helper_gvec_sar16i_mips
+#define helper_gvec_sar32i helper_gvec_sar32i_mips
+#define helper_gvec_sar64i helper_gvec_sar64i_mips
+#define helper_gvec_shl8v helper_gvec_shl8v_mips
+#define helper_gvec_shl16v helper_gvec_shl16v_mips
+#define helper_gvec_shl32v helper_gvec_shl32v_mips
+#define helper_gvec_shl64v helper_gvec_shl64v_mips
+#define helper_gvec_shr8v helper_gvec_shr8v_mips
+#define helper_gvec_shr16v helper_gvec_shr16v_mips
+#define helper_gvec_shr32v helper_gvec_shr32v_mips
+#define helper_gvec_shr64v helper_gvec_shr64v_mips
+#define helper_gvec_sar8v helper_gvec_sar8v_mips
+#define helper_gvec_sar16v helper_gvec_sar16v_mips
+#define helper_gvec_sar32v helper_gvec_sar32v_mips
+#define helper_gvec_sar64v helper_gvec_sar64v_mips
+#define helper_gvec_eq8 helper_gvec_eq8_mips
+#define helper_gvec_ne8 helper_gvec_ne8_mips
+#define helper_gvec_lt8 helper_gvec_lt8_mips
+#define helper_gvec_le8 helper_gvec_le8_mips
+#define helper_gvec_ltu8 helper_gvec_ltu8_mips
+#define helper_gvec_leu8 helper_gvec_leu8_mips
+#define helper_gvec_eq16 helper_gvec_eq16_mips
+#define helper_gvec_ne16 helper_gvec_ne16_mips
+#define helper_gvec_lt16 helper_gvec_lt16_mips
+#define helper_gvec_le16 helper_gvec_le16_mips
+#define helper_gvec_ltu16 helper_gvec_ltu16_mips
+#define helper_gvec_leu16 helper_gvec_leu16_mips
+#define helper_gvec_eq32 helper_gvec_eq32_mips
+#define helper_gvec_ne32 helper_gvec_ne32_mips
+#define helper_gvec_lt32 helper_gvec_lt32_mips
+#define helper_gvec_le32 helper_gvec_le32_mips
+#define helper_gvec_ltu32 helper_gvec_ltu32_mips
+#define helper_gvec_leu32 helper_gvec_leu32_mips
+#define helper_gvec_eq64 helper_gvec_eq64_mips
+#define helper_gvec_ne64 helper_gvec_ne64_mips
+#define helper_gvec_lt64 helper_gvec_lt64_mips
+#define helper_gvec_le64 helper_gvec_le64_mips
+#define helper_gvec_ltu64 helper_gvec_ltu64_mips
+#define helper_gvec_leu64 helper_gvec_leu64_mips
+#define helper_gvec_ssadd8 helper_gvec_ssadd8_mips
+#define helper_gvec_ssadd16 helper_gvec_ssadd16_mips
+#define helper_gvec_ssadd32 helper_gvec_ssadd32_mips
+#define helper_gvec_ssadd64 helper_gvec_ssadd64_mips
+#define helper_gvec_sssub8 helper_gvec_sssub8_mips
+#define helper_gvec_sssub16 helper_gvec_sssub16_mips
+#define helper_gvec_sssub32 helper_gvec_sssub32_mips
+#define helper_gvec_sssub64 helper_gvec_sssub64_mips
+#define helper_gvec_usadd8 helper_gvec_usadd8_mips
+#define helper_gvec_usadd16 helper_gvec_usadd16_mips
+#define helper_gvec_usadd32 helper_gvec_usadd32_mips
+#define helper_gvec_usadd64 helper_gvec_usadd64_mips
+#define helper_gvec_ussub8 helper_gvec_ussub8_mips
+#define helper_gvec_ussub16 helper_gvec_ussub16_mips
+#define helper_gvec_ussub32 helper_gvec_ussub32_mips
+#define helper_gvec_ussub64 helper_gvec_ussub64_mips
+#define helper_gvec_smin8 helper_gvec_smin8_mips
+#define helper_gvec_smin16 helper_gvec_smin16_mips
+#define helper_gvec_smin32 helper_gvec_smin32_mips
+#define helper_gvec_smin64 helper_gvec_smin64_mips
+#define helper_gvec_smax8 helper_gvec_smax8_mips
+#define helper_gvec_smax16 helper_gvec_smax16_mips
+#define helper_gvec_smax32 helper_gvec_smax32_mips
+#define helper_gvec_smax64 helper_gvec_smax64_mips
+#define helper_gvec_umin8 helper_gvec_umin8_mips
+#define helper_gvec_umin16 helper_gvec_umin16_mips
+#define helper_gvec_umin32 helper_gvec_umin32_mips
+#define helper_gvec_umin64 helper_gvec_umin64_mips
+#define helper_gvec_umax8 helper_gvec_umax8_mips
+#define helper_gvec_umax16 helper_gvec_umax16_mips
+#define helper_gvec_umax32 helper_gvec_umax32_mips
+#define helper_gvec_umax64 helper_gvec_umax64_mips
+#define helper_gvec_bitsel helper_gvec_bitsel_mips
+#define cpu_restore_state cpu_restore_state_mips
+#define page_collection_lock page_collection_lock_mips
+#define page_collection_unlock page_collection_unlock_mips
+#define free_code_gen_buffer free_code_gen_buffer_mips
+#define tcg_exec_init tcg_exec_init_mips
+#define tb_cleanup tb_cleanup_mips
+#define tb_flush tb_flush_mips
+#define tb_phys_invalidate tb_phys_invalidate_mips
+#define tb_gen_code tb_gen_code_mips
+#define tb_exec_lock tb_exec_lock_mips
+#define tb_exec_unlock tb_exec_unlock_mips
+#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips
+#define tb_invalidate_phys_range tb_invalidate_phys_range_mips
+#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips
+#define tb_check_watchpoint tb_check_watchpoint_mips
+#define cpu_io_recompile cpu_io_recompile_mips
+#define tb_flush_jmp_cache tb_flush_jmp_cache_mips
+#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_mips
+#define translator_loop_temp_check translator_loop_temp_check_mips
+#define translator_loop translator_loop_mips
+#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_mips
+#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_mips
+#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_mips
+#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_mips
+#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_mips
+#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_mips
 #define unassigned_mem_ops unassigned_mem_ops_mips
-#define unassigned_mem_read unassigned_mem_read_mips
-#define unassigned_mem_write unassigned_mem_write_mips
-#define update_spsel update_spsel_mips
-#define v6_cp_reginfo v6_cp_reginfo_mips
-#define v6k_cp_reginfo v6k_cp_reginfo_mips
-#define v7_cp_reginfo v7_cp_reginfo_mips
-#define v7mp_cp_reginfo v7mp_cp_reginfo_mips
-#define v7m_pop v7m_pop_mips
-#define v7m_push v7m_push_mips
-#define v8_cp_reginfo v8_cp_reginfo_mips
-#define v8_el2_cp_reginfo v8_el2_cp_reginfo_mips
-#define v8_el3_cp_reginfo v8_el3_cp_reginfo_mips
-#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_mips
-#define vapa_cp_reginfo vapa_cp_reginfo_mips
-#define vbar_write vbar_write_mips
-#define vfp_exceptbits_from_host vfp_exceptbits_from_host_mips
-#define vfp_exceptbits_to_host vfp_exceptbits_to_host_mips
-#define vfp_get_fpcr vfp_get_fpcr_mips
-#define vfp_get_fpscr vfp_get_fpscr_mips
-#define vfp_get_fpsr vfp_get_fpsr_mips
-#define vfp_reg_offset vfp_reg_offset_mips
-#define vfp_set_fpcr vfp_set_fpcr_mips
-#define vfp_set_fpscr vfp_set_fpscr_mips
-#define vfp_set_fpsr vfp_set_fpsr_mips
-#define visit_end_implicit_struct visit_end_implicit_struct_mips
-#define visit_end_list visit_end_list_mips
-#define visit_end_struct visit_end_struct_mips
-#define visit_end_union visit_end_union_mips
-#define visit_get_next_type visit_get_next_type_mips
-#define visit_next_list visit_next_list_mips
-#define visit_optional visit_optional_mips
-#define visit_start_implicit_struct visit_start_implicit_struct_mips
-#define visit_start_list visit_start_list_mips
-#define visit_start_struct visit_start_struct_mips
-#define visit_start_union visit_start_union_mips
-#define vmsa_cp_reginfo vmsa_cp_reginfo_mips
-#define vmsa_tcr_el1_write vmsa_tcr_el1_write_mips
-#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_mips
-#define vmsa_ttbcr_reset vmsa_ttbcr_reset_mips
-#define vmsa_ttbcr_write vmsa_ttbcr_write_mips
-#define vmsa_ttbr_write vmsa_ttbr_write_mips
-#define write_cpustate_to_list write_cpustate_to_list_mips
-#define write_list_to_cpustate write_list_to_cpustate_mips
-#define write_raw_cp_reg write_raw_cp_reg_mips
-#define X86CPURegister32_lookup X86CPURegister32_lookup_mips
-#define x86_op_defs x86_op_defs_mips
-#define xpsr_read xpsr_read_mips
-#define xpsr_write xpsr_write_mips
-#define xscale_cpar_write xscale_cpar_write_mips
-#define xscale_cp_reginfo xscale_cp_reginfo_mips
-#define cpu_mips_exec cpu_mips_exec_mips
+#define floatx80_infinity floatx80_infinity_mips
+#define dup_const_func dup_const_func_mips
+#define gen_helper_raise_exception gen_helper_raise_exception_mips
+#define gen_helper_raise_interrupt gen_helper_raise_interrupt_mips
+#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips
+#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips
+#define gen_helper_cpsr_read gen_helper_cpsr_read_mips
+#define gen_helper_cpsr_write gen_helper_cpsr_write_mips
+#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips
+#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips
+#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips
+#define helper_mfc0_random helper_mfc0_random_mips
+#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips
+#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips
+#define helper_mfc0_tcbind helper_mfc0_tcbind_mips
+#define helper_mftc0_tcbind helper_mftc0_tcbind_mips
+#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips
+#define helper_mftc0_tcrestart helper_mftc0_tcrestart_mips
+#define helper_mfc0_tchalt helper_mfc0_tchalt_mips
+#define helper_mftc0_tchalt helper_mftc0_tchalt_mips
+#define helper_mfc0_tccontext helper_mfc0_tccontext_mips
+#define helper_mftc0_tccontext helper_mftc0_tccontext_mips
+#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips
+#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips
+#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips
+#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips
+#define helper_mfc0_count helper_mfc0_count_mips
+#define helper_mfc0_saar helper_mfc0_saar_mips
+#define helper_mfhc0_saar helper_mfhc0_saar_mips
+#define helper_mftc0_entryhi helper_mftc0_entryhi_mips
+#define helper_mftc0_cause helper_mftc0_cause_mips
+#define helper_mftc0_status helper_mftc0_status_mips
+#define helper_mfc0_lladdr helper_mfc0_lladdr_mips
+#define helper_mfc0_maar helper_mfc0_maar_mips
+#define helper_mfhc0_maar helper_mfhc0_maar_mips
+#define helper_mfc0_watchlo helper_mfc0_watchlo_mips
+#define helper_mfc0_watchhi helper_mfc0_watchhi_mips
+#define helper_mfhc0_watchhi helper_mfhc0_watchhi_mips
+#define helper_mfc0_debug helper_mfc0_debug_mips
+#define helper_mftc0_debug helper_mftc0_debug_mips
+#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips
+#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips
+#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips
+#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips
+#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips
+#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips
+#define helper_dmfc0_maar helper_dmfc0_maar_mips
+#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips
+#define helper_dmfc0_watchhi helper_dmfc0_watchhi_mips
+#define helper_dmfc0_saar helper_dmfc0_saar_mips
+#define helper_mtc0_index helper_mtc0_index_mips
+#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips
+#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips
+#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips
+#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips
+#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips
+#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips
+#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips
+#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips
+#define helper_mtc0_yqmask helper_mtc0_yqmask_mips
+#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips
+#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips
+#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips
+#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips
+#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips
+#define helper_mtc0_tcbind helper_mtc0_tcbind_mips
+#define helper_mttc0_tcbind helper_mttc0_tcbind_mips
+#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips
+#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips
+#define helper_mtc0_tchalt helper_mtc0_tchalt_mips
+#define helper_mttc0_tchalt helper_mttc0_tchalt_mips
+#define helper_mtc0_tccontext helper_mtc0_tccontext_mips
+#define helper_mttc0_tccontext helper_mttc0_tccontext_mips
+#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips
+#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips
+#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips
+#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips
+#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips
+#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips
+#define helper_mtc0_context helper_mtc0_context_mips
+#define helper_mtc0_memorymapid helper_mtc0_memorymapid_mips
+#define update_pagemask update_pagemask_mips
+#define helper_mtc0_pagemask helper_mtc0_pagemask_mips
+#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips
+#define helper_mtc0_segctl0 helper_mtc0_segctl0_mips
+#define helper_mtc0_segctl1 helper_mtc0_segctl1_mips
+#define helper_mtc0_segctl2 helper_mtc0_segctl2_mips
+#define helper_mtc0_pwfield helper_mtc0_pwfield_mips
+#define helper_mtc0_pwsize helper_mtc0_pwsize_mips
+#define helper_mtc0_wired helper_mtc0_wired_mips
+#define helper_mtc0_pwctl helper_mtc0_pwctl_mips
+#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips
+#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips
+#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips
+#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips
+#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips
+#define helper_mtc0_hwrena helper_mtc0_hwrena_mips
+#define helper_mtc0_count helper_mtc0_count_mips
+#define helper_mtc0_saari helper_mtc0_saari_mips
+#define helper_mtc0_saar helper_mtc0_saar_mips
+#define helper_mthc0_saar helper_mthc0_saar_mips
+#define helper_mtc0_entryhi helper_mtc0_entryhi_mips
+#define helper_mttc0_entryhi helper_mttc0_entryhi_mips
+#define helper_mtc0_compare helper_mtc0_compare_mips
+#define helper_mtc0_status helper_mtc0_status_mips
+#define helper_mttc0_status helper_mttc0_status_mips
+#define helper_mtc0_intctl helper_mtc0_intctl_mips
+#define helper_mtc0_srsctl helper_mtc0_srsctl_mips
+#define helper_mtc0_cause helper_mtc0_cause_mips
+#define helper_mttc0_cause helper_mttc0_cause_mips
+#define helper_mftc0_epc helper_mftc0_epc_mips
+#define helper_mftc0_ebase helper_mftc0_ebase_mips
+#define helper_mtc0_ebase helper_mtc0_ebase_mips
+#define helper_mttc0_ebase helper_mttc0_ebase_mips
+#define helper_mftc0_configx helper_mftc0_configx_mips
+#define helper_mtc0_config0 helper_mtc0_config0_mips
+#define helper_mtc0_config2 helper_mtc0_config2_mips
+#define helper_mtc0_config3 helper_mtc0_config3_mips
+#define helper_mtc0_config4 helper_mtc0_config4_mips
+#define helper_mtc0_config5 helper_mtc0_config5_mips
+#define helper_mtc0_lladdr helper_mtc0_lladdr_mips
+#define helper_mtc0_maar helper_mtc0_maar_mips
+#define helper_mthc0_maar helper_mthc0_maar_mips
+#define helper_mtc0_maari helper_mtc0_maari_mips
+#define helper_mtc0_watchlo helper_mtc0_watchlo_mips
+#define helper_mtc0_watchhi helper_mtc0_watchhi_mips
+#define helper_mthc0_watchhi helper_mthc0_watchhi_mips
+#define helper_mtc0_xcontext helper_mtc0_xcontext_mips
+#define helper_mtc0_framemask helper_mtc0_framemask_mips
+#define helper_mtc0_debug helper_mtc0_debug_mips
+#define helper_mttc0_debug helper_mttc0_debug_mips
+#define helper_mtc0_performance0 helper_mtc0_performance0_mips
+#define helper_mtc0_errctl helper_mtc0_errctl_mips
+#define helper_mtc0_taglo helper_mtc0_taglo_mips
+#define helper_mtc0_datalo helper_mtc0_datalo_mips
+#define helper_mtc0_taghi helper_mtc0_taghi_mips
+#define helper_mtc0_datahi helper_mtc0_datahi_mips
+#define helper_mftgpr helper_mftgpr_mips
+#define helper_mftlo helper_mftlo_mips
+#define helper_mfthi helper_mfthi_mips
+#define helper_mftacx helper_mftacx_mips
+#define helper_mftdsp helper_mftdsp_mips
+#define helper_mttgpr helper_mttgpr_mips
+#define helper_mttlo helper_mttlo_mips
+#define helper_mtthi helper_mtthi_mips
+#define helper_mttacx helper_mttacx_mips
+#define helper_mttdsp helper_mttdsp_mips
+#define helper_dmt helper_dmt_mips
+#define helper_emt helper_emt_mips
+#define helper_dvpe helper_dvpe_mips
+#define helper_evpe helper_evpe_mips
+#define helper_dvp helper_dvp_mips
+#define helper_evp helper_evp_mips
 #define cpu_mips_get_random cpu_mips_get_random_mips
-#define cpu_mips_get_count cpu_mips_get_count_mips
-#define cpu_mips_store_count cpu_mips_store_count_mips
-#define cpu_mips_store_compare cpu_mips_store_compare_mips
-#define cpu_mips_start_count cpu_mips_start_count_mips
-#define cpu_mips_stop_count cpu_mips_stop_count_mips
-#define mips_machine_init mips_machine_init_mips
-#define cpu_mips_kseg0_to_phys cpu_mips_kseg0_to_phys_mips
-#define cpu_mips_phys_to_kseg0 cpu_mips_phys_to_kseg0_mips
-#define cpu_mips_kvm_um_phys_to_kseg0 cpu_mips_kvm_um_phys_to_kseg0_mips
-#define mips_cpu_register_types mips_cpu_register_types_mips
 #define cpu_mips_init cpu_mips_init_mips
-#define cpu_state_reset cpu_state_reset_mips
-#define helper_msa_andi_b helper_msa_andi_b_mips
-#define helper_msa_ori_b helper_msa_ori_b_mips
-#define helper_msa_nori_b helper_msa_nori_b_mips
-#define helper_msa_xori_b helper_msa_xori_b_mips
-#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips
-#define helper_msa_bmzi_b helper_msa_bmzi_b_mips
-#define helper_msa_bseli_b helper_msa_bseli_b_mips
-#define helper_msa_shf_df helper_msa_shf_df_mips
-#define helper_msa_and_v helper_msa_and_v_mips
-#define helper_msa_or_v helper_msa_or_v_mips
-#define helper_msa_nor_v helper_msa_nor_v_mips
-#define helper_msa_xor_v helper_msa_xor_v_mips
-#define helper_msa_bmnz_v helper_msa_bmnz_v_mips
-#define helper_msa_bmz_v helper_msa_bmz_v_mips
-#define helper_msa_bsel_v helper_msa_bsel_v_mips
-#define helper_msa_addvi_df helper_msa_addvi_df_mips
-#define helper_msa_subvi_df helper_msa_subvi_df_mips
-#define helper_msa_ceqi_df helper_msa_ceqi_df_mips
-#define helper_msa_clei_s_df helper_msa_clei_s_df_mips
-#define helper_msa_clei_u_df helper_msa_clei_u_df_mips
-#define helper_msa_clti_s_df helper_msa_clti_s_df_mips
-#define helper_msa_clti_u_df helper_msa_clti_u_df_mips
-#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips
-#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips
-#define helper_msa_mini_s_df helper_msa_mini_s_df_mips
-#define helper_msa_mini_u_df helper_msa_mini_u_df_mips
-#define helper_msa_ldi_df helper_msa_ldi_df_mips
-#define helper_msa_slli_df helper_msa_slli_df_mips
-#define helper_msa_srai_df helper_msa_srai_df_mips
-#define helper_msa_srli_df helper_msa_srli_df_mips
-#define helper_msa_bclri_df helper_msa_bclri_df_mips
-#define helper_msa_bseti_df helper_msa_bseti_df_mips
-#define helper_msa_bnegi_df helper_msa_bnegi_df_mips
-#define helper_msa_sat_s_df helper_msa_sat_s_df_mips
-#define helper_msa_sat_u_df helper_msa_sat_u_df_mips
-#define helper_msa_srari_df helper_msa_srari_df_mips
-#define helper_msa_srlri_df helper_msa_srlri_df_mips
-#define helper_msa_binsli_df helper_msa_binsli_df_mips
-#define helper_msa_binsri_df helper_msa_binsri_df_mips
-#define helper_msa_sll_df helper_msa_sll_df_mips
-#define helper_msa_sra_df helper_msa_sra_df_mips
-#define helper_msa_srl_df helper_msa_srl_df_mips
-#define helper_msa_bclr_df helper_msa_bclr_df_mips
-#define helper_msa_bset_df helper_msa_bset_df_mips
-#define helper_msa_bneg_df helper_msa_bneg_df_mips
-#define helper_msa_addv_df helper_msa_addv_df_mips
-#define helper_msa_subv_df helper_msa_subv_df_mips
-#define helper_msa_max_s_df helper_msa_max_s_df_mips
-#define helper_msa_max_u_df helper_msa_max_u_df_mips
-#define helper_msa_min_s_df helper_msa_min_s_df_mips
-#define helper_msa_min_u_df helper_msa_min_u_df_mips
-#define helper_msa_max_a_df helper_msa_max_a_df_mips
-#define helper_msa_min_a_df helper_msa_min_a_df_mips
-#define helper_msa_ceq_df helper_msa_ceq_df_mips
-#define helper_msa_clt_s_df helper_msa_clt_s_df_mips
-#define helper_msa_clt_u_df helper_msa_clt_u_df_mips
-#define helper_msa_cle_s_df helper_msa_cle_s_df_mips
-#define helper_msa_cle_u_df helper_msa_cle_u_df_mips
-#define helper_msa_add_a_df helper_msa_add_a_df_mips
-#define helper_msa_adds_a_df helper_msa_adds_a_df_mips
-#define helper_msa_adds_s_df helper_msa_adds_s_df_mips
-#define helper_msa_adds_u_df helper_msa_adds_u_df_mips
-#define helper_msa_ave_s_df helper_msa_ave_s_df_mips
-#define helper_msa_ave_u_df helper_msa_ave_u_df_mips
-#define helper_msa_aver_s_df helper_msa_aver_s_df_mips
-#define helper_msa_aver_u_df helper_msa_aver_u_df_mips
-#define helper_msa_subs_s_df helper_msa_subs_s_df_mips
-#define helper_msa_subs_u_df helper_msa_subs_u_df_mips
-#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips
-#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips
-#define helper_msa_asub_s_df helper_msa_asub_s_df_mips
-#define helper_msa_asub_u_df helper_msa_asub_u_df_mips
-#define helper_msa_mulv_df helper_msa_mulv_df_mips
-#define helper_msa_div_s_df helper_msa_div_s_df_mips
-#define helper_msa_div_u_df helper_msa_div_u_df_mips
-#define helper_msa_mod_s_df helper_msa_mod_s_df_mips
-#define helper_msa_mod_u_df helper_msa_mod_u_df_mips
-#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips
-#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips
-#define helper_msa_srar_df helper_msa_srar_df_mips
-#define helper_msa_srlr_df helper_msa_srlr_df_mips
-#define helper_msa_hadd_s_df helper_msa_hadd_s_df_mips
-#define helper_msa_hadd_u_df helper_msa_hadd_u_df_mips
-#define helper_msa_hsub_s_df helper_msa_hsub_s_df_mips
-#define helper_msa_hsub_u_df helper_msa_hsub_u_df_mips
-#define helper_msa_mul_q_df helper_msa_mul_q_df_mips
-#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips
-#define helper_msa_sld_df helper_msa_sld_df_mips
-#define helper_msa_maddv_df helper_msa_maddv_df_mips
-#define helper_msa_msubv_df helper_msa_msubv_df_mips
-#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips
-#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips
-#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips
-#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips
-#define helper_msa_binsl_df helper_msa_binsl_df_mips
-#define helper_msa_binsr_df helper_msa_binsr_df_mips
-#define helper_msa_madd_q_df helper_msa_madd_q_df_mips
-#define helper_msa_msub_q_df helper_msa_msub_q_df_mips
-#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips
-#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips
-#define helper_msa_splat_df helper_msa_splat_df_mips
-#define helper_msa_pckev_df helper_msa_pckev_df_mips
-#define helper_msa_pckod_df helper_msa_pckod_df_mips
-#define helper_msa_ilvl_df helper_msa_ilvl_df_mips
-#define helper_msa_ilvr_df helper_msa_ilvr_df_mips
-#define helper_msa_ilvev_df helper_msa_ilvev_df_mips
-#define helper_msa_ilvod_df helper_msa_ilvod_df_mips
-#define helper_msa_vshf_df helper_msa_vshf_df_mips
-#define helper_msa_sldi_df helper_msa_sldi_df_mips
-#define helper_msa_splati_df helper_msa_splati_df_mips
-#define helper_msa_copy_s_df helper_msa_copy_s_df_mips
-#define helper_msa_copy_u_df helper_msa_copy_u_df_mips
-#define helper_msa_insert_df helper_msa_insert_df_mips
-#define helper_msa_insve_df helper_msa_insve_df_mips
-#define helper_msa_ctcmsa helper_msa_ctcmsa_mips
-#define helper_msa_cfcmsa helper_msa_cfcmsa_mips
-#define helper_msa_move_v helper_msa_move_v_mips
-#define helper_msa_fill_df helper_msa_fill_df_mips
-#define helper_msa_nlzc_df helper_msa_nlzc_df_mips
-#define helper_msa_nloc_df helper_msa_nloc_df_mips
-#define helper_msa_pcnt_df helper_msa_pcnt_df_mips
-#define helper_msa_fcaf_df helper_msa_fcaf_df_mips
-#define helper_msa_fcun_df helper_msa_fcun_df_mips
-#define helper_msa_fceq_df helper_msa_fceq_df_mips
-#define helper_msa_fcueq_df helper_msa_fcueq_df_mips
-#define helper_msa_fclt_df helper_msa_fclt_df_mips
-#define helper_msa_fcult_df helper_msa_fcult_df_mips
-#define helper_msa_fcle_df helper_msa_fcle_df_mips
-#define helper_msa_fcule_df helper_msa_fcule_df_mips
-#define helper_msa_fsaf_df helper_msa_fsaf_df_mips
-#define helper_msa_fsun_df helper_msa_fsun_df_mips
-#define helper_msa_fseq_df helper_msa_fseq_df_mips
-#define helper_msa_fsueq_df helper_msa_fsueq_df_mips
-#define helper_msa_fslt_df helper_msa_fslt_df_mips
-#define helper_msa_fsult_df helper_msa_fsult_df_mips
-#define helper_msa_fsle_df helper_msa_fsle_df_mips
-#define helper_msa_fsule_df helper_msa_fsule_df_mips
-#define helper_msa_fcor_df helper_msa_fcor_df_mips
-#define helper_msa_fcune_df helper_msa_fcune_df_mips
-#define helper_msa_fcne_df helper_msa_fcne_df_mips
-#define helper_msa_fsor_df helper_msa_fsor_df_mips
-#define helper_msa_fsune_df helper_msa_fsune_df_mips
-#define helper_msa_fsne_df helper_msa_fsne_df_mips
-#define helper_msa_fadd_df helper_msa_fadd_df_mips
-#define helper_msa_fsub_df helper_msa_fsub_df_mips
-#define helper_msa_fmul_df helper_msa_fmul_df_mips
-#define helper_msa_fdiv_df helper_msa_fdiv_df_mips
-#define helper_msa_fmadd_df helper_msa_fmadd_df_mips
-#define helper_msa_fmsub_df helper_msa_fmsub_df_mips
-#define helper_msa_fexp2_df helper_msa_fexp2_df_mips
-#define helper_msa_fexdo_df helper_msa_fexdo_df_mips
-#define helper_msa_ftq_df helper_msa_ftq_df_mips
-#define helper_msa_fmin_df helper_msa_fmin_df_mips
-#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips
-#define helper_msa_fmax_df helper_msa_fmax_df_mips
-#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips
-#define helper_msa_fclass_df helper_msa_fclass_df_mips
-#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips
-#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips
-#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips
-#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips
-#define helper_msa_frcp_df helper_msa_frcp_df_mips
-#define helper_msa_frint_df helper_msa_frint_df_mips
-#define helper_msa_flog2_df helper_msa_flog2_df_mips
-#define helper_msa_fexupl_df helper_msa_fexupl_df_mips
-#define helper_msa_fexupr_df helper_msa_fexupr_df_mips
-#define helper_msa_ffql_df helper_msa_ffql_df_mips
-#define helper_msa_ffqr_df helper_msa_ffqr_df_mips
-#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips
-#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips
-#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips
-#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips
-#define helper_paddsb helper_paddsb_mips
-#define helper_paddusb helper_paddusb_mips
-#define helper_paddsh helper_paddsh_mips
-#define helper_paddush helper_paddush_mips
-#define helper_paddb helper_paddb_mips
-#define helper_paddh helper_paddh_mips
-#define helper_paddw helper_paddw_mips
-#define helper_psubsb helper_psubsb_mips
-#define helper_psubusb helper_psubusb_mips
-#define helper_psubsh helper_psubsh_mips
-#define helper_psubush helper_psubush_mips
-#define helper_psubb helper_psubb_mips
-#define helper_psubh helper_psubh_mips
-#define helper_psubw helper_psubw_mips
-#define helper_pshufh helper_pshufh_mips
-#define helper_packsswh helper_packsswh_mips
-#define helper_packsshb helper_packsshb_mips
-#define helper_packushb helper_packushb_mips
-#define helper_punpcklwd helper_punpcklwd_mips
-#define helper_punpckhwd helper_punpckhwd_mips
-#define helper_punpcklhw helper_punpcklhw_mips
-#define helper_punpckhhw helper_punpckhhw_mips
-#define helper_punpcklbh helper_punpcklbh_mips
-#define helper_punpckhbh helper_punpckhbh_mips
-#define helper_pavgh helper_pavgh_mips
-#define helper_pavgb helper_pavgb_mips
-#define helper_pmaxsh helper_pmaxsh_mips
-#define helper_pminsh helper_pminsh_mips
-#define helper_pmaxub helper_pmaxub_mips
-#define helper_pminub helper_pminub_mips
-#define helper_pcmpeqw helper_pcmpeqw_mips
-#define helper_pcmpgtw helper_pcmpgtw_mips
-#define helper_pcmpeqh helper_pcmpeqh_mips
-#define helper_pcmpgth helper_pcmpgth_mips
-#define helper_pcmpeqb helper_pcmpeqb_mips
-#define helper_pcmpgtb helper_pcmpgtb_mips
-#define helper_psllw helper_psllw_mips
-#define helper_psrlw helper_psrlw_mips
-#define helper_psraw helper_psraw_mips
-#define helper_psllh helper_psllh_mips
-#define helper_psrlh helper_psrlh_mips
-#define helper_psrah helper_psrah_mips
-#define helper_pmullh helper_pmullh_mips
-#define helper_pmulhh helper_pmulhh_mips
-#define helper_pmulhuh helper_pmulhuh_mips
-#define helper_pmaddhw helper_pmaddhw_mips
-#define helper_pasubub helper_pasubub_mips
-#define helper_biadd helper_biadd_mips
-#define helper_pmovmskb helper_pmovmskb_mips
 #define helper_absq_s_ph helper_absq_s_ph_mips
 #define helper_absq_s_qb helper_absq_s_qb_mips
 #define helper_absq_s_w helper_absq_s_w_mips
+#define helper_absq_s_ob helper_absq_s_ob_mips
+#define helper_absq_s_qh helper_absq_s_qh_mips
+#define helper_absq_s_pw helper_absq_s_pw_mips
 #define helper_addqh_ph helper_addqh_ph_mips
 #define helper_addqh_r_ph helper_addqh_r_ph_mips
 #define helper_addqh_r_w helper_addqh_r_w_mips
@@ -3279,35 +1450,89 @@
 #define helper_subu_qb helper_subu_qb_mips
 #define helper_subu_s_ph helper_subu_s_ph_mips
 #define helper_subu_s_qb helper_subu_s_qb_mips
+#define helper_adduh_ob helper_adduh_ob_mips
+#define helper_adduh_r_ob helper_adduh_r_ob_mips
+#define helper_subuh_ob helper_subuh_ob_mips
+#define helper_subuh_r_ob helper_subuh_r_ob_mips
+#define helper_addq_pw helper_addq_pw_mips
+#define helper_addq_qh helper_addq_qh_mips
+#define helper_addq_s_pw helper_addq_s_pw_mips
+#define helper_addq_s_qh helper_addq_s_qh_mips
+#define helper_addu_ob helper_addu_ob_mips
+#define helper_addu_qh helper_addu_qh_mips
+#define helper_addu_s_ob helper_addu_s_ob_mips
+#define helper_addu_s_qh helper_addu_s_qh_mips
+#define helper_subq_pw helper_subq_pw_mips
+#define helper_subq_qh helper_subq_qh_mips
+#define helper_subq_s_pw helper_subq_s_pw_mips
+#define helper_subq_s_qh helper_subq_s_qh_mips
+#define helper_subu_ob helper_subu_ob_mips
+#define helper_subu_qh helper_subu_qh_mips
+#define helper_subu_s_ob helper_subu_s_ob_mips
+#define helper_subu_s_qh helper_subu_s_qh_mips
 #define helper_subuh_qb helper_subuh_qb_mips
 #define helper_subuh_r_qb helper_subuh_r_qb_mips
 #define helper_addsc helper_addsc_mips
 #define helper_addwc helper_addwc_mips
 #define helper_modsub helper_modsub_mips
 #define helper_raddu_w_qb helper_raddu_w_qb_mips
+#define helper_raddu_l_ob helper_raddu_l_ob_mips
 #define helper_precr_qb_ph helper_precr_qb_ph_mips
 #define helper_precrq_qb_ph helper_precrq_qb_ph_mips
 #define helper_precr_sra_ph_w helper_precr_sra_ph_w_mips
 #define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mips
 #define helper_precrq_ph_w helper_precrq_ph_w_mips
 #define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mips
+#define helper_precr_ob_qh helper_precr_ob_qh_mips
+#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips
+#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips
+#define helper_precrq_ob_qh helper_precrq_ob_qh_mips
+#define helper_precrq_qh_pw helper_precrq_qh_pw_mips
+#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips
+#define helper_precrq_pw_l helper_precrq_pw_l_mips
 #define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mips
+#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips
+#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips
+#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips
+#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips
+#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips
 #define helper_precequ_ph_qbl helper_precequ_ph_qbl_mips
 #define helper_precequ_ph_qbr helper_precequ_ph_qbr_mips
 #define helper_precequ_ph_qbla helper_precequ_ph_qbla_mips
 #define helper_precequ_ph_qbra helper_precequ_ph_qbra_mips
+#define helper_precequ_qh_obl helper_precequ_qh_obl_mips
+#define helper_precequ_qh_obr helper_precequ_qh_obr_mips
+#define helper_precequ_qh_obla helper_precequ_qh_obla_mips
+#define helper_precequ_qh_obra helper_precequ_qh_obra_mips
 #define helper_preceu_ph_qbl helper_preceu_ph_qbl_mips
 #define helper_preceu_ph_qbr helper_preceu_ph_qbr_mips
 #define helper_preceu_ph_qbla helper_preceu_ph_qbla_mips
 #define helper_preceu_ph_qbra helper_preceu_ph_qbra_mips
+#define helper_preceu_qh_obl helper_preceu_qh_obl_mips
+#define helper_preceu_qh_obr helper_preceu_qh_obr_mips
+#define helper_preceu_qh_obla helper_preceu_qh_obla_mips
+#define helper_preceu_qh_obra helper_preceu_qh_obra_mips
 #define helper_shll_qb helper_shll_qb_mips
 #define helper_shrl_qb helper_shrl_qb_mips
 #define helper_shra_qb helper_shra_qb_mips
 #define helper_shra_r_qb helper_shra_r_qb_mips
+#define helper_shll_ob helper_shll_ob_mips
+#define helper_shrl_ob helper_shrl_ob_mips
+#define helper_shra_ob helper_shra_ob_mips
+#define helper_shra_r_ob helper_shra_r_ob_mips
 #define helper_shll_ph helper_shll_ph_mips
 #define helper_shll_s_ph helper_shll_s_ph_mips
+#define helper_shll_qh helper_shll_qh_mips
+#define helper_shll_s_qh helper_shll_s_qh_mips
+#define helper_shrl_qh helper_shrl_qh_mips
+#define helper_shra_qh helper_shra_qh_mips
+#define helper_shra_r_qh helper_shra_r_qh_mips
 #define helper_shll_s_w helper_shll_s_w_mips
 #define helper_shra_r_w helper_shra_r_w_mips
+#define helper_shll_pw helper_shll_pw_mips
+#define helper_shll_s_pw helper_shll_s_pw_mips
+#define helper_shra_pw helper_shra_pw_mips
+#define helper_shra_r_pw helper_shra_r_pw_mips
 #define helper_shrl_ph helper_shrl_ph_mips
 #define helper_shra_ph helper_shra_ph_mips
 #define helper_shra_r_ph helper_shra_r_ph_mips
@@ -3321,10 +1546,20 @@
 #define helper_muleq_s_w_phr helper_muleq_s_w_phr_mips
 #define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mips
 #define helper_mulsa_w_ph helper_mulsa_w_ph_mips
+#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips
+#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips
+#define helper_mulq_rs_qh helper_mulq_rs_qh_mips
+#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips
+#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips
+#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips
 #define helper_dpau_h_qbl helper_dpau_h_qbl_mips
 #define helper_dpau_h_qbr helper_dpau_h_qbr_mips
 #define helper_dpsu_h_qbl helper_dpsu_h_qbl_mips
 #define helper_dpsu_h_qbr helper_dpsu_h_qbr_mips
+#define helper_dpau_h_obl helper_dpau_h_obl_mips
+#define helper_dpau_h_obr helper_dpau_h_obr_mips
+#define helper_dpsu_h_obl helper_dpsu_h_obl_mips
+#define helper_dpsu_h_obr helper_dpsu_h_obr_mips
 #define helper_dpa_w_ph helper_dpa_w_ph_mips
 #define helper_dpax_w_ph helper_dpax_w_ph_mips
 #define helper_dps_w_ph helper_dps_w_ph_mips
@@ -3335,200 +1570,92 @@
 #define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mips
 #define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mips
 #define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mips
+#define helper_dpa_w_qh helper_dpa_w_qh_mips
+#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips
+#define helper_dps_w_qh helper_dps_w_qh_mips
+#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips
 #define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mips
 #define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mips
+#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips
+#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips
+#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips
 #define helper_maq_s_w_phl helper_maq_s_w_phl_mips
 #define helper_maq_s_w_phr helper_maq_s_w_phr_mips
 #define helper_maq_sa_w_phl helper_maq_sa_w_phl_mips
 #define helper_maq_sa_w_phr helper_maq_sa_w_phr_mips
 #define helper_mulq_s_w helper_mulq_s_w_mips
 #define helper_mulq_rs_w helper_mulq_rs_w_mips
+#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips
+#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips
+#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips
+#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips
+#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips
+#define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips
+#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips
+#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips
+#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips
+#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips
+#define helper_dmadd helper_dmadd_mips
+#define helper_dmaddu helper_dmaddu_mips
+#define helper_dmsub helper_dmsub_mips
+#define helper_dmsubu helper_dmsubu_mips
 #define helper_bitrev helper_bitrev_mips
 #define helper_insv helper_insv_mips
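
The renaming pattern running through all of these hunks is mechanical: each line maps a global QEMU symbol to the same name suffixed with _mips, in a generated per-target header (in the Unicorn tree this appears to be qemu/mips.h). The removals cover code Unicorn2 no longer builds for this target (QAPI visitors, board models such as tosa, stray ARM VFP helpers), while the additions track the newer QEMU core's TCG, softmmu and DSP helpers. The suffix exists so that several softmmu targets, all compiled from the same common sources, can be linked into one libunicorn without duplicate-symbol errors. A minimal sketch of the mechanism in C follows; tlb_flush is a real symbol from this list, while the stand-alone file and the _arm counterpart mentioned in the comments are illustrative assumptions, not part of this patch:

    /* sketch.c - how a generated rename header avoids link collisions */
    #include <stdio.h>

    /* What the generated header does for every global symbol: */
    #define tlb_flush tlb_flush_mips

    /* Common code is written against the unprefixed name, but after the
     * #define above this function is really compiled as tlb_flush_mips. */
    void tlb_flush(void)
    {
        puts("flushing the MIPS softmmu TLB");
    }

    int main(void)
    {
        /* The call site is rewritten to tlb_flush_mips too, so an ARM
         * build of the same sources (with tlb_flush mapped to an assumed
         * tlb_flush_arm) can be linked into the same binary cleanly. */
        tlb_flush();
        return 0;
    }
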
+#define helper_dinsv helper_dinsv_mips #define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mips #define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mips #define helper_cmpgu_le_qb helper_cmpgu_le_qb_mips +#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips +#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips +#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips #define helper_cmpu_eq_qb helper_cmpu_eq_qb_mips #define helper_cmpu_lt_qb helper_cmpu_lt_qb_mips #define helper_cmpu_le_qb helper_cmpu_le_qb_mips #define helper_cmp_eq_ph helper_cmp_eq_ph_mips #define helper_cmp_lt_ph helper_cmp_lt_ph_mips #define helper_cmp_le_ph helper_cmp_le_ph_mips +#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips +#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips +#define helper_cmpu_le_ob helper_cmpu_le_ob_mips +#define helper_cmp_eq_qh helper_cmp_eq_qh_mips +#define helper_cmp_lt_qh helper_cmp_lt_qh_mips +#define helper_cmp_le_qh helper_cmp_le_qh_mips +#define helper_cmp_eq_pw helper_cmp_eq_pw_mips +#define helper_cmp_lt_pw helper_cmp_lt_pw_mips +#define helper_cmp_le_pw helper_cmp_le_pw_mips +#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips +#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips +#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips #define helper_pick_qb helper_pick_qb_mips #define helper_pick_ph helper_pick_ph_mips +#define helper_pick_ob helper_pick_ob_mips +#define helper_pick_qh helper_pick_qh_mips +#define helper_pick_pw helper_pick_pw_mips #define helper_packrl_ph helper_packrl_ph_mips +#define helper_packrl_pw helper_packrl_pw_mips #define helper_extr_w helper_extr_w_mips #define helper_extr_r_w helper_extr_r_w_mips #define helper_extr_rs_w helper_extr_rs_w_mips +#define helper_dextr_w helper_dextr_w_mips +#define helper_dextr_r_w helper_dextr_r_w_mips +#define helper_dextr_rs_w helper_dextr_rs_w_mips +#define helper_dextr_l helper_dextr_l_mips +#define helper_dextr_r_l helper_dextr_r_l_mips +#define helper_dextr_rs_l helper_dextr_rs_l_mips #define helper_extr_s_h helper_extr_s_h_mips +#define helper_dextr_s_h helper_dextr_s_h_mips #define helper_extp helper_extp_mips #define helper_extpdp helper_extpdp_mips +#define helper_dextp helper_dextp_mips +#define helper_dextpdp helper_dextpdp_mips #define helper_shilo helper_shilo_mips +#define helper_dshilo helper_dshilo_mips #define helper_mthlip helper_mthlip_mips +#define helper_dmthlip helper_dmthlip_mips #define cpu_wrdsp cpu_wrdsp_mips #define helper_wrdsp helper_wrdsp_mips #define cpu_rddsp cpu_rddsp_mips #define helper_rddsp helper_rddsp_mips -#define helper_raise_exception_err helper_raise_exception_err_mips -#define helper_clo helper_clo_mips -#define helper_clz helper_clz_mips -#define helper_muls helper_muls_mips -#define helper_mulsu helper_mulsu_mips -#define helper_macc helper_macc_mips -#define helper_macchi helper_macchi_mips -#define helper_maccu helper_maccu_mips -#define helper_macchiu helper_macchiu_mips -#define helper_msac helper_msac_mips -#define helper_msachi helper_msachi_mips -#define helper_msacu helper_msacu_mips -#define helper_msachiu helper_msachiu_mips -#define helper_mulhi helper_mulhi_mips -#define helper_mulhiu helper_mulhiu_mips -#define helper_mulshi helper_mulshi_mips -#define helper_mulshiu helper_mulshiu_mips -#define helper_bitswap helper_bitswap_mips -#define helper_ll helper_ll_mips -#define helper_sc helper_sc_mips -#define helper_swl helper_swl_mips -#define helper_swr helper_swr_mips -#define helper_lwm helper_lwm_mips -#define helper_swm helper_swm_mips -#define helper_mfc0_mvpcontrol 
helper_mfc0_mvpcontrol_mips -#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips -#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips -#define helper_mfc0_random helper_mfc0_random_mips -#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips -#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips -#define helper_mfc0_tcbind helper_mfc0_tcbind_mips -#define helper_mftc0_tcbind helper_mftc0_tcbind_mips -#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips -#define helper_mftc0_tcrestart helper_mftc0_tcrestart_mips -#define helper_mfc0_tchalt helper_mfc0_tchalt_mips -#define helper_mftc0_tchalt helper_mftc0_tchalt_mips -#define helper_mfc0_tccontext helper_mfc0_tccontext_mips -#define helper_mftc0_tccontext helper_mftc0_tccontext_mips -#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips -#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips -#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips -#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips -#define helper_mfc0_count helper_mfc0_count_mips -#define helper_mftc0_entryhi helper_mftc0_entryhi_mips -#define helper_mftc0_cause helper_mftc0_cause_mips -#define helper_mftc0_status helper_mftc0_status_mips -#define helper_mfc0_lladdr helper_mfc0_lladdr_mips -#define helper_mfc0_watchlo helper_mfc0_watchlo_mips -#define helper_mfc0_watchhi helper_mfc0_watchhi_mips -#define helper_mfc0_debug helper_mfc0_debug_mips -#define helper_mftc0_debug helper_mftc0_debug_mips -#define helper_mtc0_index helper_mtc0_index_mips -#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips -#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips -#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips -#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips -#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips -#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips -#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips -#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips -#define helper_mtc0_yqmask helper_mtc0_yqmask_mips -#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips -#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips -#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips -#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips -#define helper_mtc0_tcbind helper_mtc0_tcbind_mips -#define helper_mttc0_tcbind helper_mttc0_tcbind_mips -#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips -#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips -#define helper_mtc0_tchalt helper_mtc0_tchalt_mips -#define helper_mttc0_tchalt helper_mttc0_tchalt_mips -#define helper_mtc0_tccontext helper_mtc0_tccontext_mips -#define helper_mttc0_tccontext helper_mttc0_tccontext_mips -#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips -#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips -#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips -#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips -#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips -#define helper_mtc0_context helper_mtc0_context_mips -#define helper_mtc0_pagemask helper_mtc0_pagemask_mips -#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips -#define helper_mtc0_wired helper_mtc0_wired_mips -#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips -#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips -#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips -#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips -#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips -#define helper_mtc0_hwrena 
helper_mtc0_hwrena_mips -#define helper_mtc0_count helper_mtc0_count_mips -#define helper_mtc0_entryhi helper_mtc0_entryhi_mips -#define helper_mttc0_entryhi helper_mttc0_entryhi_mips -#define helper_mtc0_compare helper_mtc0_compare_mips -#define helper_mtc0_status helper_mtc0_status_mips -#define helper_mttc0_status helper_mttc0_status_mips -#define helper_mtc0_intctl helper_mtc0_intctl_mips -#define helper_mtc0_srsctl helper_mtc0_srsctl_mips -#define helper_mtc0_cause helper_mtc0_cause_mips -#define helper_mttc0_cause helper_mttc0_cause_mips -#define helper_mftc0_epc helper_mftc0_epc_mips -#define helper_mftc0_ebase helper_mftc0_ebase_mips -#define helper_mtc0_ebase helper_mtc0_ebase_mips -#define helper_mttc0_ebase helper_mttc0_ebase_mips -#define helper_mftc0_configx helper_mftc0_configx_mips -#define helper_mtc0_config0 helper_mtc0_config0_mips -#define helper_mtc0_config2 helper_mtc0_config2_mips -#define helper_mtc0_config4 helper_mtc0_config4_mips -#define helper_mtc0_config5 helper_mtc0_config5_mips -#define helper_mtc0_lladdr helper_mtc0_lladdr_mips -#define helper_mtc0_watchlo helper_mtc0_watchlo_mips -#define helper_mtc0_watchhi helper_mtc0_watchhi_mips -#define helper_mtc0_xcontext helper_mtc0_xcontext_mips -#define helper_mtc0_framemask helper_mtc0_framemask_mips -#define helper_mtc0_debug helper_mtc0_debug_mips -#define helper_mttc0_debug helper_mttc0_debug_mips -#define helper_mtc0_performance0 helper_mtc0_performance0_mips -#define helper_mtc0_taglo helper_mtc0_taglo_mips -#define helper_mtc0_datalo helper_mtc0_datalo_mips -#define helper_mtc0_taghi helper_mtc0_taghi_mips -#define helper_mtc0_datahi helper_mtc0_datahi_mips -#define helper_mftgpr helper_mftgpr_mips -#define helper_mftlo helper_mftlo_mips -#define helper_mfthi helper_mfthi_mips -#define helper_mftacx helper_mftacx_mips -#define helper_mftdsp helper_mftdsp_mips -#define helper_mttgpr helper_mttgpr_mips -#define helper_mttlo helper_mttlo_mips -#define helper_mtthi helper_mtthi_mips -#define helper_mttacx helper_mttacx_mips -#define helper_mttdsp helper_mttdsp_mips -#define helper_dmt helper_dmt_mips -#define helper_emt helper_emt_mips -#define helper_dvpe helper_dvpe_mips -#define helper_evpe helper_evpe_mips -#define helper_fork helper_fork_mips -#define helper_yield helper_yield_mips -#define r4k_helper_tlbinv r4k_helper_tlbinv_mips -#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips -#define r4k_helper_tlbwi r4k_helper_tlbwi_mips -#define r4k_helper_tlbwr r4k_helper_tlbwr_mips -#define r4k_helper_tlbp r4k_helper_tlbp_mips -#define r4k_helper_tlbr r4k_helper_tlbr_mips -#define helper_tlbwi helper_tlbwi_mips -#define helper_tlbwr helper_tlbwr_mips -#define helper_tlbp helper_tlbp_mips -#define helper_tlbr helper_tlbr_mips -#define helper_tlbinv helper_tlbinv_mips -#define helper_tlbinvf helper_tlbinvf_mips -#define helper_di helper_di_mips -#define helper_ei helper_ei_mips -#define helper_eret helper_eret_mips -#define helper_deret helper_deret_mips -#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips -#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips -#define helper_rdhwr_cc helper_rdhwr_cc_mips -#define helper_rdhwr_ccres helper_rdhwr_ccres_mips -#define helper_pmon helper_pmon_mips -#define helper_wait helper_wait_mips -#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips -#define mips_cpu_unassigned_access mips_cpu_unassigned_access_mips -#define ieee_rm ieee_rm_mips #define helper_cfc1 helper_cfc1_mips #define helper_ctc1 helper_ctc1_mips #define ieee_ex_to_mips 
ieee_ex_to_mips_mips
@@ -3537,8 +1664,8 @@
 #define helper_float_cvtd_s helper_float_cvtd_s_mips
 #define helper_float_cvtd_w helper_float_cvtd_w_mips
 #define helper_float_cvtd_l helper_float_cvtd_l_mips
-#define helper_float_cvtl_d helper_float_cvtl_d_mips
-#define helper_float_cvtl_s helper_float_cvtl_s_mips
+#define helper_float_cvt_l_d helper_float_cvt_l_d_mips
+#define helper_float_cvt_l_s helper_float_cvt_l_s_mips
 #define helper_float_cvtps_pw helper_float_cvtps_pw_mips
 #define helper_float_cvtpw_ps helper_float_cvtpw_ps_mips
 #define helper_float_cvts_d helper_float_cvts_d_mips
@@ -3546,46 +1673,50 @@
 #define helper_float_cvts_l helper_float_cvts_l_mips
 #define helper_float_cvts_pl helper_float_cvts_pl_mips
 #define helper_float_cvts_pu helper_float_cvts_pu_mips
-#define helper_float_cvtw_s helper_float_cvtw_s_mips
-#define helper_float_cvtw_d helper_float_cvtw_d_mips
-#define helper_float_roundl_d helper_float_roundl_d_mips
-#define helper_float_roundl_s helper_float_roundl_s_mips
-#define helper_float_roundw_d helper_float_roundw_d_mips
-#define helper_float_roundw_s helper_float_roundw_s_mips
-#define helper_float_truncl_d helper_float_truncl_d_mips
-#define helper_float_truncl_s helper_float_truncl_s_mips
-#define helper_float_truncw_d helper_float_truncw_d_mips
-#define helper_float_truncw_s helper_float_truncw_s_mips
-#define helper_float_ceill_d helper_float_ceill_d_mips
-#define helper_float_ceill_s helper_float_ceill_s_mips
-#define helper_float_ceilw_d helper_float_ceilw_d_mips
-#define helper_float_ceilw_s helper_float_ceilw_s_mips
-#define helper_float_floorl_d helper_float_floorl_d_mips
-#define helper_float_floorl_s helper_float_floorl_s_mips
-#define helper_float_floorw_d helper_float_floorw_d_mips
-#define helper_float_floorw_s helper_float_floorw_s_mips
+#define helper_float_cvt_w_s helper_float_cvt_w_s_mips
+#define helper_float_cvt_w_d helper_float_cvt_w_d_mips
+#define helper_float_round_l_d helper_float_round_l_d_mips
+#define helper_float_round_l_s helper_float_round_l_s_mips
+#define helper_float_round_w_d helper_float_round_w_d_mips
+#define helper_float_round_w_s helper_float_round_w_s_mips
+#define helper_float_trunc_l_d helper_float_trunc_l_d_mips
+#define helper_float_trunc_l_s helper_float_trunc_l_s_mips
+#define helper_float_trunc_w_d helper_float_trunc_w_d_mips
+#define helper_float_trunc_w_s helper_float_trunc_w_s_mips
+#define helper_float_ceil_l_d helper_float_ceil_l_d_mips
+#define helper_float_ceil_l_s helper_float_ceil_l_s_mips
+#define helper_float_ceil_w_d helper_float_ceil_w_d_mips
+#define helper_float_ceil_w_s helper_float_ceil_w_s_mips
+#define helper_float_floor_l_d helper_float_floor_l_d_mips
+#define helper_float_floor_l_s helper_float_floor_l_s_mips
+#define helper_float_floor_w_d helper_float_floor_w_d_mips
+#define helper_float_floor_w_s helper_float_floor_w_s_mips
+#define helper_float_cvt_2008_l_d helper_float_cvt_2008_l_d_mips
+#define helper_float_cvt_2008_l_s helper_float_cvt_2008_l_s_mips
+#define helper_float_cvt_2008_w_d helper_float_cvt_2008_w_d_mips
+#define helper_float_cvt_2008_w_s helper_float_cvt_2008_w_s_mips
+#define helper_float_round_2008_l_d helper_float_round_2008_l_d_mips
+#define helper_float_round_2008_l_s helper_float_round_2008_l_s_mips
+#define helper_float_round_2008_w_d helper_float_round_2008_w_d_mips
+#define helper_float_round_2008_w_s helper_float_round_2008_w_s_mips
+#define helper_float_trunc_2008_l_d helper_float_trunc_2008_l_d_mips
+#define helper_float_trunc_2008_l_s helper_float_trunc_2008_l_s_mips
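/* The -/+ pairs above follow upstream QEMU's renaming of the FPU
 * conversion helpers (helper_float_cvtl_d becomes helper_float_cvt_l_d,
 * and so on); the autogen entries simply track the new upstream names.
 * The run of *_2008 entries starting here (and continuing below) covers
 * the IEEE 754-2008 flavoured conversion helpers that newer QEMU adds
 * for the MIPS FCSR.NAN2008 / Release 6 behaviour. */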
+#define helper_float_trunc_2008_w_d helper_float_trunc_2008_w_d_mips +#define helper_float_trunc_2008_w_s helper_float_trunc_2008_w_s_mips +#define helper_float_ceil_2008_l_d helper_float_ceil_2008_l_d_mips +#define helper_float_ceil_2008_l_s helper_float_ceil_2008_l_s_mips +#define helper_float_ceil_2008_w_d helper_float_ceil_2008_w_d_mips +#define helper_float_ceil_2008_w_s helper_float_ceil_2008_w_s_mips +#define helper_float_floor_2008_l_d helper_float_floor_2008_l_d_mips +#define helper_float_floor_2008_l_s helper_float_floor_2008_l_s_mips +#define helper_float_floor_2008_w_d helper_float_floor_2008_w_d_mips +#define helper_float_floor_2008_w_s helper_float_floor_2008_w_s_mips #define helper_float_abs_d helper_float_abs_d_mips #define helper_float_abs_s helper_float_abs_s_mips #define helper_float_abs_ps helper_float_abs_ps_mips #define helper_float_chs_d helper_float_chs_d_mips #define helper_float_chs_s helper_float_chs_s_mips #define helper_float_chs_ps helper_float_chs_ps_mips -#define helper_float_maddf_s helper_float_maddf_s_mips -#define helper_float_maddf_d helper_float_maddf_d_mips -#define helper_float_msubf_s helper_float_msubf_s_mips -#define helper_float_msubf_d helper_float_msubf_d_mips -#define helper_float_max_s helper_float_max_s_mips -#define helper_float_max_d helper_float_max_d_mips -#define helper_float_maxa_s helper_float_maxa_s_mips -#define helper_float_maxa_d helper_float_maxa_d_mips -#define helper_float_min_s helper_float_min_s_mips -#define helper_float_min_d helper_float_min_d_mips -#define helper_float_mina_s helper_float_mina_s_mips -#define helper_float_mina_d helper_float_mina_d_mips -#define helper_float_rint_s helper_float_rint_s_mips -#define helper_float_rint_d helper_float_rint_d_mips -#define helper_float_class_s helper_float_class_s_mips -#define helper_float_class_d helper_float_class_d_mips #define helper_float_recip_d helper_float_recip_d_mips #define helper_float_recip_s helper_float_recip_s_mips #define helper_float_rsqrt_d helper_float_rsqrt_d_mips @@ -3596,6 +1727,12 @@ #define helper_float_rsqrt1_d helper_float_rsqrt1_d_mips #define helper_float_rsqrt1_s helper_float_rsqrt1_s_mips #define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mips +#define helper_float_rint_s helper_float_rint_s_mips +#define helper_float_rint_d helper_float_rint_d_mips +#define float_class_s float_class_s_mips +#define helper_float_class_s helper_float_class_s_mips +#define float_class_d float_class_d_mips +#define helper_float_class_d helper_float_class_d_mips #define helper_float_add_d helper_float_add_d_mips #define helper_float_add_s helper_float_add_s_mips #define helper_float_add_ps helper_float_add_ps_mips @@ -3608,6 +1745,22 @@ #define helper_float_div_d helper_float_div_d_mips #define helper_float_div_s helper_float_div_s_mips #define helper_float_div_ps helper_float_div_ps_mips +#define helper_float_recip2_d helper_float_recip2_d_mips +#define helper_float_recip2_s helper_float_recip2_s_mips +#define helper_float_recip2_ps helper_float_recip2_ps_mips +#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips +#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips +#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips +#define helper_float_addr_ps helper_float_addr_ps_mips +#define helper_float_mulr_ps helper_float_mulr_ps_mips +#define helper_float_max_s helper_float_max_s_mips +#define helper_float_max_d helper_float_max_d_mips +#define helper_float_maxa_s helper_float_maxa_s_mips +#define helper_float_maxa_d helper_float_maxa_d_mips +#define 
helper_float_min_s helper_float_min_s_mips +#define helper_float_min_d helper_float_min_d_mips +#define helper_float_mina_s helper_float_mina_s_mips +#define helper_float_mina_d helper_float_mina_d_mips #define helper_float_madd_d helper_float_madd_d_mips #define helper_float_madd_s helper_float_madd_s_mips #define helper_float_madd_ps helper_float_madd_ps_mips @@ -3620,14 +1773,10 @@ #define helper_float_nmsub_d helper_float_nmsub_d_mips #define helper_float_nmsub_s helper_float_nmsub_s_mips #define helper_float_nmsub_ps helper_float_nmsub_ps_mips -#define helper_float_recip2_d helper_float_recip2_d_mips -#define helper_float_recip2_s helper_float_recip2_s_mips -#define helper_float_recip2_ps helper_float_recip2_ps_mips -#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips -#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips -#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips -#define helper_float_addr_ps helper_float_addr_ps_mips -#define helper_float_mulr_ps helper_float_mulr_ps_mips +#define helper_float_maddf_s helper_float_maddf_s_mips +#define helper_float_maddf_d helper_float_maddf_d_mips +#define helper_float_msubf_s helper_float_msubf_s_mips +#define helper_float_msubf_d helper_float_msubf_d_mips #define helper_cmp_d_f helper_cmp_d_f_mips #define helper_cmpabs_d_f helper_cmpabs_d_f_mips #define helper_cmp_d_un helper_cmp_d_un_mips @@ -3768,161 +1917,475 @@ #define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mips #define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mips #define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mips -#define helper_msa_ld_df helper_msa_ld_df_mips -#define helper_msa_st_df helper_msa_st_df_mips #define no_mmu_map_address no_mmu_map_address_mips #define fixed_mmu_map_address fixed_mmu_map_address_mips #define r4k_map_address r4k_map_address_mips +#define cpu_mips_tlb_flush cpu_mips_tlb_flush_mips +#define sync_c0_status sync_c0_status_mips +#define cpu_mips_store_status cpu_mips_store_status_mips +#define cpu_mips_store_cause cpu_mips_store_cause_mips #define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mips -#define mips_cpu_handle_mmu_fault mips_cpu_handle_mmu_fault_mips +#define mips_cpu_tlb_fill mips_cpu_tlb_fill_mips #define cpu_mips_translate_address cpu_mips_translate_address_mips #define exception_resume_pc exception_resume_pc_mips #define mips_cpu_do_interrupt mips_cpu_do_interrupt_mips #define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mips #define r4k_invalidate_tlb r4k_invalidate_tlb_mips -#define helper_absq_s_ob helper_absq_s_ob_mips -#define helper_absq_s_qh helper_absq_s_qh_mips -#define helper_absq_s_pw helper_absq_s_pw_mips -#define helper_adduh_ob helper_adduh_ob_mips -#define helper_adduh_r_ob helper_adduh_r_ob_mips -#define helper_subuh_ob helper_subuh_ob_mips -#define helper_subuh_r_ob helper_subuh_r_ob_mips -#define helper_addq_pw helper_addq_pw_mips -#define helper_addq_qh helper_addq_qh_mips -#define helper_addq_s_pw helper_addq_s_pw_mips -#define helper_addq_s_qh helper_addq_s_qh_mips -#define helper_addu_ob helper_addu_ob_mips -#define helper_addu_qh helper_addu_qh_mips -#define helper_addu_s_ob helper_addu_s_ob_mips -#define helper_addu_s_qh helper_addu_s_qh_mips -#define helper_subq_pw helper_subq_pw_mips -#define helper_subq_qh helper_subq_qh_mips -#define helper_subq_s_pw helper_subq_s_pw_mips -#define helper_subq_s_qh helper_subq_s_qh_mips -#define helper_subu_ob helper_subu_ob_mips -#define helper_subu_qh helper_subu_qh_mips -#define helper_subu_s_ob helper_subu_s_ob_mips -#define helper_subu_s_qh 
helper_subu_s_qh_mips -#define helper_raddu_l_ob helper_raddu_l_ob_mips -#define helper_precr_ob_qh helper_precr_ob_qh_mips -#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips -#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips -#define helper_precrq_ob_qh helper_precrq_ob_qh_mips -#define helper_precrq_qh_pw helper_precrq_qh_pw_mips -#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips -#define helper_precrq_pw_l helper_precrq_pw_l_mips -#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips -#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips -#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips -#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips -#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips -#define helper_precequ_qh_obl helper_precequ_qh_obl_mips -#define helper_precequ_qh_obr helper_precequ_qh_obr_mips -#define helper_precequ_qh_obla helper_precequ_qh_obla_mips -#define helper_precequ_qh_obra helper_precequ_qh_obra_mips -#define helper_preceu_qh_obl helper_preceu_qh_obl_mips -#define helper_preceu_qh_obr helper_preceu_qh_obr_mips -#define helper_preceu_qh_obla helper_preceu_qh_obla_mips -#define helper_preceu_qh_obra helper_preceu_qh_obra_mips -#define helper_shll_ob helper_shll_ob_mips -#define helper_shrl_ob helper_shrl_ob_mips -#define helper_shra_ob helper_shra_ob_mips -#define helper_shra_r_ob helper_shra_r_ob_mips -#define helper_shll_qh helper_shll_qh_mips -#define helper_shll_s_qh helper_shll_s_qh_mips -#define helper_shrl_qh helper_shrl_qh_mips -#define helper_shra_qh helper_shra_qh_mips -#define helper_shra_r_qh helper_shra_r_qh_mips -#define helper_shll_pw helper_shll_pw_mips -#define helper_shll_s_pw helper_shll_s_pw_mips -#define helper_shra_pw helper_shra_pw_mips -#define helper_shra_r_pw helper_shra_r_pw_mips -#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips -#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips -#define helper_mulq_rs_qh helper_mulq_rs_qh_mips -#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips -#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips -#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips -#define helper_dpau_h_obl helper_dpau_h_obl_mips -#define helper_dpau_h_obr helper_dpau_h_obr_mips -#define helper_dpsu_h_obl helper_dpsu_h_obl_mips -#define helper_dpsu_h_obr helper_dpsu_h_obr_mips -#define helper_dpa_w_qh helper_dpa_w_qh_mips -#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips -#define helper_dps_w_qh helper_dps_w_qh_mips -#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips -#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips -#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips -#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips -#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips -#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips -#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips -#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips -#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips -#define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips -#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips -#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips -#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips -#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips -#define helper_dmadd helper_dmadd_mips -#define helper_dmaddu helper_dmaddu_mips -#define helper_dmsub helper_dmsub_mips -#define helper_dmsubu helper_dmsubu_mips -#define helper_dinsv helper_dinsv_mips -#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips -#define helper_cmpgu_lt_ob 
helper_cmpgu_lt_ob_mips -#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips -#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips -#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips -#define helper_cmpu_le_ob helper_cmpu_le_ob_mips -#define helper_cmp_eq_qh helper_cmp_eq_qh_mips -#define helper_cmp_lt_qh helper_cmp_lt_qh_mips -#define helper_cmp_le_qh helper_cmp_le_qh_mips -#define helper_cmp_eq_pw helper_cmp_eq_pw_mips -#define helper_cmp_lt_pw helper_cmp_lt_pw_mips -#define helper_cmp_le_pw helper_cmp_le_pw_mips -#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips -#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips -#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips -#define helper_pick_ob helper_pick_ob_mips -#define helper_pick_qh helper_pick_qh_mips -#define helper_pick_pw helper_pick_pw_mips -#define helper_packrl_pw helper_packrl_pw_mips -#define helper_dextr_w helper_dextr_w_mips -#define helper_dextr_r_w helper_dextr_r_w_mips -#define helper_dextr_rs_w helper_dextr_rs_w_mips -#define helper_dextr_l helper_dextr_l_mips -#define helper_dextr_r_l helper_dextr_r_l_mips -#define helper_dextr_rs_l helper_dextr_rs_l_mips -#define helper_dextr_s_h helper_dextr_s_h_mips -#define helper_dextp helper_dextp_mips -#define helper_dextpdp helper_dextpdp_mips -#define helper_dshilo helper_dshilo_mips -#define helper_dmthlip helper_dmthlip_mips -#define helper_dclo helper_dclo_mips -#define helper_dclz helper_dclz_mips +#define do_raise_exception_err do_raise_exception_err_mips +#define helper_paddsb helper_paddsb_mips +#define helper_paddusb helper_paddusb_mips +#define helper_paddsh helper_paddsh_mips +#define helper_paddush helper_paddush_mips +#define helper_paddb helper_paddb_mips +#define helper_paddh helper_paddh_mips +#define helper_paddw helper_paddw_mips +#define helper_psubsb helper_psubsb_mips +#define helper_psubusb helper_psubusb_mips +#define helper_psubsh helper_psubsh_mips +#define helper_psubush helper_psubush_mips +#define helper_psubb helper_psubb_mips +#define helper_psubh helper_psubh_mips +#define helper_psubw helper_psubw_mips +#define helper_pshufh helper_pshufh_mips +#define helper_packsswh helper_packsswh_mips +#define helper_packsshb helper_packsshb_mips +#define helper_packushb helper_packushb_mips +#define helper_punpcklwd helper_punpcklwd_mips +#define helper_punpckhwd helper_punpckhwd_mips +#define helper_punpcklhw helper_punpcklhw_mips +#define helper_punpckhhw helper_punpckhhw_mips +#define helper_punpcklbh helper_punpcklbh_mips +#define helper_punpckhbh helper_punpckhbh_mips +#define helper_pavgh helper_pavgh_mips +#define helper_pavgb helper_pavgb_mips +#define helper_pmaxsh helper_pmaxsh_mips +#define helper_pminsh helper_pminsh_mips +#define helper_pmaxub helper_pmaxub_mips +#define helper_pminub helper_pminub_mips +#define helper_pcmpeqw helper_pcmpeqw_mips +#define helper_pcmpgtw helper_pcmpgtw_mips +#define helper_pcmpeqh helper_pcmpeqh_mips +#define helper_pcmpgth helper_pcmpgth_mips +#define helper_pcmpeqb helper_pcmpeqb_mips +#define helper_pcmpgtb helper_pcmpgtb_mips +#define helper_psllw helper_psllw_mips +#define helper_psrlw helper_psrlw_mips +#define helper_psraw helper_psraw_mips +#define helper_psllh helper_psllh_mips +#define helper_psrlh helper_psrlh_mips +#define helper_psrah helper_psrah_mips +#define helper_pmullh helper_pmullh_mips +#define helper_pmulhh helper_pmulhh_mips +#define helper_pmulhuh helper_pmulhuh_mips +#define helper_pmaddhw helper_pmaddhw_mips +#define helper_pasubub helper_pasubub_mips +#define helper_biadd helper_biadd_mips 
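/* The helper_padd.../helper_psub.../helper_biadd block above matches
 * QEMU's Loongson multimedia (MMI) helpers; paddsb, pshufh, pmaddhw and
 * friends mirror the Loongson-2E/2F SIMD instruction names. They show
 * up here because the regenerated list covers every target-local symbol
 * in the newer QEMU base. */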
+#define helper_pmovmskb helper_pmovmskb_mips +#define helper_msa_nloc_b helper_msa_nloc_b_mips +#define helper_msa_nloc_h helper_msa_nloc_h_mips +#define helper_msa_nloc_w helper_msa_nloc_w_mips +#define helper_msa_nloc_d helper_msa_nloc_d_mips +#define helper_msa_nlzc_b helper_msa_nlzc_b_mips +#define helper_msa_nlzc_h helper_msa_nlzc_h_mips +#define helper_msa_nlzc_w helper_msa_nlzc_w_mips +#define helper_msa_nlzc_d helper_msa_nlzc_d_mips +#define helper_msa_pcnt_b helper_msa_pcnt_b_mips +#define helper_msa_pcnt_h helper_msa_pcnt_h_mips +#define helper_msa_pcnt_w helper_msa_pcnt_w_mips +#define helper_msa_pcnt_d helper_msa_pcnt_d_mips +#define helper_msa_binsl_b helper_msa_binsl_b_mips +#define helper_msa_binsl_h helper_msa_binsl_h_mips +#define helper_msa_binsl_w helper_msa_binsl_w_mips +#define helper_msa_binsl_d helper_msa_binsl_d_mips +#define helper_msa_binsr_b helper_msa_binsr_b_mips +#define helper_msa_binsr_h helper_msa_binsr_h_mips +#define helper_msa_binsr_w helper_msa_binsr_w_mips +#define helper_msa_binsr_d helper_msa_binsr_d_mips +#define helper_msa_bmnz_v helper_msa_bmnz_v_mips +#define helper_msa_bmz_v helper_msa_bmz_v_mips +#define helper_msa_bsel_v helper_msa_bsel_v_mips +#define helper_msa_bclr_b helper_msa_bclr_b_mips +#define helper_msa_bclr_h helper_msa_bclr_h_mips +#define helper_msa_bclr_w helper_msa_bclr_w_mips +#define helper_msa_bclr_d helper_msa_bclr_d_mips +#define helper_msa_bneg_b helper_msa_bneg_b_mips +#define helper_msa_bneg_h helper_msa_bneg_h_mips +#define helper_msa_bneg_w helper_msa_bneg_w_mips +#define helper_msa_bneg_d helper_msa_bneg_d_mips +#define helper_msa_bset_b helper_msa_bset_b_mips +#define helper_msa_bset_h helper_msa_bset_h_mips +#define helper_msa_bset_w helper_msa_bset_w_mips +#define helper_msa_bset_d helper_msa_bset_d_mips +#define helper_msa_add_a_b helper_msa_add_a_b_mips +#define helper_msa_add_a_h helper_msa_add_a_h_mips +#define helper_msa_add_a_w helper_msa_add_a_w_mips +#define helper_msa_add_a_d helper_msa_add_a_d_mips +#define helper_msa_adds_a_b helper_msa_adds_a_b_mips +#define helper_msa_adds_a_h helper_msa_adds_a_h_mips +#define helper_msa_adds_a_w helper_msa_adds_a_w_mips +#define helper_msa_adds_a_d helper_msa_adds_a_d_mips +#define helper_msa_adds_s_b helper_msa_adds_s_b_mips +#define helper_msa_adds_s_h helper_msa_adds_s_h_mips +#define helper_msa_adds_s_w helper_msa_adds_s_w_mips +#define helper_msa_adds_s_d helper_msa_adds_s_d_mips +#define helper_msa_adds_u_b helper_msa_adds_u_b_mips +#define helper_msa_adds_u_h helper_msa_adds_u_h_mips +#define helper_msa_adds_u_w helper_msa_adds_u_w_mips +#define helper_msa_adds_u_d helper_msa_adds_u_d_mips +#define helper_msa_addv_b helper_msa_addv_b_mips +#define helper_msa_addv_h helper_msa_addv_h_mips +#define helper_msa_addv_w helper_msa_addv_w_mips +#define helper_msa_addv_d helper_msa_addv_d_mips +#define helper_msa_hadd_s_h helper_msa_hadd_s_h_mips +#define helper_msa_hadd_s_w helper_msa_hadd_s_w_mips +#define helper_msa_hadd_s_d helper_msa_hadd_s_d_mips +#define helper_msa_hadd_u_h helper_msa_hadd_u_h_mips +#define helper_msa_hadd_u_w helper_msa_hadd_u_w_mips +#define helper_msa_hadd_u_d helper_msa_hadd_u_d_mips +#define helper_msa_ave_s_b helper_msa_ave_s_b_mips +#define helper_msa_ave_s_h helper_msa_ave_s_h_mips +#define helper_msa_ave_s_w helper_msa_ave_s_w_mips +#define helper_msa_ave_s_d helper_msa_ave_s_d_mips +#define helper_msa_ave_u_b helper_msa_ave_u_b_mips +#define helper_msa_ave_u_h helper_msa_ave_u_h_mips +#define helper_msa_ave_u_w helper_msa_ave_u_w_mips 
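/* From helper_msa_nloc_b onward the '+' entries are the MSA (MIPS SIMD
 * Architecture) helpers. The _b/_h/_w/_d suffixes are the element
 * widths (byte, halfword, word, doubleword), so each vector operation
 * contributes four renames; this is what makes the MSA portion of the
 * list so long. */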
+#define helper_msa_ave_u_d helper_msa_ave_u_d_mips +#define helper_msa_aver_s_b helper_msa_aver_s_b_mips +#define helper_msa_aver_s_h helper_msa_aver_s_h_mips +#define helper_msa_aver_s_w helper_msa_aver_s_w_mips +#define helper_msa_aver_s_d helper_msa_aver_s_d_mips +#define helper_msa_aver_u_b helper_msa_aver_u_b_mips +#define helper_msa_aver_u_h helper_msa_aver_u_h_mips +#define helper_msa_aver_u_w helper_msa_aver_u_w_mips +#define helper_msa_aver_u_d helper_msa_aver_u_d_mips +#define helper_msa_ceq_b helper_msa_ceq_b_mips +#define helper_msa_ceq_h helper_msa_ceq_h_mips +#define helper_msa_ceq_w helper_msa_ceq_w_mips +#define helper_msa_ceq_d helper_msa_ceq_d_mips +#define helper_msa_cle_s_b helper_msa_cle_s_b_mips +#define helper_msa_cle_s_h helper_msa_cle_s_h_mips +#define helper_msa_cle_s_w helper_msa_cle_s_w_mips +#define helper_msa_cle_s_d helper_msa_cle_s_d_mips +#define helper_msa_cle_u_b helper_msa_cle_u_b_mips +#define helper_msa_cle_u_h helper_msa_cle_u_h_mips +#define helper_msa_cle_u_w helper_msa_cle_u_w_mips +#define helper_msa_cle_u_d helper_msa_cle_u_d_mips +#define helper_msa_clt_s_b helper_msa_clt_s_b_mips +#define helper_msa_clt_s_h helper_msa_clt_s_h_mips +#define helper_msa_clt_s_w helper_msa_clt_s_w_mips +#define helper_msa_clt_s_d helper_msa_clt_s_d_mips +#define helper_msa_clt_u_b helper_msa_clt_u_b_mips +#define helper_msa_clt_u_h helper_msa_clt_u_h_mips +#define helper_msa_clt_u_w helper_msa_clt_u_w_mips +#define helper_msa_clt_u_d helper_msa_clt_u_d_mips +#define helper_msa_div_s_b helper_msa_div_s_b_mips +#define helper_msa_div_s_h helper_msa_div_s_h_mips +#define helper_msa_div_s_w helper_msa_div_s_w_mips +#define helper_msa_div_s_d helper_msa_div_s_d_mips +#define helper_msa_div_u_b helper_msa_div_u_b_mips +#define helper_msa_div_u_h helper_msa_div_u_h_mips +#define helper_msa_div_u_w helper_msa_div_u_w_mips +#define helper_msa_div_u_d helper_msa_div_u_d_mips +#define helper_msa_max_a_b helper_msa_max_a_b_mips +#define helper_msa_max_a_h helper_msa_max_a_h_mips +#define helper_msa_max_a_w helper_msa_max_a_w_mips +#define helper_msa_max_a_d helper_msa_max_a_d_mips +#define helper_msa_max_s_b helper_msa_max_s_b_mips +#define helper_msa_max_s_h helper_msa_max_s_h_mips +#define helper_msa_max_s_w helper_msa_max_s_w_mips +#define helper_msa_max_s_d helper_msa_max_s_d_mips +#define helper_msa_max_u_b helper_msa_max_u_b_mips +#define helper_msa_max_u_h helper_msa_max_u_h_mips +#define helper_msa_max_u_w helper_msa_max_u_w_mips +#define helper_msa_max_u_d helper_msa_max_u_d_mips +#define helper_msa_min_a_b helper_msa_min_a_b_mips +#define helper_msa_min_a_h helper_msa_min_a_h_mips +#define helper_msa_min_a_w helper_msa_min_a_w_mips +#define helper_msa_min_a_d helper_msa_min_a_d_mips +#define helper_msa_min_s_b helper_msa_min_s_b_mips +#define helper_msa_min_s_h helper_msa_min_s_h_mips +#define helper_msa_min_s_w helper_msa_min_s_w_mips +#define helper_msa_min_s_d helper_msa_min_s_d_mips +#define helper_msa_min_u_b helper_msa_min_u_b_mips +#define helper_msa_min_u_h helper_msa_min_u_h_mips +#define helper_msa_min_u_w helper_msa_min_u_w_mips +#define helper_msa_min_u_d helper_msa_min_u_d_mips +#define helper_msa_mod_s_b helper_msa_mod_s_b_mips +#define helper_msa_mod_s_h helper_msa_mod_s_h_mips +#define helper_msa_mod_s_w helper_msa_mod_s_w_mips +#define helper_msa_mod_s_d helper_msa_mod_s_d_mips +#define helper_msa_mod_u_b helper_msa_mod_u_b_mips +#define helper_msa_mod_u_h helper_msa_mod_u_h_mips +#define helper_msa_mod_u_w helper_msa_mod_u_w_mips +#define 
helper_msa_mod_u_d helper_msa_mod_u_d_mips +#define helper_msa_asub_s_b helper_msa_asub_s_b_mips +#define helper_msa_asub_s_h helper_msa_asub_s_h_mips +#define helper_msa_asub_s_w helper_msa_asub_s_w_mips +#define helper_msa_asub_s_d helper_msa_asub_s_d_mips +#define helper_msa_asub_u_b helper_msa_asub_u_b_mips +#define helper_msa_asub_u_h helper_msa_asub_u_h_mips +#define helper_msa_asub_u_w helper_msa_asub_u_w_mips +#define helper_msa_asub_u_d helper_msa_asub_u_d_mips +#define helper_msa_hsub_s_h helper_msa_hsub_s_h_mips +#define helper_msa_hsub_s_w helper_msa_hsub_s_w_mips +#define helper_msa_hsub_s_d helper_msa_hsub_s_d_mips +#define helper_msa_hsub_u_h helper_msa_hsub_u_h_mips +#define helper_msa_hsub_u_w helper_msa_hsub_u_w_mips +#define helper_msa_hsub_u_d helper_msa_hsub_u_d_mips +#define helper_msa_ilvev_b helper_msa_ilvev_b_mips +#define helper_msa_ilvev_h helper_msa_ilvev_h_mips +#define helper_msa_ilvev_w helper_msa_ilvev_w_mips +#define helper_msa_ilvev_d helper_msa_ilvev_d_mips +#define helper_msa_ilvod_b helper_msa_ilvod_b_mips +#define helper_msa_ilvod_h helper_msa_ilvod_h_mips +#define helper_msa_ilvod_w helper_msa_ilvod_w_mips +#define helper_msa_ilvod_d helper_msa_ilvod_d_mips +#define helper_msa_ilvl_b helper_msa_ilvl_b_mips +#define helper_msa_ilvl_h helper_msa_ilvl_h_mips +#define helper_msa_ilvl_w helper_msa_ilvl_w_mips +#define helper_msa_ilvl_d helper_msa_ilvl_d_mips +#define helper_msa_ilvr_b helper_msa_ilvr_b_mips +#define helper_msa_ilvr_h helper_msa_ilvr_h_mips +#define helper_msa_ilvr_w helper_msa_ilvr_w_mips +#define helper_msa_ilvr_d helper_msa_ilvr_d_mips +#define helper_msa_and_v helper_msa_and_v_mips +#define helper_msa_nor_v helper_msa_nor_v_mips +#define helper_msa_or_v helper_msa_or_v_mips +#define helper_msa_xor_v helper_msa_xor_v_mips +#define helper_msa_move_v helper_msa_move_v_mips +#define helper_msa_pckev_b helper_msa_pckev_b_mips +#define helper_msa_pckev_h helper_msa_pckev_h_mips +#define helper_msa_pckev_w helper_msa_pckev_w_mips +#define helper_msa_pckev_d helper_msa_pckev_d_mips +#define helper_msa_pckod_b helper_msa_pckod_b_mips +#define helper_msa_pckod_h helper_msa_pckod_h_mips +#define helper_msa_pckod_w helper_msa_pckod_w_mips +#define helper_msa_pckod_d helper_msa_pckod_d_mips +#define helper_msa_sll_b helper_msa_sll_b_mips +#define helper_msa_sll_h helper_msa_sll_h_mips +#define helper_msa_sll_w helper_msa_sll_w_mips +#define helper_msa_sll_d helper_msa_sll_d_mips +#define helper_msa_sra_b helper_msa_sra_b_mips +#define helper_msa_sra_h helper_msa_sra_h_mips +#define helper_msa_sra_w helper_msa_sra_w_mips +#define helper_msa_sra_d helper_msa_sra_d_mips +#define helper_msa_srar_b helper_msa_srar_b_mips +#define helper_msa_srar_h helper_msa_srar_h_mips +#define helper_msa_srar_w helper_msa_srar_w_mips +#define helper_msa_srar_d helper_msa_srar_d_mips +#define helper_msa_srl_b helper_msa_srl_b_mips +#define helper_msa_srl_h helper_msa_srl_h_mips +#define helper_msa_srl_w helper_msa_srl_w_mips +#define helper_msa_srl_d helper_msa_srl_d_mips +#define helper_msa_srlr_b helper_msa_srlr_b_mips +#define helper_msa_srlr_h helper_msa_srlr_h_mips +#define helper_msa_srlr_w helper_msa_srlr_w_mips +#define helper_msa_srlr_d helper_msa_srlr_d_mips +#define helper_msa_andi_b helper_msa_andi_b_mips +#define helper_msa_ori_b helper_msa_ori_b_mips +#define helper_msa_nori_b helper_msa_nori_b_mips +#define helper_msa_xori_b helper_msa_xori_b_mips +#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips +#define helper_msa_bmzi_b helper_msa_bmzi_b_mips 
+#define helper_msa_bseli_b helper_msa_bseli_b_mips +#define helper_msa_shf_df helper_msa_shf_df_mips +#define helper_msa_addvi_df helper_msa_addvi_df_mips +#define helper_msa_subvi_df helper_msa_subvi_df_mips +#define helper_msa_ceqi_df helper_msa_ceqi_df_mips +#define helper_msa_clei_s_df helper_msa_clei_s_df_mips +#define helper_msa_clei_u_df helper_msa_clei_u_df_mips +#define helper_msa_clti_s_df helper_msa_clti_s_df_mips +#define helper_msa_clti_u_df helper_msa_clti_u_df_mips +#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips +#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips +#define helper_msa_mini_s_df helper_msa_mini_s_df_mips +#define helper_msa_mini_u_df helper_msa_mini_u_df_mips +#define helper_msa_ldi_df helper_msa_ldi_df_mips +#define helper_msa_slli_df helper_msa_slli_df_mips +#define helper_msa_srai_df helper_msa_srai_df_mips +#define helper_msa_srli_df helper_msa_srli_df_mips +#define helper_msa_bclri_df helper_msa_bclri_df_mips +#define helper_msa_bseti_df helper_msa_bseti_df_mips +#define helper_msa_bnegi_df helper_msa_bnegi_df_mips +#define helper_msa_sat_s_df helper_msa_sat_s_df_mips +#define helper_msa_sat_u_df helper_msa_sat_u_df_mips +#define helper_msa_srari_df helper_msa_srari_df_mips +#define helper_msa_srlri_df helper_msa_srlri_df_mips +#define helper_msa_binsli_df helper_msa_binsli_df_mips +#define helper_msa_binsri_df helper_msa_binsri_df_mips +#define helper_msa_subv_df helper_msa_subv_df_mips +#define helper_msa_subs_s_df helper_msa_subs_s_df_mips +#define helper_msa_subs_u_df helper_msa_subs_u_df_mips +#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips +#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips +#define helper_msa_mulv_df helper_msa_mulv_df_mips +#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips +#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips +#define helper_msa_mul_q_df helper_msa_mul_q_df_mips +#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips +#define helper_msa_sld_df helper_msa_sld_df_mips +#define helper_msa_maddv_df helper_msa_maddv_df_mips +#define helper_msa_msubv_df helper_msa_msubv_df_mips +#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips +#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips +#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips +#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips +#define helper_msa_binsl_df helper_msa_binsl_df_mips +#define helper_msa_binsr_df helper_msa_binsr_df_mips +#define helper_msa_madd_q_df helper_msa_madd_q_df_mips +#define helper_msa_msub_q_df helper_msa_msub_q_df_mips +#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips +#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips +#define helper_msa_splat_df helper_msa_splat_df_mips +#define helper_msa_vshf_df helper_msa_vshf_df_mips +#define helper_msa_sldi_df helper_msa_sldi_df_mips +#define helper_msa_splati_df helper_msa_splati_df_mips +#define helper_msa_copy_s_b helper_msa_copy_s_b_mips +#define helper_msa_copy_s_h helper_msa_copy_s_h_mips +#define helper_msa_copy_s_w helper_msa_copy_s_w_mips +#define helper_msa_copy_s_d helper_msa_copy_s_d_mips +#define helper_msa_copy_u_b helper_msa_copy_u_b_mips +#define helper_msa_copy_u_h helper_msa_copy_u_h_mips +#define helper_msa_copy_u_w helper_msa_copy_u_w_mips +#define helper_msa_insert_b helper_msa_insert_b_mips +#define helper_msa_insert_h helper_msa_insert_h_mips +#define helper_msa_insert_w helper_msa_insert_w_mips +#define helper_msa_insert_d helper_msa_insert_d_mips +#define helper_msa_insve_df helper_msa_insve_df_mips 
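/* Unlike the per-width helpers above, the *_df helpers take the data
 * format (df) as a runtime operand, so a single helper -- and hence a
 * single rename -- covers all element widths for these operations. */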
+#define helper_msa_ctcmsa helper_msa_ctcmsa_mips +#define helper_msa_cfcmsa helper_msa_cfcmsa_mips +#define helper_msa_fill_df helper_msa_fill_df_mips +#define helper_msa_fcaf_df helper_msa_fcaf_df_mips +#define helper_msa_fcun_df helper_msa_fcun_df_mips +#define helper_msa_fceq_df helper_msa_fceq_df_mips +#define helper_msa_fcueq_df helper_msa_fcueq_df_mips +#define helper_msa_fclt_df helper_msa_fclt_df_mips +#define helper_msa_fcult_df helper_msa_fcult_df_mips +#define helper_msa_fcle_df helper_msa_fcle_df_mips +#define helper_msa_fcule_df helper_msa_fcule_df_mips +#define helper_msa_fsaf_df helper_msa_fsaf_df_mips +#define helper_msa_fsun_df helper_msa_fsun_df_mips +#define helper_msa_fseq_df helper_msa_fseq_df_mips +#define helper_msa_fsueq_df helper_msa_fsueq_df_mips +#define helper_msa_fslt_df helper_msa_fslt_df_mips +#define helper_msa_fsult_df helper_msa_fsult_df_mips +#define helper_msa_fsle_df helper_msa_fsle_df_mips +#define helper_msa_fsule_df helper_msa_fsule_df_mips +#define helper_msa_fcor_df helper_msa_fcor_df_mips +#define helper_msa_fcune_df helper_msa_fcune_df_mips +#define helper_msa_fcne_df helper_msa_fcne_df_mips +#define helper_msa_fsor_df helper_msa_fsor_df_mips +#define helper_msa_fsune_df helper_msa_fsune_df_mips +#define helper_msa_fsne_df helper_msa_fsne_df_mips +#define helper_msa_fadd_df helper_msa_fadd_df_mips +#define helper_msa_fsub_df helper_msa_fsub_df_mips +#define helper_msa_fmul_df helper_msa_fmul_df_mips +#define helper_msa_fdiv_df helper_msa_fdiv_df_mips +#define helper_msa_fmadd_df helper_msa_fmadd_df_mips +#define helper_msa_fmsub_df helper_msa_fmsub_df_mips +#define helper_msa_fexp2_df helper_msa_fexp2_df_mips +#define helper_msa_fexdo_df helper_msa_fexdo_df_mips +#define helper_msa_ftq_df helper_msa_ftq_df_mips +#define helper_msa_fmin_df helper_msa_fmin_df_mips +#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips +#define helper_msa_fmax_df helper_msa_fmax_df_mips +#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips +#define helper_msa_fclass_df helper_msa_fclass_df_mips +#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips +#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips +#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips +#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips +#define helper_msa_frcp_df helper_msa_frcp_df_mips +#define helper_msa_frint_df helper_msa_frint_df_mips +#define helper_msa_flog2_df helper_msa_flog2_df_mips +#define helper_msa_fexupl_df helper_msa_fexupl_df_mips +#define helper_msa_fexupr_df helper_msa_fexupr_df_mips +#define helper_msa_ffql_df helper_msa_ffql_df_mips +#define helper_msa_ffqr_df helper_msa_ffqr_df_mips +#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips +#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips +#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips +#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips +#define helper_raise_exception_err helper_raise_exception_err_mips +#define helper_raise_exception helper_raise_exception_mips +#define helper_raise_exception_debug helper_raise_exception_debug_mips +#define helper_muls helper_muls_mips +#define helper_mulsu helper_mulsu_mips +#define helper_macc helper_macc_mips +#define helper_macchi helper_macchi_mips +#define helper_maccu helper_maccu_mips +#define helper_macchiu helper_macchiu_mips +#define helper_msac helper_msac_mips +#define helper_msachi helper_msachi_mips +#define helper_msacu helper_msacu_mips +#define helper_msachiu helper_msachiu_mips +#define helper_mulhi helper_mulhi_mips +#define 
helper_mulhiu helper_mulhiu_mips +#define helper_mulshi helper_mulshi_mips +#define helper_mulshiu helper_mulshiu_mips #define helper_dbitswap helper_dbitswap_mips +#define helper_bitswap helper_bitswap_mips +#define helper_rotx helper_rotx_mips +#define helper_ll helper_ll_mips #define helper_lld helper_lld_mips -#define helper_scd helper_scd_mips +#define helper_swl helper_swl_mips +#define helper_swr helper_swr_mips #define helper_sdl helper_sdl_mips #define helper_sdr helper_sdr_mips +#define helper_lwm helper_lwm_mips +#define helper_swm helper_swm_mips #define helper_ldm helper_ldm_mips #define helper_sdm helper_sdm_mips -#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips -#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips -#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips -#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips -#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips -#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips -#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips -#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips -#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips +#define helper_fork helper_fork_mips +#define helper_yield helper_yield_mips +#define r4k_helper_tlbinv r4k_helper_tlbinv_mips +#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips +#define r4k_helper_tlbwi r4k_helper_tlbwi_mips +#define r4k_helper_tlbwr r4k_helper_tlbwr_mips +#define r4k_helper_tlbp r4k_helper_tlbp_mips +#define r4k_helper_tlbr r4k_helper_tlbr_mips +#define helper_tlbwi helper_tlbwi_mips +#define helper_tlbwr helper_tlbwr_mips +#define helper_tlbp helper_tlbp_mips +#define helper_tlbr helper_tlbr_mips +#define helper_tlbinv helper_tlbinv_mips +#define helper_tlbinvf helper_tlbinvf_mips +#define helper_ginvt helper_ginvt_mips +#define helper_di helper_di_mips +#define helper_ei helper_ei_mips +#define helper_eret helper_eret_mips +#define helper_eretnc helper_eretnc_mips +#define helper_deret helper_deret_mips +#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips +#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips +#define helper_rdhwr_cc helper_rdhwr_cc_mips +#define helper_rdhwr_ccres helper_rdhwr_ccres_mips +#define helper_rdhwr_performance helper_rdhwr_performance_mips +#define helper_rdhwr_xnp helper_rdhwr_xnp_mips +#define helper_pmon helper_pmon_mips +#define helper_wait helper_wait_mips +#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips +#define mips_cpu_do_transaction_failed mips_cpu_do_transaction_failed_mips +#define helper_msa_ld_b helper_msa_ld_b_mips +#define helper_msa_ld_h helper_msa_ld_h_mips +#define helper_msa_ld_w helper_msa_ld_w_mips +#define helper_msa_ld_d helper_msa_ld_d_mips +#define helper_msa_st_b helper_msa_st_b_mips +#define helper_msa_st_h helper_msa_st_h_mips +#define helper_msa_st_w helper_msa_st_w_mips +#define helper_msa_st_d helper_msa_st_d_mips +#define helper_cache helper_cache_mips +#define gen_intermediate_code gen_intermediate_code_mips +#define mips_tcg_init mips_tcg_init_mips +#define cpu_mips_realize_env cpu_mips_realize_env_mips +#define cpu_state_reset cpu_state_reset_mips +#define restore_state_to_opc restore_state_to_opc_mips #define mips_reg_reset mips_reg_reset_mips #define mips_reg_read mips_reg_read_mips #define mips_reg_write mips_reg_write_mips -#define mips_tcg_init mips_tcg_init_mips -#define mips_cpu_list mips_cpu_list_mips -#define mips_release mips_release_mips -#define MIPS64_REGS_STORAGE_SIZE MIPS64_REGS_STORAGE_SIZE_mips -#define 
MIPS_REGS_STORAGE_SIZE MIPS_REGS_STORAGE_SIZE_mips
+#define ieee_rm ieee_rm_mips
+#define mips_defs mips_defs_mips
+#define mips_defs_number mips_defs_number_mips
+#define gen_helper_float_class_s gen_helper_float_class_s_mips
+#define gen_helper_float_class_d gen_helper_float_class_d_mips
 #endif
diff --git a/qemu/mips64.h b/qemu/mips64.h
index e464d4b3..14769400 100644
--- a/qemu/mips64.h
+++ b/qemu/mips64.h
@@ -1,3260 +1,1431 @@
 /* Autogen header for Unicorn Engine - DONOT MODIFY */
-#ifndef UNICORN_AUTOGEN_MIPS64_H
-#define UNICORN_AUTOGEN_MIPS64_H
-#define arm_release arm_release_mips64
-#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_mips64
-#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_mips64
-#define use_idiv_instructions_rt use_idiv_instructions_rt_mips64
-#define tcg_target_deposit_valid tcg_target_deposit_valid_mips64
-#define helper_power_down helper_power_down_mips64
-#define check_exit_request check_exit_request_mips64
-#define address_space_unregister address_space_unregister_mips64
-#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips64
-#define phys_mem_clean phys_mem_clean_mips64
-#define tb_cleanup tb_cleanup_mips64
+#ifndef UNICORN_AUTOGEN_mips64_H
+#define UNICORN_AUTOGEN_mips64_H
+#ifndef UNICORN_ARCH_POSTFIX
+#define UNICORN_ARCH_POSTFIX _mips64
+#endif
+#define arm_arch arm_arch_mips64
+#define tb_target_set_jmp_target tb_target_set_jmp_target_mips64
+#define have_bmi1 have_bmi1_mips64
+#define have_popcnt have_popcnt_mips64
+#define have_avx1 have_avx1_mips64
+#define have_avx2 have_avx2_mips64
+#define have_isa have_isa_mips64
+#define have_altivec have_altivec_mips64
+#define have_vsx have_vsx_mips64
+#define flush_icache_range flush_icache_range_mips64
+#define s390_facilities s390_facilities_mips64
+#define tcg_dump_op tcg_dump_op_mips64
+#define tcg_dump_ops tcg_dump_ops_mips64
+#define tcg_gen_and_i64 tcg_gen_and_i64_mips64
+#define tcg_gen_discard_i64 tcg_gen_discard_i64_mips64
+#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_mips64
+#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_mips64
+#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_mips64
+#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_mips64
+#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_mips64
+#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_mips64
+#define tcg_gen_ld_i64 tcg_gen_ld_i64_mips64
+#define tcg_gen_mov_i64 tcg_gen_mov_i64_mips64
+#define tcg_gen_movi_i64 tcg_gen_movi_i64_mips64
+#define tcg_gen_mul_i64 tcg_gen_mul_i64_mips64
+#define tcg_gen_or_i64 tcg_gen_or_i64_mips64
+#define tcg_gen_sar_i64 tcg_gen_sar_i64_mips64
+#define tcg_gen_shl_i64 tcg_gen_shl_i64_mips64
+#define tcg_gen_shr_i64 tcg_gen_shr_i64_mips64
+#define tcg_gen_st_i64 tcg_gen_st_i64_mips64
+#define tcg_gen_xor_i64 tcg_gen_xor_i64_mips64
+#define cpu_icount_to_ns cpu_icount_to_ns_mips64
+#define cpu_is_stopped cpu_is_stopped_mips64
+#define cpu_get_ticks cpu_get_ticks_mips64
+#define cpu_get_clock cpu_get_clock_mips64
+#define cpu_resume cpu_resume_mips64
+#define qemu_init_vcpu qemu_init_vcpu_mips64
+#define cpu_stop_current cpu_stop_current_mips64
+#define resume_all_vcpus resume_all_vcpus_mips64
+#define vm_start vm_start_mips64
+#define address_space_dispatch_compact address_space_dispatch_compact_mips64
+#define flatview_translate flatview_translate_mips64
+#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips64
+#define qemu_get_cpu qemu_get_cpu_mips64
+#define cpu_address_space_init cpu_address_space_init_mips64
+#define cpu_get_address_space cpu_get_address_space_mips64
+#define
cpu_exec_unrealizefn cpu_exec_unrealizefn_mips64 +#define cpu_exec_initfn cpu_exec_initfn_mips64 +#define cpu_exec_realizefn cpu_exec_realizefn_mips64 +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips64 +#define cpu_watchpoint_insert cpu_watchpoint_insert_mips64 +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips64 +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips64 +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips64 +#define cpu_breakpoint_insert cpu_breakpoint_insert_mips64 +#define cpu_breakpoint_remove cpu_breakpoint_remove_mips64 +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips64 +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips64 +#define cpu_abort cpu_abort_mips64 +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mips64 +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips64 +#define flatview_add_to_dispatch flatview_add_to_dispatch_mips64 +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_mips64 +#define qemu_ram_get_offset qemu_ram_get_offset_mips64 +#define qemu_ram_get_used_length qemu_ram_get_used_length_mips64 +#define qemu_ram_is_shared qemu_ram_is_shared_mips64 +#define qemu_ram_pagesize qemu_ram_pagesize_mips64 +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips64 +#define qemu_ram_alloc qemu_ram_alloc_mips64 +#define qemu_ram_free qemu_ram_free_mips64 +#define qemu_map_ram_ptr qemu_map_ram_ptr_mips64 +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_mips64 +#define qemu_ram_block_from_host qemu_ram_block_from_host_mips64 +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips64 +#define cpu_check_watchpoint cpu_check_watchpoint_mips64 +#define iotlb_to_section iotlb_to_section_mips64 +#define address_space_dispatch_new address_space_dispatch_new_mips64 +#define address_space_dispatch_free address_space_dispatch_free_mips64 +#define flatview_read_continue flatview_read_continue_mips64 +#define address_space_read_full address_space_read_full_mips64 +#define address_space_write address_space_write_mips64 +#define address_space_rw address_space_rw_mips64 +#define cpu_physical_memory_rw cpu_physical_memory_rw_mips64 +#define address_space_write_rom address_space_write_rom_mips64 +#define cpu_flush_icache_range cpu_flush_icache_range_mips64 +#define cpu_exec_init_all cpu_exec_init_all_mips64 +#define address_space_access_valid address_space_access_valid_mips64 +#define address_space_map address_space_map_mips64 +#define address_space_unmap address_space_unmap_mips64 +#define cpu_physical_memory_map cpu_physical_memory_map_mips64 +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips64 +#define cpu_memory_rw_debug cpu_memory_rw_debug_mips64 +#define qemu_target_page_size qemu_target_page_size_mips64 +#define qemu_target_page_bits qemu_target_page_bits_mips64 +#define qemu_target_page_bits_min qemu_target_page_bits_min_mips64 +#define target_words_bigendian target_words_bigendian_mips64 +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips64 +#define ram_block_discard_range ram_block_discard_range_mips64 +#define ramblock_is_pmem ramblock_is_pmem_mips64 +#define page_size_init page_size_init_mips64 +#define set_preferred_target_page_bits set_preferred_target_page_bits_mips64 +#define finalize_target_page_bits finalize_target_page_bits_mips64 +#define cpu_outb cpu_outb_mips64 +#define cpu_outw cpu_outw_mips64 +#define cpu_outl cpu_outl_mips64 +#define 
cpu_inb cpu_inb_mips64 +#define cpu_inw cpu_inw_mips64 +#define cpu_inl cpu_inl_mips64 #define memory_map memory_map_mips64 +#define memory_map_io memory_map_io_mips64 #define memory_map_ptr memory_map_ptr_mips64 #define memory_unmap memory_unmap_mips64 #define memory_free memory_free_mips64 -#define free_code_gen_buffer free_code_gen_buffer_mips64 -#define helper_raise_exception helper_raise_exception_mips64 -#define tcg_enabled tcg_enabled_mips64 -#define tcg_exec_init tcg_exec_init_mips64 -#define memory_register_types memory_register_types_mips64 -#define cpu_exec_init_all cpu_exec_init_all_mips64 -#define vm_start vm_start_mips64 -#define resume_all_vcpus resume_all_vcpus_mips64 -#define a15_l2ctlr_read a15_l2ctlr_read_mips64 -#define a64_translate_init a64_translate_init_mips64 -#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_mips64 -#define aa64_cacheop_access aa64_cacheop_access_mips64 -#define aa64_daif_access aa64_daif_access_mips64 -#define aa64_daif_write aa64_daif_write_mips64 -#define aa64_dczid_read aa64_dczid_read_mips64 -#define aa64_fpcr_read aa64_fpcr_read_mips64 -#define aa64_fpcr_write aa64_fpcr_write_mips64 -#define aa64_fpsr_read aa64_fpsr_read_mips64 -#define aa64_fpsr_write aa64_fpsr_write_mips64 -#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_mips64 -#define aa64_zva_access aa64_zva_access_mips64 -#define aarch64_banked_spsr_index aarch64_banked_spsr_index_mips64 -#define aarch64_restore_sp aarch64_restore_sp_mips64 -#define aarch64_save_sp aarch64_save_sp_mips64 -#define accel_find accel_find_mips64 -#define accel_init_machine accel_init_machine_mips64 -#define accel_type accel_type_mips64 -#define access_with_adjusted_size access_with_adjusted_size_mips64 -#define add128 add128_mips64 -#define add16_sat add16_sat_mips64 -#define add16_usat add16_usat_mips64 -#define add192 add192_mips64 -#define add8_sat add8_sat_mips64 -#define add8_usat add8_usat_mips64 -#define add_cpreg_to_hashtable add_cpreg_to_hashtable_mips64 -#define add_cpreg_to_list add_cpreg_to_list_mips64 -#define addFloat128Sigs addFloat128Sigs_mips64 -#define addFloat32Sigs addFloat32Sigs_mips64 -#define addFloat64Sigs addFloat64Sigs_mips64 -#define addFloatx80Sigs addFloatx80Sigs_mips64 -#define add_qemu_ldst_label add_qemu_ldst_label_mips64 -#define address_space_access_valid address_space_access_valid_mips64 -#define address_space_destroy address_space_destroy_mips64 -#define address_space_destroy_dispatch address_space_destroy_dispatch_mips64 -#define address_space_get_flatview address_space_get_flatview_mips64 -#define address_space_init address_space_init_mips64 -#define address_space_init_dispatch address_space_init_dispatch_mips64 -#define address_space_lookup_region address_space_lookup_region_mips64 -#define address_space_map address_space_map_mips64 -#define address_space_read address_space_read_mips64 -#define address_space_rw address_space_rw_mips64 -#define address_space_translate address_space_translate_mips64 -#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips64 -#define address_space_translate_internal address_space_translate_internal_mips64 -#define address_space_unmap address_space_unmap_mips64 -#define address_space_update_topology address_space_update_topology_mips64 -#define address_space_update_topology_pass address_space_update_topology_pass_mips64 -#define address_space_write address_space_write_mips64 -#define addrrange_contains addrrange_contains_mips64 -#define addrrange_end addrrange_end_mips64 
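/* UNICORN_ARCH_POSTFIX, defined near the top of the regenerated header
 * as _mips64, exposes the per-target suffix to other code. A plausible
 * use -- a sketch only, not Unicorn's actual glue code -- is the usual
 * two-level token-pasting idiom for forming per-target symbol names: */

#include <stdio.h>

#define UNICORN_ARCH_POSTFIX _mips64 /* as in the autogen header */
#define XGLUE(a, b) a##b
#define GLUE(a, b) XGLUE(a, b) /* expand the arguments, then paste */

/* uc_init here is a hypothetical entry point, used only to illustrate
 * the pattern. */
#define uc_init GLUE(uc_init, UNICORN_ARCH_POSTFIX)

void uc_init(void) { puts("mips64 target initialised"); }

int main(void)
{
    uc_init(); /* the emitted and linked symbol is uc_init_mips64 */
    return 0;
}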
-#define addrrange_equal addrrange_equal_mips64
-#define addrrange_intersection addrrange_intersection_mips64
-#define addrrange_intersects addrrange_intersects_mips64
-#define addrrange_make addrrange_make_mips64
-#define adjust_endianness adjust_endianness_mips64
-#define all_helpers all_helpers_mips64
-#define alloc_code_gen_buffer alloc_code_gen_buffer_mips64
-#define alloc_entry alloc_entry_mips64
-#define always_true always_true_mips64
-#define arm1026_initfn arm1026_initfn_mips64
-#define arm1136_initfn arm1136_initfn_mips64
-#define arm1136_r2_initfn arm1136_r2_initfn_mips64
-#define arm1176_initfn arm1176_initfn_mips64
-#define arm11mpcore_initfn arm11mpcore_initfn_mips64
-#define arm926_initfn arm926_initfn_mips64
-#define arm946_initfn arm946_initfn_mips64
-#define arm_ccnt_enabled arm_ccnt_enabled_mips64
-#define arm_cp_read_zero arm_cp_read_zero_mips64
-#define arm_cp_reset_ignore arm_cp_reset_ignore_mips64
-#define arm_cpu_do_interrupt arm_cpu_do_interrupt_mips64
-#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_mips64
-#define arm_cpu_finalizefn arm_cpu_finalizefn_mips64
-#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_mips64
-#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_mips64
-#define arm_cpu_initfn arm_cpu_initfn_mips64
-#define arm_cpu_list arm_cpu_list_mips64
-#define cpu_loop_exit cpu_loop_exit_mips64
-#define arm_cpu_post_init arm_cpu_post_init_mips64
-#define arm_cpu_realizefn arm_cpu_realizefn_mips64
-#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_mips64
-#define arm_cpu_register_types arm_cpu_register_types_mips64
-#define cpu_resume_from_signal cpu_resume_from_signal_mips64
-#define arm_cpus arm_cpus_mips64
-#define arm_cpu_set_pc arm_cpu_set_pc_mips64
-#define arm_cp_write_ignore arm_cp_write_ignore_mips64
-#define arm_current_el arm_current_el_mips64
-#define arm_dc_feature arm_dc_feature_mips64
-#define arm_debug_excp_handler arm_debug_excp_handler_mips64
-#define arm_debug_target_el arm_debug_target_el_mips64
-#define arm_el_is_aa64 arm_el_is_aa64_mips64
-#define arm_env_get_cpu arm_env_get_cpu_mips64
-#define arm_excp_target_el arm_excp_target_el_mips64
-#define arm_excp_unmasked arm_excp_unmasked_mips64
-#define arm_feature arm_feature_mips64
-#define arm_generate_debug_exceptions arm_generate_debug_exceptions_mips64
-#define gen_intermediate_code gen_intermediate_code_mips64
-#define gen_intermediate_code_pc gen_intermediate_code_pc_mips64
-#define arm_gen_test_cc arm_gen_test_cc_mips64
-#define arm_gt_ptimer_cb arm_gt_ptimer_cb_mips64
-#define arm_gt_vtimer_cb arm_gt_vtimer_cb_mips64
-#define arm_handle_psci_call arm_handle_psci_call_mips64
-#define arm_is_psci_call arm_is_psci_call_mips64
-#define arm_is_secure arm_is_secure_mips64
-#define arm_is_secure_below_el3 arm_is_secure_below_el3_mips64
-#define arm_ldl_code arm_ldl_code_mips64
-#define arm_lduw_code arm_lduw_code_mips64
-#define arm_log_exception arm_log_exception_mips64
-#define arm_reg_read arm_reg_read_mips64
-#define arm_reg_reset arm_reg_reset_mips64
-#define arm_reg_write arm_reg_write_mips64
-#define restore_state_to_opc restore_state_to_opc_mips64
-#define arm_rmode_to_sf arm_rmode_to_sf_mips64
-#define arm_singlestep_active arm_singlestep_active_mips64
-#define tlb_fill tlb_fill_mips64
-#define tlb_flush tlb_flush_mips64
-#define tlb_flush_page tlb_flush_page_mips64
-#define tlb_set_page tlb_set_page_mips64
-#define arm_translate_init arm_translate_init_mips64
-#define arm_v7m_class_init arm_v7m_class_init_mips64
-#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_mips64
-#define ats_access ats_access_mips64
-#define ats_write ats_write_mips64
-#define bad_mode_switch bad_mode_switch_mips64
-#define bank_number bank_number_mips64
-#define bitmap_zero_extend bitmap_zero_extend_mips64
-#define bp_wp_matches bp_wp_matches_mips64
-#define breakpoint_invalidate breakpoint_invalidate_mips64
-#define build_page_bitmap build_page_bitmap_mips64
-#define bus_add_child bus_add_child_mips64
-#define bus_class_init bus_class_init_mips64
-#define bus_info bus_info_mips64
-#define bus_unparent bus_unparent_mips64
-#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_mips64
-#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_mips64
-#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_mips64
-#define call_recip_estimate call_recip_estimate_mips64
-#define can_merge can_merge_mips64
-#define capacity_increase capacity_increase_mips64
-#define ccsidr_read ccsidr_read_mips64
-#define check_ap check_ap_mips64
-#define check_breakpoints check_breakpoints_mips64
-#define check_watchpoints check_watchpoints_mips64
-#define cho cho_mips64
-#define clear_bit clear_bit_mips64
-#define clz32 clz32_mips64
-#define clz64 clz64_mips64
-#define cmp_flatrange_addr cmp_flatrange_addr_mips64
-#define code_gen_alloc code_gen_alloc_mips64
-#define commonNaNToFloat128 commonNaNToFloat128_mips64
-#define commonNaNToFloat16 commonNaNToFloat16_mips64
-#define commonNaNToFloat32 commonNaNToFloat32_mips64
-#define commonNaNToFloat64 commonNaNToFloat64_mips64
-#define commonNaNToFloatx80 commonNaNToFloatx80_mips64
-#define compute_abs_deadline compute_abs_deadline_mips64
-#define cond_name cond_name_mips64
-#define configure_accelerator configure_accelerator_mips64
-#define container_get container_get_mips64
-#define container_info container_info_mips64
-#define container_register_types container_register_types_mips64
-#define contextidr_write contextidr_write_mips64
-#define core_log_global_start core_log_global_start_mips64
-#define core_log_global_stop core_log_global_stop_mips64
-#define core_memory_listener core_memory_listener_mips64
-#define cortexa15_cp_reginfo cortexa15_cp_reginfo_mips64
-#define cortex_a15_initfn cortex_a15_initfn_mips64
-#define cortexa8_cp_reginfo cortexa8_cp_reginfo_mips64
-#define cortex_a8_initfn cortex_a8_initfn_mips64
-#define cortexa9_cp_reginfo cortexa9_cp_reginfo_mips64
-#define cortex_a9_initfn cortex_a9_initfn_mips64
-#define cortex_m3_initfn cortex_m3_initfn_mips64
-#define count_cpreg count_cpreg_mips64
-#define countLeadingZeros32 countLeadingZeros32_mips64
-#define countLeadingZeros64 countLeadingZeros64_mips64
-#define cp_access_ok cp_access_ok_mips64
-#define cpacr_write cpacr_write_mips64
-#define cpreg_field_is_64bit cpreg_field_is_64bit_mips64
-#define cp_reginfo cp_reginfo_mips64
-#define cpreg_key_compare cpreg_key_compare_mips64
-#define cpreg_make_keylist cpreg_make_keylist_mips64
-#define cp_reg_reset cp_reg_reset_mips64
-#define cpreg_to_kvm_id cpreg_to_kvm_id_mips64
-#define cpsr_read cpsr_read_mips64
-#define cpsr_write cpsr_write_mips64
-#define cptype_valid cptype_valid_mips64
-#define cpu_abort cpu_abort_mips64
-#define cpu_arm_exec cpu_arm_exec_mips64
-#define cpu_arm_gen_code cpu_arm_gen_code_mips64
-#define cpu_arm_init cpu_arm_init_mips64
-#define cpu_breakpoint_insert cpu_breakpoint_insert_mips64
-#define cpu_breakpoint_remove cpu_breakpoint_remove_mips64
-#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips64
-#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips64
-#define cpu_can_do_io cpu_can_do_io_mips64
-#define cpu_can_run cpu_can_run_mips64
-#define cpu_class_init cpu_class_init_mips64
-#define cpu_common_class_by_name cpu_common_class_by_name_mips64
-#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mips64
-#define cpu_common_get_arch_id cpu_common_get_arch_id_mips64
-#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_mips64
-#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_mips64
-#define cpu_common_has_work cpu_common_has_work_mips64
-#define cpu_common_initfn cpu_common_initfn_mips64
-#define cpu_common_noop cpu_common_noop_mips64
-#define cpu_common_parse_features cpu_common_parse_features_mips64
-#define cpu_common_realizefn cpu_common_realizefn_mips64
-#define cpu_common_reset cpu_common_reset_mips64
-#define cpu_dump_statistics cpu_dump_statistics_mips64
-#define cpu_exec_init cpu_exec_init_mips64
-#define cpu_flush_icache_range cpu_flush_icache_range_mips64
-#define cpu_gen_init cpu_gen_init_mips64
-#define cpu_get_clock cpu_get_clock_mips64
-#define cpu_get_real_ticks cpu_get_real_ticks_mips64
-#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_mips64
-#define cpu_handle_debug_exception cpu_handle_debug_exception_mips64
-#define cpu_handle_guest_debug cpu_handle_guest_debug_mips64
-#define cpu_inb cpu_inb_mips64
-#define cpu_inl cpu_inl_mips64
-#define cpu_interrupt cpu_interrupt_mips64
-#define cpu_interrupt_handler cpu_interrupt_handler_mips64
-#define cpu_inw cpu_inw_mips64
-#define cpu_io_recompile cpu_io_recompile_mips64
-#define cpu_is_stopped cpu_is_stopped_mips64
-#define cpu_ldl_code cpu_ldl_code_mips64
-#define cpu_ldub_code cpu_ldub_code_mips64
-#define cpu_lduw_code cpu_lduw_code_mips64
-#define cpu_memory_rw_debug cpu_memory_rw_debug_mips64
-#define cpu_mmu_index cpu_mmu_index_mips64
-#define cpu_outb cpu_outb_mips64
-#define cpu_outl cpu_outl_mips64
-#define cpu_outw cpu_outw_mips64
-#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mips64
-#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_mips64
-#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mips64
-#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mips64
-#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mips64
-#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips64
-#define cpu_physical_memory_map cpu_physical_memory_map_mips64
-#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mips64
-#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mips64
-#define cpu_physical_memory_rw cpu_physical_memory_rw_mips64
-#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mips64
-#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mips64
-#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips64
-#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mips64
-#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mips64
-#define cpu_register cpu_register_mips64
-#define cpu_register_types cpu_register_types_mips64
-#define cpu_restore_state cpu_restore_state_mips64
-#define cpu_restore_state_from_tb cpu_restore_state_from_tb_mips64
-#define cpu_single_step cpu_single_step_mips64
-#define cpu_tb_exec cpu_tb_exec_mips64
-#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_mips64
-#define cpu_to_be64 cpu_to_be64_mips64
-#define cpu_to_le32 cpu_to_le32_mips64
-#define cpu_to_le64 cpu_to_le64_mips64
-#define cpu_type_info cpu_type_info_mips64
-#define cpu_unassigned_access cpu_unassigned_access_mips64
-#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips64
-#define cpu_watchpoint_insert cpu_watchpoint_insert_mips64
-#define cpu_watchpoint_remove cpu_watchpoint_remove_mips64
-#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips64
-#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips64
-#define crc32c_table crc32c_table_mips64
-#define create_new_memory_mapping create_new_memory_mapping_mips64
-#define csselr_write csselr_write_mips64
-#define cto32 cto32_mips64
-#define ctr_el0_access ctr_el0_access_mips64
-#define ctz32 ctz32_mips64
-#define ctz64 ctz64_mips64
-#define dacr_write dacr_write_mips64
-#define dbgbcr_write dbgbcr_write_mips64
-#define dbgbvr_write dbgbvr_write_mips64
-#define dbgwcr_write dbgwcr_write_mips64
-#define dbgwvr_write dbgwvr_write_mips64
-#define debug_cp_reginfo debug_cp_reginfo_mips64
-#define debug_frame debug_frame_mips64
-#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_mips64
-#define define_arm_cp_regs define_arm_cp_regs_mips64
-#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_mips64
-#define define_debug_regs define_debug_regs_mips64
-#define define_one_arm_cp_reg define_one_arm_cp_reg_mips64
-#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_mips64
-#define deposit32 deposit32_mips64
-#define deposit64 deposit64_mips64
-#define deregister_tm_clones deregister_tm_clones_mips64
-#define device_class_base_init device_class_base_init_mips64
-#define device_class_init device_class_init_mips64
-#define device_finalize device_finalize_mips64
-#define device_get_realized device_get_realized_mips64
-#define device_initfn device_initfn_mips64
-#define device_post_init device_post_init_mips64
-#define device_reset device_reset_mips64
-#define device_set_realized device_set_realized_mips64
-#define device_type_info device_type_info_mips64
-#define disas_arm_insn disas_arm_insn_mips64
-#define disas_coproc_insn disas_coproc_insn_mips64
-#define disas_dsp_insn disas_dsp_insn_mips64
-#define disas_iwmmxt_insn disas_iwmmxt_insn_mips64
-#define disas_neon_data_insn disas_neon_data_insn_mips64
-#define disas_neon_ls_insn disas_neon_ls_insn_mips64
-#define disas_thumb2_insn disas_thumb2_insn_mips64
-#define disas_thumb_insn disas_thumb_insn_mips64
-#define disas_vfp_insn disas_vfp_insn_mips64
-#define disas_vfp_v8_insn disas_vfp_v8_insn_mips64
-#define do_arm_semihosting do_arm_semihosting_mips64
-#define do_clz16 do_clz16_mips64
-#define do_clz8 do_clz8_mips64
-#define do_constant_folding do_constant_folding_mips64
-#define do_constant_folding_2 do_constant_folding_2_mips64
-#define do_constant_folding_cond do_constant_folding_cond_mips64
-#define do_constant_folding_cond2 do_constant_folding_cond2_mips64
-#define do_constant_folding_cond_32 do_constant_folding_cond_32_mips64
-#define do_constant_folding_cond_64 do_constant_folding_cond_64_mips64
-#define do_constant_folding_cond_eq do_constant_folding_cond_eq_mips64
-#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_mips64
-#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_mips64
-#define do_ssat do_ssat_mips64
-#define do_usad do_usad_mips64
-#define do_usat do_usat_mips64
-#define do_v7m_exception_exit do_v7m_exception_exit_mips64
-#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_mips64
-#define dummy_func dummy_func_mips64
-#define dummy_section dummy_section_mips64
-#define _DYNAMIC _DYNAMIC_mips64
-#define _edata _edata_mips64
-#define _end _end_mips64
-#define end_list end_list_mips64
-#define eq128 eq128_mips64
-#define ErrorClass_lookup ErrorClass_lookup_mips64
-#define error_copy error_copy_mips64
-#define error_exit error_exit_mips64
-#define error_get_class error_get_class_mips64
-#define error_get_pretty error_get_pretty_mips64
-#define error_setg_file_open error_setg_file_open_mips64
-#define estimateDiv128To64 estimateDiv128To64_mips64
-#define estimateSqrt32 estimateSqrt32_mips64
-#define excnames excnames_mips64
-#define excp_is_internal excp_is_internal_mips64
-#define extended_addresses_enabled extended_addresses_enabled_mips64
-#define extended_mpu_ap_bits extended_mpu_ap_bits_mips64
-#define extract32 extract32_mips64
-#define extract64 extract64_mips64
-#define extractFloat128Exp extractFloat128Exp_mips64
-#define extractFloat128Frac0 extractFloat128Frac0_mips64
-#define extractFloat128Frac1 extractFloat128Frac1_mips64
-#define extractFloat128Sign extractFloat128Sign_mips64
-#define extractFloat16Exp extractFloat16Exp_mips64
-#define extractFloat16Frac extractFloat16Frac_mips64
-#define extractFloat16Sign extractFloat16Sign_mips64
-#define extractFloat32Exp extractFloat32Exp_mips64
-#define extractFloat32Frac extractFloat32Frac_mips64
-#define extractFloat32Sign extractFloat32Sign_mips64
-#define extractFloat64Exp extractFloat64Exp_mips64
-#define extractFloat64Frac extractFloat64Frac_mips64
-#define extractFloat64Sign extractFloat64Sign_mips64
-#define extractFloatx80Exp extractFloatx80Exp_mips64
-#define extractFloatx80Frac extractFloatx80Frac_mips64
-#define extractFloatx80Sign extractFloatx80Sign_mips64
-#define fcse_write fcse_write_mips64
-#define find_better_copy find_better_copy_mips64
-#define find_default_machine find_default_machine_mips64
-#define find_desc_by_name find_desc_by_name_mips64
-#define find_first_bit find_first_bit_mips64
-#define find_paging_enabled_cpu find_paging_enabled_cpu_mips64
-#define find_ram_block find_ram_block_mips64
-#define find_ram_offset find_ram_offset_mips64
-#define find_string find_string_mips64
-#define find_type find_type_mips64
-#define _fini _fini_mips64
-#define flatrange_equal flatrange_equal_mips64
-#define flatview_destroy flatview_destroy_mips64
-#define flatview_init flatview_init_mips64
-#define flatview_insert flatview_insert_mips64
-#define flatview_lookup flatview_lookup_mips64
-#define flatview_ref flatview_ref_mips64
-#define flatview_simplify flatview_simplify_mips64
 #define flatview_unref flatview_unref_mips64
-#define float128_add float128_add_mips64
-#define float128_compare float128_compare_mips64
-#define float128_compare_internal float128_compare_internal_mips64
-#define float128_compare_quiet float128_compare_quiet_mips64
-#define float128_default_nan float128_default_nan_mips64
-#define float128_div float128_div_mips64
-#define float128_eq float128_eq_mips64
-#define float128_eq_quiet float128_eq_quiet_mips64
-#define float128_is_quiet_nan float128_is_quiet_nan_mips64
-#define float128_is_signaling_nan float128_is_signaling_nan_mips64
-#define float128_le float128_le_mips64
-#define float128_le_quiet float128_le_quiet_mips64
-#define float128_lt float128_lt_mips64
-#define float128_lt_quiet float128_lt_quiet_mips64
-#define float128_maybe_silence_nan float128_maybe_silence_nan_mips64
-#define float128_mul float128_mul_mips64
-#define float128_rem float128_rem_mips64
-#define float128_round_to_int float128_round_to_int_mips64
-#define float128_scalbn float128_scalbn_mips64
-#define float128_sqrt float128_sqrt_mips64
-#define float128_sub float128_sub_mips64
-#define float128ToCommonNaN float128ToCommonNaN_mips64
-#define float128_to_float32 float128_to_float32_mips64
-#define float128_to_float64 float128_to_float64_mips64
-#define float128_to_floatx80 float128_to_floatx80_mips64
-#define float128_to_int32 float128_to_int32_mips64
-#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips64
-#define float128_to_int64 float128_to_int64_mips64
-#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips64
-#define float128_unordered float128_unordered_mips64
-#define float128_unordered_quiet float128_unordered_quiet_mips64
-#define float16_default_nan float16_default_nan_mips64
+#define address_space_get_flatview address_space_get_flatview_mips64
+#define memory_region_transaction_begin memory_region_transaction_begin_mips64
+#define memory_region_transaction_commit memory_region_transaction_commit_mips64
+#define memory_region_init memory_region_init_mips64
+#define memory_region_access_valid memory_region_access_valid_mips64
+#define memory_region_dispatch_read memory_region_dispatch_read_mips64
+#define memory_region_dispatch_write memory_region_dispatch_write_mips64
+#define memory_region_init_io memory_region_init_io_mips64
+#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips64
+#define memory_region_size memory_region_size_mips64
+#define memory_region_set_readonly memory_region_set_readonly_mips64
+#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips64
+#define memory_region_from_host memory_region_from_host_mips64
+#define memory_region_get_ram_addr memory_region_get_ram_addr_mips64
+#define memory_region_add_subregion memory_region_add_subregion_mips64
+#define memory_region_del_subregion memory_region_del_subregion_mips64
+#define memory_region_find memory_region_find_mips64
+#define memory_listener_register memory_listener_register_mips64
+#define memory_listener_unregister memory_listener_unregister_mips64
+#define address_space_remove_listeners address_space_remove_listeners_mips64
+#define address_space_init address_space_init_mips64
+#define address_space_destroy address_space_destroy_mips64
+#define memory_region_init_ram memory_region_init_ram_mips64
+#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips64
+#define exec_inline_op exec_inline_op_mips64
+#define floatx80_default_nan floatx80_default_nan_mips64
+#define float_raise float_raise_mips64
 #define float16_is_quiet_nan float16_is_quiet_nan_mips64
 #define float16_is_signaling_nan float16_is_signaling_nan_mips64
-#define float16_maybe_silence_nan float16_maybe_silence_nan_mips64
-#define float16ToCommonNaN float16ToCommonNaN_mips64
-#define float16_to_float32 float16_to_float32_mips64
-#define float16_to_float64 float16_to_float64_mips64
-#define float32_abs float32_abs_mips64
-#define float32_add float32_add_mips64
-#define float32_chs float32_chs_mips64
-#define float32_compare float32_compare_mips64
-#define float32_compare_internal float32_compare_internal_mips64
-#define float32_compare_quiet float32_compare_quiet_mips64
-#define float32_default_nan float32_default_nan_mips64
-#define float32_div float32_div_mips64
-#define float32_eq float32_eq_mips64
-#define float32_eq_quiet float32_eq_quiet_mips64
-#define float32_exp2 float32_exp2_mips64
-#define float32_exp2_coefficients float32_exp2_coefficients_mips64
-#define float32_is_any_nan float32_is_any_nan_mips64
-#define float32_is_infinity float32_is_infinity_mips64
-#define float32_is_neg float32_is_neg_mips64
 #define float32_is_quiet_nan float32_is_quiet_nan_mips64
 #define float32_is_signaling_nan float32_is_signaling_nan_mips64
-#define float32_is_zero float32_is_zero_mips64
-#define float32_is_zero_or_denormal float32_is_zero_or_denormal_mips64
-#define float32_le float32_le_mips64
-#define float32_le_quiet float32_le_quiet_mips64
-#define float32_log2 float32_log2_mips64
-#define float32_lt float32_lt_mips64
-#define float32_lt_quiet float32_lt_quiet_mips64
+#define float64_is_quiet_nan float64_is_quiet_nan_mips64
+#define float64_is_signaling_nan float64_is_signaling_nan_mips64
+#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips64
+#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips64
+#define floatx80_silence_nan floatx80_silence_nan_mips64
+#define propagateFloatx80NaN propagateFloatx80NaN_mips64
+#define float128_is_quiet_nan float128_is_quiet_nan_mips64
+#define float128_is_signaling_nan float128_is_signaling_nan_mips64
+#define float128_silence_nan float128_silence_nan_mips64
+#define float16_add float16_add_mips64
+#define float16_sub float16_sub_mips64
+#define float32_add float32_add_mips64
+#define float32_sub float32_sub_mips64
+#define float64_add float64_add_mips64
+#define float64_sub float64_sub_mips64
+#define float16_mul float16_mul_mips64
+#define float32_mul float32_mul_mips64
+#define float64_mul float64_mul_mips64
+#define float16_muladd float16_muladd_mips64
+#define float32_muladd float32_muladd_mips64
+#define float64_muladd float64_muladd_mips64
+#define float16_div float16_div_mips64
+#define float32_div float32_div_mips64
+#define float64_div float64_div_mips64
+#define float16_to_float32 float16_to_float32_mips64
+#define float16_to_float64 float16_to_float64_mips64
+#define float32_to_float16 float32_to_float16_mips64
+#define float32_to_float64 float32_to_float64_mips64
+#define float64_to_float16 float64_to_float16_mips64
+#define float64_to_float32 float64_to_float32_mips64
+#define float16_round_to_int float16_round_to_int_mips64
+#define float32_round_to_int float32_round_to_int_mips64
+#define float64_round_to_int float64_round_to_int_mips64
+#define float16_to_int16_scalbn float16_to_int16_scalbn_mips64
+#define float16_to_int32_scalbn float16_to_int32_scalbn_mips64
+#define float16_to_int64_scalbn float16_to_int64_scalbn_mips64
+#define float32_to_int16_scalbn float32_to_int16_scalbn_mips64
+#define float32_to_int32_scalbn float32_to_int32_scalbn_mips64
+#define float32_to_int64_scalbn float32_to_int64_scalbn_mips64
+#define float64_to_int16_scalbn float64_to_int16_scalbn_mips64
+#define float64_to_int32_scalbn float64_to_int32_scalbn_mips64
+#define float64_to_int64_scalbn float64_to_int64_scalbn_mips64
+#define float16_to_int16 float16_to_int16_mips64
+#define float16_to_int32 float16_to_int32_mips64
+#define float16_to_int64 float16_to_int64_mips64
+#define float32_to_int16 float32_to_int16_mips64
+#define float32_to_int32 float32_to_int32_mips64
+#define float32_to_int64 float32_to_int64_mips64
+#define float64_to_int16 float64_to_int16_mips64
+#define float64_to_int32 float64_to_int32_mips64
+#define float64_to_int64 float64_to_int64_mips64
+#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_mips64
+#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_mips64
+#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_mips64
+#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips64
+#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips64
+#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips64
+#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips64
+#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips64
+#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips64
+#define float16_to_uint16_scalbn float16_to_uint16_scalbn_mips64
+#define float16_to_uint32_scalbn float16_to_uint32_scalbn_mips64
+#define float16_to_uint64_scalbn float16_to_uint64_scalbn_mips64
+#define float32_to_uint16_scalbn float32_to_uint16_scalbn_mips64
+#define float32_to_uint32_scalbn float32_to_uint32_scalbn_mips64
+#define float32_to_uint64_scalbn float32_to_uint64_scalbn_mips64
+#define float64_to_uint16_scalbn float64_to_uint16_scalbn_mips64
+#define float64_to_uint32_scalbn float64_to_uint32_scalbn_mips64
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_mips64
+#define float16_to_uint16 float16_to_uint16_mips64
+#define float16_to_uint32 float16_to_uint32_mips64
+#define float16_to_uint64 float16_to_uint64_mips64
+#define float32_to_uint16 float32_to_uint16_mips64
+#define float32_to_uint32 float32_to_uint32_mips64
+#define float32_to_uint64 float32_to_uint64_mips64
+#define float64_to_uint16 float64_to_uint16_mips64
+#define float64_to_uint32 float64_to_uint32_mips64
+#define float64_to_uint64 float64_to_uint64_mips64
+#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_mips64
+#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_mips64
+#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_mips64
+#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips64
+#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips64
+#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips64
+#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips64
+#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips64
+#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips64
+#define int64_to_float16_scalbn int64_to_float16_scalbn_mips64
+#define int32_to_float16_scalbn int32_to_float16_scalbn_mips64
+#define int16_to_float16_scalbn int16_to_float16_scalbn_mips64
+#define int64_to_float16 int64_to_float16_mips64
+#define int32_to_float16 int32_to_float16_mips64
+#define int16_to_float16 int16_to_float16_mips64
+#define int64_to_float32_scalbn int64_to_float32_scalbn_mips64
+#define int32_to_float32_scalbn int32_to_float32_scalbn_mips64
+#define int16_to_float32_scalbn int16_to_float32_scalbn_mips64
+#define int64_to_float32 int64_to_float32_mips64
+#define int32_to_float32 int32_to_float32_mips64
+#define int16_to_float32 int16_to_float32_mips64
+#define int64_to_float64_scalbn int64_to_float64_scalbn_mips64
+#define int32_to_float64_scalbn int32_to_float64_scalbn_mips64
+#define int16_to_float64_scalbn int16_to_float64_scalbn_mips64
+#define int64_to_float64 int64_to_float64_mips64
+#define int32_to_float64 int32_to_float64_mips64
+#define int16_to_float64 int16_to_float64_mips64
+#define uint64_to_float16_scalbn uint64_to_float16_scalbn_mips64
+#define uint32_to_float16_scalbn uint32_to_float16_scalbn_mips64
+#define uint16_to_float16_scalbn uint16_to_float16_scalbn_mips64
+#define uint64_to_float16 uint64_to_float16_mips64
+#define uint32_to_float16 uint32_to_float16_mips64
+#define uint16_to_float16 uint16_to_float16_mips64
+#define uint64_to_float32_scalbn uint64_to_float32_scalbn_mips64
+#define uint32_to_float32_scalbn uint32_to_float32_scalbn_mips64
+#define uint16_to_float32_scalbn uint16_to_float32_scalbn_mips64
+#define uint64_to_float32 uint64_to_float32_mips64
+#define uint32_to_float32 uint32_to_float32_mips64
+#define uint16_to_float32 uint16_to_float32_mips64
+#define uint64_to_float64_scalbn uint64_to_float64_scalbn_mips64
+#define uint32_to_float64_scalbn uint32_to_float64_scalbn_mips64
+#define uint16_to_float64_scalbn uint16_to_float64_scalbn_mips64
+#define uint64_to_float64 uint64_to_float64_mips64
+#define uint32_to_float64 uint32_to_float64_mips64
+#define uint16_to_float64 uint16_to_float64_mips64
+#define float16_min float16_min_mips64
+#define float16_minnum float16_minnum_mips64
+#define float16_minnummag float16_minnummag_mips64
+#define float16_max float16_max_mips64
+#define float16_maxnum float16_maxnum_mips64
+#define float16_maxnummag float16_maxnummag_mips64
+#define float32_min float32_min_mips64
+#define float32_minnum float32_minnum_mips64
+#define float32_minnummag float32_minnummag_mips64
 #define float32_max float32_max_mips64
 #define float32_maxnum float32_maxnum_mips64
 #define float32_maxnummag float32_maxnummag_mips64
-#define float32_maybe_silence_nan float32_maybe_silence_nan_mips64
-#define float32_min float32_min_mips64
-#define float32_minmax float32_minmax_mips64
-#define float32_minnum float32_minnum_mips64
-#define float32_minnummag float32_minnummag_mips64
-#define float32_mul float32_mul_mips64
-#define float32_muladd float32_muladd_mips64
-#define float32_rem float32_rem_mips64
-#define float32_round_to_int float32_round_to_int_mips64
-#define float32_scalbn float32_scalbn_mips64
-#define float32_set_sign float32_set_sign_mips64
-#define float32_sqrt float32_sqrt_mips64
-#define float32_squash_input_denormal float32_squash_input_denormal_mips64
-#define float32_sub float32_sub_mips64
-#define float32ToCommonNaN float32ToCommonNaN_mips64
-#define float32_to_float128 float32_to_float128_mips64
-#define float32_to_float16 float32_to_float16_mips64
-#define float32_to_float64 float32_to_float64_mips64
-#define float32_to_floatx80 float32_to_floatx80_mips64
-#define float32_to_int16 float32_to_int16_mips64
-#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips64
-#define float32_to_int32 float32_to_int32_mips64
-#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips64
-#define float32_to_int64 float32_to_int64_mips64
-#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips64
-#define float32_to_uint16 float32_to_uint16_mips64
-#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips64
-#define float32_to_uint32 float32_to_uint32_mips64
-#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips64
-#define float32_to_uint64 float32_to_uint64_mips64
-#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips64
-#define float32_unordered float32_unordered_mips64
-#define float32_unordered_quiet float32_unordered_quiet_mips64
-#define float64_abs float64_abs_mips64
-#define float64_add float64_add_mips64
-#define float64_chs float64_chs_mips64
-#define float64_compare float64_compare_mips64
-#define float64_compare_internal float64_compare_internal_mips64
-#define float64_compare_quiet float64_compare_quiet_mips64
-#define float64_default_nan float64_default_nan_mips64
-#define float64_div float64_div_mips64
-#define float64_eq float64_eq_mips64
-#define float64_eq_quiet float64_eq_quiet_mips64
-#define float64_is_any_nan float64_is_any_nan_mips64
-#define float64_is_infinity float64_is_infinity_mips64
-#define float64_is_neg float64_is_neg_mips64
-#define float64_is_quiet_nan float64_is_quiet_nan_mips64
-#define float64_is_signaling_nan float64_is_signaling_nan_mips64
-#define float64_is_zero float64_is_zero_mips64
-#define float64_le float64_le_mips64
-#define float64_le_quiet float64_le_quiet_mips64
-#define float64_log2 float64_log2_mips64
-#define float64_lt float64_lt_mips64
-#define float64_lt_quiet float64_lt_quiet_mips64
+#define float64_min float64_min_mips64
+#define float64_minnum float64_minnum_mips64
+#define float64_minnummag float64_minnummag_mips64
 #define float64_max float64_max_mips64
 #define float64_maxnum float64_maxnum_mips64
 #define float64_maxnummag float64_maxnummag_mips64
-#define float64_maybe_silence_nan float64_maybe_silence_nan_mips64
-#define float64_min float64_min_mips64
-#define float64_minmax float64_minmax_mips64
-#define float64_minnum float64_minnum_mips64
-#define float64_minnummag float64_minnummag_mips64
-#define float64_mul float64_mul_mips64
-#define float64_muladd float64_muladd_mips64
-#define float64_rem float64_rem_mips64
-#define float64_round_to_int float64_round_to_int_mips64
+#define float16_compare float16_compare_mips64
+#define float16_compare_quiet float16_compare_quiet_mips64
+#define float32_compare float32_compare_mips64
+#define float32_compare_quiet float32_compare_quiet_mips64
+#define float64_compare float64_compare_mips64
+#define float64_compare_quiet float64_compare_quiet_mips64
+#define float16_scalbn float16_scalbn_mips64
+#define float32_scalbn float32_scalbn_mips64
 #define float64_scalbn float64_scalbn_mips64
-#define float64_set_sign float64_set_sign_mips64
+#define float16_sqrt float16_sqrt_mips64
+#define float32_sqrt float32_sqrt_mips64
 #define float64_sqrt float64_sqrt_mips64
+#define float16_default_nan float16_default_nan_mips64
+#define float32_default_nan float32_default_nan_mips64
+#define float64_default_nan float64_default_nan_mips64
+#define float128_default_nan float128_default_nan_mips64
+#define float16_silence_nan float16_silence_nan_mips64
+#define float32_silence_nan float32_silence_nan_mips64
+#define float64_silence_nan float64_silence_nan_mips64
+#define float16_squash_input_denormal float16_squash_input_denormal_mips64
+#define float32_squash_input_denormal float32_squash_input_denormal_mips64
 #define float64_squash_input_denormal float64_squash_input_denormal_mips64
-#define float64_sub float64_sub_mips64
-#define float64ToCommonNaN float64ToCommonNaN_mips64
-#define float64_to_float128 float64_to_float128_mips64
-#define float64_to_float16 float64_to_float16_mips64
-#define float64_to_float32 float64_to_float32_mips64
+#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips64
+#define roundAndPackFloatx80 roundAndPackFloatx80_mips64
+#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips64
+#define int32_to_floatx80 int32_to_floatx80_mips64
+#define int32_to_float128 int32_to_float128_mips64
+#define int64_to_floatx80 int64_to_floatx80_mips64
+#define int64_to_float128 int64_to_float128_mips64
+#define uint64_to_float128 uint64_to_float128_mips64
+#define float32_to_floatx80 float32_to_floatx80_mips64
+#define float32_to_float128 float32_to_float128_mips64
+#define float32_rem float32_rem_mips64
+#define float32_exp2 float32_exp2_mips64
+#define float32_log2 float32_log2_mips64
+#define float32_eq float32_eq_mips64
+#define float32_le float32_le_mips64
+#define float32_lt float32_lt_mips64
+#define float32_unordered float32_unordered_mips64
+#define float32_eq_quiet float32_eq_quiet_mips64
+#define float32_le_quiet float32_le_quiet_mips64
+#define float32_lt_quiet float32_lt_quiet_mips64
+#define float32_unordered_quiet float32_unordered_quiet_mips64
 #define float64_to_floatx80 float64_to_floatx80_mips64
-#define float64_to_int16 float64_to_int16_mips64
-#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips64
-#define float64_to_int32 float64_to_int32_mips64
-#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips64
-#define float64_to_int64 float64_to_int64_mips64
-#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips64
-#define float64_to_uint16 float64_to_uint16_mips64
-#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips64
-#define float64_to_uint32 float64_to_uint32_mips64
-#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips64
-#define float64_to_uint64 float64_to_uint64_mips64
-#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips64
-#define float64_trunc_to_int float64_trunc_to_int_mips64
+#define float64_to_float128 float64_to_float128_mips64
+#define float64_rem float64_rem_mips64
+#define float64_log2 float64_log2_mips64
+#define float64_eq float64_eq_mips64
+#define float64_le float64_le_mips64
+#define float64_lt float64_lt_mips64
 #define float64_unordered float64_unordered_mips64
+#define float64_eq_quiet float64_eq_quiet_mips64
+#define float64_le_quiet float64_le_quiet_mips64
+#define float64_lt_quiet float64_lt_quiet_mips64
 #define float64_unordered_quiet float64_unordered_quiet_mips64
-#define float_raise float_raise_mips64
-#define floatx80_add floatx80_add_mips64
-#define floatx80_compare floatx80_compare_mips64
-#define floatx80_compare_internal floatx80_compare_internal_mips64
-#define floatx80_compare_quiet floatx80_compare_quiet_mips64
-#define floatx80_default_nan floatx80_default_nan_mips64
-#define floatx80_div floatx80_div_mips64
-#define floatx80_eq floatx80_eq_mips64
-#define floatx80_eq_quiet floatx80_eq_quiet_mips64
-#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips64
-#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips64
-#define floatx80_le floatx80_le_mips64
-#define floatx80_le_quiet floatx80_le_quiet_mips64
-#define floatx80_lt floatx80_lt_mips64
-#define floatx80_lt_quiet floatx80_lt_quiet_mips64
-#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_mips64
-#define floatx80_mul floatx80_mul_mips64
-#define floatx80_rem floatx80_rem_mips64
-#define floatx80_round_to_int floatx80_round_to_int_mips64
-#define floatx80_scalbn floatx80_scalbn_mips64
-#define floatx80_sqrt floatx80_sqrt_mips64
-#define floatx80_sub floatx80_sub_mips64
-#define floatx80ToCommonNaN floatx80ToCommonNaN_mips64
-#define floatx80_to_float128 floatx80_to_float128_mips64
-#define floatx80_to_float32 floatx80_to_float32_mips64
-#define floatx80_to_float64 floatx80_to_float64_mips64
 #define floatx80_to_int32 floatx80_to_int32_mips64
 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mips64
 #define floatx80_to_int64 floatx80_to_int64_mips64
 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mips64
+#define floatx80_to_float32 floatx80_to_float32_mips64
+#define floatx80_to_float64 floatx80_to_float64_mips64
+#define floatx80_to_float128 floatx80_to_float128_mips64
+#define floatx80_round floatx80_round_mips64
+#define floatx80_round_to_int floatx80_round_to_int_mips64
+#define floatx80_add floatx80_add_mips64
+#define floatx80_sub floatx80_sub_mips64
+#define floatx80_mul floatx80_mul_mips64
+#define floatx80_div floatx80_div_mips64
+#define floatx80_rem floatx80_rem_mips64
+#define floatx80_sqrt floatx80_sqrt_mips64
+#define floatx80_eq floatx80_eq_mips64
+#define floatx80_le floatx80_le_mips64
+#define floatx80_lt floatx80_lt_mips64
 #define floatx80_unordered floatx80_unordered_mips64
+#define floatx80_eq_quiet floatx80_eq_quiet_mips64
+#define floatx80_le_quiet floatx80_le_quiet_mips64
+#define floatx80_lt_quiet floatx80_lt_quiet_mips64
 #define floatx80_unordered_quiet floatx80_unordered_quiet_mips64
-#define flush_icache_range flush_icache_range_mips64
-#define format_string format_string_mips64
-#define fp_decode_rm fp_decode_rm_mips64
-#define frame_dummy frame_dummy_mips64
-#define free_range free_range_mips64
-#define fstat64 fstat64_mips64
-#define futex_wait futex_wait_mips64
-#define futex_wake futex_wake_mips64
-#define gen_aa32_ld16s gen_aa32_ld16s_mips64
-#define gen_aa32_ld16u gen_aa32_ld16u_mips64
-#define gen_aa32_ld32u gen_aa32_ld32u_mips64
-#define gen_aa32_ld64 gen_aa32_ld64_mips64
-#define gen_aa32_ld8s gen_aa32_ld8s_mips64
-#define gen_aa32_ld8u gen_aa32_ld8u_mips64
-#define gen_aa32_st16 gen_aa32_st16_mips64
-#define gen_aa32_st32 gen_aa32_st32_mips64
-#define gen_aa32_st64 gen_aa32_st64_mips64
-#define gen_aa32_st8 gen_aa32_st8_mips64
-#define gen_adc gen_adc_mips64
-#define gen_adc_CC gen_adc_CC_mips64
-#define gen_add16 gen_add16_mips64
-#define gen_add_carry gen_add_carry_mips64
-#define gen_add_CC gen_add_CC_mips64
-#define gen_add_datah_offset gen_add_datah_offset_mips64
-#define gen_add_data_offset gen_add_data_offset_mips64
-#define gen_addq gen_addq_mips64
-#define gen_addq_lo gen_addq_lo_mips64
-#define gen_addq_msw gen_addq_msw_mips64
-#define gen_arm_parallel_addsub gen_arm_parallel_addsub_mips64
-#define gen_arm_shift_im gen_arm_shift_im_mips64
-#define gen_arm_shift_reg gen_arm_shift_reg_mips64
-#define gen_bx gen_bx_mips64
-#define gen_bx_im gen_bx_im_mips64
-#define gen_clrex gen_clrex_mips64
-#define generate_memory_topology generate_memory_topology_mips64
-#define generic_timer_cp_reginfo generic_timer_cp_reginfo_mips64
-#define gen_exception gen_exception_mips64
-#define gen_exception_insn gen_exception_insn_mips64
-#define gen_exception_internal gen_exception_internal_mips64
-#define gen_exception_internal_insn gen_exception_internal_insn_mips64
-#define gen_exception_return gen_exception_return_mips64
-#define gen_goto_tb gen_goto_tb_mips64
-#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_mips64
-#define gen_helper_add_saturate gen_helper_add_saturate_mips64
-#define gen_helper_add_setq gen_helper_add_setq_mips64
-#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_mips64
-#define gen_helper_clz32 gen_helper_clz32_mips64
-#define gen_helper_clz64 gen_helper_clz64_mips64
-#define gen_helper_clz_arm gen_helper_clz_arm_mips64
-#define gen_helper_cpsr_read gen_helper_cpsr_read_mips64
-#define gen_helper_cpsr_write gen_helper_cpsr_write_mips64
-#define gen_helper_crc32_arm gen_helper_crc32_arm_mips64
-#define gen_helper_crc32c gen_helper_crc32c_mips64
-#define gen_helper_crypto_aese gen_helper_crypto_aese_mips64
-#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_mips64
-#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_mips64
-#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_mips64
-#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_mips64
-#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_mips64
-#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_mips64
-#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_mips64
-#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_mips64
-#define gen_helper_double_saturate gen_helper_double_saturate_mips64
-#define gen_helper_exception_internal gen_helper_exception_internal_mips64
-#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_mips64
-#define gen_helper_get_cp_reg gen_helper_get_cp_reg_mips64
-#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_mips64
-#define gen_helper_get_r13_banked gen_helper_get_r13_banked_mips64
-#define gen_helper_get_user_reg gen_helper_get_user_reg_mips64
-#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_mips64
-#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_mips64
-#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_mips64
-#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_mips64
-#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_mips64
-#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_mips64
-#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_mips64
-#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_mips64
-#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_mips64
-#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_mips64
-#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_mips64
-#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_mips64
-#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_mips64
-#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_mips64
-#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_mips64
-#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_mips64
-#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_mips64
-#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_mips64
-#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_mips64
-#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_mips64
-#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_mips64
-#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_mips64
-#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_mips64
-#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_mips64
-#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_mips64
-#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_mips64
-#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_mips64
-#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_mips64
-#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_mips64
-#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_mips64
-#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_mips64
-#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_mips64
-#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_mips64
-#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_mips64
-#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_mips64
-#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_mips64
-#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_mips64
-#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_mips64
-#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_mips64
-#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_mips64
-#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_mips64
-#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_mips64
-#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_mips64
-#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_mips64
-#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_mips64
-#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_mips64
-#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_mips64
-#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_mips64
-#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_mips64
-#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_mips64
-#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_mips64
-#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_mips64
-#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_mips64
-#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_mips64
-#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_mips64
-#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_mips64
-#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_mips64
-#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_mips64
-#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_mips64
-#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_mips64
-#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_mips64
-#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_mips64
-#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_mips64
-#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_mips64
-#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_mips64
-#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_mips64
-#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_mips64
-#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_mips64
-#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_mips64
-#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_mips64
-#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_mips64
-#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_mips64
-#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_mips64
-#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_mips64
-#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_mips64
-#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_mips64
-#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_mips64
-#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_mips64
-#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_mips64
-#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_mips64
-#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_mips64
-#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_mips64
-#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_mips64
-#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_mips64
-#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_mips64
-#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_mips64
-#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_mips64
-#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_mips64
-#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_mips64
-#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_mips64
-#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_mips64
-#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_mips64
-#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_mips64
-#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_mips64
-#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_mips64
-#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_mips64
-#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_mips64
-#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_mips64
-#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_mips64
-#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_mips64
-#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_mips64
-#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_mips64
-#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_mips64
-#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_mips64
-#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_mips64
-#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_mips64
-#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_mips64
-#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_mips64
-#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_mips64
-#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_mips64
-#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_mips64
-#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_mips64
-#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_mips64
-#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_mips64
-#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_mips64
-#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_mips64
-#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_mips64
-#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_mips64
-#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_mips64
-#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_mips64
-#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_mips64
-#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_mips64
-#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_mips64
-#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_mips64
-#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_mips64
-#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_mips64
-#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_mips64
-#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_mips64
-#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_mips64
-#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_mips64
-#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_mips64
-#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_mips64
-#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_mips64
-#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_mips64
-#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_mips64
-#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_mips64
-#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_mips64
-#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_mips64
-#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_mips64
-#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_mips64
-#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_mips64
-#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_mips64
-#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_mips64
-#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_mips64
-#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_mips64
-#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_mips64
-#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_mips64
-#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_mips64
-#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_mips64
-#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_mips64
-#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_mips64
-#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_mips64
-#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_mips64
-#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_mips64
-#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_mips64
-#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_mips64
-#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_mips64
-#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_mips64
-#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_mips64
-#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_mips64
-#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_mips64
-#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_mips64
-#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_mips64
-#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_mips64
-#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_mips64
-#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_mips64
-#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_mips64
-#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_mips64
-#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_mips64
-#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_mips64
-#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_mips64
-#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_mips64
-#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_mips64
-#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_mips64
-#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_mips64
-#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_mips64
-#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_mips64
-#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_mips64
-#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_mips64
-#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_mips64
-#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_mips64
-#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_mips64
-#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_mips64
-#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_mips64
-#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_mips64
-#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_mips64
-#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_mips64
-#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_mips64
-#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_mips64
-#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_mips64
-#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_mips64
-#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_mips64
-#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_mips64
-#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_mips64
-#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_mips64
-#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_mips64
-#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_mips64
-#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_mips64
-#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_mips64
-#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_mips64
-#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_mips64
-#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_mips64
-#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_mips64
-#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_mips64
-#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_mips64
-#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_mips64
-#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_mips64
-#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_mips64
-#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_mips64
-#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_mips64
-#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_mips64
-#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_mips64
-#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_mips64
-#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_mips64
-#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_mips64
-#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_mips64
-#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_mips64
-#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_mips64
-#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_mips64
-#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_mips64
-#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_mips64
-#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_mips64
-#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_mips64
-#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_mips64
-#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_mips64
-#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_mips64
-#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_mips64
-#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_mips64
-#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_mips64
-#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_mips64
-#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_mips64
-#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_mips64
-#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_mips64
-#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_mips64
-#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_mips64
-#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_mips64
-#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_mips64
-#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_mips64
-#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_mips64
-#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_mips64
-#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_mips64
-#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_mips64
-#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_mips64
-#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_mips64
-#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_mips64
-#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_mips64
-#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_mips64
-#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_mips64
-#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_mips64
-#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_mips64
-#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_mips64
-#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_mips64
gen_helper_neon_qsub_s16_mips64 -#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_mips64 -#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_mips64 -#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_mips64 -#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_mips64 -#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_mips64 -#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_mips64 -#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_mips64 -#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_mips64 -#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_mips64 -#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_mips64 -#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_mips64 -#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_mips64 -#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_mips64 -#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_mips64 -#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_mips64 -#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_mips64 -#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_mips64 -#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_mips64 -#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_mips64 -#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_mips64 -#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_mips64 -#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_mips64 -#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_mips64 -#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_mips64 -#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_mips64 -#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_mips64 -#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_mips64 -#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_mips64 -#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_mips64 -#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_mips64 -#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_mips64 -#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_mips64 -#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_mips64 -#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_mips64 -#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_mips64 -#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_mips64 -#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_mips64 -#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_mips64 -#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_mips64 -#define gen_helper_neon_tbl gen_helper_neon_tbl_mips64 -#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_mips64 -#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_mips64 -#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_mips64 -#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_mips64 -#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_mips64 -#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_mips64 -#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_mips64 -#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_mips64 -#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_mips64 -#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_mips64 -#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_mips64 -#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_mips64 -#define gen_helper_neon_zip16 gen_helper_neon_zip16_mips64 -#define gen_helper_neon_zip8 gen_helper_neon_zip8_mips64 
-#define gen_helper_pre_hvc gen_helper_pre_hvc_mips64 -#define gen_helper_pre_smc gen_helper_pre_smc_mips64 -#define gen_helper_qadd16 gen_helper_qadd16_mips64 -#define gen_helper_qadd8 gen_helper_qadd8_mips64 -#define gen_helper_qaddsubx gen_helper_qaddsubx_mips64 -#define gen_helper_qsub16 gen_helper_qsub16_mips64 -#define gen_helper_qsub8 gen_helper_qsub8_mips64 -#define gen_helper_qsubaddx gen_helper_qsubaddx_mips64 -#define gen_helper_rbit gen_helper_rbit_mips64 -#define gen_helper_recpe_f32 gen_helper_recpe_f32_mips64 -#define gen_helper_recpe_u32 gen_helper_recpe_u32_mips64 -#define gen_helper_recps_f32 gen_helper_recps_f32_mips64 -#define gen_helper_rintd gen_helper_rintd_mips64 -#define gen_helper_rintd_exact gen_helper_rintd_exact_mips64 -#define gen_helper_rints gen_helper_rints_mips64 -#define gen_helper_rints_exact gen_helper_rints_exact_mips64 -#define gen_helper_ror_cc gen_helper_ror_cc_mips64 -#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_mips64 -#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_mips64 -#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_mips64 -#define gen_helper_sadd16 gen_helper_sadd16_mips64 -#define gen_helper_sadd8 gen_helper_sadd8_mips64 -#define gen_helper_saddsubx gen_helper_saddsubx_mips64 -#define gen_helper_sar_cc gen_helper_sar_cc_mips64 -#define gen_helper_sdiv gen_helper_sdiv_mips64 -#define gen_helper_sel_flags gen_helper_sel_flags_mips64 -#define gen_helper_set_cp_reg gen_helper_set_cp_reg_mips64 -#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_mips64 -#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_mips64 -#define gen_helper_set_r13_banked gen_helper_set_r13_banked_mips64 -#define gen_helper_set_rmode gen_helper_set_rmode_mips64 -#define gen_helper_set_user_reg gen_helper_set_user_reg_mips64 -#define gen_helper_shadd16 gen_helper_shadd16_mips64 -#define gen_helper_shadd8 gen_helper_shadd8_mips64 -#define gen_helper_shaddsubx gen_helper_shaddsubx_mips64 -#define gen_helper_shl_cc gen_helper_shl_cc_mips64 -#define gen_helper_shr_cc gen_helper_shr_cc_mips64 -#define gen_helper_shsub16 gen_helper_shsub16_mips64 -#define gen_helper_shsub8 gen_helper_shsub8_mips64 -#define gen_helper_shsubaddx gen_helper_shsubaddx_mips64 -#define gen_helper_ssat gen_helper_ssat_mips64 -#define gen_helper_ssat16 gen_helper_ssat16_mips64 -#define gen_helper_ssub16 gen_helper_ssub16_mips64 -#define gen_helper_ssub8 gen_helper_ssub8_mips64 -#define gen_helper_ssubaddx gen_helper_ssubaddx_mips64 -#define gen_helper_sub_saturate gen_helper_sub_saturate_mips64 -#define gen_helper_sxtb16 gen_helper_sxtb16_mips64 -#define gen_helper_uadd16 gen_helper_uadd16_mips64 -#define gen_helper_uadd8 gen_helper_uadd8_mips64 -#define gen_helper_uaddsubx gen_helper_uaddsubx_mips64 -#define gen_helper_udiv gen_helper_udiv_mips64 -#define gen_helper_uhadd16 gen_helper_uhadd16_mips64 -#define gen_helper_uhadd8 gen_helper_uhadd8_mips64 -#define gen_helper_uhaddsubx gen_helper_uhaddsubx_mips64 -#define gen_helper_uhsub16 gen_helper_uhsub16_mips64 -#define gen_helper_uhsub8 gen_helper_uhsub8_mips64 -#define gen_helper_uhsubaddx gen_helper_uhsubaddx_mips64 -#define gen_helper_uqadd16 gen_helper_uqadd16_mips64 -#define gen_helper_uqadd8 gen_helper_uqadd8_mips64 -#define gen_helper_uqaddsubx gen_helper_uqaddsubx_mips64 -#define gen_helper_uqsub16 gen_helper_uqsub16_mips64 -#define gen_helper_uqsub8 gen_helper_uqsub8_mips64 -#define gen_helper_uqsubaddx gen_helper_uqsubaddx_mips64 -#define gen_helper_usad8 gen_helper_usad8_mips64 -#define gen_helper_usat 
gen_helper_usat_mips64 -#define gen_helper_usat16 gen_helper_usat16_mips64 -#define gen_helper_usub16 gen_helper_usub16_mips64 -#define gen_helper_usub8 gen_helper_usub8_mips64 -#define gen_helper_usubaddx gen_helper_usubaddx_mips64 -#define gen_helper_uxtb16 gen_helper_uxtb16_mips64 -#define gen_helper_v7m_mrs gen_helper_v7m_mrs_mips64 -#define gen_helper_v7m_msr gen_helper_v7m_msr_mips64 -#define gen_helper_vfp_absd gen_helper_vfp_absd_mips64 -#define gen_helper_vfp_abss gen_helper_vfp_abss_mips64 -#define gen_helper_vfp_addd gen_helper_vfp_addd_mips64 -#define gen_helper_vfp_adds gen_helper_vfp_adds_mips64 -#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_mips64 -#define gen_helper_vfp_cmped gen_helper_vfp_cmped_mips64 -#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_mips64 -#define gen_helper_vfp_cmps gen_helper_vfp_cmps_mips64 -#define gen_helper_vfp_divd gen_helper_vfp_divd_mips64 -#define gen_helper_vfp_divs gen_helper_vfp_divs_mips64 -#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_mips64 -#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_mips64 -#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_mips64 -#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_mips64 -#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_mips64 -#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_mips64 -#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips64 -#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_mips64 -#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_mips64 -#define gen_helper_vfp_maxs gen_helper_vfp_maxs_mips64 -#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_mips64 -#define gen_helper_vfp_minnums gen_helper_vfp_minnums_mips64 -#define gen_helper_vfp_mins gen_helper_vfp_mins_mips64 -#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_mips64 -#define gen_helper_vfp_muladds gen_helper_vfp_muladds_mips64 -#define gen_helper_vfp_muld gen_helper_vfp_muld_mips64 -#define gen_helper_vfp_muls gen_helper_vfp_muls_mips64 -#define gen_helper_vfp_negd gen_helper_vfp_negd_mips64 -#define gen_helper_vfp_negs gen_helper_vfp_negs_mips64 -#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips64 -#define gen_helper_vfp_shtod gen_helper_vfp_shtod_mips64 -#define gen_helper_vfp_shtos gen_helper_vfp_shtos_mips64 -#define gen_helper_vfp_sitod gen_helper_vfp_sitod_mips64 -#define gen_helper_vfp_sitos gen_helper_vfp_sitos_mips64 -#define gen_helper_vfp_sltod gen_helper_vfp_sltod_mips64 -#define gen_helper_vfp_sltos gen_helper_vfp_sltos_mips64 -#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_mips64 -#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_mips64 -#define gen_helper_vfp_subd gen_helper_vfp_subd_mips64 -#define gen_helper_vfp_subs gen_helper_vfp_subs_mips64 -#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_mips64 -#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_mips64 -#define gen_helper_vfp_tosid gen_helper_vfp_tosid_mips64 -#define gen_helper_vfp_tosis gen_helper_vfp_tosis_mips64 -#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_mips64 -#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_mips64 -#define gen_helper_vfp_tosld gen_helper_vfp_tosld_mips64 -#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_mips64 -#define gen_helper_vfp_tosls gen_helper_vfp_tosls_mips64 -#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_mips64 -#define gen_helper_vfp_touhd_round_to_zero 
gen_helper_vfp_touhd_round_to_zero_mips64 -#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_mips64 -#define gen_helper_vfp_touid gen_helper_vfp_touid_mips64 -#define gen_helper_vfp_touis gen_helper_vfp_touis_mips64 -#define gen_helper_vfp_touizd gen_helper_vfp_touizd_mips64 -#define gen_helper_vfp_touizs gen_helper_vfp_touizs_mips64 -#define gen_helper_vfp_tould gen_helper_vfp_tould_mips64 -#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_mips64 -#define gen_helper_vfp_touls gen_helper_vfp_touls_mips64 -#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_mips64 -#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_mips64 -#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_mips64 -#define gen_helper_vfp_uitod gen_helper_vfp_uitod_mips64 -#define gen_helper_vfp_uitos gen_helper_vfp_uitos_mips64 -#define gen_helper_vfp_ultod gen_helper_vfp_ultod_mips64 -#define gen_helper_vfp_ultos gen_helper_vfp_ultos_mips64 -#define gen_helper_wfe gen_helper_wfe_mips64 -#define gen_helper_wfi gen_helper_wfi_mips64 -#define gen_hvc gen_hvc_mips64 -#define gen_intermediate_code_internal gen_intermediate_code_internal_mips64 -#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_mips64 -#define gen_iwmmxt_address gen_iwmmxt_address_mips64 -#define gen_iwmmxt_shift gen_iwmmxt_shift_mips64 -#define gen_jmp gen_jmp_mips64 -#define gen_load_and_replicate gen_load_and_replicate_mips64 -#define gen_load_exclusive gen_load_exclusive_mips64 -#define gen_logic_CC gen_logic_CC_mips64 -#define gen_logicq_cc gen_logicq_cc_mips64 -#define gen_lookup_tb gen_lookup_tb_mips64 -#define gen_mov_F0_vreg gen_mov_F0_vreg_mips64 -#define gen_mov_F1_vreg gen_mov_F1_vreg_mips64 -#define gen_mov_vreg_F0 gen_mov_vreg_F0_mips64 -#define gen_muls_i64_i32 gen_muls_i64_i32_mips64 -#define gen_mulu_i64_i32 gen_mulu_i64_i32_mips64 -#define gen_mulxy gen_mulxy_mips64 -#define gen_neon_add gen_neon_add_mips64 -#define gen_neon_addl gen_neon_addl_mips64 -#define gen_neon_addl_saturate gen_neon_addl_saturate_mips64 -#define gen_neon_bsl gen_neon_bsl_mips64 -#define gen_neon_dup_high16 gen_neon_dup_high16_mips64 -#define gen_neon_dup_low16 gen_neon_dup_low16_mips64 -#define gen_neon_dup_u8 gen_neon_dup_u8_mips64 -#define gen_neon_mull gen_neon_mull_mips64 -#define gen_neon_narrow gen_neon_narrow_mips64 -#define gen_neon_narrow_op gen_neon_narrow_op_mips64 -#define gen_neon_narrow_sats gen_neon_narrow_sats_mips64 -#define gen_neon_narrow_satu gen_neon_narrow_satu_mips64 -#define gen_neon_negl gen_neon_negl_mips64 -#define gen_neon_rsb gen_neon_rsb_mips64 -#define gen_neon_shift_narrow gen_neon_shift_narrow_mips64 -#define gen_neon_subl gen_neon_subl_mips64 -#define gen_neon_trn_u16 gen_neon_trn_u16_mips64 -#define gen_neon_trn_u8 gen_neon_trn_u8_mips64 -#define gen_neon_unarrow_sats gen_neon_unarrow_sats_mips64 -#define gen_neon_unzip gen_neon_unzip_mips64 -#define gen_neon_widen gen_neon_widen_mips64 -#define gen_neon_zip gen_neon_zip_mips64 +#define float128_to_int32 float128_to_int32_mips64 +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips64 +#define float128_to_int64 float128_to_int64_mips64 +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips64 +#define float128_to_uint64 float128_to_uint64_mips64 +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_mips64 +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_mips64 +#define float128_to_uint32 
float128_to_uint32_mips64 +#define float128_to_float32 float128_to_float32_mips64 +#define float128_to_float64 float128_to_float64_mips64 +#define float128_to_floatx80 float128_to_floatx80_mips64 +#define float128_round_to_int float128_round_to_int_mips64 +#define float128_add float128_add_mips64 +#define float128_sub float128_sub_mips64 +#define float128_mul float128_mul_mips64 +#define float128_div float128_div_mips64 +#define float128_rem float128_rem_mips64 +#define float128_sqrt float128_sqrt_mips64 +#define float128_eq float128_eq_mips64 +#define float128_le float128_le_mips64 +#define float128_lt float128_lt_mips64 +#define float128_unordered float128_unordered_mips64 +#define float128_eq_quiet float128_eq_quiet_mips64 +#define float128_le_quiet float128_le_quiet_mips64 +#define float128_lt_quiet float128_lt_quiet_mips64 +#define float128_unordered_quiet float128_unordered_quiet_mips64 +#define floatx80_compare floatx80_compare_mips64 +#define floatx80_compare_quiet floatx80_compare_quiet_mips64 +#define float128_compare float128_compare_mips64 +#define float128_compare_quiet float128_compare_quiet_mips64 +#define floatx80_scalbn floatx80_scalbn_mips64 +#define float128_scalbn float128_scalbn_mips64 +#define softfloat_init softfloat_init_mips64 +#define tcg_optimize tcg_optimize_mips64 #define gen_new_label gen_new_label_mips64 -#define gen_nop_hint gen_nop_hint_mips64 -#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_mips64 -#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_mips64 -#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_mips64 -#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_mips64 -#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_mips64 -#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_mips64 -#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_mips64 -#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_mips64 -#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_mips64 -#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_mips64 -#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_mips64 -#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_mips64 -#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_mips64 -#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_mips64 -#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_mips64 -#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_mips64 -#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_mips64 -#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_mips64 -#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_mips64 -#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_mips64 -#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_mips64 -#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_mips64 -#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_mips64 -#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_mips64 -#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_mips64 -#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_mips64 -#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_mips64 -#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_mips64 -#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_mips64 -#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_mips64 -#define 
gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_mips64 -#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_mips64 -#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_mips64 -#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_mips64 -#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_mips64 -#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_mips64 -#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_mips64 -#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_mips64 -#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_mips64 -#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_mips64 -#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_mips64 -#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_mips64 -#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_mips64 -#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_mips64 -#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_mips64 -#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_mips64 -#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_mips64 -#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_mips64 -#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_mips64 -#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_mips64 -#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_mips64 -#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_mips64 -#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_mips64 -#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_mips64 -#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_mips64 -#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_mips64 -#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_mips64 -#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_mips64 -#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_mips64 -#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_mips64 -#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_mips64 -#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_mips64 -#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_mips64 -#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_mips64 -#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_mips64 -#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_mips64 -#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_mips64 -#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_mips64 -#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_mips64 -#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_mips64 -#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_mips64 -#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_mips64 -#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_mips64 -#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_mips64 -#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_mips64 -#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_mips64 -#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_mips64 -#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_mips64 -#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_mips64 -#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_mips64 -#define gen_op_iwmmxt_unpacklsw_M0 
gen_op_iwmmxt_unpacklsw_M0_mips64 -#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_mips64 -#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_mips64 -#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_mips64 -#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_mips64 -#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_mips64 -#define gen_rev16 gen_rev16_mips64 -#define gen_revsh gen_revsh_mips64 -#define gen_rfe gen_rfe_mips64 -#define gen_sar gen_sar_mips64 -#define gen_sbc_CC gen_sbc_CC_mips64 -#define gen_sbfx gen_sbfx_mips64 -#define gen_set_CF_bit31 gen_set_CF_bit31_mips64 -#define gen_set_condexec gen_set_condexec_mips64 -#define gen_set_cpsr gen_set_cpsr_mips64 -#define gen_set_label gen_set_label_mips64 -#define gen_set_pc_im gen_set_pc_im_mips64 -#define gen_set_psr gen_set_psr_mips64 -#define gen_set_psr_im gen_set_psr_im_mips64 -#define gen_shl gen_shl_mips64 -#define gen_shr gen_shr_mips64 -#define gen_smc gen_smc_mips64 -#define gen_smul_dual gen_smul_dual_mips64 -#define gen_srs gen_srs_mips64 -#define gen_ss_advance gen_ss_advance_mips64 -#define gen_step_complete_exception gen_step_complete_exception_mips64 -#define gen_store_exclusive gen_store_exclusive_mips64 -#define gen_storeq_reg gen_storeq_reg_mips64 -#define gen_sub_carry gen_sub_carry_mips64 -#define gen_sub_CC gen_sub_CC_mips64 -#define gen_subq_msw gen_subq_msw_mips64 -#define gen_swap_half gen_swap_half_mips64 -#define gen_thumb2_data_op gen_thumb2_data_op_mips64 -#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_mips64 -#define gen_ubfx gen_ubfx_mips64 -#define gen_vfp_abs gen_vfp_abs_mips64 -#define gen_vfp_add gen_vfp_add_mips64 -#define gen_vfp_cmp gen_vfp_cmp_mips64 -#define gen_vfp_cmpe gen_vfp_cmpe_mips64 -#define gen_vfp_div gen_vfp_div_mips64 -#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_mips64 -#define gen_vfp_F1_mul gen_vfp_F1_mul_mips64 -#define gen_vfp_F1_neg gen_vfp_F1_neg_mips64 -#define gen_vfp_ld gen_vfp_ld_mips64 -#define gen_vfp_mrs gen_vfp_mrs_mips64 -#define gen_vfp_msr gen_vfp_msr_mips64 -#define gen_vfp_mul gen_vfp_mul_mips64 -#define gen_vfp_neg gen_vfp_neg_mips64 -#define gen_vfp_shto gen_vfp_shto_mips64 -#define gen_vfp_sito gen_vfp_sito_mips64 -#define gen_vfp_slto gen_vfp_slto_mips64 -#define gen_vfp_sqrt gen_vfp_sqrt_mips64 -#define gen_vfp_st gen_vfp_st_mips64 -#define gen_vfp_sub gen_vfp_sub_mips64 -#define gen_vfp_tosh gen_vfp_tosh_mips64 -#define gen_vfp_tosi gen_vfp_tosi_mips64 -#define gen_vfp_tosiz gen_vfp_tosiz_mips64 -#define gen_vfp_tosl gen_vfp_tosl_mips64 -#define gen_vfp_touh gen_vfp_touh_mips64 -#define gen_vfp_toui gen_vfp_toui_mips64 -#define gen_vfp_touiz gen_vfp_touiz_mips64 -#define gen_vfp_toul gen_vfp_toul_mips64 -#define gen_vfp_uhto gen_vfp_uhto_mips64 -#define gen_vfp_uito gen_vfp_uito_mips64 -#define gen_vfp_ulto gen_vfp_ulto_mips64 -#define get_arm_cp_reginfo get_arm_cp_reginfo_mips64 -#define get_clock get_clock_mips64 -#define get_clock_realtime get_clock_realtime_mips64 -#define get_constraint_priority get_constraint_priority_mips64 -#define get_float_exception_flags get_float_exception_flags_mips64 -#define get_float_rounding_mode get_float_rounding_mode_mips64 -#define get_fpstatus_ptr get_fpstatus_ptr_mips64 -#define get_level1_table_address get_level1_table_address_mips64 -#define get_mem_index get_mem_index_mips64 -#define get_next_param_value get_next_param_value_mips64 -#define get_opt_name get_opt_name_mips64 -#define get_opt_value get_opt_value_mips64 -#define get_page_addr_code 
get_page_addr_code_mips64 -#define get_param_value get_param_value_mips64 -#define get_phys_addr get_phys_addr_mips64 -#define get_phys_addr_lpae get_phys_addr_lpae_mips64 -#define get_phys_addr_mpu get_phys_addr_mpu_mips64 -#define get_phys_addr_v5 get_phys_addr_v5_mips64 -#define get_phys_addr_v6 get_phys_addr_v6_mips64 -#define get_system_memory get_system_memory_mips64 -#define get_ticks_per_sec get_ticks_per_sec_mips64 -#define g_list_insert_sorted_merged g_list_insert_sorted_merged_mips64 -#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__mips64 -#define gt_cntfrq_access gt_cntfrq_access_mips64 -#define gt_cnt_read gt_cnt_read_mips64 -#define gt_cnt_reset gt_cnt_reset_mips64 -#define gt_counter_access gt_counter_access_mips64 -#define gt_ctl_write gt_ctl_write_mips64 -#define gt_cval_write gt_cval_write_mips64 -#define gt_get_countervalue gt_get_countervalue_mips64 -#define gt_pct_access gt_pct_access_mips64 -#define gt_ptimer_access gt_ptimer_access_mips64 -#define gt_recalc_timer gt_recalc_timer_mips64 -#define gt_timer_access gt_timer_access_mips64 -#define gt_tval_read gt_tval_read_mips64 -#define gt_tval_write gt_tval_write_mips64 -#define gt_vct_access gt_vct_access_mips64 -#define gt_vtimer_access gt_vtimer_access_mips64 -#define guest_phys_blocks_free guest_phys_blocks_free_mips64 -#define guest_phys_blocks_init guest_phys_blocks_init_mips64 -#define handle_vcvt handle_vcvt_mips64 -#define handle_vminmaxnm handle_vminmaxnm_mips64 -#define handle_vrint handle_vrint_mips64 -#define handle_vsel handle_vsel_mips64 -#define has_help_option has_help_option_mips64 -#define have_bmi1 have_bmi1_mips64 -#define have_bmi2 have_bmi2_mips64 -#define hcr_write hcr_write_mips64 -#define helper_access_check_cp_reg helper_access_check_cp_reg_mips64 -#define helper_add_saturate helper_add_saturate_mips64 -#define helper_add_setq helper_add_setq_mips64 -#define helper_add_usaturate helper_add_usaturate_mips64 -#define helper_be_ldl_cmmu helper_be_ldl_cmmu_mips64 -#define helper_be_ldq_cmmu helper_be_ldq_cmmu_mips64 -#define helper_be_ldq_mmu helper_be_ldq_mmu_mips64 -#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips64 -#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips64 -#define helper_be_ldul_mmu helper_be_ldul_mmu_mips64 -#define helper_be_lduw_mmu helper_be_lduw_mmu_mips64 -#define helper_be_ldw_cmmu helper_be_ldw_cmmu_mips64 -#define helper_be_stl_mmu helper_be_stl_mmu_mips64 -#define helper_be_stq_mmu helper_be_stq_mmu_mips64 -#define helper_be_stw_mmu helper_be_stw_mmu_mips64 -#define helper_clear_pstate_ss helper_clear_pstate_ss_mips64 -#define helper_clz_arm helper_clz_arm_mips64 -#define helper_cpsr_read helper_cpsr_read_mips64 -#define helper_cpsr_write helper_cpsr_write_mips64 -#define helper_crc32_arm helper_crc32_arm_mips64 -#define helper_crc32c helper_crc32c_mips64 -#define helper_crypto_aese helper_crypto_aese_mips64 -#define helper_crypto_aesmc helper_crypto_aesmc_mips64 -#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_mips64 -#define helper_crypto_sha1h helper_crypto_sha1h_mips64 -#define helper_crypto_sha1su1 helper_crypto_sha1su1_mips64 -#define helper_crypto_sha256h helper_crypto_sha256h_mips64 -#define helper_crypto_sha256h2 helper_crypto_sha256h2_mips64 -#define helper_crypto_sha256su0 helper_crypto_sha256su0_mips64 -#define helper_crypto_sha256su1 helper_crypto_sha256su1_mips64 -#define helper_dc_zva helper_dc_zva_mips64 -#define helper_double_saturate helper_double_saturate_mips64 -#define helper_exception_internal helper_exception_internal_mips64 -#define 
helper_exception_return helper_exception_return_mips64 -#define helper_exception_with_syndrome helper_exception_with_syndrome_mips64 -#define helper_get_cp_reg helper_get_cp_reg_mips64 -#define helper_get_cp_reg64 helper_get_cp_reg64_mips64 -#define helper_get_r13_banked helper_get_r13_banked_mips64 -#define helper_get_user_reg helper_get_user_reg_mips64 -#define helper_iwmmxt_addcb helper_iwmmxt_addcb_mips64 -#define helper_iwmmxt_addcl helper_iwmmxt_addcl_mips64 -#define helper_iwmmxt_addcw helper_iwmmxt_addcw_mips64 -#define helper_iwmmxt_addnb helper_iwmmxt_addnb_mips64 -#define helper_iwmmxt_addnl helper_iwmmxt_addnl_mips64 -#define helper_iwmmxt_addnw helper_iwmmxt_addnw_mips64 -#define helper_iwmmxt_addsb helper_iwmmxt_addsb_mips64 -#define helper_iwmmxt_addsl helper_iwmmxt_addsl_mips64 -#define helper_iwmmxt_addsw helper_iwmmxt_addsw_mips64 -#define helper_iwmmxt_addub helper_iwmmxt_addub_mips64 -#define helper_iwmmxt_addul helper_iwmmxt_addul_mips64 -#define helper_iwmmxt_adduw helper_iwmmxt_adduw_mips64 -#define helper_iwmmxt_align helper_iwmmxt_align_mips64 -#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_mips64 -#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_mips64 -#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_mips64 -#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_mips64 -#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_mips64 -#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_mips64 -#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_mips64 -#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_mips64 -#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_mips64 -#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_mips64 -#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_mips64 -#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_mips64 -#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_mips64 -#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_mips64 -#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_mips64 -#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_mips64 -#define helper_iwmmxt_insr helper_iwmmxt_insr_mips64 -#define helper_iwmmxt_macsw helper_iwmmxt_macsw_mips64 -#define helper_iwmmxt_macuw helper_iwmmxt_macuw_mips64 -#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_mips64 -#define helper_iwmmxt_madduq helper_iwmmxt_madduq_mips64 -#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_mips64 -#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_mips64 -#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_mips64 -#define helper_iwmmxt_maxub helper_iwmmxt_maxub_mips64 -#define helper_iwmmxt_maxul helper_iwmmxt_maxul_mips64 -#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_mips64 -#define helper_iwmmxt_minsb helper_iwmmxt_minsb_mips64 -#define helper_iwmmxt_minsl helper_iwmmxt_minsl_mips64 -#define helper_iwmmxt_minsw helper_iwmmxt_minsw_mips64 -#define helper_iwmmxt_minub helper_iwmmxt_minub_mips64 -#define helper_iwmmxt_minul helper_iwmmxt_minul_mips64 -#define helper_iwmmxt_minuw helper_iwmmxt_minuw_mips64 -#define helper_iwmmxt_msbb helper_iwmmxt_msbb_mips64 -#define helper_iwmmxt_msbl helper_iwmmxt_msbl_mips64 -#define helper_iwmmxt_msbw helper_iwmmxt_msbw_mips64 -#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_mips64 -#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_mips64 -#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_mips64 -#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_mips64 -#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_mips64 -#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_mips64 -#define helper_iwmmxt_mululw helper_iwmmxt_mululw_mips64 
-#define helper_iwmmxt_packsl helper_iwmmxt_packsl_mips64 -#define helper_iwmmxt_packsq helper_iwmmxt_packsq_mips64 -#define helper_iwmmxt_packsw helper_iwmmxt_packsw_mips64 -#define helper_iwmmxt_packul helper_iwmmxt_packul_mips64 -#define helper_iwmmxt_packuq helper_iwmmxt_packuq_mips64 -#define helper_iwmmxt_packuw helper_iwmmxt_packuw_mips64 -#define helper_iwmmxt_rorl helper_iwmmxt_rorl_mips64 -#define helper_iwmmxt_rorq helper_iwmmxt_rorq_mips64 -#define helper_iwmmxt_rorw helper_iwmmxt_rorw_mips64 -#define helper_iwmmxt_sadb helper_iwmmxt_sadb_mips64 -#define helper_iwmmxt_sadw helper_iwmmxt_sadw_mips64 -#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_mips64 -#define helper_iwmmxt_shufh helper_iwmmxt_shufh_mips64 -#define helper_iwmmxt_slll helper_iwmmxt_slll_mips64 -#define helper_iwmmxt_sllq helper_iwmmxt_sllq_mips64 -#define helper_iwmmxt_sllw helper_iwmmxt_sllw_mips64 -#define helper_iwmmxt_sral helper_iwmmxt_sral_mips64 -#define helper_iwmmxt_sraq helper_iwmmxt_sraq_mips64 -#define helper_iwmmxt_sraw helper_iwmmxt_sraw_mips64 -#define helper_iwmmxt_srll helper_iwmmxt_srll_mips64 -#define helper_iwmmxt_srlq helper_iwmmxt_srlq_mips64 -#define helper_iwmmxt_srlw helper_iwmmxt_srlw_mips64 -#define helper_iwmmxt_subnb helper_iwmmxt_subnb_mips64 -#define helper_iwmmxt_subnl helper_iwmmxt_subnl_mips64 -#define helper_iwmmxt_subnw helper_iwmmxt_subnw_mips64 -#define helper_iwmmxt_subsb helper_iwmmxt_subsb_mips64 -#define helper_iwmmxt_subsl helper_iwmmxt_subsl_mips64 -#define helper_iwmmxt_subsw helper_iwmmxt_subsw_mips64 -#define helper_iwmmxt_subub helper_iwmmxt_subub_mips64 -#define helper_iwmmxt_subul helper_iwmmxt_subul_mips64 -#define helper_iwmmxt_subuw helper_iwmmxt_subuw_mips64 -#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_mips64 -#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_mips64 -#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_mips64 -#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_mips64 -#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_mips64 -#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_mips64 -#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_mips64 -#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_mips64 -#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_mips64 -#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_mips64 -#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_mips64 -#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_mips64 -#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_mips64 -#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_mips64 -#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_mips64 -#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_mips64 -#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_mips64 -#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_mips64 -#define helper_ldb_cmmu helper_ldb_cmmu_mips64 -#define helper_ldb_mmu helper_ldb_mmu_mips64 -#define helper_ldl_cmmu helper_ldl_cmmu_mips64 -#define helper_ldl_mmu helper_ldl_mmu_mips64 -#define helper_ldq_cmmu helper_ldq_cmmu_mips64 -#define helper_ldq_mmu helper_ldq_mmu_mips64 -#define helper_ldw_cmmu helper_ldw_cmmu_mips64 -#define helper_ldw_mmu helper_ldw_mmu_mips64 -#define helper_le_ldl_cmmu helper_le_ldl_cmmu_mips64 -#define helper_le_ldq_cmmu helper_le_ldq_cmmu_mips64 -#define helper_le_ldq_mmu helper_le_ldq_mmu_mips64 -#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips64 -#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips64 -#define helper_le_ldul_mmu 
helper_le_ldul_mmu_mips64 -#define helper_le_lduw_mmu helper_le_lduw_mmu_mips64 -#define helper_le_ldw_cmmu helper_le_ldw_cmmu_mips64 -#define helper_le_stl_mmu helper_le_stl_mmu_mips64 -#define helper_le_stq_mmu helper_le_stq_mmu_mips64 -#define helper_le_stw_mmu helper_le_stw_mmu_mips64 -#define helper_msr_i_pstate helper_msr_i_pstate_mips64 -#define helper_neon_abd_f32 helper_neon_abd_f32_mips64 -#define helper_neon_abdl_s16 helper_neon_abdl_s16_mips64 -#define helper_neon_abdl_s32 helper_neon_abdl_s32_mips64 -#define helper_neon_abdl_s64 helper_neon_abdl_s64_mips64 -#define helper_neon_abdl_u16 helper_neon_abdl_u16_mips64 -#define helper_neon_abdl_u32 helper_neon_abdl_u32_mips64 -#define helper_neon_abdl_u64 helper_neon_abdl_u64_mips64 -#define helper_neon_abd_s16 helper_neon_abd_s16_mips64 -#define helper_neon_abd_s32 helper_neon_abd_s32_mips64 -#define helper_neon_abd_s8 helper_neon_abd_s8_mips64 -#define helper_neon_abd_u16 helper_neon_abd_u16_mips64 -#define helper_neon_abd_u32 helper_neon_abd_u32_mips64 -#define helper_neon_abd_u8 helper_neon_abd_u8_mips64 -#define helper_neon_abs_s16 helper_neon_abs_s16_mips64 -#define helper_neon_abs_s8 helper_neon_abs_s8_mips64 -#define helper_neon_acge_f32 helper_neon_acge_f32_mips64 -#define helper_neon_acge_f64 helper_neon_acge_f64_mips64 -#define helper_neon_acgt_f32 helper_neon_acgt_f32_mips64 -#define helper_neon_acgt_f64 helper_neon_acgt_f64_mips64 -#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_mips64 -#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_mips64 -#define helper_neon_addl_u16 helper_neon_addl_u16_mips64 -#define helper_neon_addl_u32 helper_neon_addl_u32_mips64 -#define helper_neon_add_u16 helper_neon_add_u16_mips64 -#define helper_neon_add_u8 helper_neon_add_u8_mips64 -#define helper_neon_ceq_f32 helper_neon_ceq_f32_mips64 -#define helper_neon_ceq_u16 helper_neon_ceq_u16_mips64 -#define helper_neon_ceq_u32 helper_neon_ceq_u32_mips64 -#define helper_neon_ceq_u8 helper_neon_ceq_u8_mips64 -#define helper_neon_cge_f32 helper_neon_cge_f32_mips64 -#define helper_neon_cge_s16 helper_neon_cge_s16_mips64 -#define helper_neon_cge_s32 helper_neon_cge_s32_mips64 -#define helper_neon_cge_s8 helper_neon_cge_s8_mips64 -#define helper_neon_cge_u16 helper_neon_cge_u16_mips64 -#define helper_neon_cge_u32 helper_neon_cge_u32_mips64 -#define helper_neon_cge_u8 helper_neon_cge_u8_mips64 -#define helper_neon_cgt_f32 helper_neon_cgt_f32_mips64 -#define helper_neon_cgt_s16 helper_neon_cgt_s16_mips64 -#define helper_neon_cgt_s32 helper_neon_cgt_s32_mips64 -#define helper_neon_cgt_s8 helper_neon_cgt_s8_mips64 -#define helper_neon_cgt_u16 helper_neon_cgt_u16_mips64 -#define helper_neon_cgt_u32 helper_neon_cgt_u32_mips64 -#define helper_neon_cgt_u8 helper_neon_cgt_u8_mips64 -#define helper_neon_cls_s16 helper_neon_cls_s16_mips64 -#define helper_neon_cls_s32 helper_neon_cls_s32_mips64 -#define helper_neon_cls_s8 helper_neon_cls_s8_mips64 -#define helper_neon_clz_u16 helper_neon_clz_u16_mips64 -#define helper_neon_clz_u8 helper_neon_clz_u8_mips64 -#define helper_neon_cnt_u8 helper_neon_cnt_u8_mips64 -#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_mips64 -#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_mips64 -#define helper_neon_hadd_s16 helper_neon_hadd_s16_mips64 -#define helper_neon_hadd_s32 helper_neon_hadd_s32_mips64 -#define helper_neon_hadd_s8 helper_neon_hadd_s8_mips64 -#define helper_neon_hadd_u16 helper_neon_hadd_u16_mips64 -#define helper_neon_hadd_u32 
helper_neon_hadd_u32_mips64 -#define helper_neon_hadd_u8 helper_neon_hadd_u8_mips64 -#define helper_neon_hsub_s16 helper_neon_hsub_s16_mips64 -#define helper_neon_hsub_s32 helper_neon_hsub_s32_mips64 -#define helper_neon_hsub_s8 helper_neon_hsub_s8_mips64 -#define helper_neon_hsub_u16 helper_neon_hsub_u16_mips64 -#define helper_neon_hsub_u32 helper_neon_hsub_u32_mips64 -#define helper_neon_hsub_u8 helper_neon_hsub_u8_mips64 -#define helper_neon_max_s16 helper_neon_max_s16_mips64 -#define helper_neon_max_s32 helper_neon_max_s32_mips64 -#define helper_neon_max_s8 helper_neon_max_s8_mips64 -#define helper_neon_max_u16 helper_neon_max_u16_mips64 -#define helper_neon_max_u32 helper_neon_max_u32_mips64 -#define helper_neon_max_u8 helper_neon_max_u8_mips64 -#define helper_neon_min_s16 helper_neon_min_s16_mips64 -#define helper_neon_min_s32 helper_neon_min_s32_mips64 -#define helper_neon_min_s8 helper_neon_min_s8_mips64 -#define helper_neon_min_u16 helper_neon_min_u16_mips64 -#define helper_neon_min_u32 helper_neon_min_u32_mips64 -#define helper_neon_min_u8 helper_neon_min_u8_mips64 -#define helper_neon_mull_p8 helper_neon_mull_p8_mips64 -#define helper_neon_mull_s16 helper_neon_mull_s16_mips64 -#define helper_neon_mull_s8 helper_neon_mull_s8_mips64 -#define helper_neon_mull_u16 helper_neon_mull_u16_mips64 -#define helper_neon_mull_u8 helper_neon_mull_u8_mips64 -#define helper_neon_mul_p8 helper_neon_mul_p8_mips64 -#define helper_neon_mul_u16 helper_neon_mul_u16_mips64 -#define helper_neon_mul_u8 helper_neon_mul_u8_mips64 -#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_mips64 -#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_mips64 -#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_mips64 -#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_mips64 -#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_mips64 -#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_mips64 -#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_mips64 -#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_mips64 -#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_mips64 -#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_mips64 -#define helper_neon_narrow_u16 helper_neon_narrow_u16_mips64 -#define helper_neon_narrow_u8 helper_neon_narrow_u8_mips64 -#define helper_neon_negl_u16 helper_neon_negl_u16_mips64 -#define helper_neon_negl_u32 helper_neon_negl_u32_mips64 -#define helper_neon_paddl_u16 helper_neon_paddl_u16_mips64 -#define helper_neon_paddl_u32 helper_neon_paddl_u32_mips64 -#define helper_neon_padd_u16 helper_neon_padd_u16_mips64 -#define helper_neon_padd_u8 helper_neon_padd_u8_mips64 -#define helper_neon_pmax_s16 helper_neon_pmax_s16_mips64 -#define helper_neon_pmax_s8 helper_neon_pmax_s8_mips64 -#define helper_neon_pmax_u16 helper_neon_pmax_u16_mips64 -#define helper_neon_pmax_u8 helper_neon_pmax_u8_mips64 -#define helper_neon_pmin_s16 helper_neon_pmin_s16_mips64 -#define helper_neon_pmin_s8 helper_neon_pmin_s8_mips64 -#define helper_neon_pmin_u16 helper_neon_pmin_u16_mips64 -#define helper_neon_pmin_u8 helper_neon_pmin_u8_mips64 -#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_mips64 -#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_mips64 -#define helper_neon_qabs_s16 helper_neon_qabs_s16_mips64 -#define helper_neon_qabs_s32 helper_neon_qabs_s32_mips64 -#define helper_neon_qabs_s64 helper_neon_qabs_s64_mips64 -#define helper_neon_qabs_s8 helper_neon_qabs_s8_mips64 
-#define helper_neon_qadd_s16 helper_neon_qadd_s16_mips64 -#define helper_neon_qadd_s32 helper_neon_qadd_s32_mips64 -#define helper_neon_qadd_s64 helper_neon_qadd_s64_mips64 -#define helper_neon_qadd_s8 helper_neon_qadd_s8_mips64 -#define helper_neon_qadd_u16 helper_neon_qadd_u16_mips64 -#define helper_neon_qadd_u32 helper_neon_qadd_u32_mips64 -#define helper_neon_qadd_u64 helper_neon_qadd_u64_mips64 -#define helper_neon_qadd_u8 helper_neon_qadd_u8_mips64 -#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_mips64 -#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_mips64 -#define helper_neon_qneg_s16 helper_neon_qneg_s16_mips64 -#define helper_neon_qneg_s32 helper_neon_qneg_s32_mips64 -#define helper_neon_qneg_s64 helper_neon_qneg_s64_mips64 -#define helper_neon_qneg_s8 helper_neon_qneg_s8_mips64 -#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_mips64 -#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_mips64 -#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_mips64 -#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_mips64 -#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_mips64 -#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_mips64 -#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_mips64 -#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_mips64 -#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_mips64 -#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_mips64 -#define helper_neon_qshl_s16 helper_neon_qshl_s16_mips64 -#define helper_neon_qshl_s32 helper_neon_qshl_s32_mips64 -#define helper_neon_qshl_s64 helper_neon_qshl_s64_mips64 -#define helper_neon_qshl_s8 helper_neon_qshl_s8_mips64 -#define helper_neon_qshl_u16 helper_neon_qshl_u16_mips64 -#define helper_neon_qshl_u32 helper_neon_qshl_u32_mips64 -#define helper_neon_qshl_u64 helper_neon_qshl_u64_mips64 -#define helper_neon_qshl_u8 helper_neon_qshl_u8_mips64 -#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_mips64 -#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_mips64 -#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_mips64 -#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_mips64 -#define helper_neon_qsub_s16 helper_neon_qsub_s16_mips64 -#define helper_neon_qsub_s32 helper_neon_qsub_s32_mips64 -#define helper_neon_qsub_s64 helper_neon_qsub_s64_mips64 -#define helper_neon_qsub_s8 helper_neon_qsub_s8_mips64 -#define helper_neon_qsub_u16 helper_neon_qsub_u16_mips64 -#define helper_neon_qsub_u32 helper_neon_qsub_u32_mips64 -#define helper_neon_qsub_u64 helper_neon_qsub_u64_mips64 -#define helper_neon_qsub_u8 helper_neon_qsub_u8_mips64 -#define helper_neon_qunzip16 helper_neon_qunzip16_mips64 -#define helper_neon_qunzip32 helper_neon_qunzip32_mips64 -#define helper_neon_qunzip8 helper_neon_qunzip8_mips64 -#define helper_neon_qzip16 helper_neon_qzip16_mips64 -#define helper_neon_qzip32 helper_neon_qzip32_mips64 -#define helper_neon_qzip8 helper_neon_qzip8_mips64 -#define helper_neon_rbit_u8 helper_neon_rbit_u8_mips64 -#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_mips64 -#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_mips64 -#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_mips64 -#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_mips64 -#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_mips64 -#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_mips64 -#define helper_neon_rshl_s16 helper_neon_rshl_s16_mips64 -#define helper_neon_rshl_s32 helper_neon_rshl_s32_mips64 -#define helper_neon_rshl_s64 helper_neon_rshl_s64_mips64 -#define helper_neon_rshl_s8 helper_neon_rshl_s8_mips64 
-#define helper_neon_rshl_u16 helper_neon_rshl_u16_mips64 -#define helper_neon_rshl_u32 helper_neon_rshl_u32_mips64 -#define helper_neon_rshl_u64 helper_neon_rshl_u64_mips64 -#define helper_neon_rshl_u8 helper_neon_rshl_u8_mips64 -#define helper_neon_shl_s16 helper_neon_shl_s16_mips64 -#define helper_neon_shl_s32 helper_neon_shl_s32_mips64 -#define helper_neon_shl_s64 helper_neon_shl_s64_mips64 -#define helper_neon_shl_s8 helper_neon_shl_s8_mips64 -#define helper_neon_shl_u16 helper_neon_shl_u16_mips64 -#define helper_neon_shl_u32 helper_neon_shl_u32_mips64 -#define helper_neon_shl_u64 helper_neon_shl_u64_mips64 -#define helper_neon_shl_u8 helper_neon_shl_u8_mips64 -#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_mips64 -#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_mips64 -#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_mips64 -#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_mips64 -#define helper_neon_subl_u16 helper_neon_subl_u16_mips64 -#define helper_neon_subl_u32 helper_neon_subl_u32_mips64 -#define helper_neon_sub_u16 helper_neon_sub_u16_mips64 -#define helper_neon_sub_u8 helper_neon_sub_u8_mips64 -#define helper_neon_tbl helper_neon_tbl_mips64 -#define helper_neon_tst_u16 helper_neon_tst_u16_mips64 -#define helper_neon_tst_u32 helper_neon_tst_u32_mips64 -#define helper_neon_tst_u8 helper_neon_tst_u8_mips64 -#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_mips64 -#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_mips64 -#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_mips64 -#define helper_neon_unzip16 helper_neon_unzip16_mips64 -#define helper_neon_unzip8 helper_neon_unzip8_mips64 -#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_mips64 -#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_mips64 -#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_mips64 -#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_mips64 -#define helper_neon_widen_s16 helper_neon_widen_s16_mips64 -#define helper_neon_widen_s8 helper_neon_widen_s8_mips64 -#define helper_neon_widen_u16 helper_neon_widen_u16_mips64 -#define helper_neon_widen_u8 helper_neon_widen_u8_mips64 -#define helper_neon_zip16 helper_neon_zip16_mips64 -#define helper_neon_zip8 helper_neon_zip8_mips64 -#define helper_pre_hvc helper_pre_hvc_mips64 -#define helper_pre_smc helper_pre_smc_mips64 -#define helper_qadd16 helper_qadd16_mips64 -#define helper_qadd8 helper_qadd8_mips64 -#define helper_qaddsubx helper_qaddsubx_mips64 -#define helper_qsub16 helper_qsub16_mips64 -#define helper_qsub8 helper_qsub8_mips64 -#define helper_qsubaddx helper_qsubaddx_mips64 -#define helper_rbit helper_rbit_mips64 -#define helper_recpe_f32 helper_recpe_f32_mips64 -#define helper_recpe_f64 helper_recpe_f64_mips64 -#define helper_recpe_u32 helper_recpe_u32_mips64 -#define helper_recps_f32 helper_recps_f32_mips64 -#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_mips64 -#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips64 -#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips64 -#define helper_ret_stb_mmu helper_ret_stb_mmu_mips64 -#define helper_rintd helper_rintd_mips64 -#define helper_rintd_exact helper_rintd_exact_mips64 -#define helper_rints helper_rints_mips64 -#define helper_rints_exact helper_rints_exact_mips64 -#define helper_ror_cc helper_ror_cc_mips64 -#define helper_rsqrte_f32 helper_rsqrte_f32_mips64 -#define helper_rsqrte_f64 helper_rsqrte_f64_mips64 -#define helper_rsqrte_u32 helper_rsqrte_u32_mips64 -#define helper_rsqrts_f32 helper_rsqrts_f32_mips64 -#define helper_sadd16 helper_sadd16_mips64 
-#define helper_sadd8 helper_sadd8_mips64 -#define helper_saddsubx helper_saddsubx_mips64 -#define helper_sar_cc helper_sar_cc_mips64 -#define helper_sdiv helper_sdiv_mips64 -#define helper_sel_flags helper_sel_flags_mips64 -#define helper_set_cp_reg helper_set_cp_reg_mips64 -#define helper_set_cp_reg64 helper_set_cp_reg64_mips64 -#define helper_set_neon_rmode helper_set_neon_rmode_mips64 -#define helper_set_r13_banked helper_set_r13_banked_mips64 -#define helper_set_rmode helper_set_rmode_mips64 -#define helper_set_user_reg helper_set_user_reg_mips64 -#define helper_shadd16 helper_shadd16_mips64 -#define helper_shadd8 helper_shadd8_mips64 -#define helper_shaddsubx helper_shaddsubx_mips64 -#define helper_shl_cc helper_shl_cc_mips64 -#define helper_shr_cc helper_shr_cc_mips64 -#define helper_shsub16 helper_shsub16_mips64 -#define helper_shsub8 helper_shsub8_mips64 -#define helper_shsubaddx helper_shsubaddx_mips64 -#define helper_ssat helper_ssat_mips64 -#define helper_ssat16 helper_ssat16_mips64 -#define helper_ssub16 helper_ssub16_mips64 -#define helper_ssub8 helper_ssub8_mips64 -#define helper_ssubaddx helper_ssubaddx_mips64 -#define helper_stb_mmu helper_stb_mmu_mips64 -#define helper_stl_mmu helper_stl_mmu_mips64 -#define helper_stq_mmu helper_stq_mmu_mips64 -#define helper_stw_mmu helper_stw_mmu_mips64 -#define helper_sub_saturate helper_sub_saturate_mips64 -#define helper_sub_usaturate helper_sub_usaturate_mips64 -#define helper_sxtb16 helper_sxtb16_mips64 -#define helper_uadd16 helper_uadd16_mips64 -#define helper_uadd8 helper_uadd8_mips64 -#define helper_uaddsubx helper_uaddsubx_mips64 -#define helper_udiv helper_udiv_mips64 -#define helper_uhadd16 helper_uhadd16_mips64 -#define helper_uhadd8 helper_uhadd8_mips64 -#define helper_uhaddsubx helper_uhaddsubx_mips64 -#define helper_uhsub16 helper_uhsub16_mips64 -#define helper_uhsub8 helper_uhsub8_mips64 -#define helper_uhsubaddx helper_uhsubaddx_mips64 -#define helper_uqadd16 helper_uqadd16_mips64 -#define helper_uqadd8 helper_uqadd8_mips64 -#define helper_uqaddsubx helper_uqaddsubx_mips64 -#define helper_uqsub16 helper_uqsub16_mips64 -#define helper_uqsub8 helper_uqsub8_mips64 -#define helper_uqsubaddx helper_uqsubaddx_mips64 -#define helper_usad8 helper_usad8_mips64 -#define helper_usat helper_usat_mips64 -#define helper_usat16 helper_usat16_mips64 -#define helper_usub16 helper_usub16_mips64 -#define helper_usub8 helper_usub8_mips64 -#define helper_usubaddx helper_usubaddx_mips64 -#define helper_uxtb16 helper_uxtb16_mips64 -#define helper_v7m_mrs helper_v7m_mrs_mips64 -#define helper_v7m_msr helper_v7m_msr_mips64 -#define helper_vfp_absd helper_vfp_absd_mips64 -#define helper_vfp_abss helper_vfp_abss_mips64 -#define helper_vfp_addd helper_vfp_addd_mips64 -#define helper_vfp_adds helper_vfp_adds_mips64 -#define helper_vfp_cmpd helper_vfp_cmpd_mips64 -#define helper_vfp_cmped helper_vfp_cmped_mips64 -#define helper_vfp_cmpes helper_vfp_cmpes_mips64 -#define helper_vfp_cmps helper_vfp_cmps_mips64 -#define helper_vfp_divd helper_vfp_divd_mips64 -#define helper_vfp_divs helper_vfp_divs_mips64 -#define helper_vfp_fcvtds helper_vfp_fcvtds_mips64 -#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_mips64 -#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_mips64 -#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_mips64 -#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_mips64 -#define helper_vfp_fcvtsd helper_vfp_fcvtsd_mips64 -#define helper_vfp_get_fpscr helper_vfp_get_fpscr_mips64 -#define 
helper_vfp_maxd helper_vfp_maxd_mips64 -#define helper_vfp_maxnumd helper_vfp_maxnumd_mips64 -#define helper_vfp_maxnums helper_vfp_maxnums_mips64 -#define helper_vfp_maxs helper_vfp_maxs_mips64 -#define helper_vfp_mind helper_vfp_mind_mips64 -#define helper_vfp_minnumd helper_vfp_minnumd_mips64 -#define helper_vfp_minnums helper_vfp_minnums_mips64 -#define helper_vfp_mins helper_vfp_mins_mips64 -#define helper_vfp_muladdd helper_vfp_muladdd_mips64 -#define helper_vfp_muladds helper_vfp_muladds_mips64 -#define helper_vfp_muld helper_vfp_muld_mips64 -#define helper_vfp_muls helper_vfp_muls_mips64 -#define helper_vfp_negd helper_vfp_negd_mips64 -#define helper_vfp_negs helper_vfp_negs_mips64 -#define helper_vfp_set_fpscr helper_vfp_set_fpscr_mips64 -#define helper_vfp_shtod helper_vfp_shtod_mips64 -#define helper_vfp_shtos helper_vfp_shtos_mips64 -#define helper_vfp_sitod helper_vfp_sitod_mips64 -#define helper_vfp_sitos helper_vfp_sitos_mips64 -#define helper_vfp_sltod helper_vfp_sltod_mips64 -#define helper_vfp_sltos helper_vfp_sltos_mips64 -#define helper_vfp_sqrtd helper_vfp_sqrtd_mips64 -#define helper_vfp_sqrts helper_vfp_sqrts_mips64 -#define helper_vfp_sqtod helper_vfp_sqtod_mips64 -#define helper_vfp_sqtos helper_vfp_sqtos_mips64 -#define helper_vfp_subd helper_vfp_subd_mips64 -#define helper_vfp_subs helper_vfp_subs_mips64 -#define helper_vfp_toshd helper_vfp_toshd_mips64 -#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_mips64 -#define helper_vfp_toshs helper_vfp_toshs_mips64 -#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_mips64 -#define helper_vfp_tosid helper_vfp_tosid_mips64 -#define helper_vfp_tosis helper_vfp_tosis_mips64 -#define helper_vfp_tosizd helper_vfp_tosizd_mips64 -#define helper_vfp_tosizs helper_vfp_tosizs_mips64 -#define helper_vfp_tosld helper_vfp_tosld_mips64 -#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_mips64 -#define helper_vfp_tosls helper_vfp_tosls_mips64 -#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_mips64 -#define helper_vfp_tosqd helper_vfp_tosqd_mips64 -#define helper_vfp_tosqs helper_vfp_tosqs_mips64 -#define helper_vfp_touhd helper_vfp_touhd_mips64 -#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_mips64 -#define helper_vfp_touhs helper_vfp_touhs_mips64 -#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_mips64 -#define helper_vfp_touid helper_vfp_touid_mips64 -#define helper_vfp_touis helper_vfp_touis_mips64 -#define helper_vfp_touizd helper_vfp_touizd_mips64 -#define helper_vfp_touizs helper_vfp_touizs_mips64 -#define helper_vfp_tould helper_vfp_tould_mips64 -#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_mips64 -#define helper_vfp_touls helper_vfp_touls_mips64 -#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_mips64 -#define helper_vfp_touqd helper_vfp_touqd_mips64 -#define helper_vfp_touqs helper_vfp_touqs_mips64 -#define helper_vfp_uhtod helper_vfp_uhtod_mips64 -#define helper_vfp_uhtos helper_vfp_uhtos_mips64 -#define helper_vfp_uitod helper_vfp_uitod_mips64 -#define helper_vfp_uitos helper_vfp_uitos_mips64 -#define helper_vfp_ultod helper_vfp_ultod_mips64 -#define helper_vfp_ultos helper_vfp_ultos_mips64 -#define helper_vfp_uqtod helper_vfp_uqtod_mips64 -#define helper_vfp_uqtos helper_vfp_uqtos_mips64 -#define helper_wfe helper_wfe_mips64 -#define helper_wfi helper_wfi_mips64 -#define hex2decimal hex2decimal_mips64 -#define hw_breakpoint_update 
hw_breakpoint_update_mips64 -#define hw_breakpoint_update_all hw_breakpoint_update_all_mips64 -#define hw_watchpoint_update hw_watchpoint_update_mips64 -#define hw_watchpoint_update_all hw_watchpoint_update_all_mips64 -#define _init _init_mips64 -#define init_cpreg_list init_cpreg_list_mips64 -#define init_lists init_lists_mips64 -#define input_type_enum input_type_enum_mips64 -#define int128_2_64 int128_2_64_mips64 -#define int128_add int128_add_mips64 -#define int128_addto int128_addto_mips64 -#define int128_and int128_and_mips64 -#define int128_eq int128_eq_mips64 -#define int128_ge int128_ge_mips64 -#define int128_get64 int128_get64_mips64 -#define int128_gt int128_gt_mips64 -#define int128_le int128_le_mips64 -#define int128_lt int128_lt_mips64 -#define int128_make64 int128_make64_mips64 -#define int128_max int128_max_mips64 -#define int128_min int128_min_mips64 -#define int128_ne int128_ne_mips64 -#define int128_neg int128_neg_mips64 -#define int128_nz int128_nz_mips64 -#define int128_rshift int128_rshift_mips64 -#define int128_sub int128_sub_mips64 -#define int128_subfrom int128_subfrom_mips64 -#define int128_zero int128_zero_mips64 -#define int16_to_float32 int16_to_float32_mips64 -#define int16_to_float64 int16_to_float64_mips64 -#define int32_to_float128 int32_to_float128_mips64 -#define int32_to_float32 int32_to_float32_mips64 -#define int32_to_float64 int32_to_float64_mips64 -#define int32_to_floatx80 int32_to_floatx80_mips64 -#define int64_to_float128 int64_to_float128_mips64 -#define int64_to_float32 int64_to_float32_mips64 -#define int64_to_float64 int64_to_float64_mips64 -#define int64_to_floatx80 int64_to_floatx80_mips64 -#define invalidate_and_set_dirty invalidate_and_set_dirty_mips64 -#define invalidate_page_bitmap invalidate_page_bitmap_mips64 -#define io_mem_read io_mem_read_mips64 -#define io_mem_write io_mem_write_mips64 -#define io_readb io_readb_mips64 -#define io_readl io_readl_mips64 -#define io_readq io_readq_mips64 -#define io_readw io_readw_mips64 -#define iotlb_to_region iotlb_to_region_mips64 -#define io_writeb io_writeb_mips64 -#define io_writel io_writel_mips64 -#define io_writeq io_writeq_mips64 -#define io_writew io_writew_mips64 -#define is_a64 is_a64_mips64 -#define is_help_option is_help_option_mips64 -#define isr_read isr_read_mips64 -#define is_valid_option_list is_valid_option_list_mips64 -#define iwmmxt_load_creg iwmmxt_load_creg_mips64 -#define iwmmxt_load_reg iwmmxt_load_reg_mips64 -#define iwmmxt_store_creg iwmmxt_store_creg_mips64 -#define iwmmxt_store_reg iwmmxt_store_reg_mips64 -#define __jit_debug_descriptor __jit_debug_descriptor_mips64 -#define __jit_debug_register_code __jit_debug_register_code_mips64 -#define kvm_to_cpreg_id kvm_to_cpreg_id_mips64 -#define last_ram_offset last_ram_offset_mips64 -#define ldl_be_p ldl_be_p_mips64 -#define ldl_be_phys ldl_be_phys_mips64 -#define ldl_he_p ldl_he_p_mips64 -#define ldl_le_p ldl_le_p_mips64 -#define ldl_le_phys ldl_le_phys_mips64 -#define ldl_phys ldl_phys_mips64 -#define ldl_phys_internal ldl_phys_internal_mips64 -#define ldq_be_p ldq_be_p_mips64 -#define ldq_be_phys ldq_be_phys_mips64 -#define ldq_he_p ldq_he_p_mips64 -#define ldq_le_p ldq_le_p_mips64 -#define ldq_le_phys ldq_le_phys_mips64 -#define ldq_phys ldq_phys_mips64 -#define ldq_phys_internal ldq_phys_internal_mips64 -#define ldst_name ldst_name_mips64 -#define ldub_p ldub_p_mips64 -#define ldub_phys ldub_phys_mips64 -#define lduw_be_p lduw_be_p_mips64 -#define lduw_be_phys lduw_be_phys_mips64 -#define lduw_he_p lduw_he_p_mips64 
-#define lduw_le_p lduw_le_p_mips64
-#define lduw_le_phys lduw_le_phys_mips64
-#define lduw_phys lduw_phys_mips64
-#define lduw_phys_internal lduw_phys_internal_mips64
-#define le128 le128_mips64
-#define linked_bp_matches linked_bp_matches_mips64
-#define listener_add_address_space listener_add_address_space_mips64
-#define load_cpu_offset load_cpu_offset_mips64
-#define load_reg load_reg_mips64
-#define load_reg_var load_reg_var_mips64
-#define log_cpu_state log_cpu_state_mips64
-#define lpae_cp_reginfo lpae_cp_reginfo_mips64
-#define lt128 lt128_mips64
-#define machine_class_init machine_class_init_mips64
-#define machine_finalize machine_finalize_mips64
-#define machine_info machine_info_mips64
-#define machine_initfn machine_initfn_mips64
-#define machine_register_types machine_register_types_mips64
-#define machvirt_init machvirt_init_mips64
-#define machvirt_machine_init machvirt_machine_init_mips64
-#define maj maj_mips64
-#define mapping_conflict mapping_conflict_mips64
-#define mapping_contiguous mapping_contiguous_mips64
-#define mapping_have_same_region mapping_have_same_region_mips64
-#define mapping_merge mapping_merge_mips64
-#define mem_add mem_add_mips64
-#define mem_begin mem_begin_mips64
-#define mem_commit mem_commit_mips64
-#define memory_access_is_direct memory_access_is_direct_mips64
-#define memory_access_size memory_access_size_mips64
-#define memory_init memory_init_mips64
-#define memory_listener_match memory_listener_match_mips64
-#define memory_listener_register memory_listener_register_mips64
-#define memory_listener_unregister memory_listener_unregister_mips64
-#define memory_map_init memory_map_init_mips64
-#define memory_mapping_filter memory_mapping_filter_mips64
-#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_mips64
-#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips64
-#define memory_mapping_list_free memory_mapping_list_free_mips64
-#define memory_mapping_list_init memory_mapping_list_init_mips64
-#define memory_region_access_valid memory_region_access_valid_mips64
-#define memory_region_add_subregion memory_region_add_subregion_mips64
-#define memory_region_add_subregion_common memory_region_add_subregion_common_mips64
-#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mips64
-#define memory_region_big_endian memory_region_big_endian_mips64
-#define memory_region_clear_pending memory_region_clear_pending_mips64
-#define memory_region_del_subregion memory_region_del_subregion_mips64
-#define memory_region_destructor_alias memory_region_destructor_alias_mips64
-#define memory_region_destructor_none memory_region_destructor_none_mips64
-#define memory_region_destructor_ram memory_region_destructor_ram_mips64
-#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_mips64
-#define memory_region_dispatch_read memory_region_dispatch_read_mips64
-#define memory_region_dispatch_read1 memory_region_dispatch_read1_mips64
-#define memory_region_dispatch_write memory_region_dispatch_write_mips64
-#define memory_region_escape_name memory_region_escape_name_mips64
-#define memory_region_finalize memory_region_finalize_mips64
-#define memory_region_find memory_region_find_mips64
-#define memory_region_get_addr memory_region_get_addr_mips64
-#define memory_region_get_alignment memory_region_get_alignment_mips64
-#define memory_region_get_container memory_region_get_container_mips64
-#define memory_region_get_fd memory_region_get_fd_mips64
-#define memory_region_get_may_overlap memory_region_get_may_overlap_mips64
-#define memory_region_get_priority memory_region_get_priority_mips64
-#define memory_region_get_ram_addr memory_region_get_ram_addr_mips64
-#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips64
-#define memory_region_get_size memory_region_get_size_mips64
-#define memory_region_info memory_region_info_mips64
-#define memory_region_init memory_region_init_mips64
-#define memory_region_init_alias memory_region_init_alias_mips64
-#define memory_region_initfn memory_region_initfn_mips64
-#define memory_region_init_io memory_region_init_io_mips64
-#define memory_region_init_ram memory_region_init_ram_mips64
-#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips64
-#define memory_region_init_reservation memory_region_init_reservation_mips64
-#define memory_region_is_iommu memory_region_is_iommu_mips64
-#define memory_region_is_logging memory_region_is_logging_mips64
-#define memory_region_is_mapped memory_region_is_mapped_mips64
-#define memory_region_is_ram memory_region_is_ram_mips64
-#define memory_region_is_rom memory_region_is_rom_mips64
-#define memory_region_is_romd memory_region_is_romd_mips64
-#define memory_region_is_skip_dump memory_region_is_skip_dump_mips64
-#define memory_region_is_unassigned memory_region_is_unassigned_mips64
-#define memory_region_name memory_region_name_mips64
-#define memory_region_need_escape memory_region_need_escape_mips64
-#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_mips64
-#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_mips64
-#define memory_region_present memory_region_present_mips64
-#define memory_region_read_accessor memory_region_read_accessor_mips64
-#define memory_region_readd_subregion memory_region_readd_subregion_mips64
-#define memory_region_ref memory_region_ref_mips64
-#define memory_region_resolve_container memory_region_resolve_container_mips64
-#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_mips64
-#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips64
-#define memory_region_set_address memory_region_set_address_mips64
-#define memory_region_set_alias_offset memory_region_set_alias_offset_mips64
-#define memory_region_set_enabled memory_region_set_enabled_mips64
-#define memory_region_set_readonly memory_region_set_readonly_mips64
-#define memory_region_set_skip_dump memory_region_set_skip_dump_mips64
-#define memory_region_size memory_region_size_mips64
-#define memory_region_to_address_space memory_region_to_address_space_mips64
-#define memory_region_transaction_begin memory_region_transaction_begin_mips64
-#define memory_region_transaction_commit memory_region_transaction_commit_mips64
-#define memory_region_unref memory_region_unref_mips64
-#define memory_region_update_container_subregions memory_region_update_container_subregions_mips64
-#define memory_region_write_accessor memory_region_write_accessor_mips64
-#define memory_region_wrong_endianness memory_region_wrong_endianness_mips64
-#define memory_try_enable_merging memory_try_enable_merging_mips64
-#define module_call_init module_call_init_mips64
-#define module_load module_load_mips64
-#define mpidr_cp_reginfo mpidr_cp_reginfo_mips64
-#define mpidr_read mpidr_read_mips64
-#define msr_mask msr_mask_mips64
-#define mul128By64To192 mul128By64To192_mips64
-#define mul128To256 mul128To256_mips64
-#define mul64To128 mul64To128_mips64
-#define muldiv64 muldiv64_mips64
-#define neon_2rm_is_float_op neon_2rm_is_float_op_mips64
-#define neon_2rm_sizes neon_2rm_sizes_mips64
-#define neon_3r_sizes neon_3r_sizes_mips64
-#define neon_get_scalar neon_get_scalar_mips64
-#define neon_load_reg neon_load_reg_mips64
-#define neon_load_reg64 neon_load_reg64_mips64
-#define neon_load_scratch neon_load_scratch_mips64
-#define neon_ls_element_type neon_ls_element_type_mips64
-#define neon_reg_offset neon_reg_offset_mips64
-#define neon_store_reg neon_store_reg_mips64
-#define neon_store_reg64 neon_store_reg64_mips64
-#define neon_store_scratch neon_store_scratch_mips64
-#define new_ldst_label new_ldst_label_mips64
-#define next_list next_list_mips64
-#define normalizeFloat128Subnormal normalizeFloat128Subnormal_mips64
-#define normalizeFloat16Subnormal normalizeFloat16Subnormal_mips64
-#define normalizeFloat32Subnormal normalizeFloat32Subnormal_mips64
-#define normalizeFloat64Subnormal normalizeFloat64Subnormal_mips64
-#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips64
-#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_mips64
-#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_mips64
-#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_mips64
-#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips64
-#define not_v6_cp_reginfo not_v6_cp_reginfo_mips64
-#define not_v7_cp_reginfo not_v7_cp_reginfo_mips64
-#define not_v8_cp_reginfo not_v8_cp_reginfo_mips64
-#define object_child_foreach object_child_foreach_mips64
-#define object_class_foreach object_class_foreach_mips64
-#define object_class_foreach_tramp object_class_foreach_tramp_mips64
-#define object_class_get_list object_class_get_list_mips64
-#define object_class_get_list_tramp object_class_get_list_tramp_mips64
-#define object_class_get_parent object_class_get_parent_mips64
-#define object_deinit object_deinit_mips64
-#define object_dynamic_cast object_dynamic_cast_mips64
-#define object_finalize object_finalize_mips64
-#define object_finalize_child_property object_finalize_child_property_mips64
-#define object_get_child_property object_get_child_property_mips64
-#define object_get_link_property object_get_link_property_mips64
-#define object_get_root object_get_root_mips64
-#define object_initialize_with_type object_initialize_with_type_mips64
-#define object_init_with_type object_init_with_type_mips64
-#define object_instance_init object_instance_init_mips64
-#define object_new_with_type object_new_with_type_mips64
-#define object_post_init_with_type object_post_init_with_type_mips64
-#define object_property_add_alias object_property_add_alias_mips64
-#define object_property_add_link object_property_add_link_mips64
-#define object_property_add_uint16_ptr object_property_add_uint16_ptr_mips64
-#define object_property_add_uint32_ptr object_property_add_uint32_ptr_mips64
-#define object_property_add_uint64_ptr object_property_add_uint64_ptr_mips64
-#define object_property_add_uint8_ptr object_property_add_uint8_ptr_mips64
-#define object_property_allow_set_link object_property_allow_set_link_mips64
-#define object_property_del object_property_del_mips64
-#define object_property_del_all object_property_del_all_mips64
-#define object_property_find object_property_find_mips64
-#define object_property_get object_property_get_mips64
-#define object_property_get_bool object_property_get_bool_mips64
-#define object_property_get_int object_property_get_int_mips64
-#define object_property_get_link object_property_get_link_mips64
-#define object_property_get_qobject object_property_get_qobject_mips64
-#define object_property_get_str object_property_get_str_mips64
-#define object_property_get_type object_property_get_type_mips64
-#define object_property_is_child object_property_is_child_mips64
-#define object_property_set object_property_set_mips64
-#define object_property_set_description object_property_set_description_mips64
-#define object_property_set_link object_property_set_link_mips64
-#define object_property_set_qobject object_property_set_qobject_mips64
-#define object_release_link_property object_release_link_property_mips64
-#define object_resolve_abs_path object_resolve_abs_path_mips64
-#define object_resolve_child_property object_resolve_child_property_mips64
-#define object_resolve_link object_resolve_link_mips64
-#define object_resolve_link_property object_resolve_link_property_mips64
-#define object_resolve_partial_path object_resolve_partial_path_mips64
-#define object_resolve_path object_resolve_path_mips64
-#define object_resolve_path_component object_resolve_path_component_mips64
-#define object_resolve_path_type object_resolve_path_type_mips64
-#define object_set_link_property object_set_link_property_mips64
-#define object_unparent object_unparent_mips64
-#define omap_cachemaint_write omap_cachemaint_write_mips64
-#define omap_cp_reginfo omap_cp_reginfo_mips64
-#define omap_threadid_write omap_threadid_write_mips64
-#define omap_ticonfig_write omap_ticonfig_write_mips64
-#define omap_wfi_write omap_wfi_write_mips64
-#define op_bits op_bits_mips64
-#define open_modeflags open_modeflags_mips64
-#define op_to_mov op_to_mov_mips64
-#define op_to_movi op_to_movi_mips64
-#define output_type_enum output_type_enum_mips64
-#define packFloat128 packFloat128_mips64
-#define packFloat16 packFloat16_mips64
-#define packFloat32 packFloat32_mips64
-#define packFloat64 packFloat64_mips64
-#define packFloatx80 packFloatx80_mips64
-#define page_find page_find_mips64
-#define page_find_alloc page_find_alloc_mips64
-#define page_flush_tb page_flush_tb_mips64
-#define page_flush_tb_1 page_flush_tb_1_mips64
-#define page_init page_init_mips64
-#define page_size_init page_size_init_mips64
-#define par par_mips64
-#define parse_array parse_array_mips64
-#define parse_error parse_error_mips64
-#define parse_escape parse_escape_mips64
-#define parse_keyword parse_keyword_mips64
-#define parse_literal parse_literal_mips64
-#define parse_object parse_object_mips64
-#define parse_optional parse_optional_mips64
-#define parse_option_bool parse_option_bool_mips64
-#define parse_option_number parse_option_number_mips64
-#define parse_option_size parse_option_size_mips64
-#define parse_pair parse_pair_mips64
-#define parser_context_free parser_context_free_mips64
-#define parser_context_new parser_context_new_mips64
-#define parser_context_peek_token parser_context_peek_token_mips64
-#define parser_context_pop_token parser_context_pop_token_mips64
-#define parser_context_restore parser_context_restore_mips64
-#define parser_context_save parser_context_save_mips64
-#define parse_str parse_str_mips64
-#define parse_type_bool parse_type_bool_mips64
-#define parse_type_int parse_type_int_mips64
-#define parse_type_number parse_type_number_mips64
-#define parse_type_size parse_type_size_mips64
-#define parse_type_str parse_type_str_mips64
-#define parse_value parse_value_mips64
-#define par_write par_write_mips64
-#define patch_reloc patch_reloc_mips64
-#define phys_map_node_alloc phys_map_node_alloc_mips64
-#define phys_map_node_reserve phys_map_node_reserve_mips64
-#define phys_mem_alloc phys_mem_alloc_mips64
-#define phys_mem_set_alloc phys_mem_set_alloc_mips64
-#define phys_page_compact phys_page_compact_mips64
-#define phys_page_compact_all phys_page_compact_all_mips64
-#define phys_page_find phys_page_find_mips64
-#define phys_page_set phys_page_set_mips64
-#define phys_page_set_level phys_page_set_level_mips64
-#define phys_section_add phys_section_add_mips64
-#define phys_section_destroy phys_section_destroy_mips64
-#define phys_sections_free phys_sections_free_mips64
-#define pickNaN pickNaN_mips64
-#define pickNaNMulAdd pickNaNMulAdd_mips64
-#define pmccfiltr_write pmccfiltr_write_mips64
-#define pmccntr_read pmccntr_read_mips64
-#define pmccntr_sync pmccntr_sync_mips64
-#define pmccntr_write pmccntr_write_mips64
-#define pmccntr_write32 pmccntr_write32_mips64
-#define pmcntenclr_write pmcntenclr_write_mips64
-#define pmcntenset_write pmcntenset_write_mips64
-#define pmcr_write pmcr_write_mips64
-#define pmintenclr_write pmintenclr_write_mips64
-#define pmintenset_write pmintenset_write_mips64
-#define pmovsr_write pmovsr_write_mips64
-#define pmreg_access pmreg_access_mips64
-#define pmsav5_cp_reginfo pmsav5_cp_reginfo_mips64
-#define pmsav5_data_ap_read pmsav5_data_ap_read_mips64
-#define pmsav5_data_ap_write pmsav5_data_ap_write_mips64
-#define pmsav5_insn_ap_read pmsav5_insn_ap_read_mips64
-#define pmsav5_insn_ap_write pmsav5_insn_ap_write_mips64
-#define pmuserenr_write pmuserenr_write_mips64
-#define pmxevtyper_write pmxevtyper_write_mips64
-#define print_type_bool print_type_bool_mips64
-#define print_type_int print_type_int_mips64
-#define print_type_number print_type_number_mips64
-#define print_type_size print_type_size_mips64
-#define print_type_str print_type_str_mips64
-#define propagateFloat128NaN propagateFloat128NaN_mips64
-#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_mips64
-#define propagateFloat32NaN propagateFloat32NaN_mips64
-#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_mips64
-#define propagateFloat64NaN propagateFloat64NaN_mips64
-#define propagateFloatx80NaN propagateFloatx80NaN_mips64
-#define property_get_alias property_get_alias_mips64
-#define property_get_bool property_get_bool_mips64
-#define property_get_str property_get_str_mips64
-#define property_get_uint16_ptr property_get_uint16_ptr_mips64
-#define property_get_uint32_ptr property_get_uint32_ptr_mips64
-#define property_get_uint64_ptr property_get_uint64_ptr_mips64
-#define property_get_uint8_ptr property_get_uint8_ptr_mips64
-#define property_release_alias property_release_alias_mips64
-#define property_release_bool property_release_bool_mips64
-#define property_release_str property_release_str_mips64
-#define property_resolve_alias property_resolve_alias_mips64
-#define property_set_alias property_set_alias_mips64
-#define property_set_bool property_set_bool_mips64
-#define property_set_str property_set_str_mips64
-#define pstate_read pstate_read_mips64
-#define pstate_write pstate_write_mips64
-#define pxa250_initfn pxa250_initfn_mips64
-#define pxa255_initfn pxa255_initfn_mips64
-#define pxa260_initfn pxa260_initfn_mips64
-#define pxa261_initfn pxa261_initfn_mips64
-#define pxa262_initfn pxa262_initfn_mips64
-#define pxa270a0_initfn pxa270a0_initfn_mips64
-#define pxa270a1_initfn pxa270a1_initfn_mips64
-#define pxa270b0_initfn pxa270b0_initfn_mips64
-#define pxa270b1_initfn pxa270b1_initfn_mips64
-#define pxa270c0_initfn pxa270c0_initfn_mips64
-#define pxa270c5_initfn pxa270c5_initfn_mips64
-#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_mips64
-#define qapi_dealloc_end_list qapi_dealloc_end_list_mips64
-#define qapi_dealloc_end_struct qapi_dealloc_end_struct_mips64
-#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_mips64
-#define qapi_dealloc_next_list qapi_dealloc_next_list_mips64
-#define qapi_dealloc_pop qapi_dealloc_pop_mips64
-#define qapi_dealloc_push qapi_dealloc_push_mips64
-#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_mips64
-#define qapi_dealloc_start_list qapi_dealloc_start_list_mips64
-#define qapi_dealloc_start_struct qapi_dealloc_start_struct_mips64
-#define qapi_dealloc_start_union qapi_dealloc_start_union_mips64
-#define qapi_dealloc_type_bool qapi_dealloc_type_bool_mips64
-#define qapi_dealloc_type_enum qapi_dealloc_type_enum_mips64
-#define qapi_dealloc_type_int qapi_dealloc_type_int_mips64
-#define qapi_dealloc_type_number qapi_dealloc_type_number_mips64
-#define qapi_dealloc_type_size qapi_dealloc_type_size_mips64
-#define qapi_dealloc_type_str qapi_dealloc_type_str_mips64
-#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_mips64
-#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_mips64
-#define qapi_free_boolList qapi_free_boolList_mips64
-#define qapi_free_ErrorClassList qapi_free_ErrorClassList_mips64
-#define qapi_free_int16List qapi_free_int16List_mips64
-#define qapi_free_int32List qapi_free_int32List_mips64
-#define qapi_free_int64List qapi_free_int64List_mips64
-#define qapi_free_int8List qapi_free_int8List_mips64
-#define qapi_free_intList qapi_free_intList_mips64
-#define qapi_free_numberList qapi_free_numberList_mips64
-#define qapi_free_strList qapi_free_strList_mips64
-#define qapi_free_uint16List qapi_free_uint16List_mips64
-#define qapi_free_uint32List qapi_free_uint32List_mips64
-#define qapi_free_uint64List qapi_free_uint64List_mips64
-#define qapi_free_uint8List qapi_free_uint8List_mips64
-#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_mips64
-#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_mips64
-#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_mips64
-#define qbool_destroy_obj qbool_destroy_obj_mips64
-#define qbool_from_int qbool_from_int_mips64
-#define qbool_get_int qbool_get_int_mips64
-#define qbool_type qbool_type_mips64
-#define qbus_create qbus_create_mips64
-#define qbus_create_inplace qbus_create_inplace_mips64
-#define qbus_finalize qbus_finalize_mips64
-#define qbus_initfn qbus_initfn_mips64
-#define qbus_realize qbus_realize_mips64
-#define qdev_create qdev_create_mips64
-#define qdev_get_type qdev_get_type_mips64
-#define qdev_register_types qdev_register_types_mips64
-#define qdev_set_parent_bus qdev_set_parent_bus_mips64
-#define qdev_try_create qdev_try_create_mips64
-#define qdict_add_key qdict_add_key_mips64
-#define qdict_array_split qdict_array_split_mips64
-#define qdict_clone_shallow qdict_clone_shallow_mips64
-#define qdict_del qdict_del_mips64
-#define qdict_destroy_obj qdict_destroy_obj_mips64
-#define qdict_entry_key qdict_entry_key_mips64
-#define qdict_entry_value qdict_entry_value_mips64
-#define qdict_extract_subqdict qdict_extract_subqdict_mips64
-#define qdict_find qdict_find_mips64
-#define qdict_first qdict_first_mips64
-#define qdict_flatten qdict_flatten_mips64
-#define qdict_flatten_qdict qdict_flatten_qdict_mips64
-#define qdict_flatten_qlist qdict_flatten_qlist_mips64
-#define qdict_get qdict_get_mips64
-#define qdict_get_bool qdict_get_bool_mips64
-#define qdict_get_double qdict_get_double_mips64
-#define qdict_get_int qdict_get_int_mips64
-#define qdict_get_obj qdict_get_obj_mips64
-#define qdict_get_qdict qdict_get_qdict_mips64
-#define qdict_get_qlist qdict_get_qlist_mips64
-#define qdict_get_str qdict_get_str_mips64
-#define qdict_get_try_bool qdict_get_try_bool_mips64
-#define qdict_get_try_int qdict_get_try_int_mips64
-#define qdict_get_try_str qdict_get_try_str_mips64
-#define qdict_haskey qdict_haskey_mips64
-#define qdict_has_prefixed_entries qdict_has_prefixed_entries_mips64
-#define qdict_iter qdict_iter_mips64
-#define qdict_join qdict_join_mips64
-#define qdict_new qdict_new_mips64
-#define qdict_next qdict_next_mips64
-#define qdict_next_entry qdict_next_entry_mips64
-#define qdict_put_obj qdict_put_obj_mips64
-#define qdict_size qdict_size_mips64
-#define qdict_type qdict_type_mips64
-#define qemu_clock_get_us qemu_clock_get_us_mips64
-#define qemu_clock_ptr qemu_clock_ptr_mips64
-#define qemu_clocks qemu_clocks_mips64
-#define qemu_get_cpu qemu_get_cpu_mips64
-#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_mips64
-#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_mips64
-#define qemu_get_ram_block qemu_get_ram_block_mips64
-#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_mips64
-#define qemu_get_ram_fd qemu_get_ram_fd_mips64
-#define qemu_get_ram_ptr qemu_get_ram_ptr_mips64
-#define qemu_host_page_mask qemu_host_page_mask_mips64
-#define qemu_host_page_size qemu_host_page_size_mips64
-#define qemu_init_vcpu qemu_init_vcpu_mips64
-#define qemu_ld_helpers qemu_ld_helpers_mips64
-#define qemu_log_close qemu_log_close_mips64
-#define qemu_log_enabled qemu_log_enabled_mips64
-#define qemu_log_flush qemu_log_flush_mips64
-#define qemu_loglevel_mask qemu_loglevel_mask_mips64
-#define qemu_log_vprintf qemu_log_vprintf_mips64
-#define qemu_oom_check qemu_oom_check_mips64
-#define qemu_parse_fd qemu_parse_fd_mips64
-#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips64
-#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_mips64
-#define qemu_ram_alloc qemu_ram_alloc_mips64
-#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips64
-#define qemu_ram_foreach_block qemu_ram_foreach_block_mips64
-#define qemu_ram_free qemu_ram_free_mips64
-#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_mips64
-#define qemu_ram_ptr_length qemu_ram_ptr_length_mips64
-#define qemu_ram_remap qemu_ram_remap_mips64
-#define qemu_ram_setup_dump qemu_ram_setup_dump_mips64
-#define qemu_ram_unset_idstr qemu_ram_unset_idstr_mips64
-#define qemu_real_host_page_size qemu_real_host_page_size_mips64
-#define qemu_st_helpers qemu_st_helpers_mips64
-#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_mips64
-#define qemu_try_memalign qemu_try_memalign_mips64
-#define qentry_destroy qentry_destroy_mips64
-#define qerror_human qerror_human_mips64
-#define qerror_report qerror_report_mips64
-#define qerror_report_err qerror_report_err_mips64
-#define qfloat_destroy_obj qfloat_destroy_obj_mips64
-#define qfloat_from_double qfloat_from_double_mips64
-#define qfloat_get_double qfloat_get_double_mips64
-#define qfloat_type qfloat_type_mips64
-#define qint_destroy_obj qint_destroy_obj_mips64
-#define qint_from_int qint_from_int_mips64
-#define qint_get_int qint_get_int_mips64
-#define qint_type qint_type_mips64
-#define qlist_append_obj qlist_append_obj_mips64
-#define qlist_copy qlist_copy_mips64
-#define qlist_copy_elem qlist_copy_elem_mips64
-#define qlist_destroy_obj qlist_destroy_obj_mips64
-#define qlist_empty qlist_empty_mips64
-#define qlist_entry_obj qlist_entry_obj_mips64
-#define qlist_first qlist_first_mips64
-#define qlist_iter qlist_iter_mips64
-#define qlist_new qlist_new_mips64
-#define qlist_next qlist_next_mips64
-#define qlist_peek qlist_peek_mips64
-#define qlist_pop qlist_pop_mips64
-#define qlist_size qlist_size_mips64
-#define qlist_size_iter qlist_size_iter_mips64
-#define qlist_type qlist_type_mips64
-#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_mips64
-#define qmp_input_end_list qmp_input_end_list_mips64
-#define qmp_input_end_struct qmp_input_end_struct_mips64
-#define qmp_input_get_next_type qmp_input_get_next_type_mips64
-#define qmp_input_get_object qmp_input_get_object_mips64
-#define qmp_input_get_visitor qmp_input_get_visitor_mips64
-#define qmp_input_next_list qmp_input_next_list_mips64
-#define qmp_input_optional qmp_input_optional_mips64
-#define qmp_input_pop qmp_input_pop_mips64
-#define qmp_input_push qmp_input_push_mips64
-#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_mips64
-#define qmp_input_start_list qmp_input_start_list_mips64
-#define qmp_input_start_struct qmp_input_start_struct_mips64
-#define qmp_input_type_bool qmp_input_type_bool_mips64
-#define qmp_input_type_int qmp_input_type_int_mips64
-#define qmp_input_type_number qmp_input_type_number_mips64
-#define qmp_input_type_str qmp_input_type_str_mips64
-#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_mips64
-#define qmp_input_visitor_new qmp_input_visitor_new_mips64
-#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_mips64
-#define qmp_output_add_obj qmp_output_add_obj_mips64
-#define qmp_output_end_list qmp_output_end_list_mips64
-#define qmp_output_end_struct qmp_output_end_struct_mips64
-#define qmp_output_first qmp_output_first_mips64
-#define qmp_output_get_qobject qmp_output_get_qobject_mips64
-#define qmp_output_get_visitor qmp_output_get_visitor_mips64
-#define qmp_output_last qmp_output_last_mips64
-#define qmp_output_next_list qmp_output_next_list_mips64
-#define qmp_output_pop qmp_output_pop_mips64
-#define qmp_output_push_obj qmp_output_push_obj_mips64
-#define qmp_output_start_list qmp_output_start_list_mips64
-#define qmp_output_start_struct qmp_output_start_struct_mips64
-#define qmp_output_type_bool qmp_output_type_bool_mips64
-#define qmp_output_type_int qmp_output_type_int_mips64
-#define qmp_output_type_number qmp_output_type_number_mips64
-#define qmp_output_type_str qmp_output_type_str_mips64
-#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_mips64
-#define qmp_output_visitor_new qmp_output_visitor_new_mips64
-#define qobject_decref qobject_decref_mips64
-#define qobject_to_qbool qobject_to_qbool_mips64
-#define qobject_to_qdict qobject_to_qdict_mips64
-#define qobject_to_qfloat qobject_to_qfloat_mips64
-#define qobject_to_qint qobject_to_qint_mips64
-#define qobject_to_qlist qobject_to_qlist_mips64
-#define qobject_to_qstring qobject_to_qstring_mips64
-#define qobject_type qobject_type_mips64
-#define qstring_append qstring_append_mips64
-#define qstring_append_chr qstring_append_chr_mips64
-#define qstring_append_int qstring_append_int_mips64
-#define qstring_destroy_obj qstring_destroy_obj_mips64
-#define qstring_from_escaped_str qstring_from_escaped_str_mips64
-#define qstring_from_str qstring_from_str_mips64
-#define qstring_from_substr qstring_from_substr_mips64
-#define qstring_get_length qstring_get_length_mips64
-#define qstring_get_str qstring_get_str_mips64
-#define qstring_new qstring_new_mips64
-#define qstring_type qstring_type_mips64
-#define ram_block_add ram_block_add_mips64
-#define ram_size ram_size_mips64
-#define range_compare range_compare_mips64
-#define range_covers_byte range_covers_byte_mips64
-#define range_get_last range_get_last_mips64
-#define range_merge range_merge_mips64
-#define ranges_can_merge ranges_can_merge_mips64
-#define raw_read raw_read_mips64
-#define raw_write raw_write_mips64
-#define rcon rcon_mips64
-#define read_raw_cp_reg read_raw_cp_reg_mips64
-#define recip_estimate recip_estimate_mips64
-#define recip_sqrt_estimate recip_sqrt_estimate_mips64
-#define register_cp_regs_for_features register_cp_regs_for_features_mips64
-#define register_multipage register_multipage_mips64
-#define register_subpage register_subpage_mips64
-#define register_tm_clones register_tm_clones_mips64
-#define register_types_object register_types_object_mips64
-#define regnames regnames_mips64
-#define render_memory_region render_memory_region_mips64
-#define reset_all_temps reset_all_temps_mips64
-#define reset_temp reset_temp_mips64
-#define rol32 rol32_mips64
-#define rol64 rol64_mips64
-#define ror32 ror32_mips64
-#define ror64 ror64_mips64
-#define roundAndPackFloat128 roundAndPackFloat128_mips64
-#define roundAndPackFloat16 roundAndPackFloat16_mips64
-#define roundAndPackFloat32 roundAndPackFloat32_mips64
-#define roundAndPackFloat64 roundAndPackFloat64_mips64
-#define roundAndPackFloatx80 roundAndPackFloatx80_mips64
-#define roundAndPackInt32 roundAndPackInt32_mips64
-#define roundAndPackInt64 roundAndPackInt64_mips64
-#define roundAndPackUint64 roundAndPackUint64_mips64
-#define round_to_inf round_to_inf_mips64
-#define run_on_cpu run_on_cpu_mips64
-#define s0 s0_mips64
-#define S0 S0_mips64
-#define s1 s1_mips64
-#define S1 S1_mips64
-#define sa1100_initfn sa1100_initfn_mips64
-#define sa1110_initfn sa1110_initfn_mips64
-#define save_globals save_globals_mips64
-#define scr_write scr_write_mips64
-#define sctlr_write sctlr_write_mips64
-#define set_bit set_bit_mips64
-#define set_bits set_bits_mips64
-#define set_default_nan_mode set_default_nan_mode_mips64
-#define set_feature set_feature_mips64
-#define set_float_detect_tininess set_float_detect_tininess_mips64
-#define set_float_exception_flags set_float_exception_flags_mips64
-#define set_float_rounding_mode set_float_rounding_mode_mips64
-#define set_flush_inputs_to_zero set_flush_inputs_to_zero_mips64
-#define set_flush_to_zero set_flush_to_zero_mips64
-#define set_swi_errno set_swi_errno_mips64
-#define sextract32 sextract32_mips64
-#define sextract64 sextract64_mips64
-#define shift128ExtraRightJamming shift128ExtraRightJamming_mips64
-#define shift128Right shift128Right_mips64
-#define shift128RightJamming shift128RightJamming_mips64
-#define shift32RightJamming shift32RightJamming_mips64
-#define shift64ExtraRightJamming shift64ExtraRightJamming_mips64
-#define shift64RightJamming shift64RightJamming_mips64
-#define shifter_out_im shifter_out_im_mips64
-#define shortShift128Left shortShift128Left_mips64
-#define shortShift192Left shortShift192Left_mips64
-#define simple_mpu_ap_bits simple_mpu_ap_bits_mips64
-#define size_code_gen_buffer size_code_gen_buffer_mips64
-#define softmmu_lock_user softmmu_lock_user_mips64
-#define softmmu_lock_user_string softmmu_lock_user_string_mips64
-#define softmmu_tget32 softmmu_tget32_mips64
-#define softmmu_tget8 softmmu_tget8_mips64
-#define softmmu_tput32 softmmu_tput32_mips64
-#define softmmu_unlock_user softmmu_unlock_user_mips64
-#define sort_constraints sort_constraints_mips64
-#define sp_el0_access sp_el0_access_mips64
-#define spsel_read spsel_read_mips64
-#define spsel_write spsel_write_mips64
-#define start_list start_list_mips64
-#define stb_p stb_p_mips64
-#define stb_phys stb_phys_mips64
-#define stl_be_p stl_be_p_mips64
-#define stl_be_phys stl_be_phys_mips64
-#define stl_he_p stl_he_p_mips64
-#define stl_le_p stl_le_p_mips64
-#define stl_le_phys stl_le_phys_mips64
-#define stl_phys stl_phys_mips64
-#define stl_phys_internal stl_phys_internal_mips64
-#define stl_phys_notdirty stl_phys_notdirty_mips64
-#define store_cpu_offset store_cpu_offset_mips64
-#define store_reg store_reg_mips64
-#define store_reg_bx store_reg_bx_mips64
-#define store_reg_from_load store_reg_from_load_mips64
-#define stq_be_p stq_be_p_mips64
-#define stq_be_phys stq_be_phys_mips64
-#define stq_he_p stq_he_p_mips64
-#define stq_le_p stq_le_p_mips64
-#define stq_le_phys stq_le_phys_mips64
-#define stq_phys stq_phys_mips64
-#define string_input_get_visitor string_input_get_visitor_mips64
-#define string_input_visitor_cleanup string_input_visitor_cleanup_mips64
-#define string_input_visitor_new string_input_visitor_new_mips64
-#define strongarm_cp_reginfo strongarm_cp_reginfo_mips64
-#define strstart strstart_mips64
-#define strtosz strtosz_mips64
-#define strtosz_suffix strtosz_suffix_mips64
-#define stw_be_p stw_be_p_mips64
-#define stw_be_phys stw_be_phys_mips64
-#define stw_he_p stw_he_p_mips64
-#define stw_le_p stw_le_p_mips64
-#define stw_le_phys stw_le_phys_mips64
-#define stw_phys stw_phys_mips64
-#define stw_phys_internal stw_phys_internal_mips64
-#define sub128 sub128_mips64
-#define sub16_sat sub16_sat_mips64
-#define sub16_usat sub16_usat_mips64
-#define sub192 sub192_mips64
-#define sub8_sat sub8_sat_mips64
-#define sub8_usat sub8_usat_mips64
-#define subFloat128Sigs subFloat128Sigs_mips64
-#define subFloat32Sigs subFloat32Sigs_mips64
-#define subFloat64Sigs subFloat64Sigs_mips64
-#define subFloatx80Sigs subFloatx80Sigs_mips64
-#define subpage_accepts subpage_accepts_mips64
-#define subpage_init subpage_init_mips64
-#define subpage_ops subpage_ops_mips64
-#define subpage_read subpage_read_mips64
-#define subpage_register subpage_register_mips64
-#define subpage_write subpage_write_mips64
-#define suffix_mul suffix_mul_mips64
-#define swap_commutative swap_commutative_mips64
-#define swap_commutative2 swap_commutative2_mips64
-#define switch_mode switch_mode_mips64
-#define switch_v7m_sp switch_v7m_sp_mips64
-#define syn_aa32_bkpt syn_aa32_bkpt_mips64
-#define syn_aa32_hvc syn_aa32_hvc_mips64
-#define syn_aa32_smc syn_aa32_smc_mips64
-#define syn_aa32_svc syn_aa32_svc_mips64
-#define syn_breakpoint syn_breakpoint_mips64
-#define sync_globals sync_globals_mips64
-#define syn_cp14_rrt_trap syn_cp14_rrt_trap_mips64
-#define syn_cp14_rt_trap syn_cp14_rt_trap_mips64
-#define syn_cp15_rrt_trap syn_cp15_rrt_trap_mips64
-#define syn_cp15_rt_trap syn_cp15_rt_trap_mips64
-#define syn_data_abort syn_data_abort_mips64
-#define syn_fp_access_trap syn_fp_access_trap_mips64
-#define syn_insn_abort syn_insn_abort_mips64
-#define syn_swstep syn_swstep_mips64
-#define syn_uncategorized syn_uncategorized_mips64
-#define syn_watchpoint syn_watchpoint_mips64
-#define syscall_err syscall_err_mips64
-#define system_bus_class_init system_bus_class_init_mips64
-#define system_bus_info system_bus_info_mips64
-#define t2ee_cp_reginfo t2ee_cp_reginfo_mips64
-#define table_logic_cc table_logic_cc_mips64
-#define target_parse_constraint target_parse_constraint_mips64
-#define target_words_bigendian target_words_bigendian_mips64
-#define tb_add_jump tb_add_jump_mips64
-#define tb_alloc tb_alloc_mips64
-#define tb_alloc_page tb_alloc_page_mips64
-#define tb_check_watchpoint tb_check_watchpoint_mips64
-#define tb_find_fast tb_find_fast_mips64
-#define tb_find_pc tb_find_pc_mips64
-#define tb_find_slow tb_find_slow_mips64
-#define tb_flush tb_flush_mips64
-#define tb_flush_jmp_cache tb_flush_jmp_cache_mips64
-#define tb_free tb_free_mips64
-#define tb_gen_code tb_gen_code_mips64
-#define tb_hash_remove tb_hash_remove_mips64
-#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips64
-#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips64
-#define tb_invalidate_phys_range tb_invalidate_phys_range_mips64
-#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_mips64
-#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_mips64
-#define tb_jmp_remove tb_jmp_remove_mips64
-#define tb_link_page tb_link_page_mips64
-#define tb_page_remove tb_page_remove_mips64
-#define tb_phys_hash_func tb_phys_hash_func_mips64
-#define tb_phys_invalidate tb_phys_invalidate_mips64
-#define tb_reset_jump tb_reset_jump_mips64
-#define tb_set_jmp_target tb_set_jmp_target_mips64
-#define tcg_accel_class_init tcg_accel_class_init_mips64
-#define tcg_accel_type tcg_accel_type_mips64
-#define tcg_add_param_i32 tcg_add_param_i32_mips64
-#define tcg_add_param_i64 tcg_add_param_i64_mips64
-#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_mips64
-#define tcg_allowed tcg_allowed_mips64
-#define tcg_canonicalize_memop tcg_canonicalize_memop_mips64
-#define tcg_commit tcg_commit_mips64
-#define tcg_cond_to_jcc tcg_cond_to_jcc_mips64
-#define tcg_constant_folding tcg_constant_folding_mips64
+#define tcg_can_emit_vec_op tcg_can_emit_vec_op_mips64
+#define tcg_expand_vec_op tcg_expand_vec_op_mips64
+#define tcg_register_jit tcg_register_jit_mips64
+#define tcg_tb_insert tcg_tb_insert_mips64
+#define tcg_tb_remove tcg_tb_remove_mips64
+#define tcg_tb_lookup tcg_tb_lookup_mips64
+#define tcg_tb_foreach tcg_tb_foreach_mips64
+#define tcg_nb_tbs tcg_nb_tbs_mips64
+#define tcg_region_reset_all tcg_region_reset_all_mips64
+#define tcg_region_init tcg_region_init_mips64
+#define tcg_code_size tcg_code_size_mips64
+#define tcg_code_capacity tcg_code_capacity_mips64
+#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_mips64
+#define tcg_malloc_internal tcg_malloc_internal_mips64
+#define tcg_pool_reset tcg_pool_reset_mips64
+#define tcg_context_init tcg_context_init_mips64
+#define tcg_tb_alloc tcg_tb_alloc_mips64
+#define tcg_prologue_init tcg_prologue_init_mips64
+#define tcg_func_start tcg_func_start_mips64
+#define tcg_set_frame tcg_set_frame_mips64
+#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips64
+#define tcg_temp_new_internal tcg_temp_new_internal_mips64
+#define tcg_temp_new_vec tcg_temp_new_vec_mips64
+#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_mips64
+#define tcg_temp_free_internal tcg_temp_free_internal_mips64
 #define tcg_const_i32 tcg_const_i32_mips64
 #define tcg_const_i64 tcg_const_i64_mips64
 #define tcg_const_local_i32 tcg_const_local_i32_mips64
 #define tcg_const_local_i64 tcg_const_local_i64_mips64
-#define tcg_context_init tcg_context_init_mips64
-#define tcg_cpu_address_space_init tcg_cpu_address_space_init_mips64
-#define tcg_cpu_exec tcg_cpu_exec_mips64
-#define tcg_current_code_size tcg_current_code_size_mips64
-#define tcg_dump_info tcg_dump_info_mips64
-#define tcg_dump_ops tcg_dump_ops_mips64
-#define tcg_exec_all tcg_exec_all_mips64
-#define tcg_find_helper tcg_find_helper_mips64
-#define tcg_func_start tcg_func_start_mips64
-#define tcg_gen_abs_i32 tcg_gen_abs_i32_mips64
-#define tcg_gen_add2_i32 tcg_gen_add2_i32_mips64
-#define tcg_gen_add_i32 tcg_gen_add_i32_mips64
-#define tcg_gen_add_i64 tcg_gen_add_i64_mips64
-#define tcg_gen_addi_i32 tcg_gen_addi_i32_mips64
-#define tcg_gen_addi_i64 tcg_gen_addi_i64_mips64
-#define tcg_gen_andc_i32 tcg_gen_andc_i32_mips64
-#define tcg_gen_and_i32 tcg_gen_and_i32_mips64
-#define tcg_gen_and_i64 tcg_gen_and_i64_mips64
-#define tcg_gen_andi_i32 tcg_gen_andi_i32_mips64
-#define tcg_gen_andi_i64 tcg_gen_andi_i64_mips64
-#define tcg_gen_br tcg_gen_br_mips64
-#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips64
-#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips64
-#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips64
-#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips64
-#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips64
+#define tcg_op_supported tcg_op_supported_mips64
 #define tcg_gen_callN tcg_gen_callN_mips64
+#define tcg_op_remove tcg_op_remove_mips64
+#define tcg_emit_op tcg_emit_op_mips64
+#define tcg_op_insert_before tcg_op_insert_before_mips64
+#define tcg_op_insert_after tcg_op_insert_after_mips64
+#define tcg_cpu_exec_time tcg_cpu_exec_time_mips64
 #define tcg_gen_code tcg_gen_code_mips64
-#define tcg_gen_code_common tcg_gen_code_common_mips64
-#define tcg_gen_code_search_pc tcg_gen_code_search_pc_mips64
-#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips64
-#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_mips64
-#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips64
-#define tcg_gen_exit_tb tcg_gen_exit_tb_mips64
-#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips64
-#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips64
-#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips64
-#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips64
-#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips64
-#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips64
-#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips64
-#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips64
-#define tcg_gen_goto_tb tcg_gen_goto_tb_mips64
-#define tcg_gen_ld_i32 tcg_gen_ld_i32_mips64
-#define tcg_gen_ld_i64 tcg_gen_ld_i64_mips64
-#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_mips64
-#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_mips64
-#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips64
-#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips64
-#define tcg_gen_mov_i32 tcg_gen_mov_i32_mips64
-#define tcg_gen_mov_i64 tcg_gen_mov_i64_mips64
-#define tcg_gen_movi_i32 tcg_gen_movi_i32_mips64
-#define tcg_gen_movi_i64 tcg_gen_movi_i64_mips64
-#define tcg_gen_mul_i32 tcg_gen_mul_i32_mips64
-#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips64
-#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips64
-#define tcg_gen_neg_i32 tcg_gen_neg_i32_mips64
-#define tcg_gen_neg_i64 tcg_gen_neg_i64_mips64
-#define tcg_gen_not_i32 tcg_gen_not_i32_mips64
-#define tcg_gen_op0 tcg_gen_op0_mips64
-#define tcg_gen_op1i tcg_gen_op1i_mips64
-#define tcg_gen_op2_i32 tcg_gen_op2_i32_mips64
-#define tcg_gen_op2_i64 tcg_gen_op2_i64_mips64
-#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_mips64
-#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_mips64
-#define tcg_gen_op3_i32 tcg_gen_op3_i32_mips64
-#define tcg_gen_op3_i64 tcg_gen_op3_i64_mips64
-#define tcg_gen_op4_i32 tcg_gen_op4_i32_mips64
-#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_mips64
-#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_mips64
-#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_mips64
-#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_mips64
-#define tcg_gen_op6_i32 tcg_gen_op6_i32_mips64
-#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_mips64
-#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_mips64
-#define tcg_gen_orc_i32 tcg_gen_orc_i32_mips64
-#define tcg_gen_or_i32 tcg_gen_or_i32_mips64
-#define tcg_gen_or_i64 tcg_gen_or_i64_mips64
+#define tcg_gen_op1 tcg_gen_op1_mips64
+#define tcg_gen_op2 tcg_gen_op2_mips64
+#define tcg_gen_op3 tcg_gen_op3_mips64
+#define tcg_gen_op4 tcg_gen_op4_mips64
+#define tcg_gen_op5 tcg_gen_op5_mips64
+#define tcg_gen_op6 tcg_gen_op6_mips64
+#define tcg_gen_mb tcg_gen_mb_mips64
+#define tcg_gen_addi_i32 tcg_gen_addi_i32_mips64
+#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_mips64
+#define tcg_gen_subi_i32 tcg_gen_subi_i32_mips64
+#define tcg_gen_andi_i32 tcg_gen_andi_i32_mips64
 #define tcg_gen_ori_i32 tcg_gen_ori_i32_mips64
-#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips64
-#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips64
-#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips64
-#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips64
+#define tcg_gen_xori_i32 tcg_gen_xori_i32_mips64
+#define tcg_gen_shli_i32 tcg_gen_shli_i32_mips64
+#define tcg_gen_shri_i32 tcg_gen_shri_i32_mips64
+#define tcg_gen_sari_i32 tcg_gen_sari_i32_mips64
+#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips64
+#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips64
+#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips64
+#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_mips64
+#define tcg_gen_muli_i32 tcg_gen_muli_i32_mips64
+#define tcg_gen_div_i32 tcg_gen_div_i32_mips64
+#define tcg_gen_rem_i32 tcg_gen_rem_i32_mips64
+#define tcg_gen_divu_i32 tcg_gen_divu_i32_mips64
+#define tcg_gen_remu_i32 tcg_gen_remu_i32_mips64
+#define tcg_gen_andc_i32 tcg_gen_andc_i32_mips64
+#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_mips64
+#define tcg_gen_nand_i32 tcg_gen_nand_i32_mips64
+#define tcg_gen_nor_i32 tcg_gen_nor_i32_mips64
+#define tcg_gen_orc_i32 tcg_gen_orc_i32_mips64
+#define tcg_gen_clz_i32 tcg_gen_clz_i32_mips64
+#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_mips64
+#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_mips64
+#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_mips64
+#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_mips64
+#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_mips64
 #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_mips64
 #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mips64
 #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mips64
 #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mips64
-#define tcg_gen_sar_i32 tcg_gen_sar_i32_mips64
-#define tcg_gen_sari_i32 tcg_gen_sari_i32_mips64
-#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips64
-#define tcg_gen_shl_i32 tcg_gen_shl_i32_mips64
-#define tcg_gen_shl_i64 tcg_gen_shl_i64_mips64
-#define tcg_gen_shli_i32 tcg_gen_shli_i32_mips64
+#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips64
+#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_mips64
+#define tcg_gen_extract_i32 tcg_gen_extract_i32_mips64
+#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_mips64
+#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_mips64
+#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips64
+#define tcg_gen_add2_i32 tcg_gen_add2_i32_mips64
+#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_mips64
+#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips64
+#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips64
+#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_mips64
+#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips64
+#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips64
+#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips64
+#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips64
+#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips64
+#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips64
+#define tcg_gen_smin_i32 tcg_gen_smin_i32_mips64
+#define tcg_gen_umin_i32 tcg_gen_umin_i32_mips64
+#define tcg_gen_smax_i32 tcg_gen_smax_i32_mips64
+#define tcg_gen_umax_i32 tcg_gen_umax_i32_mips64
+#define tcg_gen_abs_i32 tcg_gen_abs_i32_mips64
+#define tcg_gen_addi_i64 tcg_gen_addi_i64_mips64
+#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_mips64
+#define tcg_gen_subi_i64 tcg_gen_subi_i64_mips64
+#define tcg_gen_andi_i64 tcg_gen_andi_i64_mips64
+#define tcg_gen_ori_i64 tcg_gen_ori_i64_mips64
+#define tcg_gen_xori_i64 tcg_gen_xori_i64_mips64
 #define tcg_gen_shli_i64 tcg_gen_shli_i64_mips64
-#define tcg_gen_shr_i32 tcg_gen_shr_i32_mips64
-#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_mips64
-#define tcg_gen_shr_i64 tcg_gen_shr_i64_mips64
-#define tcg_gen_shri_i32 tcg_gen_shri_i32_mips64
 #define tcg_gen_shri_i64 tcg_gen_shri_i64_mips64
-#define tcg_gen_st_i32 tcg_gen_st_i32_mips64
-#define tcg_gen_st_i64 tcg_gen_st_i64_mips64
-#define tcg_gen_sub_i32 tcg_gen_sub_i32_mips64
-#define tcg_gen_sub_i64 tcg_gen_sub_i64_mips64
-#define tcg_gen_subi_i32 tcg_gen_subi_i32_mips64
-#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_mips64
-#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_mips64
-#define tcg_gen_xor_i32 tcg_gen_xor_i32_mips64
-#define tcg_gen_xor_i64 tcg_gen_xor_i64_mips64
-#define tcg_gen_xori_i32 tcg_gen_xori_i32_mips64
-#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_mips64
-#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_mips64
-#define tcg_get_arg_str_idx tcg_get_arg_str_idx_mips64
-#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_mips64
-#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_mips64
-#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips64
-#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_mips64
-#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_mips64
-#define tcg_global_reg_new_internal tcg_global_reg_new_internal_mips64
-#define tcg_handle_interrupt tcg_handle_interrupt_mips64
-#define tcg_init tcg_init_mips64
-#define tcg_invert_cond tcg_invert_cond_mips64
-#define tcg_la_bb_end tcg_la_bb_end_mips64
-#define tcg_la_br_end tcg_la_br_end_mips64
-#define tcg_la_func_end tcg_la_func_end_mips64
-#define tcg_liveness_analysis tcg_liveness_analysis_mips64
-#define tcg_malloc tcg_malloc_mips64
-#define tcg_malloc_internal tcg_malloc_internal_mips64
-#define tcg_op_defs_org tcg_op_defs_org_mips64
-#define tcg_opt_gen_mov tcg_opt_gen_mov_mips64
-#define tcg_opt_gen_movi tcg_opt_gen_movi_mips64
-#define tcg_optimize tcg_optimize_mips64
-#define tcg_out16 tcg_out16_mips64
-#define tcg_out32 tcg_out32_mips64
-#define tcg_out64 tcg_out64_mips64
-#define tcg_out8 tcg_out8_mips64
-#define tcg_out_addi tcg_out_addi_mips64
-#define tcg_out_branch tcg_out_branch_mips64
-#define tcg_out_brcond32 tcg_out_brcond32_mips64
-#define tcg_out_brcond64 tcg_out_brcond64_mips64
-#define tcg_out_bswap32 tcg_out_bswap32_mips64
-#define tcg_out_bswap64 tcg_out_bswap64_mips64
-#define tcg_out_call tcg_out_call_mips64
-#define tcg_out_cmp tcg_out_cmp_mips64
-#define tcg_out_ext16s tcg_out_ext16s_mips64
-#define tcg_out_ext16u tcg_out_ext16u_mips64
-#define tcg_out_ext32s tcg_out_ext32s_mips64
-#define tcg_out_ext32u tcg_out_ext32u_mips64
-#define tcg_out_ext8s tcg_out_ext8s_mips64
-#define tcg_out_ext8u tcg_out_ext8u_mips64
-#define tcg_out_jmp tcg_out_jmp_mips64
-#define tcg_out_jxx tcg_out_jxx_mips64
-#define tcg_out_label tcg_out_label_mips64
-#define tcg_out_ld tcg_out_ld_mips64
-#define tcg_out_modrm tcg_out_modrm_mips64
-#define tcg_out_modrm_offset tcg_out_modrm_offset_mips64
-#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_mips64
-#define tcg_out_mov tcg_out_mov_mips64
-#define tcg_out_movcond32 tcg_out_movcond32_mips64
-#define tcg_out_movcond64 tcg_out_movcond64_mips64
-#define tcg_out_movi tcg_out_movi_mips64
-#define tcg_out_op tcg_out_op_mips64
-#define tcg_out_pop tcg_out_pop_mips64
-#define tcg_out_push tcg_out_push_mips64
-#define tcg_out_qemu_ld tcg_out_qemu_ld_mips64
-#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_mips64
-#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_mips64
-#define tcg_out_qemu_st tcg_out_qemu_st_mips64
-#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_mips64
-#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_mips64
-#define tcg_out_reloc tcg_out_reloc_mips64
-#define tcg_out_rolw_8 tcg_out_rolw_8_mips64
-#define tcg_out_setcond32 tcg_out_setcond32_mips64
-#define tcg_out_setcond64 tcg_out_setcond64_mips64
-#define tcg_out_shifti tcg_out_shifti_mips64
-#define tcg_out_st tcg_out_st_mips64
-#define tcg_out_tb_finalize tcg_out_tb_finalize_mips64
-#define tcg_out_tb_init tcg_out_tb_init_mips64
-#define tcg_out_tlb_load tcg_out_tlb_load_mips64
-#define tcg_out_vex_modrm tcg_out_vex_modrm_mips64
-#define tcg_patch32 tcg_patch32_mips64
-#define tcg_patch8 tcg_patch8_mips64
-#define tcg_pcrel_diff tcg_pcrel_diff_mips64
-#define tcg_pool_reset tcg_pool_reset_mips64
-#define tcg_prologue_init tcg_prologue_init_mips64
-#define tcg_ptr_byte_diff tcg_ptr_byte_diff_mips64
-#define tcg_reg_alloc tcg_reg_alloc_mips64
-#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_mips64
-#define tcg_reg_alloc_call tcg_reg_alloc_call_mips64
-#define tcg_reg_alloc_mov tcg_reg_alloc_mov_mips64
-#define tcg_reg_alloc_movi tcg_reg_alloc_movi_mips64
-#define tcg_reg_alloc_op tcg_reg_alloc_op_mips64
-#define tcg_reg_alloc_start tcg_reg_alloc_start_mips64
-#define tcg_reg_free tcg_reg_free_mips64
-#define tcg_reg_sync tcg_reg_sync_mips64
-#define tcg_set_frame tcg_set_frame_mips64
-#define tcg_set_nop tcg_set_nop_mips64
-#define tcg_swap_cond tcg_swap_cond_mips64
-#define tcg_target_callee_save_regs tcg_target_callee_save_regs_mips64
-#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_mips64
-#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_mips64
-#define tcg_target_const_match tcg_target_const_match_mips64
-#define tcg_target_init tcg_target_init_mips64
-#define tcg_target_qemu_prologue tcg_target_qemu_prologue_mips64
-#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_mips64
-#define tcg_temp_alloc tcg_temp_alloc_mips64
-#define tcg_temp_free_i32 tcg_temp_free_i32_mips64
-#define tcg_temp_free_i64 tcg_temp_free_i64_mips64
-#define tcg_temp_free_internal tcg_temp_free_internal_mips64
-#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_mips64
-#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_mips64
-#define tcg_temp_new_i32 tcg_temp_new_i32_mips64
-#define tcg_temp_new_i64 tcg_temp_new_i64_mips64
-#define tcg_temp_new_internal tcg_temp_new_internal_mips64
-#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_mips64
-#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_mips64
-#define tdb_hash tdb_hash_mips64
-#define teecr_write teecr_write_mips64
-#define teehbr_access teehbr_access_mips64
-#define temp_allocate_frame temp_allocate_frame_mips64
-#define temp_dead temp_dead_mips64
-#define temps_are_copies temps_are_copies_mips64
-#define temp_save temp_save_mips64
-#define temp_sync temp_sync_mips64
-#define tgen_arithi tgen_arithi_mips64
-#define tgen_arithr tgen_arithr_mips64
-#define thumb2_logic_op thumb2_logic_op_mips64
-#define ti925t_initfn ti925t_initfn_mips64
-#define tlb_add_large_page tlb_add_large_page_mips64
-#define tlb_flush_entry tlb_flush_entry_mips64
-#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips64
-#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips64
-#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mips64
-#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_mips64
-#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_mips64
-#define tlbi_aa64_va_write tlbi_aa64_va_write_mips64
-#define tlbiall_is_write tlbiall_is_write_mips64
-#define tlbiall_write tlbiall_write_mips64
-#define tlbiasid_is_write tlbiasid_is_write_mips64
-#define tlbiasid_write tlbiasid_write_mips64
-#define tlbimvaa_is_write tlbimvaa_is_write_mips64
-#define tlbimvaa_write tlbimvaa_write_mips64
-#define tlbimva_is_write tlbimva_is_write_mips64
-#define tlbimva_write tlbimva_write_mips64
-#define tlb_is_dirty_ram tlb_is_dirty_ram_mips64
+#define tcg_gen_sari_i64 tcg_gen_sari_i64_mips64
+#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips64
+#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_mips64
+#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_mips64
+#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_mips64
+#define tcg_gen_muli_i64 tcg_gen_muli_i64_mips64
+#define tcg_gen_div_i64 tcg_gen_div_i64_mips64
+#define tcg_gen_rem_i64 tcg_gen_rem_i64_mips64
+#define tcg_gen_divu_i64 tcg_gen_divu_i64_mips64
+#define tcg_gen_remu_i64 tcg_gen_remu_i64_mips64
+#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_mips64
+#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_mips64
+#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips64
+#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_mips64
+#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_mips64
+#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips64
+#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_mips64
+#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_mips64
+#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_mips64
+#define tcg_gen_not_i64 tcg_gen_not_i64_mips64
+#define tcg_gen_andc_i64 tcg_gen_andc_i64_mips64
+#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_mips64
+#define tcg_gen_nand_i64 tcg_gen_nand_i64_mips64
+#define tcg_gen_nor_i64 tcg_gen_nor_i64_mips64
+#define tcg_gen_orc_i64 tcg_gen_orc_i64_mips64
+#define tcg_gen_clz_i64 tcg_gen_clz_i64_mips64
+#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_mips64
+#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_mips64
+#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_mips64
+#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_mips64
+#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_mips64
+#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_mips64
+#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_mips64
+#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_mips64
+#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_mips64
+#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_mips64
+#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_mips64
+#define tcg_gen_extract_i64 tcg_gen_extract_i64_mips64
+#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_mips64
+#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_mips64
+#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips64
+#define tcg_gen_add2_i64 tcg_gen_add2_i64_mips64
+#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_mips64
+#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_mips64
+#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_mips64
+#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_mips64
+#define tcg_gen_smin_i64 tcg_gen_smin_i64_mips64
+#define tcg_gen_umin_i64 tcg_gen_umin_i64_mips64
+#define tcg_gen_smax_i64 tcg_gen_smax_i64_mips64
+#define tcg_gen_umax_i64 tcg_gen_umax_i64_mips64
+#define tcg_gen_abs_i64 tcg_gen_abs_i64_mips64
+#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_mips64
+#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_mips64
+#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips64
+#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips64
+#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips64
+#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_mips64
+#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_mips64
+#define tcg_gen_exit_tb tcg_gen_exit_tb_mips64
+#define tcg_gen_goto_tb tcg_gen_goto_tb_mips64
+#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_mips64
+#define check_exit_request check_exit_request_mips64
+#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips64
+#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips64
+#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips64
+#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips64
+#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_mips64
+#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_mips64
+#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_mips64
+#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_mips64
+#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_mips64
+#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_mips64
+#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_mips64
+#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_mips64
+#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_mips64
+#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_mips64
+#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_mips64
+#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_mips64
+#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_mips64
+#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_mips64
+#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_mips64
+#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_mips64
+#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_mips64
+#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_mips64
+#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_mips64
+#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_mips64
+#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_mips64
+#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_mips64
+#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_mips64
+#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_mips64
+#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_mips64
+#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_mips64
+#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_mips64
+#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_mips64
+#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_mips64
+#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_mips64
+#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_mips64
+#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_mips64
+#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_mips64
+#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_mips64
+#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_mips64
+#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_mips64
+#define simd_desc simd_desc_mips64
+#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_mips64
+#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_mips64
+#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_mips64
+#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_mips64
+#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_mips64
+#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_mips64
+#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_mips64
+#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_mips64
+#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_mips64
+#define tcg_gen_gvec_2 tcg_gen_gvec_2_mips64
+#define tcg_gen_gvec_2i tcg_gen_gvec_2i_mips64
+#define tcg_gen_gvec_2s tcg_gen_gvec_2s_mips64
+#define tcg_gen_gvec_3 tcg_gen_gvec_3_mips64
+#define tcg_gen_gvec_3i tcg_gen_gvec_3i_mips64
+#define tcg_gen_gvec_4 tcg_gen_gvec_4_mips64
+#define tcg_gen_gvec_mov tcg_gen_gvec_mov_mips64
+#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mips64
+#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mips64
+#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_mips64
+#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mips64
+#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mips64
+#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mips64
+#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mips64
+#define tcg_gen_gvec_not tcg_gen_gvec_not_mips64
+#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_mips64
+#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_mips64
+#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_mips64
+#define tcg_gen_gvec_add tcg_gen_gvec_add_mips64
+#define tcg_gen_gvec_adds tcg_gen_gvec_adds_mips64
+#define tcg_gen_gvec_addi tcg_gen_gvec_addi_mips64
+#define tcg_gen_gvec_subs tcg_gen_gvec_subs_mips64
+#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_mips64
+#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_mips64
+#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_mips64
+#define tcg_gen_gvec_sub tcg_gen_gvec_sub_mips64
+#define tcg_gen_gvec_mul tcg_gen_gvec_mul_mips64
+#define tcg_gen_gvec_muls tcg_gen_gvec_muls_mips64
+#define tcg_gen_gvec_muli tcg_gen_gvec_muli_mips64
+#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_mips64
+#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_mips64
+#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_mips64
+#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_mips64
+#define tcg_gen_gvec_smin tcg_gen_gvec_smin_mips64
+#define tcg_gen_gvec_umin tcg_gen_gvec_umin_mips64
+#define tcg_gen_gvec_smax tcg_gen_gvec_smax_mips64
+#define tcg_gen_gvec_umax tcg_gen_gvec_umax_mips64
+#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_mips64
+#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_mips64
+#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_mips64
+#define tcg_gen_gvec_neg tcg_gen_gvec_neg_mips64
+#define tcg_gen_gvec_abs tcg_gen_gvec_abs_mips64
+#define tcg_gen_gvec_and tcg_gen_gvec_and_mips64
+#define tcg_gen_gvec_or tcg_gen_gvec_or_mips64
+#define tcg_gen_gvec_xor tcg_gen_gvec_xor_mips64
+#define tcg_gen_gvec_andc tcg_gen_gvec_andc_mips64
+#define tcg_gen_gvec_orc tcg_gen_gvec_orc_mips64
+#define tcg_gen_gvec_nand tcg_gen_gvec_nand_mips64
+#define tcg_gen_gvec_nor tcg_gen_gvec_nor_mips64
+#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_mips64
+#define tcg_gen_gvec_ands tcg_gen_gvec_ands_mips64
+#define tcg_gen_gvec_andi tcg_gen_gvec_andi_mips64
+#define tcg_gen_gvec_xors tcg_gen_gvec_xors_mips64
+#define tcg_gen_gvec_xori tcg_gen_gvec_xori_mips64
+#define tcg_gen_gvec_ors tcg_gen_gvec_ors_mips64
+#define tcg_gen_gvec_ori tcg_gen_gvec_ori_mips64
+#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_mips64
+#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_mips64
+#define tcg_gen_gvec_shli tcg_gen_gvec_shli_mips64
+#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_mips64
+#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_mips64
+#define tcg_gen_gvec_shri tcg_gen_gvec_shri_mips64
+#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_mips64
+#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_mips64
+#define tcg_gen_gvec_sari tcg_gen_gvec_sari_mips64
+#define tcg_gen_gvec_shls tcg_gen_gvec_shls_mips64
+#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_mips64
+#define tcg_gen_gvec_sars tcg_gen_gvec_sars_mips64
+#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_mips64
+#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_mips64
+#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_mips64
+#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mips64
+#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mips64
+#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_mips64
+#define vec_gen_2 vec_gen_2_mips64
+#define vec_gen_3 vec_gen_3_mips64
+#define vec_gen_4 vec_gen_4_mips64
+#define tcg_gen_mov_vec tcg_gen_mov_vec_mips64
+#define tcg_const_zeros_vec tcg_const_zeros_vec_mips64
+#define tcg_const_ones_vec tcg_const_ones_vec_mips64
+#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_mips64
+#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_mips64
+#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_mips64
+#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_mips64
+#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_mips64
+#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_mips64
+#define tcg_gen_dupi_vec tcg_gen_dupi_vec_mips64
+#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_mips64
+#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_mips64
+#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_mips64
+#define tcg_gen_ld_vec tcg_gen_ld_vec_mips64
+#define tcg_gen_st_vec tcg_gen_st_vec_mips64
+#define tcg_gen_stl_vec tcg_gen_stl_vec_mips64
+#define tcg_gen_and_vec tcg_gen_and_vec_mips64
+#define tcg_gen_or_vec tcg_gen_or_vec_mips64
+#define tcg_gen_xor_vec tcg_gen_xor_vec_mips64
+#define tcg_gen_andc_vec tcg_gen_andc_vec_mips64
+#define tcg_gen_orc_vec tcg_gen_orc_vec_mips64
+#define tcg_gen_nand_vec tcg_gen_nand_vec_mips64
+#define tcg_gen_nor_vec tcg_gen_nor_vec_mips64
+#define tcg_gen_eqv_vec tcg_gen_eqv_vec_mips64
+#define tcg_gen_not_vec tcg_gen_not_vec_mips64
+#define tcg_gen_neg_vec tcg_gen_neg_vec_mips64
+#define tcg_gen_abs_vec tcg_gen_abs_vec_mips64
+#define tcg_gen_shli_vec tcg_gen_shli_vec_mips64
+#define tcg_gen_shri_vec tcg_gen_shri_vec_mips64
+#define tcg_gen_sari_vec tcg_gen_sari_vec_mips64
+#define tcg_gen_cmp_vec tcg_gen_cmp_vec_mips64
+#define tcg_gen_add_vec tcg_gen_add_vec_mips64
+#define tcg_gen_sub_vec tcg_gen_sub_vec_mips64
+#define tcg_gen_mul_vec tcg_gen_mul_vec_mips64
+#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_mips64
+#define tcg_gen_usadd_vec tcg_gen_usadd_vec_mips64
+#define tcg_gen_sssub_vec tcg_gen_sssub_vec_mips64
+#define tcg_gen_ussub_vec tcg_gen_ussub_vec_mips64
+#define tcg_gen_smin_vec tcg_gen_smin_vec_mips64
+#define tcg_gen_umin_vec tcg_gen_umin_vec_mips64
+#define tcg_gen_smax_vec tcg_gen_smax_vec_mips64
+#define tcg_gen_umax_vec tcg_gen_umax_vec_mips64
+#define tcg_gen_shlv_vec tcg_gen_shlv_vec_mips64
+#define tcg_gen_shrv_vec tcg_gen_shrv_vec_mips64
+#define tcg_gen_sarv_vec tcg_gen_sarv_vec_mips64
+#define tcg_gen_shls_vec tcg_gen_shls_vec_mips64
+#define tcg_gen_shrs_vec tcg_gen_shrs_vec_mips64
+#define tcg_gen_sars_vec tcg_gen_sars_vec_mips64
+#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_mips64
+#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_mips64
+#define tb_htable_lookup tb_htable_lookup_mips64
+#define tb_set_jmp_target tb_set_jmp_target_mips64
+#define cpu_exec cpu_exec_mips64
+#define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips64
+#define cpu_reloading_memory_map cpu_reloading_memory_map_mips64
+#define cpu_loop_exit cpu_loop_exit_mips64
+#define cpu_loop_exit_restore cpu_loop_exit_restore_mips64
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips64
+#define tlb_init tlb_init_mips64
+#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mips64
+#define tlb_flush tlb_flush_mips64
+#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_mips64
+#define tlb_flush_all_cpus tlb_flush_all_cpus_mips64
+#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_mips64
+#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_mips64
+#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips64
+#define tlb_flush_page tlb_flush_page_mips64
+#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_mips64
+#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_mips64
+#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_mips64
+#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_mips64
#define tlb_protect_code tlb_protect_code_mips64
-#define tlb_reset_dirty_range tlb_reset_dirty_range_mips64
-#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_mips64
+#define tlb_unprotect_code tlb_unprotect_code_mips64
+#define tlb_reset_dirty tlb_reset_dirty_mips64
#define tlb_set_dirty tlb_set_dirty_mips64
-#define tlb_set_dirty1 tlb_set_dirty1_mips64
-#define tlb_unprotect_code_phys tlb_unprotect_code_phys_mips64
+#define tlb_set_page_with_attrs tlb_set_page_with_attrs_mips64
+#define tlb_set_page tlb_set_page_mips64
+#define get_page_addr_code_hostp get_page_addr_code_hostp_mips64
+#define get_page_addr_code get_page_addr_code_mips64
+#define probe_access probe_access_mips64
#define tlb_vaddr_to_host tlb_vaddr_to_host_mips64
-#define token_get_type token_get_type_mips64
-#define token_get_value token_get_value_mips64
-#define token_is_escape token_is_escape_mips64
-#define token_is_keyword token_is_keyword_mips64
-#define token_is_operator token_is_operator_mips64
-#define tokens_append_from_iter tokens_append_from_iter_mips64
-#define to_qiv to_qiv_mips64
-#define to_qov to_qov_mips64
-#define tosa_init tosa_init_mips64
-#define tosa_machine_init tosa_machine_init_mips64
-#define tswap32 tswap32_mips64
-#define tswap64 tswap64_mips64
-#define type_class_get_size type_class_get_size_mips64
-#define type_get_by_name type_get_by_name_mips64
-#define type_get_parent type_get_parent_mips64
-#define type_has_parent type_has_parent_mips64
-#define type_initialize type_initialize_mips64
-#define type_initialize_interface type_initialize_interface_mips64
-#define type_is_ancestor type_is_ancestor_mips64
-#define type_new type_new_mips64
-#define type_object_get_size type_object_get_size_mips64
-#define type_register_internal type_register_internal_mips64
-#define type_table_add type_table_add_mips64
-#define type_table_get type_table_get_mips64
-#define type_table_lookup type_table_lookup_mips64
-#define uint16_to_float32 uint16_to_float32_mips64
-#define uint16_to_float64 uint16_to_float64_mips64
-#define uint32_to_float32 uint32_to_float32_mips64
-#define uint32_to_float64 uint32_to_float64_mips64
-#define uint64_to_float128 uint64_to_float128_mips64
-#define uint64_to_float32 uint64_to_float32_mips64
-#define uint64_to_float64 uint64_to_float64_mips64
-#define unassigned_io_ops unassigned_io_ops_mips64
-#define unassigned_io_read unassigned_io_read_mips64
-#define unassigned_io_write unassigned_io_write_mips64
-#define unassigned_mem_accepts unassigned_mem_accepts_mips64
+#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips64
+#define helper_le_lduw_mmu helper_le_lduw_mmu_mips64
+#define helper_be_lduw_mmu helper_be_lduw_mmu_mips64
+#define helper_le_ldul_mmu helper_le_ldul_mmu_mips64
+#define helper_be_ldul_mmu helper_be_ldul_mmu_mips64
+#define helper_le_ldq_mmu helper_le_ldq_mmu_mips64
+#define helper_be_ldq_mmu helper_be_ldq_mmu_mips64
+#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips64
+#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips64
+#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips64
+#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips64
+#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips64
+#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_mips64
+#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_mips64
+#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_mips64
+#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_mips64
+#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_mips64
+#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_mips64
+#define cpu_ldub_data_ra cpu_ldub_data_ra_mips64
+#define cpu_ldsb_data_ra cpu_ldsb_data_ra_mips64
+#define cpu_lduw_data_ra cpu_lduw_data_ra_mips64
+#define cpu_ldsw_data_ra cpu_ldsw_data_ra_mips64
+#define cpu_ldl_data_ra cpu_ldl_data_ra_mips64
+#define cpu_ldq_data_ra cpu_ldq_data_ra_mips64
+#define cpu_ldub_data cpu_ldub_data_mips64
+#define cpu_ldsb_data cpu_ldsb_data_mips64
+#define cpu_lduw_data cpu_lduw_data_mips64
+#define cpu_ldsw_data cpu_ldsw_data_mips64
+#define cpu_ldl_data cpu_ldl_data_mips64
+#define cpu_ldq_data cpu_ldq_data_mips64
+#define helper_ret_stb_mmu helper_ret_stb_mmu_mips64
+#define helper_le_stw_mmu helper_le_stw_mmu_mips64
+#define helper_be_stw_mmu helper_be_stw_mmu_mips64
+#define helper_le_stl_mmu helper_le_stl_mmu_mips64
+#define helper_be_stl_mmu helper_be_stl_mmu_mips64
+#define helper_le_stq_mmu helper_le_stq_mmu_mips64
+#define helper_be_stq_mmu helper_be_stq_mmu_mips64
+#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_mips64
+#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_mips64
+#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_mips64
+#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_mips64
+#define cpu_stb_data_ra cpu_stb_data_ra_mips64
+#define cpu_stw_data_ra cpu_stw_data_ra_mips64
+#define cpu_stl_data_ra cpu_stl_data_ra_mips64
+#define cpu_stq_data_ra cpu_stq_data_ra_mips64
+#define cpu_stb_data cpu_stb_data_mips64
+#define cpu_stw_data cpu_stw_data_mips64
+#define cpu_stl_data cpu_stl_data_mips64
+#define cpu_stq_data cpu_stq_data_mips64
+#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_mips64
+#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_mips64
+#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_mips64
+#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_mips64
+#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_mips64
+#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_mips64
+#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_mips64
+#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_mips64
+#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_mips64
+#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_mips64
+#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_mips64
+#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_mips64
+#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_mips64
+#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_mips64
+#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_mips64
+#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_mips64
+#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_mips64
+#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_mips64
+#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_mips64
+#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_mips64
+#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_mips64
+#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_mips64
+#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_mips64
+#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_mips64
+#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_mips64
+#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_mips64
+#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_mips64
+#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_mips64
+#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_mips64
+#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_mips64
+#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_mips64
+#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_mips64
+#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_mips64
+#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_mips64
+#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_mips64
+#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_mips64
+#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_mips64
+#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_mips64
+#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_mips64
+#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_mips64
+#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_mips64
+#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_mips64
+#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_mips64
+#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_mips64
+#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_mips64
+#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_mips64
+#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_mips64
+#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_mips64
+#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_mips64
+#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_mips64
+#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_mips64
+#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_mips64
+#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_mips64
+#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_mips64
+#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_mips64
+#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_mips64
+#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_mips64
+#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_mips64
+#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_mips64
+#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_mips64
+#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_mips64
+#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_mips64
+#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_mips64
+#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_mips64
+#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_mips64
+#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_mips64
+#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_mips64
+#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_mips64
+#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_mips64
+#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_mips64
+#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_mips64
+#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_mips64
+#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_mips64
+#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_mips64
+#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_mips64
+#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_mips64
+#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_mips64
+#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_mips64
+#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_mips64
+#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_mips64
+#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_mips64
+#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_mips64
+#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_mips64
+#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_mips64
+#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_mips64
+#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_mips64
+#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_mips64
+#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_mips64
+#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_mips64
+#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_mips64
+#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_mips64
+#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_mips64
+#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_mips64
+#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_mips64
+#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_mips64
+#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_mips64
+#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_mips64
+#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_mips64
+#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_mips64
+#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_mips64
+#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_mips64
+#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_mips64
+#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_mips64
+#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_mips64
+#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_mips64
+#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_mips64
+#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_mips64
+#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_mips64
+#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_mips64
+#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_mips64
+#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_mips64
+#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_mips64
+#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_mips64
+#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_mips64
+#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_mips64
+#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_mips64
+#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_mips64
+#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_mips64
+#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_mips64
+#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_mips64
+#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_mips64
+#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_mips64
+#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_mips64
+#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_mips64
+#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_mips64
+#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_mips64
+#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_mips64
+#define helper_atomic_xchgb helper_atomic_xchgb_mips64
+#define helper_atomic_fetch_addb helper_atomic_fetch_addb_mips64
+#define helper_atomic_fetch_andb helper_atomic_fetch_andb_mips64
+#define helper_atomic_fetch_orb helper_atomic_fetch_orb_mips64
+#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_mips64
+#define helper_atomic_add_fetchb helper_atomic_add_fetchb_mips64
+#define helper_atomic_and_fetchb helper_atomic_and_fetchb_mips64
+#define helper_atomic_or_fetchb helper_atomic_or_fetchb_mips64
+#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_mips64
+#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_mips64
+#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_mips64
+#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_mips64
+#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_mips64
+#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_mips64
+#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_mips64
+#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_mips64
+#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_mips64
+#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_mips64
+#define helper_atomic_xchgw_le helper_atomic_xchgw_le_mips64
+#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_mips64
+#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_mips64
+#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_mips64
+#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_mips64
+#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_mips64
+#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_mips64
+#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_mips64
+#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_mips64
+#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_mips64
+#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_mips64
+#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_mips64
+#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_mips64
+#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_mips64
+#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_mips64
+#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_mips64
+#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_mips64
+#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_mips64
+#define helper_atomic_xchgw_be helper_atomic_xchgw_be_mips64
+#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_mips64
+#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_mips64
+#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_mips64
+#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_mips64
+#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_mips64
+#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_mips64
+#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_mips64
+#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_mips64
+#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_mips64
+#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_mips64
+#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_mips64
+#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_mips64
+#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_mips64
+#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_mips64
+#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_mips64
+#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_mips64
+#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_mips64
+#define helper_atomic_xchgl_le helper_atomic_xchgl_le_mips64
+#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_mips64
+#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_mips64
+#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_mips64
+#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_mips64
+#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_mips64
+#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_mips64
+#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_mips64
+#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_mips64
+#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_mips64
+#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_mips64
+#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_mips64
+#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_mips64
+#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_mips64
+#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_mips64
+#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_mips64
+#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_mips64
+#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_mips64
+#define helper_atomic_xchgl_be helper_atomic_xchgl_be_mips64
+#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_mips64
+#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_mips64
+#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_mips64
+#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_mips64
+#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_mips64
+#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_mips64
+#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_mips64
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_mips64
+#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_mips64
+#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_mips64
+#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_mips64
+#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_mips64
+#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_mips64
+#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_mips64
+#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_mips64
+#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_mips64
+#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_mips64
+#define helper_atomic_xchgq_le helper_atomic_xchgq_le_mips64
+#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_mips64
+#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_mips64
+#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_mips64
+#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_mips64
+#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_mips64
+#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_mips64
+#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_mips64
+#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_mips64
+#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_mips64
+#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_mips64
+#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_mips64
+#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_mips64
+#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_mips64
+#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_mips64
+#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_mips64
+#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_mips64
+#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_mips64
+#define helper_atomic_xchgq_be helper_atomic_xchgq_be_mips64
+#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_mips64
+#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_mips64
+#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_mips64
+#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_mips64
+#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_mips64
+#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_mips64
+#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_mips64
+#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_mips64
+#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_mips64
+#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_mips64
+#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_mips64
+#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_mips64
+#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_mips64
+#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_mips64
+#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_mips64
+#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_mips64
+#define cpu_ldub_code cpu_ldub_code_mips64
+#define cpu_lduw_code cpu_lduw_code_mips64
+#define cpu_ldl_code cpu_ldl_code_mips64
+#define cpu_ldq_code cpu_ldq_code_mips64
+#define helper_div_i32 helper_div_i32_mips64
+#define helper_rem_i32 helper_rem_i32_mips64
+#define helper_divu_i32 helper_divu_i32_mips64
+#define helper_remu_i32 helper_remu_i32_mips64
+#define helper_shl_i64 helper_shl_i64_mips64
+#define helper_shr_i64 helper_shr_i64_mips64
+#define helper_sar_i64 helper_sar_i64_mips64
+#define helper_div_i64 helper_div_i64_mips64
+#define helper_rem_i64 helper_rem_i64_mips64
+#define helper_divu_i64 helper_divu_i64_mips64
+#define helper_remu_i64 helper_remu_i64_mips64
+#define helper_muluh_i64 helper_muluh_i64_mips64
+#define helper_mulsh_i64 helper_mulsh_i64_mips64
+#define helper_clz_i32 helper_clz_i32_mips64
+#define helper_ctz_i32 helper_ctz_i32_mips64
+#define helper_clz_i64 helper_clz_i64_mips64
+#define helper_ctz_i64 helper_ctz_i64_mips64
+#define helper_clrsb_i32 helper_clrsb_i32_mips64
+#define helper_clrsb_i64 helper_clrsb_i64_mips64
+#define helper_ctpop_i32 helper_ctpop_i32_mips64
+#define helper_ctpop_i64 helper_ctpop_i64_mips64
+#define helper_lookup_tb_ptr helper_lookup_tb_ptr_mips64
+#define helper_exit_atomic helper_exit_atomic_mips64
+#define helper_gvec_add8 helper_gvec_add8_mips64
+#define helper_gvec_add16 helper_gvec_add16_mips64
+#define helper_gvec_add32 helper_gvec_add32_mips64
+#define helper_gvec_add64 helper_gvec_add64_mips64
+#define helper_gvec_adds8 helper_gvec_adds8_mips64
+#define helper_gvec_adds16 helper_gvec_adds16_mips64
+#define helper_gvec_adds32 helper_gvec_adds32_mips64
+#define helper_gvec_adds64 helper_gvec_adds64_mips64
+#define helper_gvec_sub8 helper_gvec_sub8_mips64
+#define helper_gvec_sub16 helper_gvec_sub16_mips64
+#define helper_gvec_sub32 helper_gvec_sub32_mips64
+#define helper_gvec_sub64 helper_gvec_sub64_mips64
+#define helper_gvec_subs8 helper_gvec_subs8_mips64
+#define helper_gvec_subs16 helper_gvec_subs16_mips64
+#define helper_gvec_subs32 helper_gvec_subs32_mips64
+#define helper_gvec_subs64 helper_gvec_subs64_mips64
+#define helper_gvec_mul8 helper_gvec_mul8_mips64
+#define helper_gvec_mul16 helper_gvec_mul16_mips64
+#define helper_gvec_mul32 helper_gvec_mul32_mips64
+#define helper_gvec_mul64 helper_gvec_mul64_mips64
+#define helper_gvec_muls8 helper_gvec_muls8_mips64
+#define helper_gvec_muls16 helper_gvec_muls16_mips64
+#define helper_gvec_muls32 helper_gvec_muls32_mips64
+#define helper_gvec_muls64 helper_gvec_muls64_mips64
+#define helper_gvec_neg8 helper_gvec_neg8_mips64
+#define helper_gvec_neg16 helper_gvec_neg16_mips64
+#define helper_gvec_neg32 helper_gvec_neg32_mips64
+#define helper_gvec_neg64 helper_gvec_neg64_mips64
+#define helper_gvec_abs8 helper_gvec_abs8_mips64
+#define helper_gvec_abs16 helper_gvec_abs16_mips64
+#define helper_gvec_abs32 helper_gvec_abs32_mips64
+#define helper_gvec_abs64 helper_gvec_abs64_mips64
+#define helper_gvec_mov helper_gvec_mov_mips64
+#define helper_gvec_dup64 helper_gvec_dup64_mips64
+#define helper_gvec_dup32 helper_gvec_dup32_mips64
+#define helper_gvec_dup16 helper_gvec_dup16_mips64
+#define helper_gvec_dup8 helper_gvec_dup8_mips64
+#define helper_gvec_not helper_gvec_not_mips64
+#define helper_gvec_and helper_gvec_and_mips64
+#define helper_gvec_or helper_gvec_or_mips64
+#define helper_gvec_xor helper_gvec_xor_mips64
+#define helper_gvec_andc helper_gvec_andc_mips64
+#define helper_gvec_orc helper_gvec_orc_mips64
+#define helper_gvec_nand helper_gvec_nand_mips64
+#define helper_gvec_nor helper_gvec_nor_mips64
+#define helper_gvec_eqv helper_gvec_eqv_mips64
+#define helper_gvec_ands helper_gvec_ands_mips64
+#define helper_gvec_xors helper_gvec_xors_mips64
+#define helper_gvec_ors helper_gvec_ors_mips64
+#define helper_gvec_shl8i helper_gvec_shl8i_mips64
+#define helper_gvec_shl16i helper_gvec_shl16i_mips64
+#define helper_gvec_shl32i helper_gvec_shl32i_mips64
+#define helper_gvec_shl64i helper_gvec_shl64i_mips64
+#define helper_gvec_shr8i helper_gvec_shr8i_mips64
+#define helper_gvec_shr16i helper_gvec_shr16i_mips64
+#define helper_gvec_shr32i helper_gvec_shr32i_mips64
+#define helper_gvec_shr64i helper_gvec_shr64i_mips64
+#define helper_gvec_sar8i helper_gvec_sar8i_mips64
+#define helper_gvec_sar16i helper_gvec_sar16i_mips64
+#define helper_gvec_sar32i helper_gvec_sar32i_mips64
+#define helper_gvec_sar64i helper_gvec_sar64i_mips64
+#define helper_gvec_shl8v helper_gvec_shl8v_mips64
+#define helper_gvec_shl16v helper_gvec_shl16v_mips64
+#define helper_gvec_shl32v helper_gvec_shl32v_mips64
+#define helper_gvec_shl64v helper_gvec_shl64v_mips64
+#define helper_gvec_shr8v helper_gvec_shr8v_mips64
+#define helper_gvec_shr16v helper_gvec_shr16v_mips64
+#define helper_gvec_shr32v helper_gvec_shr32v_mips64
+#define helper_gvec_shr64v helper_gvec_shr64v_mips64
+#define helper_gvec_sar8v helper_gvec_sar8v_mips64
+#define helper_gvec_sar16v helper_gvec_sar16v_mips64
+#define helper_gvec_sar32v helper_gvec_sar32v_mips64
+#define helper_gvec_sar64v helper_gvec_sar64v_mips64
+#define helper_gvec_eq8 helper_gvec_eq8_mips64
+#define helper_gvec_ne8 helper_gvec_ne8_mips64
+#define helper_gvec_lt8 helper_gvec_lt8_mips64
+#define helper_gvec_le8 helper_gvec_le8_mips64
+#define helper_gvec_ltu8 helper_gvec_ltu8_mips64
+#define helper_gvec_leu8 helper_gvec_leu8_mips64
+#define helper_gvec_eq16 helper_gvec_eq16_mips64
+#define helper_gvec_ne16 helper_gvec_ne16_mips64
+#define helper_gvec_lt16 helper_gvec_lt16_mips64
+#define helper_gvec_le16 helper_gvec_le16_mips64
+#define helper_gvec_ltu16 helper_gvec_ltu16_mips64
+#define helper_gvec_leu16 helper_gvec_leu16_mips64
+#define helper_gvec_eq32 helper_gvec_eq32_mips64
+#define helper_gvec_ne32 helper_gvec_ne32_mips64
+#define helper_gvec_lt32 helper_gvec_lt32_mips64
+#define helper_gvec_le32 helper_gvec_le32_mips64
+#define helper_gvec_ltu32 helper_gvec_ltu32_mips64
+#define helper_gvec_leu32 helper_gvec_leu32_mips64
+#define helper_gvec_eq64 helper_gvec_eq64_mips64
+#define helper_gvec_ne64 helper_gvec_ne64_mips64
+#define helper_gvec_lt64 helper_gvec_lt64_mips64
+#define helper_gvec_le64 helper_gvec_le64_mips64
+#define helper_gvec_ltu64 helper_gvec_ltu64_mips64
+#define helper_gvec_leu64 helper_gvec_leu64_mips64
+#define helper_gvec_ssadd8 helper_gvec_ssadd8_mips64
+#define helper_gvec_ssadd16 helper_gvec_ssadd16_mips64
+#define helper_gvec_ssadd32 helper_gvec_ssadd32_mips64
+#define helper_gvec_ssadd64 helper_gvec_ssadd64_mips64
+#define helper_gvec_sssub8 helper_gvec_sssub8_mips64
+#define helper_gvec_sssub16 helper_gvec_sssub16_mips64
+#define helper_gvec_sssub32 helper_gvec_sssub32_mips64
+#define helper_gvec_sssub64 helper_gvec_sssub64_mips64
+#define helper_gvec_usadd8 helper_gvec_usadd8_mips64
+#define helper_gvec_usadd16 helper_gvec_usadd16_mips64
+#define helper_gvec_usadd32 helper_gvec_usadd32_mips64
+#define helper_gvec_usadd64 helper_gvec_usadd64_mips64
+#define helper_gvec_ussub8 helper_gvec_ussub8_mips64
+#define helper_gvec_ussub16 helper_gvec_ussub16_mips64
+#define helper_gvec_ussub32 helper_gvec_ussub32_mips64
+#define helper_gvec_ussub64 helper_gvec_ussub64_mips64
+#define helper_gvec_smin8 helper_gvec_smin8_mips64
+#define helper_gvec_smin16 helper_gvec_smin16_mips64
+#define helper_gvec_smin32 helper_gvec_smin32_mips64
+#define helper_gvec_smin64 helper_gvec_smin64_mips64
+#define helper_gvec_smax8 helper_gvec_smax8_mips64
+#define helper_gvec_smax16 helper_gvec_smax16_mips64
+#define helper_gvec_smax32 helper_gvec_smax32_mips64
+#define helper_gvec_smax64 helper_gvec_smax64_mips64
+#define helper_gvec_umin8 helper_gvec_umin8_mips64
+#define helper_gvec_umin16 helper_gvec_umin16_mips64
+#define helper_gvec_umin32 helper_gvec_umin32_mips64
+#define helper_gvec_umin64 helper_gvec_umin64_mips64
+#define helper_gvec_umax8 helper_gvec_umax8_mips64
+#define helper_gvec_umax16 helper_gvec_umax16_mips64
+#define helper_gvec_umax32 helper_gvec_umax32_mips64
+#define helper_gvec_umax64 helper_gvec_umax64_mips64
+#define helper_gvec_bitsel helper_gvec_bitsel_mips64
+#define cpu_restore_state cpu_restore_state_mips64
+#define page_collection_lock page_collection_lock_mips64
+#define page_collection_unlock page_collection_unlock_mips64
+#define free_code_gen_buffer free_code_gen_buffer_mips64
+#define tcg_exec_init tcg_exec_init_mips64
+#define tb_cleanup tb_cleanup_mips64
+#define tb_flush tb_flush_mips64
+#define tb_phys_invalidate tb_phys_invalidate_mips64
+#define tb_gen_code tb_gen_code_mips64
+#define tb_exec_lock tb_exec_lock_mips64
+#define tb_exec_unlock tb_exec_unlock_mips64
+#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips64
+#define tb_invalidate_phys_range tb_invalidate_phys_range_mips64
+#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips64
+#define tb_check_watchpoint tb_check_watchpoint_mips64
+#define cpu_io_recompile cpu_io_recompile_mips64
+#define tb_flush_jmp_cache tb_flush_jmp_cache_mips64
+#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_mips64
+#define translator_loop_temp_check translator_loop_temp_check_mips64
+#define translator_loop translator_loop_mips64
+#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_mips64
+#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_mips64
+#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_mips64
+#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_mips64
+#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_mips64
+#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_mips64
#define unassigned_mem_ops unassigned_mem_ops_mips64
-#define unassigned_mem_read unassigned_mem_read_mips64
-#define unassigned_mem_write unassigned_mem_write_mips64
-#define update_spsel update_spsel_mips64
-#define v6_cp_reginfo v6_cp_reginfo_mips64
-#define v6k_cp_reginfo v6k_cp_reginfo_mips64
-#define v7_cp_reginfo v7_cp_reginfo_mips64
-#define v7mp_cp_reginfo v7mp_cp_reginfo_mips64
-#define v7m_pop v7m_pop_mips64
-#define v7m_push v7m_push_mips64
-#define v8_cp_reginfo v8_cp_reginfo_mips64
-#define v8_el2_cp_reginfo v8_el2_cp_reginfo_mips64
-#define v8_el3_cp_reginfo v8_el3_cp_reginfo_mips64
-#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_mips64
-#define vapa_cp_reginfo vapa_cp_reginfo_mips64
-#define vbar_write vbar_write_mips64
-#define vfp_exceptbits_from_host vfp_exceptbits_from_host_mips64
-#define vfp_exceptbits_to_host vfp_exceptbits_to_host_mips64
-#define vfp_get_fpcr vfp_get_fpcr_mips64
-#define vfp_get_fpscr vfp_get_fpscr_mips64
-#define vfp_get_fpsr vfp_get_fpsr_mips64
-#define vfp_reg_offset vfp_reg_offset_mips64
-#define vfp_set_fpcr vfp_set_fpcr_mips64
-#define vfp_set_fpscr vfp_set_fpscr_mips64
-#define vfp_set_fpsr vfp_set_fpsr_mips64
-#define visit_end_implicit_struct visit_end_implicit_struct_mips64
-#define visit_end_list visit_end_list_mips64
-#define visit_end_struct visit_end_struct_mips64
-#define visit_end_union visit_end_union_mips64
-#define visit_get_next_type visit_get_next_type_mips64
-#define visit_next_list visit_next_list_mips64
-#define visit_optional visit_optional_mips64
-#define visit_start_implicit_struct visit_start_implicit_struct_mips64
-#define visit_start_list visit_start_list_mips64
-#define visit_start_struct visit_start_struct_mips64
-#define visit_start_union visit_start_union_mips64
-#define vmsa_cp_reginfo vmsa_cp_reginfo_mips64
-#define vmsa_tcr_el1_write vmsa_tcr_el1_write_mips64
-#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_mips64
-#define vmsa_ttbcr_reset vmsa_ttbcr_reset_mips64
-#define vmsa_ttbcr_write vmsa_ttbcr_write_mips64
-#define vmsa_ttbr_write vmsa_ttbr_write_mips64
-#define write_cpustate_to_list write_cpustate_to_list_mips64
-#define write_list_to_cpustate write_list_to_cpustate_mips64
-#define write_raw_cp_reg write_raw_cp_reg_mips64
-#define X86CPURegister32_lookup X86CPURegister32_lookup_mips64
-#define x86_op_defs x86_op_defs_mips64
-#define xpsr_read xpsr_read_mips64
-#define xpsr_write xpsr_write_mips64
-#define xscale_cpar_write xscale_cpar_write_mips64
-#define xscale_cp_reginfo xscale_cp_reginfo_mips64
-#define cpu_mips_exec cpu_mips_exec_mips64
+#define floatx80_infinity floatx80_infinity_mips64
+#define dup_const_func dup_const_func_mips64
+#define gen_helper_raise_exception gen_helper_raise_exception_mips64
+#define gen_helper_raise_interrupt gen_helper_raise_interrupt_mips64
+#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips64
+#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips64
+#define gen_helper_cpsr_read gen_helper_cpsr_read_mips64
+#define gen_helper_cpsr_write gen_helper_cpsr_write_mips64
+#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips64
+#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips64
+#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips64
+#define helper_mfc0_random helper_mfc0_random_mips64
+#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips64
+#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips64
+#define helper_mfc0_tcbind helper_mfc0_tcbind_mips64
+#define helper_mftc0_tcbind helper_mftc0_tcbind_mips64
+#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips64
+#define helper_mftc0_tcrestart helper_mftc0_tcrestart_mips64
+#define helper_mfc0_tchalt helper_mfc0_tchalt_mips64
+#define helper_mftc0_tchalt helper_mftc0_tchalt_mips64
+#define helper_mfc0_tccontext helper_mfc0_tccontext_mips64
+#define helper_mftc0_tccontext helper_mftc0_tccontext_mips64
+#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips64
+#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips64
+#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips64
+#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips64
+#define helper_mfc0_count helper_mfc0_count_mips64
+#define helper_mfc0_saar helper_mfc0_saar_mips64
+#define helper_mfhc0_saar helper_mfhc0_saar_mips64
+#define helper_mftc0_entryhi helper_mftc0_entryhi_mips64
+#define helper_mftc0_cause helper_mftc0_cause_mips64
+#define helper_mftc0_status helper_mftc0_status_mips64
+#define helper_mfc0_lladdr helper_mfc0_lladdr_mips64
+#define helper_mfc0_maar helper_mfc0_maar_mips64
+#define helper_mfhc0_maar helper_mfhc0_maar_mips64
+#define helper_mfc0_watchlo helper_mfc0_watchlo_mips64
+#define helper_mfc0_watchhi helper_mfc0_watchhi_mips64
+#define helper_mfhc0_watchhi helper_mfhc0_watchhi_mips64
+#define helper_mfc0_debug helper_mfc0_debug_mips64
+#define helper_mftc0_debug helper_mftc0_debug_mips64
+#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips64
+#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips64
+#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips64
+#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips64
+#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips64
+#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips64
+#define helper_dmfc0_maar helper_dmfc0_maar_mips64
+#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips64
+#define helper_dmfc0_watchhi helper_dmfc0_watchhi_mips64
+#define helper_dmfc0_saar helper_dmfc0_saar_mips64
+#define helper_mtc0_index helper_mtc0_index_mips64
+#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips64
+#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips64
+#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips64
+#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips64
+#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips64
+#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips64
+#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips64
+#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips64
+#define helper_mtc0_yqmask helper_mtc0_yqmask_mips64
+#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips64
+#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips64
+#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips64
+#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips64
+#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips64
+#define helper_mtc0_tcbind helper_mtc0_tcbind_mips64
+#define helper_mttc0_tcbind helper_mttc0_tcbind_mips64
+#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips64
+#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips64
+#define helper_mtc0_tchalt helper_mtc0_tchalt_mips64
+#define helper_mttc0_tchalt helper_mttc0_tchalt_mips64
+#define helper_mtc0_tccontext helper_mtc0_tccontext_mips64
+#define helper_mttc0_tccontext helper_mttc0_tccontext_mips64
+#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips64
+#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips64
+#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips64
+#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips64
+#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips64
+#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips64
+#define helper_mtc0_context helper_mtc0_context_mips64
+#define helper_mtc0_memorymapid helper_mtc0_memorymapid_mips64
+#define update_pagemask update_pagemask_mips64
+#define helper_mtc0_pagemask helper_mtc0_pagemask_mips64
+#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips64
+#define helper_mtc0_segctl0 helper_mtc0_segctl0_mips64
+#define helper_mtc0_segctl1 helper_mtc0_segctl1_mips64
+#define helper_mtc0_segctl2 helper_mtc0_segctl2_mips64
+#define helper_mtc0_pwfield helper_mtc0_pwfield_mips64
+#define helper_mtc0_pwsize helper_mtc0_pwsize_mips64
+#define helper_mtc0_wired helper_mtc0_wired_mips64
+#define helper_mtc0_pwctl helper_mtc0_pwctl_mips64
+#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips64
+#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips64
+#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips64
+#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips64
+#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips64
+#define helper_mtc0_hwrena helper_mtc0_hwrena_mips64
+#define helper_mtc0_count helper_mtc0_count_mips64
+#define helper_mtc0_saari helper_mtc0_saari_mips64
+#define helper_mtc0_saar helper_mtc0_saar_mips64
+#define helper_mthc0_saar helper_mthc0_saar_mips64
+#define helper_mtc0_entryhi helper_mtc0_entryhi_mips64
+#define helper_mttc0_entryhi helper_mttc0_entryhi_mips64
+#define helper_mtc0_compare helper_mtc0_compare_mips64
+#define helper_mtc0_status helper_mtc0_status_mips64
+#define helper_mttc0_status helper_mttc0_status_mips64
+#define helper_mtc0_intctl helper_mtc0_intctl_mips64
+#define helper_mtc0_srsctl helper_mtc0_srsctl_mips64
+#define helper_mtc0_cause helper_mtc0_cause_mips64
+#define helper_mttc0_cause helper_mttc0_cause_mips64
+#define helper_mftc0_epc helper_mftc0_epc_mips64
+#define helper_mftc0_ebase helper_mftc0_ebase_mips64
+#define helper_mtc0_ebase helper_mtc0_ebase_mips64
+#define helper_mttc0_ebase helper_mttc0_ebase_mips64
+#define helper_mftc0_configx helper_mftc0_configx_mips64
+#define helper_mtc0_config0 helper_mtc0_config0_mips64
+#define helper_mtc0_config2 helper_mtc0_config2_mips64
+#define helper_mtc0_config3 helper_mtc0_config3_mips64
+#define helper_mtc0_config4 helper_mtc0_config4_mips64
+#define helper_mtc0_config5 helper_mtc0_config5_mips64
+#define helper_mtc0_lladdr helper_mtc0_lladdr_mips64
+#define helper_mtc0_maar helper_mtc0_maar_mips64
+#define helper_mthc0_maar helper_mthc0_maar_mips64
+#define helper_mtc0_maari helper_mtc0_maari_mips64
+#define helper_mtc0_watchlo helper_mtc0_watchlo_mips64
+#define helper_mtc0_watchhi helper_mtc0_watchhi_mips64
+#define helper_mthc0_watchhi helper_mthc0_watchhi_mips64
+#define helper_mtc0_xcontext helper_mtc0_xcontext_mips64
+#define helper_mtc0_framemask helper_mtc0_framemask_mips64
+#define helper_mtc0_debug helper_mtc0_debug_mips64
+#define helper_mttc0_debug helper_mttc0_debug_mips64
+#define helper_mtc0_performance0 helper_mtc0_performance0_mips64
+#define helper_mtc0_errctl helper_mtc0_errctl_mips64
+#define helper_mtc0_taglo helper_mtc0_taglo_mips64
+#define helper_mtc0_datalo helper_mtc0_datalo_mips64
+#define helper_mtc0_taghi helper_mtc0_taghi_mips64
+#define helper_mtc0_datahi helper_mtc0_datahi_mips64
+#define helper_mftgpr helper_mftgpr_mips64
+#define helper_mftlo helper_mftlo_mips64
+#define helper_mfthi helper_mfthi_mips64
+#define helper_mftacx helper_mftacx_mips64
+#define helper_mftdsp helper_mftdsp_mips64
+#define helper_mttgpr helper_mttgpr_mips64
+#define helper_mttlo helper_mttlo_mips64
+#define helper_mtthi helper_mtthi_mips64
+#define helper_mttacx helper_mttacx_mips64
+#define helper_mttdsp helper_mttdsp_mips64
+#define helper_dmt helper_dmt_mips64
+#define helper_emt helper_emt_mips64
+#define helper_dvpe helper_dvpe_mips64
+#define helper_evpe helper_evpe_mips64
+#define helper_dvp helper_dvp_mips64
+#define helper_evp helper_evp_mips64
#define cpu_mips_get_random cpu_mips_get_random_mips64
-#define cpu_mips_get_count cpu_mips_get_count_mips64
-#define cpu_mips_store_count cpu_mips_store_count_mips64
-#define cpu_mips_store_compare cpu_mips_store_compare_mips64
-#define cpu_mips_start_count cpu_mips_start_count_mips64
-#define cpu_mips_stop_count cpu_mips_stop_count_mips64
-#define mips_machine_init mips_machine_init_mips64
-#define cpu_mips_kseg0_to_phys cpu_mips_kseg0_to_phys_mips64
-#define cpu_mips_phys_to_kseg0 cpu_mips_phys_to_kseg0_mips64
-#define cpu_mips_kvm_um_phys_to_kseg0 cpu_mips_kvm_um_phys_to_kseg0_mips64
-#define mips_cpu_register_types mips_cpu_register_types_mips64
#define cpu_mips_init cpu_mips_init_mips64
-#define cpu_state_reset cpu_state_reset_mips64
-#define helper_msa_andi_b helper_msa_andi_b_mips64
-#define helper_msa_ori_b helper_msa_ori_b_mips64
-#define helper_msa_nori_b helper_msa_nori_b_mips64
-#define helper_msa_xori_b helper_msa_xori_b_mips64
-#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips64
-#define helper_msa_bmzi_b helper_msa_bmzi_b_mips64
-#define helper_msa_bseli_b helper_msa_bseli_b_mips64
-#define helper_msa_shf_df helper_msa_shf_df_mips64
-#define helper_msa_and_v helper_msa_and_v_mips64
-#define helper_msa_or_v helper_msa_or_v_mips64
-#define helper_msa_nor_v helper_msa_nor_v_mips64
-#define helper_msa_xor_v helper_msa_xor_v_mips64
-#define helper_msa_bmnz_v helper_msa_bmnz_v_mips64
-#define helper_msa_bmz_v helper_msa_bmz_v_mips64
-#define helper_msa_bsel_v helper_msa_bsel_v_mips64
-#define helper_msa_addvi_df helper_msa_addvi_df_mips64
-#define helper_msa_subvi_df helper_msa_subvi_df_mips64
-#define helper_msa_ceqi_df helper_msa_ceqi_df_mips64
-#define helper_msa_clei_s_df helper_msa_clei_s_df_mips64
-#define helper_msa_clei_u_df helper_msa_clei_u_df_mips64
-#define helper_msa_clti_s_df helper_msa_clti_s_df_mips64
-#define helper_msa_clti_u_df helper_msa_clti_u_df_mips64
-#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips64
-#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips64
-#define helper_msa_mini_s_df helper_msa_mini_s_df_mips64
-#define helper_msa_mini_u_df helper_msa_mini_u_df_mips64
-#define helper_msa_ldi_df helper_msa_ldi_df_mips64
-#define helper_msa_slli_df helper_msa_slli_df_mips64
-#define helper_msa_srai_df helper_msa_srai_df_mips64
-#define helper_msa_srli_df helper_msa_srli_df_mips64
-#define helper_msa_bclri_df helper_msa_bclri_df_mips64
-#define helper_msa_bseti_df helper_msa_bseti_df_mips64
-#define helper_msa_bnegi_df helper_msa_bnegi_df_mips64
-#define helper_msa_sat_s_df helper_msa_sat_s_df_mips64
-#define helper_msa_sat_u_df helper_msa_sat_u_df_mips64
-#define helper_msa_srari_df helper_msa_srari_df_mips64
-#define helper_msa_srlri_df helper_msa_srlri_df_mips64
-#define helper_msa_binsli_df helper_msa_binsli_df_mips64
-#define helper_msa_binsri_df helper_msa_binsri_df_mips64
-#define helper_msa_sll_df helper_msa_sll_df_mips64
-#define helper_msa_sra_df helper_msa_sra_df_mips64
-#define helper_msa_srl_df helper_msa_srl_df_mips64
-#define helper_msa_bclr_df helper_msa_bclr_df_mips64
-#define helper_msa_bset_df helper_msa_bset_df_mips64
-#define helper_msa_bneg_df helper_msa_bneg_df_mips64
-#define helper_msa_addv_df helper_msa_addv_df_mips64
-#define helper_msa_subv_df helper_msa_subv_df_mips64
-#define helper_msa_max_s_df helper_msa_max_s_df_mips64
-#define helper_msa_max_u_df helper_msa_max_u_df_mips64
-#define helper_msa_min_s_df helper_msa_min_s_df_mips64
-#define helper_msa_min_u_df helper_msa_min_u_df_mips64
-#define helper_msa_max_a_df helper_msa_max_a_df_mips64
-#define helper_msa_min_a_df helper_msa_min_a_df_mips64
-#define helper_msa_ceq_df helper_msa_ceq_df_mips64
-#define helper_msa_clt_s_df helper_msa_clt_s_df_mips64
-#define helper_msa_clt_u_df helper_msa_clt_u_df_mips64
-#define helper_msa_cle_s_df helper_msa_cle_s_df_mips64
-#define helper_msa_cle_u_df helper_msa_cle_u_df_mips64
-#define helper_msa_add_a_df helper_msa_add_a_df_mips64
-#define helper_msa_adds_a_df helper_msa_adds_a_df_mips64
-#define helper_msa_adds_s_df helper_msa_adds_s_df_mips64
-#define helper_msa_adds_u_df helper_msa_adds_u_df_mips64
-#define helper_msa_ave_s_df helper_msa_ave_s_df_mips64
-#define helper_msa_ave_u_df helper_msa_ave_u_df_mips64
-#define helper_msa_aver_s_df helper_msa_aver_s_df_mips64
-#define helper_msa_aver_u_df helper_msa_aver_u_df_mips64
-#define helper_msa_subs_s_df helper_msa_subs_s_df_mips64
-#define helper_msa_subs_u_df helper_msa_subs_u_df_mips64
-#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips64
-#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips64
-#define helper_msa_asub_s_df helper_msa_asub_s_df_mips64
-#define helper_msa_asub_u_df helper_msa_asub_u_df_mips64
-#define helper_msa_mulv_df helper_msa_mulv_df_mips64
-#define helper_msa_div_s_df helper_msa_div_s_df_mips64
-#define helper_msa_div_u_df helper_msa_div_u_df_mips64
-#define helper_msa_mod_s_df helper_msa_mod_s_df_mips64
-#define helper_msa_mod_u_df helper_msa_mod_u_df_mips64
-#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips64
-#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips64
-#define helper_msa_srar_df helper_msa_srar_df_mips64
-#define helper_msa_srlr_df helper_msa_srlr_df_mips64
-#define helper_msa_hadd_s_df helper_msa_hadd_s_df_mips64
-#define helper_msa_hadd_u_df helper_msa_hadd_u_df_mips64
-#define helper_msa_hsub_s_df helper_msa_hsub_s_df_mips64
-#define helper_msa_hsub_u_df helper_msa_hsub_u_df_mips64
-#define helper_msa_mul_q_df helper_msa_mul_q_df_mips64
-#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips64
-#define helper_msa_sld_df helper_msa_sld_df_mips64
-#define helper_msa_maddv_df helper_msa_maddv_df_mips64
-#define helper_msa_msubv_df helper_msa_msubv_df_mips64
-#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips64
-#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips64
-#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips64
-#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips64
-#define helper_msa_binsl_df helper_msa_binsl_df_mips64
-#define helper_msa_binsr_df helper_msa_binsr_df_mips64
-#define helper_msa_madd_q_df helper_msa_madd_q_df_mips64
-#define helper_msa_msub_q_df helper_msa_msub_q_df_mips64
-#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips64
-#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips64
-#define helper_msa_splat_df helper_msa_splat_df_mips64
-#define helper_msa_pckev_df helper_msa_pckev_df_mips64
-#define helper_msa_pckod_df helper_msa_pckod_df_mips64
-#define helper_msa_ilvl_df helper_msa_ilvl_df_mips64
-#define helper_msa_ilvr_df helper_msa_ilvr_df_mips64
-#define helper_msa_ilvev_df helper_msa_ilvev_df_mips64
-#define helper_msa_ilvod_df helper_msa_ilvod_df_mips64
-#define helper_msa_vshf_df helper_msa_vshf_df_mips64
-#define helper_msa_sldi_df helper_msa_sldi_df_mips64
-#define helper_msa_splati_df helper_msa_splati_df_mips64
-#define helper_msa_copy_s_df helper_msa_copy_s_df_mips64
-#define helper_msa_copy_u_df helper_msa_copy_u_df_mips64
-#define helper_msa_insert_df helper_msa_insert_df_mips64
-#define helper_msa_insve_df helper_msa_insve_df_mips64
-#define helper_msa_ctcmsa helper_msa_ctcmsa_mips64
-#define helper_msa_cfcmsa helper_msa_cfcmsa_mips64
-#define helper_msa_move_v helper_msa_move_v_mips64
-#define helper_msa_fill_df helper_msa_fill_df_mips64
-#define helper_msa_nlzc_df helper_msa_nlzc_df_mips64
-#define helper_msa_nloc_df helper_msa_nloc_df_mips64
-#define helper_msa_pcnt_df helper_msa_pcnt_df_mips64
-#define helper_msa_fcaf_df helper_msa_fcaf_df_mips64
-#define helper_msa_fcun_df helper_msa_fcun_df_mips64
-#define helper_msa_fceq_df helper_msa_fceq_df_mips64
-#define helper_msa_fcueq_df helper_msa_fcueq_df_mips64
-#define helper_msa_fclt_df helper_msa_fclt_df_mips64
-#define helper_msa_fcult_df helper_msa_fcult_df_mips64
-#define helper_msa_fcle_df helper_msa_fcle_df_mips64
-#define helper_msa_fcule_df helper_msa_fcule_df_mips64
-#define helper_msa_fsaf_df helper_msa_fsaf_df_mips64
-#define helper_msa_fsun_df helper_msa_fsun_df_mips64
-#define helper_msa_fseq_df helper_msa_fseq_df_mips64
-#define helper_msa_fsueq_df helper_msa_fsueq_df_mips64
-#define helper_msa_fslt_df helper_msa_fslt_df_mips64
-#define helper_msa_fsult_df helper_msa_fsult_df_mips64
-#define helper_msa_fsle_df helper_msa_fsle_df_mips64
-#define helper_msa_fsule_df helper_msa_fsule_df_mips64
-#define helper_msa_fcor_df helper_msa_fcor_df_mips64
-#define helper_msa_fcune_df helper_msa_fcune_df_mips64
-#define helper_msa_fcne_df helper_msa_fcne_df_mips64
-#define helper_msa_fsor_df helper_msa_fsor_df_mips64
-#define helper_msa_fsune_df helper_msa_fsune_df_mips64
-#define helper_msa_fsne_df helper_msa_fsne_df_mips64
-#define helper_msa_fadd_df helper_msa_fadd_df_mips64
-#define helper_msa_fsub_df helper_msa_fsub_df_mips64
-#define helper_msa_fmul_df helper_msa_fmul_df_mips64
-#define helper_msa_fdiv_df helper_msa_fdiv_df_mips64
-#define helper_msa_fmadd_df helper_msa_fmadd_df_mips64
-#define helper_msa_fmsub_df helper_msa_fmsub_df_mips64
-#define helper_msa_fexp2_df helper_msa_fexp2_df_mips64
-#define helper_msa_fexdo_df helper_msa_fexdo_df_mips64
-#define helper_msa_ftq_df helper_msa_ftq_df_mips64
-#define helper_msa_fmin_df helper_msa_fmin_df_mips64
-#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips64
-#define helper_msa_fmax_df helper_msa_fmax_df_mips64
-#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips64
-#define helper_msa_fclass_df helper_msa_fclass_df_mips64
-#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips64
-#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips64
-#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips64
-#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips64
-#define helper_msa_frcp_df helper_msa_frcp_df_mips64
-#define helper_msa_frint_df helper_msa_frint_df_mips64
-#define helper_msa_flog2_df helper_msa_flog2_df_mips64
-#define helper_msa_fexupl_df helper_msa_fexupl_df_mips64
-#define helper_msa_fexupr_df helper_msa_fexupr_df_mips64
-#define helper_msa_ffql_df helper_msa_ffql_df_mips64
-#define helper_msa_ffqr_df helper_msa_ffqr_df_mips64
-#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips64
-#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips64
-#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips64
-#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips64
-#define helper_paddsb helper_paddsb_mips64
-#define helper_paddusb helper_paddusb_mips64
-#define helper_paddsh helper_paddsh_mips64
-#define helper_paddush helper_paddush_mips64
-#define helper_paddb helper_paddb_mips64
-#define helper_paddh helper_paddh_mips64
-#define helper_paddw helper_paddw_mips64
-#define helper_psubsb helper_psubsb_mips64
-#define helper_psubusb helper_psubusb_mips64
-#define helper_psubsh helper_psubsh_mips64
-#define helper_psubush helper_psubush_mips64
-#define helper_psubb helper_psubb_mips64
-#define helper_psubh helper_psubh_mips64
-#define helper_psubw helper_psubw_mips64
-#define helper_pshufh helper_pshufh_mips64
-#define helper_packsswh helper_packsswh_mips64
-#define helper_packsshb helper_packsshb_mips64
-#define helper_packushb helper_packushb_mips64
-#define helper_punpcklwd helper_punpcklwd_mips64
-#define helper_punpckhwd helper_punpckhwd_mips64
-#define helper_punpcklhw helper_punpcklhw_mips64
-#define helper_punpckhhw helper_punpckhhw_mips64
-#define helper_punpcklbh helper_punpcklbh_mips64
-#define helper_punpckhbh helper_punpckhbh_mips64
-#define helper_pavgh helper_pavgh_mips64
-#define helper_pavgb helper_pavgb_mips64
-#define helper_pmaxsh helper_pmaxsh_mips64
-#define helper_pminsh helper_pminsh_mips64
-#define helper_pmaxub helper_pmaxub_mips64
-#define helper_pminub helper_pminub_mips64
-#define helper_pcmpeqw helper_pcmpeqw_mips64
-#define helper_pcmpgtw helper_pcmpgtw_mips64
-#define helper_pcmpeqh helper_pcmpeqh_mips64
-#define helper_pcmpgth helper_pcmpgth_mips64
-#define helper_pcmpeqb helper_pcmpeqb_mips64
-#define helper_pcmpgtb helper_pcmpgtb_mips64
-#define helper_psllw helper_psllw_mips64
-#define helper_psrlw helper_psrlw_mips64
-#define helper_psraw helper_psraw_mips64
-#define helper_psllh helper_psllh_mips64
-#define helper_psrlh helper_psrlh_mips64
-#define helper_psrah helper_psrah_mips64
-#define helper_pmullh helper_pmullh_mips64
-#define helper_pmulhh helper_pmulhh_mips64
-#define helper_pmulhuh helper_pmulhuh_mips64
-#define helper_pmaddhw helper_pmaddhw_mips64
-#define helper_pasubub helper_pasubub_mips64
-#define helper_biadd helper_biadd_mips64
-#define helper_pmovmskb helper_pmovmskb_mips64
 #define helper_absq_s_ph helper_absq_s_ph_mips64
 #define helper_absq_s_qb helper_absq_s_qb_mips64
 #define helper_absq_s_w helper_absq_s_w_mips64
+#define helper_absq_s_ob helper_absq_s_ob_mips64
+#define helper_absq_s_qh helper_absq_s_qh_mips64
+#define helper_absq_s_pw helper_absq_s_pw_mips64
 #define helper_addqh_ph helper_addqh_ph_mips64
 #define helper_addqh_r_ph helper_addqh_r_ph_mips64
 #define helper_addqh_r_w helper_addqh_r_w_mips64
@@ -3279,35 +1450,89 @@
 #define helper_subu_qb helper_subu_qb_mips64
 #define helper_subu_s_ph helper_subu_s_ph_mips64
 #define helper_subu_s_qb helper_subu_s_qb_mips64
+#define helper_adduh_ob helper_adduh_ob_mips64
+#define helper_adduh_r_ob helper_adduh_r_ob_mips64
+#define helper_subuh_ob helper_subuh_ob_mips64
+#define helper_subuh_r_ob helper_subuh_r_ob_mips64
+#define helper_addq_pw helper_addq_pw_mips64
+#define helper_addq_qh helper_addq_qh_mips64
+#define helper_addq_s_pw helper_addq_s_pw_mips64
+#define helper_addq_s_qh helper_addq_s_qh_mips64
+#define helper_addu_ob helper_addu_ob_mips64
+#define helper_addu_qh helper_addu_qh_mips64
+#define helper_addu_s_ob helper_addu_s_ob_mips64
+#define helper_addu_s_qh helper_addu_s_qh_mips64
+#define helper_subq_pw helper_subq_pw_mips64
+#define helper_subq_qh helper_subq_qh_mips64
+#define helper_subq_s_pw helper_subq_s_pw_mips64
+#define helper_subq_s_qh helper_subq_s_qh_mips64
+#define helper_subu_ob helper_subu_ob_mips64
+#define helper_subu_qh helper_subu_qh_mips64
+#define helper_subu_s_ob helper_subu_s_ob_mips64
+#define helper_subu_s_qh helper_subu_s_qh_mips64
 #define helper_subuh_qb helper_subuh_qb_mips64
 #define helper_subuh_r_qb helper_subuh_r_qb_mips64
 #define helper_addsc helper_addsc_mips64
 #define helper_addwc helper_addwc_mips64
 #define helper_modsub helper_modsub_mips64
 #define helper_raddu_w_qb helper_raddu_w_qb_mips64
+#define helper_raddu_l_ob helper_raddu_l_ob_mips64
 #define helper_precr_qb_ph helper_precr_qb_ph_mips64
 #define helper_precrq_qb_ph helper_precrq_qb_ph_mips64
 #define helper_precr_sra_ph_w helper_precr_sra_ph_w_mips64
 #define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mips64
 #define helper_precrq_ph_w helper_precrq_ph_w_mips64
 #define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mips64
+#define helper_precr_ob_qh helper_precr_ob_qh_mips64
+#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips64
+#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips64
+#define helper_precrq_ob_qh helper_precrq_ob_qh_mips64
+#define helper_precrq_qh_pw helper_precrq_qh_pw_mips64
+#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips64
+#define helper_precrq_pw_l helper_precrq_pw_l_mips64
 #define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mips64
+#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips64
+#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips64
+#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips64
+#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips64
+#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips64
 #define helper_precequ_ph_qbl helper_precequ_ph_qbl_mips64
 #define helper_precequ_ph_qbr helper_precequ_ph_qbr_mips64
 #define helper_precequ_ph_qbla helper_precequ_ph_qbla_mips64
 #define helper_precequ_ph_qbra helper_precequ_ph_qbra_mips64
+#define helper_precequ_qh_obl helper_precequ_qh_obl_mips64
+#define helper_precequ_qh_obr helper_precequ_qh_obr_mips64
+#define helper_precequ_qh_obla helper_precequ_qh_obla_mips64
+#define helper_precequ_qh_obra helper_precequ_qh_obra_mips64
 #define helper_preceu_ph_qbl helper_preceu_ph_qbl_mips64
 #define helper_preceu_ph_qbr helper_preceu_ph_qbr_mips64
 #define helper_preceu_ph_qbla helper_preceu_ph_qbla_mips64
 #define helper_preceu_ph_qbra helper_preceu_ph_qbra_mips64
+#define helper_preceu_qh_obl helper_preceu_qh_obl_mips64
+#define helper_preceu_qh_obr helper_preceu_qh_obr_mips64
+#define helper_preceu_qh_obla helper_preceu_qh_obla_mips64
+#define helper_preceu_qh_obra helper_preceu_qh_obra_mips64
 #define helper_shll_qb helper_shll_qb_mips64
 #define helper_shrl_qb helper_shrl_qb_mips64
 #define helper_shra_qb helper_shra_qb_mips64
 #define helper_shra_r_qb helper_shra_r_qb_mips64
+#define helper_shll_ob helper_shll_ob_mips64
+#define helper_shrl_ob helper_shrl_ob_mips64
+#define helper_shra_ob helper_shra_ob_mips64
+#define helper_shra_r_ob helper_shra_r_ob_mips64
 #define helper_shll_ph helper_shll_ph_mips64
 #define helper_shll_s_ph helper_shll_s_ph_mips64
+#define helper_shll_qh helper_shll_qh_mips64
+#define helper_shll_s_qh helper_shll_s_qh_mips64
+#define helper_shrl_qh helper_shrl_qh_mips64
+#define helper_shra_qh helper_shra_qh_mips64
+#define helper_shra_r_qh helper_shra_r_qh_mips64
 #define helper_shll_s_w helper_shll_s_w_mips64
 #define helper_shra_r_w helper_shra_r_w_mips64
+#define helper_shll_pw helper_shll_pw_mips64
+#define helper_shll_s_pw helper_shll_s_pw_mips64
+#define helper_shra_pw helper_shra_pw_mips64
+#define helper_shra_r_pw helper_shra_r_pw_mips64
 #define helper_shrl_ph helper_shrl_ph_mips64
 #define helper_shra_ph helper_shra_ph_mips64
 #define helper_shra_r_ph helper_shra_r_ph_mips64
@@ -3321,10 +1546,20 @@
 #define helper_muleq_s_w_phr helper_muleq_s_w_phr_mips64
 #define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mips64
 #define helper_mulsa_w_ph helper_mulsa_w_ph_mips64
+#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips64
+#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips64
+#define helper_mulq_rs_qh helper_mulq_rs_qh_mips64
+#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips64
+#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips64
+#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips64
 #define helper_dpau_h_qbl helper_dpau_h_qbl_mips64
 #define helper_dpau_h_qbr helper_dpau_h_qbr_mips64
 #define helper_dpsu_h_qbl helper_dpsu_h_qbl_mips64
 #define helper_dpsu_h_qbr helper_dpsu_h_qbr_mips64
+#define helper_dpau_h_obl helper_dpau_h_obl_mips64
+#define helper_dpau_h_obr helper_dpau_h_obr_mips64
+#define helper_dpsu_h_obl helper_dpsu_h_obl_mips64
+#define helper_dpsu_h_obr helper_dpsu_h_obr_mips64
 #define helper_dpa_w_ph helper_dpa_w_ph_mips64
 #define helper_dpax_w_ph helper_dpax_w_ph_mips64
 #define helper_dps_w_ph helper_dps_w_ph_mips64
@@ -3335,200 +1570,92 @@
 #define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mips64
 #define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mips64
 #define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mips64
+#define helper_dpa_w_qh helper_dpa_w_qh_mips64
+#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips64
+#define helper_dps_w_qh helper_dps_w_qh_mips64
+#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips64
 #define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mips64
 #define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mips64
+#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips64
+#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips64
+#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips64
 #define helper_maq_s_w_phl helper_maq_s_w_phl_mips64
 #define helper_maq_s_w_phr helper_maq_s_w_phr_mips64
 #define helper_maq_sa_w_phl helper_maq_sa_w_phl_mips64
 #define helper_maq_sa_w_phr helper_maq_sa_w_phr_mips64
 #define helper_mulq_s_w helper_mulq_s_w_mips64
 #define helper_mulq_rs_w helper_mulq_rs_w_mips64
+#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips64
+#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips64
+#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips64
+#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips64
+#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips64
+#define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips64
+#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips64
+#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips64
+#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips64
+#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips64
+#define helper_dmadd helper_dmadd_mips64
+#define helper_dmaddu helper_dmaddu_mips64
+#define helper_dmsub helper_dmsub_mips64
+#define helper_dmsubu helper_dmsubu_mips64
 #define helper_bitrev helper_bitrev_mips64
 #define helper_insv helper_insv_mips64
+#define helper_dinsv helper_dinsv_mips64
 #define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mips64
 #define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mips64
 #define helper_cmpgu_le_qb helper_cmpgu_le_qb_mips64
+#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips64
+#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips64
+#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips64
 #define helper_cmpu_eq_qb helper_cmpu_eq_qb_mips64
 #define helper_cmpu_lt_qb helper_cmpu_lt_qb_mips64
 #define helper_cmpu_le_qb helper_cmpu_le_qb_mips64
 #define helper_cmp_eq_ph helper_cmp_eq_ph_mips64
 #define helper_cmp_lt_ph helper_cmp_lt_ph_mips64
 #define helper_cmp_le_ph helper_cmp_le_ph_mips64
+#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips64
+#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips64
+#define helper_cmpu_le_ob helper_cmpu_le_ob_mips64
+#define helper_cmp_eq_qh helper_cmp_eq_qh_mips64
+#define helper_cmp_lt_qh helper_cmp_lt_qh_mips64
+#define helper_cmp_le_qh helper_cmp_le_qh_mips64
+#define helper_cmp_eq_pw helper_cmp_eq_pw_mips64
+#define helper_cmp_lt_pw helper_cmp_lt_pw_mips64
+#define helper_cmp_le_pw helper_cmp_le_pw_mips64
+#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips64
+#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips64
+#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips64
 #define helper_pick_qb helper_pick_qb_mips64
 #define helper_pick_ph helper_pick_ph_mips64
+#define helper_pick_ob helper_pick_ob_mips64
+#define helper_pick_qh helper_pick_qh_mips64
+#define helper_pick_pw helper_pick_pw_mips64
 #define helper_packrl_ph helper_packrl_ph_mips64
+#define helper_packrl_pw helper_packrl_pw_mips64
 #define helper_extr_w helper_extr_w_mips64
 #define helper_extr_r_w helper_extr_r_w_mips64
 #define helper_extr_rs_w helper_extr_rs_w_mips64
+#define helper_dextr_w helper_dextr_w_mips64
+#define helper_dextr_r_w helper_dextr_r_w_mips64
+#define helper_dextr_rs_w helper_dextr_rs_w_mips64
+#define helper_dextr_l helper_dextr_l_mips64
+#define helper_dextr_r_l helper_dextr_r_l_mips64
+#define helper_dextr_rs_l helper_dextr_rs_l_mips64
 #define helper_extr_s_h helper_extr_s_h_mips64
+#define helper_dextr_s_h helper_dextr_s_h_mips64
 #define helper_extp helper_extp_mips64
 #define helper_extpdp helper_extpdp_mips64
+#define helper_dextp helper_dextp_mips64
+#define helper_dextpdp helper_dextpdp_mips64
 #define helper_shilo helper_shilo_mips64
+#define helper_dshilo helper_dshilo_mips64
 #define helper_mthlip helper_mthlip_mips64
+#define helper_dmthlip helper_dmthlip_mips64
 #define cpu_wrdsp cpu_wrdsp_mips64
 #define helper_wrdsp helper_wrdsp_mips64
 #define cpu_rddsp cpu_rddsp_mips64
 #define helper_rddsp helper_rddsp_mips64
-#define helper_raise_exception_err helper_raise_exception_err_mips64
-#define helper_clo helper_clo_mips64
-#define helper_clz helper_clz_mips64
-#define helper_muls helper_muls_mips64
-#define helper_mulsu helper_mulsu_mips64
-#define helper_macc helper_macc_mips64
-#define helper_macchi helper_macchi_mips64
-#define helper_maccu helper_maccu_mips64
-#define helper_macchiu helper_macchiu_mips64
-#define helper_msac helper_msac_mips64
-#define helper_msachi helper_msachi_mips64
-#define helper_msacu helper_msacu_mips64
-#define helper_msachiu helper_msachiu_mips64
-#define helper_mulhi helper_mulhi_mips64
-#define helper_mulhiu helper_mulhiu_mips64
-#define helper_mulshi helper_mulshi_mips64
-#define helper_mulshiu helper_mulshiu_mips64
-#define helper_bitswap helper_bitswap_mips64
-#define helper_ll helper_ll_mips64
-#define helper_sc helper_sc_mips64
-#define helper_swl helper_swl_mips64
-#define helper_swr helper_swr_mips64
-#define helper_lwm helper_lwm_mips64
-#define helper_swm helper_swm_mips64
-#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips64
-#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips64
-#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips64
-#define helper_mfc0_random helper_mfc0_random_mips64
-#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips64
-#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips64
-#define helper_mfc0_tcbind helper_mfc0_tcbind_mips64
-#define helper_mftc0_tcbind helper_mftc0_tcbind_mips64
-#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips64
-#define helper_mftc0_tcrestart helper_mftc0_tcrestart_mips64
-#define helper_mfc0_tchalt helper_mfc0_tchalt_mips64
-#define helper_mftc0_tchalt helper_mftc0_tchalt_mips64
-#define helper_mfc0_tccontext helper_mfc0_tccontext_mips64
-#define helper_mftc0_tccontext helper_mftc0_tccontext_mips64
-#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips64
-#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips64
-#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips64
-#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips64
-#define helper_mfc0_count helper_mfc0_count_mips64
-#define helper_mftc0_entryhi helper_mftc0_entryhi_mips64
-#define helper_mftc0_cause helper_mftc0_cause_mips64
-#define helper_mftc0_status helper_mftc0_status_mips64
-#define helper_mfc0_lladdr helper_mfc0_lladdr_mips64
-#define helper_mfc0_watchlo helper_mfc0_watchlo_mips64
-#define helper_mfc0_watchhi helper_mfc0_watchhi_mips64
-#define helper_mfc0_debug helper_mfc0_debug_mips64
-#define helper_mftc0_debug helper_mftc0_debug_mips64
-#define helper_mtc0_index helper_mtc0_index_mips64
-#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips64
-#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips64
-#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips64
-#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips64
-#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips64
-#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips64
-#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips64
-#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips64
-#define helper_mtc0_yqmask helper_mtc0_yqmask_mips64
-#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips64
-#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips64
-#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips64
-#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips64
-#define helper_mtc0_tcbind helper_mtc0_tcbind_mips64
-#define helper_mttc0_tcbind helper_mttc0_tcbind_mips64
-#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips64
-#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips64
-#define helper_mtc0_tchalt helper_mtc0_tchalt_mips64
-#define helper_mttc0_tchalt helper_mttc0_tchalt_mips64
-#define helper_mtc0_tccontext helper_mtc0_tccontext_mips64
-#define helper_mttc0_tccontext helper_mttc0_tccontext_mips64
-#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips64
-#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips64
-#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips64
-#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips64
-#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips64
-#define helper_mtc0_context helper_mtc0_context_mips64
-#define helper_mtc0_pagemask helper_mtc0_pagemask_mips64
-#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips64
-#define helper_mtc0_wired helper_mtc0_wired_mips64
-#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips64
-#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips64
-#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips64
-#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips64
-#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips64
-#define helper_mtc0_hwrena helper_mtc0_hwrena_mips64
-#define helper_mtc0_count helper_mtc0_count_mips64
-#define helper_mtc0_entryhi helper_mtc0_entryhi_mips64
-#define helper_mttc0_entryhi helper_mttc0_entryhi_mips64
-#define helper_mtc0_compare helper_mtc0_compare_mips64
-#define helper_mtc0_status helper_mtc0_status_mips64
-#define helper_mttc0_status helper_mttc0_status_mips64
-#define helper_mtc0_intctl helper_mtc0_intctl_mips64
-#define helper_mtc0_srsctl helper_mtc0_srsctl_mips64
-#define helper_mtc0_cause helper_mtc0_cause_mips64
-#define helper_mttc0_cause helper_mttc0_cause_mips64
-#define helper_mftc0_epc helper_mftc0_epc_mips64
-#define helper_mftc0_ebase helper_mftc0_ebase_mips64
-#define helper_mtc0_ebase helper_mtc0_ebase_mips64
-#define helper_mttc0_ebase helper_mttc0_ebase_mips64
-#define helper_mftc0_configx helper_mftc0_configx_mips64
-#define helper_mtc0_config0 helper_mtc0_config0_mips64
-#define helper_mtc0_config2 helper_mtc0_config2_mips64
-#define helper_mtc0_config4 helper_mtc0_config4_mips64
-#define helper_mtc0_config5 helper_mtc0_config5_mips64
-#define helper_mtc0_lladdr helper_mtc0_lladdr_mips64
-#define helper_mtc0_watchlo helper_mtc0_watchlo_mips64
-#define helper_mtc0_watchhi helper_mtc0_watchhi_mips64
-#define helper_mtc0_xcontext helper_mtc0_xcontext_mips64
-#define helper_mtc0_framemask helper_mtc0_framemask_mips64
-#define helper_mtc0_debug helper_mtc0_debug_mips64
-#define helper_mttc0_debug helper_mttc0_debug_mips64
-#define helper_mtc0_performance0 helper_mtc0_performance0_mips64
-#define helper_mtc0_taglo helper_mtc0_taglo_mips64
-#define helper_mtc0_datalo helper_mtc0_datalo_mips64
-#define helper_mtc0_taghi helper_mtc0_taghi_mips64
-#define helper_mtc0_datahi helper_mtc0_datahi_mips64
-#define helper_mftgpr helper_mftgpr_mips64
-#define helper_mftlo helper_mftlo_mips64
-#define helper_mfthi helper_mfthi_mips64
-#define helper_mftacx helper_mftacx_mips64
-#define helper_mftdsp helper_mftdsp_mips64
-#define helper_mttgpr helper_mttgpr_mips64
-#define helper_mttlo helper_mttlo_mips64
-#define helper_mtthi helper_mtthi_mips64
-#define helper_mttacx helper_mttacx_mips64
-#define helper_mttdsp helper_mttdsp_mips64
-#define helper_dmt helper_dmt_mips64
-#define helper_emt helper_emt_mips64
-#define helper_dvpe helper_dvpe_mips64
-#define helper_evpe helper_evpe_mips64
-#define helper_fork helper_fork_mips64
-#define helper_yield helper_yield_mips64
-#define r4k_helper_tlbinv r4k_helper_tlbinv_mips64
-#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips64
-#define r4k_helper_tlbwi r4k_helper_tlbwi_mips64
-#define r4k_helper_tlbwr r4k_helper_tlbwr_mips64
-#define r4k_helper_tlbp r4k_helper_tlbp_mips64
-#define r4k_helper_tlbr r4k_helper_tlbr_mips64
-#define helper_tlbwi helper_tlbwi_mips64
-#define helper_tlbwr helper_tlbwr_mips64
-#define helper_tlbp helper_tlbp_mips64
-#define helper_tlbr helper_tlbr_mips64
-#define helper_tlbinv helper_tlbinv_mips64
-#define helper_tlbinvf helper_tlbinvf_mips64
-#define helper_di helper_di_mips64
-#define helper_ei helper_ei_mips64
-#define helper_eret helper_eret_mips64
-#define helper_deret helper_deret_mips64
-#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips64
-#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips64
-#define helper_rdhwr_cc helper_rdhwr_cc_mips64
-#define helper_rdhwr_ccres helper_rdhwr_ccres_mips64
-#define helper_pmon helper_pmon_mips64
-#define helper_wait helper_wait_mips64
-#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips64
-#define mips_cpu_unassigned_access mips_cpu_unassigned_access_mips64
-#define ieee_rm ieee_rm_mips64
 #define helper_cfc1 helper_cfc1_mips64
 #define helper_ctc1 helper_ctc1_mips64
 #define ieee_ex_to_mips ieee_ex_to_mips_mips64
@@ -3537,8 +1664,8 @@
 #define helper_float_cvtd_s helper_float_cvtd_s_mips64
 #define helper_float_cvtd_w helper_float_cvtd_w_mips64
 #define helper_float_cvtd_l helper_float_cvtd_l_mips64
-#define helper_float_cvtl_d helper_float_cvtl_d_mips64
-#define helper_float_cvtl_s helper_float_cvtl_s_mips64
+#define helper_float_cvt_l_d helper_float_cvt_l_d_mips64
+#define helper_float_cvt_l_s helper_float_cvt_l_s_mips64
 #define helper_float_cvtps_pw helper_float_cvtps_pw_mips64
 #define helper_float_cvtpw_ps helper_float_cvtpw_ps_mips64
 #define helper_float_cvts_d helper_float_cvts_d_mips64
@@ -3546,46 +1673,50 @@
 #define helper_float_cvts_l helper_float_cvts_l_mips64
 #define helper_float_cvts_pl helper_float_cvts_pl_mips64
 #define helper_float_cvts_pu helper_float_cvts_pu_mips64
-#define helper_float_cvtw_s helper_float_cvtw_s_mips64
-#define helper_float_cvtw_d helper_float_cvtw_d_mips64
-#define helper_float_roundl_d helper_float_roundl_d_mips64
-#define helper_float_roundl_s helper_float_roundl_s_mips64
-#define helper_float_roundw_d helper_float_roundw_d_mips64
-#define helper_float_roundw_s helper_float_roundw_s_mips64
-#define helper_float_truncl_d helper_float_truncl_d_mips64
-#define helper_float_truncl_s helper_float_truncl_s_mips64
-#define helper_float_truncw_d helper_float_truncw_d_mips64
-#define helper_float_truncw_s helper_float_truncw_s_mips64
-#define helper_float_ceill_d helper_float_ceill_d_mips64
-#define helper_float_ceill_s helper_float_ceill_s_mips64
-#define helper_float_ceilw_d helper_float_ceilw_d_mips64
-#define helper_float_ceilw_s helper_float_ceilw_s_mips64
-#define helper_float_floorl_d helper_float_floorl_d_mips64
-#define helper_float_floorl_s helper_float_floorl_s_mips64
-#define helper_float_floorw_d helper_float_floorw_d_mips64
-#define helper_float_floorw_s helper_float_floorw_s_mips64
+#define helper_float_cvt_w_s helper_float_cvt_w_s_mips64
+#define helper_float_cvt_w_d helper_float_cvt_w_d_mips64
+#define helper_float_round_l_d helper_float_round_l_d_mips64
+#define helper_float_round_l_s helper_float_round_l_s_mips64
+#define helper_float_round_w_d helper_float_round_w_d_mips64
+#define helper_float_round_w_s helper_float_round_w_s_mips64
+#define helper_float_trunc_l_d helper_float_trunc_l_d_mips64
+#define helper_float_trunc_l_s helper_float_trunc_l_s_mips64
+#define helper_float_trunc_w_d helper_float_trunc_w_d_mips64
+#define helper_float_trunc_w_s helper_float_trunc_w_s_mips64
+#define helper_float_ceil_l_d helper_float_ceil_l_d_mips64
+#define helper_float_ceil_l_s helper_float_ceil_l_s_mips64
+#define helper_float_ceil_w_d helper_float_ceil_w_d_mips64
+#define helper_float_ceil_w_s helper_float_ceil_w_s_mips64
+#define helper_float_floor_l_d helper_float_floor_l_d_mips64
+#define helper_float_floor_l_s helper_float_floor_l_s_mips64
+#define helper_float_floor_w_d helper_float_floor_w_d_mips64
+#define helper_float_floor_w_s helper_float_floor_w_s_mips64
+#define helper_float_cvt_2008_l_d helper_float_cvt_2008_l_d_mips64
+#define helper_float_cvt_2008_l_s helper_float_cvt_2008_l_s_mips64
+#define helper_float_cvt_2008_w_d helper_float_cvt_2008_w_d_mips64
+#define helper_float_cvt_2008_w_s helper_float_cvt_2008_w_s_mips64
+#define helper_float_round_2008_l_d helper_float_round_2008_l_d_mips64
+#define helper_float_round_2008_l_s helper_float_round_2008_l_s_mips64
+#define helper_float_round_2008_w_d helper_float_round_2008_w_d_mips64
+#define helper_float_round_2008_w_s helper_float_round_2008_w_s_mips64
+#define helper_float_trunc_2008_l_d helper_float_trunc_2008_l_d_mips64
+#define helper_float_trunc_2008_l_s helper_float_trunc_2008_l_s_mips64
+#define helper_float_trunc_2008_w_d helper_float_trunc_2008_w_d_mips64
+#define helper_float_trunc_2008_w_s helper_float_trunc_2008_w_s_mips64
+#define helper_float_ceil_2008_l_d helper_float_ceil_2008_l_d_mips64
+#define helper_float_ceil_2008_l_s helper_float_ceil_2008_l_s_mips64
+#define helper_float_ceil_2008_w_d helper_float_ceil_2008_w_d_mips64
+#define helper_float_ceil_2008_w_s helper_float_ceil_2008_w_s_mips64
+#define helper_float_floor_2008_l_d helper_float_floor_2008_l_d_mips64
+#define helper_float_floor_2008_l_s helper_float_floor_2008_l_s_mips64
+#define helper_float_floor_2008_w_d helper_float_floor_2008_w_d_mips64
+#define helper_float_floor_2008_w_s helper_float_floor_2008_w_s_mips64
 #define helper_float_abs_d helper_float_abs_d_mips64
 #define helper_float_abs_s helper_float_abs_s_mips64
 #define helper_float_abs_ps helper_float_abs_ps_mips64
 #define helper_float_chs_d helper_float_chs_d_mips64
 #define helper_float_chs_s helper_float_chs_s_mips64
 #define helper_float_chs_ps helper_float_chs_ps_mips64
-#define helper_float_maddf_s helper_float_maddf_s_mips64
-#define helper_float_maddf_d helper_float_maddf_d_mips64
-#define helper_float_msubf_s helper_float_msubf_s_mips64
-#define helper_float_msubf_d helper_float_msubf_d_mips64
-#define helper_float_max_s helper_float_max_s_mips64
-#define helper_float_max_d helper_float_max_d_mips64
-#define helper_float_maxa_s helper_float_maxa_s_mips64
-#define helper_float_maxa_d helper_float_maxa_d_mips64
-#define helper_float_min_s helper_float_min_s_mips64
-#define helper_float_min_d helper_float_min_d_mips64
-#define helper_float_mina_s helper_float_mina_s_mips64
-#define helper_float_mina_d helper_float_mina_d_mips64
-#define helper_float_rint_s helper_float_rint_s_mips64
-#define helper_float_rint_d helper_float_rint_d_mips64
-#define helper_float_class_s helper_float_class_s_mips64
-#define helper_float_class_d helper_float_class_d_mips64
 #define helper_float_recip_d helper_float_recip_d_mips64
 #define helper_float_recip_s helper_float_recip_s_mips64
 #define helper_float_rsqrt_d helper_float_rsqrt_d_mips64
@@ -3596,6 +1727,12 @@
 #define helper_float_rsqrt1_d helper_float_rsqrt1_d_mips64
 #define helper_float_rsqrt1_s helper_float_rsqrt1_s_mips64
 #define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mips64
+#define helper_float_rint_s helper_float_rint_s_mips64
+#define helper_float_rint_d helper_float_rint_d_mips64
+#define float_class_s float_class_s_mips64
+#define helper_float_class_s helper_float_class_s_mips64
+#define float_class_d float_class_d_mips64
+#define helper_float_class_d helper_float_class_d_mips64
 #define helper_float_add_d helper_float_add_d_mips64
 #define helper_float_add_s helper_float_add_s_mips64
 #define helper_float_add_ps helper_float_add_ps_mips64
@@ -3608,6 +1745,22 @@
 #define helper_float_div_d helper_float_div_d_mips64
 #define helper_float_div_s helper_float_div_s_mips64
 #define helper_float_div_ps helper_float_div_ps_mips64
+#define helper_float_recip2_d helper_float_recip2_d_mips64
+#define helper_float_recip2_s helper_float_recip2_s_mips64
+#define helper_float_recip2_ps helper_float_recip2_ps_mips64
+#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips64
+#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips64
+#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips64
+#define helper_float_addr_ps helper_float_addr_ps_mips64
+#define helper_float_mulr_ps helper_float_mulr_ps_mips64
+#define helper_float_max_s helper_float_max_s_mips64
+#define helper_float_max_d helper_float_max_d_mips64
+#define helper_float_maxa_s helper_float_maxa_s_mips64
+#define helper_float_maxa_d helper_float_maxa_d_mips64
+#define helper_float_min_s helper_float_min_s_mips64
+#define helper_float_min_d helper_float_min_d_mips64
+#define helper_float_mina_s helper_float_mina_s_mips64
+#define helper_float_mina_d helper_float_mina_d_mips64
 #define helper_float_madd_d helper_float_madd_d_mips64
 #define helper_float_madd_s helper_float_madd_s_mips64
 #define helper_float_madd_ps helper_float_madd_ps_mips64
@@ -3620,14 +1773,10 @@
 #define helper_float_nmsub_d helper_float_nmsub_d_mips64
 #define helper_float_nmsub_s helper_float_nmsub_s_mips64
 #define helper_float_nmsub_ps helper_float_nmsub_ps_mips64
-#define helper_float_recip2_d helper_float_recip2_d_mips64
-#define helper_float_recip2_s helper_float_recip2_s_mips64
-#define helper_float_recip2_ps helper_float_recip2_ps_mips64
-#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips64
-#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips64
-#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips64
-#define helper_float_addr_ps helper_float_addr_ps_mips64
-#define helper_float_mulr_ps helper_float_mulr_ps_mips64
+#define helper_float_maddf_s helper_float_maddf_s_mips64
+#define helper_float_maddf_d helper_float_maddf_d_mips64
+#define helper_float_msubf_s helper_float_msubf_s_mips64
+#define helper_float_msubf_d helper_float_msubf_d_mips64
 #define helper_cmp_d_f helper_cmp_d_f_mips64
 #define helper_cmpabs_d_f helper_cmpabs_d_f_mips64
 #define helper_cmp_d_un helper_cmp_d_un_mips64
@@ -3768,161 +1917,475 @@
 #define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mips64
 #define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mips64
 #define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mips64
-#define helper_msa_ld_df helper_msa_ld_df_mips64
-#define helper_msa_st_df helper_msa_st_df_mips64
 #define no_mmu_map_address no_mmu_map_address_mips64
 #define fixed_mmu_map_address fixed_mmu_map_address_mips64
 #define r4k_map_address r4k_map_address_mips64
+#define cpu_mips_tlb_flush cpu_mips_tlb_flush_mips64
+#define sync_c0_status sync_c0_status_mips64
+#define cpu_mips_store_status cpu_mips_store_status_mips64
+#define cpu_mips_store_cause cpu_mips_store_cause_mips64
 #define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mips64
-#define mips_cpu_handle_mmu_fault mips_cpu_handle_mmu_fault_mips64
+#define mips_cpu_tlb_fill mips_cpu_tlb_fill_mips64
 #define cpu_mips_translate_address cpu_mips_translate_address_mips64
 #define exception_resume_pc exception_resume_pc_mips64
 #define mips_cpu_do_interrupt mips_cpu_do_interrupt_mips64
 #define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mips64
 #define r4k_invalidate_tlb r4k_invalidate_tlb_mips64
-#define helper_absq_s_ob helper_absq_s_ob_mips64
-#define helper_absq_s_qh helper_absq_s_qh_mips64
-#define helper_absq_s_pw helper_absq_s_pw_mips64
-#define helper_adduh_ob helper_adduh_ob_mips64
-#define helper_adduh_r_ob helper_adduh_r_ob_mips64
-#define helper_subuh_ob helper_subuh_ob_mips64
-#define helper_subuh_r_ob helper_subuh_r_ob_mips64
-#define helper_addq_pw helper_addq_pw_mips64
-#define helper_addq_qh helper_addq_qh_mips64
-#define helper_addq_s_pw helper_addq_s_pw_mips64
-#define helper_addq_s_qh helper_addq_s_qh_mips64
-#define helper_addu_ob helper_addu_ob_mips64
-#define helper_addu_qh helper_addu_qh_mips64
-#define helper_addu_s_ob helper_addu_s_ob_mips64
-#define helper_addu_s_qh helper_addu_s_qh_mips64
-#define helper_subq_pw helper_subq_pw_mips64
-#define helper_subq_qh helper_subq_qh_mips64
-#define helper_subq_s_pw helper_subq_s_pw_mips64
-#define helper_subq_s_qh helper_subq_s_qh_mips64
-#define helper_subu_ob helper_subu_ob_mips64
-#define helper_subu_qh helper_subu_qh_mips64
-#define helper_subu_s_ob helper_subu_s_ob_mips64
-#define helper_subu_s_qh helper_subu_s_qh_mips64
-#define helper_raddu_l_ob helper_raddu_l_ob_mips64
-#define helper_precr_ob_qh helper_precr_ob_qh_mips64
-#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips64
-#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips64
-#define helper_precrq_ob_qh helper_precrq_ob_qh_mips64
-#define helper_precrq_qh_pw helper_precrq_qh_pw_mips64
-#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips64
-#define helper_precrq_pw_l helper_precrq_pw_l_mips64
-#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips64
-#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips64
-#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips64
-#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips64
-#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips64
-#define helper_precequ_qh_obl helper_precequ_qh_obl_mips64
-#define helper_precequ_qh_obr helper_precequ_qh_obr_mips64
-#define helper_precequ_qh_obla helper_precequ_qh_obla_mips64
-#define helper_precequ_qh_obra helper_precequ_qh_obra_mips64
-#define helper_preceu_qh_obl helper_preceu_qh_obl_mips64
-#define helper_preceu_qh_obr helper_preceu_qh_obr_mips64
-#define helper_preceu_qh_obla helper_preceu_qh_obla_mips64
-#define helper_preceu_qh_obra helper_preceu_qh_obra_mips64
-#define helper_shll_ob helper_shll_ob_mips64
-#define helper_shrl_ob helper_shrl_ob_mips64
-#define helper_shra_ob helper_shra_ob_mips64
-#define helper_shra_r_ob helper_shra_r_ob_mips64
-#define helper_shll_qh helper_shll_qh_mips64
-#define helper_shll_s_qh helper_shll_s_qh_mips64
-#define helper_shrl_qh helper_shrl_qh_mips64
-#define helper_shra_qh helper_shra_qh_mips64
-#define helper_shra_r_qh helper_shra_r_qh_mips64
-#define helper_shll_pw helper_shll_pw_mips64
-#define helper_shll_s_pw helper_shll_s_pw_mips64
-#define helper_shra_pw helper_shra_pw_mips64
-#define helper_shra_r_pw helper_shra_r_pw_mips64
-#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips64
-#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips64
-#define helper_mulq_rs_qh helper_mulq_rs_qh_mips64
-#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips64
-#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips64
-#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips64
-#define helper_dpau_h_obl helper_dpau_h_obl_mips64
-#define helper_dpau_h_obr helper_dpau_h_obr_mips64
-#define helper_dpsu_h_obl helper_dpsu_h_obl_mips64
-#define helper_dpsu_h_obr helper_dpsu_h_obr_mips64
-#define helper_dpa_w_qh helper_dpa_w_qh_mips64
-#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips64
-#define helper_dps_w_qh helper_dps_w_qh_mips64
-#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips64
-#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips64
-#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips64
-#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips64
-#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips64
-#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips64
-#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips64
-#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips64
-#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips64
-#define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips64
-#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips64
-#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips64
-#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips64
-#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips64
-#define helper_dmadd helper_dmadd_mips64
-#define helper_dmaddu helper_dmaddu_mips64
-#define helper_dmsub helper_dmsub_mips64
-#define helper_dmsubu helper_dmsubu_mips64
-#define helper_dinsv helper_dinsv_mips64
-#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips64
-#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips64
-#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips64
-#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips64
-#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips64
-#define helper_cmpu_le_ob helper_cmpu_le_ob_mips64
-#define helper_cmp_eq_qh helper_cmp_eq_qh_mips64
-#define helper_cmp_lt_qh helper_cmp_lt_qh_mips64
-#define helper_cmp_le_qh helper_cmp_le_qh_mips64
-#define helper_cmp_eq_pw helper_cmp_eq_pw_mips64
-#define helper_cmp_lt_pw helper_cmp_lt_pw_mips64
-#define helper_cmp_le_pw helper_cmp_le_pw_mips64
-#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips64
-#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips64
-#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips64
-#define helper_pick_ob helper_pick_ob_mips64
-#define helper_pick_qh helper_pick_qh_mips64
-#define helper_pick_pw helper_pick_pw_mips64
-#define helper_packrl_pw helper_packrl_pw_mips64
-#define helper_dextr_w helper_dextr_w_mips64
-#define helper_dextr_r_w helper_dextr_r_w_mips64
-#define helper_dextr_rs_w helper_dextr_rs_w_mips64
-#define helper_dextr_l helper_dextr_l_mips64
-#define helper_dextr_r_l helper_dextr_r_l_mips64
-#define helper_dextr_rs_l helper_dextr_rs_l_mips64
-#define helper_dextr_s_h helper_dextr_s_h_mips64
-#define helper_dextp helper_dextp_mips64
-#define helper_dextpdp helper_dextpdp_mips64
-#define helper_dshilo helper_dshilo_mips64
-#define helper_dmthlip helper_dmthlip_mips64
-#define helper_dclo helper_dclo_mips64
-#define helper_dclz helper_dclz_mips64
+#define do_raise_exception_err do_raise_exception_err_mips64
+#define helper_paddsb helper_paddsb_mips64
+#define helper_paddusb helper_paddusb_mips64
+#define helper_paddsh helper_paddsh_mips64
+#define helper_paddush helper_paddush_mips64
+#define helper_paddb helper_paddb_mips64
+#define helper_paddh helper_paddh_mips64
+#define helper_paddw helper_paddw_mips64
+#define helper_psubsb helper_psubsb_mips64
+#define helper_psubusb helper_psubusb_mips64
+#define helper_psubsh helper_psubsh_mips64
+#define helper_psubush helper_psubush_mips64
+#define helper_psubb helper_psubb_mips64
+#define helper_psubh helper_psubh_mips64
+#define helper_psubw helper_psubw_mips64
+#define helper_pshufh helper_pshufh_mips64
+#define helper_packsswh helper_packsswh_mips64
+#define helper_packsshb helper_packsshb_mips64
+#define helper_packushb helper_packushb_mips64
+#define helper_punpcklwd helper_punpcklwd_mips64
+#define helper_punpckhwd helper_punpckhwd_mips64
+#define helper_punpcklhw helper_punpcklhw_mips64
+#define helper_punpckhhw helper_punpckhhw_mips64
+#define helper_punpcklbh helper_punpcklbh_mips64
+#define helper_punpckhbh helper_punpckhbh_mips64
+#define helper_pavgh helper_pavgh_mips64
+#define helper_pavgb helper_pavgb_mips64
+#define helper_pmaxsh helper_pmaxsh_mips64
+#define helper_pminsh helper_pminsh_mips64
+#define helper_pmaxub helper_pmaxub_mips64
+#define helper_pminub helper_pminub_mips64
+#define helper_pcmpeqw helper_pcmpeqw_mips64
+#define helper_pcmpgtw helper_pcmpgtw_mips64
+#define helper_pcmpeqh helper_pcmpeqh_mips64
+#define helper_pcmpgth helper_pcmpgth_mips64
+#define helper_pcmpeqb helper_pcmpeqb_mips64
+#define helper_pcmpgtb helper_pcmpgtb_mips64
+#define helper_psllw helper_psllw_mips64
+#define helper_psrlw helper_psrlw_mips64
+#define helper_psraw helper_psraw_mips64
+#define helper_psllh helper_psllh_mips64
+#define helper_psrlh helper_psrlh_mips64
+#define helper_psrah helper_psrah_mips64
+#define helper_pmullh helper_pmullh_mips64
+#define helper_pmulhh helper_pmulhh_mips64
+#define helper_pmulhuh helper_pmulhuh_mips64
+#define helper_pmaddhw helper_pmaddhw_mips64
+#define helper_pasubub helper_pasubub_mips64
+#define helper_biadd helper_biadd_mips64
+#define helper_pmovmskb helper_pmovmskb_mips64
+#define helper_msa_nloc_b helper_msa_nloc_b_mips64
+#define helper_msa_nloc_h helper_msa_nloc_h_mips64
+#define helper_msa_nloc_w helper_msa_nloc_w_mips64
+#define helper_msa_nloc_d helper_msa_nloc_d_mips64
+#define helper_msa_nlzc_b helper_msa_nlzc_b_mips64
+#define helper_msa_nlzc_h helper_msa_nlzc_h_mips64
+#define helper_msa_nlzc_w helper_msa_nlzc_w_mips64
+#define helper_msa_nlzc_d helper_msa_nlzc_d_mips64
+#define helper_msa_pcnt_b helper_msa_pcnt_b_mips64
+#define helper_msa_pcnt_h helper_msa_pcnt_h_mips64
+#define helper_msa_pcnt_w helper_msa_pcnt_w_mips64
+#define helper_msa_pcnt_d helper_msa_pcnt_d_mips64
+#define helper_msa_binsl_b helper_msa_binsl_b_mips64
+#define helper_msa_binsl_h helper_msa_binsl_h_mips64
+#define helper_msa_binsl_w helper_msa_binsl_w_mips64
+#define helper_msa_binsl_d helper_msa_binsl_d_mips64
+#define helper_msa_binsr_b helper_msa_binsr_b_mips64
+#define helper_msa_binsr_h helper_msa_binsr_h_mips64
+#define helper_msa_binsr_w helper_msa_binsr_w_mips64
+#define helper_msa_binsr_d helper_msa_binsr_d_mips64
+#define helper_msa_bmnz_v helper_msa_bmnz_v_mips64
+#define helper_msa_bmz_v helper_msa_bmz_v_mips64
+#define helper_msa_bsel_v helper_msa_bsel_v_mips64
+#define helper_msa_bclr_b helper_msa_bclr_b_mips64
+#define helper_msa_bclr_h helper_msa_bclr_h_mips64
+#define helper_msa_bclr_w helper_msa_bclr_w_mips64
+#define helper_msa_bclr_d helper_msa_bclr_d_mips64
+#define helper_msa_bneg_b helper_msa_bneg_b_mips64
+#define helper_msa_bneg_h helper_msa_bneg_h_mips64
+#define helper_msa_bneg_w helper_msa_bneg_w_mips64
+#define helper_msa_bneg_d helper_msa_bneg_d_mips64
+#define helper_msa_bset_b helper_msa_bset_b_mips64
+#define helper_msa_bset_h helper_msa_bset_h_mips64
+#define helper_msa_bset_w helper_msa_bset_w_mips64
+#define helper_msa_bset_d helper_msa_bset_d_mips64
+#define helper_msa_add_a_b helper_msa_add_a_b_mips64
+#define helper_msa_add_a_h helper_msa_add_a_h_mips64
+#define helper_msa_add_a_w helper_msa_add_a_w_mips64
+#define helper_msa_add_a_d helper_msa_add_a_d_mips64
+#define helper_msa_adds_a_b helper_msa_adds_a_b_mips64
+#define helper_msa_adds_a_h helper_msa_adds_a_h_mips64
+#define helper_msa_adds_a_w helper_msa_adds_a_w_mips64
+#define helper_msa_adds_a_d helper_msa_adds_a_d_mips64
+#define helper_msa_adds_s_b helper_msa_adds_s_b_mips64
+#define helper_msa_adds_s_h helper_msa_adds_s_h_mips64
+#define helper_msa_adds_s_w helper_msa_adds_s_w_mips64
+#define helper_msa_adds_s_d helper_msa_adds_s_d_mips64
+#define helper_msa_adds_u_b helper_msa_adds_u_b_mips64
+#define helper_msa_adds_u_h helper_msa_adds_u_h_mips64
+#define helper_msa_adds_u_w helper_msa_adds_u_w_mips64
+#define helper_msa_adds_u_d helper_msa_adds_u_d_mips64
+#define helper_msa_addv_b helper_msa_addv_b_mips64
+#define helper_msa_addv_h helper_msa_addv_h_mips64
+#define helper_msa_addv_w helper_msa_addv_w_mips64
+#define helper_msa_addv_d helper_msa_addv_d_mips64
+#define helper_msa_hadd_s_h helper_msa_hadd_s_h_mips64
+#define helper_msa_hadd_s_w helper_msa_hadd_s_w_mips64
+#define helper_msa_hadd_s_d helper_msa_hadd_s_d_mips64
+#define helper_msa_hadd_u_h helper_msa_hadd_u_h_mips64
+#define helper_msa_hadd_u_w helper_msa_hadd_u_w_mips64
+#define helper_msa_hadd_u_d helper_msa_hadd_u_d_mips64
+#define helper_msa_ave_s_b helper_msa_ave_s_b_mips64
+#define helper_msa_ave_s_h helper_msa_ave_s_h_mips64
+#define helper_msa_ave_s_w helper_msa_ave_s_w_mips64
+#define helper_msa_ave_s_d helper_msa_ave_s_d_mips64
+#define helper_msa_ave_u_b helper_msa_ave_u_b_mips64
+#define helper_msa_ave_u_h helper_msa_ave_u_h_mips64
+#define helper_msa_ave_u_w helper_msa_ave_u_w_mips64
+#define helper_msa_ave_u_d helper_msa_ave_u_d_mips64
+#define helper_msa_aver_s_b helper_msa_aver_s_b_mips64
+#define helper_msa_aver_s_h helper_msa_aver_s_h_mips64
+#define helper_msa_aver_s_w helper_msa_aver_s_w_mips64
+#define helper_msa_aver_s_d helper_msa_aver_s_d_mips64
+#define helper_msa_aver_u_b helper_msa_aver_u_b_mips64
+#define helper_msa_aver_u_h helper_msa_aver_u_h_mips64
+#define helper_msa_aver_u_w helper_msa_aver_u_w_mips64
+#define helper_msa_aver_u_d helper_msa_aver_u_d_mips64
+#define helper_msa_ceq_b helper_msa_ceq_b_mips64
+#define helper_msa_ceq_h helper_msa_ceq_h_mips64
+#define helper_msa_ceq_w helper_msa_ceq_w_mips64
+#define helper_msa_ceq_d helper_msa_ceq_d_mips64
+#define helper_msa_cle_s_b helper_msa_cle_s_b_mips64
+#define helper_msa_cle_s_h helper_msa_cle_s_h_mips64
+#define helper_msa_cle_s_w helper_msa_cle_s_w_mips64
+#define helper_msa_cle_s_d helper_msa_cle_s_d_mips64
+#define helper_msa_cle_u_b helper_msa_cle_u_b_mips64
+#define helper_msa_cle_u_h helper_msa_cle_u_h_mips64
+#define helper_msa_cle_u_w helper_msa_cle_u_w_mips64
+#define helper_msa_cle_u_d helper_msa_cle_u_d_mips64
+#define helper_msa_clt_s_b helper_msa_clt_s_b_mips64
+#define helper_msa_clt_s_h helper_msa_clt_s_h_mips64
+#define helper_msa_clt_s_w helper_msa_clt_s_w_mips64
+#define helper_msa_clt_s_d helper_msa_clt_s_d_mips64
+#define helper_msa_clt_u_b helper_msa_clt_u_b_mips64
+#define helper_msa_clt_u_h helper_msa_clt_u_h_mips64
+#define helper_msa_clt_u_w helper_msa_clt_u_w_mips64
+#define helper_msa_clt_u_d helper_msa_clt_u_d_mips64
+#define helper_msa_div_s_b helper_msa_div_s_b_mips64
+#define helper_msa_div_s_h helper_msa_div_s_h_mips64
+#define helper_msa_div_s_w helper_msa_div_s_w_mips64
+#define helper_msa_div_s_d helper_msa_div_s_d_mips64
+#define helper_msa_div_u_b helper_msa_div_u_b_mips64
+#define helper_msa_div_u_h helper_msa_div_u_h_mips64
+#define helper_msa_div_u_w helper_msa_div_u_w_mips64
+#define helper_msa_div_u_d helper_msa_div_u_d_mips64
+#define helper_msa_max_a_b helper_msa_max_a_b_mips64
+#define helper_msa_max_a_h helper_msa_max_a_h_mips64
+#define helper_msa_max_a_w helper_msa_max_a_w_mips64
+#define helper_msa_max_a_d helper_msa_max_a_d_mips64
+#define helper_msa_max_s_b helper_msa_max_s_b_mips64
+#define helper_msa_max_s_h helper_msa_max_s_h_mips64
+#define helper_msa_max_s_w helper_msa_max_s_w_mips64
+#define helper_msa_max_s_d helper_msa_max_s_d_mips64
+#define helper_msa_max_u_b helper_msa_max_u_b_mips64
+#define helper_msa_max_u_h helper_msa_max_u_h_mips64
+#define helper_msa_max_u_w helper_msa_max_u_w_mips64
+#define helper_msa_max_u_d helper_msa_max_u_d_mips64
+#define helper_msa_min_a_b helper_msa_min_a_b_mips64
+#define helper_msa_min_a_h helper_msa_min_a_h_mips64
+#define helper_msa_min_a_w helper_msa_min_a_w_mips64
+#define helper_msa_min_a_d helper_msa_min_a_d_mips64
+#define helper_msa_min_s_b helper_msa_min_s_b_mips64
+#define helper_msa_min_s_h helper_msa_min_s_h_mips64
+#define helper_msa_min_s_w helper_msa_min_s_w_mips64
+#define helper_msa_min_s_d helper_msa_min_s_d_mips64
+#define helper_msa_min_u_b helper_msa_min_u_b_mips64
+#define helper_msa_min_u_h helper_msa_min_u_h_mips64
+#define helper_msa_min_u_w helper_msa_min_u_w_mips64
+#define helper_msa_min_u_d helper_msa_min_u_d_mips64
+#define helper_msa_mod_s_b helper_msa_mod_s_b_mips64
+#define helper_msa_mod_s_h helper_msa_mod_s_h_mips64
+#define helper_msa_mod_s_w helper_msa_mod_s_w_mips64
+#define helper_msa_mod_s_d helper_msa_mod_s_d_mips64
+#define helper_msa_mod_u_b helper_msa_mod_u_b_mips64
+#define helper_msa_mod_u_h helper_msa_mod_u_h_mips64
+#define helper_msa_mod_u_w helper_msa_mod_u_w_mips64
+#define helper_msa_mod_u_d helper_msa_mod_u_d_mips64
+#define helper_msa_asub_s_b helper_msa_asub_s_b_mips64
+#define helper_msa_asub_s_h helper_msa_asub_s_h_mips64
+#define helper_msa_asub_s_w helper_msa_asub_s_w_mips64
+#define helper_msa_asub_s_d helper_msa_asub_s_d_mips64
+#define helper_msa_asub_u_b helper_msa_asub_u_b_mips64
+#define helper_msa_asub_u_h helper_msa_asub_u_h_mips64
+#define helper_msa_asub_u_w helper_msa_asub_u_w_mips64
+#define helper_msa_asub_u_d helper_msa_asub_u_d_mips64
+#define helper_msa_hsub_s_h helper_msa_hsub_s_h_mips64
+#define helper_msa_hsub_s_w helper_msa_hsub_s_w_mips64
+#define helper_msa_hsub_s_d helper_msa_hsub_s_d_mips64
+#define helper_msa_hsub_u_h helper_msa_hsub_u_h_mips64
+#define helper_msa_hsub_u_w helper_msa_hsub_u_w_mips64
+#define helper_msa_hsub_u_d helper_msa_hsub_u_d_mips64
+#define helper_msa_ilvev_b helper_msa_ilvev_b_mips64
+#define helper_msa_ilvev_h helper_msa_ilvev_h_mips64
+#define helper_msa_ilvev_w helper_msa_ilvev_w_mips64
+#define helper_msa_ilvev_d helper_msa_ilvev_d_mips64
+#define helper_msa_ilvod_b helper_msa_ilvod_b_mips64
+#define helper_msa_ilvod_h helper_msa_ilvod_h_mips64
+#define helper_msa_ilvod_w helper_msa_ilvod_w_mips64
+#define helper_msa_ilvod_d helper_msa_ilvod_d_mips64
+#define helper_msa_ilvl_b helper_msa_ilvl_b_mips64
+#define helper_msa_ilvl_h helper_msa_ilvl_h_mips64
+#define helper_msa_ilvl_w helper_msa_ilvl_w_mips64
+#define helper_msa_ilvl_d helper_msa_ilvl_d_mips64
+#define helper_msa_ilvr_b helper_msa_ilvr_b_mips64
+#define helper_msa_ilvr_h helper_msa_ilvr_h_mips64
+#define helper_msa_ilvr_w helper_msa_ilvr_w_mips64
+#define helper_msa_ilvr_d helper_msa_ilvr_d_mips64
+#define helper_msa_and_v helper_msa_and_v_mips64
+#define helper_msa_nor_v helper_msa_nor_v_mips64
+#define helper_msa_or_v helper_msa_or_v_mips64
+#define helper_msa_xor_v helper_msa_xor_v_mips64
+#define helper_msa_move_v helper_msa_move_v_mips64
+#define helper_msa_pckev_b helper_msa_pckev_b_mips64
+#define helper_msa_pckev_h helper_msa_pckev_h_mips64
+#define helper_msa_pckev_w helper_msa_pckev_w_mips64
+#define helper_msa_pckev_d helper_msa_pckev_d_mips64
+#define helper_msa_pckod_b helper_msa_pckod_b_mips64
+#define helper_msa_pckod_h helper_msa_pckod_h_mips64
+#define helper_msa_pckod_w helper_msa_pckod_w_mips64
+#define helper_msa_pckod_d helper_msa_pckod_d_mips64
+#define helper_msa_sll_b helper_msa_sll_b_mips64
+#define helper_msa_sll_h helper_msa_sll_h_mips64
+#define helper_msa_sll_w helper_msa_sll_w_mips64
+#define helper_msa_sll_d helper_msa_sll_d_mips64
+#define helper_msa_sra_b helper_msa_sra_b_mips64
+#define helper_msa_sra_h helper_msa_sra_h_mips64
+#define helper_msa_sra_w helper_msa_sra_w_mips64
+#define helper_msa_sra_d helper_msa_sra_d_mips64
+#define helper_msa_srar_b helper_msa_srar_b_mips64
+#define helper_msa_srar_h helper_msa_srar_h_mips64
+#define helper_msa_srar_w helper_msa_srar_w_mips64
+#define helper_msa_srar_d helper_msa_srar_d_mips64
+#define helper_msa_srl_b helper_msa_srl_b_mips64
+#define helper_msa_srl_h helper_msa_srl_h_mips64
+#define helper_msa_srl_w helper_msa_srl_w_mips64
+#define helper_msa_srl_d helper_msa_srl_d_mips64
+#define helper_msa_srlr_b helper_msa_srlr_b_mips64
+#define helper_msa_srlr_h helper_msa_srlr_h_mips64
+#define helper_msa_srlr_w helper_msa_srlr_w_mips64
+#define helper_msa_srlr_d helper_msa_srlr_d_mips64
+#define helper_msa_andi_b helper_msa_andi_b_mips64
+#define helper_msa_ori_b helper_msa_ori_b_mips64
+#define helper_msa_nori_b helper_msa_nori_b_mips64
+#define helper_msa_xori_b helper_msa_xori_b_mips64
+#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips64
+#define helper_msa_bmzi_b helper_msa_bmzi_b_mips64
+#define helper_msa_bseli_b helper_msa_bseli_b_mips64
+#define helper_msa_shf_df helper_msa_shf_df_mips64
+#define helper_msa_addvi_df helper_msa_addvi_df_mips64
+#define helper_msa_subvi_df helper_msa_subvi_df_mips64
+#define helper_msa_ceqi_df helper_msa_ceqi_df_mips64
+#define helper_msa_clei_s_df helper_msa_clei_s_df_mips64
+#define helper_msa_clei_u_df helper_msa_clei_u_df_mips64
+#define helper_msa_clti_s_df helper_msa_clti_s_df_mips64
+#define helper_msa_clti_u_df helper_msa_clti_u_df_mips64
+#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips64
+#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips64
+#define helper_msa_mini_s_df helper_msa_mini_s_df_mips64
+#define helper_msa_mini_u_df helper_msa_mini_u_df_mips64
+#define helper_msa_ldi_df helper_msa_ldi_df_mips64
+#define helper_msa_slli_df helper_msa_slli_df_mips64
+#define helper_msa_srai_df helper_msa_srai_df_mips64
+#define helper_msa_srli_df helper_msa_srli_df_mips64
+#define helper_msa_bclri_df helper_msa_bclri_df_mips64
+#define helper_msa_bseti_df helper_msa_bseti_df_mips64
+#define helper_msa_bnegi_df helper_msa_bnegi_df_mips64
+#define helper_msa_sat_s_df helper_msa_sat_s_df_mips64
+#define helper_msa_sat_u_df helper_msa_sat_u_df_mips64
+#define helper_msa_srari_df helper_msa_srari_df_mips64
+#define helper_msa_srlri_df helper_msa_srlri_df_mips64
+#define helper_msa_binsli_df helper_msa_binsli_df_mips64
+#define helper_msa_binsri_df helper_msa_binsri_df_mips64
+#define helper_msa_subv_df helper_msa_subv_df_mips64
+#define helper_msa_subs_s_df helper_msa_subs_s_df_mips64
+#define helper_msa_subs_u_df helper_msa_subs_u_df_mips64
+#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips64
+#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips64
+#define helper_msa_mulv_df helper_msa_mulv_df_mips64
+#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips64
+#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips64
+#define helper_msa_mul_q_df helper_msa_mul_q_df_mips64
+#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips64
+#define helper_msa_sld_df helper_msa_sld_df_mips64
+#define helper_msa_maddv_df helper_msa_maddv_df_mips64
+#define helper_msa_msubv_df helper_msa_msubv_df_mips64
+#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips64
+#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips64
+#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips64
+#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips64
+#define helper_msa_binsl_df helper_msa_binsl_df_mips64
+#define helper_msa_binsr_df helper_msa_binsr_df_mips64
+#define helper_msa_madd_q_df helper_msa_madd_q_df_mips64
+#define helper_msa_msub_q_df helper_msa_msub_q_df_mips64
+#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips64
+#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips64
+#define helper_msa_splat_df helper_msa_splat_df_mips64
+#define helper_msa_vshf_df helper_msa_vshf_df_mips64
+#define helper_msa_sldi_df helper_msa_sldi_df_mips64
+#define helper_msa_splati_df helper_msa_splati_df_mips64
+#define helper_msa_copy_s_b helper_msa_copy_s_b_mips64
+#define helper_msa_copy_s_h helper_msa_copy_s_h_mips64
+#define helper_msa_copy_s_w helper_msa_copy_s_w_mips64
+#define helper_msa_copy_s_d helper_msa_copy_s_d_mips64
+#define helper_msa_copy_u_b helper_msa_copy_u_b_mips64
+#define helper_msa_copy_u_h helper_msa_copy_u_h_mips64
+#define helper_msa_copy_u_w helper_msa_copy_u_w_mips64
+#define helper_msa_insert_b helper_msa_insert_b_mips64
+#define helper_msa_insert_h helper_msa_insert_h_mips64
+#define helper_msa_insert_w helper_msa_insert_w_mips64
+#define helper_msa_insert_d helper_msa_insert_d_mips64
+#define helper_msa_insve_df helper_msa_insve_df_mips64
+#define helper_msa_ctcmsa helper_msa_ctcmsa_mips64
+#define helper_msa_cfcmsa helper_msa_cfcmsa_mips64
+#define helper_msa_fill_df helper_msa_fill_df_mips64
+#define helper_msa_fcaf_df helper_msa_fcaf_df_mips64
+#define helper_msa_fcun_df helper_msa_fcun_df_mips64
+#define helper_msa_fceq_df helper_msa_fceq_df_mips64
+#define helper_msa_fcueq_df helper_msa_fcueq_df_mips64
+#define helper_msa_fclt_df helper_msa_fclt_df_mips64
+#define helper_msa_fcult_df helper_msa_fcult_df_mips64
+#define helper_msa_fcle_df helper_msa_fcle_df_mips64
+#define helper_msa_fcule_df helper_msa_fcule_df_mips64
+#define helper_msa_fsaf_df helper_msa_fsaf_df_mips64
+#define helper_msa_fsun_df helper_msa_fsun_df_mips64
+#define helper_msa_fseq_df helper_msa_fseq_df_mips64
+#define helper_msa_fsueq_df helper_msa_fsueq_df_mips64
+#define helper_msa_fslt_df helper_msa_fslt_df_mips64
+#define helper_msa_fsult_df helper_msa_fsult_df_mips64
+#define helper_msa_fsle_df helper_msa_fsle_df_mips64
+#define helper_msa_fsule_df helper_msa_fsule_df_mips64
+#define helper_msa_fcor_df helper_msa_fcor_df_mips64
+#define helper_msa_fcune_df helper_msa_fcune_df_mips64
+#define helper_msa_fcne_df helper_msa_fcne_df_mips64
+#define helper_msa_fsor_df helper_msa_fsor_df_mips64
+#define helper_msa_fsune_df helper_msa_fsune_df_mips64
+#define helper_msa_fsne_df helper_msa_fsne_df_mips64
+#define helper_msa_fadd_df helper_msa_fadd_df_mips64
+#define helper_msa_fsub_df helper_msa_fsub_df_mips64
+#define helper_msa_fmul_df helper_msa_fmul_df_mips64
+#define helper_msa_fdiv_df helper_msa_fdiv_df_mips64
+#define helper_msa_fmadd_df helper_msa_fmadd_df_mips64
+#define helper_msa_fmsub_df helper_msa_fmsub_df_mips64
+#define helper_msa_fexp2_df helper_msa_fexp2_df_mips64
+#define helper_msa_fexdo_df helper_msa_fexdo_df_mips64
+#define helper_msa_ftq_df helper_msa_ftq_df_mips64
+#define helper_msa_fmin_df helper_msa_fmin_df_mips64
+#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips64
+#define helper_msa_fmax_df helper_msa_fmax_df_mips64
+#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips64
+#define helper_msa_fclass_df helper_msa_fclass_df_mips64
+#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips64
+#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips64
+#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips64
+#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips64
helper_msa_frsqrt_df_mips64 +#define helper_msa_frcp_df helper_msa_frcp_df_mips64 +#define helper_msa_frint_df helper_msa_frint_df_mips64 +#define helper_msa_flog2_df helper_msa_flog2_df_mips64 +#define helper_msa_fexupl_df helper_msa_fexupl_df_mips64 +#define helper_msa_fexupr_df helper_msa_fexupr_df_mips64 +#define helper_msa_ffql_df helper_msa_ffql_df_mips64 +#define helper_msa_ffqr_df helper_msa_ffqr_df_mips64 +#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips64 +#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips64 +#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips64 +#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips64 +#define helper_raise_exception_err helper_raise_exception_err_mips64 +#define helper_raise_exception helper_raise_exception_mips64 +#define helper_raise_exception_debug helper_raise_exception_debug_mips64 +#define helper_muls helper_muls_mips64 +#define helper_mulsu helper_mulsu_mips64 +#define helper_macc helper_macc_mips64 +#define helper_macchi helper_macchi_mips64 +#define helper_maccu helper_maccu_mips64 +#define helper_macchiu helper_macchiu_mips64 +#define helper_msac helper_msac_mips64 +#define helper_msachi helper_msachi_mips64 +#define helper_msacu helper_msacu_mips64 +#define helper_msachiu helper_msachiu_mips64 +#define helper_mulhi helper_mulhi_mips64 +#define helper_mulhiu helper_mulhiu_mips64 +#define helper_mulshi helper_mulshi_mips64 +#define helper_mulshiu helper_mulshiu_mips64 #define helper_dbitswap helper_dbitswap_mips64 +#define helper_bitswap helper_bitswap_mips64 +#define helper_rotx helper_rotx_mips64 +#define helper_ll helper_ll_mips64 #define helper_lld helper_lld_mips64 -#define helper_scd helper_scd_mips64 +#define helper_swl helper_swl_mips64 +#define helper_swr helper_swr_mips64 #define helper_sdl helper_sdl_mips64 #define helper_sdr helper_sdr_mips64 +#define helper_lwm helper_lwm_mips64 +#define helper_swm helper_swm_mips64 #define helper_ldm helper_ldm_mips64 #define helper_sdm helper_sdm_mips64 -#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips64 -#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips64 -#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips64 -#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips64 -#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips64 -#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips64 -#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips64 -#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips64 -#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips64 +#define helper_fork helper_fork_mips64 +#define helper_yield helper_yield_mips64 +#define r4k_helper_tlbinv r4k_helper_tlbinv_mips64 +#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips64 +#define r4k_helper_tlbwi r4k_helper_tlbwi_mips64 +#define r4k_helper_tlbwr r4k_helper_tlbwr_mips64 +#define r4k_helper_tlbp r4k_helper_tlbp_mips64 +#define r4k_helper_tlbr r4k_helper_tlbr_mips64 +#define helper_tlbwi helper_tlbwi_mips64 +#define helper_tlbwr helper_tlbwr_mips64 +#define helper_tlbp helper_tlbp_mips64 +#define helper_tlbr helper_tlbr_mips64 +#define helper_tlbinv helper_tlbinv_mips64 +#define helper_tlbinvf helper_tlbinvf_mips64 +#define helper_ginvt helper_ginvt_mips64 +#define helper_di helper_di_mips64 +#define helper_ei helper_ei_mips64 +#define helper_eret helper_eret_mips64 +#define helper_eretnc helper_eretnc_mips64 +#define helper_deret helper_deret_mips64 +#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips64 +#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips64 
+#define helper_rdhwr_cc helper_rdhwr_cc_mips64
+#define helper_rdhwr_ccres helper_rdhwr_ccres_mips64
+#define helper_rdhwr_performance helper_rdhwr_performance_mips64
+#define helper_rdhwr_xnp helper_rdhwr_xnp_mips64
+#define helper_pmon helper_pmon_mips64
+#define helper_wait helper_wait_mips64
+#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips64
+#define mips_cpu_do_transaction_failed mips_cpu_do_transaction_failed_mips64
+#define helper_msa_ld_b helper_msa_ld_b_mips64
+#define helper_msa_ld_h helper_msa_ld_h_mips64
+#define helper_msa_ld_w helper_msa_ld_w_mips64
+#define helper_msa_ld_d helper_msa_ld_d_mips64
+#define helper_msa_st_b helper_msa_st_b_mips64
+#define helper_msa_st_h helper_msa_st_h_mips64
+#define helper_msa_st_w helper_msa_st_w_mips64
+#define helper_msa_st_d helper_msa_st_d_mips64
+#define helper_cache helper_cache_mips64
+#define gen_intermediate_code gen_intermediate_code_mips64
+#define mips_tcg_init mips_tcg_init_mips64
+#define cpu_mips_realize_env cpu_mips_realize_env_mips64
+#define cpu_state_reset cpu_state_reset_mips64
+#define restore_state_to_opc restore_state_to_opc_mips64
 #define mips_reg_reset mips_reg_reset_mips64
 #define mips_reg_read mips_reg_read_mips64
 #define mips_reg_write mips_reg_write_mips64
-#define mips_tcg_init mips_tcg_init_mips64
-#define mips_cpu_list mips_cpu_list_mips64
-#define mips_release mips_release_mips64
-#define MIPS64_REGS_STORAGE_SIZE MIPS64_REGS_STORAGE_SIZE_mips64
-#define MIPS_REGS_STORAGE_SIZE MIPS_REGS_STORAGE_SIZE_mips64
+#define ieee_rm ieee_rm_mips64
+#define mips_defs mips_defs_mips64
+#define mips_defs_number mips_defs_number_mips64
+#define gen_helper_float_class_s gen_helper_float_class_s_mips64
+#define gen_helper_float_class_d gen_helper_float_class_d_mips64
 #endif
diff --git a/qemu/mips64el.h b/qemu/mips64el.h
index afe0d47f..f4337206 100644
--- a/qemu/mips64el.h
+++ b/qemu/mips64el.h
@@ -1,3260 +1,1431 @@
 /* Autogen header for Unicorn Engine - DONOT MODIFY */
-#ifndef UNICORN_AUTOGEN_MIPS64EL_H
-#define UNICORN_AUTOGEN_MIPS64EL_H
-#define arm_release arm_release_mips64el
-#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_mips64el
-#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_mips64el
-#define use_idiv_instructions_rt use_idiv_instructions_rt_mips64el
-#define tcg_target_deposit_valid tcg_target_deposit_valid_mips64el
-#define helper_power_down helper_power_down_mips64el
-#define check_exit_request check_exit_request_mips64el
-#define address_space_unregister address_space_unregister_mips64el
-#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips64el
-#define phys_mem_clean phys_mem_clean_mips64el
-#define tb_cleanup tb_cleanup_mips64el
+#ifndef UNICORN_AUTOGEN_mips64el_H
+#define UNICORN_AUTOGEN_mips64el_H
+#ifndef UNICORN_ARCH_POSTFIX
+#define UNICORN_ARCH_POSTFIX _mips64el
+#endif
+#define arm_arch arm_arch_mips64el
+#define tb_target_set_jmp_target tb_target_set_jmp_target_mips64el
+#define have_bmi1 have_bmi1_mips64el
+#define have_popcnt have_popcnt_mips64el
+#define have_avx1 have_avx1_mips64el
+#define have_avx2 have_avx2_mips64el
+#define have_isa have_isa_mips64el
+#define have_altivec have_altivec_mips64el
+#define have_vsx have_vsx_mips64el
+#define flush_icache_range flush_icache_range_mips64el
+#define s390_facilities s390_facilities_mips64el
+#define tcg_dump_op tcg_dump_op_mips64el
+#define tcg_dump_ops tcg_dump_ops_mips64el
+#define tcg_gen_and_i64 tcg_gen_and_i64_mips64el
+#define tcg_gen_discard_i64 tcg_gen_discard_i64_mips64el
+#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_mips64el
+#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_mips64el
+#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_mips64el
+#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_mips64el
+#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_mips64el
+#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_mips64el
+#define tcg_gen_ld_i64 tcg_gen_ld_i64_mips64el
+#define tcg_gen_mov_i64 tcg_gen_mov_i64_mips64el
+#define tcg_gen_movi_i64 tcg_gen_movi_i64_mips64el
+#define tcg_gen_mul_i64 tcg_gen_mul_i64_mips64el
+#define tcg_gen_or_i64 tcg_gen_or_i64_mips64el
+#define tcg_gen_sar_i64 tcg_gen_sar_i64_mips64el
+#define tcg_gen_shl_i64 tcg_gen_shl_i64_mips64el
+#define tcg_gen_shr_i64 tcg_gen_shr_i64_mips64el
+#define tcg_gen_st_i64 tcg_gen_st_i64_mips64el
+#define tcg_gen_xor_i64 tcg_gen_xor_i64_mips64el
+#define cpu_icount_to_ns cpu_icount_to_ns_mips64el
+#define cpu_is_stopped cpu_is_stopped_mips64el
+#define cpu_get_ticks cpu_get_ticks_mips64el
+#define cpu_get_clock cpu_get_clock_mips64el
+#define cpu_resume cpu_resume_mips64el
+#define qemu_init_vcpu qemu_init_vcpu_mips64el
+#define cpu_stop_current cpu_stop_current_mips64el
+#define resume_all_vcpus resume_all_vcpus_mips64el
+#define vm_start vm_start_mips64el
+#define address_space_dispatch_compact address_space_dispatch_compact_mips64el
+#define flatview_translate flatview_translate_mips64el
+#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips64el
+#define qemu_get_cpu qemu_get_cpu_mips64el
+#define cpu_address_space_init cpu_address_space_init_mips64el
+#define cpu_get_address_space cpu_get_address_space_mips64el
+#define cpu_exec_unrealizefn cpu_exec_unrealizefn_mips64el
+#define cpu_exec_initfn cpu_exec_initfn_mips64el
+#define cpu_exec_realizefn cpu_exec_realizefn_mips64el
+#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips64el
+#define cpu_watchpoint_insert cpu_watchpoint_insert_mips64el
+#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips64el
+#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips64el
+#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips64el
+#define cpu_breakpoint_insert cpu_breakpoint_insert_mips64el
+#define cpu_breakpoint_remove cpu_breakpoint_remove_mips64el
+#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips64el
+#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips64el
+#define cpu_abort cpu_abort_mips64el
+#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mips64el
+#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips64el
+#define flatview_add_to_dispatch flatview_add_to_dispatch_mips64el
+#define qemu_ram_get_host_addr qemu_ram_get_host_addr_mips64el
+#define qemu_ram_get_offset qemu_ram_get_offset_mips64el
+#define qemu_ram_get_used_length qemu_ram_get_used_length_mips64el
+#define qemu_ram_is_shared qemu_ram_is_shared_mips64el
+#define qemu_ram_pagesize qemu_ram_pagesize_mips64el
+#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips64el
+#define qemu_ram_alloc qemu_ram_alloc_mips64el
+#define qemu_ram_free qemu_ram_free_mips64el
+#define qemu_map_ram_ptr qemu_map_ram_ptr_mips64el
+#define qemu_ram_block_host_offset qemu_ram_block_host_offset_mips64el
+#define qemu_ram_block_from_host qemu_ram_block_from_host_mips64el
+#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips64el
+#define cpu_check_watchpoint cpu_check_watchpoint_mips64el
+#define iotlb_to_section iotlb_to_section_mips64el
+#define address_space_dispatch_new address_space_dispatch_new_mips64el
+#define address_space_dispatch_free address_space_dispatch_free_mips64el
+#define flatview_read_continue flatview_read_continue_mips64el
+#define address_space_read_full address_space_read_full_mips64el
+#define address_space_write address_space_write_mips64el
+#define address_space_rw address_space_rw_mips64el
+#define cpu_physical_memory_rw cpu_physical_memory_rw_mips64el
+#define address_space_write_rom address_space_write_rom_mips64el
+#define cpu_flush_icache_range cpu_flush_icache_range_mips64el
+#define cpu_exec_init_all cpu_exec_init_all_mips64el
+#define address_space_access_valid address_space_access_valid_mips64el
+#define address_space_map address_space_map_mips64el
+#define address_space_unmap address_space_unmap_mips64el
+#define cpu_physical_memory_map cpu_physical_memory_map_mips64el
+#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips64el
+#define cpu_memory_rw_debug cpu_memory_rw_debug_mips64el
+#define qemu_target_page_size qemu_target_page_size_mips64el
+#define qemu_target_page_bits qemu_target_page_bits_mips64el
+#define qemu_target_page_bits_min qemu_target_page_bits_min_mips64el
+#define target_words_bigendian target_words_bigendian_mips64el
+#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips64el
+#define ram_block_discard_range ram_block_discard_range_mips64el
+#define ramblock_is_pmem ramblock_is_pmem_mips64el
+#define page_size_init page_size_init_mips64el
+#define set_preferred_target_page_bits set_preferred_target_page_bits_mips64el
+#define finalize_target_page_bits finalize_target_page_bits_mips64el
+#define cpu_outb cpu_outb_mips64el
+#define cpu_outw cpu_outw_mips64el
+#define cpu_outl cpu_outl_mips64el
+#define cpu_inb cpu_inb_mips64el
+#define cpu_inw cpu_inw_mips64el
+#define cpu_inl cpu_inl_mips64el
 #define memory_map memory_map_mips64el
+#define memory_map_io memory_map_io_mips64el
 #define memory_map_ptr memory_map_ptr_mips64el
 #define memory_unmap memory_unmap_mips64el
 #define memory_free memory_free_mips64el
-#define free_code_gen_buffer free_code_gen_buffer_mips64el
-#define helper_raise_exception helper_raise_exception_mips64el
-#define tcg_enabled tcg_enabled_mips64el
-#define tcg_exec_init tcg_exec_init_mips64el
-#define memory_register_types memory_register_types_mips64el
-#define cpu_exec_init_all cpu_exec_init_all_mips64el
-#define vm_start vm_start_mips64el
-#define resume_all_vcpus resume_all_vcpus_mips64el
-#define a15_l2ctlr_read a15_l2ctlr_read_mips64el
-#define a64_translate_init a64_translate_init_mips64el
-#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_mips64el
-#define aa64_cacheop_access aa64_cacheop_access_mips64el
-#define aa64_daif_access aa64_daif_access_mips64el
-#define aa64_daif_write aa64_daif_write_mips64el
-#define aa64_dczid_read aa64_dczid_read_mips64el
-#define aa64_fpcr_read aa64_fpcr_read_mips64el
-#define aa64_fpcr_write aa64_fpcr_write_mips64el
-#define aa64_fpsr_read aa64_fpsr_read_mips64el
-#define aa64_fpsr_write aa64_fpsr_write_mips64el
-#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_mips64el
-#define aa64_zva_access aa64_zva_access_mips64el
-#define aarch64_banked_spsr_index aarch64_banked_spsr_index_mips64el
-#define aarch64_restore_sp aarch64_restore_sp_mips64el
-#define aarch64_save_sp aarch64_save_sp_mips64el
-#define accel_find accel_find_mips64el
-#define accel_init_machine accel_init_machine_mips64el
-#define accel_type accel_type_mips64el
-#define access_with_adjusted_size access_with_adjusted_size_mips64el
-#define add128 add128_mips64el
-#define add16_sat add16_sat_mips64el
-#define add16_usat add16_usat_mips64el
-#define add192 add192_mips64el
-#define add8_sat add8_sat_mips64el
-#define add8_usat add8_usat_mips64el
-#define add_cpreg_to_hashtable add_cpreg_to_hashtable_mips64el
-#define add_cpreg_to_list add_cpreg_to_list_mips64el
-#define addFloat128Sigs addFloat128Sigs_mips64el
-#define addFloat32Sigs addFloat32Sigs_mips64el
-#define addFloat64Sigs addFloat64Sigs_mips64el
-#define addFloatx80Sigs addFloatx80Sigs_mips64el
-#define add_qemu_ldst_label add_qemu_ldst_label_mips64el
-#define address_space_access_valid address_space_access_valid_mips64el
-#define address_space_destroy address_space_destroy_mips64el
-#define address_space_destroy_dispatch address_space_destroy_dispatch_mips64el
-#define address_space_get_flatview address_space_get_flatview_mips64el
-#define address_space_init address_space_init_mips64el
-#define address_space_init_dispatch address_space_init_dispatch_mips64el
-#define address_space_lookup_region address_space_lookup_region_mips64el
-#define address_space_map address_space_map_mips64el
-#define address_space_read address_space_read_mips64el
-#define address_space_rw address_space_rw_mips64el
-#define address_space_translate address_space_translate_mips64el
-#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips64el
-#define address_space_translate_internal address_space_translate_internal_mips64el
-#define address_space_unmap address_space_unmap_mips64el
-#define address_space_update_topology address_space_update_topology_mips64el
-#define address_space_update_topology_pass address_space_update_topology_pass_mips64el
-#define address_space_write address_space_write_mips64el
-#define addrrange_contains addrrange_contains_mips64el
-#define addrrange_end addrrange_end_mips64el
-#define addrrange_equal addrrange_equal_mips64el
-#define addrrange_intersection addrrange_intersection_mips64el
-#define addrrange_intersects addrrange_intersects_mips64el
-#define addrrange_make addrrange_make_mips64el
-#define adjust_endianness adjust_endianness_mips64el
-#define all_helpers all_helpers_mips64el
-#define alloc_code_gen_buffer alloc_code_gen_buffer_mips64el
-#define alloc_entry alloc_entry_mips64el
-#define always_true always_true_mips64el
-#define arm1026_initfn arm1026_initfn_mips64el
-#define arm1136_initfn arm1136_initfn_mips64el
-#define arm1136_r2_initfn arm1136_r2_initfn_mips64el
-#define arm1176_initfn arm1176_initfn_mips64el
-#define arm11mpcore_initfn arm11mpcore_initfn_mips64el
-#define arm926_initfn arm926_initfn_mips64el
-#define arm946_initfn arm946_initfn_mips64el
-#define arm_ccnt_enabled arm_ccnt_enabled_mips64el
-#define arm_cp_read_zero arm_cp_read_zero_mips64el
-#define arm_cp_reset_ignore arm_cp_reset_ignore_mips64el
-#define arm_cpu_do_interrupt arm_cpu_do_interrupt_mips64el
-#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_mips64el
-#define arm_cpu_finalizefn arm_cpu_finalizefn_mips64el
-#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_mips64el
-#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_mips64el
-#define arm_cpu_initfn arm_cpu_initfn_mips64el
-#define arm_cpu_list arm_cpu_list_mips64el
-#define cpu_loop_exit cpu_loop_exit_mips64el
-#define arm_cpu_post_init arm_cpu_post_init_mips64el
-#define arm_cpu_realizefn arm_cpu_realizefn_mips64el
-#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_mips64el
-#define arm_cpu_register_types arm_cpu_register_types_mips64el
-#define cpu_resume_from_signal cpu_resume_from_signal_mips64el
-#define arm_cpus arm_cpus_mips64el
-#define arm_cpu_set_pc arm_cpu_set_pc_mips64el
-#define arm_cp_write_ignore arm_cp_write_ignore_mips64el
-#define arm_current_el arm_current_el_mips64el
-#define arm_dc_feature arm_dc_feature_mips64el
-#define arm_debug_excp_handler arm_debug_excp_handler_mips64el
-#define arm_debug_target_el arm_debug_target_el_mips64el
-#define arm_el_is_aa64 arm_el_is_aa64_mips64el
-#define arm_env_get_cpu arm_env_get_cpu_mips64el
-#define arm_excp_target_el arm_excp_target_el_mips64el
-#define arm_excp_unmasked arm_excp_unmasked_mips64el
-#define arm_feature arm_feature_mips64el
-#define arm_generate_debug_exceptions arm_generate_debug_exceptions_mips64el
-#define gen_intermediate_code gen_intermediate_code_mips64el
-#define gen_intermediate_code_pc gen_intermediate_code_pc_mips64el
-#define arm_gen_test_cc arm_gen_test_cc_mips64el
-#define arm_gt_ptimer_cb arm_gt_ptimer_cb_mips64el
-#define arm_gt_vtimer_cb arm_gt_vtimer_cb_mips64el
-#define arm_handle_psci_call arm_handle_psci_call_mips64el
-#define arm_is_psci_call arm_is_psci_call_mips64el
-#define arm_is_secure arm_is_secure_mips64el
-#define arm_is_secure_below_el3 arm_is_secure_below_el3_mips64el
-#define arm_ldl_code arm_ldl_code_mips64el
-#define arm_lduw_code arm_lduw_code_mips64el
-#define arm_log_exception arm_log_exception_mips64el
-#define arm_reg_read arm_reg_read_mips64el
-#define arm_reg_reset arm_reg_reset_mips64el
-#define arm_reg_write arm_reg_write_mips64el
-#define restore_state_to_opc restore_state_to_opc_mips64el
-#define arm_rmode_to_sf arm_rmode_to_sf_mips64el
-#define arm_singlestep_active arm_singlestep_active_mips64el
-#define tlb_fill tlb_fill_mips64el
-#define tlb_flush tlb_flush_mips64el
-#define tlb_flush_page tlb_flush_page_mips64el
-#define tlb_set_page tlb_set_page_mips64el
-#define arm_translate_init arm_translate_init_mips64el
-#define arm_v7m_class_init arm_v7m_class_init_mips64el
-#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_mips64el
-#define ats_access ats_access_mips64el
-#define ats_write ats_write_mips64el
-#define bad_mode_switch bad_mode_switch_mips64el
-#define bank_number bank_number_mips64el
-#define bitmap_zero_extend bitmap_zero_extend_mips64el
-#define bp_wp_matches bp_wp_matches_mips64el
-#define breakpoint_invalidate breakpoint_invalidate_mips64el
-#define build_page_bitmap build_page_bitmap_mips64el
-#define bus_add_child bus_add_child_mips64el
-#define bus_class_init bus_class_init_mips64el
-#define bus_info bus_info_mips64el
-#define bus_unparent bus_unparent_mips64el
-#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_mips64el
-#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_mips64el
-#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_mips64el
-#define call_recip_estimate call_recip_estimate_mips64el
-#define can_merge can_merge_mips64el
-#define capacity_increase capacity_increase_mips64el
-#define ccsidr_read ccsidr_read_mips64el
-#define check_ap check_ap_mips64el
-#define check_breakpoints check_breakpoints_mips64el
-#define check_watchpoints check_watchpoints_mips64el
-#define cho cho_mips64el
-#define clear_bit clear_bit_mips64el
-#define clz32 clz32_mips64el
-#define clz64 clz64_mips64el
-#define cmp_flatrange_addr cmp_flatrange_addr_mips64el
-#define code_gen_alloc code_gen_alloc_mips64el
-#define commonNaNToFloat128 commonNaNToFloat128_mips64el
-#define commonNaNToFloat16 commonNaNToFloat16_mips64el
-#define commonNaNToFloat32 commonNaNToFloat32_mips64el
-#define commonNaNToFloat64 commonNaNToFloat64_mips64el
-#define commonNaNToFloatx80 commonNaNToFloatx80_mips64el
-#define compute_abs_deadline compute_abs_deadline_mips64el
-#define cond_name cond_name_mips64el
-#define configure_accelerator configure_accelerator_mips64el
-#define container_get container_get_mips64el
-#define container_info container_info_mips64el
-#define container_register_types container_register_types_mips64el
-#define contextidr_write contextidr_write_mips64el
-#define core_log_global_start core_log_global_start_mips64el
-#define core_log_global_stop core_log_global_stop_mips64el
-#define core_memory_listener core_memory_listener_mips64el
-#define cortexa15_cp_reginfo cortexa15_cp_reginfo_mips64el
-#define cortex_a15_initfn cortex_a15_initfn_mips64el
-#define cortexa8_cp_reginfo cortexa8_cp_reginfo_mips64el
-#define cortex_a8_initfn cortex_a8_initfn_mips64el
-#define cortexa9_cp_reginfo cortexa9_cp_reginfo_mips64el
-#define cortex_a9_initfn cortex_a9_initfn_mips64el
-#define cortex_m3_initfn cortex_m3_initfn_mips64el
-#define count_cpreg count_cpreg_mips64el
-#define countLeadingZeros32 countLeadingZeros32_mips64el
-#define countLeadingZeros64 countLeadingZeros64_mips64el
-#define cp_access_ok cp_access_ok_mips64el
-#define cpacr_write cpacr_write_mips64el
-#define cpreg_field_is_64bit cpreg_field_is_64bit_mips64el
-#define cp_reginfo cp_reginfo_mips64el
-#define cpreg_key_compare cpreg_key_compare_mips64el
-#define cpreg_make_keylist cpreg_make_keylist_mips64el
-#define cp_reg_reset cp_reg_reset_mips64el
-#define cpreg_to_kvm_id cpreg_to_kvm_id_mips64el
-#define cpsr_read cpsr_read_mips64el
-#define cpsr_write cpsr_write_mips64el
-#define cptype_valid cptype_valid_mips64el
-#define cpu_abort cpu_abort_mips64el
-#define cpu_arm_exec cpu_arm_exec_mips64el
-#define cpu_arm_gen_code cpu_arm_gen_code_mips64el
-#define cpu_arm_init cpu_arm_init_mips64el
-#define cpu_breakpoint_insert cpu_breakpoint_insert_mips64el
-#define cpu_breakpoint_remove cpu_breakpoint_remove_mips64el
-#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips64el
-#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips64el
-#define cpu_can_do_io cpu_can_do_io_mips64el
-#define cpu_can_run cpu_can_run_mips64el
-#define cpu_class_init cpu_class_init_mips64el
-#define cpu_common_class_by_name cpu_common_class_by_name_mips64el
-#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mips64el
-#define cpu_common_get_arch_id cpu_common_get_arch_id_mips64el
-#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_mips64el
-#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_mips64el
-#define cpu_common_has_work cpu_common_has_work_mips64el
-#define cpu_common_initfn cpu_common_initfn_mips64el
-#define cpu_common_noop cpu_common_noop_mips64el
-#define cpu_common_parse_features cpu_common_parse_features_mips64el
-#define cpu_common_realizefn cpu_common_realizefn_mips64el
-#define cpu_common_reset cpu_common_reset_mips64el
-#define cpu_dump_statistics cpu_dump_statistics_mips64el
-#define cpu_exec_init cpu_exec_init_mips64el
-#define cpu_flush_icache_range cpu_flush_icache_range_mips64el
-#define cpu_gen_init cpu_gen_init_mips64el
-#define cpu_get_clock cpu_get_clock_mips64el
-#define cpu_get_real_ticks cpu_get_real_ticks_mips64el
-#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_mips64el
-#define cpu_handle_debug_exception cpu_handle_debug_exception_mips64el
-#define cpu_handle_guest_debug cpu_handle_guest_debug_mips64el
-#define cpu_inb cpu_inb_mips64el
-#define cpu_inl cpu_inl_mips64el
-#define cpu_interrupt cpu_interrupt_mips64el
-#define cpu_interrupt_handler cpu_interrupt_handler_mips64el
-#define cpu_inw cpu_inw_mips64el
-#define cpu_io_recompile cpu_io_recompile_mips64el
-#define cpu_is_stopped cpu_is_stopped_mips64el
-#define cpu_ldl_code cpu_ldl_code_mips64el
-#define cpu_ldub_code cpu_ldub_code_mips64el
-#define cpu_lduw_code cpu_lduw_code_mips64el
-#define cpu_memory_rw_debug cpu_memory_rw_debug_mips64el
-#define cpu_mmu_index cpu_mmu_index_mips64el
-#define cpu_outb cpu_outb_mips64el
-#define cpu_outl cpu_outl_mips64el
-#define cpu_outw cpu_outw_mips64el
-#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mips64el
-#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_mips64el
-#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mips64el
-#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mips64el
-#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mips64el
-#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips64el
-#define cpu_physical_memory_map cpu_physical_memory_map_mips64el
-#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mips64el
-#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mips64el
-#define cpu_physical_memory_rw cpu_physical_memory_rw_mips64el
-#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mips64el
-#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mips64el
-#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips64el
-#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mips64el
-#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mips64el
-#define cpu_register cpu_register_mips64el
-#define cpu_register_types cpu_register_types_mips64el
-#define cpu_restore_state cpu_restore_state_mips64el
-#define cpu_restore_state_from_tb cpu_restore_state_from_tb_mips64el
-#define cpu_single_step cpu_single_step_mips64el
-#define cpu_tb_exec cpu_tb_exec_mips64el
-#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_mips64el
-#define cpu_to_be64 cpu_to_be64_mips64el
-#define cpu_to_le32 cpu_to_le32_mips64el
-#define cpu_to_le64 cpu_to_le64_mips64el
-#define cpu_type_info cpu_type_info_mips64el
-#define cpu_unassigned_access cpu_unassigned_access_mips64el
-#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips64el
-#define cpu_watchpoint_insert cpu_watchpoint_insert_mips64el
-#define cpu_watchpoint_remove cpu_watchpoint_remove_mips64el
-#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips64el
-#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips64el
-#define crc32c_table crc32c_table_mips64el
-#define create_new_memory_mapping create_new_memory_mapping_mips64el
-#define csselr_write csselr_write_mips64el
-#define cto32 cto32_mips64el
-#define ctr_el0_access ctr_el0_access_mips64el
-#define ctz32 ctz32_mips64el
-#define ctz64 ctz64_mips64el
-#define dacr_write dacr_write_mips64el
-#define dbgbcr_write dbgbcr_write_mips64el
-#define dbgbvr_write dbgbvr_write_mips64el
-#define dbgwcr_write dbgwcr_write_mips64el
-#define dbgwvr_write dbgwvr_write_mips64el
-#define debug_cp_reginfo debug_cp_reginfo_mips64el
-#define debug_frame debug_frame_mips64el
-#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_mips64el
-#define define_arm_cp_regs define_arm_cp_regs_mips64el
-#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_mips64el
-#define define_debug_regs define_debug_regs_mips64el
-#define define_one_arm_cp_reg define_one_arm_cp_reg_mips64el
-#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_mips64el
-#define deposit32 deposit32_mips64el
-#define deposit64 deposit64_mips64el
-#define deregister_tm_clones deregister_tm_clones_mips64el
-#define device_class_base_init device_class_base_init_mips64el
-#define device_class_init device_class_init_mips64el
-#define device_finalize device_finalize_mips64el
-#define device_get_realized device_get_realized_mips64el
-#define device_initfn device_initfn_mips64el
-#define device_post_init device_post_init_mips64el
-#define device_reset device_reset_mips64el
-#define device_set_realized device_set_realized_mips64el
-#define device_type_info device_type_info_mips64el
-#define disas_arm_insn disas_arm_insn_mips64el
-#define disas_coproc_insn disas_coproc_insn_mips64el
-#define disas_dsp_insn disas_dsp_insn_mips64el
-#define disas_iwmmxt_insn disas_iwmmxt_insn_mips64el
-#define disas_neon_data_insn disas_neon_data_insn_mips64el
-#define disas_neon_ls_insn disas_neon_ls_insn_mips64el
-#define disas_thumb2_insn disas_thumb2_insn_mips64el
-#define disas_thumb_insn disas_thumb_insn_mips64el
-#define disas_vfp_insn disas_vfp_insn_mips64el
-#define disas_vfp_v8_insn disas_vfp_v8_insn_mips64el
-#define do_arm_semihosting do_arm_semihosting_mips64el
-#define do_clz16 do_clz16_mips64el
-#define do_clz8 do_clz8_mips64el
-#define do_constant_folding do_constant_folding_mips64el
-#define do_constant_folding_2 do_constant_folding_2_mips64el
-#define do_constant_folding_cond do_constant_folding_cond_mips64el
-#define do_constant_folding_cond2 do_constant_folding_cond2_mips64el
-#define do_constant_folding_cond_32 do_constant_folding_cond_32_mips64el
-#define do_constant_folding_cond_64 do_constant_folding_cond_64_mips64el
-#define do_constant_folding_cond_eq do_constant_folding_cond_eq_mips64el
-#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_mips64el
-#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_mips64el
-#define do_ssat do_ssat_mips64el
-#define do_usad do_usad_mips64el
-#define do_usat do_usat_mips64el
-#define do_v7m_exception_exit do_v7m_exception_exit_mips64el
-#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_mips64el
-#define dummy_func dummy_func_mips64el
-#define dummy_section dummy_section_mips64el
-#define _DYNAMIC _DYNAMIC_mips64el
-#define _edata _edata_mips64el
-#define _end _end_mips64el
-#define end_list end_list_mips64el
-#define eq128 eq128_mips64el
-#define ErrorClass_lookup ErrorClass_lookup_mips64el
-#define error_copy error_copy_mips64el
-#define error_exit error_exit_mips64el
-#define error_get_class error_get_class_mips64el
-#define error_get_pretty error_get_pretty_mips64el
-#define error_setg_file_open error_setg_file_open_mips64el
-#define estimateDiv128To64 estimateDiv128To64_mips64el
-#define estimateSqrt32 estimateSqrt32_mips64el
-#define excnames excnames_mips64el
-#define excp_is_internal excp_is_internal_mips64el
-#define extended_addresses_enabled extended_addresses_enabled_mips64el
-#define extended_mpu_ap_bits extended_mpu_ap_bits_mips64el
-#define extract32 extract32_mips64el
-#define extract64 extract64_mips64el
-#define extractFloat128Exp extractFloat128Exp_mips64el
-#define extractFloat128Frac0 extractFloat128Frac0_mips64el
-#define extractFloat128Frac1 extractFloat128Frac1_mips64el
-#define extractFloat128Sign extractFloat128Sign_mips64el
-#define extractFloat16Exp extractFloat16Exp_mips64el
-#define extractFloat16Frac extractFloat16Frac_mips64el
-#define extractFloat16Sign extractFloat16Sign_mips64el
-#define extractFloat32Exp extractFloat32Exp_mips64el
-#define extractFloat32Frac extractFloat32Frac_mips64el
-#define extractFloat32Sign extractFloat32Sign_mips64el
-#define extractFloat64Exp extractFloat64Exp_mips64el
-#define extractFloat64Frac extractFloat64Frac_mips64el
-#define extractFloat64Sign extractFloat64Sign_mips64el
-#define extractFloatx80Exp extractFloatx80Exp_mips64el
-#define extractFloatx80Frac extractFloatx80Frac_mips64el
-#define extractFloatx80Sign extractFloatx80Sign_mips64el
-#define fcse_write fcse_write_mips64el
-#define find_better_copy find_better_copy_mips64el
-#define find_default_machine find_default_machine_mips64el
-#define find_desc_by_name find_desc_by_name_mips64el
-#define find_first_bit find_first_bit_mips64el
-#define find_paging_enabled_cpu find_paging_enabled_cpu_mips64el
-#define find_ram_block find_ram_block_mips64el
-#define find_ram_offset find_ram_offset_mips64el
-#define find_string find_string_mips64el
-#define find_type find_type_mips64el
-#define _fini _fini_mips64el
-#define flatrange_equal flatrange_equal_mips64el
-#define flatview_destroy flatview_destroy_mips64el
-#define flatview_init flatview_init_mips64el
-#define flatview_insert flatview_insert_mips64el
-#define flatview_lookup flatview_lookup_mips64el
-#define flatview_ref flatview_ref_mips64el
-#define flatview_simplify flatview_simplify_mips64el
 #define flatview_unref flatview_unref_mips64el
-#define float128_add float128_add_mips64el
-#define float128_compare float128_compare_mips64el
-#define float128_compare_internal float128_compare_internal_mips64el
-#define float128_compare_quiet float128_compare_quiet_mips64el
-#define float128_default_nan float128_default_nan_mips64el
-#define float128_div float128_div_mips64el
-#define float128_eq float128_eq_mips64el
-#define float128_eq_quiet float128_eq_quiet_mips64el
-#define float128_is_quiet_nan float128_is_quiet_nan_mips64el
-#define float128_is_signaling_nan float128_is_signaling_nan_mips64el
-#define float128_le float128_le_mips64el
-#define float128_le_quiet float128_le_quiet_mips64el
-#define float128_lt float128_lt_mips64el
-#define float128_lt_quiet float128_lt_quiet_mips64el
-#define float128_maybe_silence_nan float128_maybe_silence_nan_mips64el
-#define float128_mul float128_mul_mips64el
-#define float128_rem float128_rem_mips64el
-#define float128_round_to_int float128_round_to_int_mips64el
-#define float128_scalbn float128_scalbn_mips64el
-#define float128_sqrt float128_sqrt_mips64el
-#define float128_sub float128_sub_mips64el
-#define float128ToCommonNaN float128ToCommonNaN_mips64el
-#define float128_to_float32 float128_to_float32_mips64el
-#define float128_to_float64 float128_to_float64_mips64el
-#define float128_to_floatx80 float128_to_floatx80_mips64el
-#define float128_to_int32 float128_to_int32_mips64el
-#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips64el
-#define float128_to_int64 float128_to_int64_mips64el
-#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips64el
-#define float128_unordered float128_unordered_mips64el
-#define float128_unordered_quiet float128_unordered_quiet_mips64el
-#define float16_default_nan float16_default_nan_mips64el
+#define address_space_get_flatview address_space_get_flatview_mips64el
+#define memory_region_transaction_begin memory_region_transaction_begin_mips64el
+#define memory_region_transaction_commit memory_region_transaction_commit_mips64el
+#define memory_region_init memory_region_init_mips64el
+#define memory_region_access_valid memory_region_access_valid_mips64el
+#define memory_region_dispatch_read memory_region_dispatch_read_mips64el
+#define memory_region_dispatch_write memory_region_dispatch_write_mips64el
+#define memory_region_init_io memory_region_init_io_mips64el
+#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips64el
+#define memory_region_size memory_region_size_mips64el
+#define memory_region_set_readonly memory_region_set_readonly_mips64el
+#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips64el
+#define memory_region_from_host memory_region_from_host_mips64el
+#define memory_region_get_ram_addr memory_region_get_ram_addr_mips64el
+#define memory_region_add_subregion memory_region_add_subregion_mips64el
+#define memory_region_del_subregion memory_region_del_subregion_mips64el
+#define memory_region_find memory_region_find_mips64el
+#define memory_listener_register memory_listener_register_mips64el
+#define memory_listener_unregister memory_listener_unregister_mips64el
+#define address_space_remove_listeners address_space_remove_listeners_mips64el
+#define address_space_init address_space_init_mips64el
+#define address_space_destroy address_space_destroy_mips64el
+#define memory_region_init_ram memory_region_init_ram_mips64el
+#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips64el
+#define exec_inline_op exec_inline_op_mips64el
+#define floatx80_default_nan floatx80_default_nan_mips64el
+#define float_raise float_raise_mips64el
 #define float16_is_quiet_nan float16_is_quiet_nan_mips64el
 #define float16_is_signaling_nan float16_is_signaling_nan_mips64el
-#define float16_maybe_silence_nan float16_maybe_silence_nan_mips64el
-#define float16ToCommonNaN float16ToCommonNaN_mips64el
-#define float16_to_float32 float16_to_float32_mips64el
-#define float16_to_float64 float16_to_float64_mips64el
-#define float32_abs float32_abs_mips64el
-#define float32_add float32_add_mips64el
-#define float32_chs float32_chs_mips64el
-#define float32_compare float32_compare_mips64el
-#define float32_compare_internal float32_compare_internal_mips64el
-#define float32_compare_quiet float32_compare_quiet_mips64el
-#define float32_default_nan float32_default_nan_mips64el
-#define float32_div float32_div_mips64el
-#define float32_eq float32_eq_mips64el
-#define float32_eq_quiet float32_eq_quiet_mips64el
-#define float32_exp2 float32_exp2_mips64el
-#define float32_exp2_coefficients float32_exp2_coefficients_mips64el
-#define float32_is_any_nan float32_is_any_nan_mips64el
-#define float32_is_infinity float32_is_infinity_mips64el
-#define float32_is_neg float32_is_neg_mips64el
 #define float32_is_quiet_nan float32_is_quiet_nan_mips64el
 #define float32_is_signaling_nan float32_is_signaling_nan_mips64el
-#define float32_is_zero float32_is_zero_mips64el
-#define float32_is_zero_or_denormal float32_is_zero_or_denormal_mips64el
-#define float32_le float32_le_mips64el
-#define float32_le_quiet float32_le_quiet_mips64el
-#define float32_log2 float32_log2_mips64el
-#define float32_lt float32_lt_mips64el
-#define float32_lt_quiet float32_lt_quiet_mips64el
+#define float64_is_quiet_nan float64_is_quiet_nan_mips64el
+#define float64_is_signaling_nan float64_is_signaling_nan_mips64el
+#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips64el
+#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips64el
+#define floatx80_silence_nan floatx80_silence_nan_mips64el
+#define propagateFloatx80NaN propagateFloatx80NaN_mips64el
+#define float128_is_quiet_nan float128_is_quiet_nan_mips64el
+#define float128_is_signaling_nan float128_is_signaling_nan_mips64el
+#define float128_silence_nan float128_silence_nan_mips64el
+#define float16_add float16_add_mips64el
+#define float16_sub float16_sub_mips64el
+#define float32_add float32_add_mips64el
+#define float32_sub float32_sub_mips64el
+#define float64_add float64_add_mips64el
+#define float64_sub float64_sub_mips64el
+#define float16_mul float16_mul_mips64el
+#define float32_mul float32_mul_mips64el
+#define float64_mul float64_mul_mips64el
+#define float16_muladd float16_muladd_mips64el
+#define float32_muladd float32_muladd_mips64el
+#define float64_muladd float64_muladd_mips64el
+#define float16_div float16_div_mips64el
+#define float32_div float32_div_mips64el
+#define float64_div float64_div_mips64el
+#define float16_to_float32 float16_to_float32_mips64el
+#define float16_to_float64 float16_to_float64_mips64el
+#define float32_to_float16 float32_to_float16_mips64el
+#define float32_to_float64 float32_to_float64_mips64el
+#define float64_to_float16 float64_to_float16_mips64el
+#define float64_to_float32 float64_to_float32_mips64el
+#define float16_round_to_int float16_round_to_int_mips64el
+#define float32_round_to_int float32_round_to_int_mips64el
+#define float64_round_to_int float64_round_to_int_mips64el
+#define float16_to_int16_scalbn float16_to_int16_scalbn_mips64el
+#define float16_to_int32_scalbn float16_to_int32_scalbn_mips64el
+#define float16_to_int64_scalbn float16_to_int64_scalbn_mips64el
+#define float32_to_int16_scalbn float32_to_int16_scalbn_mips64el
+#define float32_to_int32_scalbn float32_to_int32_scalbn_mips64el
+#define float32_to_int64_scalbn float32_to_int64_scalbn_mips64el
+#define float64_to_int16_scalbn float64_to_int16_scalbn_mips64el
+#define float64_to_int32_scalbn float64_to_int32_scalbn_mips64el
+#define float64_to_int64_scalbn float64_to_int64_scalbn_mips64el
+#define float16_to_int16 float16_to_int16_mips64el
+#define float16_to_int32 float16_to_int32_mips64el
+#define float16_to_int64 float16_to_int64_mips64el
+#define float32_to_int16 float32_to_int16_mips64el
+#define float32_to_int32 float32_to_int32_mips64el
+#define float32_to_int64 float32_to_int64_mips64el
+#define float64_to_int16 float64_to_int16_mips64el
+#define float64_to_int32 float64_to_int32_mips64el
+#define float64_to_int64 float64_to_int64_mips64el
+#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_mips64el
+#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_mips64el
+#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_mips64el
+#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips64el
+#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips64el
+#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips64el
+#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips64el
+#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips64el
+#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips64el
+#define float16_to_uint16_scalbn float16_to_uint16_scalbn_mips64el
+#define float16_to_uint32_scalbn float16_to_uint32_scalbn_mips64el
+#define float16_to_uint64_scalbn float16_to_uint64_scalbn_mips64el
+#define float32_to_uint16_scalbn float32_to_uint16_scalbn_mips64el
+#define float32_to_uint32_scalbn float32_to_uint32_scalbn_mips64el
+#define float32_to_uint64_scalbn float32_to_uint64_scalbn_mips64el
+#define float64_to_uint16_scalbn float64_to_uint16_scalbn_mips64el
+#define float64_to_uint32_scalbn float64_to_uint32_scalbn_mips64el
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_mips64el
+#define float16_to_uint16 float16_to_uint16_mips64el
+#define float16_to_uint32 float16_to_uint32_mips64el
+#define float16_to_uint64 float16_to_uint64_mips64el
+#define float32_to_uint16 float32_to_uint16_mips64el
+#define float32_to_uint32 float32_to_uint32_mips64el
+#define float32_to_uint64 float32_to_uint64_mips64el
+#define float64_to_uint16 float64_to_uint16_mips64el
+#define float64_to_uint32 float64_to_uint32_mips64el
+#define float64_to_uint64 float64_to_uint64_mips64el
+#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_mips64el
+#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_mips64el
+#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_mips64el
+#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips64el
+#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips64el
+#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips64el
+#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips64el
+#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips64el
+#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips64el
+#define int64_to_float16_scalbn int64_to_float16_scalbn_mips64el
+#define int32_to_float16_scalbn int32_to_float16_scalbn_mips64el
+#define int16_to_float16_scalbn int16_to_float16_scalbn_mips64el
+#define int64_to_float16 int64_to_float16_mips64el
+#define int32_to_float16 int32_to_float16_mips64el
+#define int16_to_float16 int16_to_float16_mips64el
+#define int64_to_float32_scalbn int64_to_float32_scalbn_mips64el
+#define int32_to_float32_scalbn int32_to_float32_scalbn_mips64el
+#define int16_to_float32_scalbn int16_to_float32_scalbn_mips64el
+#define int64_to_float32 int64_to_float32_mips64el
+#define int32_to_float32 int32_to_float32_mips64el
+#define int16_to_float32 int16_to_float32_mips64el
+#define int64_to_float64_scalbn int64_to_float64_scalbn_mips64el
+#define int32_to_float64_scalbn int32_to_float64_scalbn_mips64el
+#define int16_to_float64_scalbn int16_to_float64_scalbn_mips64el
+#define int64_to_float64 int64_to_float64_mips64el
+#define int32_to_float64 int32_to_float64_mips64el
+#define int16_to_float64 int16_to_float64_mips64el
+#define uint64_to_float16_scalbn uint64_to_float16_scalbn_mips64el
+#define uint32_to_float16_scalbn uint32_to_float16_scalbn_mips64el
+#define uint16_to_float16_scalbn uint16_to_float16_scalbn_mips64el
+#define uint64_to_float16 uint64_to_float16_mips64el
+#define uint32_to_float16 uint32_to_float16_mips64el
+#define uint16_to_float16 uint16_to_float16_mips64el
+#define uint64_to_float32_scalbn uint64_to_float32_scalbn_mips64el
+#define uint32_to_float32_scalbn uint32_to_float32_scalbn_mips64el
+#define uint16_to_float32_scalbn uint16_to_float32_scalbn_mips64el
+#define uint64_to_float32 uint64_to_float32_mips64el
+#define uint32_to_float32 uint32_to_float32_mips64el
+#define uint16_to_float32 uint16_to_float32_mips64el
+#define uint64_to_float64_scalbn uint64_to_float64_scalbn_mips64el
+#define uint32_to_float64_scalbn uint32_to_float64_scalbn_mips64el
+#define uint16_to_float64_scalbn uint16_to_float64_scalbn_mips64el
+#define uint64_to_float64 uint64_to_float64_mips64el
+#define uint32_to_float64 uint32_to_float64_mips64el
+#define uint16_to_float64 uint16_to_float64_mips64el
+#define float16_min float16_min_mips64el
+#define float16_minnum float16_minnum_mips64el
+#define float16_minnummag float16_minnummag_mips64el
+#define float16_max float16_max_mips64el
+#define float16_maxnum float16_maxnum_mips64el
+#define float16_maxnummag float16_maxnummag_mips64el
+#define float32_min float32_min_mips64el
+#define float32_minnum float32_minnum_mips64el
+#define float32_minnummag float32_minnummag_mips64el
 #define float32_max float32_max_mips64el
 #define float32_maxnum float32_maxnum_mips64el
 #define float32_maxnummag float32_maxnummag_mips64el
-#define float32_maybe_silence_nan float32_maybe_silence_nan_mips64el
-#define float32_min float32_min_mips64el
-#define float32_minmax float32_minmax_mips64el
-#define float32_minnum float32_minnum_mips64el
-#define float32_minnummag float32_minnummag_mips64el
-#define float32_mul float32_mul_mips64el
-#define float32_muladd float32_muladd_mips64el
-#define float32_rem float32_rem_mips64el
-#define float32_round_to_int float32_round_to_int_mips64el
-#define float32_scalbn float32_scalbn_mips64el
-#define float32_set_sign float32_set_sign_mips64el
-#define float32_sqrt float32_sqrt_mips64el
-#define float32_squash_input_denormal float32_squash_input_denormal_mips64el
-#define float32_sub float32_sub_mips64el
-#define float32ToCommonNaN float32ToCommonNaN_mips64el
-#define float32_to_float128 float32_to_float128_mips64el
-#define float32_to_float16 float32_to_float16_mips64el
-#define float32_to_float64 float32_to_float64_mips64el
-#define float32_to_floatx80 float32_to_floatx80_mips64el
-#define float32_to_int16 float32_to_int16_mips64el
-#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips64el
-#define float32_to_int32 float32_to_int32_mips64el
-#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips64el
-#define float32_to_int64 float32_to_int64_mips64el
-#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips64el
-#define float32_to_uint16 float32_to_uint16_mips64el
-#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips64el
-#define float32_to_uint32 float32_to_uint32_mips64el
-#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips64el
-#define float32_to_uint64 float32_to_uint64_mips64el
-#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips64el
-#define float32_unordered float32_unordered_mips64el
-#define float32_unordered_quiet float32_unordered_quiet_mips64el
-#define float64_abs float64_abs_mips64el
-#define float64_add float64_add_mips64el
-#define float64_chs float64_chs_mips64el
-#define float64_compare float64_compare_mips64el
-#define float64_compare_internal float64_compare_internal_mips64el
-#define float64_compare_quiet float64_compare_quiet_mips64el
-#define float64_default_nan float64_default_nan_mips64el
-#define float64_div float64_div_mips64el
-#define float64_eq float64_eq_mips64el
-#define float64_eq_quiet float64_eq_quiet_mips64el
-#define float64_is_any_nan float64_is_any_nan_mips64el
-#define float64_is_infinity float64_is_infinity_mips64el
-#define float64_is_neg float64_is_neg_mips64el
-#define float64_is_quiet_nan float64_is_quiet_nan_mips64el
-#define float64_is_signaling_nan float64_is_signaling_nan_mips64el
-#define float64_is_zero float64_is_zero_mips64el
-#define float64_le float64_le_mips64el
-#define float64_le_quiet float64_le_quiet_mips64el
-#define float64_log2 float64_log2_mips64el
-#define float64_lt float64_lt_mips64el
-#define float64_lt_quiet float64_lt_quiet_mips64el
+#define float64_min float64_min_mips64el
+#define float64_minnum float64_minnum_mips64el
+#define float64_minnummag float64_minnummag_mips64el
 #define float64_max float64_max_mips64el
 #define float64_maxnum float64_maxnum_mips64el
 #define float64_maxnummag float64_maxnummag_mips64el
-#define float64_maybe_silence_nan float64_maybe_silence_nan_mips64el
-#define float64_min float64_min_mips64el
-#define float64_minmax float64_minmax_mips64el
-#define float64_minnum float64_minnum_mips64el
-#define float64_minnummag float64_minnummag_mips64el
-#define float64_mul float64_mul_mips64el
-#define float64_muladd float64_muladd_mips64el
-#define float64_rem float64_rem_mips64el
-#define float64_round_to_int float64_round_to_int_mips64el
+#define float16_compare float16_compare_mips64el
+#define float16_compare_quiet float16_compare_quiet_mips64el
+#define float32_compare float32_compare_mips64el
+#define float32_compare_quiet float32_compare_quiet_mips64el
+#define float64_compare float64_compare_mips64el
+#define float64_compare_quiet float64_compare_quiet_mips64el
+#define float16_scalbn float16_scalbn_mips64el
+#define float32_scalbn float32_scalbn_mips64el
 #define float64_scalbn float64_scalbn_mips64el
-#define float64_set_sign float64_set_sign_mips64el
+#define float16_sqrt float16_sqrt_mips64el
+#define float32_sqrt float32_sqrt_mips64el
 #define float64_sqrt float64_sqrt_mips64el
+#define float16_default_nan float16_default_nan_mips64el
+#define float32_default_nan float32_default_nan_mips64el
+#define float64_default_nan float64_default_nan_mips64el
+#define float128_default_nan float128_default_nan_mips64el
+#define float16_silence_nan float16_silence_nan_mips64el
+#define float32_silence_nan float32_silence_nan_mips64el
+#define float64_silence_nan float64_silence_nan_mips64el
+#define float16_squash_input_denormal float16_squash_input_denormal_mips64el
+#define float32_squash_input_denormal float32_squash_input_denormal_mips64el
 #define float64_squash_input_denormal float64_squash_input_denormal_mips64el
-#define float64_sub float64_sub_mips64el
-#define float64ToCommonNaN float64ToCommonNaN_mips64el
-#define float64_to_float128 float64_to_float128_mips64el
-#define float64_to_float16 float64_to_float16_mips64el
-#define float64_to_float32 float64_to_float32_mips64el
+#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips64el
+#define roundAndPackFloatx80 roundAndPackFloatx80_mips64el
+#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips64el
+#define int32_to_floatx80 int32_to_floatx80_mips64el
+#define int32_to_float128 int32_to_float128_mips64el
+#define int64_to_floatx80 int64_to_floatx80_mips64el
+#define int64_to_float128 int64_to_float128_mips64el
+#define uint64_to_float128 uint64_to_float128_mips64el
+#define float32_to_floatx80 float32_to_floatx80_mips64el
+#define float32_to_float128 float32_to_float128_mips64el
+#define float32_rem float32_rem_mips64el
+#define float32_exp2 float32_exp2_mips64el
+#define float32_log2 float32_log2_mips64el
+#define float32_eq float32_eq_mips64el
+#define float32_le float32_le_mips64el
+#define float32_lt float32_lt_mips64el
+#define float32_unordered float32_unordered_mips64el
+#define float32_eq_quiet float32_eq_quiet_mips64el
+#define float32_le_quiet float32_le_quiet_mips64el
+#define float32_lt_quiet float32_lt_quiet_mips64el
+#define float32_unordered_quiet float32_unordered_quiet_mips64el
 #define float64_to_floatx80 float64_to_floatx80_mips64el
-#define float64_to_int16 float64_to_int16_mips64el
-#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips64el
-#define float64_to_int32 float64_to_int32_mips64el
-#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips64el
-#define float64_to_int64 float64_to_int64_mips64el
-#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips64el
-#define float64_to_uint16 float64_to_uint16_mips64el
-#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips64el
-#define float64_to_uint32 float64_to_uint32_mips64el
-#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips64el
-#define float64_to_uint64 float64_to_uint64_mips64el
-#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips64el
-#define float64_trunc_to_int float64_trunc_to_int_mips64el
+#define float64_to_float128 float64_to_float128_mips64el
+#define float64_rem float64_rem_mips64el
+#define float64_log2 float64_log2_mips64el
+#define float64_eq float64_eq_mips64el
+#define float64_le float64_le_mips64el
+#define float64_lt float64_lt_mips64el
 #define float64_unordered float64_unordered_mips64el
+#define float64_eq_quiet float64_eq_quiet_mips64el
+#define float64_le_quiet float64_le_quiet_mips64el
+#define float64_lt_quiet float64_lt_quiet_mips64el
 #define float64_unordered_quiet float64_unordered_quiet_mips64el
-#define float_raise float_raise_mips64el
-#define floatx80_add floatx80_add_mips64el
-#define floatx80_compare floatx80_compare_mips64el
-#define floatx80_compare_internal floatx80_compare_internal_mips64el
-#define floatx80_compare_quiet floatx80_compare_quiet_mips64el
-#define floatx80_default_nan floatx80_default_nan_mips64el
-#define floatx80_div floatx80_div_mips64el
-#define floatx80_eq floatx80_eq_mips64el
-#define floatx80_eq_quiet floatx80_eq_quiet_mips64el
-#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips64el
-#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips64el
-#define floatx80_le floatx80_le_mips64el
-#define floatx80_le_quiet floatx80_le_quiet_mips64el
-#define floatx80_lt floatx80_lt_mips64el
-#define floatx80_lt_quiet floatx80_lt_quiet_mips64el
-#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_mips64el
-#define floatx80_mul floatx80_mul_mips64el
-#define floatx80_rem floatx80_rem_mips64el
-#define floatx80_round_to_int floatx80_round_to_int_mips64el
-#define floatx80_scalbn floatx80_scalbn_mips64el
-#define floatx80_sqrt floatx80_sqrt_mips64el
-#define floatx80_sub floatx80_sub_mips64el
-#define floatx80ToCommonNaN floatx80ToCommonNaN_mips64el
-#define floatx80_to_float128 floatx80_to_float128_mips64el
-#define floatx80_to_float32 floatx80_to_float32_mips64el
-#define floatx80_to_float64 floatx80_to_float64_mips64el
 #define floatx80_to_int32 floatx80_to_int32_mips64el
 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mips64el
 #define floatx80_to_int64 floatx80_to_int64_mips64el
 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mips64el
+#define floatx80_to_float32 floatx80_to_float32_mips64el
+#define floatx80_to_float64 floatx80_to_float64_mips64el
+#define floatx80_to_float128 floatx80_to_float128_mips64el
+#define floatx80_round floatx80_round_mips64el
+#define floatx80_round_to_int floatx80_round_to_int_mips64el
+#define floatx80_add floatx80_add_mips64el
+#define floatx80_sub floatx80_sub_mips64el
+#define floatx80_mul floatx80_mul_mips64el
+#define floatx80_div floatx80_div_mips64el
+#define floatx80_rem floatx80_rem_mips64el
+#define floatx80_sqrt floatx80_sqrt_mips64el
+#define floatx80_eq floatx80_eq_mips64el
+#define floatx80_le floatx80_le_mips64el
+#define floatx80_lt floatx80_lt_mips64el
 #define floatx80_unordered floatx80_unordered_mips64el
+#define floatx80_eq_quiet floatx80_eq_quiet_mips64el
+#define floatx80_le_quiet floatx80_le_quiet_mips64el
+#define floatx80_lt_quiet floatx80_lt_quiet_mips64el
 #define floatx80_unordered_quiet floatx80_unordered_quiet_mips64el
-#define flush_icache_range flush_icache_range_mips64el
-#define format_string format_string_mips64el
-#define fp_decode_rm fp_decode_rm_mips64el
-#define frame_dummy frame_dummy_mips64el
-#define free_range free_range_mips64el
-#define fstat64 fstat64_mips64el
-#define futex_wait futex_wait_mips64el
-#define futex_wake futex_wake_mips64el
-#define gen_aa32_ld16s gen_aa32_ld16s_mips64el
-#define gen_aa32_ld16u gen_aa32_ld16u_mips64el
-#define gen_aa32_ld32u gen_aa32_ld32u_mips64el
-#define gen_aa32_ld64 gen_aa32_ld64_mips64el
-#define gen_aa32_ld8s gen_aa32_ld8s_mips64el
-#define gen_aa32_ld8u gen_aa32_ld8u_mips64el
-#define gen_aa32_st16 gen_aa32_st16_mips64el
-#define gen_aa32_st32 gen_aa32_st32_mips64el
-#define gen_aa32_st64 gen_aa32_st64_mips64el
-#define gen_aa32_st8 gen_aa32_st8_mips64el
-#define gen_adc gen_adc_mips64el
-#define gen_adc_CC gen_adc_CC_mips64el
-#define gen_add16 gen_add16_mips64el
-#define gen_add_carry gen_add_carry_mips64el
-#define gen_add_CC gen_add_CC_mips64el
-#define gen_add_datah_offset gen_add_datah_offset_mips64el
-#define gen_add_data_offset gen_add_data_offset_mips64el
-#define gen_addq gen_addq_mips64el
-#define gen_addq_lo gen_addq_lo_mips64el
-#define gen_addq_msw gen_addq_msw_mips64el
-#define gen_arm_parallel_addsub gen_arm_parallel_addsub_mips64el
-#define gen_arm_shift_im gen_arm_shift_im_mips64el
-#define gen_arm_shift_reg gen_arm_shift_reg_mips64el
-#define gen_bx gen_bx_mips64el
-#define gen_bx_im gen_bx_im_mips64el
-#define gen_clrex gen_clrex_mips64el
-#define generate_memory_topology generate_memory_topology_mips64el
-#define generic_timer_cp_reginfo generic_timer_cp_reginfo_mips64el
-#define gen_exception gen_exception_mips64el
-#define gen_exception_insn gen_exception_insn_mips64el
-#define gen_exception_internal gen_exception_internal_mips64el
-#define gen_exception_internal_insn gen_exception_internal_insn_mips64el
-#define gen_exception_return gen_exception_return_mips64el
-#define gen_goto_tb gen_goto_tb_mips64el
-#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_mips64el
-#define gen_helper_add_saturate gen_helper_add_saturate_mips64el
-#define gen_helper_add_setq gen_helper_add_setq_mips64el
-#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_mips64el
-#define gen_helper_clz32 gen_helper_clz32_mips64el
-#define gen_helper_clz64 gen_helper_clz64_mips64el
-#define gen_helper_clz_arm gen_helper_clz_arm_mips64el
-#define gen_helper_cpsr_read gen_helper_cpsr_read_mips64el
-#define gen_helper_cpsr_write gen_helper_cpsr_write_mips64el
-#define gen_helper_crc32_arm gen_helper_crc32_arm_mips64el
-#define gen_helper_crc32c gen_helper_crc32c_mips64el
-#define gen_helper_crypto_aese gen_helper_crypto_aese_mips64el
-#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_mips64el
-#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_mips64el
-#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_mips64el
-#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_mips64el
-#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_mips64el
-#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_mips64el
-#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_mips64el
-#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_mips64el
-#define gen_helper_double_saturate gen_helper_double_saturate_mips64el
-#define gen_helper_exception_internal gen_helper_exception_internal_mips64el
-#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_mips64el
-#define gen_helper_get_cp_reg gen_helper_get_cp_reg_mips64el
-#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_mips64el
-#define gen_helper_get_r13_banked gen_helper_get_r13_banked_mips64el
-#define gen_helper_get_user_reg gen_helper_get_user_reg_mips64el
-#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_mips64el
-#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_mips64el
-#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_mips64el
-#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_mips64el
-#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_mips64el
-#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_mips64el
-#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_mips64el
-#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_mips64el
-#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_mips64el
-#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_mips64el
-#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_mips64el
-#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_mips64el
-#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_mips64el
-#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_mips64el
-#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_mips64el
-#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_mips64el
-#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_mips64el
-#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_mips64el
-#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_mips64el
-#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_mips64el
-#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_mips64el
-#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_mips64el
-#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_mips64el
-#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_mips64el
-#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_mips64el
-#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_mips64el
-#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_mips64el
-#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_mips64el
-#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_mips64el
-#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_mips64el
-#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_mips64el
-#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_mips64el
-#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_mips64el
-#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_mips64el
-#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_mips64el
-#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_mips64el
-#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_mips64el
-#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_mips64el
-#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_mips64el
-#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_mips64el
-#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_mips64el
-#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_mips64el
-#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_mips64el
-#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_mips64el
-#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_mips64el
-#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_mips64el
-#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_mips64el
-#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_mips64el
-#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_mips64el
-#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_mips64el
-#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_mips64el
-#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_mips64el
-#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_mips64el
-#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_mips64el
-#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_mips64el
-#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_mips64el
-#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_mips64el
-#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_mips64el
-#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_mips64el
-#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_mips64el
-#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_mips64el
-#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_mips64el
-#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_mips64el
-#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_mips64el
-#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_mips64el
-#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_mips64el
-#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_mips64el
-#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_mips64el
-#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_mips64el
-#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_mips64el
-#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_mips64el
-#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_mips64el
-#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_mips64el
-#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_mips64el
-#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_mips64el
-#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_mips64el
-#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_mips64el
-#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_mips64el
-#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_mips64el
-#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_mips64el
-#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_mips64el
-#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_mips64el
-#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_mips64el
-#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_mips64el
-#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_mips64el
-#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_mips64el
-#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_mips64el
-#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_mips64el
-#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_mips64el
-#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_mips64el
-#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_mips64el
-#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_mips64el
-#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_mips64el
-#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_mips64el
-#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_mips64el
-#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_mips64el
-#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_mips64el
-#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_mips64el
-#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_mips64el
-#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_mips64el
-#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_mips64el
-#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_mips64el
-#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_mips64el
-#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_mips64el
-#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_mips64el
-#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_mips64el
-#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_mips64el
-#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_mips64el
-#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_mips64el
-#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_mips64el
-#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_mips64el
-#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_mips64el
-#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_mips64el
-#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_mips64el
-#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_mips64el
-#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_mips64el
-#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_mips64el
-#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_mips64el
-#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_mips64el
-#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_mips64el
-#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_mips64el
-#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_mips64el
-#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_mips64el
-#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_mips64el
-#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_mips64el
-#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_mips64el
-#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_mips64el
-#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_mips64el
-#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_mips64el
-#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_mips64el
-#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_mips64el
-#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_mips64el
-#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_mips64el
-#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_mips64el
-#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_mips64el
-#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_mips64el
-#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_mips64el
-#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_mips64el
-#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_mips64el
-#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_mips64el
-#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_mips64el
-#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_mips64el
-#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_mips64el
-#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_mips64el
-#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_mips64el
-#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_mips64el
-#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_mips64el
-#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_mips64el
-#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_mips64el
-#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_mips64el
-#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_mips64el
-#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_mips64el
-#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_mips64el
-#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_mips64el
-#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_mips64el
-#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_mips64el
-#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_mips64el
-#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_mips64el
-#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_mips64el
-#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_mips64el
-#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_mips64el
-#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_mips64el
-#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_mips64el
-#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_mips64el
-#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_mips64el
-#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_mips64el
-#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_mips64el
-#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_mips64el
-#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_mips64el
-#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_mips64el
-#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_mips64el
-#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_mips64el
-#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_mips64el
-#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_mips64el
-#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_mips64el
-#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_mips64el
-#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_mips64el
-#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_mips64el
-#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_mips64el
-#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_mips64el
-#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_mips64el
-#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_mips64el
-#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_mips64el
-#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_mips64el
-#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_mips64el
-#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_mips64el
-#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_mips64el
-#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_mips64el
-#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_mips64el
-#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_mips64el
-#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_mips64el
-#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_mips64el
-#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_mips64el
-#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_mips64el
-#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_mips64el
-#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_mips64el
-#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_mips64el
-#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_mips64el
-#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_mips64el
-#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_mips64el
-#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_mips64el
-#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_mips64el
-#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_mips64el
-#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_mips64el
-#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_mips64el
-#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_mips64el
-#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_mips64el
-#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_mips64el
-#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_mips64el
-#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_mips64el
-#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_mips64el
-#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_mips64el
-#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_mips64el
-#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_mips64el
-#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_mips64el
-#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_mips64el
-#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_mips64el
-#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_mips64el
-#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_mips64el
-#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_mips64el
-#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_mips64el
-#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_mips64el
-#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_mips64el
-#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_mips64el
-#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_mips64el
-#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_mips64el
-#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_mips64el
-#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_mips64el
-#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_mips64el
-#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_mips64el
-#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_mips64el
-#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_mips64el
-#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_mips64el
-#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_mips64el
-#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_mips64el
-#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_mips64el
-#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_mips64el
-#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_mips64el
-#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_mips64el
-#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_mips64el
-#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_mips64el
-#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_mips64el
-#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_mips64el
-#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_mips64el
-#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_mips64el
-#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_mips64el
-#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_mips64el
-#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_mips64el
-#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_mips64el
-#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_mips64el
-#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_mips64el
-#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_mips64el
-#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_mips64el
-#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_mips64el
-#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_mips64el
-#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_mips64el
-#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_mips64el
-#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_mips64el
-#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_mips64el
-#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_mips64el
-#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_mips64el
-#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_mips64el
-#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_mips64el
-#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_mips64el
-#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_mips64el
-#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_mips64el
-#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_mips64el
-#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_mips64el
-#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_mips64el
-#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_mips64el
-#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_mips64el
-#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_mips64el
-#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_mips64el
-#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_mips64el
-#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_mips64el
-#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_mips64el
-#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_mips64el
-#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_mips64el
-#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_mips64el
-#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_mips64el
-#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_mips64el
-#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_mips64el
-#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_mips64el
-#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_mips64el
-#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_mips64el
-#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_mips64el
-#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_mips64el
-#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_mips64el
-#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_mips64el
-#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_mips64el
-#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_mips64el
-#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_mips64el
-#define gen_helper_neon_tbl gen_helper_neon_tbl_mips64el
-#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_mips64el
-#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_mips64el
-#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_mips64el
-#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_mips64el
-#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_mips64el
-#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_mips64el
-#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_mips64el
-#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_mips64el
-#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_mips64el
-#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_mips64el
-#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_mips64el
-#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_mips64el
-#define gen_helper_neon_zip16 gen_helper_neon_zip16_mips64el
-#define gen_helper_neon_zip8 gen_helper_neon_zip8_mips64el
-#define gen_helper_pre_hvc gen_helper_pre_hvc_mips64el
-#define gen_helper_pre_smc gen_helper_pre_smc_mips64el
-#define gen_helper_qadd16 gen_helper_qadd16_mips64el
-#define gen_helper_qadd8 gen_helper_qadd8_mips64el
-#define gen_helper_qaddsubx gen_helper_qaddsubx_mips64el
-#define gen_helper_qsub16 gen_helper_qsub16_mips64el
-#define gen_helper_qsub8 gen_helper_qsub8_mips64el
-#define gen_helper_qsubaddx gen_helper_qsubaddx_mips64el
-#define gen_helper_rbit gen_helper_rbit_mips64el
-#define gen_helper_recpe_f32 gen_helper_recpe_f32_mips64el
-#define gen_helper_recpe_u32 gen_helper_recpe_u32_mips64el
-#define gen_helper_recps_f32 gen_helper_recps_f32_mips64el
-#define gen_helper_rintd gen_helper_rintd_mips64el
-#define gen_helper_rintd_exact gen_helper_rintd_exact_mips64el
-#define gen_helper_rints gen_helper_rints_mips64el
-#define gen_helper_rints_exact gen_helper_rints_exact_mips64el
-#define gen_helper_ror_cc gen_helper_ror_cc_mips64el
-#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_mips64el
-#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_mips64el
-#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_mips64el
-#define gen_helper_sadd16 gen_helper_sadd16_mips64el
-#define gen_helper_sadd8 gen_helper_sadd8_mips64el
-#define gen_helper_saddsubx gen_helper_saddsubx_mips64el
-#define gen_helper_sar_cc gen_helper_sar_cc_mips64el
-#define gen_helper_sdiv gen_helper_sdiv_mips64el
-#define gen_helper_sel_flags gen_helper_sel_flags_mips64el
-#define gen_helper_set_cp_reg gen_helper_set_cp_reg_mips64el
-#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_mips64el
-#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_mips64el
-#define gen_helper_set_r13_banked gen_helper_set_r13_banked_mips64el
-#define gen_helper_set_rmode gen_helper_set_rmode_mips64el
-#define gen_helper_set_user_reg gen_helper_set_user_reg_mips64el
-#define gen_helper_shadd16 gen_helper_shadd16_mips64el
-#define gen_helper_shadd8 gen_helper_shadd8_mips64el
-#define gen_helper_shaddsubx gen_helper_shaddsubx_mips64el
-#define gen_helper_shl_cc gen_helper_shl_cc_mips64el
-#define gen_helper_shr_cc gen_helper_shr_cc_mips64el
-#define gen_helper_shsub16 gen_helper_shsub16_mips64el
-#define gen_helper_shsub8 gen_helper_shsub8_mips64el
-#define gen_helper_shsubaddx gen_helper_shsubaddx_mips64el
-#define gen_helper_ssat gen_helper_ssat_mips64el
-#define gen_helper_ssat16 gen_helper_ssat16_mips64el
-#define gen_helper_ssub16 gen_helper_ssub16_mips64el
-#define gen_helper_ssub8 gen_helper_ssub8_mips64el
-#define gen_helper_ssubaddx gen_helper_ssubaddx_mips64el
-#define gen_helper_sub_saturate gen_helper_sub_saturate_mips64el
-#define gen_helper_sxtb16 gen_helper_sxtb16_mips64el
-#define gen_helper_uadd16 gen_helper_uadd16_mips64el
-#define gen_helper_uadd8 gen_helper_uadd8_mips64el
-#define gen_helper_uaddsubx gen_helper_uaddsubx_mips64el
-#define gen_helper_udiv gen_helper_udiv_mips64el
-#define gen_helper_uhadd16 gen_helper_uhadd16_mips64el
-#define gen_helper_uhadd8 gen_helper_uhadd8_mips64el
-#define gen_helper_uhaddsubx gen_helper_uhaddsubx_mips64el
-#define gen_helper_uhsub16 gen_helper_uhsub16_mips64el
-#define gen_helper_uhsub8 gen_helper_uhsub8_mips64el
-#define gen_helper_uhsubaddx gen_helper_uhsubaddx_mips64el
-#define gen_helper_uqadd16 gen_helper_uqadd16_mips64el
-#define gen_helper_uqadd8 gen_helper_uqadd8_mips64el
-#define gen_helper_uqaddsubx gen_helper_uqaddsubx_mips64el
-#define gen_helper_uqsub16 gen_helper_uqsub16_mips64el
-#define gen_helper_uqsub8 gen_helper_uqsub8_mips64el
-#define gen_helper_uqsubaddx gen_helper_uqsubaddx_mips64el
-#define gen_helper_usad8 gen_helper_usad8_mips64el
-#define gen_helper_usat gen_helper_usat_mips64el
-#define gen_helper_usat16 gen_helper_usat16_mips64el
-#define gen_helper_usub16 gen_helper_usub16_mips64el
-#define gen_helper_usub8 gen_helper_usub8_mips64el
-#define gen_helper_usubaddx gen_helper_usubaddx_mips64el
-#define gen_helper_uxtb16 gen_helper_uxtb16_mips64el
-#define gen_helper_v7m_mrs gen_helper_v7m_mrs_mips64el
-#define gen_helper_v7m_msr gen_helper_v7m_msr_mips64el
-#define gen_helper_vfp_absd gen_helper_vfp_absd_mips64el
-#define gen_helper_vfp_abss gen_helper_vfp_abss_mips64el
-#define gen_helper_vfp_addd gen_helper_vfp_addd_mips64el
-#define gen_helper_vfp_adds gen_helper_vfp_adds_mips64el
-#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_mips64el
-#define gen_helper_vfp_cmped gen_helper_vfp_cmped_mips64el
-#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_mips64el
-#define gen_helper_vfp_cmps gen_helper_vfp_cmps_mips64el
-#define gen_helper_vfp_divd gen_helper_vfp_divd_mips64el
-#define gen_helper_vfp_divs gen_helper_vfp_divs_mips64el
-#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_mips64el
-#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_mips64el
-#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_mips64el
-#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_mips64el
-#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_mips64el
-#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_mips64el
-#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips64el
-#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_mips64el
-#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_mips64el
-#define gen_helper_vfp_maxs gen_helper_vfp_maxs_mips64el
-#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_mips64el
-#define gen_helper_vfp_minnums gen_helper_vfp_minnums_mips64el
-#define gen_helper_vfp_mins gen_helper_vfp_mins_mips64el
-#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_mips64el
-#define gen_helper_vfp_muladds gen_helper_vfp_muladds_mips64el
-#define gen_helper_vfp_muld gen_helper_vfp_muld_mips64el
-#define gen_helper_vfp_muls gen_helper_vfp_muls_mips64el
-#define gen_helper_vfp_negd gen_helper_vfp_negd_mips64el
-#define gen_helper_vfp_negs gen_helper_vfp_negs_mips64el
-#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips64el
-#define gen_helper_vfp_shtod gen_helper_vfp_shtod_mips64el
-#define gen_helper_vfp_shtos gen_helper_vfp_shtos_mips64el
-#define gen_helper_vfp_sitod gen_helper_vfp_sitod_mips64el
-#define gen_helper_vfp_sitos gen_helper_vfp_sitos_mips64el
-#define gen_helper_vfp_sltod gen_helper_vfp_sltod_mips64el
-#define gen_helper_vfp_sltos gen_helper_vfp_sltos_mips64el
-#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_mips64el
-#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_mips64el
-#define gen_helper_vfp_subd gen_helper_vfp_subd_mips64el
-#define gen_helper_vfp_subs gen_helper_vfp_subs_mips64el
-#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_mips64el
-#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_mips64el
-#define gen_helper_vfp_tosid gen_helper_vfp_tosid_mips64el
-#define gen_helper_vfp_tosis gen_helper_vfp_tosis_mips64el
-#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_mips64el
-#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_mips64el
-#define gen_helper_vfp_tosld gen_helper_vfp_tosld_mips64el
-#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_mips64el
-#define gen_helper_vfp_tosls gen_helper_vfp_tosls_mips64el
-#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_mips64el
-#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_mips64el
-#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_mips64el
-#define gen_helper_vfp_touid gen_helper_vfp_touid_mips64el
-#define gen_helper_vfp_touis gen_helper_vfp_touis_mips64el
-#define gen_helper_vfp_touizd gen_helper_vfp_touizd_mips64el
-#define gen_helper_vfp_touizs gen_helper_vfp_touizs_mips64el
-#define gen_helper_vfp_tould gen_helper_vfp_tould_mips64el
-#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_mips64el
-#define gen_helper_vfp_touls gen_helper_vfp_touls_mips64el
-#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_mips64el
-#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_mips64el
-#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_mips64el
-#define gen_helper_vfp_uitod gen_helper_vfp_uitod_mips64el
-#define gen_helper_vfp_uitos gen_helper_vfp_uitos_mips64el
-#define gen_helper_vfp_ultod gen_helper_vfp_ultod_mips64el
-#define gen_helper_vfp_ultos gen_helper_vfp_ultos_mips64el
-#define gen_helper_wfe gen_helper_wfe_mips64el
-#define gen_helper_wfi gen_helper_wfi_mips64el
-#define gen_hvc gen_hvc_mips64el
-#define gen_intermediate_code_internal gen_intermediate_code_internal_mips64el
-#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_mips64el
-#define gen_iwmmxt_address gen_iwmmxt_address_mips64el
-#define gen_iwmmxt_shift gen_iwmmxt_shift_mips64el
-#define gen_jmp gen_jmp_mips64el
-#define gen_load_and_replicate gen_load_and_replicate_mips64el
-#define gen_load_exclusive gen_load_exclusive_mips64el
-#define gen_logic_CC gen_logic_CC_mips64el
-#define gen_logicq_cc gen_logicq_cc_mips64el
-#define gen_lookup_tb gen_lookup_tb_mips64el
-#define gen_mov_F0_vreg gen_mov_F0_vreg_mips64el
-#define gen_mov_F1_vreg gen_mov_F1_vreg_mips64el
-#define gen_mov_vreg_F0 gen_mov_vreg_F0_mips64el
-#define gen_muls_i64_i32 gen_muls_i64_i32_mips64el
-#define gen_mulu_i64_i32 gen_mulu_i64_i32_mips64el
-#define gen_mulxy gen_mulxy_mips64el
-#define gen_neon_add gen_neon_add_mips64el
-#define gen_neon_addl gen_neon_addl_mips64el
-#define gen_neon_addl_saturate gen_neon_addl_saturate_mips64el
-#define gen_neon_bsl gen_neon_bsl_mips64el
-#define gen_neon_dup_high16 gen_neon_dup_high16_mips64el
-#define gen_neon_dup_low16 gen_neon_dup_low16_mips64el
-#define gen_neon_dup_u8 gen_neon_dup_u8_mips64el
-#define gen_neon_mull gen_neon_mull_mips64el
-#define gen_neon_narrow gen_neon_narrow_mips64el
-#define gen_neon_narrow_op gen_neon_narrow_op_mips64el
-#define gen_neon_narrow_sats gen_neon_narrow_sats_mips64el
-#define gen_neon_narrow_satu gen_neon_narrow_satu_mips64el
-#define gen_neon_negl gen_neon_negl_mips64el
-#define gen_neon_rsb gen_neon_rsb_mips64el
-#define gen_neon_shift_narrow gen_neon_shift_narrow_mips64el
-#define gen_neon_subl gen_neon_subl_mips64el
-#define gen_neon_trn_u16 gen_neon_trn_u16_mips64el
-#define gen_neon_trn_u8 gen_neon_trn_u8_mips64el
-#define gen_neon_unarrow_sats gen_neon_unarrow_sats_mips64el
-#define gen_neon_unzip gen_neon_unzip_mips64el
-#define gen_neon_widen gen_neon_widen_mips64el
-#define gen_neon_zip gen_neon_zip_mips64el
+#define float128_to_int32 float128_to_int32_mips64el
+#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips64el
+#define float128_to_int64 float128_to_int64_mips64el
+#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips64el
+#define float128_to_uint64 float128_to_uint64_mips64el
+#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_mips64el
+#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_mips64el
+#define float128_to_uint32 float128_to_uint32_mips64el
+#define float128_to_float32 float128_to_float32_mips64el
+#define float128_to_float64 float128_to_float64_mips64el
+#define float128_to_floatx80 float128_to_floatx80_mips64el
+#define float128_round_to_int float128_round_to_int_mips64el
+#define float128_add float128_add_mips64el
+#define float128_sub float128_sub_mips64el
+#define float128_mul float128_mul_mips64el
+#define float128_div float128_div_mips64el
+#define float128_rem float128_rem_mips64el
+#define float128_sqrt float128_sqrt_mips64el
+#define float128_eq float128_eq_mips64el
+#define float128_le float128_le_mips64el
+#define float128_lt float128_lt_mips64el
+#define float128_unordered float128_unordered_mips64el
+#define float128_eq_quiet float128_eq_quiet_mips64el
+#define float128_le_quiet float128_le_quiet_mips64el
+#define float128_lt_quiet float128_lt_quiet_mips64el
+#define float128_unordered_quiet float128_unordered_quiet_mips64el
+#define floatx80_compare floatx80_compare_mips64el
+#define floatx80_compare_quiet floatx80_compare_quiet_mips64el
+#define float128_compare float128_compare_mips64el
+#define float128_compare_quiet float128_compare_quiet_mips64el
+#define floatx80_scalbn floatx80_scalbn_mips64el
+#define float128_scalbn float128_scalbn_mips64el
+#define softfloat_init softfloat_init_mips64el
+#define tcg_optimize tcg_optimize_mips64el
 #define gen_new_label gen_new_label_mips64el
-#define gen_nop_hint gen_nop_hint_mips64el
-#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_mips64el
-#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_mips64el
-#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_mips64el
-#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_mips64el
-#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_mips64el
-#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_mips64el
-#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_mips64el
-#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_mips64el
-#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_mips64el
-#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_mips64el
-#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_mips64el
-#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_mips64el
-#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_mips64el
-#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_mips64el
-#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_mips64el
-#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_mips64el
-#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_mips64el
-#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_mips64el
-#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_mips64el
-#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_mips64el
-#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_mips64el
-#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_mips64el
-#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_mips64el
-#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_mips64el
-#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_mips64el
-#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_mips64el
-#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_mips64el
-#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_mips64el
-#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_mips64el
-#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_mips64el
-#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_mips64el
-#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_mips64el
-#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_mips64el
-#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_mips64el
-#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_mips64el
-#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_mips64el
-#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_mips64el
-#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_mips64el
-#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_mips64el
-#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_mips64el
-#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_mips64el
-#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_mips64el
-#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_mips64el
-#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_mips64el
-#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_mips64el
-#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_mips64el
-#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_mips64el
-#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_mips64el
-#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_mips64el
-#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_mips64el
-#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_mips64el
-#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_mips64el
-#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_mips64el
-#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_mips64el
-#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_mips64el
-#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_mips64el
-#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_mips64el
-#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_mips64el
-#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_mips64el
-#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_mips64el
-#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_mips64el
-#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_mips64el
-#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_mips64el
-#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_mips64el
-#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_mips64el
-#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_mips64el
-#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_mips64el
-#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_mips64el
-#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_mips64el
-#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_mips64el
-#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_mips64el
-#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_mips64el
-#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_mips64el
-#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_mips64el
-#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_mips64el
-#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_mips64el
-#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_mips64el
-#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_mips64el
-#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_mips64el
-#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_mips64el
-#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_mips64el
-#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_mips64el
-#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_mips64el
-#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_mips64el
-#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_mips64el
-#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_mips64el
-#define gen_rev16 gen_rev16_mips64el
-#define gen_revsh gen_revsh_mips64el
-#define gen_rfe gen_rfe_mips64el
-#define gen_sar gen_sar_mips64el
-#define gen_sbc_CC gen_sbc_CC_mips64el
-#define gen_sbfx gen_sbfx_mips64el
-#define gen_set_CF_bit31 gen_set_CF_bit31_mips64el
-#define gen_set_condexec gen_set_condexec_mips64el
-#define gen_set_cpsr gen_set_cpsr_mips64el
-#define gen_set_label gen_set_label_mips64el
-#define gen_set_pc_im gen_set_pc_im_mips64el
-#define gen_set_psr gen_set_psr_mips64el
-#define gen_set_psr_im gen_set_psr_im_mips64el
-#define gen_shl gen_shl_mips64el
-#define gen_shr gen_shr_mips64el
-#define gen_smc gen_smc_mips64el
-#define gen_smul_dual gen_smul_dual_mips64el
-#define gen_srs gen_srs_mips64el
-#define gen_ss_advance gen_ss_advance_mips64el
-#define gen_step_complete_exception gen_step_complete_exception_mips64el
-#define gen_store_exclusive gen_store_exclusive_mips64el
-#define gen_storeq_reg gen_storeq_reg_mips64el
-#define gen_sub_carry gen_sub_carry_mips64el
-#define gen_sub_CC gen_sub_CC_mips64el
-#define gen_subq_msw gen_subq_msw_mips64el
-#define gen_swap_half gen_swap_half_mips64el
-#define gen_thumb2_data_op gen_thumb2_data_op_mips64el
-#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_mips64el
-#define gen_ubfx gen_ubfx_mips64el
-#define gen_vfp_abs gen_vfp_abs_mips64el
-#define gen_vfp_add gen_vfp_add_mips64el
-#define gen_vfp_cmp gen_vfp_cmp_mips64el
-#define gen_vfp_cmpe gen_vfp_cmpe_mips64el
-#define gen_vfp_div gen_vfp_div_mips64el
-#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_mips64el
-#define gen_vfp_F1_mul gen_vfp_F1_mul_mips64el
-#define gen_vfp_F1_neg gen_vfp_F1_neg_mips64el
-#define gen_vfp_ld gen_vfp_ld_mips64el
-#define gen_vfp_mrs gen_vfp_mrs_mips64el
-#define gen_vfp_msr gen_vfp_msr_mips64el
-#define gen_vfp_mul gen_vfp_mul_mips64el
-#define gen_vfp_neg gen_vfp_neg_mips64el
-#define gen_vfp_shto gen_vfp_shto_mips64el
-#define gen_vfp_sito gen_vfp_sito_mips64el
-#define gen_vfp_slto gen_vfp_slto_mips64el
-#define gen_vfp_sqrt gen_vfp_sqrt_mips64el
-#define gen_vfp_st gen_vfp_st_mips64el
-#define gen_vfp_sub gen_vfp_sub_mips64el
-#define gen_vfp_tosh gen_vfp_tosh_mips64el
-#define gen_vfp_tosi gen_vfp_tosi_mips64el
-#define gen_vfp_tosiz gen_vfp_tosiz_mips64el
-#define gen_vfp_tosl gen_vfp_tosl_mips64el
-#define gen_vfp_touh gen_vfp_touh_mips64el
-#define gen_vfp_toui gen_vfp_toui_mips64el
-#define gen_vfp_touiz gen_vfp_touiz_mips64el
-#define gen_vfp_toul gen_vfp_toul_mips64el
-#define gen_vfp_uhto gen_vfp_uhto_mips64el
-#define gen_vfp_uito gen_vfp_uito_mips64el
-#define gen_vfp_ulto gen_vfp_ulto_mips64el
-#define get_arm_cp_reginfo get_arm_cp_reginfo_mips64el
-#define get_clock get_clock_mips64el
-#define get_clock_realtime get_clock_realtime_mips64el
-#define get_constraint_priority get_constraint_priority_mips64el
-#define get_float_exception_flags get_float_exception_flags_mips64el
-#define get_float_rounding_mode get_float_rounding_mode_mips64el
-#define get_fpstatus_ptr get_fpstatus_ptr_mips64el
-#define get_level1_table_address get_level1_table_address_mips64el
-#define get_mem_index get_mem_index_mips64el
-#define get_next_param_value get_next_param_value_mips64el
-#define get_opt_name get_opt_name_mips64el
-#define get_opt_value get_opt_value_mips64el
-#define get_page_addr_code get_page_addr_code_mips64el
-#define get_param_value get_param_value_mips64el
-#define get_phys_addr get_phys_addr_mips64el
-#define get_phys_addr_lpae get_phys_addr_lpae_mips64el
-#define get_phys_addr_mpu get_phys_addr_mpu_mips64el
-#define get_phys_addr_v5 get_phys_addr_v5_mips64el
-#define get_phys_addr_v6 get_phys_addr_v6_mips64el
-#define get_system_memory get_system_memory_mips64el
-#define get_ticks_per_sec get_ticks_per_sec_mips64el
-#define g_list_insert_sorted_merged g_list_insert_sorted_merged_mips64el
-#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__mips64el
-#define gt_cntfrq_access gt_cntfrq_access_mips64el
-#define gt_cnt_read gt_cnt_read_mips64el
-#define gt_cnt_reset gt_cnt_reset_mips64el
-#define gt_counter_access gt_counter_access_mips64el
-#define gt_ctl_write gt_ctl_write_mips64el
-#define gt_cval_write gt_cval_write_mips64el
-#define gt_get_countervalue gt_get_countervalue_mips64el
-#define gt_pct_access gt_pct_access_mips64el
-#define gt_ptimer_access gt_ptimer_access_mips64el
-#define gt_recalc_timer gt_recalc_timer_mips64el
-#define gt_timer_access gt_timer_access_mips64el
-#define gt_tval_read gt_tval_read_mips64el
-#define gt_tval_write gt_tval_write_mips64el
-#define gt_vct_access gt_vct_access_mips64el
-#define gt_vtimer_access gt_vtimer_access_mips64el
-#define guest_phys_blocks_free guest_phys_blocks_free_mips64el
-#define guest_phys_blocks_init guest_phys_blocks_init_mips64el
-#define handle_vcvt handle_vcvt_mips64el
-#define handle_vminmaxnm handle_vminmaxnm_mips64el
-#define handle_vrint handle_vrint_mips64el
-#define handle_vsel handle_vsel_mips64el
-#define has_help_option has_help_option_mips64el
-#define have_bmi1 have_bmi1_mips64el
-#define have_bmi2 have_bmi2_mips64el
-#define hcr_write hcr_write_mips64el
-#define helper_access_check_cp_reg helper_access_check_cp_reg_mips64el
-#define helper_add_saturate helper_add_saturate_mips64el
-#define helper_add_setq helper_add_setq_mips64el
-#define helper_add_usaturate helper_add_usaturate_mips64el
-#define helper_be_ldl_cmmu helper_be_ldl_cmmu_mips64el
-#define helper_be_ldq_cmmu helper_be_ldq_cmmu_mips64el
-#define helper_be_ldq_mmu helper_be_ldq_mmu_mips64el
-#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips64el
-#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips64el
-#define helper_be_ldul_mmu helper_be_ldul_mmu_mips64el
-#define helper_be_lduw_mmu helper_be_lduw_mmu_mips64el
-#define helper_be_ldw_cmmu helper_be_ldw_cmmu_mips64el
-#define helper_be_stl_mmu helper_be_stl_mmu_mips64el
-#define helper_be_stq_mmu helper_be_stq_mmu_mips64el
-#define helper_be_stw_mmu helper_be_stw_mmu_mips64el
-#define helper_clear_pstate_ss helper_clear_pstate_ss_mips64el
-#define helper_clz_arm helper_clz_arm_mips64el
-#define helper_cpsr_read helper_cpsr_read_mips64el
-#define helper_cpsr_write helper_cpsr_write_mips64el
-#define helper_crc32_arm helper_crc32_arm_mips64el
-#define helper_crc32c helper_crc32c_mips64el
-#define helper_crypto_aese helper_crypto_aese_mips64el
-#define helper_crypto_aesmc helper_crypto_aesmc_mips64el
-#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_mips64el
-#define helper_crypto_sha1h helper_crypto_sha1h_mips64el
-#define helper_crypto_sha1su1 helper_crypto_sha1su1_mips64el
-#define helper_crypto_sha256h helper_crypto_sha256h_mips64el
-#define helper_crypto_sha256h2 helper_crypto_sha256h2_mips64el
-#define helper_crypto_sha256su0 helper_crypto_sha256su0_mips64el
-#define helper_crypto_sha256su1 helper_crypto_sha256su1_mips64el
-#define helper_dc_zva helper_dc_zva_mips64el
-#define helper_double_saturate helper_double_saturate_mips64el
-#define helper_exception_internal helper_exception_internal_mips64el
-#define helper_exception_return helper_exception_return_mips64el
-#define helper_exception_with_syndrome helper_exception_with_syndrome_mips64el
-#define helper_get_cp_reg helper_get_cp_reg_mips64el
-#define helper_get_cp_reg64 helper_get_cp_reg64_mips64el
-#define helper_get_r13_banked helper_get_r13_banked_mips64el
-#define helper_get_user_reg helper_get_user_reg_mips64el
-#define helper_iwmmxt_addcb helper_iwmmxt_addcb_mips64el
-#define helper_iwmmxt_addcl helper_iwmmxt_addcl_mips64el
-#define helper_iwmmxt_addcw helper_iwmmxt_addcw_mips64el
-#define helper_iwmmxt_addnb helper_iwmmxt_addnb_mips64el
-#define helper_iwmmxt_addnl helper_iwmmxt_addnl_mips64el
-#define helper_iwmmxt_addnw helper_iwmmxt_addnw_mips64el
-#define helper_iwmmxt_addsb helper_iwmmxt_addsb_mips64el
-#define helper_iwmmxt_addsl helper_iwmmxt_addsl_mips64el
-#define helper_iwmmxt_addsw helper_iwmmxt_addsw_mips64el
-#define helper_iwmmxt_addub helper_iwmmxt_addub_mips64el
-#define helper_iwmmxt_addul helper_iwmmxt_addul_mips64el
-#define helper_iwmmxt_adduw helper_iwmmxt_adduw_mips64el
-#define helper_iwmmxt_align helper_iwmmxt_align_mips64el
-#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_mips64el
-#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_mips64el
-#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_mips64el
-#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_mips64el
-#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_mips64el
-#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_mips64el
-#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_mips64el
-#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_mips64el
-#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_mips64el
-#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_mips64el
-#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_mips64el
-#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_mips64el
-#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_mips64el
-#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_mips64el
-#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_mips64el
-#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_mips64el
-#define helper_iwmmxt_insr helper_iwmmxt_insr_mips64el
-#define helper_iwmmxt_macsw helper_iwmmxt_macsw_mips64el
-#define helper_iwmmxt_macuw helper_iwmmxt_macuw_mips64el
-#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_mips64el
-#define helper_iwmmxt_madduq helper_iwmmxt_madduq_mips64el
-#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_mips64el
-#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_mips64el
-#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_mips64el
-#define helper_iwmmxt_maxub helper_iwmmxt_maxub_mips64el
-#define helper_iwmmxt_maxul helper_iwmmxt_maxul_mips64el
-#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_mips64el
-#define helper_iwmmxt_minsb helper_iwmmxt_minsb_mips64el
-#define helper_iwmmxt_minsl helper_iwmmxt_minsl_mips64el
-#define helper_iwmmxt_minsw helper_iwmmxt_minsw_mips64el
-#define helper_iwmmxt_minub helper_iwmmxt_minub_mips64el
-#define helper_iwmmxt_minul helper_iwmmxt_minul_mips64el
-#define helper_iwmmxt_minuw helper_iwmmxt_minuw_mips64el
-#define helper_iwmmxt_msbb helper_iwmmxt_msbb_mips64el
-#define helper_iwmmxt_msbl helper_iwmmxt_msbl_mips64el
-#define helper_iwmmxt_msbw helper_iwmmxt_msbw_mips64el
-#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_mips64el
-#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_mips64el
-#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_mips64el
-#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_mips64el
-#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_mips64el
-#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_mips64el
-#define helper_iwmmxt_mululw helper_iwmmxt_mululw_mips64el
-#define helper_iwmmxt_packsl helper_iwmmxt_packsl_mips64el
-#define helper_iwmmxt_packsq helper_iwmmxt_packsq_mips64el
-#define helper_iwmmxt_packsw helper_iwmmxt_packsw_mips64el
-#define helper_iwmmxt_packul helper_iwmmxt_packul_mips64el
-#define helper_iwmmxt_packuq helper_iwmmxt_packuq_mips64el
-#define helper_iwmmxt_packuw helper_iwmmxt_packuw_mips64el
-#define helper_iwmmxt_rorl helper_iwmmxt_rorl_mips64el
-#define helper_iwmmxt_rorq helper_iwmmxt_rorq_mips64el
-#define helper_iwmmxt_rorw helper_iwmmxt_rorw_mips64el
-#define helper_iwmmxt_sadb helper_iwmmxt_sadb_mips64el
-#define helper_iwmmxt_sadw helper_iwmmxt_sadw_mips64el
-#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_mips64el
-#define helper_iwmmxt_shufh helper_iwmmxt_shufh_mips64el
-#define helper_iwmmxt_slll helper_iwmmxt_slll_mips64el
-#define helper_iwmmxt_sllq helper_iwmmxt_sllq_mips64el
-#define helper_iwmmxt_sllw helper_iwmmxt_sllw_mips64el
-#define helper_iwmmxt_sral helper_iwmmxt_sral_mips64el
-#define helper_iwmmxt_sraq helper_iwmmxt_sraq_mips64el
-#define helper_iwmmxt_sraw helper_iwmmxt_sraw_mips64el
-#define helper_iwmmxt_srll helper_iwmmxt_srll_mips64el
-#define helper_iwmmxt_srlq helper_iwmmxt_srlq_mips64el
-#define helper_iwmmxt_srlw helper_iwmmxt_srlw_mips64el
-#define helper_iwmmxt_subnb helper_iwmmxt_subnb_mips64el
-#define helper_iwmmxt_subnl helper_iwmmxt_subnl_mips64el
-#define helper_iwmmxt_subnw helper_iwmmxt_subnw_mips64el
-#define helper_iwmmxt_subsb helper_iwmmxt_subsb_mips64el
-#define helper_iwmmxt_subsl helper_iwmmxt_subsl_mips64el
-#define helper_iwmmxt_subsw helper_iwmmxt_subsw_mips64el
-#define helper_iwmmxt_subub helper_iwmmxt_subub_mips64el
-#define helper_iwmmxt_subul helper_iwmmxt_subul_mips64el
-#define helper_iwmmxt_subuw helper_iwmmxt_subuw_mips64el
-#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_mips64el
-#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_mips64el
-#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_mips64el
-#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_mips64el
-#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_mips64el
-#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_mips64el
-#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_mips64el
-#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_mips64el
-#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_mips64el
-#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_mips64el
-#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_mips64el
-#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_mips64el
-#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_mips64el
-#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_mips64el
-#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_mips64el
-#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_mips64el
-#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_mips64el
-#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_mips64el
-#define helper_ldb_cmmu helper_ldb_cmmu_mips64el
-#define helper_ldb_mmu helper_ldb_mmu_mips64el
-#define helper_ldl_cmmu helper_ldl_cmmu_mips64el
-#define helper_ldl_mmu helper_ldl_mmu_mips64el
-#define helper_ldq_cmmu helper_ldq_cmmu_mips64el
-#define helper_ldq_mmu helper_ldq_mmu_mips64el
-#define helper_ldw_cmmu helper_ldw_cmmu_mips64el
-#define helper_ldw_mmu helper_ldw_mmu_mips64el
-#define helper_le_ldl_cmmu helper_le_ldl_cmmu_mips64el
-#define helper_le_ldq_cmmu helper_le_ldq_cmmu_mips64el
-#define helper_le_ldq_mmu helper_le_ldq_mmu_mips64el
-#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips64el
-#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips64el
-#define helper_le_ldul_mmu helper_le_ldul_mmu_mips64el
-#define helper_le_lduw_mmu helper_le_lduw_mmu_mips64el
-#define helper_le_ldw_cmmu helper_le_ldw_cmmu_mips64el
-#define helper_le_stl_mmu helper_le_stl_mmu_mips64el
-#define helper_le_stq_mmu helper_le_stq_mmu_mips64el
-#define helper_le_stw_mmu helper_le_stw_mmu_mips64el
-#define helper_msr_i_pstate helper_msr_i_pstate_mips64el
-#define helper_neon_abd_f32 helper_neon_abd_f32_mips64el
-#define helper_neon_abdl_s16 helper_neon_abdl_s16_mips64el
-#define helper_neon_abdl_s32 helper_neon_abdl_s32_mips64el
-#define helper_neon_abdl_s64 helper_neon_abdl_s64_mips64el
-#define helper_neon_abdl_u16 helper_neon_abdl_u16_mips64el
-#define helper_neon_abdl_u32 helper_neon_abdl_u32_mips64el
-#define helper_neon_abdl_u64 helper_neon_abdl_u64_mips64el
-#define helper_neon_abd_s16 helper_neon_abd_s16_mips64el
-#define helper_neon_abd_s32 helper_neon_abd_s32_mips64el
-#define helper_neon_abd_s8 helper_neon_abd_s8_mips64el
-#define helper_neon_abd_u16 helper_neon_abd_u16_mips64el
-#define helper_neon_abd_u32 helper_neon_abd_u32_mips64el
-#define helper_neon_abd_u8 helper_neon_abd_u8_mips64el
-#define helper_neon_abs_s16 helper_neon_abs_s16_mips64el
-#define helper_neon_abs_s8 helper_neon_abs_s8_mips64el
-#define helper_neon_acge_f32 helper_neon_acge_f32_mips64el
-#define helper_neon_acge_f64 helper_neon_acge_f64_mips64el
-#define helper_neon_acgt_f32 helper_neon_acgt_f32_mips64el
-#define helper_neon_acgt_f64 helper_neon_acgt_f64_mips64el
-#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_mips64el
-#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_mips64el
-#define helper_neon_addl_u16 helper_neon_addl_u16_mips64el
-#define helper_neon_addl_u32 helper_neon_addl_u32_mips64el
-#define helper_neon_add_u16 helper_neon_add_u16_mips64el
-#define helper_neon_add_u8 helper_neon_add_u8_mips64el
-#define helper_neon_ceq_f32 helper_neon_ceq_f32_mips64el
-#define helper_neon_ceq_u16 helper_neon_ceq_u16_mips64el
-#define helper_neon_ceq_u32 helper_neon_ceq_u32_mips64el
-#define helper_neon_ceq_u8 helper_neon_ceq_u8_mips64el
-#define helper_neon_cge_f32 helper_neon_cge_f32_mips64el
-#define helper_neon_cge_s16 helper_neon_cge_s16_mips64el
-#define helper_neon_cge_s32 helper_neon_cge_s32_mips64el
-#define helper_neon_cge_s8 helper_neon_cge_s8_mips64el
-#define helper_neon_cge_u16 helper_neon_cge_u16_mips64el
-#define helper_neon_cge_u32 helper_neon_cge_u32_mips64el
-#define helper_neon_cge_u8 helper_neon_cge_u8_mips64el
-#define helper_neon_cgt_f32 helper_neon_cgt_f32_mips64el
-#define helper_neon_cgt_s16 helper_neon_cgt_s16_mips64el
-#define helper_neon_cgt_s32 helper_neon_cgt_s32_mips64el
-#define helper_neon_cgt_s8 helper_neon_cgt_s8_mips64el
-#define helper_neon_cgt_u16 helper_neon_cgt_u16_mips64el
-#define helper_neon_cgt_u32 helper_neon_cgt_u32_mips64el
-#define helper_neon_cgt_u8 helper_neon_cgt_u8_mips64el
-#define helper_neon_cls_s16 helper_neon_cls_s16_mips64el
-#define helper_neon_cls_s32 helper_neon_cls_s32_mips64el
-#define helper_neon_cls_s8 helper_neon_cls_s8_mips64el
-#define helper_neon_clz_u16 helper_neon_clz_u16_mips64el
-#define helper_neon_clz_u8 helper_neon_clz_u8_mips64el
-#define helper_neon_cnt_u8 helper_neon_cnt_u8_mips64el
-#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_mips64el
-#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_mips64el
-#define helper_neon_hadd_s16 helper_neon_hadd_s16_mips64el
-#define helper_neon_hadd_s32 helper_neon_hadd_s32_mips64el
-#define helper_neon_hadd_s8 helper_neon_hadd_s8_mips64el
-#define helper_neon_hadd_u16 helper_neon_hadd_u16_mips64el
-#define helper_neon_hadd_u32 helper_neon_hadd_u32_mips64el
-#define helper_neon_hadd_u8 helper_neon_hadd_u8_mips64el
-#define helper_neon_hsub_s16 helper_neon_hsub_s16_mips64el
-#define helper_neon_hsub_s32 helper_neon_hsub_s32_mips64el
-#define helper_neon_hsub_s8 helper_neon_hsub_s8_mips64el
-#define helper_neon_hsub_u16 helper_neon_hsub_u16_mips64el
-#define helper_neon_hsub_u32 helper_neon_hsub_u32_mips64el
-#define helper_neon_hsub_u8 helper_neon_hsub_u8_mips64el
-#define helper_neon_max_s16 helper_neon_max_s16_mips64el
-#define helper_neon_max_s32 helper_neon_max_s32_mips64el
-#define helper_neon_max_s8 helper_neon_max_s8_mips64el
-#define helper_neon_max_u16 helper_neon_max_u16_mips64el
-#define helper_neon_max_u32 helper_neon_max_u32_mips64el
-#define helper_neon_max_u8 helper_neon_max_u8_mips64el
-#define helper_neon_min_s16 helper_neon_min_s16_mips64el
-#define helper_neon_min_s32 helper_neon_min_s32_mips64el
-#define helper_neon_min_s8 helper_neon_min_s8_mips64el
-#define helper_neon_min_u16 helper_neon_min_u16_mips64el
-#define helper_neon_min_u32 helper_neon_min_u32_mips64el
-#define helper_neon_min_u8 helper_neon_min_u8_mips64el
-#define helper_neon_mull_p8 helper_neon_mull_p8_mips64el
-#define helper_neon_mull_s16 helper_neon_mull_s16_mips64el
-#define helper_neon_mull_s8 helper_neon_mull_s8_mips64el
-#define helper_neon_mull_u16 helper_neon_mull_u16_mips64el
-#define helper_neon_mull_u8 helper_neon_mull_u8_mips64el
-#define helper_neon_mul_p8 helper_neon_mul_p8_mips64el
-#define helper_neon_mul_u16 helper_neon_mul_u16_mips64el
-#define helper_neon_mul_u8 helper_neon_mul_u8_mips64el
-#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_mips64el
-#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_mips64el
-#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_mips64el
-#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_mips64el
-#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_mips64el
-#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_mips64el
-#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_mips64el
-#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_mips64el
-#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_mips64el
-#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_mips64el
-#define helper_neon_narrow_u16 helper_neon_narrow_u16_mips64el
-#define helper_neon_narrow_u8 helper_neon_narrow_u8_mips64el
-#define helper_neon_negl_u16 helper_neon_negl_u16_mips64el
-#define helper_neon_negl_u32 helper_neon_negl_u32_mips64el
-#define helper_neon_paddl_u16 helper_neon_paddl_u16_mips64el
-#define helper_neon_paddl_u32 helper_neon_paddl_u32_mips64el
-#define helper_neon_padd_u16 helper_neon_padd_u16_mips64el
-#define helper_neon_padd_u8 helper_neon_padd_u8_mips64el
-#define helper_neon_pmax_s16 helper_neon_pmax_s16_mips64el
-#define helper_neon_pmax_s8 helper_neon_pmax_s8_mips64el
-#define helper_neon_pmax_u16 helper_neon_pmax_u16_mips64el
-#define helper_neon_pmax_u8 helper_neon_pmax_u8_mips64el
-#define helper_neon_pmin_s16 helper_neon_pmin_s16_mips64el
-#define helper_neon_pmin_s8 helper_neon_pmin_s8_mips64el
-#define helper_neon_pmin_u16 helper_neon_pmin_u16_mips64el
-#define helper_neon_pmin_u8 helper_neon_pmin_u8_mips64el
-#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_mips64el
-#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_mips64el
-#define helper_neon_qabs_s16 helper_neon_qabs_s16_mips64el
-#define helper_neon_qabs_s32 helper_neon_qabs_s32_mips64el
-#define helper_neon_qabs_s64 helper_neon_qabs_s64_mips64el
-#define helper_neon_qabs_s8 helper_neon_qabs_s8_mips64el
-#define helper_neon_qadd_s16 helper_neon_qadd_s16_mips64el
-#define helper_neon_qadd_s32 helper_neon_qadd_s32_mips64el
-#define helper_neon_qadd_s64 helper_neon_qadd_s64_mips64el
-#define helper_neon_qadd_s8 helper_neon_qadd_s8_mips64el
-#define helper_neon_qadd_u16 helper_neon_qadd_u16_mips64el
-#define helper_neon_qadd_u32 helper_neon_qadd_u32_mips64el
-#define helper_neon_qadd_u64 helper_neon_qadd_u64_mips64el
-#define helper_neon_qadd_u8 helper_neon_qadd_u8_mips64el
-#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_mips64el
-#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_mips64el
-#define helper_neon_qneg_s16 helper_neon_qneg_s16_mips64el
-#define helper_neon_qneg_s32 helper_neon_qneg_s32_mips64el
-#define helper_neon_qneg_s64 helper_neon_qneg_s64_mips64el
-#define helper_neon_qneg_s8 helper_neon_qneg_s8_mips64el
-#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_mips64el
-#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_mips64el
-#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_mips64el
-#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_mips64el
-#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_mips64el
-#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_mips64el
-#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_mips64el
-#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_mips64el
-#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_mips64el
-#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_mips64el
-#define helper_neon_qshl_s16 helper_neon_qshl_s16_mips64el
-#define helper_neon_qshl_s32 helper_neon_qshl_s32_mips64el
-#define helper_neon_qshl_s64 helper_neon_qshl_s64_mips64el
-#define helper_neon_qshl_s8 helper_neon_qshl_s8_mips64el
-#define helper_neon_qshl_u16 helper_neon_qshl_u16_mips64el
-#define helper_neon_qshl_u32 helper_neon_qshl_u32_mips64el
-#define helper_neon_qshl_u64 helper_neon_qshl_u64_mips64el
-#define helper_neon_qshl_u8 helper_neon_qshl_u8_mips64el
-#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_mips64el
-#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_mips64el
-#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_mips64el
-#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_mips64el
-#define helper_neon_qsub_s16 helper_neon_qsub_s16_mips64el
-#define helper_neon_qsub_s32 helper_neon_qsub_s32_mips64el
-#define helper_neon_qsub_s64 helper_neon_qsub_s64_mips64el
-#define helper_neon_qsub_s8 helper_neon_qsub_s8_mips64el
-#define helper_neon_qsub_u16 helper_neon_qsub_u16_mips64el
-#define helper_neon_qsub_u32 helper_neon_qsub_u32_mips64el
-#define helper_neon_qsub_u64 helper_neon_qsub_u64_mips64el
-#define helper_neon_qsub_u8 helper_neon_qsub_u8_mips64el
-#define helper_neon_qunzip16 helper_neon_qunzip16_mips64el
-#define helper_neon_qunzip32 helper_neon_qunzip32_mips64el
-#define helper_neon_qunzip8 helper_neon_qunzip8_mips64el
-#define helper_neon_qzip16 helper_neon_qzip16_mips64el
-#define helper_neon_qzip32 helper_neon_qzip32_mips64el
-#define helper_neon_qzip8 helper_neon_qzip8_mips64el
-#define helper_neon_rbit_u8 helper_neon_rbit_u8_mips64el
-#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_mips64el
-#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_mips64el
-#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_mips64el
-#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_mips64el
-#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_mips64el
-#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_mips64el
-#define helper_neon_rshl_s16 helper_neon_rshl_s16_mips64el
-#define helper_neon_rshl_s32 helper_neon_rshl_s32_mips64el
-#define helper_neon_rshl_s64 helper_neon_rshl_s64_mips64el
-#define helper_neon_rshl_s8 helper_neon_rshl_s8_mips64el
-#define helper_neon_rshl_u16 helper_neon_rshl_u16_mips64el
-#define helper_neon_rshl_u32 helper_neon_rshl_u32_mips64el
-#define helper_neon_rshl_u64 helper_neon_rshl_u64_mips64el
-#define helper_neon_rshl_u8 helper_neon_rshl_u8_mips64el
-#define helper_neon_shl_s16 helper_neon_shl_s16_mips64el
-#define helper_neon_shl_s32 helper_neon_shl_s32_mips64el
-#define helper_neon_shl_s64 helper_neon_shl_s64_mips64el
-#define helper_neon_shl_s8 helper_neon_shl_s8_mips64el
-#define helper_neon_shl_u16 helper_neon_shl_u16_mips64el
-#define helper_neon_shl_u32 helper_neon_shl_u32_mips64el
-#define helper_neon_shl_u64 helper_neon_shl_u64_mips64el
-#define helper_neon_shl_u8 helper_neon_shl_u8_mips64el
-#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_mips64el
-#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_mips64el
-#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_mips64el
-#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_mips64el
-#define helper_neon_subl_u16 helper_neon_subl_u16_mips64el
-#define helper_neon_subl_u32 helper_neon_subl_u32_mips64el
-#define helper_neon_sub_u16 helper_neon_sub_u16_mips64el
-#define helper_neon_sub_u8 helper_neon_sub_u8_mips64el
-#define helper_neon_tbl helper_neon_tbl_mips64el
-#define helper_neon_tst_u16 helper_neon_tst_u16_mips64el
-#define helper_neon_tst_u32 helper_neon_tst_u32_mips64el
-#define helper_neon_tst_u8 helper_neon_tst_u8_mips64el
-#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_mips64el
-#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_mips64el
-#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_mips64el
-#define helper_neon_unzip16 helper_neon_unzip16_mips64el
-#define helper_neon_unzip8 helper_neon_unzip8_mips64el
-#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_mips64el
-#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_mips64el
-#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_mips64el
-#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_mips64el
-#define helper_neon_widen_s16 helper_neon_widen_s16_mips64el
-#define helper_neon_widen_s8 helper_neon_widen_s8_mips64el
-#define helper_neon_widen_u16 helper_neon_widen_u16_mips64el
-#define helper_neon_widen_u8 helper_neon_widen_u8_mips64el
-#define helper_neon_zip16 helper_neon_zip16_mips64el
-#define helper_neon_zip8 helper_neon_zip8_mips64el
-#define helper_pre_hvc helper_pre_hvc_mips64el
-#define helper_pre_smc helper_pre_smc_mips64el
-#define helper_qadd16 helper_qadd16_mips64el
-#define helper_qadd8 helper_qadd8_mips64el
-#define helper_qaddsubx helper_qaddsubx_mips64el
-#define helper_qsub16 helper_qsub16_mips64el
-#define helper_qsub8 helper_qsub8_mips64el
-#define helper_qsubaddx helper_qsubaddx_mips64el
-#define helper_rbit helper_rbit_mips64el
-#define helper_recpe_f32 helper_recpe_f32_mips64el
-#define helper_recpe_f64 helper_recpe_f64_mips64el
-#define helper_recpe_u32 helper_recpe_u32_mips64el
-#define helper_recps_f32 helper_recps_f32_mips64el
-#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_mips64el
-#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips64el
-#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips64el
-#define helper_ret_stb_mmu helper_ret_stb_mmu_mips64el
-#define helper_rintd helper_rintd_mips64el
-#define helper_rintd_exact helper_rintd_exact_mips64el
-#define helper_rints helper_rints_mips64el
-#define helper_rints_exact helper_rints_exact_mips64el
-#define helper_ror_cc helper_ror_cc_mips64el
-#define helper_rsqrte_f32 helper_rsqrte_f32_mips64el
-#define helper_rsqrte_f64 helper_rsqrte_f64_mips64el
-#define helper_rsqrte_u32 helper_rsqrte_u32_mips64el
-#define helper_rsqrts_f32 helper_rsqrts_f32_mips64el
-#define helper_sadd16 helper_sadd16_mips64el
-#define helper_sadd8 helper_sadd8_mips64el
-#define helper_saddsubx helper_saddsubx_mips64el
-#define helper_sar_cc helper_sar_cc_mips64el
-#define helper_sdiv helper_sdiv_mips64el
-#define helper_sel_flags helper_sel_flags_mips64el
-#define helper_set_cp_reg helper_set_cp_reg_mips64el
-#define helper_set_cp_reg64 helper_set_cp_reg64_mips64el
-#define helper_set_neon_rmode helper_set_neon_rmode_mips64el
-#define helper_set_r13_banked helper_set_r13_banked_mips64el
-#define helper_set_rmode helper_set_rmode_mips64el
-#define helper_set_user_reg helper_set_user_reg_mips64el
-#define helper_shadd16 helper_shadd16_mips64el
-#define helper_shadd8 helper_shadd8_mips64el
-#define helper_shaddsubx helper_shaddsubx_mips64el
-#define helper_shl_cc helper_shl_cc_mips64el
-#define helper_shr_cc helper_shr_cc_mips64el
-#define helper_shsub16 helper_shsub16_mips64el
-#define helper_shsub8 helper_shsub8_mips64el
-#define helper_shsubaddx helper_shsubaddx_mips64el
-#define helper_ssat helper_ssat_mips64el
-#define helper_ssat16 helper_ssat16_mips64el
-#define helper_ssub16 helper_ssub16_mips64el
-#define helper_ssub8 helper_ssub8_mips64el
-#define helper_ssubaddx helper_ssubaddx_mips64el
-#define helper_stb_mmu helper_stb_mmu_mips64el
-#define helper_stl_mmu helper_stl_mmu_mips64el
-#define helper_stq_mmu helper_stq_mmu_mips64el
-#define helper_stw_mmu helper_stw_mmu_mips64el
-#define helper_sub_saturate helper_sub_saturate_mips64el
-#define helper_sub_usaturate helper_sub_usaturate_mips64el
-#define helper_sxtb16 helper_sxtb16_mips64el
-#define helper_uadd16 helper_uadd16_mips64el
-#define helper_uadd8 helper_uadd8_mips64el
-#define helper_uaddsubx helper_uaddsubx_mips64el
-#define helper_udiv helper_udiv_mips64el
-#define helper_uhadd16 helper_uhadd16_mips64el
-#define helper_uhadd8 helper_uhadd8_mips64el
-#define helper_uhaddsubx helper_uhaddsubx_mips64el
-#define helper_uhsub16 helper_uhsub16_mips64el
-#define helper_uhsub8 helper_uhsub8_mips64el
-#define helper_uhsubaddx helper_uhsubaddx_mips64el
-#define helper_uqadd16 helper_uqadd16_mips64el
-#define helper_uqadd8 helper_uqadd8_mips64el
-#define helper_uqaddsubx helper_uqaddsubx_mips64el
-#define helper_uqsub16 helper_uqsub16_mips64el
-#define helper_uqsub8 helper_uqsub8_mips64el
-#define helper_uqsubaddx helper_uqsubaddx_mips64el
-#define helper_usad8 helper_usad8_mips64el
-#define helper_usat helper_usat_mips64el
-#define helper_usat16 helper_usat16_mips64el
-#define helper_usub16 helper_usub16_mips64el
-#define helper_usub8 helper_usub8_mips64el
-#define helper_usubaddx helper_usubaddx_mips64el
-#define helper_uxtb16 helper_uxtb16_mips64el
-#define helper_v7m_mrs helper_v7m_mrs_mips64el
-#define helper_v7m_msr helper_v7m_msr_mips64el
-#define helper_vfp_absd helper_vfp_absd_mips64el
-#define helper_vfp_abss helper_vfp_abss_mips64el
-#define helper_vfp_addd helper_vfp_addd_mips64el
-#define helper_vfp_adds helper_vfp_adds_mips64el
-#define helper_vfp_cmpd helper_vfp_cmpd_mips64el
-#define helper_vfp_cmped helper_vfp_cmped_mips64el
-#define helper_vfp_cmpes helper_vfp_cmpes_mips64el
-#define helper_vfp_cmps helper_vfp_cmps_mips64el
-#define helper_vfp_divd helper_vfp_divd_mips64el
-#define helper_vfp_divs helper_vfp_divs_mips64el
-#define helper_vfp_fcvtds helper_vfp_fcvtds_mips64el
-#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_mips64el
-#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_mips64el
-#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_mips64el
-#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_mips64el
-#define helper_vfp_fcvtsd helper_vfp_fcvtsd_mips64el
-#define helper_vfp_get_fpscr helper_vfp_get_fpscr_mips64el
-#define helper_vfp_maxd helper_vfp_maxd_mips64el
-#define helper_vfp_maxnumd helper_vfp_maxnumd_mips64el
-#define helper_vfp_maxnums helper_vfp_maxnums_mips64el
-#define helper_vfp_maxs helper_vfp_maxs_mips64el
-#define helper_vfp_mind helper_vfp_mind_mips64el
-#define helper_vfp_minnumd helper_vfp_minnumd_mips64el
-#define helper_vfp_minnums helper_vfp_minnums_mips64el
-#define helper_vfp_mins helper_vfp_mins_mips64el
-#define helper_vfp_muladdd helper_vfp_muladdd_mips64el
-#define helper_vfp_muladds helper_vfp_muladds_mips64el
-#define helper_vfp_muld helper_vfp_muld_mips64el
-#define helper_vfp_muls helper_vfp_muls_mips64el
-#define helper_vfp_negd helper_vfp_negd_mips64el
-#define helper_vfp_negs helper_vfp_negs_mips64el
-#define helper_vfp_set_fpscr helper_vfp_set_fpscr_mips64el
-#define helper_vfp_shtod helper_vfp_shtod_mips64el
-#define helper_vfp_shtos helper_vfp_shtos_mips64el
-#define helper_vfp_sitod helper_vfp_sitod_mips64el
-#define helper_vfp_sitos helper_vfp_sitos_mips64el
-#define helper_vfp_sltod helper_vfp_sltod_mips64el
-#define helper_vfp_sltos helper_vfp_sltos_mips64el
-#define helper_vfp_sqrtd helper_vfp_sqrtd_mips64el
-#define helper_vfp_sqrts helper_vfp_sqrts_mips64el
-#define helper_vfp_sqtod helper_vfp_sqtod_mips64el
-#define helper_vfp_sqtos helper_vfp_sqtos_mips64el
-#define helper_vfp_subd helper_vfp_subd_mips64el
-#define helper_vfp_subs helper_vfp_subs_mips64el
-#define helper_vfp_toshd helper_vfp_toshd_mips64el
-#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_mips64el
-#define helper_vfp_toshs helper_vfp_toshs_mips64el
-#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_mips64el
-#define helper_vfp_tosid helper_vfp_tosid_mips64el
-#define helper_vfp_tosis helper_vfp_tosis_mips64el
-#define helper_vfp_tosizd helper_vfp_tosizd_mips64el
-#define helper_vfp_tosizs helper_vfp_tosizs_mips64el
-#define helper_vfp_tosld helper_vfp_tosld_mips64el
-#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_mips64el
-#define helper_vfp_tosls helper_vfp_tosls_mips64el
-#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_mips64el
-#define helper_vfp_tosqd helper_vfp_tosqd_mips64el
-#define helper_vfp_tosqs helper_vfp_tosqs_mips64el
-#define helper_vfp_touhd helper_vfp_touhd_mips64el
-#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_mips64el
-#define helper_vfp_touhs helper_vfp_touhs_mips64el
-#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_mips64el
-#define helper_vfp_touid helper_vfp_touid_mips64el
-#define helper_vfp_touis helper_vfp_touis_mips64el
-#define helper_vfp_touizd helper_vfp_touizd_mips64el
-#define helper_vfp_touizs helper_vfp_touizs_mips64el
-#define helper_vfp_tould helper_vfp_tould_mips64el
-#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_mips64el
-#define helper_vfp_touls helper_vfp_touls_mips64el
-#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_mips64el
-#define helper_vfp_touqd helper_vfp_touqd_mips64el
-#define helper_vfp_touqs helper_vfp_touqs_mips64el
-#define helper_vfp_uhtod helper_vfp_uhtod_mips64el
-#define helper_vfp_uhtos helper_vfp_uhtos_mips64el
-#define helper_vfp_uitod helper_vfp_uitod_mips64el
-#define helper_vfp_uitos helper_vfp_uitos_mips64el
-#define helper_vfp_ultod helper_vfp_ultod_mips64el
-#define helper_vfp_ultos helper_vfp_ultos_mips64el
-#define helper_vfp_uqtod helper_vfp_uqtod_mips64el
-#define helper_vfp_uqtos helper_vfp_uqtos_mips64el
-#define helper_wfe helper_wfe_mips64el
-#define helper_wfi helper_wfi_mips64el
-#define hex2decimal hex2decimal_mips64el
-#define hw_breakpoint_update hw_breakpoint_update_mips64el
-#define hw_breakpoint_update_all hw_breakpoint_update_all_mips64el
-#define hw_watchpoint_update hw_watchpoint_update_mips64el
-#define hw_watchpoint_update_all hw_watchpoint_update_all_mips64el
-#define _init _init_mips64el
-#define init_cpreg_list init_cpreg_list_mips64el
-#define init_lists init_lists_mips64el
-#define input_type_enum input_type_enum_mips64el
-#define int128_2_64 int128_2_64_mips64el
-#define int128_add int128_add_mips64el
-#define int128_addto int128_addto_mips64el
-#define int128_and int128_and_mips64el
-#define int128_eq int128_eq_mips64el
-#define int128_ge int128_ge_mips64el
-#define int128_get64 int128_get64_mips64el
-#define int128_gt int128_gt_mips64el
-#define int128_le int128_le_mips64el
-#define int128_lt int128_lt_mips64el
-#define int128_make64 int128_make64_mips64el
-#define int128_max int128_max_mips64el
-#define int128_min int128_min_mips64el
-#define int128_ne int128_ne_mips64el
-#define int128_neg int128_neg_mips64el
-#define int128_nz int128_nz_mips64el
-#define int128_rshift int128_rshift_mips64el
-#define int128_sub int128_sub_mips64el
-#define int128_subfrom int128_subfrom_mips64el
-#define int128_zero int128_zero_mips64el
-#define int16_to_float32 int16_to_float32_mips64el
-#define int16_to_float64 int16_to_float64_mips64el
-#define int32_to_float128 int32_to_float128_mips64el
-#define int32_to_float32 int32_to_float32_mips64el
-#define int32_to_float64 int32_to_float64_mips64el
-#define int32_to_floatx80 int32_to_floatx80_mips64el
-#define int64_to_float128 int64_to_float128_mips64el
-#define int64_to_float32 int64_to_float32_mips64el
-#define int64_to_float64 int64_to_float64_mips64el
-#define int64_to_floatx80 int64_to_floatx80_mips64el
-#define invalidate_and_set_dirty invalidate_and_set_dirty_mips64el
-#define invalidate_page_bitmap invalidate_page_bitmap_mips64el
-#define io_mem_read io_mem_read_mips64el
-#define io_mem_write io_mem_write_mips64el
-#define io_readb io_readb_mips64el
-#define io_readl io_readl_mips64el
-#define io_readq io_readq_mips64el
-#define io_readw io_readw_mips64el
-#define iotlb_to_region iotlb_to_region_mips64el
-#define io_writeb io_writeb_mips64el
-#define io_writel io_writel_mips64el
-#define io_writeq io_writeq_mips64el
-#define io_writew io_writew_mips64el
-#define is_a64 is_a64_mips64el
-#define is_help_option is_help_option_mips64el
-#define isr_read isr_read_mips64el
-#define is_valid_option_list is_valid_option_list_mips64el
-#define iwmmxt_load_creg iwmmxt_load_creg_mips64el
-#define iwmmxt_load_reg iwmmxt_load_reg_mips64el
-#define iwmmxt_store_creg iwmmxt_store_creg_mips64el
-#define iwmmxt_store_reg iwmmxt_store_reg_mips64el
-#define __jit_debug_descriptor __jit_debug_descriptor_mips64el
-#define __jit_debug_register_code __jit_debug_register_code_mips64el
-#define kvm_to_cpreg_id kvm_to_cpreg_id_mips64el
-#define last_ram_offset last_ram_offset_mips64el
-#define ldl_be_p ldl_be_p_mips64el
-#define ldl_be_phys ldl_be_phys_mips64el
-#define ldl_he_p ldl_he_p_mips64el
-#define ldl_le_p ldl_le_p_mips64el
-#define ldl_le_phys ldl_le_phys_mips64el
-#define ldl_phys ldl_phys_mips64el
-#define ldl_phys_internal ldl_phys_internal_mips64el
-#define ldq_be_p ldq_be_p_mips64el
-#define ldq_be_phys ldq_be_phys_mips64el
-#define ldq_he_p ldq_he_p_mips64el
-#define ldq_le_p ldq_le_p_mips64el
-#define ldq_le_phys ldq_le_phys_mips64el
-#define ldq_phys ldq_phys_mips64el
-#define ldq_phys_internal ldq_phys_internal_mips64el
-#define ldst_name ldst_name_mips64el
-#define ldub_p ldub_p_mips64el
-#define ldub_phys ldub_phys_mips64el
-#define lduw_be_p lduw_be_p_mips64el
-#define lduw_be_phys lduw_be_phys_mips64el
-#define lduw_he_p lduw_he_p_mips64el
-#define lduw_le_p lduw_le_p_mips64el
-#define lduw_le_phys lduw_le_phys_mips64el
-#define lduw_phys lduw_phys_mips64el
-#define lduw_phys_internal lduw_phys_internal_mips64el
-#define le128 le128_mips64el
-#define linked_bp_matches linked_bp_matches_mips64el
-#define listener_add_address_space listener_add_address_space_mips64el
-#define load_cpu_offset load_cpu_offset_mips64el
-#define load_reg load_reg_mips64el
-#define load_reg_var load_reg_var_mips64el
-#define log_cpu_state log_cpu_state_mips64el
-#define lpae_cp_reginfo lpae_cp_reginfo_mips64el
-#define lt128 lt128_mips64el
-#define machine_class_init machine_class_init_mips64el
-#define machine_finalize machine_finalize_mips64el
-#define machine_info machine_info_mips64el
-#define machine_initfn machine_initfn_mips64el
-#define machine_register_types machine_register_types_mips64el
-#define machvirt_init machvirt_init_mips64el
-#define machvirt_machine_init machvirt_machine_init_mips64el
-#define maj maj_mips64el
-#define mapping_conflict mapping_conflict_mips64el
-#define mapping_contiguous mapping_contiguous_mips64el
-#define mapping_have_same_region mapping_have_same_region_mips64el
-#define mapping_merge mapping_merge_mips64el
-#define mem_add mem_add_mips64el
-#define mem_begin mem_begin_mips64el
-#define mem_commit mem_commit_mips64el
-#define memory_access_is_direct memory_access_is_direct_mips64el
-#define memory_access_size memory_access_size_mips64el
-#define memory_init memory_init_mips64el
-#define memory_listener_match memory_listener_match_mips64el
-#define memory_listener_register memory_listener_register_mips64el
-#define memory_listener_unregister memory_listener_unregister_mips64el
-#define memory_map_init memory_map_init_mips64el
-#define memory_mapping_filter memory_mapping_filter_mips64el
-#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_mips64el
-#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips64el
-#define memory_mapping_list_free memory_mapping_list_free_mips64el
-#define memory_mapping_list_init memory_mapping_list_init_mips64el
-#define memory_region_access_valid memory_region_access_valid_mips64el
-#define memory_region_add_subregion memory_region_add_subregion_mips64el
-#define memory_region_add_subregion_common memory_region_add_subregion_common_mips64el
-#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mips64el
-#define memory_region_big_endian memory_region_big_endian_mips64el
-#define memory_region_clear_pending memory_region_clear_pending_mips64el
-#define memory_region_del_subregion memory_region_del_subregion_mips64el
-#define memory_region_destructor_alias memory_region_destructor_alias_mips64el
-#define memory_region_destructor_none memory_region_destructor_none_mips64el
-#define memory_region_destructor_ram memory_region_destructor_ram_mips64el
-#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_mips64el
-#define memory_region_dispatch_read memory_region_dispatch_read_mips64el
-#define memory_region_dispatch_read1 memory_region_dispatch_read1_mips64el
-#define memory_region_dispatch_write memory_region_dispatch_write_mips64el
-#define memory_region_escape_name memory_region_escape_name_mips64el
-#define memory_region_finalize memory_region_finalize_mips64el
-#define memory_region_find memory_region_find_mips64el
-#define memory_region_get_addr memory_region_get_addr_mips64el
-#define memory_region_get_alignment memory_region_get_alignment_mips64el
-#define memory_region_get_container memory_region_get_container_mips64el
-#define memory_region_get_fd memory_region_get_fd_mips64el
-#define memory_region_get_may_overlap memory_region_get_may_overlap_mips64el
-#define memory_region_get_priority memory_region_get_priority_mips64el
-#define memory_region_get_ram_addr memory_region_get_ram_addr_mips64el
-#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips64el
-#define memory_region_get_size memory_region_get_size_mips64el
-#define memory_region_info memory_region_info_mips64el
-#define memory_region_init memory_region_init_mips64el
-#define memory_region_init_alias memory_region_init_alias_mips64el
-#define memory_region_initfn memory_region_initfn_mips64el
-#define memory_region_init_io memory_region_init_io_mips64el
-#define memory_region_init_ram memory_region_init_ram_mips64el
-#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips64el
-#define memory_region_init_reservation memory_region_init_reservation_mips64el
-#define memory_region_is_iommu memory_region_is_iommu_mips64el
-#define memory_region_is_logging memory_region_is_logging_mips64el
-#define memory_region_is_mapped memory_region_is_mapped_mips64el
-#define memory_region_is_ram memory_region_is_ram_mips64el
-#define memory_region_is_rom memory_region_is_rom_mips64el
-#define memory_region_is_romd memory_region_is_romd_mips64el
-#define memory_region_is_skip_dump memory_region_is_skip_dump_mips64el
-#define memory_region_is_unassigned memory_region_is_unassigned_mips64el
-#define memory_region_name memory_region_name_mips64el
-#define memory_region_need_escape memory_region_need_escape_mips64el
-#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_mips64el
-#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_mips64el
-#define memory_region_present memory_region_present_mips64el
-#define memory_region_read_accessor memory_region_read_accessor_mips64el
-#define memory_region_readd_subregion memory_region_readd_subregion_mips64el
-#define memory_region_ref memory_region_ref_mips64el
-#define memory_region_resolve_container memory_region_resolve_container_mips64el
-#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_mips64el
-#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips64el
-#define memory_region_set_address memory_region_set_address_mips64el
-#define memory_region_set_alias_offset memory_region_set_alias_offset_mips64el
-#define memory_region_set_enabled memory_region_set_enabled_mips64el
-#define memory_region_set_readonly memory_region_set_readonly_mips64el
-#define memory_region_set_skip_dump memory_region_set_skip_dump_mips64el
-#define memory_region_size memory_region_size_mips64el
-#define memory_region_to_address_space memory_region_to_address_space_mips64el
-#define memory_region_transaction_begin memory_region_transaction_begin_mips64el
-#define memory_region_transaction_commit memory_region_transaction_commit_mips64el
-#define memory_region_unref memory_region_unref_mips64el
-#define memory_region_update_container_subregions memory_region_update_container_subregions_mips64el
-#define memory_region_write_accessor memory_region_write_accessor_mips64el
-#define memory_region_wrong_endianness memory_region_wrong_endianness_mips64el
-#define memory_try_enable_merging memory_try_enable_merging_mips64el
-#define module_call_init module_call_init_mips64el
-#define module_load module_load_mips64el
-#define mpidr_cp_reginfo mpidr_cp_reginfo_mips64el
-#define mpidr_read mpidr_read_mips64el
-#define msr_mask msr_mask_mips64el
-#define mul128By64To192 mul128By64To192_mips64el
-#define mul128To256 mul128To256_mips64el
-#define mul64To128 mul64To128_mips64el
-#define muldiv64 muldiv64_mips64el
-#define neon_2rm_is_float_op neon_2rm_is_float_op_mips64el
-#define neon_2rm_sizes neon_2rm_sizes_mips64el
-#define neon_3r_sizes neon_3r_sizes_mips64el
-#define neon_get_scalar neon_get_scalar_mips64el
-#define neon_load_reg neon_load_reg_mips64el
-#define neon_load_reg64 neon_load_reg64_mips64el
-#define neon_load_scratch neon_load_scratch_mips64el
-#define neon_ls_element_type neon_ls_element_type_mips64el
-#define neon_reg_offset neon_reg_offset_mips64el
-#define neon_store_reg neon_store_reg_mips64el
-#define neon_store_reg64 neon_store_reg64_mips64el
-#define neon_store_scratch neon_store_scratch_mips64el
-#define new_ldst_label new_ldst_label_mips64el
-#define next_list next_list_mips64el
-#define normalizeFloat128Subnormal normalizeFloat128Subnormal_mips64el
-#define normalizeFloat16Subnormal normalizeFloat16Subnormal_mips64el
-#define normalizeFloat32Subnormal normalizeFloat32Subnormal_mips64el
-#define normalizeFloat64Subnormal normalizeFloat64Subnormal_mips64el
-#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips64el
-#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_mips64el
-#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_mips64el
-#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_mips64el
-#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips64el
-#define not_v6_cp_reginfo not_v6_cp_reginfo_mips64el
-#define not_v7_cp_reginfo not_v7_cp_reginfo_mips64el
-#define not_v8_cp_reginfo not_v8_cp_reginfo_mips64el
-#define object_child_foreach object_child_foreach_mips64el
-#define object_class_foreach object_class_foreach_mips64el
-#define object_class_foreach_tramp object_class_foreach_tramp_mips64el
-#define object_class_get_list object_class_get_list_mips64el
-#define object_class_get_list_tramp object_class_get_list_tramp_mips64el
-#define object_class_get_parent object_class_get_parent_mips64el
-#define object_deinit object_deinit_mips64el
-#define object_dynamic_cast object_dynamic_cast_mips64el
-#define object_finalize object_finalize_mips64el
-#define object_finalize_child_property object_finalize_child_property_mips64el
-#define object_get_child_property object_get_child_property_mips64el
-#define object_get_link_property object_get_link_property_mips64el
-#define object_get_root object_get_root_mips64el
-#define object_initialize_with_type object_initialize_with_type_mips64el
-#define object_init_with_type object_init_with_type_mips64el
-#define object_instance_init object_instance_init_mips64el
-#define object_new_with_type object_new_with_type_mips64el
-#define object_post_init_with_type object_post_init_with_type_mips64el
-#define object_property_add_alias object_property_add_alias_mips64el
-#define object_property_add_link object_property_add_link_mips64el
-#define object_property_add_uint16_ptr object_property_add_uint16_ptr_mips64el
-#define object_property_add_uint32_ptr object_property_add_uint32_ptr_mips64el
-#define object_property_add_uint64_ptr object_property_add_uint64_ptr_mips64el
-#define object_property_add_uint8_ptr object_property_add_uint8_ptr_mips64el
-#define object_property_allow_set_link object_property_allow_set_link_mips64el
-#define object_property_del object_property_del_mips64el
-#define object_property_del_all object_property_del_all_mips64el
-#define object_property_find object_property_find_mips64el
-#define object_property_get object_property_get_mips64el
-#define object_property_get_bool object_property_get_bool_mips64el
-#define object_property_get_int object_property_get_int_mips64el
-#define object_property_get_link object_property_get_link_mips64el
-#define object_property_get_qobject object_property_get_qobject_mips64el
-#define object_property_get_str object_property_get_str_mips64el
-#define object_property_get_type object_property_get_type_mips64el
-#define object_property_is_child object_property_is_child_mips64el
-#define object_property_set object_property_set_mips64el
-#define object_property_set_description object_property_set_description_mips64el
-#define object_property_set_link object_property_set_link_mips64el
-#define object_property_set_qobject object_property_set_qobject_mips64el
-#define object_release_link_property object_release_link_property_mips64el
-#define object_resolve_abs_path object_resolve_abs_path_mips64el
-#define object_resolve_child_property object_resolve_child_property_mips64el
-#define object_resolve_link object_resolve_link_mips64el
-#define object_resolve_link_property object_resolve_link_property_mips64el
-#define object_resolve_partial_path object_resolve_partial_path_mips64el
-#define object_resolve_path object_resolve_path_mips64el
-#define object_resolve_path_component object_resolve_path_component_mips64el
-#define object_resolve_path_type object_resolve_path_type_mips64el
-#define object_set_link_property object_set_link_property_mips64el
-#define object_unparent object_unparent_mips64el
-#define omap_cachemaint_write omap_cachemaint_write_mips64el
-#define omap_cp_reginfo omap_cp_reginfo_mips64el
-#define omap_threadid_write omap_threadid_write_mips64el
-#define omap_ticonfig_write omap_ticonfig_write_mips64el
-#define omap_wfi_write omap_wfi_write_mips64el
-#define op_bits op_bits_mips64el
-#define open_modeflags open_modeflags_mips64el
-#define op_to_mov op_to_mov_mips64el
-#define op_to_movi op_to_movi_mips64el
-#define output_type_enum output_type_enum_mips64el
-#define packFloat128 packFloat128_mips64el
-#define packFloat16 packFloat16_mips64el
-#define packFloat32 packFloat32_mips64el
-#define packFloat64 packFloat64_mips64el
-#define packFloatx80 packFloatx80_mips64el
-#define page_find page_find_mips64el
-#define page_find_alloc page_find_alloc_mips64el
-#define page_flush_tb page_flush_tb_mips64el
-#define page_flush_tb_1 page_flush_tb_1_mips64el
-#define page_init page_init_mips64el
-#define page_size_init page_size_init_mips64el
-#define par par_mips64el
-#define parse_array parse_array_mips64el
-#define parse_error parse_error_mips64el
-#define parse_escape parse_escape_mips64el
-#define parse_keyword parse_keyword_mips64el
-#define parse_literal parse_literal_mips64el
-#define parse_object parse_object_mips64el
-#define parse_optional parse_optional_mips64el
-#define parse_option_bool parse_option_bool_mips64el
-#define parse_option_number parse_option_number_mips64el
-#define parse_option_size parse_option_size_mips64el
-#define parse_pair parse_pair_mips64el
-#define parser_context_free parser_context_free_mips64el
-#define parser_context_new parser_context_new_mips64el
-#define parser_context_peek_token parser_context_peek_token_mips64el
-#define parser_context_pop_token parser_context_pop_token_mips64el
-#define parser_context_restore parser_context_restore_mips64el
-#define parser_context_save parser_context_save_mips64el
-#define parse_str parse_str_mips64el
-#define parse_type_bool parse_type_bool_mips64el
-#define parse_type_int parse_type_int_mips64el
-#define parse_type_number parse_type_number_mips64el
-#define parse_type_size parse_type_size_mips64el
-#define parse_type_str parse_type_str_mips64el
-#define parse_value parse_value_mips64el
-#define par_write par_write_mips64el
-#define patch_reloc patch_reloc_mips64el
-#define phys_map_node_alloc phys_map_node_alloc_mips64el
-#define phys_map_node_reserve phys_map_node_reserve_mips64el
-#define phys_mem_alloc phys_mem_alloc_mips64el
-#define phys_mem_set_alloc phys_mem_set_alloc_mips64el
-#define phys_page_compact phys_page_compact_mips64el
-#define phys_page_compact_all phys_page_compact_all_mips64el
-#define phys_page_find phys_page_find_mips64el
-#define phys_page_set phys_page_set_mips64el
-#define phys_page_set_level phys_page_set_level_mips64el
-#define phys_section_add phys_section_add_mips64el
-#define phys_section_destroy phys_section_destroy_mips64el
-#define phys_sections_free phys_sections_free_mips64el
-#define pickNaN pickNaN_mips64el
-#define pickNaNMulAdd pickNaNMulAdd_mips64el
-#define pmccfiltr_write pmccfiltr_write_mips64el
-#define pmccntr_read pmccntr_read_mips64el
-#define pmccntr_sync pmccntr_sync_mips64el
-#define pmccntr_write pmccntr_write_mips64el
-#define pmccntr_write32 pmccntr_write32_mips64el
-#define pmcntenclr_write pmcntenclr_write_mips64el
-#define pmcntenset_write pmcntenset_write_mips64el
-#define pmcr_write pmcr_write_mips64el
-#define pmintenclr_write pmintenclr_write_mips64el
-#define pmintenset_write pmintenset_write_mips64el
-#define pmovsr_write pmovsr_write_mips64el
-#define pmreg_access pmreg_access_mips64el
-#define pmsav5_cp_reginfo pmsav5_cp_reginfo_mips64el
-#define pmsav5_data_ap_read pmsav5_data_ap_read_mips64el
-#define pmsav5_data_ap_write pmsav5_data_ap_write_mips64el
-#define pmsav5_insn_ap_read pmsav5_insn_ap_read_mips64el
-#define pmsav5_insn_ap_write pmsav5_insn_ap_write_mips64el
-#define pmuserenr_write pmuserenr_write_mips64el
-#define pmxevtyper_write pmxevtyper_write_mips64el
-#define print_type_bool print_type_bool_mips64el
-#define print_type_int print_type_int_mips64el
-#define print_type_number print_type_number_mips64el
-#define print_type_size print_type_size_mips64el
-#define print_type_str print_type_str_mips64el
-#define propagateFloat128NaN propagateFloat128NaN_mips64el
-#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_mips64el
-#define propagateFloat32NaN propagateFloat32NaN_mips64el
-#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_mips64el
-#define propagateFloat64NaN propagateFloat64NaN_mips64el
-#define propagateFloatx80NaN propagateFloatx80NaN_mips64el
-#define property_get_alias property_get_alias_mips64el
-#define property_get_bool property_get_bool_mips64el
-#define property_get_str property_get_str_mips64el
-#define property_get_uint16_ptr property_get_uint16_ptr_mips64el
-#define property_get_uint32_ptr property_get_uint32_ptr_mips64el
-#define property_get_uint64_ptr property_get_uint64_ptr_mips64el
-#define property_get_uint8_ptr property_get_uint8_ptr_mips64el
-#define property_release_alias property_release_alias_mips64el
-#define property_release_bool property_release_bool_mips64el
-#define property_release_str property_release_str_mips64el
-#define property_resolve_alias property_resolve_alias_mips64el
-#define property_set_alias property_set_alias_mips64el
-#define property_set_bool property_set_bool_mips64el
-#define property_set_str property_set_str_mips64el
-#define pstate_read pstate_read_mips64el
-#define pstate_write pstate_write_mips64el
-#define pxa250_initfn pxa250_initfn_mips64el
-#define pxa255_initfn pxa255_initfn_mips64el
-#define pxa260_initfn pxa260_initfn_mips64el
-#define pxa261_initfn pxa261_initfn_mips64el
-#define pxa262_initfn pxa262_initfn_mips64el
-#define pxa270a0_initfn pxa270a0_initfn_mips64el
-#define pxa270a1_initfn pxa270a1_initfn_mips64el
-#define pxa270b0_initfn pxa270b0_initfn_mips64el
-#define pxa270b1_initfn pxa270b1_initfn_mips64el
-#define pxa270c0_initfn pxa270c0_initfn_mips64el
-#define pxa270c5_initfn pxa270c5_initfn_mips64el
-#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_mips64el
-#define qapi_dealloc_end_list qapi_dealloc_end_list_mips64el
-#define qapi_dealloc_end_struct qapi_dealloc_end_struct_mips64el
-#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_mips64el
-#define qapi_dealloc_next_list qapi_dealloc_next_list_mips64el
-#define qapi_dealloc_pop qapi_dealloc_pop_mips64el
-#define qapi_dealloc_push qapi_dealloc_push_mips64el
-#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_mips64el
-#define qapi_dealloc_start_list qapi_dealloc_start_list_mips64el
-#define qapi_dealloc_start_struct qapi_dealloc_start_struct_mips64el
-#define qapi_dealloc_start_union qapi_dealloc_start_union_mips64el
-#define qapi_dealloc_type_bool qapi_dealloc_type_bool_mips64el
-#define qapi_dealloc_type_enum qapi_dealloc_type_enum_mips64el
-#define qapi_dealloc_type_int qapi_dealloc_type_int_mips64el
-#define qapi_dealloc_type_number qapi_dealloc_type_number_mips64el
-#define qapi_dealloc_type_size qapi_dealloc_type_size_mips64el
-#define qapi_dealloc_type_str qapi_dealloc_type_str_mips64el
-#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_mips64el
-#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_mips64el
-#define qapi_free_boolList qapi_free_boolList_mips64el
-#define qapi_free_ErrorClassList qapi_free_ErrorClassList_mips64el
-#define qapi_free_int16List qapi_free_int16List_mips64el
-#define qapi_free_int32List qapi_free_int32List_mips64el
-#define qapi_free_int64List qapi_free_int64List_mips64el
-#define qapi_free_int8List qapi_free_int8List_mips64el
-#define qapi_free_intList qapi_free_intList_mips64el
-#define qapi_free_numberList qapi_free_numberList_mips64el
-#define qapi_free_strList qapi_free_strList_mips64el
-#define qapi_free_uint16List qapi_free_uint16List_mips64el
-#define qapi_free_uint32List qapi_free_uint32List_mips64el
-#define qapi_free_uint64List qapi_free_uint64List_mips64el
-#define qapi_free_uint8List qapi_free_uint8List_mips64el
-#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_mips64el
-#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_mips64el
-#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_mips64el
-#define qbool_destroy_obj qbool_destroy_obj_mips64el
-#define qbool_from_int qbool_from_int_mips64el
-#define qbool_get_int qbool_get_int_mips64el
-#define qbool_type qbool_type_mips64el
-#define qbus_create qbus_create_mips64el
-#define qbus_create_inplace qbus_create_inplace_mips64el
-#define qbus_finalize qbus_finalize_mips64el
-#define qbus_initfn qbus_initfn_mips64el
-#define qbus_realize qbus_realize_mips64el
-#define qdev_create qdev_create_mips64el
-#define qdev_get_type qdev_get_type_mips64el
-#define qdev_register_types qdev_register_types_mips64el
-#define qdev_set_parent_bus qdev_set_parent_bus_mips64el
-#define qdev_try_create qdev_try_create_mips64el
-#define qdict_add_key qdict_add_key_mips64el
-#define qdict_array_split qdict_array_split_mips64el
-#define qdict_clone_shallow qdict_clone_shallow_mips64el
-#define qdict_del qdict_del_mips64el
-#define qdict_destroy_obj qdict_destroy_obj_mips64el
-#define qdict_entry_key qdict_entry_key_mips64el
-#define qdict_entry_value qdict_entry_value_mips64el
-#define qdict_extract_subqdict qdict_extract_subqdict_mips64el
-#define qdict_find qdict_find_mips64el
-#define qdict_first qdict_first_mips64el
-#define qdict_flatten qdict_flatten_mips64el
-#define qdict_flatten_qdict qdict_flatten_qdict_mips64el
-#define qdict_flatten_qlist qdict_flatten_qlist_mips64el
-#define qdict_get qdict_get_mips64el
-#define qdict_get_bool qdict_get_bool_mips64el
-#define qdict_get_double qdict_get_double_mips64el
-#define qdict_get_int qdict_get_int_mips64el
-#define qdict_get_obj qdict_get_obj_mips64el
-#define qdict_get_qdict qdict_get_qdict_mips64el
-#define qdict_get_qlist qdict_get_qlist_mips64el
-#define qdict_get_str qdict_get_str_mips64el
-#define qdict_get_try_bool qdict_get_try_bool_mips64el
-#define qdict_get_try_int qdict_get_try_int_mips64el
-#define qdict_get_try_str qdict_get_try_str_mips64el
-#define qdict_haskey qdict_haskey_mips64el
-#define qdict_has_prefixed_entries qdict_has_prefixed_entries_mips64el
-#define qdict_iter qdict_iter_mips64el
-#define qdict_join qdict_join_mips64el
-#define qdict_new qdict_new_mips64el
-#define qdict_next qdict_next_mips64el
-#define qdict_next_entry qdict_next_entry_mips64el
-#define qdict_put_obj qdict_put_obj_mips64el
-#define qdict_size qdict_size_mips64el
-#define qdict_type qdict_type_mips64el
-#define qemu_clock_get_us qemu_clock_get_us_mips64el
-#define qemu_clock_ptr qemu_clock_ptr_mips64el
-#define qemu_clocks qemu_clocks_mips64el
-#define qemu_get_cpu qemu_get_cpu_mips64el
-#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_mips64el
-#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_mips64el
-#define qemu_get_ram_block qemu_get_ram_block_mips64el
-#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_mips64el
-#define qemu_get_ram_fd qemu_get_ram_fd_mips64el
-#define qemu_get_ram_ptr qemu_get_ram_ptr_mips64el
-#define qemu_host_page_mask qemu_host_page_mask_mips64el
-#define qemu_host_page_size qemu_host_page_size_mips64el
-#define qemu_init_vcpu qemu_init_vcpu_mips64el
-#define qemu_ld_helpers qemu_ld_helpers_mips64el
-#define qemu_log_close qemu_log_close_mips64el
-#define qemu_log_enabled qemu_log_enabled_mips64el
-#define qemu_log_flush qemu_log_flush_mips64el
-#define qemu_loglevel_mask qemu_loglevel_mask_mips64el
-#define qemu_log_vprintf qemu_log_vprintf_mips64el
-#define qemu_oom_check qemu_oom_check_mips64el
-#define qemu_parse_fd qemu_parse_fd_mips64el
-#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips64el
-#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_mips64el
-#define qemu_ram_alloc qemu_ram_alloc_mips64el
-#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips64el
-#define qemu_ram_foreach_block qemu_ram_foreach_block_mips64el
-#define qemu_ram_free qemu_ram_free_mips64el
-#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_mips64el
-#define qemu_ram_ptr_length qemu_ram_ptr_length_mips64el
-#define qemu_ram_remap qemu_ram_remap_mips64el
-#define qemu_ram_setup_dump qemu_ram_setup_dump_mips64el
-#define qemu_ram_unset_idstr qemu_ram_unset_idstr_mips64el
-#define qemu_real_host_page_size qemu_real_host_page_size_mips64el
-#define qemu_st_helpers qemu_st_helpers_mips64el
-#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_mips64el
-#define qemu_try_memalign qemu_try_memalign_mips64el
-#define qentry_destroy qentry_destroy_mips64el
-#define qerror_human qerror_human_mips64el
-#define qerror_report qerror_report_mips64el
-#define qerror_report_err qerror_report_err_mips64el
-#define qfloat_destroy_obj qfloat_destroy_obj_mips64el
-#define qfloat_from_double qfloat_from_double_mips64el
-#define qfloat_get_double qfloat_get_double_mips64el
-#define qfloat_type qfloat_type_mips64el
-#define qint_destroy_obj qint_destroy_obj_mips64el
-#define qint_from_int qint_from_int_mips64el
-#define qint_get_int qint_get_int_mips64el
-#define qint_type qint_type_mips64el
-#define qlist_append_obj qlist_append_obj_mips64el
-#define qlist_copy qlist_copy_mips64el
-#define qlist_copy_elem qlist_copy_elem_mips64el
-#define qlist_destroy_obj qlist_destroy_obj_mips64el
-#define qlist_empty qlist_empty_mips64el
-#define qlist_entry_obj qlist_entry_obj_mips64el
-#define qlist_first qlist_first_mips64el
-#define qlist_iter qlist_iter_mips64el
-#define qlist_new qlist_new_mips64el
-#define qlist_next qlist_next_mips64el
-#define qlist_peek qlist_peek_mips64el
-#define qlist_pop qlist_pop_mips64el
-#define qlist_size qlist_size_mips64el
-#define qlist_size_iter qlist_size_iter_mips64el
-#define qlist_type qlist_type_mips64el
-#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_mips64el
-#define qmp_input_end_list qmp_input_end_list_mips64el
-#define qmp_input_end_struct qmp_input_end_struct_mips64el
-#define qmp_input_get_next_type qmp_input_get_next_type_mips64el
-#define qmp_input_get_object qmp_input_get_object_mips64el
-#define qmp_input_get_visitor qmp_input_get_visitor_mips64el
-#define qmp_input_next_list qmp_input_next_list_mips64el
-#define qmp_input_optional qmp_input_optional_mips64el
-#define qmp_input_pop qmp_input_pop_mips64el
-#define qmp_input_push qmp_input_push_mips64el
-#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_mips64el
-#define qmp_input_start_list qmp_input_start_list_mips64el
-#define qmp_input_start_struct qmp_input_start_struct_mips64el
-#define qmp_input_type_bool qmp_input_type_bool_mips64el
-#define qmp_input_type_int qmp_input_type_int_mips64el
-#define qmp_input_type_number qmp_input_type_number_mips64el
-#define qmp_input_type_str qmp_input_type_str_mips64el
-#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_mips64el
-#define qmp_input_visitor_new qmp_input_visitor_new_mips64el
-#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_mips64el
-#define qmp_output_add_obj qmp_output_add_obj_mips64el
-#define qmp_output_end_list qmp_output_end_list_mips64el
-#define qmp_output_end_struct qmp_output_end_struct_mips64el
-#define qmp_output_first qmp_output_first_mips64el
-#define qmp_output_get_qobject qmp_output_get_qobject_mips64el
-#define qmp_output_get_visitor qmp_output_get_visitor_mips64el
-#define qmp_output_last qmp_output_last_mips64el
-#define qmp_output_next_list qmp_output_next_list_mips64el
-#define qmp_output_pop qmp_output_pop_mips64el
-#define qmp_output_push_obj qmp_output_push_obj_mips64el
-#define qmp_output_start_list qmp_output_start_list_mips64el
-#define qmp_output_start_struct qmp_output_start_struct_mips64el
-#define qmp_output_type_bool qmp_output_type_bool_mips64el
-#define qmp_output_type_int qmp_output_type_int_mips64el
-#define qmp_output_type_number qmp_output_type_number_mips64el
-#define qmp_output_type_str qmp_output_type_str_mips64el
-#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_mips64el
-#define qmp_output_visitor_new qmp_output_visitor_new_mips64el
-#define qobject_decref qobject_decref_mips64el
-#define qobject_to_qbool qobject_to_qbool_mips64el
-#define qobject_to_qdict qobject_to_qdict_mips64el
-#define qobject_to_qfloat qobject_to_qfloat_mips64el
-#define qobject_to_qint qobject_to_qint_mips64el
-#define qobject_to_qlist qobject_to_qlist_mips64el
-#define qobject_to_qstring qobject_to_qstring_mips64el
-#define qobject_type qobject_type_mips64el
-#define qstring_append qstring_append_mips64el
-#define qstring_append_chr qstring_append_chr_mips64el
-#define qstring_append_int qstring_append_int_mips64el
-#define qstring_destroy_obj qstring_destroy_obj_mips64el
-#define qstring_from_escaped_str qstring_from_escaped_str_mips64el
-#define qstring_from_str qstring_from_str_mips64el
-#define qstring_from_substr qstring_from_substr_mips64el
-#define qstring_get_length qstring_get_length_mips64el
-#define qstring_get_str qstring_get_str_mips64el
-#define qstring_new qstring_new_mips64el
-#define qstring_type qstring_type_mips64el
-#define ram_block_add ram_block_add_mips64el
-#define ram_size ram_size_mips64el
-#define range_compare range_compare_mips64el
-#define range_covers_byte range_covers_byte_mips64el
-#define range_get_last range_get_last_mips64el
-#define range_merge range_merge_mips64el
-#define ranges_can_merge ranges_can_merge_mips64el
-#define raw_read raw_read_mips64el
-#define raw_write raw_write_mips64el
-#define rcon rcon_mips64el
-#define read_raw_cp_reg read_raw_cp_reg_mips64el
-#define recip_estimate recip_estimate_mips64el
-#define recip_sqrt_estimate recip_sqrt_estimate_mips64el
-#define register_cp_regs_for_features register_cp_regs_for_features_mips64el
-#define register_multipage register_multipage_mips64el
-#define register_subpage register_subpage_mips64el
-#define register_tm_clones register_tm_clones_mips64el
-#define register_types_object register_types_object_mips64el
-#define regnames regnames_mips64el
-#define render_memory_region render_memory_region_mips64el
-#define reset_all_temps reset_all_temps_mips64el
-#define reset_temp reset_temp_mips64el
-#define rol32 rol32_mips64el
-#define rol64 rol64_mips64el
-#define ror32 ror32_mips64el
-#define ror64 ror64_mips64el
-#define roundAndPackFloat128 roundAndPackFloat128_mips64el
-#define roundAndPackFloat16 roundAndPackFloat16_mips64el
-#define roundAndPackFloat32 roundAndPackFloat32_mips64el
-#define roundAndPackFloat64 roundAndPackFloat64_mips64el
-#define roundAndPackFloatx80 roundAndPackFloatx80_mips64el
-#define roundAndPackInt32 roundAndPackInt32_mips64el
-#define roundAndPackInt64 roundAndPackInt64_mips64el
-#define roundAndPackUint64 roundAndPackUint64_mips64el
-#define round_to_inf round_to_inf_mips64el
-#define run_on_cpu run_on_cpu_mips64el
-#define s0 s0_mips64el
-#define S0 S0_mips64el
-#define s1 s1_mips64el
-#define S1 S1_mips64el
-#define sa1100_initfn sa1100_initfn_mips64el
-#define sa1110_initfn sa1110_initfn_mips64el
-#define save_globals save_globals_mips64el
-#define scr_write scr_write_mips64el
-#define sctlr_write sctlr_write_mips64el
-#define set_bit set_bit_mips64el
-#define set_bits set_bits_mips64el
-#define set_default_nan_mode set_default_nan_mode_mips64el
-#define set_feature set_feature_mips64el
-#define set_float_detect_tininess set_float_detect_tininess_mips64el
-#define set_float_exception_flags set_float_exception_flags_mips64el
-#define set_float_rounding_mode set_float_rounding_mode_mips64el
-#define set_flush_inputs_to_zero set_flush_inputs_to_zero_mips64el
-#define set_flush_to_zero set_flush_to_zero_mips64el
-#define set_swi_errno set_swi_errno_mips64el
-#define sextract32 sextract32_mips64el
-#define sextract64 sextract64_mips64el
-#define shift128ExtraRightJamming shift128ExtraRightJamming_mips64el
-#define shift128Right shift128Right_mips64el
-#define shift128RightJamming shift128RightJamming_mips64el
-#define shift32RightJamming shift32RightJamming_mips64el
-#define shift64ExtraRightJamming shift64ExtraRightJamming_mips64el
-#define shift64RightJamming shift64RightJamming_mips64el
-#define shifter_out_im shifter_out_im_mips64el
-#define shortShift128Left shortShift128Left_mips64el
-#define shortShift192Left shortShift192Left_mips64el
-#define simple_mpu_ap_bits simple_mpu_ap_bits_mips64el
-#define size_code_gen_buffer size_code_gen_buffer_mips64el
-#define softmmu_lock_user softmmu_lock_user_mips64el
-#define softmmu_lock_user_string softmmu_lock_user_string_mips64el
-#define softmmu_tget32 softmmu_tget32_mips64el
-#define softmmu_tget8 softmmu_tget8_mips64el
-#define softmmu_tput32 softmmu_tput32_mips64el
-#define softmmu_unlock_user softmmu_unlock_user_mips64el
-#define sort_constraints sort_constraints_mips64el
-#define sp_el0_access sp_el0_access_mips64el
-#define spsel_read spsel_read_mips64el
-#define spsel_write spsel_write_mips64el
-#define start_list start_list_mips64el
-#define stb_p stb_p_mips64el
-#define stb_phys stb_phys_mips64el
-#define stl_be_p stl_be_p_mips64el
-#define stl_be_phys stl_be_phys_mips64el
-#define stl_he_p stl_he_p_mips64el
-#define stl_le_p stl_le_p_mips64el
-#define stl_le_phys stl_le_phys_mips64el
-#define stl_phys stl_phys_mips64el
-#define stl_phys_internal stl_phys_internal_mips64el
-#define stl_phys_notdirty stl_phys_notdirty_mips64el
-#define store_cpu_offset store_cpu_offset_mips64el
-#define store_reg store_reg_mips64el
-#define store_reg_bx store_reg_bx_mips64el
-#define store_reg_from_load store_reg_from_load_mips64el
-#define stq_be_p stq_be_p_mips64el
-#define stq_be_phys stq_be_phys_mips64el
-#define stq_he_p stq_he_p_mips64el
-#define stq_le_p stq_le_p_mips64el
-#define stq_le_phys stq_le_phys_mips64el
-#define stq_phys stq_phys_mips64el
-#define string_input_get_visitor string_input_get_visitor_mips64el
-#define string_input_visitor_cleanup string_input_visitor_cleanup_mips64el
-#define string_input_visitor_new string_input_visitor_new_mips64el
-#define strongarm_cp_reginfo strongarm_cp_reginfo_mips64el
-#define strstart strstart_mips64el
-#define strtosz strtosz_mips64el
-#define strtosz_suffix strtosz_suffix_mips64el
-#define stw_be_p stw_be_p_mips64el
-#define stw_be_phys stw_be_phys_mips64el
-#define stw_he_p stw_he_p_mips64el
-#define stw_le_p stw_le_p_mips64el
-#define stw_le_phys stw_le_phys_mips64el
-#define stw_phys stw_phys_mips64el
-#define stw_phys_internal stw_phys_internal_mips64el
-#define sub128 sub128_mips64el
-#define sub16_sat sub16_sat_mips64el
-#define sub16_usat sub16_usat_mips64el
-#define sub192 sub192_mips64el
-#define sub8_sat sub8_sat_mips64el
-#define sub8_usat sub8_usat_mips64el
-#define subFloat128Sigs subFloat128Sigs_mips64el
-#define subFloat32Sigs subFloat32Sigs_mips64el
-#define subFloat64Sigs subFloat64Sigs_mips64el
-#define subFloatx80Sigs subFloatx80Sigs_mips64el
-#define subpage_accepts subpage_accepts_mips64el
-#define subpage_init subpage_init_mips64el
-#define subpage_ops subpage_ops_mips64el
-#define subpage_read subpage_read_mips64el
-#define subpage_register subpage_register_mips64el
-#define subpage_write subpage_write_mips64el
-#define suffix_mul suffix_mul_mips64el
-#define swap_commutative swap_commutative_mips64el
-#define swap_commutative2 swap_commutative2_mips64el
-#define switch_mode switch_mode_mips64el
-#define switch_v7m_sp switch_v7m_sp_mips64el
-#define syn_aa32_bkpt syn_aa32_bkpt_mips64el
-#define syn_aa32_hvc syn_aa32_hvc_mips64el
-#define syn_aa32_smc syn_aa32_smc_mips64el
-#define syn_aa32_svc syn_aa32_svc_mips64el
-#define syn_breakpoint syn_breakpoint_mips64el
-#define sync_globals sync_globals_mips64el
-#define syn_cp14_rrt_trap syn_cp14_rrt_trap_mips64el
-#define syn_cp14_rt_trap syn_cp14_rt_trap_mips64el
-#define syn_cp15_rrt_trap syn_cp15_rrt_trap_mips64el
-#define syn_cp15_rt_trap syn_cp15_rt_trap_mips64el
-#define syn_data_abort syn_data_abort_mips64el
-#define syn_fp_access_trap syn_fp_access_trap_mips64el
-#define syn_insn_abort syn_insn_abort_mips64el
-#define syn_swstep syn_swstep_mips64el
-#define syn_uncategorized syn_uncategorized_mips64el
-#define syn_watchpoint syn_watchpoint_mips64el
-#define syscall_err syscall_err_mips64el
-#define system_bus_class_init system_bus_class_init_mips64el
-#define system_bus_info system_bus_info_mips64el
-#define t2ee_cp_reginfo t2ee_cp_reginfo_mips64el
-#define table_logic_cc table_logic_cc_mips64el
-#define target_parse_constraint target_parse_constraint_mips64el
-#define target_words_bigendian target_words_bigendian_mips64el
-#define tb_add_jump tb_add_jump_mips64el
-#define tb_alloc tb_alloc_mips64el
-#define tb_alloc_page tb_alloc_page_mips64el
-#define tb_check_watchpoint tb_check_watchpoint_mips64el
-#define tb_find_fast tb_find_fast_mips64el
-#define tb_find_pc tb_find_pc_mips64el
-#define tb_find_slow tb_find_slow_mips64el
-#define tb_flush tb_flush_mips64el
-#define tb_flush_jmp_cache tb_flush_jmp_cache_mips64el
-#define tb_free tb_free_mips64el
-#define tb_gen_code tb_gen_code_mips64el
-#define tb_hash_remove tb_hash_remove_mips64el
-#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips64el
-#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips64el
-#define tb_invalidate_phys_range tb_invalidate_phys_range_mips64el
-#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_mips64el
-#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_mips64el
-#define tb_jmp_remove tb_jmp_remove_mips64el
-#define tb_link_page tb_link_page_mips64el
-#define tb_page_remove tb_page_remove_mips64el
-#define tb_phys_hash_func tb_phys_hash_func_mips64el
-#define tb_phys_invalidate tb_phys_invalidate_mips64el
-#define tb_reset_jump tb_reset_jump_mips64el
-#define tb_set_jmp_target tb_set_jmp_target_mips64el
-#define tcg_accel_class_init tcg_accel_class_init_mips64el
-#define tcg_accel_type tcg_accel_type_mips64el
-#define tcg_add_param_i32 tcg_add_param_i32_mips64el
-#define tcg_add_param_i64 tcg_add_param_i64_mips64el
-#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_mips64el
-#define tcg_allowed tcg_allowed_mips64el
-#define tcg_canonicalize_memop tcg_canonicalize_memop_mips64el
-#define tcg_commit tcg_commit_mips64el
-#define tcg_cond_to_jcc tcg_cond_to_jcc_mips64el
-#define tcg_constant_folding tcg_constant_folding_mips64el
+#define tcg_can_emit_vec_op tcg_can_emit_vec_op_mips64el
+#define tcg_expand_vec_op tcg_expand_vec_op_mips64el
+#define tcg_register_jit tcg_register_jit_mips64el
+#define tcg_tb_insert tcg_tb_insert_mips64el
+#define tcg_tb_remove tcg_tb_remove_mips64el
+#define tcg_tb_lookup tcg_tb_lookup_mips64el
+#define tcg_tb_foreach tcg_tb_foreach_mips64el
+#define tcg_nb_tbs tcg_nb_tbs_mips64el
+#define tcg_region_reset_all tcg_region_reset_all_mips64el
+#define tcg_region_init tcg_region_init_mips64el
+#define tcg_code_size tcg_code_size_mips64el
+#define tcg_code_capacity tcg_code_capacity_mips64el
+#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_mips64el
+#define tcg_malloc_internal tcg_malloc_internal_mips64el
+#define tcg_pool_reset tcg_pool_reset_mips64el
+#define tcg_context_init tcg_context_init_mips64el
+#define tcg_tb_alloc tcg_tb_alloc_mips64el
+#define tcg_prologue_init tcg_prologue_init_mips64el
+#define tcg_func_start tcg_func_start_mips64el
+#define tcg_set_frame tcg_set_frame_mips64el
+#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips64el
+#define tcg_temp_new_internal tcg_temp_new_internal_mips64el
+#define tcg_temp_new_vec tcg_temp_new_vec_mips64el
+#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_mips64el
+#define tcg_temp_free_internal tcg_temp_free_internal_mips64el
 #define tcg_const_i32 tcg_const_i32_mips64el
 #define tcg_const_i64 tcg_const_i64_mips64el
 #define tcg_const_local_i32 tcg_const_local_i32_mips64el
 #define tcg_const_local_i64 tcg_const_local_i64_mips64el
-#define tcg_context_init tcg_context_init_mips64el
-#define tcg_cpu_address_space_init tcg_cpu_address_space_init_mips64el
-#define tcg_cpu_exec tcg_cpu_exec_mips64el
-#define tcg_current_code_size tcg_current_code_size_mips64el
-#define tcg_dump_info tcg_dump_info_mips64el
-#define tcg_dump_ops tcg_dump_ops_mips64el
-#define tcg_exec_all tcg_exec_all_mips64el
-#define tcg_find_helper tcg_find_helper_mips64el
-#define tcg_func_start tcg_func_start_mips64el
-#define tcg_gen_abs_i32 tcg_gen_abs_i32_mips64el
-#define tcg_gen_add2_i32 tcg_gen_add2_i32_mips64el
-#define tcg_gen_add_i32 tcg_gen_add_i32_mips64el
-#define tcg_gen_add_i64 tcg_gen_add_i64_mips64el
-#define tcg_gen_addi_i32 tcg_gen_addi_i32_mips64el
-#define tcg_gen_addi_i64 tcg_gen_addi_i64_mips64el
-#define tcg_gen_andc_i32 tcg_gen_andc_i32_mips64el
-#define tcg_gen_and_i32 tcg_gen_and_i32_mips64el
-#define tcg_gen_and_i64 tcg_gen_and_i64_mips64el
-#define tcg_gen_andi_i32 tcg_gen_andi_i32_mips64el
-#define tcg_gen_andi_i64 tcg_gen_andi_i64_mips64el
-#define tcg_gen_br tcg_gen_br_mips64el
-#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips64el
-#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips64el
-#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips64el
-#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips64el
-#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips64el
+#define tcg_op_supported tcg_op_supported_mips64el
 #define tcg_gen_callN tcg_gen_callN_mips64el
+#define tcg_op_remove tcg_op_remove_mips64el
+#define tcg_emit_op tcg_emit_op_mips64el
+#define tcg_op_insert_before tcg_op_insert_before_mips64el
+#define tcg_op_insert_after tcg_op_insert_after_mips64el
+#define tcg_cpu_exec_time tcg_cpu_exec_time_mips64el
 #define tcg_gen_code tcg_gen_code_mips64el
-#define tcg_gen_code_common tcg_gen_code_common_mips64el
-#define tcg_gen_code_search_pc tcg_gen_code_search_pc_mips64el
-#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips64el
-#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_mips64el
-#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips64el
-#define tcg_gen_exit_tb tcg_gen_exit_tb_mips64el
-#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips64el
-#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips64el
-#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips64el
-#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips64el
-#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips64el
-#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips64el
-#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips64el
-#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips64el
-#define tcg_gen_goto_tb tcg_gen_goto_tb_mips64el
-#define tcg_gen_ld_i32 tcg_gen_ld_i32_mips64el
-#define tcg_gen_ld_i64 tcg_gen_ld_i64_mips64el
-#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_mips64el
-#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_mips64el
-#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips64el
-#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips64el
-#define tcg_gen_mov_i32 tcg_gen_mov_i32_mips64el
-#define tcg_gen_mov_i64 tcg_gen_mov_i64_mips64el
-#define tcg_gen_movi_i32 tcg_gen_movi_i32_mips64el
-#define tcg_gen_movi_i64 tcg_gen_movi_i64_mips64el
-#define tcg_gen_mul_i32 tcg_gen_mul_i32_mips64el
-#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips64el
-#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips64el
-#define tcg_gen_neg_i32 tcg_gen_neg_i32_mips64el
-#define tcg_gen_neg_i64 tcg_gen_neg_i64_mips64el
-#define tcg_gen_not_i32 tcg_gen_not_i32_mips64el
-#define tcg_gen_op0 tcg_gen_op0_mips64el
-#define tcg_gen_op1i tcg_gen_op1i_mips64el
-#define tcg_gen_op2_i32 tcg_gen_op2_i32_mips64el
-#define tcg_gen_op2_i64 tcg_gen_op2_i64_mips64el
-#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_mips64el
-#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_mips64el
-#define tcg_gen_op3_i32 tcg_gen_op3_i32_mips64el
-#define tcg_gen_op3_i64 tcg_gen_op3_i64_mips64el
-#define tcg_gen_op4_i32 tcg_gen_op4_i32_mips64el
-#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_mips64el
-#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_mips64el
-#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_mips64el
-#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_mips64el
-#define tcg_gen_op6_i32 tcg_gen_op6_i32_mips64el
-#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_mips64el
-#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_mips64el
-#define tcg_gen_orc_i32 tcg_gen_orc_i32_mips64el
-#define tcg_gen_or_i32 tcg_gen_or_i32_mips64el
-#define tcg_gen_or_i64 tcg_gen_or_i64_mips64el
+#define tcg_gen_op1 tcg_gen_op1_mips64el
+#define tcg_gen_op2 tcg_gen_op2_mips64el
+#define tcg_gen_op3 tcg_gen_op3_mips64el
+#define tcg_gen_op4 tcg_gen_op4_mips64el
+#define tcg_gen_op5 tcg_gen_op5_mips64el
+#define tcg_gen_op6 tcg_gen_op6_mips64el
+#define tcg_gen_mb tcg_gen_mb_mips64el
+#define tcg_gen_addi_i32 tcg_gen_addi_i32_mips64el
+#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_mips64el
+#define tcg_gen_subi_i32 tcg_gen_subi_i32_mips64el
+#define tcg_gen_andi_i32 tcg_gen_andi_i32_mips64el
 #define tcg_gen_ori_i32 tcg_gen_ori_i32_mips64el
-#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips64el
-#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips64el
-#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips64el
-#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips64el
+#define tcg_gen_xori_i32 tcg_gen_xori_i32_mips64el
+#define tcg_gen_shli_i32 tcg_gen_shli_i32_mips64el
+#define tcg_gen_shri_i32 tcg_gen_shri_i32_mips64el
+#define tcg_gen_sari_i32 tcg_gen_sari_i32_mips64el
+#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips64el
+#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips64el
+#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips64el
+#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_mips64el
+#define tcg_gen_muli_i32 tcg_gen_muli_i32_mips64el
+#define tcg_gen_div_i32 tcg_gen_div_i32_mips64el
+#define tcg_gen_rem_i32 tcg_gen_rem_i32_mips64el
+#define tcg_gen_divu_i32 tcg_gen_divu_i32_mips64el
+#define tcg_gen_remu_i32 tcg_gen_remu_i32_mips64el
+#define tcg_gen_andc_i32 tcg_gen_andc_i32_mips64el
+#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_mips64el
+#define tcg_gen_nand_i32 tcg_gen_nand_i32_mips64el
+#define tcg_gen_nor_i32 tcg_gen_nor_i32_mips64el
+#define tcg_gen_orc_i32 tcg_gen_orc_i32_mips64el
+#define tcg_gen_clz_i32 tcg_gen_clz_i32_mips64el
+#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_mips64el
+#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_mips64el
+#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_mips64el
+#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_mips64el
+#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_mips64el
 #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_mips64el
 #define tcg_gen_rotli_i32
tcg_gen_rotli_i32_mips64el #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mips64el #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mips64el -#define tcg_gen_sar_i32 tcg_gen_sar_i32_mips64el -#define tcg_gen_sari_i32 tcg_gen_sari_i32_mips64el -#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips64el -#define tcg_gen_shl_i32 tcg_gen_shl_i32_mips64el -#define tcg_gen_shl_i64 tcg_gen_shl_i64_mips64el -#define tcg_gen_shli_i32 tcg_gen_shli_i32_mips64el +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips64el +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_mips64el +#define tcg_gen_extract_i32 tcg_gen_extract_i32_mips64el +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_mips64el +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_mips64el +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips64el +#define tcg_gen_add2_i32 tcg_gen_add2_i32_mips64el +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_mips64el +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips64el +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips64el +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_mips64el +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips64el +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips64el +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips64el +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips64el +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips64el +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips64el +#define tcg_gen_smin_i32 tcg_gen_smin_i32_mips64el +#define tcg_gen_umin_i32 tcg_gen_umin_i32_mips64el +#define tcg_gen_smax_i32 tcg_gen_smax_i32_mips64el +#define tcg_gen_umax_i32 tcg_gen_umax_i32_mips64el +#define tcg_gen_abs_i32 tcg_gen_abs_i32_mips64el +#define tcg_gen_addi_i64 tcg_gen_addi_i64_mips64el +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_mips64el +#define tcg_gen_subi_i64 tcg_gen_subi_i64_mips64el +#define tcg_gen_andi_i64 tcg_gen_andi_i64_mips64el +#define tcg_gen_ori_i64 tcg_gen_ori_i64_mips64el +#define tcg_gen_xori_i64 tcg_gen_xori_i64_mips64el #define tcg_gen_shli_i64 tcg_gen_shli_i64_mips64el -#define tcg_gen_shr_i32 tcg_gen_shr_i32_mips64el -#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_mips64el -#define tcg_gen_shr_i64 tcg_gen_shr_i64_mips64el -#define tcg_gen_shri_i32 tcg_gen_shri_i32_mips64el #define tcg_gen_shri_i64 tcg_gen_shri_i64_mips64el -#define tcg_gen_st_i32 tcg_gen_st_i32_mips64el -#define tcg_gen_st_i64 tcg_gen_st_i64_mips64el -#define tcg_gen_sub_i32 tcg_gen_sub_i32_mips64el -#define tcg_gen_sub_i64 tcg_gen_sub_i64_mips64el -#define tcg_gen_subi_i32 tcg_gen_subi_i32_mips64el -#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_mips64el -#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_mips64el -#define tcg_gen_xor_i32 tcg_gen_xor_i32_mips64el -#define tcg_gen_xor_i64 tcg_gen_xor_i64_mips64el -#define tcg_gen_xori_i32 tcg_gen_xori_i32_mips64el -#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_mips64el -#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_mips64el -#define tcg_get_arg_str_idx tcg_get_arg_str_idx_mips64el -#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_mips64el -#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_mips64el -#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips64el -#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_mips64el -#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_mips64el -#define tcg_global_reg_new_internal tcg_global_reg_new_internal_mips64el -#define tcg_handle_interrupt tcg_handle_interrupt_mips64el -#define tcg_init tcg_init_mips64el -#define tcg_invert_cond tcg_invert_cond_mips64el -#define 
tcg_la_bb_end tcg_la_bb_end_mips64el -#define tcg_la_br_end tcg_la_br_end_mips64el -#define tcg_la_func_end tcg_la_func_end_mips64el -#define tcg_liveness_analysis tcg_liveness_analysis_mips64el -#define tcg_malloc tcg_malloc_mips64el -#define tcg_malloc_internal tcg_malloc_internal_mips64el -#define tcg_op_defs_org tcg_op_defs_org_mips64el -#define tcg_opt_gen_mov tcg_opt_gen_mov_mips64el -#define tcg_opt_gen_movi tcg_opt_gen_movi_mips64el -#define tcg_optimize tcg_optimize_mips64el -#define tcg_out16 tcg_out16_mips64el -#define tcg_out32 tcg_out32_mips64el -#define tcg_out64 tcg_out64_mips64el -#define tcg_out8 tcg_out8_mips64el -#define tcg_out_addi tcg_out_addi_mips64el -#define tcg_out_branch tcg_out_branch_mips64el -#define tcg_out_brcond32 tcg_out_brcond32_mips64el -#define tcg_out_brcond64 tcg_out_brcond64_mips64el -#define tcg_out_bswap32 tcg_out_bswap32_mips64el -#define tcg_out_bswap64 tcg_out_bswap64_mips64el -#define tcg_out_call tcg_out_call_mips64el -#define tcg_out_cmp tcg_out_cmp_mips64el -#define tcg_out_ext16s tcg_out_ext16s_mips64el -#define tcg_out_ext16u tcg_out_ext16u_mips64el -#define tcg_out_ext32s tcg_out_ext32s_mips64el -#define tcg_out_ext32u tcg_out_ext32u_mips64el -#define tcg_out_ext8s tcg_out_ext8s_mips64el -#define tcg_out_ext8u tcg_out_ext8u_mips64el -#define tcg_out_jmp tcg_out_jmp_mips64el -#define tcg_out_jxx tcg_out_jxx_mips64el -#define tcg_out_label tcg_out_label_mips64el -#define tcg_out_ld tcg_out_ld_mips64el -#define tcg_out_modrm tcg_out_modrm_mips64el -#define tcg_out_modrm_offset tcg_out_modrm_offset_mips64el -#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_mips64el -#define tcg_out_mov tcg_out_mov_mips64el -#define tcg_out_movcond32 tcg_out_movcond32_mips64el -#define tcg_out_movcond64 tcg_out_movcond64_mips64el -#define tcg_out_movi tcg_out_movi_mips64el -#define tcg_out_op tcg_out_op_mips64el -#define tcg_out_pop tcg_out_pop_mips64el -#define tcg_out_push tcg_out_push_mips64el -#define tcg_out_qemu_ld tcg_out_qemu_ld_mips64el -#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_mips64el -#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_mips64el -#define tcg_out_qemu_st tcg_out_qemu_st_mips64el -#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_mips64el -#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_mips64el -#define tcg_out_reloc tcg_out_reloc_mips64el -#define tcg_out_rolw_8 tcg_out_rolw_8_mips64el -#define tcg_out_setcond32 tcg_out_setcond32_mips64el -#define tcg_out_setcond64 tcg_out_setcond64_mips64el -#define tcg_out_shifti tcg_out_shifti_mips64el -#define tcg_out_st tcg_out_st_mips64el -#define tcg_out_tb_finalize tcg_out_tb_finalize_mips64el -#define tcg_out_tb_init tcg_out_tb_init_mips64el -#define tcg_out_tlb_load tcg_out_tlb_load_mips64el -#define tcg_out_vex_modrm tcg_out_vex_modrm_mips64el -#define tcg_patch32 tcg_patch32_mips64el -#define tcg_patch8 tcg_patch8_mips64el -#define tcg_pcrel_diff tcg_pcrel_diff_mips64el -#define tcg_pool_reset tcg_pool_reset_mips64el -#define tcg_prologue_init tcg_prologue_init_mips64el -#define tcg_ptr_byte_diff tcg_ptr_byte_diff_mips64el -#define tcg_reg_alloc tcg_reg_alloc_mips64el -#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_mips64el -#define tcg_reg_alloc_call tcg_reg_alloc_call_mips64el -#define tcg_reg_alloc_mov tcg_reg_alloc_mov_mips64el -#define tcg_reg_alloc_movi tcg_reg_alloc_movi_mips64el -#define tcg_reg_alloc_op tcg_reg_alloc_op_mips64el -#define tcg_reg_alloc_start tcg_reg_alloc_start_mips64el -#define tcg_reg_free tcg_reg_free_mips64el 
-#define tcg_reg_sync tcg_reg_sync_mips64el -#define tcg_set_frame tcg_set_frame_mips64el -#define tcg_set_nop tcg_set_nop_mips64el -#define tcg_swap_cond tcg_swap_cond_mips64el -#define tcg_target_callee_save_regs tcg_target_callee_save_regs_mips64el -#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_mips64el -#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_mips64el -#define tcg_target_const_match tcg_target_const_match_mips64el -#define tcg_target_init tcg_target_init_mips64el -#define tcg_target_qemu_prologue tcg_target_qemu_prologue_mips64el -#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_mips64el -#define tcg_temp_alloc tcg_temp_alloc_mips64el -#define tcg_temp_free_i32 tcg_temp_free_i32_mips64el -#define tcg_temp_free_i64 tcg_temp_free_i64_mips64el -#define tcg_temp_free_internal tcg_temp_free_internal_mips64el -#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_mips64el -#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_mips64el -#define tcg_temp_new_i32 tcg_temp_new_i32_mips64el -#define tcg_temp_new_i64 tcg_temp_new_i64_mips64el -#define tcg_temp_new_internal tcg_temp_new_internal_mips64el -#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_mips64el -#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_mips64el -#define tdb_hash tdb_hash_mips64el -#define teecr_write teecr_write_mips64el -#define teehbr_access teehbr_access_mips64el -#define temp_allocate_frame temp_allocate_frame_mips64el -#define temp_dead temp_dead_mips64el -#define temps_are_copies temps_are_copies_mips64el -#define temp_save temp_save_mips64el -#define temp_sync temp_sync_mips64el -#define tgen_arithi tgen_arithi_mips64el -#define tgen_arithr tgen_arithr_mips64el -#define thumb2_logic_op thumb2_logic_op_mips64el -#define ti925t_initfn ti925t_initfn_mips64el -#define tlb_add_large_page tlb_add_large_page_mips64el -#define tlb_flush_entry tlb_flush_entry_mips64el -#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips64el -#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips64el -#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mips64el -#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_mips64el -#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_mips64el -#define tlbi_aa64_va_write tlbi_aa64_va_write_mips64el -#define tlbiall_is_write tlbiall_is_write_mips64el -#define tlbiall_write tlbiall_write_mips64el -#define tlbiasid_is_write tlbiasid_is_write_mips64el -#define tlbiasid_write tlbiasid_write_mips64el -#define tlbimvaa_is_write tlbimvaa_is_write_mips64el -#define tlbimvaa_write tlbimvaa_write_mips64el -#define tlbimva_is_write tlbimva_is_write_mips64el -#define tlbimva_write tlbimva_write_mips64el -#define tlb_is_dirty_ram tlb_is_dirty_ram_mips64el +#define tcg_gen_sari_i64 tcg_gen_sari_i64_mips64el +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips64el +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_mips64el +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_mips64el +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_mips64el +#define tcg_gen_muli_i64 tcg_gen_muli_i64_mips64el +#define tcg_gen_div_i64 tcg_gen_div_i64_mips64el +#define tcg_gen_rem_i64 tcg_gen_rem_i64_mips64el +#define tcg_gen_divu_i64 tcg_gen_divu_i64_mips64el +#define tcg_gen_remu_i64 tcg_gen_remu_i64_mips64el +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_mips64el +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_mips64el +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips64el +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_mips64el +#define tcg_gen_ext16u_i64 
tcg_gen_ext16u_i64_mips64el +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips64el +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_mips64el +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_mips64el +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_mips64el +#define tcg_gen_not_i64 tcg_gen_not_i64_mips64el +#define tcg_gen_andc_i64 tcg_gen_andc_i64_mips64el +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_mips64el +#define tcg_gen_nand_i64 tcg_gen_nand_i64_mips64el +#define tcg_gen_nor_i64 tcg_gen_nor_i64_mips64el +#define tcg_gen_orc_i64 tcg_gen_orc_i64_mips64el +#define tcg_gen_clz_i64 tcg_gen_clz_i64_mips64el +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_mips64el +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_mips64el +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_mips64el +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_mips64el +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_mips64el +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_mips64el +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_mips64el +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_mips64el +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_mips64el +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_mips64el +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_mips64el +#define tcg_gen_extract_i64 tcg_gen_extract_i64_mips64el +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_mips64el +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_mips64el +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips64el +#define tcg_gen_add2_i64 tcg_gen_add2_i64_mips64el +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_mips64el +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_mips64el +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_mips64el +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_mips64el +#define tcg_gen_smin_i64 tcg_gen_smin_i64_mips64el +#define tcg_gen_umin_i64 tcg_gen_umin_i64_mips64el +#define tcg_gen_smax_i64 tcg_gen_smax_i64_mips64el +#define tcg_gen_umax_i64 tcg_gen_umax_i64_mips64el +#define tcg_gen_abs_i64 tcg_gen_abs_i64_mips64el +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_mips64el +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_mips64el +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips64el +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips64el +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips64el +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_mips64el +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_mips64el +#define tcg_gen_exit_tb tcg_gen_exit_tb_mips64el +#define tcg_gen_goto_tb tcg_gen_goto_tb_mips64el +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_mips64el +#define check_exit_request check_exit_request_mips64el +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips64el +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips64el +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips64el +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips64el +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_mips64el +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_mips64el +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_mips64el +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_mips64el +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_mips64el +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_mips64el +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_mips64el +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_mips64el +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_mips64el +#define 
tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_mips64el +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_mips64el +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_mips64el +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_mips64el +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_mips64el +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_mips64el +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_mips64el +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_mips64el +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_mips64el +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_mips64el +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_mips64el +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_mips64el +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_mips64el +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_mips64el +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_mips64el +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_mips64el +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_mips64el +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_mips64el +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_mips64el +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_mips64el +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_mips64el +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_mips64el +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_mips64el +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_mips64el +#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_mips64el +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_mips64el +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_mips64el +#define simd_desc simd_desc_mips64el +#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_mips64el +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_mips64el +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_mips64el +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_mips64el +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_mips64el +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_mips64el +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_mips64el +#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_mips64el +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_mips64el +#define tcg_gen_gvec_2 tcg_gen_gvec_2_mips64el +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_mips64el +#define tcg_gen_gvec_2s tcg_gen_gvec_2s_mips64el +#define tcg_gen_gvec_3 tcg_gen_gvec_3_mips64el +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_mips64el +#define tcg_gen_gvec_4 tcg_gen_gvec_4_mips64el +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_mips64el +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mips64el +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mips64el +#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_mips64el +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mips64el +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mips64el +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mips64el +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mips64el +#define tcg_gen_gvec_not tcg_gen_gvec_not_mips64el +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_mips64el +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_mips64el +#define 
tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_mips64el +#define tcg_gen_gvec_add tcg_gen_gvec_add_mips64el +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_mips64el +#define tcg_gen_gvec_addi tcg_gen_gvec_addi_mips64el +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_mips64el +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_mips64el +#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_mips64el +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_mips64el +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_mips64el +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_mips64el +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_mips64el +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_mips64el +#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_mips64el +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_mips64el +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_mips64el +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_mips64el +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_mips64el +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_mips64el +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_mips64el +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_mips64el +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_mips64el +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_mips64el +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_mips64el +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_mips64el +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_mips64el +#define tcg_gen_gvec_and tcg_gen_gvec_and_mips64el +#define tcg_gen_gvec_or tcg_gen_gvec_or_mips64el +#define tcg_gen_gvec_xor tcg_gen_gvec_xor_mips64el +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_mips64el +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_mips64el +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_mips64el +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_mips64el +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_mips64el +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_mips64el +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_mips64el +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_mips64el +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_mips64el +#define tcg_gen_gvec_ors tcg_gen_gvec_ors_mips64el +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_mips64el +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_mips64el +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_mips64el +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_mips64el +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_mips64el +#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_mips64el +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_mips64el +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_mips64el +#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_mips64el +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_mips64el +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_mips64el +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_mips64el +#define tcg_gen_gvec_sars tcg_gen_gvec_sars_mips64el +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_mips64el +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_mips64el +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_mips64el +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mips64el +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mips64el +#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_mips64el +#define vec_gen_2 vec_gen_2_mips64el +#define vec_gen_3 vec_gen_3_mips64el +#define vec_gen_4 vec_gen_4_mips64el +#define tcg_gen_mov_vec tcg_gen_mov_vec_mips64el +#define tcg_const_zeros_vec tcg_const_zeros_vec_mips64el +#define tcg_const_ones_vec tcg_const_ones_vec_mips64el +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_mips64el +#define 
tcg_const_ones_vec_matching tcg_const_ones_vec_matching_mips64el +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_mips64el +#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_mips64el +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_mips64el +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_mips64el +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_mips64el +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_mips64el +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_mips64el +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_mips64el +#define tcg_gen_ld_vec tcg_gen_ld_vec_mips64el +#define tcg_gen_st_vec tcg_gen_st_vec_mips64el +#define tcg_gen_stl_vec tcg_gen_stl_vec_mips64el +#define tcg_gen_and_vec tcg_gen_and_vec_mips64el +#define tcg_gen_or_vec tcg_gen_or_vec_mips64el +#define tcg_gen_xor_vec tcg_gen_xor_vec_mips64el +#define tcg_gen_andc_vec tcg_gen_andc_vec_mips64el +#define tcg_gen_orc_vec tcg_gen_orc_vec_mips64el +#define tcg_gen_nand_vec tcg_gen_nand_vec_mips64el +#define tcg_gen_nor_vec tcg_gen_nor_vec_mips64el +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_mips64el +#define tcg_gen_not_vec tcg_gen_not_vec_mips64el +#define tcg_gen_neg_vec tcg_gen_neg_vec_mips64el +#define tcg_gen_abs_vec tcg_gen_abs_vec_mips64el +#define tcg_gen_shli_vec tcg_gen_shli_vec_mips64el +#define tcg_gen_shri_vec tcg_gen_shri_vec_mips64el +#define tcg_gen_sari_vec tcg_gen_sari_vec_mips64el +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_mips64el +#define tcg_gen_add_vec tcg_gen_add_vec_mips64el +#define tcg_gen_sub_vec tcg_gen_sub_vec_mips64el +#define tcg_gen_mul_vec tcg_gen_mul_vec_mips64el +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_mips64el +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_mips64el +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_mips64el +#define tcg_gen_ussub_vec tcg_gen_ussub_vec_mips64el +#define tcg_gen_smin_vec tcg_gen_smin_vec_mips64el +#define tcg_gen_umin_vec tcg_gen_umin_vec_mips64el +#define tcg_gen_smax_vec tcg_gen_smax_vec_mips64el +#define tcg_gen_umax_vec tcg_gen_umax_vec_mips64el +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_mips64el +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_mips64el +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_mips64el +#define tcg_gen_shls_vec tcg_gen_shls_vec_mips64el +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_mips64el +#define tcg_gen_sars_vec tcg_gen_sars_vec_mips64el +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_mips64el +#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_mips64el +#define tb_htable_lookup tb_htable_lookup_mips64el +#define tb_set_jmp_target tb_set_jmp_target_mips64el +#define cpu_exec cpu_exec_mips64el +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_mips64el +#define cpu_reloading_memory_map cpu_reloading_memory_map_mips64el +#define cpu_loop_exit cpu_loop_exit_mips64el +#define cpu_loop_exit_restore cpu_loop_exit_restore_mips64el +#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mips64el +#define tlb_init tlb_init_mips64el +#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mips64el +#define tlb_flush tlb_flush_mips64el +#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_mips64el +#define tlb_flush_all_cpus tlb_flush_all_cpus_mips64el +#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_mips64el +#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_mips64el +#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mips64el +#define tlb_flush_page tlb_flush_page_mips64el +#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_mips64el +#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_mips64el +#define 
tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_mips64el +#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_mips64el #define tlb_protect_code tlb_protect_code_mips64el -#define tlb_reset_dirty_range tlb_reset_dirty_range_mips64el -#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_mips64el +#define tlb_unprotect_code tlb_unprotect_code_mips64el +#define tlb_reset_dirty tlb_reset_dirty_mips64el #define tlb_set_dirty tlb_set_dirty_mips64el -#define tlb_set_dirty1 tlb_set_dirty1_mips64el -#define tlb_unprotect_code_phys tlb_unprotect_code_phys_mips64el +#define tlb_set_page_with_attrs tlb_set_page_with_attrs_mips64el +#define tlb_set_page tlb_set_page_mips64el +#define get_page_addr_code_hostp get_page_addr_code_hostp_mips64el +#define get_page_addr_code get_page_addr_code_mips64el +#define probe_access probe_access_mips64el #define tlb_vaddr_to_host tlb_vaddr_to_host_mips64el -#define token_get_type token_get_type_mips64el -#define token_get_value token_get_value_mips64el -#define token_is_escape token_is_escape_mips64el -#define token_is_keyword token_is_keyword_mips64el -#define token_is_operator token_is_operator_mips64el -#define tokens_append_from_iter tokens_append_from_iter_mips64el -#define to_qiv to_qiv_mips64el -#define to_qov to_qov_mips64el -#define tosa_init tosa_init_mips64el -#define tosa_machine_init tosa_machine_init_mips64el -#define tswap32 tswap32_mips64el -#define tswap64 tswap64_mips64el -#define type_class_get_size type_class_get_size_mips64el -#define type_get_by_name type_get_by_name_mips64el -#define type_get_parent type_get_parent_mips64el -#define type_has_parent type_has_parent_mips64el -#define type_initialize type_initialize_mips64el -#define type_initialize_interface type_initialize_interface_mips64el -#define type_is_ancestor type_is_ancestor_mips64el -#define type_new type_new_mips64el -#define type_object_get_size type_object_get_size_mips64el -#define type_register_internal type_register_internal_mips64el -#define type_table_add type_table_add_mips64el -#define type_table_get type_table_get_mips64el -#define type_table_lookup type_table_lookup_mips64el -#define uint16_to_float32 uint16_to_float32_mips64el -#define uint16_to_float64 uint16_to_float64_mips64el -#define uint32_to_float32 uint32_to_float32_mips64el -#define uint32_to_float64 uint32_to_float64_mips64el -#define uint64_to_float128 uint64_to_float128_mips64el -#define uint64_to_float32 uint64_to_float32_mips64el -#define uint64_to_float64 uint64_to_float64_mips64el -#define unassigned_io_ops unassigned_io_ops_mips64el -#define unassigned_io_read unassigned_io_read_mips64el -#define unassigned_io_write unassigned_io_write_mips64el -#define unassigned_mem_accepts unassigned_mem_accepts_mips64el +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips64el +#define helper_le_lduw_mmu helper_le_lduw_mmu_mips64el +#define helper_be_lduw_mmu helper_be_lduw_mmu_mips64el +#define helper_le_ldul_mmu helper_le_ldul_mmu_mips64el +#define helper_be_ldul_mmu helper_be_ldul_mmu_mips64el +#define helper_le_ldq_mmu helper_le_ldq_mmu_mips64el +#define helper_be_ldq_mmu helper_be_ldq_mmu_mips64el +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips64el +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips64el +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips64el +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips64el +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips64el +#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_mips64el +#define cpu_ldsb_mmuidx_ra 
cpu_ldsb_mmuidx_ra_mips64el +#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_mips64el +#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_mips64el +#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_mips64el +#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_mips64el +#define cpu_ldub_data_ra cpu_ldub_data_ra_mips64el +#define cpu_ldsb_data_ra cpu_ldsb_data_ra_mips64el +#define cpu_lduw_data_ra cpu_lduw_data_ra_mips64el +#define cpu_ldsw_data_ra cpu_ldsw_data_ra_mips64el +#define cpu_ldl_data_ra cpu_ldl_data_ra_mips64el +#define cpu_ldq_data_ra cpu_ldq_data_ra_mips64el +#define cpu_ldub_data cpu_ldub_data_mips64el +#define cpu_ldsb_data cpu_ldsb_data_mips64el +#define cpu_lduw_data cpu_lduw_data_mips64el +#define cpu_ldsw_data cpu_ldsw_data_mips64el +#define cpu_ldl_data cpu_ldl_data_mips64el +#define cpu_ldq_data cpu_ldq_data_mips64el +#define helper_ret_stb_mmu helper_ret_stb_mmu_mips64el +#define helper_le_stw_mmu helper_le_stw_mmu_mips64el +#define helper_be_stw_mmu helper_be_stw_mmu_mips64el +#define helper_le_stl_mmu helper_le_stl_mmu_mips64el +#define helper_be_stl_mmu helper_be_stl_mmu_mips64el +#define helper_le_stq_mmu helper_le_stq_mmu_mips64el +#define helper_be_stq_mmu helper_be_stq_mmu_mips64el +#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_mips64el +#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_mips64el +#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_mips64el +#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_mips64el +#define cpu_stb_data_ra cpu_stb_data_ra_mips64el +#define cpu_stw_data_ra cpu_stw_data_ra_mips64el +#define cpu_stl_data_ra cpu_stl_data_ra_mips64el +#define cpu_stq_data_ra cpu_stq_data_ra_mips64el +#define cpu_stb_data cpu_stb_data_mips64el +#define cpu_stw_data cpu_stw_data_mips64el +#define cpu_stl_data cpu_stl_data_mips64el +#define cpu_stq_data cpu_stq_data_mips64el +#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_mips64el +#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_mips64el +#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_mips64el +#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_mips64el +#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_mips64el +#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_mips64el +#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_mips64el +#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_mips64el +#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_mips64el +#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_mips64el +#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_mips64el +#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_mips64el +#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_mips64el +#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_mips64el +#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_mips64el +#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_mips64el +#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_mips64el +#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_mips64el +#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_mips64el +#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_mips64el +#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_mips64el +#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_mips64el +#define helper_atomic_fetch_orw_le_mmu 
helper_atomic_fetch_orw_le_mmu_mips64el +#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_mips64el +#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_mips64el +#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_mips64el +#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_mips64el +#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_mips64el +#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_mips64el +#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_mips64el +#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_mips64el +#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_mips64el +#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_mips64el +#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_mips64el +#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_mips64el +#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_mips64el +#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_mips64el +#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_mips64el +#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_mips64el +#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_mips64el +#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_mips64el +#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_mips64el +#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_mips64el +#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_mips64el +#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_mips64el +#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_mips64el +#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_mips64el +#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_mips64el +#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_mips64el +#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_mips64el +#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_mips64el +#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_mips64el +#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_mips64el +#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_mips64el +#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_mips64el +#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_mips64el +#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_mips64el +#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_mips64el +#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_mips64el +#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_mips64el +#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_mips64el +#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_mips64el +#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_mips64el +#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_mips64el +#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_mips64el +#define helper_atomic_fetch_uminl_le_mmu 
helper_atomic_fetch_uminl_le_mmu_mips64el +#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_mips64el +#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_mips64el +#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_mips64el +#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_mips64el +#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_mips64el +#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_mips64el +#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_mips64el +#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_mips64el +#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_mips64el +#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_mips64el +#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_mips64el +#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_mips64el +#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_mips64el +#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_mips64el +#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_mips64el +#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_mips64el +#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_mips64el +#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_mips64el +#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_mips64el +#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_mips64el +#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_mips64el +#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_mips64el +#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_mips64el +#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_mips64el +#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_mips64el +#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_mips64el +#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_mips64el +#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_mips64el +#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_mips64el +#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_mips64el +#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_mips64el +#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_mips64el +#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_mips64el +#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_mips64el +#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_mips64el +#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_mips64el +#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_mips64el +#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_mips64el +#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_mips64el +#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_mips64el +#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_mips64el +#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_mips64el +#define helper_atomic_cmpxchgq_be_mmu 
helper_atomic_cmpxchgq_be_mmu_mips64el +#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_mips64el +#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_mips64el +#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_mips64el +#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_mips64el +#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_mips64el +#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_mips64el +#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_mips64el +#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_mips64el +#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_mips64el +#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_mips64el +#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_mips64el +#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_mips64el +#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_mips64el +#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_mips64el +#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_mips64el +#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_mips64el +#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_mips64el +#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_mips64el +#define helper_atomic_xchgb helper_atomic_xchgb_mips64el +#define helper_atomic_fetch_addb helper_atomic_fetch_addb_mips64el +#define helper_atomic_fetch_andb helper_atomic_fetch_andb_mips64el +#define helper_atomic_fetch_orb helper_atomic_fetch_orb_mips64el +#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_mips64el +#define helper_atomic_add_fetchb helper_atomic_add_fetchb_mips64el +#define helper_atomic_and_fetchb helper_atomic_and_fetchb_mips64el +#define helper_atomic_or_fetchb helper_atomic_or_fetchb_mips64el +#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_mips64el +#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_mips64el +#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_mips64el +#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_mips64el +#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_mips64el +#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_mips64el +#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_mips64el +#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_mips64el +#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_mips64el +#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_mips64el +#define helper_atomic_xchgw_le helper_atomic_xchgw_le_mips64el +#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_mips64el +#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_mips64el +#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_mips64el +#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_mips64el +#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_mips64el +#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_mips64el +#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_mips64el +#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_mips64el +#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_mips64el +#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_mips64el +#define 
helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_mips64el +#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_mips64el +#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_mips64el +#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_mips64el +#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_mips64el +#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_mips64el +#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_mips64el +#define helper_atomic_xchgw_be helper_atomic_xchgw_be_mips64el +#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_mips64el +#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_mips64el +#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_mips64el +#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_mips64el +#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_mips64el +#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_mips64el +#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_mips64el +#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_mips64el +#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_mips64el +#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_mips64el +#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_mips64el +#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_mips64el +#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_mips64el +#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_mips64el +#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_mips64el +#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_mips64el +#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_mips64el +#define helper_atomic_xchgl_le helper_atomic_xchgl_le_mips64el +#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_mips64el +#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_mips64el +#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_mips64el +#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_mips64el +#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_mips64el +#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_mips64el +#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_mips64el +#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_mips64el +#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_mips64el +#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_mips64el +#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_mips64el +#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_mips64el +#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_mips64el +#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_mips64el +#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_mips64el +#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_mips64el +#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_mips64el +#define helper_atomic_xchgl_be helper_atomic_xchgl_be_mips64el +#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_mips64el +#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_mips64el +#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_mips64el +#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_mips64el +#define 
helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_mips64el
+#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_mips64el
+#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_mips64el
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_mips64el
+#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_mips64el
+#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_mips64el
+#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_mips64el
+#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_mips64el
+#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_mips64el
+#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_mips64el
+#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_mips64el
+#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_mips64el
+#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_mips64el
+#define helper_atomic_xchgq_le helper_atomic_xchgq_le_mips64el
+#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_mips64el
+#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_mips64el
+#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_mips64el
+#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_mips64el
+#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_mips64el
+#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_mips64el
+#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_mips64el
+#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_mips64el
+#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_mips64el
+#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_mips64el
+#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_mips64el
+#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_mips64el
+#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_mips64el
+#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_mips64el
+#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_mips64el
+#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_mips64el
+#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_mips64el
+#define helper_atomic_xchgq_be helper_atomic_xchgq_be_mips64el
+#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_mips64el
+#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_mips64el
+#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_mips64el
+#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_mips64el
+#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_mips64el
+#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_mips64el
+#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_mips64el
+#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_mips64el
+#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_mips64el
+#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_mips64el
+#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_mips64el
+#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_mips64el
+#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_mips64el
+#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_mips64el
+#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_mips64el
+#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_mips64el
+#define cpu_ldub_code cpu_ldub_code_mips64el
+#define cpu_lduw_code cpu_lduw_code_mips64el
+#define cpu_ldl_code cpu_ldl_code_mips64el
+#define cpu_ldq_code cpu_ldq_code_mips64el
+#define helper_div_i32 helper_div_i32_mips64el
+#define helper_rem_i32 helper_rem_i32_mips64el
+#define helper_divu_i32 helper_divu_i32_mips64el
+#define helper_remu_i32 helper_remu_i32_mips64el
+#define helper_shl_i64 helper_shl_i64_mips64el
+#define helper_shr_i64 helper_shr_i64_mips64el
+#define helper_sar_i64 helper_sar_i64_mips64el
+#define helper_div_i64 helper_div_i64_mips64el
+#define helper_rem_i64 helper_rem_i64_mips64el
+#define helper_divu_i64 helper_divu_i64_mips64el
+#define helper_remu_i64 helper_remu_i64_mips64el
+#define helper_muluh_i64 helper_muluh_i64_mips64el
+#define helper_mulsh_i64 helper_mulsh_i64_mips64el
+#define helper_clz_i32 helper_clz_i32_mips64el
+#define helper_ctz_i32 helper_ctz_i32_mips64el
+#define helper_clz_i64 helper_clz_i64_mips64el
+#define helper_ctz_i64 helper_ctz_i64_mips64el
+#define helper_clrsb_i32 helper_clrsb_i32_mips64el
+#define helper_clrsb_i64 helper_clrsb_i64_mips64el
+#define helper_ctpop_i32 helper_ctpop_i32_mips64el
+#define helper_ctpop_i64 helper_ctpop_i64_mips64el
+#define helper_lookup_tb_ptr helper_lookup_tb_ptr_mips64el
+#define helper_exit_atomic helper_exit_atomic_mips64el
+#define helper_gvec_add8 helper_gvec_add8_mips64el
+#define helper_gvec_add16 helper_gvec_add16_mips64el
+#define helper_gvec_add32 helper_gvec_add32_mips64el
+#define helper_gvec_add64 helper_gvec_add64_mips64el
+#define helper_gvec_adds8 helper_gvec_adds8_mips64el
+#define helper_gvec_adds16 helper_gvec_adds16_mips64el
+#define helper_gvec_adds32 helper_gvec_adds32_mips64el
+#define helper_gvec_adds64 helper_gvec_adds64_mips64el
+#define helper_gvec_sub8 helper_gvec_sub8_mips64el
+#define helper_gvec_sub16 helper_gvec_sub16_mips64el
+#define helper_gvec_sub32 helper_gvec_sub32_mips64el
+#define helper_gvec_sub64 helper_gvec_sub64_mips64el
+#define helper_gvec_subs8 helper_gvec_subs8_mips64el
+#define helper_gvec_subs16 helper_gvec_subs16_mips64el
+#define helper_gvec_subs32 helper_gvec_subs32_mips64el
+#define helper_gvec_subs64 helper_gvec_subs64_mips64el
+#define helper_gvec_mul8 helper_gvec_mul8_mips64el
+#define helper_gvec_mul16 helper_gvec_mul16_mips64el
+#define helper_gvec_mul32 helper_gvec_mul32_mips64el
+#define helper_gvec_mul64 helper_gvec_mul64_mips64el
+#define helper_gvec_muls8 helper_gvec_muls8_mips64el
+#define helper_gvec_muls16 helper_gvec_muls16_mips64el
+#define helper_gvec_muls32 helper_gvec_muls32_mips64el
+#define helper_gvec_muls64 helper_gvec_muls64_mips64el
+#define helper_gvec_neg8 helper_gvec_neg8_mips64el
+#define helper_gvec_neg16 helper_gvec_neg16_mips64el
+#define helper_gvec_neg32 helper_gvec_neg32_mips64el
+#define helper_gvec_neg64 helper_gvec_neg64_mips64el
+#define helper_gvec_abs8 helper_gvec_abs8_mips64el
+#define helper_gvec_abs16 helper_gvec_abs16_mips64el
+#define helper_gvec_abs32 helper_gvec_abs32_mips64el
+#define helper_gvec_abs64 helper_gvec_abs64_mips64el
+#define helper_gvec_mov helper_gvec_mov_mips64el
+#define helper_gvec_dup64 helper_gvec_dup64_mips64el
+#define helper_gvec_dup32 helper_gvec_dup32_mips64el
+#define helper_gvec_dup16 helper_gvec_dup16_mips64el
+#define helper_gvec_dup8 helper_gvec_dup8_mips64el
+#define helper_gvec_not helper_gvec_not_mips64el
+#define helper_gvec_and helper_gvec_and_mips64el
+#define helper_gvec_or helper_gvec_or_mips64el
+#define helper_gvec_xor helper_gvec_xor_mips64el
+#define helper_gvec_andc helper_gvec_andc_mips64el
+#define helper_gvec_orc helper_gvec_orc_mips64el
+#define helper_gvec_nand helper_gvec_nand_mips64el
+#define helper_gvec_nor helper_gvec_nor_mips64el
+#define helper_gvec_eqv helper_gvec_eqv_mips64el
+#define helper_gvec_ands helper_gvec_ands_mips64el
+#define helper_gvec_xors helper_gvec_xors_mips64el
+#define helper_gvec_ors helper_gvec_ors_mips64el
+#define helper_gvec_shl8i helper_gvec_shl8i_mips64el
+#define helper_gvec_shl16i helper_gvec_shl16i_mips64el
+#define helper_gvec_shl32i helper_gvec_shl32i_mips64el
+#define helper_gvec_shl64i helper_gvec_shl64i_mips64el
+#define helper_gvec_shr8i helper_gvec_shr8i_mips64el
+#define helper_gvec_shr16i helper_gvec_shr16i_mips64el
+#define helper_gvec_shr32i helper_gvec_shr32i_mips64el
+#define helper_gvec_shr64i helper_gvec_shr64i_mips64el
+#define helper_gvec_sar8i helper_gvec_sar8i_mips64el
+#define helper_gvec_sar16i helper_gvec_sar16i_mips64el
+#define helper_gvec_sar32i helper_gvec_sar32i_mips64el
+#define helper_gvec_sar64i helper_gvec_sar64i_mips64el
+#define helper_gvec_shl8v helper_gvec_shl8v_mips64el
+#define helper_gvec_shl16v helper_gvec_shl16v_mips64el
+#define helper_gvec_shl32v helper_gvec_shl32v_mips64el
+#define helper_gvec_shl64v helper_gvec_shl64v_mips64el
+#define helper_gvec_shr8v helper_gvec_shr8v_mips64el
+#define helper_gvec_shr16v helper_gvec_shr16v_mips64el
+#define helper_gvec_shr32v helper_gvec_shr32v_mips64el
+#define helper_gvec_shr64v helper_gvec_shr64v_mips64el
+#define helper_gvec_sar8v helper_gvec_sar8v_mips64el
+#define helper_gvec_sar16v helper_gvec_sar16v_mips64el
+#define helper_gvec_sar32v helper_gvec_sar32v_mips64el
+#define helper_gvec_sar64v helper_gvec_sar64v_mips64el
+#define helper_gvec_eq8 helper_gvec_eq8_mips64el
+#define helper_gvec_ne8 helper_gvec_ne8_mips64el
+#define helper_gvec_lt8 helper_gvec_lt8_mips64el
+#define helper_gvec_le8 helper_gvec_le8_mips64el
+#define helper_gvec_ltu8 helper_gvec_ltu8_mips64el
+#define helper_gvec_leu8 helper_gvec_leu8_mips64el
+#define helper_gvec_eq16 helper_gvec_eq16_mips64el
+#define helper_gvec_ne16 helper_gvec_ne16_mips64el
+#define helper_gvec_lt16 helper_gvec_lt16_mips64el
+#define helper_gvec_le16 helper_gvec_le16_mips64el
+#define helper_gvec_ltu16 helper_gvec_ltu16_mips64el
+#define helper_gvec_leu16 helper_gvec_leu16_mips64el
+#define helper_gvec_eq32 helper_gvec_eq32_mips64el
+#define helper_gvec_ne32 helper_gvec_ne32_mips64el
+#define helper_gvec_lt32 helper_gvec_lt32_mips64el
+#define helper_gvec_le32 helper_gvec_le32_mips64el
+#define helper_gvec_ltu32 helper_gvec_ltu32_mips64el
+#define helper_gvec_leu32 helper_gvec_leu32_mips64el
+#define helper_gvec_eq64 helper_gvec_eq64_mips64el
+#define helper_gvec_ne64 helper_gvec_ne64_mips64el
+#define helper_gvec_lt64 helper_gvec_lt64_mips64el
+#define helper_gvec_le64 helper_gvec_le64_mips64el
+#define helper_gvec_ltu64 helper_gvec_ltu64_mips64el
+#define helper_gvec_leu64 helper_gvec_leu64_mips64el
+#define helper_gvec_ssadd8 helper_gvec_ssadd8_mips64el
+#define helper_gvec_ssadd16 helper_gvec_ssadd16_mips64el
+#define helper_gvec_ssadd32 helper_gvec_ssadd32_mips64el
+#define helper_gvec_ssadd64 helper_gvec_ssadd64_mips64el
+#define helper_gvec_sssub8 helper_gvec_sssub8_mips64el
+#define helper_gvec_sssub16 helper_gvec_sssub16_mips64el
+#define helper_gvec_sssub32 helper_gvec_sssub32_mips64el
+#define helper_gvec_sssub64 helper_gvec_sssub64_mips64el
+#define helper_gvec_usadd8 helper_gvec_usadd8_mips64el
+#define helper_gvec_usadd16 helper_gvec_usadd16_mips64el
+#define helper_gvec_usadd32 helper_gvec_usadd32_mips64el
+#define helper_gvec_usadd64 helper_gvec_usadd64_mips64el
+#define helper_gvec_ussub8 helper_gvec_ussub8_mips64el
+#define helper_gvec_ussub16 helper_gvec_ussub16_mips64el
+#define helper_gvec_ussub32 helper_gvec_ussub32_mips64el
+#define helper_gvec_ussub64 helper_gvec_ussub64_mips64el
+#define helper_gvec_smin8 helper_gvec_smin8_mips64el
+#define helper_gvec_smin16 helper_gvec_smin16_mips64el
+#define helper_gvec_smin32 helper_gvec_smin32_mips64el
+#define helper_gvec_smin64 helper_gvec_smin64_mips64el
+#define helper_gvec_smax8 helper_gvec_smax8_mips64el
+#define helper_gvec_smax16 helper_gvec_smax16_mips64el
+#define helper_gvec_smax32 helper_gvec_smax32_mips64el
+#define helper_gvec_smax64 helper_gvec_smax64_mips64el
+#define helper_gvec_umin8 helper_gvec_umin8_mips64el
+#define helper_gvec_umin16 helper_gvec_umin16_mips64el
+#define helper_gvec_umin32 helper_gvec_umin32_mips64el
+#define helper_gvec_umin64 helper_gvec_umin64_mips64el
+#define helper_gvec_umax8 helper_gvec_umax8_mips64el
+#define helper_gvec_umax16 helper_gvec_umax16_mips64el
+#define helper_gvec_umax32 helper_gvec_umax32_mips64el
+#define helper_gvec_umax64 helper_gvec_umax64_mips64el
+#define helper_gvec_bitsel helper_gvec_bitsel_mips64el
+#define cpu_restore_state cpu_restore_state_mips64el
+#define page_collection_lock page_collection_lock_mips64el
+#define page_collection_unlock page_collection_unlock_mips64el
+#define free_code_gen_buffer free_code_gen_buffer_mips64el
+#define tcg_exec_init tcg_exec_init_mips64el
+#define tb_cleanup tb_cleanup_mips64el
+#define tb_flush tb_flush_mips64el
+#define tb_phys_invalidate tb_phys_invalidate_mips64el
+#define tb_gen_code tb_gen_code_mips64el
+#define tb_exec_lock tb_exec_lock_mips64el
+#define tb_exec_unlock tb_exec_unlock_mips64el
+#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips64el
+#define tb_invalidate_phys_range tb_invalidate_phys_range_mips64el
+#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips64el
+#define tb_check_watchpoint tb_check_watchpoint_mips64el
+#define cpu_io_recompile cpu_io_recompile_mips64el
+#define tb_flush_jmp_cache tb_flush_jmp_cache_mips64el
+#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_mips64el
+#define translator_loop_temp_check translator_loop_temp_check_mips64el
+#define translator_loop translator_loop_mips64el
+#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_mips64el
+#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_mips64el
+#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_mips64el
+#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_mips64el
+#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_mips64el
+#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_mips64el
 #define unassigned_mem_ops unassigned_mem_ops_mips64el
-#define unassigned_mem_read unassigned_mem_read_mips64el
-#define unassigned_mem_write unassigned_mem_write_mips64el
-#define update_spsel update_spsel_mips64el
-#define v6_cp_reginfo v6_cp_reginfo_mips64el
-#define v6k_cp_reginfo v6k_cp_reginfo_mips64el
-#define v7_cp_reginfo v7_cp_reginfo_mips64el
-#define v7mp_cp_reginfo v7mp_cp_reginfo_mips64el
-#define v7m_pop v7m_pop_mips64el
-#define v7m_push v7m_push_mips64el
-#define v8_cp_reginfo v8_cp_reginfo_mips64el
-#define v8_el2_cp_reginfo v8_el2_cp_reginfo_mips64el
-#define v8_el3_cp_reginfo v8_el3_cp_reginfo_mips64el
-#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_mips64el
-#define vapa_cp_reginfo vapa_cp_reginfo_mips64el
-#define vbar_write vbar_write_mips64el
-#define vfp_exceptbits_from_host vfp_exceptbits_from_host_mips64el
-#define vfp_exceptbits_to_host vfp_exceptbits_to_host_mips64el
-#define vfp_get_fpcr vfp_get_fpcr_mips64el
-#define vfp_get_fpscr vfp_get_fpscr_mips64el
-#define vfp_get_fpsr vfp_get_fpsr_mips64el
-#define vfp_reg_offset vfp_reg_offset_mips64el
-#define vfp_set_fpcr vfp_set_fpcr_mips64el
-#define vfp_set_fpscr vfp_set_fpscr_mips64el
-#define vfp_set_fpsr vfp_set_fpsr_mips64el
-#define visit_end_implicit_struct visit_end_implicit_struct_mips64el
-#define visit_end_list visit_end_list_mips64el
-#define visit_end_struct visit_end_struct_mips64el
-#define visit_end_union visit_end_union_mips64el
-#define visit_get_next_type visit_get_next_type_mips64el
-#define visit_next_list visit_next_list_mips64el
-#define visit_optional visit_optional_mips64el
-#define visit_start_implicit_struct visit_start_implicit_struct_mips64el
-#define visit_start_list visit_start_list_mips64el
-#define visit_start_struct visit_start_struct_mips64el
-#define visit_start_union visit_start_union_mips64el
-#define vmsa_cp_reginfo vmsa_cp_reginfo_mips64el
-#define vmsa_tcr_el1_write vmsa_tcr_el1_write_mips64el
-#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_mips64el
-#define vmsa_ttbcr_reset vmsa_ttbcr_reset_mips64el
-#define vmsa_ttbcr_write vmsa_ttbcr_write_mips64el
-#define vmsa_ttbr_write vmsa_ttbr_write_mips64el
-#define write_cpustate_to_list write_cpustate_to_list_mips64el
-#define write_list_to_cpustate write_list_to_cpustate_mips64el
-#define write_raw_cp_reg write_raw_cp_reg_mips64el
-#define X86CPURegister32_lookup X86CPURegister32_lookup_mips64el
-#define x86_op_defs x86_op_defs_mips64el
-#define xpsr_read xpsr_read_mips64el
-#define xpsr_write xpsr_write_mips64el
-#define xscale_cpar_write xscale_cpar_write_mips64el
-#define xscale_cp_reginfo xscale_cp_reginfo_mips64el
-#define cpu_mips_exec cpu_mips_exec_mips64el
+#define floatx80_infinity floatx80_infinity_mips64el
+#define dup_const_func dup_const_func_mips64el
+#define gen_helper_raise_exception gen_helper_raise_exception_mips64el
+#define gen_helper_raise_interrupt gen_helper_raise_interrupt_mips64el
+#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips64el
+#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips64el
+#define gen_helper_cpsr_read gen_helper_cpsr_read_mips64el
+#define gen_helper_cpsr_write gen_helper_cpsr_write_mips64el
+#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips64el
+#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips64el
+#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips64el
+#define helper_mfc0_random helper_mfc0_random_mips64el
+#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips64el
+#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips64el
+#define helper_mfc0_tcbind helper_mfc0_tcbind_mips64el
+#define helper_mftc0_tcbind helper_mftc0_tcbind_mips64el
+#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips64el
+#define helper_mftc0_tcrestart helper_mftc0_tcrestart_mips64el
+#define helper_mfc0_tchalt helper_mfc0_tchalt_mips64el
+#define helper_mftc0_tchalt helper_mftc0_tchalt_mips64el
+#define helper_mfc0_tccontext helper_mfc0_tccontext_mips64el
+#define helper_mftc0_tccontext helper_mftc0_tccontext_mips64el
+#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips64el
+#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips64el
+#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips64el
+#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips64el
+#define helper_mfc0_count helper_mfc0_count_mips64el
+#define helper_mfc0_saar helper_mfc0_saar_mips64el
+#define helper_mfhc0_saar helper_mfhc0_saar_mips64el
+#define helper_mftc0_entryhi helper_mftc0_entryhi_mips64el
+#define helper_mftc0_cause helper_mftc0_cause_mips64el
+#define helper_mftc0_status helper_mftc0_status_mips64el
+#define helper_mfc0_lladdr helper_mfc0_lladdr_mips64el
+#define helper_mfc0_maar helper_mfc0_maar_mips64el
+#define helper_mfhc0_maar helper_mfhc0_maar_mips64el
+#define helper_mfc0_watchlo helper_mfc0_watchlo_mips64el
+#define helper_mfc0_watchhi helper_mfc0_watchhi_mips64el
+#define helper_mfhc0_watchhi helper_mfhc0_watchhi_mips64el
+#define helper_mfc0_debug helper_mfc0_debug_mips64el
+#define helper_mftc0_debug helper_mftc0_debug_mips64el
+#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips64el
+#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips64el
+#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips64el
+#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips64el
+#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips64el
+#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips64el
+#define helper_dmfc0_maar helper_dmfc0_maar_mips64el
+#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips64el
+#define helper_dmfc0_watchhi helper_dmfc0_watchhi_mips64el
+#define helper_dmfc0_saar helper_dmfc0_saar_mips64el
+#define helper_mtc0_index helper_mtc0_index_mips64el
+#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips64el
+#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips64el
+#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips64el
+#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips64el
+#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips64el
+#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips64el
+#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips64el
+#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips64el
+#define helper_mtc0_yqmask helper_mtc0_yqmask_mips64el
+#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips64el
+#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips64el
+#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips64el
+#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips64el
+#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips64el
+#define helper_mtc0_tcbind helper_mtc0_tcbind_mips64el
+#define helper_mttc0_tcbind helper_mttc0_tcbind_mips64el
+#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips64el
+#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips64el
+#define helper_mtc0_tchalt helper_mtc0_tchalt_mips64el
+#define helper_mttc0_tchalt helper_mttc0_tchalt_mips64el
+#define helper_mtc0_tccontext helper_mtc0_tccontext_mips64el
+#define helper_mttc0_tccontext helper_mttc0_tccontext_mips64el
+#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips64el
+#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips64el
+#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips64el
+#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips64el
+#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips64el
+#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips64el
+#define helper_mtc0_context helper_mtc0_context_mips64el
+#define helper_mtc0_memorymapid helper_mtc0_memorymapid_mips64el
+#define update_pagemask update_pagemask_mips64el
+#define helper_mtc0_pagemask helper_mtc0_pagemask_mips64el
+#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips64el
+#define helper_mtc0_segctl0 helper_mtc0_segctl0_mips64el
+#define helper_mtc0_segctl1 helper_mtc0_segctl1_mips64el
+#define helper_mtc0_segctl2 helper_mtc0_segctl2_mips64el
+#define helper_mtc0_pwfield helper_mtc0_pwfield_mips64el
+#define helper_mtc0_pwsize helper_mtc0_pwsize_mips64el
+#define helper_mtc0_wired helper_mtc0_wired_mips64el
+#define helper_mtc0_pwctl helper_mtc0_pwctl_mips64el
+#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips64el
+#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips64el
+#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips64el
+#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips64el
+#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips64el
+#define helper_mtc0_hwrena helper_mtc0_hwrena_mips64el
+#define helper_mtc0_count helper_mtc0_count_mips64el
+#define helper_mtc0_saari helper_mtc0_saari_mips64el
+#define helper_mtc0_saar helper_mtc0_saar_mips64el
+#define helper_mthc0_saar helper_mthc0_saar_mips64el
+#define helper_mtc0_entryhi helper_mtc0_entryhi_mips64el
+#define helper_mttc0_entryhi helper_mttc0_entryhi_mips64el
+#define helper_mtc0_compare helper_mtc0_compare_mips64el
+#define helper_mtc0_status helper_mtc0_status_mips64el
+#define helper_mttc0_status helper_mttc0_status_mips64el
+#define helper_mtc0_intctl helper_mtc0_intctl_mips64el
+#define helper_mtc0_srsctl helper_mtc0_srsctl_mips64el
+#define helper_mtc0_cause helper_mtc0_cause_mips64el
+#define helper_mttc0_cause helper_mttc0_cause_mips64el
+#define helper_mftc0_epc helper_mftc0_epc_mips64el
+#define helper_mftc0_ebase helper_mftc0_ebase_mips64el
+#define helper_mtc0_ebase helper_mtc0_ebase_mips64el
+#define helper_mttc0_ebase helper_mttc0_ebase_mips64el
+#define helper_mftc0_configx helper_mftc0_configx_mips64el
+#define helper_mtc0_config0 helper_mtc0_config0_mips64el
+#define helper_mtc0_config2 helper_mtc0_config2_mips64el
+#define helper_mtc0_config3 helper_mtc0_config3_mips64el
+#define helper_mtc0_config4 helper_mtc0_config4_mips64el
+#define helper_mtc0_config5 helper_mtc0_config5_mips64el
+#define helper_mtc0_lladdr helper_mtc0_lladdr_mips64el
+#define helper_mtc0_maar helper_mtc0_maar_mips64el
+#define helper_mthc0_maar helper_mthc0_maar_mips64el
+#define helper_mtc0_maari helper_mtc0_maari_mips64el
+#define helper_mtc0_watchlo helper_mtc0_watchlo_mips64el
+#define helper_mtc0_watchhi helper_mtc0_watchhi_mips64el
+#define helper_mthc0_watchhi helper_mthc0_watchhi_mips64el
+#define helper_mtc0_xcontext helper_mtc0_xcontext_mips64el
+#define helper_mtc0_framemask helper_mtc0_framemask_mips64el
+#define helper_mtc0_debug helper_mtc0_debug_mips64el
+#define helper_mttc0_debug helper_mttc0_debug_mips64el
+#define helper_mtc0_performance0 helper_mtc0_performance0_mips64el
+#define helper_mtc0_errctl helper_mtc0_errctl_mips64el
+#define helper_mtc0_taglo helper_mtc0_taglo_mips64el
+#define helper_mtc0_datalo helper_mtc0_datalo_mips64el
+#define helper_mtc0_taghi helper_mtc0_taghi_mips64el
+#define helper_mtc0_datahi helper_mtc0_datahi_mips64el
+#define helper_mftgpr helper_mftgpr_mips64el
+#define helper_mftlo helper_mftlo_mips64el
+#define helper_mfthi helper_mfthi_mips64el
+#define helper_mftacx helper_mftacx_mips64el
+#define helper_mftdsp helper_mftdsp_mips64el
+#define helper_mttgpr helper_mttgpr_mips64el
+#define helper_mttlo helper_mttlo_mips64el
+#define helper_mtthi helper_mtthi_mips64el
+#define helper_mttacx helper_mttacx_mips64el
+#define helper_mttdsp helper_mttdsp_mips64el
+#define helper_dmt helper_dmt_mips64el
+#define helper_emt helper_emt_mips64el
+#define helper_dvpe helper_dvpe_mips64el
+#define helper_evpe helper_evpe_mips64el
+#define helper_dvp helper_dvp_mips64el
+#define helper_evp helper_evp_mips64el
 #define cpu_mips_get_random cpu_mips_get_random_mips64el
-#define cpu_mips_get_count cpu_mips_get_count_mips64el
-#define cpu_mips_store_count cpu_mips_store_count_mips64el
-#define cpu_mips_store_compare cpu_mips_store_compare_mips64el
-#define cpu_mips_start_count cpu_mips_start_count_mips64el
-#define cpu_mips_stop_count cpu_mips_stop_count_mips64el
-#define mips_machine_init mips_machine_init_mips64el
-#define cpu_mips_kseg0_to_phys cpu_mips_kseg0_to_phys_mips64el
-#define cpu_mips_phys_to_kseg0 cpu_mips_phys_to_kseg0_mips64el
-#define cpu_mips_kvm_um_phys_to_kseg0 cpu_mips_kvm_um_phys_to_kseg0_mips64el
-#define mips_cpu_register_types mips_cpu_register_types_mips64el
 #define cpu_mips_init cpu_mips_init_mips64el
-#define cpu_state_reset cpu_state_reset_mips64el
-#define helper_msa_andi_b helper_msa_andi_b_mips64el
-#define helper_msa_ori_b helper_msa_ori_b_mips64el
-#define helper_msa_nori_b helper_msa_nori_b_mips64el
-#define helper_msa_xori_b helper_msa_xori_b_mips64el
-#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips64el
-#define helper_msa_bmzi_b helper_msa_bmzi_b_mips64el
-#define helper_msa_bseli_b helper_msa_bseli_b_mips64el
-#define helper_msa_shf_df helper_msa_shf_df_mips64el
-#define helper_msa_and_v helper_msa_and_v_mips64el
-#define helper_msa_or_v helper_msa_or_v_mips64el
-#define helper_msa_nor_v helper_msa_nor_v_mips64el
-#define helper_msa_xor_v helper_msa_xor_v_mips64el
-#define helper_msa_bmnz_v helper_msa_bmnz_v_mips64el
-#define helper_msa_bmz_v helper_msa_bmz_v_mips64el
-#define helper_msa_bsel_v helper_msa_bsel_v_mips64el
-#define helper_msa_addvi_df helper_msa_addvi_df_mips64el
-#define helper_msa_subvi_df helper_msa_subvi_df_mips64el
-#define helper_msa_ceqi_df helper_msa_ceqi_df_mips64el
-#define helper_msa_clei_s_df helper_msa_clei_s_df_mips64el
-#define helper_msa_clei_u_df helper_msa_clei_u_df_mips64el
-#define helper_msa_clti_s_df helper_msa_clti_s_df_mips64el
-#define helper_msa_clti_u_df helper_msa_clti_u_df_mips64el
-#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips64el
-#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips64el
-#define helper_msa_mini_s_df helper_msa_mini_s_df_mips64el
-#define helper_msa_mini_u_df helper_msa_mini_u_df_mips64el
-#define helper_msa_ldi_df helper_msa_ldi_df_mips64el
-#define helper_msa_slli_df helper_msa_slli_df_mips64el
-#define helper_msa_srai_df helper_msa_srai_df_mips64el
-#define helper_msa_srli_df helper_msa_srli_df_mips64el
-#define helper_msa_bclri_df helper_msa_bclri_df_mips64el
-#define helper_msa_bseti_df helper_msa_bseti_df_mips64el
-#define helper_msa_bnegi_df helper_msa_bnegi_df_mips64el
-#define helper_msa_sat_s_df helper_msa_sat_s_df_mips64el
-#define helper_msa_sat_u_df helper_msa_sat_u_df_mips64el
-#define helper_msa_srari_df helper_msa_srari_df_mips64el
-#define helper_msa_srlri_df helper_msa_srlri_df_mips64el
-#define helper_msa_binsli_df helper_msa_binsli_df_mips64el
-#define helper_msa_binsri_df helper_msa_binsri_df_mips64el
-#define helper_msa_sll_df helper_msa_sll_df_mips64el
-#define helper_msa_sra_df helper_msa_sra_df_mips64el
-#define helper_msa_srl_df helper_msa_srl_df_mips64el
-#define helper_msa_bclr_df helper_msa_bclr_df_mips64el
-#define helper_msa_bset_df helper_msa_bset_df_mips64el
-#define helper_msa_bneg_df helper_msa_bneg_df_mips64el
-#define helper_msa_addv_df helper_msa_addv_df_mips64el
-#define helper_msa_subv_df helper_msa_subv_df_mips64el
-#define helper_msa_max_s_df helper_msa_max_s_df_mips64el
-#define helper_msa_max_u_df helper_msa_max_u_df_mips64el
-#define helper_msa_min_s_df helper_msa_min_s_df_mips64el
-#define helper_msa_min_u_df helper_msa_min_u_df_mips64el
-#define helper_msa_max_a_df helper_msa_max_a_df_mips64el
-#define helper_msa_min_a_df helper_msa_min_a_df_mips64el
-#define helper_msa_ceq_df helper_msa_ceq_df_mips64el
-#define helper_msa_clt_s_df helper_msa_clt_s_df_mips64el
-#define helper_msa_clt_u_df helper_msa_clt_u_df_mips64el
-#define helper_msa_cle_s_df helper_msa_cle_s_df_mips64el
-#define helper_msa_cle_u_df helper_msa_cle_u_df_mips64el
-#define helper_msa_add_a_df helper_msa_add_a_df_mips64el
-#define helper_msa_adds_a_df helper_msa_adds_a_df_mips64el
-#define helper_msa_adds_s_df helper_msa_adds_s_df_mips64el
-#define helper_msa_adds_u_df helper_msa_adds_u_df_mips64el
-#define helper_msa_ave_s_df helper_msa_ave_s_df_mips64el
-#define helper_msa_ave_u_df helper_msa_ave_u_df_mips64el
-#define helper_msa_aver_s_df helper_msa_aver_s_df_mips64el
-#define helper_msa_aver_u_df helper_msa_aver_u_df_mips64el
-#define helper_msa_subs_s_df helper_msa_subs_s_df_mips64el
-#define helper_msa_subs_u_df helper_msa_subs_u_df_mips64el
-#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips64el
-#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips64el
-#define helper_msa_asub_s_df helper_msa_asub_s_df_mips64el
-#define helper_msa_asub_u_df helper_msa_asub_u_df_mips64el
-#define helper_msa_mulv_df helper_msa_mulv_df_mips64el
-#define helper_msa_div_s_df helper_msa_div_s_df_mips64el
-#define helper_msa_div_u_df helper_msa_div_u_df_mips64el
-#define helper_msa_mod_s_df helper_msa_mod_s_df_mips64el
-#define helper_msa_mod_u_df helper_msa_mod_u_df_mips64el
-#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips64el
-#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips64el
-#define helper_msa_srar_df helper_msa_srar_df_mips64el
-#define helper_msa_srlr_df helper_msa_srlr_df_mips64el
-#define helper_msa_hadd_s_df helper_msa_hadd_s_df_mips64el
-#define helper_msa_hadd_u_df helper_msa_hadd_u_df_mips64el
-#define helper_msa_hsub_s_df helper_msa_hsub_s_df_mips64el
-#define helper_msa_hsub_u_df helper_msa_hsub_u_df_mips64el
-#define helper_msa_mul_q_df helper_msa_mul_q_df_mips64el
-#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips64el
-#define helper_msa_sld_df helper_msa_sld_df_mips64el
-#define helper_msa_maddv_df helper_msa_maddv_df_mips64el
-#define helper_msa_msubv_df helper_msa_msubv_df_mips64el
-#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips64el
-#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips64el
-#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips64el
-#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips64el
-#define helper_msa_binsl_df helper_msa_binsl_df_mips64el
-#define helper_msa_binsr_df helper_msa_binsr_df_mips64el
-#define helper_msa_madd_q_df helper_msa_madd_q_df_mips64el
-#define helper_msa_msub_q_df helper_msa_msub_q_df_mips64el
-#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips64el
-#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips64el
-#define helper_msa_splat_df helper_msa_splat_df_mips64el
-#define helper_msa_pckev_df helper_msa_pckev_df_mips64el
-#define helper_msa_pckod_df helper_msa_pckod_df_mips64el
-#define helper_msa_ilvl_df helper_msa_ilvl_df_mips64el
-#define helper_msa_ilvr_df helper_msa_ilvr_df_mips64el
-#define helper_msa_ilvev_df helper_msa_ilvev_df_mips64el
-#define helper_msa_ilvod_df helper_msa_ilvod_df_mips64el
-#define helper_msa_vshf_df helper_msa_vshf_df_mips64el
-#define helper_msa_sldi_df helper_msa_sldi_df_mips64el
-#define helper_msa_splati_df helper_msa_splati_df_mips64el
-#define helper_msa_copy_s_df helper_msa_copy_s_df_mips64el
-#define helper_msa_copy_u_df helper_msa_copy_u_df_mips64el
-#define helper_msa_insert_df helper_msa_insert_df_mips64el
-#define helper_msa_insve_df helper_msa_insve_df_mips64el
-#define helper_msa_ctcmsa helper_msa_ctcmsa_mips64el
-#define helper_msa_cfcmsa helper_msa_cfcmsa_mips64el
-#define helper_msa_move_v helper_msa_move_v_mips64el
-#define helper_msa_fill_df helper_msa_fill_df_mips64el
-#define helper_msa_nlzc_df helper_msa_nlzc_df_mips64el
-#define helper_msa_nloc_df helper_msa_nloc_df_mips64el
-#define helper_msa_pcnt_df helper_msa_pcnt_df_mips64el
-#define helper_msa_fcaf_df helper_msa_fcaf_df_mips64el
-#define helper_msa_fcun_df helper_msa_fcun_df_mips64el
-#define helper_msa_fceq_df helper_msa_fceq_df_mips64el
-#define helper_msa_fcueq_df helper_msa_fcueq_df_mips64el
-#define helper_msa_fclt_df helper_msa_fclt_df_mips64el
-#define helper_msa_fcult_df helper_msa_fcult_df_mips64el
-#define helper_msa_fcle_df helper_msa_fcle_df_mips64el
-#define helper_msa_fcule_df helper_msa_fcule_df_mips64el
-#define helper_msa_fsaf_df helper_msa_fsaf_df_mips64el
-#define helper_msa_fsun_df helper_msa_fsun_df_mips64el
-#define helper_msa_fseq_df helper_msa_fseq_df_mips64el
-#define helper_msa_fsueq_df helper_msa_fsueq_df_mips64el
-#define helper_msa_fslt_df helper_msa_fslt_df_mips64el
-#define helper_msa_fsult_df helper_msa_fsult_df_mips64el
-#define helper_msa_fsle_df helper_msa_fsle_df_mips64el
-#define helper_msa_fsule_df helper_msa_fsule_df_mips64el
-#define helper_msa_fcor_df helper_msa_fcor_df_mips64el
-#define helper_msa_fcune_df helper_msa_fcune_df_mips64el
-#define helper_msa_fcne_df helper_msa_fcne_df_mips64el
-#define helper_msa_fsor_df helper_msa_fsor_df_mips64el
-#define helper_msa_fsune_df helper_msa_fsune_df_mips64el
-#define helper_msa_fsne_df helper_msa_fsne_df_mips64el
-#define helper_msa_fadd_df helper_msa_fadd_df_mips64el
-#define helper_msa_fsub_df helper_msa_fsub_df_mips64el
-#define helper_msa_fmul_df helper_msa_fmul_df_mips64el
-#define helper_msa_fdiv_df helper_msa_fdiv_df_mips64el
-#define helper_msa_fmadd_df helper_msa_fmadd_df_mips64el
-#define helper_msa_fmsub_df helper_msa_fmsub_df_mips64el
-#define helper_msa_fexp2_df helper_msa_fexp2_df_mips64el
-#define helper_msa_fexdo_df helper_msa_fexdo_df_mips64el
-#define helper_msa_ftq_df helper_msa_ftq_df_mips64el
-#define helper_msa_fmin_df helper_msa_fmin_df_mips64el
-#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips64el
-#define helper_msa_fmax_df helper_msa_fmax_df_mips64el
-#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips64el
-#define helper_msa_fclass_df helper_msa_fclass_df_mips64el
-#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips64el
-#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips64el
-#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips64el
-#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips64el
-#define helper_msa_frcp_df helper_msa_frcp_df_mips64el
-#define helper_msa_frint_df helper_msa_frint_df_mips64el
-#define helper_msa_flog2_df helper_msa_flog2_df_mips64el
-#define helper_msa_fexupl_df helper_msa_fexupl_df_mips64el
-#define helper_msa_fexupr_df helper_msa_fexupr_df_mips64el
-#define helper_msa_ffql_df helper_msa_ffql_df_mips64el
-#define helper_msa_ffqr_df helper_msa_ffqr_df_mips64el
-#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips64el
-#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips64el
-#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips64el
-#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips64el
-#define helper_paddsb helper_paddsb_mips64el
-#define helper_paddusb helper_paddusb_mips64el
-#define helper_paddsh helper_paddsh_mips64el
-#define helper_paddush helper_paddush_mips64el
-#define helper_paddb helper_paddb_mips64el
-#define helper_paddh helper_paddh_mips64el
-#define helper_paddw helper_paddw_mips64el
-#define helper_psubsb helper_psubsb_mips64el
-#define helper_psubusb helper_psubusb_mips64el
-#define helper_psubsh helper_psubsh_mips64el
-#define helper_psubush helper_psubush_mips64el
-#define helper_psubb helper_psubb_mips64el
-#define helper_psubh helper_psubh_mips64el
-#define helper_psubw helper_psubw_mips64el
-#define helper_pshufh helper_pshufh_mips64el
-#define helper_packsswh helper_packsswh_mips64el
-#define helper_packsshb helper_packsshb_mips64el
-#define helper_packushb helper_packushb_mips64el
-#define helper_punpcklwd helper_punpcklwd_mips64el
-#define helper_punpckhwd helper_punpckhwd_mips64el
-#define helper_punpcklhw helper_punpcklhw_mips64el
-#define helper_punpckhhw helper_punpckhhw_mips64el
-#define helper_punpcklbh helper_punpcklbh_mips64el
-#define helper_punpckhbh helper_punpckhbh_mips64el
-#define helper_pavgh helper_pavgh_mips64el
-#define helper_pavgb helper_pavgb_mips64el
-#define helper_pmaxsh helper_pmaxsh_mips64el
-#define helper_pminsh helper_pminsh_mips64el
-#define helper_pmaxub helper_pmaxub_mips64el
-#define helper_pminub helper_pminub_mips64el
-#define helper_pcmpeqw helper_pcmpeqw_mips64el
-#define helper_pcmpgtw helper_pcmpgtw_mips64el
-#define helper_pcmpeqh helper_pcmpeqh_mips64el
-#define helper_pcmpgth helper_pcmpgth_mips64el
-#define helper_pcmpeqb helper_pcmpeqb_mips64el
-#define helper_pcmpgtb helper_pcmpgtb_mips64el
-#define helper_psllw helper_psllw_mips64el
-#define helper_psrlw helper_psrlw_mips64el
-#define helper_psraw helper_psraw_mips64el
-#define helper_psllh helper_psllh_mips64el
-#define helper_psrlh helper_psrlh_mips64el
-#define helper_psrah helper_psrah_mips64el
-#define helper_pmullh helper_pmullh_mips64el
-#define helper_pmulhh helper_pmulhh_mips64el
-#define helper_pmulhuh helper_pmulhuh_mips64el
-#define helper_pmaddhw helper_pmaddhw_mips64el
-#define helper_pasubub helper_pasubub_mips64el
-#define helper_biadd helper_biadd_mips64el
-#define helper_pmovmskb helper_pmovmskb_mips64el
 #define helper_absq_s_ph helper_absq_s_ph_mips64el
 #define helper_absq_s_qb helper_absq_s_qb_mips64el
 #define helper_absq_s_w helper_absq_s_w_mips64el
+#define helper_absq_s_ob helper_absq_s_ob_mips64el
+#define helper_absq_s_qh helper_absq_s_qh_mips64el
+#define helper_absq_s_pw helper_absq_s_pw_mips64el
 #define helper_addqh_ph helper_addqh_ph_mips64el
 #define helper_addqh_r_ph helper_addqh_r_ph_mips64el
 #define helper_addqh_r_w helper_addqh_r_w_mips64el
@@ -3279,35 +1450,89 @@
 #define helper_subu_qb helper_subu_qb_mips64el
 #define helper_subu_s_ph helper_subu_s_ph_mips64el
 #define helper_subu_s_qb helper_subu_s_qb_mips64el
+#define helper_adduh_ob helper_adduh_ob_mips64el
+#define helper_adduh_r_ob helper_adduh_r_ob_mips64el
+#define helper_subuh_ob helper_subuh_ob_mips64el
+#define helper_subuh_r_ob helper_subuh_r_ob_mips64el
+#define helper_addq_pw helper_addq_pw_mips64el
+#define helper_addq_qh helper_addq_qh_mips64el
+#define helper_addq_s_pw helper_addq_s_pw_mips64el
+#define helper_addq_s_qh helper_addq_s_qh_mips64el
+#define helper_addu_ob helper_addu_ob_mips64el
+#define helper_addu_qh helper_addu_qh_mips64el
+#define helper_addu_s_ob helper_addu_s_ob_mips64el
+#define helper_addu_s_qh helper_addu_s_qh_mips64el
+#define helper_subq_pw helper_subq_pw_mips64el
+#define helper_subq_qh helper_subq_qh_mips64el
+#define helper_subq_s_pw helper_subq_s_pw_mips64el
+#define helper_subq_s_qh helper_subq_s_qh_mips64el
+#define helper_subu_ob helper_subu_ob_mips64el
+#define helper_subu_qh helper_subu_qh_mips64el
+#define helper_subu_s_ob helper_subu_s_ob_mips64el
+#define helper_subu_s_qh helper_subu_s_qh_mips64el
 #define helper_subuh_qb helper_subuh_qb_mips64el
 #define helper_subuh_r_qb helper_subuh_r_qb_mips64el
 #define helper_addsc helper_addsc_mips64el
 #define helper_addwc helper_addwc_mips64el
 #define helper_modsub helper_modsub_mips64el
 #define helper_raddu_w_qb helper_raddu_w_qb_mips64el
+#define helper_raddu_l_ob helper_raddu_l_ob_mips64el
 #define helper_precr_qb_ph helper_precr_qb_ph_mips64el
 #define helper_precrq_qb_ph helper_precrq_qb_ph_mips64el
 #define helper_precr_sra_ph_w helper_precr_sra_ph_w_mips64el
 #define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mips64el
 #define helper_precrq_ph_w helper_precrq_ph_w_mips64el
 #define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mips64el
+#define helper_precr_ob_qh helper_precr_ob_qh_mips64el
+#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips64el
+#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips64el
+#define helper_precrq_ob_qh helper_precrq_ob_qh_mips64el
+#define helper_precrq_qh_pw helper_precrq_qh_pw_mips64el
+#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips64el
+#define helper_precrq_pw_l helper_precrq_pw_l_mips64el
 #define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mips64el
+#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips64el
+#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips64el
+#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips64el
+#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips64el
+#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips64el
 #define helper_precequ_ph_qbl helper_precequ_ph_qbl_mips64el
 #define helper_precequ_ph_qbr helper_precequ_ph_qbr_mips64el
 #define helper_precequ_ph_qbla helper_precequ_ph_qbla_mips64el
 #define helper_precequ_ph_qbra helper_precequ_ph_qbra_mips64el
+#define helper_precequ_qh_obl helper_precequ_qh_obl_mips64el
+#define helper_precequ_qh_obr helper_precequ_qh_obr_mips64el
+#define helper_precequ_qh_obla helper_precequ_qh_obla_mips64el
+#define helper_precequ_qh_obra helper_precequ_qh_obra_mips64el
 #define helper_preceu_ph_qbl helper_preceu_ph_qbl_mips64el
 #define helper_preceu_ph_qbr helper_preceu_ph_qbr_mips64el
 #define helper_preceu_ph_qbla helper_preceu_ph_qbla_mips64el
 #define helper_preceu_ph_qbra helper_preceu_ph_qbra_mips64el
+#define helper_preceu_qh_obl helper_preceu_qh_obl_mips64el
+#define helper_preceu_qh_obr helper_preceu_qh_obr_mips64el
+#define helper_preceu_qh_obla helper_preceu_qh_obla_mips64el
+#define helper_preceu_qh_obra helper_preceu_qh_obra_mips64el
 #define helper_shll_qb helper_shll_qb_mips64el
 #define helper_shrl_qb helper_shrl_qb_mips64el
 #define helper_shra_qb helper_shra_qb_mips64el
 #define helper_shra_r_qb helper_shra_r_qb_mips64el
+#define helper_shll_ob helper_shll_ob_mips64el
+#define helper_shrl_ob helper_shrl_ob_mips64el
+#define helper_shra_ob helper_shra_ob_mips64el
+#define helper_shra_r_ob helper_shra_r_ob_mips64el
 #define helper_shll_ph helper_shll_ph_mips64el
 #define helper_shll_s_ph helper_shll_s_ph_mips64el
+#define helper_shll_qh helper_shll_qh_mips64el
+#define helper_shll_s_qh helper_shll_s_qh_mips64el
+#define helper_shrl_qh helper_shrl_qh_mips64el
+#define helper_shra_qh helper_shra_qh_mips64el
+#define helper_shra_r_qh helper_shra_r_qh_mips64el
 #define helper_shll_s_w helper_shll_s_w_mips64el
 #define helper_shra_r_w helper_shra_r_w_mips64el
+#define helper_shll_pw helper_shll_pw_mips64el
+#define helper_shll_s_pw helper_shll_s_pw_mips64el
+#define helper_shra_pw helper_shra_pw_mips64el
+#define helper_shra_r_pw helper_shra_r_pw_mips64el
 #define helper_shrl_ph helper_shrl_ph_mips64el
 #define helper_shra_ph helper_shra_ph_mips64el
 #define helper_shra_r_ph helper_shra_r_ph_mips64el
@@ -3321,10 +1546,20 @@
 #define helper_muleq_s_w_phr helper_muleq_s_w_phr_mips64el
 #define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mips64el
 #define helper_mulsa_w_ph helper_mulsa_w_ph_mips64el
+#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips64el
+#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips64el
+#define helper_mulq_rs_qh helper_mulq_rs_qh_mips64el
+#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips64el
+#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips64el
+#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips64el
 #define helper_dpau_h_qbl helper_dpau_h_qbl_mips64el
 #define helper_dpau_h_qbr helper_dpau_h_qbr_mips64el
 #define helper_dpsu_h_qbl helper_dpsu_h_qbl_mips64el
 #define helper_dpsu_h_qbr helper_dpsu_h_qbr_mips64el
+#define helper_dpau_h_obl helper_dpau_h_obl_mips64el
+#define helper_dpau_h_obr helper_dpau_h_obr_mips64el
+#define helper_dpsu_h_obl helper_dpsu_h_obl_mips64el
+#define helper_dpsu_h_obr helper_dpsu_h_obr_mips64el
 #define helper_dpa_w_ph helper_dpa_w_ph_mips64el
 #define helper_dpax_w_ph helper_dpax_w_ph_mips64el
 #define helper_dps_w_ph helper_dps_w_ph_mips64el
@@ -3335,200 +1570,92 @@
 #define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mips64el
 #define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mips64el
 #define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mips64el
+#define helper_dpa_w_qh helper_dpa_w_qh_mips64el
+#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips64el
+#define helper_dps_w_qh helper_dps_w_qh_mips64el
+#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips64el
 #define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mips64el
 #define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mips64el
+#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips64el
+#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips64el
+#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips64el
 #define helper_maq_s_w_phl helper_maq_s_w_phl_mips64el
 #define helper_maq_s_w_phr helper_maq_s_w_phr_mips64el
 #define helper_maq_sa_w_phl helper_maq_sa_w_phl_mips64el
 #define helper_maq_sa_w_phr helper_maq_sa_w_phr_mips64el
 #define helper_mulq_s_w helper_mulq_s_w_mips64el
 #define helper_mulq_rs_w helper_mulq_rs_w_mips64el
+#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips64el
+#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips64el
+#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips64el
+#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips64el
+#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips64el
+#define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips64el
+#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips64el
+#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips64el
+#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips64el
+#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips64el
+#define helper_dmadd helper_dmadd_mips64el
+#define helper_dmaddu helper_dmaddu_mips64el
+#define helper_dmsub helper_dmsub_mips64el
+#define helper_dmsubu helper_dmsubu_mips64el
 #define helper_bitrev helper_bitrev_mips64el
 #define helper_insv helper_insv_mips64el
+#define helper_dinsv helper_dinsv_mips64el
 #define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mips64el
 #define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mips64el
 #define helper_cmpgu_le_qb helper_cmpgu_le_qb_mips64el
+#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips64el
+#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips64el
+#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips64el
 #define helper_cmpu_eq_qb helper_cmpu_eq_qb_mips64el
 #define helper_cmpu_lt_qb helper_cmpu_lt_qb_mips64el
 #define helper_cmpu_le_qb helper_cmpu_le_qb_mips64el
 #define helper_cmp_eq_ph helper_cmp_eq_ph_mips64el
 #define helper_cmp_lt_ph helper_cmp_lt_ph_mips64el
 #define helper_cmp_le_ph helper_cmp_le_ph_mips64el
+#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips64el
+#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips64el
+#define helper_cmpu_le_ob helper_cmpu_le_ob_mips64el
+#define helper_cmp_eq_qh helper_cmp_eq_qh_mips64el
+#define helper_cmp_lt_qh helper_cmp_lt_qh_mips64el
+#define helper_cmp_le_qh helper_cmp_le_qh_mips64el
+#define helper_cmp_eq_pw helper_cmp_eq_pw_mips64el
+#define helper_cmp_lt_pw helper_cmp_lt_pw_mips64el
+#define helper_cmp_le_pw helper_cmp_le_pw_mips64el
+#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips64el
+#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips64el
+#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips64el
 #define helper_pick_qb helper_pick_qb_mips64el
 #define helper_pick_ph helper_pick_ph_mips64el
+#define helper_pick_ob helper_pick_ob_mips64el
+#define helper_pick_qh helper_pick_qh_mips64el
+#define helper_pick_pw helper_pick_pw_mips64el
 #define helper_packrl_ph helper_packrl_ph_mips64el
+#define helper_packrl_pw helper_packrl_pw_mips64el
 #define helper_extr_w helper_extr_w_mips64el
 #define helper_extr_r_w helper_extr_r_w_mips64el
 #define helper_extr_rs_w helper_extr_rs_w_mips64el
+#define helper_dextr_w helper_dextr_w_mips64el
+#define helper_dextr_r_w helper_dextr_r_w_mips64el
+#define helper_dextr_rs_w helper_dextr_rs_w_mips64el
+#define helper_dextr_l helper_dextr_l_mips64el
+#define helper_dextr_r_l helper_dextr_r_l_mips64el
+#define helper_dextr_rs_l helper_dextr_rs_l_mips64el
 #define helper_extr_s_h helper_extr_s_h_mips64el
+#define helper_dextr_s_h helper_dextr_s_h_mips64el
 #define helper_extp helper_extp_mips64el
 #define helper_extpdp helper_extpdp_mips64el
+#define helper_dextp helper_dextp_mips64el
+#define helper_dextpdp helper_dextpdp_mips64el
 #define helper_shilo helper_shilo_mips64el
+#define helper_dshilo helper_dshilo_mips64el
 #define helper_mthlip helper_mthlip_mips64el
+#define helper_dmthlip helper_dmthlip_mips64el
 #define cpu_wrdsp cpu_wrdsp_mips64el
 #define helper_wrdsp helper_wrdsp_mips64el
 #define cpu_rddsp cpu_rddsp_mips64el
 #define helper_rddsp helper_rddsp_mips64el
-#define helper_raise_exception_err helper_raise_exception_err_mips64el
-#define helper_clo helper_clo_mips64el
-#define helper_clz helper_clz_mips64el
-#define helper_muls helper_muls_mips64el
-#define helper_mulsu helper_mulsu_mips64el
-#define helper_macc helper_macc_mips64el
-#define helper_macchi helper_macchi_mips64el
-#define helper_maccu helper_maccu_mips64el
-#define helper_macchiu helper_macchiu_mips64el
-#define helper_msac helper_msac_mips64el
-#define helper_msachi helper_msachi_mips64el
-#define helper_msacu helper_msacu_mips64el
-#define helper_msachiu helper_msachiu_mips64el
-#define helper_mulhi helper_mulhi_mips64el
-#define helper_mulhiu helper_mulhiu_mips64el
-#define helper_mulshi helper_mulshi_mips64el
-#define helper_mulshiu helper_mulshiu_mips64el
-#define helper_bitswap helper_bitswap_mips64el
-#define helper_ll helper_ll_mips64el
-#define helper_sc helper_sc_mips64el
-#define helper_swl helper_swl_mips64el
-#define helper_swr helper_swr_mips64el
-#define helper_lwm helper_lwm_mips64el
-#define helper_swm helper_swm_mips64el
-#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips64el
-#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips64el
-#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips64el
-#define helper_mfc0_random helper_mfc0_random_mips64el
-#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips64el
-#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips64el
-#define helper_mfc0_tcbind helper_mfc0_tcbind_mips64el
-#define helper_mftc0_tcbind helper_mftc0_tcbind_mips64el
-#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips64el
-#define helper_mftc0_tcrestart helper_mftc0_tcrestart_mips64el
-#define helper_mfc0_tchalt helper_mfc0_tchalt_mips64el
-#define helper_mftc0_tchalt helper_mftc0_tchalt_mips64el
-#define helper_mfc0_tccontext helper_mfc0_tccontext_mips64el
-#define helper_mftc0_tccontext helper_mftc0_tccontext_mips64el
-#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips64el
-#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips64el
-#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips64el
-#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips64el
-#define helper_mfc0_count helper_mfc0_count_mips64el
-#define helper_mftc0_entryhi helper_mftc0_entryhi_mips64el
-#define helper_mftc0_cause helper_mftc0_cause_mips64el
-#define helper_mftc0_status helper_mftc0_status_mips64el
-#define helper_mfc0_lladdr helper_mfc0_lladdr_mips64el
-#define helper_mfc0_watchlo helper_mfc0_watchlo_mips64el
-#define helper_mfc0_watchhi helper_mfc0_watchhi_mips64el
-#define helper_mfc0_debug helper_mfc0_debug_mips64el
-#define helper_mftc0_debug helper_mftc0_debug_mips64el
-#define helper_mtc0_index helper_mtc0_index_mips64el
-#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips64el
-#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips64el
-#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips64el
-#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips64el
-#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips64el
-#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips64el
-#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips64el
-#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips64el
-#define helper_mtc0_yqmask helper_mtc0_yqmask_mips64el
-#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips64el
-#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips64el
-#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips64el
-#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips64el
-#define helper_mtc0_tcbind helper_mtc0_tcbind_mips64el
-#define helper_mttc0_tcbind helper_mttc0_tcbind_mips64el
-#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips64el
-#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips64el
-#define helper_mtc0_tchalt helper_mtc0_tchalt_mips64el
-#define helper_mttc0_tchalt helper_mttc0_tchalt_mips64el
-#define helper_mtc0_tccontext helper_mtc0_tccontext_mips64el
-#define helper_mttc0_tccontext helper_mttc0_tccontext_mips64el
-#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips64el
-#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips64el
-#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips64el
-#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips64el
-#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips64el
-#define helper_mtc0_context helper_mtc0_context_mips64el
-#define helper_mtc0_pagemask helper_mtc0_pagemask_mips64el
-#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips64el
-#define helper_mtc0_wired helper_mtc0_wired_mips64el
-#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips64el
-#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips64el
-#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips64el
-#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips64el
-#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips64el
-#define helper_mtc0_hwrena helper_mtc0_hwrena_mips64el
-#define helper_mtc0_count helper_mtc0_count_mips64el
-#define helper_mtc0_entryhi helper_mtc0_entryhi_mips64el
-#define helper_mttc0_entryhi helper_mttc0_entryhi_mips64el
-#define helper_mtc0_compare helper_mtc0_compare_mips64el
-#define helper_mtc0_status helper_mtc0_status_mips64el
-#define helper_mttc0_status helper_mttc0_status_mips64el
-#define helper_mtc0_intctl helper_mtc0_intctl_mips64el
-#define helper_mtc0_srsctl helper_mtc0_srsctl_mips64el
-#define helper_mtc0_cause helper_mtc0_cause_mips64el
-#define helper_mttc0_cause helper_mttc0_cause_mips64el
-#define helper_mftc0_epc helper_mftc0_epc_mips64el
-#define helper_mftc0_ebase helper_mftc0_ebase_mips64el
-#define helper_mtc0_ebase helper_mtc0_ebase_mips64el
-#define helper_mttc0_ebase helper_mttc0_ebase_mips64el
-#define helper_mftc0_configx helper_mftc0_configx_mips64el
-#define helper_mtc0_config0 helper_mtc0_config0_mips64el
-#define helper_mtc0_config2 helper_mtc0_config2_mips64el
-#define helper_mtc0_config4 helper_mtc0_config4_mips64el
-#define helper_mtc0_config5 helper_mtc0_config5_mips64el
-#define helper_mtc0_lladdr helper_mtc0_lladdr_mips64el
-#define helper_mtc0_watchlo helper_mtc0_watchlo_mips64el
-#define helper_mtc0_watchhi helper_mtc0_watchhi_mips64el
-#define helper_mtc0_xcontext helper_mtc0_xcontext_mips64el
-#define helper_mtc0_framemask helper_mtc0_framemask_mips64el
-#define helper_mtc0_debug helper_mtc0_debug_mips64el
-#define helper_mttc0_debug helper_mttc0_debug_mips64el
-#define helper_mtc0_performance0 helper_mtc0_performance0_mips64el
-#define helper_mtc0_taglo helper_mtc0_taglo_mips64el
-#define helper_mtc0_datalo helper_mtc0_datalo_mips64el
-#define helper_mtc0_taghi helper_mtc0_taghi_mips64el
-#define helper_mtc0_datahi helper_mtc0_datahi_mips64el
-#define helper_mftgpr helper_mftgpr_mips64el
-#define helper_mftlo helper_mftlo_mips64el
-#define helper_mfthi helper_mfthi_mips64el
-#define helper_mftacx helper_mftacx_mips64el
-#define helper_mftdsp helper_mftdsp_mips64el
-#define helper_mttgpr helper_mttgpr_mips64el
-#define helper_mttlo helper_mttlo_mips64el
-#define helper_mtthi helper_mtthi_mips64el
-#define helper_mttacx helper_mttacx_mips64el
-#define helper_mttdsp helper_mttdsp_mips64el
-#define helper_dmt helper_dmt_mips64el
-#define helper_emt helper_emt_mips64el
-#define helper_dvpe helper_dvpe_mips64el
-#define helper_evpe helper_evpe_mips64el
-#define helper_fork helper_fork_mips64el
-#define helper_yield helper_yield_mips64el
-#define r4k_helper_tlbinv r4k_helper_tlbinv_mips64el
-#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips64el
-#define r4k_helper_tlbwi r4k_helper_tlbwi_mips64el
-#define r4k_helper_tlbwr r4k_helper_tlbwr_mips64el
-#define r4k_helper_tlbp r4k_helper_tlbp_mips64el
-#define r4k_helper_tlbr r4k_helper_tlbr_mips64el
-#define helper_tlbwi helper_tlbwi_mips64el
-#define helper_tlbwr helper_tlbwr_mips64el
-#define helper_tlbp helper_tlbp_mips64el
-#define helper_tlbr helper_tlbr_mips64el
-#define helper_tlbinv helper_tlbinv_mips64el
-#define helper_tlbinvf helper_tlbinvf_mips64el
-#define helper_di helper_di_mips64el
-#define helper_ei helper_ei_mips64el
-#define helper_eret helper_eret_mips64el
-#define helper_deret helper_deret_mips64el
-#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips64el
-#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips64el
-#define helper_rdhwr_cc helper_rdhwr_cc_mips64el
-#define helper_rdhwr_ccres helper_rdhwr_ccres_mips64el
-#define helper_pmon helper_pmon_mips64el
-#define helper_wait helper_wait_mips64el
-#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips64el
-#define mips_cpu_unassigned_access mips_cpu_unassigned_access_mips64el
-#define ieee_rm ieee_rm_mips64el
 #define helper_cfc1 helper_cfc1_mips64el
 #define helper_ctc1 helper_ctc1_mips64el
 #define ieee_ex_to_mips ieee_ex_to_mips_mips64el
@@ -3537,8 +1664,8 @@
 #define helper_float_cvtd_s helper_float_cvtd_s_mips64el
 #define helper_float_cvtd_w helper_float_cvtd_w_mips64el
 #define helper_float_cvtd_l helper_float_cvtd_l_mips64el
-#define helper_float_cvtl_d helper_float_cvtl_d_mips64el
-#define helper_float_cvtl_s helper_float_cvtl_s_mips64el
+#define helper_float_cvt_l_d helper_float_cvt_l_d_mips64el
+#define helper_float_cvt_l_s helper_float_cvt_l_s_mips64el
 #define helper_float_cvtps_pw helper_float_cvtps_pw_mips64el
 #define helper_float_cvtpw_ps helper_float_cvtpw_ps_mips64el
 #define helper_float_cvts_d helper_float_cvts_d_mips64el
@@ -3546,46 +1673,50 @@
 #define helper_float_cvts_l helper_float_cvts_l_mips64el
 #define helper_float_cvts_pl helper_float_cvts_pl_mips64el
 #define helper_float_cvts_pu helper_float_cvts_pu_mips64el
-#define helper_float_cvtw_s helper_float_cvtw_s_mips64el
-#define helper_float_cvtw_d helper_float_cvtw_d_mips64el
-#define helper_float_roundl_d helper_float_roundl_d_mips64el
-#define helper_float_roundl_s helper_float_roundl_s_mips64el
-#define helper_float_roundw_d helper_float_roundw_d_mips64el
-#define helper_float_roundw_s helper_float_roundw_s_mips64el
-#define helper_float_truncl_d helper_float_truncl_d_mips64el
-#define helper_float_truncl_s helper_float_truncl_s_mips64el
-#define helper_float_truncw_d helper_float_truncw_d_mips64el
-#define helper_float_truncw_s helper_float_truncw_s_mips64el
-#define helper_float_ceill_d helper_float_ceill_d_mips64el
-#define helper_float_ceill_s helper_float_ceill_s_mips64el
-#define helper_float_ceilw_d helper_float_ceilw_d_mips64el
-#define helper_float_ceilw_s helper_float_ceilw_s_mips64el
-#define helper_float_floorl_d helper_float_floorl_d_mips64el
-#define helper_float_floorl_s helper_float_floorl_s_mips64el
-#define helper_float_floorw_d helper_float_floorw_d_mips64el
-#define helper_float_floorw_s helper_float_floorw_s_mips64el
+#define helper_float_cvt_w_s helper_float_cvt_w_s_mips64el
+#define helper_float_cvt_w_d helper_float_cvt_w_d_mips64el
+#define helper_float_round_l_d helper_float_round_l_d_mips64el
+#define helper_float_round_l_s helper_float_round_l_s_mips64el
+#define helper_float_round_w_d helper_float_round_w_d_mips64el
+#define helper_float_round_w_s helper_float_round_w_s_mips64el
+#define helper_float_trunc_l_d helper_float_trunc_l_d_mips64el
+#define helper_float_trunc_l_s helper_float_trunc_l_s_mips64el
+#define helper_float_trunc_w_d helper_float_trunc_w_d_mips64el
+#define helper_float_trunc_w_s helper_float_trunc_w_s_mips64el
+#define helper_float_ceil_l_d helper_float_ceil_l_d_mips64el
+#define helper_float_ceil_l_s helper_float_ceil_l_s_mips64el
+#define helper_float_ceil_w_d helper_float_ceil_w_d_mips64el
+#define helper_float_ceil_w_s helper_float_ceil_w_s_mips64el
+#define helper_float_floor_l_d helper_float_floor_l_d_mips64el
+#define helper_float_floor_l_s helper_float_floor_l_s_mips64el
+#define helper_float_floor_w_d helper_float_floor_w_d_mips64el
+#define helper_float_floor_w_s helper_float_floor_w_s_mips64el
+#define helper_float_cvt_2008_l_d helper_float_cvt_2008_l_d_mips64el
+#define helper_float_cvt_2008_l_s helper_float_cvt_2008_l_s_mips64el
+#define helper_float_cvt_2008_w_d helper_float_cvt_2008_w_d_mips64el
+#define helper_float_cvt_2008_w_s helper_float_cvt_2008_w_s_mips64el
+#define helper_float_round_2008_l_d helper_float_round_2008_l_d_mips64el
+#define helper_float_round_2008_l_s helper_float_round_2008_l_s_mips64el
+#define helper_float_round_2008_w_d helper_float_round_2008_w_d_mips64el
+#define helper_float_round_2008_w_s helper_float_round_2008_w_s_mips64el
+#define helper_float_trunc_2008_l_d helper_float_trunc_2008_l_d_mips64el
+#define helper_float_trunc_2008_l_s helper_float_trunc_2008_l_s_mips64el
+#define helper_float_trunc_2008_w_d helper_float_trunc_2008_w_d_mips64el
+#define helper_float_trunc_2008_w_s helper_float_trunc_2008_w_s_mips64el
+#define helper_float_ceil_2008_l_d helper_float_ceil_2008_l_d_mips64el
+#define helper_float_ceil_2008_l_s helper_float_ceil_2008_l_s_mips64el
+#define helper_float_ceil_2008_w_d helper_float_ceil_2008_w_d_mips64el
+#define helper_float_ceil_2008_w_s helper_float_ceil_2008_w_s_mips64el
+#define helper_float_floor_2008_l_d helper_float_floor_2008_l_d_mips64el
+#define helper_float_floor_2008_l_s helper_float_floor_2008_l_s_mips64el
+#define helper_float_floor_2008_w_d helper_float_floor_2008_w_d_mips64el
+#define helper_float_floor_2008_w_s helper_float_floor_2008_w_s_mips64el
 #define helper_float_abs_d helper_float_abs_d_mips64el
 #define helper_float_abs_s helper_float_abs_s_mips64el
 #define helper_float_abs_ps helper_float_abs_ps_mips64el
 #define helper_float_chs_d helper_float_chs_d_mips64el
 #define helper_float_chs_s helper_float_chs_s_mips64el
 #define helper_float_chs_ps helper_float_chs_ps_mips64el
-#define helper_float_maddf_s helper_float_maddf_s_mips64el
-#define helper_float_maddf_d helper_float_maddf_d_mips64el
-#define helper_float_msubf_s helper_float_msubf_s_mips64el
-#define helper_float_msubf_d helper_float_msubf_d_mips64el
-#define helper_float_max_s helper_float_max_s_mips64el
-#define helper_float_max_d helper_float_max_d_mips64el
-#define helper_float_maxa_s helper_float_maxa_s_mips64el
-#define helper_float_maxa_d helper_float_maxa_d_mips64el
-#define helper_float_min_s helper_float_min_s_mips64el
-#define helper_float_min_d helper_float_min_d_mips64el
-#define helper_float_mina_s helper_float_mina_s_mips64el
-#define helper_float_mina_d helper_float_mina_d_mips64el
-#define helper_float_rint_s helper_float_rint_s_mips64el
-#define helper_float_rint_d helper_float_rint_d_mips64el
-#define helper_float_class_s helper_float_class_s_mips64el
-#define helper_float_class_d helper_float_class_d_mips64el
 #define helper_float_recip_d helper_float_recip_d_mips64el
 #define helper_float_recip_s helper_float_recip_s_mips64el
 #define helper_float_rsqrt_d helper_float_rsqrt_d_mips64el
@@ -3596,6 +1727,12 @@
 #define helper_float_rsqrt1_d helper_float_rsqrt1_d_mips64el
 #define helper_float_rsqrt1_s helper_float_rsqrt1_s_mips64el
 #define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mips64el
+#define helper_float_rint_s helper_float_rint_s_mips64el
+#define helper_float_rint_d helper_float_rint_d_mips64el
+#define float_class_s float_class_s_mips64el
+#define helper_float_class_s helper_float_class_s_mips64el
+#define float_class_d float_class_d_mips64el
+#define helper_float_class_d helper_float_class_d_mips64el
 #define helper_float_add_d helper_float_add_d_mips64el
 #define helper_float_add_s helper_float_add_s_mips64el
 #define helper_float_add_ps helper_float_add_ps_mips64el
@@ -3608,6 +1745,22 @@
 #define helper_float_div_d helper_float_div_d_mips64el
 #define helper_float_div_s helper_float_div_s_mips64el
 #define helper_float_div_ps helper_float_div_ps_mips64el
+#define helper_float_recip2_d helper_float_recip2_d_mips64el
+#define helper_float_recip2_s helper_float_recip2_s_mips64el
+#define helper_float_recip2_ps helper_float_recip2_ps_mips64el
+#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips64el
+#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips64el
+#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips64el
+#define helper_float_addr_ps helper_float_addr_ps_mips64el
+#define helper_float_mulr_ps helper_float_mulr_ps_mips64el
+#define helper_float_max_s helper_float_max_s_mips64el
+#define helper_float_max_d helper_float_max_d_mips64el
+#define helper_float_maxa_s helper_float_maxa_s_mips64el
+#define helper_float_maxa_d helper_float_maxa_d_mips64el
+#define helper_float_min_s helper_float_min_s_mips64el
+#define helper_float_min_d helper_float_min_d_mips64el
+#define helper_float_mina_s helper_float_mina_s_mips64el
+#define helper_float_mina_d helper_float_mina_d_mips64el
 #define helper_float_madd_d helper_float_madd_d_mips64el
 #define helper_float_madd_s helper_float_madd_s_mips64el
 #define helper_float_madd_ps helper_float_madd_ps_mips64el
@@ -3620,14 +1773,10 @@
 #define helper_float_nmsub_d helper_float_nmsub_d_mips64el
 #define helper_float_nmsub_s helper_float_nmsub_s_mips64el
 #define helper_float_nmsub_ps helper_float_nmsub_ps_mips64el
-#define helper_float_recip2_d helper_float_recip2_d_mips64el
-#define helper_float_recip2_s helper_float_recip2_s_mips64el
-#define helper_float_recip2_ps helper_float_recip2_ps_mips64el
-#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips64el
-#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips64el
-#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips64el
-#define helper_float_addr_ps helper_float_addr_ps_mips64el
-#define helper_float_mulr_ps helper_float_mulr_ps_mips64el
+#define helper_float_maddf_s helper_float_maddf_s_mips64el
+#define helper_float_maddf_d helper_float_maddf_d_mips64el
+#define helper_float_msubf_s helper_float_msubf_s_mips64el
+#define helper_float_msubf_d helper_float_msubf_d_mips64el
 #define helper_cmp_d_f helper_cmp_d_f_mips64el
 #define helper_cmpabs_d_f helper_cmpabs_d_f_mips64el
 #define helper_cmp_d_un helper_cmp_d_un_mips64el
@@ -3768,161 +1917,475 @@
 #define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mips64el
 #define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mips64el
 #define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mips64el
-#define helper_msa_ld_df helper_msa_ld_df_mips64el
-#define helper_msa_st_df helper_msa_st_df_mips64el
 #define no_mmu_map_address no_mmu_map_address_mips64el
 #define fixed_mmu_map_address fixed_mmu_map_address_mips64el
 #define r4k_map_address r4k_map_address_mips64el
+#define cpu_mips_tlb_flush cpu_mips_tlb_flush_mips64el
+#define sync_c0_status sync_c0_status_mips64el
+#define cpu_mips_store_status cpu_mips_store_status_mips64el
+#define cpu_mips_store_cause cpu_mips_store_cause_mips64el
 #define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mips64el
-#define mips_cpu_handle_mmu_fault mips_cpu_handle_mmu_fault_mips64el
+#define mips_cpu_tlb_fill mips_cpu_tlb_fill_mips64el
 #define cpu_mips_translate_address cpu_mips_translate_address_mips64el
 #define exception_resume_pc exception_resume_pc_mips64el
 #define mips_cpu_do_interrupt mips_cpu_do_interrupt_mips64el
 #define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mips64el
 #define r4k_invalidate_tlb r4k_invalidate_tlb_mips64el
-#define helper_absq_s_ob helper_absq_s_ob_mips64el
-#define helper_absq_s_qh helper_absq_s_qh_mips64el
-#define helper_absq_s_pw helper_absq_s_pw_mips64el
-#define helper_adduh_ob helper_adduh_ob_mips64el
-#define helper_adduh_r_ob helper_adduh_r_ob_mips64el
-#define helper_subuh_ob helper_subuh_ob_mips64el
-#define helper_subuh_r_ob helper_subuh_r_ob_mips64el
-#define helper_addq_pw helper_addq_pw_mips64el
-#define helper_addq_qh helper_addq_qh_mips64el
-#define helper_addq_s_pw helper_addq_s_pw_mips64el
-#define helper_addq_s_qh helper_addq_s_qh_mips64el
-#define helper_addu_ob helper_addu_ob_mips64el
-#define helper_addu_qh helper_addu_qh_mips64el
-#define helper_addu_s_ob helper_addu_s_ob_mips64el
-#define helper_addu_s_qh helper_addu_s_qh_mips64el
-#define helper_subq_pw helper_subq_pw_mips64el
-#define helper_subq_qh helper_subq_qh_mips64el
-#define helper_subq_s_pw helper_subq_s_pw_mips64el
-#define helper_subq_s_qh helper_subq_s_qh_mips64el
-#define helper_subu_ob helper_subu_ob_mips64el
-#define helper_subu_qh helper_subu_qh_mips64el
-#define helper_subu_s_ob helper_subu_s_ob_mips64el
-#define helper_subu_s_qh helper_subu_s_qh_mips64el
-#define helper_raddu_l_ob helper_raddu_l_ob_mips64el
-#define helper_precr_ob_qh helper_precr_ob_qh_mips64el
-#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips64el
-#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips64el
-#define helper_precrq_ob_qh helper_precrq_ob_qh_mips64el
-#define helper_precrq_qh_pw helper_precrq_qh_pw_mips64el
-#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips64el
-#define helper_precrq_pw_l helper_precrq_pw_l_mips64el
-#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips64el
-#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips64el
-#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips64el
-#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips64el
-#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips64el
-#define helper_precequ_qh_obl helper_precequ_qh_obl_mips64el
-#define helper_precequ_qh_obr helper_precequ_qh_obr_mips64el
-#define helper_precequ_qh_obla helper_precequ_qh_obla_mips64el
-#define helper_precequ_qh_obra helper_precequ_qh_obra_mips64el
-#define helper_preceu_qh_obl helper_preceu_qh_obl_mips64el
-#define helper_preceu_qh_obr helper_preceu_qh_obr_mips64el
-#define helper_preceu_qh_obla helper_preceu_qh_obla_mips64el
-#define helper_preceu_qh_obra helper_preceu_qh_obra_mips64el
-#define helper_shll_ob helper_shll_ob_mips64el
-#define helper_shrl_ob helper_shrl_ob_mips64el
-#define helper_shra_ob helper_shra_ob_mips64el
-#define helper_shra_r_ob helper_shra_r_ob_mips64el
-#define helper_shll_qh helper_shll_qh_mips64el
-#define helper_shll_s_qh helper_shll_s_qh_mips64el
-#define helper_shrl_qh helper_shrl_qh_mips64el
-#define helper_shra_qh helper_shra_qh_mips64el
-#define helper_shra_r_qh helper_shra_r_qh_mips64el
-#define helper_shll_pw helper_shll_pw_mips64el
-#define helper_shll_s_pw helper_shll_s_pw_mips64el
-#define helper_shra_pw helper_shra_pw_mips64el
-#define helper_shra_r_pw helper_shra_r_pw_mips64el
-#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips64el
-#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips64el
-#define helper_mulq_rs_qh helper_mulq_rs_qh_mips64el
-#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips64el
-#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips64el
-#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips64el
-#define helper_dpau_h_obl helper_dpau_h_obl_mips64el
-#define helper_dpau_h_obr helper_dpau_h_obr_mips64el
-#define helper_dpsu_h_obl helper_dpsu_h_obl_mips64el
-#define helper_dpsu_h_obr helper_dpsu_h_obr_mips64el
-#define helper_dpa_w_qh helper_dpa_w_qh_mips64el
-#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips64el
-#define helper_dps_w_qh helper_dps_w_qh_mips64el
-#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips64el
-#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips64el
-#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips64el
-#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips64el
-#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips64el
-#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips64el
-#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips64el
-#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips64el
-#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips64el
-#define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips64el
-#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips64el
-#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips64el
-#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips64el
-#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips64el
-#define helper_dmadd helper_dmadd_mips64el
-#define helper_dmaddu helper_dmaddu_mips64el
-#define helper_dmsub helper_dmsub_mips64el
-#define helper_dmsubu helper_dmsubu_mips64el
-#define helper_dinsv helper_dinsv_mips64el
-#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips64el
-#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips64el
-#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips64el
-#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips64el
-#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips64el
-#define helper_cmpu_le_ob helper_cmpu_le_ob_mips64el
-#define helper_cmp_eq_qh helper_cmp_eq_qh_mips64el
-#define helper_cmp_lt_qh helper_cmp_lt_qh_mips64el
-#define helper_cmp_le_qh helper_cmp_le_qh_mips64el
-#define helper_cmp_eq_pw helper_cmp_eq_pw_mips64el
-#define helper_cmp_lt_pw helper_cmp_lt_pw_mips64el
-#define helper_cmp_le_pw helper_cmp_le_pw_mips64el
-#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips64el
-#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips64el
-#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips64el
-#define helper_pick_ob helper_pick_ob_mips64el
-#define helper_pick_qh helper_pick_qh_mips64el
-#define helper_pick_pw helper_pick_pw_mips64el
-#define helper_packrl_pw helper_packrl_pw_mips64el
-#define helper_dextr_w helper_dextr_w_mips64el
-#define helper_dextr_r_w helper_dextr_r_w_mips64el
-#define helper_dextr_rs_w helper_dextr_rs_w_mips64el
-#define helper_dextr_l helper_dextr_l_mips64el
-#define helper_dextr_r_l helper_dextr_r_l_mips64el
-#define helper_dextr_rs_l helper_dextr_rs_l_mips64el
-#define helper_dextr_s_h helper_dextr_s_h_mips64el
-#define helper_dextp helper_dextp_mips64el
-#define helper_dextpdp helper_dextpdp_mips64el
-#define helper_dshilo helper_dshilo_mips64el
-#define helper_dmthlip
helper_dmthlip_mips64el -#define helper_dclo helper_dclo_mips64el -#define helper_dclz helper_dclz_mips64el +#define do_raise_exception_err do_raise_exception_err_mips64el +#define helper_paddsb helper_paddsb_mips64el +#define helper_paddusb helper_paddusb_mips64el +#define helper_paddsh helper_paddsh_mips64el +#define helper_paddush helper_paddush_mips64el +#define helper_paddb helper_paddb_mips64el +#define helper_paddh helper_paddh_mips64el +#define helper_paddw helper_paddw_mips64el +#define helper_psubsb helper_psubsb_mips64el +#define helper_psubusb helper_psubusb_mips64el +#define helper_psubsh helper_psubsh_mips64el +#define helper_psubush helper_psubush_mips64el +#define helper_psubb helper_psubb_mips64el +#define helper_psubh helper_psubh_mips64el +#define helper_psubw helper_psubw_mips64el +#define helper_pshufh helper_pshufh_mips64el +#define helper_packsswh helper_packsswh_mips64el +#define helper_packsshb helper_packsshb_mips64el +#define helper_packushb helper_packushb_mips64el +#define helper_punpcklwd helper_punpcklwd_mips64el +#define helper_punpckhwd helper_punpckhwd_mips64el +#define helper_punpcklhw helper_punpcklhw_mips64el +#define helper_punpckhhw helper_punpckhhw_mips64el +#define helper_punpcklbh helper_punpcklbh_mips64el +#define helper_punpckhbh helper_punpckhbh_mips64el +#define helper_pavgh helper_pavgh_mips64el +#define helper_pavgb helper_pavgb_mips64el +#define helper_pmaxsh helper_pmaxsh_mips64el +#define helper_pminsh helper_pminsh_mips64el +#define helper_pmaxub helper_pmaxub_mips64el +#define helper_pminub helper_pminub_mips64el +#define helper_pcmpeqw helper_pcmpeqw_mips64el +#define helper_pcmpgtw helper_pcmpgtw_mips64el +#define helper_pcmpeqh helper_pcmpeqh_mips64el +#define helper_pcmpgth helper_pcmpgth_mips64el +#define helper_pcmpeqb helper_pcmpeqb_mips64el +#define helper_pcmpgtb helper_pcmpgtb_mips64el +#define helper_psllw helper_psllw_mips64el +#define helper_psrlw helper_psrlw_mips64el +#define helper_psraw helper_psraw_mips64el +#define helper_psllh helper_psllh_mips64el +#define helper_psrlh helper_psrlh_mips64el +#define helper_psrah helper_psrah_mips64el +#define helper_pmullh helper_pmullh_mips64el +#define helper_pmulhh helper_pmulhh_mips64el +#define helper_pmulhuh helper_pmulhuh_mips64el +#define helper_pmaddhw helper_pmaddhw_mips64el +#define helper_pasubub helper_pasubub_mips64el +#define helper_biadd helper_biadd_mips64el +#define helper_pmovmskb helper_pmovmskb_mips64el +#define helper_msa_nloc_b helper_msa_nloc_b_mips64el +#define helper_msa_nloc_h helper_msa_nloc_h_mips64el +#define helper_msa_nloc_w helper_msa_nloc_w_mips64el +#define helper_msa_nloc_d helper_msa_nloc_d_mips64el +#define helper_msa_nlzc_b helper_msa_nlzc_b_mips64el +#define helper_msa_nlzc_h helper_msa_nlzc_h_mips64el +#define helper_msa_nlzc_w helper_msa_nlzc_w_mips64el +#define helper_msa_nlzc_d helper_msa_nlzc_d_mips64el +#define helper_msa_pcnt_b helper_msa_pcnt_b_mips64el +#define helper_msa_pcnt_h helper_msa_pcnt_h_mips64el +#define helper_msa_pcnt_w helper_msa_pcnt_w_mips64el +#define helper_msa_pcnt_d helper_msa_pcnt_d_mips64el +#define helper_msa_binsl_b helper_msa_binsl_b_mips64el +#define helper_msa_binsl_h helper_msa_binsl_h_mips64el +#define helper_msa_binsl_w helper_msa_binsl_w_mips64el +#define helper_msa_binsl_d helper_msa_binsl_d_mips64el +#define helper_msa_binsr_b helper_msa_binsr_b_mips64el +#define helper_msa_binsr_h helper_msa_binsr_h_mips64el +#define helper_msa_binsr_w helper_msa_binsr_w_mips64el +#define helper_msa_binsr_d 
helper_msa_binsr_d_mips64el +#define helper_msa_bmnz_v helper_msa_bmnz_v_mips64el +#define helper_msa_bmz_v helper_msa_bmz_v_mips64el +#define helper_msa_bsel_v helper_msa_bsel_v_mips64el +#define helper_msa_bclr_b helper_msa_bclr_b_mips64el +#define helper_msa_bclr_h helper_msa_bclr_h_mips64el +#define helper_msa_bclr_w helper_msa_bclr_w_mips64el +#define helper_msa_bclr_d helper_msa_bclr_d_mips64el +#define helper_msa_bneg_b helper_msa_bneg_b_mips64el +#define helper_msa_bneg_h helper_msa_bneg_h_mips64el +#define helper_msa_bneg_w helper_msa_bneg_w_mips64el +#define helper_msa_bneg_d helper_msa_bneg_d_mips64el +#define helper_msa_bset_b helper_msa_bset_b_mips64el +#define helper_msa_bset_h helper_msa_bset_h_mips64el +#define helper_msa_bset_w helper_msa_bset_w_mips64el +#define helper_msa_bset_d helper_msa_bset_d_mips64el +#define helper_msa_add_a_b helper_msa_add_a_b_mips64el +#define helper_msa_add_a_h helper_msa_add_a_h_mips64el +#define helper_msa_add_a_w helper_msa_add_a_w_mips64el +#define helper_msa_add_a_d helper_msa_add_a_d_mips64el +#define helper_msa_adds_a_b helper_msa_adds_a_b_mips64el +#define helper_msa_adds_a_h helper_msa_adds_a_h_mips64el +#define helper_msa_adds_a_w helper_msa_adds_a_w_mips64el +#define helper_msa_adds_a_d helper_msa_adds_a_d_mips64el +#define helper_msa_adds_s_b helper_msa_adds_s_b_mips64el +#define helper_msa_adds_s_h helper_msa_adds_s_h_mips64el +#define helper_msa_adds_s_w helper_msa_adds_s_w_mips64el +#define helper_msa_adds_s_d helper_msa_adds_s_d_mips64el +#define helper_msa_adds_u_b helper_msa_adds_u_b_mips64el +#define helper_msa_adds_u_h helper_msa_adds_u_h_mips64el +#define helper_msa_adds_u_w helper_msa_adds_u_w_mips64el +#define helper_msa_adds_u_d helper_msa_adds_u_d_mips64el +#define helper_msa_addv_b helper_msa_addv_b_mips64el +#define helper_msa_addv_h helper_msa_addv_h_mips64el +#define helper_msa_addv_w helper_msa_addv_w_mips64el +#define helper_msa_addv_d helper_msa_addv_d_mips64el +#define helper_msa_hadd_s_h helper_msa_hadd_s_h_mips64el +#define helper_msa_hadd_s_w helper_msa_hadd_s_w_mips64el +#define helper_msa_hadd_s_d helper_msa_hadd_s_d_mips64el +#define helper_msa_hadd_u_h helper_msa_hadd_u_h_mips64el +#define helper_msa_hadd_u_w helper_msa_hadd_u_w_mips64el +#define helper_msa_hadd_u_d helper_msa_hadd_u_d_mips64el +#define helper_msa_ave_s_b helper_msa_ave_s_b_mips64el +#define helper_msa_ave_s_h helper_msa_ave_s_h_mips64el +#define helper_msa_ave_s_w helper_msa_ave_s_w_mips64el +#define helper_msa_ave_s_d helper_msa_ave_s_d_mips64el +#define helper_msa_ave_u_b helper_msa_ave_u_b_mips64el +#define helper_msa_ave_u_h helper_msa_ave_u_h_mips64el +#define helper_msa_ave_u_w helper_msa_ave_u_w_mips64el +#define helper_msa_ave_u_d helper_msa_ave_u_d_mips64el +#define helper_msa_aver_s_b helper_msa_aver_s_b_mips64el +#define helper_msa_aver_s_h helper_msa_aver_s_h_mips64el +#define helper_msa_aver_s_w helper_msa_aver_s_w_mips64el +#define helper_msa_aver_s_d helper_msa_aver_s_d_mips64el +#define helper_msa_aver_u_b helper_msa_aver_u_b_mips64el +#define helper_msa_aver_u_h helper_msa_aver_u_h_mips64el +#define helper_msa_aver_u_w helper_msa_aver_u_w_mips64el +#define helper_msa_aver_u_d helper_msa_aver_u_d_mips64el +#define helper_msa_ceq_b helper_msa_ceq_b_mips64el +#define helper_msa_ceq_h helper_msa_ceq_h_mips64el +#define helper_msa_ceq_w helper_msa_ceq_w_mips64el +#define helper_msa_ceq_d helper_msa_ceq_d_mips64el +#define helper_msa_cle_s_b helper_msa_cle_s_b_mips64el +#define helper_msa_cle_s_h helper_msa_cle_s_h_mips64el 
+#define helper_msa_cle_s_w helper_msa_cle_s_w_mips64el +#define helper_msa_cle_s_d helper_msa_cle_s_d_mips64el +#define helper_msa_cle_u_b helper_msa_cle_u_b_mips64el +#define helper_msa_cle_u_h helper_msa_cle_u_h_mips64el +#define helper_msa_cle_u_w helper_msa_cle_u_w_mips64el +#define helper_msa_cle_u_d helper_msa_cle_u_d_mips64el +#define helper_msa_clt_s_b helper_msa_clt_s_b_mips64el +#define helper_msa_clt_s_h helper_msa_clt_s_h_mips64el +#define helper_msa_clt_s_w helper_msa_clt_s_w_mips64el +#define helper_msa_clt_s_d helper_msa_clt_s_d_mips64el +#define helper_msa_clt_u_b helper_msa_clt_u_b_mips64el +#define helper_msa_clt_u_h helper_msa_clt_u_h_mips64el +#define helper_msa_clt_u_w helper_msa_clt_u_w_mips64el +#define helper_msa_clt_u_d helper_msa_clt_u_d_mips64el +#define helper_msa_div_s_b helper_msa_div_s_b_mips64el +#define helper_msa_div_s_h helper_msa_div_s_h_mips64el +#define helper_msa_div_s_w helper_msa_div_s_w_mips64el +#define helper_msa_div_s_d helper_msa_div_s_d_mips64el +#define helper_msa_div_u_b helper_msa_div_u_b_mips64el +#define helper_msa_div_u_h helper_msa_div_u_h_mips64el +#define helper_msa_div_u_w helper_msa_div_u_w_mips64el +#define helper_msa_div_u_d helper_msa_div_u_d_mips64el +#define helper_msa_max_a_b helper_msa_max_a_b_mips64el +#define helper_msa_max_a_h helper_msa_max_a_h_mips64el +#define helper_msa_max_a_w helper_msa_max_a_w_mips64el +#define helper_msa_max_a_d helper_msa_max_a_d_mips64el +#define helper_msa_max_s_b helper_msa_max_s_b_mips64el +#define helper_msa_max_s_h helper_msa_max_s_h_mips64el +#define helper_msa_max_s_w helper_msa_max_s_w_mips64el +#define helper_msa_max_s_d helper_msa_max_s_d_mips64el +#define helper_msa_max_u_b helper_msa_max_u_b_mips64el +#define helper_msa_max_u_h helper_msa_max_u_h_mips64el +#define helper_msa_max_u_w helper_msa_max_u_w_mips64el +#define helper_msa_max_u_d helper_msa_max_u_d_mips64el +#define helper_msa_min_a_b helper_msa_min_a_b_mips64el +#define helper_msa_min_a_h helper_msa_min_a_h_mips64el +#define helper_msa_min_a_w helper_msa_min_a_w_mips64el +#define helper_msa_min_a_d helper_msa_min_a_d_mips64el +#define helper_msa_min_s_b helper_msa_min_s_b_mips64el +#define helper_msa_min_s_h helper_msa_min_s_h_mips64el +#define helper_msa_min_s_w helper_msa_min_s_w_mips64el +#define helper_msa_min_s_d helper_msa_min_s_d_mips64el +#define helper_msa_min_u_b helper_msa_min_u_b_mips64el +#define helper_msa_min_u_h helper_msa_min_u_h_mips64el +#define helper_msa_min_u_w helper_msa_min_u_w_mips64el +#define helper_msa_min_u_d helper_msa_min_u_d_mips64el +#define helper_msa_mod_s_b helper_msa_mod_s_b_mips64el +#define helper_msa_mod_s_h helper_msa_mod_s_h_mips64el +#define helper_msa_mod_s_w helper_msa_mod_s_w_mips64el +#define helper_msa_mod_s_d helper_msa_mod_s_d_mips64el +#define helper_msa_mod_u_b helper_msa_mod_u_b_mips64el +#define helper_msa_mod_u_h helper_msa_mod_u_h_mips64el +#define helper_msa_mod_u_w helper_msa_mod_u_w_mips64el +#define helper_msa_mod_u_d helper_msa_mod_u_d_mips64el +#define helper_msa_asub_s_b helper_msa_asub_s_b_mips64el +#define helper_msa_asub_s_h helper_msa_asub_s_h_mips64el +#define helper_msa_asub_s_w helper_msa_asub_s_w_mips64el +#define helper_msa_asub_s_d helper_msa_asub_s_d_mips64el +#define helper_msa_asub_u_b helper_msa_asub_u_b_mips64el +#define helper_msa_asub_u_h helper_msa_asub_u_h_mips64el +#define helper_msa_asub_u_w helper_msa_asub_u_w_mips64el +#define helper_msa_asub_u_d helper_msa_asub_u_d_mips64el +#define helper_msa_hsub_s_h helper_msa_hsub_s_h_mips64el +#define 
helper_msa_hsub_s_w helper_msa_hsub_s_w_mips64el +#define helper_msa_hsub_s_d helper_msa_hsub_s_d_mips64el +#define helper_msa_hsub_u_h helper_msa_hsub_u_h_mips64el +#define helper_msa_hsub_u_w helper_msa_hsub_u_w_mips64el +#define helper_msa_hsub_u_d helper_msa_hsub_u_d_mips64el +#define helper_msa_ilvev_b helper_msa_ilvev_b_mips64el +#define helper_msa_ilvev_h helper_msa_ilvev_h_mips64el +#define helper_msa_ilvev_w helper_msa_ilvev_w_mips64el +#define helper_msa_ilvev_d helper_msa_ilvev_d_mips64el +#define helper_msa_ilvod_b helper_msa_ilvod_b_mips64el +#define helper_msa_ilvod_h helper_msa_ilvod_h_mips64el +#define helper_msa_ilvod_w helper_msa_ilvod_w_mips64el +#define helper_msa_ilvod_d helper_msa_ilvod_d_mips64el +#define helper_msa_ilvl_b helper_msa_ilvl_b_mips64el +#define helper_msa_ilvl_h helper_msa_ilvl_h_mips64el +#define helper_msa_ilvl_w helper_msa_ilvl_w_mips64el +#define helper_msa_ilvl_d helper_msa_ilvl_d_mips64el +#define helper_msa_ilvr_b helper_msa_ilvr_b_mips64el +#define helper_msa_ilvr_h helper_msa_ilvr_h_mips64el +#define helper_msa_ilvr_w helper_msa_ilvr_w_mips64el +#define helper_msa_ilvr_d helper_msa_ilvr_d_mips64el +#define helper_msa_and_v helper_msa_and_v_mips64el +#define helper_msa_nor_v helper_msa_nor_v_mips64el +#define helper_msa_or_v helper_msa_or_v_mips64el +#define helper_msa_xor_v helper_msa_xor_v_mips64el +#define helper_msa_move_v helper_msa_move_v_mips64el +#define helper_msa_pckev_b helper_msa_pckev_b_mips64el +#define helper_msa_pckev_h helper_msa_pckev_h_mips64el +#define helper_msa_pckev_w helper_msa_pckev_w_mips64el +#define helper_msa_pckev_d helper_msa_pckev_d_mips64el +#define helper_msa_pckod_b helper_msa_pckod_b_mips64el +#define helper_msa_pckod_h helper_msa_pckod_h_mips64el +#define helper_msa_pckod_w helper_msa_pckod_w_mips64el +#define helper_msa_pckod_d helper_msa_pckod_d_mips64el +#define helper_msa_sll_b helper_msa_sll_b_mips64el +#define helper_msa_sll_h helper_msa_sll_h_mips64el +#define helper_msa_sll_w helper_msa_sll_w_mips64el +#define helper_msa_sll_d helper_msa_sll_d_mips64el +#define helper_msa_sra_b helper_msa_sra_b_mips64el +#define helper_msa_sra_h helper_msa_sra_h_mips64el +#define helper_msa_sra_w helper_msa_sra_w_mips64el +#define helper_msa_sra_d helper_msa_sra_d_mips64el +#define helper_msa_srar_b helper_msa_srar_b_mips64el +#define helper_msa_srar_h helper_msa_srar_h_mips64el +#define helper_msa_srar_w helper_msa_srar_w_mips64el +#define helper_msa_srar_d helper_msa_srar_d_mips64el +#define helper_msa_srl_b helper_msa_srl_b_mips64el +#define helper_msa_srl_h helper_msa_srl_h_mips64el +#define helper_msa_srl_w helper_msa_srl_w_mips64el +#define helper_msa_srl_d helper_msa_srl_d_mips64el +#define helper_msa_srlr_b helper_msa_srlr_b_mips64el +#define helper_msa_srlr_h helper_msa_srlr_h_mips64el +#define helper_msa_srlr_w helper_msa_srlr_w_mips64el +#define helper_msa_srlr_d helper_msa_srlr_d_mips64el +#define helper_msa_andi_b helper_msa_andi_b_mips64el +#define helper_msa_ori_b helper_msa_ori_b_mips64el +#define helper_msa_nori_b helper_msa_nori_b_mips64el +#define helper_msa_xori_b helper_msa_xori_b_mips64el +#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips64el +#define helper_msa_bmzi_b helper_msa_bmzi_b_mips64el +#define helper_msa_bseli_b helper_msa_bseli_b_mips64el +#define helper_msa_shf_df helper_msa_shf_df_mips64el +#define helper_msa_addvi_df helper_msa_addvi_df_mips64el +#define helper_msa_subvi_df helper_msa_subvi_df_mips64el +#define helper_msa_ceqi_df helper_msa_ceqi_df_mips64el +#define 
helper_msa_clei_s_df helper_msa_clei_s_df_mips64el +#define helper_msa_clei_u_df helper_msa_clei_u_df_mips64el +#define helper_msa_clti_s_df helper_msa_clti_s_df_mips64el +#define helper_msa_clti_u_df helper_msa_clti_u_df_mips64el +#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips64el +#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips64el +#define helper_msa_mini_s_df helper_msa_mini_s_df_mips64el +#define helper_msa_mini_u_df helper_msa_mini_u_df_mips64el +#define helper_msa_ldi_df helper_msa_ldi_df_mips64el +#define helper_msa_slli_df helper_msa_slli_df_mips64el +#define helper_msa_srai_df helper_msa_srai_df_mips64el +#define helper_msa_srli_df helper_msa_srli_df_mips64el +#define helper_msa_bclri_df helper_msa_bclri_df_mips64el +#define helper_msa_bseti_df helper_msa_bseti_df_mips64el +#define helper_msa_bnegi_df helper_msa_bnegi_df_mips64el +#define helper_msa_sat_s_df helper_msa_sat_s_df_mips64el +#define helper_msa_sat_u_df helper_msa_sat_u_df_mips64el +#define helper_msa_srari_df helper_msa_srari_df_mips64el +#define helper_msa_srlri_df helper_msa_srlri_df_mips64el +#define helper_msa_binsli_df helper_msa_binsli_df_mips64el +#define helper_msa_binsri_df helper_msa_binsri_df_mips64el +#define helper_msa_subv_df helper_msa_subv_df_mips64el +#define helper_msa_subs_s_df helper_msa_subs_s_df_mips64el +#define helper_msa_subs_u_df helper_msa_subs_u_df_mips64el +#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips64el +#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips64el +#define helper_msa_mulv_df helper_msa_mulv_df_mips64el +#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips64el +#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips64el +#define helper_msa_mul_q_df helper_msa_mul_q_df_mips64el +#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips64el +#define helper_msa_sld_df helper_msa_sld_df_mips64el +#define helper_msa_maddv_df helper_msa_maddv_df_mips64el +#define helper_msa_msubv_df helper_msa_msubv_df_mips64el +#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips64el +#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips64el +#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips64el +#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips64el +#define helper_msa_binsl_df helper_msa_binsl_df_mips64el +#define helper_msa_binsr_df helper_msa_binsr_df_mips64el +#define helper_msa_madd_q_df helper_msa_madd_q_df_mips64el +#define helper_msa_msub_q_df helper_msa_msub_q_df_mips64el +#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips64el +#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips64el +#define helper_msa_splat_df helper_msa_splat_df_mips64el +#define helper_msa_vshf_df helper_msa_vshf_df_mips64el +#define helper_msa_sldi_df helper_msa_sldi_df_mips64el +#define helper_msa_splati_df helper_msa_splati_df_mips64el +#define helper_msa_copy_s_b helper_msa_copy_s_b_mips64el +#define helper_msa_copy_s_h helper_msa_copy_s_h_mips64el +#define helper_msa_copy_s_w helper_msa_copy_s_w_mips64el +#define helper_msa_copy_s_d helper_msa_copy_s_d_mips64el +#define helper_msa_copy_u_b helper_msa_copy_u_b_mips64el +#define helper_msa_copy_u_h helper_msa_copy_u_h_mips64el +#define helper_msa_copy_u_w helper_msa_copy_u_w_mips64el +#define helper_msa_insert_b helper_msa_insert_b_mips64el +#define helper_msa_insert_h helper_msa_insert_h_mips64el +#define helper_msa_insert_w helper_msa_insert_w_mips64el +#define helper_msa_insert_d helper_msa_insert_d_mips64el +#define helper_msa_insve_df helper_msa_insve_df_mips64el +#define helper_msa_ctcmsa 
helper_msa_ctcmsa_mips64el +#define helper_msa_cfcmsa helper_msa_cfcmsa_mips64el +#define helper_msa_fill_df helper_msa_fill_df_mips64el +#define helper_msa_fcaf_df helper_msa_fcaf_df_mips64el +#define helper_msa_fcun_df helper_msa_fcun_df_mips64el +#define helper_msa_fceq_df helper_msa_fceq_df_mips64el +#define helper_msa_fcueq_df helper_msa_fcueq_df_mips64el +#define helper_msa_fclt_df helper_msa_fclt_df_mips64el +#define helper_msa_fcult_df helper_msa_fcult_df_mips64el +#define helper_msa_fcle_df helper_msa_fcle_df_mips64el +#define helper_msa_fcule_df helper_msa_fcule_df_mips64el +#define helper_msa_fsaf_df helper_msa_fsaf_df_mips64el +#define helper_msa_fsun_df helper_msa_fsun_df_mips64el +#define helper_msa_fseq_df helper_msa_fseq_df_mips64el +#define helper_msa_fsueq_df helper_msa_fsueq_df_mips64el +#define helper_msa_fslt_df helper_msa_fslt_df_mips64el +#define helper_msa_fsult_df helper_msa_fsult_df_mips64el +#define helper_msa_fsle_df helper_msa_fsle_df_mips64el +#define helper_msa_fsule_df helper_msa_fsule_df_mips64el +#define helper_msa_fcor_df helper_msa_fcor_df_mips64el +#define helper_msa_fcune_df helper_msa_fcune_df_mips64el +#define helper_msa_fcne_df helper_msa_fcne_df_mips64el +#define helper_msa_fsor_df helper_msa_fsor_df_mips64el +#define helper_msa_fsune_df helper_msa_fsune_df_mips64el +#define helper_msa_fsne_df helper_msa_fsne_df_mips64el +#define helper_msa_fadd_df helper_msa_fadd_df_mips64el +#define helper_msa_fsub_df helper_msa_fsub_df_mips64el +#define helper_msa_fmul_df helper_msa_fmul_df_mips64el +#define helper_msa_fdiv_df helper_msa_fdiv_df_mips64el +#define helper_msa_fmadd_df helper_msa_fmadd_df_mips64el +#define helper_msa_fmsub_df helper_msa_fmsub_df_mips64el +#define helper_msa_fexp2_df helper_msa_fexp2_df_mips64el +#define helper_msa_fexdo_df helper_msa_fexdo_df_mips64el +#define helper_msa_ftq_df helper_msa_ftq_df_mips64el +#define helper_msa_fmin_df helper_msa_fmin_df_mips64el +#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips64el +#define helper_msa_fmax_df helper_msa_fmax_df_mips64el +#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips64el +#define helper_msa_fclass_df helper_msa_fclass_df_mips64el +#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips64el +#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips64el +#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips64el +#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips64el +#define helper_msa_frcp_df helper_msa_frcp_df_mips64el +#define helper_msa_frint_df helper_msa_frint_df_mips64el +#define helper_msa_flog2_df helper_msa_flog2_df_mips64el +#define helper_msa_fexupl_df helper_msa_fexupl_df_mips64el +#define helper_msa_fexupr_df helper_msa_fexupr_df_mips64el +#define helper_msa_ffql_df helper_msa_ffql_df_mips64el +#define helper_msa_ffqr_df helper_msa_ffqr_df_mips64el +#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips64el +#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips64el +#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips64el +#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips64el +#define helper_raise_exception_err helper_raise_exception_err_mips64el +#define helper_raise_exception helper_raise_exception_mips64el +#define helper_raise_exception_debug helper_raise_exception_debug_mips64el +#define helper_muls helper_muls_mips64el +#define helper_mulsu helper_mulsu_mips64el +#define helper_macc helper_macc_mips64el +#define helper_macchi helper_macchi_mips64el +#define helper_maccu helper_maccu_mips64el +#define helper_macchiu 
helper_macchiu_mips64el +#define helper_msac helper_msac_mips64el +#define helper_msachi helper_msachi_mips64el +#define helper_msacu helper_msacu_mips64el +#define helper_msachiu helper_msachiu_mips64el +#define helper_mulhi helper_mulhi_mips64el +#define helper_mulhiu helper_mulhiu_mips64el +#define helper_mulshi helper_mulshi_mips64el +#define helper_mulshiu helper_mulshiu_mips64el #define helper_dbitswap helper_dbitswap_mips64el +#define helper_bitswap helper_bitswap_mips64el +#define helper_rotx helper_rotx_mips64el +#define helper_ll helper_ll_mips64el #define helper_lld helper_lld_mips64el -#define helper_scd helper_scd_mips64el +#define helper_swl helper_swl_mips64el +#define helper_swr helper_swr_mips64el #define helper_sdl helper_sdl_mips64el #define helper_sdr helper_sdr_mips64el +#define helper_lwm helper_lwm_mips64el +#define helper_swm helper_swm_mips64el #define helper_ldm helper_ldm_mips64el #define helper_sdm helper_sdm_mips64el -#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips64el -#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips64el -#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips64el -#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips64el -#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips64el -#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips64el -#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips64el -#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips64el -#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips64el +#define helper_fork helper_fork_mips64el +#define helper_yield helper_yield_mips64el +#define r4k_helper_tlbinv r4k_helper_tlbinv_mips64el +#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips64el +#define r4k_helper_tlbwi r4k_helper_tlbwi_mips64el +#define r4k_helper_tlbwr r4k_helper_tlbwr_mips64el +#define r4k_helper_tlbp r4k_helper_tlbp_mips64el +#define r4k_helper_tlbr r4k_helper_tlbr_mips64el +#define helper_tlbwi helper_tlbwi_mips64el +#define helper_tlbwr helper_tlbwr_mips64el +#define helper_tlbp helper_tlbp_mips64el +#define helper_tlbr helper_tlbr_mips64el +#define helper_tlbinv helper_tlbinv_mips64el +#define helper_tlbinvf helper_tlbinvf_mips64el +#define helper_ginvt helper_ginvt_mips64el +#define helper_di helper_di_mips64el +#define helper_ei helper_ei_mips64el +#define helper_eret helper_eret_mips64el +#define helper_eretnc helper_eretnc_mips64el +#define helper_deret helper_deret_mips64el +#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips64el +#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips64el +#define helper_rdhwr_cc helper_rdhwr_cc_mips64el +#define helper_rdhwr_ccres helper_rdhwr_ccres_mips64el +#define helper_rdhwr_performance helper_rdhwr_performance_mips64el +#define helper_rdhwr_xnp helper_rdhwr_xnp_mips64el +#define helper_pmon helper_pmon_mips64el +#define helper_wait helper_wait_mips64el +#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips64el +#define mips_cpu_do_transaction_failed mips_cpu_do_transaction_failed_mips64el +#define helper_msa_ld_b helper_msa_ld_b_mips64el +#define helper_msa_ld_h helper_msa_ld_h_mips64el +#define helper_msa_ld_w helper_msa_ld_w_mips64el +#define helper_msa_ld_d helper_msa_ld_d_mips64el +#define helper_msa_st_b helper_msa_st_b_mips64el +#define helper_msa_st_h helper_msa_st_h_mips64el +#define helper_msa_st_w helper_msa_st_w_mips64el +#define helper_msa_st_d helper_msa_st_d_mips64el +#define helper_cache helper_cache_mips64el +#define gen_intermediate_code gen_intermediate_code_mips64el +#define 
mips_tcg_init mips_tcg_init_mips64el +#define cpu_mips_realize_env cpu_mips_realize_env_mips64el +#define cpu_state_reset cpu_state_reset_mips64el +#define restore_state_to_opc restore_state_to_opc_mips64el #define mips_reg_reset mips_reg_reset_mips64el #define mips_reg_read mips_reg_read_mips64el #define mips_reg_write mips_reg_write_mips64el -#define mips_tcg_init mips_tcg_init_mips64el -#define mips_cpu_list mips_cpu_list_mips64el -#define mips_release mips_release_mips64el -#define MIPS64_REGS_STORAGE_SIZE MIPS64_REGS_STORAGE_SIZE_mips64el -#define MIPS_REGS_STORAGE_SIZE MIPS_REGS_STORAGE_SIZE_mips64el +#define ieee_rm ieee_rm_mips64el +#define mips_defs mips_defs_mips64el +#define mips_defs_number mips_defs_number_mips64el +#define gen_helper_float_class_s gen_helper_float_class_s_mips64el +#define gen_helper_float_class_d gen_helper_float_class_d_mips64el #endif diff --git a/qemu/mipsel.h b/qemu/mipsel.h index a04123e7..fca4abf3 100644 --- a/qemu/mipsel.h +++ b/qemu/mipsel.h @@ -1,3260 +1,1431 @@ /* Autogen header for Unicorn Engine - DONOT MODIFY */ -#ifndef UNICORN_AUTOGEN_MIPSEL_H -#define UNICORN_AUTOGEN_MIPSEL_H -#define arm_release arm_release_mipsel -#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_mipsel -#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_mipsel -#define use_idiv_instructions_rt use_idiv_instructions_rt_mipsel -#define tcg_target_deposit_valid tcg_target_deposit_valid_mipsel -#define helper_power_down helper_power_down_mipsel -#define check_exit_request check_exit_request_mipsel -#define address_space_unregister address_space_unregister_mipsel -#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mipsel -#define phys_mem_clean phys_mem_clean_mipsel -#define tb_cleanup tb_cleanup_mipsel +#ifndef UNICORN_AUTOGEN_mipsel_H +#define UNICORN_AUTOGEN_mipsel_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _mipsel +#endif +#define arm_arch arm_arch_mipsel +#define tb_target_set_jmp_target tb_target_set_jmp_target_mipsel +#define have_bmi1 have_bmi1_mipsel +#define have_popcnt have_popcnt_mipsel +#define have_avx1 have_avx1_mipsel +#define have_avx2 have_avx2_mipsel +#define have_isa have_isa_mipsel +#define have_altivec have_altivec_mipsel +#define have_vsx have_vsx_mipsel +#define flush_icache_range flush_icache_range_mipsel +#define s390_facilities s390_facilities_mipsel +#define tcg_dump_op tcg_dump_op_mipsel +#define tcg_dump_ops tcg_dump_ops_mipsel +#define tcg_gen_and_i64 tcg_gen_and_i64_mipsel +#define tcg_gen_discard_i64 tcg_gen_discard_i64_mipsel +#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_mipsel +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_mipsel +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_mipsel +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_mipsel +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_mipsel +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_mipsel +#define tcg_gen_ld_i64 tcg_gen_ld_i64_mipsel +#define tcg_gen_mov_i64 tcg_gen_mov_i64_mipsel +#define tcg_gen_movi_i64 tcg_gen_movi_i64_mipsel +#define tcg_gen_mul_i64 tcg_gen_mul_i64_mipsel +#define tcg_gen_or_i64 tcg_gen_or_i64_mipsel +#define tcg_gen_sar_i64 tcg_gen_sar_i64_mipsel +#define tcg_gen_shl_i64 tcg_gen_shl_i64_mipsel +#define tcg_gen_shr_i64 tcg_gen_shr_i64_mipsel +#define tcg_gen_st_i64 tcg_gen_st_i64_mipsel +#define tcg_gen_xor_i64 tcg_gen_xor_i64_mipsel +#define cpu_icount_to_ns cpu_icount_to_ns_mipsel +#define cpu_is_stopped cpu_is_stopped_mipsel +#define cpu_get_ticks cpu_get_ticks_mipsel +#define cpu_get_clock cpu_get_clock_mipsel +#define cpu_resume 
cpu_resume_mipsel +#define qemu_init_vcpu qemu_init_vcpu_mipsel +#define cpu_stop_current cpu_stop_current_mipsel +#define resume_all_vcpus resume_all_vcpus_mipsel +#define vm_start vm_start_mipsel +#define address_space_dispatch_compact address_space_dispatch_compact_mipsel +#define flatview_translate flatview_translate_mipsel +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mipsel +#define qemu_get_cpu qemu_get_cpu_mipsel +#define cpu_address_space_init cpu_address_space_init_mipsel +#define cpu_get_address_space cpu_get_address_space_mipsel +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_mipsel +#define cpu_exec_initfn cpu_exec_initfn_mipsel +#define cpu_exec_realizefn cpu_exec_realizefn_mipsel +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mipsel +#define cpu_watchpoint_insert cpu_watchpoint_insert_mipsel +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mipsel +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mipsel +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mipsel +#define cpu_breakpoint_insert cpu_breakpoint_insert_mipsel +#define cpu_breakpoint_remove cpu_breakpoint_remove_mipsel +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mipsel +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mipsel +#define cpu_abort cpu_abort_mipsel +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_mipsel +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mipsel +#define flatview_add_to_dispatch flatview_add_to_dispatch_mipsel +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_mipsel +#define qemu_ram_get_offset qemu_ram_get_offset_mipsel +#define qemu_ram_get_used_length qemu_ram_get_used_length_mipsel +#define qemu_ram_is_shared qemu_ram_is_shared_mipsel +#define qemu_ram_pagesize qemu_ram_pagesize_mipsel +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mipsel +#define qemu_ram_alloc qemu_ram_alloc_mipsel +#define qemu_ram_free qemu_ram_free_mipsel +#define qemu_map_ram_ptr qemu_map_ram_ptr_mipsel +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_mipsel +#define qemu_ram_block_from_host qemu_ram_block_from_host_mipsel +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mipsel +#define cpu_check_watchpoint cpu_check_watchpoint_mipsel +#define iotlb_to_section iotlb_to_section_mipsel +#define address_space_dispatch_new address_space_dispatch_new_mipsel +#define address_space_dispatch_free address_space_dispatch_free_mipsel +#define flatview_read_continue flatview_read_continue_mipsel +#define address_space_read_full address_space_read_full_mipsel +#define address_space_write address_space_write_mipsel +#define address_space_rw address_space_rw_mipsel +#define cpu_physical_memory_rw cpu_physical_memory_rw_mipsel +#define address_space_write_rom address_space_write_rom_mipsel +#define cpu_flush_icache_range cpu_flush_icache_range_mipsel +#define cpu_exec_init_all cpu_exec_init_all_mipsel +#define address_space_access_valid address_space_access_valid_mipsel +#define address_space_map address_space_map_mipsel +#define address_space_unmap address_space_unmap_mipsel +#define cpu_physical_memory_map cpu_physical_memory_map_mipsel +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mipsel +#define cpu_memory_rw_debug cpu_memory_rw_debug_mipsel +#define qemu_target_page_size qemu_target_page_size_mipsel +#define qemu_target_page_bits qemu_target_page_bits_mipsel +#define 
qemu_target_page_bits_min qemu_target_page_bits_min_mipsel +#define target_words_bigendian target_words_bigendian_mipsel +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mipsel +#define ram_block_discard_range ram_block_discard_range_mipsel +#define ramblock_is_pmem ramblock_is_pmem_mipsel +#define page_size_init page_size_init_mipsel +#define set_preferred_target_page_bits set_preferred_target_page_bits_mipsel +#define finalize_target_page_bits finalize_target_page_bits_mipsel +#define cpu_outb cpu_outb_mipsel +#define cpu_outw cpu_outw_mipsel +#define cpu_outl cpu_outl_mipsel +#define cpu_inb cpu_inb_mipsel +#define cpu_inw cpu_inw_mipsel +#define cpu_inl cpu_inl_mipsel #define memory_map memory_map_mipsel +#define memory_map_io memory_map_io_mipsel #define memory_map_ptr memory_map_ptr_mipsel #define memory_unmap memory_unmap_mipsel #define memory_free memory_free_mipsel -#define free_code_gen_buffer free_code_gen_buffer_mipsel -#define helper_raise_exception helper_raise_exception_mipsel -#define tcg_enabled tcg_enabled_mipsel -#define tcg_exec_init tcg_exec_init_mipsel -#define memory_register_types memory_register_types_mipsel -#define cpu_exec_init_all cpu_exec_init_all_mipsel -#define vm_start vm_start_mipsel -#define resume_all_vcpus resume_all_vcpus_mipsel -#define a15_l2ctlr_read a15_l2ctlr_read_mipsel -#define a64_translate_init a64_translate_init_mipsel -#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_mipsel -#define aa64_cacheop_access aa64_cacheop_access_mipsel -#define aa64_daif_access aa64_daif_access_mipsel -#define aa64_daif_write aa64_daif_write_mipsel -#define aa64_dczid_read aa64_dczid_read_mipsel -#define aa64_fpcr_read aa64_fpcr_read_mipsel -#define aa64_fpcr_write aa64_fpcr_write_mipsel -#define aa64_fpsr_read aa64_fpsr_read_mipsel -#define aa64_fpsr_write aa64_fpsr_write_mipsel -#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_mipsel -#define aa64_zva_access aa64_zva_access_mipsel -#define aarch64_banked_spsr_index aarch64_banked_spsr_index_mipsel -#define aarch64_restore_sp aarch64_restore_sp_mipsel -#define aarch64_save_sp aarch64_save_sp_mipsel -#define accel_find accel_find_mipsel -#define accel_init_machine accel_init_machine_mipsel -#define accel_type accel_type_mipsel -#define access_with_adjusted_size access_with_adjusted_size_mipsel -#define add128 add128_mipsel -#define add16_sat add16_sat_mipsel -#define add16_usat add16_usat_mipsel -#define add192 add192_mipsel -#define add8_sat add8_sat_mipsel -#define add8_usat add8_usat_mipsel -#define add_cpreg_to_hashtable add_cpreg_to_hashtable_mipsel -#define add_cpreg_to_list add_cpreg_to_list_mipsel -#define addFloat128Sigs addFloat128Sigs_mipsel -#define addFloat32Sigs addFloat32Sigs_mipsel -#define addFloat64Sigs addFloat64Sigs_mipsel -#define addFloatx80Sigs addFloatx80Sigs_mipsel -#define add_qemu_ldst_label add_qemu_ldst_label_mipsel -#define address_space_access_valid address_space_access_valid_mipsel -#define address_space_destroy address_space_destroy_mipsel -#define address_space_destroy_dispatch address_space_destroy_dispatch_mipsel -#define address_space_get_flatview address_space_get_flatview_mipsel -#define address_space_init address_space_init_mipsel -#define address_space_init_dispatch address_space_init_dispatch_mipsel -#define address_space_lookup_region address_space_lookup_region_mipsel -#define address_space_map address_space_map_mipsel -#define address_space_read address_space_read_mipsel -#define address_space_rw 
address_space_rw_mipsel -#define address_space_translate address_space_translate_mipsel -#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mipsel -#define address_space_translate_internal address_space_translate_internal_mipsel -#define address_space_unmap address_space_unmap_mipsel -#define address_space_update_topology address_space_update_topology_mipsel -#define address_space_update_topology_pass address_space_update_topology_pass_mipsel -#define address_space_write address_space_write_mipsel -#define addrrange_contains addrrange_contains_mipsel -#define addrrange_end addrrange_end_mipsel -#define addrrange_equal addrrange_equal_mipsel -#define addrrange_intersection addrrange_intersection_mipsel -#define addrrange_intersects addrrange_intersects_mipsel -#define addrrange_make addrrange_make_mipsel -#define adjust_endianness adjust_endianness_mipsel -#define all_helpers all_helpers_mipsel -#define alloc_code_gen_buffer alloc_code_gen_buffer_mipsel -#define alloc_entry alloc_entry_mipsel -#define always_true always_true_mipsel -#define arm1026_initfn arm1026_initfn_mipsel -#define arm1136_initfn arm1136_initfn_mipsel -#define arm1136_r2_initfn arm1136_r2_initfn_mipsel -#define arm1176_initfn arm1176_initfn_mipsel -#define arm11mpcore_initfn arm11mpcore_initfn_mipsel -#define arm926_initfn arm926_initfn_mipsel -#define arm946_initfn arm946_initfn_mipsel -#define arm_ccnt_enabled arm_ccnt_enabled_mipsel -#define arm_cp_read_zero arm_cp_read_zero_mipsel -#define arm_cp_reset_ignore arm_cp_reset_ignore_mipsel -#define arm_cpu_do_interrupt arm_cpu_do_interrupt_mipsel -#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_mipsel -#define arm_cpu_finalizefn arm_cpu_finalizefn_mipsel -#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_mipsel -#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_mipsel -#define arm_cpu_initfn arm_cpu_initfn_mipsel -#define arm_cpu_list arm_cpu_list_mipsel -#define cpu_loop_exit cpu_loop_exit_mipsel -#define arm_cpu_post_init arm_cpu_post_init_mipsel -#define arm_cpu_realizefn arm_cpu_realizefn_mipsel -#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_mipsel -#define arm_cpu_register_types arm_cpu_register_types_mipsel -#define cpu_resume_from_signal cpu_resume_from_signal_mipsel -#define arm_cpus arm_cpus_mipsel -#define arm_cpu_set_pc arm_cpu_set_pc_mipsel -#define arm_cp_write_ignore arm_cp_write_ignore_mipsel -#define arm_current_el arm_current_el_mipsel -#define arm_dc_feature arm_dc_feature_mipsel -#define arm_debug_excp_handler arm_debug_excp_handler_mipsel -#define arm_debug_target_el arm_debug_target_el_mipsel -#define arm_el_is_aa64 arm_el_is_aa64_mipsel -#define arm_env_get_cpu arm_env_get_cpu_mipsel -#define arm_excp_target_el arm_excp_target_el_mipsel -#define arm_excp_unmasked arm_excp_unmasked_mipsel -#define arm_feature arm_feature_mipsel -#define arm_generate_debug_exceptions arm_generate_debug_exceptions_mipsel -#define gen_intermediate_code gen_intermediate_code_mipsel -#define gen_intermediate_code_pc gen_intermediate_code_pc_mipsel -#define arm_gen_test_cc arm_gen_test_cc_mipsel -#define arm_gt_ptimer_cb arm_gt_ptimer_cb_mipsel -#define arm_gt_vtimer_cb arm_gt_vtimer_cb_mipsel -#define arm_handle_psci_call arm_handle_psci_call_mipsel -#define arm_is_psci_call arm_is_psci_call_mipsel -#define arm_is_secure arm_is_secure_mipsel -#define arm_is_secure_below_el3 arm_is_secure_below_el3_mipsel -#define arm_ldl_code arm_ldl_code_mipsel -#define arm_lduw_code 
arm_lduw_code_mipsel -#define arm_log_exception arm_log_exception_mipsel -#define arm_reg_read arm_reg_read_mipsel -#define arm_reg_reset arm_reg_reset_mipsel -#define arm_reg_write arm_reg_write_mipsel -#define restore_state_to_opc restore_state_to_opc_mipsel -#define arm_rmode_to_sf arm_rmode_to_sf_mipsel -#define arm_singlestep_active arm_singlestep_active_mipsel -#define tlb_fill tlb_fill_mipsel -#define tlb_flush tlb_flush_mipsel -#define tlb_flush_page tlb_flush_page_mipsel -#define tlb_set_page tlb_set_page_mipsel -#define arm_translate_init arm_translate_init_mipsel -#define arm_v7m_class_init arm_v7m_class_init_mipsel -#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_mipsel -#define ats_access ats_access_mipsel -#define ats_write ats_write_mipsel -#define bad_mode_switch bad_mode_switch_mipsel -#define bank_number bank_number_mipsel -#define bitmap_zero_extend bitmap_zero_extend_mipsel -#define bp_wp_matches bp_wp_matches_mipsel -#define breakpoint_invalidate breakpoint_invalidate_mipsel -#define build_page_bitmap build_page_bitmap_mipsel -#define bus_add_child bus_add_child_mipsel -#define bus_class_init bus_class_init_mipsel -#define bus_info bus_info_mipsel -#define bus_unparent bus_unparent_mipsel -#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_mipsel -#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_mipsel -#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_mipsel -#define call_recip_estimate call_recip_estimate_mipsel -#define can_merge can_merge_mipsel -#define capacity_increase capacity_increase_mipsel -#define ccsidr_read ccsidr_read_mipsel -#define check_ap check_ap_mipsel -#define check_breakpoints check_breakpoints_mipsel -#define check_watchpoints check_watchpoints_mipsel -#define cho cho_mipsel -#define clear_bit clear_bit_mipsel -#define clz32 clz32_mipsel -#define clz64 clz64_mipsel -#define cmp_flatrange_addr cmp_flatrange_addr_mipsel -#define code_gen_alloc code_gen_alloc_mipsel -#define commonNaNToFloat128 commonNaNToFloat128_mipsel -#define commonNaNToFloat16 commonNaNToFloat16_mipsel -#define commonNaNToFloat32 commonNaNToFloat32_mipsel -#define commonNaNToFloat64 commonNaNToFloat64_mipsel -#define commonNaNToFloatx80 commonNaNToFloatx80_mipsel -#define compute_abs_deadline compute_abs_deadline_mipsel -#define cond_name cond_name_mipsel -#define configure_accelerator configure_accelerator_mipsel -#define container_get container_get_mipsel -#define container_info container_info_mipsel -#define container_register_types container_register_types_mipsel -#define contextidr_write contextidr_write_mipsel -#define core_log_global_start core_log_global_start_mipsel -#define core_log_global_stop core_log_global_stop_mipsel -#define core_memory_listener core_memory_listener_mipsel -#define cortexa15_cp_reginfo cortexa15_cp_reginfo_mipsel -#define cortex_a15_initfn cortex_a15_initfn_mipsel -#define cortexa8_cp_reginfo cortexa8_cp_reginfo_mipsel -#define cortex_a8_initfn cortex_a8_initfn_mipsel -#define cortexa9_cp_reginfo cortexa9_cp_reginfo_mipsel -#define cortex_a9_initfn cortex_a9_initfn_mipsel -#define cortex_m3_initfn cortex_m3_initfn_mipsel -#define count_cpreg count_cpreg_mipsel -#define countLeadingZeros32 countLeadingZeros32_mipsel -#define countLeadingZeros64 countLeadingZeros64_mipsel -#define cp_access_ok cp_access_ok_mipsel -#define cpacr_write cpacr_write_mipsel -#define cpreg_field_is_64bit cpreg_field_is_64bit_mipsel -#define cp_reginfo cp_reginfo_mipsel -#define cpreg_key_compare 
cpreg_key_compare_mipsel -#define cpreg_make_keylist cpreg_make_keylist_mipsel -#define cp_reg_reset cp_reg_reset_mipsel -#define cpreg_to_kvm_id cpreg_to_kvm_id_mipsel -#define cpsr_read cpsr_read_mipsel -#define cpsr_write cpsr_write_mipsel -#define cptype_valid cptype_valid_mipsel -#define cpu_abort cpu_abort_mipsel -#define cpu_arm_exec cpu_arm_exec_mipsel -#define cpu_arm_gen_code cpu_arm_gen_code_mipsel -#define cpu_arm_init cpu_arm_init_mipsel -#define cpu_breakpoint_insert cpu_breakpoint_insert_mipsel -#define cpu_breakpoint_remove cpu_breakpoint_remove_mipsel -#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mipsel -#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mipsel -#define cpu_can_do_io cpu_can_do_io_mipsel -#define cpu_can_run cpu_can_run_mipsel -#define cpu_class_init cpu_class_init_mipsel -#define cpu_common_class_by_name cpu_common_class_by_name_mipsel -#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mipsel -#define cpu_common_get_arch_id cpu_common_get_arch_id_mipsel -#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_mipsel -#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_mipsel -#define cpu_common_has_work cpu_common_has_work_mipsel -#define cpu_common_initfn cpu_common_initfn_mipsel -#define cpu_common_noop cpu_common_noop_mipsel -#define cpu_common_parse_features cpu_common_parse_features_mipsel -#define cpu_common_realizefn cpu_common_realizefn_mipsel -#define cpu_common_reset cpu_common_reset_mipsel -#define cpu_dump_statistics cpu_dump_statistics_mipsel -#define cpu_exec_init cpu_exec_init_mipsel -#define cpu_flush_icache_range cpu_flush_icache_range_mipsel -#define cpu_gen_init cpu_gen_init_mipsel -#define cpu_get_clock cpu_get_clock_mipsel -#define cpu_get_real_ticks cpu_get_real_ticks_mipsel -#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_mipsel -#define cpu_handle_debug_exception cpu_handle_debug_exception_mipsel -#define cpu_handle_guest_debug cpu_handle_guest_debug_mipsel -#define cpu_inb cpu_inb_mipsel -#define cpu_inl cpu_inl_mipsel -#define cpu_interrupt cpu_interrupt_mipsel -#define cpu_interrupt_handler cpu_interrupt_handler_mipsel -#define cpu_inw cpu_inw_mipsel -#define cpu_io_recompile cpu_io_recompile_mipsel -#define cpu_is_stopped cpu_is_stopped_mipsel -#define cpu_ldl_code cpu_ldl_code_mipsel -#define cpu_ldub_code cpu_ldub_code_mipsel -#define cpu_lduw_code cpu_lduw_code_mipsel -#define cpu_memory_rw_debug cpu_memory_rw_debug_mipsel -#define cpu_mmu_index cpu_mmu_index_mipsel -#define cpu_outb cpu_outb_mipsel -#define cpu_outl cpu_outl_mipsel -#define cpu_outw cpu_outw_mipsel -#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mipsel -#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_mipsel -#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mipsel -#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mipsel -#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mipsel -#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mipsel -#define cpu_physical_memory_map cpu_physical_memory_map_mipsel -#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mipsel -#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mipsel -#define cpu_physical_memory_rw cpu_physical_memory_rw_mipsel -#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mipsel -#define cpu_physical_memory_set_dirty_range 
cpu_physical_memory_set_dirty_range_mipsel
-#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mipsel
-#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mipsel
-#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mipsel
-#define cpu_register cpu_register_mipsel
-#define cpu_register_types cpu_register_types_mipsel
-#define cpu_restore_state cpu_restore_state_mipsel
-#define cpu_restore_state_from_tb cpu_restore_state_from_tb_mipsel
-#define cpu_single_step cpu_single_step_mipsel
-#define cpu_tb_exec cpu_tb_exec_mipsel
-#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_mipsel
-#define cpu_to_be64 cpu_to_be64_mipsel
-#define cpu_to_le32 cpu_to_le32_mipsel
-#define cpu_to_le64 cpu_to_le64_mipsel
-#define cpu_type_info cpu_type_info_mipsel
-#define cpu_unassigned_access cpu_unassigned_access_mipsel
-#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mipsel
-#define cpu_watchpoint_insert cpu_watchpoint_insert_mipsel
-#define cpu_watchpoint_remove cpu_watchpoint_remove_mipsel
-#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mipsel
-#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mipsel
-#define crc32c_table crc32c_table_mipsel
-#define create_new_memory_mapping create_new_memory_mapping_mipsel
-#define csselr_write csselr_write_mipsel
-#define cto32 cto32_mipsel
-#define ctr_el0_access ctr_el0_access_mipsel
-#define ctz32 ctz32_mipsel
-#define ctz64 ctz64_mipsel
-#define dacr_write dacr_write_mipsel
-#define dbgbcr_write dbgbcr_write_mipsel
-#define dbgbvr_write dbgbvr_write_mipsel
-#define dbgwcr_write dbgwcr_write_mipsel
-#define dbgwvr_write dbgwvr_write_mipsel
-#define debug_cp_reginfo debug_cp_reginfo_mipsel
-#define debug_frame debug_frame_mipsel
-#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_mipsel
-#define define_arm_cp_regs define_arm_cp_regs_mipsel
-#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_mipsel
-#define define_debug_regs define_debug_regs_mipsel
-#define define_one_arm_cp_reg define_one_arm_cp_reg_mipsel
-#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_mipsel
-#define deposit32 deposit32_mipsel
-#define deposit64 deposit64_mipsel
-#define deregister_tm_clones deregister_tm_clones_mipsel
-#define device_class_base_init device_class_base_init_mipsel
-#define device_class_init device_class_init_mipsel
-#define device_finalize device_finalize_mipsel
-#define device_get_realized device_get_realized_mipsel
-#define device_initfn device_initfn_mipsel
-#define device_post_init device_post_init_mipsel
-#define device_reset device_reset_mipsel
-#define device_set_realized device_set_realized_mipsel
-#define device_type_info device_type_info_mipsel
-#define disas_arm_insn disas_arm_insn_mipsel
-#define disas_coproc_insn disas_coproc_insn_mipsel
-#define disas_dsp_insn disas_dsp_insn_mipsel
-#define disas_iwmmxt_insn disas_iwmmxt_insn_mipsel
-#define disas_neon_data_insn disas_neon_data_insn_mipsel
-#define disas_neon_ls_insn disas_neon_ls_insn_mipsel
-#define disas_thumb2_insn disas_thumb2_insn_mipsel
-#define disas_thumb_insn disas_thumb_insn_mipsel
-#define disas_vfp_insn disas_vfp_insn_mipsel
-#define disas_vfp_v8_insn disas_vfp_v8_insn_mipsel
-#define do_arm_semihosting do_arm_semihosting_mipsel
-#define do_clz16 do_clz16_mipsel
-#define do_clz8 do_clz8_mipsel
-#define do_constant_folding do_constant_folding_mipsel
-#define do_constant_folding_2 do_constant_folding_2_mipsel
-#define do_constant_folding_cond do_constant_folding_cond_mipsel
-#define do_constant_folding_cond2 do_constant_folding_cond2_mipsel
-#define do_constant_folding_cond_32 do_constant_folding_cond_32_mipsel
-#define do_constant_folding_cond_64 do_constant_folding_cond_64_mipsel
-#define do_constant_folding_cond_eq do_constant_folding_cond_eq_mipsel
-#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_mipsel
-#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_mipsel
-#define do_ssat do_ssat_mipsel
-#define do_usad do_usad_mipsel
-#define do_usat do_usat_mipsel
-#define do_v7m_exception_exit do_v7m_exception_exit_mipsel
-#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_mipsel
-#define dummy_func dummy_func_mipsel
-#define dummy_section dummy_section_mipsel
-#define _DYNAMIC _DYNAMIC_mipsel
-#define _edata _edata_mipsel
-#define _end _end_mipsel
-#define end_list end_list_mipsel
-#define eq128 eq128_mipsel
-#define ErrorClass_lookup ErrorClass_lookup_mipsel
-#define error_copy error_copy_mipsel
-#define error_exit error_exit_mipsel
-#define error_get_class error_get_class_mipsel
-#define error_get_pretty error_get_pretty_mipsel
-#define error_setg_file_open error_setg_file_open_mipsel
-#define estimateDiv128To64 estimateDiv128To64_mipsel
-#define estimateSqrt32 estimateSqrt32_mipsel
-#define excnames excnames_mipsel
-#define excp_is_internal excp_is_internal_mipsel
-#define extended_addresses_enabled extended_addresses_enabled_mipsel
-#define extended_mpu_ap_bits extended_mpu_ap_bits_mipsel
-#define extract32 extract32_mipsel
-#define extract64 extract64_mipsel
-#define extractFloat128Exp extractFloat128Exp_mipsel
-#define extractFloat128Frac0 extractFloat128Frac0_mipsel
-#define extractFloat128Frac1 extractFloat128Frac1_mipsel
-#define extractFloat128Sign extractFloat128Sign_mipsel
-#define extractFloat16Exp extractFloat16Exp_mipsel
-#define extractFloat16Frac extractFloat16Frac_mipsel
-#define extractFloat16Sign extractFloat16Sign_mipsel
-#define extractFloat32Exp extractFloat32Exp_mipsel
-#define extractFloat32Frac extractFloat32Frac_mipsel
-#define extractFloat32Sign extractFloat32Sign_mipsel
-#define extractFloat64Exp extractFloat64Exp_mipsel
-#define extractFloat64Frac extractFloat64Frac_mipsel
-#define extractFloat64Sign extractFloat64Sign_mipsel
-#define extractFloatx80Exp extractFloatx80Exp_mipsel
-#define extractFloatx80Frac extractFloatx80Frac_mipsel
-#define extractFloatx80Sign extractFloatx80Sign_mipsel
-#define fcse_write fcse_write_mipsel
-#define find_better_copy find_better_copy_mipsel
-#define find_default_machine find_default_machine_mipsel
-#define find_desc_by_name find_desc_by_name_mipsel
-#define find_first_bit find_first_bit_mipsel
-#define find_paging_enabled_cpu find_paging_enabled_cpu_mipsel
-#define find_ram_block find_ram_block_mipsel
-#define find_ram_offset find_ram_offset_mipsel
-#define find_string find_string_mipsel
-#define find_type find_type_mipsel
-#define _fini _fini_mipsel
-#define flatrange_equal flatrange_equal_mipsel
-#define flatview_destroy flatview_destroy_mipsel
-#define flatview_init flatview_init_mipsel
-#define flatview_insert flatview_insert_mipsel
-#define flatview_lookup flatview_lookup_mipsel
-#define flatview_ref flatview_ref_mipsel
-#define flatview_simplify flatview_simplify_mipsel
 #define flatview_unref flatview_unref_mipsel
-#define float128_add float128_add_mipsel
-#define float128_compare float128_compare_mipsel
-#define float128_compare_internal float128_compare_internal_mipsel
-#define float128_compare_quiet float128_compare_quiet_mipsel
-#define float128_default_nan float128_default_nan_mipsel
-#define float128_div float128_div_mipsel
-#define float128_eq float128_eq_mipsel
-#define float128_eq_quiet float128_eq_quiet_mipsel
-#define float128_is_quiet_nan float128_is_quiet_nan_mipsel
-#define float128_is_signaling_nan float128_is_signaling_nan_mipsel
-#define float128_le float128_le_mipsel
-#define float128_le_quiet float128_le_quiet_mipsel
-#define float128_lt float128_lt_mipsel
-#define float128_lt_quiet float128_lt_quiet_mipsel
-#define float128_maybe_silence_nan float128_maybe_silence_nan_mipsel
-#define float128_mul float128_mul_mipsel
-#define float128_rem float128_rem_mipsel
-#define float128_round_to_int float128_round_to_int_mipsel
-#define float128_scalbn float128_scalbn_mipsel
-#define float128_sqrt float128_sqrt_mipsel
-#define float128_sub float128_sub_mipsel
-#define float128ToCommonNaN float128ToCommonNaN_mipsel
-#define float128_to_float32 float128_to_float32_mipsel
-#define float128_to_float64 float128_to_float64_mipsel
-#define float128_to_floatx80 float128_to_floatx80_mipsel
-#define float128_to_int32 float128_to_int32_mipsel
-#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mipsel
-#define float128_to_int64 float128_to_int64_mipsel
-#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mipsel
-#define float128_unordered float128_unordered_mipsel
-#define float128_unordered_quiet float128_unordered_quiet_mipsel
-#define float16_default_nan float16_default_nan_mipsel
+#define address_space_get_flatview address_space_get_flatview_mipsel
+#define memory_region_transaction_begin memory_region_transaction_begin_mipsel
+#define memory_region_transaction_commit memory_region_transaction_commit_mipsel
+#define memory_region_init memory_region_init_mipsel
+#define memory_region_access_valid memory_region_access_valid_mipsel
+#define memory_region_dispatch_read memory_region_dispatch_read_mipsel
+#define memory_region_dispatch_write memory_region_dispatch_write_mipsel
+#define memory_region_init_io memory_region_init_io_mipsel
+#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mipsel
+#define memory_region_size memory_region_size_mipsel
+#define memory_region_set_readonly memory_region_set_readonly_mipsel
+#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mipsel
+#define memory_region_from_host memory_region_from_host_mipsel
+#define memory_region_get_ram_addr memory_region_get_ram_addr_mipsel
+#define memory_region_add_subregion memory_region_add_subregion_mipsel
+#define memory_region_del_subregion memory_region_del_subregion_mipsel
+#define memory_region_find memory_region_find_mipsel
+#define memory_listener_register memory_listener_register_mipsel
+#define memory_listener_unregister memory_listener_unregister_mipsel
+#define address_space_remove_listeners address_space_remove_listeners_mipsel
+#define address_space_init address_space_init_mipsel
+#define address_space_destroy address_space_destroy_mipsel
+#define memory_region_init_ram memory_region_init_ram_mipsel
+#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mipsel
+#define exec_inline_op exec_inline_op_mipsel
+#define floatx80_default_nan floatx80_default_nan_mipsel
+#define float_raise float_raise_mipsel
 #define float16_is_quiet_nan float16_is_quiet_nan_mipsel
 #define float16_is_signaling_nan float16_is_signaling_nan_mipsel
-#define float16_maybe_silence_nan float16_maybe_silence_nan_mipsel
-#define float16ToCommonNaN float16ToCommonNaN_mipsel
-#define float16_to_float32 float16_to_float32_mipsel
-#define float16_to_float64 float16_to_float64_mipsel
-#define float32_abs float32_abs_mipsel
-#define float32_add float32_add_mipsel
-#define float32_chs float32_chs_mipsel
-#define float32_compare float32_compare_mipsel
-#define float32_compare_internal float32_compare_internal_mipsel
-#define float32_compare_quiet float32_compare_quiet_mipsel
-#define float32_default_nan float32_default_nan_mipsel
-#define float32_div float32_div_mipsel
-#define float32_eq float32_eq_mipsel
-#define float32_eq_quiet float32_eq_quiet_mipsel
-#define float32_exp2 float32_exp2_mipsel
-#define float32_exp2_coefficients float32_exp2_coefficients_mipsel
-#define float32_is_any_nan float32_is_any_nan_mipsel
-#define float32_is_infinity float32_is_infinity_mipsel
-#define float32_is_neg float32_is_neg_mipsel
 #define float32_is_quiet_nan float32_is_quiet_nan_mipsel
 #define float32_is_signaling_nan float32_is_signaling_nan_mipsel
-#define float32_is_zero float32_is_zero_mipsel
-#define float32_is_zero_or_denormal float32_is_zero_or_denormal_mipsel
-#define float32_le float32_le_mipsel
-#define float32_le_quiet float32_le_quiet_mipsel
-#define float32_log2 float32_log2_mipsel
-#define float32_lt float32_lt_mipsel
-#define float32_lt_quiet float32_lt_quiet_mipsel
+#define float64_is_quiet_nan float64_is_quiet_nan_mipsel
+#define float64_is_signaling_nan float64_is_signaling_nan_mipsel
+#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mipsel
+#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mipsel
+#define floatx80_silence_nan floatx80_silence_nan_mipsel
+#define propagateFloatx80NaN propagateFloatx80NaN_mipsel
+#define float128_is_quiet_nan float128_is_quiet_nan_mipsel
+#define float128_is_signaling_nan float128_is_signaling_nan_mipsel
+#define float128_silence_nan float128_silence_nan_mipsel
+#define float16_add float16_add_mipsel
+#define float16_sub float16_sub_mipsel
+#define float32_add float32_add_mipsel
+#define float32_sub float32_sub_mipsel
+#define float64_add float64_add_mipsel
+#define float64_sub float64_sub_mipsel
+#define float16_mul float16_mul_mipsel
+#define float32_mul float32_mul_mipsel
+#define float64_mul float64_mul_mipsel
+#define float16_muladd float16_muladd_mipsel
+#define float32_muladd float32_muladd_mipsel
+#define float64_muladd float64_muladd_mipsel
+#define float16_div float16_div_mipsel
+#define float32_div float32_div_mipsel
+#define float64_div float64_div_mipsel
+#define float16_to_float32 float16_to_float32_mipsel
+#define float16_to_float64 float16_to_float64_mipsel
+#define float32_to_float16 float32_to_float16_mipsel
+#define float32_to_float64 float32_to_float64_mipsel
+#define float64_to_float16 float64_to_float16_mipsel
+#define float64_to_float32 float64_to_float32_mipsel
+#define float16_round_to_int float16_round_to_int_mipsel
+#define float32_round_to_int float32_round_to_int_mipsel
+#define float64_round_to_int float64_round_to_int_mipsel
+#define float16_to_int16_scalbn float16_to_int16_scalbn_mipsel
+#define float16_to_int32_scalbn float16_to_int32_scalbn_mipsel
+#define float16_to_int64_scalbn float16_to_int64_scalbn_mipsel
+#define float32_to_int16_scalbn float32_to_int16_scalbn_mipsel
+#define float32_to_int32_scalbn float32_to_int32_scalbn_mipsel
+#define float32_to_int64_scalbn float32_to_int64_scalbn_mipsel
+#define float64_to_int16_scalbn float64_to_int16_scalbn_mipsel
+#define float64_to_int32_scalbn float64_to_int32_scalbn_mipsel
+#define float64_to_int64_scalbn float64_to_int64_scalbn_mipsel
+#define float16_to_int16 float16_to_int16_mipsel
+#define float16_to_int32 float16_to_int32_mipsel
+#define float16_to_int64 float16_to_int64_mipsel
+#define float32_to_int16 float32_to_int16_mipsel
+#define float32_to_int32 float32_to_int32_mipsel
+#define float32_to_int64 float32_to_int64_mipsel
+#define float64_to_int16 float64_to_int16_mipsel
+#define float64_to_int32 float64_to_int32_mipsel
+#define float64_to_int64 float64_to_int64_mipsel
+#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_mipsel
+#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_mipsel
+#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_mipsel
+#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mipsel
+#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mipsel
+#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mipsel
+#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mipsel
+#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mipsel
+#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mipsel
+#define float16_to_uint16_scalbn float16_to_uint16_scalbn_mipsel
+#define float16_to_uint32_scalbn float16_to_uint32_scalbn_mipsel
+#define float16_to_uint64_scalbn float16_to_uint64_scalbn_mipsel
+#define float32_to_uint16_scalbn float32_to_uint16_scalbn_mipsel
+#define float32_to_uint32_scalbn float32_to_uint32_scalbn_mipsel
+#define float32_to_uint64_scalbn float32_to_uint64_scalbn_mipsel
+#define float64_to_uint16_scalbn float64_to_uint16_scalbn_mipsel
+#define float64_to_uint32_scalbn float64_to_uint32_scalbn_mipsel
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_mipsel
+#define float16_to_uint16 float16_to_uint16_mipsel
+#define float16_to_uint32 float16_to_uint32_mipsel
+#define float16_to_uint64 float16_to_uint64_mipsel
+#define float32_to_uint16 float32_to_uint16_mipsel
+#define float32_to_uint32 float32_to_uint32_mipsel
+#define float32_to_uint64 float32_to_uint64_mipsel
+#define float64_to_uint16 float64_to_uint16_mipsel
+#define float64_to_uint32 float64_to_uint32_mipsel
+#define float64_to_uint64 float64_to_uint64_mipsel
+#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_mipsel
+#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_mipsel
+#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_mipsel
+#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mipsel
+#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mipsel
+#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mipsel
+#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mipsel
+#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mipsel
+#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mipsel
+#define int64_to_float16_scalbn int64_to_float16_scalbn_mipsel
+#define int32_to_float16_scalbn int32_to_float16_scalbn_mipsel
+#define int16_to_float16_scalbn int16_to_float16_scalbn_mipsel
+#define int64_to_float16 int64_to_float16_mipsel
+#define int32_to_float16 int32_to_float16_mipsel
+#define int16_to_float16 int16_to_float16_mipsel
+#define int64_to_float32_scalbn int64_to_float32_scalbn_mipsel
+#define int32_to_float32_scalbn int32_to_float32_scalbn_mipsel
+#define int16_to_float32_scalbn int16_to_float32_scalbn_mipsel
+#define int64_to_float32 int64_to_float32_mipsel
+#define int32_to_float32 int32_to_float32_mipsel
+#define int16_to_float32 int16_to_float32_mipsel
+#define int64_to_float64_scalbn int64_to_float64_scalbn_mipsel
+#define int32_to_float64_scalbn int32_to_float64_scalbn_mipsel
+#define int16_to_float64_scalbn int16_to_float64_scalbn_mipsel
+#define int64_to_float64 int64_to_float64_mipsel
+#define int32_to_float64 int32_to_float64_mipsel
+#define int16_to_float64 int16_to_float64_mipsel
+#define uint64_to_float16_scalbn uint64_to_float16_scalbn_mipsel
+#define uint32_to_float16_scalbn uint32_to_float16_scalbn_mipsel
+#define uint16_to_float16_scalbn uint16_to_float16_scalbn_mipsel
+#define uint64_to_float16 uint64_to_float16_mipsel
+#define uint32_to_float16 uint32_to_float16_mipsel
+#define uint16_to_float16 uint16_to_float16_mipsel
+#define uint64_to_float32_scalbn uint64_to_float32_scalbn_mipsel
+#define uint32_to_float32_scalbn uint32_to_float32_scalbn_mipsel
+#define uint16_to_float32_scalbn uint16_to_float32_scalbn_mipsel
+#define uint64_to_float32 uint64_to_float32_mipsel
+#define uint32_to_float32 uint32_to_float32_mipsel
+#define uint16_to_float32 uint16_to_float32_mipsel
+#define uint64_to_float64_scalbn uint64_to_float64_scalbn_mipsel
+#define uint32_to_float64_scalbn uint32_to_float64_scalbn_mipsel
+#define uint16_to_float64_scalbn uint16_to_float64_scalbn_mipsel
+#define uint64_to_float64 uint64_to_float64_mipsel
+#define uint32_to_float64 uint32_to_float64_mipsel
+#define uint16_to_float64 uint16_to_float64_mipsel
+#define float16_min float16_min_mipsel
+#define float16_minnum float16_minnum_mipsel
+#define float16_minnummag float16_minnummag_mipsel
+#define float16_max float16_max_mipsel
+#define float16_maxnum float16_maxnum_mipsel
+#define float16_maxnummag float16_maxnummag_mipsel
+#define float32_min float32_min_mipsel
+#define float32_minnum float32_minnum_mipsel
+#define float32_minnummag float32_minnummag_mipsel
 #define float32_max float32_max_mipsel
 #define float32_maxnum float32_maxnum_mipsel
 #define float32_maxnummag float32_maxnummag_mipsel
-#define float32_maybe_silence_nan float32_maybe_silence_nan_mipsel
-#define float32_min float32_min_mipsel
-#define float32_minmax float32_minmax_mipsel
-#define float32_minnum float32_minnum_mipsel
-#define float32_minnummag float32_minnummag_mipsel
-#define float32_mul float32_mul_mipsel
-#define float32_muladd float32_muladd_mipsel
-#define float32_rem float32_rem_mipsel
-#define float32_round_to_int float32_round_to_int_mipsel
-#define float32_scalbn float32_scalbn_mipsel
-#define float32_set_sign float32_set_sign_mipsel
-#define float32_sqrt float32_sqrt_mipsel
-#define float32_squash_input_denormal float32_squash_input_denormal_mipsel
-#define float32_sub float32_sub_mipsel
-#define float32ToCommonNaN float32ToCommonNaN_mipsel
-#define float32_to_float128 float32_to_float128_mipsel
-#define float32_to_float16 float32_to_float16_mipsel
-#define float32_to_float64 float32_to_float64_mipsel
-#define float32_to_floatx80 float32_to_floatx80_mipsel
-#define float32_to_int16 float32_to_int16_mipsel
-#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mipsel
-#define float32_to_int32 float32_to_int32_mipsel
-#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mipsel
-#define float32_to_int64 float32_to_int64_mipsel
-#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mipsel
-#define float32_to_uint16 float32_to_uint16_mipsel
-#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mipsel
-#define float32_to_uint32 float32_to_uint32_mipsel
-#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mipsel
-#define float32_to_uint64 float32_to_uint64_mipsel
-#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mipsel
-#define float32_unordered float32_unordered_mipsel
-#define float32_unordered_quiet float32_unordered_quiet_mipsel
-#define float64_abs float64_abs_mipsel
-#define float64_add float64_add_mipsel
-#define float64_chs float64_chs_mipsel
-#define float64_compare float64_compare_mipsel
-#define float64_compare_internal float64_compare_internal_mipsel
-#define float64_compare_quiet float64_compare_quiet_mipsel
-#define float64_default_nan float64_default_nan_mipsel
-#define float64_div float64_div_mipsel
-#define float64_eq float64_eq_mipsel
-#define float64_eq_quiet float64_eq_quiet_mipsel
-#define float64_is_any_nan float64_is_any_nan_mipsel
-#define float64_is_infinity float64_is_infinity_mipsel
-#define float64_is_neg float64_is_neg_mipsel
-#define float64_is_quiet_nan float64_is_quiet_nan_mipsel
-#define float64_is_signaling_nan float64_is_signaling_nan_mipsel
-#define float64_is_zero float64_is_zero_mipsel
-#define float64_le float64_le_mipsel
-#define float64_le_quiet float64_le_quiet_mipsel
-#define float64_log2 float64_log2_mipsel
-#define float64_lt float64_lt_mipsel
-#define float64_lt_quiet float64_lt_quiet_mipsel
+#define float64_min float64_min_mipsel
+#define float64_minnum float64_minnum_mipsel
+#define float64_minnummag float64_minnummag_mipsel
 #define float64_max float64_max_mipsel
 #define float64_maxnum float64_maxnum_mipsel
 #define float64_maxnummag float64_maxnummag_mipsel
-#define float64_maybe_silence_nan float64_maybe_silence_nan_mipsel
-#define float64_min float64_min_mipsel
-#define float64_minmax float64_minmax_mipsel
-#define float64_minnum float64_minnum_mipsel
-#define float64_minnummag float64_minnummag_mipsel
-#define float64_mul float64_mul_mipsel
-#define float64_muladd float64_muladd_mipsel
-#define float64_rem float64_rem_mipsel
-#define float64_round_to_int float64_round_to_int_mipsel
+#define float16_compare float16_compare_mipsel
+#define float16_compare_quiet float16_compare_quiet_mipsel
+#define float32_compare float32_compare_mipsel
+#define float32_compare_quiet float32_compare_quiet_mipsel
+#define float64_compare float64_compare_mipsel
+#define float64_compare_quiet float64_compare_quiet_mipsel
+#define float16_scalbn float16_scalbn_mipsel
+#define float32_scalbn float32_scalbn_mipsel
 #define float64_scalbn float64_scalbn_mipsel
-#define float64_set_sign float64_set_sign_mipsel
+#define float16_sqrt float16_sqrt_mipsel
+#define float32_sqrt float32_sqrt_mipsel
 #define float64_sqrt float64_sqrt_mipsel
+#define float16_default_nan float16_default_nan_mipsel
+#define float32_default_nan float32_default_nan_mipsel
+#define float64_default_nan float64_default_nan_mipsel
+#define float128_default_nan float128_default_nan_mipsel
+#define float16_silence_nan float16_silence_nan_mipsel
+#define float32_silence_nan float32_silence_nan_mipsel
+#define float64_silence_nan float64_silence_nan_mipsel
+#define float16_squash_input_denormal float16_squash_input_denormal_mipsel
+#define float32_squash_input_denormal float32_squash_input_denormal_mipsel
 #define float64_squash_input_denormal float64_squash_input_denormal_mipsel
-#define float64_sub float64_sub_mipsel
-#define float64ToCommonNaN float64ToCommonNaN_mipsel
-#define float64_to_float128 float64_to_float128_mipsel
-#define float64_to_float16 float64_to_float16_mipsel
-#define float64_to_float32 float64_to_float32_mipsel
+#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mipsel
+#define roundAndPackFloatx80 roundAndPackFloatx80_mipsel
+#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mipsel
+#define int32_to_floatx80 int32_to_floatx80_mipsel
+#define int32_to_float128 int32_to_float128_mipsel
+#define int64_to_floatx80 int64_to_floatx80_mipsel
+#define int64_to_float128 int64_to_float128_mipsel
+#define uint64_to_float128 uint64_to_float128_mipsel
+#define float32_to_floatx80 float32_to_floatx80_mipsel
+#define float32_to_float128 float32_to_float128_mipsel
+#define float32_rem float32_rem_mipsel
+#define float32_exp2 float32_exp2_mipsel
+#define float32_log2 float32_log2_mipsel
+#define float32_eq float32_eq_mipsel
+#define float32_le float32_le_mipsel
+#define float32_lt float32_lt_mipsel
+#define float32_unordered float32_unordered_mipsel
+#define float32_eq_quiet float32_eq_quiet_mipsel
+#define float32_le_quiet float32_le_quiet_mipsel
+#define float32_lt_quiet float32_lt_quiet_mipsel
+#define float32_unordered_quiet float32_unordered_quiet_mipsel
 #define float64_to_floatx80 float64_to_floatx80_mipsel
-#define float64_to_int16 float64_to_int16_mipsel
-#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mipsel
-#define float64_to_int32 float64_to_int32_mipsel
-#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mipsel
-#define float64_to_int64 float64_to_int64_mipsel
-#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mipsel
-#define float64_to_uint16 float64_to_uint16_mipsel
-#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mipsel
-#define float64_to_uint32 float64_to_uint32_mipsel
-#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mipsel
-#define float64_to_uint64 float64_to_uint64_mipsel
-#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mipsel
-#define float64_trunc_to_int float64_trunc_to_int_mipsel
+#define float64_to_float128 float64_to_float128_mipsel
+#define float64_rem float64_rem_mipsel
+#define float64_log2 float64_log2_mipsel
+#define float64_eq float64_eq_mipsel
+#define float64_le float64_le_mipsel
+#define float64_lt float64_lt_mipsel
 #define float64_unordered float64_unordered_mipsel
+#define float64_eq_quiet float64_eq_quiet_mipsel
+#define float64_le_quiet float64_le_quiet_mipsel
+#define float64_lt_quiet float64_lt_quiet_mipsel
 #define float64_unordered_quiet float64_unordered_quiet_mipsel
-#define float_raise float_raise_mipsel
-#define floatx80_add floatx80_add_mipsel
-#define floatx80_compare floatx80_compare_mipsel
-#define floatx80_compare_internal floatx80_compare_internal_mipsel
-#define floatx80_compare_quiet floatx80_compare_quiet_mipsel
-#define floatx80_default_nan floatx80_default_nan_mipsel
-#define floatx80_div floatx80_div_mipsel
-#define floatx80_eq floatx80_eq_mipsel
-#define floatx80_eq_quiet floatx80_eq_quiet_mipsel
-#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mipsel
-#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mipsel
-#define floatx80_le floatx80_le_mipsel
-#define floatx80_le_quiet floatx80_le_quiet_mipsel
-#define floatx80_lt floatx80_lt_mipsel
-#define floatx80_lt_quiet floatx80_lt_quiet_mipsel
-#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_mipsel
-#define floatx80_mul floatx80_mul_mipsel
-#define floatx80_rem floatx80_rem_mipsel
-#define floatx80_round_to_int floatx80_round_to_int_mipsel
-#define floatx80_scalbn floatx80_scalbn_mipsel
-#define floatx80_sqrt floatx80_sqrt_mipsel
-#define floatx80_sub floatx80_sub_mipsel
-#define floatx80ToCommonNaN floatx80ToCommonNaN_mipsel
-#define floatx80_to_float128 floatx80_to_float128_mipsel
-#define floatx80_to_float32 floatx80_to_float32_mipsel
-#define floatx80_to_float64 floatx80_to_float64_mipsel
 #define floatx80_to_int32 floatx80_to_int32_mipsel
 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mipsel
 #define floatx80_to_int64 floatx80_to_int64_mipsel
 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mipsel
+#define floatx80_to_float32 floatx80_to_float32_mipsel
+#define floatx80_to_float64 floatx80_to_float64_mipsel
+#define floatx80_to_float128 floatx80_to_float128_mipsel
+#define floatx80_round floatx80_round_mipsel
+#define floatx80_round_to_int floatx80_round_to_int_mipsel
+#define floatx80_add floatx80_add_mipsel
+#define floatx80_sub floatx80_sub_mipsel
+#define floatx80_mul floatx80_mul_mipsel
+#define floatx80_div floatx80_div_mipsel
+#define floatx80_rem floatx80_rem_mipsel
+#define floatx80_sqrt floatx80_sqrt_mipsel
+#define floatx80_eq floatx80_eq_mipsel
+#define floatx80_le floatx80_le_mipsel
+#define floatx80_lt floatx80_lt_mipsel
 #define floatx80_unordered floatx80_unordered_mipsel
+#define floatx80_eq_quiet floatx80_eq_quiet_mipsel
+#define floatx80_le_quiet floatx80_le_quiet_mipsel
+#define floatx80_lt_quiet floatx80_lt_quiet_mipsel
 #define floatx80_unordered_quiet floatx80_unordered_quiet_mipsel
-#define flush_icache_range flush_icache_range_mipsel
-#define format_string format_string_mipsel
-#define fp_decode_rm fp_decode_rm_mipsel
-#define frame_dummy frame_dummy_mipsel
-#define free_range free_range_mipsel
-#define fstat64 fstat64_mipsel
-#define futex_wait futex_wait_mipsel
-#define futex_wake futex_wake_mipsel
-#define gen_aa32_ld16s gen_aa32_ld16s_mipsel
-#define gen_aa32_ld16u gen_aa32_ld16u_mipsel
-#define gen_aa32_ld32u gen_aa32_ld32u_mipsel
-#define gen_aa32_ld64 gen_aa32_ld64_mipsel
-#define gen_aa32_ld8s gen_aa32_ld8s_mipsel
-#define gen_aa32_ld8u gen_aa32_ld8u_mipsel
-#define gen_aa32_st16 gen_aa32_st16_mipsel
-#define gen_aa32_st32 gen_aa32_st32_mipsel
-#define gen_aa32_st64 gen_aa32_st64_mipsel
-#define gen_aa32_st8 gen_aa32_st8_mipsel
-#define gen_adc gen_adc_mipsel
-#define gen_adc_CC gen_adc_CC_mipsel
-#define gen_add16 gen_add16_mipsel
-#define gen_add_carry gen_add_carry_mipsel
-#define gen_add_CC gen_add_CC_mipsel
-#define gen_add_datah_offset gen_add_datah_offset_mipsel
-#define gen_add_data_offset gen_add_data_offset_mipsel
-#define gen_addq gen_addq_mipsel
-#define gen_addq_lo gen_addq_lo_mipsel
-#define gen_addq_msw gen_addq_msw_mipsel
-#define gen_arm_parallel_addsub gen_arm_parallel_addsub_mipsel
-#define gen_arm_shift_im gen_arm_shift_im_mipsel
-#define gen_arm_shift_reg gen_arm_shift_reg_mipsel
-#define gen_bx gen_bx_mipsel
-#define gen_bx_im gen_bx_im_mipsel
-#define gen_clrex gen_clrex_mipsel
-#define generate_memory_topology generate_memory_topology_mipsel
-#define generic_timer_cp_reginfo generic_timer_cp_reginfo_mipsel
-#define gen_exception gen_exception_mipsel
-#define gen_exception_insn gen_exception_insn_mipsel
-#define gen_exception_internal gen_exception_internal_mipsel
-#define gen_exception_internal_insn gen_exception_internal_insn_mipsel
-#define gen_exception_return gen_exception_return_mipsel
-#define gen_goto_tb gen_goto_tb_mipsel
-#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_mipsel
-#define gen_helper_add_saturate gen_helper_add_saturate_mipsel
-#define gen_helper_add_setq gen_helper_add_setq_mipsel
-#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_mipsel
-#define gen_helper_clz32 gen_helper_clz32_mipsel
-#define gen_helper_clz64 gen_helper_clz64_mipsel
-#define gen_helper_clz_arm gen_helper_clz_arm_mipsel
-#define gen_helper_cpsr_read gen_helper_cpsr_read_mipsel
-#define gen_helper_cpsr_write gen_helper_cpsr_write_mipsel
-#define gen_helper_crc32_arm gen_helper_crc32_arm_mipsel
-#define gen_helper_crc32c gen_helper_crc32c_mipsel
-#define gen_helper_crypto_aese gen_helper_crypto_aese_mipsel
-#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_mipsel
-#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_mipsel
-#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_mipsel
-#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_mipsel
-#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_mipsel
-#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_mipsel
-#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_mipsel
-#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_mipsel
-#define gen_helper_double_saturate gen_helper_double_saturate_mipsel
-#define gen_helper_exception_internal gen_helper_exception_internal_mipsel
-#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_mipsel
-#define gen_helper_get_cp_reg gen_helper_get_cp_reg_mipsel
-#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_mipsel
-#define gen_helper_get_r13_banked gen_helper_get_r13_banked_mipsel
-#define gen_helper_get_user_reg gen_helper_get_user_reg_mipsel
-#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_mipsel
-#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_mipsel
-#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_mipsel
-#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_mipsel
-#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_mipsel
-#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_mipsel
-#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_mipsel
-#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_mipsel
-#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_mipsel
-#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_mipsel
-#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_mipsel
-#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_mipsel
-#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_mipsel
-#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_mipsel
-#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_mipsel
-#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_mipsel
-#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_mipsel
-#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_mipsel
-#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_mipsel
-#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_mipsel
-#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_mipsel
-#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_mipsel
-#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_mipsel
-#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_mipsel
-#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_mipsel
-#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_mipsel
-#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_mipsel
-#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_mipsel
-#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_mipsel
-#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_mipsel
-#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_mipsel
-#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_mipsel
-#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_mipsel
-#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_mipsel
-#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_mipsel
-#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_mipsel
-#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_mipsel
-#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_mipsel
-#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_mipsel
-#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_mipsel
-#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_mipsel
-#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_mipsel
-#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_mipsel
-#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_mipsel
-#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_mipsel
-#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_mipsel
-#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_mipsel
-#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_mipsel
-#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_mipsel
-#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_mipsel
-#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_mipsel
-#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_mipsel
-#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_mipsel
-#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_mipsel
-#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_mipsel
-#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_mipsel
-#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_mipsel
-#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_mipsel
-#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_mipsel
-#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_mipsel
-#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_mipsel
-#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_mipsel
-#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_mipsel
-#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_mipsel
-#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_mipsel
-#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_mipsel
-#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_mipsel
-#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_mipsel
-#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_mipsel
-#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_mipsel
-#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_mipsel
-#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_mipsel
-#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_mipsel
-#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_mipsel
-#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_mipsel
-#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_mipsel
-#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_mipsel
-#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_mipsel
-#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_mipsel
-#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_mipsel
-#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_mipsel
-#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_mipsel
-#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_mipsel
-#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_mipsel
-#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_mipsel
-#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_mipsel
-#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_mipsel
-#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_mipsel
-#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_mipsel
-#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_mipsel
-#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_mipsel
-#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_mipsel
-#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_mipsel
-#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_mipsel
-#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_mipsel
-#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_mipsel
-#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_mipsel
-#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_mipsel
-#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_mipsel
-#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_mipsel
-#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_mipsel
-#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_mipsel
-#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_mipsel
-#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_mipsel
-#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_mipsel
-#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_mipsel
-#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_mipsel
-#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_mipsel
-#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_mipsel
-#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_mipsel
-#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_mipsel
-#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_mipsel
-#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_mipsel
-#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_mipsel
-#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_mipsel
-#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_mipsel
-#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_mipsel
-#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_mipsel
-#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_mipsel
-#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_mipsel
-#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_mipsel
-#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_mipsel
-#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_mipsel
-#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_mipsel
-#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_mipsel
-#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_mipsel
-#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_mipsel
-#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_mipsel
-#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_mipsel
-#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_mipsel
-#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_mipsel
-#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_mipsel
-#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_mipsel
-#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_mipsel
-#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_mipsel
-#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_mipsel
-#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_mipsel
-#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_mipsel
-#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_mipsel
-#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_mipsel
-#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_mipsel
-#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_mipsel
-#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_mipsel
-#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_mipsel
-#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_mipsel
-#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_mipsel
-#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_mipsel
-#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_mipsel
-#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_mipsel
-#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_mipsel
-#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_mipsel
-#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_mipsel
-#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_mipsel
-#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_mipsel
-#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_mipsel
-#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_mipsel
-#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_mipsel
-#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_mipsel
-#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_mipsel
-#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_mipsel
-#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_mipsel
-#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_mipsel
-#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_mipsel
-#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_mipsel
-#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_mipsel
-#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_mipsel
-#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_mipsel
-#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_mipsel
-#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_mipsel
-#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_mipsel
-#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_mipsel
-#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_mipsel
-#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_mipsel
-#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_mipsel
-#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_mipsel
-#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_mipsel
-#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_mipsel
-#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_mipsel
-#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_mipsel
-#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_mipsel
-#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_mipsel
-#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_mipsel
-#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_mipsel
-#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_mipsel
-#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_mipsel
-#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_mipsel
-#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_mipsel
-#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_mipsel
-#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_mipsel
-#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_mipsel
-#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_mipsel
-#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_mipsel
-#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_mipsel
-#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_mipsel
-#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_mipsel
-#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_mipsel
-#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_mipsel
-#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_mipsel
-#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_mipsel
-#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_mipsel
-#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_mipsel
-#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_mipsel
-#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_mipsel
-#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_mipsel
-#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_mipsel
-#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_mipsel
-#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_mipsel
-#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_mipsel
-#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_mipsel
-#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_mipsel
-#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_mipsel
-#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_mipsel
-#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_mipsel
-#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_mipsel
-#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_mipsel
-#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_mipsel
-#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_mipsel
-#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_mipsel
-#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_mipsel
-#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_mipsel
-#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_mipsel
-#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_mipsel
-#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_mipsel
-#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_mipsel
-#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_mipsel
-#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_mipsel
-#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_mipsel
-#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_mipsel
-#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_mipsel
-#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_mipsel
-#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_mipsel
-#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_mipsel
-#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_mipsel
-#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_mipsel
-#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_mipsel
-#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_mipsel
-#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_mipsel
-#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_mipsel
-#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_mipsel
-#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_mipsel
-#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_mipsel
-#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_mipsel
-#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_mipsel
-#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_mipsel
-#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_mipsel
-#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_mipsel
-#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_mipsel
-#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_mipsel
-#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_mipsel
-#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_mipsel
-#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_mipsel
-#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_mipsel
-#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_mipsel
-#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_mipsel
-#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_mipsel
-#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_mipsel
-#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_mipsel
-#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_mipsel
-#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_mipsel
-#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_mipsel
-#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_mipsel
-#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_mipsel
-#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_mipsel
-#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_mipsel
-#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_mipsel
-#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_mipsel
-#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_mipsel
-#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_mipsel
-#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_mipsel
-#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_mipsel
-#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_mipsel
-#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_mipsel
-#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_mipsel
-#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_mipsel
-#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_mipsel
-#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_mipsel
-#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_mipsel
-#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_mipsel
-#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_mipsel
-#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_mipsel
-#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_mipsel
-#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_mipsel
-#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_mipsel
-#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_mipsel
-#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_mipsel
-#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_mipsel
-#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_mipsel
-#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_mipsel
-#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_mipsel
-#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_mipsel
-#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_mipsel
-#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_mipsel
-#define gen_helper_neon_tbl gen_helper_neon_tbl_mipsel
-#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_mipsel
-#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_mipsel
-#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_mipsel
-#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_mipsel
-#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_mipsel
-#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_mipsel
-#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_mipsel
-#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_mipsel
-#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_mipsel
-#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_mipsel
-#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_mipsel
-#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_mipsel
-#define gen_helper_neon_zip16 gen_helper_neon_zip16_mipsel
-#define gen_helper_neon_zip8 gen_helper_neon_zip8_mipsel
-#define gen_helper_pre_hvc gen_helper_pre_hvc_mipsel
-#define gen_helper_pre_smc gen_helper_pre_smc_mipsel
-#define gen_helper_qadd16 gen_helper_qadd16_mipsel
-#define gen_helper_qadd8 gen_helper_qadd8_mipsel
-#define gen_helper_qaddsubx gen_helper_qaddsubx_mipsel
-#define gen_helper_qsub16 gen_helper_qsub16_mipsel
-#define gen_helper_qsub8 gen_helper_qsub8_mipsel
-#define gen_helper_qsubaddx gen_helper_qsubaddx_mipsel
-#define gen_helper_rbit gen_helper_rbit_mipsel
-#define gen_helper_recpe_f32 gen_helper_recpe_f32_mipsel
-#define gen_helper_recpe_u32 gen_helper_recpe_u32_mipsel
-#define gen_helper_recps_f32 gen_helper_recps_f32_mipsel
-#define gen_helper_rintd gen_helper_rintd_mipsel
-#define gen_helper_rintd_exact gen_helper_rintd_exact_mipsel
-#define gen_helper_rints gen_helper_rints_mipsel
-#define gen_helper_rints_exact gen_helper_rints_exact_mipsel
-#define gen_helper_ror_cc gen_helper_ror_cc_mipsel
-#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_mipsel
-#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_mipsel
-#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_mipsel
-#define gen_helper_sadd16 gen_helper_sadd16_mipsel
-#define gen_helper_sadd8 gen_helper_sadd8_mipsel
-#define gen_helper_saddsubx gen_helper_saddsubx_mipsel
-#define gen_helper_sar_cc gen_helper_sar_cc_mipsel
-#define gen_helper_sdiv gen_helper_sdiv_mipsel
-#define gen_helper_sel_flags gen_helper_sel_flags_mipsel
-#define gen_helper_set_cp_reg gen_helper_set_cp_reg_mipsel
-#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_mipsel
-#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_mipsel
-#define gen_helper_set_r13_banked gen_helper_set_r13_banked_mipsel
-#define gen_helper_set_rmode gen_helper_set_rmode_mipsel
-#define gen_helper_set_user_reg gen_helper_set_user_reg_mipsel
-#define gen_helper_shadd16 gen_helper_shadd16_mipsel
-#define gen_helper_shadd8 gen_helper_shadd8_mipsel
-#define gen_helper_shaddsubx gen_helper_shaddsubx_mipsel
-#define gen_helper_shl_cc gen_helper_shl_cc_mipsel
-#define gen_helper_shr_cc gen_helper_shr_cc_mipsel
-#define gen_helper_shsub16 gen_helper_shsub16_mipsel
-#define gen_helper_shsub8 gen_helper_shsub8_mipsel
-#define gen_helper_shsubaddx gen_helper_shsubaddx_mipsel
-#define gen_helper_ssat gen_helper_ssat_mipsel
-#define gen_helper_ssat16 gen_helper_ssat16_mipsel
-#define gen_helper_ssub16 gen_helper_ssub16_mipsel
-#define gen_helper_ssub8 gen_helper_ssub8_mipsel
-#define gen_helper_ssubaddx gen_helper_ssubaddx_mipsel
-#define gen_helper_sub_saturate gen_helper_sub_saturate_mipsel
-#define gen_helper_sxtb16 gen_helper_sxtb16_mipsel
-#define gen_helper_uadd16 gen_helper_uadd16_mipsel
-#define gen_helper_uadd8 gen_helper_uadd8_mipsel
-#define gen_helper_uaddsubx gen_helper_uaddsubx_mipsel
-#define gen_helper_udiv gen_helper_udiv_mipsel
-#define gen_helper_uhadd16 gen_helper_uhadd16_mipsel
-#define gen_helper_uhadd8 gen_helper_uhadd8_mipsel
-#define gen_helper_uhaddsubx gen_helper_uhaddsubx_mipsel
-#define gen_helper_uhsub16 gen_helper_uhsub16_mipsel
-#define gen_helper_uhsub8 gen_helper_uhsub8_mipsel
-#define gen_helper_uhsubaddx gen_helper_uhsubaddx_mipsel
-#define gen_helper_uqadd16 gen_helper_uqadd16_mipsel
-#define gen_helper_uqadd8 gen_helper_uqadd8_mipsel
-#define gen_helper_uqaddsubx gen_helper_uqaddsubx_mipsel
-#define gen_helper_uqsub16 gen_helper_uqsub16_mipsel
-#define gen_helper_uqsub8 gen_helper_uqsub8_mipsel
-#define gen_helper_uqsubaddx gen_helper_uqsubaddx_mipsel
-#define gen_helper_usad8 gen_helper_usad8_mipsel
-#define gen_helper_usat gen_helper_usat_mipsel
-#define gen_helper_usat16 gen_helper_usat16_mipsel
-#define gen_helper_usub16 gen_helper_usub16_mipsel
-#define gen_helper_usub8 gen_helper_usub8_mipsel
-#define gen_helper_usubaddx gen_helper_usubaddx_mipsel
-#define gen_helper_uxtb16 gen_helper_uxtb16_mipsel
-#define gen_helper_v7m_mrs gen_helper_v7m_mrs_mipsel
-#define gen_helper_v7m_msr gen_helper_v7m_msr_mipsel
-#define gen_helper_vfp_absd gen_helper_vfp_absd_mipsel
-#define gen_helper_vfp_abss gen_helper_vfp_abss_mipsel
-#define gen_helper_vfp_addd gen_helper_vfp_addd_mipsel
-#define gen_helper_vfp_adds gen_helper_vfp_adds_mipsel
-#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_mipsel
-#define gen_helper_vfp_cmped gen_helper_vfp_cmped_mipsel
-#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_mipsel
-#define gen_helper_vfp_cmps gen_helper_vfp_cmps_mipsel
-#define gen_helper_vfp_divd gen_helper_vfp_divd_mipsel
-#define gen_helper_vfp_divs gen_helper_vfp_divs_mipsel
-#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_mipsel
-#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_mipsel
-#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_mipsel
-#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_mipsel
-#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_mipsel
-#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_mipsel
-#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mipsel
-#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_mipsel
-#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_mipsel
-#define gen_helper_vfp_maxs gen_helper_vfp_maxs_mipsel
-#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_mipsel
-#define gen_helper_vfp_minnums gen_helper_vfp_minnums_mipsel
-#define gen_helper_vfp_mins gen_helper_vfp_mins_mipsel
-#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_mipsel
-#define gen_helper_vfp_muladds gen_helper_vfp_muladds_mipsel
-#define gen_helper_vfp_muld gen_helper_vfp_muld_mipsel
-#define gen_helper_vfp_muls gen_helper_vfp_muls_mipsel
-#define gen_helper_vfp_negd gen_helper_vfp_negd_mipsel
-#define gen_helper_vfp_negs gen_helper_vfp_negs_mipsel
-#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mipsel
-#define gen_helper_vfp_shtod gen_helper_vfp_shtod_mipsel
-#define gen_helper_vfp_shtos gen_helper_vfp_shtos_mipsel
-#define gen_helper_vfp_sitod gen_helper_vfp_sitod_mipsel
-#define gen_helper_vfp_sitos gen_helper_vfp_sitos_mipsel
-#define gen_helper_vfp_sltod gen_helper_vfp_sltod_mipsel
-#define gen_helper_vfp_sltos gen_helper_vfp_sltos_mipsel
-#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_mipsel
-#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_mipsel
-#define gen_helper_vfp_subd gen_helper_vfp_subd_mipsel
-#define gen_helper_vfp_subs gen_helper_vfp_subs_mipsel
-#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_mipsel
-#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_mipsel
-#define gen_helper_vfp_tosid gen_helper_vfp_tosid_mipsel
-#define gen_helper_vfp_tosis gen_helper_vfp_tosis_mipsel
-#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_mipsel
-#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_mipsel
-#define gen_helper_vfp_tosld gen_helper_vfp_tosld_mipsel
-#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_mipsel
-#define gen_helper_vfp_tosls gen_helper_vfp_tosls_mipsel
-#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_mipsel
-#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_mipsel
-#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_mipsel
-#define gen_helper_vfp_touid gen_helper_vfp_touid_mipsel
-#define gen_helper_vfp_touis gen_helper_vfp_touis_mipsel
-#define gen_helper_vfp_touizd gen_helper_vfp_touizd_mipsel
-#define gen_helper_vfp_touizs gen_helper_vfp_touizs_mipsel
-#define gen_helper_vfp_tould gen_helper_vfp_tould_mipsel
-#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_mipsel
-#define gen_helper_vfp_touls gen_helper_vfp_touls_mipsel
-#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_mipsel
-#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_mipsel
-#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_mipsel
-#define gen_helper_vfp_uitod gen_helper_vfp_uitod_mipsel
-#define gen_helper_vfp_uitos gen_helper_vfp_uitos_mipsel
-#define gen_helper_vfp_ultod gen_helper_vfp_ultod_mipsel
-#define gen_helper_vfp_ultos gen_helper_vfp_ultos_mipsel
-#define gen_helper_wfe gen_helper_wfe_mipsel
-#define gen_helper_wfi gen_helper_wfi_mipsel
-#define gen_hvc gen_hvc_mipsel
-#define gen_intermediate_code_internal gen_intermediate_code_internal_mipsel
-#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_mipsel
-#define gen_iwmmxt_address gen_iwmmxt_address_mipsel
-#define gen_iwmmxt_shift gen_iwmmxt_shift_mipsel
-#define gen_jmp gen_jmp_mipsel
-#define gen_load_and_replicate gen_load_and_replicate_mipsel
-#define gen_load_exclusive gen_load_exclusive_mipsel
-#define gen_logic_CC gen_logic_CC_mipsel
-#define gen_logicq_cc gen_logicq_cc_mipsel
-#define gen_lookup_tb gen_lookup_tb_mipsel
-#define gen_mov_F0_vreg gen_mov_F0_vreg_mipsel
-#define gen_mov_F1_vreg gen_mov_F1_vreg_mipsel
-#define gen_mov_vreg_F0 gen_mov_vreg_F0_mipsel
-#define gen_muls_i64_i32 gen_muls_i64_i32_mipsel
-#define gen_mulu_i64_i32 gen_mulu_i64_i32_mipsel
-#define gen_mulxy gen_mulxy_mipsel
-#define gen_neon_add gen_neon_add_mipsel
-#define gen_neon_addl gen_neon_addl_mipsel
-#define gen_neon_addl_saturate gen_neon_addl_saturate_mipsel
-#define gen_neon_bsl gen_neon_bsl_mipsel
-#define gen_neon_dup_high16 gen_neon_dup_high16_mipsel
-#define gen_neon_dup_low16 gen_neon_dup_low16_mipsel
-#define gen_neon_dup_u8 gen_neon_dup_u8_mipsel
-#define gen_neon_mull gen_neon_mull_mipsel
-#define gen_neon_narrow gen_neon_narrow_mipsel
-#define gen_neon_narrow_op gen_neon_narrow_op_mipsel
-#define gen_neon_narrow_sats gen_neon_narrow_sats_mipsel
-#define gen_neon_narrow_satu gen_neon_narrow_satu_mipsel
-#define gen_neon_negl gen_neon_negl_mipsel
-#define gen_neon_rsb gen_neon_rsb_mipsel
-#define gen_neon_shift_narrow gen_neon_shift_narrow_mipsel
-#define gen_neon_subl gen_neon_subl_mipsel
-#define gen_neon_trn_u16 gen_neon_trn_u16_mipsel
-#define gen_neon_trn_u8 gen_neon_trn_u8_mipsel
-#define gen_neon_unarrow_sats gen_neon_unarrow_sats_mipsel
-#define gen_neon_unzip gen_neon_unzip_mipsel
-#define gen_neon_widen gen_neon_widen_mipsel
-#define gen_neon_zip gen_neon_zip_mipsel
+#define float128_to_int32 float128_to_int32_mipsel
+#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mipsel
+#define float128_to_int64 float128_to_int64_mipsel
+#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mipsel
+#define float128_to_uint64 float128_to_uint64_mipsel
+#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_mipsel
+#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_mipsel
+#define float128_to_uint32 float128_to_uint32_mipsel
+#define float128_to_float32 float128_to_float32_mipsel
+#define float128_to_float64 float128_to_float64_mipsel
+#define float128_to_floatx80 float128_to_floatx80_mipsel
+#define float128_round_to_int float128_round_to_int_mipsel
+#define float128_add float128_add_mipsel
+#define float128_sub float128_sub_mipsel
+#define float128_mul float128_mul_mipsel
+#define float128_div float128_div_mipsel
+#define float128_rem float128_rem_mipsel
+#define float128_sqrt float128_sqrt_mipsel
+#define float128_eq float128_eq_mipsel
+#define float128_le float128_le_mipsel
+#define float128_lt float128_lt_mipsel
+#define float128_unordered float128_unordered_mipsel
+#define float128_eq_quiet float128_eq_quiet_mipsel
+#define float128_le_quiet float128_le_quiet_mipsel
+#define float128_lt_quiet float128_lt_quiet_mipsel
+#define float128_unordered_quiet float128_unordered_quiet_mipsel
+#define floatx80_compare floatx80_compare_mipsel
+#define floatx80_compare_quiet floatx80_compare_quiet_mipsel
+#define float128_compare float128_compare_mipsel
+#define float128_compare_quiet float128_compare_quiet_mipsel
+#define floatx80_scalbn floatx80_scalbn_mipsel
+#define float128_scalbn float128_scalbn_mipsel
+#define softfloat_init softfloat_init_mipsel
+#define tcg_optimize tcg_optimize_mipsel
 #define gen_new_label gen_new_label_mipsel
-#define gen_nop_hint gen_nop_hint_mipsel
-#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_mipsel
-#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_mipsel
-#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_mipsel
-#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_mipsel
-#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_mipsel
-#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_mipsel
-#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_mipsel
-#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_mipsel
-#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_mipsel
-#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_mipsel
-#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_mipsel
-#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_mipsel
-#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_mipsel
-#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_mipsel
-#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_mipsel
-#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_mipsel
-#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_mipsel
-#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_mipsel
-#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_mipsel
-#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_mipsel
-#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_mipsel
-#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_mipsel
-#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_mipsel
-#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_mipsel
-#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_mipsel
-#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_mipsel
-#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_mipsel
-#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_mipsel
-#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_mipsel
-#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_mipsel
-#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_mipsel
-#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_mipsel
-#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_mipsel
-#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_mipsel
-#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_mipsel
-#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_mipsel
-#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_mipsel
-#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_mipsel
-#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_mipsel
-#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_mipsel
-#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_mipsel
-#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_mipsel
-#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_mipsel
-#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_mipsel
-#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_mipsel
-#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_mipsel
-#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_mipsel
-#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_mipsel
-#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_mipsel
-#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_mipsel
-#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_mipsel
-#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_mipsel
-#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_mipsel
-#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_mipsel
-#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_mipsel
-#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_mipsel
-#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_mipsel
-#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_mipsel
-#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_mipsel
-#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_mipsel
-#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_mipsel
-#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_mipsel
-#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_mipsel
-#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_mipsel
-#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_mipsel
-#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_mipsel
-#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_mipsel
-#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_mipsel
-#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_mipsel
-#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_mipsel
-#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_mipsel
-#define
gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_mipsel -#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_mipsel -#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_mipsel -#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_mipsel -#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_mipsel -#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_mipsel -#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_mipsel -#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_mipsel -#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_mipsel -#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_mipsel -#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_mipsel -#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_mipsel -#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_mipsel -#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_mipsel -#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_mipsel -#define gen_rev16 gen_rev16_mipsel -#define gen_revsh gen_revsh_mipsel -#define gen_rfe gen_rfe_mipsel -#define gen_sar gen_sar_mipsel -#define gen_sbc_CC gen_sbc_CC_mipsel -#define gen_sbfx gen_sbfx_mipsel -#define gen_set_CF_bit31 gen_set_CF_bit31_mipsel -#define gen_set_condexec gen_set_condexec_mipsel -#define gen_set_cpsr gen_set_cpsr_mipsel -#define gen_set_label gen_set_label_mipsel -#define gen_set_pc_im gen_set_pc_im_mipsel -#define gen_set_psr gen_set_psr_mipsel -#define gen_set_psr_im gen_set_psr_im_mipsel -#define gen_shl gen_shl_mipsel -#define gen_shr gen_shr_mipsel -#define gen_smc gen_smc_mipsel -#define gen_smul_dual gen_smul_dual_mipsel -#define gen_srs gen_srs_mipsel -#define gen_ss_advance gen_ss_advance_mipsel -#define gen_step_complete_exception gen_step_complete_exception_mipsel -#define gen_store_exclusive gen_store_exclusive_mipsel -#define gen_storeq_reg gen_storeq_reg_mipsel -#define gen_sub_carry gen_sub_carry_mipsel -#define gen_sub_CC gen_sub_CC_mipsel -#define gen_subq_msw gen_subq_msw_mipsel -#define gen_swap_half gen_swap_half_mipsel -#define gen_thumb2_data_op gen_thumb2_data_op_mipsel -#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_mipsel -#define gen_ubfx gen_ubfx_mipsel -#define gen_vfp_abs gen_vfp_abs_mipsel -#define gen_vfp_add gen_vfp_add_mipsel -#define gen_vfp_cmp gen_vfp_cmp_mipsel -#define gen_vfp_cmpe gen_vfp_cmpe_mipsel -#define gen_vfp_div gen_vfp_div_mipsel -#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_mipsel -#define gen_vfp_F1_mul gen_vfp_F1_mul_mipsel -#define gen_vfp_F1_neg gen_vfp_F1_neg_mipsel -#define gen_vfp_ld gen_vfp_ld_mipsel -#define gen_vfp_mrs gen_vfp_mrs_mipsel -#define gen_vfp_msr gen_vfp_msr_mipsel -#define gen_vfp_mul gen_vfp_mul_mipsel -#define gen_vfp_neg gen_vfp_neg_mipsel -#define gen_vfp_shto gen_vfp_shto_mipsel -#define gen_vfp_sito gen_vfp_sito_mipsel -#define gen_vfp_slto gen_vfp_slto_mipsel -#define gen_vfp_sqrt gen_vfp_sqrt_mipsel -#define gen_vfp_st gen_vfp_st_mipsel -#define gen_vfp_sub gen_vfp_sub_mipsel -#define gen_vfp_tosh gen_vfp_tosh_mipsel -#define gen_vfp_tosi gen_vfp_tosi_mipsel -#define gen_vfp_tosiz gen_vfp_tosiz_mipsel -#define gen_vfp_tosl gen_vfp_tosl_mipsel -#define gen_vfp_touh gen_vfp_touh_mipsel -#define gen_vfp_toui gen_vfp_toui_mipsel -#define gen_vfp_touiz gen_vfp_touiz_mipsel -#define gen_vfp_toul gen_vfp_toul_mipsel -#define gen_vfp_uhto gen_vfp_uhto_mipsel -#define gen_vfp_uito gen_vfp_uito_mipsel -#define gen_vfp_ulto gen_vfp_ulto_mipsel 
-#define get_arm_cp_reginfo get_arm_cp_reginfo_mipsel -#define get_clock get_clock_mipsel -#define get_clock_realtime get_clock_realtime_mipsel -#define get_constraint_priority get_constraint_priority_mipsel -#define get_float_exception_flags get_float_exception_flags_mipsel -#define get_float_rounding_mode get_float_rounding_mode_mipsel -#define get_fpstatus_ptr get_fpstatus_ptr_mipsel -#define get_level1_table_address get_level1_table_address_mipsel -#define get_mem_index get_mem_index_mipsel -#define get_next_param_value get_next_param_value_mipsel -#define get_opt_name get_opt_name_mipsel -#define get_opt_value get_opt_value_mipsel -#define get_page_addr_code get_page_addr_code_mipsel -#define get_param_value get_param_value_mipsel -#define get_phys_addr get_phys_addr_mipsel -#define get_phys_addr_lpae get_phys_addr_lpae_mipsel -#define get_phys_addr_mpu get_phys_addr_mpu_mipsel -#define get_phys_addr_v5 get_phys_addr_v5_mipsel -#define get_phys_addr_v6 get_phys_addr_v6_mipsel -#define get_system_memory get_system_memory_mipsel -#define get_ticks_per_sec get_ticks_per_sec_mipsel -#define g_list_insert_sorted_merged g_list_insert_sorted_merged_mipsel -#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__mipsel -#define gt_cntfrq_access gt_cntfrq_access_mipsel -#define gt_cnt_read gt_cnt_read_mipsel -#define gt_cnt_reset gt_cnt_reset_mipsel -#define gt_counter_access gt_counter_access_mipsel -#define gt_ctl_write gt_ctl_write_mipsel -#define gt_cval_write gt_cval_write_mipsel -#define gt_get_countervalue gt_get_countervalue_mipsel -#define gt_pct_access gt_pct_access_mipsel -#define gt_ptimer_access gt_ptimer_access_mipsel -#define gt_recalc_timer gt_recalc_timer_mipsel -#define gt_timer_access gt_timer_access_mipsel -#define gt_tval_read gt_tval_read_mipsel -#define gt_tval_write gt_tval_write_mipsel -#define gt_vct_access gt_vct_access_mipsel -#define gt_vtimer_access gt_vtimer_access_mipsel -#define guest_phys_blocks_free guest_phys_blocks_free_mipsel -#define guest_phys_blocks_init guest_phys_blocks_init_mipsel -#define handle_vcvt handle_vcvt_mipsel -#define handle_vminmaxnm handle_vminmaxnm_mipsel -#define handle_vrint handle_vrint_mipsel -#define handle_vsel handle_vsel_mipsel -#define has_help_option has_help_option_mipsel -#define have_bmi1 have_bmi1_mipsel -#define have_bmi2 have_bmi2_mipsel -#define hcr_write hcr_write_mipsel -#define helper_access_check_cp_reg helper_access_check_cp_reg_mipsel -#define helper_add_saturate helper_add_saturate_mipsel -#define helper_add_setq helper_add_setq_mipsel -#define helper_add_usaturate helper_add_usaturate_mipsel -#define helper_be_ldl_cmmu helper_be_ldl_cmmu_mipsel -#define helper_be_ldq_cmmu helper_be_ldq_cmmu_mipsel -#define helper_be_ldq_mmu helper_be_ldq_mmu_mipsel -#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mipsel -#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mipsel -#define helper_be_ldul_mmu helper_be_ldul_mmu_mipsel -#define helper_be_lduw_mmu helper_be_lduw_mmu_mipsel -#define helper_be_ldw_cmmu helper_be_ldw_cmmu_mipsel -#define helper_be_stl_mmu helper_be_stl_mmu_mipsel -#define helper_be_stq_mmu helper_be_stq_mmu_mipsel -#define helper_be_stw_mmu helper_be_stw_mmu_mipsel -#define helper_clear_pstate_ss helper_clear_pstate_ss_mipsel -#define helper_clz_arm helper_clz_arm_mipsel -#define helper_cpsr_read helper_cpsr_read_mipsel -#define helper_cpsr_write helper_cpsr_write_mipsel -#define helper_crc32_arm helper_crc32_arm_mipsel -#define helper_crc32c helper_crc32c_mipsel -#define helper_crypto_aese 
helper_crypto_aese_mipsel -#define helper_crypto_aesmc helper_crypto_aesmc_mipsel -#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_mipsel -#define helper_crypto_sha1h helper_crypto_sha1h_mipsel -#define helper_crypto_sha1su1 helper_crypto_sha1su1_mipsel -#define helper_crypto_sha256h helper_crypto_sha256h_mipsel -#define helper_crypto_sha256h2 helper_crypto_sha256h2_mipsel -#define helper_crypto_sha256su0 helper_crypto_sha256su0_mipsel -#define helper_crypto_sha256su1 helper_crypto_sha256su1_mipsel -#define helper_dc_zva helper_dc_zva_mipsel -#define helper_double_saturate helper_double_saturate_mipsel -#define helper_exception_internal helper_exception_internal_mipsel -#define helper_exception_return helper_exception_return_mipsel -#define helper_exception_with_syndrome helper_exception_with_syndrome_mipsel -#define helper_get_cp_reg helper_get_cp_reg_mipsel -#define helper_get_cp_reg64 helper_get_cp_reg64_mipsel -#define helper_get_r13_banked helper_get_r13_banked_mipsel -#define helper_get_user_reg helper_get_user_reg_mipsel -#define helper_iwmmxt_addcb helper_iwmmxt_addcb_mipsel -#define helper_iwmmxt_addcl helper_iwmmxt_addcl_mipsel -#define helper_iwmmxt_addcw helper_iwmmxt_addcw_mipsel -#define helper_iwmmxt_addnb helper_iwmmxt_addnb_mipsel -#define helper_iwmmxt_addnl helper_iwmmxt_addnl_mipsel -#define helper_iwmmxt_addnw helper_iwmmxt_addnw_mipsel -#define helper_iwmmxt_addsb helper_iwmmxt_addsb_mipsel -#define helper_iwmmxt_addsl helper_iwmmxt_addsl_mipsel -#define helper_iwmmxt_addsw helper_iwmmxt_addsw_mipsel -#define helper_iwmmxt_addub helper_iwmmxt_addub_mipsel -#define helper_iwmmxt_addul helper_iwmmxt_addul_mipsel -#define helper_iwmmxt_adduw helper_iwmmxt_adduw_mipsel -#define helper_iwmmxt_align helper_iwmmxt_align_mipsel -#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_mipsel -#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_mipsel -#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_mipsel -#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_mipsel -#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_mipsel -#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_mipsel -#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_mipsel -#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_mipsel -#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_mipsel -#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_mipsel -#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_mipsel -#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_mipsel -#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_mipsel -#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_mipsel -#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_mipsel -#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_mipsel -#define helper_iwmmxt_insr helper_iwmmxt_insr_mipsel -#define helper_iwmmxt_macsw helper_iwmmxt_macsw_mipsel -#define helper_iwmmxt_macuw helper_iwmmxt_macuw_mipsel -#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_mipsel -#define helper_iwmmxt_madduq helper_iwmmxt_madduq_mipsel -#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_mipsel -#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_mipsel -#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_mipsel -#define helper_iwmmxt_maxub helper_iwmmxt_maxub_mipsel -#define helper_iwmmxt_maxul helper_iwmmxt_maxul_mipsel -#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_mipsel -#define helper_iwmmxt_minsb helper_iwmmxt_minsb_mipsel -#define helper_iwmmxt_minsl helper_iwmmxt_minsl_mipsel -#define helper_iwmmxt_minsw helper_iwmmxt_minsw_mipsel -#define helper_iwmmxt_minub helper_iwmmxt_minub_mipsel 
-#define helper_iwmmxt_minul helper_iwmmxt_minul_mipsel -#define helper_iwmmxt_minuw helper_iwmmxt_minuw_mipsel -#define helper_iwmmxt_msbb helper_iwmmxt_msbb_mipsel -#define helper_iwmmxt_msbl helper_iwmmxt_msbl_mipsel -#define helper_iwmmxt_msbw helper_iwmmxt_msbw_mipsel -#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_mipsel -#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_mipsel -#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_mipsel -#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_mipsel -#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_mipsel -#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_mipsel -#define helper_iwmmxt_mululw helper_iwmmxt_mululw_mipsel -#define helper_iwmmxt_packsl helper_iwmmxt_packsl_mipsel -#define helper_iwmmxt_packsq helper_iwmmxt_packsq_mipsel -#define helper_iwmmxt_packsw helper_iwmmxt_packsw_mipsel -#define helper_iwmmxt_packul helper_iwmmxt_packul_mipsel -#define helper_iwmmxt_packuq helper_iwmmxt_packuq_mipsel -#define helper_iwmmxt_packuw helper_iwmmxt_packuw_mipsel -#define helper_iwmmxt_rorl helper_iwmmxt_rorl_mipsel -#define helper_iwmmxt_rorq helper_iwmmxt_rorq_mipsel -#define helper_iwmmxt_rorw helper_iwmmxt_rorw_mipsel -#define helper_iwmmxt_sadb helper_iwmmxt_sadb_mipsel -#define helper_iwmmxt_sadw helper_iwmmxt_sadw_mipsel -#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_mipsel -#define helper_iwmmxt_shufh helper_iwmmxt_shufh_mipsel -#define helper_iwmmxt_slll helper_iwmmxt_slll_mipsel -#define helper_iwmmxt_sllq helper_iwmmxt_sllq_mipsel -#define helper_iwmmxt_sllw helper_iwmmxt_sllw_mipsel -#define helper_iwmmxt_sral helper_iwmmxt_sral_mipsel -#define helper_iwmmxt_sraq helper_iwmmxt_sraq_mipsel -#define helper_iwmmxt_sraw helper_iwmmxt_sraw_mipsel -#define helper_iwmmxt_srll helper_iwmmxt_srll_mipsel -#define helper_iwmmxt_srlq helper_iwmmxt_srlq_mipsel -#define helper_iwmmxt_srlw helper_iwmmxt_srlw_mipsel -#define helper_iwmmxt_subnb helper_iwmmxt_subnb_mipsel -#define helper_iwmmxt_subnl helper_iwmmxt_subnl_mipsel -#define helper_iwmmxt_subnw helper_iwmmxt_subnw_mipsel -#define helper_iwmmxt_subsb helper_iwmmxt_subsb_mipsel -#define helper_iwmmxt_subsl helper_iwmmxt_subsl_mipsel -#define helper_iwmmxt_subsw helper_iwmmxt_subsw_mipsel -#define helper_iwmmxt_subub helper_iwmmxt_subub_mipsel -#define helper_iwmmxt_subul helper_iwmmxt_subul_mipsel -#define helper_iwmmxt_subuw helper_iwmmxt_subuw_mipsel -#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_mipsel -#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_mipsel -#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_mipsel -#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_mipsel -#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_mipsel -#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_mipsel -#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_mipsel -#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_mipsel -#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_mipsel -#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_mipsel -#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_mipsel -#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_mipsel -#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_mipsel -#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_mipsel -#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_mipsel -#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_mipsel -#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_mipsel -#define helper_iwmmxt_unpacklw 
helper_iwmmxt_unpacklw_mipsel -#define helper_ldb_cmmu helper_ldb_cmmu_mipsel -#define helper_ldb_mmu helper_ldb_mmu_mipsel -#define helper_ldl_cmmu helper_ldl_cmmu_mipsel -#define helper_ldl_mmu helper_ldl_mmu_mipsel -#define helper_ldq_cmmu helper_ldq_cmmu_mipsel -#define helper_ldq_mmu helper_ldq_mmu_mipsel -#define helper_ldw_cmmu helper_ldw_cmmu_mipsel -#define helper_ldw_mmu helper_ldw_mmu_mipsel -#define helper_le_ldl_cmmu helper_le_ldl_cmmu_mipsel -#define helper_le_ldq_cmmu helper_le_ldq_cmmu_mipsel -#define helper_le_ldq_mmu helper_le_ldq_mmu_mipsel -#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mipsel -#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mipsel -#define helper_le_ldul_mmu helper_le_ldul_mmu_mipsel -#define helper_le_lduw_mmu helper_le_lduw_mmu_mipsel -#define helper_le_ldw_cmmu helper_le_ldw_cmmu_mipsel -#define helper_le_stl_mmu helper_le_stl_mmu_mipsel -#define helper_le_stq_mmu helper_le_stq_mmu_mipsel -#define helper_le_stw_mmu helper_le_stw_mmu_mipsel -#define helper_msr_i_pstate helper_msr_i_pstate_mipsel -#define helper_neon_abd_f32 helper_neon_abd_f32_mipsel -#define helper_neon_abdl_s16 helper_neon_abdl_s16_mipsel -#define helper_neon_abdl_s32 helper_neon_abdl_s32_mipsel -#define helper_neon_abdl_s64 helper_neon_abdl_s64_mipsel -#define helper_neon_abdl_u16 helper_neon_abdl_u16_mipsel -#define helper_neon_abdl_u32 helper_neon_abdl_u32_mipsel -#define helper_neon_abdl_u64 helper_neon_abdl_u64_mipsel -#define helper_neon_abd_s16 helper_neon_abd_s16_mipsel -#define helper_neon_abd_s32 helper_neon_abd_s32_mipsel -#define helper_neon_abd_s8 helper_neon_abd_s8_mipsel -#define helper_neon_abd_u16 helper_neon_abd_u16_mipsel -#define helper_neon_abd_u32 helper_neon_abd_u32_mipsel -#define helper_neon_abd_u8 helper_neon_abd_u8_mipsel -#define helper_neon_abs_s16 helper_neon_abs_s16_mipsel -#define helper_neon_abs_s8 helper_neon_abs_s8_mipsel -#define helper_neon_acge_f32 helper_neon_acge_f32_mipsel -#define helper_neon_acge_f64 helper_neon_acge_f64_mipsel -#define helper_neon_acgt_f32 helper_neon_acgt_f32_mipsel -#define helper_neon_acgt_f64 helper_neon_acgt_f64_mipsel -#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_mipsel -#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_mipsel -#define helper_neon_addl_u16 helper_neon_addl_u16_mipsel -#define helper_neon_addl_u32 helper_neon_addl_u32_mipsel -#define helper_neon_add_u16 helper_neon_add_u16_mipsel -#define helper_neon_add_u8 helper_neon_add_u8_mipsel -#define helper_neon_ceq_f32 helper_neon_ceq_f32_mipsel -#define helper_neon_ceq_u16 helper_neon_ceq_u16_mipsel -#define helper_neon_ceq_u32 helper_neon_ceq_u32_mipsel -#define helper_neon_ceq_u8 helper_neon_ceq_u8_mipsel -#define helper_neon_cge_f32 helper_neon_cge_f32_mipsel -#define helper_neon_cge_s16 helper_neon_cge_s16_mipsel -#define helper_neon_cge_s32 helper_neon_cge_s32_mipsel -#define helper_neon_cge_s8 helper_neon_cge_s8_mipsel -#define helper_neon_cge_u16 helper_neon_cge_u16_mipsel -#define helper_neon_cge_u32 helper_neon_cge_u32_mipsel -#define helper_neon_cge_u8 helper_neon_cge_u8_mipsel -#define helper_neon_cgt_f32 helper_neon_cgt_f32_mipsel -#define helper_neon_cgt_s16 helper_neon_cgt_s16_mipsel -#define helper_neon_cgt_s32 helper_neon_cgt_s32_mipsel -#define helper_neon_cgt_s8 helper_neon_cgt_s8_mipsel -#define helper_neon_cgt_u16 helper_neon_cgt_u16_mipsel -#define helper_neon_cgt_u32 helper_neon_cgt_u32_mipsel -#define helper_neon_cgt_u8 helper_neon_cgt_u8_mipsel -#define helper_neon_cls_s16 
helper_neon_cls_s16_mipsel -#define helper_neon_cls_s32 helper_neon_cls_s32_mipsel -#define helper_neon_cls_s8 helper_neon_cls_s8_mipsel -#define helper_neon_clz_u16 helper_neon_clz_u16_mipsel -#define helper_neon_clz_u8 helper_neon_clz_u8_mipsel -#define helper_neon_cnt_u8 helper_neon_cnt_u8_mipsel -#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_mipsel -#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_mipsel -#define helper_neon_hadd_s16 helper_neon_hadd_s16_mipsel -#define helper_neon_hadd_s32 helper_neon_hadd_s32_mipsel -#define helper_neon_hadd_s8 helper_neon_hadd_s8_mipsel -#define helper_neon_hadd_u16 helper_neon_hadd_u16_mipsel -#define helper_neon_hadd_u32 helper_neon_hadd_u32_mipsel -#define helper_neon_hadd_u8 helper_neon_hadd_u8_mipsel -#define helper_neon_hsub_s16 helper_neon_hsub_s16_mipsel -#define helper_neon_hsub_s32 helper_neon_hsub_s32_mipsel -#define helper_neon_hsub_s8 helper_neon_hsub_s8_mipsel -#define helper_neon_hsub_u16 helper_neon_hsub_u16_mipsel -#define helper_neon_hsub_u32 helper_neon_hsub_u32_mipsel -#define helper_neon_hsub_u8 helper_neon_hsub_u8_mipsel -#define helper_neon_max_s16 helper_neon_max_s16_mipsel -#define helper_neon_max_s32 helper_neon_max_s32_mipsel -#define helper_neon_max_s8 helper_neon_max_s8_mipsel -#define helper_neon_max_u16 helper_neon_max_u16_mipsel -#define helper_neon_max_u32 helper_neon_max_u32_mipsel -#define helper_neon_max_u8 helper_neon_max_u8_mipsel -#define helper_neon_min_s16 helper_neon_min_s16_mipsel -#define helper_neon_min_s32 helper_neon_min_s32_mipsel -#define helper_neon_min_s8 helper_neon_min_s8_mipsel -#define helper_neon_min_u16 helper_neon_min_u16_mipsel -#define helper_neon_min_u32 helper_neon_min_u32_mipsel -#define helper_neon_min_u8 helper_neon_min_u8_mipsel -#define helper_neon_mull_p8 helper_neon_mull_p8_mipsel -#define helper_neon_mull_s16 helper_neon_mull_s16_mipsel -#define helper_neon_mull_s8 helper_neon_mull_s8_mipsel -#define helper_neon_mull_u16 helper_neon_mull_u16_mipsel -#define helper_neon_mull_u8 helper_neon_mull_u8_mipsel -#define helper_neon_mul_p8 helper_neon_mul_p8_mipsel -#define helper_neon_mul_u16 helper_neon_mul_u16_mipsel -#define helper_neon_mul_u8 helper_neon_mul_u8_mipsel -#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_mipsel -#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_mipsel -#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_mipsel -#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_mipsel -#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_mipsel -#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_mipsel -#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_mipsel -#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_mipsel -#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_mipsel -#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_mipsel -#define helper_neon_narrow_u16 helper_neon_narrow_u16_mipsel -#define helper_neon_narrow_u8 helper_neon_narrow_u8_mipsel -#define helper_neon_negl_u16 helper_neon_negl_u16_mipsel -#define helper_neon_negl_u32 helper_neon_negl_u32_mipsel -#define helper_neon_paddl_u16 helper_neon_paddl_u16_mipsel -#define helper_neon_paddl_u32 helper_neon_paddl_u32_mipsel -#define helper_neon_padd_u16 helper_neon_padd_u16_mipsel -#define helper_neon_padd_u8 helper_neon_padd_u8_mipsel -#define helper_neon_pmax_s16 helper_neon_pmax_s16_mipsel -#define helper_neon_pmax_s8 helper_neon_pmax_s8_mipsel 
-#define helper_neon_pmax_u16 helper_neon_pmax_u16_mipsel -#define helper_neon_pmax_u8 helper_neon_pmax_u8_mipsel -#define helper_neon_pmin_s16 helper_neon_pmin_s16_mipsel -#define helper_neon_pmin_s8 helper_neon_pmin_s8_mipsel -#define helper_neon_pmin_u16 helper_neon_pmin_u16_mipsel -#define helper_neon_pmin_u8 helper_neon_pmin_u8_mipsel -#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_mipsel -#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_mipsel -#define helper_neon_qabs_s16 helper_neon_qabs_s16_mipsel -#define helper_neon_qabs_s32 helper_neon_qabs_s32_mipsel -#define helper_neon_qabs_s64 helper_neon_qabs_s64_mipsel -#define helper_neon_qabs_s8 helper_neon_qabs_s8_mipsel -#define helper_neon_qadd_s16 helper_neon_qadd_s16_mipsel -#define helper_neon_qadd_s32 helper_neon_qadd_s32_mipsel -#define helper_neon_qadd_s64 helper_neon_qadd_s64_mipsel -#define helper_neon_qadd_s8 helper_neon_qadd_s8_mipsel -#define helper_neon_qadd_u16 helper_neon_qadd_u16_mipsel -#define helper_neon_qadd_u32 helper_neon_qadd_u32_mipsel -#define helper_neon_qadd_u64 helper_neon_qadd_u64_mipsel -#define helper_neon_qadd_u8 helper_neon_qadd_u8_mipsel -#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_mipsel -#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_mipsel -#define helper_neon_qneg_s16 helper_neon_qneg_s16_mipsel -#define helper_neon_qneg_s32 helper_neon_qneg_s32_mipsel -#define helper_neon_qneg_s64 helper_neon_qneg_s64_mipsel -#define helper_neon_qneg_s8 helper_neon_qneg_s8_mipsel -#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_mipsel -#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_mipsel -#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_mipsel -#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_mipsel -#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_mipsel -#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_mipsel -#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_mipsel -#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_mipsel -#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_mipsel -#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_mipsel -#define helper_neon_qshl_s16 helper_neon_qshl_s16_mipsel -#define helper_neon_qshl_s32 helper_neon_qshl_s32_mipsel -#define helper_neon_qshl_s64 helper_neon_qshl_s64_mipsel -#define helper_neon_qshl_s8 helper_neon_qshl_s8_mipsel -#define helper_neon_qshl_u16 helper_neon_qshl_u16_mipsel -#define helper_neon_qshl_u32 helper_neon_qshl_u32_mipsel -#define helper_neon_qshl_u64 helper_neon_qshl_u64_mipsel -#define helper_neon_qshl_u8 helper_neon_qshl_u8_mipsel -#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_mipsel -#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_mipsel -#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_mipsel -#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_mipsel -#define helper_neon_qsub_s16 helper_neon_qsub_s16_mipsel -#define helper_neon_qsub_s32 helper_neon_qsub_s32_mipsel -#define helper_neon_qsub_s64 helper_neon_qsub_s64_mipsel -#define helper_neon_qsub_s8 helper_neon_qsub_s8_mipsel -#define helper_neon_qsub_u16 helper_neon_qsub_u16_mipsel -#define helper_neon_qsub_u32 helper_neon_qsub_u32_mipsel -#define helper_neon_qsub_u64 helper_neon_qsub_u64_mipsel -#define helper_neon_qsub_u8 helper_neon_qsub_u8_mipsel -#define helper_neon_qunzip16 helper_neon_qunzip16_mipsel -#define helper_neon_qunzip32 helper_neon_qunzip32_mipsel -#define helper_neon_qunzip8 helper_neon_qunzip8_mipsel -#define helper_neon_qzip16 helper_neon_qzip16_mipsel -#define helper_neon_qzip32 
helper_neon_qzip32_mipsel -#define helper_neon_qzip8 helper_neon_qzip8_mipsel -#define helper_neon_rbit_u8 helper_neon_rbit_u8_mipsel -#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_mipsel -#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_mipsel -#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_mipsel -#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_mipsel -#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_mipsel -#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_mipsel -#define helper_neon_rshl_s16 helper_neon_rshl_s16_mipsel -#define helper_neon_rshl_s32 helper_neon_rshl_s32_mipsel -#define helper_neon_rshl_s64 helper_neon_rshl_s64_mipsel -#define helper_neon_rshl_s8 helper_neon_rshl_s8_mipsel -#define helper_neon_rshl_u16 helper_neon_rshl_u16_mipsel -#define helper_neon_rshl_u32 helper_neon_rshl_u32_mipsel -#define helper_neon_rshl_u64 helper_neon_rshl_u64_mipsel -#define helper_neon_rshl_u8 helper_neon_rshl_u8_mipsel -#define helper_neon_shl_s16 helper_neon_shl_s16_mipsel -#define helper_neon_shl_s32 helper_neon_shl_s32_mipsel -#define helper_neon_shl_s64 helper_neon_shl_s64_mipsel -#define helper_neon_shl_s8 helper_neon_shl_s8_mipsel -#define helper_neon_shl_u16 helper_neon_shl_u16_mipsel -#define helper_neon_shl_u32 helper_neon_shl_u32_mipsel -#define helper_neon_shl_u64 helper_neon_shl_u64_mipsel -#define helper_neon_shl_u8 helper_neon_shl_u8_mipsel -#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_mipsel -#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_mipsel -#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_mipsel -#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_mipsel -#define helper_neon_subl_u16 helper_neon_subl_u16_mipsel -#define helper_neon_subl_u32 helper_neon_subl_u32_mipsel -#define helper_neon_sub_u16 helper_neon_sub_u16_mipsel -#define helper_neon_sub_u8 helper_neon_sub_u8_mipsel -#define helper_neon_tbl helper_neon_tbl_mipsel -#define helper_neon_tst_u16 helper_neon_tst_u16_mipsel -#define helper_neon_tst_u32 helper_neon_tst_u32_mipsel -#define helper_neon_tst_u8 helper_neon_tst_u8_mipsel -#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_mipsel -#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_mipsel -#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_mipsel -#define helper_neon_unzip16 helper_neon_unzip16_mipsel -#define helper_neon_unzip8 helper_neon_unzip8_mipsel -#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_mipsel -#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_mipsel -#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_mipsel -#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_mipsel -#define helper_neon_widen_s16 helper_neon_widen_s16_mipsel -#define helper_neon_widen_s8 helper_neon_widen_s8_mipsel -#define helper_neon_widen_u16 helper_neon_widen_u16_mipsel -#define helper_neon_widen_u8 helper_neon_widen_u8_mipsel -#define helper_neon_zip16 helper_neon_zip16_mipsel -#define helper_neon_zip8 helper_neon_zip8_mipsel -#define helper_pre_hvc helper_pre_hvc_mipsel -#define helper_pre_smc helper_pre_smc_mipsel -#define helper_qadd16 helper_qadd16_mipsel -#define helper_qadd8 helper_qadd8_mipsel -#define helper_qaddsubx helper_qaddsubx_mipsel -#define helper_qsub16 helper_qsub16_mipsel -#define helper_qsub8 helper_qsub8_mipsel -#define helper_qsubaddx helper_qsubaddx_mipsel -#define helper_rbit helper_rbit_mipsel -#define helper_recpe_f32 helper_recpe_f32_mipsel -#define helper_recpe_f64 helper_recpe_f64_mipsel -#define helper_recpe_u32 helper_recpe_u32_mipsel -#define helper_recps_f32 
helper_recps_f32_mipsel -#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_mipsel -#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mipsel -#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mipsel -#define helper_ret_stb_mmu helper_ret_stb_mmu_mipsel -#define helper_rintd helper_rintd_mipsel -#define helper_rintd_exact helper_rintd_exact_mipsel -#define helper_rints helper_rints_mipsel -#define helper_rints_exact helper_rints_exact_mipsel -#define helper_ror_cc helper_ror_cc_mipsel -#define helper_rsqrte_f32 helper_rsqrte_f32_mipsel -#define helper_rsqrte_f64 helper_rsqrte_f64_mipsel -#define helper_rsqrte_u32 helper_rsqrte_u32_mipsel -#define helper_rsqrts_f32 helper_rsqrts_f32_mipsel -#define helper_sadd16 helper_sadd16_mipsel -#define helper_sadd8 helper_sadd8_mipsel -#define helper_saddsubx helper_saddsubx_mipsel -#define helper_sar_cc helper_sar_cc_mipsel -#define helper_sdiv helper_sdiv_mipsel -#define helper_sel_flags helper_sel_flags_mipsel -#define helper_set_cp_reg helper_set_cp_reg_mipsel -#define helper_set_cp_reg64 helper_set_cp_reg64_mipsel -#define helper_set_neon_rmode helper_set_neon_rmode_mipsel -#define helper_set_r13_banked helper_set_r13_banked_mipsel -#define helper_set_rmode helper_set_rmode_mipsel -#define helper_set_user_reg helper_set_user_reg_mipsel -#define helper_shadd16 helper_shadd16_mipsel -#define helper_shadd8 helper_shadd8_mipsel -#define helper_shaddsubx helper_shaddsubx_mipsel -#define helper_shl_cc helper_shl_cc_mipsel -#define helper_shr_cc helper_shr_cc_mipsel -#define helper_shsub16 helper_shsub16_mipsel -#define helper_shsub8 helper_shsub8_mipsel -#define helper_shsubaddx helper_shsubaddx_mipsel -#define helper_ssat helper_ssat_mipsel -#define helper_ssat16 helper_ssat16_mipsel -#define helper_ssub16 helper_ssub16_mipsel -#define helper_ssub8 helper_ssub8_mipsel -#define helper_ssubaddx helper_ssubaddx_mipsel -#define helper_stb_mmu helper_stb_mmu_mipsel -#define helper_stl_mmu helper_stl_mmu_mipsel -#define helper_stq_mmu helper_stq_mmu_mipsel -#define helper_stw_mmu helper_stw_mmu_mipsel -#define helper_sub_saturate helper_sub_saturate_mipsel -#define helper_sub_usaturate helper_sub_usaturate_mipsel -#define helper_sxtb16 helper_sxtb16_mipsel -#define helper_uadd16 helper_uadd16_mipsel -#define helper_uadd8 helper_uadd8_mipsel -#define helper_uaddsubx helper_uaddsubx_mipsel -#define helper_udiv helper_udiv_mipsel -#define helper_uhadd16 helper_uhadd16_mipsel -#define helper_uhadd8 helper_uhadd8_mipsel -#define helper_uhaddsubx helper_uhaddsubx_mipsel -#define helper_uhsub16 helper_uhsub16_mipsel -#define helper_uhsub8 helper_uhsub8_mipsel -#define helper_uhsubaddx helper_uhsubaddx_mipsel -#define helper_uqadd16 helper_uqadd16_mipsel -#define helper_uqadd8 helper_uqadd8_mipsel -#define helper_uqaddsubx helper_uqaddsubx_mipsel -#define helper_uqsub16 helper_uqsub16_mipsel -#define helper_uqsub8 helper_uqsub8_mipsel -#define helper_uqsubaddx helper_uqsubaddx_mipsel -#define helper_usad8 helper_usad8_mipsel -#define helper_usat helper_usat_mipsel -#define helper_usat16 helper_usat16_mipsel -#define helper_usub16 helper_usub16_mipsel -#define helper_usub8 helper_usub8_mipsel -#define helper_usubaddx helper_usubaddx_mipsel -#define helper_uxtb16 helper_uxtb16_mipsel -#define helper_v7m_mrs helper_v7m_mrs_mipsel -#define helper_v7m_msr helper_v7m_msr_mipsel -#define helper_vfp_absd helper_vfp_absd_mipsel -#define helper_vfp_abss helper_vfp_abss_mipsel -#define helper_vfp_addd helper_vfp_addd_mipsel -#define helper_vfp_adds helper_vfp_adds_mipsel -#define 
helper_vfp_cmpd helper_vfp_cmpd_mipsel -#define helper_vfp_cmped helper_vfp_cmped_mipsel -#define helper_vfp_cmpes helper_vfp_cmpes_mipsel -#define helper_vfp_cmps helper_vfp_cmps_mipsel -#define helper_vfp_divd helper_vfp_divd_mipsel -#define helper_vfp_divs helper_vfp_divs_mipsel -#define helper_vfp_fcvtds helper_vfp_fcvtds_mipsel -#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_mipsel -#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_mipsel -#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_mipsel -#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_mipsel -#define helper_vfp_fcvtsd helper_vfp_fcvtsd_mipsel -#define helper_vfp_get_fpscr helper_vfp_get_fpscr_mipsel -#define helper_vfp_maxd helper_vfp_maxd_mipsel -#define helper_vfp_maxnumd helper_vfp_maxnumd_mipsel -#define helper_vfp_maxnums helper_vfp_maxnums_mipsel -#define helper_vfp_maxs helper_vfp_maxs_mipsel -#define helper_vfp_mind helper_vfp_mind_mipsel -#define helper_vfp_minnumd helper_vfp_minnumd_mipsel -#define helper_vfp_minnums helper_vfp_minnums_mipsel -#define helper_vfp_mins helper_vfp_mins_mipsel -#define helper_vfp_muladdd helper_vfp_muladdd_mipsel -#define helper_vfp_muladds helper_vfp_muladds_mipsel -#define helper_vfp_muld helper_vfp_muld_mipsel -#define helper_vfp_muls helper_vfp_muls_mipsel -#define helper_vfp_negd helper_vfp_negd_mipsel -#define helper_vfp_negs helper_vfp_negs_mipsel -#define helper_vfp_set_fpscr helper_vfp_set_fpscr_mipsel -#define helper_vfp_shtod helper_vfp_shtod_mipsel -#define helper_vfp_shtos helper_vfp_shtos_mipsel -#define helper_vfp_sitod helper_vfp_sitod_mipsel -#define helper_vfp_sitos helper_vfp_sitos_mipsel -#define helper_vfp_sltod helper_vfp_sltod_mipsel -#define helper_vfp_sltos helper_vfp_sltos_mipsel -#define helper_vfp_sqrtd helper_vfp_sqrtd_mipsel -#define helper_vfp_sqrts helper_vfp_sqrts_mipsel -#define helper_vfp_sqtod helper_vfp_sqtod_mipsel -#define helper_vfp_sqtos helper_vfp_sqtos_mipsel -#define helper_vfp_subd helper_vfp_subd_mipsel -#define helper_vfp_subs helper_vfp_subs_mipsel -#define helper_vfp_toshd helper_vfp_toshd_mipsel -#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_mipsel -#define helper_vfp_toshs helper_vfp_toshs_mipsel -#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_mipsel -#define helper_vfp_tosid helper_vfp_tosid_mipsel -#define helper_vfp_tosis helper_vfp_tosis_mipsel -#define helper_vfp_tosizd helper_vfp_tosizd_mipsel -#define helper_vfp_tosizs helper_vfp_tosizs_mipsel -#define helper_vfp_tosld helper_vfp_tosld_mipsel -#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_mipsel -#define helper_vfp_tosls helper_vfp_tosls_mipsel -#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_mipsel -#define helper_vfp_tosqd helper_vfp_tosqd_mipsel -#define helper_vfp_tosqs helper_vfp_tosqs_mipsel -#define helper_vfp_touhd helper_vfp_touhd_mipsel -#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_mipsel -#define helper_vfp_touhs helper_vfp_touhs_mipsel -#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_mipsel -#define helper_vfp_touid helper_vfp_touid_mipsel -#define helper_vfp_touis helper_vfp_touis_mipsel -#define helper_vfp_touizd helper_vfp_touizd_mipsel -#define helper_vfp_touizs helper_vfp_touizs_mipsel -#define helper_vfp_tould helper_vfp_tould_mipsel -#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_mipsel -#define helper_vfp_touls helper_vfp_touls_mipsel -#define 
helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_mipsel -#define helper_vfp_touqd helper_vfp_touqd_mipsel -#define helper_vfp_touqs helper_vfp_touqs_mipsel -#define helper_vfp_uhtod helper_vfp_uhtod_mipsel -#define helper_vfp_uhtos helper_vfp_uhtos_mipsel -#define helper_vfp_uitod helper_vfp_uitod_mipsel -#define helper_vfp_uitos helper_vfp_uitos_mipsel -#define helper_vfp_ultod helper_vfp_ultod_mipsel -#define helper_vfp_ultos helper_vfp_ultos_mipsel -#define helper_vfp_uqtod helper_vfp_uqtod_mipsel -#define helper_vfp_uqtos helper_vfp_uqtos_mipsel -#define helper_wfe helper_wfe_mipsel -#define helper_wfi helper_wfi_mipsel -#define hex2decimal hex2decimal_mipsel -#define hw_breakpoint_update hw_breakpoint_update_mipsel -#define hw_breakpoint_update_all hw_breakpoint_update_all_mipsel -#define hw_watchpoint_update hw_watchpoint_update_mipsel -#define hw_watchpoint_update_all hw_watchpoint_update_all_mipsel -#define _init _init_mipsel -#define init_cpreg_list init_cpreg_list_mipsel -#define init_lists init_lists_mipsel -#define input_type_enum input_type_enum_mipsel -#define int128_2_64 int128_2_64_mipsel -#define int128_add int128_add_mipsel -#define int128_addto int128_addto_mipsel -#define int128_and int128_and_mipsel -#define int128_eq int128_eq_mipsel -#define int128_ge int128_ge_mipsel -#define int128_get64 int128_get64_mipsel -#define int128_gt int128_gt_mipsel -#define int128_le int128_le_mipsel -#define int128_lt int128_lt_mipsel -#define int128_make64 int128_make64_mipsel -#define int128_max int128_max_mipsel -#define int128_min int128_min_mipsel -#define int128_ne int128_ne_mipsel -#define int128_neg int128_neg_mipsel -#define int128_nz int128_nz_mipsel -#define int128_rshift int128_rshift_mipsel -#define int128_sub int128_sub_mipsel -#define int128_subfrom int128_subfrom_mipsel -#define int128_zero int128_zero_mipsel -#define int16_to_float32 int16_to_float32_mipsel -#define int16_to_float64 int16_to_float64_mipsel -#define int32_to_float128 int32_to_float128_mipsel -#define int32_to_float32 int32_to_float32_mipsel -#define int32_to_float64 int32_to_float64_mipsel -#define int32_to_floatx80 int32_to_floatx80_mipsel -#define int64_to_float128 int64_to_float128_mipsel -#define int64_to_float32 int64_to_float32_mipsel -#define int64_to_float64 int64_to_float64_mipsel -#define int64_to_floatx80 int64_to_floatx80_mipsel -#define invalidate_and_set_dirty invalidate_and_set_dirty_mipsel -#define invalidate_page_bitmap invalidate_page_bitmap_mipsel -#define io_mem_read io_mem_read_mipsel -#define io_mem_write io_mem_write_mipsel -#define io_readb io_readb_mipsel -#define io_readl io_readl_mipsel -#define io_readq io_readq_mipsel -#define io_readw io_readw_mipsel -#define iotlb_to_region iotlb_to_region_mipsel -#define io_writeb io_writeb_mipsel -#define io_writel io_writel_mipsel -#define io_writeq io_writeq_mipsel -#define io_writew io_writew_mipsel -#define is_a64 is_a64_mipsel -#define is_help_option is_help_option_mipsel -#define isr_read isr_read_mipsel -#define is_valid_option_list is_valid_option_list_mipsel -#define iwmmxt_load_creg iwmmxt_load_creg_mipsel -#define iwmmxt_load_reg iwmmxt_load_reg_mipsel -#define iwmmxt_store_creg iwmmxt_store_creg_mipsel -#define iwmmxt_store_reg iwmmxt_store_reg_mipsel -#define __jit_debug_descriptor __jit_debug_descriptor_mipsel -#define __jit_debug_register_code __jit_debug_register_code_mipsel -#define kvm_to_cpreg_id kvm_to_cpreg_id_mipsel -#define last_ram_offset last_ram_offset_mipsel -#define ldl_be_p ldl_be_p_mipsel 
-#define ldl_be_phys ldl_be_phys_mipsel -#define ldl_he_p ldl_he_p_mipsel -#define ldl_le_p ldl_le_p_mipsel -#define ldl_le_phys ldl_le_phys_mipsel -#define ldl_phys ldl_phys_mipsel -#define ldl_phys_internal ldl_phys_internal_mipsel -#define ldq_be_p ldq_be_p_mipsel -#define ldq_be_phys ldq_be_phys_mipsel -#define ldq_he_p ldq_he_p_mipsel -#define ldq_le_p ldq_le_p_mipsel -#define ldq_le_phys ldq_le_phys_mipsel -#define ldq_phys ldq_phys_mipsel -#define ldq_phys_internal ldq_phys_internal_mipsel -#define ldst_name ldst_name_mipsel -#define ldub_p ldub_p_mipsel -#define ldub_phys ldub_phys_mipsel -#define lduw_be_p lduw_be_p_mipsel -#define lduw_be_phys lduw_be_phys_mipsel -#define lduw_he_p lduw_he_p_mipsel -#define lduw_le_p lduw_le_p_mipsel -#define lduw_le_phys lduw_le_phys_mipsel -#define lduw_phys lduw_phys_mipsel -#define lduw_phys_internal lduw_phys_internal_mipsel -#define le128 le128_mipsel -#define linked_bp_matches linked_bp_matches_mipsel -#define listener_add_address_space listener_add_address_space_mipsel -#define load_cpu_offset load_cpu_offset_mipsel -#define load_reg load_reg_mipsel -#define load_reg_var load_reg_var_mipsel -#define log_cpu_state log_cpu_state_mipsel -#define lpae_cp_reginfo lpae_cp_reginfo_mipsel -#define lt128 lt128_mipsel -#define machine_class_init machine_class_init_mipsel -#define machine_finalize machine_finalize_mipsel -#define machine_info machine_info_mipsel -#define machine_initfn machine_initfn_mipsel -#define machine_register_types machine_register_types_mipsel -#define machvirt_init machvirt_init_mipsel -#define machvirt_machine_init machvirt_machine_init_mipsel -#define maj maj_mipsel -#define mapping_conflict mapping_conflict_mipsel -#define mapping_contiguous mapping_contiguous_mipsel -#define mapping_have_same_region mapping_have_same_region_mipsel -#define mapping_merge mapping_merge_mipsel -#define mem_add mem_add_mipsel -#define mem_begin mem_begin_mipsel -#define mem_commit mem_commit_mipsel -#define memory_access_is_direct memory_access_is_direct_mipsel -#define memory_access_size memory_access_size_mipsel -#define memory_init memory_init_mipsel -#define memory_listener_match memory_listener_match_mipsel -#define memory_listener_register memory_listener_register_mipsel -#define memory_listener_unregister memory_listener_unregister_mipsel -#define memory_map_init memory_map_init_mipsel -#define memory_mapping_filter memory_mapping_filter_mipsel -#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_mipsel -#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mipsel -#define memory_mapping_list_free memory_mapping_list_free_mipsel -#define memory_mapping_list_init memory_mapping_list_init_mipsel -#define memory_region_access_valid memory_region_access_valid_mipsel -#define memory_region_add_subregion memory_region_add_subregion_mipsel -#define memory_region_add_subregion_common memory_region_add_subregion_common_mipsel -#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mipsel -#define memory_region_big_endian memory_region_big_endian_mipsel -#define memory_region_clear_pending memory_region_clear_pending_mipsel -#define memory_region_del_subregion memory_region_del_subregion_mipsel -#define memory_region_destructor_alias memory_region_destructor_alias_mipsel -#define memory_region_destructor_none memory_region_destructor_none_mipsel -#define memory_region_destructor_ram memory_region_destructor_ram_mipsel -#define 
memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_mipsel -#define memory_region_dispatch_read memory_region_dispatch_read_mipsel -#define memory_region_dispatch_read1 memory_region_dispatch_read1_mipsel -#define memory_region_dispatch_write memory_region_dispatch_write_mipsel -#define memory_region_escape_name memory_region_escape_name_mipsel -#define memory_region_finalize memory_region_finalize_mipsel -#define memory_region_find memory_region_find_mipsel -#define memory_region_get_addr memory_region_get_addr_mipsel -#define memory_region_get_alignment memory_region_get_alignment_mipsel -#define memory_region_get_container memory_region_get_container_mipsel -#define memory_region_get_fd memory_region_get_fd_mipsel -#define memory_region_get_may_overlap memory_region_get_may_overlap_mipsel -#define memory_region_get_priority memory_region_get_priority_mipsel -#define memory_region_get_ram_addr memory_region_get_ram_addr_mipsel -#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mipsel -#define memory_region_get_size memory_region_get_size_mipsel -#define memory_region_info memory_region_info_mipsel -#define memory_region_init memory_region_init_mipsel -#define memory_region_init_alias memory_region_init_alias_mipsel -#define memory_region_initfn memory_region_initfn_mipsel -#define memory_region_init_io memory_region_init_io_mipsel -#define memory_region_init_ram memory_region_init_ram_mipsel -#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mipsel -#define memory_region_init_reservation memory_region_init_reservation_mipsel -#define memory_region_is_iommu memory_region_is_iommu_mipsel -#define memory_region_is_logging memory_region_is_logging_mipsel -#define memory_region_is_mapped memory_region_is_mapped_mipsel -#define memory_region_is_ram memory_region_is_ram_mipsel -#define memory_region_is_rom memory_region_is_rom_mipsel -#define memory_region_is_romd memory_region_is_romd_mipsel -#define memory_region_is_skip_dump memory_region_is_skip_dump_mipsel -#define memory_region_is_unassigned memory_region_is_unassigned_mipsel -#define memory_region_name memory_region_name_mipsel -#define memory_region_need_escape memory_region_need_escape_mipsel -#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_mipsel -#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_mipsel -#define memory_region_present memory_region_present_mipsel -#define memory_region_read_accessor memory_region_read_accessor_mipsel -#define memory_region_readd_subregion memory_region_readd_subregion_mipsel -#define memory_region_ref memory_region_ref_mipsel -#define memory_region_resolve_container memory_region_resolve_container_mipsel -#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_mipsel -#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mipsel -#define memory_region_set_address memory_region_set_address_mipsel -#define memory_region_set_alias_offset memory_region_set_alias_offset_mipsel -#define memory_region_set_enabled memory_region_set_enabled_mipsel -#define memory_region_set_readonly memory_region_set_readonly_mipsel -#define memory_region_set_skip_dump memory_region_set_skip_dump_mipsel -#define memory_region_size memory_region_size_mipsel -#define memory_region_to_address_space memory_region_to_address_space_mipsel -#define memory_region_transaction_begin memory_region_transaction_begin_mipsel -#define memory_region_transaction_commit 
memory_region_transaction_commit_mipsel -#define memory_region_unref memory_region_unref_mipsel -#define memory_region_update_container_subregions memory_region_update_container_subregions_mipsel -#define memory_region_write_accessor memory_region_write_accessor_mipsel -#define memory_region_wrong_endianness memory_region_wrong_endianness_mipsel -#define memory_try_enable_merging memory_try_enable_merging_mipsel -#define module_call_init module_call_init_mipsel -#define module_load module_load_mipsel -#define mpidr_cp_reginfo mpidr_cp_reginfo_mipsel -#define mpidr_read mpidr_read_mipsel -#define msr_mask msr_mask_mipsel -#define mul128By64To192 mul128By64To192_mipsel -#define mul128To256 mul128To256_mipsel -#define mul64To128 mul64To128_mipsel -#define muldiv64 muldiv64_mipsel -#define neon_2rm_is_float_op neon_2rm_is_float_op_mipsel -#define neon_2rm_sizes neon_2rm_sizes_mipsel -#define neon_3r_sizes neon_3r_sizes_mipsel -#define neon_get_scalar neon_get_scalar_mipsel -#define neon_load_reg neon_load_reg_mipsel -#define neon_load_reg64 neon_load_reg64_mipsel -#define neon_load_scratch neon_load_scratch_mipsel -#define neon_ls_element_type neon_ls_element_type_mipsel -#define neon_reg_offset neon_reg_offset_mipsel -#define neon_store_reg neon_store_reg_mipsel -#define neon_store_reg64 neon_store_reg64_mipsel -#define neon_store_scratch neon_store_scratch_mipsel -#define new_ldst_label new_ldst_label_mipsel -#define next_list next_list_mipsel -#define normalizeFloat128Subnormal normalizeFloat128Subnormal_mipsel -#define normalizeFloat16Subnormal normalizeFloat16Subnormal_mipsel -#define normalizeFloat32Subnormal normalizeFloat32Subnormal_mipsel -#define normalizeFloat64Subnormal normalizeFloat64Subnormal_mipsel -#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mipsel -#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_mipsel -#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_mipsel -#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_mipsel -#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mipsel -#define not_v6_cp_reginfo not_v6_cp_reginfo_mipsel -#define not_v7_cp_reginfo not_v7_cp_reginfo_mipsel -#define not_v8_cp_reginfo not_v8_cp_reginfo_mipsel -#define object_child_foreach object_child_foreach_mipsel -#define object_class_foreach object_class_foreach_mipsel -#define object_class_foreach_tramp object_class_foreach_tramp_mipsel -#define object_class_get_list object_class_get_list_mipsel -#define object_class_get_list_tramp object_class_get_list_tramp_mipsel -#define object_class_get_parent object_class_get_parent_mipsel -#define object_deinit object_deinit_mipsel -#define object_dynamic_cast object_dynamic_cast_mipsel -#define object_finalize object_finalize_mipsel -#define object_finalize_child_property object_finalize_child_property_mipsel -#define object_get_child_property object_get_child_property_mipsel -#define object_get_link_property object_get_link_property_mipsel -#define object_get_root object_get_root_mipsel -#define object_initialize_with_type object_initialize_with_type_mipsel -#define object_init_with_type object_init_with_type_mipsel -#define object_instance_init object_instance_init_mipsel -#define object_new_with_type object_new_with_type_mipsel -#define object_post_init_with_type object_post_init_with_type_mipsel -#define object_property_add_alias object_property_add_alias_mipsel -#define object_property_add_link object_property_add_link_mipsel -#define object_property_add_uint16_ptr 
object_property_add_uint16_ptr_mipsel
-#define object_property_add_uint32_ptr object_property_add_uint32_ptr_mipsel
-#define object_property_add_uint64_ptr object_property_add_uint64_ptr_mipsel
-#define object_property_add_uint8_ptr object_property_add_uint8_ptr_mipsel
-#define object_property_allow_set_link object_property_allow_set_link_mipsel
-#define object_property_del object_property_del_mipsel
-#define object_property_del_all object_property_del_all_mipsel
-#define object_property_find object_property_find_mipsel
-#define object_property_get object_property_get_mipsel
-#define object_property_get_bool object_property_get_bool_mipsel
-#define object_property_get_int object_property_get_int_mipsel
-#define object_property_get_link object_property_get_link_mipsel
-#define object_property_get_qobject object_property_get_qobject_mipsel
-#define object_property_get_str object_property_get_str_mipsel
-#define object_property_get_type object_property_get_type_mipsel
-#define object_property_is_child object_property_is_child_mipsel
-#define object_property_set object_property_set_mipsel
-#define object_property_set_description object_property_set_description_mipsel
-#define object_property_set_link object_property_set_link_mipsel
-#define object_property_set_qobject object_property_set_qobject_mipsel
-#define object_release_link_property object_release_link_property_mipsel
-#define object_resolve_abs_path object_resolve_abs_path_mipsel
-#define object_resolve_child_property object_resolve_child_property_mipsel
-#define object_resolve_link object_resolve_link_mipsel
-#define object_resolve_link_property object_resolve_link_property_mipsel
-#define object_resolve_partial_path object_resolve_partial_path_mipsel
-#define object_resolve_path object_resolve_path_mipsel
-#define object_resolve_path_component object_resolve_path_component_mipsel
-#define object_resolve_path_type object_resolve_path_type_mipsel
-#define object_set_link_property object_set_link_property_mipsel
-#define object_unparent object_unparent_mipsel
-#define omap_cachemaint_write omap_cachemaint_write_mipsel
-#define omap_cp_reginfo omap_cp_reginfo_mipsel
-#define omap_threadid_write omap_threadid_write_mipsel
-#define omap_ticonfig_write omap_ticonfig_write_mipsel
-#define omap_wfi_write omap_wfi_write_mipsel
-#define op_bits op_bits_mipsel
-#define open_modeflags open_modeflags_mipsel
-#define op_to_mov op_to_mov_mipsel
-#define op_to_movi op_to_movi_mipsel
-#define output_type_enum output_type_enum_mipsel
-#define packFloat128 packFloat128_mipsel
-#define packFloat16 packFloat16_mipsel
-#define packFloat32 packFloat32_mipsel
-#define packFloat64 packFloat64_mipsel
-#define packFloatx80 packFloatx80_mipsel
-#define page_find page_find_mipsel
-#define page_find_alloc page_find_alloc_mipsel
-#define page_flush_tb page_flush_tb_mipsel
-#define page_flush_tb_1 page_flush_tb_1_mipsel
-#define page_init page_init_mipsel
-#define page_size_init page_size_init_mipsel
-#define par par_mipsel
-#define parse_array parse_array_mipsel
-#define parse_error parse_error_mipsel
-#define parse_escape parse_escape_mipsel
-#define parse_keyword parse_keyword_mipsel
-#define parse_literal parse_literal_mipsel
-#define parse_object parse_object_mipsel
-#define parse_optional parse_optional_mipsel
-#define parse_option_bool parse_option_bool_mipsel
-#define parse_option_number parse_option_number_mipsel
-#define parse_option_size parse_option_size_mipsel
-#define parse_pair parse_pair_mipsel
-#define parser_context_free parser_context_free_mipsel
-#define parser_context_new parser_context_new_mipsel
-#define parser_context_peek_token parser_context_peek_token_mipsel
-#define parser_context_pop_token parser_context_pop_token_mipsel
-#define parser_context_restore parser_context_restore_mipsel
-#define parser_context_save parser_context_save_mipsel
-#define parse_str parse_str_mipsel
-#define parse_type_bool parse_type_bool_mipsel
-#define parse_type_int parse_type_int_mipsel
-#define parse_type_number parse_type_number_mipsel
-#define parse_type_size parse_type_size_mipsel
-#define parse_type_str parse_type_str_mipsel
-#define parse_value parse_value_mipsel
-#define par_write par_write_mipsel
-#define patch_reloc patch_reloc_mipsel
-#define phys_map_node_alloc phys_map_node_alloc_mipsel
-#define phys_map_node_reserve phys_map_node_reserve_mipsel
-#define phys_mem_alloc phys_mem_alloc_mipsel
-#define phys_mem_set_alloc phys_mem_set_alloc_mipsel
-#define phys_page_compact phys_page_compact_mipsel
-#define phys_page_compact_all phys_page_compact_all_mipsel
-#define phys_page_find phys_page_find_mipsel
-#define phys_page_set phys_page_set_mipsel
-#define phys_page_set_level phys_page_set_level_mipsel
-#define phys_section_add phys_section_add_mipsel
-#define phys_section_destroy phys_section_destroy_mipsel
-#define phys_sections_free phys_sections_free_mipsel
-#define pickNaN pickNaN_mipsel
-#define pickNaNMulAdd pickNaNMulAdd_mipsel
-#define pmccfiltr_write pmccfiltr_write_mipsel
-#define pmccntr_read pmccntr_read_mipsel
-#define pmccntr_sync pmccntr_sync_mipsel
-#define pmccntr_write pmccntr_write_mipsel
-#define pmccntr_write32 pmccntr_write32_mipsel
-#define pmcntenclr_write pmcntenclr_write_mipsel
-#define pmcntenset_write pmcntenset_write_mipsel
-#define pmcr_write pmcr_write_mipsel
-#define pmintenclr_write pmintenclr_write_mipsel
-#define pmintenset_write pmintenset_write_mipsel
-#define pmovsr_write pmovsr_write_mipsel
-#define pmreg_access pmreg_access_mipsel
-#define pmsav5_cp_reginfo pmsav5_cp_reginfo_mipsel
-#define pmsav5_data_ap_read pmsav5_data_ap_read_mipsel
-#define pmsav5_data_ap_write pmsav5_data_ap_write_mipsel
-#define pmsav5_insn_ap_read pmsav5_insn_ap_read_mipsel
-#define pmsav5_insn_ap_write pmsav5_insn_ap_write_mipsel
-#define pmuserenr_write pmuserenr_write_mipsel
-#define pmxevtyper_write pmxevtyper_write_mipsel
-#define print_type_bool print_type_bool_mipsel
-#define print_type_int print_type_int_mipsel
-#define print_type_number print_type_number_mipsel
-#define print_type_size print_type_size_mipsel
-#define print_type_str print_type_str_mipsel
-#define propagateFloat128NaN propagateFloat128NaN_mipsel
-#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_mipsel
-#define propagateFloat32NaN propagateFloat32NaN_mipsel
-#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_mipsel
-#define propagateFloat64NaN propagateFloat64NaN_mipsel
-#define propagateFloatx80NaN propagateFloatx80NaN_mipsel
-#define property_get_alias property_get_alias_mipsel
-#define property_get_bool property_get_bool_mipsel
-#define property_get_str property_get_str_mipsel
-#define property_get_uint16_ptr property_get_uint16_ptr_mipsel
-#define property_get_uint32_ptr property_get_uint32_ptr_mipsel
-#define property_get_uint64_ptr property_get_uint64_ptr_mipsel
-#define property_get_uint8_ptr property_get_uint8_ptr_mipsel
-#define property_release_alias property_release_alias_mipsel
-#define property_release_bool property_release_bool_mipsel
-#define property_release_str property_release_str_mipsel
-#define property_resolve_alias property_resolve_alias_mipsel
-#define property_set_alias property_set_alias_mipsel
-#define property_set_bool property_set_bool_mipsel
-#define property_set_str property_set_str_mipsel
-#define pstate_read pstate_read_mipsel
-#define pstate_write pstate_write_mipsel
-#define pxa250_initfn pxa250_initfn_mipsel
-#define pxa255_initfn pxa255_initfn_mipsel
-#define pxa260_initfn pxa260_initfn_mipsel
-#define pxa261_initfn pxa261_initfn_mipsel
-#define pxa262_initfn pxa262_initfn_mipsel
-#define pxa270a0_initfn pxa270a0_initfn_mipsel
-#define pxa270a1_initfn pxa270a1_initfn_mipsel
-#define pxa270b0_initfn pxa270b0_initfn_mipsel
-#define pxa270b1_initfn pxa270b1_initfn_mipsel
-#define pxa270c0_initfn pxa270c0_initfn_mipsel
-#define pxa270c5_initfn pxa270c5_initfn_mipsel
-#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_mipsel
-#define qapi_dealloc_end_list qapi_dealloc_end_list_mipsel
-#define qapi_dealloc_end_struct qapi_dealloc_end_struct_mipsel
-#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_mipsel
-#define qapi_dealloc_next_list qapi_dealloc_next_list_mipsel
-#define qapi_dealloc_pop qapi_dealloc_pop_mipsel
-#define qapi_dealloc_push qapi_dealloc_push_mipsel
-#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_mipsel
-#define qapi_dealloc_start_list qapi_dealloc_start_list_mipsel
-#define qapi_dealloc_start_struct qapi_dealloc_start_struct_mipsel
-#define qapi_dealloc_start_union qapi_dealloc_start_union_mipsel
-#define qapi_dealloc_type_bool qapi_dealloc_type_bool_mipsel
-#define qapi_dealloc_type_enum qapi_dealloc_type_enum_mipsel
-#define qapi_dealloc_type_int qapi_dealloc_type_int_mipsel
-#define qapi_dealloc_type_number qapi_dealloc_type_number_mipsel
-#define qapi_dealloc_type_size qapi_dealloc_type_size_mipsel
-#define qapi_dealloc_type_str qapi_dealloc_type_str_mipsel
-#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_mipsel
-#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_mipsel
-#define qapi_free_boolList qapi_free_boolList_mipsel
-#define qapi_free_ErrorClassList qapi_free_ErrorClassList_mipsel
-#define qapi_free_int16List qapi_free_int16List_mipsel
-#define qapi_free_int32List qapi_free_int32List_mipsel
-#define qapi_free_int64List qapi_free_int64List_mipsel
-#define qapi_free_int8List qapi_free_int8List_mipsel
-#define qapi_free_intList qapi_free_intList_mipsel
-#define qapi_free_numberList qapi_free_numberList_mipsel
-#define qapi_free_strList qapi_free_strList_mipsel
-#define qapi_free_uint16List qapi_free_uint16List_mipsel
-#define qapi_free_uint32List qapi_free_uint32List_mipsel
-#define qapi_free_uint64List qapi_free_uint64List_mipsel
-#define qapi_free_uint8List qapi_free_uint8List_mipsel
-#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_mipsel
-#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_mipsel
-#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_mipsel
-#define qbool_destroy_obj qbool_destroy_obj_mipsel
-#define qbool_from_int qbool_from_int_mipsel
-#define qbool_get_int qbool_get_int_mipsel
-#define qbool_type qbool_type_mipsel
-#define qbus_create qbus_create_mipsel
-#define qbus_create_inplace qbus_create_inplace_mipsel
-#define qbus_finalize qbus_finalize_mipsel
-#define qbus_initfn qbus_initfn_mipsel
-#define qbus_realize qbus_realize_mipsel
-#define qdev_create qdev_create_mipsel
-#define qdev_get_type qdev_get_type_mipsel
-#define qdev_register_types qdev_register_types_mipsel
-#define qdev_set_parent_bus qdev_set_parent_bus_mipsel
-#define qdev_try_create qdev_try_create_mipsel
-#define qdict_add_key qdict_add_key_mipsel
-#define qdict_array_split qdict_array_split_mipsel
-#define qdict_clone_shallow qdict_clone_shallow_mipsel
-#define qdict_del qdict_del_mipsel
-#define qdict_destroy_obj qdict_destroy_obj_mipsel
-#define qdict_entry_key qdict_entry_key_mipsel
-#define qdict_entry_value qdict_entry_value_mipsel
-#define qdict_extract_subqdict qdict_extract_subqdict_mipsel
-#define qdict_find qdict_find_mipsel
-#define qdict_first qdict_first_mipsel
-#define qdict_flatten qdict_flatten_mipsel
-#define qdict_flatten_qdict qdict_flatten_qdict_mipsel
-#define qdict_flatten_qlist qdict_flatten_qlist_mipsel
-#define qdict_get qdict_get_mipsel
-#define qdict_get_bool qdict_get_bool_mipsel
-#define qdict_get_double qdict_get_double_mipsel
-#define qdict_get_int qdict_get_int_mipsel
-#define qdict_get_obj qdict_get_obj_mipsel
-#define qdict_get_qdict qdict_get_qdict_mipsel
-#define qdict_get_qlist qdict_get_qlist_mipsel
-#define qdict_get_str qdict_get_str_mipsel
-#define qdict_get_try_bool qdict_get_try_bool_mipsel
-#define qdict_get_try_int qdict_get_try_int_mipsel
-#define qdict_get_try_str qdict_get_try_str_mipsel
-#define qdict_haskey qdict_haskey_mipsel
-#define qdict_has_prefixed_entries qdict_has_prefixed_entries_mipsel
-#define qdict_iter qdict_iter_mipsel
-#define qdict_join qdict_join_mipsel
-#define qdict_new qdict_new_mipsel
-#define qdict_next qdict_next_mipsel
-#define qdict_next_entry qdict_next_entry_mipsel
-#define qdict_put_obj qdict_put_obj_mipsel
-#define qdict_size qdict_size_mipsel
-#define qdict_type qdict_type_mipsel
-#define qemu_clock_get_us qemu_clock_get_us_mipsel
-#define qemu_clock_ptr qemu_clock_ptr_mipsel
-#define qemu_clocks qemu_clocks_mipsel
-#define qemu_get_cpu qemu_get_cpu_mipsel
-#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_mipsel
-#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_mipsel
-#define qemu_get_ram_block qemu_get_ram_block_mipsel
-#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_mipsel
-#define qemu_get_ram_fd qemu_get_ram_fd_mipsel
-#define qemu_get_ram_ptr qemu_get_ram_ptr_mipsel
-#define qemu_host_page_mask qemu_host_page_mask_mipsel
-#define qemu_host_page_size qemu_host_page_size_mipsel
-#define qemu_init_vcpu qemu_init_vcpu_mipsel
-#define qemu_ld_helpers qemu_ld_helpers_mipsel
-#define qemu_log_close qemu_log_close_mipsel
-#define qemu_log_enabled qemu_log_enabled_mipsel
-#define qemu_log_flush qemu_log_flush_mipsel
-#define qemu_loglevel_mask qemu_loglevel_mask_mipsel
-#define qemu_log_vprintf qemu_log_vprintf_mipsel
-#define qemu_oom_check qemu_oom_check_mipsel
-#define qemu_parse_fd qemu_parse_fd_mipsel
-#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mipsel
-#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_mipsel
-#define qemu_ram_alloc qemu_ram_alloc_mipsel
-#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mipsel
-#define qemu_ram_foreach_block qemu_ram_foreach_block_mipsel
-#define qemu_ram_free qemu_ram_free_mipsel
-#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_mipsel
-#define qemu_ram_ptr_length qemu_ram_ptr_length_mipsel
-#define qemu_ram_remap qemu_ram_remap_mipsel
-#define qemu_ram_setup_dump qemu_ram_setup_dump_mipsel
-#define qemu_ram_unset_idstr qemu_ram_unset_idstr_mipsel
-#define qemu_real_host_page_size qemu_real_host_page_size_mipsel
-#define qemu_st_helpers qemu_st_helpers_mipsel
-#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_mipsel
-#define qemu_try_memalign qemu_try_memalign_mipsel
-#define qentry_destroy qentry_destroy_mipsel
-#define qerror_human qerror_human_mipsel
-#define qerror_report qerror_report_mipsel
-#define qerror_report_err qerror_report_err_mipsel
-#define qfloat_destroy_obj qfloat_destroy_obj_mipsel
-#define qfloat_from_double qfloat_from_double_mipsel
-#define qfloat_get_double qfloat_get_double_mipsel
-#define qfloat_type qfloat_type_mipsel
-#define qint_destroy_obj qint_destroy_obj_mipsel
-#define qint_from_int qint_from_int_mipsel
-#define qint_get_int qint_get_int_mipsel
-#define qint_type qint_type_mipsel
-#define qlist_append_obj qlist_append_obj_mipsel
-#define qlist_copy qlist_copy_mipsel
-#define qlist_copy_elem qlist_copy_elem_mipsel
-#define qlist_destroy_obj qlist_destroy_obj_mipsel
-#define qlist_empty qlist_empty_mipsel
-#define qlist_entry_obj qlist_entry_obj_mipsel
-#define qlist_first qlist_first_mipsel
-#define qlist_iter qlist_iter_mipsel
-#define qlist_new qlist_new_mipsel
-#define qlist_next qlist_next_mipsel
-#define qlist_peek qlist_peek_mipsel
-#define qlist_pop qlist_pop_mipsel
-#define qlist_size qlist_size_mipsel
-#define qlist_size_iter qlist_size_iter_mipsel
-#define qlist_type qlist_type_mipsel
-#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_mipsel
-#define qmp_input_end_list qmp_input_end_list_mipsel
-#define qmp_input_end_struct qmp_input_end_struct_mipsel
-#define qmp_input_get_next_type qmp_input_get_next_type_mipsel
-#define qmp_input_get_object qmp_input_get_object_mipsel
-#define qmp_input_get_visitor qmp_input_get_visitor_mipsel
-#define qmp_input_next_list qmp_input_next_list_mipsel
-#define qmp_input_optional qmp_input_optional_mipsel
-#define qmp_input_pop qmp_input_pop_mipsel
-#define qmp_input_push qmp_input_push_mipsel
-#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_mipsel
-#define qmp_input_start_list qmp_input_start_list_mipsel
-#define qmp_input_start_struct qmp_input_start_struct_mipsel
-#define qmp_input_type_bool qmp_input_type_bool_mipsel
-#define qmp_input_type_int qmp_input_type_int_mipsel
-#define qmp_input_type_number qmp_input_type_number_mipsel
-#define qmp_input_type_str qmp_input_type_str_mipsel
-#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_mipsel
-#define qmp_input_visitor_new qmp_input_visitor_new_mipsel
-#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_mipsel
-#define qmp_output_add_obj qmp_output_add_obj_mipsel
-#define qmp_output_end_list qmp_output_end_list_mipsel
-#define qmp_output_end_struct qmp_output_end_struct_mipsel
-#define qmp_output_first qmp_output_first_mipsel
-#define qmp_output_get_qobject qmp_output_get_qobject_mipsel
-#define qmp_output_get_visitor qmp_output_get_visitor_mipsel
-#define qmp_output_last qmp_output_last_mipsel
-#define qmp_output_next_list qmp_output_next_list_mipsel
-#define qmp_output_pop qmp_output_pop_mipsel
-#define qmp_output_push_obj qmp_output_push_obj_mipsel
-#define qmp_output_start_list qmp_output_start_list_mipsel
-#define qmp_output_start_struct qmp_output_start_struct_mipsel
-#define qmp_output_type_bool qmp_output_type_bool_mipsel
-#define qmp_output_type_int qmp_output_type_int_mipsel
-#define qmp_output_type_number qmp_output_type_number_mipsel
-#define qmp_output_type_str qmp_output_type_str_mipsel
-#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_mipsel
-#define qmp_output_visitor_new qmp_output_visitor_new_mipsel
-#define qobject_decref qobject_decref_mipsel
-#define qobject_to_qbool qobject_to_qbool_mipsel
-#define qobject_to_qdict qobject_to_qdict_mipsel
-#define qobject_to_qfloat qobject_to_qfloat_mipsel
-#define qobject_to_qint qobject_to_qint_mipsel
-#define qobject_to_qlist qobject_to_qlist_mipsel
-#define qobject_to_qstring qobject_to_qstring_mipsel
-#define qobject_type qobject_type_mipsel
-#define qstring_append qstring_append_mipsel
-#define qstring_append_chr qstring_append_chr_mipsel
-#define qstring_append_int qstring_append_int_mipsel
-#define qstring_destroy_obj qstring_destroy_obj_mipsel
-#define qstring_from_escaped_str qstring_from_escaped_str_mipsel
-#define qstring_from_str qstring_from_str_mipsel
-#define qstring_from_substr qstring_from_substr_mipsel
-#define qstring_get_length qstring_get_length_mipsel
-#define qstring_get_str qstring_get_str_mipsel
-#define qstring_new qstring_new_mipsel
-#define qstring_type qstring_type_mipsel
-#define ram_block_add ram_block_add_mipsel
-#define ram_size ram_size_mipsel
-#define range_compare range_compare_mipsel
-#define range_covers_byte range_covers_byte_mipsel
-#define range_get_last range_get_last_mipsel
-#define range_merge range_merge_mipsel
-#define ranges_can_merge ranges_can_merge_mipsel
-#define raw_read raw_read_mipsel
-#define raw_write raw_write_mipsel
-#define rcon rcon_mipsel
-#define read_raw_cp_reg read_raw_cp_reg_mipsel
-#define recip_estimate recip_estimate_mipsel
-#define recip_sqrt_estimate recip_sqrt_estimate_mipsel
-#define register_cp_regs_for_features register_cp_regs_for_features_mipsel
-#define register_multipage register_multipage_mipsel
-#define register_subpage register_subpage_mipsel
-#define register_tm_clones register_tm_clones_mipsel
-#define register_types_object register_types_object_mipsel
-#define regnames regnames_mipsel
-#define render_memory_region render_memory_region_mipsel
-#define reset_all_temps reset_all_temps_mipsel
-#define reset_temp reset_temp_mipsel
-#define rol32 rol32_mipsel
-#define rol64 rol64_mipsel
-#define ror32 ror32_mipsel
-#define ror64 ror64_mipsel
-#define roundAndPackFloat128 roundAndPackFloat128_mipsel
-#define roundAndPackFloat16 roundAndPackFloat16_mipsel
-#define roundAndPackFloat32 roundAndPackFloat32_mipsel
-#define roundAndPackFloat64 roundAndPackFloat64_mipsel
-#define roundAndPackFloatx80 roundAndPackFloatx80_mipsel
-#define roundAndPackInt32 roundAndPackInt32_mipsel
-#define roundAndPackInt64 roundAndPackInt64_mipsel
-#define roundAndPackUint64 roundAndPackUint64_mipsel
-#define round_to_inf round_to_inf_mipsel
-#define run_on_cpu run_on_cpu_mipsel
-#define s0 s0_mipsel
-#define S0 S0_mipsel
-#define s1 s1_mipsel
-#define S1 S1_mipsel
-#define sa1100_initfn sa1100_initfn_mipsel
-#define sa1110_initfn sa1110_initfn_mipsel
-#define save_globals save_globals_mipsel
-#define scr_write scr_write_mipsel
-#define sctlr_write sctlr_write_mipsel
-#define set_bit set_bit_mipsel
-#define set_bits set_bits_mipsel
-#define set_default_nan_mode set_default_nan_mode_mipsel
-#define set_feature set_feature_mipsel
-#define set_float_detect_tininess set_float_detect_tininess_mipsel
-#define set_float_exception_flags set_float_exception_flags_mipsel
-#define set_float_rounding_mode set_float_rounding_mode_mipsel
-#define set_flush_inputs_to_zero set_flush_inputs_to_zero_mipsel
-#define set_flush_to_zero set_flush_to_zero_mipsel
-#define set_swi_errno set_swi_errno_mipsel
-#define sextract32 sextract32_mipsel
-#define sextract64 sextract64_mipsel
-#define shift128ExtraRightJamming shift128ExtraRightJamming_mipsel
-#define shift128Right shift128Right_mipsel
-#define shift128RightJamming shift128RightJamming_mipsel
-#define shift32RightJamming shift32RightJamming_mipsel
-#define shift64ExtraRightJamming shift64ExtraRightJamming_mipsel
-#define shift64RightJamming shift64RightJamming_mipsel
-#define shifter_out_im shifter_out_im_mipsel
-#define shortShift128Left shortShift128Left_mipsel
-#define shortShift192Left shortShift192Left_mipsel
-#define simple_mpu_ap_bits simple_mpu_ap_bits_mipsel
-#define size_code_gen_buffer size_code_gen_buffer_mipsel
-#define softmmu_lock_user softmmu_lock_user_mipsel
-#define softmmu_lock_user_string softmmu_lock_user_string_mipsel
-#define softmmu_tget32 softmmu_tget32_mipsel
-#define softmmu_tget8 softmmu_tget8_mipsel
-#define softmmu_tput32 softmmu_tput32_mipsel
-#define softmmu_unlock_user softmmu_unlock_user_mipsel
-#define sort_constraints sort_constraints_mipsel
-#define sp_el0_access sp_el0_access_mipsel
-#define spsel_read spsel_read_mipsel
-#define spsel_write spsel_write_mipsel
-#define start_list start_list_mipsel
-#define stb_p stb_p_mipsel
-#define stb_phys stb_phys_mipsel
-#define stl_be_p stl_be_p_mipsel
-#define stl_be_phys stl_be_phys_mipsel
-#define stl_he_p stl_he_p_mipsel
-#define stl_le_p stl_le_p_mipsel
-#define stl_le_phys stl_le_phys_mipsel
-#define stl_phys stl_phys_mipsel
-#define stl_phys_internal stl_phys_internal_mipsel
-#define stl_phys_notdirty stl_phys_notdirty_mipsel
-#define store_cpu_offset store_cpu_offset_mipsel
-#define store_reg store_reg_mipsel
-#define store_reg_bx store_reg_bx_mipsel
-#define store_reg_from_load store_reg_from_load_mipsel
-#define stq_be_p stq_be_p_mipsel
-#define stq_be_phys stq_be_phys_mipsel
-#define stq_he_p stq_he_p_mipsel
-#define stq_le_p stq_le_p_mipsel
-#define stq_le_phys stq_le_phys_mipsel
-#define stq_phys stq_phys_mipsel
-#define string_input_get_visitor string_input_get_visitor_mipsel
-#define string_input_visitor_cleanup string_input_visitor_cleanup_mipsel
-#define string_input_visitor_new string_input_visitor_new_mipsel
-#define strongarm_cp_reginfo strongarm_cp_reginfo_mipsel
-#define strstart strstart_mipsel
-#define strtosz strtosz_mipsel
-#define strtosz_suffix strtosz_suffix_mipsel
-#define stw_be_p stw_be_p_mipsel
-#define stw_be_phys stw_be_phys_mipsel
-#define stw_he_p stw_he_p_mipsel
-#define stw_le_p stw_le_p_mipsel
-#define stw_le_phys stw_le_phys_mipsel
-#define stw_phys stw_phys_mipsel
-#define stw_phys_internal stw_phys_internal_mipsel
-#define sub128 sub128_mipsel
-#define sub16_sat sub16_sat_mipsel
-#define sub16_usat sub16_usat_mipsel
-#define sub192 sub192_mipsel
-#define sub8_sat sub8_sat_mipsel
-#define sub8_usat sub8_usat_mipsel
-#define subFloat128Sigs subFloat128Sigs_mipsel
-#define subFloat32Sigs subFloat32Sigs_mipsel
-#define subFloat64Sigs subFloat64Sigs_mipsel
-#define subFloatx80Sigs subFloatx80Sigs_mipsel
-#define subpage_accepts subpage_accepts_mipsel
-#define subpage_init subpage_init_mipsel
-#define subpage_ops subpage_ops_mipsel
-#define subpage_read subpage_read_mipsel
-#define subpage_register subpage_register_mipsel
-#define subpage_write subpage_write_mipsel
-#define suffix_mul suffix_mul_mipsel
-#define swap_commutative swap_commutative_mipsel
-#define swap_commutative2 swap_commutative2_mipsel
-#define switch_mode switch_mode_mipsel
-#define switch_v7m_sp switch_v7m_sp_mipsel
-#define syn_aa32_bkpt syn_aa32_bkpt_mipsel
-#define syn_aa32_hvc syn_aa32_hvc_mipsel
-#define syn_aa32_smc syn_aa32_smc_mipsel
-#define syn_aa32_svc syn_aa32_svc_mipsel
-#define syn_breakpoint syn_breakpoint_mipsel
-#define sync_globals sync_globals_mipsel
-#define syn_cp14_rrt_trap syn_cp14_rrt_trap_mipsel
-#define syn_cp14_rt_trap syn_cp14_rt_trap_mipsel
-#define syn_cp15_rrt_trap syn_cp15_rrt_trap_mipsel
-#define syn_cp15_rt_trap syn_cp15_rt_trap_mipsel
-#define syn_data_abort syn_data_abort_mipsel
-#define syn_fp_access_trap syn_fp_access_trap_mipsel
-#define syn_insn_abort syn_insn_abort_mipsel
-#define syn_swstep syn_swstep_mipsel
-#define syn_uncategorized syn_uncategorized_mipsel
-#define syn_watchpoint syn_watchpoint_mipsel
-#define syscall_err syscall_err_mipsel
-#define system_bus_class_init system_bus_class_init_mipsel
-#define system_bus_info system_bus_info_mipsel
-#define t2ee_cp_reginfo t2ee_cp_reginfo_mipsel
-#define table_logic_cc table_logic_cc_mipsel
-#define target_parse_constraint target_parse_constraint_mipsel
-#define target_words_bigendian target_words_bigendian_mipsel
-#define tb_add_jump tb_add_jump_mipsel
-#define tb_alloc tb_alloc_mipsel
-#define tb_alloc_page tb_alloc_page_mipsel
-#define tb_check_watchpoint tb_check_watchpoint_mipsel
-#define tb_find_fast tb_find_fast_mipsel
-#define tb_find_pc tb_find_pc_mipsel
-#define tb_find_slow tb_find_slow_mipsel
-#define tb_flush tb_flush_mipsel
-#define tb_flush_jmp_cache tb_flush_jmp_cache_mipsel
-#define tb_free tb_free_mipsel
-#define tb_gen_code tb_gen_code_mipsel
-#define tb_hash_remove tb_hash_remove_mipsel
-#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mipsel
-#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mipsel
-#define tb_invalidate_phys_range tb_invalidate_phys_range_mipsel
-#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_mipsel
-#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_mipsel
-#define tb_jmp_remove tb_jmp_remove_mipsel
-#define tb_link_page tb_link_page_mipsel
-#define tb_page_remove tb_page_remove_mipsel
-#define tb_phys_hash_func tb_phys_hash_func_mipsel
-#define tb_phys_invalidate tb_phys_invalidate_mipsel
-#define tb_reset_jump tb_reset_jump_mipsel
-#define tb_set_jmp_target tb_set_jmp_target_mipsel
-#define tcg_accel_class_init tcg_accel_class_init_mipsel
-#define tcg_accel_type tcg_accel_type_mipsel
-#define tcg_add_param_i32 tcg_add_param_i32_mipsel
-#define tcg_add_param_i64 tcg_add_param_i64_mipsel
-#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_mipsel
-#define tcg_allowed tcg_allowed_mipsel
-#define tcg_canonicalize_memop tcg_canonicalize_memop_mipsel
-#define tcg_commit tcg_commit_mipsel
-#define tcg_cond_to_jcc tcg_cond_to_jcc_mipsel
-#define tcg_constant_folding tcg_constant_folding_mipsel
+#define tcg_can_emit_vec_op tcg_can_emit_vec_op_mipsel
+#define tcg_expand_vec_op tcg_expand_vec_op_mipsel
+#define tcg_register_jit tcg_register_jit_mipsel
+#define tcg_tb_insert tcg_tb_insert_mipsel
+#define tcg_tb_remove tcg_tb_remove_mipsel
+#define tcg_tb_lookup tcg_tb_lookup_mipsel
+#define tcg_tb_foreach tcg_tb_foreach_mipsel
+#define tcg_nb_tbs tcg_nb_tbs_mipsel
+#define tcg_region_reset_all tcg_region_reset_all_mipsel
+#define tcg_region_init tcg_region_init_mipsel
+#define tcg_code_size tcg_code_size_mipsel
+#define tcg_code_capacity tcg_code_capacity_mipsel
+#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_mipsel
+#define tcg_malloc_internal tcg_malloc_internal_mipsel
+#define tcg_pool_reset tcg_pool_reset_mipsel
+#define tcg_context_init tcg_context_init_mipsel
+#define tcg_tb_alloc tcg_tb_alloc_mipsel
+#define tcg_prologue_init tcg_prologue_init_mipsel
+#define tcg_func_start tcg_func_start_mipsel
+#define tcg_set_frame tcg_set_frame_mipsel
+#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mipsel
+#define tcg_temp_new_internal tcg_temp_new_internal_mipsel
+#define tcg_temp_new_vec tcg_temp_new_vec_mipsel
+#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_mipsel
+#define tcg_temp_free_internal tcg_temp_free_internal_mipsel
 #define tcg_const_i32 tcg_const_i32_mipsel
 #define tcg_const_i64 tcg_const_i64_mipsel
 #define tcg_const_local_i32 tcg_const_local_i32_mipsel
 #define tcg_const_local_i64 tcg_const_local_i64_mipsel
-#define tcg_context_init tcg_context_init_mipsel
-#define tcg_cpu_address_space_init tcg_cpu_address_space_init_mipsel
-#define tcg_cpu_exec tcg_cpu_exec_mipsel
-#define tcg_current_code_size tcg_current_code_size_mipsel
-#define tcg_dump_info tcg_dump_info_mipsel
-#define tcg_dump_ops tcg_dump_ops_mipsel
-#define tcg_exec_all tcg_exec_all_mipsel
-#define tcg_find_helper tcg_find_helper_mipsel
-#define tcg_func_start tcg_func_start_mipsel
-#define tcg_gen_abs_i32 tcg_gen_abs_i32_mipsel
-#define tcg_gen_add2_i32 tcg_gen_add2_i32_mipsel
-#define tcg_gen_add_i32 tcg_gen_add_i32_mipsel
-#define tcg_gen_add_i64 tcg_gen_add_i64_mipsel
-#define tcg_gen_addi_i32 tcg_gen_addi_i32_mipsel
-#define tcg_gen_addi_i64 tcg_gen_addi_i64_mipsel
-#define tcg_gen_andc_i32 tcg_gen_andc_i32_mipsel
-#define tcg_gen_and_i32 tcg_gen_and_i32_mipsel
-#define tcg_gen_and_i64 tcg_gen_and_i64_mipsel
-#define tcg_gen_andi_i32 tcg_gen_andi_i32_mipsel
-#define tcg_gen_andi_i64 tcg_gen_andi_i64_mipsel
-#define tcg_gen_br tcg_gen_br_mipsel
-#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mipsel
-#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mipsel
-#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mipsel
-#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mipsel
-#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mipsel
+#define tcg_op_supported tcg_op_supported_mipsel
 #define tcg_gen_callN tcg_gen_callN_mipsel
+#define tcg_op_remove tcg_op_remove_mipsel
+#define tcg_emit_op tcg_emit_op_mipsel
+#define tcg_op_insert_before tcg_op_insert_before_mipsel
+#define tcg_op_insert_after tcg_op_insert_after_mipsel
+#define tcg_cpu_exec_time tcg_cpu_exec_time_mipsel
 #define tcg_gen_code tcg_gen_code_mipsel
-#define tcg_gen_code_common tcg_gen_code_common_mipsel
-#define tcg_gen_code_search_pc tcg_gen_code_search_pc_mipsel
-#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mipsel
-#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_mipsel
-#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mipsel
-#define tcg_gen_exit_tb tcg_gen_exit_tb_mipsel
-#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mipsel
-#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mipsel
-#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mipsel
-#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mipsel
-#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mipsel
-#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mipsel
-#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mipsel
-#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mipsel
-#define tcg_gen_goto_tb tcg_gen_goto_tb_mipsel
-#define tcg_gen_ld_i32 tcg_gen_ld_i32_mipsel
-#define tcg_gen_ld_i64 tcg_gen_ld_i64_mipsel
-#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_mipsel
-#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_mipsel
-#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mipsel
-#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mipsel
-#define tcg_gen_mov_i32 tcg_gen_mov_i32_mipsel
-#define tcg_gen_mov_i64 tcg_gen_mov_i64_mipsel
-#define tcg_gen_movi_i32 tcg_gen_movi_i32_mipsel
-#define tcg_gen_movi_i64 tcg_gen_movi_i64_mipsel
-#define tcg_gen_mul_i32 tcg_gen_mul_i32_mipsel
-#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mipsel
-#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mipsel
-#define tcg_gen_neg_i32 tcg_gen_neg_i32_mipsel
-#define tcg_gen_neg_i64 tcg_gen_neg_i64_mipsel
-#define tcg_gen_not_i32 tcg_gen_not_i32_mipsel
-#define tcg_gen_op0 tcg_gen_op0_mipsel
-#define tcg_gen_op1i tcg_gen_op1i_mipsel
-#define tcg_gen_op2_i32 tcg_gen_op2_i32_mipsel
-#define tcg_gen_op2_i64 tcg_gen_op2_i64_mipsel
-#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_mipsel
-#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_mipsel
-#define tcg_gen_op3_i32 tcg_gen_op3_i32_mipsel
-#define tcg_gen_op3_i64 tcg_gen_op3_i64_mipsel
-#define tcg_gen_op4_i32 tcg_gen_op4_i32_mipsel
-#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_mipsel
-#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_mipsel
-#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_mipsel
-#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_mipsel
-#define tcg_gen_op6_i32 tcg_gen_op6_i32_mipsel
-#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_mipsel
-#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_mipsel
-#define tcg_gen_orc_i32 tcg_gen_orc_i32_mipsel
-#define tcg_gen_or_i32 tcg_gen_or_i32_mipsel
-#define tcg_gen_or_i64 tcg_gen_or_i64_mipsel
+#define tcg_gen_op1 tcg_gen_op1_mipsel
+#define tcg_gen_op2 tcg_gen_op2_mipsel
+#define tcg_gen_op3 tcg_gen_op3_mipsel
+#define tcg_gen_op4 tcg_gen_op4_mipsel
+#define tcg_gen_op5 tcg_gen_op5_mipsel
+#define tcg_gen_op6 tcg_gen_op6_mipsel
+#define tcg_gen_mb tcg_gen_mb_mipsel
+#define tcg_gen_addi_i32 tcg_gen_addi_i32_mipsel
+#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_mipsel
+#define tcg_gen_subi_i32 tcg_gen_subi_i32_mipsel
+#define tcg_gen_andi_i32 tcg_gen_andi_i32_mipsel
 #define tcg_gen_ori_i32 tcg_gen_ori_i32_mipsel
-#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mipsel
-#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mipsel
-#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mipsel
-#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mipsel
+#define tcg_gen_xori_i32 tcg_gen_xori_i32_mipsel
+#define tcg_gen_shli_i32 tcg_gen_shli_i32_mipsel
+#define tcg_gen_shri_i32 tcg_gen_shri_i32_mipsel
+#define tcg_gen_sari_i32 tcg_gen_sari_i32_mipsel
+#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mipsel
+#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mipsel
+#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mipsel
+#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_mipsel
+#define tcg_gen_muli_i32 tcg_gen_muli_i32_mipsel
+#define tcg_gen_div_i32 tcg_gen_div_i32_mipsel
+#define tcg_gen_rem_i32 tcg_gen_rem_i32_mipsel
+#define tcg_gen_divu_i32 tcg_gen_divu_i32_mipsel
+#define tcg_gen_remu_i32 tcg_gen_remu_i32_mipsel
+#define tcg_gen_andc_i32 tcg_gen_andc_i32_mipsel
+#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_mipsel
+#define tcg_gen_nand_i32 tcg_gen_nand_i32_mipsel
+#define tcg_gen_nor_i32 tcg_gen_nor_i32_mipsel
+#define tcg_gen_orc_i32 tcg_gen_orc_i32_mipsel
+#define tcg_gen_clz_i32 tcg_gen_clz_i32_mipsel
+#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_mipsel
+#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_mipsel
+#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_mipsel
+#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_mipsel
+#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_mipsel
 #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_mipsel
 #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mipsel
 #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mipsel
 #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mipsel
-#define tcg_gen_sar_i32 tcg_gen_sar_i32_mipsel
-#define tcg_gen_sari_i32 tcg_gen_sari_i32_mipsel
-#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mipsel
-#define tcg_gen_shl_i32 tcg_gen_shl_i32_mipsel
-#define tcg_gen_shl_i64 tcg_gen_shl_i64_mipsel
-#define tcg_gen_shli_i32 tcg_gen_shli_i32_mipsel
+#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mipsel
+#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_mipsel
+#define tcg_gen_extract_i32 tcg_gen_extract_i32_mipsel
+#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_mipsel
+#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_mipsel
+#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mipsel
+#define tcg_gen_add2_i32 tcg_gen_add2_i32_mipsel
+#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_mipsel
+#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mipsel
+#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mipsel
+#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_mipsel
+#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mipsel
+#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mipsel
+#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mipsel
+#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mipsel
+#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mipsel
+#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mipsel
+#define tcg_gen_smin_i32 tcg_gen_smin_i32_mipsel
+#define tcg_gen_umin_i32 tcg_gen_umin_i32_mipsel
+#define tcg_gen_smax_i32 tcg_gen_smax_i32_mipsel
+#define tcg_gen_umax_i32 tcg_gen_umax_i32_mipsel
+#define tcg_gen_abs_i32 tcg_gen_abs_i32_mipsel
+#define tcg_gen_addi_i64 tcg_gen_addi_i64_mipsel
+#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_mipsel
+#define tcg_gen_subi_i64 tcg_gen_subi_i64_mipsel
+#define tcg_gen_andi_i64 tcg_gen_andi_i64_mipsel
+#define tcg_gen_ori_i64 tcg_gen_ori_i64_mipsel
+#define tcg_gen_xori_i64 tcg_gen_xori_i64_mipsel
 #define tcg_gen_shli_i64 tcg_gen_shli_i64_mipsel
-#define tcg_gen_shr_i32 tcg_gen_shr_i32_mipsel
-#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_mipsel
-#define tcg_gen_shr_i64 tcg_gen_shr_i64_mipsel
-#define tcg_gen_shri_i32 tcg_gen_shri_i32_mipsel
 #define tcg_gen_shri_i64 tcg_gen_shri_i64_mipsel
-#define tcg_gen_st_i32 tcg_gen_st_i32_mipsel
-#define tcg_gen_st_i64 tcg_gen_st_i64_mipsel
-#define tcg_gen_sub_i32 tcg_gen_sub_i32_mipsel
-#define tcg_gen_sub_i64 tcg_gen_sub_i64_mipsel
-#define tcg_gen_subi_i32 tcg_gen_subi_i32_mipsel
-#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_mipsel
-#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_mipsel
-#define tcg_gen_xor_i32 tcg_gen_xor_i32_mipsel
-#define tcg_gen_xor_i64 tcg_gen_xor_i64_mipsel
-#define tcg_gen_xori_i32 tcg_gen_xori_i32_mipsel
-#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_mipsel
-#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_mipsel
-#define tcg_get_arg_str_idx tcg_get_arg_str_idx_mipsel
-#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_mipsel
-#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_mipsel
-#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mipsel
-#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_mipsel
-#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_mipsel
-#define tcg_global_reg_new_internal tcg_global_reg_new_internal_mipsel
-#define tcg_handle_interrupt tcg_handle_interrupt_mipsel
-#define tcg_init tcg_init_mipsel
-#define tcg_invert_cond tcg_invert_cond_mipsel
-#define tcg_la_bb_end tcg_la_bb_end_mipsel
-#define tcg_la_br_end tcg_la_br_end_mipsel
-#define tcg_la_func_end tcg_la_func_end_mipsel
-#define tcg_liveness_analysis tcg_liveness_analysis_mipsel
-#define tcg_malloc tcg_malloc_mipsel
-#define tcg_malloc_internal tcg_malloc_internal_mipsel
-#define tcg_op_defs_org tcg_op_defs_org_mipsel
-#define tcg_opt_gen_mov tcg_opt_gen_mov_mipsel
-#define tcg_opt_gen_movi tcg_opt_gen_movi_mipsel
-#define tcg_optimize tcg_optimize_mipsel
-#define tcg_out16 tcg_out16_mipsel
-#define tcg_out32 tcg_out32_mipsel
-#define tcg_out64 tcg_out64_mipsel
-#define tcg_out8 tcg_out8_mipsel
-#define tcg_out_addi tcg_out_addi_mipsel
-#define tcg_out_branch tcg_out_branch_mipsel
-#define tcg_out_brcond32 tcg_out_brcond32_mipsel
-#define tcg_out_brcond64 tcg_out_brcond64_mipsel
-#define tcg_out_bswap32 tcg_out_bswap32_mipsel
-#define tcg_out_bswap64 tcg_out_bswap64_mipsel
-#define tcg_out_call tcg_out_call_mipsel
-#define tcg_out_cmp tcg_out_cmp_mipsel
-#define tcg_out_ext16s tcg_out_ext16s_mipsel
-#define tcg_out_ext16u tcg_out_ext16u_mipsel
-#define tcg_out_ext32s tcg_out_ext32s_mipsel
-#define tcg_out_ext32u tcg_out_ext32u_mipsel
-#define tcg_out_ext8s tcg_out_ext8s_mipsel
-#define tcg_out_ext8u tcg_out_ext8u_mipsel
-#define tcg_out_jmp tcg_out_jmp_mipsel
-#define tcg_out_jxx tcg_out_jxx_mipsel
-#define tcg_out_label tcg_out_label_mipsel
-#define tcg_out_ld tcg_out_ld_mipsel
-#define tcg_out_modrm tcg_out_modrm_mipsel
-#define tcg_out_modrm_offset tcg_out_modrm_offset_mipsel
-#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_mipsel
-#define tcg_out_mov tcg_out_mov_mipsel
-#define tcg_out_movcond32 tcg_out_movcond32_mipsel
-#define tcg_out_movcond64 tcg_out_movcond64_mipsel
-#define tcg_out_movi tcg_out_movi_mipsel
-#define tcg_out_op tcg_out_op_mipsel
-#define tcg_out_pop tcg_out_pop_mipsel
-#define tcg_out_push tcg_out_push_mipsel
-#define tcg_out_qemu_ld tcg_out_qemu_ld_mipsel
-#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_mipsel
-#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_mipsel
-#define tcg_out_qemu_st tcg_out_qemu_st_mipsel
-#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_mipsel
-#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_mipsel
-#define tcg_out_reloc tcg_out_reloc_mipsel
-#define tcg_out_rolw_8 tcg_out_rolw_8_mipsel
-#define tcg_out_setcond32 tcg_out_setcond32_mipsel
-#define tcg_out_setcond64 tcg_out_setcond64_mipsel
-#define tcg_out_shifti tcg_out_shifti_mipsel
-#define tcg_out_st tcg_out_st_mipsel
-#define tcg_out_tb_finalize tcg_out_tb_finalize_mipsel
-#define tcg_out_tb_init tcg_out_tb_init_mipsel
-#define tcg_out_tlb_load tcg_out_tlb_load_mipsel
-#define tcg_out_vex_modrm tcg_out_vex_modrm_mipsel
-#define tcg_patch32 tcg_patch32_mipsel
-#define tcg_patch8 tcg_patch8_mipsel
-#define tcg_pcrel_diff tcg_pcrel_diff_mipsel
-#define tcg_pool_reset tcg_pool_reset_mipsel
-#define tcg_prologue_init tcg_prologue_init_mipsel
-#define tcg_ptr_byte_diff tcg_ptr_byte_diff_mipsel
-#define tcg_reg_alloc tcg_reg_alloc_mipsel
-#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_mipsel
-#define tcg_reg_alloc_call tcg_reg_alloc_call_mipsel
-#define tcg_reg_alloc_mov tcg_reg_alloc_mov_mipsel
-#define tcg_reg_alloc_movi tcg_reg_alloc_movi_mipsel
-#define tcg_reg_alloc_op tcg_reg_alloc_op_mipsel
-#define tcg_reg_alloc_start tcg_reg_alloc_start_mipsel
-#define tcg_reg_free tcg_reg_free_mipsel
-#define tcg_reg_sync tcg_reg_sync_mipsel
-#define tcg_set_frame tcg_set_frame_mipsel
-#define tcg_set_nop tcg_set_nop_mipsel
-#define tcg_swap_cond tcg_swap_cond_mipsel
-#define tcg_target_callee_save_regs tcg_target_callee_save_regs_mipsel
-#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_mipsel
-#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_mipsel
-#define tcg_target_const_match tcg_target_const_match_mipsel
-#define tcg_target_init tcg_target_init_mipsel
-#define tcg_target_qemu_prologue tcg_target_qemu_prologue_mipsel
-#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_mipsel
-#define tcg_temp_alloc tcg_temp_alloc_mipsel
-#define tcg_temp_free_i32 tcg_temp_free_i32_mipsel
-#define tcg_temp_free_i64 tcg_temp_free_i64_mipsel
-#define tcg_temp_free_internal tcg_temp_free_internal_mipsel
-#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_mipsel
-#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_mipsel
-#define tcg_temp_new_i32 tcg_temp_new_i32_mipsel
-#define tcg_temp_new_i64 tcg_temp_new_i64_mipsel
-#define tcg_temp_new_internal tcg_temp_new_internal_mipsel
-#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_mipsel
-#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_mipsel
-#define tdb_hash tdb_hash_mipsel
-#define teecr_write teecr_write_mipsel
-#define teehbr_access teehbr_access_mipsel
-#define temp_allocate_frame temp_allocate_frame_mipsel
-#define temp_dead temp_dead_mipsel
-#define temps_are_copies temps_are_copies_mipsel
-#define temp_save temp_save_mipsel
-#define temp_sync temp_sync_mipsel
-#define tgen_arithi tgen_arithi_mipsel
-#define tgen_arithr tgen_arithr_mipsel
-#define thumb2_logic_op thumb2_logic_op_mipsel
-#define ti925t_initfn ti925t_initfn_mipsel
-#define tlb_add_large_page tlb_add_large_page_mipsel
-#define tlb_flush_entry tlb_flush_entry_mipsel
-#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mipsel
-#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mipsel
-#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mipsel
-#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_mipsel
-#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_mipsel
-#define tlbi_aa64_va_write tlbi_aa64_va_write_mipsel
-#define tlbiall_is_write tlbiall_is_write_mipsel
-#define tlbiall_write tlbiall_write_mipsel
-#define tlbiasid_is_write tlbiasid_is_write_mipsel
-#define tlbiasid_write tlbiasid_write_mipsel
-#define tlbimvaa_is_write tlbimvaa_is_write_mipsel
-#define tlbimvaa_write tlbimvaa_write_mipsel
-#define tlbimva_is_write tlbimva_is_write_mipsel
-#define tlbimva_write tlbimva_write_mipsel
-#define tlb_is_dirty_ram tlb_is_dirty_ram_mipsel
+#define tcg_gen_sari_i64 tcg_gen_sari_i64_mipsel
+#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mipsel
+#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_mipsel
+#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_mipsel
+#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_mipsel
+#define tcg_gen_muli_i64 tcg_gen_muli_i64_mipsel
+#define tcg_gen_div_i64 tcg_gen_div_i64_mipsel
+#define tcg_gen_rem_i64 tcg_gen_rem_i64_mipsel
+#define tcg_gen_divu_i64 tcg_gen_divu_i64_mipsel
+#define tcg_gen_remu_i64 tcg_gen_remu_i64_mipsel
+#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_mipsel
+#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_mipsel
+#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mipsel
+#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_mipsel
+#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_mipsel
+#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mipsel
+#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_mipsel
+#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_mipsel
+#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_mipsel
+#define tcg_gen_not_i64 tcg_gen_not_i64_mipsel
+#define tcg_gen_andc_i64 tcg_gen_andc_i64_mipsel
+#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_mipsel
+#define tcg_gen_nand_i64 tcg_gen_nand_i64_mipsel
+#define tcg_gen_nor_i64 tcg_gen_nor_i64_mipsel
+#define tcg_gen_orc_i64 tcg_gen_orc_i64_mipsel
+#define tcg_gen_clz_i64 tcg_gen_clz_i64_mipsel
+#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_mipsel
+#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_mipsel
+#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_mipsel
+#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_mipsel
+#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_mipsel
+#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_mipsel
+#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_mipsel
+#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_mipsel
+#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_mipsel
+#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_mipsel
+#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_mipsel
+#define tcg_gen_extract_i64 tcg_gen_extract_i64_mipsel
+#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_mipsel
+#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_mipsel
+#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mipsel
+#define tcg_gen_add2_i64 tcg_gen_add2_i64_mipsel
+#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_mipsel
+#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_mipsel
+#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_mipsel
+#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_mipsel
+#define tcg_gen_smin_i64 tcg_gen_smin_i64_mipsel
+#define tcg_gen_umin_i64 tcg_gen_umin_i64_mipsel
+#define tcg_gen_smax_i64 tcg_gen_smax_i64_mipsel
+#define tcg_gen_umax_i64 tcg_gen_umax_i64_mipsel
+#define tcg_gen_abs_i64 tcg_gen_abs_i64_mipsel
+#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_mipsel
+#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_mipsel
+#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mipsel
+#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mipsel
+#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mipsel
+#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_mipsel
+#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_mipsel
+#define tcg_gen_exit_tb tcg_gen_exit_tb_mipsel
+#define tcg_gen_goto_tb tcg_gen_goto_tb_mipsel
+#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_mipsel
+#define check_exit_request check_exit_request_mipsel
+#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mipsel
+#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mipsel
+#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mipsel
+#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mipsel
+#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_mipsel
+#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_mipsel
+#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_mipsel
+#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_mipsel
+#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_mipsel
+#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_mipsel
+#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_mipsel
+#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_mipsel
+#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_mipsel
+#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_mipsel
+#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_mipsel
+#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_mipsel
+#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_mipsel
+#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_mipsel
+#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_mipsel
+#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_mipsel
+#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_mipsel
+#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_mipsel
+#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_mipsel
+#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_mipsel
+#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_mipsel
+#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_mipsel
+#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_mipsel
+#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_mipsel
+#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_mipsel
+#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_mipsel
+#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_mipsel
+#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_mipsel
+#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_mipsel
+#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_mipsel
+#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_mipsel
+#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_mipsel
+#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_mipsel
+#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_mipsel
+#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_mipsel
+#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_mipsel
+#define simd_desc simd_desc_mipsel
+#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_mipsel
+#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_mipsel
+#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_mipsel
+#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_mipsel
+#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_mipsel
+#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_mipsel
+#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_mipsel
+#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_mipsel
+#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_mipsel
+#define tcg_gen_gvec_2 tcg_gen_gvec_2_mipsel
+#define tcg_gen_gvec_2i tcg_gen_gvec_2i_mipsel
+#define tcg_gen_gvec_2s tcg_gen_gvec_2s_mipsel
+#define tcg_gen_gvec_3 tcg_gen_gvec_3_mipsel
+#define tcg_gen_gvec_3i tcg_gen_gvec_3i_mipsel
+#define tcg_gen_gvec_4 tcg_gen_gvec_4_mipsel
+#define tcg_gen_gvec_mov tcg_gen_gvec_mov_mipsel
+#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_mipsel
+#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_mipsel
+#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_mipsel
+#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_mipsel
+#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_mipsel
+#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_mipsel
+#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_mipsel
+#define tcg_gen_gvec_not tcg_gen_gvec_not_mipsel
+#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_mipsel
+#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_mipsel
+#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_mipsel
+#define tcg_gen_gvec_add tcg_gen_gvec_add_mipsel
+#define tcg_gen_gvec_adds tcg_gen_gvec_adds_mipsel
+#define tcg_gen_gvec_addi tcg_gen_gvec_addi_mipsel
+#define tcg_gen_gvec_subs tcg_gen_gvec_subs_mipsel
+#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_mipsel
+#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_mipsel
+#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_mipsel
+#define tcg_gen_gvec_sub tcg_gen_gvec_sub_mipsel
+#define tcg_gen_gvec_mul tcg_gen_gvec_mul_mipsel
+#define tcg_gen_gvec_muls tcg_gen_gvec_muls_mipsel
+#define tcg_gen_gvec_muli tcg_gen_gvec_muli_mipsel
+#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_mipsel
+#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_mipsel
+#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_mipsel
+#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_mipsel
+#define tcg_gen_gvec_smin tcg_gen_gvec_smin_mipsel
+#define tcg_gen_gvec_umin tcg_gen_gvec_umin_mipsel
+#define tcg_gen_gvec_smax tcg_gen_gvec_smax_mipsel
+#define tcg_gen_gvec_umax tcg_gen_gvec_umax_mipsel
+#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_mipsel
+#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_mipsel
+#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_mipsel
+#define tcg_gen_gvec_neg tcg_gen_gvec_neg_mipsel
+#define tcg_gen_gvec_abs tcg_gen_gvec_abs_mipsel
+#define tcg_gen_gvec_and tcg_gen_gvec_and_mipsel
+#define tcg_gen_gvec_or tcg_gen_gvec_or_mipsel
+#define tcg_gen_gvec_xor tcg_gen_gvec_xor_mipsel
+#define tcg_gen_gvec_andc tcg_gen_gvec_andc_mipsel
+#define tcg_gen_gvec_orc tcg_gen_gvec_orc_mipsel
+#define tcg_gen_gvec_nand tcg_gen_gvec_nand_mipsel
+#define tcg_gen_gvec_nor tcg_gen_gvec_nor_mipsel
+#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_mipsel
+#define tcg_gen_gvec_ands tcg_gen_gvec_ands_mipsel
+#define tcg_gen_gvec_andi tcg_gen_gvec_andi_mipsel
+#define tcg_gen_gvec_xors tcg_gen_gvec_xors_mipsel
+#define tcg_gen_gvec_xori tcg_gen_gvec_xori_mipsel
+#define tcg_gen_gvec_ors tcg_gen_gvec_ors_mipsel
+#define tcg_gen_gvec_ori tcg_gen_gvec_ori_mipsel
+#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_mipsel
+#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_mipsel
+#define tcg_gen_gvec_shli tcg_gen_gvec_shli_mipsel
+#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_mipsel
+#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_mipsel
+#define tcg_gen_gvec_shri tcg_gen_gvec_shri_mipsel
+#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_mipsel
+#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_mipsel
+#define tcg_gen_gvec_sari tcg_gen_gvec_sari_mipsel
+#define tcg_gen_gvec_shls tcg_gen_gvec_shls_mipsel
+#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_mipsel
+#define tcg_gen_gvec_sars tcg_gen_gvec_sars_mipsel
+#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_mipsel
+#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_mipsel
+#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_mipsel
+#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_mipsel
+#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_mipsel
+#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_mipsel
+#define vec_gen_2 vec_gen_2_mipsel
+#define vec_gen_3 vec_gen_3_mipsel
+#define vec_gen_4 vec_gen_4_mipsel
+#define tcg_gen_mov_vec tcg_gen_mov_vec_mipsel
+#define tcg_const_zeros_vec tcg_const_zeros_vec_mipsel
+#define tcg_const_ones_vec tcg_const_ones_vec_mipsel
+#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_mipsel
+#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_mipsel
+#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_mipsel
+#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_mipsel
+#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_mipsel
+#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_mipsel
+#define tcg_gen_dupi_vec tcg_gen_dupi_vec_mipsel
+#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_mipsel
+#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_mipsel
+#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_mipsel
+#define tcg_gen_ld_vec tcg_gen_ld_vec_mipsel
+#define tcg_gen_st_vec tcg_gen_st_vec_mipsel
+#define tcg_gen_stl_vec tcg_gen_stl_vec_mipsel
+#define tcg_gen_and_vec tcg_gen_and_vec_mipsel
+#define tcg_gen_or_vec tcg_gen_or_vec_mipsel
+#define tcg_gen_xor_vec tcg_gen_xor_vec_mipsel
+#define tcg_gen_andc_vec tcg_gen_andc_vec_mipsel
+#define tcg_gen_orc_vec tcg_gen_orc_vec_mipsel
+#define tcg_gen_nand_vec tcg_gen_nand_vec_mipsel
+#define tcg_gen_nor_vec tcg_gen_nor_vec_mipsel
+#define tcg_gen_eqv_vec tcg_gen_eqv_vec_mipsel
+#define tcg_gen_not_vec tcg_gen_not_vec_mipsel
+#define tcg_gen_neg_vec tcg_gen_neg_vec_mipsel
+#define tcg_gen_abs_vec tcg_gen_abs_vec_mipsel
+#define tcg_gen_shli_vec tcg_gen_shli_vec_mipsel
+#define tcg_gen_shri_vec tcg_gen_shri_vec_mipsel
+#define tcg_gen_sari_vec tcg_gen_sari_vec_mipsel
+#define tcg_gen_cmp_vec tcg_gen_cmp_vec_mipsel
+#define tcg_gen_add_vec tcg_gen_add_vec_mipsel
+#define tcg_gen_sub_vec tcg_gen_sub_vec_mipsel
+#define tcg_gen_mul_vec tcg_gen_mul_vec_mipsel
+#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_mipsel
+#define tcg_gen_usadd_vec tcg_gen_usadd_vec_mipsel
+#define tcg_gen_sssub_vec tcg_gen_sssub_vec_mipsel
+#define tcg_gen_ussub_vec tcg_gen_ussub_vec_mipsel
+#define tcg_gen_smin_vec tcg_gen_smin_vec_mipsel
+#define tcg_gen_umin_vec tcg_gen_umin_vec_mipsel
+#define tcg_gen_smax_vec tcg_gen_smax_vec_mipsel
+#define tcg_gen_umax_vec tcg_gen_umax_vec_mipsel
+#define tcg_gen_shlv_vec tcg_gen_shlv_vec_mipsel
+#define tcg_gen_shrv_vec tcg_gen_shrv_vec_mipsel
+#define tcg_gen_sarv_vec tcg_gen_sarv_vec_mipsel
+#define tcg_gen_shls_vec tcg_gen_shls_vec_mipsel
+#define tcg_gen_shrs_vec tcg_gen_shrs_vec_mipsel
+#define tcg_gen_sars_vec tcg_gen_sars_vec_mipsel
+#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_mipsel
+#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_mipsel
+#define tb_htable_lookup tb_htable_lookup_mipsel
+#define tb_set_jmp_target tb_set_jmp_target_mipsel
+#define cpu_exec cpu_exec_mipsel
+#define cpu_loop_exit_noexc cpu_loop_exit_noexc_mipsel
+#define cpu_reloading_memory_map cpu_reloading_memory_map_mipsel
+#define cpu_loop_exit cpu_loop_exit_mipsel
+#define cpu_loop_exit_restore cpu_loop_exit_restore_mipsel
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_mipsel
+#define tlb_init tlb_init_mipsel
+#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_mipsel
+#define tlb_flush tlb_flush_mipsel
+#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_mipsel
+#define tlb_flush_all_cpus tlb_flush_all_cpus_mipsel
+#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_mipsel
+#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_mipsel
+#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_mipsel
+#define tlb_flush_page tlb_flush_page_mipsel
+#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_mipsel
+#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_mipsel
+#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_mipsel
+#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_mipsel
 #define tlb_protect_code tlb_protect_code_mipsel
-#define tlb_reset_dirty_range tlb_reset_dirty_range_mipsel
-#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_mipsel
+#define tlb_unprotect_code tlb_unprotect_code_mipsel
+#define tlb_reset_dirty tlb_reset_dirty_mipsel
 #define tlb_set_dirty tlb_set_dirty_mipsel
-#define tlb_set_dirty1 tlb_set_dirty1_mipsel
-#define tlb_unprotect_code_phys tlb_unprotect_code_phys_mipsel
+#define tlb_set_page_with_attrs tlb_set_page_with_attrs_mipsel
+#define tlb_set_page tlb_set_page_mipsel
+#define get_page_addr_code_hostp get_page_addr_code_hostp_mipsel
+#define get_page_addr_code get_page_addr_code_mipsel
+#define probe_access probe_access_mipsel
 #define tlb_vaddr_to_host tlb_vaddr_to_host_mipsel
-#define token_get_type token_get_type_mipsel
-#define token_get_value token_get_value_mipsel
-#define token_is_escape token_is_escape_mipsel
-#define token_is_keyword token_is_keyword_mipsel
-#define token_is_operator token_is_operator_mipsel
-#define tokens_append_from_iter tokens_append_from_iter_mipsel
-#define to_qiv to_qiv_mipsel
-#define to_qov to_qov_mipsel
-#define tosa_init tosa_init_mipsel
-#define tosa_machine_init tosa_machine_init_mipsel
-#define tswap32 tswap32_mipsel
-#define tswap64 tswap64_mipsel
-#define type_class_get_size type_class_get_size_mipsel
-#define type_get_by_name type_get_by_name_mipsel
-#define type_get_parent type_get_parent_mipsel
-#define type_has_parent type_has_parent_mipsel
-#define type_initialize type_initialize_mipsel
-#define type_initialize_interface type_initialize_interface_mipsel
-#define type_is_ancestor type_is_ancestor_mipsel
-#define type_new type_new_mipsel
-#define type_object_get_size type_object_get_size_mipsel
-#define type_register_internal type_register_internal_mipsel
-#define type_table_add type_table_add_mipsel
-#define type_table_get type_table_get_mipsel
-#define type_table_lookup type_table_lookup_mipsel
-#define uint16_to_float32 uint16_to_float32_mipsel
-#define uint16_to_float64 uint16_to_float64_mipsel
-#define uint32_to_float32 uint32_to_float32_mipsel
-#define uint32_to_float64 uint32_to_float64_mipsel
-#define uint64_to_float128 uint64_to_float128_mipsel
-#define uint64_to_float32 uint64_to_float32_mipsel
-#define uint64_to_float64 uint64_to_float64_mipsel
-#define unassigned_io_ops unassigned_io_ops_mipsel
-#define unassigned_io_read unassigned_io_read_mipsel
-#define unassigned_io_write unassigned_io_write_mipsel
-#define unassigned_mem_accepts unassigned_mem_accepts_mipsel
+#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mipsel
+#define helper_le_lduw_mmu helper_le_lduw_mmu_mipsel
+#define helper_be_lduw_mmu helper_be_lduw_mmu_mipsel
+#define helper_le_ldul_mmu helper_le_ldul_mmu_mipsel
+#define helper_be_ldul_mmu helper_be_ldul_mmu_mipsel
+#define helper_le_ldq_mmu helper_le_ldq_mmu_mipsel
+#define helper_be_ldq_mmu helper_be_ldq_mmu_mipsel
+#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mipsel
+#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mipsel
+#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mipsel
+#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mipsel
+#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mipsel
+#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_mipsel
+#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_mipsel
+#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_mipsel
+#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_mipsel
+#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_mipsel
+#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_mipsel
+#define cpu_ldub_data_ra cpu_ldub_data_ra_mipsel
+#define cpu_ldsb_data_ra cpu_ldsb_data_ra_mipsel
+#define cpu_lduw_data_ra cpu_lduw_data_ra_mipsel
+#define cpu_ldsw_data_ra cpu_ldsw_data_ra_mipsel
+#define cpu_ldl_data_ra cpu_ldl_data_ra_mipsel
+#define cpu_ldq_data_ra cpu_ldq_data_ra_mipsel
+#define cpu_ldub_data cpu_ldub_data_mipsel
+#define cpu_ldsb_data cpu_ldsb_data_mipsel
+#define cpu_lduw_data cpu_lduw_data_mipsel
+#define cpu_ldsw_data cpu_ldsw_data_mipsel
+#define cpu_ldl_data cpu_ldl_data_mipsel
+#define cpu_ldq_data cpu_ldq_data_mipsel
+#define helper_ret_stb_mmu helper_ret_stb_mmu_mipsel
+#define helper_le_stw_mmu helper_le_stw_mmu_mipsel
+#define helper_be_stw_mmu helper_be_stw_mmu_mipsel
+#define helper_le_stl_mmu helper_le_stl_mmu_mipsel
+#define helper_be_stl_mmu helper_be_stl_mmu_mipsel
+#define helper_le_stq_mmu helper_le_stq_mmu_mipsel
+#define helper_be_stq_mmu helper_be_stq_mmu_mipsel
+#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_mipsel
+#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_mipsel
+#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_mipsel
+#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_mipsel
+#define cpu_stb_data_ra cpu_stb_data_ra_mipsel
+#define cpu_stw_data_ra cpu_stw_data_ra_mipsel
+#define cpu_stl_data_ra cpu_stl_data_ra_mipsel
+#define cpu_stq_data_ra cpu_stq_data_ra_mipsel
+#define cpu_stb_data cpu_stb_data_mipsel
+#define cpu_stw_data cpu_stw_data_mipsel
+#define cpu_stl_data cpu_stl_data_mipsel
+#define cpu_stq_data cpu_stq_data_mipsel
+#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_mipsel
+#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_mipsel
+#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_mipsel
+#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_mipsel
+#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_mipsel
+#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_mipsel
+#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_mipsel
+#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_mipsel
+#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_mipsel
+#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_mipsel
+#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_mipsel
+#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_mipsel
+#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_mipsel
+#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_mipsel
+#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_mipsel
+#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_mipsel
+#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_mipsel
+#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_mipsel
+#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_mipsel
+#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_mipsel
+#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_mipsel
+#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_mipsel
+#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_mipsel
+#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_mipsel
+#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_mipsel
+#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_mipsel
+#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_mipsel
+#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_mipsel
+#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_mipsel
+#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_mipsel
+#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_mipsel
+#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_mipsel
+#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_mipsel
+#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_mipsel
+#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_mipsel
+#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_mipsel
+#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_mipsel
+#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_mipsel
+#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_mipsel
+#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_mipsel
+#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_mipsel
+#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_mipsel
+#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_mipsel
+#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_mipsel
+#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_mipsel
+#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_mipsel
+#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_mipsel
+#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_mipsel
+#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_mipsel
+#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_mipsel
+#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_mipsel
+#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_mipsel
+#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_mipsel
+#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_mipsel
+#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_mipsel
+#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_mipsel
+#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_mipsel
+#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_mipsel
+#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_mipsel
+#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_mipsel
+#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_mipsel
+#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_mipsel
+#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_mipsel
+#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_mipsel
+#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_mipsel
+#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_mipsel
+#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_mipsel
+#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_mipsel
+#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_mipsel
+#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_mipsel
+#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_mipsel
+#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_mipsel
+#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_mipsel
+#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_mipsel
+#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_mipsel
+#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_mipsel
+#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_mipsel
+#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_mipsel
+#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_mipsel
+#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_mipsel
+#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_mipsel
+#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_mipsel
+#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_mipsel
+#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_mipsel
+#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_mipsel
+#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_mipsel
+#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_mipsel
+#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_mipsel
+#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_mipsel
+#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_mipsel
+#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_mipsel
+#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_mipsel
+#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_mipsel
+#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_mipsel
+#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_mipsel
+#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_mipsel
+#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_mipsel
+#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_mipsel
+#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_mipsel
+#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_mipsel
+#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_mipsel
+#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_mipsel
+#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_mipsel
+#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_mipsel
+#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_mipsel
+#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_mipsel
+#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_mipsel
+#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_mipsel
+#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_mipsel
+#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_mipsel
+#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_mipsel
+#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_mipsel
+#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_mipsel
+#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_mipsel
+#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_mipsel
+#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_mipsel
+#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_mipsel
+#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_mipsel
+#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_mipsel
+#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_mipsel
+#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_mipsel
+#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_mipsel
+#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_mipsel
+#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_mipsel
+#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_mipsel
+#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_mipsel
+#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_mipsel
+#define helper_atomic_xchgb helper_atomic_xchgb_mipsel
+#define helper_atomic_fetch_addb helper_atomic_fetch_addb_mipsel
+#define helper_atomic_fetch_andb
helper_atomic_fetch_andb_mipsel +#define helper_atomic_fetch_orb helper_atomic_fetch_orb_mipsel +#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_mipsel +#define helper_atomic_add_fetchb helper_atomic_add_fetchb_mipsel +#define helper_atomic_and_fetchb helper_atomic_and_fetchb_mipsel +#define helper_atomic_or_fetchb helper_atomic_or_fetchb_mipsel +#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_mipsel +#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_mipsel +#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_mipsel +#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_mipsel +#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_mipsel +#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_mipsel +#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_mipsel +#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_mipsel +#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_mipsel +#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_mipsel +#define helper_atomic_xchgw_le helper_atomic_xchgw_le_mipsel +#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_mipsel +#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_mipsel +#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_mipsel +#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_mipsel +#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_mipsel +#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_mipsel +#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_mipsel +#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_mipsel +#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_mipsel +#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_mipsel +#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_mipsel +#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_mipsel +#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_mipsel +#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_mipsel +#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_mipsel +#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_mipsel +#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_mipsel +#define helper_atomic_xchgw_be helper_atomic_xchgw_be_mipsel +#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_mipsel +#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_mipsel +#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_mipsel +#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_mipsel +#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_mipsel +#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_mipsel +#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_mipsel +#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_mipsel +#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_mipsel +#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_mipsel +#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_mipsel +#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_mipsel +#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_mipsel +#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_mipsel +#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_mipsel +#define helper_atomic_add_fetchw_be 
helper_atomic_add_fetchw_be_mipsel +#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_mipsel +#define helper_atomic_xchgl_le helper_atomic_xchgl_le_mipsel +#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_mipsel +#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_mipsel +#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_mipsel +#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_mipsel +#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_mipsel +#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_mipsel +#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_mipsel +#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_mipsel +#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_mipsel +#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_mipsel +#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_mipsel +#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_mipsel +#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_mipsel +#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_mipsel +#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_mipsel +#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_mipsel +#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_mipsel +#define helper_atomic_xchgl_be helper_atomic_xchgl_be_mipsel +#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_mipsel +#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_mipsel +#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_mipsel +#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_mipsel +#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_mipsel +#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_mipsel +#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_mipsel +#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_mipsel +#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_mipsel +#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_mipsel +#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_mipsel +#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_mipsel +#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_mipsel +#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_mipsel +#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_mipsel +#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_mipsel +#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_mipsel +#define helper_atomic_xchgq_le helper_atomic_xchgq_le_mipsel +#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_mipsel +#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_mipsel +#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_mipsel +#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_mipsel +#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_mipsel +#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_mipsel +#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_mipsel +#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_mipsel +#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_mipsel +#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_mipsel +#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_mipsel 
+#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_mipsel +#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_mipsel +#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_mipsel +#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_mipsel +#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_mipsel +#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_mipsel +#define helper_atomic_xchgq_be helper_atomic_xchgq_be_mipsel +#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_mipsel +#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_mipsel +#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_mipsel +#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_mipsel +#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_mipsel +#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_mipsel +#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_mipsel +#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_mipsel +#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_mipsel +#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_mipsel +#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_mipsel +#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_mipsel +#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_mipsel +#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_mipsel +#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_mipsel +#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_mipsel +#define cpu_ldub_code cpu_ldub_code_mipsel +#define cpu_lduw_code cpu_lduw_code_mipsel +#define cpu_ldl_code cpu_ldl_code_mipsel +#define cpu_ldq_code cpu_ldq_code_mipsel +#define helper_div_i32 helper_div_i32_mipsel +#define helper_rem_i32 helper_rem_i32_mipsel +#define helper_divu_i32 helper_divu_i32_mipsel +#define helper_remu_i32 helper_remu_i32_mipsel +#define helper_shl_i64 helper_shl_i64_mipsel +#define helper_shr_i64 helper_shr_i64_mipsel +#define helper_sar_i64 helper_sar_i64_mipsel +#define helper_div_i64 helper_div_i64_mipsel +#define helper_rem_i64 helper_rem_i64_mipsel +#define helper_divu_i64 helper_divu_i64_mipsel +#define helper_remu_i64 helper_remu_i64_mipsel +#define helper_muluh_i64 helper_muluh_i64_mipsel +#define helper_mulsh_i64 helper_mulsh_i64_mipsel +#define helper_clz_i32 helper_clz_i32_mipsel +#define helper_ctz_i32 helper_ctz_i32_mipsel +#define helper_clz_i64 helper_clz_i64_mipsel +#define helper_ctz_i64 helper_ctz_i64_mipsel +#define helper_clrsb_i32 helper_clrsb_i32_mipsel +#define helper_clrsb_i64 helper_clrsb_i64_mipsel +#define helper_ctpop_i32 helper_ctpop_i32_mipsel +#define helper_ctpop_i64 helper_ctpop_i64_mipsel +#define helper_lookup_tb_ptr helper_lookup_tb_ptr_mipsel +#define helper_exit_atomic helper_exit_atomic_mipsel +#define helper_gvec_add8 helper_gvec_add8_mipsel +#define helper_gvec_add16 helper_gvec_add16_mipsel +#define helper_gvec_add32 helper_gvec_add32_mipsel +#define helper_gvec_add64 helper_gvec_add64_mipsel +#define helper_gvec_adds8 helper_gvec_adds8_mipsel +#define helper_gvec_adds16 helper_gvec_adds16_mipsel +#define helper_gvec_adds32 helper_gvec_adds32_mipsel +#define helper_gvec_adds64 helper_gvec_adds64_mipsel +#define helper_gvec_sub8 helper_gvec_sub8_mipsel +#define helper_gvec_sub16 helper_gvec_sub16_mipsel +#define helper_gvec_sub32 helper_gvec_sub32_mipsel +#define helper_gvec_sub64 
helper_gvec_sub64_mipsel +#define helper_gvec_subs8 helper_gvec_subs8_mipsel +#define helper_gvec_subs16 helper_gvec_subs16_mipsel +#define helper_gvec_subs32 helper_gvec_subs32_mipsel +#define helper_gvec_subs64 helper_gvec_subs64_mipsel +#define helper_gvec_mul8 helper_gvec_mul8_mipsel +#define helper_gvec_mul16 helper_gvec_mul16_mipsel +#define helper_gvec_mul32 helper_gvec_mul32_mipsel +#define helper_gvec_mul64 helper_gvec_mul64_mipsel +#define helper_gvec_muls8 helper_gvec_muls8_mipsel +#define helper_gvec_muls16 helper_gvec_muls16_mipsel +#define helper_gvec_muls32 helper_gvec_muls32_mipsel +#define helper_gvec_muls64 helper_gvec_muls64_mipsel +#define helper_gvec_neg8 helper_gvec_neg8_mipsel +#define helper_gvec_neg16 helper_gvec_neg16_mipsel +#define helper_gvec_neg32 helper_gvec_neg32_mipsel +#define helper_gvec_neg64 helper_gvec_neg64_mipsel +#define helper_gvec_abs8 helper_gvec_abs8_mipsel +#define helper_gvec_abs16 helper_gvec_abs16_mipsel +#define helper_gvec_abs32 helper_gvec_abs32_mipsel +#define helper_gvec_abs64 helper_gvec_abs64_mipsel +#define helper_gvec_mov helper_gvec_mov_mipsel +#define helper_gvec_dup64 helper_gvec_dup64_mipsel +#define helper_gvec_dup32 helper_gvec_dup32_mipsel +#define helper_gvec_dup16 helper_gvec_dup16_mipsel +#define helper_gvec_dup8 helper_gvec_dup8_mipsel +#define helper_gvec_not helper_gvec_not_mipsel +#define helper_gvec_and helper_gvec_and_mipsel +#define helper_gvec_or helper_gvec_or_mipsel +#define helper_gvec_xor helper_gvec_xor_mipsel +#define helper_gvec_andc helper_gvec_andc_mipsel +#define helper_gvec_orc helper_gvec_orc_mipsel +#define helper_gvec_nand helper_gvec_nand_mipsel +#define helper_gvec_nor helper_gvec_nor_mipsel +#define helper_gvec_eqv helper_gvec_eqv_mipsel +#define helper_gvec_ands helper_gvec_ands_mipsel +#define helper_gvec_xors helper_gvec_xors_mipsel +#define helper_gvec_ors helper_gvec_ors_mipsel +#define helper_gvec_shl8i helper_gvec_shl8i_mipsel +#define helper_gvec_shl16i helper_gvec_shl16i_mipsel +#define helper_gvec_shl32i helper_gvec_shl32i_mipsel +#define helper_gvec_shl64i helper_gvec_shl64i_mipsel +#define helper_gvec_shr8i helper_gvec_shr8i_mipsel +#define helper_gvec_shr16i helper_gvec_shr16i_mipsel +#define helper_gvec_shr32i helper_gvec_shr32i_mipsel +#define helper_gvec_shr64i helper_gvec_shr64i_mipsel +#define helper_gvec_sar8i helper_gvec_sar8i_mipsel +#define helper_gvec_sar16i helper_gvec_sar16i_mipsel +#define helper_gvec_sar32i helper_gvec_sar32i_mipsel +#define helper_gvec_sar64i helper_gvec_sar64i_mipsel +#define helper_gvec_shl8v helper_gvec_shl8v_mipsel +#define helper_gvec_shl16v helper_gvec_shl16v_mipsel +#define helper_gvec_shl32v helper_gvec_shl32v_mipsel +#define helper_gvec_shl64v helper_gvec_shl64v_mipsel +#define helper_gvec_shr8v helper_gvec_shr8v_mipsel +#define helper_gvec_shr16v helper_gvec_shr16v_mipsel +#define helper_gvec_shr32v helper_gvec_shr32v_mipsel +#define helper_gvec_shr64v helper_gvec_shr64v_mipsel +#define helper_gvec_sar8v helper_gvec_sar8v_mipsel +#define helper_gvec_sar16v helper_gvec_sar16v_mipsel +#define helper_gvec_sar32v helper_gvec_sar32v_mipsel +#define helper_gvec_sar64v helper_gvec_sar64v_mipsel +#define helper_gvec_eq8 helper_gvec_eq8_mipsel +#define helper_gvec_ne8 helper_gvec_ne8_mipsel +#define helper_gvec_lt8 helper_gvec_lt8_mipsel +#define helper_gvec_le8 helper_gvec_le8_mipsel +#define helper_gvec_ltu8 helper_gvec_ltu8_mipsel +#define helper_gvec_leu8 helper_gvec_leu8_mipsel +#define helper_gvec_eq16 helper_gvec_eq16_mipsel +#define 
helper_gvec_ne16 helper_gvec_ne16_mipsel +#define helper_gvec_lt16 helper_gvec_lt16_mipsel +#define helper_gvec_le16 helper_gvec_le16_mipsel +#define helper_gvec_ltu16 helper_gvec_ltu16_mipsel +#define helper_gvec_leu16 helper_gvec_leu16_mipsel +#define helper_gvec_eq32 helper_gvec_eq32_mipsel +#define helper_gvec_ne32 helper_gvec_ne32_mipsel +#define helper_gvec_lt32 helper_gvec_lt32_mipsel +#define helper_gvec_le32 helper_gvec_le32_mipsel +#define helper_gvec_ltu32 helper_gvec_ltu32_mipsel +#define helper_gvec_leu32 helper_gvec_leu32_mipsel +#define helper_gvec_eq64 helper_gvec_eq64_mipsel +#define helper_gvec_ne64 helper_gvec_ne64_mipsel +#define helper_gvec_lt64 helper_gvec_lt64_mipsel +#define helper_gvec_le64 helper_gvec_le64_mipsel +#define helper_gvec_ltu64 helper_gvec_ltu64_mipsel +#define helper_gvec_leu64 helper_gvec_leu64_mipsel +#define helper_gvec_ssadd8 helper_gvec_ssadd8_mipsel +#define helper_gvec_ssadd16 helper_gvec_ssadd16_mipsel +#define helper_gvec_ssadd32 helper_gvec_ssadd32_mipsel +#define helper_gvec_ssadd64 helper_gvec_ssadd64_mipsel +#define helper_gvec_sssub8 helper_gvec_sssub8_mipsel +#define helper_gvec_sssub16 helper_gvec_sssub16_mipsel +#define helper_gvec_sssub32 helper_gvec_sssub32_mipsel +#define helper_gvec_sssub64 helper_gvec_sssub64_mipsel +#define helper_gvec_usadd8 helper_gvec_usadd8_mipsel +#define helper_gvec_usadd16 helper_gvec_usadd16_mipsel +#define helper_gvec_usadd32 helper_gvec_usadd32_mipsel +#define helper_gvec_usadd64 helper_gvec_usadd64_mipsel +#define helper_gvec_ussub8 helper_gvec_ussub8_mipsel +#define helper_gvec_ussub16 helper_gvec_ussub16_mipsel +#define helper_gvec_ussub32 helper_gvec_ussub32_mipsel +#define helper_gvec_ussub64 helper_gvec_ussub64_mipsel +#define helper_gvec_smin8 helper_gvec_smin8_mipsel +#define helper_gvec_smin16 helper_gvec_smin16_mipsel +#define helper_gvec_smin32 helper_gvec_smin32_mipsel +#define helper_gvec_smin64 helper_gvec_smin64_mipsel +#define helper_gvec_smax8 helper_gvec_smax8_mipsel +#define helper_gvec_smax16 helper_gvec_smax16_mipsel +#define helper_gvec_smax32 helper_gvec_smax32_mipsel +#define helper_gvec_smax64 helper_gvec_smax64_mipsel +#define helper_gvec_umin8 helper_gvec_umin8_mipsel +#define helper_gvec_umin16 helper_gvec_umin16_mipsel +#define helper_gvec_umin32 helper_gvec_umin32_mipsel +#define helper_gvec_umin64 helper_gvec_umin64_mipsel +#define helper_gvec_umax8 helper_gvec_umax8_mipsel +#define helper_gvec_umax16 helper_gvec_umax16_mipsel +#define helper_gvec_umax32 helper_gvec_umax32_mipsel +#define helper_gvec_umax64 helper_gvec_umax64_mipsel +#define helper_gvec_bitsel helper_gvec_bitsel_mipsel +#define cpu_restore_state cpu_restore_state_mipsel +#define page_collection_lock page_collection_lock_mipsel +#define page_collection_unlock page_collection_unlock_mipsel +#define free_code_gen_buffer free_code_gen_buffer_mipsel +#define tcg_exec_init tcg_exec_init_mipsel +#define tb_cleanup tb_cleanup_mipsel +#define tb_flush tb_flush_mipsel +#define tb_phys_invalidate tb_phys_invalidate_mipsel +#define tb_gen_code tb_gen_code_mipsel +#define tb_exec_lock tb_exec_lock_mipsel +#define tb_exec_unlock tb_exec_unlock_mipsel +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mipsel +#define tb_invalidate_phys_range tb_invalidate_phys_range_mipsel +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mipsel +#define tb_check_watchpoint tb_check_watchpoint_mipsel +#define cpu_io_recompile cpu_io_recompile_mipsel +#define tb_flush_jmp_cache tb_flush_jmp_cache_mipsel 
+#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_mipsel +#define translator_loop_temp_check translator_loop_temp_check_mipsel +#define translator_loop translator_loop_mipsel +#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_mipsel +#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_mipsel +#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_mipsel +#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_mipsel +#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_mipsel +#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_mipsel #define unassigned_mem_ops unassigned_mem_ops_mipsel -#define unassigned_mem_read unassigned_mem_read_mipsel -#define unassigned_mem_write unassigned_mem_write_mipsel -#define update_spsel update_spsel_mipsel -#define v6_cp_reginfo v6_cp_reginfo_mipsel -#define v6k_cp_reginfo v6k_cp_reginfo_mipsel -#define v7_cp_reginfo v7_cp_reginfo_mipsel -#define v7mp_cp_reginfo v7mp_cp_reginfo_mipsel -#define v7m_pop v7m_pop_mipsel -#define v7m_push v7m_push_mipsel -#define v8_cp_reginfo v8_cp_reginfo_mipsel -#define v8_el2_cp_reginfo v8_el2_cp_reginfo_mipsel -#define v8_el3_cp_reginfo v8_el3_cp_reginfo_mipsel -#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_mipsel -#define vapa_cp_reginfo vapa_cp_reginfo_mipsel -#define vbar_write vbar_write_mipsel -#define vfp_exceptbits_from_host vfp_exceptbits_from_host_mipsel -#define vfp_exceptbits_to_host vfp_exceptbits_to_host_mipsel -#define vfp_get_fpcr vfp_get_fpcr_mipsel -#define vfp_get_fpscr vfp_get_fpscr_mipsel -#define vfp_get_fpsr vfp_get_fpsr_mipsel -#define vfp_reg_offset vfp_reg_offset_mipsel -#define vfp_set_fpcr vfp_set_fpcr_mipsel -#define vfp_set_fpscr vfp_set_fpscr_mipsel -#define vfp_set_fpsr vfp_set_fpsr_mipsel -#define visit_end_implicit_struct visit_end_implicit_struct_mipsel -#define visit_end_list visit_end_list_mipsel -#define visit_end_struct visit_end_struct_mipsel -#define visit_end_union visit_end_union_mipsel -#define visit_get_next_type visit_get_next_type_mipsel -#define visit_next_list visit_next_list_mipsel -#define visit_optional visit_optional_mipsel -#define visit_start_implicit_struct visit_start_implicit_struct_mipsel -#define visit_start_list visit_start_list_mipsel -#define visit_start_struct visit_start_struct_mipsel -#define visit_start_union visit_start_union_mipsel -#define vmsa_cp_reginfo vmsa_cp_reginfo_mipsel -#define vmsa_tcr_el1_write vmsa_tcr_el1_write_mipsel -#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_mipsel -#define vmsa_ttbcr_reset vmsa_ttbcr_reset_mipsel -#define vmsa_ttbcr_write vmsa_ttbcr_write_mipsel -#define vmsa_ttbr_write vmsa_ttbr_write_mipsel -#define write_cpustate_to_list write_cpustate_to_list_mipsel -#define write_list_to_cpustate write_list_to_cpustate_mipsel -#define write_raw_cp_reg write_raw_cp_reg_mipsel -#define X86CPURegister32_lookup X86CPURegister32_lookup_mipsel -#define x86_op_defs x86_op_defs_mipsel -#define xpsr_read xpsr_read_mipsel -#define xpsr_write xpsr_write_mipsel -#define xscale_cpar_write xscale_cpar_write_mipsel -#define xscale_cp_reginfo xscale_cp_reginfo_mipsel -#define cpu_mips_exec cpu_mips_exec_mipsel +#define floatx80_infinity floatx80_infinity_mipsel +#define dup_const_func dup_const_func_mipsel +#define gen_helper_raise_exception gen_helper_raise_exception_mipsel +#define gen_helper_raise_interrupt gen_helper_raise_interrupt_mipsel +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mipsel +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mipsel 
+#define gen_helper_cpsr_read gen_helper_cpsr_read_mipsel +#define gen_helper_cpsr_write gen_helper_cpsr_write_mipsel +#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mipsel +#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mipsel +#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mipsel +#define helper_mfc0_random helper_mfc0_random_mipsel +#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mipsel +#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mipsel +#define helper_mfc0_tcbind helper_mfc0_tcbind_mipsel +#define helper_mftc0_tcbind helper_mftc0_tcbind_mipsel +#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mipsel +#define helper_mftc0_tcrestart helper_mftc0_tcrestart_mipsel +#define helper_mfc0_tchalt helper_mfc0_tchalt_mipsel +#define helper_mftc0_tchalt helper_mftc0_tchalt_mipsel +#define helper_mfc0_tccontext helper_mfc0_tccontext_mipsel +#define helper_mftc0_tccontext helper_mftc0_tccontext_mipsel +#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mipsel +#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mipsel +#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mipsel +#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mipsel +#define helper_mfc0_count helper_mfc0_count_mipsel +#define helper_mfc0_saar helper_mfc0_saar_mipsel +#define helper_mfhc0_saar helper_mfhc0_saar_mipsel +#define helper_mftc0_entryhi helper_mftc0_entryhi_mipsel +#define helper_mftc0_cause helper_mftc0_cause_mipsel +#define helper_mftc0_status helper_mftc0_status_mipsel +#define helper_mfc0_lladdr helper_mfc0_lladdr_mipsel +#define helper_mfc0_maar helper_mfc0_maar_mipsel +#define helper_mfhc0_maar helper_mfhc0_maar_mipsel +#define helper_mfc0_watchlo helper_mfc0_watchlo_mipsel +#define helper_mfc0_watchhi helper_mfc0_watchhi_mipsel +#define helper_mfhc0_watchhi helper_mfhc0_watchhi_mipsel +#define helper_mfc0_debug helper_mfc0_debug_mipsel +#define helper_mftc0_debug helper_mftc0_debug_mipsel +#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mipsel +#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mipsel +#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mipsel +#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mipsel +#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mipsel +#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mipsel +#define helper_dmfc0_maar helper_dmfc0_maar_mipsel +#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mipsel +#define helper_dmfc0_watchhi helper_dmfc0_watchhi_mipsel +#define helper_dmfc0_saar helper_dmfc0_saar_mipsel +#define helper_mtc0_index helper_mtc0_index_mipsel +#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mipsel +#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mipsel +#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mipsel +#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mipsel +#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mipsel +#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mipsel +#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mipsel +#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mipsel +#define helper_mtc0_yqmask helper_mtc0_yqmask_mipsel +#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mipsel +#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mipsel +#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mipsel +#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mipsel +#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mipsel +#define helper_mtc0_tcbind helper_mtc0_tcbind_mipsel +#define helper_mttc0_tcbind helper_mttc0_tcbind_mipsel +#define helper_mtc0_tcrestart 
helper_mtc0_tcrestart_mipsel +#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mipsel +#define helper_mtc0_tchalt helper_mtc0_tchalt_mipsel +#define helper_mttc0_tchalt helper_mttc0_tchalt_mipsel +#define helper_mtc0_tccontext helper_mtc0_tccontext_mipsel +#define helper_mttc0_tccontext helper_mttc0_tccontext_mipsel +#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mipsel +#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mipsel +#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mipsel +#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mipsel +#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mipsel +#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mipsel +#define helper_mtc0_context helper_mtc0_context_mipsel +#define helper_mtc0_memorymapid helper_mtc0_memorymapid_mipsel +#define update_pagemask update_pagemask_mipsel +#define helper_mtc0_pagemask helper_mtc0_pagemask_mipsel +#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mipsel +#define helper_mtc0_segctl0 helper_mtc0_segctl0_mipsel +#define helper_mtc0_segctl1 helper_mtc0_segctl1_mipsel +#define helper_mtc0_segctl2 helper_mtc0_segctl2_mipsel +#define helper_mtc0_pwfield helper_mtc0_pwfield_mipsel +#define helper_mtc0_pwsize helper_mtc0_pwsize_mipsel +#define helper_mtc0_wired helper_mtc0_wired_mipsel +#define helper_mtc0_pwctl helper_mtc0_pwctl_mipsel +#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mipsel +#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mipsel +#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mipsel +#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mipsel +#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mipsel +#define helper_mtc0_hwrena helper_mtc0_hwrena_mipsel +#define helper_mtc0_count helper_mtc0_count_mipsel +#define helper_mtc0_saari helper_mtc0_saari_mipsel +#define helper_mtc0_saar helper_mtc0_saar_mipsel +#define helper_mthc0_saar helper_mthc0_saar_mipsel +#define helper_mtc0_entryhi helper_mtc0_entryhi_mipsel +#define helper_mttc0_entryhi helper_mttc0_entryhi_mipsel +#define helper_mtc0_compare helper_mtc0_compare_mipsel +#define helper_mtc0_status helper_mtc0_status_mipsel +#define helper_mttc0_status helper_mttc0_status_mipsel +#define helper_mtc0_intctl helper_mtc0_intctl_mipsel +#define helper_mtc0_srsctl helper_mtc0_srsctl_mipsel +#define helper_mtc0_cause helper_mtc0_cause_mipsel +#define helper_mttc0_cause helper_mttc0_cause_mipsel +#define helper_mftc0_epc helper_mftc0_epc_mipsel +#define helper_mftc0_ebase helper_mftc0_ebase_mipsel +#define helper_mtc0_ebase helper_mtc0_ebase_mipsel +#define helper_mttc0_ebase helper_mttc0_ebase_mipsel +#define helper_mftc0_configx helper_mftc0_configx_mipsel +#define helper_mtc0_config0 helper_mtc0_config0_mipsel +#define helper_mtc0_config2 helper_mtc0_config2_mipsel +#define helper_mtc0_config3 helper_mtc0_config3_mipsel +#define helper_mtc0_config4 helper_mtc0_config4_mipsel +#define helper_mtc0_config5 helper_mtc0_config5_mipsel +#define helper_mtc0_lladdr helper_mtc0_lladdr_mipsel +#define helper_mtc0_maar helper_mtc0_maar_mipsel +#define helper_mthc0_maar helper_mthc0_maar_mipsel +#define helper_mtc0_maari helper_mtc0_maari_mipsel +#define helper_mtc0_watchlo helper_mtc0_watchlo_mipsel +#define helper_mtc0_watchhi helper_mtc0_watchhi_mipsel +#define helper_mthc0_watchhi helper_mthc0_watchhi_mipsel +#define helper_mtc0_xcontext helper_mtc0_xcontext_mipsel +#define helper_mtc0_framemask helper_mtc0_framemask_mipsel +#define helper_mtc0_debug helper_mtc0_debug_mipsel +#define helper_mttc0_debug 
helper_mttc0_debug_mipsel +#define helper_mtc0_performance0 helper_mtc0_performance0_mipsel +#define helper_mtc0_errctl helper_mtc0_errctl_mipsel +#define helper_mtc0_taglo helper_mtc0_taglo_mipsel +#define helper_mtc0_datalo helper_mtc0_datalo_mipsel +#define helper_mtc0_taghi helper_mtc0_taghi_mipsel +#define helper_mtc0_datahi helper_mtc0_datahi_mipsel +#define helper_mftgpr helper_mftgpr_mipsel +#define helper_mftlo helper_mftlo_mipsel +#define helper_mfthi helper_mfthi_mipsel +#define helper_mftacx helper_mftacx_mipsel +#define helper_mftdsp helper_mftdsp_mipsel +#define helper_mttgpr helper_mttgpr_mipsel +#define helper_mttlo helper_mttlo_mipsel +#define helper_mtthi helper_mtthi_mipsel +#define helper_mttacx helper_mttacx_mipsel +#define helper_mttdsp helper_mttdsp_mipsel +#define helper_dmt helper_dmt_mipsel +#define helper_emt helper_emt_mipsel +#define helper_dvpe helper_dvpe_mipsel +#define helper_evpe helper_evpe_mipsel +#define helper_dvp helper_dvp_mipsel +#define helper_evp helper_evp_mipsel #define cpu_mips_get_random cpu_mips_get_random_mipsel -#define cpu_mips_get_count cpu_mips_get_count_mipsel -#define cpu_mips_store_count cpu_mips_store_count_mipsel -#define cpu_mips_store_compare cpu_mips_store_compare_mipsel -#define cpu_mips_start_count cpu_mips_start_count_mipsel -#define cpu_mips_stop_count cpu_mips_stop_count_mipsel -#define mips_machine_init mips_machine_init_mipsel -#define cpu_mips_kseg0_to_phys cpu_mips_kseg0_to_phys_mipsel -#define cpu_mips_phys_to_kseg0 cpu_mips_phys_to_kseg0_mipsel -#define cpu_mips_kvm_um_phys_to_kseg0 cpu_mips_kvm_um_phys_to_kseg0_mipsel -#define mips_cpu_register_types mips_cpu_register_types_mipsel #define cpu_mips_init cpu_mips_init_mipsel -#define cpu_state_reset cpu_state_reset_mipsel -#define helper_msa_andi_b helper_msa_andi_b_mipsel -#define helper_msa_ori_b helper_msa_ori_b_mipsel -#define helper_msa_nori_b helper_msa_nori_b_mipsel -#define helper_msa_xori_b helper_msa_xori_b_mipsel -#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mipsel -#define helper_msa_bmzi_b helper_msa_bmzi_b_mipsel -#define helper_msa_bseli_b helper_msa_bseli_b_mipsel -#define helper_msa_shf_df helper_msa_shf_df_mipsel -#define helper_msa_and_v helper_msa_and_v_mipsel -#define helper_msa_or_v helper_msa_or_v_mipsel -#define helper_msa_nor_v helper_msa_nor_v_mipsel -#define helper_msa_xor_v helper_msa_xor_v_mipsel -#define helper_msa_bmnz_v helper_msa_bmnz_v_mipsel -#define helper_msa_bmz_v helper_msa_bmz_v_mipsel -#define helper_msa_bsel_v helper_msa_bsel_v_mipsel -#define helper_msa_addvi_df helper_msa_addvi_df_mipsel -#define helper_msa_subvi_df helper_msa_subvi_df_mipsel -#define helper_msa_ceqi_df helper_msa_ceqi_df_mipsel -#define helper_msa_clei_s_df helper_msa_clei_s_df_mipsel -#define helper_msa_clei_u_df helper_msa_clei_u_df_mipsel -#define helper_msa_clti_s_df helper_msa_clti_s_df_mipsel -#define helper_msa_clti_u_df helper_msa_clti_u_df_mipsel -#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mipsel -#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mipsel -#define helper_msa_mini_s_df helper_msa_mini_s_df_mipsel -#define helper_msa_mini_u_df helper_msa_mini_u_df_mipsel -#define helper_msa_ldi_df helper_msa_ldi_df_mipsel -#define helper_msa_slli_df helper_msa_slli_df_mipsel -#define helper_msa_srai_df helper_msa_srai_df_mipsel -#define helper_msa_srli_df helper_msa_srli_df_mipsel -#define helper_msa_bclri_df helper_msa_bclri_df_mipsel -#define helper_msa_bseti_df helper_msa_bseti_df_mipsel -#define helper_msa_bnegi_df helper_msa_bnegi_df_mipsel 
-#define helper_msa_sat_s_df helper_msa_sat_s_df_mipsel -#define helper_msa_sat_u_df helper_msa_sat_u_df_mipsel -#define helper_msa_srari_df helper_msa_srari_df_mipsel -#define helper_msa_srlri_df helper_msa_srlri_df_mipsel -#define helper_msa_binsli_df helper_msa_binsli_df_mipsel -#define helper_msa_binsri_df helper_msa_binsri_df_mipsel -#define helper_msa_sll_df helper_msa_sll_df_mipsel -#define helper_msa_sra_df helper_msa_sra_df_mipsel -#define helper_msa_srl_df helper_msa_srl_df_mipsel -#define helper_msa_bclr_df helper_msa_bclr_df_mipsel -#define helper_msa_bset_df helper_msa_bset_df_mipsel -#define helper_msa_bneg_df helper_msa_bneg_df_mipsel -#define helper_msa_addv_df helper_msa_addv_df_mipsel -#define helper_msa_subv_df helper_msa_subv_df_mipsel -#define helper_msa_max_s_df helper_msa_max_s_df_mipsel -#define helper_msa_max_u_df helper_msa_max_u_df_mipsel -#define helper_msa_min_s_df helper_msa_min_s_df_mipsel -#define helper_msa_min_u_df helper_msa_min_u_df_mipsel -#define helper_msa_max_a_df helper_msa_max_a_df_mipsel -#define helper_msa_min_a_df helper_msa_min_a_df_mipsel -#define helper_msa_ceq_df helper_msa_ceq_df_mipsel -#define helper_msa_clt_s_df helper_msa_clt_s_df_mipsel -#define helper_msa_clt_u_df helper_msa_clt_u_df_mipsel -#define helper_msa_cle_s_df helper_msa_cle_s_df_mipsel -#define helper_msa_cle_u_df helper_msa_cle_u_df_mipsel -#define helper_msa_add_a_df helper_msa_add_a_df_mipsel -#define helper_msa_adds_a_df helper_msa_adds_a_df_mipsel -#define helper_msa_adds_s_df helper_msa_adds_s_df_mipsel -#define helper_msa_adds_u_df helper_msa_adds_u_df_mipsel -#define helper_msa_ave_s_df helper_msa_ave_s_df_mipsel -#define helper_msa_ave_u_df helper_msa_ave_u_df_mipsel -#define helper_msa_aver_s_df helper_msa_aver_s_df_mipsel -#define helper_msa_aver_u_df helper_msa_aver_u_df_mipsel -#define helper_msa_subs_s_df helper_msa_subs_s_df_mipsel -#define helper_msa_subs_u_df helper_msa_subs_u_df_mipsel -#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mipsel -#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mipsel -#define helper_msa_asub_s_df helper_msa_asub_s_df_mipsel -#define helper_msa_asub_u_df helper_msa_asub_u_df_mipsel -#define helper_msa_mulv_df helper_msa_mulv_df_mipsel -#define helper_msa_div_s_df helper_msa_div_s_df_mipsel -#define helper_msa_div_u_df helper_msa_div_u_df_mipsel -#define helper_msa_mod_s_df helper_msa_mod_s_df_mipsel -#define helper_msa_mod_u_df helper_msa_mod_u_df_mipsel -#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mipsel -#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mipsel -#define helper_msa_srar_df helper_msa_srar_df_mipsel -#define helper_msa_srlr_df helper_msa_srlr_df_mipsel -#define helper_msa_hadd_s_df helper_msa_hadd_s_df_mipsel -#define helper_msa_hadd_u_df helper_msa_hadd_u_df_mipsel -#define helper_msa_hsub_s_df helper_msa_hsub_s_df_mipsel -#define helper_msa_hsub_u_df helper_msa_hsub_u_df_mipsel -#define helper_msa_mul_q_df helper_msa_mul_q_df_mipsel -#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mipsel -#define helper_msa_sld_df helper_msa_sld_df_mipsel -#define helper_msa_maddv_df helper_msa_maddv_df_mipsel -#define helper_msa_msubv_df helper_msa_msubv_df_mipsel -#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mipsel -#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mipsel -#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mipsel -#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mipsel -#define helper_msa_binsl_df helper_msa_binsl_df_mipsel -#define helper_msa_binsr_df 
helper_msa_binsr_df_mipsel -#define helper_msa_madd_q_df helper_msa_madd_q_df_mipsel -#define helper_msa_msub_q_df helper_msa_msub_q_df_mipsel -#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mipsel -#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mipsel -#define helper_msa_splat_df helper_msa_splat_df_mipsel -#define helper_msa_pckev_df helper_msa_pckev_df_mipsel -#define helper_msa_pckod_df helper_msa_pckod_df_mipsel -#define helper_msa_ilvl_df helper_msa_ilvl_df_mipsel -#define helper_msa_ilvr_df helper_msa_ilvr_df_mipsel -#define helper_msa_ilvev_df helper_msa_ilvev_df_mipsel -#define helper_msa_ilvod_df helper_msa_ilvod_df_mipsel -#define helper_msa_vshf_df helper_msa_vshf_df_mipsel -#define helper_msa_sldi_df helper_msa_sldi_df_mipsel -#define helper_msa_splati_df helper_msa_splati_df_mipsel -#define helper_msa_copy_s_df helper_msa_copy_s_df_mipsel -#define helper_msa_copy_u_df helper_msa_copy_u_df_mipsel -#define helper_msa_insert_df helper_msa_insert_df_mipsel -#define helper_msa_insve_df helper_msa_insve_df_mipsel -#define helper_msa_ctcmsa helper_msa_ctcmsa_mipsel -#define helper_msa_cfcmsa helper_msa_cfcmsa_mipsel -#define helper_msa_move_v helper_msa_move_v_mipsel -#define helper_msa_fill_df helper_msa_fill_df_mipsel -#define helper_msa_nlzc_df helper_msa_nlzc_df_mipsel -#define helper_msa_nloc_df helper_msa_nloc_df_mipsel -#define helper_msa_pcnt_df helper_msa_pcnt_df_mipsel -#define helper_msa_fcaf_df helper_msa_fcaf_df_mipsel -#define helper_msa_fcun_df helper_msa_fcun_df_mipsel -#define helper_msa_fceq_df helper_msa_fceq_df_mipsel -#define helper_msa_fcueq_df helper_msa_fcueq_df_mipsel -#define helper_msa_fclt_df helper_msa_fclt_df_mipsel -#define helper_msa_fcult_df helper_msa_fcult_df_mipsel -#define helper_msa_fcle_df helper_msa_fcle_df_mipsel -#define helper_msa_fcule_df helper_msa_fcule_df_mipsel -#define helper_msa_fsaf_df helper_msa_fsaf_df_mipsel -#define helper_msa_fsun_df helper_msa_fsun_df_mipsel -#define helper_msa_fseq_df helper_msa_fseq_df_mipsel -#define helper_msa_fsueq_df helper_msa_fsueq_df_mipsel -#define helper_msa_fslt_df helper_msa_fslt_df_mipsel -#define helper_msa_fsult_df helper_msa_fsult_df_mipsel -#define helper_msa_fsle_df helper_msa_fsle_df_mipsel -#define helper_msa_fsule_df helper_msa_fsule_df_mipsel -#define helper_msa_fcor_df helper_msa_fcor_df_mipsel -#define helper_msa_fcune_df helper_msa_fcune_df_mipsel -#define helper_msa_fcne_df helper_msa_fcne_df_mipsel -#define helper_msa_fsor_df helper_msa_fsor_df_mipsel -#define helper_msa_fsune_df helper_msa_fsune_df_mipsel -#define helper_msa_fsne_df helper_msa_fsne_df_mipsel -#define helper_msa_fadd_df helper_msa_fadd_df_mipsel -#define helper_msa_fsub_df helper_msa_fsub_df_mipsel -#define helper_msa_fmul_df helper_msa_fmul_df_mipsel -#define helper_msa_fdiv_df helper_msa_fdiv_df_mipsel -#define helper_msa_fmadd_df helper_msa_fmadd_df_mipsel -#define helper_msa_fmsub_df helper_msa_fmsub_df_mipsel -#define helper_msa_fexp2_df helper_msa_fexp2_df_mipsel -#define helper_msa_fexdo_df helper_msa_fexdo_df_mipsel -#define helper_msa_ftq_df helper_msa_ftq_df_mipsel -#define helper_msa_fmin_df helper_msa_fmin_df_mipsel -#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mipsel -#define helper_msa_fmax_df helper_msa_fmax_df_mipsel -#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mipsel -#define helper_msa_fclass_df helper_msa_fclass_df_mipsel -#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mipsel -#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mipsel -#define helper_msa_fsqrt_df 
helper_msa_fsqrt_df_mipsel -#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mipsel -#define helper_msa_frcp_df helper_msa_frcp_df_mipsel -#define helper_msa_frint_df helper_msa_frint_df_mipsel -#define helper_msa_flog2_df helper_msa_flog2_df_mipsel -#define helper_msa_fexupl_df helper_msa_fexupl_df_mipsel -#define helper_msa_fexupr_df helper_msa_fexupr_df_mipsel -#define helper_msa_ffql_df helper_msa_ffql_df_mipsel -#define helper_msa_ffqr_df helper_msa_ffqr_df_mipsel -#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mipsel -#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mipsel -#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mipsel -#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mipsel -#define helper_paddsb helper_paddsb_mipsel -#define helper_paddusb helper_paddusb_mipsel -#define helper_paddsh helper_paddsh_mipsel -#define helper_paddush helper_paddush_mipsel -#define helper_paddb helper_paddb_mipsel -#define helper_paddh helper_paddh_mipsel -#define helper_paddw helper_paddw_mipsel -#define helper_psubsb helper_psubsb_mipsel -#define helper_psubusb helper_psubusb_mipsel -#define helper_psubsh helper_psubsh_mipsel -#define helper_psubush helper_psubush_mipsel -#define helper_psubb helper_psubb_mipsel -#define helper_psubh helper_psubh_mipsel -#define helper_psubw helper_psubw_mipsel -#define helper_pshufh helper_pshufh_mipsel -#define helper_packsswh helper_packsswh_mipsel -#define helper_packsshb helper_packsshb_mipsel -#define helper_packushb helper_packushb_mipsel -#define helper_punpcklwd helper_punpcklwd_mipsel -#define helper_punpckhwd helper_punpckhwd_mipsel -#define helper_punpcklhw helper_punpcklhw_mipsel -#define helper_punpckhhw helper_punpckhhw_mipsel -#define helper_punpcklbh helper_punpcklbh_mipsel -#define helper_punpckhbh helper_punpckhbh_mipsel -#define helper_pavgh helper_pavgh_mipsel -#define helper_pavgb helper_pavgb_mipsel -#define helper_pmaxsh helper_pmaxsh_mipsel -#define helper_pminsh helper_pminsh_mipsel -#define helper_pmaxub helper_pmaxub_mipsel -#define helper_pminub helper_pminub_mipsel -#define helper_pcmpeqw helper_pcmpeqw_mipsel -#define helper_pcmpgtw helper_pcmpgtw_mipsel -#define helper_pcmpeqh helper_pcmpeqh_mipsel -#define helper_pcmpgth helper_pcmpgth_mipsel -#define helper_pcmpeqb helper_pcmpeqb_mipsel -#define helper_pcmpgtb helper_pcmpgtb_mipsel -#define helper_psllw helper_psllw_mipsel -#define helper_psrlw helper_psrlw_mipsel -#define helper_psraw helper_psraw_mipsel -#define helper_psllh helper_psllh_mipsel -#define helper_psrlh helper_psrlh_mipsel -#define helper_psrah helper_psrah_mipsel -#define helper_pmullh helper_pmullh_mipsel -#define helper_pmulhh helper_pmulhh_mipsel -#define helper_pmulhuh helper_pmulhuh_mipsel -#define helper_pmaddhw helper_pmaddhw_mipsel -#define helper_pasubub helper_pasubub_mipsel -#define helper_biadd helper_biadd_mipsel -#define helper_pmovmskb helper_pmovmskb_mipsel #define helper_absq_s_ph helper_absq_s_ph_mipsel #define helper_absq_s_qb helper_absq_s_qb_mipsel #define helper_absq_s_w helper_absq_s_w_mipsel +#define helper_absq_s_ob helper_absq_s_ob_mipsel +#define helper_absq_s_qh helper_absq_s_qh_mipsel +#define helper_absq_s_pw helper_absq_s_pw_mipsel #define helper_addqh_ph helper_addqh_ph_mipsel #define helper_addqh_r_ph helper_addqh_r_ph_mipsel #define helper_addqh_r_w helper_addqh_r_w_mipsel @@ -3279,35 +1450,89 @@ #define helper_subu_qb helper_subu_qb_mipsel #define helper_subu_s_ph helper_subu_s_ph_mipsel #define helper_subu_s_qb helper_subu_s_qb_mipsel +#define helper_adduh_ob 
helper_adduh_ob_mipsel +#define helper_adduh_r_ob helper_adduh_r_ob_mipsel +#define helper_subuh_ob helper_subuh_ob_mipsel +#define helper_subuh_r_ob helper_subuh_r_ob_mipsel +#define helper_addq_pw helper_addq_pw_mipsel +#define helper_addq_qh helper_addq_qh_mipsel +#define helper_addq_s_pw helper_addq_s_pw_mipsel +#define helper_addq_s_qh helper_addq_s_qh_mipsel +#define helper_addu_ob helper_addu_ob_mipsel +#define helper_addu_qh helper_addu_qh_mipsel +#define helper_addu_s_ob helper_addu_s_ob_mipsel +#define helper_addu_s_qh helper_addu_s_qh_mipsel +#define helper_subq_pw helper_subq_pw_mipsel +#define helper_subq_qh helper_subq_qh_mipsel +#define helper_subq_s_pw helper_subq_s_pw_mipsel +#define helper_subq_s_qh helper_subq_s_qh_mipsel +#define helper_subu_ob helper_subu_ob_mipsel +#define helper_subu_qh helper_subu_qh_mipsel +#define helper_subu_s_ob helper_subu_s_ob_mipsel +#define helper_subu_s_qh helper_subu_s_qh_mipsel #define helper_subuh_qb helper_subuh_qb_mipsel #define helper_subuh_r_qb helper_subuh_r_qb_mipsel #define helper_addsc helper_addsc_mipsel #define helper_addwc helper_addwc_mipsel #define helper_modsub helper_modsub_mipsel #define helper_raddu_w_qb helper_raddu_w_qb_mipsel +#define helper_raddu_l_ob helper_raddu_l_ob_mipsel #define helper_precr_qb_ph helper_precr_qb_ph_mipsel #define helper_precrq_qb_ph helper_precrq_qb_ph_mipsel #define helper_precr_sra_ph_w helper_precr_sra_ph_w_mipsel #define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mipsel #define helper_precrq_ph_w helper_precrq_ph_w_mipsel #define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mipsel +#define helper_precr_ob_qh helper_precr_ob_qh_mipsel +#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mipsel +#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mipsel +#define helper_precrq_ob_qh helper_precrq_ob_qh_mipsel +#define helper_precrq_qh_pw helper_precrq_qh_pw_mipsel +#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mipsel +#define helper_precrq_pw_l helper_precrq_pw_l_mipsel #define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mipsel +#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mipsel +#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mipsel +#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mipsel +#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mipsel +#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mipsel #define helper_precequ_ph_qbl helper_precequ_ph_qbl_mipsel #define helper_precequ_ph_qbr helper_precequ_ph_qbr_mipsel #define helper_precequ_ph_qbla helper_precequ_ph_qbla_mipsel #define helper_precequ_ph_qbra helper_precequ_ph_qbra_mipsel +#define helper_precequ_qh_obl helper_precequ_qh_obl_mipsel +#define helper_precequ_qh_obr helper_precequ_qh_obr_mipsel +#define helper_precequ_qh_obla helper_precequ_qh_obla_mipsel +#define helper_precequ_qh_obra helper_precequ_qh_obra_mipsel #define helper_preceu_ph_qbl helper_preceu_ph_qbl_mipsel #define helper_preceu_ph_qbr helper_preceu_ph_qbr_mipsel #define helper_preceu_ph_qbla helper_preceu_ph_qbla_mipsel #define helper_preceu_ph_qbra helper_preceu_ph_qbra_mipsel +#define helper_preceu_qh_obl helper_preceu_qh_obl_mipsel +#define helper_preceu_qh_obr helper_preceu_qh_obr_mipsel +#define helper_preceu_qh_obla helper_preceu_qh_obla_mipsel +#define helper_preceu_qh_obra helper_preceu_qh_obra_mipsel #define helper_shll_qb helper_shll_qb_mipsel #define helper_shrl_qb helper_shrl_qb_mipsel #define helper_shra_qb helper_shra_qb_mipsel #define helper_shra_r_qb helper_shra_r_qb_mipsel +#define helper_shll_ob 
helper_shll_ob_mipsel +#define helper_shrl_ob helper_shrl_ob_mipsel +#define helper_shra_ob helper_shra_ob_mipsel +#define helper_shra_r_ob helper_shra_r_ob_mipsel #define helper_shll_ph helper_shll_ph_mipsel #define helper_shll_s_ph helper_shll_s_ph_mipsel +#define helper_shll_qh helper_shll_qh_mipsel +#define helper_shll_s_qh helper_shll_s_qh_mipsel +#define helper_shrl_qh helper_shrl_qh_mipsel +#define helper_shra_qh helper_shra_qh_mipsel +#define helper_shra_r_qh helper_shra_r_qh_mipsel #define helper_shll_s_w helper_shll_s_w_mipsel #define helper_shra_r_w helper_shra_r_w_mipsel +#define helper_shll_pw helper_shll_pw_mipsel +#define helper_shll_s_pw helper_shll_s_pw_mipsel +#define helper_shra_pw helper_shra_pw_mipsel +#define helper_shra_r_pw helper_shra_r_pw_mipsel #define helper_shrl_ph helper_shrl_ph_mipsel #define helper_shra_ph helper_shra_ph_mipsel #define helper_shra_r_ph helper_shra_r_ph_mipsel @@ -3321,10 +1546,20 @@ #define helper_muleq_s_w_phr helper_muleq_s_w_phr_mipsel #define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mipsel #define helper_mulsa_w_ph helper_mulsa_w_ph_mipsel +#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mipsel +#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mipsel +#define helper_mulq_rs_qh helper_mulq_rs_qh_mipsel +#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mipsel +#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mipsel +#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mipsel #define helper_dpau_h_qbl helper_dpau_h_qbl_mipsel #define helper_dpau_h_qbr helper_dpau_h_qbr_mipsel #define helper_dpsu_h_qbl helper_dpsu_h_qbl_mipsel #define helper_dpsu_h_qbr helper_dpsu_h_qbr_mipsel +#define helper_dpau_h_obl helper_dpau_h_obl_mipsel +#define helper_dpau_h_obr helper_dpau_h_obr_mipsel +#define helper_dpsu_h_obl helper_dpsu_h_obl_mipsel +#define helper_dpsu_h_obr helper_dpsu_h_obr_mipsel #define helper_dpa_w_ph helper_dpa_w_ph_mipsel #define helper_dpax_w_ph helper_dpax_w_ph_mipsel #define helper_dps_w_ph helper_dps_w_ph_mipsel @@ -3335,200 +1570,92 @@ #define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mipsel #define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mipsel #define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mipsel +#define helper_dpa_w_qh helper_dpa_w_qh_mipsel +#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mipsel +#define helper_dps_w_qh helper_dps_w_qh_mipsel +#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mipsel #define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mipsel #define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mipsel +#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mipsel +#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mipsel +#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mipsel #define helper_maq_s_w_phl helper_maq_s_w_phl_mipsel #define helper_maq_s_w_phr helper_maq_s_w_phr_mipsel #define helper_maq_sa_w_phl helper_maq_sa_w_phl_mipsel #define helper_maq_sa_w_phr helper_maq_sa_w_phr_mipsel #define helper_mulq_s_w helper_mulq_s_w_mipsel #define helper_mulq_rs_w helper_mulq_rs_w_mipsel +#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mipsel +#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mipsel +#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mipsel +#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mipsel +#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mipsel +#define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mipsel +#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mipsel +#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mipsel +#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mipsel +#define helper_maq_s_l_pwr 
helper_maq_s_l_pwr_mipsel +#define helper_dmadd helper_dmadd_mipsel +#define helper_dmaddu helper_dmaddu_mipsel +#define helper_dmsub helper_dmsub_mipsel +#define helper_dmsubu helper_dmsubu_mipsel #define helper_bitrev helper_bitrev_mipsel #define helper_insv helper_insv_mipsel +#define helper_dinsv helper_dinsv_mipsel #define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mipsel #define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mipsel #define helper_cmpgu_le_qb helper_cmpgu_le_qb_mipsel +#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mipsel +#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mipsel +#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mipsel #define helper_cmpu_eq_qb helper_cmpu_eq_qb_mipsel #define helper_cmpu_lt_qb helper_cmpu_lt_qb_mipsel #define helper_cmpu_le_qb helper_cmpu_le_qb_mipsel #define helper_cmp_eq_ph helper_cmp_eq_ph_mipsel #define helper_cmp_lt_ph helper_cmp_lt_ph_mipsel #define helper_cmp_le_ph helper_cmp_le_ph_mipsel +#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mipsel +#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mipsel +#define helper_cmpu_le_ob helper_cmpu_le_ob_mipsel +#define helper_cmp_eq_qh helper_cmp_eq_qh_mipsel +#define helper_cmp_lt_qh helper_cmp_lt_qh_mipsel +#define helper_cmp_le_qh helper_cmp_le_qh_mipsel +#define helper_cmp_eq_pw helper_cmp_eq_pw_mipsel +#define helper_cmp_lt_pw helper_cmp_lt_pw_mipsel +#define helper_cmp_le_pw helper_cmp_le_pw_mipsel +#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mipsel +#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mipsel +#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mipsel #define helper_pick_qb helper_pick_qb_mipsel #define helper_pick_ph helper_pick_ph_mipsel +#define helper_pick_ob helper_pick_ob_mipsel +#define helper_pick_qh helper_pick_qh_mipsel +#define helper_pick_pw helper_pick_pw_mipsel #define helper_packrl_ph helper_packrl_ph_mipsel +#define helper_packrl_pw helper_packrl_pw_mipsel #define helper_extr_w helper_extr_w_mipsel #define helper_extr_r_w helper_extr_r_w_mipsel #define helper_extr_rs_w helper_extr_rs_w_mipsel +#define helper_dextr_w helper_dextr_w_mipsel +#define helper_dextr_r_w helper_dextr_r_w_mipsel +#define helper_dextr_rs_w helper_dextr_rs_w_mipsel +#define helper_dextr_l helper_dextr_l_mipsel +#define helper_dextr_r_l helper_dextr_r_l_mipsel +#define helper_dextr_rs_l helper_dextr_rs_l_mipsel #define helper_extr_s_h helper_extr_s_h_mipsel +#define helper_dextr_s_h helper_dextr_s_h_mipsel #define helper_extp helper_extp_mipsel #define helper_extpdp helper_extpdp_mipsel +#define helper_dextp helper_dextp_mipsel +#define helper_dextpdp helper_dextpdp_mipsel #define helper_shilo helper_shilo_mipsel +#define helper_dshilo helper_dshilo_mipsel #define helper_mthlip helper_mthlip_mipsel +#define helper_dmthlip helper_dmthlip_mipsel #define cpu_wrdsp cpu_wrdsp_mipsel #define helper_wrdsp helper_wrdsp_mipsel #define cpu_rddsp cpu_rddsp_mipsel #define helper_rddsp helper_rddsp_mipsel -#define helper_raise_exception_err helper_raise_exception_err_mipsel -#define helper_clo helper_clo_mipsel -#define helper_clz helper_clz_mipsel -#define helper_muls helper_muls_mipsel -#define helper_mulsu helper_mulsu_mipsel -#define helper_macc helper_macc_mipsel -#define helper_macchi helper_macchi_mipsel -#define helper_maccu helper_maccu_mipsel -#define helper_macchiu helper_macchiu_mipsel -#define helper_msac helper_msac_mipsel -#define helper_msachi helper_msachi_mipsel -#define helper_msacu helper_msacu_mipsel -#define helper_msachiu helper_msachiu_mipsel -#define helper_mulhi helper_mulhi_mipsel -#define 
helper_mulhiu helper_mulhiu_mipsel -#define helper_mulshi helper_mulshi_mipsel -#define helper_mulshiu helper_mulshiu_mipsel -#define helper_bitswap helper_bitswap_mipsel -#define helper_ll helper_ll_mipsel -#define helper_sc helper_sc_mipsel -#define helper_swl helper_swl_mipsel -#define helper_swr helper_swr_mipsel -#define helper_lwm helper_lwm_mipsel -#define helper_swm helper_swm_mipsel -#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mipsel -#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mipsel -#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mipsel -#define helper_mfc0_random helper_mfc0_random_mipsel -#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mipsel -#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mipsel -#define helper_mfc0_tcbind helper_mfc0_tcbind_mipsel -#define helper_mftc0_tcbind helper_mftc0_tcbind_mipsel -#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mipsel -#define helper_mftc0_tcrestart helper_mftc0_tcrestart_mipsel -#define helper_mfc0_tchalt helper_mfc0_tchalt_mipsel -#define helper_mftc0_tchalt helper_mftc0_tchalt_mipsel -#define helper_mfc0_tccontext helper_mfc0_tccontext_mipsel -#define helper_mftc0_tccontext helper_mftc0_tccontext_mipsel -#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mipsel -#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mipsel -#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mipsel -#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mipsel -#define helper_mfc0_count helper_mfc0_count_mipsel -#define helper_mftc0_entryhi helper_mftc0_entryhi_mipsel -#define helper_mftc0_cause helper_mftc0_cause_mipsel -#define helper_mftc0_status helper_mftc0_status_mipsel -#define helper_mfc0_lladdr helper_mfc0_lladdr_mipsel -#define helper_mfc0_watchlo helper_mfc0_watchlo_mipsel -#define helper_mfc0_watchhi helper_mfc0_watchhi_mipsel -#define helper_mfc0_debug helper_mfc0_debug_mipsel -#define helper_mftc0_debug helper_mftc0_debug_mipsel -#define helper_mtc0_index helper_mtc0_index_mipsel -#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mipsel -#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mipsel -#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mipsel -#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mipsel -#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mipsel -#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mipsel -#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mipsel -#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mipsel -#define helper_mtc0_yqmask helper_mtc0_yqmask_mipsel -#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mipsel -#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mipsel -#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mipsel -#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mipsel -#define helper_mtc0_tcbind helper_mtc0_tcbind_mipsel -#define helper_mttc0_tcbind helper_mttc0_tcbind_mipsel -#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mipsel -#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mipsel -#define helper_mtc0_tchalt helper_mtc0_tchalt_mipsel -#define helper_mttc0_tchalt helper_mttc0_tchalt_mipsel -#define helper_mtc0_tccontext helper_mtc0_tccontext_mipsel -#define helper_mttc0_tccontext helper_mttc0_tccontext_mipsel -#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mipsel -#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mipsel -#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mipsel -#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mipsel -#define helper_mtc0_entrylo1 
helper_mtc0_entrylo1_mipsel -#define helper_mtc0_context helper_mtc0_context_mipsel -#define helper_mtc0_pagemask helper_mtc0_pagemask_mipsel -#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mipsel -#define helper_mtc0_wired helper_mtc0_wired_mipsel -#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mipsel -#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mipsel -#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mipsel -#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mipsel -#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mipsel -#define helper_mtc0_hwrena helper_mtc0_hwrena_mipsel -#define helper_mtc0_count helper_mtc0_count_mipsel -#define helper_mtc0_entryhi helper_mtc0_entryhi_mipsel -#define helper_mttc0_entryhi helper_mttc0_entryhi_mipsel -#define helper_mtc0_compare helper_mtc0_compare_mipsel -#define helper_mtc0_status helper_mtc0_status_mipsel -#define helper_mttc0_status helper_mttc0_status_mipsel -#define helper_mtc0_intctl helper_mtc0_intctl_mipsel -#define helper_mtc0_srsctl helper_mtc0_srsctl_mipsel -#define helper_mtc0_cause helper_mtc0_cause_mipsel -#define helper_mttc0_cause helper_mttc0_cause_mipsel -#define helper_mftc0_epc helper_mftc0_epc_mipsel -#define helper_mftc0_ebase helper_mftc0_ebase_mipsel -#define helper_mtc0_ebase helper_mtc0_ebase_mipsel -#define helper_mttc0_ebase helper_mttc0_ebase_mipsel -#define helper_mftc0_configx helper_mftc0_configx_mipsel -#define helper_mtc0_config0 helper_mtc0_config0_mipsel -#define helper_mtc0_config2 helper_mtc0_config2_mipsel -#define helper_mtc0_config4 helper_mtc0_config4_mipsel -#define helper_mtc0_config5 helper_mtc0_config5_mipsel -#define helper_mtc0_lladdr helper_mtc0_lladdr_mipsel -#define helper_mtc0_watchlo helper_mtc0_watchlo_mipsel -#define helper_mtc0_watchhi helper_mtc0_watchhi_mipsel -#define helper_mtc0_xcontext helper_mtc0_xcontext_mipsel -#define helper_mtc0_framemask helper_mtc0_framemask_mipsel -#define helper_mtc0_debug helper_mtc0_debug_mipsel -#define helper_mttc0_debug helper_mttc0_debug_mipsel -#define helper_mtc0_performance0 helper_mtc0_performance0_mipsel -#define helper_mtc0_taglo helper_mtc0_taglo_mipsel -#define helper_mtc0_datalo helper_mtc0_datalo_mipsel -#define helper_mtc0_taghi helper_mtc0_taghi_mipsel -#define helper_mtc0_datahi helper_mtc0_datahi_mipsel -#define helper_mftgpr helper_mftgpr_mipsel -#define helper_mftlo helper_mftlo_mipsel -#define helper_mfthi helper_mfthi_mipsel -#define helper_mftacx helper_mftacx_mipsel -#define helper_mftdsp helper_mftdsp_mipsel -#define helper_mttgpr helper_mttgpr_mipsel -#define helper_mttlo helper_mttlo_mipsel -#define helper_mtthi helper_mtthi_mipsel -#define helper_mttacx helper_mttacx_mipsel -#define helper_mttdsp helper_mttdsp_mipsel -#define helper_dmt helper_dmt_mipsel -#define helper_emt helper_emt_mipsel -#define helper_dvpe helper_dvpe_mipsel -#define helper_evpe helper_evpe_mipsel -#define helper_fork helper_fork_mipsel -#define helper_yield helper_yield_mipsel -#define r4k_helper_tlbinv r4k_helper_tlbinv_mipsel -#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mipsel -#define r4k_helper_tlbwi r4k_helper_tlbwi_mipsel -#define r4k_helper_tlbwr r4k_helper_tlbwr_mipsel -#define r4k_helper_tlbp r4k_helper_tlbp_mipsel -#define r4k_helper_tlbr r4k_helper_tlbr_mipsel -#define helper_tlbwi helper_tlbwi_mipsel -#define helper_tlbwr helper_tlbwr_mipsel -#define helper_tlbp helper_tlbp_mipsel -#define helper_tlbr helper_tlbr_mipsel -#define helper_tlbinv helper_tlbinv_mipsel -#define helper_tlbinvf helper_tlbinvf_mipsel -#define helper_di 
helper_di_mipsel -#define helper_ei helper_ei_mipsel -#define helper_eret helper_eret_mipsel -#define helper_deret helper_deret_mipsel -#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mipsel -#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mipsel -#define helper_rdhwr_cc helper_rdhwr_cc_mipsel -#define helper_rdhwr_ccres helper_rdhwr_ccres_mipsel -#define helper_pmon helper_pmon_mipsel -#define helper_wait helper_wait_mipsel -#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mipsel -#define mips_cpu_unassigned_access mips_cpu_unassigned_access_mipsel -#define ieee_rm ieee_rm_mipsel #define helper_cfc1 helper_cfc1_mipsel #define helper_ctc1 helper_ctc1_mipsel #define ieee_ex_to_mips ieee_ex_to_mips_mipsel @@ -3537,8 +1664,8 @@ #define helper_float_cvtd_s helper_float_cvtd_s_mipsel #define helper_float_cvtd_w helper_float_cvtd_w_mipsel #define helper_float_cvtd_l helper_float_cvtd_l_mipsel -#define helper_float_cvtl_d helper_float_cvtl_d_mipsel -#define helper_float_cvtl_s helper_float_cvtl_s_mipsel +#define helper_float_cvt_l_d helper_float_cvt_l_d_mipsel +#define helper_float_cvt_l_s helper_float_cvt_l_s_mipsel #define helper_float_cvtps_pw helper_float_cvtps_pw_mipsel #define helper_float_cvtpw_ps helper_float_cvtpw_ps_mipsel #define helper_float_cvts_d helper_float_cvts_d_mipsel @@ -3546,46 +1673,50 @@ #define helper_float_cvts_l helper_float_cvts_l_mipsel #define helper_float_cvts_pl helper_float_cvts_pl_mipsel #define helper_float_cvts_pu helper_float_cvts_pu_mipsel -#define helper_float_cvtw_s helper_float_cvtw_s_mipsel -#define helper_float_cvtw_d helper_float_cvtw_d_mipsel -#define helper_float_roundl_d helper_float_roundl_d_mipsel -#define helper_float_roundl_s helper_float_roundl_s_mipsel -#define helper_float_roundw_d helper_float_roundw_d_mipsel -#define helper_float_roundw_s helper_float_roundw_s_mipsel -#define helper_float_truncl_d helper_float_truncl_d_mipsel -#define helper_float_truncl_s helper_float_truncl_s_mipsel -#define helper_float_truncw_d helper_float_truncw_d_mipsel -#define helper_float_truncw_s helper_float_truncw_s_mipsel -#define helper_float_ceill_d helper_float_ceill_d_mipsel -#define helper_float_ceill_s helper_float_ceill_s_mipsel -#define helper_float_ceilw_d helper_float_ceilw_d_mipsel -#define helper_float_ceilw_s helper_float_ceilw_s_mipsel -#define helper_float_floorl_d helper_float_floorl_d_mipsel -#define helper_float_floorl_s helper_float_floorl_s_mipsel -#define helper_float_floorw_d helper_float_floorw_d_mipsel -#define helper_float_floorw_s helper_float_floorw_s_mipsel +#define helper_float_cvt_w_s helper_float_cvt_w_s_mipsel +#define helper_float_cvt_w_d helper_float_cvt_w_d_mipsel +#define helper_float_round_l_d helper_float_round_l_d_mipsel +#define helper_float_round_l_s helper_float_round_l_s_mipsel +#define helper_float_round_w_d helper_float_round_w_d_mipsel +#define helper_float_round_w_s helper_float_round_w_s_mipsel +#define helper_float_trunc_l_d helper_float_trunc_l_d_mipsel +#define helper_float_trunc_l_s helper_float_trunc_l_s_mipsel +#define helper_float_trunc_w_d helper_float_trunc_w_d_mipsel +#define helper_float_trunc_w_s helper_float_trunc_w_s_mipsel +#define helper_float_ceil_l_d helper_float_ceil_l_d_mipsel +#define helper_float_ceil_l_s helper_float_ceil_l_s_mipsel +#define helper_float_ceil_w_d helper_float_ceil_w_d_mipsel +#define helper_float_ceil_w_s helper_float_ceil_w_s_mipsel +#define helper_float_floor_l_d helper_float_floor_l_d_mipsel +#define helper_float_floor_l_s 
helper_float_floor_l_s_mipsel +#define helper_float_floor_w_d helper_float_floor_w_d_mipsel +#define helper_float_floor_w_s helper_float_floor_w_s_mipsel +#define helper_float_cvt_2008_l_d helper_float_cvt_2008_l_d_mipsel +#define helper_float_cvt_2008_l_s helper_float_cvt_2008_l_s_mipsel +#define helper_float_cvt_2008_w_d helper_float_cvt_2008_w_d_mipsel +#define helper_float_cvt_2008_w_s helper_float_cvt_2008_w_s_mipsel +#define helper_float_round_2008_l_d helper_float_round_2008_l_d_mipsel +#define helper_float_round_2008_l_s helper_float_round_2008_l_s_mipsel +#define helper_float_round_2008_w_d helper_float_round_2008_w_d_mipsel +#define helper_float_round_2008_w_s helper_float_round_2008_w_s_mipsel +#define helper_float_trunc_2008_l_d helper_float_trunc_2008_l_d_mipsel +#define helper_float_trunc_2008_l_s helper_float_trunc_2008_l_s_mipsel +#define helper_float_trunc_2008_w_d helper_float_trunc_2008_w_d_mipsel +#define helper_float_trunc_2008_w_s helper_float_trunc_2008_w_s_mipsel +#define helper_float_ceil_2008_l_d helper_float_ceil_2008_l_d_mipsel +#define helper_float_ceil_2008_l_s helper_float_ceil_2008_l_s_mipsel +#define helper_float_ceil_2008_w_d helper_float_ceil_2008_w_d_mipsel +#define helper_float_ceil_2008_w_s helper_float_ceil_2008_w_s_mipsel +#define helper_float_floor_2008_l_d helper_float_floor_2008_l_d_mipsel +#define helper_float_floor_2008_l_s helper_float_floor_2008_l_s_mipsel +#define helper_float_floor_2008_w_d helper_float_floor_2008_w_d_mipsel +#define helper_float_floor_2008_w_s helper_float_floor_2008_w_s_mipsel #define helper_float_abs_d helper_float_abs_d_mipsel #define helper_float_abs_s helper_float_abs_s_mipsel #define helper_float_abs_ps helper_float_abs_ps_mipsel #define helper_float_chs_d helper_float_chs_d_mipsel #define helper_float_chs_s helper_float_chs_s_mipsel #define helper_float_chs_ps helper_float_chs_ps_mipsel -#define helper_float_maddf_s helper_float_maddf_s_mipsel -#define helper_float_maddf_d helper_float_maddf_d_mipsel -#define helper_float_msubf_s helper_float_msubf_s_mipsel -#define helper_float_msubf_d helper_float_msubf_d_mipsel -#define helper_float_max_s helper_float_max_s_mipsel -#define helper_float_max_d helper_float_max_d_mipsel -#define helper_float_maxa_s helper_float_maxa_s_mipsel -#define helper_float_maxa_d helper_float_maxa_d_mipsel -#define helper_float_min_s helper_float_min_s_mipsel -#define helper_float_min_d helper_float_min_d_mipsel -#define helper_float_mina_s helper_float_mina_s_mipsel -#define helper_float_mina_d helper_float_mina_d_mipsel -#define helper_float_rint_s helper_float_rint_s_mipsel -#define helper_float_rint_d helper_float_rint_d_mipsel -#define helper_float_class_s helper_float_class_s_mipsel -#define helper_float_class_d helper_float_class_d_mipsel #define helper_float_recip_d helper_float_recip_d_mipsel #define helper_float_recip_s helper_float_recip_s_mipsel #define helper_float_rsqrt_d helper_float_rsqrt_d_mipsel @@ -3596,6 +1727,12 @@ #define helper_float_rsqrt1_d helper_float_rsqrt1_d_mipsel #define helper_float_rsqrt1_s helper_float_rsqrt1_s_mipsel #define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mipsel +#define helper_float_rint_s helper_float_rint_s_mipsel +#define helper_float_rint_d helper_float_rint_d_mipsel +#define float_class_s float_class_s_mipsel +#define helper_float_class_s helper_float_class_s_mipsel +#define float_class_d float_class_d_mipsel +#define helper_float_class_d helper_float_class_d_mipsel #define helper_float_add_d helper_float_add_d_mipsel #define 
helper_float_add_s helper_float_add_s_mipsel #define helper_float_add_ps helper_float_add_ps_mipsel @@ -3608,6 +1745,22 @@ #define helper_float_div_d helper_float_div_d_mipsel #define helper_float_div_s helper_float_div_s_mipsel #define helper_float_div_ps helper_float_div_ps_mipsel +#define helper_float_recip2_d helper_float_recip2_d_mipsel +#define helper_float_recip2_s helper_float_recip2_s_mipsel +#define helper_float_recip2_ps helper_float_recip2_ps_mipsel +#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mipsel +#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mipsel +#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mipsel +#define helper_float_addr_ps helper_float_addr_ps_mipsel +#define helper_float_mulr_ps helper_float_mulr_ps_mipsel +#define helper_float_max_s helper_float_max_s_mipsel +#define helper_float_max_d helper_float_max_d_mipsel +#define helper_float_maxa_s helper_float_maxa_s_mipsel +#define helper_float_maxa_d helper_float_maxa_d_mipsel +#define helper_float_min_s helper_float_min_s_mipsel +#define helper_float_min_d helper_float_min_d_mipsel +#define helper_float_mina_s helper_float_mina_s_mipsel +#define helper_float_mina_d helper_float_mina_d_mipsel #define helper_float_madd_d helper_float_madd_d_mipsel #define helper_float_madd_s helper_float_madd_s_mipsel #define helper_float_madd_ps helper_float_madd_ps_mipsel @@ -3620,14 +1773,10 @@ #define helper_float_nmsub_d helper_float_nmsub_d_mipsel #define helper_float_nmsub_s helper_float_nmsub_s_mipsel #define helper_float_nmsub_ps helper_float_nmsub_ps_mipsel -#define helper_float_recip2_d helper_float_recip2_d_mipsel -#define helper_float_recip2_s helper_float_recip2_s_mipsel -#define helper_float_recip2_ps helper_float_recip2_ps_mipsel -#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mipsel -#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mipsel -#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mipsel -#define helper_float_addr_ps helper_float_addr_ps_mipsel -#define helper_float_mulr_ps helper_float_mulr_ps_mipsel +#define helper_float_maddf_s helper_float_maddf_s_mipsel +#define helper_float_maddf_d helper_float_maddf_d_mipsel +#define helper_float_msubf_s helper_float_msubf_s_mipsel +#define helper_float_msubf_d helper_float_msubf_d_mipsel #define helper_cmp_d_f helper_cmp_d_f_mipsel #define helper_cmpabs_d_f helper_cmpabs_d_f_mipsel #define helper_cmp_d_un helper_cmp_d_un_mipsel @@ -3768,161 +1917,475 @@ #define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mipsel #define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mipsel #define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mipsel -#define helper_msa_ld_df helper_msa_ld_df_mipsel -#define helper_msa_st_df helper_msa_st_df_mipsel #define no_mmu_map_address no_mmu_map_address_mipsel #define fixed_mmu_map_address fixed_mmu_map_address_mipsel #define r4k_map_address r4k_map_address_mipsel +#define cpu_mips_tlb_flush cpu_mips_tlb_flush_mipsel +#define sync_c0_status sync_c0_status_mipsel +#define cpu_mips_store_status cpu_mips_store_status_mipsel +#define cpu_mips_store_cause cpu_mips_store_cause_mipsel #define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mipsel -#define mips_cpu_handle_mmu_fault mips_cpu_handle_mmu_fault_mipsel +#define mips_cpu_tlb_fill mips_cpu_tlb_fill_mipsel #define cpu_mips_translate_address cpu_mips_translate_address_mipsel #define exception_resume_pc exception_resume_pc_mipsel #define mips_cpu_do_interrupt mips_cpu_do_interrupt_mipsel #define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mipsel #define 
r4k_invalidate_tlb r4k_invalidate_tlb_mipsel -#define helper_absq_s_ob helper_absq_s_ob_mipsel -#define helper_absq_s_qh helper_absq_s_qh_mipsel -#define helper_absq_s_pw helper_absq_s_pw_mipsel -#define helper_adduh_ob helper_adduh_ob_mipsel -#define helper_adduh_r_ob helper_adduh_r_ob_mipsel -#define helper_subuh_ob helper_subuh_ob_mipsel -#define helper_subuh_r_ob helper_subuh_r_ob_mipsel -#define helper_addq_pw helper_addq_pw_mipsel -#define helper_addq_qh helper_addq_qh_mipsel -#define helper_addq_s_pw helper_addq_s_pw_mipsel -#define helper_addq_s_qh helper_addq_s_qh_mipsel -#define helper_addu_ob helper_addu_ob_mipsel -#define helper_addu_qh helper_addu_qh_mipsel -#define helper_addu_s_ob helper_addu_s_ob_mipsel -#define helper_addu_s_qh helper_addu_s_qh_mipsel -#define helper_subq_pw helper_subq_pw_mipsel -#define helper_subq_qh helper_subq_qh_mipsel -#define helper_subq_s_pw helper_subq_s_pw_mipsel -#define helper_subq_s_qh helper_subq_s_qh_mipsel -#define helper_subu_ob helper_subu_ob_mipsel -#define helper_subu_qh helper_subu_qh_mipsel -#define helper_subu_s_ob helper_subu_s_ob_mipsel -#define helper_subu_s_qh helper_subu_s_qh_mipsel -#define helper_raddu_l_ob helper_raddu_l_ob_mipsel -#define helper_precr_ob_qh helper_precr_ob_qh_mipsel -#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mipsel -#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mipsel -#define helper_precrq_ob_qh helper_precrq_ob_qh_mipsel -#define helper_precrq_qh_pw helper_precrq_qh_pw_mipsel -#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mipsel -#define helper_precrq_pw_l helper_precrq_pw_l_mipsel -#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mipsel -#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mipsel -#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mipsel -#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mipsel -#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mipsel -#define helper_precequ_qh_obl helper_precequ_qh_obl_mipsel -#define helper_precequ_qh_obr helper_precequ_qh_obr_mipsel -#define helper_precequ_qh_obla helper_precequ_qh_obla_mipsel -#define helper_precequ_qh_obra helper_precequ_qh_obra_mipsel -#define helper_preceu_qh_obl helper_preceu_qh_obl_mipsel -#define helper_preceu_qh_obr helper_preceu_qh_obr_mipsel -#define helper_preceu_qh_obla helper_preceu_qh_obla_mipsel -#define helper_preceu_qh_obra helper_preceu_qh_obra_mipsel -#define helper_shll_ob helper_shll_ob_mipsel -#define helper_shrl_ob helper_shrl_ob_mipsel -#define helper_shra_ob helper_shra_ob_mipsel -#define helper_shra_r_ob helper_shra_r_ob_mipsel -#define helper_shll_qh helper_shll_qh_mipsel -#define helper_shll_s_qh helper_shll_s_qh_mipsel -#define helper_shrl_qh helper_shrl_qh_mipsel -#define helper_shra_qh helper_shra_qh_mipsel -#define helper_shra_r_qh helper_shra_r_qh_mipsel -#define helper_shll_pw helper_shll_pw_mipsel -#define helper_shll_s_pw helper_shll_s_pw_mipsel -#define helper_shra_pw helper_shra_pw_mipsel -#define helper_shra_r_pw helper_shra_r_pw_mipsel -#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mipsel -#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mipsel -#define helper_mulq_rs_qh helper_mulq_rs_qh_mipsel -#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mipsel -#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mipsel -#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mipsel -#define helper_dpau_h_obl helper_dpau_h_obl_mipsel -#define helper_dpau_h_obr helper_dpau_h_obr_mipsel -#define helper_dpsu_h_obl helper_dpsu_h_obl_mipsel -#define helper_dpsu_h_obr 
helper_dpsu_h_obr_mipsel -#define helper_dpa_w_qh helper_dpa_w_qh_mipsel -#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mipsel -#define helper_dps_w_qh helper_dps_w_qh_mipsel -#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mipsel -#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mipsel -#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mipsel -#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mipsel -#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mipsel -#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mipsel -#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mipsel -#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mipsel -#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mipsel -#define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mipsel -#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mipsel -#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mipsel -#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mipsel -#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mipsel -#define helper_dmadd helper_dmadd_mipsel -#define helper_dmaddu helper_dmaddu_mipsel -#define helper_dmsub helper_dmsub_mipsel -#define helper_dmsubu helper_dmsubu_mipsel -#define helper_dinsv helper_dinsv_mipsel -#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mipsel -#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mipsel -#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mipsel -#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mipsel -#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mipsel -#define helper_cmpu_le_ob helper_cmpu_le_ob_mipsel -#define helper_cmp_eq_qh helper_cmp_eq_qh_mipsel -#define helper_cmp_lt_qh helper_cmp_lt_qh_mipsel -#define helper_cmp_le_qh helper_cmp_le_qh_mipsel -#define helper_cmp_eq_pw helper_cmp_eq_pw_mipsel -#define helper_cmp_lt_pw helper_cmp_lt_pw_mipsel -#define helper_cmp_le_pw helper_cmp_le_pw_mipsel -#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mipsel -#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mipsel -#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mipsel -#define helper_pick_ob helper_pick_ob_mipsel -#define helper_pick_qh helper_pick_qh_mipsel -#define helper_pick_pw helper_pick_pw_mipsel -#define helper_packrl_pw helper_packrl_pw_mipsel -#define helper_dextr_w helper_dextr_w_mipsel -#define helper_dextr_r_w helper_dextr_r_w_mipsel -#define helper_dextr_rs_w helper_dextr_rs_w_mipsel -#define helper_dextr_l helper_dextr_l_mipsel -#define helper_dextr_r_l helper_dextr_r_l_mipsel -#define helper_dextr_rs_l helper_dextr_rs_l_mipsel -#define helper_dextr_s_h helper_dextr_s_h_mipsel -#define helper_dextp helper_dextp_mipsel -#define helper_dextpdp helper_dextpdp_mipsel -#define helper_dshilo helper_dshilo_mipsel -#define helper_dmthlip helper_dmthlip_mipsel -#define helper_dclo helper_dclo_mipsel -#define helper_dclz helper_dclz_mipsel +#define do_raise_exception_err do_raise_exception_err_mipsel +#define helper_paddsb helper_paddsb_mipsel +#define helper_paddusb helper_paddusb_mipsel +#define helper_paddsh helper_paddsh_mipsel +#define helper_paddush helper_paddush_mipsel +#define helper_paddb helper_paddb_mipsel +#define helper_paddh helper_paddh_mipsel +#define helper_paddw helper_paddw_mipsel +#define helper_psubsb helper_psubsb_mipsel +#define helper_psubusb helper_psubusb_mipsel +#define helper_psubsh helper_psubsh_mipsel +#define helper_psubush helper_psubush_mipsel +#define helper_psubb helper_psubb_mipsel +#define helper_psubh helper_psubh_mipsel +#define helper_psubw helper_psubw_mipsel +#define helper_pshufh helper_pshufh_mipsel +#define helper_packsswh helper_packsswh_mipsel +#define helper_packsshb 
helper_packsshb_mipsel +#define helper_packushb helper_packushb_mipsel +#define helper_punpcklwd helper_punpcklwd_mipsel +#define helper_punpckhwd helper_punpckhwd_mipsel +#define helper_punpcklhw helper_punpcklhw_mipsel +#define helper_punpckhhw helper_punpckhhw_mipsel +#define helper_punpcklbh helper_punpcklbh_mipsel +#define helper_punpckhbh helper_punpckhbh_mipsel +#define helper_pavgh helper_pavgh_mipsel +#define helper_pavgb helper_pavgb_mipsel +#define helper_pmaxsh helper_pmaxsh_mipsel +#define helper_pminsh helper_pminsh_mipsel +#define helper_pmaxub helper_pmaxub_mipsel +#define helper_pminub helper_pminub_mipsel +#define helper_pcmpeqw helper_pcmpeqw_mipsel +#define helper_pcmpgtw helper_pcmpgtw_mipsel +#define helper_pcmpeqh helper_pcmpeqh_mipsel +#define helper_pcmpgth helper_pcmpgth_mipsel +#define helper_pcmpeqb helper_pcmpeqb_mipsel +#define helper_pcmpgtb helper_pcmpgtb_mipsel +#define helper_psllw helper_psllw_mipsel +#define helper_psrlw helper_psrlw_mipsel +#define helper_psraw helper_psraw_mipsel +#define helper_psllh helper_psllh_mipsel +#define helper_psrlh helper_psrlh_mipsel +#define helper_psrah helper_psrah_mipsel +#define helper_pmullh helper_pmullh_mipsel +#define helper_pmulhh helper_pmulhh_mipsel +#define helper_pmulhuh helper_pmulhuh_mipsel +#define helper_pmaddhw helper_pmaddhw_mipsel +#define helper_pasubub helper_pasubub_mipsel +#define helper_biadd helper_biadd_mipsel +#define helper_pmovmskb helper_pmovmskb_mipsel +#define helper_msa_nloc_b helper_msa_nloc_b_mipsel +#define helper_msa_nloc_h helper_msa_nloc_h_mipsel +#define helper_msa_nloc_w helper_msa_nloc_w_mipsel +#define helper_msa_nloc_d helper_msa_nloc_d_mipsel +#define helper_msa_nlzc_b helper_msa_nlzc_b_mipsel +#define helper_msa_nlzc_h helper_msa_nlzc_h_mipsel +#define helper_msa_nlzc_w helper_msa_nlzc_w_mipsel +#define helper_msa_nlzc_d helper_msa_nlzc_d_mipsel +#define helper_msa_pcnt_b helper_msa_pcnt_b_mipsel +#define helper_msa_pcnt_h helper_msa_pcnt_h_mipsel +#define helper_msa_pcnt_w helper_msa_pcnt_w_mipsel +#define helper_msa_pcnt_d helper_msa_pcnt_d_mipsel +#define helper_msa_binsl_b helper_msa_binsl_b_mipsel +#define helper_msa_binsl_h helper_msa_binsl_h_mipsel +#define helper_msa_binsl_w helper_msa_binsl_w_mipsel +#define helper_msa_binsl_d helper_msa_binsl_d_mipsel +#define helper_msa_binsr_b helper_msa_binsr_b_mipsel +#define helper_msa_binsr_h helper_msa_binsr_h_mipsel +#define helper_msa_binsr_w helper_msa_binsr_w_mipsel +#define helper_msa_binsr_d helper_msa_binsr_d_mipsel +#define helper_msa_bmnz_v helper_msa_bmnz_v_mipsel +#define helper_msa_bmz_v helper_msa_bmz_v_mipsel +#define helper_msa_bsel_v helper_msa_bsel_v_mipsel +#define helper_msa_bclr_b helper_msa_bclr_b_mipsel +#define helper_msa_bclr_h helper_msa_bclr_h_mipsel +#define helper_msa_bclr_w helper_msa_bclr_w_mipsel +#define helper_msa_bclr_d helper_msa_bclr_d_mipsel +#define helper_msa_bneg_b helper_msa_bneg_b_mipsel +#define helper_msa_bneg_h helper_msa_bneg_h_mipsel +#define helper_msa_bneg_w helper_msa_bneg_w_mipsel +#define helper_msa_bneg_d helper_msa_bneg_d_mipsel +#define helper_msa_bset_b helper_msa_bset_b_mipsel +#define helper_msa_bset_h helper_msa_bset_h_mipsel +#define helper_msa_bset_w helper_msa_bset_w_mipsel +#define helper_msa_bset_d helper_msa_bset_d_mipsel +#define helper_msa_add_a_b helper_msa_add_a_b_mipsel +#define helper_msa_add_a_h helper_msa_add_a_h_mipsel +#define helper_msa_add_a_w helper_msa_add_a_w_mipsel +#define helper_msa_add_a_d helper_msa_add_a_d_mipsel +#define helper_msa_adds_a_b 
helper_msa_adds_a_b_mipsel +#define helper_msa_adds_a_h helper_msa_adds_a_h_mipsel +#define helper_msa_adds_a_w helper_msa_adds_a_w_mipsel +#define helper_msa_adds_a_d helper_msa_adds_a_d_mipsel +#define helper_msa_adds_s_b helper_msa_adds_s_b_mipsel +#define helper_msa_adds_s_h helper_msa_adds_s_h_mipsel +#define helper_msa_adds_s_w helper_msa_adds_s_w_mipsel +#define helper_msa_adds_s_d helper_msa_adds_s_d_mipsel +#define helper_msa_adds_u_b helper_msa_adds_u_b_mipsel +#define helper_msa_adds_u_h helper_msa_adds_u_h_mipsel +#define helper_msa_adds_u_w helper_msa_adds_u_w_mipsel +#define helper_msa_adds_u_d helper_msa_adds_u_d_mipsel +#define helper_msa_addv_b helper_msa_addv_b_mipsel +#define helper_msa_addv_h helper_msa_addv_h_mipsel +#define helper_msa_addv_w helper_msa_addv_w_mipsel +#define helper_msa_addv_d helper_msa_addv_d_mipsel +#define helper_msa_hadd_s_h helper_msa_hadd_s_h_mipsel +#define helper_msa_hadd_s_w helper_msa_hadd_s_w_mipsel +#define helper_msa_hadd_s_d helper_msa_hadd_s_d_mipsel +#define helper_msa_hadd_u_h helper_msa_hadd_u_h_mipsel +#define helper_msa_hadd_u_w helper_msa_hadd_u_w_mipsel +#define helper_msa_hadd_u_d helper_msa_hadd_u_d_mipsel +#define helper_msa_ave_s_b helper_msa_ave_s_b_mipsel +#define helper_msa_ave_s_h helper_msa_ave_s_h_mipsel +#define helper_msa_ave_s_w helper_msa_ave_s_w_mipsel +#define helper_msa_ave_s_d helper_msa_ave_s_d_mipsel +#define helper_msa_ave_u_b helper_msa_ave_u_b_mipsel +#define helper_msa_ave_u_h helper_msa_ave_u_h_mipsel +#define helper_msa_ave_u_w helper_msa_ave_u_w_mipsel +#define helper_msa_ave_u_d helper_msa_ave_u_d_mipsel +#define helper_msa_aver_s_b helper_msa_aver_s_b_mipsel +#define helper_msa_aver_s_h helper_msa_aver_s_h_mipsel +#define helper_msa_aver_s_w helper_msa_aver_s_w_mipsel +#define helper_msa_aver_s_d helper_msa_aver_s_d_mipsel +#define helper_msa_aver_u_b helper_msa_aver_u_b_mipsel +#define helper_msa_aver_u_h helper_msa_aver_u_h_mipsel +#define helper_msa_aver_u_w helper_msa_aver_u_w_mipsel +#define helper_msa_aver_u_d helper_msa_aver_u_d_mipsel +#define helper_msa_ceq_b helper_msa_ceq_b_mipsel +#define helper_msa_ceq_h helper_msa_ceq_h_mipsel +#define helper_msa_ceq_w helper_msa_ceq_w_mipsel +#define helper_msa_ceq_d helper_msa_ceq_d_mipsel +#define helper_msa_cle_s_b helper_msa_cle_s_b_mipsel +#define helper_msa_cle_s_h helper_msa_cle_s_h_mipsel +#define helper_msa_cle_s_w helper_msa_cle_s_w_mipsel +#define helper_msa_cle_s_d helper_msa_cle_s_d_mipsel +#define helper_msa_cle_u_b helper_msa_cle_u_b_mipsel +#define helper_msa_cle_u_h helper_msa_cle_u_h_mipsel +#define helper_msa_cle_u_w helper_msa_cle_u_w_mipsel +#define helper_msa_cle_u_d helper_msa_cle_u_d_mipsel +#define helper_msa_clt_s_b helper_msa_clt_s_b_mipsel +#define helper_msa_clt_s_h helper_msa_clt_s_h_mipsel +#define helper_msa_clt_s_w helper_msa_clt_s_w_mipsel +#define helper_msa_clt_s_d helper_msa_clt_s_d_mipsel +#define helper_msa_clt_u_b helper_msa_clt_u_b_mipsel +#define helper_msa_clt_u_h helper_msa_clt_u_h_mipsel +#define helper_msa_clt_u_w helper_msa_clt_u_w_mipsel +#define helper_msa_clt_u_d helper_msa_clt_u_d_mipsel +#define helper_msa_div_s_b helper_msa_div_s_b_mipsel +#define helper_msa_div_s_h helper_msa_div_s_h_mipsel +#define helper_msa_div_s_w helper_msa_div_s_w_mipsel +#define helper_msa_div_s_d helper_msa_div_s_d_mipsel +#define helper_msa_div_u_b helper_msa_div_u_b_mipsel +#define helper_msa_div_u_h helper_msa_div_u_h_mipsel +#define helper_msa_div_u_w helper_msa_div_u_w_mipsel +#define helper_msa_div_u_d 
helper_msa_div_u_d_mipsel +#define helper_msa_max_a_b helper_msa_max_a_b_mipsel +#define helper_msa_max_a_h helper_msa_max_a_h_mipsel +#define helper_msa_max_a_w helper_msa_max_a_w_mipsel +#define helper_msa_max_a_d helper_msa_max_a_d_mipsel +#define helper_msa_max_s_b helper_msa_max_s_b_mipsel +#define helper_msa_max_s_h helper_msa_max_s_h_mipsel +#define helper_msa_max_s_w helper_msa_max_s_w_mipsel +#define helper_msa_max_s_d helper_msa_max_s_d_mipsel +#define helper_msa_max_u_b helper_msa_max_u_b_mipsel +#define helper_msa_max_u_h helper_msa_max_u_h_mipsel +#define helper_msa_max_u_w helper_msa_max_u_w_mipsel +#define helper_msa_max_u_d helper_msa_max_u_d_mipsel +#define helper_msa_min_a_b helper_msa_min_a_b_mipsel +#define helper_msa_min_a_h helper_msa_min_a_h_mipsel +#define helper_msa_min_a_w helper_msa_min_a_w_mipsel +#define helper_msa_min_a_d helper_msa_min_a_d_mipsel +#define helper_msa_min_s_b helper_msa_min_s_b_mipsel +#define helper_msa_min_s_h helper_msa_min_s_h_mipsel +#define helper_msa_min_s_w helper_msa_min_s_w_mipsel +#define helper_msa_min_s_d helper_msa_min_s_d_mipsel +#define helper_msa_min_u_b helper_msa_min_u_b_mipsel +#define helper_msa_min_u_h helper_msa_min_u_h_mipsel +#define helper_msa_min_u_w helper_msa_min_u_w_mipsel +#define helper_msa_min_u_d helper_msa_min_u_d_mipsel +#define helper_msa_mod_s_b helper_msa_mod_s_b_mipsel +#define helper_msa_mod_s_h helper_msa_mod_s_h_mipsel +#define helper_msa_mod_s_w helper_msa_mod_s_w_mipsel +#define helper_msa_mod_s_d helper_msa_mod_s_d_mipsel +#define helper_msa_mod_u_b helper_msa_mod_u_b_mipsel +#define helper_msa_mod_u_h helper_msa_mod_u_h_mipsel +#define helper_msa_mod_u_w helper_msa_mod_u_w_mipsel +#define helper_msa_mod_u_d helper_msa_mod_u_d_mipsel +#define helper_msa_asub_s_b helper_msa_asub_s_b_mipsel +#define helper_msa_asub_s_h helper_msa_asub_s_h_mipsel +#define helper_msa_asub_s_w helper_msa_asub_s_w_mipsel +#define helper_msa_asub_s_d helper_msa_asub_s_d_mipsel +#define helper_msa_asub_u_b helper_msa_asub_u_b_mipsel +#define helper_msa_asub_u_h helper_msa_asub_u_h_mipsel +#define helper_msa_asub_u_w helper_msa_asub_u_w_mipsel +#define helper_msa_asub_u_d helper_msa_asub_u_d_mipsel +#define helper_msa_hsub_s_h helper_msa_hsub_s_h_mipsel +#define helper_msa_hsub_s_w helper_msa_hsub_s_w_mipsel +#define helper_msa_hsub_s_d helper_msa_hsub_s_d_mipsel +#define helper_msa_hsub_u_h helper_msa_hsub_u_h_mipsel +#define helper_msa_hsub_u_w helper_msa_hsub_u_w_mipsel +#define helper_msa_hsub_u_d helper_msa_hsub_u_d_mipsel +#define helper_msa_ilvev_b helper_msa_ilvev_b_mipsel +#define helper_msa_ilvev_h helper_msa_ilvev_h_mipsel +#define helper_msa_ilvev_w helper_msa_ilvev_w_mipsel +#define helper_msa_ilvev_d helper_msa_ilvev_d_mipsel +#define helper_msa_ilvod_b helper_msa_ilvod_b_mipsel +#define helper_msa_ilvod_h helper_msa_ilvod_h_mipsel +#define helper_msa_ilvod_w helper_msa_ilvod_w_mipsel +#define helper_msa_ilvod_d helper_msa_ilvod_d_mipsel +#define helper_msa_ilvl_b helper_msa_ilvl_b_mipsel +#define helper_msa_ilvl_h helper_msa_ilvl_h_mipsel +#define helper_msa_ilvl_w helper_msa_ilvl_w_mipsel +#define helper_msa_ilvl_d helper_msa_ilvl_d_mipsel +#define helper_msa_ilvr_b helper_msa_ilvr_b_mipsel +#define helper_msa_ilvr_h helper_msa_ilvr_h_mipsel +#define helper_msa_ilvr_w helper_msa_ilvr_w_mipsel +#define helper_msa_ilvr_d helper_msa_ilvr_d_mipsel +#define helper_msa_and_v helper_msa_and_v_mipsel +#define helper_msa_nor_v helper_msa_nor_v_mipsel +#define helper_msa_or_v helper_msa_or_v_mipsel +#define 
helper_msa_xor_v helper_msa_xor_v_mipsel +#define helper_msa_move_v helper_msa_move_v_mipsel +#define helper_msa_pckev_b helper_msa_pckev_b_mipsel +#define helper_msa_pckev_h helper_msa_pckev_h_mipsel +#define helper_msa_pckev_w helper_msa_pckev_w_mipsel +#define helper_msa_pckev_d helper_msa_pckev_d_mipsel +#define helper_msa_pckod_b helper_msa_pckod_b_mipsel +#define helper_msa_pckod_h helper_msa_pckod_h_mipsel +#define helper_msa_pckod_w helper_msa_pckod_w_mipsel +#define helper_msa_pckod_d helper_msa_pckod_d_mipsel +#define helper_msa_sll_b helper_msa_sll_b_mipsel +#define helper_msa_sll_h helper_msa_sll_h_mipsel +#define helper_msa_sll_w helper_msa_sll_w_mipsel +#define helper_msa_sll_d helper_msa_sll_d_mipsel +#define helper_msa_sra_b helper_msa_sra_b_mipsel +#define helper_msa_sra_h helper_msa_sra_h_mipsel +#define helper_msa_sra_w helper_msa_sra_w_mipsel +#define helper_msa_sra_d helper_msa_sra_d_mipsel +#define helper_msa_srar_b helper_msa_srar_b_mipsel +#define helper_msa_srar_h helper_msa_srar_h_mipsel +#define helper_msa_srar_w helper_msa_srar_w_mipsel +#define helper_msa_srar_d helper_msa_srar_d_mipsel +#define helper_msa_srl_b helper_msa_srl_b_mipsel +#define helper_msa_srl_h helper_msa_srl_h_mipsel +#define helper_msa_srl_w helper_msa_srl_w_mipsel +#define helper_msa_srl_d helper_msa_srl_d_mipsel +#define helper_msa_srlr_b helper_msa_srlr_b_mipsel +#define helper_msa_srlr_h helper_msa_srlr_h_mipsel +#define helper_msa_srlr_w helper_msa_srlr_w_mipsel +#define helper_msa_srlr_d helper_msa_srlr_d_mipsel +#define helper_msa_andi_b helper_msa_andi_b_mipsel +#define helper_msa_ori_b helper_msa_ori_b_mipsel +#define helper_msa_nori_b helper_msa_nori_b_mipsel +#define helper_msa_xori_b helper_msa_xori_b_mipsel +#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mipsel +#define helper_msa_bmzi_b helper_msa_bmzi_b_mipsel +#define helper_msa_bseli_b helper_msa_bseli_b_mipsel +#define helper_msa_shf_df helper_msa_shf_df_mipsel +#define helper_msa_addvi_df helper_msa_addvi_df_mipsel +#define helper_msa_subvi_df helper_msa_subvi_df_mipsel +#define helper_msa_ceqi_df helper_msa_ceqi_df_mipsel +#define helper_msa_clei_s_df helper_msa_clei_s_df_mipsel +#define helper_msa_clei_u_df helper_msa_clei_u_df_mipsel +#define helper_msa_clti_s_df helper_msa_clti_s_df_mipsel +#define helper_msa_clti_u_df helper_msa_clti_u_df_mipsel +#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mipsel +#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mipsel +#define helper_msa_mini_s_df helper_msa_mini_s_df_mipsel +#define helper_msa_mini_u_df helper_msa_mini_u_df_mipsel +#define helper_msa_ldi_df helper_msa_ldi_df_mipsel +#define helper_msa_slli_df helper_msa_slli_df_mipsel +#define helper_msa_srai_df helper_msa_srai_df_mipsel +#define helper_msa_srli_df helper_msa_srli_df_mipsel +#define helper_msa_bclri_df helper_msa_bclri_df_mipsel +#define helper_msa_bseti_df helper_msa_bseti_df_mipsel +#define helper_msa_bnegi_df helper_msa_bnegi_df_mipsel +#define helper_msa_sat_s_df helper_msa_sat_s_df_mipsel +#define helper_msa_sat_u_df helper_msa_sat_u_df_mipsel +#define helper_msa_srari_df helper_msa_srari_df_mipsel +#define helper_msa_srlri_df helper_msa_srlri_df_mipsel +#define helper_msa_binsli_df helper_msa_binsli_df_mipsel +#define helper_msa_binsri_df helper_msa_binsri_df_mipsel +#define helper_msa_subv_df helper_msa_subv_df_mipsel +#define helper_msa_subs_s_df helper_msa_subs_s_df_mipsel +#define helper_msa_subs_u_df helper_msa_subs_u_df_mipsel +#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mipsel +#define 
helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mipsel +#define helper_msa_mulv_df helper_msa_mulv_df_mipsel +#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mipsel +#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mipsel +#define helper_msa_mul_q_df helper_msa_mul_q_df_mipsel +#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mipsel +#define helper_msa_sld_df helper_msa_sld_df_mipsel +#define helper_msa_maddv_df helper_msa_maddv_df_mipsel +#define helper_msa_msubv_df helper_msa_msubv_df_mipsel +#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mipsel +#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mipsel +#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mipsel +#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mipsel +#define helper_msa_binsl_df helper_msa_binsl_df_mipsel +#define helper_msa_binsr_df helper_msa_binsr_df_mipsel +#define helper_msa_madd_q_df helper_msa_madd_q_df_mipsel +#define helper_msa_msub_q_df helper_msa_msub_q_df_mipsel +#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mipsel +#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mipsel +#define helper_msa_splat_df helper_msa_splat_df_mipsel +#define helper_msa_vshf_df helper_msa_vshf_df_mipsel +#define helper_msa_sldi_df helper_msa_sldi_df_mipsel +#define helper_msa_splati_df helper_msa_splati_df_mipsel +#define helper_msa_copy_s_b helper_msa_copy_s_b_mipsel +#define helper_msa_copy_s_h helper_msa_copy_s_h_mipsel +#define helper_msa_copy_s_w helper_msa_copy_s_w_mipsel +#define helper_msa_copy_s_d helper_msa_copy_s_d_mipsel +#define helper_msa_copy_u_b helper_msa_copy_u_b_mipsel +#define helper_msa_copy_u_h helper_msa_copy_u_h_mipsel +#define helper_msa_copy_u_w helper_msa_copy_u_w_mipsel +#define helper_msa_insert_b helper_msa_insert_b_mipsel +#define helper_msa_insert_h helper_msa_insert_h_mipsel +#define helper_msa_insert_w helper_msa_insert_w_mipsel +#define helper_msa_insert_d helper_msa_insert_d_mipsel +#define helper_msa_insve_df helper_msa_insve_df_mipsel +#define helper_msa_ctcmsa helper_msa_ctcmsa_mipsel +#define helper_msa_cfcmsa helper_msa_cfcmsa_mipsel +#define helper_msa_fill_df helper_msa_fill_df_mipsel +#define helper_msa_fcaf_df helper_msa_fcaf_df_mipsel +#define helper_msa_fcun_df helper_msa_fcun_df_mipsel +#define helper_msa_fceq_df helper_msa_fceq_df_mipsel +#define helper_msa_fcueq_df helper_msa_fcueq_df_mipsel +#define helper_msa_fclt_df helper_msa_fclt_df_mipsel +#define helper_msa_fcult_df helper_msa_fcult_df_mipsel +#define helper_msa_fcle_df helper_msa_fcle_df_mipsel +#define helper_msa_fcule_df helper_msa_fcule_df_mipsel +#define helper_msa_fsaf_df helper_msa_fsaf_df_mipsel +#define helper_msa_fsun_df helper_msa_fsun_df_mipsel +#define helper_msa_fseq_df helper_msa_fseq_df_mipsel +#define helper_msa_fsueq_df helper_msa_fsueq_df_mipsel +#define helper_msa_fslt_df helper_msa_fslt_df_mipsel +#define helper_msa_fsult_df helper_msa_fsult_df_mipsel +#define helper_msa_fsle_df helper_msa_fsle_df_mipsel +#define helper_msa_fsule_df helper_msa_fsule_df_mipsel +#define helper_msa_fcor_df helper_msa_fcor_df_mipsel +#define helper_msa_fcune_df helper_msa_fcune_df_mipsel +#define helper_msa_fcne_df helper_msa_fcne_df_mipsel +#define helper_msa_fsor_df helper_msa_fsor_df_mipsel +#define helper_msa_fsune_df helper_msa_fsune_df_mipsel +#define helper_msa_fsne_df helper_msa_fsne_df_mipsel +#define helper_msa_fadd_df helper_msa_fadd_df_mipsel +#define helper_msa_fsub_df helper_msa_fsub_df_mipsel +#define helper_msa_fmul_df helper_msa_fmul_df_mipsel +#define helper_msa_fdiv_df 
helper_msa_fdiv_df_mipsel +#define helper_msa_fmadd_df helper_msa_fmadd_df_mipsel +#define helper_msa_fmsub_df helper_msa_fmsub_df_mipsel +#define helper_msa_fexp2_df helper_msa_fexp2_df_mipsel +#define helper_msa_fexdo_df helper_msa_fexdo_df_mipsel +#define helper_msa_ftq_df helper_msa_ftq_df_mipsel +#define helper_msa_fmin_df helper_msa_fmin_df_mipsel +#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mipsel +#define helper_msa_fmax_df helper_msa_fmax_df_mipsel +#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mipsel +#define helper_msa_fclass_df helper_msa_fclass_df_mipsel +#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mipsel +#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mipsel +#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mipsel +#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mipsel +#define helper_msa_frcp_df helper_msa_frcp_df_mipsel +#define helper_msa_frint_df helper_msa_frint_df_mipsel +#define helper_msa_flog2_df helper_msa_flog2_df_mipsel +#define helper_msa_fexupl_df helper_msa_fexupl_df_mipsel +#define helper_msa_fexupr_df helper_msa_fexupr_df_mipsel +#define helper_msa_ffql_df helper_msa_ffql_df_mipsel +#define helper_msa_ffqr_df helper_msa_ffqr_df_mipsel +#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mipsel +#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mipsel +#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mipsel +#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mipsel +#define helper_raise_exception_err helper_raise_exception_err_mipsel +#define helper_raise_exception helper_raise_exception_mipsel +#define helper_raise_exception_debug helper_raise_exception_debug_mipsel +#define helper_muls helper_muls_mipsel +#define helper_mulsu helper_mulsu_mipsel +#define helper_macc helper_macc_mipsel +#define helper_macchi helper_macchi_mipsel +#define helper_maccu helper_maccu_mipsel +#define helper_macchiu helper_macchiu_mipsel +#define helper_msac helper_msac_mipsel +#define helper_msachi helper_msachi_mipsel +#define helper_msacu helper_msacu_mipsel +#define helper_msachiu helper_msachiu_mipsel +#define helper_mulhi helper_mulhi_mipsel +#define helper_mulhiu helper_mulhiu_mipsel +#define helper_mulshi helper_mulshi_mipsel +#define helper_mulshiu helper_mulshiu_mipsel #define helper_dbitswap helper_dbitswap_mipsel +#define helper_bitswap helper_bitswap_mipsel +#define helper_rotx helper_rotx_mipsel +#define helper_ll helper_ll_mipsel #define helper_lld helper_lld_mipsel -#define helper_scd helper_scd_mipsel +#define helper_swl helper_swl_mipsel +#define helper_swr helper_swr_mipsel #define helper_sdl helper_sdl_mipsel #define helper_sdr helper_sdr_mipsel +#define helper_lwm helper_lwm_mipsel +#define helper_swm helper_swm_mipsel #define helper_ldm helper_ldm_mipsel #define helper_sdm helper_sdm_mipsel -#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mipsel -#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mipsel -#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mipsel -#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mipsel -#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mipsel -#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mipsel -#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mipsel -#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mipsel -#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mipsel +#define helper_fork helper_fork_mipsel +#define helper_yield helper_yield_mipsel +#define r4k_helper_tlbinv r4k_helper_tlbinv_mipsel +#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mipsel +#define 
r4k_helper_tlbwi r4k_helper_tlbwi_mipsel +#define r4k_helper_tlbwr r4k_helper_tlbwr_mipsel +#define r4k_helper_tlbp r4k_helper_tlbp_mipsel +#define r4k_helper_tlbr r4k_helper_tlbr_mipsel +#define helper_tlbwi helper_tlbwi_mipsel +#define helper_tlbwr helper_tlbwr_mipsel +#define helper_tlbp helper_tlbp_mipsel +#define helper_tlbr helper_tlbr_mipsel +#define helper_tlbinv helper_tlbinv_mipsel +#define helper_tlbinvf helper_tlbinvf_mipsel +#define helper_ginvt helper_ginvt_mipsel +#define helper_di helper_di_mipsel +#define helper_ei helper_ei_mipsel +#define helper_eret helper_eret_mipsel +#define helper_eretnc helper_eretnc_mipsel +#define helper_deret helper_deret_mipsel +#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mipsel +#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mipsel +#define helper_rdhwr_cc helper_rdhwr_cc_mipsel +#define helper_rdhwr_ccres helper_rdhwr_ccres_mipsel +#define helper_rdhwr_performance helper_rdhwr_performance_mipsel +#define helper_rdhwr_xnp helper_rdhwr_xnp_mipsel +#define helper_pmon helper_pmon_mipsel +#define helper_wait helper_wait_mipsel +#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mipsel +#define mips_cpu_do_transaction_failed mips_cpu_do_transaction_failed_mipsel +#define helper_msa_ld_b helper_msa_ld_b_mipsel +#define helper_msa_ld_h helper_msa_ld_h_mipsel +#define helper_msa_ld_w helper_msa_ld_w_mipsel +#define helper_msa_ld_d helper_msa_ld_d_mipsel +#define helper_msa_st_b helper_msa_st_b_mipsel +#define helper_msa_st_h helper_msa_st_h_mipsel +#define helper_msa_st_w helper_msa_st_w_mipsel +#define helper_msa_st_d helper_msa_st_d_mipsel +#define helper_cache helper_cache_mipsel +#define gen_intermediate_code gen_intermediate_code_mipsel +#define mips_tcg_init mips_tcg_init_mipsel +#define cpu_mips_realize_env cpu_mips_realize_env_mipsel +#define cpu_state_reset cpu_state_reset_mipsel +#define restore_state_to_opc restore_state_to_opc_mipsel #define mips_reg_reset mips_reg_reset_mipsel #define mips_reg_read mips_reg_read_mipsel #define mips_reg_write mips_reg_write_mipsel -#define mips_tcg_init mips_tcg_init_mipsel -#define mips_cpu_list mips_cpu_list_mipsel -#define mips_release mips_release_mipsel -#define MIPS64_REGS_STORAGE_SIZE MIPS64_REGS_STORAGE_SIZE_mipsel -#define MIPS_REGS_STORAGE_SIZE MIPS_REGS_STORAGE_SIZE_mipsel +#define ieee_rm ieee_rm_mipsel +#define mips_defs mips_defs_mipsel +#define mips_defs_number mips_defs_number_mipsel +#define gen_helper_float_class_s gen_helper_float_class_s_mipsel +#define gen_helper_float_class_d gen_helper_float_class_d_mipsel #endif diff --git a/qemu/ppc.h b/qemu/ppc.h new file mode 100644 index 00000000..67d0016c --- /dev/null +++ b/qemu/ppc.h @@ -0,0 +1,1707 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_ppc_H +#define UNICORN_AUTOGEN_ppc_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _ppc +#endif +#define arm_arch arm_arch_ppc +#define tb_target_set_jmp_target tb_target_set_jmp_target_ppc +#define have_bmi1 have_bmi1_ppc +#define have_popcnt have_popcnt_ppc +#define have_avx1 have_avx1_ppc +#define have_avx2 have_avx2_ppc +#define have_isa have_isa_ppc +#define have_altivec have_altivec_ppc +#define have_vsx have_vsx_ppc +#define flush_icache_range flush_icache_range_ppc +#define s390_facilities s390_facilities_ppc +#define tcg_dump_op tcg_dump_op_ppc +#define tcg_dump_ops tcg_dump_ops_ppc +#define tcg_gen_and_i64 tcg_gen_and_i64_ppc +#define tcg_gen_discard_i64 tcg_gen_discard_i64_ppc +#define tcg_gen_ld16s_i64 
tcg_gen_ld16s_i64_ppc +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_ppc +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_ppc +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_ppc +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_ppc +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_ppc +#define tcg_gen_ld_i64 tcg_gen_ld_i64_ppc +#define tcg_gen_mov_i64 tcg_gen_mov_i64_ppc +#define tcg_gen_movi_i64 tcg_gen_movi_i64_ppc +#define tcg_gen_mul_i64 tcg_gen_mul_i64_ppc +#define tcg_gen_or_i64 tcg_gen_or_i64_ppc +#define tcg_gen_sar_i64 tcg_gen_sar_i64_ppc +#define tcg_gen_shl_i64 tcg_gen_shl_i64_ppc +#define tcg_gen_shr_i64 tcg_gen_shr_i64_ppc +#define tcg_gen_st_i64 tcg_gen_st_i64_ppc +#define tcg_gen_xor_i64 tcg_gen_xor_i64_ppc +#define cpu_icount_to_ns cpu_icount_to_ns_ppc +#define cpu_is_stopped cpu_is_stopped_ppc +#define cpu_get_ticks cpu_get_ticks_ppc +#define cpu_get_clock cpu_get_clock_ppc +#define cpu_resume cpu_resume_ppc +#define qemu_init_vcpu qemu_init_vcpu_ppc +#define cpu_stop_current cpu_stop_current_ppc +#define resume_all_vcpus resume_all_vcpus_ppc +#define vm_start vm_start_ppc +#define address_space_dispatch_compact address_space_dispatch_compact_ppc +#define flatview_translate flatview_translate_ppc +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_ppc +#define qemu_get_cpu qemu_get_cpu_ppc +#define cpu_address_space_init cpu_address_space_init_ppc +#define cpu_get_address_space cpu_get_address_space_ppc +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_ppc +#define cpu_exec_initfn cpu_exec_initfn_ppc +#define cpu_exec_realizefn cpu_exec_realizefn_ppc +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_ppc +#define cpu_watchpoint_insert cpu_watchpoint_insert_ppc +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_ppc +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_ppc +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_ppc +#define cpu_breakpoint_insert cpu_breakpoint_insert_ppc +#define cpu_breakpoint_remove cpu_breakpoint_remove_ppc +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_ppc +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_ppc +#define cpu_abort cpu_abort_ppc +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_ppc +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_ppc +#define flatview_add_to_dispatch flatview_add_to_dispatch_ppc +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_ppc +#define qemu_ram_get_offset qemu_ram_get_offset_ppc +#define qemu_ram_get_used_length qemu_ram_get_used_length_ppc +#define qemu_ram_is_shared qemu_ram_is_shared_ppc +#define qemu_ram_pagesize qemu_ram_pagesize_ppc +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_ppc +#define qemu_ram_alloc qemu_ram_alloc_ppc +#define qemu_ram_free qemu_ram_free_ppc +#define qemu_map_ram_ptr qemu_map_ram_ptr_ppc +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_ppc +#define qemu_ram_block_from_host qemu_ram_block_from_host_ppc +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_ppc +#define cpu_check_watchpoint cpu_check_watchpoint_ppc +#define iotlb_to_section iotlb_to_section_ppc +#define address_space_dispatch_new address_space_dispatch_new_ppc +#define address_space_dispatch_free address_space_dispatch_free_ppc +#define flatview_read_continue flatview_read_continue_ppc +#define address_space_read_full address_space_read_full_ppc +#define address_space_write address_space_write_ppc +#define address_space_rw 
address_space_rw_ppc +#define cpu_physical_memory_rw cpu_physical_memory_rw_ppc +#define address_space_write_rom address_space_write_rom_ppc +#define cpu_flush_icache_range cpu_flush_icache_range_ppc +#define cpu_exec_init_all cpu_exec_init_all_ppc +#define address_space_access_valid address_space_access_valid_ppc +#define address_space_map address_space_map_ppc +#define address_space_unmap address_space_unmap_ppc +#define cpu_physical_memory_map cpu_physical_memory_map_ppc +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_ppc +#define cpu_memory_rw_debug cpu_memory_rw_debug_ppc +#define qemu_target_page_size qemu_target_page_size_ppc +#define qemu_target_page_bits qemu_target_page_bits_ppc +#define qemu_target_page_bits_min qemu_target_page_bits_min_ppc +#define target_words_bigendian target_words_bigendian_ppc +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_ppc +#define ram_block_discard_range ram_block_discard_range_ppc +#define ramblock_is_pmem ramblock_is_pmem_ppc +#define page_size_init page_size_init_ppc +#define set_preferred_target_page_bits set_preferred_target_page_bits_ppc +#define finalize_target_page_bits finalize_target_page_bits_ppc +#define cpu_outb cpu_outb_ppc +#define cpu_outw cpu_outw_ppc +#define cpu_outl cpu_outl_ppc +#define cpu_inb cpu_inb_ppc +#define cpu_inw cpu_inw_ppc +#define cpu_inl cpu_inl_ppc +#define memory_map memory_map_ppc +#define memory_map_io memory_map_io_ppc +#define memory_map_ptr memory_map_ptr_ppc +#define memory_unmap memory_unmap_ppc +#define memory_free memory_free_ppc +#define flatview_unref flatview_unref_ppc +#define address_space_get_flatview address_space_get_flatview_ppc +#define memory_region_transaction_begin memory_region_transaction_begin_ppc +#define memory_region_transaction_commit memory_region_transaction_commit_ppc +#define memory_region_init memory_region_init_ppc +#define memory_region_access_valid memory_region_access_valid_ppc +#define memory_region_dispatch_read memory_region_dispatch_read_ppc +#define memory_region_dispatch_write memory_region_dispatch_write_ppc +#define memory_region_init_io memory_region_init_io_ppc +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_ppc +#define memory_region_size memory_region_size_ppc +#define memory_region_set_readonly memory_region_set_readonly_ppc +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_ppc +#define memory_region_from_host memory_region_from_host_ppc +#define memory_region_get_ram_addr memory_region_get_ram_addr_ppc +#define memory_region_add_subregion memory_region_add_subregion_ppc +#define memory_region_del_subregion memory_region_del_subregion_ppc +#define memory_region_find memory_region_find_ppc +#define memory_listener_register memory_listener_register_ppc +#define memory_listener_unregister memory_listener_unregister_ppc +#define address_space_remove_listeners address_space_remove_listeners_ppc +#define address_space_init address_space_init_ppc +#define address_space_destroy address_space_destroy_ppc +#define memory_region_init_ram memory_region_init_ram_ppc +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_ppc +#define exec_inline_op exec_inline_op_ppc +#define floatx80_default_nan floatx80_default_nan_ppc +#define float_raise float_raise_ppc +#define float16_is_quiet_nan float16_is_quiet_nan_ppc +#define float16_is_signaling_nan float16_is_signaling_nan_ppc +#define float32_is_quiet_nan float32_is_quiet_nan_ppc +#define float32_is_signaling_nan float32_is_signaling_nan_ppc +#define 
float64_is_quiet_nan float64_is_quiet_nan_ppc +#define float64_is_signaling_nan float64_is_signaling_nan_ppc +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_ppc +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_ppc +#define floatx80_silence_nan floatx80_silence_nan_ppc +#define propagateFloatx80NaN propagateFloatx80NaN_ppc +#define float128_is_quiet_nan float128_is_quiet_nan_ppc +#define float128_is_signaling_nan float128_is_signaling_nan_ppc +#define float128_silence_nan float128_silence_nan_ppc +#define float16_add float16_add_ppc +#define float16_sub float16_sub_ppc +#define float32_add float32_add_ppc +#define float32_sub float32_sub_ppc +#define float64_add float64_add_ppc +#define float64_sub float64_sub_ppc +#define float16_mul float16_mul_ppc +#define float32_mul float32_mul_ppc +#define float64_mul float64_mul_ppc +#define float16_muladd float16_muladd_ppc +#define float32_muladd float32_muladd_ppc +#define float64_muladd float64_muladd_ppc +#define float16_div float16_div_ppc +#define float32_div float32_div_ppc +#define float64_div float64_div_ppc +#define float16_to_float32 float16_to_float32_ppc +#define float16_to_float64 float16_to_float64_ppc +#define float32_to_float16 float32_to_float16_ppc +#define float32_to_float64 float32_to_float64_ppc +#define float64_to_float16 float64_to_float16_ppc +#define float64_to_float32 float64_to_float32_ppc +#define float16_round_to_int float16_round_to_int_ppc +#define float32_round_to_int float32_round_to_int_ppc +#define float64_round_to_int float64_round_to_int_ppc +#define float16_to_int16_scalbn float16_to_int16_scalbn_ppc +#define float16_to_int32_scalbn float16_to_int32_scalbn_ppc +#define float16_to_int64_scalbn float16_to_int64_scalbn_ppc +#define float32_to_int16_scalbn float32_to_int16_scalbn_ppc +#define float32_to_int32_scalbn float32_to_int32_scalbn_ppc +#define float32_to_int64_scalbn float32_to_int64_scalbn_ppc +#define float64_to_int16_scalbn float64_to_int16_scalbn_ppc +#define float64_to_int32_scalbn float64_to_int32_scalbn_ppc +#define float64_to_int64_scalbn float64_to_int64_scalbn_ppc +#define float16_to_int16 float16_to_int16_ppc +#define float16_to_int32 float16_to_int32_ppc +#define float16_to_int64 float16_to_int64_ppc +#define float32_to_int16 float32_to_int16_ppc +#define float32_to_int32 float32_to_int32_ppc +#define float32_to_int64 float32_to_int64_ppc +#define float64_to_int16 float64_to_int16_ppc +#define float64_to_int32 float64_to_int32_ppc +#define float64_to_int64 float64_to_int64_ppc +#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_ppc +#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_ppc +#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_ppc +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_ppc +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_ppc +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_ppc +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_ppc +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_ppc +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_ppc +#define float16_to_uint16_scalbn float16_to_uint16_scalbn_ppc +#define float16_to_uint32_scalbn float16_to_uint32_scalbn_ppc +#define float16_to_uint64_scalbn float16_to_uint64_scalbn_ppc +#define float32_to_uint16_scalbn float32_to_uint16_scalbn_ppc +#define float32_to_uint32_scalbn float32_to_uint32_scalbn_ppc +#define 
float32_to_uint64_scalbn float32_to_uint64_scalbn_ppc +#define float64_to_uint16_scalbn float64_to_uint16_scalbn_ppc +#define float64_to_uint32_scalbn float64_to_uint32_scalbn_ppc +#define float64_to_uint64_scalbn float64_to_uint64_scalbn_ppc +#define float16_to_uint16 float16_to_uint16_ppc +#define float16_to_uint32 float16_to_uint32_ppc +#define float16_to_uint64 float16_to_uint64_ppc +#define float32_to_uint16 float32_to_uint16_ppc +#define float32_to_uint32 float32_to_uint32_ppc +#define float32_to_uint64 float32_to_uint64_ppc +#define float64_to_uint16 float64_to_uint16_ppc +#define float64_to_uint32 float64_to_uint32_ppc +#define float64_to_uint64 float64_to_uint64_ppc +#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_ppc +#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_ppc +#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_ppc +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_ppc +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_ppc +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_ppc +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_ppc +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_ppc +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_ppc +#define int64_to_float16_scalbn int64_to_float16_scalbn_ppc +#define int32_to_float16_scalbn int32_to_float16_scalbn_ppc +#define int16_to_float16_scalbn int16_to_float16_scalbn_ppc +#define int64_to_float16 int64_to_float16_ppc +#define int32_to_float16 int32_to_float16_ppc +#define int16_to_float16 int16_to_float16_ppc +#define int64_to_float32_scalbn int64_to_float32_scalbn_ppc +#define int32_to_float32_scalbn int32_to_float32_scalbn_ppc +#define int16_to_float32_scalbn int16_to_float32_scalbn_ppc +#define int64_to_float32 int64_to_float32_ppc +#define int32_to_float32 int32_to_float32_ppc +#define int16_to_float32 int16_to_float32_ppc +#define int64_to_float64_scalbn int64_to_float64_scalbn_ppc +#define int32_to_float64_scalbn int32_to_float64_scalbn_ppc +#define int16_to_float64_scalbn int16_to_float64_scalbn_ppc +#define int64_to_float64 int64_to_float64_ppc +#define int32_to_float64 int32_to_float64_ppc +#define int16_to_float64 int16_to_float64_ppc +#define uint64_to_float16_scalbn uint64_to_float16_scalbn_ppc +#define uint32_to_float16_scalbn uint32_to_float16_scalbn_ppc +#define uint16_to_float16_scalbn uint16_to_float16_scalbn_ppc +#define uint64_to_float16 uint64_to_float16_ppc +#define uint32_to_float16 uint32_to_float16_ppc +#define uint16_to_float16 uint16_to_float16_ppc +#define uint64_to_float32_scalbn uint64_to_float32_scalbn_ppc +#define uint32_to_float32_scalbn uint32_to_float32_scalbn_ppc +#define uint16_to_float32_scalbn uint16_to_float32_scalbn_ppc +#define uint64_to_float32 uint64_to_float32_ppc +#define uint32_to_float32 uint32_to_float32_ppc +#define uint16_to_float32 uint16_to_float32_ppc +#define uint64_to_float64_scalbn uint64_to_float64_scalbn_ppc +#define uint32_to_float64_scalbn uint32_to_float64_scalbn_ppc +#define uint16_to_float64_scalbn uint16_to_float64_scalbn_ppc +#define uint64_to_float64 uint64_to_float64_ppc +#define uint32_to_float64 uint32_to_float64_ppc +#define uint16_to_float64 uint16_to_float64_ppc +#define float16_min float16_min_ppc +#define float16_minnum float16_minnum_ppc +#define float16_minnummag float16_minnummag_ppc +#define float16_max float16_max_ppc +#define float16_maxnum 
float16_maxnum_ppc +#define float16_maxnummag float16_maxnummag_ppc +#define float32_min float32_min_ppc +#define float32_minnum float32_minnum_ppc +#define float32_minnummag float32_minnummag_ppc +#define float32_max float32_max_ppc +#define float32_maxnum float32_maxnum_ppc +#define float32_maxnummag float32_maxnummag_ppc +#define float64_min float64_min_ppc +#define float64_minnum float64_minnum_ppc +#define float64_minnummag float64_minnummag_ppc +#define float64_max float64_max_ppc +#define float64_maxnum float64_maxnum_ppc +#define float64_maxnummag float64_maxnummag_ppc +#define float16_compare float16_compare_ppc +#define float16_compare_quiet float16_compare_quiet_ppc +#define float32_compare float32_compare_ppc +#define float32_compare_quiet float32_compare_quiet_ppc +#define float64_compare float64_compare_ppc +#define float64_compare_quiet float64_compare_quiet_ppc +#define float16_scalbn float16_scalbn_ppc +#define float32_scalbn float32_scalbn_ppc +#define float64_scalbn float64_scalbn_ppc +#define float16_sqrt float16_sqrt_ppc +#define float32_sqrt float32_sqrt_ppc +#define float64_sqrt float64_sqrt_ppc +#define float16_default_nan float16_default_nan_ppc +#define float32_default_nan float32_default_nan_ppc +#define float64_default_nan float64_default_nan_ppc +#define float128_default_nan float128_default_nan_ppc +#define float16_silence_nan float16_silence_nan_ppc +#define float32_silence_nan float32_silence_nan_ppc +#define float64_silence_nan float64_silence_nan_ppc +#define float16_squash_input_denormal float16_squash_input_denormal_ppc +#define float32_squash_input_denormal float32_squash_input_denormal_ppc +#define float64_squash_input_denormal float64_squash_input_denormal_ppc +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_ppc +#define roundAndPackFloatx80 roundAndPackFloatx80_ppc +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_ppc +#define int32_to_floatx80 int32_to_floatx80_ppc +#define int32_to_float128 int32_to_float128_ppc +#define int64_to_floatx80 int64_to_floatx80_ppc +#define int64_to_float128 int64_to_float128_ppc +#define uint64_to_float128 uint64_to_float128_ppc +#define float32_to_floatx80 float32_to_floatx80_ppc +#define float32_to_float128 float32_to_float128_ppc +#define float32_rem float32_rem_ppc +#define float32_exp2 float32_exp2_ppc +#define float32_log2 float32_log2_ppc +#define float32_eq float32_eq_ppc +#define float32_le float32_le_ppc +#define float32_lt float32_lt_ppc +#define float32_unordered float32_unordered_ppc +#define float32_eq_quiet float32_eq_quiet_ppc +#define float32_le_quiet float32_le_quiet_ppc +#define float32_lt_quiet float32_lt_quiet_ppc +#define float32_unordered_quiet float32_unordered_quiet_ppc +#define float64_to_floatx80 float64_to_floatx80_ppc +#define float64_to_float128 float64_to_float128_ppc +#define float64_rem float64_rem_ppc +#define float64_log2 float64_log2_ppc +#define float64_eq float64_eq_ppc +#define float64_le float64_le_ppc +#define float64_lt float64_lt_ppc +#define float64_unordered float64_unordered_ppc +#define float64_eq_quiet float64_eq_quiet_ppc +#define float64_le_quiet float64_le_quiet_ppc +#define float64_lt_quiet float64_lt_quiet_ppc +#define float64_unordered_quiet float64_unordered_quiet_ppc +#define floatx80_to_int32 floatx80_to_int32_ppc +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_ppc +#define floatx80_to_int64 floatx80_to_int64_ppc +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_ppc +#define 
floatx80_to_float32 floatx80_to_float32_ppc +#define floatx80_to_float64 floatx80_to_float64_ppc +#define floatx80_to_float128 floatx80_to_float128_ppc +#define floatx80_round floatx80_round_ppc +#define floatx80_round_to_int floatx80_round_to_int_ppc +#define floatx80_add floatx80_add_ppc +#define floatx80_sub floatx80_sub_ppc +#define floatx80_mul floatx80_mul_ppc +#define floatx80_div floatx80_div_ppc +#define floatx80_rem floatx80_rem_ppc +#define floatx80_sqrt floatx80_sqrt_ppc +#define floatx80_eq floatx80_eq_ppc +#define floatx80_le floatx80_le_ppc +#define floatx80_lt floatx80_lt_ppc +#define floatx80_unordered floatx80_unordered_ppc +#define floatx80_eq_quiet floatx80_eq_quiet_ppc +#define floatx80_le_quiet floatx80_le_quiet_ppc +#define floatx80_lt_quiet floatx80_lt_quiet_ppc +#define floatx80_unordered_quiet floatx80_unordered_quiet_ppc +#define float128_to_int32 float128_to_int32_ppc +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_ppc +#define float128_to_int64 float128_to_int64_ppc +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_ppc +#define float128_to_uint64 float128_to_uint64_ppc +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_ppc +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_ppc +#define float128_to_uint32 float128_to_uint32_ppc +#define float128_to_float32 float128_to_float32_ppc +#define float128_to_float64 float128_to_float64_ppc +#define float128_to_floatx80 float128_to_floatx80_ppc +#define float128_round_to_int float128_round_to_int_ppc +#define float128_add float128_add_ppc +#define float128_sub float128_sub_ppc +#define float128_mul float128_mul_ppc +#define float128_div float128_div_ppc +#define float128_rem float128_rem_ppc +#define float128_sqrt float128_sqrt_ppc +#define float128_eq float128_eq_ppc +#define float128_le float128_le_ppc +#define float128_lt float128_lt_ppc +#define float128_unordered float128_unordered_ppc +#define float128_eq_quiet float128_eq_quiet_ppc +#define float128_le_quiet float128_le_quiet_ppc +#define float128_lt_quiet float128_lt_quiet_ppc +#define float128_unordered_quiet float128_unordered_quiet_ppc +#define floatx80_compare floatx80_compare_ppc +#define floatx80_compare_quiet floatx80_compare_quiet_ppc +#define float128_compare float128_compare_ppc +#define float128_compare_quiet float128_compare_quiet_ppc +#define floatx80_scalbn floatx80_scalbn_ppc +#define float128_scalbn float128_scalbn_ppc +#define softfloat_init softfloat_init_ppc +#define tcg_optimize tcg_optimize_ppc +#define gen_new_label gen_new_label_ppc +#define tcg_can_emit_vec_op tcg_can_emit_vec_op_ppc +#define tcg_expand_vec_op tcg_expand_vec_op_ppc +#define tcg_register_jit tcg_register_jit_ppc +#define tcg_tb_insert tcg_tb_insert_ppc +#define tcg_tb_remove tcg_tb_remove_ppc +#define tcg_tb_lookup tcg_tb_lookup_ppc +#define tcg_tb_foreach tcg_tb_foreach_ppc +#define tcg_nb_tbs tcg_nb_tbs_ppc +#define tcg_region_reset_all tcg_region_reset_all_ppc +#define tcg_region_init tcg_region_init_ppc +#define tcg_code_size tcg_code_size_ppc +#define tcg_code_capacity tcg_code_capacity_ppc +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_ppc +#define tcg_malloc_internal tcg_malloc_internal_ppc +#define tcg_pool_reset tcg_pool_reset_ppc +#define tcg_context_init tcg_context_init_ppc +#define tcg_tb_alloc tcg_tb_alloc_ppc +#define tcg_prologue_init tcg_prologue_init_ppc +#define tcg_func_start tcg_func_start_ppc +#define tcg_set_frame tcg_set_frame_ppc 
+#define tcg_global_mem_new_internal tcg_global_mem_new_internal_ppc +#define tcg_temp_new_internal tcg_temp_new_internal_ppc +#define tcg_temp_new_vec tcg_temp_new_vec_ppc +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_ppc +#define tcg_temp_free_internal tcg_temp_free_internal_ppc +#define tcg_const_i32 tcg_const_i32_ppc +#define tcg_const_i64 tcg_const_i64_ppc +#define tcg_const_local_i32 tcg_const_local_i32_ppc +#define tcg_const_local_i64 tcg_const_local_i64_ppc +#define tcg_op_supported tcg_op_supported_ppc +#define tcg_gen_callN tcg_gen_callN_ppc +#define tcg_op_remove tcg_op_remove_ppc +#define tcg_emit_op tcg_emit_op_ppc +#define tcg_op_insert_before tcg_op_insert_before_ppc +#define tcg_op_insert_after tcg_op_insert_after_ppc +#define tcg_cpu_exec_time tcg_cpu_exec_time_ppc +#define tcg_gen_code tcg_gen_code_ppc +#define tcg_gen_op1 tcg_gen_op1_ppc +#define tcg_gen_op2 tcg_gen_op2_ppc +#define tcg_gen_op3 tcg_gen_op3_ppc +#define tcg_gen_op4 tcg_gen_op4_ppc +#define tcg_gen_op5 tcg_gen_op5_ppc +#define tcg_gen_op6 tcg_gen_op6_ppc +#define tcg_gen_mb tcg_gen_mb_ppc +#define tcg_gen_addi_i32 tcg_gen_addi_i32_ppc +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_ppc +#define tcg_gen_subi_i32 tcg_gen_subi_i32_ppc +#define tcg_gen_andi_i32 tcg_gen_andi_i32_ppc +#define tcg_gen_ori_i32 tcg_gen_ori_i32_ppc +#define tcg_gen_xori_i32 tcg_gen_xori_i32_ppc +#define tcg_gen_shli_i32 tcg_gen_shli_i32_ppc +#define tcg_gen_shri_i32 tcg_gen_shri_i32_ppc +#define tcg_gen_sari_i32 tcg_gen_sari_i32_ppc +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_ppc +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_ppc +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_ppc +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_ppc +#define tcg_gen_muli_i32 tcg_gen_muli_i32_ppc +#define tcg_gen_div_i32 tcg_gen_div_i32_ppc +#define tcg_gen_rem_i32 tcg_gen_rem_i32_ppc +#define tcg_gen_divu_i32 tcg_gen_divu_i32_ppc +#define tcg_gen_remu_i32 tcg_gen_remu_i32_ppc +#define tcg_gen_andc_i32 tcg_gen_andc_i32_ppc +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_ppc +#define tcg_gen_nand_i32 tcg_gen_nand_i32_ppc +#define tcg_gen_nor_i32 tcg_gen_nor_i32_ppc +#define tcg_gen_orc_i32 tcg_gen_orc_i32_ppc +#define tcg_gen_clz_i32 tcg_gen_clz_i32_ppc +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_ppc +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_ppc +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_ppc +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_ppc +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_ppc +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_ppc +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_ppc +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_ppc +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_ppc +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_ppc +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_ppc +#define tcg_gen_extract_i32 tcg_gen_extract_i32_ppc +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_ppc +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_ppc +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_ppc +#define tcg_gen_add2_i32 tcg_gen_add2_i32_ppc +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_ppc +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_ppc +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_ppc +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_ppc +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_ppc +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_ppc +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_ppc +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_ppc +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_ppc +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_ppc 
+#define tcg_gen_smin_i32 tcg_gen_smin_i32_ppc +#define tcg_gen_umin_i32 tcg_gen_umin_i32_ppc +#define tcg_gen_smax_i32 tcg_gen_smax_i32_ppc +#define tcg_gen_umax_i32 tcg_gen_umax_i32_ppc +#define tcg_gen_abs_i32 tcg_gen_abs_i32_ppc +#define tcg_gen_addi_i64 tcg_gen_addi_i64_ppc +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_ppc +#define tcg_gen_subi_i64 tcg_gen_subi_i64_ppc +#define tcg_gen_andi_i64 tcg_gen_andi_i64_ppc +#define tcg_gen_ori_i64 tcg_gen_ori_i64_ppc +#define tcg_gen_xori_i64 tcg_gen_xori_i64_ppc +#define tcg_gen_shli_i64 tcg_gen_shli_i64_ppc +#define tcg_gen_shri_i64 tcg_gen_shri_i64_ppc +#define tcg_gen_sari_i64 tcg_gen_sari_i64_ppc +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_ppc +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_ppc +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_ppc +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_ppc +#define tcg_gen_muli_i64 tcg_gen_muli_i64_ppc +#define tcg_gen_div_i64 tcg_gen_div_i64_ppc +#define tcg_gen_rem_i64 tcg_gen_rem_i64_ppc +#define tcg_gen_divu_i64 tcg_gen_divu_i64_ppc +#define tcg_gen_remu_i64 tcg_gen_remu_i64_ppc +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_ppc +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_ppc +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_ppc +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_ppc +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_ppc +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_ppc +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_ppc +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_ppc +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_ppc +#define tcg_gen_not_i64 tcg_gen_not_i64_ppc +#define tcg_gen_andc_i64 tcg_gen_andc_i64_ppc +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_ppc +#define tcg_gen_nand_i64 tcg_gen_nand_i64_ppc +#define tcg_gen_nor_i64 tcg_gen_nor_i64_ppc +#define tcg_gen_orc_i64 tcg_gen_orc_i64_ppc +#define tcg_gen_clz_i64 tcg_gen_clz_i64_ppc +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_ppc +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_ppc +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_ppc +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_ppc +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_ppc +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_ppc +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_ppc +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_ppc +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_ppc +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_ppc +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_ppc +#define tcg_gen_extract_i64 tcg_gen_extract_i64_ppc +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_ppc +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_ppc +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_ppc +#define tcg_gen_add2_i64 tcg_gen_add2_i64_ppc +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_ppc +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_ppc +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_ppc +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_ppc +#define tcg_gen_smin_i64 tcg_gen_smin_i64_ppc +#define tcg_gen_umin_i64 tcg_gen_umin_i64_ppc +#define tcg_gen_smax_i64 tcg_gen_smax_i64_ppc +#define tcg_gen_umax_i64 tcg_gen_umax_i64_ppc +#define tcg_gen_abs_i64 tcg_gen_abs_i64_ppc +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_ppc +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_ppc +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_ppc +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_ppc +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_ppc +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_ppc +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_ppc +#define tcg_gen_exit_tb tcg_gen_exit_tb_ppc +#define 
tcg_gen_goto_tb tcg_gen_goto_tb_ppc +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_ppc +#define check_exit_request check_exit_request_ppc +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_ppc +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_ppc +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_ppc +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_ppc +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_ppc +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_ppc +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_ppc +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_ppc +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_ppc +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_ppc +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_ppc +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_ppc +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_ppc +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_ppc +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_ppc +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_ppc +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_ppc +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_ppc +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_ppc +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_ppc +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_ppc +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_ppc +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_ppc +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_ppc +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_ppc +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_ppc +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_ppc +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_ppc +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_ppc +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_ppc +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_ppc +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_ppc +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_ppc +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_ppc +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_ppc +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_ppc +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_ppc +#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_ppc +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_ppc +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_ppc +#define simd_desc simd_desc_ppc +#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_ppc +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_ppc +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_ppc +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_ppc +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_ppc +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_ppc +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_ppc +#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_ppc +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_ppc +#define tcg_gen_gvec_2 tcg_gen_gvec_2_ppc +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_ppc +#define tcg_gen_gvec_2s tcg_gen_gvec_2s_ppc 
+#define tcg_gen_gvec_3 tcg_gen_gvec_3_ppc +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_ppc +#define tcg_gen_gvec_4 tcg_gen_gvec_4_ppc +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_ppc +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_ppc +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_ppc +#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_ppc +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_ppc +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_ppc +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_ppc +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_ppc +#define tcg_gen_gvec_not tcg_gen_gvec_not_ppc +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_ppc +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_ppc +#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_ppc +#define tcg_gen_gvec_add tcg_gen_gvec_add_ppc +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_ppc +#define tcg_gen_gvec_addi tcg_gen_gvec_addi_ppc +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_ppc +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_ppc +#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_ppc +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_ppc +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_ppc +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_ppc +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_ppc +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_ppc +#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_ppc +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_ppc +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_ppc +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_ppc +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_ppc +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_ppc +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_ppc +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_ppc +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_ppc +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_ppc +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_ppc +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_ppc +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_ppc +#define tcg_gen_gvec_and tcg_gen_gvec_and_ppc +#define tcg_gen_gvec_or tcg_gen_gvec_or_ppc +#define tcg_gen_gvec_xor tcg_gen_gvec_xor_ppc +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_ppc +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_ppc +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_ppc +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_ppc +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_ppc +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_ppc +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_ppc +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_ppc +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_ppc +#define tcg_gen_gvec_ors tcg_gen_gvec_ors_ppc +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_ppc +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_ppc +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_ppc +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_ppc +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_ppc +#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_ppc +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_ppc +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_ppc +#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_ppc +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_ppc +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_ppc +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_ppc +#define tcg_gen_gvec_sars tcg_gen_gvec_sars_ppc +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_ppc +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_ppc +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_ppc +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_ppc +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_ppc +#define 
tcg_can_emit_vecop_list tcg_can_emit_vecop_list_ppc +#define vec_gen_2 vec_gen_2_ppc +#define vec_gen_3 vec_gen_3_ppc +#define vec_gen_4 vec_gen_4_ppc +#define tcg_gen_mov_vec tcg_gen_mov_vec_ppc +#define tcg_const_zeros_vec tcg_const_zeros_vec_ppc +#define tcg_const_ones_vec tcg_const_ones_vec_ppc +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_ppc +#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_ppc +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_ppc +#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_ppc +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_ppc +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_ppc +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_ppc +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_ppc +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_ppc +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_ppc +#define tcg_gen_ld_vec tcg_gen_ld_vec_ppc +#define tcg_gen_st_vec tcg_gen_st_vec_ppc +#define tcg_gen_stl_vec tcg_gen_stl_vec_ppc +#define tcg_gen_and_vec tcg_gen_and_vec_ppc +#define tcg_gen_or_vec tcg_gen_or_vec_ppc +#define tcg_gen_xor_vec tcg_gen_xor_vec_ppc +#define tcg_gen_andc_vec tcg_gen_andc_vec_ppc +#define tcg_gen_orc_vec tcg_gen_orc_vec_ppc +#define tcg_gen_nand_vec tcg_gen_nand_vec_ppc +#define tcg_gen_nor_vec tcg_gen_nor_vec_ppc +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_ppc +#define tcg_gen_not_vec tcg_gen_not_vec_ppc +#define tcg_gen_neg_vec tcg_gen_neg_vec_ppc +#define tcg_gen_abs_vec tcg_gen_abs_vec_ppc +#define tcg_gen_shli_vec tcg_gen_shli_vec_ppc +#define tcg_gen_shri_vec tcg_gen_shri_vec_ppc +#define tcg_gen_sari_vec tcg_gen_sari_vec_ppc +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_ppc +#define tcg_gen_add_vec tcg_gen_add_vec_ppc +#define tcg_gen_sub_vec tcg_gen_sub_vec_ppc +#define tcg_gen_mul_vec tcg_gen_mul_vec_ppc +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_ppc +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_ppc +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_ppc +#define tcg_gen_ussub_vec tcg_gen_ussub_vec_ppc +#define tcg_gen_smin_vec tcg_gen_smin_vec_ppc +#define tcg_gen_umin_vec tcg_gen_umin_vec_ppc +#define tcg_gen_smax_vec tcg_gen_smax_vec_ppc +#define tcg_gen_umax_vec tcg_gen_umax_vec_ppc +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_ppc +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_ppc +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_ppc +#define tcg_gen_shls_vec tcg_gen_shls_vec_ppc +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_ppc +#define tcg_gen_sars_vec tcg_gen_sars_vec_ppc +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_ppc +#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_ppc +#define tb_htable_lookup tb_htable_lookup_ppc +#define tb_set_jmp_target tb_set_jmp_target_ppc +#define cpu_exec cpu_exec_ppc +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_ppc +#define cpu_reloading_memory_map cpu_reloading_memory_map_ppc +#define cpu_loop_exit cpu_loop_exit_ppc +#define cpu_loop_exit_restore cpu_loop_exit_restore_ppc +#define cpu_loop_exit_atomic cpu_loop_exit_atomic_ppc +#define tlb_init tlb_init_ppc +#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_ppc +#define tlb_flush tlb_flush_ppc +#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_ppc +#define tlb_flush_all_cpus tlb_flush_all_cpus_ppc +#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_ppc +#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_ppc +#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_ppc +#define tlb_flush_page tlb_flush_page_ppc +#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_ppc +#define 
tlb_flush_page_all_cpus tlb_flush_page_all_cpus_ppc +#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_ppc +#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_ppc +#define tlb_protect_code tlb_protect_code_ppc +#define tlb_unprotect_code tlb_unprotect_code_ppc +#define tlb_reset_dirty tlb_reset_dirty_ppc +#define tlb_set_dirty tlb_set_dirty_ppc +#define tlb_set_page_with_attrs tlb_set_page_with_attrs_ppc +#define tlb_set_page tlb_set_page_ppc +#define get_page_addr_code_hostp get_page_addr_code_hostp_ppc +#define get_page_addr_code get_page_addr_code_ppc +#define probe_access probe_access_ppc +#define tlb_vaddr_to_host tlb_vaddr_to_host_ppc +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_ppc +#define helper_le_lduw_mmu helper_le_lduw_mmu_ppc +#define helper_be_lduw_mmu helper_be_lduw_mmu_ppc +#define helper_le_ldul_mmu helper_le_ldul_mmu_ppc +#define helper_be_ldul_mmu helper_be_ldul_mmu_ppc +#define helper_le_ldq_mmu helper_le_ldq_mmu_ppc +#define helper_be_ldq_mmu helper_be_ldq_mmu_ppc +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_ppc +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_ppc +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_ppc +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_ppc +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_ppc +#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_ppc +#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_ppc +#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_ppc +#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_ppc +#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_ppc +#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_ppc +#define cpu_ldub_data_ra cpu_ldub_data_ra_ppc +#define cpu_ldsb_data_ra cpu_ldsb_data_ra_ppc +#define cpu_lduw_data_ra cpu_lduw_data_ra_ppc +#define cpu_ldsw_data_ra cpu_ldsw_data_ra_ppc +#define cpu_ldl_data_ra cpu_ldl_data_ra_ppc +#define cpu_ldq_data_ra cpu_ldq_data_ra_ppc +#define cpu_ldub_data cpu_ldub_data_ppc +#define cpu_ldsb_data cpu_ldsb_data_ppc +#define cpu_lduw_data cpu_lduw_data_ppc +#define cpu_ldsw_data cpu_ldsw_data_ppc +#define cpu_ldl_data cpu_ldl_data_ppc +#define cpu_ldq_data cpu_ldq_data_ppc +#define helper_ret_stb_mmu helper_ret_stb_mmu_ppc +#define helper_le_stw_mmu helper_le_stw_mmu_ppc +#define helper_be_stw_mmu helper_be_stw_mmu_ppc +#define helper_le_stl_mmu helper_le_stl_mmu_ppc +#define helper_be_stl_mmu helper_be_stl_mmu_ppc +#define helper_le_stq_mmu helper_le_stq_mmu_ppc +#define helper_be_stq_mmu helper_be_stq_mmu_ppc +#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_ppc +#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_ppc +#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_ppc +#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_ppc +#define cpu_stb_data_ra cpu_stb_data_ra_ppc +#define cpu_stw_data_ra cpu_stw_data_ra_ppc +#define cpu_stl_data_ra cpu_stl_data_ra_ppc +#define cpu_stq_data_ra cpu_stq_data_ra_ppc +#define cpu_stb_data cpu_stb_data_ppc +#define cpu_stw_data cpu_stw_data_ppc +#define cpu_stl_data cpu_stl_data_ppc +#define cpu_stq_data cpu_stq_data_ppc +#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_ppc +#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_ppc +#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_ppc +#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_ppc +#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_ppc +#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_ppc +#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_ppc +#define helper_atomic_and_fetchb_mmu 
helper_atomic_and_fetchb_mmu_ppc +#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_ppc +#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_ppc +#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_ppc +#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_ppc +#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_ppc +#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_ppc +#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_ppc +#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_ppc +#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_ppc +#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_ppc +#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_ppc +#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_ppc +#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_ppc +#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_ppc +#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_ppc +#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_ppc +#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_ppc +#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_ppc +#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_ppc +#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_ppc +#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_ppc +#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_ppc +#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_ppc +#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_ppc +#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_ppc +#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_ppc +#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_ppc +#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_ppc +#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_ppc +#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_ppc +#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_ppc +#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_ppc +#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_ppc +#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_ppc +#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_ppc +#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_ppc +#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_ppc +#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_ppc +#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_ppc +#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_ppc +#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_ppc +#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_ppc +#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_ppc +#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_ppc +#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_ppc +#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_ppc +#define 
helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_ppc +#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_ppc +#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_ppc +#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_ppc +#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_ppc +#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_ppc +#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_ppc +#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_ppc +#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_ppc +#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_ppc +#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_ppc +#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_ppc +#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_ppc +#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_ppc +#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_ppc +#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_ppc +#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_ppc +#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_ppc +#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_ppc +#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_ppc +#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_ppc +#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_ppc +#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_ppc +#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_ppc +#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_ppc +#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_ppc +#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_ppc +#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_ppc +#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_ppc +#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_ppc +#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_ppc +#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_ppc +#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_ppc +#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_ppc +#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_ppc +#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_ppc +#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_ppc +#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_ppc +#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_ppc +#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_ppc +#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_ppc +#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_ppc +#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_ppc +#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_ppc +#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_ppc +#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_ppc +#define helper_atomic_fetch_sminq_le_mmu 
helper_atomic_fetch_sminq_le_mmu_ppc +#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_ppc +#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_ppc +#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_ppc +#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_ppc +#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_ppc +#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_ppc +#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_ppc +#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_ppc +#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_ppc +#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_ppc +#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_ppc +#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_ppc +#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_ppc +#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_ppc +#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_ppc +#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_ppc +#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_ppc +#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_ppc +#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_ppc +#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_ppc +#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_ppc +#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_ppc +#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_ppc +#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_ppc +#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_ppc +#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_ppc +#define helper_atomic_xchgb helper_atomic_xchgb_ppc +#define helper_atomic_fetch_addb helper_atomic_fetch_addb_ppc +#define helper_atomic_fetch_andb helper_atomic_fetch_andb_ppc +#define helper_atomic_fetch_orb helper_atomic_fetch_orb_ppc +#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_ppc +#define helper_atomic_add_fetchb helper_atomic_add_fetchb_ppc +#define helper_atomic_and_fetchb helper_atomic_and_fetchb_ppc +#define helper_atomic_or_fetchb helper_atomic_or_fetchb_ppc +#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_ppc +#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_ppc +#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_ppc +#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_ppc +#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_ppc +#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_ppc +#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_ppc +#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_ppc +#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_ppc +#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_ppc +#define helper_atomic_xchgw_le helper_atomic_xchgw_le_ppc +#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_ppc +#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_ppc +#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_ppc +#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_ppc +#define helper_atomic_add_fetchw_le 
helper_atomic_add_fetchw_le_ppc +#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_ppc +#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_ppc +#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_ppc +#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_ppc +#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_ppc +#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_ppc +#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_ppc +#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_ppc +#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_ppc +#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_ppc +#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_ppc +#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_ppc +#define helper_atomic_xchgw_be helper_atomic_xchgw_be_ppc +#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_ppc +#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_ppc +#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_ppc +#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_ppc +#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_ppc +#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_ppc +#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_ppc +#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_ppc +#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_ppc +#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_ppc +#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_ppc +#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_ppc +#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_ppc +#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_ppc +#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_ppc +#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_ppc +#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_ppc +#define helper_atomic_xchgl_le helper_atomic_xchgl_le_ppc +#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_ppc +#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_ppc +#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_ppc +#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_ppc +#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_ppc +#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_ppc +#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_ppc +#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_ppc +#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_ppc +#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_ppc +#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_ppc +#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_ppc +#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_ppc +#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_ppc +#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_ppc +#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_ppc +#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_ppc +#define helper_atomic_xchgl_be helper_atomic_xchgl_be_ppc +#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_ppc +#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_ppc +#define 
helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_ppc +#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_ppc +#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_ppc +#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_ppc +#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_ppc +#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_ppc +#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_ppc +#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_ppc +#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_ppc +#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_ppc +#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_ppc +#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_ppc +#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_ppc +#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_ppc +#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_ppc +#define helper_atomic_xchgq_le helper_atomic_xchgq_le_ppc +#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_ppc +#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_ppc +#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_ppc +#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_ppc +#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_ppc +#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_ppc +#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_ppc +#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_ppc +#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_ppc +#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_ppc +#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_ppc +#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_ppc +#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_ppc +#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_ppc +#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_ppc +#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_ppc +#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_ppc +#define helper_atomic_xchgq_be helper_atomic_xchgq_be_ppc +#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_ppc +#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_ppc +#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_ppc +#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_ppc +#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_ppc +#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_ppc +#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_ppc +#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_ppc +#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_ppc +#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_ppc +#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_ppc +#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_ppc +#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_ppc +#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_ppc +#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_ppc +#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_ppc +#define cpu_ldub_code cpu_ldub_code_ppc +#define cpu_lduw_code cpu_lduw_code_ppc +#define cpu_ldl_code 
cpu_ldl_code_ppc +#define cpu_ldq_code cpu_ldq_code_ppc +#define helper_div_i32 helper_div_i32_ppc +#define helper_rem_i32 helper_rem_i32_ppc +#define helper_divu_i32 helper_divu_i32_ppc +#define helper_remu_i32 helper_remu_i32_ppc +#define helper_shl_i64 helper_shl_i64_ppc +#define helper_shr_i64 helper_shr_i64_ppc +#define helper_sar_i64 helper_sar_i64_ppc +#define helper_div_i64 helper_div_i64_ppc +#define helper_rem_i64 helper_rem_i64_ppc +#define helper_divu_i64 helper_divu_i64_ppc +#define helper_remu_i64 helper_remu_i64_ppc +#define helper_muluh_i64 helper_muluh_i64_ppc +#define helper_mulsh_i64 helper_mulsh_i64_ppc +#define helper_clz_i32 helper_clz_i32_ppc +#define helper_ctz_i32 helper_ctz_i32_ppc +#define helper_clz_i64 helper_clz_i64_ppc +#define helper_ctz_i64 helper_ctz_i64_ppc +#define helper_clrsb_i32 helper_clrsb_i32_ppc +#define helper_clrsb_i64 helper_clrsb_i64_ppc +#define helper_ctpop_i32 helper_ctpop_i32_ppc +#define helper_ctpop_i64 helper_ctpop_i64_ppc +#define helper_lookup_tb_ptr helper_lookup_tb_ptr_ppc +#define helper_exit_atomic helper_exit_atomic_ppc +#define helper_gvec_add8 helper_gvec_add8_ppc +#define helper_gvec_add16 helper_gvec_add16_ppc +#define helper_gvec_add32 helper_gvec_add32_ppc +#define helper_gvec_add64 helper_gvec_add64_ppc +#define helper_gvec_adds8 helper_gvec_adds8_ppc +#define helper_gvec_adds16 helper_gvec_adds16_ppc +#define helper_gvec_adds32 helper_gvec_adds32_ppc +#define helper_gvec_adds64 helper_gvec_adds64_ppc +#define helper_gvec_sub8 helper_gvec_sub8_ppc +#define helper_gvec_sub16 helper_gvec_sub16_ppc +#define helper_gvec_sub32 helper_gvec_sub32_ppc +#define helper_gvec_sub64 helper_gvec_sub64_ppc +#define helper_gvec_subs8 helper_gvec_subs8_ppc +#define helper_gvec_subs16 helper_gvec_subs16_ppc +#define helper_gvec_subs32 helper_gvec_subs32_ppc +#define helper_gvec_subs64 helper_gvec_subs64_ppc +#define helper_gvec_mul8 helper_gvec_mul8_ppc +#define helper_gvec_mul16 helper_gvec_mul16_ppc +#define helper_gvec_mul32 helper_gvec_mul32_ppc +#define helper_gvec_mul64 helper_gvec_mul64_ppc +#define helper_gvec_muls8 helper_gvec_muls8_ppc +#define helper_gvec_muls16 helper_gvec_muls16_ppc +#define helper_gvec_muls32 helper_gvec_muls32_ppc +#define helper_gvec_muls64 helper_gvec_muls64_ppc +#define helper_gvec_neg8 helper_gvec_neg8_ppc +#define helper_gvec_neg16 helper_gvec_neg16_ppc +#define helper_gvec_neg32 helper_gvec_neg32_ppc +#define helper_gvec_neg64 helper_gvec_neg64_ppc +#define helper_gvec_abs8 helper_gvec_abs8_ppc +#define helper_gvec_abs16 helper_gvec_abs16_ppc +#define helper_gvec_abs32 helper_gvec_abs32_ppc +#define helper_gvec_abs64 helper_gvec_abs64_ppc +#define helper_gvec_mov helper_gvec_mov_ppc +#define helper_gvec_dup64 helper_gvec_dup64_ppc +#define helper_gvec_dup32 helper_gvec_dup32_ppc +#define helper_gvec_dup16 helper_gvec_dup16_ppc +#define helper_gvec_dup8 helper_gvec_dup8_ppc +#define helper_gvec_not helper_gvec_not_ppc +#define helper_gvec_and helper_gvec_and_ppc +#define helper_gvec_or helper_gvec_or_ppc +#define helper_gvec_xor helper_gvec_xor_ppc +#define helper_gvec_andc helper_gvec_andc_ppc +#define helper_gvec_orc helper_gvec_orc_ppc +#define helper_gvec_nand helper_gvec_nand_ppc +#define helper_gvec_nor helper_gvec_nor_ppc +#define helper_gvec_eqv helper_gvec_eqv_ppc +#define helper_gvec_ands helper_gvec_ands_ppc +#define helper_gvec_xors helper_gvec_xors_ppc +#define helper_gvec_ors helper_gvec_ors_ppc +#define helper_gvec_shl8i helper_gvec_shl8i_ppc +#define helper_gvec_shl16i 
helper_gvec_shl16i_ppc +#define helper_gvec_shl32i helper_gvec_shl32i_ppc +#define helper_gvec_shl64i helper_gvec_shl64i_ppc +#define helper_gvec_shr8i helper_gvec_shr8i_ppc +#define helper_gvec_shr16i helper_gvec_shr16i_ppc +#define helper_gvec_shr32i helper_gvec_shr32i_ppc +#define helper_gvec_shr64i helper_gvec_shr64i_ppc +#define helper_gvec_sar8i helper_gvec_sar8i_ppc +#define helper_gvec_sar16i helper_gvec_sar16i_ppc +#define helper_gvec_sar32i helper_gvec_sar32i_ppc +#define helper_gvec_sar64i helper_gvec_sar64i_ppc +#define helper_gvec_shl8v helper_gvec_shl8v_ppc +#define helper_gvec_shl16v helper_gvec_shl16v_ppc +#define helper_gvec_shl32v helper_gvec_shl32v_ppc +#define helper_gvec_shl64v helper_gvec_shl64v_ppc +#define helper_gvec_shr8v helper_gvec_shr8v_ppc +#define helper_gvec_shr16v helper_gvec_shr16v_ppc +#define helper_gvec_shr32v helper_gvec_shr32v_ppc +#define helper_gvec_shr64v helper_gvec_shr64v_ppc +#define helper_gvec_sar8v helper_gvec_sar8v_ppc +#define helper_gvec_sar16v helper_gvec_sar16v_ppc +#define helper_gvec_sar32v helper_gvec_sar32v_ppc +#define helper_gvec_sar64v helper_gvec_sar64v_ppc +#define helper_gvec_eq8 helper_gvec_eq8_ppc +#define helper_gvec_ne8 helper_gvec_ne8_ppc +#define helper_gvec_lt8 helper_gvec_lt8_ppc +#define helper_gvec_le8 helper_gvec_le8_ppc +#define helper_gvec_ltu8 helper_gvec_ltu8_ppc +#define helper_gvec_leu8 helper_gvec_leu8_ppc +#define helper_gvec_eq16 helper_gvec_eq16_ppc +#define helper_gvec_ne16 helper_gvec_ne16_ppc +#define helper_gvec_lt16 helper_gvec_lt16_ppc +#define helper_gvec_le16 helper_gvec_le16_ppc +#define helper_gvec_ltu16 helper_gvec_ltu16_ppc +#define helper_gvec_leu16 helper_gvec_leu16_ppc +#define helper_gvec_eq32 helper_gvec_eq32_ppc +#define helper_gvec_ne32 helper_gvec_ne32_ppc +#define helper_gvec_lt32 helper_gvec_lt32_ppc +#define helper_gvec_le32 helper_gvec_le32_ppc +#define helper_gvec_ltu32 helper_gvec_ltu32_ppc +#define helper_gvec_leu32 helper_gvec_leu32_ppc +#define helper_gvec_eq64 helper_gvec_eq64_ppc +#define helper_gvec_ne64 helper_gvec_ne64_ppc +#define helper_gvec_lt64 helper_gvec_lt64_ppc +#define helper_gvec_le64 helper_gvec_le64_ppc +#define helper_gvec_ltu64 helper_gvec_ltu64_ppc +#define helper_gvec_leu64 helper_gvec_leu64_ppc +#define helper_gvec_ssadd8 helper_gvec_ssadd8_ppc +#define helper_gvec_ssadd16 helper_gvec_ssadd16_ppc +#define helper_gvec_ssadd32 helper_gvec_ssadd32_ppc +#define helper_gvec_ssadd64 helper_gvec_ssadd64_ppc +#define helper_gvec_sssub8 helper_gvec_sssub8_ppc +#define helper_gvec_sssub16 helper_gvec_sssub16_ppc +#define helper_gvec_sssub32 helper_gvec_sssub32_ppc +#define helper_gvec_sssub64 helper_gvec_sssub64_ppc +#define helper_gvec_usadd8 helper_gvec_usadd8_ppc +#define helper_gvec_usadd16 helper_gvec_usadd16_ppc +#define helper_gvec_usadd32 helper_gvec_usadd32_ppc +#define helper_gvec_usadd64 helper_gvec_usadd64_ppc +#define helper_gvec_ussub8 helper_gvec_ussub8_ppc +#define helper_gvec_ussub16 helper_gvec_ussub16_ppc +#define helper_gvec_ussub32 helper_gvec_ussub32_ppc +#define helper_gvec_ussub64 helper_gvec_ussub64_ppc +#define helper_gvec_smin8 helper_gvec_smin8_ppc +#define helper_gvec_smin16 helper_gvec_smin16_ppc +#define helper_gvec_smin32 helper_gvec_smin32_ppc +#define helper_gvec_smin64 helper_gvec_smin64_ppc +#define helper_gvec_smax8 helper_gvec_smax8_ppc +#define helper_gvec_smax16 helper_gvec_smax16_ppc +#define helper_gvec_smax32 helper_gvec_smax32_ppc +#define helper_gvec_smax64 helper_gvec_smax64_ppc +#define helper_gvec_umin8 
helper_gvec_umin8_ppc +#define helper_gvec_umin16 helper_gvec_umin16_ppc +#define helper_gvec_umin32 helper_gvec_umin32_ppc +#define helper_gvec_umin64 helper_gvec_umin64_ppc +#define helper_gvec_umax8 helper_gvec_umax8_ppc +#define helper_gvec_umax16 helper_gvec_umax16_ppc +#define helper_gvec_umax32 helper_gvec_umax32_ppc +#define helper_gvec_umax64 helper_gvec_umax64_ppc +#define helper_gvec_bitsel helper_gvec_bitsel_ppc +#define cpu_restore_state cpu_restore_state_ppc +#define page_collection_lock page_collection_lock_ppc +#define page_collection_unlock page_collection_unlock_ppc +#define free_code_gen_buffer free_code_gen_buffer_ppc +#define tcg_exec_init tcg_exec_init_ppc +#define tb_cleanup tb_cleanup_ppc +#define tb_flush tb_flush_ppc +#define tb_phys_invalidate tb_phys_invalidate_ppc +#define tb_gen_code tb_gen_code_ppc +#define tb_exec_lock tb_exec_lock_ppc +#define tb_exec_unlock tb_exec_unlock_ppc +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_ppc +#define tb_invalidate_phys_range tb_invalidate_phys_range_ppc +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_ppc +#define tb_check_watchpoint tb_check_watchpoint_ppc +#define cpu_io_recompile cpu_io_recompile_ppc +#define tb_flush_jmp_cache tb_flush_jmp_cache_ppc +#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_ppc +#define translator_loop_temp_check translator_loop_temp_check_ppc +#define translator_loop translator_loop_ppc +#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_ppc +#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_ppc +#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_ppc +#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_ppc +#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_ppc +#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_ppc +#define unassigned_mem_ops unassigned_mem_ops_ppc +#define floatx80_infinity floatx80_infinity_ppc +#define dup_const_func dup_const_func_ppc +#define gen_helper_raise_exception gen_helper_raise_exception_ppc +#define gen_helper_raise_interrupt gen_helper_raise_interrupt_ppc +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_ppc +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_ppc +#define gen_helper_cpsr_read gen_helper_cpsr_read_ppc +#define gen_helper_cpsr_write gen_helper_cpsr_write_ppc +#define ppc_cpu_unrealize ppc_cpu_unrealize_ppc +#define ppc_cpu_instance_finalize ppc_cpu_instance_finalize_ppc +#define ppc_reg_reset ppc_reg_reset_ppc +#define ppc_reg_read ppc_reg_read_ppc +#define ppc_reg_write ppc_reg_write_ppc +#define ppc_cpu_do_interrupt ppc_cpu_do_interrupt_ppc +#define ppc_cpu_do_system_reset ppc_cpu_do_system_reset_ppc +#define ppc_cpu_do_fwnmi_machine_check ppc_cpu_do_fwnmi_machine_check_ppc +#define ppc_cpu_exec_interrupt ppc_cpu_exec_interrupt_ppc +#define raise_exception_err_ra raise_exception_err_ra_ppc +#define raise_exception_err raise_exception_err_ppc +#define raise_exception raise_exception_ppc +#define raise_exception_ra raise_exception_ra_ppc +#define helper_raise_exception_err helper_raise_exception_err_ppc +#define helper_store_msr helper_store_msr_ppc +#define helper_rfi helper_rfi_ppc +#define helper_40x_rfci helper_40x_rfci_ppc +#define helper_rfdi helper_rfdi_ppc +#define helper_rfci helper_rfci_ppc +#define helper_rfmci helper_rfmci_ppc +#define helper_tw helper_tw_ppc +#define helper_rfsvc helper_rfsvc_ppc +#define helper_msgclr helper_msgclr_ppc +#define helper_msgsnd helper_msgsnd_ppc +#define helper_book3s_msgclr 
helper_book3s_msgclr_ppc +#define ppc_cpu_do_unaligned_access ppc_cpu_do_unaligned_access_ppc +#define helper_divweu helper_divweu_ppc +#define helper_divwe helper_divwe_ppc +#define helper_sraw helper_sraw_ppc +#define helper_popcntb helper_popcntb_ppc +#define helper_div helper_div_ppc +#define helper_divo helper_divo_ppc +#define helper_divs helper_divs_ppc +#define helper_divso helper_divso_ppc +#define helper_602_mfrom helper_602_mfrom_ppc +#define helper_mtvscr helper_mtvscr_ppc +#define helper_vaddcuw helper_vaddcuw_ppc +#define helper_vprtybw helper_vprtybw_ppc +#define helper_vprtybd helper_vprtybd_ppc +#define helper_vprtybq helper_vprtybq_ppc +#define helper_vmuluwm helper_vmuluwm_ppc +#define helper_vaddfp helper_vaddfp_ppc +#define helper_vsubfp helper_vsubfp_ppc +#define helper_vminfp helper_vminfp_ppc +#define helper_vmaxfp helper_vmaxfp_ppc +#define helper_vmaddfp helper_vmaddfp_ppc +#define helper_vnmsubfp helper_vnmsubfp_ppc +#define helper_vaddsbs helper_vaddsbs_ppc +#define helper_vsubsbs helper_vsubsbs_ppc +#define helper_vsubshs helper_vsubshs_ppc +#define helper_vaddsws helper_vaddsws_ppc +#define helper_vsubsws helper_vsubsws_ppc +#define helper_vaddubs helper_vaddubs_ppc +#define helper_vsububs helper_vsububs_ppc +#define helper_vadduhs helper_vadduhs_ppc +#define helper_vsubuhs helper_vsubuhs_ppc +#define helper_vadduws helper_vadduws_ppc +#define helper_vsubuws helper_vsubuws_ppc +#define helper_vavgsb helper_vavgsb_ppc +#define helper_vavgub helper_vavgub_ppc +#define helper_vavgsh helper_vavgsh_ppc +#define helper_vavguh helper_vavguh_ppc +#define helper_vavgsw helper_vavgsw_ppc +#define helper_vabsdub helper_vabsdub_ppc +#define helper_vabsduh helper_vabsduh_ppc +#define helper_vabsduw helper_vabsduw_ppc +#define helper_vcfux helper_vcfux_ppc +#define helper_vcfsx helper_vcfsx_ppc +#define helper_vcmpequb helper_vcmpequb_ppc +#define helper_vcmpequb_dot helper_vcmpequb_dot_ppc +#define helper_vcmpequw helper_vcmpequw_ppc +#define helper_vcmpequw_dot helper_vcmpequw_dot_ppc +#define helper_vcmpequd helper_vcmpequd_ppc +#define helper_vcmpequd_dot helper_vcmpequd_dot_ppc +#define helper_vcmpgtub helper_vcmpgtub_ppc +#define helper_vcmpgtub_dot helper_vcmpgtub_dot_ppc +#define helper_vcmpgtuh helper_vcmpgtuh_ppc +#define helper_vcmpgtuh_dot helper_vcmpgtuh_dot_ppc +#define helper_vcmpgtuw helper_vcmpgtuw_ppc +#define helper_vcmpgtuw_dot helper_vcmpgtuw_dot_ppc +#define helper_vcmpgtud helper_vcmpgtud_ppc +#define helper_vcmpgtud_dot helper_vcmpgtud_dot_ppc +#define helper_vcmpgtsb helper_vcmpgtsb_ppc +#define helper_vcmpgtsb_dot helper_vcmpgtsb_dot_ppc +#define helper_vcmpgtsh helper_vcmpgtsh_ppc +#define helper_vcmpgtsh_dot helper_vcmpgtsh_dot_ppc +#define helper_vcmpgtsw helper_vcmpgtsw_ppc +#define helper_vcmpgtsw_dot helper_vcmpgtsw_dot_ppc +#define helper_vcmpgtsd helper_vcmpgtsd_ppc +#define helper_vcmpgtsd_dot helper_vcmpgtsd_dot_ppc +#define helper_vcmpnezb helper_vcmpnezb_ppc +#define helper_vcmpnezb_dot helper_vcmpnezb_dot_ppc +#define helper_vcmpnezw helper_vcmpnezw_ppc +#define helper_vcmpnezw_dot helper_vcmpnezw_dot_ppc +#define helper_vcmpneb helper_vcmpneb_ppc +#define helper_vcmpneb_dot helper_vcmpneb_dot_ppc +#define helper_vcmpneh helper_vcmpneh_ppc +#define
helper_vcmpneh_dot helper_vcmpneh_dot_ppc +#define helper_vcmpnew helper_vcmpnew_ppc +#define helper_vcmpnew_dot helper_vcmpnew_dot_ppc +#define helper_vcmpeqfp helper_vcmpeqfp_ppc +#define helper_vcmpeqfp_dot helper_vcmpeqfp_dot_ppc +#define helper_vcmpgefp helper_vcmpgefp_ppc +#define helper_vcmpgefp_dot helper_vcmpgefp_dot_ppc +#define helper_vcmpgtfp helper_vcmpgtfp_ppc +#define helper_vcmpgtfp_dot helper_vcmpgtfp_dot_ppc +#define helper_vcmpbfp helper_vcmpbfp_ppc +#define helper_vcmpbfp_dot helper_vcmpbfp_dot_ppc +#define helper_vctuxs helper_vctuxs_ppc +#define helper_vctsxs helper_vctsxs_ppc +#define helper_vclzlsbb helper_vclzlsbb_ppc +#define helper_vctzlsbb helper_vctzlsbb_ppc +#define helper_vmhaddshs helper_vmhaddshs_ppc +#define helper_vmhraddshs helper_vmhraddshs_ppc +#define helper_vmladduhm helper_vmladduhm_ppc +#define helper_vmrglb helper_vmrglb_ppc +#define helper_vmrghb helper_vmrghb_ppc +#define helper_vmrglh helper_vmrglh_ppc +#define helper_vmrghh helper_vmrghh_ppc +#define helper_vmrglw helper_vmrglw_ppc +#define helper_vmrghw helper_vmrghw_ppc +#define helper_vmsummbm helper_vmsummbm_ppc +#define helper_vmsumshs helper_vmsumshs_ppc +#define helper_vmsumubm helper_vmsumubm_ppc +#define helper_vmsumuhm helper_vmsumuhm_ppc +#define helper_vmulesb helper_vmulesb_ppc +#define helper_vmulosb helper_vmulosb_ppc +#define helper_vmulesh helper_vmulesh_ppc +#define helper_vmulesw helper_vmulesw_ppc +#define helper_vmuleub helper_vmuleub_ppc +#define helper_vmuloub helper_vmuloub_ppc +#define helper_vmuleuh helper_vmuleuh_ppc +#define helper_vmulouh helper_vmulouh_ppc +#define helper_vmuleuw helper_vmuleuw_ppc +#define helper_vmulouw helper_vmulouw_ppc +#define helper_vperm helper_vperm_ppc +#define helper_vpermr helper_vpermr_ppc +#define helper_vbpermd helper_vbpermd_ppc +#define helper_vpmsumb helper_vpmsumb_ppc +#define helper_vpmsumh helper_vpmsumh_ppc +#define helper_vpmsumw helper_vpmsumw_ppc +#define helper_vpmsumd helper_vpmsumd_ppc +#define helper_vpkpx helper_vpkpx_ppc +#define helper_vpkshss helper_vpkshss_ppc +#define helper_vpkshus helper_vpkshus_ppc +#define helper_vpkswss helper_vpkswss_ppc +#define helper_vpkswus helper_vpkswus_ppc +#define helper_vpksdss helper_vpksdss_ppc +#define helper_vpksdus helper_vpksdus_ppc +#define helper_vpkuhus helper_vpkuhus_ppc +#define helper_vpkuwus helper_vpkuwus_ppc +#define helper_vpkudus helper_vpkudus_ppc +#define helper_vpkuhum helper_vpkuhum_ppc +#define helper_vpkuwum helper_vpkuwum_ppc +#define helper_vpkudum helper_vpkudum_ppc +#define helper_vrefp helper_vrefp_ppc +#define helper_vrfin helper_vrfin_ppc +#define helper_vrfim helper_vrfim_ppc +#define helper_vrfip helper_vrfip_ppc +#define helper_vrfiz helper_vrfiz_ppc +#define helper_vrlb helper_vrlb_ppc +#define helper_vrlh helper_vrlh_ppc +#define helper_vrlw helper_vrlw_ppc +#define helper_vrld helper_vrld_ppc +#define helper_vrsqrtefp helper_vrsqrtefp_ppc +#define helper_vrldmi helper_vrldmi_ppc +#define helper_vrlwmi helper_vrlwmi_ppc +#define helper_vrldnm helper_vrldnm_ppc +#define helper_vrlwnm helper_vrlwnm_ppc +#define helper_vsel helper_vsel_ppc +#define helper_vexptefp helper_vexptefp_ppc +#define helper_vlogefp helper_vlogefp_ppc +#define helper_vextublx helper_vextublx_ppc +#define helper_vextuhlx helper_vextuhlx_ppc +#define helper_vextuwlx helper_vextuwlx_ppc
+#define helper_vextubrx helper_vextubrx_ppc +#define helper_vextuhrx helper_vextuhrx_ppc +#define helper_vextuwrx helper_vextuwrx_ppc +#define helper_vslv helper_vslv_ppc +#define helper_vsrv helper_vsrv_ppc +#define helper_vsldoi helper_vsldoi_ppc +#define helper_vslo helper_vslo_ppc +#define helper_vinsertb helper_vinsertb_ppc +#define helper_vinserth helper_vinserth_ppc +#define helper_vinsertw helper_vinsertw_ppc +#define helper_vinsertd helper_vinsertd_ppc +#define helper_vextractub helper_vextractub_ppc +#define helper_vextractuh helper_vextractuh_ppc +#define helper_vextractuw helper_vextractuw_ppc +#define helper_vextractd helper_vextractd_ppc +#define helper_xxextractuw helper_xxextractuw_ppc +#define helper_xxinsertw helper_xxinsertw_ppc +#define helper_vextsb2w helper_vextsb2w_ppc +#define helper_vextsb2d helper_vextsb2d_ppc +#define helper_vextsh2w helper_vextsh2w_ppc +#define helper_vextsh2d helper_vextsh2d_ppc +#define helper_vnegw helper_vnegw_ppc +#define helper_vnegd helper_vnegd_ppc +#define helper_vsro helper_vsro_ppc +#define helper_vsubcuw helper_vsubcuw_ppc +#define helper_vsumsws helper_vsumsws_ppc +#define helper_vsum2sws helper_vsum2sws_ppc +#define helper_vsum4sbs helper_vsum4sbs_ppc +#define helper_vsum4shs helper_vsum4shs_ppc +#define helper_vsum4ubs helper_vsum4ubs_ppc +#define helper_vupklpx helper_vupklpx_ppc +#define helper_vupkhpx helper_vupkhpx_ppc +#define helper_vupkhsb helper_vupkhsb_ppc +#define helper_vupkhsh helper_vupkhsh_ppc +#define helper_vupkhsw helper_vupkhsw_ppc +#define helper_vupklsb helper_vupklsb_ppc +#define helper_vupklsh helper_vupklsh_ppc +#define helper_vupklsw helper_vupklsw_ppc +#define helper_vclzb helper_vclzb_ppc +#define helper_vclzh helper_vclzh_ppc +#define helper_vctzb helper_vctzb_ppc +#define helper_vctzh helper_vctzh_ppc +#define helper_vctzw helper_vctzw_ppc +#define helper_vctzd helper_vctzd_ppc +#define helper_vpopcntb helper_vpopcntb_ppc +#define helper_vpopcnth helper_vpopcnth_ppc +#define helper_vpopcntw helper_vpopcntw_ppc +#define helper_vpopcntd helper_vpopcntd_ppc +#define helper_vadduqm helper_vadduqm_ppc +#define helper_vaddeuqm helper_vaddeuqm_ppc +#define helper_vaddcuq helper_vaddcuq_ppc +#define helper_vaddecuq helper_vaddecuq_ppc +#define helper_vsubuqm helper_vsubuqm_ppc +#define helper_vsubeuqm helper_vsubeuqm_ppc +#define helper_vsubcuq helper_vsubcuq_ppc +#define helper_vsubecuq helper_vsubecuq_ppc +#define helper_bcdadd helper_bcdadd_ppc +#define helper_bcdsub helper_bcdsub_ppc +#define helper_bcdcfn helper_bcdcfn_ppc +#define helper_bcdctn helper_bcdctn_ppc +#define helper_bcdcfz helper_bcdcfz_ppc +#define helper_bcdctz helper_bcdctz_ppc +#define helper_bcdcfsq helper_bcdcfsq_ppc +#define helper_bcdctsq helper_bcdctsq_ppc +#define helper_bcdcpsgn helper_bcdcpsgn_ppc +#define helper_bcdsetsgn helper_bcdsetsgn_ppc +#define helper_bcds helper_bcds_ppc +#define helper_bcdus helper_bcdus_ppc +#define helper_bcdsr helper_bcdsr_ppc +#define helper_bcdtrunc helper_bcdtrunc_ppc +#define helper_bcdutrunc helper_bcdutrunc_ppc +#define helper_vsbox helper_vsbox_ppc +#define helper_vcipher helper_vcipher_ppc +#define helper_vcipherlast helper_vcipherlast_ppc +#define helper_vncipher helper_vncipher_ppc +#define helper_vncipherlast helper_vncipherlast_ppc +#define helper_vshasigmaw helper_vshasigmaw_ppc +#define helper_vshasigmad helper_vshasigmad_ppc +#define helper_vpermxor helper_vpermxor_ppc +#define helper_brinc helper_brinc_ppc +#define helper_cntlsw32 helper_cntlsw32_ppc +#define helper_cntlzw32 
helper_cntlzw32_ppc +#define helper_dlmzb helper_dlmzb_ppc +#define helper_lmw helper_lmw_ppc +#define helper_lsw helper_lsw_ppc +#define helper_lswx helper_lswx_ppc +#define helper_stsw helper_stsw_ppc +#define helper_dcbz helper_dcbz_ppc +#define helper_dcbzep helper_dcbzep_ppc +#define helper_icbi helper_icbi_ppc +#define helper_icbiep helper_icbiep_ppc +#define helper_lscbx helper_lscbx_ppc +#define helper_lvebx helper_lvebx_ppc +#define helper_lvehx helper_lvehx_ppc +#define helper_lvewx helper_lvewx_ppc +#define helper_stvebx helper_stvebx_ppc +#define helper_stvehx helper_stvehx_ppc +#define helper_stvewx helper_stvewx_ppc +#define helper_tbegin helper_tbegin_ppc +#define helper_load_dump_spr helper_load_dump_spr_ppc +#define helper_store_dump_spr helper_store_dump_spr_ppc +#define helper_hfscr_facility_check helper_hfscr_facility_check_ppc +#define helper_fscr_facility_check helper_fscr_facility_check_ppc +#define helper_msr_facility_check helper_msr_facility_check_ppc +#define helper_store_sdr1 helper_store_sdr1_ppc +#define helper_store_pidr helper_store_pidr_ppc +#define helper_store_lpidr helper_store_lpidr_ppc +#define helper_store_hid0_601 helper_store_hid0_601_ppc +#define helper_store_403_pbr helper_store_403_pbr_ppc +#define helper_store_40x_dbcr0 helper_store_40x_dbcr0_ppc +#define helper_store_40x_sler helper_store_40x_sler_ppc +#define helper_clcs helper_clcs_ppc +#define ppc_store_msr ppc_store_msr_ppc +#define helper_fixup_thrm helper_fixup_thrm_ppc +#define store_40x_sler store_40x_sler_ppc +#define dump_mmu dump_mmu_ppc +#define ppc_cpu_get_phys_page_debug ppc_cpu_get_phys_page_debug_ppc +#define helper_store_ibatu helper_store_ibatu_ppc +#define helper_store_ibatl helper_store_ibatl_ppc +#define helper_store_dbatu helper_store_dbatu_ppc +#define helper_store_dbatl helper_store_dbatl_ppc +#define helper_store_601_batu helper_store_601_batu_ppc +#define helper_store_601_batl helper_store_601_batl_ppc +#define ppc_tlb_invalidate_all ppc_tlb_invalidate_all_ppc +#define ppc_tlb_invalidate_one ppc_tlb_invalidate_one_ppc +#define ppc_store_sdr1 ppc_store_sdr1_ppc +#define helper_load_sr helper_load_sr_ppc +#define helper_store_sr helper_store_sr_ppc +#define helper_tlbia helper_tlbia_ppc +#define helper_tlbie helper_tlbie_ppc +#define helper_tlbiva helper_tlbiva_ppc +#define helper_6xx_tlbd helper_6xx_tlbd_ppc +#define helper_6xx_tlbi helper_6xx_tlbi_ppc +#define helper_74xx_tlbd helper_74xx_tlbd_ppc +#define helper_74xx_tlbi helper_74xx_tlbi_ppc +#define helper_rac helper_rac_ppc +#define helper_4xx_tlbre_hi helper_4xx_tlbre_hi_ppc +#define helper_4xx_tlbre_lo helper_4xx_tlbre_lo_ppc +#define helper_4xx_tlbwe_hi helper_4xx_tlbwe_hi_ppc +#define helper_4xx_tlbwe_lo helper_4xx_tlbwe_lo_ppc +#define helper_4xx_tlbsx helper_4xx_tlbsx_ppc +#define helper_440_tlbwe helper_440_tlbwe_ppc +#define helper_440_tlbre helper_440_tlbre_ppc +#define helper_440_tlbsx helper_440_tlbsx_ppc +#define helper_booke_setpid helper_booke_setpid_ppc +#define helper_booke_set_eplc helper_booke_set_eplc_ppc +#define helper_booke_set_epsc helper_booke_set_epsc_ppc +#define helper_booke206_tlbwe helper_booke206_tlbwe_ppc +#define helper_booke206_tlbre helper_booke206_tlbre_ppc +#define helper_booke206_tlbsx helper_booke206_tlbsx_ppc +#define helper_booke206_tlbivax helper_booke206_tlbivax_ppc +#define helper_booke206_tlbilx0 helper_booke206_tlbilx0_ppc +#define helper_booke206_tlbilx1 helper_booke206_tlbilx1_ppc +#define helper_booke206_tlbilx3 helper_booke206_tlbilx3_ppc +#define 
helper_booke206_tlbflush helper_booke206_tlbflush_ppc +#define helper_check_tlb_flush_local helper_check_tlb_flush_local_ppc +#define helper_check_tlb_flush_global helper_check_tlb_flush_global_ppc +#define ppc_cpu_tlb_fill ppc_cpu_tlb_fill_ppc +#define helper_load_tbl helper_load_tbl_ppc +#define helper_load_tbu helper_load_tbu_ppc +#define helper_load_atbl helper_load_atbl_ppc +#define helper_load_atbu helper_load_atbu_ppc +#define helper_load_vtb helper_load_vtb_ppc +#define helper_load_601_rtcl helper_load_601_rtcl_ppc +#define helper_load_601_rtcu helper_load_601_rtcu_ppc +#define helper_store_tbl helper_store_tbl_ppc +#define helper_store_tbu helper_store_tbu_ppc +#define helper_store_atbl helper_store_atbl_ppc +#define helper_store_atbu helper_store_atbu_ppc +#define helper_store_601_rtcl helper_store_601_rtcl_ppc +#define helper_store_601_rtcu helper_store_601_rtcu_ppc +#define helper_load_decr helper_load_decr_ppc +#define helper_store_decr helper_store_decr_ppc +#define helper_load_hdecr helper_load_hdecr_ppc +#define helper_store_hdecr helper_store_hdecr_ppc +#define helper_store_vtb helper_store_vtb_ppc +#define helper_store_tbu40 helper_store_tbu40_ppc +#define helper_load_40x_pit helper_load_40x_pit_ppc +#define helper_store_40x_pit helper_store_40x_pit_ppc +#define helper_store_booke_tcr helper_store_booke_tcr_ppc +#define helper_store_booke_tsr helper_store_booke_tsr_ppc +#define helper_load_dcr helper_load_dcr_ppc +#define helper_store_dcr helper_store_dcr_ppc +#define helper_raise_exception helper_raise_exception_ppc +#define helper_book3s_msgsnd helper_book3s_msgsnd_ppc +#define helper_cmpb helper_cmpb_ppc +#define helper_mfvscr helper_mfvscr_ppc +#define helper_vaddshs helper_vaddshs_ppc +#define helper_vavguw helper_vavguw_ppc +#define helper_vcmpequh helper_vcmpequh_ppc +#define helper_vcmpequh_dot helper_vcmpequh_dot_ppc +#define helper_vcmpnezh helper_vcmpnezh_ppc +#define helper_vcmpnezh_dot helper_vcmpnezh_dot_ppc +#define helper_vmsumshm helper_vmsumshm_ppc +#define helper_vmsumuhs helper_vmsumuhs_ppc +#define helper_vmulosh helper_vmulosh_ppc +#define helper_vmulosw helper_vmulosw_ppc +#define helper_vbpermq helper_vbpermq_ppc +#define helper_vextsw2d helper_vextsw2d_ppc +#define helper_stmw helper_stmw_ppc +#define ppc_translate_init ppc_translate_init_ppc +#define cpu_ppc_init cpu_ppc_init_ppc +#define gen_intermediate_code gen_intermediate_code_ppc +#define restore_state_to_opc restore_state_to_opc_ppc +#define ppc_set_irq ppc_set_irq_ppc +#define ppc6xx_irq_init ppc6xx_irq_init_ppc +#define ppc40x_core_reset ppc40x_core_reset_ppc +#define ppc40x_chip_reset ppc40x_chip_reset_ppc +#define ppc40x_system_reset ppc40x_system_reset_ppc +#define store_40x_dbcr0 store_40x_dbcr0_ppc +#define ppc40x_irq_init ppc40x_irq_init_ppc +#define ppce500_irq_init ppce500_irq_init_ppc +#define ppce500_set_mpic_proxy ppce500_set_mpic_proxy_ppc +#define cpu_ppc_get_tb cpu_ppc_get_tb_ppc +#define cpu_ppc_load_tbl cpu_ppc_load_tbl_ppc +#define cpu_ppc_load_tbu cpu_ppc_load_tbu_ppc +#define cpu_ppc_store_tbl cpu_ppc_store_tbl_ppc +#define cpu_ppc_store_tbu cpu_ppc_store_tbu_ppc +#define cpu_ppc_load_atbl cpu_ppc_load_atbl_ppc +#define cpu_ppc_load_atbu cpu_ppc_load_atbu_ppc +#define cpu_ppc_store_atbl cpu_ppc_store_atbl_ppc +#define cpu_ppc_store_atbu cpu_ppc_store_atbu_ppc +#define cpu_ppc_load_vtb cpu_ppc_load_vtb_ppc +#define cpu_ppc_store_vtb cpu_ppc_store_vtb_ppc +#define cpu_ppc_store_tbu40 cpu_ppc_store_tbu40_ppc +#define ppc_decr_clear_on_delivery 
ppc_decr_clear_on_delivery_ppc +#define cpu_ppc_load_decr cpu_ppc_load_decr_ppc +#define cpu_ppc_load_hdecr cpu_ppc_load_hdecr_ppc +#define cpu_ppc_load_purr cpu_ppc_load_purr_ppc +#define cpu_ppc_store_decr cpu_ppc_store_decr_ppc +#define cpu_ppc_store_hdecr cpu_ppc_store_hdecr_ppc +#define cpu_ppc_store_purr cpu_ppc_store_purr_ppc +#define cpu_ppc_tb_init cpu_ppc_tb_init_ppc +#define cpu_ppc601_load_rtcu cpu_ppc601_load_rtcu_ppc +#define cpu_ppc601_store_rtcu cpu_ppc601_store_rtcu_ppc +#define cpu_ppc601_load_rtcl cpu_ppc601_load_rtcl_ppc +#define cpu_ppc601_store_rtcl cpu_ppc601_store_rtcl_ppc +#define load_40x_pit load_40x_pit_ppc +#define store_40x_pit store_40x_pit_ppc +#define ppc_40x_timers_init ppc_40x_timers_init_ppc +#define ppc_dcr_read ppc_dcr_read_ppc +#define ppc_dcr_write ppc_dcr_write_ppc +#define ppc_dcr_register ppc_dcr_register_ppc +#define ppc_dcr_init ppc_dcr_init_ppc +#define ppc_cpu_pir ppc_cpu_pir_ppc +#define ppc_irq_reset ppc_irq_reset_ppc +#endif diff --git a/qemu/ppc64.h b/qemu/ppc64.h new file mode 100644 index 00000000..7f41ecc8 --- /dev/null +++ b/qemu/ppc64.h @@ -0,0 +1,1707 @@ +/* Autogen header for Unicorn Engine - DO NOT MODIFY */ +#ifndef UNICORN_AUTOGEN_ppc64_H +#define UNICORN_AUTOGEN_ppc64_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _ppc64 +#endif +#define arm_arch arm_arch_ppc64 +#define tb_target_set_jmp_target tb_target_set_jmp_target_ppc64 +#define have_bmi1 have_bmi1_ppc64 +#define have_popcnt have_popcnt_ppc64 +#define have_avx1 have_avx1_ppc64 +#define have_avx2 have_avx2_ppc64 +#define have_isa have_isa_ppc64 +#define have_altivec have_altivec_ppc64 +#define have_vsx have_vsx_ppc64 +#define flush_icache_range flush_icache_range_ppc64 +#define s390_facilities s390_facilities_ppc64 +#define tcg_dump_op tcg_dump_op_ppc64 +#define tcg_dump_ops tcg_dump_ops_ppc64 +#define tcg_gen_and_i64 tcg_gen_and_i64_ppc64 +#define tcg_gen_discard_i64 tcg_gen_discard_i64_ppc64 +#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_ppc64 +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_ppc64 +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_ppc64 +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_ppc64 +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_ppc64 +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_ppc64 +#define tcg_gen_ld_i64 tcg_gen_ld_i64_ppc64 +#define tcg_gen_mov_i64 tcg_gen_mov_i64_ppc64 +#define tcg_gen_movi_i64 tcg_gen_movi_i64_ppc64 +#define tcg_gen_mul_i64 tcg_gen_mul_i64_ppc64 +#define tcg_gen_or_i64 tcg_gen_or_i64_ppc64 +#define tcg_gen_sar_i64 tcg_gen_sar_i64_ppc64 +#define tcg_gen_shl_i64 tcg_gen_shl_i64_ppc64 +#define tcg_gen_shr_i64 tcg_gen_shr_i64_ppc64 +#define tcg_gen_st_i64 tcg_gen_st_i64_ppc64 +#define tcg_gen_xor_i64 tcg_gen_xor_i64_ppc64 +#define cpu_icount_to_ns cpu_icount_to_ns_ppc64 +#define cpu_is_stopped cpu_is_stopped_ppc64 +#define cpu_get_ticks cpu_get_ticks_ppc64 +#define cpu_get_clock cpu_get_clock_ppc64 +#define cpu_resume cpu_resume_ppc64 +#define qemu_init_vcpu qemu_init_vcpu_ppc64 +#define cpu_stop_current cpu_stop_current_ppc64 +#define resume_all_vcpus resume_all_vcpus_ppc64 +#define vm_start vm_start_ppc64 +#define address_space_dispatch_compact address_space_dispatch_compact_ppc64 +#define flatview_translate flatview_translate_ppc64 +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_ppc64 +#define qemu_get_cpu qemu_get_cpu_ppc64 +#define cpu_address_space_init cpu_address_space_init_ppc64 +#define cpu_get_address_space cpu_get_address_space_ppc64 +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_ppc64
+#define cpu_exec_initfn cpu_exec_initfn_ppc64 +#define cpu_exec_realizefn cpu_exec_realizefn_ppc64 +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_ppc64 +#define cpu_watchpoint_insert cpu_watchpoint_insert_ppc64 +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_ppc64 +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_ppc64 +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_ppc64 +#define cpu_breakpoint_insert cpu_breakpoint_insert_ppc64 +#define cpu_breakpoint_remove cpu_breakpoint_remove_ppc64 +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_ppc64 +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_ppc64 +#define cpu_abort cpu_abort_ppc64 +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_ppc64 +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_ppc64 +#define flatview_add_to_dispatch flatview_add_to_dispatch_ppc64 +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_ppc64 +#define qemu_ram_get_offset qemu_ram_get_offset_ppc64 +#define qemu_ram_get_used_length qemu_ram_get_used_length_ppc64 +#define qemu_ram_is_shared qemu_ram_is_shared_ppc64 +#define qemu_ram_pagesize qemu_ram_pagesize_ppc64 +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_ppc64 +#define qemu_ram_alloc qemu_ram_alloc_ppc64 +#define qemu_ram_free qemu_ram_free_ppc64 +#define qemu_map_ram_ptr qemu_map_ram_ptr_ppc64 +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_ppc64 +#define qemu_ram_block_from_host qemu_ram_block_from_host_ppc64 +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_ppc64 +#define cpu_check_watchpoint cpu_check_watchpoint_ppc64 +#define iotlb_to_section iotlb_to_section_ppc64 +#define address_space_dispatch_new address_space_dispatch_new_ppc64 +#define address_space_dispatch_free address_space_dispatch_free_ppc64 +#define flatview_read_continue flatview_read_continue_ppc64 +#define address_space_read_full address_space_read_full_ppc64 +#define address_space_write address_space_write_ppc64 +#define address_space_rw address_space_rw_ppc64 +#define cpu_physical_memory_rw cpu_physical_memory_rw_ppc64 +#define address_space_write_rom address_space_write_rom_ppc64 +#define cpu_flush_icache_range cpu_flush_icache_range_ppc64 +#define cpu_exec_init_all cpu_exec_init_all_ppc64 +#define address_space_access_valid address_space_access_valid_ppc64 +#define address_space_map address_space_map_ppc64 +#define address_space_unmap address_space_unmap_ppc64 +#define cpu_physical_memory_map cpu_physical_memory_map_ppc64 +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_ppc64 +#define cpu_memory_rw_debug cpu_memory_rw_debug_ppc64 +#define qemu_target_page_size qemu_target_page_size_ppc64 +#define qemu_target_page_bits qemu_target_page_bits_ppc64 +#define qemu_target_page_bits_min qemu_target_page_bits_min_ppc64 +#define target_words_bigendian target_words_bigendian_ppc64 +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_ppc64 +#define ram_block_discard_range ram_block_discard_range_ppc64 +#define ramblock_is_pmem ramblock_is_pmem_ppc64 +#define page_size_init page_size_init_ppc64 +#define set_preferred_target_page_bits set_preferred_target_page_bits_ppc64 +#define finalize_target_page_bits finalize_target_page_bits_ppc64 +#define cpu_outb cpu_outb_ppc64 +#define cpu_outw cpu_outw_ppc64 +#define cpu_outl cpu_outl_ppc64 +#define cpu_inb cpu_inb_ppc64 +#define cpu_inw cpu_inw_ppc64 +#define cpu_inl cpu_inl_ppc64 +#define memory_map 
memory_map_ppc64 +#define memory_map_io memory_map_io_ppc64 +#define memory_map_ptr memory_map_ptr_ppc64 +#define memory_unmap memory_unmap_ppc64 +#define memory_free memory_free_ppc64 +#define flatview_unref flatview_unref_ppc64 +#define address_space_get_flatview address_space_get_flatview_ppc64 +#define memory_region_transaction_begin memory_region_transaction_begin_ppc64 +#define memory_region_transaction_commit memory_region_transaction_commit_ppc64 +#define memory_region_init memory_region_init_ppc64 +#define memory_region_access_valid memory_region_access_valid_ppc64 +#define memory_region_dispatch_read memory_region_dispatch_read_ppc64 +#define memory_region_dispatch_write memory_region_dispatch_write_ppc64 +#define memory_region_init_io memory_region_init_io_ppc64 +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_ppc64 +#define memory_region_size memory_region_size_ppc64 +#define memory_region_set_readonly memory_region_set_readonly_ppc64 +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_ppc64 +#define memory_region_from_host memory_region_from_host_ppc64 +#define memory_region_get_ram_addr memory_region_get_ram_addr_ppc64 +#define memory_region_add_subregion memory_region_add_subregion_ppc64 +#define memory_region_del_subregion memory_region_del_subregion_ppc64 +#define memory_region_find memory_region_find_ppc64 +#define memory_listener_register memory_listener_register_ppc64 +#define memory_listener_unregister memory_listener_unregister_ppc64 +#define address_space_remove_listeners address_space_remove_listeners_ppc64 +#define address_space_init address_space_init_ppc64 +#define address_space_destroy address_space_destroy_ppc64 +#define memory_region_init_ram memory_region_init_ram_ppc64 +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_ppc64 +#define exec_inline_op exec_inline_op_ppc64 +#define floatx80_default_nan floatx80_default_nan_ppc64 +#define float_raise float_raise_ppc64 +#define float16_is_quiet_nan float16_is_quiet_nan_ppc64 +#define float16_is_signaling_nan float16_is_signaling_nan_ppc64 +#define float32_is_quiet_nan float32_is_quiet_nan_ppc64 +#define float32_is_signaling_nan float32_is_signaling_nan_ppc64 +#define float64_is_quiet_nan float64_is_quiet_nan_ppc64 +#define float64_is_signaling_nan float64_is_signaling_nan_ppc64 +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_ppc64 +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_ppc64 +#define floatx80_silence_nan floatx80_silence_nan_ppc64 +#define propagateFloatx80NaN propagateFloatx80NaN_ppc64 +#define float128_is_quiet_nan float128_is_quiet_nan_ppc64 +#define float128_is_signaling_nan float128_is_signaling_nan_ppc64 +#define float128_silence_nan float128_silence_nan_ppc64 +#define float16_add float16_add_ppc64 +#define float16_sub float16_sub_ppc64 +#define float32_add float32_add_ppc64 +#define float32_sub float32_sub_ppc64 +#define float64_add float64_add_ppc64 +#define float64_sub float64_sub_ppc64 +#define float16_mul float16_mul_ppc64 +#define float32_mul float32_mul_ppc64 +#define float64_mul float64_mul_ppc64 +#define float16_muladd float16_muladd_ppc64 +#define float32_muladd float32_muladd_ppc64 +#define float64_muladd float64_muladd_ppc64 +#define float16_div float16_div_ppc64 +#define float32_div float32_div_ppc64 +#define float64_div float64_div_ppc64 +#define float16_to_float32 float16_to_float32_ppc64 +#define float16_to_float64 float16_to_float64_ppc64 +#define float32_to_float16 float32_to_float16_ppc64 +#define 
float32_to_float64 float32_to_float64_ppc64 +#define float64_to_float16 float64_to_float16_ppc64 +#define float64_to_float32 float64_to_float32_ppc64 +#define float16_round_to_int float16_round_to_int_ppc64 +#define float32_round_to_int float32_round_to_int_ppc64 +#define float64_round_to_int float64_round_to_int_ppc64 +#define float16_to_int16_scalbn float16_to_int16_scalbn_ppc64 +#define float16_to_int32_scalbn float16_to_int32_scalbn_ppc64 +#define float16_to_int64_scalbn float16_to_int64_scalbn_ppc64 +#define float32_to_int16_scalbn float32_to_int16_scalbn_ppc64 +#define float32_to_int32_scalbn float32_to_int32_scalbn_ppc64 +#define float32_to_int64_scalbn float32_to_int64_scalbn_ppc64 +#define float64_to_int16_scalbn float64_to_int16_scalbn_ppc64 +#define float64_to_int32_scalbn float64_to_int32_scalbn_ppc64 +#define float64_to_int64_scalbn float64_to_int64_scalbn_ppc64 +#define float16_to_int16 float16_to_int16_ppc64 +#define float16_to_int32 float16_to_int32_ppc64 +#define float16_to_int64 float16_to_int64_ppc64 +#define float32_to_int16 float32_to_int16_ppc64 +#define float32_to_int32 float32_to_int32_ppc64 +#define float32_to_int64 float32_to_int64_ppc64 +#define float64_to_int16 float64_to_int16_ppc64 +#define float64_to_int32 float64_to_int32_ppc64 +#define float64_to_int64 float64_to_int64_ppc64 +#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_ppc64 +#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_ppc64 +#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_ppc64 +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_ppc64 +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_ppc64 +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_ppc64 +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_ppc64 +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_ppc64 +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_ppc64 +#define float16_to_uint16_scalbn float16_to_uint16_scalbn_ppc64 +#define float16_to_uint32_scalbn float16_to_uint32_scalbn_ppc64 +#define float16_to_uint64_scalbn float16_to_uint64_scalbn_ppc64 +#define float32_to_uint16_scalbn float32_to_uint16_scalbn_ppc64 +#define float32_to_uint32_scalbn float32_to_uint32_scalbn_ppc64 +#define float32_to_uint64_scalbn float32_to_uint64_scalbn_ppc64 +#define float64_to_uint16_scalbn float64_to_uint16_scalbn_ppc64 +#define float64_to_uint32_scalbn float64_to_uint32_scalbn_ppc64 +#define float64_to_uint64_scalbn float64_to_uint64_scalbn_ppc64 +#define float16_to_uint16 float16_to_uint16_ppc64 +#define float16_to_uint32 float16_to_uint32_ppc64 +#define float16_to_uint64 float16_to_uint64_ppc64 +#define float32_to_uint16 float32_to_uint16_ppc64 +#define float32_to_uint32 float32_to_uint32_ppc64 +#define float32_to_uint64 float32_to_uint64_ppc64 +#define float64_to_uint16 float64_to_uint16_ppc64 +#define float64_to_uint32 float64_to_uint32_ppc64 +#define float64_to_uint64 float64_to_uint64_ppc64 +#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_ppc64 +#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_ppc64 +#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_ppc64 +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_ppc64 +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_ppc64 +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_ppc64 +#define 
float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_ppc64 +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_ppc64 +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_ppc64 +#define int64_to_float16_scalbn int64_to_float16_scalbn_ppc64 +#define int32_to_float16_scalbn int32_to_float16_scalbn_ppc64 +#define int16_to_float16_scalbn int16_to_float16_scalbn_ppc64 +#define int64_to_float16 int64_to_float16_ppc64 +#define int32_to_float16 int32_to_float16_ppc64 +#define int16_to_float16 int16_to_float16_ppc64 +#define int64_to_float32_scalbn int64_to_float32_scalbn_ppc64 +#define int32_to_float32_scalbn int32_to_float32_scalbn_ppc64 +#define int16_to_float32_scalbn int16_to_float32_scalbn_ppc64 +#define int64_to_float32 int64_to_float32_ppc64 +#define int32_to_float32 int32_to_float32_ppc64 +#define int16_to_float32 int16_to_float32_ppc64 +#define int64_to_float64_scalbn int64_to_float64_scalbn_ppc64 +#define int32_to_float64_scalbn int32_to_float64_scalbn_ppc64 +#define int16_to_float64_scalbn int16_to_float64_scalbn_ppc64 +#define int64_to_float64 int64_to_float64_ppc64 +#define int32_to_float64 int32_to_float64_ppc64 +#define int16_to_float64 int16_to_float64_ppc64 +#define uint64_to_float16_scalbn uint64_to_float16_scalbn_ppc64 +#define uint32_to_float16_scalbn uint32_to_float16_scalbn_ppc64 +#define uint16_to_float16_scalbn uint16_to_float16_scalbn_ppc64 +#define uint64_to_float16 uint64_to_float16_ppc64 +#define uint32_to_float16 uint32_to_float16_ppc64 +#define uint16_to_float16 uint16_to_float16_ppc64 +#define uint64_to_float32_scalbn uint64_to_float32_scalbn_ppc64 +#define uint32_to_float32_scalbn uint32_to_float32_scalbn_ppc64 +#define uint16_to_float32_scalbn uint16_to_float32_scalbn_ppc64 +#define uint64_to_float32 uint64_to_float32_ppc64 +#define uint32_to_float32 uint32_to_float32_ppc64 +#define uint16_to_float32 uint16_to_float32_ppc64 +#define uint64_to_float64_scalbn uint64_to_float64_scalbn_ppc64 +#define uint32_to_float64_scalbn uint32_to_float64_scalbn_ppc64 +#define uint16_to_float64_scalbn uint16_to_float64_scalbn_ppc64 +#define uint64_to_float64 uint64_to_float64_ppc64 +#define uint32_to_float64 uint32_to_float64_ppc64 +#define uint16_to_float64 uint16_to_float64_ppc64 +#define float16_min float16_min_ppc64 +#define float16_minnum float16_minnum_ppc64 +#define float16_minnummag float16_minnummag_ppc64 +#define float16_max float16_max_ppc64 +#define float16_maxnum float16_maxnum_ppc64 +#define float16_maxnummag float16_maxnummag_ppc64 +#define float32_min float32_min_ppc64 +#define float32_minnum float32_minnum_ppc64 +#define float32_minnummag float32_minnummag_ppc64 +#define float32_max float32_max_ppc64 +#define float32_maxnum float32_maxnum_ppc64 +#define float32_maxnummag float32_maxnummag_ppc64 +#define float64_min float64_min_ppc64 +#define float64_minnum float64_minnum_ppc64 +#define float64_minnummag float64_minnummag_ppc64 +#define float64_max float64_max_ppc64 +#define float64_maxnum float64_maxnum_ppc64 +#define float64_maxnummag float64_maxnummag_ppc64 +#define float16_compare float16_compare_ppc64 +#define float16_compare_quiet float16_compare_quiet_ppc64 +#define float32_compare float32_compare_ppc64 +#define float32_compare_quiet float32_compare_quiet_ppc64 +#define float64_compare float64_compare_ppc64 +#define float64_compare_quiet float64_compare_quiet_ppc64 +#define float16_scalbn float16_scalbn_ppc64 +#define float32_scalbn float32_scalbn_ppc64 +#define float64_scalbn float64_scalbn_ppc64 +#define 
float16_sqrt float16_sqrt_ppc64 +#define float32_sqrt float32_sqrt_ppc64 +#define float64_sqrt float64_sqrt_ppc64 +#define float16_default_nan float16_default_nan_ppc64 +#define float32_default_nan float32_default_nan_ppc64 +#define float64_default_nan float64_default_nan_ppc64 +#define float128_default_nan float128_default_nan_ppc64 +#define float16_silence_nan float16_silence_nan_ppc64 +#define float32_silence_nan float32_silence_nan_ppc64 +#define float64_silence_nan float64_silence_nan_ppc64 +#define float16_squash_input_denormal float16_squash_input_denormal_ppc64 +#define float32_squash_input_denormal float32_squash_input_denormal_ppc64 +#define float64_squash_input_denormal float64_squash_input_denormal_ppc64 +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_ppc64 +#define roundAndPackFloatx80 roundAndPackFloatx80_ppc64 +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_ppc64 +#define int32_to_floatx80 int32_to_floatx80_ppc64 +#define int32_to_float128 int32_to_float128_ppc64 +#define int64_to_floatx80 int64_to_floatx80_ppc64 +#define int64_to_float128 int64_to_float128_ppc64 +#define uint64_to_float128 uint64_to_float128_ppc64 +#define float32_to_floatx80 float32_to_floatx80_ppc64 +#define float32_to_float128 float32_to_float128_ppc64 +#define float32_rem float32_rem_ppc64 +#define float32_exp2 float32_exp2_ppc64 +#define float32_log2 float32_log2_ppc64 +#define float32_eq float32_eq_ppc64 +#define float32_le float32_le_ppc64 +#define float32_lt float32_lt_ppc64 +#define float32_unordered float32_unordered_ppc64 +#define float32_eq_quiet float32_eq_quiet_ppc64 +#define float32_le_quiet float32_le_quiet_ppc64 +#define float32_lt_quiet float32_lt_quiet_ppc64 +#define float32_unordered_quiet float32_unordered_quiet_ppc64 +#define float64_to_floatx80 float64_to_floatx80_ppc64 +#define float64_to_float128 float64_to_float128_ppc64 +#define float64_rem float64_rem_ppc64 +#define float64_log2 float64_log2_ppc64 +#define float64_eq float64_eq_ppc64 +#define float64_le float64_le_ppc64 +#define float64_lt float64_lt_ppc64 +#define float64_unordered float64_unordered_ppc64 +#define float64_eq_quiet float64_eq_quiet_ppc64 +#define float64_le_quiet float64_le_quiet_ppc64 +#define float64_lt_quiet float64_lt_quiet_ppc64 +#define float64_unordered_quiet float64_unordered_quiet_ppc64 +#define floatx80_to_int32 floatx80_to_int32_ppc64 +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_ppc64 +#define floatx80_to_int64 floatx80_to_int64_ppc64 +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_ppc64 +#define floatx80_to_float32 floatx80_to_float32_ppc64 +#define floatx80_to_float64 floatx80_to_float64_ppc64 +#define floatx80_to_float128 floatx80_to_float128_ppc64 +#define floatx80_round floatx80_round_ppc64 +#define floatx80_round_to_int floatx80_round_to_int_ppc64 +#define floatx80_add floatx80_add_ppc64 +#define floatx80_sub floatx80_sub_ppc64 +#define floatx80_mul floatx80_mul_ppc64 +#define floatx80_div floatx80_div_ppc64 +#define floatx80_rem floatx80_rem_ppc64 +#define floatx80_sqrt floatx80_sqrt_ppc64 +#define floatx80_eq floatx80_eq_ppc64 +#define floatx80_le floatx80_le_ppc64 +#define floatx80_lt floatx80_lt_ppc64 +#define floatx80_unordered floatx80_unordered_ppc64 +#define floatx80_eq_quiet floatx80_eq_quiet_ppc64 +#define floatx80_le_quiet floatx80_le_quiet_ppc64 +#define floatx80_lt_quiet floatx80_lt_quiet_ppc64 +#define floatx80_unordered_quiet floatx80_unordered_quiet_ppc64 +#define float128_to_int32 
float128_to_int32_ppc64 +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_ppc64 +#define float128_to_int64 float128_to_int64_ppc64 +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_ppc64 +#define float128_to_uint64 float128_to_uint64_ppc64 +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_ppc64 +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_ppc64 +#define float128_to_uint32 float128_to_uint32_ppc64 +#define float128_to_float32 float128_to_float32_ppc64 +#define float128_to_float64 float128_to_float64_ppc64 +#define float128_to_floatx80 float128_to_floatx80_ppc64 +#define float128_round_to_int float128_round_to_int_ppc64 +#define float128_add float128_add_ppc64 +#define float128_sub float128_sub_ppc64 +#define float128_mul float128_mul_ppc64 +#define float128_div float128_div_ppc64 +#define float128_rem float128_rem_ppc64 +#define float128_sqrt float128_sqrt_ppc64 +#define float128_eq float128_eq_ppc64 +#define float128_le float128_le_ppc64 +#define float128_lt float128_lt_ppc64 +#define float128_unordered float128_unordered_ppc64 +#define float128_eq_quiet float128_eq_quiet_ppc64 +#define float128_le_quiet float128_le_quiet_ppc64 +#define float128_lt_quiet float128_lt_quiet_ppc64 +#define float128_unordered_quiet float128_unordered_quiet_ppc64 +#define floatx80_compare floatx80_compare_ppc64 +#define floatx80_compare_quiet floatx80_compare_quiet_ppc64 +#define float128_compare float128_compare_ppc64 +#define float128_compare_quiet float128_compare_quiet_ppc64 +#define floatx80_scalbn floatx80_scalbn_ppc64 +#define float128_scalbn float128_scalbn_ppc64 +#define softfloat_init softfloat_init_ppc64 +#define tcg_optimize tcg_optimize_ppc64 +#define gen_new_label gen_new_label_ppc64 +#define tcg_can_emit_vec_op tcg_can_emit_vec_op_ppc64 +#define tcg_expand_vec_op tcg_expand_vec_op_ppc64 +#define tcg_register_jit tcg_register_jit_ppc64 +#define tcg_tb_insert tcg_tb_insert_ppc64 +#define tcg_tb_remove tcg_tb_remove_ppc64 +#define tcg_tb_lookup tcg_tb_lookup_ppc64 +#define tcg_tb_foreach tcg_tb_foreach_ppc64 +#define tcg_nb_tbs tcg_nb_tbs_ppc64 +#define tcg_region_reset_all tcg_region_reset_all_ppc64 +#define tcg_region_init tcg_region_init_ppc64 +#define tcg_code_size tcg_code_size_ppc64 +#define tcg_code_capacity tcg_code_capacity_ppc64 +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_ppc64 +#define tcg_malloc_internal tcg_malloc_internal_ppc64 +#define tcg_pool_reset tcg_pool_reset_ppc64 +#define tcg_context_init tcg_context_init_ppc64 +#define tcg_tb_alloc tcg_tb_alloc_ppc64 +#define tcg_prologue_init tcg_prologue_init_ppc64 +#define tcg_func_start tcg_func_start_ppc64 +#define tcg_set_frame tcg_set_frame_ppc64 +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_ppc64 +#define tcg_temp_new_internal tcg_temp_new_internal_ppc64 +#define tcg_temp_new_vec tcg_temp_new_vec_ppc64 +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_ppc64 +#define tcg_temp_free_internal tcg_temp_free_internal_ppc64 +#define tcg_const_i32 tcg_const_i32_ppc64 +#define tcg_const_i64 tcg_const_i64_ppc64 +#define tcg_const_local_i32 tcg_const_local_i32_ppc64 +#define tcg_const_local_i64 tcg_const_local_i64_ppc64 +#define tcg_op_supported tcg_op_supported_ppc64 +#define tcg_gen_callN tcg_gen_callN_ppc64 +#define tcg_op_remove tcg_op_remove_ppc64 +#define tcg_emit_op tcg_emit_op_ppc64 +#define tcg_op_insert_before tcg_op_insert_before_ppc64 +#define tcg_op_insert_after 
tcg_op_insert_after_ppc64 +#define tcg_cpu_exec_time tcg_cpu_exec_time_ppc64 +#define tcg_gen_code tcg_gen_code_ppc64 +#define tcg_gen_op1 tcg_gen_op1_ppc64 +#define tcg_gen_op2 tcg_gen_op2_ppc64 +#define tcg_gen_op3 tcg_gen_op3_ppc64 +#define tcg_gen_op4 tcg_gen_op4_ppc64 +#define tcg_gen_op5 tcg_gen_op5_ppc64 +#define tcg_gen_op6 tcg_gen_op6_ppc64 +#define tcg_gen_mb tcg_gen_mb_ppc64 +#define tcg_gen_addi_i32 tcg_gen_addi_i32_ppc64 +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_ppc64 +#define tcg_gen_subi_i32 tcg_gen_subi_i32_ppc64 +#define tcg_gen_andi_i32 tcg_gen_andi_i32_ppc64 +#define tcg_gen_ori_i32 tcg_gen_ori_i32_ppc64 +#define tcg_gen_xori_i32 tcg_gen_xori_i32_ppc64 +#define tcg_gen_shli_i32 tcg_gen_shli_i32_ppc64 +#define tcg_gen_shri_i32 tcg_gen_shri_i32_ppc64 +#define tcg_gen_sari_i32 tcg_gen_sari_i32_ppc64 +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_ppc64 +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_ppc64 +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_ppc64 +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_ppc64 +#define tcg_gen_muli_i32 tcg_gen_muli_i32_ppc64 +#define tcg_gen_div_i32 tcg_gen_div_i32_ppc64 +#define tcg_gen_rem_i32 tcg_gen_rem_i32_ppc64 +#define tcg_gen_divu_i32 tcg_gen_divu_i32_ppc64 +#define tcg_gen_remu_i32 tcg_gen_remu_i32_ppc64 +#define tcg_gen_andc_i32 tcg_gen_andc_i32_ppc64 +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_ppc64 +#define tcg_gen_nand_i32 tcg_gen_nand_i32_ppc64 +#define tcg_gen_nor_i32 tcg_gen_nor_i32_ppc64 +#define tcg_gen_orc_i32 tcg_gen_orc_i32_ppc64 +#define tcg_gen_clz_i32 tcg_gen_clz_i32_ppc64 +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_ppc64 +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_ppc64 +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_ppc64 +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_ppc64 +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_ppc64 +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_ppc64 +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_ppc64 +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_ppc64 +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_ppc64 +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_ppc64 +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_ppc64 +#define tcg_gen_extract_i32 tcg_gen_extract_i32_ppc64 +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_ppc64 +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_ppc64 +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_ppc64 +#define tcg_gen_add2_i32 tcg_gen_add2_i32_ppc64 +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_ppc64 +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_ppc64 +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_ppc64 +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_ppc64 +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_ppc64 +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_ppc64 +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_ppc64 +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_ppc64 +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_ppc64 +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_ppc64 +#define tcg_gen_smin_i32 tcg_gen_smin_i32_ppc64 +#define tcg_gen_umin_i32 tcg_gen_umin_i32_ppc64 +#define tcg_gen_smax_i32 tcg_gen_smax_i32_ppc64 +#define tcg_gen_umax_i32 tcg_gen_umax_i32_ppc64 +#define tcg_gen_abs_i32 tcg_gen_abs_i32_ppc64 +#define tcg_gen_addi_i64 tcg_gen_addi_i64_ppc64 +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_ppc64 +#define tcg_gen_subi_i64 tcg_gen_subi_i64_ppc64 +#define tcg_gen_andi_i64 tcg_gen_andi_i64_ppc64 +#define tcg_gen_ori_i64 tcg_gen_ori_i64_ppc64 +#define tcg_gen_xori_i64 tcg_gen_xori_i64_ppc64 +#define tcg_gen_shli_i64 tcg_gen_shli_i64_ppc64 +#define tcg_gen_shri_i64 
tcg_gen_shri_i64_ppc64 +#define tcg_gen_sari_i64 tcg_gen_sari_i64_ppc64 +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_ppc64 +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_ppc64 +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_ppc64 +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_ppc64 +#define tcg_gen_muli_i64 tcg_gen_muli_i64_ppc64 +#define tcg_gen_div_i64 tcg_gen_div_i64_ppc64 +#define tcg_gen_rem_i64 tcg_gen_rem_i64_ppc64 +#define tcg_gen_divu_i64 tcg_gen_divu_i64_ppc64 +#define tcg_gen_remu_i64 tcg_gen_remu_i64_ppc64 +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_ppc64 +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_ppc64 +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_ppc64 +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_ppc64 +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_ppc64 +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_ppc64 +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_ppc64 +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_ppc64 +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_ppc64 +#define tcg_gen_not_i64 tcg_gen_not_i64_ppc64 +#define tcg_gen_andc_i64 tcg_gen_andc_i64_ppc64 +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_ppc64 +#define tcg_gen_nand_i64 tcg_gen_nand_i64_ppc64 +#define tcg_gen_nor_i64 tcg_gen_nor_i64_ppc64 +#define tcg_gen_orc_i64 tcg_gen_orc_i64_ppc64 +#define tcg_gen_clz_i64 tcg_gen_clz_i64_ppc64 +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_ppc64 +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_ppc64 +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_ppc64 +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_ppc64 +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_ppc64 +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_ppc64 +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_ppc64 +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_ppc64 +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_ppc64 +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_ppc64 +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_ppc64 +#define tcg_gen_extract_i64 tcg_gen_extract_i64_ppc64 +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_ppc64 +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_ppc64 +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_ppc64 +#define tcg_gen_add2_i64 tcg_gen_add2_i64_ppc64 +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_ppc64 +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_ppc64 +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_ppc64 +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_ppc64 +#define tcg_gen_smin_i64 tcg_gen_smin_i64_ppc64 +#define tcg_gen_umin_i64 tcg_gen_umin_i64_ppc64 +#define tcg_gen_smax_i64 tcg_gen_smax_i64_ppc64 +#define tcg_gen_umax_i64 tcg_gen_umax_i64_ppc64 +#define tcg_gen_abs_i64 tcg_gen_abs_i64_ppc64 +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_ppc64 +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_ppc64 +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_ppc64 +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_ppc64 +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_ppc64 +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_ppc64 +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_ppc64 +#define tcg_gen_exit_tb tcg_gen_exit_tb_ppc64 +#define tcg_gen_goto_tb tcg_gen_goto_tb_ppc64 +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_ppc64 +#define check_exit_request check_exit_request_ppc64 +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_ppc64 +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_ppc64 +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_ppc64 +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_ppc64 +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_ppc64 +#define 
tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_ppc64 +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_ppc64 +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_ppc64 +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_ppc64 +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_ppc64 +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_ppc64 +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_ppc64 +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_ppc64 +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_ppc64 +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_ppc64 +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_ppc64 +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_ppc64 +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_ppc64 +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_ppc64 +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_ppc64 +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_ppc64 +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_ppc64 +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_ppc64 +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_ppc64 +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_ppc64 +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_ppc64 +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_ppc64 +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_ppc64 +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_ppc64 +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_ppc64 +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_ppc64 +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_ppc64 +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_ppc64 +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_ppc64 +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_ppc64 +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_ppc64 +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_ppc64 +#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_ppc64 +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_ppc64 +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_ppc64 +#define simd_desc simd_desc_ppc64 +#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_ppc64 +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_ppc64 +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_ppc64 +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_ppc64 +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_ppc64 +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_ppc64 +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_ppc64 +#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_ppc64 +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_ppc64 +#define tcg_gen_gvec_2 tcg_gen_gvec_2_ppc64 +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_ppc64 +#define tcg_gen_gvec_2s tcg_gen_gvec_2s_ppc64 +#define tcg_gen_gvec_3 tcg_gen_gvec_3_ppc64 +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_ppc64 +#define tcg_gen_gvec_4 tcg_gen_gvec_4_ppc64 +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_ppc64 +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_ppc64 +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_ppc64 +#define tcg_gen_gvec_dup_mem 
tcg_gen_gvec_dup_mem_ppc64 +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_ppc64 +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_ppc64 +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_ppc64 +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_ppc64 +#define tcg_gen_gvec_not tcg_gen_gvec_not_ppc64 +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_ppc64 +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_ppc64 +#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_ppc64 +#define tcg_gen_gvec_add tcg_gen_gvec_add_ppc64 +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_ppc64 +#define tcg_gen_gvec_addi tcg_gen_gvec_addi_ppc64 +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_ppc64 +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_ppc64 +#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_ppc64 +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_ppc64 +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_ppc64 +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_ppc64 +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_ppc64 +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_ppc64 +#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_ppc64 +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_ppc64 +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_ppc64 +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_ppc64 +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_ppc64 +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_ppc64 +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_ppc64 +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_ppc64 +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_ppc64 +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_ppc64 +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_ppc64 +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_ppc64 +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_ppc64 +#define tcg_gen_gvec_and tcg_gen_gvec_and_ppc64 +#define tcg_gen_gvec_or tcg_gen_gvec_or_ppc64 +#define tcg_gen_gvec_xor tcg_gen_gvec_xor_ppc64 +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_ppc64 +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_ppc64 +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_ppc64 +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_ppc64 +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_ppc64 +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_ppc64 +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_ppc64 +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_ppc64 +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_ppc64 +#define tcg_gen_gvec_ors tcg_gen_gvec_ors_ppc64 +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_ppc64 +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_ppc64 +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_ppc64 +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_ppc64 +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_ppc64 +#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_ppc64 +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_ppc64 +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_ppc64 +#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_ppc64 +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_ppc64 +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_ppc64 +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_ppc64 +#define tcg_gen_gvec_sars tcg_gen_gvec_sars_ppc64 +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_ppc64 +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_ppc64 +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_ppc64 +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_ppc64 +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_ppc64 +#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_ppc64 +#define vec_gen_2 vec_gen_2_ppc64 +#define vec_gen_3 vec_gen_3_ppc64 +#define vec_gen_4 vec_gen_4_ppc64 +#define tcg_gen_mov_vec 
tcg_gen_mov_vec_ppc64 +#define tcg_const_zeros_vec tcg_const_zeros_vec_ppc64 +#define tcg_const_ones_vec tcg_const_ones_vec_ppc64 +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_ppc64 +#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_ppc64 +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_ppc64 +#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_ppc64 +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_ppc64 +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_ppc64 +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_ppc64 +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_ppc64 +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_ppc64 +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_ppc64 +#define tcg_gen_ld_vec tcg_gen_ld_vec_ppc64 +#define tcg_gen_st_vec tcg_gen_st_vec_ppc64 +#define tcg_gen_stl_vec tcg_gen_stl_vec_ppc64 +#define tcg_gen_and_vec tcg_gen_and_vec_ppc64 +#define tcg_gen_or_vec tcg_gen_or_vec_ppc64 +#define tcg_gen_xor_vec tcg_gen_xor_vec_ppc64 +#define tcg_gen_andc_vec tcg_gen_andc_vec_ppc64 +#define tcg_gen_orc_vec tcg_gen_orc_vec_ppc64 +#define tcg_gen_nand_vec tcg_gen_nand_vec_ppc64 +#define tcg_gen_nor_vec tcg_gen_nor_vec_ppc64 +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_ppc64 +#define tcg_gen_not_vec tcg_gen_not_vec_ppc64 +#define tcg_gen_neg_vec tcg_gen_neg_vec_ppc64 +#define tcg_gen_abs_vec tcg_gen_abs_vec_ppc64 +#define tcg_gen_shli_vec tcg_gen_shli_vec_ppc64 +#define tcg_gen_shri_vec tcg_gen_shri_vec_ppc64 +#define tcg_gen_sari_vec tcg_gen_sari_vec_ppc64 +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_ppc64 +#define tcg_gen_add_vec tcg_gen_add_vec_ppc64 +#define tcg_gen_sub_vec tcg_gen_sub_vec_ppc64 +#define tcg_gen_mul_vec tcg_gen_mul_vec_ppc64 +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_ppc64 +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_ppc64 +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_ppc64 +#define tcg_gen_ussub_vec tcg_gen_ussub_vec_ppc64 +#define tcg_gen_smin_vec tcg_gen_smin_vec_ppc64 +#define tcg_gen_umin_vec tcg_gen_umin_vec_ppc64 +#define tcg_gen_smax_vec tcg_gen_smax_vec_ppc64 +#define tcg_gen_umax_vec tcg_gen_umax_vec_ppc64 +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_ppc64 +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_ppc64 +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_ppc64 +#define tcg_gen_shls_vec tcg_gen_shls_vec_ppc64 +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_ppc64 +#define tcg_gen_sars_vec tcg_gen_sars_vec_ppc64 +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_ppc64 +#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_ppc64 +#define tb_htable_lookup tb_htable_lookup_ppc64 +#define tb_set_jmp_target tb_set_jmp_target_ppc64 +#define cpu_exec cpu_exec_ppc64 +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_ppc64 +#define cpu_reloading_memory_map cpu_reloading_memory_map_ppc64 +#define cpu_loop_exit cpu_loop_exit_ppc64 +#define cpu_loop_exit_restore cpu_loop_exit_restore_ppc64 +#define cpu_loop_exit_atomic cpu_loop_exit_atomic_ppc64 +#define tlb_init tlb_init_ppc64 +#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_ppc64 +#define tlb_flush tlb_flush_ppc64 +#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_ppc64 +#define tlb_flush_all_cpus tlb_flush_all_cpus_ppc64 +#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_ppc64 +#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_ppc64 +#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_ppc64 +#define tlb_flush_page tlb_flush_page_ppc64 +#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_ppc64 +#define tlb_flush_page_all_cpus 
tlb_flush_page_all_cpus_ppc64 +#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_ppc64 +#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_ppc64 +#define tlb_protect_code tlb_protect_code_ppc64 +#define tlb_unprotect_code tlb_unprotect_code_ppc64 +#define tlb_reset_dirty tlb_reset_dirty_ppc64 +#define tlb_set_dirty tlb_set_dirty_ppc64 +#define tlb_set_page_with_attrs tlb_set_page_with_attrs_ppc64 +#define tlb_set_page tlb_set_page_ppc64 +#define get_page_addr_code_hostp get_page_addr_code_hostp_ppc64 +#define get_page_addr_code get_page_addr_code_ppc64 +#define probe_access probe_access_ppc64 +#define tlb_vaddr_to_host tlb_vaddr_to_host_ppc64 +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_ppc64 +#define helper_le_lduw_mmu helper_le_lduw_mmu_ppc64 +#define helper_be_lduw_mmu helper_be_lduw_mmu_ppc64 +#define helper_le_ldul_mmu helper_le_ldul_mmu_ppc64 +#define helper_be_ldul_mmu helper_be_ldul_mmu_ppc64 +#define helper_le_ldq_mmu helper_le_ldq_mmu_ppc64 +#define helper_be_ldq_mmu helper_be_ldq_mmu_ppc64 +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_ppc64 +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_ppc64 +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_ppc64 +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_ppc64 +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_ppc64 +#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_ppc64 +#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_ppc64 +#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_ppc64 +#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_ppc64 +#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_ppc64 +#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_ppc64 +#define cpu_ldub_data_ra cpu_ldub_data_ra_ppc64 +#define cpu_ldsb_data_ra cpu_ldsb_data_ra_ppc64 +#define cpu_lduw_data_ra cpu_lduw_data_ra_ppc64 +#define cpu_ldsw_data_ra cpu_ldsw_data_ra_ppc64 +#define cpu_ldl_data_ra cpu_ldl_data_ra_ppc64 +#define cpu_ldq_data_ra cpu_ldq_data_ra_ppc64 +#define cpu_ldub_data cpu_ldub_data_ppc64 +#define cpu_ldsb_data cpu_ldsb_data_ppc64 +#define cpu_lduw_data cpu_lduw_data_ppc64 +#define cpu_ldsw_data cpu_ldsw_data_ppc64 +#define cpu_ldl_data cpu_ldl_data_ppc64 +#define cpu_ldq_data cpu_ldq_data_ppc64 +#define helper_ret_stb_mmu helper_ret_stb_mmu_ppc64 +#define helper_le_stw_mmu helper_le_stw_mmu_ppc64 +#define helper_be_stw_mmu helper_be_stw_mmu_ppc64 +#define helper_le_stl_mmu helper_le_stl_mmu_ppc64 +#define helper_be_stl_mmu helper_be_stl_mmu_ppc64 +#define helper_le_stq_mmu helper_le_stq_mmu_ppc64 +#define helper_be_stq_mmu helper_be_stq_mmu_ppc64 +#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_ppc64 +#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_ppc64 +#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_ppc64 +#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_ppc64 +#define cpu_stb_data_ra cpu_stb_data_ra_ppc64 +#define cpu_stw_data_ra cpu_stw_data_ra_ppc64 +#define cpu_stl_data_ra cpu_stl_data_ra_ppc64 +#define cpu_stq_data_ra cpu_stq_data_ra_ppc64 +#define cpu_stb_data cpu_stb_data_ppc64 +#define cpu_stw_data cpu_stw_data_ppc64 +#define cpu_stl_data cpu_stl_data_ppc64 +#define cpu_stq_data cpu_stq_data_ppc64 +#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_ppc64 +#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_ppc64 +#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_ppc64 +#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_ppc64 +#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_ppc64 +#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_ppc64 +#define 
helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_ppc64 +#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_ppc64 +#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_ppc64 +#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_ppc64 +#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_ppc64 +#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_ppc64 +#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_ppc64 +#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_ppc64 +#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_ppc64 +#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_ppc64 +#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_ppc64 +#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_ppc64 +#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_ppc64 +#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_ppc64 +#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_ppc64 +#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_ppc64 +#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_ppc64 +#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_ppc64 +#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_ppc64 +#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_ppc64 +#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_ppc64 +#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_ppc64 +#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_ppc64 +#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_ppc64 +#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_ppc64 +#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_ppc64 +#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_ppc64 +#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_ppc64 +#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_ppc64 +#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_ppc64 +#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_ppc64 +#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_ppc64 +#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_ppc64 +#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_ppc64 +#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_ppc64 +#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_ppc64 +#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_ppc64 +#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_ppc64 +#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_ppc64 +#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_ppc64 +#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_ppc64 +#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_ppc64 +#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_ppc64 +#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_ppc64 +#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_ppc64 +#define helper_atomic_umax_fetchw_be_mmu 
helper_atomic_umax_fetchw_be_mmu_ppc64 +#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_ppc64 +#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_ppc64 +#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_ppc64 +#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_ppc64 +#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_ppc64 +#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_ppc64 +#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_ppc64 +#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_ppc64 +#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_ppc64 +#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_ppc64 +#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_ppc64 +#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_ppc64 +#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_ppc64 +#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_ppc64 +#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_ppc64 +#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_ppc64 +#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_ppc64 +#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_ppc64 +#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_ppc64 +#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_ppc64 +#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_ppc64 +#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_ppc64 +#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_ppc64 +#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_ppc64 +#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_ppc64 +#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_ppc64 +#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_ppc64 +#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_ppc64 +#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_ppc64 +#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_ppc64 +#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_ppc64 +#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_ppc64 +#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_ppc64 +#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_ppc64 +#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_ppc64 +#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_ppc64 +#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_ppc64 +#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_ppc64 +#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_ppc64 +#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_ppc64 +#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_ppc64 +#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_ppc64 +#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_ppc64 +#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_ppc64 +#define helper_atomic_add_fetchq_le_mmu 
helper_atomic_add_fetchq_le_mmu_ppc64 +#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_ppc64 +#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_ppc64 +#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_ppc64 +#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_ppc64 +#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_ppc64 +#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_ppc64 +#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_ppc64 +#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_ppc64 +#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_ppc64 +#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_ppc64 +#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_ppc64 +#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_ppc64 +#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_ppc64 +#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_ppc64 +#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_ppc64 +#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_ppc64 +#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_ppc64 +#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_ppc64 +#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_ppc64 +#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_ppc64 +#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_ppc64 +#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_ppc64 +#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_ppc64 +#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_ppc64 +#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_ppc64 +#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_ppc64 +#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_ppc64 +#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_ppc64 +#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_ppc64 +#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_ppc64 +#define helper_atomic_xchgb helper_atomic_xchgb_ppc64 +#define helper_atomic_fetch_addb helper_atomic_fetch_addb_ppc64 +#define helper_atomic_fetch_andb helper_atomic_fetch_andb_ppc64 +#define helper_atomic_fetch_orb helper_atomic_fetch_orb_ppc64 +#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_ppc64 +#define helper_atomic_add_fetchb helper_atomic_add_fetchb_ppc64 +#define helper_atomic_and_fetchb helper_atomic_and_fetchb_ppc64 +#define helper_atomic_or_fetchb helper_atomic_or_fetchb_ppc64 +#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_ppc64 +#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_ppc64 +#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_ppc64 +#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_ppc64 +#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_ppc64 +#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_ppc64 +#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_ppc64 +#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_ppc64 +#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_ppc64 +#define helper_atomic_cmpxchgw_le 
helper_atomic_cmpxchgw_le_ppc64 +#define helper_atomic_xchgw_le helper_atomic_xchgw_le_ppc64 +#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_ppc64 +#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_ppc64 +#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_ppc64 +#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_ppc64 +#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_ppc64 +#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_ppc64 +#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_ppc64 +#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_ppc64 +#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_ppc64 +#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_ppc64 +#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_ppc64 +#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_ppc64 +#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_ppc64 +#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_ppc64 +#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_ppc64 +#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_ppc64 +#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_ppc64 +#define helper_atomic_xchgw_be helper_atomic_xchgw_be_ppc64 +#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_ppc64 +#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_ppc64 +#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_ppc64 +#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_ppc64 +#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_ppc64 +#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_ppc64 +#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_ppc64 +#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_ppc64 +#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_ppc64 +#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_ppc64 +#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_ppc64 +#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_ppc64 +#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_ppc64 +#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_ppc64 +#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_ppc64 +#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_ppc64 +#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_ppc64 +#define helper_atomic_xchgl_le helper_atomic_xchgl_le_ppc64 +#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_ppc64 +#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_ppc64 +#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_ppc64 +#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_ppc64 +#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_ppc64 +#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_ppc64 +#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_ppc64 +#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_ppc64 +#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_ppc64 +#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_ppc64 +#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_ppc64 +#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_ppc64 +#define helper_atomic_smin_fetchl_le 
helper_atomic_smin_fetchl_le_ppc64 +#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_ppc64 +#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_ppc64 +#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_ppc64 +#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_ppc64 +#define helper_atomic_xchgl_be helper_atomic_xchgl_be_ppc64 +#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_ppc64 +#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_ppc64 +#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_ppc64 +#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_ppc64 +#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_ppc64 +#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_ppc64 +#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_ppc64 +#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_ppc64 +#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_ppc64 +#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_ppc64 +#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_ppc64 +#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_ppc64 +#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_ppc64 +#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_ppc64 +#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_ppc64 +#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_ppc64 +#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_ppc64 +#define helper_atomic_xchgq_le helper_atomic_xchgq_le_ppc64 +#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_ppc64 +#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_ppc64 +#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_ppc64 +#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_ppc64 +#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_ppc64 +#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_ppc64 +#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_ppc64 +#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_ppc64 +#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_ppc64 +#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_ppc64 +#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_ppc64 +#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_ppc64 +#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_ppc64 +#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_ppc64 +#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_ppc64 +#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_ppc64 +#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_ppc64 +#define helper_atomic_xchgq_be helper_atomic_xchgq_be_ppc64 +#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_ppc64 +#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_ppc64 +#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_ppc64 +#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_ppc64 +#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_ppc64 +#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_ppc64 +#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_ppc64 +#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_ppc64 +#define helper_atomic_fetch_smaxq_be 
helper_atomic_fetch_smaxq_be_ppc64 +#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_ppc64 +#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_ppc64 +#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_ppc64 +#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_ppc64 +#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_ppc64 +#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_ppc64 +#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_ppc64 +#define cpu_ldub_code cpu_ldub_code_ppc64 +#define cpu_lduw_code cpu_lduw_code_ppc64 +#define cpu_ldl_code cpu_ldl_code_ppc64 +#define cpu_ldq_code cpu_ldq_code_ppc64 +#define helper_div_i32 helper_div_i32_ppc64 +#define helper_rem_i32 helper_rem_i32_ppc64 +#define helper_divu_i32 helper_divu_i32_ppc64 +#define helper_remu_i32 helper_remu_i32_ppc64 +#define helper_shl_i64 helper_shl_i64_ppc64 +#define helper_shr_i64 helper_shr_i64_ppc64 +#define helper_sar_i64 helper_sar_i64_ppc64 +#define helper_div_i64 helper_div_i64_ppc64 +#define helper_rem_i64 helper_rem_i64_ppc64 +#define helper_divu_i64 helper_divu_i64_ppc64 +#define helper_remu_i64 helper_remu_i64_ppc64 +#define helper_muluh_i64 helper_muluh_i64_ppc64 +#define helper_mulsh_i64 helper_mulsh_i64_ppc64 +#define helper_clz_i32 helper_clz_i32_ppc64 +#define helper_ctz_i32 helper_ctz_i32_ppc64 +#define helper_clz_i64 helper_clz_i64_ppc64 +#define helper_ctz_i64 helper_ctz_i64_ppc64 +#define helper_clrsb_i32 helper_clrsb_i32_ppc64 +#define helper_clrsb_i64 helper_clrsb_i64_ppc64 +#define helper_ctpop_i32 helper_ctpop_i32_ppc64 +#define helper_ctpop_i64 helper_ctpop_i64_ppc64 +#define helper_lookup_tb_ptr helper_lookup_tb_ptr_ppc64 +#define helper_exit_atomic helper_exit_atomic_ppc64 +#define helper_gvec_add8 helper_gvec_add8_ppc64 +#define helper_gvec_add16 helper_gvec_add16_ppc64 +#define helper_gvec_add32 helper_gvec_add32_ppc64 +#define helper_gvec_add64 helper_gvec_add64_ppc64 +#define helper_gvec_adds8 helper_gvec_adds8_ppc64 +#define helper_gvec_adds16 helper_gvec_adds16_ppc64 +#define helper_gvec_adds32 helper_gvec_adds32_ppc64 +#define helper_gvec_adds64 helper_gvec_adds64_ppc64 +#define helper_gvec_sub8 helper_gvec_sub8_ppc64 +#define helper_gvec_sub16 helper_gvec_sub16_ppc64 +#define helper_gvec_sub32 helper_gvec_sub32_ppc64 +#define helper_gvec_sub64 helper_gvec_sub64_ppc64 +#define helper_gvec_subs8 helper_gvec_subs8_ppc64 +#define helper_gvec_subs16 helper_gvec_subs16_ppc64 +#define helper_gvec_subs32 helper_gvec_subs32_ppc64 +#define helper_gvec_subs64 helper_gvec_subs64_ppc64 +#define helper_gvec_mul8 helper_gvec_mul8_ppc64 +#define helper_gvec_mul16 helper_gvec_mul16_ppc64 +#define helper_gvec_mul32 helper_gvec_mul32_ppc64 +#define helper_gvec_mul64 helper_gvec_mul64_ppc64 +#define helper_gvec_muls8 helper_gvec_muls8_ppc64 +#define helper_gvec_muls16 helper_gvec_muls16_ppc64 +#define helper_gvec_muls32 helper_gvec_muls32_ppc64 +#define helper_gvec_muls64 helper_gvec_muls64_ppc64 +#define helper_gvec_neg8 helper_gvec_neg8_ppc64 +#define helper_gvec_neg16 helper_gvec_neg16_ppc64 +#define helper_gvec_neg32 helper_gvec_neg32_ppc64 +#define helper_gvec_neg64 helper_gvec_neg64_ppc64 +#define helper_gvec_abs8 helper_gvec_abs8_ppc64 +#define helper_gvec_abs16 helper_gvec_abs16_ppc64 +#define helper_gvec_abs32 helper_gvec_abs32_ppc64 +#define helper_gvec_abs64 helper_gvec_abs64_ppc64 +#define helper_gvec_mov helper_gvec_mov_ppc64 +#define helper_gvec_dup64 helper_gvec_dup64_ppc64 +#define 
helper_gvec_dup32 helper_gvec_dup32_ppc64 +#define helper_gvec_dup16 helper_gvec_dup16_ppc64 +#define helper_gvec_dup8 helper_gvec_dup8_ppc64 +#define helper_gvec_not helper_gvec_not_ppc64 +#define helper_gvec_and helper_gvec_and_ppc64 +#define helper_gvec_or helper_gvec_or_ppc64 +#define helper_gvec_xor helper_gvec_xor_ppc64 +#define helper_gvec_andc helper_gvec_andc_ppc64 +#define helper_gvec_orc helper_gvec_orc_ppc64 +#define helper_gvec_nand helper_gvec_nand_ppc64 +#define helper_gvec_nor helper_gvec_nor_ppc64 +#define helper_gvec_eqv helper_gvec_eqv_ppc64 +#define helper_gvec_ands helper_gvec_ands_ppc64 +#define helper_gvec_xors helper_gvec_xors_ppc64 +#define helper_gvec_ors helper_gvec_ors_ppc64 +#define helper_gvec_shl8i helper_gvec_shl8i_ppc64 +#define helper_gvec_shl16i helper_gvec_shl16i_ppc64 +#define helper_gvec_shl32i helper_gvec_shl32i_ppc64 +#define helper_gvec_shl64i helper_gvec_shl64i_ppc64 +#define helper_gvec_shr8i helper_gvec_shr8i_ppc64 +#define helper_gvec_shr16i helper_gvec_shr16i_ppc64 +#define helper_gvec_shr32i helper_gvec_shr32i_ppc64 +#define helper_gvec_shr64i helper_gvec_shr64i_ppc64 +#define helper_gvec_sar8i helper_gvec_sar8i_ppc64 +#define helper_gvec_sar16i helper_gvec_sar16i_ppc64 +#define helper_gvec_sar32i helper_gvec_sar32i_ppc64 +#define helper_gvec_sar64i helper_gvec_sar64i_ppc64 +#define helper_gvec_shl8v helper_gvec_shl8v_ppc64 +#define helper_gvec_shl16v helper_gvec_shl16v_ppc64 +#define helper_gvec_shl32v helper_gvec_shl32v_ppc64 +#define helper_gvec_shl64v helper_gvec_shl64v_ppc64 +#define helper_gvec_shr8v helper_gvec_shr8v_ppc64 +#define helper_gvec_shr16v helper_gvec_shr16v_ppc64 +#define helper_gvec_shr32v helper_gvec_shr32v_ppc64 +#define helper_gvec_shr64v helper_gvec_shr64v_ppc64 +#define helper_gvec_sar8v helper_gvec_sar8v_ppc64 +#define helper_gvec_sar16v helper_gvec_sar16v_ppc64 +#define helper_gvec_sar32v helper_gvec_sar32v_ppc64 +#define helper_gvec_sar64v helper_gvec_sar64v_ppc64 +#define helper_gvec_eq8 helper_gvec_eq8_ppc64 +#define helper_gvec_ne8 helper_gvec_ne8_ppc64 +#define helper_gvec_lt8 helper_gvec_lt8_ppc64 +#define helper_gvec_le8 helper_gvec_le8_ppc64 +#define helper_gvec_ltu8 helper_gvec_ltu8_ppc64 +#define helper_gvec_leu8 helper_gvec_leu8_ppc64 +#define helper_gvec_eq16 helper_gvec_eq16_ppc64 +#define helper_gvec_ne16 helper_gvec_ne16_ppc64 +#define helper_gvec_lt16 helper_gvec_lt16_ppc64 +#define helper_gvec_le16 helper_gvec_le16_ppc64 +#define helper_gvec_ltu16 helper_gvec_ltu16_ppc64 +#define helper_gvec_leu16 helper_gvec_leu16_ppc64 +#define helper_gvec_eq32 helper_gvec_eq32_ppc64 +#define helper_gvec_ne32 helper_gvec_ne32_ppc64 +#define helper_gvec_lt32 helper_gvec_lt32_ppc64 +#define helper_gvec_le32 helper_gvec_le32_ppc64 +#define helper_gvec_ltu32 helper_gvec_ltu32_ppc64 +#define helper_gvec_leu32 helper_gvec_leu32_ppc64 +#define helper_gvec_eq64 helper_gvec_eq64_ppc64 +#define helper_gvec_ne64 helper_gvec_ne64_ppc64 +#define helper_gvec_lt64 helper_gvec_lt64_ppc64 +#define helper_gvec_le64 helper_gvec_le64_ppc64 +#define helper_gvec_ltu64 helper_gvec_ltu64_ppc64 +#define helper_gvec_leu64 helper_gvec_leu64_ppc64 +#define helper_gvec_ssadd8 helper_gvec_ssadd8_ppc64 +#define helper_gvec_ssadd16 helper_gvec_ssadd16_ppc64 +#define helper_gvec_ssadd32 helper_gvec_ssadd32_ppc64 +#define helper_gvec_ssadd64 helper_gvec_ssadd64_ppc64 +#define helper_gvec_sssub8 helper_gvec_sssub8_ppc64 +#define helper_gvec_sssub16 helper_gvec_sssub16_ppc64 +#define helper_gvec_sssub32 helper_gvec_sssub32_ppc64 +#define 
helper_gvec_sssub64 helper_gvec_sssub64_ppc64 +#define helper_gvec_usadd8 helper_gvec_usadd8_ppc64 +#define helper_gvec_usadd16 helper_gvec_usadd16_ppc64 +#define helper_gvec_usadd32 helper_gvec_usadd32_ppc64 +#define helper_gvec_usadd64 helper_gvec_usadd64_ppc64 +#define helper_gvec_ussub8 helper_gvec_ussub8_ppc64 +#define helper_gvec_ussub16 helper_gvec_ussub16_ppc64 +#define helper_gvec_ussub32 helper_gvec_ussub32_ppc64 +#define helper_gvec_ussub64 helper_gvec_ussub64_ppc64 +#define helper_gvec_smin8 helper_gvec_smin8_ppc64 +#define helper_gvec_smin16 helper_gvec_smin16_ppc64 +#define helper_gvec_smin32 helper_gvec_smin32_ppc64 +#define helper_gvec_smin64 helper_gvec_smin64_ppc64 +#define helper_gvec_smax8 helper_gvec_smax8_ppc64 +#define helper_gvec_smax16 helper_gvec_smax16_ppc64 +#define helper_gvec_smax32 helper_gvec_smax32_ppc64 +#define helper_gvec_smax64 helper_gvec_smax64_ppc64 +#define helper_gvec_umin8 helper_gvec_umin8_ppc64 +#define helper_gvec_umin16 helper_gvec_umin16_ppc64 +#define helper_gvec_umin32 helper_gvec_umin32_ppc64 +#define helper_gvec_umin64 helper_gvec_umin64_ppc64 +#define helper_gvec_umax8 helper_gvec_umax8_ppc64 +#define helper_gvec_umax16 helper_gvec_umax16_ppc64 +#define helper_gvec_umax32 helper_gvec_umax32_ppc64 +#define helper_gvec_umax64 helper_gvec_umax64_ppc64 +#define helper_gvec_bitsel helper_gvec_bitsel_ppc64 +#define cpu_restore_state cpu_restore_state_ppc64 +#define page_collection_lock page_collection_lock_ppc64 +#define page_collection_unlock page_collection_unlock_ppc64 +#define free_code_gen_buffer free_code_gen_buffer_ppc64 +#define tcg_exec_init tcg_exec_init_ppc64 +#define tb_cleanup tb_cleanup_ppc64 +#define tb_flush tb_flush_ppc64 +#define tb_phys_invalidate tb_phys_invalidate_ppc64 +#define tb_gen_code tb_gen_code_ppc64 +#define tb_exec_lock tb_exec_lock_ppc64 +#define tb_exec_unlock tb_exec_unlock_ppc64 +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_ppc64 +#define tb_invalidate_phys_range tb_invalidate_phys_range_ppc64 +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_ppc64 +#define tb_check_watchpoint tb_check_watchpoint_ppc64 +#define cpu_io_recompile cpu_io_recompile_ppc64 +#define tb_flush_jmp_cache tb_flush_jmp_cache_ppc64 +#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_ppc64 +#define translator_loop_temp_check translator_loop_temp_check_ppc64 +#define translator_loop translator_loop_ppc64 +#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_ppc64 +#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_ppc64 +#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_ppc64 +#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_ppc64 +#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_ppc64 +#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_ppc64 +#define unassigned_mem_ops unassigned_mem_ops_ppc64 +#define floatx80_infinity floatx80_infinity_ppc64 +#define dup_const_func dup_const_func_ppc64 +#define gen_helper_raise_exception gen_helper_raise_exception_ppc64 +#define gen_helper_raise_interrupt gen_helper_raise_interrupt_ppc64 +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_ppc64 +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_ppc64 +#define gen_helper_cpsr_read gen_helper_cpsr_read_ppc64 +#define gen_helper_cpsr_write gen_helper_cpsr_write_ppc64 +#define ppc_cpu_unrealize ppc_cpu_unrealize_ppc64 +#define ppc_cpu_instance_finalize ppc_cpu_instance_finalize_ppc64 +#define ppc_reg_reset ppc_reg_reset_ppc64 +#define 
ppc_reg_read ppc_reg_read_ppc64 +#define ppc_reg_write ppc_reg_write_ppc64 +#define ppc_cpu_do_interrupt ppc_cpu_do_interrupt_ppc64 +#define ppc_cpu_do_system_reset ppc_cpu_do_system_reset_ppc64 +#define ppc_cpu_do_fwnmi_machine_check ppc_cpu_do_fwnmi_machine_check_ppc64 +#define ppc_cpu_exec_interrupt ppc_cpu_exec_interrupt_ppc64 +#define raise_exception_err_ra raise_exception_err_ra_ppc64 +#define raise_exception_err raise_exception_err_ppc64 +#define raise_exception raise_exception_ppc64 +#define raise_exception_ra raise_exception_ra_ppc64 +#define helper_raise_exception_err helper_raise_exception_err_ppc64 +#define helper_store_msr helper_store_msr_ppc64 +#define helper_rfi helper_rfi_ppc64 +#define helper_40x_rfci helper_40x_rfci_ppc64 +#define helper_rfdi helper_rfdi_ppc64 +#define helper_rfci helper_rfci_ppc64 +#define helper_rfmci helper_rfmci_ppc64 +#define helper_tw helper_tw_ppc64 +#define helper_rfsvc helper_rfsvc_ppc64 +#define helper_msgclr helper_msgclr_ppc64 +#define helper_msgsnd helper_msgsnd_ppc64 +#define helper_book3s_msgclr helper_book3s_msgclr_ppc64 +#define ppc_cpu_do_unaligned_access ppc_cpu_do_unaligned_access_ppc64 +#define helper_divweu helper_divweu_ppc64 +#define helper_divwe helper_divwe_ppc64 +#define helper_sraw helper_sraw_ppc64 +#define helper_popcntb helper_popcntb_ppc64 +#define helper_div helper_div_ppc64 +#define helper_divo helper_divo_ppc64 +#define helper_divs helper_divs_ppc64 +#define helper_divso helper_divso_ppc64 +#define helper_602_mfrom helper_602_mfrom_ppc64 +#define helper_mtvscr helper_mtvscr_ppc64 +#define helper_vaddcuw helper_vaddcuw_ppc64 +#define helper_vprtybw helper_vprtybw_ppc64 +#define helper_vprtybd helper_vprtybd_ppc64 +#define helper_vprtybq helper_vprtybq_ppc64 +#define helper_vmuluwm helper_vmuluwm_ppc64 +#define helper_vaddfp helper_vaddfp_ppc64 +#define helper_vsubfp helper_vsubfp_ppc64 +#define helper_vminfp helper_vminfp_ppc64 +#define helper_vmaxfp helper_vmaxfp_ppc64 +#define helper_vmaddfp helper_vmaddfp_ppc64 +#define helper_vnmsubfp helper_vnmsubfp_ppc64 +#define helper_vaddsbs helper_vaddsbs_ppc64 +#define helper_vsubsbs helper_vsubsbs_ppc64 +#define helper_vsubshs helper_vsubshs_ppc64 +#define helper_vaddsws helper_vaddsws_ppc64 +#define helper_vsubsws helper_vsubsws_ppc64 +#define helper_vaddubs helper_vaddubs_ppc64 +#define helper_vsububs helper_vsububs_ppc64 +#define helper_vadduhs helper_vadduhs_ppc64 +#define helper_vsubuhs helper_vsubuhs_ppc64 +#define helper_vadduws helper_vadduws_ppc64 +#define helper_vsubuws helper_vsubuws_ppc64 +#define helper_vavgsb helper_vavgsb_ppc64 +#define helper_vavgub helper_vavgub_ppc64 +#define helper_vavgsh helper_vavgsh_ppc64 +#define helper_vavguh helper_vavguh_ppc64 +#define helper_vavgsw helper_vavgsw_ppc64 +#define helper_vabsdub helper_vabsdub_ppc64 +#define helper_vabsduh helper_vabsduh_ppc64 +#define helper_vabsduw helper_vabsduw_ppc64 +#define helper_vcfux helper_vcfux_ppc64 +#define helper_vcfsx helper_vcfsx_ppc64 +#define helper_vcmpequb helper_vcmpequb_ppc64 +#define helper_vcmpequb_dot helper_vcmpequb_dot_ppc64 +#define helper_vcmpequw helper_vcmpequw_ppc64 +#define helper_vcmpequw_dot helper_vcmpequw_dot_ppc64 +#define helper_vcmpequd helper_vcmpequd_ppc64 +#define helper_vcmpequd_dot helper_vcmpequd_dot_ppc64 +#define helper_vcmpgtub helper_vcmpgtub_ppc64 +#define helper_vcmpgtub_dot helper_vcmpgtub_dot_ppc64 +#define helper_vcmpgtuh helper_vcmpgtuh_ppc64 +#define helper_vcmpgtuh_dot helper_vcmpgtuh_dot_ppc64 +#define helper_vcmpgtuw helper_vcmpgtuw_ppc64 
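+/*
+ * Illustrative sketch (an inference from the define pattern, not part of
+ * the generated symbol list): Unicorn links several QEMU target builds
+ * into one library, so each target aliases every global QEMU symbol to a
+ * target-suffixed name to avoid link-time collisions. With this header
+ * included ahead of the QEMU sources, the compiler emits the suffixed
+ * symbol directly:
+ *
+ *     #define tlb_flush tlb_flush_ppc64
+ *     void tlb_flush(CPUState *cpu);    // emitted as tlb_flush_ppc64
+ *
+ * Other targets apply the same renaming with their own suffix (e.g. _arm),
+ * letting tlb_flush_ppc64 and tlb_flush_arm coexist in the final library.
+ */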
+#define helper_vcmpgtuw_dot helper_vcmpgtuw_dot_ppc64 +#define helper_vcmpgtud helper_vcmpgtud_ppc64 +#define helper_vcmpgtud_dot helper_vcmpgtud_dot_ppc64 +#define helper_vcmpgtsb helper_vcmpgtsb_ppc64 +#define helper_vcmpgtsb_dot helper_vcmpgtsb_dot_ppc64 +#define helper_vcmpgtsh helper_vcmpgtsh_ppc64 +#define helper_vcmpgtsh_dot helper_vcmpgtsh_dot_ppc64 +#define helper_vcmpgtsw helper_vcmpgtsw_ppc64 +#define helper_vcmpgtsw_dot helper_vcmpgtsw_dot_ppc64 +#define helper_vcmpgtsd helper_vcmpgtsd_ppc64 +#define helper_vcmpgtsd_dot helper_vcmpgtsd_dot_ppc64 +#define helper_vcmpnezb helper_vcmpnezb_ppc64 +#define helper_vcmpnezb_dot helper_vcmpnezb_dot_ppc64 +#define helper_vcmpnezw helper_vcmpnezw_ppc64 +#define helper_vcmpnezw_dot helper_vcmpnezw_dot_ppc64 +#define helper_vcmpneb helper_vcmpneb_ppc64 +#define helper_vcmpneb_dot helper_vcmpneb_dot_ppc64 +#define helper_vcmpneh helper_vcmpneh_ppc64 +#define helper_vcmpneh_dot helper_vcmpneh_dot_ppc64 +#define helper_vcmpnew helper_vcmpnew_ppc64 +#define helper_vcmpnew_dot helper_vcmpnew_dot_ppc64 +#define helper_vcmpeqfp helper_vcmpeqfp_ppc64 +#define helper_vcmpeqfp_dot helper_vcmpeqfp_dot_ppc64 +#define helper_vcmpgefp helper_vcmpgefp_ppc64 +#define helper_vcmpgefp_dot helper_vcmpgefp_dot_ppc64 +#define helper_vcmpgtfp helper_vcmpgtfp_ppc64 +#define helper_vcmpgtfp_dot helper_vcmpgtfp_dot_ppc64 +#define helper_vcmpbfp helper_vcmpbfp_ppc64 +#define helper_vcmpbfp_dot helper_vcmpbfp_dot_ppc64 +#define helper_vctuxs helper_vctuxs_ppc64 +#define helper_vctsxs helper_vctsxs_ppc64 +#define helper_vclzlsbb helper_vclzlsbb_ppc64 +#define helper_vctzlsbb helper_vctzlsbb_ppc64 +#define helper_vmhaddshs helper_vmhaddshs_ppc64 +#define helper_vmhraddshs helper_vmhraddshs_ppc64 +#define helper_vmladduhm helper_vmladduhm_ppc64 +#define helper_vmrglb helper_vmrglb_ppc64 +#define helper_vmrghb helper_vmrghb_ppc64 +#define helper_vmrglh helper_vmrglh_ppc64 +#define helper_vmrghh helper_vmrghh_ppc64 +#define helper_vmrglw helper_vmrglw_ppc64 +#define helper_vmrghw helper_vmrghw_ppc64 +#define helper_vmsummbm helper_vmsummbm_ppc64 +#define helper_vmsumshs helper_vmsumshs_ppc64 +#define helper_vmsumubm helper_vmsumubm_ppc64 +#define helper_vmsumuhm helper_vmsumuhm_ppc64 +#define helper_vmulesb helper_vmulesb_ppc64 +#define helper_vmulosb helper_vmulosb_ppc64 +#define helper_vmulesh helper_vmulesh_ppc64 +#define helper_vmulesw helper_vmulesw_ppc64 +#define helper_vmuleub helper_vmuleub_ppc64 +#define helper_vmuloub helper_vmuloub_ppc64 +#define helper_vmuleuh helper_vmuleuh_ppc64 +#define helper_vmulouh helper_vmulouh_ppc64 +#define helper_vmuleuw helper_vmuleuw_ppc64 +#define helper_vmulouw helper_vmulouw_ppc64 +#define helper_vperm helper_vperm_ppc64 +#define helper_vpermr helper_vpermr_ppc64 +#define helper_vbpermd helper_vbpermd_ppc64 +#define helper_vpmsumb helper_vpmsumb_ppc64 +#define helper_vpmsumh helper_vpmsumh_ppc64 +#define helper_vpmsumw helper_vpmsumw_ppc64 +#define helper_vpmsumd helper_vpmsumd_ppc64 +#define helper_vpkpx helper_vpkpx_ppc64 +#define
helper_vpkshss helper_vpkshss_ppc64 +#define helper_vpkshus helper_vpkshus_ppc64 +#define helper_vpkswss helper_vpkswss_ppc64 +#define helper_vpkswus helper_vpkswus_ppc64 +#define helper_vpksdss helper_vpksdss_ppc64 +#define helper_vpksdus helper_vpksdus_ppc64 +#define helper_vpkuhus helper_vpkuhus_ppc64 +#define helper_vpkuwus helper_vpkuwus_ppc64 +#define helper_vpkudus helper_vpkudus_ppc64 +#define helper_vpkuhum helper_vpkuhum_ppc64 +#define helper_vpkuwum helper_vpkuwum_ppc64 +#define helper_vpkudum helper_vpkudum_ppc64 +#define helper_vrefp helper_vrefp_ppc64 +#define helper_vrfin helper_vrfin_ppc64 +#define helper_vrfim helper_vrfim_ppc64 +#define helper_vrfip helper_vrfip_ppc64 +#define helper_vrfiz helper_vrfiz_ppc64 +#define helper_vrlb helper_vrlb_ppc64 +#define helper_vrlh helper_vrlh_ppc64 +#define helper_vrlw helper_vrlw_ppc64 +#define helper_vrld helper_vrld_ppc64 +#define helper_vrsqrtefp helper_vrsqrtefp_ppc64 +#define helper_vrldmi helper_vrldmi_ppc64 +#define helper_vrlwmi helper_vrlwmi_ppc64 +#define helper_vrldnm helper_vrldnm_ppc64 +#define helper_vrlwnm helper_vrlwnm_ppc64 +#define helper_vsel helper_vsel_ppc64 +#define helper_vexptefp helper_vexptefp_ppc64 +#define helper_vlogefp helper_vlogefp_ppc64 +#define helper_vextublx helper_vextublx_ppc64 +#define helper_vextuhlx helper_vextuhlx_ppc64 +#define helper_vextuwlx helper_vextuwlx_ppc64 +#define helper_vextubrx helper_vextubrx_ppc64 +#define helper_vextuhrx helper_vextuhrx_ppc64 +#define helper_vextuwrx helper_vextuwrx_ppc64 +#define helper_vslv helper_vslv_ppc64 +#define helper_vsrv helper_vsrv_ppc64 +#define helper_vsldoi helper_vsldoi_ppc64 +#define helper_vslo helper_vslo_ppc64 +#define helper_vinsertb helper_vinsertb_ppc64 +#define helper_vinserth helper_vinserth_ppc64 +#define helper_vinsertw helper_vinsertw_ppc64 +#define helper_vinsertd helper_vinsertd_ppc64 +#define helper_vextractub helper_vextractub_ppc64 +#define helper_vextractuh helper_vextractuh_ppc64 +#define helper_vextractuw helper_vextractuw_ppc64 +#define helper_vextractd helper_vextractd_ppc64 +#define helper_xxextractuw helper_xxextractuw_ppc64 +#define helper_xxinsertw helper_xxinsertw_ppc64 +#define helper_vextsb2w helper_vextsb2w_ppc64 +#define helper_vextsb2d helper_vextsb2d_ppc64 +#define helper_vextsh2w helper_vextsh2w_ppc64 +#define helper_vextsh2d helper_vextsh2d_ppc64 +#define helper_vnegw helper_vnegw_ppc64 +#define helper_vnegd helper_vnegd_ppc64 +#define helper_vsro helper_vsro_ppc64 +#define helper_vsubcuw helper_vsubcuw_ppc64 +#define helper_vsumsws helper_vsumsws_ppc64 +#define helper_vsum2sws helper_vsum2sws_ppc64 +#define helper_vsum4sbs helper_vsum4sbs_ppc64 +#define helper_vsum4shs helper_vsum4shs_ppc64 +#define helper_vsum4ubs helper_vsum4ubs_ppc64 +#define helper_vupklpx helper_vupklpx_ppc64 +#define helper_vupkhpx helper_vupkhpx_ppc64 +#define helper_vupkhsb helper_vupkhsb_ppc64 +#define helper_vupkhsh helper_vupkhsh_ppc64 +#define helper_vupkhsw helper_vupkhsw_ppc64 +#define helper_vupklsb helper_vupklsb_ppc64 +#define helper_vupklsh helper_vupklsh_ppc64 +#define helper_vupklsw helper_vupklsw_ppc64 +#define helper_vclzb helper_vclzb_ppc64 +#define helper_vclzh helper_vclzh_ppc64 +#define helper_vctzb helper_vctzb_ppc64 +#define helper_vctzh helper_vctzh_ppc64 +#define helper_vctzw helper_vctzw_ppc64 +#define helper_vctzd helper_vctzd_ppc64 +#define helper_vpopcntb helper_vpopcntb_ppc64 +#define helper_vpopcnth helper_vpopcnth_ppc64 +#define helper_vpopcntw helper_vpopcntw_ppc64 +#define helper_vpopcntd 
helper_vpopcntd_ppc64 +#define helper_vadduqm helper_vadduqm_ppc64 +#define helper_vaddeuqm helper_vaddeuqm_ppc64 +#define helper_vaddcuq helper_vaddcuq_ppc64 +#define helper_vaddecuq helper_vaddecuq_ppc64 +#define helper_vsubuqm helper_vsubuqm_ppc64 +#define helper_vsubeuqm helper_vsubeuqm_ppc64 +#define helper_vsubcuq helper_vsubcuq_ppc64 +#define helper_vsubecuq helper_vsubecuq_ppc64 +#define helper_bcdadd helper_bcdadd_ppc64 +#define helper_bcdsub helper_bcdsub_ppc64 +#define helper_bcdcfn helper_bcdcfn_ppc64 +#define helper_bcdctn helper_bcdctn_ppc64 +#define helper_bcdcfz helper_bcdcfz_ppc64 +#define helper_bcdctz helper_bcdctz_ppc64 +#define helper_bcdcfsq helper_bcdcfsq_ppc64 +#define helper_bcdctsq helper_bcdctsq_ppc64 +#define helper_bcdcpsgn helper_bcdcpsgn_ppc64 +#define helper_bcdsetsgn helper_bcdsetsgn_ppc64 +#define helper_bcds helper_bcds_ppc64 +#define helper_bcdus helper_bcdus_ppc64 +#define helper_bcdsr helper_bcdsr_ppc64 +#define helper_bcdtrunc helper_bcdtrunc_ppc64 +#define helper_bcdutrunc helper_bcdutrunc_ppc64 +#define helper_vsbox helper_vsbox_ppc64 +#define helper_vcipher helper_vcipher_ppc64 +#define helper_vcipherlast helper_vcipherlast_ppc64 +#define helper_vncipher helper_vncipher_ppc64 +#define helper_vncipherlast helper_vncipherlast_ppc64 +#define helper_vshasigmaw helper_vshasigmaw_ppc64 +#define helper_vshasigmad helper_vshasigmad_ppc64 +#define helper_vpermxor helper_vpermxor_ppc64 +#define helper_brinc helper_brinc_ppc64 +#define helper_cntlsw32 helper_cntlsw32_ppc64 +#define helper_cntlzw32 helper_cntlzw32_ppc64 +#define helper_dlmzb helper_dlmzb_ppc64 +#define helper_lmw helper_lmw_ppc64 +#define helper_lsw helper_lsw_ppc64 +#define helper_lswx helper_lswx_ppc64 +#define helper_stsw helper_stsw_ppc64 +#define helper_dcbz helper_dcbz_ppc64 +#define helper_dcbzep helper_dcbzep_ppc64 +#define helper_icbi helper_icbi_ppc64 +#define helper_icbiep helper_icbiep_ppc64 +#define helper_lscbx helper_lscbx_ppc64 +#define helper_lvebx helper_lvebx_ppc64 +#define helper_lvehx helper_lvehx_ppc64 +#define helper_lvewx helper_lvewx_ppc64 +#define helper_stvebx helper_stvebx_ppc64 +#define helper_stvehx helper_stvehx_ppc64 +#define helper_stvewx helper_stvewx_ppc64 +#define helper_tbegin helper_tbegin_ppc64 +#define helper_load_dump_spr helper_load_dump_spr_ppc64 +#define helper_store_dump_spr helper_store_dump_spr_ppc64 +#define helper_hfscr_facility_check helper_hfscr_facility_check_ppc64 +#define helper_fscr_facility_check helper_fscr_facility_check_ppc64 +#define helper_msr_facility_check helper_msr_facility_check_ppc64 +#define helper_store_sdr1 helper_store_sdr1_ppc64 +#define helper_store_pidr helper_store_pidr_ppc64 +#define helper_store_lpidr helper_store_lpidr_ppc64 +#define helper_store_hid0_601 helper_store_hid0_601_ppc64 +#define helper_store_403_pbr helper_store_403_pbr_ppc64 +#define helper_store_40x_dbcr0 helper_store_40x_dbcr0_ppc64 +#define helper_store_40x_sler helper_store_40x_sler_ppc64 +#define helper_clcs helper_clcs_ppc64 +#define ppc_store_msr ppc_store_msr_ppc64 +#define helper_fixup_thrm helper_fixup_thrm_ppc64 +#define store_40x_sler store_40x_sler_ppc64 +#define dump_mmu dump_mmu_ppc64 +#define ppc_cpu_get_phys_page_debug ppc_cpu_get_phys_page_debug_ppc64 +#define helper_store_ibatu helper_store_ibatu_ppc64 +#define helper_store_ibatl helper_store_ibatl_ppc64 +#define helper_store_dbatu helper_store_dbatu_ppc64 +#define helper_store_dbatl helper_store_dbatl_ppc64 +#define helper_store_601_batu helper_store_601_batu_ppc64 +#define 
helper_store_601_batl helper_store_601_batl_ppc64 +#define ppc_tlb_invalidate_all ppc_tlb_invalidate_all_ppc64 +#define ppc_tlb_invalidate_one ppc_tlb_invalidate_one_ppc64 +#define ppc_store_sdr1 ppc_store_sdr1_ppc64 +#define helper_load_sr helper_load_sr_ppc64 +#define helper_store_sr helper_store_sr_ppc64 +#define helper_tlbia helper_tlbia_ppc64 +#define helper_tlbie helper_tlbie_ppc64 +#define helper_tlbiva helper_tlbiva_ppc64 +#define helper_6xx_tlbd helper_6xx_tlbd_ppc64 +#define helper_6xx_tlbi helper_6xx_tlbi_ppc64 +#define helper_74xx_tlbd helper_74xx_tlbd_ppc64 +#define helper_74xx_tlbi helper_74xx_tlbi_ppc64 +#define helper_rac helper_rac_ppc64 +#define helper_4xx_tlbre_hi helper_4xx_tlbre_hi_ppc64 +#define helper_4xx_tlbre_lo helper_4xx_tlbre_lo_ppc64 +#define helper_4xx_tlbwe_hi helper_4xx_tlbwe_hi_ppc64 +#define helper_4xx_tlbwe_lo helper_4xx_tlbwe_lo_ppc64 +#define helper_4xx_tlbsx helper_4xx_tlbsx_ppc64 +#define helper_440_tlbwe helper_440_tlbwe_ppc64 +#define helper_440_tlbre helper_440_tlbre_ppc64 +#define helper_440_tlbsx helper_440_tlbsx_ppc64 +#define helper_booke_setpid helper_booke_setpid_ppc64 +#define helper_booke_set_eplc helper_booke_set_eplc_ppc64 +#define helper_booke_set_epsc helper_booke_set_epsc_ppc64 +#define helper_booke206_tlbwe helper_booke206_tlbwe_ppc64 +#define helper_booke206_tlbre helper_booke206_tlbre_ppc64 +#define helper_booke206_tlbsx helper_booke206_tlbsx_ppc64 +#define helper_booke206_tlbivax helper_booke206_tlbivax_ppc64 +#define helper_booke206_tlbilx0 helper_booke206_tlbilx0_ppc64 +#define helper_booke206_tlbilx1 helper_booke206_tlbilx1_ppc64 +#define helper_booke206_tlbilx3 helper_booke206_tlbilx3_ppc64 +#define helper_booke206_tlbflush helper_booke206_tlbflush_ppc64 +#define helper_check_tlb_flush_local helper_check_tlb_flush_local_ppc64 +#define helper_check_tlb_flush_global helper_check_tlb_flush_global_ppc64 +#define ppc_cpu_tlb_fill ppc_cpu_tlb_fill_ppc64 +#define helper_load_tbl helper_load_tbl_ppc64 +#define helper_load_tbu helper_load_tbu_ppc64 +#define helper_load_atbl helper_load_atbl_ppc64 +#define helper_load_atbu helper_load_atbu_ppc64 +#define helper_load_vtb helper_load_vtb_ppc64 +#define helper_load_601_rtcl helper_load_601_rtcl_ppc64 +#define helper_load_601_rtcu helper_load_601_rtcu_ppc64 +#define helper_store_tbl helper_store_tbl_ppc64 +#define helper_store_tbu helper_store_tbu_ppc64 +#define helper_store_atbl helper_store_atbl_ppc64 +#define helper_store_atbu helper_store_atbu_ppc64 +#define helper_store_601_rtcl helper_store_601_rtcl_ppc64 +#define helper_store_601_rtcu helper_store_601_rtcu_ppc64 +#define helper_load_decr helper_load_decr_ppc64 +#define helper_store_decr helper_store_decr_ppc64 +#define helper_load_hdecr helper_load_hdecr_ppc64 +#define helper_store_hdecr helper_store_hdecr_ppc64 +#define helper_store_vtb helper_store_vtb_ppc64 +#define helper_store_tbu40 helper_store_tbu40_ppc64 +#define helper_load_40x_pit helper_load_40x_pit_ppc64 +#define helper_store_40x_pit helper_store_40x_pit_ppc64 +#define helper_store_booke_tcr helper_store_booke_tcr_ppc64 +#define helper_store_booke_tsr helper_store_booke_tsr_ppc64 +#define helper_load_dcr helper_load_dcr_ppc64 +#define helper_store_dcr helper_store_dcr_ppc64 +#define helper_raise_exception helper_raise_exception_ppc64 +#define helper_book3s_msgsnd helper_book3s_msgsnd_ppc64 +#define helper_cmpb helper_cmpb_ppc64 +#define helper_mfvscr helper_mfvscr_ppc64 +#define helper_vaddshs helper_vaddshs_ppc64 +#define helper_vavguw helper_vavguw_ppc64 +#define 
helper_vcmpequh helper_vcmpequh_ppc64 +#define helper_vcmpequh_dot helper_vcmpequh_dot_ppc64 +#define helper_vcmpnezh helper_vcmpnezh_ppc64 +#define helper_vcmpnezh_dot helper_vcmpnezh_dot_ppc64 +#define helper_vmsumshm helper_vmsumshm_ppc64 +#define helper_vmsumuhs helper_vmsumuhs_ppc64 +#define helper_vmulosh helper_vmulosh_ppc64 +#define helper_vmulosw helper_vmulosw_ppc64 +#define helper_vbpermq helper_vbpermq_ppc64 +#define helper_vextsw2d helper_vextsw2d_ppc64 +#define helper_stmw helper_stmw_ppc64 +#define ppc_translate_init ppc_translate_init_ppc64 +#define cpu_ppc_init cpu_ppc_init_ppc64 +#define gen_intermediate_code gen_intermediate_code_ppc64 +#define restore_state_to_opc restore_state_to_opc_ppc64 +#define ppc_set_irq ppc_set_irq_ppc64 +#define ppc6xx_irq_init ppc6xx_irq_init_ppc64 +#define ppc40x_core_reset ppc40x_core_reset_ppc64 +#define ppc40x_chip_reset ppc40x_chip_reset_ppc64 +#define ppc40x_system_reset ppc40x_system_reset_ppc64 +#define store_40x_dbcr0 store_40x_dbcr0_ppc64 +#define ppc40x_irq_init ppc40x_irq_init_ppc64 +#define ppce500_irq_init ppce500_irq_init_ppc64 +#define ppce500_set_mpic_proxy ppce500_set_mpic_proxy_ppc64 +#define cpu_ppc_get_tb cpu_ppc_get_tb_ppc64 +#define cpu_ppc_load_tbl cpu_ppc_load_tbl_ppc64 +#define cpu_ppc_load_tbu cpu_ppc_load_tbu_ppc64 +#define cpu_ppc_store_tbl cpu_ppc_store_tbl_ppc64 +#define cpu_ppc_store_tbu cpu_ppc_store_tbu_ppc64 +#define cpu_ppc_load_atbl cpu_ppc_load_atbl_ppc64 +#define cpu_ppc_load_atbu cpu_ppc_load_atbu_ppc64 +#define cpu_ppc_store_atbl cpu_ppc_store_atbl_ppc64 +#define cpu_ppc_store_atbu cpu_ppc_store_atbu_ppc64 +#define cpu_ppc_load_vtb cpu_ppc_load_vtb_ppc64 +#define cpu_ppc_store_vtb cpu_ppc_store_vtb_ppc64 +#define cpu_ppc_store_tbu40 cpu_ppc_store_tbu40_ppc64 +#define ppc_decr_clear_on_delivery ppc_decr_clear_on_delivery_ppc64 +#define cpu_ppc_load_decr cpu_ppc_load_decr_ppc64 +#define cpu_ppc_load_hdecr cpu_ppc_load_hdecr_ppc64 +#define cpu_ppc_load_purr cpu_ppc_load_purr_ppc64 +#define cpu_ppc_store_decr cpu_ppc_store_decr_ppc64 +#define cpu_ppc_store_hdecr cpu_ppc_store_hdecr_ppc64 +#define cpu_ppc_store_purr cpu_ppc_store_purr_ppc64 +#define cpu_ppc_tb_init cpu_ppc_tb_init_ppc64 +#define cpu_ppc601_load_rtcu cpu_ppc601_load_rtcu_ppc64 +#define cpu_ppc601_store_rtcu cpu_ppc601_store_rtcu_ppc64 +#define cpu_ppc601_load_rtcl cpu_ppc601_load_rtcl_ppc64 +#define cpu_ppc601_store_rtcl cpu_ppc601_store_rtcl_ppc64 +#define load_40x_pit load_40x_pit_ppc64 +#define store_40x_pit store_40x_pit_ppc64 +#define ppc_40x_timers_init ppc_40x_timers_init_ppc64 +#define ppc_dcr_read ppc_dcr_read_ppc64 +#define ppc_dcr_write ppc_dcr_write_ppc64 +#define ppc_dcr_register ppc_dcr_register_ppc64 +#define ppc_dcr_init ppc_dcr_init_ppc64 +#define ppc_cpu_pir ppc_cpu_pir_ppc64 +#define ppc_irq_reset ppc_irq_reset_ppc64 +#endif diff --git a/qemu/qapi-types.c b/qemu/qapi-types.c deleted file mode 100644 index 173c654b..00000000 --- a/qemu/qapi-types.c +++ /dev/null @@ -1,293 +0,0 @@ -/* AUTOMATICALLY GENERATED, DO NOT MODIFY */ - -/* - * deallocation functions for schema-defined QAPI types - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * Michael Roth - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
- * - */ - -#include "qapi/dealloc-visitor.h" -#include "qapi-types.h" -#include "qapi-visit.h" - -const char *ErrorClass_lookup[] = { - "GenericError", - "CommandNotFound", - "DeviceEncrypted", - "DeviceNotActive", - "DeviceNotFound", - "KVMMissingCap", - NULL, -}; - -const char *X86CPURegister32_lookup[] = { - "EAX", - "EBX", - "ECX", - "EDX", - "ESP", - "EBP", - "ESI", - "EDI", - NULL, -}; - - -#ifndef QAPI_TYPES_BUILTIN_CLEANUP_DEF_H -#define QAPI_TYPES_BUILTIN_CLEANUP_DEF_H - - -void qapi_free_strList(strList *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_strList(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_intList(intList *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_intList(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_numberList(numberList *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_numberList(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_boolList(boolList *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_boolList(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_int8List(int8List *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_int8List(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_int16List(int16List *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_int16List(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_int32List(int32List *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_int32List(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_int64List(int64List *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_int64List(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_uint8List(uint8List *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_uint8List(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_uint16List(uint16List *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_uint16List(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_uint32List(uint32List *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_uint32List(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -void qapi_free_uint64List(uint64List *obj) -{ - 
QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_uint64List(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - -#endif /* QAPI_TYPES_BUILTIN_CLEANUP_DEF_H */ - - -void qapi_free_ErrorClassList(ErrorClassList *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_ErrorClassList(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - - -void qapi_free_X86CPURegister32List(X86CPURegister32List *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_X86CPURegister32List(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - - -void qapi_free_X86CPUFeatureWordInfoList(X86CPUFeatureWordInfoList *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_X86CPUFeatureWordInfoList(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - - -void qapi_free_X86CPUFeatureWordInfo(X86CPUFeatureWordInfo *obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_X86CPUFeatureWordInfo(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} - diff --git a/qemu/qapi-types.h b/qemu/qapi-types.h deleted file mode 100644 index 944e8825..00000000 --- a/qemu/qapi-types.h +++ /dev/null @@ -1,228 +0,0 @@ -/* AUTOMATICALLY GENERATED, DO NOT MODIFY */ - -/* - * schema-defined QAPI types - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
- * - */ - -#ifndef QAPI_TYPES_H -#define QAPI_TYPES_H - -#include "unicorn/platform.h" - - -#ifndef QAPI_TYPES_BUILTIN_STRUCT_DECL_H -#define QAPI_TYPES_BUILTIN_STRUCT_DECL_H - - -typedef struct strList -{ - union { - char *value; - uint64_t padding; - }; - struct strList *next; -} strList; - -typedef struct intList -{ - union { - int64_t value; - uint64_t padding; - }; - struct intList *next; -} intList; - -typedef struct numberList -{ - union { - double value; - uint64_t padding; - }; - struct numberList *next; -} numberList; - -typedef struct boolList -{ - union { - bool value; - uint64_t padding; - }; - struct boolList *next; -} boolList; - -typedef struct int8List -{ - union { - int8_t value; - uint64_t padding; - }; - struct int8List *next; -} int8List; - -typedef struct int16List -{ - union { - int16_t value; - uint64_t padding; - }; - struct int16List *next; -} int16List; - -typedef struct int32List -{ - union { - int32_t value; - uint64_t padding; - }; - struct int32List *next; -} int32List; - -typedef struct int64List -{ - union { - int64_t value; - uint64_t padding; - }; - struct int64List *next; -} int64List; - -typedef struct uint8List -{ - union { - uint8_t value; - uint64_t padding; - }; - struct uint8List *next; -} uint8List; - -typedef struct uint16List -{ - union { - uint16_t value; - uint64_t padding; - }; - struct uint16List *next; -} uint16List; - -typedef struct uint32List -{ - union { - uint32_t value; - uint64_t padding; - }; - struct uint32List *next; -} uint32List; - -typedef struct uint64List -{ - union { - uint64_t value; - uint64_t padding; - }; - struct uint64List *next; -} uint64List; - -#endif /* QAPI_TYPES_BUILTIN_STRUCT_DECL_H */ - - -extern const char *ErrorClass_lookup[]; -typedef enum ErrorClass -{ - ERROR_CLASS_GENERIC_ERROR = 0, - ERROR_CLASS_COMMAND_NOT_FOUND = 1, - ERROR_CLASS_DEVICE_ENCRYPTED = 2, - ERROR_CLASS_DEVICE_NOT_ACTIVE = 3, - ERROR_CLASS_DEVICE_NOT_FOUND = 4, - ERROR_CLASS_KVM_MISSING_CAP = 5, - ERROR_CLASS_MAX = 6, -} ErrorClass; - -typedef struct ErrorClassList -{ - union { - ErrorClass value; - uint64_t padding; - }; - struct ErrorClassList *next; -} ErrorClassList; - -extern const char *X86CPURegister32_lookup[]; -typedef enum X86CPURegister32 -{ - X86_CPU_REGISTER32_EAX = 0, - X86_CPU_REGISTER32_EBX = 1, - X86_CPU_REGISTER32_ECX = 2, - X86_CPU_REGISTER32_EDX = 3, - X86_CPU_REGISTER32_ESP = 4, - X86_CPU_REGISTER32_EBP = 5, - X86_CPU_REGISTER32_ESI = 6, - X86_CPU_REGISTER32_EDI = 7, - X86_CPU_REGISTER32_MAX = 8, -} X86CPURegister32; - -typedef struct X86CPURegister32List -{ - union { - X86CPURegister32 value; - uint64_t padding; - }; - struct X86CPURegister32List *next; -} X86CPURegister32List; - - -typedef struct X86CPUFeatureWordInfo X86CPUFeatureWordInfo; - -typedef struct X86CPUFeatureWordInfoList -{ - union { - X86CPUFeatureWordInfo *value; - uint64_t padding; - }; - struct X86CPUFeatureWordInfoList *next; -} X86CPUFeatureWordInfoList; - -#ifndef QAPI_TYPES_BUILTIN_CLEANUP_DECL_H -#define QAPI_TYPES_BUILTIN_CLEANUP_DECL_H - -void qapi_free_strList(strList *obj); -void qapi_free_intList(intList *obj); -void qapi_free_numberList(numberList *obj); -void qapi_free_boolList(boolList *obj); -void qapi_free_int8List(int8List *obj); -void qapi_free_int16List(int16List *obj); -void qapi_free_int32List(int32List *obj); -void qapi_free_int64List(int64List *obj); -void qapi_free_uint8List(uint8List *obj); -void qapi_free_uint16List(uint16List *obj); -void qapi_free_uint32List(uint32List *obj); -void qapi_free_uint64List(uint64List *obj); - 
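Note why each list struct above wraps its value in a union with a uint64_t padding member: it pins every node type to one size and alignment, so generated code can cast any typed list to a shared generic node and drive a single traversal loop over all of them. A self-contained sketch of the trick; GenericList mirrors the shape the visitor core relies on, and int16List is copied from the declarations above:

#include <stdint.h>
#include <stdio.h>

typedef struct GenericList {
    union {
        void *value;
        uint64_t padding;   /* same slot size in every list type */
    };
    struct GenericList *next;
} GenericList;

typedef struct int16List {
    union {
        int16_t value;      /* narrower than a pointer... */
        uint64_t padding;   /* ...but padded to the common slot */
    };
    struct int16List *next;
} int16List;

/* One routine serves every list type via the shared layout. */
static unsigned list_len(GenericList *l)
{
    unsigned n = 0;

    for (; l != NULL; l = l->next) {
        n++;
    }
    return n;
}

int main(void)
{
    int16List tail = { .value = 2, .next = NULL };
    int16List head = { .value = 1, .next = &tail };

    printf("%u\n", list_len((GenericList *)&head)); /* prints 2 */
    return 0;
}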
-#endif /* QAPI_TYPES_BUILTIN_CLEANUP_DECL_H */ - - -void qapi_free_ErrorClassList(ErrorClassList *obj); - -void qapi_free_X86CPURegister32List(X86CPURegister32List *obj); - -struct X86CPUFeatureWordInfo -{ - int64_t cpuid_input_eax; - bool has_cpuid_input_ecx; - int64_t cpuid_input_ecx; - X86CPURegister32 cpuid_register; - int64_t features; -}; - -void qapi_free_X86CPUFeatureWordInfoList(X86CPUFeatureWordInfoList *obj); -void qapi_free_X86CPUFeatureWordInfo(X86CPUFeatureWordInfo *obj); - -#endif diff --git a/qemu/qapi-visit.c b/qemu/qapi-visit.c deleted file mode 100644 index 7733bb55..00000000 --- a/qemu/qapi-visit.c +++ /dev/null @@ -1,428 +0,0 @@ -/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ - -/* - * schema-defined QAPI visitor functions - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#include "qemu-common.h" -#include "qapi-visit.h" - -void visit_type_strList(Visitor *m, strList **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - strList *native_i = (strList *)i; - visit_type_str(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_intList(Visitor *m, intList **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - intList *native_i = (intList *)i; - visit_type_int(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_numberList(Visitor *m, numberList **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - numberList *native_i = (numberList *)i; - visit_type_number(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_boolList(Visitor *m, boolList **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - boolList *native_i = (boolList *)i; - visit_type_bool(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_int8List(Visitor *m, int8List **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - int8List *native_i = (int8List *)i; - visit_type_int8(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - 
err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_int16List(Visitor *m, int16List **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - int16List *native_i = (int16List *)i; - visit_type_int16(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_int32List(Visitor *m, int32List **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - int32List *native_i = (int32List *)i; - visit_type_int32(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_int64List(Visitor *m, int64List **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - int64List *native_i = (int64List *)i; - visit_type_int64(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_uint8List(Visitor *m, uint8List **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - uint8List *native_i = (uint8List *)i; - visit_type_uint8(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_uint16List(Visitor *m, uint16List **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - uint16List *native_i = (uint16List *)i; - visit_type_uint16(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_uint32List(Visitor *m, uint32List **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - uint32List *native_i = (uint32List *)i; - visit_type_uint32(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_uint64List(Visitor *m, uint64List **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - uint64List 
*native_i = (uint64List *)i; - visit_type_uint64(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_ErrorClassList(Visitor *m, ErrorClassList **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - ErrorClassList *native_i = (ErrorClassList *)i; - visit_type_ErrorClass(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_ErrorClass(Visitor *m, ErrorClass *obj, const char *name, Error **errp) -{ - visit_type_enum(m, (int *)obj, ErrorClass_lookup, "ErrorClass", name, errp); -} - -void visit_type_X86CPURegister32List(Visitor *m, X86CPURegister32List **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - X86CPURegister32List *native_i = (X86CPURegister32List *)i; - visit_type_X86CPURegister32(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} - -void visit_type_X86CPURegister32(Visitor *m, X86CPURegister32 *obj, const char *name, Error **errp) -{ - visit_type_enum(m, (int *)obj, X86CPURegister32_lookup, "X86CPURegister32", name, errp); -} - -static void visit_type_X86CPUFeatureWordInfo_fields(Visitor *m, X86CPUFeatureWordInfo **obj, Error **errp) -{ - Error *err = NULL; - visit_type_int(m, &(*obj)->cpuid_input_eax, "cpuid-input-eax", &err); - if (err) { - goto out; - } - visit_optional(m, &(*obj)->has_cpuid_input_ecx, "cpuid-input-ecx", &err); - if (!err && (*obj)->has_cpuid_input_ecx) { - visit_type_int(m, &(*obj)->cpuid_input_ecx, "cpuid-input-ecx", &err); - } - if (err) { - goto out; - } - visit_type_X86CPURegister32(m, &(*obj)->cpuid_register, "cpuid-register", &err); - if (err) { - goto out; - } - visit_type_int(m, &(*obj)->features, "features", &err); - if (err) { - goto out; - } - -out: - error_propagate(errp, err); -} - -void visit_type_X86CPUFeatureWordInfo(Visitor *m, X86CPUFeatureWordInfo **obj, const char *name, Error **errp) -{ - Error *err = NULL; - - visit_start_struct(m, (void **)obj, "X86CPUFeatureWordInfo", name, sizeof(X86CPUFeatureWordInfo), &err); - if (!err) { - if (*obj) { - visit_type_X86CPUFeatureWordInfo_fields(m, obj, errp); - } - visit_end_struct(m, &err); - } - error_propagate(errp, err); -} - -void visit_type_X86CPUFeatureWordInfoList(Visitor *m, X86CPUFeatureWordInfoList **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - X86CPUFeatureWordInfoList *native_i = (X86CPUFeatureWordInfoList *)i; - visit_type_X86CPUFeatureWordInfo(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} diff --git a/qemu/qapi-visit.h b/qemu/qapi-visit.h deleted file mode 100644 index 51bd0887..00000000 --- a/qemu/qapi-visit.h +++ /dev/null @@ -1,51 
+0,0 @@ -/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ - -/* - * schema-defined QAPI visitor functions - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#ifndef QAPI_VISIT_H -#define QAPI_VISIT_H - -#include "qapi/visitor.h" -#include "qapi-types.h" - - -#ifndef QAPI_VISIT_BUILTIN_VISITOR_DECL_H -#define QAPI_VISIT_BUILTIN_VISITOR_DECL_H - -void visit_type_strList(Visitor *m, strList **obj, const char *name, Error **errp); -void visit_type_intList(Visitor *m, intList **obj, const char *name, Error **errp); -void visit_type_numberList(Visitor *m, numberList **obj, const char *name, Error **errp); -void visit_type_boolList(Visitor *m, boolList **obj, const char *name, Error **errp); -void visit_type_int8List(Visitor *m, int8List **obj, const char *name, Error **errp); -void visit_type_int16List(Visitor *m, int16List **obj, const char *name, Error **errp); -void visit_type_int32List(Visitor *m, int32List **obj, const char *name, Error **errp); -void visit_type_int64List(Visitor *m, int64List **obj, const char *name, Error **errp); -void visit_type_uint8List(Visitor *m, uint8List **obj, const char *name, Error **errp); -void visit_type_uint16List(Visitor *m, uint16List **obj, const char *name, Error **errp); -void visit_type_uint32List(Visitor *m, uint32List **obj, const char *name, Error **errp); -void visit_type_uint64List(Visitor *m, uint64List **obj, const char *name, Error **errp); - -#endif /* QAPI_VISIT_BUILTIN_VISITOR_DECL_H */ - - -void visit_type_ErrorClass(Visitor *m, ErrorClass *obj, const char *name, Error **errp); -void visit_type_ErrorClassList(Visitor *m, ErrorClassList **obj, const char *name, Error **errp); - -void visit_type_X86CPURegister32(Visitor *m, X86CPURegister32 *obj, const char *name, Error **errp); -void visit_type_X86CPURegister32List(Visitor *m, X86CPURegister32List **obj, const char *name, Error **errp); - -void visit_type_X86CPUFeatureWordInfo(Visitor *m, X86CPUFeatureWordInfo **obj, const char *name, Error **errp); -void visit_type_X86CPUFeatureWordInfoList(Visitor *m, X86CPUFeatureWordInfoList **obj, const char *name, Error **errp); - -#endif diff --git a/qemu/qapi/Makefile.objs b/qemu/qapi/Makefile.objs deleted file mode 100644 index 00e40d60..00000000 --- a/qemu/qapi/Makefile.objs +++ /dev/null @@ -1,3 +0,0 @@ -util-obj-y = qapi-visit-core.o qapi-dealloc-visitor.o qmp-input-visitor.o -util-obj-y += qmp-output-visitor.o -util-obj-y += string-input-visitor.o diff --git a/qemu/qapi/qapi-dealloc-visitor.c b/qemu/qapi/qapi-dealloc-visitor.c deleted file mode 100644 index a14a1c71..00000000 --- a/qemu/qapi/qapi-dealloc-visitor.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Dealloc Visitor - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Michael Roth - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
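Stepping back to the visitor functions deleted just above: every visit_type_*List body is the same loop stamped out once per element type, with only the inner visit_type_* call changing. The shared skeleton, written once; FooList and visit_type_Foo are placeholders, and the visit_* calls are the qapi APIs declared in qapi-visit.h:

void visit_type_FooList(Visitor *m, FooList **obj, const char *name,
                        Error **errp)
{
    Error *err = NULL;
    GenericList *i, **prev;

    visit_start_list(m, name, &err);
    if (err) {
        goto out;
    }

    /* visit_next_list() yields one node per iteration: the input
     * visitor allocates nodes as it parses, the output visitor walks
     * an existing list, and the dealloc visitor frees nodes as it
     * advances. */
    for (prev = (GenericList **)obj;
         !err && (i = visit_next_list(m, prev, &err)) != NULL;
         prev = &i) {
        FooList *native_i = (FooList *)i;
        visit_type_Foo(m, &native_i->value, NULL, &err);
    }

    error_propagate(errp, err);
    err = NULL;
    visit_end_list(m, &err);
out:
    error_propagate(errp, err);
}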
- * - */ - -#include "qapi/dealloc-visitor.h" -#include "qemu/queue.h" -#include "qemu-common.h" -#include "qapi/qmp/types.h" -#include "qapi/visitor-impl.h" - -typedef struct StackEntry -{ - void *value; - bool is_list_head; - QTAILQ_ENTRY(StackEntry) node; -} StackEntry; - -struct QapiDeallocVisitor -{ - Visitor visitor; - QTAILQ_HEAD(, StackEntry) stack; - bool is_list_head; -}; - -static QapiDeallocVisitor *to_qov(Visitor *v) -{ - return container_of(v, QapiDeallocVisitor, visitor); -} - -static void qapi_dealloc_push(QapiDeallocVisitor *qov, void *value) -{ - StackEntry *e = g_malloc0(sizeof(*e)); - - e->value = value; - - /* see if we're just pushing a list head tracker */ - if (value == NULL) { - e->is_list_head = true; - } - QTAILQ_INSERT_HEAD(&qov->stack, e, node); -} - -static void *qapi_dealloc_pop(QapiDeallocVisitor *qov) -{ - StackEntry *e = QTAILQ_FIRST(&qov->stack); - QObject *value; - QTAILQ_REMOVE(&qov->stack, e, node); - value = e->value; - g_free(e); - return value; -} - -static void qapi_dealloc_start_struct(Visitor *v, void **obj, const char *kind, - const char *name, size_t unused, - Error **errp) -{ - QapiDeallocVisitor *qov = to_qov(v); - qapi_dealloc_push(qov, obj); -} - -static void qapi_dealloc_end_struct(Visitor *v, Error **errp) -{ - QapiDeallocVisitor *qov = to_qov(v); - void **obj = qapi_dealloc_pop(qov); - if (obj) { - g_free(*obj); - } -} - -static void qapi_dealloc_start_implicit_struct(Visitor *v, - void **obj, - size_t size, - Error **errp) -{ - QapiDeallocVisitor *qov = to_qov(v); - qapi_dealloc_push(qov, obj); -} - -static void qapi_dealloc_end_implicit_struct(Visitor *v, Error **errp) -{ - QapiDeallocVisitor *qov = to_qov(v); - void **obj = qapi_dealloc_pop(qov); - if (obj) { - g_free(*obj); - } -} - -static void qapi_dealloc_start_list(Visitor *v, const char *name, Error **errp) -{ - QapiDeallocVisitor *qov = to_qov(v); - qapi_dealloc_push(qov, NULL); -} - -static GenericList *qapi_dealloc_next_list(Visitor *v, GenericList **listp, - Error **errp) -{ - GenericList *list = *listp; - QapiDeallocVisitor *qov = to_qov(v); - StackEntry *e = QTAILQ_FIRST(&qov->stack); - - if (e && e->is_list_head) { - e->is_list_head = false; - return list; - } - - if (list) { - list = list->next; - g_free(*listp); - return list; - } - - return NULL; -} - -static void qapi_dealloc_end_list(Visitor *v, Error **errp) -{ - QapiDeallocVisitor *qov = to_qov(v); - void *obj = qapi_dealloc_pop(qov); - assert(obj == NULL); /* should've been list head tracker with no payload */ -} - -static void qapi_dealloc_type_str(Visitor *v, char **obj, const char *name, - Error **errp) -{ - if (obj) { - g_free(*obj); - } -} - -static void qapi_dealloc_type_int(Visitor *v, int64_t *obj, const char *name, - Error **errp) -{ -} - -static void qapi_dealloc_type_bool(Visitor *v, bool *obj, const char *name, - Error **errp) -{ -} - -static void qapi_dealloc_type_number(Visitor *v, double *obj, const char *name, - Error **errp) -{ -} - -static void qapi_dealloc_type_size(Visitor *v, uint64_t *obj, const char *name, - Error **errp) -{ -} - -static void qapi_dealloc_type_enum(Visitor *v, int *obj, const char *strings[], - const char *kind, const char *name, - Error **errp) -{ -} - -/* If there's no data present, the dealloc visitor has nothing to free. - * Thus, indicate to visitor code that the subsequent union fields can - * be skipped. This is not an error condition, since the cleanup of the - * rest of an object can continue unhindered, so leave errp unset in - * these cases. 
- * - * NOTE: In cases where we're attempting to deallocate an object that - * may have missing fields, the field indicating the union type may - * be missing. In such a case, it's possible we don't have enough - * information to differentiate data_present == false from a case where - * data *is* present but happens to be a scalar with a value of 0. - * This is okay, since in the case of the dealloc visitor there's no - * work that needs to done in either situation. - * - * The current inability in QAPI code to more thoroughly verify a union - * type in such cases will likely need to be addressed if we wish to - * implement this interface for other types of visitors in the future, - * however. - */ -static bool qapi_dealloc_start_union(Visitor *v, bool data_present, - Error **errp) -{ - return data_present; -} - -Visitor *qapi_dealloc_get_visitor(QapiDeallocVisitor *v) -{ - return &v->visitor; -} - -void qapi_dealloc_visitor_cleanup(QapiDeallocVisitor *v) -{ - g_free(v); -} - -QapiDeallocVisitor *qapi_dealloc_visitor_new(void) -{ - QapiDeallocVisitor *v; - - v = g_malloc0(sizeof(*v)); - - v->visitor.start_struct = qapi_dealloc_start_struct; - v->visitor.end_struct = qapi_dealloc_end_struct; - v->visitor.start_implicit_struct = qapi_dealloc_start_implicit_struct; - v->visitor.end_implicit_struct = qapi_dealloc_end_implicit_struct; - v->visitor.start_list = qapi_dealloc_start_list; - v->visitor.next_list = qapi_dealloc_next_list; - v->visitor.end_list = qapi_dealloc_end_list; - v->visitor.type_enum = qapi_dealloc_type_enum; - v->visitor.type_int = qapi_dealloc_type_int; - v->visitor.type_bool = qapi_dealloc_type_bool; - v->visitor.type_str = qapi_dealloc_type_str; - v->visitor.type_number = qapi_dealloc_type_number; - v->visitor.type_size = qapi_dealloc_type_size; - v->visitor.start_union = qapi_dealloc_start_union; - - QTAILQ_INIT(&v->stack); - - return v; -} diff --git a/qemu/qapi/qapi-visit-core.c b/qemu/qapi/qapi-visit-core.c deleted file mode 100644 index 5f918403..00000000 --- a/qemu/qapi/qapi-visit-core.c +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Core Definitions for QAPI Visitor Classes - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
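The subtle piece of the dealloc visitor above is qapi_dealloc_next_list(): once the list-head marker is consumed, each call reads list->next first, frees the node it is leaving via g_free(*listp), and returns the successor, so the list is reclaimed one step behind the traversal. The same free-behind idiom in isolation:

#include <stdlib.h>

typedef struct Node {
    int value;
    struct Node *next;
} Node;

/* Free-behind traversal: save the link before freeing the node we
 * stand on, exactly what qapi_dealloc_next_list() does one visitor
 * callback at a time. */
static void destroy_list(Node *head)
{
    while (head != NULL) {
        Node *next = head->next;  /* read the link first */
        free(head);
        head = next;
    }
}

Spreading this across start_list/next_list/end_list callbacks is what lets the one generated visit_type_*List loop serve parsing, serialization, and deallocation alike.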
- * - */ - -#include "qemu-common.h" -#include "qapi/qmp/qobject.h" -#include "qapi/qmp/qerror.h" -#include "qapi/visitor.h" -#include "qapi/visitor-impl.h" - -void visit_start_struct(Visitor *v, void **obj, const char *kind, - const char *name, size_t size, Error **errp) -{ - v->start_struct(v, obj, kind, name, size, errp); -} - -void visit_end_struct(Visitor *v, Error **errp) -{ - v->end_struct(v, errp); -} - -void visit_start_implicit_struct(Visitor *v, void **obj, size_t size, - Error **errp) -{ - if (v->start_implicit_struct) { - v->start_implicit_struct(v, obj, size, errp); - } -} - -void visit_end_implicit_struct(Visitor *v, Error **errp) -{ - if (v->end_implicit_struct) { - v->end_implicit_struct(v, errp); - } -} - -void visit_start_list(Visitor *v, const char *name, Error **errp) -{ - v->start_list(v, name, errp); -} - -GenericList *visit_next_list(Visitor *v, GenericList **list, Error **errp) -{ - return v->next_list(v, list, errp); -} - -void visit_end_list(Visitor *v, Error **errp) -{ - v->end_list(v, errp); -} - -bool visit_start_union(Visitor *v, bool data_present, Error **errp) -{ - if (v->start_union) { - return v->start_union(v, data_present, errp); - } - return true; -} - -void visit_end_union(Visitor *v, bool data_present, Error **errp) -{ - if (v->end_union) { - v->end_union(v, data_present, errp); - } -} - -void visit_optional(Visitor *v, bool *present, const char *name, - Error **errp) -{ - if (v->optional) { - v->optional(v, present, name, errp); - } -} - -void visit_get_next_type(Visitor *v, int *obj, const int *qtypes, - const char *name, Error **errp) -{ - if (v->get_next_type) { - v->get_next_type(v, obj, qtypes, name, errp); - } -} - -void visit_type_enum(Visitor *v, int *obj, const char *strings[], - const char *kind, const char *name, Error **errp) -{ - v->type_enum(v, obj, strings, kind, name, errp); -} - -void visit_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp) -{ - v->type_int(v, obj, name, errp); -} - -void visit_type_uint8(Visitor *v, uint8_t *obj, const char *name, Error **errp) -{ - int64_t value; - - if (v->type_uint8) { - v->type_uint8(v, obj, name, errp); - } else { - value = *obj; - v->type_int(v, &value, name, errp); - if (value < 0 || value > UINT8_MAX) { - error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", - "uint8_t"); - return; - } - *obj = (uint8_t)value; - } -} - -void visit_type_uint16(Visitor *v, uint16_t *obj, const char *name, Error **errp) -{ - int64_t value; - - if (v->type_uint16) { - v->type_uint16(v, obj, name, errp); - } else { - value = *obj; - v->type_int(v, &value, name, errp); - if (value < 0 || value > UINT16_MAX) { - error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", - "uint16_t"); - return; - } - *obj = (uint16_t)value; - } -} - -void visit_type_uint32(Visitor *v, uint32_t *obj, const char *name, Error **errp) -{ - int64_t value; - - if (v->type_uint32) { - v->type_uint32(v, obj, name, errp); - } else { - value = *obj; - v->type_int(v, &value, name, errp); - if (value < 0 || value > UINT32_MAX) { - error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? 
name : "null", - "uint32_t"); - return; - } - *obj = (uint32_t)value; - } -} - -void visit_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp) -{ - int64_t value; - - if (v->type_uint64) { - v->type_uint64(v, obj, name, errp); - } else { - value = *obj; - v->type_int(v, &value, name, errp); - *obj = value; - } -} - -void visit_type_int8(Visitor *v, int8_t *obj, const char *name, Error **errp) -{ - int64_t value; - - if (v->type_int8) { - v->type_int8(v, obj, name, errp); - } else { - value = *obj; - v->type_int(v, &value, name, errp); - if (value < INT8_MIN || value > INT8_MAX) { - error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", - "int8_t"); - return; - } - *obj = (int8_t)value; - } -} - -void visit_type_int16(Visitor *v, int16_t *obj, const char *name, Error **errp) -{ - int64_t value; - - if (v->type_int16) { - v->type_int16(v, obj, name, errp); - } else { - value = *obj; - v->type_int(v, &value, name, errp); - if (value < INT16_MIN || value > INT16_MAX) { - error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", - "int16_t"); - return; - } - *obj = (int16_t)value; - } -} - -void visit_type_int32(Visitor *v, int32_t *obj, const char *name, Error **errp) -{ - int64_t value; - - if (v->type_int32) { - v->type_int32(v, obj, name, errp); - } else { - value = *obj; - v->type_int(v, &value, name, errp); - if (value < INT32_MIN || value > INT32_MAX) { - error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", - "int32_t"); - return; - } - *obj = (int32_t)value; - } -} - -void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp) -{ - if (v->type_int64) { - v->type_int64(v, obj, name, errp); - } else { - v->type_int(v, obj, name, errp); - } -} - -void visit_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp) -{ - int64_t value; - - if (v->type_size) { - v->type_size(v, obj, name, errp); - } else if (v->type_uint64) { - v->type_uint64(v, obj, name, errp); - } else { - value = *obj; - v->type_int(v, &value, name, errp); - *obj = value; - } -} - -void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp) -{ - v->type_bool(v, obj, name, errp); -} - -void visit_type_str(Visitor *v, char **obj, const char *name, Error **errp) -{ - v->type_str(v, obj, name, errp); -} - -void visit_type_number(Visitor *v, double *obj, const char *name, Error **errp) -{ - v->type_number(v, obj, name, errp); -} - -void output_type_enum(Visitor *v, int *obj, const char *strings[], - const char *kind, const char *name, - Error **errp) -{ - int i = 0; - int value = *obj; - char *enum_str; - - assert(strings); - while (strings[i++] != NULL); - if (value < 0 || value >= i - 1) { - error_set(errp, QERR_INVALID_PARAMETER, name ? 
name : "null"); - return; - } - - enum_str = (char *)strings[value]; - visit_type_str(v, &enum_str, name, errp); -} - -void input_type_enum(Visitor *v, int *obj, const char *strings[], - const char *kind, const char *name, - Error **errp) -{ - Error *local_err = NULL; - int64_t value = 0; - char *enum_str; - - assert(strings); - - visit_type_str(v, &enum_str, name, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - - while (strings[value] != NULL) { - if (strcmp(strings[value], enum_str) == 0) { - break; - } - value++; - } - - if (strings[value] == NULL) { - error_set(errp, QERR_INVALID_PARAMETER, enum_str); - g_free(enum_str); - return; - } - - g_free(enum_str); - *obj = (int)value; -} diff --git a/qemu/qapi/qmp-input-visitor.c b/qemu/qapi/qmp-input-visitor.c deleted file mode 100644 index 33dd754b..00000000 --- a/qemu/qapi/qmp-input-visitor.c +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Input Visitor - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#include "qapi/qmp-input-visitor.h" -#include "qapi/visitor-impl.h" -#include "qemu/queue.h" -#include "qemu-common.h" -#include "qapi/qmp/types.h" -#include "qapi/qmp/qerror.h" - -#define QIV_STACK_SIZE 1024 - -typedef struct StackObject -{ - QObject *obj; - const QListEntry *entry; - GHashTable *h; -} StackObject; - -struct QmpInputVisitor -{ - Visitor visitor; - StackObject stack[QIV_STACK_SIZE]; - int nb_stack; - bool strict; -}; - -static QmpInputVisitor *to_qiv(Visitor *v) -{ - return container_of(v, QmpInputVisitor, visitor); -} - -static QObject *qmp_input_get_object(QmpInputVisitor *qiv, - const char *name, - bool consume) -{ - QObject *qobj = qiv->stack[qiv->nb_stack - 1].obj; - - if (qobj) { - if (name && qobject_type(qobj) == QTYPE_QDICT) { - if (qiv->stack[qiv->nb_stack - 1].h && consume) { - g_hash_table_remove(qiv->stack[qiv->nb_stack - 1].h, name); - } - return qdict_get(qobject_to_qdict(qobj), name); - } else if (qiv->stack[qiv->nb_stack - 1].entry) { - return qlist_entry_obj(qiv->stack[qiv->nb_stack - 1].entry); - } - } - - return qobj; -} - -static void qdict_add_key(const char *key, QObject *obj, void *opaque) -{ - GHashTable *h = opaque; - g_hash_table_insert(h, (gpointer) key, NULL); -} - -static void qmp_input_push(QmpInputVisitor *qiv, QObject *obj, Error **errp) -{ - GHashTable *h; - - if (qiv->nb_stack >= QIV_STACK_SIZE) { - error_setg(errp, "An internal buffer overran"); - return; - } - - qiv->stack[qiv->nb_stack].obj = obj; - qiv->stack[qiv->nb_stack].entry = NULL; - qiv->stack[qiv->nb_stack].h = NULL; - - if (qiv->strict && qobject_type(obj) == QTYPE_QDICT) { - h = g_hash_table_new(g_str_hash, g_str_equal); - qdict_iter(qobject_to_qdict(obj), qdict_add_key, h); - qiv->stack[qiv->nb_stack].h = h; - } - - qiv->nb_stack++; -} - -/** Only for qmp_input_pop. 
*/ -static gboolean always_true(gpointer key, gpointer val, gpointer user_pkey) -{ - *(const char **)user_pkey = (const char *)key; - return TRUE; -} - -static void qmp_input_pop(QmpInputVisitor *qiv, Error **errp) -{ - assert(qiv->nb_stack > 0); - - if (qiv->strict) { - GHashTable * const top_ht = qiv->stack[qiv->nb_stack - 1].h; - if (top_ht) { - if (g_hash_table_size(top_ht)) { - const char *key; - g_hash_table_find(top_ht, always_true, (gpointer)&key); - error_set(errp, QERR_QMP_EXTRA_MEMBER, key); - } - g_hash_table_unref(top_ht); - } - } - - qiv->nb_stack--; -} - -static void qmp_input_start_struct(Visitor *v, void **obj, const char *kind, - const char *name, size_t size, Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - QObject *qobj = qmp_input_get_object(qiv, name, true); - Error *err = NULL; - - if (!qobj || qobject_type(qobj) != QTYPE_QDICT) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", - "QDict"); - return; - } - - qmp_input_push(qiv, qobj, &err); - if (err) { - error_propagate(errp, err); - return; - } - - if (obj) { - *obj = g_malloc0(size); - } -} - -static void qmp_input_end_struct(Visitor *v, Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - - qmp_input_pop(qiv, errp); -} - -static void qmp_input_start_implicit_struct(Visitor *v, void **obj, - size_t size, Error **errp) -{ - if (obj) { - *obj = g_malloc0(size); - } -} - -static void qmp_input_end_implicit_struct(Visitor *v, Error **errp) -{ -} - -static void qmp_input_start_list(Visitor *v, const char *name, Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - QObject *qobj = qmp_input_get_object(qiv, name, true); - - if (!qobj || qobject_type(qobj) != QTYPE_QLIST) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", - "list"); - return; - } - - qmp_input_push(qiv, qobj, errp); -} - -static GenericList *qmp_input_next_list(Visitor *v, GenericList **list, - Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - GenericList *entry; - StackObject *so = &qiv->stack[qiv->nb_stack - 1]; - bool first; - - if (so->entry == NULL) { - so->entry = qlist_first(qobject_to_qlist(so->obj)); - first = true; - } else { - so->entry = qlist_next(so->entry); - first = false; - } - - if (so->entry == NULL) { - return NULL; - } - - entry = g_malloc0(sizeof(*entry)); - if (first) { - *list = entry; - } else { - (*list)->next = entry; - } - - return entry; -} - -static void qmp_input_end_list(Visitor *v, Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - - qmp_input_pop(qiv, errp); -} - -static void qmp_input_get_next_type(Visitor *v, int *kind, const int *qobjects, - const char *name, Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - QObject *qobj = qmp_input_get_object(qiv, name, false); - - if (!qobj) { - error_set(errp, QERR_MISSING_PARAMETER, name ? name : "null"); - return; - } - *kind = qobjects[qobject_type(qobj)]; -} - -static void qmp_input_type_int(Visitor *v, int64_t *obj, const char *name, - Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - QObject *qobj = qmp_input_get_object(qiv, name, true); - - if (!qobj || qobject_type(qobj) != QTYPE_QINT) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? 
name : "null", - "integer"); - return; - } - - *obj = qint_get_int(qobject_to_qint(qobj)); -} - -static void qmp_input_type_bool(Visitor *v, bool *obj, const char *name, - Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - QObject *qobj = qmp_input_get_object(qiv, name, true); - - if (!qobj || qobject_type(qobj) != QTYPE_QBOOL) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", - "boolean"); - return; - } - - *obj = qbool_get_int(qobject_to_qbool(qobj)); -} - -static void qmp_input_type_str(Visitor *v, char **obj, const char *name, - Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - QObject *qobj = qmp_input_get_object(qiv, name, true); - - if (!qobj || qobject_type(qobj) != QTYPE_QSTRING) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", - "string"); - return; - } - - *obj = g_strdup(qstring_get_str(qobject_to_qstring(qobj))); -} - -static void qmp_input_type_number(Visitor *v, double *obj, const char *name, - Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - QObject *qobj = qmp_input_get_object(qiv, name, true); - - if (!qobj || (qobject_type(qobj) != QTYPE_QFLOAT && - qobject_type(qobj) != QTYPE_QINT)) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", - "number"); - return; - } - - if (qobject_type(qobj) == QTYPE_QINT) { - *obj = (double)qint_get_int(qobject_to_qint(qobj)); - } else { - *obj = qfloat_get_double(qobject_to_qfloat(qobj)); - } -} - -static void qmp_input_optional(Visitor *v, bool *present, const char *name, - Error **errp) -{ - QmpInputVisitor *qiv = to_qiv(v); - QObject *qobj = qmp_input_get_object(qiv, name, true); - - if (!qobj) { - *present = false; - return; - } - - *present = true; -} - -Visitor *qmp_input_get_visitor(QmpInputVisitor *v) -{ - return &v->visitor; -} - -void qmp_input_visitor_cleanup(QmpInputVisitor *v) -{ - qobject_decref(v->stack[0].obj); - g_free(v); -} - -QmpInputVisitor *qmp_input_visitor_new(QObject *obj) -{ - QmpInputVisitor *v; - - v = g_malloc0(sizeof(*v)); - - v->visitor.start_struct = qmp_input_start_struct; - v->visitor.end_struct = qmp_input_end_struct; - v->visitor.start_implicit_struct = qmp_input_start_implicit_struct; - v->visitor.end_implicit_struct = qmp_input_end_implicit_struct; - v->visitor.start_list = qmp_input_start_list; - v->visitor.next_list = qmp_input_next_list; - v->visitor.end_list = qmp_input_end_list; - v->visitor.type_enum = input_type_enum; - v->visitor.type_int = qmp_input_type_int; - v->visitor.type_bool = qmp_input_type_bool; - v->visitor.type_str = qmp_input_type_str; - v->visitor.type_number = qmp_input_type_number; - v->visitor.optional = qmp_input_optional; - v->visitor.get_next_type = qmp_input_get_next_type; - - qmp_input_push(v, obj, NULL); - qobject_incref(obj); - - return v; -} - -QmpInputVisitor *qmp_input_visitor_new_strict(QObject *obj) -{ - QmpInputVisitor *v; - - v = qmp_input_visitor_new(obj); - v->strict = true; - - return v; -} diff --git a/qemu/qapi/qmp-output-visitor.c b/qemu/qapi/qmp-output-visitor.c deleted file mode 100644 index 96b33846..00000000 --- a/qemu/qapi/qmp-output-visitor.c +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Core Definitions for QAPI/QMP Command Registry - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
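Strict mode in the input visitor just deleted is pure bookkeeping: qmp_input_push() loads every key of a QDict into a hash table, qmp_input_get_object() removes each key as it is consumed, and qmp_input_pop() reports any survivor as QERR_QMP_EXTRA_MEMBER. The same mechanism in a dependency-free sketch, with a fixed array standing in for GHashTable:

#include <stdio.h>
#include <string.h>

#define MAX_KEYS 16

typedef struct KeySet {
    const char *keys[MAX_KEYS];
    int n;
} KeySet;

static void keyset_add(KeySet *s, const char *k)
{
    s->keys[s->n++] = k;              /* "push": record every member */
}

static void keyset_consume(KeySet *s, const char *k)
{
    for (int i = 0; i < s->n; i++) {
        if (s->keys[i] && strcmp(s->keys[i], k) == 0) {
            s->keys[i] = NULL;        /* like g_hash_table_remove() */
            return;
        }
    }
}

static const char *keyset_leftover(const KeySet *s)
{
    for (int i = 0; i < s->n; i++) {
        if (s->keys[i]) {
            return s->keys[i];        /* never visited: extra member */
        }
    }
    return NULL;
}

int main(void)
{
    KeySet s = { { 0 }, 0 };
    const char *extra;

    keyset_add(&s, "cpuid-input-eax");
    keyset_add(&s, "typo-member");
    keyset_consume(&s, "cpuid-input-eax");   /* field was visited */

    extra = keyset_leftover(&s);
    if (extra) {
        printf("QMP extra member: %s\n", extra);  /* typo-member */
    }
    return 0;
}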
- * - */ - -#include "qapi/qmp-output-visitor.h" -#include "qapi/visitor-impl.h" -#include "qemu/queue.h" -#include "qemu-common.h" -#include "qapi/qmp/types.h" -#include "qapi/qmp/qerror.h" - -typedef struct QStackEntry -{ - QObject *value; - bool is_list_head; - QTAILQ_ENTRY(QStackEntry) node; -} QStackEntry; - -typedef QTAILQ_HEAD(QStack, QStackEntry) QStack; - -struct QmpOutputVisitor -{ - Visitor visitor; - QStack stack; -}; - -#define qmp_output_add(qov, name, value) \ - qmp_output_add_obj(qov, name, QOBJECT(value)) -#define qmp_output_push(qov, value) qmp_output_push_obj(qov, QOBJECT(value)) - -static QmpOutputVisitor *to_qov(Visitor *v) -{ - return container_of(v, QmpOutputVisitor, visitor); -} - -static void qmp_output_push_obj(QmpOutputVisitor *qov, QObject *value) -{ - QStackEntry *e = g_malloc0(sizeof(*e)); - - e->value = value; - if (qobject_type(e->value) == QTYPE_QLIST) { - e->is_list_head = true; - } - QTAILQ_INSERT_HEAD(&qov->stack, e, node); -} - -static QObject *qmp_output_pop(QmpOutputVisitor *qov) -{ - QStackEntry *e = QTAILQ_FIRST(&qov->stack); - QObject *value; - QTAILQ_REMOVE(&qov->stack, e, node); - value = e->value; - g_free(e); - return value; -} - -static QObject *qmp_output_first(QmpOutputVisitor *qov) -{ - QStackEntry *e = QTAILQ_LAST(&qov->stack, QStack); - - /* FIXME - find a better way to deal with NULL values */ - if (!e) { - return NULL; - } - - return e->value; -} - -static QObject *qmp_output_last(QmpOutputVisitor *qov) -{ - QStackEntry *e = QTAILQ_FIRST(&qov->stack); - return e->value; -} - -static void qmp_output_add_obj(QmpOutputVisitor *qov, const char *name, - QObject *value) -{ - QObject *cur; - - if (QTAILQ_EMPTY(&qov->stack)) { - qmp_output_push_obj(qov, value); - return; - } - - cur = qmp_output_last(qov); - - switch (qobject_type(cur)) { - case QTYPE_QDICT: - qdict_put_obj(qobject_to_qdict(cur), name, value); - break; - case QTYPE_QLIST: - qlist_append_obj(qobject_to_qlist(cur), value); - break; - default: - qobject_decref(qmp_output_pop(qov)); - qmp_output_push_obj(qov, value); - break; - } -} - -static void qmp_output_start_struct(Visitor *v, void **obj, const char *kind, - const char *name, size_t unused, - Error **errp) -{ - QmpOutputVisitor *qov = to_qov(v); - QDict *dict = qdict_new(); - - qmp_output_add(qov, name, dict); - qmp_output_push(qov, dict); -} - -static void qmp_output_end_struct(Visitor *v, Error **errp) -{ - QmpOutputVisitor *qov = to_qov(v); - qmp_output_pop(qov); -} - -static void qmp_output_start_list(Visitor *v, const char *name, Error **errp) -{ - QmpOutputVisitor *qov = to_qov(v); - QList *list = qlist_new(); - - qmp_output_add(qov, name, list); - qmp_output_push(qov, list); -} - -static GenericList *qmp_output_next_list(Visitor *v, GenericList **listp, - Error **errp) -{ - GenericList *list = *listp; - QmpOutputVisitor *qov = to_qov(v); - QStackEntry *e = QTAILQ_FIRST(&qov->stack); - - assert(e); - if (e->is_list_head) { - e->is_list_head = false; - return list; - } - - return list ? 
list->next : NULL; -} - -static void qmp_output_end_list(Visitor *v, Error **errp) -{ - QmpOutputVisitor *qov = to_qov(v); - qmp_output_pop(qov); -} - -static void qmp_output_type_int(Visitor *v, int64_t *obj, const char *name, - Error **errp) -{ - QmpOutputVisitor *qov = to_qov(v); - qmp_output_add(qov, name, qint_from_int(*obj)); -} - -static void qmp_output_type_bool(Visitor *v, bool *obj, const char *name, - Error **errp) -{ - QmpOutputVisitor *qov = to_qov(v); - qmp_output_add(qov, name, qbool_from_int(*obj)); -} - -static void qmp_output_type_str(Visitor *v, char **obj, const char *name, - Error **errp) -{ - QmpOutputVisitor *qov = to_qov(v); - if (*obj) { - qmp_output_add(qov, name, qstring_from_str(*obj)); - } else { - qmp_output_add(qov, name, qstring_from_str("")); - } -} - -static void qmp_output_type_number(Visitor *v, double *obj, const char *name, - Error **errp) -{ - QmpOutputVisitor *qov = to_qov(v); - qmp_output_add(qov, name, qfloat_from_double(*obj)); -} - -QObject *qmp_output_get_qobject(QmpOutputVisitor *qov) -{ - QObject *obj = qmp_output_first(qov); - if (obj) { - qobject_incref(obj); - } - return obj; -} - -Visitor *qmp_output_get_visitor(QmpOutputVisitor *v) -{ - return &v->visitor; -} - -void qmp_output_visitor_cleanup(QmpOutputVisitor *v) -{ - QStackEntry *e, *tmp; - - /* The bottom QStackEntry, if any, owns the root QObject. See the - * qmp_output_push_obj() invocations in qmp_output_add_obj(). */ - QObject *root = QTAILQ_EMPTY(&v->stack) ? NULL : qmp_output_first(v); - - QTAILQ_FOREACH_SAFE(e, &v->stack, node, tmp) { - QTAILQ_REMOVE(&v->stack, e, node); - g_free(e); - } - - qobject_decref(root); - g_free(v); -} - -QmpOutputVisitor *qmp_output_visitor_new(void) -{ - QmpOutputVisitor *v; - - v = g_malloc0(sizeof(*v)); - - v->visitor.start_struct = qmp_output_start_struct; - v->visitor.end_struct = qmp_output_end_struct; - v->visitor.start_list = qmp_output_start_list; - v->visitor.next_list = qmp_output_next_list; - v->visitor.end_list = qmp_output_end_list; - v->visitor.type_enum = output_type_enum; - v->visitor.type_int = qmp_output_type_int; - v->visitor.type_bool = qmp_output_type_bool; - v->visitor.type_str = qmp_output_type_str; - v->visitor.type_number = qmp_output_type_number; - - QTAILQ_INIT(&v->stack); - - return v; -} diff --git a/qemu/qapi/string-input-visitor.c b/qemu/qapi/string-input-visitor.c deleted file mode 100644 index cc5826e4..00000000 --- a/qemu/qapi/string-input-visitor.c +++ /dev/null @@ -1,325 +0,0 @@ -/* - * String parsing visitor - * - * Copyright Red Hat, Inc. 2012 - * - * Author: Paolo Bonzini - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
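The parse_str() routine that follows accepts strings such as "0,2-4,7" and turns them into half-open [begin,end) ranges; under the nesting sits a small grammar: a value, an optional "-value", and an optional "," continuation. A compact standalone cut of that grammar, with overflow/errno handling trimmed and no sorting or merging (the real code defers that to g_list_insert_sorted_merged()):

#include <stdio.h>
#include <stdlib.h>

typedef struct Range {
    long long begin;
    long long end;              /* exclusive, as in qemu/range.h */
} Range;

/* Returns the number of ranges parsed into out[], or -1 on error. */
static int parse_ranges(const char *str, Range *out, int max)
{
    int n = 0;
    char *end;

    while (*str) {
        long long start = strtoll(str, &end, 0);
        long long stop = start;

        if (end == str) {
            return -1;          /* no digits where a value belongs */
        }
        if (*end == '-') {      /* optional "-value" part */
            str = end + 1;
            stop = strtoll(str, &end, 0);
            if (end == str || stop < start) {
                return -1;
            }
        }
        if (n == max) {
            return -1;
        }
        out[n].begin = start;
        out[n].end = stop + 1;
        n++;

        if (*end == ',') {      /* optional continuation */
            str = end + 1;
        } else if (*end == '\0') {
            break;
        } else {
            return -1;
        }
    }
    return n;
}

int main(void)
{
    Range r[8];
    int n = parse_ranges("0,2-4,7", r, 8);

    for (int i = 0; i < n; i++) {
        printf("[%lld,%lld)\n", r[i].begin, r[i].end);
    }
    return 0;
}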
- * - */ - -#include "qemu-common.h" -#include "qapi/string-input-visitor.h" -#include "qapi/visitor-impl.h" -#include "qapi/qmp/qerror.h" -#include "qemu/queue.h" -#include "qemu/range.h" -#include <stdlib.h> // strtoll - - -struct StringInputVisitor -{ - Visitor visitor; - - bool head; - - GList *ranges; - GList *cur_range; - int64_t cur; - - const char *string; -}; - -static void free_range(void *range, void *dummy) -{ - g_free(range); -} - -static void parse_str(StringInputVisitor *siv, Error **errp) -{ - char *str = (char *) siv->string; - long long start, end; - Range *cur; - char *endptr; - - if (siv->ranges) { - return; - } - - do { - errno = 0; - start = strtoll(str, &endptr, 0); - if (errno == 0 && endptr > str) { - if (*endptr == '\0') { - cur = g_malloc0(sizeof(*cur)); - cur->begin = start; - cur->end = start + 1; - siv->ranges = g_list_insert_sorted_merged(siv->ranges, cur, - range_compare); - cur = NULL; - str = NULL; - } else if (*endptr == '-') { - str = endptr + 1; - errno = 0; - end = strtoll(str, &endptr, 0); - if (errno == 0 && endptr > str && start <= end && - (start > INT64_MAX - 65536 || - end < start + 65536)) { - if (*endptr == '\0') { - cur = g_malloc0(sizeof(*cur)); - cur->begin = start; - cur->end = end + 1; - siv->ranges = - g_list_insert_sorted_merged(siv->ranges, - cur, - range_compare); - cur = NULL; - str = NULL; - } else if (*endptr == ',') { - str = endptr + 1; - cur = g_malloc0(sizeof(*cur)); - cur->begin = start; - cur->end = end + 1; - siv->ranges = - g_list_insert_sorted_merged(siv->ranges, - cur, - range_compare); - cur = NULL; - } else { - goto error; - } - } else { - goto error; - } - } else if (*endptr == ',') { - str = endptr + 1; - cur = g_malloc0(sizeof(*cur)); - cur->begin = start; - cur->end = start + 1; - siv->ranges = g_list_insert_sorted_merged(siv->ranges, - cur, - range_compare); - cur = NULL; - } else { - goto error; - } - } else { - goto error; - } - } while (str); - - return; -error: - g_list_foreach(siv->ranges, free_range, NULL); - g_list_free(siv->ranges); - siv->ranges = NULL; -} - -static void -start_list(Visitor *v, const char *name, Error **errp) -{ - StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); - - parse_str(siv, errp); - - siv->cur_range = g_list_first(siv->ranges); - if (siv->cur_range) { - Range *r = siv->cur_range->data; - if (r) { - siv->cur = r->begin; - } - } -} - -static GenericList * -next_list(Visitor *v, GenericList **list, Error **errp) -{ - StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); - GenericList **link; - Range *r; - - if (!siv->ranges || !siv->cur_range) { - return NULL; - } - - r = siv->cur_range->data; - if (!r) { - return NULL; - } - - if ((uint64_t)siv->cur < r->begin || (uint64_t)siv->cur >= r->end) { - siv->cur_range = g_list_next(siv->cur_range); - if (!siv->cur_range) { - return NULL; - } - r = siv->cur_range->data; - if (!r) { - return NULL; - } - siv->cur = r->begin; - } - - if (siv->head) { - link = list; - siv->head = false; - } else { - link = &(*list)->next; - } - - *link = g_malloc0(sizeof **link); - return *link; -} - -static void -end_list(Visitor *v, Error **errp) -{ - StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); - siv->head = true; -} - -static void parse_type_int(Visitor *v, int64_t *obj, const char *name, - Error **errp) -{ - StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); - - if (!siv->string) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ?
name : "null", - "integer"); - return; - } - - parse_str(siv, errp); - - if (!siv->ranges) { - goto error; - } - - if (!siv->cur_range) { - Range *r; - - siv->cur_range = g_list_first(siv->ranges); - if (!siv->cur_range) { - goto error; - } - - r = siv->cur_range->data; - if (!r) { - goto error; - } - - siv->cur = r->begin; - } - - *obj = siv->cur; - siv->cur++; - return; - -error: - error_set(errp, QERR_INVALID_PARAMETER_VALUE, name, - "an int64 value or range"); -} - -static void parse_type_bool(Visitor *v, bool *obj, const char *name, - Error **errp) -{ - StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); - - if (siv->string) { - if (!strcasecmp(siv->string, "on") || - !strcasecmp(siv->string, "yes") || - !strcasecmp(siv->string, "true")) { - *obj = true; - return; - } - if (!strcasecmp(siv->string, "off") || - !strcasecmp(siv->string, "no") || - !strcasecmp(siv->string, "false")) { - *obj = false; - return; - } - } - - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", - "boolean"); -} - -static void parse_type_str(Visitor *v, char **obj, const char *name, - Error **errp) -{ - StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); - if (siv->string) { - *obj = g_strdup(siv->string); - } else { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", - "string"); - } -} - -static void parse_type_number(Visitor *v, double *obj, const char *name, - Error **errp) -{ - StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); - char *endp = (char *) siv->string; - double val; - - errno = 0; - if (siv->string) { - val = strtod(siv->string, &endp); - } - if (!siv->string || errno || endp == siv->string || *endp) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", - "number"); - return; - } - - *obj = val; -} - -static void parse_optional(Visitor *v, bool *present, const char *name, - Error **errp) -{ - StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); - - if (!siv->string) { - *present = false; - return; - } - - *present = true; -} - -Visitor *string_input_get_visitor(StringInputVisitor *v) -{ - return &v->visitor; -} - -void string_input_visitor_cleanup(StringInputVisitor *v) -{ - g_list_foreach(v->ranges, free_range, NULL); - g_list_free(v->ranges); - g_free(v); -} - -StringInputVisitor *string_input_visitor_new(const char *str) -{ - StringInputVisitor *v; - - v = g_malloc0(sizeof(*v)); - - v->visitor.type_enum = input_type_enum; - v->visitor.type_int = parse_type_int; - v->visitor.type_size = NULL; - v->visitor.type_bool = parse_type_bool; - v->visitor.type_str = parse_type_str; - v->visitor.type_number = parse_type_number; - v->visitor.start_list = start_list; - v->visitor.next_list = next_list; - v->visitor.end_list = end_list; - v->visitor.optional = parse_optional; - - v->string = str; - v->head = true; - return v; -} diff --git a/qemu/qemu-timer.c b/qemu/qemu-timer.c deleted file mode 100644 index 28e5121c..00000000 --- a/qemu/qemu-timer.c +++ /dev/null @@ -1,103 +0,0 @@ -/* - * QEMU System Emulator - * - * Copyright (c) 2003-2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the 
following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ - -#include "sysemu/sysemu.h" - -#include "hw/hw.h" - -#include "qemu/timer.h" - -#ifdef CONFIG_PPOLL -#include <poll.h> -#endif - -#ifdef CONFIG_PRCTL_PR_SET_TIMERSLACK -#include <sys/prctl.h> -#endif - -#include "uc_priv.h" - -/***********************************************************/ -/* timers */ - -typedef struct QEMUClock { - /* We rely on BQL to protect the timerlists */ - QLIST_HEAD(, QEMUTimerList) timerlists; - - int64_t last; - - QEMUClockType type; - bool enabled; -} QEMUClock; - -static QEMUClock qemu_clocks[QEMU_CLOCK_MAX]; - -/** - * qemu_clock_ptr: - * @type: type of clock - * - * Translate a clock type into a pointer to QEMUClock object. - * - * Returns: a pointer to the QEMUClock object - */ -static inline QEMUClock *qemu_clock_ptr(QEMUClockType type) -{ - return &qemu_clocks[type]; -} - -/* return the host CPU cycle counter and handle stop/restart */ -int64_t cpu_get_ticks(void) -{ - return cpu_get_real_ticks(); -} - -/* return the host CPU monotonic timer and handle stop/restart */ -int64_t cpu_get_clock(void) -{ - return get_clock(); -} - -int64_t qemu_clock_get_ns(QEMUClockType type) -{ - int64_t now, last; - QEMUClock *clock = qemu_clock_ptr(type); - - switch (type) { - case QEMU_CLOCK_REALTIME: - return get_clock(); - default: - case QEMU_CLOCK_VIRTUAL: - return cpu_get_clock(); - case QEMU_CLOCK_HOST: - now = get_clock_realtime(); - last = clock->last; - clock->last = now; - if (now < last) { - // notifier_list_notify(&clock->reset_notifiers, &now); // FIXME - } - return now; - } -} diff --git a/qemu/qobject/Makefile.objs b/qemu/qobject/Makefile.objs deleted file mode 100644 index 8d852545..00000000 --- a/qemu/qobject/Makefile.objs +++ /dev/null @@ -1,2 +0,0 @@ -util-obj-y = qint.o qstring.o qdict.o qlist.o qfloat.o qbool.o -util-obj-y += qerror.o diff --git a/qemu/qobject/qbool.c b/qemu/qobject/qbool.c deleted file mode 100644 index df4a23b7..00000000 --- a/qemu/qobject/qbool.c +++ /dev/null @@ -1,68 +0,0 @@ -/* - * QBool Module - * - * Copyright IBM, Corp. 2009 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qobject.h" -#include "qemu-common.h" - -static void qbool_destroy_obj(QObject *obj); - -static const QType qbool_type = { - QTYPE_QBOOL, - qbool_destroy_obj, -}; - -/** - * qbool_from_int(): Create a new QBool from an int - * - * Return strong reference.
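On the timer side, qemu_clock_get_ns() above collapses to three cases: QEMU_CLOCK_REALTIME and QEMU_CLOCK_VIRTUAL both read monotonic sources, while QEMU_CLOCK_HOST reads wall-clock time and records the previous reading so a backward jump can at least be noticed (the notifier call is commented out in this copy). A sketch of that HOST branch against POSIX clock_gettime(); the helper names are local to the sketch:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t get_wall_clock_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);
    return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Mirrors the QEMU_CLOCK_HOST branch: remember the last reading so
 * the host clock being set backwards is detectable. */
static int64_t host_clock_get_ns(int64_t *last)
{
    int64_t now = get_wall_clock_ns();

    if (now < *last) {
        /* the deleted code would notify reset listeners here */
        fprintf(stderr, "host clock went back %lld ns\n",
                (long long)(*last - now));
    }
    *last = now;
    return now;
}

int main(void)
{
    int64_t last = 0;

    printf("%lld\n", (long long)host_clock_get_ns(&last));
    return 0;
}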
- */ -QBool *qbool_from_int(int value) -{ - QBool *qb; - - qb = g_malloc(sizeof(*qb)); - qb->value = value; - QOBJECT_INIT(qb, &qbool_type); - - return qb; -} - -/** - * qbool_get_int(): Get the stored int - */ -int qbool_get_int(const QBool *qb) -{ - return qb->value; -} - -/** - * qobject_to_qbool(): Convert a QObject into a QBool - */ -QBool *qobject_to_qbool(const QObject *obj) -{ - if (qobject_type(obj) != QTYPE_QBOOL) - return NULL; - - return container_of(obj, QBool, base); -} - -/** - * qbool_destroy_obj(): Free all memory allocated by a - * QBool object - */ -static void qbool_destroy_obj(QObject *obj) -{ - assert(obj != NULL); - g_free(qobject_to_qbool(obj)); -} diff --git a/qemu/qobject/qdict.c b/qemu/qobject/qdict.c deleted file mode 100644 index e1a96a42..00000000 --- a/qemu/qobject/qdict.c +++ /dev/null @@ -1,699 +0,0 @@ -/* - * QDict Module - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - */ - -#include "qapi/qmp/qint.h" -#include "qapi/qmp/qfloat.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qstring.h" -#include "qapi/qmp/qobject.h" -#include "qemu/queue.h" -#include "qemu-common.h" - -static void qdict_destroy_obj(QObject *obj); - -static const QType qdict_type = { - QTYPE_QDICT, - qdict_destroy_obj, -}; - -/** - * qdict_new(): Create a new QDict - * - * Return strong reference. - */ -QDict *qdict_new(void) -{ - QDict *qdict; - - qdict = g_malloc0(sizeof(*qdict)); - QOBJECT_INIT(qdict, &qdict_type); - - return qdict; -} - -/** - * qobject_to_qdict(): Convert a QObject into a QDict - */ -QDict *qobject_to_qdict(const QObject *obj) -{ - if (qobject_type(obj) != QTYPE_QDICT) - return NULL; - - return container_of(obj, QDict, base); -} - -/** - * tdb_hash(): based on the hash agorithm from gdbm, via tdb - * (from module-init-tools) - */ -static unsigned int tdb_hash(const char *name) -{ - unsigned value; /* Used to compute the hash value. */ - unsigned i; /* Used to cycle through random values. */ - - /* Set the initial value from the key size. */ - for (value = 0x238F13AF * strlen(name), i=0; name[i]; i++) - value = (value + (((const unsigned char *)name)[i] << (i*5 % 24))); - - return (1103515243 * value + 12345); -} - -/** - * alloc_entry(): allocate a new QDictEntry - */ -static QDictEntry *alloc_entry(const char *key, QObject *value) -{ - QDictEntry *entry; - - entry = g_malloc0(sizeof(*entry)); - entry->key = g_strdup(key); - entry->value = value; - - return entry; -} - -/** - * qdict_entry_value(): Return qdict entry value - * - * Return weak reference. - */ -QObject *qdict_entry_value(const QDictEntry *entry) -{ - return entry->value; -} - -/** - * qdict_entry_key(): Return qdict entry key - * - * Return a *pointer* to the string, it has to be duplicated before being - * stored. - */ -const char *qdict_entry_key(const QDictEntry *entry) -{ - return entry->key; -} - -/** - * qdict_find(): List lookup function - */ -static QDictEntry *qdict_find(const QDict *qdict, - const char *key, unsigned int bucket) -{ - QDictEntry *entry; - - QLIST_FOREACH(entry, &qdict->table[bucket], next) - if (!strcmp(entry->key, key)) - return entry; - - return NULL; -} - -/** - * qdict_put_obj(): Put a new QObject into the dictionary - * - * Insert the pair 'key:value' into 'qdict', if 'key' already exists - * its 'value' will be replaced. 
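The QDict introduced above is a fixed-size chained hash table: tdb_hash() selects a bucket and collisions sit on that bucket's QLIST, which qdict_find() scans linearly. A runnable demo of the bucketing; tdb_hash() is copied from the deleted source, and 512 is assumed here for QDICT_BUCKET_MAX (defined in qemu's qapi/qmp/qdict.h, not shown in this hunk):

#include <stdio.h>
#include <string.h>

#define QDICT_BUCKET_MAX 512    /* assumed; see qapi/qmp/qdict.h */

/* tdb_hash() exactly as in the deleted qdict.c */
static unsigned int tdb_hash(const char *name)
{
    unsigned value;
    unsigned i;

    for (value = 0x238F13AF * strlen(name), i = 0; name[i]; i++) {
        value = (value + (((const unsigned char *)name)[i] << (i * 5 % 24)));
    }
    return (1103515243 * value + 12345);
}

int main(void)
{
    const char *keys[] = { "cpuid-input-eax", "cpuid-register", "features" };

    for (int i = 0; i < 3; i++) {
        printf("%-16s -> bucket %u\n",
               keys[i], tdb_hash(keys[i]) % QDICT_BUCKET_MAX);
    }
    return 0;
}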
- * - * This is done by freeing the reference to the stored QObject and - * storing the new one in the same entry. - * - * NOTE: ownership of 'value' is transferred to the QDict - */ -void qdict_put_obj(QDict *qdict, const char *key, QObject *value) -{ - unsigned int bucket; - QDictEntry *entry; - - bucket = tdb_hash(key) % QDICT_BUCKET_MAX; - entry = qdict_find(qdict, key, bucket); - if (entry) { - /* replace key's value */ - qobject_decref(entry->value); - entry->value = value; - } else { - /* allocate a new entry */ - entry = alloc_entry(key, value); - QLIST_INSERT_HEAD(&qdict->table[bucket], entry, next); - qdict->size++; - } -} - -/** - * qdict_get(): Look up a given 'key' - * - * Return a weak reference to the QObject associated with 'key' if - * 'key' is present in the dictionary, NULL otherwise. - */ -QObject *qdict_get(const QDict *qdict, const char *key) -{ - QDictEntry *entry; - - entry = qdict_find(qdict, key, tdb_hash(key) % QDICT_BUCKET_MAX); - return (entry == NULL ? NULL : entry->value); -} - -/** - * qdict_haskey(): Check if 'key' exists - * - * Return 1 if 'key' exists in the dict, 0 otherwise - */ -int qdict_haskey(const QDict *qdict, const char *key) -{ - unsigned int bucket = tdb_hash(key) % QDICT_BUCKET_MAX; - return (qdict_find(qdict, key, bucket) == NULL ? 0 : 1); -} - -/** - * qdict_size(): Return the size of the dictionary - */ -size_t qdict_size(const QDict *qdict) -{ - return qdict->size; -} - -/** - * qdict_get_obj(): Get a QObject of a specific type - */ -static QObject *qdict_get_obj(const QDict *qdict, const char *key, - qtype_code type) -{ - QObject *obj; - - obj = qdict_get(qdict, key); - assert(obj != NULL); - assert(qobject_type(obj) == type); - - return obj; -} - -/** - * qdict_get_double(): Get a number mapped by 'key' - * - * This function assumes that 'key' exists and it stores a - * QFloat or QInt object. - * - * Return number mapped by 'key'. - */ -double qdict_get_double(const QDict *qdict, const char *key) -{ - QObject *obj = qdict_get(qdict, key); - - assert(obj); - switch (qobject_type(obj)) { - case QTYPE_QFLOAT: - return qfloat_get_double(qobject_to_qfloat(obj)); - case QTYPE_QINT: - return (double)qint_get_int(qobject_to_qint(obj)); - default: - abort(); - } -} - -/** - * qdict_get_int(): Get an integer mapped by 'key' - * - * This function assumes that 'key' exists and it stores a - * QInt object. - * - * Return integer mapped by 'key'. - */ -int64_t qdict_get_int(const QDict *qdict, const char *key) -{ - QObject *obj = qdict_get_obj(qdict, key, QTYPE_QINT); - return qint_get_int(qobject_to_qint(obj)); -} - -/** - * qdict_get_bool(): Get a bool mapped by 'key' - * - * This function assumes that 'key' exists and it stores a - * QBool object. - * - * Return bool mapped by 'key'. - */ -int qdict_get_bool(const QDict *qdict, const char *key) -{ - QObject *obj = qdict_get_obj(qdict, key, QTYPE_QBOOL); - return qbool_get_int(qobject_to_qbool(obj)); -} - -/** - * qdict_get_qlist(): Get the QList mapped by 'key' - * - * This function assumes that 'key' exists and it stores a - * QList object. - * - * Return QList mapped by 'key'. - */ -QList *qdict_get_qlist(const QDict *qdict, const char *key) -{ - return qobject_to_qlist(qdict_get_obj(qdict, key, QTYPE_QLIST)); -} - -/** - * qdict_get_qdict(): Get the QDict mapped by 'key' - * - * This function assumes that 'key' exists and it stores a - * QDict object. - * - * Return QDict mapped by 'key'.
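 *
 * A short illustrative sketch (not from the original source) of how the
 * typed getters pair with qdict_put_obj(), assuming the QDECREF() macro
 * from qapi/qmp/qobject.h:
 *
 *   QDict *dict = qdict_new();
 *   qdict_put_obj(dict, "count", QOBJECT(qint_from_int(3))); // ownership moves
 *   int64_t n = qdict_get_int(dict, "count");                // n == 3
 *   QDECREF(dict);                                           // destroys the entries too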
- */ -QDict *qdict_get_qdict(const QDict *qdict, const char *key) -{ - return qobject_to_qdict(qdict_get_obj(qdict, key, QTYPE_QDICT)); -} - -/** - * qdict_get_str(): Get a pointer to the stored string mapped - * by 'key' - * - * This function assumes that 'key' exists and it stores a - * QString object. - * - * Return pointer to the string mapped by 'key'. - */ -const char *qdict_get_str(const QDict *qdict, const char *key) -{ - QObject *obj = qdict_get_obj(qdict, key, QTYPE_QSTRING); - return qstring_get_str(qobject_to_qstring(obj)); -} - -/** - * qdict_get_try_int(): Try to get integer mapped by 'key' - * - * Return integer mapped by 'key', if it is not present in - * the dictionary or if the stored object is not of QInt type - * 'def_value' will be returned. - */ -int64_t qdict_get_try_int(const QDict *qdict, const char *key, - int64_t def_value) -{ - QObject *obj; - - obj = qdict_get(qdict, key); - if (!obj || qobject_type(obj) != QTYPE_QINT) - return def_value; - - return qint_get_int(qobject_to_qint(obj)); -} - -/** - * qdict_get_try_bool(): Try to get a bool mapped by 'key' - * - * Return bool mapped by 'key', if it is not present in the - * dictionary or if the stored object is not of QBool type - * 'def_value' will be returned. - */ -int qdict_get_try_bool(const QDict *qdict, const char *key, int def_value) -{ - QObject *obj; - - obj = qdict_get(qdict, key); - if (!obj || qobject_type(obj) != QTYPE_QBOOL) - return def_value; - - return qbool_get_int(qobject_to_qbool(obj)); -} - -/** - * qdict_get_try_str(): Try to get a pointer to the stored string - * mapped by 'key' - * - * Return a pointer to the string mapped by 'key', if it is not present - * in the dictionary or if the stored object is not of QString type - * NULL will be returned. - */ -const char *qdict_get_try_str(const QDict *qdict, const char *key) -{ - QObject *obj; - - obj = qdict_get(qdict, key); - if (!obj || qobject_type(obj) != QTYPE_QSTRING) - return NULL; - - return qstring_get_str(qobject_to_qstring(obj)); -} - -/** - * qdict_iter(): Iterate over all the dictionary's stored values. - * - * This function allows the user to provide an iterator, which will be - * called for each stored value in the dictionary. - */ -void qdict_iter(const QDict *qdict, - void (*iter)(const char *key, QObject *obj, void *opaque), - void *opaque) -{ - int i; - QDictEntry *entry; - - for (i = 0; i < QDICT_BUCKET_MAX; i++) { - QLIST_FOREACH(entry, &qdict->table[i], next) - iter(entry->key, entry->value, opaque); - } -} - -static QDictEntry *qdict_next_entry(const QDict *qdict, int first_bucket) -{ - int i; - - for (i = first_bucket; i < QDICT_BUCKET_MAX; i++) { - if (!QLIST_EMPTY(&qdict->table[i])) { - return QLIST_FIRST(&qdict->table[i]); - } - } - - return NULL; -} - -/** - * qdict_first(): Return first qdict entry for iteration. - */ -const QDictEntry *qdict_first(const QDict *qdict) -{ - return qdict_next_entry(qdict, 0); -} - -/** - * qdict_next(): Return next qdict entry in an iteration. - */ -const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry) -{ - QDictEntry *ret; - - ret = QLIST_NEXT(entry, next); - if (!ret) { - unsigned int bucket = tdb_hash(entry->key) % QDICT_BUCKET_MAX; - ret = qdict_next_entry(qdict, bucket + 1); - } - - return ret; -} - -/** - * qdict_clone_shallow(): Clones a given QDict. Its entries are not copied, but - * another reference is added. 
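 *
 * Sketch of the sharing semantics (illustrative only): after cloning, both
 * dicts hold references to the same stored values, so each dict is released
 * independently and a value is freed only when its last reference drops:
 *
 *   QDict *copy = qdict_clone_shallow(orig);
 *   qdict_del(copy, "key");   // affects 'copy' only
 *   QDECREF(copy);            // 'orig' and its values remain valid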
- */ -QDict *qdict_clone_shallow(const QDict *src) -{ - QDict *dest; - QDictEntry *entry; - int i; - - dest = qdict_new(); - - for (i = 0; i < QDICT_BUCKET_MAX; i++) { - QLIST_FOREACH(entry, &src->table[i], next) { - qobject_incref(entry->value); - qdict_put_obj(dest, entry->key, entry->value); - } - } - - return dest; -} - -/** - * qentry_destroy(): Free all the memory allocated by a QDictEntry - */ -static void qentry_destroy(QDictEntry *e) -{ - assert(e != NULL); - assert(e->key != NULL); - assert(e->value != NULL); - - qobject_decref(e->value); - g_free(e->key); - g_free(e); -} - -/** - * qdict_del(): Delete a 'key:value' pair from the dictionary - * - * This will destroy all data allocated by this entry. - */ -void qdict_del(QDict *qdict, const char *key) -{ - QDictEntry *entry; - - entry = qdict_find(qdict, key, tdb_hash(key) % QDICT_BUCKET_MAX); - if (entry) { - QLIST_REMOVE(entry, next); - qentry_destroy(entry); - qdict->size--; - } -} - -/** - * qdict_destroy_obj(): Free all the memory allocated by a QDict - */ -static void qdict_destroy_obj(QObject *obj) -{ - int i; - QDict *qdict; - - assert(obj != NULL); - qdict = qobject_to_qdict(obj); - - for (i = 0; i < QDICT_BUCKET_MAX; i++) { - QDictEntry *entry = QLIST_FIRST(&qdict->table[i]); - while (entry) { - QDictEntry *tmp = QLIST_NEXT(entry, next); - QLIST_REMOVE(entry, next); - qentry_destroy(entry); - entry = tmp; - } - } - - g_free(qdict); -} - -static void qdict_flatten_qdict(QDict *qdict, QDict *target, - const char *prefix); - -static void qdict_flatten_qlist(QList *qlist, QDict *target, const char *prefix) -{ - QObject *value; - const QListEntry *entry; - char *new_key; - int i; - - /* This function is never called with prefix == NULL, i.e., it is always - * called from within qdict_flatten_q(list|dict)(). Therefore, it does not - * need to remove list entries during the iteration (the whole list will be - * deleted eventually anyway from qdict_flatten_qdict()). */ - assert(prefix); - - entry = qlist_first(qlist); - - for (i = 0; entry; entry = qlist_next(entry), i++) { - value = qlist_entry_obj(entry); - new_key = g_strdup_printf("%s.%i", prefix, i); - - if (qobject_type(value) == QTYPE_QDICT) { - qdict_flatten_qdict(qobject_to_qdict(value), target, new_key); - } else if (qobject_type(value) == QTYPE_QLIST) { - qdict_flatten_qlist(qobject_to_qlist(value), target, new_key); - } else { - /* All other types are moved to the target unchanged. */ - qobject_incref(value); - qdict_put_obj(target, new_key, value); - } - - g_free(new_key); - } -} - -static void qdict_flatten_qdict(QDict *qdict, QDict *target, const char *prefix) -{ - QObject *value; - const QDictEntry *entry, *next; - char *new_key; - bool delete; - - entry = qdict_first(qdict); - - while (entry != NULL) { - - next = qdict_next(qdict, entry); - value = qdict_entry_value(entry); - new_key = NULL; - delete = false; - - if (prefix) { - new_key = g_strdup_printf("%s.%s", prefix, entry->key); - } - - if (qobject_type(value) == QTYPE_QDICT) { - /* Entries of QDicts are processed recursively, the QDict object - * itself disappears. */ - qdict_flatten_qdict(qobject_to_qdict(value), target, - new_key ? new_key : entry->key); - delete = true; - } else if (qobject_type(value) == QTYPE_QLIST) { - qdict_flatten_qlist(qobject_to_qlist(value), target, - new_key ? new_key : entry->key); - delete = true; - } else if (prefix) { - /* All other objects are moved to the target unchanged. 
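     * For example (illustrative): flattening {"a": {"b": 42}, "l": [7]}
     * rewrites the keys to {"a.b": 42, "l.0": 7}.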
*/ - qobject_incref(value); - qdict_put_obj(target, new_key, value); - delete = true; - } - - g_free(new_key); - - if (delete) { - qdict_del(qdict, entry->key); - - /* Restart loop after modifying the iterated QDict */ - entry = qdict_first(qdict); - continue; - } - - entry = next; - } -} - -/** - * qdict_flatten(): For each nested QDict with key x, all fields with key y - * are moved to this QDict and their key is renamed to "x.y". For each nested - * QList with key x, the field at index y is moved to this QDict with the key - * "x.y" (i.e., the reverse of what qdict_array_split() does). - * This operation is applied recursively for nested QDicts and QLists. - */ -void qdict_flatten(QDict *qdict) -{ - qdict_flatten_qdict(qdict, qdict, NULL); -} - -/* extract all the src QDict entries starting by start into dst */ -void qdict_extract_subqdict(QDict *src, QDict **dst, const char *start) - -{ - const QDictEntry *entry, *next; - const char *p; - - *dst = qdict_new(); - entry = qdict_first(src); - - while (entry != NULL) { - next = qdict_next(src, entry); - if (strstart(entry->key, start, &p)) { - qobject_incref(entry->value); - qdict_put_obj(*dst, p, entry->value); - qdict_del(src, entry->key); - } - entry = next; - } -} - -static bool qdict_has_prefixed_entries(const QDict *src, const char *start) -{ - const QDictEntry *entry; - - for (entry = qdict_first(src); entry; entry = qdict_next(src, entry)) { - if (strstart(entry->key, start, NULL)) { - return true; - } - } - - return false; -} - -/** - * qdict_array_split(): This function moves array-like elements of a QDict into - * a new QList. Every entry in the original QDict with a key "%u" or one - * prefixed "%u.", where %u designates an unsigned integer starting at 0 and - * incrementally counting up, will be moved to a new QDict at index %u in the - * output QList with the key prefix removed, if that prefix is "%u.". If the - * whole key is just "%u", the whole QObject will be moved unchanged without - * creating a new QDict. The function terminates when there is no entry in the - * QDict with a prefix directly (incrementally) following the last one; it also - * returns if there are both entries with "%u" and "%u." for the same index %u. - * Example: {"0.a": 42, "0.b": 23, "1.x": 0, "4.y": 1, "o.o": 7, "2": 66} - * (or {"1.x": 0, "4.y": 1, "0.a": 42, "o.o": 7, "0.b": 23, "2": 66}) - * => [{"a": 42, "b": 23}, {"x": 0}, 66] - * and {"4.y": 1, "o.o": 7} (remainder of the old QDict) - */ -void qdict_array_split(QDict *src, QList **dst) -{ - unsigned i; - - *dst = qlist_new(); - - for (i = 0; i < UINT_MAX; i++) { - QObject *subqobj; - bool is_subqdict; - QDict *subqdict; - char indexstr[32], prefix[32]; - size_t snprintf_ret; - - snprintf_ret = snprintf(indexstr, 32, "%u", i); - assert(snprintf_ret < 32); - - subqobj = qdict_get(src, indexstr); - - snprintf_ret = snprintf(prefix, 32, "%u.", i); - assert(snprintf_ret < 32); - - is_subqdict = qdict_has_prefixed_entries(src, prefix); - - // There may be either a single subordinate object (named "%u") or - // multiple objects (each with a key prefixed "%u."), but not both. - if (!subqobj == !is_subqdict) { - break; - } - - if (is_subqdict) { - qdict_extract_subqdict(src, &subqdict, prefix); - assert(qdict_size(subqdict) > 0); - } else { - qobject_incref(subqobj); - qdict_del(src, indexstr); - } - - qlist_append_obj(*dst, (subqobj!=NULL) ? subqobj : QOBJECT(subqdict)); - } -} - -/** - * qdict_join(): Absorb the src QDict into the dest QDict, that is, move all - * elements from src to dest. 
- * - * If an element from src has a key already present in dest, it will not be - * moved unless overwrite is true. - * - * If overwrite is true, the conflicting values in dest will be discarded and - * replaced by the corresponding values from src. - * - * Therefore, with overwrite being true, the src QDict will always be empty when - * this function returns. If overwrite is false, the src QDict will be empty - * iff there were no conflicts. - */ -void qdict_join(QDict *dest, QDict *src, bool overwrite) -{ - const QDictEntry *entry, *next; - - entry = qdict_first(src); - while (entry) { - next = qdict_next(src, entry); - - if (overwrite || !qdict_haskey(dest, entry->key)) { - qobject_incref(entry->value); - qdict_put_obj(dest, entry->key, entry->value); - qdict_del(src, entry->key); - } - - entry = next; - } -} diff --git a/qemu/qobject/qerror.c b/qemu/qobject/qerror.c deleted file mode 100644 index 5589854a..00000000 --- a/qemu/qobject/qerror.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * QError Module - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - */ - -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qerror.h" -#include "qemu-common.h" - - -/** - * qerror_human(): Format QError data into human-readable string. - */ -QString *qerror_human(const QError *qerror) -{ - return qstring_from_str(qerror->err_msg); -} - -void qerror_report(ErrorClass eclass, const char *fmt, ...) -{ -} - -/* Evil... */ -struct Error -{ - char *msg; - ErrorClass err_class; -}; - -void qerror_report_err(Error *err) -{ -} diff --git a/qemu/qobject/qfloat.c b/qemu/qobject/qfloat.c deleted file mode 100644 index d6d4d3a2..00000000 --- a/qemu/qobject/qfloat.c +++ /dev/null @@ -1,68 +0,0 @@ -/* - * QFloat Module - * - * Copyright IBM, Corp. 2009 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#include "qapi/qmp/qfloat.h" -#include "qapi/qmp/qobject.h" -#include "qemu-common.h" - -static void qfloat_destroy_obj(QObject *obj); - -static const QType qfloat_type = { - QTYPE_QFLOAT, - qfloat_destroy_obj, -}; - -/** - * qfloat_from_double(): Create a new QFloat from a double - * - * Return strong reference. - */ -QFloat *qfloat_from_double(double value) -{ - QFloat *qf; - - qf = g_malloc(sizeof(*qf)); - qf->value = value; - QOBJECT_INIT(qf, &qfloat_type); - - return qf; -} - -/** - * qfloat_get_double(): Get the stored float - */ -double qfloat_get_double(const QFloat *qf) -{ - return qf->value; -} - -/** - * qobject_to_qfloat(): Convert a QObject into a QFloat - */ -QFloat *qobject_to_qfloat(const QObject *obj) -{ - if (qobject_type(obj) != QTYPE_QFLOAT) - return NULL; - - return container_of(obj, QFloat, base); -} - -/** - * qfloat_destroy_obj(): Free all memory allocated by a - * QFloat object - */ -static void qfloat_destroy_obj(QObject *obj) -{ - assert(obj != NULL); - g_free(qobject_to_qfloat(obj)); -} diff --git a/qemu/qobject/qint.c b/qemu/qobject/qint.c deleted file mode 100644 index e285d134..00000000 --- a/qemu/qobject/qint.c +++ /dev/null @@ -1,67 +0,0 @@ -/* - * QInt Module - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory.
- */ - -#include "qapi/qmp/qint.h" -#include "qapi/qmp/qobject.h" -#include "qemu-common.h" - -static void qint_destroy_obj(QObject *obj); - -static const QType qint_type = { - QTYPE_QINT, - qint_destroy_obj, -}; - -/** - * qint_from_int(): Create a new QInt from an int64_t - * - * Return strong reference. - */ -QInt *qint_from_int(int64_t value) -{ - QInt *qi; - - qi = g_malloc(sizeof(*qi)); - qi->value = value; - QOBJECT_INIT(qi, &qint_type); - - return qi; -} - -/** - * qint_get_int(): Get the stored integer - */ -int64_t qint_get_int(const QInt *qi) -{ - return qi->value; -} - -/** - * qobject_to_qint(): Convert a QObject into a QInt - */ -QInt *qobject_to_qint(const QObject *obj) -{ - if (qobject_type(obj) != QTYPE_QINT) - return NULL; - - return container_of(obj, QInt, base); -} - -/** - * qint_destroy_obj(): Free all memory allocated by a - * QInt object - */ -static void qint_destroy_obj(QObject *obj) -{ - assert(obj != NULL); - g_free(qobject_to_qint(obj)); -} diff --git a/qemu/qobject/qlist.c b/qemu/qobject/qlist.c deleted file mode 100644 index 60ce805d..00000000 --- a/qemu/qobject/qlist.c +++ /dev/null @@ -1,170 +0,0 @@ -/* - * QList Module - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - */ - -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qobject.h" -#include "qemu/queue.h" -#include "qemu-common.h" - -static void qlist_destroy_obj(QObject *obj); - -static const QType qlist_type = { - QTYPE_QLIST, - qlist_destroy_obj, -}; - -/** - * qlist_new(): Create a new QList - * - * Return strong reference. - */ -QList *qlist_new(void) -{ - QList *qlist; - - qlist = g_malloc(sizeof(*qlist)); - QTAILQ_INIT(&qlist->head); - QOBJECT_INIT(qlist, &qlist_type); - - return qlist; -} - -static void qlist_copy_elem(QObject *obj, void *opaque) -{ - QList *dst = opaque; - - qobject_incref(obj); - qlist_append_obj(dst, obj); -} - -QList *qlist_copy(QList *src) -{ - QList *dst = qlist_new(); - - qlist_iter(src, qlist_copy_elem, dst); - - return dst; -} - -/** - * qlist_append_obj(): Append a QObject to a QList - * - * NOTE: ownership of 'value' is transferred to the QList - */ -void qlist_append_obj(QList *qlist, QObject *value) -{ - QListEntry *entry; - - entry = g_malloc(sizeof(*entry)); - entry->value = value; - - QTAILQ_INSERT_TAIL(&qlist->head, entry, next); -} - -/** - * qlist_iter(): Iterate over all the list's stored values. - * - * This function allows the user to provide an iterator, which will be - * called for each stored value in the list.
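 *
 * Illustrative callback sketch (it mirrors qlist_size_iter() further down
 * in this file):
 *
 *   static void count_elem(QObject *obj, void *opaque)
 *   {
 *       (*(size_t *)opaque)++;
 *   }
 *
 *   size_t n = 0;
 *   qlist_iter(list, count_elem, &n);   // n == number of stored values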
- */ -void qlist_iter(const QList *qlist, - void (*iter)(QObject *obj, void *opaque), void *opaque) -{ - QListEntry *entry; - - QTAILQ_FOREACH(entry, &qlist->head, next) - iter(entry->value, opaque); -} - -QObject *qlist_pop(QList *qlist) -{ - QListEntry *entry; - QObject *ret; - - if (qlist == NULL || QTAILQ_EMPTY(&qlist->head)) { - return NULL; - } - - entry = QTAILQ_FIRST(&qlist->head); - QTAILQ_REMOVE(&qlist->head, entry, next); - - ret = entry->value; - g_free(entry); - - return ret; -} - -QObject *qlist_peek(QList *qlist) -{ - QListEntry *entry; - QObject *ret; - - if (qlist == NULL || QTAILQ_EMPTY(&qlist->head)) { - return NULL; - } - - entry = QTAILQ_FIRST(&qlist->head); - - ret = entry->value; - - return ret; -} - -int qlist_empty(const QList *qlist) -{ - return QTAILQ_EMPTY(&qlist->head); -} - -static void qlist_size_iter(QObject *obj, void *opaque) -{ - size_t *count = opaque; - (*count)++; -} - -size_t qlist_size(const QList *qlist) -{ - size_t count = 0; - qlist_iter(qlist, qlist_size_iter, &count); - return count; -} - -/** - * qobject_to_qlist(): Convert a QObject into a QList - */ -QList *qobject_to_qlist(const QObject *obj) -{ - if (qobject_type(obj) != QTYPE_QLIST) { - return NULL; - } - - return container_of(obj, QList, base); -} - -/** - * qlist_destroy_obj(): Free all the memory allocated by a QList - */ -static void qlist_destroy_obj(QObject *obj) -{ - QList *qlist; - QListEntry *entry, *next_entry; - - assert(obj != NULL); - qlist = qobject_to_qlist(obj); - - QTAILQ_FOREACH_SAFE(entry, &qlist->head, next, next_entry) { - QTAILQ_REMOVE(&qlist->head, entry, next); - qobject_decref(entry->value); - g_free(entry); - } - - g_free(qlist); -} diff --git a/qemu/qobject/qstring.c b/qemu/qobject/qstring.c deleted file mode 100644 index 542810a3..00000000 --- a/qemu/qobject/qstring.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * QString Module - * - * Copyright (C) 2009 Red Hat Inc. - * - * Authors: - * Luiz Capitulino - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - */ - -#include "qapi/qmp/qobject.h" -#include "qapi/qmp/qstring.h" -#include "qemu-common.h" - -static void qstring_destroy_obj(QObject *obj); - -static const QType qstring_type = { - QTYPE_QSTRING, - qstring_destroy_obj, -}; - -/** - * qstring_new(): Create a new empty QString - * - * Return strong reference. - */ -QString *qstring_new(void) -{ - return qstring_from_str(""); -} - -/** - * qstring_get_length(): Get the length of a QString - */ -size_t qstring_get_length(const QString *qstring) -{ - return qstring->length; } -

/** - * qstring_from_substr(): Create a new QString from a C string substring - * - * Return strong reference. - */ -QString *qstring_from_substr(const char *str, int start, int end) -{ - QString *qstring; - - qstring = g_malloc(sizeof(*qstring)); - - qstring->length = end - start + 1; - qstring->capacity = qstring->length; - - qstring->string = g_malloc(qstring->capacity + 1); - memcpy(qstring->string, str + start, qstring->length); - qstring->string[qstring->length] = 0; - - QOBJECT_INIT(qstring, &qstring_type); - - return qstring; -} - -/** - * qstring_from_str(): Create a new QString from a regular C string - * - * Return strong reference.
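 *
 * Short illustrative sketch (not from the original source); the doubling
 * policy in capacity_increase() below keeps repeated appends cheap:
 *
 *   QString *qs = qstring_from_str("len=");
 *   qstring_append_int(qs, 42);          // "len=42"
 *   const char *s = qstring_get_str(qs); // borrowed pointer
 *   QDECREF(qs);                         // 's' is invalid after this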
- */ -QString *qstring_from_str(const char *str) -{ - return qstring_from_substr(str, 0, strlen(str) - 1); -} - -static void capacity_increase(QString *qstring, size_t len) -{ - if (qstring->capacity < (qstring->length + len)) { - qstring->capacity += len; - qstring->capacity *= 2; /* use exponential growth */ - - qstring->string = g_realloc(qstring->string, qstring->capacity + 1); - } -} - -/* qstring_append(): Append a C string to a QString - */ -void qstring_append(QString *qstring, const char *str) -{ - size_t len = strlen(str); - - capacity_increase(qstring, len); - memcpy(qstring->string + qstring->length, str, len); - qstring->length += len; - qstring->string[qstring->length] = 0; -} - -void qstring_append_int(QString *qstring, int64_t value) -{ - char num[32]; - - snprintf(num, sizeof(num), "%" PRId64, value); - qstring_append(qstring, num); -} - -/** - * qstring_append_chr(): Append a C char to a QString - */ -void qstring_append_chr(QString *qstring, int c) -{ - capacity_increase(qstring, 1); - qstring->string[qstring->length++] = c; - qstring->string[qstring->length] = 0; -} - -/** - * qobject_to_qstring(): Convert a QObject to a QString - */ -QString *qobject_to_qstring(const QObject *obj) -{ - if (qobject_type(obj) != QTYPE_QSTRING) - return NULL; - - return container_of(obj, QString, base); -} - -/** - * qstring_get_str(): Return a pointer to the stored string - * - * NOTE: Should be used with caution, if the object is deallocated - * this pointer becomes invalid. - */ -const char *qstring_get_str(const QString *qstring) -{ - return qstring->string; -} - -/** - * qstring_destroy_obj(): Free all memory allocated by a QString - * object - */ -static void qstring_destroy_obj(QObject *obj) -{ - QString *qs; - - assert(obj != NULL); - qs = qobject_to_qstring(obj); - g_free(qs->string); - g_free(qs); -} diff --git a/qemu/qom/Makefile.objs b/qemu/qom/Makefile.objs deleted file mode 100644 index 6a93ac73..00000000 --- a/qemu/qom/Makefile.objs +++ /dev/null @@ -1,2 +0,0 @@ -common-obj-y = object.o container.o qom-qobject.o -common-obj-y += cpu.o diff --git a/qemu/qom/container.c b/qemu/qom/container.c deleted file mode 100644 index e0e18f75..00000000 --- a/qemu/qom/container.c +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Device Container - * - * Copyright IBM, Corp. 2012 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- */ - -#include "qom/object.h" -#include "qemu/module.h" -#include <assert.h> - -static const TypeInfo container_info = { - "container", - TYPE_OBJECT, - 0, - sizeof(Object), -}; - -void container_register_types(struct uc_struct *uc) -{ - type_register_static(uc, &container_info); -} - -Object *container_get(struct uc_struct *uc, Object *root, const char *path) -{ - Object *obj, *child; - gchar **parts; - int i; - - parts = g_strsplit(path, "/", 0); - assert(parts != NULL && parts[0] != NULL && !parts[0][0]); - obj = root; - - for (i = 1; parts[i] != NULL; i++, obj = child) { - child = object_resolve_path_component(uc, obj, parts[i]); - if (!child) { - child = object_new(uc, "container"); - object_property_add_child(obj, parts[i], child, NULL); - } - } - - g_strfreev(parts); - - return obj; -} diff --git a/qemu/qom/cpu.c b/qemu/qom/cpu.c deleted file mode 100644 index 2c3a1935..00000000 --- a/qemu/qom/cpu.c +++ /dev/null @@ -1,284 +0,0 @@ -/* - * QEMU CPU model - * - * Copyright (c) 2012-2014 SUSE LINUX Products GmbH - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see - * <http://www.gnu.org/licenses/> - */ - -#include "qemu-common.h" -#include "qemu/log.h" -#include "uc_priv.h" - -bool cpu_exists(struct uc_struct* uc, int64_t id) -{ - CPUState *cpu = uc->cpu; - CPUClass *cc = CPU_GET_CLASS(uc, cpu); - - if (cc->get_arch_id(cpu) == id) { - return true; - } - return false; -} - -CPUState *cpu_generic_init(struct uc_struct *uc, const char *typename, const char *cpu_model) -{ - char *str, *name, *featurestr; - CPUState *cpu; - ObjectClass *oc; - CPUClass *cc; - Error *err = NULL; - - str = g_strdup(cpu_model); - name = strtok(str, ","); - - oc = cpu_class_by_name(uc, typename, name); - if (oc == NULL) { - g_free(str); - return NULL; - } - - cpu = CPU(object_new(uc, object_class_get_name(oc))); - cc = CPU_GET_CLASS(uc, cpu); - - featurestr = strtok(NULL, ","); - cc->parse_features(cpu, featurestr, &err); - g_free(str); - if (err != NULL) { - goto out; - } - - object_property_set_bool(uc, OBJECT(cpu), true, "realized", &err); - -out: - if (err != NULL) { - error_free(err); - object_unref(uc, OBJECT(cpu)); - return NULL; - } - - return cpu; -} - -bool cpu_paging_enabled(const CPUState *cpu) -{ - CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); - - return cc->get_paging_enabled(cpu); -} - -static bool cpu_common_get_paging_enabled(const CPUState *cpu) -{ - return false; -} - -void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, - Error **errp) -{ - CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); - - cc->get_memory_mapping(cpu, list, errp); -} - -static void cpu_common_get_memory_mapping(CPUState *cpu, - MemoryMappingList *list, - Error **errp) -{ - error_setg(errp, "Obtaining memory mappings is unsupported on this CPU."); -} - -void cpu_reset_interrupt(CPUState *cpu, int mask) -{ - cpu->interrupt_request &= ~mask; -} - -void cpu_exit(CPUState *cpu) -{ - cpu->exit_request = 1; - cpu->tcg_exit_req = 1; -} - -static void cpu_common_noop(CPUState *cpu) -{ -} - -static
bool cpu_common_exec_interrupt(CPUState *cpu, int int_req) -{ - return false; -} - -void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, - int flags) -{ - CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); - - if (cc->dump_state) { - cc->dump_state(cpu, f, cpu_fprintf, flags); - } -} - -void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, - int flags) -{ - CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); - - if (cc->dump_statistics) { - cc->dump_statistics(cpu, f, cpu_fprintf, flags); - } -} - -void cpu_reset(CPUState *cpu) -{ - CPUClass *klass = CPU_GET_CLASS(cpu->uc, cpu); - - if (klass->reset != NULL) { - (*klass->reset)(cpu); - } -} - -static void cpu_common_reset(CPUState *cpu) -{ - CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); - - if (qemu_loglevel_mask(CPU_LOG_RESET)) { - qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index); - log_cpu_state(cpu, cc->reset_dump_flags); - } - - cpu->interrupt_request = 0; - cpu->current_tb = NULL; - cpu->halted = 0; - cpu->mem_io_pc = 0; - cpu->mem_io_vaddr = 0; - cpu->icount_extra = 0; - cpu->icount_decr.u32 = 0; - cpu->can_do_io = 0; - memset(cpu->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *)); -} - -static bool cpu_common_has_work(CPUState *cs) -{ - return false; -} - -ObjectClass *cpu_class_by_name(struct uc_struct *uc, const char *typename, const char *cpu_model) -{ - CPUClass *cc = CPU_CLASS(uc, object_class_by_name(uc, typename)); - - return cc->class_by_name(uc, cpu_model); -} - -static ObjectClass *cpu_common_class_by_name(struct uc_struct *uc, const char *cpu_model) -{ - return NULL; -} - -static void cpu_common_parse_features(CPUState *cpu, char *features, - Error **errp) -{ - char *featurestr; /* Single "key=value" string being parsed */ - char *val; - Error *err = NULL; - - featurestr = features ? strtok(features, ",") : NULL; - - while (featurestr) { - val = strchr(featurestr, '='); - if (val) { - *val = 0; - val++; - object_property_parse(cpu->uc, OBJECT(cpu), val, featurestr, &err); - if (err) { - error_propagate(errp, err); - return; - } - } else { - error_setg(errp, "Expected key=value format, found %s.", - featurestr); - return; - } - featurestr = strtok(NULL, ","); - } -} - -static int cpu_common_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp) -{ - CPUState *cpu = CPU(dev); - - if (dev->hotplugged) { - cpu_resume(cpu); - } - - return 0; -} - -static void cpu_common_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ -} - -static int64_t cpu_common_get_arch_id(CPUState *cpu) -{ - return cpu->cpu_index; -} - -static void cpu_class_init(struct uc_struct *uc, ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(uc, klass); - CPUClass *k = CPU_CLASS(uc, klass); - - k->class_by_name = cpu_common_class_by_name; - k->parse_features = cpu_common_parse_features; - k->reset = cpu_common_reset; - k->get_arch_id = cpu_common_get_arch_id; - k->has_work = cpu_common_has_work; - k->get_paging_enabled = cpu_common_get_paging_enabled; - k->get_memory_mapping = cpu_common_get_memory_mapping; - k->debug_excp_handler = cpu_common_noop; - k->cpu_exec_enter = cpu_common_noop; - k->cpu_exec_exit = cpu_common_noop; - k->cpu_exec_interrupt = cpu_common_exec_interrupt; - dc->realize = cpu_common_realizefn; - /* - * Reason: CPUs still need special care by board code: wiring up - * IRQs, adding reset handlers, halting non-first CPUs, ... 
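     * (Per-architecture CPUClass implementations install their own hooks on
     * top of these defaults in their class_init, which type_initialize()
     * invokes after the parent class data has been copied.)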
- */ - dc->cannot_instantiate_with_device_add_yet = true; -} - -static const TypeInfo cpu_type_info = { - TYPE_CPU, - TYPE_DEVICE, - - sizeof(CPUClass), - sizeof(CPUState), - NULL, - - cpu_common_initfn, - NULL, - NULL, - - NULL, - - cpu_class_init, - NULL, - NULL, - - true, -}; - -void cpu_register_types(struct uc_struct *uc) -{ - type_register_static(uc, &cpu_type_info); -} diff --git a/qemu/qom/object.c b/qemu/qom/object.c deleted file mode 100644 index 8457c762..00000000 --- a/qemu/qom/object.c +++ /dev/null @@ -1,1691 +0,0 @@ -/* - * QEMU Object Model - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#include "qom/object.h" -#include "qemu-common.h" -#include "qapi/visitor.h" -#include "qapi-visit.h" -#include "qapi/string-input-visitor.h" -#include "qapi/qmp/qerror.h" - -/* TODO: replace QObject with a simpler visitor to avoid a dependency - * of the QOM core on QObject? */ -#include "qom/qom-qobject.h" -#include "qapi/qmp/qobject.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qint.h" -#include "qapi/qmp/qstring.h" - -#include "uc_priv.h" - -#define MAX_INTERFACES 32 - -typedef struct InterfaceImpl InterfaceImpl; -typedef struct TypeImpl TypeImpl; - -struct InterfaceImpl -{ - const char *typename; -}; - -struct TypeImpl -{ - const char *name; - const char *parent; - - size_t class_size; - size_t instance_size; - void *instance_userdata; - - void (*class_init)(struct uc_struct *uc, ObjectClass *klass, void *data); - void (*class_base_init)(ObjectClass *klass, void *data); - void (*class_finalize)(ObjectClass *klass, void *data); - - void *class_data; - - void (*instance_init)(struct uc_struct *uc, Object *obj, void *opaque); - void (*instance_post_init)(struct uc_struct *uc, Object *obj); - void (*instance_finalize)(struct uc_struct *uc, Object *obj, void *opaque); - - bool abstract; - - TypeImpl *parent_type; - ObjectClass *class; - - int num_interfaces; - InterfaceImpl interfaces[MAX_INTERFACES]; -}; - - -static GHashTable *type_table_get(struct uc_struct *uc) -{ - if (uc->type_table == NULL) { - uc->type_table = g_hash_table_new(g_str_hash, g_str_equal); - } - - return uc->type_table; -} - - -static void type_table_add(struct uc_struct *uc, TypeImpl *ti) -{ - assert(!uc->enumerating_types); - g_hash_table_insert(type_table_get(uc), (void *)ti->name, ti); -} - -static TypeImpl *type_table_lookup(struct uc_struct *uc, const char *name) -{ - return g_hash_table_lookup(type_table_get(uc), name); -} - -static TypeImpl *type_new(struct uc_struct *uc, const TypeInfo *info) -{ - TypeImpl *ti = g_malloc0(sizeof(*ti)); - int i; - - g_assert(info->name != NULL); - - if (type_table_lookup(uc, info->name) != NULL) { - fprintf(stderr, "Registering `%s' which already exists\n", info->name); - abort(); - } - - ti->name = g_strdup(info->name); - ti->parent = g_strdup(info->parent); - - ti->class_size = info->class_size; - ti->instance_size = info->instance_size; - - ti->class_init = info->class_init; - ti->class_base_init = info->class_base_init; - ti->class_finalize = info->class_finalize; - ti->class_data = info->class_data; - - ti->instance_userdata = info->instance_userdata; - ti->instance_init = info->instance_init; - ti->instance_post_init = info->instance_post_init; - ti->instance_finalize = info->instance_finalize; - - ti->abstract = info->abstract; - - for (i = 0; info->interfaces && info->interfaces[i].type; i++) { - 
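        /* g_strdup() each interface name so this TypeImpl owns its strings
           even if the caller's TypeInfo is transient. */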
ti->interfaces[i].typename = g_strdup(info->interfaces[i].type); - } - ti->num_interfaces = i; - - return ti; -} - -static TypeImpl *type_register_internal(struct uc_struct *uc, const TypeInfo *info) -{ - TypeImpl *ti; - ti = type_new(uc, info); - - type_table_add(uc, ti); - return ti; -} - -TypeImpl *type_register(struct uc_struct *uc, const TypeInfo *info) -{ - assert(info->parent); - return type_register_internal(uc, info); -} - -TypeImpl *type_register_static(struct uc_struct *uc, const TypeInfo *info) -{ - return type_register(uc, info); -} - -static TypeImpl *type_get_by_name(struct uc_struct *uc, const char *name) -{ - if (name == NULL) { - return NULL; - } - - return type_table_lookup(uc, name); -} - -static TypeImpl *type_get_parent(struct uc_struct *uc, TypeImpl *type) -{ - if (!type->parent_type && type->parent) { - type->parent_type = type_get_by_name(uc, type->parent); - g_assert(type->parent_type != NULL); - } - - return type->parent_type; -} - -static bool type_has_parent(TypeImpl *type) -{ - return (type->parent != NULL); -} - -static size_t type_class_get_size(struct uc_struct *uc, TypeImpl *ti) -{ - if (ti->class_size) { - return ti->class_size; - } - - if (type_has_parent(ti)) { - return type_class_get_size(uc, type_get_parent(uc, ti)); - } - - return sizeof(ObjectClass); -} - -static size_t type_object_get_size(struct uc_struct *uc, TypeImpl *ti) -{ - if (ti->instance_size) { - return ti->instance_size; - } - - if (type_has_parent(ti)) { - return type_object_get_size(uc, type_get_parent(uc, ti)); - } - - return 0; -} - -static bool type_is_ancestor(struct uc_struct *uc, TypeImpl *type, TypeImpl *target_type) -{ - assert(target_type); - - /* Check if typename is a direct ancestor of type */ - while (type) { - if (type == target_type) { - return true; - } - - type = type_get_parent(uc, type); - } - - return false; -} - -static void type_initialize(struct uc_struct *uc, TypeImpl *ti); - -static void type_initialize_interface(struct uc_struct *uc, TypeImpl *ti, TypeImpl *interface_type, - TypeImpl *parent_type) -{ - InterfaceClass *new_iface; - TypeInfo info = { 0 }; - TypeImpl *iface_impl; - - info.parent = parent_type->name; - info.name = g_strdup_printf("%s::%s", ti->name, interface_type->name); - info.abstract = true; - - iface_impl = type_new(uc, &info); - iface_impl->parent_type = parent_type; - type_initialize(uc, iface_impl); - g_free((char *)info.name); - - new_iface = (InterfaceClass *)iface_impl->class; - new_iface->concrete_class = ti->class; - new_iface->interface_type = interface_type; - - ti->class->interfaces = g_slist_append(ti->class->interfaces, - iface_impl->class); -} - -static void type_initialize(struct uc_struct *uc, TypeImpl *ti) -{ - TypeImpl *parent; - - if (ti->class) { - return; - } - - ti->class_size = type_class_get_size(uc, ti); - ti->instance_size = type_object_get_size(uc, ti); - - ti->class = g_malloc0(ti->class_size); - - parent = type_get_parent(uc, ti); - if (parent) { - GSList *e; - int i; - type_initialize(uc, parent); - - g_assert(parent->class_size <= ti->class_size); - memcpy(ti->class, parent->class, parent->class_size); - ti->class->interfaces = NULL; - - for (e = parent->class->interfaces; e; e = e->next) { - InterfaceClass *iface = e->data; - ObjectClass *klass = OBJECT_CLASS(iface); - - type_initialize_interface(uc, ti, iface->interface_type, klass->type); - } - - for (i = 0; i < ti->num_interfaces; i++) { - TypeImpl *t = type_get_by_name(uc, ti->interfaces[i].typename); - for (e = ti->class->interfaces; e; e = e->next) { - 
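                /* If an interface inherited from the parent class already
                   covers 't', it does not need to be attached again. */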
TypeImpl *target_type = OBJECT_CLASS(e->data)->type; - - if (type_is_ancestor(uc, target_type, t)) { - break; - } - } - - if (e) { - continue; - } - - type_initialize_interface(uc, ti, t, t); - } - } - - ti->class->type = ti; - - while (parent) { - if (parent->class_base_init) { - parent->class_base_init(ti->class, ti->class_data); - } - parent = type_get_parent(uc, parent); - } - - if (ti->class_init) { - ti->class_init(uc, ti->class, ti->class_data); - } -} - -static void object_init_with_type(struct uc_struct *uc, Object *obj, TypeImpl *ti) -{ - if (type_has_parent(ti)) { - object_init_with_type(uc, obj, type_get_parent(uc, ti)); - } - - if (ti->instance_init) { - ti->instance_init(uc, obj, ti->instance_userdata); - } -} - -static void object_post_init_with_type(struct uc_struct *uc, Object *obj, TypeImpl *ti) -{ - if (ti->instance_post_init) { - ti->instance_post_init(uc, obj); - } - - if (type_has_parent(ti)) { - object_post_init_with_type(uc, obj, type_get_parent(uc, ti)); - } -} - -static void object_initialize_with_type(struct uc_struct *uc, void *data, size_t size, TypeImpl *type) -{ - Object *obj = data; - - g_assert(type != NULL); - type_initialize(uc, type); - - g_assert(type->instance_size >= sizeof(Object)); - g_assert(type->abstract == false); - g_assert(size >= type->instance_size); - - memset(obj, 0, type->instance_size); - obj->class_ = type->class; - object_ref(obj); - QTAILQ_INIT(&obj->properties); - object_init_with_type(uc, obj, type); - object_post_init_with_type(uc, obj, type); -} - -void object_initialize(struct uc_struct *uc, void *data, size_t size, const char *typename) -{ - TypeImpl *type = type_get_by_name(uc, typename); - - object_initialize_with_type(uc, data, size, type); -} - -static inline bool object_property_is_child(ObjectProperty *prop) -{ - return strstart(prop->type, "child<", NULL); -} - -static void object_property_del_all(struct uc_struct *uc, Object *obj) -{ - while (!QTAILQ_EMPTY(&obj->properties)) { - ObjectProperty *prop = QTAILQ_FIRST(&obj->properties); - - QTAILQ_REMOVE(&obj->properties, prop, node); - - if (prop->release) { - prop->release(uc, obj, prop->name, prop->opaque); - } - - g_free(prop->name); - g_free(prop->type); - g_free(prop->description); - g_free(prop); - } -} - -void object_property_del_child(struct uc_struct *uc, Object *obj, Object *child, Error **errp) -{ - ObjectProperty *prop; - - QTAILQ_FOREACH(prop, &obj->properties, node) { - if (object_property_is_child(prop) && prop->opaque == child) { - object_property_del(uc, obj, prop->name, errp); - break; - } - } -} - -void object_unparent(struct uc_struct *uc, Object *obj) -{ - if (obj->parent) { - object_property_del_child(uc, obj->parent, obj, NULL); - } -} - -static void object_deinit(struct uc_struct *uc, Object *obj, TypeImpl *type) -{ - if (type->instance_finalize) { - type->instance_finalize(uc, obj, type->instance_userdata); - } - - if (type_has_parent(type)) { - object_deinit(uc, obj, type_get_parent(uc, type)); - } -} - -static void object_finalize(struct uc_struct *uc, void *data) -{ - Object *obj = data; - TypeImpl *ti = obj->class_->type; - - object_property_del_all(uc, obj); - object_deinit(uc, obj, ti); - - g_assert(obj->ref == 0); - if (obj->free) { - obj->free(obj); - } -} - -static Object *object_new_with_type(struct uc_struct *uc, Type type) -{ - Object *obj; - - g_assert(type != NULL); - type_initialize(uc, type); - - obj = g_malloc(type->instance_size); - object_initialize_with_type(uc, obj, type->instance_size, type); - obj->free = g_free; - - return 
obj; -} - -Object *object_new(struct uc_struct *uc, const char *typename) -{ - TypeImpl *ti = type_get_by_name(uc, typename); - - return object_new_with_type(uc, ti); -} - -Object *object_dynamic_cast(struct uc_struct *uc, Object *obj, const char *typename) -{ - if (obj && object_class_dynamic_cast(uc, object_get_class(obj), typename)) { - return obj; - } - - return NULL; -} - -Object *object_dynamic_cast_assert(struct uc_struct *uc, Object *obj, const char *typename, - const char *file, int line, const char *func) -{ -#ifdef CONFIG_QOM_CAST_DEBUG - int i; - Object *inst; - - for (i = 0; obj && i < OBJECT_CLASS_CAST_CACHE; i++) { - if (obj->class->object_cast_cache[i] == typename) { - goto out; - } - } - - inst = object_dynamic_cast(uc, obj, typename); - - if (!inst && obj) { - fprintf(stderr, "%s:%d:%s: Object %p is not an instance of type %s\n", - file, line, func, obj, typename); - abort(); - } - - assert(obj == inst); - - if (obj && obj == inst) { - for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) { - obj->class->object_cast_cache[i - 1] = - obj->class->object_cast_cache[i]; - } - obj->class->object_cast_cache[i - 1] = typename; - } - -out: -#endif - return obj; -} - -ObjectClass *object_class_dynamic_cast(struct uc_struct *uc, ObjectClass *class, - const char *typename) -{ - ObjectClass *ret = NULL; - TypeImpl *target_type; - TypeImpl *type; - - if (!class) { - return NULL; - } - - /* A simple fast path that can trigger a lot for leaf classes. */ - type = class->type; - if (type->name == typename) { - return class; - } - - target_type = type_get_by_name(uc, typename); - if (!target_type) { - /* target class type unknown, so fail the cast */ - return NULL; - } - - if (type->class->interfaces && - type_is_ancestor(uc, target_type, uc->type_interface)) { - int found = 0; - GSList *i; - - for (i = class->interfaces; i; i = i->next) { - ObjectClass *target_class = i->data; - - if (type_is_ancestor(uc, target_class->type, target_type)) { - ret = target_class; - found++; - } - } - - /* The match was ambiguous, don't allow a cast */ - if (found > 1) { - ret = NULL; - } - } else if (type_is_ancestor(uc, type, target_type)) { - ret = class; - } - - return ret; -} - -ObjectClass *object_class_dynamic_cast_assert(struct uc_struct *uc, ObjectClass *class, - const char *typename, - const char *file, int line, - const char *func) -{ - ObjectClass *ret; - -#ifdef CONFIG_QOM_CAST_DEBUG - int i; - - for (i = 0; class && i < OBJECT_CLASS_CAST_CACHE; i++) { - if (class->class_cast_cache[i] == typename) { - ret = class; - goto out; - } - } -#else - if (!class || !class->interfaces) { - return class; - } -#endif - - ret = object_class_dynamic_cast(uc, class, typename); - if (!ret && class) { - fprintf(stderr, "%s:%d:%s: Object %p is not an instance of type %s\n", - file, line, func, class, typename); - abort(); - } - -#ifdef CONFIG_QOM_CAST_DEBUG - if (class && ret == class) { - for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) { - class->class_cast_cache[i - 1] = class->class_cast_cache[i]; - } - class->class_cast_cache[i - 1] = typename; - } -out: -#endif - return ret; -} - -const char *object_get_typename(Object *obj) -{ - return obj->class_->type->name; -} - -ObjectClass *object_get_class(Object *obj) -{ - return obj->class_; -} - -bool object_class_is_abstract(ObjectClass *klass) -{ - return klass->type->abstract; -} - -const char *object_class_get_name(ObjectClass *klass) -{ - return klass->type->name; -} - -ObjectClass *object_class_by_name(struct uc_struct *uc, const char *typename) -{ - TypeImpl *type = 
type_get_by_name(uc, typename); - - if (!type) { - return NULL; - } - - type_initialize(uc, type); - - return type->class; -} - -ObjectClass *object_class_get_parent(struct uc_struct *uc, ObjectClass *class) -{ - TypeImpl *type = type_get_parent(uc, class->type); - - if (!type) { - return NULL; - } - - type_initialize(uc, type); - - return type->class; -} - -typedef struct OCFData -{ - void (*fn)(ObjectClass *klass, void *opaque); - const char *implements_type; - bool include_abstract; - void *opaque; - struct uc_struct *uc; -} OCFData; - -static void object_class_foreach_tramp(gpointer key, gpointer value, - gpointer opaque) -{ - OCFData *data = opaque; - TypeImpl *type = value; - ObjectClass *k; - - type_initialize(data->uc, type); - k = type->class; - - if (!data->include_abstract && type->abstract) { - return; - } - - if (data->implements_type && - !object_class_dynamic_cast(data->uc, k, data->implements_type)) { - return; - } - - data->fn(k, data->opaque); -} - -void object_class_foreach(struct uc_struct *uc, void (*fn)(ObjectClass *klass, void *opaque), - const char *implements_type, bool include_abstract, - void *opaque) -{ - OCFData data = { fn, implements_type, include_abstract, opaque, uc }; - - uc->enumerating_types = true; - g_hash_table_foreach(type_table_get(uc), object_class_foreach_tramp, &data); - uc->enumerating_types = false; -} - -int object_child_foreach(Object *obj, int (*fn)(Object *child, void *opaque), - void *opaque) -{ - ObjectProperty *prop, *next; - int ret = 0; - - QTAILQ_FOREACH_SAFE(prop, &obj->properties, node, next) { - if (object_property_is_child(prop)) { - ret = fn(prop->opaque, opaque); - if (ret != 0) { - break; - } - } - } - return ret; -} - -static void object_class_get_list_tramp(ObjectClass *klass, void *opaque) -{ - GSList **list = opaque; - - *list = g_slist_prepend(*list, klass); -} - -GSList *object_class_get_list(struct uc_struct *uc, const char *implements_type, - bool include_abstract) -{ - GSList *list = NULL; - - object_class_foreach(uc, object_class_get_list_tramp, - implements_type, include_abstract, &list); - return list; -} - -void object_ref(Object *obj) -{ - if (!obj) { - return; - } - atomic_inc(&obj->ref); -} - -void object_unref(struct uc_struct *uc, Object *obj) -{ - if (!obj) { - return; - } - g_assert(obj->ref > 0); - - /* parent always holds a reference to its children */ - if (atomic_fetch_dec(&obj->ref) == 1) { - object_finalize(uc, obj); - } -} - -ObjectProperty * -object_property_add(Object *obj, const char *name, const char *type, - ObjectPropertyAccessor *get, - ObjectPropertySetAccessor *set, - ObjectPropertyRelease *release, - void *opaque, Error **errp) -{ - ObjectProperty *prop; - size_t name_len = strlen(name); - - if (name_len >= 3 && !memcmp(name + name_len - 3, "[*]", 4)) { - int i; - ObjectProperty *ret; - char *name_no_array = g_strdup(name); - - name_no_array[name_len - 3] = '\0'; - for (i = 0; ; ++i) { - char *full_name = g_strdup_printf("%s[%d]", name_no_array, i); - - ret = object_property_add(obj, full_name, type, get, set, - release, opaque, NULL); - g_free(full_name); - if (ret) { - break; - } - } - g_free(name_no_array); - return ret; - } - - QTAILQ_FOREACH(prop, &obj->properties, node) { - if (strcmp(prop->name, name) == 0) { - error_setg(errp, "attempt to add duplicate property '%s'" - " to object (type '%s')", name, - object_get_typename(obj)); - return NULL; - } - } - - prop = g_malloc0(sizeof(*prop)); - - prop->name = g_strdup(name); - prop->type = g_strdup(type); - - prop->get = get; - prop->set 
= set; - prop->release = release; - prop->opaque = opaque; - - QTAILQ_INSERT_TAIL(&obj->properties, prop, node); - return prop; -} - -ObjectProperty *object_property_find(Object *obj, const char *name, - Error **errp) -{ - ObjectProperty *prop; - - QTAILQ_FOREACH(prop, &obj->properties, node) { - if (strcmp(prop->name, name) == 0) { - return prop; - } - } - - error_setg(errp, "Property '.%s' not found", name); - return NULL; -} - -void object_property_del(struct uc_struct *uc, Object *obj, const char *name, Error **errp) -{ - ObjectProperty *prop = object_property_find(obj, name, errp); - if (prop == NULL) { - return; - } - - if (prop->release) { - prop->release(uc, obj, name, prop->opaque); - } - - QTAILQ_REMOVE(&obj->properties, prop, node); - - g_free(prop->name); - g_free(prop->type); - g_free(prop->description); - g_free(prop); -} - -void object_property_get(struct uc_struct *uc, Object *obj, Visitor *v, const char *name, - Error **errp) -{ - ObjectProperty *prop = object_property_find(obj, name, errp); - if (prop == NULL) { - return; - } - - if (!prop->get) { - error_set(errp, QERR_PERMISSION_DENIED); - } else { - prop->get(uc, obj, v, prop->opaque, name, errp); - } -} - -void object_property_set(struct uc_struct *uc, Object *obj, Visitor *v, const char *name, - Error **errp) -{ - ObjectProperty *prop = object_property_find(obj, name, errp); - if (prop == NULL) { - return; - } - - if (!prop->set) { - error_set(errp, QERR_PERMISSION_DENIED); - } else { - if (prop->set(uc, obj, v, prop->opaque, name, errp)) - error_set(errp, QERR_UNDEFINED_ERROR); - } -} - -void object_property_set_str(struct uc_struct *uc, Object *obj, const char *value, - const char *name, Error **errp) -{ - QString *qstr = qstring_from_str(value); - object_property_set_qobject(uc, obj, QOBJECT(qstr), name, errp); - - QDECREF(qstr); -} - -char *object_property_get_str(struct uc_struct *uc, Object *obj, const char *name, - Error **errp) -{ - QObject *ret = object_property_get_qobject(uc, obj, name, errp); - QString *qstring; - char *retval; - - if (!ret) { - return NULL; - } - qstring = qobject_to_qstring(ret); - if (!qstring) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, "string"); - retval = NULL; - } else { - retval = g_strdup(qstring_get_str(qstring)); - } - - QDECREF(qstring); - return retval; -} - -void object_property_set_link(struct uc_struct *uc, Object *obj, Object *value, - const char *name, Error **errp) -{ - if (value) { - gchar *path = object_get_canonical_path(value); - object_property_set_str(uc, obj, path, name, errp); - g_free(path); - } else { - object_property_set_str(uc, obj, "", name, errp); - } -} - -Object *object_property_get_link(struct uc_struct *uc, Object *obj, const char *name, - Error **errp) -{ - char *str = object_property_get_str(uc, obj, name, errp); - Object *target = NULL; - - if (str && *str) { - target = object_resolve_path(uc, str, NULL); - if (!target) { - error_set(errp, QERR_DEVICE_NOT_FOUND, str); - } - } - - g_free(str); - return target; -} - -void object_property_set_bool(struct uc_struct *uc, Object *obj, bool value, - const char *name, Error **errp) -{ - QBool *qbool = qbool_from_int(value); - object_property_set_qobject(uc, obj, QOBJECT(qbool), name, errp); - - QDECREF(qbool); -} - -bool object_property_get_bool(struct uc_struct *uc, Object *obj, const char *name, - Error **errp) -{ - QObject *ret = object_property_get_qobject(uc, obj, name, errp); - QBool *qbool; - bool retval; - - if (!ret) { - return false; - } - qbool = qobject_to_qbool(ret); - if (!qbool) 
{ - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, "boolean"); - retval = false; - } else { - retval = qbool_get_int(qbool); - } - - QDECREF(qbool); - return retval; -} - -void object_property_set_int(struct uc_struct *uc, Object *obj, int64_t value, - const char *name, Error **errp) -{ - QInt *qint = qint_from_int(value); - object_property_set_qobject(uc, obj, QOBJECT(qint), name, errp); - - QDECREF(qint); -} - -int64_t object_property_get_int(struct uc_struct *uc, Object *obj, const char *name, - Error **errp) -{ - QObject *ret = object_property_get_qobject(uc, obj, name, errp); - QInt *qint; - int64_t retval; - - if (!ret) { - return -1; - } - qint = qobject_to_qint(ret); - if (!qint) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, "int"); - retval = -1; - } else { - retval = qint_get_int(qint); - } - - QDECREF(qint); - return retval; -} - -void object_property_parse(struct uc_struct *uc, Object *obj, const char *string, - const char *name, Error **errp) -{ - StringInputVisitor *mi; - mi = string_input_visitor_new(string); - object_property_set(uc, obj, string_input_get_visitor(mi), name, errp); - - string_input_visitor_cleanup(mi); -} - -const char *object_property_get_type(Object *obj, const char *name, Error **errp) -{ - ObjectProperty *prop = object_property_find(obj, name, errp); - if (prop == NULL) { - return NULL; - } - - return prop->type; -} - -Object *object_get_root(struct uc_struct *uc) -{ - if (!uc->root) { - uc->root = object_new(uc, "container"); - } - - return uc->root; -} - -static void object_get_child_property(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - Object *child = opaque; - gchar *path; - - path = object_get_canonical_path(child); - visit_type_str(v, &path, name, errp); - g_free(path); -} - -static Object *object_resolve_child_property(struct uc_struct *uc, Object *parent, void *opaque, const gchar *part) -{ - return opaque; -} - -static void object_finalize_child_property(struct uc_struct *uc, Object *obj, const char *name, - void *opaque) -{ - Object *child = opaque; - - if (child->class_->unparent) { - (child->class_->unparent)(uc, child); - } - child->parent = NULL; - object_unref(uc, child); -} - -void object_property_add_child(Object *obj, const char *name, - Object *child, Error **errp) -{ - Error *local_err = NULL; - gchar *type; - ObjectProperty *op; - - if (child->parent != NULL) { - error_setg(errp, "child object is already parented"); - return; - } - - type = g_strdup_printf("child<%s>", object_get_typename(OBJECT(child))); - - op = object_property_add(obj, name, type, object_get_child_property, NULL, - object_finalize_child_property, child, &local_err); - if (local_err) { - error_propagate(errp, local_err); - goto out; - } - - op->resolve = object_resolve_child_property; - object_ref(child); - child->parent = obj; - -out: - g_free(type); -} - -void object_property_allow_set_link(Object *obj, const char *name, - Object *val, Error **errp) -{ - /* Allow the link to be set, always */ -} - -typedef struct { - Object **child; - void (*check)(Object *, const char *, Object *, Error **); - ObjectPropertyLinkFlags flags; -} LinkProperty; - -static void object_get_link_property(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - LinkProperty *lprop = opaque; - Object **child = lprop->child; - gchar *path; - - if (*child) { - path = object_get_canonical_path(*child); - visit_type_str(v, &path, name, errp); - g_free(path); - } else { - path = 
(gchar *)""; - visit_type_str(v, &path, name, errp); - } -} - -/* - * object_resolve_link: - * - * Lookup an object and ensure its type matches the link property type. This - * is similar to object_resolve_path() except type verification against the - * link property is performed. - * - * Returns: The matched object or NULL on path lookup failures. - */ -static Object *object_resolve_link(struct uc_struct *uc, Object *obj, const char *name, - const char *path, Error **errp) -{ - const char *type; - gchar *target_type; - bool ambiguous = false; - Object *target; - - /* Go from link to FOO. */ - type = object_property_get_type(obj, name, NULL); - target_type = g_strndup(&type[5], strlen(type) - 6); - target = object_resolve_path_type(uc, path, target_type, &ambiguous); - - if (ambiguous) { - error_set(errp, ERROR_CLASS_GENERIC_ERROR, - "Path '%s' does not uniquely identify an object", path); - } else if (!target) { - target = object_resolve_path(uc, path, &ambiguous); - if (target || ambiguous) { - error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, target_type); - } else { - error_set(errp, QERR_DEVICE_NOT_FOUND, path); - } - target = NULL; - } - g_free(target_type); - - return target; -} - -static int object_set_link_property(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - Error *local_err = NULL; - LinkProperty *prop = opaque; - Object **child = prop->child; - Object *old_target = *child; - Object *new_target = NULL; - char *path = NULL; - - visit_type_str(v, &path, name, &local_err); - - if (!local_err && strcmp(path, "") != 0) { - new_target = object_resolve_link(uc, obj, name, path, &local_err); - } - - g_free(path); - if (local_err) { - error_propagate(errp, local_err); - return -1; - } - - prop->check(obj, name, new_target, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return -1; - } - - object_ref(new_target); - *child = new_target; - object_unref(uc, old_target); - - return 0; -} - -static Object *object_resolve_link_property(struct uc_struct *uc, Object *parent, void *opaque, const gchar *part) -{ - LinkProperty *lprop = opaque; - - return *lprop->child; -} - -static void object_release_link_property(struct uc_struct *uc, Object *obj, const char *name, - void *opaque) -{ - LinkProperty *prop = opaque; - - if ((prop->flags & OBJ_PROP_LINK_UNREF_ON_RELEASE) && *prop->child) { - object_unref(uc, *prop->child); - } - g_free(prop); -} - -void object_property_add_link(Object *obj, const char *name, - const char *type, Object **child, - void (*check)(Object *, const char *, - Object *, Error **), - ObjectPropertyLinkFlags flags, - Error **errp) -{ - Error *local_err = NULL; - LinkProperty *prop = g_malloc(sizeof(*prop)); - gchar *full_type; - ObjectProperty *op; - - prop->child = child; - prop->check = check; - prop->flags = flags; - - full_type = g_strdup_printf("link<%s>", type); - - op = object_property_add(obj, name, full_type, - object_get_link_property, - check ? 
-void object_property_add_link(Object *obj, const char *name,
-                              const char *type, Object **child,
-                              void (*check)(Object *, const char *,
-                                            Object *, Error **),
-                              ObjectPropertyLinkFlags flags,
-                              Error **errp)
-{
-    Error *local_err = NULL;
-    LinkProperty *prop = g_malloc(sizeof(*prop));
-    gchar *full_type;
-    ObjectProperty *op;
-
-    prop->child = child;
-    prop->check = check;
-    prop->flags = flags;
-
-    full_type = g_strdup_printf("link<%s>", type);
-
-    op = object_property_add(obj, name, full_type,
-                             object_get_link_property,
-                             check ? object_set_link_property : NULL,
-                             object_release_link_property,
-                             prop,
-                             &local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
-        g_free(prop);
-        goto out;
-    }
-
-    op->resolve = object_resolve_link_property;
-
-out:
-    g_free(full_type);
-}
-
-gchar *object_get_canonical_path_component(Object *obj)
-{
-    ObjectProperty *prop = NULL;
-
-    g_assert(obj);
-    g_assert(obj->parent != NULL);
-
-    QTAILQ_FOREACH(prop, &obj->parent->properties, node) {
-        if (!object_property_is_child(prop)) {
-            continue;
-        }
-
-        if (prop->opaque == obj) {
-            return g_strdup(prop->name);
-        }
-    }
-
-    /* obj had a parent but was not a child, should never happen */
-    g_assert_not_reached();
-    return NULL;
-}
-
-gchar *object_get_canonical_path(Object *obj)
-{
-    Object *root = object_get_root(NULL);
-    char *newpath, *path = NULL;
-
-    while (obj != root) {
-        char *component = object_get_canonical_path_component(obj);
-
-        if (path) {
-            newpath = g_strdup_printf("%s/%s", component, path);
-            g_free(component);
-            g_free(path);
-            path = newpath;
-        } else {
-            path = component;
-        }
-
-        obj = obj->parent;
-    }
-
-    newpath = g_strdup_printf("/%s", path ? path : "");
-    g_free(path);
-
-    return newpath;
-}
-
-Object *object_resolve_path_component(struct uc_struct *uc, Object *parent, const gchar *part)
-{
-    ObjectProperty *prop = object_property_find(parent, part, NULL);
-    if (prop == NULL) {
-        return NULL;
-    }
-
-    if (prop->resolve) {
-        return prop->resolve(uc, parent, prop->opaque, part);
-    } else {
-        return NULL;
-    }
-}
-
-static Object *object_resolve_abs_path(struct uc_struct *uc, Object *parent,
-                                       gchar **parts,
-                                       const char *typename,
-                                       int index)
-{
-    Object *child;
-
-    if (parts[index] == NULL) {
-        return object_dynamic_cast(uc, parent, typename);
-    }
-
-    if (strcmp(parts[index], "") == 0) {
-        return object_resolve_abs_path(uc, parent, parts, typename, index + 1);
-    }
-
-    child = object_resolve_path_component(uc, parent, parts[index]);
-    if (!child) {
-        return NULL;
-    }
-
-    return object_resolve_abs_path(uc, child, parts, typename, index + 1);
-}
-
-static Object *object_resolve_partial_path(struct uc_struct *uc, Object *parent,
-                                           gchar **parts,
-                                           const char *typename,
-                                           bool *ambiguous)
-{
-    Object *obj;
-    ObjectProperty *prop;
-
-    obj = object_resolve_abs_path(uc, parent, parts, typename, 0);
-
-    QTAILQ_FOREACH(prop, &parent->properties, node) {
-        Object *found;
-
-        if (!object_property_is_child(prop)) {
-            continue;
-        }
-
-        found = object_resolve_partial_path(uc, prop->opaque, parts,
-                                            typename, ambiguous);
-        if (found) {
-            if (obj) {
-                if (ambiguous) {
-                    *ambiguous = true;
-                }
-                return NULL;
-            }
-            obj = found;
-        }
-
-        if (ambiguous && *ambiguous) {
-            return NULL;
-        }
-    }
-
-    return obj;
-}
-
-Object *object_resolve_path_type(struct uc_struct *uc, const char *path, const char *typename,
-                                 bool *ambiguous)
-{
-    Object *obj;
-    gchar **parts;
-
-    parts = g_strsplit(path, "/", 0);
-    assert(parts);
-
-    if (parts[0] == NULL || strcmp(parts[0], "") != 0) {
-        if (ambiguous) {
-            *ambiguous = false;
-        }
-        obj = object_resolve_partial_path(uc, object_get_root(NULL), parts,
-                                          typename, ambiguous);
-    } else {
-        obj = object_resolve_abs_path(uc, object_get_root(NULL), parts, typename, 1);
-    }
-
-    g_strfreev(parts);
-
-    return obj;
-}
-
-Object *object_resolve_path(struct uc_struct *uc, const char *path, bool *ambiguous)
-{
-    return object_resolve_path_type(uc, path, TYPE_OBJECT, ambiguous);
-}
-
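object_resolve_path_type() above distinguishes the two lookup modes by the leading slash: g_strsplit("/cpu0", "/", 0) yields a leading empty part, which selects the absolute walk from index 1; anything else triggers the recursive partial search, which reports ambiguity when two objects match. A sketch of both modes, assuming an initialized uc_struct and an illustrative "/cpu0" path:

    static Object *find_cpu(struct uc_struct *uc)
    {
        bool ambiguous = false;
        /* absolute: walk child properties from the root container */
        Object *o = object_resolve_path(uc, "/cpu0", NULL);

        if (!o) {
            /* partial: depth-first search of the whole tree; NULL with
             * ambiguous == true means more than one match was found */
            o = object_resolve_path(uc, "cpu0", &ambiguous);
        }
        return ambiguous ? NULL : o;
    }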
-typedef struct StringProperty
-{
-    char *(*get)(struct uc_struct *uc, Object *, Error **);
-    int (*set)(struct uc_struct *uc, Object *, const char *, Error **);
-} StringProperty;
-
-static void property_get_str(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque,
-                             const char *name, Error **errp)
-{
-    StringProperty *prop = opaque;
-    char *value;
-
-    value = prop->get(uc, obj, errp);
-    if (value) {
-        visit_type_str(v, &value, name, errp);
-        g_free(value);
-    }
-}
-
-static int property_set_str(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque,
-                            const char *name, Error **errp)
-{
-    StringProperty *prop = opaque;
-    char *value;
-    Error *local_err = NULL;
-
-    visit_type_str(v, &value, name, &local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
-        return -1;
-    }
-
-    prop->set(uc, obj, value, errp);
-    g_free(value);
-
-    return 0;
-}
-
-static void property_release_str(struct uc_struct *uc, Object *obj, const char *name,
-                                 void *opaque)
-{
-    StringProperty *prop = opaque;
-    g_free(prop);
-}
-
-void object_property_add_str(Object *obj, const char *name,
-                             char *(*get)(struct uc_struct *uc, Object *, Error **),
-                             int (*set)(struct uc_struct *uc, Object *, const char *, Error **),
-                             Error **errp)
-{
-    Error *local_err = NULL;
-    StringProperty *prop = g_malloc0(sizeof(*prop));
-
-    prop->get = get;
-    prop->set = set;
-
-    object_property_add(obj, name, "string",
-                        get ? property_get_str : NULL,
-                        set ? property_set_str : NULL,
-                        property_release_str,
-                        prop, &local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
-        g_free(prop);
-    }
-}
-
-typedef struct BoolProperty
-{
-    bool (*get)(struct uc_struct *uc, Object *, Error **);
-    int (*set)(struct uc_struct *uc, Object *, bool, Error **);
-} BoolProperty;
-
-static void property_get_bool(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque,
-                              const char *name, Error **errp)
-{
-    BoolProperty *prop = opaque;
-    bool value;
-
-    value = prop->get(uc, obj, errp);
-    visit_type_bool(v, &value, name, errp);
-}
-
-static int property_set_bool(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque,
-                             const char *name, Error **errp)
-{
-    BoolProperty *prop = opaque;
-    bool value;
-    Error *local_err = NULL;
-
-    visit_type_bool(v, &value, name, &local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
-        return -1;
-    }
-
-    return prop->set(uc, obj, value, errp);
-}
-
-static void property_release_bool(struct uc_struct *uc, Object *obj, const char *name,
-                                  void *opaque)
-{
-    BoolProperty *prop = opaque;
-    g_free(prop);
-}
-
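The StringProperty/BoolProperty plumbing above lets a type expose a plain typed accessor pair without touching visitors; object_property_add_bool(), whose definition follows, wraps them. A sketch of a bool property backed by a struct field, in the same style as the "type" string property added by object_instance_init() further below (the DummyDev type is hypothetical):

    typedef struct DummyDev {
        Object parent;
        bool enabled;
    } DummyDev;

    static bool dummy_get_enabled(struct uc_struct *uc, Object *obj, Error **errp)
    {
        return ((DummyDev *)obj)->enabled;
    }

    static int dummy_set_enabled(struct uc_struct *uc, Object *obj, bool value,
                                 Error **errp)
    {
        ((DummyDev *)obj)->enabled = value;
        return 0;   /* non-zero would surface as QERR_UNDEFINED_ERROR */
    }

    /* registration, e.g. from an instance_init hook:
     * object_property_add_bool(uc, obj, "enabled",
     *                          dummy_get_enabled, dummy_set_enabled, NULL);
     */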
-void object_property_add_bool(struct uc_struct *uc, Object *obj, const char *name,
-                              bool (*get)(struct uc_struct *uc, Object *, Error **),
-                              int (*set)(struct uc_struct *uc, Object *, bool, Error **),
-                              Error **errp)
-{
-    Error *local_err = NULL;
-    BoolProperty *prop = g_malloc0(sizeof(*prop));
-
-    prop->get = get;
-    prop->set = set;
-
-    object_property_add(obj, name, "bool",
-                        get ? property_get_bool : NULL,
-                        set ? property_set_bool : NULL,
-                        property_release_bool,
-                        prop, &local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
-        g_free(prop);
-    }
-}
-
-static char *qdev_get_type(struct uc_struct *uc, Object *obj, Error **errp)
-{
-    return g_strdup(object_get_typename(obj));
-}
-
-static void property_get_uint8_ptr(struct uc_struct *uc, Object *obj, Visitor *v,
-                                   void *opaque, const char *name,
-                                   Error **errp)
-{
-    uint8_t value = *(uint8_t *)opaque;
-    visit_type_uint8(v, &value, name, errp);
-}
-
-static void property_get_uint16_ptr(struct uc_struct *uc, Object *obj, Visitor *v,
-                                    void *opaque, const char *name,
-                                    Error **errp)
-{
-    uint16_t value = *(uint16_t *)opaque;
-    visit_type_uint16(v, &value, name, errp);
-}
-
-static void property_get_uint32_ptr(struct uc_struct *uc, Object *obj, Visitor *v,
-                                    void *opaque, const char *name,
-                                    Error **errp)
-{
-    uint32_t value = *(uint32_t *)opaque;
-    visit_type_uint32(v, &value, name, errp);
-}
-
-static void property_get_uint64_ptr(struct uc_struct *uc, Object *obj, Visitor *v,
-                                    void *opaque, const char *name,
-                                    Error **errp)
-{
-    uint64_t value = *(uint64_t *)opaque;
-    visit_type_uint64(v, &value, name, errp);
-}
-
-void object_property_add_uint8_ptr(Object *obj, const char *name,
-                                   const uint8_t *v, Error **errp)
-{
-    object_property_add(obj, name, "uint8", property_get_uint8_ptr,
-                        NULL, NULL, (void *)v, errp);
-}
-
-void object_property_add_uint16_ptr(Object *obj, const char *name,
-                                    const uint16_t *v, Error **errp)
-{
-    object_property_add(obj, name, "uint16", property_get_uint16_ptr,
-                        NULL, NULL, (void *)v, errp);
-}
-
-void object_property_add_uint32_ptr(Object *obj, const char *name,
-                                    const uint32_t *v, Error **errp)
-{
-    object_property_add(obj, name, "uint32", property_get_uint32_ptr,
-                        NULL, NULL, (void *)v, errp);
-}
-
-void object_property_add_uint64_ptr(Object *obj, const char *name,
-                                    const uint64_t *v, Error **errp)
-{
-    object_property_add(obj, name, "uint64", property_get_uint64_ptr,
-                        NULL, NULL, (void *)v, errp);
-}
-
-typedef struct {
-    Object *target_obj;
-    const char *target_name;
-} AliasProperty;
-
-static void property_get_alias(struct uc_struct *uc, Object *obj, struct Visitor *v, void *opaque,
-                               const char *name, Error **errp)
-{
-    AliasProperty *prop = opaque;
-
-    object_property_get(uc, prop->target_obj, v, prop->target_name, errp);
-}
-
-static int property_set_alias(struct uc_struct *uc, Object *obj, struct Visitor *v, void *opaque,
-                              const char *name, Error **errp)
-{
-    AliasProperty *prop = opaque;
-
-    object_property_set(uc, prop->target_obj, v, prop->target_name, errp);
-
-    return 0;
-}
-
-static Object *property_resolve_alias(struct uc_struct *uc, Object *obj, void *opaque,
-                                      const gchar *part)
-{
-    AliasProperty *prop = opaque;
-
-    return object_resolve_path_component(uc, prop->target_obj, prop->target_name);
-}
-
-static void property_release_alias(struct uc_struct *uc, Object *obj, const char *name, void *opaque)
-{
-    AliasProperty *prop = opaque;
-
-    g_free(prop);
-}
-
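The uintN_ptr adders above install a getter and no setter, so the property is a read-only view of whatever the raw pointer addresses; the pointed-to storage must outlive the property. A sketch using the property_get_uint32_ptr() path, with an illustrative counter and property name:

    #include <stdint.h>

    static uint32_t tb_flush_count;   /* must outlive the property */

    static void publish_stats(Object *obj)
    {
        /* read-only: any attempt to set it hits the NULL setter and
         * fails with QERR_PERMISSION_DENIED */
        object_property_add_uint32_ptr(obj, "tb-flush-count",
                                       &tb_flush_count, &error_abort);
    }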
-void object_property_add_alias(Object *obj, const char *name,
-                               Object *target_obj, const char *target_name,
-                               Error **errp)
-{
-    AliasProperty *prop;
-    ObjectProperty *op;
-    ObjectProperty *target_prop;
-    gchar *prop_type;
-    Error *local_err = NULL;
-
-    target_prop = object_property_find(target_obj, target_name, errp);
-    if (!target_prop) {
-        return;
-    }
-
-    if (object_property_is_child(target_prop)) {
-        prop_type = g_strdup_printf("link%s",
-                                    target_prop->type + strlen("child"));
-    } else {
-        prop_type = g_strdup(target_prop->type);
-    }
-
-    prop = g_malloc(sizeof(*prop));
-    prop->target_obj = target_obj;
-    prop->target_name = target_name;
-
-    op = object_property_add(obj, name, prop_type,
-                             property_get_alias,
-                             property_set_alias,
-                             property_release_alias,
-                             prop, &local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
-        g_free(prop);
-        goto out;
-    }
-    op->resolve = property_resolve_alias;
-
-    object_property_set_description(obj, name,
-                                    target_prop->description,
-                                    &error_abort);
-
-out:
-    g_free(prop_type);
-}
-
-void object_property_set_description(Object *obj, const char *name,
-                                     const char *description, Error **errp)
-{
-    ObjectProperty *op;
-
-    op = object_property_find(obj, name, errp);
-    if (!op) {
-        return;
-    }
-
-    g_free(op->description);
-    op->description = g_strdup(description);
-}
-
-static void object_instance_init(struct uc_struct *uc, Object *obj, void *opaque)
-{
-    object_property_add_str(obj, "type", qdev_get_type, NULL, NULL);
-}
-
-void register_types_object(struct uc_struct *uc)
-{
-    static TypeInfo interface_info = {
-        TYPE_INTERFACE,             // name
-        NULL,
-
-        sizeof(InterfaceClass),     // class_size
-        0,
-        NULL,
-
-        NULL,
-        NULL,
-        NULL,
-
-        NULL,
-
-        NULL,
-        NULL,
-        NULL,
-
-        true,                       // abstract
-    };
-
-    static TypeInfo object_info = {
-        TYPE_OBJECT,
-        NULL,
-
-        0,
-        sizeof(Object),
-        NULL,
-
-        object_instance_init,
-        NULL,
-        NULL,
-
-        NULL,
-
-        NULL,
-        NULL,
-        NULL,
-
-        true,
-    };
-
-    uc->type_interface = type_register_internal(uc, &interface_info);
-    type_register_internal(uc, &object_info);
-}
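Every typed accessor in the file above ultimately converges on the two QObject wrappers in the file whose removal follows: a value becomes a QObject, the QObject feeds a QMP input visitor, and that visitor drives the property's set callback. The whole pipeline, sketched by hand for a string (every call here appears in the surrounding code):

    /* value -> QObject -> QMP input visitor -> prop->set callback;
     * compare object_property_set_str() above with
     * object_property_set_qobject() below. */
    static void set_str_by_hand(struct uc_struct *uc, Object *obj,
                                const char *value, const char *name,
                                Error **errp)
    {
        QString *qstr = qstring_from_str(value);
        QmpInputVisitor *mi = qmp_input_visitor_new(QOBJECT(qstr));

        object_property_set(uc, obj, qmp_input_get_visitor(mi), name, errp);
        qmp_input_visitor_cleanup(mi);
        QDECREF(qstr);
    }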
diff --git a/qemu/qom/qom-qobject.c b/qemu/qom/qom-qobject.c
deleted file mode 100644
index f1579ff2..00000000
--- a/qemu/qom/qom-qobject.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * QEMU Object Model - QObject wrappers
- *
- * Copyright (C) 2012 Red Hat, Inc.
- *
- * Author: Paolo Bonzini
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#include "qemu-common.h"
-#include "qom/object.h"
-#include "qom/qom-qobject.h"
-#include "qapi/visitor.h"
-#include "qapi/qmp-input-visitor.h"
-#include "qapi/qmp-output-visitor.h"
-
-void object_property_set_qobject(struct uc_struct *uc, Object *obj, QObject *value,
-                                 const char *name, Error **errp)
-{
-    QmpInputVisitor *mi;
-    mi = qmp_input_visitor_new(value);
-    object_property_set(uc, obj, qmp_input_get_visitor(mi), name, errp); // qq
-
-    qmp_input_visitor_cleanup(mi);
-}
-
-QObject *object_property_get_qobject(struct uc_struct *uc, Object *obj, const char *name,
-                                     Error **errp)
-{
-    QObject *ret = NULL;
-    Error *local_err = NULL;
-    QmpOutputVisitor *mo;
-
-    mo = qmp_output_visitor_new();
-    object_property_get(uc, obj, qmp_output_get_visitor(mo), name, &local_err);
-    if (!local_err) {
-        ret = qmp_output_get_qobject(mo);
-    }
-    error_propagate(errp, local_err);
-    qmp_output_visitor_cleanup(mo);
-    return ret;
-}
diff --git a/qemu/riscv32.h b/qemu/riscv32.h
new file mode 100644
index 00000000..dceb3ead
--- /dev/null
+++ b/qemu/riscv32.h
@@ -0,0 +1,1373 @@
+/* Autogen header for Unicorn Engine - DONOT MODIFY */
+#ifndef UNICORN_AUTOGEN_riscv32_H
+#define UNICORN_AUTOGEN_riscv32_H
+#ifndef UNICORN_ARCH_POSTFIX
+#define UNICORN_ARCH_POSTFIX _riscv32
+#endif
+#define arm_arch arm_arch_riscv32
+#define tb_target_set_jmp_target tb_target_set_jmp_target_riscv32
+#define have_bmi1 have_bmi1_riscv32
+#define have_popcnt have_popcnt_riscv32
+#define have_avx1 have_avx1_riscv32
+#define have_avx2 have_avx2_riscv32
+#define have_isa have_isa_riscv32
+#define have_altivec have_altivec_riscv32
+#define have_vsx have_vsx_riscv32
+#define flush_icache_range flush_icache_range_riscv32
+#define s390_facilities s390_facilities_riscv32
+#define tcg_dump_op tcg_dump_op_riscv32
+#define tcg_dump_ops tcg_dump_ops_riscv32
+#define tcg_gen_and_i64 tcg_gen_and_i64_riscv32
+#define tcg_gen_discard_i64 tcg_gen_discard_i64_riscv32
+#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_riscv32
+#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_riscv32
+#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_riscv32
+#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_riscv32
+#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_riscv32
+#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_riscv32
+#define tcg_gen_ld_i64 tcg_gen_ld_i64_riscv32
+#define tcg_gen_mov_i64 tcg_gen_mov_i64_riscv32
+#define tcg_gen_movi_i64 tcg_gen_movi_i64_riscv32
+#define tcg_gen_mul_i64 tcg_gen_mul_i64_riscv32
+#define tcg_gen_or_i64 tcg_gen_or_i64_riscv32
+#define tcg_gen_sar_i64 tcg_gen_sar_i64_riscv32
+#define tcg_gen_shl_i64 tcg_gen_shl_i64_riscv32
+#define tcg_gen_shr_i64 tcg_gen_shr_i64_riscv32
+#define tcg_gen_st_i64 tcg_gen_st_i64_riscv32
+#define tcg_gen_xor_i64 tcg_gen_xor_i64_riscv32
+#define cpu_icount_to_ns cpu_icount_to_ns_riscv32
+#define cpu_is_stopped cpu_is_stopped_riscv32
+#define cpu_get_ticks cpu_get_ticks_riscv32
+#define cpu_get_clock cpu_get_clock_riscv32
+#define cpu_resume cpu_resume_riscv32
+#define qemu_init_vcpu qemu_init_vcpu_riscv32
+#define cpu_stop_current cpu_stop_current_riscv32
+#define resume_all_vcpus resume_all_vcpus_riscv32
+#define vm_start vm_start_riscv32
+#define address_space_dispatch_compact address_space_dispatch_compact_riscv32
+#define flatview_translate flatview_translate_riscv32
+#define address_space_translate_for_iotlb address_space_translate_for_iotlb_riscv32
+#define qemu_get_cpu qemu_get_cpu_riscv32
+#define cpu_address_space_init cpu_address_space_init_riscv32
+#define cpu_get_address_space cpu_get_address_space_riscv32
+#define
cpu_exec_unrealizefn cpu_exec_unrealizefn_riscv32 +#define cpu_exec_initfn cpu_exec_initfn_riscv32 +#define cpu_exec_realizefn cpu_exec_realizefn_riscv32 +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_riscv32 +#define cpu_watchpoint_insert cpu_watchpoint_insert_riscv32 +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_riscv32 +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_riscv32 +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_riscv32 +#define cpu_breakpoint_insert cpu_breakpoint_insert_riscv32 +#define cpu_breakpoint_remove cpu_breakpoint_remove_riscv32 +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_riscv32 +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_riscv32 +#define cpu_abort cpu_abort_riscv32 +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_riscv32 +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_riscv32 +#define flatview_add_to_dispatch flatview_add_to_dispatch_riscv32 +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_riscv32 +#define qemu_ram_get_offset qemu_ram_get_offset_riscv32 +#define qemu_ram_get_used_length qemu_ram_get_used_length_riscv32 +#define qemu_ram_is_shared qemu_ram_is_shared_riscv32 +#define qemu_ram_pagesize qemu_ram_pagesize_riscv32 +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_riscv32 +#define qemu_ram_alloc qemu_ram_alloc_riscv32 +#define qemu_ram_free qemu_ram_free_riscv32 +#define qemu_map_ram_ptr qemu_map_ram_ptr_riscv32 +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_riscv32 +#define qemu_ram_block_from_host qemu_ram_block_from_host_riscv32 +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_riscv32 +#define cpu_check_watchpoint cpu_check_watchpoint_riscv32 +#define iotlb_to_section iotlb_to_section_riscv32 +#define address_space_dispatch_new address_space_dispatch_new_riscv32 +#define address_space_dispatch_free address_space_dispatch_free_riscv32 +#define flatview_read_continue flatview_read_continue_riscv32 +#define address_space_read_full address_space_read_full_riscv32 +#define address_space_write address_space_write_riscv32 +#define address_space_rw address_space_rw_riscv32 +#define cpu_physical_memory_rw cpu_physical_memory_rw_riscv32 +#define address_space_write_rom address_space_write_rom_riscv32 +#define cpu_flush_icache_range cpu_flush_icache_range_riscv32 +#define cpu_exec_init_all cpu_exec_init_all_riscv32 +#define address_space_access_valid address_space_access_valid_riscv32 +#define address_space_map address_space_map_riscv32 +#define address_space_unmap address_space_unmap_riscv32 +#define cpu_physical_memory_map cpu_physical_memory_map_riscv32 +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_riscv32 +#define cpu_memory_rw_debug cpu_memory_rw_debug_riscv32 +#define qemu_target_page_size qemu_target_page_size_riscv32 +#define qemu_target_page_bits qemu_target_page_bits_riscv32 +#define qemu_target_page_bits_min qemu_target_page_bits_min_riscv32 +#define target_words_bigendian target_words_bigendian_riscv32 +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_riscv32 +#define ram_block_discard_range ram_block_discard_range_riscv32 +#define ramblock_is_pmem ramblock_is_pmem_riscv32 +#define page_size_init page_size_init_riscv32 +#define set_preferred_target_page_bits set_preferred_target_page_bits_riscv32 +#define finalize_target_page_bits finalize_target_page_bits_riscv32 +#define cpu_outb cpu_outb_riscv32 +#define cpu_outw 
cpu_outw_riscv32 +#define cpu_outl cpu_outl_riscv32 +#define cpu_inb cpu_inb_riscv32 +#define cpu_inw cpu_inw_riscv32 +#define cpu_inl cpu_inl_riscv32 +#define memory_map memory_map_riscv32 +#define memory_map_io memory_map_io_riscv32 +#define memory_map_ptr memory_map_ptr_riscv32 +#define memory_unmap memory_unmap_riscv32 +#define memory_free memory_free_riscv32 +#define flatview_unref flatview_unref_riscv32 +#define address_space_get_flatview address_space_get_flatview_riscv32 +#define memory_region_transaction_begin memory_region_transaction_begin_riscv32 +#define memory_region_transaction_commit memory_region_transaction_commit_riscv32 +#define memory_region_init memory_region_init_riscv32 +#define memory_region_access_valid memory_region_access_valid_riscv32 +#define memory_region_dispatch_read memory_region_dispatch_read_riscv32 +#define memory_region_dispatch_write memory_region_dispatch_write_riscv32 +#define memory_region_init_io memory_region_init_io_riscv32 +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_riscv32 +#define memory_region_size memory_region_size_riscv32 +#define memory_region_set_readonly memory_region_set_readonly_riscv32 +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_riscv32 +#define memory_region_from_host memory_region_from_host_riscv32 +#define memory_region_get_ram_addr memory_region_get_ram_addr_riscv32 +#define memory_region_add_subregion memory_region_add_subregion_riscv32 +#define memory_region_del_subregion memory_region_del_subregion_riscv32 +#define memory_region_find memory_region_find_riscv32 +#define memory_listener_register memory_listener_register_riscv32 +#define memory_listener_unregister memory_listener_unregister_riscv32 +#define address_space_remove_listeners address_space_remove_listeners_riscv32 +#define address_space_init address_space_init_riscv32 +#define address_space_destroy address_space_destroy_riscv32 +#define memory_region_init_ram memory_region_init_ram_riscv32 +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_riscv32 +#define exec_inline_op exec_inline_op_riscv32 +#define floatx80_default_nan floatx80_default_nan_riscv32 +#define float_raise float_raise_riscv32 +#define float16_is_quiet_nan float16_is_quiet_nan_riscv32 +#define float16_is_signaling_nan float16_is_signaling_nan_riscv32 +#define float32_is_quiet_nan float32_is_quiet_nan_riscv32 +#define float32_is_signaling_nan float32_is_signaling_nan_riscv32 +#define float64_is_quiet_nan float64_is_quiet_nan_riscv32 +#define float64_is_signaling_nan float64_is_signaling_nan_riscv32 +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_riscv32 +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_riscv32 +#define floatx80_silence_nan floatx80_silence_nan_riscv32 +#define propagateFloatx80NaN propagateFloatx80NaN_riscv32 +#define float128_is_quiet_nan float128_is_quiet_nan_riscv32 +#define float128_is_signaling_nan float128_is_signaling_nan_riscv32 +#define float128_silence_nan float128_silence_nan_riscv32 +#define float16_add float16_add_riscv32 +#define float16_sub float16_sub_riscv32 +#define float32_add float32_add_riscv32 +#define float32_sub float32_sub_riscv32 +#define float64_add float64_add_riscv32 +#define float64_sub float64_sub_riscv32 +#define float16_mul float16_mul_riscv32 +#define float32_mul float32_mul_riscv32 +#define float64_mul float64_mul_riscv32 +#define float16_muladd float16_muladd_riscv32 +#define float32_muladd float32_muladd_riscv32 +#define float64_muladd float64_muladd_riscv32 +#define 
float16_div float16_div_riscv32 +#define float32_div float32_div_riscv32 +#define float64_div float64_div_riscv32 +#define float16_to_float32 float16_to_float32_riscv32 +#define float16_to_float64 float16_to_float64_riscv32 +#define float32_to_float16 float32_to_float16_riscv32 +#define float32_to_float64 float32_to_float64_riscv32 +#define float64_to_float16 float64_to_float16_riscv32 +#define float64_to_float32 float64_to_float32_riscv32 +#define float16_round_to_int float16_round_to_int_riscv32 +#define float32_round_to_int float32_round_to_int_riscv32 +#define float64_round_to_int float64_round_to_int_riscv32 +#define float16_to_int16_scalbn float16_to_int16_scalbn_riscv32 +#define float16_to_int32_scalbn float16_to_int32_scalbn_riscv32 +#define float16_to_int64_scalbn float16_to_int64_scalbn_riscv32 +#define float32_to_int16_scalbn float32_to_int16_scalbn_riscv32 +#define float32_to_int32_scalbn float32_to_int32_scalbn_riscv32 +#define float32_to_int64_scalbn float32_to_int64_scalbn_riscv32 +#define float64_to_int16_scalbn float64_to_int16_scalbn_riscv32 +#define float64_to_int32_scalbn float64_to_int32_scalbn_riscv32 +#define float64_to_int64_scalbn float64_to_int64_scalbn_riscv32 +#define float16_to_int16 float16_to_int16_riscv32 +#define float16_to_int32 float16_to_int32_riscv32 +#define float16_to_int64 float16_to_int64_riscv32 +#define float32_to_int16 float32_to_int16_riscv32 +#define float32_to_int32 float32_to_int32_riscv32 +#define float32_to_int64 float32_to_int64_riscv32 +#define float64_to_int16 float64_to_int16_riscv32 +#define float64_to_int32 float64_to_int32_riscv32 +#define float64_to_int64 float64_to_int64_riscv32 +#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_riscv32 +#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_riscv32 +#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_riscv32 +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_riscv32 +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_riscv32 +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_riscv32 +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_riscv32 +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_riscv32 +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_riscv32 +#define float16_to_uint16_scalbn float16_to_uint16_scalbn_riscv32 +#define float16_to_uint32_scalbn float16_to_uint32_scalbn_riscv32 +#define float16_to_uint64_scalbn float16_to_uint64_scalbn_riscv32 +#define float32_to_uint16_scalbn float32_to_uint16_scalbn_riscv32 +#define float32_to_uint32_scalbn float32_to_uint32_scalbn_riscv32 +#define float32_to_uint64_scalbn float32_to_uint64_scalbn_riscv32 +#define float64_to_uint16_scalbn float64_to_uint16_scalbn_riscv32 +#define float64_to_uint32_scalbn float64_to_uint32_scalbn_riscv32 +#define float64_to_uint64_scalbn float64_to_uint64_scalbn_riscv32 +#define float16_to_uint16 float16_to_uint16_riscv32 +#define float16_to_uint32 float16_to_uint32_riscv32 +#define float16_to_uint64 float16_to_uint64_riscv32 +#define float32_to_uint16 float32_to_uint16_riscv32 +#define float32_to_uint32 float32_to_uint32_riscv32 +#define float32_to_uint64 float32_to_uint64_riscv32 +#define float64_to_uint16 float64_to_uint16_riscv32 +#define float64_to_uint32 float64_to_uint32_riscv32 +#define float64_to_uint64 float64_to_uint64_riscv32 +#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_riscv32 +#define 
float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_riscv32 +#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_riscv32 +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_riscv32 +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_riscv32 +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_riscv32 +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_riscv32 +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_riscv32 +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_riscv32 +#define int64_to_float16_scalbn int64_to_float16_scalbn_riscv32 +#define int32_to_float16_scalbn int32_to_float16_scalbn_riscv32 +#define int16_to_float16_scalbn int16_to_float16_scalbn_riscv32 +#define int64_to_float16 int64_to_float16_riscv32 +#define int32_to_float16 int32_to_float16_riscv32 +#define int16_to_float16 int16_to_float16_riscv32 +#define int64_to_float32_scalbn int64_to_float32_scalbn_riscv32 +#define int32_to_float32_scalbn int32_to_float32_scalbn_riscv32 +#define int16_to_float32_scalbn int16_to_float32_scalbn_riscv32 +#define int64_to_float32 int64_to_float32_riscv32 +#define int32_to_float32 int32_to_float32_riscv32 +#define int16_to_float32 int16_to_float32_riscv32 +#define int64_to_float64_scalbn int64_to_float64_scalbn_riscv32 +#define int32_to_float64_scalbn int32_to_float64_scalbn_riscv32 +#define int16_to_float64_scalbn int16_to_float64_scalbn_riscv32 +#define int64_to_float64 int64_to_float64_riscv32 +#define int32_to_float64 int32_to_float64_riscv32 +#define int16_to_float64 int16_to_float64_riscv32 +#define uint64_to_float16_scalbn uint64_to_float16_scalbn_riscv32 +#define uint32_to_float16_scalbn uint32_to_float16_scalbn_riscv32 +#define uint16_to_float16_scalbn uint16_to_float16_scalbn_riscv32 +#define uint64_to_float16 uint64_to_float16_riscv32 +#define uint32_to_float16 uint32_to_float16_riscv32 +#define uint16_to_float16 uint16_to_float16_riscv32 +#define uint64_to_float32_scalbn uint64_to_float32_scalbn_riscv32 +#define uint32_to_float32_scalbn uint32_to_float32_scalbn_riscv32 +#define uint16_to_float32_scalbn uint16_to_float32_scalbn_riscv32 +#define uint64_to_float32 uint64_to_float32_riscv32 +#define uint32_to_float32 uint32_to_float32_riscv32 +#define uint16_to_float32 uint16_to_float32_riscv32 +#define uint64_to_float64_scalbn uint64_to_float64_scalbn_riscv32 +#define uint32_to_float64_scalbn uint32_to_float64_scalbn_riscv32 +#define uint16_to_float64_scalbn uint16_to_float64_scalbn_riscv32 +#define uint64_to_float64 uint64_to_float64_riscv32 +#define uint32_to_float64 uint32_to_float64_riscv32 +#define uint16_to_float64 uint16_to_float64_riscv32 +#define float16_min float16_min_riscv32 +#define float16_minnum float16_minnum_riscv32 +#define float16_minnummag float16_minnummag_riscv32 +#define float16_max float16_max_riscv32 +#define float16_maxnum float16_maxnum_riscv32 +#define float16_maxnummag float16_maxnummag_riscv32 +#define float32_min float32_min_riscv32 +#define float32_minnum float32_minnum_riscv32 +#define float32_minnummag float32_minnummag_riscv32 +#define float32_max float32_max_riscv32 +#define float32_maxnum float32_maxnum_riscv32 +#define float32_maxnummag float32_maxnummag_riscv32 +#define float64_min float64_min_riscv32 +#define float64_minnum float64_minnum_riscv32 +#define float64_minnummag float64_minnummag_riscv32 +#define float64_max float64_max_riscv32 +#define float64_maxnum 
float64_maxnum_riscv32 +#define float64_maxnummag float64_maxnummag_riscv32 +#define float16_compare float16_compare_riscv32 +#define float16_compare_quiet float16_compare_quiet_riscv32 +#define float32_compare float32_compare_riscv32 +#define float32_compare_quiet float32_compare_quiet_riscv32 +#define float64_compare float64_compare_riscv32 +#define float64_compare_quiet float64_compare_quiet_riscv32 +#define float16_scalbn float16_scalbn_riscv32 +#define float32_scalbn float32_scalbn_riscv32 +#define float64_scalbn float64_scalbn_riscv32 +#define float16_sqrt float16_sqrt_riscv32 +#define float32_sqrt float32_sqrt_riscv32 +#define float64_sqrt float64_sqrt_riscv32 +#define float16_default_nan float16_default_nan_riscv32 +#define float32_default_nan float32_default_nan_riscv32 +#define float64_default_nan float64_default_nan_riscv32 +#define float128_default_nan float128_default_nan_riscv32 +#define float16_silence_nan float16_silence_nan_riscv32 +#define float32_silence_nan float32_silence_nan_riscv32 +#define float64_silence_nan float64_silence_nan_riscv32 +#define float16_squash_input_denormal float16_squash_input_denormal_riscv32 +#define float32_squash_input_denormal float32_squash_input_denormal_riscv32 +#define float64_squash_input_denormal float64_squash_input_denormal_riscv32 +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_riscv32 +#define roundAndPackFloatx80 roundAndPackFloatx80_riscv32 +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_riscv32 +#define int32_to_floatx80 int32_to_floatx80_riscv32 +#define int32_to_float128 int32_to_float128_riscv32 +#define int64_to_floatx80 int64_to_floatx80_riscv32 +#define int64_to_float128 int64_to_float128_riscv32 +#define uint64_to_float128 uint64_to_float128_riscv32 +#define float32_to_floatx80 float32_to_floatx80_riscv32 +#define float32_to_float128 float32_to_float128_riscv32 +#define float32_rem float32_rem_riscv32 +#define float32_exp2 float32_exp2_riscv32 +#define float32_log2 float32_log2_riscv32 +#define float32_eq float32_eq_riscv32 +#define float32_le float32_le_riscv32 +#define float32_lt float32_lt_riscv32 +#define float32_unordered float32_unordered_riscv32 +#define float32_eq_quiet float32_eq_quiet_riscv32 +#define float32_le_quiet float32_le_quiet_riscv32 +#define float32_lt_quiet float32_lt_quiet_riscv32 +#define float32_unordered_quiet float32_unordered_quiet_riscv32 +#define float64_to_floatx80 float64_to_floatx80_riscv32 +#define float64_to_float128 float64_to_float128_riscv32 +#define float64_rem float64_rem_riscv32 +#define float64_log2 float64_log2_riscv32 +#define float64_eq float64_eq_riscv32 +#define float64_le float64_le_riscv32 +#define float64_lt float64_lt_riscv32 +#define float64_unordered float64_unordered_riscv32 +#define float64_eq_quiet float64_eq_quiet_riscv32 +#define float64_le_quiet float64_le_quiet_riscv32 +#define float64_lt_quiet float64_lt_quiet_riscv32 +#define float64_unordered_quiet float64_unordered_quiet_riscv32 +#define floatx80_to_int32 floatx80_to_int32_riscv32 +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_riscv32 +#define floatx80_to_int64 floatx80_to_int64_riscv32 +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_riscv32 +#define floatx80_to_float32 floatx80_to_float32_riscv32 +#define floatx80_to_float64 floatx80_to_float64_riscv32 +#define floatx80_to_float128 floatx80_to_float128_riscv32 +#define floatx80_round floatx80_round_riscv32 +#define floatx80_round_to_int floatx80_round_to_int_riscv32 +#define 
floatx80_add floatx80_add_riscv32 +#define floatx80_sub floatx80_sub_riscv32 +#define floatx80_mul floatx80_mul_riscv32 +#define floatx80_div floatx80_div_riscv32 +#define floatx80_rem floatx80_rem_riscv32 +#define floatx80_sqrt floatx80_sqrt_riscv32 +#define floatx80_eq floatx80_eq_riscv32 +#define floatx80_le floatx80_le_riscv32 +#define floatx80_lt floatx80_lt_riscv32 +#define floatx80_unordered floatx80_unordered_riscv32 +#define floatx80_eq_quiet floatx80_eq_quiet_riscv32 +#define floatx80_le_quiet floatx80_le_quiet_riscv32 +#define floatx80_lt_quiet floatx80_lt_quiet_riscv32 +#define floatx80_unordered_quiet floatx80_unordered_quiet_riscv32 +#define float128_to_int32 float128_to_int32_riscv32 +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_riscv32 +#define float128_to_int64 float128_to_int64_riscv32 +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_riscv32 +#define float128_to_uint64 float128_to_uint64_riscv32 +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_riscv32 +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_riscv32 +#define float128_to_uint32 float128_to_uint32_riscv32 +#define float128_to_float32 float128_to_float32_riscv32 +#define float128_to_float64 float128_to_float64_riscv32 +#define float128_to_floatx80 float128_to_floatx80_riscv32 +#define float128_round_to_int float128_round_to_int_riscv32 +#define float128_add float128_add_riscv32 +#define float128_sub float128_sub_riscv32 +#define float128_mul float128_mul_riscv32 +#define float128_div float128_div_riscv32 +#define float128_rem float128_rem_riscv32 +#define float128_sqrt float128_sqrt_riscv32 +#define float128_eq float128_eq_riscv32 +#define float128_le float128_le_riscv32 +#define float128_lt float128_lt_riscv32 +#define float128_unordered float128_unordered_riscv32 +#define float128_eq_quiet float128_eq_quiet_riscv32 +#define float128_le_quiet float128_le_quiet_riscv32 +#define float128_lt_quiet float128_lt_quiet_riscv32 +#define float128_unordered_quiet float128_unordered_quiet_riscv32 +#define floatx80_compare floatx80_compare_riscv32 +#define floatx80_compare_quiet floatx80_compare_quiet_riscv32 +#define float128_compare float128_compare_riscv32 +#define float128_compare_quiet float128_compare_quiet_riscv32 +#define floatx80_scalbn floatx80_scalbn_riscv32 +#define float128_scalbn float128_scalbn_riscv32 +#define softfloat_init softfloat_init_riscv32 +#define tcg_optimize tcg_optimize_riscv32 +#define gen_new_label gen_new_label_riscv32 +#define tcg_can_emit_vec_op tcg_can_emit_vec_op_riscv32 +#define tcg_expand_vec_op tcg_expand_vec_op_riscv32 +#define tcg_register_jit tcg_register_jit_riscv32 +#define tcg_tb_insert tcg_tb_insert_riscv32 +#define tcg_tb_remove tcg_tb_remove_riscv32 +#define tcg_tb_lookup tcg_tb_lookup_riscv32 +#define tcg_tb_foreach tcg_tb_foreach_riscv32 +#define tcg_nb_tbs tcg_nb_tbs_riscv32 +#define tcg_region_reset_all tcg_region_reset_all_riscv32 +#define tcg_region_init tcg_region_init_riscv32 +#define tcg_code_size tcg_code_size_riscv32 +#define tcg_code_capacity tcg_code_capacity_riscv32 +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_riscv32 +#define tcg_malloc_internal tcg_malloc_internal_riscv32 +#define tcg_pool_reset tcg_pool_reset_riscv32 +#define tcg_context_init tcg_context_init_riscv32 +#define tcg_tb_alloc tcg_tb_alloc_riscv32 +#define tcg_prologue_init tcg_prologue_init_riscv32 +#define tcg_func_start tcg_func_start_riscv32 +#define tcg_set_frame 
tcg_set_frame_riscv32 +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_riscv32 +#define tcg_temp_new_internal tcg_temp_new_internal_riscv32 +#define tcg_temp_new_vec tcg_temp_new_vec_riscv32 +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_riscv32 +#define tcg_temp_free_internal tcg_temp_free_internal_riscv32 +#define tcg_const_i32 tcg_const_i32_riscv32 +#define tcg_const_i64 tcg_const_i64_riscv32 +#define tcg_const_local_i32 tcg_const_local_i32_riscv32 +#define tcg_const_local_i64 tcg_const_local_i64_riscv32 +#define tcg_op_supported tcg_op_supported_riscv32 +#define tcg_gen_callN tcg_gen_callN_riscv32 +#define tcg_op_remove tcg_op_remove_riscv32 +#define tcg_emit_op tcg_emit_op_riscv32 +#define tcg_op_insert_before tcg_op_insert_before_riscv32 +#define tcg_op_insert_after tcg_op_insert_after_riscv32 +#define tcg_cpu_exec_time tcg_cpu_exec_time_riscv32 +#define tcg_gen_code tcg_gen_code_riscv32 +#define tcg_gen_op1 tcg_gen_op1_riscv32 +#define tcg_gen_op2 tcg_gen_op2_riscv32 +#define tcg_gen_op3 tcg_gen_op3_riscv32 +#define tcg_gen_op4 tcg_gen_op4_riscv32 +#define tcg_gen_op5 tcg_gen_op5_riscv32 +#define tcg_gen_op6 tcg_gen_op6_riscv32 +#define tcg_gen_mb tcg_gen_mb_riscv32 +#define tcg_gen_addi_i32 tcg_gen_addi_i32_riscv32 +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_riscv32 +#define tcg_gen_subi_i32 tcg_gen_subi_i32_riscv32 +#define tcg_gen_andi_i32 tcg_gen_andi_i32_riscv32 +#define tcg_gen_ori_i32 tcg_gen_ori_i32_riscv32 +#define tcg_gen_xori_i32 tcg_gen_xori_i32_riscv32 +#define tcg_gen_shli_i32 tcg_gen_shli_i32_riscv32 +#define tcg_gen_shri_i32 tcg_gen_shri_i32_riscv32 +#define tcg_gen_sari_i32 tcg_gen_sari_i32_riscv32 +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_riscv32 +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_riscv32 +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_riscv32 +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_riscv32 +#define tcg_gen_muli_i32 tcg_gen_muli_i32_riscv32 +#define tcg_gen_div_i32 tcg_gen_div_i32_riscv32 +#define tcg_gen_rem_i32 tcg_gen_rem_i32_riscv32 +#define tcg_gen_divu_i32 tcg_gen_divu_i32_riscv32 +#define tcg_gen_remu_i32 tcg_gen_remu_i32_riscv32 +#define tcg_gen_andc_i32 tcg_gen_andc_i32_riscv32 +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_riscv32 +#define tcg_gen_nand_i32 tcg_gen_nand_i32_riscv32 +#define tcg_gen_nor_i32 tcg_gen_nor_i32_riscv32 +#define tcg_gen_orc_i32 tcg_gen_orc_i32_riscv32 +#define tcg_gen_clz_i32 tcg_gen_clz_i32_riscv32 +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_riscv32 +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_riscv32 +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_riscv32 +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_riscv32 +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_riscv32 +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_riscv32 +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_riscv32 +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_riscv32 +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_riscv32 +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_riscv32 +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_riscv32 +#define tcg_gen_extract_i32 tcg_gen_extract_i32_riscv32 +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_riscv32 +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_riscv32 +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_riscv32 +#define tcg_gen_add2_i32 tcg_gen_add2_i32_riscv32 +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_riscv32 +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_riscv32 +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_riscv32 +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_riscv32 +#define 
tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_riscv32 +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_riscv32 +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_riscv32 +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_riscv32 +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_riscv32 +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_riscv32 +#define tcg_gen_smin_i32 tcg_gen_smin_i32_riscv32 +#define tcg_gen_umin_i32 tcg_gen_umin_i32_riscv32 +#define tcg_gen_smax_i32 tcg_gen_smax_i32_riscv32 +#define tcg_gen_umax_i32 tcg_gen_umax_i32_riscv32 +#define tcg_gen_abs_i32 tcg_gen_abs_i32_riscv32 +#define tcg_gen_addi_i64 tcg_gen_addi_i64_riscv32 +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_riscv32 +#define tcg_gen_subi_i64 tcg_gen_subi_i64_riscv32 +#define tcg_gen_andi_i64 tcg_gen_andi_i64_riscv32 +#define tcg_gen_ori_i64 tcg_gen_ori_i64_riscv32 +#define tcg_gen_xori_i64 tcg_gen_xori_i64_riscv32 +#define tcg_gen_shli_i64 tcg_gen_shli_i64_riscv32 +#define tcg_gen_shri_i64 tcg_gen_shri_i64_riscv32 +#define tcg_gen_sari_i64 tcg_gen_sari_i64_riscv32 +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_riscv32 +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_riscv32 +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_riscv32 +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_riscv32 +#define tcg_gen_muli_i64 tcg_gen_muli_i64_riscv32 +#define tcg_gen_div_i64 tcg_gen_div_i64_riscv32 +#define tcg_gen_rem_i64 tcg_gen_rem_i64_riscv32 +#define tcg_gen_divu_i64 tcg_gen_divu_i64_riscv32 +#define tcg_gen_remu_i64 tcg_gen_remu_i64_riscv32 +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_riscv32 +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_riscv32 +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_riscv32 +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_riscv32 +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_riscv32 +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_riscv32 +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_riscv32 +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_riscv32 +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_riscv32 +#define tcg_gen_not_i64 tcg_gen_not_i64_riscv32 +#define tcg_gen_andc_i64 tcg_gen_andc_i64_riscv32 +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_riscv32 +#define tcg_gen_nand_i64 tcg_gen_nand_i64_riscv32 +#define tcg_gen_nor_i64 tcg_gen_nor_i64_riscv32 +#define tcg_gen_orc_i64 tcg_gen_orc_i64_riscv32 +#define tcg_gen_clz_i64 tcg_gen_clz_i64_riscv32 +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_riscv32 +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_riscv32 +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_riscv32 +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_riscv32 +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_riscv32 +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_riscv32 +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_riscv32 +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_riscv32 +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_riscv32 +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_riscv32 +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_riscv32 +#define tcg_gen_extract_i64 tcg_gen_extract_i64_riscv32 +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_riscv32 +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_riscv32 +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_riscv32 +#define tcg_gen_add2_i64 tcg_gen_add2_i64_riscv32 +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_riscv32 +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_riscv32 +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_riscv32 +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_riscv32 +#define tcg_gen_smin_i64 tcg_gen_smin_i64_riscv32 +#define tcg_gen_umin_i64 tcg_gen_umin_i64_riscv32 +#define 
tcg_gen_smax_i64 tcg_gen_smax_i64_riscv32 +#define tcg_gen_umax_i64 tcg_gen_umax_i64_riscv32 +#define tcg_gen_abs_i64 tcg_gen_abs_i64_riscv32 +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_riscv32 +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_riscv32 +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_riscv32 +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_riscv32 +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_riscv32 +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_riscv32 +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_riscv32 +#define tcg_gen_exit_tb tcg_gen_exit_tb_riscv32 +#define tcg_gen_goto_tb tcg_gen_goto_tb_riscv32 +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_riscv32 +#define check_exit_request check_exit_request_riscv32 +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_riscv32 +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_riscv32 +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_riscv32 +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_riscv32 +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_riscv32 +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_riscv32 +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_riscv32 +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_riscv32 +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_riscv32 +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_riscv32 +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_riscv32 +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_riscv32 +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_riscv32 +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_riscv32 +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_riscv32 +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_riscv32 +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_riscv32 +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_riscv32 +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_riscv32 +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_riscv32 +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_riscv32 +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_riscv32 +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_riscv32 +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_riscv32 +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_riscv32 +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_riscv32 +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_riscv32 +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_riscv32 +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_riscv32 +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_riscv32 +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_riscv32 +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_riscv32 +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_riscv32 +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_riscv32 +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_riscv32 +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_riscv32 +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_riscv32 +#define tcg_gen_atomic_umax_fetch_i64 
tcg_gen_atomic_umax_fetch_i64_riscv32
+#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_riscv32
+#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_riscv32
+#define simd_desc simd_desc_riscv32
+#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_riscv32
+#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_riscv32
+#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_riscv32
+#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_riscv32
+#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_riscv32
+#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_riscv32
+#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_riscv32
+#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_riscv32
+#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_riscv32
+#define tcg_gen_gvec_2 tcg_gen_gvec_2_riscv32
+#define tcg_gen_gvec_2i tcg_gen_gvec_2i_riscv32
+#define tcg_gen_gvec_2s tcg_gen_gvec_2s_riscv32
+#define tcg_gen_gvec_3 tcg_gen_gvec_3_riscv32
+#define tcg_gen_gvec_3i tcg_gen_gvec_3i_riscv32
+#define tcg_gen_gvec_4 tcg_gen_gvec_4_riscv32
+#define tcg_gen_gvec_mov tcg_gen_gvec_mov_riscv32
+#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_riscv32
+#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_riscv32
+#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_riscv32
+#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_riscv32
+#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_riscv32
+#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_riscv32
+#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_riscv32
+#define tcg_gen_gvec_not tcg_gen_gvec_not_riscv32
+#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_riscv32
+#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_riscv32
+#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_riscv32
+#define tcg_gen_gvec_add tcg_gen_gvec_add_riscv32
+#define tcg_gen_gvec_adds tcg_gen_gvec_adds_riscv32
+#define tcg_gen_gvec_addi tcg_gen_gvec_addi_riscv32
+#define tcg_gen_gvec_subs tcg_gen_gvec_subs_riscv32
+#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_riscv32
+#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_riscv32
+#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_riscv32
+#define tcg_gen_gvec_sub tcg_gen_gvec_sub_riscv32
+#define tcg_gen_gvec_mul tcg_gen_gvec_mul_riscv32
+#define tcg_gen_gvec_muls tcg_gen_gvec_muls_riscv32
+#define tcg_gen_gvec_muli tcg_gen_gvec_muli_riscv32
+#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_riscv32
+#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_riscv32
+#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_riscv32
+#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_riscv32
+#define tcg_gen_gvec_smin tcg_gen_gvec_smin_riscv32
+#define tcg_gen_gvec_umin tcg_gen_gvec_umin_riscv32
+#define tcg_gen_gvec_smax tcg_gen_gvec_smax_riscv32
+#define tcg_gen_gvec_umax tcg_gen_gvec_umax_riscv32
+#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_riscv32
+#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_riscv32
+#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_riscv32
+#define tcg_gen_gvec_neg tcg_gen_gvec_neg_riscv32
+#define tcg_gen_gvec_abs tcg_gen_gvec_abs_riscv32
+#define tcg_gen_gvec_and tcg_gen_gvec_and_riscv32
+#define tcg_gen_gvec_or tcg_gen_gvec_or_riscv32
+#define tcg_gen_gvec_xor tcg_gen_gvec_xor_riscv32
+#define tcg_gen_gvec_andc tcg_gen_gvec_andc_riscv32
+#define tcg_gen_gvec_orc tcg_gen_gvec_orc_riscv32
+#define tcg_gen_gvec_nand tcg_gen_gvec_nand_riscv32
+#define tcg_gen_gvec_nor tcg_gen_gvec_nor_riscv32
+#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_riscv32
+#define tcg_gen_gvec_ands tcg_gen_gvec_ands_riscv32
+#define tcg_gen_gvec_andi tcg_gen_gvec_andi_riscv32
+#define tcg_gen_gvec_xors tcg_gen_gvec_xors_riscv32
+#define tcg_gen_gvec_xori tcg_gen_gvec_xori_riscv32
+#define tcg_gen_gvec_ors tcg_gen_gvec_ors_riscv32
+#define tcg_gen_gvec_ori tcg_gen_gvec_ori_riscv32
+#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_riscv32
+#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_riscv32
+#define tcg_gen_gvec_shli tcg_gen_gvec_shli_riscv32
+#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_riscv32
+#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_riscv32
+#define tcg_gen_gvec_shri tcg_gen_gvec_shri_riscv32
+#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_riscv32
+#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_riscv32
+#define tcg_gen_gvec_sari tcg_gen_gvec_sari_riscv32
+#define tcg_gen_gvec_shls tcg_gen_gvec_shls_riscv32
+#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_riscv32
+#define tcg_gen_gvec_sars tcg_gen_gvec_sars_riscv32
+#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_riscv32
+#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_riscv32
+#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_riscv32
+#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_riscv32
+#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_riscv32
+#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_riscv32
+#define vec_gen_2 vec_gen_2_riscv32
+#define vec_gen_3 vec_gen_3_riscv32
+#define vec_gen_4 vec_gen_4_riscv32
+#define tcg_gen_mov_vec tcg_gen_mov_vec_riscv32
+#define tcg_const_zeros_vec tcg_const_zeros_vec_riscv32
+#define tcg_const_ones_vec tcg_const_ones_vec_riscv32
+#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_riscv32
+#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_riscv32
+#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_riscv32
+#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_riscv32
+#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_riscv32
+#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_riscv32
+#define tcg_gen_dupi_vec tcg_gen_dupi_vec_riscv32
+#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_riscv32
+#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_riscv32
+#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_riscv32
+#define tcg_gen_ld_vec tcg_gen_ld_vec_riscv32
+#define tcg_gen_st_vec tcg_gen_st_vec_riscv32
+#define tcg_gen_stl_vec tcg_gen_stl_vec_riscv32
+#define tcg_gen_and_vec tcg_gen_and_vec_riscv32
+#define tcg_gen_or_vec tcg_gen_or_vec_riscv32
+#define tcg_gen_xor_vec tcg_gen_xor_vec_riscv32
+#define tcg_gen_andc_vec tcg_gen_andc_vec_riscv32
+#define tcg_gen_orc_vec tcg_gen_orc_vec_riscv32
+#define tcg_gen_nand_vec tcg_gen_nand_vec_riscv32
+#define tcg_gen_nor_vec tcg_gen_nor_vec_riscv32
+#define tcg_gen_eqv_vec tcg_gen_eqv_vec_riscv32
+#define tcg_gen_not_vec tcg_gen_not_vec_riscv32
+#define tcg_gen_neg_vec tcg_gen_neg_vec_riscv32
+#define tcg_gen_abs_vec tcg_gen_abs_vec_riscv32
+#define tcg_gen_shli_vec tcg_gen_shli_vec_riscv32
+#define tcg_gen_shri_vec tcg_gen_shri_vec_riscv32
+#define tcg_gen_sari_vec tcg_gen_sari_vec_riscv32
+#define tcg_gen_cmp_vec tcg_gen_cmp_vec_riscv32
+#define tcg_gen_add_vec tcg_gen_add_vec_riscv32
+#define tcg_gen_sub_vec tcg_gen_sub_vec_riscv32
+#define tcg_gen_mul_vec tcg_gen_mul_vec_riscv32
+#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_riscv32
+#define tcg_gen_usadd_vec tcg_gen_usadd_vec_riscv32
+#define tcg_gen_sssub_vec tcg_gen_sssub_vec_riscv32
+#define tcg_gen_ussub_vec tcg_gen_ussub_vec_riscv32
+#define tcg_gen_smin_vec tcg_gen_smin_vec_riscv32
+#define tcg_gen_umin_vec tcg_gen_umin_vec_riscv32
+#define tcg_gen_smax_vec tcg_gen_smax_vec_riscv32
+#define tcg_gen_umax_vec tcg_gen_umax_vec_riscv32
+#define tcg_gen_shlv_vec tcg_gen_shlv_vec_riscv32
+#define tcg_gen_shrv_vec tcg_gen_shrv_vec_riscv32
+#define tcg_gen_sarv_vec tcg_gen_sarv_vec_riscv32
+#define tcg_gen_shls_vec tcg_gen_shls_vec_riscv32
+#define tcg_gen_shrs_vec tcg_gen_shrs_vec_riscv32
+#define tcg_gen_sars_vec tcg_gen_sars_vec_riscv32
+#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_riscv32
+#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_riscv32
+#define tb_htable_lookup tb_htable_lookup_riscv32
+#define tb_set_jmp_target tb_set_jmp_target_riscv32
+#define cpu_exec cpu_exec_riscv32
+#define cpu_loop_exit_noexc cpu_loop_exit_noexc_riscv32
+#define cpu_reloading_memory_map cpu_reloading_memory_map_riscv32
+#define cpu_loop_exit cpu_loop_exit_riscv32
+#define cpu_loop_exit_restore cpu_loop_exit_restore_riscv32
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_riscv32
+#define tlb_init tlb_init_riscv32
+#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_riscv32
+#define tlb_flush tlb_flush_riscv32
+#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_riscv32
+#define tlb_flush_all_cpus tlb_flush_all_cpus_riscv32
+#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_riscv32
+#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_riscv32
+#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_riscv32
+#define tlb_flush_page tlb_flush_page_riscv32
+#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_riscv32
+#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_riscv32
+#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_riscv32
+#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_riscv32
+#define tlb_protect_code tlb_protect_code_riscv32
+#define tlb_unprotect_code tlb_unprotect_code_riscv32
+#define tlb_reset_dirty tlb_reset_dirty_riscv32
+#define tlb_set_dirty tlb_set_dirty_riscv32
+#define tlb_set_page_with_attrs tlb_set_page_with_attrs_riscv32
+#define tlb_set_page tlb_set_page_riscv32
+#define get_page_addr_code_hostp get_page_addr_code_hostp_riscv32
+#define get_page_addr_code get_page_addr_code_riscv32
+#define probe_access probe_access_riscv32
+#define tlb_vaddr_to_host tlb_vaddr_to_host_riscv32
+#define helper_ret_ldub_mmu helper_ret_ldub_mmu_riscv32
+#define helper_le_lduw_mmu helper_le_lduw_mmu_riscv32
+#define helper_be_lduw_mmu helper_be_lduw_mmu_riscv32
+#define helper_le_ldul_mmu helper_le_ldul_mmu_riscv32
+#define helper_be_ldul_mmu helper_be_ldul_mmu_riscv32
+#define helper_le_ldq_mmu helper_le_ldq_mmu_riscv32
+#define helper_be_ldq_mmu helper_be_ldq_mmu_riscv32
+#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_riscv32
+#define helper_le_ldsw_mmu helper_le_ldsw_mmu_riscv32
+#define helper_be_ldsw_mmu helper_be_ldsw_mmu_riscv32
+#define helper_le_ldsl_mmu helper_le_ldsl_mmu_riscv32
+#define helper_be_ldsl_mmu helper_be_ldsl_mmu_riscv32
+#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_riscv32
+#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_riscv32
+#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_riscv32
+#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_riscv32
+#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_riscv32
+#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_riscv32
+#define cpu_ldub_data_ra cpu_ldub_data_ra_riscv32
+#define cpu_ldsb_data_ra cpu_ldsb_data_ra_riscv32
+#define cpu_lduw_data_ra cpu_lduw_data_ra_riscv32
+#define cpu_ldsw_data_ra cpu_ldsw_data_ra_riscv32
+#define cpu_ldl_data_ra cpu_ldl_data_ra_riscv32
+#define cpu_ldq_data_ra cpu_ldq_data_ra_riscv32
+#define cpu_ldub_data cpu_ldub_data_riscv32
+#define cpu_ldsb_data cpu_ldsb_data_riscv32
+#define cpu_lduw_data cpu_lduw_data_riscv32
+#define cpu_ldsw_data cpu_ldsw_data_riscv32
+#define cpu_ldl_data cpu_ldl_data_riscv32
+#define cpu_ldq_data cpu_ldq_data_riscv32
+#define helper_ret_stb_mmu helper_ret_stb_mmu_riscv32
+#define helper_le_stw_mmu helper_le_stw_mmu_riscv32
+#define helper_be_stw_mmu helper_be_stw_mmu_riscv32
+#define helper_le_stl_mmu helper_le_stl_mmu_riscv32
+#define helper_be_stl_mmu helper_be_stl_mmu_riscv32
+#define helper_le_stq_mmu helper_le_stq_mmu_riscv32
+#define helper_be_stq_mmu helper_be_stq_mmu_riscv32
+#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_riscv32
+#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_riscv32
+#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_riscv32
+#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_riscv32
+#define cpu_stb_data_ra cpu_stb_data_ra_riscv32
+#define cpu_stw_data_ra cpu_stw_data_ra_riscv32
+#define cpu_stl_data_ra cpu_stl_data_ra_riscv32
+#define cpu_stq_data_ra cpu_stq_data_ra_riscv32
+#define cpu_stb_data cpu_stb_data_riscv32
+#define cpu_stw_data cpu_stw_data_riscv32
+#define cpu_stl_data cpu_stl_data_riscv32
+#define cpu_stq_data cpu_stq_data_riscv32
+#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_riscv32
+#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_riscv32
+#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_riscv32
+#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_riscv32
+#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_riscv32
+#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_riscv32
+#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_riscv32
+#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_riscv32
+#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_riscv32
+#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_riscv32
+#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_riscv32
+#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_riscv32
+#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_riscv32
+#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_riscv32
+#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_riscv32
+#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_riscv32
+#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_riscv32
+#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_riscv32
+#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_riscv32
+#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_riscv32
+#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_riscv32
+#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_riscv32
+#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_riscv32
+#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_riscv32
+#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_riscv32
+#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_riscv32
+#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_riscv32
+#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_riscv32
+#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_riscv32
+#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_riscv32
+#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_riscv32
+#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_riscv32
+#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_riscv32
+#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_riscv32
+#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_riscv32
+#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_riscv32
+#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_riscv32
+#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_riscv32
+#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_riscv32
+#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_riscv32
+#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_riscv32
+#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_riscv32
+#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_riscv32
+#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_riscv32
+#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_riscv32
+#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_riscv32
+#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_riscv32
+#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_riscv32
+#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_riscv32
+#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_riscv32
+#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_riscv32
+#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_riscv32
+#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_riscv32
+#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_riscv32
+#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_riscv32
+#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_riscv32
+#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_riscv32
+#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_riscv32
+#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_riscv32
+#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_riscv32
+#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_riscv32
+#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_riscv32
+#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_riscv32
+#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_riscv32
+#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_riscv32
+#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_riscv32
+#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_riscv32
+#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_riscv32
+#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_riscv32
+#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_riscv32
+#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_riscv32
+#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_riscv32
+#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_riscv32
+#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_riscv32
+#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_riscv32
+#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_riscv32
+#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_riscv32
+#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_riscv32
+#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_riscv32
+#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_riscv32
+#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_riscv32
+#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_riscv32
+#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_riscv32
+#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_riscv32
+#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_riscv32
+#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_riscv32
+#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_riscv32
+#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_riscv32
+#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_riscv32
+#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_riscv32
+#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_riscv32
+#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_riscv32
+#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_riscv32
+#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_riscv32
+#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_riscv32
+#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_riscv32
+#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_riscv32
+#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_riscv32
+#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_riscv32
+#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_riscv32
+#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_riscv32
+#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_riscv32
+#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_riscv32
+#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_riscv32
+#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_riscv32
+#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_riscv32
+#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_riscv32
+#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_riscv32
+#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_riscv32
+#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_riscv32
+#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_riscv32
+#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_riscv32
+#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_riscv32
+#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_riscv32
+#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_riscv32
+#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_riscv32
+#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_riscv32
+#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_riscv32
+#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_riscv32
+#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_riscv32
+#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_riscv32
+#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_riscv32
+#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_riscv32
+#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_riscv32
+#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_riscv32
+#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_riscv32
+#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_riscv32
+#define helper_atomic_xchgb helper_atomic_xchgb_riscv32
+#define helper_atomic_fetch_addb helper_atomic_fetch_addb_riscv32
+#define helper_atomic_fetch_andb helper_atomic_fetch_andb_riscv32
+#define helper_atomic_fetch_orb helper_atomic_fetch_orb_riscv32
+#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_riscv32
+#define helper_atomic_add_fetchb helper_atomic_add_fetchb_riscv32
+#define helper_atomic_and_fetchb helper_atomic_and_fetchb_riscv32
+#define helper_atomic_or_fetchb helper_atomic_or_fetchb_riscv32
+#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_riscv32
+#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_riscv32
+#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_riscv32
+#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_riscv32
+#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_riscv32
+#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_riscv32
+#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_riscv32
+#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_riscv32
+#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_riscv32
+#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_riscv32
+#define helper_atomic_xchgw_le helper_atomic_xchgw_le_riscv32
+#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_riscv32
+#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_riscv32
+#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_riscv32
+#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_riscv32
+#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_riscv32
+#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_riscv32
+#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_riscv32
+#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_riscv32
+#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_riscv32
+#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_riscv32
+#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_riscv32
+#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_riscv32
+#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_riscv32
+#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_riscv32
+#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_riscv32
+#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_riscv32
+#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_riscv32
+#define helper_atomic_xchgw_be helper_atomic_xchgw_be_riscv32
+#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_riscv32
+#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_riscv32
+#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_riscv32
+#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_riscv32
+#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_riscv32
+#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_riscv32
+#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_riscv32
+#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_riscv32
+#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_riscv32
+#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_riscv32
+#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_riscv32
+#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_riscv32
+#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_riscv32
+#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_riscv32
+#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_riscv32
+#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_riscv32
+#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_riscv32
+#define helper_atomic_xchgl_le helper_atomic_xchgl_le_riscv32
+#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_riscv32
+#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_riscv32
+#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_riscv32
+#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_riscv32
+#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_riscv32
+#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_riscv32
+#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_riscv32
+#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_riscv32
+#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_riscv32
+#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_riscv32
+#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_riscv32
+#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_riscv32
+#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_riscv32
+#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_riscv32
+#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_riscv32
+#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_riscv32
+#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_riscv32
+#define helper_atomic_xchgl_be helper_atomic_xchgl_be_riscv32
+#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_riscv32
+#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_riscv32
+#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_riscv32
+#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_riscv32
+#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_riscv32
+#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_riscv32
+#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_riscv32
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_riscv32
+#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_riscv32
+#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_riscv32
+#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_riscv32
+#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_riscv32
+#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_riscv32
+#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_riscv32
+#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_riscv32
+#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_riscv32
+#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_riscv32
+#define helper_atomic_xchgq_le helper_atomic_xchgq_le_riscv32
+#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_riscv32
+#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_riscv32
+#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_riscv32
+#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_riscv32
+#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_riscv32
+#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_riscv32
+#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_riscv32
+#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_riscv32
+#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_riscv32
+#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_riscv32
+#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_riscv32
+#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_riscv32
+#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_riscv32
+#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_riscv32
+#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_riscv32
+#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_riscv32
+#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_riscv32
+#define helper_atomic_xchgq_be helper_atomic_xchgq_be_riscv32
+#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_riscv32
+#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_riscv32
+#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_riscv32
+#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_riscv32
+#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_riscv32
+#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_riscv32
+#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_riscv32
+#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_riscv32
+#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_riscv32
+#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_riscv32
+#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_riscv32
+#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_riscv32
+#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_riscv32
+#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_riscv32
+#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_riscv32
+#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_riscv32
+#define cpu_ldub_code cpu_ldub_code_riscv32
+#define cpu_lduw_code cpu_lduw_code_riscv32
+#define cpu_ldl_code cpu_ldl_code_riscv32
+#define cpu_ldq_code cpu_ldq_code_riscv32
+#define helper_div_i32 helper_div_i32_riscv32
+#define helper_rem_i32 helper_rem_i32_riscv32
+#define helper_divu_i32 helper_divu_i32_riscv32
+#define helper_remu_i32 helper_remu_i32_riscv32
+#define helper_shl_i64 helper_shl_i64_riscv32
+#define helper_shr_i64 helper_shr_i64_riscv32
+#define helper_sar_i64 helper_sar_i64_riscv32
+#define helper_div_i64 helper_div_i64_riscv32
+#define helper_rem_i64 helper_rem_i64_riscv32
+#define helper_divu_i64 helper_divu_i64_riscv32
+#define helper_remu_i64 helper_remu_i64_riscv32
+#define helper_muluh_i64 helper_muluh_i64_riscv32
+#define helper_mulsh_i64 helper_mulsh_i64_riscv32
+#define helper_clz_i32 helper_clz_i32_riscv32
+#define helper_ctz_i32 helper_ctz_i32_riscv32
+#define helper_clz_i64 helper_clz_i64_riscv32
+#define helper_ctz_i64 helper_ctz_i64_riscv32
+#define helper_clrsb_i32 helper_clrsb_i32_riscv32
+#define helper_clrsb_i64 helper_clrsb_i64_riscv32
+#define helper_ctpop_i32 helper_ctpop_i32_riscv32
+#define helper_ctpop_i64 helper_ctpop_i64_riscv32
+#define helper_lookup_tb_ptr helper_lookup_tb_ptr_riscv32
+#define helper_exit_atomic helper_exit_atomic_riscv32
+#define helper_gvec_add8 helper_gvec_add8_riscv32
+#define helper_gvec_add16 helper_gvec_add16_riscv32
+#define helper_gvec_add32 helper_gvec_add32_riscv32
+#define helper_gvec_add64 helper_gvec_add64_riscv32
+#define helper_gvec_adds8 helper_gvec_adds8_riscv32
+#define helper_gvec_adds16 helper_gvec_adds16_riscv32
+#define helper_gvec_adds32 helper_gvec_adds32_riscv32
+#define helper_gvec_adds64 helper_gvec_adds64_riscv32
+#define helper_gvec_sub8 helper_gvec_sub8_riscv32
+#define helper_gvec_sub16 helper_gvec_sub16_riscv32
+#define helper_gvec_sub32 helper_gvec_sub32_riscv32
+#define helper_gvec_sub64 helper_gvec_sub64_riscv32
+#define helper_gvec_subs8 helper_gvec_subs8_riscv32
+#define helper_gvec_subs16 helper_gvec_subs16_riscv32
+#define helper_gvec_subs32 helper_gvec_subs32_riscv32
+#define helper_gvec_subs64 helper_gvec_subs64_riscv32
+#define helper_gvec_mul8 helper_gvec_mul8_riscv32
+#define helper_gvec_mul16 helper_gvec_mul16_riscv32
+#define helper_gvec_mul32 helper_gvec_mul32_riscv32
+#define helper_gvec_mul64 helper_gvec_mul64_riscv32
+#define helper_gvec_muls8 helper_gvec_muls8_riscv32
+#define helper_gvec_muls16 helper_gvec_muls16_riscv32
+#define helper_gvec_muls32 helper_gvec_muls32_riscv32
+#define helper_gvec_muls64 helper_gvec_muls64_riscv32
+#define helper_gvec_neg8 helper_gvec_neg8_riscv32
+#define helper_gvec_neg16 helper_gvec_neg16_riscv32
+#define helper_gvec_neg32 helper_gvec_neg32_riscv32
+#define helper_gvec_neg64 helper_gvec_neg64_riscv32
+#define helper_gvec_abs8 helper_gvec_abs8_riscv32
+#define helper_gvec_abs16 helper_gvec_abs16_riscv32
+#define helper_gvec_abs32 helper_gvec_abs32_riscv32
+#define helper_gvec_abs64 helper_gvec_abs64_riscv32
+#define helper_gvec_mov helper_gvec_mov_riscv32
+#define helper_gvec_dup64 helper_gvec_dup64_riscv32
+#define helper_gvec_dup32 helper_gvec_dup32_riscv32
+#define helper_gvec_dup16 helper_gvec_dup16_riscv32
+#define helper_gvec_dup8 helper_gvec_dup8_riscv32
+#define helper_gvec_not helper_gvec_not_riscv32
+#define helper_gvec_and helper_gvec_and_riscv32
+#define helper_gvec_or helper_gvec_or_riscv32
+#define helper_gvec_xor helper_gvec_xor_riscv32
+#define helper_gvec_andc helper_gvec_andc_riscv32
+#define helper_gvec_orc helper_gvec_orc_riscv32
+#define helper_gvec_nand helper_gvec_nand_riscv32
+#define helper_gvec_nor helper_gvec_nor_riscv32
+#define helper_gvec_eqv helper_gvec_eqv_riscv32
+#define helper_gvec_ands helper_gvec_ands_riscv32
+#define helper_gvec_xors helper_gvec_xors_riscv32
+#define helper_gvec_ors helper_gvec_ors_riscv32
+#define helper_gvec_shl8i helper_gvec_shl8i_riscv32
+#define helper_gvec_shl16i helper_gvec_shl16i_riscv32
+#define helper_gvec_shl32i helper_gvec_shl32i_riscv32
+#define helper_gvec_shl64i helper_gvec_shl64i_riscv32
+#define helper_gvec_shr8i helper_gvec_shr8i_riscv32
+#define helper_gvec_shr16i helper_gvec_shr16i_riscv32
+#define helper_gvec_shr32i helper_gvec_shr32i_riscv32
+#define helper_gvec_shr64i helper_gvec_shr64i_riscv32
+#define helper_gvec_sar8i helper_gvec_sar8i_riscv32
+#define helper_gvec_sar16i helper_gvec_sar16i_riscv32
+#define helper_gvec_sar32i helper_gvec_sar32i_riscv32
+#define helper_gvec_sar64i helper_gvec_sar64i_riscv32
+#define helper_gvec_shl8v helper_gvec_shl8v_riscv32
+#define helper_gvec_shl16v helper_gvec_shl16v_riscv32
+#define helper_gvec_shl32v helper_gvec_shl32v_riscv32
+#define helper_gvec_shl64v helper_gvec_shl64v_riscv32
+#define helper_gvec_shr8v helper_gvec_shr8v_riscv32
+#define helper_gvec_shr16v helper_gvec_shr16v_riscv32
+#define helper_gvec_shr32v helper_gvec_shr32v_riscv32
+#define helper_gvec_shr64v helper_gvec_shr64v_riscv32
+#define helper_gvec_sar8v helper_gvec_sar8v_riscv32
+#define helper_gvec_sar16v helper_gvec_sar16v_riscv32
+#define helper_gvec_sar32v helper_gvec_sar32v_riscv32
+#define helper_gvec_sar64v helper_gvec_sar64v_riscv32
+#define helper_gvec_eq8 helper_gvec_eq8_riscv32
+#define helper_gvec_ne8 helper_gvec_ne8_riscv32
+#define helper_gvec_lt8 helper_gvec_lt8_riscv32
+#define helper_gvec_le8 helper_gvec_le8_riscv32
+#define helper_gvec_ltu8 helper_gvec_ltu8_riscv32
+#define helper_gvec_leu8 helper_gvec_leu8_riscv32
+#define helper_gvec_eq16 helper_gvec_eq16_riscv32
+#define helper_gvec_ne16 helper_gvec_ne16_riscv32
+#define helper_gvec_lt16 helper_gvec_lt16_riscv32
+#define helper_gvec_le16 helper_gvec_le16_riscv32
+#define helper_gvec_ltu16 helper_gvec_ltu16_riscv32
+#define helper_gvec_leu16 helper_gvec_leu16_riscv32
+#define helper_gvec_eq32 helper_gvec_eq32_riscv32
+#define helper_gvec_ne32 helper_gvec_ne32_riscv32
+#define helper_gvec_lt32 helper_gvec_lt32_riscv32
+#define helper_gvec_le32 helper_gvec_le32_riscv32
+#define helper_gvec_ltu32 helper_gvec_ltu32_riscv32
+#define helper_gvec_leu32 helper_gvec_leu32_riscv32
+#define helper_gvec_eq64 helper_gvec_eq64_riscv32
+#define helper_gvec_ne64 helper_gvec_ne64_riscv32
+#define helper_gvec_lt64 helper_gvec_lt64_riscv32
+#define helper_gvec_le64 helper_gvec_le64_riscv32
+#define helper_gvec_ltu64 helper_gvec_ltu64_riscv32
+#define helper_gvec_leu64 helper_gvec_leu64_riscv32
+#define helper_gvec_ssadd8 helper_gvec_ssadd8_riscv32
+#define helper_gvec_ssadd16 helper_gvec_ssadd16_riscv32
+#define helper_gvec_ssadd32 helper_gvec_ssadd32_riscv32
+#define helper_gvec_ssadd64 helper_gvec_ssadd64_riscv32
+#define helper_gvec_sssub8 helper_gvec_sssub8_riscv32
+#define helper_gvec_sssub16 helper_gvec_sssub16_riscv32
+#define helper_gvec_sssub32 helper_gvec_sssub32_riscv32
+#define helper_gvec_sssub64 helper_gvec_sssub64_riscv32
+#define helper_gvec_usadd8 helper_gvec_usadd8_riscv32
+#define helper_gvec_usadd16 helper_gvec_usadd16_riscv32
+#define helper_gvec_usadd32 helper_gvec_usadd32_riscv32
+#define helper_gvec_usadd64 helper_gvec_usadd64_riscv32
+#define helper_gvec_ussub8 helper_gvec_ussub8_riscv32
+#define helper_gvec_ussub16 helper_gvec_ussub16_riscv32
+#define helper_gvec_ussub32 helper_gvec_ussub32_riscv32
+#define helper_gvec_ussub64 helper_gvec_ussub64_riscv32
+#define helper_gvec_smin8 helper_gvec_smin8_riscv32
+#define helper_gvec_smin16 helper_gvec_smin16_riscv32
+#define helper_gvec_smin32 helper_gvec_smin32_riscv32
+#define helper_gvec_smin64 helper_gvec_smin64_riscv32
+#define helper_gvec_smax8 helper_gvec_smax8_riscv32
+#define helper_gvec_smax16 helper_gvec_smax16_riscv32
+#define helper_gvec_smax32 helper_gvec_smax32_riscv32
+#define helper_gvec_smax64 helper_gvec_smax64_riscv32
+#define helper_gvec_umin8 helper_gvec_umin8_riscv32
+#define helper_gvec_umin16 helper_gvec_umin16_riscv32
+#define helper_gvec_umin32 helper_gvec_umin32_riscv32
+#define helper_gvec_umin64 helper_gvec_umin64_riscv32
+#define helper_gvec_umax8 helper_gvec_umax8_riscv32
+#define helper_gvec_umax16 helper_gvec_umax16_riscv32
+#define helper_gvec_umax32 helper_gvec_umax32_riscv32
+#define helper_gvec_umax64 helper_gvec_umax64_riscv32
+#define helper_gvec_bitsel helper_gvec_bitsel_riscv32
+#define cpu_restore_state cpu_restore_state_riscv32
+#define page_collection_lock page_collection_lock_riscv32
+#define page_collection_unlock page_collection_unlock_riscv32
+#define free_code_gen_buffer free_code_gen_buffer_riscv32
+#define tcg_exec_init tcg_exec_init_riscv32
+#define tb_cleanup tb_cleanup_riscv32
+#define tb_flush tb_flush_riscv32
+#define tb_phys_invalidate tb_phys_invalidate_riscv32
+#define tb_gen_code tb_gen_code_riscv32
+#define tb_exec_lock tb_exec_lock_riscv32
+#define tb_exec_unlock tb_exec_unlock_riscv32
+#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_riscv32
+#define tb_invalidate_phys_range tb_invalidate_phys_range_riscv32
+#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_riscv32
+#define tb_check_watchpoint tb_check_watchpoint_riscv32
+#define cpu_io_recompile cpu_io_recompile_riscv32
+#define tb_flush_jmp_cache tb_flush_jmp_cache_riscv32
+#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_riscv32
+#define translator_loop_temp_check translator_loop_temp_check_riscv32
+#define translator_loop translator_loop_riscv32
+#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_riscv32
+#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_riscv32
+#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_riscv32
+#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_riscv32
+#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_riscv32
+#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_riscv32
+#define unassigned_mem_ops unassigned_mem_ops_riscv32
+#define floatx80_infinity floatx80_infinity_riscv32
+#define dup_const_func dup_const_func_riscv32
+#define gen_helper_raise_exception gen_helper_raise_exception_riscv32
+#define gen_helper_raise_interrupt gen_helper_raise_interrupt_riscv32
+#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_riscv32
+#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_riscv32
+#define gen_helper_cpsr_read gen_helper_cpsr_read_riscv32
+#define gen_helper_cpsr_write gen_helper_cpsr_write_riscv32
+#define riscv_cpu_mmu_index riscv_cpu_mmu_index_riscv32
+#define riscv_cpu_exec_interrupt riscv_cpu_exec_interrupt_riscv32
+#define riscv_cpu_fp_enabled riscv_cpu_fp_enabled_riscv32
+#define riscv_cpu_swap_hypervisor_regs riscv_cpu_swap_hypervisor_regs_riscv32
+#define riscv_cpu_virt_enabled riscv_cpu_virt_enabled_riscv32
+#define riscv_cpu_set_virt_enabled riscv_cpu_set_virt_enabled_riscv32
+#define riscv_cpu_force_hs_excep_enabled riscv_cpu_force_hs_excep_enabled_riscv32
+#define riscv_cpu_set_force_hs_excep riscv_cpu_set_force_hs_excep_riscv32
+#define riscv_cpu_claim_interrupts riscv_cpu_claim_interrupts_riscv32
+#define riscv_cpu_update_mip riscv_cpu_update_mip_riscv32
+#define riscv_cpu_set_rdtime_fn riscv_cpu_set_rdtime_fn_riscv32
+#define riscv_cpu_set_mode riscv_cpu_set_mode_riscv32
+#define riscv_cpu_get_phys_page_debug riscv_cpu_get_phys_page_debug_riscv32
+#define riscv_cpu_do_transaction_failed riscv_cpu_do_transaction_failed_riscv32
+#define riscv_cpu_do_unaligned_access riscv_cpu_do_unaligned_access_riscv32
+#define riscv_cpu_tlb_fill riscv_cpu_tlb_fill_riscv32
+#define riscv_cpu_do_interrupt riscv_cpu_do_interrupt_riscv32
+#define riscv_get_csr_ops riscv_get_csr_ops_riscv32
+#define riscv_set_csr_ops riscv_set_csr_ops_riscv32
+#define riscv_csrrw riscv_csrrw_riscv32
+#define riscv_csrrw_debug riscv_csrrw_debug_riscv32
+#define riscv_cpu_get_fflags riscv_cpu_get_fflags_riscv32
+#define riscv_cpu_set_fflags riscv_cpu_set_fflags_riscv32
+#define helper_set_rounding_mode helper_set_rounding_mode_riscv32
+#define helper_fmadd_s helper_fmadd_s_riscv32
+#define helper_fmadd_d helper_fmadd_d_riscv32
+#define helper_fmsub_s helper_fmsub_s_riscv32
+#define helper_fmsub_d helper_fmsub_d_riscv32
+#define helper_fnmsub_s helper_fnmsub_s_riscv32
+#define helper_fnmsub_d helper_fnmsub_d_riscv32
+#define helper_fnmadd_s helper_fnmadd_s_riscv32
+#define helper_fnmadd_d helper_fnmadd_d_riscv32
+#define helper_fadd_s helper_fadd_s_riscv32
+#define helper_fsub_s helper_fsub_s_riscv32
+#define helper_fmul_s helper_fmul_s_riscv32
+#define helper_fdiv_s helper_fdiv_s_riscv32
+#define helper_fmin_s helper_fmin_s_riscv32
+#define helper_fmax_s helper_fmax_s_riscv32
+#define helper_fsqrt_s helper_fsqrt_s_riscv32
+#define helper_fle_s helper_fle_s_riscv32
+#define helper_flt_s helper_flt_s_riscv32
+#define helper_feq_s helper_feq_s_riscv32
+#define helper_fcvt_w_s helper_fcvt_w_s_riscv32
+#define helper_fcvt_wu_s helper_fcvt_wu_s_riscv32
+#define helper_fcvt_s_w helper_fcvt_s_w_riscv32
+#define helper_fcvt_s_wu helper_fcvt_s_wu_riscv32
+#define helper_fclass_s helper_fclass_s_riscv32
+#define helper_fadd_d helper_fadd_d_riscv32
+#define helper_fsub_d helper_fsub_d_riscv32
+#define helper_fmul_d helper_fmul_d_riscv32
+#define helper_fdiv_d helper_fdiv_d_riscv32
+#define helper_fmin_d helper_fmin_d_riscv32
+#define helper_fmax_d helper_fmax_d_riscv32
+#define helper_fcvt_s_d helper_fcvt_s_d_riscv32
+#define helper_fcvt_d_s helper_fcvt_d_s_riscv32
+#define helper_fsqrt_d helper_fsqrt_d_riscv32
+#define helper_fle_d helper_fle_d_riscv32
+#define helper_flt_d helper_flt_d_riscv32
+#define helper_feq_d helper_feq_d_riscv32
+#define helper_fcvt_w_d helper_fcvt_w_d_riscv32
+#define helper_fcvt_wu_d helper_fcvt_wu_d_riscv32
+#define helper_fcvt_d_w helper_fcvt_d_w_riscv32
+#define helper_fcvt_d_wu helper_fcvt_d_wu_riscv32
+#define helper_fclass_d helper_fclass_d_riscv32
+#define riscv_raise_exception riscv_raise_exception_riscv32
+#define helper_raise_exception helper_raise_exception_riscv32
+#define helper_uc_riscv_exit helper_uc_riscv_exit_riscv32
+#define helper_csrrw helper_csrrw_riscv32
+#define helper_csrrs helper_csrrs_riscv32
+#define helper_csrrc helper_csrrc_riscv32
+#define helper_sret helper_sret_riscv32
+#define helper_mret helper_mret_riscv32
+#define helper_wfi helper_wfi_riscv32
+#define helper_tlb_flush helper_tlb_flush_riscv32
+#define pmp_hart_has_privs pmp_hart_has_privs_riscv32
+#define pmpcfg_csr_write pmpcfg_csr_write_riscv32
+#define pmpcfg_csr_read pmpcfg_csr_read_riscv32
+#define pmpaddr_csr_write pmpaddr_csr_write_riscv32
+#define pmpaddr_csr_read pmpaddr_csr_read_riscv32
+#define gen_intermediate_code gen_intermediate_code_riscv32
+#define riscv_translate_init riscv_translate_init_riscv32
+#define restore_state_to_opc restore_state_to_opc_riscv32
+#define cpu_riscv_init cpu_riscv_init_riscv32
+#define riscv_reg_reset riscv_reg_reset_riscv32
+#define riscv_reg_read riscv_reg_read_riscv32
+#define riscv_reg_write riscv_reg_write_riscv32
+#define helper_fcvt_l_s helper_fcvt_l_s_riscv32
+#define helper_fcvt_lu_s helper_fcvt_lu_s_riscv32
+#define helper_fcvt_s_l helper_fcvt_s_l_riscv32
+#define helper_fcvt_s_lu helper_fcvt_s_lu_riscv32
+#define helper_fcvt_l_d helper_fcvt_l_d_riscv32
+#define helper_fcvt_lu_d helper_fcvt_lu_d_riscv32
+#define helper_fcvt_d_l helper_fcvt_d_l_riscv32
+#define helper_fcvt_d_lu helper_fcvt_d_lu_riscv32
+#define gen_helper_tlb_flush gen_helper_tlb_flush_riscv32
+#define riscv_fpr_regnames riscv_fpr_regnames_riscv32
+#define riscv_int_regnames riscv_int_regnames_riscv32
+#endif
diff --git a/qemu/riscv64.h b/qemu/riscv64.h
new file mode 100644
index 00000000..ada4239a
--- /dev/null
+++ b/qemu/riscv64.h
@@ -0,0 +1,1373 @@
+/* Autogen header for Unicorn Engine - DONOT MODIFY */
+#ifndef UNICORN_AUTOGEN_riscv64_H
+#define UNICORN_AUTOGEN_riscv64_H
+#ifndef UNICORN_ARCH_POSTFIX
+#define UNICORN_ARCH_POSTFIX _riscv64
+#endif
+#define arm_arch arm_arch_riscv64
+#define tb_target_set_jmp_target tb_target_set_jmp_target_riscv64
+#define have_bmi1 have_bmi1_riscv64
+#define have_popcnt have_popcnt_riscv64
+#define have_avx1 have_avx1_riscv64
+#define have_avx2 have_avx2_riscv64
+#define have_isa have_isa_riscv64
+#define have_altivec have_altivec_riscv64
+#define have_vsx have_vsx_riscv64
+#define flush_icache_range flush_icache_range_riscv64
+#define s390_facilities s390_facilities_riscv64
+#define tcg_dump_op tcg_dump_op_riscv64
+#define tcg_dump_ops tcg_dump_ops_riscv64
+#define tcg_gen_and_i64 tcg_gen_and_i64_riscv64
+#define tcg_gen_discard_i64 tcg_gen_discard_i64_riscv64
+#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_riscv64
+#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_riscv64
+#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_riscv64
+#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_riscv64
+#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_riscv64
+#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_riscv64
+#define tcg_gen_ld_i64 tcg_gen_ld_i64_riscv64
+#define tcg_gen_mov_i64 tcg_gen_mov_i64_riscv64
+#define tcg_gen_movi_i64 tcg_gen_movi_i64_riscv64
+#define tcg_gen_mul_i64 tcg_gen_mul_i64_riscv64
+#define tcg_gen_or_i64 tcg_gen_or_i64_riscv64
+#define tcg_gen_sar_i64 tcg_gen_sar_i64_riscv64
+#define tcg_gen_shl_i64 tcg_gen_shl_i64_riscv64
+#define tcg_gen_shr_i64 tcg_gen_shr_i64_riscv64
+#define tcg_gen_st_i64 tcg_gen_st_i64_riscv64
+#define tcg_gen_xor_i64 tcg_gen_xor_i64_riscv64
+#define cpu_icount_to_ns cpu_icount_to_ns_riscv64
+#define cpu_is_stopped cpu_is_stopped_riscv64
+#define cpu_get_ticks cpu_get_ticks_riscv64
+#define cpu_get_clock cpu_get_clock_riscv64
+#define cpu_resume cpu_resume_riscv64
+#define qemu_init_vcpu qemu_init_vcpu_riscv64
+#define cpu_stop_current cpu_stop_current_riscv64
+#define resume_all_vcpus resume_all_vcpus_riscv64
+#define vm_start vm_start_riscv64
+#define address_space_dispatch_compact address_space_dispatch_compact_riscv64
+#define flatview_translate flatview_translate_riscv64
+#define address_space_translate_for_iotlb address_space_translate_for_iotlb_riscv64
+#define qemu_get_cpu qemu_get_cpu_riscv64
+#define cpu_address_space_init cpu_address_space_init_riscv64
+#define cpu_get_address_space cpu_get_address_space_riscv64
+#define cpu_exec_unrealizefn cpu_exec_unrealizefn_riscv64
+#define cpu_exec_initfn cpu_exec_initfn_riscv64
+#define cpu_exec_realizefn cpu_exec_realizefn_riscv64
+#define tb_invalidate_phys_addr tb_invalidate_phys_addr_riscv64
+#define cpu_watchpoint_insert cpu_watchpoint_insert_riscv64
+#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_riscv64
+#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_riscv64
+#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_riscv64
+#define cpu_breakpoint_insert cpu_breakpoint_insert_riscv64
+#define cpu_breakpoint_remove cpu_breakpoint_remove_riscv64
+#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_riscv64
+#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_riscv64
+#define cpu_abort cpu_abort_riscv64
+#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_riscv64
+#define memory_region_section_get_iotlb memory_region_section_get_iotlb_riscv64
+#define flatview_add_to_dispatch flatview_add_to_dispatch_riscv64
+#define qemu_ram_get_host_addr qemu_ram_get_host_addr_riscv64
+#define qemu_ram_get_offset qemu_ram_get_offset_riscv64
+#define qemu_ram_get_used_length qemu_ram_get_used_length_riscv64
+#define qemu_ram_is_shared qemu_ram_is_shared_riscv64
+#define qemu_ram_pagesize qemu_ram_pagesize_riscv64
+#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_riscv64
+#define qemu_ram_alloc qemu_ram_alloc_riscv64
+#define qemu_ram_free qemu_ram_free_riscv64
+#define qemu_map_ram_ptr qemu_map_ram_ptr_riscv64
+#define qemu_ram_block_host_offset qemu_ram_block_host_offset_riscv64
+#define qemu_ram_block_from_host qemu_ram_block_from_host_riscv64
+#define qemu_ram_addr_from_host qemu_ram_addr_from_host_riscv64
+#define cpu_check_watchpoint cpu_check_watchpoint_riscv64
+#define iotlb_to_section iotlb_to_section_riscv64
+#define address_space_dispatch_new address_space_dispatch_new_riscv64
+#define address_space_dispatch_free address_space_dispatch_free_riscv64
+#define flatview_read_continue flatview_read_continue_riscv64
+#define address_space_read_full address_space_read_full_riscv64
+#define address_space_write address_space_write_riscv64
+#define address_space_rw address_space_rw_riscv64
+#define cpu_physical_memory_rw cpu_physical_memory_rw_riscv64
+#define address_space_write_rom address_space_write_rom_riscv64
+#define cpu_flush_icache_range cpu_flush_icache_range_riscv64
+#define cpu_exec_init_all cpu_exec_init_all_riscv64
+#define address_space_access_valid address_space_access_valid_riscv64
+#define address_space_map address_space_map_riscv64
+#define address_space_unmap address_space_unmap_riscv64
+#define cpu_physical_memory_map cpu_physical_memory_map_riscv64
+#define cpu_physical_memory_unmap cpu_physical_memory_unmap_riscv64
+#define cpu_memory_rw_debug cpu_memory_rw_debug_riscv64
+#define qemu_target_page_size qemu_target_page_size_riscv64
+#define qemu_target_page_bits qemu_target_page_bits_riscv64
+#define qemu_target_page_bits_min qemu_target_page_bits_min_riscv64
+#define target_words_bigendian target_words_bigendian_riscv64
+#define cpu_physical_memory_is_io cpu_physical_memory_is_io_riscv64
+#define ram_block_discard_range ram_block_discard_range_riscv64
+#define ramblock_is_pmem ramblock_is_pmem_riscv64
+#define page_size_init page_size_init_riscv64
+#define set_preferred_target_page_bits set_preferred_target_page_bits_riscv64
+#define finalize_target_page_bits finalize_target_page_bits_riscv64
+#define cpu_outb cpu_outb_riscv64
+#define cpu_outw cpu_outw_riscv64
+#define cpu_outl cpu_outl_riscv64
+#define cpu_inb cpu_inb_riscv64
+#define cpu_inw cpu_inw_riscv64
+#define cpu_inl cpu_inl_riscv64
+#define memory_map memory_map_riscv64
+#define memory_map_io memory_map_io_riscv64
+#define memory_map_ptr memory_map_ptr_riscv64
+#define memory_unmap memory_unmap_riscv64
+#define memory_free memory_free_riscv64
+#define flatview_unref flatview_unref_riscv64
+#define address_space_get_flatview address_space_get_flatview_riscv64
+#define memory_region_transaction_begin memory_region_transaction_begin_riscv64
+#define memory_region_transaction_commit memory_region_transaction_commit_riscv64
+#define memory_region_init memory_region_init_riscv64
+#define memory_region_access_valid memory_region_access_valid_riscv64
+#define memory_region_dispatch_read memory_region_dispatch_read_riscv64
+#define memory_region_dispatch_write memory_region_dispatch_write_riscv64
+#define memory_region_init_io memory_region_init_io_riscv64
+#define memory_region_init_ram_ptr memory_region_init_ram_ptr_riscv64
+#define memory_region_size memory_region_size_riscv64
+#define memory_region_set_readonly memory_region_set_readonly_riscv64
+#define memory_region_get_ram_ptr memory_region_get_ram_ptr_riscv64
+#define memory_region_from_host memory_region_from_host_riscv64
+#define memory_region_get_ram_addr memory_region_get_ram_addr_riscv64
+#define memory_region_add_subregion memory_region_add_subregion_riscv64
+#define memory_region_del_subregion memory_region_del_subregion_riscv64
+#define memory_region_find memory_region_find_riscv64
+#define memory_listener_register memory_listener_register_riscv64
+#define memory_listener_unregister memory_listener_unregister_riscv64
+#define address_space_remove_listeners address_space_remove_listeners_riscv64
+#define address_space_init address_space_init_riscv64
+#define address_space_destroy address_space_destroy_riscv64
+#define memory_region_init_ram memory_region_init_ram_riscv64
+#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_riscv64
+#define exec_inline_op exec_inline_op_riscv64
+#define floatx80_default_nan floatx80_default_nan_riscv64
+#define float_raise float_raise_riscv64
+#define float16_is_quiet_nan float16_is_quiet_nan_riscv64
+#define float16_is_signaling_nan float16_is_signaling_nan_riscv64
+#define float32_is_quiet_nan float32_is_quiet_nan_riscv64
+#define float32_is_signaling_nan float32_is_signaling_nan_riscv64
+#define float64_is_quiet_nan float64_is_quiet_nan_riscv64
+#define float64_is_signaling_nan float64_is_signaling_nan_riscv64
+#define floatx80_is_quiet_nan floatx80_is_quiet_nan_riscv64
+#define floatx80_is_signaling_nan floatx80_is_signaling_nan_riscv64
+#define floatx80_silence_nan floatx80_silence_nan_riscv64
+#define propagateFloatx80NaN propagateFloatx80NaN_riscv64
+#define float128_is_quiet_nan float128_is_quiet_nan_riscv64
+#define float128_is_signaling_nan float128_is_signaling_nan_riscv64
+#define float128_silence_nan float128_silence_nan_riscv64
+#define float16_add float16_add_riscv64
+#define float16_sub float16_sub_riscv64
+#define float32_add float32_add_riscv64
+#define float32_sub float32_sub_riscv64
+#define float64_add float64_add_riscv64
+#define float64_sub float64_sub_riscv64
+#define float16_mul float16_mul_riscv64
+#define float32_mul float32_mul_riscv64
+#define float64_mul float64_mul_riscv64
+#define float16_muladd float16_muladd_riscv64
+#define float32_muladd float32_muladd_riscv64
+#define float64_muladd float64_muladd_riscv64
+#define float16_div float16_div_riscv64
+#define float32_div float32_div_riscv64
+#define float64_div float64_div_riscv64
+#define float16_to_float32 float16_to_float32_riscv64
+#define float16_to_float64 float16_to_float64_riscv64
+#define float32_to_float16 float32_to_float16_riscv64
+#define float32_to_float64 float32_to_float64_riscv64
+#define float64_to_float16 float64_to_float16_riscv64
+#define float64_to_float32 float64_to_float32_riscv64
+#define float16_round_to_int float16_round_to_int_riscv64
+#define float32_round_to_int float32_round_to_int_riscv64
+#define float64_round_to_int float64_round_to_int_riscv64
+#define float16_to_int16_scalbn float16_to_int16_scalbn_riscv64
+#define float16_to_int32_scalbn float16_to_int32_scalbn_riscv64
+#define float16_to_int64_scalbn float16_to_int64_scalbn_riscv64
+#define float32_to_int16_scalbn float32_to_int16_scalbn_riscv64
+#define float32_to_int32_scalbn float32_to_int32_scalbn_riscv64
+#define float32_to_int64_scalbn float32_to_int64_scalbn_riscv64
+#define float64_to_int16_scalbn float64_to_int16_scalbn_riscv64
+#define float64_to_int32_scalbn float64_to_int32_scalbn_riscv64
+#define float64_to_int64_scalbn float64_to_int64_scalbn_riscv64
+#define float16_to_int16 float16_to_int16_riscv64
+#define float16_to_int32 float16_to_int32_riscv64
+#define float16_to_int64 float16_to_int64_riscv64
+#define float32_to_int16 float32_to_int16_riscv64
+#define float32_to_int32 float32_to_int32_riscv64
+#define float32_to_int64 float32_to_int64_riscv64
+#define float64_to_int16 float64_to_int16_riscv64
+#define float64_to_int32 float64_to_int32_riscv64
+#define float64_to_int64 float64_to_int64_riscv64
+#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_riscv64
+#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_riscv64
+#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_riscv64
+#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_riscv64
+#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_riscv64
+#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_riscv64
+#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_riscv64
+#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_riscv64
+#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_riscv64
+#define float16_to_uint16_scalbn float16_to_uint16_scalbn_riscv64
+#define float16_to_uint32_scalbn float16_to_uint32_scalbn_riscv64
+#define float16_to_uint64_scalbn float16_to_uint64_scalbn_riscv64
+#define float32_to_uint16_scalbn float32_to_uint16_scalbn_riscv64
+#define float32_to_uint32_scalbn float32_to_uint32_scalbn_riscv64
+#define float32_to_uint64_scalbn float32_to_uint64_scalbn_riscv64
+#define float64_to_uint16_scalbn float64_to_uint16_scalbn_riscv64
+#define float64_to_uint32_scalbn float64_to_uint32_scalbn_riscv64
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_riscv64
+#define float16_to_uint16 float16_to_uint16_riscv64
+#define float16_to_uint32 float16_to_uint32_riscv64
+#define float16_to_uint64 float16_to_uint64_riscv64
+#define float32_to_uint16 float32_to_uint16_riscv64
+#define float32_to_uint32 float32_to_uint32_riscv64
+#define float32_to_uint64 float32_to_uint64_riscv64
+#define float64_to_uint16 float64_to_uint16_riscv64
+#define float64_to_uint32 float64_to_uint32_riscv64
+#define float64_to_uint64 float64_to_uint64_riscv64
+#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_riscv64
+#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_riscv64
+#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_riscv64
+#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_riscv64
+#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_riscv64
+#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_riscv64
+#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_riscv64
+#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_riscv64
+#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_riscv64
+#define int64_to_float16_scalbn int64_to_float16_scalbn_riscv64
+#define int32_to_float16_scalbn int32_to_float16_scalbn_riscv64
+#define int16_to_float16_scalbn int16_to_float16_scalbn_riscv64
+#define int64_to_float16 int64_to_float16_riscv64
+#define int32_to_float16 int32_to_float16_riscv64
+#define int16_to_float16 int16_to_float16_riscv64
+#define int64_to_float32_scalbn int64_to_float32_scalbn_riscv64
+#define int32_to_float32_scalbn int32_to_float32_scalbn_riscv64
+#define int16_to_float32_scalbn int16_to_float32_scalbn_riscv64
+#define int64_to_float32 int64_to_float32_riscv64
+#define int32_to_float32 int32_to_float32_riscv64
+#define int16_to_float32 int16_to_float32_riscv64
+#define int64_to_float64_scalbn int64_to_float64_scalbn_riscv64
+#define int32_to_float64_scalbn int32_to_float64_scalbn_riscv64
+#define int16_to_float64_scalbn int16_to_float64_scalbn_riscv64
+#define int64_to_float64 int64_to_float64_riscv64
+#define int32_to_float64 int32_to_float64_riscv64
+#define int16_to_float64 int16_to_float64_riscv64
+#define uint64_to_float16_scalbn uint64_to_float16_scalbn_riscv64
+#define uint32_to_float16_scalbn uint32_to_float16_scalbn_riscv64
+#define uint16_to_float16_scalbn uint16_to_float16_scalbn_riscv64
+#define uint64_to_float16 uint64_to_float16_riscv64
+#define uint32_to_float16 uint32_to_float16_riscv64
+#define uint16_to_float16 uint16_to_float16_riscv64
+#define uint64_to_float32_scalbn uint64_to_float32_scalbn_riscv64
+#define uint32_to_float32_scalbn uint32_to_float32_scalbn_riscv64
+#define uint16_to_float32_scalbn uint16_to_float32_scalbn_riscv64
+#define uint64_to_float32 uint64_to_float32_riscv64
+#define uint32_to_float32 uint32_to_float32_riscv64
+#define uint16_to_float32 uint16_to_float32_riscv64
+#define uint64_to_float64_scalbn uint64_to_float64_scalbn_riscv64
+#define uint32_to_float64_scalbn uint32_to_float64_scalbn_riscv64
+#define uint16_to_float64_scalbn uint16_to_float64_scalbn_riscv64
+#define uint64_to_float64 uint64_to_float64_riscv64
+#define uint32_to_float64 uint32_to_float64_riscv64
+#define uint16_to_float64 uint16_to_float64_riscv64
+#define float16_min float16_min_riscv64
+#define float16_minnum float16_minnum_riscv64
+#define float16_minnummag float16_minnummag_riscv64
+#define float16_max float16_max_riscv64
+#define float16_maxnum float16_maxnum_riscv64
+#define float16_maxnummag float16_maxnummag_riscv64
+#define float32_min float32_min_riscv64
+#define float32_minnum float32_minnum_riscv64
+#define float32_minnummag float32_minnummag_riscv64
+#define float32_max float32_max_riscv64
+#define float32_maxnum float32_maxnum_riscv64
+#define float32_maxnummag float32_maxnummag_riscv64
+#define float64_min float64_min_riscv64
+#define float64_minnum float64_minnum_riscv64
+#define float64_minnummag float64_minnummag_riscv64
+#define float64_max float64_max_riscv64
+#define float64_maxnum float64_maxnum_riscv64
+#define float64_maxnummag float64_maxnummag_riscv64
+#define float16_compare float16_compare_riscv64
+#define float16_compare_quiet float16_compare_quiet_riscv64
+#define float32_compare float32_compare_riscv64
+#define float32_compare_quiet float32_compare_quiet_riscv64
+#define float64_compare float64_compare_riscv64
+#define float64_compare_quiet float64_compare_quiet_riscv64
+#define float16_scalbn float16_scalbn_riscv64
+#define float32_scalbn float32_scalbn_riscv64
+#define float64_scalbn float64_scalbn_riscv64
+#define float16_sqrt float16_sqrt_riscv64
+#define float32_sqrt float32_sqrt_riscv64
+#define float64_sqrt float64_sqrt_riscv64
+#define float16_default_nan float16_default_nan_riscv64
+#define float32_default_nan float32_default_nan_riscv64
+#define float64_default_nan float64_default_nan_riscv64
+#define float128_default_nan float128_default_nan_riscv64
+#define float16_silence_nan float16_silence_nan_riscv64
+#define float32_silence_nan float32_silence_nan_riscv64
+#define float64_silence_nan float64_silence_nan_riscv64
+#define float16_squash_input_denormal float16_squash_input_denormal_riscv64
+#define float32_squash_input_denormal float32_squash_input_denormal_riscv64
+#define float64_squash_input_denormal float64_squash_input_denormal_riscv64
+#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_riscv64
+#define roundAndPackFloatx80 roundAndPackFloatx80_riscv64
+#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_riscv64
+#define int32_to_floatx80 int32_to_floatx80_riscv64
+#define int32_to_float128 int32_to_float128_riscv64
+#define int64_to_floatx80 int64_to_floatx80_riscv64
+#define int64_to_float128 int64_to_float128_riscv64
+#define uint64_to_float128 uint64_to_float128_riscv64
+#define float32_to_floatx80 float32_to_floatx80_riscv64
+#define float32_to_float128 float32_to_float128_riscv64
+#define float32_rem float32_rem_riscv64
+#define float32_exp2 float32_exp2_riscv64
+#define float32_log2 float32_log2_riscv64
+#define float32_eq float32_eq_riscv64
+#define float32_le float32_le_riscv64
+#define float32_lt float32_lt_riscv64
+#define float32_unordered float32_unordered_riscv64
+#define float32_eq_quiet float32_eq_quiet_riscv64
+#define float32_le_quiet float32_le_quiet_riscv64
+#define float32_lt_quiet float32_lt_quiet_riscv64
+#define float32_unordered_quiet float32_unordered_quiet_riscv64
+#define float64_to_floatx80 float64_to_floatx80_riscv64
+#define float64_to_float128 float64_to_float128_riscv64
+#define float64_rem float64_rem_riscv64
+#define float64_log2 float64_log2_riscv64
+#define float64_eq float64_eq_riscv64
+#define float64_le float64_le_riscv64
+#define float64_lt float64_lt_riscv64
+#define float64_unordered float64_unordered_riscv64
+#define float64_eq_quiet float64_eq_quiet_riscv64
+#define float64_le_quiet float64_le_quiet_riscv64
+#define float64_lt_quiet float64_lt_quiet_riscv64
+#define float64_unordered_quiet float64_unordered_quiet_riscv64
+#define floatx80_to_int32 floatx80_to_int32_riscv64
+#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_riscv64
+#define floatx80_to_int64 floatx80_to_int64_riscv64
+#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_riscv64
+#define floatx80_to_float32 floatx80_to_float32_riscv64
+#define floatx80_to_float64 floatx80_to_float64_riscv64
+#define floatx80_to_float128 floatx80_to_float128_riscv64
+#define floatx80_round floatx80_round_riscv64
+#define floatx80_round_to_int floatx80_round_to_int_riscv64
+#define floatx80_add floatx80_add_riscv64
+#define floatx80_sub floatx80_sub_riscv64
+#define floatx80_mul floatx80_mul_riscv64
+#define floatx80_div floatx80_div_riscv64
+#define floatx80_rem floatx80_rem_riscv64
+#define floatx80_sqrt floatx80_sqrt_riscv64
+#define floatx80_eq floatx80_eq_riscv64
+#define floatx80_le floatx80_le_riscv64
+#define floatx80_lt floatx80_lt_riscv64
+#define floatx80_unordered floatx80_unordered_riscv64
+#define floatx80_eq_quiet floatx80_eq_quiet_riscv64
+#define floatx80_le_quiet floatx80_le_quiet_riscv64
+#define floatx80_lt_quiet floatx80_lt_quiet_riscv64
+#define floatx80_unordered_quiet floatx80_unordered_quiet_riscv64
+#define float128_to_int32 float128_to_int32_riscv64
+#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_riscv64
+#define float128_to_int64 float128_to_int64_riscv64
+#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_riscv64
+#define float128_to_uint64 float128_to_uint64_riscv64
+#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_riscv64
+#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_riscv64
+#define float128_to_uint32 float128_to_uint32_riscv64
+#define float128_to_float32 float128_to_float32_riscv64
+#define float128_to_float64 float128_to_float64_riscv64
+#define float128_to_floatx80 float128_to_floatx80_riscv64
+#define float128_round_to_int float128_round_to_int_riscv64
+#define float128_add float128_add_riscv64
+#define float128_sub float128_sub_riscv64
+#define float128_mul float128_mul_riscv64
+#define float128_div float128_div_riscv64
+#define float128_rem float128_rem_riscv64
+#define float128_sqrt float128_sqrt_riscv64
+#define float128_eq float128_eq_riscv64
+#define float128_le float128_le_riscv64
+#define float128_lt float128_lt_riscv64
+#define float128_unordered float128_unordered_riscv64
+#define float128_eq_quiet float128_eq_quiet_riscv64
+#define float128_le_quiet float128_le_quiet_riscv64
+#define float128_lt_quiet float128_lt_quiet_riscv64
+#define float128_unordered_quiet float128_unordered_quiet_riscv64
+#define floatx80_compare floatx80_compare_riscv64
+#define floatx80_compare_quiet floatx80_compare_quiet_riscv64
+#define float128_compare float128_compare_riscv64
+#define float128_compare_quiet float128_compare_quiet_riscv64
+#define floatx80_scalbn floatx80_scalbn_riscv64
+#define float128_scalbn float128_scalbn_riscv64
+#define softfloat_init softfloat_init_riscv64
+#define tcg_optimize tcg_optimize_riscv64
+#define gen_new_label gen_new_label_riscv64
+#define tcg_can_emit_vec_op tcg_can_emit_vec_op_riscv64
+#define tcg_expand_vec_op tcg_expand_vec_op_riscv64
+#define tcg_register_jit tcg_register_jit_riscv64
+#define tcg_tb_insert tcg_tb_insert_riscv64
+#define tcg_tb_remove tcg_tb_remove_riscv64
+#define tcg_tb_lookup tcg_tb_lookup_riscv64
+#define tcg_tb_foreach tcg_tb_foreach_riscv64
+#define tcg_nb_tbs tcg_nb_tbs_riscv64
+#define tcg_region_reset_all tcg_region_reset_all_riscv64
+#define tcg_region_init tcg_region_init_riscv64
+#define tcg_code_size tcg_code_size_riscv64
+#define tcg_code_capacity tcg_code_capacity_riscv64
+#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_riscv64
+#define tcg_malloc_internal tcg_malloc_internal_riscv64
+#define tcg_pool_reset tcg_pool_reset_riscv64
+#define tcg_context_init tcg_context_init_riscv64
+#define tcg_tb_alloc tcg_tb_alloc_riscv64
+#define tcg_prologue_init tcg_prologue_init_riscv64
+#define tcg_func_start tcg_func_start_riscv64
+#define tcg_set_frame tcg_set_frame_riscv64
+#define tcg_global_mem_new_internal tcg_global_mem_new_internal_riscv64
+#define tcg_temp_new_internal tcg_temp_new_internal_riscv64
+#define tcg_temp_new_vec tcg_temp_new_vec_riscv64
+#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_riscv64
+#define tcg_temp_free_internal tcg_temp_free_internal_riscv64
+#define tcg_const_i32 tcg_const_i32_riscv64
+#define tcg_const_i64 tcg_const_i64_riscv64
+#define tcg_const_local_i32 tcg_const_local_i32_riscv64
+#define tcg_const_local_i64 tcg_const_local_i64_riscv64
+#define tcg_op_supported tcg_op_supported_riscv64
+#define tcg_gen_callN tcg_gen_callN_riscv64
+#define tcg_op_remove tcg_op_remove_riscv64
+#define tcg_emit_op tcg_emit_op_riscv64
+#define tcg_op_insert_before tcg_op_insert_before_riscv64
+#define tcg_op_insert_after tcg_op_insert_after_riscv64
+#define tcg_cpu_exec_time tcg_cpu_exec_time_riscv64
+#define tcg_gen_code tcg_gen_code_riscv64
+#define tcg_gen_op1 tcg_gen_op1_riscv64
+#define tcg_gen_op2 tcg_gen_op2_riscv64
+#define tcg_gen_op3 tcg_gen_op3_riscv64
+#define tcg_gen_op4 tcg_gen_op4_riscv64
+#define tcg_gen_op5 tcg_gen_op5_riscv64
+#define tcg_gen_op6 tcg_gen_op6_riscv64
+#define tcg_gen_mb tcg_gen_mb_riscv64
+#define tcg_gen_addi_i32 tcg_gen_addi_i32_riscv64
+#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_riscv64
+#define tcg_gen_subi_i32 tcg_gen_subi_i32_riscv64
+#define tcg_gen_andi_i32 tcg_gen_andi_i32_riscv64
+#define tcg_gen_ori_i32 tcg_gen_ori_i32_riscv64
+#define tcg_gen_xori_i32 tcg_gen_xori_i32_riscv64
+#define tcg_gen_shli_i32 tcg_gen_shli_i32_riscv64
+#define tcg_gen_shri_i32 tcg_gen_shri_i32_riscv64
+#define tcg_gen_sari_i32 tcg_gen_sari_i32_riscv64
+#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_riscv64
+#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_riscv64
+#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_riscv64
+#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_riscv64
+#define tcg_gen_muli_i32 tcg_gen_muli_i32_riscv64
+#define tcg_gen_div_i32 tcg_gen_div_i32_riscv64
+#define tcg_gen_rem_i32 tcg_gen_rem_i32_riscv64
+#define tcg_gen_divu_i32 tcg_gen_divu_i32_riscv64
+#define tcg_gen_remu_i32 tcg_gen_remu_i32_riscv64
+#define tcg_gen_andc_i32 tcg_gen_andc_i32_riscv64
+#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_riscv64
+#define tcg_gen_nand_i32 tcg_gen_nand_i32_riscv64
+#define tcg_gen_nor_i32 tcg_gen_nor_i32_riscv64
+#define tcg_gen_orc_i32 tcg_gen_orc_i32_riscv64
+#define tcg_gen_clz_i32 tcg_gen_clz_i32_riscv64
+#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_riscv64
+#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_riscv64
+#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_riscv64
+#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_riscv64
+#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_riscv64
+#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_riscv64
+#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_riscv64
+#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_riscv64
+#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_riscv64
+#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_riscv64
+#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_riscv64
+#define tcg_gen_extract_i32 tcg_gen_extract_i32_riscv64
+#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_riscv64
+#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_riscv64
+#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_riscv64
+#define tcg_gen_add2_i32 tcg_gen_add2_i32_riscv64
+#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_riscv64
+#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_riscv64
+#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_riscv64
+#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_riscv64
+#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_riscv64
+#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_riscv64
+#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_riscv64
+#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_riscv64
+#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_riscv64
+#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_riscv64
+#define tcg_gen_smin_i32 tcg_gen_smin_i32_riscv64
+#define tcg_gen_umin_i32 tcg_gen_umin_i32_riscv64
+#define tcg_gen_smax_i32 tcg_gen_smax_i32_riscv64
+#define tcg_gen_umax_i32 tcg_gen_umax_i32_riscv64
+#define tcg_gen_abs_i32 tcg_gen_abs_i32_riscv64
+#define tcg_gen_addi_i64
tcg_gen_addi_i64_riscv64 +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_riscv64 +#define tcg_gen_subi_i64 tcg_gen_subi_i64_riscv64 +#define tcg_gen_andi_i64 tcg_gen_andi_i64_riscv64 +#define tcg_gen_ori_i64 tcg_gen_ori_i64_riscv64 +#define tcg_gen_xori_i64 tcg_gen_xori_i64_riscv64 +#define tcg_gen_shli_i64 tcg_gen_shli_i64_riscv64 +#define tcg_gen_shri_i64 tcg_gen_shri_i64_riscv64 +#define tcg_gen_sari_i64 tcg_gen_sari_i64_riscv64 +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_riscv64 +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_riscv64 +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_riscv64 +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_riscv64 +#define tcg_gen_muli_i64 tcg_gen_muli_i64_riscv64 +#define tcg_gen_div_i64 tcg_gen_div_i64_riscv64 +#define tcg_gen_rem_i64 tcg_gen_rem_i64_riscv64 +#define tcg_gen_divu_i64 tcg_gen_divu_i64_riscv64 +#define tcg_gen_remu_i64 tcg_gen_remu_i64_riscv64 +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_riscv64 +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_riscv64 +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_riscv64 +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_riscv64 +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_riscv64 +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_riscv64 +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_riscv64 +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_riscv64 +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_riscv64 +#define tcg_gen_not_i64 tcg_gen_not_i64_riscv64 +#define tcg_gen_andc_i64 tcg_gen_andc_i64_riscv64 +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_riscv64 +#define tcg_gen_nand_i64 tcg_gen_nand_i64_riscv64 +#define tcg_gen_nor_i64 tcg_gen_nor_i64_riscv64 +#define tcg_gen_orc_i64 tcg_gen_orc_i64_riscv64 +#define tcg_gen_clz_i64 tcg_gen_clz_i64_riscv64 +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_riscv64 +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_riscv64 +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_riscv64 +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_riscv64 +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_riscv64 +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_riscv64 +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_riscv64 +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_riscv64 +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_riscv64 +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_riscv64 +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_riscv64 +#define tcg_gen_extract_i64 tcg_gen_extract_i64_riscv64 +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_riscv64 +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_riscv64 +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_riscv64 +#define tcg_gen_add2_i64 tcg_gen_add2_i64_riscv64 +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_riscv64 +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_riscv64 +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_riscv64 +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_riscv64 +#define tcg_gen_smin_i64 tcg_gen_smin_i64_riscv64 +#define tcg_gen_umin_i64 tcg_gen_umin_i64_riscv64 +#define tcg_gen_smax_i64 tcg_gen_smax_i64_riscv64 +#define tcg_gen_umax_i64 tcg_gen_umax_i64_riscv64 +#define tcg_gen_abs_i64 tcg_gen_abs_i64_riscv64 +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_riscv64 +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_riscv64 +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_riscv64 +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_riscv64 +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_riscv64 +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_riscv64 +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_riscv64 +#define tcg_gen_exit_tb tcg_gen_exit_tb_riscv64 
+#define tcg_gen_goto_tb tcg_gen_goto_tb_riscv64 +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_riscv64 +#define check_exit_request check_exit_request_riscv64 +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_riscv64 +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_riscv64 +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_riscv64 +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_riscv64 +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_riscv64 +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_riscv64 +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_riscv64 +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_riscv64 +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_riscv64 +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_riscv64 +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_riscv64 +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_riscv64 +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_riscv64 +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_riscv64 +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_riscv64 +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_riscv64 +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_riscv64 +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_riscv64 +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_riscv64 +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_riscv64 +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_riscv64 +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_riscv64 +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_riscv64 +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_riscv64 +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_riscv64 +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_riscv64 +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_riscv64 +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_riscv64 +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_riscv64 +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_riscv64 +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_riscv64 +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_riscv64 +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_riscv64 +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_riscv64 +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_riscv64 +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_riscv64 +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_riscv64 +#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_riscv64 +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_riscv64 +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_riscv64 +#define simd_desc simd_desc_riscv64 +#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_riscv64 +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_riscv64 +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_riscv64 +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_riscv64 +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_riscv64 +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_riscv64 +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_riscv64 +#define tcg_gen_gvec_4_ptr 
tcg_gen_gvec_4_ptr_riscv64 +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_riscv64 +#define tcg_gen_gvec_2 tcg_gen_gvec_2_riscv64 +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_riscv64 +#define tcg_gen_gvec_2s tcg_gen_gvec_2s_riscv64 +#define tcg_gen_gvec_3 tcg_gen_gvec_3_riscv64 +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_riscv64 +#define tcg_gen_gvec_4 tcg_gen_gvec_4_riscv64 +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_riscv64 +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_riscv64 +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_riscv64 +#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_riscv64 +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_riscv64 +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_riscv64 +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_riscv64 +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_riscv64 +#define tcg_gen_gvec_not tcg_gen_gvec_not_riscv64 +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_riscv64 +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_riscv64 +#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_riscv64 +#define tcg_gen_gvec_add tcg_gen_gvec_add_riscv64 +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_riscv64 +#define tcg_gen_gvec_addi tcg_gen_gvec_addi_riscv64 +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_riscv64 +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_riscv64 +#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_riscv64 +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_riscv64 +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_riscv64 +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_riscv64 +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_riscv64 +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_riscv64 +#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_riscv64 +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_riscv64 +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_riscv64 +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_riscv64 +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_riscv64 +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_riscv64 +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_riscv64 +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_riscv64 +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_riscv64 +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_riscv64 +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_riscv64 +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_riscv64 +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_riscv64 +#define tcg_gen_gvec_and tcg_gen_gvec_and_riscv64 +#define tcg_gen_gvec_or tcg_gen_gvec_or_riscv64 +#define tcg_gen_gvec_xor tcg_gen_gvec_xor_riscv64 +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_riscv64 +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_riscv64 +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_riscv64 +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_riscv64 +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_riscv64 +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_riscv64 +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_riscv64 +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_riscv64 +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_riscv64 +#define tcg_gen_gvec_ors tcg_gen_gvec_ors_riscv64 +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_riscv64 +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_riscv64 +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_riscv64 +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_riscv64 +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_riscv64 +#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_riscv64 +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_riscv64 +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_riscv64 +#define tcg_gen_vec_sar16i_i64 
tcg_gen_vec_sar16i_i64_riscv64 +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_riscv64 +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_riscv64 +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_riscv64 +#define tcg_gen_gvec_sars tcg_gen_gvec_sars_riscv64 +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_riscv64 +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_riscv64 +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_riscv64 +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_riscv64 +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_riscv64 +#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_riscv64 +#define vec_gen_2 vec_gen_2_riscv64 +#define vec_gen_3 vec_gen_3_riscv64 +#define vec_gen_4 vec_gen_4_riscv64 +#define tcg_gen_mov_vec tcg_gen_mov_vec_riscv64 +#define tcg_const_zeros_vec tcg_const_zeros_vec_riscv64 +#define tcg_const_ones_vec tcg_const_ones_vec_riscv64 +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_riscv64 +#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_riscv64 +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_riscv64 +#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_riscv64 +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_riscv64 +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_riscv64 +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_riscv64 +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_riscv64 +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_riscv64 +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_riscv64 +#define tcg_gen_ld_vec tcg_gen_ld_vec_riscv64 +#define tcg_gen_st_vec tcg_gen_st_vec_riscv64 +#define tcg_gen_stl_vec tcg_gen_stl_vec_riscv64 +#define tcg_gen_and_vec tcg_gen_and_vec_riscv64 +#define tcg_gen_or_vec tcg_gen_or_vec_riscv64 +#define tcg_gen_xor_vec tcg_gen_xor_vec_riscv64 +#define tcg_gen_andc_vec tcg_gen_andc_vec_riscv64 +#define tcg_gen_orc_vec tcg_gen_orc_vec_riscv64 +#define tcg_gen_nand_vec tcg_gen_nand_vec_riscv64 +#define tcg_gen_nor_vec tcg_gen_nor_vec_riscv64 +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_riscv64 +#define tcg_gen_not_vec tcg_gen_not_vec_riscv64 +#define tcg_gen_neg_vec tcg_gen_neg_vec_riscv64 +#define tcg_gen_abs_vec tcg_gen_abs_vec_riscv64 +#define tcg_gen_shli_vec tcg_gen_shli_vec_riscv64 +#define tcg_gen_shri_vec tcg_gen_shri_vec_riscv64 +#define tcg_gen_sari_vec tcg_gen_sari_vec_riscv64 +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_riscv64 +#define tcg_gen_add_vec tcg_gen_add_vec_riscv64 +#define tcg_gen_sub_vec tcg_gen_sub_vec_riscv64 +#define tcg_gen_mul_vec tcg_gen_mul_vec_riscv64 +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_riscv64 +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_riscv64 +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_riscv64 +#define tcg_gen_ussub_vec tcg_gen_ussub_vec_riscv64 +#define tcg_gen_smin_vec tcg_gen_smin_vec_riscv64 +#define tcg_gen_umin_vec tcg_gen_umin_vec_riscv64 +#define tcg_gen_smax_vec tcg_gen_smax_vec_riscv64 +#define tcg_gen_umax_vec tcg_gen_umax_vec_riscv64 +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_riscv64 +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_riscv64 +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_riscv64 +#define tcg_gen_shls_vec tcg_gen_shls_vec_riscv64 +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_riscv64 +#define tcg_gen_sars_vec tcg_gen_sars_vec_riscv64 +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_riscv64 +#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_riscv64 +#define tb_htable_lookup tb_htable_lookup_riscv64 +#define tb_set_jmp_target tb_set_jmp_target_riscv64 +#define cpu_exec cpu_exec_riscv64 +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_riscv64 +#define cpu_reloading_memory_map 
cpu_reloading_memory_map_riscv64 +#define cpu_loop_exit cpu_loop_exit_riscv64 +#define cpu_loop_exit_restore cpu_loop_exit_restore_riscv64 +#define cpu_loop_exit_atomic cpu_loop_exit_atomic_riscv64 +#define tlb_init tlb_init_riscv64 +#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_riscv64 +#define tlb_flush tlb_flush_riscv64 +#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_riscv64 +#define tlb_flush_all_cpus tlb_flush_all_cpus_riscv64 +#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_riscv64 +#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_riscv64 +#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_riscv64 +#define tlb_flush_page tlb_flush_page_riscv64 +#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_riscv64 +#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_riscv64 +#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_riscv64 +#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_riscv64 +#define tlb_protect_code tlb_protect_code_riscv64 +#define tlb_unprotect_code tlb_unprotect_code_riscv64 +#define tlb_reset_dirty tlb_reset_dirty_riscv64 +#define tlb_set_dirty tlb_set_dirty_riscv64 +#define tlb_set_page_with_attrs tlb_set_page_with_attrs_riscv64 +#define tlb_set_page tlb_set_page_riscv64 +#define get_page_addr_code_hostp get_page_addr_code_hostp_riscv64 +#define get_page_addr_code get_page_addr_code_riscv64 +#define probe_access probe_access_riscv64 +#define tlb_vaddr_to_host tlb_vaddr_to_host_riscv64 +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_riscv64 +#define helper_le_lduw_mmu helper_le_lduw_mmu_riscv64 +#define helper_be_lduw_mmu helper_be_lduw_mmu_riscv64 +#define helper_le_ldul_mmu helper_le_ldul_mmu_riscv64 +#define helper_be_ldul_mmu helper_be_ldul_mmu_riscv64 +#define helper_le_ldq_mmu helper_le_ldq_mmu_riscv64 +#define helper_be_ldq_mmu helper_be_ldq_mmu_riscv64 +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_riscv64 +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_riscv64 +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_riscv64 +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_riscv64 +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_riscv64 +#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_riscv64 +#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_riscv64 +#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_riscv64 +#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_riscv64 +#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_riscv64 +#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_riscv64 +#define cpu_ldub_data_ra cpu_ldub_data_ra_riscv64 +#define cpu_ldsb_data_ra cpu_ldsb_data_ra_riscv64 +#define cpu_lduw_data_ra cpu_lduw_data_ra_riscv64 +#define cpu_ldsw_data_ra cpu_ldsw_data_ra_riscv64 +#define cpu_ldl_data_ra cpu_ldl_data_ra_riscv64 +#define cpu_ldq_data_ra cpu_ldq_data_ra_riscv64 +#define cpu_ldub_data cpu_ldub_data_riscv64 +#define cpu_ldsb_data cpu_ldsb_data_riscv64 +#define cpu_lduw_data cpu_lduw_data_riscv64 +#define cpu_ldsw_data cpu_ldsw_data_riscv64 +#define cpu_ldl_data cpu_ldl_data_riscv64 +#define cpu_ldq_data cpu_ldq_data_riscv64 +#define helper_ret_stb_mmu helper_ret_stb_mmu_riscv64 +#define helper_le_stw_mmu helper_le_stw_mmu_riscv64 +#define helper_be_stw_mmu helper_be_stw_mmu_riscv64 +#define helper_le_stl_mmu helper_le_stl_mmu_riscv64 +#define helper_be_stl_mmu helper_be_stl_mmu_riscv64 +#define helper_le_stq_mmu helper_le_stq_mmu_riscv64 +#define helper_be_stq_mmu helper_be_stq_mmu_riscv64 +#define cpu_stb_mmuidx_ra 
cpu_stb_mmuidx_ra_riscv64 +#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_riscv64 +#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_riscv64 +#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_riscv64 +#define cpu_stb_data_ra cpu_stb_data_ra_riscv64 +#define cpu_stw_data_ra cpu_stw_data_ra_riscv64 +#define cpu_stl_data_ra cpu_stl_data_ra_riscv64 +#define cpu_stq_data_ra cpu_stq_data_ra_riscv64 +#define cpu_stb_data cpu_stb_data_riscv64 +#define cpu_stw_data cpu_stw_data_riscv64 +#define cpu_stl_data cpu_stl_data_riscv64 +#define cpu_stq_data cpu_stq_data_riscv64 +#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_riscv64 +#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_riscv64 +#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_riscv64 +#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_riscv64 +#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_riscv64 +#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_riscv64 +#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_riscv64 +#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_riscv64 +#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_riscv64 +#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_riscv64 +#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_riscv64 +#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_riscv64 +#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_riscv64 +#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_riscv64 +#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_riscv64 +#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_riscv64 +#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_riscv64 +#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_riscv64 +#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_riscv64 +#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_riscv64 +#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_riscv64 +#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_riscv64 +#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_riscv64 +#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_riscv64 +#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_riscv64 +#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_riscv64 +#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_riscv64 +#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_riscv64 +#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_riscv64 +#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_riscv64 +#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_riscv64 +#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_riscv64 +#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_riscv64 +#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_riscv64 +#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_riscv64 +#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_riscv64 +#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_riscv64 +#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_riscv64 +#define helper_atomic_fetch_andw_be_mmu 
helper_atomic_fetch_andw_be_mmu_riscv64 +#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_riscv64 +#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_riscv64 +#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_riscv64 +#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_riscv64 +#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_riscv64 +#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_riscv64 +#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_riscv64 +#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_riscv64 +#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_riscv64 +#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_riscv64 +#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_riscv64 +#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_riscv64 +#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_riscv64 +#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_riscv64 +#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_riscv64 +#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_riscv64 +#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_riscv64 +#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_riscv64 +#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_riscv64 +#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_riscv64 +#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_riscv64 +#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_riscv64 +#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_riscv64 +#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_riscv64 +#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_riscv64 +#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_riscv64 +#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_riscv64 +#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_riscv64 +#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_riscv64 +#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_riscv64 +#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_riscv64 +#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_riscv64 +#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_riscv64 +#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_riscv64 +#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_riscv64 +#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_riscv64 +#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_riscv64 +#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_riscv64 +#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_riscv64 +#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_riscv64 +#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_riscv64 +#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_riscv64 +#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_riscv64 +#define 
helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_riscv64 +#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_riscv64 +#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_riscv64 +#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_riscv64 +#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_riscv64 +#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_riscv64 +#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_riscv64 +#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_riscv64 +#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_riscv64 +#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_riscv64 +#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_riscv64 +#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_riscv64 +#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_riscv64 +#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_riscv64 +#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_riscv64 +#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_riscv64 +#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_riscv64 +#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_riscv64 +#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_riscv64 +#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_riscv64 +#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_riscv64 +#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_riscv64 +#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_riscv64 +#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_riscv64 +#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_riscv64 +#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_riscv64 +#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_riscv64 +#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_riscv64 +#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_riscv64 +#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_riscv64 +#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_riscv64 +#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_riscv64 +#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_riscv64 +#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_riscv64 +#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_riscv64 +#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_riscv64 +#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_riscv64 +#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_riscv64 +#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_riscv64 +#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_riscv64 +#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_riscv64 +#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_riscv64 +#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_riscv64 +#define helper_atomic_add_fetchq_be_mmu 
helper_atomic_add_fetchq_be_mmu_riscv64 +#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_riscv64 +#define helper_atomic_xchgb helper_atomic_xchgb_riscv64 +#define helper_atomic_fetch_addb helper_atomic_fetch_addb_riscv64 +#define helper_atomic_fetch_andb helper_atomic_fetch_andb_riscv64 +#define helper_atomic_fetch_orb helper_atomic_fetch_orb_riscv64 +#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_riscv64 +#define helper_atomic_add_fetchb helper_atomic_add_fetchb_riscv64 +#define helper_atomic_and_fetchb helper_atomic_and_fetchb_riscv64 +#define helper_atomic_or_fetchb helper_atomic_or_fetchb_riscv64 +#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_riscv64 +#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_riscv64 +#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_riscv64 +#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_riscv64 +#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_riscv64 +#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_riscv64 +#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_riscv64 +#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_riscv64 +#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_riscv64 +#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_riscv64 +#define helper_atomic_xchgw_le helper_atomic_xchgw_le_riscv64 +#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_riscv64 +#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_riscv64 +#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_riscv64 +#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_riscv64 +#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_riscv64 +#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_riscv64 +#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_riscv64 +#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_riscv64 +#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_riscv64 +#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_riscv64 +#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_riscv64 +#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_riscv64 +#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_riscv64 +#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_riscv64 +#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_riscv64 +#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_riscv64 +#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_riscv64 +#define helper_atomic_xchgw_be helper_atomic_xchgw_be_riscv64 +#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_riscv64 +#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_riscv64 +#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_riscv64 +#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_riscv64 +#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_riscv64 +#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_riscv64 +#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_riscv64 +#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_riscv64 +#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_riscv64 +#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_riscv64 +#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_riscv64 +#define helper_atomic_umin_fetchw_be 
helper_atomic_umin_fetchw_be_riscv64 +#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_riscv64 +#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_riscv64 +#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_riscv64 +#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_riscv64 +#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_riscv64 +#define helper_atomic_xchgl_le helper_atomic_xchgl_le_riscv64 +#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_riscv64 +#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_riscv64 +#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_riscv64 +#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_riscv64 +#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_riscv64 +#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_riscv64 +#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_riscv64 +#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_riscv64 +#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_riscv64 +#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_riscv64 +#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_riscv64 +#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_riscv64 +#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_riscv64 +#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_riscv64 +#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_riscv64 +#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_riscv64 +#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_riscv64 +#define helper_atomic_xchgl_be helper_atomic_xchgl_be_riscv64 +#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_riscv64 +#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_riscv64 +#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_riscv64 +#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_riscv64 +#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_riscv64 +#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_riscv64 +#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_riscv64 +#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_riscv64 +#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_riscv64 +#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_riscv64 +#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_riscv64 +#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_riscv64 +#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_riscv64 +#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_riscv64 +#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_riscv64 +#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_riscv64 +#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_riscv64 +#define helper_atomic_xchgq_le helper_atomic_xchgq_le_riscv64 +#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_riscv64 +#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_riscv64 +#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_riscv64 +#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_riscv64 +#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_riscv64 +#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_riscv64 +#define 
helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_riscv64 +#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_riscv64 +#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_riscv64 +#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_riscv64 +#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_riscv64 +#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_riscv64 +#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_riscv64 +#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_riscv64 +#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_riscv64 +#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_riscv64 +#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_riscv64 +#define helper_atomic_xchgq_be helper_atomic_xchgq_be_riscv64 +#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_riscv64 +#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_riscv64 +#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_riscv64 +#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_riscv64 +#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_riscv64 +#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_riscv64 +#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_riscv64 +#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_riscv64 +#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_riscv64 +#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_riscv64 +#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_riscv64 +#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_riscv64 +#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_riscv64 +#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_riscv64 +#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_riscv64 +#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_riscv64 +#define cpu_ldub_code cpu_ldub_code_riscv64 +#define cpu_lduw_code cpu_lduw_code_riscv64 +#define cpu_ldl_code cpu_ldl_code_riscv64 +#define cpu_ldq_code cpu_ldq_code_riscv64 +#define helper_div_i32 helper_div_i32_riscv64 +#define helper_rem_i32 helper_rem_i32_riscv64 +#define helper_divu_i32 helper_divu_i32_riscv64 +#define helper_remu_i32 helper_remu_i32_riscv64 +#define helper_shl_i64 helper_shl_i64_riscv64 +#define helper_shr_i64 helper_shr_i64_riscv64 +#define helper_sar_i64 helper_sar_i64_riscv64 +#define helper_div_i64 helper_div_i64_riscv64 +#define helper_rem_i64 helper_rem_i64_riscv64 +#define helper_divu_i64 helper_divu_i64_riscv64 +#define helper_remu_i64 helper_remu_i64_riscv64 +#define helper_muluh_i64 helper_muluh_i64_riscv64 +#define helper_mulsh_i64 helper_mulsh_i64_riscv64 +#define helper_clz_i32 helper_clz_i32_riscv64 +#define helper_ctz_i32 helper_ctz_i32_riscv64 +#define helper_clz_i64 helper_clz_i64_riscv64 +#define helper_ctz_i64 helper_ctz_i64_riscv64 +#define helper_clrsb_i32 helper_clrsb_i32_riscv64 +#define helper_clrsb_i64 helper_clrsb_i64_riscv64 +#define helper_ctpop_i32 helper_ctpop_i32_riscv64 +#define helper_ctpop_i64 helper_ctpop_i64_riscv64 +#define helper_lookup_tb_ptr helper_lookup_tb_ptr_riscv64 +#define helper_exit_atomic helper_exit_atomic_riscv64 +#define helper_gvec_add8 helper_gvec_add8_riscv64 +#define helper_gvec_add16 helper_gvec_add16_riscv64 +#define helper_gvec_add32 helper_gvec_add32_riscv64 +#define helper_gvec_add64 
helper_gvec_add64_riscv64 +#define helper_gvec_adds8 helper_gvec_adds8_riscv64 +#define helper_gvec_adds16 helper_gvec_adds16_riscv64 +#define helper_gvec_adds32 helper_gvec_adds32_riscv64 +#define helper_gvec_adds64 helper_gvec_adds64_riscv64 +#define helper_gvec_sub8 helper_gvec_sub8_riscv64 +#define helper_gvec_sub16 helper_gvec_sub16_riscv64 +#define helper_gvec_sub32 helper_gvec_sub32_riscv64 +#define helper_gvec_sub64 helper_gvec_sub64_riscv64 +#define helper_gvec_subs8 helper_gvec_subs8_riscv64 +#define helper_gvec_subs16 helper_gvec_subs16_riscv64 +#define helper_gvec_subs32 helper_gvec_subs32_riscv64 +#define helper_gvec_subs64 helper_gvec_subs64_riscv64 +#define helper_gvec_mul8 helper_gvec_mul8_riscv64 +#define helper_gvec_mul16 helper_gvec_mul16_riscv64 +#define helper_gvec_mul32 helper_gvec_mul32_riscv64 +#define helper_gvec_mul64 helper_gvec_mul64_riscv64 +#define helper_gvec_muls8 helper_gvec_muls8_riscv64 +#define helper_gvec_muls16 helper_gvec_muls16_riscv64 +#define helper_gvec_muls32 helper_gvec_muls32_riscv64 +#define helper_gvec_muls64 helper_gvec_muls64_riscv64 +#define helper_gvec_neg8 helper_gvec_neg8_riscv64 +#define helper_gvec_neg16 helper_gvec_neg16_riscv64 +#define helper_gvec_neg32 helper_gvec_neg32_riscv64 +#define helper_gvec_neg64 helper_gvec_neg64_riscv64 +#define helper_gvec_abs8 helper_gvec_abs8_riscv64 +#define helper_gvec_abs16 helper_gvec_abs16_riscv64 +#define helper_gvec_abs32 helper_gvec_abs32_riscv64 +#define helper_gvec_abs64 helper_gvec_abs64_riscv64 +#define helper_gvec_mov helper_gvec_mov_riscv64 +#define helper_gvec_dup64 helper_gvec_dup64_riscv64 +#define helper_gvec_dup32 helper_gvec_dup32_riscv64 +#define helper_gvec_dup16 helper_gvec_dup16_riscv64 +#define helper_gvec_dup8 helper_gvec_dup8_riscv64 +#define helper_gvec_not helper_gvec_not_riscv64 +#define helper_gvec_and helper_gvec_and_riscv64 +#define helper_gvec_or helper_gvec_or_riscv64 +#define helper_gvec_xor helper_gvec_xor_riscv64 +#define helper_gvec_andc helper_gvec_andc_riscv64 +#define helper_gvec_orc helper_gvec_orc_riscv64 +#define helper_gvec_nand helper_gvec_nand_riscv64 +#define helper_gvec_nor helper_gvec_nor_riscv64 +#define helper_gvec_eqv helper_gvec_eqv_riscv64 +#define helper_gvec_ands helper_gvec_ands_riscv64 +#define helper_gvec_xors helper_gvec_xors_riscv64 +#define helper_gvec_ors helper_gvec_ors_riscv64 +#define helper_gvec_shl8i helper_gvec_shl8i_riscv64 +#define helper_gvec_shl16i helper_gvec_shl16i_riscv64 +#define helper_gvec_shl32i helper_gvec_shl32i_riscv64 +#define helper_gvec_shl64i helper_gvec_shl64i_riscv64 +#define helper_gvec_shr8i helper_gvec_shr8i_riscv64 +#define helper_gvec_shr16i helper_gvec_shr16i_riscv64 +#define helper_gvec_shr32i helper_gvec_shr32i_riscv64 +#define helper_gvec_shr64i helper_gvec_shr64i_riscv64 +#define helper_gvec_sar8i helper_gvec_sar8i_riscv64 +#define helper_gvec_sar16i helper_gvec_sar16i_riscv64 +#define helper_gvec_sar32i helper_gvec_sar32i_riscv64 +#define helper_gvec_sar64i helper_gvec_sar64i_riscv64 +#define helper_gvec_shl8v helper_gvec_shl8v_riscv64 +#define helper_gvec_shl16v helper_gvec_shl16v_riscv64 +#define helper_gvec_shl32v helper_gvec_shl32v_riscv64 +#define helper_gvec_shl64v helper_gvec_shl64v_riscv64 +#define helper_gvec_shr8v helper_gvec_shr8v_riscv64 +#define helper_gvec_shr16v helper_gvec_shr16v_riscv64 +#define helper_gvec_shr32v helper_gvec_shr32v_riscv64 +#define helper_gvec_shr64v helper_gvec_shr64v_riscv64 +#define helper_gvec_sar8v helper_gvec_sar8v_riscv64 +#define helper_gvec_sar16v 
helper_gvec_sar16v_riscv64 +#define helper_gvec_sar32v helper_gvec_sar32v_riscv64 +#define helper_gvec_sar64v helper_gvec_sar64v_riscv64 +#define helper_gvec_eq8 helper_gvec_eq8_riscv64 +#define helper_gvec_ne8 helper_gvec_ne8_riscv64 +#define helper_gvec_lt8 helper_gvec_lt8_riscv64 +#define helper_gvec_le8 helper_gvec_le8_riscv64 +#define helper_gvec_ltu8 helper_gvec_ltu8_riscv64 +#define helper_gvec_leu8 helper_gvec_leu8_riscv64 +#define helper_gvec_eq16 helper_gvec_eq16_riscv64 +#define helper_gvec_ne16 helper_gvec_ne16_riscv64 +#define helper_gvec_lt16 helper_gvec_lt16_riscv64 +#define helper_gvec_le16 helper_gvec_le16_riscv64 +#define helper_gvec_ltu16 helper_gvec_ltu16_riscv64 +#define helper_gvec_leu16 helper_gvec_leu16_riscv64 +#define helper_gvec_eq32 helper_gvec_eq32_riscv64 +#define helper_gvec_ne32 helper_gvec_ne32_riscv64 +#define helper_gvec_lt32 helper_gvec_lt32_riscv64 +#define helper_gvec_le32 helper_gvec_le32_riscv64 +#define helper_gvec_ltu32 helper_gvec_ltu32_riscv64 +#define helper_gvec_leu32 helper_gvec_leu32_riscv64 +#define helper_gvec_eq64 helper_gvec_eq64_riscv64 +#define helper_gvec_ne64 helper_gvec_ne64_riscv64 +#define helper_gvec_lt64 helper_gvec_lt64_riscv64 +#define helper_gvec_le64 helper_gvec_le64_riscv64 +#define helper_gvec_ltu64 helper_gvec_ltu64_riscv64 +#define helper_gvec_leu64 helper_gvec_leu64_riscv64 +#define helper_gvec_ssadd8 helper_gvec_ssadd8_riscv64 +#define helper_gvec_ssadd16 helper_gvec_ssadd16_riscv64 +#define helper_gvec_ssadd32 helper_gvec_ssadd32_riscv64 +#define helper_gvec_ssadd64 helper_gvec_ssadd64_riscv64 +#define helper_gvec_sssub8 helper_gvec_sssub8_riscv64 +#define helper_gvec_sssub16 helper_gvec_sssub16_riscv64 +#define helper_gvec_sssub32 helper_gvec_sssub32_riscv64 +#define helper_gvec_sssub64 helper_gvec_sssub64_riscv64 +#define helper_gvec_usadd8 helper_gvec_usadd8_riscv64 +#define helper_gvec_usadd16 helper_gvec_usadd16_riscv64 +#define helper_gvec_usadd32 helper_gvec_usadd32_riscv64 +#define helper_gvec_usadd64 helper_gvec_usadd64_riscv64 +#define helper_gvec_ussub8 helper_gvec_ussub8_riscv64 +#define helper_gvec_ussub16 helper_gvec_ussub16_riscv64 +#define helper_gvec_ussub32 helper_gvec_ussub32_riscv64 +#define helper_gvec_ussub64 helper_gvec_ussub64_riscv64 +#define helper_gvec_smin8 helper_gvec_smin8_riscv64 +#define helper_gvec_smin16 helper_gvec_smin16_riscv64 +#define helper_gvec_smin32 helper_gvec_smin32_riscv64 +#define helper_gvec_smin64 helper_gvec_smin64_riscv64 +#define helper_gvec_smax8 helper_gvec_smax8_riscv64 +#define helper_gvec_smax16 helper_gvec_smax16_riscv64 +#define helper_gvec_smax32 helper_gvec_smax32_riscv64 +#define helper_gvec_smax64 helper_gvec_smax64_riscv64 +#define helper_gvec_umin8 helper_gvec_umin8_riscv64 +#define helper_gvec_umin16 helper_gvec_umin16_riscv64 +#define helper_gvec_umin32 helper_gvec_umin32_riscv64 +#define helper_gvec_umin64 helper_gvec_umin64_riscv64 +#define helper_gvec_umax8 helper_gvec_umax8_riscv64 +#define helper_gvec_umax16 helper_gvec_umax16_riscv64 +#define helper_gvec_umax32 helper_gvec_umax32_riscv64 +#define helper_gvec_umax64 helper_gvec_umax64_riscv64 +#define helper_gvec_bitsel helper_gvec_bitsel_riscv64 +#define cpu_restore_state cpu_restore_state_riscv64 +#define page_collection_lock page_collection_lock_riscv64 +#define page_collection_unlock page_collection_unlock_riscv64 +#define free_code_gen_buffer free_code_gen_buffer_riscv64 +#define tcg_exec_init tcg_exec_init_riscv64 +#define tb_cleanup tb_cleanup_riscv64 +#define tb_flush tb_flush_riscv64 
+#define tb_phys_invalidate tb_phys_invalidate_riscv64 +#define tb_gen_code tb_gen_code_riscv64 +#define tb_exec_lock tb_exec_lock_riscv64 +#define tb_exec_unlock tb_exec_unlock_riscv64 +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_riscv64 +#define tb_invalidate_phys_range tb_invalidate_phys_range_riscv64 +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_riscv64 +#define tb_check_watchpoint tb_check_watchpoint_riscv64 +#define cpu_io_recompile cpu_io_recompile_riscv64 +#define tb_flush_jmp_cache tb_flush_jmp_cache_riscv64 +#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_riscv64 +#define translator_loop_temp_check translator_loop_temp_check_riscv64 +#define translator_loop translator_loop_riscv64 +#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_riscv64 +#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_riscv64 +#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_riscv64 +#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_riscv64 +#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_riscv64 +#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_riscv64 +#define unassigned_mem_ops unassigned_mem_ops_riscv64 +#define floatx80_infinity floatx80_infinity_riscv64 +#define dup_const_func dup_const_func_riscv64 +#define gen_helper_raise_exception gen_helper_raise_exception_riscv64 +#define gen_helper_raise_interrupt gen_helper_raise_interrupt_riscv64 +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_riscv64 +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_riscv64 +#define gen_helper_cpsr_read gen_helper_cpsr_read_riscv64 +#define gen_helper_cpsr_write gen_helper_cpsr_write_riscv64 +#define riscv_cpu_mmu_index riscv_cpu_mmu_index_riscv64 +#define riscv_cpu_exec_interrupt riscv_cpu_exec_interrupt_riscv64 +#define riscv_cpu_fp_enabled riscv_cpu_fp_enabled_riscv64 +#define riscv_cpu_swap_hypervisor_regs riscv_cpu_swap_hypervisor_regs_riscv64 +#define riscv_cpu_virt_enabled riscv_cpu_virt_enabled_riscv64 +#define riscv_cpu_set_virt_enabled riscv_cpu_set_virt_enabled_riscv64 +#define riscv_cpu_force_hs_excep_enabled riscv_cpu_force_hs_excep_enabled_riscv64 +#define riscv_cpu_set_force_hs_excep riscv_cpu_set_force_hs_excep_riscv64 +#define riscv_cpu_claim_interrupts riscv_cpu_claim_interrupts_riscv64 +#define riscv_cpu_update_mip riscv_cpu_update_mip_riscv64 +#define riscv_cpu_set_rdtime_fn riscv_cpu_set_rdtime_fn_riscv64 +#define riscv_cpu_set_mode riscv_cpu_set_mode_riscv64 +#define riscv_cpu_get_phys_page_debug riscv_cpu_get_phys_page_debug_riscv64 +#define riscv_cpu_do_transaction_failed riscv_cpu_do_transaction_failed_riscv64 +#define riscv_cpu_do_unaligned_access riscv_cpu_do_unaligned_access_riscv64 +#define riscv_cpu_tlb_fill riscv_cpu_tlb_fill_riscv64 +#define riscv_cpu_do_interrupt riscv_cpu_do_interrupt_riscv64 +#define riscv_get_csr_ops riscv_get_csr_ops_riscv64 +#define riscv_set_csr_ops riscv_set_csr_ops_riscv64 +#define riscv_csrrw riscv_csrrw_riscv64 +#define riscv_csrrw_debug riscv_csrrw_debug_riscv64 +#define riscv_cpu_get_fflags riscv_cpu_get_fflags_riscv64 +#define riscv_cpu_set_fflags riscv_cpu_set_fflags_riscv64 +#define helper_set_rounding_mode helper_set_rounding_mode_riscv64 +#define helper_fmadd_s helper_fmadd_s_riscv64 +#define helper_fmadd_d helper_fmadd_d_riscv64 +#define helper_fmsub_s helper_fmsub_s_riscv64 +#define helper_fmsub_d helper_fmsub_d_riscv64 +#define helper_fnmsub_s helper_fnmsub_s_riscv64 +#define helper_fnmsub_d helper_fnmsub_d_riscv64 
+#define helper_fnmadd_s helper_fnmadd_s_riscv64
+#define helper_fnmadd_d helper_fnmadd_d_riscv64
+#define helper_fadd_s helper_fadd_s_riscv64
+#define helper_fsub_s helper_fsub_s_riscv64
+#define helper_fmul_s helper_fmul_s_riscv64
+#define helper_fdiv_s helper_fdiv_s_riscv64
+#define helper_fmin_s helper_fmin_s_riscv64
+#define helper_fmax_s helper_fmax_s_riscv64
+#define helper_fsqrt_s helper_fsqrt_s_riscv64
+#define helper_fle_s helper_fle_s_riscv64
+#define helper_flt_s helper_flt_s_riscv64
+#define helper_feq_s helper_feq_s_riscv64
+#define helper_fcvt_w_s helper_fcvt_w_s_riscv64
+#define helper_fcvt_wu_s helper_fcvt_wu_s_riscv64
+#define helper_fcvt_s_w helper_fcvt_s_w_riscv64
+#define helper_fcvt_s_wu helper_fcvt_s_wu_riscv64
+#define helper_fclass_s helper_fclass_s_riscv64
+#define helper_fadd_d helper_fadd_d_riscv64
+#define helper_fsub_d helper_fsub_d_riscv64
+#define helper_fmul_d helper_fmul_d_riscv64
+#define helper_fdiv_d helper_fdiv_d_riscv64
+#define helper_fmin_d helper_fmin_d_riscv64
+#define helper_fmax_d helper_fmax_d_riscv64
+#define helper_fcvt_s_d helper_fcvt_s_d_riscv64
+#define helper_fcvt_d_s helper_fcvt_d_s_riscv64
+#define helper_fsqrt_d helper_fsqrt_d_riscv64
+#define helper_fle_d helper_fle_d_riscv64
+#define helper_flt_d helper_flt_d_riscv64
+#define helper_feq_d helper_feq_d_riscv64
+#define helper_fcvt_w_d helper_fcvt_w_d_riscv64
+#define helper_fcvt_wu_d helper_fcvt_wu_d_riscv64
+#define helper_fcvt_d_w helper_fcvt_d_w_riscv64
+#define helper_fcvt_d_wu helper_fcvt_d_wu_riscv64
+#define helper_fclass_d helper_fclass_d_riscv64
+#define riscv_raise_exception riscv_raise_exception_riscv64
+#define helper_raise_exception helper_raise_exception_riscv64
+#define helper_uc_riscv_exit helper_uc_riscv_exit_riscv64
+#define helper_csrrw helper_csrrw_riscv64
+#define helper_csrrs helper_csrrs_riscv64
+#define helper_csrrc helper_csrrc_riscv64
+#define helper_sret helper_sret_riscv64
+#define helper_mret helper_mret_riscv64
+#define helper_wfi helper_wfi_riscv64
+#define helper_tlb_flush helper_tlb_flush_riscv64
+#define pmp_hart_has_privs pmp_hart_has_privs_riscv64
+#define pmpcfg_csr_write pmpcfg_csr_write_riscv64
+#define pmpcfg_csr_read pmpcfg_csr_read_riscv64
+#define pmpaddr_csr_write pmpaddr_csr_write_riscv64
+#define pmpaddr_csr_read pmpaddr_csr_read_riscv64
+#define gen_intermediate_code gen_intermediate_code_riscv64
+#define riscv_translate_init riscv_translate_init_riscv64
+#define restore_state_to_opc restore_state_to_opc_riscv64
+#define cpu_riscv_init cpu_riscv_init_riscv64
+#define riscv_reg_reset riscv_reg_reset_riscv64
+#define riscv_reg_read riscv_reg_read_riscv64
+#define riscv_reg_write riscv_reg_write_riscv64
+#define helper_fcvt_l_s helper_fcvt_l_s_riscv64
+#define helper_fcvt_lu_s helper_fcvt_lu_s_riscv64
+#define helper_fcvt_s_l helper_fcvt_s_l_riscv64
+#define helper_fcvt_s_lu helper_fcvt_s_lu_riscv64
+#define helper_fcvt_l_d helper_fcvt_l_d_riscv64
+#define helper_fcvt_lu_d helper_fcvt_lu_d_riscv64
+#define helper_fcvt_d_l helper_fcvt_d_l_riscv64
+#define helper_fcvt_d_lu helper_fcvt_d_lu_riscv64
+#define gen_helper_tlb_flush gen_helper_tlb_flush_riscv64
+#define riscv_fpr_regnames riscv_fpr_regnames_riscv64
+#define riscv_int_regnames riscv_int_regnames_riscv64
+#endif
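The defines above close the generated per-target symbol-prefix header for the riscv64 target: every global QEMU symbol is renamed with a _riscv64 suffix so that several softmmu targets can be compiled from the same sources and linked into one Unicorn library without duplicate-symbol clashes. A minimal sketch of the mechanism, with hypothetical file and function names standing in for the generated header (tcg_gen_code is one of the symbols actually listed above):

    /* prefix_riscv64.h -- hypothetical stand-in for the generated header;
     * it must be seen before any declaration or use of the symbol. */
    #ifndef PREFIX_RISCV64_H
    #define PREFIX_RISCV64_H
    #define tcg_gen_code tcg_gen_code_riscv64
    #endif

    /* tcg.c -- shared code, recompiled once per target */
    #include "prefix_riscv64.h"

    int tcg_gen_code(int nb_ops)   /* object file exports tcg_gen_code_riscv64 */
    {
        return nb_ops;
    }

Because the rename happens purely in the preprocessor, the shared source stays untouched; another target's build would include its own prefix header mapping the same names to its own suffix, and the resulting objects can coexist in one archive.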
rules or variables
# we have explicit rules for everything
MAKEFLAGS += -rR

# Files with these suffixes are final, don't try to generate them
# using implicit rules
+%/trace-events:
+%.hx:
+%.py:
+%.objs:
%.d:
%.h:
%.c:
@@ -12,12 +21,18 @@ MAKEFLAGS += -rR
%.cpp:
%.m:
%.mak:
+clean-target:

# Flags for dependency generation
-QEMU_DGFLAGS += -MMD -MP -MT $@ -MF $(*D)/$(*F).d
+QEMU_DGFLAGS += -MMD -MP -MT $@ -MF $(@D)/$(*F).d

-# Same as -I$(SRC_PATH) -I., but for the nested source/object directories
-QEMU_INCLUDES += -I$(<D) -I$(@D)

cc-option = $(if $(shell $(CC) $1 $2 -S -o /dev/null -xc /dev/null \
              >/dev/null 2>&1 && echo OK), $2, $3)
+cc-c-option = $(if $(shell $(CC) $1 $2 -c -o /dev/null -xc /dev/null \
+              >/dev/null 2>&1 && echo OK), $2, $3)

-VPATH_SUFFIXES = %.c %.h %.S %.cc %.cpp %.m %.mak %.texi %.sh %.rc
+VPATH_SUFFIXES = %.c %.h %.S %.cc %.cpp %.m %.mak %.texi %.sh %.rc Kconfig% %.json.in
set-vpath = $(if $1,$(foreach PATTERN,$(VPATH_SUFFIXES),$(eval vpath $(PATTERN) $1)))

# install-prog list, dir
@@ -138,7 +159,7 @@ endef
# Looks in the PATH if the argument contains no slash, else only considers one
# specific directory. Returns an
# empty string if the program doesn't exist
# there.
-find-in-path = $(if $(find-string /, $1), \
+find-in-path = $(if $(findstring /, $1), \
        $(wildcard $1), \
        $(wildcard $(patsubst %, %/$1, $(subst :, ,$(PATH)))))

@@ -178,8 +199,8 @@ TRACETOOL=$(PYTHON) $(SRC_PATH)/scripts/tracetool.py
config-%.h: config-%.h-timestamp
	@cmp $< $@ >/dev/null 2>&1 || cp $< $@

-config-%.h-timestamp: config-%.mak
-	$(call quiet-command, sh $(SRC_PATH)/scripts/create_config < $< > $@, "  GEN   $(TARGET_DIR)config-$*.h")
+config-%.h-timestamp: config-%.mak $(SRC_PATH)/scripts/create_config
+	$(call quiet-command, sh $(SRC_PATH)/scripts/create_config < $< > $@,"GEN","$(TARGET_DIR)config-$*.h")

.PHONY: clean-timestamp
clean-timestamp:
@@ -192,15 +213,15 @@ clean: clean-timestamp
# save-vars
# Usage: $(call save-vars, vars)
# Save each variable $v in $vars as save-vars-$v, save their object's
-# variables, then clear $v.
+# variables, then clear $v. saved-vars-$v contains the variables that
+# were saved for the objects, in order to speed up load-vars.
define save-vars
    $(foreach v,$1,
        $(eval save-vars-$v := $(value $v))
-       $(foreach o,$($v),
-           $(foreach k,cflags libs objs,
-               $(if $($o-$k),
-                   $(eval save-vars-$o-$k := $($o-$k))
-                   $(eval $o-$k := ))))
+       $(eval saved-vars-$v := $(foreach o,$($v), \
+           $(if $($o-cflags), $o-cflags $(eval save-vars-$o-cflags := $($o-cflags))$(eval $o-cflags := )) \
+           $(if $($o-libs), $o-libs $(eval save-vars-$o-libs := $($o-libs))$(eval $o-libs := )) \
+           $(if $($o-objs), $o-objs $(eval save-vars-$o-objs := $($o-objs))$(eval $o-objs := ))))
        $(eval $v := ))
endef

@@ -213,12 +234,10 @@ define load-vars
    $(eval $2-new-value := $(value $2))
    $(foreach v,$1,
        $(eval $v := $(value save-vars-$v))
-       $(foreach o,$($v),
-           $(foreach k,cflags libs objs,
-               $(if $(save-vars-$o-$k),
-                   $(eval $o-$k := $(save-vars-$o-$k))
-                   $(eval save-vars-$o-$k := ))))
-       $(eval save-vars-$v := ))
+       $(foreach o,$(saved-vars-$v),
+           $(eval $o := $(save-vars-$o)) $(eval save-vars-$o := ))
+       $(eval save-vars-$v := )
+       $(eval saved-vars-$v := ))
    $(eval $2 := $(value $2) $($2-new-value))
endef

@@ -308,7 +327,7 @@ endef
#   ../water/ice.mo-libs = -licemaker
#   ../water/ice.mo-objs = ../water/ice1.o ../water/ice2.o
#
-# Note that 'hot' didn't include 'season/' in the input, so 'summer.o' is not
+# Note that 'hot' didn't include 'water/' in the input, so 'steam.o' is not
+# included.
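The config-%.h rule above now also depends on scripts/create_config itself, so the headers regenerate whenever the generator changes. For orientation, the emitted header looks roughly like this; it is a sketch only, since the real keys come from the config-host.mak/config-target.mak of a particular build and the values here are invented:

    /* Automatically generated by create_config - do not modify */
    #define QEMU_VERSION "5.0.1"   /* from VERSION=5.0.1 (see the create_config hunk below) */
    #define QEMU_VERSION_MAJOR 5
    #define CONFIG_TCG 1           /* from CONFIG_TCG=y            */
    #define HOST_X86_64 1          /* from ARCH=x86_64             */
    #define TARGET_RISCV 1         /* from TARGET_BASE_ARCH=riscv  */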
#
define unnest-vars
@@ -317,7 +336,17 @@ define unnest-vars
    $(if $1,$(call fix-paths,$1/,,$2))

    # Descend and include every subdir Makefile.objs
-   $(foreach v, $2, $(call unnest-var-recursive,$1,$2,$v))
+   $(foreach v, $2,
+       $(call unnest-var-recursive,$1,$2,$v)
+       # Pass the .mo-cflags and .mo-libs along to its member objects
+       $(foreach o, $(filter %.mo,$($v)),
+           $(foreach p,$($o-objs),
+               $(if $($o-cflags), $(eval $p-cflags += $($o-cflags)))
+               $(if $($o-libs), $(eval $p-libs += $($o-libs))))))
+
+   # For all %.mo objects that are directly added into -y, just expand them
+   $(foreach v,$(filter %-y,$2),
+       $(eval $v := $(foreach o,$($v),$(if $($o-objs),$($o-objs),$o))))

    $(foreach v,$(filter %-m,$2),
        # All .o found in *-m variables are single object modules, create .mo
@@ -332,6 +361,7 @@ define unnest-vars
        # For non-module build, add -m to -y
        $(if $(CONFIG_MODULES),
            $(foreach o,$($v),
+               $(eval $($o-objs): CFLAGS += $(DSO_OBJ_CFLAGS))
                $(eval $o: $($o-objs)))
            $(eval $(patsubst %-m,%-y,$v) += $($v))
            $(eval modules: $($v:%.mo=%$(DSOSUF))),
@@ -344,18 +374,67 @@ define unnest-vars
        # according to .mo-objs. Report error if not set
        $(if $($o-objs),
            $(eval $(o:%.mo=%$(DSOSUF)): module-common.o $($o-objs)),
-           $(error $o added in $v but $o-objs is not set))
-       # Pass the .mo-cflags and .mo-libs along to member objects
-       $(foreach p,$($o-objs),
-           $(if $($o-cflags), $(eval $p-cflags += $($o-cflags)))
-           $(if $($o-libs), $(eval $p-libs += $($o-libs)))))
+           $(error $o added in $v but $o-objs is not set)))

    $(shell mkdir -p ./ $(sort $(dir $($v))))
    # Include all the .d files
-   $(eval -include $(addsuffix *.d, $(sort $(dir $($v)))))
+   $(eval -include $(patsubst %.o,%.d,$(patsubst %.mo,%.d,$($v))))
    $(eval $v := $(filter-out %/,$($v))))
-
-   # For all %.mo objects that are directly added into -y, expand them to %.mo-objs
-   $(foreach v,$2,
-       $(eval $v := $(foreach o,$($v),$(if $($o-objs),$($o-objs),$o))))
-
endef
+
+TEXI2MAN = $(call quiet-command, \
+	perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $(TEXI2PODFLAGS) $< $@.pod && \
+	$(POD2MAN) --section=$(subst .,,$(suffix $@)) --center=" " --release=" " $@.pod > $@, \
+	"GEN","$@")
+
+%.1:
+	$(call TEXI2MAN)
+%.7:
+	$(call TEXI2MAN)
+%.8:
+	$(call TEXI2MAN)
+
+GEN_SUBST = $(call quiet-command, \
+	sed -e "s!@libexecdir@!$(libexecdir)!g" < $< > $@, \
+	"GEN","$@")
+
+%.json: %.json.in
+	$(call GEN_SUBST)
+
+# Support for building multiple output files by atomically executing
+# a single rule which depends on several input files (so the rule
+# will be executed exactly once, not once per output file, and
+# not multiple times in parallel.) For more explanation see:
+# https://www.cmcrossroads.com/article/atomic-rules-gnu-make
+
+# Given a space-separated list of filenames, create the name of
+# a 'sentinel' file to use to indicate that they have been built.
+# We use fixed text on the end to avoid accidentally triggering
+# automatic pattern rules, and . on the start to make the file
+# not show up in ls output.
+sentinel = .$(subst $(SPACE),_,$(subst /,_,$1)).sentinel.
+
+# Define an atomic rule that builds multiple outputs from multiple inputs.
+# To use:
+#     $(call atomic,out1 out2 ...,in1 in2 ...)
+#     rule to do the operation
+#
+# Make 4.3 will have native support for this, and you would be able
+# to instead write:
+#     out1 out2 ... &: in1 in2 ...
+#     rule to do the operation
+#
+# The way this works is that it creates a make rule
+# "out1 out2 ... : sentinel-file ; @:" which says that the output
+# files depend on the sentinel file, and the rule to make them is "do nothing".
+# Then we have a rule +# "sentinel-file : in1 in2 ..." +# whose commands start with "touch sentinel-file" and then continue +# with the rule text provided by the user of this 'atomic' function. +# The foreach... is there to delete the sentinel file if any of the +# output files don't exist, so that we correctly rebuild in that situation. +atomic = $(eval $1: $(call sentinel,$1) ; @:) \ + $(call sentinel,$1) : $2 ; @touch $$@ \ + $(foreach t,$1,$(if $(wildcard $t),,$(shell rm -f $(call sentinel,$1)))) + +print-%: + @echo '$*=$($*)' diff --git a/qemu/scripts/create_config b/qemu/scripts/create_config index a2860021..6d8f08b3 100755 --- a/qemu/scripts/create_config +++ b/qemu/scripts/create_config @@ -5,10 +5,20 @@ echo "/* Automatically generated by create_config - do not modify */" while read line; do case $line in - qemu_*dir=*) # qemu-specific directory configuration + VERSION=*) # configuration + version=${line#*=} + major=$(echo "$version" | cut -d. -f1) + minor=$(echo "$version" | cut -d. -f2) + micro=$(echo "$version" | cut -d. -f3) + echo "#define QEMU_VERSION \"$version\"" + echo "#define QEMU_VERSION_MAJOR $major" + echo "#define QEMU_VERSION_MINOR $minor" + echo "#define QEMU_VERSION_MICRO $micro" + ;; + qemu_*dir=* | qemu_*path=*) # qemu-specific directory configuration name=${line%=*} value=${line#*=} - define_name=`echo $name | LC_ALL=C tr '[a-z]' '[A-Z]'` + define_name=$(echo $name | LC_ALL=C tr '[a-z]' '[A-Z]') eval "define_value=\"$value\"" echo "#define CONFIG_$define_name \"$define_value\"" # save for the next definitions @@ -18,20 +28,60 @@ case $line in # save for the next definitions prefix=${line#*=} ;; + IASL=*) # iasl executable + value=${line#*=} + echo "#define CONFIG_IASL $value" + ;; + CONFIG_AUDIO_DRIVERS=*) + drivers=${line#*=} + echo "#define CONFIG_AUDIO_DRIVERS \\" + for drv in $drivers; do + echo " \"${drv}\",\\" + done + echo "" + ;; + CONFIG_BDRV_RW_WHITELIST=*) + echo "#define CONFIG_BDRV_RW_WHITELIST\\" + for drv in ${line#*=}; do + echo " \"${drv}\",\\" + done + echo " NULL" + ;; + CONFIG_BDRV_RO_WHITELIST=*) + echo "#define CONFIG_BDRV_RO_WHITELIST\\" + for drv in ${line#*=}; do + echo " \"${drv}\",\\" + done + echo " NULL" + ;; CONFIG_*=y) # configuration name=${line%=*} echo "#define $name 1" ;; + CONFIG_*=n) # configuration + ;; CONFIG_*=*) # configuration name=${line%=*} value=${line#*=} echo "#define $name $value" ;; + HAVE_*=y) # configuration + name=${line%=*} + echo "#define $name 1" + ;; + HAVE_*=*) # configuration + name=${line%=*} + value=${line#*=} + echo "#define $name $value" + ;; ARCH=*) # configuration arch=${line#*=} - arch_name=`echo $arch | LC_ALL=C tr '[a-z]' '[A-Z]'` + arch_name=$(echo $arch | LC_ALL=C tr '[a-z]' '[A-Z]') echo "#define HOST_$arch_name 1" ;; + HOST_USB=*) + # do nothing + ;; HOST_CC=*) # do nothing ;; @@ -46,7 +96,7 @@ case $line in ;; TARGET_BASE_ARCH=*) # configuration target_base_arch=${line#*=} - base_arch_name=`echo $target_base_arch | LC_ALL=C tr '[a-z]' '[A-Z]'` + base_arch_name=$(echo $target_base_arch | LC_ALL=C tr '[a-z]' '[A-Z]') echo "#define TARGET_$base_arch_name 1" ;; TARGET_XML_FILES=*) @@ -71,6 +121,9 @@ case $line in value=${line#*=} echo "#define $name $value" ;; + DSOSUF=*) + echo "#define HOST_DSOSUF \"${line#*=}\"" + ;; esac done # read diff --git a/qemu/scripts/ordereddict.py b/qemu/scripts/ordereddict.py deleted file mode 100644 index f103954f..00000000 --- a/qemu/scripts/ordereddict.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) 2009 Raymond Hettinger -# -# Permission is hereby 
granted, free of charge, to any person -# obtaining a copy of this software and associated documentation files -# (the "Software"), to deal in the Software without restriction, -# including without limitation the rights to use, copy, modify, merge, -# publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, -# subject to the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. - -try: - from UserDict import UserDict - from UserDict import DictMixin -except ImportError: - from collections import UserDict - try: - from collections import MutableMapping as DictMixin - except ImportError: - from collections.abc import MutableMapping as DictMixin - -class OrderedDict(dict, DictMixin): - - def __init__(self, *args, **kwds): - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__end - except AttributeError: - self.clear() - self.update(*args, **kwds) - - def clear(self): - self.__end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.__map = {} # key --> [key, prev, next] - dict.clear(self) - - def __setitem__(self, key, value): - if key not in self: - end = self.__end - curr = end[1] - curr[2] = end[1] = self.__map[key] = [key, curr, end] - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - dict.__delitem__(self, key) - key, prev, next = self.__map.pop(key) - prev[2] = next - next[1] = prev - - def __iter__(self): - end = self.__end - curr = end[2] - while curr is not end: - yield curr[0] - curr = curr[2] - - def __reversed__(self): - end = self.__end - curr = end[1] - while curr is not end: - yield curr[0] - curr = curr[1] - - def popitem(self, last=True): - if not self: - raise KeyError('dictionary is empty') - if last: - key = reversed(self).next() - else: - key = iter(self).next() - value = self.pop(key) - return key, value - - def __reduce__(self): - items = [[k, self[k]] for k in self] - tmp = self.__map, self.__end - del self.__map, self.__end - inst_dict = vars(self).copy() - self.__map, self.__end = tmp - if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) - - def keys(self): - return list(self) - - setdefault = DictMixin.setdefault - update = DictMixin.update - pop = DictMixin.pop - values = DictMixin.values - items = DictMixin.items - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - - def copy(self): - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - if isinstance(other, OrderedDict): - if len(self) != len(other): - return False - for p, q in zip(self.items(), other.items()): - if p != q: - return False - return True - return dict.__eq__(self, other) - - def 
__ne__(self, other): - return not self == other diff --git a/qemu/scripts/qapi-build.sh b/qemu/scripts/qapi-build.sh deleted file mode 100644 index 0cf55027..00000000 --- a/qemu/scripts/qapi-build.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh - -# Run this scripts to create qapi below files in root dir -# ../qapi-types.c -# ../qapi-types.h -# ../qapi-visit.c -# ../qapi-visit.h - -python qapi-types.py -h -o .. -b -i qapi-schema.json -python qapi-types.py -c -o .. -b -i qapi-schema.json - -python qapi-visit.py -h -o .. -b -i qapi-schema.json -python qapi-visit.py -c -o .. -b -i qapi-schema.json - diff --git a/qemu/scripts/qapi-schema.json b/qemu/scripts/qapi-schema.json deleted file mode 100644 index 37e53150..00000000 --- a/qemu/scripts/qapi-schema.json +++ /dev/null @@ -1,39 +0,0 @@ -# -*- Mode: Python -*- -# -# QAPI Schema - -# QAPI common definitions -{ 'include': 'qapi/common.json' } - -## -# @X86CPURegister32 -# -# A X86 32-bit register -# -# Since: 1.5 -## -{ 'enum': 'X86CPURegister32', - 'data': [ 'EAX', 'EBX', 'ECX', 'EDX', 'ESP', 'EBP', 'ESI', 'EDI' ] } - -## -# @X86CPUFeatureWordInfo -# -# Information about a X86 CPU feature word -# -# @cpuid-input-eax: Input EAX value for CPUID instruction for that feature word -# -# @cpuid-input-ecx: #optional Input ECX value for CPUID instruction for that -# feature word -# -# @cpuid-register: Output register containing the feature bits -# -# @features: value of output register, containing the feature bits -# -# Since: 1.5 -## -{ 'type': 'X86CPUFeatureWordInfo', - 'data': { 'cpuid-input-eax': 'int', - '*cpuid-input-ecx': 'int', - 'cpuid-register': 'X86CPURegister32', - 'features': 'int' } } - diff --git a/qemu/scripts/qapi-types.py b/qemu/scripts/qapi-types.py deleted file mode 100644 index 7b060858..00000000 --- a/qemu/scripts/qapi-types.py +++ /dev/null @@ -1,464 +0,0 @@ -# -# QAPI types generator -# -# Copyright IBM, Corp. 2011 -# -# Authors: -# Anthony Liguori -# -# This work is licensed under the terms of the GNU GPL, version 2. -# See the COPYING file in the top-level directory. 
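An editorial aside before the generator internals: for the X86CPURegister32 enum in the schema deleted just above, this script emitted roughly the following C. This is a sketch reconstructed from generate_enum()/generate_enum_lookup() below, with the constant spelling coming from _generate_enum_string() in qapi.py, not verbatim output:

    extern const char *X86CPURegister32_lookup[];

    typedef enum X86CPURegister32 {
        X86_CPU_REGISTER32_EAX = 0,
        X86_CPU_REGISTER32_EBX = 1,
        /* ... ECX, EDX, ESP, EBP, ESI, EDI ... */
        X86_CPU_REGISTER32_MAX = 8    /* _MAX member appended automatically */
    } X86CPURegister32;

    const char *X86CPURegister32_lookup[] = {
        "EAX", "EBX", "ECX", "EDX", "ESP", "EBP", "ESI", "EDI",
        NULL,
    };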
- -from ordereddict import OrderedDict -from qapi import * -import sys -import os -import getopt -import errno - -def generate_fwd_struct(name, members, builtin_type=False): - if builtin_type: - return mcgen(''' - -typedef struct %(name)sList -{ - union { - %(type)s value; - uint64_t padding; - }; - struct %(name)sList *next; -} %(name)sList; -''', - type=c_type(name), - name=name) - - return mcgen(''' - -typedef struct %(name)s %(name)s; - -typedef struct %(name)sList -{ - union { - %(name)s *value; - uint64_t padding; - }; - struct %(name)sList *next; -} %(name)sList; -''', - name=name) - -def generate_fwd_enum_struct(name, members): - return mcgen(''' -typedef struct %(name)sList -{ - union { - %(name)s value; - uint64_t padding; - }; - struct %(name)sList *next; -} %(name)sList; -''', - name=name) - -def generate_struct_fields(members): - ret = '' - - for argname, argentry, optional, structured in parse_args(members): - if optional: - ret += mcgen(''' - bool has_%(c_name)s; -''', - c_name=c_var(argname)) - if structured: - push_indent() - ret += generate_struct({ "field": argname, "data": argentry}) - pop_indent() - else: - ret += mcgen(''' - %(c_type)s %(c_name)s; -''', - c_type=c_type(argentry), c_name=c_var(argname)) - - return ret - -def generate_struct(expr): - - structname = expr.get('type', "") - fieldname = expr.get('field', "") - members = expr['data'] - base = expr.get('base') - - ret = mcgen(''' -struct %(name)s -{ -''', - name=structname) - - if base: - ret += generate_struct_fields({'base': base}) - - ret += generate_struct_fields(members) - - if len(fieldname): - fieldname = " " + fieldname - ret += mcgen(''' -}%(field)s; -''', - field=fieldname) - - return ret - -def generate_enum_lookup(name, values): - ret = mcgen(''' -const char *%(name)s_lookup[] = { -''', - name=name) - i = 0 - for value in values: - ret += mcgen(''' - "%(value)s", -''', - value=value) - - ret += mcgen(''' - NULL, -}; - -''') - return ret - -def generate_enum(name, values): - lookup_decl = mcgen(''' -extern const char *%(name)s_lookup[]; -''', - name=name) - - enum_decl = mcgen(''' -typedef enum %(name)s -{ -''', - name=name) - - # append automatically generated _MAX value - enum_values = values + [ 'MAX' ] - - i = 0 - for value in enum_values: - enum_full_value = generate_enum_full_value(name, value) - enum_decl += mcgen(''' - %(enum_full_value)s = %(i)d, -''', - enum_full_value = enum_full_value, - i=i) - i += 1 - - enum_decl += mcgen(''' -} %(name)s; -''', - name=name) - - return lookup_decl + enum_decl - -def generate_anon_union_qtypes(expr): - - name = expr['union'] - members = expr['data'] - - ret = mcgen(''' -const int %(name)s_qtypes[QTYPE_MAX] = { -''', - name=name) - - for key in members: - qapi_type = members[key] - if qapi_type in builtin_type_qtypes: - qtype = builtin_type_qtypes[qapi_type] - elif find_struct(qapi_type): - qtype = "QTYPE_QDICT" - elif find_union(qapi_type): - qtype = "QTYPE_QDICT" - elif find_enum(qapi_type): - qtype = "QTYPE_QSTRING" - else: - assert False, "Invalid anonymous union member" - - ret += mcgen(''' - [ %(qtype)s ] = %(abbrev)s_KIND_%(enum)s, -''', - qtype = qtype, - abbrev = de_camel_case(name).upper(), - enum = c_fun(de_camel_case(key),False).upper()) - - ret += mcgen(''' -}; -''') - return ret - - -def generate_union(expr): - - name = expr['union'] - typeinfo = expr['data'] - - base = expr.get('base') - discriminator = expr.get('discriminator') - - enum_define = discriminator_find_enum_define(expr) - if enum_define: - discriminator_type_name = 
enum_define['enum_name'] - else: - discriminator_type_name = '%sKind' % (name) - - ret = mcgen(''' -struct %(name)s -{ - %(discriminator_type_name)s kind; - union { - void *data; -''', - name=name, - discriminator_type_name=discriminator_type_name) - - for key in typeinfo: - ret += mcgen(''' - %(c_type)s %(c_name)s; -''', - c_type=c_type(typeinfo[key]), - c_name=c_fun(key)) - - ret += mcgen(''' - }; -''') - - if base: - base_fields = find_struct(base)['data'] - if discriminator: - base_fields = base_fields.copy() - del base_fields[discriminator] - ret += generate_struct_fields(base_fields) - else: - assert not discriminator - - ret += mcgen(''' -}; -''') - if discriminator == {}: - ret += mcgen(''' -extern const int %(name)s_qtypes[]; -''', - name=name) - - - return ret - -def generate_type_cleanup_decl(name): - ret = mcgen(''' -void qapi_free_%(type)s(%(c_type)s obj); -''', - c_type=c_type(name),type=name) - return ret - -def generate_type_cleanup(name): - ret = mcgen(''' - -void qapi_free_%(type)s(%(c_type)s obj) -{ - QapiDeallocVisitor *md; - Visitor *v; - - if (!obj) { - return; - } - - md = qapi_dealloc_visitor_new(); - v = qapi_dealloc_get_visitor(md); - visit_type_%(type)s(v, &obj, NULL, NULL); - qapi_dealloc_visitor_cleanup(md); -} -''', - c_type=c_type(name),type=name) - return ret - - -try: - opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:i:o:", - ["source", "header", "builtins", - "prefix=", "input-file=", "output-dir="]) -except getopt.GetoptError as err: - print(str(err)) - sys.exit(1) - -output_dir = "" -input_file = "" -prefix = "" -c_file = 'qapi-types.c' -h_file = 'qapi-types.h' - -do_c = False -do_h = False -do_builtins = False - -for o, a in opts: - if o in ("-p", "--prefix"): - prefix = a - elif o in ("-i", "--input-file"): - input_file = a - elif o in ("-o", "--output-dir"): - output_dir = a + "/" - elif o in ("-c", "--source"): - do_c = True - elif o in ("-h", "--header"): - do_h = True - elif o in ("-b", "--builtins"): - do_builtins = True - -if not do_c and not do_h: - do_c = True - do_h = True - -c_file = output_dir + prefix + c_file -h_file = output_dir + prefix + h_file - -try: - os.makedirs(output_dir) -except os.error as e: - if e.errno != errno.EEXIST: - raise - -def maybe_open(really, name, opt): - if really: - return open(name, opt) - else: - try: - import StringIO - return StringIO.StringIO() - except ImportError: - from io import StringIO - return StringIO() - -fdef = maybe_open(do_c, c_file, 'w') -fdecl = maybe_open(do_h, h_file, 'w') - -fdef.write(mcgen(''' -/* AUTOMATICALLY GENERATED, DO NOT MODIFY */ - -/* - * deallocation functions for schema-defined QAPI types - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * Michael Roth - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#include "qapi/dealloc-visitor.h" -#include "%(prefix)sqapi-types.h" -#include "%(prefix)sqapi-visit.h" - -''', prefix=prefix)) - -fdecl.write(mcgen(''' -/* AUTOMATICALLY GENERATED, DO NOT MODIFY */ - -/* - * schema-defined QAPI types - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
- * - */ - -#ifndef %(guard)s -#define %(guard)s - -#include "unicorn/platform.h" - -''', - guard=guardname(h_file))) - -exprs = parse_schema(input_file) -exprs = filter(lambda expr: 'gen' not in expr, exprs) -exprs = list(exprs) - -fdecl.write(guardstart("QAPI_TYPES_BUILTIN_STRUCT_DECL")) -for typename in builtin_types: - fdecl.write(generate_fwd_struct(typename, None, builtin_type=True)) -fdecl.write(guardend("QAPI_TYPES_BUILTIN_STRUCT_DECL")) - -for expr in exprs: - ret = "\n" - if 'type' in expr: - ret += generate_fwd_struct(expr['type'], expr['data']) - elif 'enum' in expr: - ret += generate_enum(expr['enum'], expr['data']) + "\n" - ret += generate_fwd_enum_struct(expr['enum'], expr['data']) - fdef.write(generate_enum_lookup(expr['enum'], expr['data'])) - elif 'union' in expr: - ret += generate_fwd_struct(expr['union'], expr['data']) + "\n" - enum_define = discriminator_find_enum_define(expr) - if not enum_define: - ret += generate_enum('%sKind' % expr['union'], expr['data'].keys()) - fdef.write(generate_enum_lookup('%sKind' % expr['union'], - expr['data'].keys())) - if expr.get('discriminator') == {}: - fdef.write(generate_anon_union_qtypes(expr)) - else: - continue - fdecl.write(ret) - -# to avoid header dependency hell, we always generate declarations -# for built-in types in our header files and simply guard them -fdecl.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DECL")) -for typename in builtin_types: - fdecl.write(generate_type_cleanup_decl(typename + "List")) -fdecl.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DECL")) - -# ...this doesn't work for cases where we link in multiple objects that -# have the functions defined, so we use -b option to provide control -# over these cases -if do_builtins: - fdef.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DEF")) - for typename in builtin_types: - fdef.write(generate_type_cleanup(typename + "List")) - fdef.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DEF")) - -for expr in exprs: - ret = "\n" - if 'type' in expr: - ret += generate_struct(expr) + "\n" - ret += generate_type_cleanup_decl(expr['type'] + "List") - fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n") - ret += generate_type_cleanup_decl(expr['type']) - fdef.write(generate_type_cleanup(expr['type']) + "\n") - elif 'union' in expr: - ret += generate_union(expr) - ret += generate_type_cleanup_decl(expr['union'] + "List") - fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n") - ret += generate_type_cleanup_decl(expr['union']) - fdef.write(generate_type_cleanup(expr['union']) + "\n") - elif 'enum' in expr: - ret += generate_type_cleanup_decl(expr['enum'] + "List") - fdef.write(generate_type_cleanup(expr['enum'] + "List") + "\n") - else: - continue - fdecl.write(ret) - -fdecl.write(''' -#endif -''') - -fdecl.flush() -fdecl.close() - -fdef.flush() -fdef.close() diff --git a/qemu/scripts/qapi-visit.py b/qemu/scripts/qapi-visit.py deleted file mode 100644 index beb5af51..00000000 --- a/qemu/scripts/qapi-visit.py +++ /dev/null @@ -1,597 +0,0 @@ -# -# QAPI visitor generator -# -# Copyright IBM, Corp. 2011 -# Copyright (C) 2014 Red Hat, Inc. -# -# Authors: -# Anthony Liguori -# Michael Roth -# Markus Armbruster -# -# This work is licensed under the terms of the GNU GPL, version 2. -# See the COPYING file in the top-level directory. 
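As with qapi-types.py above, the shape of the output may help before reading the template code: for the schema shown earlier, the declarations this script generated looked roughly like the following sketch (generate_declaration() and generate_decl_enum() further down are the authoritative templates):

    /* enum visitor: takes the value directly */
    void visit_type_X86CPURegister32(Visitor *m, X86CPURegister32 *obj,
                                     const char *name, Error **errp);

    /* struct visitor: takes a pointer-to-pointer so it can allocate */
    void visit_type_X86CPUFeatureWordInfo(Visitor *m, X86CPUFeatureWordInfo **obj,
                                          const char *name, Error **errp);

    /* list visitor, generated alongside every type */
    void visit_type_X86CPUFeatureWordInfoList(Visitor *m,
                                              X86CPUFeatureWordInfoList **obj,
                                              const char *name, Error **errp);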
- -from ordereddict import OrderedDict -from qapi import * -import re -import sys -import os -import getopt -import errno - -implicit_structs = [] - -def generate_visit_implicit_struct(type): - global implicit_structs - if type in implicit_structs: - return '' - implicit_structs.append(type) - return mcgen(''' - -static void visit_type_implicit_%(c_type)s(Visitor *m, %(c_type)s **obj, Error **errp) -{ - Error *err = NULL; - - visit_start_implicit_struct(m, (void **)obj, sizeof(%(c_type)s), &err); - if (!err) { - visit_type_%(c_type)s_fields(m, obj, errp); - visit_end_implicit_struct(m, &err); - } - error_propagate(errp, err); -} -''', - c_type=type_name(type)) - -def generate_visit_struct_fields(name, field_prefix, fn_prefix, members, base = None): - substructs = [] - ret = '' - if not fn_prefix: - full_name = name - else: - full_name = "%s_%s" % (name, fn_prefix) - - for argname, argentry, optional, structured in parse_args(members): - if structured: - if not fn_prefix: - nested_fn_prefix = argname - else: - nested_fn_prefix = "%s_%s" % (fn_prefix, argname) - - nested_field_prefix = "%s%s." % (field_prefix, argname) - ret += generate_visit_struct_fields(name, nested_field_prefix, - nested_fn_prefix, argentry) - ret += mcgen(''' - -static void visit_type_%(full_name)s_field_%(c_name)s(Visitor *m, %(name)s **obj, Error **errp) -{ -''', - name=name, full_name=full_name, c_name=c_var(argname)) - ret += generate_visit_struct_body(full_name, argname, argentry) - ret += mcgen(''' -} -''') - - if base: - ret += generate_visit_implicit_struct(base) - - ret += mcgen(''' - -static void visit_type_%(full_name)s_fields(Visitor *m, %(name)s **obj, Error **errp) -{ - Error *err = NULL; -''', - name=name, full_name=full_name) - push_indent() - - if base: - ret += mcgen(''' -visit_type_implicit_%(type)s(m, &(*obj)->%(c_prefix)s%(c_name)s, &err); -if (err) { - goto out; -} -''', - c_prefix=c_var(field_prefix), - type=type_name(base), c_name=c_var('base')) - - for argname, argentry, optional, structured in parse_args(members): - if optional: - ret += mcgen(''' -visit_optional(m, &(*obj)->%(c_prefix)shas_%(c_name)s, "%(name)s", &err); -if (!err && (*obj)->%(prefix)shas_%(c_name)s) { -''', - c_prefix=c_var(field_prefix), prefix=field_prefix, - c_name=c_var(argname), name=argname) - push_indent() - - if structured: - ret += mcgen(''' -visit_type_%(full_name)s_field_%(c_name)s(m, obj, &err); -''', - full_name=full_name, c_name=c_var(argname)) - else: - ret += mcgen(''' -visit_type_%(type)s(m, &(*obj)->%(c_prefix)s%(c_name)s, "%(name)s", &err); -''', - c_prefix=c_var(field_prefix), prefix=field_prefix, - type=type_name(argentry), c_name=c_var(argname), - name=argname) - - if optional: - pop_indent() - ret += mcgen(''' -} -''') - ret += mcgen(''' -if (err) { - goto out; -} -''') - - pop_indent() - if re.search('^ *goto out\\;', ret, re.MULTILINE): - ret += mcgen(''' - -out: -''') - ret += mcgen(''' - error_propagate(errp, err); -} -''') - return ret - - -def generate_visit_struct_body(field_prefix, name, members): - ret = mcgen(''' - Error *err = NULL; - -''') - - if not field_prefix: - full_name = name - else: - full_name = "%s_%s" % (field_prefix, name) - - if len(field_prefix): - ret += mcgen(''' - visit_start_struct(m, NULL, "", "%(name)s", 0, &err); -''', - name=name) - else: - ret += mcgen(''' - visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err); -''', - name=name) - - ret += mcgen(''' - if (!err) { - if (*obj) { - visit_type_%(name)s_fields(m, obj, errp); - } - 
visit_end_struct(m, &err); - } - error_propagate(errp, err); -''', - name=full_name) - - return ret - -def generate_visit_struct(expr): - - name = expr['type'] - members = expr['data'] - base = expr.get('base') - - ret = generate_visit_struct_fields(name, "", "", members, base) - - ret += mcgen(''' - -void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp) -{ -''', - name=name) - - ret += generate_visit_struct_body("", name, members) - - ret += mcgen(''' -} -''') - return ret - -def generate_visit_list(name, members): - return mcgen(''' - -void visit_type_%(name)sList(Visitor *m, %(name)sList **obj, const char *name, Error **errp) -{ - Error *err = NULL; - GenericList *i, **prev; - - visit_start_list(m, name, &err); - if (err) { - goto out; - } - - for (prev = (GenericList **)obj; - !err && (i = visit_next_list(m, prev, &err)) != NULL; - prev = &i) { - %(name)sList *native_i = (%(name)sList *)i; - visit_type_%(name)s(m, &native_i->value, NULL, &err); - } - - error_propagate(errp, err); - err = NULL; - visit_end_list(m, &err); -out: - error_propagate(errp, err); -} -''', - name=name) - -def generate_visit_enum(name, members): - return mcgen(''' - -void visit_type_%(name)s(Visitor *m, %(name)s *obj, const char *name, Error **errp) -{ - visit_type_enum(m, (int *)obj, %(name)s_lookup, "%(name)s", name, errp); -} -''', - name=name) - -def generate_visit_anon_union(name, members): - ret = mcgen(''' - -void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp) -{ - Error *err = NULL; - - visit_start_implicit_struct(m, (void**) obj, sizeof(%(name)s), &err); - if (err) { - goto out; - } - visit_get_next_type(m, (int*) &(*obj)->kind, %(name)s_qtypes, name, &err); - if (err) { - goto out_end; - } - switch ((*obj)->kind) { -''', - name=name) - - # For anon union, always use the default enum type automatically generated - # as "'%sKind' % (name)" - disc_type = '%sKind' % (name) - - for key in members: - assert (members[key] in builtin_types - or find_struct(members[key]) - or find_union(members[key]) - or find_enum(members[key])), "Invalid anonymous union member" - - enum_full_value = generate_enum_full_value(disc_type, key) - ret += mcgen(''' - case %(enum_full_value)s: - visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, name, &err); - break; -''', - enum_full_value = enum_full_value, - c_type = type_name(members[key]), - c_name = c_fun(key)) - - ret += mcgen(''' - default: - abort(); - } -out_end: - error_propagate(errp, err); - err = NULL; - visit_end_implicit_struct(m, &err); -out: - error_propagate(errp, err); -} -''') - - return ret - - -def generate_visit_union(expr): - - name = expr['union'] - members = expr['data'] - - base = expr.get('base') - discriminator = expr.get('discriminator') - - if discriminator == {}: - assert not base - return generate_visit_anon_union(name, members) - - enum_define = discriminator_find_enum_define(expr) - if enum_define: - # Use the enum type as discriminator - ret = "" - disc_type = enum_define['enum_name'] - else: - # There will always be a discriminator in the C switch code, by default it - # is an enum type generated silently as "'%sKind' % (name)" - ret = generate_visit_enum('%sKind' % name, members.keys()) - disc_type = '%sKind' % (name) - - if base: - base_fields = find_struct(base)['data'] - if discriminator: - base_fields = base_fields.copy() - del base_fields[discriminator] - ret += generate_visit_struct_fields(name, "", "", base_fields) - - if discriminator: - for key in members: - ret += 
generate_visit_implicit_struct(members[key]) - - ret += mcgen(''' - -void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp) -{ - Error *err = NULL; - - visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err); - if (err) { - goto out; - } - if (*obj) { -''', - name=name) - - if base: - ret += mcgen(''' - visit_type_%(name)s_fields(m, obj, &err); - if (err) { - goto out_obj; - } -''', - name=name) - - if not discriminator: - disc_key = "type" - else: - disc_key = discriminator - ret += mcgen(''' - visit_type_%(disc_type)s(m, &(*obj)->kind, "%(disc_key)s", &err); - if (err) { - goto out_obj; - } - if (!visit_start_union(m, !!(*obj)->data, &err) || err) { - goto out_obj; - } - switch ((*obj)->kind) { -''', - disc_type = disc_type, - disc_key = disc_key) - - for key in members: - if not discriminator: - fmt = 'visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", &err);' - else: - fmt = 'visit_type_implicit_%(c_type)s(m, &(*obj)->%(c_name)s, &err);' - - enum_full_value = generate_enum_full_value(disc_type, key) - ret += mcgen(''' - case %(enum_full_value)s: - ''' + fmt + ''' - break; -''', - enum_full_value = enum_full_value, - c_type=type_name(members[key]), - c_name=c_fun(key)) - - ret += mcgen(''' - default: - abort(); - } -out_obj: - error_propagate(errp, err); - err = NULL; - visit_end_union(m, !!(*obj)->data, &err); - error_propagate(errp, err); - err = NULL; - } - visit_end_struct(m, &err); -out: - error_propagate(errp, err); -} -''') - - return ret - -def generate_declaration(name, members, genlist=True, builtin_type=False): - ret = "" - if not builtin_type: - ret += mcgen(''' - -void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp); -''', - name=name) - - if genlist: - ret += mcgen(''' -void visit_type_%(name)sList(Visitor *m, %(name)sList **obj, const char *name, Error **errp); -''', - name=name) - - return ret - -def generate_enum_declaration(name, members, genlist=True): - ret = "" - if genlist: - ret += mcgen(''' -void visit_type_%(name)sList(Visitor *m, %(name)sList **obj, const char *name, Error **errp); -''', - name=name) - - return ret - -def generate_decl_enum(name, members, genlist=True): - return mcgen(''' - -void visit_type_%(name)s(Visitor *m, %(name)s *obj, const char *name, Error **errp); -''', - name=name) - -try: - opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:i:o:", - ["source", "header", "builtins", "prefix=", - "input-file=", "output-dir="]) -except getopt.GetoptError as err: - print(str(err)) - sys.exit(1) - -input_file = "" -output_dir = "" -prefix = "" -c_file = 'qapi-visit.c' -h_file = 'qapi-visit.h' - -do_c = False -do_h = False -do_builtins = False - -for o, a in opts: - if o in ("-p", "--prefix"): - prefix = a - elif o in ("-i", "--input-file"): - input_file = a - elif o in ("-o", "--output-dir"): - output_dir = a + "/" - elif o in ("-c", "--source"): - do_c = True - elif o in ("-h", "--header"): - do_h = True - elif o in ("-b", "--builtins"): - do_builtins = True - -if not do_c and not do_h: - do_c = True - do_h = True - -c_file = output_dir + prefix + c_file -h_file = output_dir + prefix + h_file - -try: - os.makedirs(output_dir) -except os.error as e: - if e.errno != errno.EEXIST: - raise - -def maybe_open(really, name, opt): - if really: - return open(name, opt) - else: - try: - import StringIO - return StringIO.StringIO() - except ImportError: - from io import StringIO - return StringIO() - -fdef = maybe_open(do_c, c_file, 'w') -fdecl = maybe_open(do_h, h_file, 'w') - 
-fdef.write(mcgen(''' -/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ - -/* - * schema-defined QAPI visitor functions - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#include "qemu-common.h" -#include "%(header)s" -''', - header=basename(h_file))) - -fdecl.write(mcgen(''' -/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ - -/* - * schema-defined QAPI visitor functions - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. - * - */ - -#ifndef %(guard)s -#define %(guard)s - -#include "qapi/visitor.h" -#include "%(prefix)sqapi-types.h" - -''', - prefix=prefix, guard=guardname(h_file))) - -exprs = parse_schema(input_file) - -# to avoid header dependency hell, we always generate declarations -# for built-in types in our header files and simply guard them -fdecl.write(guardstart("QAPI_VISIT_BUILTIN_VISITOR_DECL")) -for typename in builtin_types: - fdecl.write(generate_declaration(typename, None, genlist=True, - builtin_type=True)) -fdecl.write(guardend("QAPI_VISIT_BUILTIN_VISITOR_DECL")) - -# ...this doesn't work for cases where we link in multiple objects that -# have the functions defined, so we use -b option to provide control -# over these cases -if do_builtins: - for typename in builtin_types: - fdef.write(generate_visit_list(typename, None)) - -for expr in exprs: - if 'type' in expr: - ret = generate_visit_struct(expr) - ret += generate_visit_list(expr['type'], expr['data']) - fdef.write(ret) - - ret = generate_declaration(expr['type'], expr['data']) - fdecl.write(ret) - elif 'union' in expr: - ret = generate_visit_union(expr) - ret += generate_visit_list(expr['union'], expr['data']) - fdef.write(ret) - - enum_define = discriminator_find_enum_define(expr) - ret = "" - if not enum_define: - ret = generate_decl_enum('%sKind' % expr['union'], - expr['data'].keys()) - ret += generate_declaration(expr['union'], expr['data']) - fdecl.write(ret) - elif 'enum' in expr: - ret = generate_visit_list(expr['enum'], expr['data']) - ret += generate_visit_enum(expr['enum'], expr['data']) - fdef.write(ret) - - ret = generate_decl_enum(expr['enum'], expr['data']) - ret += generate_enum_declaration(expr['enum'], expr['data']) - fdecl.write(ret) - -fdecl.write(''' -#endif -''') - -fdecl.flush() -fdecl.close() - -fdef.flush() -fdef.close() diff --git a/qemu/scripts/qapi.py b/qemu/scripts/qapi.py deleted file mode 100644 index 429f211d..00000000 --- a/qemu/scripts/qapi.py +++ /dev/null @@ -1,605 +0,0 @@ -# -# QAPI helper library -# -# Copyright IBM, Corp. 2011 -# Copyright (c) 2013 Red Hat Inc. -# -# Authors: -# Anthony Liguori -# Markus Armbruster -# -# This work is licensed under the terms of the GNU GPL, version 2. -# See the COPYING file in the top-level directory. 
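A quick orientation for the helper library that follows: the builtin_types table just below names the schema scalars, and c_type() later in the file maps them onto C, roughly as sketched here (field names invented for illustration):

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct strList strList;  /* built-in list types are declared by qapi-types.py */

    struct CTypeIllustration {
        int64_t  an_int;        /* schema 'int'                               */
        uint16_t a_uint16;      /* schema 'uint16' -> uint16_t, etc.          */
        double   a_number;      /* schema 'number'                            */
        bool     a_flag;        /* schema 'bool'                              */
        char    *a_string;      /* schema 'str' ('const char *' as parameter) */
        strList *some_strings;  /* schema ['str'], i.e. a list of str         */
    };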
- -import re -from ordereddict import OrderedDict -import os -import sys - -try: - basestring -except NameError: - basestring = str - -builtin_types = [ - 'str', 'int', 'number', 'bool', - 'int8', 'int16', 'int32', 'int64', - 'uint8', 'uint16', 'uint32', 'uint64' -] - -builtin_type_qtypes = { - 'str': 'QTYPE_QSTRING', - 'int': 'QTYPE_QINT', - 'number': 'QTYPE_QFLOAT', - 'bool': 'QTYPE_QBOOL', - 'int8': 'QTYPE_QINT', - 'int16': 'QTYPE_QINT', - 'int32': 'QTYPE_QINT', - 'int64': 'QTYPE_QINT', - 'uint8': 'QTYPE_QINT', - 'uint16': 'QTYPE_QINT', - 'uint32': 'QTYPE_QINT', - 'uint64': 'QTYPE_QINT', -} - -def error_path(parent): - res = "" - while parent: - res = ("In file included from %s:%d:\n" % (parent['file'], - parent['line'])) + res - parent = parent['parent'] - return res - -class QAPISchemaError(Exception): - def __init__(self, schema, msg): - self.input_file = schema.input_file - self.msg = msg - self.col = 1 - self.line = schema.line - for ch in schema.src[schema.line_pos:schema.pos]: - if ch == '\t': - self.col = (self.col + 7) % 8 + 1 - else: - self.col += 1 - self.info = schema.parent_info - - def __str__(self): - return error_path(self.info) + \ - "%s:%d:%d: %s" % (self.input_file, self.line, self.col, self.msg) - -class QAPIExprError(Exception): - def __init__(self, expr_info, msg): - self.info = expr_info - self.msg = msg - - def __str__(self): - return error_path(self.info['parent']) + \ - "%s:%d: %s" % (self.info['file'], self.info['line'], self.msg) - -class QAPISchema: - - def __init__(self, fp, input_relname=None, include_hist=[], - previously_included=[], parent_info=None): - """ include_hist is a stack used to detect inclusion cycles - previously_included is a global state used to avoid multiple - inclusions of the same file""" - input_fname = os.path.abspath(fp.name) - if input_relname is None: - input_relname = fp.name - self.input_dir = os.path.dirname(input_fname) - self.input_file = input_relname - self.include_hist = include_hist + [(input_relname, input_fname)] - previously_included.append(input_fname) - self.parent_info = parent_info - self.src = fp.read() - if self.src == '' or self.src[-1] != '\n': - self.src += '\n' - self.cursor = 0 - self.line = 1 - self.line_pos = 0 - self.exprs = [] - self.accept() - - while self.tok != None: - expr_info = {'file': input_relname, 'line': self.line, 'parent': self.parent_info} - expr = self.get_expr(False) - if isinstance(expr, dict) and "include" in expr: - if len(expr) != 1: - raise QAPIExprError(expr_info, "Invalid 'include' directive") - include = expr["include"] - if not isinstance(include, str): - raise QAPIExprError(expr_info, - 'Expected a file name (string), got: %s' - % include) - include_path = os.path.join(self.input_dir, include) - for elem in self.include_hist: - if include_path == elem[1]: - raise QAPIExprError(expr_info, "Inclusion loop for %s" - % include) - # skip multiple include of the same file - if include_path in previously_included: - continue - try: - fobj = open(include_path, 'r') - except IOError as e: - raise QAPIExprError(expr_info, - '%s: %s' % (e.strerror, include)) - exprs_include = QAPISchema(fobj, include, self.include_hist, - previously_included, expr_info) - self.exprs.extend(exprs_include.exprs) - else: - expr_elem = {'expr': expr, - 'info': expr_info} - self.exprs.append(expr_elem) - - def accept(self): - while True: - self.tok = self.src[self.cursor] - self.pos = self.cursor - self.cursor += 1 - self.val = None - - if self.tok == '#': - self.cursor = self.src.find('\n', self.cursor) - elif 
self.tok in ['{', '}', ':', ',', '[', ']']: - return - elif self.tok == "'": - string = '' - esc = False - while True: - ch = self.src[self.cursor] - self.cursor += 1 - if ch == '\n': - raise QAPISchemaError(self, - 'Missing terminating "\'"') - if esc: - string += ch - esc = False - elif ch == "\\": - esc = True - elif ch == "'": - self.val = string - return - else: - string += ch - elif self.tok == '\n': - if self.cursor == len(self.src): - self.tok = None - return - self.line += 1 - self.line_pos = self.cursor - elif not self.tok.isspace(): - raise QAPISchemaError(self, 'Stray "%s"' % self.tok) - - def get_members(self): - expr = OrderedDict() - if self.tok == '}': - self.accept() - return expr - if self.tok != "'": - raise QAPISchemaError(self, 'Expected string or "}"') - while True: - key = self.val - self.accept() - if self.tok != ':': - raise QAPISchemaError(self, 'Expected ":"') - self.accept() - if key in expr: - raise QAPISchemaError(self, 'Duplicate key "%s"' % key) - expr[key] = self.get_expr(True) - if self.tok == '}': - self.accept() - return expr - if self.tok != ',': - raise QAPISchemaError(self, 'Expected "," or "}"') - self.accept() - if self.tok != "'": - raise QAPISchemaError(self, 'Expected string') - - def get_values(self): - expr = [] - if self.tok == ']': - self.accept() - return expr - if not self.tok in [ '{', '[', "'" ]: - raise QAPISchemaError(self, 'Expected "{", "[", "]" or string') - while True: - expr.append(self.get_expr(True)) - if self.tok == ']': - self.accept() - return expr - if self.tok != ',': - raise QAPISchemaError(self, 'Expected "," or "]"') - self.accept() - - def get_expr(self, nested): - if self.tok != '{' and not nested: - raise QAPISchemaError(self, 'Expected "{"') - if self.tok == '{': - self.accept() - expr = self.get_members() - elif self.tok == '[': - self.accept() - expr = self.get_values() - elif self.tok == "'": - expr = self.val - self.accept() - else: - raise QAPISchemaError(self, 'Expected "{", "[" or string') - return expr - -def find_base_fields(base): - base_struct_define = find_struct(base) - if not base_struct_define: - return None - return base_struct_define['data'] - -# Return the discriminator enum define if discriminator is specified as an -# enum type, otherwise return None. -def discriminator_find_enum_define(expr): - base = expr.get('base') - discriminator = expr.get('discriminator') - - if not (discriminator and base): - return None - - base_fields = find_base_fields(base) - if not base_fields: - return None - - discriminator_type = base_fields.get(discriminator) - if not discriminator_type: - return None - - return find_enum(discriminator_type) - -def check_event(expr, expr_info): - params = expr.get('data') - if params: - for argname, argentry, optional, structured in parse_args(params): - if structured: - raise QAPIExprError(expr_info, - "Nested structure define in event is not " - "supported, event '%s', argname '%s'" - % (expr['event'], argname)) - -def check_union(expr, expr_info): - name = expr['union'] - base = expr.get('base') - discriminator = expr.get('discriminator') - members = expr['data'] - - # If the object has a member 'base', its value must name a complex type. - if base: - base_fields = find_base_fields(base) - if not base_fields: - raise QAPIExprError(expr_info, - "Base '%s' is not a valid type" - % base) - - # If the union object has no member 'discriminator', it's an - # ordinary union. 
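(Aside, to make the cases handled below concrete: per generate_union() in the deleted qapi-types.py, a flat union, i.e. a base type plus an enum discriminator, compiled to roughly this C; every name here is invented:)

    /* { 'union': 'Shape', 'base': 'ShapeBase', 'discriminator': 'kind',
       'data': { 'circle': 'Circle', 'square': 'Square' } }  -- hypothetical */
    struct Shape {
        ShapeKind kind;     /* the discriminator enum from the base type */
        union {
            void *data;
            Circle *circle;
            Square *square;
        };
        /* remaining ShapeBase fields follow, minus the discriminator */
    };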
- if not discriminator: - enum_define = None - - # Else if the value of member 'discriminator' is {}, it's an - # anonymous union. - elif discriminator == {}: - enum_define = None - - # Else, it's a flat union. - else: - # The object must have a member 'base'. - if not base: - raise QAPIExprError(expr_info, - "Flat union '%s' must have a base field" - % name) - # The value of member 'discriminator' must name a member of the - # base type. - discriminator_type = base_fields.get(discriminator) - if not discriminator_type: - raise QAPIExprError(expr_info, - "Discriminator '%s' is not a member of base " - "type '%s'" - % (discriminator, base)) - enum_define = find_enum(discriminator_type) - # Do not allow string discriminator - if not enum_define: - raise QAPIExprError(expr_info, - "Discriminator '%s' must be of enumeration " - "type" % discriminator) - - # Check every branch - for (key, value) in members.items(): - # If this named member's value names an enum type, then all members - # of 'data' must also be members of the enum type. - if enum_define and not key in enum_define['enum_values']: - raise QAPIExprError(expr_info, - "Discriminator value '%s' is not found in " - "enum '%s'" % - (key, enum_define["enum_name"])) - # Todo: add checking for values. Key is checked as above, value can be - # also checked here, but we need more functions to handle array case. - -def check_exprs(schema): - for expr_elem in schema.exprs: - expr = expr_elem['expr'] - if 'union' in expr: - check_union(expr, expr_elem['info']) - if 'event' in expr: - check_event(expr, expr_elem['info']) - -def parse_schema(input_file): - try: - schema = QAPISchema(open(input_file, "r")) - except (QAPISchemaError, QAPIExprError) as e: - print >>sys.stderr, e - exit(1) - - exprs = [] - - for expr_elem in schema.exprs: - expr = expr_elem['expr'] - if 'enum' in expr: - add_enum(expr['enum'], expr['data']) - elif 'union' in expr: - add_union(expr) - elif 'type' in expr: - add_struct(expr) - exprs.append(expr) - - # Try again for hidden UnionKind enum - for expr_elem in schema.exprs: - expr = expr_elem['expr'] - if 'union' in expr: - if not discriminator_find_enum_define(expr): - add_enum('%sKind' % expr['union']) - - try: - check_exprs(schema) - except QAPIExprError as e: - print >>sys.stderr, e - exit(1) - - return exprs - -def parse_args(typeinfo): - if isinstance(typeinfo, basestring): - struct = find_struct(typeinfo) - assert struct != None - typeinfo = struct['data'] - - for member in typeinfo: - argname = member - argentry = typeinfo[member] - optional = False - structured = False - if member.startswith('*'): - argname = member[1:] - optional = True - if isinstance(argentry, OrderedDict): - structured = True - yield (argname, argentry, optional, structured) - -def de_camel_case(name): - new_name = '' - for ch in name: - if ch.isupper() and new_name: - new_name += '_' - if ch == '-': - new_name += '_' - else: - new_name += ch.lower() - return new_name - -def camel_case(name): - new_name = '' - first = True - for ch in name: - if ch in ['_', '-']: - first = True - elif first: - new_name += ch.upper() - first = False - else: - new_name += ch.lower() - return new_name - -def c_var(name, protect=True): - # ANSI X3J11/88-090, 3.1.1 - c89_words = set(['auto', 'break', 'case', 'char', 'const', 'continue', - 'default', 'do', 'double', 'else', 'enum', 'extern', 'float', - 'for', 'goto', 'if', 'int', 'long', 'register', 'return', - 'short', 'signed', 'sizeof', 'static', 'struct', 'switch', - 'typedef', 'union', 'unsigned', 'void', 
'volatile', 'while']) - # ISO/IEC 9899:1999, 6.4.1 - c99_words = set(['inline', 'restrict', '_Bool', '_Complex', '_Imaginary']) - # ISO/IEC 9899:2011, 6.4.1 - c11_words = set(['_Alignas', '_Alignof', '_Atomic', '_Generic', '_Noreturn', - '_Static_assert', '_Thread_local']) - # GCC http://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/C-Extensions.html - # excluding _.* - gcc_words = set(['asm', 'typeof']) - # C++ ISO/IEC 14882:2003 2.11 - cpp_words = set(['bool', 'catch', 'class', 'const_cast', 'delete', - 'dynamic_cast', 'explicit', 'false', 'friend', 'mutable', - 'namespace', 'new', 'operator', 'private', 'protected', - 'public', 'reinterpret_cast', 'static_cast', 'template', - 'this', 'throw', 'true', 'try', 'typeid', 'typename', - 'using', 'virtual', 'wchar_t', - # alternative representations - 'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not', - 'not_eq', 'or', 'or_eq', 'xor', 'xor_eq']) - # namespace pollution: - polluted_words = set(['unix', 'errno']) - if protect and (name in c89_words | c99_words | c11_words | gcc_words | cpp_words | polluted_words): - return "q_" + name - return name.replace('-', '_').lstrip("*") - -def c_fun(name, protect=True): - return c_var(name, protect).replace('.', '_') - -def c_list_type(name): - return '%sList' % name - -def type_name(name): - if type(name) == list: - return c_list_type(name[0]) - return name - -enum_types = [] -struct_types = [] -union_types = [] - -def add_struct(definition): - global struct_types - struct_types.append(definition) - -def find_struct(name): - global struct_types - for struct in struct_types: - if struct['type'] == name: - return struct - return None - -def add_union(definition): - global union_types - union_types.append(definition) - -def find_union(name): - global union_types - for union in union_types: - if union['union'] == name: - return union - return None - -def add_enum(name, enum_values = None): - global enum_types - enum_types.append({"enum_name": name, "enum_values": enum_values}) - -def find_enum(name): - global enum_types - for enum in enum_types: - if enum['enum_name'] == name: - return enum - return None - -def is_enum(name): - return find_enum(name) != None - -eatspace = '\033EATSPACE.' - -# A special suffix is added in c_type() for pointer types, and it's -# stripped in mcgen(). So please notice this when you check the return -# value of c_type() outside mcgen(). 
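(One more orientation note before c_type() below: on the identifier side, c_var()/c_fun() above are what keep generated members legal C. A sketch of typical results; the struct name is invented, the schema keys are real ones from this patch or representative:)

    struct NameMangling {
        int64_t cpuid_input_eax;   /* 'cpuid-input-eax': '-' becomes '_'            */
        bool has_cpuid_input_ecx;  /* optional '*cpuid-input-ecx' gains a has_ flag */
        bool q_static;             /* 'static' is a C keyword, so a "q_" prefix     */
        int64_t q_unix;            /* 'unix' pollutes the namespace, same treatment */
    };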
-def c_type(name, is_param=False): - if name == 'str': - if is_param: - return 'const char *' + eatspace - return 'char *' + eatspace - - elif name == 'int': - return 'int64_t' - elif (name == 'int8' or name == 'int16' or name == 'int32' or - name == 'int64' or name == 'uint8' or name == 'uint16' or - name == 'uint32' or name == 'uint64'): - return name + '_t' - elif name == 'size': - return 'uint64_t' - elif name == 'bool': - return 'bool' - elif name == 'number': - return 'double' - elif type(name) == list: - return '%s *%s' % (c_list_type(name[0]), eatspace) - elif is_enum(name): - return name - elif name == None or len(name) == 0: - return 'void' - elif name == name.upper(): - return '%sEvent *%s' % (camel_case(name), eatspace) - else: - return '%s *%s' % (name, eatspace) - -def is_c_ptr(name): - suffix = "*" + eatspace - return c_type(name).endswith(suffix) - -def genindent(count): - ret = "" - for i in range(count): - ret += " " - return ret - -indent_level = 0 - -def push_indent(indent_amount=4): - global indent_level - indent_level += indent_amount - -def pop_indent(indent_amount=4): - global indent_level - indent_level -= indent_amount - -def cgen(code, **kwds): - indent = genindent(indent_level) - lines = code.split('\n') - lines = map(lambda x: indent + x, lines) - return '\n'.join(lines) % kwds + '\n' - -def mcgen(code, **kwds): - raw = cgen('\n'.join(code.split('\n')[1:-1]), **kwds) - return re.sub(re.escape(eatspace) + ' *', '', raw) - -def basename(filename): - return filename.split("/")[-1] - -def guardname(filename): - guard = basename(filename).rsplit(".", 1)[0] - for substr in [".", " ", "-"]: - guard = guard.replace(substr, "_") - return guard.upper() + '_H' - -def guardstart(name): - return mcgen(''' - -#ifndef %(name)s -#define %(name)s - -''', - name=guardname(name)) - -def guardend(name): - return mcgen(''' - -#endif /* %(name)s */ - -''', - name=guardname(name)) - -# ENUMName -> ENUM_NAME, EnumName1 -> ENUM_NAME1 -# ENUM_NAME -> ENUM_NAME, ENUM_NAME1 -> ENUM_NAME1, ENUM_Name2 -> ENUM_NAME2 -# ENUM24_Name -> ENUM24_NAME -def _generate_enum_string(value): - c_fun_str = c_fun(value, False) - if value.isupper(): - return c_fun_str - - new_name = '' - l = len(c_fun_str) - for i in range(l): - c = c_fun_str[i] - # When c is upper and no "_" appears before, do more checks - if c.isupper() and (i > 0) and c_fun_str[i - 1] != "_": - # Case 1: next string is lower - # Case 2: previous string is digit - if (i < (l - 1) and c_fun_str[i + 1].islower()) or \ - c_fun_str[i - 1].isdigit(): - new_name += '_' - new_name += c - return new_name.lstrip('_').upper() - -def generate_enum_full_value(enum_name, enum_value): - abbrev_string = _generate_enum_string(enum_name) - value_string = _generate_enum_string(enum_value) - return "%s_%s" % (abbrev_string, value_string) diff --git a/qemu/scripts/qapi/common.json b/qemu/scripts/qapi/common.json deleted file mode 100644 index c87eac4c..00000000 --- a/qemu/scripts/qapi/common.json +++ /dev/null @@ -1,30 +0,0 @@ -# -*- Mode: Python -*- -# -# QAPI common definitions - -## -# @ErrorClass -# -# QEMU error classes -# -# @GenericError: this is used for errors that don't require a specific error -# class. 
This should be the default case for most errors -# -# @CommandNotFound: the requested command has not been found -# -# @DeviceEncrypted: the requested operation can't be fulfilled because the -# selected device is encrypted -# -# @DeviceNotActive: a device has failed to be become active -# -# @DeviceNotFound: the requested device has not been found -# -# @KVMMissingCap: the requested operation can't be fulfilled because a -# required KVM capability is missing -# -# Since: 1.2 -## -{ 'enum': 'ErrorClass', - 'data': [ 'GenericError', 'CommandNotFound', 'DeviceEncrypted', - 'DeviceNotActive', 'DeviceNotFound', 'KVMMissingCap' ] } - diff --git a/qemu/cpus.c b/qemu/softmmu/cpus.c similarity index 56% rename from qemu/cpus.c rename to qemu/softmmu/cpus.c index 28509d54..f983f634 100644 --- a/qemu/cpus.c +++ b/qemu/softmmu/cpus.c @@ -22,31 +22,24 @@ * THE SOFTWARE. */ -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ - -/* Needed early for CONFIG_BSD etc. */ -#include "config-host.h" -#include "sysemu/sysemu.h" +#include "sysemu/tcg.h" #include "sysemu/cpus.h" -#include "qemu/thread.h" - -#include "exec/address-spaces.h" // debug, can be removed later +#include "qemu/bitmap.h" +#include "tcg/tcg.h" +#include "exec/tb-hash.h" #include "uc_priv.h" -static bool cpu_can_run(CPUState *cpu); -static void cpu_handle_guest_debug(CPUState *cpu); -static int tcg_cpu_exec(struct uc_struct *uc, CPUArchState *env); -static bool tcg_exec_all(struct uc_struct* uc); -static int qemu_tcg_init_vcpu(CPUState *cpu); -static void qemu_tcg_cpu_loop(struct uc_struct *uc); -int vm_start(struct uc_struct* uc) +int64_t cpu_icount_to_ns(int64_t icount) { - if (resume_all_vcpus(uc)) { - return -1; - } - return 0; + // return icount << atomic_read(&timers_state.icount_time_shift); + // from configure_icount(QemuOpts *opts, Error **errp) + /* 125MIPS seems a reasonable initial guess at the guest speed. + It will be corrected fairly quickly anyway. */ + // timers_state.icount_time_shift = 3; + + return icount << 3; } bool cpu_is_stopped(CPUState *cpu) @@ -54,85 +47,52 @@ bool cpu_is_stopped(CPUState *cpu) return cpu->stopped; } -void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data) +/* return the time elapsed in VM between vm_start and vm_stop. Unless + * icount is active, cpu_get_ticks() uses units of the host CPU cycle + * counter. + */ +int64_t cpu_get_ticks(void) { - func(data); + return cpu_get_host_ticks(); } -int resume_all_vcpus(struct uc_struct *uc) +/* Return the monotonic time elapsed in VM, i.e., + * the time between vm_start and vm_stop + */ +int64_t cpu_get_clock(void) { - CPUState *cpu = uc->cpu; - // Fix call multiple time (vu). - // We have to check whether this is the second time, then reset all CPU. 
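An editorial note on cpu_icount_to_ns() at the top of this hunk: the hard-coded shift of 3 is exactly the "125MIPS" guess from the comment, worked out below (a sketch, not part of the patch):

    #include <stdint.h>

    /* 1 instruction -> 2^3 ns = 8 ns of virtual time, so
       10^9 ns / 8 ns = 125,000,000 instructions/second (125 MIPS). */
    static inline int64_t icount_to_ns_sketch(int64_t icount)
    {
        return icount << 3;   /* e.g. 1000000 insns -> 8000000 ns = 8 ms */
    }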
- if (!cpu->created) { - cpu->created = true; - cpu->halted = 0; - if (qemu_init_vcpu(cpu)) - return -1; + return get_clock(); +} + +static bool cpu_can_run(CPUState *cpu) +{ + if (cpu->stop) { + return false; } - - cpu->exit_request = 0; - - //qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true); - cpu_resume(cpu); - qemu_tcg_cpu_loop(uc); - - return 0; + if (cpu_is_stopped(cpu)) { + return false; + } + return true; } -int qemu_init_vcpu(CPUState *cpu) +static void cpu_handle_guest_debug(CPUState *cpu) { - cpu->nr_cores = smp_cores; - cpu->nr_threads = smp_threads; cpu->stopped = true; - - if (tcg_enabled(cpu->uc)) - return qemu_tcg_init_vcpu(cpu); - - return 0; } -static void qemu_tcg_cpu_loop(struct uc_struct *uc) -{ - CPUState *cpu = uc->cpu; - - //qemu_tcg_init_cpu_signals(); - - cpu->created = true; - - while (1) { - if (tcg_exec_all(uc)) - break; - } - - cpu->created = false; -} - -static int qemu_tcg_init_vcpu(CPUState *cpu) -{ - tcg_cpu_address_space_init(cpu, cpu->as); - - return 0; -} - -static int tcg_cpu_exec(struct uc_struct *uc, CPUArchState *env) -{ - return cpu_exec(uc, env); -} - -static bool tcg_exec_all(struct uc_struct* uc) +static int tcg_cpu_exec(struct uc_struct *uc) { int r; bool finish = false; + while (!uc->exit_request) { CPUState *cpu = uc->cpu; - CPUArchState *env = cpu->env_ptr; //qemu_clock_enable(QEMU_CLOCK_VIRTUAL, // (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0); if (cpu_can_run(cpu)) { uc->quit_request = false; - r = tcg_cpu_exec(uc, env); + r = cpu_exec(uc, cpu); // quit current TB but continue emulating? if (uc->quit_request) { @@ -145,10 +105,8 @@ static bool tcg_exec_all(struct uc_struct* uc) } // save invalid memory access error & quit - if (env->invalid_error) { + if (uc->invalid_error) { // printf(">>> invalid memory accessed, STOP = %u!!!\n", env->invalid_error); - uc->invalid_addr = env->invalid_addr; - uc->invalid_error = env->invalid_error; finish = true; break; } @@ -173,41 +131,74 @@ static bool tcg_exec_all(struct uc_struct* uc) return finish; } -static bool cpu_can_run(CPUState *cpu) +void cpu_resume(CPUState *cpu) { - if (cpu->stop) { - return false; - } - if (cpu_is_stopped(cpu)) { - return false; - } - return true; + cpu->stop = false; + cpu->stopped = false; } -static void cpu_handle_guest_debug(CPUState *cpu) +static void qemu_tcg_init_vcpu(CPUState *cpu) { + /* + * Initialize TCG regions--once. Now is a good time, because: + * (1) TCG's init context, prologue and target globals have been set up. + * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the + * -accel flag is processed, so the check doesn't work then). 
+ */
+    tcg_region_init(cpu->uc->tcg_ctx);
+
+    cpu->created = true;
+}
+
+void qemu_init_vcpu(CPUState *cpu)
+{
+    cpu->nr_cores = 1;
+    cpu->nr_threads = 1;
     cpu->stopped = true;
+
+    qemu_tcg_init_vcpu(cpu);
+
+    return;
 }
 
-#if 0
-#ifndef _WIN32
-static void qemu_tcg_init_cpu_signals(void)
+void cpu_stop_current(struct uc_struct *uc)
 {
-    sigset_t set;
-    struct sigaction sigact;
-
-    memset(&sigact, 0, sizeof(sigact));
-    sigact.sa_handler = cpu_signal;
-    sigaction(SIG_IPI, &sigact, NULL);
-
-    sigemptyset(&set);
-    sigaddset(&set, SIG_IPI);
-    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+    if (uc->cpu) {
+        uc->cpu->stop = false;
+        uc->cpu->stopped = true;
+        cpu_exit(uc->cpu);
+    }
 }
-#else /* _WIN32 */
-static void qemu_tcg_init_cpu_signals(void)
+
+void resume_all_vcpus(struct uc_struct* uc)
 {
-}
-#endif /* _WIN32 */
-#endif
+    CPUState *cpu = uc->cpu;
+    cpu->halted = 0;
+    cpu->exit_request = 0;
+    cpu->exception_index = -1;
+    cpu_resume(cpu);
+    /* static void qemu_tcg_cpu_loop(struct uc_struct *uc) */
+    cpu->created = true;
+    while (true) {
+        if (tcg_cpu_exec(uc)) {
+            break;
+        }
+    }
+    // Clear the cached TB at addr_end: the code generated for that address
+    // only exits emulation and does not execute the instruction there.
+    // If we don't clear it, we cannot emulate at that address next time.
+    TranslationBlock *tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(uc, uc->addr_end)];
+    if (tb) {
+        qht_remove(&uc->tcg_ctx->tb_ctx.htable, tb, tb->hash);
+        tb_flush_jmp_cache(cpu, uc->addr_end);
+    }
+
+
+    cpu->created = false;
+}
+
+void vm_start(struct uc_struct* uc)
+{
+    resume_all_vcpus(uc);
+}
diff --git a/qemu/ioport.c b/qemu/softmmu/ioport.c
similarity index 69%
rename from qemu/ioport.c
rename to qemu/softmmu/ioport.c
index 336b43dc..e55d0295 100644
--- a/qemu/ioport.c
+++ b/qemu/softmmu/ioport.c
@@ -25,46 +25,35 @@
  * splitted out ioport related stuffs from vl.c.
  */
 
-/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */
-
-#include "exec/ioport.h"
+#include "qemu/osdep.h"
+#include "cpu.h"
 #include "exec/memory.h"
-#include "exec/address-spaces.h"
-
 #include "uc_priv.h"
 
-//#define DEBUG_IOPORT
-
-#ifdef DEBUG_IOPORT
-# define LOG_IOPORT(...) qemu_log_mask(CPU_LOG_IOPORT, ## __VA_ARGS__)
-#else
-# define LOG_IOPORT(...)
do { } while (0) -#endif - -typedef struct MemoryRegionPortioList { - MemoryRegion mr; - void *portio_opaque; - MemoryRegionPortio ports[]; -} MemoryRegionPortioList; - -static uint64_t unassigned_io_read(struct uc_struct* uc, void *opaque, hwaddr addr, unsigned size) +static uint64_t unassigned_io_read(struct uc_struct *uc, void* opaque, hwaddr addr, unsigned size) { - return 0-1ULL; +#ifdef _MSC_VER + return (uint64_t)0xffffffffffffffffULL; +#else + return (uint64_t)-1ULL; +#endif } -static void unassigned_io_write(struct uc_struct* uc, void *opaque, hwaddr addr, uint64_t val, - unsigned size) +static void unassigned_io_write(struct uc_struct *uc, void* opaque, hwaddr addr, uint64_t data, unsigned size) { } const MemoryRegionOps unassigned_io_ops = { - unassigned_io_read, - unassigned_io_write, - DEVICE_NATIVE_ENDIAN, + .read = unassigned_io_read, + .write = unassigned_io_write, + .endianness = DEVICE_NATIVE_ENDIAN, }; -void cpu_outb(struct uc_struct *uc, pio_addr_t addr, uint8_t val) +void cpu_outb(struct uc_struct *uc, uint32_t addr, uint8_t val) { + // address_space_write(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + // &val, 1); + //LOG_IOPORT("outb: %04"FMT_pioaddr" %02"PRIx8"\n", addr, val); // Unicorn: call registered OUT callbacks struct hook *hook; @@ -77,8 +66,14 @@ void cpu_outb(struct uc_struct *uc, pio_addr_t addr, uint8_t val) } } -void cpu_outw(struct uc_struct *uc, pio_addr_t addr, uint16_t val) +void cpu_outw(struct uc_struct *uc, uint32_t addr, uint16_t val) { + // uint8_t buf[2]; + + // stw_p(buf, val); + // address_space_write(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + // buf, 2); + //LOG_IOPORT("outw: %04"FMT_pioaddr" %04"PRIx16"\n", addr, val); // Unicorn: call registered OUT callbacks struct hook *hook; @@ -91,8 +86,14 @@ void cpu_outw(struct uc_struct *uc, pio_addr_t addr, uint16_t val) } } -void cpu_outl(struct uc_struct *uc, pio_addr_t addr, uint32_t val) +void cpu_outl(struct uc_struct *uc, uint32_t addr, uint32_t val) { + // uint8_t buf[4]; + + // stl_p(buf, val); + // address_space_write(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + // buf, 4); + //LOG_IOPORT("outl: %04"FMT_pioaddr" %08"PRIx32"\n", addr, val); // Unicorn: call registered OUT callbacks struct hook *hook; @@ -105,8 +106,13 @@ void cpu_outl(struct uc_struct *uc, pio_addr_t addr, uint32_t val) } } -uint8_t cpu_inb(struct uc_struct *uc, pio_addr_t addr) +uint8_t cpu_inb(struct uc_struct *uc, uint32_t addr) { + // uint8_t val; + + // address_space_read(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, + // &val, 1); + //LOG_IOPORT("inb : %04"FMT_pioaddr" %02"PRIx8"\n", addr, val); // Unicorn: call registered IN callbacks struct hook *hook; @@ -121,8 +127,14 @@ uint8_t cpu_inb(struct uc_struct *uc, pio_addr_t addr) return 0; } -uint16_t cpu_inw(struct uc_struct *uc, pio_addr_t addr) +uint16_t cpu_inw(struct uc_struct *uc, uint32_t addr) { + // uint8_t buf[2]; + // uint16_t val; + + // address_space_read(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, buf, 2); + // val = lduw_p(buf); + //LOG_IOPORT("inw : %04"FMT_pioaddr" %04"PRIx16"\n", addr, val); // Unicorn: call registered IN callbacks struct hook *hook; @@ -137,8 +149,16 @@ uint16_t cpu_inw(struct uc_struct *uc, pio_addr_t addr) return 0; } -uint32_t cpu_inl(struct uc_struct *uc, pio_addr_t addr) +uint32_t cpu_inl(struct uc_struct *uc, uint32_t addr) { + // uint8_t buf[4]; + // uint32_t val; + + // printf("inl_addr=%x\n", addr); + + // address_space_read(&uc->address_space_io, addr, MEMTXATTRS_UNSPECIFIED, 
buf, 4); + // val = ldl_p(buf); + //LOG_IOPORT("inl : %04"FMT_pioaddr" %08"PRIx32"\n", addr, val); // Unicorn: call registered IN callbacks struct hook *hook; diff --git a/qemu/softmmu/memory.c b/qemu/softmmu/memory.c new file mode 100644 index 00000000..c79d9038 --- /dev/null +++ b/qemu/softmmu/memory.c @@ -0,0 +1,1346 @@ +/* + * Physical memory management + * + * Copyright 2011 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Avi Kivity + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/memory.h" +#include "qemu/bitops.h" + +#include "exec/memory-internal.h" +#include "exec/ram_addr.h" +#include "sysemu/tcg.h" +#include "exec/exec-all.h" +#include "uc_priv.h" + +//#define DEBUG_UNASSIGNED + +typedef struct AddrRange AddrRange; + +/* + * Note that signed integers are needed for negative offsetting in aliases + * (large MemoryRegion::alias_offset). + */ +struct AddrRange { + Int128 start; + Int128 size; +}; + +// Unicorn engine +MemoryRegion *memory_map(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms) +{ + MemoryRegion *ram = g_new(MemoryRegion, 1); + + memory_region_init_ram(uc, ram, size, perms); + if (ram->addr == -1) { + // out of memory + return NULL; + } + + memory_region_add_subregion(uc->system_memory, begin, ram); + + if (uc->cpu) { + tlb_flush(uc->cpu); + } + + return ram; +} + +MemoryRegion *memory_map_ptr(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms, void *ptr) +{ + MemoryRegion *ram = g_new(MemoryRegion, 1); + + memory_region_init_ram_ptr(uc, ram, size, ptr); + ram->perms = perms; + if (ram->addr == -1) { + // out of memory + return NULL; + } + + memory_region_add_subregion(uc->system_memory, begin, ram); + + if (uc->cpu) { + tlb_flush(uc->cpu); + } + + return ram; +} + +typedef struct _mmio_cbs { + uc_cb_mmio_read_t read; + void *user_data_read; + uc_cb_mmio_write_t write; + void *user_data_write; +} mmio_cbs; + +static uint64_t mmio_read_wrapper(struct uc_struct *uc, void *opaque, hwaddr addr, unsigned size) +{ + mmio_cbs* cbs = (mmio_cbs*)opaque; + + if (cbs->read) { + return cbs->read(uc, addr, size, cbs->user_data_read); + } else { + return 0; + } +} + +static void mmio_write_wrapper(struct uc_struct *uc, void *opaque, hwaddr addr, uint64_t data, unsigned size) +{ + mmio_cbs* cbs = (mmio_cbs*)opaque; + + if (cbs->write) { + cbs->write(uc, addr, size, data, cbs->user_data_write); + } +} + +static void mmio_region_destructor_uc(MemoryRegion *mr) +{ + g_free(mr->opaque); +} + +MemoryRegion *memory_map_io(struct uc_struct *uc, ram_addr_t begin, size_t size, + uc_cb_mmio_read_t read_cb, uc_cb_mmio_write_t write_cb, + void *user_data_read, void *user_data_write) +{ + MemoryRegion *mmio = g_new(MemoryRegion, 1); + MemoryRegionOps *ops = g_new(MemoryRegionOps, 1); + mmio_cbs* opaques = g_new(mmio_cbs, 1); + opaques->read = read_cb; + opaques->write = write_cb; + opaques->user_data_read = user_data_read; + opaques->user_data_write = user_data_write; + + memset(ops, 0, sizeof(*ops)); + + ops->read = mmio_read_wrapper; + ops->write = mmio_write_wrapper; + ops->endianness = DEVICE_NATIVE_ENDIAN; + + memory_region_init_io(uc, mmio, ops, opaques, size); + + mmio->destructor = mmio_region_destructor_uc; + + mmio->perms = 0; + + if (read_cb) + mmio->perms |= UC_PROT_READ; 
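memory_map_io() above parks the user's callbacks in an opaque mmio_cbs so the generic wrappers can forward QEMU's MemoryRegionOps accesses to them, and derives region permissions from which callbacks were supplied (the matching write_cb check follows just below). A rough sketch of callbacks with the shape the wrappers invoke -- cbs->read(uc, addr, size, user_data) and cbs->write(uc, addr, size, data, user_data) -- where the register layout is invented for illustration:

/* Hypothetical device with a 4-byte status register at offset 0;
 * addr is the offset within the mapped MMIO range. */
static uint64_t status_read(struct uc_struct *uc, hwaddr addr,
                            unsigned size, void *user_data)
{
    if (addr == 0 && size == 4) {
        return 0x1;   /* "device ready" */
    }
    return 0;         /* unbacked offsets read as zero */
}

static void ctrl_write(struct uc_struct *uc, hwaddr addr,
                       unsigned size, uint64_t data, void *user_data)
{
    /* e.g. latch a control word written by the guest */
}

/* Passing NULL for write_cb leaves UC_PROT_WRITE clear, so the region is
 * effectively read-only and guest stores surface as protection errors:
 *
 *     memory_map_io(uc, 0xf0000000, 0x1000, status_read, NULL, NULL, NULL);
 */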
+ + if (write_cb) + mmio->perms |= UC_PROT_WRITE; + + memory_region_add_subregion(uc->system_memory, begin, mmio); + + if (uc->cpu) + tlb_flush(uc->cpu); + + return mmio; +} + +void memory_unmap(struct uc_struct *uc, MemoryRegion *mr) +{ + int i; + hwaddr addr; + + // Make sure all pages associated with the MemoryRegion are flushed + // Only need to do this if we are in a running state + if (uc->cpu) { + for (addr = mr->addr; addr < mr->end; addr += uc->target_page_size) { + tlb_flush_page(uc->cpu, addr); + } + } + memory_region_del_subregion(uc->system_memory, mr); + + for (i = 0; i < uc->mapped_block_count; i++) { + if (uc->mapped_blocks[i] == mr) { + uc->mapped_block_count--; + //shift remainder of array down over deleted pointer + memmove(&uc->mapped_blocks[i], &uc->mapped_blocks[i + 1], sizeof(MemoryRegion*) * (uc->mapped_block_count - i)); + mr->destructor(mr); + g_free(mr); + break; + } + } +} + +int memory_free(struct uc_struct *uc) +{ + MemoryRegion *mr; + int i; + + for (i = 0; i < uc->mapped_block_count; i++) { + mr = uc->mapped_blocks[i]; + mr->enabled = false; + memory_region_del_subregion(uc->system_memory, mr); + mr->destructor(mr); + /* destroy subregion */ + g_free(mr); + } + + return 0; +} + +static AddrRange addrrange_make(Int128 start, Int128 size) +{ + return (AddrRange) { start, size }; +} + +static bool addrrange_equal(AddrRange r1, AddrRange r2) +{ + return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size); +} + +static Int128 addrrange_end(AddrRange r) +{ + return int128_add(r.start, r.size); +} + +static bool addrrange_contains(AddrRange range, Int128 addr) +{ + return int128_ge(addr, range.start) + && int128_lt(addr, addrrange_end(range)); +} + +static bool addrrange_intersects(AddrRange r1, AddrRange r2) +{ + return addrrange_contains(r1, r2.start) + || addrrange_contains(r2, r1.start); +} + +static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) +{ + Int128 start = int128_max(r1.start, r2.start); + Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2)); + return addrrange_make(start, int128_sub(end, start)); +} + +enum ListenerDirection { Forward, Reverse }; + +#define MEMORY_LISTENER_CALL_GLOBAL(uc, _callback, _direction) \ + do { \ + MemoryListener *_listener; \ + \ + switch (_direction) { \ + case Forward: \ + QTAILQ_FOREACH(_listener, &uc->memory_listeners, link) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener); \ + } \ + } \ + break; \ + case Reverse: \ + QTAILQ_FOREACH_REVERSE(_listener, &uc->memory_listeners, link) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener); \ + } \ + } \ + break; \ + default: \ + abort(); \ + } \ + } while (0) + +#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section) \ + do { \ + MemoryListener *_listener; \ + \ + switch (_direction) { \ + case Forward: \ + QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener, _section); \ + } \ + } \ + break; \ + case Reverse: \ + QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener, _section); \ + } \ + } \ + break; \ + default: \ + abort(); \ + } \ + } while (0) + +/* No need to ref/unref .mr, the FlatRange keeps it alive. 
*/ +#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback) \ + do { \ + MemoryRegionSection mrs = section_from_flat_range(fr, \ + address_space_to_flatview(as)); \ + MEMORY_LISTENER_CALL(as, callback, dir, &mrs); \ + } while(0) + +/* Range of memory in the global map. Addresses are absolute. */ +struct FlatRange { + MemoryRegion *mr; + hwaddr offset_in_region; + AddrRange addr; + bool readonly; +}; + +#define FOR_EACH_FLAT_RANGE(var, view) \ + for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) + +static inline MemoryRegionSection +section_from_flat_range(FlatRange *fr, FlatView *fv) +{ + return (MemoryRegionSection) { + .mr = fr->mr, + .fv = fv, + .offset_within_region = fr->offset_in_region, + .size = fr->addr.size, + .offset_within_address_space = int128_get64(fr->addr.start), + .readonly = fr->readonly, + }; +} + +static bool flatrange_equal(FlatRange *a, FlatRange *b) +{ + return a->mr == b->mr + && addrrange_equal(a->addr, b->addr) + && a->offset_in_region == b->offset_in_region + && a->readonly == b->readonly; +} + +static FlatView *flatview_new(MemoryRegion *mr_root) +{ + FlatView *view; + + view = g_new0(FlatView, 1); + view->ref = 1; + view->root = mr_root; + + return view; +} + +/* Insert a range into a given position. Caller is responsible for maintaining + * sorting order. + */ +static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) +{ + if (view->nr == view->nr_allocated) { + view->nr_allocated = MAX(2 * view->nr, 10); + view->ranges = g_realloc(view->ranges, + view->nr_allocated * sizeof(*view->ranges)); + } + memmove(view->ranges + pos + 1, view->ranges + pos, + (view->nr - pos) * sizeof(FlatRange)); + view->ranges[pos] = *range; + ++view->nr; +} + +static inline void flatview_ref(FlatView *view) +{ + view->ref++; +} + +static void flatview_destroy(FlatView *view) +{ + if (view->dispatch) { + address_space_dispatch_free(view->dispatch); + } + g_free(view->ranges); + g_free(view); +} + +void flatview_unref(FlatView *view) +{ + view->ref--; + if (view->ref <= 0) { + flatview_destroy(view); + } +} + +static bool can_merge(FlatRange *r1, FlatRange *r2) +{ + return int128_eq(addrrange_end(r1->addr), r2->addr.start) + && r1->mr == r2->mr + && int128_eq(int128_add(int128_make64(r1->offset_in_region), + r1->addr.size), + int128_make64(r2->offset_in_region)) + && r1->readonly == r2->readonly; +} + +/* Attempt to simplify a view by merging adjacent ranges */ +static void flatview_simplify(FlatView *view) +{ + unsigned i, j; + + i = 0; + while (i < view->nr) { + j = i + 1; + while (j < view->nr + && can_merge(&view->ranges[j-1], &view->ranges[j])) { + int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size); + ++j; + } + ++i; + memmove(&view->ranges[i], &view->ranges[j], + (view->nr - j) * sizeof(view->ranges[j])); + view->nr -= j - i; + } +} + +static bool memory_region_big_endian(MemoryRegion *mr) +{ +#ifdef TARGET_WORDS_BIGENDIAN + return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; +#else + return mr->ops->endianness == DEVICE_BIG_ENDIAN; +#endif +} + +static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op) +{ + if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) { + switch (op & MO_SIZE) { + case MO_8: + break; + case MO_16: + *data = bswap16(*data); + break; + case MO_32: + *data = bswap32(*data); + break; + case MO_64: + *data = bswap64(*data); + break; + default: + g_assert_not_reached(); + } + } +} + +static inline void memory_region_shift_read_access(uint64_t *value, + signed shift, + uint64_t 
mask, + uint64_t tmp) +{ + if (shift >= 0) { + *value |= (tmp & mask) << shift; + } else { + *value |= (tmp & mask) >> -shift; + } +} + +static inline uint64_t memory_region_shift_write_access(uint64_t *value, + signed shift, + uint64_t mask) +{ + uint64_t tmp; + + if (shift >= 0) { + tmp = (*value >> shift) & mask; + } else { + tmp = (*value << -shift) & mask; + } + + return tmp; +} + +static MemTxResult memory_region_read_accessor(struct uc_struct *uc, MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + signed shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp; + + tmp = mr->ops->read(uc, mr->opaque, addr, size); + memory_region_shift_read_access(value, shift, mask, tmp); + return MEMTX_OK; +} + +static MemTxResult memory_region_read_with_attrs_accessor(struct uc_struct *uc, MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + signed shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp = 0; + MemTxResult r; + + r = mr->ops->read_with_attrs(uc, mr->opaque, addr, &tmp, size, attrs); + memory_region_shift_read_access(value, shift, mask, tmp); + return r; +} + +static MemTxResult memory_region_write_accessor(struct uc_struct *uc, MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + signed shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp = memory_region_shift_write_access(value, shift, mask); + + mr->ops->write(uc, mr->opaque, addr, tmp, size); + return MEMTX_OK; +} + +static MemTxResult memory_region_write_with_attrs_accessor(struct uc_struct *uc, MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + signed shift, + uint64_t mask, + MemTxAttrs attrs) +{ + uint64_t tmp = memory_region_shift_write_access(value, shift, mask); + + return mr->ops->write_with_attrs(uc, mr->opaque, addr, tmp, size, attrs); +} + +static MemTxResult access_with_adjusted_size(struct uc_struct *uc, hwaddr addr, + uint64_t *value, + unsigned size, + unsigned access_size_min, + unsigned access_size_max, + MemTxResult (*access_fn) + (struct uc_struct *uc, + MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + signed shift, + uint64_t mask, + MemTxAttrs attrs), + MemoryRegion *mr, + MemTxAttrs attrs) +{ + uint64_t access_mask; + unsigned access_size; + unsigned i; + MemTxResult r = MEMTX_OK; + + if (!access_size_min) { + access_size_min = 1; + } + if (!access_size_max) { + access_size_max = 4; + } + + /* FIXME: support unaligned access? */ + access_size = MAX(MIN(size, access_size_max), access_size_min); + access_mask = MAKE_64BIT_MASK(0, access_size * 8); + if (memory_region_big_endian(mr)) { + for (i = 0; i < size; i += access_size) { + r |= access_fn(uc, mr, addr + i, value, access_size, + (size - access_size - i) * 8, access_mask, attrs); + } + } else { + for (i = 0; i < size; i += access_size) { + r |= access_fn(uc, mr, addr + i, value, access_size, i * 8, + access_mask, attrs); + } + } + return r; +} + +static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) +{ + AddressSpace *as; + + while (mr->container) { + mr = mr->container; + } + QTAILQ_FOREACH(as, &mr->uc->address_spaces, address_spaces_link) { + if (mr == as->root) { + return as; + } + } + return NULL; +} + +/* Render a memory region into the global view. Ranges in @view obscure + * ranges in @mr. 
+ */ +static void render_memory_region(FlatView *view, + MemoryRegion *mr, + Int128 base, + AddrRange clip, + bool readonly) +{ + MemoryRegion *subregion; + unsigned i; + hwaddr offset_in_region; + Int128 remain; + Int128 now; + FlatRange fr; + AddrRange tmp; + + if (!mr->enabled) { + return; + } + + int128_addto(&base, int128_make64(mr->addr)); + readonly |= mr->readonly; + + tmp = addrrange_make(base, mr->size); + + if (!addrrange_intersects(tmp, clip)) { + return; + } + + clip = addrrange_intersection(tmp, clip); + + /* Render subregions in priority order. */ + QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { + render_memory_region(view, subregion, base, clip, readonly); + } + + if (!mr->terminates) { + return; + } + + offset_in_region = int128_get64(int128_sub(clip.start, base)); + base = clip.start; + remain = clip.size; + + fr.mr = mr; + fr.readonly = readonly; + + /* Render the region itself into any gaps left by the current view. */ + for (i = 0; i < view->nr && int128_nz(remain); ++i) { + if (int128_ge(base, addrrange_end(view->ranges[i].addr))) { + continue; + } + if (int128_lt(base, view->ranges[i].addr.start)) { + now = int128_min(remain, + int128_sub(view->ranges[i].addr.start, base)); + fr.offset_in_region = offset_in_region; + fr.addr = addrrange_make(base, now); + flatview_insert(view, i, &fr); + ++i; + int128_addto(&base, now); + offset_in_region += int128_get64(now); + int128_subfrom(&remain, now); + } + now = int128_sub(int128_min(int128_add(base, remain), + addrrange_end(view->ranges[i].addr)), + base); + int128_addto(&base, now); + offset_in_region += int128_get64(now); + int128_subfrom(&remain, now); + } + if (int128_nz(remain)) { + fr.offset_in_region = offset_in_region; + fr.addr = addrrange_make(base, remain); + flatview_insert(view, i, &fr); + } +} + +static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr) +{ + while (mr->enabled) { + if (!mr->terminates) { + unsigned int found = 0; + MemoryRegion *child, *next = NULL; + QTAILQ_FOREACH(child, &mr->subregions, subregions_link) { + if (child->enabled) { + if (++found > 1) { + next = NULL; + break; + } + if (!child->addr && int128_ge(mr->size, child->size)) { + /* A child is included in its entirety. If it's the only + * enabled one, use it in the hope of finding an alias down the + * way. This will also let us share FlatViews. + */ + next = child; + } + } + } + if (found == 0) { + return NULL; + } + if (next) { + mr = next; + continue; + } + } + + return mr; + } + + return NULL; +} + +/* Render a memory topology into a list of disjoint absolute ranges. 
*/ +static FlatView *generate_memory_topology(struct uc_struct *uc, MemoryRegion *mr) +{ + int i; + FlatView *view; + FlatView *old_view; + + view = flatview_new(mr); + + if (mr) { + render_memory_region(view, mr, int128_zero(), + addrrange_make(int128_zero(), int128_2_64()), + false); + } + flatview_simplify(view); + + view->dispatch = address_space_dispatch_new(uc, view); + for (i = 0; i < view->nr; i++) { + MemoryRegionSection mrs = + section_from_flat_range(&view->ranges[i], view); + flatview_add_to_dispatch(uc, view, &mrs); + } + address_space_dispatch_compact(view->dispatch); + + old_view = g_hash_table_lookup(uc->flat_views, mr); + if (old_view != view) { + g_hash_table_replace(uc->flat_views, mr, view); + if (old_view) { + flatview_unref(old_view); + } + } + + return view; +} + +FlatView *address_space_get_flatview(AddressSpace *as) +{ + FlatView *view; + + view = address_space_to_flatview(as); + + return view; +} + +static void address_space_update_topology_pass(AddressSpace *as, + const FlatView *old_view, + const FlatView *new_view, + bool adding) +{ + unsigned iold, inew; + FlatRange *frold, *frnew; + + /* Generate a symmetric difference of the old and new memory maps. + * Kill ranges in the old map, and instantiate ranges in the new map. + */ + iold = inew = 0; + while (iold < old_view->nr || inew < new_view->nr) { + if (iold < old_view->nr) { + frold = &old_view->ranges[iold]; + } else { + frold = NULL; + } + if (inew < new_view->nr) { + frnew = &new_view->ranges[inew]; + } else { + frnew = NULL; + } + + if (frold + && (!frnew + || int128_lt(frold->addr.start, frnew->addr.start) + || (int128_eq(frold->addr.start, frnew->addr.start) + && !flatrange_equal(frold, frnew)))) { + /* In old but not in new, or in both but attributes changed. */ + + if (!adding) { + MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del); + } + + ++iold; + } else if (frold && frnew && flatrange_equal(frold, frnew)) { + /* In both and unchanged (except logging may have changed) */ + + if (adding) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop); + } + + ++iold; + ++inew; + } else { + /* In new */ + + if (adding) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add); + } + + ++inew; + } + } +} + +static void flatviews_init(struct uc_struct *uc) +{ + if (uc->flat_views) { + return; + } + + uc->flat_views = g_hash_table_new_full(NULL, NULL, NULL, + (GDestroyNotify) flatview_unref); +} + +static void flatviews_reset(struct uc_struct *uc) +{ + AddressSpace *as; + + if (uc->flat_views) { + g_hash_table_destroy(uc->flat_views); + uc->flat_views = NULL; + } + flatviews_init(uc); + + /* Render unique FVs */ + QTAILQ_FOREACH(as, &uc->address_spaces, address_spaces_link) { + MemoryRegion *physmr = memory_region_get_flatview_root(as->root); + + if (g_hash_table_lookup(uc->flat_views, physmr)) { + continue; + } + + generate_memory_topology(uc, physmr); + } +} + +static void address_space_set_flatview(AddressSpace *as) +{ + FlatView *old_view = address_space_to_flatview(as); + MemoryRegion *physmr = memory_region_get_flatview_root(as->root); + FlatView *new_view = g_hash_table_lookup(as->uc->flat_views, physmr); + + assert(new_view); + + if (old_view == new_view) { + return; + } + + flatview_ref(new_view); + if (!QTAILQ_EMPTY(&as->listeners)) { + FlatView tmpview = { .nr = 0 }, *old_view2 = old_view; + + if (!old_view2) { + old_view2 = &tmpview; + } + address_space_update_topology_pass(as, old_view2, new_view, false); + address_space_update_topology_pass(as, old_view2, new_view, 
true); + } + + as->current_map = new_view; + if (old_view) { + flatview_unref(old_view); + } +} + +static void address_space_update_topology(AddressSpace *as) +{ + MemoryRegion *physmr = memory_region_get_flatview_root(as->root); + + flatviews_init(as->uc); + if (!g_hash_table_lookup(as->uc->flat_views, physmr)) { + generate_memory_topology(as->uc, physmr); + } + address_space_set_flatview(as); +} + +void memory_region_transaction_begin(void) +{ +} + +void memory_region_transaction_commit(MemoryRegion *mr) +{ + AddressSpace *as; + + if (mr->uc->memory_region_update_pending) { + flatviews_reset(mr->uc); + + MEMORY_LISTENER_CALL_GLOBAL(mr->uc, begin, Forward); + + QTAILQ_FOREACH(as, &mr->uc->address_spaces, address_spaces_link) { + address_space_set_flatview(as); + } + mr->uc->memory_region_update_pending = false; + MEMORY_LISTENER_CALL_GLOBAL(mr->uc, commit, Forward); + } +} + +static void memory_region_destructor_none(MemoryRegion *mr) +{ +} + +static void memory_region_destructor_ram(MemoryRegion *mr) +{ + qemu_ram_free(mr->uc, mr->ram_block); +} + +void memory_region_init(struct uc_struct *uc, + MemoryRegion *mr, + uint64_t size) +{ + memset(mr, 0, sizeof(*mr)); + mr->uc = uc; + /* memory_region_initfn */ + mr->ops = &unassigned_mem_ops; + mr->enabled = true; + mr->destructor = memory_region_destructor_none; + QTAILQ_INIT(&mr->subregions); + + mr->size = int128_make64(size); + if (size == UINT64_MAX) { + mr->size = int128_2_64(); + } +} + +static uint64_t unassigned_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ +#ifdef DEBUG_UNASSIGNED + printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); +#endif + return 0; +} + +static void unassigned_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ +#ifdef DEBUG_UNASSIGNED + printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val); +#endif +} + +static bool unassigned_mem_accepts(struct uc_struct *uc, void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + return false; +} + +const MemoryRegionOps unassigned_mem_ops = { + .valid.accepts = unassigned_mem_accepts, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +bool memory_region_access_valid(struct uc_struct *uc, MemoryRegion *mr, + hwaddr addr, + unsigned size, + bool is_write, + MemTxAttrs attrs) +{ + if (mr->ops->valid.accepts + && !mr->ops->valid.accepts(uc, mr->opaque, addr, size, is_write, attrs)) { + return false; + } + + if (!mr->ops->valid.unaligned && (addr & (size - 1))) { + return false; + } + + /* Treat zero as compatibility all valid */ + if (!mr->ops->valid.max_access_size) { + return true; + } + + if (size > mr->ops->valid.max_access_size + || size < mr->ops->valid.min_access_size) { + return false; + } + return true; +} + +static MemTxResult memory_region_dispatch_read1(struct uc_struct *uc, MemoryRegion *mr, + hwaddr addr, + uint64_t *pval, + unsigned size, + MemTxAttrs attrs) +{ + *pval = 0; + + if (mr->ops->read) { + return access_with_adjusted_size(uc, addr, pval, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_read_accessor, + mr, attrs); + } else { + return access_with_adjusted_size(uc, addr, pval, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_read_with_attrs_accessor, + mr, attrs); + } +} + +MemTxResult memory_region_dispatch_read(struct uc_struct *uc, MemoryRegion *mr, + hwaddr addr, + uint64_t *pval, + MemOp op, + MemTxAttrs attrs) +{ + unsigned size = memop_size(op); + MemTxResult r; + + if 
(!memory_region_access_valid(uc, mr, addr, size, false, attrs)) { + *pval = unassigned_mem_read(mr, addr, size); + return MEMTX_DECODE_ERROR; + } + + r = memory_region_dispatch_read1(uc, mr, addr, pval, size, attrs); + adjust_endianness(mr, pval, op); + return r; +} + +MemTxResult memory_region_dispatch_write(struct uc_struct *uc, MemoryRegion *mr, + hwaddr addr, + uint64_t data, + MemOp op, + MemTxAttrs attrs) +{ + unsigned size = memop_size(op); + + if (!memory_region_access_valid(uc, mr, addr, size, true, attrs)) { + unassigned_mem_write(mr, addr, data, size); + return MEMTX_DECODE_ERROR; + } + + adjust_endianness(mr, &data, op); + + if (mr->ops->write) { + return access_with_adjusted_size(uc, addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_accessor, mr, + attrs); + } else { + return + access_with_adjusted_size(uc, addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_with_attrs_accessor, + mr, attrs); + } +} + +void memory_region_init_io(struct uc_struct *uc, + MemoryRegion *mr, + const MemoryRegionOps *ops, + void *opaque, + uint64_t size) +{ + memory_region_init(uc, mr, size); + mr->ops = ops ? ops : &unassigned_mem_ops; + mr->opaque = opaque; + mr->terminates = true; +} + +void memory_region_init_ram_ptr(struct uc_struct *uc, + MemoryRegion *mr, + uint64_t size, + void *ptr) +{ + memory_region_init(uc, mr, size); + mr->ram = true; + mr->terminates = true; + mr->destructor = memory_region_destructor_ram; + + /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */ + assert(ptr != NULL); + mr->ram_block = qemu_ram_alloc_from_ptr(uc, size, ptr, mr); +} + +uint64_t memory_region_size(MemoryRegion *mr) +{ + if (int128_eq(mr->size, int128_2_64())) { + return UINT64_MAX; + } + return int128_get64(mr->size); +} + +void memory_region_set_readonly(MemoryRegion *mr, bool readonly) +{ + if (mr->readonly != readonly) { + memory_region_transaction_begin(); + mr->readonly = readonly; + memory_region_transaction_commit(mr); + } +} + +void *memory_region_get_ram_ptr(MemoryRegion *mr) +{ + void *ptr; + + ptr = qemu_map_ram_ptr(mr->uc, mr->ram_block, 0); + + return ptr; +} + +MemoryRegion *memory_region_from_host(struct uc_struct *uc, + void *ptr, ram_addr_t *offset) +{ + RAMBlock *block; + + block = qemu_ram_block_from_host(uc, ptr, false, offset); + if (!block) { + return NULL; + } + + return block->mr; +} + +ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) +{ + return mr->ram_block ? 
mr->ram_block->offset : RAM_ADDR_INVALID; +} + +static void memory_region_update_container_subregions(MemoryRegion *subregion) +{ + MemoryRegion *mr = subregion->container; + MemoryRegion *other; + + memory_region_transaction_begin(); + + QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { + QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); + goto done; + } + QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); + +done: + mr->uc->memory_region_update_pending = true; + memory_region_transaction_commit(mr); +} + +static void memory_region_add_subregion_common(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion) +{ + assert(!subregion->container); + subregion->container = mr; + subregion->addr = offset; + subregion->end = offset + int128_get64(subregion->size); + memory_region_update_container_subregions(subregion); +} + +void memory_region_add_subregion(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion) +{ + memory_region_add_subregion_common(mr, offset, subregion); +} + +void memory_region_del_subregion(MemoryRegion *mr, + MemoryRegion *subregion) +{ + memory_region_transaction_begin(); + assert(subregion->container == mr); + subregion->container = NULL; + QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); + mr->uc->memory_region_update_pending = true; + memory_region_transaction_commit(mr); +} + +static int cmp_flatrange_addr(const void *addr_, const void *fr_) +{ + const AddrRange *addr = addr_; + const FlatRange *fr = fr_; + + if (int128_le(addrrange_end(*addr), fr->addr.start)) { + return -1; + } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { + return 1; + } + return 0; +} + +static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) +{ + return bsearch(&addr, view->ranges, view->nr, + sizeof(FlatRange), cmp_flatrange_addr); +} + +/* Same as memory_region_find, but it does not add a reference to the + * returned region. It must be called from an RCU critical section. 
+ */
+static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
+                                                  hwaddr addr, uint64_t size)
+{
+    MemoryRegionSection ret = { .mr = NULL };
+    MemoryRegion *root;
+    AddressSpace *as;
+    AddrRange range;
+    FlatView *view;
+    FlatRange *fr;
+
+    addr += mr->addr;
+    for (root = mr; root->container; ) {
+        root = root->container;
+        addr += root->addr;
+    }
+
+    as = memory_region_to_address_space(root);
+    if (!as) {
+        return ret;
+    }
+    range = addrrange_make(int128_make64(addr), int128_make64(size));
+
+    view = address_space_to_flatview(as);
+    fr = flatview_lookup(view, range);
+    if (!fr) {
+        return ret;
+    }
+
+    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
+        --fr;
+    }
+
+    ret.mr = fr->mr;
+    ret.fv = view;
+    range = addrrange_intersection(range, fr->addr);
+    ret.offset_within_region = fr->offset_in_region;
+    ret.offset_within_region += int128_get64(int128_sub(range.start,
+                                                        fr->addr.start));
+    ret.size = range.size;
+    ret.offset_within_address_space = int128_get64(range.start);
+    ret.readonly = fr->readonly;
+    return ret;
+}
+
+MemoryRegionSection memory_region_find(MemoryRegion *mr,
+                                       hwaddr addr, uint64_t size)
+{
+    MemoryRegionSection ret;
+
+    ret = memory_region_find_rcu(mr, addr, size);
+    return ret;
+}
+
+static void listener_add_address_space(MemoryListener *listener,
+                                       AddressSpace *as)
+{
+    FlatView *view;
+    FlatRange *fr;
+
+    if (listener->begin) {
+        listener->begin(listener);
+    }
+
+    view = address_space_get_flatview(as);
+    FOR_EACH_FLAT_RANGE(fr, view) {
+        MemoryRegionSection section = section_from_flat_range(fr, view);
+
+        if (listener->region_add) {
+            listener->region_add(listener, &section);
+        }
+    }
+    if (listener->commit) {
+        listener->commit(listener);
+    }
+}
+
+static void listener_del_address_space(MemoryListener *listener,
+                                       AddressSpace *as)
+{
+    FlatView *view;
+    FlatRange *fr;
+
+    if (listener->begin) {
+        listener->begin(listener);
+    }
+    view = address_space_get_flatview(as);
+    FOR_EACH_FLAT_RANGE(fr, view) {
+        MemoryRegionSection section = section_from_flat_range(fr, view);
+
+        if (listener->region_del) {
+            listener->region_del(listener, &section);
+        }
+    }
+    if (listener->commit) {
+        listener->commit(listener);
+    }
+}
+
+void memory_listener_register(MemoryListener *listener, AddressSpace *as)
+{
+    listener->address_space = as;
+    QTAILQ_INSERT_TAIL(&as->uc->memory_listeners, listener, link);
+    QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
+
+    listener_add_address_space(listener, as);
+}
+
+void memory_listener_unregister(MemoryListener *listener)
+{
+    if (!listener->address_space) {
+        return;
+    }
+
+    listener_del_address_space(listener, listener->address_space);
+    QTAILQ_REMOVE(&listener->address_space->uc->memory_listeners, listener, link);
+    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
+    listener->address_space = NULL;
+}
+
+void address_space_remove_listeners(AddressSpace *as)
+{
+    while (!QTAILQ_EMPTY(&as->listeners)) {
+        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
+    }
+}
+
+void address_space_init(struct uc_struct *uc,
+                        AddressSpace *as,
+                        MemoryRegion *root)
+{
+    as->uc = uc;
+    as->root = root;
+    as->current_map = NULL;
+    QTAILQ_INIT(&as->listeners);
+    QTAILQ_INSERT_TAIL(&uc->address_spaces, as, address_spaces_link);
+    address_space_update_topology(as);
+}
+
+void address_space_destroy(AddressSpace *as)
+{
+    MemoryRegion *root = as->root;
+
+    /* Flush out anything from MemoryListeners listening in on this */
+    memory_region_transaction_begin();
+    as->root = NULL;
+
memory_region_transaction_commit(root); + QTAILQ_REMOVE(&as->uc->address_spaces, as, address_spaces_link); + + /* At this point, as->dispatch and as->current_map are dummy + * entries that the guest should never use. Wait for the old + * values to expire before freeing the data. + */ + as->root = root; + flatview_unref(as->current_map); +} + +void memory_region_init_ram(struct uc_struct *uc, + MemoryRegion *mr, + uint64_t size, + uint32_t perms) +{ + memory_region_init(uc, mr, size); + mr->ram = true; + if (!(perms & UC_PROT_WRITE)) { + mr->readonly = true; + } + mr->perms = perms; + mr->terminates = true; + mr->destructor = memory_region_destructor_ram; + mr->ram_block = qemu_ram_alloc(uc, size, mr); +} diff --git a/qemu/memory_mapping.c b/qemu/softmmu/memory_mapping.c similarity index 60% rename from qemu/memory_mapping.c rename to qemu/softmmu/memory_mapping.c index 324daa86..6f4fde13 100644 --- a/qemu/memory_mapping.c +++ b/qemu/softmmu/memory_mapping.c @@ -11,17 +11,11 @@ * */ -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ - -#include "glib_compat.h" +#include "qemu/osdep.h" #include "cpu.h" -#include "exec/cpu-all.h" #include "sysemu/memory_mapping.h" #include "exec/memory.h" -#include "exec/address-spaces.h" - -#include "uc_priv.h" //#define DEBUG_GUEST_PHYS_REGION_ADD @@ -154,113 +148,3 @@ void memory_mapping_list_add_merge_sorted(MemoryMappingList *list, /* this region can not be merged into any existed memory mapping. */ create_new_memory_mapping(list, phys_addr, virt_addr, length); } - -void memory_mapping_list_free(MemoryMappingList *list) -{ - MemoryMapping *p, *q; - - QTAILQ_FOREACH_SAFE(p, &list->head, next, q) { - QTAILQ_REMOVE(&list->head, p, next); - g_free(p); - } - - list->num = 0; - list->last_mapping = NULL; -} - -void memory_mapping_list_init(MemoryMappingList *list) -{ - list->num = 0; - list->last_mapping = NULL; - QTAILQ_INIT(&list->head); -} - -void guest_phys_blocks_free(GuestPhysBlockList *list) -{ - GuestPhysBlock *p, *q; - - QTAILQ_FOREACH_SAFE(p, &list->head, next, q) { - QTAILQ_REMOVE(&list->head, p, next); - g_free(p); - } - list->num = 0; -} - -void guest_phys_blocks_init(GuestPhysBlockList *list) -{ - list->num = 0; - QTAILQ_INIT(&list->head); -} - -typedef struct GuestPhysListener { - GuestPhysBlockList *list; - MemoryListener listener; -} GuestPhysListener; - -void qemu_get_guest_memory_mapping(struct uc_struct *uc, - MemoryMappingList *list, - const GuestPhysBlockList *guest_phys_blocks, - Error **errp) -{ - CPUState *cpu = uc->cpu; - GuestPhysBlock *block; - ram_addr_t offset, length; - - if (cpu_paging_enabled(cpu)) { - Error *err = NULL; - cpu_get_memory_mapping(cpu, list, &err); - if (err) { - error_propagate(errp, err); - return; - } - return; - } - - /* - * If the guest doesn't use paging, the virtual address is equal to physical - * address. 
- */ - QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) { - offset = block->target_start; - length = block->target_end - block->target_start; - create_new_memory_mapping(list, offset, offset, length); - } -} - -void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list, - const GuestPhysBlockList *guest_phys_blocks) -{ - GuestPhysBlock *block; - - QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) { - create_new_memory_mapping(list, block->target_start, 0, - block->target_end - block->target_start); - } -} - -void memory_mapping_filter(MemoryMappingList *list, int64_t begin, - int64_t length) -{ - MemoryMapping *cur, *next; - - QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) { - if (cur->phys_addr >= begin + length || - cur->phys_addr + cur->length <= begin) { - QTAILQ_REMOVE(&list->head, cur, next); - list->num--; - continue; - } - - if (cur->phys_addr < begin) { - cur->length -= begin - cur->phys_addr; - if (cur->virt_addr) { - cur->virt_addr += begin - cur->phys_addr; - } - cur->phys_addr = begin; - } - - if (cur->phys_addr + cur->length > begin + length) { - cur->length -= cur->phys_addr + cur->length - begin - length; - } - } -} diff --git a/qemu/hw/sparc64/sun4u.c b/qemu/softmmu/vl.c similarity index 54% rename from qemu/hw/sparc64/sun4u.c rename to qemu/softmmu/vl.c index 1995d311..6860afd9 100644 --- a/qemu/hw/sparc64/sun4u.c +++ b/qemu/softmmu/vl.c @@ -1,7 +1,7 @@ /* - * QEMU Sun4u/Sun4v System Emulator + * QEMU System Emulator * - * Copyright (c) 2005 Fabrice Bellard + * Copyright (c) 2003-2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -21,43 +21,48 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ -#include "hw/hw.h" -#include "hw/sparc/sparc.h" -#include "qemu/timer.h" + #include "sysemu/sysemu.h" -#include "hw/boards.h" -#include "exec/address-spaces.h" +#include "sysemu/cpus.h" +#include "uc_priv.h" + +void init_real_host_page_size(struct uc_struct *uc); +void init_cache_info(struct uc_struct *uc); -/* Sun4u hardware initialisation */ -static int sun4u_init(struct uc_struct *uc, MachineState *machine) +DEFAULT_VISIBILITY +int machine_initialize(struct uc_struct *uc) { - const char *cpu_model = machine->cpu_model; - SPARCCPU *cpu; + init_get_clock(); - if (cpu_model == NULL) - cpu_model = "Sun UltraSparc IV"; + /* Init uc->qemu_real_host_page_size. */ + init_real_host_page_size(uc); - cpu = cpu_sparc_init(uc, cpu_model); - if (cpu == NULL) { - fprintf(stderr, "Unable to find Sparc CPU definition\n"); - return -1; - } + /* Init uc->qemu_icache_linesize. */ + init_cache_info(uc); - return 0; + // Initialize arch specific. + uc->init_arch(uc); + + /* Init memory. */ + uc->cpu_exec_init_all(uc); + + uc->target_page(uc); + + /* Init tcg. use DEFAULT_CODE_GEN_BUFFER_SIZE. */ + uc->tcg_exec_init(uc, 0); + + /* Init cpu. use default cpu_model. */ + return uc->cpus_init(uc, NULL); } -void sun4u_machine_init(struct uc_struct *uc) +void qemu_system_reset_request(struct uc_struct* uc) { - static QEMUMachine sun4u_machine = { - NULL, - "sun4u", - sun4u_init, - NULL, - 1, // XXX for now - 1, - UC_ARCH_SPARC, - }; - - qemu_register_machine(uc, &sun4u_machine, TYPE_MACHINE, NULL); + cpu_stop(uc); +} + +void qemu_system_shutdown_request(struct uc_struct *uc) +{ + /* TODO: shutdown(exit program) immediately? 
*/ + cpu_stop(uc); } diff --git a/qemu/softmmu_template.h b/qemu/softmmu_template.h deleted file mode 100644 index 3e8fa9f6..00000000 --- a/qemu/softmmu_template.h +++ /dev/null @@ -1,1091 +0,0 @@ -/* - * Software MMU support - * - * Generate helpers used by TCG for qemu_ld/st ops and code load - * functions. - * - * Included from target op helpers and exec.c. - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ - -#include "qemu/timer.h" -#include "exec/address-spaces.h" -#include "exec/memory.h" -#include "uc_priv.h" - -#define DATA_SIZE (1 << SHIFT) - -#if DATA_SIZE == 8 -#define SUFFIX q -#define LSUFFIX q -#define SDATA_TYPE int64_t -#define DATA_TYPE uint64_t -#elif DATA_SIZE == 4 -#define SUFFIX l -#define LSUFFIX l -#define SDATA_TYPE int32_t -#define DATA_TYPE uint32_t -#elif DATA_SIZE == 2 -#define SUFFIX w -#define LSUFFIX uw -#define SDATA_TYPE int16_t -#define DATA_TYPE uint16_t -#elif DATA_SIZE == 1 -#define SUFFIX b -#define LSUFFIX ub -#define SDATA_TYPE int8_t -#define DATA_TYPE uint8_t -#else -#error unsupported data size -#endif - - -/* For the benefit of TCG generated code, we want to avoid the complication - of ABI-specific return type promotion and always return a value extended - to the register size of the host. This is tcg_target_long, except in the - case of a 32-bit host and 64-bit data, and for that we always have - uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. 
*/ -#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8 -# define WORD_TYPE DATA_TYPE -# define USUFFIX SUFFIX -#else -# define WORD_TYPE tcg_target_ulong -# define USUFFIX glue(u, SUFFIX) -# define SSUFFIX glue(s, SUFFIX) -#endif - -#ifdef SOFTMMU_CODE_ACCESS -#define READ_ACCESS_TYPE MMU_INST_FETCH -#define ADDR_READ addr_code -#else -#define READ_ACCESS_TYPE MMU_DATA_LOAD -#define ADDR_READ addr_read -#endif - -#if DATA_SIZE == 8 -# define BSWAP(X) bswap64(X) -#elif DATA_SIZE == 4 -# define BSWAP(X) bswap32(X) -#elif DATA_SIZE == 2 -# define BSWAP(X) bswap16(X) -#else -# define BSWAP(X) (X) -#endif - -#ifdef TARGET_WORDS_BIGENDIAN -# define TGT_BE(X) (X) -# define TGT_LE(X) BSWAP(X) -#else -# define TGT_BE(X) BSWAP(X) -# define TGT_LE(X) (X) -#endif - -#if DATA_SIZE == 1 -# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX) -# define helper_be_ld_name helper_le_ld_name -# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX) -# define helper_be_lds_name helper_le_lds_name -# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX) -# define helper_be_st_name helper_le_st_name -#else -# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX) -# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX) -# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX) -# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX) -# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX) -# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX) -#endif - -#ifdef TARGET_WORDS_BIGENDIAN -# define helper_te_ld_name helper_be_ld_name -# define helper_te_st_name helper_be_st_name -#else -# define helper_te_ld_name helper_le_ld_name -# define helper_te_st_name helper_le_st_name -#endif - -/* macro to check the victim tlb */ -#define VICTIM_TLB_HIT(ty) \ - /* we are about to do a page table walk. our last hope is the \ - * victim tlb. try to refill from the victim tlb before walking the \ - * page table. */ \ - int vidx; \ - hwaddr tmpiotlb; \ - CPUTLBEntry tmptlb; \ - for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \ - if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\ - /* found entry in victim tlb, swap tlb and iotlb */ \ - tmptlb = env->tlb_table[mmu_idx][index]; \ - env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \ - env->tlb_v_table[mmu_idx][vidx] = tmptlb; \ - tmpiotlb = env->iotlb[mmu_idx][index]; \ - env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \ - env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \ - break; \ - } \ - } \ - /* return true when there is a vtlb hit, i.e. 
vidx >=0 */ \ - return (vidx >= 0) - -#ifndef victim_tlb_hit_funcs -#define victim_tlb_hit_funcs -static inline bool victim_tlb_hit_read(CPUArchState *env, target_ulong addr, int mmu_idx, int index) -{ - VICTIM_TLB_HIT(ADDR_READ); -} - -static inline bool victim_tlb_hit_write(CPUArchState *env, target_ulong addr, int mmu_idx, int index) -{ - VICTIM_TLB_HIT(addr_write); -} -#endif // victim_tlb_hit_funcs - -#ifndef SOFTMMU_CODE_ACCESS -static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, - hwaddr physaddr, - target_ulong addr, - uintptr_t retaddr) -{ - uint64_t val; - CPUState *cpu = ENV_GET_CPU(env); - MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr); - - physaddr = (physaddr & TARGET_PAGE_MASK) + addr; - cpu->mem_io_pc = retaddr; - if (mr != &(cpu->uc->io_mem_rom) && mr != &(cpu->uc->io_mem_notdirty) - && !cpu_can_do_io(cpu)) { - cpu_io_recompile(cpu, retaddr); - } - - cpu->mem_io_vaddr = addr; - io_mem_read(mr, physaddr, &val, 1 << SHIFT); - return (DATA_TYPE)val; -} -#endif - -#ifdef SOFTMMU_CODE_ACCESS -static QEMU_UNUSED_FUNC -#endif -WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, - uintptr_t retaddr) -{ - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; - uintptr_t haddr; - DATA_TYPE res; - int error_code; - struct hook *hook; - bool handled; - HOOK_FOREACH_VAR_DECLARE; - - struct uc_struct *uc = env->uc; - MemoryRegion *mr = memory_mapping(uc, addr); - - // memory might be still unmapped while reading or fetching - if (mr == NULL) { - handled = false; -#if defined(SOFTMMU_CODE_ACCESS) - error_code = UC_ERR_FETCH_UNMAPPED; - HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_UNMAPPED) { - if (hook->to_delete) - continue; - if (!HOOK_BOUND_CHECK(hook, addr)) - continue; - if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data))) - break; - } -#else - error_code = UC_ERR_READ_UNMAPPED; - HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_UNMAPPED) { - if (hook->to_delete) - continue; - if (!HOOK_BOUND_CHECK(hook, addr)) - continue; - if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data))) - break; - } -#endif - if (handled) { - env->invalid_error = UC_ERR_OK; - mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time? - } else { - env->invalid_addr = addr; - env->invalid_error = error_code; - // printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr); - cpu_exit(uc->current_cpu); - return 0; - } - } - -#if defined(SOFTMMU_CODE_ACCESS) - // Unicorn: callback on fetch from NX - if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable - handled = false; - HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_PROT) { - if (hook->to_delete) - continue; - if (!HOOK_BOUND_CHECK(hook, addr)) - continue; - if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data))) - break; - } - - if (handled) { - env->invalid_error = UC_ERR_OK; - } else { - env->invalid_addr = addr; - env->invalid_error = UC_ERR_FETCH_PROT; - // printf("***** Invalid fetch (non-executable) at " TARGET_FMT_lx "\n", addr); - cpu_exit(uc->current_cpu); - return 0; - } - } -#endif - - // Unicorn: callback on memory read - // NOTE: this happens before the actual read, so we cannot tell - // the callback if read access is succesful, or not. 
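The deleted note above states the contract that also holds for these hooks in Unicorn 2: UC_HOOK_MEM_READ fires before the load, so it cannot report the value, while UC_HOOK_MEM_READ_AFTER fires once the data has been fetched. A sketch against the public API (callback shape per unicorn.h's uc_cb_hookmem_t; begin=1, end=0 is the usual "hook everywhere" convention):

#include <inttypes.h>
#include <stdio.h>
#include <unicorn/unicorn.h>

static void on_read(uc_engine *uc, uc_mem_type type, uint64_t address,
                    int size, int64_t value, void *user_data)
{
    if (type == UC_MEM_READ) {     /* before the load: value is not yet known */
        printf("about to read %d bytes @ 0x%" PRIx64 "\n", size, address);
    } else {                       /* UC_MEM_READ_AFTER: value holds the data */
        printf("read 0x%" PRIx64 " @ 0x%" PRIx64 "\n", (uint64_t)value, address);
    }
}

/* uc_hook h1, h2;
 * uc_hook_add(uc, &h1, UC_HOOK_MEM_READ, on_read, NULL, 1, 0);
 * uc_hook_add(uc, &h2, UC_HOOK_MEM_READ_AFTER, on_read, NULL, 1, 0); */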
-    // See UC_HOOK_MEM_READ_AFTER & UC_MEM_READ_AFTER if you only care
-    // about successful reads
-    if (READ_ACCESS_TYPE == MMU_DATA_LOAD) {
-        if (!uc->size_recur_mem) { // skip the read callback when inside a recursive call
-            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ) {
-                if (hook->to_delete)
-                    continue;
-                if (!HOOK_BOUND_CHECK(hook, addr))
-                    continue;
-                ((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ, addr, DATA_SIZE, 0, hook->user_data);
-            }
-        }
-    }
-
-    // Unicorn: callback on non-readable memory
-    if (READ_ACCESS_TYPE == MMU_DATA_LOAD && mr != NULL && !(mr->perms & UC_PROT_READ)) { // non-readable
-        handled = false;
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_PROT) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data)))
-                break;
-        }
-
-        if (handled) {
-            env->invalid_error = UC_ERR_OK;
-        } else {
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_READ_PROT;
-            // printf("***** Invalid memory read (non-readable) at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(uc->current_cpu);
-            return 0;
-        }
-    }
-
-    /* Adjust the given return address. */
-    retaddr -= GETPC_ADJ;
-
-    /* If the TLB entry is for a different page, reload and try again. */
-    /* If the TLB entry addend is invalidated by any callbacks (perhaps due to
-       a TLB flush), reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
-        || env->tlb_table[mmu_idx][index].addend == -1) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-            //                     mmu_idx, retaddr);
-            env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-            env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-            env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-            cpu_exit(uc->current_cpu);
-            return 0;
-        }
-#endif
-        if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) {
-            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                     mmu_idx, retaddr);
-        }
-        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
-    }
-
-    /* Handle an IO access. */
-    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            goto do_unaligned_access;
-        }
-        ioaddr = env->iotlb[mmu_idx][index];
-        if (ioaddr == 0) {
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_READ_UNMAPPED;
-            // printf("Invalid memory read at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(env->uc->current_cpu);
-            return 0;
-        } else {
-            env->invalid_error = UC_ERR_OK;
-        }
-
-        /* ??? Note that the io helpers always read data in the target
-           byte ordering. We should push the LE/BE request down into io. */
-        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
-        res = TGT_LE(res);
-        goto _out;
-    }
-
-    /* Handle slow unaligned access (it spans two pages or IO). */
-    if (DATA_SIZE > 1
-        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
-                    >= TARGET_PAGE_SIZE)) {
-        target_ulong addr1, addr2;
-        DATA_TYPE res1, res2;
-        unsigned shift;
-    do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-        //                     mmu_idx, retaddr);
-        env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-        env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-        cpu_exit(uc->current_cpu);
-        return 0;
-#endif
-        addr1 = addr & ~(DATA_SIZE - 1);
-        addr2 = addr1 + DATA_SIZE;
-        /* Note the adjustment at the beginning of the function.
-           Undo that for the recursion. */
-        uc->size_recur_mem = DATA_SIZE - (addr - addr1); // these bytes were already reported to the callback
-        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
-        uc->size_recur_mem = (addr2 - addr);
-        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
-        uc->size_recur_mem = 0;
-        shift = (addr & (DATA_SIZE - 1)) * 8;
-
-        /* Little-endian combine (see the worked example below). */
-        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
-        goto _out;
-    }
-
-    /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
-        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-        //                     mmu_idx, retaddr);
-        env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-        env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-        cpu_exit(uc->current_cpu);
-        return 0;
-    }
-#endif
-
-    haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
-#if DATA_SIZE == 1
-    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
-#else
-    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
-#endif
-
-_out:
-    // Unicorn: callback on successful read
-    if (READ_ACCESS_TYPE == MMU_DATA_LOAD) {
-        if (!uc->size_recur_mem) { // skip the read callback when inside a recursive call
-            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_AFTER) {
-                if (hook->to_delete)
-                    continue;
-                if (!HOOK_BOUND_CHECK(hook, addr))
-                    continue;
-                ((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ_AFTER, addr, DATA_SIZE, res, hook->user_data);
-            }
-        }
-    }
-
-    return res;
-}
-
-#if DATA_SIZE > 1
-#ifdef SOFTMMU_CODE_ACCESS
-static QEMU_UNUSED_FUNC
-#endif
-WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
-                            uintptr_t retaddr)
-{
-    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
-    uintptr_t haddr;
-    DATA_TYPE res;
-    int error_code;
-    struct hook *hook;
-    bool handled;
-    HOOK_FOREACH_VAR_DECLARE;
-
-    struct uc_struct *uc = env->uc;
-    MemoryRegion *mr = memory_mapping(uc, addr);
-
-    // memory might still be unmapped while reading or fetching
-    if (mr == NULL) {
-        handled = false;
-#if defined(SOFTMMU_CODE_ACCESS)
-        error_code = UC_ERR_FETCH_UNMAPPED;
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_UNMAPPED) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data)))
-                break;
-        }
-#else
-        error_code = UC_ERR_READ_UNMAPPED;
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_UNMAPPED) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data)))
-                break;
-        }
-#endif
-        if (handled) {
-            env->invalid_error = UC_ERR_OK;
-            mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time?
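/*
 * [Illustrative aside, not part of the original file: a worked example of
 * the cross-page combine performed by helper_le_ld_name above, with
 * assumed concrete numbers.]
 * Take a 4-byte little-endian load at addr = 0x1ffe on 4 KiB pages:
 *   addr1 = addr & ~3 = 0x1ffc, addr2 = 0x2000,
 *   shift = (addr & 3) * 8 = 16.
 * If memory at 0x1ffe..0x2001 holds the bytes 11 22 33 44, then
 *   res1 = LE word at 0x1ffc = 0x2211XXXX  (XX = bytes below addr)
 *   res2 = LE word at 0x2000 = 0xYYYY4433  (YY = bytes past the load)
 *   res  = (res1 >> 16) | (res2 << 16) = 0x44332211,
 * which is exactly the little-endian value of the bytes 11 22 33 44.
 * The big-endian helper below mirrors this with
 *   res = (res1 << shift) | (res2 >> (32 - shift)).
 */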
-        } else {
-            env->invalid_addr = addr;
-            env->invalid_error = error_code;
-            // printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(uc->current_cpu);
-            return 0;
-        }
-    }
-
-#if defined(SOFTMMU_CODE_ACCESS)
-    // Unicorn: callback on fetch from NX
-    if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable
-        handled = false;
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_PROT) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data)))
-                break;
-        }
-
-        if (handled) {
-            env->invalid_error = UC_ERR_OK;
-        } else {
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_FETCH_PROT;
-            // printf("***** Invalid fetch (non-executable) at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(uc->current_cpu);
-            return 0;
-        }
-    }
-#endif
-
-    // Unicorn: callback on memory read
-    // NOTE: this happens before the actual read, so we cannot tell the
-    // callback whether the read access was successful or not.
-    // See UC_HOOK_MEM_READ_AFTER & UC_MEM_READ_AFTER if you only care
-    // about successful reads
-    if (READ_ACCESS_TYPE == MMU_DATA_LOAD) {
-        if (!uc->size_recur_mem) { // skip the read callback when inside a recursive call
-            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ) {
-                if (hook->to_delete)
-                    continue;
-                if (!HOOK_BOUND_CHECK(hook, addr))
-                    continue;
-                ((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ, addr, DATA_SIZE, 0, hook->user_data);
-            }
-        }
-    }
-
-    // Unicorn: callback on non-readable memory
-    if (READ_ACCESS_TYPE == MMU_DATA_LOAD && mr != NULL && !(mr->perms & UC_PROT_READ)) { // non-readable
-        handled = false;
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_PROT) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data)))
-                break;
-        }
-
-        if (handled) {
-            env->invalid_error = UC_ERR_OK;
-        } else {
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_READ_PROT;
-            // printf("***** Invalid memory read (non-readable) at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(uc->current_cpu);
-            return 0;
-        }
-    }
-
-    /* Adjust the given return address. */
-    retaddr -= GETPC_ADJ;
-
-    /* If the TLB entry is for a different page, reload and try again. */
-    /* If the TLB entry addend is invalidated by any callbacks (perhaps due to
-       a TLB flush), reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
-        || env->tlb_table[mmu_idx][index].addend == -1) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-            //                     mmu_idx, retaddr);
-            env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-            env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-            env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-            cpu_exit(uc->current_cpu);
-            return 0;
-        }
-#endif
-        if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) {
-            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                     mmu_idx, retaddr);
-        }
-        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
-    }
-
-    /* Handle an IO access. */
-    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            goto do_unaligned_access;
-        }
-        ioaddr = env->iotlb[mmu_idx][index];
-
-        if (ioaddr == 0) {
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_READ_UNMAPPED;
-            // printf("Invalid memory read at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(env->uc->current_cpu);
-            return 0;
-        }
-
-        /* ??? Note that the io helpers always read data in the target
-           byte ordering. We should push the LE/BE request down into io. */
-        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
-        res = TGT_BE(res);
-        goto _out;
-    }
-
-    /* Handle slow unaligned access (it spans two pages or IO). */
-    if (DATA_SIZE > 1
-        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
-                    >= TARGET_PAGE_SIZE)) {
-        target_ulong addr1, addr2;
-        DATA_TYPE res1, res2;
-        unsigned shift;
-    do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-        //                     mmu_idx, retaddr);
-        env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-        env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-        cpu_exit(uc->current_cpu);
-        return 0;
-#endif
-        addr1 = addr & ~(DATA_SIZE - 1);
-        addr2 = addr1 + DATA_SIZE;
-        /* Note the adjustment at the beginning of the function.
-           Undo that for the recursion. */
-        uc->size_recur_mem = DATA_SIZE - (addr - addr1); // these bytes were already reported to the callback
-        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
-        uc->size_recur_mem = (addr2 - addr);
-        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
-        uc->size_recur_mem = 0;
-        shift = (addr & (DATA_SIZE - 1)) * 8;
-
-        /* Big-endian combine. */
-        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
-        goto _out;
-    }
-
-    /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
-        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-        //                     mmu_idx, retaddr);
-        env->invalid_addr = addr;
-#if defined(SOFTMMU_CODE_ACCESS)
-        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
-#else
-        env->invalid_error = UC_ERR_READ_UNALIGNED;
-#endif
-        cpu_exit(uc->current_cpu);
-        return 0;
-    }
-#endif
-
-    haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
-    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
-
-_out:
-    // Unicorn: callback on successful read
-    if (READ_ACCESS_TYPE == MMU_DATA_LOAD) {
-        if (!uc->size_recur_mem) { // skip the read callback when inside a recursive call
-            HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_AFTER) {
-                if (hook->to_delete)
-                    continue;
-                if (!HOOK_BOUND_CHECK(hook, addr))
-                    continue;
-                ((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ_AFTER, addr, DATA_SIZE, res, hook->user_data);
-            }
-        }
-    }
-
-    return res;
-}
-#endif /* DATA_SIZE > 1 */
-
-DATA_TYPE
-glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
-                                         int mmu_idx)
-{
-    return helper_te_ld_name (env, addr, mmu_idx, GETRA());
-}
-
-#ifndef SOFTMMU_CODE_ACCESS
-
-/* Provide signed versions of the load routines as well. We can of course
-   avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
-#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
-WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
-                             int mmu_idx, uintptr_t retaddr)
-{
-    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
-}
-
-# if DATA_SIZE > 1
-WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
-                             int mmu_idx, uintptr_t retaddr)
-{
-    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
-}
-# endif
-#endif
-
-static inline void glue(io_write, SUFFIX)(CPUArchState *env,
-                                          hwaddr physaddr,
-                                          DATA_TYPE val,
-                                          target_ulong addr,
-                                          uintptr_t retaddr)
-{
-    CPUState *cpu = ENV_GET_CPU(env);
-    MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
-
-    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
-    if (mr != &(cpu->uc->io_mem_rom) && mr != &(cpu->uc->io_mem_notdirty)
-        && !cpu_can_do_io(cpu)) {
-        cpu_io_recompile(cpu, retaddr);
-    }
-
-    cpu->mem_io_vaddr = addr;
-    cpu->mem_io_pc = retaddr;
-    io_mem_write(mr, physaddr, val, 1 << SHIFT);
-}
-
-void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
-                       int mmu_idx, uintptr_t retaddr)
-{
-    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
-    uintptr_t haddr;
-    struct hook *hook;
-    bool handled;
-    HOOK_FOREACH_VAR_DECLARE;
-
-    struct uc_struct *uc = env->uc;
-    MemoryRegion *mr = memory_mapping(uc, addr);
-
-    if (!uc->size_recur_mem) { // skip the write callback when inside a recursive call
-        // Unicorn: callback on memory write
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            ((uc_cb_hookmem_t)hook->callback)(uc, UC_MEM_WRITE, addr, DATA_SIZE, val, hook->user_data);
-        }
-    }
-
-    // Unicorn: callback on invalid memory
-    if (mr == NULL) {
-        handled = false;
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_UNMAPPED) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_UNMAPPED, addr, DATA_SIZE, val, hook->user_data)))
-                break;
-        }
-
-        if (!handled) {
-            // save error & quit
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_UNMAPPED;
-            // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(uc->current_cpu);
-            return;
-        } else {
-            env->invalid_error = UC_ERR_OK;
-            mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time?
-        }
-    }
-
-    // Unicorn: callback on non-writable memory
-    if (mr != NULL && !(mr->perms & UC_PROT_WRITE)) { // non-writable
-        handled = false;
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_PROT) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_PROT, addr, DATA_SIZE, val, hook->user_data)))
-                break;
-        }
-
-        if (handled) {
-            env->invalid_error = UC_ERR_OK;
-        } else {
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_PROT;
-            // printf("***** Invalid memory write (ro) at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(uc->current_cpu);
-            return;
-        }
-    }
-
-    /* Adjust the given return address. */
-    retaddr -= GETPC_ADJ;
-
-    /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            //cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-            //                     mmu_idx, retaddr);
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-            cpu_exit(uc->current_cpu);
-            return;
-        }
-#endif
-        if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) {
-            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
-        }
-        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
-    }
-
-    /* Handle an IO access. */
-    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            goto do_unaligned_access;
-        }
-        ioaddr = env->iotlb[mmu_idx][index];
-        if (ioaddr == 0) {
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_UNMAPPED;
-            // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(env->uc->current_cpu);
-            return;
-        }
-
-        /* ??? Note that the io helpers always read data in the target
-           byte ordering. We should push the LE/BE request down into io. */
-        val = TGT_LE(val);
-        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
-        return;
-    }
-
-    /* Handle slow unaligned access (it spans two pages or IO). */
-    if (DATA_SIZE > 1
-        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
-                    >= TARGET_PAGE_SIZE)) {
-        int i;
-    do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-        env->invalid_addr = addr;
-        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-        cpu_exit(uc->current_cpu);
-        return;
-#endif
-        /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache. */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
-            /* Little-endian extract. */
-            uint8_t val8 = (uint8_t)(val >> (i * 8));
-            // these bytes were already reported; this only disables the write callback
-            uc->size_recur_mem = DATA_SIZE - i;
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion. */
-            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            mmu_idx, retaddr + GETPC_ADJ);
-            if (env->invalid_error != UC_ERR_OK)
-                break;
-        }
-        uc->size_recur_mem = 0;
-        return;
-    }
-
-    /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-        env->invalid_addr = addr;
-        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-        cpu_exit(uc->current_cpu);
-        return;
-    }
-#endif
-
-    haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
-#if DATA_SIZE == 1
-    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-#else
-    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-#endif
-}
-
-#if DATA_SIZE > 1
-void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
-                       int mmu_idx, uintptr_t retaddr)
-{
-    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
-    uintptr_t haddr;
-    struct hook *hook;
-    bool handled;
-    HOOK_FOREACH_VAR_DECLARE;
-
-    struct uc_struct *uc = env->uc;
-    MemoryRegion *mr = memory_mapping(uc, addr);
-
-    if (!uc->size_recur_mem) { // skip the write callback when inside a recursive call
-        // Unicorn: callback on memory write
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            ((uc_cb_hookmem_t)hook->callback)(uc, UC_MEM_WRITE, addr, DATA_SIZE, val, hook->user_data);
-        }
-    }
-
-    // Unicorn: callback on invalid memory
-    if (mr == NULL) {
-        handled = false;
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_UNMAPPED) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_UNMAPPED, addr, DATA_SIZE, val, hook->user_data)))
-                break;
-        }
-
-        if (!handled) {
-            // save error & quit
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_UNMAPPED;
-            // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(uc->current_cpu);
-            return;
-        } else {
-            env->invalid_error = UC_ERR_OK;
-            mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time?
-        }
-    }
-
-    // Unicorn: callback on non-writable memory
-    if (mr != NULL && !(mr->perms & UC_PROT_WRITE)) { // non-writable
-        handled = false;
-        HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_PROT) {
-            if (hook->to_delete)
-                continue;
-            if (!HOOK_BOUND_CHECK(hook, addr))
-                continue;
-            if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_PROT, addr, DATA_SIZE, val, hook->user_data)))
-                break;
-        }
-
-        if (handled) {
-            env->invalid_error = UC_ERR_OK;
-        } else {
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_PROT;
-            // printf("***** Invalid memory write (ro) at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(uc->current_cpu);
-            return;
-        }
-    }
-
-    /* Adjust the given return address. */
-    retaddr -= GETPC_ADJ;
-
-    /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-            cpu_exit(uc->current_cpu);
-            return;
-        }
-#endif
-        if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) {
-            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
-        }
-        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
-    }
-
-    /* Handle an IO access. */
-    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            goto do_unaligned_access;
-        }
-        ioaddr = env->iotlb[mmu_idx][index];
-        if (ioaddr == 0) {
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_WRITE_UNMAPPED;
-            // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(env->uc->current_cpu);
-            return;
-        }
-
-        /* ??? Note that the io helpers always read data in the target
-           byte ordering. We should push the LE/BE request down into io. */
-        val = TGT_BE(val);
-        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
-        return;
-    }
-
-    /* Handle slow unaligned access (it spans two pages or IO). */
-    if (DATA_SIZE > 1
-        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
-                    >= TARGET_PAGE_SIZE)) {
-        int i;
-    do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-        env->invalid_addr = addr;
-        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-        cpu_exit(uc->current_cpu);
-        return;
-#endif
-        /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache. */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
-            /* Big-endian extract. */
-            uint8_t val8 = (uint8_t)(val >> (((DATA_SIZE - 1) * 8) - (i * 8)));
-            // these bytes were already reported; this only disables the write callback
-            uc->size_recur_mem = DATA_SIZE - i;
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion. */
-            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            mmu_idx, retaddr + GETPC_ADJ);
-            if (env->invalid_error != UC_ERR_OK)
-                break;
-        }
-        uc->size_recur_mem = 0;
-        return;
-    }
-
-    /* Handle aligned access or unaligned access in the same page. */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-        env->invalid_addr = addr;
-        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
-        cpu_exit(uc->current_cpu);
-        return;
-    }
-#endif
-
-    haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
-    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
-}
-#endif /* DATA_SIZE > 1 */
-
-void
-glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
-                                         DATA_TYPE val, int mmu_idx)
-{
-    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
-}
-
-#endif /* !defined(SOFTMMU_CODE_ACCESS) */
-
-#undef READ_ACCESS_TYPE
-#undef SHIFT
-#undef DATA_TYPE
-#undef SUFFIX
-#undef LSUFFIX
-#undef DATA_SIZE
-#undef ADDR_READ
-#undef WORD_TYPE
-#undef SDATA_TYPE
-#undef USUFFIX
-#undef SSUFFIX
-#undef BSWAP
-#undef TGT_BE
-#undef TGT_LE
-#undef CPU_BE
-#undef CPU_LE
-#undef helper_le_ld_name
-#undef helper_be_ld_name
-#undef helper_le_lds_name
-#undef helper_be_lds_name
-#undef helper_le_st_name
-#undef helper_be_st_name
-#undef helper_te_ld_name
-#undef helper_te_st_name
diff --git a/qemu/sparc.h b/qemu/sparc.h
index 4fbf55e7..b3ea2084 100644
--- a/qemu/sparc.h
+++ b/qemu/sparc.h
@@ -1,3028 +1,1283 @@
 /* Autogen header for Unicorn Engine - DO NOT MODIFY */
-#ifndef UNICORN_AUTOGEN_SPARC_H
-#define UNICORN_AUTOGEN_SPARC_H
-#define arm_release arm_release_sparc
-#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_sparc
-#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_sparc
-#define use_idiv_instructions_rt use_idiv_instructions_rt_sparc
-#define tcg_target_deposit_valid tcg_target_deposit_valid_sparc
-#define helper_power_down helper_power_down_sparc
-#define check_exit_request check_exit_request_sparc
-#define address_space_unregister address_space_unregister_sparc
-#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_sparc
-#define phys_mem_clean phys_mem_clean_sparc
-#define tb_cleanup tb_cleanup_sparc
+#ifndef UNICORN_AUTOGEN_sparc_H
+#define UNICORN_AUTOGEN_sparc_H
+#ifndef UNICORN_ARCH_POSTFIX
+#define UNICORN_ARCH_POSTFIX _sparc
+#endif
+#define arm_arch arm_arch_sparc
+#define tb_target_set_jmp_target tb_target_set_jmp_target_sparc
+#define have_bmi1 have_bmi1_sparc
+#define have_popcnt have_popcnt_sparc
+#define have_avx1 have_avx1_sparc
+#define have_avx2 have_avx2_sparc
+#define have_isa have_isa_sparc
+#define have_altivec have_altivec_sparc
+#define have_vsx have_vsx_sparc
+#define flush_icache_range flush_icache_range_sparc
+#define s390_facilities s390_facilities_sparc
+#define tcg_dump_op tcg_dump_op_sparc
+#define tcg_dump_ops tcg_dump_ops_sparc
+#define tcg_gen_and_i64 tcg_gen_and_i64_sparc
+#define tcg_gen_discard_i64 tcg_gen_discard_i64_sparc
+#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_sparc
+#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_sparc
+#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_sparc
+#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_sparc
+#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_sparc
+#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_sparc
+#define tcg_gen_ld_i64 tcg_gen_ld_i64_sparc
+#define tcg_gen_mov_i64 tcg_gen_mov_i64_sparc
+#define tcg_gen_movi_i64 tcg_gen_movi_i64_sparc
+#define tcg_gen_mul_i64 tcg_gen_mul_i64_sparc
+#define tcg_gen_or_i64 tcg_gen_or_i64_sparc
+#define tcg_gen_sar_i64 tcg_gen_sar_i64_sparc
+#define tcg_gen_shl_i64 tcg_gen_shl_i64_sparc
+#define tcg_gen_shr_i64 tcg_gen_shr_i64_sparc
+#define tcg_gen_st_i64 tcg_gen_st_i64_sparc
+#define tcg_gen_xor_i64 tcg_gen_xor_i64_sparc
+#define cpu_icount_to_ns cpu_icount_to_ns_sparc +#define cpu_is_stopped cpu_is_stopped_sparc +#define cpu_get_ticks cpu_get_ticks_sparc +#define cpu_get_clock cpu_get_clock_sparc +#define cpu_resume cpu_resume_sparc +#define qemu_init_vcpu qemu_init_vcpu_sparc +#define cpu_stop_current cpu_stop_current_sparc +#define resume_all_vcpus resume_all_vcpus_sparc +#define vm_start vm_start_sparc +#define address_space_dispatch_compact address_space_dispatch_compact_sparc +#define flatview_translate flatview_translate_sparc +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_sparc +#define qemu_get_cpu qemu_get_cpu_sparc +#define cpu_address_space_init cpu_address_space_init_sparc +#define cpu_get_address_space cpu_get_address_space_sparc +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_sparc +#define cpu_exec_initfn cpu_exec_initfn_sparc +#define cpu_exec_realizefn cpu_exec_realizefn_sparc +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_sparc +#define cpu_watchpoint_insert cpu_watchpoint_insert_sparc +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_sparc +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_sparc +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_sparc +#define cpu_breakpoint_insert cpu_breakpoint_insert_sparc +#define cpu_breakpoint_remove cpu_breakpoint_remove_sparc +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_sparc +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_sparc +#define cpu_abort cpu_abort_sparc +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_sparc +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_sparc +#define flatview_add_to_dispatch flatview_add_to_dispatch_sparc +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_sparc +#define qemu_ram_get_offset qemu_ram_get_offset_sparc +#define qemu_ram_get_used_length qemu_ram_get_used_length_sparc +#define qemu_ram_is_shared qemu_ram_is_shared_sparc +#define qemu_ram_pagesize qemu_ram_pagesize_sparc +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_sparc +#define qemu_ram_alloc qemu_ram_alloc_sparc +#define qemu_ram_free qemu_ram_free_sparc +#define qemu_map_ram_ptr qemu_map_ram_ptr_sparc +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_sparc +#define qemu_ram_block_from_host qemu_ram_block_from_host_sparc +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_sparc +#define cpu_check_watchpoint cpu_check_watchpoint_sparc +#define iotlb_to_section iotlb_to_section_sparc +#define address_space_dispatch_new address_space_dispatch_new_sparc +#define address_space_dispatch_free address_space_dispatch_free_sparc +#define flatview_read_continue flatview_read_continue_sparc +#define address_space_read_full address_space_read_full_sparc +#define address_space_write address_space_write_sparc +#define address_space_rw address_space_rw_sparc +#define cpu_physical_memory_rw cpu_physical_memory_rw_sparc +#define address_space_write_rom address_space_write_rom_sparc +#define cpu_flush_icache_range cpu_flush_icache_range_sparc +#define cpu_exec_init_all cpu_exec_init_all_sparc +#define address_space_access_valid address_space_access_valid_sparc +#define address_space_map address_space_map_sparc +#define address_space_unmap address_space_unmap_sparc +#define cpu_physical_memory_map cpu_physical_memory_map_sparc +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_sparc +#define cpu_memory_rw_debug cpu_memory_rw_debug_sparc 
+#define qemu_target_page_size qemu_target_page_size_sparc +#define qemu_target_page_bits qemu_target_page_bits_sparc +#define qemu_target_page_bits_min qemu_target_page_bits_min_sparc +#define target_words_bigendian target_words_bigendian_sparc +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_sparc +#define ram_block_discard_range ram_block_discard_range_sparc +#define ramblock_is_pmem ramblock_is_pmem_sparc +#define page_size_init page_size_init_sparc +#define set_preferred_target_page_bits set_preferred_target_page_bits_sparc +#define finalize_target_page_bits finalize_target_page_bits_sparc +#define cpu_outb cpu_outb_sparc +#define cpu_outw cpu_outw_sparc +#define cpu_outl cpu_outl_sparc +#define cpu_inb cpu_inb_sparc +#define cpu_inw cpu_inw_sparc +#define cpu_inl cpu_inl_sparc #define memory_map memory_map_sparc +#define memory_map_io memory_map_io_sparc #define memory_map_ptr memory_map_ptr_sparc #define memory_unmap memory_unmap_sparc #define memory_free memory_free_sparc -#define free_code_gen_buffer free_code_gen_buffer_sparc -#define helper_raise_exception helper_raise_exception_sparc -#define tcg_enabled tcg_enabled_sparc -#define tcg_exec_init tcg_exec_init_sparc -#define memory_register_types memory_register_types_sparc -#define cpu_exec_init_all cpu_exec_init_all_sparc -#define vm_start vm_start_sparc -#define resume_all_vcpus resume_all_vcpus_sparc -#define a15_l2ctlr_read a15_l2ctlr_read_sparc -#define a64_translate_init a64_translate_init_sparc -#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_sparc -#define aa64_cacheop_access aa64_cacheop_access_sparc -#define aa64_daif_access aa64_daif_access_sparc -#define aa64_daif_write aa64_daif_write_sparc -#define aa64_dczid_read aa64_dczid_read_sparc -#define aa64_fpcr_read aa64_fpcr_read_sparc -#define aa64_fpcr_write aa64_fpcr_write_sparc -#define aa64_fpsr_read aa64_fpsr_read_sparc -#define aa64_fpsr_write aa64_fpsr_write_sparc -#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_sparc -#define aa64_zva_access aa64_zva_access_sparc -#define aarch64_banked_spsr_index aarch64_banked_spsr_index_sparc -#define aarch64_restore_sp aarch64_restore_sp_sparc -#define aarch64_save_sp aarch64_save_sp_sparc -#define accel_find accel_find_sparc -#define accel_init_machine accel_init_machine_sparc -#define accel_type accel_type_sparc -#define access_with_adjusted_size access_with_adjusted_size_sparc -#define add128 add128_sparc -#define add16_sat add16_sat_sparc -#define add16_usat add16_usat_sparc -#define add192 add192_sparc -#define add8_sat add8_sat_sparc -#define add8_usat add8_usat_sparc -#define add_cpreg_to_hashtable add_cpreg_to_hashtable_sparc -#define add_cpreg_to_list add_cpreg_to_list_sparc -#define addFloat128Sigs addFloat128Sigs_sparc -#define addFloat32Sigs addFloat32Sigs_sparc -#define addFloat64Sigs addFloat64Sigs_sparc -#define addFloatx80Sigs addFloatx80Sigs_sparc -#define add_qemu_ldst_label add_qemu_ldst_label_sparc -#define address_space_access_valid address_space_access_valid_sparc -#define address_space_destroy address_space_destroy_sparc -#define address_space_destroy_dispatch address_space_destroy_dispatch_sparc -#define address_space_get_flatview address_space_get_flatview_sparc -#define address_space_init address_space_init_sparc -#define address_space_init_dispatch address_space_init_dispatch_sparc -#define address_space_lookup_region address_space_lookup_region_sparc -#define address_space_map address_space_map_sparc -#define address_space_read 
address_space_read_sparc -#define address_space_rw address_space_rw_sparc -#define address_space_translate address_space_translate_sparc -#define address_space_translate_for_iotlb address_space_translate_for_iotlb_sparc -#define address_space_translate_internal address_space_translate_internal_sparc -#define address_space_unmap address_space_unmap_sparc -#define address_space_update_topology address_space_update_topology_sparc -#define address_space_update_topology_pass address_space_update_topology_pass_sparc -#define address_space_write address_space_write_sparc -#define addrrange_contains addrrange_contains_sparc -#define addrrange_end addrrange_end_sparc -#define addrrange_equal addrrange_equal_sparc -#define addrrange_intersection addrrange_intersection_sparc -#define addrrange_intersects addrrange_intersects_sparc -#define addrrange_make addrrange_make_sparc -#define adjust_endianness adjust_endianness_sparc -#define all_helpers all_helpers_sparc -#define alloc_code_gen_buffer alloc_code_gen_buffer_sparc -#define alloc_entry alloc_entry_sparc -#define always_true always_true_sparc -#define arm1026_initfn arm1026_initfn_sparc -#define arm1136_initfn arm1136_initfn_sparc -#define arm1136_r2_initfn arm1136_r2_initfn_sparc -#define arm1176_initfn arm1176_initfn_sparc -#define arm11mpcore_initfn arm11mpcore_initfn_sparc -#define arm926_initfn arm926_initfn_sparc -#define arm946_initfn arm946_initfn_sparc -#define arm_ccnt_enabled arm_ccnt_enabled_sparc -#define arm_cp_read_zero arm_cp_read_zero_sparc -#define arm_cp_reset_ignore arm_cp_reset_ignore_sparc -#define arm_cpu_do_interrupt arm_cpu_do_interrupt_sparc -#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_sparc -#define arm_cpu_finalizefn arm_cpu_finalizefn_sparc -#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_sparc -#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_sparc -#define arm_cpu_initfn arm_cpu_initfn_sparc -#define arm_cpu_list arm_cpu_list_sparc -#define cpu_loop_exit cpu_loop_exit_sparc -#define arm_cpu_post_init arm_cpu_post_init_sparc -#define arm_cpu_realizefn arm_cpu_realizefn_sparc -#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_sparc -#define arm_cpu_register_types arm_cpu_register_types_sparc -#define cpu_resume_from_signal cpu_resume_from_signal_sparc -#define arm_cpus arm_cpus_sparc -#define arm_cpu_set_pc arm_cpu_set_pc_sparc -#define arm_cp_write_ignore arm_cp_write_ignore_sparc -#define arm_current_el arm_current_el_sparc -#define arm_dc_feature arm_dc_feature_sparc -#define arm_debug_excp_handler arm_debug_excp_handler_sparc -#define arm_debug_target_el arm_debug_target_el_sparc -#define arm_el_is_aa64 arm_el_is_aa64_sparc -#define arm_env_get_cpu arm_env_get_cpu_sparc -#define arm_excp_target_el arm_excp_target_el_sparc -#define arm_excp_unmasked arm_excp_unmasked_sparc -#define arm_feature arm_feature_sparc -#define arm_generate_debug_exceptions arm_generate_debug_exceptions_sparc -#define gen_intermediate_code gen_intermediate_code_sparc -#define gen_intermediate_code_pc gen_intermediate_code_pc_sparc -#define arm_gen_test_cc arm_gen_test_cc_sparc -#define arm_gt_ptimer_cb arm_gt_ptimer_cb_sparc -#define arm_gt_vtimer_cb arm_gt_vtimer_cb_sparc -#define arm_handle_psci_call arm_handle_psci_call_sparc -#define arm_is_psci_call arm_is_psci_call_sparc -#define arm_is_secure arm_is_secure_sparc -#define arm_is_secure_below_el3 arm_is_secure_below_el3_sparc -#define arm_ldl_code arm_ldl_code_sparc -#define arm_lduw_code arm_lduw_code_sparc 
-#define arm_log_exception arm_log_exception_sparc -#define arm_reg_read arm_reg_read_sparc -#define arm_reg_reset arm_reg_reset_sparc -#define arm_reg_write arm_reg_write_sparc -#define restore_state_to_opc restore_state_to_opc_sparc -#define arm_rmode_to_sf arm_rmode_to_sf_sparc -#define arm_singlestep_active arm_singlestep_active_sparc -#define tlb_fill tlb_fill_sparc -#define tlb_flush tlb_flush_sparc -#define tlb_flush_page tlb_flush_page_sparc -#define tlb_set_page tlb_set_page_sparc -#define arm_translate_init arm_translate_init_sparc -#define arm_v7m_class_init arm_v7m_class_init_sparc -#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_sparc -#define ats_access ats_access_sparc -#define ats_write ats_write_sparc -#define bad_mode_switch bad_mode_switch_sparc -#define bank_number bank_number_sparc -#define bitmap_zero_extend bitmap_zero_extend_sparc -#define bp_wp_matches bp_wp_matches_sparc -#define breakpoint_invalidate breakpoint_invalidate_sparc -#define build_page_bitmap build_page_bitmap_sparc -#define bus_add_child bus_add_child_sparc -#define bus_class_init bus_class_init_sparc -#define bus_info bus_info_sparc -#define bus_unparent bus_unparent_sparc -#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_sparc -#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_sparc -#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_sparc -#define call_recip_estimate call_recip_estimate_sparc -#define can_merge can_merge_sparc -#define capacity_increase capacity_increase_sparc -#define ccsidr_read ccsidr_read_sparc -#define check_ap check_ap_sparc -#define check_breakpoints check_breakpoints_sparc -#define check_watchpoints check_watchpoints_sparc -#define cho cho_sparc -#define clear_bit clear_bit_sparc -#define clz32 clz32_sparc -#define clz64 clz64_sparc -#define cmp_flatrange_addr cmp_flatrange_addr_sparc -#define code_gen_alloc code_gen_alloc_sparc -#define commonNaNToFloat128 commonNaNToFloat128_sparc -#define commonNaNToFloat16 commonNaNToFloat16_sparc -#define commonNaNToFloat32 commonNaNToFloat32_sparc -#define commonNaNToFloat64 commonNaNToFloat64_sparc -#define commonNaNToFloatx80 commonNaNToFloatx80_sparc -#define compute_abs_deadline compute_abs_deadline_sparc -#define cond_name cond_name_sparc -#define configure_accelerator configure_accelerator_sparc -#define container_get container_get_sparc -#define container_info container_info_sparc -#define container_register_types container_register_types_sparc -#define contextidr_write contextidr_write_sparc -#define core_log_global_start core_log_global_start_sparc -#define core_log_global_stop core_log_global_stop_sparc -#define core_memory_listener core_memory_listener_sparc -#define cortexa15_cp_reginfo cortexa15_cp_reginfo_sparc -#define cortex_a15_initfn cortex_a15_initfn_sparc -#define cortexa8_cp_reginfo cortexa8_cp_reginfo_sparc -#define cortex_a8_initfn cortex_a8_initfn_sparc -#define cortexa9_cp_reginfo cortexa9_cp_reginfo_sparc -#define cortex_a9_initfn cortex_a9_initfn_sparc -#define cortex_m3_initfn cortex_m3_initfn_sparc -#define count_cpreg count_cpreg_sparc -#define countLeadingZeros32 countLeadingZeros32_sparc -#define countLeadingZeros64 countLeadingZeros64_sparc -#define cp_access_ok cp_access_ok_sparc -#define cpacr_write cpacr_write_sparc -#define cpreg_field_is_64bit cpreg_field_is_64bit_sparc -#define cp_reginfo cp_reginfo_sparc -#define cpreg_key_compare cpreg_key_compare_sparc -#define cpreg_make_keylist cpreg_make_keylist_sparc -#define cp_reg_reset 
cp_reg_reset_sparc -#define cpreg_to_kvm_id cpreg_to_kvm_id_sparc -#define cpsr_read cpsr_read_sparc -#define cpsr_write cpsr_write_sparc -#define cptype_valid cptype_valid_sparc -#define cpu_abort cpu_abort_sparc -#define cpu_arm_exec cpu_arm_exec_sparc -#define cpu_arm_gen_code cpu_arm_gen_code_sparc -#define cpu_arm_init cpu_arm_init_sparc -#define cpu_breakpoint_insert cpu_breakpoint_insert_sparc -#define cpu_breakpoint_remove cpu_breakpoint_remove_sparc -#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_sparc -#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_sparc -#define cpu_can_do_io cpu_can_do_io_sparc -#define cpu_can_run cpu_can_run_sparc -#define cpu_class_init cpu_class_init_sparc -#define cpu_common_class_by_name cpu_common_class_by_name_sparc -#define cpu_common_exec_interrupt cpu_common_exec_interrupt_sparc -#define cpu_common_get_arch_id cpu_common_get_arch_id_sparc -#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_sparc -#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_sparc -#define cpu_common_has_work cpu_common_has_work_sparc -#define cpu_common_initfn cpu_common_initfn_sparc -#define cpu_common_noop cpu_common_noop_sparc -#define cpu_common_parse_features cpu_common_parse_features_sparc -#define cpu_common_realizefn cpu_common_realizefn_sparc -#define cpu_common_reset cpu_common_reset_sparc -#define cpu_dump_statistics cpu_dump_statistics_sparc -#define cpu_exec_init cpu_exec_init_sparc -#define cpu_flush_icache_range cpu_flush_icache_range_sparc -#define cpu_gen_init cpu_gen_init_sparc -#define cpu_get_clock cpu_get_clock_sparc -#define cpu_get_real_ticks cpu_get_real_ticks_sparc -#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_sparc -#define cpu_handle_debug_exception cpu_handle_debug_exception_sparc -#define cpu_handle_guest_debug cpu_handle_guest_debug_sparc -#define cpu_inb cpu_inb_sparc -#define cpu_inl cpu_inl_sparc -#define cpu_interrupt cpu_interrupt_sparc -#define cpu_interrupt_handler cpu_interrupt_handler_sparc -#define cpu_inw cpu_inw_sparc -#define cpu_io_recompile cpu_io_recompile_sparc -#define cpu_is_stopped cpu_is_stopped_sparc -#define cpu_ldl_code cpu_ldl_code_sparc -#define cpu_ldub_code cpu_ldub_code_sparc -#define cpu_lduw_code cpu_lduw_code_sparc -#define cpu_memory_rw_debug cpu_memory_rw_debug_sparc -#define cpu_mmu_index cpu_mmu_index_sparc -#define cpu_outb cpu_outb_sparc -#define cpu_outl cpu_outl_sparc -#define cpu_outw cpu_outw_sparc -#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_sparc -#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_sparc -#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_sparc -#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_sparc -#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_sparc -#define cpu_physical_memory_is_io cpu_physical_memory_is_io_sparc -#define cpu_physical_memory_map cpu_physical_memory_map_sparc -#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_sparc -#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_sparc -#define cpu_physical_memory_rw cpu_physical_memory_rw_sparc -#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_sparc -#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_sparc -#define cpu_physical_memory_unmap cpu_physical_memory_unmap_sparc -#define cpu_physical_memory_write_rom 
cpu_physical_memory_write_rom_sparc -#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_sparc -#define cpu_register cpu_register_sparc -#define cpu_register_types cpu_register_types_sparc -#define cpu_restore_state cpu_restore_state_sparc -#define cpu_restore_state_from_tb cpu_restore_state_from_tb_sparc -#define cpu_single_step cpu_single_step_sparc -#define cpu_tb_exec cpu_tb_exec_sparc -#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_sparc -#define cpu_to_be64 cpu_to_be64_sparc -#define cpu_to_le32 cpu_to_le32_sparc -#define cpu_to_le64 cpu_to_le64_sparc -#define cpu_type_info cpu_type_info_sparc -#define cpu_unassigned_access cpu_unassigned_access_sparc -#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_sparc -#define cpu_watchpoint_insert cpu_watchpoint_insert_sparc -#define cpu_watchpoint_remove cpu_watchpoint_remove_sparc -#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_sparc -#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_sparc -#define crc32c_table crc32c_table_sparc -#define create_new_memory_mapping create_new_memory_mapping_sparc -#define csselr_write csselr_write_sparc -#define cto32 cto32_sparc -#define ctr_el0_access ctr_el0_access_sparc -#define ctz32 ctz32_sparc -#define ctz64 ctz64_sparc -#define dacr_write dacr_write_sparc -#define dbgbcr_write dbgbcr_write_sparc -#define dbgbvr_write dbgbvr_write_sparc -#define dbgwcr_write dbgwcr_write_sparc -#define dbgwvr_write dbgwvr_write_sparc -#define debug_cp_reginfo debug_cp_reginfo_sparc -#define debug_frame debug_frame_sparc -#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_sparc -#define define_arm_cp_regs define_arm_cp_regs_sparc -#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_sparc -#define define_debug_regs define_debug_regs_sparc -#define define_one_arm_cp_reg define_one_arm_cp_reg_sparc -#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_sparc -#define deposit32 deposit32_sparc -#define deposit64 deposit64_sparc -#define deregister_tm_clones deregister_tm_clones_sparc -#define device_class_base_init device_class_base_init_sparc -#define device_class_init device_class_init_sparc -#define device_finalize device_finalize_sparc -#define device_get_realized device_get_realized_sparc -#define device_initfn device_initfn_sparc -#define device_post_init device_post_init_sparc -#define device_reset device_reset_sparc -#define device_set_realized device_set_realized_sparc -#define device_type_info device_type_info_sparc -#define disas_arm_insn disas_arm_insn_sparc -#define disas_coproc_insn disas_coproc_insn_sparc -#define disas_dsp_insn disas_dsp_insn_sparc -#define disas_iwmmxt_insn disas_iwmmxt_insn_sparc -#define disas_neon_data_insn disas_neon_data_insn_sparc -#define disas_neon_ls_insn disas_neon_ls_insn_sparc -#define disas_thumb2_insn disas_thumb2_insn_sparc -#define disas_thumb_insn disas_thumb_insn_sparc -#define disas_vfp_insn disas_vfp_insn_sparc -#define disas_vfp_v8_insn disas_vfp_v8_insn_sparc -#define do_arm_semihosting do_arm_semihosting_sparc -#define do_clz16 do_clz16_sparc -#define do_clz8 do_clz8_sparc -#define do_constant_folding do_constant_folding_sparc -#define do_constant_folding_2 do_constant_folding_2_sparc -#define do_constant_folding_cond do_constant_folding_cond_sparc -#define do_constant_folding_cond2 do_constant_folding_cond2_sparc -#define do_constant_folding_cond_32 do_constant_folding_cond_32_sparc -#define do_constant_folding_cond_64 
do_constant_folding_cond_64_sparc -#define do_constant_folding_cond_eq do_constant_folding_cond_eq_sparc -#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_sparc -#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_sparc -#define do_ssat do_ssat_sparc -#define do_usad do_usad_sparc -#define do_usat do_usat_sparc -#define do_v7m_exception_exit do_v7m_exception_exit_sparc -#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_sparc -#define dummy_func dummy_func_sparc -#define dummy_section dummy_section_sparc -#define _DYNAMIC _DYNAMIC_sparc -#define _edata _edata_sparc -#define _end _end_sparc -#define end_list end_list_sparc -#define eq128 eq128_sparc -#define ErrorClass_lookup ErrorClass_lookup_sparc -#define error_copy error_copy_sparc -#define error_exit error_exit_sparc -#define error_get_class error_get_class_sparc -#define error_get_pretty error_get_pretty_sparc -#define error_setg_file_open error_setg_file_open_sparc -#define estimateDiv128To64 estimateDiv128To64_sparc -#define estimateSqrt32 estimateSqrt32_sparc -#define excnames excnames_sparc -#define excp_is_internal excp_is_internal_sparc -#define extended_addresses_enabled extended_addresses_enabled_sparc -#define extended_mpu_ap_bits extended_mpu_ap_bits_sparc -#define extract32 extract32_sparc -#define extract64 extract64_sparc -#define extractFloat128Exp extractFloat128Exp_sparc -#define extractFloat128Frac0 extractFloat128Frac0_sparc -#define extractFloat128Frac1 extractFloat128Frac1_sparc -#define extractFloat128Sign extractFloat128Sign_sparc -#define extractFloat16Exp extractFloat16Exp_sparc -#define extractFloat16Frac extractFloat16Frac_sparc -#define extractFloat16Sign extractFloat16Sign_sparc -#define extractFloat32Exp extractFloat32Exp_sparc -#define extractFloat32Frac extractFloat32Frac_sparc -#define extractFloat32Sign extractFloat32Sign_sparc -#define extractFloat64Exp extractFloat64Exp_sparc -#define extractFloat64Frac extractFloat64Frac_sparc -#define extractFloat64Sign extractFloat64Sign_sparc -#define extractFloatx80Exp extractFloatx80Exp_sparc -#define extractFloatx80Frac extractFloatx80Frac_sparc -#define extractFloatx80Sign extractFloatx80Sign_sparc -#define fcse_write fcse_write_sparc -#define find_better_copy find_better_copy_sparc -#define find_default_machine find_default_machine_sparc -#define find_desc_by_name find_desc_by_name_sparc -#define find_first_bit find_first_bit_sparc -#define find_paging_enabled_cpu find_paging_enabled_cpu_sparc -#define find_ram_block find_ram_block_sparc -#define find_ram_offset find_ram_offset_sparc -#define find_string find_string_sparc -#define find_type find_type_sparc -#define _fini _fini_sparc -#define flatrange_equal flatrange_equal_sparc -#define flatview_destroy flatview_destroy_sparc -#define flatview_init flatview_init_sparc -#define flatview_insert flatview_insert_sparc -#define flatview_lookup flatview_lookup_sparc -#define flatview_ref flatview_ref_sparc -#define flatview_simplify flatview_simplify_sparc #define flatview_unref flatview_unref_sparc -#define float128_add float128_add_sparc -#define float128_compare float128_compare_sparc -#define float128_compare_internal float128_compare_internal_sparc -#define float128_compare_quiet float128_compare_quiet_sparc -#define float128_default_nan float128_default_nan_sparc -#define float128_div float128_div_sparc -#define float128_eq float128_eq_sparc -#define float128_eq_quiet float128_eq_quiet_sparc -#define float128_is_quiet_nan float128_is_quiet_nan_sparc -#define float128_is_signaling_nan float128_is_signaling_nan_sparc 
-#define float128_le float128_le_sparc -#define float128_le_quiet float128_le_quiet_sparc -#define float128_lt float128_lt_sparc -#define float128_lt_quiet float128_lt_quiet_sparc -#define float128_maybe_silence_nan float128_maybe_silence_nan_sparc -#define float128_mul float128_mul_sparc -#define float128_rem float128_rem_sparc -#define float128_round_to_int float128_round_to_int_sparc -#define float128_scalbn float128_scalbn_sparc -#define float128_sqrt float128_sqrt_sparc -#define float128_sub float128_sub_sparc -#define float128ToCommonNaN float128ToCommonNaN_sparc -#define float128_to_float32 float128_to_float32_sparc -#define float128_to_float64 float128_to_float64_sparc -#define float128_to_floatx80 float128_to_floatx80_sparc -#define float128_to_int32 float128_to_int32_sparc -#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_sparc -#define float128_to_int64 float128_to_int64_sparc -#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_sparc -#define float128_unordered float128_unordered_sparc -#define float128_unordered_quiet float128_unordered_quiet_sparc -#define float16_default_nan float16_default_nan_sparc +#define address_space_get_flatview address_space_get_flatview_sparc +#define memory_region_transaction_begin memory_region_transaction_begin_sparc +#define memory_region_transaction_commit memory_region_transaction_commit_sparc +#define memory_region_init memory_region_init_sparc +#define memory_region_access_valid memory_region_access_valid_sparc +#define memory_region_dispatch_read memory_region_dispatch_read_sparc +#define memory_region_dispatch_write memory_region_dispatch_write_sparc +#define memory_region_init_io memory_region_init_io_sparc +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_sparc +#define memory_region_size memory_region_size_sparc +#define memory_region_set_readonly memory_region_set_readonly_sparc +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_sparc +#define memory_region_from_host memory_region_from_host_sparc +#define memory_region_get_ram_addr memory_region_get_ram_addr_sparc +#define memory_region_add_subregion memory_region_add_subregion_sparc +#define memory_region_del_subregion memory_region_del_subregion_sparc +#define memory_region_find memory_region_find_sparc +#define memory_listener_register memory_listener_register_sparc +#define memory_listener_unregister memory_listener_unregister_sparc +#define address_space_remove_listeners address_space_remove_listeners_sparc +#define address_space_init address_space_init_sparc +#define address_space_destroy address_space_destroy_sparc +#define memory_region_init_ram memory_region_init_ram_sparc +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_sparc +#define exec_inline_op exec_inline_op_sparc +#define floatx80_default_nan floatx80_default_nan_sparc +#define float_raise float_raise_sparc #define float16_is_quiet_nan float16_is_quiet_nan_sparc #define float16_is_signaling_nan float16_is_signaling_nan_sparc -#define float16_maybe_silence_nan float16_maybe_silence_nan_sparc -#define float16ToCommonNaN float16ToCommonNaN_sparc -#define float16_to_float32 float16_to_float32_sparc -#define float16_to_float64 float16_to_float64_sparc -#define float32_abs float32_abs_sparc -#define float32_add float32_add_sparc -#define float32_chs float32_chs_sparc -#define float32_compare float32_compare_sparc -#define float32_compare_internal float32_compare_internal_sparc -#define float32_compare_quiet 
float32_compare_quiet_sparc -#define float32_default_nan float32_default_nan_sparc -#define float32_div float32_div_sparc -#define float32_eq float32_eq_sparc -#define float32_eq_quiet float32_eq_quiet_sparc -#define float32_exp2 float32_exp2_sparc -#define float32_exp2_coefficients float32_exp2_coefficients_sparc -#define float32_is_any_nan float32_is_any_nan_sparc -#define float32_is_infinity float32_is_infinity_sparc -#define float32_is_neg float32_is_neg_sparc #define float32_is_quiet_nan float32_is_quiet_nan_sparc #define float32_is_signaling_nan float32_is_signaling_nan_sparc -#define float32_is_zero float32_is_zero_sparc -#define float32_is_zero_or_denormal float32_is_zero_or_denormal_sparc -#define float32_le float32_le_sparc -#define float32_le_quiet float32_le_quiet_sparc -#define float32_log2 float32_log2_sparc -#define float32_lt float32_lt_sparc -#define float32_lt_quiet float32_lt_quiet_sparc +#define float64_is_quiet_nan float64_is_quiet_nan_sparc +#define float64_is_signaling_nan float64_is_signaling_nan_sparc +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_sparc +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_sparc +#define floatx80_silence_nan floatx80_silence_nan_sparc +#define propagateFloatx80NaN propagateFloatx80NaN_sparc +#define float128_is_quiet_nan float128_is_quiet_nan_sparc +#define float128_is_signaling_nan float128_is_signaling_nan_sparc +#define float128_silence_nan float128_silence_nan_sparc +#define float16_add float16_add_sparc +#define float16_sub float16_sub_sparc +#define float32_add float32_add_sparc +#define float32_sub float32_sub_sparc +#define float64_add float64_add_sparc +#define float64_sub float64_sub_sparc +#define float16_mul float16_mul_sparc +#define float32_mul float32_mul_sparc +#define float64_mul float64_mul_sparc +#define float16_muladd float16_muladd_sparc +#define float32_muladd float32_muladd_sparc +#define float64_muladd float64_muladd_sparc +#define float16_div float16_div_sparc +#define float32_div float32_div_sparc +#define float64_div float64_div_sparc +#define float16_to_float32 float16_to_float32_sparc +#define float16_to_float64 float16_to_float64_sparc +#define float32_to_float16 float32_to_float16_sparc +#define float32_to_float64 float32_to_float64_sparc +#define float64_to_float16 float64_to_float16_sparc +#define float64_to_float32 float64_to_float32_sparc +#define float16_round_to_int float16_round_to_int_sparc +#define float32_round_to_int float32_round_to_int_sparc +#define float64_round_to_int float64_round_to_int_sparc +#define float16_to_int16_scalbn float16_to_int16_scalbn_sparc +#define float16_to_int32_scalbn float16_to_int32_scalbn_sparc +#define float16_to_int64_scalbn float16_to_int64_scalbn_sparc +#define float32_to_int16_scalbn float32_to_int16_scalbn_sparc +#define float32_to_int32_scalbn float32_to_int32_scalbn_sparc +#define float32_to_int64_scalbn float32_to_int64_scalbn_sparc +#define float64_to_int16_scalbn float64_to_int16_scalbn_sparc +#define float64_to_int32_scalbn float64_to_int32_scalbn_sparc +#define float64_to_int64_scalbn float64_to_int64_scalbn_sparc +#define float16_to_int16 float16_to_int16_sparc +#define float16_to_int32 float16_to_int32_sparc +#define float16_to_int64 float16_to_int64_sparc +#define float32_to_int16 float32_to_int16_sparc +#define float32_to_int32 float32_to_int32_sparc +#define float32_to_int64 float32_to_int64_sparc +#define float64_to_int16 float64_to_int16_sparc +#define float64_to_int32 float64_to_int32_sparc +#define float64_to_int64 
float64_to_int64_sparc +#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_sparc +#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_sparc +#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_sparc +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_sparc +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_sparc +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_sparc +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_sparc +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_sparc +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_sparc +#define float16_to_uint16_scalbn float16_to_uint16_scalbn_sparc +#define float16_to_uint32_scalbn float16_to_uint32_scalbn_sparc +#define float16_to_uint64_scalbn float16_to_uint64_scalbn_sparc +#define float32_to_uint16_scalbn float32_to_uint16_scalbn_sparc +#define float32_to_uint32_scalbn float32_to_uint32_scalbn_sparc +#define float32_to_uint64_scalbn float32_to_uint64_scalbn_sparc +#define float64_to_uint16_scalbn float64_to_uint16_scalbn_sparc +#define float64_to_uint32_scalbn float64_to_uint32_scalbn_sparc +#define float64_to_uint64_scalbn float64_to_uint64_scalbn_sparc +#define float16_to_uint16 float16_to_uint16_sparc +#define float16_to_uint32 float16_to_uint32_sparc +#define float16_to_uint64 float16_to_uint64_sparc +#define float32_to_uint16 float32_to_uint16_sparc +#define float32_to_uint32 float32_to_uint32_sparc +#define float32_to_uint64 float32_to_uint64_sparc +#define float64_to_uint16 float64_to_uint16_sparc +#define float64_to_uint32 float64_to_uint32_sparc +#define float64_to_uint64 float64_to_uint64_sparc +#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_sparc +#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_sparc +#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_sparc +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_sparc +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_sparc +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_sparc +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_sparc +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_sparc +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_sparc +#define int64_to_float16_scalbn int64_to_float16_scalbn_sparc +#define int32_to_float16_scalbn int32_to_float16_scalbn_sparc +#define int16_to_float16_scalbn int16_to_float16_scalbn_sparc +#define int64_to_float16 int64_to_float16_sparc +#define int32_to_float16 int32_to_float16_sparc +#define int16_to_float16 int16_to_float16_sparc +#define int64_to_float32_scalbn int64_to_float32_scalbn_sparc +#define int32_to_float32_scalbn int32_to_float32_scalbn_sparc +#define int16_to_float32_scalbn int16_to_float32_scalbn_sparc +#define int64_to_float32 int64_to_float32_sparc +#define int32_to_float32 int32_to_float32_sparc +#define int16_to_float32 int16_to_float32_sparc +#define int64_to_float64_scalbn int64_to_float64_scalbn_sparc +#define int32_to_float64_scalbn int32_to_float64_scalbn_sparc +#define int16_to_float64_scalbn int16_to_float64_scalbn_sparc +#define int64_to_float64 int64_to_float64_sparc +#define int32_to_float64 int32_to_float64_sparc +#define int16_to_float64 int16_to_float64_sparc +#define uint64_to_float16_scalbn uint64_to_float16_scalbn_sparc +#define 
uint32_to_float16_scalbn uint32_to_float16_scalbn_sparc +#define uint16_to_float16_scalbn uint16_to_float16_scalbn_sparc +#define uint64_to_float16 uint64_to_float16_sparc +#define uint32_to_float16 uint32_to_float16_sparc +#define uint16_to_float16 uint16_to_float16_sparc +#define uint64_to_float32_scalbn uint64_to_float32_scalbn_sparc +#define uint32_to_float32_scalbn uint32_to_float32_scalbn_sparc +#define uint16_to_float32_scalbn uint16_to_float32_scalbn_sparc +#define uint64_to_float32 uint64_to_float32_sparc +#define uint32_to_float32 uint32_to_float32_sparc +#define uint16_to_float32 uint16_to_float32_sparc +#define uint64_to_float64_scalbn uint64_to_float64_scalbn_sparc +#define uint32_to_float64_scalbn uint32_to_float64_scalbn_sparc +#define uint16_to_float64_scalbn uint16_to_float64_scalbn_sparc +#define uint64_to_float64 uint64_to_float64_sparc +#define uint32_to_float64 uint32_to_float64_sparc +#define uint16_to_float64 uint16_to_float64_sparc +#define float16_min float16_min_sparc +#define float16_minnum float16_minnum_sparc +#define float16_minnummag float16_minnummag_sparc +#define float16_max float16_max_sparc +#define float16_maxnum float16_maxnum_sparc +#define float16_maxnummag float16_maxnummag_sparc +#define float32_min float32_min_sparc +#define float32_minnum float32_minnum_sparc +#define float32_minnummag float32_minnummag_sparc #define float32_max float32_max_sparc #define float32_maxnum float32_maxnum_sparc #define float32_maxnummag float32_maxnummag_sparc -#define float32_maybe_silence_nan float32_maybe_silence_nan_sparc -#define float32_min float32_min_sparc -#define float32_minmax float32_minmax_sparc -#define float32_minnum float32_minnum_sparc -#define float32_minnummag float32_minnummag_sparc -#define float32_mul float32_mul_sparc -#define float32_muladd float32_muladd_sparc -#define float32_rem float32_rem_sparc -#define float32_round_to_int float32_round_to_int_sparc -#define float32_scalbn float32_scalbn_sparc -#define float32_set_sign float32_set_sign_sparc -#define float32_sqrt float32_sqrt_sparc -#define float32_squash_input_denormal float32_squash_input_denormal_sparc -#define float32_sub float32_sub_sparc -#define float32ToCommonNaN float32ToCommonNaN_sparc -#define float32_to_float128 float32_to_float128_sparc -#define float32_to_float16 float32_to_float16_sparc -#define float32_to_float64 float32_to_float64_sparc -#define float32_to_floatx80 float32_to_floatx80_sparc -#define float32_to_int16 float32_to_int16_sparc -#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_sparc -#define float32_to_int32 float32_to_int32_sparc -#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_sparc -#define float32_to_int64 float32_to_int64_sparc -#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_sparc -#define float32_to_uint16 float32_to_uint16_sparc -#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_sparc -#define float32_to_uint32 float32_to_uint32_sparc -#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_sparc -#define float32_to_uint64 float32_to_uint64_sparc -#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_sparc -#define float32_unordered float32_unordered_sparc -#define float32_unordered_quiet float32_unordered_quiet_sparc -#define float64_abs float64_abs_sparc -#define float64_add float64_add_sparc -#define float64_chs float64_chs_sparc -#define float64_compare float64_compare_sparc -#define float64_compare_internal 
float64_compare_internal_sparc -#define float64_compare_quiet float64_compare_quiet_sparc -#define float64_default_nan float64_default_nan_sparc -#define float64_div float64_div_sparc -#define float64_eq float64_eq_sparc -#define float64_eq_quiet float64_eq_quiet_sparc -#define float64_is_any_nan float64_is_any_nan_sparc -#define float64_is_infinity float64_is_infinity_sparc -#define float64_is_neg float64_is_neg_sparc -#define float64_is_quiet_nan float64_is_quiet_nan_sparc -#define float64_is_signaling_nan float64_is_signaling_nan_sparc -#define float64_is_zero float64_is_zero_sparc -#define float64_le float64_le_sparc -#define float64_le_quiet float64_le_quiet_sparc -#define float64_log2 float64_log2_sparc -#define float64_lt float64_lt_sparc -#define float64_lt_quiet float64_lt_quiet_sparc +#define float64_min float64_min_sparc +#define float64_minnum float64_minnum_sparc +#define float64_minnummag float64_minnummag_sparc #define float64_max float64_max_sparc #define float64_maxnum float64_maxnum_sparc #define float64_maxnummag float64_maxnummag_sparc -#define float64_maybe_silence_nan float64_maybe_silence_nan_sparc -#define float64_min float64_min_sparc -#define float64_minmax float64_minmax_sparc -#define float64_minnum float64_minnum_sparc -#define float64_minnummag float64_minnummag_sparc -#define float64_mul float64_mul_sparc -#define float64_muladd float64_muladd_sparc -#define float64_rem float64_rem_sparc -#define float64_round_to_int float64_round_to_int_sparc +#define float16_compare float16_compare_sparc +#define float16_compare_quiet float16_compare_quiet_sparc +#define float32_compare float32_compare_sparc +#define float32_compare_quiet float32_compare_quiet_sparc +#define float64_compare float64_compare_sparc +#define float64_compare_quiet float64_compare_quiet_sparc +#define float16_scalbn float16_scalbn_sparc +#define float32_scalbn float32_scalbn_sparc #define float64_scalbn float64_scalbn_sparc -#define float64_set_sign float64_set_sign_sparc +#define float16_sqrt float16_sqrt_sparc +#define float32_sqrt float32_sqrt_sparc #define float64_sqrt float64_sqrt_sparc +#define float16_default_nan float16_default_nan_sparc +#define float32_default_nan float32_default_nan_sparc +#define float64_default_nan float64_default_nan_sparc +#define float128_default_nan float128_default_nan_sparc +#define float16_silence_nan float16_silence_nan_sparc +#define float32_silence_nan float32_silence_nan_sparc +#define float64_silence_nan float64_silence_nan_sparc +#define float16_squash_input_denormal float16_squash_input_denormal_sparc +#define float32_squash_input_denormal float32_squash_input_denormal_sparc #define float64_squash_input_denormal float64_squash_input_denormal_sparc -#define float64_sub float64_sub_sparc -#define float64ToCommonNaN float64ToCommonNaN_sparc -#define float64_to_float128 float64_to_float128_sparc -#define float64_to_float16 float64_to_float16_sparc -#define float64_to_float32 float64_to_float32_sparc +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_sparc +#define roundAndPackFloatx80 roundAndPackFloatx80_sparc +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_sparc +#define int32_to_floatx80 int32_to_floatx80_sparc +#define int32_to_float128 int32_to_float128_sparc +#define int64_to_floatx80 int64_to_floatx80_sparc +#define int64_to_float128 int64_to_float128_sparc +#define uint64_to_float128 uint64_to_float128_sparc +#define float32_to_floatx80 float32_to_floatx80_sparc +#define float32_to_float128 float32_to_float128_sparc 
+#define float32_rem float32_rem_sparc +#define float32_exp2 float32_exp2_sparc +#define float32_log2 float32_log2_sparc +#define float32_eq float32_eq_sparc +#define float32_le float32_le_sparc +#define float32_lt float32_lt_sparc +#define float32_unordered float32_unordered_sparc +#define float32_eq_quiet float32_eq_quiet_sparc +#define float32_le_quiet float32_le_quiet_sparc +#define float32_lt_quiet float32_lt_quiet_sparc +#define float32_unordered_quiet float32_unordered_quiet_sparc #define float64_to_floatx80 float64_to_floatx80_sparc -#define float64_to_int16 float64_to_int16_sparc -#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_sparc -#define float64_to_int32 float64_to_int32_sparc -#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_sparc -#define float64_to_int64 float64_to_int64_sparc -#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_sparc -#define float64_to_uint16 float64_to_uint16_sparc -#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_sparc -#define float64_to_uint32 float64_to_uint32_sparc -#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_sparc -#define float64_to_uint64 float64_to_uint64_sparc -#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_sparc -#define float64_trunc_to_int float64_trunc_to_int_sparc +#define float64_to_float128 float64_to_float128_sparc +#define float64_rem float64_rem_sparc +#define float64_log2 float64_log2_sparc +#define float64_eq float64_eq_sparc +#define float64_le float64_le_sparc +#define float64_lt float64_lt_sparc #define float64_unordered float64_unordered_sparc +#define float64_eq_quiet float64_eq_quiet_sparc +#define float64_le_quiet float64_le_quiet_sparc +#define float64_lt_quiet float64_lt_quiet_sparc #define float64_unordered_quiet float64_unordered_quiet_sparc -#define float_raise float_raise_sparc -#define floatx80_add floatx80_add_sparc -#define floatx80_compare floatx80_compare_sparc -#define floatx80_compare_internal floatx80_compare_internal_sparc -#define floatx80_compare_quiet floatx80_compare_quiet_sparc -#define floatx80_default_nan floatx80_default_nan_sparc -#define floatx80_div floatx80_div_sparc -#define floatx80_eq floatx80_eq_sparc -#define floatx80_eq_quiet floatx80_eq_quiet_sparc -#define floatx80_is_quiet_nan floatx80_is_quiet_nan_sparc -#define floatx80_is_signaling_nan floatx80_is_signaling_nan_sparc -#define floatx80_le floatx80_le_sparc -#define floatx80_le_quiet floatx80_le_quiet_sparc -#define floatx80_lt floatx80_lt_sparc -#define floatx80_lt_quiet floatx80_lt_quiet_sparc -#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_sparc -#define floatx80_mul floatx80_mul_sparc -#define floatx80_rem floatx80_rem_sparc -#define floatx80_round_to_int floatx80_round_to_int_sparc -#define floatx80_scalbn floatx80_scalbn_sparc -#define floatx80_sqrt floatx80_sqrt_sparc -#define floatx80_sub floatx80_sub_sparc -#define floatx80ToCommonNaN floatx80ToCommonNaN_sparc -#define floatx80_to_float128 floatx80_to_float128_sparc -#define floatx80_to_float32 floatx80_to_float32_sparc -#define floatx80_to_float64 floatx80_to_float64_sparc #define floatx80_to_int32 floatx80_to_int32_sparc #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_sparc #define floatx80_to_int64 floatx80_to_int64_sparc #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_sparc +#define floatx80_to_float32 floatx80_to_float32_sparc +#define floatx80_to_float64 
floatx80_to_float64_sparc +#define floatx80_to_float128 floatx80_to_float128_sparc +#define floatx80_round floatx80_round_sparc +#define floatx80_round_to_int floatx80_round_to_int_sparc +#define floatx80_add floatx80_add_sparc +#define floatx80_sub floatx80_sub_sparc +#define floatx80_mul floatx80_mul_sparc +#define floatx80_div floatx80_div_sparc +#define floatx80_rem floatx80_rem_sparc +#define floatx80_sqrt floatx80_sqrt_sparc +#define floatx80_eq floatx80_eq_sparc +#define floatx80_le floatx80_le_sparc +#define floatx80_lt floatx80_lt_sparc #define floatx80_unordered floatx80_unordered_sparc +#define floatx80_eq_quiet floatx80_eq_quiet_sparc +#define floatx80_le_quiet floatx80_le_quiet_sparc +#define floatx80_lt_quiet floatx80_lt_quiet_sparc #define floatx80_unordered_quiet floatx80_unordered_quiet_sparc -#define flush_icache_range flush_icache_range_sparc -#define format_string format_string_sparc -#define fp_decode_rm fp_decode_rm_sparc -#define frame_dummy frame_dummy_sparc -#define free_range free_range_sparc -#define fstat64 fstat64_sparc -#define futex_wait futex_wait_sparc -#define futex_wake futex_wake_sparc -#define gen_aa32_ld16s gen_aa32_ld16s_sparc -#define gen_aa32_ld16u gen_aa32_ld16u_sparc -#define gen_aa32_ld32u gen_aa32_ld32u_sparc -#define gen_aa32_ld64 gen_aa32_ld64_sparc -#define gen_aa32_ld8s gen_aa32_ld8s_sparc -#define gen_aa32_ld8u gen_aa32_ld8u_sparc -#define gen_aa32_st16 gen_aa32_st16_sparc -#define gen_aa32_st32 gen_aa32_st32_sparc -#define gen_aa32_st64 gen_aa32_st64_sparc -#define gen_aa32_st8 gen_aa32_st8_sparc -#define gen_adc gen_adc_sparc -#define gen_adc_CC gen_adc_CC_sparc -#define gen_add16 gen_add16_sparc -#define gen_add_carry gen_add_carry_sparc -#define gen_add_CC gen_add_CC_sparc -#define gen_add_datah_offset gen_add_datah_offset_sparc -#define gen_add_data_offset gen_add_data_offset_sparc -#define gen_addq gen_addq_sparc -#define gen_addq_lo gen_addq_lo_sparc -#define gen_addq_msw gen_addq_msw_sparc -#define gen_arm_parallel_addsub gen_arm_parallel_addsub_sparc -#define gen_arm_shift_im gen_arm_shift_im_sparc -#define gen_arm_shift_reg gen_arm_shift_reg_sparc -#define gen_bx gen_bx_sparc -#define gen_bx_im gen_bx_im_sparc -#define gen_clrex gen_clrex_sparc -#define generate_memory_topology generate_memory_topology_sparc -#define generic_timer_cp_reginfo generic_timer_cp_reginfo_sparc -#define gen_exception gen_exception_sparc -#define gen_exception_insn gen_exception_insn_sparc -#define gen_exception_internal gen_exception_internal_sparc -#define gen_exception_internal_insn gen_exception_internal_insn_sparc -#define gen_exception_return gen_exception_return_sparc -#define gen_goto_tb gen_goto_tb_sparc -#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_sparc -#define gen_helper_add_saturate gen_helper_add_saturate_sparc -#define gen_helper_add_setq gen_helper_add_setq_sparc -#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_sparc -#define gen_helper_clz32 gen_helper_clz32_sparc -#define gen_helper_clz64 gen_helper_clz64_sparc -#define gen_helper_clz_arm gen_helper_clz_arm_sparc -#define gen_helper_cpsr_read gen_helper_cpsr_read_sparc -#define gen_helper_cpsr_write gen_helper_cpsr_write_sparc -#define gen_helper_crc32_arm gen_helper_crc32_arm_sparc -#define gen_helper_crc32c gen_helper_crc32c_sparc -#define gen_helper_crypto_aese gen_helper_crypto_aese_sparc -#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_sparc -#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_sparc -#define 
gen_helper_crypto_sha1h gen_helper_crypto_sha1h_sparc -#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_sparc -#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_sparc -#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_sparc -#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_sparc -#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_sparc -#define gen_helper_double_saturate gen_helper_double_saturate_sparc -#define gen_helper_exception_internal gen_helper_exception_internal_sparc -#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_sparc -#define gen_helper_get_cp_reg gen_helper_get_cp_reg_sparc -#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_sparc -#define gen_helper_get_r13_banked gen_helper_get_r13_banked_sparc -#define gen_helper_get_user_reg gen_helper_get_user_reg_sparc -#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_sparc -#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_sparc -#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_sparc -#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_sparc -#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_sparc -#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_sparc -#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_sparc -#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_sparc -#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_sparc -#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_sparc -#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_sparc -#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_sparc -#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_sparc -#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_sparc -#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_sparc -#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_sparc -#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_sparc -#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_sparc -#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_sparc -#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_sparc -#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_sparc -#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_sparc -#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_sparc -#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_sparc -#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_sparc -#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_sparc -#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_sparc -#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_sparc -#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_sparc -#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_sparc -#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_sparc -#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_sparc -#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_sparc -#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_sparc -#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_sparc -#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_sparc -#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_sparc -#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_sparc -#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_sparc -#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_sparc -#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_sparc -#define gen_helper_iwmmxt_minsl 
gen_helper_iwmmxt_minsl_sparc -#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_sparc -#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_sparc -#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_sparc -#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_sparc -#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_sparc -#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_sparc -#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_sparc -#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_sparc -#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_sparc -#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_sparc -#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_sparc -#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_sparc -#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_sparc -#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_sparc -#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_sparc -#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_sparc -#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_sparc -#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_sparc -#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_sparc -#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_sparc -#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_sparc -#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_sparc -#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_sparc -#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_sparc -#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_sparc -#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_sparc -#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_sparc -#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_sparc -#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_sparc -#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_sparc -#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_sparc -#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_sparc -#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_sparc -#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_sparc -#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_sparc -#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_sparc -#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_sparc -#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_sparc -#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_sparc -#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_sparc -#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_sparc -#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_sparc -#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_sparc -#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_sparc -#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_sparc -#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_sparc -#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_sparc -#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_sparc -#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_sparc -#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_sparc -#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_sparc -#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_sparc -#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_sparc -#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_sparc -#define gen_helper_iwmmxt_unpacklb 
gen_helper_iwmmxt_unpacklb_sparc -#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_sparc -#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_sparc -#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_sparc -#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_sparc -#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_sparc -#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_sparc -#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_sparc -#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_sparc -#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_sparc -#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_sparc -#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_sparc -#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_sparc -#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_sparc -#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_sparc -#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_sparc -#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_sparc -#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_sparc -#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_sparc -#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_sparc -#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_sparc -#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_sparc -#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_sparc -#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_sparc -#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_sparc -#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_sparc -#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_sparc -#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_sparc -#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_sparc -#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_sparc -#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_sparc -#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_sparc -#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_sparc -#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_sparc -#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_sparc -#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_sparc -#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_sparc -#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_sparc -#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_sparc -#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_sparc -#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_sparc -#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_sparc -#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_sparc -#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_sparc -#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_sparc -#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_sparc -#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_sparc -#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_sparc -#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_sparc -#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_sparc -#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_sparc -#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_sparc -#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_sparc -#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_sparc -#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_sparc -#define 
gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_sparc -#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_sparc -#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_sparc -#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_sparc -#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_sparc -#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_sparc -#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_sparc -#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_sparc -#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_sparc -#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_sparc -#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_sparc -#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_sparc -#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_sparc -#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_sparc -#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_sparc -#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_sparc -#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_sparc -#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_sparc -#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_sparc -#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_sparc -#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_sparc -#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_sparc -#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_sparc -#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_sparc -#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_sparc -#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_sparc -#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_sparc -#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_sparc -#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_sparc -#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_sparc -#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_sparc -#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_sparc -#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_sparc -#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_sparc -#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_sparc -#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_sparc -#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_sparc -#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_sparc -#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_sparc -#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_sparc -#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_sparc -#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_sparc -#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_sparc -#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_sparc -#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_sparc -#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_sparc -#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_sparc -#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_sparc -#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_sparc -#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_sparc -#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_sparc -#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_sparc -#define gen_helper_neon_padd_u8 
gen_helper_neon_padd_u8_sparc -#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_sparc -#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_sparc -#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_sparc -#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_sparc -#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_sparc -#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_sparc -#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_sparc -#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_sparc -#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_sparc -#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_sparc -#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_sparc -#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_sparc -#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_sparc -#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_sparc -#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_sparc -#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_sparc -#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_sparc -#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_sparc -#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_sparc -#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_sparc -#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_sparc -#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_sparc -#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_sparc -#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_sparc -#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_sparc -#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_sparc -#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_sparc -#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_sparc -#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_sparc -#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_sparc -#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_sparc -#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_sparc -#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_sparc -#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_sparc -#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_sparc -#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_sparc -#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_sparc -#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_sparc -#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_sparc -#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_sparc -#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_sparc -#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_sparc -#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_sparc -#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_sparc -#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_sparc -#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_sparc -#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_sparc -#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_sparc -#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_sparc -#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_sparc -#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_sparc -#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_sparc -#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_sparc -#define gen_helper_neon_qsub_u32 
gen_helper_neon_qsub_u32_sparc -#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_sparc -#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_sparc -#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_sparc -#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_sparc -#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_sparc -#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_sparc -#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_sparc -#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_sparc -#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_sparc -#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_sparc -#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_sparc -#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_sparc -#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_sparc -#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_sparc -#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_sparc -#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_sparc -#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_sparc -#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_sparc -#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_sparc -#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_sparc -#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_sparc -#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_sparc -#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_sparc -#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_sparc -#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_sparc -#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_sparc -#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_sparc -#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_sparc -#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_sparc -#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_sparc -#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_sparc -#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_sparc -#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_sparc -#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_sparc -#define gen_helper_neon_tbl gen_helper_neon_tbl_sparc -#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_sparc -#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_sparc -#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_sparc -#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_sparc -#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_sparc -#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_sparc -#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_sparc -#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_sparc -#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_sparc -#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_sparc -#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_sparc -#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_sparc -#define gen_helper_neon_zip16 gen_helper_neon_zip16_sparc -#define gen_helper_neon_zip8 gen_helper_neon_zip8_sparc -#define gen_helper_pre_hvc gen_helper_pre_hvc_sparc -#define gen_helper_pre_smc gen_helper_pre_smc_sparc -#define gen_helper_qadd16 gen_helper_qadd16_sparc -#define gen_helper_qadd8 gen_helper_qadd8_sparc -#define gen_helper_qaddsubx gen_helper_qaddsubx_sparc -#define gen_helper_qsub16 gen_helper_qsub16_sparc -#define gen_helper_qsub8 gen_helper_qsub8_sparc -#define 
gen_helper_qsubaddx gen_helper_qsubaddx_sparc -#define gen_helper_rbit gen_helper_rbit_sparc -#define gen_helper_recpe_f32 gen_helper_recpe_f32_sparc -#define gen_helper_recpe_u32 gen_helper_recpe_u32_sparc -#define gen_helper_recps_f32 gen_helper_recps_f32_sparc -#define gen_helper_rintd gen_helper_rintd_sparc -#define gen_helper_rintd_exact gen_helper_rintd_exact_sparc -#define gen_helper_rints gen_helper_rints_sparc -#define gen_helper_rints_exact gen_helper_rints_exact_sparc -#define gen_helper_ror_cc gen_helper_ror_cc_sparc -#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_sparc -#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_sparc -#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_sparc -#define gen_helper_sadd16 gen_helper_sadd16_sparc -#define gen_helper_sadd8 gen_helper_sadd8_sparc -#define gen_helper_saddsubx gen_helper_saddsubx_sparc -#define gen_helper_sar_cc gen_helper_sar_cc_sparc -#define gen_helper_sdiv gen_helper_sdiv_sparc -#define gen_helper_sel_flags gen_helper_sel_flags_sparc -#define gen_helper_set_cp_reg gen_helper_set_cp_reg_sparc -#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_sparc -#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_sparc -#define gen_helper_set_r13_banked gen_helper_set_r13_banked_sparc -#define gen_helper_set_rmode gen_helper_set_rmode_sparc -#define gen_helper_set_user_reg gen_helper_set_user_reg_sparc -#define gen_helper_shadd16 gen_helper_shadd16_sparc -#define gen_helper_shadd8 gen_helper_shadd8_sparc -#define gen_helper_shaddsubx gen_helper_shaddsubx_sparc -#define gen_helper_shl_cc gen_helper_shl_cc_sparc -#define gen_helper_shr_cc gen_helper_shr_cc_sparc -#define gen_helper_shsub16 gen_helper_shsub16_sparc -#define gen_helper_shsub8 gen_helper_shsub8_sparc -#define gen_helper_shsubaddx gen_helper_shsubaddx_sparc -#define gen_helper_ssat gen_helper_ssat_sparc -#define gen_helper_ssat16 gen_helper_ssat16_sparc -#define gen_helper_ssub16 gen_helper_ssub16_sparc -#define gen_helper_ssub8 gen_helper_ssub8_sparc -#define gen_helper_ssubaddx gen_helper_ssubaddx_sparc -#define gen_helper_sub_saturate gen_helper_sub_saturate_sparc -#define gen_helper_sxtb16 gen_helper_sxtb16_sparc -#define gen_helper_uadd16 gen_helper_uadd16_sparc -#define gen_helper_uadd8 gen_helper_uadd8_sparc -#define gen_helper_uaddsubx gen_helper_uaddsubx_sparc -#define gen_helper_udiv gen_helper_udiv_sparc -#define gen_helper_uhadd16 gen_helper_uhadd16_sparc -#define gen_helper_uhadd8 gen_helper_uhadd8_sparc -#define gen_helper_uhaddsubx gen_helper_uhaddsubx_sparc -#define gen_helper_uhsub16 gen_helper_uhsub16_sparc -#define gen_helper_uhsub8 gen_helper_uhsub8_sparc -#define gen_helper_uhsubaddx gen_helper_uhsubaddx_sparc -#define gen_helper_uqadd16 gen_helper_uqadd16_sparc -#define gen_helper_uqadd8 gen_helper_uqadd8_sparc -#define gen_helper_uqaddsubx gen_helper_uqaddsubx_sparc -#define gen_helper_uqsub16 gen_helper_uqsub16_sparc -#define gen_helper_uqsub8 gen_helper_uqsub8_sparc -#define gen_helper_uqsubaddx gen_helper_uqsubaddx_sparc -#define gen_helper_usad8 gen_helper_usad8_sparc -#define gen_helper_usat gen_helper_usat_sparc -#define gen_helper_usat16 gen_helper_usat16_sparc -#define gen_helper_usub16 gen_helper_usub16_sparc -#define gen_helper_usub8 gen_helper_usub8_sparc -#define gen_helper_usubaddx gen_helper_usubaddx_sparc -#define gen_helper_uxtb16 gen_helper_uxtb16_sparc -#define gen_helper_v7m_mrs gen_helper_v7m_mrs_sparc -#define gen_helper_v7m_msr gen_helper_v7m_msr_sparc -#define gen_helper_vfp_absd gen_helper_vfp_absd_sparc 
-#define gen_helper_vfp_abss gen_helper_vfp_abss_sparc -#define gen_helper_vfp_addd gen_helper_vfp_addd_sparc -#define gen_helper_vfp_adds gen_helper_vfp_adds_sparc -#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_sparc -#define gen_helper_vfp_cmped gen_helper_vfp_cmped_sparc -#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_sparc -#define gen_helper_vfp_cmps gen_helper_vfp_cmps_sparc -#define gen_helper_vfp_divd gen_helper_vfp_divd_sparc -#define gen_helper_vfp_divs gen_helper_vfp_divs_sparc -#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_sparc -#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_sparc -#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_sparc -#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_sparc -#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_sparc -#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_sparc -#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_sparc -#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_sparc -#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_sparc -#define gen_helper_vfp_maxs gen_helper_vfp_maxs_sparc -#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_sparc -#define gen_helper_vfp_minnums gen_helper_vfp_minnums_sparc -#define gen_helper_vfp_mins gen_helper_vfp_mins_sparc -#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_sparc -#define gen_helper_vfp_muladds gen_helper_vfp_muladds_sparc -#define gen_helper_vfp_muld gen_helper_vfp_muld_sparc -#define gen_helper_vfp_muls gen_helper_vfp_muls_sparc -#define gen_helper_vfp_negd gen_helper_vfp_negd_sparc -#define gen_helper_vfp_negs gen_helper_vfp_negs_sparc -#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_sparc -#define gen_helper_vfp_shtod gen_helper_vfp_shtod_sparc -#define gen_helper_vfp_shtos gen_helper_vfp_shtos_sparc -#define gen_helper_vfp_sitod gen_helper_vfp_sitod_sparc -#define gen_helper_vfp_sitos gen_helper_vfp_sitos_sparc -#define gen_helper_vfp_sltod gen_helper_vfp_sltod_sparc -#define gen_helper_vfp_sltos gen_helper_vfp_sltos_sparc -#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_sparc -#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_sparc -#define gen_helper_vfp_subd gen_helper_vfp_subd_sparc -#define gen_helper_vfp_subs gen_helper_vfp_subs_sparc -#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_sparc -#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_sparc -#define gen_helper_vfp_tosid gen_helper_vfp_tosid_sparc -#define gen_helper_vfp_tosis gen_helper_vfp_tosis_sparc -#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_sparc -#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_sparc -#define gen_helper_vfp_tosld gen_helper_vfp_tosld_sparc -#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_sparc -#define gen_helper_vfp_tosls gen_helper_vfp_tosls_sparc -#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_sparc -#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_sparc -#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_sparc -#define gen_helper_vfp_touid gen_helper_vfp_touid_sparc -#define gen_helper_vfp_touis gen_helper_vfp_touis_sparc -#define gen_helper_vfp_touizd gen_helper_vfp_touizd_sparc -#define gen_helper_vfp_touizs gen_helper_vfp_touizs_sparc -#define gen_helper_vfp_tould gen_helper_vfp_tould_sparc -#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_sparc 
-#define gen_helper_vfp_touls gen_helper_vfp_touls_sparc -#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_sparc -#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_sparc -#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_sparc -#define gen_helper_vfp_uitod gen_helper_vfp_uitod_sparc -#define gen_helper_vfp_uitos gen_helper_vfp_uitos_sparc -#define gen_helper_vfp_ultod gen_helper_vfp_ultod_sparc -#define gen_helper_vfp_ultos gen_helper_vfp_ultos_sparc -#define gen_helper_wfe gen_helper_wfe_sparc -#define gen_helper_wfi gen_helper_wfi_sparc -#define gen_hvc gen_hvc_sparc -#define gen_intermediate_code_internal gen_intermediate_code_internal_sparc -#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_sparc -#define gen_iwmmxt_address gen_iwmmxt_address_sparc -#define gen_iwmmxt_shift gen_iwmmxt_shift_sparc -#define gen_jmp gen_jmp_sparc -#define gen_load_and_replicate gen_load_and_replicate_sparc -#define gen_load_exclusive gen_load_exclusive_sparc -#define gen_logic_CC gen_logic_CC_sparc -#define gen_logicq_cc gen_logicq_cc_sparc -#define gen_lookup_tb gen_lookup_tb_sparc -#define gen_mov_F0_vreg gen_mov_F0_vreg_sparc -#define gen_mov_F1_vreg gen_mov_F1_vreg_sparc -#define gen_mov_vreg_F0 gen_mov_vreg_F0_sparc -#define gen_muls_i64_i32 gen_muls_i64_i32_sparc -#define gen_mulu_i64_i32 gen_mulu_i64_i32_sparc -#define gen_mulxy gen_mulxy_sparc -#define gen_neon_add gen_neon_add_sparc -#define gen_neon_addl gen_neon_addl_sparc -#define gen_neon_addl_saturate gen_neon_addl_saturate_sparc -#define gen_neon_bsl gen_neon_bsl_sparc -#define gen_neon_dup_high16 gen_neon_dup_high16_sparc -#define gen_neon_dup_low16 gen_neon_dup_low16_sparc -#define gen_neon_dup_u8 gen_neon_dup_u8_sparc -#define gen_neon_mull gen_neon_mull_sparc -#define gen_neon_narrow gen_neon_narrow_sparc -#define gen_neon_narrow_op gen_neon_narrow_op_sparc -#define gen_neon_narrow_sats gen_neon_narrow_sats_sparc -#define gen_neon_narrow_satu gen_neon_narrow_satu_sparc -#define gen_neon_negl gen_neon_negl_sparc -#define gen_neon_rsb gen_neon_rsb_sparc -#define gen_neon_shift_narrow gen_neon_shift_narrow_sparc -#define gen_neon_subl gen_neon_subl_sparc -#define gen_neon_trn_u16 gen_neon_trn_u16_sparc -#define gen_neon_trn_u8 gen_neon_trn_u8_sparc -#define gen_neon_unarrow_sats gen_neon_unarrow_sats_sparc -#define gen_neon_unzip gen_neon_unzip_sparc -#define gen_neon_widen gen_neon_widen_sparc -#define gen_neon_zip gen_neon_zip_sparc +#define float128_to_int32 float128_to_int32_sparc +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_sparc +#define float128_to_int64 float128_to_int64_sparc +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_sparc +#define float128_to_uint64 float128_to_uint64_sparc +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_sparc +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_sparc +#define float128_to_uint32 float128_to_uint32_sparc +#define float128_to_float32 float128_to_float32_sparc +#define float128_to_float64 float128_to_float64_sparc +#define float128_to_floatx80 float128_to_floatx80_sparc +#define float128_round_to_int float128_round_to_int_sparc +#define float128_add float128_add_sparc +#define float128_sub float128_sub_sparc +#define float128_mul float128_mul_sparc +#define float128_div float128_div_sparc +#define float128_rem float128_rem_sparc +#define float128_sqrt float128_sqrt_sparc +#define float128_eq float128_eq_sparc +#define float128_le 
float128_le_sparc +#define float128_lt float128_lt_sparc +#define float128_unordered float128_unordered_sparc +#define float128_eq_quiet float128_eq_quiet_sparc +#define float128_le_quiet float128_le_quiet_sparc +#define float128_lt_quiet float128_lt_quiet_sparc +#define float128_unordered_quiet float128_unordered_quiet_sparc +#define floatx80_compare floatx80_compare_sparc +#define floatx80_compare_quiet floatx80_compare_quiet_sparc +#define float128_compare float128_compare_sparc +#define float128_compare_quiet float128_compare_quiet_sparc +#define floatx80_scalbn floatx80_scalbn_sparc +#define float128_scalbn float128_scalbn_sparc +#define softfloat_init softfloat_init_sparc +#define tcg_optimize tcg_optimize_sparc #define gen_new_label gen_new_label_sparc -#define gen_nop_hint gen_nop_hint_sparc -#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_sparc -#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_sparc -#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_sparc -#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_sparc -#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_sparc -#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_sparc -#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_sparc -#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_sparc -#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_sparc -#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_sparc -#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_sparc -#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_sparc -#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_sparc -#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_sparc -#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_sparc -#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_sparc -#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_sparc -#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_sparc -#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_sparc -#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_sparc -#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_sparc -#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_sparc -#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_sparc -#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_sparc -#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_sparc -#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_sparc -#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_sparc -#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_sparc -#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_sparc -#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_sparc -#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_sparc -#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_sparc -#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_sparc -#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_sparc -#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_sparc -#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_sparc -#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_sparc -#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_sparc -#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_sparc -#define 
gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_sparc -#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_sparc -#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_sparc -#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_sparc -#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_sparc -#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_sparc -#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_sparc -#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_sparc -#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_sparc -#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_sparc -#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_sparc -#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_sparc -#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_sparc -#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_sparc -#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_sparc -#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_sparc -#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_sparc -#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_sparc -#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_sparc -#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_sparc -#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_sparc -#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_sparc -#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_sparc -#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_sparc -#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_sparc -#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_sparc -#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_sparc -#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_sparc -#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_sparc -#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_sparc -#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_sparc -#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_sparc -#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_sparc -#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_sparc -#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_sparc -#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_sparc -#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_sparc -#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_sparc -#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_sparc -#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_sparc -#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_sparc -#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_sparc -#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_sparc -#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_sparc -#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_sparc -#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_sparc -#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_sparc -#define gen_rev16 gen_rev16_sparc -#define gen_revsh gen_revsh_sparc -#define gen_rfe gen_rfe_sparc -#define gen_sar gen_sar_sparc -#define gen_sbc_CC gen_sbc_CC_sparc -#define gen_sbfx gen_sbfx_sparc -#define gen_set_CF_bit31 gen_set_CF_bit31_sparc -#define gen_set_condexec gen_set_condexec_sparc 
-#define gen_set_cpsr gen_set_cpsr_sparc
-#define gen_set_label gen_set_label_sparc
-#define gen_set_pc_im gen_set_pc_im_sparc
-#define gen_set_psr gen_set_psr_sparc
-#define gen_set_psr_im gen_set_psr_im_sparc
-#define gen_shl gen_shl_sparc
-#define gen_shr gen_shr_sparc
-#define gen_smc gen_smc_sparc
-#define gen_smul_dual gen_smul_dual_sparc
-#define gen_srs gen_srs_sparc
-#define gen_ss_advance gen_ss_advance_sparc
-#define gen_step_complete_exception gen_step_complete_exception_sparc
-#define gen_store_exclusive gen_store_exclusive_sparc
-#define gen_storeq_reg gen_storeq_reg_sparc
-#define gen_sub_carry gen_sub_carry_sparc
-#define gen_sub_CC gen_sub_CC_sparc
-#define gen_subq_msw gen_subq_msw_sparc
-#define gen_swap_half gen_swap_half_sparc
-#define gen_thumb2_data_op gen_thumb2_data_op_sparc
-#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_sparc
-#define gen_ubfx gen_ubfx_sparc
-#define gen_vfp_abs gen_vfp_abs_sparc
-#define gen_vfp_add gen_vfp_add_sparc
-#define gen_vfp_cmp gen_vfp_cmp_sparc
-#define gen_vfp_cmpe gen_vfp_cmpe_sparc
-#define gen_vfp_div gen_vfp_div_sparc
-#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_sparc
-#define gen_vfp_F1_mul gen_vfp_F1_mul_sparc
-#define gen_vfp_F1_neg gen_vfp_F1_neg_sparc
-#define gen_vfp_ld gen_vfp_ld_sparc
-#define gen_vfp_mrs gen_vfp_mrs_sparc
-#define gen_vfp_msr gen_vfp_msr_sparc
-#define gen_vfp_mul gen_vfp_mul_sparc
-#define gen_vfp_neg gen_vfp_neg_sparc
-#define gen_vfp_shto gen_vfp_shto_sparc
-#define gen_vfp_sito gen_vfp_sito_sparc
-#define gen_vfp_slto gen_vfp_slto_sparc
-#define gen_vfp_sqrt gen_vfp_sqrt_sparc
-#define gen_vfp_st gen_vfp_st_sparc
-#define gen_vfp_sub gen_vfp_sub_sparc
-#define gen_vfp_tosh gen_vfp_tosh_sparc
-#define gen_vfp_tosi gen_vfp_tosi_sparc
-#define gen_vfp_tosiz gen_vfp_tosiz_sparc
-#define gen_vfp_tosl gen_vfp_tosl_sparc
-#define gen_vfp_touh gen_vfp_touh_sparc
-#define gen_vfp_toui gen_vfp_toui_sparc
-#define gen_vfp_touiz gen_vfp_touiz_sparc
-#define gen_vfp_toul gen_vfp_toul_sparc
-#define gen_vfp_uhto gen_vfp_uhto_sparc
-#define gen_vfp_uito gen_vfp_uito_sparc
-#define gen_vfp_ulto gen_vfp_ulto_sparc
-#define get_arm_cp_reginfo get_arm_cp_reginfo_sparc
-#define get_clock get_clock_sparc
-#define get_clock_realtime get_clock_realtime_sparc
-#define get_constraint_priority get_constraint_priority_sparc
-#define get_float_exception_flags get_float_exception_flags_sparc
-#define get_float_rounding_mode get_float_rounding_mode_sparc
-#define get_fpstatus_ptr get_fpstatus_ptr_sparc
-#define get_level1_table_address get_level1_table_address_sparc
-#define get_mem_index get_mem_index_sparc
-#define get_next_param_value get_next_param_value_sparc
-#define get_opt_name get_opt_name_sparc
-#define get_opt_value get_opt_value_sparc
-#define get_page_addr_code get_page_addr_code_sparc
-#define get_param_value get_param_value_sparc
-#define get_phys_addr get_phys_addr_sparc
-#define get_phys_addr_lpae get_phys_addr_lpae_sparc
-#define get_phys_addr_mpu get_phys_addr_mpu_sparc
-#define get_phys_addr_v5 get_phys_addr_v5_sparc
-#define get_phys_addr_v6 get_phys_addr_v6_sparc
-#define get_system_memory get_system_memory_sparc
-#define get_ticks_per_sec get_ticks_per_sec_sparc
-#define g_list_insert_sorted_merged g_list_insert_sorted_merged_sparc
-#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__sparc
-#define gt_cntfrq_access gt_cntfrq_access_sparc
-#define gt_cnt_read gt_cnt_read_sparc
-#define gt_cnt_reset gt_cnt_reset_sparc
-#define gt_counter_access gt_counter_access_sparc
-#define gt_ctl_write gt_ctl_write_sparc
-#define gt_cval_write gt_cval_write_sparc
-#define gt_get_countervalue gt_get_countervalue_sparc
-#define gt_pct_access gt_pct_access_sparc
-#define gt_ptimer_access gt_ptimer_access_sparc
-#define gt_recalc_timer gt_recalc_timer_sparc
-#define gt_timer_access gt_timer_access_sparc
-#define gt_tval_read gt_tval_read_sparc
-#define gt_tval_write gt_tval_write_sparc
-#define gt_vct_access gt_vct_access_sparc
-#define gt_vtimer_access gt_vtimer_access_sparc
-#define guest_phys_blocks_free guest_phys_blocks_free_sparc
-#define guest_phys_blocks_init guest_phys_blocks_init_sparc
-#define handle_vcvt handle_vcvt_sparc
-#define handle_vminmaxnm handle_vminmaxnm_sparc
-#define handle_vrint handle_vrint_sparc
-#define handle_vsel handle_vsel_sparc
-#define has_help_option has_help_option_sparc
-#define have_bmi1 have_bmi1_sparc
-#define have_bmi2 have_bmi2_sparc
-#define hcr_write hcr_write_sparc
-#define helper_access_check_cp_reg helper_access_check_cp_reg_sparc
-#define helper_add_saturate helper_add_saturate_sparc
-#define helper_add_setq helper_add_setq_sparc
-#define helper_add_usaturate helper_add_usaturate_sparc
-#define helper_be_ldl_cmmu helper_be_ldl_cmmu_sparc
-#define helper_be_ldq_cmmu helper_be_ldq_cmmu_sparc
-#define helper_be_ldq_mmu helper_be_ldq_mmu_sparc
-#define helper_be_ldsl_mmu helper_be_ldsl_mmu_sparc
-#define helper_be_ldsw_mmu helper_be_ldsw_mmu_sparc
-#define helper_be_ldul_mmu helper_be_ldul_mmu_sparc
-#define helper_be_lduw_mmu helper_be_lduw_mmu_sparc
-#define helper_be_ldw_cmmu helper_be_ldw_cmmu_sparc
-#define helper_be_stl_mmu helper_be_stl_mmu_sparc
-#define helper_be_stq_mmu helper_be_stq_mmu_sparc
-#define helper_be_stw_mmu helper_be_stw_mmu_sparc
-#define helper_clear_pstate_ss helper_clear_pstate_ss_sparc
-#define helper_clz_arm helper_clz_arm_sparc
-#define helper_cpsr_read helper_cpsr_read_sparc
-#define helper_cpsr_write helper_cpsr_write_sparc
-#define helper_crc32_arm helper_crc32_arm_sparc
-#define helper_crc32c helper_crc32c_sparc
-#define helper_crypto_aese helper_crypto_aese_sparc
-#define helper_crypto_aesmc helper_crypto_aesmc_sparc
-#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_sparc
-#define helper_crypto_sha1h helper_crypto_sha1h_sparc
-#define helper_crypto_sha1su1 helper_crypto_sha1su1_sparc
-#define helper_crypto_sha256h helper_crypto_sha256h_sparc
-#define helper_crypto_sha256h2 helper_crypto_sha256h2_sparc
-#define helper_crypto_sha256su0 helper_crypto_sha256su0_sparc
-#define helper_crypto_sha256su1 helper_crypto_sha256su1_sparc
-#define helper_dc_zva helper_dc_zva_sparc
-#define helper_double_saturate helper_double_saturate_sparc
-#define helper_exception_internal helper_exception_internal_sparc
-#define helper_exception_return helper_exception_return_sparc
-#define helper_exception_with_syndrome helper_exception_with_syndrome_sparc
-#define helper_get_cp_reg helper_get_cp_reg_sparc
-#define helper_get_cp_reg64 helper_get_cp_reg64_sparc
-#define helper_get_r13_banked helper_get_r13_banked_sparc
-#define helper_get_user_reg helper_get_user_reg_sparc
-#define helper_iwmmxt_addcb helper_iwmmxt_addcb_sparc
-#define helper_iwmmxt_addcl helper_iwmmxt_addcl_sparc
-#define helper_iwmmxt_addcw helper_iwmmxt_addcw_sparc
-#define helper_iwmmxt_addnb helper_iwmmxt_addnb_sparc
-#define helper_iwmmxt_addnl helper_iwmmxt_addnl_sparc
-#define helper_iwmmxt_addnw helper_iwmmxt_addnw_sparc
-#define helper_iwmmxt_addsb helper_iwmmxt_addsb_sparc
-#define helper_iwmmxt_addsl helper_iwmmxt_addsl_sparc
-#define helper_iwmmxt_addsw helper_iwmmxt_addsw_sparc
-#define helper_iwmmxt_addub helper_iwmmxt_addub_sparc
-#define helper_iwmmxt_addul helper_iwmmxt_addul_sparc
-#define helper_iwmmxt_adduw helper_iwmmxt_adduw_sparc
-#define helper_iwmmxt_align helper_iwmmxt_align_sparc
-#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_sparc
-#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_sparc
-#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_sparc
-#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_sparc
-#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_sparc
-#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_sparc
-#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_sparc
-#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_sparc
-#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_sparc
-#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_sparc
-#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_sparc
-#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_sparc
-#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_sparc
-#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_sparc
-#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_sparc
-#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_sparc
-#define helper_iwmmxt_insr helper_iwmmxt_insr_sparc
-#define helper_iwmmxt_macsw helper_iwmmxt_macsw_sparc
-#define helper_iwmmxt_macuw helper_iwmmxt_macuw_sparc
-#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_sparc
-#define helper_iwmmxt_madduq helper_iwmmxt_madduq_sparc
-#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_sparc
-#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_sparc
-#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_sparc
-#define helper_iwmmxt_maxub helper_iwmmxt_maxub_sparc
-#define helper_iwmmxt_maxul helper_iwmmxt_maxul_sparc
-#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_sparc
-#define helper_iwmmxt_minsb helper_iwmmxt_minsb_sparc
-#define helper_iwmmxt_minsl helper_iwmmxt_minsl_sparc
-#define helper_iwmmxt_minsw helper_iwmmxt_minsw_sparc
-#define helper_iwmmxt_minub helper_iwmmxt_minub_sparc
-#define helper_iwmmxt_minul helper_iwmmxt_minul_sparc
-#define helper_iwmmxt_minuw helper_iwmmxt_minuw_sparc
-#define helper_iwmmxt_msbb helper_iwmmxt_msbb_sparc
-#define helper_iwmmxt_msbl helper_iwmmxt_msbl_sparc
-#define helper_iwmmxt_msbw helper_iwmmxt_msbw_sparc
-#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_sparc
-#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_sparc
-#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_sparc
-#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_sparc
-#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_sparc
-#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_sparc
-#define helper_iwmmxt_mululw helper_iwmmxt_mululw_sparc
-#define helper_iwmmxt_packsl helper_iwmmxt_packsl_sparc
-#define helper_iwmmxt_packsq helper_iwmmxt_packsq_sparc
-#define helper_iwmmxt_packsw helper_iwmmxt_packsw_sparc
-#define helper_iwmmxt_packul helper_iwmmxt_packul_sparc
-#define helper_iwmmxt_packuq helper_iwmmxt_packuq_sparc
-#define helper_iwmmxt_packuw helper_iwmmxt_packuw_sparc
-#define helper_iwmmxt_rorl helper_iwmmxt_rorl_sparc
-#define helper_iwmmxt_rorq helper_iwmmxt_rorq_sparc
-#define helper_iwmmxt_rorw helper_iwmmxt_rorw_sparc
-#define helper_iwmmxt_sadb helper_iwmmxt_sadb_sparc
-#define helper_iwmmxt_sadw helper_iwmmxt_sadw_sparc
-#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_sparc
-#define helper_iwmmxt_shufh helper_iwmmxt_shufh_sparc
-#define helper_iwmmxt_slll helper_iwmmxt_slll_sparc
-#define helper_iwmmxt_sllq helper_iwmmxt_sllq_sparc
-#define helper_iwmmxt_sllw helper_iwmmxt_sllw_sparc
-#define helper_iwmmxt_sral helper_iwmmxt_sral_sparc
-#define helper_iwmmxt_sraq helper_iwmmxt_sraq_sparc
-#define helper_iwmmxt_sraw helper_iwmmxt_sraw_sparc
-#define helper_iwmmxt_srll helper_iwmmxt_srll_sparc
-#define helper_iwmmxt_srlq helper_iwmmxt_srlq_sparc
-#define helper_iwmmxt_srlw helper_iwmmxt_srlw_sparc
-#define helper_iwmmxt_subnb helper_iwmmxt_subnb_sparc
-#define helper_iwmmxt_subnl helper_iwmmxt_subnl_sparc
-#define helper_iwmmxt_subnw helper_iwmmxt_subnw_sparc
-#define helper_iwmmxt_subsb helper_iwmmxt_subsb_sparc
-#define helper_iwmmxt_subsl helper_iwmmxt_subsl_sparc
-#define helper_iwmmxt_subsw helper_iwmmxt_subsw_sparc
-#define helper_iwmmxt_subub helper_iwmmxt_subub_sparc
-#define helper_iwmmxt_subul helper_iwmmxt_subul_sparc
-#define helper_iwmmxt_subuw helper_iwmmxt_subuw_sparc
-#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_sparc
-#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_sparc
-#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_sparc
-#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_sparc
-#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_sparc
-#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_sparc
-#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_sparc
-#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_sparc
-#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_sparc
-#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_sparc
-#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_sparc
-#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_sparc
-#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_sparc
-#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_sparc
-#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_sparc
-#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_sparc
-#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_sparc
-#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_sparc
-#define helper_ldb_cmmu helper_ldb_cmmu_sparc
-#define helper_ldb_mmu helper_ldb_mmu_sparc
-#define helper_ldl_cmmu helper_ldl_cmmu_sparc
-#define helper_ldl_mmu helper_ldl_mmu_sparc
-#define helper_ldq_cmmu helper_ldq_cmmu_sparc
-#define helper_ldq_mmu helper_ldq_mmu_sparc
-#define helper_ldw_cmmu helper_ldw_cmmu_sparc
-#define helper_ldw_mmu helper_ldw_mmu_sparc
-#define helper_le_ldl_cmmu helper_le_ldl_cmmu_sparc
-#define helper_le_ldq_cmmu helper_le_ldq_cmmu_sparc
-#define helper_le_ldq_mmu helper_le_ldq_mmu_sparc
-#define helper_le_ldsl_mmu helper_le_ldsl_mmu_sparc
-#define helper_le_ldsw_mmu helper_le_ldsw_mmu_sparc
-#define helper_le_ldul_mmu helper_le_ldul_mmu_sparc
-#define helper_le_lduw_mmu helper_le_lduw_mmu_sparc
-#define helper_le_ldw_cmmu helper_le_ldw_cmmu_sparc
-#define helper_le_stl_mmu helper_le_stl_mmu_sparc
-#define helper_le_stq_mmu helper_le_stq_mmu_sparc
-#define helper_le_stw_mmu helper_le_stw_mmu_sparc
-#define helper_msr_i_pstate helper_msr_i_pstate_sparc
-#define helper_neon_abd_f32 helper_neon_abd_f32_sparc
-#define helper_neon_abdl_s16 helper_neon_abdl_s16_sparc
-#define helper_neon_abdl_s32 helper_neon_abdl_s32_sparc
-#define helper_neon_abdl_s64 helper_neon_abdl_s64_sparc
-#define helper_neon_abdl_u16 helper_neon_abdl_u16_sparc
-#define helper_neon_abdl_u32 helper_neon_abdl_u32_sparc
-#define helper_neon_abdl_u64 helper_neon_abdl_u64_sparc
-#define helper_neon_abd_s16 helper_neon_abd_s16_sparc
-#define helper_neon_abd_s32 helper_neon_abd_s32_sparc
-#define helper_neon_abd_s8 helper_neon_abd_s8_sparc
-#define helper_neon_abd_u16 helper_neon_abd_u16_sparc
-#define helper_neon_abd_u32 helper_neon_abd_u32_sparc
-#define helper_neon_abd_u8 helper_neon_abd_u8_sparc
-#define helper_neon_abs_s16 helper_neon_abs_s16_sparc
-#define helper_neon_abs_s8 helper_neon_abs_s8_sparc
-#define helper_neon_acge_f32 helper_neon_acge_f32_sparc
-#define helper_neon_acge_f64 helper_neon_acge_f64_sparc
-#define helper_neon_acgt_f32 helper_neon_acgt_f32_sparc
-#define helper_neon_acgt_f64 helper_neon_acgt_f64_sparc
-#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_sparc
-#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_sparc
-#define helper_neon_addl_u16 helper_neon_addl_u16_sparc
-#define helper_neon_addl_u32 helper_neon_addl_u32_sparc
-#define helper_neon_add_u16 helper_neon_add_u16_sparc
-#define helper_neon_add_u8 helper_neon_add_u8_sparc
-#define helper_neon_ceq_f32 helper_neon_ceq_f32_sparc
-#define helper_neon_ceq_u16 helper_neon_ceq_u16_sparc
-#define helper_neon_ceq_u32 helper_neon_ceq_u32_sparc
-#define helper_neon_ceq_u8 helper_neon_ceq_u8_sparc
-#define helper_neon_cge_f32 helper_neon_cge_f32_sparc
-#define helper_neon_cge_s16 helper_neon_cge_s16_sparc
-#define helper_neon_cge_s32 helper_neon_cge_s32_sparc
-#define helper_neon_cge_s8 helper_neon_cge_s8_sparc
-#define helper_neon_cge_u16 helper_neon_cge_u16_sparc
-#define helper_neon_cge_u32 helper_neon_cge_u32_sparc
-#define helper_neon_cge_u8 helper_neon_cge_u8_sparc
-#define helper_neon_cgt_f32 helper_neon_cgt_f32_sparc
-#define helper_neon_cgt_s16 helper_neon_cgt_s16_sparc
-#define helper_neon_cgt_s32 helper_neon_cgt_s32_sparc
-#define helper_neon_cgt_s8 helper_neon_cgt_s8_sparc
-#define helper_neon_cgt_u16 helper_neon_cgt_u16_sparc
-#define helper_neon_cgt_u32 helper_neon_cgt_u32_sparc
-#define helper_neon_cgt_u8 helper_neon_cgt_u8_sparc
-#define helper_neon_cls_s16 helper_neon_cls_s16_sparc
-#define helper_neon_cls_s32 helper_neon_cls_s32_sparc
-#define helper_neon_cls_s8 helper_neon_cls_s8_sparc
-#define helper_neon_clz_u16 helper_neon_clz_u16_sparc
-#define helper_neon_clz_u8 helper_neon_clz_u8_sparc
-#define helper_neon_cnt_u8 helper_neon_cnt_u8_sparc
-#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_sparc
-#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_sparc
-#define helper_neon_hadd_s16 helper_neon_hadd_s16_sparc
-#define helper_neon_hadd_s32 helper_neon_hadd_s32_sparc
-#define helper_neon_hadd_s8 helper_neon_hadd_s8_sparc
-#define helper_neon_hadd_u16 helper_neon_hadd_u16_sparc
-#define helper_neon_hadd_u32 helper_neon_hadd_u32_sparc
-#define helper_neon_hadd_u8 helper_neon_hadd_u8_sparc
-#define helper_neon_hsub_s16 helper_neon_hsub_s16_sparc
-#define helper_neon_hsub_s32 helper_neon_hsub_s32_sparc
-#define helper_neon_hsub_s8 helper_neon_hsub_s8_sparc
-#define helper_neon_hsub_u16 helper_neon_hsub_u16_sparc
-#define helper_neon_hsub_u32 helper_neon_hsub_u32_sparc
-#define helper_neon_hsub_u8 helper_neon_hsub_u8_sparc
-#define helper_neon_max_s16 helper_neon_max_s16_sparc
-#define helper_neon_max_s32 helper_neon_max_s32_sparc
-#define helper_neon_max_s8 helper_neon_max_s8_sparc
-#define helper_neon_max_u16 helper_neon_max_u16_sparc
-#define helper_neon_max_u32 helper_neon_max_u32_sparc
-#define helper_neon_max_u8 helper_neon_max_u8_sparc
-#define helper_neon_min_s16 helper_neon_min_s16_sparc
-#define helper_neon_min_s32 helper_neon_min_s32_sparc
-#define helper_neon_min_s8 helper_neon_min_s8_sparc
-#define helper_neon_min_u16 helper_neon_min_u16_sparc
-#define helper_neon_min_u32 helper_neon_min_u32_sparc
-#define helper_neon_min_u8 helper_neon_min_u8_sparc
-#define helper_neon_mull_p8 helper_neon_mull_p8_sparc
-#define helper_neon_mull_s16 helper_neon_mull_s16_sparc
-#define helper_neon_mull_s8 helper_neon_mull_s8_sparc
-#define helper_neon_mull_u16 helper_neon_mull_u16_sparc
-#define helper_neon_mull_u8 helper_neon_mull_u8_sparc
-#define helper_neon_mul_p8 helper_neon_mul_p8_sparc
-#define helper_neon_mul_u16 helper_neon_mul_u16_sparc
-#define helper_neon_mul_u8 helper_neon_mul_u8_sparc
-#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_sparc
-#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_sparc
-#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_sparc
-#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_sparc
-#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_sparc
-#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_sparc
-#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_sparc
-#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_sparc
-#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_sparc
-#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_sparc
-#define helper_neon_narrow_u16 helper_neon_narrow_u16_sparc
-#define helper_neon_narrow_u8 helper_neon_narrow_u8_sparc
-#define helper_neon_negl_u16 helper_neon_negl_u16_sparc
-#define helper_neon_negl_u32 helper_neon_negl_u32_sparc
-#define helper_neon_paddl_u16 helper_neon_paddl_u16_sparc
-#define helper_neon_paddl_u32 helper_neon_paddl_u32_sparc
-#define helper_neon_padd_u16 helper_neon_padd_u16_sparc
-#define helper_neon_padd_u8 helper_neon_padd_u8_sparc
-#define helper_neon_pmax_s16 helper_neon_pmax_s16_sparc
-#define helper_neon_pmax_s8 helper_neon_pmax_s8_sparc
-#define helper_neon_pmax_u16 helper_neon_pmax_u16_sparc
-#define helper_neon_pmax_u8 helper_neon_pmax_u8_sparc
-#define helper_neon_pmin_s16 helper_neon_pmin_s16_sparc
-#define helper_neon_pmin_s8 helper_neon_pmin_s8_sparc
-#define helper_neon_pmin_u16 helper_neon_pmin_u16_sparc
-#define helper_neon_pmin_u8 helper_neon_pmin_u8_sparc
-#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_sparc
-#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_sparc
-#define helper_neon_qabs_s16 helper_neon_qabs_s16_sparc
-#define helper_neon_qabs_s32 helper_neon_qabs_s32_sparc
-#define helper_neon_qabs_s64 helper_neon_qabs_s64_sparc
-#define helper_neon_qabs_s8 helper_neon_qabs_s8_sparc
-#define helper_neon_qadd_s16 helper_neon_qadd_s16_sparc
-#define helper_neon_qadd_s32 helper_neon_qadd_s32_sparc
-#define helper_neon_qadd_s64 helper_neon_qadd_s64_sparc
-#define helper_neon_qadd_s8 helper_neon_qadd_s8_sparc
-#define helper_neon_qadd_u16 helper_neon_qadd_u16_sparc
-#define helper_neon_qadd_u32 helper_neon_qadd_u32_sparc
-#define helper_neon_qadd_u64 helper_neon_qadd_u64_sparc
-#define helper_neon_qadd_u8 helper_neon_qadd_u8_sparc
-#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_sparc
-#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_sparc
-#define helper_neon_qneg_s16 helper_neon_qneg_s16_sparc
-#define helper_neon_qneg_s32 helper_neon_qneg_s32_sparc
-#define helper_neon_qneg_s64 helper_neon_qneg_s64_sparc
-#define helper_neon_qneg_s8 helper_neon_qneg_s8_sparc
-#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_sparc
-#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_sparc
-#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_sparc
-#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_sparc
-#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_sparc
-#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_sparc
-#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_sparc
-#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_sparc
-#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_sparc
-#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_sparc
-#define helper_neon_qshl_s16 helper_neon_qshl_s16_sparc
-#define helper_neon_qshl_s32 helper_neon_qshl_s32_sparc
-#define helper_neon_qshl_s64 helper_neon_qshl_s64_sparc
-#define helper_neon_qshl_s8 helper_neon_qshl_s8_sparc
-#define helper_neon_qshl_u16 helper_neon_qshl_u16_sparc
-#define helper_neon_qshl_u32 helper_neon_qshl_u32_sparc
-#define helper_neon_qshl_u64 helper_neon_qshl_u64_sparc
-#define helper_neon_qshl_u8 helper_neon_qshl_u8_sparc
-#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_sparc
-#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_sparc
-#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_sparc
-#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_sparc
-#define helper_neon_qsub_s16 helper_neon_qsub_s16_sparc
-#define helper_neon_qsub_s32 helper_neon_qsub_s32_sparc
-#define helper_neon_qsub_s64 helper_neon_qsub_s64_sparc
-#define helper_neon_qsub_s8 helper_neon_qsub_s8_sparc
-#define helper_neon_qsub_u16 helper_neon_qsub_u16_sparc
-#define helper_neon_qsub_u32 helper_neon_qsub_u32_sparc
-#define helper_neon_qsub_u64 helper_neon_qsub_u64_sparc
-#define helper_neon_qsub_u8 helper_neon_qsub_u8_sparc
-#define helper_neon_qunzip16 helper_neon_qunzip16_sparc
-#define helper_neon_qunzip32 helper_neon_qunzip32_sparc
-#define helper_neon_qunzip8 helper_neon_qunzip8_sparc
-#define helper_neon_qzip16 helper_neon_qzip16_sparc
-#define helper_neon_qzip32 helper_neon_qzip32_sparc
-#define helper_neon_qzip8 helper_neon_qzip8_sparc
-#define helper_neon_rbit_u8 helper_neon_rbit_u8_sparc
-#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_sparc
-#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_sparc
-#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_sparc
-#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_sparc
-#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_sparc
-#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_sparc
-#define helper_neon_rshl_s16 helper_neon_rshl_s16_sparc
-#define helper_neon_rshl_s32 helper_neon_rshl_s32_sparc
-#define helper_neon_rshl_s64 helper_neon_rshl_s64_sparc
-#define helper_neon_rshl_s8 helper_neon_rshl_s8_sparc
-#define helper_neon_rshl_u16 helper_neon_rshl_u16_sparc
-#define helper_neon_rshl_u32 helper_neon_rshl_u32_sparc
-#define helper_neon_rshl_u64 helper_neon_rshl_u64_sparc
-#define helper_neon_rshl_u8 helper_neon_rshl_u8_sparc
-#define helper_neon_shl_s16 helper_neon_shl_s16_sparc
-#define helper_neon_shl_s32 helper_neon_shl_s32_sparc
-#define helper_neon_shl_s64 helper_neon_shl_s64_sparc
-#define helper_neon_shl_s8 helper_neon_shl_s8_sparc
-#define helper_neon_shl_u16 helper_neon_shl_u16_sparc
-#define helper_neon_shl_u32 helper_neon_shl_u32_sparc
-#define helper_neon_shl_u64 helper_neon_shl_u64_sparc
-#define helper_neon_shl_u8 helper_neon_shl_u8_sparc
-#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_sparc
-#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_sparc
-#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_sparc
-#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_sparc
-#define helper_neon_subl_u16 helper_neon_subl_u16_sparc
-#define helper_neon_subl_u32 helper_neon_subl_u32_sparc
-#define helper_neon_sub_u16 helper_neon_sub_u16_sparc
-#define helper_neon_sub_u8 helper_neon_sub_u8_sparc
-#define helper_neon_tbl helper_neon_tbl_sparc
-#define helper_neon_tst_u16 helper_neon_tst_u16_sparc
-#define helper_neon_tst_u32 helper_neon_tst_u32_sparc
-#define helper_neon_tst_u8 helper_neon_tst_u8_sparc
-#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_sparc
-#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_sparc
-#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_sparc
-#define helper_neon_unzip16 helper_neon_unzip16_sparc
-#define helper_neon_unzip8 helper_neon_unzip8_sparc
-#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_sparc
-#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_sparc
-#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_sparc
-#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_sparc
-#define helper_neon_widen_s16 helper_neon_widen_s16_sparc
-#define helper_neon_widen_s8 helper_neon_widen_s8_sparc
-#define helper_neon_widen_u16 helper_neon_widen_u16_sparc
-#define helper_neon_widen_u8 helper_neon_widen_u8_sparc
-#define helper_neon_zip16 helper_neon_zip16_sparc
-#define helper_neon_zip8 helper_neon_zip8_sparc
-#define helper_pre_hvc helper_pre_hvc_sparc
-#define helper_pre_smc helper_pre_smc_sparc
-#define helper_qadd16 helper_qadd16_sparc
-#define helper_qadd8 helper_qadd8_sparc
-#define helper_qaddsubx helper_qaddsubx_sparc
-#define helper_qsub16 helper_qsub16_sparc
-#define helper_qsub8 helper_qsub8_sparc
-#define helper_qsubaddx helper_qsubaddx_sparc
-#define helper_rbit helper_rbit_sparc
-#define helper_recpe_f32 helper_recpe_f32_sparc
-#define helper_recpe_f64 helper_recpe_f64_sparc
-#define helper_recpe_u32 helper_recpe_u32_sparc
-#define helper_recps_f32 helper_recps_f32_sparc
-#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_sparc
-#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_sparc
-#define helper_ret_ldub_mmu helper_ret_ldub_mmu_sparc
-#define helper_ret_stb_mmu helper_ret_stb_mmu_sparc
-#define helper_rintd helper_rintd_sparc
-#define helper_rintd_exact helper_rintd_exact_sparc
-#define helper_rints helper_rints_sparc
-#define helper_rints_exact helper_rints_exact_sparc
-#define helper_ror_cc helper_ror_cc_sparc
-#define helper_rsqrte_f32 helper_rsqrte_f32_sparc
-#define helper_rsqrte_f64 helper_rsqrte_f64_sparc
-#define helper_rsqrte_u32 helper_rsqrte_u32_sparc
-#define helper_rsqrts_f32 helper_rsqrts_f32_sparc
-#define helper_sadd16 helper_sadd16_sparc
-#define helper_sadd8 helper_sadd8_sparc
-#define helper_saddsubx helper_saddsubx_sparc
-#define helper_sar_cc helper_sar_cc_sparc
-#define helper_sdiv helper_sdiv_sparc
-#define helper_sel_flags helper_sel_flags_sparc
-#define helper_set_cp_reg helper_set_cp_reg_sparc
-#define helper_set_cp_reg64 helper_set_cp_reg64_sparc
-#define helper_set_neon_rmode helper_set_neon_rmode_sparc
-#define helper_set_r13_banked helper_set_r13_banked_sparc
-#define helper_set_rmode helper_set_rmode_sparc
-#define helper_set_user_reg helper_set_user_reg_sparc
-#define helper_shadd16 helper_shadd16_sparc
-#define helper_shadd8 helper_shadd8_sparc
-#define helper_shaddsubx helper_shaddsubx_sparc
-#define helper_shl_cc helper_shl_cc_sparc
-#define helper_shr_cc helper_shr_cc_sparc
-#define helper_shsub16 helper_shsub16_sparc
-#define helper_shsub8 helper_shsub8_sparc
-#define helper_shsubaddx helper_shsubaddx_sparc
-#define helper_ssat helper_ssat_sparc
-#define helper_ssat16 helper_ssat16_sparc
-#define helper_ssub16 helper_ssub16_sparc
-#define helper_ssub8 helper_ssub8_sparc
-#define helper_ssubaddx helper_ssubaddx_sparc
-#define helper_stb_mmu helper_stb_mmu_sparc
-#define helper_stl_mmu helper_stl_mmu_sparc
-#define helper_stq_mmu helper_stq_mmu_sparc
-#define helper_stw_mmu helper_stw_mmu_sparc
-#define helper_sub_saturate helper_sub_saturate_sparc
-#define helper_sub_usaturate helper_sub_usaturate_sparc
-#define helper_sxtb16 helper_sxtb16_sparc
-#define helper_uadd16 helper_uadd16_sparc
-#define helper_uadd8 helper_uadd8_sparc
-#define helper_uaddsubx helper_uaddsubx_sparc
-#define helper_udiv helper_udiv_sparc
-#define helper_uhadd16 helper_uhadd16_sparc
-#define helper_uhadd8 helper_uhadd8_sparc
-#define helper_uhaddsubx helper_uhaddsubx_sparc
-#define helper_uhsub16 helper_uhsub16_sparc
-#define helper_uhsub8 helper_uhsub8_sparc
-#define helper_uhsubaddx helper_uhsubaddx_sparc
-#define helper_uqadd16 helper_uqadd16_sparc
-#define helper_uqadd8 helper_uqadd8_sparc
-#define helper_uqaddsubx helper_uqaddsubx_sparc
-#define helper_uqsub16 helper_uqsub16_sparc
-#define helper_uqsub8 helper_uqsub8_sparc
-#define helper_uqsubaddx helper_uqsubaddx_sparc
-#define helper_usad8 helper_usad8_sparc
-#define helper_usat helper_usat_sparc
-#define helper_usat16 helper_usat16_sparc
-#define helper_usub16 helper_usub16_sparc
-#define helper_usub8 helper_usub8_sparc
-#define helper_usubaddx helper_usubaddx_sparc
-#define helper_uxtb16 helper_uxtb16_sparc
-#define helper_v7m_mrs helper_v7m_mrs_sparc
-#define helper_v7m_msr helper_v7m_msr_sparc
-#define helper_vfp_absd helper_vfp_absd_sparc
-#define helper_vfp_abss helper_vfp_abss_sparc
-#define helper_vfp_addd helper_vfp_addd_sparc
-#define helper_vfp_adds helper_vfp_adds_sparc
-#define helper_vfp_cmpd helper_vfp_cmpd_sparc
-#define helper_vfp_cmped helper_vfp_cmped_sparc
-#define helper_vfp_cmpes helper_vfp_cmpes_sparc
-#define helper_vfp_cmps helper_vfp_cmps_sparc
-#define helper_vfp_divd helper_vfp_divd_sparc
-#define helper_vfp_divs helper_vfp_divs_sparc
-#define helper_vfp_fcvtds helper_vfp_fcvtds_sparc
-#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_sparc
-#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_sparc
-#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_sparc
-#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_sparc
-#define helper_vfp_fcvtsd helper_vfp_fcvtsd_sparc
-#define helper_vfp_get_fpscr helper_vfp_get_fpscr_sparc
-#define helper_vfp_maxd helper_vfp_maxd_sparc
-#define helper_vfp_maxnumd helper_vfp_maxnumd_sparc
-#define helper_vfp_maxnums helper_vfp_maxnums_sparc
-#define helper_vfp_maxs helper_vfp_maxs_sparc
-#define helper_vfp_mind helper_vfp_mind_sparc
-#define helper_vfp_minnumd helper_vfp_minnumd_sparc
-#define helper_vfp_minnums helper_vfp_minnums_sparc
-#define helper_vfp_mins helper_vfp_mins_sparc
-#define helper_vfp_muladdd helper_vfp_muladdd_sparc
-#define helper_vfp_muladds helper_vfp_muladds_sparc
-#define helper_vfp_muld helper_vfp_muld_sparc
-#define helper_vfp_muls helper_vfp_muls_sparc
-#define helper_vfp_negd helper_vfp_negd_sparc
-#define helper_vfp_negs helper_vfp_negs_sparc
-#define helper_vfp_set_fpscr helper_vfp_set_fpscr_sparc
-#define helper_vfp_shtod helper_vfp_shtod_sparc
-#define helper_vfp_shtos helper_vfp_shtos_sparc
-#define helper_vfp_sitod helper_vfp_sitod_sparc
-#define helper_vfp_sitos helper_vfp_sitos_sparc
-#define helper_vfp_sltod helper_vfp_sltod_sparc
-#define helper_vfp_sltos helper_vfp_sltos_sparc
-#define helper_vfp_sqrtd helper_vfp_sqrtd_sparc
-#define helper_vfp_sqrts helper_vfp_sqrts_sparc
-#define helper_vfp_sqtod helper_vfp_sqtod_sparc
-#define helper_vfp_sqtos helper_vfp_sqtos_sparc
-#define helper_vfp_subd helper_vfp_subd_sparc
-#define helper_vfp_subs helper_vfp_subs_sparc
-#define helper_vfp_toshd helper_vfp_toshd_sparc
-#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_sparc
-#define helper_vfp_toshs helper_vfp_toshs_sparc
-#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_sparc
-#define helper_vfp_tosid helper_vfp_tosid_sparc
-#define helper_vfp_tosis helper_vfp_tosis_sparc
-#define helper_vfp_tosizd helper_vfp_tosizd_sparc
-#define helper_vfp_tosizs helper_vfp_tosizs_sparc
-#define helper_vfp_tosld helper_vfp_tosld_sparc
-#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_sparc
-#define helper_vfp_tosls helper_vfp_tosls_sparc
-#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_sparc
-#define helper_vfp_tosqd helper_vfp_tosqd_sparc
-#define helper_vfp_tosqs helper_vfp_tosqs_sparc
-#define helper_vfp_touhd helper_vfp_touhd_sparc
-#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_sparc
-#define helper_vfp_touhs helper_vfp_touhs_sparc
-#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_sparc
-#define helper_vfp_touid helper_vfp_touid_sparc
-#define helper_vfp_touis helper_vfp_touis_sparc
-#define helper_vfp_touizd helper_vfp_touizd_sparc
-#define helper_vfp_touizs helper_vfp_touizs_sparc
-#define helper_vfp_tould helper_vfp_tould_sparc
-#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_sparc
-#define helper_vfp_touls helper_vfp_touls_sparc
-#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_sparc
-#define helper_vfp_touqd helper_vfp_touqd_sparc
-#define helper_vfp_touqs helper_vfp_touqs_sparc
-#define helper_vfp_uhtod helper_vfp_uhtod_sparc
-#define helper_vfp_uhtos helper_vfp_uhtos_sparc
-#define helper_vfp_uitod helper_vfp_uitod_sparc
-#define helper_vfp_uitos helper_vfp_uitos_sparc
-#define helper_vfp_ultod helper_vfp_ultod_sparc
-#define helper_vfp_ultos helper_vfp_ultos_sparc
-#define helper_vfp_uqtod helper_vfp_uqtod_sparc
-#define helper_vfp_uqtos helper_vfp_uqtos_sparc
-#define helper_wfe helper_wfe_sparc
-#define helper_wfi helper_wfi_sparc
-#define hex2decimal hex2decimal_sparc
-#define hw_breakpoint_update hw_breakpoint_update_sparc
-#define hw_breakpoint_update_all hw_breakpoint_update_all_sparc
-#define hw_watchpoint_update hw_watchpoint_update_sparc
-#define hw_watchpoint_update_all hw_watchpoint_update_all_sparc
-#define _init _init_sparc
-#define init_cpreg_list init_cpreg_list_sparc
-#define init_lists init_lists_sparc
-#define input_type_enum input_type_enum_sparc
-#define int128_2_64 int128_2_64_sparc
-#define int128_add int128_add_sparc
-#define int128_addto int128_addto_sparc
-#define int128_and int128_and_sparc
-#define int128_eq int128_eq_sparc
-#define int128_ge int128_ge_sparc
-#define int128_get64 int128_get64_sparc
-#define int128_gt int128_gt_sparc
-#define int128_le int128_le_sparc
-#define int128_lt int128_lt_sparc
-#define int128_make64 int128_make64_sparc
-#define int128_max int128_max_sparc
-#define int128_min int128_min_sparc
-#define int128_ne int128_ne_sparc
-#define int128_neg int128_neg_sparc
-#define int128_nz int128_nz_sparc
-#define int128_rshift int128_rshift_sparc
-#define int128_sub int128_sub_sparc
-#define int128_subfrom int128_subfrom_sparc
-#define int128_zero int128_zero_sparc
-#define int16_to_float32 int16_to_float32_sparc
-#define int16_to_float64 int16_to_float64_sparc
-#define int32_to_float128 int32_to_float128_sparc
-#define int32_to_float32 int32_to_float32_sparc
-#define int32_to_float64 int32_to_float64_sparc
-#define int32_to_floatx80 int32_to_floatx80_sparc
-#define int64_to_float128 int64_to_float128_sparc
-#define int64_to_float32 int64_to_float32_sparc
-#define int64_to_float64 int64_to_float64_sparc
-#define int64_to_floatx80 int64_to_floatx80_sparc
-#define invalidate_and_set_dirty invalidate_and_set_dirty_sparc
-#define invalidate_page_bitmap invalidate_page_bitmap_sparc
-#define io_mem_read io_mem_read_sparc
-#define io_mem_write io_mem_write_sparc
-#define io_readb io_readb_sparc
-#define io_readl io_readl_sparc
-#define io_readq io_readq_sparc
-#define io_readw io_readw_sparc
-#define iotlb_to_region iotlb_to_region_sparc
-#define io_writeb io_writeb_sparc
-#define io_writel io_writel_sparc
-#define io_writeq io_writeq_sparc
-#define io_writew io_writew_sparc
-#define is_a64 is_a64_sparc
-#define is_help_option is_help_option_sparc
-#define isr_read isr_read_sparc
-#define is_valid_option_list is_valid_option_list_sparc
-#define iwmmxt_load_creg iwmmxt_load_creg_sparc
-#define iwmmxt_load_reg iwmmxt_load_reg_sparc
-#define iwmmxt_store_creg iwmmxt_store_creg_sparc
-#define iwmmxt_store_reg iwmmxt_store_reg_sparc
-#define __jit_debug_descriptor __jit_debug_descriptor_sparc
-#define __jit_debug_register_code __jit_debug_register_code_sparc
-#define kvm_to_cpreg_id kvm_to_cpreg_id_sparc
-#define last_ram_offset last_ram_offset_sparc
-#define ldl_be_p ldl_be_p_sparc
-#define ldl_be_phys ldl_be_phys_sparc
-#define ldl_he_p ldl_he_p_sparc
-#define ldl_le_p ldl_le_p_sparc
-#define ldl_le_phys ldl_le_phys_sparc
-#define ldl_phys ldl_phys_sparc
-#define ldl_phys_internal ldl_phys_internal_sparc
-#define ldq_be_p ldq_be_p_sparc
-#define ldq_be_phys ldq_be_phys_sparc
-#define ldq_he_p ldq_he_p_sparc
-#define ldq_le_p ldq_le_p_sparc
-#define ldq_le_phys ldq_le_phys_sparc
-#define ldq_phys ldq_phys_sparc
-#define ldq_phys_internal ldq_phys_internal_sparc
-#define ldst_name ldst_name_sparc
-#define ldub_p ldub_p_sparc
-#define ldub_phys ldub_phys_sparc
-#define lduw_be_p lduw_be_p_sparc
-#define lduw_be_phys lduw_be_phys_sparc
-#define lduw_he_p lduw_he_p_sparc
-#define lduw_le_p lduw_le_p_sparc
-#define lduw_le_phys lduw_le_phys_sparc
-#define lduw_phys lduw_phys_sparc
-#define lduw_phys_internal lduw_phys_internal_sparc
-#define le128 le128_sparc
-#define linked_bp_matches linked_bp_matches_sparc
-#define listener_add_address_space listener_add_address_space_sparc
-#define load_cpu_offset load_cpu_offset_sparc
-#define load_reg load_reg_sparc
-#define load_reg_var load_reg_var_sparc
-#define log_cpu_state log_cpu_state_sparc
-#define lpae_cp_reginfo lpae_cp_reginfo_sparc
-#define lt128 lt128_sparc
-#define machine_class_init machine_class_init_sparc
-#define machine_finalize machine_finalize_sparc
-#define machine_info machine_info_sparc
-#define machine_initfn machine_initfn_sparc
-#define machine_register_types machine_register_types_sparc
-#define machvirt_init machvirt_init_sparc
-#define machvirt_machine_init machvirt_machine_init_sparc
-#define maj maj_sparc
-#define mapping_conflict mapping_conflict_sparc
-#define mapping_contiguous mapping_contiguous_sparc
-#define mapping_have_same_region mapping_have_same_region_sparc
-#define mapping_merge mapping_merge_sparc
-#define mem_add mem_add_sparc
-#define mem_begin mem_begin_sparc
-#define mem_commit mem_commit_sparc
-#define memory_access_is_direct memory_access_is_direct_sparc
-#define memory_access_size memory_access_size_sparc
-#define memory_init memory_init_sparc
-#define memory_listener_match memory_listener_match_sparc
-#define memory_listener_register memory_listener_register_sparc
-#define memory_listener_unregister memory_listener_unregister_sparc
-#define memory_map_init memory_map_init_sparc
-#define memory_mapping_filter memory_mapping_filter_sparc
-#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_sparc
-#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_sparc
-#define memory_mapping_list_free memory_mapping_list_free_sparc
-#define memory_mapping_list_init memory_mapping_list_init_sparc
-#define memory_region_access_valid memory_region_access_valid_sparc
-#define memory_region_add_subregion memory_region_add_subregion_sparc
-#define memory_region_add_subregion_common memory_region_add_subregion_common_sparc
-#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_sparc
-#define memory_region_big_endian memory_region_big_endian_sparc
-#define memory_region_clear_pending memory_region_clear_pending_sparc
-#define memory_region_del_subregion memory_region_del_subregion_sparc
-#define memory_region_destructor_alias memory_region_destructor_alias_sparc
-#define memory_region_destructor_none memory_region_destructor_none_sparc
-#define memory_region_destructor_ram memory_region_destructor_ram_sparc
-#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_sparc
-#define memory_region_dispatch_read memory_region_dispatch_read_sparc
-#define memory_region_dispatch_read1 memory_region_dispatch_read1_sparc
-#define memory_region_dispatch_write memory_region_dispatch_write_sparc
-#define memory_region_escape_name memory_region_escape_name_sparc
-#define memory_region_finalize memory_region_finalize_sparc
-#define memory_region_find memory_region_find_sparc
-#define memory_region_get_addr memory_region_get_addr_sparc
-#define memory_region_get_alignment memory_region_get_alignment_sparc
-#define memory_region_get_container memory_region_get_container_sparc
-#define memory_region_get_fd memory_region_get_fd_sparc
-#define memory_region_get_may_overlap memory_region_get_may_overlap_sparc
-#define memory_region_get_priority memory_region_get_priority_sparc
-#define memory_region_get_ram_addr memory_region_get_ram_addr_sparc
-#define memory_region_get_ram_ptr memory_region_get_ram_ptr_sparc
-#define memory_region_get_size memory_region_get_size_sparc
-#define memory_region_info memory_region_info_sparc
-#define memory_region_init memory_region_init_sparc
-#define memory_region_init_alias memory_region_init_alias_sparc
-#define memory_region_initfn memory_region_initfn_sparc
-#define memory_region_init_io memory_region_init_io_sparc
-#define memory_region_init_ram memory_region_init_ram_sparc
-#define memory_region_init_ram_ptr memory_region_init_ram_ptr_sparc
-#define memory_region_init_reservation memory_region_init_reservation_sparc
-#define memory_region_is_iommu memory_region_is_iommu_sparc
-#define memory_region_is_logging memory_region_is_logging_sparc
-#define memory_region_is_mapped memory_region_is_mapped_sparc
-#define memory_region_is_ram memory_region_is_ram_sparc
-#define memory_region_is_rom memory_region_is_rom_sparc
-#define memory_region_is_romd memory_region_is_romd_sparc
-#define memory_region_is_skip_dump memory_region_is_skip_dump_sparc
-#define memory_region_is_unassigned memory_region_is_unassigned_sparc
-#define memory_region_name memory_region_name_sparc
-#define memory_region_need_escape memory_region_need_escape_sparc
-#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_sparc
-#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_sparc
-#define memory_region_present memory_region_present_sparc
-#define memory_region_read_accessor memory_region_read_accessor_sparc
-#define memory_region_readd_subregion memory_region_readd_subregion_sparc
-#define memory_region_ref memory_region_ref_sparc
-#define memory_region_resolve_container memory_region_resolve_container_sparc
-#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_sparc
-#define memory_region_section_get_iotlb memory_region_section_get_iotlb_sparc
-#define memory_region_set_address memory_region_set_address_sparc
-#define memory_region_set_alias_offset memory_region_set_alias_offset_sparc
-#define memory_region_set_enabled memory_region_set_enabled_sparc
-#define memory_region_set_readonly memory_region_set_readonly_sparc
-#define memory_region_set_skip_dump memory_region_set_skip_dump_sparc
-#define memory_region_size memory_region_size_sparc
-#define memory_region_to_address_space memory_region_to_address_space_sparc
-#define memory_region_transaction_begin memory_region_transaction_begin_sparc
-#define memory_region_transaction_commit memory_region_transaction_commit_sparc
-#define memory_region_unref memory_region_unref_sparc
-#define memory_region_update_container_subregions memory_region_update_container_subregions_sparc
-#define memory_region_write_accessor memory_region_write_accessor_sparc
-#define memory_region_wrong_endianness memory_region_wrong_endianness_sparc
-#define memory_try_enable_merging memory_try_enable_merging_sparc
-#define module_call_init module_call_init_sparc
-#define module_load module_load_sparc
-#define mpidr_cp_reginfo mpidr_cp_reginfo_sparc
-#define mpidr_read mpidr_read_sparc
-#define msr_mask msr_mask_sparc
-#define mul128By64To192 mul128By64To192_sparc
-#define mul128To256 mul128To256_sparc
-#define mul64To128 mul64To128_sparc
-#define muldiv64 muldiv64_sparc
-#define neon_2rm_is_float_op neon_2rm_is_float_op_sparc
-#define neon_2rm_sizes neon_2rm_sizes_sparc
-#define neon_3r_sizes neon_3r_sizes_sparc
-#define neon_get_scalar neon_get_scalar_sparc
-#define neon_load_reg neon_load_reg_sparc
-#define neon_load_reg64 neon_load_reg64_sparc
-#define neon_load_scratch neon_load_scratch_sparc
-#define neon_ls_element_type neon_ls_element_type_sparc
-#define neon_reg_offset neon_reg_offset_sparc
-#define neon_store_reg neon_store_reg_sparc
-#define neon_store_reg64 neon_store_reg64_sparc
-#define neon_store_scratch neon_store_scratch_sparc
-#define new_ldst_label new_ldst_label_sparc
-#define next_list next_list_sparc
-#define normalizeFloat128Subnormal normalizeFloat128Subnormal_sparc
-#define normalizeFloat16Subnormal normalizeFloat16Subnormal_sparc
-#define normalizeFloat32Subnormal normalizeFloat32Subnormal_sparc
-#define normalizeFloat64Subnormal normalizeFloat64Subnormal_sparc
-#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_sparc
-#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_sparc
-#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_sparc
-#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_sparc
-#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_sparc
-#define not_v6_cp_reginfo not_v6_cp_reginfo_sparc
-#define not_v7_cp_reginfo not_v7_cp_reginfo_sparc
-#define not_v8_cp_reginfo not_v8_cp_reginfo_sparc
-#define object_child_foreach object_child_foreach_sparc
-#define object_class_foreach object_class_foreach_sparc
-#define object_class_foreach_tramp object_class_foreach_tramp_sparc
-#define object_class_get_list object_class_get_list_sparc
-#define object_class_get_list_tramp object_class_get_list_tramp_sparc
-#define object_class_get_parent object_class_get_parent_sparc
-#define object_deinit object_deinit_sparc
-#define object_dynamic_cast object_dynamic_cast_sparc
-#define object_finalize object_finalize_sparc
-#define object_finalize_child_property object_finalize_child_property_sparc
-#define object_get_child_property object_get_child_property_sparc
-#define object_get_link_property object_get_link_property_sparc
-#define object_get_root object_get_root_sparc
-#define object_initialize_with_type object_initialize_with_type_sparc
-#define object_init_with_type object_init_with_type_sparc
-#define object_instance_init object_instance_init_sparc
-#define object_new_with_type object_new_with_type_sparc
-#define object_post_init_with_type object_post_init_with_type_sparc
-#define object_property_add_alias object_property_add_alias_sparc
-#define object_property_add_link object_property_add_link_sparc
-#define object_property_add_uint16_ptr object_property_add_uint16_ptr_sparc
-#define object_property_add_uint32_ptr object_property_add_uint32_ptr_sparc
-#define object_property_add_uint64_ptr object_property_add_uint64_ptr_sparc
-#define object_property_add_uint8_ptr object_property_add_uint8_ptr_sparc
-#define object_property_allow_set_link object_property_allow_set_link_sparc
-#define object_property_del object_property_del_sparc
-#define object_property_del_all object_property_del_all_sparc
-#define object_property_find object_property_find_sparc
-#define object_property_get object_property_get_sparc
-#define object_property_get_bool object_property_get_bool_sparc
-#define object_property_get_int object_property_get_int_sparc
-#define object_property_get_link object_property_get_link_sparc
-#define object_property_get_qobject object_property_get_qobject_sparc
-#define object_property_get_str object_property_get_str_sparc
-#define object_property_get_type object_property_get_type_sparc
-#define object_property_is_child object_property_is_child_sparc
-#define object_property_set object_property_set_sparc
-#define object_property_set_description object_property_set_description_sparc
-#define object_property_set_link object_property_set_link_sparc
-#define object_property_set_qobject object_property_set_qobject_sparc
-#define object_release_link_property object_release_link_property_sparc
-#define object_resolve_abs_path object_resolve_abs_path_sparc
-#define object_resolve_child_property object_resolve_child_property_sparc
-#define object_resolve_link object_resolve_link_sparc
-#define object_resolve_link_property object_resolve_link_property_sparc
-#define object_resolve_partial_path object_resolve_partial_path_sparc
-#define object_resolve_path object_resolve_path_sparc
-#define object_resolve_path_component object_resolve_path_component_sparc
-#define object_resolve_path_type object_resolve_path_type_sparc
-#define object_set_link_property object_set_link_property_sparc
-#define object_unparent object_unparent_sparc
-#define omap_cachemaint_write omap_cachemaint_write_sparc
-#define omap_cp_reginfo omap_cp_reginfo_sparc
-#define omap_threadid_write omap_threadid_write_sparc
-#define omap_ticonfig_write omap_ticonfig_write_sparc
-#define omap_wfi_write omap_wfi_write_sparc
-#define op_bits op_bits_sparc
-#define open_modeflags open_modeflags_sparc
-#define op_to_mov op_to_mov_sparc
-#define op_to_movi op_to_movi_sparc
-#define output_type_enum output_type_enum_sparc
-#define packFloat128 packFloat128_sparc
-#define packFloat16 packFloat16_sparc
-#define packFloat32 packFloat32_sparc
-#define packFloat64 packFloat64_sparc
-#define packFloatx80 packFloatx80_sparc
-#define page_find page_find_sparc
-#define page_find_alloc page_find_alloc_sparc
-#define page_flush_tb page_flush_tb_sparc
-#define page_flush_tb_1 page_flush_tb_1_sparc
-#define page_init page_init_sparc
-#define page_size_init page_size_init_sparc
-#define par par_sparc
-#define parse_array parse_array_sparc
-#define parse_error parse_error_sparc
-#define parse_escape parse_escape_sparc
-#define parse_keyword parse_keyword_sparc
-#define parse_literal parse_literal_sparc
-#define parse_object parse_object_sparc
-#define parse_optional parse_optional_sparc
-#define parse_option_bool parse_option_bool_sparc
-#define parse_option_number parse_option_number_sparc
-#define parse_option_size parse_option_size_sparc
-#define parse_pair parse_pair_sparc
-#define parser_context_free parser_context_free_sparc
-#define parser_context_new parser_context_new_sparc
-#define parser_context_peek_token parser_context_peek_token_sparc
-#define parser_context_pop_token parser_context_pop_token_sparc
-#define parser_context_restore parser_context_restore_sparc
-#define parser_context_save parser_context_save_sparc
-#define parse_str parse_str_sparc
-#define parse_type_bool parse_type_bool_sparc
-#define parse_type_int parse_type_int_sparc
-#define parse_type_number parse_type_number_sparc
-#define parse_type_size parse_type_size_sparc
-#define parse_type_str parse_type_str_sparc
-#define parse_value parse_value_sparc
-#define par_write par_write_sparc
-#define patch_reloc patch_reloc_sparc
-#define phys_map_node_alloc phys_map_node_alloc_sparc
-#define phys_map_node_reserve phys_map_node_reserve_sparc
-#define phys_mem_alloc phys_mem_alloc_sparc
-#define phys_mem_set_alloc phys_mem_set_alloc_sparc
-#define phys_page_compact phys_page_compact_sparc
-#define phys_page_compact_all phys_page_compact_all_sparc
-#define phys_page_find phys_page_find_sparc
-#define phys_page_set phys_page_set_sparc
-#define phys_page_set_level phys_page_set_level_sparc
-#define phys_section_add phys_section_add_sparc
-#define phys_section_destroy phys_section_destroy_sparc
-#define phys_sections_free phys_sections_free_sparc
-#define pickNaN pickNaN_sparc
-#define pickNaNMulAdd pickNaNMulAdd_sparc
-#define pmccfiltr_write pmccfiltr_write_sparc
-#define pmccntr_read pmccntr_read_sparc
-#define pmccntr_sync pmccntr_sync_sparc
-#define pmccntr_write pmccntr_write_sparc
-#define pmccntr_write32 pmccntr_write32_sparc
-#define pmcntenclr_write pmcntenclr_write_sparc
-#define pmcntenset_write pmcntenset_write_sparc
-#define pmcr_write pmcr_write_sparc
-#define pmintenclr_write pmintenclr_write_sparc
-#define pmintenset_write pmintenset_write_sparc
-#define pmovsr_write pmovsr_write_sparc
-#define pmreg_access pmreg_access_sparc
-#define pmsav5_cp_reginfo pmsav5_cp_reginfo_sparc
-#define pmsav5_data_ap_read pmsav5_data_ap_read_sparc
-#define pmsav5_data_ap_write pmsav5_data_ap_write_sparc
-#define pmsav5_insn_ap_read pmsav5_insn_ap_read_sparc
-#define pmsav5_insn_ap_write pmsav5_insn_ap_write_sparc
-#define pmuserenr_write pmuserenr_write_sparc
-#define pmxevtyper_write pmxevtyper_write_sparc
-#define print_type_bool print_type_bool_sparc
-#define print_type_int print_type_int_sparc
-#define print_type_number print_type_number_sparc
-#define print_type_size print_type_size_sparc
-#define print_type_str print_type_str_sparc
-#define propagateFloat128NaN propagateFloat128NaN_sparc
-#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_sparc
-#define propagateFloat32NaN propagateFloat32NaN_sparc
-#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_sparc
-#define propagateFloat64NaN propagateFloat64NaN_sparc
-#define propagateFloatx80NaN propagateFloatx80NaN_sparc
-#define property_get_alias property_get_alias_sparc
-#define property_get_bool property_get_bool_sparc
-#define property_get_str property_get_str_sparc
-#define property_get_uint16_ptr property_get_uint16_ptr_sparc
-#define property_get_uint32_ptr property_get_uint32_ptr_sparc
-#define property_get_uint64_ptr property_get_uint64_ptr_sparc
-#define property_get_uint8_ptr property_get_uint8_ptr_sparc
-#define property_release_alias property_release_alias_sparc
-#define property_release_bool property_release_bool_sparc
-#define property_release_str property_release_str_sparc
-#define property_resolve_alias property_resolve_alias_sparc
-#define property_set_alias property_set_alias_sparc
-#define property_set_bool property_set_bool_sparc
-#define property_set_str property_set_str_sparc
-#define pstate_read pstate_read_sparc
-#define pstate_write pstate_write_sparc
-#define pxa250_initfn pxa250_initfn_sparc
-#define pxa255_initfn pxa255_initfn_sparc
-#define pxa260_initfn pxa260_initfn_sparc
-#define pxa261_initfn pxa261_initfn_sparc
-#define pxa262_initfn pxa262_initfn_sparc
-#define pxa270a0_initfn pxa270a0_initfn_sparc
-#define pxa270a1_initfn pxa270a1_initfn_sparc
-#define pxa270b0_initfn pxa270b0_initfn_sparc
-#define pxa270b1_initfn pxa270b1_initfn_sparc
-#define pxa270c0_initfn pxa270c0_initfn_sparc
-#define pxa270c5_initfn pxa270c5_initfn_sparc
-#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_sparc
-#define qapi_dealloc_end_list qapi_dealloc_end_list_sparc
-#define qapi_dealloc_end_struct qapi_dealloc_end_struct_sparc
-#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_sparc
-#define qapi_dealloc_next_list qapi_dealloc_next_list_sparc
-#define qapi_dealloc_pop qapi_dealloc_pop_sparc
-#define qapi_dealloc_push qapi_dealloc_push_sparc
-#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_sparc
-#define qapi_dealloc_start_list qapi_dealloc_start_list_sparc
-#define qapi_dealloc_start_struct qapi_dealloc_start_struct_sparc
-#define qapi_dealloc_start_union qapi_dealloc_start_union_sparc
-#define qapi_dealloc_type_bool qapi_dealloc_type_bool_sparc
-#define qapi_dealloc_type_enum qapi_dealloc_type_enum_sparc
-#define qapi_dealloc_type_int qapi_dealloc_type_int_sparc
-#define qapi_dealloc_type_number qapi_dealloc_type_number_sparc
-#define qapi_dealloc_type_size qapi_dealloc_type_size_sparc
-#define qapi_dealloc_type_str qapi_dealloc_type_str_sparc
-#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_sparc
-#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_sparc
-#define qapi_free_boolList qapi_free_boolList_sparc
-#define qapi_free_ErrorClassList qapi_free_ErrorClassList_sparc
-#define qapi_free_int16List qapi_free_int16List_sparc
-#define qapi_free_int32List qapi_free_int32List_sparc
-#define qapi_free_int64List qapi_free_int64List_sparc
-#define qapi_free_int8List qapi_free_int8List_sparc
-#define qapi_free_intList qapi_free_intList_sparc
-#define qapi_free_numberList qapi_free_numberList_sparc
-#define qapi_free_strList qapi_free_strList_sparc
-#define qapi_free_uint16List qapi_free_uint16List_sparc
-#define qapi_free_uint32List qapi_free_uint32List_sparc
-#define qapi_free_uint64List qapi_free_uint64List_sparc
-#define qapi_free_uint8List qapi_free_uint8List_sparc
-#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_sparc
-#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_sparc
-#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_sparc
-#define qbool_destroy_obj qbool_destroy_obj_sparc
-#define qbool_from_int qbool_from_int_sparc
-#define qbool_get_int qbool_get_int_sparc
-#define qbool_type qbool_type_sparc
-#define qbus_create qbus_create_sparc
-#define qbus_create_inplace qbus_create_inplace_sparc
-#define qbus_finalize qbus_finalize_sparc
-#define qbus_initfn qbus_initfn_sparc
-#define qbus_realize qbus_realize_sparc
-#define qdev_create qdev_create_sparc
-#define qdev_get_type qdev_get_type_sparc
-#define qdev_register_types qdev_register_types_sparc
-#define qdev_set_parent_bus qdev_set_parent_bus_sparc
-#define qdev_try_create qdev_try_create_sparc
-#define qdict_add_key qdict_add_key_sparc
-#define qdict_array_split qdict_array_split_sparc
-#define qdict_clone_shallow qdict_clone_shallow_sparc
-#define qdict_del qdict_del_sparc
-#define qdict_destroy_obj qdict_destroy_obj_sparc
-#define qdict_entry_key qdict_entry_key_sparc
-#define qdict_entry_value qdict_entry_value_sparc
-#define qdict_extract_subqdict qdict_extract_subqdict_sparc
-#define qdict_find qdict_find_sparc
-#define qdict_first qdict_first_sparc
-#define qdict_flatten qdict_flatten_sparc
-#define qdict_flatten_qdict qdict_flatten_qdict_sparc
-#define qdict_flatten_qlist qdict_flatten_qlist_sparc
-#define qdict_get qdict_get_sparc
-#define qdict_get_bool qdict_get_bool_sparc
-#define qdict_get_double qdict_get_double_sparc
-#define qdict_get_int qdict_get_int_sparc
-#define qdict_get_obj qdict_get_obj_sparc
-#define qdict_get_qdict qdict_get_qdict_sparc
-#define qdict_get_qlist qdict_get_qlist_sparc
-#define qdict_get_str qdict_get_str_sparc
-#define qdict_get_try_bool qdict_get_try_bool_sparc
-#define qdict_get_try_int qdict_get_try_int_sparc
-#define qdict_get_try_str qdict_get_try_str_sparc
-#define qdict_haskey qdict_haskey_sparc
-#define qdict_has_prefixed_entries qdict_has_prefixed_entries_sparc
-#define qdict_iter qdict_iter_sparc
-#define qdict_join qdict_join_sparc
-#define qdict_new qdict_new_sparc
-#define qdict_next qdict_next_sparc
-#define qdict_next_entry qdict_next_entry_sparc
-#define qdict_put_obj qdict_put_obj_sparc
-#define qdict_size qdict_size_sparc
-#define qdict_type qdict_type_sparc
-#define qemu_clock_get_us qemu_clock_get_us_sparc
-#define qemu_clock_ptr qemu_clock_ptr_sparc
-#define qemu_clocks qemu_clocks_sparc
-#define qemu_get_cpu qemu_get_cpu_sparc
-#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_sparc
-#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_sparc
-#define qemu_get_ram_block qemu_get_ram_block_sparc
-#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_sparc
-#define qemu_get_ram_fd qemu_get_ram_fd_sparc
-#define qemu_get_ram_ptr qemu_get_ram_ptr_sparc
-#define qemu_host_page_mask qemu_host_page_mask_sparc
-#define qemu_host_page_size qemu_host_page_size_sparc
-#define qemu_init_vcpu qemu_init_vcpu_sparc
-#define qemu_ld_helpers qemu_ld_helpers_sparc
-#define qemu_log_close qemu_log_close_sparc
-#define qemu_log_enabled qemu_log_enabled_sparc
-#define qemu_log_flush qemu_log_flush_sparc
-#define qemu_loglevel_mask qemu_loglevel_mask_sparc
-#define qemu_log_vprintf qemu_log_vprintf_sparc
-#define qemu_oom_check qemu_oom_check_sparc
-#define qemu_parse_fd qemu_parse_fd_sparc
-#define qemu_ram_addr_from_host qemu_ram_addr_from_host_sparc
-#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_sparc
-#define qemu_ram_alloc qemu_ram_alloc_sparc
-#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_sparc
-#define qemu_ram_foreach_block qemu_ram_foreach_block_sparc
-#define qemu_ram_free qemu_ram_free_sparc
-#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_sparc
-#define qemu_ram_ptr_length qemu_ram_ptr_length_sparc
-#define qemu_ram_remap qemu_ram_remap_sparc
-#define qemu_ram_setup_dump qemu_ram_setup_dump_sparc
-#define qemu_ram_unset_idstr qemu_ram_unset_idstr_sparc
-#define qemu_real_host_page_size qemu_real_host_page_size_sparc
-#define qemu_st_helpers qemu_st_helpers_sparc
-#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_sparc
-#define qemu_try_memalign qemu_try_memalign_sparc
-#define qentry_destroy qentry_destroy_sparc
-#define qerror_human qerror_human_sparc
-#define qerror_report qerror_report_sparc
-#define qerror_report_err qerror_report_err_sparc
-#define qfloat_destroy_obj qfloat_destroy_obj_sparc
-#define qfloat_from_double qfloat_from_double_sparc
-#define qfloat_get_double qfloat_get_double_sparc
-#define qfloat_type qfloat_type_sparc
-#define qint_destroy_obj qint_destroy_obj_sparc
-#define qint_from_int qint_from_int_sparc
-#define qint_get_int qint_get_int_sparc
-#define qint_type qint_type_sparc
-#define qlist_append_obj qlist_append_obj_sparc
-#define qlist_copy qlist_copy_sparc
-#define qlist_copy_elem qlist_copy_elem_sparc
-#define qlist_destroy_obj qlist_destroy_obj_sparc
-#define qlist_empty qlist_empty_sparc
-#define qlist_entry_obj qlist_entry_obj_sparc
-#define qlist_first qlist_first_sparc
-#define qlist_iter qlist_iter_sparc
-#define qlist_new qlist_new_sparc
-#define qlist_next qlist_next_sparc
-#define qlist_peek qlist_peek_sparc
-#define qlist_pop qlist_pop_sparc
-#define qlist_size qlist_size_sparc
-#define qlist_size_iter qlist_size_iter_sparc
-#define qlist_type qlist_type_sparc
-#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_sparc
-#define qmp_input_end_list qmp_input_end_list_sparc
-#define qmp_input_end_struct qmp_input_end_struct_sparc
-#define qmp_input_get_next_type qmp_input_get_next_type_sparc
-#define qmp_input_get_object qmp_input_get_object_sparc
-#define qmp_input_get_visitor qmp_input_get_visitor_sparc
-#define qmp_input_next_list qmp_input_next_list_sparc
-#define qmp_input_optional qmp_input_optional_sparc
-#define qmp_input_pop qmp_input_pop_sparc
-#define qmp_input_push qmp_input_push_sparc
-#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_sparc
-#define qmp_input_start_list qmp_input_start_list_sparc
-#define qmp_input_start_struct qmp_input_start_struct_sparc
-#define qmp_input_type_bool qmp_input_type_bool_sparc
-#define qmp_input_type_int qmp_input_type_int_sparc
-#define qmp_input_type_number qmp_input_type_number_sparc
-#define qmp_input_type_str qmp_input_type_str_sparc
-#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_sparc
-#define qmp_input_visitor_new qmp_input_visitor_new_sparc
-#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_sparc
-#define qmp_output_add_obj qmp_output_add_obj_sparc
-#define qmp_output_end_list qmp_output_end_list_sparc
-#define qmp_output_end_struct qmp_output_end_struct_sparc
-#define qmp_output_first qmp_output_first_sparc
-#define qmp_output_get_qobject qmp_output_get_qobject_sparc
-#define qmp_output_get_visitor qmp_output_get_visitor_sparc
-#define qmp_output_last qmp_output_last_sparc
-#define qmp_output_next_list qmp_output_next_list_sparc
-#define qmp_output_pop qmp_output_pop_sparc
-#define qmp_output_push_obj qmp_output_push_obj_sparc
-#define qmp_output_start_list qmp_output_start_list_sparc
-#define qmp_output_start_struct qmp_output_start_struct_sparc
-#define qmp_output_type_bool qmp_output_type_bool_sparc
-#define qmp_output_type_int qmp_output_type_int_sparc
-#define qmp_output_type_number qmp_output_type_number_sparc
-#define qmp_output_type_str qmp_output_type_str_sparc
-#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_sparc
-#define qmp_output_visitor_new qmp_output_visitor_new_sparc
-#define qobject_decref qobject_decref_sparc
-#define qobject_to_qbool qobject_to_qbool_sparc
-#define qobject_to_qdict qobject_to_qdict_sparc
-#define qobject_to_qfloat qobject_to_qfloat_sparc
-#define qobject_to_qint qobject_to_qint_sparc
-#define qobject_to_qlist qobject_to_qlist_sparc
-#define qobject_to_qstring qobject_to_qstring_sparc
-#define qobject_type qobject_type_sparc
-#define qstring_append qstring_append_sparc
-#define qstring_append_chr qstring_append_chr_sparc
-#define qstring_append_int qstring_append_int_sparc
-#define qstring_destroy_obj qstring_destroy_obj_sparc
-#define qstring_from_escaped_str qstring_from_escaped_str_sparc
-#define qstring_from_str qstring_from_str_sparc
-#define qstring_from_substr qstring_from_substr_sparc
-#define qstring_get_length qstring_get_length_sparc
-#define qstring_get_str qstring_get_str_sparc
-#define qstring_new qstring_new_sparc
-#define qstring_type qstring_type_sparc
-#define ram_block_add ram_block_add_sparc
-#define ram_size ram_size_sparc
-#define range_compare range_compare_sparc
-#define range_covers_byte range_covers_byte_sparc
-#define range_get_last range_get_last_sparc
-#define range_merge range_merge_sparc
-#define ranges_can_merge ranges_can_merge_sparc
-#define raw_read raw_read_sparc
-#define raw_write raw_write_sparc
-#define rcon rcon_sparc
-#define read_raw_cp_reg read_raw_cp_reg_sparc
-#define recip_estimate recip_estimate_sparc
-#define recip_sqrt_estimate recip_sqrt_estimate_sparc
-#define register_cp_regs_for_features register_cp_regs_for_features_sparc
-#define register_multipage register_multipage_sparc
-#define register_subpage register_subpage_sparc
-#define register_tm_clones register_tm_clones_sparc
-#define register_types_object register_types_object_sparc
-#define regnames regnames_sparc
-#define render_memory_region render_memory_region_sparc
-#define reset_all_temps reset_all_temps_sparc
-#define reset_temp reset_temp_sparc
-#define rol32 rol32_sparc
-#define rol64 rol64_sparc
-#define ror32 ror32_sparc
-#define ror64 ror64_sparc
-#define roundAndPackFloat128 roundAndPackFloat128_sparc
-#define roundAndPackFloat16 roundAndPackFloat16_sparc
-#define roundAndPackFloat32 roundAndPackFloat32_sparc
-#define roundAndPackFloat64 roundAndPackFloat64_sparc
-#define roundAndPackFloatx80 roundAndPackFloatx80_sparc
-#define roundAndPackInt32 roundAndPackInt32_sparc
-#define roundAndPackInt64 roundAndPackInt64_sparc
-#define roundAndPackUint64 roundAndPackUint64_sparc
-#define round_to_inf round_to_inf_sparc
-#define run_on_cpu run_on_cpu_sparc
-#define s0 s0_sparc
-#define S0 S0_sparc
-#define s1 s1_sparc
-#define S1 S1_sparc
-#define sa1100_initfn sa1100_initfn_sparc
-#define sa1110_initfn sa1110_initfn_sparc
-#define save_globals save_globals_sparc
-#define scr_write scr_write_sparc
-#define sctlr_write sctlr_write_sparc
-#define set_bit set_bit_sparc
-#define set_bits set_bits_sparc
-#define set_default_nan_mode set_default_nan_mode_sparc
-#define set_feature set_feature_sparc
-#define set_float_detect_tininess set_float_detect_tininess_sparc
-#define set_float_exception_flags set_float_exception_flags_sparc
-#define set_float_rounding_mode set_float_rounding_mode_sparc
-#define set_flush_inputs_to_zero set_flush_inputs_to_zero_sparc
-#define set_flush_to_zero set_flush_to_zero_sparc
-#define set_swi_errno set_swi_errno_sparc
-#define sextract32 sextract32_sparc
-#define sextract64 sextract64_sparc
-#define shift128ExtraRightJamming shift128ExtraRightJamming_sparc
-#define shift128Right shift128Right_sparc
-#define shift128RightJamming shift128RightJamming_sparc
-#define shift32RightJamming shift32RightJamming_sparc
-#define shift64ExtraRightJamming shift64ExtraRightJamming_sparc
-#define shift64RightJamming shift64RightJamming_sparc
-#define shifter_out_im shifter_out_im_sparc
-#define shortShift128Left shortShift128Left_sparc
-#define shortShift192Left shortShift192Left_sparc
-#define simple_mpu_ap_bits simple_mpu_ap_bits_sparc
-#define size_code_gen_buffer size_code_gen_buffer_sparc
-#define softmmu_lock_user softmmu_lock_user_sparc
-#define softmmu_lock_user_string softmmu_lock_user_string_sparc
-#define softmmu_tget32 softmmu_tget32_sparc
-#define softmmu_tget8 softmmu_tget8_sparc
-#define softmmu_tput32 softmmu_tput32_sparc
-#define softmmu_unlock_user softmmu_unlock_user_sparc
-#define sort_constraints sort_constraints_sparc
-#define sp_el0_access sp_el0_access_sparc
-#define spsel_read spsel_read_sparc
-#define spsel_write spsel_write_sparc
-#define start_list start_list_sparc
-#define stb_p stb_p_sparc
-#define stb_phys stb_phys_sparc
-#define stl_be_p stl_be_p_sparc
-#define stl_be_phys stl_be_phys_sparc
-#define stl_he_p stl_he_p_sparc
-#define stl_le_p stl_le_p_sparc
-#define stl_le_phys stl_le_phys_sparc
-#define stl_phys stl_phys_sparc
-#define stl_phys_internal stl_phys_internal_sparc
-#define stl_phys_notdirty stl_phys_notdirty_sparc
-#define store_cpu_offset store_cpu_offset_sparc
-#define store_reg store_reg_sparc
-#define store_reg_bx store_reg_bx_sparc
-#define store_reg_from_load store_reg_from_load_sparc
-#define stq_be_p stq_be_p_sparc
-#define stq_be_phys stq_be_phys_sparc
-#define stq_he_p stq_he_p_sparc
-#define stq_le_p stq_le_p_sparc
-#define stq_le_phys stq_le_phys_sparc
-#define stq_phys stq_phys_sparc
-#define string_input_get_visitor string_input_get_visitor_sparc
-#define string_input_visitor_cleanup string_input_visitor_cleanup_sparc
-#define string_input_visitor_new string_input_visitor_new_sparc
-#define strongarm_cp_reginfo strongarm_cp_reginfo_sparc
-#define strstart strstart_sparc
-#define strtosz strtosz_sparc
-#define strtosz_suffix strtosz_suffix_sparc
-#define stw_be_p stw_be_p_sparc
-#define stw_be_phys stw_be_phys_sparc
-#define stw_he_p stw_he_p_sparc
-#define stw_le_p stw_le_p_sparc
-#define stw_le_phys stw_le_phys_sparc
-#define stw_phys stw_phys_sparc
-#define stw_phys_internal stw_phys_internal_sparc
-#define sub128 sub128_sparc
-#define sub16_sat sub16_sat_sparc
-#define sub16_usat sub16_usat_sparc
-#define sub192 sub192_sparc
-#define sub8_sat sub8_sat_sparc
-#define sub8_usat sub8_usat_sparc
-#define subFloat128Sigs subFloat128Sigs_sparc
-#define subFloat32Sigs subFloat32Sigs_sparc
-#define subFloat64Sigs subFloat64Sigs_sparc
-#define subFloatx80Sigs subFloatx80Sigs_sparc
-#define subpage_accepts subpage_accepts_sparc
-#define subpage_init subpage_init_sparc
-#define subpage_ops subpage_ops_sparc
-#define subpage_read subpage_read_sparc
-#define subpage_register subpage_register_sparc
-#define subpage_write subpage_write_sparc
-#define suffix_mul suffix_mul_sparc
-#define swap_commutative swap_commutative_sparc
-#define swap_commutative2 swap_commutative2_sparc
-#define switch_mode switch_mode_sparc
-#define switch_v7m_sp switch_v7m_sp_sparc
-#define syn_aa32_bkpt syn_aa32_bkpt_sparc
-#define syn_aa32_hvc syn_aa32_hvc_sparc
-#define syn_aa32_smc syn_aa32_smc_sparc
-#define syn_aa32_svc syn_aa32_svc_sparc
-#define syn_breakpoint syn_breakpoint_sparc
-#define sync_globals sync_globals_sparc
-#define syn_cp14_rrt_trap syn_cp14_rrt_trap_sparc
-#define syn_cp14_rt_trap syn_cp14_rt_trap_sparc
-#define syn_cp15_rrt_trap syn_cp15_rrt_trap_sparc
-#define syn_cp15_rt_trap syn_cp15_rt_trap_sparc
-#define syn_data_abort syn_data_abort_sparc
-#define syn_fp_access_trap syn_fp_access_trap_sparc
-#define syn_insn_abort syn_insn_abort_sparc
-#define syn_swstep syn_swstep_sparc
-#define syn_uncategorized syn_uncategorized_sparc
-#define syn_watchpoint syn_watchpoint_sparc
-#define syscall_err syscall_err_sparc
-#define system_bus_class_init system_bus_class_init_sparc
-#define system_bus_info system_bus_info_sparc
-#define t2ee_cp_reginfo t2ee_cp_reginfo_sparc
-#define table_logic_cc table_logic_cc_sparc
-#define target_parse_constraint target_parse_constraint_sparc
-#define target_words_bigendian target_words_bigendian_sparc
-#define tb_add_jump tb_add_jump_sparc
-#define tb_alloc tb_alloc_sparc
-#define tb_alloc_page tb_alloc_page_sparc
-#define tb_check_watchpoint tb_check_watchpoint_sparc
-#define tb_find_fast tb_find_fast_sparc
-#define tb_find_pc tb_find_pc_sparc
-#define tb_find_slow tb_find_slow_sparc
-#define tb_flush tb_flush_sparc
-#define tb_flush_jmp_cache tb_flush_jmp_cache_sparc
-#define tb_free tb_free_sparc
-#define tb_gen_code tb_gen_code_sparc
-#define tb_hash_remove tb_hash_remove_sparc
-#define tb_invalidate_phys_addr tb_invalidate_phys_addr_sparc
-#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_sparc
-#define tb_invalidate_phys_range tb_invalidate_phys_range_sparc
-#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_sparc
-#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_sparc
-#define tb_jmp_remove tb_jmp_remove_sparc
-#define tb_link_page tb_link_page_sparc
-#define tb_page_remove tb_page_remove_sparc
-#define tb_phys_hash_func tb_phys_hash_func_sparc
-#define tb_phys_invalidate tb_phys_invalidate_sparc
-#define tb_reset_jump tb_reset_jump_sparc
-#define tb_set_jmp_target tb_set_jmp_target_sparc
-#define tcg_accel_class_init tcg_accel_class_init_sparc
-#define tcg_accel_type tcg_accel_type_sparc
-#define tcg_add_param_i32 tcg_add_param_i32_sparc
-#define tcg_add_param_i64 tcg_add_param_i64_sparc
-#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_sparc
-#define tcg_allowed tcg_allowed_sparc
-#define tcg_canonicalize_memop tcg_canonicalize_memop_sparc
-#define tcg_commit tcg_commit_sparc
-#define tcg_cond_to_jcc tcg_cond_to_jcc_sparc
-#define tcg_constant_folding tcg_constant_folding_sparc
+#define tcg_can_emit_vec_op tcg_can_emit_vec_op_sparc
+#define tcg_expand_vec_op tcg_expand_vec_op_sparc
+#define tcg_register_jit tcg_register_jit_sparc
+#define tcg_tb_insert tcg_tb_insert_sparc
+#define
tcg_tb_remove tcg_tb_remove_sparc +#define tcg_tb_lookup tcg_tb_lookup_sparc +#define tcg_tb_foreach tcg_tb_foreach_sparc +#define tcg_nb_tbs tcg_nb_tbs_sparc +#define tcg_region_reset_all tcg_region_reset_all_sparc +#define tcg_region_init tcg_region_init_sparc +#define tcg_code_size tcg_code_size_sparc +#define tcg_code_capacity tcg_code_capacity_sparc +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_sparc +#define tcg_malloc_internal tcg_malloc_internal_sparc +#define tcg_pool_reset tcg_pool_reset_sparc +#define tcg_context_init tcg_context_init_sparc +#define tcg_tb_alloc tcg_tb_alloc_sparc +#define tcg_prologue_init tcg_prologue_init_sparc +#define tcg_func_start tcg_func_start_sparc +#define tcg_set_frame tcg_set_frame_sparc +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_sparc +#define tcg_temp_new_internal tcg_temp_new_internal_sparc +#define tcg_temp_new_vec tcg_temp_new_vec_sparc +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_sparc +#define tcg_temp_free_internal tcg_temp_free_internal_sparc #define tcg_const_i32 tcg_const_i32_sparc #define tcg_const_i64 tcg_const_i64_sparc #define tcg_const_local_i32 tcg_const_local_i32_sparc #define tcg_const_local_i64 tcg_const_local_i64_sparc -#define tcg_context_init tcg_context_init_sparc -#define tcg_cpu_address_space_init tcg_cpu_address_space_init_sparc -#define tcg_cpu_exec tcg_cpu_exec_sparc -#define tcg_current_code_size tcg_current_code_size_sparc -#define tcg_dump_info tcg_dump_info_sparc -#define tcg_dump_ops tcg_dump_ops_sparc -#define tcg_exec_all tcg_exec_all_sparc -#define tcg_find_helper tcg_find_helper_sparc -#define tcg_func_start tcg_func_start_sparc -#define tcg_gen_abs_i32 tcg_gen_abs_i32_sparc -#define tcg_gen_add2_i32 tcg_gen_add2_i32_sparc -#define tcg_gen_add_i32 tcg_gen_add_i32_sparc -#define tcg_gen_add_i64 tcg_gen_add_i64_sparc -#define tcg_gen_addi_i32 tcg_gen_addi_i32_sparc -#define tcg_gen_addi_i64 tcg_gen_addi_i64_sparc -#define tcg_gen_andc_i32 tcg_gen_andc_i32_sparc -#define tcg_gen_and_i32 tcg_gen_and_i32_sparc -#define tcg_gen_and_i64 tcg_gen_and_i64_sparc -#define tcg_gen_andi_i32 tcg_gen_andi_i32_sparc -#define tcg_gen_andi_i64 tcg_gen_andi_i64_sparc -#define tcg_gen_br tcg_gen_br_sparc -#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_sparc -#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_sparc -#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_sparc -#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_sparc -#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_sparc +#define tcg_op_supported tcg_op_supported_sparc #define tcg_gen_callN tcg_gen_callN_sparc +#define tcg_op_remove tcg_op_remove_sparc +#define tcg_emit_op tcg_emit_op_sparc +#define tcg_op_insert_before tcg_op_insert_before_sparc +#define tcg_op_insert_after tcg_op_insert_after_sparc +#define tcg_cpu_exec_time tcg_cpu_exec_time_sparc #define tcg_gen_code tcg_gen_code_sparc -#define tcg_gen_code_common tcg_gen_code_common_sparc -#define tcg_gen_code_search_pc tcg_gen_code_search_pc_sparc -#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_sparc -#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_sparc -#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_sparc -#define tcg_gen_exit_tb tcg_gen_exit_tb_sparc -#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_sparc -#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_sparc -#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_sparc -#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_sparc -#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_sparc -#define tcg_gen_ext8u_i32 
tcg_gen_ext8u_i32_sparc -#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_sparc -#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_sparc -#define tcg_gen_goto_tb tcg_gen_goto_tb_sparc -#define tcg_gen_ld_i32 tcg_gen_ld_i32_sparc -#define tcg_gen_ld_i64 tcg_gen_ld_i64_sparc -#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_sparc -#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_sparc -#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_sparc -#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_sparc -#define tcg_gen_mov_i32 tcg_gen_mov_i32_sparc -#define tcg_gen_mov_i64 tcg_gen_mov_i64_sparc -#define tcg_gen_movi_i32 tcg_gen_movi_i32_sparc -#define tcg_gen_movi_i64 tcg_gen_movi_i64_sparc -#define tcg_gen_mul_i32 tcg_gen_mul_i32_sparc -#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_sparc -#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_sparc -#define tcg_gen_neg_i32 tcg_gen_neg_i32_sparc -#define tcg_gen_neg_i64 tcg_gen_neg_i64_sparc -#define tcg_gen_not_i32 tcg_gen_not_i32_sparc -#define tcg_gen_op0 tcg_gen_op0_sparc -#define tcg_gen_op1i tcg_gen_op1i_sparc -#define tcg_gen_op2_i32 tcg_gen_op2_i32_sparc -#define tcg_gen_op2_i64 tcg_gen_op2_i64_sparc -#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_sparc -#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_sparc -#define tcg_gen_op3_i32 tcg_gen_op3_i32_sparc -#define tcg_gen_op3_i64 tcg_gen_op3_i64_sparc -#define tcg_gen_op4_i32 tcg_gen_op4_i32_sparc -#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_sparc -#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_sparc -#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_sparc -#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_sparc -#define tcg_gen_op6_i32 tcg_gen_op6_i32_sparc -#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_sparc -#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_sparc -#define tcg_gen_orc_i32 tcg_gen_orc_i32_sparc -#define tcg_gen_or_i32 tcg_gen_or_i32_sparc -#define tcg_gen_or_i64 tcg_gen_or_i64_sparc +#define tcg_gen_op1 tcg_gen_op1_sparc +#define tcg_gen_op2 tcg_gen_op2_sparc +#define tcg_gen_op3 tcg_gen_op3_sparc +#define tcg_gen_op4 tcg_gen_op4_sparc +#define tcg_gen_op5 tcg_gen_op5_sparc +#define tcg_gen_op6 tcg_gen_op6_sparc +#define tcg_gen_mb tcg_gen_mb_sparc +#define tcg_gen_addi_i32 tcg_gen_addi_i32_sparc +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_sparc +#define tcg_gen_subi_i32 tcg_gen_subi_i32_sparc +#define tcg_gen_andi_i32 tcg_gen_andi_i32_sparc #define tcg_gen_ori_i32 tcg_gen_ori_i32_sparc -#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_sparc -#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_sparc -#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_sparc -#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_sparc +#define tcg_gen_xori_i32 tcg_gen_xori_i32_sparc +#define tcg_gen_shli_i32 tcg_gen_shli_i32_sparc +#define tcg_gen_shri_i32 tcg_gen_shri_i32_sparc +#define tcg_gen_sari_i32 tcg_gen_sari_i32_sparc +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_sparc +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_sparc +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_sparc +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_sparc +#define tcg_gen_muli_i32 tcg_gen_muli_i32_sparc +#define tcg_gen_div_i32 tcg_gen_div_i32_sparc +#define tcg_gen_rem_i32 tcg_gen_rem_i32_sparc +#define tcg_gen_divu_i32 tcg_gen_divu_i32_sparc +#define tcg_gen_remu_i32 tcg_gen_remu_i32_sparc +#define tcg_gen_andc_i32 tcg_gen_andc_i32_sparc +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_sparc +#define tcg_gen_nand_i32 tcg_gen_nand_i32_sparc +#define tcg_gen_nor_i32 tcg_gen_nor_i32_sparc +#define tcg_gen_orc_i32 tcg_gen_orc_i32_sparc +#define tcg_gen_clz_i32 tcg_gen_clz_i32_sparc 
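/*
 * Context for the rename table above: each target in Unicorn is a full
 * QEMU core compiled separately, and all of the resulting objects are
 * linked into one library. Suffixing every QEMU-global symbol (here with
 * "_sparc") is what prevents duplicate-symbol errors at link time. A
 * minimal sketch of the same idea via token pasting -- the GLUE/XGLUE
 * macros and SUFFIX below are illustrative assumptions, not part of this
 * patch:
 *
 *   #define GLUE(a, b)      a##b
 *   #define XGLUE(a, b)     GLUE(a, b)
 *   #define SUFFIX          _sparc
 *   #define tcg_gen_add_i32 XGLUE(tcg_gen_add_i32, SUFFIX)
 *
 * The generated headers in this patch spell each suffixed name out
 * explicitly instead, which keeps every per-target header greppable and
 * independent of preprocessor tricks.
 */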
+#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_sparc +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_sparc +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_sparc +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_sparc +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_sparc #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_sparc #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_sparc #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_sparc #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_sparc -#define tcg_gen_sar_i32 tcg_gen_sar_i32_sparc -#define tcg_gen_sari_i32 tcg_gen_sari_i32_sparc -#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_sparc -#define tcg_gen_shl_i32 tcg_gen_shl_i32_sparc -#define tcg_gen_shl_i64 tcg_gen_shl_i64_sparc -#define tcg_gen_shli_i32 tcg_gen_shli_i32_sparc +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_sparc +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_sparc +#define tcg_gen_extract_i32 tcg_gen_extract_i32_sparc +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_sparc +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_sparc +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_sparc +#define tcg_gen_add2_i32 tcg_gen_add2_i32_sparc +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_sparc +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_sparc +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_sparc +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_sparc +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_sparc +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_sparc +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_sparc +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_sparc +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_sparc +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_sparc +#define tcg_gen_smin_i32 tcg_gen_smin_i32_sparc +#define tcg_gen_umin_i32 tcg_gen_umin_i32_sparc +#define tcg_gen_smax_i32 tcg_gen_smax_i32_sparc +#define tcg_gen_umax_i32 tcg_gen_umax_i32_sparc +#define tcg_gen_abs_i32 tcg_gen_abs_i32_sparc +#define tcg_gen_addi_i64 tcg_gen_addi_i64_sparc +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_sparc +#define tcg_gen_subi_i64 tcg_gen_subi_i64_sparc +#define tcg_gen_andi_i64 tcg_gen_andi_i64_sparc +#define tcg_gen_ori_i64 tcg_gen_ori_i64_sparc +#define tcg_gen_xori_i64 tcg_gen_xori_i64_sparc #define tcg_gen_shli_i64 tcg_gen_shli_i64_sparc -#define tcg_gen_shr_i32 tcg_gen_shr_i32_sparc -#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_sparc -#define tcg_gen_shr_i64 tcg_gen_shr_i64_sparc -#define tcg_gen_shri_i32 tcg_gen_shri_i32_sparc #define tcg_gen_shri_i64 tcg_gen_shri_i64_sparc -#define tcg_gen_st_i32 tcg_gen_st_i32_sparc -#define tcg_gen_st_i64 tcg_gen_st_i64_sparc -#define tcg_gen_sub_i32 tcg_gen_sub_i32_sparc -#define tcg_gen_sub_i64 tcg_gen_sub_i64_sparc -#define tcg_gen_subi_i32 tcg_gen_subi_i32_sparc -#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_sparc -#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_sparc -#define tcg_gen_xor_i32 tcg_gen_xor_i32_sparc -#define tcg_gen_xor_i64 tcg_gen_xor_i64_sparc -#define tcg_gen_xori_i32 tcg_gen_xori_i32_sparc -#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_sparc -#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_sparc -#define tcg_get_arg_str_idx tcg_get_arg_str_idx_sparc -#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_sparc -#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_sparc -#define tcg_global_mem_new_internal tcg_global_mem_new_internal_sparc -#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_sparc -#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_sparc -#define tcg_global_reg_new_internal tcg_global_reg_new_internal_sparc -#define 
tcg_handle_interrupt tcg_handle_interrupt_sparc -#define tcg_init tcg_init_sparc -#define tcg_invert_cond tcg_invert_cond_sparc -#define tcg_la_bb_end tcg_la_bb_end_sparc -#define tcg_la_br_end tcg_la_br_end_sparc -#define tcg_la_func_end tcg_la_func_end_sparc -#define tcg_liveness_analysis tcg_liveness_analysis_sparc -#define tcg_malloc tcg_malloc_sparc -#define tcg_malloc_internal tcg_malloc_internal_sparc -#define tcg_op_defs_org tcg_op_defs_org_sparc -#define tcg_opt_gen_mov tcg_opt_gen_mov_sparc -#define tcg_opt_gen_movi tcg_opt_gen_movi_sparc -#define tcg_optimize tcg_optimize_sparc -#define tcg_out16 tcg_out16_sparc -#define tcg_out32 tcg_out32_sparc -#define tcg_out64 tcg_out64_sparc -#define tcg_out8 tcg_out8_sparc -#define tcg_out_addi tcg_out_addi_sparc -#define tcg_out_branch tcg_out_branch_sparc -#define tcg_out_brcond32 tcg_out_brcond32_sparc -#define tcg_out_brcond64 tcg_out_brcond64_sparc -#define tcg_out_bswap32 tcg_out_bswap32_sparc -#define tcg_out_bswap64 tcg_out_bswap64_sparc -#define tcg_out_call tcg_out_call_sparc -#define tcg_out_cmp tcg_out_cmp_sparc -#define tcg_out_ext16s tcg_out_ext16s_sparc -#define tcg_out_ext16u tcg_out_ext16u_sparc -#define tcg_out_ext32s tcg_out_ext32s_sparc -#define tcg_out_ext32u tcg_out_ext32u_sparc -#define tcg_out_ext8s tcg_out_ext8s_sparc -#define tcg_out_ext8u tcg_out_ext8u_sparc -#define tcg_out_jmp tcg_out_jmp_sparc -#define tcg_out_jxx tcg_out_jxx_sparc -#define tcg_out_label tcg_out_label_sparc -#define tcg_out_ld tcg_out_ld_sparc -#define tcg_out_modrm tcg_out_modrm_sparc -#define tcg_out_modrm_offset tcg_out_modrm_offset_sparc -#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_sparc -#define tcg_out_mov tcg_out_mov_sparc -#define tcg_out_movcond32 tcg_out_movcond32_sparc -#define tcg_out_movcond64 tcg_out_movcond64_sparc -#define tcg_out_movi tcg_out_movi_sparc -#define tcg_out_op tcg_out_op_sparc -#define tcg_out_pop tcg_out_pop_sparc -#define tcg_out_push tcg_out_push_sparc -#define tcg_out_qemu_ld tcg_out_qemu_ld_sparc -#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_sparc -#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_sparc -#define tcg_out_qemu_st tcg_out_qemu_st_sparc -#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_sparc -#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_sparc -#define tcg_out_reloc tcg_out_reloc_sparc -#define tcg_out_rolw_8 tcg_out_rolw_8_sparc -#define tcg_out_setcond32 tcg_out_setcond32_sparc -#define tcg_out_setcond64 tcg_out_setcond64_sparc -#define tcg_out_shifti tcg_out_shifti_sparc -#define tcg_out_st tcg_out_st_sparc -#define tcg_out_tb_finalize tcg_out_tb_finalize_sparc -#define tcg_out_tb_init tcg_out_tb_init_sparc -#define tcg_out_tlb_load tcg_out_tlb_load_sparc -#define tcg_out_vex_modrm tcg_out_vex_modrm_sparc -#define tcg_patch32 tcg_patch32_sparc -#define tcg_patch8 tcg_patch8_sparc -#define tcg_pcrel_diff tcg_pcrel_diff_sparc -#define tcg_pool_reset tcg_pool_reset_sparc -#define tcg_prologue_init tcg_prologue_init_sparc -#define tcg_ptr_byte_diff tcg_ptr_byte_diff_sparc -#define tcg_reg_alloc tcg_reg_alloc_sparc -#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_sparc -#define tcg_reg_alloc_call tcg_reg_alloc_call_sparc -#define tcg_reg_alloc_mov tcg_reg_alloc_mov_sparc -#define tcg_reg_alloc_movi tcg_reg_alloc_movi_sparc -#define tcg_reg_alloc_op tcg_reg_alloc_op_sparc -#define tcg_reg_alloc_start tcg_reg_alloc_start_sparc -#define tcg_reg_free tcg_reg_free_sparc -#define tcg_reg_sync tcg_reg_sync_sparc -#define tcg_set_frame 
tcg_set_frame_sparc -#define tcg_set_nop tcg_set_nop_sparc -#define tcg_swap_cond tcg_swap_cond_sparc -#define tcg_target_callee_save_regs tcg_target_callee_save_regs_sparc -#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_sparc -#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_sparc -#define tcg_target_const_match tcg_target_const_match_sparc -#define tcg_target_init tcg_target_init_sparc -#define tcg_target_qemu_prologue tcg_target_qemu_prologue_sparc -#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_sparc -#define tcg_temp_alloc tcg_temp_alloc_sparc -#define tcg_temp_free_i32 tcg_temp_free_i32_sparc -#define tcg_temp_free_i64 tcg_temp_free_i64_sparc -#define tcg_temp_free_internal tcg_temp_free_internal_sparc -#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_sparc -#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_sparc -#define tcg_temp_new_i32 tcg_temp_new_i32_sparc -#define tcg_temp_new_i64 tcg_temp_new_i64_sparc -#define tcg_temp_new_internal tcg_temp_new_internal_sparc -#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_sparc -#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_sparc -#define tdb_hash tdb_hash_sparc -#define teecr_write teecr_write_sparc -#define teehbr_access teehbr_access_sparc -#define temp_allocate_frame temp_allocate_frame_sparc -#define temp_dead temp_dead_sparc -#define temps_are_copies temps_are_copies_sparc -#define temp_save temp_save_sparc -#define temp_sync temp_sync_sparc -#define tgen_arithi tgen_arithi_sparc -#define tgen_arithr tgen_arithr_sparc -#define thumb2_logic_op thumb2_logic_op_sparc -#define ti925t_initfn ti925t_initfn_sparc -#define tlb_add_large_page tlb_add_large_page_sparc -#define tlb_flush_entry tlb_flush_entry_sparc -#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_sparc -#define tlbi_aa64_asid_write tlbi_aa64_asid_write_sparc -#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_sparc -#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_sparc -#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_sparc -#define tlbi_aa64_va_write tlbi_aa64_va_write_sparc -#define tlbiall_is_write tlbiall_is_write_sparc -#define tlbiall_write tlbiall_write_sparc -#define tlbiasid_is_write tlbiasid_is_write_sparc -#define tlbiasid_write tlbiasid_write_sparc -#define tlbimvaa_is_write tlbimvaa_is_write_sparc -#define tlbimvaa_write tlbimvaa_write_sparc -#define tlbimva_is_write tlbimva_is_write_sparc -#define tlbimva_write tlbimva_write_sparc -#define tlb_is_dirty_ram tlb_is_dirty_ram_sparc +#define tcg_gen_sari_i64 tcg_gen_sari_i64_sparc +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_sparc +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_sparc +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_sparc +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_sparc +#define tcg_gen_muli_i64 tcg_gen_muli_i64_sparc +#define tcg_gen_div_i64 tcg_gen_div_i64_sparc +#define tcg_gen_rem_i64 tcg_gen_rem_i64_sparc +#define tcg_gen_divu_i64 tcg_gen_divu_i64_sparc +#define tcg_gen_remu_i64 tcg_gen_remu_i64_sparc +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_sparc +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_sparc +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_sparc +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_sparc +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_sparc +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_sparc +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_sparc +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_sparc +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_sparc +#define tcg_gen_not_i64 
tcg_gen_not_i64_sparc +#define tcg_gen_andc_i64 tcg_gen_andc_i64_sparc +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_sparc +#define tcg_gen_nand_i64 tcg_gen_nand_i64_sparc +#define tcg_gen_nor_i64 tcg_gen_nor_i64_sparc +#define tcg_gen_orc_i64 tcg_gen_orc_i64_sparc +#define tcg_gen_clz_i64 tcg_gen_clz_i64_sparc +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_sparc +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_sparc +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_sparc +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_sparc +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_sparc +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_sparc +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_sparc +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_sparc +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_sparc +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_sparc +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_sparc +#define tcg_gen_extract_i64 tcg_gen_extract_i64_sparc +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_sparc +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_sparc +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_sparc +#define tcg_gen_add2_i64 tcg_gen_add2_i64_sparc +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_sparc +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_sparc +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_sparc +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_sparc +#define tcg_gen_smin_i64 tcg_gen_smin_i64_sparc +#define tcg_gen_umin_i64 tcg_gen_umin_i64_sparc +#define tcg_gen_smax_i64 tcg_gen_smax_i64_sparc +#define tcg_gen_umax_i64 tcg_gen_umax_i64_sparc +#define tcg_gen_abs_i64 tcg_gen_abs_i64_sparc +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_sparc +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_sparc +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_sparc +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_sparc +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_sparc +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_sparc +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_sparc +#define tcg_gen_exit_tb tcg_gen_exit_tb_sparc +#define tcg_gen_goto_tb tcg_gen_goto_tb_sparc +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_sparc +#define check_exit_request check_exit_request_sparc +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_sparc +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_sparc +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_sparc +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_sparc +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_sparc +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_sparc +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_sparc +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_sparc +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_sparc +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_sparc +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_sparc +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_sparc +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_sparc +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_sparc +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_sparc +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_sparc +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_sparc +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_sparc +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_sparc +#define 
tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_sparc +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_sparc +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_sparc +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_sparc +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_sparc +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_sparc +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_sparc +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_sparc +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_sparc +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_sparc +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_sparc +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_sparc +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_sparc +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_sparc +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_sparc +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_sparc +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_sparc +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_sparc +#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_sparc +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_sparc +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_sparc +#define simd_desc simd_desc_sparc +#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_sparc +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_sparc +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_sparc +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_sparc +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_sparc +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_sparc +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_sparc +#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_sparc +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_sparc +#define tcg_gen_gvec_2 tcg_gen_gvec_2_sparc +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_sparc +#define tcg_gen_gvec_2s tcg_gen_gvec_2s_sparc +#define tcg_gen_gvec_3 tcg_gen_gvec_3_sparc +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_sparc +#define tcg_gen_gvec_4 tcg_gen_gvec_4_sparc +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_sparc +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_sparc +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_sparc +#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_sparc +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_sparc +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_sparc +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_sparc +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_sparc +#define tcg_gen_gvec_not tcg_gen_gvec_not_sparc +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_sparc +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_sparc +#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_sparc +#define tcg_gen_gvec_add tcg_gen_gvec_add_sparc +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_sparc +#define tcg_gen_gvec_addi tcg_gen_gvec_addi_sparc +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_sparc +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_sparc +#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_sparc +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_sparc +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_sparc +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_sparc +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_sparc +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_sparc 
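/*
 * The added (+) entries in this hunk track APIs that did not exist in the
 * QEMU that Unicorn 1 was based on: TCG generic-vector (gvec) expanders
 * and the atomic helpers. Every new global must appear in this table, or
 * the per-target objects collide at link time. For reference, the gvec
 * expanders operate on CPUState-relative byte offsets rather than on TCG
 * temporaries; a hedged usage sketch (the CPUSPARCState fields vd/va/vb
 * are assumed for illustration and are not defined by this patch):
 *
 *   // Element-wise 32-bit add of two in-CPU vectors, 16 bytes wide:
 *   // tcg_gen_gvec_add(vece, dofs, aofs, bofs, oprsz, maxsz)
 *   tcg_gen_gvec_add(MO_32,
 *                    offsetof(CPUSPARCState, vd),  // destination offset (assumed field)
 *                    offsetof(CPUSPARCState, va),  // first source offset (assumed field)
 *                    offsetof(CPUSPARCState, vb),  // second source offset (assumed field)
 *                    16, 16);
 *
 * With the defines above in effect, this call resolves to
 * tcg_gen_gvec_add_sparc inside the sparc-softmmu object.
 */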
+#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_sparc +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_sparc +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_sparc +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_sparc +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_sparc +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_sparc +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_sparc +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_sparc +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_sparc +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_sparc +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_sparc +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_sparc +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_sparc +#define tcg_gen_gvec_and tcg_gen_gvec_and_sparc +#define tcg_gen_gvec_or tcg_gen_gvec_or_sparc +#define tcg_gen_gvec_xor tcg_gen_gvec_xor_sparc +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_sparc +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_sparc +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_sparc +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_sparc +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_sparc +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_sparc +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_sparc +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_sparc +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_sparc +#define tcg_gen_gvec_ors tcg_gen_gvec_ors_sparc +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_sparc +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_sparc +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_sparc +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_sparc +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_sparc +#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_sparc +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_sparc +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_sparc +#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_sparc +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_sparc +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_sparc +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_sparc +#define tcg_gen_gvec_sars tcg_gen_gvec_sars_sparc +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_sparc +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_sparc +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_sparc +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_sparc +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_sparc +#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_sparc +#define vec_gen_2 vec_gen_2_sparc +#define vec_gen_3 vec_gen_3_sparc +#define vec_gen_4 vec_gen_4_sparc +#define tcg_gen_mov_vec tcg_gen_mov_vec_sparc +#define tcg_const_zeros_vec tcg_const_zeros_vec_sparc +#define tcg_const_ones_vec tcg_const_ones_vec_sparc +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_sparc +#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_sparc +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_sparc +#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_sparc +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_sparc +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_sparc +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_sparc +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_sparc +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_sparc +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_sparc +#define tcg_gen_ld_vec tcg_gen_ld_vec_sparc +#define tcg_gen_st_vec tcg_gen_st_vec_sparc +#define tcg_gen_stl_vec tcg_gen_stl_vec_sparc +#define tcg_gen_and_vec tcg_gen_and_vec_sparc +#define tcg_gen_or_vec tcg_gen_or_vec_sparc +#define tcg_gen_xor_vec tcg_gen_xor_vec_sparc +#define tcg_gen_andc_vec tcg_gen_andc_vec_sparc +#define tcg_gen_orc_vec 
tcg_gen_orc_vec_sparc +#define tcg_gen_nand_vec tcg_gen_nand_vec_sparc +#define tcg_gen_nor_vec tcg_gen_nor_vec_sparc +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_sparc +#define tcg_gen_not_vec tcg_gen_not_vec_sparc +#define tcg_gen_neg_vec tcg_gen_neg_vec_sparc +#define tcg_gen_abs_vec tcg_gen_abs_vec_sparc +#define tcg_gen_shli_vec tcg_gen_shli_vec_sparc +#define tcg_gen_shri_vec tcg_gen_shri_vec_sparc +#define tcg_gen_sari_vec tcg_gen_sari_vec_sparc +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_sparc +#define tcg_gen_add_vec tcg_gen_add_vec_sparc +#define tcg_gen_sub_vec tcg_gen_sub_vec_sparc +#define tcg_gen_mul_vec tcg_gen_mul_vec_sparc +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_sparc +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_sparc +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_sparc +#define tcg_gen_ussub_vec tcg_gen_ussub_vec_sparc +#define tcg_gen_smin_vec tcg_gen_smin_vec_sparc +#define tcg_gen_umin_vec tcg_gen_umin_vec_sparc +#define tcg_gen_smax_vec tcg_gen_smax_vec_sparc +#define tcg_gen_umax_vec tcg_gen_umax_vec_sparc +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_sparc +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_sparc +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_sparc +#define tcg_gen_shls_vec tcg_gen_shls_vec_sparc +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_sparc +#define tcg_gen_sars_vec tcg_gen_sars_vec_sparc +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_sparc +#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_sparc +#define tb_htable_lookup tb_htable_lookup_sparc +#define tb_set_jmp_target tb_set_jmp_target_sparc +#define cpu_exec cpu_exec_sparc +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_sparc +#define cpu_reloading_memory_map cpu_reloading_memory_map_sparc +#define cpu_loop_exit cpu_loop_exit_sparc +#define cpu_loop_exit_restore cpu_loop_exit_restore_sparc +#define cpu_loop_exit_atomic cpu_loop_exit_atomic_sparc +#define tlb_init tlb_init_sparc +#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_sparc +#define tlb_flush tlb_flush_sparc +#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_sparc +#define tlb_flush_all_cpus tlb_flush_all_cpus_sparc +#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_sparc +#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_sparc +#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_sparc +#define tlb_flush_page tlb_flush_page_sparc +#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_sparc +#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_sparc +#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_sparc +#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_sparc #define tlb_protect_code tlb_protect_code_sparc -#define tlb_reset_dirty_range tlb_reset_dirty_range_sparc -#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_sparc +#define tlb_unprotect_code tlb_unprotect_code_sparc +#define tlb_reset_dirty tlb_reset_dirty_sparc #define tlb_set_dirty tlb_set_dirty_sparc -#define tlb_set_dirty1 tlb_set_dirty1_sparc -#define tlb_unprotect_code_phys tlb_unprotect_code_phys_sparc +#define tlb_set_page_with_attrs tlb_set_page_with_attrs_sparc +#define tlb_set_page tlb_set_page_sparc +#define get_page_addr_code_hostp get_page_addr_code_hostp_sparc +#define get_page_addr_code get_page_addr_code_sparc +#define probe_access probe_access_sparc #define tlb_vaddr_to_host tlb_vaddr_to_host_sparc -#define token_get_type token_get_type_sparc -#define token_get_value token_get_value_sparc -#define token_is_escape 
token_is_escape_sparc -#define token_is_keyword token_is_keyword_sparc -#define token_is_operator token_is_operator_sparc -#define tokens_append_from_iter tokens_append_from_iter_sparc -#define to_qiv to_qiv_sparc -#define to_qov to_qov_sparc -#define tosa_init tosa_init_sparc -#define tosa_machine_init tosa_machine_init_sparc -#define tswap32 tswap32_sparc -#define tswap64 tswap64_sparc -#define type_class_get_size type_class_get_size_sparc -#define type_get_by_name type_get_by_name_sparc -#define type_get_parent type_get_parent_sparc -#define type_has_parent type_has_parent_sparc -#define type_initialize type_initialize_sparc -#define type_initialize_interface type_initialize_interface_sparc -#define type_is_ancestor type_is_ancestor_sparc -#define type_new type_new_sparc -#define type_object_get_size type_object_get_size_sparc -#define type_register_internal type_register_internal_sparc -#define type_table_add type_table_add_sparc -#define type_table_get type_table_get_sparc -#define type_table_lookup type_table_lookup_sparc -#define uint16_to_float32 uint16_to_float32_sparc -#define uint16_to_float64 uint16_to_float64_sparc -#define uint32_to_float32 uint32_to_float32_sparc -#define uint32_to_float64 uint32_to_float64_sparc -#define uint64_to_float128 uint64_to_float128_sparc -#define uint64_to_float32 uint64_to_float32_sparc -#define uint64_to_float64 uint64_to_float64_sparc -#define unassigned_io_ops unassigned_io_ops_sparc -#define unassigned_io_read unassigned_io_read_sparc -#define unassigned_io_write unassigned_io_write_sparc -#define unassigned_mem_accepts unassigned_mem_accepts_sparc +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_sparc +#define helper_le_lduw_mmu helper_le_lduw_mmu_sparc +#define helper_be_lduw_mmu helper_be_lduw_mmu_sparc +#define helper_le_ldul_mmu helper_le_ldul_mmu_sparc +#define helper_be_ldul_mmu helper_be_ldul_mmu_sparc +#define helper_le_ldq_mmu helper_le_ldq_mmu_sparc +#define helper_be_ldq_mmu helper_be_ldq_mmu_sparc +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_sparc +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_sparc +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_sparc +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_sparc +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_sparc +#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_sparc +#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_sparc +#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_sparc +#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_sparc +#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_sparc +#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_sparc +#define cpu_ldub_data_ra cpu_ldub_data_ra_sparc +#define cpu_ldsb_data_ra cpu_ldsb_data_ra_sparc +#define cpu_lduw_data_ra cpu_lduw_data_ra_sparc +#define cpu_ldsw_data_ra cpu_ldsw_data_ra_sparc +#define cpu_ldl_data_ra cpu_ldl_data_ra_sparc +#define cpu_ldq_data_ra cpu_ldq_data_ra_sparc +#define cpu_ldub_data cpu_ldub_data_sparc +#define cpu_ldsb_data cpu_ldsb_data_sparc +#define cpu_lduw_data cpu_lduw_data_sparc +#define cpu_ldsw_data cpu_ldsw_data_sparc +#define cpu_ldl_data cpu_ldl_data_sparc +#define cpu_ldq_data cpu_ldq_data_sparc +#define helper_ret_stb_mmu helper_ret_stb_mmu_sparc +#define helper_le_stw_mmu helper_le_stw_mmu_sparc +#define helper_be_stw_mmu helper_be_stw_mmu_sparc +#define helper_le_stl_mmu helper_le_stl_mmu_sparc +#define helper_be_stl_mmu helper_be_stl_mmu_sparc +#define helper_le_stq_mmu helper_le_stq_mmu_sparc +#define helper_be_stq_mmu helper_be_stq_mmu_sparc +#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_sparc +#define cpu_stw_mmuidx_ra 
cpu_stw_mmuidx_ra_sparc +#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_sparc +#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_sparc +#define cpu_stb_data_ra cpu_stb_data_ra_sparc +#define cpu_stw_data_ra cpu_stw_data_ra_sparc +#define cpu_stl_data_ra cpu_stl_data_ra_sparc +#define cpu_stq_data_ra cpu_stq_data_ra_sparc +#define cpu_stb_data cpu_stb_data_sparc +#define cpu_stw_data cpu_stw_data_sparc +#define cpu_stl_data cpu_stl_data_sparc +#define cpu_stq_data cpu_stq_data_sparc +#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_sparc +#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_sparc +#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_sparc +#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_sparc +#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_sparc +#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_sparc +#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_sparc +#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_sparc +#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_sparc +#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_sparc +#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_sparc +#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_sparc +#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_sparc +#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_sparc +#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_sparc +#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_sparc +#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_sparc +#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_sparc +#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_sparc +#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_sparc +#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_sparc +#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_sparc +#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_sparc +#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_sparc +#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_sparc +#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_sparc +#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_sparc +#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_sparc +#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_sparc +#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_sparc +#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_sparc +#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_sparc +#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_sparc +#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_sparc +#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_sparc +#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_sparc +#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_sparc +#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_sparc +#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_sparc +#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_sparc +#define helper_atomic_fetch_xorw_be_mmu 
helper_atomic_fetch_xorw_be_mmu_sparc +#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_sparc +#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_sparc +#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_sparc +#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_sparc +#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_sparc +#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_sparc +#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_sparc +#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_sparc +#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_sparc +#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_sparc +#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_sparc +#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_sparc +#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_sparc +#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_sparc +#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_sparc +#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_sparc +#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_sparc +#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_sparc +#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_sparc +#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_sparc +#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_sparc +#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_sparc +#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_sparc +#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_sparc +#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_sparc +#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_sparc +#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_sparc +#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_sparc +#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_sparc +#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_sparc +#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_sparc +#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_sparc +#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_sparc +#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_sparc +#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_sparc +#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_sparc +#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_sparc +#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_sparc +#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_sparc +#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_sparc +#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_sparc +#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_sparc +#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_sparc +#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_sparc +#define 
helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_sparc +#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_sparc +#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_sparc +#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_sparc +#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_sparc +#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_sparc +#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_sparc +#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_sparc +#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_sparc +#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_sparc +#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_sparc +#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_sparc +#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_sparc +#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_sparc +#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_sparc +#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_sparc +#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_sparc +#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_sparc +#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_sparc +#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_sparc +#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_sparc +#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_sparc +#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_sparc +#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_sparc +#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_sparc +#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_sparc +#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_sparc +#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_sparc +#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_sparc +#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_sparc +#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_sparc +#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_sparc +#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_sparc +#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_sparc +#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_sparc +#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_sparc +#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_sparc +#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_sparc +#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_sparc +#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_sparc +#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_sparc +#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_sparc +#define helper_atomic_xchgb helper_atomic_xchgb_sparc +#define helper_atomic_fetch_addb helper_atomic_fetch_addb_sparc +#define helper_atomic_fetch_andb helper_atomic_fetch_andb_sparc +#define helper_atomic_fetch_orb helper_atomic_fetch_orb_sparc +#define 
helper_atomic_fetch_xorb helper_atomic_fetch_xorb_sparc +#define helper_atomic_add_fetchb helper_atomic_add_fetchb_sparc +#define helper_atomic_and_fetchb helper_atomic_and_fetchb_sparc +#define helper_atomic_or_fetchb helper_atomic_or_fetchb_sparc +#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_sparc +#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_sparc +#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_sparc +#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_sparc +#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_sparc +#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_sparc +#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_sparc +#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_sparc +#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_sparc +#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_sparc +#define helper_atomic_xchgw_le helper_atomic_xchgw_le_sparc +#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_sparc +#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_sparc +#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_sparc +#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_sparc +#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_sparc +#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_sparc +#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_sparc +#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_sparc +#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_sparc +#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_sparc +#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_sparc +#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_sparc +#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_sparc +#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_sparc +#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_sparc +#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_sparc +#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_sparc +#define helper_atomic_xchgw_be helper_atomic_xchgw_be_sparc +#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_sparc +#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_sparc +#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_sparc +#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_sparc +#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_sparc +#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_sparc +#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_sparc +#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_sparc +#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_sparc +#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_sparc +#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_sparc +#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_sparc +#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_sparc +#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_sparc +#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_sparc +#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_sparc +#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_sparc +#define helper_atomic_xchgl_le helper_atomic_xchgl_le_sparc +#define 
helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_sparc
+#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_sparc
+#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_sparc
+#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_sparc
+#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_sparc
+#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_sparc
+#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_sparc
+#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_sparc
+#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_sparc
+#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_sparc
+#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_sparc
+#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_sparc
+#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_sparc
+#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_sparc
+#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_sparc
+#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_sparc
+#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_sparc
+#define helper_atomic_xchgl_be helper_atomic_xchgl_be_sparc
+#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_sparc
+#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_sparc
+#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_sparc
+#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_sparc
+#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_sparc
+#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_sparc
+#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_sparc
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_sparc
+#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_sparc
+#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_sparc
+#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_sparc
+#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_sparc
+#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_sparc
+#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_sparc
+#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_sparc
+#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_sparc
+#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_sparc
+#define helper_atomic_xchgq_le helper_atomic_xchgq_le_sparc
+#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_sparc
+#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_sparc
+#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_sparc
+#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_sparc
+#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_sparc
+#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_sparc
+#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_sparc
+#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_sparc
+#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_sparc
+#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_sparc
+#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_sparc
+#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_sparc
+#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_sparc
+#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_sparc
+#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_sparc
+#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_sparc
+#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_sparc
+#define helper_atomic_xchgq_be helper_atomic_xchgq_be_sparc
+#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_sparc
+#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_sparc
+#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_sparc
+#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_sparc
+#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_sparc
+#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_sparc
+#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_sparc
+#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_sparc
+#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_sparc
+#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_sparc
+#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_sparc
+#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_sparc
+#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_sparc
+#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_sparc
+#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_sparc
+#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_sparc
+#define cpu_ldub_code cpu_ldub_code_sparc
+#define cpu_lduw_code cpu_lduw_code_sparc
+#define cpu_ldl_code cpu_ldl_code_sparc
+#define cpu_ldq_code cpu_ldq_code_sparc
+#define helper_div_i32 helper_div_i32_sparc
+#define helper_rem_i32 helper_rem_i32_sparc
+#define helper_divu_i32 helper_divu_i32_sparc
+#define helper_remu_i32 helper_remu_i32_sparc
+#define helper_shl_i64 helper_shl_i64_sparc
+#define helper_shr_i64 helper_shr_i64_sparc
+#define helper_sar_i64 helper_sar_i64_sparc
+#define helper_div_i64 helper_div_i64_sparc
+#define helper_rem_i64 helper_rem_i64_sparc
+#define helper_divu_i64 helper_divu_i64_sparc
+#define helper_remu_i64 helper_remu_i64_sparc
+#define helper_muluh_i64 helper_muluh_i64_sparc
+#define helper_mulsh_i64 helper_mulsh_i64_sparc
+#define helper_clz_i32 helper_clz_i32_sparc
+#define helper_ctz_i32 helper_ctz_i32_sparc
+#define helper_clz_i64 helper_clz_i64_sparc
+#define helper_ctz_i64 helper_ctz_i64_sparc
+#define helper_clrsb_i32 helper_clrsb_i32_sparc
+#define helper_clrsb_i64 helper_clrsb_i64_sparc
+#define helper_ctpop_i32 helper_ctpop_i32_sparc
+#define helper_ctpop_i64 helper_ctpop_i64_sparc
+#define helper_lookup_tb_ptr helper_lookup_tb_ptr_sparc
+#define helper_exit_atomic helper_exit_atomic_sparc
+#define helper_gvec_add8 helper_gvec_add8_sparc
+#define helper_gvec_add16 helper_gvec_add16_sparc
+#define helper_gvec_add32 helper_gvec_add32_sparc
+#define helper_gvec_add64 helper_gvec_add64_sparc
+#define helper_gvec_adds8 helper_gvec_adds8_sparc
+#define helper_gvec_adds16 helper_gvec_adds16_sparc
+#define helper_gvec_adds32 helper_gvec_adds32_sparc
+#define helper_gvec_adds64 helper_gvec_adds64_sparc
+#define helper_gvec_sub8 helper_gvec_sub8_sparc
+#define helper_gvec_sub16 helper_gvec_sub16_sparc
+#define helper_gvec_sub32 helper_gvec_sub32_sparc
+#define helper_gvec_sub64 helper_gvec_sub64_sparc
+#define helper_gvec_subs8 helper_gvec_subs8_sparc
+#define helper_gvec_subs16 helper_gvec_subs16_sparc
+#define helper_gvec_subs32 helper_gvec_subs32_sparc
+#define helper_gvec_subs64 helper_gvec_subs64_sparc
+#define helper_gvec_mul8 helper_gvec_mul8_sparc
+#define helper_gvec_mul16 helper_gvec_mul16_sparc
+#define helper_gvec_mul32 helper_gvec_mul32_sparc
+#define helper_gvec_mul64 helper_gvec_mul64_sparc
+#define helper_gvec_muls8 helper_gvec_muls8_sparc
+#define helper_gvec_muls16 helper_gvec_muls16_sparc
+#define helper_gvec_muls32 helper_gvec_muls32_sparc
+#define helper_gvec_muls64 helper_gvec_muls64_sparc
+#define helper_gvec_neg8 helper_gvec_neg8_sparc
+#define helper_gvec_neg16 helper_gvec_neg16_sparc
+#define helper_gvec_neg32 helper_gvec_neg32_sparc
+#define helper_gvec_neg64 helper_gvec_neg64_sparc
+#define helper_gvec_abs8 helper_gvec_abs8_sparc
+#define helper_gvec_abs16 helper_gvec_abs16_sparc
+#define helper_gvec_abs32 helper_gvec_abs32_sparc
+#define helper_gvec_abs64 helper_gvec_abs64_sparc
+#define helper_gvec_mov helper_gvec_mov_sparc
+#define helper_gvec_dup64 helper_gvec_dup64_sparc
+#define helper_gvec_dup32 helper_gvec_dup32_sparc
+#define helper_gvec_dup16 helper_gvec_dup16_sparc
+#define helper_gvec_dup8 helper_gvec_dup8_sparc
+#define helper_gvec_not helper_gvec_not_sparc
+#define helper_gvec_and helper_gvec_and_sparc
+#define helper_gvec_or helper_gvec_or_sparc
+#define helper_gvec_xor helper_gvec_xor_sparc
+#define helper_gvec_andc helper_gvec_andc_sparc
+#define helper_gvec_orc helper_gvec_orc_sparc
+#define helper_gvec_nand helper_gvec_nand_sparc
+#define helper_gvec_nor helper_gvec_nor_sparc
+#define helper_gvec_eqv helper_gvec_eqv_sparc
+#define helper_gvec_ands helper_gvec_ands_sparc
+#define helper_gvec_xors helper_gvec_xors_sparc
+#define helper_gvec_ors helper_gvec_ors_sparc
+#define helper_gvec_shl8i helper_gvec_shl8i_sparc
+#define helper_gvec_shl16i helper_gvec_shl16i_sparc
+#define helper_gvec_shl32i helper_gvec_shl32i_sparc
+#define helper_gvec_shl64i helper_gvec_shl64i_sparc
+#define helper_gvec_shr8i helper_gvec_shr8i_sparc
+#define helper_gvec_shr16i helper_gvec_shr16i_sparc
+#define helper_gvec_shr32i helper_gvec_shr32i_sparc
+#define helper_gvec_shr64i helper_gvec_shr64i_sparc
+#define helper_gvec_sar8i helper_gvec_sar8i_sparc
+#define helper_gvec_sar16i helper_gvec_sar16i_sparc
+#define helper_gvec_sar32i helper_gvec_sar32i_sparc
+#define helper_gvec_sar64i helper_gvec_sar64i_sparc
+#define helper_gvec_shl8v helper_gvec_shl8v_sparc
+#define helper_gvec_shl16v helper_gvec_shl16v_sparc
+#define helper_gvec_shl32v helper_gvec_shl32v_sparc
+#define helper_gvec_shl64v helper_gvec_shl64v_sparc
+#define helper_gvec_shr8v helper_gvec_shr8v_sparc
+#define helper_gvec_shr16v helper_gvec_shr16v_sparc
+#define helper_gvec_shr32v helper_gvec_shr32v_sparc
+#define helper_gvec_shr64v helper_gvec_shr64v_sparc
+#define helper_gvec_sar8v helper_gvec_sar8v_sparc
+#define helper_gvec_sar16v helper_gvec_sar16v_sparc
+#define helper_gvec_sar32v helper_gvec_sar32v_sparc
+#define helper_gvec_sar64v helper_gvec_sar64v_sparc
+#define helper_gvec_eq8 helper_gvec_eq8_sparc
+#define helper_gvec_ne8 helper_gvec_ne8_sparc
+#define helper_gvec_lt8 helper_gvec_lt8_sparc
+#define helper_gvec_le8 helper_gvec_le8_sparc
+#define helper_gvec_ltu8 helper_gvec_ltu8_sparc
+#define helper_gvec_leu8 helper_gvec_leu8_sparc
+#define helper_gvec_eq16 helper_gvec_eq16_sparc
+#define helper_gvec_ne16 helper_gvec_ne16_sparc
+#define helper_gvec_lt16 helper_gvec_lt16_sparc
+#define helper_gvec_le16 helper_gvec_le16_sparc
+#define helper_gvec_ltu16 helper_gvec_ltu16_sparc
+#define helper_gvec_leu16 helper_gvec_leu16_sparc
+#define helper_gvec_eq32 helper_gvec_eq32_sparc
+#define helper_gvec_ne32 helper_gvec_ne32_sparc
+#define helper_gvec_lt32 helper_gvec_lt32_sparc
+#define helper_gvec_le32 helper_gvec_le32_sparc
+#define helper_gvec_ltu32 helper_gvec_ltu32_sparc
+#define helper_gvec_leu32 helper_gvec_leu32_sparc
+#define helper_gvec_eq64 helper_gvec_eq64_sparc
+#define helper_gvec_ne64 helper_gvec_ne64_sparc
+#define helper_gvec_lt64 helper_gvec_lt64_sparc
+#define helper_gvec_le64 helper_gvec_le64_sparc
+#define helper_gvec_ltu64 helper_gvec_ltu64_sparc
+#define helper_gvec_leu64 helper_gvec_leu64_sparc
+#define helper_gvec_ssadd8 helper_gvec_ssadd8_sparc
+#define helper_gvec_ssadd16 helper_gvec_ssadd16_sparc
+#define helper_gvec_ssadd32 helper_gvec_ssadd32_sparc
+#define helper_gvec_ssadd64 helper_gvec_ssadd64_sparc
+#define helper_gvec_sssub8 helper_gvec_sssub8_sparc
+#define helper_gvec_sssub16 helper_gvec_sssub16_sparc
+#define helper_gvec_sssub32 helper_gvec_sssub32_sparc
+#define helper_gvec_sssub64 helper_gvec_sssub64_sparc
+#define helper_gvec_usadd8 helper_gvec_usadd8_sparc
+#define helper_gvec_usadd16 helper_gvec_usadd16_sparc
+#define helper_gvec_usadd32 helper_gvec_usadd32_sparc
+#define helper_gvec_usadd64 helper_gvec_usadd64_sparc
+#define helper_gvec_ussub8 helper_gvec_ussub8_sparc
+#define helper_gvec_ussub16 helper_gvec_ussub16_sparc
+#define helper_gvec_ussub32 helper_gvec_ussub32_sparc
+#define helper_gvec_ussub64 helper_gvec_ussub64_sparc
+#define helper_gvec_smin8 helper_gvec_smin8_sparc
+#define helper_gvec_smin16 helper_gvec_smin16_sparc
+#define helper_gvec_smin32 helper_gvec_smin32_sparc
+#define helper_gvec_smin64 helper_gvec_smin64_sparc
+#define helper_gvec_smax8 helper_gvec_smax8_sparc
+#define helper_gvec_smax16 helper_gvec_smax16_sparc
+#define helper_gvec_smax32 helper_gvec_smax32_sparc
+#define helper_gvec_smax64 helper_gvec_smax64_sparc
+#define helper_gvec_umin8 helper_gvec_umin8_sparc
+#define helper_gvec_umin16 helper_gvec_umin16_sparc
+#define helper_gvec_umin32 helper_gvec_umin32_sparc
+#define helper_gvec_umin64 helper_gvec_umin64_sparc
+#define helper_gvec_umax8 helper_gvec_umax8_sparc
+#define helper_gvec_umax16 helper_gvec_umax16_sparc
+#define helper_gvec_umax32 helper_gvec_umax32_sparc
+#define helper_gvec_umax64 helper_gvec_umax64_sparc
+#define helper_gvec_bitsel helper_gvec_bitsel_sparc
+#define cpu_restore_state cpu_restore_state_sparc
+#define page_collection_lock page_collection_lock_sparc
+#define page_collection_unlock page_collection_unlock_sparc
+#define free_code_gen_buffer free_code_gen_buffer_sparc
+#define tcg_exec_init tcg_exec_init_sparc
+#define tb_cleanup tb_cleanup_sparc
+#define tb_flush tb_flush_sparc
+#define tb_phys_invalidate tb_phys_invalidate_sparc
+#define tb_gen_code tb_gen_code_sparc
+#define tb_exec_lock tb_exec_lock_sparc
+#define tb_exec_unlock tb_exec_unlock_sparc
+#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_sparc
+#define tb_invalidate_phys_range tb_invalidate_phys_range_sparc
+#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_sparc
+#define tb_check_watchpoint tb_check_watchpoint_sparc
+#define cpu_io_recompile cpu_io_recompile_sparc
+#define tb_flush_jmp_cache tb_flush_jmp_cache_sparc
+#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_sparc
+#define translator_loop_temp_check translator_loop_temp_check_sparc
+#define translator_loop translator_loop_sparc
+#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_sparc
+#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_sparc
+#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_sparc
+#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_sparc
+#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_sparc
+#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_sparc
 #define unassigned_mem_ops unassigned_mem_ops_sparc
-#define unassigned_mem_read unassigned_mem_read_sparc
-#define unassigned_mem_write unassigned_mem_write_sparc
-#define update_spsel update_spsel_sparc
-#define v6_cp_reginfo v6_cp_reginfo_sparc
-#define v6k_cp_reginfo v6k_cp_reginfo_sparc
-#define v7_cp_reginfo v7_cp_reginfo_sparc
-#define v7mp_cp_reginfo v7mp_cp_reginfo_sparc
-#define v7m_pop v7m_pop_sparc
-#define v7m_push v7m_push_sparc
-#define v8_cp_reginfo v8_cp_reginfo_sparc
-#define v8_el2_cp_reginfo v8_el2_cp_reginfo_sparc
-#define v8_el3_cp_reginfo v8_el3_cp_reginfo_sparc
-#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_sparc
-#define vapa_cp_reginfo vapa_cp_reginfo_sparc
-#define vbar_write vbar_write_sparc
-#define vfp_exceptbits_from_host vfp_exceptbits_from_host_sparc
-#define vfp_exceptbits_to_host vfp_exceptbits_to_host_sparc
-#define vfp_get_fpcr vfp_get_fpcr_sparc
-#define vfp_get_fpscr vfp_get_fpscr_sparc
-#define vfp_get_fpsr vfp_get_fpsr_sparc
-#define vfp_reg_offset vfp_reg_offset_sparc
-#define vfp_set_fpcr vfp_set_fpcr_sparc
-#define vfp_set_fpscr vfp_set_fpscr_sparc
-#define vfp_set_fpsr vfp_set_fpsr_sparc
-#define visit_end_implicit_struct visit_end_implicit_struct_sparc
-#define visit_end_list visit_end_list_sparc
-#define visit_end_struct visit_end_struct_sparc
-#define visit_end_union visit_end_union_sparc
-#define visit_get_next_type visit_get_next_type_sparc
-#define visit_next_list visit_next_list_sparc
-#define visit_optional visit_optional_sparc
-#define visit_start_implicit_struct visit_start_implicit_struct_sparc
-#define visit_start_list visit_start_list_sparc
-#define visit_start_struct visit_start_struct_sparc
-#define visit_start_union visit_start_union_sparc
-#define vmsa_cp_reginfo vmsa_cp_reginfo_sparc
-#define vmsa_tcr_el1_write vmsa_tcr_el1_write_sparc
-#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_sparc
-#define vmsa_ttbcr_reset vmsa_ttbcr_reset_sparc
-#define vmsa_ttbcr_write vmsa_ttbcr_write_sparc
-#define vmsa_ttbr_write vmsa_ttbr_write_sparc
-#define write_cpustate_to_list write_cpustate_to_list_sparc
-#define write_list_to_cpustate write_list_to_cpustate_sparc
-#define write_raw_cp_reg write_raw_cp_reg_sparc
-#define X86CPURegister32_lookup X86CPURegister32_lookup_sparc
-#define x86_op_defs x86_op_defs_sparc
-#define xpsr_read xpsr_read_sparc
-#define xpsr_write xpsr_write_sparc
-#define xscale_cpar_write xscale_cpar_write_sparc
-#define xscale_cp_reginfo xscale_cp_reginfo_sparc
-#define cpu_sparc_exec cpu_sparc_exec_sparc
+#define floatx80_infinity floatx80_infinity_sparc
+#define dup_const_func dup_const_func_sparc
+#define gen_helper_raise_exception gen_helper_raise_exception_sparc
+#define gen_helper_raise_interrupt gen_helper_raise_interrupt_sparc
+#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_sparc
+#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_sparc
+#define gen_helper_cpsr_read gen_helper_cpsr_read_sparc
+#define gen_helper_cpsr_write gen_helper_cpsr_write_sparc
 #define helper_compute_psr helper_compute_psr_sparc
 #define helper_compute_C_icc helper_compute_C_icc_sparc
-#define cpu_sparc_init cpu_sparc_init_sparc
 #define cpu_sparc_set_id cpu_sparc_set_id_sparc
-#define sparc_cpu_register_types sparc_cpu_register_types_sparc
+#define cpu_sparc_init cpu_sparc_init_sparc
+#define helper_check_ieee_exceptions helper_check_ieee_exceptions_sparc
 #define helper_fadds helper_fadds_sparc
 #define helper_faddd helper_faddd_sparc
 #define helper_faddq helper_faddq_sparc
@@ -3036,11 +1291,17 @@
 #define helper_fdivd helper_fdivd_sparc
 #define helper_fdivq helper_fdivq_sparc
 #define helper_fsmuld helper_fsmuld_sparc
+#define helper_fsmulq helper_fsmulq_sparc
 #define helper_fdmulq helper_fdmulq_sparc
 #define helper_fnegs helper_fnegs_sparc
+#define helper_fnegd helper_fnegd_sparc
+#define helper_fnegq helper_fnegq_sparc
 #define helper_fitos helper_fitos_sparc
 #define helper_fitod helper_fitod_sparc
 #define helper_fitoq helper_fitoq_sparc
+#define helper_fxtos helper_fxtos_sparc
+#define helper_fxtod helper_fxtod_sparc
+#define helper_fxtoq helper_fxtoq_sparc
 #define helper_fdtos helper_fdtos_sparc
 #define helper_fstod helper_fstod_sparc
 #define helper_fqtos helper_fqtos_sparc
@@ -3050,7 +1311,12 @@
 #define helper_fstoi helper_fstoi_sparc
 #define helper_fdtoi helper_fdtoi_sparc
 #define helper_fqtoi helper_fqtoi_sparc
+#define helper_fstox helper_fstox_sparc
+#define helper_fdtox helper_fdtox_sparc
+#define helper_fqtox helper_fqtox_sparc
 #define helper_fabss helper_fabss_sparc
+#define helper_fabsd helper_fabsd_sparc
+#define helper_fabsq helper_fabsq_sparc
 #define helper_fsqrts helper_fsqrts_sparc
 #define helper_fsqrtd helper_fsqrtd_sparc
 #define helper_fsqrtq helper_fsqrtq_sparc
@@ -3060,33 +1326,91 @@
 #define helper_fcmped helper_fcmped_sparc
 #define helper_fcmpq helper_fcmpq_sparc
 #define helper_fcmpeq helper_fcmpeq_sparc
+#define helper_fcmps_fcc1 helper_fcmps_fcc1_sparc
+#define helper_fcmpd_fcc1 helper_fcmpd_fcc1_sparc
+#define helper_fcmpq_fcc1 helper_fcmpq_fcc1_sparc
+#define helper_fcmps_fcc2 helper_fcmps_fcc2_sparc
+#define helper_fcmpd_fcc2 helper_fcmpd_fcc2_sparc
+#define helper_fcmpq_fcc2 helper_fcmpq_fcc2_sparc
+#define helper_fcmps_fcc3 helper_fcmps_fcc3_sparc
+#define helper_fcmpd_fcc3 helper_fcmpd_fcc3_sparc
+#define helper_fcmpq_fcc3 helper_fcmpq_fcc3_sparc
+#define helper_fcmpes_fcc1 helper_fcmpes_fcc1_sparc
+#define helper_fcmped_fcc1 helper_fcmped_fcc1_sparc
+#define helper_fcmpeq_fcc1 helper_fcmpeq_fcc1_sparc
+#define helper_fcmpes_fcc2 helper_fcmpes_fcc2_sparc
+#define helper_fcmped_fcc2 helper_fcmped_fcc2_sparc
+#define helper_fcmpeq_fcc2 helper_fcmpeq_fcc2_sparc
+#define helper_fcmpes_fcc3 helper_fcmpes_fcc3_sparc
+#define helper_fcmped_fcc3 helper_fcmped_fcc3_sparc
+#define helper_fcmpeq_fcc3 helper_fcmpeq_fcc3_sparc
 #define helper_ldfsr helper_ldfsr_sparc
+#define helper_ldxfsr helper_ldxfsr_sparc
+#define cpu_raise_exception_ra cpu_raise_exception_ra_sparc
+#define helper_raise_exception helper_raise_exception_sparc
 #define helper_debug helper_debug_sparc
+#define helper_tick_set_count helper_tick_set_count_sparc
+#define helper_tick_get_count helper_tick_get_count_sparc
+#define helper_tick_set_limit helper_tick_set_limit_sparc
+#define helper_udiv helper_udiv_sparc
 #define helper_udiv_cc helper_udiv_cc_sparc
+#define helper_sdiv helper_sdiv_sparc
 #define helper_sdiv_cc helper_sdiv_cc_sparc
+#define helper_sdivx helper_sdivx_sparc
+#define helper_udivx helper_udivx_sparc
 #define helper_taddcctv helper_taddcctv_sparc
 #define helper_tsubcctv helper_tsubcctv_sparc
+#define helper_power_down helper_power_down_sparc
 #define sparc_cpu_do_interrupt sparc_cpu_do_interrupt_sparc
+#define leon3_irq_manager leon3_irq_manager_sparc
+#define sparc_cpu_do_interrupt sparc_cpu_do_interrupt_sparc
+#define cpu_tsptr cpu_tsptr_sparc
+#define helper_set_softint helper_set_softint_sparc
+#define helper_clear_softint helper_clear_softint_sparc
+#define helper_write_softint helper_write_softint_sparc
 #define helper_check_align helper_check_align_sparc
 #define helper_ld_asi helper_ld_asi_sparc
 #define helper_st_asi helper_st_asi_sparc
-#define helper_cas_asi helper_cas_asi_sparc
-#define helper_ldqf helper_ldqf_sparc
-#define helper_stqf helper_stqf_sparc
-#define sparc_cpu_unassigned_access sparc_cpu_unassigned_access_sparc
+#define sparc_cpu_do_transaction_failed sparc_cpu_do_transaction_failed_sparc
 #define sparc_cpu_do_unaligned_access sparc_cpu_do_unaligned_access_sparc
-#define sparc_cpu_handle_mmu_fault sparc_cpu_handle_mmu_fault_sparc
-#define dump_mmu dump_mmu_sparc
+#define sparc_cpu_tlb_fill sparc_cpu_tlb_fill_sparc
+#define mmu_probe mmu_probe_sparc
+#define sparc_cpu_memory_rw_debug sparc_cpu_memory_rw_debug_sparc
+#define cpu_get_phys_page_nofault cpu_get_phys_page_nofault_sparc
 #define sparc_cpu_get_phys_page_debug sparc_cpu_get_phys_page_debug_sparc
-#define sparc_reg_reset sparc_reg_reset_sparc
-#define sparc_reg_read sparc_reg_read_sparc
-#define sparc_reg_write sparc_reg_write_sparc
-#define gen_intermediate_code_init gen_intermediate_code_init_sparc
+#define gen_intermediate_code gen_intermediate_code_sparc
+#define sparc_tcg_init sparc_tcg_init_sparc
+#define restore_state_to_opc restore_state_to_opc_sparc
 #define cpu_set_cwp cpu_set_cwp_sparc
 #define cpu_get_psr cpu_get_psr_sparc
+#define cpu_put_psr_raw cpu_put_psr_raw_sparc
 #define cpu_put_psr cpu_put_psr_sparc
 #define cpu_cwp_inc cpu_cwp_inc_sparc
 #define cpu_cwp_dec cpu_cwp_dec_sparc
+#define helper_rett helper_rett_sparc
 #define helper_save helper_save_sparc
 #define helper_restore helper_restore_sparc
+#define helper_flushw helper_flushw_sparc
+#define helper_saved helper_saved_sparc
+#define helper_restored helper_restored_sparc
+#define helper_wrpsr helper_wrpsr_sparc
+#define helper_rdpsr helper_rdpsr_sparc
+#define cpu_get_ccr cpu_get_ccr_sparc
+#define cpu_put_ccr cpu_put_ccr_sparc
+#define cpu_get_cwp64 cpu_get_cwp64_sparc
+#define cpu_put_cwp64 cpu_put_cwp64_sparc
+#define helper_rdccr helper_rdccr_sparc
+#define helper_wrccr helper_wrccr_sparc
+#define helper_rdcwp helper_rdcwp_sparc
+#define helper_wrcwp helper_wrcwp_sparc
+#define cpu_gl_switch_gregs cpu_gl_switch_gregs_sparc
+#define helper_wrgl helper_wrgl_sparc
+#define cpu_change_pstate cpu_change_pstate_sparc
+#define helper_wrpstate helper_wrpstate_sparc
+#define helper_wrpil helper_wrpil_sparc
+#define helper_done helper_done_sparc
+#define helper_retry helper_retry_sparc
+#define sparc_reg_reset sparc_reg_reset_sparc
+#define sparc_reg_read sparc_reg_read_sparc
+#define sparc_reg_write sparc_reg_write_sparc
 #endif
diff --git a/qemu/sparc64.h b/qemu/sparc64.h
index f3895222..5cee6484 100644
--- a/qemu/sparc64.h
+++ b/qemu/sparc64.h
@@ -1,3028 +1,1283 @@
 /* Autogen header for Unicorn Engine - DONOT MODIFY */
-#ifndef UNICORN_AUTOGEN_SPARC64_H
-#define UNICORN_AUTOGEN_SPARC64_H
-#define arm_release arm_release_sparc64
-#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_sparc64
-#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_sparc64
-#define use_idiv_instructions_rt use_idiv_instructions_rt_sparc64
-#define tcg_target_deposit_valid tcg_target_deposit_valid_sparc64
-#define helper_power_down helper_power_down_sparc64
-#define check_exit_request check_exit_request_sparc64
-#define address_space_unregister address_space_unregister_sparc64
-#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_sparc64
-#define phys_mem_clean phys_mem_clean_sparc64
-#define tb_cleanup tb_cleanup_sparc64
+#ifndef UNICORN_AUTOGEN_sparc64_H
+#define UNICORN_AUTOGEN_sparc64_H
+#ifndef UNICORN_ARCH_POSTFIX
+#define UNICORN_ARCH_POSTFIX _sparc64
+#endif
+#define arm_arch arm_arch_sparc64
+#define tb_target_set_jmp_target tb_target_set_jmp_target_sparc64
+#define have_bmi1 have_bmi1_sparc64
+#define have_popcnt have_popcnt_sparc64
+#define have_avx1 have_avx1_sparc64
+#define have_avx2 have_avx2_sparc64
+#define have_isa have_isa_sparc64
+#define have_altivec have_altivec_sparc64
+#define have_vsx have_vsx_sparc64
+#define flush_icache_range flush_icache_range_sparc64
+#define s390_facilities s390_facilities_sparc64
+#define tcg_dump_op tcg_dump_op_sparc64
+#define tcg_dump_ops tcg_dump_ops_sparc64
+#define tcg_gen_and_i64 tcg_gen_and_i64_sparc64
+#define tcg_gen_discard_i64 tcg_gen_discard_i64_sparc64
+#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_sparc64
+#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_sparc64
+#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_sparc64
+#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_sparc64
+#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_sparc64
+#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_sparc64
+#define tcg_gen_ld_i64 tcg_gen_ld_i64_sparc64
+#define tcg_gen_mov_i64 tcg_gen_mov_i64_sparc64
+#define tcg_gen_movi_i64 tcg_gen_movi_i64_sparc64
+#define tcg_gen_mul_i64 tcg_gen_mul_i64_sparc64
+#define tcg_gen_or_i64 tcg_gen_or_i64_sparc64
+#define tcg_gen_sar_i64 tcg_gen_sar_i64_sparc64
+#define tcg_gen_shl_i64 tcg_gen_shl_i64_sparc64
+#define tcg_gen_shr_i64 tcg_gen_shr_i64_sparc64
+#define tcg_gen_st_i64 tcg_gen_st_i64_sparc64
+#define tcg_gen_xor_i64 tcg_gen_xor_i64_sparc64
+#define cpu_icount_to_ns cpu_icount_to_ns_sparc64
+#define cpu_is_stopped cpu_is_stopped_sparc64
+#define cpu_get_ticks cpu_get_ticks_sparc64
+#define cpu_get_clock cpu_get_clock_sparc64
+#define cpu_resume cpu_resume_sparc64
+#define qemu_init_vcpu qemu_init_vcpu_sparc64
+#define cpu_stop_current cpu_stop_current_sparc64
+#define resume_all_vcpus resume_all_vcpus_sparc64
+#define vm_start vm_start_sparc64
+#define address_space_dispatch_compact address_space_dispatch_compact_sparc64
+#define flatview_translate flatview_translate_sparc64
+#define address_space_translate_for_iotlb address_space_translate_for_iotlb_sparc64
+#define qemu_get_cpu qemu_get_cpu_sparc64
+#define cpu_address_space_init cpu_address_space_init_sparc64
+#define cpu_get_address_space cpu_get_address_space_sparc64
+#define cpu_exec_unrealizefn cpu_exec_unrealizefn_sparc64
+#define cpu_exec_initfn cpu_exec_initfn_sparc64
+#define cpu_exec_realizefn cpu_exec_realizefn_sparc64
+#define tb_invalidate_phys_addr tb_invalidate_phys_addr_sparc64
+#define cpu_watchpoint_insert cpu_watchpoint_insert_sparc64
+#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_sparc64
+#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_sparc64
+#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_sparc64
+#define cpu_breakpoint_insert cpu_breakpoint_insert_sparc64
+#define cpu_breakpoint_remove cpu_breakpoint_remove_sparc64
+#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_sparc64
+#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_sparc64
+#define cpu_abort cpu_abort_sparc64
+#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_sparc64
+#define memory_region_section_get_iotlb memory_region_section_get_iotlb_sparc64
+#define flatview_add_to_dispatch flatview_add_to_dispatch_sparc64
+#define qemu_ram_get_host_addr qemu_ram_get_host_addr_sparc64
+#define qemu_ram_get_offset qemu_ram_get_offset_sparc64
+#define qemu_ram_get_used_length qemu_ram_get_used_length_sparc64
+#define qemu_ram_is_shared qemu_ram_is_shared_sparc64
+#define qemu_ram_pagesize qemu_ram_pagesize_sparc64
+#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_sparc64
+#define qemu_ram_alloc qemu_ram_alloc_sparc64
+#define qemu_ram_free qemu_ram_free_sparc64
+#define qemu_map_ram_ptr qemu_map_ram_ptr_sparc64
+#define qemu_ram_block_host_offset qemu_ram_block_host_offset_sparc64
+#define qemu_ram_block_from_host qemu_ram_block_from_host_sparc64
+#define qemu_ram_addr_from_host qemu_ram_addr_from_host_sparc64
+#define cpu_check_watchpoint cpu_check_watchpoint_sparc64
+#define iotlb_to_section iotlb_to_section_sparc64
+#define address_space_dispatch_new address_space_dispatch_new_sparc64
+#define address_space_dispatch_free address_space_dispatch_free_sparc64
+#define flatview_read_continue flatview_read_continue_sparc64
+#define address_space_read_full address_space_read_full_sparc64
+#define address_space_write address_space_write_sparc64
+#define address_space_rw address_space_rw_sparc64
+#define cpu_physical_memory_rw cpu_physical_memory_rw_sparc64
+#define address_space_write_rom address_space_write_rom_sparc64
+#define cpu_flush_icache_range cpu_flush_icache_range_sparc64
+#define cpu_exec_init_all cpu_exec_init_all_sparc64
+#define address_space_access_valid address_space_access_valid_sparc64
+#define address_space_map address_space_map_sparc64
+#define address_space_unmap address_space_unmap_sparc64
+#define cpu_physical_memory_map cpu_physical_memory_map_sparc64
+#define cpu_physical_memory_unmap cpu_physical_memory_unmap_sparc64
+#define cpu_memory_rw_debug cpu_memory_rw_debug_sparc64
+#define qemu_target_page_size qemu_target_page_size_sparc64
+#define qemu_target_page_bits qemu_target_page_bits_sparc64
+#define qemu_target_page_bits_min qemu_target_page_bits_min_sparc64
+#define target_words_bigendian target_words_bigendian_sparc64
+#define cpu_physical_memory_is_io cpu_physical_memory_is_io_sparc64
+#define ram_block_discard_range ram_block_discard_range_sparc64
+#define ramblock_is_pmem ramblock_is_pmem_sparc64
+#define page_size_init page_size_init_sparc64
+#define set_preferred_target_page_bits set_preferred_target_page_bits_sparc64
+#define finalize_target_page_bits finalize_target_page_bits_sparc64
+#define cpu_outb cpu_outb_sparc64
+#define cpu_outw cpu_outw_sparc64
+#define cpu_outl cpu_outl_sparc64
+#define cpu_inb cpu_inb_sparc64
+#define cpu_inw cpu_inw_sparc64
+#define cpu_inl cpu_inl_sparc64
 #define memory_map memory_map_sparc64
+#define memory_map_io memory_map_io_sparc64
 #define memory_map_ptr memory_map_ptr_sparc64
 #define memory_unmap memory_unmap_sparc64
 #define memory_free memory_free_sparc64
-#define free_code_gen_buffer free_code_gen_buffer_sparc64
-#define helper_raise_exception helper_raise_exception_sparc64
-#define tcg_enabled tcg_enabled_sparc64
-#define tcg_exec_init tcg_exec_init_sparc64
-#define memory_register_types memory_register_types_sparc64
-#define cpu_exec_init_all cpu_exec_init_all_sparc64
-#define vm_start vm_start_sparc64
-#define resume_all_vcpus resume_all_vcpus_sparc64
-#define a15_l2ctlr_read a15_l2ctlr_read_sparc64
-#define a64_translate_init a64_translate_init_sparc64
-#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_sparc64
-#define aa64_cacheop_access aa64_cacheop_access_sparc64
-#define aa64_daif_access aa64_daif_access_sparc64
-#define aa64_daif_write aa64_daif_write_sparc64
-#define aa64_dczid_read aa64_dczid_read_sparc64
-#define aa64_fpcr_read aa64_fpcr_read_sparc64
-#define aa64_fpcr_write aa64_fpcr_write_sparc64
-#define aa64_fpsr_read aa64_fpsr_read_sparc64
-#define aa64_fpsr_write aa64_fpsr_write_sparc64
-#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_sparc64
-#define aa64_zva_access aa64_zva_access_sparc64
-#define aarch64_banked_spsr_index aarch64_banked_spsr_index_sparc64
-#define aarch64_restore_sp aarch64_restore_sp_sparc64
-#define aarch64_save_sp aarch64_save_sp_sparc64
-#define accel_find accel_find_sparc64
-#define accel_init_machine accel_init_machine_sparc64
-#define accel_type accel_type_sparc64
-#define access_with_adjusted_size access_with_adjusted_size_sparc64
-#define add128 add128_sparc64
-#define add16_sat add16_sat_sparc64
-#define add16_usat add16_usat_sparc64
-#define add192 add192_sparc64
-#define add8_sat add8_sat_sparc64
-#define add8_usat add8_usat_sparc64
-#define add_cpreg_to_hashtable add_cpreg_to_hashtable_sparc64
-#define add_cpreg_to_list add_cpreg_to_list_sparc64
-#define addFloat128Sigs addFloat128Sigs_sparc64
-#define addFloat32Sigs addFloat32Sigs_sparc64
-#define addFloat64Sigs addFloat64Sigs_sparc64
-#define addFloatx80Sigs addFloatx80Sigs_sparc64
-#define add_qemu_ldst_label add_qemu_ldst_label_sparc64
-#define address_space_access_valid address_space_access_valid_sparc64
-#define address_space_destroy address_space_destroy_sparc64
-#define address_space_destroy_dispatch address_space_destroy_dispatch_sparc64
-#define address_space_get_flatview address_space_get_flatview_sparc64
-#define address_space_init address_space_init_sparc64
-#define address_space_init_dispatch address_space_init_dispatch_sparc64
-#define address_space_lookup_region address_space_lookup_region_sparc64
-#define address_space_map address_space_map_sparc64
-#define address_space_read address_space_read_sparc64
-#define address_space_rw address_space_rw_sparc64
-#define address_space_translate address_space_translate_sparc64
-#define address_space_translate_for_iotlb address_space_translate_for_iotlb_sparc64
-#define address_space_translate_internal address_space_translate_internal_sparc64
-#define address_space_unmap address_space_unmap_sparc64
-#define address_space_update_topology address_space_update_topology_sparc64
-#define address_space_update_topology_pass address_space_update_topology_pass_sparc64
-#define address_space_write address_space_write_sparc64
-#define addrrange_contains addrrange_contains_sparc64
-#define addrrange_end addrrange_end_sparc64
-#define addrrange_equal addrrange_equal_sparc64
-#define addrrange_intersection addrrange_intersection_sparc64
-#define addrrange_intersects addrrange_intersects_sparc64
-#define addrrange_make addrrange_make_sparc64
-#define adjust_endianness adjust_endianness_sparc64
-#define all_helpers all_helpers_sparc64
-#define alloc_code_gen_buffer alloc_code_gen_buffer_sparc64
-#define alloc_entry alloc_entry_sparc64
-#define always_true always_true_sparc64
-#define arm1026_initfn arm1026_initfn_sparc64
-#define arm1136_initfn arm1136_initfn_sparc64
-#define arm1136_r2_initfn arm1136_r2_initfn_sparc64
-#define arm1176_initfn arm1176_initfn_sparc64
-#define arm11mpcore_initfn arm11mpcore_initfn_sparc64
-#define arm926_initfn arm926_initfn_sparc64
-#define arm946_initfn arm946_initfn_sparc64
-#define arm_ccnt_enabled arm_ccnt_enabled_sparc64
-#define arm_cp_read_zero arm_cp_read_zero_sparc64
-#define arm_cp_reset_ignore arm_cp_reset_ignore_sparc64
-#define arm_cpu_do_interrupt arm_cpu_do_interrupt_sparc64
-#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_sparc64
-#define arm_cpu_finalizefn arm_cpu_finalizefn_sparc64
-#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_sparc64
-#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_sparc64
-#define arm_cpu_initfn arm_cpu_initfn_sparc64
-#define arm_cpu_list arm_cpu_list_sparc64
-#define cpu_loop_exit cpu_loop_exit_sparc64
-#define arm_cpu_post_init arm_cpu_post_init_sparc64
-#define arm_cpu_realizefn arm_cpu_realizefn_sparc64
-#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_sparc64
-#define arm_cpu_register_types arm_cpu_register_types_sparc64
-#define cpu_resume_from_signal cpu_resume_from_signal_sparc64
-#define arm_cpus arm_cpus_sparc64
-#define arm_cpu_set_pc arm_cpu_set_pc_sparc64
-#define arm_cp_write_ignore arm_cp_write_ignore_sparc64
-#define arm_current_el arm_current_el_sparc64
-#define arm_dc_feature arm_dc_feature_sparc64
-#define arm_debug_excp_handler arm_debug_excp_handler_sparc64
-#define arm_debug_target_el arm_debug_target_el_sparc64
-#define arm_el_is_aa64 arm_el_is_aa64_sparc64
-#define arm_env_get_cpu arm_env_get_cpu_sparc64
-#define arm_excp_target_el arm_excp_target_el_sparc64
-#define arm_excp_unmasked arm_excp_unmasked_sparc64
-#define arm_feature arm_feature_sparc64
-#define arm_generate_debug_exceptions arm_generate_debug_exceptions_sparc64
-#define gen_intermediate_code gen_intermediate_code_sparc64
-#define gen_intermediate_code_pc gen_intermediate_code_pc_sparc64
-#define arm_gen_test_cc arm_gen_test_cc_sparc64
-#define arm_gt_ptimer_cb arm_gt_ptimer_cb_sparc64
-#define arm_gt_vtimer_cb arm_gt_vtimer_cb_sparc64
-#define arm_handle_psci_call arm_handle_psci_call_sparc64
-#define arm_is_psci_call arm_is_psci_call_sparc64
-#define arm_is_secure arm_is_secure_sparc64
-#define arm_is_secure_below_el3 arm_is_secure_below_el3_sparc64
-#define arm_ldl_code arm_ldl_code_sparc64
-#define arm_lduw_code arm_lduw_code_sparc64
-#define arm_log_exception arm_log_exception_sparc64
-#define arm_reg_read arm_reg_read_sparc64
-#define arm_reg_reset arm_reg_reset_sparc64
-#define arm_reg_write arm_reg_write_sparc64
-#define restore_state_to_opc restore_state_to_opc_sparc64
-#define arm_rmode_to_sf arm_rmode_to_sf_sparc64
-#define arm_singlestep_active arm_singlestep_active_sparc64
-#define tlb_fill tlb_fill_sparc64
-#define tlb_flush tlb_flush_sparc64
-#define tlb_flush_page tlb_flush_page_sparc64
-#define tlb_set_page tlb_set_page_sparc64
-#define arm_translate_init arm_translate_init_sparc64
-#define arm_v7m_class_init arm_v7m_class_init_sparc64
-#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_sparc64
-#define ats_access ats_access_sparc64
-#define ats_write ats_write_sparc64
-#define bad_mode_switch bad_mode_switch_sparc64
-#define bank_number bank_number_sparc64
-#define bitmap_zero_extend bitmap_zero_extend_sparc64
-#define bp_wp_matches bp_wp_matches_sparc64
-#define breakpoint_invalidate breakpoint_invalidate_sparc64
-#define build_page_bitmap build_page_bitmap_sparc64
-#define bus_add_child bus_add_child_sparc64
-#define bus_class_init bus_class_init_sparc64
-#define bus_info bus_info_sparc64
-#define bus_unparent bus_unparent_sparc64
-#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_sparc64
-#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_sparc64
-#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_sparc64
-#define call_recip_estimate call_recip_estimate_sparc64
-#define can_merge can_merge_sparc64
-#define capacity_increase capacity_increase_sparc64
-#define ccsidr_read ccsidr_read_sparc64
-#define check_ap check_ap_sparc64
-#define check_breakpoints check_breakpoints_sparc64
-#define check_watchpoints check_watchpoints_sparc64
-#define cho cho_sparc64
-#define clear_bit clear_bit_sparc64
-#define clz32 clz32_sparc64
-#define clz64 clz64_sparc64
-#define cmp_flatrange_addr cmp_flatrange_addr_sparc64
-#define code_gen_alloc code_gen_alloc_sparc64
-#define commonNaNToFloat128 commonNaNToFloat128_sparc64
-#define commonNaNToFloat16 commonNaNToFloat16_sparc64
-#define commonNaNToFloat32 commonNaNToFloat32_sparc64
-#define commonNaNToFloat64 commonNaNToFloat64_sparc64
-#define commonNaNToFloatx80 commonNaNToFloatx80_sparc64
-#define compute_abs_deadline compute_abs_deadline_sparc64
-#define cond_name cond_name_sparc64
-#define configure_accelerator configure_accelerator_sparc64
-#define container_get container_get_sparc64
-#define container_info container_info_sparc64
-#define container_register_types container_register_types_sparc64
-#define contextidr_write contextidr_write_sparc64
-#define core_log_global_start core_log_global_start_sparc64
-#define core_log_global_stop core_log_global_stop_sparc64
-#define core_memory_listener core_memory_listener_sparc64
-#define cortexa15_cp_reginfo cortexa15_cp_reginfo_sparc64
-#define cortex_a15_initfn cortex_a15_initfn_sparc64
-#define cortexa8_cp_reginfo cortexa8_cp_reginfo_sparc64
-#define cortex_a8_initfn cortex_a8_initfn_sparc64
-#define cortexa9_cp_reginfo cortexa9_cp_reginfo_sparc64
-#define cortex_a9_initfn cortex_a9_initfn_sparc64
-#define cortex_m3_initfn cortex_m3_initfn_sparc64
-#define count_cpreg count_cpreg_sparc64
-#define countLeadingZeros32 countLeadingZeros32_sparc64
-#define countLeadingZeros64 countLeadingZeros64_sparc64
-#define cp_access_ok cp_access_ok_sparc64
-#define cpacr_write cpacr_write_sparc64
-#define cpreg_field_is_64bit cpreg_field_is_64bit_sparc64
-#define cp_reginfo cp_reginfo_sparc64
-#define cpreg_key_compare cpreg_key_compare_sparc64
-#define cpreg_make_keylist cpreg_make_keylist_sparc64
-#define cp_reg_reset cp_reg_reset_sparc64
-#define cpreg_to_kvm_id cpreg_to_kvm_id_sparc64
-#define cpsr_read cpsr_read_sparc64
-#define cpsr_write cpsr_write_sparc64
-#define cptype_valid cptype_valid_sparc64
-#define cpu_abort cpu_abort_sparc64
-#define cpu_arm_exec cpu_arm_exec_sparc64
-#define cpu_arm_gen_code cpu_arm_gen_code_sparc64
-#define cpu_arm_init cpu_arm_init_sparc64
-#define cpu_breakpoint_insert cpu_breakpoint_insert_sparc64
-#define cpu_breakpoint_remove cpu_breakpoint_remove_sparc64
-#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_sparc64
-#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_sparc64
-#define cpu_can_do_io cpu_can_do_io_sparc64
-#define cpu_can_run cpu_can_run_sparc64
-#define cpu_class_init cpu_class_init_sparc64
-#define cpu_common_class_by_name cpu_common_class_by_name_sparc64
-#define cpu_common_exec_interrupt cpu_common_exec_interrupt_sparc64
-#define cpu_common_get_arch_id cpu_common_get_arch_id_sparc64
-#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_sparc64
-#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_sparc64
-#define cpu_common_has_work cpu_common_has_work_sparc64
-#define cpu_common_initfn cpu_common_initfn_sparc64
-#define cpu_common_noop cpu_common_noop_sparc64
-#define cpu_common_parse_features cpu_common_parse_features_sparc64
-#define cpu_common_realizefn cpu_common_realizefn_sparc64
-#define cpu_common_reset cpu_common_reset_sparc64
-#define cpu_dump_statistics cpu_dump_statistics_sparc64
-#define cpu_exec_init cpu_exec_init_sparc64
-#define cpu_flush_icache_range cpu_flush_icache_range_sparc64
-#define cpu_gen_init cpu_gen_init_sparc64
-#define cpu_get_clock cpu_get_clock_sparc64
-#define cpu_get_real_ticks cpu_get_real_ticks_sparc64
-#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_sparc64
-#define cpu_handle_debug_exception cpu_handle_debug_exception_sparc64
-#define cpu_handle_guest_debug cpu_handle_guest_debug_sparc64
-#define cpu_inb cpu_inb_sparc64
-#define cpu_inl cpu_inl_sparc64
-#define cpu_interrupt cpu_interrupt_sparc64
-#define cpu_interrupt_handler cpu_interrupt_handler_sparc64
-#define cpu_inw cpu_inw_sparc64
-#define cpu_io_recompile cpu_io_recompile_sparc64
-#define cpu_is_stopped cpu_is_stopped_sparc64
-#define cpu_ldl_code cpu_ldl_code_sparc64
-#define cpu_ldub_code cpu_ldub_code_sparc64
-#define cpu_lduw_code cpu_lduw_code_sparc64
-#define cpu_memory_rw_debug cpu_memory_rw_debug_sparc64
-#define cpu_mmu_index cpu_mmu_index_sparc64
-#define cpu_outb cpu_outb_sparc64
-#define cpu_outl cpu_outl_sparc64
-#define cpu_outw cpu_outw_sparc64
-#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_sparc64
-#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_sparc64
-#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_sparc64
-#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_sparc64
-#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_sparc64
-#define cpu_physical_memory_is_io cpu_physical_memory_is_io_sparc64
-#define cpu_physical_memory_map cpu_physical_memory_map_sparc64
-#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_sparc64
-#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_sparc64
-#define cpu_physical_memory_rw cpu_physical_memory_rw_sparc64
-#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_sparc64
-#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_sparc64
-#define cpu_physical_memory_unmap cpu_physical_memory_unmap_sparc64
-#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_sparc64
-#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_sparc64
-#define cpu_register cpu_register_sparc64
-#define cpu_register_types cpu_register_types_sparc64
-#define cpu_restore_state cpu_restore_state_sparc64
-#define cpu_restore_state_from_tb cpu_restore_state_from_tb_sparc64
-#define cpu_single_step cpu_single_step_sparc64
-#define cpu_tb_exec cpu_tb_exec_sparc64
-#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_sparc64
-#define cpu_to_be64 cpu_to_be64_sparc64
-#define cpu_to_le32 cpu_to_le32_sparc64
-#define cpu_to_le64 cpu_to_le64_sparc64
-#define cpu_type_info cpu_type_info_sparc64
-#define cpu_unassigned_access cpu_unassigned_access_sparc64
-#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_sparc64
-#define cpu_watchpoint_insert cpu_watchpoint_insert_sparc64
-#define cpu_watchpoint_remove cpu_watchpoint_remove_sparc64
-#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_sparc64
-#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_sparc64
-#define crc32c_table crc32c_table_sparc64
-#define create_new_memory_mapping create_new_memory_mapping_sparc64
-#define csselr_write csselr_write_sparc64
-#define cto32 cto32_sparc64
-#define ctr_el0_access ctr_el0_access_sparc64
-#define ctz32 ctz32_sparc64
-#define ctz64 ctz64_sparc64
-#define dacr_write dacr_write_sparc64
-#define dbgbcr_write dbgbcr_write_sparc64
-#define dbgbvr_write dbgbvr_write_sparc64
-#define dbgwcr_write dbgwcr_write_sparc64
-#define dbgwvr_write dbgwvr_write_sparc64
-#define debug_cp_reginfo debug_cp_reginfo_sparc64
-#define debug_frame debug_frame_sparc64
-#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_sparc64
-#define define_arm_cp_regs define_arm_cp_regs_sparc64
-#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_sparc64
-#define define_debug_regs define_debug_regs_sparc64
-#define define_one_arm_cp_reg define_one_arm_cp_reg_sparc64
-#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_sparc64
-#define deposit32 deposit32_sparc64
-#define deposit64 deposit64_sparc64
-#define deregister_tm_clones deregister_tm_clones_sparc64
-#define device_class_base_init device_class_base_init_sparc64
-#define device_class_init device_class_init_sparc64
-#define device_finalize device_finalize_sparc64
-#define device_get_realized device_get_realized_sparc64
-#define device_initfn device_initfn_sparc64
-#define device_post_init device_post_init_sparc64
-#define device_reset device_reset_sparc64
-#define device_set_realized device_set_realized_sparc64
-#define device_type_info device_type_info_sparc64
-#define disas_arm_insn disas_arm_insn_sparc64
-#define disas_coproc_insn disas_coproc_insn_sparc64
-#define disas_dsp_insn disas_dsp_insn_sparc64
-#define disas_iwmmxt_insn disas_iwmmxt_insn_sparc64
-#define disas_neon_data_insn disas_neon_data_insn_sparc64
-#define disas_neon_ls_insn disas_neon_ls_insn_sparc64
-#define disas_thumb2_insn disas_thumb2_insn_sparc64
-#define disas_thumb_insn disas_thumb_insn_sparc64
-#define disas_vfp_insn disas_vfp_insn_sparc64
-#define disas_vfp_v8_insn disas_vfp_v8_insn_sparc64
-#define do_arm_semihosting do_arm_semihosting_sparc64
-#define do_clz16 do_clz16_sparc64
-#define do_clz8 do_clz8_sparc64
-#define do_constant_folding do_constant_folding_sparc64
-#define do_constant_folding_2 do_constant_folding_2_sparc64
-#define do_constant_folding_cond do_constant_folding_cond_sparc64
-#define do_constant_folding_cond2 do_constant_folding_cond2_sparc64
-#define do_constant_folding_cond_32 do_constant_folding_cond_32_sparc64
-#define do_constant_folding_cond_64 do_constant_folding_cond_64_sparc64
-#define do_constant_folding_cond_eq do_constant_folding_cond_eq_sparc64
-#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_sparc64
-#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_sparc64
-#define do_ssat do_ssat_sparc64
-#define do_usad do_usad_sparc64
-#define do_usat do_usat_sparc64
-#define do_v7m_exception_exit do_v7m_exception_exit_sparc64
-#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_sparc64
-#define dummy_func dummy_func_sparc64
-#define dummy_section dummy_section_sparc64
-#define _DYNAMIC _DYNAMIC_sparc64
-#define _edata _edata_sparc64
-#define _end _end_sparc64
-#define end_list end_list_sparc64
-#define eq128 eq128_sparc64
-#define ErrorClass_lookup ErrorClass_lookup_sparc64
-#define error_copy error_copy_sparc64
-#define error_exit error_exit_sparc64
-#define error_get_class error_get_class_sparc64
-#define error_get_pretty error_get_pretty_sparc64
-#define error_setg_file_open error_setg_file_open_sparc64
-#define estimateDiv128To64 estimateDiv128To64_sparc64
-#define estimateSqrt32 estimateSqrt32_sparc64
-#define excnames excnames_sparc64
-#define excp_is_internal excp_is_internal_sparc64
-#define extended_addresses_enabled extended_addresses_enabled_sparc64
-#define extended_mpu_ap_bits extended_mpu_ap_bits_sparc64
-#define extract32 extract32_sparc64
-#define extract64 extract64_sparc64
-#define extractFloat128Exp extractFloat128Exp_sparc64
-#define extractFloat128Frac0 extractFloat128Frac0_sparc64
-#define extractFloat128Frac1 extractFloat128Frac1_sparc64
-#define extractFloat128Sign extractFloat128Sign_sparc64
-#define extractFloat16Exp extractFloat16Exp_sparc64
-#define extractFloat16Frac extractFloat16Frac_sparc64
-#define extractFloat16Sign extractFloat16Sign_sparc64
-#define extractFloat32Exp extractFloat32Exp_sparc64
-#define extractFloat32Frac extractFloat32Frac_sparc64
-#define extractFloat32Sign extractFloat32Sign_sparc64
-#define extractFloat64Exp extractFloat64Exp_sparc64
-#define extractFloat64Frac extractFloat64Frac_sparc64
-#define extractFloat64Sign extractFloat64Sign_sparc64
-#define extractFloatx80Exp extractFloatx80Exp_sparc64
-#define extractFloatx80Frac extractFloatx80Frac_sparc64
-#define extractFloatx80Sign extractFloatx80Sign_sparc64
-#define fcse_write fcse_write_sparc64
-#define find_better_copy find_better_copy_sparc64
-#define find_default_machine find_default_machine_sparc64
-#define find_desc_by_name find_desc_by_name_sparc64
-#define find_first_bit find_first_bit_sparc64
-#define find_paging_enabled_cpu find_paging_enabled_cpu_sparc64
-#define find_ram_block find_ram_block_sparc64
-#define find_ram_offset find_ram_offset_sparc64
-#define find_string find_string_sparc64
-#define find_type find_type_sparc64
-#define _fini _fini_sparc64
-#define flatrange_equal flatrange_equal_sparc64
-#define flatview_destroy flatview_destroy_sparc64
-#define flatview_init flatview_init_sparc64
-#define flatview_insert flatview_insert_sparc64
-#define flatview_lookup flatview_lookup_sparc64
-#define flatview_ref flatview_ref_sparc64
-#define flatview_simplify flatview_simplify_sparc64
 #define flatview_unref flatview_unref_sparc64
-#define float128_add float128_add_sparc64
-#define float128_compare float128_compare_sparc64
-#define float128_compare_internal float128_compare_internal_sparc64
-#define float128_compare_quiet float128_compare_quiet_sparc64
-#define float128_default_nan float128_default_nan_sparc64
-#define float128_div float128_div_sparc64
-#define float128_eq float128_eq_sparc64
-#define float128_eq_quiet float128_eq_quiet_sparc64
-#define float128_is_quiet_nan float128_is_quiet_nan_sparc64
-#define float128_is_signaling_nan float128_is_signaling_nan_sparc64
-#define float128_le float128_le_sparc64
-#define float128_le_quiet float128_le_quiet_sparc64
-#define float128_lt float128_lt_sparc64
-#define float128_lt_quiet float128_lt_quiet_sparc64
-#define float128_maybe_silence_nan float128_maybe_silence_nan_sparc64
-#define float128_mul float128_mul_sparc64
-#define float128_rem float128_rem_sparc64
-#define float128_round_to_int float128_round_to_int_sparc64
-#define float128_scalbn float128_scalbn_sparc64
-#define float128_sqrt float128_sqrt_sparc64
-#define float128_sub float128_sub_sparc64
-#define float128ToCommonNaN float128ToCommonNaN_sparc64
-#define float128_to_float32 float128_to_float32_sparc64
-#define float128_to_float64 float128_to_float64_sparc64
-#define float128_to_floatx80 float128_to_floatx80_sparc64
-#define float128_to_int32 float128_to_int32_sparc64
-#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_sparc64
-#define float128_to_int64 float128_to_int64_sparc64
-#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_sparc64
-#define float128_unordered float128_unordered_sparc64
-#define float128_unordered_quiet float128_unordered_quiet_sparc64
-#define float16_default_nan float16_default_nan_sparc64
+#define address_space_get_flatview address_space_get_flatview_sparc64
+#define memory_region_transaction_begin memory_region_transaction_begin_sparc64
+#define memory_region_transaction_commit memory_region_transaction_commit_sparc64
+#define memory_region_init memory_region_init_sparc64
+#define memory_region_access_valid memory_region_access_valid_sparc64
+#define memory_region_dispatch_read memory_region_dispatch_read_sparc64
+#define memory_region_dispatch_write memory_region_dispatch_write_sparc64
+#define memory_region_init_io memory_region_init_io_sparc64
+#define memory_region_init_ram_ptr memory_region_init_ram_ptr_sparc64
+#define memory_region_size memory_region_size_sparc64
+#define memory_region_set_readonly memory_region_set_readonly_sparc64
+#define memory_region_get_ram_ptr memory_region_get_ram_ptr_sparc64
+#define memory_region_from_host memory_region_from_host_sparc64
+#define memory_region_get_ram_addr memory_region_get_ram_addr_sparc64
+#define memory_region_add_subregion memory_region_add_subregion_sparc64
+#define memory_region_del_subregion memory_region_del_subregion_sparc64
+#define memory_region_find memory_region_find_sparc64
+#define memory_listener_register memory_listener_register_sparc64
+#define memory_listener_unregister memory_listener_unregister_sparc64
+#define address_space_remove_listeners address_space_remove_listeners_sparc64
+#define address_space_init address_space_init_sparc64
+#define address_space_destroy address_space_destroy_sparc64
+#define memory_region_init_ram memory_region_init_ram_sparc64
+#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_sparc64
+#define exec_inline_op exec_inline_op_sparc64
+#define floatx80_default_nan floatx80_default_nan_sparc64
+#define float_raise float_raise_sparc64
 #define float16_is_quiet_nan float16_is_quiet_nan_sparc64
 #define float16_is_signaling_nan float16_is_signaling_nan_sparc64
-#define float16_maybe_silence_nan float16_maybe_silence_nan_sparc64
-#define float16ToCommonNaN float16ToCommonNaN_sparc64
-#define float16_to_float32 float16_to_float32_sparc64
-#define float16_to_float64 float16_to_float64_sparc64
-#define float32_abs float32_abs_sparc64
-#define float32_add float32_add_sparc64
-#define float32_chs float32_chs_sparc64
-#define float32_compare float32_compare_sparc64
-#define float32_compare_internal float32_compare_internal_sparc64
-#define float32_compare_quiet float32_compare_quiet_sparc64
-#define float32_default_nan float32_default_nan_sparc64
-#define float32_div float32_div_sparc64
-#define float32_eq float32_eq_sparc64
-#define float32_eq_quiet float32_eq_quiet_sparc64
-#define float32_exp2 float32_exp2_sparc64
-#define float32_exp2_coefficients float32_exp2_coefficients_sparc64
-#define float32_is_any_nan float32_is_any_nan_sparc64
-#define float32_is_infinity float32_is_infinity_sparc64
-#define float32_is_neg float32_is_neg_sparc64
 #define float32_is_quiet_nan float32_is_quiet_nan_sparc64
 #define float32_is_signaling_nan float32_is_signaling_nan_sparc64
-#define float32_is_zero float32_is_zero_sparc64
-#define float32_is_zero_or_denormal float32_is_zero_or_denormal_sparc64
-#define float32_le float32_le_sparc64
-#define float32_le_quiet float32_le_quiet_sparc64
-#define float32_log2 float32_log2_sparc64
-#define float32_lt float32_lt_sparc64
-#define float32_lt_quiet float32_lt_quiet_sparc64
+#define float64_is_quiet_nan float64_is_quiet_nan_sparc64
+#define float64_is_signaling_nan float64_is_signaling_nan_sparc64
+#define floatx80_is_quiet_nan floatx80_is_quiet_nan_sparc64
+#define floatx80_is_signaling_nan floatx80_is_signaling_nan_sparc64
+#define floatx80_silence_nan floatx80_silence_nan_sparc64
+#define propagateFloatx80NaN propagateFloatx80NaN_sparc64
+#define float128_is_quiet_nan float128_is_quiet_nan_sparc64
+#define float128_is_signaling_nan float128_is_signaling_nan_sparc64
+#define float128_silence_nan float128_silence_nan_sparc64
+#define float16_add float16_add_sparc64
+#define float16_sub float16_sub_sparc64
+#define float32_add float32_add_sparc64
+#define float32_sub float32_sub_sparc64
+#define float64_add float64_add_sparc64
+#define float64_sub float64_sub_sparc64
+#define float16_mul float16_mul_sparc64
+#define float32_mul float32_mul_sparc64
+#define float64_mul float64_mul_sparc64
+#define float16_muladd float16_muladd_sparc64
+#define float32_muladd float32_muladd_sparc64
+#define float64_muladd float64_muladd_sparc64
+#define float16_div float16_div_sparc64
+#define float32_div float32_div_sparc64
+#define float64_div float64_div_sparc64
+#define float16_to_float32 float16_to_float32_sparc64
+#define float16_to_float64 float16_to_float64_sparc64
+#define float32_to_float16 float32_to_float16_sparc64
+#define float32_to_float64 float32_to_float64_sparc64
+#define float64_to_float16 float64_to_float16_sparc64
+#define float64_to_float32 float64_to_float32_sparc64
+#define float16_round_to_int float16_round_to_int_sparc64
+#define float32_round_to_int float32_round_to_int_sparc64
+#define float64_round_to_int float64_round_to_int_sparc64
+#define float16_to_int16_scalbn float16_to_int16_scalbn_sparc64
+#define float16_to_int32_scalbn float16_to_int32_scalbn_sparc64
+#define float16_to_int64_scalbn float16_to_int64_scalbn_sparc64
+#define float32_to_int16_scalbn float32_to_int16_scalbn_sparc64
+#define float32_to_int32_scalbn float32_to_int32_scalbn_sparc64
+#define float32_to_int64_scalbn float32_to_int64_scalbn_sparc64
+#define float64_to_int16_scalbn float64_to_int16_scalbn_sparc64
+#define float64_to_int32_scalbn float64_to_int32_scalbn_sparc64
+#define float64_to_int64_scalbn float64_to_int64_scalbn_sparc64
+#define float16_to_int16 float16_to_int16_sparc64
+#define float16_to_int32 float16_to_int32_sparc64
+#define float16_to_int64 float16_to_int64_sparc64
+#define float32_to_int16 float32_to_int16_sparc64
+#define float32_to_int32 float32_to_int32_sparc64
+#define float32_to_int64 float32_to_int64_sparc64
+#define float64_to_int16 float64_to_int16_sparc64
+#define float64_to_int32 float64_to_int32_sparc64
+#define float64_to_int64 float64_to_int64_sparc64
+#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_sparc64
+#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_sparc64
+#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_sparc64
+#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_sparc64
+#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_sparc64
+#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_sparc64
+#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_sparc64
+#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_sparc64
+#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_sparc64
+#define float16_to_uint16_scalbn float16_to_uint16_scalbn_sparc64
+#define float16_to_uint32_scalbn float16_to_uint32_scalbn_sparc64
+#define float16_to_uint64_scalbn float16_to_uint64_scalbn_sparc64
+#define float32_to_uint16_scalbn float32_to_uint16_scalbn_sparc64
+#define float32_to_uint32_scalbn float32_to_uint32_scalbn_sparc64
+#define float32_to_uint64_scalbn float32_to_uint64_scalbn_sparc64
+#define float64_to_uint16_scalbn float64_to_uint16_scalbn_sparc64
+#define float64_to_uint32_scalbn float64_to_uint32_scalbn_sparc64
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_sparc64
+#define float16_to_uint16 float16_to_uint16_sparc64
+#define float16_to_uint32 float16_to_uint32_sparc64
+#define float16_to_uint64 float16_to_uint64_sparc64
+#define float32_to_uint16 float32_to_uint16_sparc64
+#define float32_to_uint32 float32_to_uint32_sparc64
+#define float32_to_uint64 float32_to_uint64_sparc64
+#define float64_to_uint16 float64_to_uint16_sparc64
+#define float64_to_uint32 float64_to_uint32_sparc64
+#define float64_to_uint64 float64_to_uint64_sparc64
+#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_sparc64
+#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_sparc64
+#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_sparc64
+#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_sparc64
+#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_sparc64
+#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_sparc64
+#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_sparc64
+#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_sparc64
+#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_sparc64
+#define int64_to_float16_scalbn int64_to_float16_scalbn_sparc64
+#define int32_to_float16_scalbn int32_to_float16_scalbn_sparc64
+#define int16_to_float16_scalbn int16_to_float16_scalbn_sparc64
+#define int64_to_float16 int64_to_float16_sparc64
+#define int32_to_float16 int32_to_float16_sparc64
+#define int16_to_float16 int16_to_float16_sparc64
+#define int64_to_float32_scalbn int64_to_float32_scalbn_sparc64
+#define int32_to_float32_scalbn int32_to_float32_scalbn_sparc64
+#define int16_to_float32_scalbn int16_to_float32_scalbn_sparc64
+#define int64_to_float32 int64_to_float32_sparc64
+#define int32_to_float32 int32_to_float32_sparc64
+#define int16_to_float32 int16_to_float32_sparc64
+#define int64_to_float64_scalbn int64_to_float64_scalbn_sparc64
+#define int32_to_float64_scalbn int32_to_float64_scalbn_sparc64
+#define int16_to_float64_scalbn int16_to_float64_scalbn_sparc64
+#define int64_to_float64 int64_to_float64_sparc64
+#define int32_to_float64 int32_to_float64_sparc64
+#define int16_to_float64 int16_to_float64_sparc64
+#define uint64_to_float16_scalbn uint64_to_float16_scalbn_sparc64
+#define uint32_to_float16_scalbn uint32_to_float16_scalbn_sparc64
+#define uint16_to_float16_scalbn uint16_to_float16_scalbn_sparc64
+#define uint64_to_float16 uint64_to_float16_sparc64
+#define uint32_to_float16 uint32_to_float16_sparc64
+#define uint16_to_float16 uint16_to_float16_sparc64
+#define uint64_to_float32_scalbn uint64_to_float32_scalbn_sparc64
+#define uint32_to_float32_scalbn uint32_to_float32_scalbn_sparc64
+#define uint16_to_float32_scalbn uint16_to_float32_scalbn_sparc64
+#define uint64_to_float32 uint64_to_float32_sparc64
+#define uint32_to_float32 uint32_to_float32_sparc64
+#define uint16_to_float32 uint16_to_float32_sparc64
+#define uint64_to_float64_scalbn uint64_to_float64_scalbn_sparc64
+#define uint32_to_float64_scalbn uint32_to_float64_scalbn_sparc64
+#define uint16_to_float64_scalbn uint16_to_float64_scalbn_sparc64
+#define uint64_to_float64 uint64_to_float64_sparc64
+#define uint32_to_float64 uint32_to_float64_sparc64
+#define uint16_to_float64 uint16_to_float64_sparc64
+#define float16_min float16_min_sparc64
+#define float16_minnum float16_minnum_sparc64
+#define float16_minnummag float16_minnummag_sparc64
+#define float16_max float16_max_sparc64
+#define float16_maxnum float16_maxnum_sparc64
+#define float16_maxnummag float16_maxnummag_sparc64
+#define float32_min float32_min_sparc64
+#define float32_minnum float32_minnum_sparc64
+#define float32_minnummag float32_minnummag_sparc64
 #define float32_max float32_max_sparc64
 #define float32_maxnum float32_maxnum_sparc64
 #define float32_maxnummag float32_maxnummag_sparc64
-#define float32_maybe_silence_nan float32_maybe_silence_nan_sparc64
-#define float32_min float32_min_sparc64
-#define float32_minmax float32_minmax_sparc64
-#define float32_minnum float32_minnum_sparc64
-#define float32_minnummag float32_minnummag_sparc64
-#define float32_mul float32_mul_sparc64
-#define float32_muladd float32_muladd_sparc64
-#define float32_rem float32_rem_sparc64
-#define float32_round_to_int float32_round_to_int_sparc64
-#define float32_scalbn float32_scalbn_sparc64
-#define float32_set_sign float32_set_sign_sparc64
-#define float32_sqrt float32_sqrt_sparc64
-#define float32_squash_input_denormal float32_squash_input_denormal_sparc64
-#define float32_sub float32_sub_sparc64
-#define float32ToCommonNaN float32ToCommonNaN_sparc64
-#define float32_to_float128 float32_to_float128_sparc64
-#define float32_to_float16 float32_to_float16_sparc64
-#define float32_to_float64 float32_to_float64_sparc64
-#define float32_to_floatx80 float32_to_floatx80_sparc64
-#define float32_to_int16 float32_to_int16_sparc64
-#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_sparc64
-#define float32_to_int32 float32_to_int32_sparc64
-#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_sparc64
-#define float32_to_int64 float32_to_int64_sparc64
-#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_sparc64
-#define float32_to_uint16 float32_to_uint16_sparc64
-#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_sparc64
-#define float32_to_uint32 float32_to_uint32_sparc64
-#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_sparc64
-#define float32_to_uint64 float32_to_uint64_sparc64
-#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_sparc64
-#define float32_unordered float32_unordered_sparc64
-#define float32_unordered_quiet float32_unordered_quiet_sparc64
-#define float64_abs float64_abs_sparc64
-#define float64_add float64_add_sparc64
-#define float64_chs float64_chs_sparc64
-#define float64_compare float64_compare_sparc64
-#define float64_compare_internal float64_compare_internal_sparc64
-#define float64_compare_quiet float64_compare_quiet_sparc64
-#define float64_default_nan float64_default_nan_sparc64
-#define float64_div float64_div_sparc64
-#define float64_eq float64_eq_sparc64
-#define float64_eq_quiet float64_eq_quiet_sparc64
-#define float64_is_any_nan float64_is_any_nan_sparc64
-#define float64_is_infinity float64_is_infinity_sparc64
-#define float64_is_neg float64_is_neg_sparc64
-#define float64_is_quiet_nan float64_is_quiet_nan_sparc64
-#define float64_is_signaling_nan float64_is_signaling_nan_sparc64
-#define float64_is_zero float64_is_zero_sparc64
-#define float64_le float64_le_sparc64
-#define float64_le_quiet float64_le_quiet_sparc64
-#define float64_log2 float64_log2_sparc64
-#define float64_lt float64_lt_sparc64
-#define float64_lt_quiet float64_lt_quiet_sparc64
+#define float64_min float64_min_sparc64
+#define float64_minnum float64_minnum_sparc64
+#define float64_minnummag float64_minnummag_sparc64
 #define float64_max float64_max_sparc64
 #define float64_maxnum float64_maxnum_sparc64
 #define float64_maxnummag float64_maxnummag_sparc64
-#define float64_maybe_silence_nan float64_maybe_silence_nan_sparc64
-#define float64_min float64_min_sparc64
-#define float64_minmax float64_minmax_sparc64
-#define float64_minnum float64_minnum_sparc64
-#define float64_minnummag float64_minnummag_sparc64
-#define float64_mul float64_mul_sparc64
-#define float64_muladd float64_muladd_sparc64
-#define float64_rem float64_rem_sparc64
-#define float64_round_to_int float64_round_to_int_sparc64
+#define float16_compare float16_compare_sparc64
+#define float16_compare_quiet float16_compare_quiet_sparc64
+#define float32_compare float32_compare_sparc64
+#define float32_compare_quiet float32_compare_quiet_sparc64
+#define float64_compare float64_compare_sparc64
+#define float64_compare_quiet float64_compare_quiet_sparc64
+#define float16_scalbn float16_scalbn_sparc64
+#define float32_scalbn float32_scalbn_sparc64
 #define float64_scalbn float64_scalbn_sparc64
-#define float64_set_sign float64_set_sign_sparc64
+#define float16_sqrt float16_sqrt_sparc64
+#define float32_sqrt float32_sqrt_sparc64
 #define float64_sqrt float64_sqrt_sparc64
+#define float16_default_nan float16_default_nan_sparc64
+#define float32_default_nan float32_default_nan_sparc64
+#define float64_default_nan float64_default_nan_sparc64
+#define float128_default_nan float128_default_nan_sparc64
+#define float16_silence_nan float16_silence_nan_sparc64
+#define float32_silence_nan float32_silence_nan_sparc64
+#define float64_silence_nan float64_silence_nan_sparc64
+#define float16_squash_input_denormal float16_squash_input_denormal_sparc64
+#define float32_squash_input_denormal float32_squash_input_denormal_sparc64
 #define float64_squash_input_denormal float64_squash_input_denormal_sparc64
-#define float64_sub float64_sub_sparc64
-#define float64ToCommonNaN float64ToCommonNaN_sparc64
-#define float64_to_float128 float64_to_float128_sparc64
-#define float64_to_float16 float64_to_float16_sparc64
-#define float64_to_float32 float64_to_float32_sparc64
+#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_sparc64
+#define roundAndPackFloatx80 roundAndPackFloatx80_sparc64
+#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_sparc64
+#define int32_to_floatx80 int32_to_floatx80_sparc64
+#define int32_to_float128 int32_to_float128_sparc64
+#define int64_to_floatx80 int64_to_floatx80_sparc64
+#define int64_to_float128 int64_to_float128_sparc64
+#define uint64_to_float128 uint64_to_float128_sparc64
+#define float32_to_floatx80 float32_to_floatx80_sparc64
+#define float32_to_float128 float32_to_float128_sparc64
+#define float32_rem float32_rem_sparc64
+#define float32_exp2 float32_exp2_sparc64
+#define float32_log2 float32_log2_sparc64
+#define float32_eq float32_eq_sparc64
+#define float32_le float32_le_sparc64
+#define float32_lt float32_lt_sparc64
+#define float32_unordered
float32_unordered_sparc64 +#define float32_eq_quiet float32_eq_quiet_sparc64 +#define float32_le_quiet float32_le_quiet_sparc64 +#define float32_lt_quiet float32_lt_quiet_sparc64 +#define float32_unordered_quiet float32_unordered_quiet_sparc64 #define float64_to_floatx80 float64_to_floatx80_sparc64 -#define float64_to_int16 float64_to_int16_sparc64 -#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_sparc64 -#define float64_to_int32 float64_to_int32_sparc64 -#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_sparc64 -#define float64_to_int64 float64_to_int64_sparc64 -#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_sparc64 -#define float64_to_uint16 float64_to_uint16_sparc64 -#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_sparc64 -#define float64_to_uint32 float64_to_uint32_sparc64 -#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_sparc64 -#define float64_to_uint64 float64_to_uint64_sparc64 -#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_sparc64 -#define float64_trunc_to_int float64_trunc_to_int_sparc64 +#define float64_to_float128 float64_to_float128_sparc64 +#define float64_rem float64_rem_sparc64 +#define float64_log2 float64_log2_sparc64 +#define float64_eq float64_eq_sparc64 +#define float64_le float64_le_sparc64 +#define float64_lt float64_lt_sparc64 #define float64_unordered float64_unordered_sparc64 +#define float64_eq_quiet float64_eq_quiet_sparc64 +#define float64_le_quiet float64_le_quiet_sparc64 +#define float64_lt_quiet float64_lt_quiet_sparc64 #define float64_unordered_quiet float64_unordered_quiet_sparc64 -#define float_raise float_raise_sparc64 -#define floatx80_add floatx80_add_sparc64 -#define floatx80_compare floatx80_compare_sparc64 -#define floatx80_compare_internal floatx80_compare_internal_sparc64 -#define floatx80_compare_quiet floatx80_compare_quiet_sparc64 -#define floatx80_default_nan floatx80_default_nan_sparc64 -#define floatx80_div floatx80_div_sparc64 -#define floatx80_eq floatx80_eq_sparc64 -#define floatx80_eq_quiet floatx80_eq_quiet_sparc64 -#define floatx80_is_quiet_nan floatx80_is_quiet_nan_sparc64 -#define floatx80_is_signaling_nan floatx80_is_signaling_nan_sparc64 -#define floatx80_le floatx80_le_sparc64 -#define floatx80_le_quiet floatx80_le_quiet_sparc64 -#define floatx80_lt floatx80_lt_sparc64 -#define floatx80_lt_quiet floatx80_lt_quiet_sparc64 -#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_sparc64 -#define floatx80_mul floatx80_mul_sparc64 -#define floatx80_rem floatx80_rem_sparc64 -#define floatx80_round_to_int floatx80_round_to_int_sparc64 -#define floatx80_scalbn floatx80_scalbn_sparc64 -#define floatx80_sqrt floatx80_sqrt_sparc64 -#define floatx80_sub floatx80_sub_sparc64 -#define floatx80ToCommonNaN floatx80ToCommonNaN_sparc64 -#define floatx80_to_float128 floatx80_to_float128_sparc64 -#define floatx80_to_float32 floatx80_to_float32_sparc64 -#define floatx80_to_float64 floatx80_to_float64_sparc64 #define floatx80_to_int32 floatx80_to_int32_sparc64 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_sparc64 #define floatx80_to_int64 floatx80_to_int64_sparc64 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_sparc64 +#define floatx80_to_float32 floatx80_to_float32_sparc64 +#define floatx80_to_float64 floatx80_to_float64_sparc64 +#define floatx80_to_float128 floatx80_to_float128_sparc64 +#define floatx80_round floatx80_round_sparc64 +#define 
floatx80_round_to_int floatx80_round_to_int_sparc64 +#define floatx80_add floatx80_add_sparc64 +#define floatx80_sub floatx80_sub_sparc64 +#define floatx80_mul floatx80_mul_sparc64 +#define floatx80_div floatx80_div_sparc64 +#define floatx80_rem floatx80_rem_sparc64 +#define floatx80_sqrt floatx80_sqrt_sparc64 +#define floatx80_eq floatx80_eq_sparc64 +#define floatx80_le floatx80_le_sparc64 +#define floatx80_lt floatx80_lt_sparc64 #define floatx80_unordered floatx80_unordered_sparc64 +#define floatx80_eq_quiet floatx80_eq_quiet_sparc64 +#define floatx80_le_quiet floatx80_le_quiet_sparc64 +#define floatx80_lt_quiet floatx80_lt_quiet_sparc64 #define floatx80_unordered_quiet floatx80_unordered_quiet_sparc64 -#define flush_icache_range flush_icache_range_sparc64 -#define format_string format_string_sparc64 -#define fp_decode_rm fp_decode_rm_sparc64 -#define frame_dummy frame_dummy_sparc64 -#define free_range free_range_sparc64 -#define fstat64 fstat64_sparc64 -#define futex_wait futex_wait_sparc64 -#define futex_wake futex_wake_sparc64 -#define gen_aa32_ld16s gen_aa32_ld16s_sparc64 -#define gen_aa32_ld16u gen_aa32_ld16u_sparc64 -#define gen_aa32_ld32u gen_aa32_ld32u_sparc64 -#define gen_aa32_ld64 gen_aa32_ld64_sparc64 -#define gen_aa32_ld8s gen_aa32_ld8s_sparc64 -#define gen_aa32_ld8u gen_aa32_ld8u_sparc64 -#define gen_aa32_st16 gen_aa32_st16_sparc64 -#define gen_aa32_st32 gen_aa32_st32_sparc64 -#define gen_aa32_st64 gen_aa32_st64_sparc64 -#define gen_aa32_st8 gen_aa32_st8_sparc64 -#define gen_adc gen_adc_sparc64 -#define gen_adc_CC gen_adc_CC_sparc64 -#define gen_add16 gen_add16_sparc64 -#define gen_add_carry gen_add_carry_sparc64 -#define gen_add_CC gen_add_CC_sparc64 -#define gen_add_datah_offset gen_add_datah_offset_sparc64 -#define gen_add_data_offset gen_add_data_offset_sparc64 -#define gen_addq gen_addq_sparc64 -#define gen_addq_lo gen_addq_lo_sparc64 -#define gen_addq_msw gen_addq_msw_sparc64 -#define gen_arm_parallel_addsub gen_arm_parallel_addsub_sparc64 -#define gen_arm_shift_im gen_arm_shift_im_sparc64 -#define gen_arm_shift_reg gen_arm_shift_reg_sparc64 -#define gen_bx gen_bx_sparc64 -#define gen_bx_im gen_bx_im_sparc64 -#define gen_clrex gen_clrex_sparc64 -#define generate_memory_topology generate_memory_topology_sparc64 -#define generic_timer_cp_reginfo generic_timer_cp_reginfo_sparc64 -#define gen_exception gen_exception_sparc64 -#define gen_exception_insn gen_exception_insn_sparc64 -#define gen_exception_internal gen_exception_internal_sparc64 -#define gen_exception_internal_insn gen_exception_internal_insn_sparc64 -#define gen_exception_return gen_exception_return_sparc64 -#define gen_goto_tb gen_goto_tb_sparc64 -#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_sparc64 -#define gen_helper_add_saturate gen_helper_add_saturate_sparc64 -#define gen_helper_add_setq gen_helper_add_setq_sparc64 -#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_sparc64 -#define gen_helper_clz32 gen_helper_clz32_sparc64 -#define gen_helper_clz64 gen_helper_clz64_sparc64 -#define gen_helper_clz_arm gen_helper_clz_arm_sparc64 -#define gen_helper_cpsr_read gen_helper_cpsr_read_sparc64 -#define gen_helper_cpsr_write gen_helper_cpsr_write_sparc64 -#define gen_helper_crc32_arm gen_helper_crc32_arm_sparc64 -#define gen_helper_crc32c gen_helper_crc32c_sparc64 -#define gen_helper_crypto_aese gen_helper_crypto_aese_sparc64 -#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_sparc64 -#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_sparc64 -#define 
gen_helper_crypto_sha1h gen_helper_crypto_sha1h_sparc64 -#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_sparc64 -#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_sparc64 -#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_sparc64 -#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_sparc64 -#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_sparc64 -#define gen_helper_double_saturate gen_helper_double_saturate_sparc64 -#define gen_helper_exception_internal gen_helper_exception_internal_sparc64 -#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_sparc64 -#define gen_helper_get_cp_reg gen_helper_get_cp_reg_sparc64 -#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_sparc64 -#define gen_helper_get_r13_banked gen_helper_get_r13_banked_sparc64 -#define gen_helper_get_user_reg gen_helper_get_user_reg_sparc64 -#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_sparc64 -#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_sparc64 -#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_sparc64 -#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_sparc64 -#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_sparc64 -#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_sparc64 -#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_sparc64 -#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_sparc64 -#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_sparc64 -#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_sparc64 -#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_sparc64 -#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_sparc64 -#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_sparc64 -#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_sparc64 -#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_sparc64 -#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_sparc64 -#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_sparc64 -#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_sparc64 -#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_sparc64 -#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_sparc64 -#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_sparc64 -#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_sparc64 -#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_sparc64 -#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_sparc64 -#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_sparc64 -#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_sparc64 -#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_sparc64 -#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_sparc64 -#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_sparc64 -#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_sparc64 -#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_sparc64 -#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_sparc64 -#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_sparc64 -#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_sparc64 -#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_sparc64 -#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_sparc64 -#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_sparc64 -#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_sparc64 -#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_sparc64 -#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_sparc64 -#define 
gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_sparc64 -#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_sparc64 -#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_sparc64 -#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_sparc64 -#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_sparc64 -#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_sparc64 -#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_sparc64 -#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_sparc64 -#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_sparc64 -#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_sparc64 -#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_sparc64 -#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_sparc64 -#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_sparc64 -#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_sparc64 -#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_sparc64 -#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_sparc64 -#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_sparc64 -#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_sparc64 -#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_sparc64 -#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_sparc64 -#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_sparc64 -#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_sparc64 -#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_sparc64 -#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_sparc64 -#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_sparc64 -#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_sparc64 -#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_sparc64 -#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_sparc64 -#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_sparc64 -#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_sparc64 -#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_sparc64 -#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_sparc64 -#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_sparc64 -#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_sparc64 -#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_sparc64 -#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_sparc64 -#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_sparc64 -#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_sparc64 -#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_sparc64 -#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_sparc64 -#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_sparc64 -#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_sparc64 -#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_sparc64 -#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_sparc64 -#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_sparc64 -#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_sparc64 -#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_sparc64 -#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_sparc64 -#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_sparc64 -#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_sparc64 -#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_sparc64 -#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_sparc64 -#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_sparc64 -#define gen_helper_iwmmxt_unpackhul 
gen_helper_iwmmxt_unpackhul_sparc64 -#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_sparc64 -#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_sparc64 -#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_sparc64 -#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_sparc64 -#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_sparc64 -#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_sparc64 -#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_sparc64 -#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_sparc64 -#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_sparc64 -#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_sparc64 -#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_sparc64 -#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_sparc64 -#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_sparc64 -#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_sparc64 -#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_sparc64 -#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_sparc64 -#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_sparc64 -#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_sparc64 -#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_sparc64 -#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_sparc64 -#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_sparc64 -#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_sparc64 -#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_sparc64 -#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_sparc64 -#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_sparc64 -#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_sparc64 -#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_sparc64 -#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_sparc64 -#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_sparc64 -#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_sparc64 -#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_sparc64 -#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_sparc64 -#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_sparc64 -#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_sparc64 -#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_sparc64 -#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_sparc64 -#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_sparc64 -#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_sparc64 -#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_sparc64 -#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_sparc64 -#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_sparc64 -#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_sparc64 -#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_sparc64 -#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_sparc64 -#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_sparc64 -#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_sparc64 -#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_sparc64 -#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_sparc64 -#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_sparc64 -#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_sparc64 -#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_sparc64 -#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_sparc64 -#define 
gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_sparc64 -#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_sparc64 -#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_sparc64 -#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_sparc64 -#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_sparc64 -#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_sparc64 -#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_sparc64 -#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_sparc64 -#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_sparc64 -#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_sparc64 -#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_sparc64 -#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_sparc64 -#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_sparc64 -#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_sparc64 -#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_sparc64 -#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_sparc64 -#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_sparc64 -#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_sparc64 -#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_sparc64 -#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_sparc64 -#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_sparc64 -#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_sparc64 -#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_sparc64 -#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_sparc64 -#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_sparc64 -#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_sparc64 -#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_sparc64 -#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_sparc64 -#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_sparc64 -#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_sparc64 -#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_sparc64 -#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_sparc64 -#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_sparc64 -#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_sparc64 -#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_sparc64 -#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_sparc64 -#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_sparc64 -#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_sparc64 -#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_sparc64 -#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_sparc64 -#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_sparc64 -#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_sparc64 -#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_sparc64 -#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_sparc64 -#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_sparc64 -#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_sparc64 -#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_sparc64 -#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_sparc64 -#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_sparc64 -#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_sparc64 -#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_sparc64 -#define gen_helper_neon_narrow_u8 
gen_helper_neon_narrow_u8_sparc64 -#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_sparc64 -#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_sparc64 -#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_sparc64 -#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_sparc64 -#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_sparc64 -#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_sparc64 -#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_sparc64 -#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_sparc64 -#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_sparc64 -#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_sparc64 -#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_sparc64 -#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_sparc64 -#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_sparc64 -#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_sparc64 -#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_sparc64 -#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_sparc64 -#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_sparc64 -#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_sparc64 -#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_sparc64 -#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_sparc64 -#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_sparc64 -#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_sparc64 -#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_sparc64 -#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_sparc64 -#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_sparc64 -#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_sparc64 -#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_sparc64 -#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_sparc64 -#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_sparc64 -#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_sparc64 -#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_sparc64 -#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_sparc64 -#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_sparc64 -#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_sparc64 -#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_sparc64 -#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_sparc64 -#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_sparc64 -#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_sparc64 -#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_sparc64 -#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_sparc64 -#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_sparc64 -#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_sparc64 -#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_sparc64 -#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_sparc64 -#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_sparc64 -#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_sparc64 -#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_sparc64 -#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_sparc64 -#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_sparc64 -#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_sparc64 -#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_sparc64 -#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_sparc64 
-#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_sparc64 -#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_sparc64 -#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_sparc64 -#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_sparc64 -#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_sparc64 -#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_sparc64 -#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_sparc64 -#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_sparc64 -#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_sparc64 -#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_sparc64 -#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_sparc64 -#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_sparc64 -#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_sparc64 -#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_sparc64 -#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_sparc64 -#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_sparc64 -#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_sparc64 -#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_sparc64 -#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_sparc64 -#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_sparc64 -#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_sparc64 -#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_sparc64 -#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_sparc64 -#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_sparc64 -#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_sparc64 -#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_sparc64 -#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_sparc64 -#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_sparc64 -#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_sparc64 -#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_sparc64 -#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_sparc64 -#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_sparc64 -#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_sparc64 -#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_sparc64 -#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_sparc64 -#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_sparc64 -#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_sparc64 -#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_sparc64 -#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_sparc64 -#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_sparc64 -#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_sparc64 -#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_sparc64 -#define gen_helper_neon_tbl gen_helper_neon_tbl_sparc64 -#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_sparc64 -#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_sparc64 -#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_sparc64 -#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_sparc64 -#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_sparc64 -#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_sparc64 -#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_sparc64 -#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_sparc64 -#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_sparc64 -#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_sparc64 -#define gen_helper_neon_widen_u16 
gen_helper_neon_widen_u16_sparc64 -#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_sparc64 -#define gen_helper_neon_zip16 gen_helper_neon_zip16_sparc64 -#define gen_helper_neon_zip8 gen_helper_neon_zip8_sparc64 -#define gen_helper_pre_hvc gen_helper_pre_hvc_sparc64 -#define gen_helper_pre_smc gen_helper_pre_smc_sparc64 -#define gen_helper_qadd16 gen_helper_qadd16_sparc64 -#define gen_helper_qadd8 gen_helper_qadd8_sparc64 -#define gen_helper_qaddsubx gen_helper_qaddsubx_sparc64 -#define gen_helper_qsub16 gen_helper_qsub16_sparc64 -#define gen_helper_qsub8 gen_helper_qsub8_sparc64 -#define gen_helper_qsubaddx gen_helper_qsubaddx_sparc64 -#define gen_helper_rbit gen_helper_rbit_sparc64 -#define gen_helper_recpe_f32 gen_helper_recpe_f32_sparc64 -#define gen_helper_recpe_u32 gen_helper_recpe_u32_sparc64 -#define gen_helper_recps_f32 gen_helper_recps_f32_sparc64 -#define gen_helper_rintd gen_helper_rintd_sparc64 -#define gen_helper_rintd_exact gen_helper_rintd_exact_sparc64 -#define gen_helper_rints gen_helper_rints_sparc64 -#define gen_helper_rints_exact gen_helper_rints_exact_sparc64 -#define gen_helper_ror_cc gen_helper_ror_cc_sparc64 -#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_sparc64 -#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_sparc64 -#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_sparc64 -#define gen_helper_sadd16 gen_helper_sadd16_sparc64 -#define gen_helper_sadd8 gen_helper_sadd8_sparc64 -#define gen_helper_saddsubx gen_helper_saddsubx_sparc64 -#define gen_helper_sar_cc gen_helper_sar_cc_sparc64 -#define gen_helper_sdiv gen_helper_sdiv_sparc64 -#define gen_helper_sel_flags gen_helper_sel_flags_sparc64 -#define gen_helper_set_cp_reg gen_helper_set_cp_reg_sparc64 -#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_sparc64 -#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_sparc64 -#define gen_helper_set_r13_banked gen_helper_set_r13_banked_sparc64 -#define gen_helper_set_rmode gen_helper_set_rmode_sparc64 -#define gen_helper_set_user_reg gen_helper_set_user_reg_sparc64 -#define gen_helper_shadd16 gen_helper_shadd16_sparc64 -#define gen_helper_shadd8 gen_helper_shadd8_sparc64 -#define gen_helper_shaddsubx gen_helper_shaddsubx_sparc64 -#define gen_helper_shl_cc gen_helper_shl_cc_sparc64 -#define gen_helper_shr_cc gen_helper_shr_cc_sparc64 -#define gen_helper_shsub16 gen_helper_shsub16_sparc64 -#define gen_helper_shsub8 gen_helper_shsub8_sparc64 -#define gen_helper_shsubaddx gen_helper_shsubaddx_sparc64 -#define gen_helper_ssat gen_helper_ssat_sparc64 -#define gen_helper_ssat16 gen_helper_ssat16_sparc64 -#define gen_helper_ssub16 gen_helper_ssub16_sparc64 -#define gen_helper_ssub8 gen_helper_ssub8_sparc64 -#define gen_helper_ssubaddx gen_helper_ssubaddx_sparc64 -#define gen_helper_sub_saturate gen_helper_sub_saturate_sparc64 -#define gen_helper_sxtb16 gen_helper_sxtb16_sparc64 -#define gen_helper_uadd16 gen_helper_uadd16_sparc64 -#define gen_helper_uadd8 gen_helper_uadd8_sparc64 -#define gen_helper_uaddsubx gen_helper_uaddsubx_sparc64 -#define gen_helper_udiv gen_helper_udiv_sparc64 -#define gen_helper_uhadd16 gen_helper_uhadd16_sparc64 -#define gen_helper_uhadd8 gen_helper_uhadd8_sparc64 -#define gen_helper_uhaddsubx gen_helper_uhaddsubx_sparc64 -#define gen_helper_uhsub16 gen_helper_uhsub16_sparc64 -#define gen_helper_uhsub8 gen_helper_uhsub8_sparc64 -#define gen_helper_uhsubaddx gen_helper_uhsubaddx_sparc64 -#define gen_helper_uqadd16 gen_helper_uqadd16_sparc64 -#define gen_helper_uqadd8 gen_helper_uqadd8_sparc64 -#define 
gen_helper_uqaddsubx gen_helper_uqaddsubx_sparc64 -#define gen_helper_uqsub16 gen_helper_uqsub16_sparc64 -#define gen_helper_uqsub8 gen_helper_uqsub8_sparc64 -#define gen_helper_uqsubaddx gen_helper_uqsubaddx_sparc64 -#define gen_helper_usad8 gen_helper_usad8_sparc64 -#define gen_helper_usat gen_helper_usat_sparc64 -#define gen_helper_usat16 gen_helper_usat16_sparc64 -#define gen_helper_usub16 gen_helper_usub16_sparc64 -#define gen_helper_usub8 gen_helper_usub8_sparc64 -#define gen_helper_usubaddx gen_helper_usubaddx_sparc64 -#define gen_helper_uxtb16 gen_helper_uxtb16_sparc64 -#define gen_helper_v7m_mrs gen_helper_v7m_mrs_sparc64 -#define gen_helper_v7m_msr gen_helper_v7m_msr_sparc64 -#define gen_helper_vfp_absd gen_helper_vfp_absd_sparc64 -#define gen_helper_vfp_abss gen_helper_vfp_abss_sparc64 -#define gen_helper_vfp_addd gen_helper_vfp_addd_sparc64 -#define gen_helper_vfp_adds gen_helper_vfp_adds_sparc64 -#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_sparc64 -#define gen_helper_vfp_cmped gen_helper_vfp_cmped_sparc64 -#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_sparc64 -#define gen_helper_vfp_cmps gen_helper_vfp_cmps_sparc64 -#define gen_helper_vfp_divd gen_helper_vfp_divd_sparc64 -#define gen_helper_vfp_divs gen_helper_vfp_divs_sparc64 -#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_sparc64 -#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_sparc64 -#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_sparc64 -#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_sparc64 -#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_sparc64 -#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_sparc64 -#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_sparc64 -#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_sparc64 -#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_sparc64 -#define gen_helper_vfp_maxs gen_helper_vfp_maxs_sparc64 -#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_sparc64 -#define gen_helper_vfp_minnums gen_helper_vfp_minnums_sparc64 -#define gen_helper_vfp_mins gen_helper_vfp_mins_sparc64 -#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_sparc64 -#define gen_helper_vfp_muladds gen_helper_vfp_muladds_sparc64 -#define gen_helper_vfp_muld gen_helper_vfp_muld_sparc64 -#define gen_helper_vfp_muls gen_helper_vfp_muls_sparc64 -#define gen_helper_vfp_negd gen_helper_vfp_negd_sparc64 -#define gen_helper_vfp_negs gen_helper_vfp_negs_sparc64 -#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_sparc64 -#define gen_helper_vfp_shtod gen_helper_vfp_shtod_sparc64 -#define gen_helper_vfp_shtos gen_helper_vfp_shtos_sparc64 -#define gen_helper_vfp_sitod gen_helper_vfp_sitod_sparc64 -#define gen_helper_vfp_sitos gen_helper_vfp_sitos_sparc64 -#define gen_helper_vfp_sltod gen_helper_vfp_sltod_sparc64 -#define gen_helper_vfp_sltos gen_helper_vfp_sltos_sparc64 -#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_sparc64 -#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_sparc64 -#define gen_helper_vfp_subd gen_helper_vfp_subd_sparc64 -#define gen_helper_vfp_subs gen_helper_vfp_subs_sparc64 -#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_sparc64 -#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_sparc64 -#define gen_helper_vfp_tosid gen_helper_vfp_tosid_sparc64 -#define gen_helper_vfp_tosis gen_helper_vfp_tosis_sparc64 -#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_sparc64 -#define gen_helper_vfp_tosizs 
gen_helper_vfp_tosizs_sparc64 -#define gen_helper_vfp_tosld gen_helper_vfp_tosld_sparc64 -#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_sparc64 -#define gen_helper_vfp_tosls gen_helper_vfp_tosls_sparc64 -#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_sparc64 -#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_sparc64 -#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_sparc64 -#define gen_helper_vfp_touid gen_helper_vfp_touid_sparc64 -#define gen_helper_vfp_touis gen_helper_vfp_touis_sparc64 -#define gen_helper_vfp_touizd gen_helper_vfp_touizd_sparc64 -#define gen_helper_vfp_touizs gen_helper_vfp_touizs_sparc64 -#define gen_helper_vfp_tould gen_helper_vfp_tould_sparc64 -#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_sparc64 -#define gen_helper_vfp_touls gen_helper_vfp_touls_sparc64 -#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_sparc64 -#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_sparc64 -#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_sparc64 -#define gen_helper_vfp_uitod gen_helper_vfp_uitod_sparc64 -#define gen_helper_vfp_uitos gen_helper_vfp_uitos_sparc64 -#define gen_helper_vfp_ultod gen_helper_vfp_ultod_sparc64 -#define gen_helper_vfp_ultos gen_helper_vfp_ultos_sparc64 -#define gen_helper_wfe gen_helper_wfe_sparc64 -#define gen_helper_wfi gen_helper_wfi_sparc64 -#define gen_hvc gen_hvc_sparc64 -#define gen_intermediate_code_internal gen_intermediate_code_internal_sparc64 -#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_sparc64 -#define gen_iwmmxt_address gen_iwmmxt_address_sparc64 -#define gen_iwmmxt_shift gen_iwmmxt_shift_sparc64 -#define gen_jmp gen_jmp_sparc64 -#define gen_load_and_replicate gen_load_and_replicate_sparc64 -#define gen_load_exclusive gen_load_exclusive_sparc64 -#define gen_logic_CC gen_logic_CC_sparc64 -#define gen_logicq_cc gen_logicq_cc_sparc64 -#define gen_lookup_tb gen_lookup_tb_sparc64 -#define gen_mov_F0_vreg gen_mov_F0_vreg_sparc64 -#define gen_mov_F1_vreg gen_mov_F1_vreg_sparc64 -#define gen_mov_vreg_F0 gen_mov_vreg_F0_sparc64 -#define gen_muls_i64_i32 gen_muls_i64_i32_sparc64 -#define gen_mulu_i64_i32 gen_mulu_i64_i32_sparc64 -#define gen_mulxy gen_mulxy_sparc64 -#define gen_neon_add gen_neon_add_sparc64 -#define gen_neon_addl gen_neon_addl_sparc64 -#define gen_neon_addl_saturate gen_neon_addl_saturate_sparc64 -#define gen_neon_bsl gen_neon_bsl_sparc64 -#define gen_neon_dup_high16 gen_neon_dup_high16_sparc64 -#define gen_neon_dup_low16 gen_neon_dup_low16_sparc64 -#define gen_neon_dup_u8 gen_neon_dup_u8_sparc64 -#define gen_neon_mull gen_neon_mull_sparc64 -#define gen_neon_narrow gen_neon_narrow_sparc64 -#define gen_neon_narrow_op gen_neon_narrow_op_sparc64 -#define gen_neon_narrow_sats gen_neon_narrow_sats_sparc64 -#define gen_neon_narrow_satu gen_neon_narrow_satu_sparc64 -#define gen_neon_negl gen_neon_negl_sparc64 -#define gen_neon_rsb gen_neon_rsb_sparc64 -#define gen_neon_shift_narrow gen_neon_shift_narrow_sparc64 -#define gen_neon_subl gen_neon_subl_sparc64 -#define gen_neon_trn_u16 gen_neon_trn_u16_sparc64 -#define gen_neon_trn_u8 gen_neon_trn_u8_sparc64 -#define gen_neon_unarrow_sats gen_neon_unarrow_sats_sparc64 -#define gen_neon_unzip gen_neon_unzip_sparc64 -#define gen_neon_widen gen_neon_widen_sparc64 -#define gen_neon_zip gen_neon_zip_sparc64 +#define float128_to_int32 float128_to_int32_sparc64 +#define 
float128_to_int32_round_to_zero float128_to_int32_round_to_zero_sparc64 +#define float128_to_int64 float128_to_int64_sparc64 +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_sparc64 +#define float128_to_uint64 float128_to_uint64_sparc64 +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_sparc64 +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_sparc64 +#define float128_to_uint32 float128_to_uint32_sparc64 +#define float128_to_float32 float128_to_float32_sparc64 +#define float128_to_float64 float128_to_float64_sparc64 +#define float128_to_floatx80 float128_to_floatx80_sparc64 +#define float128_round_to_int float128_round_to_int_sparc64 +#define float128_add float128_add_sparc64 +#define float128_sub float128_sub_sparc64 +#define float128_mul float128_mul_sparc64 +#define float128_div float128_div_sparc64 +#define float128_rem float128_rem_sparc64 +#define float128_sqrt float128_sqrt_sparc64 +#define float128_eq float128_eq_sparc64 +#define float128_le float128_le_sparc64 +#define float128_lt float128_lt_sparc64 +#define float128_unordered float128_unordered_sparc64 +#define float128_eq_quiet float128_eq_quiet_sparc64 +#define float128_le_quiet float128_le_quiet_sparc64 +#define float128_lt_quiet float128_lt_quiet_sparc64 +#define float128_unordered_quiet float128_unordered_quiet_sparc64 +#define floatx80_compare floatx80_compare_sparc64 +#define floatx80_compare_quiet floatx80_compare_quiet_sparc64 +#define float128_compare float128_compare_sparc64 +#define float128_compare_quiet float128_compare_quiet_sparc64 +#define floatx80_scalbn floatx80_scalbn_sparc64 +#define float128_scalbn float128_scalbn_sparc64 +#define softfloat_init softfloat_init_sparc64 +#define tcg_optimize tcg_optimize_sparc64 #define gen_new_label gen_new_label_sparc64 -#define gen_nop_hint gen_nop_hint_sparc64 -#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_sparc64 -#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_sparc64 -#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_sparc64 -#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_sparc64 -#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_sparc64 -#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_sparc64 -#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_sparc64 -#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_sparc64 -#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_sparc64 -#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_sparc64 -#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_sparc64 -#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_sparc64 -#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_sparc64 -#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_sparc64 -#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_sparc64 -#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_sparc64 -#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_sparc64 -#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_sparc64 -#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_sparc64 -#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_sparc64 -#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_sparc64 -#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_sparc64 -#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_sparc64 -#define 
gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_sparc64 -#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_sparc64 -#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_sparc64 -#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_sparc64 -#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_sparc64 -#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_sparc64 -#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_sparc64 -#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_sparc64 -#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_sparc64 -#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_sparc64 -#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_sparc64 -#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_sparc64 -#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_sparc64 -#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_sparc64 -#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_sparc64 -#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_sparc64 -#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_sparc64 -#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_sparc64 -#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_sparc64 -#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_sparc64 -#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_sparc64 -#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_sparc64 -#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_sparc64 -#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_sparc64 -#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_sparc64 -#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_sparc64 -#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_sparc64 -#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_sparc64 -#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_sparc64 -#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_sparc64 -#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_sparc64 -#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_sparc64 -#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_sparc64 -#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_sparc64 -#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_sparc64 -#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_sparc64 -#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_sparc64 -#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_sparc64 -#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_sparc64 -#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_sparc64 -#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_sparc64 -#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_sparc64 -#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_sparc64 -#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_sparc64 -#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_sparc64 -#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_sparc64 -#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_sparc64 -#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_sparc64 -#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_sparc64 -#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_sparc64 -#define 
gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_sparc64 -#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_sparc64 -#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_sparc64 -#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_sparc64 -#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_sparc64 -#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_sparc64 -#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_sparc64 -#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_sparc64 -#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_sparc64 -#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_sparc64 -#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_sparc64 -#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_sparc64 -#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_sparc64 -#define gen_rev16 gen_rev16_sparc64 -#define gen_revsh gen_revsh_sparc64 -#define gen_rfe gen_rfe_sparc64 -#define gen_sar gen_sar_sparc64 -#define gen_sbc_CC gen_sbc_CC_sparc64 -#define gen_sbfx gen_sbfx_sparc64 -#define gen_set_CF_bit31 gen_set_CF_bit31_sparc64 -#define gen_set_condexec gen_set_condexec_sparc64 -#define gen_set_cpsr gen_set_cpsr_sparc64 -#define gen_set_label gen_set_label_sparc64 -#define gen_set_pc_im gen_set_pc_im_sparc64 -#define gen_set_psr gen_set_psr_sparc64 -#define gen_set_psr_im gen_set_psr_im_sparc64 -#define gen_shl gen_shl_sparc64 -#define gen_shr gen_shr_sparc64 -#define gen_smc gen_smc_sparc64 -#define gen_smul_dual gen_smul_dual_sparc64 -#define gen_srs gen_srs_sparc64 -#define gen_ss_advance gen_ss_advance_sparc64 -#define gen_step_complete_exception gen_step_complete_exception_sparc64 -#define gen_store_exclusive gen_store_exclusive_sparc64 -#define gen_storeq_reg gen_storeq_reg_sparc64 -#define gen_sub_carry gen_sub_carry_sparc64 -#define gen_sub_CC gen_sub_CC_sparc64 -#define gen_subq_msw gen_subq_msw_sparc64 -#define gen_swap_half gen_swap_half_sparc64 -#define gen_thumb2_data_op gen_thumb2_data_op_sparc64 -#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_sparc64 -#define gen_ubfx gen_ubfx_sparc64 -#define gen_vfp_abs gen_vfp_abs_sparc64 -#define gen_vfp_add gen_vfp_add_sparc64 -#define gen_vfp_cmp gen_vfp_cmp_sparc64 -#define gen_vfp_cmpe gen_vfp_cmpe_sparc64 -#define gen_vfp_div gen_vfp_div_sparc64 -#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_sparc64 -#define gen_vfp_F1_mul gen_vfp_F1_mul_sparc64 -#define gen_vfp_F1_neg gen_vfp_F1_neg_sparc64 -#define gen_vfp_ld gen_vfp_ld_sparc64 -#define gen_vfp_mrs gen_vfp_mrs_sparc64 -#define gen_vfp_msr gen_vfp_msr_sparc64 -#define gen_vfp_mul gen_vfp_mul_sparc64 -#define gen_vfp_neg gen_vfp_neg_sparc64 -#define gen_vfp_shto gen_vfp_shto_sparc64 -#define gen_vfp_sito gen_vfp_sito_sparc64 -#define gen_vfp_slto gen_vfp_slto_sparc64 -#define gen_vfp_sqrt gen_vfp_sqrt_sparc64 -#define gen_vfp_st gen_vfp_st_sparc64 -#define gen_vfp_sub gen_vfp_sub_sparc64 -#define gen_vfp_tosh gen_vfp_tosh_sparc64 -#define gen_vfp_tosi gen_vfp_tosi_sparc64 -#define gen_vfp_tosiz gen_vfp_tosiz_sparc64 -#define gen_vfp_tosl gen_vfp_tosl_sparc64 -#define gen_vfp_touh gen_vfp_touh_sparc64 -#define gen_vfp_toui gen_vfp_toui_sparc64 -#define gen_vfp_touiz gen_vfp_touiz_sparc64 -#define gen_vfp_toul gen_vfp_toul_sparc64 -#define gen_vfp_uhto gen_vfp_uhto_sparc64 -#define gen_vfp_uito gen_vfp_uito_sparc64 -#define gen_vfp_ulto gen_vfp_ulto_sparc64 -#define get_arm_cp_reginfo get_arm_cp_reginfo_sparc64 -#define 
get_clock get_clock_sparc64 -#define get_clock_realtime get_clock_realtime_sparc64 -#define get_constraint_priority get_constraint_priority_sparc64 -#define get_float_exception_flags get_float_exception_flags_sparc64 -#define get_float_rounding_mode get_float_rounding_mode_sparc64 -#define get_fpstatus_ptr get_fpstatus_ptr_sparc64 -#define get_level1_table_address get_level1_table_address_sparc64 -#define get_mem_index get_mem_index_sparc64 -#define get_next_param_value get_next_param_value_sparc64 -#define get_opt_name get_opt_name_sparc64 -#define get_opt_value get_opt_value_sparc64 -#define get_page_addr_code get_page_addr_code_sparc64 -#define get_param_value get_param_value_sparc64 -#define get_phys_addr get_phys_addr_sparc64 -#define get_phys_addr_lpae get_phys_addr_lpae_sparc64 -#define get_phys_addr_mpu get_phys_addr_mpu_sparc64 -#define get_phys_addr_v5 get_phys_addr_v5_sparc64 -#define get_phys_addr_v6 get_phys_addr_v6_sparc64 -#define get_system_memory get_system_memory_sparc64 -#define get_ticks_per_sec get_ticks_per_sec_sparc64 -#define g_list_insert_sorted_merged g_list_insert_sorted_merged_sparc64 -#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__sparc64 -#define gt_cntfrq_access gt_cntfrq_access_sparc64 -#define gt_cnt_read gt_cnt_read_sparc64 -#define gt_cnt_reset gt_cnt_reset_sparc64 -#define gt_counter_access gt_counter_access_sparc64 -#define gt_ctl_write gt_ctl_write_sparc64 -#define gt_cval_write gt_cval_write_sparc64 -#define gt_get_countervalue gt_get_countervalue_sparc64 -#define gt_pct_access gt_pct_access_sparc64 -#define gt_ptimer_access gt_ptimer_access_sparc64 -#define gt_recalc_timer gt_recalc_timer_sparc64 -#define gt_timer_access gt_timer_access_sparc64 -#define gt_tval_read gt_tval_read_sparc64 -#define gt_tval_write gt_tval_write_sparc64 -#define gt_vct_access gt_vct_access_sparc64 -#define gt_vtimer_access gt_vtimer_access_sparc64 -#define guest_phys_blocks_free guest_phys_blocks_free_sparc64 -#define guest_phys_blocks_init guest_phys_blocks_init_sparc64 -#define handle_vcvt handle_vcvt_sparc64 -#define handle_vminmaxnm handle_vminmaxnm_sparc64 -#define handle_vrint handle_vrint_sparc64 -#define handle_vsel handle_vsel_sparc64 -#define has_help_option has_help_option_sparc64 -#define have_bmi1 have_bmi1_sparc64 -#define have_bmi2 have_bmi2_sparc64 -#define hcr_write hcr_write_sparc64 -#define helper_access_check_cp_reg helper_access_check_cp_reg_sparc64 -#define helper_add_saturate helper_add_saturate_sparc64 -#define helper_add_setq helper_add_setq_sparc64 -#define helper_add_usaturate helper_add_usaturate_sparc64 -#define helper_be_ldl_cmmu helper_be_ldl_cmmu_sparc64 -#define helper_be_ldq_cmmu helper_be_ldq_cmmu_sparc64 -#define helper_be_ldq_mmu helper_be_ldq_mmu_sparc64 -#define helper_be_ldsl_mmu helper_be_ldsl_mmu_sparc64 -#define helper_be_ldsw_mmu helper_be_ldsw_mmu_sparc64 -#define helper_be_ldul_mmu helper_be_ldul_mmu_sparc64 -#define helper_be_lduw_mmu helper_be_lduw_mmu_sparc64 -#define helper_be_ldw_cmmu helper_be_ldw_cmmu_sparc64 -#define helper_be_stl_mmu helper_be_stl_mmu_sparc64 -#define helper_be_stq_mmu helper_be_stq_mmu_sparc64 -#define helper_be_stw_mmu helper_be_stw_mmu_sparc64 -#define helper_clear_pstate_ss helper_clear_pstate_ss_sparc64 -#define helper_clz_arm helper_clz_arm_sparc64 -#define helper_cpsr_read helper_cpsr_read_sparc64 -#define helper_cpsr_write helper_cpsr_write_sparc64 -#define helper_crc32_arm helper_crc32_arm_sparc64 -#define helper_crc32c helper_crc32c_sparc64 -#define helper_crypto_aese 
-#define helper_crypto_aese helper_crypto_aese_sparc64
-#define helper_crypto_aesmc helper_crypto_aesmc_sparc64
-#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_sparc64
-#define helper_crypto_sha1h helper_crypto_sha1h_sparc64
-#define helper_crypto_sha1su1 helper_crypto_sha1su1_sparc64
-#define helper_crypto_sha256h helper_crypto_sha256h_sparc64
-#define helper_crypto_sha256h2 helper_crypto_sha256h2_sparc64
-#define helper_crypto_sha256su0 helper_crypto_sha256su0_sparc64
-#define helper_crypto_sha256su1 helper_crypto_sha256su1_sparc64
-#define helper_dc_zva helper_dc_zva_sparc64
-#define helper_double_saturate helper_double_saturate_sparc64
-#define helper_exception_internal helper_exception_internal_sparc64
-#define helper_exception_return helper_exception_return_sparc64
-#define helper_exception_with_syndrome helper_exception_with_syndrome_sparc64
-#define helper_get_cp_reg helper_get_cp_reg_sparc64
-#define helper_get_cp_reg64 helper_get_cp_reg64_sparc64
-#define helper_get_r13_banked helper_get_r13_banked_sparc64
-#define helper_get_user_reg helper_get_user_reg_sparc64
-#define helper_iwmmxt_addcb helper_iwmmxt_addcb_sparc64
-#define helper_iwmmxt_addcl helper_iwmmxt_addcl_sparc64
-#define helper_iwmmxt_addcw helper_iwmmxt_addcw_sparc64
-#define helper_iwmmxt_addnb helper_iwmmxt_addnb_sparc64
-#define helper_iwmmxt_addnl helper_iwmmxt_addnl_sparc64
-#define helper_iwmmxt_addnw helper_iwmmxt_addnw_sparc64
-#define helper_iwmmxt_addsb helper_iwmmxt_addsb_sparc64
-#define helper_iwmmxt_addsl helper_iwmmxt_addsl_sparc64
-#define helper_iwmmxt_addsw helper_iwmmxt_addsw_sparc64
-#define helper_iwmmxt_addub helper_iwmmxt_addub_sparc64
-#define helper_iwmmxt_addul helper_iwmmxt_addul_sparc64
-#define helper_iwmmxt_adduw helper_iwmmxt_adduw_sparc64
-#define helper_iwmmxt_align helper_iwmmxt_align_sparc64
-#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_sparc64
-#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_sparc64
-#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_sparc64
-#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_sparc64
-#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_sparc64
-#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_sparc64
-#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_sparc64
-#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_sparc64
-#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_sparc64
-#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_sparc64
-#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_sparc64
-#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_sparc64
-#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_sparc64
-#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_sparc64
-#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_sparc64
-#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_sparc64
-#define helper_iwmmxt_insr helper_iwmmxt_insr_sparc64
-#define helper_iwmmxt_macsw helper_iwmmxt_macsw_sparc64
-#define helper_iwmmxt_macuw helper_iwmmxt_macuw_sparc64
-#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_sparc64
-#define helper_iwmmxt_madduq helper_iwmmxt_madduq_sparc64
-#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_sparc64
-#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_sparc64
-#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_sparc64
-#define helper_iwmmxt_maxub helper_iwmmxt_maxub_sparc64
-#define helper_iwmmxt_maxul helper_iwmmxt_maxul_sparc64
-#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_sparc64
-#define helper_iwmmxt_minsb helper_iwmmxt_minsb_sparc64
-#define helper_iwmmxt_minsl helper_iwmmxt_minsl_sparc64
-#define helper_iwmmxt_minsw helper_iwmmxt_minsw_sparc64
-#define helper_iwmmxt_minub helper_iwmmxt_minub_sparc64
-#define helper_iwmmxt_minul helper_iwmmxt_minul_sparc64
-#define helper_iwmmxt_minuw helper_iwmmxt_minuw_sparc64
-#define helper_iwmmxt_msbb helper_iwmmxt_msbb_sparc64
-#define helper_iwmmxt_msbl helper_iwmmxt_msbl_sparc64
-#define helper_iwmmxt_msbw helper_iwmmxt_msbw_sparc64
-#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_sparc64
-#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_sparc64
-#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_sparc64
-#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_sparc64
-#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_sparc64
-#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_sparc64
-#define helper_iwmmxt_mululw helper_iwmmxt_mululw_sparc64
-#define helper_iwmmxt_packsl helper_iwmmxt_packsl_sparc64
-#define helper_iwmmxt_packsq helper_iwmmxt_packsq_sparc64
-#define helper_iwmmxt_packsw helper_iwmmxt_packsw_sparc64
-#define helper_iwmmxt_packul helper_iwmmxt_packul_sparc64
-#define helper_iwmmxt_packuq helper_iwmmxt_packuq_sparc64
-#define helper_iwmmxt_packuw helper_iwmmxt_packuw_sparc64
-#define helper_iwmmxt_rorl helper_iwmmxt_rorl_sparc64
-#define helper_iwmmxt_rorq helper_iwmmxt_rorq_sparc64
-#define helper_iwmmxt_rorw helper_iwmmxt_rorw_sparc64
-#define helper_iwmmxt_sadb helper_iwmmxt_sadb_sparc64
-#define helper_iwmmxt_sadw helper_iwmmxt_sadw_sparc64
-#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_sparc64
-#define helper_iwmmxt_shufh helper_iwmmxt_shufh_sparc64
-#define helper_iwmmxt_slll helper_iwmmxt_slll_sparc64
-#define helper_iwmmxt_sllq helper_iwmmxt_sllq_sparc64
-#define helper_iwmmxt_sllw helper_iwmmxt_sllw_sparc64
-#define helper_iwmmxt_sral helper_iwmmxt_sral_sparc64
-#define helper_iwmmxt_sraq helper_iwmmxt_sraq_sparc64
-#define helper_iwmmxt_sraw helper_iwmmxt_sraw_sparc64
-#define helper_iwmmxt_srll helper_iwmmxt_srll_sparc64
-#define helper_iwmmxt_srlq helper_iwmmxt_srlq_sparc64
-#define helper_iwmmxt_srlw helper_iwmmxt_srlw_sparc64
-#define helper_iwmmxt_subnb helper_iwmmxt_subnb_sparc64
-#define helper_iwmmxt_subnl helper_iwmmxt_subnl_sparc64
-#define helper_iwmmxt_subnw helper_iwmmxt_subnw_sparc64
-#define helper_iwmmxt_subsb helper_iwmmxt_subsb_sparc64
-#define helper_iwmmxt_subsl helper_iwmmxt_subsl_sparc64
-#define helper_iwmmxt_subsw helper_iwmmxt_subsw_sparc64
-#define helper_iwmmxt_subub helper_iwmmxt_subub_sparc64
-#define helper_iwmmxt_subul helper_iwmmxt_subul_sparc64
-#define helper_iwmmxt_subuw helper_iwmmxt_subuw_sparc64
-#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_sparc64
-#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_sparc64
-#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_sparc64
-#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_sparc64
-#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_sparc64
-#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_sparc64
-#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_sparc64
-#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_sparc64
-#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_sparc64
-#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_sparc64
-#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_sparc64
-#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_sparc64
-#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_sparc64
-#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_sparc64
-#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_sparc64
-#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_sparc64
-#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_sparc64
-#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_sparc64
-#define helper_ldb_cmmu helper_ldb_cmmu_sparc64
-#define helper_ldb_mmu helper_ldb_mmu_sparc64
-#define helper_ldl_cmmu helper_ldl_cmmu_sparc64
-#define helper_ldl_mmu helper_ldl_mmu_sparc64
-#define helper_ldq_cmmu helper_ldq_cmmu_sparc64
-#define helper_ldq_mmu helper_ldq_mmu_sparc64
-#define helper_ldw_cmmu helper_ldw_cmmu_sparc64
-#define helper_ldw_mmu helper_ldw_mmu_sparc64
-#define helper_le_ldl_cmmu helper_le_ldl_cmmu_sparc64
-#define helper_le_ldq_cmmu helper_le_ldq_cmmu_sparc64
-#define helper_le_ldq_mmu helper_le_ldq_mmu_sparc64
-#define helper_le_ldsl_mmu helper_le_ldsl_mmu_sparc64
-#define helper_le_ldsw_mmu helper_le_ldsw_mmu_sparc64
-#define helper_le_ldul_mmu helper_le_ldul_mmu_sparc64
-#define helper_le_lduw_mmu helper_le_lduw_mmu_sparc64
-#define helper_le_ldw_cmmu helper_le_ldw_cmmu_sparc64
-#define helper_le_stl_mmu helper_le_stl_mmu_sparc64
-#define helper_le_stq_mmu helper_le_stq_mmu_sparc64
-#define helper_le_stw_mmu helper_le_stw_mmu_sparc64
-#define helper_msr_i_pstate helper_msr_i_pstate_sparc64
-#define helper_neon_abd_f32 helper_neon_abd_f32_sparc64
-#define helper_neon_abdl_s16 helper_neon_abdl_s16_sparc64
-#define helper_neon_abdl_s32 helper_neon_abdl_s32_sparc64
-#define helper_neon_abdl_s64 helper_neon_abdl_s64_sparc64
-#define helper_neon_abdl_u16 helper_neon_abdl_u16_sparc64
-#define helper_neon_abdl_u32 helper_neon_abdl_u32_sparc64
-#define helper_neon_abdl_u64 helper_neon_abdl_u64_sparc64
-#define helper_neon_abd_s16 helper_neon_abd_s16_sparc64
-#define helper_neon_abd_s32 helper_neon_abd_s32_sparc64
-#define helper_neon_abd_s8 helper_neon_abd_s8_sparc64
-#define helper_neon_abd_u16 helper_neon_abd_u16_sparc64
-#define helper_neon_abd_u32 helper_neon_abd_u32_sparc64
-#define helper_neon_abd_u8 helper_neon_abd_u8_sparc64
-#define helper_neon_abs_s16 helper_neon_abs_s16_sparc64
-#define helper_neon_abs_s8 helper_neon_abs_s8_sparc64
-#define helper_neon_acge_f32 helper_neon_acge_f32_sparc64
-#define helper_neon_acge_f64 helper_neon_acge_f64_sparc64
-#define helper_neon_acgt_f32 helper_neon_acgt_f32_sparc64
-#define helper_neon_acgt_f64 helper_neon_acgt_f64_sparc64
-#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_sparc64
-#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_sparc64
-#define helper_neon_addl_u16 helper_neon_addl_u16_sparc64
-#define helper_neon_addl_u32 helper_neon_addl_u32_sparc64
-#define helper_neon_add_u16 helper_neon_add_u16_sparc64
-#define helper_neon_add_u8 helper_neon_add_u8_sparc64
-#define helper_neon_ceq_f32 helper_neon_ceq_f32_sparc64
-#define helper_neon_ceq_u16 helper_neon_ceq_u16_sparc64
-#define helper_neon_ceq_u32 helper_neon_ceq_u32_sparc64
-#define helper_neon_ceq_u8 helper_neon_ceq_u8_sparc64
-#define helper_neon_cge_f32 helper_neon_cge_f32_sparc64
-#define helper_neon_cge_s16 helper_neon_cge_s16_sparc64
-#define helper_neon_cge_s32 helper_neon_cge_s32_sparc64
-#define helper_neon_cge_s8 helper_neon_cge_s8_sparc64
-#define helper_neon_cge_u16 helper_neon_cge_u16_sparc64
-#define helper_neon_cge_u32 helper_neon_cge_u32_sparc64
-#define helper_neon_cge_u8 helper_neon_cge_u8_sparc64
-#define helper_neon_cgt_f32 helper_neon_cgt_f32_sparc64
-#define helper_neon_cgt_s16 helper_neon_cgt_s16_sparc64
-#define helper_neon_cgt_s32 helper_neon_cgt_s32_sparc64
-#define helper_neon_cgt_s8 helper_neon_cgt_s8_sparc64
-#define helper_neon_cgt_u16 helper_neon_cgt_u16_sparc64
-#define helper_neon_cgt_u32 helper_neon_cgt_u32_sparc64
-#define helper_neon_cgt_u8 helper_neon_cgt_u8_sparc64
-#define helper_neon_cls_s16 helper_neon_cls_s16_sparc64
-#define helper_neon_cls_s32 helper_neon_cls_s32_sparc64
-#define helper_neon_cls_s8 helper_neon_cls_s8_sparc64
-#define helper_neon_clz_u16 helper_neon_clz_u16_sparc64
-#define helper_neon_clz_u8 helper_neon_clz_u8_sparc64
-#define helper_neon_cnt_u8 helper_neon_cnt_u8_sparc64
-#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_sparc64
-#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_sparc64
-#define helper_neon_hadd_s16 helper_neon_hadd_s16_sparc64
-#define helper_neon_hadd_s32 helper_neon_hadd_s32_sparc64
-#define helper_neon_hadd_s8 helper_neon_hadd_s8_sparc64
-#define helper_neon_hadd_u16 helper_neon_hadd_u16_sparc64
-#define helper_neon_hadd_u32 helper_neon_hadd_u32_sparc64
-#define helper_neon_hadd_u8 helper_neon_hadd_u8_sparc64
-#define helper_neon_hsub_s16 helper_neon_hsub_s16_sparc64
-#define helper_neon_hsub_s32 helper_neon_hsub_s32_sparc64
-#define helper_neon_hsub_s8 helper_neon_hsub_s8_sparc64
-#define helper_neon_hsub_u16 helper_neon_hsub_u16_sparc64
-#define helper_neon_hsub_u32 helper_neon_hsub_u32_sparc64
-#define helper_neon_hsub_u8 helper_neon_hsub_u8_sparc64
-#define helper_neon_max_s16 helper_neon_max_s16_sparc64
-#define helper_neon_max_s32 helper_neon_max_s32_sparc64
-#define helper_neon_max_s8 helper_neon_max_s8_sparc64
-#define helper_neon_max_u16 helper_neon_max_u16_sparc64
-#define helper_neon_max_u32 helper_neon_max_u32_sparc64
-#define helper_neon_max_u8 helper_neon_max_u8_sparc64
-#define helper_neon_min_s16 helper_neon_min_s16_sparc64
-#define helper_neon_min_s32 helper_neon_min_s32_sparc64
-#define helper_neon_min_s8 helper_neon_min_s8_sparc64
-#define helper_neon_min_u16 helper_neon_min_u16_sparc64
-#define helper_neon_min_u32 helper_neon_min_u32_sparc64
-#define helper_neon_min_u8 helper_neon_min_u8_sparc64
-#define helper_neon_mull_p8 helper_neon_mull_p8_sparc64
-#define helper_neon_mull_s16 helper_neon_mull_s16_sparc64
-#define helper_neon_mull_s8 helper_neon_mull_s8_sparc64
-#define helper_neon_mull_u16 helper_neon_mull_u16_sparc64
-#define helper_neon_mull_u8 helper_neon_mull_u8_sparc64
-#define helper_neon_mul_p8 helper_neon_mul_p8_sparc64
-#define helper_neon_mul_u16 helper_neon_mul_u16_sparc64
-#define helper_neon_mul_u8 helper_neon_mul_u8_sparc64
-#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_sparc64
-#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_sparc64
-#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_sparc64
-#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_sparc64
-#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_sparc64
-#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_sparc64
-#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_sparc64
-#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_sparc64
-#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_sparc64
-#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_sparc64
-#define helper_neon_narrow_u16 helper_neon_narrow_u16_sparc64
-#define helper_neon_narrow_u8 helper_neon_narrow_u8_sparc64
-#define helper_neon_negl_u16 helper_neon_negl_u16_sparc64
-#define helper_neon_negl_u32 helper_neon_negl_u32_sparc64
-#define helper_neon_paddl_u16 helper_neon_paddl_u16_sparc64
-#define helper_neon_paddl_u32 helper_neon_paddl_u32_sparc64
-#define helper_neon_padd_u16 helper_neon_padd_u16_sparc64
-#define helper_neon_padd_u8 helper_neon_padd_u8_sparc64
-#define helper_neon_pmax_s16 helper_neon_pmax_s16_sparc64
-#define helper_neon_pmax_s8 helper_neon_pmax_s8_sparc64
-#define helper_neon_pmax_u16 helper_neon_pmax_u16_sparc64
-#define helper_neon_pmax_u8 helper_neon_pmax_u8_sparc64
-#define helper_neon_pmin_s16 helper_neon_pmin_s16_sparc64
-#define helper_neon_pmin_s8 helper_neon_pmin_s8_sparc64
-#define helper_neon_pmin_u16 helper_neon_pmin_u16_sparc64
-#define helper_neon_pmin_u8 helper_neon_pmin_u8_sparc64
-#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_sparc64
-#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_sparc64
-#define helper_neon_qabs_s16 helper_neon_qabs_s16_sparc64
-#define helper_neon_qabs_s32 helper_neon_qabs_s32_sparc64
-#define helper_neon_qabs_s64 helper_neon_qabs_s64_sparc64
-#define helper_neon_qabs_s8 helper_neon_qabs_s8_sparc64
-#define helper_neon_qadd_s16 helper_neon_qadd_s16_sparc64
-#define helper_neon_qadd_s32 helper_neon_qadd_s32_sparc64
-#define helper_neon_qadd_s64 helper_neon_qadd_s64_sparc64
-#define helper_neon_qadd_s8 helper_neon_qadd_s8_sparc64
-#define helper_neon_qadd_u16 helper_neon_qadd_u16_sparc64
-#define helper_neon_qadd_u32 helper_neon_qadd_u32_sparc64
-#define helper_neon_qadd_u64 helper_neon_qadd_u64_sparc64
-#define helper_neon_qadd_u8 helper_neon_qadd_u8_sparc64
-#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_sparc64
-#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_sparc64
-#define helper_neon_qneg_s16 helper_neon_qneg_s16_sparc64
-#define helper_neon_qneg_s32 helper_neon_qneg_s32_sparc64
-#define helper_neon_qneg_s64 helper_neon_qneg_s64_sparc64
-#define helper_neon_qneg_s8 helper_neon_qneg_s8_sparc64
-#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_sparc64
-#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_sparc64
-#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_sparc64
-#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_sparc64
-#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_sparc64
-#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_sparc64
-#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_sparc64
-#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_sparc64
-#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_sparc64
-#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_sparc64
-#define helper_neon_qshl_s16 helper_neon_qshl_s16_sparc64
-#define helper_neon_qshl_s32 helper_neon_qshl_s32_sparc64
-#define helper_neon_qshl_s64 helper_neon_qshl_s64_sparc64
-#define helper_neon_qshl_s8 helper_neon_qshl_s8_sparc64
-#define helper_neon_qshl_u16 helper_neon_qshl_u16_sparc64
-#define helper_neon_qshl_u32 helper_neon_qshl_u32_sparc64
-#define helper_neon_qshl_u64 helper_neon_qshl_u64_sparc64
-#define helper_neon_qshl_u8 helper_neon_qshl_u8_sparc64
-#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_sparc64
-#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_sparc64
-#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_sparc64
-#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_sparc64
-#define helper_neon_qsub_s16 helper_neon_qsub_s16_sparc64
-#define helper_neon_qsub_s32 helper_neon_qsub_s32_sparc64
-#define helper_neon_qsub_s64 helper_neon_qsub_s64_sparc64
-#define helper_neon_qsub_s8 helper_neon_qsub_s8_sparc64
-#define helper_neon_qsub_u16 helper_neon_qsub_u16_sparc64
-#define helper_neon_qsub_u32 helper_neon_qsub_u32_sparc64
-#define helper_neon_qsub_u64 helper_neon_qsub_u64_sparc64
-#define helper_neon_qsub_u8 helper_neon_qsub_u8_sparc64
-#define helper_neon_qunzip16 helper_neon_qunzip16_sparc64
-#define helper_neon_qunzip32 helper_neon_qunzip32_sparc64
-#define helper_neon_qunzip8 helper_neon_qunzip8_sparc64
-#define helper_neon_qzip16 helper_neon_qzip16_sparc64
-#define helper_neon_qzip32 helper_neon_qzip32_sparc64
-#define helper_neon_qzip8 helper_neon_qzip8_sparc64
-#define helper_neon_rbit_u8 helper_neon_rbit_u8_sparc64
-#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_sparc64
-#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_sparc64
-#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_sparc64
-#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_sparc64
-#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_sparc64
-#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_sparc64
-#define helper_neon_rshl_s16 helper_neon_rshl_s16_sparc64
-#define helper_neon_rshl_s32 helper_neon_rshl_s32_sparc64
-#define helper_neon_rshl_s64 helper_neon_rshl_s64_sparc64
-#define helper_neon_rshl_s8 helper_neon_rshl_s8_sparc64
-#define helper_neon_rshl_u16 helper_neon_rshl_u16_sparc64
-#define helper_neon_rshl_u32 helper_neon_rshl_u32_sparc64
-#define helper_neon_rshl_u64 helper_neon_rshl_u64_sparc64
-#define helper_neon_rshl_u8 helper_neon_rshl_u8_sparc64
-#define helper_neon_shl_s16 helper_neon_shl_s16_sparc64
-#define helper_neon_shl_s32 helper_neon_shl_s32_sparc64
-#define helper_neon_shl_s64 helper_neon_shl_s64_sparc64
-#define helper_neon_shl_s8 helper_neon_shl_s8_sparc64
-#define helper_neon_shl_u16 helper_neon_shl_u16_sparc64
-#define helper_neon_shl_u32 helper_neon_shl_u32_sparc64
-#define helper_neon_shl_u64 helper_neon_shl_u64_sparc64
-#define helper_neon_shl_u8 helper_neon_shl_u8_sparc64
-#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_sparc64
-#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_sparc64
-#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_sparc64
-#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_sparc64
-#define helper_neon_subl_u16 helper_neon_subl_u16_sparc64
-#define helper_neon_subl_u32 helper_neon_subl_u32_sparc64
-#define helper_neon_sub_u16 helper_neon_sub_u16_sparc64
-#define helper_neon_sub_u8 helper_neon_sub_u8_sparc64
-#define helper_neon_tbl helper_neon_tbl_sparc64
-#define helper_neon_tst_u16 helper_neon_tst_u16_sparc64
-#define helper_neon_tst_u32 helper_neon_tst_u32_sparc64
-#define helper_neon_tst_u8 helper_neon_tst_u8_sparc64
-#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_sparc64
-#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_sparc64
-#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_sparc64
-#define helper_neon_unzip16 helper_neon_unzip16_sparc64
-#define helper_neon_unzip8 helper_neon_unzip8_sparc64
-#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_sparc64
-#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_sparc64
-#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_sparc64
-#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_sparc64
-#define helper_neon_widen_s16 helper_neon_widen_s16_sparc64
-#define helper_neon_widen_s8 helper_neon_widen_s8_sparc64
-#define helper_neon_widen_u16 helper_neon_widen_u16_sparc64
-#define helper_neon_widen_u8 helper_neon_widen_u8_sparc64
-#define helper_neon_zip16 helper_neon_zip16_sparc64
-#define helper_neon_zip8 helper_neon_zip8_sparc64
-#define helper_pre_hvc helper_pre_hvc_sparc64
-#define helper_pre_smc helper_pre_smc_sparc64
-#define helper_qadd16 helper_qadd16_sparc64
-#define helper_qadd8 helper_qadd8_sparc64
-#define helper_qaddsubx helper_qaddsubx_sparc64
-#define helper_qsub16 helper_qsub16_sparc64
-#define helper_qsub8 helper_qsub8_sparc64
-#define helper_qsubaddx helper_qsubaddx_sparc64
-#define helper_rbit helper_rbit_sparc64
-#define helper_recpe_f32 helper_recpe_f32_sparc64
-#define helper_recpe_f64 helper_recpe_f64_sparc64
-#define helper_recpe_u32 helper_recpe_u32_sparc64
-#define helper_recps_f32 helper_recps_f32_sparc64
-#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_sparc64
-#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_sparc64
-#define helper_ret_ldub_mmu helper_ret_ldub_mmu_sparc64
-#define helper_ret_stb_mmu helper_ret_stb_mmu_sparc64
-#define helper_rintd helper_rintd_sparc64
-#define helper_rintd_exact helper_rintd_exact_sparc64
-#define helper_rints helper_rints_sparc64
-#define helper_rints_exact helper_rints_exact_sparc64
-#define helper_ror_cc helper_ror_cc_sparc64
-#define helper_rsqrte_f32 helper_rsqrte_f32_sparc64
-#define helper_rsqrte_f64 helper_rsqrte_f64_sparc64
-#define helper_rsqrte_u32 helper_rsqrte_u32_sparc64
-#define helper_rsqrts_f32 helper_rsqrts_f32_sparc64
-#define helper_sadd16 helper_sadd16_sparc64
-#define helper_sadd8 helper_sadd8_sparc64
-#define helper_saddsubx helper_saddsubx_sparc64
-#define helper_sar_cc helper_sar_cc_sparc64
-#define helper_sdiv helper_sdiv_sparc64
-#define helper_sel_flags helper_sel_flags_sparc64
-#define helper_set_cp_reg helper_set_cp_reg_sparc64
-#define helper_set_cp_reg64 helper_set_cp_reg64_sparc64
-#define helper_set_neon_rmode helper_set_neon_rmode_sparc64
-#define helper_set_r13_banked helper_set_r13_banked_sparc64
-#define helper_set_rmode helper_set_rmode_sparc64
-#define helper_set_user_reg helper_set_user_reg_sparc64
-#define helper_shadd16 helper_shadd16_sparc64
-#define helper_shadd8 helper_shadd8_sparc64
-#define helper_shaddsubx helper_shaddsubx_sparc64
-#define helper_shl_cc helper_shl_cc_sparc64
-#define helper_shr_cc helper_shr_cc_sparc64
-#define helper_shsub16 helper_shsub16_sparc64
-#define helper_shsub8 helper_shsub8_sparc64
-#define helper_shsubaddx helper_shsubaddx_sparc64
-#define helper_ssat helper_ssat_sparc64
-#define helper_ssat16 helper_ssat16_sparc64
-#define helper_ssub16 helper_ssub16_sparc64
-#define helper_ssub8 helper_ssub8_sparc64
-#define helper_ssubaddx helper_ssubaddx_sparc64
-#define helper_stb_mmu helper_stb_mmu_sparc64
-#define helper_stl_mmu helper_stl_mmu_sparc64
-#define helper_stq_mmu helper_stq_mmu_sparc64
-#define helper_stw_mmu helper_stw_mmu_sparc64
-#define helper_sub_saturate helper_sub_saturate_sparc64
-#define helper_sub_usaturate helper_sub_usaturate_sparc64
-#define helper_sxtb16 helper_sxtb16_sparc64
-#define helper_uadd16 helper_uadd16_sparc64
-#define helper_uadd8 helper_uadd8_sparc64
-#define helper_uaddsubx helper_uaddsubx_sparc64
-#define helper_udiv helper_udiv_sparc64
-#define helper_uhadd16 helper_uhadd16_sparc64
-#define helper_uhadd8 helper_uhadd8_sparc64
-#define helper_uhaddsubx helper_uhaddsubx_sparc64
-#define helper_uhsub16 helper_uhsub16_sparc64
-#define helper_uhsub8 helper_uhsub8_sparc64
-#define helper_uhsubaddx helper_uhsubaddx_sparc64
-#define helper_uqadd16 helper_uqadd16_sparc64
-#define helper_uqadd8 helper_uqadd8_sparc64
-#define helper_uqaddsubx helper_uqaddsubx_sparc64
-#define helper_uqsub16 helper_uqsub16_sparc64
-#define helper_uqsub8 helper_uqsub8_sparc64
-#define helper_uqsubaddx helper_uqsubaddx_sparc64
-#define helper_usad8 helper_usad8_sparc64
-#define helper_usat helper_usat_sparc64
-#define helper_usat16 helper_usat16_sparc64
-#define helper_usub16 helper_usub16_sparc64
-#define helper_usub8 helper_usub8_sparc64
-#define helper_usubaddx helper_usubaddx_sparc64
-#define helper_uxtb16 helper_uxtb16_sparc64
-#define helper_v7m_mrs helper_v7m_mrs_sparc64
-#define helper_v7m_msr helper_v7m_msr_sparc64
-#define helper_vfp_absd helper_vfp_absd_sparc64
-#define helper_vfp_abss helper_vfp_abss_sparc64
-#define helper_vfp_addd helper_vfp_addd_sparc64
-#define helper_vfp_adds helper_vfp_adds_sparc64
-#define helper_vfp_cmpd helper_vfp_cmpd_sparc64
-#define helper_vfp_cmped helper_vfp_cmped_sparc64
-#define helper_vfp_cmpes helper_vfp_cmpes_sparc64
-#define helper_vfp_cmps helper_vfp_cmps_sparc64
-#define helper_vfp_divd helper_vfp_divd_sparc64
-#define helper_vfp_divs helper_vfp_divs_sparc64
-#define helper_vfp_fcvtds helper_vfp_fcvtds_sparc64
-#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_sparc64
-#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_sparc64
-#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_sparc64
-#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_sparc64
-#define helper_vfp_fcvtsd helper_vfp_fcvtsd_sparc64
-#define helper_vfp_get_fpscr helper_vfp_get_fpscr_sparc64
-#define helper_vfp_maxd helper_vfp_maxd_sparc64
-#define helper_vfp_maxnumd helper_vfp_maxnumd_sparc64
-#define helper_vfp_maxnums helper_vfp_maxnums_sparc64
-#define helper_vfp_maxs helper_vfp_maxs_sparc64
-#define helper_vfp_mind helper_vfp_mind_sparc64
-#define helper_vfp_minnumd helper_vfp_minnumd_sparc64
-#define helper_vfp_minnums helper_vfp_minnums_sparc64
-#define helper_vfp_mins helper_vfp_mins_sparc64
-#define helper_vfp_muladdd helper_vfp_muladdd_sparc64
-#define helper_vfp_muladds helper_vfp_muladds_sparc64
-#define helper_vfp_muld helper_vfp_muld_sparc64
-#define helper_vfp_muls helper_vfp_muls_sparc64
-#define helper_vfp_negd helper_vfp_negd_sparc64
-#define helper_vfp_negs helper_vfp_negs_sparc64
-#define helper_vfp_set_fpscr helper_vfp_set_fpscr_sparc64
-#define helper_vfp_shtod helper_vfp_shtod_sparc64
-#define helper_vfp_shtos helper_vfp_shtos_sparc64
-#define helper_vfp_sitod helper_vfp_sitod_sparc64
-#define helper_vfp_sitos helper_vfp_sitos_sparc64
-#define helper_vfp_sltod helper_vfp_sltod_sparc64
-#define helper_vfp_sltos helper_vfp_sltos_sparc64
-#define helper_vfp_sqrtd helper_vfp_sqrtd_sparc64
-#define helper_vfp_sqrts helper_vfp_sqrts_sparc64
-#define helper_vfp_sqtod helper_vfp_sqtod_sparc64
-#define helper_vfp_sqtos helper_vfp_sqtos_sparc64
-#define helper_vfp_subd helper_vfp_subd_sparc64
-#define helper_vfp_subs helper_vfp_subs_sparc64
-#define helper_vfp_toshd helper_vfp_toshd_sparc64
-#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_sparc64
-#define helper_vfp_toshs helper_vfp_toshs_sparc64
-#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_sparc64
-#define helper_vfp_tosid helper_vfp_tosid_sparc64
-#define helper_vfp_tosis helper_vfp_tosis_sparc64
-#define helper_vfp_tosizd helper_vfp_tosizd_sparc64
-#define helper_vfp_tosizs helper_vfp_tosizs_sparc64
-#define helper_vfp_tosld helper_vfp_tosld_sparc64
-#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_sparc64
-#define helper_vfp_tosls helper_vfp_tosls_sparc64
-#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_sparc64
-#define helper_vfp_tosqd helper_vfp_tosqd_sparc64
-#define helper_vfp_tosqs helper_vfp_tosqs_sparc64
-#define helper_vfp_touhd helper_vfp_touhd_sparc64
-#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_sparc64
-#define helper_vfp_touhs helper_vfp_touhs_sparc64
-#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_sparc64
-#define helper_vfp_touid helper_vfp_touid_sparc64
-#define helper_vfp_touis helper_vfp_touis_sparc64
-#define helper_vfp_touizd helper_vfp_touizd_sparc64
-#define helper_vfp_touizs helper_vfp_touizs_sparc64
-#define helper_vfp_tould helper_vfp_tould_sparc64
-#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_sparc64
-#define helper_vfp_touls helper_vfp_touls_sparc64
-#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_sparc64
-#define helper_vfp_touqd helper_vfp_touqd_sparc64
-#define helper_vfp_touqs helper_vfp_touqs_sparc64
-#define helper_vfp_uhtod helper_vfp_uhtod_sparc64
-#define helper_vfp_uhtos helper_vfp_uhtos_sparc64
-#define helper_vfp_uitod helper_vfp_uitod_sparc64
-#define helper_vfp_uitos helper_vfp_uitos_sparc64
-#define helper_vfp_ultod helper_vfp_ultod_sparc64
-#define helper_vfp_ultos helper_vfp_ultos_sparc64
-#define helper_vfp_uqtod helper_vfp_uqtod_sparc64
-#define helper_vfp_uqtos helper_vfp_uqtos_sparc64
-#define helper_wfe helper_wfe_sparc64
-#define helper_wfi helper_wfi_sparc64
-#define hex2decimal hex2decimal_sparc64
-#define hw_breakpoint_update hw_breakpoint_update_sparc64
-#define hw_breakpoint_update_all hw_breakpoint_update_all_sparc64
-#define hw_watchpoint_update hw_watchpoint_update_sparc64
-#define hw_watchpoint_update_all hw_watchpoint_update_all_sparc64
-#define _init _init_sparc64
-#define init_cpreg_list init_cpreg_list_sparc64
-#define init_lists init_lists_sparc64
-#define input_type_enum input_type_enum_sparc64
-#define int128_2_64 int128_2_64_sparc64
-#define int128_add int128_add_sparc64
-#define int128_addto int128_addto_sparc64
-#define int128_and int128_and_sparc64
-#define int128_eq int128_eq_sparc64
-#define int128_ge int128_ge_sparc64
-#define int128_get64 int128_get64_sparc64
-#define int128_gt int128_gt_sparc64
-#define int128_le int128_le_sparc64
-#define int128_lt int128_lt_sparc64
-#define int128_make64 int128_make64_sparc64
-#define int128_max int128_max_sparc64
-#define int128_min int128_min_sparc64
-#define int128_ne int128_ne_sparc64
-#define int128_neg int128_neg_sparc64
-#define int128_nz int128_nz_sparc64
-#define int128_rshift int128_rshift_sparc64
-#define int128_sub int128_sub_sparc64
-#define int128_subfrom int128_subfrom_sparc64
-#define int128_zero int128_zero_sparc64
-#define int16_to_float32 int16_to_float32_sparc64
-#define int16_to_float64 int16_to_float64_sparc64
-#define int32_to_float128 int32_to_float128_sparc64
-#define int32_to_float32 int32_to_float32_sparc64
-#define int32_to_float64 int32_to_float64_sparc64
-#define int32_to_floatx80 int32_to_floatx80_sparc64
-#define int64_to_float128 int64_to_float128_sparc64
-#define int64_to_float32 int64_to_float32_sparc64
-#define int64_to_float64 int64_to_float64_sparc64
-#define int64_to_floatx80 int64_to_floatx80_sparc64
-#define invalidate_and_set_dirty invalidate_and_set_dirty_sparc64
-#define invalidate_page_bitmap invalidate_page_bitmap_sparc64
-#define io_mem_read io_mem_read_sparc64
-#define io_mem_write io_mem_write_sparc64
-#define io_readb io_readb_sparc64
-#define io_readl io_readl_sparc64
-#define io_readq io_readq_sparc64
-#define io_readw io_readw_sparc64
-#define iotlb_to_region iotlb_to_region_sparc64
-#define io_writeb io_writeb_sparc64
-#define io_writel io_writel_sparc64
-#define io_writeq io_writeq_sparc64
-#define io_writew io_writew_sparc64
-#define is_a64 is_a64_sparc64
-#define is_help_option is_help_option_sparc64
-#define isr_read isr_read_sparc64
-#define is_valid_option_list is_valid_option_list_sparc64
-#define iwmmxt_load_creg iwmmxt_load_creg_sparc64
-#define iwmmxt_load_reg iwmmxt_load_reg_sparc64
-#define iwmmxt_store_creg iwmmxt_store_creg_sparc64
-#define iwmmxt_store_reg iwmmxt_store_reg_sparc64
-#define __jit_debug_descriptor __jit_debug_descriptor_sparc64
-#define __jit_debug_register_code __jit_debug_register_code_sparc64
-#define kvm_to_cpreg_id kvm_to_cpreg_id_sparc64
-#define last_ram_offset last_ram_offset_sparc64
-#define ldl_be_p ldl_be_p_sparc64
-#define ldl_be_phys ldl_be_phys_sparc64
-#define ldl_he_p ldl_he_p_sparc64
-#define ldl_le_p ldl_le_p_sparc64
-#define ldl_le_phys ldl_le_phys_sparc64
-#define ldl_phys ldl_phys_sparc64
-#define ldl_phys_internal ldl_phys_internal_sparc64
-#define ldq_be_p ldq_be_p_sparc64
-#define ldq_be_phys ldq_be_phys_sparc64
-#define ldq_he_p ldq_he_p_sparc64
-#define ldq_le_p ldq_le_p_sparc64
-#define ldq_le_phys ldq_le_phys_sparc64
-#define ldq_phys ldq_phys_sparc64
-#define ldq_phys_internal ldq_phys_internal_sparc64
-#define ldst_name ldst_name_sparc64
-#define ldub_p ldub_p_sparc64
-#define ldub_phys ldub_phys_sparc64
-#define lduw_be_p lduw_be_p_sparc64
-#define lduw_be_phys lduw_be_phys_sparc64
-#define lduw_he_p lduw_he_p_sparc64
-#define lduw_le_p lduw_le_p_sparc64
-#define lduw_le_phys lduw_le_phys_sparc64
-#define lduw_phys lduw_phys_sparc64
-#define lduw_phys_internal lduw_phys_internal_sparc64
-#define le128 le128_sparc64
-#define linked_bp_matches linked_bp_matches_sparc64
-#define listener_add_address_space listener_add_address_space_sparc64
-#define load_cpu_offset load_cpu_offset_sparc64
-#define load_reg load_reg_sparc64
-#define load_reg_var load_reg_var_sparc64
-#define log_cpu_state log_cpu_state_sparc64
-#define lpae_cp_reginfo lpae_cp_reginfo_sparc64
-#define lt128 lt128_sparc64
-#define machine_class_init machine_class_init_sparc64
-#define machine_finalize machine_finalize_sparc64
-#define machine_info machine_info_sparc64
-#define machine_initfn machine_initfn_sparc64
-#define machine_register_types machine_register_types_sparc64
-#define machvirt_init machvirt_init_sparc64
-#define machvirt_machine_init machvirt_machine_init_sparc64
-#define maj maj_sparc64
-#define mapping_conflict mapping_conflict_sparc64
-#define mapping_contiguous mapping_contiguous_sparc64
-#define mapping_have_same_region mapping_have_same_region_sparc64
-#define mapping_merge mapping_merge_sparc64
-#define mem_add mem_add_sparc64
-#define mem_begin mem_begin_sparc64
-#define mem_commit mem_commit_sparc64
-#define memory_access_is_direct memory_access_is_direct_sparc64
-#define memory_access_size memory_access_size_sparc64
-#define memory_init memory_init_sparc64
-#define memory_listener_match memory_listener_match_sparc64
-#define memory_listener_register memory_listener_register_sparc64
-#define memory_listener_unregister memory_listener_unregister_sparc64
-#define memory_map_init memory_map_init_sparc64
-#define memory_mapping_filter memory_mapping_filter_sparc64
-#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_sparc64
-#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_sparc64
-#define memory_mapping_list_free memory_mapping_list_free_sparc64
-#define memory_mapping_list_init memory_mapping_list_init_sparc64
-#define memory_region_access_valid memory_region_access_valid_sparc64
-#define memory_region_add_subregion memory_region_add_subregion_sparc64
-#define memory_region_add_subregion_common memory_region_add_subregion_common_sparc64
-#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_sparc64
-#define memory_region_big_endian memory_region_big_endian_sparc64
-#define memory_region_clear_pending memory_region_clear_pending_sparc64
-#define memory_region_del_subregion memory_region_del_subregion_sparc64
-#define memory_region_destructor_alias memory_region_destructor_alias_sparc64
-#define memory_region_destructor_none memory_region_destructor_none_sparc64
-#define memory_region_destructor_ram memory_region_destructor_ram_sparc64
-#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_sparc64
-#define memory_region_dispatch_read memory_region_dispatch_read_sparc64
-#define memory_region_dispatch_read1 memory_region_dispatch_read1_sparc64
-#define memory_region_dispatch_write memory_region_dispatch_write_sparc64
-#define memory_region_escape_name memory_region_escape_name_sparc64
-#define memory_region_finalize memory_region_finalize_sparc64
-#define memory_region_find memory_region_find_sparc64
-#define memory_region_get_addr memory_region_get_addr_sparc64
-#define memory_region_get_alignment memory_region_get_alignment_sparc64
-#define memory_region_get_container memory_region_get_container_sparc64
-#define memory_region_get_fd memory_region_get_fd_sparc64
-#define memory_region_get_may_overlap memory_region_get_may_overlap_sparc64
-#define memory_region_get_priority memory_region_get_priority_sparc64
-#define memory_region_get_ram_addr memory_region_get_ram_addr_sparc64
-#define memory_region_get_ram_ptr memory_region_get_ram_ptr_sparc64
-#define memory_region_get_size memory_region_get_size_sparc64
-#define memory_region_info memory_region_info_sparc64
-#define memory_region_init memory_region_init_sparc64
-#define memory_region_init_alias memory_region_init_alias_sparc64
-#define memory_region_initfn memory_region_initfn_sparc64
-#define memory_region_init_io memory_region_init_io_sparc64
-#define memory_region_init_ram memory_region_init_ram_sparc64
-#define memory_region_init_ram_ptr memory_region_init_ram_ptr_sparc64
-#define memory_region_init_reservation memory_region_init_reservation_sparc64
-#define memory_region_is_iommu memory_region_is_iommu_sparc64
-#define memory_region_is_logging memory_region_is_logging_sparc64
-#define memory_region_is_mapped memory_region_is_mapped_sparc64
-#define memory_region_is_ram memory_region_is_ram_sparc64
-#define memory_region_is_rom memory_region_is_rom_sparc64
-#define memory_region_is_romd memory_region_is_romd_sparc64
-#define memory_region_is_skip_dump memory_region_is_skip_dump_sparc64
-#define memory_region_is_unassigned memory_region_is_unassigned_sparc64
-#define memory_region_name memory_region_name_sparc64
-#define memory_region_need_escape memory_region_need_escape_sparc64
-#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_sparc64
-#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_sparc64
-#define memory_region_present memory_region_present_sparc64
-#define memory_region_read_accessor memory_region_read_accessor_sparc64
-#define memory_region_readd_subregion memory_region_readd_subregion_sparc64
-#define memory_region_ref memory_region_ref_sparc64
-#define memory_region_resolve_container memory_region_resolve_container_sparc64
-#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_sparc64
-#define memory_region_section_get_iotlb memory_region_section_get_iotlb_sparc64
-#define memory_region_set_address memory_region_set_address_sparc64
-#define memory_region_set_alias_offset memory_region_set_alias_offset_sparc64
-#define memory_region_set_enabled memory_region_set_enabled_sparc64
-#define memory_region_set_readonly memory_region_set_readonly_sparc64
-#define memory_region_set_skip_dump memory_region_set_skip_dump_sparc64
-#define memory_region_size memory_region_size_sparc64
-#define memory_region_to_address_space memory_region_to_address_space_sparc64
-#define memory_region_transaction_begin memory_region_transaction_begin_sparc64
-#define memory_region_transaction_commit memory_region_transaction_commit_sparc64
-#define memory_region_unref memory_region_unref_sparc64
-#define memory_region_update_container_subregions memory_region_update_container_subregions_sparc64
-#define memory_region_write_accessor memory_region_write_accessor_sparc64
-#define memory_region_wrong_endianness memory_region_wrong_endianness_sparc64
-#define memory_try_enable_merging memory_try_enable_merging_sparc64
-#define module_call_init module_call_init_sparc64
-#define module_load module_load_sparc64
-#define mpidr_cp_reginfo mpidr_cp_reginfo_sparc64
-#define mpidr_read mpidr_read_sparc64
-#define msr_mask msr_mask_sparc64
-#define mul128By64To192 mul128By64To192_sparc64
-#define mul128To256 mul128To256_sparc64
-#define mul64To128 mul64To128_sparc64
-#define muldiv64 muldiv64_sparc64
-#define neon_2rm_is_float_op neon_2rm_is_float_op_sparc64
-#define neon_2rm_sizes neon_2rm_sizes_sparc64
-#define neon_3r_sizes neon_3r_sizes_sparc64
-#define neon_get_scalar neon_get_scalar_sparc64
-#define neon_load_reg neon_load_reg_sparc64
-#define neon_load_reg64 neon_load_reg64_sparc64
-#define neon_load_scratch neon_load_scratch_sparc64
-#define neon_ls_element_type neon_ls_element_type_sparc64
-#define neon_reg_offset neon_reg_offset_sparc64
-#define neon_store_reg neon_store_reg_sparc64
-#define neon_store_reg64 neon_store_reg64_sparc64
-#define neon_store_scratch neon_store_scratch_sparc64
-#define new_ldst_label new_ldst_label_sparc64
-#define next_list next_list_sparc64
-#define normalizeFloat128Subnormal normalizeFloat128Subnormal_sparc64
-#define normalizeFloat16Subnormal normalizeFloat16Subnormal_sparc64
-#define normalizeFloat32Subnormal normalizeFloat32Subnormal_sparc64
-#define normalizeFloat64Subnormal normalizeFloat64Subnormal_sparc64
-#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_sparc64
-#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_sparc64
-#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_sparc64
-#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_sparc64
-#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_sparc64
-#define not_v6_cp_reginfo not_v6_cp_reginfo_sparc64
-#define not_v7_cp_reginfo not_v7_cp_reginfo_sparc64
-#define not_v8_cp_reginfo not_v8_cp_reginfo_sparc64
-#define object_child_foreach object_child_foreach_sparc64
-#define object_class_foreach object_class_foreach_sparc64
-#define object_class_foreach_tramp object_class_foreach_tramp_sparc64
-#define object_class_get_list object_class_get_list_sparc64
-#define object_class_get_list_tramp object_class_get_list_tramp_sparc64
-#define object_class_get_parent object_class_get_parent_sparc64
-#define object_deinit object_deinit_sparc64
-#define object_dynamic_cast object_dynamic_cast_sparc64
-#define object_finalize object_finalize_sparc64
-#define object_finalize_child_property object_finalize_child_property_sparc64
-#define object_get_child_property object_get_child_property_sparc64
-#define object_get_link_property object_get_link_property_sparc64
-#define object_get_root object_get_root_sparc64
-#define object_initialize_with_type object_initialize_with_type_sparc64
-#define object_init_with_type object_init_with_type_sparc64
-#define object_instance_init object_instance_init_sparc64
-#define object_new_with_type object_new_with_type_sparc64
-#define object_post_init_with_type object_post_init_with_type_sparc64
-#define object_property_add_alias object_property_add_alias_sparc64
-#define object_property_add_link object_property_add_link_sparc64
-#define object_property_add_uint16_ptr object_property_add_uint16_ptr_sparc64
-#define object_property_add_uint32_ptr object_property_add_uint32_ptr_sparc64
-#define object_property_add_uint64_ptr object_property_add_uint64_ptr_sparc64
-#define object_property_add_uint8_ptr object_property_add_uint8_ptr_sparc64
-#define object_property_allow_set_link object_property_allow_set_link_sparc64
-#define object_property_del object_property_del_sparc64
-#define object_property_del_all object_property_del_all_sparc64
-#define object_property_find object_property_find_sparc64
-#define object_property_get object_property_get_sparc64
-#define object_property_get_bool object_property_get_bool_sparc64
-#define object_property_get_int object_property_get_int_sparc64
-#define object_property_get_link object_property_get_link_sparc64
-#define object_property_get_qobject object_property_get_qobject_sparc64
-#define object_property_get_str object_property_get_str_sparc64
-#define object_property_get_type object_property_get_type_sparc64
-#define object_property_is_child object_property_is_child_sparc64
-#define object_property_set object_property_set_sparc64
-#define object_property_set_description object_property_set_description_sparc64
-#define object_property_set_link object_property_set_link_sparc64
-#define object_property_set_qobject object_property_set_qobject_sparc64
-#define object_release_link_property object_release_link_property_sparc64
-#define object_resolve_abs_path object_resolve_abs_path_sparc64
-#define object_resolve_child_property object_resolve_child_property_sparc64
-#define object_resolve_link object_resolve_link_sparc64
-#define object_resolve_link_property object_resolve_link_property_sparc64
-#define object_resolve_partial_path object_resolve_partial_path_sparc64
-#define object_resolve_path object_resolve_path_sparc64
-#define object_resolve_path_component object_resolve_path_component_sparc64
-#define object_resolve_path_type object_resolve_path_type_sparc64
-#define object_set_link_property object_set_link_property_sparc64
-#define object_unparent object_unparent_sparc64
-#define omap_cachemaint_write omap_cachemaint_write_sparc64
-#define omap_cp_reginfo omap_cp_reginfo_sparc64
-#define omap_threadid_write omap_threadid_write_sparc64
-#define omap_ticonfig_write omap_ticonfig_write_sparc64
-#define omap_wfi_write omap_wfi_write_sparc64
-#define op_bits op_bits_sparc64
-#define open_modeflags open_modeflags_sparc64
-#define op_to_mov op_to_mov_sparc64
-#define op_to_movi op_to_movi_sparc64
-#define output_type_enum output_type_enum_sparc64
-#define packFloat128 packFloat128_sparc64
-#define packFloat16 packFloat16_sparc64
-#define packFloat32 packFloat32_sparc64
-#define packFloat64 packFloat64_sparc64
-#define packFloatx80 packFloatx80_sparc64
-#define page_find page_find_sparc64
-#define page_find_alloc page_find_alloc_sparc64
-#define page_flush_tb page_flush_tb_sparc64
-#define page_flush_tb_1 page_flush_tb_1_sparc64
-#define page_init page_init_sparc64
-#define page_size_init page_size_init_sparc64
-#define par par_sparc64
-#define parse_array parse_array_sparc64
-#define parse_error parse_error_sparc64
-#define parse_escape parse_escape_sparc64
-#define parse_keyword parse_keyword_sparc64
-#define parse_literal parse_literal_sparc64
-#define parse_object parse_object_sparc64
-#define parse_optional parse_optional_sparc64
-#define parse_option_bool parse_option_bool_sparc64
-#define parse_option_number parse_option_number_sparc64
-#define parse_option_size parse_option_size_sparc64
-#define parse_pair parse_pair_sparc64
-#define parser_context_free parser_context_free_sparc64
-#define parser_context_new parser_context_new_sparc64
-#define parser_context_peek_token parser_context_peek_token_sparc64
-#define parser_context_pop_token parser_context_pop_token_sparc64
-#define parser_context_restore parser_context_restore_sparc64
-#define parser_context_save parser_context_save_sparc64
-#define parse_str parse_str_sparc64
-#define parse_type_bool parse_type_bool_sparc64
-#define parse_type_int parse_type_int_sparc64
-#define parse_type_number parse_type_number_sparc64
-#define parse_type_size parse_type_size_sparc64
-#define parse_type_str parse_type_str_sparc64
-#define parse_value parse_value_sparc64
-#define par_write par_write_sparc64
-#define patch_reloc patch_reloc_sparc64
-#define phys_map_node_alloc phys_map_node_alloc_sparc64
-#define phys_map_node_reserve phys_map_node_reserve_sparc64
-#define phys_mem_alloc phys_mem_alloc_sparc64
-#define phys_mem_set_alloc phys_mem_set_alloc_sparc64
-#define phys_page_compact phys_page_compact_sparc64
-#define phys_page_compact_all phys_page_compact_all_sparc64
-#define phys_page_find phys_page_find_sparc64
-#define phys_page_set phys_page_set_sparc64
-#define phys_page_set_level phys_page_set_level_sparc64
-#define phys_section_add phys_section_add_sparc64
-#define phys_section_destroy phys_section_destroy_sparc64
-#define phys_sections_free phys_sections_free_sparc64
-#define pickNaN pickNaN_sparc64
-#define pickNaNMulAdd pickNaNMulAdd_sparc64
-#define pmccfiltr_write pmccfiltr_write_sparc64
-#define pmccntr_read pmccntr_read_sparc64
-#define pmccntr_sync pmccntr_sync_sparc64
-#define pmccntr_write pmccntr_write_sparc64
-#define pmccntr_write32 pmccntr_write32_sparc64
-#define pmcntenclr_write pmcntenclr_write_sparc64
-#define pmcntenset_write pmcntenset_write_sparc64
-#define pmcr_write pmcr_write_sparc64
-#define pmintenclr_write pmintenclr_write_sparc64
-#define pmintenset_write pmintenset_write_sparc64
-#define pmovsr_write pmovsr_write_sparc64
-#define pmreg_access pmreg_access_sparc64
-#define pmsav5_cp_reginfo pmsav5_cp_reginfo_sparc64
-#define pmsav5_data_ap_read pmsav5_data_ap_read_sparc64
-#define pmsav5_data_ap_write pmsav5_data_ap_write_sparc64
-#define pmsav5_insn_ap_read pmsav5_insn_ap_read_sparc64
-#define pmsav5_insn_ap_write pmsav5_insn_ap_write_sparc64
-#define pmuserenr_write pmuserenr_write_sparc64
-#define pmxevtyper_write pmxevtyper_write_sparc64
-#define print_type_bool print_type_bool_sparc64
-#define print_type_int print_type_int_sparc64
-#define print_type_number print_type_number_sparc64
-#define print_type_size print_type_size_sparc64
-#define print_type_str print_type_str_sparc64
-#define propagateFloat128NaN propagateFloat128NaN_sparc64
-#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_sparc64
-#define propagateFloat32NaN propagateFloat32NaN_sparc64
-#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_sparc64
-#define propagateFloat64NaN propagateFloat64NaN_sparc64
-#define propagateFloatx80NaN propagateFloatx80NaN_sparc64
-#define property_get_alias property_get_alias_sparc64
-#define property_get_bool property_get_bool_sparc64
-#define property_get_str property_get_str_sparc64
-#define property_get_uint16_ptr property_get_uint16_ptr_sparc64
-#define property_get_uint32_ptr property_get_uint32_ptr_sparc64
-#define property_get_uint64_ptr property_get_uint64_ptr_sparc64
-#define property_get_uint8_ptr property_get_uint8_ptr_sparc64
-#define property_release_alias property_release_alias_sparc64
-#define property_release_bool property_release_bool_sparc64
-#define property_release_str property_release_str_sparc64
-#define property_resolve_alias property_resolve_alias_sparc64
-#define property_set_alias property_set_alias_sparc64
-#define property_set_bool property_set_bool_sparc64
-#define property_set_str property_set_str_sparc64
-#define pstate_read pstate_read_sparc64
-#define pstate_write pstate_write_sparc64
-#define pxa250_initfn pxa250_initfn_sparc64
-#define pxa255_initfn pxa255_initfn_sparc64
-#define pxa260_initfn pxa260_initfn_sparc64
-#define pxa261_initfn pxa261_initfn_sparc64
-#define pxa262_initfn pxa262_initfn_sparc64
-#define pxa270a0_initfn pxa270a0_initfn_sparc64
-#define pxa270a1_initfn pxa270a1_initfn_sparc64
-#define pxa270b0_initfn pxa270b0_initfn_sparc64
-#define pxa270b1_initfn pxa270b1_initfn_sparc64
-#define pxa270c0_initfn pxa270c0_initfn_sparc64
-#define pxa270c5_initfn pxa270c5_initfn_sparc64
-#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_sparc64
-#define qapi_dealloc_end_list qapi_dealloc_end_list_sparc64
-#define qapi_dealloc_end_struct qapi_dealloc_end_struct_sparc64
-#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_sparc64
-#define qapi_dealloc_next_list qapi_dealloc_next_list_sparc64
-#define qapi_dealloc_pop qapi_dealloc_pop_sparc64
-#define qapi_dealloc_push qapi_dealloc_push_sparc64
-#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_sparc64
-#define qapi_dealloc_start_list qapi_dealloc_start_list_sparc64
-#define qapi_dealloc_start_struct qapi_dealloc_start_struct_sparc64
-#define qapi_dealloc_start_union qapi_dealloc_start_union_sparc64
-#define qapi_dealloc_type_bool qapi_dealloc_type_bool_sparc64
-#define qapi_dealloc_type_enum qapi_dealloc_type_enum_sparc64
-#define qapi_dealloc_type_int qapi_dealloc_type_int_sparc64
-#define qapi_dealloc_type_number qapi_dealloc_type_number_sparc64
-#define qapi_dealloc_type_size qapi_dealloc_type_size_sparc64
-#define qapi_dealloc_type_str qapi_dealloc_type_str_sparc64
-#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_sparc64
-#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_sparc64
-#define qapi_free_boolList qapi_free_boolList_sparc64
-#define qapi_free_ErrorClassList qapi_free_ErrorClassList_sparc64
-#define qapi_free_int16List qapi_free_int16List_sparc64
-#define qapi_free_int32List qapi_free_int32List_sparc64
-#define qapi_free_int64List qapi_free_int64List_sparc64
-#define qapi_free_int8List qapi_free_int8List_sparc64
-#define qapi_free_intList qapi_free_intList_sparc64
-#define qapi_free_numberList qapi_free_numberList_sparc64
-#define qapi_free_strList qapi_free_strList_sparc64
-#define qapi_free_uint16List qapi_free_uint16List_sparc64
-#define qapi_free_uint32List qapi_free_uint32List_sparc64
-#define qapi_free_uint64List qapi_free_uint64List_sparc64
-#define qapi_free_uint8List qapi_free_uint8List_sparc64
-#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_sparc64
-#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_sparc64
-#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_sparc64
-#define qbool_destroy_obj qbool_destroy_obj_sparc64
-#define qbool_from_int qbool_from_int_sparc64
-#define qbool_get_int qbool_get_int_sparc64
-#define qbool_type qbool_type_sparc64
-#define qbus_create qbus_create_sparc64
-#define qbus_create_inplace qbus_create_inplace_sparc64
-#define qbus_finalize qbus_finalize_sparc64
-#define qbus_initfn qbus_initfn_sparc64
-#define qbus_realize qbus_realize_sparc64
-#define qdev_create qdev_create_sparc64
-#define qdev_get_type qdev_get_type_sparc64
-#define qdev_register_types qdev_register_types_sparc64
-#define qdev_set_parent_bus qdev_set_parent_bus_sparc64
-#define qdev_try_create qdev_try_create_sparc64
-#define qdict_add_key qdict_add_key_sparc64
-#define qdict_array_split qdict_array_split_sparc64
-#define qdict_clone_shallow qdict_clone_shallow_sparc64
-#define qdict_del qdict_del_sparc64
-#define qdict_destroy_obj qdict_destroy_obj_sparc64
-#define qdict_entry_key qdict_entry_key_sparc64
-#define qdict_entry_value qdict_entry_value_sparc64
-#define qdict_extract_subqdict qdict_extract_subqdict_sparc64
-#define qdict_find qdict_find_sparc64
-#define qdict_first qdict_first_sparc64
-#define qdict_flatten qdict_flatten_sparc64
-#define qdict_flatten_qdict qdict_flatten_qdict_sparc64
-#define qdict_flatten_qlist qdict_flatten_qlist_sparc64
-#define qdict_get qdict_get_sparc64
-#define qdict_get_bool qdict_get_bool_sparc64
-#define qdict_get_double qdict_get_double_sparc64
-#define qdict_get_int qdict_get_int_sparc64
-#define qdict_get_obj qdict_get_obj_sparc64
-#define qdict_get_qdict qdict_get_qdict_sparc64
-#define qdict_get_qlist qdict_get_qlist_sparc64
-#define qdict_get_str qdict_get_str_sparc64
-#define qdict_get_try_bool qdict_get_try_bool_sparc64
-#define qdict_get_try_int qdict_get_try_int_sparc64
-#define qdict_get_try_str qdict_get_try_str_sparc64
-#define qdict_haskey qdict_haskey_sparc64
-#define qdict_has_prefixed_entries qdict_has_prefixed_entries_sparc64
-#define qdict_iter qdict_iter_sparc64
-#define qdict_join qdict_join_sparc64
-#define qdict_new qdict_new_sparc64
-#define qdict_next qdict_next_sparc64
-#define qdict_next_entry qdict_next_entry_sparc64
-#define qdict_put_obj qdict_put_obj_sparc64
-#define qdict_size qdict_size_sparc64
-#define qdict_type qdict_type_sparc64
-#define qemu_clock_get_us qemu_clock_get_us_sparc64
-#define qemu_clock_ptr qemu_clock_ptr_sparc64
-#define qemu_clocks qemu_clocks_sparc64
-#define qemu_get_cpu qemu_get_cpu_sparc64
-#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_sparc64
-#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_sparc64
-#define qemu_get_ram_block qemu_get_ram_block_sparc64
-#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_sparc64
-#define qemu_get_ram_fd qemu_get_ram_fd_sparc64
-#define qemu_get_ram_ptr qemu_get_ram_ptr_sparc64
-#define qemu_host_page_mask qemu_host_page_mask_sparc64
-#define qemu_host_page_size qemu_host_page_size_sparc64
-#define qemu_init_vcpu qemu_init_vcpu_sparc64
-#define qemu_ld_helpers qemu_ld_helpers_sparc64
-#define qemu_log_close qemu_log_close_sparc64
-#define qemu_log_enabled qemu_log_enabled_sparc64
-#define qemu_log_flush qemu_log_flush_sparc64
-#define qemu_loglevel_mask qemu_loglevel_mask_sparc64
-#define qemu_log_vprintf qemu_log_vprintf_sparc64
-#define qemu_oom_check qemu_oom_check_sparc64
-#define qemu_parse_fd qemu_parse_fd_sparc64
-#define qemu_ram_addr_from_host qemu_ram_addr_from_host_sparc64
-#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_sparc64
-#define qemu_ram_alloc qemu_ram_alloc_sparc64
-#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_sparc64
-#define qemu_ram_foreach_block qemu_ram_foreach_block_sparc64
-#define qemu_ram_free qemu_ram_free_sparc64
-#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_sparc64
-#define qemu_ram_ptr_length qemu_ram_ptr_length_sparc64
-#define qemu_ram_remap qemu_ram_remap_sparc64
-#define qemu_ram_setup_dump qemu_ram_setup_dump_sparc64
-#define qemu_ram_unset_idstr qemu_ram_unset_idstr_sparc64
-#define qemu_real_host_page_size qemu_real_host_page_size_sparc64
-#define qemu_st_helpers qemu_st_helpers_sparc64
-#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_sparc64
-#define qemu_try_memalign qemu_try_memalign_sparc64
-#define qentry_destroy qentry_destroy_sparc64
-#define qerror_human qerror_human_sparc64
-#define qerror_report qerror_report_sparc64
-#define qerror_report_err qerror_report_err_sparc64
-#define qfloat_destroy_obj qfloat_destroy_obj_sparc64
-#define qfloat_from_double qfloat_from_double_sparc64
-#define qfloat_get_double qfloat_get_double_sparc64
-#define qfloat_type qfloat_type_sparc64
-#define qint_destroy_obj qint_destroy_obj_sparc64
-#define qint_from_int qint_from_int_sparc64
-#define qint_get_int qint_get_int_sparc64
-#define qint_type qint_type_sparc64
-#define qlist_append_obj qlist_append_obj_sparc64
-#define qlist_copy qlist_copy_sparc64
-#define qlist_copy_elem qlist_copy_elem_sparc64
-#define qlist_destroy_obj qlist_destroy_obj_sparc64
-#define qlist_empty qlist_empty_sparc64
-#define qlist_entry_obj qlist_entry_obj_sparc64
-#define qlist_first qlist_first_sparc64
-#define qlist_iter qlist_iter_sparc64
-#define qlist_new qlist_new_sparc64
-#define qlist_next qlist_next_sparc64
-#define qlist_peek qlist_peek_sparc64
-#define qlist_pop qlist_pop_sparc64
-#define qlist_size qlist_size_sparc64
-#define qlist_size_iter qlist_size_iter_sparc64
-#define qlist_type qlist_type_sparc64
-#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_sparc64
-#define qmp_input_end_list qmp_input_end_list_sparc64
-#define qmp_input_end_struct qmp_input_end_struct_sparc64
-#define qmp_input_get_next_type qmp_input_get_next_type_sparc64
-#define qmp_input_get_object qmp_input_get_object_sparc64
-#define qmp_input_get_visitor qmp_input_get_visitor_sparc64
-#define qmp_input_next_list qmp_input_next_list_sparc64
-#define qmp_input_optional qmp_input_optional_sparc64
-#define qmp_input_pop qmp_input_pop_sparc64
-#define qmp_input_push qmp_input_push_sparc64
-#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_sparc64
-#define qmp_input_start_list qmp_input_start_list_sparc64
-#define qmp_input_start_struct qmp_input_start_struct_sparc64
-#define qmp_input_type_bool qmp_input_type_bool_sparc64
-#define qmp_input_type_int qmp_input_type_int_sparc64
-#define qmp_input_type_number qmp_input_type_number_sparc64
-#define qmp_input_type_str qmp_input_type_str_sparc64
-#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_sparc64
-#define qmp_input_visitor_new qmp_input_visitor_new_sparc64
-#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_sparc64
-#define qmp_output_add_obj qmp_output_add_obj_sparc64
qmp_output_end_list_sparc64 -#define qmp_output_end_struct qmp_output_end_struct_sparc64 -#define qmp_output_first qmp_output_first_sparc64 -#define qmp_output_get_qobject qmp_output_get_qobject_sparc64 -#define qmp_output_get_visitor qmp_output_get_visitor_sparc64 -#define qmp_output_last qmp_output_last_sparc64 -#define qmp_output_next_list qmp_output_next_list_sparc64 -#define qmp_output_pop qmp_output_pop_sparc64 -#define qmp_output_push_obj qmp_output_push_obj_sparc64 -#define qmp_output_start_list qmp_output_start_list_sparc64 -#define qmp_output_start_struct qmp_output_start_struct_sparc64 -#define qmp_output_type_bool qmp_output_type_bool_sparc64 -#define qmp_output_type_int qmp_output_type_int_sparc64 -#define qmp_output_type_number qmp_output_type_number_sparc64 -#define qmp_output_type_str qmp_output_type_str_sparc64 -#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_sparc64 -#define qmp_output_visitor_new qmp_output_visitor_new_sparc64 -#define qobject_decref qobject_decref_sparc64 -#define qobject_to_qbool qobject_to_qbool_sparc64 -#define qobject_to_qdict qobject_to_qdict_sparc64 -#define qobject_to_qfloat qobject_to_qfloat_sparc64 -#define qobject_to_qint qobject_to_qint_sparc64 -#define qobject_to_qlist qobject_to_qlist_sparc64 -#define qobject_to_qstring qobject_to_qstring_sparc64 -#define qobject_type qobject_type_sparc64 -#define qstring_append qstring_append_sparc64 -#define qstring_append_chr qstring_append_chr_sparc64 -#define qstring_append_int qstring_append_int_sparc64 -#define qstring_destroy_obj qstring_destroy_obj_sparc64 -#define qstring_from_escaped_str qstring_from_escaped_str_sparc64 -#define qstring_from_str qstring_from_str_sparc64 -#define qstring_from_substr qstring_from_substr_sparc64 -#define qstring_get_length qstring_get_length_sparc64 -#define qstring_get_str qstring_get_str_sparc64 -#define qstring_new qstring_new_sparc64 -#define qstring_type qstring_type_sparc64 -#define ram_block_add ram_block_add_sparc64 -#define ram_size ram_size_sparc64 -#define range_compare range_compare_sparc64 -#define range_covers_byte range_covers_byte_sparc64 -#define range_get_last range_get_last_sparc64 -#define range_merge range_merge_sparc64 -#define ranges_can_merge ranges_can_merge_sparc64 -#define raw_read raw_read_sparc64 -#define raw_write raw_write_sparc64 -#define rcon rcon_sparc64 -#define read_raw_cp_reg read_raw_cp_reg_sparc64 -#define recip_estimate recip_estimate_sparc64 -#define recip_sqrt_estimate recip_sqrt_estimate_sparc64 -#define register_cp_regs_for_features register_cp_regs_for_features_sparc64 -#define register_multipage register_multipage_sparc64 -#define register_subpage register_subpage_sparc64 -#define register_tm_clones register_tm_clones_sparc64 -#define register_types_object register_types_object_sparc64 -#define regnames regnames_sparc64 -#define render_memory_region render_memory_region_sparc64 -#define reset_all_temps reset_all_temps_sparc64 -#define reset_temp reset_temp_sparc64 -#define rol32 rol32_sparc64 -#define rol64 rol64_sparc64 -#define ror32 ror32_sparc64 -#define ror64 ror64_sparc64 -#define roundAndPackFloat128 roundAndPackFloat128_sparc64 -#define roundAndPackFloat16 roundAndPackFloat16_sparc64 -#define roundAndPackFloat32 roundAndPackFloat32_sparc64 -#define roundAndPackFloat64 roundAndPackFloat64_sparc64 -#define roundAndPackFloatx80 roundAndPackFloatx80_sparc64 -#define roundAndPackInt32 roundAndPackInt32_sparc64 -#define roundAndPackInt64 roundAndPackInt64_sparc64 -#define roundAndPackUint64 
roundAndPackUint64_sparc64 -#define round_to_inf round_to_inf_sparc64 -#define run_on_cpu run_on_cpu_sparc64 -#define s0 s0_sparc64 -#define S0 S0_sparc64 -#define s1 s1_sparc64 -#define S1 S1_sparc64 -#define sa1100_initfn sa1100_initfn_sparc64 -#define sa1110_initfn sa1110_initfn_sparc64 -#define save_globals save_globals_sparc64 -#define scr_write scr_write_sparc64 -#define sctlr_write sctlr_write_sparc64 -#define set_bit set_bit_sparc64 -#define set_bits set_bits_sparc64 -#define set_default_nan_mode set_default_nan_mode_sparc64 -#define set_feature set_feature_sparc64 -#define set_float_detect_tininess set_float_detect_tininess_sparc64 -#define set_float_exception_flags set_float_exception_flags_sparc64 -#define set_float_rounding_mode set_float_rounding_mode_sparc64 -#define set_flush_inputs_to_zero set_flush_inputs_to_zero_sparc64 -#define set_flush_to_zero set_flush_to_zero_sparc64 -#define set_swi_errno set_swi_errno_sparc64 -#define sextract32 sextract32_sparc64 -#define sextract64 sextract64_sparc64 -#define shift128ExtraRightJamming shift128ExtraRightJamming_sparc64 -#define shift128Right shift128Right_sparc64 -#define shift128RightJamming shift128RightJamming_sparc64 -#define shift32RightJamming shift32RightJamming_sparc64 -#define shift64ExtraRightJamming shift64ExtraRightJamming_sparc64 -#define shift64RightJamming shift64RightJamming_sparc64 -#define shifter_out_im shifter_out_im_sparc64 -#define shortShift128Left shortShift128Left_sparc64 -#define shortShift192Left shortShift192Left_sparc64 -#define simple_mpu_ap_bits simple_mpu_ap_bits_sparc64 -#define size_code_gen_buffer size_code_gen_buffer_sparc64 -#define softmmu_lock_user softmmu_lock_user_sparc64 -#define softmmu_lock_user_string softmmu_lock_user_string_sparc64 -#define softmmu_tget32 softmmu_tget32_sparc64 -#define softmmu_tget8 softmmu_tget8_sparc64 -#define softmmu_tput32 softmmu_tput32_sparc64 -#define softmmu_unlock_user softmmu_unlock_user_sparc64 -#define sort_constraints sort_constraints_sparc64 -#define sp_el0_access sp_el0_access_sparc64 -#define spsel_read spsel_read_sparc64 -#define spsel_write spsel_write_sparc64 -#define start_list start_list_sparc64 -#define stb_p stb_p_sparc64 -#define stb_phys stb_phys_sparc64 -#define stl_be_p stl_be_p_sparc64 -#define stl_be_phys stl_be_phys_sparc64 -#define stl_he_p stl_he_p_sparc64 -#define stl_le_p stl_le_p_sparc64 -#define stl_le_phys stl_le_phys_sparc64 -#define stl_phys stl_phys_sparc64 -#define stl_phys_internal stl_phys_internal_sparc64 -#define stl_phys_notdirty stl_phys_notdirty_sparc64 -#define store_cpu_offset store_cpu_offset_sparc64 -#define store_reg store_reg_sparc64 -#define store_reg_bx store_reg_bx_sparc64 -#define store_reg_from_load store_reg_from_load_sparc64 -#define stq_be_p stq_be_p_sparc64 -#define stq_be_phys stq_be_phys_sparc64 -#define stq_he_p stq_he_p_sparc64 -#define stq_le_p stq_le_p_sparc64 -#define stq_le_phys stq_le_phys_sparc64 -#define stq_phys stq_phys_sparc64 -#define string_input_get_visitor string_input_get_visitor_sparc64 -#define string_input_visitor_cleanup string_input_visitor_cleanup_sparc64 -#define string_input_visitor_new string_input_visitor_new_sparc64 -#define strongarm_cp_reginfo strongarm_cp_reginfo_sparc64 -#define strstart strstart_sparc64 -#define strtosz strtosz_sparc64 -#define strtosz_suffix strtosz_suffix_sparc64 -#define stw_be_p stw_be_p_sparc64 -#define stw_be_phys stw_be_phys_sparc64 -#define stw_he_p stw_he_p_sparc64 -#define stw_le_p stw_le_p_sparc64 -#define stw_le_phys stw_le_phys_sparc64 
-#define stw_phys stw_phys_sparc64 -#define stw_phys_internal stw_phys_internal_sparc64 -#define sub128 sub128_sparc64 -#define sub16_sat sub16_sat_sparc64 -#define sub16_usat sub16_usat_sparc64 -#define sub192 sub192_sparc64 -#define sub8_sat sub8_sat_sparc64 -#define sub8_usat sub8_usat_sparc64 -#define subFloat128Sigs subFloat128Sigs_sparc64 -#define subFloat32Sigs subFloat32Sigs_sparc64 -#define subFloat64Sigs subFloat64Sigs_sparc64 -#define subFloatx80Sigs subFloatx80Sigs_sparc64 -#define subpage_accepts subpage_accepts_sparc64 -#define subpage_init subpage_init_sparc64 -#define subpage_ops subpage_ops_sparc64 -#define subpage_read subpage_read_sparc64 -#define subpage_register subpage_register_sparc64 -#define subpage_write subpage_write_sparc64 -#define suffix_mul suffix_mul_sparc64 -#define swap_commutative swap_commutative_sparc64 -#define swap_commutative2 swap_commutative2_sparc64 -#define switch_mode switch_mode_sparc64 -#define switch_v7m_sp switch_v7m_sp_sparc64 -#define syn_aa32_bkpt syn_aa32_bkpt_sparc64 -#define syn_aa32_hvc syn_aa32_hvc_sparc64 -#define syn_aa32_smc syn_aa32_smc_sparc64 -#define syn_aa32_svc syn_aa32_svc_sparc64 -#define syn_breakpoint syn_breakpoint_sparc64 -#define sync_globals sync_globals_sparc64 -#define syn_cp14_rrt_trap syn_cp14_rrt_trap_sparc64 -#define syn_cp14_rt_trap syn_cp14_rt_trap_sparc64 -#define syn_cp15_rrt_trap syn_cp15_rrt_trap_sparc64 -#define syn_cp15_rt_trap syn_cp15_rt_trap_sparc64 -#define syn_data_abort syn_data_abort_sparc64 -#define syn_fp_access_trap syn_fp_access_trap_sparc64 -#define syn_insn_abort syn_insn_abort_sparc64 -#define syn_swstep syn_swstep_sparc64 -#define syn_uncategorized syn_uncategorized_sparc64 -#define syn_watchpoint syn_watchpoint_sparc64 -#define syscall_err syscall_err_sparc64 -#define system_bus_class_init system_bus_class_init_sparc64 -#define system_bus_info system_bus_info_sparc64 -#define t2ee_cp_reginfo t2ee_cp_reginfo_sparc64 -#define table_logic_cc table_logic_cc_sparc64 -#define target_parse_constraint target_parse_constraint_sparc64 -#define target_words_bigendian target_words_bigendian_sparc64 -#define tb_add_jump tb_add_jump_sparc64 -#define tb_alloc tb_alloc_sparc64 -#define tb_alloc_page tb_alloc_page_sparc64 -#define tb_check_watchpoint tb_check_watchpoint_sparc64 -#define tb_find_fast tb_find_fast_sparc64 -#define tb_find_pc tb_find_pc_sparc64 -#define tb_find_slow tb_find_slow_sparc64 -#define tb_flush tb_flush_sparc64 -#define tb_flush_jmp_cache tb_flush_jmp_cache_sparc64 -#define tb_free tb_free_sparc64 -#define tb_gen_code tb_gen_code_sparc64 -#define tb_hash_remove tb_hash_remove_sparc64 -#define tb_invalidate_phys_addr tb_invalidate_phys_addr_sparc64 -#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_sparc64 -#define tb_invalidate_phys_range tb_invalidate_phys_range_sparc64 -#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_sparc64 -#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_sparc64 -#define tb_jmp_remove tb_jmp_remove_sparc64 -#define tb_link_page tb_link_page_sparc64 -#define tb_page_remove tb_page_remove_sparc64 -#define tb_phys_hash_func tb_phys_hash_func_sparc64 -#define tb_phys_invalidate tb_phys_invalidate_sparc64 -#define tb_reset_jump tb_reset_jump_sparc64 -#define tb_set_jmp_target tb_set_jmp_target_sparc64 -#define tcg_accel_class_init tcg_accel_class_init_sparc64 -#define tcg_accel_type tcg_accel_type_sparc64 -#define tcg_add_param_i32 tcg_add_param_i32_sparc64 -#define tcg_add_param_i64 tcg_add_param_i64_sparc64 -#define 
tcg_add_target_add_op_defs tcg_add_target_add_op_defs_sparc64 -#define tcg_allowed tcg_allowed_sparc64 -#define tcg_canonicalize_memop tcg_canonicalize_memop_sparc64 -#define tcg_commit tcg_commit_sparc64 -#define tcg_cond_to_jcc tcg_cond_to_jcc_sparc64 -#define tcg_constant_folding tcg_constant_folding_sparc64 +#define tcg_can_emit_vec_op tcg_can_emit_vec_op_sparc64 +#define tcg_expand_vec_op tcg_expand_vec_op_sparc64 +#define tcg_register_jit tcg_register_jit_sparc64 +#define tcg_tb_insert tcg_tb_insert_sparc64 +#define tcg_tb_remove tcg_tb_remove_sparc64 +#define tcg_tb_lookup tcg_tb_lookup_sparc64 +#define tcg_tb_foreach tcg_tb_foreach_sparc64 +#define tcg_nb_tbs tcg_nb_tbs_sparc64 +#define tcg_region_reset_all tcg_region_reset_all_sparc64 +#define tcg_region_init tcg_region_init_sparc64 +#define tcg_code_size tcg_code_size_sparc64 +#define tcg_code_capacity tcg_code_capacity_sparc64 +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_sparc64 +#define tcg_malloc_internal tcg_malloc_internal_sparc64 +#define tcg_pool_reset tcg_pool_reset_sparc64 +#define tcg_context_init tcg_context_init_sparc64 +#define tcg_tb_alloc tcg_tb_alloc_sparc64 +#define tcg_prologue_init tcg_prologue_init_sparc64 +#define tcg_func_start tcg_func_start_sparc64 +#define tcg_set_frame tcg_set_frame_sparc64 +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_sparc64 +#define tcg_temp_new_internal tcg_temp_new_internal_sparc64 +#define tcg_temp_new_vec tcg_temp_new_vec_sparc64 +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_sparc64 +#define tcg_temp_free_internal tcg_temp_free_internal_sparc64 #define tcg_const_i32 tcg_const_i32_sparc64 #define tcg_const_i64 tcg_const_i64_sparc64 #define tcg_const_local_i32 tcg_const_local_i32_sparc64 #define tcg_const_local_i64 tcg_const_local_i64_sparc64 -#define tcg_context_init tcg_context_init_sparc64 -#define tcg_cpu_address_space_init tcg_cpu_address_space_init_sparc64 -#define tcg_cpu_exec tcg_cpu_exec_sparc64 -#define tcg_current_code_size tcg_current_code_size_sparc64 -#define tcg_dump_info tcg_dump_info_sparc64 -#define tcg_dump_ops tcg_dump_ops_sparc64 -#define tcg_exec_all tcg_exec_all_sparc64 -#define tcg_find_helper tcg_find_helper_sparc64 -#define tcg_func_start tcg_func_start_sparc64 -#define tcg_gen_abs_i32 tcg_gen_abs_i32_sparc64 -#define tcg_gen_add2_i32 tcg_gen_add2_i32_sparc64 -#define tcg_gen_add_i32 tcg_gen_add_i32_sparc64 -#define tcg_gen_add_i64 tcg_gen_add_i64_sparc64 -#define tcg_gen_addi_i32 tcg_gen_addi_i32_sparc64 -#define tcg_gen_addi_i64 tcg_gen_addi_i64_sparc64 -#define tcg_gen_andc_i32 tcg_gen_andc_i32_sparc64 -#define tcg_gen_and_i32 tcg_gen_and_i32_sparc64 -#define tcg_gen_and_i64 tcg_gen_and_i64_sparc64 -#define tcg_gen_andi_i32 tcg_gen_andi_i32_sparc64 -#define tcg_gen_andi_i64 tcg_gen_andi_i64_sparc64 -#define tcg_gen_br tcg_gen_br_sparc64 -#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_sparc64 -#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_sparc64 -#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_sparc64 -#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_sparc64 -#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_sparc64 +#define tcg_op_supported tcg_op_supported_sparc64 #define tcg_gen_callN tcg_gen_callN_sparc64 +#define tcg_op_remove tcg_op_remove_sparc64 +#define tcg_emit_op tcg_emit_op_sparc64 +#define tcg_op_insert_before tcg_op_insert_before_sparc64 +#define tcg_op_insert_after tcg_op_insert_after_sparc64 +#define tcg_cpu_exec_time tcg_cpu_exec_time_sparc64 #define tcg_gen_code tcg_gen_code_sparc64 
-#define tcg_gen_code_common tcg_gen_code_common_sparc64 -#define tcg_gen_code_search_pc tcg_gen_code_search_pc_sparc64 -#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_sparc64 -#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_sparc64 -#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_sparc64 -#define tcg_gen_exit_tb tcg_gen_exit_tb_sparc64 -#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_sparc64 -#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_sparc64 -#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_sparc64 -#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_sparc64 -#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_sparc64 -#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_sparc64 -#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_sparc64 -#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_sparc64 -#define tcg_gen_goto_tb tcg_gen_goto_tb_sparc64 -#define tcg_gen_ld_i32 tcg_gen_ld_i32_sparc64 -#define tcg_gen_ld_i64 tcg_gen_ld_i64_sparc64 -#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_sparc64 -#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_sparc64 -#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_sparc64 -#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_sparc64 -#define tcg_gen_mov_i32 tcg_gen_mov_i32_sparc64 -#define tcg_gen_mov_i64 tcg_gen_mov_i64_sparc64 -#define tcg_gen_movi_i32 tcg_gen_movi_i32_sparc64 -#define tcg_gen_movi_i64 tcg_gen_movi_i64_sparc64 -#define tcg_gen_mul_i32 tcg_gen_mul_i32_sparc64 -#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_sparc64 -#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_sparc64 -#define tcg_gen_neg_i32 tcg_gen_neg_i32_sparc64 -#define tcg_gen_neg_i64 tcg_gen_neg_i64_sparc64 -#define tcg_gen_not_i32 tcg_gen_not_i32_sparc64 -#define tcg_gen_op0 tcg_gen_op0_sparc64 -#define tcg_gen_op1i tcg_gen_op1i_sparc64 -#define tcg_gen_op2_i32 tcg_gen_op2_i32_sparc64 -#define tcg_gen_op2_i64 tcg_gen_op2_i64_sparc64 -#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_sparc64 -#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_sparc64 -#define tcg_gen_op3_i32 tcg_gen_op3_i32_sparc64 -#define tcg_gen_op3_i64 tcg_gen_op3_i64_sparc64 -#define tcg_gen_op4_i32 tcg_gen_op4_i32_sparc64 -#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_sparc64 -#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_sparc64 -#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_sparc64 -#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_sparc64 -#define tcg_gen_op6_i32 tcg_gen_op6_i32_sparc64 -#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_sparc64 -#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_sparc64 -#define tcg_gen_orc_i32 tcg_gen_orc_i32_sparc64 -#define tcg_gen_or_i32 tcg_gen_or_i32_sparc64 -#define tcg_gen_or_i64 tcg_gen_or_i64_sparc64 +#define tcg_gen_op1 tcg_gen_op1_sparc64 +#define tcg_gen_op2 tcg_gen_op2_sparc64 +#define tcg_gen_op3 tcg_gen_op3_sparc64 +#define tcg_gen_op4 tcg_gen_op4_sparc64 +#define tcg_gen_op5 tcg_gen_op5_sparc64 +#define tcg_gen_op6 tcg_gen_op6_sparc64 +#define tcg_gen_mb tcg_gen_mb_sparc64 +#define tcg_gen_addi_i32 tcg_gen_addi_i32_sparc64 +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_sparc64 +#define tcg_gen_subi_i32 tcg_gen_subi_i32_sparc64 +#define tcg_gen_andi_i32 tcg_gen_andi_i32_sparc64 #define tcg_gen_ori_i32 tcg_gen_ori_i32_sparc64 -#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_sparc64 -#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_sparc64 -#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_sparc64 -#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_sparc64 +#define tcg_gen_xori_i32 tcg_gen_xori_i32_sparc64 +#define tcg_gen_shli_i32 tcg_gen_shli_i32_sparc64 +#define tcg_gen_shri_i32 tcg_gen_shri_i32_sparc64 +#define tcg_gen_sari_i32 
tcg_gen_sari_i32_sparc64 +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_sparc64 +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_sparc64 +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_sparc64 +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_sparc64 +#define tcg_gen_muli_i32 tcg_gen_muli_i32_sparc64 +#define tcg_gen_div_i32 tcg_gen_div_i32_sparc64 +#define tcg_gen_rem_i32 tcg_gen_rem_i32_sparc64 +#define tcg_gen_divu_i32 tcg_gen_divu_i32_sparc64 +#define tcg_gen_remu_i32 tcg_gen_remu_i32_sparc64 +#define tcg_gen_andc_i32 tcg_gen_andc_i32_sparc64 +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_sparc64 +#define tcg_gen_nand_i32 tcg_gen_nand_i32_sparc64 +#define tcg_gen_nor_i32 tcg_gen_nor_i32_sparc64 +#define tcg_gen_orc_i32 tcg_gen_orc_i32_sparc64 +#define tcg_gen_clz_i32 tcg_gen_clz_i32_sparc64 +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_sparc64 +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_sparc64 +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_sparc64 +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_sparc64 +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_sparc64 #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_sparc64 #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_sparc64 #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_sparc64 #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_sparc64 -#define tcg_gen_sar_i32 tcg_gen_sar_i32_sparc64 -#define tcg_gen_sari_i32 tcg_gen_sari_i32_sparc64 -#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_sparc64 -#define tcg_gen_shl_i32 tcg_gen_shl_i32_sparc64 -#define tcg_gen_shl_i64 tcg_gen_shl_i64_sparc64 -#define tcg_gen_shli_i32 tcg_gen_shli_i32_sparc64 +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_sparc64 +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_sparc64 +#define tcg_gen_extract_i32 tcg_gen_extract_i32_sparc64 +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_sparc64 +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_sparc64 +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_sparc64 +#define tcg_gen_add2_i32 tcg_gen_add2_i32_sparc64 +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_sparc64 +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_sparc64 +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_sparc64 +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_sparc64 +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_sparc64 +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_sparc64 +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_sparc64 +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_sparc64 +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_sparc64 +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_sparc64 +#define tcg_gen_smin_i32 tcg_gen_smin_i32_sparc64 +#define tcg_gen_umin_i32 tcg_gen_umin_i32_sparc64 +#define tcg_gen_smax_i32 tcg_gen_smax_i32_sparc64 +#define tcg_gen_umax_i32 tcg_gen_umax_i32_sparc64 +#define tcg_gen_abs_i32 tcg_gen_abs_i32_sparc64 +#define tcg_gen_addi_i64 tcg_gen_addi_i64_sparc64 +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_sparc64 +#define tcg_gen_subi_i64 tcg_gen_subi_i64_sparc64 +#define tcg_gen_andi_i64 tcg_gen_andi_i64_sparc64 +#define tcg_gen_ori_i64 tcg_gen_ori_i64_sparc64 +#define tcg_gen_xori_i64 tcg_gen_xori_i64_sparc64 #define tcg_gen_shli_i64 tcg_gen_shli_i64_sparc64 -#define tcg_gen_shr_i32 tcg_gen_shr_i32_sparc64 -#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_sparc64 -#define tcg_gen_shr_i64 tcg_gen_shr_i64_sparc64 -#define tcg_gen_shri_i32 tcg_gen_shri_i32_sparc64 #define tcg_gen_shri_i64 tcg_gen_shri_i64_sparc64 -#define tcg_gen_st_i32 tcg_gen_st_i32_sparc64 -#define tcg_gen_st_i64 tcg_gen_st_i64_sparc64 -#define tcg_gen_sub_i32 tcg_gen_sub_i32_sparc64 -#define tcg_gen_sub_i64 
tcg_gen_sub_i64_sparc64 -#define tcg_gen_subi_i32 tcg_gen_subi_i32_sparc64 -#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_sparc64 -#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_sparc64 -#define tcg_gen_xor_i32 tcg_gen_xor_i32_sparc64 -#define tcg_gen_xor_i64 tcg_gen_xor_i64_sparc64 -#define tcg_gen_xori_i32 tcg_gen_xori_i32_sparc64 -#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_sparc64 -#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_sparc64 -#define tcg_get_arg_str_idx tcg_get_arg_str_idx_sparc64 -#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_sparc64 -#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_sparc64 -#define tcg_global_mem_new_internal tcg_global_mem_new_internal_sparc64 -#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_sparc64 -#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_sparc64 -#define tcg_global_reg_new_internal tcg_global_reg_new_internal_sparc64 -#define tcg_handle_interrupt tcg_handle_interrupt_sparc64 -#define tcg_init tcg_init_sparc64 -#define tcg_invert_cond tcg_invert_cond_sparc64 -#define tcg_la_bb_end tcg_la_bb_end_sparc64 -#define tcg_la_br_end tcg_la_br_end_sparc64 -#define tcg_la_func_end tcg_la_func_end_sparc64 -#define tcg_liveness_analysis tcg_liveness_analysis_sparc64 -#define tcg_malloc tcg_malloc_sparc64 -#define tcg_malloc_internal tcg_malloc_internal_sparc64 -#define tcg_op_defs_org tcg_op_defs_org_sparc64 -#define tcg_opt_gen_mov tcg_opt_gen_mov_sparc64 -#define tcg_opt_gen_movi tcg_opt_gen_movi_sparc64 -#define tcg_optimize tcg_optimize_sparc64 -#define tcg_out16 tcg_out16_sparc64 -#define tcg_out32 tcg_out32_sparc64 -#define tcg_out64 tcg_out64_sparc64 -#define tcg_out8 tcg_out8_sparc64 -#define tcg_out_addi tcg_out_addi_sparc64 -#define tcg_out_branch tcg_out_branch_sparc64 -#define tcg_out_brcond32 tcg_out_brcond32_sparc64 -#define tcg_out_brcond64 tcg_out_brcond64_sparc64 -#define tcg_out_bswap32 tcg_out_bswap32_sparc64 -#define tcg_out_bswap64 tcg_out_bswap64_sparc64 -#define tcg_out_call tcg_out_call_sparc64 -#define tcg_out_cmp tcg_out_cmp_sparc64 -#define tcg_out_ext16s tcg_out_ext16s_sparc64 -#define tcg_out_ext16u tcg_out_ext16u_sparc64 -#define tcg_out_ext32s tcg_out_ext32s_sparc64 -#define tcg_out_ext32u tcg_out_ext32u_sparc64 -#define tcg_out_ext8s tcg_out_ext8s_sparc64 -#define tcg_out_ext8u tcg_out_ext8u_sparc64 -#define tcg_out_jmp tcg_out_jmp_sparc64 -#define tcg_out_jxx tcg_out_jxx_sparc64 -#define tcg_out_label tcg_out_label_sparc64 -#define tcg_out_ld tcg_out_ld_sparc64 -#define tcg_out_modrm tcg_out_modrm_sparc64 -#define tcg_out_modrm_offset tcg_out_modrm_offset_sparc64 -#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_sparc64 -#define tcg_out_mov tcg_out_mov_sparc64 -#define tcg_out_movcond32 tcg_out_movcond32_sparc64 -#define tcg_out_movcond64 tcg_out_movcond64_sparc64 -#define tcg_out_movi tcg_out_movi_sparc64 -#define tcg_out_op tcg_out_op_sparc64 -#define tcg_out_pop tcg_out_pop_sparc64 -#define tcg_out_push tcg_out_push_sparc64 -#define tcg_out_qemu_ld tcg_out_qemu_ld_sparc64 -#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_sparc64 -#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_sparc64 -#define tcg_out_qemu_st tcg_out_qemu_st_sparc64 -#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_sparc64 -#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_sparc64 -#define tcg_out_reloc tcg_out_reloc_sparc64 -#define tcg_out_rolw_8 tcg_out_rolw_8_sparc64 -#define tcg_out_setcond32 tcg_out_setcond32_sparc64 -#define tcg_out_setcond64 
tcg_out_setcond64_sparc64 -#define tcg_out_shifti tcg_out_shifti_sparc64 -#define tcg_out_st tcg_out_st_sparc64 -#define tcg_out_tb_finalize tcg_out_tb_finalize_sparc64 -#define tcg_out_tb_init tcg_out_tb_init_sparc64 -#define tcg_out_tlb_load tcg_out_tlb_load_sparc64 -#define tcg_out_vex_modrm tcg_out_vex_modrm_sparc64 -#define tcg_patch32 tcg_patch32_sparc64 -#define tcg_patch8 tcg_patch8_sparc64 -#define tcg_pcrel_diff tcg_pcrel_diff_sparc64 -#define tcg_pool_reset tcg_pool_reset_sparc64 -#define tcg_prologue_init tcg_prologue_init_sparc64 -#define tcg_ptr_byte_diff tcg_ptr_byte_diff_sparc64 -#define tcg_reg_alloc tcg_reg_alloc_sparc64 -#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_sparc64 -#define tcg_reg_alloc_call tcg_reg_alloc_call_sparc64 -#define tcg_reg_alloc_mov tcg_reg_alloc_mov_sparc64 -#define tcg_reg_alloc_movi tcg_reg_alloc_movi_sparc64 -#define tcg_reg_alloc_op tcg_reg_alloc_op_sparc64 -#define tcg_reg_alloc_start tcg_reg_alloc_start_sparc64 -#define tcg_reg_free tcg_reg_free_sparc64 -#define tcg_reg_sync tcg_reg_sync_sparc64 -#define tcg_set_frame tcg_set_frame_sparc64 -#define tcg_set_nop tcg_set_nop_sparc64 -#define tcg_swap_cond tcg_swap_cond_sparc64 -#define tcg_target_callee_save_regs tcg_target_callee_save_regs_sparc64 -#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_sparc64 -#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_sparc64 -#define tcg_target_const_match tcg_target_const_match_sparc64 -#define tcg_target_init tcg_target_init_sparc64 -#define tcg_target_qemu_prologue tcg_target_qemu_prologue_sparc64 -#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_sparc64 -#define tcg_temp_alloc tcg_temp_alloc_sparc64 -#define tcg_temp_free_i32 tcg_temp_free_i32_sparc64 -#define tcg_temp_free_i64 tcg_temp_free_i64_sparc64 -#define tcg_temp_free_internal tcg_temp_free_internal_sparc64 -#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_sparc64 -#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_sparc64 -#define tcg_temp_new_i32 tcg_temp_new_i32_sparc64 -#define tcg_temp_new_i64 tcg_temp_new_i64_sparc64 -#define tcg_temp_new_internal tcg_temp_new_internal_sparc64 -#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_sparc64 -#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_sparc64 -#define tdb_hash tdb_hash_sparc64 -#define teecr_write teecr_write_sparc64 -#define teehbr_access teehbr_access_sparc64 -#define temp_allocate_frame temp_allocate_frame_sparc64 -#define temp_dead temp_dead_sparc64 -#define temps_are_copies temps_are_copies_sparc64 -#define temp_save temp_save_sparc64 -#define temp_sync temp_sync_sparc64 -#define tgen_arithi tgen_arithi_sparc64 -#define tgen_arithr tgen_arithr_sparc64 -#define thumb2_logic_op thumb2_logic_op_sparc64 -#define ti925t_initfn ti925t_initfn_sparc64 -#define tlb_add_large_page tlb_add_large_page_sparc64 -#define tlb_flush_entry tlb_flush_entry_sparc64 -#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_sparc64 -#define tlbi_aa64_asid_write tlbi_aa64_asid_write_sparc64 -#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_sparc64 -#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_sparc64 -#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_sparc64 -#define tlbi_aa64_va_write tlbi_aa64_va_write_sparc64 -#define tlbiall_is_write tlbiall_is_write_sparc64 -#define tlbiall_write tlbiall_write_sparc64 -#define tlbiasid_is_write tlbiasid_is_write_sparc64 -#define tlbiasid_write tlbiasid_write_sparc64 -#define tlbimvaa_is_write tlbimvaa_is_write_sparc64 -#define tlbimvaa_write 
tlbimvaa_write_sparc64 -#define tlbimva_is_write tlbimva_is_write_sparc64 -#define tlbimva_write tlbimva_write_sparc64 -#define tlb_is_dirty_ram tlb_is_dirty_ram_sparc64 +#define tcg_gen_sari_i64 tcg_gen_sari_i64_sparc64 +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_sparc64 +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_sparc64 +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_sparc64 +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_sparc64 +#define tcg_gen_muli_i64 tcg_gen_muli_i64_sparc64 +#define tcg_gen_div_i64 tcg_gen_div_i64_sparc64 +#define tcg_gen_rem_i64 tcg_gen_rem_i64_sparc64 +#define tcg_gen_divu_i64 tcg_gen_divu_i64_sparc64 +#define tcg_gen_remu_i64 tcg_gen_remu_i64_sparc64 +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_sparc64 +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_sparc64 +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_sparc64 +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_sparc64 +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_sparc64 +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_sparc64 +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_sparc64 +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_sparc64 +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_sparc64 +#define tcg_gen_not_i64 tcg_gen_not_i64_sparc64 +#define tcg_gen_andc_i64 tcg_gen_andc_i64_sparc64 +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_sparc64 +#define tcg_gen_nand_i64 tcg_gen_nand_i64_sparc64 +#define tcg_gen_nor_i64 tcg_gen_nor_i64_sparc64 +#define tcg_gen_orc_i64 tcg_gen_orc_i64_sparc64 +#define tcg_gen_clz_i64 tcg_gen_clz_i64_sparc64 +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_sparc64 +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_sparc64 +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_sparc64 +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_sparc64 +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_sparc64 +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_sparc64 +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_sparc64 +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_sparc64 +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_sparc64 +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_sparc64 +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_sparc64 +#define tcg_gen_extract_i64 tcg_gen_extract_i64_sparc64 +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_sparc64 +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_sparc64 +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_sparc64 +#define tcg_gen_add2_i64 tcg_gen_add2_i64_sparc64 +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_sparc64 +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_sparc64 +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_sparc64 +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_sparc64 +#define tcg_gen_smin_i64 tcg_gen_smin_i64_sparc64 +#define tcg_gen_umin_i64 tcg_gen_umin_i64_sparc64 +#define tcg_gen_smax_i64 tcg_gen_smax_i64_sparc64 +#define tcg_gen_umax_i64 tcg_gen_umax_i64_sparc64 +#define tcg_gen_abs_i64 tcg_gen_abs_i64_sparc64 +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_sparc64 +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_sparc64 +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_sparc64 +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_sparc64 +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_sparc64 +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_sparc64 +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_sparc64 +#define tcg_gen_exit_tb tcg_gen_exit_tb_sparc64 +#define tcg_gen_goto_tb tcg_gen_goto_tb_sparc64 +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_sparc64 +#define check_exit_request check_exit_request_sparc64 +#define tcg_gen_qemu_ld_i32 
tcg_gen_qemu_ld_i32_sparc64 +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_sparc64 +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_sparc64 +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_sparc64 +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_sparc64 +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_sparc64 +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_sparc64 +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_sparc64 +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_sparc64 +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_sparc64 +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_sparc64 +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_sparc64 +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_sparc64 +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_sparc64 +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_sparc64 +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_sparc64 +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_sparc64 +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_sparc64 +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_sparc64 +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_sparc64 +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_sparc64 +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_sparc64 +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_sparc64 +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_sparc64 +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_sparc64 +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_sparc64 +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_sparc64 +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_sparc64 +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_sparc64 +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_sparc64 +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_sparc64 +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_sparc64 +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_sparc64 +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_sparc64 +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_sparc64 +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_sparc64 +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_sparc64 +#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_sparc64 +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_sparc64 +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_sparc64 +#define simd_desc simd_desc_sparc64 +#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_sparc64 +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_sparc64 +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_sparc64 +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_sparc64 +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_sparc64 +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_sparc64 +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_sparc64 +#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_sparc64 +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_sparc64 +#define tcg_gen_gvec_2 tcg_gen_gvec_2_sparc64 +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_sparc64 +#define tcg_gen_gvec_2s 
tcg_gen_gvec_2s_sparc64 +#define tcg_gen_gvec_3 tcg_gen_gvec_3_sparc64 +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_sparc64 +#define tcg_gen_gvec_4 tcg_gen_gvec_4_sparc64 +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_sparc64 +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_sparc64 +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_sparc64 +#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_sparc64 +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_sparc64 +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_sparc64 +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_sparc64 +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_sparc64 +#define tcg_gen_gvec_not tcg_gen_gvec_not_sparc64 +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_sparc64 +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_sparc64 +#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_sparc64 +#define tcg_gen_gvec_add tcg_gen_gvec_add_sparc64 +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_sparc64 +#define tcg_gen_gvec_addi tcg_gen_gvec_addi_sparc64 +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_sparc64 +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_sparc64 +#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_sparc64 +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_sparc64 +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_sparc64 +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_sparc64 +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_sparc64 +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_sparc64 +#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_sparc64 +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_sparc64 +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_sparc64 +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_sparc64 +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_sparc64 +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_sparc64 +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_sparc64 +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_sparc64 +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_sparc64 +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_sparc64 +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_sparc64 +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_sparc64 +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_sparc64 +#define tcg_gen_gvec_and tcg_gen_gvec_and_sparc64 +#define tcg_gen_gvec_or tcg_gen_gvec_or_sparc64 +#define tcg_gen_gvec_xor tcg_gen_gvec_xor_sparc64 +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_sparc64 +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_sparc64 +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_sparc64 +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_sparc64 +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_sparc64 +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_sparc64 +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_sparc64 +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_sparc64 +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_sparc64 +#define tcg_gen_gvec_ors tcg_gen_gvec_ors_sparc64 +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_sparc64 +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_sparc64 +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_sparc64 +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_sparc64 +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_sparc64 +#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_sparc64 +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_sparc64 +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_sparc64 +#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_sparc64 +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_sparc64 +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_sparc64 +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_sparc64 +#define tcg_gen_gvec_sars 
tcg_gen_gvec_sars_sparc64 +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_sparc64 +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_sparc64 +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_sparc64 +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_sparc64 +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_sparc64 +#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_sparc64 +#define vec_gen_2 vec_gen_2_sparc64 +#define vec_gen_3 vec_gen_3_sparc64 +#define vec_gen_4 vec_gen_4_sparc64 +#define tcg_gen_mov_vec tcg_gen_mov_vec_sparc64 +#define tcg_const_zeros_vec tcg_const_zeros_vec_sparc64 +#define tcg_const_ones_vec tcg_const_ones_vec_sparc64 +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_sparc64 +#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_sparc64 +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_sparc64 +#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_sparc64 +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_sparc64 +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_sparc64 +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_sparc64 +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_sparc64 +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_sparc64 +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_sparc64 +#define tcg_gen_ld_vec tcg_gen_ld_vec_sparc64 +#define tcg_gen_st_vec tcg_gen_st_vec_sparc64 +#define tcg_gen_stl_vec tcg_gen_stl_vec_sparc64 +#define tcg_gen_and_vec tcg_gen_and_vec_sparc64 +#define tcg_gen_or_vec tcg_gen_or_vec_sparc64 +#define tcg_gen_xor_vec tcg_gen_xor_vec_sparc64 +#define tcg_gen_andc_vec tcg_gen_andc_vec_sparc64 +#define tcg_gen_orc_vec tcg_gen_orc_vec_sparc64 +#define tcg_gen_nand_vec tcg_gen_nand_vec_sparc64 +#define tcg_gen_nor_vec tcg_gen_nor_vec_sparc64 +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_sparc64 +#define tcg_gen_not_vec tcg_gen_not_vec_sparc64 +#define tcg_gen_neg_vec tcg_gen_neg_vec_sparc64 +#define tcg_gen_abs_vec tcg_gen_abs_vec_sparc64 +#define tcg_gen_shli_vec tcg_gen_shli_vec_sparc64 +#define tcg_gen_shri_vec tcg_gen_shri_vec_sparc64 +#define tcg_gen_sari_vec tcg_gen_sari_vec_sparc64 +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_sparc64 +#define tcg_gen_add_vec tcg_gen_add_vec_sparc64 +#define tcg_gen_sub_vec tcg_gen_sub_vec_sparc64 +#define tcg_gen_mul_vec tcg_gen_mul_vec_sparc64 +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_sparc64 +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_sparc64 +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_sparc64 +#define tcg_gen_ussub_vec tcg_gen_ussub_vec_sparc64 +#define tcg_gen_smin_vec tcg_gen_smin_vec_sparc64 +#define tcg_gen_umin_vec tcg_gen_umin_vec_sparc64 +#define tcg_gen_smax_vec tcg_gen_smax_vec_sparc64 +#define tcg_gen_umax_vec tcg_gen_umax_vec_sparc64 +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_sparc64 +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_sparc64 +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_sparc64 +#define tcg_gen_shls_vec tcg_gen_shls_vec_sparc64 +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_sparc64 +#define tcg_gen_sars_vec tcg_gen_sars_vec_sparc64 +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_sparc64 +#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_sparc64 +#define tb_htable_lookup tb_htable_lookup_sparc64 +#define tb_set_jmp_target tb_set_jmp_target_sparc64 +#define cpu_exec cpu_exec_sparc64 +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_sparc64 +#define cpu_reloading_memory_map cpu_reloading_memory_map_sparc64 +#define cpu_loop_exit cpu_loop_exit_sparc64 +#define cpu_loop_exit_restore cpu_loop_exit_restore_sparc64 +#define cpu_loop_exit_atomic cpu_loop_exit_atomic_sparc64 +#define tlb_init tlb_init_sparc64 +#define 
tlb_flush_by_mmuidx tlb_flush_by_mmuidx_sparc64 +#define tlb_flush tlb_flush_sparc64 +#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_sparc64 +#define tlb_flush_all_cpus tlb_flush_all_cpus_sparc64 +#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_sparc64 +#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_sparc64 +#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_sparc64 +#define tlb_flush_page tlb_flush_page_sparc64 +#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_sparc64 +#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_sparc64 +#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_sparc64 +#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_sparc64 #define tlb_protect_code tlb_protect_code_sparc64 -#define tlb_reset_dirty_range tlb_reset_dirty_range_sparc64 -#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_sparc64 +#define tlb_unprotect_code tlb_unprotect_code_sparc64 +#define tlb_reset_dirty tlb_reset_dirty_sparc64 #define tlb_set_dirty tlb_set_dirty_sparc64 -#define tlb_set_dirty1 tlb_set_dirty1_sparc64 -#define tlb_unprotect_code_phys tlb_unprotect_code_phys_sparc64 +#define tlb_set_page_with_attrs tlb_set_page_with_attrs_sparc64 +#define tlb_set_page tlb_set_page_sparc64 +#define get_page_addr_code_hostp get_page_addr_code_hostp_sparc64 +#define get_page_addr_code get_page_addr_code_sparc64 +#define probe_access probe_access_sparc64 #define tlb_vaddr_to_host tlb_vaddr_to_host_sparc64 -#define token_get_type token_get_type_sparc64 -#define token_get_value token_get_value_sparc64 -#define token_is_escape token_is_escape_sparc64 -#define token_is_keyword token_is_keyword_sparc64 -#define token_is_operator token_is_operator_sparc64 -#define tokens_append_from_iter tokens_append_from_iter_sparc64 -#define to_qiv to_qiv_sparc64 -#define to_qov to_qov_sparc64 -#define tosa_init tosa_init_sparc64 -#define tosa_machine_init tosa_machine_init_sparc64 -#define tswap32 tswap32_sparc64 -#define tswap64 tswap64_sparc64 -#define type_class_get_size type_class_get_size_sparc64 -#define type_get_by_name type_get_by_name_sparc64 -#define type_get_parent type_get_parent_sparc64 -#define type_has_parent type_has_parent_sparc64 -#define type_initialize type_initialize_sparc64 -#define type_initialize_interface type_initialize_interface_sparc64 -#define type_is_ancestor type_is_ancestor_sparc64 -#define type_new type_new_sparc64 -#define type_object_get_size type_object_get_size_sparc64 -#define type_register_internal type_register_internal_sparc64 -#define type_table_add type_table_add_sparc64 -#define type_table_get type_table_get_sparc64 -#define type_table_lookup type_table_lookup_sparc64 -#define uint16_to_float32 uint16_to_float32_sparc64 -#define uint16_to_float64 uint16_to_float64_sparc64 -#define uint32_to_float32 uint32_to_float32_sparc64 -#define uint32_to_float64 uint32_to_float64_sparc64 -#define uint64_to_float128 uint64_to_float128_sparc64 -#define uint64_to_float32 uint64_to_float32_sparc64 -#define uint64_to_float64 uint64_to_float64_sparc64 -#define unassigned_io_ops unassigned_io_ops_sparc64 -#define unassigned_io_read unassigned_io_read_sparc64 -#define unassigned_io_write unassigned_io_write_sparc64 -#define unassigned_mem_accepts unassigned_mem_accepts_sparc64 +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_sparc64 +#define helper_le_lduw_mmu helper_le_lduw_mmu_sparc64 +#define helper_be_lduw_mmu 
helper_be_lduw_mmu_sparc64 +#define helper_le_ldul_mmu helper_le_ldul_mmu_sparc64 +#define helper_be_ldul_mmu helper_be_ldul_mmu_sparc64 +#define helper_le_ldq_mmu helper_le_ldq_mmu_sparc64 +#define helper_be_ldq_mmu helper_be_ldq_mmu_sparc64 +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_sparc64 +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_sparc64 +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_sparc64 +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_sparc64 +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_sparc64 +#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_sparc64 +#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_sparc64 +#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_sparc64 +#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_sparc64 +#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_sparc64 +#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_sparc64 +#define cpu_ldub_data_ra cpu_ldub_data_ra_sparc64 +#define cpu_ldsb_data_ra cpu_ldsb_data_ra_sparc64 +#define cpu_lduw_data_ra cpu_lduw_data_ra_sparc64 +#define cpu_ldsw_data_ra cpu_ldsw_data_ra_sparc64 +#define cpu_ldl_data_ra cpu_ldl_data_ra_sparc64 +#define cpu_ldq_data_ra cpu_ldq_data_ra_sparc64 +#define cpu_ldub_data cpu_ldub_data_sparc64 +#define cpu_ldsb_data cpu_ldsb_data_sparc64 +#define cpu_lduw_data cpu_lduw_data_sparc64 +#define cpu_ldsw_data cpu_ldsw_data_sparc64 +#define cpu_ldl_data cpu_ldl_data_sparc64 +#define cpu_ldq_data cpu_ldq_data_sparc64 +#define helper_ret_stb_mmu helper_ret_stb_mmu_sparc64 +#define helper_le_stw_mmu helper_le_stw_mmu_sparc64 +#define helper_be_stw_mmu helper_be_stw_mmu_sparc64 +#define helper_le_stl_mmu helper_le_stl_mmu_sparc64 +#define helper_be_stl_mmu helper_be_stl_mmu_sparc64 +#define helper_le_stq_mmu helper_le_stq_mmu_sparc64 +#define helper_be_stq_mmu helper_be_stq_mmu_sparc64 +#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_sparc64 +#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_sparc64 +#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_sparc64 +#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_sparc64 +#define cpu_stb_data_ra cpu_stb_data_ra_sparc64 +#define cpu_stw_data_ra cpu_stw_data_ra_sparc64 +#define cpu_stl_data_ra cpu_stl_data_ra_sparc64 +#define cpu_stq_data_ra cpu_stq_data_ra_sparc64 +#define cpu_stb_data cpu_stb_data_sparc64 +#define cpu_stw_data cpu_stw_data_sparc64 +#define cpu_stl_data cpu_stl_data_sparc64 +#define cpu_stq_data cpu_stq_data_sparc64 +#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_sparc64 +#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_sparc64 +#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_sparc64 +#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_sparc64 +#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_sparc64 +#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_sparc64 +#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_sparc64 +#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_sparc64 +#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_sparc64 +#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_sparc64 +#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_sparc64 +#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_sparc64 +#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_sparc64 +#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_sparc64 +#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_sparc64 +#define helper_atomic_umin_fetchb_mmu 
helper_atomic_umin_fetchb_mmu_sparc64 +#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_sparc64 +#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_sparc64 +#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_sparc64 +#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_sparc64 +#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_sparc64 +#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_sparc64 +#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_sparc64 +#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_sparc64 +#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_sparc64 +#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_sparc64 +#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_sparc64 +#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_sparc64 +#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_sparc64 +#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_sparc64 +#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_sparc64 +#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_sparc64 +#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_sparc64 +#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_sparc64 +#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_sparc64 +#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_sparc64 +#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_sparc64 +#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_sparc64 +#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_sparc64 +#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_sparc64 +#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_sparc64 +#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_sparc64 +#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_sparc64 +#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_sparc64 +#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_sparc64 +#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_sparc64 +#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_sparc64 +#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_sparc64 +#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_sparc64 +#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_sparc64 +#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_sparc64 +#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_sparc64 +#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_sparc64 +#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_sparc64 +#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_sparc64 +#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_sparc64 +#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_sparc64 +#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_sparc64 +#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_sparc64 +#define helper_atomic_fetch_xorl_le_mmu 
helper_atomic_fetch_xorl_le_mmu_sparc64
+#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_sparc64
+#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_sparc64
+#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_sparc64
+#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_sparc64
+#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_sparc64
+#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_sparc64
+#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_sparc64
+#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_sparc64
+#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_sparc64
+#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_sparc64
+#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_sparc64
+#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_sparc64
+#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_sparc64
+#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_sparc64
+#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_sparc64
+#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_sparc64
+#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_sparc64
+#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_sparc64
+#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_sparc64
+#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_sparc64
+#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_sparc64
+#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_sparc64
+#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_sparc64
+#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_sparc64
+#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_sparc64
+#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_sparc64
+#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_sparc64
+#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_sparc64
+#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_sparc64
+#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_sparc64
+#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_sparc64
+#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_sparc64
+#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_sparc64
+#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_sparc64
+#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_sparc64
+#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_sparc64
+#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_sparc64
+#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_sparc64
+#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_sparc64
+#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_sparc64
+#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_sparc64
+#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_sparc64
+#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_sparc64
+#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_sparc64
+#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_sparc64
+#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_sparc64
+#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_sparc64
+#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_sparc64
+#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_sparc64
+#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_sparc64
+#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_sparc64
+#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_sparc64
+#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_sparc64
+#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_sparc64
+#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_sparc64
+#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_sparc64
+#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_sparc64
+#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_sparc64
+#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_sparc64
+#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_sparc64
+#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_sparc64
+#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_sparc64
+#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_sparc64
+#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_sparc64
+#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_sparc64
+#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_sparc64
+#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_sparc64
+#define helper_atomic_xchgb helper_atomic_xchgb_sparc64
+#define helper_atomic_fetch_addb helper_atomic_fetch_addb_sparc64
+#define helper_atomic_fetch_andb helper_atomic_fetch_andb_sparc64
+#define helper_atomic_fetch_orb helper_atomic_fetch_orb_sparc64
+#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_sparc64
+#define helper_atomic_add_fetchb helper_atomic_add_fetchb_sparc64
+#define helper_atomic_and_fetchb helper_atomic_and_fetchb_sparc64
+#define helper_atomic_or_fetchb helper_atomic_or_fetchb_sparc64
+#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_sparc64
+#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_sparc64
+#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_sparc64
+#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_sparc64
+#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_sparc64
+#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_sparc64
+#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_sparc64
+#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_sparc64
+#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_sparc64
+#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_sparc64
+#define helper_atomic_xchgw_le helper_atomic_xchgw_le_sparc64
+#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_sparc64
+#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_sparc64
+#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_sparc64
+#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_sparc64
+#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_sparc64
+#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_sparc64
+#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_sparc64
+#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_sparc64
+#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_sparc64
+#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_sparc64
+#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_sparc64
+#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_sparc64
+#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_sparc64
+#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_sparc64
+#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_sparc64
+#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_sparc64
+#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_sparc64
+#define helper_atomic_xchgw_be helper_atomic_xchgw_be_sparc64
+#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_sparc64
+#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_sparc64
+#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_sparc64
+#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_sparc64
+#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_sparc64
+#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_sparc64
+#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_sparc64
+#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_sparc64
+#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_sparc64
+#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_sparc64
+#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_sparc64
+#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_sparc64
+#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_sparc64
+#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_sparc64
+#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_sparc64
+#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_sparc64
+#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_sparc64
+#define helper_atomic_xchgl_le helper_atomic_xchgl_le_sparc64
+#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_sparc64
+#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_sparc64
+#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_sparc64
+#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_sparc64
+#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_sparc64
+#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_sparc64
+#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_sparc64
+#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_sparc64
+#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_sparc64
+#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_sparc64
+#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_sparc64
+#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_sparc64
+#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_sparc64
+#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_sparc64
+#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_sparc64
+#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_sparc64
+#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_sparc64
+#define helper_atomic_xchgl_be helper_atomic_xchgl_be_sparc64
+#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_sparc64
+#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_sparc64
+#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_sparc64
+#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_sparc64
+#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_sparc64
+#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_sparc64
+#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_sparc64
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_sparc64
+#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_sparc64
+#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_sparc64
+#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_sparc64
+#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_sparc64
+#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_sparc64
+#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_sparc64
+#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_sparc64
+#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_sparc64
+#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_sparc64
+#define helper_atomic_xchgq_le helper_atomic_xchgq_le_sparc64
+#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_sparc64
+#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_sparc64
+#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_sparc64
+#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_sparc64
+#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_sparc64
+#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_sparc64
+#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_sparc64
+#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_sparc64
+#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_sparc64
+#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_sparc64
+#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_sparc64
+#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_sparc64
+#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_sparc64
+#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_sparc64
+#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_sparc64
+#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_sparc64
+#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_sparc64
+#define helper_atomic_xchgq_be helper_atomic_xchgq_be_sparc64
+#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_sparc64
+#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_sparc64
+#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_sparc64
+#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_sparc64
+#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_sparc64
+#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_sparc64
+#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_sparc64
+#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_sparc64
+#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_sparc64
+#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_sparc64
+#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_sparc64
+#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_sparc64
+#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_sparc64
+#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_sparc64
+#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_sparc64
+#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_sparc64
+#define cpu_ldub_code cpu_ldub_code_sparc64
+#define cpu_lduw_code cpu_lduw_code_sparc64
+#define cpu_ldl_code cpu_ldl_code_sparc64
+#define cpu_ldq_code cpu_ldq_code_sparc64
+#define helper_div_i32 helper_div_i32_sparc64
+#define helper_rem_i32 helper_rem_i32_sparc64
+#define helper_divu_i32 helper_divu_i32_sparc64
+#define helper_remu_i32 helper_remu_i32_sparc64
+#define helper_shl_i64 helper_shl_i64_sparc64
+#define helper_shr_i64 helper_shr_i64_sparc64
+#define helper_sar_i64 helper_sar_i64_sparc64
+#define helper_div_i64 helper_div_i64_sparc64
+#define helper_rem_i64 helper_rem_i64_sparc64
+#define helper_divu_i64 helper_divu_i64_sparc64
+#define helper_remu_i64 helper_remu_i64_sparc64
+#define helper_muluh_i64 helper_muluh_i64_sparc64
+#define helper_mulsh_i64 helper_mulsh_i64_sparc64
+#define helper_clz_i32 helper_clz_i32_sparc64
+#define helper_ctz_i32 helper_ctz_i32_sparc64
+#define helper_clz_i64 helper_clz_i64_sparc64
+#define helper_ctz_i64 helper_ctz_i64_sparc64
+#define helper_clrsb_i32 helper_clrsb_i32_sparc64
+#define helper_clrsb_i64 helper_clrsb_i64_sparc64
+#define helper_ctpop_i32 helper_ctpop_i32_sparc64
+#define helper_ctpop_i64 helper_ctpop_i64_sparc64
+#define helper_lookup_tb_ptr helper_lookup_tb_ptr_sparc64
+#define helper_exit_atomic helper_exit_atomic_sparc64
+#define helper_gvec_add8 helper_gvec_add8_sparc64
+#define helper_gvec_add16 helper_gvec_add16_sparc64
+#define helper_gvec_add32 helper_gvec_add32_sparc64
+#define helper_gvec_add64 helper_gvec_add64_sparc64
+#define helper_gvec_adds8 helper_gvec_adds8_sparc64
+#define helper_gvec_adds16 helper_gvec_adds16_sparc64
+#define helper_gvec_adds32 helper_gvec_adds32_sparc64
+#define helper_gvec_adds64 helper_gvec_adds64_sparc64
+#define helper_gvec_sub8 helper_gvec_sub8_sparc64
+#define helper_gvec_sub16 helper_gvec_sub16_sparc64
+#define helper_gvec_sub32 helper_gvec_sub32_sparc64
+#define helper_gvec_sub64 helper_gvec_sub64_sparc64
+#define helper_gvec_subs8 helper_gvec_subs8_sparc64
+#define helper_gvec_subs16 helper_gvec_subs16_sparc64
+#define helper_gvec_subs32 helper_gvec_subs32_sparc64
+#define helper_gvec_subs64 helper_gvec_subs64_sparc64
+#define helper_gvec_mul8 helper_gvec_mul8_sparc64
+#define helper_gvec_mul16 helper_gvec_mul16_sparc64
+#define helper_gvec_mul32 helper_gvec_mul32_sparc64
+#define helper_gvec_mul64 helper_gvec_mul64_sparc64
+#define helper_gvec_muls8 helper_gvec_muls8_sparc64
+#define helper_gvec_muls16 helper_gvec_muls16_sparc64
+#define helper_gvec_muls32 helper_gvec_muls32_sparc64
+#define helper_gvec_muls64 helper_gvec_muls64_sparc64
+#define helper_gvec_neg8 helper_gvec_neg8_sparc64
+#define helper_gvec_neg16 helper_gvec_neg16_sparc64
+#define helper_gvec_neg32 helper_gvec_neg32_sparc64
+#define helper_gvec_neg64 helper_gvec_neg64_sparc64
+#define helper_gvec_abs8 helper_gvec_abs8_sparc64
+#define helper_gvec_abs16 helper_gvec_abs16_sparc64
+#define helper_gvec_abs32 helper_gvec_abs32_sparc64
+#define helper_gvec_abs64 helper_gvec_abs64_sparc64
+#define helper_gvec_mov helper_gvec_mov_sparc64
+#define helper_gvec_dup64 helper_gvec_dup64_sparc64
+#define helper_gvec_dup32 helper_gvec_dup32_sparc64
+#define helper_gvec_dup16 helper_gvec_dup16_sparc64
+#define helper_gvec_dup8 helper_gvec_dup8_sparc64
+#define helper_gvec_not helper_gvec_not_sparc64
+#define helper_gvec_and helper_gvec_and_sparc64
+#define helper_gvec_or helper_gvec_or_sparc64
+#define helper_gvec_xor helper_gvec_xor_sparc64
+#define helper_gvec_andc helper_gvec_andc_sparc64
+#define helper_gvec_orc helper_gvec_orc_sparc64
+#define helper_gvec_nand helper_gvec_nand_sparc64
+#define helper_gvec_nor helper_gvec_nor_sparc64
+#define helper_gvec_eqv helper_gvec_eqv_sparc64
+#define helper_gvec_ands helper_gvec_ands_sparc64
+#define helper_gvec_xors helper_gvec_xors_sparc64
+#define helper_gvec_ors helper_gvec_ors_sparc64
+#define helper_gvec_shl8i helper_gvec_shl8i_sparc64
+#define helper_gvec_shl16i helper_gvec_shl16i_sparc64
+#define helper_gvec_shl32i helper_gvec_shl32i_sparc64
+#define helper_gvec_shl64i helper_gvec_shl64i_sparc64
+#define helper_gvec_shr8i helper_gvec_shr8i_sparc64
+#define helper_gvec_shr16i helper_gvec_shr16i_sparc64
+#define helper_gvec_shr32i helper_gvec_shr32i_sparc64
+#define helper_gvec_shr64i helper_gvec_shr64i_sparc64
+#define helper_gvec_sar8i helper_gvec_sar8i_sparc64
+#define helper_gvec_sar16i helper_gvec_sar16i_sparc64
+#define helper_gvec_sar32i helper_gvec_sar32i_sparc64
+#define helper_gvec_sar64i helper_gvec_sar64i_sparc64
+#define helper_gvec_shl8v helper_gvec_shl8v_sparc64
+#define helper_gvec_shl16v helper_gvec_shl16v_sparc64
+#define helper_gvec_shl32v helper_gvec_shl32v_sparc64
+#define helper_gvec_shl64v helper_gvec_shl64v_sparc64
+#define helper_gvec_shr8v helper_gvec_shr8v_sparc64
+#define helper_gvec_shr16v helper_gvec_shr16v_sparc64
+#define helper_gvec_shr32v helper_gvec_shr32v_sparc64
+#define helper_gvec_shr64v helper_gvec_shr64v_sparc64
+#define helper_gvec_sar8v helper_gvec_sar8v_sparc64
+#define helper_gvec_sar16v helper_gvec_sar16v_sparc64
+#define helper_gvec_sar32v helper_gvec_sar32v_sparc64
+#define helper_gvec_sar64v helper_gvec_sar64v_sparc64
+#define helper_gvec_eq8 helper_gvec_eq8_sparc64
+#define helper_gvec_ne8 helper_gvec_ne8_sparc64
+#define helper_gvec_lt8 helper_gvec_lt8_sparc64
+#define helper_gvec_le8 helper_gvec_le8_sparc64
+#define helper_gvec_ltu8 helper_gvec_ltu8_sparc64
+#define helper_gvec_leu8 helper_gvec_leu8_sparc64
+#define helper_gvec_eq16 helper_gvec_eq16_sparc64
+#define helper_gvec_ne16 helper_gvec_ne16_sparc64
+#define helper_gvec_lt16 helper_gvec_lt16_sparc64
+#define helper_gvec_le16 helper_gvec_le16_sparc64
+#define helper_gvec_ltu16 helper_gvec_ltu16_sparc64
+#define helper_gvec_leu16 helper_gvec_leu16_sparc64
+#define helper_gvec_eq32 helper_gvec_eq32_sparc64
+#define helper_gvec_ne32 helper_gvec_ne32_sparc64
+#define helper_gvec_lt32 helper_gvec_lt32_sparc64
+#define helper_gvec_le32 helper_gvec_le32_sparc64
+#define helper_gvec_ltu32 helper_gvec_ltu32_sparc64
+#define helper_gvec_leu32 helper_gvec_leu32_sparc64
+#define helper_gvec_eq64 helper_gvec_eq64_sparc64
+#define helper_gvec_ne64 helper_gvec_ne64_sparc64
+#define helper_gvec_lt64 helper_gvec_lt64_sparc64
+#define helper_gvec_le64 helper_gvec_le64_sparc64
+#define helper_gvec_ltu64 helper_gvec_ltu64_sparc64
+#define helper_gvec_leu64 helper_gvec_leu64_sparc64
+#define helper_gvec_ssadd8 helper_gvec_ssadd8_sparc64
+#define helper_gvec_ssadd16 helper_gvec_ssadd16_sparc64
+#define helper_gvec_ssadd32 helper_gvec_ssadd32_sparc64
+#define helper_gvec_ssadd64 helper_gvec_ssadd64_sparc64
+#define helper_gvec_sssub8 helper_gvec_sssub8_sparc64
+#define helper_gvec_sssub16 helper_gvec_sssub16_sparc64
+#define helper_gvec_sssub32 helper_gvec_sssub32_sparc64
+#define helper_gvec_sssub64 helper_gvec_sssub64_sparc64
+#define helper_gvec_usadd8 helper_gvec_usadd8_sparc64
+#define helper_gvec_usadd16 helper_gvec_usadd16_sparc64
+#define helper_gvec_usadd32 helper_gvec_usadd32_sparc64
+#define helper_gvec_usadd64 helper_gvec_usadd64_sparc64
+#define helper_gvec_ussub8 helper_gvec_ussub8_sparc64
+#define helper_gvec_ussub16 helper_gvec_ussub16_sparc64
+#define helper_gvec_ussub32 helper_gvec_ussub32_sparc64
+#define helper_gvec_ussub64 helper_gvec_ussub64_sparc64
+#define helper_gvec_smin8 helper_gvec_smin8_sparc64
+#define helper_gvec_smin16 helper_gvec_smin16_sparc64
+#define helper_gvec_smin32 helper_gvec_smin32_sparc64
+#define helper_gvec_smin64 helper_gvec_smin64_sparc64
+#define helper_gvec_smax8 helper_gvec_smax8_sparc64
+#define helper_gvec_smax16 helper_gvec_smax16_sparc64
+#define helper_gvec_smax32 helper_gvec_smax32_sparc64
+#define helper_gvec_smax64 helper_gvec_smax64_sparc64
+#define helper_gvec_umin8 helper_gvec_umin8_sparc64
+#define helper_gvec_umin16 helper_gvec_umin16_sparc64
+#define helper_gvec_umin32 helper_gvec_umin32_sparc64
+#define helper_gvec_umin64 helper_gvec_umin64_sparc64
+#define helper_gvec_umax8 helper_gvec_umax8_sparc64
+#define helper_gvec_umax16 helper_gvec_umax16_sparc64
+#define helper_gvec_umax32 helper_gvec_umax32_sparc64
+#define helper_gvec_umax64 helper_gvec_umax64_sparc64
+#define helper_gvec_bitsel helper_gvec_bitsel_sparc64
+#define cpu_restore_state cpu_restore_state_sparc64
+#define page_collection_lock page_collection_lock_sparc64
+#define page_collection_unlock page_collection_unlock_sparc64
+#define free_code_gen_buffer free_code_gen_buffer_sparc64
+#define tcg_exec_init tcg_exec_init_sparc64
+#define tb_cleanup tb_cleanup_sparc64
+#define tb_flush tb_flush_sparc64
+#define tb_phys_invalidate tb_phys_invalidate_sparc64
+#define tb_gen_code tb_gen_code_sparc64
+#define tb_exec_lock tb_exec_lock_sparc64
+#define tb_exec_unlock tb_exec_unlock_sparc64
+#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_sparc64
+#define tb_invalidate_phys_range tb_invalidate_phys_range_sparc64
+#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_sparc64
+#define tb_check_watchpoint tb_check_watchpoint_sparc64
+#define cpu_io_recompile cpu_io_recompile_sparc64
+#define tb_flush_jmp_cache tb_flush_jmp_cache_sparc64
+#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_sparc64
+#define translator_loop_temp_check translator_loop_temp_check_sparc64
+#define translator_loop translator_loop_sparc64
+#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_sparc64
+#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_sparc64
+#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_sparc64
+#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_sparc64
+#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_sparc64
+#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_sparc64
 #define unassigned_mem_ops unassigned_mem_ops_sparc64
-#define unassigned_mem_read unassigned_mem_read_sparc64
-#define unassigned_mem_write unassigned_mem_write_sparc64
-#define update_spsel update_spsel_sparc64
-#define v6_cp_reginfo v6_cp_reginfo_sparc64
-#define v6k_cp_reginfo v6k_cp_reginfo_sparc64
-#define v7_cp_reginfo v7_cp_reginfo_sparc64
-#define v7mp_cp_reginfo v7mp_cp_reginfo_sparc64
-#define v7m_pop v7m_pop_sparc64
-#define v7m_push v7m_push_sparc64
-#define v8_cp_reginfo v8_cp_reginfo_sparc64
-#define v8_el2_cp_reginfo v8_el2_cp_reginfo_sparc64
-#define v8_el3_cp_reginfo v8_el3_cp_reginfo_sparc64
-#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_sparc64
-#define vapa_cp_reginfo vapa_cp_reginfo_sparc64
-#define vbar_write vbar_write_sparc64
-#define vfp_exceptbits_from_host vfp_exceptbits_from_host_sparc64
-#define vfp_exceptbits_to_host vfp_exceptbits_to_host_sparc64
-#define vfp_get_fpcr vfp_get_fpcr_sparc64
-#define vfp_get_fpscr vfp_get_fpscr_sparc64
-#define vfp_get_fpsr vfp_get_fpsr_sparc64
-#define vfp_reg_offset vfp_reg_offset_sparc64
-#define vfp_set_fpcr vfp_set_fpcr_sparc64
-#define vfp_set_fpscr vfp_set_fpscr_sparc64
-#define vfp_set_fpsr vfp_set_fpsr_sparc64
-#define visit_end_implicit_struct visit_end_implicit_struct_sparc64
-#define visit_end_list visit_end_list_sparc64
-#define visit_end_struct visit_end_struct_sparc64
-#define visit_end_union visit_end_union_sparc64
-#define visit_get_next_type visit_get_next_type_sparc64
-#define visit_next_list visit_next_list_sparc64
-#define visit_optional visit_optional_sparc64
-#define visit_start_implicit_struct visit_start_implicit_struct_sparc64
-#define visit_start_list visit_start_list_sparc64
-#define visit_start_struct visit_start_struct_sparc64
-#define visit_start_union visit_start_union_sparc64
-#define vmsa_cp_reginfo vmsa_cp_reginfo_sparc64
-#define vmsa_tcr_el1_write vmsa_tcr_el1_write_sparc64
-#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_sparc64
-#define vmsa_ttbcr_reset vmsa_ttbcr_reset_sparc64
-#define vmsa_ttbcr_write vmsa_ttbcr_write_sparc64
-#define vmsa_ttbr_write vmsa_ttbr_write_sparc64
-#define write_cpustate_to_list write_cpustate_to_list_sparc64
-#define write_list_to_cpustate write_list_to_cpustate_sparc64
-#define write_raw_cp_reg write_raw_cp_reg_sparc64
-#define X86CPURegister32_lookup X86CPURegister32_lookup_sparc64
-#define x86_op_defs x86_op_defs_sparc64
-#define xpsr_read xpsr_read_sparc64
-#define xpsr_write xpsr_write_sparc64
-#define xscale_cpar_write xscale_cpar_write_sparc64
-#define xscale_cp_reginfo xscale_cp_reginfo_sparc64
-#define cpu_sparc_exec cpu_sparc_exec_sparc64
+#define floatx80_infinity floatx80_infinity_sparc64
+#define dup_const_func dup_const_func_sparc64
+#define gen_helper_raise_exception gen_helper_raise_exception_sparc64
+#define gen_helper_raise_interrupt gen_helper_raise_interrupt_sparc64
+#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_sparc64
+#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_sparc64
+#define gen_helper_cpsr_read gen_helper_cpsr_read_sparc64
+#define gen_helper_cpsr_write gen_helper_cpsr_write_sparc64
 #define helper_compute_psr helper_compute_psr_sparc64
 #define helper_compute_C_icc helper_compute_C_icc_sparc64
-#define cpu_sparc_init cpu_sparc_init_sparc64
 #define cpu_sparc_set_id cpu_sparc_set_id_sparc64
-#define sparc_cpu_register_types sparc_cpu_register_types_sparc64
+#define cpu_sparc_init cpu_sparc_init_sparc64
+#define helper_check_ieee_exceptions helper_check_ieee_exceptions_sparc64
 #define helper_fadds helper_fadds_sparc64
 #define helper_faddd helper_faddd_sparc64
 #define helper_faddq helper_faddq_sparc64
@@ -3036,11 +1291,17 @@
 #define helper_fdivd helper_fdivd_sparc64
 #define helper_fdivq helper_fdivq_sparc64
 #define helper_fsmuld helper_fsmuld_sparc64
+#define helper_fsmulq helper_fsmulq_sparc64
 #define helper_fdmulq helper_fdmulq_sparc64
 #define helper_fnegs helper_fnegs_sparc64
+#define helper_fnegd helper_fnegd_sparc64
+#define helper_fnegq helper_fnegq_sparc64
 #define helper_fitos helper_fitos_sparc64
 #define helper_fitod helper_fitod_sparc64
 #define helper_fitoq helper_fitoq_sparc64
+#define helper_fxtos helper_fxtos_sparc64
+#define helper_fxtod helper_fxtod_sparc64
+#define helper_fxtoq helper_fxtoq_sparc64
 #define helper_fdtos helper_fdtos_sparc64
 #define helper_fstod helper_fstod_sparc64
 #define helper_fqtos helper_fqtos_sparc64
@@ -3050,7 +1311,12 @@
 #define helper_fstoi helper_fstoi_sparc64
 #define helper_fdtoi helper_fdtoi_sparc64
 #define helper_fqtoi helper_fqtoi_sparc64
+#define helper_fstox helper_fstox_sparc64
+#define helper_fdtox helper_fdtox_sparc64
+#define helper_fqtox helper_fqtox_sparc64
 #define helper_fabss helper_fabss_sparc64
+#define helper_fabsd helper_fabsd_sparc64
+#define helper_fabsq helper_fabsq_sparc64
 #define helper_fsqrts helper_fsqrts_sparc64
 #define helper_fsqrtd helper_fsqrtd_sparc64
 #define helper_fsqrtq helper_fsqrtq_sparc64
@@ -3060,33 +1326,91 @@
 #define helper_fcmped helper_fcmped_sparc64
 #define helper_fcmpq helper_fcmpq_sparc64
 #define helper_fcmpeq helper_fcmpeq_sparc64
+#define helper_fcmps_fcc1 helper_fcmps_fcc1_sparc64
+#define helper_fcmpd_fcc1 helper_fcmpd_fcc1_sparc64
+#define helper_fcmpq_fcc1 helper_fcmpq_fcc1_sparc64
+#define helper_fcmps_fcc2 helper_fcmps_fcc2_sparc64
+#define helper_fcmpd_fcc2 helper_fcmpd_fcc2_sparc64
+#define helper_fcmpq_fcc2 helper_fcmpq_fcc2_sparc64
+#define helper_fcmps_fcc3 helper_fcmps_fcc3_sparc64
+#define helper_fcmpd_fcc3 helper_fcmpd_fcc3_sparc64
+#define helper_fcmpq_fcc3 helper_fcmpq_fcc3_sparc64
+#define helper_fcmpes_fcc1 helper_fcmpes_fcc1_sparc64
+#define helper_fcmped_fcc1 helper_fcmped_fcc1_sparc64
+#define helper_fcmpeq_fcc1 helper_fcmpeq_fcc1_sparc64
+#define helper_fcmpes_fcc2 helper_fcmpes_fcc2_sparc64
+#define helper_fcmped_fcc2 helper_fcmped_fcc2_sparc64
+#define helper_fcmpeq_fcc2 helper_fcmpeq_fcc2_sparc64
+#define helper_fcmpes_fcc3 helper_fcmpes_fcc3_sparc64
+#define helper_fcmped_fcc3 helper_fcmped_fcc3_sparc64
+#define helper_fcmpeq_fcc3 helper_fcmpeq_fcc3_sparc64
 #define helper_ldfsr helper_ldfsr_sparc64
+#define helper_ldxfsr helper_ldxfsr_sparc64
+#define cpu_raise_exception_ra cpu_raise_exception_ra_sparc64
+#define helper_raise_exception helper_raise_exception_sparc64
 #define helper_debug helper_debug_sparc64
+#define helper_tick_set_count helper_tick_set_count_sparc64
+#define helper_tick_get_count helper_tick_get_count_sparc64
+#define helper_tick_set_limit helper_tick_set_limit_sparc64
+#define helper_udiv helper_udiv_sparc64
 #define helper_udiv_cc helper_udiv_cc_sparc64
+#define helper_sdiv helper_sdiv_sparc64
 #define helper_sdiv_cc helper_sdiv_cc_sparc64
+#define helper_sdivx helper_sdivx_sparc64
+#define helper_udivx helper_udivx_sparc64
 #define helper_taddcctv helper_taddcctv_sparc64
 #define helper_tsubcctv helper_tsubcctv_sparc64
+#define helper_power_down helper_power_down_sparc64
 #define sparc_cpu_do_interrupt sparc_cpu_do_interrupt_sparc64
+#define leon3_irq_manager leon3_irq_manager_sparc64
+#define sparc_cpu_do_interrupt sparc_cpu_do_interrupt_sparc64
+#define cpu_tsptr cpu_tsptr_sparc64
+#define helper_set_softint helper_set_softint_sparc64
+#define helper_clear_softint helper_clear_softint_sparc64
+#define helper_write_softint helper_write_softint_sparc64
 #define helper_check_align helper_check_align_sparc64
 #define helper_ld_asi helper_ld_asi_sparc64
 #define helper_st_asi helper_st_asi_sparc64
-#define helper_cas_asi helper_cas_asi_sparc64
-#define helper_ldqf helper_ldqf_sparc64
-#define helper_stqf helper_stqf_sparc64
-#define sparc_cpu_unassigned_access sparc_cpu_unassigned_access_sparc64
+#define sparc_cpu_do_transaction_failed sparc_cpu_do_transaction_failed_sparc64
 #define sparc_cpu_do_unaligned_access sparc_cpu_do_unaligned_access_sparc64
-#define sparc_cpu_handle_mmu_fault sparc_cpu_handle_mmu_fault_sparc64
-#define dump_mmu dump_mmu_sparc64
+#define sparc_cpu_tlb_fill sparc_cpu_tlb_fill_sparc64
+#define mmu_probe mmu_probe_sparc64
+#define sparc_cpu_memory_rw_debug sparc_cpu_memory_rw_debug_sparc64
+#define cpu_get_phys_page_nofault cpu_get_phys_page_nofault_sparc64
 #define sparc_cpu_get_phys_page_debug sparc_cpu_get_phys_page_debug_sparc64
-#define sparc_reg_reset sparc_reg_reset_sparc64
-#define sparc_reg_read sparc_reg_read_sparc64
-#define sparc_reg_write sparc_reg_write_sparc64
-#define gen_intermediate_code_init gen_intermediate_code_init_sparc64
+#define gen_intermediate_code gen_intermediate_code_sparc64
+#define sparc_tcg_init sparc_tcg_init_sparc64
+#define restore_state_to_opc restore_state_to_opc_sparc64
 #define cpu_set_cwp cpu_set_cwp_sparc64
 #define cpu_get_psr cpu_get_psr_sparc64
+#define cpu_put_psr_raw cpu_put_psr_raw_sparc64
 #define cpu_put_psr cpu_put_psr_sparc64
 #define cpu_cwp_inc cpu_cwp_inc_sparc64
 #define cpu_cwp_dec cpu_cwp_dec_sparc64
+#define helper_rett helper_rett_sparc64
 #define helper_save helper_save_sparc64
 #define helper_restore helper_restore_sparc64
+#define helper_flushw helper_flushw_sparc64
+#define helper_saved helper_saved_sparc64
+#define helper_restored helper_restored_sparc64
+#define helper_wrpsr helper_wrpsr_sparc64
+#define helper_rdpsr helper_rdpsr_sparc64
+#define cpu_get_ccr cpu_get_ccr_sparc64
+#define cpu_put_ccr cpu_put_ccr_sparc64
+#define cpu_get_cwp64 cpu_get_cwp64_sparc64
+#define cpu_put_cwp64 cpu_put_cwp64_sparc64
+#define helper_rdccr helper_rdccr_sparc64
+#define helper_wrccr helper_wrccr_sparc64
+#define helper_rdcwp helper_rdcwp_sparc64
+#define helper_wrcwp helper_wrcwp_sparc64
+#define cpu_gl_switch_gregs cpu_gl_switch_gregs_sparc64
+#define helper_wrgl helper_wrgl_sparc64
+#define cpu_change_pstate cpu_change_pstate_sparc64
+#define helper_wrpstate helper_wrpstate_sparc64
+#define helper_wrpil helper_wrpil_sparc64
+#define helper_done helper_done_sparc64
+#define helper_retry helper_retry_sparc64
+#define sparc_reg_reset sparc_reg_reset_sparc64
+#define sparc_reg_read sparc_reg_read_sparc64
+#define sparc_reg_write sparc_reg_write_sparc64
 #endif
diff --git a/qemu/target-arm/Makefile.objs b/qemu/target-arm/Makefile.objs
deleted file mode 100644
index 483d620c..00000000
--- a/qemu/target-arm/Makefile.objs
+++ /dev/null
@@ -1,6 +0,0 @@
-obj-y += translate.o op_helper.o helper.o cpu.o
-obj-y += neon_helper.o iwmmxt_helper.o
-obj-$(CONFIG_SOFTMMU) += psci.o
-obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o unicorn_aarch64.o
-obj-$(TARGET_ARM) += unicorn_arm.o
-obj-y += crypto_helper.o
diff --git a/qemu/target-arm/cpu-qom.h b/qemu/target-arm/cpu-qom.h
deleted file mode 100644
index fad08eb3..00000000
--- a/qemu/target-arm/cpu-qom.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * QEMU ARM CPU
- *
- * Copyright (c) 2012 SUSE LINUX Products GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
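The rename block that closes with the #endif above is the mechanism Unicorn uses to link several QEMU softmmu targets into one library: each target's build includes a generated header that redirects every global symbol to a target-suffixed name, so the same QEMU function compiled once per architecture cannot collide at link time. A minimal sketch of the pattern, under the assumption of a single symbol; tb_flush is taken from the list above, while the file names and the function body are illustrative only:

/* rename_sparc64.h -- reduced stand-in for the generated qemu/sparc64.h */
#ifndef RENAME_SPARC64_H
#define RENAME_SPARC64_H
#define tb_flush tb_flush_sparc64   /* every extern symbol gains a suffix */
#endif

/* translate-all.c, compiled once per target: with the header included,
 * this definition is emitted as tb_flush_sparc64 in the sparc64 object
 * files; an arm build with the matching arm header emits tb_flush_arm
 * from the same source, so both object sets fit in one libunicorn. */
#include "rename_sparc64.h"
void tb_flush(void *cpu) { (void)cpu; /* target-specific body elided */ }

The callers inside each target also see the rename, so the source keeps using the plain QEMU names throughout.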
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see - * - */ -#ifndef QEMU_ARM_CPU_QOM_H -#define QEMU_ARM_CPU_QOM_H - -#include "qom/cpu.h" - -#define TYPE_ARM_CPU "arm-cpu" - -#define ARM_CPU_CLASS(uc, klass) \ - OBJECT_CLASS_CHECK(uc, ARMCPUClass, (klass), TYPE_ARM_CPU) -#define ARM_CPU(uc, obj) ((ARMCPU *)obj) -#define ARM_CPU_GET_CLASS(uc, obj) \ - OBJECT_GET_CLASS(uc, ARMCPUClass, (obj), TYPE_ARM_CPU) - -/** - * ARMCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. - * - * An ARM CPU model. - */ -typedef struct ARMCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - void (*parent_reset)(CPUState *cpu); -} ARMCPUClass; - -/** - * ARMCPU: - * @env: #CPUARMState - * - * An ARM CPU core. - */ -typedef struct ARMCPU { - /*< private >*/ - CPUState parent_obj; - /*< public >*/ - - CPUARMState env; - - /* Coprocessor information */ - GHashTable *cp_regs; - /* For marshalling (mostly coprocessor) register state between the - * kernel and QEMU (for KVM) and between two QEMUs (for migration), - * we use these arrays. - */ - /* List of register indexes managed via these arrays; (full KVM style - * 64 bit indexes, not CPRegInfo 32 bit indexes) - */ - uint64_t *cpreg_indexes; - /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */ - uint64_t *cpreg_values; - /* Length of the indexes, values, reset_values arrays */ - int32_t cpreg_array_len; - /* These are used only for migration: incoming data arrives in - * these fields and is sanity checked in post_load before copying - * to the working data structures above. - */ - uint64_t *cpreg_vmstate_indexes; - uint64_t *cpreg_vmstate_values; - int32_t cpreg_vmstate_array_len; - - /* Timers used by the generic (architected) timer */ - //QEMUTimer *gt_timer[NUM_GTIMERS]; - /* GPIO outputs for generic timer */ - //qemu_irq gt_timer_outputs[NUM_GTIMERS]; - - /* 'compatible' string for this CPU for Linux device trees */ - const char *dtb_compatible; - - /* PSCI version for this CPU - * Bits[31:16] = Major Version - * Bits[15:0] = Minor Version - */ - uint32_t psci_version; - - /* Should CPU start in PSCI powered-off state? */ - bool start_powered_off; - /* CPU currently in PSCI powered-off state */ - bool powered_off; - - /* PSCI conduit used to invoke PSCI methods - * 0 - disabled, 1 - smc, 2 - hvc - */ - uint32_t psci_conduit; - - /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or - * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type. - */ - uint32_t kvm_target; - - /* KVM init features for this CPU */ - uint32_t kvm_init_features[7]; - - /* The instance init functions for implementation-specific subclasses - * set these fields to specify the implementation-dependent values of - * various constant registers and reset values of non-constant - * registers. - * Some of these might become QOM properties eventually. - * Field names match the official register names as defined in the - * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix - * is used for reset values of non-constant registers; no reset_ - * prefix means a constant register. 
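In the ARMCPU structure being deleted here, cpreg_indexes and cpreg_values are parallel arrays: cpreg_values[i] holds the value of the register whose 64-bit KVM-style index is stored in cpreg_indexes[i], with cpreg_array_len giving the common length. A minimal sketch of a lookup across such a pair; the CPRegs type and cpreg_lookup helper are hypothetical, trimmed down for illustration:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uint64_t *cpreg_indexes;    /* full 64-bit register indexes */
    uint64_t *cpreg_values;     /* cpreg_values[i] pairs with cpreg_indexes[i] */
    int32_t   cpreg_array_len;  /* length of both arrays */
} CPRegs;

static bool cpreg_lookup(const CPRegs *r, uint64_t regidx, uint64_t *out)
{
    for (int32_t i = 0; i < r->cpreg_array_len; i++) {
        if (r->cpreg_indexes[i] == regidx) {
            *out = r->cpreg_values[i];   /* value at the matching slot */
            return true;
        }
    }
    return false;
}

Keeping indexes and values in separate flat arrays is what lets the state be marshalled to KVM or a migration stream as two plain buffers.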
- */ - uint32_t midr; - uint32_t reset_fpsid; - uint32_t mvfr0; - uint32_t mvfr1; - uint32_t mvfr2; - uint32_t ctr; - uint32_t reset_sctlr; - uint32_t id_pfr0; - uint32_t id_pfr1; - uint32_t id_dfr0; - uint32_t id_afr0; - uint32_t id_mmfr0; - uint32_t id_mmfr1; - uint32_t id_mmfr2; - uint32_t id_mmfr3; - uint32_t id_isar0; - uint32_t id_isar1; - uint32_t id_isar2; - uint32_t id_isar3; - uint32_t id_isar4; - uint32_t id_isar5; - uint64_t id_aa64pfr0; - uint64_t id_aa64pfr1; - uint64_t id_aa64dfr0; - uint64_t id_aa64dfr1; - uint64_t id_aa64afr0; - uint64_t id_aa64afr1; - uint64_t id_aa64isar0; - uint64_t id_aa64isar1; - uint64_t id_aa64mmfr0; - uint64_t id_aa64mmfr1; - uint32_t dbgdidr; - uint32_t clidr; - /* The elements of this array are the CCSIDR values for each cache, - * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc. - */ - uint32_t ccsidr[16]; - uint64_t reset_cbar; - uint32_t reset_auxcr; - bool reset_hivecs; - /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */ - uint32_t dcz_blocksize; - uint64_t rvbar; -} ARMCPU; - -#define TYPE_AARCH64_CPU "aarch64-cpu" -#define AARCH64_CPU_CLASS(klass) \ - OBJECT_CLASS_CHECK(AArch64CPUClass, (klass), TYPE_AARCH64_CPU) -#define AARCH64_CPU_GET_CLASS(obj) \ - OBJECT_GET_CLASS(AArch64CPUClass, (obj), TYPE_AArch64_CPU) - -typedef struct AArch64CPUClass { - /*< private >*/ - ARMCPUClass parent_class; - /*< public >*/ -} AArch64CPUClass; - -static inline ARMCPU *arm_env_get_cpu(CPUARMState *env) -{ - return container_of(env, ARMCPU, env); -} - -#define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e)) - -#define ENV_OFFSET offsetof(ARMCPU, env) - -#ifndef CONFIG_USER_ONLY -extern const struct VMStateDescription vmstate_arm_cpu; -#endif - -void register_cp_regs_for_features(ARMCPU *cpu); -void init_cpreg_list(ARMCPU *cpu); - -void arm_cpu_do_interrupt(CPUState *cpu); -void arm_v7m_cpu_do_interrupt(CPUState *cpu); -bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req); - -hwaddr arm_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); - -int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); -int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); - -/* Callback functions for the generic timer's timers. */ -void arm_gt_ptimer_cb(void *opaque); -void arm_gt_vtimer_cb(void *opaque); - -#ifdef TARGET_AARCH64 -int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); -int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); - -void aarch64_cpu_do_interrupt(CPUState *cs); -#endif - -#endif diff --git a/qemu/target-arm/cpu.c b/qemu/target-arm/cpu.c deleted file mode 100644 index 8bff25bf..00000000 --- a/qemu/target-arm/cpu.c +++ /dev/null @@ -1,1110 +0,0 @@ -/* - * QEMU ARM CPU - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
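arm_env_get_cpu(), near the end of the header removed above, is the classic container_of idiom: given a pointer to the env member embedded inside an ARMCPU, it subtracts the member's offset to recover the enclosing object (ENV_OFFSET is the same offsetof value). A self-contained sketch of the idiom with stand-in types; DemoEnv and DemoCPU are illustrative, not from the patch:

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct { unsigned long pc; } DemoEnv;
typedef struct { int flags; DemoEnv env; } DemoCPU; /* stands in for ARMCPU */

int main(void)
{
    DemoCPU cpu = { 0, { 0 } };
    DemoEnv *env = &cpu.env;                          /* what helpers pass around */
    DemoCPU *back = container_of(env, DemoCPU, env);  /* arm_env_get_cpu() shape */
    assert(back == &cpu);
    return 0;
}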
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see - * - */ - -#include "cpu.h" -#include "internals.h" -#include "qemu-common.h" -#include "qapi/qmp/qerror.h" -#include "hw/arm/arm.h" -#include "sysemu/sysemu.h" - -#include "uc_priv.h" - -static void arm_cpu_set_pc(CPUState *cs, vaddr value) -{ - ARMCPU *cpu = ARM_CPU(NULL, cs); - - cpu->env.regs[15] = value; -} - -static bool arm_cpu_has_work(CPUState *cs) -{ - ARMCPU *cpu = ARM_CPU(NULL, cs); - - return !cpu->powered_off - && cs->interrupt_request & - (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD - | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ - | CPU_INTERRUPT_EXITTB); -} - -static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque) -{ - /* Reset a single ARMCPRegInfo register */ - ARMCPRegInfo *ri = value; - ARMCPU *cpu = opaque; - - if (ri->type & ARM_CP_SPECIAL) { - return; - } - - if (ri->resetfn) { - ri->resetfn(&cpu->env, ri); - return; - } - - /* A zero offset is never possible as it would be regs[0] - * so we use it to indicate that reset is being handled elsewhere. - * This is basically only used for fields in non-core coprocessors - * (like the pxa2xx ones). - */ - if (!ri->fieldoffset) { - return; - } - - if (cpreg_field_is_64bit(ri)) { - CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue; - } else { - CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue; - } -} - -/* CPUClass::reset() */ -static void arm_cpu_reset(CPUState *s) -{ - CPUARMState *env = s->env_ptr; - ARMCPU *cpu = ARM_CPU(env->uc, s); - ARMCPUClass *acc = ARM_CPU_GET_CLASS(env->uc, cpu); - - acc->parent_reset(s); - - memset(env, 0, offsetof(CPUARMState, features)); - g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu); - env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid; - env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0; - env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1; - env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2; - - cpu->powered_off = cpu->start_powered_off; - s->halted = cpu->start_powered_off; - - if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q'; - } - - if (arm_feature(env, ARM_FEATURE_AARCH64)) { - /* 64 bit CPUs always start in 64 bit mode */ - env->aarch64 = 1; -#if defined(CONFIG_USER_ONLY) - env->pstate = PSTATE_MODE_EL0t; - /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */ - env->cp15.c1_sys |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE; - /* and to the FP/Neon instructions */ - env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3); -#else - env->pstate = PSTATE_MODE_EL1h; - env->pc = cpu->rvbar; -#endif - } else { -#if defined(CONFIG_USER_ONLY) - /* Userspace expects access to cp10 and cp11 for FP/Neon */ - env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 4, 0xf); -#endif - } - -#if defined(CONFIG_USER_ONLY) - env->uncached_cpsr = ARM_CPU_MODE_USR; - /* For user mode we must enable access to coprocessors */ - env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30; - if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - env->cp15.c15_cpar = 3; - } else if (arm_feature(env, ARM_FEATURE_XSCALE)) { - env->cp15.c15_cpar = 1; - } -#else - /* SVC mode with interrupts disabled. */ - env->uncached_cpsr = ARM_CPU_MODE_SVC; - env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F; - /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is - * clear at reset. Initial SP and PC are loaded from ROM. 
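cp_reg_reset(), deleted above, runs once per coprocessor register via g_hash_table_foreach() and picks a reset strategy in priority order: ARM_CP_SPECIAL registers are skipped, a custom resetfn wins when present, a zero fieldoffset means "reset handled elsewhere" (offset 0 would alias regs[0], so it is free to act as a sentinel), and otherwise resetvalue is stored into the 32- or 64-bit field. A reduced sketch of that dispatch, where RegInfo is a trimmed stand-in for the real ARMCPRegInfo:

#include <stdint.h>
#include <string.h>

typedef struct RegInfo {
    int      special;                  /* stands in for type & ARM_CP_SPECIAL */
    void   (*resetfn)(void *env, const struct RegInfo *ri);
    size_t   fieldoffset;              /* 0 == reset is handled elsewhere */
    int      is64;
    uint64_t resetvalue;
} RegInfo;

static void reg_reset(void *env, const RegInfo *ri)
{
    if (ri->special) {
        return;                        /* value is managed specially */
    }
    if (ri->resetfn) {
        ri->resetfn(env, ri);          /* a custom reset hook wins */
        return;
    }
    if (ri->fieldoffset == 0) {
        return;                        /* sentinel: nothing to store here */
    }
    if (ri->is64) {
        uint64_t v = ri->resetvalue;
        memcpy((char *)env + ri->fieldoffset, &v, sizeof v);
    } else {
        uint32_t v = (uint32_t)ri->resetvalue;
        memcpy((char *)env + ri->fieldoffset, &v, sizeof v);
    }
}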
- */ - if (IS_M(env)) { - uint32_t initial_msp; /* Loaded from 0x0 */ - uint32_t initial_pc; /* Loaded from 0x4 */ - - env->daif &= ~PSTATE_I; -#if 0 - uint8_t *rom; - rom = rom_ptr(0); - if (rom) { - /* Address zero is covered by ROM which hasn't yet been - * copied into physical memory. - */ - initial_msp = ldl_p(rom); - initial_pc = ldl_p(rom + 4); - } else -#endif - { - /* Address zero not covered by a ROM blob, or the ROM blob - * is in non-modifiable memory and this is a second reset after - * it got copied into memory. In the latter case, rom_ptr - * will return a NULL pointer and we should use ldl_phys instead. - */ - initial_msp = ldl_phys(s->as, 0); - initial_pc = ldl_phys(s->as, 4); - } - - env->regs[13] = initial_msp & 0xFFFFFFFC; - env->regs[15] = initial_pc & ~1; - env->thumb = initial_pc & 1; - } - - // Unicorn: force Thumb mode by setting of uc_open() - env->thumb = env->uc->thumb; - - env->bswap_code = env->uc->bswap_code; - - if (env->cp15.c1_sys & SCTLR_V) { - env->regs[15] = 0xFFFF0000; - } - - env->vfp.xregs[ARM_VFP_FPEXC] = 0; -#endif - set_flush_to_zero(1, &env->vfp.standard_fp_status); - set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); - set_default_nan_mode(1, &env->vfp.standard_fp_status); - set_float_detect_tininess(float_tininess_before_rounding, - &env->vfp.fp_status); - set_float_detect_tininess(float_tininess_before_rounding, - &env->vfp.standard_fp_status); - tlb_flush(s, 1); - - hw_breakpoint_update_all(cpu); - hw_watchpoint_update_all(cpu); -} - -bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - CPUARMState *env = cs->env_ptr; - CPUClass *cc = CPU_GET_CLASS(env->uc, cs); - bool ret = false; - - if (interrupt_request & CPU_INTERRUPT_FIQ - && arm_excp_unmasked(cs, EXCP_FIQ)) { - cs->exception_index = EXCP_FIQ; - cc->do_interrupt(cs); - ret = true; - } - if (interrupt_request & CPU_INTERRUPT_HARD - && arm_excp_unmasked(cs, EXCP_IRQ)) { - cs->exception_index = EXCP_IRQ; - cc->do_interrupt(cs); - ret = true; - } - if (interrupt_request & CPU_INTERRUPT_VIRQ - && arm_excp_unmasked(cs, EXCP_VIRQ)) { - cs->exception_index = EXCP_VIRQ; - cc->do_interrupt(cs); - ret = true; - } - if (interrupt_request & CPU_INTERRUPT_VFIQ - && arm_excp_unmasked(cs, EXCP_VFIQ)) { - cs->exception_index = EXCP_VFIQ; - cc->do_interrupt(cs); - ret = true; - } - - return ret; -} - -#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) -static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - CPUARMState *env = cs->env_ptr; - CPUClass *cc = CPU_GET_CLASS(env->uc, cs); - bool ret = false; - - - if (interrupt_request & CPU_INTERRUPT_FIQ - && !(env->daif & PSTATE_F)) { - cs->exception_index = EXCP_FIQ; - cc->do_interrupt(cs); - ret = true; - } - /* ARMv7-M interrupt return works by loading a magic value - * into the PC. On real hardware the load causes the - * return to occur. The qemu implementation performs the - * jump normally, then does the exception return when the - * CPU tries to execute code at the magic address. - * This will cause the magic PC value to be pushed to - * the stack if an interrupt occurred at the wrong time. - * We avoid this by disabling interrupts when - * pc contains a magic address. 
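The M-profile branch of arm_cpu_reset() above performs the architected v7-M reset: word 0 of the vector table seeds the main stack pointer and word 1 seeds the PC, with bit 0 of the reset vector selecting Thumb state. A standalone sketch of just that arithmetic; v7m_reset and its vector-table argument are illustrative stand-ins for the ldl_phys() reads in the deleted code:

#include <stdint.h>

static void v7m_reset(const uint32_t vectors[2], uint32_t regs[16], int *thumb)
{
    uint32_t initial_msp = vectors[0];     /* vector table word 0: initial MSP */
    uint32_t initial_pc  = vectors[1];     /* vector table word 1: reset vector */

    regs[13] = initial_msp & 0xFFFFFFFCu;  /* SP forced to 4-byte alignment */
    regs[15] = initial_pc & ~1u;           /* PC with the Thumb bit cleared */
    *thumb   = (int)(initial_pc & 1u);     /* bit 0 of the vector selects Thumb */
}

With vectors = {0x20001000, 0x00000101}, for instance, the core would come up with SP = 0x20001000, PC = 0x100 and Thumb execution enabled.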
- */ - if (interrupt_request & CPU_INTERRUPT_HARD - && !(env->daif & PSTATE_I) - && (env->regs[15] < 0xfffffff0)) { - cs->exception_index = EXCP_IRQ; - cc->do_interrupt(cs); - ret = true; - } - return ret; -} -#endif - -static inline void set_feature(CPUARMState *env, int feature) -{ - env->features |= 1ULL << feature; -} - -static void arm_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - CPUState *cs = CPU(obj); - ARMCPU *cpu = ARM_CPU(uc, obj); - - cs->env_ptr = &cpu->env; - cpu_exec_init(&cpu->env, opaque); - cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal, - g_free, g_free); - -#if 0 -#ifndef CONFIG_USER_ONLY - /* Our inbound IRQ and FIQ lines */ - - cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE, - arm_gt_ptimer_cb, cpu); - cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE, - arm_gt_vtimer_cb, cpu); - //qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs, - // ARRAY_SIZE(cpu->gt_timer_outputs)); -#endif -#endif - - /* DTB consumers generally don't in fact care what the 'compatible' - * string is, so always provide some string and trust that a hypothetical - * picky DTB consumer will also provide a helpful error message. - */ - cpu->dtb_compatible = "qemu,unknown"; - cpu->psci_version = 1; /* By default assume PSCI v0.1 */ - cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE; - - if (tcg_enabled(uc)) { - cpu->psci_version = 2; /* TCG implements PSCI 0.2 */ - arm_translate_init(uc); - } -} - -static void arm_cpu_post_init(struct uc_struct *uc, Object *obj) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) || - arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) { - //qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property, - // &error_abort); - } - - if (!arm_feature(&cpu->env, ARM_FEATURE_M)) { - //qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property, - // &error_abort); - } - - if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - //qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property, - // &error_abort); - } -} - -static void arm_cpu_finalizefn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - g_hash_table_destroy(cpu->cp_regs); -} - -static int arm_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp) -{ - CPUState *cs = CPU(dev); - ARMCPU *cpu = ARM_CPU(uc, dev); - ARMCPUClass *acc = ARM_CPU_GET_CLASS(uc, dev); - CPUARMState *env = &cpu->env; - - /* Some features automatically imply others: */ - if (arm_feature(env, ARM_FEATURE_V8)) { - set_feature(env, ARM_FEATURE_V7); - set_feature(env, ARM_FEATURE_ARM_DIV); - set_feature(env, ARM_FEATURE_LPAE); - } - if (arm_feature(env, ARM_FEATURE_V7)) { - set_feature(env, ARM_FEATURE_VAPA); - set_feature(env, ARM_FEATURE_THUMB2); - set_feature(env, ARM_FEATURE_MPIDR); - if (!arm_feature(env, ARM_FEATURE_M)) { - set_feature(env, ARM_FEATURE_V6K); - } else { - set_feature(env, ARM_FEATURE_V6); - } - } - if (arm_feature(env, ARM_FEATURE_V6K)) { - set_feature(env, ARM_FEATURE_V6); - set_feature(env, ARM_FEATURE_MVFR); - } - if (arm_feature(env, ARM_FEATURE_V6)) { - set_feature(env, ARM_FEATURE_V5); - if (!arm_feature(env, ARM_FEATURE_M)) { - set_feature(env, ARM_FEATURE_AUXCR); - } - } - if (arm_feature(env, ARM_FEATURE_V5)) { - set_feature(env, ARM_FEATURE_V4T); - } - if (arm_feature(env, ARM_FEATURE_M)) { - set_feature(env, ARM_FEATURE_THUMB_DIV); - } - if (arm_feature(env, ARM_FEATURE_ARM_DIV)) { - set_feature(env, ARM_FEATURE_THUMB_DIV); - } - if 
(arm_feature(env, ARM_FEATURE_VFP4)) { - set_feature(env, ARM_FEATURE_VFP3); - set_feature(env, ARM_FEATURE_VFP_FP16); - } - if (arm_feature(env, ARM_FEATURE_VFP3)) { - set_feature(env, ARM_FEATURE_VFP); - } - if (arm_feature(env, ARM_FEATURE_LPAE)) { - set_feature(env, ARM_FEATURE_V7MP); - set_feature(env, ARM_FEATURE_PXN); - } - if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { - set_feature(env, ARM_FEATURE_CBAR); - } - - if (cpu->reset_hivecs) { - cpu->reset_sctlr |= (1 << 13); - } - - register_cp_regs_for_features(cpu); - arm_cpu_register_gdb_regs_for_features(cpu); - - init_cpreg_list(cpu); - - qemu_init_vcpu(cs); - cpu_reset(cs); - - acc->parent_realize(uc, dev, errp); - - return 0; -} - -static ObjectClass *arm_cpu_class_by_name(struct uc_struct *uc, const char *cpu_model) -{ - ObjectClass *oc; - char *typename; - - if (!cpu_model) { - return NULL; - } - - typename = g_strdup_printf("%s-" TYPE_ARM_CPU, cpu_model); - oc = object_class_by_name(uc, typename); - g_free(typename); - if (!oc || !object_class_dynamic_cast(uc, oc, TYPE_ARM_CPU) || - object_class_is_abstract(oc)) { - return NULL; - } - return oc; -} - -/* CPU models. These are not needed for the AArch64 linux-user build. */ -#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) - -static void arm926_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "arm,arm926"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_VFP); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); - cpu->midr = 0x41069265; - cpu->reset_fpsid = 0x41011090; - cpu->ctr = 0x1dd20d2; - cpu->reset_sctlr = 0x00090078; -} - -static void arm946_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "arm,arm946"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_MPU); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - cpu->midr = 0x41059461; - cpu->ctr = 0x0f004006; - cpu->reset_sctlr = 0x00000078; -} - -static void arm1026_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "arm,arm1026"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_VFP); - set_feature(&cpu->env, ARM_FEATURE_AUXCR); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); - cpu->midr = 0x4106a262; - cpu->reset_fpsid = 0x410110a0; - cpu->ctr = 0x1dd20d2; - cpu->reset_sctlr = 0x00090078; - cpu->reset_auxcr = 1; - { - /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */ - ARMCPRegInfo ifar = { 0 }; - ifar.name = "IFAR"; - ifar.cp = 15; - ifar.crn = 6; - ifar.crm = 0; - ifar.opc1 = 0; - ifar.opc2 = 1; - ifar.access = PL1_RW; - ifar.fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[1]); - ifar.resetvalue = 0; - define_one_arm_cp_reg(cpu, &ifar); - } -} - -static void arm1136_r2_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an - * older core than plain "arm1136". In particular this does not - * have the v6K features. - * These ID register values are correct for 1136 but may be wrong - * for 1136_r2 (in particular r0p2 does not actually implement most - * of the ID registers). 
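The realize path in this hunk keeps CPU features as bits in a 64-bit mask (the deleted set_feature() is env->features |= 1ULL << feature) and expands implications up front, so later code only ever tests the mask: V8 implies V7, V7 implies V6K (or V6 on M-profile), down through V4T, and likewise VFP4 implies VFP3 implies VFP. A compact sketch of the mask plus a few links of that chain; the F_* enumerators are reduced stand-ins for the ARM_FEATURE_* values:

#include <stdint.h>

enum { F_V4T, F_V5, F_V6, F_V6K, F_V7, F_V8 };

static uint64_t features;

static void set_feature(int f) { features |= 1ULL << f; }
static int  has_feature(int f) { return (int)((features >> f) & 1); }

static void expand_implications(void)
{
    /* newer architecture levels switch on everything they subsume */
    if (has_feature(F_V8))  { set_feature(F_V7); }
    if (has_feature(F_V7))  { set_feature(F_V6K); }
    if (has_feature(F_V6K)) { set_feature(F_V6); }
    if (has_feature(F_V6))  { set_feature(F_V5); }
    if (has_feature(F_V5))  { set_feature(F_V4T); }
}

Running the expansion once at realize time keeps every later arm_feature() query a single bit test.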
- */ - - cpu->dtb_compatible = "arm,arm1136"; - set_feature(&cpu->env, ARM_FEATURE_V6); - set_feature(&cpu->env, ARM_FEATURE_VFP); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); - set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); - cpu->midr = 0x4107b362; - cpu->reset_fpsid = 0x410120b4; - cpu->mvfr0 = 0x11111111; - cpu->mvfr1 = 0x00000000; - cpu->ctr = 0x1dd20d2; - cpu->reset_sctlr = 0x00050078; - cpu->id_pfr0 = 0x111; - cpu->id_pfr1 = 0x1; - cpu->id_dfr0 = 0x2; - cpu->id_afr0 = 0x3; - cpu->id_mmfr0 = 0x01130003; - cpu->id_mmfr1 = 0x10030302; - cpu->id_mmfr2 = 0x01222110; - cpu->id_isar0 = 0x00140011; - cpu->id_isar1 = 0x12002111; - cpu->id_isar2 = 0x11231111; - cpu->id_isar3 = 0x01102131; - cpu->id_isar4 = 0x141; - cpu->reset_auxcr = 7; -} - -static void arm1136_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "arm,arm1136"; - set_feature(&cpu->env, ARM_FEATURE_V6K); - set_feature(&cpu->env, ARM_FEATURE_V6); - set_feature(&cpu->env, ARM_FEATURE_VFP); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); - set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); - cpu->midr = 0x4117b363; - cpu->reset_fpsid = 0x410120b4; - cpu->mvfr0 = 0x11111111; - cpu->mvfr1 = 0x00000000; - cpu->ctr = 0x1dd20d2; - cpu->reset_sctlr = 0x00050078; - cpu->id_pfr0 = 0x111; - cpu->id_pfr1 = 0x1; - cpu->id_dfr0 = 0x2; - cpu->id_afr0 = 0x3; - cpu->id_mmfr0 = 0x01130003; - cpu->id_mmfr1 = 0x10030302; - cpu->id_mmfr2 = 0x01222110; - cpu->id_isar0 = 0x00140011; - cpu->id_isar1 = 0x12002111; - cpu->id_isar2 = 0x11231111; - cpu->id_isar3 = 0x01102131; - cpu->id_isar4 = 0x141; - cpu->reset_auxcr = 7; -} - -static void arm1176_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "arm,arm1176"; - set_feature(&cpu->env, ARM_FEATURE_V6K); - set_feature(&cpu->env, ARM_FEATURE_VFP); - set_feature(&cpu->env, ARM_FEATURE_VAPA); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); - set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); - cpu->midr = 0x410fb767; - cpu->reset_fpsid = 0x410120b5; - cpu->mvfr0 = 0x11111111; - cpu->mvfr1 = 0x00000000; - cpu->ctr = 0x1dd20d2; - cpu->reset_sctlr = 0x00050078; - cpu->id_pfr0 = 0x111; - cpu->id_pfr1 = 0x11; - cpu->id_dfr0 = 0x33; - cpu->id_afr0 = 0; - cpu->id_mmfr0 = 0x01130003; - cpu->id_mmfr1 = 0x10030302; - cpu->id_mmfr2 = 0x01222100; - cpu->id_isar0 = 0x0140011; - cpu->id_isar1 = 0x12002111; - cpu->id_isar2 = 0x11231121; - cpu->id_isar3 = 0x01102131; - cpu->id_isar4 = 0x01141; - cpu->reset_auxcr = 7; -} - -static void arm11mpcore_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "arm,arm11mpcore"; - set_feature(&cpu->env, ARM_FEATURE_V6K); - set_feature(&cpu->env, ARM_FEATURE_VFP); - set_feature(&cpu->env, ARM_FEATURE_VAPA); - set_feature(&cpu->env, ARM_FEATURE_MPIDR); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - cpu->midr = 0x410fb022; - cpu->reset_fpsid = 0x410120b4; - cpu->mvfr0 = 0x11111111; - cpu->mvfr1 = 0x00000000; - cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ - cpu->id_pfr0 = 0x111; - cpu->id_pfr1 = 0x1; - cpu->id_dfr0 = 0; - cpu->id_afr0 = 0x2; - cpu->id_mmfr0 = 0x01100103; - cpu->id_mmfr1 = 0x10020302; - cpu->id_mmfr2 = 0x01222000; - cpu->id_isar0 = 0x00100011; - cpu->id_isar1 = 
0x12002111; - cpu->id_isar2 = 0x11221011; - cpu->id_isar3 = 0x01102131; - cpu->id_isar4 = 0x141; - cpu->reset_auxcr = 1; -} - -static void cortex_m3_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - set_feature(&cpu->env, ARM_FEATURE_V7); - set_feature(&cpu->env, ARM_FEATURE_M); - cpu->midr = 0x410fc231; -} - -static void arm_v7m_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) -{ - CPUClass *cc = CPU_CLASS(uc, oc); - -#ifndef CONFIG_USER_ONLY - cc->do_interrupt = arm_v7m_cpu_do_interrupt; -#endif - - cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt; -} - -static const ARMCPRegInfo cortexa8_cp_reginfo[] = { - { "L2LOCKDOWN", 15,9,0, 0,1,0, 0, - ARM_CP_CONST, PL1_RW, NULL, 0, }, - { "L2AUXCR", 15,9,0, 0,1,2, 0, - ARM_CP_CONST, PL1_RW, NULL, 0, }, - REGINFO_SENTINEL -}; - -static void cortex_a8_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "arm,cortex-a8"; - set_feature(&cpu->env, ARM_FEATURE_V7); - set_feature(&cpu->env, ARM_FEATURE_VFP3); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - cpu->midr = 0x410fc080; - cpu->reset_fpsid = 0x410330c0; - cpu->mvfr0 = 0x11110222; - cpu->mvfr1 = 0x00011100; - cpu->ctr = 0x82048004; - cpu->reset_sctlr = 0x00c50078; - cpu->id_pfr0 = 0x1031; - cpu->id_pfr1 = 0x11; - cpu->id_dfr0 = 0x400; - cpu->id_afr0 = 0; - cpu->id_mmfr0 = 0x31100003; - cpu->id_mmfr1 = 0x20000000; - cpu->id_mmfr2 = 0x01202000; - cpu->id_mmfr3 = 0x11; - cpu->id_isar0 = 0x00101111; - cpu->id_isar1 = 0x12112111; - cpu->id_isar2 = 0x21232031; - cpu->id_isar3 = 0x11112131; - cpu->id_isar4 = 0x00111142; - cpu->dbgdidr = 0x15141000; - cpu->clidr = (1 << 27) | (2 << 24) | 3; - cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ - cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */ - cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */ - cpu->reset_auxcr = 2; - define_arm_cp_regs(cpu, cortexa8_cp_reginfo); -} - -static const ARMCPRegInfo cortexa9_cp_reginfo[] = { - /* power_control should be set to maximum latency. 
Again, - * default to 0 and set by private hook - */ - { "A9_PWRCTL", 15,15,0, 0,0,0, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_power_control) }, - { "A9_DIAG", 15,15,0, 0,0,1, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_diagnostic) }, - { "A9_PWRDIAG",15,15,0, 0,0,2, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_power_diagnostic) }, - { "NEONBUSY", 15,15,1, 0,0,0, 0, - ARM_CP_CONST, PL1_RW, NULL, 0, }, - /* TLB lockdown control */ - { "TLB_LOCKR", 15,15,4, 0,5,2, 0, - ARM_CP_NOP, PL1_W, NULL, 0 }, - { "TLB_LOCKW", 15,15,4, 0,5,4, 0, - ARM_CP_NOP, PL1_W, NULL, 0, }, - { "TLB_VA", 15,15,5, 0,5,2, 0, - ARM_CP_CONST, PL1_RW, NULL, 0, }, - { "TLB_PA", 15,15,6, 0,5,2, 0, - ARM_CP_CONST, PL1_RW, NULL, 0 }, - { "TLB_ATTR", 15,15,7, 0,5,2, 0, - ARM_CP_CONST, PL1_RW, NULL, 0, }, - REGINFO_SENTINEL -}; - -static void cortex_a9_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "arm,cortex-a9"; - set_feature(&cpu->env, ARM_FEATURE_V7); - set_feature(&cpu->env, ARM_FEATURE_VFP3); - set_feature(&cpu->env, ARM_FEATURE_VFP_FP16); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); - /* Note that A9 supports the MP extensions even for - * A9UP and single-core A9MP (which are both different - * and valid configurations; we don't model A9UP). - */ - set_feature(&cpu->env, ARM_FEATURE_V7MP); - set_feature(&cpu->env, ARM_FEATURE_CBAR); - cpu->midr = 0x410fc090; - cpu->reset_fpsid = 0x41033090; - cpu->mvfr0 = 0x11110222; - cpu->mvfr1 = 0x01111111; - cpu->ctr = 0x80038003; - cpu->reset_sctlr = 0x00c50078; - cpu->id_pfr0 = 0x1031; - cpu->id_pfr1 = 0x11; - cpu->id_dfr0 = 0x000; - cpu->id_afr0 = 0; - cpu->id_mmfr0 = 0x00100103; - cpu->id_mmfr1 = 0x20000000; - cpu->id_mmfr2 = 0x01230000; - cpu->id_mmfr3 = 0x00002111; - cpu->id_isar0 = 0x00101111; - cpu->id_isar1 = 0x13112111; - cpu->id_isar2 = 0x21232041; - cpu->id_isar3 = 0x11112131; - cpu->id_isar4 = 0x00111142; - cpu->dbgdidr = 0x35141000; - cpu->clidr = (1 << 27) | (1 << 24) | 3; - cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ - cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */ - define_arm_cp_regs(cpu, cortexa9_cp_reginfo); -} - -#ifndef CONFIG_USER_ONLY -static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - /* Linux wants the number of processors from here. - * Might as well set the interrupt-controller bit too. 
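/* A worked example for the return statement just below (smp_cpus == 4 is an
 * assumed value, not one from the patch): bits [31:24] carry the CPU count
 * minus one and bit 23 advertises the integrated interrupt controller, so
 *   (4 - 1) << 24 = 0x03000000
 *   1 << 23       = 0x00800000
 *   combined      = 0x03800000
 * which is what a guest kernel reading L2CTLR would see on a 4-CPU system. */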
- */ - return ((smp_cpus - 1) << 24) | (1 << 23); -} -#endif - -static const ARMCPRegInfo cortexa15_cp_reginfo[] = { -#ifndef CONFIG_USER_ONLY - { "L2CTLR", 15,9,0, 0,1,2, 0, - 0, PL1_RW, NULL, 0, 0, - NULL, a15_l2ctlr_read, arm_cp_write_ignore, }, -#endif - { "L2ECTLR", 15,9,0, 0,1,3, 0, - ARM_CP_CONST, PL1_RW, NULL, 0 }, - REGINFO_SENTINEL -}; - -static void cortex_a15_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "arm,cortex-a15"; - set_feature(&cpu->env, ARM_FEATURE_V7); - set_feature(&cpu->env, ARM_FEATURE_VFP4); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); - set_feature(&cpu->env, ARM_FEATURE_ARM_DIV); - set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_LPAE); - cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15; - cpu->midr = 0x412fc0f1; - cpu->reset_fpsid = 0x410430f0; - cpu->mvfr0 = 0x10110222; - cpu->mvfr1 = 0x11111111; - cpu->ctr = 0x8444c004; - cpu->reset_sctlr = 0x00c50078; - cpu->id_pfr0 = 0x00001131; - cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x02010555; - cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10201105; - cpu->id_mmfr1 = 0x20000000; - cpu->id_mmfr2 = 0x01240000; - cpu->id_mmfr3 = 0x02102211; - cpu->id_isar0 = 0x02101110; - cpu->id_isar1 = 0x13112111; - cpu->id_isar2 = 0x21232041; - cpu->id_isar3 = 0x11112131; - cpu->id_isar4 = 0x10011142; - cpu->dbgdidr = 0x3515f021; - cpu->clidr = 0x0a200023; - cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ - cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ - cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ - define_arm_cp_regs(cpu, cortexa15_cp_reginfo); -} - -static void ti925t_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - set_feature(&cpu->env, ARM_FEATURE_V4T); - set_feature(&cpu->env, ARM_FEATURE_OMAPCP); - cpu->midr = ARM_CPUID_TI925T; - cpu->ctr = 0x5109149; - cpu->reset_sctlr = 0x00000070; -} - -static void sa1100_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "intel,sa1100"; - set_feature(&cpu->env, ARM_FEATURE_STRONGARM); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - cpu->midr = 0x4401A11B; - cpu->reset_sctlr = 0x00000070; -} - -static void sa1110_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - set_feature(&cpu->env, ARM_FEATURE_STRONGARM); - set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); - cpu->midr = 0x6901B119; - cpu->reset_sctlr = 0x00000070; -} - -static void pxa250_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = 0x69052100; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa255_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = 0x69052d00; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa260_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - 
set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = 0x69052903; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa261_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = 0x69052d05; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa262_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = 0x69052d06; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270a0_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054110; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270a1_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054111; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270b0_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054112; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270b1_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054113; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270c0_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054114; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270c5_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054117; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -#ifdef CONFIG_USER_ONLY -static void arm_any_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_VFP4); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); - set_feature(&cpu->env, ARM_FEATURE_V8_AES); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); - 
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); - set_feature(&cpu->env, ARM_FEATURE_CRC); - cpu->midr = 0xffffffff; -} -#endif - -#endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */ - -typedef struct ARMCPUInfo { - const char *name; - void (*initfn)(struct uc_struct *uc, Object *obj, void *opaque); - void (*class_init)(struct uc_struct *uc, ObjectClass *oc, void *data); -} ARMCPUInfo; - -static const ARMCPUInfo arm_cpus[] = { -#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) - { "arm926", arm926_initfn }, - { "arm946", arm946_initfn }, - { "arm1026", arm1026_initfn }, - /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an - * older core than plain "arm1136". In particular this does not - * have the v6K features. - */ - { "arm1136-r2", arm1136_r2_initfn }, - { "arm1136", arm1136_initfn }, - { "arm1176", arm1176_initfn }, - { "arm11mpcore", arm11mpcore_initfn }, - { "cortex-m3", cortex_m3_initfn, arm_v7m_class_init }, - { "cortex-a8", cortex_a8_initfn }, - { "cortex-a9", cortex_a9_initfn }, - { "cortex-a15", cortex_a15_initfn }, - { "ti925t", ti925t_initfn }, - { "sa1100", sa1100_initfn }, - { "sa1110", sa1110_initfn }, - { "pxa250", pxa250_initfn }, - { "pxa255", pxa255_initfn }, - { "pxa260", pxa260_initfn }, - { "pxa261", pxa261_initfn }, - { "pxa262", pxa262_initfn }, - /* "pxa270" is an alias for "pxa270-a0" */ - { "pxa270", pxa270a0_initfn }, - { "pxa270-a0", pxa270a0_initfn }, - { "pxa270-a1", pxa270a1_initfn }, - { "pxa270-b0", pxa270b0_initfn }, - { "pxa270-b1", pxa270b1_initfn }, - { "pxa270-c0", pxa270c0_initfn }, - { "pxa270-c5", pxa270c5_initfn }, -#ifdef CONFIG_USER_ONLY - { "any", arm_any_initfn }, -#endif -#endif - { NULL } -}; - -static void arm_cpu_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) -{ - ARMCPUClass *acc = ARM_CPU_CLASS(uc, oc); - CPUClass *cc = CPU_CLASS(uc, acc); - DeviceClass *dc = DEVICE_CLASS(uc, oc); - - acc->parent_realize = dc->realize; - dc->realize = arm_cpu_realizefn; - //dc->props = arm_cpu_properties; - - acc->parent_reset = cc->reset; - cc->reset = arm_cpu_reset; - - cc->class_by_name = arm_cpu_class_by_name; - cc->has_work = arm_cpu_has_work; - cc->cpu_exec_interrupt = arm_cpu_exec_interrupt; - //cc->dump_state = arm_cpu_dump_state; - cc->set_pc = arm_cpu_set_pc; -#ifdef CONFIG_USER_ONLY - cc->handle_mmu_fault = arm_cpu_handle_mmu_fault; -#else - cc->do_interrupt = arm_cpu_do_interrupt; - cc->get_phys_page_debug = arm_cpu_get_phys_page_debug; -#endif - cc->debug_excp_handler = arm_debug_excp_handler; -} - -static void cpu_register(struct uc_struct *uc, const ARMCPUInfo *info) -{ - TypeInfo type_info = { 0 }; - type_info.parent = TYPE_ARM_CPU; - type_info.instance_size = sizeof(ARMCPU); - type_info.instance_init = info->initfn; - type_info.class_size = sizeof(ARMCPUClass); - type_info.class_init = info->class_init; - - type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name); - type_register(uc, &type_info); - g_free((void *)type_info.name); -} - -void arm_cpu_register_types(void *opaque) -{ - const ARMCPUInfo *info = arm_cpus; - - TypeInfo arm_cpu_type_info = { 0 }; - arm_cpu_type_info.name = TYPE_ARM_CPU, - arm_cpu_type_info.parent = TYPE_CPU, - arm_cpu_type_info.instance_userdata = opaque, - arm_cpu_type_info.instance_size = sizeof(ARMCPU), - arm_cpu_type_info.instance_init = arm_cpu_initfn, - arm_cpu_type_info.instance_post_init = arm_cpu_post_init, - arm_cpu_type_info.instance_finalize = arm_cpu_finalizefn, - 
arm_cpu_type_info.abstract = true, - arm_cpu_type_info.class_size = sizeof(ARMCPUClass), - arm_cpu_type_info.class_init = arm_cpu_class_init, - - type_register_static(opaque, &arm_cpu_type_info); - - while (info->name) { - cpu_register(opaque, info); - info++; - } -} diff --git a/qemu/target-arm/cpu.h b/qemu/target-arm/cpu.h deleted file mode 100644 index 5314cb4f..00000000 --- a/qemu/target-arm/cpu.h +++ /dev/null @@ -1,1548 +0,0 @@ -/* - * ARM virtual CPU header - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>. - */ -#ifndef CPU_ARM_H -#define CPU_ARM_H - -#include "config.h" - -#include "kvm-consts.h" - -#if defined(TARGET_AARCH64) - /* AArch64 definitions */ -# define TARGET_LONG_BITS 64 -# define ELF_MACHINE EM_AARCH64 -#else -# define TARGET_LONG_BITS 32 -# define ELF_MACHINE EM_ARM -#endif - -#define CPUArchState struct CPUARMState - -#include "qemu-common.h" -#include "exec/cpu-defs.h" - -#include "fpu/softfloat.h" - -#define TARGET_HAS_ICE 1 - -#define EXCP_UDEF 1 /* undefined instruction */ -#define EXCP_SWI 2 /* software interrupt */ -#define EXCP_PREFETCH_ABORT 3 -#define EXCP_DATA_ABORT 4 -#define EXCP_IRQ 5 -#define EXCP_FIQ 6 -#define EXCP_BKPT 7 -#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */ -#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */ -#define EXCP_STREX 10 -#define EXCP_HVC 11 /* HyperVisor Call */ -#define EXCP_HYP_TRAP 12 -#define EXCP_SMC 13 /* Secure Monitor Call */ -#define EXCP_VIRQ 14 -#define EXCP_VFIQ 15 - -#define ARMV7M_EXCP_RESET 1 -#define ARMV7M_EXCP_NMI 2 -#define ARMV7M_EXCP_HARD 3 -#define ARMV7M_EXCP_MEM 4 -#define ARMV7M_EXCP_BUS 5 -#define ARMV7M_EXCP_USAGE 6 -#define ARMV7M_EXCP_SVC 11 -#define ARMV7M_EXCP_DEBUG 12 -#define ARMV7M_EXCP_PENDSV 14 -#define ARMV7M_EXCP_SYSTICK 15 - -/* ARM-specific interrupt pending bits. */ -#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1 -#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2 -#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3 - -/* The usual mapping for an AArch64 system register to its AArch32 - * counterpart is for the 32 bit world to have access to the lower - * half only (with writes leaving the upper half untouched). It's - * therefore useful to be able to pass TCG the offset of the least - * significant half of a uint64_t struct member.
- */ -#ifdef HOST_WORDS_BIGENDIAN -#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t)) -#define offsetofhigh32(S, M) offsetof(S, M) -#else -#define offsetoflow32(S, M) offsetof(S, M) -#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t)) -#endif - -/* Meanings of the ARMCPU object's four inbound GPIO lines */ -#define ARM_CPU_IRQ 0 -#define ARM_CPU_FIQ 1 -#define ARM_CPU_VIRQ 2 -#define ARM_CPU_VFIQ 3 - -typedef void ARMWriteCPFunc(void *opaque, int cp_info, - int srcreg, int operand, uint32_t value); -typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info, - int dstreg, int operand); - -struct arm_boot_info; - -#define NB_MMU_MODES 4 - -/* We currently assume float and double are IEEE single and double - precision respectively. - Doing runtime conversions is tricky because VFP registers may contain - integer values (eg. as the result of a FTOSI instruction). - s<2n> maps to the least significant half of d - s<2n+1> maps to the most significant half of d - */ - -/* CPU state for each instance of a generic timer (in cp15 c14) */ -typedef struct ARMGenericTimer { - uint64_t cval; /* Timer CompareValue register */ - uint64_t ctl; /* Timer Control register */ -} ARMGenericTimer; - -#define GTIMER_PHYS 0 -#define GTIMER_VIRT 1 -#define NUM_GTIMERS 2 - -typedef struct CPUARMState { - /* Regs for current mode. */ - uint32_t regs[16]; - - /* 32/64 switch only happens when taking and returning from - * exceptions so the overlap semantics are taken care of then - * instead of having a complicated union. - */ - /* Regs for A64 mode. */ - uint64_t xregs[32]; - uint64_t pc; - /* PSTATE isn't an architectural register for ARMv8. However, it is - * convenient for us to assemble the underlying state into a 32 bit format - * identical to the architectural format used for the SPSR. (This is also - * what the Linux kernel's 'pstate' field in signal handlers and KVM's - * 'pstate' register are.) Of the PSTATE bits: - * NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same - * semantics as for AArch32, as described in the comments on each field) - * nRW (also known as M[4]) is kept, inverted, in env->aarch64 - * DAIF (exception masks) are kept in env->daif - * all other bits are stored in their correct places in env->pstate - */ - uint32_t pstate; - uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */ - - /* Frequently accessed CPSR bits are stored separately for efficiency. - This contains all the other bits. Use cpsr_{read,write} to access - the whole CPSR. */ - uint32_t uncached_cpsr; - uint32_t spsr; - - /* Banked registers. */ - uint64_t banked_spsr[8]; - uint32_t banked_r13[8]; - uint32_t banked_r14[8]; - - /* These hold r8-r12. */ - uint32_t usr_regs[5]; - uint32_t fiq_regs[5]; - - /* cpsr flag cache for faster execution */ - uint32_t CF; /* 0 or 1 */ - uint32_t VF; /* V is the bit 31. All other bits are undefined */ - uint32_t NF; /* N is bit 31. All other bits are undefined. */ - uint32_t ZF; /* Z set if zero. */ - uint32_t QF; /* 0 or 1 */ - uint32_t GE; /* cpsr[19:16] */ - uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */ - uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */ - uint64_t daif; /* exception masks, in the bits they are in in PSTATE */ - - uint64_t elr_el[4]; /* AArch64 exception link regs */ - uint64_t sp_el[4]; /* AArch64 banked stack pointers */ - - /* System control coprocessor (cp15) */ - struct { - uint32_t c0_cpuid; - uint64_t c0_cssel; /* Cache size selection. 
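The offsetoflow32()/offsetofhigh32() trick above is easy to exercise standalone; a sketch of the little-endian branch (host endianness is an assumption here, exactly as in the #else arm of the macros):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct demo { uint64_t reg; };

    /* Little-endian branch of the macros above: the low 32 bits of a
     * uint64_t field live at the field's own offset. */
    #define offsetoflow32(S, M) offsetof(S, M)

    int main(void)
    {
        struct demo d = { 0x1122334455667788ULL };
        uint32_t lo = *(uint32_t *)((char *)&d + offsetoflow32(struct demo, reg));
        assert(lo == 0x55667788);   /* a 32-bit view sees the low half only */
        return 0;
    }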
*/ - uint64_t c1_sys; /* System control register. */ - uint64_t c1_coproc; /* Coprocessor access register. */ - uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */ - uint64_t ttbr0_el1; /* MMU translation table base 0. */ - uint64_t ttbr1_el1; /* MMU translation table base 1. */ - uint64_t c2_control; /* MMU translation table base control. */ - uint32_t c2_mask; /* MMU translation table base selection mask. */ - uint32_t c2_base_mask; /* MMU translation table base 0 mask. */ - uint32_t c2_data; /* MPU data cachable bits. */ - uint32_t c2_insn; /* MPU instruction cachable bits. */ - uint32_t c3; /* MMU domain access control register - MPU write buffer control. */ - uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */ - uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */ - uint64_t hcr_el2; /* Hypervisor configuration register */ - uint64_t scr_el3; /* Secure configuration register. */ - uint32_t ifsr_el2; /* Fault status registers. */ - uint64_t esr_el[4]; - uint32_t c6_region[8]; /* MPU base/size registers. */ - uint64_t far_el[4]; /* Fault address registers. */ - uint64_t par_el1; /* Translation result. */ - uint32_t c9_insn; /* Cache lockdown registers. */ - uint32_t c9_data; - uint64_t c9_pmcr; /* performance monitor control register */ - uint64_t c9_pmcnten; /* perf monitor counter enables */ - uint32_t c9_pmovsr; /* perf monitor overflow status */ - uint32_t c9_pmxevtyper; /* perf monitor event type */ - uint32_t c9_pmuserenr; /* perf monitor user enable */ - uint32_t c9_pminten; /* perf monitor interrupt enables */ - uint64_t mair_el1; - uint64_t vbar_el[4]; /* vector base address register */ - uint32_t c13_fcse; /* FCSE PID. */ - uint64_t contextidr_el1; /* Context ID. */ - uint64_t tpidr_el0; /* User RW Thread register. */ - uint64_t tpidrro_el0; /* User RO Thread register. */ - uint64_t tpidr_el1; /* Privileged Thread register. */ - uint64_t c14_cntfrq; /* Counter Frequency register */ - uint64_t c14_cntkctl; /* Timer Control register */ - ARMGenericTimer c14_timer[NUM_GTIMERS]; - uint32_t c15_cpar; /* XScale Coprocessor Access Register */ - uint32_t c15_ticonfig; /* TI925T configuration byte. */ - uint32_t c15_i_max; /* Maximum D-cache dirty line index. */ - uint32_t c15_i_min; /* Minimum D-cache dirty line index. */ - uint32_t c15_threadid; /* TI debugger thread-ID. */ - uint32_t c15_config_base_address; /* SCU base address. */ - uint32_t c15_diagnostic; /* diagnostic register */ - uint32_t c15_power_diagnostic; - uint32_t c15_power_control; /* power control */ - uint64_t dbgbvr[16]; /* breakpoint value registers */ - uint64_t dbgbcr[16]; /* breakpoint control registers */ - uint64_t dbgwvr[16]; /* watchpoint value registers */ - uint64_t dbgwcr[16]; /* watchpoint control registers */ - uint64_t mdscr_el1; - /* If the counter is enabled, this stores the last time the counter - * was reset. Otherwise it stores the counter value - */ - uint64_t c15_ccnt; - uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */ - } cp15; - - struct { - uint32_t other_sp; - uint32_t vecbase; - uint32_t basepri; - uint32_t control; - int current_sp; - int exception; - int pending_exception; - } v7m; - - /* Information associated with an exception about to be taken: - * code which raises an exception must set cs->exception_index and - * the relevant parts of this structure; the cpu_do_interrupt function - * will then set the guest-visible registers as part of the exception - * entry process. 
- */ - struct { - uint32_t syndrome; /* AArch64 format syndrome register */ - uint32_t fsr; /* AArch32 format fault status register info */ - uint64_t vaddress; /* virtual addr associated with exception, if any */ - /* If we implement EL2 we will also need to store information - * about the intermediate physical address for stage 2 faults. - */ - } exception; - - /* Thumb-2 EE state. */ - uint32_t teecr; - uint32_t teehbr; - - /* VFP coprocessor state. */ - struct { - /* VFP/Neon register state. Note that the mapping between S, D and Q - * views of the register bank differs between AArch64 and AArch32: - * In AArch32: - * Qn = regs[2n+1]:regs[2n] - * Dn = regs[n] - * Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n - * (and regs[32] to regs[63] are inaccessible) - * In AArch64: - * Qn = regs[2n+1]:regs[2n] - * Dn = regs[2n] - * Sn = regs[2n] bits 31..0 - * This corresponds to the architecturally defined mapping between - * the two execution states, and means we do not need to explicitly - * map these registers when changing states. - */ - float64 regs[64]; - - uint32_t xregs[16]; - /* We store these fpcsr fields separately for convenience. */ - int vec_len; - int vec_stride; - - /* scratch space when Tn are not sufficient. */ - uint32_t scratch[8]; - - /* fp_status is the "normal" fp status. standard_fp_status retains - * values corresponding to the ARM "Standard FPSCR Value", ie - * default-NaN, flush-to-zero, round-to-nearest and is used by - * any operations (generally Neon) which the architecture defines - * as controlled by the standard FPSCR value rather than the FPSCR. - * - * To avoid having to transfer exception bits around, we simply - * say that the FPSCR cumulative exception flags are the logical - * OR of the flags in the two fp statuses. This relies on the - * only thing which needs to read the exception flags being - * an explicit FPSCR read. - */ - float_status fp_status; - float_status standard_fp_status; - } vfp; - uint64_t exclusive_addr; - uint64_t exclusive_val; - uint64_t exclusive_high; -#if defined(CONFIG_USER_ONLY) - uint64_t exclusive_test; - uint32_t exclusive_info; -#endif - - /* iwMMXt coprocessor state. */ - struct { - uint64_t regs[16]; - uint64_t val; - - uint32_t cregs[16]; - } iwmmxt; - - /* For mixed endian mode. */ - bool bswap_code; - -#if defined(CONFIG_USER_ONLY) - /* For usermode syscall translation. */ - int eabi; -#endif - - struct CPUBreakpoint *cpu_breakpoint[16]; - struct CPUWatchpoint *cpu_watchpoint[16]; - - CPU_COMMON - - /* These fields after the common ones so they are preserved on reset. */ - - /* Internal CPU feature flags. */ - uint64_t features; - - void *nvic; - const struct arm_boot_info *boot_info; - - // Unicorn engine - struct uc_struct *uc; -} CPUARMState; - -#include "cpu-qom.h" - -ARMCPU *cpu_arm_init(struct uc_struct *uc, const char *cpu_model); -int cpu_arm_exec(struct uc_struct *uc, CPUARMState *s); -uint32_t do_arm_semihosting(CPUARMState *env); - -static inline bool is_a64(CPUARMState *env) -{ - return env->aarch64; -} - -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_arm_signal_handler(int host_signum, void *pinfo, - void *puc); -int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, - int mmu_idx); - -/** - * pmccntr_sync - * @env: CPUARMState - * - * Synchronises the counter in the PMCCNTR. 
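The S/D/Q mapping described in the comment above can be made concrete: under the AArch32 rule, S<n> shares storage with half of D<n/2>, while under the AArch64 rule S<n> is always the low 32 bits of regs[2n], which is why no remapping is needed when switching execution state. A standalone sketch of the AArch32 view (uint64_t stands in for the float64 storage; illustration only):

    #include <stdint.h>

    /* AArch32: S<n> = regs[n/2] bits 31..0 for even n, bits 63..32 for odd n. */
    static uint32_t aarch32_read_sreg(const uint64_t *regs, int n)
    {
        uint64_t d = regs[n / 2];               /* D<n/2> */
        return (n & 1) ? (uint32_t)(d >> 32)    /* odd n: bits 63..32 */
                       : (uint32_t)d;           /* even n: bits 31..0 */
    }

    int main(void)
    {
        uint64_t regs[2] = { 0x1111111122222222ULL, 0 };
        return aarch32_read_sreg(regs, 1) == 0x11111111 ? 0 : 1;
    }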
This must always be called twice, - * once before any action that might affect the timer and again afterwards. - * The function is used to swap the state of the register if required. - * This only happens when not in user mode (!CONFIG_USER_ONLY) - */ -void pmccntr_sync(CPUARMState *env); - -/* SCTLR bit meanings. Several bits have been reused in newer - * versions of the architecture; in that case we define constants - * for both old and new bit meanings. Code which tests against those - * bits should probably check or otherwise arrange that the CPU - * is the architectural version it expects. - */ -#define SCTLR_M (1U << 0) -#define SCTLR_A (1U << 1) -#define SCTLR_C (1U << 2) -#define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */ -#define SCTLR_SA (1U << 3) -#define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */ -#define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */ -#define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */ -#define SCTLR_CP15BEN (1U << 5) /* v7 onward */ -#define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */ -#define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */ -#define SCTLR_ITD (1U << 7) /* v8 onward */ -#define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */ -#define SCTLR_SED (1U << 8) /* v8 onward */ -#define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */ -#define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */ -#define SCTLR_F (1U << 10) /* up to v6 */ -#define SCTLR_SW (1U << 10) /* v7 onward */ -#define SCTLR_Z (1U << 11) -#define SCTLR_I (1U << 12) -#define SCTLR_V (1U << 13) -#define SCTLR_RR (1U << 14) /* up to v7 */ -#define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */ -#define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */ -#define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */ -#define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */ -#define SCTLR_nTWI (1U << 16) /* v8 onward */ -#define SCTLR_HA (1U << 17) -#define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */ -#define SCTLR_nTWE (1U << 18) /* v8 onward */ -#define SCTLR_WXN (1U << 19) -#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */ -#define SCTLR_UWXN (1U << 20) /* v7 onward */ -#define SCTLR_FI (1U << 21) -#define SCTLR_U (1U << 22) -#define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */ -#define SCTLR_VE (1U << 24) /* up to v7 */ -#define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */ -#define SCTLR_EE (1U << 25) -#define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */ -#define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */ -#define SCTLR_NMFI (1U << 27) -#define SCTLR_TRE (1U << 28) -#define SCTLR_AFE (1U << 29) -#define SCTLR_TE (1U << 30) - -#define CPSR_M (0x1fU) -#define CPSR_T (1U << 5) -#define CPSR_F (1U << 6) -#define CPSR_I (1U << 7) -#define CPSR_A (1U << 8) -#define CPSR_E (1U << 9) -#define CPSR_IT_2_7 (0xfc00U) -#define CPSR_GE (0xfU << 16) -#define CPSR_IL (1U << 20) -/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in - * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use - * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32, - * where it is live state but not accessible to the AArch32 code. 
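Several of the SCTLR names above deliberately alias a single bit position, with the meaning chosen by architecture version; a standalone sketch of the pattern (values copied from the defines above, version annotations are my reading of them):

    #include <assert.h>

    #define SCTLR_W   (1U << 3)    /* up to v6; RAO in v7 */
    #define SCTLR_SA  (1U << 3)    /* v7 onward */
    #define SCTLR_VE  (1U << 24)   /* up to v7 */
    #define SCTLR_E0E (1U << 24)   /* v8 onward, AArch64 only */

    int main(void)
    {
        /* Old and new names share the bit, so code testing these bits must
         * first establish which architecture version the CPU implements. */
        assert(SCTLR_W == SCTLR_SA && SCTLR_VE == SCTLR_E0E);
        return 0;
    }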
- */ -#define CPSR_RESERVED (0x7U << 21) -#define CPSR_J (1U << 24) -#define CPSR_IT_0_1 (3U << 25) -#define CPSR_Q (1U << 27) -#define CPSR_V (1U << 28) -#define CPSR_C (1U << 29) -#define CPSR_Z (1U << 30) -#define CPSR_N (1U << 31) -#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V) -#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F) - -#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7) -#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \ - | CPSR_NZCV) -/* Bits writable in user mode. */ -#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE) -/* Execution state bits. MRS read as zero, MSR writes ignored. */ -#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL) -/* Mask of bits which may be set by exception return copying them from SPSR */ -#define CPSR_ERET_MASK (~CPSR_RESERVED) - -#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */ -#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */ -#define TTBCR_PD0 (1U << 4) -#define TTBCR_PD1 (1U << 5) -#define TTBCR_EPD0 (1U << 7) -#define TTBCR_IRGN0 (3U << 8) -#define TTBCR_ORGN0 (3U << 10) -#define TTBCR_SH0 (3U << 12) -#define TTBCR_T1SZ (3U << 16) -#define TTBCR_A1 (1U << 22) -#define TTBCR_EPD1 (1U << 23) -#define TTBCR_IRGN1 (3U << 24) -#define TTBCR_ORGN1 (3U << 26) -#define TTBCR_SH1 (1U << 28) -#define TTBCR_EAE (1U << 31) - -/* Bit definitions for ARMv8 SPSR (PSTATE) format. - * Only these are valid when in AArch64 mode; in - * AArch32 mode SPSRs are basically CPSR-format. - */ -#define PSTATE_SP (1U) -#define PSTATE_M (0xFU) -#define PSTATE_nRW (1U << 4) -#define PSTATE_F (1U << 6) -#define PSTATE_I (1U << 7) -#define PSTATE_A (1U << 8) -#define PSTATE_D (1U << 9) -#define PSTATE_IL (1U << 20) -#define PSTATE_SS (1U << 21) -#define PSTATE_V (1U << 28) -#define PSTATE_C (1U << 29) -#define PSTATE_Z (1U << 30) -#define PSTATE_N (1U << 31) -#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V) -#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F) -#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF) -/* Mode values for AArch64 */ -#define PSTATE_MODE_EL3h 13 -#define PSTATE_MODE_EL3t 12 -#define PSTATE_MODE_EL2h 9 -#define PSTATE_MODE_EL2t 8 -#define PSTATE_MODE_EL1h 5 -#define PSTATE_MODE_EL1t 4 -#define PSTATE_MODE_EL0t 0 - -/* Map EL and handler into a PSTATE_MODE. */ -static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler) -{ - return (el << 2) | handler; -} - -/* Return the current PSTATE value. For the moment we don't support 32<->64 bit - * interprocessing, so we don't attempt to sync with the cpsr state used by - * the 32 bit decoder. - */ -static inline uint32_t pstate_read(CPUARMState *env) -{ - int ZF; - - ZF = (env->ZF == 0); - return (env->NF & 0x80000000) | (ZF << 30) - | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) - | env->pstate | env->daif; -} - -static inline void pstate_write(CPUARMState *env, uint32_t val) -{ - env->ZF = (~val) & PSTATE_Z; - env->NF = val; - env->CF = (val >> 29) & 1; - env->VF = (val << 3) & 0x80000000; - env->daif = val & PSTATE_DAIF; - env->pstate = val & ~CACHED_PSTATE_BITS; -} - -/* Return the current CPSR value. */ -uint32_t cpsr_read(CPUARMState *env); -/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */ -void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask); - -/* Return the current xPSR value. 
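The split NZCV storage round-trips through pstate_write()/pstate_read() above; a reduced standalone sketch of just that plumbing (note that ZF keeps the inverted sense: ZF == 0 means the Z flag is set):

    #include <assert.h>
    #include <stdint.h>

    #define PSTATE_Z (1U << 30)
    #define PSTATE_N (1U << 31)

    static uint32_t NF, ZF, CF, VF;

    static void nzcv_write(uint32_t val)   /* as in pstate_write() */
    {
        ZF = (~val) & PSTATE_Z;
        NF = val;                          /* N lives in bit 31 of NF */
        CF = (val >> 29) & 1;
        VF = (val << 3) & 0x80000000;      /* V lives in bit 31 of VF */
    }

    static uint32_t nzcv_read(void)        /* as in pstate_read() */
    {
        return (NF & 0x80000000) | ((ZF == 0) << 30)
             | (CF << 29) | ((VF & 0x80000000) >> 3);
    }

    int main(void)
    {
        nzcv_write(PSTATE_N | PSTATE_Z);
        assert(nzcv_read() == (PSTATE_N | PSTATE_Z));
        return 0;
    }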
*/ -static inline uint32_t xpsr_read(CPUARMState *env) -{ - int ZF; - ZF = (env->ZF == 0); - return (env->NF & 0x80000000) | (ZF << 30) - | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) - | (env->thumb << 24) | ((env->condexec_bits & 3) << 25) - | ((env->condexec_bits & 0xfc) << 8) - | env->v7m.exception; -} - -/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */ -static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) -{ - if (mask & CPSR_NZCV) { - env->ZF = (~val) & CPSR_Z; - env->NF = val; - env->CF = (val >> 29) & 1; - env->VF = (val << 3) & 0x80000000; - } - if (mask & CPSR_Q) - env->QF = ((val & CPSR_Q) != 0); - if (mask & (1 << 24)) - env->thumb = ((val & (1 << 24)) != 0); - if (mask & CPSR_IT_0_1) { - env->condexec_bits &= ~3; - env->condexec_bits |= (val >> 25) & 3; - } - if (mask & CPSR_IT_2_7) { - env->condexec_bits &= 3; - env->condexec_bits |= (val >> 8) & 0xfc; - } - if (mask & 0x1ff) { - env->v7m.exception = val & 0x1ff; - } -} - -#define HCR_VM (1ULL << 0) -#define HCR_SWIO (1ULL << 1) -#define HCR_PTW (1ULL << 2) -#define HCR_FMO (1ULL << 3) -#define HCR_IMO (1ULL << 4) -#define HCR_AMO (1ULL << 5) -#define HCR_VF (1ULL << 6) -#define HCR_VI (1ULL << 7) -#define HCR_VSE (1ULL << 8) -#define HCR_FB (1ULL << 9) -#define HCR_BSU_MASK (3ULL << 10) -#define HCR_DC (1ULL << 12) -#define HCR_TWI (1ULL << 13) -#define HCR_TWE (1ULL << 14) -#define HCR_TID0 (1ULL << 15) -#define HCR_TID1 (1ULL << 16) -#define HCR_TID2 (1ULL << 17) -#define HCR_TID3 (1ULL << 18) -#define HCR_TSC (1ULL << 19) -#define HCR_TIDCP (1ULL << 20) -#define HCR_TACR (1ULL << 21) -#define HCR_TSW (1ULL << 22) -#define HCR_TPC (1ULL << 23) -#define HCR_TPU (1ULL << 24) -#define HCR_TTLB (1ULL << 25) -#define HCR_TVM (1ULL << 26) -#define HCR_TGE (1ULL << 27) -#define HCR_TDZ (1ULL << 28) -#define HCR_HCD (1ULL << 29) -#define HCR_TRVM (1ULL << 30) -#define HCR_RW (1ULL << 31) -#define HCR_CD (1ULL << 32) -#define HCR_ID (1ULL << 33) -#define HCR_MASK ((1ULL << 34) - 1) - -#define SCR_NS (1U << 0) -#define SCR_IRQ (1U << 1) -#define SCR_FIQ (1U << 2) -#define SCR_EA (1U << 3) -#define SCR_FW (1U << 4) -#define SCR_AW (1U << 5) -#define SCR_NET (1U << 6) -#define SCR_SMD (1U << 7) -#define SCR_HCE (1U << 8) -#define SCR_SIF (1U << 9) -#define SCR_RW (1U << 10) -#define SCR_ST (1U << 11) -#define SCR_TWI (1U << 12) -#define SCR_TWE (1U << 13) -#define SCR_AARCH32_MASK (0x3fff & ~(SCR_RW | SCR_ST)) -#define SCR_AARCH64_MASK (0x3fff & ~SCR_NET) - -/* Return the current FPSCR value. */ -uint32_t vfp_get_fpscr(CPUARMState *env); -void vfp_set_fpscr(CPUARMState *env, uint32_t val); - -/* For A64 the FPSCR is split into two logically distinct registers, - * FPCR and FPSR. However since they still use non-overlapping bits - * we store the underlying state in fpscr and just mask on read/write. 
- */ -#define FPSR_MASK 0xf800009f -#define FPCR_MASK 0x07f79f00 -static inline uint32_t vfp_get_fpsr(CPUARMState *env) -{ - return vfp_get_fpscr(env) & FPSR_MASK; -} - -static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val) -{ - uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK); - vfp_set_fpscr(env, new_fpscr); -} - -static inline uint32_t vfp_get_fpcr(CPUARMState *env) -{ - return vfp_get_fpscr(env) & FPCR_MASK; -} - -static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val) -{ - uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK); - vfp_set_fpscr(env, new_fpscr); -} - -enum arm_cpu_mode { - ARM_CPU_MODE_USR = 0x10, - ARM_CPU_MODE_FIQ = 0x11, - ARM_CPU_MODE_IRQ = 0x12, - ARM_CPU_MODE_SVC = 0x13, - ARM_CPU_MODE_MON = 0x16, - ARM_CPU_MODE_ABT = 0x17, - ARM_CPU_MODE_HYP = 0x1a, - ARM_CPU_MODE_UND = 0x1b, - ARM_CPU_MODE_SYS = 0x1f -}; - -/* VFP system registers. */ -#define ARM_VFP_FPSID 0 -#define ARM_VFP_FPSCR 1 -#define ARM_VFP_MVFR2 5 -#define ARM_VFP_MVFR1 6 -#define ARM_VFP_MVFR0 7 -#define ARM_VFP_FPEXC 8 -#define ARM_VFP_FPINST 9 -#define ARM_VFP_FPINST2 10 - -/* iwMMXt coprocessor control registers. */ -#define ARM_IWMMXT_wCID 0 -#define ARM_IWMMXT_wCon 1 -#define ARM_IWMMXT_wCSSF 2 -#define ARM_IWMMXT_wCASF 3 -#define ARM_IWMMXT_wCGR0 8 -#define ARM_IWMMXT_wCGR1 9 -#define ARM_IWMMXT_wCGR2 10 -#define ARM_IWMMXT_wCGR3 11 - -/* If adding a feature bit which corresponds to a Linux ELF - * HWCAP bit, remember to update the feature-bit-to-hwcap - * mapping in linux-user/elfload.c:get_elf_hwcap(). - */ -enum arm_features { - ARM_FEATURE_VFP, - ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */ - ARM_FEATURE_XSCALE, /* Intel XScale extensions. */ - ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */ - ARM_FEATURE_V6, - ARM_FEATURE_V6K, - ARM_FEATURE_V7, - ARM_FEATURE_THUMB2, - ARM_FEATURE_MPU, /* Only has Memory Protection Unit, not full MMU. */ - ARM_FEATURE_VFP3, - ARM_FEATURE_VFP_FP16, - ARM_FEATURE_NEON, - ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */ - ARM_FEATURE_M, /* Microcontroller profile. */ - ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. 
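The split works because the two masks above are disjoint, so vfp_set_fpsr() and vfp_set_fpcr() can never clobber each other's bits; a one-assert standalone check:

    #include <assert.h>

    #define FPSR_MASK 0xf800009f
    #define FPCR_MASK 0x07f79f00

    int main(void)
    {
        /* No FPSCR bit is claimed by both the FPSR and FPCR views. */
        assert((FPSR_MASK & FPCR_MASK) == 0);
        return 0;
    }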
*/ - ARM_FEATURE_THUMB2EE, - ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */ - ARM_FEATURE_V4T, - ARM_FEATURE_V5, - ARM_FEATURE_STRONGARM, - ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */ - ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */ - ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */ - ARM_FEATURE_GENERIC_TIMER, - ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */ - ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */ - ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */ - ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */ - ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */ - ARM_FEATURE_MPIDR, /* has cp15 MPIDR */ - ARM_FEATURE_PXN, /* has Privileged Execute Never bit */ - ARM_FEATURE_LPAE, /* has Large Physical Address Extension */ - ARM_FEATURE_V8, - ARM_FEATURE_AARCH64, /* supports 64 bit mode */ - ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */ - ARM_FEATURE_CBAR, /* has cp15 CBAR */ - ARM_FEATURE_CRC, /* ARMv8 CRC instructions */ - ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */ - ARM_FEATURE_EL2, /* has EL2 Virtualization support */ - ARM_FEATURE_EL3, /* has EL3 Secure monitor support */ - ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */ -}; - -static inline int arm_feature(CPUARMState *env, int feature) -{ - return (env->features & (1ULL << feature)) != 0; -} - -#if !defined(CONFIG_USER_ONLY) -/* Return true if exception levels below EL3 are in secure state, - * or would be following an exception return to that level. - * Unlike arm_is_secure() (which is always a question about the - * _current_ state of the CPU) this doesn't care about the current - * EL or mode. - */ -static inline bool arm_is_secure_below_el3(CPUARMState *env) -{ - if (arm_feature(env, ARM_FEATURE_EL3)) { - return !(env->cp15.scr_el3 & SCR_NS); - } else { - /* If EL2 is not supported then the secure state is implementation - * defined, in which case QEMU defaults to non-secure. - */ - return false; - } -} - -/* Return true if the processor is in secure state */ -static inline bool arm_is_secure(CPUARMState *env) -{ - if (arm_feature(env, ARM_FEATURE_EL3)) { - if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) { - /* CPU currently in AArch64 state and EL3 */ - return true; - } else if (!is_a64(env) && - (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { - /* CPU currently in AArch32 state and monitor mode */ - return true; - } - } - return arm_is_secure_below_el3(env); -} - -#else -static inline bool arm_is_secure_below_el3(CPUARMState *env) -{ - return false; -} - -static inline bool arm_is_secure(CPUARMState *env) -{ - return false; -} -#endif - -/* Return true if the specified exception level is running in AArch64 state. */ -static inline bool arm_el_is_aa64(CPUARMState *env, int el) -{ - /* We don't currently support EL2, and this isn't valid for EL0 - * (if we're in EL0, is_a64() is what you want, and if we're not in EL0 - * then the state of EL0 isn't well defined.) - */ - assert(el == 1 || el == 3); - - /* AArch64-capable CPUs always run with EL1 in AArch64 mode. This - * is a QEMU-imposed simplification which we may wish to change later. - * If we in future support EL2 and/or EL3, then the state of lower - * exception levels is controlled by the HCR.RW and SCR.RW bits. 
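arm_feature() above is one side of a plain 64-bit bitset; the matching set_feature() helpers in cpu.c/cpu64.c OR in 1ULL << feature. A standalone miniature of the pattern (the enum names are stand-ins, not the real arm_features values):

    #include <assert.h>
    #include <stdint.h>

    enum { FEAT_V7, FEAT_NEON, FEAT_V8 };

    static uint64_t features;

    static void set_feature(int f) { features |= 1ULL << f; }
    static int  has_feature(int f) { return (features & (1ULL << f)) != 0; }

    int main(void)
    {
        set_feature(FEAT_V7);
        set_feature(FEAT_NEON);
        assert(has_feature(FEAT_NEON) && !has_feature(FEAT_V8));
        return 0;
    }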
- */ - return arm_feature(env, ARM_FEATURE_AARCH64); -} - -void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf); -unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx); - -/* Interface between CPU and Interrupt controller. */ -void armv7m_nvic_set_pending(void *opaque, int irq); -int armv7m_nvic_acknowledge_irq(void *opaque); -void armv7m_nvic_complete_irq(void *opaque, int irq); - -/* Interface for defining coprocessor registers. - * Registers are defined in tables of arm_cp_reginfo structs - * which are passed to define_arm_cp_regs(). - */ - -/* When looking up a coprocessor register we look for it - * via an integer which encodes all of: - * coprocessor number - * Crn, Crm, opc1, opc2 fields - * 32 or 64 bit register (ie is it accessed via MRC/MCR - * or via MRRC/MCRR?) - * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field. - * (In this case crn and opc2 should be zero.) - * For AArch64, there is no 32/64 bit size distinction; - * instead all registers have a 2 bit op0, 3 bit op1 and op2, - * and 4 bit CRn and CRm. The encoding patterns are chosen - * to be easy to convert to and from the KVM encodings, and also - * so that the hashtable can contain both AArch32 and AArch64 - * registers (to allow for interprocessing where we might run - * 32 bit code on a 64 bit core). - */ -/* This bit is private to our hashtable cpreg; in KVM register - * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64 - * in the upper bits of the 64 bit ID. - */ -#define CP_REG_AA64_SHIFT 28 -#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT) - -#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2) \ - (((cp) << 16) | ((is64) << 15) | ((crn) << 11) | \ - ((crm) << 7) | ((opc1) << 3) | (opc2)) - -#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \ - (CP_REG_AA64_MASK | \ - ((cp) << CP_REG_ARM_COPROC_SHIFT) | \ - ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \ - ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \ - ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \ - ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \ - ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT)) - -/* Convert a full 64 bit KVM register ID to the truncated 32 bit - * version used as a key for the coprocessor register hashtable - */ -static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid) -{ - uint32_t cpregid = kvmid; - if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) { - cpregid |= CP_REG_AA64_MASK; - } else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) { - cpregid |= (1 << 15); - } - return cpregid; -} - -/* Convert a truncated 32 bit hashtable key into the full - * 64 bit KVM register ID. - */ -static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid) -{ - uint64_t kvmid; - - if (cpregid & CP_REG_AA64_MASK) { - kvmid = cpregid & ~CP_REG_AA64_MASK; - kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64; - } else { - kvmid = cpregid & ~(1 << 15); - if (cpregid & (1 << 15)) { - kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM; - } else { - kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM; - } - } - return kvmid; -} - -/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a - * special-behaviour cp reg and bits [15..8] indicate what behaviour - * it has. Otherwise it is a simple cp reg, where CONST indicates that - * TCG can assume the value to be constant (ie load at translate time) - * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END - * indicates that the TB should not be ended after a write to this register - * (the default is that the TB ends after cp writes). 
OVERRIDE permits - * a register definition to override a previous definition for the - * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the - * old must have the OVERRIDE bit set. - * NO_MIGRATE indicates that this register should be ignored for migration; - * (eg because any state is accessed via some other coprocessor register). - * IO indicates that this register does I/O and therefore its accesses - * need to be surrounded by gen_io_start()/gen_io_end(). In particular, - * registers which implement clocks or timers require this. - */ -#define ARM_CP_SPECIAL 1 -#define ARM_CP_CONST 2 -#define ARM_CP_64BIT 4 -#define ARM_CP_SUPPRESS_TB_END 8 -#define ARM_CP_OVERRIDE 16 -#define ARM_CP_NO_MIGRATE 32 -#define ARM_CP_IO 64 -#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8)) -#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8)) -#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8)) -#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | (4 << 8)) -#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | (5 << 8)) -#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA -/* Used only as a terminator for ARMCPRegInfo lists */ -#define ARM_CP_SENTINEL 0xffff -/* Mask of only the flag bits in a type field */ -#define ARM_CP_FLAG_MASK 0x7f - -/* Valid values for ARMCPRegInfo state field, indicating which of - * the AArch32 and AArch64 execution states this register is visible in. - * If the reginfo doesn't explicitly specify then it is AArch32 only. - * If the reginfo is declared to be visible in both states then a second - * reginfo is synthesised for the AArch32 view of the AArch64 register, - * such that the AArch32 view is the lower 32 bits of the AArch64 one. - * Note that we rely on the values of these enums as we iterate through - * the various states in some places. - */ -enum { - ARM_CP_STATE_AA32 = 0, - ARM_CP_STATE_AA64 = 1, - ARM_CP_STATE_BOTH = 2, -}; - -/* Return true if cptype is a valid type field. This is used to try to - * catch errors where the sentinel has been accidentally left off the end - * of a list of registers. - */ -static inline bool cptype_valid(int cptype) -{ - return ((cptype & ~ARM_CP_FLAG_MASK) == 0) - || ((cptype & ARM_CP_SPECIAL) && - ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL)); -} - -/* Access rights: - * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM - * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and - * PL2 (hyp). The other level which has Read and Write bits is Secure PL1 - * (ie any of the privileged modes in Secure state, or Monitor mode). - * If a register is accessible in one privilege level it's always accessible - * in higher privilege levels too. Since "Secure PL1" also follows this rule - * (ie anything visible in PL2 is visible in S-PL1, some things are only - * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the - * terminology a little and call this PL3. - * In AArch64 things are somewhat simpler as the PLx bits line up exactly - * with the ELx exception levels. - * - * If access permissions for a register are more complex than can be - * described with these bits, then use a laxer set of restrictions, and - * do the more restrictive/complex check inside a helper function. 
- */ -#define PL3_R 0x80 -#define PL3_W 0x40 -#define PL2_R (0x20 | PL3_R) -#define PL2_W (0x10 | PL3_W) -#define PL1_R (0x08 | PL2_R) -#define PL1_W (0x04 | PL2_W) -#define PL0_R (0x02 | PL1_R) -#define PL0_W (0x01 | PL1_W) - -#define PL3_RW (PL3_R | PL3_W) -#define PL2_RW (PL2_R | PL2_W) -#define PL1_RW (PL1_R | PL1_W) -#define PL0_RW (PL0_R | PL0_W) - -/* Return the current Exception Level (as per ARMv8; note that this differs - * from the ARMv7 Privilege Level). - */ -static inline int arm_current_el(CPUARMState *env) -{ - if (is_a64(env)) { - return extract32(env->pstate, 2, 2); - } - - switch (env->uncached_cpsr & 0x1f) { - case ARM_CPU_MODE_USR: - return 0; - case ARM_CPU_MODE_HYP: - return 2; - case ARM_CPU_MODE_MON: - return 3; - default: - if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { - /* If EL3 is 32-bit then all secure privileged modes run in - * EL3 - */ - return 3; - } - - return 1; - } -} - -typedef struct ARMCPRegInfo ARMCPRegInfo; - -typedef enum CPAccessResult { - /* Access is permitted */ - CP_ACCESS_OK = 0, - /* Access fails due to a configurable trap or enable which would - * result in a categorized exception syndrome giving information about - * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6, - * 0xc or 0x18). - */ - CP_ACCESS_TRAP = 1, - /* Access fails and results in an exception syndrome 0x0 ("uncategorized"). - * Note that this is not a catch-all case -- the set of cases which may - * result in this failure is specifically defined by the architecture. - */ - CP_ACCESS_TRAP_UNCATEGORIZED = 2, -} CPAccessResult; - -/* Access functions for coprocessor registers. These cannot fail and - * may not raise exceptions. - */ -typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque); -typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque, - uint64_t value); -/* Access permission check functions for coprocessor registers. */ -typedef CPAccessResult CPAccessFn(CPUARMState *env, const ARMCPRegInfo *opaque); -/* Hook function for register reset */ -typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); - -#define CP_ANY 0xff - -/* Definition of an ARM coprocessor register */ -struct ARMCPRegInfo { - /* Name of register (useful mainly for debugging, need not be unique) */ - const char *name; - /* Location of register: coprocessor number and (crn,crm,opc1,opc2) - * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a - * 'wildcard' field -- any value of that field in the MRC/MCR insn - * will be decoded to this register. The register read and write - * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2 - * used by the program, so it is possible to register a wildcard and - * then behave differently on read/write if necessary. - * For 64 bit registers, only crm and opc1 are relevant; crn and opc2 - * must both be zero. - * For AArch64-visible registers, opc0 is also used. - * Since there are no "coprocessors" in AArch64, cp is purely used as a - * way to distinguish (for KVM's benefit) guest-visible system registers - * from demuxed ones provided to preserve the "no side effects on - * KVM register read/write from QEMU" semantics. cp==0x13 is guest - * visible (to match KVM's encoding); cp==0 will be converted to - * cp==0x13 when the ARMCPRegInfo is registered, for convenience. 
- */ - uint8_t cp; - uint8_t crn; - uint8_t crm; - uint8_t opc0; - uint8_t opc1; - uint8_t opc2; - /* Execution state in which this register is visible: ARM_CP_STATE_* */ - int state; - /* Register type: ARM_CP_* bits/values */ - int type; - /* Access rights: PL*_[RW] */ - int access; - /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when - * this register was defined: can be used to hand data through to the - * register read/write functions, since they are passed the ARMCPRegInfo*. - */ - void *opaque; - /* Value of this register, if it is ARM_CP_CONST. Otherwise, if - * fieldoffset is non-zero, the reset value of the register. - */ - uint64_t resetvalue; - /* Offset of the field in CPUARMState for this register. This is not - * needed if either: - * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs - * 2. both readfn and writefn are specified - */ - ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */ - /* Function for making any access checks for this register in addition to - * those specified by the 'access' permissions bits. If NULL, no extra - * checks required. The access check is performed at runtime, not at - * translate time. - */ - CPAccessFn *accessfn; - /* Function for handling reads of this register. If NULL, then reads - * will be done by loading from the offset into CPUARMState specified - * by fieldoffset. - */ - CPReadFn *readfn; - /* Function for handling writes of this register. If NULL, then writes - * will be done by writing to the offset into CPUARMState specified - * by fieldoffset. - */ - CPWriteFn *writefn; - /* Function for doing a "raw" read; used when we need to copy - * coprocessor state to the kernel for KVM or out for - * migration. This only needs to be provided if there is also a - * readfn and it has side effects (for instance clear-on-read bits). - */ - CPReadFn *raw_readfn; - /* Function for doing a "raw" write; used when we need to copy KVM - * kernel coprocessor state into userspace, or for inbound - * migration. This only needs to be provided if there is also a - * writefn and it masks out "unwritable" bits or has write-one-to-clear - * or similar behaviour. - */ - CPWriteFn *raw_writefn; - /* Function for resetting the register. If NULL, then reset will be done - * by writing resetvalue to the field specified in fieldoffset. If - * fieldoffset is 0 then no reset will be done. - */ - CPResetFn *resetfn; -}; - -/* Macros which are lvalues for the field in CPUARMState for the - * ARMCPRegInfo *ri. 
- */ -#define CPREG_FIELD32(env, ri) \ - (*(uint32_t *)((char *)(env) + (ri)->fieldoffset)) -#define CPREG_FIELD64(env, ri) \ - (*(uint64_t *)((char *)(env) + (ri)->fieldoffset)) - -#define REGINFO_SENTINEL { NULL, 0,0,0,0,0,0, 0, ARM_CP_SENTINEL, 0, NULL, 0,0,0,0,0,0,0,0, } - -void define_arm_cp_regs_with_opaque(ARMCPU *cpu, - const ARMCPRegInfo *regs, void *opaque); -void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, - const ARMCPRegInfo *regs, void *opaque); -static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs) -{ - define_arm_cp_regs_with_opaque(cpu, regs, 0); -} -static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs) -{ - define_one_arm_cp_reg_with_opaque(cpu, regs, 0); -} -const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp); - -/* CPWriteFn that can be used to implement writes-ignored behaviour */ -void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value); -/* CPReadFn that can be used for read-as-zero behaviour */ -uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri); - -/* CPResetFn that does nothing, for use if no reset is required even - * if fieldoffset is non zero. - */ -void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque); - -/* Return true if this reginfo struct's field in the cpu state struct - * is 64 bits wide. - */ -static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri) -{ - return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT); -} - -static inline bool cp_access_ok(int current_el, - const ARMCPRegInfo *ri, int isread) -{ - return (ri->access >> ((current_el * 2) + isread)) & 1; -} - -/** - * write_list_to_cpustate: - * @cpu: ARMCPU - * - * For each register listed in the ARMCPU cpreg_indexes list, write - * its value from the cpreg_values list into the CPUARMState structure. - * This updates TCG's working data structures from KVM data or - * from incoming migration state. - * - * Returns: true if all register values were updated correctly, - * false if some register was unknown or could not be written. - * Note that we do not stop early on failure -- we will attempt - * writing all registers in the list. - */ -bool write_list_to_cpustate(ARMCPU *cpu); - -/** - * write_cpustate_to_list: - * @cpu: ARMCPU - * - * For each register listed in the ARMCPU cpreg_indexes list, write - * its value from the CPUARMState structure into the cpreg_values list. - * This is used to copy info from TCG's working data structures into - * KVM or for outbound migration. - * - * Returns: true if all register values were read correctly, - * false if some register was unknown or could not be read. - * Note that we do not stop early on failure -- we will attempt - * reading all registers in the list. - */ -bool write_cpustate_to_list(ARMCPU *cpu); - -/* Does the core conform to the "MicroController" profile. e.g. Cortex-M3. - Note the M in older cores (eg. ARM7TDMI) stands for Multiply. These are - conventional cores (ie. Application or Realtime profile). */ - -#define IS_M(env) arm_feature(env, ARM_FEATURE_M) - -#define ARM_CPUID_TI915T 0x54029152 -#define ARM_CPUID_TI925T 0x54029252 - -#if defined(CONFIG_USER_ONLY) -#define TARGET_PAGE_BITS 12 -#else -/* The ARM MMU allows 1k pages. */ -/* ??? Linux doesn't actually use these, and they're deprecated in recent - architecture revisions. Maybe a configure option to disable them.
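The access byte tested by cp_access_ok() above is a ladder of bit pairs, one pair per privilege level, with the read bit just above the write bit; for a PL1_RW register the byte is 0xfc, so EL0 is refused and EL1 and above are allowed. A standalone check:

    #include <assert.h>

    /* Same shift as cp_access_ok() above: two bits per EL. */
    static int access_ok(int access, int current_el, int isread)
    {
        return (access >> ((current_el * 2) + isread)) & 1;
    }

    int main(void)
    {
        int pl1_rw = 0xfc;   /* PL1_R | PL1_W expanded numerically */

        assert(!access_ok(pl1_rw, 0, 1));   /* EL0 read: denied */
        assert(access_ok(pl1_rw, 1, 0));    /* EL1 write: allowed */
        assert(access_ok(pl1_rw, 3, 1));    /* EL3 read: allowed */
        return 0;
    }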
*/ -#define TARGET_PAGE_BITS 10 -#endif - -#if defined(TARGET_AARCH64) -# define TARGET_PHYS_ADDR_SPACE_BITS 48 -# define TARGET_VIRT_ADDR_SPACE_BITS 64 -#else -# define TARGET_PHYS_ADDR_SPACE_BITS 40 -# define TARGET_VIRT_ADDR_SPACE_BITS 32 -#endif - -static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx) -{ - CPUARMState *env = cs->env_ptr; - unsigned int cur_el = arm_current_el(env); - unsigned int target_el = arm_excp_target_el(cs, excp_idx); - /* FIXME: Use actual secure state. */ - bool secure = false; - /* If in EL1/0, Physical IRQ routing to EL2 only happens from NS state. */ - bool irq_can_hyp = !secure && cur_el < 2 && target_el == 2; - - /* Don't take exceptions if they target a lower EL. */ - if (cur_el > target_el) { - return false; - } - - switch (excp_idx) { - case EXCP_FIQ: - if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_FMO)) { - return true; - } - return !(env->daif & PSTATE_F); - case EXCP_IRQ: - if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_IMO)) { - return true; - } - return !(env->daif & PSTATE_I); - case EXCP_VFIQ: - if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) { - /* VFIQs are only taken when hypervised and non-secure. */ - return false; - } - return !(env->daif & PSTATE_F); - case EXCP_VIRQ: - if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) { - /* VIRQs are only taken when hypervised and non-secure. */ - return false; - } - return !(env->daif & PSTATE_I); - default: - g_assert_not_reached(); - return false; - } -} - -static inline CPUARMState *cpu_init(struct uc_struct *uc, const char *cpu_model) -{ - ARMCPU *cpu = cpu_arm_init(uc, cpu_model); - if (cpu) { - return &cpu->env; - } - return NULL; -} - -#ifdef TARGET_ARM -#define cpu_exec cpu_arm_exec -#define cpu_gen_code cpu_arm_gen_code -#define cpu_signal_handler cpu_arm_signal_handler -#define cpu_list arm_cpu_list -#endif - -/* MMU modes definitions */ -#define MMU_MODE0_SUFFIX _user -#define MMU_MODE1_SUFFIX _kernel -#define MMU_USER_IDX 0 -static inline int cpu_mmu_index (CPUARMState *env) -{ - return arm_current_el(env); -} - -/* Return the Exception Level targeted by debug exceptions; - * currently always EL1 since we don't implement EL2 or EL3. - */ -static inline int arm_debug_target_el(CPUARMState *env) -{ - return 1; -} - -static inline bool aa64_generate_debug_exceptions(CPUARMState *env) -{ - if (arm_current_el(env) == arm_debug_target_el(env)) { - if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0) - || (env->daif & PSTATE_D)) { - return false; - } - } - return true; -} - -static inline bool aa32_generate_debug_exceptions(CPUARMState *env) -{ - if (arm_current_el(env) == 0 && arm_el_is_aa64(env, 1)) { - return aa64_generate_debug_exceptions(env); - } - return arm_current_el(env) != 2; -} - -/* Return true if debugging exceptions are currently enabled. - * This corresponds to what in ARM ARM pseudocode would be - * if UsingAArch32() then - * return AArch32.GenerateDebugExceptions() - * else - * return AArch64.GenerateDebugExceptions() - * We choose to push the if() down into this function for clarity, - * since the pseudocode has it at all callsites except for the one in - * CheckSoftwareStep(), where it is elided because both branches would - * always return the same value. - * - * Parts of the pseudocode relating to EL2 and EL3 are omitted because we - * don't yet implement those exception levels or their associated trap bits.
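Stripped of the EL2 routing special cases, the IRQ/FIQ arms of arm_excp_unmasked() above reduce to testing the PSTATE.I and PSTATE.F mask bits; a standalone sketch:

    #include <assert.h>
    #include <stdint.h>

    #define PSTATE_F (1U << 6)
    #define PSTATE_I (1U << 7)

    /* An interrupt is deliverable exactly when its DAIF mask bit is clear
     * (ignoring the HCR_EL2.IMO/FMO routing handled separately above). */
    static int irq_unmasked(uint64_t daif) { return !(daif & PSTATE_I); }
    static int fiq_unmasked(uint64_t daif) { return !(daif & PSTATE_F); }

    int main(void)
    {
        assert(irq_unmasked(0) && !irq_unmasked(PSTATE_I));
        assert(fiq_unmasked(PSTATE_I));   /* I masks IRQs only, not FIQs */
        return 0;
    }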
- */ -static inline bool arm_generate_debug_exceptions(CPUARMState *env) -{ - if (env->aarch64) { - return aa64_generate_debug_exceptions(env); - } else { - return aa32_generate_debug_exceptions(env); - } -} - -/* Is single-stepping active? (Note that the "is EL_D AArch64?" check - * implicitly means this always returns false in pre-v8 CPUs.) - */ -static inline bool arm_singlestep_active(CPUARMState *env) -{ - return extract32(env->cp15.mdscr_el1, 0, 1) - && arm_el_is_aa64(env, arm_debug_target_el(env)) - && arm_generate_debug_exceptions(env); -} - -#include "exec/cpu-all.h" - -/* Bit usage in the TB flags field: bit 31 indicates whether we are - * in 32 or 64 bit mode. The meaning of the other bits depends on that. - */ -#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31 -#define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT) - -/* Bit usage when in AArch32 state: */ -#define ARM_TBFLAG_THUMB_SHIFT 0 -#define ARM_TBFLAG_THUMB_MASK (1 << ARM_TBFLAG_THUMB_SHIFT) -#define ARM_TBFLAG_VECLEN_SHIFT 1 -#define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT) -#define ARM_TBFLAG_VECSTRIDE_SHIFT 4 -#define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT) -#define ARM_TBFLAG_PRIV_SHIFT 6 -#define ARM_TBFLAG_PRIV_MASK (1 << ARM_TBFLAG_PRIV_SHIFT) -#define ARM_TBFLAG_VFPEN_SHIFT 7 -#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT) -#define ARM_TBFLAG_CONDEXEC_SHIFT 8 -#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT) -#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16 -#define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT) -#define ARM_TBFLAG_CPACR_FPEN_SHIFT 17 -#define ARM_TBFLAG_CPACR_FPEN_MASK (1 << ARM_TBFLAG_CPACR_FPEN_SHIFT) -#define ARM_TBFLAG_SS_ACTIVE_SHIFT 18 -#define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT) -#define ARM_TBFLAG_PSTATE_SS_SHIFT 19 -#define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT) -/* We store the bottom two bits of the CPAR as TB flags and handle - * checks on the other bits at runtime - */ -#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 20 -#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT) - -/* Bit usage when in AArch64 state */ -#define ARM_TBFLAG_AA64_EL_SHIFT 0 -#define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT) -#define ARM_TBFLAG_AA64_FPEN_SHIFT 2 -#define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT) -#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3 -#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) -#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 4 -#define ARM_TBFLAG_AA64_PSTATE_SS_MASK (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) - -/* some convenience accessor macros */ -#define ARM_TBFLAG_AARCH64_STATE(F) \ - (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT) -#define ARM_TBFLAG_THUMB(F) \ - (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT) -#define ARM_TBFLAG_VECLEN(F) \ - (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT) -#define ARM_TBFLAG_VECSTRIDE(F) \ - (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT) -#define ARM_TBFLAG_PRIV(F) \ - (((F) & ARM_TBFLAG_PRIV_MASK) >> ARM_TBFLAG_PRIV_SHIFT) -#define ARM_TBFLAG_VFPEN(F) \ - (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT) -#define ARM_TBFLAG_CONDEXEC(F) \ - (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT) -#define ARM_TBFLAG_BSWAP_CODE(F) \ - (((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT) -#define ARM_TBFLAG_CPACR_FPEN(F) \ - (((F) & 
ARM_TBFLAG_CPACR_FPEN_MASK) >> ARM_TBFLAG_CPACR_FPEN_SHIFT) -#define ARM_TBFLAG_SS_ACTIVE(F) \ - (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT) -#define ARM_TBFLAG_PSTATE_SS(F) \ - (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT) -#define ARM_TBFLAG_XSCALE_CPAR(F) \ - (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT) -#define ARM_TBFLAG_AA64_EL(F) \ - (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT) -#define ARM_TBFLAG_AA64_FPEN(F) \ - (((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT) -#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \ - (((F) & ARM_TBFLAG_AA64_SS_ACTIVE_MASK) >> ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) -#define ARM_TBFLAG_AA64_PSTATE_SS(F) \ - (((F) & ARM_TBFLAG_AA64_PSTATE_SS_MASK) >> ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) - -static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, - target_ulong *cs_base, int *flags) -{ - int fpen; - - if (arm_feature(env, ARM_FEATURE_V6)) { - fpen = extract32(env->cp15.c1_coproc, 20, 2); - } else { - /* CPACR doesn't exist before v6, so VFP is always accessible */ - fpen = 3; - } - - if (is_a64(env)) { - *pc = env->pc; - *flags = ARM_TBFLAG_AARCH64_STATE_MASK - | (arm_current_el(env) << ARM_TBFLAG_AA64_EL_SHIFT); - if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) { - *flags |= ARM_TBFLAG_AA64_FPEN_MASK; - } - /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine - * states defined in the ARM ARM for software singlestep: - * SS_ACTIVE PSTATE.SS State - * 0 x Inactive (the TB flag for SS is always 0) - * 1 0 Active-pending - * 1 1 Active-not-pending - */ - if (arm_singlestep_active(env)) { - *flags |= ARM_TBFLAG_AA64_SS_ACTIVE_MASK; - if (env->pstate & PSTATE_SS) { - *flags |= ARM_TBFLAG_AA64_PSTATE_SS_MASK; - } - } - } else { - int privmode; - *pc = env->regs[15]; - *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT) - | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT) - | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT) - | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT) - | (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT); - if (arm_feature(env, ARM_FEATURE_M)) { - privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1)); - } else { - privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR; - } - if (privmode) { - *flags |= ARM_TBFLAG_PRIV_MASK; - } - if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30) - || arm_el_is_aa64(env, 1)) { - *flags |= ARM_TBFLAG_VFPEN_MASK; - } - if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) { - *flags |= ARM_TBFLAG_CPACR_FPEN_MASK; - } - /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine - * states defined in the ARM ARM for software singlestep: - * SS_ACTIVE PSTATE.SS State - * 0 x Inactive (the TB flag for SS is always 0) - * 1 0 Active-pending - * 1 1 Active-not-pending - */ - if (arm_singlestep_active(env)) { - *flags |= ARM_TBFLAG_SS_ACTIVE_MASK; - if (env->uncached_cpsr & PSTATE_SS) { - *flags |= ARM_TBFLAG_PSTATE_SS_MASK; - } - } - *flags |= (extract32(env->cp15.c15_cpar, 0, 2) - << ARM_TBFLAG_XSCALE_CPAR_SHIFT); - } - - *cs_base = 0; -} - -#include "exec/exec-all.h" - -static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb) -{ - if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) { - env->pc = tb->pc; - } else { - env->regs[15] = tb->pc; - } -} - -enum { - QEMU_PSCI_CONDUIT_DISABLED = 0, - QEMU_PSCI_CONDUIT_SMC = 1, - QEMU_PSCI_CONDUIT_HVC = 2, -}; - -#endif diff --git a/qemu/target-arm/cpu64.c b/qemu/target-arm/cpu64.c deleted file mode 100644 index 
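The SHIFT/MASK/accessor triples above are the standard pattern for packing several small fields into the single 32-bit tb->flags word that keys the translation cache, with bit 31 selecting which interpretation applies; cpu_get_tb_cpu_state() is the encoder and the translator-side macros are the decoders. The single-step fields follow the ARM ARM state machine quoted in the comments: (SS_ACTIVE, PSTATE.SS) = (1,1) means execute one instruction before stepping, (1,0) means take the step exception first. A self-contained sketch of the pack/decode pattern, with illustrative field names rather than the real ones:

#include <assert.h>
#include <stdint.h>

#define F_THUMB_SHIFT 0
#define F_THUMB_MASK  (1u << F_THUMB_SHIFT)
#define F_EL_SHIFT    1
#define F_EL_MASK     (0x3u << F_EL_SHIFT)

/* Decoder macros mirror the ARM_TBFLAG_*(F) accessors above. */
#define F_THUMB(f) (((f) & F_THUMB_MASK) >> F_THUMB_SHIFT)
#define F_EL(f)    (((f) & F_EL_MASK) >> F_EL_SHIFT)

int main(void)
{
    uint32_t flags = 0;
    flags |= 1u << F_THUMB_SHIFT;    /* encode: Thumb state */
    flags |= 2u << F_EL_SHIFT;       /* encode: EL2 */
    assert(F_THUMB(flags) == 1);
    assert(F_EL(flags) == 2);
    return 0;
}

Anything that affects code generation must be folded into this word, because two contexts with equal flags may legally share a cached translation.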
30948ae3..00000000 --- a/qemu/target-arm/cpu64.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * QEMU AArch64 CPU - * - * Copyright (c) 2013 Linaro Ltd - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see - * - */ - -#include "cpu.h" -#include "qemu-common.h" -#include "hw/arm/arm.h" -#include "sysemu/sysemu.h" - -static inline void set_feature(CPUARMState *env, int feature) -{ - env->features |= 1ULL << feature; -} - -#ifndef CONFIG_USER_ONLY -static uint64_t a57_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - /* Number of processors is in [25:24]; otherwise we RAZ */ - return (smp_cpus - 1) << 24; -} -#endif - -static const ARMCPRegInfo cortexa57_cp_reginfo[] = { -#ifndef CONFIG_USER_ONLY - { "L2CTLR_EL1", 0,11,0, 3,1,2, ARM_CP_STATE_AA64, - 0, PL1_RW, NULL, 0, 0, - NULL, a57_l2ctlr_read, arm_cp_write_ignore, }, - { "L2CTLR", 15,9,0, 0,1,2, 0, - 0, PL1_RW, NULL, 0, 0, - NULL, a57_l2ctlr_read, arm_cp_write_ignore, }, -#endif - { "L2ECTLR_EL1", 0,11,0, 3,1,3, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_RW, NULL, 0, }, - { "L2ECTLR", 15,9,0, 0,1,3, 0, - ARM_CP_CONST, PL1_RW, NULL, 0, }, - { "L2ACTLR", 0,15,0, 3,1,0, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_RW, NULL, 0 }, - { "CPUACTLR_EL1", 0,15,2, 3,1,0, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_RW, NULL, 0 }, - { "CPUACTLR", 15,0,15, 0,0,0, 0, - ARM_CP_CONST | ARM_CP_64BIT, PL1_RW, NULL, 0, }, - { "CPUECTLR_EL1", 0,15,2, 3,1,1, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_RW, NULL, 0, }, - { "CPUECTLR", 15,0,15, 0,1,0, 0, - ARM_CP_CONST | ARM_CP_64BIT, PL1_RW, NULL, 0, }, - { "CPUMERRSR_EL1", 0,15,2, 3,1,2, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_RW, NULL, 0 }, - { "CPUMERRSR", 15,0,15, 0,2,0, 0, - ARM_CP_CONST | ARM_CP_64BIT, PL1_RW, NULL, 0 }, - { "L2MERRSR_EL1", 0,15,2, 3,1,3, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_RW, NULL, 0 }, - { "L2MERRSR", 15,0,15, 0,3,0, 0, - ARM_CP_CONST | ARM_CP_64BIT, PL1_RW, NULL, 0 }, - REGINFO_SENTINEL -}; - -static void aarch64_a57_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_VFP4); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_V8_AES); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); - set_feature(&cpu->env, ARM_FEATURE_CRC); - cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57; - cpu->midr = 0x411fd070; - cpu->reset_fpsid = 0x41034070; - cpu->mvfr0 = 0x10110222; - cpu->mvfr1 = 0x12111111; - cpu->mvfr2 = 0x00000043; - cpu->ctr = 0x8444c004; - cpu->reset_sctlr = 0x00c50838; - cpu->id_pfr0 = 0x00000131; - cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x03010066; - cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10101105; - cpu->id_mmfr1 = 0x40000000; - 
cpu->id_mmfr2 = 0x01260000; - cpu->id_mmfr3 = 0x02102211; - cpu->id_isar0 = 0x02101110; - cpu->id_isar1 = 0x13112111; - cpu->id_isar2 = 0x21232042; - cpu->id_isar3 = 0x01112131; - cpu->id_isar4 = 0x00011142; - cpu->id_isar5 = 0x00011121; - cpu->id_aa64pfr0 = 0x00002222; - cpu->id_aa64dfr0 = 0x10305106; - cpu->id_aa64isar0 = 0x00011120; - cpu->id_aa64mmfr0 = 0x00001124; - cpu->dbgdidr = 0x3516d000; - cpu->clidr = 0x0a200023; - cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ - cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */ - cpu->dcz_blocksize = 4; /* 64 bytes */ - define_arm_cp_regs(cpu, cortexa57_cp_reginfo); -} - -#ifdef CONFIG_USER_ONLY -static void aarch64_any_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - ARMCPU *cpu = ARM_CPU(uc, obj); - - set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_VFP4); - set_feature(&cpu->env, ARM_FEATURE_NEON); - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - set_feature(&cpu->env, ARM_FEATURE_V8_AES); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); - set_feature(&cpu->env, ARM_FEATURE_CRC); - cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */ - cpu->dcz_blocksize = 7; /* 512 bytes */ -} -#endif - -typedef struct ARMCPUInfo { - const char *name; - void (*initfn)(struct uc_struct *uc, Object *obj, void *opaque); - void (*class_init)(struct uc_struct *uc, ObjectClass *oc, void *data); -} ARMCPUInfo; - -static const ARMCPUInfo aarch64_cpus[] = { - { "cortex-a57", aarch64_a57_initfn }, -#ifdef CONFIG_USER_ONLY - { "any", aarch64_any_initfn }, -#endif - { NULL } -}; - -static void aarch64_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ -} - -static void aarch64_cpu_finalizefn(struct uc_struct *uc, Object *obj, void *opaque) -{ -} - -static void aarch64_cpu_set_pc(CPUState *cs, vaddr value) -{ - //CPUARMState *env = cs->env_ptr; - ARMCPU *cpu = ARM_CPU(NULL, cs); - /* It's OK to look at env for the current mode here, because it's - * never possible for an AArch64 TB to chain to an AArch32 TB. - * (Otherwise we would need to use synchronize_from_tb instead.) 
- */ - if (is_a64(&cpu->env)) { - cpu->env.pc = value; - } else { - cpu->env.regs[15] = value; - } -} - -static void aarch64_cpu_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) -{ - CPUClass *cc = CPU_CLASS(uc, oc); - -#if !defined(CONFIG_USER_ONLY) - cc->do_interrupt = aarch64_cpu_do_interrupt; -#endif - cc->cpu_exec_interrupt = arm_cpu_exec_interrupt; - cc->set_pc = aarch64_cpu_set_pc; -} - -static void aarch64_cpu_register(struct uc_struct *uc, const ARMCPUInfo *info) -{ - TypeInfo type_info = { 0 }; - type_info.parent = TYPE_AARCH64_CPU; - type_info.instance_size = sizeof(ARMCPU); - type_info.instance_init = info->initfn; - type_info.class_size = sizeof(ARMCPUClass); - type_info.class_init = info->class_init; - - type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name); - type_register(uc, &type_info); - g_free((void *)type_info.name); -} - -void aarch64_cpu_register_types(void *opaque) -{ - const ARMCPUInfo *info = aarch64_cpus; - - static TypeInfo aarch64_cpu_type_info = { 0 }; - aarch64_cpu_type_info.name = TYPE_AARCH64_CPU; - aarch64_cpu_type_info.parent = TYPE_ARM_CPU; - aarch64_cpu_type_info.instance_size = sizeof(ARMCPU); - aarch64_cpu_type_info.instance_init = aarch64_cpu_initfn; - aarch64_cpu_type_info.instance_finalize = aarch64_cpu_finalizefn; - aarch64_cpu_type_info.abstract = true; - aarch64_cpu_type_info.class_size = sizeof(AArch64CPUClass); - aarch64_cpu_type_info.class_init = aarch64_cpu_class_init; - - type_register_static(opaque, &aarch64_cpu_type_info); - - while (info->name) { - aarch64_cpu_register(opaque, info); - info++; - } -} diff --git a/qemu/target-arm/crypto_helper.c b/qemu/target-arm/crypto_helper.c deleted file mode 100644 index 59988edd..00000000 --- a/qemu/target-arm/crypto_helper.c +++ /dev/null @@ -1,435 +0,0 @@ -/* - * crypto_helper.c - emulate v8 Crypto Extensions instructions - * - * Copyright (C) 2013 - 2014 Linaro Ltd - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. 
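aarch64_cpu_register() above stamps out one QOM type per CPU model by filling a template TypeInfo with the model's instance_init and a name built from "<model>-" TYPE_ARM_CPU. A minimal sketch of that registry pattern, where type_register_sketch() is a hypothetical stand-in for QEMU's type_register():

#include <stdio.h>

/* Hypothetical miniature of QEMU's TypeInfo registration. */
typedef struct {
    char name[64];
    const char *parent;
    void (*instance_init)(void *obj);
} TypeInfoSketch;

static void type_register_sketch(const TypeInfoSketch *ti)
{
    printf("registered %s (parent %s)\n", ti->name, ti->parent);
}

static void a57_init(void *obj) { /* would set features and ID registers */ }

struct model { const char *name; void (*initfn)(void *); };

int main(void)
{
    static const struct model models[] = {
        { "cortex-a57", a57_init },
        { NULL, NULL },              /* sentinel, as in aarch64_cpus[] */
    };
    for (const struct model *m = models; m->name; m++) {
        TypeInfoSketch ti = { .parent = "arm-cpu",
                              .instance_init = m->initfn };
        snprintf(ti.name, sizeof ti.name, "%s-arm-cpu", m->name);
        type_register_sketch(&ti);
    }
    return 0;
}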
- */ - -#include - -#include "cpu.h" -#include "exec/exec-all.h" -#include "exec/helper-proto.h" -#include "qemu/aes.h" - -union CRYPTO_STATE { - uint8_t bytes[16]; - uint32_t words[4]; - uint64_t l[2]; -}; - -void HELPER(crypto_aese)(CPUARMState *env, uint32_t rd, uint32_t rm, - uint32_t decrypt) -{ - static uint8_t const * const sbox[2] = { AES_sbox, AES_isbox }; - static uint8_t const * const shift[2] = { AES_shifts, AES_ishifts }; - - union CRYPTO_STATE rk; - union CRYPTO_STATE st; - int i; - - rk.l[0] = float64_val(env->vfp.regs[rm]); - rk.l[1] = float64_val(env->vfp.regs[rm + 1]); - st.l[0] = float64_val(env->vfp.regs[rd]); - st.l[1] = float64_val(env->vfp.regs[rd + 1]); - - assert(decrypt < 2); - - /* xor state vector with round key */ - rk.l[0] ^= st.l[0]; - rk.l[1] ^= st.l[1]; - - /* combine ShiftRows operation and sbox substitution */ - for (i = 0; i < 16; i++) { - st.bytes[i] = sbox[decrypt][rk.bytes[shift[decrypt][i]]]; - } - - env->vfp.regs[rd] = make_float64(st.l[0]); - env->vfp.regs[rd + 1] = make_float64(st.l[1]); -} - -void HELPER(crypto_aesmc)(CPUARMState *env, uint32_t rd, uint32_t rm, - uint32_t decrypt) -{ - static uint32_t const mc[][256] = { { - /* MixColumns lookup table */ - 0x00000000, 0x03010102, 0x06020204, 0x05030306, - 0x0c040408, 0x0f05050a, 0x0a06060c, 0x0907070e, - 0x18080810, 0x1b090912, 0x1e0a0a14, 0x1d0b0b16, - 0x140c0c18, 0x170d0d1a, 0x120e0e1c, 0x110f0f1e, - 0x30101020, 0x33111122, 0x36121224, 0x35131326, - 0x3c141428, 0x3f15152a, 0x3a16162c, 0x3917172e, - 0x28181830, 0x2b191932, 0x2e1a1a34, 0x2d1b1b36, - 0x241c1c38, 0x271d1d3a, 0x221e1e3c, 0x211f1f3e, - 0x60202040, 0x63212142, 0x66222244, 0x65232346, - 0x6c242448, 0x6f25254a, 0x6a26264c, 0x6927274e, - 0x78282850, 0x7b292952, 0x7e2a2a54, 0x7d2b2b56, - 0x742c2c58, 0x772d2d5a, 0x722e2e5c, 0x712f2f5e, - 0x50303060, 0x53313162, 0x56323264, 0x55333366, - 0x5c343468, 0x5f35356a, 0x5a36366c, 0x5937376e, - 0x48383870, 0x4b393972, 0x4e3a3a74, 0x4d3b3b76, - 0x443c3c78, 0x473d3d7a, 0x423e3e7c, 0x413f3f7e, - 0xc0404080, 0xc3414182, 0xc6424284, 0xc5434386, - 0xcc444488, 0xcf45458a, 0xca46468c, 0xc947478e, - 0xd8484890, 0xdb494992, 0xde4a4a94, 0xdd4b4b96, - 0xd44c4c98, 0xd74d4d9a, 0xd24e4e9c, 0xd14f4f9e, - 0xf05050a0, 0xf35151a2, 0xf65252a4, 0xf55353a6, - 0xfc5454a8, 0xff5555aa, 0xfa5656ac, 0xf95757ae, - 0xe85858b0, 0xeb5959b2, 0xee5a5ab4, 0xed5b5bb6, - 0xe45c5cb8, 0xe75d5dba, 0xe25e5ebc, 0xe15f5fbe, - 0xa06060c0, 0xa36161c2, 0xa66262c4, 0xa56363c6, - 0xac6464c8, 0xaf6565ca, 0xaa6666cc, 0xa96767ce, - 0xb86868d0, 0xbb6969d2, 0xbe6a6ad4, 0xbd6b6bd6, - 0xb46c6cd8, 0xb76d6dda, 0xb26e6edc, 0xb16f6fde, - 0x907070e0, 0x937171e2, 0x967272e4, 0x957373e6, - 0x9c7474e8, 0x9f7575ea, 0x9a7676ec, 0x997777ee, - 0x887878f0, 0x8b7979f2, 0x8e7a7af4, 0x8d7b7bf6, - 0x847c7cf8, 0x877d7dfa, 0x827e7efc, 0x817f7ffe, - 0x9b80801b, 0x98818119, 0x9d82821f, 0x9e83831d, - 0x97848413, 0x94858511, 0x91868617, 0x92878715, - 0x8388880b, 0x80898909, 0x858a8a0f, 0x868b8b0d, - 0x8f8c8c03, 0x8c8d8d01, 0x898e8e07, 0x8a8f8f05, - 0xab90903b, 0xa8919139, 0xad92923f, 0xae93933d, - 0xa7949433, 0xa4959531, 0xa1969637, 0xa2979735, - 0xb398982b, 0xb0999929, 0xb59a9a2f, 0xb69b9b2d, - 0xbf9c9c23, 0xbc9d9d21, 0xb99e9e27, 0xba9f9f25, - 0xfba0a05b, 0xf8a1a159, 0xfda2a25f, 0xfea3a35d, - 0xf7a4a453, 0xf4a5a551, 0xf1a6a657, 0xf2a7a755, - 0xe3a8a84b, 0xe0a9a949, 0xe5aaaa4f, 0xe6abab4d, - 0xefacac43, 0xecadad41, 0xe9aeae47, 0xeaafaf45, - 0xcbb0b07b, 0xc8b1b179, 0xcdb2b27f, 0xceb3b37d, - 0xc7b4b473, 0xc4b5b571, 0xc1b6b677, 0xc2b7b775, - 0xd3b8b86b, 0xd0b9b969, 0xd5baba6f, 
0xd6bbbb6d, - 0xdfbcbc63, 0xdcbdbd61, 0xd9bebe67, 0xdabfbf65, - 0x5bc0c09b, 0x58c1c199, 0x5dc2c29f, 0x5ec3c39d, - 0x57c4c493, 0x54c5c591, 0x51c6c697, 0x52c7c795, - 0x43c8c88b, 0x40c9c989, 0x45caca8f, 0x46cbcb8d, - 0x4fcccc83, 0x4ccdcd81, 0x49cece87, 0x4acfcf85, - 0x6bd0d0bb, 0x68d1d1b9, 0x6dd2d2bf, 0x6ed3d3bd, - 0x67d4d4b3, 0x64d5d5b1, 0x61d6d6b7, 0x62d7d7b5, - 0x73d8d8ab, 0x70d9d9a9, 0x75dadaaf, 0x76dbdbad, - 0x7fdcdca3, 0x7cdddda1, 0x79dedea7, 0x7adfdfa5, - 0x3be0e0db, 0x38e1e1d9, 0x3de2e2df, 0x3ee3e3dd, - 0x37e4e4d3, 0x34e5e5d1, 0x31e6e6d7, 0x32e7e7d5, - 0x23e8e8cb, 0x20e9e9c9, 0x25eaeacf, 0x26ebebcd, - 0x2fececc3, 0x2cededc1, 0x29eeeec7, 0x2aefefc5, - 0x0bf0f0fb, 0x08f1f1f9, 0x0df2f2ff, 0x0ef3f3fd, - 0x07f4f4f3, 0x04f5f5f1, 0x01f6f6f7, 0x02f7f7f5, - 0x13f8f8eb, 0x10f9f9e9, 0x15fafaef, 0x16fbfbed, - 0x1ffcfce3, 0x1cfdfde1, 0x19fefee7, 0x1affffe5, - }, { - /* Inverse MixColumns lookup table */ - 0x00000000, 0x0b0d090e, 0x161a121c, 0x1d171b12, - 0x2c342438, 0x27392d36, 0x3a2e3624, 0x31233f2a, - 0x58684870, 0x5365417e, 0x4e725a6c, 0x457f5362, - 0x745c6c48, 0x7f516546, 0x62467e54, 0x694b775a, - 0xb0d090e0, 0xbbdd99ee, 0xa6ca82fc, 0xadc78bf2, - 0x9ce4b4d8, 0x97e9bdd6, 0x8afea6c4, 0x81f3afca, - 0xe8b8d890, 0xe3b5d19e, 0xfea2ca8c, 0xf5afc382, - 0xc48cfca8, 0xcf81f5a6, 0xd296eeb4, 0xd99be7ba, - 0x7bbb3bdb, 0x70b632d5, 0x6da129c7, 0x66ac20c9, - 0x578f1fe3, 0x5c8216ed, 0x41950dff, 0x4a9804f1, - 0x23d373ab, 0x28de7aa5, 0x35c961b7, 0x3ec468b9, - 0x0fe75793, 0x04ea5e9d, 0x19fd458f, 0x12f04c81, - 0xcb6bab3b, 0xc066a235, 0xdd71b927, 0xd67cb029, - 0xe75f8f03, 0xec52860d, 0xf1459d1f, 0xfa489411, - 0x9303e34b, 0x980eea45, 0x8519f157, 0x8e14f859, - 0xbf37c773, 0xb43ace7d, 0xa92dd56f, 0xa220dc61, - 0xf66d76ad, 0xfd607fa3, 0xe07764b1, 0xeb7a6dbf, - 0xda595295, 0xd1545b9b, 0xcc434089, 0xc74e4987, - 0xae053edd, 0xa50837d3, 0xb81f2cc1, 0xb31225cf, - 0x82311ae5, 0x893c13eb, 0x942b08f9, 0x9f2601f7, - 0x46bde64d, 0x4db0ef43, 0x50a7f451, 0x5baafd5f, - 0x6a89c275, 0x6184cb7b, 0x7c93d069, 0x779ed967, - 0x1ed5ae3d, 0x15d8a733, 0x08cfbc21, 0x03c2b52f, - 0x32e18a05, 0x39ec830b, 0x24fb9819, 0x2ff69117, - 0x8dd64d76, 0x86db4478, 0x9bcc5f6a, 0x90c15664, - 0xa1e2694e, 0xaaef6040, 0xb7f87b52, 0xbcf5725c, - 0xd5be0506, 0xdeb30c08, 0xc3a4171a, 0xc8a91e14, - 0xf98a213e, 0xf2872830, 0xef903322, 0xe49d3a2c, - 0x3d06dd96, 0x360bd498, 0x2b1ccf8a, 0x2011c684, - 0x1132f9ae, 0x1a3ff0a0, 0x0728ebb2, 0x0c25e2bc, - 0x656e95e6, 0x6e639ce8, 0x737487fa, 0x78798ef4, - 0x495ab1de, 0x4257b8d0, 0x5f40a3c2, 0x544daacc, - 0xf7daec41, 0xfcd7e54f, 0xe1c0fe5d, 0xeacdf753, - 0xdbeec879, 0xd0e3c177, 0xcdf4da65, 0xc6f9d36b, - 0xafb2a431, 0xa4bfad3f, 0xb9a8b62d, 0xb2a5bf23, - 0x83868009, 0x888b8907, 0x959c9215, 0x9e919b1b, - 0x470a7ca1, 0x4c0775af, 0x51106ebd, 0x5a1d67b3, - 0x6b3e5899, 0x60335197, 0x7d244a85, 0x7629438b, - 0x1f6234d1, 0x146f3ddf, 0x097826cd, 0x02752fc3, - 0x335610e9, 0x385b19e7, 0x254c02f5, 0x2e410bfb, - 0x8c61d79a, 0x876cde94, 0x9a7bc586, 0x9176cc88, - 0xa055f3a2, 0xab58faac, 0xb64fe1be, 0xbd42e8b0, - 0xd4099fea, 0xdf0496e4, 0xc2138df6, 0xc91e84f8, - 0xf83dbbd2, 0xf330b2dc, 0xee27a9ce, 0xe52aa0c0, - 0x3cb1477a, 0x37bc4e74, 0x2aab5566, 0x21a65c68, - 0x10856342, 0x1b886a4c, 0x069f715e, 0x0d927850, - 0x64d90f0a, 0x6fd40604, 0x72c31d16, 0x79ce1418, - 0x48ed2b32, 0x43e0223c, 0x5ef7392e, 0x55fa3020, - 0x01b79aec, 0x0aba93e2, 0x17ad88f0, 0x1ca081fe, - 0x2d83bed4, 0x268eb7da, 0x3b99acc8, 0x3094a5c6, - 0x59dfd29c, 0x52d2db92, 0x4fc5c080, 0x44c8c98e, - 0x75ebf6a4, 0x7ee6ffaa, 0x63f1e4b8, 0x68fcedb6, - 0xb1670a0c, 0xba6a0302, 0xa77d1810, 
0xac70111e, - 0x9d532e34, 0x965e273a, 0x8b493c28, 0x80443526, - 0xe90f427c, 0xe2024b72, 0xff155060, 0xf418596e, - 0xc53b6644, 0xce366f4a, 0xd3217458, 0xd82c7d56, - 0x7a0ca137, 0x7101a839, 0x6c16b32b, 0x671bba25, - 0x5638850f, 0x5d358c01, 0x40229713, 0x4b2f9e1d, - 0x2264e947, 0x2969e049, 0x347efb5b, 0x3f73f255, - 0x0e50cd7f, 0x055dc471, 0x184adf63, 0x1347d66d, - 0xcadc31d7, 0xc1d138d9, 0xdcc623cb, 0xd7cb2ac5, - 0xe6e815ef, 0xede51ce1, 0xf0f207f3, 0xfbff0efd, - 0x92b479a7, 0x99b970a9, 0x84ae6bbb, 0x8fa362b5, - 0xbe805d9f, 0xb58d5491, 0xa89a4f83, 0xa397468d, - } }; - union CRYPTO_STATE st; - int i; - st.l[0] = float64_val(env->vfp.regs[rm]); - st.l[1] = float64_val(env->vfp.regs[rm + 1]); - - assert(decrypt < 2); - - for (i = 0; i < 16; i += 4) { - st.words[i >> 2] = cpu_to_le32( - mc[decrypt][st.bytes[i]] ^ - rol32(mc[decrypt][st.bytes[i + 1]], 8) ^ - rol32(mc[decrypt][st.bytes[i + 2]], 16) ^ - rol32(mc[decrypt][st.bytes[i + 3]], 24)); - } - - env->vfp.regs[rd] = make_float64(st.l[0]); - env->vfp.regs[rd + 1] = make_float64(st.l[1]); -} - -/* - * SHA-1 logical functions - */ - -static uint32_t cho(uint32_t x, uint32_t y, uint32_t z) -{ - return (x & (y ^ z)) ^ z; -} - -static uint32_t par(uint32_t x, uint32_t y, uint32_t z) -{ - return x ^ y ^ z; -} - -static uint32_t maj(uint32_t x, uint32_t y, uint32_t z) -{ - return (x & y) | ((x | y) & z); -} - -void HELPER(crypto_sha1_3reg)(CPUARMState *env, uint32_t rd, uint32_t rn, - uint32_t rm, uint32_t op) -{ - union CRYPTO_STATE d; - union CRYPTO_STATE n; - union CRYPTO_STATE m; - d.l[0] = float64_val(env->vfp.regs[rd]); - d.l[1] = float64_val(env->vfp.regs[rd + 1]); - n.l[0] = float64_val(env->vfp.regs[rn]); - n.l[1] = float64_val(env->vfp.regs[rn + 1]); - m.l[0] = float64_val(env->vfp.regs[rm]); - m.l[1] = float64_val(env->vfp.regs[rm + 1]); - - if (op == 3) { /* sha1su0 */ - d.l[0] ^= d.l[1] ^ m.l[0]; - d.l[1] ^= n.l[0] ^ m.l[1]; - } else { - int i; - - for (i = 0; i < 4; i++) { - uint32_t t; - - switch (op) { - case 0: /* sha1c */ - t = cho(d.words[1], d.words[2], d.words[3]); - break; - case 1: /* sha1p */ - t = par(d.words[1], d.words[2], d.words[3]); - break; - case 2: /* sha1m */ - t = maj(d.words[1], d.words[2], d.words[3]); - break; - default: - g_assert_not_reached(); - } - t += rol32(d.words[0], 5) + n.words[0] + m.words[i]; - - n.words[0] = d.words[3]; - d.words[3] = d.words[2]; - d.words[2] = ror32(d.words[1], 2); - d.words[1] = d.words[0]; - d.words[0] = t; - } - } - env->vfp.regs[rd] = make_float64(d.l[0]); - env->vfp.regs[rd + 1] = make_float64(d.l[1]); -} - -void HELPER(crypto_sha1h)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ - union CRYPTO_STATE m; - m.l[0] = float64_val(env->vfp.regs[rm]); - m.l[1] = float64_val(env->vfp.regs[rm + 1]); - - m.words[0] = ror32(m.words[0], 2); - m.words[1] = m.words[2] = m.words[3] = 0; - - env->vfp.regs[rd] = make_float64(m.l[0]); - env->vfp.regs[rd + 1] = make_float64(m.l[1]); -} - -void HELPER(crypto_sha1su1)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ - union CRYPTO_STATE d; - union CRYPTO_STATE m; - d.l[0] = float64_val(env->vfp.regs[rd]); - d.l[1] = float64_val(env->vfp.regs[rd + 1]); - m.l[0] = float64_val(env->vfp.regs[rm]); - m.l[1] = float64_val(env->vfp.regs[rm + 1]); - - d.words[0] = rol32(d.words[0] ^ m.words[1], 1); - d.words[1] = rol32(d.words[1] ^ m.words[2], 1); - d.words[2] = rol32(d.words[2] ^ m.words[3], 1); - d.words[3] = rol32(d.words[3] ^ d.words[0], 1); - - env->vfp.regs[rd] = make_float64(d.l[0]); - env->vfp.regs[rd + 1] = make_float64(d.l[1]); -} - -/* - * The 
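Every entry of the forward MixColumns table above is derivable rather than arbitrary: for input byte b the packed little-endian word is the column (2·b, b, b, 3·b), i.e. (3b << 24) | (b << 16) | (b << 8) | 2b, with multiplication in GF(2^8) modulo the AES polynomial (the xtime step). The helper then applies the full column mix as four table lookups rotated by 0/8/16/24 bits and XORed together. A short self-check against values visible in the table above (0x03010102 for b = 0x01, 0x9b80801b for b = 0x80):

#include <assert.h>
#include <stdint.h>

/* GF(2^8) doubling modulo the AES polynomial x^8+x^4+x^3+x+1 (0x11b). */
static uint8_t xtime(uint8_t b)
{
    return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0x00));
}

/* Forward MixColumns table entry: column (2b, b, b, 3b), little-endian. */
static uint32_t mc_entry(uint8_t b)
{
    uint8_t b2 = xtime(b);            /* 2*b in GF(2^8) */
    uint8_t b3 = (uint8_t)(b2 ^ b);   /* 3*b = 2*b XOR b */
    return ((uint32_t)b3 << 24) | ((uint32_t)b << 16) |
           ((uint32_t)b << 8) | b2;
}

int main(void)
{
    assert(mc_entry(0x00) == 0x00000000);
    assert(mc_entry(0x01) == 0x03010102);
    assert(mc_entry(0x80) == 0x9b80801b);
    return 0;
}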
SHA-256 logical functions, according to - * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf - */ - -static uint32_t S0(uint32_t x) -{ - return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22); -} - -static uint32_t S1(uint32_t x) -{ - return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25); -} - -static uint32_t s0(uint32_t x) -{ - return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); -} - -static uint32_t s1(uint32_t x) -{ - return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); -} - -void HELPER(crypto_sha256h)(CPUARMState *env, uint32_t rd, uint32_t rn, - uint32_t rm) -{ - int i; - union CRYPTO_STATE d; - union CRYPTO_STATE n; - union CRYPTO_STATE m; - d.l[0] = float64_val(env->vfp.regs[rd]); - d.l[1] = float64_val(env->vfp.regs[rd + 1]); - n.l[0] = float64_val(env->vfp.regs[rn]); - n.l[1] = float64_val(env->vfp.regs[rn + 1]); - m.l[0] = float64_val(env->vfp.regs[rm]); - m.l[1] = float64_val(env->vfp.regs[rm + 1]); - - for (i = 0; i < 4; i++) { - uint32_t t = cho(n.words[0], n.words[1], n.words[2]) + n.words[3] - + S1(n.words[0]) + m.words[i]; - - n.words[3] = n.words[2]; - n.words[2] = n.words[1]; - n.words[1] = n.words[0]; - n.words[0] = d.words[3] + t; - - t += maj(d.words[0], d.words[1], d.words[2]) + S0(d.words[0]); - - d.words[3] = d.words[2]; - d.words[2] = d.words[1]; - d.words[1] = d.words[0]; - d.words[0] = t; - } - - env->vfp.regs[rd] = make_float64(d.l[0]); - env->vfp.regs[rd + 1] = make_float64(d.l[1]); -} - -void HELPER(crypto_sha256h2)(CPUARMState *env, uint32_t rd, uint32_t rn, - uint32_t rm) -{ - union CRYPTO_STATE d; - union CRYPTO_STATE n; - union CRYPTO_STATE m; - int i; - - d.l[0] = float64_val(env->vfp.regs[rd]); - d.l[1] = float64_val(env->vfp.regs[rd + 1]); - n.l[0] = float64_val(env->vfp.regs[rn]); - n.l[1] = float64_val(env->vfp.regs[rn + 1]); - m.l[0] = float64_val(env->vfp.regs[rm]); - m.l[1] = float64_val(env->vfp.regs[rm + 1]); - - for (i = 0; i < 4; i++) { - uint32_t t = cho(d.words[0], d.words[1], d.words[2]) + d.words[3] - + S1(d.words[0]) + m.words[i]; - - d.words[3] = d.words[2]; - d.words[2] = d.words[1]; - d.words[1] = d.words[0]; - d.words[0] = n.words[3 - i] + t; - } - - env->vfp.regs[rd] = make_float64(d.l[0]); - env->vfp.regs[rd + 1] = make_float64(d.l[1]); -} - -void HELPER(crypto_sha256su0)(CPUARMState *env, uint32_t rd, uint32_t rm) -{ - union CRYPTO_STATE d; - union CRYPTO_STATE m; - d.l[0] = float64_val(env->vfp.regs[rd]); - d.l[1] = float64_val(env->vfp.regs[rd + 1]); - m.l[0] = float64_val(env->vfp.regs[rm]); - m.l[1] = float64_val(env->vfp.regs[rm + 1]); - - d.words[0] += s0(d.words[1]); - d.words[1] += s0(d.words[2]); - d.words[2] += s0(d.words[3]); - d.words[3] += s0(m.words[0]); - - env->vfp.regs[rd] = make_float64(d.l[0]); - env->vfp.regs[rd + 1] = make_float64(d.l[1]); -} - -void HELPER(crypto_sha256su1)(CPUARMState *env, uint32_t rd, uint32_t rn, - uint32_t rm) -{ - union CRYPTO_STATE d; - union CRYPTO_STATE n; - union CRYPTO_STATE m; - d.l[0] = float64_val(env->vfp.regs[rd]); - d.l[1] = float64_val(env->vfp.regs[rd + 1]); - n.l[0] = float64_val(env->vfp.regs[rn]); - n.l[1] = float64_val(env->vfp.regs[rn + 1]); - m.l[0] = float64_val(env->vfp.regs[rm]); - m.l[1] = float64_val(env->vfp.regs[rm + 1]); - - d.words[0] += s1(m.words[2]) + n.words[1]; - d.words[1] += s1(m.words[3]) + n.words[2]; - d.words[2] += s1(d.words[0]) + n.words[3]; - d.words[3] += s1(d.words[1]) + m.words[0]; - - env->vfp.regs[rd] = make_float64(d.l[0]); - env->vfp.regs[rd + 1] = make_float64(d.l[1]); -} diff --git a/qemu/target-arm/helper-a64.c 
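cho(), par() and maj() above are the FIPS 180-4 Ch, Parity and Maj functions in reduced form: (x & (y ^ z)) ^ z selects y where x has a 1 bit and z where it has a 0 bit, which is exactly Ch(x,y,z) = (x & y) | (~x & z), and (x & y) | ((x | y) & z) is bitwise majority. Because the functions are bitwise, checking the eight single-bit input combinations proves the identities for all 32-bit inputs:

#include <assert.h>
#include <stdint.h>

static uint32_t cho(uint32_t x, uint32_t y, uint32_t z)
{
    return (x & (y ^ z)) ^ z;
}

static uint32_t maj(uint32_t x, uint32_t y, uint32_t z)
{
    return (x & y) | ((x | y) & z);
}

int main(void)
{
    for (uint32_t x = 0; x < 2; x++)
        for (uint32_t y = 0; y < 2; y++)
            for (uint32_t z = 0; z < 2; z++) {
                assert(cho(x, y, z) == ((x & y) | (~x & z & 1)));
                assert(maj(x, y, z) == ((x & y) ^ (x & z) ^ (y & z)));
            }
    return 0;
}

The rewritten Ch saves one bitwise operation per invocation over the textbook form.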
b/qemu/target-arm/helper-a64.c deleted file mode 100644 index 1d976c17..00000000 --- a/qemu/target-arm/helper-a64.c +++ /dev/null @@ -1,528 +0,0 @@ -/* - * AArch64 specific helpers - * - * Copyright (c) 2013 Alexander Graf - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "cpu.h" -#include "exec/helper-proto.h" -#include "qemu/host-utils.h" -#include "sysemu/sysemu.h" -#include "qemu/bitops.h" -#include "internals.h" -#include "qemu/crc32c.h" - -/* C2.4.7 Multiply and divide */ -/* special cases for 0 and LLONG_MIN are mandated by the standard */ -uint64_t HELPER(udiv64)(uint64_t num, uint64_t den) -{ - if (den == 0) { - return 0; - } - return num / den; -} - -int64_t HELPER(sdiv64)(int64_t num, int64_t den) -{ - if (den == 0) { - return 0; - } - if (num == LLONG_MIN && den == -1) { - return LLONG_MIN; - } - return num / den; -} - -uint64_t HELPER(clz64)(uint64_t x) -{ - return clz64(x); -} - -uint64_t HELPER(cls64)(uint64_t x) -{ - return clrsb64(x); -} - -uint32_t HELPER(cls32)(uint32_t x) -{ - return clrsb32(x); -} - -uint32_t HELPER(clz32)(uint32_t x) -{ - return clz32(x); -} - -uint64_t HELPER(rbit64)(uint64_t x) -{ - /* assign the correct byte position */ - x = bswap64(x); - - /* assign the correct nibble position */ - x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4) - | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4); - - /* assign the correct bit position */ - x = ((x & 0x8888888888888888ULL) >> 3) - | ((x & 0x4444444444444444ULL) >> 1) - | ((x & 0x2222222222222222ULL) << 1) - | ((x & 0x1111111111111111ULL) << 3); - - return x; -} - -/* Convert a softfloat float_relation_ (as returned by - * the float*_compare functions) to the correct ARM - * NZCV flag state. 
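udiv64()/sdiv64() above pin down the AArch64 architectural rules where C leaves undefined behaviour: UDIV/SDIV by zero return 0 (there is no divide trap on AArch64), and the single signed-overflow case, INT64_MIN / -1, wraps back to INT64_MIN. Both guards must run before the C division operator is touched. A standalone restatement:

#include <assert.h>
#include <stdint.h>

/* AArch64 SDIV semantics: no trap on zero, defined result on overflow. */
static int64_t sdiv64_sketch(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;                  /* architected: divide by zero gives 0 */
    }
    if (num == INT64_MIN && den == -1) {
        return INT64_MIN;          /* the one signed-overflow case */
    }
    return num / den;              /* now safe: no UB remains */
}

int main(void)
{
    assert(sdiv64_sketch(7, 0) == 0);
    assert(sdiv64_sketch(INT64_MIN, -1) == INT64_MIN);
    assert(sdiv64_sketch(-7, 2) == -3);   /* C99 and SDIV both truncate */
    return 0;
}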
- */ -static inline uint32_t float_rel_to_flags(int res) -{ - uint64_t flags; - switch (res) { - case float_relation_equal: - flags = PSTATE_Z | PSTATE_C; - break; - case float_relation_less: - flags = PSTATE_N; - break; - case float_relation_greater: - flags = PSTATE_C; - break; - case float_relation_unordered: - default: - flags = PSTATE_C | PSTATE_V; - break; - } - return flags; -} - -uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status) -{ - return float_rel_to_flags(float32_compare_quiet(x, y, fp_status)); -} - -uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status) -{ - return float_rel_to_flags(float32_compare(x, y, fp_status)); -} - -uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status) -{ - return float_rel_to_flags(float64_compare_quiet(x, y, fp_status)); -} - -uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status) -{ - return float_rel_to_flags(float64_compare(x, y, fp_status)); -} - -float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp) -{ - float_status *fpst = fpstp; - - if ((float32_is_zero(a) && float32_is_infinity(b)) || - (float32_is_infinity(a) && float32_is_zero(b))) { - /* 2.0 with the sign bit set to sign(A) XOR sign(B) */ - return make_float32((1U << 30) | - ((float32_val(a) ^ float32_val(b)) & (1U << 31))); - } - return float32_mul(a, b, fpst); -} - -float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp) -{ - float_status *fpst = fpstp; - - if ((float64_is_zero(a) && float64_is_infinity(b)) || - (float64_is_infinity(a) && float64_is_zero(b))) { - /* 2.0 with the sign bit set to sign(A) XOR sign(B) */ - return make_float64((1ULL << 62) | - ((float64_val(a) ^ float64_val(b)) & (1ULL << 63))); - } - return float64_mul(a, b, fpst); -} - -uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices, - uint32_t rn, uint32_t numregs) -{ - /* Helper function for SIMD TBL and TBX. We have to do the table - * lookup part for the 64 bits worth of indices we're passed in. - * result is the initial results vector (either zeroes for TBL - * or some guest values for TBX), rn the register number where - * the table starts, and numregs the number of registers in the table. - * We return the results of the lookups. - */ - int shift; - - for (shift = 0; shift < 64; shift += 8) { - int index = extract64(indices, shift, 8); - if (index < 16 * numregs) { - /* Convert index (a byte offset into the virtual table - * which is a series of 128-bit vectors concatenated) - * into the correct vfp.regs[] element plus a bit offset - * into that element, bearing in mind that the table - * can wrap around from V31 to V0. - */ - int elt = (rn * 2 + (index >> 3)) % 64; - int bitidx = (index & 7) * 8; - uint64_t val = extract64(env->vfp.regs[elt], bitidx, 8); - - result = deposit64(result, shift, 8, val); - } - } - return result; -} - -/* 64bit/double versions of the neon float compare functions */ -uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp) -{ - float_status *fpst = fpstp; - return -float64_eq_quiet(a, b, fpst); -} - -uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp) -{ - float_status *fpst = fpstp; - return -float64_le(b, a, fpst); -} - -uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp) -{ - float_status *fpst = fpstp; - return -float64_lt(b, a, fpst); -} - -/* Reciprocal step and sqrt step. Note that unlike the A32/T32 - * versions, these do a fully fused multiply-add or - * multiply-add-and-halve. 
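float_rel_to_flags() above fixes the architected NZCV encodings produced by FCMP: equal gives Z and C (0110), less gives N (1000), greater gives C (0010), and unordered gives C and V (0011). The encodings are chosen so the ordinary integer condition codes keep doing the right thing afterwards: EQ fires only on equality, MI only on a genuine less-than, and GT (Z clear and N equal to V) rejects both less-than and unordered. A quick check of the GT property:

#include <assert.h>
#include <stdbool.h>

enum { FLAG_N = 8, FLAG_Z = 4, FLAG_C = 2, FLAG_V = 1 };  /* NZCV nibble */

/* ARM condition GT: Z == 0 && N == V. */
static bool cond_gt(unsigned nzcv)
{
    bool n = (nzcv & FLAG_N) != 0;
    bool z = (nzcv & FLAG_Z) != 0;
    bool v = (nzcv & FLAG_V) != 0;
    return !z && (n == v);
}

int main(void)
{
    unsigned eq = FLAG_Z | FLAG_C;   /* 0110 */
    unsigned lt = FLAG_N;            /* 1000 */
    unsigned gt = FLAG_C;            /* 0010 */
    unsigned un = FLAG_C | FLAG_V;   /* 0011 */

    assert(cond_gt(gt));
    assert(!cond_gt(eq) && !cond_gt(lt) && !cond_gt(un));
    return 0;
}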
- */ -#define float32_two make_float32(0x40000000) -#define float32_three make_float32(0x40400000) -#define float32_one_point_five make_float32(0x3fc00000) - -#define float64_two make_float64(0x4000000000000000ULL) -#define float64_three make_float64(0x4008000000000000ULL) -#define float64_one_point_five make_float64(0x3FF8000000000000ULL) - -float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp) -{ - float_status *fpst = fpstp; - - a = float32_chs(a); - if ((float32_is_infinity(a) && float32_is_zero(b)) || - (float32_is_infinity(b) && float32_is_zero(a))) { - return float32_two; - } - return float32_muladd(a, b, float32_two, 0, fpst); -} - -float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp) -{ - float_status *fpst = fpstp; - - a = float64_chs(a); - if ((float64_is_infinity(a) && float64_is_zero(b)) || - (float64_is_infinity(b) && float64_is_zero(a))) { - return float64_two; - } - return float64_muladd(a, b, float64_two, 0, fpst); -} - -float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp) -{ - float_status *fpst = fpstp; - - a = float32_chs(a); - if ((float32_is_infinity(a) && float32_is_zero(b)) || - (float32_is_infinity(b) && float32_is_zero(a))) { - return float32_one_point_five; - } - return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst); -} - -float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp) -{ - float_status *fpst = fpstp; - - a = float64_chs(a); - if ((float64_is_infinity(a) && float64_is_zero(b)) || - (float64_is_infinity(b) && float64_is_zero(a))) { - return float64_one_point_five; - } - return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst); -} - -/* Pairwise long add: add pairs of adjacent elements into - * double-width elements in the result (eg _s8 is an 8x8->16 op) - */ -uint64_t HELPER(neon_addlp_s8)(uint64_t a) -{ - uint64_t nsignmask = 0x0080008000800080ULL; - uint64_t wsignmask = 0x8000800080008000ULL; - uint64_t elementmask = 0x00ff00ff00ff00ffULL; - uint64_t tmp1, tmp2; - uint64_t res, signres; - - /* Extract odd elements, sign extend each to a 16 bit field */ - tmp1 = a & elementmask; - tmp1 ^= nsignmask; - tmp1 |= wsignmask; - tmp1 = (tmp1 - nsignmask) ^ wsignmask; - /* Ditto for the even elements */ - tmp2 = (a >> 8) & elementmask; - tmp2 ^= nsignmask; - tmp2 |= wsignmask; - tmp2 = (tmp2 - nsignmask) ^ wsignmask; - - /* calculate the result by summing bits 0..14, 16..22, etc, - * and then adjusting the sign bits 15, 23, etc manually. - * This ensures the addition can't overflow the 16 bit field. 
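recpsf_*/rsqrtsf_* above are the correction factors for Newton-Raphson refinement: FRECPS returns 2 - a*b (fused) so that x' = x * recps(b, x) converges quadratically to 1/b, and FRSQRTS returns (3 - a*b)/2 so that x' = x * rsqrts(b*x, x) converges to 1/sqrt(b); the 0 * infinity special cases pin the factor to an exact constant instead of producing a NaN. A scalar model of the reciprocal iteration, ignoring the fusing and the special cases:

#include <assert.h>
#include <math.h>

/* Scalar FRECPS model: the Newton-Raphson correction 2 - a*b. */
static double recps(double a, double b)
{
    return 2.0 - a * b;
}

int main(void)
{
    double b = 3.0;
    double x = 0.3;                  /* crude seed for 1/3 */
    for (int i = 0; i < 4; i++) {
        x = x * recps(b, x);         /* error squares on every step */
    }
    assert(fabs(x - 1.0 / 3.0) < 1e-12);
    return 0;
}

The fused multiply-add in the real helpers matters: computing 2 - a*b with an intermediate rounding would cap the precision achievable by the final iteration.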
- */ - signres = (tmp1 ^ tmp2) & wsignmask; - res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask); - res ^= signres; - - return res; -} - -uint64_t HELPER(neon_addlp_u8)(uint64_t a) -{ - uint64_t tmp; - - tmp = a & 0x00ff00ff00ff00ffULL; - tmp += (a >> 8) & 0x00ff00ff00ff00ffULL; - return tmp; -} - -uint64_t HELPER(neon_addlp_s16)(uint64_t a) -{ - int32_t reslo, reshi; - - reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16); - reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48); - - return (uint32_t)reslo | (((uint64_t)reshi) << 32); -} - -uint64_t HELPER(neon_addlp_u16)(uint64_t a) -{ - uint64_t tmp; - - tmp = a & 0x0000ffff0000ffffULL; - tmp += (a >> 16) & 0x0000ffff0000ffffULL; - return tmp; -} - -/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */ -float32 HELPER(frecpx_f32)(float32 a, void *fpstp) -{ - float_status *fpst = fpstp; - uint32_t val32, sbit; - int32_t exp; - - if (float32_is_any_nan(a)) { - float32 nan = a; - if (float32_is_signaling_nan(a)) { - float_raise(float_flag_invalid, fpst); - nan = float32_maybe_silence_nan(a); - } - if (fpst->default_nan_mode) { - nan = float32_default_nan; - } - return nan; - } - - val32 = float32_val(a); - sbit = 0x80000000ULL & val32; - exp = extract32(val32, 23, 8); - - if (exp == 0) { - return make_float32(sbit | (0xfe << 23)); - } else { - return make_float32(sbit | (~exp & 0xff) << 23); - } -} - -float64 HELPER(frecpx_f64)(float64 a, void *fpstp) -{ - float_status *fpst = fpstp; - uint64_t val64, sbit; - int64_t exp; - - if (float64_is_any_nan(a)) { - float64 nan = a; - if (float64_is_signaling_nan(a)) { - float_raise(float_flag_invalid, fpst); - nan = float64_maybe_silence_nan(a); - } - if (fpst->default_nan_mode) { - nan = float64_default_nan; - } - return nan; - } - - val64 = float64_val(a); - sbit = 0x8000000000000000ULL & val64; - exp = extract64(float64_val(a), 52, 11); - - if (exp == 0) { - return make_float64(sbit | (0x7feULL << 52)); - } else { - return make_float64(sbit | (~exp & 0x7ffULL) << 52); - } -} - -float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env) -{ - /* Von Neumann rounding is implemented by using round-to-zero - * and then setting the LSB of the result if Inexact was raised. - */ - float32 r; - float_status *fpst = &env->vfp.fp_status; - float_status tstat = *fpst; - int exflags; - - set_float_rounding_mode(float_round_to_zero, &tstat); - set_float_exception_flags(0, &tstat); - r = float64_to_float32(a, &tstat); - r = float32_maybe_silence_nan(r); - exflags = get_float_exception_flags(&tstat); - if (exflags & float_flag_inexact) { - r = make_float32(float32_val(r) | 1); - } - exflags |= get_float_exception_flags(fpst); - set_float_exception_flags(exflags, fpst); - return r; -} - -/* 64-bit versions of the CRC helpers. Note that although the operation - * (and the prototypes of crc32c() and crc32() mean that only the bottom - * 32 bits of the accumulator and result are used, we pass and return - * uint64_t for convenience of the generated code. Unlike the 32-bit - * instruction set versions, val may genuinely have 64 bits of data in it. - * The upper bytes of val (above the number specified by 'bytes') must have - * been zeroed out by the caller. - */ -uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes) -{ - uint8_t buf[8]; - - stq_le_p(buf, val); - - /* zlib crc32 converts the accumulator and output to one's complement. 
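fcvtx_f64_to_f32() above implements FCVTXN's Von Neumann rounding (round-to-odd): narrow with round-toward-zero, then force the result's low mantissa bit to 1 if the Inexact flag fired. A sticky low bit cannot round the same way twice, so the result can be rounded again to a narrower format without double-rounding error. A host-side sketch using C99 fenv, assuming the platform honours FENV_ACCESS and leaving NaN handling out of scope:

#include <assert.h>
#include <fenv.h>
#include <stdint.h>
#include <string.h>

#pragma STDC FENV_ACCESS ON

/* Round-to-odd narrowing, double to float (sketch only). */
static float cvt_round_to_odd(double x)
{
    int old = fegetround();
    fesetround(FE_TOWARDZERO);
    feclearexcept(FE_INEXACT);
    float r = (float)x;                /* truncating conversion */
    if (fetestexcept(FE_INEXACT)) {
        uint32_t bits;
        memcpy(&bits, &r, sizeof bits);
        bits |= 1;                     /* make the discarded part sticky */
        memcpy(&r, &bits, sizeof r);
    }
    fesetround(old);
    return r;
}

int main(void)
{
    assert(cvt_round_to_odd(1.0) == 1.0f);     /* exact: LSB untouched */
    float r = cvt_round_to_odd(1.0 + 0x1p-40); /* inexact as a float */
    uint32_t bits;
    memcpy(&bits, &r, sizeof bits);
    assert(bits & 1);                          /* sticky bit was set */
    return 0;
}

Note also that crc32_64 just below is stubbed to return 0 with a FIXME at this point in the import; only crc32c_64 is functional.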
*/ - // return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; - return 0; // FIXME -} - -uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes) -{ - uint8_t buf[8]; - - stq_le_p(buf, val); - - /* Linux crc32c converts the output to one's complement. */ - return crc32c(acc, buf, bytes) ^ 0xffffffff; -} - -#if !defined(CONFIG_USER_ONLY) - -/* Handle a CPU exception. */ -void aarch64_cpu_do_interrupt(CPUState *cs) -{ - CPUARMState *env = cs->env_ptr; - ARMCPU *cpu = ARM_CPU(env->uc, cs); - unsigned int new_el = arm_excp_target_el(cs, cs->exception_index); - target_ulong addr = env->cp15.vbar_el[new_el]; - unsigned int new_mode = aarch64_pstate_mode(new_el, true); - int i; - - if (arm_current_el(env) < new_el) { - if (env->aarch64) { - addr += 0x400; - } else { - addr += 0x600; - } - } else if (pstate_read(env) & PSTATE_SP) { - addr += 0x200; - } - - arm_log_exception(cs->exception_index); - qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_el(env)); - if (qemu_loglevel_mask(CPU_LOG_INT) - && !excp_is_internal(cs->exception_index)) { - qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n", - env->exception.syndrome); - } - - if (arm_is_psci_call(cpu, cs->exception_index)) { - arm_handle_psci_call(cpu); - qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); - return; - } - - switch (cs->exception_index) { - case EXCP_PREFETCH_ABORT: - case EXCP_DATA_ABORT: - env->cp15.far_el[new_el] = env->exception.vaddress; - qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", - env->cp15.far_el[new_el]); - /* fall through */ - case EXCP_BKPT: - case EXCP_UDEF: - case EXCP_SWI: - case EXCP_HVC: - case EXCP_HYP_TRAP: - case EXCP_SMC: - env->cp15.esr_el[new_el] = env->exception.syndrome; - break; - case EXCP_IRQ: - case EXCP_VIRQ: - addr += 0x80; - break; - case EXCP_FIQ: - case EXCP_VFIQ: - addr += 0x100; - break; - default: - cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); - } - - if (is_a64(env)) { - env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env); - aarch64_save_sp(env, arm_current_el(env)); - env->elr_el[new_el] = env->pc; - } else { - env->banked_spsr[0] = cpsr_read(env); - if (!env->thumb) { - env->cp15.esr_el[new_el] |= 1 << 25; - } - env->elr_el[new_el] = env->regs[15]; - - for (i = 0; i < 15; i++) { - env->xregs[i] = env->regs[i]; - } - - env->condexec_bits = 0; - } - - pstate_write(env, PSTATE_DAIF | new_mode); - env->aarch64 = 1; - aarch64_restore_sp(env, new_el); - - env->pc = addr; - cs->interrupt_request |= CPU_INTERRUPT_EXITTB; -} -#endif diff --git a/qemu/target-arm/helper-a64.h b/qemu/target-arm/helper-a64.h deleted file mode 100644 index 1d3d10ff..00000000 --- a/qemu/target-arm/helper-a64.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * AArch64 specific helper definitions - * - * Copyright (c) 2013 Alexander Graf - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
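The address arithmetic at the top of aarch64_cpu_do_interrupt() above encodes the ARMv8 vector table layout: VBAR_ELx anchors four groups of 0x80-byte slots, the group picked by where the exception came from (current EL on SP_EL0 at +0x000, current EL on SP_ELx at +0x200, lower EL in AArch64 at +0x400, lower EL in AArch32 at +0x600) and the slot within the group picked by exception class (+0x00 synchronous, +0x80 IRQ, +0x100 FIQ, +0x180 SError; the SError slot is unused by the code above). Stated as one standalone function:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum exc_class { EXC_SYNC, EXC_IRQ, EXC_FIQ, EXC_SERROR };

static uint64_t vector_addr(uint64_t vbar, unsigned cur_el, unsigned new_el,
                            bool from_aarch64, bool sp_elx, enum exc_class c)
{
    uint64_t addr = vbar;
    if (cur_el < new_el) {
        addr += from_aarch64 ? 0x400 : 0x600;  /* taken from a lower EL */
    } else if (sp_elx) {
        addr += 0x200;                         /* same EL, PSTATE.SP set */
    }
    return addr + (uint64_t)c * 0x80;          /* one 0x80 slot per class */
}

int main(void)
{
    /* IRQ from EL0 (AArch64) taken to EL1: VBAR_EL1 + 0x400 + 0x80. */
    assert(vector_addr(0x1000, 0, 1, true, false, EXC_IRQ) == 0x1480);
    return 0;
}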
- */ -DEF_HELPER_FLAGS_2(udiv64, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(sdiv64, TCG_CALL_NO_RWG_SE, s64, s64, s64) -DEF_HELPER_FLAGS_1(clz64, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_1(cls64, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_1(cls32, TCG_CALL_NO_RWG_SE, i32, i32) -DEF_HELPER_FLAGS_1(clz32, TCG_CALL_NO_RWG_SE, i32, i32) -DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr) -DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr) -DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr) -DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr) -DEF_HELPER_FLAGS_5(simd_tbl, TCG_CALL_NO_RWG_SE, i64, env, i64, i64, i32, i32) -DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr) -DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr) -DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) -DEF_HELPER_FLAGS_3(neon_cge_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) -DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) -DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr) -DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr) -DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr) -DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr) -DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_1(neon_addlp_u8, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_1(neon_addlp_u16, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, ptr) -DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, ptr) -DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env) -DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) -DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) diff --git a/qemu/target-arm/helper.c b/qemu/target-arm/helper.c deleted file mode 100644 index 8f655340..00000000 --- a/qemu/target-arm/helper.c +++ /dev/null @@ -1,5791 +0,0 @@ -#include "cpu.h" -#include "internals.h" -#include "exec/helper-proto.h" -#include "qemu/host-utils.h" -#include "sysemu/sysemu.h" -#include "qemu/bitops.h" -#include "qemu/crc32c.h" -#include "exec/cpu_ldst.h" -#include "arm_ldst.h" - -#ifndef CONFIG_USER_ONLY -static inline int get_phys_addr(CPUARMState *env, target_ulong address, - int access_type, int is_user, - hwaddr *phys_ptr, int *prot, - target_ulong *page_size); - -/* Definitions for the PMCCNTR and PMCR registers */ -#define PMCRD 0x8 -#define PMCRC 0x4 -#define PMCRE 0x1 -#endif - -static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - if (cpreg_field_is_64bit(ri)) { - return CPREG_FIELD64(env, ri); - } else { - return CPREG_FIELD32(env, ri); - } -} - -static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - if (cpreg_field_is_64bit(ri)) { - CPREG_FIELD64(env, ri) = value; - } else { - CPREG_FIELD32(env, ri) = value; - } -} - -static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) -{ - /* Raw read of a coprocessor register (as needed for migration, etc). 
*/ - if (ri->type & ARM_CP_CONST) { - return ri->resetvalue; - } else if (ri->raw_readfn) { - return ri->raw_readfn(env, ri); - } else if (ri->readfn) { - return ri->readfn(env, ri); - } else { - return raw_read(env, ri); - } -} - -static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t v) -{ - /* Raw write of a coprocessor register (as needed for migration, etc). - * Note that constant registers are treated as write-ignored; the - * caller should check for success by whether a readback gives the - * value written. - */ - if (ri->type & ARM_CP_CONST) { - return; - } else if (ri->raw_writefn) { - ri->raw_writefn(env, ri, v); - } else if (ri->writefn) { - ri->writefn(env, ri, v); - } else { - raw_write(env, ri, v); - } -} - -bool write_cpustate_to_list(ARMCPU *cpu) -{ - /* Write the coprocessor state from cpu->env to the (index,value) list. */ - int i; - bool ok = true; - - for (i = 0; i < cpu->cpreg_array_len; i++) { - uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); - const ARMCPRegInfo *ri; - - ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); - if (!ri) { - ok = false; - continue; - } - if (ri->type & ARM_CP_NO_MIGRATE) { - continue; - } - cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri); - } - return ok; -} - -bool write_list_to_cpustate(ARMCPU *cpu) -{ - int i; - bool ok = true; - - for (i = 0; i < cpu->cpreg_array_len; i++) { - uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); - uint64_t v = cpu->cpreg_values[i]; - const ARMCPRegInfo *ri; - - ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); - if (!ri) { - ok = false; - continue; - } - if (ri->type & ARM_CP_NO_MIGRATE) { - continue; - } - /* Write value and confirm it reads back as written - * (to catch read-only registers and partially read-only - * registers where the incoming migration value doesn't match) - */ - write_raw_cp_reg(&cpu->env, ri, v); - if (read_raw_cp_reg(&cpu->env, ri) != v) { - ok = false; - } - } - return ok; -} - -static void add_cpreg_to_list(gpointer key, gpointer opaque) -{ - ARMCPU *cpu = opaque; - uint64_t regidx; - const ARMCPRegInfo *ri; - - regidx = *(uint32_t *)key; - ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); - - if (!(ri->type & ARM_CP_NO_MIGRATE)) { - cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); - /* The value array need not be initialized at this point */ - cpu->cpreg_array_len++; - } -} - -static void count_cpreg(gpointer key, gpointer opaque) -{ - ARMCPU *cpu = opaque; - uint64_t regidx; - const ARMCPRegInfo *ri; - - regidx = *(uint32_t *)key; - ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); - - if (!(ri->type & ARM_CP_NO_MIGRATE)) { - cpu->cpreg_array_len++; - } -} - -static gint cpreg_key_compare(gconstpointer a, gconstpointer b) -{ - uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a); - uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b); - - if (aidx > bidx) { - return 1; - } - if (aidx < bidx) { - return -1; - } - return 0; -} - -static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata) -{ - GList **plist = udata; - - *plist = g_list_prepend(*plist, key); -} - -void init_cpreg_list(ARMCPU *cpu) -{ - /* Initialise the cpreg_tuples[] array based on the cp_regs hash. - * Note that we require cpreg_tuples[] to be sorted by key ID. 
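write_list_to_cpustate() above cannot know which registers (or which bits within them) are read-only, so it uses the only portable probe there is: push the incoming value through the raw write accessor, read it straight back, and report a mismatch rather than silently accept a migration value the register cannot hold. The same defensive pattern in miniature, with a hypothetical register whose low nibble is read-only:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical register: high bits writable, low nibble fixed at 0x5. */
static uint64_t reg_backing = 0x5;

static void reg_write(uint64_t v) { reg_backing = (v & ~0xfULL) | 0x5; }
static uint64_t reg_read(void)    { return reg_backing; }

/* Write-and-verify: false means the value did not stick. */
static bool restore_reg(uint64_t v)
{
    reg_write(v);
    return reg_read() == v;
}

int main(void)
{
    assert(restore_reg(0xab5));     /* read-only bits already agree */
    assert(!restore_reg(0xab0));    /* read-only bits disagree: flagged */
    return 0;
}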
- */ - GList *keys = NULL; - int arraylen; - - g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys); - - keys = g_list_sort(keys, cpreg_key_compare); - - cpu->cpreg_array_len = 0; - - g_list_foreach(keys, count_cpreg, cpu); - - arraylen = cpu->cpreg_array_len; - cpu->cpreg_indexes = g_new(uint64_t, arraylen); - cpu->cpreg_values = g_new(uint64_t, arraylen); - cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); - cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); - cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; - cpu->cpreg_array_len = 0; - - g_list_foreach(keys, add_cpreg_to_list, cpu); - - assert(cpu->cpreg_array_len == arraylen); - - g_list_free(keys); -} - -static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - raw_write(env, ri, value); - tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */ -} - -static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - if (raw_read(env, ri) != value) { - /* Unlike real hardware the qemu TLB uses virtual addresses, - * not modified virtual addresses, so this causes a TLB flush. - */ - tlb_flush(CPU(cpu), 1); - raw_write(env, ri, value); - } -} - -static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU) - && !extended_addresses_enabled(env)) { - /* For VMSA (when not using the LPAE long descriptor page table - * format) this register includes the ASID, so do a TLB flush. - * For PMSA it is purely a process ID and no action is needed. - */ - tlb_flush(CPU(cpu), 1); - } - raw_write(env, ri, value); -} - -static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate all (TLBIALL) */ - ARMCPU *cpu = arm_env_get_cpu(env); - - tlb_flush(CPU(cpu), 1); -} - -static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ - ARMCPU *cpu = arm_env_get_cpu(env); - - tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); -} - -static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate by ASID (TLBIASID) */ - ARMCPU *cpu = arm_env_get_cpu(env); - - tlb_flush(CPU(cpu), value == 0); -} - -static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ - ARMCPU *cpu = arm_env_get_cpu(env); - - tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); -} - -/* IS variants of TLB operations must affect all cores */ -static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - //struct uc_struct *uc = env->uc; - // TODO: issue #642 - // tlb_flush(other_cpu, 1); -} - -static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - //struct uc_struct *uc = env->uc; - // TODO: issue #642 - // tlb_flush(other_cpu, value == 0); -} - -static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - //struct uc_struct *uc = env->uc; - // TODO: issue #642 - // tlb_flush(other_cpu, value & TARGET_PAGE_MASK); -} - -static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - //struct uc_struct *uc = env->uc; - // TODO: issue #642 - // tlb_flush(other_cpu, value & TARGET_PAGE_MASK); -} - 
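The four TLB maintenance writes above collapse onto two QEMU primitives: the whole-TLB operations (TLBIALL, TLBIASID) become tlb_flush(), and the by-address operations (TLBIMVA, TLBIMVAA) become tlb_flush_page() on the page-aligned MVA. The emulated TLB is not tagged by ASID, so the ASID-qualified forms can only over-invalidate, which is safe; the inner-shareable variants stay stubbed pending issue #642 because they must also reach other cores. A toy page-granular model of the two primitives (the 4-entry array is purely illustrative):

#include <assert.h>
#include <stdint.h>

#define SKETCH_PAGE_MASK (~0xfffULL)

static uint64_t tlb[4];              /* cached page addresses; 0 = empty */

static void flush_all(void)          /* TLBIALL, TLBIASID */
{
    for (int i = 0; i < 4; i++) tlb[i] = 0;
}

static void flush_page(uint64_t mva) /* TLBIMVA, TLBIMVAA */
{
    for (int i = 0; i < 4; i++)
        if (tlb[i] == (mva & SKETCH_PAGE_MASK)) tlb[i] = 0;
}

int main(void)
{
    tlb[0] = 0x1000; tlb[1] = 0x2000;
    flush_page(0x2abc);              /* MVA inside page 0x2000 */
    assert(tlb[0] == 0x1000 && tlb[1] == 0);
    flush_all();
    assert(tlb[0] == 0 && tlb[1] == 0);
    return 0;
}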
-static const ARMCPRegInfo cp_reginfo[] = { - { "FCSEIDR", 15,13,0, 0,0,0, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c13_fcse), - NULL, NULL, fcse_write, NULL, raw_write, NULL, }, - { "CONTEXTIDR", 0,13,0, 3,0,1, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.contextidr_el1), - NULL, NULL, contextidr_write, NULL, raw_write, NULL, }, - REGINFO_SENTINEL -}; - -static const ARMCPRegInfo not_v8_cp_reginfo[] = { - /* NB: Some of these registers exist in v8 but with more precise - * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). - */ - /* MMU Domain access control / MPU write buffer control */ - { "DACR", 15,3,CP_ANY, 0,CP_ANY,CP_ANY, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c3), - NULL, NULL, dacr_write, NULL, raw_write, NULL, }, - /* ??? This covers not just the impdef TLB lockdown registers but also - * some v7VMSA registers relating to TEX remap, so it is overly broad. - */ - { "TLB_LOCKDOWN", 15,10,CP_ANY, 0,CP_ANY,CP_ANY, 0, - ARM_CP_NOP, PL1_RW, }, - /* Cache maintenance ops; some of this space may be overridden later. */ - { "CACHEMAINT", 15,7,CP_ANY, 0,0,CP_ANY, 0, - ARM_CP_NOP | ARM_CP_OVERRIDE, PL1_W, }, - REGINFO_SENTINEL -}; - -static const ARMCPRegInfo not_v6_cp_reginfo[] = { - /* Not all pre-v6 cores implemented this WFI, so this is slightly - * over-broad. - */ - { "WFI_v5", 15,7,8, 0,0,2, 0, - ARM_CP_WFI, PL1_W, }, - REGINFO_SENTINEL -}; - -static const ARMCPRegInfo not_v7_cp_reginfo[] = { - /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which - * is UNPREDICTABLE; we choose to NOP as most implementations do). - */ - { "WFI_v6", 15,7,0, 0,0,4, 0, - ARM_CP_WFI, PL1_W, }, - /* L1 cache lockdown. Not architectural in v6 and earlier but in practice - * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and - * OMAPCP will override this space. - */ - { "DLOCKDOWN", 15,9,0, 0,0,0, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_data), }, - { "ILOCKDOWN", 15,9,0, 0,0,1, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_insn), }, - /* v6 doesn't have the cache ID registers but Linux reads them anyway */ - { "DUMMY", 15,0,0, 0,1,CP_ANY, 0, - ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL1_R, NULL, 0 }, - /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; - * implementing it as RAZ means the "debug architecture version" bits - * will read as a reserved value, which should cause Linux to not try - * to use the debug hardware. - */ - { "DBGDIDR", 14,0,0, 0,0,0, 0, - ARM_CP_CONST, PL0_R, NULL, 0 }, - /* MMU TLB control. Note that the wildcarding means we cover not just - * the unified TLB ops but also the dside/iside/inner-shareable variants. - */ - { "TLBIALL", 15,8,CP_ANY, 0,CP_ANY,0, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiall_write, }, - { "TLBIMVA", 15,8,CP_ANY, 0,CP_ANY,1, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimva_write, }, - { "TLBIASID", 15,8,CP_ANY, 0,CP_ANY,2, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiasid_write, }, - { "TLBIMVAA", 15,8,CP_ANY, 0,CP_ANY,3, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimvaa_write, }, - REGINFO_SENTINEL -}; - -static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - uint32_t mask = 0; - - /* In ARMv8 most bits of CPACR_EL1 are RES0. */ - if (!arm_feature(env, ARM_FEATURE_V8)) { - /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. - * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. 
- * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. - */ - if (arm_feature(env, ARM_FEATURE_VFP)) { - /* VFP coprocessor: cp10 & cp11 [23:20] */ - mask |= (1U << 31) | (1 << 30) | (0xf << 20); - - if (!arm_feature(env, ARM_FEATURE_NEON)) { - /* ASEDIS [31] bit is RAO/WI */ - value |= (1U << 31); - } - - /* VFPv3 and upwards with NEON implement 32 double precision - * registers (D0-D31). - */ - if (!arm_feature(env, ARM_FEATURE_NEON) || - !arm_feature(env, ARM_FEATURE_VFP3)) { - /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ - value |= (1 << 30); - } - } - value &= mask; - } - env->cp15.c1_coproc = value; -} - -static const ARMCPRegInfo v6_cp_reginfo[] = { - /* prefetch by MVA in v6, NOP in v7 */ - { "MVA_prefetch", 15,7,13, 0,0,1, 0, - ARM_CP_NOP, PL1_W, }, - { "ISB", 15,7,5, 0,0,4, 0, - ARM_CP_NOP, PL0_W, }, - { "DSB", 15,7,10, 0,0,4, 0, - ARM_CP_NOP, PL0_W, }, - { "DMB", 15,7,10, 0,0,5, 0, - ARM_CP_NOP, PL0_W, }, - { "IFAR", 15,6,0, 0,0,2, 0, - 0, PL1_RW, NULL, 0, offsetofhigh32(CPUARMState, cp15.far_el[1]), }, - /* Watchpoint Fault Address Register : should actually only be present - * for 1136, 1176, 11MPCore. - */ - { "WFAR", 15,6,0, 0,0,1, 0, - ARM_CP_CONST, PL1_RW, NULL, 0, }, - { "CPACR", 0,1,0, 3,0,2, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c1_coproc), - NULL, NULL, cpacr_write }, - REGINFO_SENTINEL -}; - -static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri) -{ - /* Performance monitor registers user accessibility is controlled - * by PMUSERENR. - */ - if (arm_current_el(env) == 0 && !env->cp15.c9_pmuserenr) { - return CP_ACCESS_TRAP; - } - return CP_ACCESS_OK; -} - -#ifndef CONFIG_USER_ONLY - -static inline bool arm_ccnt_enabled(CPUARMState *env) -{ - /* This does not support checking PMCCFILTR_EL0 register */ - - if (!(env->cp15.c9_pmcr & PMCRE)) { - return false; - } - - return true; -} - -void pmccntr_sync(CPUARMState *env) -{ - uint64_t temp_ticks; - - temp_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL), - get_ticks_per_sec(), 1000000); - - if (env->cp15.c9_pmcr & PMCRD) { - /* Increment once every 64 processor clock cycles */ - temp_ticks /= 64; - } - - if (arm_ccnt_enabled(env)) { - env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt; - } -} - -static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - pmccntr_sync(env); - - if (value & PMCRC) { - /* The counter has been reset */ - env->cp15.c15_ccnt = 0; - } - - /* only the DP, X, D and E bits are writable */ - env->cp15.c9_pmcr &= ~0x39; - env->cp15.c9_pmcr |= (value & 0x39); - - pmccntr_sync(env); -} - -static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - uint64_t total_ticks; - - if (!arm_ccnt_enabled(env)) { - /* Counter is disabled, do not change value */ - return env->cp15.c15_ccnt; - } - - total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL), - get_ticks_per_sec(), 1000000); - - if (env->cp15.c9_pmcr & PMCRD) { - /* Increment once every 64 processor clock cycles */ - total_ticks /= 64; - } - return total_ticks - env->cp15.c15_ccnt; -} - -static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - uint64_t total_ticks; - - if (!arm_ccnt_enabled(env)) { - /* Counter is disabled, set the absolute value */ - env->cp15.c15_ccnt = value; - return; - } - - total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL), - get_ticks_per_sec(), 1000000); - - if (env->cp15.c9_pmcr & PMCRD) { - /* Increment once every 64 processor clock cycles */ - 
total_ticks /= 64; - } - env->cp15.c15_ccnt = total_ticks - value; -} - -static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - uint64_t cur_val = pmccntr_read(env, NULL); - - pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); -} - -#else /* CONFIG_USER_ONLY */ - -void pmccntr_sync(CPUARMState *env) -{ -} - -#endif - -static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - pmccntr_sync(env); - env->cp15.pmccfiltr_el0 = value & 0x7E000000; - pmccntr_sync(env); -} - -static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - value &= (1U << 31); - env->cp15.c9_pmcnten |= value; -} - -static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - value &= (1U << 31); - env->cp15.c9_pmcnten &= ~value; -} - -static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.c9_pmovsr &= ~value; -} - -static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.c9_pmxevtyper = value & 0xff; -} - -static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.c9_pmuserenr = value & 1; -} - -static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* We have no event counters so only the C bit can be changed */ - value &= (1U << 31); - env->cp15.c9_pminten |= value; -} - -static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - value &= (1U << 31); - env->cp15.c9_pminten &= ~value; -} - -static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Note that even though the AArch64 view of this register has bits - * [10:0] all RES0 we can only mask the bottom 5, to comply with the - * architectural requirements for bits which are RES0 only in some - * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 - * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) - */ - raw_write(env, ri, value & ~0x1FULL); -} - -static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) -{ - /* We only mask off bits that are RES0 both for AArch64 and AArch32. - * For bits that vary between AArch32/64, code needs to check the - * current execution mode before directly using the feature bit. - */ - uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK; - - if (!arm_feature(env, ARM_FEATURE_EL2)) { - valid_mask &= ~SCR_HCE; - - /* On ARMv7, SMD (or SCD as it is called in v7) is only - * supported if EL2 exists. The bit is UNK/SBZP when - * EL2 is unavailable. In QEMU ARMv7, we force it to always zero - * when EL2 is unavailable. - */ - if (arm_feature(env, ARM_FEATURE_V7)) { - valid_mask &= ~SCR_SMD; - } - } - - /* Clear all-context RES0 bits. 
*/ - value &= valid_mask; - raw_write(env, ri, value); -} - -static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - return cpu->ccsidr[env->cp15.c0_cssel]; -} - -static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - raw_write(env, ri, value & 0xf); -} - -static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - CPUState *cs = ENV_GET_CPU(env); - uint64_t ret = 0; - - if (cs->interrupt_request & CPU_INTERRUPT_HARD) { - ret |= CPSR_I; - } - if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { - ret |= CPSR_F; - } - /* External aborts are not possible in QEMU so A bit is always clear */ - return ret; -} - -static const ARMCPRegInfo v7_cp_reginfo[] = { - /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ - { "NOP", 15,7,0, 0,0,4, 0, - ARM_CP_NOP, PL1_W, }, - /* Performance monitors are implementation defined in v7, - * but with an ARM recommended set of registers, which we - * follow (although we don't actually implement any counters) - * - * Performance registers fall into three categories: - * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) - * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) - * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) - * For the cases controlled by PMUSERENR we must set .access to PL0_RW - * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. - */ - { "PMCNTENSET", 15,9,12, 0,0,1, 0, - ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c9_pmcnten), - pmreg_access, NULL, pmcntenset_write, NULL, raw_write }, - { "PMCNTENSET_EL0", 0,9,12, 3,3,1, ARM_CP_STATE_AA64, - 0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmcnten), - pmreg_access, NULL, pmcntenset_write, NULL, raw_write }, - { "PMCNTENCLR", 15,9,12, 0,0,2, 0, - ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c9_pmcnten), - pmreg_access, NULL, pmcntenclr_write, }, - { "PMCNTENCLR_EL0", 0,9,12, 3,3,2, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmcnten), - pmreg_access, NULL, pmcntenclr_write }, - { "PMOVSR", 15,9,12, 0,0,3, 0, - 0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmovsr), - pmreg_access, NULL, pmovsr_write, NULL, raw_write }, - /* Unimplemented so WI. */ - { "PMSWINC", 15,9,12, 0,0,4, 0, - ARM_CP_NOP, PL0_W, NULL, 0, 0, - pmreg_access, }, - /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE. - * We choose to RAZ/WI. - */ - { "PMSELR", 15,9,12, 0,0,5, 0, - ARM_CP_CONST, PL0_RW, NULL, 0, 0, - pmreg_access }, -#ifndef CONFIG_USER_ONLY - { "PMCCNTR", 15,9,13, 0,0,0, 0, - ARM_CP_IO, PL0_RW, NULL, 0, 0, - pmreg_access, pmccntr_read, pmccntr_write32, }, - { "PMCCNTR_EL0", 0,9,13, 3,3,0, ARM_CP_STATE_AA64, - ARM_CP_IO, PL0_RW, NULL, 0, 0, - pmreg_access, pmccntr_read, pmccntr_write, }, -#endif - { "PMCCFILTR_EL0", 0,14,15, 3,3,7, ARM_CP_STATE_AA64, - ARM_CP_IO, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.pmccfiltr_el0), - pmreg_access, NULL, pmccfiltr_write, }, - { "PMXEVTYPER", 15,9,13, 0,0,1, 0, - 0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmxevtyper), - pmreg_access, NULL, pmxevtyper_write, NULL, raw_write }, - /* Unimplemented, RAZ/WI. 
*/ - { "PMXEVCNTR", 15,9,13, 0,0,2, 0, - ARM_CP_CONST, PL0_RW, NULL, 0, 0, - pmreg_access }, - { "PMUSERENR", 15,9,14, 0,0,0, 0, - 0, PL0_R | PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmuserenr), - NULL, NULL, pmuserenr_write, NULL, raw_write }, - { "PMINTENSET", 15,9,14, 0,0,1, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pminten), - NULL, NULL, pmintenset_write, NULL, raw_write }, - { "PMINTENCLR", 15,9,14, 0,0,2, 0, - ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pminten), - NULL, NULL, pmintenclr_write, }, - { "VBAR", 0,12,0, 3,0,0, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.vbar_el[1]), - NULL, NULL, vbar_write, }, - { "SCR", 15,1,1, 0,0,0, 0, - 0, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.scr_el3), - NULL, NULL, scr_write }, - { "CCSIDR", 0,0,0, 3,1,0, ARM_CP_STATE_BOTH, - ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, 0, - NULL, ccsidr_read, }, - { "CSSELR", 0,0,0, 3,2,0, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c0_cssel), - NULL, NULL, csselr_write, }, - /* Auxiliary ID register: this actually has an IMPDEF value but for now - * just RAZ for all cores: - */ - { "AIDR", 0,0,0, 3,1,7, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, 0 }, - /* Auxiliary fault status registers: these also are IMPDEF, and we - * choose to RAZ/WI for all cores. - */ - { "AFSR0_EL1", 0,5,1, 3,0,0, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_RW, NULL, 0 }, - { "AFSR1_EL1", 0,5,1, 3,0,1, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_RW, NULL, 0 }, - /* MAIR can just read-as-written because we don't implement caches - * and so don't need to care about memory attributes. - */ - { "MAIR_EL1", 0,10,2, 3,0,0, ARM_CP_STATE_AA64, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.mair_el1), }, - /* For non-long-descriptor page tables these are PRRR and NMRR; - * regardless they still act as reads-as-written for QEMU. - * The override is necessary because of the overly-broad TLB_LOCKDOWN - * definition. 
- */ - { "MAIR0", 15,10,2, 0,0,0, ARM_CP_STATE_AA32, - ARM_CP_OVERRIDE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.mair_el1), - NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore }, - { "MAIR1", 15,10,2, 0,0,1, ARM_CP_STATE_AA32, - ARM_CP_OVERRIDE, PL1_RW, NULL, 0, offsetofhigh32(CPUARMState, cp15.mair_el1), - NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore }, - { "ISR_EL1", 0,12,1, 3,0,0, ARM_CP_STATE_BOTH, - ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, 0, - NULL, isr_read }, - /* 32 bit ITLB invalidates */ - { "ITLBIALL", 15,8,5, 0,0,0, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiall_write }, - { "ITLBIMVA", 15,8,5, 0,0,1, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimva_write }, - { "ITLBIASID", 15,8,5, 0,0,2, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiasid_write }, - /* 32 bit DTLB invalidates */ - { "DTLBIALL", 15,8,6, 0,0,0, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiall_write }, - { "DTLBIMVA", 15,8,6, 0,0,1, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimva_write }, - { "DTLBIASID", 15,8,6, 0,0,2, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiasid_write }, - /* 32 bit TLB invalidates */ - { "TLBIALL", 15,8,7, 0,0,0, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiall_write }, - { "TLBIMVA", 15,8,7, 0,0,1, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimva_write }, - { "TLBIASID", 15,8,7, 0,0,2, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiasid_write }, - { "TLBIMVAA", 15,8,7, 0,0,3, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimvaa_write }, - REGINFO_SENTINEL -}; - -static const ARMCPRegInfo v7mp_cp_reginfo[] = { - /* 32 bit TLB invalidates, Inner Shareable */ - { "TLBIALLIS", 15,8,3, 0,0,0, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiall_is_write }, - { "TLBIMVAIS", 15,8,3, 0,0,1, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimva_is_write }, - { "TLBIASIDIS", 15,8,3, 0,0,2, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiasid_is_write }, - { "TLBIMVAAIS", 15,8,3, 0,0,3, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimvaa_is_write }, - REGINFO_SENTINEL -}; - -static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - value &= 1; - env->teecr = value; -} - -static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri) -{ - if (arm_current_el(env) == 0 && (env->teecr & 1)) { - return CP_ACCESS_TRAP; - } - return CP_ACCESS_OK; -} - -static const ARMCPRegInfo t2ee_cp_reginfo[] = { - { "TEECR", 14,0,0, 0,6,0, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, teecr), - NULL, NULL, teecr_write }, - { "TEEHBR", 14,1,0, 0,6,0, 0, - 0, PL0_RW, NULL, 0, offsetof(CPUARMState, teehbr), - teehbr_access, }, - REGINFO_SENTINEL -}; - -static const ARMCPRegInfo v6k_cp_reginfo[] = { - { "TPIDR_EL0", 0,13,0, 3,3,2, ARM_CP_STATE_AA64, - 0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.tpidr_el0), }, - { "TPIDRURW", 15,13,0, 0,0,2, 0, - 0, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.tpidr_el0), - NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore }, - { "TPIDRRO_EL0", 0,13,0, 3,3,3, ARM_CP_STATE_AA64, - 0, PL0_R|PL1_W, NULL, 0, offsetof(CPUARMState, cp15.tpidrro_el0) }, - { "TPIDRURO", 15,13,0, 0,0,3, 0, - 0, PL0_R|PL1_W, NULL, 0, offsetoflow32(CPUARMState, cp15.tpidrro_el0), - NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore }, - { "TPIDR_EL1", 0,13,0, 3,0,4, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.tpidr_el1), }, - 
REGINFO_SENTINEL
-};
-
-#ifndef CONFIG_USER_ONLY
-
-static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
-    if (arm_current_el(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
-        return CP_ACCESS_TRAP;
-    }
-    return CP_ACCESS_OK;
-}
-
-static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
-{
-    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
-    if (arm_current_el(env) == 0 &&
-        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
-        return CP_ACCESS_TRAP;
-    }
-    return CP_ACCESS_OK;
-}
-
-static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
-{
-    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
-     * EL0[PV]TEN is zero.
-     */
-    if (arm_current_el(env) == 0 &&
-        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
-        return CP_ACCESS_TRAP;
-    }
-    return CP_ACCESS_OK;
-}
-
-static CPAccessResult gt_pct_access(CPUARMState *env,
-                                    const ARMCPRegInfo *ri)
-{
-    return gt_counter_access(env, GTIMER_PHYS);
-}
-
-static CPAccessResult gt_vct_access(CPUARMState *env,
-                                    const ARMCPRegInfo *ri)
-{
-    return gt_counter_access(env, GTIMER_VIRT);
-}
-
-static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    return gt_timer_access(env, GTIMER_PHYS);
-}
-
-static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    return gt_timer_access(env, GTIMER_VIRT);
-}
-
-static uint64_t gt_get_countervalue(CPUARMState *env)
-{
-    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
-}
-
-static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
-{
-    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
-
-    if (gt->ctl & 1) {
-        /* Timer enabled: calculate and set current ISTATUS, irq, and
-         * reset timer to when ISTATUS next has to change
-         */
-        uint64_t count = gt_get_countervalue(&cpu->env);
-        /* Note that this must be unsigned 64 bit arithmetic: */
-        int istatus = count >= gt->cval;
-        uint64_t nexttick;
-
-        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
-        //qemu_set_irq(cpu->gt_timer_outputs[timeridx],
-        //             (istatus && !(gt->ctl & 2)));
-        if (istatus) {
-            /* Next transition is when count rolls back over to zero */
-            nexttick = UINT64_MAX;
-        } else {
-            /* Next transition is when we hit cval */
-            nexttick = gt->cval;
-        }
-        /* Note that the desired next expiry time might be beyond the
-         * signed-64-bit range of a QEMUTimer -- in this case we just
-         * set the timer for as far in the future as possible. When the
-         * timer expires we will reset the timer for any remaining period.
- */ - if (nexttick > INT64_MAX / GTIMER_SCALE) { - nexttick = INT64_MAX / GTIMER_SCALE; - } - //timer_mod(cpu->gt_timer[timeridx], nexttick); - } else { - /* Timer disabled: ISTATUS and timer output always clear */ - gt->ctl &= ~4; - //qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); - //timer_del(cpu->gt_timer[timeridx]); - } -} - -static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri) -{ -} - -static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return gt_get_countervalue(env); -} - -static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - int timeridx = ri->opc1 & 1; - - env->cp15.c14_timer[timeridx].cval = value; - //gt_recalc_timer(arm_env_get_cpu(env), timeridx); -} - -static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - int timeridx = ri->crm & 1; - - return (uint32_t)(env->cp15.c14_timer[timeridx].cval - - gt_get_countervalue(env)); -} - -static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - int timeridx = ri->crm & 1; - - env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) + - + sextract64(value, 0, 32); - gt_recalc_timer(arm_env_get_cpu(env), timeridx); -} - -static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - int timeridx = ri->crm & 1; - uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; - - env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); - if ((oldval ^ value) & 1) { - /* Enable toggled */ - gt_recalc_timer(cpu, timeridx); - } else if ((oldval ^ value) & 2) { - /* IMASK toggled: don't need to recalculate, - * just set the interrupt line based on ISTATUS - */ - //qemu_set_irq(cpu->gt_timer_outputs[timeridx], - // (oldval & 4) && !(value & 2)); - } -} - -void arm_gt_ptimer_cb(void *opaque) -{ - ARMCPU *cpu = opaque; - - gt_recalc_timer(cpu, GTIMER_PHYS); -} - -void arm_gt_vtimer_cb(void *opaque) -{ - ARMCPU *cpu = opaque; - - gt_recalc_timer(cpu, GTIMER_VIRT); -} - -static const ARMCPRegInfo generic_timer_cp_reginfo[] = { - /* Note that CNTFRQ is purely reads-as-written for the benefit - * of software; writing it doesn't actually change the timer frequency. - * Our reset value matches the fixed frequency we implement the timer at. 
- */ - { "CNTFRQ", 15,14,0, 0,0,0, 0, - ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetoflow32(CPUARMState, cp15.c14_cntfrq), - gt_cntfrq_access, NULL,NULL, NULL,NULL, arm_cp_reset_ignore, }, - { "CNTFRQ_EL0", 0,14,0, 3,3,0, ARM_CP_STATE_AA64, - 0, PL1_RW | PL0_R, NULL, (1000 * 1000 * 1000) / GTIMER_SCALE, offsetof(CPUARMState, cp15.c14_cntfrq), - gt_cntfrq_access, }, - /* overall control: mostly access permissions */ - { "CNTKCTL", 0,14,1, 3,0,0, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c14_cntkctl), }, - /* per-timer control */ - { "CNTP_CTL", 15,14,2, 0,0,1, 0, - ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetoflow32(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), - gt_ptimer_access, NULL, gt_ctl_write, NULL,raw_write, arm_cp_reset_ignore, }, - { "CNTP_CTL_EL0", 0,14,2, 3,3,1, ARM_CP_STATE_AA64, - ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), - gt_ptimer_access, NULL,gt_ctl_write, NULL,raw_write, }, - { "CNTV_CTL", 15,14,3, 0,0,1, 0, - ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetoflow32(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), - gt_vtimer_access, NULL,gt_ctl_write, NULL,raw_write, arm_cp_reset_ignore, }, - { "CNTV_CTL_EL0", 0,14,3, 3,3,1, ARM_CP_STATE_AA64, - ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), - gt_vtimer_access, NULL,gt_ctl_write, NULL,raw_write, }, - /* TimerValue views: a 32 bit downcounting view of the underlying state */ - { "CNTP_TVAL", 15,14,2, 0,0,0, 0, - ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0, - gt_ptimer_access, gt_tval_read, gt_tval_write, }, - { "CNTP_TVAL_EL0", 0,14,2, 3,3,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0, - NULL, gt_tval_read, gt_tval_write, }, - { "CNTV_TVAL", 15,14,3, 0,0,0, 0, - ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0, - gt_vtimer_access, gt_tval_read, gt_tval_write, }, - { "CNTV_TVAL_EL0", 0,14,3, 3,3,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0, - NULL, gt_tval_read, gt_tval_write, }, - /* The counter itself */ - { "CNTPCT", 15,0,14, 0,0, 0, 0, - ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0, - gt_pct_access, gt_cnt_read,NULL, NULL,NULL, arm_cp_reset_ignore, }, - { "CNTPCT_EL0", 0,14,0, 3,3,1, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0, - gt_pct_access, gt_cnt_read, NULL, NULL, NULL, gt_cnt_reset, }, - { "CNTVCT", 15,0,14, 0,1,0, 0, - ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0, - gt_vct_access, gt_cnt_read,NULL, NULL,NULL, arm_cp_reset_ignore, }, - { "CNTVCT_EL0", 0,14,0, 3,3,2, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0, - gt_vct_access, gt_cnt_read, NULL, NULL,NULL, gt_cnt_reset, }, - /* Comparison value, indicating when the timer goes off */ - { "CNTP_CVAL", 15, 0,14, 0,2, 0, 0, - ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), - gt_ptimer_access, NULL, gt_cval_write, NULL, raw_write, arm_cp_reset_ignore, }, - { "CNTP_CVAL_EL0", 0,14,2, 3,3,2, ARM_CP_STATE_AA64, - ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), - gt_vtimer_access, NULL, gt_cval_write, NULL, raw_write, }, - { "CNTV_CVAL", 15, 0,14, 0,3,0, 0, - ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), - gt_vtimer_access, NULL, 
gt_cval_write, NULL, raw_write, arm_cp_reset_ignore, }, - { "CNTV_CVAL_EL0", 0,14,3, 3,3,2, ARM_CP_STATE_AA64, - ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), - gt_vtimer_access, NULL, gt_cval_write, NULL, raw_write, }, - REGINFO_SENTINEL -}; - -#else -/* In user-mode none of the generic timer registers are accessible, - * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs, - * so instead just don't register any of them. - */ -static const ARMCPRegInfo generic_timer_cp_reginfo[] = { - REGINFO_SENTINEL -}; - -#endif - -static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) -{ - if (arm_feature(env, ARM_FEATURE_LPAE)) { - raw_write(env, ri, value); - } else if (arm_feature(env, ARM_FEATURE_V7)) { - raw_write(env, ri, value & 0xfffff6ff); - } else { - raw_write(env, ri, value & 0xfffff1ff); - } -} - -#ifndef CONFIG_USER_ONLY -/* get_phys_addr() isn't present for user-mode-only targets */ - -static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri) -{ - if (ri->opc2 & 4) { - /* Other states are only available with TrustZone; in - * a non-TZ implementation these registers don't exist - * at all, which is an Uncategorized trap. This underdecoding - * is safe because the reginfo is NO_MIGRATE. - */ - return CP_ACCESS_TRAP_UNCATEGORIZED; - } - return CP_ACCESS_OK; -} - -static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) -{ - hwaddr phys_addr; - target_ulong page_size; - int prot; - int ret, is_user = ri->opc2 & 2; - int access_type = ri->opc2 & 1; - - ret = get_phys_addr(env, value, access_type, is_user, - &phys_addr, &prot, &page_size); - if (extended_addresses_enabled(env)) { - /* ret is a DFSR/IFSR value for the long descriptor - * translation table format, but with WnR always clear. - * Convert it to a 64-bit PAR. - */ - uint64_t par64 = (1 << 11); /* LPAE bit always set */ - if (ret == 0) { - par64 |= phys_addr & ~0xfffULL; - /* We don't set the ATTR or SH fields in the PAR. */ - } else { - par64 |= 1; /* F */ - par64 |= (ret & 0x3f) << 1; /* FS */ - /* Note that S2WLK and FSTAGE are always zero, because we don't - * implement virtualization and therefore there can't be a stage 2 - * fault. - */ - } - env->cp15.par_el1 = par64; - } else { - /* ret is a DFSR/IFSR value for the short descriptor - * translation table format (with WnR always clear). - * Convert it to a 32-bit PAR. - */ - if (ret == 0) { - /* We do not set any attribute bits in the PAR */ - if (page_size == (1 << 24) - && arm_feature(env, ARM_FEATURE_V7)) { - env->cp15.par_el1 = (phys_addr & 0xff000000) | 1 << 1; - } else { - env->cp15.par_el1 = phys_addr & 0xfffff000; - } - } else { - env->cp15.par_el1 = ((ret & (1 << 10)) >> 5) | - ((ret & (1 << 12)) >> 6) | - ((ret & 0xf) << 1) | 1; - } - } -} -#endif - -static const ARMCPRegInfo vapa_cp_reginfo[] = { - { "PAR", 15,7,4, 0,0,0, 0, - 0, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.par_el1), - NULL, NULL, par_write }, -#ifndef CONFIG_USER_ONLY - { "ATS", 15,7,8, 0,0,CP_ANY, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - ats_access, NULL, ats_write }, -#endif - REGINFO_SENTINEL -}; - -/* Return basic MPU access permission bits. */ -static uint32_t simple_mpu_ap_bits(uint32_t val) -{ - uint32_t ret; - uint32_t mask; - int i; - ret = 0; - mask = 3; - for (i = 0; i < 16; i += 2) { - ret |= (val >> i) & mask; - mask <<= 2; - } - return ret; -} - -/* Pad basic MPU access permission bits to extended format. 
*/
-static uint32_t extended_mpu_ap_bits(uint32_t val)
-{
-    uint32_t ret;
-    uint32_t mask;
-    int i;
-    ret = 0;
-    mask = 3;
-    for (i = 0; i < 16; i += 2) {
-        ret |= (val & mask) << i;
-        mask <<= 2;
-    }
-    return ret;
-}
-
-static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                                 uint64_t value)
-{
-    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
-}
-
-static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
-}
-
-static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                                 uint64_t value)
-{
-    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
-}
-
-static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
-}
-
-static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
-    { "DATA_AP", 15,5,0, 0,0,0, 0,
-      ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_data_ap),
-      NULL, pmsav5_data_ap_read, pmsav5_data_ap_write, },
-    { "INSN_AP", 15,5,0, 0,0,1, 0,
-      ARM_CP_NO_MIGRATE,PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_insn_ap),
-      NULL, pmsav5_insn_ap_read, pmsav5_insn_ap_write, },
-    { "DATA_EXT_AP", 15,5,0, 0,0,2, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_data_ap), },
-    { "INSN_EXT_AP", 15,5,0, 0,0,3, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_insn_ap), },
-    { "DCACHE_CFG", 15,2,0, 0,0,0, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c2_data), },
-    { "ICACHE_CFG", 15,2,0, 0,0,1, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c2_insn), },
-    /* Protection region base and size registers */
-    { "946_PRBS0", 15,6,0, 0,0,CP_ANY, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[0]) },
-    { "946_PRBS1", 15,6,1, 0,0,CP_ANY, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[1]) },
-    { "946_PRBS2", 15,6,2, 0,0,CP_ANY, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[2]) },
-    { "946_PRBS3", 15,6,3, 0,0,CP_ANY, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[3]) },
-    { "946_PRBS4", 15,6,4, 0,0,CP_ANY, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[4]) },
-    { "946_PRBS5", 15,6,5, 0,0,CP_ANY, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[5]) },
-    { "946_PRBS6", 15,6,6, 0,0,CP_ANY, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[6]) },
-    { "946_PRBS7", 15,6,7, 0,0,CP_ANY, 0,
-      0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[7]) },
-    REGINFO_SENTINEL
-};
-
-static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                                 uint64_t value)
-{
-    int maskshift = extract32(value, 0, 3);
-
-    if (!arm_feature(env, ARM_FEATURE_V8)) {
-        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
-            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
-             * using Long-descriptor translation table format */
-            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
-        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
-            /* In an implementation that includes the Security Extensions
-             * TTBCR has additional fields PD0 [4] and PD1 [5] for
-             * Short-descriptor translation table format.
-             */
-            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
-        } else {
-            value &= TTBCR_N;
-        }
-    }
-
-    /* Note that we always calculate c2_mask and c2_base_mask, but
-     * they are only used for short-descriptor tables (ie if EAE is 0);
-     * for long-descriptor tables the TTBCR fields are used differently
-     * and the c2_mask and c2_base_mask values are meaningless.
- */ - raw_write(env, ri, value); - env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift); - env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift); -} - -static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - if (arm_feature(env, ARM_FEATURE_LPAE)) { - /* With LPAE the TTBCR could result in a change of ASID - * via the TTBCR.A1 bit, so do a TLB flush. - */ - tlb_flush(CPU(cpu), 1); - } - vmsa_ttbcr_raw_write(env, ri, value); -} - -static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) -{ - env->cp15.c2_base_mask = 0xffffc000u; - raw_write(env, ri, 0); - env->cp15.c2_mask = 0; -} - -static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ - tlb_flush(CPU(cpu), 1); - raw_write(env, ri, value); -} - -static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* 64 bit accesses to the TTBRs can change the ASID and so we - * must flush the TLB. - */ - if (cpreg_field_is_64bit(ri)) { - ARMCPU *cpu = arm_env_get_cpu(env); - - tlb_flush(CPU(cpu), 1); - } - raw_write(env, ri, value); -} - -static const ARMCPRegInfo vmsa_cp_reginfo[] = { - { "DFSR", 15,5,0, 0,0,0, 0, - ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.esr_el[1]), - NULL,NULL,NULL,NULL,NULL, arm_cp_reset_ignore, }, - { "IFSR", 15,5,0, 0,0,1, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ifsr_el2), }, - { "ESR_EL1", 0,5,2, 3,0,0, ARM_CP_STATE_AA64, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.esr_el[1]), }, - { "TTBR0_EL1", 0,2,0, 3,0,0, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr0_el1), - NULL, NULL, vmsa_ttbr_write, }, - { "TTBR1_EL1", 0,2,0, 3,0,1, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr1_el1), - NULL, NULL, vmsa_ttbr_write, }, - { "TCR_EL1", 0,2,0, 3,0,2, ARM_CP_STATE_AA64, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c2_control), - NULL, NULL,vmsa_tcr_el1_write, NULL,raw_write, vmsa_ttbcr_reset, }, - { "TTBCR", 15,2,0, 0,0,2, 0, - ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c2_control), - NULL, NULL, vmsa_ttbcr_write, NULL, vmsa_ttbcr_raw_write, arm_cp_reset_ignore, }, - /* 64-bit FAR; this entry also gives us the AArch32 DFAR */ - { "FAR_EL1", 0,6,0, 3,0,0, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.far_el[1]), }, - REGINFO_SENTINEL -}; - -static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.c15_ticonfig = value & 0xe7; - /* The OS_TYPE bit in this register changes the reported CPUID! */ - env->cp15.c0_cpuid = (value & (1 << 5)) ? - ARM_CPUID_TI915T : ARM_CPUID_TI925T; -} - -static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.c15_threadid = value & 0xffff; -} - -static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Wait-for-interrupt (deprecated) */ - cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT); -} - -static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* On OMAP there are registers indicating the max/min index of dcache lines - * containing a dirty line; cache flush operations have to reset these. 
- */ - env->cp15.c15_i_max = 0x000; - env->cp15.c15_i_min = 0xff0; -} - -static const ARMCPRegInfo omap_cp_reginfo[] = { - { "DFSR", 15,5,CP_ANY, 0,CP_ANY,CP_ANY, 0, - ARM_CP_OVERRIDE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.esr_el[1]), }, - { "", 15,15,0, 0,0,0, 0, - ARM_CP_NOP, PL1_RW, NULL, 0, 0, }, - { "TICONFIG", 15,15,1, 0,0,0, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_ticonfig), - NULL, NULL, omap_ticonfig_write }, - { "IMAX", 15,15,2, 0,0,0, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_i_max), }, - { "IMIN", 15,15,3, 0,0,0, 0, - 0, PL1_RW, NULL, 0xff0, offsetof(CPUARMState, cp15.c15_i_min) }, - { "THREADID", 15,15,4, 0,0,0, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_threadid), - NULL, NULL, omap_threadid_write }, - { "TI925T_STATUS", 15,15,8, 0,0,0, 0, - ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, 0, - NULL, arm_cp_read_zero, omap_wfi_write, }, - /* TODO: Peripheral port remap register: - * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller - * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), - * when MMU is off. - */ - { "OMAP_CACHEMAINT", 15,7,CP_ANY, 0,0,CP_ANY, 0, - ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, omap_cachemaint_write }, - { "C9", 15,9,CP_ANY, 0,CP_ANY,CP_ANY, 0, - ARM_CP_CONST | ARM_CP_OVERRIDE, PL1_RW, NULL, 0, 0, }, - REGINFO_SENTINEL -}; - -static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.c15_cpar = value & 0x3fff; -} - -static const ARMCPRegInfo xscale_cp_reginfo[] = { - { "XSCALE_CPAR", 15,15,1, 0,0,0, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_cpar), - NULL, NULL, xscale_cpar_write, }, - { "XSCALE_AUXCR", 15,1,0, 0,0,1, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c1_xscaleauxcr), }, - /* XScale specific cache-lockdown: since we have no cache we NOP these - * and hope the guest does not really rely on cache behaviour. - */ - { "XSCALE_LOCK_ICACHE_LINE", 15,9,1, 0,0,0, 0, - ARM_CP_NOP, PL1_W }, - { "XSCALE_UNLOCK_ICACHE", 15,9,1, 0,0,1, 0, - ARM_CP_NOP, PL1_W, }, - { "XSCALE_DCACHE_LOCK", 15,9,2, 0,0,0, 0, - ARM_CP_NOP, PL1_RW }, - { "XSCALE_UNLOCK_DCACHE", 15,9,2, 0,0,1, 0, - ARM_CP_NOP, PL1_W, }, - REGINFO_SENTINEL -}; - -static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { - /* RAZ/WI the whole crn=15 space, when we don't have a more specific - * implementation of this implementation-defined space. - * Ideally this should eventually disappear in favour of actually - * implementing the correct behaviour for all cores. 
- */
-    { "C15_IMPDEF", 15,15,CP_ANY, 0,CP_ANY,CP_ANY, 0,
-      ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE, PL1_RW, NULL, 0 },
-    REGINFO_SENTINEL
-};
-
-static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
-    /* Cache status: RAZ because we have no cache so it's always clean */
-    { "CDSR", 15,7,10, 0,0,6, 0,
-      ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL1_R, NULL, 0 },
-    REGINFO_SENTINEL
-};
-
-static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
-    /* We never have a block transfer operation in progress */
-    { "BXSR", 15,7,12, 0,0,4, 0,
-      ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL0_R, NULL, 0 },
-    /* The cache ops themselves: these all NOP for QEMU */
-    { "IICR", 15, 0,5, 0,0, 0, 0,
-      ARM_CP_NOP|ARM_CP_64BIT, PL1_W },
-    { "IDCR", 15, 0,6, 0,0, 0, 0,
-      ARM_CP_NOP|ARM_CP_64BIT, PL1_W, },
-    { "CDCR", 15, 0,12, 0,0, 0, 0,
-      ARM_CP_NOP|ARM_CP_64BIT, PL0_W, },
-    { "PIR", 15, 0,12, 0,1, 0, 0,
-      ARM_CP_NOP|ARM_CP_64BIT, PL0_W, },
-    { "PDR", 15, 0,12, 0,2, 0, 0,
-      ARM_CP_NOP|ARM_CP_64BIT, PL0_W, },
-    { "CIDCR", 15, 0,14, 0,0, 0, 0,
-      ARM_CP_NOP|ARM_CP_64BIT, PL1_W, },
-    REGINFO_SENTINEL
-};
-
-static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
-    /* The cache test-and-clean instructions always return (1 << 30)
-     * to indicate that there are no dirty cache lines.
-     */
-    { "TC_DCACHE", 15,7,10, 0,0,3, 0,
-      ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL0_R, NULL, (1 << 30) },
-    { "TCI_DCACHE", 15,7,14, 0,0,3, 0,
-      ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL0_R, NULL, (1 << 30) },
-    REGINFO_SENTINEL
-};
-
-static const ARMCPRegInfo strongarm_cp_reginfo[] = {
-    /* Ignore ReadBuffer accesses */
-    { "C9_READBUFFER", 15,9,CP_ANY, 0,CP_ANY,CP_ANY, 0,
-      ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, },
-    REGINFO_SENTINEL
-};
-
-static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
-    CPUState *cs = CPU(arm_env_get_cpu(env));
-    uint32_t mpidr = cs->cpu_index;
-    /* We don't support setting cluster ID ([8..11]) (known as Aff1
-     * in later ARM ARM versions), or any of the higher affinity level fields,
-     * so these bits always RAZ.
-     */
-    if (arm_feature(env, ARM_FEATURE_V7MP)) {
-        mpidr |= (1U << 31);
-        /* Cores which are uniprocessor (non-coherent)
-         * but still implement the MP extensions set
-         * bit 30. (For instance, A9UP.) However we do
-         * not currently model any of those cores.
-         */
-    }
-    return mpidr;
-}
-
-static const ARMCPRegInfo mpidr_cp_reginfo[] = {
-    { "MPIDR", 0,0,0, 3,0,5, ARM_CP_STATE_BOTH,
-      ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, 0,
-      NULL, mpidr_read, },
-    REGINFO_SENTINEL
-};
-
-static const ARMCPRegInfo lpae_cp_reginfo[] = {
-    /* NOP AMAIR0/1: the override is because these clash with the rather
-     * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
- */ - { "AMAIR0", 0,10,3, 3,0,0, ARM_CP_STATE_BOTH, - ARM_CP_CONST | ARM_CP_OVERRIDE, PL1_RW, NULL, 0 }, - /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ - { "AMAIR1", 15,10,3, 0,0,1, 0, - ARM_CP_CONST | ARM_CP_OVERRIDE, PL1_RW, NULL, 0 }, - { "PAR", 15, 0,7, 0,0, 0, 0, - ARM_CP_64BIT, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.par_el1), }, - { "TTBR0", 15, 0,2, 0,0, 0, 0, - ARM_CP_64BIT | ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr0_el1), - NULL, NULL, vmsa_ttbr_write, NULL,NULL, arm_cp_reset_ignore }, - { "TTBR1", 15, 0,2, 0,1, 0, 0, - ARM_CP_64BIT | ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr1_el1), - NULL, NULL, vmsa_ttbr_write, NULL,NULL, arm_cp_reset_ignore }, - REGINFO_SENTINEL -}; - -static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return vfp_get_fpcr(env); -} - -static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - vfp_set_fpcr(env, value); -} - -static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return vfp_get_fpsr(env); -} - -static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - vfp_set_fpsr(env, value); -} - -static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri) -{ - if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) { - return CP_ACCESS_TRAP; - } - return CP_ACCESS_OK; -} - -static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->daif = value & PSTATE_DAIF; -} - -static CPAccessResult aa64_cacheop_access(CPUARMState *env, - const ARMCPRegInfo *ri) -{ - /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless - * SCTLR_EL1.UCI is set. - */ - if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) { - return CP_ACCESS_TRAP; - } - return CP_ACCESS_OK; -} - -/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions - * Page D4-1736 (DDI0487A.b) - */ - -static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate by VA (AArch64 version) */ - ARMCPU *cpu = arm_env_get_cpu(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - - tlb_flush_page(CPU(cpu), pageaddr); -} - -static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate by VA, all ASIDs (AArch64 version) */ - ARMCPU *cpu = arm_env_get_cpu(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - - tlb_flush_page(CPU(cpu), pageaddr); -} - -static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate by ASID (AArch64 version) */ - ARMCPU *cpu = arm_env_get_cpu(env); - int asid = extract64(value, 48, 16); - tlb_flush(CPU(cpu), asid == 0); -} - -static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - //uint64_t pageaddr = sextract64(value << 12, 0, 56); - //struct uc_struct *uc = env->uc; - // TODO: issue #642 - // tlb_flush(other_cpu, pageaddr); -} - -static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - //uint64_t pageaddr = sextract64(value << 12, 0, 56); - //struct uc_struct *uc = env->uc; - // TODO: issue #642 - // tlb_flush(other_cpu, pageaddr); -} - -static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - //int asid = extract64(value, 48, 16); - //struct uc_struct *uc = env->uc; - // TODO: issue #642 - // tlb_flush(other_cpu, asid == 0); -} 
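Editor's note (illustration only, not part of the original patch): the tlbi_aa64_va_write
and tlbi_aa64_vaa_write helpers above rebuild the target page address from the TLBI
register argument, which carries VA[55:12] in its low bits. sextract64(value << 12, 0, 56)
shifts that field back into place and sign-extends from bit 55, so the ASID field in
bits [63:48] of the argument drops out. A minimal standalone sketch of the same
arithmetic, with a local sext64() standing in for QEMU's sextract64():

    #include <assert.h>
    #include <stdint.h>

    /* Sign-extend the low `len` bits of `x` (stand-in for QEMU's sextract64). */
    static int64_t sext64(uint64_t x, unsigned len)
    {
        return (int64_t)(x << (64 - len)) >> (64 - len);
    }

    int main(void)
    {
        /* Hypothetical TLBI VAE1 argument: ASID 0x42 in bits [63:48],
         * VA[55:12] = 0xdeadb in the low bits.
         */
        uint64_t value = (0x42ULL << 48) | 0xdeadbULL;
        uint64_t pageaddr = (uint64_t)sext64(value << 12, 56);

        /* The page-aligned VA is recovered; the ASID bits are gone. */
        assert(pageaddr == 0xdeadb000ULL);
        return 0;
    }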
- -static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri) -{ - /* We don't implement EL2, so the only control on DC ZVA is the - * bit in the SCTLR which can prohibit access for EL0. - */ - if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) { - return CP_ACCESS_TRAP; - } - return CP_ACCESS_OK; -} - -static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - int dzp_bit = 1 << 4; - - /* DZP indicates whether DC ZVA access is allowed */ - if (aa64_zva_access(env, NULL) == CP_ACCESS_OK) { - dzp_bit = 0; - } - return cpu->dcz_blocksize | dzp_bit; -} - -static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri) -{ - if (!(env->pstate & PSTATE_SP)) { - /* Access to SP_EL0 is undefined if it's being used as - * the stack pointer. - */ - return CP_ACCESS_TRAP_UNCATEGORIZED; - } - return CP_ACCESS_OK; -} - -static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return env->pstate & PSTATE_SP; -} - -static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) -{ - update_spsel(env, val); -} - -static const ARMCPRegInfo v8_cp_reginfo[] = { - /* Minimal set of EL0-visible registers. This will need to be expanded - * significantly for system emulation of AArch64 CPUs. - */ - { "NZCV", 0,4,2, 3,3,0, ARM_CP_STATE_AA64, - ARM_CP_NZCV, PL0_RW, }, - { "DAIF", 0,4,2, 3,3,1, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetof(CPUARMState, daif), - aa64_daif_access, NULL, aa64_daif_write, NULL,NULL, arm_cp_reset_ignore }, - { "FPCR", 0,4,4, 3,3,0, ARM_CP_STATE_AA64, - 0, PL0_RW, NULL, 0, 0, - NULL, aa64_fpcr_read, aa64_fpcr_write }, - { "FPSR", 0,4,4, 3,3,1, ARM_CP_STATE_AA64, - 0, PL0_RW, NULL, 0, 0, - NULL, aa64_fpsr_read, aa64_fpsr_write }, - { "DCZID_EL0", 0,0,0, 3,3,7, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL0_R, NULL, 0, 0, - NULL, aa64_dczid_read }, - { "DC_ZVA", 0,7,4, 1,3,1, ARM_CP_STATE_AA64, - ARM_CP_DC_ZVA, PL0_W, NULL, 0, 0, -#ifndef CONFIG_USER_ONLY - /* Avoid overhead of an access check that always passes in user-mode */ - aa64_zva_access, -#endif - }, - { "CURRENTEL", 0,4,2, 3,0,2, ARM_CP_STATE_AA64, - ARM_CP_CURRENTEL, PL1_R, }, - /* Cache ops: all NOPs since we don't emulate caches */ - { "IC_IALLUIS", 0,7,1, 1,0,0, ARM_CP_STATE_AA64, - ARM_CP_NOP, PL1_W, }, - { "IC_IALLU", 0,7,5, 1,0,0, ARM_CP_STATE_AA64, - ARM_CP_NOP, PL1_W, }, - { "IC_IVAU", 0,7,5, 1,3,1, ARM_CP_STATE_AA64, - ARM_CP_NOP, PL0_W, NULL, 0, 0, - aa64_cacheop_access }, - { "DC_IVAC", 0,7,6, 1,0,1, ARM_CP_STATE_AA64, - ARM_CP_NOP, PL1_W, }, - { "DC_ISW", 0,7,6, 1,0,2, ARM_CP_STATE_AA64, - ARM_CP_NOP, PL1_W, }, - { "DC_CVAC", 0,7,10, 1,3,1, ARM_CP_STATE_AA64, - ARM_CP_NOP, PL0_W, NULL, 0, 0, - aa64_cacheop_access }, - { "DC_CSW", 0,7,10, 1,0,2, ARM_CP_STATE_AA64, - ARM_CP_NOP, PL1_W, }, - { "DC_CVAU", 0,7,11, 1,3,1, ARM_CP_STATE_AA64, - ARM_CP_NOP, PL0_W, NULL, 0, 0, - aa64_cacheop_access }, - { "DC_CIVAC", 0,7,14, 1,3,1, ARM_CP_STATE_AA64, - ARM_CP_NOP, PL0_W, NULL, 0, 0, - aa64_cacheop_access }, - { "DC_CISW", 0,7,14, 1,0,2, ARM_CP_STATE_AA64, - ARM_CP_NOP, PL1_W, }, - /* TLBI operations */ - { "TLBI_VMALLE1IS", 0,8,3, 1,0,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiall_is_write }, - { "TLBI_VAE1IS", 0,8,3, 1,0,1, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbi_aa64_va_is_write }, - { "TLBI_ASIDE1IS", 0,8,3, 1,0,2, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, 
tlbi_aa64_asid_is_write }, - { "TLBI_VAAE1IS", 0,8,3, 1,0,3, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbi_aa64_vaa_is_write }, - { "TLBI_VALE1IS", 0,8,3, 1,0,5, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbi_aa64_va_is_write }, - { "TLBI_VAALE1IS", 0,8,3, 1,0,7, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbi_aa64_vaa_is_write }, - { "TLBI_VMALLE1", 0,8,7, 1,0,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbiall_write }, - { "TLBI_VAE1", 0,8,7, 1,0,1, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbi_aa64_va_write }, - { "TLBI_ASIDE1", 0,8,7, 1,0,2, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbi_aa64_asid_write }, - { "TLBI_VAAE1", 0,8,7, 1,0,3, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbi_aa64_vaa_write }, - { "TLBI_VALE1", 0,8,7, 1,0,5, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbi_aa64_va_write }, - { "TLBI_VAALE1", 0,8,7, 1,0,7, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbi_aa64_vaa_write }, -#ifndef CONFIG_USER_ONLY - /* 64 bit address translation operations */ - { "AT_S1E1R", 0,7,8, 1,0,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, ats_write }, - { "AT_S1E1W", 0,7,8, 1,0,1, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, ats_write }, - { "AT_S1E0R", 0,7,8, 1,0,2, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, ats_write }, - { "AT_S1E0W", 0,7,8, 1,0,3, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, ats_write }, -#endif - /* TLB invalidate last level of translation table walk */ - { "TLBIMVALIS", 15,8,3, 0,0,5, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimva_is_write }, - { "TLBIMVAALIS", 15,8,3, 0,0,7, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimvaa_is_write }, - { "TLBIMVAL", 15,8,7, 0,0,5, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimva_write }, - { "TLBIMVAAL", 15,8,7, 0,0,7, 0, - ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, - NULL, NULL, tlbimvaa_write }, - /* 32 bit cache operations */ - { "ICIALLUIS", 15,7,1, 0,0,0, 0, - ARM_CP_NOP, PL1_W }, - { "BPIALLUIS", 15,7,1, 0,0,6, 0, - ARM_CP_NOP, PL1_W }, - { "ICIALLU", 15,7,5, 0,0,0, 0, - ARM_CP_NOP, PL1_W }, - { "ICIMVAU", 15,7,5, 0,0,1, 0, - ARM_CP_NOP, PL1_W }, - { "BPIALL", 15,7,5, 0,0,6, 0, - ARM_CP_NOP, PL1_W }, - { "BPIMVA", 15,7,5, 0,0,7, 0, - ARM_CP_NOP, PL1_W }, - { "DCIMVAC", 15,7,6, 0,0,1, 0, - ARM_CP_NOP, PL1_W }, - { "DCISW", 15,7,6, 0,0,2, 0, - ARM_CP_NOP, PL1_W }, - { "DCCMVAC", 15,7,10, 0,0,1, 0, - ARM_CP_NOP, PL1_W }, - { "DCCSW", 15,7,10, 0,0,2, 0, - ARM_CP_NOP, PL1_W }, - { "DCCMVAU", 15,7,11, 0,0,1, 0, - ARM_CP_NOP, PL1_W }, - { "DCCIMVAC", 15,7,14, 0,0,1, 0, - ARM_CP_NOP, PL1_W }, - { "DCCISW", 15,7,14, 0,0,2, 0, - ARM_CP_NOP, PL1_W }, - /* MMU Domain access control / MPU write buffer control */ - { "DACR", 15,3,0, 0,0,0, 0, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c3), - NULL, NULL,dacr_write, NULL,raw_write, }, - { "ELR_EL1", 0,4,0, 3,0,1, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, elr_el[1]) }, - { "SPSR_EL1", 0,4,0, 3,0,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, banked_spsr[0]) }, - /* We rely on the access checks not allowing the guest to write to the - * state field when SPSel indicates that it's 
being used as the stack - * pointer. - */ - { "SP_EL0", 0,4,1, 3,0,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, sp_el[0]), - sp_el0_access, }, - { "SPSel", 0,4,2, 3,0,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, 0, - NULL, spsel_read, spsel_write }, - REGINFO_SENTINEL -}; - -/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */ -static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = { - { "VBAR_EL2", 0,12,0, 3,4,0, ARM_CP_STATE_AA64, - 0, PL2_RW, NULL, 0, 0, - NULL, arm_cp_read_zero, arm_cp_write_ignore }, - { "HCR_EL2", 0,1,1, 3,4,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, 0, - NULL, arm_cp_read_zero, arm_cp_write_ignore }, - REGINFO_SENTINEL -}; - -static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - uint64_t valid_mask = HCR_MASK; - - if (arm_feature(env, ARM_FEATURE_EL3)) { - valid_mask &= ~HCR_HCD; - } else { - valid_mask &= ~HCR_TSC; - } - - /* Clear RES0 bits. */ - value &= valid_mask; - - /* These bits change the MMU setup: - * HCR_VM enables stage 2 translation - * HCR_PTW forbids certain page-table setups - * HCR_DC Disables stage1 and enables stage2 translation - */ - if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { - tlb_flush(CPU(cpu), 1); - } - raw_write(env, ri, value); -} - -static const ARMCPRegInfo v8_el2_cp_reginfo[] = { - { "HCR_EL2", 0,1,1, 3,4,0, ARM_CP_STATE_AA64, - 0, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.hcr_el2), - NULL, NULL, hcr_write }, - { "ELR_EL2", 0,4,0, 3,4,1, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, offsetof(CPUARMState, elr_el[2]) }, - { "ESR_EL2", 0,5,2, 3,4,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.esr_el[2]) }, - { "FAR_EL2", 0,6,0, 3,4,0, ARM_CP_STATE_AA64, - 0, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.far_el[2]) }, - { "SPSR_EL2", 0,4,0, 3,4,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, offsetof(CPUARMState, banked_spsr[6]) }, - { "VBAR_EL2", 0,12,0, 3,4,0, ARM_CP_STATE_AA64, - 0, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.vbar_el[2]), - NULL, NULL, vbar_write, }, - REGINFO_SENTINEL -}; - -static const ARMCPRegInfo v8_el3_cp_reginfo[] = { - { "ELR_EL3", 0,4,0, 3,6,1, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, elr_el[3]) }, - { "ESR_EL3", 0,5,2, 3,6,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.esr_el[3]) }, - { "FAR_EL3", 0,6,0, 3,6,0, ARM_CP_STATE_AA64, - 0, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.far_el[3]) }, - { "SPSR_EL3", 0,4,0, 3,6,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, banked_spsr[7]) }, - { "VBAR_EL3", 0,12,0, 3,6,0, ARM_CP_STATE_AA64, - 0, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.vbar_el[3]), - NULL, NULL, vbar_write, }, - { "SCR_EL3", 0,1,1, 3,6,0, ARM_CP_STATE_AA64, - ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.scr_el3), - NULL, NULL, scr_write }, - REGINFO_SENTINEL -}; - -static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - if (raw_read(env, ri) == value) { - /* Skip the TLB flush if nothing actually changed; Linux likes - * to do a lot of pointless SCTLR writes. - */ - return; - } - - raw_write(env, ri, value); - /* ??? Lots of these bits are not implemented. */ - /* This may enable/disable the MMU, so do a TLB flush. 
*/ - tlb_flush(CPU(cpu), 1); -} - -static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri) -{ - /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64, - * but the AArch32 CTR has its own reginfo struct) - */ - if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) { - return CP_ACCESS_TRAP; - } - return CP_ACCESS_OK; -} - -static const ARMCPRegInfo debug_cp_reginfo[] = { - /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped - * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; - * unlike DBGDRAR it is never accessible from EL0. - * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 - * accessor. - */ - { "DBGDRAR", 14,1,0, 0,0,0, 0, - ARM_CP_CONST, PL0_R, NULL, 0 }, - { "MDRAR_EL1", 0,1,0, 2,0,0, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, 0 }, - { "DBGDSAR", 14,2,0, 0,0,0, 0, - ARM_CP_CONST, PL0_R, NULL, 0 }, - /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ - { "MDSCR_EL1", 14,0,2, 2,0,2, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.mdscr_el1), }, - /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. - * We don't implement the configurable EL0 access. - */ - { "MDCCSR_EL0", 14,0,1, 2,0,0, ARM_CP_STATE_BOTH, - ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, offsetof(CPUARMState, cp15.mdscr_el1), - NULL,NULL,NULL,NULL,NULL, arm_cp_reset_ignore }, - /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */ - { "OSLAR_EL1", 14,1,0, 2,0,4, ARM_CP_STATE_BOTH, - ARM_CP_NOP, PL1_W, }, - /* Dummy OSDLR_EL1: 32-bit Linux will read this */ - { "OSDLR_EL1", 14,1,3, 2,0,4, ARM_CP_STATE_BOTH, - ARM_CP_NOP, PL1_RW, }, - /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't - * implement vector catch debug events yet. - */ - { "DBGVCR", 14,0,7, 0,0,0, 0, - ARM_CP_NOP, PL1_RW, }, - REGINFO_SENTINEL -}; - -static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { - /* 64 bit access versions of the (dummy) debug registers */ - { "DBGDRAR", 14, 0,1, 0,0, 0, 0, - ARM_CP_CONST|ARM_CP_64BIT, PL0_R, NULL, 0 }, - { "DBGDSAR", 14, 0,2, 0,0, 0, 0, - ARM_CP_CONST|ARM_CP_64BIT, PL0_R, NULL, 0 }, - REGINFO_SENTINEL -}; - -void hw_watchpoint_update(ARMCPU *cpu, int n) -{ - CPUARMState *env = &cpu->env; - vaddr len = 0; - vaddr wvr = env->cp15.dbgwvr[n]; - uint64_t wcr = env->cp15.dbgwcr[n]; - int mask; - int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; - - if (env->cpu_watchpoint[n]) { - cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); - env->cpu_watchpoint[n] = NULL; - } - - if (!extract64(wcr, 0, 1)) { - /* E bit clear : watchpoint disabled */ - return; - } - - switch (extract64(wcr, 3, 2)) { - case 0: - /* LSC 00 is reserved and must behave as if the wp is disabled */ - return; - case 1: - flags |= BP_MEM_READ; - break; - case 2: - flags |= BP_MEM_WRITE; - break; - case 3: - flags |= BP_MEM_ACCESS; - break; - } - - /* Attempts to use both MASK and BAS fields simultaneously are - * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, - * thus generating a watchpoint for every byte in the masked region. - */ - mask = extract64(wcr, 24, 4); - if (mask == 1 || mask == 2) { - /* Reserved values of MASK; we must act as if the mask value was - * some non-reserved value, or as if the watchpoint were disabled. - * We choose the latter. 
- */
-        return;
-    } else if (mask) {
-        /* Watchpoint covers an aligned area up to 2GB in size */
-        len = 1ULL << mask;
-        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
-         * whether the watchpoint fires when the unmasked bits match; we opt
-         * to generate the exceptions.
-         */
-        wvr &= ~(len - 1);
-    } else {
-        /* Watchpoint covers bytes defined by the byte address select bits */
-        int bas = extract64(wcr, 5, 8);
-        int basstart;
-
-        if (bas == 0) {
-            /* This must act as if the watchpoint is disabled */
-            return;
-        }
-
-        if (extract64(wvr, 2, 1)) {
-            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
-             * ignored, and BAS[3:0] define which bytes to watch.
-             */
-            bas &= 0xf;
-        }
-        /* The BAS bits are supposed to be programmed to indicate a contiguous
-         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
-         * we fire for each byte in the word/doubleword addressed by the WVR.
-         * We choose to ignore any non-zero bits after the first range of 1s.
-         */
-        basstart = ctz32(bas);
-        len = cto32(bas >> (basstart & 0x1f));
-        wvr += basstart;
-    }
-
-    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
-                          &env->cpu_watchpoint[n]);
-}
-
-void hw_watchpoint_update_all(ARMCPU *cpu)
-{
-    int i;
-    CPUARMState *env = &cpu->env;
-
-    /* Completely clear out existing QEMU watchpoints and our array, to
-     * avoid possible stale entries following migration load.
-     */
-    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
-    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
-
-    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
-        hw_watchpoint_update(cpu, i);
-    }
-}
-
-static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                         uint64_t value)
-{
-    ARMCPU *cpu = arm_env_get_cpu(env);
-    int i = ri->crm;
-
-    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
-     * register reads and behaves as if values written are sign extended.
-     * Bits [1:0] are RES0.
-     */
-    value = sextract64(value, 0, 49) & ~3ULL;
-
-    raw_write(env, ri, value);
-    hw_watchpoint_update(cpu, i);
-}
-
-static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
-                         uint64_t value)
-{
-    ARMCPU *cpu = arm_env_get_cpu(env);
-    int i = ri->crm;
-
-    raw_write(env, ri, value);
-    hw_watchpoint_update(cpu, i);
-}
-
-void hw_breakpoint_update(ARMCPU *cpu, int n)
-{
-    CPUARMState *env = &cpu->env;
-    uint64_t bvr = env->cp15.dbgbvr[n];
-    uint64_t bcr = env->cp15.dbgbcr[n];
-    vaddr addr;
-    int bt;
-    int flags = BP_CPU;
-
-    if (env->cpu_breakpoint[n]) {
-        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
-        env->cpu_breakpoint[n] = NULL;
-    }
-
-    if (!extract64(bcr, 0, 1)) {
-        /* E bit clear : breakpoint disabled */
-        return;
-    }
-
-    bt = extract64(bcr, 20, 4);
-
-    switch (bt) {
-    case 4: /* unlinked address mismatch (reserved if AArch64) */
-    case 5: /* linked address mismatch (reserved if AArch64) */
-        qemu_log_mask(LOG_UNIMP,
-                      "arm: address mismatch breakpoint types not implemented");
-        return;
-    case 0: /* unlinked address match */
-    case 1: /* linked address match */
-    {
-        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
-         * we behave as if the register was sign extended. Bits [1:0] are
-         * RES0. The BAS field is used to allow setting breakpoints on 16
-         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
-         * a bp will fire if the addresses covered by the bp and the addresses
-         * covered by the insn overlap but the insn doesn't start at the
-         * start of the bp address range.
We choose to require the insn and - * the bp to have the same address. The constraints on writing to - * BAS enforced in dbgbcr_write mean we have only four cases: - * 0b0000 => no breakpoint - * 0b0011 => breakpoint on addr - * 0b1100 => breakpoint on addr + 2 - * 0b1111 => breakpoint on addr - * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). - */ - int bas = extract64(bcr, 5, 4); - addr = sextract64(bvr, 0, 49) & ~3ULL; - if (bas == 0) { - return; - } - if (bas == 0xc) { - addr += 2; - } - break; - } - case 2: /* unlinked context ID match */ - case 8: /* unlinked VMID match (reserved if no EL2) */ - case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ - qemu_log_mask(LOG_UNIMP, - "arm: unlinked context breakpoint types not implemented"); - return; - case 9: /* linked VMID match (reserved if no EL2) */ - case 11: /* linked context ID and VMID match (reserved if no EL2) */ - case 3: /* linked context ID match */ - default: - /* We must generate no events for Linked context matches (unless - * they are linked to by some other bp/wp, which is handled in - * updates for the linking bp/wp). We choose to also generate no events - * for reserved values. - */ - return; - } - - cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); -} - -void hw_breakpoint_update_all(ARMCPU *cpu) -{ - int i; - CPUARMState *env = &cpu->env; - - /* Completely clear out existing QEMU breakpoints and our array, to - * avoid possible stale entries following migration load. - */ - cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); - memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); - - for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { - hw_breakpoint_update(cpu, i); - } -} - -static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - int i = ri->crm; - - raw_write(env, ri, value); - hw_breakpoint_update(cpu, i); -} - -static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - int i = ri->crm; - - /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only - * copy of BAS[0]. - */ - value = deposit64(value, 6, 1, extract64(value, 5, 1)); - value = deposit64(value, 8, 1, extract64(value, 7, 1)); - - raw_write(env, ri, value); - hw_breakpoint_update(cpu, i); -} - -static void define_debug_regs(ARMCPU *cpu) -{ - /* Define v7 and v8 architectural debug registers. - * These are just dummy implementations for now. - */ - int i; - int wrps, brps, ctx_cmps; - ARMCPRegInfo dbgdidr = { - "DBGDIDR", 14,0,0, 0,0,0, 0, - ARM_CP_CONST, PL0_R, NULL, cpu->dbgdidr, - }; - - /* Note that all these register fields hold "number of Xs minus 1". */ - brps = extract32(cpu->dbgdidr, 24, 4); - wrps = extract32(cpu->dbgdidr, 28, 4); - ctx_cmps = extract32(cpu->dbgdidr, 20, 4); - - assert(ctx_cmps <= brps); - - /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties - * of the debug registers such as number of breakpoints; - * check that if they both exist then they agree. 
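Since every one of these DBGDIDR fields is stored as "number of Xs minus 1", the decode is a plain bitfield extract plus one. A self-contained sketch (extract32 is reimplemented locally so the example compiles on its own; the DBGDIDR value is a made-up example):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    int main(void)
    {
        uint32_t dbgdidr = 0x3515f005;   /* example ID register value */
        printf("breakpoints:     %u\n", extract32(dbgdidr, 24, 4) + 1);
        printf("watchpoints:     %u\n", extract32(dbgdidr, 28, 4) + 1);
        printf("ctx comparators: %u\n", extract32(dbgdidr, 20, 4) + 1);
        return 0;
    }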
- */ - if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); - assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); - assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); - } - - define_one_arm_cp_reg(cpu, &dbgdidr); - define_arm_cp_regs(cpu, debug_cp_reginfo); - - if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { - define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); - } - - for (i = 0; i < brps + 1; i++) { - ARMCPRegInfo dbgregs[] = { - { "DBGBVR", 14,0,i, 2,0,4,ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgbvr[i]), - NULL, NULL,dbgbvr_write, NULL,raw_write - }, - { "DBGBCR", 14,0,i, 2,0,5, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgbcr[i]), - NULL, NULL,dbgbcr_write, NULL,raw_write - }, - REGINFO_SENTINEL - }; - define_arm_cp_regs(cpu, dbgregs); - } - - for (i = 0; i < wrps + 1; i++) { - ARMCPRegInfo dbgregs[] = { - { "DBGWVR", 14,0,i, 2,0,6, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgwvr[i]), - NULL, NULL,dbgwvr_write, NULL,raw_write - }, - { "DBGWCR", 14,0,i, 2,0,7, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgwcr[i]), - NULL, NULL,dbgwcr_write, NULL,raw_write - }, - REGINFO_SENTINEL - }; - define_arm_cp_regs(cpu, dbgregs); - } -} - -void register_cp_regs_for_features(ARMCPU *cpu) -{ - /* Register all the coprocessor registers based on feature bits */ - CPUARMState *env = &cpu->env; - if (arm_feature(env, ARM_FEATURE_M)) { - /* M profile has no coprocessor registers */ - return; - } - - define_arm_cp_regs(cpu, cp_reginfo); - if (!arm_feature(env, ARM_FEATURE_V8)) { - /* Must go early as it is full of wildcards that may be - * overridden by later definitions. - */ - define_arm_cp_regs(cpu, not_v8_cp_reginfo); - } - - if (arm_feature(env, ARM_FEATURE_V6)) { - /* The ID registers all have impdef reset values */ - ARMCPRegInfo v6_idregs[] = { - { "ID_PFR0", 0,0,1, 3,0,0, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_pfr0 }, - { "ID_PFR1", 0,0,1, 3,0,1, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_pfr1 }, - { "ID_DFR0", 0,0,1, 3,0,2, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_dfr0 }, - { "ID_AFR0", 0,0,1, 3,0,3, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_afr0 }, - { "ID_MMFR0", 0,0,1, 3,0,4, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr0 }, - { "ID_MMFR1", 0,0,1, 3,0,5, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr1 }, - { "ID_MMFR2", 0,0,1, 3,0,6, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr2 }, - { "ID_MMFR3", 0,0,1, 3,0,7, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr3 }, - { "ID_ISAR0", 0,0,2, 3,0,0, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_isar0 }, - { "ID_ISAR1", 0,0,2, 3,0,1, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_isar1 }, - { "ID_ISAR2", 0,0,2, 3,0,2, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_isar2 }, - { "ID_ISAR3", 0,0,2, 3,0,3, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_isar3 }, - { "ID_ISAR4", 0,0,2, 3,0,4, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_isar4 }, - { "ID_ISAR5", 0,0,2, 3,0,5, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->id_isar5 }, - /* 6..7 are as yet unallocated and must RAZ */ - { "ID_ISAR6", 15,0,2, 0,0,6, 0, - ARM_CP_CONST, PL1_R, NULL, 0 }, - { "ID_ISAR7", 15,0,2, 0,0,7, 0, - ARM_CP_CONST, PL1_R, NULL, 0 }, - REGINFO_SENTINEL - }; - define_arm_cp_regs(cpu, v6_idregs); - define_arm_cp_regs(cpu, v6_cp_reginfo); - } else { - 
define_arm_cp_regs(cpu, not_v6_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_V6K)) { - define_arm_cp_regs(cpu, v6k_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_V7MP)) { - define_arm_cp_regs(cpu, v7mp_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_V7)) { - ARMCPRegInfo clidr = { - "CLIDR", 0,0,0, 3,1,1, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->clidr - }; - /* v7 performance monitor control register: same implementor - * field as main ID register, and we implement only the cycle - * count register. - */ -#ifndef CONFIG_USER_ONLY - ARMCPRegInfo pmcr = { - "PMCR", 15,9,12, 0,0,0, 0, - ARM_CP_IO | ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c9_pmcr), - pmreg_access, NULL,pmcr_write, NULL,raw_write, - }; - ARMCPRegInfo pmcr64 = { - "PMCR_EL0", 0,9,12, 3,3,0, ARM_CP_STATE_AA64, - ARM_CP_IO, PL0_RW, NULL, cpu->midr & 0xff000000, offsetof(CPUARMState, cp15.c9_pmcr), - pmreg_access, NULL,pmcr_write, NULL,raw_write, - }; - define_one_arm_cp_reg(cpu, &pmcr); - define_one_arm_cp_reg(cpu, &pmcr64); -#endif - define_one_arm_cp_reg(cpu, &clidr); - define_arm_cp_regs(cpu, v7_cp_reginfo); - define_debug_regs(cpu); - } else { - define_arm_cp_regs(cpu, not_v7_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_V8)) { - /* AArch64 ID registers, which all have impdef reset values */ - ARMCPRegInfo v8_idregs[] = { - { "ID_AA64PFR0_EL1", 0,0,4, 3,0,0, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64pfr0 }, - { "ID_AA64PFR1_EL1", 0,0,4, 3,0,1, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64pfr1}, - { "ID_AA64DFR0_EL1", 0,0,5, 3,0,0, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, - /* We mask out the PMUVer field, because we don't currently - * implement the PMU. Not advertising it prevents the guest - * from trying to use it and getting UNDEFs on registers we - * don't implement. - */ - cpu->id_aa64dfr0 & ~0xf00 }, - { "ID_AA64DFR1_EL1", 0,0,5, 3,0,1, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64dfr1 }, - { "ID_AA64AFR0_EL1", 0,0,5, 3,0,4, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64afr0 }, - { "ID_AA64AFR1_EL1", 0,0,5, 3,0,5, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64afr1 }, - { "ID_AA64ISAR0_EL1", 0,0,6, 3,0,0, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64isar0 }, - { "ID_AA64ISAR1_EL1", 0,0,6, 3,0,1, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64isar1 }, - { "ID_AA64MMFR0_EL1", 0,0,7, 3,0,0, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64mmfr0 }, - { "ID_AA64MMFR1_EL1", 0,0,7, 3,0,1, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64mmfr1 }, - { "MVFR0_EL1", 0,0,3, 3,0,0, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->mvfr0 }, - { "MVFR1_EL1", 0,0,3, 3,0,1, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->mvfr1 }, - { "MVFR2_EL1", 0,0,3, 3,0,2, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->mvfr2 }, - REGINFO_SENTINEL - }; - ARMCPRegInfo rvbar = { - "RVBAR_EL1", 0,12,0, 3,0,2, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cpu->rvbar - }; - define_one_arm_cp_reg(cpu, &rvbar); - define_arm_cp_regs(cpu, v8_idregs); - define_arm_cp_regs(cpu, v8_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_EL2)) { - define_arm_cp_regs(cpu, v8_el2_cp_reginfo); - } else { - /* If EL2 is missing but higher ELs are enabled, we need to - * register the no_el2 reginfos. 
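The ID_AA64DFR0_EL1 entry above clears PMUVer (bits [11:8]) before exposing the value, so a guest never probes for a PMU that is not implemented. Sketched standalone (the reset value below is hypothetical, chosen only to show the masking):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t id_aa64dfr0 = 0x10305106;           /* hypothetical reset value */
        uint64_t visible = id_aa64dfr0 & ~0xf00ULL;  /* PMUVer -> 0: not present */
        printf("guest-visible ID_AA64DFR0_EL1 = 0x%llx\n",
               (unsigned long long)visible);
        return 0;
    }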
- */ - if (arm_feature(env, ARM_FEATURE_EL3)) { - define_arm_cp_regs(cpu, v8_el3_no_el2_cp_reginfo); - } - } - if (arm_feature(env, ARM_FEATURE_EL3)) { - define_arm_cp_regs(cpu, v8_el3_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_MPU)) { - /* These are the MPU registers prior to PMSAv6. Any new - * PMSA core later than the ARM946 will require that we - * implement the PMSAv6 or PMSAv7 registers, which are - * completely different. - */ - assert(!arm_feature(env, ARM_FEATURE_V6)); - define_arm_cp_regs(cpu, pmsav5_cp_reginfo); - } else { - define_arm_cp_regs(cpu, vmsa_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { - define_arm_cp_regs(cpu, t2ee_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { - define_arm_cp_regs(cpu, generic_timer_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_VAPA)) { - define_arm_cp_regs(cpu, vapa_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { - define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { - define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { - define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_OMAPCP)) { - define_arm_cp_regs(cpu, omap_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_STRONGARM)) { - define_arm_cp_regs(cpu, strongarm_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - define_arm_cp_regs(cpu, xscale_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { - define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_LPAE)) { - define_arm_cp_regs(cpu, lpae_cp_reginfo); - } - /* Slightly awkwardly, the OMAP and StrongARM cores need all of - * cp15 crn=0 to be writes-ignored, whereas for other cores they should - * be read-only (ie write causes UNDEF exception). - */ - { - ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { - /* Pre-v8 MIDR space. - * Note that the MIDR isn't a simple constant register because - * of the TI925 behaviour where writes to another register can - * cause the MIDR value to change. - * - * Unimplemented registers in the c15 0 0 0 space default to - * MIDR. Define MIDR first as this entire space, then CTR, TCMTR - * and friends override accordingly. - */ - { "MIDR", 15,0,0, 0,0,CP_ANY, 0, - ARM_CP_OVERRIDE, PL1_R, NULL, cpu->midr, offsetof(CPUARMState, cp15.c0_cpuid), - NULL, NULL,arm_cp_write_ignore, NULL,raw_write, }, - /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */ - { "DUMMY", - 15,0,3, 0,0,CP_ANY, 0, - ARM_CP_CONST, PL1_R, NULL, 0 }, - { "DUMMY", - 15,0,4, 0,0,CP_ANY, 0, - ARM_CP_CONST, PL1_R, NULL, 0 }, - { "DUMMY", - 15,0,5, 0,0,CP_ANY, 0, - ARM_CP_CONST, PL1_R, NULL, 0 }, - { "DUMMY", - 15,0,6, 0,0,CP_ANY, 0, - ARM_CP_CONST, PL1_R, NULL, 0 }, - { "DUMMY", - 15,0,7, 0,0,CP_ANY, 0, - ARM_CP_CONST, PL1_R, NULL, 0 }, - REGINFO_SENTINEL - }; - ARMCPRegInfo id_v8_midr_cp_reginfo[] = { - /* v8 MIDR -- the wildcard isn't necessary, and nor is the - * variable-MIDR TI925 behaviour. Instead we have a single - * (strictly speaking IMPDEF) alias of the MIDR, REVIDR. 
- */ - { "MIDR_EL1", 0,0,0, 3,0,0, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->midr }, - { "REVIDR_EL1", 0,0,0, 3,0,6, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_R, NULL, cpu->midr }, - REGINFO_SENTINEL - }; - ARMCPRegInfo id_cp_reginfo[] = { - /* These are common to v8 and pre-v8 */ - { "CTR", 15,0,0, 0,0,1, 0, - ARM_CP_CONST, PL1_R, NULL, cpu->ctr }, - { "CTR_EL0", 0,0,0, 3,3,1, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL0_R, NULL, cpu->ctr, 0, - ctr_el0_access, }, - /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ - { "TCMTR", 15,0,0, 0,0,2, 0, - ARM_CP_CONST, PL1_R, NULL, 0 }, - { "TLBTR", 15,0,0, 0,0,3, 0, - ARM_CP_CONST, PL1_R, NULL, 0 }, - REGINFO_SENTINEL - }; - ARMCPRegInfo crn0_wi_reginfo = { - "CRN0_WI", 15,0,CP_ANY, 0,CP_ANY,CP_ANY, 0, - ARM_CP_NOP | ARM_CP_OVERRIDE, PL1_W, - }; - if (arm_feature(env, ARM_FEATURE_OMAPCP) || - arm_feature(env, ARM_FEATURE_STRONGARM)) { - ARMCPRegInfo *r; - /* Register the blanket "writes ignored" value first to cover the - * whole space. Then update the specific ID registers to allow write - * access, so that they ignore writes rather than causing them to - * UNDEF. - */ - define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); - for (r = id_pre_v8_midr_cp_reginfo; - r->type != ARM_CP_SENTINEL; r++) { - r->access = PL1_RW; - } - for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { - r->access = PL1_RW; - } - } - if (arm_feature(env, ARM_FEATURE_V8)) { - define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); - } else { - define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); - } - define_arm_cp_regs(cpu, id_cp_reginfo); - } - - if (arm_feature(env, ARM_FEATURE_MPIDR)) { - define_arm_cp_regs(cpu, mpidr_cp_reginfo); - } - - if (arm_feature(env, ARM_FEATURE_AUXCR)) { - ARMCPRegInfo auxcr = { - "ACTLR_EL1", 0,1,0, 3,0,1, ARM_CP_STATE_BOTH, - ARM_CP_CONST, PL1_RW, NULL, cpu->reset_auxcr - }; - define_one_arm_cp_reg(cpu, &auxcr); - } - - if (arm_feature(env, ARM_FEATURE_CBAR)) { - if (arm_feature(env, ARM_FEATURE_AARCH64)) { - /* 32 bit view is [31:18] 0...0 [43:32]. */ - uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) - | extract64(cpu->reset_cbar, 32, 12); - ARMCPRegInfo cbar_reginfo[] = { - { "CBAR", 15,15,0, 0,4,0, 0, - ARM_CP_CONST, PL1_R, NULL, cpu->reset_cbar }, - { "CBAR_EL1", 0,15,3, 3,1,0, ARM_CP_STATE_AA64, - ARM_CP_CONST, PL1_R, NULL, cbar32 }, - REGINFO_SENTINEL - }; - /* We don't implement a r/w 64 bit CBAR currently */ - assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); - define_arm_cp_regs(cpu, cbar_reginfo); - } else { - ARMCPRegInfo cbar = { - "CBAR", 15,15,0, 0,4,0, 0, - 0, PL1_R|PL3_W, NULL, cpu->reset_cbar, offsetof(CPUARMState, cp15.c15_config_base_address) - }; - if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { - cbar.access = PL1_R; - cbar.fieldoffset = 0; - cbar.type = ARM_CP_CONST; - } - define_one_arm_cp_reg(cpu, &cbar); - } - } - - /* Generic registers whose values depend on the implementation */ - { - ARMCPRegInfo sctlr = { - "SCTLR", 0,1,0, 3,0,0, ARM_CP_STATE_BOTH, - 0, PL1_RW, NULL, cpu->reset_sctlr, offsetof(CPUARMState, cp15.c1_sys), - NULL, NULL,sctlr_write, NULL,raw_write, - }; - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - /* Normally we would always end the TB on an SCTLR write, but Linux - * arch/arm/mach-pxa/sleep.S expects two instructions following - * an MMU enable to execute from cache. Imitate this behaviour. 
- */ - sctlr.type |= ARM_CP_SUPPRESS_TB_END; - } - define_one_arm_cp_reg(cpu, &sctlr); - } -} - -ARMCPU *cpu_arm_init(struct uc_struct *uc, const char *cpu_model) -{ - return ARM_CPU(uc, cpu_generic_init(uc, TYPE_ARM_CPU, cpu_model)); -} - -void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) -{ -#if 0 - CPUState *cs = CPU(cpu); - CPUARMState *env = &cpu->env; - - if (arm_feature(env, ARM_FEATURE_AARCH64)) { - gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, - aarch64_fpu_gdb_set_reg, - 34, "aarch64-fpu.xml", 0); - } else if (arm_feature(env, ARM_FEATURE_NEON)) { - gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 51, "arm-neon.xml", 0); - } else if (arm_feature(env, ARM_FEATURE_VFP3)) { - gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 35, "arm-vfp3.xml", 0); - } else if (arm_feature(env, ARM_FEATURE_VFP)) { - gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 19, "arm-vfp.xml", 0); - } -#endif -} - -/* Sort alphabetically by type name, except for "any". */ -#if 0 -static void arm_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - CPUListState *s = user_data; - const char *typename; - char *name; - - typename = object_class_get_name(oc); - name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); - (*s->cpu_fprintf)(s->file, " %s\n", - name); - g_free(name); -} -#endif - -void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf) -{ -#if 0 - CPUListState s = { - .file = f, - .cpu_fprintf = cpu_fprintf, - }; - GSList *list; - - list = object_class_get_list(TYPE_ARM_CPU, false); - list = g_slist_sort(list, arm_cpu_list_compare); - (*cpu_fprintf)(f, "Available CPUs:\n"); - g_slist_foreach(list, arm_cpu_list_entry, &s); - g_slist_free(list); -#ifdef CONFIG_KVM - /* The 'host' CPU type is dynamically registered only if KVM is - * enabled, so we have to special-case it here: - */ - (*cpu_fprintf)(f, " host (only available in KVM mode)\n"); -#endif -#endif -} - -static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, - void *opaque, int state, - int crm, int opc1, int opc2) -{ - /* Private utility function for define_one_arm_cp_reg_with_opaque(): - * add a single reginfo struct to the hash table. - */ - uint32_t *key = g_new(uint32_t, 1); - ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); - int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; - if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) { - /* The AArch32 view of a shared register sees the lower 32 bits - * of a 64 bit backing field. It is not migratable as the AArch64 - * view handles that. AArch64 also handles reset. - * We assume it is a cp15 register if the .cp field is left unset. - */ - if (r2->cp == 0) { - r2->cp = 15; - } - r2->type |= ARM_CP_NO_MIGRATE; - r2->resetfn = arm_cp_reset_ignore; -#ifdef HOST_WORDS_BIGENDIAN - if (r2->fieldoffset) { - r2->fieldoffset += sizeof(uint32_t); - } -#endif - } - if (state == ARM_CP_STATE_AA64) { - /* To allow abbreviation of ARMCPRegInfo - * definitions, we treat cp == 0 as equivalent to - * the value for "standard guest-visible sysreg". - * STATE_BOTH definitions are also always "standard - * sysreg" in their AArch64 view (the .cp value may - * be non-zero for the benefit of the AArch32 view). 
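Each reginfo added by the function below ultimately lands in the hashtable under a single 32-bit key built from (cp, is64, crn, crm, opc1, opc2). A sketch of that packing, assuming the field layout of QEMU's ENCODE_CP_REG macro (check cpu.h for the authoritative shifts):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: cp[31:16] is64[15] crn[14:11] crm[10:7] opc1[6:3] opc2[2:0] */
    static uint32_t encode_cp_reg(int cp, int is64, int crn, int crm,
                                  int opc1, int opc2)
    {
        return (cp << 16) | (is64 << 15) | (crn << 11) |
               (crm << 7) | (opc1 << 3) | opc2;
    }

    int main(void)
    {
        /* cp15, 32-bit, crn=1 crm=0 opc1=0 opc2=0: the SCTLR coordinate */
        printf("key = 0x%08x\n", encode_cp_reg(15, 0, 1, 0, 0, 0));
        return 0;
    }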
- */ - if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { - r2->cp = CP_REG_ARM64_SYSREG_CP; - } - *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, - r2->opc0, opc1, opc2); - } else { - *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2); - } - if (opaque) { - r2->opaque = opaque; - } - /* reginfo passed to helpers is correct for the actual access, - * and is never ARM_CP_STATE_BOTH: - */ - r2->state = state; - /* Make sure reginfo passed to helpers for wildcarded regs - * has the correct crm/opc1/opc2 for this reg, not CP_ANY: - */ - r2->crm = crm; - r2->opc1 = opc1; - r2->opc2 = opc2; - /* By convention, for wildcarded registers only the first - * entry is used for migration; the others are marked as - * NO_MIGRATE so we don't try to transfer the register - * multiple times. Special registers (ie NOP/WFI) are - * never migratable. - */ - if ((r->type & ARM_CP_SPECIAL) || - ((r->crm == CP_ANY) && crm != 0) || - ((r->opc1 == CP_ANY) && opc1 != 0) || - ((r->opc2 == CP_ANY) && opc2 != 0)) { - r2->type |= ARM_CP_NO_MIGRATE; - } - - /* Overriding of an existing definition must be explicitly - * requested. - */ - if (!(r->type & ARM_CP_OVERRIDE)) { - ARMCPRegInfo *oldreg; - oldreg = g_hash_table_lookup(cpu->cp_regs, key); - if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { - fprintf(stderr, "Register redefined: cp=%d %d bit " - "crn=%d crm=%d opc1=%d opc2=%d, " - "was %s, now %s\n", r2->cp, 32 + 32 * is64, - r2->crn, r2->crm, r2->opc1, r2->opc2, - oldreg->name, r2->name); - g_assert_not_reached(); - } - } - g_hash_table_insert(cpu->cp_regs, key, r2); -} - - -void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, - const ARMCPRegInfo *r, void *opaque) -{ - /* Define implementations of coprocessor registers. - * We store these in a hashtable because typically - * there are less than 150 registers in a space which - * is 16*16*16*8*8 = 262144 in size. - * Wildcarding is supported for the crm, opc1 and opc2 fields. - * If a register is defined twice then the second definition is - * used, so this can be used to define some generic registers and - * then override them with implementation specific variations. - * At least one of the original and the second definition should - * include ARM_CP_OVERRIDE in its type bits -- this is just a guard - * against accidental use. - * - * The state field defines whether the register is to be - * visible in the AArch32 or AArch64 execution state. If the - * state is set to ARM_CP_STATE_BOTH then we synthesise a - * reginfo structure for the AArch32 view, which sees the lower - * 32 bits of the 64 bit register. - * - * Only registers visible in AArch64 may set r->opc0; opc0 cannot - * be wildcarded. AArch64 registers are always considered to be 64 - * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of - * the register, if any. - */ - int crm, opc1, opc2, state; - int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; - int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; - int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; - int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; - int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; - int opc2max = (r->opc2 == CP_ANY) ? 
7 : r->opc2; - /* 64 bit registers have only CRm and Opc1 fields */ - assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); - /* op0 only exists in the AArch64 encodings */ - assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); - /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ - assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); - /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 - * encodes a minimum access level for the register. We roll this - * runtime check into our general permission check code, so check - * here that the reginfo's specified permissions are strict enough - * to encompass the generic architectural permission check. - */ - if (r->state != ARM_CP_STATE_AA32) { - int mask = 0; - switch (r->opc1) { - case 0: case 1: case 2: - /* min_EL EL1 */ - mask = PL1_RW; - break; - case 3: - /* min_EL EL0 */ - mask = PL0_RW; - break; - case 4: - /* min_EL EL2 */ - mask = PL2_RW; - break; - case 5: - /* unallocated encoding, so not possible */ - assert(false); - break; - case 6: - /* min_EL EL3 */ - mask = PL3_RW; - break; - case 7: - /* min_EL EL1, secure mode only (we don't check the latter) */ - mask = PL1_RW; - break; - default: - /* broken reginfo with out-of-range opc1 */ - assert(false); - break; - } - /* assert our permissions are not too lax (stricter is fine) */ - assert((r->access & ~mask) == 0); - } - - /* Check that the register definition has enough info to handle - * reads and writes if they are permitted. - */ - if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { - if (r->access & PL3_R) { - assert(r->fieldoffset || r->readfn); - } - if (r->access & PL3_W) { - assert(r->fieldoffset || r->writefn); - } - } - /* Bad type field probably means missing sentinel at end of reg list */ - assert(cptype_valid(r->type)); - for (crm = crmmin; crm <= crmmax; crm++) { - for (opc1 = opc1min; opc1 <= opc1max; opc1++) { - for (opc2 = opc2min; opc2 <= opc2max; opc2++) { - for (state = ARM_CP_STATE_AA32; - state <= ARM_CP_STATE_AA64; state++) { - if (r->state != state && r->state != ARM_CP_STATE_BOTH) { - continue; - } - add_cpreg_to_hashtable(cpu, r, opaque, state, - crm, opc1, opc2); - } - } - } - } -} - -void define_arm_cp_regs_with_opaque(ARMCPU *cpu, - const ARMCPRegInfo *regs, void *opaque) -{ - /* Define a whole list of registers */ - const ARMCPRegInfo *r; - for (r = regs; r->type != ARM_CP_SENTINEL; r++) { - define_one_arm_cp_reg_with_opaque(cpu, r, opaque); - } -} - -const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) -{ - return g_hash_table_lookup(cpregs, &encoded_cp); -} - -void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Helper coprocessor write function for write-ignore registers */ -} - -uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) -{ - /* Helper coprocessor read function for read-as-zero registers */ - return 0; -} - -void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) -{ - /* Helper coprocessor reset function for do-nothing-on-reset registers */ -} - -static int bad_mode_switch(CPUARMState *env, int mode) -{ - /* Return true if it is not valid for us to switch to - * this CPU mode (ie all the UNPREDICTABLE cases in - * the ARM ARM CPSRWriteByInstr pseudocode).
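The opc1 switch above is effectively the whole CheckSystemAccess() minimum-EL table. Restated as a standalone lookup for reference (returns an EL number rather than the real PLx_RW access masks):

    #include <stdio.h>

    static int opc1_min_el(int opc1)
    {
        switch (opc1) {
        case 0: case 1: case 2: return 1;   /* min_EL EL1 */
        case 3:                 return 0;   /* min_EL EL0 */
        case 4:                 return 2;   /* min_EL EL2 */
        case 6:                 return 3;   /* min_EL EL3 */
        case 7:                 return 1;   /* min_EL EL1, secure only */
        default:                return -1;  /* 5: unallocated encoding */
        }
    }

    int main(void)
    {
        printf("opc1=3 -> EL%d\n", opc1_min_el(3));
        return 0;
    }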
- */ - switch (mode) { - case ARM_CPU_MODE_USR: - case ARM_CPU_MODE_SYS: - case ARM_CPU_MODE_SVC: - case ARM_CPU_MODE_ABT: - case ARM_CPU_MODE_UND: - case ARM_CPU_MODE_IRQ: - case ARM_CPU_MODE_FIQ: - return 0; - case ARM_CPU_MODE_MON: - return !arm_is_secure(env); - default: - return 1; - } -} - -uint32_t cpsr_read(CPUARMState *env) -{ - int ZF; - ZF = (env->ZF == 0); - return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | - (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) - | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) - | ((env->condexec_bits & 0xfc) << 8) - | (env->GE << 16) | (env->daif & CPSR_AIF); -} - -void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) -{ - if (mask & CPSR_NZCV) { - env->ZF = (~val) & CPSR_Z; - env->NF = val; - env->CF = (val >> 29) & 1; - env->VF = (val << 3) & 0x80000000; - } - if (mask & CPSR_Q) - env->QF = ((val & CPSR_Q) != 0); - if (mask & CPSR_T) - env->thumb = ((val & CPSR_T) != 0); - if (mask & CPSR_IT_0_1) { - env->condexec_bits &= ~3; - env->condexec_bits |= (val >> 25) & 3; - } - if (mask & CPSR_IT_2_7) { - env->condexec_bits &= 3; - env->condexec_bits |= (val >> 8) & 0xfc; - } - if (mask & CPSR_GE) { - env->GE = (val >> 16) & 0xf; - } - - env->daif &= ~(CPSR_AIF & mask); - env->daif |= val & CPSR_AIF & mask; - - if ((env->uncached_cpsr ^ val) & mask & CPSR_M) { - if (bad_mode_switch(env, val & CPSR_M)) { - /* Attempt to switch to an invalid mode: this is UNPREDICTABLE. - * We choose to ignore the attempt and leave the CPSR M field - * untouched. - */ - mask &= ~CPSR_M; - } else { - switch_mode(env, val & CPSR_M); - } - } - mask &= ~CACHED_CPSR_BITS; - env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); -} - -/* Sign/zero extend */ -uint32_t HELPER(sxtb16)(uint32_t x) -{ - uint32_t res; - res = (uint16_t)(int8_t)x; - res |= (uint32_t)(int8_t)(x >> 16) << 16; - return res; -} - -uint32_t HELPER(uxtb16)(uint32_t x) -{ - uint32_t res; - res = (uint16_t)(uint8_t)x; - res |= (uint32_t)(uint8_t)(x >> 16) << 16; - return res; -} - -uint32_t HELPER(clz_arm)(uint32_t x) -{ - return clz32(x); -} - -int32_t HELPER(sdiv)(int32_t num, int32_t den) -{ - if (den == 0) - return 0; - if (num == INT_MIN && den == -1) - return INT_MIN; - return num / den; -} - -uint32_t HELPER(udiv)(uint32_t num, uint32_t den) -{ - if (den == 0) - return 0; - return num / den; -} - -uint32_t HELPER(rbit)(uint32_t x) -{ - x = ((x & 0xff000000) >> 24) - | ((x & 0x00ff0000) >> 8) - | ((x & 0x0000ff00) << 8) - | ((x & 0x000000ff) << 24); - x = ((x & 0xf0f0f0f0) >> 4) - | ((x & 0x0f0f0f0f) << 4); - x = ((x & 0x88888888) >> 3) - | ((x & 0x44444444) >> 1) - | ((x & 0x22222222) << 1) - | ((x & 0x11111111) << 3); - return x; -} - -#if defined(CONFIG_USER_ONLY) - -int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, - int mmu_idx) -{ - ARMCPU *cpu = ARM_CPU(NULL, cs); - CPUARMState *env = &cpu->env; - - env->exception.vaddress = address; - if (rw == 2) { - cs->exception_index = EXCP_PREFETCH_ABORT; - } else { - cs->exception_index = EXCP_DATA_ABORT; - } - return 1; -} - -/* These should probably raise undefined insn exceptions. 
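cpsr_read() above reassembles NZCV from the split flag fields: NF contributes its bit 31, ZF is tested against zero (Z is set iff the stored result was zero), CF is already 0 or 1, and VF keeps overflow in bit 31 before shifting into place. A self-contained round of that packing:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t nf = 0x80000000;  /* last result was negative */
        uint32_t zf = 5;           /* Z is set iff this field is 0 */
        uint32_t cf = 1;           /* carry, already 0 or 1 */
        uint32_t vf = 0;           /* overflow lives in bit 31 */
        uint32_t cpsr = (nf & 0x80000000)
                      | (((zf == 0) ? 1u : 0u) << 30)
                      | (cf << 29)
                      | ((vf & 0x80000000) >> 3);
        printf("NZCV = 0x%08x\n", cpsr);   /* 0xa0000000: N and C set */
        return 0;
    }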
*/ -void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - cpu_abort(CPU(cpu), "v7m_msr %d\n", reg); -} - -uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg); - return 0; -} - -void switch_mode(CPUARMState *env, int mode) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - if (mode != ARM_CPU_MODE_USR) { - cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); - } -} - -void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - cpu_abort(CPU(cpu), "banked r13 write\n"); -} - -uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - cpu_abort(CPU(cpu), "banked r13 read\n"); - return 0; -} - -unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx) -{ - return 1; -} - -#else - -/* Map CPU modes onto saved register banks. */ -int bank_number(int mode) -{ - switch (mode) { - default: - case ARM_CPU_MODE_USR: - case ARM_CPU_MODE_SYS: - return 0; - case ARM_CPU_MODE_SVC: - return 1; - case ARM_CPU_MODE_ABT: - return 2; - case ARM_CPU_MODE_UND: - return 3; - case ARM_CPU_MODE_IRQ: - return 4; - case ARM_CPU_MODE_FIQ: - return 5; - case ARM_CPU_MODE_HYP: - return 6; - case ARM_CPU_MODE_MON: - return 7; - } - //hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode); -} - -void switch_mode(CPUARMState *env, int mode) -{ - int old_mode; - int i; - - old_mode = env->uncached_cpsr & CPSR_M; - if (mode == old_mode) - return; - - if (old_mode == ARM_CPU_MODE_FIQ) { - memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); - memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); - } else if (mode == ARM_CPU_MODE_FIQ) { - memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); - memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); - } - - i = bank_number(old_mode); - env->banked_r13[i] = env->regs[13]; - env->banked_r14[i] = env->regs[14]; - env->banked_spsr[i] = env->spsr; - - i = bank_number(mode); - env->regs[13] = env->banked_r13[i]; - env->regs[14] = env->banked_r14[i]; - env->spsr = env->banked_spsr[i]; -} - -/* - * Determine the target EL for a given exception type. - */ -unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx) -{ - CPUARMState *env = cs->env_ptr; - unsigned int cur_el = arm_current_el(env); - unsigned int target_el; - /* FIXME: Use actual secure state. */ - bool secure = false; - - if (!env->aarch64) { - /* TODO: Add EL2 and 3 exception handling for AArch32. */ - return 1; - } - - switch (excp_idx) { - case EXCP_HVC: - case EXCP_HYP_TRAP: - target_el = 2; - break; - case EXCP_SMC: - target_el = 3; - break; - case EXCP_FIQ: - case EXCP_IRQ: - { - const uint64_t hcr_mask = excp_idx == EXCP_FIQ ? HCR_FMO : HCR_IMO; - const uint32_t scr_mask = excp_idx == EXCP_FIQ ? 
SCR_FIQ : SCR_IRQ; - - target_el = 1; - if (!secure && (env->cp15.hcr_el2 & hcr_mask)) { - target_el = 2; - } - if (env->cp15.scr_el3 & scr_mask) { - target_el = 3; - } - break; - } - case EXCP_VIRQ: - case EXCP_VFIQ: - target_el = 1; - break; - default: - target_el = MAX(cur_el, 1); - break; - } - return target_el; -} - -static void v7m_push(CPUARMState *env, uint32_t val) -{ - CPUState *cs = CPU(arm_env_get_cpu(env)); - - env->regs[13] -= 4; - stl_phys(cs->as, env->regs[13], val); -} - -static uint32_t v7m_pop(CPUARMState *env) -{ - CPUState *cs = CPU(arm_env_get_cpu(env)); - uint32_t val; - - val = ldl_phys(cs->as, env->regs[13]); - env->regs[13] += 4; - return val; -} - -/* Switch to V7M main or process stack pointer. */ -static void switch_v7m_sp(CPUARMState *env, int process) -{ - uint32_t tmp; - if (env->v7m.current_sp != process) { - tmp = env->v7m.other_sp; - env->v7m.other_sp = env->regs[13]; - env->regs[13] = tmp; - env->v7m.current_sp = process; - } -} - -static void do_v7m_exception_exit(CPUARMState *env) -{ - uint32_t type; - uint32_t xpsr; - - type = env->regs[15]; - //if (env->v7m.exception != 0) - // armv7m_nvic_complete_irq(env->nvic, env->v7m.exception); - - /* Switch to the target stack. */ - switch_v7m_sp(env, (type & 4) != 0); - /* Pop registers. */ - env->regs[0] = v7m_pop(env); - env->regs[1] = v7m_pop(env); - env->regs[2] = v7m_pop(env); - env->regs[3] = v7m_pop(env); - env->regs[12] = v7m_pop(env); - env->regs[14] = v7m_pop(env); - env->regs[15] = v7m_pop(env); - xpsr = v7m_pop(env); - xpsr_write(env, xpsr, 0xfffffdff); - /* Undo stack alignment. */ - if (xpsr & 0x200) - env->regs[13] |= 4; - /* ??? The exception return type specifies Thread/Handler mode. However - this is also implied by the xPSR value. Not sure what to do - if there is a mismatch. */ - /* ??? Likewise for mismatches between the CONTROL register and the stack - pointer. */ -} - -void arm_v7m_cpu_do_interrupt(CPUState *cs) -{ - CPUARMState *env = cs->env_ptr; - uint32_t xpsr = xpsr_read(env); - uint32_t lr; - uint32_t addr; - - arm_log_exception(cs->exception_index); - - lr = 0xfffffff1; - if (env->v7m.current_sp) - lr |= 4; - if (env->v7m.exception == 0) - lr |= 8; - - /* For exceptions we just mark as pending on the NVIC, and let that - handle it. */ - /* TODO: Need to escalate if the current priority is higher than the - one we're raising. */ - switch (cs->exception_index) { - case EXCP_UDEF: - //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE); - return; - case EXCP_SWI: - /* The PC already points to the next instruction. */ - //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC); - return; - case EXCP_PREFETCH_ABORT: - case EXCP_DATA_ABORT: - /* TODO: if we implemented the MPU registers, this is where we - * should set the MMFAR, etc from exception.fsr and exception.vaddress. - */ - //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM); - return; - case EXCP_BKPT: -#if 0 - if (semihosting_enabled) { - int nr; - nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff; - if (nr == 0xab) { - env->regs[15] += 2; - env->regs[0] = do_arm_semihosting(env); - qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n"); - return; - } - } -#endif - //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG); - return; - case EXCP_IRQ: - //env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic); - break; - case EXCP_EXCEPTION_EXIT: - do_v7m_exception_exit(env); - return; - default: - cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); - return; /* Never happens. 
Keep compiler happy. */ - } - - /* Align stack pointer. */ - /* ??? Should only do this if Configuration Control Register - STACKALIGN bit is set. */ - if (env->regs[13] & 4) { - env->regs[13] -= 4; - xpsr |= 0x200; - } - /* Switch to the handler mode. */ - v7m_push(env, xpsr); - v7m_push(env, env->regs[15]); - v7m_push(env, env->regs[14]); - v7m_push(env, env->regs[12]); - v7m_push(env, env->regs[3]); - v7m_push(env, env->regs[2]); - v7m_push(env, env->regs[1]); - v7m_push(env, env->regs[0]); - switch_v7m_sp(env, 0); - /* Clear IT bits */ - env->condexec_bits = 0; - env->regs[14] = lr; - addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4); - env->regs[15] = addr & 0xfffffffe; - env->thumb = addr & 1; -} - -/* Handle a CPU exception. */ -void arm_cpu_do_interrupt(CPUState *cs) -{ - CPUARMState *env = cs->env_ptr; - ARMCPU *cpu = ARM_CPU(env->uc, cs); - uint32_t addr; - uint32_t mask; - int new_mode; - uint32_t offset; - uint32_t moe; - - assert(!IS_M(env)); - - arm_log_exception(cs->exception_index); - - if (arm_is_psci_call(cpu, cs->exception_index)) { - arm_handle_psci_call(cpu); - qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); - return; - } - - /* If this is a debug exception we must update the DBGDSCR.MOE bits */ - switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) { - case EC_BREAKPOINT: - case EC_BREAKPOINT_SAME_EL: - moe = 1; - break; - case EC_WATCHPOINT: - case EC_WATCHPOINT_SAME_EL: - moe = 10; - break; - case EC_AA32_BKPT: - moe = 3; - break; - case EC_VECTORCATCH: - moe = 5; - break; - default: - moe = 0; - break; - } - - if (moe) { - env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); - } - - /* TODO: Vectored interrupt controller. */ - switch (cs->exception_index) { - case EXCP_UDEF: - new_mode = ARM_CPU_MODE_UND; - addr = 0x04; - mask = CPSR_I; - if (env->thumb) - offset = 2; - else - offset = 4; - break; - case EXCP_SWI: -#if 0 - if (semihosting_enabled) { - /* Check for semihosting interrupt. */ - if (env->thumb) { - mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code) - & 0xff; - } else { - mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code) - & 0xffffff; - } - /* Only intercept calls from privileged modes, to provide some - semblance of security. */ - if (((mask == 0x123456 && !env->thumb) - || (mask == 0xab && env->thumb)) - && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) { - env->regs[0] = do_arm_semihosting(env); - qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n"); - return; - } - } -#endif - new_mode = ARM_CPU_MODE_SVC; - addr = 0x08; - mask = CPSR_I; - /* The PC already points to the next instruction. */ - offset = 0; - break; - case EXCP_BKPT: -#if 0 - /* See if this is a semihosting syscall. */ - if (env->thumb && semihosting_enabled) { - mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff; - if (mask == 0xab - && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) { - env->regs[15] += 2; - env->regs[0] = do_arm_semihosting(env); - qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n"); - return; - } - } -#endif - env->exception.fsr = 2; - /* Fall through to prefetch abort. 
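The eight v7m_push() calls above lay down the standard ARMv7-M exception frame, pushed high-to-low on a full-descending stack so r0 ends up at the lowest address. Its layout, sketched as a struct purely for reference (not a type used by this code):

    #include <stdint.h>
    #include <stdio.h>

    struct v7m_frame {            /* at the new SP, lowest address first */
        uint32_t r0, r1, r2, r3;
        uint32_t r12;
        uint32_t lr;
        uint32_t pc;              /* return address */
        uint32_t xpsr;            /* bit 9 records the alignment adjustment */
    };

    int main(void)
    {
        printf("frame size = %zu bytes\n", sizeof(struct v7m_frame)); /* 32 */
        return 0;
    }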
*/ - case EXCP_PREFETCH_ABORT: - env->cp15.ifsr_el2 = env->exception.fsr; - env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 32, 32, - env->exception.vaddress); - qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", - env->cp15.ifsr_el2, (uint32_t)env->exception.vaddress); - new_mode = ARM_CPU_MODE_ABT; - addr = 0x0c; - mask = CPSR_A | CPSR_I; - offset = 4; - break; - case EXCP_DATA_ABORT: - env->cp15.esr_el[1] = env->exception.fsr; - env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 0, 32, - env->exception.vaddress); - qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", - (uint32_t)env->cp15.esr_el[1], - (uint32_t)env->exception.vaddress); - new_mode = ARM_CPU_MODE_ABT; - addr = 0x10; - mask = CPSR_A | CPSR_I; - offset = 8; - break; - case EXCP_IRQ: - new_mode = ARM_CPU_MODE_IRQ; - addr = 0x18; - /* Disable IRQ and imprecise data aborts. */ - mask = CPSR_A | CPSR_I; - offset = 4; - break; - case EXCP_FIQ: - new_mode = ARM_CPU_MODE_FIQ; - addr = 0x1c; - /* Disable FIQ, IRQ and imprecise data aborts. */ - mask = CPSR_A | CPSR_I | CPSR_F; - offset = 4; - break; - case EXCP_SMC: - new_mode = ARM_CPU_MODE_MON; - addr = 0x08; - mask = CPSR_A | CPSR_I | CPSR_F; - offset = 0; - break; - default: - cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); - return; /* Never happens. Keep compiler happy. */ - } - /* High vectors. */ - if (env->cp15.c1_sys & SCTLR_V) { - /* when enabled, base address cannot be remapped. */ - addr += 0xffff0000; - } else { - /* ARM v7 architectures provide a vector base address register to remap - * the interrupt vector table. - * This register is only honoured in non-monitor mode, and has a secure - * and a non-secure copy. Since the cpu always operates in non-secure state - * and is never in monitor mode, this feature is always active. - * Note: only bits 31:5 are valid. - */ - addr += env->cp15.vbar_el[1]; - } - - if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { - env->cp15.scr_el3 &= ~SCR_NS; - } - - switch_mode (env, new_mode); - /* For exceptions taken to AArch32 we must clear the SS bit in both - * PSTATE and in the old-state value we save to SPSR_, so zero it now. - */ - env->uncached_cpsr &= ~PSTATE_SS; - env->spsr = cpsr_read(env); - /* Clear IT bits. */ - env->condexec_bits = 0; - /* Switch to the new mode, and to the correct instruction set. */ - env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; - env->daif |= mask; - /* this is a lie, as there was no c1_sys on V4T/V5, but who cares - * and we should just guard the thumb mode on V4 */ - if (arm_feature(env, ARM_FEATURE_V4T)) { - env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0; - } - env->regs[14] = env->regs[15] + offset; - env->regs[15] = addr; - cs->interrupt_request |= CPU_INTERRUPT_EXITTB; -} - -/* Check section/page access permissions. - Returns the page protection flags, or zero if the access is not - permitted. */ -static inline int check_ap(CPUARMState *env, int ap, int domain_prot, - int access_type, int is_user) -{ - int prot_ro; - - if (domain_prot == 3) { - return PAGE_READ | PAGE_WRITE; - } - - if (access_type == 1) - prot_ro = 0; - else - prot_ro = PAGE_READ; - - switch (ap) { - case 0: - if (arm_feature(env, ARM_FEATURE_V7)) { - return 0; - } - if (access_type == 1) - return 0; - switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) { - case SCTLR_S: - return is_user ? 0 : PAGE_READ; - case SCTLR_R: - return PAGE_READ; - default: - return 0; - } - case 1: - return is_user ?
0 : PAGE_READ | PAGE_WRITE; - case 2: - if (is_user) - return prot_ro; - else - return PAGE_READ | PAGE_WRITE; - case 3: - return PAGE_READ | PAGE_WRITE; - case 4: /* Reserved. */ - return 0; - case 5: - return is_user ? 0 : prot_ro; - case 6: - return prot_ro; - case 7: - if (!arm_feature (env, ARM_FEATURE_V6K)) - return 0; - return prot_ro; - default: - abort(); - } -} - -static bool get_level1_table_address(CPUARMState *env, uint32_t *table, - uint32_t address) -{ - if (address & env->cp15.c2_mask) { - if ((env->cp15.c2_control & TTBCR_PD1)) { - /* Translation table walk disabled for TTBR1 */ - return false; - } - *table = env->cp15.ttbr1_el1 & 0xffffc000; - } else { - if ((env->cp15.c2_control & TTBCR_PD0)) { - /* Translation table walk disabled for TTBR0 */ - return false; - } - *table = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask; - } - *table |= (address >> 18) & 0x3ffc; - return true; -} - -static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type, - int is_user, hwaddr *phys_ptr, - int *prot, target_ulong *page_size) -{ - CPUState *cs = CPU(arm_env_get_cpu(env)); - int code; - uint32_t table; - uint32_t desc; - int type; - int ap; - int domain = 0; - int domain_prot; - hwaddr phys_addr; - - /* Pagetable walk. */ - /* Lookup l1 descriptor. */ - if (!get_level1_table_address(env, &table, address)) { - /* Section translation fault if page walk is disabled by PD0 or PD1 */ - code = 5; - goto do_fault; - } - desc = ldl_phys(cs->as, table); - type = (desc & 3); - domain = (desc >> 5) & 0x0f; - domain_prot = (env->cp15.c3 >> (domain * 2)) & 3; - if (type == 0) { - /* Section translation fault. */ - code = 5; - goto do_fault; - } - if (domain_prot == 0 || domain_prot == 2) { - if (type == 2) - code = 9; /* Section domain fault. */ - else - code = 11; /* Page domain fault. */ - goto do_fault; - } - if (type == 2) { - /* 1Mb section. */ - phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); - ap = (desc >> 10) & 3; - code = 13; - *page_size = 1024 * 1024; - } else { - /* Lookup l2 entry. */ - if (type == 1) { - /* Coarse pagetable. */ - table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); - } else { - /* Fine pagetable. */ - table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); - } - desc = ldl_phys(cs->as, table); - switch (desc & 3) { - case 0: /* Page translation fault. */ - code = 7; - goto do_fault; - case 1: /* 64k page. */ - phys_addr = (desc & 0xffff0000) | (address & 0xffff); - ap = (desc >> (4 + ((address >> 13) & 6))) & 3; - *page_size = 0x10000; - break; - case 2: /* 4k page. */ - phys_addr = (desc & 0xfffff000) | (address & 0xfff); - ap = (desc >> (4 + ((address >> 9) & 6))) & 3; - *page_size = 0x1000; - break; - case 3: /* 1k page. */ - if (type == 1) { - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - phys_addr = (desc & 0xfffff000) | (address & 0xfff); - } else { - /* Page translation fault. */ - code = 7; - goto do_fault; - } - } else { - phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); - } - ap = (desc >> 4) & 3; - *page_size = 0x400; - break; - default: - /* Never happens, but compiler isn't smart enough to tell. */ - abort(); - } - code = 15; - } - *prot = check_ap(env, ap, domain_prot, access_type, is_user); - if (!*prot) { - /* Access permission fault. 
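get_level1_table_address() above selects TTBR0 or TTBR1 and then indexes the table with four bytes per 1MB of virtual address. The address arithmetic, standalone (base and virtual address are made-up values):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ttbr = 0x40000000;            /* hypothetical table base */
        uint32_t vaddr = 0x12345678;
        /* TTBR[31:14] plus 4 * (vaddr >> 20): one word per 1MB section */
        uint32_t table = (ttbr & 0xffffc000) | ((vaddr >> 18) & 0x3ffc);
        printf("L1 descriptor at 0x%08x\n", table);  /* 0x4000048c */
        return 0;
    }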
*/ - goto do_fault; - } - *prot |= PAGE_EXEC; - *phys_ptr = phys_addr; - return 0; -do_fault: - return code | (domain << 4); -} - -static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type, - int is_user, hwaddr *phys_ptr, - int *prot, target_ulong *page_size) -{ - CPUState *cs = CPU(arm_env_get_cpu(env)); - int code; - uint32_t table; - uint32_t desc; - uint32_t xn; - uint32_t pxn = 0; - int type; - int ap; - int domain = 0; - int domain_prot; - hwaddr phys_addr; - - /* Pagetable walk. */ - /* Lookup l1 descriptor. */ - if (!get_level1_table_address(env, &table, address)) { - /* Section translation fault if page walk is disabled by PD0 or PD1 */ - code = 5; - goto do_fault; - } - desc = ldl_phys(cs->as, table); - type = (desc & 3); - if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { - /* Section translation fault, or attempt to use the encoding - * which is Reserved on implementations without PXN. - */ - code = 5; - goto do_fault; - } - if ((type == 1) || !(desc & (1 << 18))) { - /* Page or Section. */ - domain = (desc >> 5) & 0x0f; - } - domain_prot = (env->cp15.c3 >> (domain * 2)) & 3; - if (domain_prot == 0 || domain_prot == 2) { - if (type != 1) { - code = 9; /* Section domain fault. */ - } else { - code = 11; /* Page domain fault. */ - } - goto do_fault; - } - if (type != 1) { - if (desc & (1 << 18)) { - /* Supersection. */ - phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); - *page_size = 0x1000000; - } else { - /* Section. */ - phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); - *page_size = 0x100000; - } - ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); - xn = desc & (1 << 4); - pxn = desc & 1; - code = 13; - } else { - if (arm_feature(env, ARM_FEATURE_PXN)) { - pxn = (desc >> 2) & 1; - } - /* Lookup l2 entry. */ - table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); - desc = ldl_phys(cs->as, table); - ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); - switch (desc & 3) { - case 0: /* Page translation fault. */ - code = 7; - goto do_fault; - case 1: /* 64k page. */ - phys_addr = (desc & 0xffff0000) | (address & 0xffff); - xn = desc & (1 << 15); - *page_size = 0x10000; - break; - case 2: case 3: /* 4k page. */ - phys_addr = (desc & 0xfffff000) | (address & 0xfff); - xn = desc & 1; - *page_size = 0x1000; - break; - default: - /* Never happens, but compiler isn't smart enough to tell. */ - abort(); - } - code = 15; - } - if (domain_prot == 3) { - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - } else { - if (pxn && !is_user) { - xn = 1; - } - if (xn && access_type == 2) - goto do_fault; - - /* The simplified model uses AP[0] as an access control bit. */ - if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) { - /* Access flag fault. */ - code = (code == 15) ? 6 : 3; - goto do_fault; - } - *prot = check_ap(env, ap, domain_prot, access_type, is_user); - if (!*prot) { - /* Access permission fault. */ - goto do_fault; - } - if (!xn) { - *prot |= PAGE_EXEC; - } - } - *phys_ptr = phys_addr; - return 0; -do_fault: - return code | (domain << 4); -} - -/* Fault type for long-descriptor MMU fault reporting; this corresponds - * to bits [5..2] in the STATUS field in long-format DFSR/IFSR. 
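On the do_fault paths of the long-descriptor walk below, the return value is a long-format FSR: bit 9 flags the LPAE format, STATUS[5:2] carries the fault type, and the low bits the lookup level. Packed standalone:

    #include <stdio.h>

    static int lpae_fsr(int fault_type, int level)
    {
        return (1 << 9) | (fault_type << 2) | level;
    }

    int main(void)
    {
        /* translation fault (type 1) at level 2 -> 0x206 */
        printf("fsr = 0x%x\n", lpae_fsr(1, 2));
        return 0;
    }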
- */ -typedef enum { - translation_fault = 1, - access_fault = 2, - permission_fault = 3, -} MMUFaultType; - -static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, - int access_type, int is_user, - hwaddr *phys_ptr, int *prot, - target_ulong *page_size_ptr) -{ - CPUState *cs = CPU(arm_env_get_cpu(env)); - /* Read an LPAE long-descriptor translation table. */ - MMUFaultType fault_type = translation_fault; - uint32_t level = 1; - uint32_t epd; - int32_t tsz; - uint32_t tg; - uint64_t ttbr; - int ttbr_select; - hwaddr descaddr, descmask; - uint32_t tableattrs; - target_ulong page_size; - uint32_t attrs; - int32_t granule_sz = 9; - int32_t va_size = 32; - int32_t tbi = 0; - uint32_t t0sz; - uint32_t t1sz; - - if (arm_el_is_aa64(env, 1)) { - va_size = 64; - if (extract64(address, 55, 1)) - tbi = extract64(env->cp15.c2_control, 38, 1); - else - tbi = extract64(env->cp15.c2_control, 37, 1); - tbi *= 8; - } - - /* Determine whether this address is in the region controlled by - * TTBR0 or TTBR1 (or if it is in neither region and should fault). - * This is a Non-secure PL0/1 stage 1 translation, so controlled by - * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32: - */ - t0sz = extract32(env->cp15.c2_control, 0, 6); - if (arm_el_is_aa64(env, 1)) { - t0sz = MIN(t0sz, 39); - t0sz = MAX(t0sz, 16); - } - t1sz = extract32(env->cp15.c2_control, 16, 6); - if (arm_el_is_aa64(env, 1)) { - t1sz = MIN(t1sz, 39); - t1sz = MAX(t1sz, 16); - } - if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) { - /* there is a ttbr0 region and we are in it (high bits all zero) */ - ttbr_select = 0; - } else if (t1sz && !extract64(~address, va_size - t1sz, t1sz - tbi)) { - /* there is a ttbr1 region and we are in it (high bits all one) */ - ttbr_select = 1; - } else if (!t0sz) { - /* ttbr0 region is "everything not in the ttbr1 region" */ - ttbr_select = 0; - } else if (!t1sz) { - /* ttbr1 region is "everything not in the ttbr0 region" */ - ttbr_select = 1; - } else { - /* in the gap between the two regions, this is a Translation fault */ - fault_type = translation_fault; - goto do_fault; - } - - /* Note that QEMU ignores shareability and cacheability attributes, - * so we don't need to do anything with the SH, ORGN, IRGN fields - * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the - * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently - * implement any ASID-like capability so we can ignore it (instead - * we will always flush the TLB any time the ASID is changed). - */ - if (ttbr_select == 0) { - ttbr = env->cp15.ttbr0_el1; - epd = extract32(env->cp15.c2_control, 7, 1); - tsz = t0sz; - - tg = extract32(env->cp15.c2_control, 14, 2); - if (tg == 1) { /* 64KB pages */ - granule_sz = 13; - } - if (tg == 2) { /* 16KB pages */ - granule_sz = 11; - } - } else { - ttbr = env->cp15.ttbr1_el1; - epd = extract32(env->cp15.c2_control, 23, 1); - tsz = t1sz; - - tg = extract32(env->cp15.c2_control, 30, 2); - if (tg == 3) { /* 64KB pages */ - granule_sz = 13; - } - if (tg == 1) { /* 16KB pages */ - granule_sz = 11; - } - } - - if (epd) { - /* Translation table walk disabled => Translation fault on TLB miss */ - goto do_fault; - } - - /* The starting level depends on the virtual address size (which can be - * up to 48 bits) and the translation granule size. It indicates the number - * of strides (granule_sz bits at a time) needed to consume the bits - * of the input address. 
In the pseudocode this is: - * level = 4 - RoundUp((inputsize - grainsize) / stride) - * where their 'inputsize' is our 'va_size - tsz', 'grainsize' is - * our 'granule_sz + 3' and 'stride' is our 'granule_sz'. - * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: - * = 4 - (va_size - tsz - granule_sz - 3 + granule_sz - 1) / granule_sz - * = 4 - (va_size - tsz - 4) / granule_sz; - */ - level = 4 - (va_size - tsz - 4) / granule_sz; - - /* Clear the vaddr bits which aren't part of the within-region address, - * so that we don't have to special case things when calculating the - * first descriptor address. - */ - if (tsz) { - address &= (1ULL << (va_size - tsz)) - 1; - } - - descmask = (1ULL << (granule_sz + 3)) - 1; - - /* Now we can extract the actual base address from the TTBR */ - descaddr = extract64(ttbr, 0, 48); - descaddr &= ~((1ULL << (va_size - tsz - (granule_sz * (4 - level)))) - 1); - - tableattrs = 0; - for (;;) { - uint64_t descriptor; - - descaddr |= (address >> (granule_sz * (4 - level))) & descmask; - descaddr &= ~7ULL; - descriptor = ldq_phys(cs->as, descaddr); - if (!(descriptor & 1) || - (!(descriptor & 2) && (level == 3))) { - /* Invalid, or the Reserved level 3 encoding */ - goto do_fault; - } - descaddr = descriptor & 0xfffffff000ULL; - - if ((descriptor & 2) && (level < 3)) { - /* Table entry. The top five bits are attributes which may - * propagate down through lower levels of the table (and - * which are all arranged so that 0 means "no effect", so - * we can gather them up by ORing in the bits at each level). - */ - tableattrs |= extract64(descriptor, 59, 5); - level++; - continue; - } - /* Block entry at level 1 or 2, or page entry at level 3. - * These are basically the same thing, although the number - * of bits we pull in from the vaddr varies. - */ - page_size = (1ULL << ((granule_sz * (4 - level)) + 3)); - descaddr |= (address & (page_size - 1)); - /* Extract attributes from the descriptor and merge with table attrs */ - attrs = extract64(descriptor, 2, 10) - | (extract64(descriptor, 52, 12) << 10); - attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ - attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */ - /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 - * means "force PL1 access only", which means forcing AP[1] to 0. - */ - if (extract32(tableattrs, 2, 1)) { - attrs &= ~(1 << 4); - } - /* Since we're always in the Non-secure state, NSTable is ignored. */ - break; - } - /* Here descaddr is the final physical address, and attributes - * are all in attrs. - */ - fault_type = access_fault; - if ((attrs & (1 << 8)) == 0) { - /* Access flag */ - goto do_fault; - } - fault_type = permission_fault; - if (is_user && !(attrs & (1 << 4))) { - /* Unprivileged access not enabled */ - goto do_fault; - } - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - if ((arm_feature(env, ARM_FEATURE_V8) && is_user && (attrs & (1 << 12))) || - (!arm_feature(env, ARM_FEATURE_V8) && (attrs & (1 << 12))) || - (!is_user && (attrs & (1 << 11)))) { - /* XN/UXN or PXN. Since we only implement EL0/EL1 we unconditionally - * treat XN/UXN as UXN for v8. 
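The simplified starting-level formula above, level = 4 - (va_size - tsz - 4) / granule_sz, can be sanity-checked in isolation for a few granule and region-size combinations:

    #include <stdio.h>

    static int start_level(int va_size, int tsz, int granule_sz)
    {
        return 4 - (va_size - tsz - 4) / granule_sz;
    }

    int main(void)
    {
        /* AArch64, 4KB granule (granule_sz = 9), T0SZ = 25: 39-bit region */
        printf("level = %d\n", start_level(64, 25, 9));   /* 1 */
        /* AArch64, 64KB granule (granule_sz = 13), T0SZ = 22 */
        printf("level = %d\n", start_level(64, 22, 13));  /* 2 */
        return 0;
    }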
- */ - if (access_type == 2) { - goto do_fault; - } - *prot &= ~PAGE_EXEC; - } - if (attrs & (1 << 5)) { - /* Write access forbidden */ - if (access_type == 1) { - goto do_fault; - } - *prot &= ~PAGE_WRITE; - } - - *phys_ptr = descaddr; - *page_size_ptr = page_size; - return 0; - -do_fault: - /* Long-descriptor format IFSR/DFSR value */ - return (1 << 9) | (fault_type << 2) | level; -} - -static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, - int access_type, int is_user, - hwaddr *phys_ptr, int *prot) -{ - int n; - uint32_t mask; - uint32_t base; - - *phys_ptr = address; - for (n = 7; n >= 0; n--) { - base = env->cp15.c6_region[n]; - if ((base & 1) == 0) - continue; - mask = 1 << ((base >> 1) & 0x1f); - /* Keep this shift separate from the above to avoid an - (undefined) << 32. */ - mask = (mask << 1) - 1; - if (((base ^ address) & ~mask) == 0) - break; - } - if (n < 0) - return 2; - - if (access_type == 2) { - mask = env->cp15.pmsav5_insn_ap; - } else { - mask = env->cp15.pmsav5_data_ap; - } - mask = (mask >> (n * 4)) & 0xf; - switch (mask) { - case 0: - return 1; - case 1: - if (is_user) - return 1; - *prot = PAGE_READ | PAGE_WRITE; - break; - case 2: - *prot = PAGE_READ; - if (!is_user) - *prot |= PAGE_WRITE; - break; - case 3: - *prot = PAGE_READ | PAGE_WRITE; - break; - case 5: - if (is_user) - return 1; - *prot = PAGE_READ; - break; - case 6: - *prot = PAGE_READ; - break; - default: - /* Bad permission. */ - return 1; - } - *prot |= PAGE_EXEC; - return 0; -} - -/* get_phys_addr - get the physical address for this virtual address - * - * Find the physical address corresponding to the given virtual address, - * by doing a translation table walk on MMU based systems or using the - * MPU state on MPU based systems. - * - * Returns 0 if the translation was successful. Otherwise, phys_ptr, - * prot and page_size are not filled in, and the return value provides - * information on why the translation aborted, in the format of a - * DFSR/IFSR fault register, with the following caveats: - * * we honour the short vs long DFSR format differences. - * * the WnR bit is never set (the caller must do this). - * * for MPU based systems we don't bother to return a full FSR format - * value. - * - * @env: CPUARMState - * @address: virtual address to get physical address for - * @access_type: 0 for read, 1 for write, 2 for execute - * @is_user: 0 for privileged access, 1 for user - * @phys_ptr: set to the physical address corresponding to the virtual address - * @prot: set to the permissions for the page containing phys_ptr - * @page_size: set to the size of the page containing phys_ptr - */ -static inline int get_phys_addr(CPUARMState *env, target_ulong address, - int access_type, int is_user, - hwaddr *phys_ptr, int *prot, - target_ulong *page_size) -{ - /* Fast Context Switch Extension. */ - if (address < 0x02000000) - address += env->cp15.c13_fcse; - - if ((env->cp15.c1_sys & SCTLR_M) == 0) { - /* MMU/MPU disabled. 
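The get_phys_addr() dispatch that follows picks one of five translation regimes from CPU state. A reduced decision sketch (the booleans are illustrative stand-ins for the SCTLR and feature tests, not the real predicates):

    #include <stdbool.h>
    #include <stdio.h>

    enum regime { DISABLED, MPU, LPAE, V6, V5 };

    static enum regime pick_regime(bool mmu_on, bool have_mpu,
                                   bool lpae_enabled, bool sctlr_xp)
    {
        if (!mmu_on)      return DISABLED;  /* flat mapping, RWX */
        if (have_mpu)     return MPU;
        if (lpae_enabled) return LPAE;      /* long-descriptor walk */
        if (sctlr_xp)     return V6;        /* v6 short descriptors */
        return V5;
    }

    int main(void)
    {
        printf("regime = %d\n", pick_regime(true, false, false, true)); /* V6 */
        return 0;
    }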
*/ - *phys_ptr = address; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - *page_size = TARGET_PAGE_SIZE; - return 0; - } else if (arm_feature(env, ARM_FEATURE_MPU)) { - *page_size = TARGET_PAGE_SIZE; - return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr, - prot); - } else if (extended_addresses_enabled(env)) { - return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr, - prot, page_size); - } else if (env->cp15.c1_sys & SCTLR_XP) { - return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr, - prot, page_size); - } else { - return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr, - prot, page_size); - } -} - -int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, - int access_type, int mmu_idx) -{ - CPUARMState *env = cs->env_ptr; - hwaddr phys_addr; - target_ulong page_size; - int prot; - int ret, is_user; - uint32_t syn; - bool same_el = (arm_current_el(env) != 0); - - is_user = mmu_idx == MMU_USER_IDX; - ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot, - &page_size); - if (ret == 0) { - /* Map a single [sub]page. */ - phys_addr &= TARGET_PAGE_MASK; - address &= TARGET_PAGE_MASK; - tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size); - return 0; - } - - /* AArch64 syndrome does not have an LPAE bit */ - syn = ret & ~(1 << 9); - - /* For insn and data aborts we assume there is no instruction syndrome - * information; this is always true for exceptions reported to EL1. - */ - if (access_type == 2) { - syn = syn_insn_abort(same_el, 0, 0, syn); - cs->exception_index = EXCP_PREFETCH_ABORT; - } else { - syn = syn_data_abort(same_el, 0, 0, 0, access_type == 1, syn); - if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) { - ret |= (1 << 11); - } - cs->exception_index = EXCP_DATA_ABORT; - } - - env->exception.syndrome = syn; - env->exception.vaddress = address; - env->exception.fsr = ret; - return 1; -} - -hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - ARMCPU *cpu = ARM_CPU(NULL, cs); - hwaddr phys_addr; - target_ulong page_size; - int prot; - int ret; - - ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size); - - if (ret != 0) { - return -1; - } - - return phys_addr; -} - -void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val) -{ - if ((env->uncached_cpsr & CPSR_M) == mode) { - env->regs[13] = val; - } else { - env->banked_r13[bank_number(mode)] = val; - } -} - -uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode) -{ - if ((env->uncached_cpsr & CPSR_M) == mode) { - return env->regs[13]; - } else { - return env->banked_r13[bank_number(mode)]; - } -} - -uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - switch (reg) { - case 0: /* APSR */ - return xpsr_read(env) & 0xf8000000; - case 1: /* IAPSR */ - return xpsr_read(env) & 0xf80001ff; - case 2: /* EAPSR */ - return xpsr_read(env) & 0xff00fc00; - case 3: /* xPSR */ - return xpsr_read(env) & 0xff00fdff; - case 5: /* IPSR */ - return xpsr_read(env) & 0x000001ff; - case 6: /* EPSR */ - return xpsr_read(env) & 0x0700fc00; - case 7: /* IEPSR */ - return xpsr_read(env) & 0x0700edff; - case 8: /* MSP */ - return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13]; - case 9: /* PSP */ - return env->v7m.current_sp ? 
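/* A sketch decoding the long-descriptor fault status word built by the
 * do_fault path above, i.e. (1 << 9) | (fault_type << 2) | level. The
 * fault_type codes themselves are defined earlier in this file, so the
 * field width used here is only an assumption based on the packing. */
#include <stdint.h>

struct lpae_fsr {
    int lpae_format;   /* bit 9: long-descriptor FSR format */
    int fault_type;    /* translation/access/permission code */
    int level;         /* lookup level at which the walk failed */
};

static struct lpae_fsr decode_lpae_fsr(uint32_t fsr)
{
    struct lpae_fsr r;
    r.lpae_format = (fsr >> 9) & 1;
    r.fault_type  = (fsr >> 2) & 0x1f;  /* width assumed, see note above */
    r.level       = fsr & 3;
    return r;
}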
env->regs[13] : env->v7m.other_sp; - case 16: /* PRIMASK */ - return (env->daif & PSTATE_I) != 0; - case 17: /* BASEPRI */ - case 18: /* BASEPRI_MAX */ - return env->v7m.basepri; - case 19: /* FAULTMASK */ - return (env->daif & PSTATE_F) != 0; - case 20: /* CONTROL */ - return env->v7m.control; - default: - /* ??? For debugging only. */ - cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg); - return 0; - } -} - -void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - - switch (reg) { - case 0: /* APSR */ - xpsr_write(env, val, 0xf8000000); - break; - case 1: /* IAPSR */ - xpsr_write(env, val, 0xf8000000); - break; - case 2: /* EAPSR */ - xpsr_write(env, val, 0xfe00fc00); - break; - case 3: /* xPSR */ - xpsr_write(env, val, 0xfe00fc00); - break; - case 5: /* IPSR */ - /* IPSR bits are readonly. */ - break; - case 6: /* EPSR */ - xpsr_write(env, val, 0x0600fc00); - break; - case 7: /* IEPSR */ - xpsr_write(env, val, 0x0600fc00); - break; - case 8: /* MSP */ - if (env->v7m.current_sp) - env->v7m.other_sp = val; - else - env->regs[13] = val; - break; - case 9: /* PSP */ - if (env->v7m.current_sp) - env->regs[13] = val; - else - env->v7m.other_sp = val; - break; - case 16: /* PRIMASK */ - if (val & 1) { - env->daif |= PSTATE_I; - } else { - env->daif &= ~PSTATE_I; - } - break; - case 17: /* BASEPRI */ - env->v7m.basepri = val & 0xff; - break; - case 18: /* BASEPRI_MAX */ - val &= 0xff; - if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) - env->v7m.basepri = val; - break; - case 19: /* FAULTMASK */ - if (val & 1) { - env->daif |= PSTATE_F; - } else { - env->daif &= ~PSTATE_F; - } - break; - case 20: /* CONTROL */ - env->v7m.control = val & 3; - switch_v7m_sp(env, (val & 2) != 0); - break; - default: - /* ??? For debugging only. */ - cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg); - return; - } -} - -#endif - -void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) -{ - /* Implement DC ZVA, which zeroes a fixed-length block of memory. - * Note that we do not implement the (architecturally mandated) - * alignment fault for attempts to use this on Device memory - * (which matches the usual QEMU behaviour of not implementing either - * alignment faults or any memory attribute handling). - */ - - ARMCPU *cpu = arm_env_get_cpu(env); - uint64_t blocklen = 4 << cpu->dcz_blocksize; - uint64_t vaddr = vaddr_in & ~(blocklen - 1); - -#ifndef CONFIG_USER_ONLY - { - /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than - * the block size so we might have to do more than one TLB lookup. - * We know that in fact for any v8 CPU the page size is at least 4K - * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only - * 1K as an artefact of legacy v5 subpage support being present in the - * same QEMU executable. 
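/* A sketch of the MSP/PSP banking used by the v7-M cases above, assuming
 * v7m.current_sp is nonzero exactly when the process stack (PSP) is the
 * active one: regs[13] always holds the active stack pointer and other_sp
 * the inactive one. v7m_read_sp is an illustrative name. */
#include <stdint.h>

static uint32_t v7m_read_sp(int psp_is_active, int want_psp,
                            uint32_t r13, uint32_t other_sp)
{
    /* both flags assumed normalised to 0 or 1 */
    return (psp_is_active == want_psp) ? r13 : other_sp;
}
/* v7m_read_sp(1, 0, ...) returns other_sp, matching the MSP case above */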
- */ - - int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE); - // msvc doesnt allow non-constant array sizes, so we work out the size it would be - // TARGET_PAGE_SIZE is 1024 - // blocklen is 64 - // maxidx = (blocklen+TARGET_PAGE_SIZE-1) / TARGET_PAGE_SIZE - // = (64+1024-1) / 1024 - // = 1 -#ifdef _MSC_VER - void *hostaddr[1]; -#else - void *hostaddr[maxidx]; -#endif - int try, i; - - for (try = 0; try < 2; try++) { - - for (i = 0; i < maxidx; i++) { - hostaddr[i] = tlb_vaddr_to_host(env, - vaddr + TARGET_PAGE_SIZE * i, - 1, cpu_mmu_index(env)); - if (!hostaddr[i]) { - break; - } - } - if (i == maxidx) { - /* If it's all in the TLB it's fair game for just writing to; - * we know we don't need to update dirty status, etc. - */ - for (i = 0; i < maxidx - 1; i++) { - memset(hostaddr[i], 0, TARGET_PAGE_SIZE); - } - memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE)); - return; - } - /* OK, try a store and see if we can populate the tlb. This - * might cause an exception if the memory isn't writable, - * in which case we will longjmp out of here. We must for - * this purpose use the actual register value passed to us - * so that we get the fault address right. - */ - helper_ret_stb_mmu(env, vaddr_in, 0, cpu_mmu_index(env), GETRA()); - /* Now we can populate the other TLB entries, if any */ - for (i = 0; i < maxidx; i++) { - uint64_t va = vaddr + TARGET_PAGE_SIZE * i; - if (va != (vaddr_in & TARGET_PAGE_MASK)) { - helper_ret_stb_mmu(env, va, 0, cpu_mmu_index(env), GETRA()); - } - } - } - - /* Slow path (probably attempt to do this to an I/O device or - * similar, or clearing of a block of code we have translations - * cached for). Just do a series of byte writes as the architecture - * demands. It's not worth trying to use a cpu_physical_memory_map(), - * memset(), unmap() sequence here because: - * + we'd need to account for the blocksize being larger than a page - * + the direct-RAM access case is almost always going to be dealt - * with in the fastpath code above, so there's no speed benefit - * + we would have to deal with the map returning NULL because the - * bounce buffer was in use - */ - for (i = 0; i < blocklen; i++) { - helper_ret_stb_mmu(env, vaddr + i, 0, cpu_mmu_index(env), GETRA()); - } - } -#else - memset(g2h(vaddr), 0, blocklen); -#endif -} - -/* Note that signed overflow is undefined in C. The following routines are - careful to use unsigned types where modulo arithmetic is required. - Failure to do so _will_ break on newer gcc. */ - -/* Signed saturating arithmetic. */ - -/* Perform 16-bit signed saturating addition. */ -static inline uint16_t add16_sat(uint16_t a, uint16_t b) -{ - uint16_t res; - - res = a + b; - if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { - if (a & 0x8000) - res = 0x8000; - else - res = 0x7fff; - } - return res; -} - -/* Perform 8-bit signed saturating addition. */ -static inline uint8_t add8_sat(uint8_t a, uint8_t b) -{ - uint8_t res; - - res = a + b; - if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { - if (a & 0x80) - res = 0x80; - else - res = 0x7f; - } - return res; -} - -/* Perform 16-bit signed saturating subtraction. */ -static inline uint16_t sub16_sat(uint16_t a, uint16_t b) -{ - uint16_t res; - - res = a - b; - if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { - if (a & 0x8000) - res = 0x8000; - else - res = 0x7fff; - } - return res; -} - -/* Perform 8-bit signed saturating subtraction. 
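/* A quick standalone check of the overflow test used by add16_sat above:
 * signed overflow happened iff the operands share a sign bit while the
 * result's sign differs from the first operand's. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint16_t a = 0x7fff, b = 1;
    uint16_t res = a + b;                        /* 0x8000: overflowed */
    assert(((res ^ a) & 0x8000) && !((a ^ b) & 0x8000));

    b = 0x8000;                                  /* 32767 + (-32768) */
    res = a + b;                                 /* 0xffff, i.e. -1: fine */
    assert(!(((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)));
    return 0;
}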
*/ -static inline uint8_t sub8_sat(uint8_t a, uint8_t b) -{ - uint8_t res; - - res = a - b; - if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { - if (a & 0x80) - res = 0x80; - else - res = 0x7f; - } - return res; -} - -#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); -#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); -#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); -#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); -#define PFX q - -#include "op_addsub.h" - -/* Unsigned saturating arithmetic. */ -static inline uint16_t add16_usat(uint16_t a, uint16_t b) -{ - uint16_t res; - res = a + b; - if (res < a) - res = 0xffff; - return res; -} - -static inline uint16_t sub16_usat(uint16_t a, uint16_t b) -{ - if (a > b) - return a - b; - else - return 0; -} - -static inline uint8_t add8_usat(uint8_t a, uint8_t b) -{ - uint8_t res; - res = a + b; - if (res < a) - res = 0xff; - return res; -} - -static inline uint8_t sub8_usat(uint8_t a, uint8_t b) -{ - if (a > b) - return a - b; - else - return 0; -} - -#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); -#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); -#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); -#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); -#define PFX uq - -#include "op_addsub.h" - -/* Signed modulo arithmetic. */ -#define SARITH16(a, b, n, op) do { \ - int32_t sum; \ - sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ - RESULT(sum, n, 16); \ - if (sum >= 0) \ - ge |= 3 << (n * 2); \ - } while(0) - -#define SARITH8(a, b, n, op) do { \ - int32_t sum; \ - sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ - RESULT(sum, n, 8); \ - if (sum >= 0) \ - ge |= 1 << n; \ - } while(0) - - -#define ADD16(a, b, n) SARITH16(a, b, n, +) -#define SUB16(a, b, n) SARITH16(a, b, n, -) -#define ADD8(a, b, n) SARITH8(a, b, n, +) -#define SUB8(a, b, n) SARITH8(a, b, n, -) -#define PFX s -#define ARITH_GE - -#include "op_addsub.h" - -/* Unsigned modulo arithmetic. */ -#define ADD16(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ - RESULT(sum, n, 16); \ - if ((sum >> 16) == 1) \ - ge |= 3 << (n * 2); \ - } while(0) - -#define ADD8(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ - RESULT(sum, n, 8); \ - if ((sum >> 8) == 1) \ - ge |= 1 << n; \ - } while(0) - -#define SUB16(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ - RESULT(sum, n, 16); \ - if ((sum >> 16) == 0) \ - ge |= 3 << (n * 2); \ - } while(0) - -#define SUB8(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ - RESULT(sum, n, 8); \ - if ((sum >> 8) == 0) \ - ge |= 1 << n; \ - } while(0) - -#define PFX u -#define ARITH_GE - -#include "op_addsub.h" - -/* Halved signed arithmetic. */ -#define ADD16(a, b, n) \ - RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) -#define SUB16(a, b, n) \ - RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) -#define ADD8(a, b, n) \ - RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) -#define SUB8(a, b, n) \ - RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) -#define PFX sh - -#include "op_addsub.h" - -/* Halved unsigned arithmetic. 
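/* A standalone sketch of how the unsigned modulo ADD16 macro above
 * computes the GE bits later consumed by SEL: each halfword lane sets two
 * GE bits when its 16-bit addition carries out. uadd16_ge is an
 * illustrative name. */
#include <stdint.h>

static uint32_t uadd16_ge(uint32_t a, uint32_t b)
{
    uint32_t ge = 0;
    int n;
    for (n = 0; n < 2; n++) {
        uint32_t sum = (uint32_t)(uint16_t)(a >> (16 * n))
                     + (uint32_t)(uint16_t)(b >> (16 * n));
        if ((sum >> 16) == 1) {
            ge |= 3u << (n * 2);   /* two GE bits per halfword lane */
        }
    }
    return ge;
}
/* e.g. uadd16_ge(0x0000ffff, 0x00000001) == 0x3: only lane 0 carried */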
*/ -#define ADD16(a, b, n) \ - RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) -#define SUB16(a, b, n) \ - RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) -#define ADD8(a, b, n) \ - RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) -#define SUB8(a, b, n) \ - RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) -#define PFX uh - -#include "op_addsub.h" - -static inline uint8_t do_usad(uint8_t a, uint8_t b) -{ - if (a > b) - return a - b; - else - return b - a; -} - -/* Unsigned sum of absolute byte differences. */ -uint32_t HELPER(usad8)(uint32_t a, uint32_t b) -{ - uint32_t sum; - sum = do_usad(a, b); - sum += do_usad(a >> 8, b >> 8); - sum += do_usad(a >> 16, b >>16); - sum += do_usad(a >> 24, b >> 24); - return sum; -} - -/* For ARMv6 SEL instruction. */ -uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) -{ - uint32_t mask; - - mask = 0; - if (flags & 1) - mask |= 0xff; - if (flags & 2) - mask |= 0xff00; - if (flags & 4) - mask |= 0xff0000; - if (flags & 8) - mask |= 0xff000000; - return (a & mask) | (b & ~mask); -} - -/* VFP support. We follow the convention used for VFP instructions: - Single precision routines have a "s" suffix, double precision a - "d" suffix. */ - -/* Convert host exception flags to vfp form. */ -static inline int vfp_exceptbits_from_host(int host_bits) -{ - int target_bits = 0; - - if (host_bits & float_flag_invalid) - target_bits |= 1; - if (host_bits & float_flag_divbyzero) - target_bits |= 2; - if (host_bits & float_flag_overflow) - target_bits |= 4; - if (host_bits & (float_flag_underflow | float_flag_output_denormal)) - target_bits |= 8; - if (host_bits & float_flag_inexact) - target_bits |= 0x10; - if (host_bits & float_flag_input_denormal) - target_bits |= 0x80; - return target_bits; -} - -uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) -{ - int i; - uint32_t fpscr; - - fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) - | (env->vfp.vec_len << 16) - | (env->vfp.vec_stride << 20); - i = get_float_exception_flags(&env->vfp.fp_status); - i |= get_float_exception_flags(&env->vfp.standard_fp_status); - fpscr |= vfp_exceptbits_from_host(i); - return fpscr; -} - -uint32_t vfp_get_fpscr(CPUARMState *env) -{ - return HELPER(vfp_get_fpscr)(env); -} - -/* Convert vfp exception flags to target form. 
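/* The cumulative exception flags packed by vfp_exceptbits_from_host above
 * land in the architected FPSCR bit positions; a small sketch naming
 * them (print_fpscr_exceptions is an illustrative helper): */
#include <stdint.h>
#include <stdio.h>

static void print_fpscr_exceptions(uint32_t fpscr)
{
    static const struct { uint32_t bit; const char *name; } flags[] = {
        { 1u << 0, "IOC (invalid)" },   { 1u << 1, "DZC (div-by-zero)" },
        { 1u << 2, "OFC (overflow)" },  { 1u << 3, "UFC (underflow)" },
        { 1u << 4, "IXC (inexact)" },   { 1u << 7, "IDC (input denormal)" },
    };
    unsigned i;
    for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
        if (fpscr & flags[i].bit) {
            printf("%s\n", flags[i].name);
        }
    }
}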
*/ -static inline int vfp_exceptbits_to_host(int target_bits) -{ - int host_bits = 0; - - if (target_bits & 1) - host_bits |= float_flag_invalid; - if (target_bits & 2) - host_bits |= float_flag_divbyzero; - if (target_bits & 4) - host_bits |= float_flag_overflow; - if (target_bits & 8) - host_bits |= float_flag_underflow; - if (target_bits & 0x10) - host_bits |= float_flag_inexact; - if (target_bits & 0x80) - host_bits |= float_flag_input_denormal; - return host_bits; -} - -void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) -{ - int i; - uint32_t changed; - - changed = env->vfp.xregs[ARM_VFP_FPSCR]; - env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff); - env->vfp.vec_len = (val >> 16) & 7; - env->vfp.vec_stride = (val >> 20) & 3; - - changed ^= val; - if (changed & (3 << 22)) { - i = (val >> 22) & 3; - switch (i) { - case FPROUNDING_TIEEVEN: - i = float_round_nearest_even; - break; - case FPROUNDING_POSINF: - i = float_round_up; - break; - case FPROUNDING_NEGINF: - i = float_round_down; - break; - case FPROUNDING_ZERO: - i = float_round_to_zero; - break; - } - set_float_rounding_mode(i, &env->vfp.fp_status); - } - if (changed & (1 << 24)) { - set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status); - set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status); - } - if (changed & (1 << 25)) - set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status); - - i = vfp_exceptbits_to_host(val); - set_float_exception_flags(i, &env->vfp.fp_status); - set_float_exception_flags(0, &env->vfp.standard_fp_status); -} - -void vfp_set_fpscr(CPUARMState *env, uint32_t val) -{ - HELPER(vfp_set_fpscr)(env, val); -} - -#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) - -#define VFP_BINOP(name) \ -float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - return float32_ ## name(a, b, fpst); \ -} \ -float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - return float64_ ## name(a, b, fpst); \ -} -VFP_BINOP(add) -VFP_BINOP(sub) -VFP_BINOP(mul) -VFP_BINOP(div) -VFP_BINOP(min) -VFP_BINOP(max) -VFP_BINOP(minnum) -VFP_BINOP(maxnum) -#undef VFP_BINOP - -float32 VFP_HELPER(neg, s)(float32 a) -{ - return float32_chs(a); -} - -float64 VFP_HELPER(neg, d)(float64 a) -{ - return float64_chs(a); -} - -float32 VFP_HELPER(abs, s)(float32 a) -{ - return float32_abs(a); -} - -float64 VFP_HELPER(abs, d)(float64 a) -{ - return float64_abs(a); -} - -float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) -{ - return float32_sqrt(a, &env->vfp.fp_status); -} - -float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) -{ - return float64_sqrt(a, &env->vfp.fp_status); -} - -/* XXX: check quiet/signaling case */ -#define DO_VFP_cmp(p, type) \ -void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \ -{ \ - uint32_t flags; \ - switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \ - case 0: flags = 0x6; break; \ - case -1: flags = 0x8; break; \ - case 1: flags = 0x2; break; \ - default: case 2: flags = 0x3; break; \ - } \ - env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ - | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ -} \ -void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \ -{ \ - uint32_t flags; \ - switch(type ## _compare(a, b, &env->vfp.fp_status)) { \ - case 0: flags = 0x6; break; \ - case -1: flags = 0x8; break; \ - case 1: flags = 0x2; break; \ - default: case 2: flags = 0x3; break; \ - } \ - env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ - | 
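/* The control fields decoded by vfp_set_fpscr above, per the architected
 * FPSCR layout: RMode in bits [23:22] (mapped to softfloat rounding modes
 * by the switch), FZ flush-to-zero at bit 24, DN default-NaN at bit 25,
 * plus the vector length field at [18:16] and stride at [21:20]. A small
 * decode sketch with illustrative names: */
#include <stdint.h>

static int fpscr_rmode(uint32_t fpscr) { return (fpscr >> 22) & 3; }
static int fpscr_fz(uint32_t fpscr)    { return (fpscr >> 24) & 1; }
static int fpscr_dn(uint32_t fpscr)    { return (fpscr >> 25) & 1; }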
(env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ -} -DO_VFP_cmp(s, float32) -DO_VFP_cmp(d, float64) -#undef DO_VFP_cmp - -/* Integer to float and float to integer conversions */ - -#define CONV_ITOF(name, fsz, sign) \ - float##fsz HELPER(name)(uint32_t x, void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ -} - -#define CONV_FTOI(name, fsz, sign, round) \ -uint32_t HELPER(name)(float##fsz x, void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - if (float##fsz##_is_any_nan(x)) { \ - float_raise(float_flag_invalid, fpst); \ - return 0; \ - } \ - return float##fsz##_to_##sign##int32##round(x, fpst); \ -} - -#define FLOAT_CONVS(name, p, fsz, sign) \ -CONV_ITOF(vfp_##name##to##p, fsz, sign) \ -CONV_FTOI(vfp_to##name##p, fsz, sign, ) \ -CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero) - -FLOAT_CONVS(si, s, 32, ) -FLOAT_CONVS(si, d, 64, ) -FLOAT_CONVS(ui, s, 32, u) -FLOAT_CONVS(ui, d, 64, u) - -#undef CONV_ITOF -#undef CONV_FTOI -#undef FLOAT_CONVS - -/* floating point conversion */ -float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) -{ - float64 r = float32_to_float64(x, &env->vfp.fp_status); - /* ARM requires that S<->D conversion of any kind of NaN generates - * a quiet NaN by forcing the most significant frac bit to 1. - */ - return float64_maybe_silence_nan(r); -} - -float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) -{ - float32 r = float64_to_float32(x, &env->vfp.fp_status); - /* ARM requires that S<->D conversion of any kind of NaN generates - * a quiet NaN by forcing the most significant frac bit to 1. - */ - return float32_maybe_silence_nan(r); -} - -/* VFP3 fixed point conversion. */ -#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ -float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ - void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - float##fsz tmp; \ - tmp = itype##_to_##float##fsz(x, fpst); \ - return float##fsz##_scalbn(tmp, -(int)shift, fpst); \ -} - -/* Notice that we want only input-denormal exception flags from the - * scalbn operation: the other possible flags (overflow+inexact if - * we overflow to infinity, output-denormal) aren't correct for the - * complete scale-and-convert operation. 
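/* The flag nibbles chosen by DO_VFP_cmp above are the architected NZCV
 * results of a VFP compare, written to FPSCR[31:28]. A sketch of the
 * mapping from softfloat's compare result convention (-1 less, 0 equal,
 * 1 greater, 2 unordered): */
static unsigned vfp_compare_nzcv(int relation)
{
    switch (relation) {
    case 0:  return 0x6;   /* equal:     Z C */
    case -1: return 0x8;   /* less than: N   */
    case 1:  return 0x2;   /* greater:   C   */
    default: return 0x3;   /* unordered: C V */
    }
}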
- */ -#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \ -uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \ - uint32_t shift, \ - void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - int old_exc_flags = get_float_exception_flags(fpst); \ - float##fsz tmp; \ - if (float##fsz##_is_any_nan(x)) { \ - float_raise(float_flag_invalid, fpst); \ - return 0; \ - } \ - tmp = float##fsz##_scalbn(x, shift, fpst); \ - old_exc_flags |= get_float_exception_flags(fpst) \ - & float_flag_input_denormal; \ - set_float_exception_flags(old_exc_flags, fpst); \ - return float##fsz##_to_##itype##round(tmp, fpst); \ -} - -#define VFP_CONV_FIX(name, p, fsz, isz, itype) \ -VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ -VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \ -VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ) - -#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \ -VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ -VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ) - -VFP_CONV_FIX(sh, d, 64, 64, int16) -VFP_CONV_FIX(sl, d, 64, 64, int32) -VFP_CONV_FIX_A64(sq, d, 64, 64, int64) -VFP_CONV_FIX(uh, d, 64, 64, uint16) -VFP_CONV_FIX(ul, d, 64, 64, uint32) -VFP_CONV_FIX_A64(uq, d, 64, 64, uint64) -VFP_CONV_FIX(sh, s, 32, 32, int16) -VFP_CONV_FIX(sl, s, 32, 32, int32) -VFP_CONV_FIX_A64(sq, s, 32, 64, int64) -VFP_CONV_FIX(uh, s, 32, 32, uint16) -VFP_CONV_FIX(ul, s, 32, 32, uint32) -VFP_CONV_FIX_A64(uq, s, 32, 64, uint64) -#undef VFP_CONV_FIX -#undef VFP_CONV_FIX_FLOAT -#undef VFP_CONV_FLOAT_FIX_ROUND - -/* Set the current fp rounding mode and return the old one. - * The argument is a softfloat float_round_ value. - */ -uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env) -{ - float_status *fp_status = &env->vfp.fp_status; - - uint32_t prev_rmode = get_float_rounding_mode(fp_status); - set_float_rounding_mode(rmode, fp_status); - - return prev_rmode; -} - -/* Set the current fp rounding mode in the standard fp status and return - * the old one. This is for NEON instructions that need to change the - * rounding mode but wish to use the standard FPSCR values for everything - * else. Always set the rounding mode back to the correct value after - * modifying it. - * The argument is a softfloat float_round_ value. - */ -uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env) -{ - float_status *fp_status = &env->vfp.standard_fp_status; - - uint32_t prev_rmode = get_float_rounding_mode(fp_status); - set_float_rounding_mode(rmode, fp_status); - - return prev_rmode; -} - -/* Half precision conversions. 
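/* What the VFP_CONV_FIX_* machinery above computes, sketched with host
 * doubles: a fixed-point value with 'shift' fraction bits is converted by
 * scaling with 2^-shift (to float) or 2^shift (to fixed). The helper
 * names are illustrative. */
#include <math.h>
#include <stdint.h>

static double fix_to_double(int32_t x, unsigned shift)
{
    return ldexp((double)x, -(int)shift);    /* x * 2^-shift */
}

static int32_t double_to_fix(double x, unsigned shift)
{
    return (int32_t)ldexp(x, (int)shift);    /* truncates, like the
                                              * _round_to_zero variant */
}
/* e.g. double_to_fix(1.5, 8) == 384 and fix_to_double(384, 8) == 1.5 */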
*/ -static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s) -{ - int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; - float32 r = float16_to_float32(make_float16(a), ieee, s); - if (ieee) { - return float32_maybe_silence_nan(r); - } - return r; -} - -static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s) -{ - int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; - float16 r = float32_to_float16(a, ieee, s); - if (ieee) { - r = float16_maybe_silence_nan(r); - } - return float16_val(r); -} - -float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env) -{ - return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status); -} - -uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env) -{ - return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status); -} - -float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env) -{ - return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status); -} - -uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env) -{ - return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status); -} - -float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env) -{ - int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; - float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status); - if (ieee) { - return float64_maybe_silence_nan(r); - } - return r; -} - -uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env) -{ - int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; - float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status); - if (ieee) { - r = float16_maybe_silence_nan(r); - } - return float16_val(r); -} - -#define float32_two make_float32(0x40000000) -#define float32_three make_float32(0x40400000) -#define float32_one_point_five make_float32(0x3fc00000) - -float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env) -{ - float_status *s = &env->vfp.standard_fp_status; - if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || - (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { - if (!(float32_is_zero(a) || float32_is_zero(b))) { - float_raise(float_flag_input_denormal, s); - } - return float32_two; - } - return float32_sub(float32_two, float32_mul(a, b, s), s); -} - -float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env) -{ - float_status *s = &env->vfp.standard_fp_status; - float32 product; - if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || - (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { - if (!(float32_is_zero(a) || float32_is_zero(b))) { - float_raise(float_flag_input_denormal, s); - } - return float32_one_point_five; - } - product = float32_mul(a, b, s); - return float32_div(float32_sub(float32_three, product, s), float32_two, s); -} - -/* NEON helpers. */ - -/* Constants 256 and 512 are used in some helpers; we avoid relying on - * int->float conversions at run-time. */ -#define float64_256 make_float64(0x4070000000000000LL) -#define float64_512 make_float64(0x4080000000000000LL) -#define float32_maxnorm make_float32(0x7f7fffff) -#define float64_maxnorm make_float64(0x7fefffffffffffffLL) - -/* Reciprocal functions - * - * The algorithm that must be used to calculate the estimate - * is specified by the ARM ARM, see FPRecipEstimate() - */ - -static float64 recip_estimate(float64 a, float_status *real_fp_status) -{ - /* These calculations mustn't set any fp exception flags, - * so we use a local copy of the fp_status. 
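/* Why recps_f32 and rsqrts_f32 above return (2 - a*b) and (3 - a*b)/2:
 * they are the correction factors of one Newton-Raphson step. If x
 * approximates 1/a, then x*(2 - a*x) is a better approximation; if x
 * approximates 1/sqrt(a), then x*(3 - a*x*x)/2 is. A host-double sketch
 * with illustrative names: */
static double refine_recip(double a, double x)
{
    return x * (2.0 - a * x);             /* one step towards 1/a */
}

static double refine_rsqrt(double a, double x)
{
    return x * (3.0 - a * x * x) / 2.0;   /* one step towards 1/sqrt(a) */
}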
- */ - float_status dummy_status = *real_fp_status; - float_status *s = &dummy_status; - /* q = (int)(a * 512.0) */ - float64 q = float64_mul(float64_512, a, s); - int64_t q_int = float64_to_int64_round_to_zero(q, s); - - /* r = 1.0 / (((double)q + 0.5) / 512.0) */ - q = int64_to_float64(q_int, s); - q = float64_add(q, float64_half, s); - q = float64_div(q, float64_512, s); - q = float64_div(float64_one, q, s); - - /* s = (int)(256.0 * r + 0.5) */ - q = float64_mul(q, float64_256, s); - q = float64_add(q, float64_half, s); - q_int = float64_to_int64_round_to_zero(q, s); - - /* return (double)s / 256.0 */ - return float64_div(int64_to_float64(q_int, s), float64_256, s); -} - -/* Common wrapper to call recip_estimate */ -static float64 call_recip_estimate(float64 num, int off, float_status *fpst) -{ - uint64_t val64 = float64_val(num); - uint64_t frac = extract64(val64, 0, 52); - int64_t exp = extract64(val64, 52, 11); - uint64_t sbit; - float64 scaled, estimate; - - /* Generate the scaled number for the estimate function */ - if (exp == 0) { - if (extract64(frac, 51, 1) == 0) { - exp = -1; - frac = extract64(frac, 0, 50) << 2; - } else { - frac = extract64(frac, 0, 51) << 1; - } - } - - /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */ - scaled = make_float64((0x3feULL << 52) - | extract64(frac, 44, 8) << 44); - - estimate = recip_estimate(scaled, fpst); - - /* Build new result */ - val64 = float64_val(estimate); - sbit = 0x8000000000000000ULL & val64; - exp = off - exp; - frac = extract64(val64, 0, 52); - - if (exp == 0) { - frac = 1ULL << 51 | extract64(frac, 1, 51); - } else if (exp == -1) { - frac = 1ULL << 50 | extract64(frac, 2, 50); - exp = 0; - } - - return make_float64(sbit | (exp << 52) | frac); -} - -static bool round_to_inf(float_status *fpst, bool sign_bit) -{ - switch (fpst->float_rounding_mode) { - case float_round_nearest_even: /* Round to Nearest */ - return true; - case float_round_up: /* Round to +Inf */ - return !sign_bit; - case float_round_down: /* Round to -Inf */ - return sign_bit; - case float_round_to_zero: /* Round to Zero */ - return false; - default: - break; - } - - g_assert_not_reached(); - return false; -} - -float32 HELPER(recpe_f32)(float32 input, void *fpstp) -{ - float_status *fpst = fpstp; - float32 f32 = float32_squash_input_denormal(input, fpst); - uint32_t f32_val = float32_val(f32); - uint32_t f32_sbit = 0x80000000ULL & f32_val; - int32_t f32_exp = extract32(f32_val, 23, 8); - uint32_t f32_frac = extract32(f32_val, 0, 23); - float64 f64, r64; - uint64_t r64_val; - int64_t r64_exp; - uint64_t r64_frac; - - if (float32_is_any_nan(f32)) { - float32 nan = f32; - if (float32_is_signaling_nan(f32)) { - float_raise(float_flag_invalid, fpst); - nan = float32_maybe_silence_nan(f32); - } - if (fpst->default_nan_mode) { - nan = float32_default_nan; - } - return nan; - } else if (float32_is_infinity(f32)) { - return float32_set_sign(float32_zero, float32_is_neg(f32)); - } else if (float32_is_zero(f32)) { - float_raise(float_flag_divbyzero, fpst); - return float32_set_sign(float32_infinity, float32_is_neg(f32)); - } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) { - /* Abs(value) < 2.0^-128 */ - float_raise(float_flag_overflow | float_flag_inexact, fpst); - if (round_to_inf(fpst, f32_sbit)) { - return float32_set_sign(float32_infinity, float32_is_neg(f32)); - } else { - return float32_set_sign(float32_maxnorm, float32_is_neg(f32)); - } - } else if (f32_exp >= 253 && fpst->flush_to_zero) { - float_raise(float_flag_underflow, fpst); - return 
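/* recip_estimate above restated with host doubles. The real code goes
 * through softfloat on a scratch float_status so no exception flags leak;
 * numerically it produces this 8-bit table-style estimate for inputs
 * scaled into [0.5, 1.0): */
static double recip_estimate_ref(double a)
{
    int q = (int)(a * 512.0);                   /* a in units of 1/512 */
    double r = 1.0 / (((double)q + 0.5) / 512.0);
    int s = (int)(256.0 * r + 0.5);             /* round to units of 1/256 */
    return (double)s / 256.0;
}
/* e.g. recip_estimate_ref(0.5) == 511.0/256 ~= 1.996, the 8-bit-accurate
 * estimate of 2.0 */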
float32_set_sign(float32_zero, float32_is_neg(f32)); - } - - - f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29); - r64 = call_recip_estimate(f64, 253, fpst); - r64_val = float64_val(r64); - r64_exp = extract64(r64_val, 52, 11); - r64_frac = extract64(r64_val, 0, 52); - - /* result = sign : result_exp<7:0> : fraction<51:29>; */ - return make_float32(f32_sbit | - (r64_exp & 0xff) << 23 | - extract64(r64_frac, 29, 24)); -} - -float64 HELPER(recpe_f64)(float64 input, void *fpstp) -{ - float_status *fpst = fpstp; - float64 f64 = float64_squash_input_denormal(input, fpst); - uint64_t f64_val = float64_val(f64); - uint64_t f64_sbit = 0x8000000000000000ULL & f64_val; - int64_t f64_exp = extract64(f64_val, 52, 11); - float64 r64; - uint64_t r64_val; - int64_t r64_exp; - uint64_t r64_frac; - - /* Deal with any special cases */ - if (float64_is_any_nan(f64)) { - float64 nan = f64; - if (float64_is_signaling_nan(f64)) { - float_raise(float_flag_invalid, fpst); - nan = float64_maybe_silence_nan(f64); - } - if (fpst->default_nan_mode) { - nan = float64_default_nan; - } - return nan; - } else if (float64_is_infinity(f64)) { - return float64_set_sign(float64_zero, float64_is_neg(f64)); - } else if (float64_is_zero(f64)) { - float_raise(float_flag_divbyzero, fpst); - return float64_set_sign(float64_infinity, float64_is_neg(f64)); - } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { - /* Abs(value) < 2.0^-1024 */ - float_raise(float_flag_overflow | float_flag_inexact, fpst); - if (round_to_inf(fpst, f64_sbit)) { - return float64_set_sign(float64_infinity, float64_is_neg(f64)); - } else { - return float64_set_sign(float64_maxnorm, float64_is_neg(f64)); - } - } else if (f64_exp >= 1023 && fpst->flush_to_zero) { - float_raise(float_flag_underflow, fpst); - return float64_set_sign(float64_zero, float64_is_neg(f64)); - } - - r64 = call_recip_estimate(f64, 2045, fpst); - r64_val = float64_val(r64); - r64_exp = extract64(r64_val, 52, 11); - r64_frac = extract64(r64_val, 0, 52); - - /* result = sign : result_exp<10:0> : fraction<51:0> */ - return make_float64(f64_sbit | - ((r64_exp & 0x7ff) << 52) | - r64_frac); -} - -/* The algorithm that must be used to calculate the estimate - * is specified by the ARM ARM. - */ -static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status) -{ - /* These calculations mustn't set any fp exception flags, - * so we use a local copy of the fp_status. 
- */ - float_status dummy_status = *real_fp_status; - float_status *s = &dummy_status; - float64 q; - int64_t q_int; - - if (float64_lt(a, float64_half, s)) { - /* range 0.25 <= a < 0.5 */ - - /* a in units of 1/512 rounded down */ - /* q0 = (int)(a * 512.0); */ - q = float64_mul(float64_512, a, s); - q_int = float64_to_int64_round_to_zero(q, s); - - /* reciprocal root r */ - /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */ - q = int64_to_float64(q_int, s); - q = float64_add(q, float64_half, s); - q = float64_div(q, float64_512, s); - q = float64_sqrt(q, s); - q = float64_div(float64_one, q, s); - } else { - /* range 0.5 <= a < 1.0 */ - - int64_t q_int; - - /* a in units of 1/256 rounded down */ - /* q1 = (int)(a * 256.0); */ - q = float64_mul(float64_256, a, s); - q_int = float64_to_int64_round_to_zero(q, s); - - /* reciprocal root r */ - /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */ - q = int64_to_float64(q_int, s); - q = float64_add(q, float64_half, s); - q = float64_div(q, float64_256, s); - q = float64_sqrt(q, s); - q = float64_div(float64_one, q, s); - } - /* r in units of 1/256 rounded to nearest */ - /* s = (int)(256.0 * r + 0.5); */ - - q = float64_mul(q, float64_256,s ); - q = float64_add(q, float64_half, s); - q_int = float64_to_int64_round_to_zero(q, s); - - /* return (double)s / 256.0;*/ - return float64_div(int64_to_float64(q_int, s), float64_256, s); -} - -float32 HELPER(rsqrte_f32)(float32 input, void *fpstp) -{ - float_status *s = fpstp; - float32 f32 = float32_squash_input_denormal(input, s); - uint32_t val = float32_val(f32); - uint32_t f32_sbit = 0x80000000 & val; - int32_t f32_exp = extract32(val, 23, 8); - uint32_t f32_frac = extract32(val, 0, 23); - uint64_t f64_frac; - uint64_t val64; - int result_exp; - float64 f64; - - if (float32_is_any_nan(f32)) { - float32 nan = f32; - if (float32_is_signaling_nan(f32)) { - float_raise(float_flag_invalid, s); - nan = float32_maybe_silence_nan(f32); - } - if (s->default_nan_mode) { - nan = float32_default_nan; - } - return nan; - } else if (float32_is_zero(f32)) { - float_raise(float_flag_divbyzero, s); - return float32_set_sign(float32_infinity, float32_is_neg(f32)); - } else if (float32_is_neg(f32)) { - float_raise(float_flag_invalid, s); - return float32_default_nan; - } else if (float32_is_infinity(f32)) { - return float32_zero; - } - - /* Scale and normalize to a double-precision value between 0.25 and 1.0, - * preserving the parity of the exponent. 
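/* recip_sqrt_estimate above in host-double form; the branch at 0.5 keeps
 * the lookup granularity roughly even across the scaled input range
 * 0.25 <= a < 1.0 (link with -lm): */
#include <math.h>

static double rsqrt_estimate_ref(double a)
{
    double r;
    int s;
    if (a < 0.5) {                  /* 0.25 <= a < 0.5: units of 1/512 */
        int q0 = (int)(a * 512.0);
        r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);
    } else {                        /* 0.5 <= a < 1.0: units of 1/256 */
        int q1 = (int)(a * 256.0);
        r = 1.0 / sqrt(((double)q1 + 0.5) / 256.0);
    }
    s = (int)(256.0 * r + 0.5);     /* round to units of 1/256 */
    return (double)s / 256.0;
}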
*/ - - f64_frac = ((uint64_t) f32_frac) << 29; - if (f32_exp == 0) { - while (extract64(f64_frac, 51, 1) == 0) { - f64_frac = f64_frac << 1; - f32_exp = f32_exp-1; - } - f64_frac = extract64(f64_frac, 0, 51) << 1; - } - - if (extract64(f32_exp, 0, 1) == 0) { - f64 = make_float64(((uint64_t) f32_sbit) << 32 - | (0x3feULL << 52) - | f64_frac); - } else { - f64 = make_float64(((uint64_t) f32_sbit) << 32 - | (0x3fdULL << 52) - | f64_frac); - } - - result_exp = (380 - f32_exp) / 2; - - f64 = recip_sqrt_estimate(f64, s); - - val64 = float64_val(f64); - - val = ((result_exp & 0xff) << 23) - | ((val64 >> 29) & 0x7fffff); - return make_float32(val); -} - -float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) -{ - float_status *s = fpstp; - float64 f64 = float64_squash_input_denormal(input, s); - uint64_t val = float64_val(f64); - uint64_t f64_sbit = 0x8000000000000000ULL & val; - int64_t f64_exp = extract64(val, 52, 11); - uint64_t f64_frac = extract64(val, 0, 52); - int64_t result_exp; - uint64_t result_frac; - - if (float64_is_any_nan(f64)) { - float64 nan = f64; - if (float64_is_signaling_nan(f64)) { - float_raise(float_flag_invalid, s); - nan = float64_maybe_silence_nan(f64); - } - if (s->default_nan_mode) { - nan = float64_default_nan; - } - return nan; - } else if (float64_is_zero(f64)) { - float_raise(float_flag_divbyzero, s); - return float64_set_sign(float64_infinity, float64_is_neg(f64)); - } else if (float64_is_neg(f64)) { - float_raise(float_flag_invalid, s); - return float64_default_nan; - } else if (float64_is_infinity(f64)) { - return float64_zero; - } - - /* Scale and normalize to a double-precision value between 0.25 and 1.0, - * preserving the parity of the exponent. */ - - if (f64_exp == 0) { - while (extract64(f64_frac, 51, 1) == 0) { - f64_frac = f64_frac << 1; - f64_exp = f64_exp - 1; - } - f64_frac = extract64(f64_frac, 0, 51) << 1; - } - - if (extract64(f64_exp, 0, 1) == 0) { - f64 = make_float64(f64_sbit - | (0x3feULL << 52) - | f64_frac); - } else { - f64 = make_float64(f64_sbit - | (0x3fdULL << 52) - | f64_frac); - } - - result_exp = (3068 - f64_exp) / 2; - - f64 = recip_sqrt_estimate(f64, s); - - result_frac = extract64(float64_val(f64), 0, 52); - - return make_float64(f64_sbit | - ((result_exp & 0x7ff) << 52) | - result_frac); -} - -uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp) -{ - float_status *s = fpstp; - float64 f64; - - if ((a & 0x80000000) == 0) { - return 0xffffffff; - } - - f64 = make_float64((0x3feULL << 52) - | ((int64_t)(a & 0x7fffffff) << 21)); - - f64 = recip_estimate(f64, s); - - return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff); -} - -uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp) -{ - float_status *fpst = fpstp; - float64 f64; - - if ((a & 0xc0000000) == 0) { - return 0xffffffff; - } - - if (a & 0x80000000) { - f64 = make_float64((0x3feULL << 52) - | ((uint64_t)(a & 0x7fffffff) << 21)); - } else { /* bits 31-30 == '01' */ - f64 = make_float64((0x3fdULL << 52) - | ((uint64_t)(a & 0x3fffffff) << 22)); - } - - f64 = recip_sqrt_estimate(f64, fpst); - - return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff); -} - -/* VFPv4 fused multiply-accumulate */ -float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) -{ - float_status *fpst = fpstp; - return float32_muladd(a, b, c, 0, fpst); -} - -float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) -{ - float_status *fpst = fpstp; - return float64_muladd(a, b, c, 0, fpst); -} - -/* ARMv8 round to integral */ -float32 HELPER(rints_exact)(float32 x, 
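/* The constants in the result-exponent arithmetic above appear to follow
 * one pattern: for 1/sqrt(m * 2^(e - bias)) the biased result exponent is
 * about (3*bias - e)/2, and normalising the mantissa into [0.25, 1.0)
 * costs one more, giving 3*127 - 1 = 380 for single precision and
 * 3*1023 - 1 = 3068 for double. The reciprocal path uses 2*bias - 1
 * likewise: 253 and 2045 above. A sketch of the pattern: */
static int rsqrt_result_exp(int biased_exp, int bias)
{
    return (3 * bias - 1 - biased_exp) / 2;
}
/* rsqrt_result_exp(e, 127) == (380 - e) / 2, as in rsqrte_f32 above */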
void *fp_status) -{ - return float32_round_to_int(x, fp_status); -} - -float64 HELPER(rintd_exact)(float64 x, void *fp_status) -{ - return float64_round_to_int(x, fp_status); -} - -float32 HELPER(rints)(float32 x, void *fp_status) -{ - int old_flags = get_float_exception_flags(fp_status), new_flags; - float32 ret; - - ret = float32_round_to_int(x, fp_status); - - /* Suppress any inexact exceptions the conversion produced */ - if (!(old_flags & float_flag_inexact)) { - new_flags = get_float_exception_flags(fp_status); - set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); - } - - return ret; -} - -float64 HELPER(rintd)(float64 x, void *fp_status) -{ - int old_flags = get_float_exception_flags(fp_status), new_flags; - float64 ret; - - ret = float64_round_to_int(x, fp_status); - - new_flags = get_float_exception_flags(fp_status); - - /* Suppress any inexact exceptions the conversion produced */ - if (!(old_flags & float_flag_inexact)) { - new_flags = get_float_exception_flags(fp_status); - set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); - } - - return ret; -} - -/* Convert ARM rounding mode to softfloat */ -int arm_rmode_to_sf(int rmode) -{ - switch (rmode) { - case FPROUNDING_TIEAWAY: - rmode = float_round_ties_away; - break; - case FPROUNDING_ODD: - /* FIXME: add support for TIEAWAY and ODD */ - qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n", - rmode); - case FPROUNDING_TIEEVEN: - default: - rmode = float_round_nearest_even; - break; - case FPROUNDING_POSINF: - rmode = float_round_up; - break; - case FPROUNDING_NEGINF: - rmode = float_round_down; - break; - case FPROUNDING_ZERO: - rmode = float_round_to_zero; - break; - } - return rmode; -} - -/* CRC helpers. - * The upper bytes of val (above the number specified by 'bytes') must have - * been zeroed out by the caller. - */ -uint32_t HELPER(crc32_arm)(uint32_t acc, uint32_t val, uint32_t bytes) -{ -#if 0 // FIXME - uint8_t buf[4]; - - stl_le_p(buf, val); - - /* zlib crc32 converts the accumulator and output to one's complement. */ - return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; -#endif - return 0; -} - -uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) -{ - uint8_t buf[4]; - - stl_le_p(buf, val); - - /* Linux crc32c converts the output to one's complement. */ - return crc32c(acc, buf, bytes) ^ 0xffffffff; -} diff --git a/qemu/target-arm/internals.h b/qemu/target-arm/internals.h deleted file mode 100644 index c1ad7574..00000000 --- a/qemu/target-arm/internals.h +++ /dev/null @@ -1,383 +0,0 @@ -/* - * QEMU ARM CPU -- internal functions and types - * - * Copyright (c) 2014 Linaro Ltd - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see - * - * - * This header defines functions, types, etc which need to be shared - * between different source files within target-arm/ but which are - * private to it and not required by the rest of QEMU. 
- */ - -#ifndef TARGET_ARM_INTERNALS_H -#define TARGET_ARM_INTERNALS_H - -static inline bool excp_is_internal(int excp) -{ - /* Return true if this exception number represents a QEMU-internal - * exception that will not be passed to the guest. - */ - return excp == EXCP_INTERRUPT - || excp == EXCP_HLT - || excp == EXCP_DEBUG - || excp == EXCP_HALTED - || excp == EXCP_EXCEPTION_EXIT - || excp == EXCP_KERNEL_TRAP - || excp == EXCP_STREX; -} - -/* Exception names for debug logging; note that not all of these - * precisely correspond to architectural exceptions. - */ -static const char * const excnames[] = { - NULL, - "Undefined Instruction", - "SVC", - "Prefetch Abort", - "Data Abort", - "IRQ", - "FIQ", - "Breakpoint", - "QEMU v7M exception exit", - "QEMU intercept of kernel commpage", - "QEMU intercept of STREX", - "Hypervisor Call", - "Hypervisor Trap", - "Secure Monitor Call", - "Virtual IRQ", - "Virtual FIQ", -}; - -static inline void arm_log_exception(int idx) -{ - if (qemu_loglevel_mask(CPU_LOG_INT)) { - const char *exc = NULL; - - if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { - exc = excnames[idx]; - } - if (!exc) { - exc = "unknown"; - } - qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); - } -} - -/* Scale factor for generic timers, ie number of ns per tick. - * This gives a 62.5MHz timer. - */ -#define GTIMER_SCALE 16 - -/* - * For AArch64, map a given EL to an index in the banked_spsr array. - */ -static inline unsigned int aarch64_banked_spsr_index(unsigned int el) -{ - static const unsigned int map[4] = { - 0, - 0, /* EL1. */ - 6, /* EL2. */ - 7, /* EL3. */ - }; - assert(el >= 1 && el <= 3); - return map[el]; -} - -int bank_number(int mode); -void switch_mode(CPUARMState *, int); -void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); -void arm_translate_init(struct uc_struct *uc); - -enum arm_fprounding { - FPROUNDING_TIEEVEN, - FPROUNDING_POSINF, - FPROUNDING_NEGINF, - FPROUNDING_ZERO, - FPROUNDING_TIEAWAY, - FPROUNDING_ODD -}; - -int arm_rmode_to_sf(int rmode); - -static inline void aarch64_save_sp(CPUARMState *env, int el) -{ - if (env->pstate & PSTATE_SP) { - env->sp_el[el] = env->xregs[31]; - } else { - env->sp_el[0] = env->xregs[31]; - } -} - -static inline void aarch64_restore_sp(CPUARMState *env, int el) -{ - if (env->pstate & PSTATE_SP) { - env->xregs[31] = env->sp_el[el]; - } else { - env->xregs[31] = env->sp_el[0]; - } -} - -static inline void update_spsel(CPUARMState *env, uint32_t imm) -{ - unsigned int cur_el = arm_current_el(env); - /* Update PSTATE SPSel bit; this requires us to update the - * working stack pointer in xregs[31]. - */ - if (!((imm ^ env->pstate) & PSTATE_SP)) { - return; - } - aarch64_save_sp(env, cur_el); - env->pstate = deposit32(env->pstate, 0, 1, imm); - - /* We rely on illegal updates to SPsel from EL0 to get trapped - * at translation time. - */ - assert(cur_el >= 1 && cur_el <= 3); - aarch64_restore_sp(env, cur_el); -} - -/* Return true if extended addresses are enabled. - * This is always the case if our translation regime is 64 bit, - * but depends on TTBCR.EAE for 32 bit. 
- */ -static inline bool extended_addresses_enabled(CPUARMState *env) -{ - return arm_el_is_aa64(env, 1) - || ((arm_feature(env, ARM_FEATURE_LPAE) - && (env->cp15.c2_control & TTBCR_EAE))); -} - -/* Valid Syndrome Register EC field values */ -enum arm_exception_class { - EC_UNCATEGORIZED = 0x00, - EC_WFX_TRAP = 0x01, - EC_CP15RTTRAP = 0x03, - EC_CP15RRTTRAP = 0x04, - EC_CP14RTTRAP = 0x05, - EC_CP14DTTRAP = 0x06, - EC_ADVSIMDFPACCESSTRAP = 0x07, - EC_FPIDTRAP = 0x08, - EC_CP14RRTTRAP = 0x0c, - EC_ILLEGALSTATE = 0x0e, - EC_AA32_SVC = 0x11, - EC_AA32_HVC = 0x12, - EC_AA32_SMC = 0x13, - EC_AA64_SVC = 0x15, - EC_AA64_HVC = 0x16, - EC_AA64_SMC = 0x17, - EC_SYSTEMREGISTERTRAP = 0x18, - EC_INSNABORT = 0x20, - EC_INSNABORT_SAME_EL = 0x21, - EC_PCALIGNMENT = 0x22, - EC_DATAABORT = 0x24, - EC_DATAABORT_SAME_EL = 0x25, - EC_SPALIGNMENT = 0x26, - EC_AA32_FPTRAP = 0x28, - EC_AA64_FPTRAP = 0x2c, - EC_SERROR = 0x2f, - EC_BREAKPOINT = 0x30, - EC_BREAKPOINT_SAME_EL = 0x31, - EC_SOFTWARESTEP = 0x32, - EC_SOFTWARESTEP_SAME_EL = 0x33, - EC_WATCHPOINT = 0x34, - EC_WATCHPOINT_SAME_EL = 0x35, - EC_AA32_BKPT = 0x38, - EC_VECTORCATCH = 0x3a, - EC_AA64_BKPT = 0x3c, -}; - -#define ARM_EL_EC_SHIFT 26 -#define ARM_EL_IL_SHIFT 25 -#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT) - -/* Utility functions for constructing various kinds of syndrome value. - * Note that in general we follow the AArch64 syndrome values; in a - * few cases the value in HSR for exceptions taken to AArch32 Hyp - * mode differs slightly, so if we ever implemented Hyp mode then the - * syndrome value would need some massaging on exception entry. - * (One example of this is that AArch64 defaults to IL bit set for - * exceptions which don't specifically indicate information about the - * trapping instruction, whereas AArch32 defaults to IL bit clear.) - */ -static inline uint32_t syn_uncategorized(void) -{ - return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL; -} - -static inline uint32_t syn_aa64_svc(uint32_t imm16) -{ - return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); -} - -static inline uint32_t syn_aa64_hvc(uint32_t imm16) -{ - return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); -} - -static inline uint32_t syn_aa64_smc(uint32_t imm16) -{ - return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); -} - -static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_thumb) -{ - return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff) - | (is_thumb ? 0 : ARM_EL_IL); -} - -static inline uint32_t syn_aa32_hvc(uint32_t imm16) -{ - return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); -} - -static inline uint32_t syn_aa32_smc(void) -{ - return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL; -} - -static inline uint32_t syn_aa64_bkpt(uint32_t imm16) -{ - return (((unsigned int)EC_AA64_BKPT) << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); -} - -static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_thumb) -{ - return (((unsigned int)EC_AA32_BKPT) << ARM_EL_EC_SHIFT) | (imm16 & 0xffff) - | (is_thumb ? 
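/* The common shape of the syn_* builders above: ESR/HSR syndrome values
 * carry the exception class in bits [31:26] and the IL (32-bit
 * instruction length) bit at bit 25, leaving [24:0] for class-specific
 * ISS fields. syn_build is an illustrative generic form: */
#include <stdint.h>

static uint32_t syn_build(unsigned ec, int il32, uint32_t iss)
{
    return ((uint32_t)ec << 26)
         | ((uint32_t)(il32 != 0) << 25)
         | (iss & 0x1ffffff);
}
/* syn_build(0x15, 1, imm16) reproduces syn_aa64_svc(imm16) above,
 * 0x15 being EC_AA64_SVC from the enum */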
0 : ARM_EL_IL); -} - -static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2, - int crn, int crm, int rt, - int isread) -{ - return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL - | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5) - | (crm << 1) | isread; -} - -static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2, - int crn, int crm, int rt, int isread, - bool is_thumb) -{ - return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT) - | (is_thumb ? 0 : ARM_EL_IL) - | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14) - | (crn << 10) | (rt << 5) | (crm << 1) | isread; -} - -static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2, - int crn, int crm, int rt, int isread, - bool is_thumb) -{ - return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT) - | (is_thumb ? 0 : ARM_EL_IL) - | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14) - | (crn << 10) | (rt << 5) | (crm << 1) | isread; -} - -static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm, - int rt, int rt2, int isread, - bool is_thumb) -{ - return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT) - | (is_thumb ? 0 : ARM_EL_IL) - | (cv << 24) | (cond << 20) | (opc1 << 16) - | (rt2 << 10) | (rt << 5) | (crm << 1) | isread; -} - -static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm, - int rt, int rt2, int isread, - bool is_thumb) -{ - return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT) - | (is_thumb ? 0 : ARM_EL_IL) - | (cv << 24) | (cond << 20) | (opc1 << 16) - | (rt2 << 10) | (rt << 5) | (crm << 1) | isread; -} - -static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_thumb) -{ - return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) - | (is_thumb ? 0 : ARM_EL_IL) - | (cv << 24) | (cond << 20); -} - -static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc) -{ - return (((unsigned int)EC_INSNABORT) << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) - | (ea << 9) | (s1ptw << 7) | fsc; -} - -static inline uint32_t syn_data_abort(int same_el, int ea, int cm, int s1ptw, - int wnr, int fsc) -{ - return (((unsigned int) EC_DATAABORT) << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) - | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc; -} - -static inline uint32_t syn_swstep(int same_el, int isv, int ex) -{ - return (((unsigned int)EC_SOFTWARESTEP) << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) - | (isv << 24) | (ex << 6) | 0x22; -} - -static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr) -{ - return (((unsigned int)EC_WATCHPOINT) << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) - | (cm << 8) | (wnr << 6) | 0x22; -} - -static inline uint32_t syn_breakpoint(int same_el) -{ - return (((unsigned int) EC_BREAKPOINT) << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) - | ARM_EL_IL | 0x22; -} - -/* Update a QEMU watchpoint based on the information the guest has set in the - * DBGWCR_EL1 and DBGWVR_EL1 registers. - */ -void hw_watchpoint_update(ARMCPU *cpu, int n); -/* Update the QEMU watchpoints for every guest watchpoint. This does a - * complete delete-and-reinstate of the QEMU watchpoint list and so is - * suitable for use after migration or on reset. - */ -void hw_watchpoint_update_all(ARMCPU *cpu); -/* Update a QEMU breakpoint based on the information the guest has set in the - * DBGBCR_EL1 and DBGBVR_EL1 registers. - */ -void hw_breakpoint_update(ARMCPU *cpu, int n); -/* Update the QEMU breakpoints for every guest breakpoint. 
This does a - * complete delete-and-reinstate of the QEMU breakpoint list and so is - * suitable for use after migration or on reset. - */ -void hw_breakpoint_update_all(ARMCPU *cpu); - -/* Callback function for when a watchpoint or breakpoint triggers. */ -void arm_debug_excp_handler(CPUState *cs); - -#ifdef CONFIG_USER_ONLY -static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type) -{ - return false; -} -#else -/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */ -bool arm_is_psci_call(ARMCPU *cpu, int excp_type); -/* Actually handle a PSCI call */ -void arm_handle_psci_call(ARMCPU *cpu); -#endif - -#endif diff --git a/qemu/target-arm/op_helper.c b/qemu/target-arm/op_helper.c deleted file mode 100644 index 4e28af75..00000000 --- a/qemu/target-arm/op_helper.c +++ /dev/null @@ -1,842 +0,0 @@ -/* - * ARM helper routines - * - * Copyright (c) 2005-2007 CodeSourcery, LLC - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#include "cpu.h" -#include "exec/helper-proto.h" -#include "internals.h" -#include "exec/cpu_ldst.h" - -#define SIGNBIT (uint32_t)0x80000000 -#define SIGNBIT64 ((uint64_t)1 << 63) - -static void raise_exception(CPUARMState *env, int tt) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - CPUState *cs = CPU(cpu); - - cs->exception_index = tt; - cpu_loop_exit(cs); -} - -uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def, - uint32_t rn, uint32_t maxindex) -{ - uint32_t val; - uint32_t tmp; - int index; - int shift; - uint64_t *table; - table = (uint64_t *)&env->vfp.regs[rn]; - val = 0; - for (shift = 0; shift < 32; shift += 8) { - index = (ireg >> shift) & 0xff; - if (index < maxindex) { - tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff; - val |= tmp << shift; - } else { - val |= def & (0xffU << shift); - } - } - return val; -} - -#if !defined(CONFIG_USER_ONLY) - -/* try to fill the TLB and return an exception if error. If retaddr is - * NULL, it means that the function was called in C code (i.e. 
not - * from generated code or from helper.c) - */ -void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, - uintptr_t retaddr) -{ - int ret; - - ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); - if (unlikely(ret)) { - ARMCPU *cpu = ARM_CPU(cs->uc, cs); - CPUARMState *env = &cpu->env; - - if (retaddr) { - /* now we have a real cpu fault */ - cpu_restore_state(cs, retaddr); - } - raise_exception(env, cs->exception_index); - } -} -#endif - -uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b) -{ - uint32_t res = a + b; - if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) - env->QF = 1; - return res; -} - -uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b) -{ - uint32_t res = a + b; - if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) { - env->QF = 1; - res = ~(((int32_t)a >> 31) ^ SIGNBIT); - } - return res; -} - -uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b) -{ - uint32_t res = a - b; - if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) { - env->QF = 1; - res = ~(((int32_t)a >> 31) ^ SIGNBIT); - } - return res; -} - -uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val) -{ - uint32_t res; - if (val >= 0x40000000) { - res = ~SIGNBIT; - env->QF = 1; - } else if (val <= (int32_t)0xc0000000) { - res = SIGNBIT; - env->QF = 1; - } else { - res = (uint32_t)val << 1; - } - return res; -} - -uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b) -{ - uint32_t res = a + b; - if (res < a) { - env->QF = 1; - res = ~0; - } - return res; -} - -uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b) -{ - uint32_t res = a - b; - if (res > a) { - env->QF = 1; - res = 0; - } - return res; -} - -/* Signed saturation. */ -static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift) -{ - int32_t top; - uint32_t mask; - - top = val >> shift; - mask = (1u << shift) - 1; - if (top > 0) { - env->QF = 1; - return mask; - } else if (top < -1) { - env->QF = 1; - return ~mask; - } - return val; -} - -/* Unsigned saturation. */ -static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift) -{ - uint32_t max; - - max = (1u << shift) - 1; - if (val < 0) { - env->QF = 1; - return 0; - } else if (val > max) { - env->QF = 1; - return max; - } - return val; -} - -/* Signed saturate. */ -uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift) -{ - return do_ssat(env, x, shift); -} - -/* Dual halfword signed saturate. */ -uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift) -{ - uint32_t res; - - res = (uint16_t)do_ssat(env, (int16_t)x, shift); - res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16; - return res; -} - -/* Unsigned saturate. */ -uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift) -{ - return do_usat(env, x, shift); -} - -/* Dual halfword unsigned saturate. */ -uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift) -{ - uint32_t res; - - res = (uint16_t)do_usat(env, (int16_t)x, shift); - res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16; - return res; -} - -void HELPER(wfi)(CPUARMState *env) -{ - CPUState *cs = CPU(arm_env_get_cpu(env)); - - cs->exception_index = EXCP_HLT; - cs->halted = 1; - cpu_loop_exit(cs); -} - -void HELPER(wfe)(CPUARMState *env) -{ - CPUState *cs = CPU(arm_env_get_cpu(env)); - - /* Don't actually halt the CPU, just yield back to top - * level loop - */ - cs->exception_index = EXCP_YIELD; - cpu_loop_exit(cs); -} - -/* Raise an internal-to-QEMU exception. 
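/* Worked examples for do_ssat above (the QF sticky-flag bookkeeping is
 * omitted here): the shift argument is the saturation bit position, so
 * shift == 7 clamps to the signed 8-bit range [-128, 127]. */
#include <stdint.h>

static int32_t ssat_ref(int32_t val, int shift)
{
    int32_t top = val >> shift;
    int32_t mask = (1 << shift) - 1;
    if (top > 0) {
        return mask;        /* clamp to  2^shift - 1 */
    } else if (top < -1) {
        return ~mask;       /* clamp to -2^shift     */
    }
    return val;
}
/* ssat_ref(300, 7) == 127, ssat_ref(-300, 7) == -128, ssat_ref(5, 7) == 5 */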
This is limited to only - * those EXCP values which are special cases for QEMU to interrupt - * execution and not to be used for exceptions which are passed to - * the guest (those must all have syndrome information and thus should - * use exception_with_syndrome). - */ -void HELPER(exception_internal)(CPUARMState *env, uint32_t excp) -{ - CPUState *cs = CPU(arm_env_get_cpu(env)); - - assert(excp_is_internal(excp)); - cs->exception_index = excp; - cpu_loop_exit(cs); -} - -/* Raise an exception with the specified syndrome register value */ -void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp, - uint32_t syndrome) -{ - CPUState *cs = CPU(arm_env_get_cpu(env)); - - assert(!excp_is_internal(excp)); - cs->exception_index = excp; // qq - env->exception.syndrome = syndrome; - cpu_loop_exit(cs); -} - -uint32_t HELPER(cpsr_read)(CPUARMState *env) -{ - return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED); -} - -void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask) -{ - cpsr_write(env, val, mask); -} - -/* Access to user mode registers from privileged modes. */ -uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno) -{ - uint32_t val; - - if (regno == 13) { - val = env->banked_r13[0]; - } else if (regno == 14) { - val = env->banked_r14[0]; - } else if (regno >= 8 - && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { - val = env->usr_regs[regno - 8]; - } else { - val = env->regs[regno]; - } - return val; -} - -void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val) -{ - if (regno == 13) { - env->banked_r13[0] = val; - } else if (regno == 14) { - env->banked_r14[0] = val; - } else if (regno >= 8 - && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { - env->usr_regs[regno - 8] = val; - } else { - env->regs[regno] = val; - } -} - -void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome) -{ - const ARMCPRegInfo *ri = rip; - - if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14 - && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) { - env->exception.syndrome = syndrome; - raise_exception(env, EXCP_UDEF); - } - - if (!ri->accessfn) { - return; - } - - switch (ri->accessfn(env, ri)) { - case CP_ACCESS_OK: - return; - case CP_ACCESS_TRAP: - env->exception.syndrome = syndrome; - break; - case CP_ACCESS_TRAP_UNCATEGORIZED: - env->exception.syndrome = syn_uncategorized(); - break; - default: - g_assert_not_reached(); - } - raise_exception(env, EXCP_UDEF); -} - -void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value) -{ - const ARMCPRegInfo *ri = rip; - - ri->writefn(env, ri, value); -} - -uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip) -{ - const ARMCPRegInfo *ri = rip; - - return ri->readfn(env, ri); -} - -void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value) -{ - const ARMCPRegInfo *ri = rip; - - ri->writefn(env, ri, value); -} - -uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip) -{ - const ARMCPRegInfo *ri = rip; - - return ri->readfn(env, ri); -} - -void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm) -{ - /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set. - * Note that SPSel is never OK from EL0; we rely on handle_msr_i() - * to catch that case at translate time. 
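A concrete illustration of the DAIFSet/DAIFClear arithmetic in the switch that follows (minimal standalone sketch; the PSTATE_* constants are restated here rather than pulled from cpu.h):

#include <stdint.h>
#include <assert.h>

#define PSTATE_F (1u << 6)
#define PSTATE_I (1u << 7)
#define PSTATE_A (1u << 8)
#define PSTATE_D (1u << 9)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)

int main(void)
{
    uint32_t daif = 0;
    /* MSR DAIFSet, #0x3: immediate bit 0 maps to F, bit 1 to I */
    daif |= (0x3u << 6) & PSTATE_DAIF;
    assert(daif == (PSTATE_I | PSTATE_F));
    /* MSR DAIFClear, #0x2: clears only the I mask, F stays set */
    daif &= ~((0x2u << 6) & PSTATE_DAIF);
    assert(daif == PSTATE_F);
    return 0;
}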
- */ - if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) { - raise_exception(env, EXCP_UDEF); - } - - switch (op) { - case 0x05: /* SPSel */ - update_spsel(env, imm); - break; - case 0x1e: /* DAIFSet */ - env->daif |= (imm << 6) & PSTATE_DAIF; - break; - case 0x1f: /* DAIFClear */ - env->daif &= ~((imm << 6) & PSTATE_DAIF); - break; - default: - g_assert_not_reached(); - } -} - -void HELPER(clear_pstate_ss)(CPUARMState *env) -{ - env->pstate &= ~PSTATE_SS; -} - -void HELPER(pre_hvc)(CPUARMState *env) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - int cur_el = arm_current_el(env); - /* FIXME: Use actual secure state. */ - bool secure = false; - bool undef; - - if (arm_is_psci_call(cpu, EXCP_HVC)) { - /* If PSCI is enabled and this looks like a valid PSCI call then - * that overrides the architecturally mandated HVC behaviour. - */ - return; - } - - if (!arm_feature(env, ARM_FEATURE_EL2)) { - /* If EL2 doesn't exist, HVC always UNDEFs */ - undef = true; - } else if (arm_feature(env, ARM_FEATURE_EL3)) { - /* EL3.HCE has priority over EL2.HCD. */ - undef = !(env->cp15.scr_el3 & SCR_HCE); - } else { - undef = env->cp15.hcr_el2 & HCR_HCD; - } - - /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state. - * For ARMv8/AArch64, HVC is allowed in EL3. - * Note that we've already trapped HVC from EL0 at translation - * time. - */ - if (secure && (!is_a64(env) || cur_el == 1)) { - undef = true; - } - - if (undef) { - env->exception.syndrome = syn_uncategorized(); - raise_exception(env, EXCP_UDEF); - } -} - -void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) -{ - ARMCPU *cpu = arm_env_get_cpu(env); - int cur_el = arm_current_el(env); - bool secure = arm_is_secure(env); - bool smd = env->cp15.scr_el3 & SCR_SMD; - /* On ARMv8 AArch32, SMD only applies to NS state. - * On ARMv7 SMD only applies to NS state and only if EL2 is available. - * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check - * the EL2 condition here. - */ - bool undef = is_a64(env) ? smd : (!secure && smd); - - if (arm_is_psci_call(cpu, EXCP_SMC)) { - /* If PSCI is enabled and this looks like a valid PSCI call then - * that overrides the architecturally mandated SMC behaviour. - */ - return; - } - - if (!arm_feature(env, ARM_FEATURE_EL3)) { - /* If we have no EL3 then SMC always UNDEFs */ - undef = true; - } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) { - /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */ - env->exception.syndrome = syndrome; - raise_exception(env, EXCP_HYP_TRAP); - } - - if (undef) { - env->exception.syndrome = syn_uncategorized(); - raise_exception(env, EXCP_UDEF); - } -} - -void HELPER(exception_return)(CPUARMState *env) -{ - int cur_el = arm_current_el(env); - unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el); - uint32_t spsr = env->banked_spsr[spsr_idx]; - int new_el, i; - - aarch64_save_sp(env, cur_el); - - env->exclusive_addr = -1; - - /* We must squash the PSTATE.SS bit to zero unless both of the - * following hold: - * 1. debug exceptions are currently disabled - * 2. singlestep will be active in the EL we return to - * We check 1 here and 2 after we've done the pstate/cpsr write() to - * transition to the EL we're going to. - */ - if (arm_generate_debug_exceptions(env)) { - spsr &= ~PSTATE_SS; - } - - if (spsr & PSTATE_nRW) { - /* TODO: We currently assume EL1/2/3 are running in AArch64. 
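The legality checks that the AArch64 branch below applies to the restored SPSR can be modelled in isolation (hypothetical standalone helper; extract32 is restated so the sketch compiles on its own):

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

/* True if an ERET with this SPSR would take the illegal_return path:
 * target EL in bits [3:2], reserved M[1] bit, SP select in bit 0. */
static bool is_illegal_return(uint32_t spsr, int cur_el, bool have_el2)
{
    int new_el = extract32(spsr, 2, 2);

    if (new_el > cur_el || (new_el == 2 && !have_el2)) {
        return true;        /* higher or unimplemented exception level */
    }
    if (extract32(spsr, 1, 1)) {
        return true;        /* reserved M[1] bit set */
    }
    if (new_el == 0 && (spsr & 1)) {
        return true;        /* return to EL0 with SP_ELx selected */
    }
    return false;
}

int main(void)
{
    assert(!is_illegal_return(0x5, 1, false)); /* EL1h from EL1: legal */
    assert(is_illegal_return(0x9, 1, false));  /* EL2h without EL2 */
    assert(is_illegal_return(0x1, 1, false));  /* EL0 with SP bit set */
    return 0;
}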
*/ - env->aarch64 = 0; - new_el = 0; - env->uncached_cpsr = 0x10; - cpsr_write(env, spsr, ~0); - if (!arm_singlestep_active(env)) { - env->uncached_cpsr &= ~PSTATE_SS; - } - for (i = 0; i < 15; i++) { - env->regs[i] = env->xregs[i]; - } - - env->regs[15] = env->elr_el[1] & ~0x1; - } else { - new_el = extract32(spsr, 2, 2); - if (new_el > cur_el - || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) { - /* Disallow return to an EL which is unimplemented or higher - * than the current one. - */ - goto illegal_return; - } - if (extract32(spsr, 1, 1)) { - /* Return with reserved M[1] bit set */ - goto illegal_return; - } - if (new_el == 0 && (spsr & PSTATE_SP)) { - /* Return to EL0 with M[0] bit set */ - goto illegal_return; - } - env->aarch64 = 1; - pstate_write(env, spsr); - if (!arm_singlestep_active(env)) { - env->pstate &= ~PSTATE_SS; - } - aarch64_restore_sp(env, new_el); - env->pc = env->elr_el[cur_el]; - } - - return; - -illegal_return: - /* Illegal return events of various kinds have architecturally - * mandated behaviour: - * restore NZCV and DAIF from SPSR_ELx - * set PSTATE.IL - * restore PC from ELR_ELx - * no change to exception level, execution state or stack pointer - */ - env->pstate |= PSTATE_IL; - env->pc = env->elr_el[cur_el]; - spsr &= PSTATE_NZCV | PSTATE_DAIF; - spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF); - pstate_write(env, spsr); - if (!arm_singlestep_active(env)) { - env->pstate &= ~PSTATE_SS; - } -} - -/* Return true if the linked breakpoint entry lbn passes its checks */ -static bool linked_bp_matches(ARMCPU *cpu, int lbn) -{ - CPUARMState *env = &cpu->env; - uint64_t bcr = env->cp15.dbgbcr[lbn]; - int brps = extract32(cpu->dbgdidr, 24, 4); - int ctx_cmps = extract32(cpu->dbgdidr, 20, 4); - int bt; - uint32_t contextidr; - - /* Links to unimplemented or non-context aware breakpoints are - * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or - * as if linked to an UNKNOWN context-aware breakpoint (in which - * case DBGWCR_EL1.LBN must indicate that breakpoint). - * We choose the former. - */ - if (lbn > brps || lbn < (brps - ctx_cmps)) { - return false; - } - - bcr = env->cp15.dbgbcr[lbn]; - - if (extract64(bcr, 0, 1) == 0) { - /* Linked breakpoint disabled : generate no events */ - return false; - } - - bt = extract64(bcr, 20, 4); - - /* We match the whole register even if this is AArch32 using the - * short descriptor format (in which case it holds both PROCID and ASID), - * since we don't implement the optional v7 context ID masking. - */ - contextidr = extract64(env->cp15.contextidr_el1, 0, 32); - - switch (bt) { - case 3: /* linked context ID match */ - if (arm_current_el(env) > 1) { - /* Context matches never fire in EL2 or (AArch64) EL3 */ - return false; - } - return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32)); - case 5: /* linked address mismatch (reserved in AArch64) */ - case 9: /* linked VMID match (reserved if no EL2) */ - case 11: /* linked context ID and VMID match (reserved if no EL2) */ - default: - /* Links to Unlinked context breakpoints must generate no - * events; we choose to do the same for reserved values too. 
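(Concretely: with brps and ctx_cmps read from DBGDIDR as above, the context-aware comparators are the highest-numbered breakpoint slots, indices brps - ctx_cmps through brps, which is exactly the range that the lbn bounds check at the top of this function accepts.)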
- */ - return false; - } - - return false; -} - -static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) -{ - CPUARMState *env = &cpu->env; - uint64_t cr; - int pac, hmc, ssc, wt, lbn; - /* TODO: check against CPU security state when we implement TrustZone */ - bool is_secure = false; - - if (is_wp) { - if (!env->cpu_watchpoint[n] - || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) { - return false; - } - cr = env->cp15.dbgwcr[n]; - } else { - uint64_t pc = is_a64(env) ? env->pc : env->regs[15]; - - if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) { - return false; - } - cr = env->cp15.dbgbcr[n]; - } - /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is - * enabled and that the address and access type match; for breakpoints - * we know the address matched; check the remaining fields, including - * linked breakpoints. We rely on WCR and BCR having the same layout - * for the LBN, SSC, HMC, PAC/PMC and is-linked fields. - * Note that some combinations of {PAC, HMC, SSC} are reserved and - * must act either like some valid combination or as if the watchpoint - * were disabled. We choose the former, and use this together with - * the fact that EL3 must always be Secure and EL2 must always be - * Non-Secure to simplify the code slightly compared to the full - * table in the ARM ARM. - */ - pac = extract64(cr, 1, 2); - hmc = extract64(cr, 13, 1); - ssc = extract64(cr, 14, 2); - - switch (ssc) { - case 0: - break; - case 1: - case 3: - if (is_secure) { - return false; - } - break; - case 2: - if (!is_secure) { - return false; - } - break; - } - - /* TODO: this is not strictly correct because the LDRT/STRT/LDT/STT - * "unprivileged access" instructions should match watchpoints as if - * they were accesses done at EL0, even if the CPU is at EL1 or higher. - * Implementing this would require reworking the core watchpoint code - * to plumb the mmu_idx through to this point. Luckily Linux does not - * rely on this behaviour currently. - * For breakpoints we do want to use the current CPU state. - */ - switch (arm_current_el(env)) { - case 3: - case 2: - if (!hmc) { - return false; - } - break; - case 1: - if (extract32(pac, 0, 1) == 0) { - return false; - } - break; - case 0: - if (extract32(pac, 1, 1) == 0) { - return false; - } - break; - default: - g_assert_not_reached(); - } - - wt = extract64(cr, 20, 1); - lbn = extract64(cr, 16, 4); - - if (wt && !linked_bp_matches(cpu, lbn)) { - return false; - } - - return true; -} - -static bool check_watchpoints(ARMCPU *cpu) -{ - CPUARMState *env = &cpu->env; - int n; - - /* If watchpoints are disabled globally or we can't take debug - * exceptions here then watchpoint firings are ignored. - */ - if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 - || !arm_generate_debug_exceptions(env)) { - return false; - } - - for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) { - if (bp_wp_matches(cpu, n, true)) { - return true; - } - } - return false; -} - -static bool check_breakpoints(ARMCPU *cpu) -{ - CPUARMState *env = &cpu->env; - int n; - - /* If breakpoints are disabled globally or we can't take debug - * exceptions here then breakpoint firings are ignored. 
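The {PAC, HMC} privilege test in bp_wp_matches() above reduces to a small decision table; a standalone model (illustrative helper name, reserved SSC combinations deliberately not modelled):

#include <stdbool.h>
#include <assert.h>

/* True if a debug comparator with the given control fields is allowed
 * to fire at current_el, mirroring the switch in bp_wp_matches(). */
static bool priv_match(int current_el, int hmc, int pac)
{
    switch (current_el) {
    case 3:
    case 2:
        return hmc != 0;            /* EL2/EL3 matching needs HMC */
    case 1:
        return (pac & 1) != 0;      /* PAC bit 0: privileged matches */
    case 0:
        return (pac & 2) != 0;      /* PAC bit 1: user/EL0 matches */
    default:
        return false;
    }
}

int main(void)
{
    assert(priv_match(0, 0, 2));    /* EL0-only watchpoint fires at EL0 */
    assert(!priv_match(1, 0, 2));   /* ...but not at EL1 */
    assert(priv_match(2, 1, 0));    /* hypervisor match requires HMC */
    return 0;
}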
- */ - if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 - || !arm_generate_debug_exceptions(env)) { - return false; - } - - for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) { - if (bp_wp_matches(cpu, n, false)) { - return true; - } - } - return false; -} - -void arm_debug_excp_handler(CPUState *cs) -{ - /* Called by core code when a watchpoint or breakpoint fires; - * need to check which one and raise the appropriate exception. - */ - ARMCPU *cpu = ARM_CPU(cs->uc, cs); - CPUARMState *env = &cpu->env; - CPUWatchpoint *wp_hit = cs->watchpoint_hit; - - if (wp_hit) { - if (wp_hit->flags & BP_CPU) { - cs->watchpoint_hit = NULL; - if (check_watchpoints(cpu)) { - bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0; - bool same_el = arm_debug_target_el(env) == arm_current_el(env); - - env->exception.syndrome = syn_watchpoint(same_el, 0, wnr); - if (extended_addresses_enabled(env)) { - env->exception.fsr = (1 << 9) | 0x22; - } else { - env->exception.fsr = 0x2; - } - env->exception.vaddress = wp_hit->hitaddr; - raise_exception(env, EXCP_DATA_ABORT); - } else { - cpu_resume_from_signal(cs, NULL); - } - } - } else { - if (check_breakpoints(cpu)) { - bool same_el = (arm_debug_target_el(env) == arm_current_el(env)); - env->exception.syndrome = syn_breakpoint(same_el); - if (extended_addresses_enabled(env)) { - env->exception.fsr = (1 << 9) | 0x22; - } else { - env->exception.fsr = 0x2; - } - /* FAR is UNKNOWN, so doesn't need setting */ - raise_exception(env, EXCP_PREFETCH_ABORT); - } - } -} - -/* ??? Flag setting arithmetic is awkward because we need to do comparisons. - The only way to do that in TCG is a conditional branch, which clobbers - all our temporaries. For now implement these as helper functions. */ - -/* Similarly for variable shift instructions. */ - -uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i) -{ - int shift = i & 0xff; - if (shift >= 32) { - if (shift == 32) - env->CF = x & 1; - else - env->CF = 0; - return 0; - } else if (shift != 0) { - env->CF = (x >> (32 - shift)) & 1; - return x << shift; - } - return x; -} - -uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i) -{ - int shift = i & 0xff; - if (shift >= 32) { - if (shift == 32) - env->CF = (x >> 31) & 1; - else - env->CF = 0; - return 0; - } else if (shift != 0) { - env->CF = (x >> (shift - 1)) & 1; - return x >> shift; - } - return x; -} - -uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i) -{ - int shift = i & 0xff; - if (shift >= 32) { - env->CF = (x >> 31) & 1; - return (int32_t)x >> 31; - } else if (shift != 0) { - env->CF = (x >> (shift - 1)) & 1; - return (int32_t)x >> shift; - } - return x; -} - -uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i) -{ - int shift1, shift; - shift1 = i & 0xff; - shift = shift1 & 0x1f; - if (shift == 0) { - if (shift1 != 0) - env->CF = (x >> 31) & 1; - return x; - } else { - env->CF = (x >> (shift - 1)) & 1; - return ((uint32_t)x >> shift) | (x << (32 - shift)); - } -} diff --git a/qemu/target-arm/psci.c b/qemu/target-arm/psci.c deleted file mode 100644 index 5b305b1b..00000000 --- a/qemu/target-arm/psci.c +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright (C) 2014 - Linaro - * Author: Rob Herring - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
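The variable-shift helpers at the end of op_helper.c above must also report the shifter carry-out alongside the result; a plain-C restatement of HELPER(shl_cc) (standalone sketch, with CF modelled as an out-parameter instead of env->CF):

#include <stdint.h>
#include <assert.h>

static uint32_t shl_cc(uint32_t x, uint32_t i, uint32_t *cf)
{
    int shift = i & 0xff;           /* ARM uses the low byte only */

    if (shift >= 32) {
        *cf = (shift == 32) ? (x & 1) : 0;
        return 0;
    } else if (shift != 0) {
        *cf = (x >> (32 - shift)) & 1;  /* last bit shifted out */
        return x << shift;
    }
    return x;                       /* shift 0: CF is left unchanged */
}

int main(void)
{
    uint32_t cf = 0;
    assert(shl_cc(0x80000001u, 1, &cf) == 2 && cf == 1);
    assert(shl_cc(1, 32, &cf) == 0 && cf == 1);
    assert(shl_cc(1, 33, &cf) == 0 && cf == 0);
    return 0;
}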
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ -#include -#include -#include -#include -#include -#include "internals.h" - -bool arm_is_psci_call(ARMCPU *cpu, int excp_type) -{ - /* Return true if the r0/x0 value indicates a PSCI call and - * the exception type matches the configured PSCI conduit. This is - * called before the SMC/HVC instruction is executed, to decide whether - * we should treat it as a PSCI call or with the architecturally - * defined behaviour for an SMC or HVC (which might be UNDEF or trap - * to EL2 or to EL3). - */ - CPUARMState *env = &cpu->env; - uint64_t param = is_a64(env) ? env->xregs[0] : env->regs[0]; - - switch (excp_type) { - case EXCP_HVC: - if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_HVC) { - return false; - } - break; - case EXCP_SMC: - if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { - return false; - } - break; - default: - return false; - } - - switch (param) { - case QEMU_PSCI_0_2_FN_PSCI_VERSION: - case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: - case QEMU_PSCI_0_2_FN_AFFINITY_INFO: - case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: - case QEMU_PSCI_0_2_FN_SYSTEM_RESET: - case QEMU_PSCI_0_2_FN_SYSTEM_OFF: - case QEMU_PSCI_0_1_FN_CPU_ON: - case QEMU_PSCI_0_2_FN_CPU_ON: - case QEMU_PSCI_0_2_FN64_CPU_ON: - case QEMU_PSCI_0_1_FN_CPU_OFF: - case QEMU_PSCI_0_2_FN_CPU_OFF: - case QEMU_PSCI_0_1_FN_CPU_SUSPEND: - case QEMU_PSCI_0_2_FN_CPU_SUSPEND: - case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: - case QEMU_PSCI_0_1_FN_MIGRATE: - case QEMU_PSCI_0_2_FN_MIGRATE: - return true; - default: - return false; - } -} - -void arm_handle_psci_call(ARMCPU *cpu) -{ - /* - * This function partially implements the logic for dispatching Power State - * Coordination Interface (PSCI) calls (as described in ARM DEN 0022B.b), - * to the extent required for bringing up and taking down secondary cores, - * and for handling reset and poweroff requests. - * Additional information about the calling convention used is available in - * the document 'SMC Calling Convention' (ARM DEN 0028) - */ - CPUState *cs = CPU(cpu); - CPUARMState *env = &cpu->env; - uint64_t param[4]; - uint64_t context_id, mpidr; - target_ulong entry; - int32_t ret = 0; - int i; - - for (i = 0; i < 4; i++) { - /* - * All PSCI functions take explicit 32-bit or native int sized - * arguments so we can simply zero-extend all arguments regardless - * of which exact function we are about to call. - */ - param[i] = is_a64(env) ? 
env->xregs[i] : env->regs[i]; - } - - if ((param[0] & QEMU_PSCI_0_2_64BIT) && !is_a64(env)) { - ret = QEMU_PSCI_RET_INVALID_PARAMS; - goto err; - } - - switch (param[0]) { - CPUState *target_cpu_state; - ARMCPU *target_cpu; - CPUClass *target_cpu_class; - - case QEMU_PSCI_0_2_FN_PSCI_VERSION: - ret = QEMU_PSCI_0_2_RET_VERSION_0_2; - break; - case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: - ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */ - break; - case QEMU_PSCI_0_2_FN_AFFINITY_INFO: - case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: - mpidr = param[1]; - - switch (param[2]) { - case 0: - target_cpu_state = qemu_get_cpu(env->uc, mpidr & 0xff); - if (!target_cpu_state) { - ret = QEMU_PSCI_RET_INVALID_PARAMS; - break; - } - target_cpu = ARM_CPU(env->uc, target_cpu_state); - ret = target_cpu->powered_off ? 1 : 0; - break; - default: - /* Everything above affinity level 0 is always on. */ - ret = 0; - } - break; - case QEMU_PSCI_0_2_FN_SYSTEM_RESET: - qemu_system_reset_request(env->uc); - /* QEMU reset and shutdown are async requests, but PSCI - * mandates that we never return from the reset/shutdown - * call, so power the CPU off now so it doesn't execute - * anything further. - */ - goto cpu_off; - case QEMU_PSCI_0_2_FN_SYSTEM_OFF: - qemu_system_shutdown_request(); - goto cpu_off; - case QEMU_PSCI_0_1_FN_CPU_ON: - case QEMU_PSCI_0_2_FN_CPU_ON: - case QEMU_PSCI_0_2_FN64_CPU_ON: - mpidr = param[1]; - entry = param[2]; - context_id = param[3]; - - /* change to the cpu we are powering up */ - target_cpu_state = qemu_get_cpu(env->uc, mpidr & 0xff); - if (!target_cpu_state) { - ret = QEMU_PSCI_RET_INVALID_PARAMS; - break; - } - target_cpu = ARM_CPU(env->uc, target_cpu_state); - if (!target_cpu->powered_off) { - ret = QEMU_PSCI_RET_ALREADY_ON; - break; - } - target_cpu_class = CPU_GET_CLASS(env->uc, target_cpu); - - /* Initialize the cpu we are turning on */ - cpu_reset(target_cpu_state); - target_cpu->powered_off = false; - target_cpu_state->halted = 0; - - /* - * The PSCI spec mandates that newly brought up CPUs enter the - * exception level of the caller in the same execution mode as - * the caller, with context_id in x0/r0, respectively. 
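The CPU_ON entry-point handling a few lines below treats bit 0 of the entry address differently per execution state; a minimal standalone model (the VCpu type and the -2 return code are placeholders for the QEMU structures and QEMU_PSCI_RET_INVALID_PARAMS):

#include <stdint.h>
#include <assert.h>

typedef struct {
    uint64_t x0;        /* AArch64 first argument register */
    uint32_t r0;        /* AArch32 first argument register */
    int thumb;
    int aarch64;
} VCpu;

static int32_t psci_cpu_on_entry(VCpu *target, uint64_t entry,
                                 uint64_t context_id)
{
    if (target->aarch64) {
        if (entry & 1) {
            return -2;                  /* misaligned AArch64 entry */
        }
        target->x0 = context_id;
    } else {
        target->r0 = (uint32_t)context_id;
        target->thumb = entry & 1;      /* bit 0 selects Thumb state */
    }
    return 0;
}

int main(void)
{
    VCpu cpu32 = { 0, 0, 0, 0 };
    assert(psci_cpu_on_entry(&cpu32, 0x8001, 0x42) == 0);
    assert(cpu32.thumb == 1 && cpu32.r0 == 0x42);

    VCpu cpu64 = { 0, 0, 0, 1 };
    assert(psci_cpu_on_entry(&cpu64, 0x8001, 0x42) == -2);
    return 0;
}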
- * - * For now, it is sufficient to assert() that CPUs come out of - * reset in the same mode as the calling CPU, since we only - * implement EL1, which means that - * (a) there is no EL2 for the calling CPU to trap into to change - * its state - * (b) the newly brought up CPU enters EL1 immediately after coming - * out of reset in the default state - */ - assert(is_a64(env) == is_a64(&target_cpu->env)); - if (is_a64(env)) { - if (entry & 1) { - ret = QEMU_PSCI_RET_INVALID_PARAMS; - break; - } - target_cpu->env.xregs[0] = context_id; - } else { - target_cpu->env.regs[0] = context_id; - target_cpu->env.thumb = entry & 1; - } - target_cpu_class->set_pc(target_cpu_state, entry); - - ret = 0; - break; - case QEMU_PSCI_0_1_FN_CPU_OFF: - case QEMU_PSCI_0_2_FN_CPU_OFF: - goto cpu_off; - case QEMU_PSCI_0_1_FN_CPU_SUSPEND: - case QEMU_PSCI_0_2_FN_CPU_SUSPEND: - case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: - /* Affinity levels are not supported in QEMU */ - if (param[1] & 0xfffe0000) { - ret = QEMU_PSCI_RET_INVALID_PARAMS; - break; - } - /* Powerdown is not supported, we always go into WFI */ - if (is_a64(env)) { - env->xregs[0] = 0; - } else { - env->regs[0] = 0; - } - helper_wfi(env); - break; - case QEMU_PSCI_0_1_FN_MIGRATE: - case QEMU_PSCI_0_2_FN_MIGRATE: - ret = QEMU_PSCI_RET_NOT_SUPPORTED; - break; - default: - g_assert_not_reached(); - } - -err: - if (is_a64(env)) { - env->xregs[0] = ret; - } else { - env->regs[0] = ret; - } - return; - -cpu_off: - cpu->powered_off = true; - cs->halted = 1; - cs->exception_index = EXCP_HLT; - cpu_loop_exit(cs); - /* notreached */ -} diff --git a/qemu/target-arm/translate.c b/qemu/target-arm/translate.c deleted file mode 100644 index 12cdd71b..00000000 --- a/qemu/target-arm/translate.c +++ /dev/null @@ -1,11639 +0,0 @@ -/* - * ARM translation - * - * Copyright (c) 2003 Fabrice Bellard - * Copyright (c) 2005-2007 CodeSourcery - * Copyright (c) 2007 OpenedHand, Ltd. - * Copyright (c) 2015 Nguyen Anh Quynh (Unicorn engine) - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ -#include -#include -#include -#include -#include "unicorn/platform.h" - -#include "cpu.h" -#include "internals.h" -#include "tcg-op.h" -#include "qemu/log.h" -#include "qemu/bitops.h" -#include "arm_ldst.h" - -#include "exec/helper-proto.h" -#include "exec/helper-gen.h" - -#include "exec/gen-icount.h" - -#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T) -#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5) -/* currently all emulated v5 cores are also v5TE, so don't bother */ -#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5) -#define ENABLE_ARCH_5J 0 -#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6) -#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K) -#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2) -#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7) -#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8) - -#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0) - -#include "translate.h" - -#if defined(CONFIG_USER_ONLY) -#define IS_USER(s) 1 -#else -#define IS_USER(s) (s->user) -#endif - -#ifdef CONFIG_USER_ONLY -static TCGv_i64 cpu_exclusive_test; -static TCGv_i32 cpu_exclusive_info; -#endif - - -static const char *regnames[] = - { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", - "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" }; - -/* initialize TCG globals. */ -void arm_translate_init(struct uc_struct *uc) -{ - int i; - TCGContext *tcg_ctx = uc->tcg_ctx; - - tcg_ctx->cpu_env = tcg_global_reg_new_ptr(uc->tcg_ctx, TCG_AREG0, "env"); - - for (i = 0; i < 16; i++) { - tcg_ctx->cpu_R[i] = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUARMState, regs[i]), - regnames[i]); - } - tcg_ctx->cpu_CF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, CF), "CF"); - tcg_ctx->cpu_NF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, NF), "NF"); - tcg_ctx->cpu_VF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, VF), "VF"); - tcg_ctx->cpu_ZF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, ZF), "ZF"); - - tcg_ctx->cpu_exclusive_addr = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUARMState, exclusive_addr), "exclusive_addr"); - tcg_ctx->cpu_exclusive_val = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUARMState, exclusive_val), "exclusive_val"); -#ifdef CONFIG_USER_ONLY - cpu_exclusive_test = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUARMState, exclusive_test), "exclusive_test"); - cpu_exclusive_info = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUARMState, exclusive_info), "exclusive_info"); -#endif - - a64_translate_init(uc); -} - -static inline TCGv_i32 load_cpu_offset(struct uc_struct *uc, int offset) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offset); - return tmp; -} - -#define load_cpu_field(uc, name) load_cpu_offset(uc, offsetof(CPUARMState, name)) - -static inline void store_cpu_offset(TCGContext *tcg_ctx, TCGv_i32 var, int offset) -{ - tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); - tcg_temp_free_i32(tcg_ctx, var); -} - -#define store_cpu_field(s, var, name) \ - store_cpu_offset(s, var, offsetof(CPUARMState, name)) - -/* Set a variable to the value of a CPU register. 
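Reads of r15 through load_reg_var() below must return the architecturally visible PC, which runs ahead of the current instruction; a standalone model of the skew (hypothetical helper, not from this file):

#include <stdint.h>
#include <assert.h>

/* s->pc has already been advanced past the insn being translated, so
 * only one more insn width is added: r15 reads as insn address + 8 in
 * ARM state and + 4 in Thumb state. */
static uint32_t arch_pc_value(uint32_t pc_after_insn, int thumb)
{
    return pc_after_insn + (thumb ? 2 : 4);
}

int main(void)
{
    /* ARM insn at 0x1000: s->pc = 0x1004, r15 reads 0x1008 */
    assert(arch_pc_value(0x1004, 0) == 0x1008);
    /* Thumb insn at 0x2000: s->pc = 0x2002, r15 reads 0x2004 */
    assert(arch_pc_value(0x2002, 1) == 0x2004);
    return 0;
}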
*/ -static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (reg == 15) { - uint32_t addr; - /* normally, since we updated PC, we need only to add one insn */ - if (s->thumb) - addr = (long)s->pc + 2; - else - addr = (long)s->pc + 4; - tcg_gen_movi_i32(tcg_ctx, var, addr); - } else { - tcg_gen_mov_i32(tcg_ctx, var, tcg_ctx->cpu_R[reg & 0x0f]); - } -} - -/* Create a new temporary and set it to the value of a CPU register. */ -static inline TCGv_i32 load_reg(DisasContext *s, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - load_reg_var(s, tmp, reg); - return tmp; -} - -/* Set a CPU register. The source must be a temporary and will be - marked as dead. */ -static void store_reg(DisasContext *s, int reg, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (reg == 15) { - tcg_gen_andi_i32(tcg_ctx, var, var, ~1); - s->is_jmp = DISAS_JUMP; - } - tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_R[reg & 0x0f], var); - tcg_temp_free_i32(tcg_ctx, var); -} - -/* Value extensions. */ -#define gen_uxtb(var) tcg_gen_ext8u_i32(tcg_ctx, var, var) -#define gen_uxth(var) tcg_gen_ext16u_i32(tcg_ctx, var, var) -#define gen_sxtb(var) tcg_gen_ext8s_i32(tcg_ctx, var, var) -#define gen_sxth(var) tcg_gen_ext16s_i32(tcg_ctx, var, var) - -#define gen_sxtb16(var) gen_helper_sxtb16(tcg_ctx, var, var) -#define gen_uxtb16(var) gen_helper_uxtb16(tcg_ctx, var, var) - - -static inline void gen_set_cpsr(DisasContext *s, TCGv_i32 var, uint32_t mask) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp_mask = tcg_const_i32(tcg_ctx, mask); - gen_helper_cpsr_write(tcg_ctx, tcg_ctx->cpu_env, var, tmp_mask); - tcg_temp_free_i32(tcg_ctx, tmp_mask); -} -/* Set NZCV flags from the high 4 bits of var. */ -#define gen_set_nzcv(s, var) gen_set_cpsr(s, var, CPSR_NZCV) - -static void gen_exception_internal(DisasContext *s, int excp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); - - assert(excp_is_internal(excp)); - gen_helper_exception_internal(tcg_ctx, tcg_ctx->cpu_env, tcg_excp); - tcg_temp_free_i32(tcg_ctx, tcg_excp); -} - -static void gen_exception(DisasContext *s, int excp, uint32_t syndrome) // qq -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); - TCGv_i32 tcg_syn = tcg_const_i32(tcg_ctx, syndrome); - - gen_helper_exception_with_syndrome(tcg_ctx, tcg_ctx->cpu_env, tcg_excp, tcg_syn); - tcg_temp_free_i32(tcg_ctx, tcg_syn); - tcg_temp_free_i32(tcg_ctx, tcg_excp); -} - -static void gen_ss_advance(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - /* If the singlestep state is Active-not-pending, advance to - * Active-pending. - */ - if (s->ss_active) { - s->pstate_ss = 0; - gen_helper_clear_pstate_ss(tcg_ctx, tcg_ctx->cpu_env); - } -} - -static void gen_step_complete_exception(DisasContext *s) -{ - /* We just completed step of an insn. Move from Active-not-pending - * to Active-pending, and then also take the swstep exception. - * This corresponds to making the (IMPDEF) choice to prioritize - * swstep exceptions over asynchronous exceptions taken to an exception - * level where debug is disabled. This choice has the advantage that - * we do not need to maintain internal state corresponding to the - * ISV/EX syndrome bits between completion of the step and generation - * of the exception, and our syndrome information is always correct. 
- */ - gen_ss_advance(s); - gen_exception(s, EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex)); - s->is_jmp = DISAS_EXC; -} - -static void gen_smul_dual(DisasContext *s, TCGv_i32 a, TCGv_i32 b) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ext16s_i32(tcg_ctx, tmp1, a); - tcg_gen_ext16s_i32(tcg_ctx, tmp2, b); - tcg_gen_mul_i32(tcg_ctx, tmp1, tmp1, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_gen_sari_i32(tcg_ctx, a, a, 16); - tcg_gen_sari_i32(tcg_ctx, b, b, 16); - tcg_gen_mul_i32(tcg_ctx, b, b, a); - tcg_gen_mov_i32(tcg_ctx, a, tmp1); - tcg_temp_free_i32(tcg_ctx, tmp1); -} - -/* Byteswap each halfword. */ -static void gen_rev16(DisasContext *s, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_shri_i32(tcg_ctx, tmp, var, 8); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0x00ff00ff); - tcg_gen_shli_i32(tcg_ctx, var, var, 8); - tcg_gen_andi_i32(tcg_ctx, var, var, 0xff00ff00); - tcg_gen_or_i32(tcg_ctx, var, var, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); -} - -/* Byteswap low halfword and sign extend. */ -static void gen_revsh(DisasContext *s, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_ext16u_i32(tcg_ctx, var, var); - tcg_gen_bswap16_i32(tcg_ctx, var, var); - tcg_gen_ext16s_i32(tcg_ctx, var, var); -} - -/* Unsigned bitfield extract. */ -static void gen_ubfx(DisasContext *s, TCGv_i32 var, int shift, uint32_t mask) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (shift) - tcg_gen_shri_i32(tcg_ctx, var, var, shift); - tcg_gen_andi_i32(tcg_ctx, var, var, mask); -} - -/* Signed bitfield extract. */ -static void gen_sbfx(DisasContext *s, TCGv_i32 var, int shift, int width) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint32_t signbit; - - if (shift) - tcg_gen_sari_i32(tcg_ctx, var, var, shift); - if (shift + width < 32) { - signbit = 1u << (width - 1); - tcg_gen_andi_i32(tcg_ctx, var, var, (1u << width) - 1); - tcg_gen_xori_i32(tcg_ctx, var, var, signbit); - tcg_gen_subi_i32(tcg_ctx, var, var, signbit); - } -} - -/* Return (b << 32) + a. Mark inputs as dead */ -static TCGv_i64 gen_addq_msw(DisasContext *s, TCGv_i64 a, TCGv_i32 b) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 tmp64 = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_extu_i32_i64(tcg_ctx, tmp64, b); - tcg_temp_free_i32(tcg_ctx, b); - tcg_gen_shli_i64(tcg_ctx, tmp64, tmp64, 32); - tcg_gen_add_i64(tcg_ctx, a, tmp64, a); - - tcg_temp_free_i64(tcg_ctx, tmp64); - return a; -} - -/* Return (b << 32) - a. Mark inputs as dead. */ -static TCGv_i64 gen_subq_msw(DisasContext *s, TCGv_i64 a, TCGv_i32 b) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 tmp64 = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_extu_i32_i64(tcg_ctx, tmp64, b); - tcg_temp_free_i32(tcg_ctx, b); - tcg_gen_shli_i64(tcg_ctx, tmp64, tmp64, 32); - tcg_gen_sub_i64(tcg_ctx, a, tmp64, a); - - tcg_temp_free_i64(tcg_ctx, tmp64); - return a; -} - -/* 32x32->64 multiply. Marks inputs as dead. 
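The TCG sequence emitted by gen_rev16() above is the classic mask-and-shift byte swap within each halfword; its plain-C equivalent (standalone sketch):

#include <stdint.h>
#include <assert.h>

static uint32_t rev16(uint32_t x)
{
    return ((x >> 8) & 0x00ff00ffu) | ((x << 8) & 0xff00ff00u);
}

int main(void)
{
    /* each 16-bit half is byte-swapped independently */
    assert(rev16(0x11223344u) == 0x22114433u);
    return 0;
}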
*/ -static TCGv_i64 gen_mulu_i64_i32(DisasContext *s, TCGv_i32 a, TCGv_i32 b) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 lo = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 hi = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 ret; - - tcg_gen_mulu2_i32(tcg_ctx, lo, hi, a, b); - tcg_temp_free_i32(tcg_ctx, a); - tcg_temp_free_i32(tcg_ctx, b); - - ret = tcg_temp_new_i64(tcg_ctx); - tcg_gen_concat_i32_i64(tcg_ctx, ret, lo, hi); - tcg_temp_free_i32(tcg_ctx, lo); - tcg_temp_free_i32(tcg_ctx, hi); - - return ret; -} - -static TCGv_i64 gen_muls_i64_i32(DisasContext *s, TCGv_i32 a, TCGv_i32 b) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 lo = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 hi = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 ret; - - tcg_gen_muls2_i32(tcg_ctx, lo, hi, a, b); - tcg_temp_free_i32(tcg_ctx, a); - tcg_temp_free_i32(tcg_ctx, b); - - ret = tcg_temp_new_i64(tcg_ctx); - tcg_gen_concat_i32_i64(tcg_ctx, ret, lo, hi); - tcg_temp_free_i32(tcg_ctx, lo); - tcg_temp_free_i32(tcg_ctx, hi); - - return ret; -} - -/* Swap low and high halfwords. */ -static void gen_swap_half(DisasContext *s, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_shri_i32(tcg_ctx, tmp, var, 16); - tcg_gen_shli_i32(tcg_ctx, var, var, 16); - tcg_gen_or_i32(tcg_ctx, var, var, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); -} - -/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead. - tmp = (t0 ^ t1) & 0x8000; - t0 &= ~0x8000; - t1 &= ~0x8000; - t0 = (t0 + t1) ^ tmp; - */ - -static void gen_add16(DisasContext *s, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0x8000); - tcg_gen_andi_i32(tcg_ctx, t0, t0, ~0x8000); - tcg_gen_andi_i32(tcg_ctx, t1, t1, ~0x8000); - tcg_gen_add_i32(tcg_ctx, t0, t0, t1); - tcg_gen_xor_i32(tcg_ctx, t0, t0, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, t1); -} - -/* Set CF to the top bit of var. */ -static void gen_set_CF_bit31(DisasContext *s, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_shri_i32(tcg_ctx, tcg_ctx->cpu_CF, var, 31); -} - -/* Set N and Z flags from var. */ -static inline void gen_logic_CC(DisasContext *s, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, var); - tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, var); -} - -/* T0 += T1 + CF. */ -static void gen_adc(DisasContext *s, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_add_i32(tcg_ctx, t0, t0, t1); - tcg_gen_add_i32(tcg_ctx, t0, t0, tcg_ctx->cpu_CF); -} - -/* dest = T0 + T1 + CF. */ -static void gen_add_carry(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_add_i32(tcg_ctx, dest, t0, t1); - tcg_gen_add_i32(tcg_ctx, dest, dest, tcg_ctx->cpu_CF); -} - -/* dest = T0 - T1 + CF - 1. */ -static void gen_sub_carry(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_sub_i32(tcg_ctx, dest, t0, t1); - tcg_gen_add_i32(tcg_ctx, dest, dest, tcg_ctx->cpu_CF); - tcg_gen_subi_i32(tcg_ctx, dest, dest, 1); -} - -/* dest = T0 + T1. 
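gen_add16() above uses a SWAR trick to add two packed 16-bit lanes with a single 32-bit add; a standalone C model showing why no carry leaks between lanes:

#include <stdint.h>
#include <assert.h>

static uint32_t add16(uint32_t t0, uint32_t t1)
{
    uint32_t tmp = (t0 ^ t1) & 0x8000;  /* half-add of the lane-0 top bits */
    t0 &= ~0x8000u;                     /* cut the carry path into bit 16 */
    t1 &= ~0x8000u;
    return (t0 + t1) ^ tmp;             /* reinsert bit 15 carrylessly */
}

int main(void)
{
    /* low lane wraps (0x8000 + 0x8000), high lane adds independently */
    assert(add16(0x00018000u, 0x00018000u) == 0x00020000u);
    assert(add16(0x0000ffffu, 0x00000001u) == 0x00000000u);
    return 0;
}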
Compute C, N, V and Z flags */ -static void gen_add_CC(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, 0); - tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0, tmp, t1, tmp); - tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); - tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); - tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); - tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); -} - -/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */ -static void gen_adc_CC(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - if (TCG_TARGET_HAS_add2_i32) { - tcg_gen_movi_i32(tcg_ctx, tmp, 0); - tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0, tmp, tcg_ctx->cpu_CF, tmp); - tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t1, tmp); - } else { - TCGv_i64 q0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 q1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_extu_i32_i64(tcg_ctx, q0, t0); - tcg_gen_extu_i32_i64(tcg_ctx, q1, t1); - tcg_gen_add_i64(tcg_ctx, q0, q0, q1); - tcg_gen_extu_i32_i64(tcg_ctx, q1, tcg_ctx->cpu_CF); - tcg_gen_add_i64(tcg_ctx, q0, q0, q1); - tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, q0); - tcg_temp_free_i64(tcg_ctx, q0); - tcg_temp_free_i64(tcg_ctx, q1); - } - tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); - tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); - tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); - tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); -} - -/* dest = T0 - T1. Compute C, N, V and Z flags */ -static void gen_sub_CC(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp; - tcg_gen_sub_i32(tcg_ctx, tcg_ctx->cpu_NF, t0, t1); - tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); - tcg_gen_setcond_i32(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_CF, t0, t1); - tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); - tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); -} - -/* dest = T0 + ~T1 + CF. 
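gen_add_CC() and gen_adc_CC() above compute all four flags without conditional branches; the same algebra in plain C (standalone sketch, with out-parameters following the cpu_NF/ZF/CF/VF conventions: N and V live in bit 31, Z is "ZF == 0"):

#include <stdint.h>
#include <assert.h>

static void add_cc(uint32_t t0, uint32_t t1, uint32_t *nf, uint32_t *zf,
                   uint32_t *cf, uint32_t *vf)
{
    uint64_t wide = (uint64_t)t0 + t1;
    uint32_t res = (uint32_t)wide;

    *nf = res;
    *zf = res;
    *cf = (uint32_t)(wide >> 32);       /* unsigned carry-out */
    *vf = (res ^ t0) & ~(t0 ^ t1);      /* overflow: operands agree in
                                           sign, result disagrees */
}

int main(void)
{
    uint32_t nf, zf, cf, vf;

    add_cc(0x7fffffffu, 1, &nf, &zf, &cf, &vf);
    assert((vf >> 31) == 1 && cf == 0); /* signed overflow, no carry */

    add_cc(0xffffffffu, 1, &nf, &zf, &cf, &vf);
    assert(cf == 1 && zf == 0);         /* carry out, Z flag set */
    return 0;
}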
Compute C, N, V and Z flags */ -static void gen_sbc_CC(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_not_i32(tcg_ctx, tmp, t1); - gen_adc_CC(s, dest, t0, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); -} - -#define GEN_SHIFT(name) \ -static void gen_##name(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - TCGv_i32 tmp1, tmp2, tmp3; \ - tmp1 = tcg_temp_new_i32(tcg_ctx); \ - tcg_gen_andi_i32(tcg_ctx, tmp1, t1, 0xff); \ - tmp2 = tcg_const_i32(tcg_ctx, 0); \ - tmp3 = tcg_const_i32(tcg_ctx, 0x1f); \ - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \ - tcg_temp_free_i32(tcg_ctx, tmp3); \ - tcg_gen_andi_i32(tcg_ctx, tmp1, tmp1, 0x1f); \ - tcg_gen_##name##_i32(tcg_ctx, dest, tmp2, tmp1); \ - tcg_temp_free_i32(tcg_ctx, tmp2); \ - tcg_temp_free_i32(tcg_ctx, tmp1); \ -} -GEN_SHIFT(shl) -GEN_SHIFT(shr) -#undef GEN_SHIFT - -static void gen_sar(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp1, tmp2; - tmp1 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_andi_i32(tcg_ctx, tmp1, t1, 0xff); - tmp2 = tcg_const_i32(tcg_ctx, 0x1f); - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_gen_sar_i32(tcg_ctx, dest, t0, tmp1); - tcg_temp_free_i32(tcg_ctx, tmp1); -} - -static void tcg_gen_abs_i32(DisasContext *s, TCGv_i32 dest, TCGv_i32 src) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 c0 = tcg_const_i32(tcg_ctx, 0); - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_neg_i32(tcg_ctx, tmp, src); - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GT, dest, src, c0, src, tmp); - tcg_temp_free_i32(tcg_ctx, c0); - tcg_temp_free_i32(tcg_ctx, tmp); -} - -static void shifter_out_im(DisasContext *s, TCGv_i32 var, int shift) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (shift == 0) { - tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_CF, var, 1); - } else { - tcg_gen_shri_i32(tcg_ctx, tcg_ctx->cpu_CF, var, shift); - if (shift != 31) { - tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, 1); - } - } -} - -/* Shift by immediate. Includes special handling for shift == 0. 
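The GEN_SHIFT macro above needs the movcond because an ARM register-specified shift uses the low byte of the amount and any amount of 32 or more must yield zero, while shifting a 32-bit value by 32 or more is undefined on the host; a plain-C model (standalone sketch):

#include <stdint.h>
#include <assert.h>

static uint32_t arm_lsl_reg(uint32_t value, uint32_t shift_reg)
{
    uint32_t amount = shift_reg & 0xff;
    uint32_t base = (amount > 31) ? 0 : value;  /* the movcond */
    return base << (amount & 0x1f);             /* host shift stays < 32 */
}

int main(void)
{
    assert(arm_lsl_reg(1, 4) == 16);
    assert(arm_lsl_reg(1, 32) == 0);      /* 0 << 0, no undefined shift */
    assert(arm_lsl_reg(1, 0x104) == 16);  /* only the low byte counts */
    return 0;
}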
*/ -static inline void gen_arm_shift_im(DisasContext *s, TCGv_i32 var, int shiftop, - int shift, int flags) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (shiftop) { - case 0: /* LSL */ - if (shift != 0) { - if (flags) - shifter_out_im(s, var, 32 - shift); - tcg_gen_shli_i32(tcg_ctx, var, var, shift); - } - break; - case 1: /* LSR */ - if (shift == 0) { - if (flags) { - tcg_gen_shri_i32(tcg_ctx, tcg_ctx->cpu_CF, var, 31); - } - tcg_gen_movi_i32(tcg_ctx, var, 0); - } else { - if (flags) - shifter_out_im(s, var, shift - 1); - tcg_gen_shri_i32(tcg_ctx, var, var, shift); - } - break; - case 2: /* ASR */ - if (shift == 0) - shift = 32; - if (flags) - shifter_out_im(s, var, shift - 1); - if (shift == 32) - shift = 31; - tcg_gen_sari_i32(tcg_ctx, var, var, shift); - break; - case 3: /* ROR/RRX */ - if (shift != 0) { - if (flags) - shifter_out_im(s, var, shift - 1); - tcg_gen_rotri_i32(tcg_ctx, var, var, shift); break; - } else { - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_shli_i32(tcg_ctx, tmp, tcg_ctx->cpu_CF, 31); - if (flags) - shifter_out_im(s, var, 0); - tcg_gen_shri_i32(tcg_ctx, var, var, 1); - tcg_gen_or_i32(tcg_ctx, var, var, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - } - } -} - -static inline void gen_arm_shift_reg(DisasContext *s, TCGv_i32 var, int shiftop, - TCGv_i32 shift, int flags) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (flags) { - switch (shiftop) { - case 0: gen_helper_shl_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; - case 1: gen_helper_shr_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; - case 2: gen_helper_sar_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; - case 3: gen_helper_ror_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; - } - } else { - switch (shiftop) { - case 0: - gen_shl(s, var, var, shift); - break; - case 1: - gen_shr(s, var, var, shift); - break; - case 2: - gen_sar(s, var, var, shift); - break; - case 3: tcg_gen_andi_i32(tcg_ctx, shift, shift, 0x1f); - tcg_gen_rotr_i32(tcg_ctx, var, var, shift); break; - } - } - tcg_temp_free_i32(tcg_ctx, shift); -} - -#define PAS_OP(pfx) \ - switch (op2) { \ - case 0: gen_pas_helper(glue(pfx,add16)); break; \ - case 1: gen_pas_helper(glue(pfx,addsubx)); break; \ - case 2: gen_pas_helper(glue(pfx,subaddx)); break; \ - case 3: gen_pas_helper(glue(pfx,sub16)); break; \ - case 4: gen_pas_helper(glue(pfx,add8)); break; \ - case 7: gen_pas_helper(glue(pfx,sub8)); break; \ - } -static void gen_arm_parallel_addsub(DisasContext *s, int op1, int op2, TCGv_i32 a, TCGv_i32 b) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_ptr tmp; - - switch (op1) { -#define gen_pas_helper(name) glue(gen_helper_,name)(tcg_ctx, a, a, b, tmp) - case 1: - tmp = tcg_temp_new_ptr(tcg_ctx); - tcg_gen_addi_ptr(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); - PAS_OP(s) - tcg_temp_free_ptr(tcg_ctx, tmp); - break; - case 5: - tmp = tcg_temp_new_ptr(tcg_ctx); - tcg_gen_addi_ptr(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); - PAS_OP(u) - tcg_temp_free_ptr(tcg_ctx, tmp); - break; -#undef gen_pas_helper -#define gen_pas_helper(name) glue(gen_helper_,name)(tcg_ctx, a, a, b) - case 2: - PAS_OP(q); - break; - case 3: - PAS_OP(sh); - break; - case 6: - PAS_OP(uq); - break; - case 7: - PAS_OP(uh); - break; -#undef gen_pas_helper - } -} -#undef PAS_OP - -/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. 
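In the ARM table just above, op1 selects the variant (1 = signed modulo setting the GE flags, 2 = signed saturating, 3 = signed halving, with 5/6/7 the unsigned counterparts) and op2 selects the operation; in the Thumb-2 table below the two fields swap roles and the operation numbering differs (0 = add8, 1 = add16, 2 = addsubx, 4 = sub8, 5 = sub16, 6 = subaddx).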
*/ -#define PAS_OP(pfx) \ - switch (op1) { \ - case 0: gen_pas_helper(glue(pfx,add8)); break; \ - case 1: gen_pas_helper(glue(pfx,add16)); break; \ - case 2: gen_pas_helper(glue(pfx,addsubx)); break; \ - case 4: gen_pas_helper(glue(pfx,sub8)); break; \ - case 5: gen_pas_helper(glue(pfx,sub16)); break; \ - case 6: gen_pas_helper(glue(pfx,subaddx)); break; \ - } -static void gen_thumb2_parallel_addsub(DisasContext *s, int op1, int op2, TCGv_i32 a, TCGv_i32 b) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_ptr tmp; - - switch (op2) { -#define gen_pas_helper(name) glue(gen_helper_,name)(tcg_ctx, a, a, b, tmp) - case 0: - tmp = tcg_temp_new_ptr(tcg_ctx); - tcg_gen_addi_ptr(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); - PAS_OP(s) - tcg_temp_free_ptr(tcg_ctx, tmp); - break; - case 4: - tmp = tcg_temp_new_ptr(tcg_ctx); - tcg_gen_addi_ptr(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); - PAS_OP(u) - tcg_temp_free_ptr(tcg_ctx, tmp); - break; -#undef gen_pas_helper -#define gen_pas_helper(name) glue(gen_helper_,name)(tcg_ctx, a, a, b) - case 1: - PAS_OP(q); - break; - case 2: - PAS_OP(sh); - break; - case 5: - PAS_OP(uq); - break; - case 6: - PAS_OP(uh); - break; -#undef gen_pas_helper - } -} -#undef PAS_OP - -/* - * generate a conditional branch based on ARM condition code cc. - * This is common between ARM and Aarch64 targets. - */ -void arm_gen_test_cc(TCGContext *tcg_ctx, int cc, int label) -{ - TCGv_i32 tmp; - int inv; - - switch (cc) { - case 0: /* eq: Z */ - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_ZF, 0, label); - break; - case 1: /* ne: !Z */ - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_ZF, 0, label); - break; - case 2: /* cs: C */ - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_CF, 0, label); - break; - case 3: /* cc: !C */ - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_CF, 0, label); - break; - case 4: /* mi: N */ - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_NF, 0, label); - break; - case 5: /* pl: !N */ - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_NF, 0, label); - break; - case 6: /* vs: V */ - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_VF, 0, label); - break; - case 7: /* vc: !V */ - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_VF, 0, label); - break; - case 8: /* hi: C && !Z */ - inv = gen_new_label(tcg_ctx); - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_CF, 0, inv); - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_ZF, 0, label); - gen_set_label(tcg_ctx, inv); - break; - case 9: /* ls: !C || Z */ - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_CF, 0, label); - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_ZF, 0, label); - break; - case 10: /* ge: N == V -> N ^ V == 0 */ - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, tmp, 0, label); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 11: /* lt: N != V -> N ^ V != 0 */ - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, tmp, 0, label); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 12: /* gt: !Z && N == V */ - inv = gen_new_label(tcg_ctx); - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_ZF, 0, inv); - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, tmp, 0, label); - 
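/* For the compound conditions the generator uses a local "inv" label:
 * "hi" and "gt" first branch to inv when the short-circuit flag test
 * already rules the branch out (C clear, resp. Z set), so the N ^ V
 * comparison above only decides the branch once that guard passes;
 * "le" below instead chains two direct branches to the target label. */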
tcg_temp_free_i32(tcg_ctx, tmp); - gen_set_label(tcg_ctx, inv); - break; - case 13: /* le: Z || N != V */ - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_ZF, 0, label); - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, tmp, 0, label); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - default: - /* fprintf(stderr, "Bad condition code 0x%x\n", cc); */ - tmp = tcg_const_i32(tcg_ctx, EXCP_EXCEPTION_EXIT); - gen_helper_exception_internal(tcg_ctx, tcg_ctx->cpu_env, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - } -} - -static const uint8_t table_logic_cc[16] = { - 1, /* and */ - 1, /* xor */ - 0, /* sub */ - 0, /* rsb */ - 0, /* add */ - 0, /* adc */ - 0, /* sbc */ - 0, /* rsc */ - 1, /* andl */ - 1, /* xorl */ - 0, /* cmp */ - 0, /* cmn */ - 1, /* orr */ - 1, /* mov */ - 1, /* bic */ - 1, /* mvn */ -}; - -/* Set PC and Thumb state from an immediate address. */ -static inline void gen_bx_im(DisasContext *s, uint32_t addr) -{ - TCGv_i32 tmp; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - s->is_jmp = DISAS_UPDATE; - if (s->thumb != (addr & 1)) { - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, addr & 1); - tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, thumb)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], addr & ~1); -} - -/* Set PC and Thumb state from var. var is marked as dead. */ -static inline void gen_bx(DisasContext *s, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - s->is_jmp = DISAS_UPDATE; - tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_R[15], var, ~1); - tcg_gen_andi_i32(tcg_ctx, var, var, 1); - store_cpu_field(tcg_ctx, var, thumb); -} - -/* Variant of store_reg which uses branch&exchange logic when storing - to r15 in ARM architecture v7 and above. The source must be a temporary - and will be marked as dead. */ -static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var) -{ - if (reg == 15 && ENABLE_ARCH_7) { - gen_bx(s, var); - } else { - store_reg(s, reg, var); - } -} - -/* Variant of store_reg which uses branch&exchange logic when storing - * to r15 in ARM architecture v5T and above. This is used for storing - * the results of a LDR/LDM/POP into r15, and corresponds to the cases - * in the ARM ARM which use the LoadWritePC() pseudocode function. */ -static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) -{ - if (reg == 15 && ENABLE_ARCH_5) { - gen_bx(s, var); - } else { - store_reg(s, reg, var); - } -} - -/* Abstractions of "generate code to do a guest load/store for - * AArch32", where a vaddr is always 32 bits (and is zero - * extended if we're a 64 bit core) and data is also - * 32 bits unless specifically doing a 64 bit access. - * These functions work like tcg_gen_qemu_{ld,st}* except - * that the address argument is TCGv_i32 rather than TCGv. 
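gen_bx() and gen_bx_im() above implement the interworking rule that bit 0 of a branch target selects the Thumb state and never reaches the PC; a standalone model (hypothetical helper):

#include <stdint.h>
#include <assert.h>

static void bx(uint32_t target, uint32_t *pc, int *thumb)
{
    *thumb = target & 1;    /* bit 0 selects the instruction set */
    *pc = target & ~1u;     /* and is cleared from the PC */
}

int main(void)
{
    uint32_t pc;
    int thumb;

    bx(0x8001, &pc, &thumb);
    assert(pc == 0x8000 && thumb == 1);
    return 0;
}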
- */ -#if TARGET_LONG_BITS == 32 - -#define DO_GEN_LD(SUFF, OPC) \ -static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, TCGv_i32 addr, int index) \ -{ \ - tcg_gen_qemu_ld_i32(s->uc, val, addr, index, OPC); \ -} - -#define DO_GEN_ST(SUFF, OPC) \ -static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, TCGv_i32 addr, int index) \ -{ \ - tcg_gen_qemu_st_i32(s->uc, val, addr, index, OPC); \ -} - -static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, TCGv_i32 addr, int index) -{ - tcg_gen_qemu_ld_i64(s->uc, val, addr, index, MO_TEQ); -} - -static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val, TCGv_i32 addr, int index) -{ - tcg_gen_qemu_st_i64(s->uc, val, addr, index, MO_TEQ); -} - -#else - -#define DO_GEN_LD(SUFF, OPC) \ -static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, TCGv_i32 addr, int index) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - TCGv addr64 = tcg_temp_new(tcg_ctx); \ - tcg_gen_extu_i32_i64(tcg_ctx, addr64, addr); \ - tcg_gen_qemu_ld_i32(s->uc, val, addr64, index, OPC); \ - tcg_temp_free(tcg_ctx, addr64); \ -} - -#define DO_GEN_ST(SUFF, OPC) \ -static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, TCGv_i32 addr, int index) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - TCGv addr64 = tcg_temp_new(tcg_ctx); \ - tcg_gen_extu_i32_i64(tcg_ctx, addr64, addr); \ - tcg_gen_qemu_st_i32(s->uc, val, addr64, index, OPC); \ - tcg_temp_free(tcg_ctx, addr64); \ -} - -static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, TCGv_i32 addr, int index) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv addr64 = tcg_temp_new(tcg_ctx); - tcg_gen_extu_i32_i64(tcg_ctx, addr64, addr); - tcg_gen_qemu_ld_i64(s->uc, val, addr64, index, MO_TEQ); - tcg_temp_free(tcg_ctx, addr64); -} - -static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val, TCGv_i32 addr, int index) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv addr64 = tcg_temp_new(tcg_ctx); - tcg_gen_extu_i32_i64(tcg_ctx, addr64, addr); - tcg_gen_qemu_st_i64(s->uc, val, addr64, index, MO_TEQ); - tcg_temp_free(tcg_ctx, addr64); -} - -#endif - -DO_GEN_LD(8s, MO_SB) -DO_GEN_LD(8u, MO_UB) -DO_GEN_LD(16s, MO_TESW) -DO_GEN_LD(16u, MO_TEUW) -DO_GEN_LD(32u, MO_TEUL) -DO_GEN_ST(8, MO_UB) -DO_GEN_ST(16, MO_TEUW) -DO_GEN_ST(32, MO_TEUL) - -static inline void gen_set_pc_im(DisasContext *s, target_ulong val) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], val); -} - -static inline void gen_hvc(DisasContext *s, int imm16) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - /* The pre HVC helper handles cases when HVC gets trapped - * as an undefined insn by runtime configuration (ie before - * the insn really executes). - */ - gen_set_pc_im(s, s->pc - 4); - gen_helper_pre_hvc(tcg_ctx, tcg_ctx->cpu_env); - /* Otherwise we will treat this as a real exception which - * happens after execution of the insn. (The distinction matters - * for the PC value reported to the exception handler and also - * for single stepping.) - */ - s->svc_imm = imm16; - gen_set_pc_im(s, s->pc); - s->is_jmp = DISAS_HVC; -} - -static inline void gen_smc(DisasContext *s) -{ - /* As with HVC, we may take an exception either before or after - * the insn executes. 
- */ - TCGv_i32 tmp; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - gen_set_pc_im(s, s->pc - 4); - tmp = tcg_const_i32(tcg_ctx, syn_aa32_smc()); - gen_helper_pre_smc(tcg_ctx, tcg_ctx->cpu_env, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - gen_set_pc_im(s, s->pc); - s->is_jmp = DISAS_SMC; -} - -static inline void -gen_set_condexec (DisasContext *s) -{ - if (s->condexec_mask) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, val); - store_cpu_field(tcg_ctx, tmp, condexec_bits); - } -} - -static void gen_exception_internal_insn(DisasContext *s, int offset, int excp) -{ - gen_set_condexec(s); - gen_set_pc_im(s, s->pc - offset); - gen_exception_internal(s, excp); - s->is_jmp = DISAS_JUMP; -} - -static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn) -{ - gen_set_condexec(s); - gen_set_pc_im(s, s->pc - offset); - gen_exception(s, excp, syn); // qq - s->is_jmp = DISAS_JUMP; -} - -/* Force a TB lookup after an instruction that changes the CPU state. */ -static inline void gen_lookup_tb(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], s->pc & ~1); - s->is_jmp = DISAS_UPDATE; -} - -static inline void gen_add_data_offset(DisasContext *s, unsigned int insn, - TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int val, rm, shift, shiftop; - TCGv_i32 offset; - - if (!(insn & (1 << 25))) { - /* immediate */ - val = insn & 0xfff; - if (!(insn & (1 << 23))) - val = -val; - if (val != 0) - tcg_gen_addi_i32(tcg_ctx, var, var, val); - } else { - /* shift/register */ - rm = (insn) & 0xf; - shift = (insn >> 7) & 0x1f; - shiftop = (insn >> 5) & 3; - offset = load_reg(s, rm); - gen_arm_shift_im(s, offset, shiftop, shift, 0); - if (!(insn & (1 << 23))) - tcg_gen_sub_i32(tcg_ctx, var, var, offset); - else - tcg_gen_add_i32(tcg_ctx, var, var, offset); - tcg_temp_free_i32(tcg_ctx, offset); - } -} - -static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn, - int extra, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int val, rm; - TCGv_i32 offset; - - if (insn & (1 << 22)) { - /* immediate */ - val = (insn & 0xf) | ((insn >> 4) & 0xf0); - if (!(insn & (1 << 23))) - val = -val; - val += extra; - if (val != 0) - tcg_gen_addi_i32(tcg_ctx, var, var, val); - } else { - /* register */ - if (extra) - tcg_gen_addi_i32(tcg_ctx, var, var, extra); - rm = (insn) & 0xf; - offset = load_reg(s, rm); - if (!(insn & (1 << 23))) - tcg_gen_sub_i32(tcg_ctx, var, var, offset); - else - tcg_gen_add_i32(tcg_ctx, var, var, offset); - tcg_temp_free_i32(tcg_ctx, offset); - } -} - -static TCGv_ptr get_fpstatus_ptr(DisasContext *s, int neon) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_ptr statusptr = tcg_temp_new_ptr(tcg_ctx); - int offset; - if (neon) { - offset = offsetof(CPUARMState, vfp.standard_fp_status); - } else { - offset = offsetof(CPUARMState, vfp.fp_status); - } - tcg_gen_addi_ptr(tcg_ctx, statusptr, tcg_ctx->cpu_env, offset); - return statusptr; -} - -#define VFP_OP2(name) \ -static inline void gen_vfp_##name(DisasContext *s, int dp) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - TCGv_ptr fpst = get_fpstatus_ptr(s, 0); \ - if (dp) { \ - gen_helper_vfp_##name##d(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F1d, fpst); \ - } else { \ - gen_helper_vfp_##name##s(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F1s, fpst); \ - } \ - tcg_temp_free_ptr(tcg_ctx, 
fpst); \ -} - -VFP_OP2(add) -VFP_OP2(sub) -VFP_OP2(mul) -VFP_OP2(div) - -#undef VFP_OP2 - -static inline void gen_vfp_F1_mul(DisasContext *s, int dp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - /* Like gen_vfp_mul() but put result in F1 */ - TCGv_ptr fpst = get_fpstatus_ptr(s, 0); - if (dp) { - gen_helper_vfp_muld(tcg_ctx, tcg_ctx->cpu_F1d, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F1d, fpst); - } else { - gen_helper_vfp_muls(tcg_ctx, tcg_ctx->cpu_F1s, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F1s, fpst); - } - tcg_temp_free_ptr(tcg_ctx, fpst); -} - -static inline void gen_vfp_F1_neg(DisasContext *s, int dp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - /* Like gen_vfp_neg() but put result in F1 */ - if (dp) { - gen_helper_vfp_negd(tcg_ctx, tcg_ctx->cpu_F1d, tcg_ctx->cpu_F0d); - } else { - gen_helper_vfp_negs(tcg_ctx, tcg_ctx->cpu_F1s, tcg_ctx->cpu_F0s); - } -} - -static inline void gen_vfp_abs(DisasContext *s, int dp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) - gen_helper_vfp_absd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d); - else - gen_helper_vfp_abss(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s); -} - -static inline void gen_vfp_neg(DisasContext *s, int dp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) - gen_helper_vfp_negd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d); - else - gen_helper_vfp_negs(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s); -} - -static inline void gen_vfp_sqrt(DisasContext *s, int dp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) - gen_helper_vfp_sqrtd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, tcg_ctx->cpu_env); - else - gen_helper_vfp_sqrts(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); -} - -static inline void gen_vfp_cmp(DisasContext *s, int dp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) - gen_helper_vfp_cmpd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F1d, tcg_ctx->cpu_env); - else - gen_helper_vfp_cmps(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F1s, tcg_ctx->cpu_env); -} - -static inline void gen_vfp_cmpe(DisasContext *s, int dp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) - gen_helper_vfp_cmped(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F1d, tcg_ctx->cpu_env); - else - gen_helper_vfp_cmpes(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F1s, tcg_ctx->cpu_env); -} - -static inline void gen_vfp_F1_ld0(DisasContext *s, int dp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) - tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_F1d, 0); - else - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_F1s, 0); -} - -#define VFP_GEN_ITOF(name) \ -static inline void gen_vfp_##name(DisasContext *s, int dp, int neon) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - TCGv_ptr statusptr = get_fpstatus_ptr(s, neon); \ - if (dp) { \ - gen_helper_vfp_##name##d(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0s, statusptr); \ - } else { \ - gen_helper_vfp_##name##s(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, statusptr); \ - } \ - tcg_temp_free_ptr(tcg_ctx, statusptr); \ -} - -VFP_GEN_ITOF(uito) -VFP_GEN_ITOF(sito) -#undef VFP_GEN_ITOF - -#define VFP_GEN_FTOI(name) \ -static inline void gen_vfp_##name(DisasContext *s, int dp, int neon) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - TCGv_ptr statusptr = get_fpstatus_ptr(s, neon); \ - if (dp) { \ - gen_helper_vfp_##name##d(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0d, statusptr); \ - } else { \ - gen_helper_vfp_##name##s(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, statusptr); \ - } \ - tcg_temp_free_ptr(tcg_ctx, statusptr); \ -} - -VFP_GEN_FTOI(toui) -VFP_GEN_FTOI(touiz) -VFP_GEN_FTOI(tosi) -VFP_GEN_FTOI(tosiz) -#undef 
VFP_GEN_FTOI - -#define VFP_GEN_FIX(name, round) \ -static inline void gen_vfp_##name(DisasContext *s, int dp, int shift, int neon) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - TCGv_i32 tmp_shift = tcg_const_i32(tcg_ctx, shift); \ - TCGv_ptr statusptr = get_fpstatus_ptr(s, neon); \ - if (dp) { \ - gen_helper_vfp_##name##d##round(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, tmp_shift, \ - statusptr); \ - } else { \ - gen_helper_vfp_##name##s##round(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, tmp_shift, \ - statusptr); \ - } \ - tcg_temp_free_i32(tcg_ctx, tmp_shift); \ - tcg_temp_free_ptr(tcg_ctx, statusptr); \ -} -VFP_GEN_FIX(tosh, _round_to_zero) -VFP_GEN_FIX(tosl, _round_to_zero) -VFP_GEN_FIX(touh, _round_to_zero) -VFP_GEN_FIX(toul, _round_to_zero) -VFP_GEN_FIX(shto, ) -VFP_GEN_FIX(slto, ) -VFP_GEN_FIX(uhto, ) -VFP_GEN_FIX(ulto, ) -#undef VFP_GEN_FIX - -static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) { - gen_aa32_ld64(s, tcg_ctx->cpu_F0d, addr, get_mem_index(s)); - } else { - gen_aa32_ld32u(s, tcg_ctx->cpu_F0s, addr, get_mem_index(s)); - } -} - -static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) { - gen_aa32_st64(s, tcg_ctx->cpu_F0d, addr, get_mem_index(s)); - } else { - gen_aa32_st32(s, tcg_ctx->cpu_F0s, addr, get_mem_index(s)); - } -} - -static inline long -vfp_reg_offset (int dp, int reg) -{ - if (dp) - return offsetof(CPUARMState, vfp.regs[reg]); - else if (reg & 1) { - return offsetof(CPUARMState, vfp.regs[reg >> 1]) - + offsetof(CPU_DoubleU, l.upper); - } else { - return offsetof(CPUARMState, vfp.regs[reg >> 1]) - + offsetof(CPU_DoubleU, l.lower); - } -} - -/* Return the offset of a 32-bit piece of a NEON register. - zero is the least significant end of the register. 
*/ -static inline long -neon_reg_offset (int reg, int n) -{ - int sreg; - sreg = reg * 2 + n; - return vfp_reg_offset(0, sreg); -} - -static TCGv_i32 neon_load_reg(TCGContext *tcg_ctx, int reg, int pass) -{ - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(reg, pass)); - return tmp; -} - -static void neon_store_reg(TCGContext *tcg_ctx, int reg, int pass, TCGv_i32 var) -{ - tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, neon_reg_offset(reg, pass)); - tcg_temp_free_i32(tcg_ctx, var); -} - -static inline void neon_load_reg64(TCGContext *tcg_ctx, TCGv_i64 var, int reg) -{ - tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg)); -} - -static inline void neon_store_reg64(TCGContext *tcg_ctx, TCGv_i64 var, int reg) -{ - tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg)); -} - -#define tcg_gen_ld_f32 tcg_gen_ld_i32 -#define tcg_gen_ld_f64 tcg_gen_ld_i64 -#define tcg_gen_st_f32 tcg_gen_st_i32 -#define tcg_gen_st_f64 tcg_gen_st_i64 - -static inline void gen_mov_F0_vreg(DisasContext *s, int dp, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) - tcg_gen_ld_f64(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg)); - else - tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg)); -} - -static inline void gen_mov_F1_vreg(DisasContext *s, int dp, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) - tcg_gen_ld_f64(tcg_ctx, tcg_ctx->cpu_F1d, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg)); - else - tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F1s, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg)); -} - -static inline void gen_mov_vreg_F0(DisasContext *s, int dp, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (dp) - tcg_gen_st_f64(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg)); - else - tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg)); -} - -#define ARM_CP_RW_BIT (1 << 20) - -static inline void iwmmxt_load_reg(DisasContext *s, TCGv_i64 var, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg])); -} - -static inline void iwmmxt_store_reg(DisasContext *s, TCGv_i64 var, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg])); -} - -static inline TCGv_i32 iwmmxt_load_creg(DisasContext *s, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 var = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); - return var; -} - -static inline void iwmmxt_store_creg(DisasContext *s, int reg, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); - tcg_temp_free_i32(tcg_ctx, var); -} - -static inline void gen_op_iwmmxt_movq_wRn_M0(DisasContext *s, int rn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - iwmmxt_store_reg(s, tcg_ctx->cpu_M0, rn); -} - -static inline void gen_op_iwmmxt_movq_M0_wRn(DisasContext *s, int rn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - iwmmxt_load_reg(s, tcg_ctx->cpu_M0, rn); -} - -static inline void gen_op_iwmmxt_orq_M0_wRn(DisasContext *s, int rn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn); - tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); -} - -static inline void 
gen_op_iwmmxt_andq_M0_wRn(DisasContext *s, int rn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn); - tcg_gen_and_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); -} - -static inline void gen_op_iwmmxt_xorq_M0_wRn(DisasContext *s, int rn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn); - tcg_gen_xor_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); -} - -#define IWMMXT_OP(name) \ -static inline void gen_op_iwmmxt_##name##_M0_wRn(DisasContext *s, int rn) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn); \ - gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); \ -} - -#define IWMMXT_OP_ENV(name) \ -static inline void gen_op_iwmmxt_##name##_M0_wRn(DisasContext *s, int rn) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn); \ - gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); \ -} - -#define IWMMXT_OP_ENV_SIZE(name) \ -IWMMXT_OP_ENV(name##b) \ -IWMMXT_OP_ENV(name##w) \ -IWMMXT_OP_ENV(name##l) - -#define IWMMXT_OP_ENV1(name) \ -static inline void gen_op_iwmmxt_##name##_M0(DisasContext *s) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0); \ -} - -IWMMXT_OP(maddsq) -IWMMXT_OP(madduq) -IWMMXT_OP(sadb) -IWMMXT_OP(sadw) -IWMMXT_OP(mulslw) -IWMMXT_OP(mulshw) -IWMMXT_OP(mululw) -IWMMXT_OP(muluhw) -IWMMXT_OP(macsw) -IWMMXT_OP(macuw) - -IWMMXT_OP_ENV_SIZE(unpackl) -IWMMXT_OP_ENV_SIZE(unpackh) - -IWMMXT_OP_ENV1(unpacklub) -IWMMXT_OP_ENV1(unpackluw) -IWMMXT_OP_ENV1(unpacklul) -IWMMXT_OP_ENV1(unpackhub) -IWMMXT_OP_ENV1(unpackhuw) -IWMMXT_OP_ENV1(unpackhul) -IWMMXT_OP_ENV1(unpacklsb) -IWMMXT_OP_ENV1(unpacklsw) -IWMMXT_OP_ENV1(unpacklsl) -IWMMXT_OP_ENV1(unpackhsb) -IWMMXT_OP_ENV1(unpackhsw) -IWMMXT_OP_ENV1(unpackhsl) - -IWMMXT_OP_ENV_SIZE(cmpeq) -IWMMXT_OP_ENV_SIZE(cmpgtu) -IWMMXT_OP_ENV_SIZE(cmpgts) - -IWMMXT_OP_ENV_SIZE(mins) -IWMMXT_OP_ENV_SIZE(minu) -IWMMXT_OP_ENV_SIZE(maxs) -IWMMXT_OP_ENV_SIZE(maxu) - -IWMMXT_OP_ENV_SIZE(subn) -IWMMXT_OP_ENV_SIZE(addn) -IWMMXT_OP_ENV_SIZE(subu) -IWMMXT_OP_ENV_SIZE(addu) -IWMMXT_OP_ENV_SIZE(subs) -IWMMXT_OP_ENV_SIZE(adds) - -IWMMXT_OP_ENV(avgb0) -IWMMXT_OP_ENV(avgb1) -IWMMXT_OP_ENV(avgw0) -IWMMXT_OP_ENV(avgw1) - -IWMMXT_OP_ENV(packuw) -IWMMXT_OP_ENV(packul) -IWMMXT_OP_ENV(packuq) -IWMMXT_OP_ENV(packsw) -IWMMXT_OP_ENV(packsl) -IWMMXT_OP_ENV(packsq) - -static void gen_op_iwmmxt_set_mup(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp; - tmp = load_cpu_field(s->uc, iwmmxt.cregs[ARM_IWMMXT_wCon]); - tcg_gen_ori_i32(tcg_ctx, tmp, tmp, 2); - store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]); -} - -static void gen_op_iwmmxt_set_cup(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp; - tmp = load_cpu_field(s->uc, iwmmxt.cregs[ARM_IWMMXT_wCon]); - tcg_gen_ori_i32(tcg_ctx, tmp, tmp, 1); - store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]); -} - -static void gen_op_iwmmxt_setpsr_nz(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - gen_helper_iwmmxt_setpsr_nz(tcg_ctx, tmp, tcg_ctx->cpu_M0); - store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]); -} - -static inline void gen_op_iwmmxt_addl_M0_wRn(DisasContext *s, int rn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - iwmmxt_load_reg(s, 
tcg_ctx->cpu_V1, rn); - tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1); - tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); -} - -static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, - TCGv_i32 dest) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int rd; - uint32_t offset; - TCGv_i32 tmp; - - rd = (insn >> 16) & 0xf; - tmp = load_reg(s, rd); - - offset = (insn & 0xff) << ((insn >> 7) & 2); - if (insn & (1 << 24)) { - /* Pre indexed */ - if (insn & (1 << 23)) - tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset); - else - tcg_gen_addi_i32(tcg_ctx, tmp, tmp, 0-offset); - tcg_gen_mov_i32(tcg_ctx, dest, tmp); - if (insn & (1 << 21)) - store_reg(s, rd, tmp); - else - tcg_temp_free_i32(tcg_ctx, tmp); - } else if (insn & (1 << 21)) { - /* Post indexed */ - tcg_gen_mov_i32(tcg_ctx, dest, tmp); - if (insn & (1 << 23)) - tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset); - else - tcg_gen_addi_i32(tcg_ctx, tmp, tmp, 0-offset); - store_reg(s, rd, tmp); - } else if (!(insn & (1 << 23))) - return 1; - return 0; -} - -static inline int gen_iwmmxt_shift(DisasContext *s, uint32_t insn, uint32_t mask, TCGv_i32 dest) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int rd = (insn >> 0) & 0xf; - TCGv_i32 tmp; - - if (insn & (1 << 8)) { - if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) { - return 1; - } else { - tmp = iwmmxt_load_creg(s, rd); - } - } else { - tmp = tcg_temp_new_i32(tcg_ctx); - iwmmxt_load_reg(s, tcg_ctx->cpu_V0, rd); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); - } - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, mask); - tcg_gen_mov_i32(tcg_ctx, dest, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - return 0; -} - -/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred - (ie. an undefined instruction). 
*/ -static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int rd, wrd; - int rdhi, rdlo, rd0, rd1, i; - TCGv_i32 addr; - TCGv_i32 tmp, tmp2, tmp3; - - if ((insn & 0x0e000e00) == 0x0c000000) { - if ((insn & 0x0fe00ff0) == 0x0c400000) { - wrd = insn & 0xf; - rdlo = (insn >> 12) & 0xf; - rdhi = (insn >> 16) & 0xf; - if (insn & ARM_CP_RW_BIT) { /* TMRRC */ - iwmmxt_load_reg(s, tcg_ctx->cpu_V0, wrd); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_V0); - tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_V0); - } else { /* TMCRR */ - tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_R[rdhi]); - iwmmxt_store_reg(s, tcg_ctx->cpu_V0, wrd); - gen_op_iwmmxt_set_mup(s); - } - return 0; - } - - wrd = (insn >> 12) & 0xf; - addr = tcg_temp_new_i32(tcg_ctx); - if (gen_iwmmxt_address(s, insn, addr)) { - tcg_temp_free_i32(tcg_ctx, addr); - return 1; - } - if (insn & ARM_CP_RW_BIT) { - if ((insn >> 28) == 0xf) { /* WLDRW wCx */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - iwmmxt_store_creg(s, wrd, tmp); - } else { - i = 1; - if (insn & (1 << 8)) { - if (insn & (1 << 22)) { /* WLDRD */ - gen_aa32_ld64(s, tcg_ctx->cpu_M0, addr, get_mem_index(s)); - i = 0; - } else { /* WLDRW wRd */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - } - } else { - tmp = tcg_temp_new_i32(tcg_ctx); - if (insn & (1 << 22)) { /* WLDRH */ - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - } else { /* WLDRB */ - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - } - } - if (i) { - tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_M0, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - } - } else { - if ((insn >> 28) == 0xf) { /* WSTRW wCx */ - tmp = iwmmxt_load_creg(s, wrd); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - } else { - gen_op_iwmmxt_movq_M0_wRn(s, wrd); - tmp = tcg_temp_new_i32(tcg_ctx); - if (insn & (1 << 8)) { - if (insn & (1 << 22)) { /* WSTRD */ - gen_aa32_st64(s, tcg_ctx->cpu_M0, addr, get_mem_index(s)); - } else { /* WSTRW wRd */ - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - } - } else { - if (insn & (1 << 22)) { /* WSTRH */ - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - } else { /* WSTRB */ - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - } - } - } - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_temp_free_i32(tcg_ctx, addr); - return 0; - } - - if ((insn & 0x0f000000) != 0x0e000000) - return 1; - - switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) { - case 0x000: /* WOR */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 0) & 0xf; - rd1 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - gen_op_iwmmxt_orq_M0_wRn(s, rd1); - gen_op_iwmmxt_setpsr_nz(s); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x011: /* TMCR */ - if (insn & 0xf) - return 1; - rd = (insn >> 12) & 0xf; - wrd = (insn >> 16) & 0xf; - switch (wrd) { - case ARM_IWMMXT_wCID: - case ARM_IWMMXT_wCASF: - break; - case ARM_IWMMXT_wCon: - gen_op_iwmmxt_set_cup(s); - /* Fall through. 
*/ - case ARM_IWMMXT_wCSSF: - tmp = iwmmxt_load_creg(s, wrd); - tmp2 = load_reg(s, rd); - tcg_gen_andc_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - iwmmxt_store_creg(s, wrd, tmp); - break; - case ARM_IWMMXT_wCGR0: - case ARM_IWMMXT_wCGR1: - case ARM_IWMMXT_wCGR2: - case ARM_IWMMXT_wCGR3: - gen_op_iwmmxt_set_cup(s); - tmp = load_reg(s, rd); - iwmmxt_store_creg(s, wrd, tmp); - break; - default: - return 1; - } - break; - case 0x100: /* WXOR */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 0) & 0xf; - rd1 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - gen_op_iwmmxt_xorq_M0_wRn(s, rd1); - gen_op_iwmmxt_setpsr_nz(s); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x111: /* TMRC */ - if (insn & 0xf) - return 1; - rd = (insn >> 12) & 0xf; - wrd = (insn >> 16) & 0xf; - tmp = iwmmxt_load_creg(s, wrd); - store_reg(s, rd, tmp); - break; - case 0x300: /* WANDN */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 0) & 0xf; - rd1 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - tcg_gen_neg_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); - gen_op_iwmmxt_andq_M0_wRn(s, rd1); - gen_op_iwmmxt_setpsr_nz(s); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x200: /* WAND */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 0) & 0xf; - rd1 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - gen_op_iwmmxt_andq_M0_wRn(s, rd1); - gen_op_iwmmxt_setpsr_nz(s); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x810: case 0xa10: /* WMADD */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 0) & 0xf; - rd1 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - if (insn & (1 << 21)) - gen_op_iwmmxt_maddsq_M0_wRn(s, rd1); - else - gen_op_iwmmxt_madduq_M0_wRn(s, rd1); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 22) & 3) { - case 0: - gen_op_iwmmxt_unpacklb_M0_wRn(s, rd1); - break; - case 1: - gen_op_iwmmxt_unpacklw_M0_wRn(s, rd1); - break; - case 2: - gen_op_iwmmxt_unpackll_M0_wRn(s, rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 22) & 3) { - case 0: - gen_op_iwmmxt_unpackhb_M0_wRn(s, rd1); - break; - case 1: - gen_op_iwmmxt_unpackhw_M0_wRn(s, rd1); - break; - case 2: - gen_op_iwmmxt_unpackhl_M0_wRn(s, rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - if (insn & (1 << 22)) - gen_op_iwmmxt_sadw_M0_wRn(s, rd1); - else - gen_op_iwmmxt_sadb_M0_wRn(s, rd1); - if (!(insn & (1 << 20))) - gen_op_iwmmxt_addl_M0_wRn(s, wrd); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; 
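/*
 * Illustration only -- hypothetical helpers, not part of this file.
 * The large switch in disas_iwmmxt_insn() above dispatches on a key
 * built from instruction bits [23:20] (key bits 11:8) and bits [11:4]
 * (key bits 7:0); e.g. the WMUL group decodes as keys 0x010, 0x110,
 * 0x210 and 0x310.  Most data-processing cases then pull their
 * register numbers from the same fixed fields, sketched here (which
 * of the two source fields comes first varies by opcode).
 */
#include <stdint.h>

static unsigned iwmmxt_case_key(uint32_t insn)
{
    return ((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff);
}

static void iwmmxt_decode_regs(uint32_t insn, int *wrd, int *rd0, int *rd1)
{
    *wrd = (insn >> 12) & 0xf;  /* destination wRd          */
    *rd0 = (insn >> 16) & 0xf;  /* source field, bits 19:16 */
    *rd1 = insn & 0xf;          /* source field, bits 3:0   */
}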
- rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - if (insn & (1 << 21)) { - if (insn & (1 << 20)) - gen_op_iwmmxt_mulshw_M0_wRn(s, rd1); - else - gen_op_iwmmxt_mulslw_M0_wRn(s, rd1); - } else { - if (insn & (1 << 20)) - gen_op_iwmmxt_muluhw_M0_wRn(s, rd1); - else - gen_op_iwmmxt_mululw_M0_wRn(s, rd1); - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - if (insn & (1 << 21)) - gen_op_iwmmxt_macsw_M0_wRn(s, rd1); - else - gen_op_iwmmxt_macuw_M0_wRn(s, rd1); - if (!(insn & (1 << 20))) { - iwmmxt_load_reg(s, tcg_ctx->cpu_V1, wrd); - tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 22) & 3) { - case 0: - gen_op_iwmmxt_cmpeqb_M0_wRn(s, rd1); - break; - case 1: - gen_op_iwmmxt_cmpeqw_M0_wRn(s, rd1); - break; - case 2: - gen_op_iwmmxt_cmpeql_M0_wRn(s, rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - if (insn & (1 << 22)) { - if (insn & (1 << 20)) - gen_op_iwmmxt_avgw1_M0_wRn(s, rd1); - else - gen_op_iwmmxt_avgw0_M0_wRn(s, rd1); - } else { - if (insn & (1 << 20)) - gen_op_iwmmxt_avgb1_M0_wRn(s, rd1); - else - gen_op_iwmmxt_avgb0_M0_wRn(s, rd1); - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - tmp = iwmmxt_load_creg(s, ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3)); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 7); - iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rd1); - gen_helper_iwmmxt_align(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */ - if (((insn >> 6) & 3) == 3) - return 1; - rd = (insn >> 12) & 0xf; - wrd = (insn >> 16) & 0xf; - tmp = load_reg(s, rd); - gen_op_iwmmxt_movq_M0_wRn(s, wrd); - switch ((insn >> 6) & 3) { - case 0: - tmp2 = tcg_const_i32(tcg_ctx, 0xff); - tmp3 = tcg_const_i32(tcg_ctx, (insn & 7) << 3); - break; - case 1: - tmp2 = tcg_const_i32(tcg_ctx, 0xffff); - tmp3 = tcg_const_i32(tcg_ctx, (insn & 3) << 4); - break; - case 2: - tmp2 = tcg_const_i32(tcg_ctx, 0xffffffff); - tmp3 = tcg_const_i32(tcg_ctx, (insn & 1) << 5); - break; - default: - TCGV_UNUSED_I32(tmp2); - TCGV_UNUSED_I32(tmp3); - } - gen_helper_iwmmxt_insr(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2, tmp3); - tcg_temp_free_i32(tcg_ctx, tmp3); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */ - rd = (insn >> 12) & 0xf; - wrd = (insn >> 16) & 0xf; - if (rd == 15 
|| ((insn >> 22) & 3) == 3) - return 1; - gen_op_iwmmxt_movq_M0_wRn(s, wrd); - tmp = tcg_temp_new_i32(tcg_ctx); - switch ((insn >> 22) & 3) { - case 0: - tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 7) << 3); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); - if (insn & 8) { - tcg_gen_ext8s_i32(tcg_ctx, tmp, tmp); - } else { - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xff); - } - break; - case 1: - tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 3) << 4); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); - if (insn & 8) { - tcg_gen_ext16s_i32(tcg_ctx, tmp, tmp); - } else { - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xffff); - } - break; - case 2: - tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 1) << 5); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); - break; - } - store_reg(s, rd, tmp); - break; - case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */ - if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3) - return 1; - tmp = iwmmxt_load_creg(s, ARM_IWMMXT_wCASF); - switch ((insn >> 22) & 3) { - case 0: - tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 7) << 2) + 0); - break; - case 1: - tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 3) << 3) + 4); - break; - case 2: - tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 1) << 4) + 12); - break; - } - tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 28); - gen_set_nzcv(s, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */ - if (((insn >> 6) & 3) == 3) - return 1; - rd = (insn >> 12) & 0xf; - wrd = (insn >> 16) & 0xf; - tmp = load_reg(s, rd); - switch ((insn >> 6) & 3) { - case 0: - gen_helper_iwmmxt_bcstb(tcg_ctx, tcg_ctx->cpu_M0, tmp); - break; - case 1: - gen_helper_iwmmxt_bcstw(tcg_ctx, tcg_ctx->cpu_M0, tmp); - break; - case 2: - gen_helper_iwmmxt_bcstl(tcg_ctx, tcg_ctx->cpu_M0, tmp); - break; - } - tcg_temp_free_i32(tcg_ctx, tmp); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */ - if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) - return 1; - tmp = iwmmxt_load_creg(s, ARM_IWMMXT_wCASF); - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, tmp2, tmp); - switch ((insn >> 22) & 3) { - case 0: - for (i = 0; i < 7; i ++) { - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 4); - tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); - } - break; - case 1: - for (i = 0; i < 3; i ++) { - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 8); - tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); - } - break; - case 2: - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); - tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); - break; - } - gen_set_nzcv(s, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 22) & 3) { - case 0: - gen_helper_iwmmxt_addcb(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); - break; - case 1: - gen_helper_iwmmxt_addcw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); - break; - case 2: - gen_helper_iwmmxt_addcl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */ - if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) - return 1; - tmp = iwmmxt_load_creg(s, ARM_IWMMXT_wCASF); - 
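/*
 * Illustration only -- hypothetical helper, not part of this file.
 * TANDC above and the TORC case being decoded here reduce the eight
 * 4-bit flag fields held in wCASF with AND or OR; the shift-by-4
 * loops generated for the byte variant are equivalent to this
 * host-side fold, whose top nibble (bits 31:28) then feeds
 * gen_set_nzcv().
 */
#include <stdint.h>

static uint32_t fold_wcasf_b(uint32_t wcasf, int use_or)
{
    uint32_t acc = wcasf;
    uint32_t sh = wcasf;
    int i;

    for (i = 0; i < 7; i++) {
        sh <<= 4;
        acc = use_or ? (acc | sh) : (acc & sh);
    }
    return acc;
}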
tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, tmp2, tmp); - switch ((insn >> 22) & 3) { - case 0: - for (i = 0; i < 7; i ++) { - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 4); - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - } - break; - case 1: - for (i = 0; i < 3; i ++) { - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 8); - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - } - break; - case 2: - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - break; - } - gen_set_nzcv(s, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */ - rd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3) - return 1; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - tmp = tcg_temp_new_i32(tcg_ctx); - switch ((insn >> 22) & 3) { - case 0: - gen_helper_iwmmxt_msbb(tcg_ctx, tmp, tcg_ctx->cpu_M0); - break; - case 1: - gen_helper_iwmmxt_msbw(tcg_ctx, tmp, tcg_ctx->cpu_M0); - break; - case 2: - gen_helper_iwmmxt_msbl(tcg_ctx, tmp, tcg_ctx->cpu_M0); - break; - } - store_reg(s, rd, tmp); - break; - case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */ - case 0x906: case 0xb06: case 0xd06: case 0xf06: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_cmpgtsb_M0_wRn(s, rd1); - else - gen_op_iwmmxt_cmpgtub_M0_wRn(s, rd1); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_cmpgtsw_M0_wRn(s, rd1); - else - gen_op_iwmmxt_cmpgtuw_M0_wRn(s, rd1); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_cmpgtsl_M0_wRn(s, rd1); - else - gen_op_iwmmxt_cmpgtul_M0_wRn(s, rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */ - case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpacklsb_M0(s); - else - gen_op_iwmmxt_unpacklub_M0(s); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpacklsw_M0(s); - else - gen_op_iwmmxt_unpackluw_M0(s); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpacklsl_M0(s); - else - gen_op_iwmmxt_unpacklul_M0(s); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */ - case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpackhsb_M0(s); - else - gen_op_iwmmxt_unpackhub_M0(s); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpackhsw_M0(s); - else - gen_op_iwmmxt_unpackhuw_M0(s); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpackhsl_M0(s); - else - gen_op_iwmmxt_unpackhul_M0(s); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */ - case 0x214: case 0x614: case 0xa14: case 0xe14: - if (((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 
16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - tmp = tcg_temp_new_i32(tcg_ctx); - if (gen_iwmmxt_shift(s, insn, 0xff, tmp)) { - tcg_temp_free_i32(tcg_ctx, tmp); - return 1; - } - switch ((insn >> 22) & 3) { - case 1: - gen_helper_iwmmxt_srlw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - case 2: - gen_helper_iwmmxt_srll(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - case 3: - gen_helper_iwmmxt_srlq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - } - tcg_temp_free_i32(tcg_ctx, tmp); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */ - case 0x014: case 0x414: case 0x814: case 0xc14: - if (((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - tmp = tcg_temp_new_i32(tcg_ctx); - if (gen_iwmmxt_shift(s, insn, 0xff, tmp)) { - tcg_temp_free_i32(tcg_ctx, tmp); - return 1; - } - switch ((insn >> 22) & 3) { - case 1: - gen_helper_iwmmxt_sraw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - case 2: - gen_helper_iwmmxt_sral(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - case 3: - gen_helper_iwmmxt_sraq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - } - tcg_temp_free_i32(tcg_ctx, tmp); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */ - case 0x114: case 0x514: case 0x914: case 0xd14: - if (((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - tmp = tcg_temp_new_i32(tcg_ctx); - if (gen_iwmmxt_shift(s, insn, 0xff, tmp)) { - tcg_temp_free_i32(tcg_ctx, tmp); - return 1; - } - switch ((insn >> 22) & 3) { - case 1: - gen_helper_iwmmxt_sllw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - case 2: - gen_helper_iwmmxt_slll(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - case 3: - gen_helper_iwmmxt_sllq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - } - tcg_temp_free_i32(tcg_ctx, tmp); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */ - case 0x314: case 0x714: case 0xb14: case 0xf14: - if (((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - tmp = tcg_temp_new_i32(tcg_ctx); - switch ((insn >> 22) & 3) { - case 1: - if (gen_iwmmxt_shift(s, insn, 0xf, tmp)) { - tcg_temp_free_i32(tcg_ctx, tmp); - return 1; - } - gen_helper_iwmmxt_rorw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - case 2: - if (gen_iwmmxt_shift(s, insn, 0x1f, tmp)) { - tcg_temp_free_i32(tcg_ctx, tmp); - return 1; - } - gen_helper_iwmmxt_rorl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - case 3: - if (gen_iwmmxt_shift(s, insn, 0x3f, tmp)) { - tcg_temp_free_i32(tcg_ctx, tmp); - return 1; - } - gen_helper_iwmmxt_rorq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - break; - } - tcg_temp_free_i32(tcg_ctx, tmp); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x116: case 0x316: 
case 0x516: case 0x716: /* WMIN */ - case 0x916: case 0xb16: case 0xd16: case 0xf16: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_minsb_M0_wRn(s, rd1); - else - gen_op_iwmmxt_minub_M0_wRn(s, rd1); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_minsw_M0_wRn(s, rd1); - else - gen_op_iwmmxt_minuw_M0_wRn(s, rd1); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_minsl_M0_wRn(s, rd1); - else - gen_op_iwmmxt_minul_M0_wRn(s, rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */ - case 0x816: case 0xa16: case 0xc16: case 0xe16: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_maxsb_M0_wRn(s, rd1); - else - gen_op_iwmmxt_maxub_M0_wRn(s, rd1); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_maxsw_M0_wRn(s, rd1); - else - gen_op_iwmmxt_maxuw_M0_wRn(s, rd1); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_maxsl_M0_wRn(s, rd1); - else - gen_op_iwmmxt_maxul_M0_wRn(s, rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */ - case 0x402: case 0x502: case 0x602: case 0x702: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - tmp = tcg_const_i32(tcg_ctx, (insn >> 20) & 3); - iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rd1); - gen_helper_iwmmxt_align(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */ - case 0x41a: case 0x51a: case 0x61a: case 0x71a: - case 0x81a: case 0x91a: case 0xa1a: case 0xb1a: - case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 20) & 0xf) { - case 0x0: - gen_op_iwmmxt_subnb_M0_wRn(s, rd1); - break; - case 0x1: - gen_op_iwmmxt_subub_M0_wRn(s, rd1); - break; - case 0x3: - gen_op_iwmmxt_subsb_M0_wRn(s, rd1); - break; - case 0x4: - gen_op_iwmmxt_subnw_M0_wRn(s, rd1); - break; - case 0x5: - gen_op_iwmmxt_subuw_M0_wRn(s, rd1); - break; - case 0x7: - gen_op_iwmmxt_subsw_M0_wRn(s, rd1); - break; - case 0x8: - gen_op_iwmmxt_subnl_M0_wRn(s, rd1); - break; - case 0x9: - gen_op_iwmmxt_subul_M0_wRn(s, rd1); - break; - case 0xb: - gen_op_iwmmxt_subsl_M0_wRn(s, rd1); - break; - default: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */ - case 0x41e: case 0x51e: case 0x61e: case 0x71e: - case 0x81e: case 0x91e: case 0xa1e: case 0xb1e: - case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - tmp = tcg_const_i32(tcg_ctx, ((insn >> 16) & 0xf0) | (insn & 0x0f)); - gen_helper_iwmmxt_shufh(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - 
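/*
 * Illustration only -- hypothetical helper, not part of this file.
 * The 8-bit WSHUFH selector built just above is split across the
 * encoding as two 4-bit fields, bits [23:20] and bits [3:0]; each
 * 2-bit pair of the assembled immediate chooses which source
 * halfword lands in the corresponding destination halfword.
 */
#include <stdint.h>

static uint32_t wshufh_selector(uint32_t insn)
{
    return ((insn >> 16) & 0xf0) | (insn & 0x0f);
}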
gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */ - case 0x418: case 0x518: case 0x618: case 0x718: - case 0x818: case 0x918: case 0xa18: case 0xb18: - case 0xc18: case 0xd18: case 0xe18: case 0xf18: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 20) & 0xf) { - case 0x0: - gen_op_iwmmxt_addnb_M0_wRn(s, rd1); - break; - case 0x1: - gen_op_iwmmxt_addub_M0_wRn(s, rd1); - break; - case 0x3: - gen_op_iwmmxt_addsb_M0_wRn(s, rd1); - break; - case 0x4: - gen_op_iwmmxt_addnw_M0_wRn(s, rd1); - break; - case 0x5: - gen_op_iwmmxt_adduw_M0_wRn(s, rd1); - break; - case 0x7: - gen_op_iwmmxt_addsw_M0_wRn(s, rd1); - break; - case 0x8: - gen_op_iwmmxt_addnl_M0_wRn(s, rd1); - break; - case 0x9: - gen_op_iwmmxt_addul_M0_wRn(s, rd1); - break; - case 0xb: - gen_op_iwmmxt_addsl_M0_wRn(s, rd1); - break; - default: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */ - case 0x408: case 0x508: case 0x608: case 0x708: - case 0x808: case 0x908: case 0xa08: case 0xb08: - case 0xc08: case 0xd08: case 0xe08: case 0xf08: - if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(s, rd0); - switch ((insn >> 22) & 3) { - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_packsw_M0_wRn(s, rd1); - else - gen_op_iwmmxt_packuw_M0_wRn(s, rd1); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_packsl_M0_wRn(s, rd1); - else - gen_op_iwmmxt_packul_M0_wRn(s, rd1); - break; - case 3: - if (insn & (1 << 21)) - gen_op_iwmmxt_packsq_M0_wRn(s, rd1); - else - gen_op_iwmmxt_packuq_M0_wRn(s, rd1); - break; - } - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - gen_op_iwmmxt_set_cup(s); - break; - case 0x201: case 0x203: case 0x205: case 0x207: - case 0x209: case 0x20b: case 0x20d: case 0x20f: - case 0x211: case 0x213: case 0x215: case 0x217: - case 0x219: case 0x21b: case 0x21d: case 0x21f: - wrd = (insn >> 5) & 0xf; - rd0 = (insn >> 12) & 0xf; - rd1 = (insn >> 0) & 0xf; - if (rd0 == 0xf || rd1 == 0xf) - return 1; - gen_op_iwmmxt_movq_M0_wRn(s, wrd); - tmp = load_reg(s, rd0); - tmp2 = load_reg(s, rd1); - switch ((insn >> 16) & 0xf) { - case 0x0: /* TMIA */ - gen_helper_iwmmxt_muladdsl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); - break; - case 0x8: /* TMIAPH */ - gen_helper_iwmmxt_muladdsw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); - break; - case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */ - if (insn & (1 << 16)) - tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); - if (insn & (1 << 17)) - tcg_gen_shri_i32(tcg_ctx, tmp2, tmp2, 16); - gen_helper_iwmmxt_muladdswl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); - break; - default: - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - return 1; - } - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - gen_op_iwmmxt_movq_wRn_M0(s, wrd); - gen_op_iwmmxt_set_mup(s); - break; - default: - return 1; - } - - return 0; -} - -/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred - (ie. an undefined instruction). 
*/ -static int disas_dsp_insn(DisasContext *s, uint32_t insn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int acc, rd0, rd1, rdhi, rdlo; - TCGv_i32 tmp, tmp2; - - if ((insn & 0x0ff00f10) == 0x0e200010) { - /* Multiply with Internal Accumulate Format */ - rd0 = (insn >> 12) & 0xf; - rd1 = insn & 0xf; - acc = (insn >> 5) & 7; - - if (acc != 0) - return 1; - - tmp = load_reg(s, rd0); - tmp2 = load_reg(s, rd1); - switch ((insn >> 16) & 0xf) { - case 0x0: /* MIA */ - gen_helper_iwmmxt_muladdsl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); - break; - case 0x8: /* MIAPH */ - gen_helper_iwmmxt_muladdsw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); - break; - case 0xc: /* MIABB */ - case 0xd: /* MIABT */ - case 0xe: /* MIATB */ - case 0xf: /* MIATT */ - if (insn & (1 << 16)) - tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); - if (insn & (1 << 17)) - tcg_gen_shri_i32(tcg_ctx, tmp2, tmp2, 16); - gen_helper_iwmmxt_muladdswl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); - break; - default: - return 1; - } - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - - gen_op_iwmmxt_movq_wRn_M0(s, acc); - return 0; - } - - if ((insn & 0x0fe00ff8) == 0x0c400000) { - /* Internal Accumulator Access Format */ - rdhi = (insn >> 16) & 0xf; - rdlo = (insn >> 12) & 0xf; - acc = insn & 7; - - if (acc != 0) - return 1; - - if (insn & ARM_CP_RW_BIT) { /* MRA */ - iwmmxt_load_reg(s, tcg_ctx->cpu_V0, acc); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_V0); - tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_V0); - tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_R[rdhi], (1 << (40 - 32)) - 1); - } else { /* MAR */ - tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_R[rdhi]); - iwmmxt_store_reg(s, tcg_ctx->cpu_V0, acc); - } - return 0; - } - - return 1; -} - -// this causes "warning C4293: shift count negative or too big, undefined behavior" -// on msvc, so is replaced with separate versions for the shift to perform. -//#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n)) -#if 0 -#define VFP_SREG(insn, bigbit, smallbit) \ - ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) -#endif - -#define VFP_REG_SHR_NEG(insn, n) ((insn) << -(n)) -#define VFP_SREG_NEG(insn, bigbit, smallbit) \ - ((VFP_REG_SHR_NEG(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) - -#define VFP_REG_SHR_POS(x, n) ((insn) >> (n)) -#define VFP_SREG_POS(insn, bigbit, smallbit) \ - ((VFP_REG_SHR_POS(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) - -#define VFP_DREG(reg, insn, bigbit, smallbit) do { \ - if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \ - reg = (((insn) >> (bigbit)) & 0x0f) \ - | (((insn) >> ((smallbit) - 4)) & 0x10); \ - } else { \ - if (insn & (1 << (smallbit))) \ - return 1; \ - reg = ((insn) >> (bigbit)) & 0x0f; \ - }} while (0) - -#define VFP_SREG_D(insn) VFP_SREG_POS(insn, 12, 22) -#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22) -#define VFP_SREG_N(insn) VFP_SREG_POS(insn, 16, 7) -#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7) -#define VFP_SREG_M(insn) VFP_SREG_NEG(insn, 0, 5) -#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5) - -/* Move between integer and VFP cores. 
*/ -static TCGv_i32 gen_vfp_mrs(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, tmp, tcg_ctx->cpu_F0s); - return tmp; -} - -static void gen_vfp_msr(DisasContext *s, TCGv_i32 tmp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_F0s, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); -} - -static void gen_neon_dup_u8(DisasContext *s, TCGv_i32 var, int shift) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - if (shift) - tcg_gen_shri_i32(tcg_ctx, var, var, shift); - tcg_gen_ext8u_i32(tcg_ctx, var, var); - tcg_gen_shli_i32(tcg_ctx, tmp, var, 8); - tcg_gen_or_i32(tcg_ctx, var, var, tmp); - tcg_gen_shli_i32(tcg_ctx, tmp, var, 16); - tcg_gen_or_i32(tcg_ctx, var, var, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); -} - -static void gen_neon_dup_low16(DisasContext *s, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ext16u_i32(tcg_ctx, var, var); - tcg_gen_shli_i32(tcg_ctx, tmp, var, 16); - tcg_gen_or_i32(tcg_ctx, var, var, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); -} - -static void gen_neon_dup_high16(DisasContext *s, TCGv_i32 var) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_andi_i32(tcg_ctx, var, var, 0xffff0000); - tcg_gen_shri_i32(tcg_ctx, tmp, var, 16); - tcg_gen_or_i32(tcg_ctx, var, var, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); -} - -static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size) -{ - /* Load a single Neon element and replicate into a 32 bit TCG reg */ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - switch (size) { - case 0: - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - gen_neon_dup_u8(s, tmp, 0); - break; - case 1: - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - gen_neon_dup_low16(s, tmp); - break; - case 2: - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - break; - default: /* Avoid compiler warnings. 
*/ - abort(); - } - return tmp; -} - -static int handle_vsel(DisasContext *s, uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm, - uint32_t dp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint32_t cc = extract32(insn, 20, 2); - - if (dp) { - TCGv_i64 frn, frm, dest; - TCGv_i64 tmp, zero, zf, nf, vf; - - zero = tcg_const_i64(tcg_ctx, 0); - - frn = tcg_temp_new_i64(tcg_ctx); - frm = tcg_temp_new_i64(tcg_ctx); - dest = tcg_temp_new_i64(tcg_ctx); - - zf = tcg_temp_new_i64(tcg_ctx); - nf = tcg_temp_new_i64(tcg_ctx); - vf = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_extu_i32_i64(tcg_ctx, zf, tcg_ctx->cpu_ZF); - tcg_gen_ext_i32_i64(tcg_ctx, nf, tcg_ctx->cpu_NF); - tcg_gen_ext_i32_i64(tcg_ctx, vf, tcg_ctx->cpu_VF); - - tcg_gen_ld_f64(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f64(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm)); - switch (cc) { - case 0: /* eq: Z */ - tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, dest, zf, zero, - frn, frm); - break; - case 1: /* vs: V */ - tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, dest, vf, zero, - frn, frm); - break; - case 2: /* ge: N == V -> N ^ V == 0 */ - tmp = tcg_temp_new_i64(tcg_ctx); - tcg_gen_xor_i64(tcg_ctx, tmp, vf, nf); - tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, dest, tmp, zero, - frn, frm); - tcg_temp_free_i64(tcg_ctx, tmp); - break; - case 3: /* gt: !Z && N == V */ - tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, dest, zf, zero, - frn, frm); - tmp = tcg_temp_new_i64(tcg_ctx); - tcg_gen_xor_i64(tcg_ctx, tmp, vf, nf); - tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, dest, tmp, zero, - dest, frm); - tcg_temp_free_i64(tcg_ctx, tmp); - break; - } - tcg_gen_st_f64(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i64(tcg_ctx, frn); - tcg_temp_free_i64(tcg_ctx, frm); - tcg_temp_free_i64(tcg_ctx, dest); - - tcg_temp_free_i64(tcg_ctx, zf); - tcg_temp_free_i64(tcg_ctx, nf); - tcg_temp_free_i64(tcg_ctx, vf); - - tcg_temp_free_i64(tcg_ctx, zero); - } else { - TCGv_i32 frn, frm, dest; - TCGv_i32 tmp, zero; - - zero = tcg_const_i32(tcg_ctx, 0); - - frn = tcg_temp_new_i32(tcg_ctx); - frm = tcg_temp_new_i32(tcg_ctx); - dest = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_f32(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f32(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm)); - switch (cc) { - case 0: /* eq: Z */ - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, dest, tcg_ctx->cpu_ZF, zero, - frn, frm); - break; - case 1: /* vs: V */ - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, dest, tcg_ctx->cpu_VF, zero, - frn, frm); - break; - case 2: /* ge: N == V -> N ^ V == 0 */ - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GE, dest, tmp, zero, - frn, frm); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 3: /* gt: !Z && N == V */ - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, dest, tcg_ctx->cpu_ZF, zero, - frn, frm); - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GE, dest, tmp, zero, - dest, frm); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - } - tcg_gen_st_f32(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i32(tcg_ctx, frn); - tcg_temp_free_i32(tcg_ctx, frm); - tcg_temp_free_i32(tcg_ctx, dest); - - tcg_temp_free_i32(tcg_ctx, zero); - } - - return 0; -} - -static int handle_vminmaxnm(DisasContext *s, uint32_t insn, uint32_t rd, uint32_t rn, - uint32_t rm, uint32_t dp) -{ - TCGContext *tcg_ctx = 
s->uc->tcg_ctx; - uint32_t vmin = extract32(insn, 6, 1); - TCGv_ptr fpst = get_fpstatus_ptr(s, 0); - - if (dp) { - TCGv_i64 frn, frm, dest; - - frn = tcg_temp_new_i64(tcg_ctx); - frm = tcg_temp_new_i64(tcg_ctx); - dest = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_ld_f64(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f64(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm)); - if (vmin) { - gen_helper_vfp_minnumd(tcg_ctx, dest, frn, frm, fpst); - } else { - gen_helper_vfp_maxnumd(tcg_ctx, dest, frn, frm, fpst); - } - tcg_gen_st_f64(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i64(tcg_ctx, frn); - tcg_temp_free_i64(tcg_ctx, frm); - tcg_temp_free_i64(tcg_ctx, dest); - } else { - TCGv_i32 frn, frm, dest; - - frn = tcg_temp_new_i32(tcg_ctx); - frm = tcg_temp_new_i32(tcg_ctx); - dest = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_ld_f32(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn)); - tcg_gen_ld_f32(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm)); - if (vmin) { - gen_helper_vfp_minnums(tcg_ctx, dest, frn, frm, fpst); - } else { - gen_helper_vfp_maxnums(tcg_ctx, dest, frn, frm, fpst); - } - tcg_gen_st_f32(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i32(tcg_ctx, frn); - tcg_temp_free_i32(tcg_ctx, frm); - tcg_temp_free_i32(tcg_ctx, dest); - } - - tcg_temp_free_ptr(tcg_ctx, fpst); - return 0; -} - -static int handle_vrint(DisasContext *s, uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, - int rounding) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_ptr fpst = get_fpstatus_ptr(s, 0); - TCGv_i32 tcg_rmode; - - tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rounding)); - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - - if (dp) { - TCGv_i64 tcg_op; - TCGv_i64 tcg_res; - tcg_op = tcg_temp_new_i64(tcg_ctx); - tcg_res = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ld_f64(tcg_ctx, tcg_op, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm)); - gen_helper_rintd(tcg_ctx, tcg_res, tcg_op, fpst); - tcg_gen_st_f64(tcg_ctx, tcg_res, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i64(tcg_ctx, tcg_op); - tcg_temp_free_i64(tcg_ctx, tcg_res); - } else { - TCGv_i32 tcg_op; - TCGv_i32 tcg_res; - tcg_op = tcg_temp_new_i32(tcg_ctx); - tcg_res = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_f32(tcg_ctx, tcg_op, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm)); - gen_helper_rints(tcg_ctx, tcg_res, tcg_op, fpst); - tcg_gen_st_f32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); - tcg_temp_free_i32(tcg_ctx, tcg_op); - tcg_temp_free_i32(tcg_ctx, tcg_res); - } - - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - tcg_temp_free_i32(tcg_ctx, tcg_rmode); - - tcg_temp_free_ptr(tcg_ctx, fpst); - return 0; -} - -static int handle_vcvt(DisasContext *s, uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, - int rounding) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - bool is_signed = extract32(insn, 7, 1); - TCGv_ptr fpst = get_fpstatus_ptr(s, 0); - TCGv_i32 tcg_rmode, tcg_shift; - - tcg_shift = tcg_const_i32(tcg_ctx, 0); - - tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rounding)); - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - - if (dp) { - TCGv_i64 tcg_double, tcg_res; - TCGv_i32 tcg_tmp; - /* Rd is encoded as a single precision register even when the source - * is double precision. 
- */ - rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1); - tcg_double = tcg_temp_new_i64(tcg_ctx); - tcg_res = tcg_temp_new_i64(tcg_ctx); - tcg_tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_f64(tcg_ctx, tcg_double, tcg_ctx->cpu_env, vfp_reg_offset(1, rm)); - if (is_signed) { - gen_helper_vfp_tosld(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst); - } else { - gen_helper_vfp_tould(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst); - } - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_tmp, tcg_res); - tcg_gen_st_f32(tcg_ctx, tcg_tmp, tcg_ctx->cpu_env, vfp_reg_offset(0, rd)); - tcg_temp_free_i32(tcg_ctx, tcg_tmp); - tcg_temp_free_i64(tcg_ctx, tcg_res); - tcg_temp_free_i64(tcg_ctx, tcg_double); - } else { - TCGv_i32 tcg_single, tcg_res; - tcg_single = tcg_temp_new_i32(tcg_ctx); - tcg_res = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_f32(tcg_ctx, tcg_single, tcg_ctx->cpu_env, vfp_reg_offset(0, rm)); - if (is_signed) { - gen_helper_vfp_tosls(tcg_ctx, tcg_res, tcg_single, tcg_shift, fpst); - } else { - gen_helper_vfp_touls(tcg_ctx, tcg_res, tcg_single, tcg_shift, fpst); - } - tcg_gen_st_f32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, vfp_reg_offset(0, rd)); - tcg_temp_free_i32(tcg_ctx, tcg_res); - tcg_temp_free_i32(tcg_ctx, tcg_single); - } - - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - tcg_temp_free_i32(tcg_ctx, tcg_rmode); - - tcg_temp_free_i32(tcg_ctx, tcg_shift); - - tcg_temp_free_ptr(tcg_ctx, fpst); - - return 0; -} - -/* Table for converting the most common AArch32 encoding of - * rounding mode to arm_fprounding order (which matches the - * common AArch64 order); see ARM ARM pseudocode FPDecodeRM(). - */ -static const uint8_t fp_decode_rm[] = { - FPROUNDING_TIEAWAY, - FPROUNDING_TIEEVEN, - FPROUNDING_POSINF, - FPROUNDING_NEGINF, -}; - -static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn) -{ - uint32_t rd, rn, rm, dp = extract32(insn, 8, 1); - - if (!arm_dc_feature(s, ARM_FEATURE_V8)) { - return 1; - } - - if (dp) { - VFP_DREG_D(rd, insn); - VFP_DREG_N(rn, insn); - VFP_DREG_M(rm, insn); - } else { - rd = VFP_SREG_D(insn); - rn = VFP_SREG_N(insn); - rm = VFP_SREG_M(insn); - } - - if ((insn & 0x0f800e50) == 0x0e000a00) { - return handle_vsel(s, insn, rd, rn, rm, dp); - } else if ((insn & 0x0fb00e10) == 0x0e800a00) { - return handle_vminmaxnm(s, insn, rd, rn, rm, dp); - } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) { - /* VRINTA, VRINTN, VRINTP, VRINTM */ - int rounding = fp_decode_rm[extract32(insn, 16, 2)]; - return handle_vrint(s, insn, rd, rm, dp, rounding); - } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) { - /* VCVTA, VCVTN, VCVTP, VCVTM */ - int rounding = fp_decode_rm[extract32(insn, 16, 2)]; - return handle_vcvt(s, insn, rd, rm, dp, rounding); - } - return 1; -} - -/* Disassemble a VFP instruction. Returns nonzero if an error occurred - (ie. an undefined instruction). */ -static int disas_vfp_insn(DisasContext *s, uint32_t insn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask; - int dp, veclen; - TCGv_i32 addr; - TCGv_i32 tmp; - TCGv_i32 tmp2; - - if (!arm_dc_feature(s, ARM_FEATURE_VFP)) { - return 1; - } - - /* FIXME: this access check should not take precedence over UNDEF - * for invalid encodings; we will generate incorrect syndrome information - * for attempts to execute invalid vfp/neon encodings with FP disabled. - */ - if (!s->cpacr_fpen) { - gen_exception_insn(s, 4, EXCP_UDEF, - syn_fp_access_trap(1, 0xe, s->thumb)); - return 0; - } - - if (!s->vfp_enabled) { - /* VFP disabled. 
Only allow fmxr/fmrx to/from some control regs. */ - if ((insn & 0x0fe00fff) != 0x0ee00a10) - return 1; - rn = (insn >> 16) & 0xf; - if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2 - && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) { - return 1; - } - } - - if (extract32(insn, 28, 4) == 0xf) { - /* Encodings with T=1 (Thumb) or unconditional (ARM): - * only used in v8 and above. - */ - return disas_vfp_v8_insn(s, insn); - } - - dp = ((insn & 0xf00) == 0xb00); - switch ((insn >> 24) & 0xf) { - case 0xe: - if (insn & (1 << 4)) { - /* single register transfer */ - rd = (insn >> 12) & 0xf; - if (dp) { - int size; - int pass; - - VFP_DREG_N(rn, insn); - if (insn & 0xf) - return 1; - if (insn & 0x00c00060 - && !arm_dc_feature(s, ARM_FEATURE_NEON)) { - return 1; - } - - pass = (insn >> 21) & 1; - if (insn & (1 << 22)) { - size = 0; - offset = ((insn >> 5) & 3) * 8; - } else if (insn & (1 << 5)) { - size = 1; - offset = (insn & (1 << 6)) ? 16 : 0; - } else { - size = 2; - offset = 0; - } - if (insn & ARM_CP_RW_BIT) { - /* vfp->arm */ - tmp = neon_load_reg(tcg_ctx, rn, pass); - switch (size) { - case 0: - if (offset) - tcg_gen_shri_i32(tcg_ctx, tmp, tmp, offset); - if (insn & (1 << 23)) - gen_uxtb(tmp); - else - gen_sxtb(tmp); - break; - case 1: - if (insn & (1 << 23)) { - if (offset) { - tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); - } else { - gen_uxth(tmp); - } - } else { - if (offset) { - tcg_gen_sari_i32(tcg_ctx, tmp, tmp, 16); - } else { - gen_sxth(tmp); - } - } - break; - case 2: - break; - } - store_reg(s, rd, tmp); - } else { - /* arm->vfp */ - tmp = load_reg(s, rd); - if (insn & (1 << 23)) { - /* VDUP */ - if (size == 0) { - gen_neon_dup_u8(s, tmp, 0); - } else if (size == 1) { - gen_neon_dup_low16(s, tmp); - } - for (n = 0; n <= pass * 2; n++) { - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, tmp2, tmp); - neon_store_reg(tcg_ctx, rn, n, tmp2); - } - neon_store_reg(tcg_ctx, rn, n, tmp); - } else { - /* VMOV */ - switch (size) { - case 0: - tmp2 = neon_load_reg(tcg_ctx, rn, pass); - tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, offset, 8); - tcg_temp_free_i32(tcg_ctx, tmp2); - break; - case 1: - tmp2 = neon_load_reg(tcg_ctx, rn, pass); - tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, offset, 16); - tcg_temp_free_i32(tcg_ctx, tmp2); - break; - case 2: - break; - } - neon_store_reg(tcg_ctx, rn, pass, tmp); - } - } - } else { /* !dp */ - if ((insn & 0x6f) != 0x00) - return 1; - rn = VFP_SREG_N(insn); - if (insn & ARM_CP_RW_BIT) { - /* vfp->arm */ - if (insn & (1 << 21)) { - /* system register */ - rn >>= 1; - - switch (rn) { - case ARM_VFP_FPSID: - /* VFP2 allows access to FSID from userspace. - VFP3 restricts all id registers to privileged - accesses. */ - if (IS_USER(s) - && arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - tmp = load_cpu_field(s->uc, vfp.xregs[rn]); - break; - case ARM_VFP_FPEXC: - if (IS_USER(s)) - return 1; - tmp = load_cpu_field(s->uc, vfp.xregs[rn]); - break; - case ARM_VFP_FPINST: - case ARM_VFP_FPINST2: - /* Not present in VFP3. 
*/ - if (IS_USER(s) - || arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - tmp = load_cpu_field(s->uc, vfp.xregs[rn]); - break; - case ARM_VFP_FPSCR: - if (rd == 15) { - tmp = load_cpu_field(s->uc, vfp.xregs[ARM_VFP_FPSCR]); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xf0000000); - } else { - tmp = tcg_temp_new_i32(tcg_ctx); - gen_helper_vfp_get_fpscr(tcg_ctx, tmp, tcg_ctx->cpu_env); - } - break; - case ARM_VFP_MVFR2: - if (!arm_dc_feature(s, ARM_FEATURE_V8)) { - return 1; - } - /* fall through */ - case ARM_VFP_MVFR0: - case ARM_VFP_MVFR1: - if (IS_USER(s) - || !arm_dc_feature(s, ARM_FEATURE_MVFR)) { - return 1; - } - tmp = load_cpu_field(s->uc, vfp.xregs[rn]); - break; - default: - return 1; - } - } else { - gen_mov_F0_vreg(s, 0, rn); - tmp = gen_vfp_mrs(s); - } - if (rd == 15) { - /* Set the 4 flag bits in the CPSR. */ - gen_set_nzcv(s, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - } else { - store_reg(s, rd, tmp); - } - } else { - /* arm->vfp */ - if (insn & (1 << 21)) { - rn >>= 1; - /* system register */ - switch (rn) { - case ARM_VFP_FPSID: - case ARM_VFP_MVFR0: - case ARM_VFP_MVFR1: - /* Writes are ignored. */ - break; - case ARM_VFP_FPSCR: - tmp = load_reg(s, rd); - gen_helper_vfp_set_fpscr(tcg_ctx, tcg_ctx->cpu_env, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - gen_lookup_tb(s); - break; - case ARM_VFP_FPEXC: - if (IS_USER(s)) - return 1; - /* TODO: VFP subarchitecture support. - * For now, keep the EN bit only */ - tmp = load_reg(s, rd); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 1 << 30); - store_cpu_field(tcg_ctx, tmp, vfp.xregs[rn]); - gen_lookup_tb(s); - break; - case ARM_VFP_FPINST: - case ARM_VFP_FPINST2: - if (IS_USER(s)) { - return 1; - } - tmp = load_reg(s, rd); - store_cpu_field(tcg_ctx, tmp, vfp.xregs[rn]); - break; - default: - return 1; - } - } else { - tmp = load_reg(s, rd); - gen_vfp_msr(s, tmp); - gen_mov_vreg_F0(s, 0, rn); - } - } - } - } else { - /* data processing */ - /* The opcode is in bits 23, 21, 20 and 6. */ - op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1); - if (dp) { - if (op == 15) { - /* rn is opcode */ - rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); - } else { - /* rn is register number */ - VFP_DREG_N(rn, insn); - } - - if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) || - ((rn & 0x1e) == 0x6))) { - /* Integer or single/half precision destination. */ - rd = VFP_SREG_D(insn); - } else { - VFP_DREG_D(rd, insn); - } - if (op == 15 && - (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) || - ((rn & 0x1e) == 0x4))) { - /* VCVT from int or half precision is always from S reg - * regardless of dp bit. VCVT with immediate frac_bits - * has same format as SREG_M. - */ - rm = VFP_SREG_M(insn); - } else { - VFP_DREG_M(rm, insn); - } - } else { - rn = VFP_SREG_N(insn); - if (op == 15 && rn == 15) { - /* Double precision destination. */ - VFP_DREG_D(rd, insn); - } else { - rd = VFP_SREG_D(insn); - } - /* NB that we implicitly rely on the encoding for the frac_bits - * in VCVT of fixed to float being the same as that of an SREG_M - */ - rm = VFP_SREG_M(insn); - } - - veclen = s->vec_len; - if (op == 15 && rn > 3) - veclen = 0; - - /* Shut up compiler warnings. */ - delta_m = 0; - delta_d = 0; - bank_mask = 0; - - if (veclen > 0) { - if (dp) - bank_mask = 0xc; - else - bank_mask = 0x18; - - /* Figure out what type of vector operation this is. 
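(Editor's aside, not part of the patch: the short-vector classification that the code just below performs condenses to the following sketch; names are hypothetical, and the real code additionally forces veclen to 0 for the scalar case:)

    /* Bank-0 destinations are scalars; otherwise vec_stride sets the register
     * step, and a bank-0 Rm gives the mixed scalar/vector case (delta_m == 0). */
    static void vfp_vector_deltas(int rd, int rm, int dp, int vec_stride,
                                  int *delta_d, int *delta_m)
    {
        int bank_mask = dp ? 0xc : 0x18;
        if ((rd & bank_mask) == 0) {
            *delta_d = *delta_m = 0;                 /* scalar operation */
        } else {
            *delta_d = dp ? (vec_stride >> 1) + 1 : vec_stride + 1;
            *delta_m = ((rm & bank_mask) == 0) ? 0 : *delta_d;
        }
    }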
*/ - if ((rd & bank_mask) == 0) { - /* scalar */ - veclen = 0; - } else { - if (dp) - delta_d = (s->vec_stride >> 1) + 1; - else - delta_d = s->vec_stride + 1; - - if ((rm & bank_mask) == 0) { - /* mixed scalar/vector */ - delta_m = 0; - } else { - /* vector */ - delta_m = delta_d; - } - } - } - - /* Load the initial operands. */ - if (op == 15) { - switch (rn) { - case 16: - case 17: - /* Integer source */ - gen_mov_F0_vreg(s, 0, rm); - break; - case 8: - case 9: - /* Compare */ - gen_mov_F0_vreg(s, dp, rd); - gen_mov_F1_vreg(s, dp, rm); - break; - case 10: - case 11: - /* Compare with zero */ - gen_mov_F0_vreg(s, dp, rd); - gen_vfp_F1_ld0(s, dp); - break; - case 20: - case 21: - case 22: - case 23: - case 28: - case 29: - case 30: - case 31: - /* Source and destination the same. */ - gen_mov_F0_vreg(s, dp, rd); - break; - case 4: - case 5: - case 6: - case 7: - /* VCVTB, VCVTT: only present with the halfprec extension - * UNPREDICTABLE if bit 8 is set prior to ARMv8 - * (we choose to UNDEF) - */ - if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) || - !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) { - return 1; - } - if (!extract32(rn, 1, 1)) { - /* Half precision source. */ - gen_mov_F0_vreg(s, 0, rm); - break; - } - /* Otherwise fall through */ - default: - /* One source operand. */ - gen_mov_F0_vreg(s, dp, rm); - break; - } - } else { - /* Two source operands. */ - gen_mov_F0_vreg(s, dp, rn); - gen_mov_F1_vreg(s, dp, rm); - } - - for (;;) { - /* Perform the calculation. */ - switch (op) { - case 0: /* VMLA: fd + (fn * fm) */ - /* Note that order of inputs to the add matters for NaNs */ - gen_vfp_F1_mul(s, dp); - gen_mov_F0_vreg(s, dp, rd); - gen_vfp_add(s, dp); - break; - case 1: /* VMLS: fd + -(fn * fm) */ - gen_vfp_mul(s, dp); - gen_vfp_F1_neg(s, dp); - gen_mov_F0_vreg(s, dp, rd); - gen_vfp_add(s, dp); - break; - case 2: /* VNMLS: -fd + (fn * fm) */ - /* Note that it isn't valid to replace (-A + B) with (B - A) - * or similar plausible looking simplifications - * because this will give wrong results for NaNs. - */ - gen_vfp_F1_mul(s, dp); - gen_mov_F0_vreg(s, dp, rd); - gen_vfp_neg(s, dp); - gen_vfp_add(s, dp); - break; - case 3: /* VNMLA: -fd + -(fn * fm) */ - gen_vfp_mul(s, dp); - gen_vfp_F1_neg(s, dp); - gen_mov_F0_vreg(s, dp, rd); - gen_vfp_neg(s, dp); - gen_vfp_add(s, dp); - break; - case 4: /* mul: fn * fm */ - gen_vfp_mul(s, dp); - break; - case 5: /* nmul: -(fn * fm) */ - gen_vfp_mul(s, dp); - gen_vfp_neg(s, dp); - break; - case 6: /* add: fn + fm */ - gen_vfp_add(s, dp); - break; - case 7: /* sub: fn - fm */ - gen_vfp_sub(s, dp); - break; - case 8: /* div: fn / fm */ - gen_vfp_div(s, dp); - break; - case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */ - case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */ - case 12: /* VFMA : fd = muladd( fd, fn, fm) */ - case 13: /* VFMS : fd = muladd( fd, -fn, fm) */ - /* These are fused multiply-add, and must be done as one - * floating point operation with no rounding between the - * multiplication and addition steps. - * NB that doing the negations here as separate steps is - * correct : an input NaN should come out with its sign bit - * flipped if it is a negated-input. 
- */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) { - return 1; - } - if (dp) { - TCGv_ptr fpst; - TCGv_i64 frd; - if (op & 1) { - /* VFNMS, VFMS */ - gen_helper_vfp_negd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d); - } - frd = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ld_f64(tcg_ctx, frd, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); - if (op & 2) { - /* VFNMA, VFNMS */ - gen_helper_vfp_negd(tcg_ctx, frd, frd); - } - fpst = get_fpstatus_ptr(s, 0); - gen_helper_vfp_muladdd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, - tcg_ctx->cpu_F1d, frd, fpst); - tcg_temp_free_ptr(tcg_ctx, fpst); - tcg_temp_free_i64(tcg_ctx, frd); - } else { - TCGv_ptr fpst; - TCGv_i32 frd; - if (op & 1) { - /* VFNMS, VFMS */ - gen_helper_vfp_negs(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s); - } - frd = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_f32(tcg_ctx, frd, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); - if (op & 2) { - gen_helper_vfp_negs(tcg_ctx, frd, frd); - } - fpst = get_fpstatus_ptr(s, 0); - gen_helper_vfp_muladds(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, - tcg_ctx->cpu_F1s, frd, fpst); - tcg_temp_free_ptr(tcg_ctx, fpst); - tcg_temp_free_i32(tcg_ctx, frd); - } - break; - case 14: /* fconst */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - - n = (insn << 12) & 0x80000000; - i = ((insn >> 12) & 0x70) | (insn & 0xf); - if (dp) { - if (i & 0x40) - i |= 0x3f80; - else - i |= 0x4000; - n |= i << 16; - tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_F0d, ((uint64_t)n) << 32); - } else { - if (i & 0x40) - i |= 0x780; - else - i |= 0x800; - n |= i << 19; - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_F0s, n); - } - break; - case 15: /* extension space */ - switch (rn) { - case 0: /* cpy */ - /* no-op */ - break; - case 1: /* abs */ - gen_vfp_abs(s, dp); - break; - case 2: /* neg */ - gen_vfp_neg(s, dp); - break; - case 3: /* sqrt */ - gen_vfp_sqrt(s, dp); - break; - case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */ - tmp = gen_vfp_mrs(s); - tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); - if (dp) { - gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, tcg_ctx->cpu_F0d, tmp, - tcg_ctx->cpu_env); - } else { - gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp, - tcg_ctx->cpu_env); - } - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */ - tmp = gen_vfp_mrs(s); - tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); - if (dp) { - gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, tcg_ctx->cpu_F0d, tmp, - tcg_ctx->cpu_env); - } else { - gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp, - tcg_ctx->cpu_env); - } - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */ - tmp = tcg_temp_new_i32(tcg_ctx); - if (dp) { - gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0d, - tcg_ctx->cpu_env); - } else { - gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0s, - tcg_ctx->cpu_env); - } - gen_mov_F0_vreg(s, 0, rd); - tmp2 = gen_vfp_mrs(s); - tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xffff0000); - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - gen_vfp_msr(s, tmp); - break; - case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */ - tmp = tcg_temp_new_i32(tcg_ctx); - if (dp) { - gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0d, - tcg_ctx->cpu_env); - } else { - gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0s, - tcg_ctx->cpu_env); - } - tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 16); - gen_mov_F0_vreg(s, 0, rd); - tmp2 = gen_vfp_mrs(s); - tcg_gen_ext16u_i32(tcg_ctx, tmp2, tmp2); - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - 
tcg_temp_free_i32(tcg_ctx, tmp2); - gen_vfp_msr(s, tmp); - break; - case 8: /* cmp */ - gen_vfp_cmp(s, dp); - break; - case 9: /* cmpe */ - gen_vfp_cmpe(s, dp); - break; - case 10: /* cmpz */ - gen_vfp_cmp(s, dp); - break; - case 11: /* cmpez */ - gen_vfp_F1_ld0(s, dp); - gen_vfp_cmpe(s, dp); - break; - case 12: /* vrintr */ - { - TCGv_ptr fpst = get_fpstatus_ptr(s, 0); - if (dp) { - gen_helper_rintd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, fpst); - } else { - gen_helper_rints(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpst); - } - tcg_temp_free_ptr(tcg_ctx, fpst); - break; - } - case 13: /* vrintz */ - { - TCGv_ptr fpst = get_fpstatus_ptr(s, 0); - TCGv_i32 tcg_rmode; - tcg_rmode = tcg_const_i32(tcg_ctx, float_round_to_zero); - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - if (dp) { - gen_helper_rintd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, fpst); - } else { - gen_helper_rints(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpst); - } - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - tcg_temp_free_i32(tcg_ctx, tcg_rmode); - tcg_temp_free_ptr(tcg_ctx, fpst); - break; - } - case 14: /* vrintx */ - { - TCGv_ptr fpst = get_fpstatus_ptr(s, 0); - if (dp) { - gen_helper_rintd_exact(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, fpst); - } else { - gen_helper_rints_exact(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpst); - } - tcg_temp_free_ptr(tcg_ctx, fpst); - break; - } - case 15: /* single<->double conversion */ - if (dp) - gen_helper_vfp_fcvtsd(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0d, tcg_ctx->cpu_env); - else - gen_helper_vfp_fcvtds(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); - break; - case 16: /* fuito */ - gen_vfp_uito(s, dp, 0); - break; - case 17: /* fsito */ - gen_vfp_sito(s, dp, 0); - break; - case 20: /* fshto */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - gen_vfp_shto(s, dp, 16 - rm, 0); - break; - case 21: /* fslto */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - gen_vfp_slto(s, dp, 32 - rm, 0); - break; - case 22: /* fuhto */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - gen_vfp_uhto(s, dp, 16 - rm, 0); - break; - case 23: /* fulto */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - gen_vfp_ulto(s, dp, 32 - rm, 0); - break; - case 24: /* ftoui */ - gen_vfp_toui(s, dp, 0); - break; - case 25: /* ftouiz */ - gen_vfp_touiz(s, dp, 0); - break; - case 26: /* ftosi */ - gen_vfp_tosi(s, dp, 0); - break; - case 27: /* ftosiz */ - gen_vfp_tosiz(s, dp, 0); - break; - case 28: /* ftosh */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - gen_vfp_tosh(s, dp, 16 - rm, 0); - break; - case 29: /* ftosl */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - gen_vfp_tosl(s, dp, 32 - rm, 0); - break; - case 30: /* ftouh */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - gen_vfp_touh(s, dp, 16 - rm, 0); - break; - case 31: /* ftoul */ - if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { - return 1; - } - gen_vfp_toul(s, dp, 32 - rm, 0); - break; - default: /* undefined */ - return 1; - } - break; - default: /* undefined */ - return 1; - } - - /* Write back the result. */ - if (op == 15 && (rn >= 8 && rn <= 11)) { - /* Comparison, do nothing. */ - } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 || - (rn & 0x1e) == 0x6)) { - /* VCVT double to int: always integer result. - * VCVT double to half precision is always a single - * precision result. 
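(Editor's aside, not part of the patch: the "fconst" case earlier in this hunk builds the immediate with shift-and-or constants that obscure the ARM VFPExpandImm pattern. The single-precision path, restated as a hypothetical standalone function:)

    #include <stdint.h>

    /* imm8 = abcdefgh is split across insn[19:16] and insn[3:0]; the result is
     * sign a, exponent NOT(b):bbbbb:cd, fraction efgh followed by zeros. */
    static uint32_t vfp_expand_imm_sp(uint32_t insn)
    {
        uint32_t n = (insn << 12) & 0x80000000;            /* sign: insn bit 19 */
        uint32_t i = ((insn >> 12) & 0x70) | (insn & 0xf); /* bcdefgh */
        if (i & 0x40)
            i |= 0x780;    /* b == 1: exponent becomes 0 1111 1cd */
        else
            i |= 0x800;    /* b == 0: exponent becomes 1 0000 0cd */
        return n | (i << 19);
    }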
- */ - gen_mov_vreg_F0(s, 0, rd); - } else if (op == 15 && rn == 15) { - /* conversion */ - gen_mov_vreg_F0(s, !dp, rd); - } else { - gen_mov_vreg_F0(s, dp, rd); - } - - /* break out of the loop if we have finished */ - if (veclen == 0) - break; - - if (op == 15 && delta_m == 0) { - /* single source one-many */ - while (veclen--) { - rd = ((rd + delta_d) & (bank_mask - 1)) - | (rd & bank_mask); - gen_mov_vreg_F0(s, dp, rd); - } - break; - } - /* Setup the next operands. */ - veclen--; - rd = ((rd + delta_d) & (bank_mask - 1)) - | (rd & bank_mask); - - if (op == 15) { - /* One source operand. */ - rm = ((rm + delta_m) & (bank_mask - 1)) - | (rm & bank_mask); - gen_mov_F0_vreg(s, dp, rm); - } else { - /* Two source operands. */ - rn = ((rn + delta_d) & (bank_mask - 1)) - | (rn & bank_mask); - gen_mov_F0_vreg(s, dp, rn); - if (delta_m) { - rm = ((rm + delta_m) & (bank_mask - 1)) - | (rm & bank_mask); - gen_mov_F1_vreg(s, dp, rm); - } - } - } - } - break; - case 0xc: - case 0xd: - if ((insn & 0x03e00000) == 0x00400000) { - /* two-register transfer */ - rn = (insn >> 16) & 0xf; - rd = (insn >> 12) & 0xf; - if (dp) { - VFP_DREG_M(rm, insn); - } else { - rm = VFP_SREG_M(insn); - } - - if (insn & ARM_CP_RW_BIT) { - /* vfp->arm */ - if (dp) { - gen_mov_F0_vreg(s, 0, rm * 2); - tmp = gen_vfp_mrs(s); - store_reg(s, rd, tmp); - gen_mov_F0_vreg(s, 0, rm * 2 + 1); - tmp = gen_vfp_mrs(s); - store_reg(s, rn, tmp); - } else { - gen_mov_F0_vreg(s, 0, rm); - tmp = gen_vfp_mrs(s); - store_reg(s, rd, tmp); - gen_mov_F0_vreg(s, 0, rm + 1); - tmp = gen_vfp_mrs(s); - store_reg(s, rn, tmp); - } - } else { - /* arm->vfp */ - if (dp) { - tmp = load_reg(s, rd); - gen_vfp_msr(s, tmp); - gen_mov_vreg_F0(s, 0, rm * 2); - tmp = load_reg(s, rn); - gen_vfp_msr(s, tmp); - gen_mov_vreg_F0(s, 0, rm * 2 + 1); - } else { - tmp = load_reg(s, rd); - gen_vfp_msr(s, tmp); - gen_mov_vreg_F0(s, 0, rm); - tmp = load_reg(s, rn); - gen_vfp_msr(s, tmp); - gen_mov_vreg_F0(s, 0, rm + 1); - } - } - } else { - /* Load/store */ - rn = (insn >> 16) & 0xf; - if (dp) - VFP_DREG_D(rd, insn); - else - rd = VFP_SREG_D(insn); - if ((insn & 0x01200000) == 0x01000000) { - /* Single load/store */ - offset = (insn & 0xff) << 2; - if ((insn & (1 << 23)) == 0) - offset = 0-offset; - if (s->thumb && rn == 15) { - /* This is actually UNPREDICTABLE */ - addr = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, addr, s->pc & ~2); - } else { - addr = load_reg(s, rn); - } - tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); - if (insn & (1 << 20)) { - gen_vfp_ld(s, dp, addr); - gen_mov_vreg_F0(s, dp, rd); - } else { - gen_mov_F0_vreg(s, dp, rd); - gen_vfp_st(s, dp, addr); - } - tcg_temp_free_i32(tcg_ctx, addr); - } else { - /* load/store multiple */ - int w = insn & (1 << 21); - if (dp) - n = (insn >> 1) & 0x7f; - else - n = insn & 0xff; - - if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) { - /* P == U , W == 1 => UNDEF */ - return 1; - } - if (n == 0 || (rd + n) > 32 || (dp && n > 16)) { - /* UNPREDICTABLE cases for bad immediates: we choose to - * UNDEF to avoid generating huge numbers of TCG ops - */ - return 1; - } - if (rn == 15 && w) { - /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ - return 1; - } - - if (s->thumb && rn == 15) { - /* This is actually UNPREDICTABLE */ - addr = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, addr, s->pc & ~2); - } else { - addr = load_reg(s, rn); - } - if (insn & (1 << 24)) /* pre-decrement */ - tcg_gen_addi_i32(tcg_ctx, addr, addr, 0-((insn & 0xff) << 2)); - - if (dp) - offset = 8; - else - offset = 4; 
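(Editor's aside, not part of the patch: the UNDEF guards in the VFP load/store-multiple path above, restated with hypothetical names; P is insn bit 24, U bit 23, W bit 21. The real code also rejects rn == 15 with writeback:)

    /* Returns 0 for the encodings the translator chooses to UNDEF. */
    static int vfp_ldstm_ok(int dp, int rd, int n, int p, int u, int w)
    {
        if (w && !(p ^ u))
            return 0;                       /* P == U with writeback */
        if (n == 0 || (rd + n) > 32 || (dp && n > 16))
            return 0;                       /* bad register count immediate */
        return 1;
    }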
- for (i = 0; i < n; i++) { - if (insn & ARM_CP_RW_BIT) { - /* load */ - gen_vfp_ld(s, dp, addr); - gen_mov_vreg_F0(s, dp, rd + i); - } else { - /* store */ - gen_mov_F0_vreg(s, dp, rd + i); - gen_vfp_st(s, dp, addr); - } - tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); - } - if (w) { - /* writeback */ - if (insn & (1 << 24)) - offset = (0-offset) * n; - else if (dp && (insn & 1)) - offset = 4; - else - offset = 0; - - if (offset != 0) - tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(tcg_ctx, addr); - } - } - } - break; - default: - /* Should never happen. */ - return 1; - } - return 0; -} - -static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TranslationBlock *tb; - - tb = s->tb; - if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { - tcg_gen_goto_tb(tcg_ctx, n); - gen_set_pc_im(s, dest); - tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + n); - } else { - gen_set_pc_im(s, dest); - tcg_gen_exit_tb(tcg_ctx, 0); - } -} - -static inline void gen_jmp(DisasContext *s, uint32_t dest) -{ - if (unlikely(s->singlestep_enabled || s->ss_active)) { - /* An indirect jump so that we still trigger the debug exception. */ - if (s->thumb) - dest |= 1; - gen_bx_im(s, dest); - } else { - gen_goto_tb(s, 0, dest); - s->is_jmp = DISAS_TB_JUMP; - } -} - -static inline void gen_mulxy(DisasContext *s, TCGv_i32 t0, TCGv_i32 t1, int x, int y) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (x) - tcg_gen_sari_i32(tcg_ctx, t0, t0, 16); - else - gen_sxth(t0); - if (y) - tcg_gen_sari_i32(tcg_ctx, t1, t1, 16); - else - gen_sxth(t1); - tcg_gen_mul_i32(tcg_ctx, t0, t0, t1); -} - -/* Return the mask of PSR bits set by a MSR instruction. */ -static uint32_t msr_mask(DisasContext *s, int flags, int spsr) -{ - uint32_t mask; - - mask = 0; - if (flags & (1 << 0)) - mask |= 0xff; - if (flags & (1 << 1)) - mask |= 0xff00; - if (flags & (1 << 2)) - mask |= 0xff0000; - if (flags & (1 << 3)) - mask |= 0xff000000; - - /* Mask out undefined bits. */ - mask &= ~CPSR_RESERVED; - if (!arm_dc_feature(s, ARM_FEATURE_V4T)) { - mask &= ~CPSR_T; - } - if (!arm_dc_feature(s, ARM_FEATURE_V5)) { - mask &= ~CPSR_Q; /* V5TE in reality*/ - } - if (!arm_dc_feature(s, ARM_FEATURE_V6)) { - mask &= ~(CPSR_E | CPSR_GE); - } - if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) { - mask &= ~CPSR_IT; - } - /* Mask out execution state and reserved bits. */ - if (!spsr) { - mask &= ~(CPSR_EXEC | CPSR_RESERVED); - } - /* Mask out privileged bits. */ - if (IS_USER(s)) - mask &= CPSR_USER; - return mask; -} - -/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */ -static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp; - if (spsr) { - /* ??? This is also undefined in system mode. */ - if (IS_USER(s)) - return 1; - - tmp = load_cpu_field(s->uc, spsr); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, ~mask); - tcg_gen_andi_i32(tcg_ctx, t0, t0, mask); - tcg_gen_or_i32(tcg_ctx, tmp, tmp, t0); - store_cpu_field(tcg_ctx, tmp, spsr); - } else { - gen_set_cpsr(s, t0, mask); - } - tcg_temp_free_i32(tcg_ctx, t0); - gen_lookup_tb(s); - return 0; -} - -/* Returns nonzero if access to the PSR is not permitted. 
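(Editor's aside, not part of the patch: msr_mask() above first assembles its PSR write mask one byte lane per flag bit, then clears reserved/privileged bits per feature level. Just that first step, as a hypothetical sketch:)

    #include <stdint.h>

    /* flag bit i selects CPSR byte lane i: bit 0 -> [7:0], ..., bit 3 -> [31:24] */
    static uint32_t msr_field_mask(int flags)
    {
        uint32_t mask = 0;
        for (int i = 0; i < 4; i++) {
            if (flags & (1 << i))
                mask |= 0xffu << (8 * i);
        }
        return mask;
    }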
*/ -static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp; - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, val); - return gen_set_psr(s, mask, spsr, tmp); -} - -/* Generate an old-style exception return. Marks pc as dead. */ -static void gen_exception_return(DisasContext *s, TCGv_i32 pc) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp; - store_reg(s, 15, pc); - tmp = load_cpu_field(s->uc, spsr); - gen_set_cpsr(s, tmp, CPSR_ERET_MASK); - tcg_temp_free_i32(tcg_ctx, tmp); - s->is_jmp = DISAS_UPDATE; -} - -/* Generate a v6 exception return. Marks both values as dead. */ -static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_set_cpsr(s, cpsr, CPSR_ERET_MASK); - tcg_temp_free_i32(tcg_ctx, cpsr); - store_reg(s, 15, pc); - s->is_jmp = DISAS_UPDATE; -} - -static void gen_nop_hint(DisasContext *s, int val) -{ - switch (val) { - case 3: /* wfi */ - gen_set_pc_im(s, s->pc); - s->is_jmp = DISAS_WFI; - break; - case 2: /* wfe */ - gen_set_pc_im(s, s->pc); - s->is_jmp = DISAS_WFE; - break; - case 4: /* sev */ - case 5: /* sevl */ - /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */ - default: /* nop */ - break; - } -} - -#define CPU_V001 tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1 - -static inline void gen_neon_add(DisasContext *s, int size, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (size) { - case 0: gen_helper_neon_add_u8(tcg_ctx, t0, t0, t1); break; - case 1: gen_helper_neon_add_u16(tcg_ctx, t0, t0, t1); break; - case 2: tcg_gen_add_i32(tcg_ctx, t0, t0, t1); break; - default: abort(); - } -} - -static inline void gen_neon_rsb(DisasContext *s, int size, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (size) { - case 0: gen_helper_neon_sub_u8(tcg_ctx, t0, t1, t0); break; - case 1: gen_helper_neon_sub_u16(tcg_ctx, t0, t1, t0); break; - case 2: tcg_gen_sub_i32(tcg_ctx, t0, t1, t0); break; - default: return; - } -} - -/* 32-bit pairwise ops end up the same as the elementwise versions. 
*/ -#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32 -#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32 -#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32 -#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32 - -#define GEN_NEON_INTEGER_OP_ENV(name) do { \ - switch ((size << 1) | u) { \ - case 0: \ - gen_helper_neon_##name##_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ - break; \ - case 1: \ - gen_helper_neon_##name##_u8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ - break; \ - case 2: \ - gen_helper_neon_##name##_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ - break; \ - case 3: \ - gen_helper_neon_##name##_u16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ - break; \ - case 4: \ - gen_helper_neon_##name##_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ - break; \ - case 5: \ - gen_helper_neon_##name##_u32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ - break; \ - default: return 1; \ - }} while (0) - -#define GEN_NEON_INTEGER_OP(name) do { \ - switch ((size << 1) | u) { \ - case 0: \ - gen_helper_neon_##name##_s8(tcg_ctx, tmp, tmp, tmp2); \ - break; \ - case 1: \ - gen_helper_neon_##name##_u8(tcg_ctx, tmp, tmp, tmp2); \ - break; \ - case 2: \ - gen_helper_neon_##name##_s16(tcg_ctx, tmp, tmp, tmp2); \ - break; \ - case 3: \ - gen_helper_neon_##name##_u16(tcg_ctx, tmp, tmp, tmp2); \ - break; \ - case 4: \ - gen_helper_neon_##name##_s32(tcg_ctx, tmp, tmp, tmp2); \ - break; \ - case 5: \ - gen_helper_neon_##name##_u32(tcg_ctx, tmp, tmp, tmp2); \ - break; \ - default: return 1; \ - }} while (0) - -static TCGv_i32 neon_load_scratch(TCGContext *tcg_ctx, int scratch) -{ - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); - return tmp; -} - -static void neon_store_scratch(TCGContext *tcg_ctx, int scratch, TCGv_i32 var) -{ - tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); - tcg_temp_free_i32(tcg_ctx, var); -} - -static inline TCGv_i32 neon_get_scalar(DisasContext *s, int size, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp; - if (size == 1) { - tmp = neon_load_reg(tcg_ctx, reg & 7, reg >> 4); - if (reg & 8) { - gen_neon_dup_high16(s, tmp); - } else { - gen_neon_dup_low16(s, tmp); - } - } else { - tmp = neon_load_reg(tcg_ctx, reg & 15, reg >> 4); - } - return tmp; -} - -static int gen_neon_unzip(TCGContext *tcg_ctx, int rd, int rm, int size, int q) -{ - TCGv_i32 tmp, tmp2; - if (!q && size == 2) { - return 1; - } - tmp = tcg_const_i32(tcg_ctx, rd); - tmp2 = tcg_const_i32(tcg_ctx, rm); - if (q) { - switch (size) { - case 0: - gen_helper_neon_qunzip8(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - break; - case 1: - gen_helper_neon_qunzip16(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - break; - case 2: - gen_helper_neon_qunzip32(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - break; - default: - abort(); - } - } else { - switch (size) { - case 0: - gen_helper_neon_unzip8(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - break; - case 1: - gen_helper_neon_unzip16(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - break; - default: - abort(); - } - } - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - return 0; -} - -static int gen_neon_zip(TCGContext *tcg_ctx, int rd, int rm, int size, int q) -{ - TCGv_i32 tmp, tmp2; - if (!q && size == 2) { - return 1; - } - tmp = tcg_const_i32(tcg_ctx, rd); - tmp2 = tcg_const_i32(tcg_ctx, rm); - if (q) { - switch (size) { - case 0: - gen_helper_neon_qzip8(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - 
break; - case 1: - gen_helper_neon_qzip16(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - break; - case 2: - gen_helper_neon_qzip32(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - break; - default: - abort(); - } - } else { - switch (size) { - case 0: - gen_helper_neon_zip8(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - break; - case 1: - gen_helper_neon_zip16(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - break; - default: - abort(); - } - } - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - return 0; -} - -static void gen_neon_trn_u8(TCGContext *tcg_ctx, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGv_i32 rd, tmp; - - rd = tcg_temp_new_i32(tcg_ctx); - tmp = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_shli_i32(tcg_ctx, rd, t0, 8); - tcg_gen_andi_i32(tcg_ctx, rd, rd, 0xff00ff00); - tcg_gen_andi_i32(tcg_ctx, tmp, t1, 0x00ff00ff); - tcg_gen_or_i32(tcg_ctx, rd, rd, tmp); - - tcg_gen_shri_i32(tcg_ctx, t1, t1, 8); - tcg_gen_andi_i32(tcg_ctx, t1, t1, 0x00ff00ff); - tcg_gen_andi_i32(tcg_ctx, tmp, t0, 0xff00ff00); - tcg_gen_or_i32(tcg_ctx, t1, t1, tmp); - tcg_gen_mov_i32(tcg_ctx, t0, rd); - - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, rd); -} - -static void gen_neon_trn_u16(TCGContext *tcg_ctx, TCGv_i32 t0, TCGv_i32 t1) -{ - TCGv_i32 rd, tmp; - - rd = tcg_temp_new_i32(tcg_ctx); - tmp = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_shli_i32(tcg_ctx, rd, t0, 16); - tcg_gen_andi_i32(tcg_ctx, tmp, t1, 0xffff); - tcg_gen_or_i32(tcg_ctx, rd, rd, tmp); - tcg_gen_shri_i32(tcg_ctx, t1, t1, 16); - tcg_gen_andi_i32(tcg_ctx, tmp, t0, 0xffff0000); - tcg_gen_or_i32(tcg_ctx, t1, t1, tmp); - tcg_gen_mov_i32(tcg_ctx, t0, rd); - - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, rd); -} - - -static struct { - int nregs; - int interleave; - int spacing; -} neon_ls_element_type[11] = { - {4, 4, 1}, - {4, 4, 2}, - {4, 1, 1}, - {4, 2, 1}, - {3, 3, 1}, - {3, 3, 2}, - {3, 1, 1}, - {1, 1, 1}, - {2, 2, 1}, - {2, 2, 2}, - {2, 1, 1} -}; - -/* Translate a NEON load/store element instruction. Return nonzero if the - instruction is invalid. */ -static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int rd, rn, rm; - int op; - int nregs; - int interleave; - int spacing; - int stride; - int size; - int reg; - int pass; - int load; - int shift; - int n; - TCGv_i32 addr; - TCGv_i32 tmp; - TCGv_i32 tmp2; - TCGv_i64 tmp64; - - /* FIXME: this access check should not take precedence over UNDEF - * for invalid encodings; we will generate incorrect syndrome information - * for attempts to execute invalid vfp/neon encodings with FP disabled. - */ - if (!s->cpacr_fpen) { - gen_exception_insn(s, 4, EXCP_UDEF, - syn_fp_access_trap(1, 0xe, s->thumb)); - return 0; - } - - if (!s->vfp_enabled) - return 1; - VFP_DREG_D(rd, insn); - rn = (insn >> 16) & 0xf; - rm = insn & 0xf; - load = (insn & (1 << 21)) != 0; - if ((insn & (1 << 23)) == 0) { - /* Load store all elements. 
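(Editor's aside, not part of the patch: each neon_ls_element_type[] row above is {nregs, interleave, spacing} for one VLDn/VSTn multiple-structures form, and the loop that follows steps through memory by (1 << size) * interleave bytes. A hypothetical restatement of that stride computation:)

    /* Row layout: {nregs, interleave, spacing}. With 16-bit elements
     * (size == 1) and interleave 2, consecutive elements of one register
     * sit 4 bytes apart: stride = (1 << size) * interleave. */
    struct neon_ls_type { int nregs, interleave, spacing; };

    static int neon_ls_stride(struct neon_ls_type t, int size)
    {
        return (1 << size) * t.interleave;
    }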
*/ - op = (insn >> 8) & 0xf; - size = (insn >> 6) & 3; - if (op > 10) - return 1; - /* Catch UNDEF cases for bad values of align field */ - switch (op & 0xc) { - case 4: - if (((insn >> 5) & 1) == 1) { - return 1; - } - break; - case 8: - if (((insn >> 4) & 3) == 3) { - return 1; - } - break; - default: - break; - } - nregs = neon_ls_element_type[op].nregs; - interleave = neon_ls_element_type[op].interleave; - spacing = neon_ls_element_type[op].spacing; - if (size == 3 && (interleave | spacing) != 1) - return 1; - addr = tcg_temp_new_i32(tcg_ctx); - load_reg_var(s, addr, rn); - stride = (1 << size) * interleave; - for (reg = 0; reg < nregs; reg++) { - if (interleave > 2 || (interleave == 2 && nregs == 2)) { - load_reg_var(s, addr, rn); - tcg_gen_addi_i32(tcg_ctx, addr, addr, (1 << size) * reg); - } else if (interleave == 2 && nregs == 4 && reg == 2) { - load_reg_var(s, addr, rn); - tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size); - } - if (size == 3) { - tmp64 = tcg_temp_new_i64(tcg_ctx); - if (load) { - gen_aa32_ld64(s, tmp64, addr, get_mem_index(s)); - neon_store_reg64(tcg_ctx, tmp64, rd); - } else { - neon_load_reg64(tcg_ctx, tmp64, rd); - gen_aa32_st64(s, tmp64, addr, get_mem_index(s)); - } - tcg_temp_free_i64(tcg_ctx, tmp64); - tcg_gen_addi_i32(tcg_ctx, addr, addr, stride); - } else { - for (pass = 0; pass < 2; pass++) { - if (size == 2) { - if (load) { - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - neon_store_reg(tcg_ctx, rd, pass, tmp); - } else { - tmp = neon_load_reg(tcg_ctx, rd, pass); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_gen_addi_i32(tcg_ctx, addr, addr, stride); - } else if (size == 1) { - if (load) { - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - tcg_gen_addi_i32(tcg_ctx, addr, addr, stride); - tmp2 = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s)); - tcg_gen_addi_i32(tcg_ctx, addr, addr, stride); - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - neon_store_reg(tcg_ctx, rd, pass, tmp); - } else { - tmp = neon_load_reg(tcg_ctx, rd, pass); - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_shri_i32(tcg_ctx, tmp2, tmp, 16); - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_gen_addi_i32(tcg_ctx, addr, addr, stride); - gen_aa32_st16(s, tmp2, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_gen_addi_i32(tcg_ctx, addr, addr, stride); - } - } else /* size == 0 */ { - if (load) { - TCGV_UNUSED_I32(tmp2); - for (n = 0; n < 4; n++) { - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - tcg_gen_addi_i32(tcg_ctx, addr, addr, stride); - if (n == 0) { - tmp2 = tmp; - } else { - tcg_gen_shli_i32(tcg_ctx, tmp, tmp, n * 8); - tcg_gen_or_i32(tcg_ctx, tmp2, tmp2, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - } - } - neon_store_reg(tcg_ctx, rd, pass, tmp2); - } else { - tmp2 = neon_load_reg(tcg_ctx, rd, pass); - for (n = 0; n < 4; n++) { - tmp = tcg_temp_new_i32(tcg_ctx); - if (n == 0) { - tcg_gen_mov_i32(tcg_ctx, tmp, tmp2); - } else { - tcg_gen_shri_i32(tcg_ctx, tmp, tmp2, n * 8); - } - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_gen_addi_i32(tcg_ctx, addr, addr, stride); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - } - } - } - } - rd += spacing; - } - tcg_temp_free_i32(tcg_ctx, addr); - stride = nregs * 8; - } else { - size = (insn >> 
10) & 3; - if (size == 3) { - /* Load single element to all lanes. */ - int a = (insn >> 4) & 1; - if (!load) { - return 1; - } - size = (insn >> 6) & 3; - nregs = ((insn >> 8) & 3) + 1; - - if (size == 3) { - if (nregs != 4 || a == 0) { - return 1; - } - /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */ - size = 2; - } - if (nregs == 1 && a == 1 && size == 0) { - return 1; - } - if (nregs == 3 && a == 1) { - return 1; - } - addr = tcg_temp_new_i32(tcg_ctx); - load_reg_var(s, addr, rn); - if (nregs == 1) { - /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */ - tmp = gen_load_and_replicate(s, addr, size); - tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 0)); - tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 1)); - if (insn & (1 << 5)) { - tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd + 1, 0)); - tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd + 1, 1)); - } - tcg_temp_free_i32(tcg_ctx, tmp); - } else { - /* VLD2/3/4 to all lanes: bit 5 indicates register stride */ - stride = (insn & (1 << 5)) ? 2 : 1; - for (reg = 0; reg < nregs; reg++) { - tmp = gen_load_and_replicate(s, addr, size); - tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 0)); - tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 1)); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size); - rd += stride; - } - } - tcg_temp_free_i32(tcg_ctx, addr); - stride = (1 << size) * nregs; - } else { - /* Single element. */ - int idx = (insn >> 4) & 0xf; - pass = (insn >> 7) & 1; - switch (size) { - case 0: - shift = ((insn >> 5) & 3) * 8; - stride = 1; - break; - case 1: - shift = ((insn >> 6) & 1) * 16; - stride = (insn & (1 << 5)) ? 2 : 1; - break; - case 2: - shift = 0; - stride = (insn & (1 << 6)) ? 2 : 1; - break; - default: - abort(); - } - nregs = ((insn >> 8) & 3) + 1; - /* Catch the UNDEF cases. This is unavoidably a bit messy. */ - switch (nregs) { - case 1: - if (((idx & (1 << size)) != 0) || - (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) { - return 1; - } - break; - case 3: - if ((idx & 1) != 0) { - return 1; - } - /* fall through */ - case 2: - if (size == 2 && (idx & 2) != 0) { - return 1; - } - break; - case 4: - if ((size == 2) && ((idx & 3) == 3)) { - return 1; - } - break; - default: - abort(); - } - if ((rd + stride * (nregs - 1)) > 31) { - /* Attempts to write off the end of the register file - * are UNPREDICTABLE; we choose to UNDEF because otherwise - * the neon_load_reg() would write off the end of the array. - */ - return 1; - } - addr = tcg_temp_new_i32(tcg_ctx); - load_reg_var(s, addr, rn); - for (reg = 0; reg < nregs; reg++) { - if (load) { - tmp = tcg_temp_new_i32(tcg_ctx); - switch (size) { - case 0: - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - break; - case 1: - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - break; - case 2: - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - break; - default: /* Avoid compiler warnings. */ - abort(); - } - if (size != 2) { - tmp2 = neon_load_reg(tcg_ctx, rd, pass); - tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, - shift, size ? 
16 : 8); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - neon_store_reg(tcg_ctx, rd, pass, tmp); - } else { /* Store */ - tmp = neon_load_reg(tcg_ctx, rd, pass); - if (shift) - tcg_gen_shri_i32(tcg_ctx, tmp, tmp, shift); - switch (size) { - case 0: - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - break; - case 1: - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - break; - case 2: - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - break; - } - tcg_temp_free_i32(tcg_ctx, tmp); - } - rd += stride; - tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size); - } - tcg_temp_free_i32(tcg_ctx, addr); - stride = nregs * (1 << size); - } - } - if (rm != 15) { - TCGv_i32 base; - - base = load_reg(s, rn); - if (rm == 13) { - tcg_gen_addi_i32(tcg_ctx, base, base, stride); - } else { - TCGv_i32 index; - index = load_reg(s, rm); - tcg_gen_add_i32(tcg_ctx, base, base, index); - tcg_temp_free_i32(tcg_ctx, index); - } - store_reg(s, rn, base); - } - return 0; -} - -/* Bitwise select. dest = c ? t : f. Clobbers T and F. */ -static void gen_neon_bsl(DisasContext *s, TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_and_i32(tcg_ctx, t, t, c); - tcg_gen_andc_i32(tcg_ctx, f, f, c); - tcg_gen_or_i32(tcg_ctx, dest, t, f); -} - -static inline void gen_neon_narrow(DisasContext *s, int size, TCGv_i32 dest, TCGv_i64 src) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (size) { - case 0: gen_helper_neon_narrow_u8(tcg_ctx, dest, src); break; - case 1: gen_helper_neon_narrow_u16(tcg_ctx, dest, src); break; - case 2: tcg_gen_trunc_i64_i32(tcg_ctx, dest, src); break; - default: abort(); - } -} - -static inline void gen_neon_narrow_sats(DisasContext *s, int size, TCGv_i32 dest, TCGv_i64 src) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (size) { - case 0: gen_helper_neon_narrow_sat_s8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; - case 1: gen_helper_neon_narrow_sat_s16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; - case 2: gen_helper_neon_narrow_sat_s32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; - default: abort(); - } -} - -static inline void gen_neon_narrow_satu(DisasContext *s, int size, TCGv_i32 dest, TCGv_i64 src) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (size) { - case 0: gen_helper_neon_narrow_sat_u8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; - case 1: gen_helper_neon_narrow_sat_u16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; - case 2: gen_helper_neon_narrow_sat_u32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; - default: abort(); - } -} - -static inline void gen_neon_unarrow_sats(DisasContext *s, int size, TCGv_i32 dest, TCGv_i64 src) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (size) { - case 0: gen_helper_neon_unarrow_sat8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; - case 1: gen_helper_neon_unarrow_sat16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; - case 2: gen_helper_neon_unarrow_sat32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; - default: abort(); - } -} - -static inline void gen_neon_shift_narrow(DisasContext *s, int size, TCGv_i32 var, TCGv_i32 shift, - int q, int u) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (q) { - if (u) { - switch (size) { - case 1: gen_helper_neon_rshl_u16(tcg_ctx, var, var, shift); break; - case 2: gen_helper_neon_rshl_u32(tcg_ctx, var, var, shift); break; - default: abort(); - } - } else { - switch (size) { - case 1: gen_helper_neon_rshl_s16(tcg_ctx, var, var, shift); break; - case 2: gen_helper_neon_rshl_s32(tcg_ctx, var, var, shift); break; - default: abort(); - } - } - } else { - if (u) { - 
switch (size) { - case 1: gen_helper_neon_shl_u16(tcg_ctx, var, var, shift); break; - case 2: gen_helper_neon_shl_u32(tcg_ctx, var, var, shift); break; - default: abort(); - } - } else { - switch (size) { - case 1: gen_helper_neon_shl_s16(tcg_ctx, var, var, shift); break; - case 2: gen_helper_neon_shl_s32(tcg_ctx, var, var, shift); break; - default: abort(); - } - } - } -} - -static inline void gen_neon_widen(DisasContext *s, TCGv_i64 dest, TCGv_i32 src, int size, int u) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (u) { - switch (size) { - case 0: gen_helper_neon_widen_u8(tcg_ctx, dest, src); break; - case 1: gen_helper_neon_widen_u16(tcg_ctx, dest, src); break; - case 2: tcg_gen_extu_i32_i64(tcg_ctx, dest, src); break; - default: abort(); - } - } else { - switch (size) { - case 0: gen_helper_neon_widen_s8(tcg_ctx, dest, src); break; - case 1: gen_helper_neon_widen_s16(tcg_ctx, dest, src); break; - case 2: tcg_gen_ext_i32_i64(tcg_ctx, dest, src); break; - default: abort(); - } - } - tcg_temp_free_i32(tcg_ctx, src); -} - -static inline void gen_neon_addl(DisasContext *s, int size) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (size) { - case 0: gen_helper_neon_addl_u16(tcg_ctx, CPU_V001); break; - case 1: gen_helper_neon_addl_u32(tcg_ctx, CPU_V001); break; - case 2: tcg_gen_add_i64(tcg_ctx, CPU_V001); break; - default: abort(); - } -} - -static inline void gen_neon_subl(DisasContext *s, int size) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (size) { - case 0: gen_helper_neon_subl_u16(tcg_ctx, CPU_V001); break; - case 1: gen_helper_neon_subl_u32(tcg_ctx, CPU_V001); break; - case 2: tcg_gen_sub_i64(tcg_ctx, CPU_V001); break; - default: abort(); - } -} - -static inline void gen_neon_negl(DisasContext *s, TCGv_i64 var, int size) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (size) { - case 0: gen_helper_neon_negl_u16(tcg_ctx, var, var); break; - case 1: gen_helper_neon_negl_u32(tcg_ctx, var, var); break; - case 2: - tcg_gen_neg_i64(tcg_ctx, var, var); - break; - default: abort(); - } -} - -static inline void gen_neon_addl_saturate(DisasContext *s, TCGv_i64 op0, TCGv_i64 op1, int size) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - switch (size) { - case 1: gen_helper_neon_addl_saturate_s32(tcg_ctx, op0, tcg_ctx->cpu_env, op0, op1); break; - case 2: gen_helper_neon_addl_saturate_s64(tcg_ctx, op0, tcg_ctx->cpu_env, op0, op1); break; - default: abort(); - } -} - -static inline void gen_neon_mull(DisasContext *s, TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b, - int size, int u) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 tmp; - - switch ((size << 1) | u) { - case 0: gen_helper_neon_mull_s8(tcg_ctx, dest, a, b); break; - case 1: gen_helper_neon_mull_u8(tcg_ctx, dest, a, b); break; - case 2: gen_helper_neon_mull_s16(tcg_ctx, dest, a, b); break; - case 3: gen_helper_neon_mull_u16(tcg_ctx, dest, a, b); break; - case 4: - tmp = gen_muls_i64_i32(s, a, b); - tcg_gen_mov_i64(tcg_ctx, dest, tmp); - tcg_temp_free_i64(tcg_ctx, tmp); - break; - case 5: - tmp = gen_mulu_i64_i32(s, a, b); - tcg_gen_mov_i64(tcg_ctx, dest, tmp); - tcg_temp_free_i64(tcg_ctx, tmp); - break; - default: abort(); - } - - /* gen_helper_neon_mull_[su]{8|16} do not free their parameters. - Don't forget to clean them now. 
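(Editor's aside, not part of the patch: gen_neon_mull() above, like the GEN_NEON_INTEGER_OP macros earlier in this hunk, dispatches on (size << 1) | u, so even selectors are signed and odd ones unsigned. A hypothetical table of what each selector picks:)

    /* (size << 1) | u -> helper choice in gen_neon_mull() */
    static const char *neon_mull_variant(int size, int u)
    {
        switch ((size << 1) | u) {
        case 0: return "mull_s8";
        case 1: return "mull_u8";
        case 2: return "mull_s16";
        case 3: return "mull_u16";
        case 4: return "signed 32x32->64 multiply";
        case 5: return "unsigned 32x32->64 multiply";
        default: return "UNDEF";
        }
    }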
*/ - if (size < 2) { - tcg_temp_free_i32(tcg_ctx, a); - tcg_temp_free_i32(tcg_ctx, b); - } -} - -static void gen_neon_narrow_op(DisasContext *s, int op, int u, int size, - TCGv_i32 dest, TCGv_i64 src) -{ - if (op) { - if (u) { - gen_neon_unarrow_sats(s, size, dest, src); - } else { - gen_neon_narrow(s, size, dest, src); - } - } else { - if (u) { - gen_neon_narrow_satu(s, size, dest, src); - } else { - gen_neon_narrow_sats(s, size, dest, src); - } - } -} - -/* Symbolic constants for op fields for Neon 3-register same-length. - * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B - * table A7-9. - */ -#define NEON_3R_VHADD 0 -#define NEON_3R_VQADD 1 -#define NEON_3R_VRHADD 2 -#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */ -#define NEON_3R_VHSUB 4 -#define NEON_3R_VQSUB 5 -#define NEON_3R_VCGT 6 -#define NEON_3R_VCGE 7 -#define NEON_3R_VSHL 8 -#define NEON_3R_VQSHL 9 -#define NEON_3R_VRSHL 10 -#define NEON_3R_VQRSHL 11 -#define NEON_3R_VMAX 12 -#define NEON_3R_VMIN 13 -#define NEON_3R_VABD 14 -#define NEON_3R_VABA 15 -#define NEON_3R_VADD_VSUB 16 -#define NEON_3R_VTST_VCEQ 17 -#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */ -#define NEON_3R_VMUL 19 -#define NEON_3R_VPMAX 20 -#define NEON_3R_VPMIN 21 -#define NEON_3R_VQDMULH_VQRDMULH 22 -#define NEON_3R_VPADD 23 -#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */ -#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */ -#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */ -#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */ -#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */ -#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */ -#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */ -#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */ - -static const uint8_t neon_3r_sizes[] = { - /*NEON_3R_VHADD*/ 0x7, - /*NEON_3R_VQADD*/ 0xf, - /*NEON_3R_VRHADD*/ 0x7, - /*NEON_3R_LOGIC*/ 0xf, /* size field encodes op type */ - /*NEON_3R_VHSUB*/ 0x7, - /*NEON_3R_VQSUB*/ 0xf, - /*NEON_3R_VCGT*/ 0x7, - /*NEON_3R_VCGE*/ 0x7, - /*NEON_3R_VSHL*/ 0xf, - /*NEON_3R_VQSHL*/ 0xf, - /*NEON_3R_VRSHL*/ 0xf, - /*NEON_3R_VQRSHL*/ 0xf, - /*NEON_3R_VMAX*/ 0x7, - /*NEON_3R_VMIN*/ 0x7, - /*NEON_3R_VABD*/ 0x7, - /*NEON_3R_VABA*/ 0x7, - /*NEON_3R_VADD_VSUB*/ 0xf, - /*NEON_3R_VTST_VCEQ*/ 0x7, - /*NEON_3R_VML*/ 0x7, - /*NEON_3R_VMUL*/ 0x7, - /*NEON_3R_VPMAX*/ 0x7, - /*NEON_3R_VPMIN*/ 0x7, - /*NEON_3R_VQDMULH_VQRDMULH*/ 0x6, - /*NEON_3R_VPADD*/ 0x7, - /*NEON_3R_SHA*/ 0xf, /* size field encodes op type */ - /*NEON_3R_VFM*/ 0x5, /* size bit 1 encodes op */ - /*NEON_3R_FLOAT_ARITH*/ 0x5, /* size bit 1 encodes op */ - /*NEON_3R_FLOAT_MULTIPLY*/ 0x5, /* size bit 1 encodes op */ - /*NEON_3R_FLOAT_CMP*/ 0x5, /* size bit 1 encodes op */ - /*NEON_3R_FLOAT_ACMP*/ 0x5, /* size bit 1 encodes op */ - /*NEON_3R_FLOAT_MINMAX*/ 0x5, /* size bit 1 encodes op */ - /*NEON_3R_FLOAT_MISC*/ 0x5, /* size bit 1 encodes op */ -}; - -/* Symbolic constants for op fields for Neon 2-register miscellaneous. - * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B - * table A7-13. 
- */ -#define NEON_2RM_VREV64 0 -#define NEON_2RM_VREV32 1 -#define NEON_2RM_VREV16 2 -#define NEON_2RM_VPADDL 4 -#define NEON_2RM_VPADDL_U 5 -#define NEON_2RM_AESE 6 /* Includes AESD */ -#define NEON_2RM_AESMC 7 /* Includes AESIMC */ -#define NEON_2RM_VCLS 8 -#define NEON_2RM_VCLZ 9 -#define NEON_2RM_VCNT 10 -#define NEON_2RM_VMVN 11 -#define NEON_2RM_VPADAL 12 -#define NEON_2RM_VPADAL_U 13 -#define NEON_2RM_VQABS 14 -#define NEON_2RM_VQNEG 15 -#define NEON_2RM_VCGT0 16 -#define NEON_2RM_VCGE0 17 -#define NEON_2RM_VCEQ0 18 -#define NEON_2RM_VCLE0 19 -#define NEON_2RM_VCLT0 20 -#define NEON_2RM_SHA1H 21 -#define NEON_2RM_VABS 22 -#define NEON_2RM_VNEG 23 -#define NEON_2RM_VCGT0_F 24 -#define NEON_2RM_VCGE0_F 25 -#define NEON_2RM_VCEQ0_F 26 -#define NEON_2RM_VCLE0_F 27 -#define NEON_2RM_VCLT0_F 28 -#define NEON_2RM_VABS_F 30 -#define NEON_2RM_VNEG_F 31 -#define NEON_2RM_VSWP 32 -#define NEON_2RM_VTRN 33 -#define NEON_2RM_VUZP 34 -#define NEON_2RM_VZIP 35 -#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */ -#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */ -#define NEON_2RM_VSHLL 38 -#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */ -#define NEON_2RM_VRINTN 40 -#define NEON_2RM_VRINTX 41 -#define NEON_2RM_VRINTA 42 -#define NEON_2RM_VRINTZ 43 -#define NEON_2RM_VCVT_F16_F32 44 -#define NEON_2RM_VRINTM 45 -#define NEON_2RM_VCVT_F32_F16 46 -#define NEON_2RM_VRINTP 47 -#define NEON_2RM_VCVTAU 48 -#define NEON_2RM_VCVTAS 49 -#define NEON_2RM_VCVTNU 50 -#define NEON_2RM_VCVTNS 51 -#define NEON_2RM_VCVTPU 52 -#define NEON_2RM_VCVTPS 53 -#define NEON_2RM_VCVTMU 54 -#define NEON_2RM_VCVTMS 55 -#define NEON_2RM_VRECPE 56 -#define NEON_2RM_VRSQRTE 57 -#define NEON_2RM_VRECPE_F 58 -#define NEON_2RM_VRSQRTE_F 59 -#define NEON_2RM_VCVT_FS 60 -#define NEON_2RM_VCVT_FU 61 -#define NEON_2RM_VCVT_SF 62 -#define NEON_2RM_VCVT_UF 63 - -static int neon_2rm_is_float_op(int op) -{ - /* Return true if this neon 2reg-misc op is float-to-float */ - return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F || - (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) || - op == NEON_2RM_VRINTM || - (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) || - op >= NEON_2RM_VRECPE_F); -} - -/* Each entry in this array has bit n set if the insn allows - * size value n (otherwise it will UNDEF). Since unallocated - * op values will have no bits set they always UNDEF. 
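(Editor's aside, not part of the patch: the size gating described in the comment just above condenses to a single test, written out hypothetically below; e.g. a table entry of 0x7 permits sizes 0-2, while 0x4 permits only size 2:)

    #include <stdint.h>

    /* bit n of the table entry permits size value n; unallocated ops are 0,
     * so they fail for every size and UNDEF. */
    static int neon_2rm_size_ok(const uint8_t *sizes, int op, int size)
    {
        return (sizes[op] & (1 << size)) != 0;
    }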
- */ -static const uint8_t neon_2rm_sizes[] = { - /*NEON_2RM_VREV64*/ 0x7, - /*NEON_2RM_VREV32*/ 0x3, - /*NEON_2RM_VREV16*/ 0x1, - 0, - /*NEON_2RM_VPADDL*/ 0x7, - /*NEON_2RM_VPADDL_U*/ 0x7, - /*NEON_2RM_AESE*/ 0x1, - /*NEON_2RM_AESMC*/ 0x1, - /*NEON_2RM_VCLS*/ 0x7, - /*NEON_2RM_VCLZ*/ 0x7, - /*NEON_2RM_VCNT*/ 0x1, - /*NEON_2RM_VMVN*/ 0x1, - /*NEON_2RM_VPADAL*/ 0x7, - /*NEON_2RM_VPADAL_U*/ 0x7, - /*NEON_2RM_VQABS*/ 0x7, - /*NEON_2RM_VQNEG*/ 0x7, - /*NEON_2RM_VCGT0*/ 0x7, - /*NEON_2RM_VCGE0*/ 0x7, - /*NEON_2RM_VCEQ0*/ 0x7, - /*NEON_2RM_VCLE0*/ 0x7, - /*NEON_2RM_VCLT0*/ 0x7, - /*NEON_2RM_SHA1H*/ 0x4, - /*NEON_2RM_VABS*/ 0x7, - /*NEON_2RM_VNEG*/ 0x7, - /*NEON_2RM_VCGT0_F*/ 0x4, - /*NEON_2RM_VCGE0_F*/ 0x4, - /*NEON_2RM_VCEQ0_F*/ 0x4, - /*NEON_2RM_VCLE0_F*/ 0x4, - /*NEON_2RM_VCLT0_F*/ 0x4, - 0, - /*NEON_2RM_VABS_F*/ 0x4, - /*NEON_2RM_VNEG_F*/ 0x4, - /*NEON_2RM_VSWP*/ 0x1, - /*NEON_2RM_VTRN*/ 0x7, - /*NEON_2RM_VUZP*/ 0x7, - /*NEON_2RM_VZIP*/ 0x7, - /*NEON_2RM_VMOVN*/ 0x7, - /*NEON_2RM_VQMOVN*/ 0x7, - /*NEON_2RM_VSHLL*/ 0x7, - /*NEON_2RM_SHA1SU1*/ 0x4, - /*NEON_2RM_VRINTN*/ 0x4, - /*NEON_2RM_VRINTX*/ 0x4, - /*NEON_2RM_VRINTA*/ 0x4, - /*NEON_2RM_VRINTZ*/ 0x4, - /*NEON_2RM_VCVT_F16_F32*/ 0x2, - /*NEON_2RM_VRINTM*/ 0x4, - /*NEON_2RM_VCVT_F32_F16*/ 0x2, - /*NEON_2RM_VRINTP*/ 0x4, - /*NEON_2RM_VCVTAU*/ 0x4, - /*NEON_2RM_VCVTAS*/ 0x4, - /*NEON_2RM_VCVTNU*/ 0x4, - /*NEON_2RM_VCVTNS*/ 0x4, - /*NEON_2RM_VCVTPU*/ 0x4, - /*NEON_2RM_VCVTPS*/ 0x4, - /*NEON_2RM_VCVTMU*/ 0x4, - /*NEON_2RM_VCVTMS*/ 0x4, - /*NEON_2RM_VRECPE*/ 0x4, - /*NEON_2RM_VRSQRTE*/ 0x4, - /*NEON_2RM_VRECPE_F*/ 0x4, - /*NEON_2RM_VRSQRTE_F*/ 0x4, - /*NEON_2RM_VCVT_FS*/ 0x4, - /*NEON_2RM_VCVT_FU*/ 0x4, - /*NEON_2RM_VCVT_SF*/ 0x4, - /*NEON_2RM_VCVT_UF*/ 0x4, -}; - -/* Translate a NEON data processing instruction. Return nonzero if the - instruction is invalid. - We process data in a mixture of 32-bit and 64-bit chunks. - Mostly we use 32-bit chunks so we can use normal scalar instructions. */ - -static int disas_neon_data_insn(DisasContext *s, uint32_t insn) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int op; - int q; - int rd, rn, rm; - int size; - int shift; - int pass; - int count; - int pairwise; - int u; - uint32_t imm, mask; - TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5; - TCGv_i64 tmp64; - - /* FIXME: this access check should not take precedence over UNDEF - * for invalid encodings; we will generate incorrect syndrome information - * for attempts to execute invalid vfp/neon encodings with FP disabled. - */ - if (!s->cpacr_fpen) { - gen_exception_insn(s, 4, EXCP_UDEF, - syn_fp_access_trap(1, 0xe, s->thumb)); - return 0; - } - - if (!s->vfp_enabled) - return 1; - q = (insn & (1 << 6)) != 0; - u = (insn >> 24) & 1; - VFP_DREG_D(rd, insn); - VFP_DREG_N(rn, insn); - VFP_DREG_M(rm, insn); - size = (insn >> 20) & 3; - if ((insn & (1 << 23)) == 0) { - /* Three register same length. */ - op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1); - /* Catch invalid op and bad size combinations: UNDEF */ - if ((neon_3r_sizes[op] & (1 << size)) == 0) { - return 1; - } - /* All insns of this form UNDEF for either this condition or the - * superset of cases "Q==1"; we catch the latter later. - */ - if (q && ((rd | rn | rm) & 1)) { - return 1; - } - /* - * The SHA-1/SHA-256 3-register instructions require special treatment - * here, as their size field is overloaded as an op type selector, and - * they all consume their input in a single pass. 
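(Editor's aside, not part of the patch: disas_neon_data_insn() above rejects q == 1 with any odd register index because a Q register is a pair of D registers and must start on an even D index; OR-ing the three indices exposes any odd one in a single test. Hypothetical sketch:)

    /* Returns 0 when a Q-form insn names a misaligned (odd) D index. */
    static int neon_q_operands_ok(int q, int rd, int rn, int rm)
    {
        return !(q && ((rd | rn | rm) & 1));
    }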
- */ - if (op == NEON_3R_SHA) { - if (!q) { - return 1; - } - if (!u) { /* SHA-1 */ - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) { - return 1; - } - tmp = tcg_const_i32(tcg_ctx, rd); - tmp2 = tcg_const_i32(tcg_ctx, rn); - tmp3 = tcg_const_i32(tcg_ctx, rm); - tmp4 = tcg_const_i32(tcg_ctx, size); - gen_helper_crypto_sha1_3reg(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3, tmp4); - tcg_temp_free_i32(tcg_ctx, tmp4); - } else { /* SHA-256 */ - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) { - return 1; - } - tmp = tcg_const_i32(tcg_ctx, rd); - tmp2 = tcg_const_i32(tcg_ctx, rn); - tmp3 = tcg_const_i32(tcg_ctx, rm); - switch (size) { - case 0: - gen_helper_crypto_sha256h(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3); - break; - case 1: - gen_helper_crypto_sha256h2(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3); - break; - case 2: - gen_helper_crypto_sha256su1(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3); - break; - } - } - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp3); - return 0; - } - if (size == 3 && op != NEON_3R_LOGIC) { - /* 64-bit element instructions. */ - for (pass = 0; pass < (q ? 2 : 1); pass++) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + pass); - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + pass); - switch (op) { - case NEON_3R_VQADD: - if (u) { - gen_helper_neon_qadd_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - } else { - gen_helper_neon_qadd_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - } - break; - case NEON_3R_VQSUB: - if (u) { - gen_helper_neon_qsub_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - } else { - gen_helper_neon_qsub_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - } - break; - case NEON_3R_VSHL: - if (u) { - gen_helper_neon_shl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); - } else { - gen_helper_neon_shl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); - } - break; - case NEON_3R_VQSHL: - if (u) { - gen_helper_neon_qshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); - } else { - gen_helper_neon_qshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); - } - break; - case NEON_3R_VRSHL: - if (u) { - gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); - } else { - gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); - } - break; - case NEON_3R_VQRSHL: - if (u) { - gen_helper_neon_qrshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); - } else { - gen_helper_neon_qrshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); - } - break; - case NEON_3R_VADD_VSUB: - if (u) { - tcg_gen_sub_i64(tcg_ctx, CPU_V001); - } else { - tcg_gen_add_i64(tcg_ctx, CPU_V001); - } - break; - default: - abort(); - } - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); - } - return 0; - } - pairwise = 0; - switch (op) { - case NEON_3R_VSHL: - case NEON_3R_VQSHL: - case NEON_3R_VRSHL: - case NEON_3R_VQRSHL: - { - int rtmp; - /* Shift instruction operands are reversed. 
*/ - rtmp = rn; - rn = rm; - rm = rtmp; - } - break; - case NEON_3R_VPADD: - if (u) { - return 1; - } - /* Fall through */ - case NEON_3R_VPMAX: - case NEON_3R_VPMIN: - pairwise = 1; - break; - case NEON_3R_FLOAT_ARITH: - pairwise = (u && size < 2); /* if VPADD (float) */ - break; - case NEON_3R_FLOAT_MINMAX: - pairwise = u; /* if VPMIN/VPMAX (float) */ - break; - case NEON_3R_FLOAT_CMP: - if (!u && size) { - /* no encoding for U=0 C=1x */ - return 1; - } - break; - case NEON_3R_FLOAT_ACMP: - if (!u) { - return 1; - } - break; - case NEON_3R_FLOAT_MISC: - /* VMAXNM/VMINNM in ARMv8 */ - if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) { - return 1; - } - break; - case NEON_3R_VMUL: - if (u && (size != 0)) { - /* UNDEF on invalid size for polynomial subcase */ - return 1; - } - break; - case NEON_3R_VFM: - if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) { - return 1; - } - break; - default: - break; - } - - if (pairwise && q) { - /* All the pairwise insns UNDEF if Q is set */ - return 1; - } - - for (pass = 0; pass < (q ? 4 : 2); pass++) { - - if (pairwise) { - /* Pairwise. */ - if (pass < 1) { - tmp = neon_load_reg(tcg_ctx, rn, 0); - tmp2 = neon_load_reg(tcg_ctx, rn, 1); - } else { - tmp = neon_load_reg(tcg_ctx, rm, 0); - tmp2 = neon_load_reg(tcg_ctx, rm, 1); - } - } else { - /* Elementwise. */ - tmp = neon_load_reg(tcg_ctx, rn, pass); - tmp2 = neon_load_reg(tcg_ctx, rm, pass); - } - switch (op) { - case NEON_3R_VHADD: - GEN_NEON_INTEGER_OP(hadd); - break; - case NEON_3R_VQADD: - GEN_NEON_INTEGER_OP_ENV(qadd); - break; - case NEON_3R_VRHADD: - GEN_NEON_INTEGER_OP(rhadd); - break; - case NEON_3R_LOGIC: /* Logic ops. */ - switch ((u << 2) | size) { - case 0: /* VAND */ - tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); - break; - case 1: /* BIC */ - tcg_gen_andc_i32(tcg_ctx, tmp, tmp, tmp2); - break; - case 2: /* VORR */ - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - break; - case 3: /* VORN */ - tcg_gen_orc_i32(tcg_ctx, tmp, tmp, tmp2); - break; - case 4: /* VEOR */ - tcg_gen_xor_i32(tcg_ctx, tmp, tmp, tmp2); - break; - case 5: /* VBSL */ - tmp3 = neon_load_reg(tcg_ctx, rd, pass); - gen_neon_bsl(s, tmp, tmp, tmp2, tmp3); - tcg_temp_free_i32(tcg_ctx, tmp3); - break; - case 6: /* VBIT */ - tmp3 = neon_load_reg(tcg_ctx, rd, pass); - gen_neon_bsl(s, tmp, tmp, tmp3, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp3); - break; - case 7: /* VBIF */ - tmp3 = neon_load_reg(tcg_ctx, rd, pass); - gen_neon_bsl(s, tmp, tmp3, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp3); - break; - } - break; - case NEON_3R_VHSUB: - GEN_NEON_INTEGER_OP(hsub); - break; - case NEON_3R_VQSUB: - GEN_NEON_INTEGER_OP_ENV(qsub); - break; - case NEON_3R_VCGT: - GEN_NEON_INTEGER_OP(cgt); - break; - case NEON_3R_VCGE: - GEN_NEON_INTEGER_OP(cge); - break; - case NEON_3R_VSHL: - GEN_NEON_INTEGER_OP(shl); - break; - case NEON_3R_VQSHL: - GEN_NEON_INTEGER_OP_ENV(qshl); - break; - case NEON_3R_VRSHL: - GEN_NEON_INTEGER_OP(rshl); - break; - case NEON_3R_VQRSHL: - GEN_NEON_INTEGER_OP_ENV(qrshl); - break; - case NEON_3R_VMAX: - GEN_NEON_INTEGER_OP(max); - break; - case NEON_3R_VMIN: - GEN_NEON_INTEGER_OP(min); - break; - case NEON_3R_VABD: - GEN_NEON_INTEGER_OP(abd); - break; - case NEON_3R_VABA: - GEN_NEON_INTEGER_OP(abd); - tcg_temp_free_i32(tcg_ctx, tmp2); - tmp2 = neon_load_reg(tcg_ctx, rd, pass); - gen_neon_add(s, size, tmp, tmp2); - break; - case NEON_3R_VADD_VSUB: - if (!u) { /* VADD */ - gen_neon_add(s, size, tmp, tmp2); - } else { /* VSUB */ - switch (size) { - case 0: gen_helper_neon_sub_u8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: 
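/*
 * Illustrative sketch (taken from the store path further below): pairwise
 * ops consume both halves of a source register per pass, so when rd
 * aliases rm the results are staged in scratch slots and only copied into
 * rd once every input has been read:
 *
 *     if (pairwise && rd == rm) {
 *         neon_store_scratch(tcg_ctx, pass, tmp);    // stage
 *     } else {
 *         neon_store_reg(tcg_ctx, rd, pass, tmp);    // direct write
 *     }
 */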
gen_helper_neon_sub_u16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - } - break; - case NEON_3R_VTST_VCEQ: - if (!u) { /* VTST */ - switch (size) { - case 0: gen_helper_neon_tst_u8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: gen_helper_neon_tst_u16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: gen_helper_neon_tst_u32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - } else { /* VCEQ */ - switch (size) { - case 0: gen_helper_neon_ceq_u8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: gen_helper_neon_ceq_u16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: gen_helper_neon_ceq_u32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - } - break; - case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */ - switch (size) { - case 0: gen_helper_neon_mul_u8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: gen_helper_neon_mul_u16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - tmp2 = neon_load_reg(tcg_ctx, rd, pass); - if (u) { /* VMLS */ - gen_neon_rsb(s, size, tmp, tmp2); - } else { /* VMLA */ - gen_neon_add(s, size, tmp, tmp2); - } - break; - case NEON_3R_VMUL: - if (u) { /* polynomial */ - gen_helper_neon_mul_p8(tcg_ctx, tmp, tmp, tmp2); - } else { /* Integer */ - switch (size) { - case 0: gen_helper_neon_mul_u8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: gen_helper_neon_mul_u16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - } - break; - case NEON_3R_VPMAX: - GEN_NEON_INTEGER_OP(pmax); - break; - case NEON_3R_VPMIN: - GEN_NEON_INTEGER_OP(pmin); - break; - case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */ - if (!u) { /* VQDMULH */ - switch (size) { - case 1: - gen_helper_neon_qdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - break; - case 2: - gen_helper_neon_qdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - break; - default: abort(); - } - } else { /* VQRDMULH */ - switch (size) { - case 1: - gen_helper_neon_qrdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - break; - case 2: - gen_helper_neon_qrdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - break; - default: abort(); - } - } - break; - case NEON_3R_VPADD: - switch (size) { - case 0: gen_helper_neon_padd_u8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: gen_helper_neon_padd_u16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - break; - case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. 
*/ - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - switch ((u << 2) | size) { - case 0: /* VADD */ - case 4: /* VPADD */ - gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); - break; - case 2: /* VSUB */ - gen_helper_vfp_subs(tcg_ctx, tmp, tmp, tmp2, fpstatus); - break; - case 6: /* VABD */ - gen_helper_neon_abd_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); - break; - default: - abort(); - } - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_3R_FLOAT_MULTIPLY: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - gen_helper_vfp_muls(tcg_ctx, tmp, tmp, tmp2, fpstatus); - if (!u) { - tcg_temp_free_i32(tcg_ctx, tmp2); - tmp2 = neon_load_reg(tcg_ctx, rd, pass); - if (size == 0) { - gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); - } else { - gen_helper_vfp_subs(tcg_ctx, tmp, tmp2, tmp, fpstatus); - } - } - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_3R_FLOAT_CMP: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - if (!u) { - gen_helper_neon_ceq_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); - } else { - if (size == 0) { - gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); - } else { - gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); - } - } - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_3R_FLOAT_ACMP: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - if (size == 0) { - gen_helper_neon_acge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); - } else { - gen_helper_neon_acgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); - } - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_3R_FLOAT_MINMAX: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - if (size == 0) { - gen_helper_vfp_maxs(tcg_ctx, tmp, tmp, tmp2, fpstatus); - } else { - gen_helper_vfp_mins(tcg_ctx, tmp, tmp, tmp2, fpstatus); - } - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_3R_FLOAT_MISC: - if (u) { - /* VMAXNM/VMINNM */ - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - if (size == 0) { - gen_helper_vfp_maxnums(tcg_ctx, tmp, tmp, tmp2, fpstatus); - } else { - gen_helper_vfp_minnums(tcg_ctx, tmp, tmp, tmp2, fpstatus); - } - tcg_temp_free_ptr(tcg_ctx, fpstatus); - } else { - if (size == 0) { - gen_helper_recps_f32(tcg_ctx, tmp, tmp, tmp2, tcg_ctx->cpu_env); - } else { - gen_helper_rsqrts_f32(tcg_ctx, tmp, tmp, tmp2, tcg_ctx->cpu_env); - } - } - break; - case NEON_3R_VFM: - { - /* VFMA, VFMS: fused multiply-add */ - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - TCGv_i32 tmp3 = neon_load_reg(tcg_ctx, rd, pass); - if (size) { - /* VFMS */ - gen_helper_vfp_negs(tcg_ctx, tmp, tmp); - } - gen_helper_vfp_muladds(tcg_ctx, tmp, tmp, tmp2, tmp3, fpstatus); - tcg_temp_free_i32(tcg_ctx, tmp3); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - default: - abort(); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - - /* Save the result. For elementwise operations we can put it - straight into the destination register. For pairwise operations - we have to be careful to avoid clobbering the source operands. */ - if (pairwise && rd == rm) { - neon_store_scratch(tcg_ctx, pass, tmp); - } else { - neon_store_reg(tcg_ctx, rd, pass, tmp); - } - - } /* for pass */ - if (pairwise && rd == rm) { - for (pass = 0; pass < (q ? 4 : 2); pass++) { - tmp = neon_load_scratch(tcg_ctx, pass); - neon_store_reg(tcg_ctx, rd, pass, tmp); - } - } - /* End of 3 register same size operations. */ - } else if (insn & (1 << 4)) { - if ((insn & 0x00380080) != 0) { - /* Two registers and shift. */ - op = (insn >> 8) & 0xf; - if (insn & (1 << 7)) { - /* 64-bit shift. 
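/*
 * Illustrative sketch: each float case in this block brackets its helper
 * call with an fp_status pointer; the non-zero argument to
 * get_fpstatus_ptr() selects the Neon "standard FPSCR" status rather than
 * the normal FPSCR-controlled one:
 *
 *     TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1);
 *     gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus);
 *     tcg_temp_free_ptr(tcg_ctx, fpstatus);
 */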
*/ - if (op > 7) { - return 1; - } - size = 3; - } else { - size = 2; - while ((insn & (1 << (size + 19))) == 0) - size--; - } - shift = (insn >> 16) & ((1 << (3 + size)) - 1); - /* To avoid excessive duplication of ops we implement shift - by immediate using the variable shift operations. */ - if (op < 8) { - /* Shift by immediate: - VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */ - if (q && ((rd | rm) & 1)) { - return 1; - } - if (!u && (op == 4 || op == 6)) { - return 1; - } - /* Right shifts are encoded as N - shift, where N is the - element size in bits. */ - if (op <= 4) - shift = shift - (1 << (size + 3)); - if (size == 3) { - count = q + 1; - } else { - count = q ? 4: 2; - } - switch (size) { - case 0: - imm = (uint8_t) shift; - imm |= imm << 8; - imm |= imm << 16; - break; - case 1: - imm = (uint16_t) shift; - imm |= imm << 16; - break; - case 2: - case 3: - imm = shift; - break; - default: - abort(); - } - - for (pass = 0; pass < count; pass++) { - if (size == 3) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm + pass); - tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_V1, imm); - switch (op) { - case 0: /* VSHR */ - case 1: /* VSRA */ - if (u) - gen_helper_neon_shl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - else - gen_helper_neon_shl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - break; - case 2: /* VRSHR */ - case 3: /* VRSRA */ - if (u) - gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - else - gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - break; - case 4: /* VSRI */ - case 5: /* VSHL, VSLI */ - gen_helper_neon_shl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - break; - case 6: /* VQSHLU */ - gen_helper_neon_qshlu_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - break; - case 7: /* VQSHL */ - if (u) { - gen_helper_neon_qshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - } else { - gen_helper_neon_qshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, - tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - } - break; - } - if (op == 1 || op == 3) { - /* Accumulate. */ - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); - tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - } else if (op == 4 || (op == 5 && u)) { - /* Insert */ - uint64_t mask; - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); - if (shift < -63 || shift > 63) { - mask = 0; - } else { - if (op == 4) { - mask = 0xffffffffffffffffull >> -shift; - } else { - mask = 0xffffffffffffffffull << shift; - } - } - tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, ~mask); - tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - } - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); - } else { /* size < 3 */ - /* Operands in T0 and T1. 
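/*
 * Illustrative sketch: shift-by-immediate reuses the variable-shift
 * helpers, so right shifts become negative counts first; the encoding
 * stores element_bits - shift, hence:
 *
 *     if (op <= 4)
 *         shift = shift - (1 << (size + 3));   // e.g. 8-bit VSHR #3 -> -3
 *
 * and a negative count passed to the gen_helper_neon_shl_* family shifts
 * right.
 */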
*/ - tmp = neon_load_reg(tcg_ctx, rm, pass); - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp2, imm); - switch (op) { - case 0: /* VSHR */ - case 1: /* VSRA */ - GEN_NEON_INTEGER_OP(shl); - break; - case 2: /* VRSHR */ - case 3: /* VRSRA */ - GEN_NEON_INTEGER_OP(rshl); - break; - case 4: /* VSRI */ - case 5: /* VSHL, VSLI */ - switch (size) { - case 0: gen_helper_neon_shl_u8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: gen_helper_neon_shl_u16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: gen_helper_neon_shl_u32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - break; - case 6: /* VQSHLU */ - switch (size) { - case 0: - gen_helper_neon_qshlu_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, - tmp, tmp2); - break; - case 1: - gen_helper_neon_qshlu_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, - tmp, tmp2); - break; - case 2: - gen_helper_neon_qshlu_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, - tmp, tmp2); - break; - default: - abort(); - } - break; - case 7: /* VQSHL */ - GEN_NEON_INTEGER_OP_ENV(qshl); - break; - } - tcg_temp_free_i32(tcg_ctx, tmp2); - - if (op == 1 || op == 3) { - /* Accumulate. */ - tmp2 = neon_load_reg(tcg_ctx, rd, pass); - gen_neon_add(s, size, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } else if (op == 4 || (op == 5 && u)) { - /* Insert */ - switch (size) { - case 0: - if (op == 4) - mask = 0xff >> -shift; - else - mask = (uint8_t)(0xff << shift); - mask |= mask << 8; - mask |= mask << 16; - break; - case 1: - if (op == 4) - mask = 0xffff >> -shift; - else - mask = (uint16_t)(0xffff << shift); - mask |= mask << 16; - break; - case 2: - if (shift < -31 || shift > 31) { - mask = 0; - } else { - if (op == 4) - mask = 0xffffffffu >> -shift; - else - mask = 0xffffffffu << shift; - } - break; - default: - abort(); - } - tmp2 = neon_load_reg(tcg_ctx, rd, pass); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, mask); - tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, ~mask); - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - neon_store_reg(tcg_ctx, rd, pass, tmp); - } - } /* for pass */ - } else if (op < 10) { - /* Shift by immediate and narrow: - VSHRN, VRSHRN, VQSHRN, VQRSHRN. */ - int input_unsigned = (op == 8) ? 
!u : u; - if (rm & 1) { - return 1; - } - shift = shift - (1 << (size + 3)); - size++; - if (size == 3) { - tmp64 = tcg_const_i64(tcg_ctx, shift); - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm); - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + 1); - for (pass = 0; pass < 2; pass++) { - TCGv_i64 in; - if (pass == 0) { - in = tcg_ctx->cpu_V0; - } else { - in = tcg_ctx->cpu_V1; - } - if (q) { - if (input_unsigned) { - gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); - } else { - gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); - } - } else { - if (input_unsigned) { - gen_helper_neon_shl_u64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); - } else { - gen_helper_neon_shl_s64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); - } - } - tmp = tcg_temp_new_i32(tcg_ctx); - gen_neon_narrow_op(s, op == 8, u, size - 1, tmp, tcg_ctx->cpu_V0); - neon_store_reg(tcg_ctx, rd, pass, tmp); - } /* for pass */ - tcg_temp_free_i64(tcg_ctx, tmp64); - } else { - if (size == 1) { - imm = (uint16_t)shift; - imm |= imm << 16; - } else { - /* size == 2 */ - imm = (uint32_t)shift; - } - tmp2 = tcg_const_i32(tcg_ctx, imm); - tmp4 = neon_load_reg(tcg_ctx, rm + 1, 0); - tmp5 = neon_load_reg(tcg_ctx, rm + 1, 1); - for (pass = 0; pass < 2; pass++) { - if (pass == 0) { - tmp = neon_load_reg(tcg_ctx, rm, 0); - } else { - tmp = tmp4; - } - gen_neon_shift_narrow(s, size, tmp, tmp2, q, - input_unsigned); - if (pass == 0) { - tmp3 = neon_load_reg(tcg_ctx, rm, 1); - } else { - tmp3 = tmp5; - } - gen_neon_shift_narrow(s, size, tmp3, tmp2, q, - input_unsigned); - tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp3); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, tmp3); - tmp = tcg_temp_new_i32(tcg_ctx); - gen_neon_narrow_op(s, op == 8, u, size - 1, tmp, tcg_ctx->cpu_V0); - neon_store_reg(tcg_ctx, rd, pass, tmp); - } /* for pass */ - tcg_temp_free_i32(tcg_ctx, tmp2); - } - } else if (op == 10) { - /* VSHLL, VMOVL */ - if (q || (rd & 1)) { - return 1; - } - tmp = neon_load_reg(tcg_ctx, rm, 0); - tmp2 = neon_load_reg(tcg_ctx, rm, 1); - for (pass = 0; pass < 2; pass++) { - if (pass == 1) - tmp = tmp2; - - gen_neon_widen(s, tcg_ctx->cpu_V0, tmp, size, u); - - if (shift != 0) { - /* The shift is less than the width of the source - type, so we can just shift the whole register. */ - tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, shift); - /* Widen the result of shift: we need to clear - * the potential overflow bits resulting from - * left bits of the narrow input appearing as - * right bits of left the neighbour narrow - * input. */ - if (size < 2 || !u) { - uint64_t imm64; - if (size == 0) { - imm = (0xffu >> (8 - shift)); - imm |= imm << 16; - } else if (size == 1) { - imm = 0xffff >> (16 - shift); - } else { - /* size == 2 */ - imm = 0xffffffff >> (32 - shift); - } - if (size < 2) { - imm64 = imm | (((uint64_t)imm) << 32); - } else { - imm64 = imm; - } - tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, ~imm64); - } - } - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); - } - } else if (op >= 14) { - /* VCVT fixed-point. */ - if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) { - return 1; - } - /* We have already masked out the must-be-1 top bit of imm6, - * hence this 32-shift where the ARM ARM has 64-imm6. - */ - shift = 32 - shift; - for (pass = 0; pass < (q ? 
4 : 2); pass++) { - tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rm, pass)); - if (!(op & 1)) { - if (u) - gen_vfp_ulto(s, 0, shift, 1); - else - gen_vfp_slto(s, 0, shift, 1); - } else { - if (u) - gen_vfp_toul(s, 0, shift, 1); - else - gen_vfp_tosl(s, 0, shift, 1); - } - tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rd, pass)); - } - } else { - return 1; - } - } else { /* (insn & 0x00380080) == 0 */ - int invert; - if (q && (rd & 1)) { - return 1; - } - - op = (insn >> 8) & 0xf; - /* One register and immediate. */ - imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf); - invert = (insn & (1 << 5)) != 0; - /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE. - * We choose to not special-case this and will behave as if a - * valid constant encoding of 0 had been given. - */ - switch (op) { - case 0: case 1: - /* no-op */ - break; - case 2: case 3: - imm <<= 8; - break; - case 4: case 5: - imm <<= 16; - break; - case 6: case 7: - imm <<= 24; - break; - case 8: case 9: - imm |= imm << 16; - break; - case 10: case 11: - imm = (imm << 8) | (imm << 24); - break; - case 12: - imm = (imm << 8) | 0xff; - break; - case 13: - imm = (imm << 16) | 0xffff; - break; - case 14: - imm |= (imm << 8) | (imm << 16) | (imm << 24); - if (invert) - imm = ~imm; - break; - case 15: - if (invert) { - return 1; - } - imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) - | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); - break; - } - if (invert) - imm = ~imm; - - for (pass = 0; pass < (q ? 4 : 2); pass++) { - if (op & 1 && op < 12) { - tmp = neon_load_reg(tcg_ctx, rd, pass); - if (invert) { - /* The immediate value has already been inverted, so - BIC becomes AND. */ - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, imm); - } else { - tcg_gen_ori_i32(tcg_ctx, tmp, tmp, imm); - } - } else { - /* VMOV, VMVN. */ - tmp = tcg_temp_new_i32(tcg_ctx); - if (op == 14 && invert) { - int n; - uint32_t val; - val = 0; - for (n = 0; n < 4; n++) { - if (imm & (1 << (n + (pass & 1) * 4))) - val |= 0xffU << (n * 8); - } - tcg_gen_movi_i32(tcg_ctx, tmp, val); - } else { - tcg_gen_movi_i32(tcg_ctx, tmp, imm); - } - } - neon_store_reg(tcg_ctx, rd, pass, tmp); - } - } - } else { /* (insn & 0x00800010 == 0x00800000) */ - if (size != 3) { - op = (insn >> 8) & 0xf; - if ((insn & (1 << 6)) == 0) { - /* Three registers of different lengths. 
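/*
 * Illustrative sketch: the one-register-and-immediate forms expand eight
 * encoded bits into a 32-bit constant according to 'op', for example:
 *
 *     case 4: case 5: imm <<= 16;              break;   // 00xx0000
 *     case 12:        imm = (imm << 8) | 0xff; break;   // 0000xxff
 *
 * 'invert' then complements the constant, which is also what lets BIC be
 * emitted as a plain AND below.
 */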
*/ - int src1_wide; - int src2_wide; - int prewiden; - /* undefreq: bit 0 : UNDEF if size == 0 - * bit 1 : UNDEF if size == 1 - * bit 2 : UNDEF if size == 2 - * bit 3 : UNDEF if U == 1 - * Note that [2:0] set implies 'always UNDEF' - */ - int undefreq; - /* prewiden, src1_wide, src2_wide, undefreq */ - static const int neon_3reg_wide[16][4] = { - {1, 0, 0, 0}, /* VADDL */ - {1, 1, 0, 0}, /* VADDW */ - {1, 0, 0, 0}, /* VSUBL */ - {1, 1, 0, 0}, /* VSUBW */ - {0, 1, 1, 0}, /* VADDHN */ - {0, 0, 0, 0}, /* VABAL */ - {0, 1, 1, 0}, /* VSUBHN */ - {0, 0, 0, 0}, /* VABDL */ - {0, 0, 0, 0}, /* VMLAL */ - {0, 0, 0, 9}, /* VQDMLAL */ - {0, 0, 0, 0}, /* VMLSL */ - {0, 0, 0, 9}, /* VQDMLSL */ - {0, 0, 0, 0}, /* Integer VMULL */ - {0, 0, 0, 1}, /* VQDMULL */ - {0, 0, 0, 0xa}, /* Polynomial VMULL */ - {0, 0, 0, 7}, /* Reserved: always UNDEF */ - }; - - prewiden = neon_3reg_wide[op][0]; - src1_wide = neon_3reg_wide[op][1]; - src2_wide = neon_3reg_wide[op][2]; - undefreq = neon_3reg_wide[op][3]; - - if ((undefreq & (1 << size)) || - ((undefreq & 8) && u)) { - return 1; - } - if ((src1_wide && (rn & 1)) || - (src2_wide && (rm & 1)) || - (!src2_wide && (rd & 1))) { - return 1; - } - - /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply) - * outside the loop below as it only performs a single pass. - */ - if (op == 14 && size == 2) { - TCGv_i64 tcg_rn, tcg_rm, tcg_rd; - - if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) { - return 1; - } - tcg_rn = tcg_temp_new_i64(tcg_ctx); - tcg_rm = tcg_temp_new_i64(tcg_ctx); - tcg_rd = tcg_temp_new_i64(tcg_ctx); - neon_load_reg64(tcg_ctx, tcg_rn, rn); - neon_load_reg64(tcg_ctx, tcg_rm, rm); - gen_helper_neon_pmull_64_lo(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); - neon_store_reg64(tcg_ctx, tcg_rd, rd); - gen_helper_neon_pmull_64_hi(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); - neon_store_reg64(tcg_ctx, tcg_rd, rd + 1); - tcg_temp_free_i64(tcg_ctx, tcg_rn); - tcg_temp_free_i64(tcg_ctx, tcg_rm); - tcg_temp_free_i64(tcg_ctx, tcg_rd); - return 0; - } - - /* Avoid overlapping operands. Wide source operands are - always aligned so will never overlap with wide - destinations in problematic ways. 
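/*
 * Illustrative sketch: 'undefreq' from neon_3reg_wide[] packs each wide
 * op's UNDEF conditions into four bits, checked in one place:
 *
 *     if ((undefreq & (1 << size)) ||   // bits 0..2: UNDEF for this size
 *         ((undefreq & 8) && u)) {      // bit 3: UNDEF when U == 1
 *         return 1;
 *     }
 */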
*/ - if (rd == rm && !src2_wide) { - tmp = neon_load_reg(tcg_ctx, rm, 1); - neon_store_scratch(tcg_ctx, 2, tmp); - } else if (rd == rn && !src1_wide) { - tmp = neon_load_reg(tcg_ctx, rn, 1); - neon_store_scratch(tcg_ctx, 2, tmp); - } - TCGV_UNUSED_I32(tmp3); - for (pass = 0; pass < 2; pass++) { - if (src1_wide) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + pass); - TCGV_UNUSED_I32(tmp); - } else { - if (pass == 1 && rd == rn) { - tmp = neon_load_scratch(tcg_ctx, 2); - } else { - tmp = neon_load_reg(tcg_ctx, rn, pass); - } - if (prewiden) { - gen_neon_widen(s, tcg_ctx->cpu_V0, tmp, size, u); - } - } - if (src2_wide) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + pass); - TCGV_UNUSED_I32(tmp2); - } else { - if (pass == 1 && rd == rm) { - tmp2 = neon_load_scratch(tcg_ctx, 2); - } else { - tmp2 = neon_load_reg(tcg_ctx, rm, pass); - } - if (prewiden) { - gen_neon_widen(s, tcg_ctx->cpu_V1, tmp2, size, u); - } - } - switch (op) { - case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */ - gen_neon_addl(s, size); - break; - case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */ - gen_neon_subl(s, size); - break; - case 5: case 7: /* VABAL, VABDL */ - switch ((size << 1) | u) { - case 0: - gen_helper_neon_abdl_s16(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); - break; - case 1: - gen_helper_neon_abdl_u16(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); - break; - case 2: - gen_helper_neon_abdl_s32(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); - break; - case 3: - gen_helper_neon_abdl_u32(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); - break; - case 4: - gen_helper_neon_abdl_s64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); - break; - case 5: - gen_helper_neon_abdl_u64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); - break; - default: abort(); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 8: case 9: case 10: case 11: case 12: case 13: - /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */ - gen_neon_mull(s, tcg_ctx->cpu_V0, tmp, tmp2, size, u); - break; - case 14: /* Polynomial VMULL */ - gen_helper_neon_mull_p8(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - default: /* 15 is RESERVED: caught earlier */ - abort(); - } - if (op == 13) { - /* VQDMULL */ - gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); - } else if (op == 5 || (op >= 8 && op <= 11)) { - /* Accumulate. */ - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); - switch (op) { - case 10: /* VMLSL */ - gen_neon_negl(s, tcg_ctx->cpu_V0, size); - /* Fall through */ - case 5: case 8: /* VABAL, VMLAL */ - gen_neon_addl(s, size); - break; - case 9: case 11: /* VQDMLAL, VQDMLSL */ - gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); - if (op == 11) { - gen_neon_negl(s, tcg_ctx->cpu_V0, size); - } - gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, size); - break; - default: - abort(); - } - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); - } else if (op == 4 || op == 6) { - /* Narrowing operation. 
*/ - tmp = tcg_temp_new_i32(tcg_ctx); - if (!u) { - switch (size) { - case 0: - gen_helper_neon_narrow_high_u8(tcg_ctx, tmp, tcg_ctx->cpu_V0); - break; - case 1: - gen_helper_neon_narrow_high_u16(tcg_ctx, tmp, tcg_ctx->cpu_V0); - break; - case 2: - tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 32); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); - break; - default: abort(); - } - } else { - switch (size) { - case 0: - gen_helper_neon_narrow_round_high_u8(tcg_ctx, tmp, tcg_ctx->cpu_V0); - break; - case 1: - gen_helper_neon_narrow_round_high_u16(tcg_ctx, tmp, tcg_ctx->cpu_V0); - break; - case 2: - tcg_gen_addi_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 1u << 31); - tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 32); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); - break; - default: abort(); - } - } - if (pass == 0) { - tmp3 = tmp; - } else { - neon_store_reg(tcg_ctx, rd, 0, tmp3); - neon_store_reg(tcg_ctx, rd, 1, tmp); - } - } else { - /* Write back the result. */ - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); - } - } - } else { - /* Two registers and a scalar. NB that for ops of this form - * the ARM ARM labels bit 24 as Q, but it is in our variable - * 'u', not 'q'. - */ - if (size == 0) { - return 1; - } - switch (op) { - case 1: /* Float VMLA scalar */ - case 5: /* Floating point VMLS scalar */ - case 9: /* Floating point VMUL scalar */ - if (size == 1) { - return 1; - } - /* fall through */ - case 0: /* Integer VMLA scalar */ - case 4: /* Integer VMLS scalar */ - case 8: /* Integer VMUL scalar */ - case 12: /* VQDMULH scalar */ - case 13: /* VQRDMULH scalar */ - if (u && ((rd | rn) & 1)) { - return 1; - } - tmp = neon_get_scalar(s, size, rm); - neon_store_scratch(tcg_ctx, 0, tmp); - for (pass = 0; pass < (u ? 4 : 2); pass++) { - tmp = neon_load_scratch(tcg_ctx, 0); - tmp2 = neon_load_reg(tcg_ctx, rn, pass); - if (op == 12) { - if (size == 1) { - gen_helper_neon_qdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - } else { - gen_helper_neon_qdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - } - } else if (op == 13) { - if (size == 1) { - gen_helper_neon_qrdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - } else { - gen_helper_neon_qrdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - } - } else if (op & 1) { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - gen_helper_vfp_muls(tcg_ctx, tmp, tmp, tmp2, fpstatus); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - } else { - switch (size) { - case 0: gen_helper_neon_mul_u8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: gen_helper_neon_mul_u16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - } - tcg_temp_free_i32(tcg_ctx, tmp2); - if (op < 8) { - /* Accumulate. 
*/ - tmp2 = neon_load_reg(tcg_ctx, rd, pass); - switch (op) { - case 0: - gen_neon_add(s, size, tmp, tmp2); - break; - case 1: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case 4: - gen_neon_rsb(s, size, tmp, tmp2); - break; - case 5: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - gen_helper_vfp_subs(tcg_ctx, tmp, tmp2, tmp, fpstatus); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - default: - abort(); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - } - neon_store_reg(tcg_ctx, rd, pass, tmp); - } - break; - case 3: /* VQDMLAL scalar */ - case 7: /* VQDMLSL scalar */ - case 11: /* VQDMULL scalar */ - if (u == 1) { - return 1; - } - /* fall through */ - case 2: /* VMLAL sclar */ - case 6: /* VMLSL scalar */ - case 10: /* VMULL scalar */ - if (rd & 1) { - return 1; - } - tmp2 = neon_get_scalar(s, size, rm); - /* We need a copy of tmp2 because gen_neon_mull - * deletes it during pass 0. */ - tmp4 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, tmp4, tmp2); - tmp3 = neon_load_reg(tcg_ctx, rn, 1); - - for (pass = 0; pass < 2; pass++) { - if (pass == 0) { - tmp = neon_load_reg(tcg_ctx, rn, 0); - } else { - tmp = tmp3; - tmp2 = tmp4; - } - gen_neon_mull(s, tcg_ctx->cpu_V0, tmp, tmp2, size, u); - if (op != 11) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); - } - switch (op) { - case 6: - gen_neon_negl(s, tcg_ctx->cpu_V0, size); - /* Fall through */ - case 2: - gen_neon_addl(s, size); - break; - case 3: case 7: - gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); - if (op == 7) { - gen_neon_negl(s, tcg_ctx->cpu_V0, size); - } - gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, size); - break; - case 10: - /* no-op */ - break; - case 11: - gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); - break; - default: - abort(); - } - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); - } - - - break; - default: /* 14 and 15 are RESERVED */ - return 1; - } - } - } else { /* size == 3 */ - if (!u) { - /* Extract. 
*/ - imm = (insn >> 8) & 0xf; - - if (imm > 7 && !q) - return 1; - - if (q && ((rd | rn | rm) & 1)) { - return 1; - } - - if (imm == 0) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); - if (q) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rn + 1); - } - } else if (imm == 8) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + 1); - if (q) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); - } - } else if (q) { - tmp64 = tcg_temp_new_i64(tcg_ctx); - if (imm < 8) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); - neon_load_reg64(tcg_ctx, tmp64, rn + 1); - } else { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + 1); - neon_load_reg64(tcg_ctx, tmp64, rm); - } - tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, (imm & 7) * 8); - tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tmp64, 64 - ((imm & 7) * 8)); - tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - if (imm < 8) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); - } else { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + 1); - imm -= 8; - } - tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, 64 - (imm * 8)); - tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, imm * 8); - tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - } else { - /* BUGFIX */ - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); - tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, imm * 8); - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); - tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, 64 - (imm * 8)); - tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); - } - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd); - if (q) { - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + 1); - } - } else if ((insn & (1 << 11)) == 0) { - /* Two register misc. */ - op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf); - size = (insn >> 18) & 3; - /* UNDEF for unknown op values and bad op-size combinations */ - if ((neon_2rm_sizes[op] & (1 << size)) == 0) { - return 1; - } - if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) && - q && ((rm | rd) & 1)) { - return 1; - } - switch (op) { - case NEON_2RM_VREV64: - for (pass = 0; pass < (q ? 2 : 1); pass++) { - tmp = neon_load_reg(tcg_ctx, rm, pass * 2); - tmp2 = neon_load_reg(tcg_ctx, rm, pass * 2 + 1); - switch (size) { - case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); break; - case 1: gen_swap_half(s, tmp); break; - case 2: /* no-op */ break; - default: abort(); - } - neon_store_reg(tcg_ctx, rd, pass * 2 + 1, tmp); - if (size == 2) { - neon_store_reg(tcg_ctx, rd, pass * 2, tmp2); - } else { - switch (size) { - case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp2, tmp2); break; - case 1: gen_swap_half(s, tmp2); break; - default: abort(); - } - neon_store_reg(tcg_ctx, rd, pass * 2, tmp2); - } - } - break; - case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U: - case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U: - for (pass = 0; pass < q + 1; pass++) { - tmp = neon_load_reg(tcg_ctx, rm, pass * 2); - gen_neon_widen(s, tcg_ctx->cpu_V0, tmp, size, op & 1); - tmp = neon_load_reg(tcg_ctx, rm, pass * 2 + 1); - gen_neon_widen(s, tcg_ctx->cpu_V1, tmp, size, op & 1); - switch (size) { - case 0: gen_helper_neon_paddl_u16(tcg_ctx, CPU_V001); break; - case 1: gen_helper_neon_paddl_u32(tcg_ctx, CPU_V001); break; - case 2: tcg_gen_add_i64(tcg_ctx, CPU_V001); break; - default: abort(); - } - if (op >= NEON_2RM_VPADAL) { - /* Accumulate. 
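/*
 * Illustrative sketch: the VEXT path above is a byte-granular funnel
 * shift built from 64-bit shifts plus an OR; per result double-word it
 * computes, conceptually:
 *
 *     dst = (src_lo >> ((imm & 7) * 8)) | (src_hi << (64 - (imm & 7) * 8));
 */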
*/ - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); - gen_neon_addl(s, size); - } - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); - } - break; - case NEON_2RM_VTRN: - if (size == 2) { - int n; - for (n = 0; n < (q ? 4 : 2); n += 2) { - tmp = neon_load_reg(tcg_ctx, rm, n); - tmp2 = neon_load_reg(tcg_ctx, rd, n + 1); - neon_store_reg(tcg_ctx, rm, n, tmp2); - neon_store_reg(tcg_ctx, rd, n + 1, tmp); - } - } else { - goto elementwise; - } - break; - case NEON_2RM_VUZP: - if (gen_neon_unzip(tcg_ctx, rd, rm, size, q)) { - return 1; - } - break; - case NEON_2RM_VZIP: - if (gen_neon_zip(tcg_ctx, rd, rm, size, q)) { - return 1; - } - break; - case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN: - /* also VQMOVUN; op field and mnemonics don't line up */ - if (rm & 1) { - return 1; - } - TCGV_UNUSED_I32(tmp2); - for (pass = 0; pass < 2; pass++) { - neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm + pass); - tmp = tcg_temp_new_i32(tcg_ctx); - gen_neon_narrow_op(s, op == NEON_2RM_VMOVN, q, size, - tmp, tcg_ctx->cpu_V0); - if (pass == 0) { - tmp2 = tmp; - } else { - neon_store_reg(tcg_ctx, rd, 0, tmp2); - neon_store_reg(tcg_ctx, rd, 1, tmp); - } - } - break; - case NEON_2RM_VSHLL: - if (q || (rd & 1)) { - return 1; - } - tmp = neon_load_reg(tcg_ctx, rm, 0); - tmp2 = neon_load_reg(tcg_ctx, rm, 1); - for (pass = 0; pass < 2; pass++) { - if (pass == 1) - tmp = tmp2; - gen_neon_widen(s, tcg_ctx->cpu_V0, tmp, size, 1); - tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 8 << size); - neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); - } - break; - case NEON_2RM_VCVT_F16_F32: - if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) || - q || (rm & 1)) { - return 1; - } - tmp = tcg_temp_new_i32(tcg_ctx); - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rm, 0)); - gen_helper_neon_fcvt_f32_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); - tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rm, 1)); - gen_helper_neon_fcvt_f32_to_f16(tcg_ctx, tmp2, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); - tcg_gen_or_i32(tcg_ctx, tmp2, tmp2, tmp); - tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rm, 2)); - gen_helper_neon_fcvt_f32_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); - tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rm, 3)); - neon_store_reg(tcg_ctx, rd, 0, tmp2); - tmp2 = tcg_temp_new_i32(tcg_ctx); - gen_helper_neon_fcvt_f32_to_f16(tcg_ctx, tmp2, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); - tcg_gen_or_i32(tcg_ctx, tmp2, tmp2, tmp); - neon_store_reg(tcg_ctx, rd, 1, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case NEON_2RM_VCVT_F32_F16: - if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) || - q || (rd & 1)) { - return 1; - } - tmp3 = tcg_temp_new_i32(tcg_ctx); - tmp = neon_load_reg(tcg_ctx, rm, 0); - tmp2 = neon_load_reg(tcg_ctx, rm, 1); - tcg_gen_ext16u_i32(tcg_ctx, tmp3, tmp); - gen_helper_neon_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp3, tcg_ctx->cpu_env); - tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rd, 0)); - tcg_gen_shri_i32(tcg_ctx, tmp3, tmp, 16); - gen_helper_neon_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp3, tcg_ctx->cpu_env); - tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rd, 1)); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_gen_ext16u_i32(tcg_ctx, tmp3, tmp2); - 
gen_helper_neon_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp3, tcg_ctx->cpu_env); - tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rd, 2)); - tcg_gen_shri_i32(tcg_ctx, tmp3, tmp2, 16); - gen_helper_neon_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp3, tcg_ctx->cpu_env); - tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rd, 3)); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp3); - break; - case NEON_2RM_AESE: case NEON_2RM_AESMC: - if (!arm_dc_feature(s, ARM_FEATURE_V8_AES) - || ((rm | rd) & 1)) { - return 1; - } - tmp = tcg_const_i32(tcg_ctx, rd); - tmp2 = tcg_const_i32(tcg_ctx, rm); - - /* Bit 6 is the lowest opcode bit; it distinguishes between - * encryption (AESE/AESMC) and decryption (AESD/AESIMC) - */ - tmp3 = tcg_const_i32(tcg_ctx, extract32(insn, 6, 1)); - - if (op == NEON_2RM_AESE) { - gen_helper_crypto_aese(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3); - } else { - gen_helper_crypto_aesmc(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3); - } - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp3); - break; - case NEON_2RM_SHA1H: - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1) - || ((rm | rd) & 1)) { - return 1; - } - tmp = tcg_const_i32(tcg_ctx, rd); - tmp2 = tcg_const_i32(tcg_ctx, rm); - - gen_helper_crypto_sha1h(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - break; - case NEON_2RM_SHA1SU1: - if ((rm | rd) & 1) { - return 1; - } - /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */ - if (q) { - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) { - return 1; - } - } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) { - return 1; - } - tmp = tcg_const_i32(tcg_ctx, rd); - tmp2 = tcg_const_i32(tcg_ctx, rm); - if (q) { - gen_helper_crypto_sha256su0(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - } else { - gen_helper_crypto_sha1su1(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); - } - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - break; - default: - elementwise: - for (pass = 0; pass < (q ? 
4 : 2); pass++) { - if (neon_2rm_is_float_op(op)) { - tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, - neon_reg_offset(rm, pass)); - TCGV_UNUSED_I32(tmp); - } else { - tmp = neon_load_reg(tcg_ctx, rm, pass); - } - switch (op) { - case NEON_2RM_VREV32: - switch (size) { - case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); break; - case 1: gen_swap_half(s, tmp); break; - default: abort(); - } - break; - case NEON_2RM_VREV16: - gen_rev16(s, tmp); - break; - case NEON_2RM_VCLS: - switch (size) { - case 0: gen_helper_neon_cls_s8(tcg_ctx, tmp, tmp); break; - case 1: gen_helper_neon_cls_s16(tcg_ctx, tmp, tmp); break; - case 2: gen_helper_neon_cls_s32(tcg_ctx, tmp, tmp); break; - default: abort(); - } - break; - case NEON_2RM_VCLZ: - switch (size) { - case 0: gen_helper_neon_clz_u8(tcg_ctx, tmp, tmp); break; - case 1: gen_helper_neon_clz_u16(tcg_ctx, tmp, tmp); break; - case 2: gen_helper_clz(tcg_ctx, tmp, tmp); break; - default: abort(); - } - break; - case NEON_2RM_VCNT: - gen_helper_neon_cnt_u8(tcg_ctx, tmp, tmp); - break; - case NEON_2RM_VMVN: - tcg_gen_not_i32(tcg_ctx, tmp, tmp); - break; - case NEON_2RM_VQABS: - switch (size) { - case 0: - gen_helper_neon_qabs_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); - break; - case 1: - gen_helper_neon_qabs_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); - break; - case 2: - gen_helper_neon_qabs_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); - break; - default: abort(); - } - break; - case NEON_2RM_VQNEG: - switch (size) { - case 0: - gen_helper_neon_qneg_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); - break; - case 1: - gen_helper_neon_qneg_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); - break; - case 2: - gen_helper_neon_qneg_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); - break; - default: abort(); - } - break; - case NEON_2RM_VCGT0: case NEON_2RM_VCLE0: - tmp2 = tcg_const_i32(tcg_ctx, 0); - switch(size) { - case 0: gen_helper_neon_cgt_s8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: gen_helper_neon_cgt_s16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: gen_helper_neon_cgt_s32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - if (op == NEON_2RM_VCLE0) { - tcg_gen_not_i32(tcg_ctx, tmp, tmp); - } - break; - case NEON_2RM_VCGE0: case NEON_2RM_VCLT0: - tmp2 = tcg_const_i32(tcg_ctx, 0); - switch(size) { - case 0: gen_helper_neon_cge_s8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: gen_helper_neon_cge_s16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: gen_helper_neon_cge_s32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - if (op == NEON_2RM_VCLT0) { - tcg_gen_not_i32(tcg_ctx, tmp, tmp); - } - break; - case NEON_2RM_VCEQ0: - tmp2 = tcg_const_i32(tcg_ctx, 0); - switch(size) { - case 0: gen_helper_neon_ceq_u8(tcg_ctx, tmp, tmp, tmp2); break; - case 1: gen_helper_neon_ceq_u16(tcg_ctx, tmp, tmp, tmp2); break; - case 2: gen_helper_neon_ceq_u32(tcg_ctx, tmp, tmp, tmp2); break; - default: abort(); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - break; - case NEON_2RM_VABS: - switch(size) { - case 0: gen_helper_neon_abs_s8(tcg_ctx, tmp, tmp); break; - case 1: gen_helper_neon_abs_s16(tcg_ctx, tmp, tmp); break; - case 2: tcg_gen_abs_i32(s, tmp, tmp); break; - default: abort(); - } - break; - case NEON_2RM_VNEG: - tmp2 = tcg_const_i32(tcg_ctx, 0); - gen_neon_rsb(s, size, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - break; - case NEON_2RM_VCGT0_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - tmp2 = tcg_const_i32(tcg_ctx, 0); - gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); - 
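/*
 * Illustrative sketch: there are no dedicated <= 0 / < 0 integer compare
 * helpers, so VCLE0/VCLT0 are emitted as VCGT0/VCGE0 followed by a NOT of
 * the resulting all-ones/all-zeroes mask:
 *
 *     if (op == NEON_2RM_VCLE0) {
 *         tcg_gen_not_i32(tcg_ctx, tmp, tmp);
 *     }
 */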
tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_2RM_VCGE0_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - tmp2 = tcg_const_i32(tcg_ctx, 0); - gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_2RM_VCEQ0_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - tmp2 = tcg_const_i32(tcg_ctx, 0); - gen_helper_neon_ceq_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_2RM_VCLE0_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - tmp2 = tcg_const_i32(tcg_ctx, 0); - gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp2, tmp, fpstatus); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_2RM_VCLT0_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - tmp2 = tcg_const_i32(tcg_ctx, 0); - gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp2, tmp, fpstatus); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_2RM_VABS_F: - gen_vfp_abs(s, 0); - break; - case NEON_2RM_VNEG_F: - gen_vfp_neg(s, 0); - break; - case NEON_2RM_VSWP: - tmp2 = neon_load_reg(tcg_ctx, rd, pass); - neon_store_reg(tcg_ctx, rm, pass, tmp2); - break; - case NEON_2RM_VTRN: - tmp2 = neon_load_reg(tcg_ctx, rd, pass); - switch (size) { - case 0: gen_neon_trn_u8(tcg_ctx, tmp, tmp2); break; - case 1: gen_neon_trn_u16(tcg_ctx, tmp, tmp2); break; - default: abort(); - } - neon_store_reg(tcg_ctx, rm, pass, tmp2); - break; - case NEON_2RM_VRINTN: - case NEON_2RM_VRINTA: - case NEON_2RM_VRINTM: - case NEON_2RM_VRINTP: - case NEON_2RM_VRINTZ: - { - TCGv_i32 tcg_rmode; - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - int rmode; - - if (op == NEON_2RM_VRINTZ) { - rmode = FPROUNDING_ZERO; - } else { - rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1]; - } - - tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); - gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, - tcg_ctx->cpu_env); - gen_helper_rints(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpstatus); - gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, - tcg_ctx->cpu_env); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - tcg_temp_free_i32(tcg_ctx, tcg_rmode); - break; - } - case NEON_2RM_VRINTX: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - gen_helper_rints_exact(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpstatus); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_2RM_VCVTAU: - case NEON_2RM_VCVTAS: - case NEON_2RM_VCVTNU: - case NEON_2RM_VCVTNS: - case NEON_2RM_VCVTPU: - case NEON_2RM_VCVTPS: - case NEON_2RM_VCVTMU: - case NEON_2RM_VCVTMS: - { - bool is_signed = !extract32(insn, 7, 1); - TCGv_ptr fpst = get_fpstatus_ptr(s, 1); - TCGv_i32 tcg_rmode, tcg_shift; - int rmode = fp_decode_rm[extract32(insn, 8, 2)]; - - tcg_shift = tcg_const_i32(tcg_ctx, 0); - tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); - gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, - tcg_ctx->cpu_env); - - if (is_signed) { - gen_helper_vfp_tosls(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, - tcg_shift, fpst); - } else { - gen_helper_vfp_touls(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, - tcg_shift, fpst); - } - - gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, - tcg_ctx->cpu_env); - tcg_temp_free_i32(tcg_ctx, tcg_rmode); - tcg_temp_free_i32(tcg_ctx, tcg_shift); - tcg_temp_free_ptr(tcg_ctx, fpst); - break; - } - case NEON_2RM_VRECPE: 
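/*
 * Illustrative sketch: the VRINTx/VCVTx variants with a directed rounding
 * mode bracket the operation with a set/restore pair, the old mode being
 * swapped back through the same temporary:
 *
 *     tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode));
 *     gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env);
 *     gen_helper_rints(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpstatus);
 *     gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env);
 */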
- { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - gen_helper_recpe_u32(tcg_ctx, tmp, tmp, fpstatus); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_2RM_VRSQRTE: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - gen_helper_rsqrte_u32(tcg_ctx, tmp, tmp, fpstatus); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_2RM_VRECPE_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - gen_helper_recpe_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpstatus); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_2RM_VRSQRTE_F: - { - TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); - gen_helper_rsqrte_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpstatus); - tcg_temp_free_ptr(tcg_ctx, fpstatus); - break; - } - case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */ - gen_vfp_sito(s, 0, 1); - break; - case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */ - gen_vfp_uito(s, 0, 1); - break; - case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */ - gen_vfp_tosiz(s, 0, 1); - break; - case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */ - gen_vfp_touiz(s, 0, 1); - break; - default: - /* Reserved op values were caught by the - * neon_2rm_sizes[] check earlier. - */ - abort(); - } - if (neon_2rm_is_float_op(op)) { - tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, - neon_reg_offset(rd, pass)); - } else { - neon_store_reg(tcg_ctx, rd, pass, tmp); - } - } - break; - } - } else if ((insn & (1 << 10)) == 0) { - /* VTBL, VTBX. */ - int n = ((insn >> 8) & 3) + 1; - if ((rn + n) > 32) { - /* This is UNPREDICTABLE; we choose to UNDEF to avoid the - * helper function running off the end of the register file. - */ - return 1; - } - n <<= 3; - if (insn & (1 << 6)) { - tmp = neon_load_reg(tcg_ctx, rd, 0); - } else { - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, 0); - } - tmp2 = neon_load_reg(tcg_ctx, rm, 0); - tmp4 = tcg_const_i32(tcg_ctx, rn); - tmp5 = tcg_const_i32(tcg_ctx, n); - gen_helper_neon_tbl(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2, tmp, tmp4, tmp5); - tcg_temp_free_i32(tcg_ctx, tmp); - if (insn & (1 << 6)) { - tmp = neon_load_reg(tcg_ctx, rd, 1); - } else { - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, 0); - } - tmp3 = neon_load_reg(tcg_ctx, rm, 1); - gen_helper_neon_tbl(tcg_ctx, tmp3, tcg_ctx->cpu_env, tmp3, tmp, tmp4, tmp5); - tcg_temp_free_i32(tcg_ctx, tmp5); - tcg_temp_free_i32(tcg_ctx, tmp4); - neon_store_reg(tcg_ctx, rd, 0, tmp2); - neon_store_reg(tcg_ctx, rd, 1, tmp3); - tcg_temp_free_i32(tcg_ctx, tmp); - } else if ((insn & 0x380) == 0) { - /* VDUP */ - if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) { - return 1; - } - if (insn & (1 << 19)) { - tmp = neon_load_reg(tcg_ctx, rm, 1); - } else { - tmp = neon_load_reg(tcg_ctx, rm, 0); - } - if (insn & (1 << 16)) { - gen_neon_dup_u8(s, tmp, ((insn >> 17) & 3) * 8); - } else if (insn & (1 << 17)) { - if ((insn >> 18) & 1) - gen_neon_dup_high16(s, tmp); - else - gen_neon_dup_low16(s, tmp); - } - for (pass = 0; pass < (q ? 
4 : 2); pass++) { - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, tmp2, tmp); - neon_store_reg(tcg_ctx, rd, pass, tmp2); - } - tcg_temp_free_i32(tcg_ctx, tmp); - } else { - return 1; - } - } - } - return 0; -} - -static int disas_coproc_insn(DisasContext *s, uint32_t insn) -{ - int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2; - const ARMCPRegInfo *ri; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - cpnum = (insn >> 8) & 0xf; - - /* First check for coprocessor space used for XScale/iwMMXt insns */ - if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) { - if (extract32(s->c15_cpar, cpnum, 1) == 0) { - return 1; - } - if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { - return disas_iwmmxt_insn(s, insn); - } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) { - return disas_dsp_insn(s, insn); - } - return 1; - } - - /* Otherwise treat as a generic register access */ - is64 = (insn & (1 << 25)) == 0; - if (!is64 && ((insn & (1 << 4)) == 0)) { - /* cdp */ - return 1; - } - - crm = insn & 0xf; - if (is64) { - crn = 0; - opc1 = (insn >> 4) & 0xf; - opc2 = 0; - rt2 = (insn >> 16) & 0xf; - } else { - crn = (insn >> 16) & 0xf; - opc1 = (insn >> 21) & 7; - opc2 = (insn >> 5) & 7; - rt2 = 0; - } - isread = (insn >> 20) & 1; - rt = (insn >> 12) & 0xf; - - ri = get_arm_cp_reginfo(s->cp_regs, - ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2)); - if (ri) { - /* Check access permissions */ - if (!cp_access_ok(s->current_el, ri, isread)) { - return 1; - } - - if (ri->accessfn || - (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) { - /* Emit code to perform further access permissions checks at - * runtime; this may result in an exception. - * Note that on XScale all cp0..c13 registers do an access check - * call in order to handle c15_cpar. - */ - TCGv_ptr tmpptr; - TCGv_i32 tcg_syn; - uint32_t syndrome; - - /* Note that since we are an implementation which takes an - * exception on a trapped conditional instruction only if the - * instruction passes its condition code check, we can take - * advantage of the clause in the ARM ARM that allows us to set - * the COND field in the instruction to 0xE in all cases. - * We could fish the actual condition out of the insn (ARM) - * or the condexec bits (Thumb) but it isn't necessary. - */ - switch (cpnum) { - case 14: - if (is64) { - syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2, - isread, s->thumb); - } else { - syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm, - rt, isread, s->thumb); - } - break; - case 15: - if (is64) { - syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2, - isread, s->thumb); - } else { - syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm, - rt, isread, s->thumb); - } - break; - default: - /* ARMv8 defines that only coprocessors 14 and 15 exist, - * so this can only happen if this is an ARMv7 or earlier CPU, - * in which case the syndrome information won't actually be - * guest visible. 
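/*
 * Illustrative sketch: all MRC/MCR/MRRC/MCRR forms funnel into a single
 * reginfo hash lookup, keyed on the packed coprocessor/register fields:
 *
 *     ri = get_arm_cp_reginfo(s->cp_regs,
 *              ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
 */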
- */ - assert(!arm_dc_feature(s, ARM_FEATURE_V8)); - syndrome = syn_uncategorized(); - break; - } - - gen_set_pc_im(s, s->pc); - tmpptr = tcg_const_ptr(tcg_ctx, ri); - tcg_syn = tcg_const_i32(tcg_ctx, syndrome); - gen_helper_access_check_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tcg_syn); - tcg_temp_free_ptr(tcg_ctx, tmpptr); - tcg_temp_free_i32(tcg_ctx, tcg_syn); - } - - /* Handle special cases first */ - switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) { - case ARM_CP_NOP: - return 0; - case ARM_CP_WFI: - if (isread) { - return 1; - } - gen_set_pc_im(s, s->pc); - s->is_jmp = DISAS_WFI; - return 0; - default: - break; - } - - if (isread) { - /* Read */ - if (is64) { - TCGv_i64 tmp64; - TCGv_i32 tmp; - if (ri->type & ARM_CP_CONST) { - tmp64 = tcg_const_i64(tcg_ctx, ri->resetvalue); - } else if (ri->readfn) { - TCGv_ptr tmpptr; - tmp64 = tcg_temp_new_i64(tcg_ctx); - tmpptr = tcg_const_ptr(tcg_ctx, ri); - gen_helper_get_cp_reg64(tcg_ctx, tmp64, tcg_ctx->cpu_env, tmpptr); - tcg_temp_free_ptr(tcg_ctx, tmpptr); - } else { - tmp64 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ld_i64(tcg_ctx, tmp64, tcg_ctx->cpu_env, ri->fieldoffset); - } - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); - store_reg(s, rt, tmp); - tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 32); - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - store_reg(s, rt2, tmp); - } else { - TCGv_i32 tmp; - if (ri->type & ARM_CP_CONST) { - tmp = tcg_const_i32(tcg_ctx, ri->resetvalue); - } else if (ri->readfn) { - TCGv_ptr tmpptr; - tmp = tcg_temp_new_i32(tcg_ctx); - tmpptr = tcg_const_ptr(tcg_ctx, ri); - gen_helper_get_cp_reg(tcg_ctx, tmp, tcg_ctx->cpu_env, tmpptr); - tcg_temp_free_ptr(tcg_ctx, tmpptr); - } else { - tmp = load_cpu_offset(s->uc, ri->fieldoffset); - } - if (rt == 15) { - /* Destination register of r15 for 32 bit loads sets - * the condition codes from the high 4 bits of the value - */ - gen_set_nzcv(s, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - } else { - store_reg(s, rt, tmp); - } - } - } else { - /* Write */ - if (ri->type & ARM_CP_CONST) { - /* If not forbidden by access permissions, treat as WI */ - return 0; - } - - if (is64) { - TCGv_i32 tmplo, tmphi; - TCGv_i64 tmp64 = tcg_temp_new_i64(tcg_ctx); - tmplo = load_reg(s, rt); - tmphi = load_reg(s, rt2); - tcg_gen_concat_i32_i64(tcg_ctx, tmp64, tmplo, tmphi); - tcg_temp_free_i32(tcg_ctx, tmplo); - tcg_temp_free_i32(tcg_ctx, tmphi); - if (ri->writefn) { - TCGv_ptr tmpptr = tcg_const_ptr(tcg_ctx, ri); - gen_helper_set_cp_reg64(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tmp64); - tcg_temp_free_ptr(tcg_ctx, tmpptr); - } else { - tcg_gen_st_i64(tcg_ctx, tmp64, tcg_ctx->cpu_env, ri->fieldoffset); - } - tcg_temp_free_i64(tcg_ctx, tmp64); - } else { - if (ri->writefn) { - TCGv_i32 tmp; - TCGv_ptr tmpptr; - tmp = load_reg(s, rt); - tmpptr = tcg_const_ptr(tcg_ctx, ri); - gen_helper_set_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tmp); - tcg_temp_free_ptr(tcg_ctx, tmpptr); - tcg_temp_free_i32(tcg_ctx, tmp); - } else { - TCGv_i32 tmp = load_reg(s, rt); - store_cpu_offset(tcg_ctx, tmp, ri->fieldoffset); - } - } - } - - if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { - /* We default to ending the TB on a coprocessor register write, - * but allow this to be suppressed by the register definition - * (usually only necessary to work around guest bugs). - */ - gen_lookup_tb(s); - } - - return 0; - } - - /* Unknown register; this might be a guest error or a QEMU - * unimplemented feature. 
-     */
-    if (is64) {
-        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
-                      "64 bit system register cp:%d opc1: %d crm:%d\n",
-                      isread ? "read" : "write", cpnum, opc1, crm);
-    } else {
-        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
-                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
-                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
-    }
-
-    return 1;
-}
-
-
-/* Store a 64-bit value to a register pair.  Clobbers val.  */
-static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv_i32 tmp;
-    tmp = tcg_temp_new_i32(tcg_ctx);
-    tcg_gen_trunc_i64_i32(tcg_ctx, tmp, val);
-    store_reg(s, rlow, tmp);
-    tmp = tcg_temp_new_i32(tcg_ctx);
-    tcg_gen_shri_i64(tcg_ctx, val, val, 32);
-    tcg_gen_trunc_i64_i32(tcg_ctx, tmp, val);
-    store_reg(s, rhigh, tmp);
-}
-
-/* load a 32-bit value from a register and perform a 64-bit accumulate.  */
-static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv_i64 tmp;
-    TCGv_i32 tmp2;
-
-    /* Load value and extend to 64 bits.  */
-    tmp = tcg_temp_new_i64(tcg_ctx);
-    tmp2 = load_reg(s, rlow);
-    tcg_gen_extu_i32_i64(tcg_ctx, tmp, tmp2);
-    tcg_temp_free_i32(tcg_ctx, tmp2);
-    tcg_gen_add_i64(tcg_ctx, val, val, tmp);
-    tcg_temp_free_i64(tcg_ctx, tmp);
-}
-
-/* load and add a 64-bit value from a register pair.  */
-static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv_i64 tmp;
-    TCGv_i32 tmpl;
-    TCGv_i32 tmph;
-
-    /* Load 64-bit value rd:rn.  */
-    tmpl = load_reg(s, rlow);
-    tmph = load_reg(s, rhigh);
-    tmp = tcg_temp_new_i64(tcg_ctx);
-    tcg_gen_concat_i32_i64(tcg_ctx, tmp, tmpl, tmph);
-    tcg_temp_free_i32(tcg_ctx, tmpl);
-    tcg_temp_free_i32(tcg_ctx, tmph);
-    tcg_gen_add_i64(tcg_ctx, val, val, tmp);
-    tcg_temp_free_i64(tcg_ctx, tmp);
-}
-
-/* Set N and Z flags from hi|lo.  */
-static void gen_logicq_cc(DisasContext *s, TCGv_i32 lo, TCGv_i32 hi)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, hi);
-    tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_ZF, lo, hi);
-}
-
-/* Load/Store exclusive instructions are implemented by remembering
-   the value/address loaded, and seeing if these are the same
-   when the store is performed.  This should be sufficient to implement
-   the architecturally mandated semantics, and avoids having to monitor
-   regular stores.
-
-   In system emulation mode only one CPU will be running at once, so
-   this sequence is effectively atomic.  In user emulation mode we
-   throw an exception and handle the atomic operation elsewhere.  */
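/*
 * For illustration (guest code, register choice arbitrary): the idiom the
 * machinery below implements is the classic LDREX/STREX retry loop, here
 * an atomic increment of [r0]:
 *
 *     retry:  ldrex   r1, [r0]
 *             add     r1, r1, #1
 *             strex   r2, r1, [r0]     @ r2 = 0 on success
 *             cmp     r2, #0
 *             bne     retry
 *
 * gen_load_exclusive() records the address/value pair and
 * gen_store_exclusive() re-checks it before letting the store through.
 */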
*/ -static void gen_load_exclusive(DisasContext *s, int rt, int rt2, - TCGv_i32 addr, int size) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - - s->is_ldex = true; - - switch (size) { - case 0: - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - break; - case 1: - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - break; - case 2: - case 3: - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - break; - default: - abort(); - } - - if (size == 3) { - TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 tmp3 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_addi_i32(tcg_ctx, tmp2, addr, 4); - gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, tmp, tmp3); - store_reg(s, rt2, tmp3); - } else { - tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, tmp); - } - - store_reg(s, rt, tmp); - tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, addr); -} - -static void gen_clrex(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1); -} - -#ifdef CONFIG_USER_ONLY -static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, - TCGv_i32 addr, int size) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_extu_i32_i64(tcg_ctx, cpu_exclusive_test, addr); - tcg_gen_movi_i32(tcg_ctx, cpu_exclusive_info, - size | (rd << 4) | (rt << 8) | (rt2 << 12)); - gen_exception_internal_insn(s, 4, EXCP_STREX); -} -#else -static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, - TCGv_i32 addr, int size) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tmp; - TCGv_i64 val64, extaddr; - int done_label; - int fail_label; - - /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { - [addr] = {Rt}; - {Rd} = 0; - } else { - {Rd} = 1; - } */ - fail_label = gen_new_label(tcg_ctx); - done_label = gen_new_label(tcg_ctx); - extaddr = tcg_temp_new_i64(tcg_ctx); - tcg_gen_extu_i32_i64(tcg_ctx, extaddr, addr); - tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, extaddr, tcg_ctx->cpu_exclusive_addr, fail_label); - tcg_temp_free_i64(tcg_ctx, extaddr); - - tmp = tcg_temp_new_i32(tcg_ctx); - switch (size) { - case 0: - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - break; - case 1: - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - break; - case 2: - case 3: - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - break; - default: - abort(); - } - - val64 = tcg_temp_new_i64(tcg_ctx); - if (size == 3) { - TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 tmp3 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_addi_i32(tcg_ctx, tmp2, addr, 4); - gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_gen_concat_i32_i64(tcg_ctx, val64, tmp, tmp3); - tcg_temp_free_i32(tcg_ctx, tmp3); - } else { - tcg_gen_extu_i32_i64(tcg_ctx, val64, tmp); - } - tcg_temp_free_i32(tcg_ctx, tmp); - - tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, val64, tcg_ctx->cpu_exclusive_val, fail_label); - tcg_temp_free_i64(tcg_ctx, val64); - - tmp = load_reg(s, rt); - switch (size) { - case 0: - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - break; - case 1: - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - break; - case 2: - case 3: - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - break; - default: - abort(); - } - tcg_temp_free_i32(tcg_ctx, tmp); - if (size == 3) { - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - tmp = load_reg(s, rt2); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - 
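        /* A minimal C model of the monitor check encoded above as TCG ops
         * (mem_read32/mem_write32 are illustrative stand-ins, not the real
         * helpers): the exclusive load records (addr, val); the store only
         * commits when both still match, and Rd reports the outcome.
         *
         *     static int model_store_exclusive(CPUARMState *env,
         *                                      uint32_t addr, uint32_t val)
         *     {
         *         if (env->exclusive_addr == addr &&
         *             env->exclusive_val == mem_read32(env, addr)) {
         *             mem_write32(env, addr, val);
         *             return 0;            // Rd = 0: success
         *         }
         *         return 1;                // Rd = 1: failure, guest retries
         *     }
         */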
tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[rd], 0); - tcg_gen_br(tcg_ctx, done_label); - gen_set_label(tcg_ctx, fail_label); - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[rd], 1); - gen_set_label(tcg_ctx, done_label); - tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1); -} -#endif - -/* gen_srs: - * @env: CPUARMState - * @s: DisasContext - * @mode: mode field from insn (which stack to store to) - * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn - * @writeback: true if writeback bit set - * - * Generate code for the SRS (Store Return State) insn. - */ -static void gen_srs(DisasContext *s, - uint32_t mode, uint32_t amode, bool writeback) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int32_t offset; - TCGv_i32 addr = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 tmp = tcg_const_i32(tcg_ctx, mode); - gen_helper_get_r13_banked(tcg_ctx, addr, tcg_ctx->cpu_env, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - switch (amode) { - case 0: /* DA */ - offset = -4; - break; - case 1: /* IA */ - offset = 0; - break; - case 2: /* DB */ - offset = -8; - break; - case 3: /* IB */ - offset = 4; - break; - default: - abort(); - } - tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); - tmp = load_reg(s, 14); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - tmp = load_cpu_field(s->uc, spsr); - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - if (writeback) { - switch (amode) { - case 0: - offset = -8; - break; - case 1: - offset = 4; - break; - case 2: - offset = -4; - break; - case 3: - offset = 0; - break; - default: - abort(); - } - tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); - tmp = tcg_const_i32(tcg_ctx, mode); - gen_helper_set_r13_banked(tcg_ctx, tcg_ctx->cpu_env, tmp, addr); - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_temp_free_i32(tcg_ctx, addr); -} - -static void disas_arm_insn(DisasContext *s, unsigned int insn) // qq -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh; - TCGv_i32 tmp; - TCGv_i32 tmp2; - TCGv_i32 tmp3; - TCGv_i32 addr; - TCGv_i64 tmp64; - - /* M variants do not implement ARM mode. */ - if (arm_dc_feature(s, ARM_FEATURE_M)) { - goto illegal_op; - } - - // Unicorn: trace this instruction on request - if (HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_CODE, s->pc - 4)) { - gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, s->uc, s->pc - 4); - // the callback might want to stop emulation immediately - check_exit_request(tcg_ctx); - } - - cond = insn >> 28; - if (cond == 0xf){ - /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we - * choose to UNDEF. In ARMv5 and above the space is used - * for miscellaneous unconditional instructions. - */ - ARCH(5); - - /* Unconditional instructions. */ - if (((insn >> 25) & 7) == 1) { - /* NEON Data processing. */ - if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { - goto illegal_op; - } - - if (disas_neon_data_insn(s, insn)) { - goto illegal_op; - } - return; - } - if ((insn & 0x0f100000) == 0x04000000) { - /* NEON load/store. */ - if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { - goto illegal_op; - } - - if (disas_neon_ls_insn(s, insn)) { - goto illegal_op; - } - return; - } - if ((insn & 0x0f000e10) == 0x0e000a00) { - /* VFP. 
*/ - if (disas_vfp_insn(s, insn)) { - goto illegal_op; - } - return; - } - if (((insn & 0x0f30f000) == 0x0510f000) || - ((insn & 0x0f30f010) == 0x0710f000)) { - if ((insn & (1 << 22)) == 0) { - /* PLDW; v7MP */ - if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) { - goto illegal_op; - } - } - /* Otherwise PLD; v5TE+ */ - ARCH(5TE); - return; - } - if (((insn & 0x0f70f000) == 0x0450f000) || - ((insn & 0x0f70f010) == 0x0650f000)) { - ARCH(7); - return; /* PLI; V7 */ - } - if (((insn & 0x0f700000) == 0x04100000) || - ((insn & 0x0f700010) == 0x06100000)) { - if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) { - goto illegal_op; - } - return; /* v7MP: Unallocated memory hint: must NOP */ - } - - if ((insn & 0x0ffffdff) == 0x01010000) { - ARCH(6); - /* setend */ - if (((insn >> 9) & 1) != s->bswap_code) { - /* Dynamic endianness switching not implemented. */ - qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n"); - goto illegal_op; - } - return; - } else if ((insn & 0x0fffff00) == 0x057ff000) { - switch ((insn >> 4) & 0xf) { - case 1: /* clrex */ - ARCH(6K); - gen_clrex(s); - return; - case 4: /* dsb */ - case 5: /* dmb */ - case 6: /* isb */ - ARCH(7); - /* We don't emulate caches so these are a no-op. */ - return; - default: - goto illegal_op; - } - } else if ((insn & 0x0e5fffe0) == 0x084d0500) { - /* srs */ - if (IS_USER(s)) { - goto illegal_op; - } - ARCH(6); - gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21)); - return; - } else if ((insn & 0x0e50ffe0) == 0x08100a00) { - /* rfe */ - int32_t offset; - if (IS_USER(s)) - goto illegal_op; - ARCH(6); - rn = (insn >> 16) & 0xf; - addr = load_reg(s, rn); - i = (insn >> 23) & 3; - switch (i) { - case 0: offset = -4; break; /* DA */ - case 1: offset = 0; break; /* IA */ - case 2: offset = -8; break; /* DB */ - case 3: offset = 4; break; /* IB */ - default: abort(); - } - if (offset) - tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); - /* Load PC into tmp and CPSR into tmp2. */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - tmp2 = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s)); - if (insn & (1 << 21)) { - /* Base writeback. */ - switch (i) { - case 0: offset = -8; break; - case 1: offset = 4; break; - case 2: offset = -4; break; - case 3: offset = 0; break; - default: abort(); - } - if (offset) - tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(tcg_ctx, addr); - } - gen_rfe(s, tmp, tmp2); - return; - } else if ((insn & 0x0e000000) == 0x0a000000) { - /* branch link and change to thumb (blx ) */ - int32_t offset; - - val = (uint32_t)s->pc; - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, val); - store_reg(s, 14, tmp); - /* Sign-extend the 24-bit offset */ - offset = ((int32_t)(insn << 8)) >> 8; - /* offset * 4 + bit24 * 2 + (thumb bit) */ - val += (((uint32_t)offset) << 2) | ((insn >> 23) & 2) | 1; - /* pipeline offset */ - val += 4; - /* protected by ARCH(5); above, near the start of uncond block */ - gen_bx_im(s, val); - return; - } else if ((insn & 0x0e000f00) == 0x0c000100) { - if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { - /* iWMMXt register transfer. */ - if (extract32(s->c15_cpar, 1, 1)) { - if (!disas_iwmmxt_insn(s, insn)) { - return; - } - } - } - } else if ((insn & 0x0fe00000) == 0x0c400000) { - /* Coprocessor double register transfer. */ - ARCH(5TE); - } else if ((insn & 0x0f000010) == 0x0e000010) { - /* Additional coprocessor register transfer. 
*/ - } else if ((insn & 0x0ff10020) == 0x01000000) { - uint32_t mask; - uint32_t val; - /* cps (privileged) */ - if (IS_USER(s)) - return; - mask = val = 0; - if (insn & (1 << 19)) { - if (insn & (1 << 8)) - mask |= CPSR_A; - if (insn & (1 << 7)) - mask |= CPSR_I; - if (insn & (1 << 6)) - mask |= CPSR_F; - if (insn & (1 << 18)) - val |= mask; - } - if (insn & (1 << 17)) { - mask |= CPSR_M; - val |= (insn & 0x1f); - } - if (mask) { - gen_set_psr_im(s, mask, 0, val); - } - return; - } - goto illegal_op; - } - if (cond != 0xe) { - /* if not always execute, we generate a conditional jump to - next instruction */ - s->condlabel = gen_new_label(tcg_ctx); - arm_gen_test_cc(tcg_ctx, cond ^ 1, s->condlabel); - s->condjmp = 1; - } - if ((insn & 0x0f900000) == 0x03000000) { - if ((insn & (1 << 21)) == 0) { - ARCH(6T2); - rd = (insn >> 12) & 0xf; - val = ((insn >> 4) & 0xf000) | (insn & 0xfff); - if ((insn & (1 << 22)) == 0) { - /* MOVW */ - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, val); - } else { - /* MOVT */ - tmp = load_reg(s, rd); - tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); - tcg_gen_ori_i32(tcg_ctx, tmp, tmp, val << 16); - } - store_reg(s, rd, tmp); - } else { - if (((insn >> 12) & 0xf) != 0xf) - goto illegal_op; - if (((insn >> 16) & 0xf) == 0) { - gen_nop_hint(s, insn & 0xff); - } else { - /* CPSR = immediate */ - val = insn & 0xff; - shift = ((insn >> 8) & 0xf) * 2; - if (shift) - val = (val >> shift) | (val << (32 - shift)); - i = ((insn & (1 << 22)) != 0); - if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i), - i, val)) { - goto illegal_op; - } - } - } - } else if ((insn & 0x0f900000) == 0x01000000 - && (insn & 0x00000090) != 0x00000090) { - /* miscellaneous instructions */ - op1 = (insn >> 21) & 3; - sh = (insn >> 4) & 0xf; - rm = insn & 0xf; - switch (sh) { - case 0x0: /* move program status register */ - if (op1 & 1) { - /* PSR = reg */ - tmp = load_reg(s, rm); - i = ((op1 & 2) != 0); - if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp)) - goto illegal_op; - } else { - /* reg = PSR */ - rd = (insn >> 12) & 0xf; - if (op1 & 2) { - if (IS_USER(s)) - goto illegal_op; - tmp = load_cpu_field(s->uc, spsr); - } else { - tmp = tcg_temp_new_i32(tcg_ctx); - gen_helper_cpsr_read(tcg_ctx, tmp, tcg_ctx->cpu_env); - } - store_reg(s, rd, tmp); - } - break; - case 0x1: - if (op1 == 1) { - /* branch/exchange thumb (bx). */ - ARCH(4T); - tmp = load_reg(s, rm); - gen_bx(s, tmp); - } else if (op1 == 3) { - /* clz */ - ARCH(5); - rd = (insn >> 12) & 0xf; - tmp = load_reg(s, rm); - gen_helper_clz(tcg_ctx, tmp, tmp); - store_reg(s, rd, tmp); - } else { - goto illegal_op; - } - break; - case 0x2: - if (op1 == 1) { - ARCH(5J); /* bxj */ - /* Trivial implementation equivalent to bx. */ - tmp = load_reg(s, rm); - gen_bx(s, tmp); - } else { - goto illegal_op; - } - break; - case 0x3: - if (op1 != 1) - goto illegal_op; - - ARCH(5); - /* branch link/exchange thumb (blx) */ - tmp = load_reg(s, rm); - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp2, s->pc); - store_reg(s, 14, tmp2); - gen_bx(s, tmp); - break; - case 0x4: - { - /* crc32/crc32c */ - uint32_t c = extract32(insn, 8, 4); - - /* Check this CPU supports ARMv8 CRC instructions. - * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED. - * Bits 8, 10 and 11 should be zero. 
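             * As a worked decode example (masks taken from the code below):
             * op1 selects the chunk width, so the helper consumes 1 << op1
             * bytes per step --
             *     op1 == 0: CRC32B, operand masked to 0xff,   size 1
             *     op1 == 1: CRC32H, operand masked to 0xffff, size 2
             *     op1 == 2: CRC32W, full 32-bit operand,      size 4
             * -- and bit 1 of c (insn bit 9) picks the CRC-32C polynomial
             * over plain CRC-32.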
- */ - if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 || - (c & 0xd) != 0) { - goto illegal_op; - } - - rn = extract32(insn, 16, 4); - rd = extract32(insn, 12, 4); - - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - if (op1 == 0) { - tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xff); - } else if (op1 == 1) { - tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xffff); - } - tmp3 = tcg_const_i32(tcg_ctx, 1 << op1); - if (c & 0x2) { - gen_helper_crc32c(tcg_ctx, tmp, tmp, tmp2, tmp3); - } else { - gen_helper_crc32(tcg_ctx, tmp, tmp, tmp2, tmp3); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp3); - store_reg(s, rd, tmp); - break; - } - case 0x5: /* saturating add/subtract */ - ARCH(5TE); - rd = (insn >> 12) & 0xf; - rn = (insn >> 16) & 0xf; - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rn); - if (op1 & 2) - gen_helper_double_saturate(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2); - if (op1 & 1) - gen_helper_sub_saturate(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - else - gen_helper_add_saturate(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - break; - case 7: - { - int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4); - switch (op1) { - case 1: - /* bkpt */ - ARCH(5); - gen_exception_insn(s, 4, EXCP_BKPT, - syn_aa32_bkpt(imm16, false)); - break; - case 2: - /* Hypervisor call (v7) */ - ARCH(7); - if (IS_USER(s)) { - goto illegal_op; - } - gen_hvc(s, imm16); - break; - case 3: - /* Secure monitor call (v6+) */ - ARCH(6K); - if (IS_USER(s)) { - goto illegal_op; - } - gen_smc(s); - break; - default: - goto illegal_op; - } - break; - } - case 0x8: /* signed multiply */ - case 0xa: - case 0xc: - case 0xe: - ARCH(5TE); - rs = (insn >> 8) & 0xf; - rn = (insn >> 12) & 0xf; - rd = (insn >> 16) & 0xf; - if (op1 == 1) { - /* (32 * 16) >> 16 */ - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - if (sh & 4) - tcg_gen_sari_i32(tcg_ctx, tmp2, tmp2, 16); - else - gen_sxth(tmp2); - tmp64 = gen_muls_i64_i32(s, tmp, tmp2); - tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 16); - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - if ((sh & 2) == 0) { - tmp2 = load_reg(s, rn); - gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - store_reg(s, rd, tmp); - } else { - /* 16 * 16 */ - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - gen_mulxy(s, tmp, tmp2, sh & 2, sh & 4); - tcg_temp_free_i32(tcg_ctx, tmp2); - if (op1 == 2) { - tmp64 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext_i32_i64(tcg_ctx, tmp64, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - gen_addq(s, tmp64, rn, rd); - gen_storeq_reg(s, rn, rd, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - } else { - if (op1 == 0) { - tmp2 = load_reg(s, rn); - gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - store_reg(s, rd, tmp); - } - } - break; - default: - goto illegal_op; - } - } else if (((insn & 0x0e000000) == 0 && - (insn & 0x00000090) != 0x90) || - ((insn & 0x0e000000) == (1 << 25))) { - int set_cc, logic_cc, shiftop; - - op1 = (insn >> 21) & 0xf; - set_cc = (insn >> 20) & 1; - logic_cc = table_logic_cc[op1] & set_cc; - - /* data processing instruction */ - if (insn & (1 << 25)) { - /* immediate operand */ - val = insn & 0xff; - shift = ((insn >> 8) & 0xf) * 2; - if (shift) { - val = (val >> shift) | (val << (32 - shift)); - } - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp2, val); - if (logic_cc && 
shift) { - gen_set_CF_bit31(s, tmp2); - } - } else { - /* register */ - rm = (insn) & 0xf; - tmp2 = load_reg(s, rm); - shiftop = (insn >> 5) & 3; - if (!(insn & (1 << 4))) { - shift = (insn >> 7) & 0x1f; - gen_arm_shift_im(s, tmp2, shiftop, shift, logic_cc); - } else { - rs = (insn >> 8) & 0xf; - tmp = load_reg(s, rs); - gen_arm_shift_reg(s, tmp2, shiftop, tmp, logic_cc); - } - } - if (op1 != 0x0f && op1 != 0x0d) { - rn = (insn >> 16) & 0xf; - tmp = load_reg(s, rn); - } else { - TCGV_UNUSED_I32(tmp); - } - rd = (insn >> 12) & 0xf; - switch(op1) { - case 0x00: - tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(s, tmp); - } - store_reg_bx(s, rd, tmp); - break; - case 0x01: - tcg_gen_xor_i32(tcg_ctx, tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(s, tmp); - } - store_reg_bx(s, rd, tmp); - break; - case 0x02: - if (set_cc && rd == 15) { - /* SUBS r15, ... is used for exception return. */ - if (IS_USER(s)) { - goto illegal_op; - } - gen_sub_CC(s, tmp, tmp, tmp2); - gen_exception_return(s, tmp); - } else { - if (set_cc) { - gen_sub_CC(s, tmp, tmp, tmp2); - } else { - tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); - } - store_reg_bx(s, rd, tmp); - } - break; - case 0x03: - if (set_cc) { - gen_sub_CC(s, tmp, tmp2, tmp); - } else { - tcg_gen_sub_i32(tcg_ctx, tmp, tmp2, tmp); - } - store_reg_bx(s, rd, tmp); - break; - case 0x04: - if (set_cc) { - gen_add_CC(s, tmp, tmp, tmp2); - } else { - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - } - store_reg_bx(s, rd, tmp); - break; - case 0x05: - if (set_cc) { - gen_adc_CC(s, tmp, tmp, tmp2); - } else { - gen_add_carry(s, tmp, tmp, tmp2); - } - store_reg_bx(s, rd, tmp); - break; - case 0x06: - if (set_cc) { - gen_sbc_CC(s, tmp, tmp, tmp2); - } else { - gen_sub_carry(s, tmp, tmp, tmp2); - } - store_reg_bx(s, rd, tmp); - break; - case 0x07: - if (set_cc) { - gen_sbc_CC(s, tmp, tmp2, tmp); - } else { - gen_sub_carry(s, tmp, tmp2, tmp); - } - store_reg_bx(s, rd, tmp); - break; - case 0x08: - if (set_cc) { - tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); - gen_logic_CC(s, tmp); - } - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 0x09: - if (set_cc) { - tcg_gen_xor_i32(tcg_ctx, tmp, tmp, tmp2); - gen_logic_CC(s, tmp); - } - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 0x0a: - if (set_cc) { - gen_sub_CC(s, tmp, tmp, tmp2); - } - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 0x0b: - if (set_cc) { - gen_add_CC(s, tmp, tmp, tmp2); - } - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 0x0c: - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(s, tmp); - } - store_reg_bx(s, rd, tmp); - break; - case 0x0d: - if (logic_cc && rd == 15) { - /* MOVS r15, ... is used for exception return. 
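                 * That is, with S set and Rd == r15 the ALU result becomes
                 * the new PC while SPSR is copied back into CPSR -- roughly:
                 *
                 *     cpsr = spsr;      // gen_exception_return()
                 *     pc   = result;
                 *
                 * which is why the user-mode check below must UNDEF.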
*/ - if (IS_USER(s)) { - goto illegal_op; - } - gen_exception_return(s, tmp2); - } else { - if (logic_cc) { - gen_logic_CC(s, tmp2); - } - store_reg_bx(s, rd, tmp2); - } - break; - case 0x0e: - tcg_gen_andc_i32(tcg_ctx, tmp, tmp, tmp2); - if (logic_cc) { - gen_logic_CC(s, tmp); - } - store_reg_bx(s, rd, tmp); - break; - default: - case 0x0f: - tcg_gen_not_i32(tcg_ctx, tmp2, tmp2); - if (logic_cc) { - gen_logic_CC(s, tmp2); - } - store_reg_bx(s, rd, tmp2); - break; - } - if (op1 != 0x0f && op1 != 0x0d) { - tcg_temp_free_i32(tcg_ctx, tmp2); - } - } else { - /* other instructions */ - op1 = (insn >> 24) & 0xf; - switch(op1) { - case 0x0: - case 0x1: - /* multiplies, extra load/stores */ - sh = (insn >> 5) & 3; - if (sh == 0) { - if (op1 == 0x0) { - rd = (insn >> 16) & 0xf; - rn = (insn >> 12) & 0xf; - rs = (insn >> 8) & 0xf; - rm = (insn) & 0xf; - op1 = (insn >> 20) & 0xf; - switch (op1) { - case 0: case 1: case 2: case 3: case 6: - /* 32 bit mul */ - tmp = load_reg(s, rs); - tmp2 = load_reg(s, rm); - tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - if (insn & (1 << 22)) { - /* Subtract (mls) */ - ARCH(6T2); - tmp2 = load_reg(s, rn); - tcg_gen_sub_i32(tcg_ctx, tmp, tmp2, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - } else if (insn & (1 << 21)) { - /* Add */ - tmp2 = load_reg(s, rn); - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - if (insn & (1 << 20)) - gen_logic_CC(s, tmp); - store_reg(s, rd, tmp); - break; - case 4: - /* 64 bit mul double accumulate (UMAAL) */ - ARCH(6); - tmp = load_reg(s, rs); - tmp2 = load_reg(s, rm); - tmp64 = gen_mulu_i64_i32(s, tmp, tmp2); - gen_addq_lo(s, tmp64, rn); - gen_addq_lo(s, tmp64, rd); - gen_storeq_reg(s, rn, rd, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - break; - case 8: case 9: case 10: case 11: - case 12: case 13: case 14: case 15: - /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. 
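                         * A minimal sketch of what the mulu2/add2 sequence
                         * below computes, unsigned case in plain C:
                         *
                         *     uint64_t p = (uint64_t)a * b;       // mulu2
                         *     if (accumulate)                     // UMLAL
                         *         p += ((uint64_t)hi << 32) | lo; // add2
                         *     lo = (uint32_t)p;
                         *     hi = (uint32_t)(p >> 32);
                         *
                         * (a, b and lo, hi name the decoded rs/rm and rn/rd
                         * register pairs; the signed case uses muls2.)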
*/ - tmp = load_reg(s, rs); - tmp2 = load_reg(s, rm); - if (insn & (1 << 22)) { - tcg_gen_muls2_i32(tcg_ctx, tmp, tmp2, tmp, tmp2); - } else { - tcg_gen_mulu2_i32(tcg_ctx, tmp, tmp2, tmp, tmp2); - } - if (insn & (1 << 21)) { /* mult accumulate */ - TCGv_i32 al = load_reg(s, rn); - TCGv_i32 ah = load_reg(s, rd); - tcg_gen_add2_i32(tcg_ctx, tmp, tmp2, tmp, tmp2, al, ah); - tcg_temp_free_i32(tcg_ctx, al); - tcg_temp_free_i32(tcg_ctx, ah); - } - if (insn & (1 << 20)) { - gen_logicq_cc(s, tmp, tmp2); - } - store_reg(s, rn, tmp); - store_reg(s, rd, tmp2); - break; - default: - goto illegal_op; - } - } else { - rn = (insn >> 16) & 0xf; - rd = (insn >> 12) & 0xf; - if (insn & (1 << 23)) { - /* load/store exclusive */ - int op2 = (insn >> 8) & 3; - op1 = (insn >> 21) & 0x3; - - switch (op2) { - case 0: /* lda/stl */ - if (op1 == 1) { - goto illegal_op; - } - ARCH(8); - break; - case 1: /* reserved */ - goto illegal_op; - case 2: /* ldaex/stlex */ - ARCH(8); - break; - case 3: /* ldrex/strex */ - if (op1) { - ARCH(6K); - } else { - ARCH(6); - } - break; - } - - addr = tcg_temp_local_new_i32(tcg_ctx); - load_reg_var(s, addr, rn); - - /* Since the emulation does not have barriers, - the acquire/release semantics need no special - handling */ - if (op2 == 0) { - if (insn & (1 << 20)) { - tmp = tcg_temp_new_i32(tcg_ctx); - switch (op1) { - case 0: /* lda */ - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - break; - case 2: /* ldab */ - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - break; - case 3: /* ldah */ - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - break; - default: - abort(); - } - store_reg(s, rd, tmp); - } else { - rm = insn & 0xf; - tmp = load_reg(s, rm); - switch (op1) { - case 0: /* stl */ - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - break; - case 2: /* stlb */ - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - break; - case 3: /* stlh */ - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - break; - default: - abort(); - } - tcg_temp_free_i32(tcg_ctx, tmp); - } - } else if (insn & (1 << 20)) { - switch (op1) { - case 0: /* ldrex */ - gen_load_exclusive(s, rd, 15, addr, 2); - break; - case 1: /* ldrexd */ - gen_load_exclusive(s, rd, rd + 1, addr, 3); - break; - case 2: /* ldrexb */ - gen_load_exclusive(s, rd, 15, addr, 0); - break; - case 3: /* ldrexh */ - gen_load_exclusive(s, rd, 15, addr, 1); - break; - default: - abort(); - } - } else { - rm = insn & 0xf; - switch (op1) { - case 0: /* strex */ - gen_store_exclusive(s, rd, rm, 15, addr, 2); - break; - case 1: /* strexd */ - gen_store_exclusive(s, rd, rm, rm + 1, addr, 3); - break; - case 2: /* strexb */ - gen_store_exclusive(s, rd, rm, 15, addr, 0); - break; - case 3: /* strexh */ - gen_store_exclusive(s, rd, rm, 15, addr, 1); - break; - default: - abort(); - } - } - tcg_temp_free_i32(tcg_ctx, addr); - } else { - /* SWP instruction */ - rm = (insn) & 0xf; - - /* ??? This is not really atomic. However we know - we never have multiple CPUs running in parallel, - so it is good enough. 
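                       Functionally SWP is a load-then-store exchange; as a
                       sketch (mem_read/mem_write stand in for the gen_aa32_*
                       ops, width 8 or 32 per insn bit 22):

                           old = mem_read(addr);
                           mem_write(addr, reg[rm]);
                           reg[rd] = old;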
 */
-                    addr = load_reg(s, rn);
-                    tmp = load_reg(s, rm);
-                    tmp2 = tcg_temp_new_i32(tcg_ctx);
-                    if (insn & (1 << 22)) {
-                        gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
-                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
-                    } else {
-                        gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
-                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
-                    }
-                    tcg_temp_free_i32(tcg_ctx, tmp);
-                    tcg_temp_free_i32(tcg_ctx, addr);
-                    store_reg(s, rd, tmp2);
-                }
-            }
-        } else {
-            int address_offset;
-            int load = insn & (1 << 20);
-            int wbit = insn & (1 << 21);
-            int pbit = insn & (1 << 24);
-            int doubleword = 0;
-            /* Misc load/store */
-            rn = (insn >> 16) & 0xf;
-            rd = (insn >> 12) & 0xf;
-            if (!load && (sh & 2)) {
-                /* doubleword */
-                ARCH(5TE);
-                if (rd & 1) {
-                    /* UNPREDICTABLE; we choose to UNDEF */
-                    goto illegal_op;
-                }
-                load = (sh & 1) == 0;
-                doubleword = 1;
-            }
-            addr = load_reg(s, rn);
-            if (pbit)
-                gen_add_datah_offset(s, insn, 0, addr);
-            address_offset = 0;
-            if (doubleword) {
-                if (!load) {
-                    /* store */
-                    tmp = load_reg(s, rd);
-                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
-                    tcg_temp_free_i32(tcg_ctx, tmp);
-                    tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
-                    tmp = load_reg(s, rd + 1);
-                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
-                    tcg_temp_free_i32(tcg_ctx, tmp);
-                } else {
-                    /* load */
-                    tmp = tcg_temp_new_i32(tcg_ctx);
-                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
-                    store_reg(s, rd, tmp);
-                    tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
-                    tmp = tcg_temp_new_i32(tcg_ctx);
-                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
-                    rd++;
-                }
-                address_offset = -4;
-            } else if (load) {
-                /* load */
-                tmp = tcg_temp_new_i32(tcg_ctx);
-                switch(sh) {
-                case 1:
-                    gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
-                    break;
-                case 2:
-                    gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
-                    break;
-                default:
-                case 3:
-                    gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
-                    break;
-                }
-            } else {
-                /* store */
-                tmp = load_reg(s, rd);
-                gen_aa32_st16(s, tmp, addr, get_mem_index(s));
-                tcg_temp_free_i32(tcg_ctx, tmp);
-            }
-            /* Perform base writeback before the loaded value to
-               ensure correct behavior with overlapping index registers.
-               ldrd with base writeback is undefined if the
-               destination and index registers overlap. */
-            if (!pbit) {
-                gen_add_datah_offset(s, insn, address_offset, addr);
-                store_reg(s, rn, addr);
-            } else if (wbit) {
-                if (address_offset)
-                    tcg_gen_addi_i32(tcg_ctx, addr, addr, address_offset);
-                store_reg(s, rn, addr);
-            } else {
-                tcg_temp_free_i32(tcg_ctx, addr);
-            }
-            if (load) {
-                /* Complete the load. */
-                store_reg(s, rd, tmp);
-            }
-        }
-        break;
-    case 0x4:
-    case 0x5:
-        goto do_ldst;
-    case 0x6:
-    case 0x7:
-        if (insn & (1 << 4)) {
-            ARCH(6);
-            /* ARMv6 Media instructions. */
-            rm = insn & 0xf;
-            rn = (insn >> 16) & 0xf;
-            rd = (insn >> 12) & 0xf;
-            rs = (insn >> 8) & 0xf;
-            switch ((insn >> 23) & 3) {
-            case 0: /* Parallel add/subtract. */
-                op1 = (insn >> 20) & 7;
-                tmp = load_reg(s, rn);
-                tmp2 = load_reg(s, rm);
-                sh = (insn >> 5) & 7;
-                if ((op1 & 3) == 0 || sh == 5 || sh == 6)
-                    goto illegal_op;
-                gen_arm_parallel_addsub(s, op1, sh, tmp, tmp2);
-                tcg_temp_free_i32(tcg_ctx, tmp2);
-                store_reg(s, rd, tmp);
-                break;
-            case 1:
-                if ((insn & 0x00700020) == 0) {
-                    /* Halfword pack.
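                     * PKHBT/PKHTB just splice two halfwords; the shifts
                     * below amount to:
                     *
                     *     res = (rn & 0x0000ffff)
                     *         | ((rm << shift) & 0xffff0000);      // pkhbt
                     *     res = (rn & 0xffff0000)
                     *         | (((int32_t)rm >> shift) & 0xffff); // pkhtb
                     *
                     * with a pkhtb shift of 0 meaning an arithmetic shift
                     * by 31.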
*/ - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - shift = (insn >> 7) & 0x1f; - if (insn & (1 << 6)) { - /* pkhtb */ - if (shift == 0) - shift = 31; - tcg_gen_sari_i32(tcg_ctx, tmp2, tmp2, shift); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xffff0000); - tcg_gen_ext16u_i32(tcg_ctx, tmp2, tmp2); - } else { - /* pkhbt */ - if (shift) - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, shift); - tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); - tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xffff0000); - } - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - } else if ((insn & 0x00200020) == 0x00200000) { - /* [us]sat */ - tmp = load_reg(s, rm); - shift = (insn >> 7) & 0x1f; - if (insn & (1 << 6)) { - if (shift == 0) - shift = 31; - tcg_gen_sari_i32(tcg_ctx, tmp, tmp, shift); - } else { - tcg_gen_shli_i32(tcg_ctx, tmp, tmp, shift); - } - sh = (insn >> 16) & 0x1f; - tmp2 = tcg_const_i32(tcg_ctx, sh); - if (insn & (1 << 22)) - gen_helper_usat(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - else - gen_helper_ssat(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - } else if ((insn & 0x00300fe0) == 0x00200f20) { - /* [us]sat16 */ - tmp = load_reg(s, rm); - sh = (insn >> 16) & 0x1f; - tmp2 = tcg_const_i32(tcg_ctx, sh); - if (insn & (1 << 22)) - gen_helper_usat16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - else - gen_helper_ssat16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - } else if ((insn & 0x00700fe0) == 0x00000fa0) { - /* Select bytes. */ - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - tmp3 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_i32(tcg_ctx, tmp3, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); - gen_helper_sel_flags(tcg_ctx, tmp, tmp3, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp3); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - } else if ((insn & 0x000003e0) == 0x00000060) { - tmp = load_reg(s, rm); - shift = (insn >> 10) & 3; - /* ??? In many cases it's not necessary to do a - rotate, a shift is sufficient. */ - if (shift != 0) - tcg_gen_rotri_i32(tcg_ctx, tmp, tmp, shift * 8); - op1 = (insn >> 20) & 7; - switch (op1) { - case 0: gen_sxtb16(tmp); break; - case 2: gen_sxtb(tmp); break; - case 3: gen_sxth(tmp); break; - case 4: gen_uxtb16(tmp); break; - case 6: gen_uxtb(tmp); break; - case 7: gen_uxth(tmp); break; - default: goto illegal_op; - } - if (rn != 15) { - tmp2 = load_reg(s, rn); - if ((op1 & 3) == 0) { - gen_add16(s, tmp, tmp2); - } else { - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - } - store_reg(s, rd, tmp); - } else if ((insn & 0x003f0f60) == 0x003f0f20) { - /* rev */ - tmp = load_reg(s, rm); - if (insn & (1 << 22)) { - if (insn & (1 << 7)) { - gen_revsh(s, tmp); - } else { - ARCH(6T2); - gen_helper_rbit(tcg_ctx, tmp, tmp); - } - } else { - if (insn & (1 << 7)) - gen_rev16(s, tmp); - else - tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); - } - store_reg(s, rd, tmp); - } else { - goto illegal_op; - } - break; - case 2: /* Multiplies (Type 3). */ - switch ((insn >> 20) & 0x7) { - case 5: - if (((insn >> 6) ^ (insn >> 7)) & 1) { - /* op2 not 00x or 11x : UNDEF */ - goto illegal_op; - } - /* Signed multiply most significant [accumulate]. 
- (SMMUL, SMMLA, SMMLS) */ - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - tmp64 = gen_muls_i64_i32(s, tmp, tmp2); - - if (rd != 15) { - tmp = load_reg(s, rd); - if (insn & (1 << 6)) { - tmp64 = gen_subq_msw(s, tmp64, tmp); - } else { - tmp64 = gen_addq_msw(s, tmp64, tmp); - } - } - if (insn & (1 << 5)) { - tcg_gen_addi_i64(tcg_ctx, tmp64, tmp64, 0x80000000u); - } - tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 32); - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - store_reg(s, rn, tmp); - break; - case 0: - case 4: - /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */ - if (insn & (1 << 7)) { - goto illegal_op; - } - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - if (insn & (1 << 5)) - gen_swap_half(s, tmp2); - gen_smul_dual(s, tmp, tmp2); - if (insn & (1 << 22)) { - /* smlald, smlsld */ - TCGv_i64 tmp64_2; - - tmp64 = tcg_temp_new_i64(tcg_ctx); - tmp64_2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext_i32_i64(tcg_ctx, tmp64, tmp); - tcg_gen_ext_i32_i64(tcg_ctx, tmp64_2, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - if (insn & (1 << 6)) { - tcg_gen_sub_i64(tcg_ctx, tmp64, tmp64, tmp64_2); - } else { - tcg_gen_add_i64(tcg_ctx, tmp64, tmp64, tmp64_2); - } - tcg_temp_free_i64(tcg_ctx, tmp64_2); - gen_addq(s, tmp64, rd, rn); - gen_storeq_reg(s, rd, rn, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - } else { - /* smuad, smusd, smlad, smlsd */ - if (insn & (1 << 6)) { - /* This subtraction cannot overflow. */ - tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); - } else { - /* This addition cannot overflow 32 bits; - * however it may overflow considered as a - * signed operation, in which case we must set - * the Q flag. - */ - gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - if (rd != 15) - { - tmp2 = load_reg(s, rd); - gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - store_reg(s, rn, tmp); - } - break; - case 1: - case 3: - /* SDIV, UDIV */ - if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) { - goto illegal_op; - } - if (((insn >> 5) & 7) || (rd != 15)) { - goto illegal_op; - } - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - if (insn & (1 << 21)) { - gen_helper_udiv(tcg_ctx, tmp, tmp, tmp2); - } else { - gen_helper_sdiv(tcg_ctx, tmp, tmp, tmp2); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rn, tmp); - break; - default: - goto illegal_op; - } - break; - case 3: - op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7); - switch (op1) { - case 0: /* Unsigned sum of absolute differences. */ - ARCH(6); - tmp = load_reg(s, rm); - tmp2 = load_reg(s, rs); - gen_helper_usad8(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - if (rd != 15) { - tmp2 = load_reg(s, rd); - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - store_reg(s, rn, tmp); - break; - case 0x20: case 0x24: case 0x28: case 0x2c: - /* Bitfield insert/clear. 
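                     * tcg_gen_deposit_i32() below is the usual
                     * mask-and-merge; with pos = shift and width = i as
                     * decoded underneath, the equivalent C is:
                     *
                     *     mask = (width == 32) ? ~0u
                     *                          : ((1u << width) - 1) << pos;
                     *     rd = (rd & ~mask) | ((src << pos) & mask);
                     *
                     * BFC is the same operation with a zero source (the
                     * rm == 15 case).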
*/ - ARCH(6T2); - shift = (insn >> 7) & 0x1f; - i = (insn >> 16) & 0x1f; - i = i + 1 - shift; - if (rm == 15) { - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, 0); - } else { - tmp = load_reg(s, rm); - } - if (i != 32) { - tmp2 = load_reg(s, rd); - tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, shift, i); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - store_reg(s, rd, tmp); - break; - case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */ - case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */ - ARCH(6T2); - tmp = load_reg(s, rm); - shift = (insn >> 7) & 0x1f; - i = ((insn >> 16) & 0x1f) + 1; - if (shift + i > 32) - goto illegal_op; - if (i < 32) { - if (op1 & 0x20) { - gen_ubfx(s, tmp, shift, (1u << i) - 1); - } else { - gen_sbfx(s, tmp, shift, i); - } - } - store_reg(s, rd, tmp); - break; - default: - goto illegal_op; - } - break; - } - break; - } - do_ldst: - /* Check for undefined extension instructions - * per the ARM Bible IE: - * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx - */ - sh = (0xf << 20) | (0xf << 4); - if (op1 == 0x7 && ((insn & sh) == sh)) - { - goto illegal_op; - } - /* load/store byte/word */ - rn = (insn >> 16) & 0xf; - rd = (insn >> 12) & 0xf; - tmp2 = load_reg(s, rn); - if ((insn & 0x01200000) == 0x00200000) { - /* ldrt/strt */ - i = MMU_USER_IDX; - } else { - i = get_mem_index(s); - } - if (insn & (1 << 24)) - gen_add_data_offset(s, insn, tmp2); - if (insn & (1 << 20)) { - /* load */ - tmp = tcg_temp_new_i32(tcg_ctx); - if (insn & (1 << 22)) { - gen_aa32_ld8u(s, tmp, tmp2, i); - } else { - gen_aa32_ld32u(s, tmp, tmp2, i); - } - } else { - /* store */ - tmp = load_reg(s, rd); - if (insn & (1 << 22)) { - gen_aa32_st8(s, tmp, tmp2, i); - } else { - gen_aa32_st32(s, tmp, tmp2, i); - } - tcg_temp_free_i32(tcg_ctx, tmp); - } - if (!(insn & (1 << 24))) { - gen_add_data_offset(s, insn, tmp2); - store_reg(s, rn, tmp2); - } else if (insn & (1 << 21)) { - store_reg(s, rn, tmp2); - } else { - tcg_temp_free_i32(tcg_ctx, tmp2); - } - if (insn & (1 << 20)) { - /* Complete the load. */ - store_reg_from_load(s, rd, tmp); - } - break; - case 0x08: - case 0x09: - { - int j, n, user, loaded_base; - TCGv_i32 loaded_var; - /* load/store multiple words */ - /* XXX: store correct base if write back */ - user = 0; - if (insn & (1 << 22)) { - if (IS_USER(s)) - goto illegal_op; /* only usable in supervisor mode */ - - if ((insn & (1 << 15)) == 0) - user = 1; - } - rn = (insn >> 16) & 0xf; - addr = load_reg(s, rn); - - /* compute total size */ - loaded_base = 0; - TCGV_UNUSED_I32(loaded_var); - n = 0; - for(i=0;i<16;i++) { - if (insn & (1 << i)) - n++; - } - /* XXX: test invalid n == 0 case ? 
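             * For reference, the pre-adjustment below realises the four ARM
             * addressing modes in terms of n, the number of set list bits:
             *
             *     IA: start = base           IB: start = base + 4
             *     DB: start = base - n*4     DA: start = base - (n-1)*4
             *
             * so each transfer can then use a uniform post-increment of 4.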
*/ - if (insn & (1 << 23)) { - if (insn & (1 << 24)) { - /* pre increment */ - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - } else { - /* post increment */ - } - } else { - if (insn & (1 << 24)) { - /* pre decrement */ - tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4)); - } else { - /* post decrement */ - if (n != 1) - tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4)); - } - } - j = 0; - for(i=0;i<16;i++) { - if (insn & (1 << i)) { - if (insn & (1 << 20)) { - /* load */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - if (user) { - tmp2 = tcg_const_i32(tcg_ctx, i); - gen_helper_set_user_reg(tcg_ctx, tcg_ctx->cpu_env, tmp2, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - } else if (i == rn) { - loaded_var = tmp; - loaded_base = 1; - } else { - store_reg_from_load(s, i, tmp); - } - } else { - /* store */ - if (i == 15) { - /* special case: r15 = PC + 8 */ - val = (long)s->pc + 4; - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, val); - } else if (user) { - tmp = tcg_temp_new_i32(tcg_ctx); - tmp2 = tcg_const_i32(tcg_ctx, i); - gen_helper_get_user_reg(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } else { - tmp = load_reg(s, i); - } - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - j++; - /* no need to add after the last transfer */ - if (j != n) - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - } - } - if (insn & (1 << 21)) { - /* write back */ - if (insn & (1 << 23)) { - if (insn & (1 << 24)) { - /* pre increment */ - } else { - /* post increment */ - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - } - } else { - if (insn & (1 << 24)) { - /* pre decrement */ - if (n != 1) - tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4)); - } else { - /* post decrement */ - tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4)); - } - } - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(tcg_ctx, addr); - } - if (loaded_base) { - store_reg(s, rn, loaded_var); - } - if ((insn & (1 << 22)) && !user) { - /* Restore CPSR from SPSR. */ - tmp = load_cpu_field(s->uc, spsr); - gen_set_cpsr(s, tmp, CPSR_ERET_MASK); - tcg_temp_free_i32(tcg_ctx, tmp); - s->is_jmp = DISAS_UPDATE; - } - } - break; - case 0xa: - case 0xb: - { - int32_t offset; - - /* branch (and link) */ - val = (int32_t)s->pc; - if (insn & (1 << 24)) { - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, val); - store_reg(s, 14, tmp); - } - offset = sextract32(insn << 2, 0, 26); - val += offset + 4; - gen_jmp(s, val); - } - break; - case 0xc: - case 0xd: - case 0xe: - if (((insn >> 8) & 0xe) == 10) { - /* VFP. */ - if (disas_vfp_insn(s, insn)) { - goto illegal_op; - } - } else if (disas_coproc_insn(s, insn)) { - /* Coprocessor. */ - goto illegal_op; - } - break; - case 0xf: // qq - /* swi */ - gen_set_pc_im(s, s->pc); - s->svc_imm = extract32(insn, 0, 24); - s->is_jmp = DISAS_SWI; - break; - default: - illegal_op: - gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized()); - break; - } - } -} - -/* Return true if this is a Thumb-2 logical op. */ -static int -thumb2_logic_op(int op) -{ - return (op < 8); -} - -/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero - then set condition code flags based on the result of the operation. - If SHIFTER_OUT is nonzero then set the carry flag for logical operations - to the high bit of T1. - Returns zero if the opcode is valid. 
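   The SHIFTER_OUT convention mirrors the ARM flag rules: for the logical
   ops the carry comes from the shifter rather than the ALU, i.e.

       C = (t1 >> 31) & 1;    // gen_set_CF_bit31(s, t1)

   while the add/sub variants derive C (and V) in their *_CC helpers.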
 */
-
-static int
-gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
-                   TCGv_i32 t0, TCGv_i32 t1)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    int logic_cc;
-
-    logic_cc = 0;
-    switch (op) {
-    case 0: /* and */
-        tcg_gen_and_i32(tcg_ctx, t0, t0, t1);
-        logic_cc = conds;
-        break;
-    case 1: /* bic */
-        tcg_gen_andc_i32(tcg_ctx, t0, t0, t1);
-        logic_cc = conds;
-        break;
-    case 2: /* orr */
-        tcg_gen_or_i32(tcg_ctx, t0, t0, t1);
-        logic_cc = conds;
-        break;
-    case 3: /* orn */
-        tcg_gen_orc_i32(tcg_ctx, t0, t0, t1);
-        logic_cc = conds;
-        break;
-    case 4: /* eor */
-        tcg_gen_xor_i32(tcg_ctx, t0, t0, t1);
-        logic_cc = conds;
-        break;
-    case 8: /* add */
-        if (conds)
-            gen_add_CC(s, t0, t0, t1);
-        else
-            tcg_gen_add_i32(tcg_ctx, t0, t0, t1);
-        break;
-    case 10: /* adc */
-        if (conds)
-            gen_adc_CC(s, t0, t0, t1);
-        else
-            gen_adc(s, t0, t1);
-        break;
-    case 11: /* sbc */
-        if (conds) {
-            gen_sbc_CC(s, t0, t0, t1);
-        } else {
-            gen_sub_carry(s, t0, t0, t1);
-        }
-        break;
-    case 13: /* sub */
-        if (conds)
-            gen_sub_CC(s, t0, t0, t1);
-        else
-            tcg_gen_sub_i32(tcg_ctx, t0, t0, t1);
-        break;
-    case 14: /* rsb */
-        if (conds)
-            gen_sub_CC(s, t0, t1, t0);
-        else
-            tcg_gen_sub_i32(tcg_ctx, t0, t1, t0);
-        break;
-    default: /* 5, 6, 7, 9, 12, 15. */
-        return 1;
-    }
-    if (logic_cc) {
-        gen_logic_CC(s, t0);
-        if (shifter_out)
-            gen_set_CF_bit31(s, t1);
-    }
-    return 0;
-}
-
-/* Translate a 32-bit Thumb instruction. Returns nonzero if the instruction
-   is not legal. */
-static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    uint32_t insn, imm, shift, offset;
-    uint32_t rd, rn, rm, rs;
-    TCGv_i32 tmp;
-    TCGv_i32 tmp2;
-    TCGv_i32 tmp3;
-    TCGv_i32 addr;
-    TCGv_i64 tmp64;
-    int op;
-    int shiftop;
-    int conds;
-    int logic_cc;
-
-    if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
-          || arm_dc_feature(s, ARM_FEATURE_M))) {
-        /* Thumb-1 cores may need to treat bl and blx as a pair of
-           16-bit instructions to get correct prefetch abort behavior. */
-        insn = insn_hw1;
-        if ((insn & (1 << 12)) == 0) {
-            ARCH(5);
-            /* Second half of blx. */
-            offset = ((insn & 0x7ff) << 1);
-            tmp = load_reg(s, 14);
-            tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset);
-            tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xfffffffc);
-
-            tmp2 = tcg_temp_new_i32(tcg_ctx);
-            tcg_gen_movi_i32(tcg_ctx, tmp2, s->pc | 1);
-            store_reg(s, 14, tmp2);
-            gen_bx(s, tmp);
-            return 0;
-        }
-        if (insn & (1 << 11)) {
-            /* Second half of bl. */
-            offset = ((insn & 0x7ff) << 1) | 1;
-            tmp = load_reg(s, 14);
-            tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset);
-
-            tmp2 = tcg_temp_new_i32(tcg_ctx);
-            tcg_gen_movi_i32(tcg_ctx, tmp2, s->pc | 1);
-            store_reg(s, 14, tmp2);
-            gen_bx(s, tmp);
-            return 0;
-        }
-        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
-            /* Instruction spans a page boundary. Implement it as two
-               16-bit instructions in case the second half causes a
-               prefetch abort. */
-            offset = ((int32_t)insn << 21) >> 9;
-            tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->pc + 2 + offset);
-            return 0;
-        }
-        /* Fall through to 32-bit decode. */
-    }
-
-    insn = arm_lduw_code(env, s->pc, s->bswap_code);
-    s->pc += 2;
-    insn |= (uint32_t)insn_hw1 << 16;
-
-    if ((insn & 0xf800e800) != 0xf000e800) {
-        ARCH(6T2);
-    }
-
-    rn = (insn >> 16) & 0xf;
-    rs = (insn >> 12) & 0xf;
-    rd = (insn >> 8) & 0xf;
-    rm = insn & 0xf;
-    switch ((insn >> 25) & 0xf) {
-    case 0: case 1: case 2: case 3:
-        /* 16-bit instructions. Should never happen. */
-        abort();
-    case 4:
-        if (insn & (1 << 22)) {
-            /* Other load/store, table branch.
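             * The table-branch half of this block (TBB/TBH) computes, as a
             * sketch with mem_read8/16 standing in for the gen_aa32_ld* ops:
             *
             *     entry = h ? mem_read16(base + 2 * rm)   // tbh
             *               : mem_read8(base + rm);       // tbb
             *     pc = pc + 2 * entry;
             *
             * where base is the current pc when rn == 15.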
*/ - if (insn & 0x01200000) { - /* Load/store doubleword. */ - if (rn == 15) { - addr = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, addr, s->pc & ~3); - } else { - addr = load_reg(s, rn); - } - offset = (insn & 0xff) * 4; - if ((insn & (1 << 23)) == 0) - offset = 0-offset; - if (insn & (1 << 24)) { - tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); - offset = 0; - } - if (insn & (1 << 20)) { - /* ldrd */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - store_reg(s, rs, tmp); - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - store_reg(s, rd, tmp); - } else { - /* strd */ - tmp = load_reg(s, rs); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - tmp = load_reg(s, rd); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - if (insn & (1 << 21)) { - /* Base writeback. */ - if (rn == 15) - goto illegal_op; - tcg_gen_addi_i32(tcg_ctx, addr, addr, offset - 4); - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(tcg_ctx, addr); - } - } else if ((insn & (1 << 23)) == 0) { - /* Load/store exclusive word. */ - addr = tcg_temp_local_new_i32(tcg_ctx); - load_reg_var(s, addr, rn); - tcg_gen_addi_i32(tcg_ctx, addr, addr, (insn & 0xff) << 2); - if (insn & (1 << 20)) { - gen_load_exclusive(s, rs, 15, addr, 2); - } else { - gen_store_exclusive(s, rd, rs, 15, addr, 2); - } - tcg_temp_free_i32(tcg_ctx, addr); - } else if ((insn & (7 << 5)) == 0) { - /* Table Branch. */ - if (rn == 15) { - addr = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, addr, s->pc); - } else { - addr = load_reg(s, rn); - } - tmp = load_reg(s, rm); - tcg_gen_add_i32(tcg_ctx, addr, addr, tmp); - if (insn & (1 << 4)) { - /* tbh */ - tcg_gen_add_i32(tcg_ctx, addr, addr, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - } else { /* tbb */ - tcg_temp_free_i32(tcg_ctx, tmp); - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - } - tcg_temp_free_i32(tcg_ctx, addr); - tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 1); - tcg_gen_addi_i32(tcg_ctx, tmp, tmp, s->pc); - store_reg(s, 15, tmp); - } else { - int op2 = (insn >> 6) & 0x3; - op = (insn >> 4) & 0x3; - switch (op2) { - case 0: - goto illegal_op; - case 1: - /* Load/store exclusive byte/halfword/doubleword */ - if (op == 2) { - goto illegal_op; - } - ARCH(7); - break; - case 2: - /* Load-acquire/store-release */ - if (op == 3) { - goto illegal_op; - } - /* Fall through */ - case 3: - /* Load-acquire/store-release exclusive */ - ARCH(8); - break; - } - addr = tcg_temp_local_new_i32(tcg_ctx); - load_reg_var(s, addr, rn); - if (!(op2 & 1)) { - if (insn & (1 << 20)) { - tmp = tcg_temp_new_i32(tcg_ctx); - switch (op) { - case 0: /* ldab */ - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - break; - case 1: /* ldah */ - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - break; - case 2: /* lda */ - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - break; - default: - abort(); - } - store_reg(s, rs, tmp); - } else { - tmp = load_reg(s, rs); - switch (op) { - case 0: /* stlb */ - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - break; - case 1: /* stlh */ - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - break; - case 2: /* stl */ - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - break; - default: - abort(); - } - tcg_temp_free_i32(tcg_ctx, tmp); - } 
- } else if (insn & (1 << 20)) { - gen_load_exclusive(s, rs, rd, addr, op); - } else { - gen_store_exclusive(s, rm, rs, rd, addr, op); - } - tcg_temp_free_i32(tcg_ctx, addr); - } - } else { - /* Load/store multiple, RFE, SRS. */ - if (((insn >> 23) & 1) == ((insn >> 24) & 1)) { - /* RFE, SRS: not available in user mode or on M profile */ - if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) { - goto illegal_op; - } - if (insn & (1 << 20)) { - /* rfe */ - addr = load_reg(s, rn); - if ((insn & (1 << 24)) == 0) - tcg_gen_addi_i32(tcg_ctx, addr, addr, -8); - /* Load PC into tmp and CPSR into tmp2. */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - tmp2 = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s)); - if (insn & (1 << 21)) { - /* Base writeback. */ - if (insn & (1 << 24)) { - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - } else { - tcg_gen_addi_i32(tcg_ctx, addr, addr, -4); - } - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(tcg_ctx, addr); - } - gen_rfe(s, tmp, tmp2); - } else { - /* srs */ - gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2, - insn & (1 << 21)); - } - } else { - int i, loaded_base = 0; - TCGv_i32 loaded_var; - /* Load/store multiple. */ - addr = load_reg(s, rn); - offset = 0; - for (i = 0; i < 16; i++) { - if (insn & (1 << i)) - offset += 4; - } - if (insn & (1 << 24)) { - tcg_gen_addi_i32(tcg_ctx, addr, addr, 0-offset); - } - - TCGV_UNUSED_I32(loaded_var); - for (i = 0; i < 16; i++) { - if ((insn & (1 << i)) == 0) - continue; - if (insn & (1 << 20)) { - /* Load. */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - if (i == 15) { - gen_bx(s, tmp); - } else if (i == rn) { - loaded_var = tmp; - loaded_base = 1; - } else { - store_reg(s, i, tmp); - } - } else { - /* Store. */ - tmp = load_reg(s, i); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - } - if (loaded_base) { - store_reg(s, rn, loaded_var); - } - if (insn & (1 << 21)) { - /* Base register writeback. */ - if (insn & (1 << 24)) { - tcg_gen_addi_i32(tcg_ctx, addr, addr, 0-offset); - } - /* Fault if writeback register is in register list. */ - if (insn & (1 << rn)) - goto illegal_op; - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(tcg_ctx, addr); - } - } - } - break; - case 5: - - op = (insn >> 21) & 0xf; - if (op == 6) { - /* Halfword pack. */ - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3); - if (insn & (1 << 5)) { - /* pkhtb */ - if (shift == 0) - shift = 31; - tcg_gen_sari_i32(tcg_ctx, tmp2, tmp2, shift); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xffff0000); - tcg_gen_ext16u_i32(tcg_ctx, tmp2, tmp2); - } else { - /* pkhbt */ - if (shift) - tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, shift); - tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); - tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xffff0000); - } - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - } else { - /* Data processing register constant shift. 
*/ - if (rn == 15) { - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, 0); - } else { - tmp = load_reg(s, rn); - } - tmp2 = load_reg(s, rm); - - shiftop = (insn >> 4) & 3; - shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); - conds = (insn & (1 << 20)) != 0; - logic_cc = (conds && thumb2_logic_op(op)); - gen_arm_shift_im(s, tmp2, shiftop, shift, logic_cc); - if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2)) - goto illegal_op; - tcg_temp_free_i32(tcg_ctx, tmp2); - if (rd != 15) { - store_reg(s, rd, tmp); - } else { - tcg_temp_free_i32(tcg_ctx, tmp); - } - } - break; - case 13: /* Misc data processing. */ - op = ((insn >> 22) & 6) | ((insn >> 7) & 1); - if (op < 4 && (insn & 0xf000) != 0xf000) - goto illegal_op; - switch (op) { - case 0: /* Register controlled shift. */ - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - if ((insn & 0x70) != 0) - goto illegal_op; - op = (insn >> 21) & 3; - logic_cc = (insn & (1 << 20)) != 0; - gen_arm_shift_reg(s, tmp, op, tmp2, logic_cc); - if (logic_cc) - gen_logic_CC(s, tmp); - store_reg_bx(s, rd, tmp); - break; - case 1: /* Sign/zero extend. */ - tmp = load_reg(s, rm); - shift = (insn >> 4) & 3; - /* ??? In many cases it's not necessary to do a - rotate, a shift is sufficient. */ - if (shift != 0) - tcg_gen_rotri_i32(tcg_ctx, tmp, tmp, shift * 8); - op = (insn >> 20) & 7; - switch (op) { - case 0: gen_sxth(tmp); break; - case 1: gen_uxth(tmp); break; - case 2: gen_sxtb16(tmp); break; - case 3: gen_uxtb16(tmp); break; - case 4: gen_sxtb(tmp); break; - case 5: gen_uxtb(tmp); break; - default: goto illegal_op; - } - if (rn != 15) { - tmp2 = load_reg(s, rn); - if ((op >> 1) == 1) { - gen_add16(s, tmp, tmp2); - } else { - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - } - store_reg(s, rd, tmp); - break; - case 2: /* SIMD add/subtract. */ - op = (insn >> 20) & 7; - shift = (insn >> 4) & 7; - if ((op & 3) == 3 || (shift & 3) == 3) - goto illegal_op; - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - gen_thumb2_parallel_addsub(s, op, shift, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - break; - case 3: /* Other data processing. */ - op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7); - if (op < 4) { - /* Saturating add/subtract. 
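                 * Decoded as a sketch, with rn/rm as read below and sat32()
                 * saturating to [INT32_MIN, INT32_MAX] like the helpers:
                 *
                 *     QADD:  rd = sat32(rn + rm)
                 *     QDADD: rd = sat32(sat32(2 * rn) + rm)
                 *     QSUB:  rd = sat32(rm - rn)
                 *     QDSUB: rd = sat32(rm - sat32(2 * rn))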
*/ - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - if (op & 1) - gen_helper_double_saturate(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); - if (op & 2) - gen_helper_sub_saturate(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp2, tmp); - else - gen_helper_add_saturate(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } else { - tmp = load_reg(s, rn); - switch (op) { - case 0x0a: /* rbit */ - gen_helper_rbit(tcg_ctx, tmp, tmp); - break; - case 0x08: /* rev */ - tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); - break; - case 0x09: /* rev16 */ - gen_rev16(s, tmp); - break; - case 0x0b: /* revsh */ - gen_revsh(s, tmp); - break; - case 0x10: /* sel */ - tmp2 = load_reg(s, rm); - tmp3 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_i32(tcg_ctx, tmp3, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); - gen_helper_sel_flags(tcg_ctx, tmp, tmp3, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp3); - tcg_temp_free_i32(tcg_ctx, tmp2); - break; - case 0x18: /* clz */ - gen_helper_clz(tcg_ctx, tmp, tmp); - break; - case 0x20: - case 0x21: - case 0x22: - case 0x28: - case 0x29: - case 0x2a: - { - /* crc32/crc32c */ - uint32_t sz = op & 0x3; - uint32_t c = op & 0x8; - - if (!arm_dc_feature(s, ARM_FEATURE_CRC)) { - goto illegal_op; - } - - tmp2 = load_reg(s, rm); - if (sz == 0) { - tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xff); - } else if (sz == 1) { - tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xffff); - } - tmp3 = tcg_const_i32(tcg_ctx, 1 << sz); - if (c) { - gen_helper_crc32c(tcg_ctx, tmp, tmp, tmp2, tmp3); - } else { - gen_helper_crc32(tcg_ctx, tmp, tmp, tmp2, tmp3); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp3); - break; - } - default: - goto illegal_op; - } - } - store_reg(s, rd, tmp); - break; - case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */ - op = (insn >> 4) & 0xf; - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - switch ((insn >> 20) & 7) { - case 0: /* 32 x 32 -> 32 */ - tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - if (rs != 15) { - tmp2 = load_reg(s, rs); - if (op) - tcg_gen_sub_i32(tcg_ctx, tmp, tmp2, tmp); - else - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - break; - case 1: /* 16 x 16 -> 32 */ - gen_mulxy(s, tmp, tmp2, op & 2, op & 1); - tcg_temp_free_i32(tcg_ctx, tmp2); - if (rs != 15) { - tmp2 = load_reg(s, rs); - gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - break; - case 2: /* Dual multiply add. */ - case 4: /* Dual multiply subtract. */ - if (op) - gen_swap_half(s, tmp2); - gen_smul_dual(s, tmp, tmp2); - if (insn & (1 << 22)) { - /* This subtraction cannot overflow. */ - tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); - } else { - /* This addition cannot overflow 32 bits; - * however it may overflow considered as a signed - * operation, in which case we must set the Q flag. 
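                         * Concretely: 0x7fffffff + 0x00000001 = 0x80000000 is
                         * a valid unsigned 32-bit sum, but as a signed
                         * addition it overflows (INT32_MAX + 1), so the helper
                         * must set Q = 1.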
- */ - gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - if (rs != 15) - { - tmp2 = load_reg(s, rs); - gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - break; - case 3: /* 32 * 16 -> 32msb */ - if (op) - tcg_gen_sari_i32(tcg_ctx, tmp2, tmp2, 16); - else - gen_sxth(tmp2); - tmp64 = gen_muls_i64_i32(s, tmp, tmp2); - tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 16); - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - if (rs != 15) - { - tmp2 = load_reg(s, rs); - gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - break; - case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */ - tmp64 = gen_muls_i64_i32(s, tmp, tmp2); - if (rs != 15) { - tmp = load_reg(s, rs); - if (insn & (1 << 20)) { - tmp64 = gen_addq_msw(s, tmp64, tmp); - } else { - tmp64 = gen_subq_msw(s, tmp64, tmp); - } - } - if (insn & (1 << 4)) { - tcg_gen_addi_i64(tcg_ctx, tmp64, tmp64, 0x80000000u); - } - tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 32); - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - break; - case 7: /* Unsigned sum of absolute differences. */ - gen_helper_usad8(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - if (rs != 15) { - tmp2 = load_reg(s, rs); - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - break; - } - store_reg(s, rd, tmp); - break; - case 6: case 7: /* 64-bit multiply, Divide. */ - op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70); - tmp = load_reg(s, rn); - tmp2 = load_reg(s, rm); - if ((op & 0x50) == 0x10) { - /* sdiv, udiv */ - if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) { - goto illegal_op; - } - if (op & 0x20) - gen_helper_udiv(tcg_ctx, tmp, tmp, tmp2); - else - gen_helper_sdiv(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - } else if ((op & 0xe) == 0xc) { - /* Dual multiply accumulate long. */ - if (op & 1) - gen_swap_half(s, tmp2); - gen_smul_dual(s, tmp, tmp2); - if (op & 0x10) { - tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); - } else { - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - /* BUGFIX */ - tmp64 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext_i32_i64(tcg_ctx, tmp64, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - gen_addq(s, tmp64, rs, rd); - gen_storeq_reg(s, rs, rd, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - } else { - if (op & 0x20) { - /* Unsigned 64-bit multiply */ - tmp64 = gen_mulu_i64_i32(s, tmp, tmp2); - } else { - if (op & 8) { - /* smlalxy */ - gen_mulxy(s, tmp, tmp2, op & 2, op & 1); - tcg_temp_free_i32(tcg_ctx, tmp2); - tmp64 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext_i32_i64(tcg_ctx, tmp64, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - } else { - /* Signed 64-bit multiply */ - tmp64 = gen_muls_i64_i32(s, tmp, tmp2); - } - } - if (op & 4) { - /* umaal */ - gen_addq_lo(s, tmp64, rs); - gen_addq_lo(s, tmp64, rd); - } else if (op & 0x40) { - /* 64-bit accumulate. */ - gen_addq(s, tmp64, rs, rd); - } - gen_storeq_reg(s, rs, rd, tmp64); - tcg_temp_free_i64(tcg_ctx, tmp64); - } - break; - } - break; - case 6: case 7: case 14: case 15: - /* Coprocessor. */ - if (((insn >> 24) & 3) == 3) { - /* Translate into the equivalent ARM encoding. 
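             * Reading the bit surgery on the next line mask by mask:
             *
             *     insn & 0xe2ffffff             // clear bits 28, 27, 26, 24
             *     | ((insn & (1 << 28)) >> 4)   // old bit 28 moves to bit 24
             *     | (1 << 28)                   // force bit 28 set
             *
             * which appears to rewrite the Thumb-2 Advanced SIMD form into
             * the ARM unconditional (cond = 0b1111) NEON encoding that
             * disas_neon_data_insn() expects.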
*/ - insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28); - if (disas_neon_data_insn(s, insn)) { - goto illegal_op; - } - } else if (((insn >> 8) & 0xe) == 10) { - if (disas_vfp_insn(s, insn)) { - goto illegal_op; - } - } else { - if (insn & (1 << 28)) - goto illegal_op; - if (disas_coproc_insn(s, insn)) { - goto illegal_op; - } - } - break; - case 8: case 9: case 10: case 11: - if (insn & (1 << 15)) { - /* Branches, misc control. */ - if (insn & 0x5000) { - /* Unconditional branch. */ - /* signextend(hw1[10:0]) -> offset[:12]. */ - offset = ((int32_t)(insn << 5)) >> 9 & ~(int32_t)0xfff; - /* hw1[10:0] -> offset[11:1]. */ - offset |= (insn & 0x7ff) << 1; - /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22] - offset[24:22] already have the same value because of the - sign extension above. */ - offset ^= ((~insn) & (1 << 13)) << 10; - offset ^= ((~insn) & (1 << 11)) << 11; - - if (insn & (1 << 14)) { - /* Branch and link. */ - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->pc | 1); - } - - offset += s->pc; - if (insn & (1 << 12)) { - /* b/bl */ - gen_jmp(s, offset); - } else { - /* blx */ - offset &= ~(uint32_t)2; - /* thumb2 bx, no need to check */ - gen_bx_im(s, offset); - } - } else if (((insn >> 23) & 7) == 7) { - /* Misc control */ - if (insn & (1 << 13)) - goto illegal_op; - - if (insn & (1 << 26)) { - if (!(insn & (1 << 20))) { - /* Hypervisor call (v7) */ - int imm16 = extract32(insn, 16, 4) << 12 - | extract32(insn, 0, 12); - ARCH(7); - if (IS_USER(s)) { - goto illegal_op; - } - gen_hvc(s, imm16); - } else { - /* Secure monitor call (v6+) */ - ARCH(6K); - if (IS_USER(s)) { - goto illegal_op; - } - gen_smc(s); - } - } else { - op = (insn >> 20) & 7; - switch (op) { - case 0: /* msr cpsr. */ - if (arm_dc_feature(s, ARM_FEATURE_M)) { - tmp = load_reg(s, rn); - addr = tcg_const_i32(tcg_ctx, insn & 0xff); - gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, tmp); - tcg_temp_free_i32(tcg_ctx, addr); - tcg_temp_free_i32(tcg_ctx, tmp); - gen_lookup_tb(s); - break; - } - /* fall through */ - case 1: /* msr spsr. */ - if (arm_dc_feature(s, ARM_FEATURE_M)) { - goto illegal_op; - } - tmp = load_reg(s, rn); - if (gen_set_psr(s, - msr_mask(s, (insn >> 8) & 0xf, op == 1), - op == 1, tmp)) - goto illegal_op; - break; - case 2: /* cps, nop-hint. */ - if (((insn >> 8) & 7) == 0) { - gen_nop_hint(s, insn & 0xff); - } - /* Implemented as NOP in user mode. */ - if (IS_USER(s)) - break; - offset = 0; - imm = 0; - if (insn & (1 << 10)) { - if (insn & (1 << 7)) - offset |= CPSR_A; - if (insn & (1 << 6)) - offset |= CPSR_I; - if (insn & (1 << 5)) - offset |= CPSR_F; - if (insn & (1 << 9)) - imm = CPSR_A | CPSR_I | CPSR_F; - } - if (insn & (1 << 8)) { - offset |= 0x1f; - imm |= (insn & 0x1f); - } - if (offset) { - gen_set_psr_im(s, offset, 0, imm); - } - break; - case 3: /* Special control operations. */ - ARCH(7); - op = (insn >> 4) & 0xf; - switch (op) { - case 2: /* clrex */ - gen_clrex(s); - break; - case 4: /* dsb */ - case 5: /* dmb */ - case 6: /* isb */ - /* These execute as NOPs. */ - break; - default: - goto illegal_op; - } - break; - case 4: /* bxj */ - /* Trivial implementation equivalent to bx. */ - tmp = load_reg(s, rn); - gen_bx(s, tmp); - break; - case 5: /* Exception return. */ - if (IS_USER(s)) { - goto illegal_op; - } - if (rn != 14 || rd != 15) { - goto illegal_op; - } - tmp = load_reg(s, rn); - tcg_gen_subi_i32(tcg_ctx, tmp, tmp, insn & 0xff); - gen_exception_return(s, tmp); - break; - case 6: /* mrs cpsr. 
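
The unconditional-branch path earlier in this hunk rebuilds the T4 immediate from S, J1, J2, imm10 and imm11, recovering I1/I2 by XOR-ing the inverted J bits against the sign-extended S bit. The same computation as a stand-alone sketch (t32_branch_offset is a hypothetical name; hw1/hw2 are the two Thumb halfwords):

    #include <stdint.h>

    static int32_t t32_branch_offset(uint16_t hw1, uint16_t hw2)
    {
        uint32_t insn = ((uint32_t)hw1 << 16) | hw2;
        int32_t offset;

        /* sign-extend S:imm10 (hw1[10:0]) into offset[24:12] and up */
        offset = (((int32_t)(insn << 5)) >> 9) & ~(int32_t)0xfff;
        /* imm11 (hw2[10:0]) -> offset[11:1] */
        offset |= (insn & 0x7ff) << 1;
        /* I1 = NOT(J1 XOR S) -> offset[23], I2 = NOT(J2 XOR S) -> offset[22];
         * offset[24:22] already hold S from the sign extension, so XOR-ing
         * with the inverted J bits produces I1/I2 in place. */
        offset ^= ((~insn) & (1 << 13)) << 10;
        offset ^= ((~insn) & (1 << 11)) << 11;
        return offset; /* add to the branch address + 4 for the target */
    }
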
*/ - tmp = tcg_temp_new_i32(tcg_ctx); - if (arm_dc_feature(s, ARM_FEATURE_M)) { - addr = tcg_const_i32(tcg_ctx, insn & 0xff); - gen_helper_v7m_mrs(tcg_ctx, tmp, tcg_ctx->cpu_env, addr); - tcg_temp_free_i32(tcg_ctx, addr); - } else { - gen_helper_cpsr_read(tcg_ctx, tmp, tcg_ctx->cpu_env); - } - store_reg(s, rd, tmp); - break; - case 7: /* mrs spsr. */ - /* Not accessible in user mode. */ - if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) { - goto illegal_op; - } - tmp = load_cpu_field(s->uc, spsr); - store_reg(s, rd, tmp); - break; - } - } - } else { - /* Conditional branch. */ - op = (insn >> 22) & 0xf; - /* Generate a conditional jump to next instruction. */ - s->condlabel = gen_new_label(tcg_ctx); - arm_gen_test_cc(tcg_ctx, op ^ 1, s->condlabel); - s->condjmp = 1; - - /* offset[11:1] = insn[10:0] */ - offset = (insn & 0x7ff) << 1; - /* offset[17:12] = insn[21:16]. */ - offset |= (insn & 0x003f0000) >> 4; - /* offset[31:20] = insn[26]. */ - offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11; - /* offset[18] = insn[13]. */ - offset |= (insn & (1 << 13)) << 5; - /* offset[19] = insn[11]. */ - offset |= (insn & (1 << 11)) << 8; - - /* jump to the offset */ - gen_jmp(s, s->pc + offset); - } - } else { - /* Data processing immediate. */ - if (insn & (1 << 25)) { - if (insn & (1 << 24)) { - if (insn & (1 << 20)) - goto illegal_op; - /* Bitfield/Saturate. */ - op = (insn >> 21) & 7; - imm = insn & 0x1f; - shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); - if (rn == 15) { - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, 0); - } else { - tmp = load_reg(s, rn); - } - switch (op) { - case 2: /* Signed bitfield extract. */ - imm++; - if (shift + imm > 32) - goto illegal_op; - if (imm < 32) - gen_sbfx(s, tmp, shift, imm); - break; - case 6: /* Unsigned bitfield extract. */ - imm++; - if (shift + imm > 32) - goto illegal_op; - if (imm < 32) - gen_ubfx(s, tmp, shift, (1u << imm) - 1); - break; - case 3: /* Bitfield insert/clear. */ - if (imm < shift) - goto illegal_op; - imm = imm + 1 - shift; - if (imm != 32) { - tmp2 = load_reg(s, rd); - tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, shift, imm); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - break; - case 7: - goto illegal_op; - default: /* Saturate. */ - if (shift) { - if (op & 1) - tcg_gen_sari_i32(tcg_ctx, tmp, tmp, shift); - else - tcg_gen_shli_i32(tcg_ctx, tmp, tmp, shift); - } - tmp2 = tcg_const_i32(tcg_ctx, imm); - if (op & 4) { - /* Unsigned. */ - if ((op & 1) && shift == 0) - gen_helper_usat16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - else - gen_helper_usat(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - } else { - /* Signed. */ - if ((op & 1) && shift == 0) - gen_helper_ssat16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - else - gen_helper_ssat(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - break; - } - store_reg(s, rd, tmp); - } else { - imm = ((insn & 0x04000000) >> 15) - | ((insn & 0x7000) >> 4) | (insn & 0xff); - if (insn & (1 << 22)) { - /* 16-bit immediate. */ - imm |= (insn >> 4) & 0xf000; - if (insn & (1 << 23)) { - /* movt */ - tmp = load_reg(s, rd); - tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); - tcg_gen_ori_i32(tcg_ctx, tmp, tmp, imm << 16); - } else { - /* movw */ - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, imm); - } - } else { - /* Add/sub 12-bit immediate. 
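
The MOVW/MOVT paths just above materialize a 32-bit constant halfword by halfword: MOVW sets the register to a zero-extended immediate, while MOVT keeps the low half (tcg_gen_ext16u_i32) and ORs the new high half in. A tiny sketch of the pair, with hypothetical helper names:

    #include <stdint.h>

    static uint32_t movw(uint16_t imm16)
    {
        return imm16;                                   /* high half zeroed */
    }

    static uint32_t movt(uint32_t rd, uint16_t imm16)
    {
        return (rd & 0xffffu) | ((uint32_t)imm16 << 16); /* keep low half */
    }

    /* usage: r0 = movw(0xbeef); r0 = movt(r0, 0xdead);  -> r0 == 0xdeadbeef */
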
*/ - if (rn == 15) { - offset = s->pc & ~(uint32_t)3; - if (insn & (1 << 23)) - offset -= imm; - else - offset += imm; - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, offset); - } else { - tmp = load_reg(s, rn); - if (insn & (1 << 23)) - tcg_gen_subi_i32(tcg_ctx, tmp, tmp, imm); - else - tcg_gen_addi_i32(tcg_ctx, tmp, tmp, imm); - } - } - store_reg(s, rd, tmp); - } - } else { - int shifter_out = 0; - /* modified 12-bit immediate. */ - shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12); - imm = (insn & 0xff); - switch (shift) { - case 0: /* XY */ - /* Nothing to do. */ - break; - case 1: /* 00XY00XY */ - imm |= imm << 16; - break; - case 2: /* XY00XY00 */ - imm |= imm << 16; - imm <<= 8; - break; - case 3: /* XYXYXYXY */ - imm |= imm << 16; - imm |= imm << 8; - break; - default: /* Rotated constant. */ - shift = (shift << 1) | (imm >> 7); - imm |= 0x80; - imm = imm << (32 - shift); - shifter_out = 1; - break; - } - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp2, imm); - rn = (insn >> 16) & 0xf; - if (rn == 15) { - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, 0); - } else { - tmp = load_reg(s, rn); - } - op = (insn >> 21) & 0xf; - if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0, - shifter_out, tmp, tmp2)) - goto illegal_op; - tcg_temp_free_i32(tcg_ctx, tmp2); - rd = (insn >> 8) & 0xf; - if (rd != 15) { - store_reg(s, rd, tmp); - } else { - tcg_temp_free_i32(tcg_ctx, tmp); - } - } - } - break; - case 12: /* Load/store single data item. */ - { - int postinc = 0; - int writeback = 0; - int memidx; - if ((insn & 0x01100000) == 0x01000000) { - if (disas_neon_ls_insn(s, insn)) { - goto illegal_op; - } - break; - } - op = ((insn >> 21) & 3) | ((insn >> 22) & 4); - if (rs == 15) { - if (!(insn & (1 << 20))) { - goto illegal_op; - } - if (op != 2) { - /* Byte or halfword load space with dest == r15 : memory hints. - * Catch them early so we don't emit pointless addressing code. - * This space is a mix of: - * PLD/PLDW/PLI, which we implement as NOPs (note that unlike - * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP - * cores) - * unallocated hints, which must be treated as NOPs - * UNPREDICTABLE space, which we NOP or UNDEF depending on - * which is easiest for the decoding logic - * Some space which must UNDEF - */ - int op1 = (insn >> 23) & 3; - int op2 = (insn >> 6) & 0x3f; - if (op & 2) { - goto illegal_op; - } - if (rn == 15) { - /* UNPREDICTABLE, unallocated hint or - * PLD/PLDW/PLI (literal) - */ - return 0; - } - if (op1 & 1) { - return 0; /* PLD/PLDW/PLI or unallocated hint */ - } - if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) { - return 0; /* PLD/PLDW/PLI or unallocated hint */ - } - /* UNDEF space, or an UNPREDICTABLE */ - return 1; - } - } - memidx = get_mem_index(s); - if (rn == 15) { - addr = tcg_temp_new_i32(tcg_ctx); - /* PC relative. */ - /* s->pc has already been incremented by 4. */ - imm = s->pc & 0xfffffffc; - if (insn & (1 << 23)) - imm += insn & 0xfff; - else - imm -= insn & 0xfff; - tcg_gen_movi_i32(tcg_ctx, addr, imm); - } else { - addr = load_reg(s, rn); - if (insn & (1 << 23)) { - /* Positive offset. */ - imm = insn & 0xfff; - tcg_gen_addi_i32(tcg_ctx, addr, addr, imm); - } else { - imm = insn & 0xff; - switch ((insn >> 8) & 0xf) { - case 0x0: /* Shifted Register. 
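
The "modified 12-bit immediate" decode above is ThumbExpandImm from the ARM ARM: a 4-bit selector (i:imm3) either picks a replication pattern for the 8-bit payload or forms a rotated constant. A self-contained sketch following the same cases (thumb_expand_imm_sketch is a hypothetical name):

    #include <stdint.h>

    static uint32_t thumb_expand_imm_sketch(uint32_t shift, uint32_t imm8)
    {
        uint32_t imm = imm8 & 0xff;

        switch (shift) {
        case 0:                           /* 000000XY */
            return imm;
        case 1:                           /* 00XY00XY */
            return imm | (imm << 16);
        case 2:                           /* XY00XY00 */
            return (imm | (imm << 16)) << 8;
        case 3:                           /* XYXYXYXY */
            imm |= imm << 16;
            return imm | (imm << 8);
        default:                          /* rotated constant */
            /* shift[3:0]:imm8[7] give the rotation (8..31); bit 7 forced to 1 */
            shift = (shift << 1) | (imm >> 7);
            imm |= 0x80;
            return imm << (32 - shift);   /* == ror(imm, shift) for this range */
        }
    }
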
*/ - shift = (insn >> 4) & 0xf; - if (shift > 3) { - tcg_temp_free_i32(tcg_ctx, addr); - goto illegal_op; - } - tmp = load_reg(s, rm); - if (shift) - tcg_gen_shli_i32(tcg_ctx, tmp, tmp, shift); - tcg_gen_add_i32(tcg_ctx, addr, addr, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 0xc: /* Negative offset. */ - tcg_gen_addi_i32(tcg_ctx, addr, addr, 0-imm); - break; - case 0xe: /* User privilege. */ - tcg_gen_addi_i32(tcg_ctx, addr, addr, imm); - memidx = MMU_USER_IDX; - break; - case 0x9: /* Post-decrement. */ - imm = 0-imm; - /* Fall through. */ - case 0xb: /* Post-increment. */ - postinc = 1; - writeback = 1; - break; - case 0xd: /* Pre-decrement. */ - imm = 0-imm; - /* Fall through. */ - case 0xf: /* Pre-increment. */ - tcg_gen_addi_i32(tcg_ctx, addr, addr, imm); - writeback = 1; - break; - default: - tcg_temp_free_i32(tcg_ctx, addr); - goto illegal_op; - } - } - } - if (insn & (1 << 20)) { - /* Load. */ - tmp = tcg_temp_new_i32(tcg_ctx); - switch (op) { - case 0: - gen_aa32_ld8u(s, tmp, addr, memidx); - break; - case 4: - gen_aa32_ld8s(s, tmp, addr, memidx); - break; - case 1: - gen_aa32_ld16u(s, tmp, addr, memidx); - break; - case 5: - gen_aa32_ld16s(s, tmp, addr, memidx); - break; - case 2: - gen_aa32_ld32u(s, tmp, addr, memidx); - break; - default: - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, addr); - goto illegal_op; - } - if (rs == 15) { - gen_bx(s, tmp); - } else { - store_reg(s, rs, tmp); - } - } else { - /* Store. */ - tmp = load_reg(s, rs); - switch (op) { - case 0: - gen_aa32_st8(s, tmp, addr, memidx); - break; - case 1: - gen_aa32_st16(s, tmp, addr, memidx); - break; - case 2: - gen_aa32_st32(s, tmp, addr, memidx); - break; - default: - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, addr); - goto illegal_op; - } - tcg_temp_free_i32(tcg_ctx, tmp); - } - if (postinc) - tcg_gen_addi_i32(tcg_ctx, addr, addr, imm); - if (writeback) { - store_reg(s, rn, addr); - } else { - tcg_temp_free_i32(tcg_ctx, addr); - } - } - break; - default: - goto illegal_op; - } - return 0; -illegal_op: - return 1; -} - -static void disas_thumb_insn(CPUARMState *env, DisasContext *s) // qq -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint32_t val, insn, op, rm, rn, rd, shift, cond; - int32_t offset; - int i; - TCGv_i32 tmp; - TCGv_i32 tmp2; - TCGv_i32 addr; - - // Unicorn: end address tells us to stop emulation - if (s->pc == s->uc->addr_end) { - // imitate WFI instruction to halt emulation - s->is_jmp = DISAS_WFI; - return; - } - - if (s->condexec_mask) { - cond = s->condexec_cond; - if (cond != 0x0e) { /* Skip conditional when condition is AL. 
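
The 8-bit-immediate addressing modes dispatched on insn[11:8] in this hunk reduce to a small effective-address/writeback table. This is a summary sketch only: the register-offset mode (0x0) and the memidx change for user-privilege accesses are not modeled, and mem_addr/t32_mem_addr are hypothetical names.

    #include <stdint.h>

    typedef struct { uint32_t ea; uint32_t wb; int writeback; } mem_addr;

    static mem_addr t32_mem_addr(uint32_t mode, uint32_t base, uint32_t imm)
    {
        mem_addr r = { base, base, 0 };
        switch (mode) {
        case 0xc: r.ea = base - imm; break;               /* negative offset   */
        case 0xe: r.ea = base + imm; break;               /* user privilege    */
        case 0x9: r.wb = base - imm; r.writeback = 1; break; /* post-decrement */
        case 0xb: r.wb = base + imm; r.writeback = 1; break; /* post-increment */
        case 0xd: r.ea = r.wb = base - imm; r.writeback = 1; break; /* pre-dec */
        case 0xf: r.ea = r.wb = base + imm; r.writeback = 1; break; /* pre-inc */
        }
        return r;
    }
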
*/ - s->condlabel = gen_new_label(tcg_ctx); - arm_gen_test_cc(tcg_ctx, cond ^ 1, s->condlabel); - s->condjmp = 1; - } - } - - insn = arm_lduw_code(env, s->pc, s->bswap_code); - - // Unicorn: trace this instruction on request - if (HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_CODE, s->pc)) { - // determine instruction size (Thumb/Thumb2) - // avoid terminating inside ITE clause - if (s->condexec_mask == 0) { - switch(insn & 0xf800) { - // Thumb2: 32-bit - case 0xe800: - case 0xf000: - case 0xf800: - gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, s->uc, s->pc); - break; - // Thumb: 16-bit - default: - // avoid terminating at an IT instruction - if (!((insn & 0xff00) == 0xbf00)) { - gen_uc_tracecode(tcg_ctx, 2, UC_HOOK_CODE_IDX, s->uc, s->pc); - } - break; - } - } - // the callback might want to stop emulation immediately - check_exit_request(tcg_ctx); - } - - s->pc += 2; - - switch (insn >> 12) { - case 0: case 1: - - rd = insn & 7; - op = (insn >> 11) & 3; - if (op == 3) { - /* add/subtract */ - rn = (insn >> 3) & 7; - tmp = load_reg(s, rn); - if (insn & (1 << 10)) { - /* immediate */ - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp2, (insn >> 6) & 7); - } else { - /* reg */ - rm = (insn >> 6) & 7; - tmp2 = load_reg(s, rm); - } - if (insn & (1 << 9)) { - if (s->condexec_mask) - tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); - else - gen_sub_CC(s, tmp, tmp, tmp2); - } else { - if (s->condexec_mask) - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - else - gen_add_CC(s, tmp, tmp, tmp2); - } - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - } else { - /* shift immediate */ - rm = (insn >> 3) & 7; - shift = (insn >> 6) & 0x1f; - tmp = load_reg(s, rm); - gen_arm_shift_im(s, tmp, op, shift, s->condexec_mask == 0); - if (!s->condexec_mask) - gen_logic_CC(s, tmp); - store_reg(s, rd, tmp); - } - break; - case 2: case 3: - /* arithmetic large immediate */ - op = (insn >> 11) & 3; - rd = (insn >> 8) & 0x7; - if (op == 0) { /* mov */ - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, insn & 0xff); - if (!s->condexec_mask) - gen_logic_CC(s, tmp); - store_reg(s, rd, tmp); - } else { - tmp = load_reg(s, rd); - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp2, insn & 0xff); - switch (op) { - case 1: /* cmp */ - gen_sub_CC(s, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - break; - case 2: /* add */ - if (s->condexec_mask) - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - else - gen_add_CC(s, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - break; - case 3: /* sub */ - if (s->condexec_mask) - tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); - else - gen_sub_CC(s, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - break; - } - } - break; - case 4: - if (insn & (1 << 11)) { - rd = (insn >> 8) & 7; - /* load pc-relative. Bit 1 of PC is ignored. 
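
The Unicorn tracing code above sizes each traced instruction from its first halfword: after masking with 0xf800, the three values 0xe800, 0xf000 and 0xf800 mark a 32-bit Thumb2 encoding. The same test as a stand-alone predicate (hypothetical name):

    #include <stdint.h>

    static int thumb_insn_is_32bit(uint16_t hw1)
    {
        switch (hw1 & 0xf800) {
        case 0xe800:
        case 0xf000:
        case 0xf800:
            return 1;   /* Thumb2: this halfword plus one more */
        default:
            return 0;   /* classic 16-bit Thumb */
        }
    }
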
*/ - val = s->pc + 2 + ((insn & 0xff) * 4); - val &= ~(uint32_t)2; - addr = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, addr, val); - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, addr); - store_reg(s, rd, tmp); - break; - } - if (insn & (1 << 10)) { - /* data processing extended or blx */ - rd = (insn & 7) | ((insn >> 4) & 8); - rm = (insn >> 3) & 0xf; - op = (insn >> 8) & 3; - switch (op) { - case 0: /* add */ - tmp = load_reg(s, rd); - tmp2 = load_reg(s, rm); - tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - store_reg(s, rd, tmp); - break; - case 1: /* cmp */ - tmp = load_reg(s, rd); - tmp2 = load_reg(s, rm); - gen_sub_CC(s, tmp, tmp, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp2); - tcg_temp_free_i32(tcg_ctx, tmp); - break; - case 2: /* mov/cpy */ - tmp = load_reg(s, rm); - store_reg(s, rd, tmp); - break; - case 3:/* branch [and link] exchange thumb register */ - tmp = load_reg(s, rm); - if (insn & (1 << 7)) { - ARCH(5); - val = (uint32_t)s->pc | 1; - tmp2 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp2, val); - store_reg(s, 14, tmp2); - } - /* already thumb, no need to check */ - gen_bx(s, tmp); - break; - } - break; - } - - /* data processing register */ - rd = insn & 7; - rm = (insn >> 3) & 7; - op = (insn >> 6) & 0xf; - if (op == 2 || op == 3 || op == 4 || op == 7) { - /* the shift/rotate ops want the operands backwards */ - val = rm; - rm = rd; - rd = val; - val = 1; - } else { - val = 0; - } - - if (op == 9) { /* neg */ - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, 0); - } else if (op != 0xf) { /* mvn doesn't read its first operand */ - tmp = load_reg(s, rd); - } else { - TCGV_UNUSED_I32(tmp); - } - - tmp2 = load_reg(s, rm); - switch (op) { - case 0x0: /* and */ - tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); - if (!s->condexec_mask) - gen_logic_CC(s, tmp); - break; - case 0x1: /* eor */ - tcg_gen_xor_i32(tcg_ctx, tmp, tmp, tmp2); - if (!s->condexec_mask) - gen_logic_CC(s, tmp); - break; - case 0x2: /* lsl */ - if (s->condexec_mask) { - gen_shl(s, tmp2, tmp2, tmp); - } else { - gen_helper_shl_cc(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2, tmp); - gen_logic_CC(s, tmp2); - } - break; - case 0x3: /* lsr */ - if (s->condexec_mask) { - gen_shr(s, tmp2, tmp2, tmp); - } else { - gen_helper_shr_cc(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2, tmp); - gen_logic_CC(s, tmp2); - } - break; - case 0x4: /* asr */ - if (s->condexec_mask) { - gen_sar(s, tmp2, tmp2, tmp); - } else { - gen_helper_sar_cc(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2, tmp); - gen_logic_CC(s, tmp2); - } - break; - case 0x5: /* adc */ - if (s->condexec_mask) { - gen_adc(s, tmp, tmp2); - } else { - gen_adc_CC(s, tmp, tmp, tmp2); - } - break; - case 0x6: /* sbc */ - if (s->condexec_mask) { - gen_sub_carry(s, tmp, tmp, tmp2); - } else { - gen_sbc_CC(s, tmp, tmp, tmp2); - } - break; - case 0x7: /* ror */ - if (s->condexec_mask) { - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0x1f); - tcg_gen_rotr_i32(tcg_ctx, tmp2, tmp2, tmp); - } else { - gen_helper_ror_cc(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2, tmp); - gen_logic_CC(s, tmp2); - } - break; - case 0x8: /* tst */ - tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); - gen_logic_CC(s, tmp); - rd = 16; - break; - case 0x9: /* neg */ - if (s->condexec_mask) - tcg_gen_neg_i32(tcg_ctx, tmp, tmp2); - else - gen_sub_CC(s, tmp, tmp, tmp2); - break; - case 0xa: /* cmp */ - gen_sub_CC(s, tmp, tmp, tmp2); - rd = 16; - break; - case 0xb: /* cmn */ - gen_add_CC(s, tmp, tmp, tmp2); - rd = 16; 
- break; - case 0xc: /* orr */ - tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); - if (!s->condexec_mask) - gen_logic_CC(s, tmp); - break; - case 0xd: /* mul */ - tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); - if (!s->condexec_mask) - gen_logic_CC(s, tmp); - break; - case 0xe: /* bic */ - tcg_gen_andc_i32(tcg_ctx, tmp, tmp, tmp2); - if (!s->condexec_mask) - gen_logic_CC(s, tmp); - break; - case 0xf: /* mvn */ - tcg_gen_not_i32(tcg_ctx, tmp2, tmp2); - if (!s->condexec_mask) - gen_logic_CC(s, tmp2); - val = 1; - rm = rd; - break; - } - if (rd != 16) { - if (val) { - store_reg(s, rm, tmp2); - if (op != 0xf) - tcg_temp_free_i32(tcg_ctx, tmp); - } else { - store_reg(s, rd, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - } else { - tcg_temp_free_i32(tcg_ctx, tmp); - tcg_temp_free_i32(tcg_ctx, tmp2); - } - break; - - case 5: - /* load/store register offset. */ - rd = insn & 7; - rn = (insn >> 3) & 7; - rm = (insn >> 6) & 7; - op = (insn >> 9) & 7; - addr = load_reg(s, rn); - tmp = load_reg(s, rm); - tcg_gen_add_i32(tcg_ctx, addr, addr, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - - if (op < 3) { /* store */ - tmp = load_reg(s, rd); - } else { - tmp = tcg_temp_new_i32(tcg_ctx); - } - - switch (op) { - case 0: /* str */ - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - break; - case 1: /* strh */ - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - break; - case 2: /* strb */ - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - break; - case 3: /* ldrsb */ - gen_aa32_ld8s(s, tmp, addr, get_mem_index(s)); - break; - case 4: /* ldr */ - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - break; - case 5: /* ldrh */ - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - break; - case 6: /* ldrb */ - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - break; - case 7: /* ldrsh */ - gen_aa32_ld16s(s, tmp, addr, get_mem_index(s)); - break; - } - if (op >= 3) { /* load */ - store_reg(s, rd, tmp); - } else { - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_temp_free_i32(tcg_ctx, addr); - break; - - case 6: - /* load/store word immediate offset */ - rd = insn & 7; - rn = (insn >> 3) & 7; - addr = load_reg(s, rn); - val = (insn >> 4) & 0x7c; - tcg_gen_addi_i32(tcg_ctx, addr, addr, val); - - if (insn & (1 << 11)) { - /* load */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - store_reg(s, rd, tmp); - } else { - /* store */ - tmp = load_reg(s, rd); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_temp_free_i32(tcg_ctx, addr); - break; - - case 7: - /* load/store byte immediate offset */ - rd = insn & 7; - rn = (insn >> 3) & 7; - addr = load_reg(s, rn); - val = (insn >> 6) & 0x1f; - tcg_gen_addi_i32(tcg_ctx, addr, addr, val); - - if (insn & (1 << 11)) { - /* load */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - store_reg(s, rd, tmp); - } else { - /* store */ - tmp = load_reg(s, rd); - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_temp_free_i32(tcg_ctx, addr); - break; - - case 8: - /* load/store halfword immediate offset */ - rd = insn & 7; - rn = (insn >> 3) & 7; - addr = load_reg(s, rn); - val = (insn >> 5) & 0x3e; - tcg_gen_addi_i32(tcg_ctx, addr, addr, val); - - if (insn & (1 << 11)) { - /* load */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - store_reg(s, rd, tmp); - } else { - /* store */ - tmp = load_reg(s, rd); - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_temp_free_i32(tcg_ctx, 
addr); - break; - - case 9: - /* load/store from stack */ - rd = (insn >> 8) & 7; - addr = load_reg(s, 13); - val = (insn & 0xff) * 4; - tcg_gen_addi_i32(tcg_ctx, addr, addr, val); - - if (insn & (1 << 11)) { - /* load */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - store_reg(s, rd, tmp); - } else { - /* store */ - tmp = load_reg(s, rd); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_temp_free_i32(tcg_ctx, addr); - break; - - case 10: - /* add to high reg */ - rd = (insn >> 8) & 7; - if (insn & (1 << 11)) { - /* SP */ - tmp = load_reg(s, 13); - } else { - /* PC. bit 1 is ignored. */ - tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, (s->pc + 2) & ~(uint32_t)2); - } - val = (insn & 0xff) * 4; - tcg_gen_addi_i32(tcg_ctx, tmp, tmp, val); - store_reg(s, rd, tmp); - break; - - case 11: - /* misc */ - op = (insn >> 8) & 0xf; - switch (op) { - case 0: - /* adjust stack pointer */ - tmp = load_reg(s, 13); - val = (insn & 0x7f) * 4; - if (insn & (1 << 7)) - val = -(int32_t)val; - tcg_gen_addi_i32(tcg_ctx, tmp, tmp, val); - store_reg(s, 13, tmp); - break; - - case 2: /* sign/zero extend. */ - ARCH(6); - rd = insn & 7; - rm = (insn >> 3) & 7; - tmp = load_reg(s, rm); - switch ((insn >> 6) & 3) { - case 0: gen_sxth(tmp); break; - case 1: gen_sxtb(tmp); break; - case 2: gen_uxth(tmp); break; - case 3: gen_uxtb(tmp); break; - } - store_reg(s, rd, tmp); - break; - case 4: case 5: case 0xc: case 0xd: - /* push/pop */ - addr = load_reg(s, 13); - if (insn & (1 << 8)) - offset = 4; - else - offset = 0; - for (i = 0; i < 8; i++) { - if (insn & (1 << i)) - offset += 4; - } - if ((insn & (1 << 11)) == 0) { - tcg_gen_addi_i32(tcg_ctx, addr, addr, -offset); - } - for (i = 0; i < 8; i++) { - if (insn & (1 << i)) { - if (insn & (1 << 11)) { - /* pop */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - store_reg(s, i, tmp); - } else { - /* push */ - tmp = load_reg(s, i); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - /* advance to the next address. */ - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - } - } - TCGV_UNUSED_I32(tmp); - if (insn & (1 << 8)) { - if (insn & (1 << 11)) { - /* pop pc */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - /* don't set the pc until the rest of the instruction - has completed */ - } else { - /* push lr */ - tmp = load_reg(s, 14); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - } - if ((insn & (1 << 11)) == 0) { - tcg_gen_addi_i32(tcg_ctx, addr, addr, -offset); - } - /* write back the new stack pointer */ - store_reg(s, 13, addr); - /* set the new PC value */ - if ((insn & 0x0900) == 0x0900) { - store_reg_from_load(s, 15, tmp); - } - break; - - case 1: case 3: case 9: case 11: /* czb */ - rm = insn & 7; - tmp = load_reg(s, rm); - s->condlabel = gen_new_label(tcg_ctx); - s->condjmp = 1; - if (insn & (1 << 11)) - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, s->condlabel); - else - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, s->condlabel); - tcg_temp_free_i32(tcg_ctx, tmp); - offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3; - val = (uint32_t)s->pc + 2; - val += offset; - gen_jmp(s, val); - break; - - case 15: /* IT, nop-hint. */ - if ((insn & 0xf) == 0) { - gen_nop_hint(s, (insn >> 4) & 0xf); - break; - } - /* If Then. 
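
The CBZ/CBNZ case above assembles the always-forward branch offset from i:imm5:'0'; note that s->pc has already been advanced past the 2-byte instruction, so the "+ 2" there amounts to "instruction address + 4". A sketch of the target computation (cbz_target is a hypothetical name):

    #include <stdint.h>

    static uint32_t cbz_target(uint32_t insn_addr, uint16_t insn)
    {
        uint32_t offset = ((insn & 0x00f8) >> 2)   /* imm5 -> offset[5:1] */
                        | ((insn & 0x0200) >> 3);  /* i    -> offset[6]   */
        return insn_addr + 4 + offset;             /* always forward      */
    }
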
*/ - s->condexec_cond = (insn >> 4) & 0xe; - s->condexec_mask = insn & 0x1f; - /* No actual code generated for this insn, just setup state. */ - break; - - case 0xe: /* bkpt */ - { - int imm8 = extract32(insn, 0, 8); - ARCH(5); - gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true)); - break; - } - - case 0xa: /* rev */ - ARCH(6); - rn = (insn >> 3) & 0x7; - rd = insn & 0x7; - tmp = load_reg(s, rn); - switch ((insn >> 6) & 3) { - case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); break; - case 1: gen_rev16(s, tmp); break; - case 3: gen_revsh(s, tmp); break; - default: goto illegal_op; - } - store_reg(s, rd, tmp); - break; - - case 6: - switch ((insn >> 5) & 7) { - case 2: - /* setend */ - ARCH(6); - if (((insn >> 3) & 1) != s->bswap_code) { - /* Dynamic endianness switching not implemented. */ - qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n"); - goto illegal_op; - } - break; - case 3: - /* cps */ - ARCH(6); - if (IS_USER(s)) { - break; - } - if (arm_dc_feature(s, ARM_FEATURE_M)) { - tmp = tcg_const_i32(tcg_ctx, (insn & (1 << 4)) != 0); - /* FAULTMASK */ - if (insn & 1) { - addr = tcg_const_i32(tcg_ctx, 19); - gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, tmp); - tcg_temp_free_i32(tcg_ctx, addr); - } - /* PRIMASK */ - if (insn & 2) { - addr = tcg_const_i32(tcg_ctx, 16); - gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, tmp); - tcg_temp_free_i32(tcg_ctx, addr); - } - tcg_temp_free_i32(tcg_ctx, tmp); - gen_lookup_tb(s); - } else { - if (insn & (1 << 4)) { - shift = CPSR_A | CPSR_I | CPSR_F; - } else { - shift = 0; - } - gen_set_psr_im(s, ((insn & 7) << 6), 0, shift); - } - break; - default: - goto undef; - } - break; - - default: - goto undef; - } - break; - - case 12: - { - /* load/store multiple */ - TCGv_i32 loaded_var; - TCGV_UNUSED_I32(loaded_var); - rn = (insn >> 8) & 0x7; - addr = load_reg(s, rn); - for (i = 0; i < 8; i++) { - if (insn & (1 << i)) { - if (insn & (1 << 11)) { - /* load */ - tmp = tcg_temp_new_i32(tcg_ctx); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - if (i == rn) { - loaded_var = tmp; - } else { - store_reg(s, i, tmp); - } - } else { - /* store */ - tmp = load_reg(s, i); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tcg_ctx, tmp); - } - /* advance to the next address */ - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - } - } - if ((insn & (1 << rn)) == 0) { - /* base reg not in list: base register writeback */ - store_reg(s, rn, addr); - } else { - /* base reg in list: if load, complete it now */ - if (insn & (1 << 11)) { - store_reg(s, rn, loaded_var); - } - tcg_temp_free_i32(tcg_ctx, addr); - } - break; - } - case 13: - /* conditional branch or swi */ - cond = (insn >> 8) & 0xf; - if (cond == 0xe) - goto undef; - - if (cond == 0xf) { - /* swi */ - gen_set_pc_im(s, s->pc); - s->svc_imm = extract32(insn, 0, 8); - s->is_jmp = DISAS_SWI; - break; - } - /* generate a conditional jump to next instruction */ - s->condlabel = gen_new_label(tcg_ctx); - arm_gen_test_cc(tcg_ctx, cond ^ 1, s->condlabel); - s->condjmp = 1; - - /* jump to the offset */ - val = (uint32_t)s->pc + 2; - offset = ((int32_t)((uint32_t)insn << 24)) >> 24; - val += (int32_t)((uint32_t)offset << 1); - gen_jmp(s, val); - break; - - case 14: - if (insn & (1 << 11)) { - if (disas_thumb2_insn(env, s, insn)) - goto undef32; - break; - } - /* unconditional branch */ - val = (uint32_t)s->pc; - offset = ((int32_t)((uint32_t)insn << 21)) >> 21; - val += (int32_t)((uint32_t)offset << 1) + 2; - gen_jmp(s, val); - break; - - case 15: - if (disas_thumb2_insn(env, s, 
insn)) - goto undef32; - break; - } - - return; -undef32: - gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized()); - return; -illegal_op: -undef: - gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized()); -} - -/* generate intermediate code in gen_opc_buf and gen_opparam_buf for - basic block 'tb'. If search_pc is TRUE, also generate PC - information for each intermediate instruction. */ -static inline void gen_intermediate_code_internal(ARMCPU *cpu, - TranslationBlock *tb, - bool search_pc) -{ - CPUState *cs = CPU(cpu); - CPUARMState *env = &cpu->env; - DisasContext dc1, *dc = &dc1; - CPUBreakpoint *bp; - uint16_t *gen_opc_end; - int j, lj; - target_ulong pc_start; - target_ulong next_page_start; - int num_insns; - int max_insns; - TCGContext *tcg_ctx = env->uc->tcg_ctx; - bool block_full = false; - - /* generate intermediate code */ - - /* The A64 decoder has its own top level loop, because it doesn't need - * the A32/T32 complexity to do with conditional execution/IT blocks/etc. - */ - if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) { - gen_intermediate_code_internal_a64(cpu, tb, search_pc); - return; - } - - pc_start = tb->pc; - - dc->uc = env->uc; - dc->tb = tb; - - gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE; - - dc->is_jmp = DISAS_NEXT; - dc->pc = pc_start; - - dc->singlestep_enabled = cs->singlestep_enabled; - dc->condjmp = 0; - - dc->aarch64 = 0; - dc->thumb = ARM_TBFLAG_THUMB(tb->flags); // qq - dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags); - dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1; - dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4; -#if !defined(CONFIG_USER_ONLY) - dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0); -#endif - dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags); - dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags); - dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags); - dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags); - dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags); - dc->cp_regs = cpu->cp_regs; - dc->current_el = arm_current_el(env); - dc->features = env->features; - - /* Single step state. The code-generation logic here is: - * SS_ACTIVE == 0: - * generate code with no special handling for single-stepping (except - * that anything that can make us go to SS_ACTIVE == 1 must end the TB; - * this happens anyway because those changes are all system register or - * PSTATE writes). - * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) - * emit code for one insn - * emit code to clear PSTATE.SS - * emit code to generate software step exception for completed step - * end TB (as usual for having generated an exception) - * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) - * emit code to generate a software step exception - * end the TB - */ - dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags); - dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags); - dc->is_ldex = false; - dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */ - - tcg_ctx->cpu_F0s = tcg_temp_new_i32(tcg_ctx); - tcg_ctx->cpu_F1s = tcg_temp_new_i32(tcg_ctx); - tcg_ctx->cpu_F0d = tcg_temp_new_i64(tcg_ctx); - tcg_ctx->cpu_F1d = tcg_temp_new_i64(tcg_ctx); - tcg_ctx->cpu_V0 = tcg_ctx->cpu_F0d; - tcg_ctx->cpu_V1 = tcg_ctx->cpu_F1d; - /* FIXME: tcg_ctx->cpu_M0 can probably be the same as tcg_ctx->cpu_V0. 
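
The single-step comment above enumerates three cases. Condensed into one decision function as a sketch only; the real logic is distributed across the translator, and these names are hypothetical:

    #include <stdbool.h>

    typedef enum { STEP_OFF, STEP_EXC_NOW, STEP_ONE_INSN } step_action;

    static step_action swstep_action(bool ss_active, bool pstate_ss)
    {
        if (!ss_active) {
            return STEP_OFF;      /* translate normally */
        }
        if (pstate_ss) {
            return STEP_ONE_INSN; /* active-not-pending: emit one insn, clear
                                     PSTATE.SS, raise the step exception */
        }
        return STEP_EXC_NOW;      /* active-pending: raise it immediately */
    }
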
*/ - tcg_ctx->cpu_M0 = tcg_temp_new_i64(tcg_ctx); - next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; - lj = -1; - num_insns = 0; - max_insns = tb->cflags & CF_COUNT_MASK; - if (max_insns == 0) - max_insns = CF_COUNT_MASK; - - tcg_clear_temp_count(); - - // Unicorn: early check to see if the address of this block is the until address - if (tb->pc == env->uc->addr_end) { - // imitate WFI instruction to halt emulation - gen_tb_start(tcg_ctx); - dc->is_jmp = DISAS_WFI; - goto tb_end; - } - - // Unicorn: trace this block on request - // Only hook this block if it is not broken from previous translation due to - // full translation cache - if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) { - // save block address to see if we need to patch block size later - env->uc->block_addr = pc_start; - env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1; - gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start); - } else { - env->uc->size_arg = -1; - } - - gen_tb_start(tcg_ctx); - - /* A note on handling of the condexec (IT) bits: - * - * We want to avoid the overhead of having to write the updated condexec - * bits back to the CPUARMState for every instruction in an IT block. So: - * (1) if the condexec bits are not already zero then we write - * zero back into the CPUARMState now. This avoids complications trying - * to do it at the end of the block. (For example if we don't do this - * it's hard to identify whether we can safely skip writing condexec - * at the end of the TB, which we definitely want to do for the case - * where a TB doesn't do anything with the IT state at all.) - * (2) if we are going to leave the TB then we call gen_set_condexec() - * which will write the correct value into CPUARMState if zero is wrong. - * This is done both for leaving the TB at the end, and for leaving - * it because of an exception we know will happen, which is done in - * gen_exception_insn(). The latter is necessary because we need to - * leave the TB with the PC/IT state just prior to execution of the - * instruction which caused the exception. - * (3) if we leave the TB unexpectedly (eg a data abort on a load) - * then the CPUARMState will be wrong and we need to reset it. - * This is handled in the same way as restoration of the - * PC in these situations: we will be called again with search_pc=1 - * and generate a mapping of the condexec bits for each PC in - * gen_opc_condexec_bits[]. restore_state_to_opc() then uses - * this to restore the condexec bits. - * - * Note that there are no instructions which can read the condexec - * bits, and none which can write non-static values to them, so - * we don't need to care about whether CPUARMState is correct in the - * middle of a TB. - */ - - /* Reset the conditional execution bits immediately. This avoids - complications trying to do it at the end of the block. */ - if (dc->condexec_mask || dc->condexec_cond) - { - TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, tmp, 0); - store_cpu_field(tcg_ctx, tmp, condexec_bits); - } - do { - //printf(">>> arm pc = %x\n", dc->pc); -#ifdef CONFIG_USER_ONLY - /* Intercept jump to the magic kernel page. */ - if (dc->pc >= 0xffff0000) { - /* We always get here via a jump, so know we are not in a - conditional execution block. 
*/ - gen_exception_internal(dc, EXCP_KERNEL_TRAP); - dc->is_jmp = DISAS_UPDATE; - break; - } -#else - if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) { - /* We always get here via a jump, so know we are not in a - conditional execution block. */ - gen_exception_internal(dc, EXCP_EXCEPTION_EXIT); - dc->is_jmp = DISAS_UPDATE; - break; - } -#endif - - if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { - QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { - if (bp->pc == dc->pc) { - gen_exception_internal_insn(dc, 0, EXCP_DEBUG); - /* Advance PC so that clearing the breakpoint will - invalidate this TB. */ - dc->pc += 2; - goto done_generating; - } - } - } - if (search_pc) { - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - if (lj < j) { - lj++; - while (lj < j) - tcg_ctx->gen_opc_instr_start[lj++] = 0; - } - tcg_ctx->gen_opc_pc[lj] = dc->pc; - tcg_ctx->gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); - tcg_ctx->gen_opc_instr_start[lj] = 1; - //tcg_ctx->gen_opc_icount[lj] = num_insns; - } - - //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) - // gen_io_start(); - - if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { - tcg_gen_debug_insn_start(tcg_ctx, dc->pc); - } - - if (dc->ss_active && !dc->pstate_ss) { - /* Singlestep state is Active-pending. - * If we're in this state at the start of a TB then either - * a) we just took an exception to an EL which is being debugged - * and this is the first insn in the exception handler - * b) debug exceptions were masked and we just unmasked them - * without changing EL (eg by clearing PSTATE.D) - * In either case we're going to take a swstep exception in the - * "did not step an insn" case, and so the syndrome ISV and EX - * bits should be zero. - */ - assert(num_insns == 0); - gen_exception(dc, EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0)); - goto done_generating; - } - - if (dc->thumb) { // qq - disas_thumb_insn(env, dc); - if (dc->condexec_mask) { - dc->condexec_cond = (dc->condexec_cond & 0xe) - | ((dc->condexec_mask >> 4) & 1); - dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; - if (dc->condexec_mask == 0) { - dc->condexec_cond = 0; - } - } - } else { - unsigned int insn; - - // end address tells us to stop emulation - if (dc->pc == dc->uc->addr_end) { - // imitate WFI instruction to halt emulation - dc->is_jmp = DISAS_WFI; - } else { - insn = arm_ldl_code(env, dc->pc, dc->bswap_code); - dc->pc += 4; - disas_arm_insn(dc, insn); - } - } - - if (dc->condjmp && !dc->is_jmp) { - gen_set_label(tcg_ctx, dc->condlabel); - dc->condjmp = 0; - } - - if (tcg_check_temp_count()) { - fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n", - dc->pc); - } - - /* Translation stops when a conditional branch is encountered. - * Otherwise the subsequent code could get translated several times. - * Also stop translation when a page boundary is reached. This - * ensures prefetch aborts occur at the right place. */ - num_insns ++; - } while (!dc->is_jmp && tcg_ctx->gen_opc_ptr < gen_opc_end && - !cs->singlestep_enabled && - !dc->ss_active && - dc->pc < next_page_start && - num_insns < max_insns); - - if (tb->cflags & CF_LAST_IO) { - if (dc->condjmp) { - /* FIXME: This can theoretically happen with self-modifying - code. 
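
After each Thumb instruction the loop above advances the IT-block state: the next then/else bit is pulled from the top of the 5-bit mask into the condition's LSB, and the block ends once the mask's marker bit has been shifted out. The same two lines as a stand-alone sketch (it_advance is a hypothetical name):

    #include <stdint.h>

    static void it_advance(uint32_t *cond, uint32_t *mask)
    {
        *cond = (*cond & 0xe) | ((*mask >> 4) & 1); /* next then/else bit */
        *mask = (*mask << 1) & 0x1f;
        if (*mask == 0) {
            *cond = 0;                              /* left the IT block */
        }
    }
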
*/ - cpu_abort(cs, "IO on conditional branch instruction"); - } - //gen_io_end(); - } - - /* if too long translation, save this info */ - if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns) { - block_full = true; - } - -tb_end: - - /* At this stage dc->condjmp will only be set when the skipped - instruction was a conditional branch or trap, and the PC has - already been written. */ - if (unlikely(cs->singlestep_enabled || dc->ss_active)) { - /* Make sure the pc is updated, and raise a debug exception. */ - if (dc->condjmp) { - gen_set_condexec(dc); - if (dc->is_jmp == DISAS_SWI) { - gen_ss_advance(dc); - gen_exception(dc, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); - } else if (dc->is_jmp == DISAS_HVC) { - gen_ss_advance(dc); - gen_exception(dc, EXCP_HVC, syn_aa32_hvc(dc->svc_imm)); - } else if (dc->is_jmp == DISAS_SMC) { - gen_ss_advance(dc); - gen_exception(dc, EXCP_SMC, syn_aa32_smc()); - } else if (dc->ss_active) { - gen_step_complete_exception(dc); - } else { - gen_exception_internal(dc, EXCP_DEBUG); - } - gen_set_label(tcg_ctx, dc->condlabel); - } - if (dc->condjmp || !dc->is_jmp) { - gen_set_pc_im(dc, dc->pc); - dc->condjmp = 0; - } - gen_set_condexec(dc); - if (dc->is_jmp == DISAS_SWI && !dc->condjmp) { - gen_ss_advance(dc); - gen_exception(dc, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); - } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) { - gen_ss_advance(dc); - gen_exception(dc, EXCP_HVC, syn_aa32_hvc(dc->svc_imm)); - } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) { - gen_ss_advance(dc); - gen_exception(dc, EXCP_SMC, syn_aa32_smc()); - } else if (dc->ss_active) { - gen_step_complete_exception(dc); - } else { - /* FIXME: Single stepping a WFI insn will not halt - the CPU. */ - gen_exception_internal(dc, EXCP_DEBUG); - } - } else { - /* While branches must always occur at the end of an IT block, - there are a few other things that can cause us to terminate - the TB in the middle of an IT block: - - Exception generating instructions (bkpt, swi, undefined). - - Page boundaries. - - Hardware watchpoints. - Hardware breakpoints have already been handled and skip this code. 
- */ - gen_set_condexec(dc); - switch(dc->is_jmp) { - case DISAS_NEXT: - gen_goto_tb(dc, 1, dc->pc); - break; - default: - case DISAS_JUMP: - case DISAS_UPDATE: - /* indicate that the hash table must be used to find the next TB */ - tcg_gen_exit_tb(tcg_ctx, 0); - break; - case DISAS_TB_JUMP: - /* nothing more to generate */ - break; - case DISAS_WFI: - gen_helper_wfi(tcg_ctx, tcg_ctx->cpu_env); - break; - case DISAS_WFE: - gen_helper_wfe(tcg_ctx, tcg_ctx->cpu_env); - break; - case DISAS_SWI: - gen_exception(dc, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); - break; - case DISAS_HVC: - gen_exception(dc, EXCP_HVC, syn_aa32_hvc(dc->svc_imm)); - break; - case DISAS_SMC: - gen_exception(dc, EXCP_SMC, syn_aa32_smc()); - break; - } - if (dc->condjmp) { - gen_set_label(tcg_ctx, dc->condlabel); - gen_set_condexec(dc); - gen_goto_tb(dc, 1, dc->pc); - dc->condjmp = 0; - } - } - -done_generating: - gen_tb_end(tcg_ctx, tb, num_insns); - *tcg_ctx->gen_opc_ptr = INDEX_op_end; - - if (search_pc) { - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - lj++; - while (lj <= j) - tcg_ctx->gen_opc_instr_start[lj++] = 0; - } else { - tb->size = dc->pc - pc_start; - //tb->icount = num_insns; - } - - env->uc->block_full = block_full; -} - -void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb) -{ - gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false); -} - -void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb) -{ - gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true); -} - -#if 0 -static const char *cpu_mode_names[16] = { - "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt", - "???", "???", "hyp", "und", "???", "???", "???", "sys" -}; - -void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, - int flags) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - int i; - uint32_t psr; - - if (is_a64(env)) { - aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags); - return; - } - - for(i=0;i<16;i++) { - cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]); - if ((i % 4) == 3) - cpu_fprintf(f, "\n"); - else - cpu_fprintf(f, " "); - } - psr = cpsr_read(env); - cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n", - psr, - psr & (1 << 31) ? 'N' : '-', - psr & (1 << 30) ? 'Z' : '-', - psr & (1 << 29) ? 'C' : '-', - psr & (1 << 28) ? 'V' : '-', - psr & CPSR_T ? 'T' : 'A', - cpu_mode_names[psr & 0xf], (psr & 0x10) ? 
32 : 26); - - if (flags & CPU_DUMP_FPU) { - int numvfpregs = 0; - if (arm_feature(env, ARM_FEATURE_VFP)) { - numvfpregs += 16; - } - if (arm_feature(env, ARM_FEATURE_VFP3)) { - numvfpregs += 16; - } - for (i = 0; i < numvfpregs; i++) { - uint64_t v = float64_val(env->vfp.regs[i]); - cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n", - i * 2, (uint32_t)v, - i * 2 + 1, (uint32_t)(v >> 32), - i, v); - } - cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]); - } -} -#endif - -void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - if (is_a64(env)) { - env->pc = tcg_ctx->gen_opc_pc[pc_pos]; - env->condexec_bits = 0; - } else { - env->regs[15] = tcg_ctx->gen_opc_pc[pc_pos]; - env->condexec_bits = tcg_ctx->gen_opc_condexec_bits[pc_pos]; - } -} diff --git a/qemu/target-arm/translate.h b/qemu/target-arm/translate.h deleted file mode 100644 index 44b160f1..00000000 --- a/qemu/target-arm/translate.h +++ /dev/null @@ -1,116 +0,0 @@ -#ifndef TARGET_ARM_TRANSLATE_H -#define TARGET_ARM_TRANSLATE_H - -/* internal defines */ -typedef struct DisasContext { - target_ulong pc; - uint32_t insn; - int is_jmp; - /* Nonzero if this instruction has been conditionally skipped. */ - int condjmp; - /* The label that will be jumped to when the instruction is skipped. */ - int condlabel; - /* Thumb-2 conditional execution bits. */ - int condexec_mask; - int condexec_cond; - struct TranslationBlock *tb; - int singlestep_enabled; - int thumb; - int bswap_code; -#if !defined(CONFIG_USER_ONLY) - int user; -#endif - bool cpacr_fpen; /* FP enabled via CPACR.FPEN */ - bool vfp_enabled; /* FP enabled via FPSCR.EN */ - int vec_len; - int vec_stride; - /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI - * so that top level loop can generate correct syndrome information. - */ - uint32_t svc_imm; - int aarch64; - int current_el; - GHashTable *cp_regs; - uint64_t features; /* CPU features bits */ - /* Because unallocated encodings generate different exception syndrome - * information from traps due to FP being disabled, we can't do a single - * "is fp access disabled" check at a high level in the decode tree. - * To help in catching bugs where the access check was forgotten in some - * code path, we set this flag when the access check is done, and assert - * that it is set at the point where we actually touch the FP regs. - */ - bool fp_access_checked; - /* ARMv8 single-step state (this is distinct from the QEMU gdbstub - * single-step support). - */ - bool ss_active; - bool pstate_ss; - /* True if the insn just emitted was a load-exclusive instruction - * (necessary for syndrome information for single step exceptions), - * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*. - */ - bool is_ldex; - /* True if a single-step exception will be taken to the current EL */ - bool ss_same_el; - /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ - int c15_cpar; -#define TMP_A64_MAX 16 - int tmp_a64_count; - TCGv_i64 tmp_a64[TMP_A64_MAX]; - - // Unicorn engine - struct uc_struct *uc; -} DisasContext; - - -static inline int arm_dc_feature(DisasContext *dc, int feature) -{ - return (dc->features & (1ULL << feature)) != 0; -} - -static inline int get_mem_index(DisasContext *s) -{ - return s->current_el; -} - -/* target-specific extra values for is_jmp */ -/* These instructions trap after executing, so the A32/T32 decoder must - * defer them until after the conditional execution state has been updated. 
- * WFI also needs special handling when single-stepping. - */ -#define DISAS_WFI 4 -#define DISAS_SWI 5 -/* For instructions which unconditionally cause an exception we can skip - * emitting unreachable code at the end of the TB in the A64 decoder - */ -#define DISAS_EXC 6 -/* WFE */ -#define DISAS_WFE 7 -#define DISAS_HVC 8 -#define DISAS_SMC 9 - -#ifdef TARGET_AARCH64 -void a64_translate_init(struct uc_struct *uc); -void gen_intermediate_code_internal_a64(ARMCPU *cpu, - TranslationBlock *tb, - bool search_pc); -void gen_a64_set_pc_im(DisasContext *s, uint64_t val); -#else -static inline void a64_translate_init(struct uc_struct *uc) -{ -} - -static inline void gen_intermediate_code_internal_a64(ARMCPU *cpu, - TranslationBlock *tb, - bool search_pc) -{ -} - -static inline void gen_a64_set_pc_im(uint64_t val) -{ -} -#endif - -void arm_gen_test_cc(TCGContext *tcg_ctx, int cc, int label); - -#endif /* TARGET_ARM_TRANSLATE_H */ diff --git a/qemu/target-arm/unicorn_aarch64.c b/qemu/target-arm/unicorn_aarch64.c deleted file mode 100644 index 42bd7cc3..00000000 --- a/qemu/target-arm/unicorn_aarch64.c +++ /dev/null @@ -1,248 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh , 2015 */ - -#include "hw/boards.h" -#include "hw/arm/arm.h" -#include "sysemu/cpus.h" -#include "unicorn.h" -#include "cpu.h" -#include "unicorn_common.h" -#include "uc_priv.h" - - -const int ARM64_REGS_STORAGE_SIZE = offsetof(CPUARMState, tlb_table); - -static void arm64_set_pc(struct uc_struct *uc, uint64_t address) -{ - ((CPUARMState *)uc->current_cpu->env_ptr)->pc = address; -} - -void arm64_release(void* ctx); - -void arm64_release(void* ctx) -{ - struct uc_struct* uc; - ARMCPU* cpu; - TCGContext *s = (TCGContext *) ctx; - - g_free(s->tb_ctx.tbs); - uc = s->uc; - cpu = (ARMCPU*) uc->cpu; - g_free(cpu->cpreg_indexes); - g_free(cpu->cpreg_values); - g_free(cpu->cpreg_vmstate_indexes); - g_free(cpu->cpreg_vmstate_values); - - release_common(ctx); -} - -void arm64_reg_reset(struct uc_struct *uc) -{ - CPUArchState *env = uc->cpu->env_ptr; - memset(env->xregs, 0, sizeof(env->xregs)); - - env->pc = 0; -} - -int arm64_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - void *value = vals[i]; - // V & Q registers are the same - if (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31) { - regid += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0; - } - if (regid >= UC_ARM64_REG_X0 && regid <= UC_ARM64_REG_X28) { - *(int64_t *)value = ARM_CPU(uc, mycpu)->env.xregs[regid - UC_ARM64_REG_X0]; - } else if (regid >= UC_ARM64_REG_W0 && regid <= UC_ARM64_REG_W30) { - *(int32_t *)value = READ_DWORD(ARM_CPU(uc, mycpu)->env.xregs[regid - UC_ARM64_REG_W0]); - } else if (regid >= UC_ARM64_REG_Q0 && regid <= UC_ARM64_REG_Q31) { - float64 *dst = (float64*) value; - uint32_t reg_index = 2*(regid - UC_ARM64_REG_Q0); - dst[0] = ARM_CPU(uc, mycpu)->env.vfp.regs[reg_index]; - dst[1] = ARM_CPU(uc, mycpu)->env.vfp.regs[reg_index+1]; - } else if (regid >= UC_ARM64_REG_D0 && regid <= UC_ARM64_REG_D31) { - *(float64*)value = ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_D0)]; - } else if (regid >= UC_ARM64_REG_S0 && regid <= UC_ARM64_REG_S31) { - *(int32_t*)value = READ_DWORD(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_S0)]); - } else if (regid >= UC_ARM64_REG_H0 && regid <= UC_ARM64_REG_H31) { - *(int16_t*)value = READ_WORD(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_H0)]); - } else if 
(regid >= UC_ARM64_REG_B0 && regid <= UC_ARM64_REG_B31) { - *(int8_t*)value = READ_BYTE_L(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_B0)]); - } else if (regid >= UC_ARM64_REG_ELR_EL0 && regid <= UC_ARM64_REG_ELR_EL3) { - *(uint64_t*)value = ARM_CPU(uc, mycpu)->env.elr_el[regid - UC_ARM64_REG_ELR_EL0]; - } else if (regid >= UC_ARM64_REG_SP_EL0 && regid <= UC_ARM64_REG_SP_EL3) { - *(uint64_t*)value = ARM_CPU(uc, mycpu)->env.sp_el[regid - UC_ARM64_REG_SP_EL0]; - } else if (regid >= UC_ARM64_REG_ESR_EL0 && regid <= UC_ARM64_REG_ESR_EL3) { - *(uint64_t*)value = ARM_CPU(uc, mycpu)->env.cp15.esr_el[regid - UC_ARM64_REG_ESR_EL0]; - } else if (regid >= UC_ARM64_REG_FAR_EL0 && regid <= UC_ARM64_REG_FAR_EL3) { - *(uint64_t*)value = ARM_CPU(uc, mycpu)->env.cp15.far_el[regid - UC_ARM64_REG_FAR_EL0]; - } else if (regid >= UC_ARM64_REG_VBAR_EL0 && regid <= UC_ARM64_REG_VBAR_EL3) { - *(uint64_t*)value = ARM_CPU(uc, mycpu)->env.cp15.vbar_el[regid - UC_ARM64_REG_VBAR_EL0]; - } else { - switch(regid) { - default: break; - case UC_ARM64_REG_CPACR_EL1: - *(uint32_t *)value = ARM_CPU(uc, mycpu)->env.cp15.c1_coproc; - break; - case UC_ARM64_REG_TPIDR_EL0: - *(int64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.tpidr_el0; - break; - case UC_ARM64_REG_TPIDRRO_EL0: - *(int64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.tpidrro_el0; - break; - case UC_ARM64_REG_TPIDR_EL1: - *(int64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.tpidr_el1; - break; - case UC_ARM64_REG_X29: - *(int64_t *)value = ARM_CPU(uc, mycpu)->env.xregs[29]; - break; - case UC_ARM64_REG_X30: - *(int64_t *)value = ARM_CPU(uc, mycpu)->env.xregs[30]; - break; - case UC_ARM64_REG_PC: - *(uint64_t *)value = ARM_CPU(uc, mycpu)->env.pc; - break; - case UC_ARM64_REG_SP: - *(int64_t *)value = ARM_CPU(uc, mycpu)->env.xregs[31]; - break; - case UC_ARM64_REG_NZCV: - *(int32_t *)value = cpsr_read(&ARM_CPU(uc, mycpu)->env) & CPSR_NZCV; - break; - case UC_ARM64_REG_PSTATE: - *(uint32_t *)value = pstate_read(&ARM_CPU(uc, mycpu)->env); - break; - case UC_ARM64_REG_TTBR0_EL1: - *(uint64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.ttbr0_el1; - break; - case UC_ARM64_REG_TTBR1_EL1: - *(uint64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.ttbr1_el1; - break; - case UC_ARM64_REG_PAR_EL1: - *(uint64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.par_el1; - break; - case UC_ARM64_REG_MAIR_EL1: - *(uint64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.mair_el1; - break; - } - } - } - - return 0; -} - -int arm64_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - const void *value = vals[i]; - if (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31) { - regid += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0; - } - if (regid >= UC_ARM64_REG_X0 && regid <= UC_ARM64_REG_X28) { - ARM_CPU(uc, mycpu)->env.xregs[regid - UC_ARM64_REG_X0] = *(uint64_t *)value; - } else if (regid >= UC_ARM64_REG_W0 && regid <= UC_ARM64_REG_W30) { - WRITE_DWORD(ARM_CPU(uc, mycpu)->env.xregs[regid - UC_ARM64_REG_W0], *(uint32_t *)value); - } else if (regid >= UC_ARM64_REG_Q0 && regid <= UC_ARM64_REG_Q31) { - float64 *src = (float64*) value; - uint32_t reg_index = 2*(regid - UC_ARM64_REG_Q0); - ARM_CPU(uc, mycpu)->env.vfp.regs[reg_index] = src[0]; - ARM_CPU(uc, mycpu)->env.vfp.regs[reg_index+1] = src[1]; - } else if (regid >= UC_ARM64_REG_D0 && regid <= UC_ARM64_REG_D31) { - ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_D0)] = * (float64*) value; - } else if (regid >= UC_ARM64_REG_S0 
&& regid <= UC_ARM64_REG_S31) { - WRITE_DWORD(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_S0)], *(int32_t*) value); - } else if (regid >= UC_ARM64_REG_H0 && regid <= UC_ARM64_REG_H31) { - WRITE_WORD(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_H0)], *(int16_t*) value); - } else if (regid >= UC_ARM64_REG_B0 && regid <= UC_ARM64_REG_B31) { - WRITE_BYTE_L(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_B0)], *(int8_t*) value); - } else if (regid >= UC_ARM64_REG_ELR_EL0 && regid <= UC_ARM64_REG_ELR_EL3) { - ARM_CPU(uc, mycpu)->env.elr_el[regid - UC_ARM64_REG_ELR_EL0] = *(uint64_t*)value; - } else if (regid >= UC_ARM64_REG_SP_EL0 && regid <= UC_ARM64_REG_SP_EL3) { - ARM_CPU(uc, mycpu)->env.sp_el[regid - UC_ARM64_REG_SP_EL0] = *(uint64_t*)value; - } else if (regid >= UC_ARM64_REG_ESR_EL0 && regid <= UC_ARM64_REG_ESR_EL3) { - ARM_CPU(uc, mycpu)->env.cp15.esr_el[regid - UC_ARM64_REG_ESR_EL0] = *(uint64_t*)value; - } else if (regid >= UC_ARM64_REG_FAR_EL0 && regid <= UC_ARM64_REG_FAR_EL3) { - ARM_CPU(uc, mycpu)->env.cp15.far_el[regid - UC_ARM64_REG_FAR_EL0] = *(uint64_t*)value; - } else if (regid >= UC_ARM64_REG_VBAR_EL0 && regid <= UC_ARM64_REG_VBAR_EL3) { - ARM_CPU(uc, mycpu)->env.cp15.vbar_el[regid - UC_ARM64_REG_VBAR_EL0] = *(uint64_t*)value; - } else { - switch(regid) { - default: break; - case UC_ARM64_REG_CPACR_EL1: - ARM_CPU(uc, mycpu)->env.cp15.c1_coproc = *(uint32_t *)value; - break; - case UC_ARM64_REG_TPIDR_EL0: - ARM_CPU(uc, mycpu)->env.cp15.tpidr_el0 = *(uint64_t *)value; - break; - case UC_ARM64_REG_TPIDRRO_EL0: - ARM_CPU(uc, mycpu)->env.cp15.tpidrro_el0 = *(uint64_t *)value; - break; - case UC_ARM64_REG_TPIDR_EL1: - ARM_CPU(uc, mycpu)->env.cp15.tpidr_el1 = *(uint64_t *)value; - break; - case UC_ARM64_REG_X29: - ARM_CPU(uc, mycpu)->env.xregs[29] = *(uint64_t *)value; - break; - case UC_ARM64_REG_X30: - ARM_CPU(uc, mycpu)->env.xregs[30] = *(uint64_t *)value; - break; - case UC_ARM64_REG_PC: - ARM_CPU(uc, mycpu)->env.pc = *(uint64_t *)value; - // force to quit execution and flush TB - uc->quit_request = true; - uc_emu_stop(uc); - break; - case UC_ARM64_REG_SP: - ARM_CPU(uc, mycpu)->env.xregs[31] = *(uint64_t *)value; - break; - case UC_ARM64_REG_NZCV: - cpsr_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value, CPSR_NZCV); - break; - case UC_ARM64_REG_PSTATE: - pstate_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value); - break; - case UC_ARM64_REG_TTBR0_EL1: - ARM_CPU(uc, mycpu)->env.cp15.ttbr0_el1 = *(uint64_t *)value; - break; - case UC_ARM64_REG_TTBR1_EL1: - ARM_CPU(uc, mycpu)->env.cp15.ttbr1_el1 = *(uint64_t *)value; - break; - case UC_ARM64_REG_PAR_EL1: - ARM_CPU(uc, mycpu)->env.cp15.par_el1 = *(uint64_t *)value; - break; - case UC_ARM64_REG_MAIR_EL1: - ARM_CPU(uc, mycpu)->env.cp15.mair_el1 = *(uint64_t *)value; - break; - } - } - } - - return 0; -} - -DEFAULT_VISIBILITY -#ifdef TARGET_WORDS_BIGENDIAN -void arm64eb_uc_init(struct uc_struct* uc) -#else -void arm64_uc_init(struct uc_struct* uc) -#endif -{ - register_accel_types(uc); - arm_cpu_register_types(uc); - aarch64_cpu_register_types(uc); - machvirt_machine_init(uc); - uc->reg_read = arm64_reg_read; - uc->reg_write = arm64_reg_write; - uc->reg_reset = arm64_reg_reset; - uc->set_pc = arm64_set_pc; - uc->release = arm64_release; - uc_common_init(uc); -} diff --git a/qemu/target-arm/unicorn_arm.c b/qemu/target-arm/unicorn_arm.c deleted file mode 100644 index 5ff9ebbe..00000000 --- a/qemu/target-arm/unicorn_arm.c +++ /dev/null @@ -1,243 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh 
Quynh <aquynh@gmail.com>, 2015 */ - -#include "hw/boards.h" -#include "hw/arm/arm.h" -#include "sysemu/cpus.h" -#include "unicorn.h" -#include "cpu.h" -#include "unicorn_common.h" -#include "uc_priv.h" - -const int ARM_REGS_STORAGE_SIZE = offsetof(CPUARMState, tlb_table); - -static void arm_set_pc(struct uc_struct *uc, uint64_t address) -{ - ((CPUARMState *)uc->current_cpu->env_ptr)->pc = address; - ((CPUARMState *)uc->current_cpu->env_ptr)->regs[15] = address; -} - -void arm_release(void* ctx); - -void arm_release(void* ctx) -{ - ARMCPU* cpu; - struct uc_struct* uc; - TCGContext *s = (TCGContext *) ctx; - - g_free(s->tb_ctx.tbs); - uc = s->uc; - cpu = (ARMCPU*) uc->cpu; - g_free(cpu->cpreg_indexes); - g_free(cpu->cpreg_values); - g_free(cpu->cpreg_vmstate_indexes); - g_free(cpu->cpreg_vmstate_values); - - release_common(ctx); -} - -void arm_reg_reset(struct uc_struct *uc) -{ - CPUArchState *env; - (void)uc; - - env = uc->cpu->env_ptr; - memset(env->regs, 0, sizeof(env->regs)); - - env->pc = 0; -} - -/* these functions are implemented in helper.c. */ -#include "exec/helper-head.h" -uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg); -void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val); - -int arm_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) -{ - CPUState *mycpu; - int i; - - mycpu = uc->cpu; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - void *value = vals[i]; - if (regid >= UC_ARM_REG_R0 && regid <= UC_ARM_REG_R12) - *(int32_t *)value = ARM_CPU(uc, mycpu)->env.regs[regid - UC_ARM_REG_R0]; - else if (regid >= UC_ARM_REG_D0 && regid <= UC_ARM_REG_D31) - *(float64 *)value = ARM_CPU(uc, mycpu)->env.vfp.regs[regid - UC_ARM_REG_D0]; - else { - switch(regid) { - case UC_ARM_REG_APSR: - *(int32_t *)value = cpsr_read(&ARM_CPU(uc, mycpu)->env) & (CPSR_NZCV | CPSR_Q | CPSR_GE); - break; - case UC_ARM_REG_APSR_NZCV: - *(int32_t *)value = cpsr_read(&ARM_CPU(uc, mycpu)->env) & CPSR_NZCV; - break; - case UC_ARM_REG_CPSR: - *(int32_t *)value = cpsr_read(&ARM_CPU(uc, mycpu)->env); - break; - case UC_ARM_REG_SPSR: - *(int32_t *)value = ARM_CPU(uc, mycpu)->env.spsr; - break; - //case UC_ARM_REG_SP: - case UC_ARM_REG_R13: - *(int32_t *)value = ARM_CPU(uc, mycpu)->env.regs[13]; - break; - //case UC_ARM_REG_LR: - case UC_ARM_REG_R14: - *(int32_t *)value = ARM_CPU(uc, mycpu)->env.regs[14]; - break; - //case UC_ARM_REG_PC: - case UC_ARM_REG_R15: - *(int32_t *)value = ARM_CPU(uc, mycpu)->env.regs[15]; - break; - case UC_ARM_REG_C1_C0_2: - *(int32_t *)value = ARM_CPU(uc, mycpu)->env.cp15.c1_coproc; - break; - case UC_ARM_REG_C13_C0_3: - *(int32_t *)value = ARM_CPU(uc, mycpu)->env.cp15.tpidrro_el0; - break; - case UC_ARM_REG_FPEXC: - *(int32_t *)value = ARM_CPU(uc, mycpu)->env.vfp.xregs[ARM_VFP_FPEXC]; - break; - case UC_ARM_REG_IPSR: - *(uint32_t *)value = xpsr_read(&ARM_CPU(uc, mycpu)->env) & 0x1ff; - break; - case UC_ARM_REG_MSP: - *(uint32_t *)value = helper_v7m_mrs(&ARM_CPU(uc, mycpu)->env, 8); - break; - case UC_ARM_REG_PSP: - *(uint32_t *)value = helper_v7m_mrs(&ARM_CPU(uc, mycpu)->env, 9); - break; - case UC_ARM_REG_CONTROL: - *(uint32_t *)value = helper_v7m_mrs(&ARM_CPU(uc, mycpu)->env, 20); - break; - } - } - } - - return 0; -} - -int arm_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - const void *value = vals[i]; - if (regid >= UC_ARM_REG_R0 && regid <= UC_ARM_REG_R12) - ARM_CPU(uc, mycpu)->env.regs[regid -
UC_ARM_REG_R0] = *(uint32_t *)value; - else if (regid >= UC_ARM_REG_D0 && regid <= UC_ARM_REG_D31) - ARM_CPU(uc, mycpu)->env.vfp.regs[regid - UC_ARM_REG_D0] = *(float64 *)value; - else { - switch(regid) { - case UC_ARM_REG_APSR: - cpsr_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value, (CPSR_NZCV | CPSR_Q | CPSR_GE)); - break; - case UC_ARM_REG_APSR_NZCV: - cpsr_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value, CPSR_NZCV); - break; - case UC_ARM_REG_CPSR: - cpsr_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value, ~0); - break; - case UC_ARM_REG_SPSR: - ARM_CPU(uc, mycpu)->env.spsr = *(uint32_t *)value; - break; - //case UC_ARM_REG_SP: - case UC_ARM_REG_R13: - ARM_CPU(uc, mycpu)->env.regs[13] = *(uint32_t *)value; - break; - //case UC_ARM_REG_LR: - case UC_ARM_REG_R14: - ARM_CPU(uc, mycpu)->env.regs[14] = *(uint32_t *)value; - break; - //case UC_ARM_REG_PC: - case UC_ARM_REG_R15: - ARM_CPU(uc, mycpu)->env.pc = (*(uint32_t *)value & ~1); - ARM_CPU(uc, mycpu)->env.thumb = (*(uint32_t *)value & 1); - ARM_CPU(uc, mycpu)->env.uc->thumb = (*(uint32_t *)value & 1); - ARM_CPU(uc, mycpu)->env.regs[15] = (*(uint32_t *)value & ~1); - // force to quit execution and flush TB - uc->quit_request = true; - uc_emu_stop(uc); - - break; - case UC_ARM_REG_C1_C0_2: - ARM_CPU(uc, mycpu)->env.cp15.c1_coproc = *(int32_t *)value; - break; - - case UC_ARM_REG_C13_C0_3: - ARM_CPU(uc, mycpu)->env.cp15.tpidrro_el0 = *(int32_t *)value; - break; - case UC_ARM_REG_FPEXC: - ARM_CPU(uc, mycpu)->env.vfp.xregs[ARM_VFP_FPEXC] = *(int32_t *)value; - break; - case UC_ARM_REG_IPSR: - xpsr_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value, 0x1ff); - break; - case UC_ARM_REG_MSP: - helper_v7m_msr(&ARM_CPU(uc, mycpu)->env, 8, *(uint32_t *)value); - break; - case UC_ARM_REG_PSP: - helper_v7m_msr(&ARM_CPU(uc, mycpu)->env, 9, *(uint32_t *)value); - break; - case UC_ARM_REG_CONTROL: - helper_v7m_msr(&ARM_CPU(uc, mycpu)->env, 20, *(uint32_t *)value); - break; - } - } - } - - return 0; -} - -static bool arm_stop_interrupt(int intno) -{ - switch(intno) { - default: - return false; - case EXCP_UDEF: - case EXCP_YIELD: - return true; - } -} - -static uc_err arm_query(struct uc_struct *uc, uc_query_type type, size_t *result) -{ - CPUState *mycpu = uc->cpu; - uint32_t mode; - - switch(type) { - case UC_QUERY_MODE: - // zero out ARM/THUMB mode - mode = uc->mode & ~(UC_MODE_ARM | UC_MODE_THUMB); - // THUMB mode or ARM mode - mode |= ((ARM_CPU(uc, mycpu)->env.thumb != 0)?
UC_MODE_THUMB : UC_MODE_ARM); - *result = mode; - return UC_ERR_OK; - default: - return UC_ERR_ARG; - } -} - -#ifdef TARGET_WORDS_BIGENDIAN -void armeb_uc_init(struct uc_struct* uc) -#else -void arm_uc_init(struct uc_struct* uc) -#endif -{ - register_accel_types(uc); - arm_cpu_register_types(uc); - tosa_machine_init(uc); - uc->reg_read = arm_reg_read; - uc->reg_write = arm_reg_write; - uc->reg_reset = arm_reg_reset; - uc->set_pc = arm_set_pc; - uc->stop_interrupt = arm_stop_interrupt; - uc->release = arm_release; - uc->query = arm_query; - uc_common_init(uc); -} diff --git a/qemu/target-i386/Makefile.objs b/qemu/target-i386/Makefile.objs deleted file mode 100644 index 98cb3e31..00000000 --- a/qemu/target-i386/Makefile.objs +++ /dev/null @@ -1,5 +0,0 @@ -obj-y += translate.o helper.o cpu.o -obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o -obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o -obj-$(CONFIG_SOFTMMU) += arch_memory_mapping.o -obj-y += unicorn.o diff --git a/qemu/target-i386/cpu-qom.h b/qemu/target-i386/cpu-qom.h deleted file mode 100644 index 5d19fc44..00000000 --- a/qemu/target-i386/cpu-qom.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * QEMU x86 CPU - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - * <http://www.gnu.org/licenses/> - */ -#ifndef QEMU_I386_CPU_QOM_H -#define QEMU_I386_CPU_QOM_H - -#include "qom/cpu.h" -#include "cpu.h" -#include "qapi/error.h" - -#ifdef TARGET_X86_64 -#define TYPE_X86_CPU "x86_64-cpu" -#else -#define TYPE_X86_CPU "i386-cpu" -#endif - -#define X86_CPU_CLASS(uc, klass) \ - OBJECT_CLASS_CHECK(uc, X86CPUClass, (klass), TYPE_X86_CPU) -#define X86_CPU(uc, obj) ((X86CPU *)obj) -#define X86_CPU_GET_CLASS(uc, obj) \ - OBJECT_GET_CLASS(uc, X86CPUClass, (obj), TYPE_X86_CPU) - -/** - * X86CPUDefinition: - * - * CPU model definition data that was not converted to QOM per-subclass - * property defaults yet. - */ -typedef struct X86CPUDefinition X86CPUDefinition; - -/** - * X86CPUClass: - * @cpu_def: CPU model definition - * @kvm_required: Whether CPU model requires KVM to be enabled. - * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. - * - * An x86 CPU model or family. - */ -typedef struct X86CPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - /* Should be eventually replaced by subclass-specific property defaults. */ - X86CPUDefinition *cpu_def; - - bool kvm_required; - - DeviceRealize parent_realize; - void (*parent_reset)(CPUState *cpu); -} X86CPUClass; - -/** - * X86CPU: - * @env: #CPUX86State - * @migratable: If set, only migratable flags will be accepted when "enforce" - * mode is used, and only migratable flags will be included in the "host" - * CPU model. - * - * An x86 CPU.
- */ -typedef struct X86CPU { - /*< private >*/ - CPUState parent_obj; - /*< public >*/ - - CPUX86State env; - - bool hyperv_vapic; - bool hyperv_relaxed_timing; - int hyperv_spinlock_attempts; - bool hyperv_time; - bool check_cpuid; - bool enforce_cpuid; - bool expose_kvm; - bool migratable; - bool host_features; - - /* if true the CPUID code directly forward host cache leaves to the guest */ - bool cache_info_passthrough; - - /* Features that were filtered out because of missing host capabilities */ - uint32_t filtered_features[FEATURE_WORDS]; - - /* Enable PMU CPUID bits. This can't be enabled by default yet because - * it doesn't have ABI stability guarantees, as it passes all PMU CPUID - * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel - * capabilities) directly to the guest. - */ - bool enable_pmu; - - /* in order to simplify APIC support, we leave this pointer to the - user */ - struct DeviceState *apic_state; -} X86CPU; - -static inline X86CPU *x86_env_get_cpu(CPUX86State *env) -{ - return container_of(env, X86CPU, env); -} - -#define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e)) - -#define ENV_OFFSET offsetof(X86CPU, env) - -#ifndef CONFIG_USER_ONLY -extern struct VMStateDescription vmstate_x86_cpu; -#endif - -/** - * x86_cpu_do_interrupt: - * @cpu: vCPU the interrupt is to be handled by. - */ -void x86_cpu_do_interrupt(CPUState *cpu); -bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); - -int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, - int cpuid, void *opaque); -int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, - int cpuid, void *opaque); -int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, - void *opaque); -int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, - void *opaque); - -void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, - Error **errp); - -void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, - int flags); - -hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); - -int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); -int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); - -void x86_cpu_exec_enter(CPUState *cpu); -void x86_cpu_exec_exit(CPUState *cpu); - -#endif diff --git a/qemu/target-i386/cpu.c b/qemu/target-i386/cpu.c deleted file mode 100644 index 7f574a3f..00000000 --- a/qemu/target-i386/cpu.c +++ /dev/null @@ -1,2633 +0,0 @@ -/* - * i386 CPUID helper functions - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */ -#include <stdlib.h> -#include <stdio.h> -#include <string.h> -#include "unicorn/platform.h" - -#include "cpu.h" -#include "sysemu/cpus.h" -#include "topology.h" - -#include "qapi/qmp/qerror.h" - -#include "qapi-types.h" -#include "qapi-visit.h" -#include "qapi/visitor.h" - -#include "hw/hw.h" - -#include "sysemu/sysemu.h" -#include "hw/cpu/icc_bus.h" -#ifndef CONFIG_USER_ONLY -#include "hw/i386/apic_internal.h" -#endif - -/* Cache topology CPUID constants: */ - -/* CPUID Leaf 2 Descriptors */ - -#define CPUID_2_L1D_32KB_8WAY_64B 0x2c -#define CPUID_2_L1I_32KB_8WAY_64B 0x30 -#define CPUID_2_L2_2MB_8WAY_64B 0x7d - - -/* CPUID Leaf 4 constants: */ - -/* EAX: */ -#define CPUID_4_TYPE_DCACHE 1 -#define CPUID_4_TYPE_ICACHE 2 -#define CPUID_4_TYPE_UNIFIED 3 - -#define CPUID_4_LEVEL(l) ((l) << 5) - -#define CPUID_4_SELF_INIT_LEVEL (1 << 8) -#define CPUID_4_FULLY_ASSOC (1 << 9) - -/* EDX: */ -#define CPUID_4_NO_INVD_SHARING (1 << 0) -#define CPUID_4_INCLUSIVE (1 << 1) -#define CPUID_4_COMPLEX_IDX (1 << 2) - -#define ASSOC_FULL 0xFF - -/* AMD associativity encoding used on CPUID Leaf 0x80000006: */ -#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ - a == 2 ? 0x2 : \ - a == 4 ? 0x4 : \ - a == 8 ? 0x6 : \ - a == 16 ? 0x8 : \ - a == 32 ? 0xA : \ - a == 48 ? 0xB : \ - a == 64 ? 0xC : \ - a == 96 ? 0xD : \ - a == 128 ? 0xE : \ - a == ASSOC_FULL ? 0xF : \ - 0 /* invalid value */) - - -/* Definitions of the hardcoded cache entries we expose: */ - -/* L1 data cache: */ -#define L1D_LINE_SIZE 64 -#define L1D_ASSOCIATIVITY 8 -#define L1D_SETS 64 -#define L1D_PARTITIONS 1 -/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */ -#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B -/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ -#define L1D_LINES_PER_TAG 1 -#define L1D_SIZE_KB_AMD 64 -#define L1D_ASSOCIATIVITY_AMD 2 - -/* L1 instruction cache: */ -#define L1I_LINE_SIZE 64 -#define L1I_ASSOCIATIVITY 8 -#define L1I_SETS 64 -#define L1I_PARTITIONS 1 -/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */ -#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B -/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ -#define L1I_LINES_PER_TAG 1 -#define L1I_SIZE_KB_AMD 64 -#define L1I_ASSOCIATIVITY_AMD 2 - -/* Level 2 unified cache: */ -#define L2_LINE_SIZE 64 -#define L2_ASSOCIATIVITY 16 -#define L2_SETS 4096 -#define L2_PARTITIONS 1 -/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */ -/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ -#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B -/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ -#define L2_LINES_PER_TAG 1 -#define L2_SIZE_KB_AMD 512 - -/* No L3 cache: */ -#define L3_SIZE_KB 0 /* disabled */ -#define L3_ASSOCIATIVITY 0 /* disabled */ -#define L3_LINES_PER_TAG 0 /* disabled */ -#define L3_LINE_SIZE 0 /* disabled */ - -/* TLB definitions: */ - -#define L1_DTLB_2M_ASSOC 1 -#define L1_DTLB_2M_ENTRIES 255 -#define L1_DTLB_4K_ASSOC 1 -#define L1_DTLB_4K_ENTRIES 255 - -#define L1_ITLB_2M_ASSOC 1 -#define L1_ITLB_2M_ENTRIES 255 -#define L1_ITLB_4K_ASSOC 1 -#define L1_ITLB_4K_ENTRIES 255 - -#define L2_DTLB_2M_ASSOC 0 /* disabled */ -#define L2_DTLB_2M_ENTRIES 0 /* disabled */ -#define L2_DTLB_4K_ASSOC 4 -#define L2_DTLB_4K_ENTRIES 512 - -#define L2_ITLB_2M_ASSOC 0 /* disabled */ -#define L2_ITLB_2M_ENTRIES 0 /* disabled */ -#define L2_ITLB_4K_ASSOC 4 -#define L2_ITLB_4K_ENTRIES 512 - -void x86_cpu_register_types(void *); - -static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, - uint32_t vendor2, uint32_t
vendor3) -{ - int i; - for (i = 0; i < 4; i++) { - dst[i] = vendor1 >> (8 * i); - dst[i + 4] = vendor2 >> (8 * i); - dst[i + 8] = vendor3 >> (8 * i); - } - dst[CPUID_VENDOR_SZ] = '\0'; -} - -/* feature flags taken from "Intel Processor Identification and the CPUID - * Instruction" and AMD's "CPUID Specification". In cases of disagreement - * between feature naming conventions, aliases may be added. - */ -static const char *feature_name[] = { - "fpu", "vme", "de", "pse", - "tsc", "msr", "pae", "mce", - "cx8", "apic", NULL, "sep", - "mtrr", "pge", "mca", "cmov", - "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, - NULL, "ds" /* Intel dts */, "acpi", "mmx", - "fxsr", "sse", "sse2", "ss", - "ht" /* Intel htt */, "tm", "ia64", "pbe", -}; -static const char *ext_feature_name[] = { - "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor", - "ds_cpl", "vmx", "smx", "est", - "tm2", "ssse3", "cid", NULL, - "fma", "cx16", "xtpr", "pdcm", - NULL, "pcid", "dca", "sse4.1|sse4_1", - "sse4.2|sse4_2", "x2apic", "movbe", "popcnt", - "tsc-deadline", "aes", "xsave", "osxsave", - "avx", "f16c", "rdrand", "hypervisor", -}; -/* Feature names that are already defined on feature_name[] but are set on - * CPUID[8000_0001].EDX on AMD CPUs don't have their names on - * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features - * if and only if CPU vendor is AMD. - */ -static const char *ext2_feature_name[] = { - NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, - NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, - NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall", - NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, - NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, - "nx|xd", NULL, "mmxext", NULL /* mmx */, - NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp", - NULL, "lm|i64", "3dnowext", "3dnow", -}; -static const char *ext3_feature_name[] = { - "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, - "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse", - "3dnowprefetch", "osvw", "ibs", "xop", - "skinit", "wdt", NULL, "lwp", - "fma4", "tce", NULL, "nodeid_msr", - NULL, "tbm", "topoext", "perfctr_core", - "perfctr_nb", NULL, NULL, NULL, - NULL, NULL, NULL, NULL, -}; - -static const char *ext4_feature_name[] = { - NULL, NULL, "xstore", "xstore-en", - NULL, NULL, "xcrypt", "xcrypt-en", - "ace2", "ace2-en", "phe", "phe-en", - "pmm", "pmm-en", NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, -}; - -static const char *cpuid_7_0_ebx_feature_name[] = { - "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep", - "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL, - "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL, - NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL, -}; - -static const char *cpuid_apm_edx_feature_name[] = { - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - "invtsc", NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, -}; - -#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) -#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ - CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) -#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ - CPUID_MTRR | CPUID_PGE | 
CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ - CPUID_PSE36 | CPUID_FXSR) -#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) -#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ - CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ - CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ - CPUID_PAE | CPUID_SEP | CPUID_APIC) - -#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ - CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ - CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ - CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ - CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS) - /* partly implemented: - CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ - /* missing: - CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ -#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ - CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ - CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ - CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR) - /* missing: - CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, - CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, - CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, - CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE, - CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C, - CPUID_EXT_RDRAND */ - -#ifdef TARGET_X86_64 -#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) -#else -#define TCG_EXT2_X86_64_FEATURES 0 -#endif - -#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ - CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ - CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ - TCG_EXT2_X86_64_FEATURES) -#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ - CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) -#define TCG_EXT4_FEATURES 0 -#define TCG_SVM_FEATURES 0 -#define TCG_KVM_FEATURES 0 -#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ - CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX) - /* missing: - CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, - CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, - CPUID_7_0_EBX_RDSEED */ -#define TCG_APM_FEATURES 0 - - -typedef struct FeatureWordInfo { - const char **feat_names; - uint32_t cpuid_eax; /* Input EAX for CPUID */ - bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */ - uint32_t cpuid_ecx; /* Input ECX value for CPUID */ - int cpuid_reg; /* output register (R_* constant) */ - uint32_t tcg_features; /* Feature flags supported by TCG */ - uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */ -} FeatureWordInfo; - -static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { -#ifdef _MSC_VER - // FEAT_1_EDX - { - feature_name, - 1, - false,0, - R_EDX, - TCG_FEATURES, - }, - // FEAT_1_ECX - { - ext_feature_name, - 1, - false,0, - R_ECX, - TCG_EXT_FEATURES, - }, - // FEAT_7_0_EBX - { - cpuid_7_0_ebx_feature_name, - 7, - true, 0, - R_EBX, - TCG_7_0_EBX_FEATURES, - }, - // FEAT_8000_0001_EDX - { - ext2_feature_name, - 0x80000001, - false,0, - R_EDX, - TCG_EXT2_FEATURES, - }, - // FEAT_8000_0001_ECX - { - ext3_feature_name, - 0x80000001, - false,0, - R_ECX, - TCG_EXT3_FEATURES, - }, - // FEAT_8000_0007_EDX - { - cpuid_apm_edx_feature_name, - 0x80000007, - false,0, - R_EDX, - TCG_APM_FEATURES, - CPUID_APM_INVTSC, - }, - // FEAT_C000_0001_EDX - { - 
ext4_feature_name, - 0xC0000001, - false,0, - R_EDX, - TCG_EXT4_FEATURES, - }, - // FEAT_KVM - {0}, - // FEAT_SVM - {0}, -#else - [FEAT_1_EDX] = { - .feat_names = feature_name, - .cpuid_eax = 1, .cpuid_reg = R_EDX, - .tcg_features = TCG_FEATURES, - }, - [FEAT_1_ECX] = { - .feat_names = ext_feature_name, - .cpuid_eax = 1, .cpuid_reg = R_ECX, - .tcg_features = TCG_EXT_FEATURES, - }, - [FEAT_8000_0001_EDX] = { - .feat_names = ext2_feature_name, - .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX, - .tcg_features = TCG_EXT2_FEATURES, - }, - [FEAT_8000_0001_ECX] = { - .feat_names = ext3_feature_name, - .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX, - .tcg_features = TCG_EXT3_FEATURES, - }, - [FEAT_C000_0001_EDX] = { - .feat_names = ext4_feature_name, - .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX, - .tcg_features = TCG_EXT4_FEATURES, - }, - [FEAT_7_0_EBX] = { - .feat_names = cpuid_7_0_ebx_feature_name, - .cpuid_eax = 7, - .cpuid_needs_ecx = true, .cpuid_ecx = 0, - .cpuid_reg = R_EBX, - .tcg_features = TCG_7_0_EBX_FEATURES, - }, - [FEAT_8000_0007_EDX] = { - .feat_names = cpuid_apm_edx_feature_name, - .cpuid_eax = 0x80000007, - .cpuid_reg = R_EDX, - .tcg_features = TCG_APM_FEATURES, - .unmigratable_flags = CPUID_APM_INVTSC, - }, -#endif -}; - -typedef struct X86RegisterInfo32 { - /* Name of register */ - const char *name; - /* QAPI enum value register */ - X86CPURegister32 qapi_enum; -} X86RegisterInfo32; - -#define REGISTER(reg) \ - { #reg, X86_CPU_REGISTER32_##reg } -static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { - REGISTER(EAX), - REGISTER(ECX), - REGISTER(EDX), - REGISTER(EBX), - REGISTER(ESP), - REGISTER(EBP), - REGISTER(ESI), - REGISTER(EDI), -}; -#undef REGISTER - -typedef struct ExtSaveArea { - uint32_t feature, bits; - uint32_t offset, size; -} ExtSaveArea; - -const char *get_register_name_32(unsigned int reg) -{ - if (reg >= CPU_NB_REGS32) { - return NULL; - } - return x86_reg_info_32[reg].name; -} - -#ifdef _MSC_VER -#include <intrin.h> -#endif - -void host_cpuid(uint32_t function, uint32_t count, - uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) -{ - uint32_t vec[4]; - -#ifdef _MSC_VER - __cpuidex((int*)vec, function, count); -#else -#ifdef __x86_64__ - asm volatile("cpuid" - : "=a"(vec[0]), "=b"(vec[1]), - "=c"(vec[2]), "=d"(vec[3]) - : "0"(function), "c"(count) : "cc"); -#elif defined(__i386__) - asm volatile("pusha \n\t" - "cpuid \n\t" - "mov %%eax, 0(%2) \n\t" - "mov %%ebx, 4(%2) \n\t" - "mov %%ecx, 8(%2) \n\t" - "mov %%edx, 12(%2) \n\t" - "popa" - : : "a"(function), "c"(count), "S"(vec) - : "memory", "cc"); -#else - abort(); -#endif -#endif // _MSC_VER - - if (eax) - *eax = vec[0]; - if (ebx) - *ebx = vec[1]; - if (ecx) - *ecx = vec[2]; - if (edx) - *edx = vec[3]; -} - -#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c))) - -/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of - * a substring. ex if !NULL points to the first char after a substring, - * otherwise the string is assumed to be sized by a terminating nul. - * Return lexical ordering of *s1:*s2. - */ -static int sstrcmp(const char *s1, const char *e1, - const char *s2, const char *e2) -{ - for (;;) { - if (!*s1 || !*s2 || *s1 != *s2) - return (*s1 - *s2); - ++s1, ++s2; - if (s1 == e1 && s2 == e2) - return (0); - else if (s1 == e1) - return (*s2); - else if (s2 == e2) - return (*s1); - } -} - -/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple - * '|' delimited (possibly empty) strings in which case search for a match - * within the alternatives proceeds left to right.
Return 0 for success, - * non-zero otherwise. - */ -static int altcmp(const char *s, const char *e, const char *altstr) -{ - const char *p, *q; - - for (q = p = altstr; ; ) { - while (*p && *p != '|') - ++p; - if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p))) - return (0); - if (!*p) - return (1); - else - q = ++p; - } -} - -/* search featureset for flag *[s..e), if found set corresponding bit in - * *pval and return true, otherwise return false - */ -static bool lookup_feature(uint32_t *pval, const char *s, const char *e, - const char **featureset) -{ - uint32_t mask; - const char **ppc; - bool found = false; - - for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) { - if (*ppc && !altcmp(s, e, *ppc)) { - *pval |= mask; - found = true; - } - } - return found; -} - -static void add_flagname_to_bitmaps(const char *flagname, - FeatureWordArray words, - Error **errp) -{ - FeatureWord w; - for (w = 0; w < FEATURE_WORDS; w++) { - FeatureWordInfo *wi = &feature_word_info[w]; - if (wi->feat_names && - lookup_feature(&words[w], flagname, NULL, wi->feat_names)) { - break; - } - } - if (w == FEATURE_WORDS) { - error_setg(errp, "CPU feature %s not found", flagname); - } -} - -/* CPU class name definitions: */ - -#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU -#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX) - -/* Return type name for a given CPU model name - * Caller is responsible for freeing the returned string. - */ -static char *x86_cpu_type_name(const char *model_name) -{ - return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name); -} - -static ObjectClass *x86_cpu_class_by_name(struct uc_struct *uc, const char *cpu_model) -{ - ObjectClass *oc; - char *typename; - - if (cpu_model == NULL) { - return NULL; - } - - typename = x86_cpu_type_name(cpu_model); - oc = object_class_by_name(uc, typename); - g_free(typename); - return oc; -} - -struct X86CPUDefinition { - const char *name; - uint32_t level; - uint32_t xlevel; - uint32_t xlevel2; - /* vendor is zero-terminated, 12 character ASCII string */ - char vendor[CPUID_VENDOR_SZ + 1]; - int family; - int model; - int stepping; - FeatureWordArray features; - char model_id[48]; - bool cache_info_passthrough; -}; - -static X86CPUDefinition builtin_x86_defs[] = { - { - "qemu64", - 4, 0x8000000A, 0, - CPUID_VENDOR_AMD, - 6, 6, 3, - { - // FEAT_1_EDX - PPRO_FEATURES | - CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | - CPUID_PSE36, - // FEAT_1_ECX - CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | - CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | - CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, - }, - }, - { - "phenom", - 5, 0x8000001A, 0, - CPUID_VENDOR_AMD, - 16, 2, 3, - { - /* Missing: CPUID_HT */ - // FEAT_1_EDX - PPRO_FEATURES | - CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | - CPUID_PSE36 | CPUID_VME, - // FEAT_1_ECX - CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | - CPUID_EXT_POPCNT, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | - CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | - CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | - CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, - /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, - CPUID_EXT3_CR8LEG, - CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, - CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | - 
CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, - // FEAT_8000_0007_EDX - 0, - // FEAT_C000_0001_EDX - 0, - // FEAT_KVM - 0, - /* Missing: CPUID_SVM_LBRV */ - // FEAT_SVM - CPUID_SVM_NPT, - }, - "AMD Phenom(tm) 9550 Quad-Core Processor", - }, - { - "core2duo", - 10, 0x80000008, 0, - CPUID_VENDOR_INTEL, - 6, 15, 11, - { - /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ - // FEAT_1_EDX - PPRO_FEATURES | - CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | - CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, - /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, - * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ - // FEAT_1_ECX - CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | - CPUID_EXT_CX16, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM, - }, - "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", - }, - { - "kvm64", - 5, 0x80000008, 0, - CPUID_VENDOR_INTEL, - 15, 6, 1, - { - /* Missing: CPUID_VME, CPUID_HT */ - // FEAT_1_EDX - PPRO_FEATURES | - CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | - CPUID_PSE36, - /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ - // FEAT_1_ECX - CPUID_EXT_SSE3 | CPUID_EXT_CX16, - // FEAT_7_0_EBX - 0, - /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ - // FEAT_8000_0001_EDX - (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | - CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, - /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, - CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, - CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, - CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ - // FEAT_8000_0001_ECX - 0, - }, - "Common KVM processor", - }, - { - "qemu32", - 4, 0x80000004, 0, - CPUID_VENDOR_INTEL, - 6, 6, 3, - { - // FEAT_1_EDX - PPRO_FEATURES, - // FEAT_1_ECX - CPUID_EXT_SSE3 | CPUID_EXT_POPCNT, - }, - }, - { - "kvm32", - 5, 0x80000008, 0, - CPUID_VENDOR_INTEL, - 15, 6, 1, - { - // FEAT_1_EDX - PPRO_FEATURES | - CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, - // FEAT_1_ECX - CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES, - // FEAT_8000_0001_ECX - 0, - }, - "Common 32-bit KVM processor", - }, - { - "coreduo", - 10, 0x80000008, 0, - CPUID_VENDOR_INTEL, - 6, 14, 8, - { - /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ - // FEAT_1_EDX - PPRO_FEATURES | CPUID_VME | - CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | - CPUID_SS, - /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, - * CPUID_EXT_PDCM, CPUID_EXT_VMX */ - // FEAT_1_ECX - CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_NX, - }, - "Genuine Intel(R) CPU T2600 @ 2.16GHz", - }, - { - "486", - 1, 0, 0, - CPUID_VENDOR_INTEL, - 4, 8, 0, - { - // FEAT_1_EDX - I486_FEATURES, - }, - }, - { - "pentium", - 1, 0, 0, - CPUID_VENDOR_INTEL, - 5, 4, 3, - { - // FEAT_1_EDX - PENTIUM_FEATURES, - }, - }, - { - "pentium2", - 2, 0, 0, - CPUID_VENDOR_INTEL, - 6, 5, 2, - { - // FEAT_1_EDX - PENTIUM2_FEATURES, - }, - }, - { - "pentium3", - 2, 0, 0, - CPUID_VENDOR_INTEL, - 6, 7, 3, - { - // FEAT_1_EDX - PENTIUM3_FEATURES, - }, - }, - { - "athlon", - 2, 0x80000008, 0, - CPUID_VENDOR_AMD, - 6, 2, 3, - { - // FEAT_1_EDX - PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | - CPUID_MCA, - // FEAT_1_ECX - 0, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | - CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, - }, - }, 
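[The model entries above and below are positional initializers for the X86CPUDefinition struct declared earlier in this file: name, level, xlevel, xlevel2, vendor, family/model/stepping, the FeatureWordArray, and an optional model_id. As a minimal, self-contained sketch of how the leading positional fields line up, under the assumption of a hypothetical trimmed-down mirror struct (MiniX86Def is not part of the source):]

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Hypothetical mirror of the leading X86CPUDefinition fields; the real
 * struct continues with the feature words and cache_info_passthrough. */
typedef struct {
    const char *name;     /* CPU model name, e.g. "athlon" */
    uint32_t level;       /* highest basic CPUID leaf */
    uint32_t xlevel;      /* highest extended CPUID leaf (0x8000xxxx) */
    uint32_t xlevel2;     /* highest Centaur CPUID leaf (0xC000xxxx), usually 0 */
    const char *vendor;   /* 12-character CPUID vendor string */
    int family, model, stepping;
} MiniX86Def;

int main(void)
{
    /* Mirrors the positional head of the "athlon" entry above
     * (CPUID_VENDOR_AMD expands to "AuthenticAMD"). */
    MiniX86Def athlon = { "athlon", 2, 0x80000008, 0,
                          "AuthenticAMD", 6, 2, 3 };

    printf("%s: family %d, model %d, stepping %d, xlevel 0x%" PRIx32 "\n",
           athlon.name, athlon.family, athlon.model, athlon.stepping,
           athlon.xlevel);
    return 0;
}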
- { - "n270", - /* original is on level 10 */ - 5, 0x8000000A, 0, - CPUID_VENDOR_INTEL, - 6, 28, 2, - { - /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ - // FEAT_1_EDX - PPRO_FEATURES | - CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | - CPUID_ACPI | CPUID_SS, - /* Some CPUs got no CPUID_SEP */ - /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, - * CPUID_EXT_XTPR */ - // FEAT_1_ECX - CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | - CPUID_EXT_MOVBE, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | - CPUID_EXT2_NX, - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM, - }, - "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", - }, - { - "Conroe", - 4, 0x8000000A, 0, - CPUID_VENDOR_INTEL, - 6, 15, 3, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM, - }, - "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", - }, - { - "Penryn", - 4, 0x8000000A, 0, - CPUID_VENDOR_INTEL, - 6, 23, 3, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | - CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM, - }, - "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", - }, - { - "Nehalem", - 4, 0x8000000A, 0, - CPUID_VENDOR_INTEL, - 6, 26, 3, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | - CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM, - }, - "Intel Core i7 9xx (Nehalem Class Core i7)", - }, - { - "Westmere", - 11, 0x8000000A, 0, - CPUID_VENDOR_INTEL, - 6, 44, 1, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | - CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | - CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM, - }, - "Westmere E56xx/L56xx/X56xx (Nehalem-C)", - }, - { - "SandyBridge", - 0xd, 0x8000000A, 0, - CPUID_VENDOR_INTEL, - 6, 42, 1, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - 
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | - CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | - CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | - CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | - CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | - CPUID_EXT2_SYSCALL, - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM, - }, - "Intel Xeon E312xx (Sandy Bridge)", - }, - { - "Haswell", - 0xd, 0x8000000A, 0, - CPUID_VENDOR_INTEL, - 6, 60, 1, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | - CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | - CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | - CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | - CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | - CPUID_EXT_PCID, - // FEAT_7_0_EBX - CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | - CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | - CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | - CPUID_7_0_EBX_RTM, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | - CPUID_EXT2_SYSCALL, - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM, - }, - "Intel Core Processor (Haswell)", - }, - { - "Broadwell", - 0xd, 0x8000000A, 0, - CPUID_VENDOR_INTEL, - 6, 61, 2, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | - CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | - CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | - CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | - CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | - CPUID_EXT_PCID, - // FEAT_7_0_EBX - CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | - CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | - CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | - CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | - CPUID_7_0_EBX_SMAP, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | - CPUID_EXT2_SYSCALL, - // FEAT_8000_0001_ECX - CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, - }, - "Intel Core Processor (Broadwell)", - }, - { - "Opteron_G1", - 5, 0x80000008, 0, - CPUID_VENDOR_AMD, - 15, 6, 1, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX | - CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT | - CPUID_EXT2_CMOV | CPUID_EXT2_MCA | 
CPUID_EXT2_PGE | - CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC | - CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR | - CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU, - }, - "AMD Opteron 240 (Gen 1 Class Opteron)", - }, - { - "Opteron_G2", - 5, 0x80000008, 0, - CPUID_VENDOR_AMD, - 15, 6, 1, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_CX16 | CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR | - CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 | - CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA | - CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | - CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE | - CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE | - CPUID_EXT2_DE | CPUID_EXT2_FPU, - // FEAT_8000_0001_ECX - CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, - }, - "AMD Opteron 22xx (Gen 2 Class Opteron)", - }, - { - "Opteron_G3", - 5, 0x80000008, 0, - CPUID_VENDOR_AMD, - 15, 6, 1, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | - CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR | - CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 | - CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA | - CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | - CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE | - CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE | - CPUID_EXT2_DE | CPUID_EXT2_FPU, - // FEAT_8000_0001_ECX - CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | - CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, - }, - "AMD Opteron 23xx (Gen 3 Class Opteron)", - }, - { - "Opteron_G4", - 0xd, 0x8000001A, 0, - CPUID_VENDOR_AMD, - 21, 1, 2, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | - CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | - CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | - CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | - CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX | - CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT | - CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE | - CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC | - CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR | - CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU, - // FEAT_8000_0001_ECX - CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | - CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | - CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | - CPUID_EXT3_LAHF_LM, - }, - "AMD Opteron 62xx class CPU", - }, - { - 
"Opteron_G5", - 0xd, 0x8000001A, 0, - CPUID_VENDOR_AMD, - 21, 2, 0, - { - // FEAT_1_EDX - CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | - CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | - CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | - CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | - CPUID_DE | CPUID_FP87, - // FEAT_1_ECX - CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | - CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | - CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | - CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, - // FEAT_7_0_EBX - 0, - // FEAT_8000_0001_EDX - CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | - CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX | - CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT | - CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE | - CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC | - CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR | - CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU, - // FEAT_8000_0001_ECX - CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | - CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | - CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | - CPUID_EXT3_LAHF_LM, - }, - "AMD Opteron 63xx class CPU", - }, -}; - -static uint32_t x86_cpu_get_supported_feature_word(struct uc_struct *uc, FeatureWord w); - -static void report_unavailable_features(FeatureWord w, uint32_t mask) -{ - FeatureWordInfo *f = &feature_word_info[w]; - int i; - - for (i = 0; i < 32; ++i) { - if (1 << i & mask) { - const char *reg = get_register_name_32(f->cpuid_reg); - assert(reg); - fprintf(stderr, "warning: %s doesn't support requested feature: " - "CPUID.%02XH:%s%s%s [bit %d]\n", - "TCG", - f->cpuid_eax, reg, - f->feat_names[i] ? "." : "", - f->feat_names[i] ? f->feat_names[i] : "", i); - } - } -} - -static void x86_cpuid_version_get_family(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - CPUX86State *env = &cpu->env; - int64_t value; - - value = (env->cpuid_version >> 8) & 0xf; - if (value == 0xf) { - value += (env->cpuid_version >> 20) & 0xff; - } - visit_type_int(v, &value, name, errp); -} - -static int x86_cpuid_version_set_family(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - CPUX86State *env = &cpu->env; - const int64_t min = 0; - const int64_t max = 0xff + 0xf; - Error *local_err = NULL; - int64_t value; - - visit_type_int(v, &value, name, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return -1; - } - if (value < min || value > max) { - error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? 
name : "null", value, min, max); - return -1; - } - - env->cpuid_version &= ~0xff00f00; - if (value > 0x0f) { - env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); - } else { - env->cpuid_version |= value << 8; - } - - return 0; -} - -static void x86_cpuid_version_get_model(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - CPUX86State *env = &cpu->env; - int64_t value; - - value = (env->cpuid_version >> 4) & 0xf; - value |= ((env->cpuid_version >> 16) & 0xf) << 4; - visit_type_int(v, &value, name, errp); -} - -static int x86_cpuid_version_set_model(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - CPUX86State *env = &cpu->env; - const int64_t min = 0; - const int64_t max = 0xff; - Error *local_err = NULL; - int64_t value; - - visit_type_int(v, &value, name, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return -1; - } - if (value < min || value > max) { - error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); - return -1; - } - - env->cpuid_version &= ~0xf00f0; - env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); - - return 0; -} - -static void x86_cpuid_version_get_stepping(struct uc_struct *uc, Object *obj, Visitor *v, - void *opaque, const char *name, - Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - CPUX86State *env = &cpu->env; - int64_t value; - - value = env->cpuid_version & 0xf; - visit_type_int(v, &value, name, errp); -} - -static int x86_cpuid_version_set_stepping(struct uc_struct *uc, Object *obj, Visitor *v, - void *opaque, const char *name, - Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - CPUX86State *env = &cpu->env; - const int64_t min = 0; - const int64_t max = 0xf; - Error *local_err = NULL; - int64_t value; - - visit_type_int(v, &value, name, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return -1; - } - if (value < min || value > max) { - error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? 
name : "null", value, min, max); - return -1; - } - - env->cpuid_version &= ~0xf; - env->cpuid_version |= value & 0xf; - - return 0; -} - -static void x86_cpuid_get_level(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - - visit_type_uint32(v, &cpu->env.cpuid_level, name, errp); -} - -static int x86_cpuid_set_level(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - - visit_type_uint32(v, &cpu->env.cpuid_level, name, errp); - - return 0; -} - -static void x86_cpuid_get_xlevel(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - - visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp); -} - -static int x86_cpuid_set_xlevel(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - - visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp); - - return 0; -} - -static char *x86_cpuid_get_vendor(struct uc_struct *uc, Object *obj, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - CPUX86State *env = &cpu->env; - char *value; - - value = (char *)g_malloc(CPUID_VENDOR_SZ + 1); - x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, - env->cpuid_vendor3); - return value; -} - -static int x86_cpuid_set_vendor(struct uc_struct *uc, Object *obj, const char *value, - Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - CPUX86State *env = &cpu->env; - int i; - - if (strlen(value) != CPUID_VENDOR_SZ) { - error_set(errp, QERR_PROPERTY_VALUE_BAD, "", - "vendor", value); - return -1; - } - - env->cpuid_vendor1 = 0; - env->cpuid_vendor2 = 0; - env->cpuid_vendor3 = 0; - for (i = 0; i < 4; i++) { - env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); - env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); - env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); - } - - return 0; -} - -static char *x86_cpuid_get_model_id(struct uc_struct *uc, Object *obj, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - CPUX86State *env = &cpu->env; - char *value; - int i; - - value = g_malloc(48 + 1); - for (i = 0; i < 48; i++) { - value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); - } - value[48] = '\0'; - return value; -} - -static int x86_cpuid_set_model_id(struct uc_struct *uc, Object *obj, const char *model_id, - Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - CPUX86State *env = &cpu->env; - int c, len, i; - - if (model_id == NULL) { - model_id = ""; - } - len = strlen(model_id); - memset(env->cpuid_model, 0, 48); - for (i = 0; i < 48; i++) { - if (i >= len) { - c = '\0'; - } else { - c = (uint8_t)model_id[i]; - } - env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); - } - - return 0; -} - -static void x86_cpuid_get_tsc_freq(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - int64_t value; - - value = cpu->env.tsc_khz * 1000; - visit_type_int(v, &value, name, errp); -} - -static int x86_cpuid_set_tsc_freq(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - const int64_t min = 0; - const int64_t max = INT64_MAX; - Error *local_err = NULL; - int64_t value; - - visit_type_int(v, &value, name, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return -1; - } - if (value < min || value > 
max) { - error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); - return -1; - } - - cpu->env.tsc_khz = (int)(value / 1000); - - return 0; -} - -static void x86_cpuid_get_apic_id(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - int64_t value = cpu->env.cpuid_apic_id; - - visit_type_int(v, &value, name, errp); -} - -static int x86_cpuid_set_apic_id(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - X86CPU *cpu = X86_CPU(uc, obj); - DeviceState *dev = DEVICE(uc, obj); - const int64_t min = 0; - const int64_t max = UINT32_MAX; - Error *error = NULL; - int64_t value; - - if (dev->realized) { - error_setg(errp, "Attempt to set property '%s' on '%s' after " - "it was realized", name, object_get_typename(obj)); - return -1; - } - - visit_type_int(v, &value, name, &error); - if (error) { - error_propagate(errp, error); - return -1; - } - if (value < min || value > max) { - error_setg(errp, "Property %s.%s doesn't take value %" PRId64 - " (minimum: %" PRId64 ", maximum: %" PRId64 ")" , - object_get_typename(obj), name, value, min, max); - return -1; - } - - if ((value != cpu->env.cpuid_apic_id) && cpu_exists(uc, value)) { - error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value); - return -1; - } - cpu->env.cpuid_apic_id = (uint32_t)value; - - return 0; -} - -/* Generic getter for "feature-words" and "filtered-features" properties */ -static void x86_cpu_get_feature_words(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, - const char *name, Error **errp) -{ - uint32_t *array = (uint32_t *)opaque; - FeatureWord w; - Error *err = NULL; - // These all get setup below, so no need to initialise them here. - X86CPUFeatureWordInfo word_infos[FEATURE_WORDS]; - X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS]; - X86CPUFeatureWordInfoList *list = NULL; - - for (w = 0; w < FEATURE_WORDS; w++) { - FeatureWordInfo *wi = &feature_word_info[w]; - X86CPUFeatureWordInfo *qwi = &word_infos[w]; - qwi->cpuid_input_eax = wi->cpuid_eax; - qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx; - qwi->cpuid_input_ecx = wi->cpuid_ecx; - qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum; - qwi->features = array[w]; - - /* List will be in reverse order, but order shouldn't matter */ - list_entries[w].next = list; - list_entries[w].value = &word_infos[w]; - list = &list_entries[w]; - } - - visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err); - error_propagate(errp, err); -} - -/* Convert all '_' in a feature string option name to '-', to make feature - * name conform to QOM property naming rule, which uses '-' instead of '_'. - */ -static inline void feat2prop(char *s) -{ - while ((s = strchr(s, '_'))) { - *s = '-'; - } -} - -/* Parse "+feature,-feature,feature=foo" CPU feature string - */ -static void x86_cpu_parse_featurestr(CPUState *cs, char *features, - Error **errp) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - char *featurestr; /* Single 'key=value" string being parsed */ - FeatureWord w; - /* Features to be added */ - FeatureWordArray plus_features = { 0 }; - /* Features to be removed */ - FeatureWordArray minus_features = { 0 }; - uint32_t numvalue; - CPUX86State *env = &cpu->env; - Error *local_err = NULL; - - featurestr = features ? 
strtok(features, ",") : NULL; - - while (featurestr) { - char *val; - if (featurestr[0] == '+') { - add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err); - } else if (featurestr[0] == '-') { - add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err); - } else if ((val = strchr(featurestr, '='))) { - *val = 0; val++; - feat2prop(featurestr); - if (!strcmp(featurestr, "xlevel")) { - char *err; - char num[32]; - - numvalue = strtoul(val, &err, 0); - if (!*val || *err) { - error_setg(errp, "bad numerical value %s", val); - return; - } - if (numvalue < 0x80000000) { - numvalue += 0x80000000; - } - snprintf(num, sizeof(num), "%" PRIu32, numvalue); - object_property_parse(cs->uc, OBJECT(cpu), num, featurestr, &local_err); - } else if (!strcmp(featurestr, "tsc-freq")) { - int64_t tsc_freq; - char *err; - char num[32]; - - tsc_freq = strtosz_suffix_unit(val, &err, - STRTOSZ_DEFSUFFIX_B, 1000); - if (tsc_freq < 0 || *err) { - error_setg(errp, "bad numerical value %s", val); - return; - } - snprintf(num, sizeof(num), "%" PRId64, tsc_freq); - object_property_parse(cs->uc, OBJECT(cpu), num, "tsc-frequency", - &local_err); - } else if (!strcmp(featurestr, "hv-spinlocks")) { - char *err; - const int min = 0xFFF; - char num[32]; - numvalue = strtoul(val, &err, 0); - if (!*val || *err) { - error_setg(errp, "bad numerical value %s", val); - return; - } - if (numvalue < (uint32_t)min) { - numvalue = min; - } - snprintf(num, sizeof(num), "%" PRId32, numvalue); - object_property_parse(cs->uc, OBJECT(cpu), num, featurestr, &local_err); - } else { - object_property_parse(cs->uc, OBJECT(cpu), val, featurestr, &local_err); - } - } else { - feat2prop(featurestr); - object_property_parse(cs->uc, OBJECT(cpu), "on", featurestr, &local_err); - } - if (local_err) { - error_propagate(errp, local_err); - return; - } - featurestr = strtok(NULL, ","); - } - - if (cpu->host_features) { - for (w = 0; w < FEATURE_WORDS; w++) { - env->features[w] = - x86_cpu_get_supported_feature_word(env->uc, w); - } - } - - for (w = 0; w < FEATURE_WORDS; w++) { - env->features[w] |= plus_features[w]; - env->features[w] &= ~minus_features[w]; - } -} - -static uint32_t x86_cpu_get_supported_feature_word(struct uc_struct *uc, FeatureWord w) -{ - FeatureWordInfo *wi = &feature_word_info[w]; - - if (tcg_enabled(uc)) { - return wi->tcg_features; - } else { - return ~0; - } -} - -/* - * Filters CPU feature words based on host availability of each feature. - * - * Returns: 0 if all flags are supported by the host, non-zero otherwise. 
- */ -static int x86_cpu_filter_features(X86CPU *cpu) -{ - CPUX86State *env = &cpu->env; - FeatureWord w; - int rv = 0; - - for (w = 0; w < FEATURE_WORDS; w++) { - uint32_t host_feat = x86_cpu_get_supported_feature_word(env->uc, w); - uint32_t requested_features = env->features[w]; - env->features[w] &= host_feat; - cpu->filtered_features[w] = requested_features & ~env->features[w]; - if (cpu->filtered_features[w]) { - if (cpu->check_cpuid || cpu->enforce_cpuid) { - report_unavailable_features(w, cpu->filtered_features[w]); - } - rv = 1; - } - } - - return rv; -} - -/* Load data from X86CPUDefinition - */ -static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp) -{ - CPUX86State *env = &cpu->env; - const char *vendor; - FeatureWord w; - - object_property_set_int(env->uc, OBJECT(cpu), def->level, "level", errp); - object_property_set_int(env->uc, OBJECT(cpu), def->family, "family", errp); - object_property_set_int(env->uc, OBJECT(cpu), def->model, "model", errp); - object_property_set_int(env->uc, OBJECT(cpu), def->stepping, "stepping", errp); - object_property_set_int(env->uc, OBJECT(cpu), def->xlevel, "xlevel", errp); - env->cpuid_xlevel2 = def->xlevel2; - cpu->cache_info_passthrough = def->cache_info_passthrough; - object_property_set_str(env->uc, OBJECT(cpu), def->model_id, "model-id", errp); - for (w = 0; w < FEATURE_WORDS; w++) { - env->features[w] = def->features[w]; - } - - env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; - - /* sysenter isn't supported in compatibility mode on AMD, - * syscall isn't supported in compatibility mode on Intel. - * Normally we advertise the actual CPU vendor, but you can - * override this using the 'vendor' property if you want to use - * KVM's sysenter/syscall emulation in compatibility mode and - * when doing cross vendor migration - */ - vendor = def->vendor; - - object_property_set_str(env->uc, OBJECT(cpu), vendor, "vendor", errp); -} - -X86CPU *cpu_x86_create(struct uc_struct *uc, const char *cpu_model, Error **errp) -{ - X86CPU *cpu = NULL; - ObjectClass *oc; - gchar **model_pieces; - char *name, *features; - Error *error = NULL; - - model_pieces = g_strsplit(cpu_model, ",", 2); - if (!model_pieces[0]) { - error_setg(&error, "Invalid/empty CPU model name"); - goto out; - } - name = model_pieces[0]; - features = model_pieces[1]; - - oc = x86_cpu_class_by_name(uc, name); - if (oc == NULL) { - error_setg(&error, "Unable to find CPU definition: %s", name); - goto out; - } - - cpu = X86_CPU(uc, object_new(uc, object_class_get_name(oc))); - - x86_cpu_parse_featurestr(CPU(cpu), features, &error); - if (error) { - goto out; - } - -out: - if (error != NULL) { - error_propagate(errp, error); - if (cpu) { - object_unref(uc, OBJECT(cpu)); - cpu = NULL; - } - } - g_strfreev(model_pieces); - return cpu; -} - -X86CPU *cpu_x86_init(struct uc_struct *uc, const char *cpu_model) -{ - Error *error = NULL; - X86CPU *cpu; - - cpu = cpu_x86_create(uc, cpu_model, &error); - if (error) { - goto out; - } - - object_property_set_bool(uc, OBJECT(cpu), true, "realized", &error); - -out: - if (error) { - error_free(error); - if (cpu != NULL) { - object_unref(uc, OBJECT(cpu)); - cpu = NULL; - } - } - return cpu; -} - -static void x86_cpu_cpudef_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) -{ - X86CPUDefinition *cpudef = data; - X86CPUClass *xcc = X86_CPU_CLASS(uc, oc); - - xcc->cpu_def = cpudef; -} - -static void x86_register_cpudef_type(struct uc_struct *uc, X86CPUDefinition *def) -{ - char *typename = x86_cpu_type_name(def->name); - 
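/*
 * Editor's sketch (names here are the editor's, not QEMU's): cpu_x86_create()
 * above splits "model,feat1,feat2" into a model name and a feature string
 * via g_strsplit(..., 2). The same split in plain C:
 */
#include <string.h>

static void demo_split_model(char *spec, char **name, char **features)
{
    char *comma = strchr(spec, ',');   /* first ',' separates name/features */
    *name = spec;
    if (comma) {
        *comma = '\0';                 /* terminate the model name */
        *features = comma + 1;         /* rest is the feature list */
    } else {
        *features = NULL;              /* no features given */
    }
}
/* e.g. "qemu64,+avx" -> name "qemu64", features "+avx" */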
TypeInfo ti = { - typename, - TYPE_X86_CPU, - - 0, - 0, - NULL, - - NULL, - NULL, - NULL, - - def, - - x86_cpu_cpudef_class_init, - }; - - type_register(uc, &ti); - g_free(typename); -} - -#if !defined(CONFIG_USER_ONLY) - -void cpu_clear_apic_feature(CPUX86State *env) -{ - env->features[FEAT_1_EDX] &= ~CPUID_APIC; -} - -#endif /* !CONFIG_USER_ONLY */ - -/* Initialize list of CPU models, filling some non-static fields if necessary - */ -void x86_cpudef_setup(void) -{ - int i, j; - static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" }; - - for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) { - X86CPUDefinition *def = &builtin_x86_defs[i]; - - /* Look for specific "cpudef" models that */ - /* have the QEMU version in .model_id */ - for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) { - if (strcmp(model_with_versions[j], def->name) == 0) { - pstrcpy(def->model_id, sizeof(def->model_id), - "QEMU Virtual CPU version "); - break; - } - } - } -} - -static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) -{ - *ebx = env->cpuid_vendor1; - *edx = env->cpuid_vendor2; - *ecx = env->cpuid_vendor3; -} - -void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) -{ - X86CPU *cpu = x86_env_get_cpu(env); - CPUState *cs = CPU(cpu); - - /* test if maximum index reached */ - if (index & 0x80000000) { - if (index > env->cpuid_xlevel) { - if (env->cpuid_xlevel2 > 0) { - /* Handle the Centaur's CPUID instruction. */ - if (index > env->cpuid_xlevel2) { - index = env->cpuid_xlevel2; - } else if (index < 0xC0000000) { - index = env->cpuid_xlevel; - } - } else { - /* Intel documentation states that invalid EAX input will - * return the same information as EAX=cpuid_level - * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) - */ - index = env->cpuid_level; - } - } - } else { - if (index > env->cpuid_level) - index = env->cpuid_level; - } - - switch(index) { - case 0: - *eax = env->cpuid_level; - get_cpuid_vendor(env, ebx, ecx, edx); - break; - case 1: - *eax = env->cpuid_version; - *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. 
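/*
 * Editor's sketch: CPUID.1 EBX, as assembled above, packs several fields:
 * bits 31:24 initial APIC ID, 23:16 logical processor count, 15:8 CLFLUSH
 * line size in 8-byte units. Decoding them back out (helper name is the
 * editor's):
 */
static void demo_decode_cpuid1_ebx(unsigned ebx, unsigned *apic_id,
                                   unsigned *nlogical, unsigned *clflush_bytes)
{
    *apic_id       = ebx >> 24;
    *nlogical      = (ebx >> 16) & 0xff;
    *clflush_bytes = ((ebx >> 8) & 0xff) * 8;  /* the 8 above -> 64-byte line */
}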
*/ - *ecx = env->features[FEAT_1_ECX]; - *edx = env->features[FEAT_1_EDX]; - if (cs->nr_cores * cs->nr_threads > 1) { - *ebx |= (cs->nr_cores * cs->nr_threads) << 16; - *edx |= 1 << 28; /* HTT bit */ - } - break; - case 2: - /* cache info: needed for Pentium Pro compatibility */ - if (cpu->cache_info_passthrough) { - host_cpuid(index, 0, eax, ebx, ecx, edx); - break; - } - *eax = 1; /* Number of CPUID[EAX=2] calls required */ - *ebx = 0; - *ecx = 0; - *edx = (L1D_DESCRIPTOR << 16) | \ - (L1I_DESCRIPTOR << 8) | \ - (L2_DESCRIPTOR); - break; - case 4: - /* cache info: needed for Core compatibility */ - if (cpu->cache_info_passthrough) { - host_cpuid(index, count, eax, ebx, ecx, edx); - *eax &= ~0xFC000000; - } else { - *eax = 0; - switch (count) { - case 0: /* L1 dcache info */ - *eax |= CPUID_4_TYPE_DCACHE | \ - CPUID_4_LEVEL(1) | \ - CPUID_4_SELF_INIT_LEVEL; - *ebx = (L1D_LINE_SIZE - 1) | \ - ((L1D_PARTITIONS - 1) << 12) | \ - ((L1D_ASSOCIATIVITY - 1) << 22); - *ecx = L1D_SETS - 1; - *edx = CPUID_4_NO_INVD_SHARING; - break; - case 1: /* L1 icache info */ - *eax |= CPUID_4_TYPE_ICACHE | \ - CPUID_4_LEVEL(1) | \ - CPUID_4_SELF_INIT_LEVEL; - *ebx = (L1I_LINE_SIZE - 1) | \ - ((L1I_PARTITIONS - 1) << 12) | \ - ((L1I_ASSOCIATIVITY - 1) << 22); - *ecx = L1I_SETS - 1; - *edx = CPUID_4_NO_INVD_SHARING; - break; - case 2: /* L2 cache info */ - *eax |= CPUID_4_TYPE_UNIFIED | \ - CPUID_4_LEVEL(2) | \ - CPUID_4_SELF_INIT_LEVEL; - if (cs->nr_threads > 1) { - *eax |= (cs->nr_threads - 1) << 14; - } - *ebx = (L2_LINE_SIZE - 1) | \ - ((L2_PARTITIONS - 1) << 12) | \ - ((L2_ASSOCIATIVITY - 1) << 22); - *ecx = L2_SETS - 1; - *edx = CPUID_4_NO_INVD_SHARING; - break; - default: /* end of info */ - *eax = 0; - *ebx = 0; - *ecx = 0; - *edx = 0; - break; - } - } - - /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */ - if ((*eax & 31) && cs->nr_cores > 1) { - *eax |= (cs->nr_cores - 1) << 26; - } - break; - case 5: - /* mwait info: needed for Core compatibility */ - *eax = 0; /* Smallest monitor-line size in bytes */ - *ebx = 0; /* Largest monitor-line size in bytes */ - *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; - *edx = 0; - break; - case 6: - /* Thermal and Power Leaf */ - *eax = 0; - *ebx = 0; - *ecx = 0; - *edx = 0; - break; - case 7: - /* Structured Extended Feature Flags Enumeration Leaf */ - if (count == 0) { - *eax = 0; /* Maximum ECX value for sub-leaves */ - *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ - *ecx = 0; /* Reserved */ - *edx = 0; /* Reserved */ - } else { - *eax = 0; - *ebx = 0; - *ecx = 0; - *edx = 0; - } - break; - case 9: - /* Direct Cache Access Information Leaf */ - *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ - *ebx = 0; - *ecx = 0; - *edx = 0; - break; - case 0xA: - /* Architectural Performance Monitoring Leaf */ - *eax = 0; - *ebx = 0; - *ecx = 0; - *edx = 0; - break; - case 0xD: { - break; - } - case 0x80000000: - *eax = env->cpuid_xlevel; - *ebx = env->cpuid_vendor1; - *edx = env->cpuid_vendor2; - *ecx = env->cpuid_vendor3; - break; - case 0x80000001: - *eax = env->cpuid_version; - *ebx = 0; - *ecx = env->features[FEAT_8000_0001_ECX]; - *edx = env->features[FEAT_8000_0001_EDX]; - - /* The Linux kernel checks for the CMPLegacy bit and - * discards multiple thread information if it is set. - * So dont set it here for Intel to make Linux guests happy. 
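/*
 * Editor's sketch: the CPUID leaf-4 fields built above encode cache geometry
 * as "value minus one" bitfields; the total cache size follows as
 * ways * partitions * line size * sets:
 */
static unsigned long demo_cache_size(unsigned ebx, unsigned ecx)
{
    unsigned line  = (ebx & 0xfff) + 1;          /* EBX bits 11:0  */
    unsigned parts = ((ebx >> 12) & 0x3ff) + 1;  /* EBX bits 21:12 */
    unsigned ways  = ((ebx >> 22) & 0x3ff) + 1;  /* EBX bits 31:22 */
    unsigned sets  = ecx + 1;                    /* ECX = sets - 1 */
    return (unsigned long)line * parts * ways * sets;
}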
- */ - if (cs->nr_cores * cs->nr_threads > 1) { - uint32_t tebx, tecx, tedx; - get_cpuid_vendor(env, &tebx, &tecx, &tedx); - if (tebx != CPUID_VENDOR_INTEL_1 || - tedx != CPUID_VENDOR_INTEL_2 || - tecx != CPUID_VENDOR_INTEL_3) { - *ecx |= 1 << 1; /* CmpLegacy bit */ - } - } - break; - case 0x80000002: - case 0x80000003: - case 0x80000004: - *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; - *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; - *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; - *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; - break; - case 0x80000005: - /* cache info (L1 cache) */ - if (cpu->cache_info_passthrough) { - host_cpuid(index, 0, eax, ebx, ecx, edx); - break; - } - *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \ - (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); - *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \ - (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); - *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \ - (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE); - *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \ - (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE); - break; - case 0x80000006: - /* cache info (L2 cache) */ - if (cpu->cache_info_passthrough) { - host_cpuid(index, 0, eax, ebx, ecx, edx); - break; - } - *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \ - (L2_DTLB_2M_ENTRIES << 16) | \ - (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \ - (L2_ITLB_2M_ENTRIES); - *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \ - (L2_DTLB_4K_ENTRIES << 16) | \ - (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \ - (L2_ITLB_4K_ENTRIES); - *ecx = (L2_SIZE_KB_AMD << 16) | \ - (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \ - (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE); - *edx = ((L3_SIZE_KB/512) << 18) | \ - (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \ - (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE); - break; - case 0x80000007: - *eax = 0; - *ebx = 0; - *ecx = 0; - *edx = env->features[FEAT_8000_0007_EDX]; - break; - case 0x80000008: - /* virtual & phys address size in low 2 bytes. */ -/* XXX: This value must match the one used in the MMU code. */ - if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { - /* 64 bit processor */ -/* XXX: The physical address space is limited to 42 bits in exec.c. 
*/ - *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */ - } else { - if (env->features[FEAT_1_EDX] & CPUID_PSE36) { - *eax = 0x00000024; /* 36 bits physical */ - } else { - *eax = 0x00000020; /* 32 bits physical */ - } - } - *ebx = 0; - *ecx = 0; - *edx = 0; - if (cs->nr_cores * cs->nr_threads > 1) { - *ecx |= (cs->nr_cores * cs->nr_threads) - 1; - } - break; - case 0x8000000A: - if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { - *eax = 0x00000001; /* SVM Revision */ - *ebx = 0x00000010; /* nr of ASIDs */ - *ecx = 0; - *edx = env->features[FEAT_SVM]; /* optional features */ - } else { - *eax = 0; - *ebx = 0; - *ecx = 0; - *edx = 0; - } - break; - case 0xC0000000: - *eax = env->cpuid_xlevel2; - *ebx = 0; - *ecx = 0; - *edx = 0; - break; - case 0xC0000001: - /* Support for VIA CPU's CPUID instruction */ - *eax = env->cpuid_version; - *ebx = 0; - *ecx = 0; - *edx = env->features[FEAT_C000_0001_EDX]; - break; - case 0xC0000002: - case 0xC0000003: - case 0xC0000004: - /* Reserved for the future, and now filled with zero */ - *eax = 0; - *ebx = 0; - *ecx = 0; - *edx = 0; - break; - default: - /* reserved values: zero */ - *eax = 0; - *ebx = 0; - *ecx = 0; - *edx = 0; - break; - } -} - -/* CPUClass::reset() */ -static void x86_cpu_reset(CPUState *s) -{ - X86CPU *cpu = X86_CPU(s->uc, s); - X86CPUClass *xcc = X86_CPU_GET_CLASS(s->uc, cpu); - CPUX86State *env = &cpu->env; - int i; - - xcc->parent_reset(s); - - memset(env, 0, offsetof(CPUX86State, cpuid_level)); - - tlb_flush(s, 1); - - env->old_exception = -1; - - /* init to reset state */ - -#ifdef CONFIG_SOFTMMU - env->hflags |= HF_SOFTMMU_MASK; -#endif - env->hflags2 |= HF2_GIF_MASK; - - cpu_x86_update_cr0(env, 0x60000010); - env->a20_mask = ~0x0; - env->smbase = 0x30000; - - env->idt.limit = 0xffff; - env->gdt.limit = 0xffff; - env->ldt.limit = 0xffff; - env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); - env->tr.limit = 0xffff; - env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); - - cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, - DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | - DESC_R_MASK | DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); - - env->eip = 0xfff0; - env->regs[R_EDX] = env->cpuid_version; - - env->eflags = 0x2; - - /* FPU init */ - for (i = 0; i < 8; i++) { - env->fptags[i] = 1; - } - cpu_set_fpuc(env, 0x37f); - - env->mxcsr = 0x1f80; - env->xstate_bv = XSTATE_FP | XSTATE_SSE; - - env->pat = 0x0007040600070406ULL; - env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; - - memset(env->dr, 0, sizeof(env->dr)); - env->dr[6] = DR6_FIXED_1; - env->dr[7] = DR7_FIXED_1; - cpu_breakpoint_remove_all(s, BP_CPU); - cpu_watchpoint_remove_all(s, BP_CPU); - - env->xcr0 = 1; - - /* - * SDM 11.11.5 requires: - * - IA32_MTRR_DEF_TYPE MSR.E = 0 - * - IA32_MTRR_PHYSMASKn.V = 0 - * All other bits are undefined. For simplification, zero it all. 
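/*
 * Editor's sketch: leaf 0x80000008 returns the physical address width in
 * EAX[7:0] and the linear (virtual) width in EAX[15:8]; the 0x3028 set above
 * therefore means 40-bit physical, 48-bit linear. Decoding:
 */
static void demo_decode_addr_sizes(unsigned eax, unsigned *phys_bits,
                                   unsigned *virt_bits)
{
    *phys_bits = eax & 0xff;         /* e.g. 0x28 == 40 */
    *virt_bits = (eax >> 8) & 0xff;  /* e.g. 0x30 == 48 */
}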
- */ - env->mtrr_deftype = 0; - memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); - memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); - -#if !defined(CONFIG_USER_ONLY) - /* We hard-wire the BSP to the first CPU. */ - if (s->cpu_index == 0) { - apic_designate_bsp(env->uc, cpu->apic_state); - } - - s->halted = !cpu_is_bsp(cpu); -#endif -} - -#ifndef CONFIG_USER_ONLY -bool cpu_is_bsp(X86CPU *cpu) -{ - return (cpu_get_apic_base((&cpu->env)->uc, cpu->apic_state) & MSR_IA32_APICBASE_BSP) != 0; -} -#endif - -static void mce_init(X86CPU *cpu) -{ - CPUX86State *cenv = &cpu->env; - unsigned int bank; - - if (((cenv->cpuid_version >> 8) & 0xf) >= 6 - && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == - (CPUID_MCE | CPUID_MCA)) { - cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF; - cenv->mcg_ctl = ~(uint64_t)0; - for (bank = 0; bank < MCE_BANKS_DEF; bank++) { - cenv->mce_banks[bank * 4] = ~(uint64_t)0; - } - } -} - -#ifndef CONFIG_USER_ONLY -static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) -{ -#if 0 - DeviceState *dev = DEVICE(cpu); - APICCommonState *apic; - const char *apic_type = "apic"; - - cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type); - if (cpu->apic_state == NULL) { - error_setg(errp, "APIC device '%s' could not be created", apic_type); - return; - } - - object_property_add_child(OBJECT(cpu), "apic", - OBJECT(cpu->apic_state), NULL); - //qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id); - /* TODO: convert to link<> */ - apic = APIC_COMMON(cpu->apic_state); - apic->cpu = cpu; -#endif -} - -static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) -{ - if (cpu->apic_state == NULL) { - return; - } - - if (qdev_init(cpu->apic_state)) { - error_setg(errp, "APIC device '%s' could not be initialized", - object_get_typename(OBJECT(cpu->apic_state))); - return; - } -} -#else -static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) -{ -} -#endif - - -#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ - (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ - (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) -#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ - (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ - (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) -static int x86_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp) -{ - CPUState *cs = CPU(dev); - X86CPU *cpu = X86_CPU(uc, dev); - X86CPUClass *xcc = X86_CPU_GET_CLASS(uc, dev); - CPUX86State *env = &cpu->env; - Error *local_err = NULL; - - if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) { - env->cpuid_level = 7; - } - - /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on - * CPUID[1].EDX. 
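/*
 * Editor's sketch: the IS_AMD_CPU()/IS_INTEL_CPU() checks above compare the
 * three CPUID.0 vendor registers, which carry the 12-byte vendor string in
 * EBX, EDX, ECX order. Reassembling it (assumes a little-endian host, as on
 * x86; the buffer layout is the editor's):
 */
#include <string.h>

static void demo_vendor_string(unsigned ebx, unsigned edx, unsigned ecx,
                               char out[13])
{
    memcpy(out + 0, &ebx, 4);   /* "Auth" / "Genu" */
    memcpy(out + 4, &edx, 4);   /* "enti" / "ineI" */
    memcpy(out + 8, &ecx, 4);   /* "cAMD" / "ntel" */
    out[12] = '\0';
}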
- */ - if (IS_AMD_CPU(env)) { - env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; - env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] - & CPUID_EXT2_AMD_ALIASES); - } - - if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) { - error_setg(&local_err, - "TCG doesn't support requested features"); - goto out; - } - -#ifndef CONFIG_USER_ONLY - //qemu_register_reset(x86_cpu_machine_reset_cb, cpu); - - if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) { - x86_cpu_apic_create(cpu, &local_err); - if (local_err != NULL) { - goto out; - } - } -#endif - - mce_init(cpu); - if (qemu_init_vcpu(cs)) - return -1; - - x86_cpu_apic_realize(cpu, &local_err); - if (local_err != NULL) { - goto out; - } - cpu_reset(cs); - - xcc->parent_realize(uc, dev, &local_err); -out: - if (local_err != NULL) { - error_propagate(errp, local_err); - return -1; - } - - return 0; -} - -/* Enables contiguous-apic-ID mode, for compatibility */ -static bool compat_apic_id_mode; - -void enable_compat_apic_id_mode(void) -{ - compat_apic_id_mode = true; -} - -/* Calculates initial APIC ID for a specific CPU index - * - * Currently we need to be able to calculate the APIC ID from the CPU index - * alone (without requiring a CPU object), as the QEMU<->Seabios interfaces have - * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of - * all CPUs up to max_cpus. - */ -uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index) -{ - uint32_t correct_id; - - correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index); - if (compat_apic_id_mode) { - if (cpu_index != correct_id) { - //error_report("APIC IDs set in compatibility mode, " - // "CPU topology won't match the configuration"); - } - return cpu_index; - } else { - return correct_id; - } -} - -static void x86_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - //printf("... 
X86 initialize (object)\n"); - CPUState *cs = CPU(obj); - X86CPU *cpu = X86_CPU(cs->uc, obj); - X86CPUClass *xcc = X86_CPU_GET_CLASS(uc, obj); - CPUX86State *env = &cpu->env; - - cs->env_ptr = env; - cpu_exec_init(env, opaque); - - object_property_add(obj, "family", "int", - x86_cpuid_version_get_family, - x86_cpuid_version_set_family, NULL, NULL, NULL); - object_property_add(obj, "model", "int", - x86_cpuid_version_get_model, - x86_cpuid_version_set_model, NULL, NULL, NULL); - object_property_add(obj, "stepping", "int", - x86_cpuid_version_get_stepping, - x86_cpuid_version_set_stepping, NULL, NULL, NULL); - object_property_add(obj, "level", "int", - x86_cpuid_get_level, - x86_cpuid_set_level, NULL, NULL, NULL); - object_property_add(obj, "xlevel", "int", - x86_cpuid_get_xlevel, - x86_cpuid_set_xlevel, NULL, NULL, NULL); - object_property_add_str(obj, "vendor", - x86_cpuid_get_vendor, - x86_cpuid_set_vendor, NULL); - object_property_add_str(obj, "model-id", - x86_cpuid_get_model_id, - x86_cpuid_set_model_id, NULL); - object_property_add(obj, "tsc-frequency", "int", - x86_cpuid_get_tsc_freq, - x86_cpuid_set_tsc_freq, NULL, NULL, NULL); - object_property_add(obj, "apic-id", "int", - x86_cpuid_get_apic_id, - x86_cpuid_set_apic_id, NULL, NULL, NULL); - object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", - x86_cpu_get_feature_words, - NULL, NULL, (void *)env->features, NULL); - object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", - x86_cpu_get_feature_words, - NULL, NULL, (void *)cpu->filtered_features, NULL); - - cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY; - env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index); - - x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort); - - /* init various static tables used in TCG mode */ - if (tcg_enabled(env->uc)) - optimize_flags_init(env->uc); -} - -static int64_t x86_cpu_get_arch_id(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - CPUX86State *env = &cpu->env; - - return env->cpuid_apic_id; -} - -static bool x86_cpu_get_paging_enabled(const CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - - return (cpu->env.cr[0] & CR0_PG_MASK) != 0; -} - -static void x86_cpu_set_pc(CPUState *cs, vaddr value) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - - cpu->env.eip = value; -} - -static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - - cpu->env.eip = tb->pc - tb->cs_base; -} - -static bool x86_cpu_has_work(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - CPUX86State *env = &cpu->env; - -#if !defined(CONFIG_USER_ONLY) - if (cs->interrupt_request & CPU_INTERRUPT_POLL) { - apic_poll_irq(cpu->apic_state); - cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL); - } -#endif - - return ((cs->interrupt_request & CPU_INTERRUPT_HARD) && - (env->eflags & IF_MASK)) || - (cs->interrupt_request & (CPU_INTERRUPT_NMI | - CPU_INTERRUPT_INIT | - CPU_INTERRUPT_SIPI | - CPU_INTERRUPT_MCE)); -} - -static void x86_cpu_common_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) -{ - //printf("... 
init X86 cpu common class\n"); - X86CPUClass *xcc = X86_CPU_CLASS(uc, oc); - CPUClass *cc = CPU_CLASS(uc, oc); - DeviceClass *dc = DEVICE_CLASS(uc, oc); - - xcc->parent_realize = dc->realize; - dc->realize = x86_cpu_realizefn; - dc->bus_type = TYPE_ICC_BUS; - - xcc->parent_reset = cc->reset; - cc->reset = x86_cpu_reset; - cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; - - cc->class_by_name = x86_cpu_class_by_name; - cc->parse_features = x86_cpu_parse_featurestr; - cc->has_work = x86_cpu_has_work; - cc->do_interrupt = x86_cpu_do_interrupt; - cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; - cc->dump_state = x86_cpu_dump_state; - cc->set_pc = x86_cpu_set_pc; - cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; - cc->get_arch_id = x86_cpu_get_arch_id; - cc->get_paging_enabled = x86_cpu_get_paging_enabled; -#ifdef CONFIG_USER_ONLY - cc->handle_mmu_fault = x86_cpu_handle_mmu_fault; -#else - cc->get_memory_mapping = x86_cpu_get_memory_mapping; - cc->get_phys_page_debug = x86_cpu_get_phys_page_debug; -#endif -#ifndef CONFIG_USER_ONLY - cc->debug_excp_handler = breakpoint_handler; -#endif - cc->cpu_exec_enter = x86_cpu_exec_enter; - cc->cpu_exec_exit = x86_cpu_exec_exit; -} - -void x86_cpu_register_types(void *opaque) -{ - const TypeInfo x86_cpu_type_info = { - TYPE_X86_CPU, - TYPE_CPU, - - sizeof(X86CPUClass), - sizeof(X86CPU), - opaque, - - x86_cpu_initfn, - NULL, - NULL, - - NULL, - - x86_cpu_common_class_init, - NULL, - NULL, - - true, - }; - - //printf("... register X86 cpu\n"); - int i; - - type_register_static(opaque, &x86_cpu_type_info); - for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { - x86_register_cpudef_type(opaque, &builtin_x86_defs[i]); - } - //printf("... END OF register X86 cpu\n"); -} diff --git a/qemu/target-i386/cpu.h b/qemu/target-i386/cpu.h deleted file mode 100644 index 00887308..00000000 --- a/qemu/target-i386/cpu.h +++ /dev/null @@ -1,1386 +0,0 @@ -/* - * i386 virtual CPU header - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>.
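/*
 * Editor's sketch (names invented here, not QEMU's): x86_cpu_common_class_init()
 * above follows the usual QOM idiom of stashing the parent's hook before
 * installing an override, so the override can chain up later, exactly as
 * x86_cpu_reset() calls xcc->parent_reset(). Reduced to its essence:
 */
typedef struct DemoClass { void (*reset)(void *obj); } DemoClass;

static void (*demo_parent_reset)(void *obj);  /* stands in for xcc->parent_reset */

static void demo_reset(void *obj)
{
    /* subclass-specific work would run here, then chain up */
    if (demo_parent_reset) demo_parent_reset(obj);
}

static void demo_class_init(DemoClass *cls)
{
    demo_parent_reset = cls->reset;  /* stash the parent implementation */
    cls->reset = demo_reset;         /* install the override */
}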
- */ -#ifndef CPU_I386_H -#define CPU_I386_H - -#include "config.h" -#include "qemu-common.h" - -#ifdef TARGET_X86_64 -#define TARGET_LONG_BITS 64 -#else -#define TARGET_LONG_BITS 32 -#endif - -/* target supports implicit self modifying code */ -#define TARGET_HAS_SMC -/* support for self modifying code even if the modified instruction is - close to the modifying instruction */ -#define TARGET_HAS_PRECISE_SMC - -#define TARGET_HAS_ICE 1 - -#ifdef TARGET_X86_64 -#define ELF_MACHINE EM_X86_64 -#define ELF_MACHINE_UNAME "x86_64" -#else -#define ELF_MACHINE EM_386 -#define ELF_MACHINE_UNAME "i686" -#endif - -#define CPUArchState struct CPUX86State - -#include "exec/cpu-defs.h" - -#include "fpu/softfloat.h" - -#define R_EAX 0 -#define R_ECX 1 -#define R_EDX 2 -#define R_EBX 3 -#define R_ESP 4 -#define R_EBP 5 -#define R_ESI 6 -#define R_EDI 7 - -#define R_AL 0 -#define R_CL 1 -#define R_DL 2 -#define R_BL 3 -#define R_AH 4 -#define R_CH 5 -#define R_DH 6 -#define R_BH 7 - -#define R_ES 0 -#define R_CS 1 -#define R_SS 2 -#define R_DS 3 -#define R_FS 4 -#define R_GS 5 - -/* segment descriptor fields */ -#define DESC_G_MASK (1 << 23) -#define DESC_B_SHIFT 22 -#define DESC_B_MASK (1 << DESC_B_SHIFT) -#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ -#define DESC_L_MASK (1 << DESC_L_SHIFT) -#define DESC_AVL_MASK (1 << 20) -#define DESC_P_MASK (1 << 15) -#define DESC_DPL_SHIFT 13 -#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) -#define DESC_S_MASK (1 << 12) -#define DESC_TYPE_SHIFT 8 -#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT) -#define DESC_A_MASK (1 << 8) - -#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */ -#define DESC_C_MASK (1 << 10) /* code: conforming */ -#define DESC_R_MASK (1 << 9) /* code: readable */ - -#define DESC_E_MASK (1 << 10) /* data: expansion direction */ -#define DESC_W_MASK (1 << 9) /* data: writable */ - -#define DESC_TSS_BUSY_MASK (1 << 9) - -/* eflags masks */ -#define CC_C 0x0001 -#define CC_P 0x0004 -#define CC_A 0x0010 -#define CC_Z 0x0040 -#define CC_S 0x0080 -#define CC_O 0x0800 - -#define TF_SHIFT 8 -#define IOPL_SHIFT 12 -#define VM_SHIFT 17 - -#define TF_MASK 0x00000100 -#define IF_MASK 0x00000200 -#define DF_MASK 0x00000400 -#define IOPL_MASK 0x00003000 -#define NT_MASK 0x00004000 -#define RF_MASK 0x00010000 -#define VM_MASK 0x00020000 -#define AC_MASK 0x00040000 -#define VIF_MASK 0x00080000 -#define VIP_MASK 0x00100000 -#define ID_MASK 0x00200000 - -/* hidden flags - used internally by qemu to represent additional cpu - states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We - avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit - positions to ease oring with eflags. 
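/*
 * Editor's sketch: pulling the common fields out of a segment-descriptor
 * flags word using the DESC_* masks defined above:
 */
static void demo_decode_desc_flags(unsigned flags)
{
    int present = !!(flags & DESC_P_MASK);                  /* segment present   */
    int dpl     = (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;/* privilege level   */
    int is_code = !!(flags & DESC_CS_MASK);                 /* code vs. data     */
    int type    = (flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT;
    (void)present; (void)dpl; (void)is_code; (void)type;
}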
*/ -/* current cpl */ -#define HF_CPL_SHIFT 0 -/* true if soft mmu is being used */ -#define HF_SOFTMMU_SHIFT 2 -/* true if hardware interrupts must be disabled for next instruction */ -#define HF_INHIBIT_IRQ_SHIFT 3 -/* 16 or 32 segments */ -#define HF_CS32_SHIFT 4 -#define HF_SS32_SHIFT 5 -/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ -#define HF_ADDSEG_SHIFT 6 -/* copy of CR0.PE (protected mode) */ -#define HF_PE_SHIFT 7 -#define HF_TF_SHIFT 8 /* must be same as eflags */ -#define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ -#define HF_EM_SHIFT 10 -#define HF_TS_SHIFT 11 -#define HF_IOPL_SHIFT 12 /* must be same as eflags */ -#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ -#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ -#define HF_RF_SHIFT 16 /* must be same as eflags */ -#define HF_VM_SHIFT 17 /* must be same as eflags */ -#define HF_AC_SHIFT 18 /* must be same as eflags */ -#define HF_SMM_SHIFT 19 /* CPU in SMM mode */ -#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ -#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */ -#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ -#define HF_SMAP_SHIFT 23 /* CR4.SMAP */ - -#define HF_CPL_MASK (3 << HF_CPL_SHIFT) -#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT) -#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) -#define HF_CS32_MASK (1 << HF_CS32_SHIFT) -#define HF_SS32_MASK (1 << HF_SS32_SHIFT) -#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) -#define HF_PE_MASK (1 << HF_PE_SHIFT) -#define HF_TF_MASK (1 << HF_TF_SHIFT) -#define HF_MP_MASK (1 << HF_MP_SHIFT) -#define HF_EM_MASK (1 << HF_EM_SHIFT) -#define HF_TS_MASK (1 << HF_TS_SHIFT) -#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) -#define HF_LMA_MASK (1 << HF_LMA_SHIFT) -#define HF_CS64_MASK (1 << HF_CS64_SHIFT) -#define HF_RF_MASK (1 << HF_RF_SHIFT) -#define HF_VM_MASK (1 << HF_VM_SHIFT) -#define HF_AC_MASK (1 << HF_AC_SHIFT) -#define HF_SMM_MASK (1 << HF_SMM_SHIFT) -#define HF_SVME_MASK (1 << HF_SVME_SHIFT) -#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT) -#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) -#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) - -/* hflags2 */ - -#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ -#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ -#define HF2_NMI_SHIFT 2 /* CPU serving NMI */ -#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */ - -#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) -#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) -#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) -#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) - -#define CR0_PE_SHIFT 0 -#define CR0_MP_SHIFT 1 - -#define CR0_PE_MASK (1U << 0) -#define CR0_MP_MASK (1U << 1) -#define CR0_EM_MASK (1U << 2) -#define CR0_TS_MASK (1U << 3) -#define CR0_ET_MASK (1U << 4) -#define CR0_NE_MASK (1U << 5) -#define CR0_WP_MASK (1U << 16) -#define CR0_AM_MASK (1U << 18) -#define CR0_PG_MASK (1U << 31) - -#define CR4_VME_MASK (1U << 0) -#define CR4_PVI_MASK (1U << 1) -#define CR4_TSD_MASK (1U << 2) -#define CR4_DE_MASK (1U << 3) -#define CR4_PSE_MASK (1U << 4) -#define CR4_PAE_MASK (1U << 5) -#define CR4_MCE_MASK (1U << 6) -#define CR4_PGE_MASK (1U << 7) -#define CR4_PCE_MASK (1U << 8) -#define CR4_OSFXSR_SHIFT 9 -#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT) -#define CR4_OSXMMEXCPT_MASK (1U << 10) -#define CR4_VMXE_MASK (1U << 13) -#define CR4_SMXE_MASK (1U << 14) -#define CR4_FSGSBASE_MASK (1U << 16) -#define CR4_PCIDE_MASK (1U << 17) -#define CR4_OSXSAVE_MASK (1U << 18) -#define CR4_SMEP_MASK (1U << 20) -#define 
CR4_SMAP_MASK (1U << 21) - -#define DR6_BD (1 << 13) -#define DR6_BS (1 << 14) -#define DR6_BT (1 << 15) -#define DR6_FIXED_1 0xffff0ff0 - -#define DR7_GD (1 << 13) -#define DR7_TYPE_SHIFT 16 -#define DR7_LEN_SHIFT 18 -#define DR7_FIXED_1 0x00000400 -#define DR7_LOCAL_BP_MASK 0x55 -#define DR7_MAX_BP 4 -#define DR7_TYPE_BP_INST 0x0 -#define DR7_TYPE_DATA_WR 0x1 -#define DR7_TYPE_IO_RW 0x2 -#define DR7_TYPE_DATA_RW 0x3 - -#define PG_PRESENT_BIT 0 -#define PG_RW_BIT 1 -#define PG_USER_BIT 2 -#define PG_PWT_BIT 3 -#define PG_PCD_BIT 4 -#define PG_ACCESSED_BIT 5 -#define PG_DIRTY_BIT 6 -#define PG_PSE_BIT 7 -#define PG_GLOBAL_BIT 8 -#define PG_PSE_PAT_BIT 12 -#define PG_NX_BIT 63 - -#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) -#define PG_RW_MASK (1 << PG_RW_BIT) -#define PG_USER_MASK (1 << PG_USER_BIT) -#define PG_PWT_MASK (1 << PG_PWT_BIT) -#define PG_PCD_MASK (1 << PG_PCD_BIT) -#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) -#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) -#define PG_PSE_MASK (1 << PG_PSE_BIT) -#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) -#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT) -#define PG_ADDRESS_MASK 0x000ffffffffff000LL -#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK) -#define PG_HI_USER_MASK 0x7ff0000000000000LL -#define PG_NX_MASK (1ULL << PG_NX_BIT) - -#define PG_ERROR_W_BIT 1 - -#define PG_ERROR_P_MASK 0x01 -#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) -#define PG_ERROR_U_MASK 0x04 -#define PG_ERROR_RSVD_MASK 0x08 -#define PG_ERROR_I_D_MASK 0x10 - -#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ -#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ - -#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P) -#define MCE_BANKS_DEF 10 - -#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ -#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ -#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ - -#define MCI_STATUS_VAL (1ULL<<63) /* valid error */ -#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ -#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ -#define MCI_STATUS_EN (1ULL<<60) /* error enabled */ -#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ -#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. 
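/*
 * Editor's sketch: a page-table-entry permission check built from the PG_*
 * bits above (simplified; the real walk also involves CR0.WP, SMEP/SMAP and
 * the NX bit):
 */
static int demo_pte_allows_write(unsigned long long pte, int user_access)
{
    if (!(pte & PG_PRESENT_MASK))                        /* not mapped       */
        return 0;
    if (user_access && !(pte & PG_USER_MASK))            /* supervisor-only  */
        return 0;
    return !!(pte & PG_RW_MASK);                         /* writable?        */
}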
valid */ -#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ -#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ -#define MCI_STATUS_AR (1ULL<<55) /* Action required */ - -/* MISC register defines */ -#define MCM_ADDR_SEGOFF 0 /* segment offset */ -#define MCM_ADDR_LINEAR 1 /* linear address */ -#define MCM_ADDR_PHYS 2 /* physical address */ -#define MCM_ADDR_MEM 3 /* memory address */ -#define MCM_ADDR_GENERIC 7 /* generic */ - -#define MSR_IA32_TSC 0x10 -#define MSR_IA32_APICBASE 0x1b -#define MSR_IA32_APICBASE_BSP (1<<8) -#define MSR_IA32_APICBASE_ENABLE (1<<11) -#define MSR_IA32_APICBASE_BASE (0xfffff<<12) -#define MSR_IA32_FEATURE_CONTROL 0x0000003a -#define MSR_TSC_ADJUST 0x0000003b -#define MSR_IA32_TSCDEADLINE 0x6e0 - -#define MSR_P6_PERFCTR0 0xc1 - -#define MSR_MTRRcap 0xfe -#define MSR_MTRRcap_VCNT 8 -#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) -#define MSR_MTRRcap_WC_SUPPORTED (1 << 10) - -#define MSR_IA32_SYSENTER_CS 0x174 -#define MSR_IA32_SYSENTER_ESP 0x175 -#define MSR_IA32_SYSENTER_EIP 0x176 - -#define MSR_MCG_CAP 0x179 -#define MSR_MCG_STATUS 0x17a -#define MSR_MCG_CTL 0x17b - -#define MSR_P6_EVNTSEL0 0x186 - -#define MSR_IA32_PERF_STATUS 0x198 - -#define MSR_IA32_MISC_ENABLE 0x1a0 -/* Indicates good rep/movs microcode on some processors: */ -#define MSR_IA32_MISC_ENABLE_DEFAULT 1 - -#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) -#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) - -#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2) - -#define MSR_MTRRfix64K_00000 0x250 -#define MSR_MTRRfix16K_80000 0x258 -#define MSR_MTRRfix16K_A0000 0x259 -#define MSR_MTRRfix4K_C0000 0x268 -#define MSR_MTRRfix4K_C8000 0x269 -#define MSR_MTRRfix4K_D0000 0x26a -#define MSR_MTRRfix4K_D8000 0x26b -#define MSR_MTRRfix4K_E0000 0x26c -#define MSR_MTRRfix4K_E8000 0x26d -#define MSR_MTRRfix4K_F0000 0x26e -#define MSR_MTRRfix4K_F8000 0x26f - -#define MSR_PAT 0x277 - -#define MSR_MTRRdefType 0x2ff - -#define MSR_CORE_PERF_FIXED_CTR0 0x309 -#define MSR_CORE_PERF_FIXED_CTR1 0x30a -#define MSR_CORE_PERF_FIXED_CTR2 0x30b -#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d -#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e -#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f -#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 - -#define MSR_MC0_CTL 0x400 -#define MSR_MC0_STATUS 0x401 -#define MSR_MC0_ADDR 0x402 -#define MSR_MC0_MISC 0x403 - -#define MSR_EFER 0xc0000080 - -#define MSR_EFER_SCE (1 << 0) -#define MSR_EFER_LME (1 << 8) -#define MSR_EFER_LMA (1 << 10) -#define MSR_EFER_NXE (1 << 11) -#define MSR_EFER_SVME (1 << 12) -#define MSR_EFER_FFXSR (1 << 14) - -#define MSR_STAR 0xc0000081 -#define MSR_LSTAR 0xc0000082 -#define MSR_CSTAR 0xc0000083 -#define MSR_FMASK 0xc0000084 -#define MSR_FSBASE 0xc0000100 -#define MSR_GSBASE 0xc0000101 -#define MSR_KERNELGSBASE 0xc0000102 -#define MSR_TSC_AUX 0xc0000103 - -#define MSR_VM_HSAVE_PA 0xc0010117 - -#define MSR_IA32_BNDCFGS 0x00000d90 - -#define XSTATE_FP (1ULL << 0) -#define XSTATE_SSE (1ULL << 1) -#define XSTATE_YMM (1ULL << 2) -#define XSTATE_BNDREGS (1ULL << 3) -#define XSTATE_BNDCSR (1ULL << 4) -#define XSTATE_OPMASK (1ULL << 5) -#define XSTATE_ZMM_Hi256 (1ULL << 6) -#define XSTATE_Hi16_ZMM (1ULL << 7) - - -/* CPUID feature words */ -typedef enum FeatureWord { - FEAT_1_EDX, /* CPUID[1].EDX */ - FEAT_1_ECX, /* CPUID[1].ECX */ - FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ - FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ - FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ - FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ - FEAT_C000_0001_EDX, /* 
CPUID[C000_0001].EDX */ - FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ - FEAT_SVM, /* CPUID[8000_000A].EDX */ - FEATURE_WORDS, -} FeatureWord; - -typedef uint32_t FeatureWordArray[FEATURE_WORDS]; - -/* cpuid_features bits */ -#define CPUID_FP87 (1U << 0) -#define CPUID_VME (1U << 1) -#define CPUID_DE (1U << 2) -#define CPUID_PSE (1U << 3) -#define CPUID_TSC (1U << 4) -#define CPUID_MSR (1U << 5) -#define CPUID_PAE (1U << 6) -#define CPUID_MCE (1U << 7) -#define CPUID_CX8 (1U << 8) -#define CPUID_APIC (1U << 9) -#define CPUID_SEP (1U << 11) /* sysenter/sysexit */ -#define CPUID_MTRR (1U << 12) -#define CPUID_PGE (1U << 13) -#define CPUID_MCA (1U << 14) -#define CPUID_CMOV (1U << 15) -#define CPUID_PAT (1U << 16) -#define CPUID_PSE36 (1U << 17) -#define CPUID_PN (1U << 18) -#define CPUID_CLFLUSH (1U << 19) -#define CPUID_DTS (1U << 21) -#define CPUID_ACPI (1U << 22) -#define CPUID_MMX (1U << 23) -#define CPUID_FXSR (1U << 24) -#define CPUID_SSE (1U << 25) -#define CPUID_SSE2 (1U << 26) -#define CPUID_SS (1U << 27) -#define CPUID_HT (1U << 28) -#define CPUID_TM (1U << 29) -#define CPUID_IA64 (1U << 30) -#define CPUID_PBE (1U << 31) - -#define CPUID_EXT_SSE3 (1U << 0) -#define CPUID_EXT_PCLMULQDQ (1U << 1) -#define CPUID_EXT_DTES64 (1U << 2) -#define CPUID_EXT_MONITOR (1U << 3) -#define CPUID_EXT_DSCPL (1U << 4) -#define CPUID_EXT_VMX (1U << 5) -#define CPUID_EXT_SMX (1U << 6) -#define CPUID_EXT_EST (1U << 7) -#define CPUID_EXT_TM2 (1U << 8) -#define CPUID_EXT_SSSE3 (1U << 9) -#define CPUID_EXT_CID (1U << 10) -#define CPUID_EXT_FMA (1U << 12) -#define CPUID_EXT_CX16 (1U << 13) -#define CPUID_EXT_XTPR (1U << 14) -#define CPUID_EXT_PDCM (1U << 15) -#define CPUID_EXT_PCID (1U << 17) -#define CPUID_EXT_DCA (1U << 18) -#define CPUID_EXT_SSE41 (1U << 19) -#define CPUID_EXT_SSE42 (1U << 20) -#define CPUID_EXT_X2APIC (1U << 21) -#define CPUID_EXT_MOVBE (1U << 22) -#define CPUID_EXT_POPCNT (1U << 23) -#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24) -#define CPUID_EXT_AES (1U << 25) -#define CPUID_EXT_XSAVE (1U << 26) -#define CPUID_EXT_OSXSAVE (1U << 27) -#define CPUID_EXT_AVX (1U << 28) -#define CPUID_EXT_F16C (1U << 29) -#define CPUID_EXT_RDRAND (1U << 30) -#define CPUID_EXT_HYPERVISOR (1U << 31) - -#define CPUID_EXT2_FPU (1U << 0) -#define CPUID_EXT2_VME (1U << 1) -#define CPUID_EXT2_DE (1U << 2) -#define CPUID_EXT2_PSE (1U << 3) -#define CPUID_EXT2_TSC (1U << 4) -#define CPUID_EXT2_MSR (1U << 5) -#define CPUID_EXT2_PAE (1U << 6) -#define CPUID_EXT2_MCE (1U << 7) -#define CPUID_EXT2_CX8 (1U << 8) -#define CPUID_EXT2_APIC (1U << 9) -#define CPUID_EXT2_SYSCALL (1U << 11) -#define CPUID_EXT2_MTRR (1U << 12) -#define CPUID_EXT2_PGE (1U << 13) -#define CPUID_EXT2_MCA (1U << 14) -#define CPUID_EXT2_CMOV (1U << 15) -#define CPUID_EXT2_PAT (1U << 16) -#define CPUID_EXT2_PSE36 (1U << 17) -#define CPUID_EXT2_MP (1U << 19) -#define CPUID_EXT2_NX (1U << 20) -#define CPUID_EXT2_MMXEXT (1U << 22) -#define CPUID_EXT2_MMX (1U << 23) -#define CPUID_EXT2_FXSR (1U << 24) -#define CPUID_EXT2_FFXSR (1U << 25) -#define CPUID_EXT2_PDPE1GB (1U << 26) -#define CPUID_EXT2_RDTSCP (1U << 27) -#define CPUID_EXT2_LM (1U << 29) -#define CPUID_EXT2_3DNOWEXT (1U << 30) -#define CPUID_EXT2_3DNOW (1U << 31) - -/* CPUID[8000_0001].EDX bits that are aliase of CPUID[1].EDX bits on AMD CPUs */ -#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \ - CPUID_EXT2_DE | CPUID_EXT2_PSE | \ - CPUID_EXT2_TSC | CPUID_EXT2_MSR | \ - CPUID_EXT2_PAE | CPUID_EXT2_MCE | \ - CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \ - 
CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \ - CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \ - CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \ - CPUID_EXT2_MMX | CPUID_EXT2_FXSR) - -#define CPUID_EXT3_LAHF_LM (1U << 0) -#define CPUID_EXT3_CMP_LEG (1U << 1) -#define CPUID_EXT3_SVM (1U << 2) -#define CPUID_EXT3_EXTAPIC (1U << 3) -#define CPUID_EXT3_CR8LEG (1U << 4) -#define CPUID_EXT3_ABM (1U << 5) -#define CPUID_EXT3_SSE4A (1U << 6) -#define CPUID_EXT3_MISALIGNSSE (1U << 7) -#define CPUID_EXT3_3DNOWPREFETCH (1U << 8) -#define CPUID_EXT3_OSVW (1U << 9) -#define CPUID_EXT3_IBS (1U << 10) -#define CPUID_EXT3_XOP (1U << 11) -#define CPUID_EXT3_SKINIT (1U << 12) -#define CPUID_EXT3_WDT (1U << 13) -#define CPUID_EXT3_LWP (1U << 15) -#define CPUID_EXT3_FMA4 (1U << 16) -#define CPUID_EXT3_TCE (1U << 17) -#define CPUID_EXT3_NODEID (1U << 19) -#define CPUID_EXT3_TBM (1U << 21) -#define CPUID_EXT3_TOPOEXT (1U << 22) -#define CPUID_EXT3_PERFCORE (1U << 23) -#define CPUID_EXT3_PERFNB (1U << 24) - -#define CPUID_SVM_NPT (1U << 0) -#define CPUID_SVM_LBRV (1U << 1) -#define CPUID_SVM_SVMLOCK (1U << 2) -#define CPUID_SVM_NRIPSAVE (1U << 3) -#define CPUID_SVM_TSCSCALE (1U << 4) -#define CPUID_SVM_VMCBCLEAN (1U << 5) -#define CPUID_SVM_FLUSHASID (1U << 6) -#define CPUID_SVM_DECODEASSIST (1U << 7) -#define CPUID_SVM_PAUSEFILTER (1U << 10) -#define CPUID_SVM_PFTHRESHOLD (1U << 12) - -#define CPUID_7_0_EBX_FSGSBASE (1U << 0) -#define CPUID_7_0_EBX_BMI1 (1U << 3) -#define CPUID_7_0_EBX_HLE (1U << 4) -#define CPUID_7_0_EBX_AVX2 (1U << 5) -#define CPUID_7_0_EBX_SMEP (1U << 7) -#define CPUID_7_0_EBX_BMI2 (1U << 8) -#define CPUID_7_0_EBX_ERMS (1U << 9) -#define CPUID_7_0_EBX_INVPCID (1U << 10) -#define CPUID_7_0_EBX_RTM (1U << 11) -#define CPUID_7_0_EBX_MPX (1U << 14) -#define CPUID_7_0_EBX_AVX512F (1U << 16) /* AVX-512 Foundation */ -#define CPUID_7_0_EBX_RDSEED (1U << 18) -#define CPUID_7_0_EBX_ADX (1U << 19) -#define CPUID_7_0_EBX_SMAP (1U << 20) -#define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */ -#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */ -#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */ - -/* CPUID[0x80000007].EDX flags: */ -#define CPUID_APM_INVTSC (1U << 8) - -#define CPUID_VENDOR_SZ 12 - -#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ -#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */ -#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */ -#define CPUID_VENDOR_INTEL "GenuineIntel" - -#define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */ -#define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */ -#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ -#define CPUID_VENDOR_AMD "AuthenticAMD" - -#define CPUID_VENDOR_VIA "CentaurHauls" - -#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */ -#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */ - -#ifndef HYPERV_SPINLOCK_NEVER_RETRY -#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF -#endif - -#define EXCP00_DIVZ 0 -#define EXCP01_DB 1 -#define EXCP02_NMI 2 -#define EXCP03_INT3 3 -#define EXCP04_INTO 4 -#define EXCP05_BOUND 5 -#define EXCP06_ILLOP 6 -#define EXCP07_PREX 7 -#define EXCP08_DBLE 8 -#define EXCP09_XERR 9 -#define EXCP0A_TSS 10 -#define EXCP0B_NOSEG 11 -#define EXCP0C_STACK 12 -#define EXCP0D_GPF 13 -#define EXCP0E_PAGE 14 -#define EXCP10_COPR 16 -#define EXCP11_ALGN 17 -#define EXCP12_MCHK 18 - -#define EXCP_SYSCALL 0x100 /* only happens in user only emulation - for syscall instruction */ - -/* i386-specific interrupt pending bits. 
*/ -#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1 -#define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 -#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 -#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 -#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 -#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1 -#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2 - -/* Use a clearer name for this. */ -#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET - -typedef enum { - CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ - CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ - - CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ - CC_OP_MULW, - CC_OP_MULL, - CC_OP_MULQ, - - CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ - CC_OP_ADDW, - CC_OP_ADDL, - CC_OP_ADDQ, - - CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ - CC_OP_ADCW, - CC_OP_ADCL, - CC_OP_ADCQ, - - CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ - CC_OP_SUBW, - CC_OP_SUBL, - CC_OP_SUBQ, - - CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ - CC_OP_SBBW, - CC_OP_SBBL, - CC_OP_SBBQ, - - CC_OP_LOGICB, /* modify all flags, CC_DST = res */ - CC_OP_LOGICW, - CC_OP_LOGICL, - CC_OP_LOGICQ, - - CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ - CC_OP_INCW, - CC_OP_INCL, - CC_OP_INCQ, - - CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ - CC_OP_DECW, - CC_OP_DECL, - CC_OP_DECQ, - - CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ - CC_OP_SHLW, - CC_OP_SHLL, - CC_OP_SHLQ, - - CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ - CC_OP_SARW, - CC_OP_SARL, - CC_OP_SARQ, - - CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ - CC_OP_BMILGW, - CC_OP_BMILGL, - CC_OP_BMILGQ, - - CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ - CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */ - CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ - - CC_OP_CLR, /* Z set, all other flags clear. 
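/*
 * Editor's sketch: under the lazy scheme above, flags are recomputed from
 * CC_DST/CC_SRC on demand. For CC_OP_ADDL (CC_DST = result, CC_SRC = src1),
 * ZF/SF/CF fall out as follows (bit positions per CC_C/CC_Z/CC_S above):
 */
static unsigned demo_flags_for_addl(unsigned dst, unsigned src1)
{
    unsigned cf = dst < src1;   /* carry iff the 32-bit add wrapped */
    unsigned zf = dst == 0;
    unsigned sf = dst >> 31;
    return (cf << 0) | (zf << 6) | (sf << 7);
}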
*/ - - CC_OP_NB, -} CCOp; - -typedef struct SegmentCache { - uint32_t selector; - target_ulong base; - uint32_t limit; - uint32_t flags; -} SegmentCache; - -typedef union { - uint8_t _b[16]; - uint16_t _w[8]; - uint32_t _l[4]; - uint64_t _q[2]; - float32 _s[4]; - float64 _d[2]; -} XMMReg; - -typedef union { - uint8_t _b[32]; - uint16_t _w[16]; - uint32_t _l[8]; - uint64_t _q[4]; - float32 _s[8]; - float64 _d[4]; -} YMMReg; - -typedef union { - uint8_t _b[64]; - uint16_t _w[32]; - uint32_t _l[16]; - uint64_t _q[8]; - float32 _s[16]; - float64 _d[8]; -} ZMMReg; - -typedef union { - uint8_t _b[8]; - uint16_t _w[4]; - uint32_t _l[2]; - float32 _s[2]; - uint64_t q; -} MMXReg; - -typedef struct BNDReg { - uint64_t lb; - uint64_t ub; -} BNDReg; - -typedef struct BNDCSReg { - uint64_t cfgu; - uint64_t sts; -} BNDCSReg; - -#ifdef HOST_WORDS_BIGENDIAN -#define ZMM_B(n) _b[63 - (n)] -#define ZMM_W(n) _w[31 - (n)] -#define ZMM_L(n) _l[15 - (n)] -#define ZMM_S(n) _s[15 - (n)] -#define ZMM_Q(n) _q[7 - (n)] -#define ZMM_D(n) _d[7 - (n)] - -#define YMM_B(n) _b[31 - (n)] -#define YMM_W(n) _w[15 - (n)] -#define YMM_L(n) _l[7 - (n)] -#define YMM_S(n) _s[7 - (n)] -#define YMM_Q(n) _q[3 - (n)] -#define YMM_D(n) _d[3 - (n)] - -#define XMM_B(n) _b[15 - (n)] -#define XMM_W(n) _w[7 - (n)] -#define XMM_L(n) _l[3 - (n)] -#define XMM_S(n) _s[3 - (n)] -#define XMM_Q(n) _q[1 - (n)] -#define XMM_D(n) _d[1 - (n)] - -#define MMX_B(n) _b[7 - (n)] -#define MMX_W(n) _w[3 - (n)] -#define MMX_L(n) _l[1 - (n)] -#define MMX_S(n) _s[1 - (n)] -#else -#define ZMM_B(n) _b[n] -#define ZMM_W(n) _w[n] -#define ZMM_L(n) _l[n] -#define ZMM_S(n) _s[n] -#define ZMM_Q(n) _q[n] -#define ZMM_D(n) _d[n] - -#define YMM_B(n) _b[n] -#define YMM_W(n) _w[n] -#define YMM_L(n) _l[n] -#define YMM_S(n) _s[n] -#define YMM_Q(n) _q[n] -#define YMM_D(n) _d[n] - -#define XMM_B(n) _b[n] -#define XMM_W(n) _w[n] -#define XMM_L(n) _l[n] -#define XMM_S(n) _s[n] -#define XMM_Q(n) _q[n] -#define XMM_D(n) _d[n] - -#define MMX_B(n) _b[n] -#define MMX_W(n) _w[n] -#define MMX_L(n) _l[n] -#define MMX_S(n) _s[n] -#endif -#define MMX_Q(n) q - -typedef union { - floatx80 QEMU_ALIGN(16, d); - MMXReg mmx; -} FPReg; - -typedef struct { - uint64_t base; - uint64_t mask; -} MTRRVar; - -#define CPU_NB_REGS64 16 -#define CPU_NB_REGS32 8 - -#ifdef TARGET_X86_64 -#define CPU_NB_REGS CPU_NB_REGS64 -#else -#define CPU_NB_REGS CPU_NB_REGS32 -#endif - -#define MAX_FIXED_COUNTERS 3 -#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) - -#define NB_MMU_MODES 3 - -#define NB_OPMASK_REGS 8 - -typedef enum TPRAccess { - TPR_ACCESS_READ, - TPR_ACCESS_WRITE, -} TPRAccess; - -typedef struct CPUX86State { - /* standard registers */ - target_ulong regs[CPU_NB_REGS]; - target_ulong eip; - target_ulong eflags0; // copy of eflags that does not change thru the BB - target_ulong eflags; /* eflags register. During CPU emulation, CC - flags and DF are set to zero because they are - stored elsewhere */ - - /* emulator internal eflags handling */ - target_ulong cc_dst; - target_ulong cc_src; - target_ulong cc_src2; - uint32_t cc_op; - int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ - uint32_t hflags; /* TB flags, see HF_xxx constants. These flags - are known at translation time. */ - uint32_t hflags2; /* various other flags, see HF2_xxx constants. 
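/*
 * Editor's sketch: the register unions above let the same bytes be viewed
 * at several widths; e.g., on a little-endian host (the #else branch of the
 * endian macros above):
 */
#include <stdint.h>

static void demo_xmm_alias(void)
{
    union { uint8_t _b[16]; uint32_t _l[4]; uint64_t _q[2]; } r = { { 0 } };
    r._q[0] = 0x1122334455667788ULL;
    /* little-endian: r._l[0] == 0x55667788, r._l[1] == 0x11223344,
       r._b[0] == 0x88 */
}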
*/ - - /* segments */ - SegmentCache segs[6]; /* selector values */ - SegmentCache ldt; - SegmentCache tr; - SegmentCache gdt; /* only base and limit are used */ - SegmentCache idt; /* only base and limit are used */ - - target_ulong cr[5]; /* NOTE: cr1 is unused */ - int32_t a20_mask; - - BNDReg bnd_regs[4]; - BNDCSReg bndcs_regs; - uint64_t msr_bndcfgs; - - /* Beginning of state preserved by INIT (dummy marker). */ - //struct {} start_init_save; - int start_init_save; - - /* FPU state */ - unsigned int fpstt; /* top of stack index */ - uint16_t fpus; - uint16_t fpuc; - uint8_t fptags[8]; /* 0 = valid, 1 = empty */ - FPReg fpregs[8]; - /* KVM-only so far */ - uint16_t fpop; - uint64_t fpip; - uint64_t fpdp; - - /* emulator internal variables */ - float_status fp_status; - floatx80 ft0; - - float_status mmx_status; /* for 3DNow! float ops */ - float_status sse_status; - uint32_t mxcsr; - XMMReg xmm_regs[CPU_NB_REGS]; - XMMReg xmm_t0; - MMXReg mmx_t0; - - XMMReg ymmh_regs[CPU_NB_REGS]; - - uint64_t opmask_regs[NB_OPMASK_REGS]; - YMMReg zmmh_regs[CPU_NB_REGS]; -#ifdef TARGET_X86_64 - ZMMReg hi16_zmm_regs[CPU_NB_REGS]; -#endif - - /* sysenter registers */ - uint32_t sysenter_cs; - target_ulong sysenter_esp; - target_ulong sysenter_eip; - uint64_t efer; - uint64_t star; - - uint64_t vm_hsave; - -#ifdef TARGET_X86_64 - target_ulong lstar; - target_ulong cstar; - target_ulong fmask; - target_ulong kernelgsbase; -#endif - - uint64_t tsc; - uint64_t tsc_adjust; - uint64_t tsc_deadline; - - uint64_t mcg_status; - uint64_t msr_ia32_misc_enable; - uint64_t msr_ia32_feature_control; - - uint64_t msr_fixed_ctr_ctrl; - uint64_t msr_global_ctrl; - uint64_t msr_global_status; - uint64_t msr_global_ovf_ctrl; - uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; - uint64_t msr_gp_counters[MAX_GP_COUNTERS]; - uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; - - uint64_t pat; - uint32_t smbase; - - /* End of state preserved by INIT (dummy marker). */ - //struct {} end_init_save; - int end_init_save; - - uint64_t system_time_msr; - uint64_t wall_clock_msr; - uint64_t steal_time_msr; - uint64_t async_pf_en_msr; - uint64_t pv_eoi_en_msr; - - uint64_t msr_hv_hypercall; - uint64_t msr_hv_guest_os_id; - uint64_t msr_hv_vapic; - uint64_t msr_hv_tsc; - - /* exception/interrupt handling */ - int error_code; - int exception_is_int; - target_ulong exception_next_eip; - target_ulong dr[8]; /* debug registers */ - union { - struct CPUBreakpoint *cpu_breakpoint[4]; - struct CPUWatchpoint *cpu_watchpoint[4]; - }; /* break/watchpoints for dr[0..3] */ - int old_exception; /* exception in flight */ - - uint64_t vm_vmcb; - uint64_t tsc_offset; - uint64_t intercept; - uint16_t intercept_cr_read; - uint16_t intercept_cr_write; - uint16_t intercept_dr_read; - uint16_t intercept_dr_write; - uint32_t intercept_exceptions; - uint8_t v_tpr; - - /* KVM states, automatically cleared on reset */ - uint8_t nmi_injected; - uint8_t nmi_pending; - - CPU_COMMON - - /* Fields from here on are preserved across CPU reset. */ - - /* processor features (e.g. 
for CPUID insn) */ - uint32_t cpuid_level; - uint32_t cpuid_xlevel; - uint32_t cpuid_xlevel2; - uint32_t cpuid_vendor1; - uint32_t cpuid_vendor2; - uint32_t cpuid_vendor3; - uint32_t cpuid_version; - FeatureWordArray features; - uint32_t cpuid_model[12]; - uint32_t cpuid_apic_id; - - /* MTRRs */ - uint64_t mtrr_fixed[11]; - uint64_t mtrr_deftype; - MTRRVar mtrr_var[MSR_MTRRcap_VCNT]; - - /* For KVM */ - uint32_t mp_state; - int32_t exception_injected; - int32_t interrupt_injected; - uint8_t soft_interrupt; - uint8_t has_error_code; - uint32_t sipi_vector; - bool tsc_valid; - int tsc_khz; - void *kvm_xsave_buf; - - uint64_t mcg_cap; - uint64_t mcg_ctl; - uint64_t mce_banks[MCE_BANKS_DEF*4]; - - uint64_t tsc_aux; - - /* vmstate */ - uint16_t fpus_vmstate; - uint16_t fptag_vmstate; - uint16_t fpregs_format_vmstate; - uint64_t xstate_bv; - - uint64_t xcr0; - - TPRAccess tpr_access_type; - - // Unicorn engine - struct uc_struct *uc; -} CPUX86State; - -#include "cpu-qom.h" - -X86CPU *cpu_x86_init(struct uc_struct *uc, const char *cpu_model); -X86CPU *cpu_x86_create(struct uc_struct *uc, const char *cpu_model, Error **errp); -int cpu_x86_exec(struct uc_struct *uc, CPUX86State *s); -void x86_cpudef_setup(void); -int cpu_x86_support_mca_broadcast(CPUX86State *env); - -int cpu_get_pic_interrupt(CPUX86State *s); -/* MSDOS compatibility mode FPU exception support */ -void cpu_set_ferr(CPUX86State *s); - -/* this function must always be used to load data in the segment - cache: it synchronizes the hflags with the segment cache values */ -static inline void cpu_x86_load_seg_cache(CPUX86State *env, - int seg_reg, unsigned int selector, - target_ulong base, - unsigned int limit, - unsigned int flags) -{ - SegmentCache *sc; - unsigned int new_hflags; - - sc = &env->segs[seg_reg]; - sc->selector = selector; - sc->base = base; - sc->limit = limit; - sc->flags = flags; - - /* update the hidden flags */ - { - if (seg_reg == R_CS) { -#ifdef TARGET_X86_64 - if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { - /* long mode */ - env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; - env->hflags &= ~(HF_ADDSEG_MASK); - } else -#endif - { - /* legacy / compatibility case */ - new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) - >> (DESC_B_SHIFT - HF_CS32_SHIFT); - env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | - new_hflags; - } - } - if (seg_reg == R_SS) { - int cpl = (flags >> DESC_DPL_SHIFT) & 3; -#if HF_CPL_MASK != 3 -#error HF_CPL_MASK is hardcoded -#endif - env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl; - } - new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) - >> (DESC_B_SHIFT - HF_SS32_SHIFT); - if (env->hflags & HF_CS64_MASK) { - /* zero base assumed for DS, ES and SS in long mode */ - } else if (!(env->cr[0] & CR0_PE_MASK) || - (env->eflags & VM_MASK) || - !(env->hflags & HF_CS32_MASK)) { - /* XXX: try to avoid this test. The problem comes from the - fact that is real mode or vm86 mode we only modify the - 'base' and 'selector' fields of the segment cache to go - faster. A solution may be to force addseg to one in - translate-i386.c. 
*/ - new_hflags |= HF_ADDSEG_MASK; - } else { - new_hflags |= ((env->segs[R_DS].base | - env->segs[R_ES].base | - env->segs[R_SS].base) != 0) << - HF_ADDSEG_SHIFT; - } - env->hflags = (env->hflags & - ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; - } -} - -static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu, - uint8_t sipi_vector) -{ - CPUState *cs = CPU(cpu); - CPUX86State *env = &cpu->env; - - env->eip = 0; - cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8, - sipi_vector << 12, - env->segs[R_CS].limit, - env->segs[R_CS].flags); - cs->halted = 0; -} - -int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, - target_ulong *base, unsigned int *limit, - unsigned int *flags); - -/* op_helper.c */ -/* used for debug or cpu save/restore */ -void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f); -floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper); - -/* cpu-exec.c */ -/* the following helpers are only usable in user mode simulation as - they can trigger unexpected exceptions */ -void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector); -void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); -void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); - -/* the binding language can not catch the exceptions. - check the arguments, return error instead of raise exceptions. */ -int uc_check_cpu_x86_load_seg(CPUX86State *env, int seg_reg, int sel); - -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_x86_signal_handler(int host_signum, void *pinfo, - void *puc); - -/* cpuid.c */ -void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, - uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx); -void cpu_clear_apic_feature(CPUX86State *env); -void host_cpuid(uint32_t function, uint32_t count, - uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); - -/* helper.c */ -int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, - int is_write, int mmu_idx); -void x86_cpu_set_a20(X86CPU *cpu, int a20_state); - -static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index) -{ - return (dr7 >> (index * 2)) & 1; -} - -static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index) -{ - return (dr7 >> (index * 2)) & 2; - -} -static inline bool hw_breakpoint_enabled(unsigned long dr7, int index) -{ - return hw_global_breakpoint_enabled(dr7, index) || - hw_local_breakpoint_enabled(dr7, index); -} - -static inline int hw_breakpoint_type(unsigned long dr7, int index) -{ - return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3; -} - -static inline int hw_breakpoint_len(unsigned long dr7, int index) -{ - int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3); - return (len == 2) ? 8 : len + 1; -} - -void hw_breakpoint_insert(CPUX86State *env, int index); -void hw_breakpoint_remove(CPUX86State *env, int index); -bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update); -void breakpoint_handler(CPUState *cs); - -/* will be suppressed */ -void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); -void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); -void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); - -/* hw/pc.c */ -void cpu_smm_update(CPUX86State *env); -uint64_t cpu_get_tsc(CPUX86State *env); - -#define TARGET_PAGE_BITS 12 - -#ifdef TARGET_X86_64 -#define TARGET_PHYS_ADDR_SPACE_BITS 52 -/* ??? 
This is really 48 bits, sign-extended, but the only thing - accessible to userland with bit 48 set is the VSYSCALL, and that - is handled via other mechanisms. */ -#define TARGET_VIRT_ADDR_SPACE_BITS 47 -#else -#define TARGET_PHYS_ADDR_SPACE_BITS 36 -#define TARGET_VIRT_ADDR_SPACE_BITS 32 -#endif - -/* XXX: This value should match the one returned by CPUID - * and in exec.c */ -# if defined(TARGET_X86_64) -# define PHYS_ADDR_MASK 0xffffffffffLL -# else -# define PHYS_ADDR_MASK 0xfffffffffLL -# endif - -static inline CPUX86State *cpu_init(struct uc_struct *uc, const char *cpu_model) -{ - X86CPU *cpu = cpu_x86_init(uc, cpu_model); - if (cpu == NULL) { - return NULL; - } - return &cpu->env; -} - -#ifdef TARGET_I386 -#define cpu_exec cpu_x86_exec -#define cpu_gen_code cpu_x86_gen_code -#define cpu_signal_handler cpu_x86_signal_handler -#define cpudef_setup x86_cpudef_setup -#endif - -/* MMU modes definitions */ -#define MMU_MODE0_SUFFIX _ksmap -#define MMU_MODE1_SUFFIX _user -#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */ -#define MMU_KSMAP_IDX 0 -#define MMU_USER_IDX 1 -#define MMU_KNOSMAP_IDX 2 -static inline int cpu_mmu_index(CPUX86State *env) -{ - return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : - (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK)) - ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; -} - -static inline int cpu_mmu_index_kernel(CPUX86State *env) -{ - return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX : - ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) - ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; -} - -#define CC_DST (env->cc_dst) -#define CC_SRC (env->cc_src) -#define CC_SRC2 (env->cc_src2) -#define CC_OP (env->cc_op) - -/* n must be a constant to be efficient */ -static inline target_long lshift(target_long x, int n) -{ - if (n >= 0) { - return x << n; - } else { - return x >> (-n); - } -} - -/* float macros */ -#define FT0 (env->ft0) -#define ST0 (env->fpregs[env->fpstt].d) -#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) -#define ST1 ST(1) - -/* translate.c */ -void optimize_flags_init(struct uc_struct *); - -#include "exec/cpu-all.h" -#include "svm.h" - -#if !defined(CONFIG_USER_ONLY) -#include "hw/i386/apic.h" -#endif - -#include "exec/exec-all.h" - -static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, - target_ulong *cs_base, int *flags) -{ - *cs_base = env->segs[R_CS].base; - *pc = *cs_base + env->eip; - *flags = env->hflags | - (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); -} - -void do_cpu_init(X86CPU *cpu); -void do_cpu_sipi(X86CPU *cpu); - -#define MCE_INJECT_BROADCAST 1 -#define MCE_INJECT_UNCOND_AO 2 - -/* excp_helper.c */ -void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index); -void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, - int error_code); -void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int, - int error_code, int next_eip_addend); - -/* cc_helper.c */ -extern const uint8_t parity_table[256]; -uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); -void update_fp_status(CPUX86State *env); - -static inline uint32_t cpu_compute_eflags(CPUX86State *env) -{ - return (env->eflags & ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)) | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK); -} - -/* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS - * after generating a call to a helper that uses this. 
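cpu_compute_eflags() above folds the direction flag back into EFLAGS bit 10 with `env->df & DF_MASK`, and cpu_load_eflags() below recovers the internal ±1 form with `1 - 2*((eflags >> 10) & 1)`; the round trip works because -1 in two's complement has every bit set. A self-contained check, assuming DF_MASK is 0x400 (the architectural DF bit):

    #include <stdio.h>
    #include <stdint.h>

    #define DF_MASK 0x00000400 /* assumed: EFLAGS.DF is bit 10 */

    int main(void)
    {
        for (int dfbit = 0; dfbit <= 1; dfbit++) {
            uint32_t eflags = (uint32_t)dfbit << 10;
            int df = 1 - (2 * ((eflags >> 10) & 1)); /* load: +1 or -1 */
            uint32_t back = (uint32_t)df & DF_MASK;  /* recompose: -1 has bit 10 set */
            printf("DF bit %d -> df=%+d -> recomposed %#05x\n", dfbit, df, back);
        }
        return 0;
    }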
- */ -static inline void cpu_load_eflags(CPUX86State *env, int eflags, - int update_mask) -{ - CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); - CC_OP = CC_OP_EFLAGS; - env->df = 1 - (2 * ((eflags >> 10) & 1)); - env->eflags = (env->eflags & ~update_mask) | - (eflags & update_mask) | 0x2; -} - -/* load efer and update the corresponding hflags. XXX: do consistency - checks with cpuid bits? */ -static inline void cpu_load_efer(CPUX86State *env, uint64_t val) -{ - env->efer = val; - env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); - if (env->efer & MSR_EFER_LMA) { - env->hflags |= HF_LMA_MASK; - } - if (env->efer & MSR_EFER_SVME) { - env->hflags |= HF_SVME_MASK; - } -} - -/* fpu_helper.c */ -void cpu_set_mxcsr(CPUX86State *env, uint32_t val); -void cpu_set_fpuc(CPUX86State *env, uint16_t val); - -/* svm_helper.c */ -void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, - uint64_t param); -void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1); - -/* seg_helper.c */ -void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw); - -void do_smm_enter(X86CPU *cpu); - -void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); - -void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w, - uint32_t feat_add, uint32_t feat_remove); - -void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features); -void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features); - - -/* Return name of 32-bit register, from a R_* constant */ -const char *get_register_name_32(unsigned int reg); - -uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index); -void enable_compat_apic_id_mode(void); - -#define APIC_DEFAULT_ADDRESS 0xfee00000 -#define APIC_SPACE_SIZE 0x100000 - -#endif /* CPU_I386_H */ diff --git a/qemu/target-i386/excp_helper.c b/qemu/target-i386/excp_helper.c deleted file mode 100644 index 7aea373b..00000000 --- a/qemu/target-i386/excp_helper.c +++ /dev/null @@ -1,133 +0,0 @@ -/* - * x86 exception helpers - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "cpu.h" -#include "qemu/log.h" -#include "sysemu/sysemu.h" -#include "exec/helper-proto.h" - -#include "uc_priv.h" - -#if 0 -#define raise_exception_err(env, a, b) \ - do { \ - qemu_log("raise_exception line=%d\n", __LINE__); \ - (raise_exception_err)(env, a, b); \ - } while (0) -#endif - -void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend) -{ - raise_interrupt(env, intno, 1, 0, next_eip_addend); -} - -void helper_raise_exception(CPUX86State *env, int exception_index) -{ - raise_exception(env, exception_index); -} - -/* - * Check nested exceptions and change to double or triple fault if - * needed. It should only be called, if this is not an interrupt. - * Returns the new exception number. 
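check_exception() below implements the Intel double-fault promotion rules: a second contributory fault (vectors 0 and 10-13) raised while a contributory one is being delivered, or a contributory fault or page fault during a page fault, escalates to vector 8 (#DF) with error code 0. A standalone sketch of just that classification, assuming the usual vector numbering (0=#DE, 8=#DF, 13=#GP, 14=#PF):

    #include <stdio.h>
    #include <stdbool.h>

    static bool contributory(int v) { return v == 0 || (v >= 10 && v <= 13); }

    /* Returns the vector to actually deliver, given the fault already being
     * delivered (old) and the new one (v).  Mirrors check_exception(). */
    static int promote(int old, int v)
    {
        if ((contributory(old) && contributory(v)) ||
            (old == 14 && (contributory(v) || v == 14)))
            return 8; /* #DF, error code forced to 0 */
        return v;
    }

    int main(void)
    {
        printf("#GP during #GP -> %d (expect 8)\n", promote(13, 13));
        printf("#PF during #GP -> %d (expect 14: #PF is not contributory)\n", promote(13, 14));
        printf("#PF during #PF -> %d (expect 8)\n", promote(14, 14));
        return 0;
    }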
- */ -static int check_exception(CPUX86State *env, int intno, int *error_code) -{ - int first_contributory = env->old_exception == 0 || - (env->old_exception >= 10 && - env->old_exception <= 13); - int second_contributory = intno == 0 || - (intno >= 10 && intno <= 13); - - qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n", - env->old_exception, intno); - -#if !defined(CONFIG_USER_ONLY) - if (env->old_exception == EXCP08_DBLE) { - if (env->hflags & HF_SVMI_MASK) { - cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0); /* does not return */ - } - - qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); - - qemu_system_reset_request(env->uc); - return EXCP_HLT; - } -#endif - - if ((first_contributory && second_contributory) - || (env->old_exception == EXCP0E_PAGE && - (second_contributory || (intno == EXCP0E_PAGE)))) { - intno = EXCP08_DBLE; - *error_code = 0; - } - - if (second_contributory || (intno == EXCP0E_PAGE) || - (intno == EXCP08_DBLE)) { - env->old_exception = intno; - } - - return intno; -} - -/* - * Signal an interruption. It is executed in the main CPU loop. - * is_int is TRUE if coming from the int instruction. next_eip is the - * env->eip value AFTER the interrupt instruction. It is only relevant if - * is_int is TRUE. - */ -static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno, - int is_int, int error_code, - int next_eip_addend) -{ - CPUState *cs = CPU(x86_env_get_cpu(env)); - - if (!is_int) { - cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno, - error_code); - intno = check_exception(env, intno, &error_code); - } else { - cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0); - } - - cs->exception_index = intno; // qq - env->error_code = error_code; - env->exception_is_int = is_int; - env->exception_next_eip = env->eip + next_eip_addend; - cpu_loop_exit(cs); -} - -/* shortcuts to generate exceptions */ - -void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int, - int error_code, int next_eip_addend) -{ - raise_interrupt2(env, intno, is_int, error_code, next_eip_addend); -} - -void raise_exception_err(CPUX86State *env, int exception_index, - int error_code) -{ - raise_interrupt2(env, exception_index, 0, error_code, 0); -} - -void raise_exception(CPUX86State *env, int exception_index) -{ - raise_interrupt2(env, exception_index, 0, 0, 0); -} diff --git a/qemu/target-i386/helper.c b/qemu/target-i386/helper.c deleted file mode 100644 index c2fba8ee..00000000 --- a/qemu/target-i386/helper.c +++ /dev/null @@ -1,1152 +0,0 @@ -/* - * i386 helpers (without register variable usage) - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
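cpu_x86_version() below splits env->cpuid_version the same way CPUID leaf 1 EAX is laid out: family in bits 11:8, and the model assembled from the extended-model nibble (bits 19:16, shifted up by 4) plus the base model (bits 7:4); cpu_x86_support_mca_broadcast() then keys off family 6, model >= 14. A worked example — 0x000306A9 is an assumed IvyBridge-style version dword, and note the helper ignores the extended-family field:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t cpuid_version = 0x000306A9; /* assumed sample value */
        int family = (cpuid_version >> 8) & 0x0f;
        int model  = ((cpuid_version >> 12) & 0xf0)  /* extended model << 4 */
                   + ((cpuid_version >> 4) & 0x0f);  /* base model */
        int mca_broadcast = (family == 6 && model >= 14) || family > 6;
        printf("family=%d model=0x%02x mca_broadcast=%d\n",
               family, model, mca_broadcast);
        return 0; /* prints: family=6 model=0x3a mca_broadcast=1 */
    }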
- */ - -#include "cpu.h" -#ifndef CONFIG_USER_ONLY -#include "sysemu/sysemu.h" -#endif - -//#define DEBUG_MMU - -static void cpu_x86_version(CPUX86State *env, int *family, int *model) -{ - int cpuver = env->cpuid_version; - - if (family == NULL || model == NULL) { - return; - } - - *family = (cpuver >> 8) & 0x0f; - *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f); -} - -/* Broadcast MCA signal for processor version 06H_EH and above */ -int cpu_x86_support_mca_broadcast(CPUX86State *env) -{ - int family = 0; - int model = 0; - - cpu_x86_version(env, &family, &model); - if ((family == 6 && model >= 14) || family > 6) { - return 1; - } - - return 0; -} - -/***********************************************************/ -/* x86 debug */ - -static const char *cc_op_str[CC_OP_NB] = { - "DYNAMIC", - "EFLAGS", - - "MULB", - "MULW", - "MULL", - "MULQ", - - "ADDB", - "ADDW", - "ADDL", - "ADDQ", - - "ADCB", - "ADCW", - "ADCL", - "ADCQ", - - "SUBB", - "SUBW", - "SUBL", - "SUBQ", - - "SBBB", - "SBBW", - "SBBL", - "SBBQ", - - "LOGICB", - "LOGICW", - "LOGICL", - "LOGICQ", - - "INCB", - "INCW", - "INCL", - "INCQ", - - "DECB", - "DECW", - "DECL", - "DECQ", - - "SHLB", - "SHLW", - "SHLL", - "SHLQ", - - "SARB", - "SARW", - "SARL", - "SARQ", - - "BMILGB", - "BMILGW", - "BMILGL", - "BMILGQ", - - "ADCX", - "ADOX", - "ADCOX", - - "CLR", -}; - -static void -cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf, - const char *name, struct SegmentCache *sc) -{ -#ifdef TARGET_X86_64 - if (env->hflags & HF_CS64_MASK) { - cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name, - sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00); - } else -#endif - { - cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector, - (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00); - } - - if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK)) - goto done; - - cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT); - if (sc->flags & DESC_S_MASK) { - if (sc->flags & DESC_CS_MASK) { - cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" : - ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16")); - cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-', - (sc->flags & DESC_R_MASK) ? 'R' : '-'); - } else { - cpu_fprintf(f, - (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK) - ? "DS " : "DS16"); - cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-', - (sc->flags & DESC_W_MASK) ? 'W' : '-'); - } - cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-'); - } else { - static const char *sys_type_name[2][16] = { - { /* 32 bit mode */ - "Reserved", "TSS16-avl", "LDT", "TSS16-busy", - "CallGate16", "TaskGate", "IntGate16", "TrapGate16", - "Reserved", "TSS32-avl", "Reserved", "TSS32-busy", - "CallGate32", "Reserved", "IntGate32", "TrapGate32" - }, - { /* 64 bit mode */ - "", "Reserved", "LDT", "Reserved", "Reserved", - "Reserved", "Reserved", "Reserved", "Reserved", - "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64", - "Reserved", "IntGate64", "TrapGate64" - } - }; - cpu_fprintf(f, "%s", - sys_type_name[(env->hflags & HF_LMA_MASK) ? 
1 : 0] - [(sc->flags & DESC_TYPE_MASK) - >> DESC_TYPE_SHIFT]); - } -done: - cpu_fprintf(f, "\n"); -} - -#define DUMP_CODE_BYTES_TOTAL 50 -#define DUMP_CODE_BYTES_BACKWARD 20 - -void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, - int flags) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - CPUX86State *env = &cpu->env; - int eflags, i, nb; - char cc_op_name[32]; - static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; - - eflags = cpu_compute_eflags(env); -#ifdef TARGET_X86_64 - if (env->hflags & HF_CS64_MASK) { - cpu_fprintf(f, - "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n" - "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n" - "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n" - "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n" - "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", - env->regs[R_EAX], - env->regs[R_EBX], - env->regs[R_ECX], - env->regs[R_EDX], - env->regs[R_ESI], - env->regs[R_EDI], - env->regs[R_EBP], - env->regs[R_ESP], - env->regs[8], - env->regs[9], - env->regs[10], - env->regs[11], - env->regs[12], - env->regs[13], - env->regs[14], - env->regs[15], - env->eip, eflags, - eflags & DF_MASK ? 'D' : '-', - eflags & CC_O ? 'O' : '-', - eflags & CC_S ? 'S' : '-', - eflags & CC_Z ? 'Z' : '-', - eflags & CC_A ? 'A' : '-', - eflags & CC_P ? 'P' : '-', - eflags & CC_C ? 'C' : '-', - env->hflags & HF_CPL_MASK, - (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, - (env->a20_mask >> 20) & 1, - (env->hflags >> HF_SMM_SHIFT) & 1, - cs->halted); - } else -#endif - { - cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n" - "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n" - "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", - (uint32_t)env->regs[R_EAX], - (uint32_t)env->regs[R_EBX], - (uint32_t)env->regs[R_ECX], - (uint32_t)env->regs[R_EDX], - (uint32_t)env->regs[R_ESI], - (uint32_t)env->regs[R_EDI], - (uint32_t)env->regs[R_EBP], - (uint32_t)env->regs[R_ESP], - (uint32_t)env->eip, eflags, - eflags & DF_MASK ? 'D' : '-', - eflags & CC_O ? 'O' : '-', - eflags & CC_S ? 'S' : '-', - eflags & CC_Z ? 'Z' : '-', - eflags & CC_A ? 'A' : '-', - eflags & CC_P ? 'P' : '-', - eflags & CC_C ? 
'C' : '-', - env->hflags & HF_CPL_MASK, - (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, - (env->a20_mask >> 20) & 1, - (env->hflags >> HF_SMM_SHIFT) & 1, - cs->halted); - } - - for(i = 0; i < 6; i++) { - cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i], - &env->segs[i]); - } - cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt); - cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr); - -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { - cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n", - env->gdt.base, env->gdt.limit); - cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n", - env->idt.base, env->idt.limit); - cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n", - (uint32_t)env->cr[0], - env->cr[2], - env->cr[3], - (uint32_t)env->cr[4]); - for(i = 0; i < 4; i++) - cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]); - cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n", - env->dr[6], env->dr[7]); - } else -#endif - { - cpu_fprintf(f, "GDT= %08x %08x\n", - (uint32_t)env->gdt.base, env->gdt.limit); - cpu_fprintf(f, "IDT= %08x %08x\n", - (uint32_t)env->idt.base, env->idt.limit); - cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n", - (uint32_t)env->cr[0], - (uint32_t)env->cr[2], - (uint32_t)env->cr[3], - (uint32_t)env->cr[4]); - for(i = 0; i < 4; i++) { - cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]); - } - cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n", - env->dr[6], env->dr[7]); - } - if (flags & CPU_DUMP_CCOP) { - if ((unsigned)env->cc_op < CC_OP_NB) - snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]); - else - snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); -#ifdef TARGET_X86_64 - if (env->hflags & HF_CS64_MASK) { - cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n", - env->cc_src, env->cc_dst, - cc_op_name); - } else -#endif - { - cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n", - (uint32_t)env->cc_src, (uint32_t)env->cc_dst, - cc_op_name); - } - } - cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer); - if (flags & CPU_DUMP_FPU) { - int fptag; - fptag = 0; - for(i = 0; i < 8; i++) { - fptag |= ((!env->fptags[i]) << i); - } - cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n", - env->fpuc, - (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11, - env->fpstt, - fptag, - env->mxcsr); - for(i=0;i<8;i++) { - CPU_LDoubleU u; - u.d = env->fpregs[i].d; - cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x", - i, u.l.lower, u.l.upper); - if ((i & 1) == 1) - cpu_fprintf(f, "\n"); - else - cpu_fprintf(f, " "); - } - if (env->hflags & HF_CS64_MASK) - nb = 16; - else - nb = 8; - for(i=0;i<nb;i++) { - cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x", - i, - env->xmm_regs[i].XMM_L(3), - env->xmm_regs[i].XMM_L(2), - env->xmm_regs[i].XMM_L(1), - env->xmm_regs[i].XMM_L(0)); - if ((i & 1) == 1) - cpu_fprintf(f, "\n"); - else - cpu_fprintf(f, " "); - } - } - if (flags & CPU_DUMP_CODE) { - target_ulong base = env->segs[R_CS].base + env->eip; - target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD); - uint8_t code; - char codestr[3]; - - cpu_fprintf(f, "Code="); - for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) { - if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) { - snprintf(codestr, sizeof(codestr), "%02x", code); - } else { - snprintf(codestr, sizeof(codestr), "??"); - } - cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "", - i == offs ? "<" : "", codestr, i == offs ?
">" : ""); - } - cpu_fprintf(f, "\n"); - } -} - -/***********************************************************/ -/* x86 mmu */ -/* XXX: add PGE support */ - -void x86_cpu_set_a20(X86CPU *cpu, int a20_state) -{ - CPUX86State *env = &cpu->env; - - a20_state = (a20_state != 0); - if (a20_state != ((env->a20_mask >> 20) & 1)) { - CPUState *cs = CPU(cpu); - -#if defined(DEBUG_MMU) - printf("A20 update: a20=%d\n", a20_state); -#endif - /* if the cpu is currently executing code, we must unlink it and - all the potentially executing TB */ - cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); - - /* when a20 is changed, all the MMU mappings are invalid, so - we must flush everything */ - tlb_flush(cs, 1); - env->a20_mask = ~(1 << 20) | (a20_state << 20); - } -} - -void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) -{ - X86CPU *cpu = x86_env_get_cpu(env); - int pe_state; - -#if defined(DEBUG_MMU) - printf("CR0 update: CR0=0x%08x\n", new_cr0); -#endif - if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) != - (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) { - tlb_flush(CPU(cpu), 1); - } - -#ifdef TARGET_X86_64 - if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) && - (env->efer & MSR_EFER_LME)) { - /* enter in long mode */ - /* XXX: generate an exception */ - if (!(env->cr[4] & CR4_PAE_MASK)) - return; - env->efer |= MSR_EFER_LMA; - env->hflags |= HF_LMA_MASK; - } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) && - (env->efer & MSR_EFER_LMA)) { - /* exit long mode */ - env->efer &= ~MSR_EFER_LMA; - env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK); - env->eip &= 0xffffffff; - } -#endif - env->cr[0] = new_cr0 | CR0_ET_MASK; - - /* update PE flag in hidden flags */ - pe_state = (env->cr[0] & CR0_PE_MASK); - env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT); - /* ensure that ADDSEG is always set in real mode */ - env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT); - /* update FPU flags */ - env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) | - ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); -} - -/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in - the PDPT */ -void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3) -{ - X86CPU *cpu = x86_env_get_cpu(env); - - env->cr[3] = new_cr3; - if (env->cr[0] & CR0_PG_MASK) { -#if defined(DEBUG_MMU) - printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3); -#endif - tlb_flush(CPU(cpu), 0); - } -} - -void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) -{ - X86CPU *cpu = x86_env_get_cpu(env); - -#if defined(DEBUG_MMU) - printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]); -#endif - if ((new_cr4 ^ env->cr[4]) & - (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK | - CR4_SMEP_MASK | CR4_SMAP_MASK)) { - tlb_flush(CPU(cpu), 1); - } - /* SSE handling */ - if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) { - new_cr4 &= ~CR4_OSFXSR_MASK; - } - env->hflags &= ~HF_OSFXSR_MASK; - if (new_cr4 & CR4_OSFXSR_MASK) { - env->hflags |= HF_OSFXSR_MASK; - } - - if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) { - new_cr4 &= ~CR4_SMAP_MASK; - } - env->hflags &= ~HF_SMAP_MASK; - if (new_cr4 & CR4_SMAP_MASK) { - env->hflags |= HF_SMAP_MASK; - } - - env->cr[4] = new_cr4; -} - -#if defined(CONFIG_USER_ONLY) - -int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, - int is_write, int mmu_idx) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - - /* user mode only emulation */ - is_write &= 1; - env->cr[2] = addr; - env->error_code = 
(is_write << PG_ERROR_W_BIT); - env->error_code |= PG_ERROR_U_MASK; - cs->exception_index = EXCP0E_PAGE; - return 1; -} - -#else - -/* return value: - * -1 = cannot handle fault - * 0 = nothing more to do - * 1 = generate PF fault - */ -int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, - int is_write1, int mmu_idx) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - CPUX86State *env = &cpu->env; - uint64_t ptep, pte; - target_ulong pde_addr, pte_addr; - int error_code = 0; - int is_dirty, prot, page_size, is_write, is_user; - hwaddr paddr; - uint64_t rsvd_mask = PG_HI_RSVD_MASK; - //uint32_t page_offset; - target_ulong vaddr; - - is_user = mmu_idx == MMU_USER_IDX; -#if defined(DEBUG_MMU) - printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n", - addr, is_write1, is_user, env->eip); -#endif - is_write = is_write1 & 1; - - if (!(env->cr[0] & CR0_PG_MASK)) { - pte = addr; -#ifdef TARGET_X86_64 - if (!(env->hflags & HF_LMA_MASK)) { - /* Without long mode we can only address 32bits in real mode */ - pte = (uint32_t)pte; - } -#endif - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - page_size = 4096; - goto do_mapping; - } - - if (!(env->efer & MSR_EFER_NXE)) { - rsvd_mask |= PG_NX_MASK; - } - - if (env->cr[4] & CR4_PAE_MASK) { - uint64_t pde, pdpe; - target_ulong pdpe_addr; - -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { - uint64_t pml4e_addr, pml4e; - int32_t sext; - - /* test virtual address sign extension */ - sext = (int64_t)addr >> 47; - if (sext != 0 && sext != -1) { - env->error_code = 0; - cs->exception_index = EXCP0D_GPF; - return 1; - } - - pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & - env->a20_mask; - pml4e = ldq_phys(cs->as, pml4e_addr); - if (!(pml4e & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pml4e & (rsvd_mask | PG_PSE_MASK)) { - goto do_fault_rsvd; - } - if (!(pml4e & PG_ACCESSED_MASK)) { - pml4e |= PG_ACCESSED_MASK; - stl_phys_notdirty(cs->as, pml4e_addr, pml4e); - } - ptep = pml4e ^ PG_NX_MASK; - pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & - env->a20_mask; - pdpe = ldq_phys(cs->as, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pdpe & rsvd_mask) { - goto do_fault_rsvd; - } - ptep &= pdpe ^ PG_NX_MASK; - if (!(pdpe & PG_ACCESSED_MASK)) { - pdpe |= PG_ACCESSED_MASK; - stl_phys_notdirty(cs->as, pdpe_addr, pdpe); - } - if (pdpe & PG_PSE_MASK) { - /* 1 GB page */ - page_size = 1024 * 1024 * 1024; - pte_addr = pdpe_addr; - pte = pdpe; - goto do_check_protect; - } - } else -#endif - { - /* XXX: load them when cr3 is loaded ? 
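The long-mode walk above indexes each table with 9 bits of the virtual address (bits 47:39 for the PML4E, 38:30 for the PDPE, 29:21 for the PDE, 20:12 for the PTE), each index scaled by 8 because entries are 64-bit; the canonical-address check is the same sign-extension test used above. A self-contained sketch of that address slicing on an assumed sample address:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t addr = 0x00007f1234567000ULL; /* assumed sample VA */

        /* Canonical check: bits 63:47 must all equal bit 47. */
        int64_t sext = (int64_t)addr >> 47;
        if (sext != 0 && sext != -1) {
            printf("non-canonical -> #GP(0)\n");
            return 1;
        }
        printf("pml4 index %3u (entry offset 0x%03x)\n",
               (unsigned)((addr >> 39) & 0x1ff),
               (unsigned)(((addr >> 39) & 0x1ff) << 3));
        printf("pdpt index %3u\n", (unsigned)((addr >> 30) & 0x1ff));
        printf("pd   index %3u\n", (unsigned)((addr >> 21) & 0x1ff));
        printf("pt   index %3u\n", (unsigned)((addr >> 12) & 0x1ff));
        return 0;
    }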
*/ - pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & - env->a20_mask; - pdpe = ldq_phys(cs->as, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) { - goto do_fault; - } - rsvd_mask |= PG_HI_USER_MASK; - if (pdpe & (rsvd_mask | PG_NX_MASK)) { - goto do_fault_rsvd; - } - ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; - } - - pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & - env->a20_mask; - pde = ldq_phys(cs->as, pde_addr); - if (!(pde & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pde & rsvd_mask) { - goto do_fault_rsvd; - } - ptep &= pde ^ PG_NX_MASK; - if (pde & PG_PSE_MASK) { - /* 2 MB page */ - page_size = 2048 * 1024; - pte_addr = pde_addr; - pte = pde; - goto do_check_protect; - } - /* 4 KB page */ - if (!(pde & PG_ACCESSED_MASK)) { - pde |= PG_ACCESSED_MASK; - stl_phys_notdirty(cs->as, pde_addr, pde); - } - pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & - env->a20_mask; - pte = ldq_phys(cs->as, pte_addr); - if (!(pte & PG_PRESENT_MASK)) { - goto do_fault; - } - if (pte & rsvd_mask) { - goto do_fault_rsvd; - } - /* combine pde and pte nx, user and rw protections */ - ptep &= pte ^ PG_NX_MASK; - page_size = 4096; - } else { - uint32_t pde; - - /* page directory entry */ - pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & - env->a20_mask; - pde = ldl_phys(cs->as, pde_addr); - if (!(pde & PG_PRESENT_MASK)) { - goto do_fault; - } - ptep = pde | PG_NX_MASK; - - /* if PSE bit is set, then we use a 4MB page */ - if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { - page_size = 4096 * 1024; - pte_addr = pde_addr; - - /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. - * Leave bits 20-13 in place for setting accessed/dirty bits below. - */ - pte = pde | ((pde & 0x1fe000) << (32 - 13)); - rsvd_mask = 0x200000; - goto do_check_protect_pse36; - } - - if (!(pde & PG_ACCESSED_MASK)) { - pde |= PG_ACCESSED_MASK; - stl_phys_notdirty(cs->as, pde_addr, pde); - } - - /* page directory entry */ - pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & - env->a20_mask; - pte = ldl_phys(cs->as, pte_addr); - if (!(pte & PG_PRESENT_MASK)) { - goto do_fault; - } - /* combine pde and pte user and rw protections */ - ptep &= pte | PG_NX_MASK; - page_size = 4096; - rsvd_mask = 0; - } - -do_check_protect: - rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; -do_check_protect_pse36: - if (pte & rsvd_mask) { - goto do_fault_rsvd; - } - ptep ^= PG_NX_MASK; - if ((ptep & PG_NX_MASK) && is_write1 == 2) { - goto do_fault_protect; - } - switch (mmu_idx) { - case MMU_USER_IDX: - if (!(ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if (is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; - - case MMU_KSMAP_IDX: - if (is_write1 != 2 && (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - /* fall through */ - case MMU_KNOSMAP_IDX: - if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && - (ptep & PG_USER_MASK)) { - goto do_fault_protect; - } - if ((env->cr[0] & CR0_WP_MASK) && - is_write && !(ptep & PG_RW_MASK)) { - goto do_fault_protect; - } - break; - - default: /* cannot happen */ - break; - } - is_dirty = is_write && !(pte & PG_DIRTY_MASK); - if (!(pte & PG_ACCESSED_MASK) || is_dirty) { - pte |= PG_ACCESSED_MASK; - if (is_dirty) { - pte |= PG_DIRTY_MASK; - } - stl_phys_notdirty(cs->as, pte_addr, pte); - } - - /* the page can be put in the TLB */ - prot = PAGE_READ; - if (!(ptep & PG_NX_MASK) && - (mmu_idx == MMU_USER_IDX || - !((env->cr[4] & CR4_SMEP_MASK) && (ptep & 
PG_USER_MASK)))) { - prot |= PAGE_EXEC; - } - if (pte & PG_DIRTY_MASK) { - /* only set write access if already dirty... otherwise wait - for dirty access */ - if (is_user) { - if (ptep & PG_RW_MASK) - prot |= PAGE_WRITE; - } else { - if (!(env->cr[0] & CR0_WP_MASK) || - (ptep & PG_RW_MASK)) - prot |= PAGE_WRITE; - } - } - do_mapping: - -#if 0 - pte = pte & env->a20_mask; - - /* align to page_size */ - pte &= PG_ADDRESS_MASK & ~(page_size - 1); - - /* Even if 4MB pages, we map only one 4KB page in the cache to - avoid filling it too fast */ - vaddr = addr & TARGET_PAGE_MASK; - page_offset = vaddr & (page_size - 1); - paddr = pte + page_offset; -#endif - - // Unicorn: indentity map guest virtual address to host virtual address - vaddr = addr & TARGET_PAGE_MASK; - paddr = vaddr; - //printf(">>> map address %"PRIx64" to %"PRIx64"\n", vaddr, paddr); - - tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); - return 0; - do_fault_rsvd: - error_code |= PG_ERROR_RSVD_MASK; - do_fault_protect: - error_code |= PG_ERROR_P_MASK; - do_fault: - error_code |= (is_write << PG_ERROR_W_BIT); - if (is_user) - error_code |= PG_ERROR_U_MASK; - if (is_write1 == 2 && - (((env->efer & MSR_EFER_NXE) && - (env->cr[4] & CR4_PAE_MASK)) || - (env->cr[4] & CR4_SMEP_MASK))) - error_code |= PG_ERROR_I_D_MASK; - if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) { - /* cr2 is not modified in case of exceptions */ - stq_phys(cs->as, - env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), - addr); - } else { - env->cr[2] = addr; - } - env->error_code = error_code; - cs->exception_index = EXCP0E_PAGE; - return 1; -} - -hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - CPUX86State *env = &cpu->env; - target_ulong pde_addr, pte_addr; - uint64_t pte; - uint32_t page_offset; - int page_size; - - if (!(env->cr[0] & CR0_PG_MASK)) { - pte = addr & env->a20_mask; - page_size = 4096; - } else if (env->cr[4] & CR4_PAE_MASK) { - target_ulong pdpe_addr; - uint64_t pde, pdpe; - -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { - uint64_t pml4e_addr, pml4e; - int32_t sext; - - /* test virtual address sign extension */ - sext = (int64_t)addr >> 47; - if (sext != 0 && sext != -1) { - return -1; - } - pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & - env->a20_mask; - pml4e = ldq_phys(cs->as, pml4e_addr); - if (!(pml4e & PG_PRESENT_MASK)) { - return -1; - } - pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + - (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask; - pdpe = ldq_phys(cs->as, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) { - return -1; - } - if (pdpe & PG_PSE_MASK) { - page_size = 1024 * 1024 * 1024; - pte = pdpe; - goto out; - } - - } else -#endif - { - pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & - env->a20_mask; - pdpe = ldq_phys(cs->as, pdpe_addr); - if (!(pdpe & PG_PRESENT_MASK)) - return -1; - } - - pde_addr = ((pdpe & PG_ADDRESS_MASK) + - (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask; - pde = ldq_phys(cs->as, pde_addr); - if (!(pde & PG_PRESENT_MASK)) { - return -1; - } - if (pde & PG_PSE_MASK) { - /* 2 MB page */ - page_size = 2048 * 1024; - pte = pde; - } else { - /* 4 KB page */ - pte_addr = ((pde & PG_ADDRESS_MASK) + - (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask; - page_size = 4096; - pte = ldq_phys(cs->as, pte_addr); - } - if (!(pte & PG_PRESENT_MASK)) { - return -1; - } - } else { - uint32_t pde; - - /* page directory entry */ - pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask; - pde = 
ldl_phys(cs->as, pde_addr); - if (!(pde & PG_PRESENT_MASK)) - return -1; - if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { - pte = pde | ((pde & 0x1fe000) << (32 - 13)); - page_size = 4096 * 1024; - } else { - /* page directory entry */ - pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask; - pte = ldl_phys(cs->as, pte_addr); - if (!(pte & PG_PRESENT_MASK)) { - return -1; - } - page_size = 4096; - } - pte = pte & env->a20_mask; - } - -#ifdef TARGET_X86_64 -out: -#endif - pte &= PG_ADDRESS_MASK & ~(page_size - 1); - page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); - return pte | page_offset; -} - -void hw_breakpoint_insert(CPUX86State *env, int index) -{ - CPUState *cs = CPU(x86_env_get_cpu(env)); - int type = 0, err = 0; - - switch (hw_breakpoint_type(env->dr[7], index)) { - case DR7_TYPE_BP_INST: - if (hw_breakpoint_enabled(env->dr[7], index)) { - err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU, - &env->cpu_breakpoint[index]); - } - break; - case DR7_TYPE_DATA_WR: - type = BP_CPU | BP_MEM_WRITE; - break; - case DR7_TYPE_IO_RW: - /* No support for I/O watchpoints yet */ - break; - case DR7_TYPE_DATA_RW: - type = BP_CPU | BP_MEM_ACCESS; - break; - } - - if (type != 0) { - err = cpu_watchpoint_insert(cs, env->dr[index], - hw_breakpoint_len(env->dr[7], index), - type, &env->cpu_watchpoint[index]); - } - - if (err) { - env->cpu_breakpoint[index] = NULL; - } -} - -void hw_breakpoint_remove(CPUX86State *env, int index) -{ - CPUState *cs; - - if (!env->cpu_breakpoint[index]) { - return; - } - cs = CPU(x86_env_get_cpu(env)); - switch (hw_breakpoint_type(env->dr[7], index)) { - case DR7_TYPE_BP_INST: - if (hw_breakpoint_enabled(env->dr[7], index)) { - cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]); - } - break; - case DR7_TYPE_DATA_WR: - case DR7_TYPE_DATA_RW: - cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]); - break; - case DR7_TYPE_IO_RW: - /* No support for I/O watchpoints yet */ - break; - } -} - -bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update) -{ - target_ulong dr6; - int reg; - bool hit_enabled = false; - - dr6 = env->dr[6] & ~0xf; - for (reg = 0; reg < DR7_MAX_BP; reg++) { - bool bp_match = false; - bool wp_match = false; - - switch (hw_breakpoint_type(env->dr[7], reg)) { - case DR7_TYPE_BP_INST: - if (env->dr[reg] == env->eip) { - bp_match = true; - } - break; - case DR7_TYPE_DATA_WR: - case DR7_TYPE_DATA_RW: - if (env->cpu_watchpoint[reg] && - env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) { - wp_match = true; - } - break; - case DR7_TYPE_IO_RW: - break; - } - if (bp_match || wp_match) { - dr6 |= 1ULL << reg; - if (hw_breakpoint_enabled(env->dr[7], reg)) { - hit_enabled = true; - } - } - } - - if (hit_enabled || force_dr6_update) { - env->dr[6] = dr6; - } - - return hit_enabled; -} - -void breakpoint_handler(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - CPUX86State *env = &cpu->env; - CPUBreakpoint *bp; - - if (cs->watchpoint_hit) { - if (cs->watchpoint_hit->flags & BP_CPU) { - cs->watchpoint_hit = NULL; - if (check_hw_breakpoints(env, false)) { - raise_exception(env, EXCP01_DB); - } else { - cpu_resume_from_signal(cs, NULL); - } - } - } else { - QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { - if (bp->pc == env->eip) { - if (bp->flags & BP_CPU) { - check_hw_breakpoints(env, true); - raise_exception(env, EXCP01_DB); - } - break; - } - } - } -} - -typedef struct MCEInjectionParams { - X86CPU *cpu; - int bank; - uint64_t status; - uint64_t mcg_status; - uint64_t addr; - uint64_t misc; - 
int flags; -} MCEInjectionParams; - -void cpu_report_tpr_access(CPUX86State *env, TPRAccess access) -{ - X86CPU *cpu = x86_env_get_cpu(env); - CPUState *cs = CPU(cpu); - - cpu_restore_state(cs, cs->mem_io_pc); - - apic_handle_tpr_access_report(cpu->apic_state, env->eip, access); -} -#endif /* !CONFIG_USER_ONLY */ - -int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, - target_ulong *base, unsigned int *limit, - unsigned int *flags) -{ - X86CPU *cpu = x86_env_get_cpu(env); - CPUState *cs = CPU(cpu); - SegmentCache *dt; - target_ulong ptr; - uint32_t e1, e2; - int index; - - if (selector & 0x4) - dt = &env->ldt; - else - dt = &env->gdt; - index = selector & ~7; - ptr = dt->base + index; - if ((uint32_t)(index + 7) > dt->limit - || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0 - || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0) - return 0; - - *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); - *limit = (e1 & 0xffff) | (e2 & 0x000f0000); - if (e2 & DESC_G_MASK) - *limit = (*limit << 12) | 0xfff; - *flags = e2; - - return 1; -} - -#if !defined(CONFIG_USER_ONLY) -void do_cpu_init(X86CPU *cpu) -{ - CPUState *cs = CPU(cpu); - CPUX86State *env = &cpu->env; - CPUX86State *save = g_new(CPUX86State, 1); - int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI; - - *save = *env; - - cpu_reset(cs); - cs->interrupt_request = sipi; - memcpy(&env->start_init_save, &save->start_init_save, - offsetof(CPUX86State, end_init_save) - - offsetof(CPUX86State, start_init_save)); - g_free(save); - - apic_init_reset(env->uc, cpu->apic_state); -} - -void do_cpu_sipi(X86CPU *cpu) -{ - apic_sipi(cpu->apic_state); -} -#else -void do_cpu_init(X86CPU *cpu) -{ -} -void do_cpu_sipi(X86CPU *cpu) -{ -} -#endif - -/* Frob eflags into and out of the CPU temporary format. */ - -void x86_cpu_exec_enter(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - CPUX86State *env = &cpu->env; - - CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); - env->df = 1 - (2 * ((env->eflags >> 10) & 1)); - CC_OP = CC_OP_EFLAGS; - env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); -} - -void x86_cpu_exec_exit(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs->uc, cs); - CPUX86State *env = &cpu->env; - - env->eflags = cpu_compute_eflags(env); - env->eflags0 = env->eflags; -} diff --git a/qemu/target-i386/mem_helper.c b/qemu/target-i386/mem_helper.c deleted file mode 100644 index f92c736c..00000000 --- a/qemu/target-i386/mem_helper.c +++ /dev/null @@ -1,130 +0,0 @@ -/* - * x86 memory access helpers - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
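cpu_x86_get_descr_debug() above reassembles a segment descriptor from its two 32-bit words: base from e1[31:16], e2[7:0], and e2[31:24]; limit from e1[15:0] plus e2[19:16], scaled to byte granularity when the G bit is set. A standalone decode of one assumed flat 32-bit code descriptor (0x0000ffff / 0x00cf9a00), with DESC_G_MASK taken to be bit 23:

    #include <stdio.h>
    #include <stdint.h>

    #define DESC_G_MASK (1u << 23) /* assumed: granularity bit in the high word */

    int main(void)
    {
        /* Flat 4 GiB code segment, as found in a typical protected-mode GDT. */
        uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;

        uint32_t base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
        uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
        if (e2 & DESC_G_MASK)
            limit = (limit << 12) | 0xfff; /* page granularity: scale to bytes */

        printf("base=0x%08x limit=0x%08x flags=0x%08x\n", base, limit, e2);
        return 0; /* prints base=0x00000000 limit=0xffffffff */
    }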
- */ - -#include "cpu.h" -#include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" - -#include "uc_priv.h" - -/* broken thread support */ - -void helper_lock(CPUX86State *env) -{ -} - -void helper_unlock(CPUX86State *env) -{ -} - -void helper_cmpxchg8b(CPUX86State *env, target_ulong a0) -{ - uint64_t d; - int eflags; - - eflags = cpu_cc_compute_all(env, CC_OP); - d = cpu_ldq_data(env, a0); - if (d == (((uint64_t)env->regs[R_EDX] << 32) | (uint32_t)env->regs[R_EAX])) { - cpu_stq_data(env, a0, ((uint64_t)env->regs[R_ECX] << 32) | (uint32_t)env->regs[R_EBX]); - eflags |= CC_Z; - } else { - /* always do the store */ - cpu_stq_data(env, a0, d); - env->regs[R_EDX] = (uint32_t)(d >> 32); - env->regs[R_EAX] = (uint32_t)d; - eflags &= ~CC_Z; - } - CC_SRC = eflags; -} - -#ifdef TARGET_X86_64 -void helper_cmpxchg16b(CPUX86State *env, target_ulong a0) -{ - uint64_t d0, d1; - int eflags; - - if ((a0 & 0xf) != 0) { - raise_exception(env, EXCP0D_GPF); - } - eflags = cpu_cc_compute_all(env, CC_OP); - d0 = cpu_ldq_data(env, a0); - d1 = cpu_ldq_data(env, a0 + 8); - if (d0 == env->regs[R_EAX] && d1 == env->regs[R_EDX]) { - cpu_stq_data(env, a0, env->regs[R_EBX]); - cpu_stq_data(env, a0 + 8, env->regs[R_ECX]); - eflags |= CC_Z; - } else { - /* always do the store */ - cpu_stq_data(env, a0, d0); - cpu_stq_data(env, a0 + 8, d1); - env->regs[R_EDX] = d1; - env->regs[R_EAX] = d0; - eflags &= ~CC_Z; - } - CC_SRC = eflags; -} -#endif - -void helper_boundw(CPUX86State *env, target_ulong a0, int v) -{ - int low, high; - - low = cpu_ldsw_data(env, a0); - high = cpu_ldsw_data(env, a0 + 2); - v = (int16_t)v; - if (v < low || v > high) { - raise_exception(env, EXCP05_BOUND); - } -} - -void helper_boundl(CPUX86State *env, target_ulong a0, int v) -{ - int low, high; - - low = cpu_ldl_data(env, a0); - high = cpu_ldl_data(env, a0 + 4); - if (v < low || v > high) { - raise_exception(env, EXCP05_BOUND); - } -} - -#if !defined(CONFIG_USER_ONLY) -/* try to fill the TLB and return an exception if error. If retaddr is - * NULL, it means that the function was called in C code (i.e. not - * from generated code or from helper.c) - */ -/* XXX: fix it to restore all registers */ -void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, - uintptr_t retaddr) -{ - int ret; - - ret = x86_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); - if (ret) { - X86CPU *cpu = X86_CPU(cs->uc, cs); - CPUX86State *env = &cpu->env; - - if (retaddr) { - /* now we have a real cpu fault */ - cpu_restore_state(cs, retaddr); - } - raise_exception_err(env, cs->exception_index, env->error_code); - } -} -#endif diff --git a/qemu/target-i386/smm_helper.c b/qemu/target-i386/smm_helper.c deleted file mode 100644 index 7875ff03..00000000 --- a/qemu/target-i386/smm_helper.c +++ /dev/null @@ -1,317 +0,0 @@ -/* - * x86 SMM helpers - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ - -#include "cpu.h" -#include "exec/helper-proto.h" - -/* SMM support */ - -#if defined(CONFIG_USER_ONLY) - -void do_smm_enter(X86CPU *cpu) -{ -} - -void helper_rsm(CPUX86State *env) -{ -} - -#else - -#ifdef TARGET_X86_64 -#define SMM_REVISION_ID 0x00020064 -#else -#define SMM_REVISION_ID 0x00020000 -#endif - -void do_smm_enter(X86CPU *cpu) -{ - CPUX86State *env = &cpu->env; - CPUState *cs = CPU(cpu); - target_ulong sm_state; - SegmentCache *dt; - int i, offset; - - qemu_log_mask(CPU_LOG_INT, "SMM: enter\n"); - log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); - - env->hflags |= HF_SMM_MASK; - cpu_smm_update(env); - - sm_state = env->smbase + 0x8000; - -#ifdef TARGET_X86_64 - for (i = 0; i < 6; i++) { - dt = &env->segs[i]; - offset = 0x7e00 + i * 16; - stw_phys(cs->as, sm_state + offset, dt->selector); - stw_phys(cs->as, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff); - stl_phys(cs->as, sm_state + offset + 4, dt->limit); - stq_phys(cs->as, sm_state + offset + 8, dt->base); - } - - stq_phys(cs->as, sm_state + 0x7e68, env->gdt.base); - stl_phys(cs->as, sm_state + 0x7e64, env->gdt.limit); - - stw_phys(cs->as, sm_state + 0x7e70, env->ldt.selector); - stq_phys(cs->as, sm_state + 0x7e78, env->ldt.base); - stl_phys(cs->as, sm_state + 0x7e74, env->ldt.limit); - stw_phys(cs->as, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff); - - stq_phys(cs->as, sm_state + 0x7e88, env->idt.base); - stl_phys(cs->as, sm_state + 0x7e84, env->idt.limit); - - stw_phys(cs->as, sm_state + 0x7e90, env->tr.selector); - stq_phys(cs->as, sm_state + 0x7e98, env->tr.base); - stl_phys(cs->as, sm_state + 0x7e94, env->tr.limit); - stw_phys(cs->as, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff); - - stq_phys(cs->as, sm_state + 0x7ed0, env->efer); - - stq_phys(cs->as, sm_state + 0x7ff8, env->regs[R_EAX]); - stq_phys(cs->as, sm_state + 0x7ff0, env->regs[R_ECX]); - stq_phys(cs->as, sm_state + 0x7fe8, env->regs[R_EDX]); - stq_phys(cs->as, sm_state + 0x7fe0, env->regs[R_EBX]); - stq_phys(cs->as, sm_state + 0x7fd8, env->regs[R_ESP]); - stq_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EBP]); - stq_phys(cs->as, sm_state + 0x7fc8, env->regs[R_ESI]); - stq_phys(cs->as, sm_state + 0x7fc0, env->regs[R_EDI]); - for (i = 8; i < 16; i++) { - stq_phys(cs->as, sm_state + 0x7ff8 - i * 8, env->regs[i]); - } - stq_phys(cs->as, sm_state + 0x7f78, env->eip); - stl_phys(cs->as, sm_state + 0x7f70, cpu_compute_eflags(env)); - stl_phys(cs->as, sm_state + 0x7f68, (uint32_t)env->dr[6]); - stl_phys(cs->as, sm_state + 0x7f60, (uint32_t)env->dr[7]); - - stl_phys(cs->as, sm_state + 0x7f48, (uint32_t)env->cr[4]); - stl_phys(cs->as, sm_state + 0x7f50, (uint32_t)env->cr[3]); - stl_phys(cs->as, sm_state + 0x7f58, (uint32_t)env->cr[0]); - - stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID); - stl_phys(cs->as, sm_state + 0x7f00, env->smbase); -#else - stl_phys(cs->as, sm_state + 0x7ffc, env->cr[0]); - stl_phys(cs->as, sm_state + 0x7ff8, env->cr[3]); - stl_phys(cs->as, sm_state + 0x7ff4, cpu_compute_eflags(env)); - stl_phys(cs->as, sm_state + 0x7ff0, env->eip); - stl_phys(cs->as, sm_state + 0x7fec, env->regs[R_EDI]); - stl_phys(cs->as, sm_state + 0x7fe8, env->regs[R_ESI]); - stl_phys(cs->as, sm_state + 0x7fe4, env->regs[R_EBP]); - stl_phys(cs->as, sm_state + 0x7fe0, env->regs[R_ESP]); - stl_phys(cs->as, sm_state + 0x7fdc, env->regs[R_EBX]); - stl_phys(cs->as, sm_state + 0x7fd8, env->regs[R_EDX]); - stl_phys(cs->as, sm_state + 0x7fd4, env->regs[R_ECX]); - stl_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EAX]); - stl_phys(cs->as, sm_state + 
0x7fcc, env->dr[6]); - stl_phys(cs->as, sm_state + 0x7fc8, env->dr[7]); - - stl_phys(cs->as, sm_state + 0x7fc4, env->tr.selector); - stl_phys(cs->as, sm_state + 0x7f64, env->tr.base); - stl_phys(cs->as, sm_state + 0x7f60, env->tr.limit); - stl_phys(cs->as, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff); - - stl_phys(cs->as, sm_state + 0x7fc0, env->ldt.selector); - stl_phys(cs->as, sm_state + 0x7f80, env->ldt.base); - stl_phys(cs->as, sm_state + 0x7f7c, env->ldt.limit); - stl_phys(cs->as, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff); - - stl_phys(cs->as, sm_state + 0x7f74, env->gdt.base); - stl_phys(cs->as, sm_state + 0x7f70, env->gdt.limit); - - stl_phys(cs->as, sm_state + 0x7f58, env->idt.base); - stl_phys(cs->as, sm_state + 0x7f54, env->idt.limit); - - for (i = 0; i < 6; i++) { - dt = &env->segs[i]; - if (i < 3) { - offset = 0x7f84 + i * 12; - } else { - offset = 0x7f2c + (i - 3) * 12; - } - stl_phys(cs->as, sm_state + 0x7fa8 + i * 4, dt->selector); - stl_phys(cs->as, sm_state + offset + 8, dt->base); - stl_phys(cs->as, sm_state + offset + 4, dt->limit); - stl_phys(cs->as, sm_state + offset, (dt->flags >> 8) & 0xf0ff); - } - stl_phys(cs->as, sm_state + 0x7f14, env->cr[4]); - - stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID); - stl_phys(cs->as, sm_state + 0x7ef8, env->smbase); -#endif - /* init SMM cpu state */ - -#ifdef TARGET_X86_64 - cpu_load_efer(env, 0); -#endif - cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | - DF_MASK)); - env->eip = 0x00008000; - cpu_x86_update_cr0(env, - env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | - CR0_PG_MASK)); - cpu_x86_update_cr4(env, 0); - env->dr[7] = 0x00000400; - - cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase, - 0xffffffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, - DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | - DESC_A_MASK); -} - -void helper_rsm(CPUX86State *env) -{ - X86CPU *cpu = x86_env_get_cpu(env); - CPUState *cs = CPU(cpu); - target_ulong sm_state; - int i, offset; - uint32_t val; - - sm_state = env->smbase + 0x8000; -#ifdef TARGET_X86_64 - cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0)); - - env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68); - env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64); - - env->ldt.selector = lduw_phys(cs->as, sm_state + 0x7e70); - env->ldt.base = ldq_phys(cs->as, sm_state + 0x7e78); - env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7e74); - env->ldt.flags = (lduw_phys(cs->as, sm_state + 0x7e72) & 0xf0ff) << 8; - - env->idt.base = ldq_phys(cs->as, sm_state + 0x7e88); - env->idt.limit = ldl_phys(cs->as, sm_state + 0x7e84); - - env->tr.selector = lduw_phys(cs->as, sm_state + 0x7e90); - env->tr.base = ldq_phys(cs->as, sm_state + 0x7e98); - env->tr.limit = ldl_phys(cs->as, sm_state + 0x7e94); - env->tr.flags = (lduw_phys(cs->as, sm_state + 0x7e92) & 0xf0ff) << 8; - - env->regs[R_EAX] = ldq_phys(cs->as, sm_state + 0x7ff8); - env->regs[R_ECX] = ldq_phys(cs->as, sm_state + 0x7ff0); - env->regs[R_EDX] = ldq_phys(cs->as, 
sm_state + 0x7fe8); - env->regs[R_EBX] = ldq_phys(cs->as, sm_state + 0x7fe0); - env->regs[R_ESP] = ldq_phys(cs->as, sm_state + 0x7fd8); - env->regs[R_EBP] = ldq_phys(cs->as, sm_state + 0x7fd0); - env->regs[R_ESI] = ldq_phys(cs->as, sm_state + 0x7fc8); - env->regs[R_EDI] = ldq_phys(cs->as, sm_state + 0x7fc0); - for (i = 8; i < 16; i++) { - env->regs[i] = ldq_phys(cs->as, sm_state + 0x7ff8 - i * 8); - } - env->eip = ldq_phys(cs->as, sm_state + 0x7f78); - cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7f70), - ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); - env->dr[6] = ldl_phys(cs->as, sm_state + 0x7f68); - env->dr[7] = ldl_phys(cs->as, sm_state + 0x7f60); - - cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f48)); - cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7f50)); - cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7f58)); - - for (i = 0; i < 6; i++) { - offset = 0x7e00 + i * 16; - cpu_x86_load_seg_cache(env, i, - lduw_phys(cs->as, sm_state + offset), - ldq_phys(cs->as, sm_state + offset + 8), - ldl_phys(cs->as, sm_state + offset + 4), - (lduw_phys(cs->as, sm_state + offset + 2) & - 0xf0ff) << 8); - } - - val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */ - if (val & 0x20000) { - env->smbase = ldl_phys(cs->as, sm_state + 0x7f00) & ~0x7fff; - } -#else - cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7ffc)); - cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7ff8)); - cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7ff4), - ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); - env->eip = ldl_phys(cs->as, sm_state + 0x7ff0); - env->regs[R_EDI] = ldl_phys(cs->as, sm_state + 0x7fec); - env->regs[R_ESI] = ldl_phys(cs->as, sm_state + 0x7fe8); - env->regs[R_EBP] = ldl_phys(cs->as, sm_state + 0x7fe4); - env->regs[R_ESP] = ldl_phys(cs->as, sm_state + 0x7fe0); - env->regs[R_EBX] = ldl_phys(cs->as, sm_state + 0x7fdc); - env->regs[R_EDX] = ldl_phys(cs->as, sm_state + 0x7fd8); - env->regs[R_ECX] = ldl_phys(cs->as, sm_state + 0x7fd4); - env->regs[R_EAX] = ldl_phys(cs->as, sm_state + 0x7fd0); - env->dr[6] = ldl_phys(cs->as, sm_state + 0x7fcc); - env->dr[7] = ldl_phys(cs->as, sm_state + 0x7fc8); - - env->tr.selector = ldl_phys(cs->as, sm_state + 0x7fc4) & 0xffff; - env->tr.base = ldl_phys(cs->as, sm_state + 0x7f64); - env->tr.limit = ldl_phys(cs->as, sm_state + 0x7f60); - env->tr.flags = (ldl_phys(cs->as, sm_state + 0x7f5c) & 0xf0ff) << 8; - - env->ldt.selector = ldl_phys(cs->as, sm_state + 0x7fc0) & 0xffff; - env->ldt.base = ldl_phys(cs->as, sm_state + 0x7f80); - env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7f7c); - env->ldt.flags = (ldl_phys(cs->as, sm_state + 0x7f78) & 0xf0ff) << 8; - - env->gdt.base = ldl_phys(cs->as, sm_state + 0x7f74); - env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7f70); - - env->idt.base = ldl_phys(cs->as, sm_state + 0x7f58); - env->idt.limit = ldl_phys(cs->as, sm_state + 0x7f54); - - for (i = 0; i < 6; i++) { - if (i < 3) { - offset = 0x7f84 + i * 12; - } else { - offset = 0x7f2c + (i - 3) * 12; - } - cpu_x86_load_seg_cache(env, i, - ldl_phys(cs->as, - sm_state + 0x7fa8 + i * 4) & 0xffff, - ldl_phys(cs->as, sm_state + offset + 8), - ldl_phys(cs->as, sm_state + offset + 4), - (ldl_phys(cs->as, - sm_state + offset) & 0xf0ff) << 8); - } - cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f14)); - - val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */ - if (val & 0x20000) { - env->smbase = ldl_phys(cs->as, sm_state + 0x7ef8) & ~0x7fff; - } -#endif - env->hflags &= ~HF_SMM_MASK; - cpu_smm_update(env); - - 
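In helper_rsm() above, the saved revision-ID dword gates SMBASE relocation: only when bit 17 (0x20000) is set is the new SMBASE accepted, and it is forced to 32 KiB alignment with `& ~0x7fff`; the state-save area always sits at smbase + 0x8000, with the 64-bit-layout segment slots at 0x7e00 + i*16. A small sketch of that address arithmetic — the offsets are copied from the code above, the sample values are assumptions:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t revision_id = 0x00020064; /* SMM_REVISION_ID, 64-bit layout */
        uint32_t new_smbase  = 0x40001234; /* assumed value read from SMRAM */
        uint32_t smbase      = 0x30000;    /* the power-on default */

        if (revision_id & 0x20000)          /* relocation supported? */
            smbase = new_smbase & ~0x7fffu; /* force 32 KiB alignment */

        uint32_t sm_state = smbase + 0x8000;
        for (int i = 0; i < 6; i++)         /* ES,CS,SS,DS,FS,GS slots */
            printf("seg %d save slot at 0x%08x\n", i, sm_state + 0x7e00 + i * 16);
        printf("smbase relocated to 0x%08x\n", smbase); /* 0x40000000 */
        return 0;
    }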
qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n"); - log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); -} - -#endif /* !CONFIG_USER_ONLY */ diff --git a/qemu/target-i386/topology.h b/qemu/target-i386/topology.h deleted file mode 100644 index e18ddfbe..00000000 --- a/qemu/target-i386/topology.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - * x86 CPU topology data structures and functions - * - * Copyright (c) 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#ifndef TARGET_I386_TOPOLOGY_H -#define TARGET_I386_TOPOLOGY_H - -/* This file implements the APIC-ID-based CPU topology enumeration logic, - * documented at the following document: - * Intel® 64 Architecture Processor Topology Enumeration - * http://software.intel.com/en-us/articles/intel-64-architecture-processor-topology-enumeration/ - * - * This code should be compatible with AMD's "Extended Method" described at: - * AMD CPUID Specification (Publication #25481) - * Section 3: Multiple Core Calcuation - * as long as: - * nr_threads is set to 1; - * OFFSET_IDX is assumed to be 0; - * CPUID Fn8000_0008_ECX[ApicIdCoreIdSize[3:0]] is set to apicid_core_width(). - */ - -#include "unicorn/platform.h" -#include - -#include "qemu/bitops.h" - -/* APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support - */ -typedef uint32_t apic_id_t; - -/* Return the bit width needed for 'count' IDs - */ -static unsigned apicid_bitwidth_for_count(unsigned count) -{ - g_assert(count >= 1); - count -= 1; - return count ? 32 - clz32(count) : 0; -} - -/* Bit width of the SMT_ID (thread ID) field on the APIC ID - */ -static inline unsigned apicid_smt_width(unsigned nr_cores, unsigned nr_threads) -{ - return apicid_bitwidth_for_count(nr_threads); -} - -/* Bit width of the Core_ID field - */ -static inline unsigned apicid_core_width(unsigned nr_cores, unsigned nr_threads) -{ - return apicid_bitwidth_for_count(nr_cores); -} - -/* Bit offset of the Core_ID field - */ -static inline unsigned apicid_core_offset(unsigned nr_cores, - unsigned nr_threads) -{ - return apicid_smt_width(nr_cores, nr_threads); -} - -/* Bit offset of the Pkg_ID (socket ID) field - */ -static inline unsigned apicid_pkg_offset(unsigned nr_cores, unsigned nr_threads) -{ - return apicid_core_offset(nr_cores, nr_threads) + - apicid_core_width(nr_cores, nr_threads); -} - -/* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID - * - * The caller must make sure core_id < nr_cores and smt_id < nr_threads. 
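apicid_bitwidth_for_count() above is ceil(log2(count)) computed with a count-leading-zeros primitive, and the offset helpers stack the SMT field at the bottom, cores above it, and the package above both — the composition that apicid_from_topo_ids() below performs. A self-contained version, substituting GCC/Clang's __builtin_clz for QEMU's clz32 (an assumption; any 32-bit clz works, and count is taken to be >= 1 as the g_assert requires):

    #include <stdio.h>

    /* ceil(log2(count)): bit width needed to number 'count' IDs. */
    static unsigned width_for_count(unsigned count)
    {
        count -= 1;
        return count ? 32 - __builtin_clz(count) : 0;
    }

    int main(void)
    {
        unsigned nr_cores = 6, nr_threads = 2;         /* assumed topology */
        unsigned smt_w  = width_for_count(nr_threads); /* 1 bit */
        unsigned core_w = width_for_count(nr_cores);   /* 3 bits: 6 IDs need 3 */
        unsigned core_off = smt_w;
        unsigned pkg_off  = smt_w + core_w;

        /* Compose an APIC ID for pkg 1, core 5, thread 1. */
        unsigned apic_id = (1u << pkg_off) | (5u << core_off) | 1u;
        printf("smt_w=%u core_w=%u pkg_off=%u apic_id=%#x\n",
               smt_w, core_w, pkg_off, apic_id);
        return 0; /* apic_id = 0x1b: binary 1|101|1 */
    }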
- */ -static inline apic_id_t apicid_from_topo_ids(unsigned nr_cores, - unsigned nr_threads, - unsigned pkg_id, - unsigned core_id, - unsigned smt_id) -{ - return (pkg_id << apicid_pkg_offset(nr_cores, nr_threads)) | - (core_id << apicid_core_offset(nr_cores, nr_threads)) | - smt_id; -} - -/* Calculate thread/core/package IDs for a specific topology, - * based on (contiguous) CPU index - */ -static inline void x86_topo_ids_from_idx(unsigned nr_cores, - unsigned nr_threads, - unsigned cpu_index, - unsigned *pkg_id, - unsigned *core_id, - unsigned *smt_id) -{ - unsigned core_index = cpu_index / nr_threads; - *smt_id = cpu_index % nr_threads; - *core_id = core_index % nr_cores; - *pkg_id = core_index / nr_cores; -} - -/* Make APIC ID for the CPU 'cpu_index' - * - * 'cpu_index' is a sequential, contiguous ID for the CPU. - */ -static inline apic_id_t x86_apicid_from_cpu_idx(unsigned nr_cores, - unsigned nr_threads, - unsigned cpu_index) -{ - unsigned pkg_id, core_id, smt_id; - x86_topo_ids_from_idx(nr_cores, nr_threads, cpu_index, - &pkg_id, &core_id, &smt_id); - return apicid_from_topo_ids(nr_cores, nr_threads, pkg_id, core_id, smt_id); -} - -#endif /* TARGET_I386_TOPOLOGY_H */ diff --git a/qemu/target-i386/translate.c b/qemu/target-i386/translate.c deleted file mode 100644 index b7762b1f..00000000 --- a/qemu/target-i386/translate.c +++ /dev/null @@ -1,8806 +0,0 @@ -/* - * i386 translation - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
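x86_topo_ids_from_idx() above is plain div/mod peeling: threads vary fastest, then cores, then packages. A worked example for an assumed 2-socket x 4-core x 2-thread topology, confirming that cpu_index 11 lands on package 1, core 1, thread 1:

    #include <stdio.h>

    int main(void)
    {
        unsigned nr_cores = 4, nr_threads = 2; /* assumed 2 x 4 x 2 topology */
        for (unsigned cpu_index = 0; cpu_index < 16; cpu_index++) {
            unsigned core_index = cpu_index / nr_threads;
            unsigned smt_id  = cpu_index % nr_threads; /* thread varies fastest */
            unsigned core_id = core_index % nr_cores;
            unsigned pkg_id  = core_index / nr_cores;  /* package varies slowest */
            printf("cpu %2u -> pkg %u core %u smt %u\n",
                   cpu_index, pkg_id, core_id, smt_id);
        }
        return 0; /* cpu 11 -> pkg 1 core 1 smt 1 */
    }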
- */ -#include -#include -#include -#include -#include "unicorn/platform.h" -#include - -#include "qemu/host-utils.h" -#include "cpu.h" -#include "tcg-op.h" -#include "exec/cpu_ldst.h" - -#include "exec/helper-proto.h" -#include "exec/helper-gen.h" - -#include "uc_priv.h" - -#define PREFIX_REPZ 0x01 -#define PREFIX_REPNZ 0x02 -#define PREFIX_LOCK 0x04 -#define PREFIX_DATA 0x08 -#define PREFIX_ADR 0x10 -#define PREFIX_VEX 0x20 - -#ifdef TARGET_X86_64 -#define CODE64(s) ((s)->code64) -#define REX_X(s) ((s)->rex_x) -#define REX_B(s) ((s)->rex_b) -#else -#define CODE64(s) 0 -#define REX_X(s) 0 -#define REX_B(s) 0 -#endif - -#ifdef TARGET_X86_64 -# define ctztl ctz64 -# define clztl clz64 -#else -# define ctztl ctz32 -# define clztl clz32 -#endif - -#include "exec/gen-icount.h" - -typedef struct DisasContext { - /* current insn context */ - int override; /* -1 if no override */ - int prefix; - TCGMemOp aflag; - TCGMemOp dflag; - target_ulong pc; /* pc = eip + cs_base */ - int is_jmp; /* 1 = means jump (stop translation), 2 means CPU - static state change (stop translation) */ - /* current block context */ - target_ulong cs_base; /* base of CS segment */ - int pe; /* protected mode */ - int code32; /* 32 bit code segment */ -#ifdef TARGET_X86_64 - int lma; /* long mode active */ - int code64; /* 64 bit code segment */ - int rex_x, rex_b; -#endif - int vex_l; /* vex vector length */ - int vex_v; /* vex vvvv register, without 1's compliment. */ - int ss32; /* 32 bit stack segment */ - CCOp cc_op; /* current CC operation */ - CCOp last_cc_op; /* Unicorn: last CC operation. Save this to see if cc_op has changed */ - bool cc_op_dirty; - int addseg; /* non zero if either DS/ES/SS have a non zero base */ - int f_st; /* currently unused */ - int vm86; /* vm86 mode */ - int cpl; - int iopl; - int tf; /* TF cpu flag */ - int singlestep_enabled; /* "hardware" single step enabled */ - int jmp_opt; /* use direct block chaining for direct jumps */ - int mem_index; /* select memory access functions */ - uint64_t flags; /* all execution flags */ - struct TranslationBlock *tb; - int popl_esp_hack; /* for correct popl with esp base handling */ - int rip_offset; /* only used in x86_64, but left for simplicity */ - int cpuid_features; - int cpuid_ext_features; - int cpuid_ext2_features; - int cpuid_ext3_features; - int cpuid_7_0_ebx_features; - struct uc_struct *uc; - - // Unicorn - target_ulong prev_pc; /* save address of the previous instruction */ -} DisasContext; - -static void gen_eob(DisasContext *s); -static void gen_jmp(DisasContext *s, target_ulong eip); -static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); -static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d); - -/* i386 arith/logic operations */ -enum { - OP_ADDL, - OP_ORL, - OP_ADCL, - OP_SBBL, - OP_ANDL, - OP_SUBL, - OP_XORL, - OP_CMPL, -}; - -/* i386 shift ops */ -enum { - OP_ROL, - OP_ROR, - OP_RCL, - OP_RCR, - OP_SHL, - OP_SHR, - OP_SHL1, /* undocumented */ - OP_SAR = 7, -}; - -enum { - JCC_O, - JCC_B, - JCC_Z, - JCC_BE, - JCC_S, - JCC_P, - JCC_L, - JCC_LE, -}; - -enum { - /* I386 int registers */ - OR_EAX, /* MUST be even numbered */ - OR_ECX, - OR_EDX, - OR_EBX, - OR_ESP, - OR_EBP, - OR_ESI, - OR_EDI, - - OR_TMP0 = 16, /* temporary operand register */ - OR_TMP1, - OR_A0, /* temporary register used when doing address evaluation */ -}; - -enum { - USES_CC_DST = 1, - USES_CC_SRC = 2, - USES_CC_SRC2 = 4, - USES_CC_SRCT = 8, -}; - -/* Bit set if the global variable is live after setting CC_OP to X. 
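/* Editor's sketch, not part of the patch: the cc_op_live[] table below
 * drives the dead-value elimination in set_cc_op(). Moving from
 * CC_OP_ADCB (uses DST, SRC and SRC2) to CC_OP_LOGICB (uses only DST)
 * lets the translator discard SRC and SRC2. Minimal standalone model;
 * the DEMO_* names are illustrative.
 */
#include <stdio.h>

enum { DEMO_DST = 1, DEMO_SRC = 2, DEMO_SRC2 = 4 };

int main(void)
{
    unsigned live_old = DEMO_DST | DEMO_SRC | DEMO_SRC2; /* like CC_OP_ADCB */
    unsigned live_new = DEMO_DST;                        /* like CC_OP_LOGICB */
    unsigned dead = live_old & ~live_new;  /* same expression as set_cc_op() */

    printf("may discard:%s%s%s\n",
           (dead & DEMO_DST) ? " DST" : "",
           (dead & DEMO_SRC) ? " SRC" : "",
           (dead & DEMO_SRC2) ? " SRC2" : "");
    return 0;
}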
*/ -static const uint8_t cc_op_live[CC_OP_NB] = { -#ifdef _MSC_VER - USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ - USES_CC_SRC, // CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ - - USES_CC_DST | USES_CC_SRC, // CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ - USES_CC_DST | USES_CC_SRC, // CC_OP_MULW, - USES_CC_DST | USES_CC_SRC, // CC_OP_MULL, - USES_CC_DST | USES_CC_SRC, // CC_OP_MULQ, - - USES_CC_DST | USES_CC_SRC, // CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ - USES_CC_DST | USES_CC_SRC, // CC_OP_ADDW, - USES_CC_DST | USES_CC_SRC, // CC_OP_ADDL, - USES_CC_DST | USES_CC_SRC, // CC_OP_ADDQ, - - USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ - USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADCW, - USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADCL, - USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADCQ, - - USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, // CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ - USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, // CC_OP_SUBW, - USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, // CC_OP_SUBL, - USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, // CC_OP_SUBQ, - - USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ - USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_SBBW, - USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_SBBL, - USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_SBBQ, - - USES_CC_DST, // CC_OP_LOGICB, /* modify all flags, CC_DST = res */ - USES_CC_DST, // CC_OP_LOGICW, - USES_CC_DST, // CC_OP_LOGICL, - USES_CC_DST, // CC_OP_LOGICQ, - - USES_CC_DST | USES_CC_SRC, // CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ - USES_CC_DST | USES_CC_SRC, // CC_OP_INCW, - USES_CC_DST | USES_CC_SRC, // CC_OP_INCL, - USES_CC_DST | USES_CC_SRC, // CC_OP_INCQ, - - USES_CC_DST | USES_CC_SRC, // CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ - USES_CC_DST | USES_CC_SRC, // CC_OP_DECW, - USES_CC_DST | USES_CC_SRC, // CC_OP_DECL, - USES_CC_DST | USES_CC_SRC, // CC_OP_DECQ, - - USES_CC_DST | USES_CC_SRC, // CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ - USES_CC_DST | USES_CC_SRC, // CC_OP_SHLW, - USES_CC_DST | USES_CC_SRC, // CC_OP_SHLL, - USES_CC_DST | USES_CC_SRC, // CC_OP_SHLQ, - - USES_CC_DST | USES_CC_SRC, // CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ - USES_CC_DST | USES_CC_SRC, // CC_OP_SARW, - USES_CC_DST | USES_CC_SRC, // CC_OP_SARL, - USES_CC_DST | USES_CC_SRC, // CC_OP_SARQ, - - USES_CC_DST | USES_CC_SRC, // CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ - USES_CC_DST | USES_CC_SRC, // CC_OP_BMILGW, - USES_CC_DST | USES_CC_SRC, // CC_OP_BMILGL, - USES_CC_DST | USES_CC_SRC, // CC_OP_BMILGQ, - - USES_CC_DST | USES_CC_SRC, // CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ - USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */ - USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ - - 0, // CC_OP_CLR, /* Z set, all other flags clear. */ -#else - [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, - [CC_OP_EFLAGS] = USES_CC_SRC, - [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC, - [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC, - [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, - [CC_OP_SUBB ... 
CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, - [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, - [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST, - [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC, - [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC, - [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC, - [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC, - [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC, - [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC, - [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2, - [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, - [CC_OP_CLR] = 0, -#endif -}; - -static inline void gen_jmp_im(DisasContext *s, target_ulong pc); - -static void set_cc_op(DisasContext *s, CCOp op) -{ - int dead; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; - TCGv cpu_cc_srcT = *(TCGv *)tcg_ctx->cpu_cc_srcT; - - if (s->cc_op == op) { - return; - } - - /* Discard CC computation that will no longer be used. */ - dead = cc_op_live[s->cc_op] & ~cc_op_live[op]; - if (dead & USES_CC_DST) { - tcg_gen_discard_tl(tcg_ctx, cpu_cc_dst); - } - if (dead & USES_CC_SRC) { - tcg_gen_discard_tl(tcg_ctx, cpu_cc_src); - } - if (dead & USES_CC_SRC2) { - tcg_gen_discard_tl(tcg_ctx, cpu_cc_src2); - } - if (dead & USES_CC_SRCT) { - tcg_gen_discard_tl(tcg_ctx, cpu_cc_srcT); - } - - if (op == CC_OP_DYNAMIC) { - /* The DYNAMIC setting is translator only, and should never be - stored. Thus we always consider it clean. */ - s->cc_op_dirty = false; - } else { - /* Discard any computed CC_OP value (see shifts). */ - if (s->cc_op == CC_OP_DYNAMIC) { - tcg_gen_discard_i32(tcg_ctx, cpu_cc_op); - } - s->cc_op_dirty = true; - } - s->cc_op = op; -} - -static void gen_update_cc_op(DisasContext *s) -{ - if (s->cc_op_dirty) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; - - tcg_gen_movi_i32(tcg_ctx, cpu_cc_op, s->cc_op); - s->cc_op_dirty = false; - } -} - -#ifdef TARGET_X86_64 - -#define NB_OP_SIZES 4 - -#else /* !TARGET_X86_64 */ - -#define NB_OP_SIZES 3 - -#endif /* !TARGET_X86_64 */ - -#if defined(HOST_WORDS_BIGENDIAN) -#define REG_B_OFFSET (sizeof(target_ulong) - 1) -#define REG_H_OFFSET (sizeof(target_ulong) - 2) -#define REG_W_OFFSET (sizeof(target_ulong) - 2) -#define REG_L_OFFSET (sizeof(target_ulong) - 4) -#define REG_LH_OFFSET (sizeof(target_ulong) - 8) -#else -#define REG_B_OFFSET 0 -#define REG_H_OFFSET 1 -#define REG_W_OFFSET 0 -#define REG_L_OFFSET 0 -#define REG_LH_OFFSET 4 -#endif - -/* In instruction encodings for byte register accesses the - * register number usually indicates "low 8 bits of register N"; - * however there are some special cases where N 4..7 indicates - * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return - * true for this special case, false otherwise. - */ -static inline bool byte_reg_is_xH(int x86_64_hregs, int reg) -{ - if (reg < 4) { - return false; - } -#ifdef TARGET_X86_64 - if (reg >= 8 || x86_64_hregs) { - return false; - } -#endif - return true; -} - -/* Select the size of a push/pop operation. */ -static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot) -{ - if (CODE64(s)) { - return ot == MO_16 ? MO_16 : MO_64; - } else { - return ot; - } -} - -/* Select only size 64 else 32. Used for SSE operand sizes. 
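/* Editor's sketch, not part of the patch: byte_reg_is_xH() above decides
 * whether a byte write targets bits 0..7 of register N or, for AH/CH/DH/BH,
 * bits 8..15 of register N-4; gen_op_mov_reg_v() below realises that with
 * a deposit. Standalone model (demo_deposit is illustrative):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_deposit(uint32_t reg, uint32_t val,
                             unsigned pos, unsigned len)
{
    uint32_t mask = ((1u << len) - 1) << pos;

    return (reg & ~mask) | ((val << pos) & mask);
}

int main(void)
{
    uint32_t eax = 0x11223344;

    printf("mov al, 0xaa -> %08x\n", demo_deposit(eax, 0xaa, 0, 8));
    printf("mov ah, 0xbb -> %08x\n", demo_deposit(eax, 0xbb, 8, 8));
    return 0;
}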
*/ -static inline TCGMemOp mo_64_32(TCGMemOp ot) -{ -#ifdef TARGET_X86_64 - return ot == MO_64 ? MO_64 : MO_32; -#else - return MO_32; -#endif -} - -/* Select size 8 if lsb of B is clear, else OT. Used for decoding - byte vs word opcodes. */ -static inline TCGMemOp mo_b_d(int b, TCGMemOp ot) -{ - return b & 1 ? ot : MO_8; -} - -/* Select size 8 if lsb of B is clear, else OT capped at 32. - Used for decoding operand size of port opcodes. */ -static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot) -{ - return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; -} - -static void gen_op_mov_reg_v(TCGContext *s, TCGMemOp ot, int reg, TCGv t0) -{ - TCGv **cpu_regs = (TCGv **)s->cpu_regs; - - switch(ot) { - case MO_8: - if (!byte_reg_is_xH(s->x86_64_hregs, reg)) { - tcg_gen_deposit_tl(s, *cpu_regs[reg], *cpu_regs[reg], t0, 0, 8); - } else { - tcg_gen_deposit_tl(s, *cpu_regs[reg - 4], *cpu_regs[reg - 4], t0, 8, 8); - } - break; - case MO_16: - tcg_gen_deposit_tl(s, *cpu_regs[reg], *cpu_regs[reg], t0, 0, 16); - break; - case MO_32: - /* For x86_64, this sets the higher half of register to zero. - For i386, this is equivalent to a mov. */ - tcg_gen_ext32u_tl(s, *cpu_regs[reg], t0); - break; -#ifdef TARGET_X86_64 - case MO_64: - tcg_gen_mov_tl(s, *cpu_regs[reg], t0); - break; -#endif - default: - tcg_abort(); - } -} - -static inline void gen_op_mov_v_reg(TCGContext *s, TCGMemOp ot, TCGv t0, int reg) -{ - TCGv **cpu_regs = (TCGv **)s->cpu_regs; - - if (ot == MO_8 && byte_reg_is_xH(s->x86_64_hregs, reg)) { - tcg_gen_shri_tl(s, t0, *cpu_regs[reg - 4], 8); - tcg_gen_ext8u_tl(s, t0, t0); - } else { - tcg_gen_mov_tl(s, t0, *cpu_regs[reg]); - } -} - -static inline void gen_op_movl_A0_reg(TCGContext *s, int reg) -{ - TCGv cpu_A0 = *(TCGv *)s->cpu_A0; - TCGv **cpu_regs = (TCGv **)s->cpu_regs; - - tcg_gen_mov_tl(s, cpu_A0, *cpu_regs[reg]); -} - -static inline void gen_op_addl_A0_im(TCGContext *s, int32_t val) -{ - TCGv cpu_A0 = *(TCGv *)s->cpu_A0; - - tcg_gen_addi_tl(s, cpu_A0, cpu_A0, val); -#ifdef TARGET_X86_64 - tcg_gen_andi_tl(s, cpu_A0, cpu_A0, 0xffffffff); -#endif -} - -#ifdef TARGET_X86_64 -static inline void gen_op_addq_A0_im(TCGContext *s, int64_t val) -{ - TCGv cpu_A0 = *(TCGv *)s->cpu_A0; - - tcg_gen_addi_tl(s, cpu_A0, cpu_A0, val); -} -#endif - -static void gen_add_A0_im(DisasContext *s, int val) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; -#ifdef TARGET_X86_64 - if (CODE64(s)) - gen_op_addq_A0_im(tcg_ctx, val); - else -#endif - gen_op_addl_A0_im(tcg_ctx, val); -} - -static inline void gen_op_jmp_v(TCGContext *s, TCGv dest) -{ - tcg_gen_st_tl(s, dest, s->cpu_env, offsetof(CPUX86State, eip)); -} - -static inline void gen_op_add_reg_im(TCGContext *s, TCGMemOp size, int reg, int32_t val) -{ - TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; - TCGv **cpu_regs = (TCGv **)s->cpu_regs; - - tcg_gen_addi_tl(s, cpu_tmp0, *cpu_regs[reg], val); - gen_op_mov_reg_v(s, size, reg, cpu_tmp0); -} - -static inline void gen_op_add_reg_T0(TCGContext *s, TCGMemOp size, int reg) -{ - TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; - TCGv **cpu_T = (TCGv **)s->cpu_T; - TCGv **cpu_regs = (TCGv **)s->cpu_regs; - - tcg_gen_add_tl(s, cpu_tmp0, *cpu_regs[reg], *cpu_T[0]); - gen_op_mov_reg_v(s, size, reg, cpu_tmp0); -} - -static inline void gen_op_addl_A0_reg_sN(TCGContext *s, int shift, int reg) -{ - TCGv cpu_A0 = *(TCGv *)s->cpu_A0; - TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; - TCGv **cpu_regs = (TCGv **)s->cpu_regs; - - tcg_gen_mov_tl(s, cpu_tmp0, *cpu_regs[reg]); - if (shift != 0) - tcg_gen_shli_tl(s, cpu_tmp0, cpu_tmp0, shift); - tcg_gen_add_tl(s, cpu_A0, 
cpu_A0, cpu_tmp0); - /* For x86_64, this sets the higher half of register to zero. - For i386, this is equivalent to a nop. */ - tcg_gen_ext32u_tl(s, cpu_A0, cpu_A0); -} - -static inline void gen_op_movl_A0_seg(TCGContext *s, int reg) -{ - TCGv cpu_A0 = *(TCGv *)s->cpu_A0; - - tcg_gen_ld32u_tl(s, cpu_A0, s->cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET); -} - -static inline void gen_op_addl_A0_seg(DisasContext *s, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - - tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUX86State, segs[reg].base)); -#ifdef TARGET_X86_64 - if (CODE64(s)) { - tcg_gen_andi_tl(tcg_ctx, cpu_A0, cpu_A0, 0xffffffff); - tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); - } else { - tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); - tcg_gen_andi_tl(tcg_ctx, cpu_A0, cpu_A0, 0xffffffff); - } -#else - tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); -#endif -} - -#ifdef TARGET_X86_64 -static inline void gen_op_movq_A0_seg(TCGContext *s, int reg) -{ - TCGv cpu_A0 = *(TCGv *)s->cpu_A0; - - tcg_gen_ld_tl(s, cpu_A0, s->cpu_env, offsetof(CPUX86State, segs[reg].base)); -} - -static inline void gen_op_addq_A0_seg(TCGContext *s, int reg) -{ - TCGv cpu_A0 = *(TCGv *)s->cpu_A0; - TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; - - tcg_gen_ld_tl(s, cpu_tmp0, s->cpu_env, offsetof(CPUX86State, segs[reg].base)); - tcg_gen_add_tl(s, cpu_A0, cpu_A0, cpu_tmp0); -} - -static inline void gen_op_movq_A0_reg(TCGContext *s, int reg) -{ - TCGv cpu_A0 = *(TCGv *)s->cpu_A0; - TCGv **cpu_regs = (TCGv **)s->cpu_regs; - - tcg_gen_mov_tl(s, cpu_A0, *cpu_regs[reg]); -} - -static inline void gen_op_addq_A0_reg_sN(TCGContext *s, int shift, int reg) -{ - TCGv cpu_A0 = *(TCGv *)s->cpu_A0; - TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; - TCGv **cpu_regs = (TCGv **)s->cpu_regs; - - tcg_gen_mov_tl(s, cpu_tmp0, *cpu_regs[reg]); - if (shift != 0) - tcg_gen_shli_tl(s, cpu_tmp0, cpu_tmp0, shift); - tcg_gen_add_tl(s, cpu_A0, cpu_A0, cpu_tmp0); -} -#endif - -static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0) -{ - if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ)) - gen_jmp_im(s, s->prev_pc); // Unicorn: sync EIP - tcg_gen_qemu_ld_tl(s->uc, t0, a0, s->mem_index, idx | MO_LE); -} - -static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0) -{ - if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE)) - gen_jmp_im(s, s->prev_pc); // Unicorn: sync EIP - tcg_gen_qemu_st_tl(s->uc, t0, a0, s->mem_index, idx | MO_LE); -} - -static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - if (d == OR_TMP0) { - gen_op_st_v(s, idx, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_reg_v(tcg_ctx, idx, d, *cpu_T[0]); - } -} - -static inline void gen_jmp_im(DisasContext *s, target_ulong pc) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - - tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, pc); - gen_op_jmp_v(tcg_ctx, cpu_tmp0); -} - -static inline void gen_string_movl_A0_ESI(DisasContext *s) -{ - int override; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; - - override = s->override; - switch (s->aflag) { -#ifdef TARGET_X86_64 - case MO_64: - if (override >= 0) { - gen_op_movq_A0_seg(tcg_ctx, override); - gen_op_addq_A0_reg_sN(tcg_ctx, 0, R_ESI); - } else { - 
gen_op_movq_A0_reg(tcg_ctx, R_ESI); - } - break; -#endif - case MO_32: - /* 32 bit address */ - if (s->addseg && override < 0) - override = R_DS; - if (override >= 0) { - gen_op_movl_A0_seg(tcg_ctx, override); - gen_op_addl_A0_reg_sN(tcg_ctx, 0, R_ESI); - } else { - gen_op_movl_A0_reg(tcg_ctx, R_ESI); - } - break; - case MO_16: - /* 16 address, always override */ - if (override < 0) - override = R_DS; - tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, *cpu_regs[R_ESI]); - gen_op_addl_A0_seg(s, override); - break; - default: - tcg_abort(); - } -} - -static inline void gen_string_movl_A0_EDI(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; - - switch (s->aflag) { -#ifdef TARGET_X86_64 - case MO_64: - gen_op_movq_A0_reg(tcg_ctx, R_EDI); - break; -#endif - case MO_32: - if (s->addseg) { - gen_op_movl_A0_seg(tcg_ctx, R_ES); - gen_op_addl_A0_reg_sN(tcg_ctx, 0, R_EDI); - } else { - gen_op_movl_A0_reg(tcg_ctx, R_EDI); - } - break; - case MO_16: - tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EDI]); - gen_op_addl_A0_seg(s, R_ES); - break; - default: - tcg_abort(); - } -} - -static inline void gen_op_movl_T0_Dshift(TCGContext *s, TCGMemOp ot) -{ - TCGv **cpu_T = (TCGv **)s->cpu_T; - - tcg_gen_ld32s_tl(s, *cpu_T[0], s->cpu_env, offsetof(CPUX86State, df)); - tcg_gen_shli_tl(s, *cpu_T[0], *cpu_T[0], ot); -}; - -static TCGv gen_ext_tl(TCGContext *s, TCGv dst, TCGv src, TCGMemOp size, bool sign) -{ - switch (size) { - case MO_8: - if (sign) { - tcg_gen_ext8s_tl(s, dst, src); - } else { - tcg_gen_ext8u_tl(s, dst, src); - } - return dst; - case MO_16: - if (sign) { - tcg_gen_ext16s_tl(s, dst, src); - } else { - tcg_gen_ext16u_tl(s, dst, src); - } - return dst; -#ifdef TARGET_X86_64 - case MO_32: - if (sign) { - tcg_gen_ext32s_tl(s, dst, src); - } else { - tcg_gen_ext32u_tl(s, dst, src); - } - return dst; -#endif - default: - return src; - } -} - -static void gen_extu(TCGContext *s, TCGMemOp ot, TCGv reg) -{ - gen_ext_tl(s, reg, reg, ot, false); -} - -static void gen_exts(TCGContext *s, TCGMemOp ot, TCGv reg) -{ - gen_ext_tl(s, reg, reg, ot, true); -} - -static inline void gen_op_jnz_ecx(TCGContext *s, TCGMemOp size, int label1) -{ - TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; - TCGv **cpu_regs = (TCGv **)s->cpu_regs; - - tcg_gen_mov_tl(s, cpu_tmp0, *cpu_regs[R_ECX]); - gen_extu(s, size, cpu_tmp0); - tcg_gen_brcondi_tl(s, TCG_COND_NE, cpu_tmp0, 0, label1); -} - -static inline void gen_op_jz_ecx(TCGContext *s, TCGMemOp size, int label1) -{ - TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; - TCGv **cpu_regs = (TCGv **)s->cpu_regs; - - tcg_gen_mov_tl(s, cpu_tmp0, *cpu_regs[R_ECX]); - gen_extu(s, size, cpu_tmp0); - tcg_gen_brcondi_tl(s, TCG_COND_EQ, cpu_tmp0, 0, label1); -} - -static void gen_helper_in_func(TCGContext *s, TCGMemOp ot, TCGv v, TCGv_i32 n) -{ - switch (ot) { - case MO_8: - gen_helper_inb(s, v, tcg_const_ptr(s, s->uc), n); - break; - case MO_16: - gen_helper_inw(s, v, tcg_const_ptr(s, s->uc), n); - break; - case MO_32: - gen_helper_inl(s, v, tcg_const_ptr(s, s->uc), n); - break; - default: - tcg_abort(); - } -} - -static void gen_helper_out_func(TCGContext *s, TCGMemOp ot, TCGv_i32 v, TCGv_i32 n) -{ - switch (ot) { - case MO_8: - gen_helper_outb(s, tcg_const_ptr(s, s->uc), v, n); - break; - case MO_16: - gen_helper_outw(s, tcg_const_ptr(s, s->uc), v, n); - break; - case MO_32: - gen_helper_outl(s, tcg_const_ptr(s, s->uc), v, n); - break; - default: - tcg_abort(); - } -} - -static void gen_check_io(DisasContext *s, TCGMemOp ot, 
target_ulong cur_eip, - uint32_t svm_flags) -{ - int state_saved; - target_ulong next_eip; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - // Unicorn: allow all I/O instructions - return; - - state_saved = 0; - if (s->pe && (s->cpl > s->iopl || s->vm86)) { - gen_update_cc_op(s); - gen_jmp_im(s, cur_eip); - state_saved = 1; - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - switch (ot) { - case MO_8: - gen_helper_check_iob(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp2_i32); - break; - case MO_16: - gen_helper_check_iow(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp2_i32); - break; - case MO_32: - gen_helper_check_iol(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp2_i32); - break; - default: - tcg_abort(); - } - } - if(s->flags & HF_SVMI_MASK) { - if (!state_saved) { - gen_update_cc_op(s); - gen_jmp_im(s, cur_eip); - } - svm_flags |= (1 << (4 + ot)); - next_eip = s->pc - s->cs_base; - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - gen_helper_svm_check_io(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp2_i32, - tcg_const_i32(tcg_ctx, svm_flags), - tcg_const_i32(tcg_ctx, next_eip - cur_eip)); - } -} - -static inline void gen_movs(DisasContext *s, TCGMemOp ot) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - gen_string_movl_A0_ESI(s); - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - gen_string_movl_A0_EDI(s); - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - gen_op_movl_T0_Dshift(tcg_ctx, ot); - gen_op_add_reg_T0(tcg_ctx, s->aflag, R_ESI); - gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI); -} - -static void gen_op_update1_cc(TCGContext *s) -{ - TCGv cpu_cc_dst = *(TCGv *)s->cpu_cc_dst; - TCGv **cpu_T = (TCGv **)s->cpu_T; - - tcg_gen_mov_tl(s, cpu_cc_dst, *cpu_T[0]); -} - -static void gen_op_update2_cc(TCGContext *s) -{ - TCGv cpu_cc_dst = *(TCGv *)s->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)s->cpu_cc_src; - TCGv **cpu_T = (TCGv **)s->cpu_T; - - tcg_gen_mov_tl(s, cpu_cc_src, *cpu_T[1]); - tcg_gen_mov_tl(s, cpu_cc_dst, *cpu_T[0]); -} - -static void gen_op_update3_cc(TCGContext *s, TCGv reg) -{ - TCGv cpu_cc_dst = *(TCGv *)s->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)s->cpu_cc_src; - TCGv cpu_cc_src2 = *(TCGv *)s->cpu_cc_src2; - TCGv **cpu_T = (TCGv **)s->cpu_T; - - tcg_gen_mov_tl(s, cpu_cc_src2, reg); - tcg_gen_mov_tl(s, cpu_cc_src, *cpu_T[1]); - tcg_gen_mov_tl(s, cpu_cc_dst, *cpu_T[0]); -} - -static inline void gen_op_testl_T0_T1_cc(TCGContext *s) -{ - TCGv cpu_cc_dst = *(TCGv *)s->cpu_cc_dst; - TCGv **cpu_T = (TCGv **)s->cpu_T; - - tcg_gen_and_tl(s, cpu_cc_dst, *cpu_T[0], *cpu_T[1]); -} - -static void gen_op_update_neg_cc(TCGContext *s) -{ - TCGv cpu_cc_dst = *(TCGv *)s->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)s->cpu_cc_src; - TCGv cpu_cc_srcT = *(TCGv *)s->cpu_cc_srcT; - TCGv **cpu_T = (TCGv **)s->cpu_T; - - tcg_gen_mov_tl(s, cpu_cc_dst, *cpu_T[0]); - tcg_gen_neg_tl(s, cpu_cc_src, *cpu_T[0]); - tcg_gen_movi_tl(s, cpu_cc_srcT, 0); -} - -/* compute all eflags to cc_src */ -static void gen_compute_eflags(DisasContext *s) -{ - TCGv zero, dst, src1, src2; - int live, dead; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; - - if (s->cc_op == CC_OP_EFLAGS) { - return; - } - if (s->cc_op == CC_OP_CLR) { - tcg_gen_movi_tl(tcg_ctx, cpu_cc_src, CC_Z | CC_P); - set_cc_op(s, CC_OP_EFLAGS); - return; - } 
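/* Editor's sketch, not part of the patch: the CC_OP_CLR branch above seeds
 * the flags with CC_Z | CC_P because a zeroed result sets ZF, and PF counts
 * the set bits of the low result byte -- zero has an even count. Standalone
 * check (demo_pf is illustrative):
 */
#include <stdio.h>

static int demo_pf(unsigned char b)
{
    int ones = 0;

    while (b) {
        ones += b & 1;
        b >>= 1;
    }
    return (ones & 1) == 0;    /* PF = even number of set bits */
}

int main(void)
{
    printf("result 0x00: ZF=1 PF=%d\n", demo_pf(0x00)); /* PF=1 */
    printf("result 0x01: ZF=0 PF=%d\n", demo_pf(0x01)); /* PF=0 */
    return 0;
}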
- - TCGV_UNUSED(zero); - dst = cpu_cc_dst; - src1 = cpu_cc_src; - src2 = cpu_cc_src2; - - /* Take care to not read values that are not live. */ - live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; - dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); - if (dead) { - zero = tcg_const_tl(tcg_ctx, 0); - if (dead & USES_CC_DST) { - dst = zero; - } - if (dead & USES_CC_SRC) { - src1 = zero; - } - if (dead & USES_CC_SRC2) { - src2 = zero; - } - } - - gen_update_cc_op(s); - gen_helper_cc_compute_all(tcg_ctx, cpu_cc_src, dst, src1, src2, cpu_cc_op); - set_cc_op(s, CC_OP_EFLAGS); - - if (dead) { - tcg_temp_free(tcg_ctx, zero); - } -} - -typedef struct CCPrepare { - TCGCond cond; - TCGv reg; - TCGv reg2; - target_ulong imm; - target_ulong mask; - bool use_reg2; - bool no_setcond; -} CCPrepare; - -static inline CCPrepare ccprepare_make(TCGCond cond, - TCGv reg, TCGv reg2, - target_ulong imm, target_ulong mask, - bool use_reg2, bool no_setcond) -{ - CCPrepare cc = { cond, reg, reg2, imm, mask, use_reg2, no_setcond }; - return cc; -} - -/* compute eflags.C to reg */ -static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) -{ - TCGv t0, t1; - int size, shift; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; - TCGv cpu_cc_srcT = *(TCGv *)tcg_ctx->cpu_cc_srcT; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - - switch (s->cc_op) { - case CC_OP_SUBB: case CC_OP_SUBW: case CC_OP_SUBL: case CC_OP_SUBQ: - /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */ - size = s->cc_op - CC_OP_SUBB; - t1 = gen_ext_tl(tcg_ctx, cpu_tmp0, cpu_cc_src, size, false); - /* If no temporary was used, be careful not to alias t1 and t0. */ - t0 = TCGV_EQUAL(t1, cpu_cc_src) ? 
cpu_tmp0 : reg; - tcg_gen_mov_tl(tcg_ctx, t0, cpu_cc_srcT); - gen_extu(tcg_ctx, size, t0); - goto add_sub; - - case CC_OP_ADDB: case CC_OP_ADDW: case CC_OP_ADDL: case CC_OP_ADDQ: - /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */ - size = s->cc_op - CC_OP_ADDB; - t1 = gen_ext_tl(tcg_ctx, cpu_tmp0, cpu_cc_src, size, false); - t0 = gen_ext_tl(tcg_ctx, reg, cpu_cc_dst, size, false); - add_sub: - return ccprepare_make(TCG_COND_LTU, t0, t1, 0, -1, true, false); - - case CC_OP_LOGICB: case CC_OP_LOGICW: case CC_OP_LOGICL: case CC_OP_LOGICQ: - case CC_OP_CLR: - return ccprepare_make(TCG_COND_NEVER, 0, 0, 0, -1, false, false); - - case CC_OP_INCB: case CC_OP_INCW: case CC_OP_INCL: case CC_OP_INCQ: - case CC_OP_DECB: case CC_OP_DECW: case CC_OP_DECL: case CC_OP_DECQ: - return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, -1, false, true); - - case CC_OP_SHLB: case CC_OP_SHLW: case CC_OP_SHLL: case CC_OP_SHLQ: - /* (CC_SRC >> (DATA_BITS - 1)) & 1 */ - size = s->cc_op - CC_OP_SHLB; - shift = (8 << size) - 1; - return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, (target_ulong)(1ULL << shift), false, false); - - case CC_OP_MULB: case CC_OP_MULW: case CC_OP_MULL: case CC_OP_MULQ: - return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, -1, false, false); - - case CC_OP_BMILGB: case CC_OP_BMILGW: case CC_OP_BMILGL: case CC_OP_BMILGQ: - size = s->cc_op - CC_OP_BMILGB; - t0 = gen_ext_tl(tcg_ctx, reg, cpu_cc_src, size, false); - return ccprepare_make(TCG_COND_EQ, t0, 0, 0, -1, false, false); - - case CC_OP_ADCX: - case CC_OP_ADCOX: - return ccprepare_make(TCG_COND_NE, cpu_cc_dst, 0, 0, -1, false, true); - - case CC_OP_EFLAGS: - case CC_OP_SARB: case CC_OP_SARW: case CC_OP_SARL: case CC_OP_SARQ: - /* CC_SRC & 1 */ - return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_C, false, false); - - default: - /* The need to compute only C from CC_OP_DYNAMIC is important - in efficiently implementing e.g. INC at the start of a TB. 
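/* Editor's sketch, not part of the patch: the SUB and ADD cases above
 * recover CF from the lazily saved values with a single unsigned compare,
 * e.g. for an 8-bit SUB, CF = (uint8_t)src1 < (uint8_t)src2. Standalone:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t src1 = 0x10, src2 = 0x20;
    uint8_t res = (uint8_t)(src1 - src2);        /* 0xf0, borrow taken */
    int cf_sub = src1 < src2;                    /* SUB: CC_SRCT < CC_SRC */
    int cf_add = (uint8_t)(src1 + src2) < src1;  /* ADD: CC_DST < CC_SRC */

    printf("sub: res=0x%02x CF=%d; add: CF=%d\n", res, cf_sub, cf_add);
    return 0;
}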
*/ - gen_update_cc_op(s); - gen_helper_cc_compute_c(tcg_ctx, reg, cpu_cc_dst, cpu_cc_src, - cpu_cc_src2, cpu_cc_op); - return ccprepare_make(TCG_COND_NE, reg, 0, 0, -1, false, true); - } -} - -/* compute eflags.P to reg */ -static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - - gen_compute_eflags(s); - return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_P, false, false); -} - -/* compute eflags.S to reg */ -static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - - switch (s->cc_op) { - case CC_OP_DYNAMIC: - gen_compute_eflags(s); - /* FALLTHRU */ - case CC_OP_EFLAGS: - case CC_OP_ADCX: - case CC_OP_ADOX: - case CC_OP_ADCOX: - return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_S, false, false); - case CC_OP_CLR: - return ccprepare_make(TCG_COND_NEVER, 0, 0, 0, -1, false, false); - default: - { - TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; - TCGv t0 = gen_ext_tl(tcg_ctx, reg, cpu_cc_dst, size, true); - return ccprepare_make(TCG_COND_LT, t0, 0, 0, -1, false, false); - } - } -} - -/* compute eflags.O to reg */ -static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; - - switch (s->cc_op) { - case CC_OP_ADOX: - case CC_OP_ADCOX: - return ccprepare_make(TCG_COND_NE, cpu_cc_src2, 0, 0, -1, false, true); - case CC_OP_CLR: - return ccprepare_make(TCG_COND_NEVER, 0, 0, 0, -1, false, false); - default: - gen_compute_eflags(s); - return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_O, false, false); - } -} - -/* compute eflags.Z to reg */ -static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - - switch (s->cc_op) { - case CC_OP_DYNAMIC: - gen_compute_eflags(s); - /* FALLTHRU */ - case CC_OP_EFLAGS: - case CC_OP_ADCX: - case CC_OP_ADOX: - case CC_OP_ADCOX: - return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_Z, false, false); - case CC_OP_CLR: - return ccprepare_make(TCG_COND_ALWAYS, 0, 0, 0, -1, false, false); - default: - { - TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; - TCGv t0 = gen_ext_tl(tcg_ctx, reg, cpu_cc_dst, size, false); - return ccprepare_make(TCG_COND_EQ, t0, 0, 0, -1, false, false); - } - } -} - -/* perform a conditional store into register 'reg' according to jump opcode - value 'b'. In the fast case, T0 is guaranted not to be used. */ -static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) -{ - int inv, jcc_op, cond; - TCGMemOp size; - CCPrepare cc; - TCGv t0; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - TCGv cpu_cc_srcT = *(TCGv *)tcg_ctx->cpu_cc_srcT; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; - - inv = b & 1; - jcc_op = (b >> 1) & 7; - - switch (s->cc_op) { - case CC_OP_SUBB: case CC_OP_SUBW: case CC_OP_SUBL: case CC_OP_SUBQ: - /* We optimize relational operators for the cmp/jcc case. 
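/* Editor's sketch, not part of the patch: gen_prepare_cc() above splits
 * the low opcode nibble as inv = b & 1 and jcc_op = (b >> 1) & 7, so JZ
 * (0x74) and JNZ (0x75) share the JCC_Z test. Standalone decoder; the
 * demo_jcc table is illustrative.
 */
#include <stdio.h>

int main(void)
{
    static const char *demo_jcc[8] = {
        "O", "B", "Z", "BE", "S", "P", "L", "LE"
    };
    int opcodes[3] = { 0x74, 0x75, 0x7c };      /* jz, jnz, jl */
    int i;

    for (i = 0; i < 3; i++) {
        int b = opcodes[i] & 0x0f;              /* condition nibble */

        printf("opcode 0x%02x: test %s%s\n", opcodes[i],
               (b & 1) ? "not " : "", demo_jcc[(b >> 1) & 7]);
    }
    return 0;
}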
*/ - size = s->cc_op - CC_OP_SUBB; - switch (jcc_op) { - case JCC_BE: - tcg_gen_mov_tl(tcg_ctx, cpu_tmp4, cpu_cc_srcT); - gen_extu(tcg_ctx, size, cpu_tmp4); - t0 = gen_ext_tl(tcg_ctx, cpu_tmp0, cpu_cc_src, size, false); - cc = ccprepare_make(TCG_COND_LEU, cpu_tmp4, t0, 0, -1, true, false); - break; - - case JCC_L: - cond = TCG_COND_LT; - goto fast_jcc_l; - case JCC_LE: - cond = TCG_COND_LE; - fast_jcc_l: - tcg_gen_mov_tl(tcg_ctx, cpu_tmp4, cpu_cc_srcT); - gen_exts(tcg_ctx, size, cpu_tmp4); - t0 = gen_ext_tl(tcg_ctx, cpu_tmp0, cpu_cc_src, size, true); - cc = ccprepare_make(cond, cpu_tmp4, t0, 0, -1, true, false); - break; - - default: - goto slow_jcc; - } - break; - - default: - slow_jcc: - /* This actually generates good code for JC, JZ and JS. */ - switch (jcc_op) { - case JCC_O: - cc = gen_prepare_eflags_o(s, reg); - break; - case JCC_B: - cc = gen_prepare_eflags_c(s, reg); - break; - case JCC_Z: - cc = gen_prepare_eflags_z(s, reg); - break; - case JCC_BE: - gen_compute_eflags(s); - cc = ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_Z | CC_C, false, false); - break; - case JCC_S: - cc = gen_prepare_eflags_s(s, reg); - break; - case JCC_P: - cc = gen_prepare_eflags_p(s, reg); - break; - case JCC_L: - gen_compute_eflags(s); - if (TCGV_EQUAL(reg, cpu_cc_src)) { - reg = cpu_tmp0; - } - tcg_gen_shri_tl(tcg_ctx, reg, cpu_cc_src, 4); /* CC_O -> CC_S */ - tcg_gen_xor_tl(tcg_ctx, reg, reg, cpu_cc_src); - cc = ccprepare_make(TCG_COND_NE, reg, 0, 0, CC_S, false, false); - break; - default: - case JCC_LE: - gen_compute_eflags(s); - if (TCGV_EQUAL(reg, cpu_cc_src)) { - reg = cpu_tmp0; - } - tcg_gen_shri_tl(tcg_ctx, reg, cpu_cc_src, 4); /* CC_O -> CC_S */ - tcg_gen_xor_tl(tcg_ctx, reg, reg, cpu_cc_src); - cc = ccprepare_make(TCG_COND_NE, reg, 0, 0, CC_S | CC_Z, false, false); - break; - } - break; - } - - if (inv) { - cc.cond = tcg_invert_cond(cc.cond); - } - return cc; -} - -static void gen_setcc1(DisasContext *s, int b, TCGv reg) -{ - CCPrepare cc = gen_prepare_cc(s, b, reg); - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - if (cc.no_setcond) { - if (cc.cond == TCG_COND_EQ) { - tcg_gen_xori_tl(tcg_ctx, reg, cc.reg, 1); - } else { - tcg_gen_mov_tl(tcg_ctx, reg, cc.reg); - } - return; - } - - if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 && - cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) { - tcg_gen_shri_tl(tcg_ctx, reg, cc.reg, ctztl(cc.mask)); - tcg_gen_andi_tl(tcg_ctx, reg, reg, 1); - return; - } - if (cc.mask != -1) { - tcg_gen_andi_tl(tcg_ctx, reg, cc.reg, cc.mask); - cc.reg = reg; - } - if (cc.use_reg2) { - tcg_gen_setcond_tl(tcg_ctx, cc.cond, reg, cc.reg, cc.reg2); - } else { - tcg_gen_setcondi_tl(tcg_ctx, cc.cond, reg, cc.reg, cc.imm); - } -} - -static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg) -{ - gen_setcc1(s, JCC_B << 1, reg); -} - -/* generate a conditional jump to label 'l1' according to jump opcode - value 'b'. In the fast case, T0 is guaranted not to be used. */ -static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - CCPrepare cc = gen_prepare_cc(s, b, *cpu_T[0]); - - if (cc.mask != -1) { - tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], cc.reg, cc.mask); - cc.reg = *cpu_T[0]; - } - if (cc.use_reg2) { - tcg_gen_brcond_tl(tcg_ctx, cc.cond, cc.reg, cc.reg2, l1); - } else { - tcg_gen_brcondi_tl(tcg_ctx, cc.cond, cc.reg, cc.imm, l1); - } -} - -/* Generate a conditional jump to label 'l1' according to jump opcode - value 'b'. In the fast case, T0 is guaranted not to be used. 
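/* Editor's sketch, not part of the patch: gen_setcc1() above has a fast
 * path for single-bit masks -- shift the flag down and mask with 1 rather
 * than materialise a compare. Standalone model (demo_ctz is illustrative):
 */
#include <stdio.h>

static unsigned demo_ctz(unsigned x)   /* trailing zeros, x must be != 0 */
{
    unsigned n = 0;

    while (!(x & 1)) {
        n++;
        x >>= 1;
    }
    return n;
}

int main(void)
{
    unsigned flags = 0x40;             /* ZF (bit 6) set */
    unsigned mask = 0x40;              /* power of two: fast path applies */

    printf("setz -> %u\n", (flags >> demo_ctz(mask)) & 1);
    return 0;
}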
- A translation block must end soon. */ -static inline void gen_jcc1(DisasContext *s, int b, int l1) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - CCPrepare cc = gen_prepare_cc(s, b, *cpu_T[0]); - - gen_update_cc_op(s); - if (cc.mask != -1) { - tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], cc.reg, cc.mask); - cc.reg = *cpu_T[0]; - } - set_cc_op(s, CC_OP_DYNAMIC); - if (cc.use_reg2) { - tcg_gen_brcond_tl(tcg_ctx, cc.cond, cc.reg, cc.reg2, l1); - } else { - tcg_gen_brcondi_tl(tcg_ctx, cc.cond, cc.reg, cc.imm, l1); - } -} - -/* XXX: does not work with gdbstub "ice" single step - not a - serious problem */ -static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) -{ - int l1, l2; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - l1 = gen_new_label(tcg_ctx); - l2 = gen_new_label(tcg_ctx); - gen_op_jnz_ecx(tcg_ctx, s->aflag, l1); - gen_set_label(tcg_ctx, l2); - gen_jmp_tb(s, next_eip, 1); - gen_set_label(tcg_ctx, l1); - return l2; -} - -static inline void gen_stos(DisasContext *s, TCGMemOp ot) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EAX); - gen_string_movl_A0_EDI(s); - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - gen_op_movl_T0_Dshift(tcg_ctx, ot); - gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI); -} - -static inline void gen_lods(DisasContext *s, TCGMemOp ot) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - gen_string_movl_A0_ESI(s); - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, *cpu_T[0]); - gen_op_movl_T0_Dshift(tcg_ctx, ot); - gen_op_add_reg_T0(tcg_ctx, s->aflag, R_ESI); -} - -static inline void gen_scas(DisasContext *s, TCGMemOp ot) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - gen_string_movl_A0_EDI(s); - gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); - gen_op(s, OP_CMPL, ot, R_EAX); - gen_op_movl_T0_Dshift(tcg_ctx, ot); - gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI); -} - -static inline void gen_cmps(DisasContext *s, TCGMemOp ot) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - gen_string_movl_A0_EDI(s); - gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); - gen_string_movl_A0_ESI(s); - gen_op(s, OP_CMPL, ot, OR_TMP0); - gen_op_movl_T0_Dshift(tcg_ctx, ot); - gen_op_add_reg_T0(tcg_ctx, s->aflag, R_ESI); - gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI); -} - -static inline void gen_ins(DisasContext *s, TCGMemOp ot) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; - - gen_string_movl_A0_EDI(s); - /* Note: we must do this dummy write first to be restartable in - case of page fault. 
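/* Editor's sketch, not part of the patch: the GEN_REPZ expansions further
 * below loop back into the same block once per element; each pass runs the
 * string body, steps the index registers by the direction-flag stride and
 * decrements ECX. A standalone model of REP MOVSB:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t src[4] = { 1, 2, 3, 4 }, dst[4] = { 0, 0, 0, 0 };
    unsigned esi = 0, edi = 0, ecx = 4;
    int step = 1;                      /* DF = 0: addresses increase */

    while (ecx != 0) {                 /* gen_op_jz_ecx() exit test */
        dst[edi] = src[esi];           /* one MOVSB body */
        esi += step;
        edi += step;
        ecx--;                         /* gen_op_add_reg_im(..., R_ECX, -1) */
    }
    printf("copied: %d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);
    return 0;
}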
*/ - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_regs[R_EDX]); - tcg_gen_andi_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); - gen_helper_in_func(tcg_ctx, ot, *cpu_T[0], cpu_tmp2_i32); - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - gen_op_movl_T0_Dshift(tcg_ctx, ot); - gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI); -} - -static inline void gen_outs(DisasContext *s, TCGMemOp ot) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; - TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; - - gen_string_movl_A0_ESI(s); - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_regs[R_EDX]); - tcg_gen_andi_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_T[0]); - gen_helper_out_func(tcg_ctx, ot, cpu_tmp2_i32, cpu_tmp3_i32); - - gen_op_movl_T0_Dshift(tcg_ctx, ot); - gen_op_add_reg_T0(tcg_ctx, s->aflag, R_ESI); -} - -/* same method as Valgrind : we generate jumps to current or next - instruction */ -#define GEN_REPZ(op) \ -static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ - target_ulong cur_eip, target_ulong next_eip) \ -{ \ - int l2;\ - gen_update_cc_op(s); \ - l2 = gen_jz_ecx_string(s, next_eip); \ - gen_ ## op(s, ot); \ - gen_op_add_reg_im(s->uc->tcg_ctx, s->aflag, R_ECX, -1); \ - /* a loop would cause two single step exceptions if ECX = 1 \ - before rep string_insn */ \ - if (!s->jmp_opt) \ - gen_op_jz_ecx(s->uc->tcg_ctx, s->aflag, l2); \ - gen_jmp(s, cur_eip); \ -} - -#define GEN_REPZ2(op) \ -static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ - target_ulong cur_eip, \ - target_ulong next_eip, \ - int nz) \ -{ \ - int l2;\ - gen_update_cc_op(s); \ - l2 = gen_jz_ecx_string(s, next_eip); \ - gen_ ## op(s, ot); \ - gen_op_add_reg_im(s->uc->tcg_ctx, s->aflag, R_ECX, -1); \ - gen_update_cc_op(s); \ - gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \ - if (!s->jmp_opt) \ - gen_op_jz_ecx(s->uc->tcg_ctx, s->aflag, l2); \ - gen_jmp(s, cur_eip); \ -} - -GEN_REPZ(movs) -GEN_REPZ(stos) -GEN_REPZ(lods) -GEN_REPZ(ins) -GEN_REPZ(outs) -GEN_REPZ2(scas) -GEN_REPZ2(cmps) - -static void gen_helper_fp_arith_ST0_FT0(TCGContext *s, int op) -{ - switch (op) { - case 0: - gen_helper_fadd_ST0_FT0(s, s->cpu_env); - break; - case 1: - gen_helper_fmul_ST0_FT0(s, s->cpu_env); - break; - case 2: - gen_helper_fcom_ST0_FT0(s, s->cpu_env); - break; - case 3: - gen_helper_fcom_ST0_FT0(s, s->cpu_env); - break; - case 4: - gen_helper_fsub_ST0_FT0(s, s->cpu_env); - break; - case 5: - gen_helper_fsubr_ST0_FT0(s, s->cpu_env); - break; - case 6: - gen_helper_fdiv_ST0_FT0(s, s->cpu_env); - break; - case 7: - gen_helper_fdivr_ST0_FT0(s, s->cpu_env); - break; - } -} - -/* NOTE the exception in "r" op ordering */ -static void gen_helper_fp_arith_STN_ST0(TCGContext *s, int op, int opreg) -{ - TCGv_i32 tmp = tcg_const_i32(s, opreg); - switch (op) { - case 0: - gen_helper_fadd_STN_ST0(s, s->cpu_env, tmp); - break; - case 1: - gen_helper_fmul_STN_ST0(s, s->cpu_env, tmp); - break; - case 4: - gen_helper_fsubr_STN_ST0(s, s->cpu_env, tmp); - break; - case 5: - gen_helper_fsub_STN_ST0(s, s->cpu_env, tmp); - break; - case 6: - gen_helper_fdivr_STN_ST0(s, s->cpu_env, tmp); - break; - case 7: - gen_helper_fdiv_STN_ST0(s, s->cpu_env, tmp); - break; - } -} - -/* if d == OR_TMP0, it means 
memory operand (address in A0) */ -static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - TCGv cpu_cc_srcT = *(TCGv *)tcg_ctx->cpu_cc_srcT; - TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - if (d != OR_TMP0) { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], d); - } else { - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - } - switch(op) { - case OP_ADCL: - gen_compute_eflags_c(s, cpu_tmp4); - tcg_gen_add_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - tcg_gen_add_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp4); - gen_op_st_rm_T0_A0(s, ot, d); - gen_op_update3_cc(tcg_ctx, cpu_tmp4); - set_cc_op(s, CC_OP_ADCB + ot); - break; - case OP_SBBL: - gen_compute_eflags_c(s, cpu_tmp4); - tcg_gen_sub_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - tcg_gen_sub_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp4); - gen_op_st_rm_T0_A0(s, ot, d); - gen_op_update3_cc(tcg_ctx, cpu_tmp4); - set_cc_op(s, CC_OP_SBBB + ot); - break; - case OP_ADDL: - tcg_gen_add_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_st_rm_T0_A0(s, ot, d); - gen_op_update2_cc(tcg_ctx); - set_cc_op(s, CC_OP_ADDB + ot); - break; - case OP_SUBL: - tcg_gen_mov_tl(tcg_ctx, cpu_cc_srcT, *cpu_T[0]); - tcg_gen_sub_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_st_rm_T0_A0(s, ot, d); - gen_op_update2_cc(tcg_ctx); - set_cc_op(s, CC_OP_SUBB + ot); - break; - default: - case OP_ANDL: - tcg_gen_and_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_st_rm_T0_A0(s, ot, d); - gen_op_update1_cc(tcg_ctx); - set_cc_op(s, CC_OP_LOGICB + ot); - break; - case OP_ORL: - tcg_gen_or_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_st_rm_T0_A0(s, ot, d); - gen_op_update1_cc(tcg_ctx); - set_cc_op(s, CC_OP_LOGICB + ot); - break; - case OP_XORL: - tcg_gen_xor_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_st_rm_T0_A0(s, ot, d); - gen_op_update1_cc(tcg_ctx); - set_cc_op(s, CC_OP_LOGICB + ot); - break; - case OP_CMPL: - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[1]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_srcT, *cpu_T[0]); - tcg_gen_sub_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], *cpu_T[1]); - set_cc_op(s, CC_OP_SUBB + ot); - break; - } -} - -/* if d == OR_TMP0, it means memory operand (address in A0) */ -static void gen_inc(DisasContext *s, TCGMemOp ot, int d, int c) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - if (d != OR_TMP0) { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], d); - } else { - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - } - gen_compute_eflags_c(s, cpu_cc_src); - if (c > 0) { - tcg_gen_addi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 1); - set_cc_op(s, CC_OP_INCB + ot); - } else { - tcg_gen_addi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], -1); - set_cc_op(s, CC_OP_DECB + ot); - } - gen_op_st_rm_T0_A0(s, ot, d); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); -} - -static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, - TCGv shm1, TCGv count, bool is_right) -{ - TCGv_i32 z32, s32, oldop; - TCGv z_tl; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; - TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32; - TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = 
*(TCGv *)tcg_ctx->cpu_cc_src; - - /* Store the results into the CC variables. If we know that the - variable must be dead, store unconditionally. Otherwise we'll - need to not disrupt the current contents. */ - z_tl = tcg_const_tl(tcg_ctx, 0); - if (cc_op_live[s->cc_op] & USES_CC_DST) { - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, cpu_cc_dst, count, z_tl, - result, cpu_cc_dst); - } else { - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, result); - } - if (cc_op_live[s->cc_op] & USES_CC_SRC) { - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, cpu_cc_src, count, z_tl, - shm1, cpu_cc_src); - } else { - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, shm1); - } - tcg_temp_free(tcg_ctx, z_tl); - - /* Get the two potential CC_OP values into temporaries. */ - tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); - if (s->cc_op == CC_OP_DYNAMIC) { - oldop = cpu_cc_op; - } else { - tcg_gen_movi_i32(tcg_ctx, cpu_tmp3_i32, s->cc_op); - oldop = cpu_tmp3_i32; - } - - /* Conditionally store the CC_OP value. */ - z32 = tcg_const_i32(tcg_ctx, 0); - s32 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_tl_i32(tcg_ctx, s32, count); - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop); - tcg_temp_free_i32(tcg_ctx, z32); - tcg_temp_free_i32(tcg_ctx, s32); - - /* The CC_OP value is no longer predictable. */ - set_cc_op(s, CC_OP_DYNAMIC); -} - -static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, - int is_right, int is_arith) -{ - target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - /* load */ - if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); - } - - tcg_gen_andi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], mask); - tcg_gen_subi_tl(tcg_ctx, cpu_tmp0, *cpu_T[1], 1); - - if (is_right) { - if (is_arith) { - gen_exts(tcg_ctx, ot, *cpu_T[0]); - tcg_gen_sar_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); - tcg_gen_sar_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - } else { - gen_extu(tcg_ctx, ot, *cpu_T[0]); - tcg_gen_shr_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); - tcg_gen_shr_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - } - } else { - tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); - tcg_gen_shl_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - } - - /* store */ - gen_op_st_rm_T0_A0(s, ot, op1); - - gen_shift_flags(s, ot, *cpu_T[0], cpu_tmp0, *cpu_T[1], is_right); -} - -static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, - int is_right, int is_arith) -{ - int mask = (ot == MO_64 ? 
0x3f : 0x1f); - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - /* load */ - if (op1 == OR_TMP0) - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - else - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); - - op2 &= mask; - if (op2 != 0) { - if (is_right) { - if (is_arith) { - gen_exts(tcg_ctx, ot, *cpu_T[0]); - tcg_gen_sari_tl(tcg_ctx, cpu_tmp4, *cpu_T[0], op2 - 1); - tcg_gen_sari_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], op2); - } else { - gen_extu(tcg_ctx, ot, *cpu_T[0]); - tcg_gen_shri_tl(tcg_ctx, cpu_tmp4, *cpu_T[0], op2 - 1); - tcg_gen_shri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], op2); - } - } else { - tcg_gen_shli_tl(tcg_ctx, cpu_tmp4, *cpu_T[0], op2 - 1); - tcg_gen_shli_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], op2); - } - } - - /* store */ - gen_op_st_rm_T0_A0(s, ot, op1); - - /* update eflags if non zero shift */ - if (op2 != 0) { - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, cpu_tmp4); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); - set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); - } -} - -static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) -{ - target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); - TCGv_i32 t0, t1; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; - TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32; - TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - /* load */ - if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); - } - - tcg_gen_andi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], mask); - - switch (ot) { - case MO_8: - /* Replicate the 8-bit input so that a 32-bit rotate works. */ - tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_muli_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 0x01010101); - goto do_long; - case MO_16: - /* Replicate the 16-bit input so that a 32-bit rotate works. */ - tcg_gen_deposit_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[0], 16, 16); - goto do_long; - do_long: -#ifdef TARGET_X86_64 - case MO_32: - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_T[1]); - if (is_right) { - tcg_gen_rotr_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); - } else { - tcg_gen_rotl_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); - } - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_T[0], cpu_tmp2_i32); - break; -#endif - default: - if (is_right) { - tcg_gen_rotr_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - } else { - tcg_gen_rotl_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - } - break; - } - - /* store */ - gen_op_st_rm_T0_A0(s, ot, op1); - - /* We'll need the flags computed into CC_SRC. */ - gen_compute_eflags(s); - - /* The value that was "rotated out" is now present at the other end - of the word. Compute C into CC_DST and O into CC_SRC2. Note that - since we've computed the flags into CC_SRC, these variables are - currently dead. 
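/* Editor's sketch, not part of the patch: the bit a rotate carries out
 * reappears at the far end of the result, so ROL takes CF from the new
 * LSB, ROR from the new MSB, and OF is the xor computed by the TCG ops
 * that follow. Standalone 32-bit ROL by one:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t v = 0x80000001, n = 1;
    uint32_t rol = (v << n) | (v >> (32 - n));   /* 0x00000003 */
    int cf = rol & 1;                            /* rotated-out bit = LSB */
    int of = cf ^ (int)(rol >> 31);              /* CF xor new MSB */

    printf("rol=0x%08x CF=%d OF=%d\n", rol, cf, of);
    return 0;
}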
*/ - if (is_right) { - tcg_gen_shri_tl(tcg_ctx, cpu_cc_src2, *cpu_T[0], mask - 1); - tcg_gen_shri_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], mask); - tcg_gen_andi_tl(tcg_ctx, cpu_cc_dst, cpu_cc_dst, 1); - } else { - tcg_gen_shri_tl(tcg_ctx, cpu_cc_src2, *cpu_T[0], mask); - tcg_gen_andi_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], 1); - } - tcg_gen_andi_tl(tcg_ctx, cpu_cc_src2, cpu_cc_src2, 1); - tcg_gen_xor_tl(tcg_ctx, cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); - - /* Now conditionally store the new CC_OP value. If the shift count - is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live. - Otherwise reuse CC_OP_ADCOX which have the C and O flags split out - exactly as we computed above. */ - t0 = tcg_const_i32(tcg_ctx, 0); - t1 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_tl_i32(tcg_ctx, t1, *cpu_T[1]); - tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, CC_OP_ADCOX); - tcg_gen_movi_i32(tcg_ctx, cpu_tmp3_i32, CC_OP_EFLAGS); - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, cpu_cc_op, t1, t0, - cpu_tmp2_i32, cpu_tmp3_i32); - tcg_temp_free_i32(tcg_ctx, t0); - tcg_temp_free_i32(tcg_ctx, t1); - - /* The CC_OP value is no longer predictable. */ - set_cc_op(s, CC_OP_DYNAMIC); -} - -static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, - int is_right) -{ - int mask = (ot == MO_64 ? 0x3f : 0x1f); - int shift; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - -#ifdef TARGET_X86_64 - TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; -#endif - - /* load */ - if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); - } - - op2 &= mask; - if (op2 != 0) { - switch (ot) { -#ifdef TARGET_X86_64 - case MO_32: - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - if (is_right) { - tcg_gen_rotri_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, op2); - } else { - tcg_gen_rotli_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, op2); - } - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_T[0], cpu_tmp2_i32); - break; -#endif - default: - if (is_right) { - tcg_gen_rotri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], op2); - } else { - tcg_gen_rotli_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], op2); - } - break; - case MO_8: - mask = 7; - goto do_shifts; - case MO_16: - mask = 15; - do_shifts: - shift = op2 & mask; - if (is_right) { - shift = mask + 1 - shift; - } - gen_extu(tcg_ctx, ot, *cpu_T[0]); - tcg_gen_shli_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], shift); - tcg_gen_shri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], mask + 1 - shift); - tcg_gen_or_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp0); - break; - } - } - - /* store */ - gen_op_st_rm_T0_A0(s, ot, op1); - - if (op2 != 0) { - /* Compute the flags into CC_SRC. */ - gen_compute_eflags(s); - - /* The value that was "rotated out" is now present at the other end - of the word. Compute C into CC_DST and O into CC_SRC2. Note that - since we've computed the flags into CC_SRC, these variables are - currently dead. 
*/ - if (is_right) { - tcg_gen_shri_tl(tcg_ctx, cpu_cc_src2, *cpu_T[0], mask - 1); - tcg_gen_shri_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], mask); - tcg_gen_andi_tl(tcg_ctx, cpu_cc_dst, cpu_cc_dst, 1); - } else { - tcg_gen_shri_tl(tcg_ctx, cpu_cc_src2, *cpu_T[0], mask); - tcg_gen_andi_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], 1); - } - tcg_gen_andi_tl(tcg_ctx, cpu_cc_src2, cpu_cc_src2, 1); - tcg_gen_xor_tl(tcg_ctx, cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); - set_cc_op(s, CC_OP_ADCOX); - } -} - -/* XXX: add faster immediate = 1 case */ -static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, - int is_right) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - gen_compute_eflags(s); - assert(s->cc_op == CC_OP_EFLAGS); - - /* load */ - if (op1 == OR_TMP0) - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - else - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); - - if (is_right) { - switch (ot) { - case MO_8: - gen_helper_rcrb(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); - break; - case MO_16: - gen_helper_rcrw(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); - break; - case MO_32: - gen_helper_rcrl(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); - break; -#ifdef TARGET_X86_64 - case MO_64: - gen_helper_rcrq(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); - break; -#endif - default: - tcg_abort(); - } - } else { - switch (ot) { - case MO_8: - gen_helper_rclb(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); - break; - case MO_16: - gen_helper_rclw(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); - break; - case MO_32: - gen_helper_rcll(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); - break; -#ifdef TARGET_X86_64 - case MO_64: - gen_helper_rclq(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); - break; -#endif - default: - tcg_abort(); - } - } - /* store */ - gen_op_st_rm_T0_A0(s, ot, op1); -} - -/* XXX: add faster immediate case */ -static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, - bool is_right, TCGv count_in) -{ - target_ulong mask = (ot == MO_64 ? 63 : 31); - TCGv count; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - /* load */ - if (op1 == OR_TMP0) { - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); - } - - count = tcg_temp_new(tcg_ctx); - tcg_gen_andi_tl(tcg_ctx, count, count_in, mask); - - switch (ot) { - case MO_16: - /* Note: we implement the Intel behaviour for shift count > 16. - This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A - portion by constructing it as a 32-bit value. */ - if (is_right) { - tcg_gen_deposit_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], *cpu_T[1], 16, 16); - tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], *cpu_T[0]); - tcg_gen_mov_tl(tcg_ctx, *cpu_T[0], cpu_tmp0); - } else { - tcg_gen_deposit_tl(tcg_ctx, *cpu_T[1], *cpu_T[0], *cpu_T[1], 16, 16); - } - /* FALLTHRU */ -#ifdef TARGET_X86_64 - case MO_32: - /* Concatenate the two 32-bit values and use a 64-bit shift. 
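/* Editor's sketch, not part of the patch: shifting the concatenated pair,
 * as the MO_32 case below does, is also an easy way to model SHRD on its
 * own -- bits leaving the low word are refilled from the high word.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t lo = 0x00000001, hi = 0xdeadbeef;
    unsigned count = 4;
    uint64_t pair = ((uint64_t)hi << 32) | lo;   /* hi:lo */
    uint32_t shrd = (uint32_t)(pair >> count);   /* 0xf0000000 */

    printf("shrd by %u -> 0x%08x\n", count, shrd);
    return 0;
}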
*/ - tcg_gen_subi_tl(tcg_ctx, cpu_tmp0, count, 1); - if (is_right) { - tcg_gen_concat_tl_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - tcg_gen_shr_i64(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); - tcg_gen_shr_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], count); - } else { - tcg_gen_concat_tl_i64(tcg_ctx, *cpu_T[0], *cpu_T[1], *cpu_T[0]); - tcg_gen_shl_i64(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); - tcg_gen_shl_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], count); - tcg_gen_shri_i64(tcg_ctx, cpu_tmp0, cpu_tmp0, 32); - tcg_gen_shri_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], 32); - } - break; -#endif - default: - tcg_gen_subi_tl(tcg_ctx, cpu_tmp0, count, 1); - if (is_right) { - tcg_gen_shr_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); - - tcg_gen_subfi_tl(tcg_ctx, cpu_tmp4, mask + 1, count); - tcg_gen_shr_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], count); - tcg_gen_shl_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], cpu_tmp4); - } else { - tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); - if (ot == MO_16) { - /* Only needed if count > 16, for Intel behaviour. */ - tcg_gen_subfi_tl(tcg_ctx, cpu_tmp4, 33, count); - tcg_gen_shr_tl(tcg_ctx, cpu_tmp4, *cpu_T[1], cpu_tmp4); - tcg_gen_or_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, cpu_tmp4); - } - - tcg_gen_subfi_tl(tcg_ctx, cpu_tmp4, mask + 1, count); - tcg_gen_shl_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], count); - tcg_gen_shr_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], cpu_tmp4); - } - tcg_gen_movi_tl(tcg_ctx, cpu_tmp4, 0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *cpu_T[1], count, cpu_tmp4, - cpu_tmp4, *cpu_T[1]); - tcg_gen_or_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - break; - } - - /* store */ - gen_op_st_rm_T0_A0(s, ot, op1); - - gen_shift_flags(s, ot, *cpu_T[0], cpu_tmp0, count, is_right); - tcg_temp_free(tcg_ctx, count); -} - -static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) -{ - TCGContext *tcg_ctx = s1->uc->tcg_ctx; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - if (s != OR_TMP1) - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], s); - switch(op) { - case OP_ROL: - gen_rot_rm_T1(s1, ot, d, 0); - break; - case OP_ROR: - gen_rot_rm_T1(s1, ot, d, 1); - break; - case OP_SHL: - case OP_SHL1: - gen_shift_rm_T1(s1, ot, d, 0, 0); - break; - case OP_SHR: - gen_shift_rm_T1(s1, ot, d, 1, 0); - break; - case OP_SAR: - gen_shift_rm_T1(s1, ot, d, 1, 1); - break; - case OP_RCL: - gen_rotc_rm_T1(s1, ot, d, 0); - break; - case OP_RCR: - gen_rotc_rm_T1(s1, ot, d, 1); - break; - } -} - -static void gen_shifti(DisasContext *s, int op, TCGMemOp ot, int d, int c) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - switch(op) { - case OP_ROL: - gen_rot_rm_im(s, ot, d, c, 0); - break; - case OP_ROR: - gen_rot_rm_im(s, ot, d, c, 1); - break; - case OP_SHL: - case OP_SHL1: - gen_shift_rm_im(s, ot, d, c, 0, 0); - break; - case OP_SHR: - gen_shift_rm_im(s, ot, d, c, 1, 0); - break; - case OP_SAR: - gen_shift_rm_im(s, ot, d, c, 1, 1); - break; - default: - /* currently not optimized */ - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], c); - gen_shift(s, op, ot, d, OR_TMP1); - break; - } -} - -static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm) -{ - target_long disp; - int havesib; - int base; - int index; - int scale; - int mod, rm, code, override, must_add_seg; - TCGv sum; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; - - override = s->override; - must_add_seg = s->addseg; - if (override >= 0) - must_add_seg = 1; - mod = (modrm >> 6) & 3; - rm = 
modrm & 7; - - switch (s->aflag) { - case MO_64: - case MO_32: - havesib = 0; - base = rm; - index = -1; - scale = 0; - - if (base == 4) { - havesib = 1; - code = cpu_ldub_code(env, s->pc++); - scale = (code >> 6) & 3; - index = ((code >> 3) & 7) | REX_X(s); - if (index == 4) { - index = -1; /* no index */ - } - base = (code & 7); - } - base |= REX_B(s); - - switch (mod) { - case 0: - if ((base & 7) == 5) { - base = -1; - disp = (int32_t)cpu_ldl_code(env, s->pc); - s->pc += 4; - if (CODE64(s) && !havesib) { - disp += s->pc + s->rip_offset; - } - } else { - disp = 0; - } - break; - case 1: - disp = (int8_t)cpu_ldub_code(env, s->pc++); - break; - default: - case 2: - disp = (int32_t)cpu_ldl_code(env, s->pc); - s->pc += 4; - break; - } - - /* For correct popl handling with esp. */ - if (base == R_ESP && s->popl_esp_hack) { - disp += s->popl_esp_hack; - } - - /* Compute the address, with a minimum number of TCG ops. */ - TCGV_UNUSED(sum); - if (index >= 0) { - if (scale == 0) { - sum = *cpu_regs[index]; - } else { - tcg_gen_shli_tl(tcg_ctx, cpu_A0, *cpu_regs[index], scale); - sum = cpu_A0; - } - if (base >= 0) { - tcg_gen_add_tl(tcg_ctx, cpu_A0, sum, *cpu_regs[base]); - sum = cpu_A0; - } - } else if (base >= 0) { - sum = *cpu_regs[base]; - } - if (TCGV_IS_UNUSED(sum)) { - tcg_gen_movi_tl(tcg_ctx, cpu_A0, disp); - } else { - tcg_gen_addi_tl(tcg_ctx, cpu_A0, sum, disp); - } - - if (must_add_seg) { - if (override < 0) { - if (base == R_EBP || base == R_ESP) { - override = R_SS; - } else { - override = R_DS; - } - } - - tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, - offsetof(CPUX86State, segs[override].base)); - if (CODE64(s)) { - if (s->aflag == MO_32) { - tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, cpu_A0); - } - tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); - return; - } - - tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); - } - - if (s->aflag == MO_32) { - tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, cpu_A0); - } - break; - - case MO_16: - switch (mod) { - case 0: - if (rm == 6) { - disp = cpu_lduw_code(env, s->pc); - s->pc += 2; - tcg_gen_movi_tl(tcg_ctx, cpu_A0, disp); - rm = 0; /* avoid SS override */ - goto no_rm; - } else { - disp = 0; - } - break; - case 1: - disp = (int8_t)cpu_ldub_code(env, s->pc++); - break; - default: - case 2: - disp = (int16_t)cpu_lduw_code(env, s->pc); - s->pc += 2; - break; - } - - sum = cpu_A0; - switch (rm) { - case 0: - tcg_gen_add_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EBX], *cpu_regs[R_ESI]); - break; - case 1: - tcg_gen_add_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EBX], *cpu_regs[R_EDI]); - break; - case 2: - tcg_gen_add_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EBP], *cpu_regs[R_ESI]); - break; - case 3: - tcg_gen_add_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EBP], *cpu_regs[R_EDI]); - break; - case 4: - sum = *cpu_regs[R_ESI]; - break; - case 5: - sum = *cpu_regs[R_EDI]; - break; - case 6: - sum = *cpu_regs[R_EBP]; - break; - default: - case 7: - sum = *cpu_regs[R_EBX]; - break; - } - tcg_gen_addi_tl(tcg_ctx, cpu_A0, sum, disp); - tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); - no_rm: - if (must_add_seg) { - if (override < 0) { - if (rm == 2 || rm == 3 || rm == 6) { - override = R_SS; - } else { - override = R_DS; - } - } - gen_op_addl_A0_seg(s, override); - } - break; - - default: - tcg_abort(); - } -} - -static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) -{ - int mod, rm, base, code; - - mod = (modrm >> 6) & 3; - if (mod == 3) - return; - rm = modrm & 7; - - switch (s->aflag) { - case MO_64: - case MO_32: - base = rm; - - if (base == 4) { - code = cpu_ldub_code(env, s->pc++); 
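            /* ModRM is mod(7:6) reg(5:3) rm(2:0); rm == 4 with a memory
               mod means a SIB byte (scale/index/base) follows.  This
               function only needs to skip the extra bytes, so the SIB
               fields other than base are ignored. */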
- base = (code & 7); - } - - switch (mod) { - case 0: - if (base == 5) { - s->pc += 4; - } - break; - case 1: - s->pc++; - break; - default: - case 2: - s->pc += 4; - break; - } - break; - - case MO_16: - switch (mod) { - case 0: - if (rm == 6) { - s->pc += 2; - } - break; - case 1: - s->pc++; - break; - default: - case 2: - s->pc += 2; - break; - } - break; - - default: - tcg_abort(); - } -} - -/* used for LEA and MOV AX, mem */ -static void gen_add_A0_ds_seg(DisasContext *s) -{ - int override, must_add_seg; - must_add_seg = s->addseg; - override = R_DS; - if (s->override >= 0) { - override = s->override; - must_add_seg = 1; - } - if (must_add_seg) { -#ifdef TARGET_X86_64 - if (CODE64(s)) { - gen_op_addq_A0_seg(s->uc->tcg_ctx, override); - } else -#endif - { - gen_op_addl_A0_seg(s, override); - } - } -} - -/* generate modrm memory load or store of 'reg'. TMP0 is used if reg == - OR_TMP0 */ -static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, - TCGMemOp ot, int reg, int is_store) -{ - int mod, rm; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - if (mod == 3) { - if (is_store) { - if (reg != OR_TMP0) - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); - if (reg != OR_TMP0) - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - } - } else { - gen_lea_modrm(env, s, modrm); - if (is_store) { - if (reg != OR_TMP0) - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - if (reg != OR_TMP0) - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - } - } -} - -static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) -{ - uint32_t ret; - - switch (ot) { - case MO_8: - ret = cpu_ldub_code(env, s->pc); - s->pc++; - break; - case MO_16: - ret = cpu_lduw_code(env, s->pc); - s->pc += 2; - break; - case MO_32: -#ifdef TARGET_X86_64 - case MO_64: -#endif - ret = cpu_ldl_code(env, s->pc); - s->pc += 4; - break; - default: - tcg_abort(); - } - return ret; -} - -static inline int insn_const_size(TCGMemOp ot) -{ - if (ot <= MO_32) { - return 1 << ot; - } else { - return 4; - } -} - -static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip) -{ - TranslationBlock *tb; - target_ulong pc; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - pc = s->cs_base + eip; - tb = s->tb; - /* NOTE: we handle the case where the TB spans two pages here */ - if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) || - (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) { - /* jump to same page: we can use a direct jump */ - tcg_gen_goto_tb(tcg_ctx, tb_num); - gen_jmp_im(s, eip); - tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + tb_num); - } else { - /* jump to another page: currently not optimized */ - gen_jmp_im(s, eip); - gen_eob(s); - } -} - -static inline void gen_jcc(DisasContext *s, int b, - target_ulong val, target_ulong next_eip) -{ - int l1, l2; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - if (s->jmp_opt) { - l1 = gen_new_label(tcg_ctx); - gen_jcc1(s, b, l1); - - gen_goto_tb(s, 0, next_eip); - - gen_set_label(tcg_ctx, l1); - gen_goto_tb(s, 1, val); - s->is_jmp = DISAS_TB_JUMP; - } else { - l1 = gen_new_label(tcg_ctx); - l2 = gen_new_label(tcg_ctx); - gen_jcc1(s, b, l1); - - gen_jmp_im(s, next_eip); - tcg_gen_br(tcg_ctx, l2); - - 
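        /* Taken path: l1 writes the branch target into EIP; both paths
           meet at l2, where gen_eob() ends the translation block. */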
gen_set_label(tcg_ctx, l1); - gen_jmp_im(s, val); - gen_set_label(tcg_ctx, l2); - gen_eob(s); - } -} - -static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b, - int modrm, int reg) -{ - CCPrepare cc; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; - - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - - cc = gen_prepare_cc(s, b, *cpu_T[1]); - if (cc.mask != -1) { - TCGv t0 = tcg_temp_new(tcg_ctx); - tcg_gen_andi_tl(tcg_ctx, t0, cc.reg, cc.mask); - cc.reg = t0; - } - if (!cc.use_reg2) { - cc.reg2 = tcg_const_tl(tcg_ctx, cc.imm); - } - - tcg_gen_movcond_tl(tcg_ctx, cc.cond, *cpu_T[0], cc.reg, cc.reg2, - *cpu_T[0], *cpu_regs[reg]); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - - if (cc.mask != -1) { - tcg_temp_free(tcg_ctx, cc.reg); - } - if (!cc.use_reg2) { - tcg_temp_free(tcg_ctx, cc.reg2); - } -} - -static inline void gen_op_movl_T0_seg(TCGContext *s, int seg_reg) -{ - TCGv **cpu_T = (TCGv **)s->cpu_T; - - tcg_gen_ld32u_tl(s, *cpu_T[0], s->cpu_env, - offsetof(CPUX86State,segs[seg_reg].selector)); -} - -static inline void gen_op_movl_seg_T0_vm(TCGContext *s, int seg_reg) -{ - TCGv **cpu_T = (TCGv **)s->cpu_T; - - tcg_gen_andi_tl(s, *cpu_T[0], *cpu_T[0], 0xffff); - tcg_gen_st32_tl(s, *cpu_T[0], s->cpu_env, - offsetof(CPUX86State,segs[seg_reg].selector)); - tcg_gen_shli_tl(s, *cpu_T[0], *cpu_T[0], 4); - tcg_gen_st_tl(s, *cpu_T[0], s->cpu_env, - offsetof(CPUX86State,segs[seg_reg].base)); -} - -/* move T0 to seg_reg and compute if the CPU state may change. Never - call this function with seg_reg == R_CS */ -static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - if (s->pe && !s->vm86) { - /* XXX: optimize by finding processor state dynamically */ - gen_update_cc_op(s); - gen_jmp_im(s, cur_eip); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - gen_helper_load_seg(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, seg_reg), cpu_tmp2_i32); - /* abort translation because the addseg value may change or - because ss32 may change. For R_SS, translation must always - stop as a special handling must be done to disable hardware - interrupts for the next instruction */ - if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) - s->is_jmp = DISAS_TB_JUMP; - } else { - gen_op_movl_seg_T0_vm(tcg_ctx, seg_reg); - if (seg_reg == R_SS) - s->is_jmp = DISAS_TB_JUMP; - } -} - -static inline int svm_is_rep(int prefixes) -{ - return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 
8 : 0); -} - -static inline void -gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start, - uint32_t type, uint64_t param) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - /* no SVM activated; fast case */ - if (likely(!(s->flags & HF_SVMI_MASK))) - return; - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_svm_check_intercept_param(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, type), - tcg_const_i64(tcg_ctx, param)); -} - -static inline void -gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type) -{ - gen_svm_check_intercept_param(s, pc_start, type, 0); -} - -static inline void gen_stack_update(DisasContext *s, int addend) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - -#ifdef TARGET_X86_64 - if (CODE64(s)) { - gen_op_add_reg_im(tcg_ctx, MO_64, R_ESP, addend); - } else -#endif - if (s->ss32) { - gen_op_add_reg_im(tcg_ctx, MO_32, R_ESP, addend); - } else { - gen_op_add_reg_im(tcg_ctx, MO_16, R_ESP, addend); - } -} - -/* Generate a push. It depends on ss32, addseg and dflag. */ -static void gen_push_v(DisasContext *s, TCGv val) -{ - TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag); - int size = 1 << d_ot; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; - TCGv new_esp = cpu_A0; - TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; - - tcg_gen_subi_tl(tcg_ctx, cpu_A0, *cpu_regs[R_ESP], size); - - if (CODE64(s)) { - a_ot = MO_64; - } else if (s->ss32) { - a_ot = MO_32; - if (s->addseg) { - new_esp = cpu_tmp4; - tcg_gen_mov_tl(tcg_ctx, new_esp, cpu_A0); - gen_op_addl_A0_seg(s, R_SS); - } else { - tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, cpu_A0); - } - } else { - a_ot = MO_16; - new_esp = cpu_tmp4; - tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); - tcg_gen_mov_tl(tcg_ctx, new_esp, cpu_A0); - gen_op_addl_A0_seg(s, R_SS); - } - - gen_op_st_v(s, d_ot, val, cpu_A0); - gen_op_mov_reg_v(tcg_ctx, a_ot, R_ESP, new_esp); -} - -/* two step pop is necessary for precise exceptions */ -static TCGMemOp gen_pop_T0(DisasContext *s) -{ - TCGMemOp d_ot = mo_pushpop(s, s->dflag); - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - TCGv addr = cpu_A0; - TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; - - if (CODE64(s)) { - addr = *cpu_regs[R_ESP]; - } else if (!s->ss32) { - tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, *cpu_regs[R_ESP]); - gen_op_addl_A0_seg(s, R_SS); - } else if (s->addseg) { - tcg_gen_mov_tl(tcg_ctx, cpu_A0, *cpu_regs[R_ESP]); - gen_op_addl_A0_seg(s, R_SS); - } else { - tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, *cpu_regs[R_ESP]); - } - - gen_op_ld_v(s, d_ot, *cpu_T[0], addr); - return d_ot; -} - -static void gen_pop_update(DisasContext *s, TCGMemOp ot) -{ - gen_stack_update(s, 1 << ot); -} - -static void gen_stack_A0(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - gen_op_movl_A0_reg(tcg_ctx, R_ESP); - if (!s->ss32) - tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); - tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], cpu_A0); - if (s->addseg) - gen_op_addl_A0_seg(s, R_SS); -} - -/* NOTE: wrap around in 16 bit not fully handled */ -static void gen_pusha(DisasContext *s) -{ - int i; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - gen_op_movl_A0_reg(tcg_ctx, R_ESP); - gen_op_addl_A0_im(tcg_ctx, ((unsigned int)(-8)) << s->dflag); - if (!s->ss32) - 
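        /* 16-bit stack segment: wrap the computed address to 64 KiB */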
tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); - tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], cpu_A0); - if (s->addseg) - gen_op_addl_A0_seg(s, R_SS); - for(i = 0;i < 8; i++) { - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], 7 - i); - gen_op_st_v(s, s->dflag, *cpu_T[0], cpu_A0); - gen_op_addl_A0_im(tcg_ctx, 1 << s->dflag); - } - gen_op_mov_reg_v(tcg_ctx, MO_16 + s->ss32, R_ESP, *cpu_T[1]); -} - -/* NOTE: wrap around in 16 bit not fully handled */ -static void gen_popa(DisasContext *s) -{ - int i; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - gen_op_movl_A0_reg(tcg_ctx, R_ESP); - if (!s->ss32) - tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); - tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], cpu_A0); - tcg_gen_addi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], 8 << s->dflag); - if (s->addseg) - gen_op_addl_A0_seg(s, R_SS); - for(i = 0;i < 8; i++) { - /* ESP is not reloaded */ - if (i != 3) { - gen_op_ld_v(s, s->dflag, *cpu_T[0], cpu_A0); - gen_op_mov_reg_v(tcg_ctx, s->dflag, 7 - i, *cpu_T[0]); - } - gen_op_addl_A0_im(tcg_ctx, 1 << s->dflag); - } - gen_op_mov_reg_v(tcg_ctx, MO_16 + s->ss32, R_ESP, *cpu_T[1]); -} - -static void gen_enter(DisasContext *s, int esp_addend, int level) -{ - TCGMemOp ot = mo_pushpop(s, s->dflag); - int opsize = 1 << ot; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - - level &= 0x1f; -#ifdef TARGET_X86_64 - if (CODE64(s)) { - gen_op_movl_A0_reg(tcg_ctx, R_ESP); - gen_op_addq_A0_im(tcg_ctx, -opsize); - tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], cpu_A0); - - /* push bp */ - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EBP); - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - if (level) { - /* XXX: must save state */ - gen_helper_enter64_level(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, level), - tcg_const_i32(tcg_ctx, (ot == MO_64)), - *cpu_T[1]); - } - gen_op_mov_reg_v(tcg_ctx, ot, R_EBP, *cpu_T[1]); - tcg_gen_addi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], -esp_addend + (-opsize * level)); - gen_op_mov_reg_v(tcg_ctx, MO_64, R_ESP, *cpu_T[1]); - } else -#endif - { - gen_op_movl_A0_reg(tcg_ctx, R_ESP); - gen_op_addl_A0_im(tcg_ctx, -opsize); - if (!s->ss32) - tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); - tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], cpu_A0); - if (s->addseg) - gen_op_addl_A0_seg(s, R_SS); - /* push bp */ - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EBP); - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - if (level) { - /* XXX: must save state */ - gen_helper_enter_level(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, level), - tcg_const_i32(tcg_ctx, s->dflag - 1), - *cpu_T[1]); - } - gen_op_mov_reg_v(tcg_ctx, ot, R_EBP, *cpu_T[1]); - tcg_gen_addi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], -esp_addend + (-opsize * level)); - gen_op_mov_reg_v(tcg_ctx, MO_16 + s->ss32, R_ESP, *cpu_T[1]); - } -} - -static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - gen_update_cc_op(s); - gen_jmp_im(s, cur_eip); - gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, trapno)); - s->is_jmp = DISAS_TB_JUMP; -} - -/* an interrupt is different from an exception because of the - privilege checks */ -static void gen_interrupt(DisasContext *s, int intno, - target_ulong cur_eip, target_ulong next_eip) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - gen_update_cc_op(s); - // Unicorn: skip to the next instruction after our interrupt callback - gen_jmp_im(s, cur_eip); - gen_helper_raise_interrupt(tcg_ctx, 
tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, intno), - tcg_const_i32(tcg_ctx, next_eip - cur_eip)); - s->is_jmp = DISAS_TB_JUMP; -} - -static void gen_debug(DisasContext *s, target_ulong cur_eip) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - gen_update_cc_op(s); - gen_jmp_im(s, cur_eip); - gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); - s->is_jmp = DISAS_TB_JUMP; -} - -/* generate a generic end of block. Trace exception is also generated - if needed */ -static void gen_eob(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - - gen_update_cc_op(s); // qq - if (s->tb->flags & HF_INHIBIT_IRQ_MASK) { - gen_helper_reset_inhibit_irq(tcg_ctx, tcg_ctx->cpu_env); - } - if (s->tb->flags & HF_RF_MASK) { - gen_helper_reset_rf(tcg_ctx, tcg_ctx->cpu_env); - } - if (s->singlestep_enabled) { - gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); - } else if (s->tf) { - gen_helper_single_step(tcg_ctx, tcg_ctx->cpu_env); - } else { - tcg_gen_exit_tb(s->uc->tcg_ctx, 0); - } - s->is_jmp = DISAS_TB_JUMP; -} - -/* generate a jump to eip. No segment change must happen before as a - direct call to the next block may occur */ -static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num) -{ - gen_update_cc_op(s); - set_cc_op(s, CC_OP_DYNAMIC); - if (s->jmp_opt) { - gen_goto_tb(s, tb_num, eip); - s->is_jmp = DISAS_TB_JUMP; - } else { - gen_jmp_im(s, eip); - gen_eob(s); - } -} - -static void gen_jmp(DisasContext *s, target_ulong eip) -{ - gen_jmp_tb(s, eip, 0); -} - -static inline void gen_ldq_env_A0(DisasContext *s, int offset) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - - tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); - tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset); -} - -static inline void gen_stq_env_A0(DisasContext *s, int offset) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - - tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset); - tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); -} - -static inline void gen_ldo_env_A0(DisasContext *s, int offset) -{ - int mem_index = s->mem_index; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - - tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ); - tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(XMMReg, XMM_Q(0))); - tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_A0, 8); - tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ); - tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(XMMReg, XMM_Q(1))); -} - -static inline void gen_sto_env_A0(DisasContext *s, int offset) -{ - int mem_index = s->mem_index; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - - tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(XMMReg, XMM_Q(0))); - tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ); - tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_A0, 8); - tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(XMMReg, XMM_Q(1))); - tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ); -} - -static inline void gen_op_movo(TCGContext *s, int 
d_offset, int s_offset) -{ - TCGv_i64 cpu_tmp1_i64 = s->cpu_tmp1_i64; - - tcg_gen_ld_i64(s, cpu_tmp1_i64, s->cpu_env, s_offset); - tcg_gen_st_i64(s, cpu_tmp1_i64, s->cpu_env, d_offset); - tcg_gen_ld_i64(s, cpu_tmp1_i64, s->cpu_env, s_offset + 8); - tcg_gen_st_i64(s, cpu_tmp1_i64, s->cpu_env, d_offset + 8); -} - -static inline void gen_op_movq(TCGContext *s, int d_offset, int s_offset) -{ - TCGv_i64 cpu_tmp1_i64 = s->cpu_tmp1_i64; - - tcg_gen_ld_i64(s, cpu_tmp1_i64, s->cpu_env, s_offset); - tcg_gen_st_i64(s, cpu_tmp1_i64, s->cpu_env, d_offset); -} - -static inline void gen_op_movl(TCGContext *s, int d_offset, int s_offset) -{ - tcg_gen_ld_i32(s, s->cpu_tmp2_i32, s->cpu_env, s_offset); - tcg_gen_st_i32(s, s->cpu_tmp2_i32, s->cpu_env, d_offset); -} - -static inline void gen_op_movq_env_0(TCGContext *s, int d_offset) -{ - TCGv_i64 cpu_tmp1_i64 = s->cpu_tmp1_i64; - - tcg_gen_movi_i64(s, cpu_tmp1_i64, 0); - tcg_gen_st_i64(s, cpu_tmp1_i64, s->cpu_env, d_offset); -} - -typedef void (*SSEFunc_i_ep)(TCGContext *s, TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg); -typedef void (*SSEFunc_l_ep)(TCGContext *s, TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg); -typedef void (*SSEFunc_0_epi)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val); -typedef void (*SSEFunc_0_epl)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val); -typedef void (*SSEFunc_0_epp)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b); -typedef void (*SSEFunc_0_eppi)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, - TCGv_i32 val); -typedef void (*SSEFunc_0_ppi)(TCGContext *s, TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val); -typedef void (*SSEFunc_0_eppt)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, - TCGv val); - -#define SSE_SPECIAL ((void *)1) -#define SSE_DUMMY ((void *)2) - -#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm } -#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \ - gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, } - -static const SSEFunc_0_epp sse_op_table1[256][4] = { - // filler: 0x00 - 0x0e - {0},{0},{0},{0},{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, - - /* 3DNow! extensions */ - { SSE_DUMMY }, /* femms */ - { SSE_DUMMY }, /* pf. . . 
*/ - - /* pure SSE operations */ - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */ - { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */ - { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm }, - { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm }, - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */ - { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */ - - // filler: 0x18 - 0x27 - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - - /* pure SSE operations */ - { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ - { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */ - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */ - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */ - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */ - { gen_helper_ucomiss, gen_helper_ucomisd }, - { gen_helper_comiss, gen_helper_comisd }, - - // filler: 0x30 - 0x37 - {0},{0},{0},{0},{0},{0},{0},{0}, - - /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */ - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, - {0}, // filler: 0x39 - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, - - // filler: 0x3b - 0x4f - {0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - - /* pure SSE operations */ - { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */ - SSE_FOP(sqrt), - { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL }, - { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL }, - { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */ - { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */ - { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */ - { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */ - SSE_FOP(add), - SSE_FOP(mul), - { gen_helper_cvtps2pd, gen_helper_cvtpd2ps, - gen_helper_cvtss2sd, gen_helper_cvtsd2ss }, - { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq }, - SSE_FOP(sub), - SSE_FOP(min), - SSE_FOP(div), - SSE_FOP(max), - - /* MMX ops and their SSE extensions */ - MMX_OP2(punpcklbw), - MMX_OP2(punpcklwd), - MMX_OP2(punpckldq), - MMX_OP2(packsswb), - MMX_OP2(pcmpgtb), - MMX_OP2(pcmpgtw), - MMX_OP2(pcmpgtl), - MMX_OP2(packuswb), - MMX_OP2(punpckhbw), - MMX_OP2(punpckhwd), - MMX_OP2(punpckhdq), - MMX_OP2(packssdw), - { NULL, gen_helper_punpcklqdq_xmm }, - { NULL, gen_helper_punpckhqdq_xmm }, - { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */ - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */ - { (SSEFunc_0_epp)gen_helper_pshufw_mmx, - (SSEFunc_0_epp)gen_helper_pshufd_xmm, - (SSEFunc_0_epp)gen_helper_pshufhw_xmm, - (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */ - { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */ - { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */ - { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */ - MMX_OP2(pcmpeqb), - MMX_OP2(pcmpeqw), - MMX_OP2(pcmpeql), - { SSE_DUMMY }, /* emms */ - { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */ - { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r }, - {0},{0}, // filler: 0x7a - 0x7b - { NULL, 
gen_helper_haddpd, NULL, gen_helper_haddps }, - { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps }, - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */ - { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */ - - // filler: 0x80 - 0xc1 - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0}, - - SSE_FOP(cmpeq), - - // filler: 0xc3 - {0}, - - /* MMX ops and their SSE extensions */ - { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */ - { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */ - - { (SSEFunc_0_epp)gen_helper_shufps, - (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */ - - // filler: 0xc7 - 0xcf - {0}, {0},{0},{0},{0},{0},{0},{0},{0}, - - /* MMX ops and their SSE extensions */ - { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps }, - MMX_OP2(psrlw), - MMX_OP2(psrld), - MMX_OP2(psrlq), - MMX_OP2(paddq), - MMX_OP2(pmullw), - { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, - { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */ - MMX_OP2(psubusb), - MMX_OP2(psubusw), - MMX_OP2(pminub), - MMX_OP2(pand), - MMX_OP2(paddusb), - MMX_OP2(paddusw), - MMX_OP2(pmaxub), - MMX_OP2(pandn), - MMX_OP2(pavgb), - MMX_OP2(psraw), - MMX_OP2(psrad), - MMX_OP2(pavgw), - MMX_OP2(pmulhuw), - MMX_OP2(pmulhw), - { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq }, - { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntq */ - MMX_OP2(psubsb), - MMX_OP2(psubsw), - MMX_OP2(pminsw), - MMX_OP2(por), - MMX_OP2(paddsb), - MMX_OP2(paddsw), - MMX_OP2(pmaxsw), - MMX_OP2(pxor), - { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */ - MMX_OP2(psllw), - MMX_OP2(pslld), - MMX_OP2(psllq), - MMX_OP2(pmuludq), - MMX_OP2(pmaddwd), - MMX_OP2(psadbw), - { (SSEFunc_0_epp)gen_helper_maskmov_mmx, - (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */ - MMX_OP2(psubb), - MMX_OP2(psubw), - MMX_OP2(psubl), - MMX_OP2(psubq), - MMX_OP2(paddb), - MMX_OP2(paddw), - MMX_OP2(paddl), - - // filler: 0xff - {0}, -}; - -static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = { -#ifdef _MSC_VER - {0},{0}, - MMX_OP2(psrlw), - {0}, - MMX_OP2(psraw), - {0}, - MMX_OP2(psllw), - {0},{0},{0}, - MMX_OP2(psrld), - {0}, - MMX_OP2(psrad), - {0}, - MMX_OP2(pslld), - {0},{0},{0}, - MMX_OP2(psrlq), - { NULL, gen_helper_psrldq_xmm }, - {0},{0}, - MMX_OP2(psllq), - { NULL, gen_helper_pslldq_xmm }, -#else - [0 + 2] = MMX_OP2(psrlw), - [0 + 4] = MMX_OP2(psraw), - [0 + 6] = MMX_OP2(psllw), - [8 + 2] = MMX_OP2(psrld), - [8 + 4] = MMX_OP2(psrad), - [8 + 6] = MMX_OP2(pslld), - [16 + 2] = MMX_OP2(psrlq), - [16 + 3] = { NULL, gen_helper_psrldq_xmm }, - [16 + 6] = MMX_OP2(psllq), - [16 + 7] = { NULL, gen_helper_pslldq_xmm }, -#endif -}; - -static const SSEFunc_0_epi sse_op_table3ai[] = { - gen_helper_cvtsi2ss, - gen_helper_cvtsi2sd -}; - -#ifdef TARGET_X86_64 -static const SSEFunc_0_epl sse_op_table3aq[] = { - gen_helper_cvtsq2ss, - gen_helper_cvtsq2sd -}; -#endif - -static const SSEFunc_i_ep sse_op_table3bi[] = { - gen_helper_cvttss2si, - gen_helper_cvtss2si, - gen_helper_cvttsd2si, - gen_helper_cvtsd2si -}; - -#ifdef TARGET_X86_64 -static const SSEFunc_l_ep sse_op_table3bq[] = { - gen_helper_cvttss2sq, - gen_helper_cvtss2sq, - gen_helper_cvttsd2sq, - gen_helper_cvtsd2sq -}; -#endif - -static const SSEFunc_0_epp sse_op_table4[8][4] = { - SSE_FOP(cmpeq), - SSE_FOP(cmplt), - SSE_FOP(cmple), - SSE_FOP(cmpunord), - 
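    /* predicates 4..7 are the negated forms; the imm8 of cmpps/cmppd
       selects the row of this table */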
SSE_FOP(cmpneq), - SSE_FOP(cmpnlt), - SSE_FOP(cmpnle), - SSE_FOP(cmpord), -}; - -static const SSEFunc_0_epp sse_op_table5[256] = { -#ifdef _MSC_VER - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0}, // filler: 0x00 - 0x0b - gen_helper_pi2fw, - gen_helper_pi2fd, - {0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0}, // filler: 0x0e - 0x01b - gen_helper_pf2iw, - gen_helper_pf2id, - // filler: 0x1e - 0x89 - {0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0}, - gen_helper_pfnacc, - {0},{0},{0}, // filler: 0x8b - 0x8d - gen_helper_pfpnacc, - {0}, // filler: 0x8f - gen_helper_pfcmpge, - {0},{0},{0}, // filler: 0x91 - 0x93 - gen_helper_pfmin, - {0}, // filler: 0x95 - gen_helper_pfrcp, - gen_helper_pfrsqrt, - {0},{0}, // filler: 0x98 - 0x99 - gen_helper_pfsub, - {0},{0},{0}, // filler: 0x9b - 0x9d - gen_helper_pfadd, - {0}, // filler: 0x9f - gen_helper_pfcmpgt, - {0},{0},{0}, // filler: 0xa1 - 0xa3 - gen_helper_pfmax, - {0}, // filler: 0xa5 - gen_helper_movq, /* pfrcpit1; no need to actually increase precision */ - gen_helper_movq, /* pfrsqit1 */ - {0},{0}, // filler: 0xa8 - 0xa9 - gen_helper_pfsubr, - {0},{0},{0}, // filler: 0xab - 0xad - gen_helper_pfacc, - {0}, // filler: 0xaf - gen_helper_pfcmpeq, - {0},{0},{0}, // filler: 0xb1 - 0xb3 - gen_helper_pfmul, - {0}, // filler: 0xb5 - gen_helper_movq, /* pfrcpit2 */ - gen_helper_pmulhrw_mmx, - {0},{0},{0}, // filler: 0xb8 - 0xba - gen_helper_pswapd, - {0},{0},{0}, // filler: 0xbc - 0xbe - gen_helper_pavgb_mmx, /* pavgusb */ - // filler: 0xc0 - 0xff - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, - {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, -#else - [0x0c] = gen_helper_pi2fw, - [0x0d] = gen_helper_pi2fd, - [0x1c] = gen_helper_pf2iw, - [0x1d] = gen_helper_pf2id, - [0x8a] = gen_helper_pfnacc, - [0x8e] = gen_helper_pfpnacc, - [0x90] = gen_helper_pfcmpge, - [0x94] = gen_helper_pfmin, - [0x96] = gen_helper_pfrcp, - [0x97] = gen_helper_pfrsqrt, - [0x9a] = gen_helper_pfsub, - [0x9e] = gen_helper_pfadd, - [0xa0] = gen_helper_pfcmpgt, - [0xa4] = gen_helper_pfmax, - [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */ - [0xa7] = gen_helper_movq, /* pfrsqit1 */ - [0xaa] = gen_helper_pfsubr, - [0xae] = gen_helper_pfacc, - [0xb0] = gen_helper_pfcmpeq, - [0xb4] = gen_helper_pfmul, - [0xb6] = gen_helper_movq, /* pfrcpit2 */ - [0xb7] = gen_helper_pmulhrw_mmx, - [0xbb] = gen_helper_pswapd, - [0xbf] = gen_helper_pavgb_mmx /* pavgusb */ -#endif -}; - -struct SSEOpHelper_epp { - SSEFunc_0_epp op[2]; - uint32_t ext_mask; -}; - -struct SSEOpHelper_eppi { - SSEFunc_0_eppi op[2]; - uint32_t ext_mask; -}; - -#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 } -#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 } -#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 } -#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 } -#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \ - CPUID_EXT_PCLMULQDQ } 
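Each of these wrappers pairs a helper with the CPUID feature bit that guards
it; the decoder raises #UD when the guest CPU model lacks the feature.  A
self-contained sketch of that gating (illustrative; the entry and feature
word are made up, only the two CPUID.01H:ECX bit positions are real):

#include <stdint.h>
#include <stdio.h>

#define CPUID_EXT_SSE41 (1u << 19)   /* CPUID.01H:ECX bit 19 */
#define CPUID_EXT_AES   (1u << 25)   /* CPUID.01H:ECX bit 25 */

struct sse_entry { const char *name; uint32_t ext_mask; };

/* execute only if the required feature bit is present */
static int op_allowed(const struct sse_entry *e, uint32_t features)
{
    return (features & e->ext_mask) != 0;
}

int main(void)
{
    struct sse_entry aesenc = { "aesenc", CPUID_EXT_AES };
    uint32_t features = CPUID_EXT_SSE41;   /* guest without AES-NI */
    printf("%s -> %s\n", aesenc.name,
           op_allowed(&aesenc, features) ? "execute" : "#UD");
    return 0;
}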
-#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES } - -static const struct SSEOpHelper_epp sse_op_table6[256] = { - SSSE3_OP(pshufb), - SSSE3_OP(phaddw), - SSSE3_OP(phaddd), - SSSE3_OP(phaddsw), - SSSE3_OP(pmaddubsw), - SSSE3_OP(phsubw), - SSSE3_OP(phsubd), - SSSE3_OP(phsubsw), - SSSE3_OP(psignb), - SSSE3_OP(psignw), - SSSE3_OP(psignd), - SSSE3_OP(pmulhrsw), - {{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x0c - 0x0f - SSE41_OP(pblendvb), - {{0},0},{{0},0},{{0},0}, // filler: 0x11 - 0x13 - SSE41_OP(blendvps), - SSE41_OP(blendvpd), - {{0},0}, // filler: 0x16 - SSE41_OP(ptest), - {{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x18 - 0x1b - SSSE3_OP(pabsb), - SSSE3_OP(pabsw), - SSSE3_OP(pabsd), - {{0},0}, // filler: 0x1f - SSE41_OP(pmovsxbw), - SSE41_OP(pmovsxbd), - SSE41_OP(pmovsxbq), - SSE41_OP(pmovsxwd), - SSE41_OP(pmovsxwq), - SSE41_OP(pmovsxdq), - {{0},0},{{0},0}, // filler: 0x26 - 0x27 - SSE41_OP(pmuldq), - SSE41_OP(pcmpeqq), - SSE41_SPECIAL, /* movntqda */ - SSE41_OP(packusdw), - {{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x2c - 0x2f - SSE41_OP(pmovzxbw), - SSE41_OP(pmovzxbd), - SSE41_OP(pmovzxbq), - SSE41_OP(pmovzxwd), - SSE41_OP(pmovzxwq), - SSE41_OP(pmovzxdq), - {{0},0}, // filler: 0x36 - SSE42_OP(pcmpgtq), - SSE41_OP(pminsb), - SSE41_OP(pminsd), - SSE41_OP(pminuw), - SSE41_OP(pminud), - SSE41_OP(pmaxsb), - SSE41_OP(pmaxsd), - SSE41_OP(pmaxuw), - SSE41_OP(pmaxud), - SSE41_OP(pmulld), - SSE41_OP(phminposuw), - // filler: 0x42 - 0xda - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0}, - AESNI_OP(aesimc), - AESNI_OP(aesenc), - AESNI_OP(aesenclast), - AESNI_OP(aesdec), - AESNI_OP(aesdeclast), - // filler: 0xe0 - 0xff - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, -}; - -static const struct SSEOpHelper_eppi sse_op_table7[256] = { -#ifdef _MSC_VER - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x00 - 0x07 - SSE41_OP(roundps), - SSE41_OP(roundpd), - SSE41_OP(roundss), - SSE41_OP(roundsd), - SSE41_OP(blendps), - SSE41_OP(blendpd), - SSE41_OP(pblendw), - SSSE3_OP(palignr), - {{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x10 - 0x13 - 
SSE41_SPECIAL, /* pextrb */ - SSE41_SPECIAL, /* pextrw */ - SSE41_SPECIAL, /* pextrd/pextrq */ - SSE41_SPECIAL, /* extractps */ - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x18 - 0x1f - SSE41_SPECIAL, /* pinsrb */ - SSE41_SPECIAL, /* insertps */ - SSE41_SPECIAL, /* pinsrd/pinsrq */ - // filler: 0x23 - 0x3f - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - SSE41_OP(dpps), - SSE41_OP(dppd), - SSE41_OP(mpsadbw), - {{0},0}, // filler: 0x43 - PCLMULQDQ_OP(pclmulqdq), - // filler: 0x45 - 0x5f - {{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - SSE42_OP(pcmpestrm), - SSE42_OP(pcmpestri), - SSE42_OP(pcmpistrm), - SSE42_OP(pcmpistri), - // filler: 0x64 - 0xde - {{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - AESNI_OP(aeskeygenassist), - // filler: 0xe0 - 0xff - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, - {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, -#else - [0x08] = SSE41_OP(roundps), - [0x09] = SSE41_OP(roundpd), - [0x0a] = SSE41_OP(roundss), - [0x0b] = SSE41_OP(roundsd), - [0x0c] = SSE41_OP(blendps), - [0x0d] = SSE41_OP(blendpd), - [0x0e] = SSE41_OP(pblendw), - [0x0f] = SSSE3_OP(palignr), - [0x14] = SSE41_SPECIAL, /* pextrb */ - [0x15] = SSE41_SPECIAL, /* pextrw */ - [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */ - [0x17] = SSE41_SPECIAL, /* extractps */ - [0x20] = SSE41_SPECIAL, /* pinsrb */ - [0x21] = SSE41_SPECIAL, /* insertps */ - [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */ - [0x40] = SSE41_OP(dpps), - [0x41] = SSE41_OP(dppd), - [0x42] = SSE41_OP(mpsadbw), - [0x44] = PCLMULQDQ_OP(pclmulqdq), - [0x60] = SSE42_OP(pcmpestrm), - [0x61] = SSE42_OP(pcmpestri), - [0x62] = SSE42_OP(pcmpistrm), - [0x63] = SSE42_OP(pcmpistri), - [0xdf] = AESNI_OP(aeskeygenassist), -#endif -}; - -static void gen_sse(CPUX86State *env, DisasContext *s, int b, - target_ulong pc_start, int rex_r) -{ - int b1, op1_offset, op2_offset, is_xmm, val; - int modrm, mod, rm, reg; - SSEFunc_0_epp sse_fn_epp; - SSEFunc_0_eppi sse_fn_eppi; - SSEFunc_0_ppi sse_fn_ppi; - SSEFunc_0_eppt sse_fn_eppt; - TCGMemOp ot; - TCGContext 
*tcg_ctx = s->uc->tcg_ctx; - TCGv_ptr cpu_env = tcg_ctx->cpu_env; - TCGv_ptr cpu_ptr0 = tcg_ctx->cpu_ptr0; - TCGv_ptr cpu_ptr1 = tcg_ctx->cpu_ptr1; - TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; - TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32; - TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; - - b &= 0xff; - if (s->prefix & PREFIX_DATA) - b1 = 1; - else if (s->prefix & PREFIX_REPZ) - b1 = 2; - else if (s->prefix & PREFIX_REPNZ) - b1 = 3; - else - b1 = 0; - sse_fn_epp = sse_op_table1[b][b1]; - if (!sse_fn_epp) { - goto illegal_op; - } - if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { - is_xmm = 1; - } else { - if (b1 == 0) { - /* MMX case */ - is_xmm = 0; - } else { - is_xmm = 1; - } - } - /* simple MMX/SSE operation */ - if (s->flags & HF_TS_MASK) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); - return; - } - if (s->flags & HF_EM_MASK) { - illegal_op: - gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base); - return; - } - if (is_xmm && !(s->flags & HF_OSFXSR_MASK)) - if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA)) - goto illegal_op; - if (b == 0x0e) { - if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) - goto illegal_op; - /* femms */ - gen_helper_emms(tcg_ctx, cpu_env); - return; - } - if (b == 0x77) { - /* emms */ - gen_helper_emms(tcg_ctx, cpu_env); - return; - } - /* prepare MMX state (XXX: optimize by storing fptt and fptags in - the static cpu state) */ - if (!is_xmm) { - gen_helper_enter_mmx(tcg_ctx, cpu_env); - } - - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7); - if (is_xmm) - reg |= rex_r; - mod = (modrm >> 6) & 3; - if (sse_fn_epp == SSE_SPECIAL) { - b |= (b1 << 8); - switch(b) { - case 0x0e7: /* movntq */ - if (mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); - break; - case 0x1e7: /* movntdq */ - case 0x02b: /* movntps */ - case 0x12b: /* movntps */ - if (mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - break; - case 0x3f0: /* lddqu */ - if (mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - break; - case 0x22b: /* movntss */ - case 0x32b: /* movntsd */ - if (mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - if (b1 & 1) { - gen_stq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(0))); - } else { - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, - xmm_regs[reg].XMM_L(0))); - gen_op_st_v(s, MO_32, *cpu_T[0], cpu_A0); - } - break; - case 0x6e: /* movd mm, ea */ -#ifdef TARGET_X86_64 - if (s->dflag == MO_64) { - gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); - tcg_gen_st_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); - } else -#endif - { - gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, - offsetof(CPUX86State,fpregs[reg].mmx)); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - gen_helper_movl_mm_T0_mmx(tcg_ctx, cpu_ptr0, cpu_tmp2_i32); - } - break; - case 0x16e: /* movd xmm, ea */ -#ifdef TARGET_X86_64 - if (s->dflag == MO_64) { - gen_ldst_modrm(env, s, modrm, MO_64, 
OR_TMP0, 0); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, - offsetof(CPUX86State,xmm_regs[reg])); - gen_helper_movq_mm_T0_xmm(tcg_ctx, cpu_ptr0, *cpu_T[0]); - } else -#endif - { - gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, - offsetof(CPUX86State,xmm_regs[reg])); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - gen_helper_movl_mm_T0_xmm(tcg_ctx, cpu_ptr0, cpu_tmp2_i32); - } - break; - case 0x6f: /* movq mm, ea */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); - } else { - rm = (modrm & 7); - tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, cpu_env, - offsetof(CPUX86State,fpregs[rm].mmx)); - tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, cpu_env, - offsetof(CPUX86State,fpregs[reg].mmx)); - } - break; - case 0x010: /* movups */ - case 0x110: /* movupd */ - case 0x028: /* movaps */ - case 0x128: /* movapd */ - case 0x16f: /* movdqa xmm, ea */ - case 0x26f: /* movdqu xmm, ea */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movo(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg]), - offsetof(CPUX86State,xmm_regs[rm])); - } - break; - case 0x210: /* movss xmm, ea */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_32, *cpu_T[0], cpu_A0); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), - offsetof(CPUX86State,xmm_regs[rm].XMM_L(0))); - } - break; - case 0x310: /* movsd xmm, ea */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(0))); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), - offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); - } - break; - case 0x012: /* movlps */ - case 0x112: /* movlpd */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(0))); - } else { - /* movhlps */ - rm = (modrm & 7) | REX_B(s); - gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), - offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1))); - } - break; - case 0x212: /* movsldup */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), - offsetof(CPUX86State,xmm_regs[rm].XMM_L(0))); - gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)), - offsetof(CPUX86State,xmm_regs[rm].XMM_L(2))); - } - gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)), - offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); - gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)), - offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); - 
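            /* result lanes are {0, 0, 2, 2} of the source, per movsldup */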
break; - case 0x312: /* movddup */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(0))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), - offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); - } - gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)), - offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); - break; - case 0x016: /* movhps */ - case 0x116: /* movhpd */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(1))); - } else { - /* movlhps */ - rm = (modrm & 7) | REX_B(s); - gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)), - offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); - } - break; - case 0x216: /* movshdup */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)), - offsetof(CPUX86State,xmm_regs[rm].XMM_L(1))); - gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)), - offsetof(CPUX86State,xmm_regs[rm].XMM_L(3))); - } - gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), - offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); - gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)), - offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); - break; - case 0x178: - case 0x378: - { - int bit_index, field_length; - - if (b1 == 1 && reg != 0) - goto illegal_op; - field_length = cpu_ldub_code(env, s->pc++) & 0x3F; - bit_index = cpu_ldub_code(env, s->pc++) & 0x3F; - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, - offsetof(CPUX86State,xmm_regs[reg])); - if (b1 == 1) - gen_helper_extrq_i(tcg_ctx, cpu_env, cpu_ptr0, - tcg_const_i32(tcg_ctx, bit_index), - tcg_const_i32(tcg_ctx, field_length)); - else - gen_helper_insertq_i(tcg_ctx, cpu_env, cpu_ptr0, - tcg_const_i32(tcg_ctx, bit_index), - tcg_const_i32(tcg_ctx, field_length)); - } - break; - case 0x7e: /* movd ea, mm */ -#ifdef TARGET_X86_64 - if (s->dflag == MO_64) { - tcg_gen_ld_i64(tcg_ctx, *cpu_T[0], cpu_env, - offsetof(CPUX86State,fpregs[reg].mmx)); - gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); - } else -#endif - { - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, - offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0))); - gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); - } - break; - case 0x17e: /* movd ea, xmm */ -#ifdef TARGET_X86_64 - if (s->dflag == MO_64) { - tcg_gen_ld_i64(tcg_ctx, *cpu_T[0], cpu_env, - offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); - gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); - } else -#endif - { - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, - offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); - gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); - } - break; - case 0x27e: /* movq xmm, ea */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(0))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), - offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); - } - gen_op_movq_env_0(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); - break; - case 0x7f: /* movq ea, mm */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); - } else { - rm = (modrm & 7); - gen_op_movq(tcg_ctx, offsetof(CPUX86State,fpregs[rm].mmx), - 
offsetof(CPUX86State,fpregs[reg].mmx)); - } - break; - case 0x011: /* movups */ - case 0x111: /* movupd */ - case 0x029: /* movaps */ - case 0x129: /* movapd */ - case 0x17f: /* movdqa ea, xmm */ - case 0x27f: /* movdqu ea, xmm */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movo(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm]), - offsetof(CPUX86State,xmm_regs[reg])); - } - break; - case 0x211: /* movss ea, xmm */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); - gen_op_st_v(s, MO_32, *cpu_T[0], cpu_A0); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)), - offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); - } - break; - case 0x311: /* movsd ea, xmm */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(0))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)), - offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); - } - break; - case 0x013: /* movlps */ - case 0x113: /* movlpd */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(0))); - } else { - goto illegal_op; - } - break; - case 0x017: /* movhps */ - case 0x117: /* movhpd */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(1))); - } else { - goto illegal_op; - } - break; - case 0x71: /* shift mm, im */ - case 0x72: - case 0x73: - case 0x171: /* shift xmm, im */ - case 0x172: - case 0x173: - if (b1 >= 2) { - goto illegal_op; - } - val = cpu_ldub_code(env, s->pc++); - if (is_xmm) { - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1))); - op1_offset = offsetof(CPUX86State,xmm_t0); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0))); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1))); - op1_offset = offsetof(CPUX86State,mmx_t0); - } - sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 + - (((modrm >> 3)) & 7)][b1]; - if (!sse_fn_epp) { - goto illegal_op; - } - if (is_xmm) { - rm = (modrm & 7) | REX_B(s); - op2_offset = offsetof(CPUX86State,xmm_regs[rm]); - } else { - rm = (modrm & 7); - op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); - } - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op2_offset); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op1_offset); - sse_fn_epp(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - break; - case 0x050: /* movmskps */ - rm = (modrm & 7) | REX_B(s); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, - offsetof(CPUX86State,xmm_regs[rm])); - gen_helper_movmskps(tcg_ctx, cpu_tmp2_i32, cpu_env, cpu_ptr0); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[reg], cpu_tmp2_i32); - break; - case 0x150: /* movmskpd */ - rm = (modrm & 7) | REX_B(s); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, - offsetof(CPUX86State,xmm_regs[rm])); - gen_helper_movmskpd(tcg_ctx, cpu_tmp2_i32, cpu_env, cpu_ptr0); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[reg], cpu_tmp2_i32); - break; - case 0x02a: 
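        /* 0f 2a: convert two packed int32 from an MMX register or memory */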
/* cvtpi2ps */ - case 0x12a: /* cvtpi2pd */ - gen_helper_enter_mmx(tcg_ctx, cpu_env); - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - op2_offset = offsetof(CPUX86State,mmx_t0); - gen_ldq_env_A0(s, op2_offset); - } else { - rm = (modrm & 7); - op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); - } - op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); - switch(b >> 8) { - case 0x0: - gen_helper_cvtpi2ps(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - break; - default: - case 0x1: - gen_helper_cvtpi2pd(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - break; - } - break; - case 0x22a: /* cvtsi2ss */ - case 0x32a: /* cvtsi2sd */ - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); - if (ot == MO_32) { - SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1]; - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - sse_fn_epi(tcg_ctx, cpu_env, cpu_ptr0, cpu_tmp2_i32); - } else { -#ifdef TARGET_X86_64 - SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1]; - sse_fn_epl(tcg_ctx, cpu_env, cpu_ptr0, *cpu_T[0]); -#else - goto illegal_op; -#endif - } - break; - case 0x02c: /* cvttps2pi */ - case 0x12c: /* cvttpd2pi */ - case 0x02d: /* cvtps2pi */ - case 0x12d: /* cvtpd2pi */ - gen_helper_enter_mmx(tcg_ctx, cpu_env); - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - op2_offset = offsetof(CPUX86State,xmm_t0); - gen_ldo_env_A0(s, op2_offset); - } else { - rm = (modrm & 7) | REX_B(s); - op2_offset = offsetof(CPUX86State,xmm_regs[rm]); - } - op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); - switch(b) { - case 0x02c: - gen_helper_cvttps2pi(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - break; - case 0x12c: - gen_helper_cvttpd2pi(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - break; - case 0x02d: - gen_helper_cvtps2pi(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - break; - case 0x12d: - gen_helper_cvtpd2pi(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - break; - } - break; - case 0x22c: /* cvttss2si */ - case 0x32c: /* cvttsd2si */ - case 0x22d: /* cvtss2si */ - case 0x32d: /* cvtsd2si */ - ot = mo_64_32(s->dflag); - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - if ((b >> 8) & 1) { - gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0))); - } else { - gen_op_ld_v(s, MO_32, *cpu_T[0], cpu_A0); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); - } - op2_offset = offsetof(CPUX86State,xmm_t0); - } else { - rm = (modrm & 7) | REX_B(s); - op2_offset = offsetof(CPUX86State,xmm_regs[rm]); - } - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op2_offset); - if (ot == MO_32) { - SSEFunc_i_ep sse_fn_i_ep = - sse_op_table3bi[((b >> 7) & 2) | (b & 1)]; - sse_fn_i_ep(tcg_ctx, cpu_tmp2_i32, cpu_env, cpu_ptr0); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_T[0], cpu_tmp2_i32); - } else { -#ifdef TARGET_X86_64 - SSEFunc_l_ep sse_fn_l_ep = - sse_op_table3bq[((b >> 7) & 2) | (b & 1)]; - sse_fn_l_ep(tcg_ctx, *cpu_T[0], cpu_env, cpu_ptr0); -#else - goto illegal_op; -#endif - } - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - break; - case 0xc4: /* pinsrw */ - case 0x1c4: - s->rip_offset = 1; - gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - val = cpu_ldub_code(env, s->pc++); - if (b1) { - val &= 7; - tcg_gen_st16_tl(tcg_ctx, *cpu_T[0], 
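/*
 * The cvttss2si/cvttsd2si cases above dispatch to helpers that convert with
 * truncation. A sketch of the architectural behaviour for the 32-bit case,
 * assuming the usual "integer indefinite" result (0x80000000) for NaN and
 * out-of-range inputs:
 */
#include <stdint.h>

static int32_t cvttss2si_ref(float x)
{
    /* NaN fails both comparisons and falls through to the indefinite. */
    if (x >= -2147483648.0f && x < 2147483648.0f) {
        return (int32_t)x;  /* the C cast truncates toward zero, like CVTT* */
    }
    return INT32_MIN;       /* integer indefinite */
}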
cpu_env, - offsetof(CPUX86State,xmm_regs[reg].XMM_W(val))); - } else { - val &= 3; - tcg_gen_st16_tl(tcg_ctx, *cpu_T[0], cpu_env, - offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val))); - } - break; - case 0xc5: /* pextrw */ - case 0x1c5: - if (mod != 3) - goto illegal_op; - ot = mo_64_32(s->dflag); - val = cpu_ldub_code(env, s->pc++); - if (b1) { - val &= 7; - rm = (modrm & 7) | REX_B(s); - tcg_gen_ld16u_tl(tcg_ctx, *cpu_T[0], cpu_env, - offsetof(CPUX86State,xmm_regs[rm].XMM_W(val))); - } else { - val &= 3; - rm = (modrm & 7); - tcg_gen_ld16u_tl(tcg_ctx, *cpu_T[0], cpu_env, - offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val))); - } - reg = ((modrm >> 3) & 7) | rex_r; - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - break; - case 0x1d6: /* movq ea, xmm */ - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_stq_env_A0(s, offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(0))); - } else { - rm = (modrm & 7) | REX_B(s); - gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)), - offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); - gen_op_movq_env_0(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1))); - } - break; - case 0x2d6: /* movq2dq */ - gen_helper_enter_mmx(tcg_ctx, cpu_env); - rm = (modrm & 7); - gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), - offsetof(CPUX86State,fpregs[rm].mmx)); - gen_op_movq_env_0(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); - break; - case 0x3d6: /* movdq2q */ - gen_helper_enter_mmx(tcg_ctx, cpu_env); - rm = (modrm & 7) | REX_B(s); - gen_op_movq(tcg_ctx, offsetof(CPUX86State,fpregs[reg & 7].mmx), - offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); - break; - case 0xd7: /* pmovmskb */ - case 0x1d7: - if (mod != 3) - goto illegal_op; - if (b1) { - rm = (modrm & 7) | REX_B(s); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); - gen_helper_pmovmskb_xmm(tcg_ctx, cpu_tmp2_i32, cpu_env, cpu_ptr0); - } else { - rm = (modrm & 7); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx)); - gen_helper_pmovmskb_mmx(tcg_ctx, cpu_tmp2_i32, cpu_env, cpu_ptr0); - } - reg = ((modrm >> 3) & 7) | rex_r; - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[reg], cpu_tmp2_i32); - break; - - case 0x138: - case 0x038: - b = modrm; - if ((b & 0xf0) == 0xf0) { - goto do_0f_38_fx; - } - modrm = cpu_ldub_code(env, s->pc++); - rm = modrm & 7; - reg = ((modrm >> 3) & 7) | rex_r; - mod = (modrm >> 6) & 3; - if (b1 >= 2) { - goto illegal_op; - } - - sse_fn_epp = sse_op_table6[b].op[b1]; - if (!sse_fn_epp) { - goto illegal_op; - } - if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask)) - goto illegal_op; - - if (b1) { - op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - if (mod == 3) { - op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); - } else { - op2_offset = offsetof(CPUX86State,xmm_t0); - gen_lea_modrm(env, s, modrm); - switch (b) { - case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */ - case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */ - case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */ - gen_ldq_env_A0(s, op2_offset + - offsetof(XMMReg, XMM_Q(0))); - break; - case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ - case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, op2_offset + - offsetof(XMMReg, XMM_L(0))); - break; - case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */ - tcg_gen_qemu_ld_tl(s->uc, cpu_tmp0, cpu_A0, - s->mem_index, MO_LEUW); - tcg_gen_st16_tl(tcg_ctx, cpu_tmp0, cpu_env, op2_offset + - 
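/*
 * pmovmskb above has xmm and mmx helper variants; both collect the top bit
 * of every byte lane into an integer. A reference sketch for the 128-bit
 * form (pmovmskb_ref is an illustrative name):
 */
#include <stdint.h>

static uint32_t pmovmskb_ref(const uint8_t xmm[16])
{
    uint32_t mask = 0;
    int i;

    for (i = 0; i < 16; i++) {
        mask |= (uint32_t)(xmm[i] >> 7) << i;  /* bit i = MSB of byte i */
    }
    return mask;
}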
offsetof(XMMReg, XMM_W(0))); - break; - case 0x2a: /* movntqda */ - gen_ldo_env_A0(s, op1_offset); - return; - default: - gen_ldo_env_A0(s, op2_offset); - } - } - } else { - op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); - if (mod == 3) { - op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); - } else { - op2_offset = offsetof(CPUX86State,mmx_t0); - gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, op2_offset); - } - } - if (sse_fn_epp == SSE_SPECIAL) { - goto illegal_op; - } - - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - - if (b == 0x17) { - set_cc_op(s, CC_OP_EFLAGS); - } - break; - - case 0x238: - case 0x338: - do_0f_38_fx: - /* Various integer extensions at 0f 38 f[0-f]. */ - b = modrm | (b1 << 8); - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - - switch (b) { - case 0x3f0: /* crc32 Gd,Eb */ - case 0x3f1: /* crc32 Gd,Ey */ - do_crc32: - if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) { - goto illegal_op; - } - if ((b & 0xff) == 0xf0) { - ot = MO_8; - } else if (s->dflag != MO_64) { - ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32); - } else { - ot = MO_64; - } - - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_regs[reg]); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_helper_crc32(tcg_ctx, *cpu_T[0], cpu_tmp2_i32, - *cpu_T[0], tcg_const_i32(tcg_ctx, 8 << ot)); - - ot = mo_64_32(s->dflag); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - break; - - case 0x1f0: /* crc32 or movbe */ - case 0x1f1: - /* For these insns, the f3 prefix is supposed to have priority - over the 66 prefix, but that's not what we implement above - setting b1. */ - if (s->prefix & PREFIX_REPNZ) { - goto do_crc32; - } - /* FALLTHRU */ - case 0x0f0: /* movbe Gy,My */ - case 0x0f1: /* movbe My,Gy */ - if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) { - goto illegal_op; - } - if (s->dflag != MO_64) { - ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32); - } else { - ot = MO_64; - } - - gen_lea_modrm(env, s, modrm); - if ((b & 1) == 0) { - tcg_gen_qemu_ld_tl(s->uc, *cpu_T[0], cpu_A0, - s->mem_index, ot | MO_BE); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - } else { - tcg_gen_qemu_st_tl(s->uc, *cpu_regs[reg], cpu_A0, - s->mem_index, ot | MO_BE); - } - break; - - case 0x0f2: /* andn Gy, By, Ey */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - tcg_gen_andc_tl(tcg_ctx, *cpu_T[0], *cpu_regs[s->vex_v], *cpu_T[0]); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - gen_op_update1_cc(tcg_ctx); - set_cc_op(s, CC_OP_LOGICB + ot); - break; - - case 0x0f7: /* bextr Gy, Ey, By */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - { - TCGv bound, zero; - - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - /* Extract START, and shift the operand. - Shifts larger than operand size get zeros. */ - tcg_gen_ext8u_tl(tcg_ctx, cpu_A0, *cpu_regs[s->vex_v]); - tcg_gen_shr_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_A0); - - bound = tcg_const_tl(tcg_ctx, ot == MO_64 ? 63 : 31); - zero = tcg_const_tl(tcg_ctx, 0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, *cpu_T[0], cpu_A0, bound, - *cpu_T[0], zero); - tcg_temp_free(tcg_ctx, zero); - - /* Extract the LEN into a mask. 
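/*
 * The crc32 case above calls gen_helper_crc32 with the operand width in
 * bits. The SSE4.2 instruction computes CRC-32C (Castagnoli polynomial,
 * bit-reflected constant 0x82F63B78) with no pre/post inversion of its own;
 * a bitwise per-byte sketch under that assumption:
 */
#include <stdint.h>

static uint32_t crc32c_byte(uint32_t crc, uint8_t data)
{
    int i;

    crc ^= data;
    for (i = 0; i < 8; i++) {
        /* shift right, xor in the reflected polynomial on a carry-out */
        crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
    }
    return crc;
}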
Lengths larger than - operand size get all ones. */ - tcg_gen_shri_tl(tcg_ctx, cpu_A0, *cpu_regs[s->vex_v], 8); - tcg_gen_ext8u_tl(tcg_ctx, cpu_A0, cpu_A0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, cpu_A0, cpu_A0, bound, - cpu_A0, bound); - tcg_temp_free(tcg_ctx, bound); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], 1); - tcg_gen_shl_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], cpu_A0); - tcg_gen_subi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], 1); - tcg_gen_and_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - gen_op_update1_cc(tcg_ctx); - set_cc_op(s, CC_OP_LOGICB + ot); - } - break; - - case 0x0f5: /* bzhi Gy, Ey, By */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v]); - { - TCGv bound = tcg_const_tl(tcg_ctx, ot == MO_64 ? 63 : 31); - /* Note that since we're using BMILG (in order to get O - cleared) we need to store the inverse into C. */ - tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, cpu_cc_src, - *cpu_T[1], bound); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, *cpu_T[1], *cpu_T[1], - bound, bound, *cpu_T[1]); - tcg_temp_free(tcg_ctx, bound); - } - tcg_gen_movi_tl(tcg_ctx, cpu_A0, -1); - tcg_gen_shl_tl(tcg_ctx, cpu_A0, cpu_A0, *cpu_T[1]); - tcg_gen_andc_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_A0); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - gen_op_update1_cc(tcg_ctx); - set_cc_op(s, CC_OP_BMILGB + ot); - break; - - case 0x3f6: /* mulx By, Gy, rdx, Ey */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - switch (ot) { - default: - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_regs[R_EDX]); - tcg_gen_mulu2_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp3_i32, - cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[s->vex_v], cpu_tmp2_i32); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[reg], cpu_tmp3_i32); - break; -#ifdef TARGET_X86_64 - case MO_64: - tcg_gen_mulu2_i64(tcg_ctx, *cpu_T[0], *cpu_T[1], - *cpu_T[0], *cpu_regs[R_EDX]); - tcg_gen_mov_i64(tcg_ctx, *cpu_regs[s->vex_v], *cpu_T[0]); - tcg_gen_mov_i64(tcg_ctx, *cpu_regs[reg], *cpu_T[1]); - break; -#endif - } - break; - - case 0x3f5: /* pdep Gy, By, Ey */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - /* Note that by zero-extending the mask operand, we - automatically handle zero-extending the result. */ - if (ot == MO_64) { - tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v]); - } else { - tcg_gen_ext32u_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v]); - } - gen_helper_pdep(tcg_ctx, *cpu_regs[reg], *cpu_T[0], *cpu_T[1]); - break; - - case 0x2f5: /* pext Gy, By, Ey */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - /* Note that by zero-extending the mask operand, we - automatically handle zero-extending the result. 
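/*
 * The bextr case above clamps the START and LEN control fields against the
 * operand size: shifts past the width yield zero and over-long lengths an
 * all-ones mask. A 64-bit sketch following the SDM description of BEXTR
 * (bextr64_ref is an illustrative name):
 */
#include <stdint.h>

static uint64_t bextr64_ref(uint64_t src, uint32_t ctrl)
{
    unsigned start = ctrl & 0xff;       /* ctrl[7:0]  */
    unsigned len = (ctrl >> 8) & 0xff;  /* ctrl[15:8] */

    if (start > 63) {
        return 0;                       /* field shifted entirely out */
    }
    src >>= start;
    if (len > 63) {
        return src;                     /* mask saturates to all ones */
    }
    return src & ((1ULL << len) - 1);
}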
*/ - if (ot == MO_64) { - tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v]); - } else { - tcg_gen_ext32u_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v]); - } - gen_helper_pext(tcg_ctx, *cpu_regs[reg], *cpu_T[0], *cpu_T[1]); - break; - - case 0x1f6: /* adcx Gy, Ey */ - case 0x2f6: /* adox Gy, Ey */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) { - goto illegal_op; - } else { - TCGv carry_in, carry_out, zero; - int end_op; - - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - - /* Re-use the carry-out from a previous round. */ - TCGV_UNUSED(carry_in); - carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2); - switch (s->cc_op) { - case CC_OP_ADCX: - if (b == 0x1f6) { - carry_in = cpu_cc_dst; - end_op = CC_OP_ADCX; - } else { - end_op = CC_OP_ADCOX; - } - break; - case CC_OP_ADOX: - if (b == 0x1f6) { - end_op = CC_OP_ADCOX; - } else { - carry_in = cpu_cc_src2; - end_op = CC_OP_ADOX; - } - break; - case CC_OP_ADCOX: - end_op = CC_OP_ADCOX; - carry_in = carry_out; - break; - default: - end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX); - break; - } - /* If we can't reuse carry-out, get it out of EFLAGS. */ - if (TCGV_IS_UNUSED(carry_in)) { - if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) { - gen_compute_eflags(s); - } - carry_in = cpu_tmp0; - tcg_gen_shri_tl(tcg_ctx, carry_in, cpu_cc_src, - ctz32(b == 0x1f6 ? CC_C : CC_O)); - tcg_gen_andi_tl(tcg_ctx, carry_in, carry_in, 1); - } - - switch (ot) { -#ifdef TARGET_X86_64 - case MO_32: - /* If we know TL is 64-bit, and we want a 32-bit - result, just do everything in 64-bit arithmetic. */ - tcg_gen_ext32u_i64(tcg_ctx, *cpu_regs[reg], *cpu_regs[reg]); - tcg_gen_ext32u_i64(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_add_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_regs[reg]); - tcg_gen_add_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], carry_in); - tcg_gen_ext32u_i64(tcg_ctx, *cpu_regs[reg], *cpu_T[0]); - tcg_gen_shri_i64(tcg_ctx, carry_out, *cpu_T[0], 32); - break; -#endif - default: - /* Otherwise compute the carry-out in two steps. 
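/*
 * pdep/pext above are straight helper calls. Reference sketches of the two
 * BMI2 bit permutations (the *_ref names are illustrative): PDEP scatters
 * the low bits of src into the set positions of mask, PEXT gathers them
 * back into the low bits.
 */
#include <stdint.h>

static uint64_t pdep_ref(uint64_t src, uint64_t mask)
{
    uint64_t dst = 0, bit;

    for (bit = 1; mask != 0; bit <<= 1) {
        if (src & bit) {
            dst |= mask & (0 - mask);  /* lowest set bit of mask */
        }
        mask &= mask - 1;              /* clear that bit */
    }
    return dst;
}

static uint64_t pext_ref(uint64_t src, uint64_t mask)
{
    uint64_t dst = 0, bit;

    for (bit = 1; mask != 0; bit <<= 1) {
        if (src & mask & (0 - mask)) {
            dst |= bit;
        }
        mask &= mask - 1;
    }
    return dst;
}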
*/ - zero = tcg_const_tl(tcg_ctx, 0); - tcg_gen_add2_tl(tcg_ctx, *cpu_T[0], carry_out, - *cpu_T[0], zero, - carry_in, zero); - tcg_gen_add2_tl(tcg_ctx, *cpu_regs[reg], carry_out, - *cpu_regs[reg], carry_out, - *cpu_T[0], zero); - tcg_temp_free(tcg_ctx, zero); - break; - } - set_cc_op(s, end_op); - } - break; - - case 0x1f7: /* shlx Gy, Ey, By */ - case 0x2f7: /* sarx Gy, Ey, By */ - case 0x3f7: /* shrx Gy, Ey, By */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - if (ot == MO_64) { - tcg_gen_andi_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v], 63); - } else { - tcg_gen_andi_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v], 31); - } - if (b == 0x1f7) { - tcg_gen_shl_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - } else if (b == 0x2f7) { - if (ot != MO_64) { - tcg_gen_ext32s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - } - tcg_gen_sar_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - } else { - if (ot != MO_64) { - tcg_gen_ext32u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - } - tcg_gen_shr_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - } - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - break; - - case 0x0f3: - case 0x1f3: - case 0x2f3: - case 0x3f3: /* Group 17 */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - - switch (reg & 7) { - case 1: /* blsr By,Ey */ - tcg_gen_neg_tl(tcg_ctx, *cpu_T[1], *cpu_T[0]); - tcg_gen_and_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_mov_reg_v(tcg_ctx, ot, s->vex_v, *cpu_T[0]); - gen_op_update2_cc(tcg_ctx); - set_cc_op(s, CC_OP_BMILGB + ot); - break; - - case 2: /* blsmsk By,Ey */ - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[0]); - tcg_gen_subi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 1); - tcg_gen_xor_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_cc_src); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); - set_cc_op(s, CC_OP_BMILGB + ot); - break; - - case 3: /* blsi By, Ey */ - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[0]); - tcg_gen_subi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 1); - tcg_gen_and_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_cc_src); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); - set_cc_op(s, CC_OP_BMILGB + ot); - break; - - default: - goto illegal_op; - } - break; - - default: - goto illegal_op; - } - break; - - case 0x03a: - case 0x13a: - b = modrm; - modrm = cpu_ldub_code(env, s->pc++); - rm = modrm & 7; - reg = ((modrm >> 3) & 7) | rex_r; - mod = (modrm >> 6) & 3; - if (b1 >= 2) { - goto illegal_op; - } - - sse_fn_eppi = sse_op_table7[b].op[b1]; - if (!sse_fn_eppi) { - goto illegal_op; - } - if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) - goto illegal_op; - - if (sse_fn_eppi == SSE_SPECIAL) { - ot = mo_64_32(s->dflag); - rm = (modrm & 7) | REX_B(s); - if (mod != 3) - gen_lea_modrm(env, s, modrm); - reg = ((modrm >> 3) & 7) | rex_r; - val = cpu_ldub_code(env, s->pc++); - switch (b) { - case 0x14: /* pextrb */ - tcg_gen_ld8u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, - xmm_regs[reg].XMM_B(val & 15))); - if (mod == 3) { - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - } else { - tcg_gen_qemu_st_tl(s->uc, *cpu_T[0], cpu_A0, - s->mem_index, MO_UB); - } - break; - case 0x15: /* pextrw */ - tcg_gen_ld16u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, - xmm_regs[reg].XMM_W(val & 7))); - if (mod == 3) { - gen_op_mov_reg_v(tcg_ctx, ot, 
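/*
 * The adcx/adox fallback above computes the carry-out of
 * dest + src + carry_in with two add2 ops, because a single add cannot
 * expose it. A scalar sketch of the same two-step scheme:
 */
#include <stdint.h>

static uint64_t adc_two_step(uint64_t dest, uint64_t src, unsigned carry_in,
                             unsigned *carry_out)
{
    uint64_t t = src + carry_in;
    unsigned c1 = t < src;   /* carries only when src is all-ones, cin = 1 */
    uint64_t r = dest + t;
    unsigned c2 = r < dest;

    *carry_out = c1 | c2;    /* the two partial carries cannot both be set */
    return r;
}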
rm, *cpu_T[0]); - } else { - tcg_gen_qemu_st_tl(s->uc, *cpu_T[0], cpu_A0, - s->mem_index, MO_LEUW); - } - break; - case 0x16: - if (ot == MO_32) { /* pextrd */ - tcg_gen_ld_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, - offsetof(CPUX86State, - xmm_regs[reg].XMM_L(val & 3))); - if (mod == 3) { - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[rm], cpu_tmp2_i32); - } else { - tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - } - } else { /* pextrq */ -#ifdef TARGET_X86_64 - tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, cpu_env, - offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(val & 1))); - if (mod == 3) { - tcg_gen_mov_i64(tcg_ctx, *cpu_regs[rm], cpu_tmp1_i64); - } else { - tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, - s->mem_index, MO_LEQ); - } -#else - goto illegal_op; -#endif - } - break; - case 0x17: /* extractps */ - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, - xmm_regs[reg].XMM_L(val & 3))); - if (mod == 3) { - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - } else { - tcg_gen_qemu_st_tl(s->uc, *cpu_T[0], cpu_A0, - s->mem_index, MO_LEUL); - } - break; - case 0x20: /* pinsrb */ - if (mod == 3) { - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], rm); - } else { - tcg_gen_qemu_ld_tl(s->uc, *cpu_T[0], cpu_A0, - s->mem_index, MO_UB); - } - tcg_gen_st8_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, - xmm_regs[reg].XMM_B(val & 15))); - break; - case 0x21: /* insertps */ - if (mod == 3) { - tcg_gen_ld_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, - offsetof(CPUX86State,xmm_regs[rm] - .XMM_L((val >> 6) & 3))); - } else { - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - } - tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, - offsetof(CPUX86State,xmm_regs[reg] - .XMM_L((val >> 4) & 3))); - if ((val >> 0) & 1) - tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), - cpu_env, offsetof(CPUX86State, - xmm_regs[reg].XMM_L(0))); - if ((val >> 1) & 1) - tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), - cpu_env, offsetof(CPUX86State, - xmm_regs[reg].XMM_L(1))); - if ((val >> 2) & 1) - tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), - cpu_env, offsetof(CPUX86State, - xmm_regs[reg].XMM_L(2))); - if ((val >> 3) & 1) - tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), - cpu_env, offsetof(CPUX86State, - xmm_regs[reg].XMM_L(3))); - break; - case 0x22: - if (ot == MO_32) { /* pinsrd */ - if (mod == 3) { - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_regs[rm]); - } else { - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - } - tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, - offsetof(CPUX86State, - xmm_regs[reg].XMM_L(val & 3))); - } else { /* pinsrq */ -#ifdef TARGET_X86_64 - if (mod == 3) { - gen_op_mov_v_reg(tcg_ctx, ot, cpu_tmp1_i64, rm); - } else { - tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, - s->mem_index, MO_LEQ); - } - tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, cpu_env, - offsetof(CPUX86State, - xmm_regs[reg].XMM_Q(val & 1))); -#else - goto illegal_op; -#endif - } - break; - } - return; - } - - if (b1) { - op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - if (mod == 3) { - op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); - } else { - op2_offset = offsetof(CPUX86State,xmm_t0); - gen_lea_modrm(env, s, modrm); - gen_ldo_env_A0(s, op2_offset); - } - } else { - op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); - if (mod == 3) { - op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); - } else { - op2_offset = offsetof(CPUX86State,mmx_t0); - 
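/*
 * The pextrb/pinsrb and related cases above mask the immediate down to a
 * valid lane number (val & 15, & 7, & 3, & 1 by lane width). A byte-lane
 * sketch of that addressing:
 */
#include <stdint.h>

static uint8_t pextrb_ref(const uint8_t xmm[16], unsigned imm)
{
    return xmm[imm & 15];   /* lane index wraps modulo the lane count */
}

static void pinsrb_ref(uint8_t xmm[16], unsigned imm, uint8_t v)
{
    xmm[imm & 15] = v;      /* only the selected byte lane is replaced */
}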
gen_lea_modrm(env, s, modrm); - gen_ldq_env_A0(s, op2_offset); - } - } - val = cpu_ldub_code(env, s->pc++); - - if ((b & 0xfc) == 0x60) { /* pcmpXstrX */ - set_cc_op(s, CC_OP_EFLAGS); - - if (s->dflag == MO_64) { - /* The helper must use entire 64-bit gp registers */ - val |= 1 << 8; - } - } - - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); - sse_fn_eppi(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(tcg_ctx, val)); - break; - - case 0x33a: - /* Various integer extensions at 0f 3a f[0-f]. */ - b = modrm | (b1 << 8); - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - - switch (b) { - case 0x3f0: /* rorx Gy,Ey, Ib */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) - || !(s->prefix & PREFIX_VEX) - || s->vex_l != 0) { - goto illegal_op; - } - ot = mo_64_32(s->dflag); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - b = cpu_ldub_code(env, s->pc++); - if (ot == MO_64) { - tcg_gen_rotri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], b & 63); - } else { - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - tcg_gen_rotri_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, b & 31); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_T[0], cpu_tmp2_i32); - } - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - break; - - default: - goto illegal_op; - } - break; - - default: - goto illegal_op; - } - } else { - /* generic MMX or SSE operation */ - switch(b) { - case 0x70: /* pshufx insn */ - case 0xc6: /* pshufx insn */ - case 0xc2: /* compare insns */ - s->rip_offset = 1; - break; - default: - break; - } - if (is_xmm) { - op1_offset = offsetof(CPUX86State,xmm_regs[reg]); - if (mod != 3) { - int sz = 4; - - gen_lea_modrm(env, s, modrm); - op2_offset = offsetof(CPUX86State,xmm_t0); - - if( (b >= 0x50 && b <= 0x5a) || - (b >= 0x5c && b <= 0x5f) || - b == 0xc2 ) { - /* Most sse scalar operations. */ - if (b1 == 2) { - sz = 2; - } else if (b1 == 3) { - sz = 3; - } - } else if( b == 0x2e || /* ucomis[sd] */ - b == 0x2f ) /* comis[sd] */ - { - if (b1 == 0) { - sz = 2; - } else { - sz = 3; - } - } - - switch (sz) { - case 2: - /* 32 bit access */ - gen_op_ld_v(s, MO_32, *cpu_T[0], cpu_A0); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, - offsetof(CPUX86State,xmm_t0.XMM_L(0))); - break; - case 3: - /* 64 bit access */ - gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_D(0))); - break; - default: - /* 128 bit access */ - gen_ldo_env_A0(s, op2_offset); - break; - } - } else { - rm = (modrm & 7) | REX_B(s); - op2_offset = offsetof(CPUX86State,xmm_regs[rm]); - } - } else { - op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - op2_offset = offsetof(CPUX86State,mmx_t0); - gen_ldq_env_A0(s, op2_offset); - } else { - rm = (modrm & 7); - op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); - } - } - switch(b) { - case 0x0f: /* 3DNow! data insns */ - if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) - goto illegal_op; - val = cpu_ldub_code(env, s->pc++); - sse_fn_epp = sse_op_table5[val]; - if (!sse_fn_epp) { - goto illegal_op; - } - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - break; - case 0x70: /* pshufx insn */ - case 0xc6: /* pshufx insn */ - val = cpu_ldub_code(env, s->pc++); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); - /* XXX: introduce a new table? 
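/*
 * The "generic MMX or SSE operation" path above narrows the memory read for
 * scalar forms: ss/sd variants (b1 == 2/3) touch 4/8 bytes while packed
 * forms read the whole 16. A sketch of that width choice (sse_mem_bytes is
 * an illustrative name):
 */
static int sse_mem_bytes(int b, int b1)
{
    if ((b >= 0x50 && b <= 0x5a) || (b >= 0x5c && b <= 0x5f) || b == 0xc2) {
        /* most scalar-capable ops: F3 prefix -> ss, F2 prefix -> sd */
        if (b1 == 2) {
            return 4;
        } else if (b1 == 3) {
            return 8;
        }
    } else if (b == 0x2e || b == 0x2f) {
        return b1 == 0 ? 4 : 8;   /* (u)comiss vs (u)comisd */
    }
    return 16;                    /* full 128-bit access */
}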
*/ - sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp; - sse_fn_ppi(tcg_ctx, cpu_ptr0, cpu_ptr1, tcg_const_i32(tcg_ctx, val)); - break; - case 0xc2: - /* compare insns */ - val = cpu_ldub_code(env, s->pc++); - if (val >= 8) - goto illegal_op; - sse_fn_epp = sse_op_table4[val][b1]; - - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - break; - case 0xf7: - /* maskmov : we must prepare A0 */ - if (mod != 3) - goto illegal_op; - tcg_gen_mov_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EDI]); - gen_extu(tcg_ctx, s->aflag, cpu_A0); - gen_add_A0_ds_seg(s); - - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); - /* XXX: introduce a new table? */ - sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp; - sse_fn_eppt(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0); - break; - default: - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); - tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); - sse_fn_epp(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); - break; - } - if (b == 0x2e || b == 0x2f) { - set_cc_op(s, CC_OP_EFLAGS); - } - } -} - -// Unicorn: sync EFLAGS on demand -static void sync_eflags(DisasContext *s, TCGContext *tcg_ctx) -{ - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - TCGv_ptr cpu_env = tcg_ctx->cpu_env; - - gen_update_cc_op(s); - gen_helper_read_eflags(tcg_ctx, *cpu_T[0], cpu_env); - tcg_gen_st_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, eflags)); -} - -/* -static void restore_eflags(DisasContext *s, TCGContext *tcg_ctx) -{ - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - TCGv_ptr cpu_env = tcg_ctx->cpu_env; - - tcg_gen_ld_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, eflags)); - gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], - tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff)); - set_cc_op(s, CC_OP_EFLAGS); -} -*/ - -/* convert one instruction. s->is_jmp is set if the translation must - be stopped. 
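/*
 * sync_eflags above materializes EFLAGS only when a code hook might observe
 * them: the translator normally keeps flags lazy, recording just cc_op and
 * the operands. A sketch of what such a read-back computes for a 32-bit
 * subtraction (PF/AF omitted for brevity):
 */
#include <stdint.h>

static uint32_t eflags_after_sub32(uint32_t src1, uint32_t src2)
{
    uint32_t dst = src1 - src2;
    uint32_t cf = src1 < src2;                          /* borrow */
    uint32_t zf = dst == 0;
    uint32_t sf = dst >> 31;
    uint32_t of = ((src1 ^ src2) & (src1 ^ dst)) >> 31; /* signed overflow */

    return cf | (zf << 6) | (sf << 7) | (of << 11);     /* EFLAGS positions */
}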
Return the next pc value */ -static target_ulong disas_insn(CPUX86State *env, DisasContext *s, - target_ulong pc_start) // qq -{ - int b, prefixes; - int shift; - TCGMemOp ot, aflag, dflag; - int modrm, reg, rm, mod, op, opreg, val; - target_ulong next_eip, tval; - int rex_w, rex_r; - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_ptr cpu_env = tcg_ctx->cpu_env; - TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; - TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32; - TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; - TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; - TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; - TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; - TCGv cpu_cc_srcT = *(TCGv *)tcg_ctx->cpu_cc_srcT; - TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; - TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; - TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; - TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; - TCGArg *save_opparam_ptr = tcg_ctx->gen_opparam_ptr; - bool cc_op_dirty = s->cc_op_dirty; - bool changed_cc_op = false; - - s->pc = pc_start; - s->prefix = 0; - - // end address tells us to stop emulation - if (s->pc == s->uc->addr_end) { - // imitate the HLT instruction - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_hlt(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); - s->is_jmp = DISAS_TB_JUMP; - return s->pc; - } - - if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { - tcg_gen_debug_insn_start(tcg_ctx, pc_start); - } - - // Unicorn: trace this instruction on request - if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, pc_start)) { - if (s->last_cc_op != s->cc_op) { - sync_eflags(s, tcg_ctx); - s->last_cc_op = s->cc_op; - changed_cc_op = true; - } - gen_uc_tracecode(tcg_ctx, 0xf1f1f1f1, UC_HOOK_CODE_IDX, env->uc, pc_start); - // the callback might want to stop emulation immediately - check_exit_request(tcg_ctx); - } - - prefixes = 0; - s->override = -1; - rex_w = -1; - rex_r = 0; -#ifdef TARGET_X86_64 - s->rex_x = 0; - s->rex_b = 0; - s->uc = env->uc; - tcg_ctx->x86_64_hregs = 0; -#endif - s->rip_offset = 0; /* for relative ip address */ - s->vex_l = 0; - s->vex_v = 0; - next_byte: - b = cpu_ldub_code(env, s->pc); - s->pc++; - /* Collect prefixes. */ - switch (b) { - case 0xf3: - prefixes |= PREFIX_REPZ; - goto next_byte; - case 0xf2: - prefixes |= PREFIX_REPNZ; - goto next_byte; - case 0xf0: - prefixes |= PREFIX_LOCK; - goto next_byte; - case 0x2e: - s->override = R_CS; - goto next_byte; - case 0x36: - s->override = R_SS; - goto next_byte; - case 0x3e: - s->override = R_DS; - goto next_byte; - case 0x26: - s->override = R_ES; - goto next_byte; - case 0x64: - s->override = R_FS; - goto next_byte; - case 0x65: - s->override = R_GS; - goto next_byte; - case 0x66: - prefixes |= PREFIX_DATA; - goto next_byte; - case 0x67: - prefixes |= PREFIX_ADR; - goto next_byte; -#ifdef TARGET_X86_64 - case 0x40: - case 0x41: - case 0x42: - case 0x43: - case 0x44: - case 0x45: - case 0x46: - case 0x47: - case 0x48: - case 0x49: - case 0x4a: - case 0x4b: - case 0x4c: - case 0x4d: - case 0x4e: - case 0x4f: - if (CODE64(s)) { - /* REX prefix */ - rex_w = (b >> 3) & 1; - rex_r = (b & 0x4) << 1; - s->rex_x = (b & 0x2) << 2; - REX_B(s) = (b & 0x1) << 3; - tcg_ctx->x86_64_hregs = 1; /* select uniform byte register addressing */ - goto next_byte; - } - break; -#endif - case 0xc5: /* 2-byte VEX */ - case 0xc4: /* 3-byte VEX */ - /* VEX prefixes cannot be used except in 32-bit mode. - Otherwise the instruction is LES or LDS. 
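/*
 * The REX case above pre-shifts each extension bit to bit position 3 so it
 * can be OR-ed straight into register numbers. The same unpacking, spelled
 * out as a standalone sketch:
 */
#include <stdint.h>

static void rex_unpack(uint8_t rex, int *w, int *r, int *x, int *b)
{
    *w = (rex >> 3) & 1;        /* REX.W: 64-bit operand size */
    *r = ((rex >> 2) & 1) << 3; /* REX.R: extends ModRM.reg */
    *x = ((rex >> 1) & 1) << 3; /* REX.X: extends SIB.index */
    *b = (rex & 1) << 3;        /* REX.B: extends ModRM.rm / base / opcode reg */
}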
*/ - if (s->code32 && !s->vm86) { - static const int pp_prefix[4] = { - 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ - }; - int vex3, vex2 = cpu_ldub_code(env, s->pc); - - if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) { - /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b, - otherwise the instruction is LES or LDS. */ - break; - } - s->pc++; - - /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */ - if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ - | PREFIX_LOCK | PREFIX_DATA)) { - goto illegal_op; - } -#ifdef TARGET_X86_64 - if (tcg_ctx->x86_64_hregs) { - goto illegal_op; - } -#endif - rex_r = (~vex2 >> 4) & 8; - if (b == 0xc5) { - vex3 = vex2; - b = cpu_ldub_code(env, s->pc++) | 0x100; - } else { -#ifdef TARGET_X86_64 - s->rex_x = (~vex2 >> 3) & 8; - s->rex_b = (~vex2 >> 2) & 8; -#endif - vex3 = cpu_ldub_code(env, s->pc++); - rex_w = (vex3 >> 7) & 1; - switch (vex2 & 0x1f) { - case 0x01: /* Implied 0f leading opcode bytes. */ - b = cpu_ldub_code(env, s->pc++) | 0x100; - break; - case 0x02: /* Implied 0f 38 leading opcode bytes. */ - b = 0x138; - break; - case 0x03: /* Implied 0f 3a leading opcode bytes. */ - b = 0x13a; - break; - default: /* Reserved for future use. */ - goto illegal_op; - } - } - s->vex_v = (~vex3 >> 3) & 0xf; - s->vex_l = (vex3 >> 2) & 1; - prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX; - } - break; - } - - /* Post-process prefixes. */ - if (CODE64(s)) { - /* In 64-bit mode, the default data size is 32-bit. Select 64-bit - data with rex_w, and 16-bit data with 0x66; rex_w takes precedence - over 0x66 if both are present. */ - dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); - /* In 64-bit mode, 0x67 selects 32-bit addressing. */ - aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64); - } else { - /* In 16/32-bit mode, 0x66 selects the opposite data size. */ - if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) { // qq - dflag = MO_32; - } else { - dflag = MO_16; - } - /* In 16/32-bit mode, 0x67 selects the opposite addressing. */ - if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) { - aflag = MO_32; - } else { - aflag = MO_16; - } - } - - s->prefix = prefixes; - s->aflag = aflag; - s->dflag = dflag; - - /* lock generation */ - if (prefixes & PREFIX_LOCK) - gen_helper_lock(tcg_ctx, cpu_env); - - /* now check op code */ - reswitch: - switch(b) { - case 0x0f: - /**************************/ - /* extended op code */ - b = cpu_ldub_code(env, s->pc++) | 0x100; - goto reswitch; - - /**************************/ - /* arith & logic */ - case 0x00: case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: //case 0x00 ... 0x05: - case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: //case 0x08 ... 0x0d: - case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: //case 0x10 ... 0x15: - case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: //case 0x18 ... 0x1d: - case 0x20: case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: //case 0x20 ... 0x25: - case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2c: case 0x2d: //case 0x28 ... 0x2d: - case 0x30: case 0x31: case 0x32: case 0x33: case 0x34: case 0x35: //case 0x30 ... 0x35: - case 0x38: case 0x39: case 0x3a: case 0x3b: case 0x3c: case 0x3d: //case 0x38 ... 
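/*
 * The prefix post-processing above reduces to a small decision table. A
 * sketch of the resulting operand width in bits, treating rex_w and the
 * 0x66 prefix as booleans (REX.W beats 0x66 in 64-bit mode; elsewhere 0x66
 * toggles the default size):
 */
static int operand_bits(int code64, int code32, int rex_w, int prefix_data)
{
    if (code64) {
        return rex_w ? 64 : (prefix_data ? 16 : 32);
    }
    return (code32 ^ prefix_data) ? 32 : 16;
}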
0x3d: - { - int op, f, val; - op = (b >> 3) & 7; - f = (b >> 1) & 3; - - ot = mo_b_d(b, dflag); - - switch(f) { - case 0: /* OP Ev, Gv */ - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - opreg = OR_TMP0; - } else if (op == OP_XORL && rm == reg) { - xor_zero: - /* xor reg, reg optimisation */ - set_cc_op(s, CC_OP_CLR); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - break; - } else { - opreg = rm; - } - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], reg); - gen_op(s, op, ot, opreg); - break; - case 1: /* OP Gv, Ev */ - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - reg = ((modrm >> 3) & 7) | rex_r; - rm = (modrm & 7) | REX_B(s); - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); - } else if (op == OP_XORL && rm == reg) { - goto xor_zero; - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], rm); - } - gen_op(s, op, ot, reg); - break; - case 2: /* OP A, Iv */ - val = insn_get(env, s, ot); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); - gen_op(s, op, ot, OR_EAX); - break; - } - } - break; - - case 0x82: - if (CODE64(s)) - goto illegal_op; - case 0x80: /* GRP1 */ - case 0x81: - case 0x83: - { - int val; - - ot = mo_b_d(b, dflag); - - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - op = (modrm >> 3) & 7; - - if (mod != 3) { - if (b == 0x83) - s->rip_offset = 1; - else - s->rip_offset = insn_const_size(ot); - gen_lea_modrm(env, s, modrm); - opreg = OR_TMP0; - } else { - opreg = rm; - } - - switch(b) { - default: - case 0x80: - case 0x81: - case 0x82: - val = insn_get(env, s, ot); - break; - case 0x83: - val = (int8_t)insn_get(env, s, MO_8); - break; - } - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); - gen_op(s, op, ot, opreg); - } - break; - - /**************************/ - /* inc, dec, and other misc arith */ - case 0x40: case 0x41: case 0x42: case 0x43: - case 0x44: case 0x45: case 0x46: case 0x47: //case 0x40 ... 0x47: /* inc Gv */ - ot = dflag; - gen_inc(s, ot, OR_EAX + (b & 7), 1); - break; - case 0x48: case 0x49: case 0x4a: case 0x4b: - case 0x4c: case 0x4d: case 0x4e: case 0x4f: //case 0x48 ... 
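/*
 * Nearly every case above begins with the same ModRM unpacking. A
 * standalone sketch of the byte's layout, with rex_r/rex_b arriving already
 * shifted to bit 3 as in the REX decoding above:
 */
#include <stdint.h>

static void modrm_unpack(uint8_t modrm, int rex_r, int rex_b,
                         int *mod, int *reg, int *rm)
{
    *mod = (modrm >> 6) & 3;           /* 3 = register form, else memory */
    *reg = ((modrm >> 3) & 7) | rex_r; /* operand or opcode extension */
    *rm  = (modrm & 7) | rex_b;        /* second operand / base */
}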
0x4f: /* dec Gv */ - ot = dflag; - gen_inc(s, ot, OR_EAX + (b & 7), -1); - break; - case 0xf6: /* GRP3 */ - case 0xf7: - ot = mo_b_d(b, dflag); - - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - op = (modrm >> 3) & 7; - if (mod != 3) { - if (op == 0) - s->rip_offset = insn_const_size(ot); - gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); - } - - switch(op) { - case 0: /* test */ - val = insn_get(env, s, ot); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); - gen_op_testl_T0_T1_cc(tcg_ctx); - set_cc_op(s, CC_OP_LOGICB + ot); - break; - case 2: /* not */ - tcg_gen_not_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - if (mod != 3) { - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - } - break; - case 3: /* neg */ - tcg_gen_neg_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - if (mod != 3) { - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - } - gen_op_update_neg_cc(tcg_ctx); - set_cc_op(s, CC_OP_SUBB + ot); - break; - case 4: /* mul */ - switch(ot) { - case MO_8: - gen_op_mov_v_reg(tcg_ctx, MO_8, *cpu_T[1], R_EAX); - tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[1], *cpu_T[1]); - /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); - tcg_gen_andi_tl(tcg_ctx, cpu_cc_src, *cpu_T[0], 0xff00); - set_cc_op(s, CC_OP_MULB); - break; - case MO_16: - gen_op_mov_v_reg(tcg_ctx, MO_16, *cpu_T[1], R_EAX); - tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[1], *cpu_T[1]); - /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); - tcg_gen_shri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 16); - gen_op_mov_reg_v(tcg_ctx, MO_16, R_EDX, *cpu_T[0]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[0]); - set_cc_op(s, CC_OP_MULW); - break; - default: - case MO_32: - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_regs[R_EAX]); - tcg_gen_mulu2_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp3_i32, - cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[R_EAX], cpu_tmp2_i32); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[R_EDX], cpu_tmp3_i32); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[R_EAX]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_regs[R_EDX]); - set_cc_op(s, CC_OP_MULL); - break; -#ifdef TARGET_X86_64 - case MO_64: - tcg_gen_mulu2_i64(tcg_ctx, *cpu_regs[R_EAX], *cpu_regs[R_EDX], - *cpu_T[0], *cpu_regs[R_EAX]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[R_EAX]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_regs[R_EDX]); - set_cc_op(s, CC_OP_MULQ); - break; -#endif - } - break; - case 5: /* imul */ - switch(ot) { - case MO_8: - gen_op_mov_v_reg(tcg_ctx, MO_8, *cpu_T[1], R_EAX); - tcg_gen_ext8s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_ext8s_tl(tcg_ctx, *cpu_T[1], *cpu_T[1]); - /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); - tcg_gen_ext8s_tl(tcg_ctx, cpu_tmp0, *cpu_T[0]); - tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, 
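/*
 * The unsigned mul cases above set CF/OF from the high half of the product
 * (kept in cc_src). A byte-width sketch of the rule CF = OF = (AH != 0):
 */
#include <stdint.h>

static uint16_t mul8_ref(uint8_t al, uint8_t src, int *cf_of)
{
    uint16_t ax = (uint16_t)al * src;

    *cf_of = (ax & 0xff00) != 0;  /* product overflowed 8 bits */
    return ax;                    /* the whole product lands in AX */
}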
*cpu_T[0], cpu_tmp0); - set_cc_op(s, CC_OP_MULB); - break; - case MO_16: - gen_op_mov_v_reg(tcg_ctx, MO_16, *cpu_T[1], R_EAX); - tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[1], *cpu_T[1]); - /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); - tcg_gen_ext16s_tl(tcg_ctx, cpu_tmp0, *cpu_T[0]); - tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, *cpu_T[0], cpu_tmp0); - tcg_gen_shri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 16); - gen_op_mov_reg_v(tcg_ctx, MO_16, R_EDX, *cpu_T[0]); - set_cc_op(s, CC_OP_MULW); - break; - default: - case MO_32: - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_regs[R_EAX]); - tcg_gen_muls2_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp3_i32, - cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[R_EAX], cpu_tmp2_i32); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[R_EDX], cpu_tmp3_i32); - tcg_gen_sari_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, 31); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[R_EAX]); - tcg_gen_sub_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(tcg_ctx, cpu_cc_src, cpu_tmp2_i32); - set_cc_op(s, CC_OP_MULL); - break; -#ifdef TARGET_X86_64 - case MO_64: - tcg_gen_muls2_i64(tcg_ctx, *cpu_regs[R_EAX], *cpu_regs[R_EDX], - *cpu_T[0], *cpu_regs[R_EAX]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[R_EAX]); - tcg_gen_sari_tl(tcg_ctx, cpu_cc_src, *cpu_regs[R_EAX], 63); - tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, *cpu_regs[R_EDX]); - set_cc_op(s, CC_OP_MULQ); - break; -#endif - } - break; - case 6: /* div */ - switch(ot) { - case MO_8: - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_divb_AL(tcg_ctx, cpu_env, *cpu_T[0]); - break; - case MO_16: - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_divw_AX(tcg_ctx, cpu_env, *cpu_T[0]); - break; - default: - case MO_32: - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_divl_EAX(tcg_ctx, cpu_env, *cpu_T[0]); - break; -#ifdef TARGET_X86_64 - case MO_64: - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_divq_EAX(tcg_ctx, cpu_env, *cpu_T[0]); - break; -#endif - } - break; - case 7: /* idiv */ - switch(ot) { - case MO_8: - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_idivb_AL(tcg_ctx, cpu_env, *cpu_T[0]); - break; - case MO_16: - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_idivw_AX(tcg_ctx, cpu_env, *cpu_T[0]); - break; - default: - case MO_32: - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_idivl_EAX(tcg_ctx, cpu_env, *cpu_T[0]); - break; -#ifdef TARGET_X86_64 - case MO_64: - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_idivq_EAX(tcg_ctx, cpu_env, *cpu_T[0]); - break; -#endif - } - break; - default: - goto illegal_op; - } - break; - - case 0xfe: /* GRP4 */ - case 0xff: /* GRP5 */ - ot = mo_b_d(b, dflag); - - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - op = (modrm >> 3) & 7; - if (op >= 2 && b == 0xfe) { - goto illegal_op; - } - if (CODE64(s)) { - if (op == 2 || op == 4) { - /* operand size for jumps is 64 bit */ - ot = MO_64; - } else if (op == 3 || op == 5) { - ot = dflag != MO_16 ? 
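/*
 * The signed imul cases above derive CF/OF by subtracting the sign-extended
 * low half from the full result (cc_src): the flags are set exactly when
 * the product does not fit the destination width. A 16-bit sketch:
 */
#include <stdint.h>

static int imul16_overflows(int16_t a, int16_t b)
{
    int32_t full = (int32_t)a * (int32_t)b;

    return full != (int16_t)full;  /* nonzero iff high half != sext(low) */
}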
MO_32 + (rex_w == 1) : MO_16; - } else if (op == 6) { - /* default push size is 64 bit */ - ot = mo_pushpop(s, dflag); - } - } - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - if (op >= 2 && op != 3 && op != 5) - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); - } - - switch(op) { - case 0: /* inc Ev */ - if (mod != 3) - opreg = OR_TMP0; - else - opreg = rm; - gen_inc(s, ot, opreg, 1); - break; - case 1: /* dec Ev */ - if (mod != 3) - opreg = OR_TMP0; - else - opreg = rm; - gen_inc(s, ot, opreg, -1); - break; - case 2: /* call Ev */ - /* XXX: optimize if memory (no 'and' is necessary) */ - if (dflag == MO_16) { - tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - } - next_eip = s->pc - s->cs_base; - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], next_eip); - gen_push_v(s, *cpu_T[1]); - gen_op_jmp_v(tcg_ctx, *cpu_T[0]); - gen_eob(s); - break; - case 3: /* lcall Ev */ - gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); - gen_add_A0_im(s, 1 << ot); - gen_op_ld_v(s, MO_16, *cpu_T[0], cpu_A0); - do_lcall: - if (s->pe && !s->vm86) { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - gen_helper_lcall_protected(tcg_ctx, cpu_env, cpu_tmp2_i32, *cpu_T[1], - tcg_const_i32(tcg_ctx, dflag - 1), - tcg_const_i32(tcg_ctx, s->pc - pc_start)); - } else { - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - gen_helper_lcall_real(tcg_ctx, cpu_env, cpu_tmp2_i32, *cpu_T[1], - tcg_const_i32(tcg_ctx, dflag - 1), - tcg_const_i32(tcg_ctx, s->pc - s->cs_base)); - } - gen_eob(s); - break; - case 4: /* jmp Ev */ - if (dflag == MO_16) { - tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - } - gen_op_jmp_v(tcg_ctx, *cpu_T[0]); - gen_eob(s); - break; - case 5: /* ljmp Ev */ - gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); - gen_add_A0_im(s, 1 << ot); - gen_op_ld_v(s, MO_16, *cpu_T[0], cpu_A0); - do_ljmp: - if (s->pe && !s->vm86) { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - gen_helper_ljmp_protected(tcg_ctx, cpu_env, cpu_tmp2_i32, *cpu_T[1], - tcg_const_i32(tcg_ctx, s->pc - pc_start)); - } else { - gen_op_movl_seg_T0_vm(tcg_ctx, R_CS); - gen_op_jmp_v(tcg_ctx, *cpu_T[1]); - } - gen_eob(s); - break; - case 6: /* push Ev */ - gen_push_v(s, *cpu_T[0]); - break; - default: - goto illegal_op; - } - break; - - case 0x84: /* test Ev, Gv */ - case 0x85: - ot = mo_b_d(b, dflag); - - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], reg); - gen_op_testl_T0_T1_cc(tcg_ctx); - set_cc_op(s, CC_OP_LOGICB + ot); - break; - - case 0xa8: /* test eAX, Iv */ - case 0xa9: - ot = mo_b_d(b, dflag); - val = insn_get(env, s, ot); - - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], OR_EAX); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); - gen_op_testl_T0_T1_cc(tcg_ctx); - set_cc_op(s, CC_OP_LOGICB + ot); - break; - - case 0x98: /* CWDE/CBW */ - switch (dflag) { -#ifdef TARGET_X86_64 - case MO_64: - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EAX); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - gen_op_mov_reg_v(tcg_ctx, MO_64, R_EAX, *cpu_T[0]); - break; -#endif - case MO_32: - gen_op_mov_v_reg(tcg_ctx, MO_16, *cpu_T[0], R_EAX); - tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - gen_op_mov_reg_v(tcg_ctx, MO_32, R_EAX, *cpu_T[0]); - break; - case MO_16: - gen_op_mov_v_reg(tcg_ctx, MO_8, *cpu_T[0], R_EAX); - tcg_gen_ext8s_tl(tcg_ctx, *cpu_T[0], 
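/*
 * call Ev above pushes the follow-on instruction address and only then
 * transfers control. A toy sketch of that ordering over a flat guest
 * memory buffer (call_near_ref and the flat-memory model are illustrative
 * assumptions, not the emulator's real memory interface):
 */
#include <stdint.h>
#include <string.h>

static uint64_t call_near_ref(uint8_t *mem, uint64_t *rsp,
                              uint64_t next_ip, uint64_t target)
{
    *rsp -= 8;                        /* 64-bit push */
    memcpy(mem + *rsp, &next_ip, 8);  /* return address = next instruction */
    return target;                    /* value for the new RIP */
}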
*cpu_T[0]); - gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); - break; - default: - tcg_abort(); - } - break; - case 0x99: /* CDQ/CWD */ - switch (dflag) { -#ifdef TARGET_X86_64 - case MO_64: - gen_op_mov_v_reg(tcg_ctx, MO_64, *cpu_T[0], R_EAX); - tcg_gen_sari_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 63); - gen_op_mov_reg_v(tcg_ctx, MO_64, R_EDX, *cpu_T[0]); - break; -#endif - case MO_32: - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EAX); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_sari_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 31); - gen_op_mov_reg_v(tcg_ctx, MO_32, R_EDX, *cpu_T[0]); - break; - case MO_16: - gen_op_mov_v_reg(tcg_ctx, MO_16, *cpu_T[0], R_EAX); - tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_sari_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 15); - gen_op_mov_reg_v(tcg_ctx, MO_16, R_EDX, *cpu_T[0]); - break; - default: - tcg_abort(); - } - break; - case 0x1af: /* imul Gv, Ev */ - case 0x69: /* imul Gv, Ev, I */ - case 0x6b: - ot = dflag; - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - if (b == 0x69) - s->rip_offset = insn_const_size(ot); - else if (b == 0x6b) - s->rip_offset = 1; - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - if (b == 0x69) { - val = insn_get(env, s, ot); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); - } else if (b == 0x6b) { - val = (int8_t)insn_get(env, s, MO_8); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], reg); - } - switch (ot) { -#ifdef TARGET_X86_64 - case MO_64: - tcg_gen_muls2_i64(tcg_ctx, *cpu_regs[reg], *cpu_T[1], *cpu_T[0], *cpu_T[1]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[reg]); - tcg_gen_sari_tl(tcg_ctx, cpu_cc_src, cpu_cc_dst, 63); - tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, *cpu_T[1]); - break; -#endif - case MO_32: - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_T[1]); - tcg_gen_muls2_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp3_i32, - cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[reg], cpu_tmp2_i32); - tcg_gen_sari_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, 31); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[reg]); - tcg_gen_sub_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); - tcg_gen_extu_i32_tl(tcg_ctx, cpu_cc_src, cpu_tmp2_i32); - break; - default: - tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[1], *cpu_T[1]); - /* XXX: use 32 bit mul which could be faster */ - tcg_gen_mul_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); - tcg_gen_ext16s_tl(tcg_ctx, cpu_tmp0, *cpu_T[0]); - tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, *cpu_T[0], cpu_tmp0); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - break; - } - set_cc_op(s, CC_OP_MULB + ot); - break; - case 0x1c0: - case 0x1c1: /* xadd Ev, Gv */ - ot = mo_b_d(b, dflag); - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - mod = (modrm >> 6) & 3; - if (mod == 3) { - rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], rm); - tcg_gen_add_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[1]); - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - } else { - gen_lea_modrm(env, s, modrm); - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); - gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); - tcg_gen_add_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - gen_op_mov_reg_v(tcg_ctx, 
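/*
 * The CDQ/CWD cases above broadcast the accumulator's sign bit across the
 * data register (the sari-by-31/15 sequences). A portable 32-bit sketch:
 */
#include <stdint.h>

static uint32_t cdq_edx(uint32_t eax)
{
    return (eax & 0x80000000u) ? 0xffffffffu : 0u;  /* EDX after CDQ */
}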
ot, reg, *cpu_T[1]); - } - gen_op_update2_cc(tcg_ctx); - set_cc_op(s, CC_OP_ADDB + ot); - break; - case 0x1b0: - case 0x1b1: /* cmpxchg Ev, Gv */ - { - int label1, label2; - TCGv t0, t1, t2, a0; - - ot = mo_b_d(b, dflag); - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - mod = (modrm >> 6) & 3; - t0 = tcg_temp_local_new(tcg_ctx); - t1 = tcg_temp_local_new(tcg_ctx); - t2 = tcg_temp_local_new(tcg_ctx); - a0 = tcg_temp_local_new(tcg_ctx); - gen_op_mov_v_reg(tcg_ctx, ot, t1, reg); - if (mod == 3) { - rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(tcg_ctx, ot, t0, rm); - } else { - gen_lea_modrm(env, s, modrm); - tcg_gen_mov_tl(tcg_ctx, a0, cpu_A0); - gen_op_ld_v(s, ot, t0, a0); - rm = 0; /* avoid warning */ - } - label1 = gen_new_label(tcg_ctx); - tcg_gen_mov_tl(tcg_ctx, t2, *cpu_regs[R_EAX]); - gen_extu(tcg_ctx, ot, t0); - gen_extu(tcg_ctx, ot, t2); - tcg_gen_brcond_tl(tcg_ctx, TCG_COND_EQ, t2, t0, label1); - label2 = gen_new_label(tcg_ctx); - if (mod == 3) { - gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, t0); - tcg_gen_br(tcg_ctx, label2); - gen_set_label(tcg_ctx, label1); - gen_op_mov_reg_v(tcg_ctx, ot, rm, t1); - } else { - /* perform no-op store cycle like physical cpu; must be - before changing accumulator to ensure idempotency if - the store faults and the instruction is restarted */ - gen_op_st_v(s, ot, t0, a0); - gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, t0); - tcg_gen_br(tcg_ctx, label2); - gen_set_label(tcg_ctx, label1); - gen_op_st_v(s, ot, t1, a0); - } - gen_set_label(tcg_ctx, label2); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, t0); - tcg_gen_mov_tl(tcg_ctx, cpu_cc_srcT, t2); - tcg_gen_sub_tl(tcg_ctx, cpu_cc_dst, t2, t0); - set_cc_op(s, CC_OP_SUBB + ot); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); - tcg_temp_free(tcg_ctx, t2); - tcg_temp_free(tcg_ctx, a0); - } - break; - case 0x1c7: /* cmpxchg8b */ - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - if ((mod == 3) || ((modrm & 0x38) != 0x8)) - goto illegal_op; -#ifdef TARGET_X86_64 - if (dflag == MO_64) { - if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) - goto illegal_op; - gen_jmp_im(s, pc_start - s->cs_base); - gen_update_cc_op(s); - gen_lea_modrm(env, s, modrm); - gen_helper_cmpxchg16b(tcg_ctx, cpu_env, cpu_A0); - } else -#endif - { - if (!(s->cpuid_features & CPUID_CX8)) - goto illegal_op; - gen_jmp_im(s, pc_start - s->cs_base); - gen_update_cc_op(s); - gen_lea_modrm(env, s, modrm); - gen_helper_cmpxchg8b(tcg_ctx, cpu_env, cpu_A0); - } - set_cc_op(s, CC_OP_EFLAGS); - break; - - /**************************/ - /* push/pop */ - case 0x50: case 0x51: case 0x52: case 0x53: - case 0x54: case 0x55: case 0x56: case 0x57: //case 0x50 ... 0x57: /* push */ - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], (b & 7) | REX_B(s)); - gen_push_v(s, *cpu_T[0]); - break; - case 0x58: case 0x59: case 0x5a: case 0x5b: - case 0x5c: case 0x5d: case 0x5e: case 0x5f: //case 0x58 ... 
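/*
 * cmpxchg above emits a store on *both* paths of the memory form (a no-op
 * store of the old value on mismatch) so a faulting store cannot leave the
 * accumulator already updated. The register-form semantics in brief:
 */
#include <stdint.h>

static int cmpxchg_ref(uint64_t *rax, uint64_t *dst, uint64_t src)
{
    if (*rax == *dst) {
        *dst = src;     /* match: write the new value, ZF = 1 */
        return 1;
    }
    *rax = *dst;        /* mismatch: old value into the accumulator */
    return 0;           /* ZF = 0 */
}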
0x5f: /* pop */ - ot = gen_pop_T0(s); - /* NOTE: order is important for pop %sp */ - gen_pop_update(s, ot); - gen_op_mov_reg_v(tcg_ctx, ot, (b & 7) | REX_B(s), *cpu_T[0]); - break; - case 0x60: /* pusha */ - if (CODE64(s)) - goto illegal_op; - gen_pusha(s); - break; - case 0x61: /* popa */ - if (CODE64(s)) - goto illegal_op; - gen_popa(s); - break; - case 0x68: /* push Iv */ - case 0x6a: - ot = mo_pushpop(s, dflag); - if (b == 0x68) - val = insn_get(env, s, ot); - else - val = (int8_t)insn_get(env, s, MO_8); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); - gen_push_v(s, *cpu_T[0]); - break; - case 0x8f: /* pop Ev */ - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - ot = gen_pop_T0(s); - if (mod == 3) { - /* NOTE: order is important for pop %sp */ - gen_pop_update(s, ot); - rm = (modrm & 7) | REX_B(s); - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - } else { - /* NOTE: order is important too for MMU exceptions */ - s->popl_esp_hack = 1 << ot; - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); - s->popl_esp_hack = 0; - gen_pop_update(s, ot); - } - break; - case 0xc8: /* enter */ - { - int level; - val = cpu_lduw_code(env, s->pc); - s->pc += 2; - level = cpu_ldub_code(env, s->pc++); - gen_enter(s, val, level); - } - break; - case 0xc9: /* leave */ - /* XXX: exception not precise (ESP is updated before potential exception) */ - if (CODE64(s)) { - gen_op_mov_v_reg(tcg_ctx, MO_64, *cpu_T[0], R_EBP); - gen_op_mov_reg_v(tcg_ctx, MO_64, R_ESP, *cpu_T[0]); - } else if (s->ss32) { - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EBP); - gen_op_mov_reg_v(tcg_ctx, MO_32, R_ESP, *cpu_T[0]); - } else { - gen_op_mov_v_reg(tcg_ctx, MO_16, *cpu_T[0], R_EBP); - gen_op_mov_reg_v(tcg_ctx, MO_16, R_ESP, *cpu_T[0]); - } - ot = gen_pop_T0(s); - gen_op_mov_reg_v(tcg_ctx, ot, R_EBP, *cpu_T[0]); - gen_pop_update(s, ot); - break; - case 0x06: /* push es */ - case 0x0e: /* push cs */ - case 0x16: /* push ss */ - case 0x1e: /* push ds */ - if (CODE64(s)) - goto illegal_op; - gen_op_movl_T0_seg(tcg_ctx, b >> 3); - gen_push_v(s, *cpu_T[0]); - break; - case 0x1a0: /* push fs */ - case 0x1a8: /* push gs */ - gen_op_movl_T0_seg(tcg_ctx, (b >> 3) & 7); - gen_push_v(s, *cpu_T[0]); - break; - case 0x07: /* pop es */ - case 0x17: /* pop ss */ - case 0x1f: /* pop ds */ - if (CODE64(s)) - goto illegal_op; - reg = b >> 3; - ot = gen_pop_T0(s); - gen_movl_seg_T0(s, reg, pc_start - s->cs_base); - gen_pop_update(s, ot); - if (reg == R_SS) { - /* if reg == SS, inhibit interrupts/trace. 
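/*
 * The pop cases above read and adjust the stack pointer before writing the
 * destination ("order is important for pop %sp"): a pop into the stack
 * pointer must keep the loaded value, not the increment. A sketch over a
 * flat memory buffer (an illustrative model, as with call_near_ref above):
 */
#include <stdint.h>
#include <string.h>

static uint64_t pop64_ref(const uint8_t *mem, uint64_t *rsp)
{
    uint64_t val;

    memcpy(&val, mem + *rsp, 8);  /* read at the old top of stack */
    *rsp += 8;                    /* then bump RSP */
    return val;                   /* caller writes this back last, so a
                                     pop %rsp overwrites the increment */
}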
*/ - /* If several instructions disable interrupts, only the - _first_ does it */ - if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) - gen_helper_set_inhibit_irq(tcg_ctx, cpu_env); - s->tf = 0; - } - if (s->is_jmp) { - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } - break; - case 0x1a1: /* pop fs */ - case 0x1a9: /* pop gs */ - ot = gen_pop_T0(s); - gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base); - gen_pop_update(s, ot); - if (s->is_jmp) { - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } - break; - - /**************************/ - /* mov */ - case 0x88: - case 0x89: /* mov Gv, Ev */ - ot = mo_b_d(b, dflag); - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - - /* generate a generic store */ - gen_ldst_modrm(env, s, modrm, ot, reg, 1); - break; - case 0xc6: - case 0xc7: /* mov Ev, Iv */ - ot = mo_b_d(b, dflag); - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - reg = ((modrm >> 3) & 7) | rex_r; - if (mod != 3) { - if (reg != 0) - goto illegal_op; - s->rip_offset = insn_const_size(ot); - gen_lea_modrm(env, s, modrm); - } else { - if (reg != 0 && reg != 7) - goto illegal_op; - } - val = insn_get(env, s, ot); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); - if (mod != 3) { - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_reg_v(tcg_ctx, ot, (modrm & 7) | REX_B(s), *cpu_T[0]); - } - break; - case 0x8a: - case 0x8b: /* mov Ev, Gv */ - ot = mo_b_d(b, dflag); - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - break; - case 0x8e: /* mov seg, Gv */ - modrm = cpu_ldub_code(env, s->pc++); - reg = (modrm >> 3) & 7; - if (reg >= 6 || reg == R_CS) - goto illegal_op; - gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - gen_movl_seg_T0(s, reg, pc_start - s->cs_base); - if (reg == R_SS) { - /* if reg == SS, inhibit interrupts/trace */ - /* If several instructions disable interrupts, only the - _first_ does it */ - if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) - gen_helper_set_inhibit_irq(tcg_ctx, cpu_env); - s->tf = 0; - } - if (s->is_jmp) { - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } - break; - case 0x8c: /* mov Gv, seg */ - modrm = cpu_ldub_code(env, s->pc++); - reg = (modrm >> 3) & 7; - mod = (modrm >> 6) & 3; - if (reg >= 6) - goto illegal_op; - gen_op_movl_T0_seg(tcg_ctx, reg); - ot = mod == 3 ? dflag : MO_16; - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); - break; - - case 0x1b6: /* movzbS Gv, Eb */ - case 0x1b7: /* movzwS Gv, Eb */ - case 0x1be: /* movsbS Gv, Eb */ - case 0x1bf: /* movswS Gv, Eb */ - { - TCGMemOp d_ot; - TCGMemOp s_ot; - - /* d_ot is the size of destination */ - d_ot = dflag; - /* ot is the size of source */ - ot = (b & 1) + MO_8; - /* s_ot is the sign+size of source */ - s_ot = b & 8 ? 
MO_SIGN | ot : ot; - - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - - if (mod == 3) { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); - switch (s_ot) { - case MO_UB: - tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - break; - case MO_SB: - tcg_gen_ext8s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - break; - case MO_UW: - tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - break; - default: - case MO_SW: - tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - break; - } - gen_op_mov_reg_v(tcg_ctx, d_ot, reg, *cpu_T[0]); - } else { - gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, s_ot, *cpu_T[0], cpu_A0); - gen_op_mov_reg_v(tcg_ctx, d_ot, reg, *cpu_T[0]); - } - } - break; - - case 0x8d: /* lea */ - ot = dflag; - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - if (mod == 3) - goto illegal_op; - reg = ((modrm >> 3) & 7) | rex_r; - /* we must ensure that no segment is added */ - s->override = -1; - val = s->addseg; - s->addseg = 0; - gen_lea_modrm(env, s, modrm); - s->addseg = val; - gen_op_mov_reg_v(tcg_ctx, ot, reg, cpu_A0); - break; - - case 0xa0: /* mov EAX, Ov */ - case 0xa1: - case 0xa2: /* mov Ov, EAX */ - case 0xa3: - { - target_ulong offset_addr; - - ot = mo_b_d(b, dflag); - switch (s->aflag) { -#ifdef TARGET_X86_64 - case MO_64: - offset_addr = cpu_ldq_code(env, s->pc); - s->pc += 8; - break; -#endif - default: - offset_addr = insn_get(env, s, s->aflag); - break; - } - tcg_gen_movi_tl(tcg_ctx, cpu_A0, offset_addr); - gen_add_A0_ds_seg(s); - if ((b & 2) == 0) { - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, *cpu_T[0]); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], R_EAX); - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - } - } - break; - case 0xd7: /* xlat */ - tcg_gen_mov_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EBX]); - tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[0], *cpu_regs[R_EAX]); - tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, *cpu_T[0]); - gen_extu(tcg_ctx, s->aflag, cpu_A0); - gen_add_A0_ds_seg(s); - gen_op_ld_v(s, MO_8, *cpu_T[0], cpu_A0); - gen_op_mov_reg_v(tcg_ctx, MO_8, R_EAX, *cpu_T[0]); - break; - case 0xb0: case 0xb1: case 0xb2: case 0xb3: - case 0xb4: case 0xb5: case 0xb6: case 0xb7: //case 0xb0 ... 0xb7: /* mov R, Ib */ - val = insn_get(env, s, MO_8); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); - gen_op_mov_reg_v(tcg_ctx, MO_8, (b & 7) | REX_B(s), *cpu_T[0]); - break; - case 0xb8: case 0xb9: case 0xba: case 0xbb: - case 0xbc: case 0xbd: case 0xbe: case 0xbf: //case 0xb8 ... 0xbf: /* mov R, Iv */ -#ifdef TARGET_X86_64 - if (dflag == MO_64) { - uint64_t tmp; - /* 64 bit case */ - tmp = cpu_ldq_code(env, s->pc); - s->pc += 8; - reg = (b & 7) | REX_B(s); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], tmp); - gen_op_mov_reg_v(tcg_ctx, MO_64, reg, *cpu_T[0]); - } else -#endif - { - ot = dflag; - val = insn_get(env, s, ot); - reg = (b & 7) | REX_B(s); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - } - break; - - case 0x91: case 0x92: case 0x93: - case 0x94: case 0x95: case 0x96: case 0x97: //case 0x91 ... 
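The movzx/movsx block above derives everything from the opcode byte: destination width from dflag, source width from bit 0, signedness from bit 3 (hence s_ot = MO_SIGN | ot for the 0x1be/0x1bf forms). Restated as plain C for the four 32-bit-destination encodings (a sketch of the computed values, not the emitted TCG):

    #include <stdint.h>

    static uint32_t movzbl(uint8_t  v) { return v; }                    /* 0f b6 */
    static uint32_t movzwl(uint16_t v) { return v; }                    /* 0f b7 */
    static uint32_t movsbl(int8_t   v) { return (uint32_t)(int32_t)v; } /* 0f be */
    static uint32_t movswl(int16_t  v) { return (uint32_t)(int32_t)v; } /* 0f bf */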
0x97: /* xchg R, EAX */ - do_xchg_reg_eax: - ot = dflag; - reg = (b & 7) | REX_B(s); - rm = R_EAX; - goto do_xchg_reg; - case 0x86: - case 0x87: /* xchg Ev, Gv */ - ot = mo_b_d(b, dflag); - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - mod = (modrm >> 6) & 3; - if (mod == 3) { - rm = (modrm & 7) | REX_B(s); - do_xchg_reg: - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], rm); - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[1]); - } else { - gen_lea_modrm(env, s, modrm); - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); - /* for xchg, lock is implicit */ - if (!(prefixes & PREFIX_LOCK)) - gen_helper_lock(tcg_ctx, cpu_env); - gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - if (!(prefixes & PREFIX_LOCK)) - gen_helper_unlock(tcg_ctx, cpu_env); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[1]); - } - break; - case 0xc4: /* les Gv */ - /* In CODE64 this is VEX3; see above. */ - op = R_ES; - goto do_lxx; - case 0xc5: /* lds Gv */ - /* In CODE64 this is VEX2; see above. */ - op = R_DS; - goto do_lxx; - case 0x1b2: /* lss Gv */ - op = R_SS; - goto do_lxx; - case 0x1b4: /* lfs Gv */ - op = R_FS; - goto do_lxx; - case 0x1b5: /* lgs Gv */ - op = R_GS; - do_lxx: - ot = dflag != MO_16 ? MO_32 : MO_16; - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - mod = (modrm >> 6) & 3; - if (mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); - gen_add_A0_im(s, 1 << ot); - /* load the segment first to handle exceptions properly */ - gen_op_ld_v(s, MO_16, *cpu_T[0], cpu_A0); - gen_movl_seg_T0(s, op, pc_start - s->cs_base); - /* then put the data */ - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[1]); - if (s->is_jmp) { - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } - break; - - /************************/ - /* shifts */ - case 0xc0: - case 0xc1: - /* shift Ev,Ib */ - shift = 2; - grp2_label: - { - ot = mo_b_d(b, dflag); - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - op = (modrm >> 3) & 7; - - if (mod != 3) { - if (shift == 2) { - s->rip_offset = 1; - } - gen_lea_modrm(env, s, modrm); - opreg = OR_TMP0; - } else { - opreg = (modrm & 7) | REX_B(s); - } - - /* simpler op */ - if (shift == 0) { - gen_shift(s, op, ot, opreg, OR_ECX); - } else { - if (shift == 2) { - shift = cpu_ldub_code(env, s->pc++); - } - gen_shifti(s, op, ot, opreg, shift); - } - } - break; - case 0xd0: - case 0xd1: - /* shift Ev,1 */ - shift = 1; - goto grp2_label; - case 0xd2: - case 0xd3: - /* shift Ev,cl */ - shift = 0; - goto grp2_label; - - case 0x1a4: /* shld imm */ - op = 0; - shift = 1; - goto do_shiftd; - case 0x1a5: /* shld cl */ - op = 0; - shift = 0; - goto do_shiftd; - case 0x1ac: /* shrd imm */ - op = 1; - shift = 1; - goto do_shiftd; - case 0x1ad: /* shrd cl */ - op = 1; - shift = 0; - do_shiftd: - ot = dflag; - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - reg = ((modrm >> 3) & 7) | rex_r; - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - opreg = OR_TMP0; - } else { - opreg = rm; - } - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], reg); - - if (shift) { - TCGv imm = tcg_const_tl(tcg_ctx, cpu_ldub_code(env, s->pc++)); - gen_shiftd_rm_T1(s, ot, opreg, op, imm); - tcg_temp_free(tcg_ctx, imm); - } else { - gen_shiftd_rm_T1(s, ot, opreg, op, *cpu_regs[R_ECX]); - } - break; - - /************************/ - /* floats */ - case 0xd8: case 0xd9: case 
0xda: case 0xdb: - case 0xdc: case 0xdd: case 0xde: case 0xdf: //case 0xd8 ... 0xdf: - if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { - /* if CR0.EM or CR0.TS are set, generate an FPU exception */ - /* XXX: what to do if illegal op ? */ - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); - break; - } - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - rm = modrm & 7; - op = ((b & 7) << 3) | ((modrm >> 3) & 7); - if (mod != 3) { - /* memory op */ - gen_lea_modrm(env, s, modrm); - - if( (op >= 0x00 && op <= 0x07) || /* fxxxs */ - (op >= 0x10 && op <= 0x17) || /* fixxxl */ - (op >= 0x20 && op <= 0x27) || /* fxxxl */ - (op >= 0x30 && op <= 0x37) ) /* fixxx */ - { - int op1; - op1 = op & 7; - - switch(op >> 4) { - case 0: - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - gen_helper_flds_FT0(tcg_ctx, cpu_env, cpu_tmp2_i32); - break; - case 1: - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - gen_helper_fildl_FT0(tcg_ctx, cpu_env, cpu_tmp2_i32); - break; - case 2: - tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, - s->mem_index, MO_LEQ); - gen_helper_fldl_FT0(tcg_ctx, cpu_env, cpu_tmp1_i64); - break; - case 3: - default: - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LESW); - gen_helper_fildl_FT0(tcg_ctx, cpu_env, cpu_tmp2_i32); - break; - } - - gen_helper_fp_arith_ST0_FT0(tcg_ctx, op1); - if (op1 == 3) { - /* fcomp needs pop */ - gen_helper_fpop(tcg_ctx, cpu_env); - } - } - else if((op == 0x08) || /* flds */ - (op == 0x0a) || /* fsts */ - (op == 0x0b) || /* fstps */ - (op >= 0x18 && op <= 0x1b) || /* fildl, fisttpl, fistl, fistpl */ - (op >= 0x28 && op <= 0x2b) || /* fldl, fisttpll, fstl, fstpl */ - (op >= 0x38 && op <= 0x3b) ) /* filds, fisttps, fists, fistps */ - { - switch(op & 7) { - case 0: - switch(op >> 4) { - case 0: - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - gen_helper_flds_ST0(tcg_ctx, cpu_env, cpu_tmp2_i32); - break; - case 1: - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - gen_helper_fildl_ST0(tcg_ctx, cpu_env, cpu_tmp2_i32); - break; - case 2: - tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, - s->mem_index, MO_LEQ); - gen_helper_fldl_ST0(tcg_ctx, cpu_env, cpu_tmp1_i64); - break; - case 3: - default: - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LESW); - gen_helper_fildl_ST0(tcg_ctx, cpu_env, cpu_tmp2_i32); - break; - } - break; - case 1: - /* XXX: the corresponding CPUID bit must be tested ! 
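All of the range tests in the FPU block above key off one 6-bit index that folds the escape byte (0xd8-0xdf) together with modrm.reg. A sketch of that decode with two worked examples (hypothetical helper name):

    /* op = ((b & 7) << 3) | modrm.reg, so for instance:
       d9 /5 (fldcw m16) -> (1 << 3) | 5 = 0x0d
       dd /2 (fst m64)   -> (5 << 3) | 2 = 0x2a  */
    static int fp_op_index(int b, int modrm)
    {
        return ((b & 7) << 3) | ((modrm >> 3) & 7);
    }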
*/ - switch(op >> 4) { - case 1: - gen_helper_fisttl_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - break; - case 2: - gen_helper_fisttll_ST0(tcg_ctx, cpu_tmp1_i64, cpu_env); - tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, - s->mem_index, MO_LEQ); - break; - case 3: - default: - gen_helper_fistt_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUW); - break; - } - gen_helper_fpop(tcg_ctx, cpu_env); - break; - default: - switch(op >> 4) { - case 0: - gen_helper_fsts_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - break; - case 1: - gen_helper_fistl_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - break; - case 2: - gen_helper_fstl_ST0(tcg_ctx, cpu_tmp1_i64, cpu_env); - tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, - s->mem_index, MO_LEQ); - break; - case 3: - default: - gen_helper_fist_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUW); - break; - } - if ((op & 7) == 3) - gen_helper_fpop(tcg_ctx, cpu_env); - break; - } - } - else if(op == 0x0c) /* fldenv mem */ - { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fldenv(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1)); - } - else if(op == 0x0d) /* fldcw mem */ - { - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUW); - gen_helper_fldcw(tcg_ctx, cpu_env, cpu_tmp2_i32); - } - else if(op == 0x0e) /* fnstenv mem */ - { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fstenv(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1)); - } - else if(op == 0x0f) /* fnstcw mem */ - { - gen_helper_fnstcw(tcg_ctx, cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUW); - } - else if(op == 0x1d) /* fldt mem */ - { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fldt_ST0(tcg_ctx, cpu_env, cpu_A0); - } - else if(op == 0x1f) /* fstpt mem */ - { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fstt_ST0(tcg_ctx, cpu_env, cpu_A0); - gen_helper_fpop(tcg_ctx, cpu_env); - } - else if(op == 0x2c) /* frstor mem */ - { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_frstor(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1)); - } - else if(op == 0x2e) /* fnsave mem */ - { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fsave(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1)); - } - else if(op == 0x2f) /* fnstsw mem */ - { - gen_helper_fnstsw(tcg_ctx, cpu_tmp2_i32, cpu_env); - tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUW); - } - else if(op == 0x3c) /* fbld */ - { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fbld_ST0(tcg_ctx, cpu_env, cpu_A0); - } - else if(op == 0x3e) /* fbstp */ - { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fbst_ST0(tcg_ctx, cpu_env, cpu_A0); - gen_helper_fpop(tcg_ctx, cpu_env); - } - else if(op == 0x3d) /* fildll */ - { - tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); - gen_helper_fildll_ST0(tcg_ctx, cpu_env, cpu_tmp1_i64); - } - else if(op == 0x3f) /* fistpll */ - { - gen_helper_fistll_ST0(tcg_ctx, cpu_tmp1_i64, cpu_env); - 
tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); - gen_helper_fpop(tcg_ctx, cpu_env); - } - else - { - goto illegal_op; - } - } else { - /* register float ops */ - opreg = rm; - - switch(op) { - case 0x08: /* fld sti */ - gen_helper_fpush(tcg_ctx, cpu_env); - gen_helper_fmov_ST0_STN(tcg_ctx, cpu_env, - tcg_const_i32(tcg_ctx, (opreg + 1) & 7)); - break; - case 0x09: /* fxchg sti */ - case 0x29: /* fxchg4 sti, undocumented op */ - case 0x39: /* fxchg7 sti, undocumented op */ - gen_helper_fxchg_ST0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - break; - case 0x0a: /* grp d9/2 */ - switch(rm) { - case 0: /* fnop */ - /* check exceptions (FreeBSD FPU probe) */ - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fwait(tcg_ctx, cpu_env); - break; - default: - goto illegal_op; - } - break; - case 0x0c: /* grp d9/4 */ - switch(rm) { - case 0: /* fchs */ - gen_helper_fchs_ST0(tcg_ctx, cpu_env); - break; - case 1: /* fabs */ - gen_helper_fabs_ST0(tcg_ctx, cpu_env); - break; - case 4: /* ftst */ - gen_helper_fldz_FT0(tcg_ctx, cpu_env); - gen_helper_fcom_ST0_FT0(tcg_ctx, cpu_env); - break; - case 5: /* fxam */ - gen_helper_fxam_ST0(tcg_ctx, cpu_env); - break; - default: - goto illegal_op; - } - break; - case 0x0d: /* grp d9/5 */ - { - switch(rm) { - case 0: - gen_helper_fpush(tcg_ctx, cpu_env); - gen_helper_fld1_ST0(tcg_ctx, cpu_env); - break; - case 1: - gen_helper_fpush(tcg_ctx, cpu_env); - gen_helper_fldl2t_ST0(tcg_ctx, cpu_env); - break; - case 2: - gen_helper_fpush(tcg_ctx, cpu_env); - gen_helper_fldl2e_ST0(tcg_ctx, cpu_env); - break; - case 3: - gen_helper_fpush(tcg_ctx, cpu_env); - gen_helper_fldpi_ST0(tcg_ctx, cpu_env); - break; - case 4: - gen_helper_fpush(tcg_ctx, cpu_env); - gen_helper_fldlg2_ST0(tcg_ctx, cpu_env); - break; - case 5: - gen_helper_fpush(tcg_ctx, cpu_env); - gen_helper_fldln2_ST0(tcg_ctx, cpu_env); - break; - case 6: - gen_helper_fpush(tcg_ctx, cpu_env); - gen_helper_fldz_ST0(tcg_ctx, cpu_env); - break; - default: - goto illegal_op; - } - } - break; - case 0x0e: /* grp d9/6 */ - switch(rm) { - case 0: /* f2xm1 */ - gen_helper_f2xm1(tcg_ctx, cpu_env); - break; - case 1: /* fyl2x */ - gen_helper_fyl2x(tcg_ctx, cpu_env); - break; - case 2: /* fptan */ - gen_helper_fptan(tcg_ctx, cpu_env); - break; - case 3: /* fpatan */ - gen_helper_fpatan(tcg_ctx, cpu_env); - break; - case 4: /* fxtract */ - gen_helper_fxtract(tcg_ctx, cpu_env); - break; - case 5: /* fprem1 */ - gen_helper_fprem1(tcg_ctx, cpu_env); - break; - case 6: /* fdecstp */ - gen_helper_fdecstp(tcg_ctx, cpu_env); - break; - default: - case 7: /* fincstp */ - gen_helper_fincstp(tcg_ctx, cpu_env); - break; - } - break; - case 0x0f: /* grp d9/7 */ - switch(rm) { - case 0: /* fprem */ - gen_helper_fprem(tcg_ctx, cpu_env); - break; - case 1: /* fyl2xp1 */ - gen_helper_fyl2xp1(tcg_ctx, cpu_env); - break; - case 2: /* fsqrt */ - gen_helper_fsqrt(tcg_ctx, cpu_env); - break; - case 3: /* fsincos */ - gen_helper_fsincos(tcg_ctx, cpu_env); - break; - case 5: /* fscale */ - gen_helper_fscale(tcg_ctx, cpu_env); - break; - case 4: /* frndint */ - gen_helper_frndint(tcg_ctx, cpu_env); - break; - case 6: /* fsin */ - gen_helper_fsin(tcg_ctx, cpu_env); - break; - default: - case 7: /* fcos */ - gen_helper_fcos(tcg_ctx, cpu_env); - break; - } - break; - case 0x00: case 0x01: case 0x04: case 0x05: case 0x06: case 0x07: /* fxxx st, sti */ - case 0x20: case 0x21: case 0x24: case 0x25: case 0x26: case 0x27: /* fxxx sti, st */ - case 0x30: case 0x31: case 0x34: case 0x35: case 0x36: case 
0x37: /* fxxxp sti, st */ - { - int op1; - - op1 = op & 7; - if (op >= 0x20) { - gen_helper_fp_arith_STN_ST0(tcg_ctx, op1, opreg); - if (op >= 0x30) - gen_helper_fpop(tcg_ctx, cpu_env); - } else { - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fp_arith_ST0_FT0(tcg_ctx, op1); - } - } - break; - case 0x02: /* fcom */ - case 0x22: /* fcom2, undocumented op */ - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fcom_ST0_FT0(tcg_ctx, cpu_env); - break; - case 0x03: /* fcomp */ - case 0x23: /* fcomp3, undocumented op */ - case 0x32: /* fcomp5, undocumented op */ - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fcom_ST0_FT0(tcg_ctx, cpu_env); - gen_helper_fpop(tcg_ctx, cpu_env); - break; - case 0x15: /* da/5 */ - switch(rm) { - case 1: /* fucompp */ - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, 1)); - gen_helper_fucom_ST0_FT0(tcg_ctx, cpu_env); - gen_helper_fpop(tcg_ctx, cpu_env); - gen_helper_fpop(tcg_ctx, cpu_env); - break; - default: - goto illegal_op; - } - break; - case 0x1c: - switch(rm) { - case 0: /* feni (287 only, just do nop here) */ - break; - case 1: /* fdisi (287 only, just do nop here) */ - break; - case 2: /* fclex */ - gen_helper_fclex(tcg_ctx, cpu_env); - break; - case 3: /* fninit */ - gen_helper_fninit(tcg_ctx, cpu_env); - break; - case 4: /* fsetpm (287 only, just do nop here) */ - break; - default: - goto illegal_op; - } - break; - case 0x1d: /* fucomi */ - if (!(s->cpuid_features & CPUID_CMOV)) { - goto illegal_op; - } - gen_update_cc_op(s); - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fucomi_ST0_FT0(tcg_ctx, cpu_env); - set_cc_op(s, CC_OP_EFLAGS); - break; - case 0x1e: /* fcomi */ - if (!(s->cpuid_features & CPUID_CMOV)) { - goto illegal_op; - } - gen_update_cc_op(s); - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fcomi_ST0_FT0(tcg_ctx, cpu_env); - set_cc_op(s, CC_OP_EFLAGS); - break; - case 0x28: /* ffree sti */ - gen_helper_ffree_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - break; - case 0x2a: /* fst sti */ - gen_helper_fmov_STN_ST0(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - break; - case 0x2b: /* fstp sti */ - case 0x0b: /* fstp1 sti, undocumented op */ - case 0x3a: /* fstp8 sti, undocumented op */ - case 0x3b: /* fstp9 sti, undocumented op */ - gen_helper_fmov_STN_ST0(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fpop(tcg_ctx, cpu_env); - break; - case 0x2c: /* fucom st(i) */ - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fucom_ST0_FT0(tcg_ctx, cpu_env); - break; - case 0x2d: /* fucomp st(i) */ - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fucom_ST0_FT0(tcg_ctx, cpu_env); - gen_helper_fpop(tcg_ctx, cpu_env); - break; - case 0x33: /* de/3 */ - switch(rm) { - case 1: /* fcompp */ - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, 1)); - gen_helper_fcom_ST0_FT0(tcg_ctx, cpu_env); - gen_helper_fpop(tcg_ctx, cpu_env); - gen_helper_fpop(tcg_ctx, cpu_env); - break; - default: - goto illegal_op; - } - break; - case 0x38: /* ffreep sti, undocumented op */ - gen_helper_ffree_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fpop(tcg_ctx, cpu_env); - break; - case 0x3c: /* df/4 */ - switch(rm) { - case 0: - gen_helper_fnstsw(tcg_ctx, cpu_tmp2_i32, cpu_env); - tcg_gen_extu_i32_tl(tcg_ctx, *cpu_T[0], 
cpu_tmp2_i32); - gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); - break; - default: - goto illegal_op; - } - break; - case 0x3d: /* fucomip */ - if (!(s->cpuid_features & CPUID_CMOV)) { - goto illegal_op; - } - gen_update_cc_op(s); - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fucomi_ST0_FT0(tcg_ctx, cpu_env); - gen_helper_fpop(tcg_ctx, cpu_env); - set_cc_op(s, CC_OP_EFLAGS); - break; - case 0x3e: /* fcomip */ - if (!(s->cpuid_features & CPUID_CMOV)) { - goto illegal_op; - } - gen_update_cc_op(s); - gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_helper_fcomi_ST0_FT0(tcg_ctx, cpu_env); - gen_helper_fpop(tcg_ctx, cpu_env); - set_cc_op(s, CC_OP_EFLAGS); - break; - case 0x10: case 0x11: case 0x12: case 0x13: /* fcmovxx */ - case 0x18: case 0x19: case 0x1a: case 0x1b: - { - int op1, l1; - static const uint8_t fcmov_cc[8] = { - (JCC_B << 1), - (JCC_Z << 1), - (JCC_BE << 1), - (JCC_P << 1), - }; - - if (!(s->cpuid_features & CPUID_CMOV)) { - goto illegal_op; - } - op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); - l1 = gen_new_label(tcg_ctx); - gen_jcc1_noeob(s, op1, l1); - gen_helper_fmov_ST0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); - gen_set_label(tcg_ctx, l1); - } - break; - default: - goto illegal_op; - } - } - tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tmp0, pc_start - s->cs_base); - tcg_gen_st_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tmp0, cpu_env, offsetof(CPUX86State, fpip)); - break; - /************************/ - /* string ops */ - - case 0xa4: /* movsS */ - case 0xa5: - ot = mo_b_d(b, dflag); - if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); - } else { - gen_movs(s, ot); - } - break; - - case 0xaa: /* stosS */ - case 0xab: - ot = mo_b_d(b, dflag); - if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); - } else { - gen_stos(s, ot); - } - break; - case 0xac: /* lodsS */ - case 0xad: - ot = mo_b_d(b, dflag); - if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); - } else { - gen_lods(s, ot); - } - break; - case 0xae: /* scasS */ - case 0xaf: - ot = mo_b_d(b, dflag); - if (prefixes & PREFIX_REPNZ) { - gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); - } else if (prefixes & PREFIX_REPZ) { - gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); - } else { - gen_scas(s, ot); - } - break; - - case 0xa6: /* cmpsS */ - case 0xa7: - ot = mo_b_d(b, dflag); - if (prefixes & PREFIX_REPNZ) { - gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); - } else if (prefixes & PREFIX_REPZ) { - gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); - } else { - gen_cmps(s, ot); - } - break; - case 0x6c: /* insS */ // qq - case 0x6d: - ot = mo_b_d32(b, dflag); - tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_regs[R_EDX]); - gen_check_io(s, ot, pc_start - s->cs_base, - SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); - if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); - } else { - gen_ins(s, ot); - } - break; - case 0x6e: /* outsS */ // qq - case 0x6f: - ot = mo_b_d32(b, dflag); - tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_regs[R_EDX]); - gen_check_io(s, ot, pc_start - s->cs_base, - svm_is_rep(prefixes) | 4); - if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - 
s->cs_base); - } else { - gen_outs(s, ot); - } - break; - - /************************/ - /* port I/O */ - - case 0xe4: // in - case 0xe5: // out - ot = mo_b_d32(b, dflag); - val = cpu_ldub_code(env, s->pc++); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); - gen_check_io(s, ot, pc_start - s->cs_base, - SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); - tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, val); - gen_helper_in_func(tcg_ctx, ot, *cpu_T[1], cpu_tmp2_i32); - gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, *cpu_T[1]); - break; - case 0xe6: - case 0xe7: - ot = mo_b_d32(b, dflag); - val = cpu_ldub_code(env, s->pc++); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); - gen_check_io(s, ot, pc_start - s->cs_base, - svm_is_rep(prefixes)); - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], R_EAX); - - tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, val); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_T[1]); - gen_helper_out_func(tcg_ctx, ot, cpu_tmp2_i32, cpu_tmp3_i32); - break; - case 0xec: - case 0xed: - ot = mo_b_d32(b, dflag); - tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_regs[R_EDX]); - gen_check_io(s, ot, pc_start - s->cs_base, - SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - gen_helper_in_func(tcg_ctx, ot, *cpu_T[1], cpu_tmp2_i32); - gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, *cpu_T[1]); - break; - case 0xee: - case 0xef: - ot = mo_b_d32(b, dflag); - tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_regs[R_EDX]); - gen_check_io(s, ot, pc_start - s->cs_base, - svm_is_rep(prefixes)); - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], R_EAX); - - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_T[1]); - gen_helper_out_func(tcg_ctx, ot, cpu_tmp2_i32, cpu_tmp3_i32); - break; - - /************************/ - /* control */ - case 0xc2: /* ret im */ - val = cpu_ldsw_code(env, s->pc); - s->pc += 2; - ot = gen_pop_T0(s); - gen_stack_update(s, val + (1 << ot)); - /* Note that gen_pop_T0 uses a zero-extending load. */ - gen_op_jmp_v(tcg_ctx, *cpu_T[0]); - gen_eob(s); - break; - case 0xc3: /* ret */ - ot = gen_pop_T0(s); - gen_pop_update(s, ot); - /* Note that gen_pop_T0 uses a zero-extending load. 
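For "ret im" the code above pops the return address and then releases the immediate's worth of extra stack in a single adjustment, val + (1 << ot). A sketch of the 32-bit case (hypothetical flat-stack model):

    #include <stdint.h>
    #include <string.h>

    /* RET imm16, 32-bit operand size: pop EIP (zero-extending load),
       then discard imm16 further bytes of callee-cleaned arguments. */
    static uint32_t ret_imm(uint32_t *esp, const uint8_t *stack, uint16_t imm)
    {
        uint32_t eip;
        memcpy(&eip, stack + *esp, 4);
        *esp += 4 + imm;        /* val + (1 << ot) in the code above */
        return eip;
    }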
*/ - gen_op_jmp_v(tcg_ctx, *cpu_T[0]); - gen_eob(s); - break; - case 0xca: /* lret im */ - val = cpu_ldsw_code(env, s->pc); - s->pc += 2; - do_lret: - if (s->pe && !s->vm86) { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_lret_protected(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1), - tcg_const_i32(tcg_ctx, val)); - } else { - gen_stack_A0(s); - /* pop offset */ - gen_op_ld_v(s, dflag, *cpu_T[0], cpu_A0); - /* NOTE: keeping EIP updated is not a problem in case of - exception */ - gen_op_jmp_v(tcg_ctx, *cpu_T[0]); - /* pop selector */ - gen_op_addl_A0_im(tcg_ctx, 1 << dflag); - gen_op_ld_v(s, dflag, *cpu_T[0], cpu_A0); - gen_op_movl_seg_T0_vm(tcg_ctx, R_CS); - /* add stack offset */ - gen_stack_update(s, val + (2 << dflag)); - } - gen_eob(s); - break; - case 0xcb: /* lret */ - val = 0; - goto do_lret; - case 0xcf: /* iret */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); - if (!s->pe) { - /* real mode */ - gen_helper_iret_real(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); - set_cc_op(s, CC_OP_EFLAGS); - } else if (s->vm86) { - if (s->iopl != 3) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_helper_iret_real(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); - set_cc_op(s, CC_OP_EFLAGS); - } - } else { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_iret_protected(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1), - tcg_const_i32(tcg_ctx, s->pc - s->cs_base)); - set_cc_op(s, CC_OP_EFLAGS); - } - gen_eob(s); - break; - case 0xe8: /* call im */ - { - if (dflag != MO_16) { - tval = (int32_t)insn_get(env, s, MO_32); - } else { - tval = (int16_t)insn_get(env, s, MO_16); - } - next_eip = s->pc - s->cs_base; - tval += next_eip; - if (dflag == MO_16) { - tval &= 0xffff; - } else if (!CODE64(s)) { - tval &= 0xffffffff; - } - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], next_eip); - gen_push_v(s, *cpu_T[0]); - gen_jmp(s, tval); - } - break; - case 0x9a: /* lcall im */ - { - unsigned int selector, offset; - - if (CODE64(s)) - goto illegal_op; - ot = dflag; - offset = insn_get(env, s, ot); - selector = insn_get(env, s, MO_16); - - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], selector); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], offset); - } - goto do_lcall; - case 0xe9: /* jmp im */ - if (dflag != MO_16) { - tval = (int32_t)insn_get(env, s, MO_32); - } else { - tval = (int16_t)insn_get(env, s, MO_16); - } - tval += s->pc - s->cs_base; - if (dflag == MO_16) { - tval &= 0xffff; - } else if (!CODE64(s)) { - tval &= 0xffffffff; - } - gen_jmp(s, tval); - break; - case 0xea: /* ljmp im */ - { - unsigned int selector, offset; - - if (CODE64(s)) - goto illegal_op; - ot = dflag; - offset = insn_get(env, s, ot); - selector = insn_get(env, s, MO_16); - - tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], selector); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], offset); - } - goto do_ljmp; - case 0xeb: /* jmp Jb */ - tval = (int8_t)insn_get(env, s, MO_8); - tval += s->pc - s->cs_base; - if (dflag == MO_16) { - tval &= 0xffff; - } - gen_jmp(s, tval); - break; - //case 0x70 ... 0x7f: /* jcc Jb */ - case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: - case 0x78: case 0x79: case 0x7a: case 0x7b: case 0x7c: case 0x7d: case 0x7e: case 0x7f: - tval = (int8_t)insn_get(env, s, MO_8); - goto do_jcc; - //case 0x180 ... 
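Every near-branch case above computes target = next_eip + disp and then truncates by operand size: to 16 bits under MO_16, to 32 bits outside 64-bit code. A sketch of that wrap (dflag16/code64 stand in for the decoder flags):

    #include <stdint.h>

    static uint64_t branch_target(uint64_t next_eip, int64_t disp,
                                  int dflag16, int code64)
    {
        uint64_t t = next_eip + (uint64_t)disp;
        if (dflag16)
            t &= 0xffff;        /* IP wraps within 64K */
        else if (!code64)
            t &= 0xffffffff;    /* EIP wraps at 4G     */
        return t;
    }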
0x18f: /* jcc Jv */ - case 0x180: case 0x181: case 0x182: case 0x183: case 0x184: case 0x185: case 0x186: case 0x187: - case 0x188: case 0x189: case 0x18a: case 0x18b: case 0x18c: case 0x18d: case 0x18e: case 0x18f: - if (dflag != MO_16) { - tval = (int32_t)insn_get(env, s, MO_32); - } else { - tval = (int16_t)insn_get(env, s, MO_16); - } - do_jcc: - next_eip = s->pc - s->cs_base; - tval += next_eip; - if (dflag == MO_16) { - tval &= 0xffff; - } - gen_jcc(s, b, tval, next_eip); - break; - - //case 0x190 ... 0x19f: /* setcc Gv */ - case 0x190: case 0x191: case 0x192: case 0x193: case 0x194: case 0x195: case 0x196: case 0x197: - case 0x198: case 0x199: case 0x19a: case 0x19b: case 0x19c: case 0x19d: case 0x19e: case 0x19f: - modrm = cpu_ldub_code(env, s->pc++); - gen_setcc1(s, b, *cpu_T[0]); - gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1); - break; - //case 0x140 ... 0x14f: /* cmov Gv, Ev */ - case 0x140: case 0x141: case 0x142: case 0x143: case 0x144: case 0x145: case 0x146: case 0x147: - case 0x148: case 0x149: case 0x14a: case 0x14b: case 0x14c: case 0x14d: case 0x14e: case 0x14f: - if (!(s->cpuid_features & CPUID_CMOV)) { - goto illegal_op; - } - ot = dflag; - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - gen_cmovcc1(env, s, ot, b, modrm, reg); - break; - - /************************/ - /* flags */ - case 0x9c: /* pushf */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); - if (s->vm86 && s->iopl != 3) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_update_cc_op(s); - gen_helper_read_eflags(tcg_ctx, *cpu_T[0], cpu_env); - gen_push_v(s, *cpu_T[0]); - } - break; - case 0x9d: /* popf */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); - if (s->vm86 && s->iopl != 3) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - ot = gen_pop_T0(s); - if (s->cpl == 0) { - if (dflag != MO_16) { - gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], - tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | - ID_MASK | NT_MASK | - IF_MASK | - IOPL_MASK))); - } else { - gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], - tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | - ID_MASK | NT_MASK | - IF_MASK | IOPL_MASK) - & 0xffff)); - } - } else { - if (s->cpl <= s->iopl) { - if (dflag != MO_16) { - gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], - tcg_const_i32(tcg_ctx, (TF_MASK | - AC_MASK | - ID_MASK | - NT_MASK | - IF_MASK))); - } else { - gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], - tcg_const_i32(tcg_ctx, (TF_MASK | - AC_MASK | - ID_MASK | - NT_MASK | - IF_MASK) - & 0xffff)); - } - } else { - if (dflag != MO_16) { - gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], - tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | - ID_MASK | NT_MASK))); - } else { - gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], - tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | - ID_MASK | NT_MASK) - & 0xffff)); - } - } - } - gen_pop_update(s, ot); - set_cc_op(s, CC_OP_EFLAGS); - /* abort translation because TF/AC flag may change */ - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } - break; - case 0x9e: /* sahf */ - if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) - goto illegal_op; - gen_op_mov_v_reg(tcg_ctx, MO_8, *cpu_T[0], R_AH); - gen_compute_eflags(s); - tcg_gen_andi_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, CC_O); - tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C); - tcg_gen_or_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, *cpu_T[0]); - break; - case 0x9f: /* lahf */ - if (CODE64(s) && !(s->cpuid_ext3_features & 
CPUID_EXT3_LAHF_LM)) - goto illegal_op; - gen_compute_eflags(s); - /* Note: gen_compute_eflags() only gives the condition codes */ - tcg_gen_ori_tl(tcg_ctx, *cpu_T[0], cpu_cc_src, 0x02); - gen_op_mov_reg_v(tcg_ctx, MO_8, R_AH, *cpu_T[0]); - break; - case 0xf5: /* cmc */ - gen_compute_eflags(s); - tcg_gen_xori_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, CC_C); - break; - case 0xf8: /* clc */ - gen_compute_eflags(s); - tcg_gen_andi_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, ~CC_C); - break; - case 0xf9: /* stc */ - gen_compute_eflags(s); - tcg_gen_ori_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, CC_C); - break; - case 0xfc: /* cld */ - tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, 1); - tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); - break; - case 0xfd: /* std */ - tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, -1); - tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); - break; - - /************************/ - /* bit operations */ - case 0x1ba: /* bt/bts/btr/btc Gv, im */ - ot = dflag; - modrm = cpu_ldub_code(env, s->pc++); - op = (modrm >> 3) & 7; - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - if (mod != 3) { - s->rip_offset = 1; - gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); - } - /* load shift */ - val = cpu_ldub_code(env, s->pc++); - tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); - if (op < 4) - goto illegal_op; - op -= 4; - goto bt_op; - case 0x1a3: /* bt Gv, Ev */ - op = 0; - goto do_btx; - case 0x1ab: /* bts */ - op = 1; - goto do_btx; - case 0x1b3: /* btr */ - op = 2; - goto do_btx; - case 0x1bb: /* btc */ - op = 3; - do_btx: - ot = dflag; - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[1], reg); - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - /* specific case: we need to add a displacement */ - gen_exts(tcg_ctx, ot, *cpu_T[1]); - tcg_gen_sari_tl(tcg_ctx, cpu_tmp0, *cpu_T[1], 3 + ot); - tcg_gen_shli_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, ot); - tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); - gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); - } - bt_op: - tcg_gen_andi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], (1 << (3 + ot)) - 1); - tcg_gen_shr_tl(tcg_ctx, cpu_tmp4, *cpu_T[0], *cpu_T[1]); - switch(op) { - case 0: - break; - case 1: - tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, 1); - tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, *cpu_T[1]); - tcg_gen_or_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp0); - break; - case 2: - tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, 1); - tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, *cpu_T[1]); - tcg_gen_andc_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp0); - break; - default: - case 3: - tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, 1); - tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, *cpu_T[1]); - tcg_gen_xor_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp0); - break; - } - if (op != 0) { - if (mod != 3) { - gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); - } else { - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - } - } - - /* Delay all CC updates until after the store above. Note that - C is the result of the test, Z is unchanged, and the others - are all undefined. */ - switch (s->cc_op) { - case CC_OP_MULB: case CC_OP_MULW: case CC_OP_MULL: case CC_OP_MULQ: //case CC_OP_MULB ... CC_OP_MULQ: - case CC_OP_ADDB: case CC_OP_ADDW: case CC_OP_ADDL: case CC_OP_ADDQ: //case CC_OP_ADDB ... 
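In the memory form of the bt group above, the bit offset is split into an address displacement and a residual bit position; the arithmetic shift (sari) keeps negative offsets addressing below the base, and the mask (1 << (3 + ot)) - 1 reduces the offset modulo the operand width. Sketch for BT m32, r32 (assumes the usual arithmetic >> on signed ints):

    #include <stdint.h>

    static int bt_mem32(const uint32_t *base, int32_t bitoff)
    {
        const uint32_t *word = base + (bitoff >> 5); /* sari 3+ot, shl ot */
        uint32_t bit = (uint32_t)bitoff & 31;        /* & (1<<(3+ot))-1   */
        return (*word >> bit) & 1;                   /* -> CF             */
    }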
CC_OP_ADDQ: - case CC_OP_ADCB: case CC_OP_ADCW: case CC_OP_ADCL: case CC_OP_ADCQ: //case CC_OP_ADCB ... CC_OP_ADCQ: - case CC_OP_SUBB: case CC_OP_SUBW: case CC_OP_SUBL: case CC_OP_SUBQ: //case CC_OP_SUBB ... CC_OP_SUBQ: - case CC_OP_SBBB: case CC_OP_SBBW: case CC_OP_SBBL: case CC_OP_SBBQ: //case CC_OP_SBBB ... CC_OP_SBBQ: - case CC_OP_LOGICB: case CC_OP_LOGICW: case CC_OP_LOGICL: case CC_OP_LOGICQ: //case CC_OP_LOGICB ... CC_OP_LOGICQ: - case CC_OP_INCB: case CC_OP_INCW: case CC_OP_INCL: case CC_OP_INCQ: //case CC_OP_INCB ... CC_OP_INCQ: - case CC_OP_DECB: case CC_OP_DECW: case CC_OP_DECL: case CC_OP_DECQ: //case CC_OP_DECB ... CC_OP_DECQ: - case CC_OP_SHLB: case CC_OP_SHLW: case CC_OP_SHLL: case CC_OP_SHLQ: //case CC_OP_SHLB ... CC_OP_SHLQ: - case CC_OP_SARB: case CC_OP_SARW: case CC_OP_SARL: case CC_OP_SARQ: //case CC_OP_SARB ... CC_OP_SARQ: - case CC_OP_BMILGB: case CC_OP_BMILGW: case CC_OP_BMILGL: case CC_OP_BMILGQ: //case CC_OP_BMILGB ... CC_OP_BMILGQ: - /* Z was going to be computed from the non-zero status of CC_DST. - We can get that same Z value (and the new C value) by leaving - CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the - same width. */ - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, cpu_tmp4); - set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); - break; - default: - /* Otherwise, generate EFLAGS and replace the C bit. */ - gen_compute_eflags(s); - tcg_gen_deposit_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, cpu_tmp4, - ctz32(CC_C), 1); - break; - } - break; - case 0x1bc: /* bsf / tzcnt */ - case 0x1bd: /* bsr / lzcnt */ - ot = dflag; - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_extu(tcg_ctx, ot, *cpu_T[0]); - - /* Note that lzcnt and tzcnt are in different extensions. */ - if ((prefixes & PREFIX_REPZ) - && (b & 1 - ? s->cpuid_ext3_features & CPUID_EXT3_ABM - : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) { - int size = 8 << ot; - tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[0]); - if (b & 1) { - /* For lzcnt, reduce the target_ulong result by the - number of zeros that we expect to find at the top. */ - gen_helper_clz(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_subi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], TARGET_LONG_BITS - size); - } else { - /* For tzcnt, a zero input must return the operand size: - force all bits outside the operand size to 1. */ - target_ulong mask = (target_ulong)-2 << (size - 1); - tcg_gen_ori_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], mask); - gen_helper_ctz(tcg_ctx, *cpu_T[0], *cpu_T[0]); - } - /* For lzcnt/tzcnt, C and Z bits are defined and are - related to the result. */ - gen_op_update1_cc(tcg_ctx); - set_cc_op(s, CC_OP_BMILGB + ot); - } else { - /* For bsr/bsf, only the Z bit is defined and it is related - to the input and not the result. */ - tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); - set_cc_op(s, CC_OP_LOGICB + ot); - if (b & 1) { - /* For bsr, return the bit index of the first 1 bit, - not the count of leading zeros. */ - gen_helper_clz(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_xori_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], TARGET_LONG_BITS - 1); - } else { - gen_helper_ctz(tcg_ctx, *cpu_T[0], *cpu_T[0]); - } - /* ??? The manual says that the output is undefined when the - input is zero, but real hardware leaves it unchanged, and - real programs appear to depend on that. 
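The tzcnt path above avoids branching on the zero-input case: with every bit above the operand size forced to 1, a zero input finds its first set bit exactly at position "operand size". Sketch of the 16-bit case on a 64-bit target_ulong (__builtin_ctzll standing in for the ctz helper):

    #include <stdint.h>

    static int tzcnt16(uint16_t v)
    {
        /* mask = (target_ulong)-2 << (size - 1), i.e. ...ffff0000 */
        uint64_t t = v | ((uint64_t)-2 << 15);
        return __builtin_ctzll(t);   /* yields 16 when v == 0 */
    }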
*/ - tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, 0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *cpu_T[0], cpu_cc_dst, cpu_tmp0, - *cpu_regs[reg], *cpu_T[0]); - } - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - break; - /************************/ - /* bcd */ - case 0x27: /* daa */ - if (CODE64(s)) - goto illegal_op; - gen_update_cc_op(s); - gen_helper_daa(tcg_ctx, cpu_env); - set_cc_op(s, CC_OP_EFLAGS); - break; - case 0x2f: /* das */ - if (CODE64(s)) - goto illegal_op; - gen_update_cc_op(s); - gen_helper_das(tcg_ctx, cpu_env); - set_cc_op(s, CC_OP_EFLAGS); - break; - case 0x37: /* aaa */ - if (CODE64(s)) - goto illegal_op; - gen_update_cc_op(s); - gen_helper_aaa(tcg_ctx, cpu_env); - set_cc_op(s, CC_OP_EFLAGS); - break; - case 0x3f: /* aas */ - if (CODE64(s)) - goto illegal_op; - gen_update_cc_op(s); - gen_helper_aas(tcg_ctx, cpu_env); - set_cc_op(s, CC_OP_EFLAGS); - break; - case 0xd4: /* aam */ - if (CODE64(s)) - goto illegal_op; - val = cpu_ldub_code(env, s->pc++); - if (val == 0) { - gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); - } else { - gen_helper_aam(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, val)); - set_cc_op(s, CC_OP_LOGICB); - } - break; - case 0xd5: /* aad */ - if (CODE64(s)) - goto illegal_op; - val = cpu_ldub_code(env, s->pc++); - gen_helper_aad(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, val)); - set_cc_op(s, CC_OP_LOGICB); - break; - /************************/ - /* misc */ - case 0x90: /* nop */ - /* XXX: correct lock test for all insn */ - if (prefixes & PREFIX_LOCK) { - goto illegal_op; - } - /* If REX_B is set, then this is xchg eax, r8d, not a nop. */ - if (REX_B(s)) { - goto do_xchg_reg_eax; - } - if (prefixes & PREFIX_REPZ) { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_pause(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); - s->is_jmp = DISAS_TB_JUMP; - } - break; - case 0x9b: /* fwait */ - if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == - (HF_MP_MASK | HF_TS_MASK)) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); - } else { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fwait(tcg_ctx, cpu_env); - } - break; - case 0xcc: /* int3 */ - gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); - break; - case 0xcd: /* int N */ - val = cpu_ldub_code(env, s->pc++); - if (s->vm86 && s->iopl != 3) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); - } - break; - case 0xce: /* into */ - if (CODE64(s)) - goto illegal_op; - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_into(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); - break; -#ifdef WANT_ICEBP - case 0xf1: /* icebp (undocumented, exits to external debugger) */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); -#if 1 - gen_debug(s, pc_start - s->cs_base); -#else - /* start debug */ - tb_flush(env); - qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM); -#endif - break; -#endif - case 0xfa: /* cli */ - if (!s->vm86) { - if (s->cpl <= s->iopl) { - gen_helper_cli(tcg_ctx, cpu_env); - } else { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } - } else { - if (s->iopl == 3) { - gen_helper_cli(tcg_ctx, cpu_env); - } else { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } - } - break; - case 0xfb: /* sti */ - if (!s->vm86) { - if (s->cpl <= s->iopl) { - gen_sti: - gen_helper_sti(tcg_ctx, cpu_env); - /* interruptions are enabled only the first insn after sti */ - /* If several instructions 
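AAM is the one BCD op above with an explicit operand check: the immediate is a divisor, so val == 0 must raise #DE before the helper ever runs. The helper's arithmetic, as a sketch (caller assumed to have rejected imm == 0):

    #include <stdint.h>

    /* AAM imm8: unsigned divide of AL, quotient to AH, remainder back
       to AL; flags then follow from the 8-bit logic op on AL. */
    static void aam(uint8_t *al, uint8_t *ah, uint8_t imm)
    {
        *ah = (uint8_t)(*al / imm);
        *al = (uint8_t)(*al % imm);
    }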
disable interrupts, only the - _first_ does it */ - if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) - gen_helper_set_inhibit_irq(tcg_ctx, cpu_env); - /* give a chance to handle pending irqs */ - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } else { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } - } else { - if (s->iopl == 3) { - goto gen_sti; - } else { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } - } - break; - case 0x62: /* bound */ - if (CODE64(s)) - goto illegal_op; - ot = dflag; - modrm = cpu_ldub_code(env, s->pc++); - reg = (modrm >> 3) & 7; - mod = (modrm >> 6) & 3; - if (mod == 3) - goto illegal_op; - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); - gen_lea_modrm(env, s, modrm); - gen_jmp_im(s, pc_start - s->cs_base); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - if (ot == MO_16) { - gen_helper_boundw(tcg_ctx, cpu_env, cpu_A0, cpu_tmp2_i32); - } else { - gen_helper_boundl(tcg_ctx, cpu_env, cpu_A0, cpu_tmp2_i32); - } - break; - case 0x1c8: case 0x1c9: case 0x1ca: case 0x1cb: - case 0x1cc: case 0x1cd: case 0x1ce: case 0x1cf: /* bswap reg */ - reg = (b & 7) | REX_B(s); -#ifdef TARGET_X86_64 - if (dflag == MO_64) { - gen_op_mov_v_reg(tcg_ctx, MO_64, *cpu_T[0], reg); - tcg_gen_bswap64_i64(tcg_ctx, *cpu_T[0], *cpu_T[0]); - gen_op_mov_reg_v(tcg_ctx, MO_64, reg, *cpu_T[0]); - } else -#endif - { - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], reg); - tcg_gen_ext32u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - tcg_gen_bswap32_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - gen_op_mov_reg_v(tcg_ctx, MO_32, reg, *cpu_T[0]); - } - break; - case 0xd6: /* salc */ - if (CODE64(s)) - goto illegal_op; - gen_compute_eflags_c(s, *cpu_T[0]); - tcg_gen_neg_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - gen_op_mov_reg_v(tcg_ctx, MO_8, R_EAX, *cpu_T[0]); - break; - case 0xe0: /* loopnz */ - case 0xe1: /* loopz */ - case 0xe2: /* loop */ - case 0xe3: /* jecxz */ - { - int l1, l2, l3; - - tval = (int8_t)insn_get(env, s, MO_8); - next_eip = s->pc - s->cs_base; - tval += next_eip; - if (dflag == MO_16) { - tval &= 0xffff; - } - - l1 = gen_new_label(tcg_ctx); - l2 = gen_new_label(tcg_ctx); - l3 = gen_new_label(tcg_ctx); - b &= 3; - switch(b) { - case 0: /* loopnz */ - case 1: /* loopz */ - gen_op_add_reg_im(tcg_ctx, s->aflag, R_ECX, -1); - gen_op_jz_ecx(tcg_ctx, s->aflag, l3); - gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); - break; - case 2: /* loop */ - gen_op_add_reg_im(tcg_ctx, s->aflag, R_ECX, -1); - gen_op_jnz_ecx(tcg_ctx, s->aflag, l1); - break; - default: - case 3: /* jcxz */ - gen_op_jz_ecx(tcg_ctx, s->aflag, l1); - break; - } - - gen_set_label(tcg_ctx, l3); - gen_jmp_im(s, next_eip); - tcg_gen_br(tcg_ctx, l2); - - gen_set_label(tcg_ctx, l1); - gen_jmp_im(s, tval); - gen_set_label(tcg_ctx, l2); - gen_eob(s); - } - break; - case 0x130: /* wrmsr */ - case 0x132: /* rdmsr */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - if (b & 2) { - gen_helper_rdmsr(tcg_ctx, cpu_env); - } else { - gen_helper_wrmsr(tcg_ctx, cpu_env); - } - } - break; - case 0x131: /* rdtsc */ - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_rdtsc(tcg_ctx, cpu_env); - break; - case 0x133: /* rdpmc */ - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_rdpmc(tcg_ctx, cpu_env); - break; - case 0x134: /* sysenter */ - /* For Intel SYSENTER is valid on 64-bit */ - if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) - goto illegal_op; - - if (!s->pe) { - gen_exception(s, 
EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_sysenter(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); - gen_eob(s); - } - break; - case 0x135: /* sysexit */ - /* For Intel SYSEXIT is valid on 64-bit */ - if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) - goto illegal_op; - if (!s->pe) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_sysexit(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); - gen_eob(s); - } - break; -#ifdef TARGET_X86_64 - case 0x105: /* syscall */ - /* XXX: is it usable in real mode ? */ - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_syscall(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); - gen_eob(s); - break; - case 0x107: /* sysret */ - if (!s->pe) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_sysret(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); - /* condition codes are modified only in long mode */ - if (s->lma) { - set_cc_op(s, CC_OP_EFLAGS); - } - gen_eob(s); - } - break; -#endif - case 0x1a2: /* cpuid */ - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_cpuid(tcg_ctx, cpu_env); - break; - case 0xf4: /* hlt */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_hlt(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); - s->is_jmp = DISAS_TB_JUMP; - } - break; - case 0x100: - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - op = (modrm >> 3) & 7; - switch(op) { - case 0: /* sldt */ - if (!s->pe || s->vm86) - goto illegal_op; - gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector)); - ot = mod == 3 ? dflag : MO_16; - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); - break; - case 2: /* lldt */ - if (!s->pe || s->vm86) - goto illegal_op; - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); - gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - gen_jmp_im(s, pc_start - s->cs_base); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - gen_helper_lldt(tcg_ctx, cpu_env, cpu_tmp2_i32); - } - break; - case 1: /* str */ - if (!s->pe || s->vm86) - goto illegal_op; - gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector)); - ot = mod == 3 ? 
dflag : MO_16; - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); - break; - case 3: /* ltr */ - if (!s->pe || s->vm86) - goto illegal_op; - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); - gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - gen_jmp_im(s, pc_start - s->cs_base); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); - gen_helper_ltr(tcg_ctx, cpu_env, cpu_tmp2_i32); - } - break; - case 4: /* verr */ - case 5: /* verw */ - if (!s->pe || s->vm86) - goto illegal_op; - gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - gen_update_cc_op(s); - if (op == 4) { - gen_helper_verr(tcg_ctx, cpu_env, *cpu_T[0]); - } else { - gen_helper_verw(tcg_ctx, cpu_env, *cpu_T[0]); - } - set_cc_op(s, CC_OP_EFLAGS); - break; - default: - goto illegal_op; - } - break; - case 0x101: - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - op = (modrm >> 3) & 7; - rm = modrm & 7; - switch(op) { - case 0: /* sgdt */ - if (mod == 3) - goto illegal_op; - gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); - gen_lea_modrm(env, s, modrm); - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit)); - gen_op_st_v(s, MO_16, *cpu_T[0], cpu_A0); - gen_add_A0_im(s, 2); - tcg_gen_ld_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base)); - if (dflag == MO_16) { - tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 0xffffff); - } - gen_op_st_v(s, CODE64(s) + MO_32, *cpu_T[0], cpu_A0); - break; - case 1: - if (mod == 3) { - switch (rm) { - case 0: /* monitor */ - if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || - s->cpl != 0) - goto illegal_op; - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - tcg_gen_mov_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EAX]); - gen_extu(tcg_ctx, s->aflag, cpu_A0); - gen_add_A0_ds_seg(s); - gen_helper_monitor(tcg_ctx, cpu_env, cpu_A0); - break; - case 1: /* mwait */ - if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || - s->cpl != 0) - goto illegal_op; - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_mwait(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); - gen_eob(s); - break; - case 2: /* clac */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || - s->cpl != 0) { - goto illegal_op; - } - gen_helper_clac(tcg_ctx, cpu_env); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - break; - case 3: /* stac */ - if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || - s->cpl != 0) { - goto illegal_op; - } - gen_helper_stac(tcg_ctx, cpu_env); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - break; - default: - goto illegal_op; - } - } else { /* sidt */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); - gen_lea_modrm(env, s, modrm); - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit)); - gen_op_st_v(s, MO_16, *cpu_T[0], cpu_A0); - gen_add_A0_im(s, 2); - tcg_gen_ld_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base)); - if (dflag == MO_16) { - tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 0xffffff); - } - gen_op_st_v(s, CODE64(s) + MO_32, *cpu_T[0], cpu_A0); - } - break; - case 2: /* lgdt */ - case 3: /* lidt */ - if (mod == 3) { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - switch(rm) { - case 0: /* VMRUN */ - if (!(s->flags & HF_SVME_MASK) || !s->pe) - goto illegal_op; - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - break; - } else { - gen_helper_vmrun(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, 
s->aflag - 1), - tcg_const_i32(tcg_ctx, s->pc - pc_start)); - tcg_gen_exit_tb(tcg_ctx, 0); - s->is_jmp = DISAS_TB_JUMP; - } - break; - case 1: /* VMMCALL */ - if (!(s->flags & HF_SVME_MASK)) - goto illegal_op; - gen_helper_vmmcall(tcg_ctx, cpu_env); - break; - case 2: /* VMLOAD */ - if (!(s->flags & HF_SVME_MASK) || !s->pe) - goto illegal_op; - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - break; - } else { - gen_helper_vmload(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1)); - } - break; - case 3: /* VMSAVE */ - if (!(s->flags & HF_SVME_MASK) || !s->pe) - goto illegal_op; - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - break; - } else { - gen_helper_vmsave(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1)); - } - break; - case 4: /* STGI */ - if ((!(s->flags & HF_SVME_MASK) && - !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || - !s->pe) - goto illegal_op; - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - break; - } else { - gen_helper_stgi(tcg_ctx, cpu_env); - } - break; - case 5: /* CLGI */ - if (!(s->flags & HF_SVME_MASK) || !s->pe) - goto illegal_op; - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - break; - } else { - gen_helper_clgi(tcg_ctx, cpu_env); - } - break; - case 6: /* SKINIT */ - if ((!(s->flags & HF_SVME_MASK) && - !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || - !s->pe) - goto illegal_op; - gen_helper_skinit(tcg_ctx, cpu_env); - break; - case 7: /* INVLPGA */ - if (!(s->flags & HF_SVME_MASK) || !s->pe) - goto illegal_op; - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - break; - } else { - gen_helper_invlpga(tcg_ctx, cpu_env, - tcg_const_i32(tcg_ctx, s->aflag - 1)); - } - break; - default: - goto illegal_op; - } - } else if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_svm_check_intercept(s, pc_start, - op==2 ? 
SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE); - gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_16, *cpu_T[1], cpu_A0); - gen_add_A0_im(s, 2); - gen_op_ld_v(s, CODE64(s) + MO_32, *cpu_T[0], cpu_A0); - if (dflag == MO_16) { - tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 0xffffff); - } - if (op == 2) { - tcg_gen_st_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base)); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit)); - } else { - tcg_gen_st_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base)); - tcg_gen_st32_tl(tcg_ctx, *cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit)); - } - } - break; - case 4: /* smsw */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); -#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4); -#else - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0])); -#endif - gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1); - break; - case 6: /* lmsw */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); - gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - gen_helper_lmsw(tcg_ctx, cpu_env, *cpu_T[0]); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } - break; - case 7: - if (mod != 3) { /* invlpg */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_lea_modrm(env, s, modrm); - gen_helper_invlpg(tcg_ctx, cpu_env, cpu_A0); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } - } else { - switch (rm) { - case 0: /* swapgs */ -#ifdef TARGET_X86_64 - if (CODE64(s)) { - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - tcg_gen_ld_tl(tcg_ctx, *cpu_T[0], cpu_env, - offsetof(CPUX86State,segs[R_GS].base)); - tcg_gen_ld_tl(tcg_ctx, *cpu_T[1], cpu_env, - offsetof(CPUX86State,kernelgsbase)); - tcg_gen_st_tl(tcg_ctx, *cpu_T[1], cpu_env, - offsetof(CPUX86State,segs[R_GS].base)); - tcg_gen_st_tl(tcg_ctx, *cpu_T[0], cpu_env, - offsetof(CPUX86State,kernelgsbase)); - } - } else -#endif - { - goto illegal_op; - } - break; - case 1: /* rdtscp */ - if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) - goto illegal_op; - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_rdtscp(tcg_ctx, cpu_env); - break; - default: - goto illegal_op; - } - } - break; - default: - goto illegal_op; - } - break; - case 0x108: /* invd */ - case 0x109: /* wbinvd */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_svm_check_intercept(s, pc_start, (b & 2) ? 
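/* note: b is 0x108 (invd) or 0x109 (wbinvd), so b & 2 is always zero and the WBINVD intercept is checked for both opcodes; this quirk is inherited from upstream QEMU */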
SVM_EXIT_INVD : SVM_EXIT_WBINVD); - /* nothing to do */ - } - break; - case 0x63: /* arpl or movslS (x86_64) */ -#ifdef TARGET_X86_64 - if (CODE64(s)) { - int d_ot; - /* d_ot is the size of destination */ - d_ot = dflag; - - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - - if (mod == 3) { - gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], rm); - /* sign extend */ - if (d_ot == MO_64) { - tcg_gen_ext32s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); - } - gen_op_mov_reg_v(tcg_ctx, d_ot, reg, *cpu_T[0]); - } else { - gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, MO_32 | MO_SIGN, *cpu_T[0], cpu_A0); - gen_op_mov_reg_v(tcg_ctx, d_ot, reg, *cpu_T[0]); - } - } else -#endif - { - int label1; - TCGv t0, t1, t2, a0; - - if (!s->pe || s->vm86) - goto illegal_op; - t0 = tcg_temp_local_new(tcg_ctx); - t1 = tcg_temp_local_new(tcg_ctx); - t2 = tcg_temp_local_new(tcg_ctx); - ot = MO_16; - modrm = cpu_ldub_code(env, s->pc++); - reg = (modrm >> 3) & 7; - mod = (modrm >> 6) & 3; - rm = modrm & 7; - if (mod != 3) { - gen_lea_modrm(env, s, modrm); - gen_op_ld_v(s, ot, t0, cpu_A0); - a0 = tcg_temp_local_new(tcg_ctx); - tcg_gen_mov_tl(tcg_ctx, a0, cpu_A0); - } else { - gen_op_mov_v_reg(tcg_ctx, ot, t0, rm); - TCGV_UNUSED(a0); - } - gen_op_mov_v_reg(tcg_ctx, ot, t1, reg); - tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, t0, 3); - tcg_gen_andi_tl(tcg_ctx, t1, t1, 3); - tcg_gen_movi_tl(tcg_ctx, t2, 0); - label1 = gen_new_label(tcg_ctx); - tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, cpu_tmp0, t1, label1); - tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3); - tcg_gen_or_tl(tcg_ctx, t0, t0, t1); - tcg_gen_movi_tl(tcg_ctx, t2, CC_Z); - gen_set_label(tcg_ctx, label1); - if (mod != 3) { - gen_op_st_v(s, ot, t0, a0); - tcg_temp_free(tcg_ctx, a0); - } else { - gen_op_mov_reg_v(tcg_ctx, ot, rm, t0); - } - gen_compute_eflags(s); - tcg_gen_andi_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, ~CC_Z); - tcg_gen_or_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, t2); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); - tcg_temp_free(tcg_ctx, t2); - } - break; - case 0x102: /* lar */ - case 0x103: /* lsl */ - { - int label1; - TCGv t0; - if (!s->pe || s->vm86) - goto illegal_op; - ot = dflag != MO_16 ? MO_32 : MO_16; - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); - t0 = tcg_temp_local_new(tcg_ctx); - gen_update_cc_op(s); - if (b == 0x102) { - gen_helper_lar(tcg_ctx, t0, cpu_env, *cpu_T[0]); - } else { - gen_helper_lsl(tcg_ctx, t0, cpu_env, *cpu_T[0]); - } - tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_cc_src, CC_Z); - label1 = gen_new_label(tcg_ctx); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, cpu_tmp0, 0, label1); - gen_op_mov_reg_v(tcg_ctx, ot, reg, t0); - gen_set_label(tcg_ctx, label1); - set_cc_op(s, CC_OP_EFLAGS); - tcg_temp_free(tcg_ctx, t0); - } - break; - case 0x118: - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - op = (modrm >> 3) & 7; - switch(op) { - case 0: /* prefetchnta */ - case 1: /* prefetchnt0 */ - case 2: /* prefetchnt0 */ - case 3: /* prefetchnt0 */ - if (mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - /* nothing more to do */ - break; - default: /* nop (multi byte) */ - gen_nop_modrm(env, s, modrm); - break; - } - break; - //case 0x119 ... 
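/* the commented-out GNU case ranges here and below were expanded into explicit case labels, presumably for compilers without the case-range extension */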
0x11f: /* nop (multi byte) */ - case 0x119: case 0x11a: case 0x11b: case 0x11c: case 0x11d: case 0x11e: case 0x11f: - modrm = cpu_ldub_code(env, s->pc++); - gen_nop_modrm(env, s, modrm); - break; - case 0x120: /* mov reg, crN */ - case 0x122: /* mov crN, reg */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - modrm = cpu_ldub_code(env, s->pc++); - /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). - * AMD documentation (24594.pdf) and testing of - * intel 386 and 486 processors all show that the mod bits - * are assumed to be 1's, regardless of actual values. - */ - rm = (modrm & 7) | REX_B(s); - reg = ((modrm >> 3) & 7) | rex_r; - if (CODE64(s)) - ot = MO_64; - else - ot = MO_32; - if ((prefixes & PREFIX_LOCK) && (reg == 0) && - (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) { - reg = 8; - } - switch(reg) { - case 0: - case 2: - case 3: - case 4: - case 8: - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - if (b & 2) { - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); - gen_helper_write_crN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, reg), - *cpu_T[0]); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } else { - gen_helper_read_crN(tcg_ctx, *cpu_T[0], cpu_env, tcg_const_i32(tcg_ctx, reg)); - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - } - break; - default: - goto illegal_op; - } - } - break; - case 0x121: /* mov reg, drN */ - case 0x123: /* mov drN, reg */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - modrm = cpu_ldub_code(env, s->pc++); - /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). - * AMD documentation (24594.pdf) and testing of - * intel 386 and 486 processors all show that the mod bits - * are assumed to be 1's, regardless of actual values. 
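* The operand is therefore always treated as a register; DR4, DR5 and DR8 and above are rejected as illegal just below.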
- */ - rm = (modrm & 7) | REX_B(s); - reg = ((modrm >> 3) & 7) | rex_r; - if (CODE64(s)) - ot = MO_64; - else - ot = MO_32; - /* XXX: do it dynamically with CR4.DE bit */ - if (reg == 4 || reg == 5 || reg >= 8) - goto illegal_op; - if (b & 2) { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); - gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); - gen_helper_movl_drN_T0(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, reg), *cpu_T[0]); - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } else { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); - tcg_gen_ld_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg])); - gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); - } - } - break; - case 0x106: /* clts */ - if (s->cpl != 0) { - gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); - } else { - gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); - gen_helper_clts(tcg_ctx, cpu_env); - /* abort block because static cpu state changed */ - gen_jmp_im(s, s->pc - s->cs_base); - gen_eob(s); - } - break; - /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ - case 0x1c3: /* MOVNTI reg, mem */ - if (!(s->cpuid_features & CPUID_SSE2)) - goto illegal_op; - ot = mo_64_32(dflag); - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - if (mod == 3) - goto illegal_op; - reg = ((modrm >> 3) & 7) | rex_r; - /* generate a generic store */ - gen_ldst_modrm(env, s, modrm, ot, reg, 1); - break; - case 0x1ae: - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - op = (modrm >> 3) & 7; - switch(op) { - case 0: /* fxsave */ - if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) || - (s->prefix & PREFIX_LOCK)) - goto illegal_op; - if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); - break; - } - gen_lea_modrm(env, s, modrm); - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fxsave(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag == MO_64)); - break; - case 1: /* fxrstor */ - if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) || - (s->prefix & PREFIX_LOCK)) - goto illegal_op; - if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); - break; - } - gen_lea_modrm(env, s, modrm); - gen_update_cc_op(s); - gen_jmp_im(s, pc_start - s->cs_base); - gen_helper_fxrstor(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag == MO_64)); - break; - case 2: /* ldmxcsr */ - case 3: /* stmxcsr */ - if (s->flags & HF_TS_MASK) { - gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); - break; - } - if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) || - mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - if (op == 2) { - tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, - s->mem_index, MO_LEUL); - gen_helper_ldmxcsr(tcg_ctx, cpu_env, cpu_tmp2_i32); - } else { - tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr)); - gen_op_st_v(s, MO_32, *cpu_T[0], cpu_A0); - } - break; - case 5: /* lfence */ - case 6: /* mfence */ - if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2)) - goto illegal_op; - break; - case 7: /* sfence / clflush */ - if ((modrm & 0xc7) == 0xc0) { - /* sfence */ - /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */ - if (!(s->cpuid_features & CPUID_SSE)) - goto illegal_op; - } else { - /* clflush */ - if (!(s->cpuid_features & CPUID_CLFLUSH)) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - } - break; - default: - goto illegal_op; - } - break; - case 0x10d: 
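/* 0F 0D: AMD 3DNow! PREFETCH/PREFETCHW group; only the effective address is computed, the hint itself is a no-op here */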
/* 3DNow! prefetch(w) */ - modrm = cpu_ldub_code(env, s->pc++); - mod = (modrm >> 6) & 3; - if (mod == 3) - goto illegal_op; - gen_lea_modrm(env, s, modrm); - /* ignore for now */ - break; - case 0x1aa: /* rsm */ - gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); - if (!(s->flags & HF_SMM_MASK)) - goto illegal_op; - gen_update_cc_op(s); - gen_jmp_im(s, s->pc - s->cs_base); - gen_helper_rsm(tcg_ctx, cpu_env); - gen_eob(s); - break; - case 0x1b8: /* SSE4.2 popcnt */ - if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != - PREFIX_REPZ) - goto illegal_op; - if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) - goto illegal_op; - - modrm = cpu_ldub_code(env, s->pc++); - reg = ((modrm >> 3) & 7) | rex_r; - - if (s->prefix & PREFIX_DATA) { - ot = MO_16; - } else { - ot = mo_64_32(dflag); - } - - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - gen_helper_popcnt(tcg_ctx, *cpu_T[0], cpu_env, *cpu_T[0], tcg_const_i32(tcg_ctx, ot)); - gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); - - set_cc_op(s, CC_OP_EFLAGS); - break; - case 0x10e: case 0x10f: - /* 3DNow! instructions, ignore prefixes */ - s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); - case 0x110: case 0x111: case 0x112: case 0x113: case 0x114: case 0x115: case 0x116: case 0x117: //case 0x110 ... 0x117: - case 0x128: case 0x129: case 0x12a: case 0x12b: case 0x12c: case 0x12d: case 0x12e: case 0x12f: //case 0x128 ... 0x12f: - case 0x138: case 0x139: case 0x13a: - // case 0x150 ... 0x179: - case 0x150: case 0x151: case 0x152: case 0x153: case 0x154: case 0x155: case 0x156: case 0x157: - case 0x158: case 0x159: case 0x15a: case 0x15b: case 0x15c: case 0x15d: case 0x15e: case 0x15f: - case 0x160: case 0x161: case 0x162: case 0x163: case 0x164: case 0x165: case 0x166: case 0x167: - case 0x168: case 0x169: case 0x16a: case 0x16b: case 0x16c: case 0x16d: case 0x16e: case 0x16f: - case 0x170: case 0x171: case 0x172: case 0x173: case 0x174: case 0x175: case 0x176: case 0x177: - case 0x178: case 0x179: - // case 0x17c ... 0x17f: - case 0x17c: case 0x17d: case 0x17e: case 0x17f: - case 0x1c2: - case 0x1c4: case 0x1c5: case 0x1c6: - //case 0x1d0 ... 
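/* every opcode range listed here funnels into gen_sse() below, which re-dispatches on the full opcode byte and the active prefixes */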
0x1fe: - case 0x1d0: case 0x1d1: case 0x1d2: case 0x1d3: case 0x1d4: case 0x1d5: case 0x1d6: case 0x1d7: - case 0x1d8: case 0x1d9: case 0x1da: case 0x1db: case 0x1dc: case 0x1dd: case 0x1de: case 0x1df: - case 0x1e0: case 0x1e1: case 0x1e2: case 0x1e3: case 0x1e4: case 0x1e5: case 0x1e6: case 0x1e7: - case 0x1e8: case 0x1e9: case 0x1ea: case 0x1eb: case 0x1ec: case 0x1ed: case 0x1ee: case 0x1ef: - case 0x1f0: case 0x1f1: case 0x1f2: case 0x1f3: case 0x1f4: case 0x1f5: case 0x1f6: case 0x1f7: - case 0x1f8: case 0x1f9: case 0x1fa: case 0x1fb: case 0x1fc: case 0x1fd: case 0x1fe: - gen_sse(env, s, b, pc_start, rex_r); - break; - default: - goto illegal_op; - } - /* lock generation */ - if (s->prefix & PREFIX_LOCK) - gen_helper_unlock(tcg_ctx, cpu_env); - - // Unicorn: patch the callback for the instruction size - if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, pc_start)) { - // int i; - // for(i = 0; i < 20; i++) - // printf("=== [%u] = %x\n", i, *(save_opparam_ptr + i)); - // printf("\n"); - if (changed_cc_op) { - if (cc_op_dirty) -#if TCG_TARGET_REG_BITS == 32 - *(save_opparam_ptr + 16) = s->pc - pc_start; - else - *(save_opparam_ptr + 14) = s->pc - pc_start; -#else - *(save_opparam_ptr + 12) = s->pc - pc_start; - else - *(save_opparam_ptr + 10) = s->pc - pc_start; -#endif - } else { - *(save_opparam_ptr + 1) = s->pc - pc_start; - } - } - - return s->pc; - illegal_op: - if (s->prefix & PREFIX_LOCK) - gen_helper_unlock(tcg_ctx, cpu_env); - /* XXX: ensure that no lock was generated */ - gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base); - return s->pc; -} - -void optimize_flags_init(struct uc_struct *uc) -{ - static const char reg_names[CPU_NB_REGS][4] = { -#ifdef TARGET_X86_64 - "rax", - "rcx", - "rdx", - "rbx", - "rsp", - "rbp", - "rsi", - "rdi", - "r8", - "r9", - "r10", - "r11", - "r12", - "r13", - "r14", - "r15", -#else - "eax", - "ecx", - "edx", - "ebx", - "esp", - "ebp", - "esi", - "edi", -#endif - }; - int i; - TCGContext *tcg_ctx = uc->tcg_ctx; - - tcg_ctx->cpu_env = tcg_global_reg_new_ptr(uc->tcg_ctx, TCG_AREG0, "env"); - tcg_ctx->cpu_cc_op = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUX86State, cc_op), "cc_op"); - tcg_ctx->cpu_cc_dst = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_cc_dst) = tcg_global_mem_new(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUX86State, cc_dst), "cc_dst"); - - tcg_ctx->cpu_cc_src = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_cc_src) = tcg_global_mem_new(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUX86State, cc_src), "cc_src"); - - tcg_ctx->cpu_cc_src2 = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_cc_src2) = tcg_global_mem_new(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUX86State, cc_src2), "cc_src2"); - - for (i = 0; i < CPU_NB_REGS; ++i) { - tcg_ctx->cpu_regs[i] = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_regs[i]) = tcg_global_mem_new(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUX86State, regs[i]), - reg_names[i]); - } -} - -/* generate intermediate code in gen_opc_buf and gen_opparam_buf for - basic block 'tb'. If search_pc is TRUE, also generate PC - information for each intermediate instruction. 
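Unicorn additionally hooks block entry, patches the per-instruction size argument used by UC_HOOK_CODE callbacks, and, when a block starts at the configured end address, imitates a HLT instead of translating further.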
*/ -static inline void gen_intermediate_code_internal(uint8_t *gen_opc_cc_op, - X86CPU *cpu, - TranslationBlock *tb, - bool search_pc) -{ - CPUState *cs = CPU(cpu); - CPUX86State *env = &cpu->env; - TCGContext *tcg_ctx = env->uc->tcg_ctx; - DisasContext dc1, *dc = &dc1; - target_ulong pc_ptr; - uint16_t *gen_opc_end; - CPUBreakpoint *bp; - int j; - int lj = -1; - uint64_t flags; - target_ulong pc_start; - target_ulong cs_base; - int num_insns = 0; - int max_insns; - bool block_full = false; - - /* generate intermediate code */ - pc_start = tb->pc; - cs_base = tb->cs_base; - flags = tb->flags; - - dc->uc = env->uc; - dc->pe = (flags >> HF_PE_SHIFT) & 1; - dc->code32 = (flags >> HF_CS32_SHIFT) & 1; - dc->ss32 = (flags >> HF_SS32_SHIFT) & 1; - dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1; - dc->f_st = 0; - dc->vm86 = (flags >> VM_SHIFT) & 1; - dc->cpl = (flags >> HF_CPL_SHIFT) & 3; - dc->iopl = (flags >> IOPL_SHIFT) & 3; - dc->tf = (flags >> TF_SHIFT) & 1; - dc->singlestep_enabled = cs->singlestep_enabled; - dc->last_cc_op = dc->cc_op = CC_OP_DYNAMIC; - dc->cc_op_dirty = false; - dc->cs_base = cs_base; - dc->tb = tb; - dc->popl_esp_hack = 0; - /* select memory access functions */ - dc->mem_index = 0; - if (flags & HF_SOFTMMU_MASK) { - dc->mem_index = cpu_mmu_index(env); - } - dc->cpuid_features = env->features[FEAT_1_EDX]; - dc->cpuid_ext_features = env->features[FEAT_1_ECX]; - dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX]; - dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; - dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; -#ifdef TARGET_X86_64 - dc->lma = (flags >> HF_LMA_SHIFT) & 1; - dc->code64 = (flags >> HF_CS64_SHIFT) & 1; -#endif - dc->flags = flags; - dc->jmp_opt = !(dc->tf || cs->singlestep_enabled || - (flags & HF_INHIBIT_IRQ_MASK) -#ifndef CONFIG_SOFTMMU - || (flags & HF_SOFTMMU_MASK) -#endif - ); -#if 0 - /* check addseg logic */ - if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32)) - printf("ERROR addseg\n"); -#endif - - if (!env->uc->init_tcg) - tcg_ctx->cpu_T[0] = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_T[0] = tcg_temp_new(tcg_ctx); - - if (!env->uc->init_tcg) - tcg_ctx->cpu_T[1] = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_T[1] = tcg_temp_new(tcg_ctx); - - if (!env->uc->init_tcg) - tcg_ctx->cpu_A0 = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_A0) = tcg_temp_new(tcg_ctx); - - if (!env->uc->init_tcg) - tcg_ctx->cpu_tmp0 = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_tmp0) = tcg_temp_new(tcg_ctx); - - if (!env->uc->init_tcg) - tcg_ctx->cpu_tmp4 = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_tmp4) = tcg_temp_new(tcg_ctx); - - tcg_ctx->cpu_tmp1_i64 = tcg_temp_new_i64(tcg_ctx); - tcg_ctx->cpu_tmp2_i32 = tcg_temp_new_i32(tcg_ctx); - tcg_ctx->cpu_tmp3_i32 = tcg_temp_new_i32(tcg_ctx); - tcg_ctx->cpu_ptr0 = tcg_temp_new_ptr(tcg_ctx); - tcg_ctx->cpu_ptr1 = tcg_temp_new_ptr(tcg_ctx); - - if (!env->uc->init_tcg) - tcg_ctx->cpu_cc_srcT = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_cc_srcT) = tcg_temp_local_new(tcg_ctx); - - // done with initializing TCG variables - env->uc->init_tcg = true; - - pc_ptr = pc_start; - - // early check to see if the address of this block is the until address - if (tb->pc == env->uc->addr_end) { - // imitate the HLT instruction - gen_tb_start(tcg_ctx); - gen_jmp_im(dc, tb->pc - tb->cs_base); - gen_helper_hlt(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 0)); - dc->is_jmp = DISAS_TB_JUMP; - goto done_generating; - } - - gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE; - - 
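/* the loop below translates one guest instruction per iteration and stops on an explicit jump, on single-step/TF, when the opcode buffer nears OPC_MAX_SIZE, when the block approaches a page boundary (TARGET_PAGE_SIZE - 32), or when max_insns is exhausted; blocks cut short by the space limits are flagged block_full so the next block's UC_HOOK_BLOCK callback is suppressed */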
dc->is_jmp = DISAS_NEXT; - max_insns = tb->cflags & CF_COUNT_MASK; - if (max_insns == 0) - max_insns = CF_COUNT_MASK; - - // Unicorn: trace this block on request - // Only hook this block if the previous block was not truncated due to space - if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) { - env->uc->block_addr = pc_start; - env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1; - gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start); - } else { - env->uc->size_arg = -1; - } - - gen_tb_start(tcg_ctx); - for(;;) { - if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { - QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { - if (bp->pc == pc_ptr && - !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) { - gen_debug(dc, pc_ptr - dc->cs_base); - goto done_generating; - } - } - } - if (search_pc) { - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - if (lj < j) { - lj++; - while (lj < j) - tcg_ctx->gen_opc_instr_start[lj++] = 0; - } - tcg_ctx->gen_opc_pc[lj] = pc_ptr; - gen_opc_cc_op[lj] = dc->cc_op; - tcg_ctx->gen_opc_instr_start[lj] = 1; - // tcg_ctx->gen_opc_icount[lj] = num_insns; - } - //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) - // gen_io_start(); - - // Unicorn: save current PC address to sync EIP - dc->prev_pc = pc_ptr; - pc_ptr = disas_insn(env, dc, pc_ptr); - num_insns++; - /* stop translation if indicated */ - if (dc->is_jmp) - break; - /* if single step mode, we generate only one instruction and - generate an exception */ - /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear - the flag and abort the translation to give the irqs a - change to be happen */ - if (dc->tf || dc->singlestep_enabled || - (flags & HF_INHIBIT_IRQ_MASK)) { - gen_jmp_im(dc, pc_ptr - dc->cs_base); - gen_eob(dc); - break; - } - /* if too long translation, stop generation too */ - if (tcg_ctx->gen_opc_ptr >= gen_opc_end || - (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) || - num_insns >= max_insns) { - gen_jmp_im(dc, pc_ptr - dc->cs_base); - gen_eob(dc); - block_full = true; - break; - } - /* - gen_jmp_im(dc, pc_ptr - dc->cs_base); - gen_eob(dc); - block_full = true; - break; - */ - } - //if (tb->cflags & CF_LAST_IO) - // gen_io_end(); -done_generating: - gen_tb_end(tcg_ctx, tb, num_insns); - *tcg_ctx->gen_opc_ptr = INDEX_op_end; - /* we don't forget to fill the last values */ - if (search_pc) { - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - lj++; - while (lj <= j) - tcg_ctx->gen_opc_instr_start[lj++] = 0; - } - - if (!search_pc) { - tb->size = pc_ptr - pc_start; - } - - env->uc->block_full = block_full; -} - -void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - gen_intermediate_code_internal(tcg_ctx->gen_opc_cc_op, - x86_env_get_cpu(env), tb, false); -} - -void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - gen_intermediate_code_internal(tcg_ctx->gen_opc_cc_op, - x86_env_get_cpu(env), tb, true); -} - -void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos) -{ - int cc_op; - TCGContext *tcg_ctx = env->uc->tcg_ctx; - env->eip = tcg_ctx->gen_opc_pc[pc_pos] - tb->cs_base; - cc_op = tcg_ctx->gen_opc_cc_op[pc_pos]; - if (cc_op != CC_OP_DYNAMIC) - env->cc_op = cc_op; -} diff --git a/qemu/target-i386/unicorn.c b/qemu/target-i386/unicorn.c deleted file mode 100644 index 9ab5dd22..00000000 --- a/qemu/target-i386/unicorn.c +++ /dev/null @@ -1,1528 +0,0 @@ -/* Unicorn 
Emulator Engine */ -/* By Nguyen Anh Quynh , 2015 */ - -#include "hw/boards.h" -#include "hw/i386/pc.h" -#include "sysemu/cpus.h" -#include "unicorn.h" -#include "cpu.h" -#include "tcg.h" -#include "unicorn_common.h" -#include <unicorn/x86.h> /* needed for uc_x86_mmr */ -#include "uc_priv.h" - -#define FPST(n) (X86_CPU(uc, mycpu)->env.fpregs[(X86_CPU(uc, mycpu)->env.fpstt + (n)) & 7].d) - -#define X86_NON_CS_FLAGS (DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK) -static void load_seg_16_helper(CPUX86State *env, int seg, uint32_t selector) -{ - cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, X86_NON_CS_FLAGS); -} - - -extern void helper_wrmsr(CPUX86State *env); -extern void helper_rdmsr(CPUX86State *env); - -const int X86_REGS_STORAGE_SIZE = offsetof(CPUX86State, tlb_table); - -static void x86_set_pc(struct uc_struct *uc, uint64_t address) -{ - CPUState* cpu = uc->cpu; - int16_t cs = (uint16_t)X86_CPU(uc, cpu)->env.segs[R_CS].selector; - if(uc->mode == UC_MODE_16) - ((CPUX86State *)uc->current_cpu->env_ptr)->eip = address - cs*16; - else - ((CPUX86State *)uc->current_cpu->env_ptr)->eip = address; -} - -void x86_release(void *ctx); - -void x86_release(void *ctx) -{ - int i; - TCGContext *s = (TCGContext *) ctx; - - cpu_breakpoint_remove_all(s->uc->cpu, BP_CPU); - - release_common(ctx); - - // arch specific - g_free(s->cpu_A0); - g_free(s->cpu_T[0]); - g_free(s->cpu_T[1]); - g_free(s->cpu_tmp0); - g_free(s->cpu_tmp4); - g_free(s->cpu_cc_srcT); - g_free(s->cpu_cc_dst); - g_free(s->cpu_cc_src); - g_free(s->cpu_cc_src2); - - for (i = 0; i < CPU_NB_REGS; ++i) { - g_free(s->cpu_regs[i]); - } - - g_free(s->tb_ctx.tbs); -} - -void x86_reg_reset(struct uc_struct *uc) -{ - CPUArchState *env = uc->cpu->env_ptr; - - env->features[FEAT_1_EDX] = CPUID_CX8 | CPUID_CMOV | CPUID_SSE2 | CPUID_FXSR | CPUID_SSE | CPUID_CLFLUSH; - env->features[FEAT_1_ECX] = CPUID_EXT_SSSE3 | CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_AES | CPUID_EXT_CX16; - env->features[FEAT_8000_0001_EDX] = CPUID_EXT2_3DNOW | CPUID_EXT2_RDTSCP; - env->features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_SKINIT | CPUID_EXT3_CR8LEG; - env->features[FEAT_7_0_EBX] = CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP; - - memset(env->regs, 0, sizeof(env->regs)); - memset(env->segs, 0, sizeof(env->segs)); - memset(env->cr, 0, sizeof(env->cr)); - - memset(&env->ldt, 0, sizeof(env->ldt)); - memset(&env->gdt, 0, sizeof(env->gdt)); - memset(&env->tr, 0, sizeof(env->tr)); - memset(&env->idt, 0, sizeof(env->idt)); - - env->eip = 0; - env->eflags = 0; - env->eflags0 = 0; - env->cc_op = CC_OP_EFLAGS; - - env->fpstt = 0; /* top of stack index */ - env->fpus = 0; - env->fpuc = 0; - memset(env->fptags, 0, sizeof(env->fptags)); /* 0 = valid, 1 = empty */ - - env->mxcsr = 0; - memset(env->xmm_regs, 0, sizeof(env->xmm_regs)); - memset(&env->xmm_t0, 0, sizeof(env->xmm_t0)); - memset(&env->mmx_t0, 0, sizeof(env->mmx_t0)); - - memset(env->ymmh_regs, 0, sizeof(env->ymmh_regs)); - - memset(env->opmask_regs, 0, sizeof(env->opmask_regs)); - memset(env->zmmh_regs, 0, sizeof(env->zmmh_regs)); - - /* sysenter registers */ - env->sysenter_cs = 0; - env->sysenter_esp = 0; - env->sysenter_eip = 0; - env->efer = 0; - env->star = 0; - - env->vm_hsave = 0; - - env->tsc = 0; - env->tsc_adjust = 0; - env->tsc_deadline = 0; - - env->mcg_status = 0; - env->msr_ia32_misc_enable = 0; - env->msr_ia32_feature_control = 0; - - env->msr_fixed_ctr_ctrl = 0; - env->msr_global_ctrl = 0; - env->msr_global_status = 0; -
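/* the remaining performance-counter MSRs are cleared below as well, so every reset yields the same deterministic MSR state */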
env->msr_global_ovf_ctrl = 0; - memset(env->msr_fixed_counters, 0, sizeof(env->msr_fixed_counters)); - memset(env->msr_gp_counters, 0, sizeof(env->msr_gp_counters)); - memset(env->msr_gp_evtsel, 0, sizeof(env->msr_gp_evtsel)); - -#ifdef TARGET_X86_64 - memset(env->hi16_zmm_regs, 0, sizeof(env->hi16_zmm_regs)); - env->lstar = 0; - env->cstar = 0; - env->fmask = 0; - env->kernelgsbase = 0; -#endif - - // TODO: reset other registers in CPUX86State qemu/target-i386/cpu.h - - // properly initialize internal setup for each mode - switch(uc->mode) { - default: - break; - case UC_MODE_16: - env->hflags = 0; - env->cr[0] = 0; - //undo the damage done by the memset of env->segs above - //for R_CS, not quite the same as x86_cpu_reset - cpu_x86_load_seg_cache(env, R_CS, 0, 0, 0xffff, - DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | - DESC_R_MASK | DESC_A_MASK); - //remainder yields same state as x86_cpu_reset - load_seg_16_helper(env, R_DS, 0); - load_seg_16_helper(env, R_ES, 0); - load_seg_16_helper(env, R_SS, 0); - load_seg_16_helper(env, R_FS, 0); - load_seg_16_helper(env, R_GS, 0); - - break; - case UC_MODE_32: - env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_OSFXSR_MASK; - cpu_x86_update_cr0(env, CR0_PE_MASK); // protected mode - break; - case UC_MODE_64: - env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_LMA_MASK | HF_OSFXSR_MASK; - env->hflags &= ~(HF_ADDSEG_MASK); - env->efer |= MSR_EFER_LMA | MSR_EFER_LME; // extended mode activated - cpu_x86_update_cr0(env, CR0_PE_MASK); // protected mode - /* If we are operating in 64bit mode then add the Long Mode flag - * to the CPUID feature flag - */ - env->features[FEAT_8000_0001_EDX] |= CPUID_EXT2_LM; - break; - } -} - -static int x86_msr_read(struct uc_struct *uc, uc_x86_msr *msr) -{ - CPUX86State *env = (CPUX86State *)uc->cpu->env_ptr; - uint64_t ecx = env->regs[R_ECX]; - uint64_t eax = env->regs[R_EAX]; - uint64_t edx = env->regs[R_EDX]; - - env->regs[R_ECX] = msr->rid; - helper_rdmsr(env); - - msr->value = ((uint32_t)env->regs[R_EAX]) | - ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); - - env->regs[R_EAX] = eax; - env->regs[R_ECX] = ecx; - env->regs[R_EDX] = edx; - - /* The implementation doesn't throw exception or return an error if there is one, so - * we will return 0. */ - return 0; -} - -static int x86_msr_write(struct uc_struct *uc, uc_x86_msr *msr) -{ - CPUX86State *env = (CPUX86State *)uc->cpu->env_ptr; - uint64_t ecx = env->regs[R_ECX]; - uint64_t eax = env->regs[R_EAX]; - uint64_t edx = env->regs[R_EDX]; - - env->regs[R_ECX] = msr->rid; - env->regs[R_EAX] = (unsigned int)msr->value; - env->regs[R_EDX] = (unsigned int)(msr->value >> 32); - helper_wrmsr(env); - - env->regs[R_ECX] = ecx; - env->regs[R_EAX] = eax; - env->regs[R_EDX] = edx; - - /* The implementation doesn't throw exception or return an error if there is one, so - * we will return 0. 
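* The MSR id arrives in uc_x86_msr.rid and the 64-bit value is split across EDX:EAX around the helper_wrmsr call; the clobbered GPRs are saved and restored so the access stays invisible to the guest.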
*/ - return 0; -} - -int x86_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - void *value = vals[i]; - switch(regid) { - default: - break; - case UC_X86_REG_FP0: - case UC_X86_REG_FP1: - case UC_X86_REG_FP2: - case UC_X86_REG_FP3: - case UC_X86_REG_FP4: - case UC_X86_REG_FP5: - case UC_X86_REG_FP6: - case UC_X86_REG_FP7: - { - floatx80 reg = X86_CPU(uc, mycpu)->env.fpregs[regid - UC_X86_REG_FP0].d; - cpu_get_fp80(value, (uint16_t*)((char*)value+sizeof(uint64_t)), reg); - } - continue; - case UC_X86_REG_FPSW: - { - uint16_t fpus = X86_CPU(uc, mycpu)->env.fpus; - fpus = fpus & ~0x3800; - fpus |= ( X86_CPU(uc, mycpu)->env.fpstt & 0x7 ) << 11; - *(uint16_t*) value = fpus; - } - continue; - case UC_X86_REG_FPCW: - *(uint16_t*) value = X86_CPU(uc, mycpu)->env.fpuc; - continue; - case UC_X86_REG_FPTAG: - { - #define EXPD(fp) (fp.l.upper & 0x7fff) - #define MANTD(fp) (fp.l.lower) - #define MAXEXPD 0x7fff - int fptag, exp, i; - uint64_t mant; - CPU_LDoubleU tmp; - fptag = 0; - for (i = 7; i >= 0; i--) { - fptag <<= 2; - if (X86_CPU(uc, mycpu)->env.fptags[i]) { - fptag |= 3; - } else { - tmp.d = X86_CPU(uc, mycpu)->env.fpregs[i].d; - exp = EXPD(tmp); - mant = MANTD(tmp); - if (exp == 0 && mant == 0) { - /* zero */ - fptag |= 1; - } else if (exp == 0 || exp == MAXEXPD - || (mant & (1LL << 63)) == 0) { - /* NaNs, infinity, denormal */ - fptag |= 2; - } - } - } - *(uint16_t*) value = fptag; - } - continue; - case UC_X86_REG_XMM0: - case UC_X86_REG_XMM1: - case UC_X86_REG_XMM2: - case UC_X86_REG_XMM3: - case UC_X86_REG_XMM4: - case UC_X86_REG_XMM5: - case UC_X86_REG_XMM6: - case UC_X86_REG_XMM7: - { - float64 *dst = (float64*)value; - XMMReg *reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_XMM0]; - dst[0] = reg->_d[0]; - dst[1] = reg->_d[1]; - continue; - } - case UC_X86_REG_ST0: - case UC_X86_REG_ST1: - case UC_X86_REG_ST2: - case UC_X86_REG_ST3: - case UC_X86_REG_ST4: - case UC_X86_REG_ST5: - case UC_X86_REG_ST6: - case UC_X86_REG_ST7: - { - // value must be big enough to keep 80 bits (10 bytes) - memcpy(value, &FPST(regid - UC_X86_REG_ST0), 10); - continue; - } - case UC_X86_REG_YMM0: - case UC_X86_REG_YMM1: - case UC_X86_REG_YMM2: - case UC_X86_REG_YMM3: - case UC_X86_REG_YMM4: - case UC_X86_REG_YMM5: - case UC_X86_REG_YMM6: - case UC_X86_REG_YMM7: - case UC_X86_REG_YMM8: - case UC_X86_REG_YMM9: - case UC_X86_REG_YMM10: - case UC_X86_REG_YMM11: - case UC_X86_REG_YMM12: - case UC_X86_REG_YMM13: - case UC_X86_REG_YMM14: - case UC_X86_REG_YMM15: - { - float64 *dst = (float64*)value; - XMMReg *lo_reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_YMM0]; - XMMReg *hi_reg = &X86_CPU(uc, mycpu)->env.ymmh_regs[regid - UC_X86_REG_YMM0]; - dst[0] = lo_reg->_d[0]; - dst[1] = lo_reg->_d[1]; - dst[2] = hi_reg->_d[0]; - dst[3] = hi_reg->_d[1]; - continue; - } - } - - switch(uc->mode) { - default: - break; - case UC_MODE_16: - switch(regid) { - default: break; - case UC_X86_REG_ES: - *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_ES].selector; - continue; - case UC_X86_REG_SS: - *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_SS].selector; - continue; - case UC_X86_REG_DS: - *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_DS].selector; - continue; - case UC_X86_REG_FS: - *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_FS].selector; - continue; - case UC_X86_REG_GS: - *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_GS].selector; - continue; - case 
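/* the hidden FS base (selector << 4 after a real-mode load, cf. load_seg_16_helper) is exposed as a 32-bit value even in 16-bit mode */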
UC_X86_REG_FS_BASE: - *(uint32_t *)value = (uint32_t)X86_CPU(uc, mycpu)->env.segs[R_FS].base; - continue; - } - // fall-thru - case UC_MODE_32: - switch(regid) { - default: - break; - case UC_X86_REG_CR0: - case UC_X86_REG_CR1: - case UC_X86_REG_CR2: - case UC_X86_REG_CR3: - case UC_X86_REG_CR4: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0]; - break; - case UC_X86_REG_DR0: - case UC_X86_REG_DR1: - case UC_X86_REG_DR2: - case UC_X86_REG_DR3: - case UC_X86_REG_DR4: - case UC_X86_REG_DR5: - case UC_X86_REG_DR6: - case UC_X86_REG_DR7: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0]; - break; - case UC_X86_REG_EFLAGS: - *(int32_t *)value = cpu_compute_eflags(&X86_CPU(uc, mycpu)->env); - break; - case UC_X86_REG_EAX: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EAX]; - break; - case UC_X86_REG_AX: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]); - break; - case UC_X86_REG_AH: - *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX]); - break; - case UC_X86_REG_AL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX]); - break; - case UC_X86_REG_EBX: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBX]; - break; - case UC_X86_REG_BX: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]); - break; - case UC_X86_REG_BH: - *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX]); - break; - case UC_X86_REG_BL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX]); - break; - case UC_X86_REG_ECX: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ECX]; - break; - case UC_X86_REG_CX: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]); - break; - case UC_X86_REG_CH: - *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX]); - break; - case UC_X86_REG_CL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX]); - break; - case UC_X86_REG_EDX: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDX]; - break; - case UC_X86_REG_DX: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]); - break; - case UC_X86_REG_DH: - *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX]); - break; - case UC_X86_REG_DL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX]); - break; - case UC_X86_REG_ESP: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESP]; - break; - case UC_X86_REG_SP: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]); - break; - case UC_X86_REG_EBP: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBP]; - break; - case UC_X86_REG_BP: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]); - break; - case UC_X86_REG_ESI: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESI]; - break; - case UC_X86_REG_SI: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]); - break; - case UC_X86_REG_EDI: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDI]; - break; - case UC_X86_REG_DI: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]); - break; - case UC_X86_REG_EIP: - *(int32_t *)value = X86_CPU(uc, mycpu)->env.eip; - break; - case UC_X86_REG_IP: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.eip); - break; - case UC_X86_REG_CS: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_CS].selector; - break; - case UC_X86_REG_DS: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_DS].selector; - break; - case UC_X86_REG_SS: - *(int16_t *)value = 
(uint16_t)X86_CPU(uc, mycpu)->env.segs[R_SS].selector; - break; - case UC_X86_REG_ES: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_ES].selector; - break; - case UC_X86_REG_FS: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_FS].selector; - break; - case UC_X86_REG_GS: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_GS].selector; - break; - case UC_X86_REG_IDTR: - ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.idt.limit; - ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.idt.base; - break; - case UC_X86_REG_GDTR: - ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.gdt.limit; - ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.gdt.base; - break; - case UC_X86_REG_LDTR: - ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.ldt.limit; - ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.ldt.base; - ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.ldt.selector; - ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.ldt.flags; - break; - case UC_X86_REG_TR: - ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.tr.limit; - ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.tr.base; - ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.tr.selector; - ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.tr.flags; - break; - case UC_X86_REG_MSR: - x86_msr_read(uc, (uc_x86_msr *)value); - break; - case UC_X86_REG_MXCSR: - *(uint32_t *)value = X86_CPU(uc, mycpu)->env.mxcsr; - break; - case UC_X86_REG_FS_BASE: - *(uint32_t *)value = (uint32_t)X86_CPU(uc, mycpu)->env.segs[R_FS].base; - break; - } - break; - -#ifdef TARGET_X86_64 - case UC_MODE_64: - switch(regid) { - default: - break; - case UC_X86_REG_CR0: - case UC_X86_REG_CR1: - case UC_X86_REG_CR2: - case UC_X86_REG_CR3: - case UC_X86_REG_CR4: - *(int64_t *)value = X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0]; - break; - case UC_X86_REG_DR0: - case UC_X86_REG_DR1: - case UC_X86_REG_DR2: - case UC_X86_REG_DR3: - case UC_X86_REG_DR4: - case UC_X86_REG_DR5: - case UC_X86_REG_DR6: - case UC_X86_REG_DR7: - *(int64_t *)value = X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0]; - break; - case UC_X86_REG_EFLAGS: - *(int64_t *)value = cpu_compute_eflags(&X86_CPU(uc, mycpu)->env); - break; - case UC_X86_REG_RAX: - *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EAX]; - break; - case UC_X86_REG_EAX: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]); - break; - case UC_X86_REG_AX: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]); - break; - case UC_X86_REG_AH: - *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX]); - break; - case UC_X86_REG_AL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX]); - break; - case UC_X86_REG_RBX: - *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBX]; - break; - case UC_X86_REG_EBX: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]); - break; - case UC_X86_REG_BX: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]); - break; - case UC_X86_REG_BH: - *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX]); - break; - case UC_X86_REG_BL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX]); - break; - case UC_X86_REG_RCX: - *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ECX]; - break; - case UC_X86_REG_ECX: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]); - break; - case 
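/* CX, CH and CL alias the low bits of RCX; the READ_* helpers extract just the requested slice */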
UC_X86_REG_CX: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]); - break; - case UC_X86_REG_CH: - *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX]); - break; - case UC_X86_REG_CL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX]); - break; - case UC_X86_REG_RDX: - *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDX]; - break; - case UC_X86_REG_EDX: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]); - break; - case UC_X86_REG_DX: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]); - break; - case UC_X86_REG_DH: - *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX]); - break; - case UC_X86_REG_DL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX]); - break; - case UC_X86_REG_RSP: - *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESP]; - break; - case UC_X86_REG_ESP: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]); - break; - case UC_X86_REG_SP: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]); - break; - case UC_X86_REG_SPL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESP]); - break; - case UC_X86_REG_RBP: - *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBP]; - break; - case UC_X86_REG_EBP: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]); - break; - case UC_X86_REG_BP: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]); - break; - case UC_X86_REG_BPL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBP]); - break; - case UC_X86_REG_RSI: - *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESI]; - break; - case UC_X86_REG_ESI: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]); - break; - case UC_X86_REG_SI: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]); - break; - case UC_X86_REG_SIL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESI]); - break; - case UC_X86_REG_RDI: - *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDI]; - break; - case UC_X86_REG_EDI: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]); - break; - case UC_X86_REG_DI: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]); - break; - case UC_X86_REG_DIL: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDI]); - break; - case UC_X86_REG_RIP: - *(uint64_t *)value = X86_CPU(uc, mycpu)->env.eip; - break; - case UC_X86_REG_EIP: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.eip); - break; - case UC_X86_REG_IP: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.eip); - break; - case UC_X86_REG_CS: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_CS].selector; - break; - case UC_X86_REG_DS: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_DS].selector; - break; - case UC_X86_REG_SS: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_SS].selector; - break; - case UC_X86_REG_ES: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_ES].selector; - break; - case UC_X86_REG_FS: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_FS].selector; - break; - case UC_X86_REG_GS: - *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_GS].selector; - break; - case UC_X86_REG_R8: - *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[8]); - break; - case UC_X86_REG_R8D: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[8]); - break; - case UC_X86_REG_R8W: - *(int16_t *)value 
= READ_WORD(X86_CPU(uc, mycpu)->env.regs[8]); - break; - case UC_X86_REG_R8B: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[8]); - break; - case UC_X86_REG_R9: - *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[9]); - break; - case UC_X86_REG_R9D: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[9]); - break; - case UC_X86_REG_R9W: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[9]); - break; - case UC_X86_REG_R9B: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[9]); - break; - case UC_X86_REG_R10: - *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[10]); - break; - case UC_X86_REG_R10D: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[10]); - break; - case UC_X86_REG_R10W: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[10]); - break; - case UC_X86_REG_R10B: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[10]); - break; - case UC_X86_REG_R11: - *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[11]); - break; - case UC_X86_REG_R11D: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[11]); - break; - case UC_X86_REG_R11W: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[11]); - break; - case UC_X86_REG_R11B: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[11]); - break; - case UC_X86_REG_R12: - *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[12]); - break; - case UC_X86_REG_R12D: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[12]); - break; - case UC_X86_REG_R12W: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[12]); - break; - case UC_X86_REG_R12B: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[12]); - break; - case UC_X86_REG_R13: - *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[13]); - break; - case UC_X86_REG_R13D: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[13]); - break; - case UC_X86_REG_R13W: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[13]); - break; - case UC_X86_REG_R13B: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[13]); - break; - case UC_X86_REG_R14: - *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[14]); - break; - case UC_X86_REG_R14D: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[14]); - break; - case UC_X86_REG_R14W: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[14]); - break; - case UC_X86_REG_R14B: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[14]); - break; - case UC_X86_REG_R15: - *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[15]); - break; - case UC_X86_REG_R15D: - *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[15]); - break; - case UC_X86_REG_R15W: - *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[15]); - break; - case UC_X86_REG_R15B: - *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[15]); - break; - case UC_X86_REG_IDTR: - ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.idt.limit; - ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.idt.base; - break; - case UC_X86_REG_GDTR: - ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.gdt.limit; - ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.gdt.base; - break; - case UC_X86_REG_LDTR: - ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.ldt.limit; - ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.ldt.base; - ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, 
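/* LDTR (and TR below) is reported through uc_x86_mmr, which carries base, limit, selector and flags */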
mycpu)->env.ldt.selector; - ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.ldt.flags; - break; - case UC_X86_REG_TR: - ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.tr.limit; - ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.tr.base; - ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.tr.selector; - ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.tr.flags; - break; - case UC_X86_REG_MSR: - x86_msr_read(uc, (uc_x86_msr *)value); - break; - case UC_X86_REG_MXCSR: - *(uint32_t *)value = X86_CPU(uc, mycpu)->env.mxcsr; - break; - case UC_X86_REG_XMM8: - case UC_X86_REG_XMM9: - case UC_X86_REG_XMM10: - case UC_X86_REG_XMM11: - case UC_X86_REG_XMM12: - case UC_X86_REG_XMM13: - case UC_X86_REG_XMM14: - case UC_X86_REG_XMM15: - { - float64 *dst = (float64*)value; - XMMReg *reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_XMM0]; - dst[0] = reg->_d[0]; - dst[1] = reg->_d[1]; - break; - } - case UC_X86_REG_FS_BASE: - *(uint64_t *)value = (uint64_t)X86_CPU(uc, mycpu)->env.segs[R_FS].base; - break; - case UC_X86_REG_GS_BASE: - *(uint64_t *)value = (uint64_t)X86_CPU(uc, mycpu)->env.segs[R_GS].base; - break; - } - break; -#endif - } - } - - return 0; -} - -int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - int ret; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - const void *value = vals[i]; - switch(regid) { - default: - break; - case UC_X86_REG_FP0: - case UC_X86_REG_FP1: - case UC_X86_REG_FP2: - case UC_X86_REG_FP3: - case UC_X86_REG_FP4: - case UC_X86_REG_FP5: - case UC_X86_REG_FP6: - case UC_X86_REG_FP7: - { - uint64_t mant = *(uint64_t*) value; - uint16_t upper = *(uint16_t*) ((char*)value + sizeof(uint64_t)); - X86_CPU(uc, mycpu)->env.fpregs[regid - UC_X86_REG_FP0].d = cpu_set_fp80(mant, upper); - } - continue; - case UC_X86_REG_FPSW: - { - uint16_t fpus = *(uint16_t*) value; - X86_CPU(uc, mycpu)->env.fpus = fpus & ~0x3800; - X86_CPU(uc, mycpu)->env.fpstt = (fpus >> 11) & 0x7; - } - continue; - case UC_X86_REG_FPCW: - cpu_set_fpuc(&X86_CPU(uc, mycpu)->env, *(uint16_t *)value); - continue; - case UC_X86_REG_FPTAG: - { - int i; - uint16_t fptag = *(uint16_t*) value; - for (i = 0; i < 8; i++) { - X86_CPU(uc, mycpu)->env.fptags[i] = ((fptag & 3) == 3); - fptag >>= 2; - } - - continue; - } - break; - case UC_X86_REG_XMM0: - case UC_X86_REG_XMM1: - case UC_X86_REG_XMM2: - case UC_X86_REG_XMM3: - case UC_X86_REG_XMM4: - case UC_X86_REG_XMM5: - case UC_X86_REG_XMM6: - case UC_X86_REG_XMM7: - { - float64 *src = (float64*)value; - XMMReg *reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_XMM0]; - reg->_d[0] = src[0]; - reg->_d[1] = src[1]; - continue; - } - case UC_X86_REG_ST0: - case UC_X86_REG_ST1: - case UC_X86_REG_ST2: - case UC_X86_REG_ST3: - case UC_X86_REG_ST4: - case UC_X86_REG_ST5: - case UC_X86_REG_ST6: - case UC_X86_REG_ST7: - { - // value must be big enough to keep 80 bits (10 bytes) - memcpy(&FPST(regid - UC_X86_REG_ST0), value, 10); - continue; - } - case UC_X86_REG_YMM0: - case UC_X86_REG_YMM1: - case UC_X86_REG_YMM2: - case UC_X86_REG_YMM3: - case UC_X86_REG_YMM4: - case UC_X86_REG_YMM5: - case UC_X86_REG_YMM6: - case UC_X86_REG_YMM7: - case UC_X86_REG_YMM8: - case UC_X86_REG_YMM9: - case UC_X86_REG_YMM10: - case UC_X86_REG_YMM11: - case UC_X86_REG_YMM12: - case UC_X86_REG_YMM13: - case UC_X86_REG_YMM14: - case UC_X86_REG_YMM15: - { - float64 *src = (float64*)value; - XMMReg *lo_reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - 
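/* YMM state is stored split: the low 128 bits live in xmm_regs and the high 128 bits in ymmh_regs, so both halves are written */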
UC_X86_REG_YMM0]; - XMMReg *hi_reg = &X86_CPU(uc, mycpu)->env.ymmh_regs[regid - UC_X86_REG_YMM0]; - lo_reg->_d[0] = src[0]; - lo_reg->_d[1] = src[1]; - hi_reg->_d[0] = src[2]; - hi_reg->_d[1] = src[3]; - continue; - } - } - - switch(uc->mode) { - default: - break; - - case UC_MODE_16: - switch(regid) { - default: break; - case UC_X86_REG_ES: - load_seg_16_helper(&X86_CPU(uc, mycpu)->env, R_ES, *(uint16_t *)value); - continue; - case UC_X86_REG_SS: - load_seg_16_helper(&X86_CPU(uc, mycpu)->env, R_SS, *(uint16_t *)value); - continue; - case UC_X86_REG_DS: - load_seg_16_helper(&X86_CPU(uc, mycpu)->env, R_DS, *(uint16_t *)value); - continue; - case UC_X86_REG_FS: - load_seg_16_helper(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value); - continue; - case UC_X86_REG_GS: - load_seg_16_helper(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value); - continue; - } - // fall-thru - case UC_MODE_32: - switch(regid) { - default: - break; - case UC_X86_REG_CR0: - case UC_X86_REG_CR1: - case UC_X86_REG_CR2: - case UC_X86_REG_CR3: - case UC_X86_REG_CR4: - X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0] = *(uint32_t *)value; - break; - case UC_X86_REG_DR0: - case UC_X86_REG_DR1: - case UC_X86_REG_DR2: - case UC_X86_REG_DR3: - case UC_X86_REG_DR4: - case UC_X86_REG_DR5: - case UC_X86_REG_DR6: - case UC_X86_REG_DR7: - X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0] = *(uint32_t *)value; - break; - case UC_X86_REG_EFLAGS: - cpu_load_eflags(&X86_CPU(uc, mycpu)->env, *(uint32_t *)value, -1); - X86_CPU(uc, mycpu)->env.eflags0 = *(uint32_t *)value; - break; - case UC_X86_REG_EAX: - X86_CPU(uc, mycpu)->env.regs[R_EAX] = *(uint32_t *)value; - break; - case UC_X86_REG_AX: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint16_t *)value); - break; - case UC_X86_REG_AH: - WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); - break; - case UC_X86_REG_AL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); - break; - case UC_X86_REG_EBX: - X86_CPU(uc, mycpu)->env.regs[R_EBX] = *(uint32_t *)value; - break; - case UC_X86_REG_BX: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint16_t *)value); - break; - case UC_X86_REG_BH: - WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); - break; - case UC_X86_REG_BL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); - break; - case UC_X86_REG_ECX: - X86_CPU(uc, mycpu)->env.regs[R_ECX] = *(uint32_t *)value; - break; - case UC_X86_REG_CX: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint16_t *)value); - break; - case UC_X86_REG_CH: - WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); - break; - case UC_X86_REG_CL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); - break; - case UC_X86_REG_EDX: - X86_CPU(uc, mycpu)->env.regs[R_EDX] = *(uint32_t *)value; - break; - case UC_X86_REG_DX: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint16_t *)value); - break; - case UC_X86_REG_DH: - WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); - break; - case UC_X86_REG_DL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); - break; - case UC_X86_REG_ESP: - X86_CPU(uc, mycpu)->env.regs[R_ESP] = *(uint32_t *)value; - break; - case UC_X86_REG_SP: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint16_t *)value); - break; - case UC_X86_REG_EBP: - X86_CPU(uc, mycpu)->env.regs[R_EBP] = *(uint32_t *)value; - break; - case UC_X86_REG_BP: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint16_t *)value); - break; - 
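/* the WRITE_WORD/WRITE_BYTE_* helpers used above merge the new value into the low bits of the register and leave the remaining bits intact */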
case UC_X86_REG_ESI: - X86_CPU(uc, mycpu)->env.regs[R_ESI] = *(uint32_t *)value; - break; - case UC_X86_REG_SI: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint16_t *)value); - break; - case UC_X86_REG_EDI: - X86_CPU(uc, mycpu)->env.regs[R_EDI] = *(uint32_t *)value; - break; - case UC_X86_REG_DI: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint16_t *)value); - break; - case UC_X86_REG_EIP: - X86_CPU(uc, mycpu)->env.eip = *(uint32_t *)value; - // force to quit execution and flush TB - uc->quit_request = true; - uc_emu_stop(uc); - break; - case UC_X86_REG_IP: - X86_CPU(uc, mycpu)->env.eip = *(uint16_t *)value; - // force to quit execution and flush TB - uc->quit_request = true; - uc_emu_stop(uc); - break; - case UC_X86_REG_CS: - ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_CS, *(uint16_t *)value); - if (ret) { - return ret; - } - cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_CS, *(uint16_t *)value); - break; - case UC_X86_REG_DS: - ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_DS, *(uint16_t *)value); - if (ret) { - return ret; - } - cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_DS, *(uint16_t *)value); - break; - case UC_X86_REG_SS: - ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_SS, *(uint16_t *)value); - if (ret) { - return ret; - } - cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_SS, *(uint16_t *)value); - break; - case UC_X86_REG_ES: - ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_ES, *(uint16_t *)value); - if (ret) { - return ret; - } - cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_ES, *(uint16_t *)value); - break; - case UC_X86_REG_FS: - ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value); - if (ret) { - return ret; - } - cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value); - break; - case UC_X86_REG_GS: - ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value); - if (ret) { - return ret; - } - cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value); - break; - case UC_X86_REG_IDTR: - X86_CPU(uc, mycpu)->env.idt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; - X86_CPU(uc, mycpu)->env.idt.base = (uint32_t)((uc_x86_mmr *)value)->base; - break; - case UC_X86_REG_GDTR: - X86_CPU(uc, mycpu)->env.gdt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; - X86_CPU(uc, mycpu)->env.gdt.base = (uint32_t)((uc_x86_mmr *)value)->base; - break; - case UC_X86_REG_LDTR: - X86_CPU(uc, mycpu)->env.ldt.limit = ((uc_x86_mmr *)value)->limit; - X86_CPU(uc, mycpu)->env.ldt.base = (uint32_t)((uc_x86_mmr *)value)->base; - X86_CPU(uc, mycpu)->env.ldt.selector = (uint16_t)((uc_x86_mmr *)value)->selector; - X86_CPU(uc, mycpu)->env.ldt.flags = ((uc_x86_mmr *)value)->flags; - break; - case UC_X86_REG_TR: - X86_CPU(uc, mycpu)->env.tr.limit = ((uc_x86_mmr *)value)->limit; - X86_CPU(uc, mycpu)->env.tr.base = (uint32_t)((uc_x86_mmr *)value)->base; - X86_CPU(uc, mycpu)->env.tr.selector = (uint16_t)((uc_x86_mmr *)value)->selector; - X86_CPU(uc, mycpu)->env.tr.flags = ((uc_x86_mmr *)value)->flags; - break; - case UC_X86_REG_MSR: - x86_msr_write(uc, (uc_x86_msr *)value); - break; - case UC_X86_REG_MXCSR: - cpu_set_mxcsr(&X86_CPU(uc, mycpu)->env, *(uint32_t *)value); - break; - /* - // Don't think base registers are a "thing" on x86 - case UC_X86_REG_FS_BASE: - X86_CPU(uc, mycpu)->env.segs[R_FS].base = *(uint32_t *)value; - continue; - case UC_X86_REG_GS_BASE: - X86_CPU(uc, mycpu)->env.segs[R_GS].base = *(uint32_t *)value; - continue; - */ - } - break; - -#ifdef 
TARGET_X86_64 - case UC_MODE_64: - switch(regid) { - default: - break; - case UC_X86_REG_CR0: - case UC_X86_REG_CR1: - case UC_X86_REG_CR2: - case UC_X86_REG_CR3: - case UC_X86_REG_CR4: - X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0] = *(uint64_t *)value; - break; - case UC_X86_REG_DR0: - case UC_X86_REG_DR1: - case UC_X86_REG_DR2: - case UC_X86_REG_DR3: - case UC_X86_REG_DR4: - case UC_X86_REG_DR5: - case UC_X86_REG_DR6: - case UC_X86_REG_DR7: - X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0] = *(uint64_t *)value; - break; - case UC_X86_REG_EFLAGS: - cpu_load_eflags(&X86_CPU(uc, mycpu)->env, *(uint64_t *)value, -1); - X86_CPU(uc, mycpu)->env.eflags0 = *(uint64_t *)value; - break; - case UC_X86_REG_RAX: - X86_CPU(uc, mycpu)->env.regs[R_EAX] = *(uint64_t *)value; - break; - case UC_X86_REG_EAX: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint32_t *)value); - break; - case UC_X86_REG_AX: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint16_t *)value); - break; - case UC_X86_REG_AH: - WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); - break; - case UC_X86_REG_AL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); - break; - case UC_X86_REG_RBX: - X86_CPU(uc, mycpu)->env.regs[R_EBX] = *(uint64_t *)value; - break; - case UC_X86_REG_EBX: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint32_t *)value); - break; - case UC_X86_REG_BX: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint16_t *)value); - break; - case UC_X86_REG_BH: - WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); - break; - case UC_X86_REG_BL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); - break; - case UC_X86_REG_RCX: - X86_CPU(uc, mycpu)->env.regs[R_ECX] = *(uint64_t *)value; - break; - case UC_X86_REG_ECX: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint32_t *)value); - break; - case UC_X86_REG_CX: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint16_t *)value); - break; - case UC_X86_REG_CH: - WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); - break; - case UC_X86_REG_CL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); - break; - case UC_X86_REG_RDX: - X86_CPU(uc, mycpu)->env.regs[R_EDX] = *(uint64_t *)value; - break; - case UC_X86_REG_EDX: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint32_t *)value); - break; - case UC_X86_REG_DX: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint16_t *)value); - break; - case UC_X86_REG_DH: - WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); - break; - case UC_X86_REG_DL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); - break; - case UC_X86_REG_RSP: - X86_CPU(uc, mycpu)->env.regs[R_ESP] = *(uint64_t *)value; - break; - case UC_X86_REG_ESP: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint32_t *)value); - break; - case UC_X86_REG_SP: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint16_t *)value); - break; - case UC_X86_REG_SPL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint8_t *)value); - break; - case UC_X86_REG_RBP: - X86_CPU(uc, mycpu)->env.regs[R_EBP] = *(uint64_t *)value; - break; - case UC_X86_REG_EBP: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint32_t *)value); - break; - case UC_X86_REG_BP: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint16_t *)value); - break; - case UC_X86_REG_BPL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint8_t *)value); - break; - case UC_X86_REG_RSI: - X86_CPU(uc, 
mycpu)->env.regs[R_ESI] = *(uint64_t *)value; - break; - case UC_X86_REG_ESI: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint32_t *)value); - break; - case UC_X86_REG_SI: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint16_t *)value); - break; - case UC_X86_REG_SIL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint8_t *)value); - break; - case UC_X86_REG_RDI: - X86_CPU(uc, mycpu)->env.regs[R_EDI] = *(uint64_t *)value; - break; - case UC_X86_REG_EDI: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint32_t *)value); - break; - case UC_X86_REG_DI: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint16_t *)value); - break; - case UC_X86_REG_DIL: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint8_t *)value); - break; - case UC_X86_REG_RIP: - X86_CPU(uc, mycpu)->env.eip = *(uint64_t *)value; - // force to quit execution and flush TB - uc->quit_request = true; - uc_emu_stop(uc); - break; - case UC_X86_REG_EIP: - X86_CPU(uc, mycpu)->env.eip = *(uint32_t *)value; - // force to quit execution and flush TB - uc->quit_request = true; - uc_emu_stop(uc); - break; - case UC_X86_REG_IP: - WRITE_WORD(X86_CPU(uc, mycpu)->env.eip, *(uint16_t *)value); - // force to quit execution and flush TB - uc->quit_request = true; - uc_emu_stop(uc); - break; - case UC_X86_REG_CS: - X86_CPU(uc, mycpu)->env.segs[R_CS].selector = *(uint16_t *)value; - break; - case UC_X86_REG_DS: - X86_CPU(uc, mycpu)->env.segs[R_DS].selector = *(uint16_t *)value; - break; - case UC_X86_REG_SS: - X86_CPU(uc, mycpu)->env.segs[R_SS].selector = *(uint16_t *)value; - break; - case UC_X86_REG_ES: - X86_CPU(uc, mycpu)->env.segs[R_ES].selector = *(uint16_t *)value; - break; - case UC_X86_REG_FS: - ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value); - if (ret) { - return ret; - } - cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value); - break; - case UC_X86_REG_GS: - ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value); - if (ret) { - return ret; - } - cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value); - break; - case UC_X86_REG_R8: - X86_CPU(uc, mycpu)->env.regs[8] = *(uint64_t *)value; - break; - case UC_X86_REG_R8D: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[8], *(uint32_t *)value); - break; - case UC_X86_REG_R8W: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[8], *(uint16_t *)value); - break; - case UC_X86_REG_R8B: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[8], *(uint8_t *)value); - break; - case UC_X86_REG_R9: - X86_CPU(uc, mycpu)->env.regs[9] = *(uint64_t *)value; - break; - case UC_X86_REG_R9D: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[9], *(uint32_t *)value); - break; - case UC_X86_REG_R9W: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[9], *(uint16_t *)value); - break; - case UC_X86_REG_R9B: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[9], *(uint8_t *)value); - break; - case UC_X86_REG_R10: - X86_CPU(uc, mycpu)->env.regs[10] = *(uint64_t *)value; - break; - case UC_X86_REG_R10D: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[10], *(uint32_t *)value); - break; - case UC_X86_REG_R10W: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[10], *(uint16_t *)value); - break; - case UC_X86_REG_R10B: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[10], *(uint8_t *)value); - break; - case UC_X86_REG_R11: - X86_CPU(uc, mycpu)->env.regs[11] = *(uint64_t *)value; - break; - case UC_X86_REG_R11D: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[11], *(uint32_t *)value); - break; - case UC_X86_REG_R11W: - WRITE_WORD(X86_CPU(uc, 
mycpu)->env.regs[11], *(uint16_t *)value); - break; - case UC_X86_REG_R11B: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[11], *(uint8_t *)value); - break; - case UC_X86_REG_R12: - X86_CPU(uc, mycpu)->env.regs[12] = *(uint64_t *)value; - break; - case UC_X86_REG_R12D: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[12], *(uint32_t *)value); - break; - case UC_X86_REG_R12W: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[12], *(uint16_t *)value); - break; - case UC_X86_REG_R12B: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[12], *(uint8_t *)value); - break; - case UC_X86_REG_R13: - X86_CPU(uc, mycpu)->env.regs[13] = *(uint64_t *)value; - break; - case UC_X86_REG_R13D: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[13], *(uint32_t *)value); - break; - case UC_X86_REG_R13W: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[13], *(uint16_t *)value); - break; - case UC_X86_REG_R13B: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[13], *(uint8_t *)value); - break; - case UC_X86_REG_R14: - X86_CPU(uc, mycpu)->env.regs[14] = *(uint64_t *)value; - break; - case UC_X86_REG_R14D: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[14], *(uint32_t *)value); - break; - case UC_X86_REG_R14W: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[14], *(uint16_t *)value); - break; - case UC_X86_REG_R14B: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[14], *(uint8_t *)value); - break; - case UC_X86_REG_R15: - X86_CPU(uc, mycpu)->env.regs[15] = *(uint64_t *)value; - break; - case UC_X86_REG_R15D: - WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[15], *(uint32_t *)value); - break; - case UC_X86_REG_R15W: - WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[15], *(uint16_t *)value); - break; - case UC_X86_REG_R15B: - WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[15], *(uint8_t *)value); - break; - case UC_X86_REG_IDTR: - X86_CPU(uc, mycpu)->env.idt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; - X86_CPU(uc, mycpu)->env.idt.base = ((uc_x86_mmr *)value)->base; - break; - case UC_X86_REG_GDTR: - X86_CPU(uc, mycpu)->env.gdt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; - X86_CPU(uc, mycpu)->env.gdt.base = ((uc_x86_mmr *)value)->base; - break; - case UC_X86_REG_LDTR: - X86_CPU(uc, mycpu)->env.ldt.limit = ((uc_x86_mmr *)value)->limit; - X86_CPU(uc, mycpu)->env.ldt.base = ((uc_x86_mmr *)value)->base; - X86_CPU(uc, mycpu)->env.ldt.selector = (uint16_t)((uc_x86_mmr *)value)->selector; - X86_CPU(uc, mycpu)->env.ldt.flags = ((uc_x86_mmr *)value)->flags; - break; - case UC_X86_REG_TR: - X86_CPU(uc, mycpu)->env.tr.limit = ((uc_x86_mmr *)value)->limit; - X86_CPU(uc, mycpu)->env.tr.base = ((uc_x86_mmr *)value)->base; - X86_CPU(uc, mycpu)->env.tr.selector = (uint16_t)((uc_x86_mmr *)value)->selector; - X86_CPU(uc, mycpu)->env.tr.flags = ((uc_x86_mmr *)value)->flags; - break; - case UC_X86_REG_MSR: - x86_msr_write(uc, (uc_x86_msr *)value); - break; - case UC_X86_REG_MXCSR: - cpu_set_mxcsr(&X86_CPU(uc, mycpu)->env, *(uint32_t *)value); - break; - case UC_X86_REG_XMM8: - case UC_X86_REG_XMM9: - case UC_X86_REG_XMM10: - case UC_X86_REG_XMM11: - case UC_X86_REG_XMM12: - case UC_X86_REG_XMM13: - case UC_X86_REG_XMM14: - case UC_X86_REG_XMM15: - { - float64 *src = (float64*)value; - XMMReg *reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_XMM0]; - reg->_d[0] = src[0]; - reg->_d[1] = src[1]; - break; - } - case UC_X86_REG_FS_BASE: - X86_CPU(uc, mycpu)->env.segs[R_FS].base = *(uint64_t *)value; - continue; - case UC_X86_REG_GS_BASE: - X86_CPU(uc, mycpu)->env.segs[R_GS].base = *(uint64_t *)value; - continue; - } - break; -#endif - } - } - - return 0; -} - -DEFAULT_VISIBILITY -int 
x86_uc_machine_init(struct uc_struct *uc) -{ - return machine_initialize(uc); -} - -static bool x86_stop_interrupt(int intno) -{ - switch(intno) { - default: - return false; - case EXCP06_ILLOP: - return true; - } -} - -void pc_machine_init(struct uc_struct *uc); - -static bool x86_insn_hook_validate(uint32_t insn_enum) -{ - //for x86 we can only hook IN, OUT, and SYSCALL - if (insn_enum != UC_X86_INS_IN - && insn_enum != UC_X86_INS_OUT - && insn_enum != UC_X86_INS_SYSCALL - && insn_enum != UC_X86_INS_SYSENTER) { - return false; - } - return true; -} - -DEFAULT_VISIBILITY -void x86_uc_init(struct uc_struct* uc) -{ - apic_register_types(uc); - apic_common_register_types(uc); - register_accel_types(uc); - pc_machine_register_types(uc); - x86_cpu_register_types(uc); - pc_machine_init(uc); // pc_piix - uc->reg_read = x86_reg_read; - uc->reg_write = x86_reg_write; - uc->reg_reset = x86_reg_reset; - uc->release = x86_release; - uc->set_pc = x86_set_pc; - uc->stop_interrupt = x86_stop_interrupt; - uc->insn_hook_validate = x86_insn_hook_validate; - uc_common_init(uc); -} - -/* vim: set ts=4 sts=4 sw=4 et: */ diff --git a/qemu/target-m68k/Makefile.objs b/qemu/target-m68k/Makefile.objs deleted file mode 100644 index f87fde4b..00000000 --- a/qemu/target-m68k/Makefile.objs +++ /dev/null @@ -1,2 +0,0 @@ -obj-y += translate.o op_helper.o helper.o cpu.o -obj-y += unicorn.o diff --git a/qemu/target-m68k/cpu-qom.h b/qemu/target-m68k/cpu-qom.h deleted file mode 100644 index d8a49179..00000000 --- a/qemu/target-m68k/cpu-qom.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * QEMU Motorola 68k CPU - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - * - */ -#ifndef QEMU_M68K_CPU_QOM_H -#define QEMU_M68K_CPU_QOM_H - -#include "qom/cpu.h" - -#define TYPE_M68K_CPU "m68k-cpu" - -#define M68K_CPU_CLASS(uc, klass) \ - OBJECT_CLASS_CHECK(uc, M68kCPUClass, (klass), TYPE_M68K_CPU) -#define M68K_CPU(uc, obj) ((M68kCPU *)obj) -#define M68K_CPU_GET_CLASS(uc, obj) \ - OBJECT_GET_CLASS(uc, M68kCPUClass, (obj), TYPE_M68K_CPU) - -/** - * M68kCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. - * - * A Motorola 68k CPU model. - */ -typedef struct M68kCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - void (*parent_reset)(CPUState *cpu); -} M68kCPUClass; - -/** - * M68kCPU: - * @env: #CPUM68KState - * - * A Motorola 68k CPU. 
- */ -typedef struct M68kCPU { - /*< private >*/ - CPUState parent_obj; - /*< public >*/ - - CPUM68KState env; -} M68kCPU; - -static inline M68kCPU *m68k_env_get_cpu(CPUM68KState *env) -{ - return container_of(env, M68kCPU, env); -} - -#define ENV_GET_CPU(e) CPU(m68k_env_get_cpu(e)) - -#define ENV_OFFSET offsetof(M68kCPU, env) - -void m68k_cpu_do_interrupt(CPUState *cpu); -bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req); -void m68k_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, - int flags); -hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -int m68k_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); -int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); - -void m68k_cpu_exec_enter(CPUState *cs); -void m68k_cpu_exec_exit(CPUState *cs); - -#endif diff --git a/qemu/target-m68k/cpu.c b/qemu/target-m68k/cpu.c deleted file mode 100644 index f000648f..00000000 --- a/qemu/target-m68k/cpu.c +++ /dev/null @@ -1,242 +0,0 @@ -/* - * QEMU Motorola 68k CPU - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - * - */ - -#include "hw/m68k/m68k.h" -#include "cpu.h" -#include "qemu-common.h" - - -static void m68k_cpu_set_pc(CPUState *cs, vaddr value) -{ - M68kCPU *cpu = M68K_CPU(cs->uc, cs); - - cpu->env.pc = value; -} - -static bool m68k_cpu_has_work(CPUState *cs) -{ - return cs->interrupt_request & CPU_INTERRUPT_HARD; -} - -static void m68k_set_feature(CPUM68KState *env, int feature) -{ - env->features |= (1u << feature); -} - -/* CPUClass::reset() */ -static void m68k_cpu_reset(CPUState *s) -{ - M68kCPU *cpu = M68K_CPU(s->uc, s); - M68kCPUClass *mcc = M68K_CPU_GET_CLASS(s->uc, cpu); - CPUM68KState *env = &cpu->env; - - mcc->parent_reset(s); - - memset(env, 0, offsetof(CPUM68KState, features)); -#if !defined(CONFIG_USER_ONLY) - env->sr = 0x2700; -#endif - m68k_switch_sp(env); - /* ??? FP regs should be initialized to NaN. */ - env->cc_op = CC_OP_FLAGS; - /* TODO: We should set PC from the interrupt vector. 
*/ - env->pc = 0; - tlb_flush(s, 1); -} - -/* CPU models */ - -static ObjectClass *m68k_cpu_class_by_name(struct uc_struct *uc, const char *cpu_model) -{ - ObjectClass *oc; - char *typename; - - if (cpu_model == NULL) { - return NULL; - } - - typename = g_strdup_printf("%s-" TYPE_M68K_CPU, cpu_model); - oc = object_class_by_name(uc, typename); - g_free(typename); - if (oc != NULL && (object_class_dynamic_cast(uc, oc, TYPE_M68K_CPU) == NULL || - object_class_is_abstract(oc))) { - return NULL; - } - return oc; -} - -static void m5206_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - M68kCPU *cpu = M68K_CPU(uc, obj); - CPUM68KState *env = &cpu->env; - - m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); -} - -static void m5208_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - M68kCPU *cpu = M68K_CPU(uc, obj); - CPUM68KState *env = &cpu->env; - - m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); - m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC); - m68k_set_feature(env, M68K_FEATURE_BRAL); - m68k_set_feature(env, M68K_FEATURE_CF_EMAC); - m68k_set_feature(env, M68K_FEATURE_USP); -} - -static void cfv4e_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - M68kCPU *cpu = M68K_CPU(uc, obj); - CPUM68KState *env = &cpu->env; - - m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); - m68k_set_feature(env, M68K_FEATURE_CF_ISA_B); - m68k_set_feature(env, M68K_FEATURE_BRAL); - m68k_set_feature(env, M68K_FEATURE_CF_FPU); - m68k_set_feature(env, M68K_FEATURE_CF_EMAC); - m68k_set_feature(env, M68K_FEATURE_USP); -} - -static void any_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - M68kCPU *cpu = M68K_CPU(uc, obj); - CPUM68KState *env = &cpu->env; - - m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); - m68k_set_feature(env, M68K_FEATURE_CF_ISA_B); - m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC); - m68k_set_feature(env, M68K_FEATURE_BRAL); - m68k_set_feature(env, M68K_FEATURE_CF_FPU); - /* MAC and EMAC are mututally exclusive, so pick EMAC. - It's mostly backwards compatible. 
*/ - m68k_set_feature(env, M68K_FEATURE_CF_EMAC); - m68k_set_feature(env, M68K_FEATURE_CF_EMAC_B); - m68k_set_feature(env, M68K_FEATURE_USP); - m68k_set_feature(env, M68K_FEATURE_EXT_FULL); - m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); -} - -typedef struct M68kCPUInfo { - const char *name; - void (*instance_init)(struct uc_struct *uc, Object *obj, void *opaque); -} M68kCPUInfo; - -static const M68kCPUInfo m68k_cpus[] = { - { "m5206", m5206_cpu_initfn }, - { "m5208", m5208_cpu_initfn }, - { "cfv4e", cfv4e_cpu_initfn }, - { "any", any_cpu_initfn }, -}; - -static int m68k_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp) -{ - CPUState *cs = CPU(dev); - M68kCPUClass *mcc = M68K_CPU_GET_CLASS(uc, dev); - - cpu_reset(cs); - qemu_init_vcpu(cs); - - mcc->parent_realize(cs->uc, dev, errp); - - return 0; -} - -static void m68k_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - CPUState *cs = CPU(obj); - M68kCPU *cpu = M68K_CPU(uc, obj); - CPUM68KState *env = &cpu->env; - - cs->env_ptr = env; - cpu_exec_init(env, opaque); - - if (tcg_enabled(uc)) { - m68k_tcg_init(uc); - } -} - -static void m68k_cpu_class_init(struct uc_struct *uc, ObjectClass *c, void *data) -{ - M68kCPUClass *mcc = M68K_CPU_CLASS(uc, c); - CPUClass *cc = CPU_CLASS(uc, c); - DeviceClass *dc = DEVICE_CLASS(uc, c); - - mcc->parent_realize = dc->realize; - dc->realize = m68k_cpu_realizefn; - - mcc->parent_reset = cc->reset; - cc->reset = m68k_cpu_reset; - - cc->class_by_name = m68k_cpu_class_by_name; - cc->has_work = m68k_cpu_has_work; - cc->do_interrupt = m68k_cpu_do_interrupt; - cc->cpu_exec_interrupt = m68k_cpu_exec_interrupt; - cc->set_pc = m68k_cpu_set_pc; -#ifdef CONFIG_USER_ONLY - cc->handle_mmu_fault = m68k_cpu_handle_mmu_fault; -#else - cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug; -#endif - cc->cpu_exec_enter = m68k_cpu_exec_enter; - cc->cpu_exec_exit = m68k_cpu_exec_exit; -} - -static void register_cpu_type(void *opaque, const M68kCPUInfo *info) -{ - TypeInfo type_info = {0}; - type_info.parent = TYPE_M68K_CPU, - type_info.instance_init = info->instance_init, - - type_info.name = g_strdup_printf("%s-" TYPE_M68K_CPU, info->name); - type_register(opaque, &type_info); - g_free((void *)type_info.name); -} - -void m68k_cpu_register_types(void *opaque) -{ - const TypeInfo m68k_cpu_type_info = { - TYPE_M68K_CPU, - TYPE_CPU, - - sizeof(M68kCPUClass), - sizeof(M68kCPU), - opaque, - - m68k_cpu_initfn, - NULL, - NULL, - - NULL, - - m68k_cpu_class_init, - NULL, - NULL, - - true, - }; - - int i; - - type_register_static(opaque, &m68k_cpu_type_info); - for (i = 0; i < ARRAY_SIZE(m68k_cpus); i++) { - register_cpu_type(opaque, &m68k_cpus[i]); - } -} diff --git a/qemu/target-m68k/cpu.h b/qemu/target-m68k/cpu.h deleted file mode 100644 index 40f66b4c..00000000 --- a/qemu/target-m68k/cpu.h +++ /dev/null @@ -1,259 +0,0 @@ -/* - * m68k virtual CPU header - * - * Copyright (c) 2005-2007 CodeSourcery - * Written by Paul Brook - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef CPU_M68K_H -#define CPU_M68K_H - -#define TARGET_LONG_BITS 32 - -#define CPUArchState struct CPUM68KState - -#include "config.h" -#include "qemu-common.h" -#include "exec/cpu-defs.h" - -#include "fpu/softfloat.h" - -#define MAX_QREGS 32 - -#define TARGET_HAS_ICE 1 - -#define ELF_MACHINE EM_68K - -#define EXCP_ACCESS 2 /* Access (MMU) error. */ -#define EXCP_ADDRESS 3 /* Address error. */ -#define EXCP_ILLEGAL 4 /* Illegal instruction. */ -#define EXCP_DIV0 5 /* Divide by zero */ -#define EXCP_PRIVILEGE 8 /* Privilege violation. */ -#define EXCP_TRACE 9 -#define EXCP_LINEA 10 /* Unimplemented line-A (MAC) opcode. */ -#define EXCP_LINEF 11 /* Unimplemented line-F (FPU) opcode. */ -#define EXCP_DEBUGNBP 12 /* Non-breakpoint debug interrupt. */ -#define EXCP_DEBEGBP 13 /* Breakpoint debug interrupt. */ -#define EXCP_FORMAT 14 /* RTE format error. */ -#define EXCP_UNINITIALIZED 15 -#define EXCP_TRAP0 32 /* User trap #0. */ -#define EXCP_TRAP15 47 /* User trap #15. */ -#define EXCP_UNSUPPORTED 61 -#define EXCP_ICE 13 - -#define EXCP_RTE 0x100 -#define EXCP_HALT_INSN 0x101 - -#define NB_MMU_MODES 2 - -typedef struct CPUM68KState { - uint32_t dregs[8]; - uint32_t aregs[8]; - uint32_t pc; - uint32_t sr; - - /* SSP and USP. The current_sp is stored in aregs[7], the other here. */ - int current_sp; - uint32_t sp[2]; - - /* Condition flags. */ - uint32_t cc_op; - uint32_t cc_dest; - uint32_t cc_src; - uint32_t cc_x; - - float64 fregs[8]; - float64 fp_result; - uint32_t fpcr; - uint32_t fpsr; - float_status fp_status; - - uint64_t mactmp; - /* EMAC Hardware deals with 48-bit values composed of one 32-bit and - two 8-bit parts. We store a single 64-bit value and - rearrange/extend this when changing modes. */ - uint64_t macc[4]; - uint32_t macsr; - uint32_t mac_mask; - - /* Temporary storage for DIV helpers. */ - uint32_t div1; - uint32_t div2; - - /* MMU status. */ - struct { - uint32_t ar; - } mmu; - - /* Control registers. */ - uint32_t vbr; - uint32_t mbar; - uint32_t rambar0; - uint32_t cacr; - - int pending_vector; - int pending_level; - - uint32_t qregs[MAX_QREGS]; - - CPU_COMMON - - /* Fields from here on are preserved across CPU reset. */ - uint32_t features; - - // Unicorn engine - struct uc_struct *uc; -} CPUM68KState; - -#include "cpu-qom.h" - -void m68k_tcg_init(struct uc_struct *uc); -M68kCPU *cpu_m68k_init(struct uc_struct *uc, const char *cpu_model); -int cpu_m68k_exec(struct uc_struct *uc, CPUM68KState *s); -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. 
*/ -int cpu_m68k_signal_handler(int host_signum, void *pinfo, - void *puc); -void cpu_m68k_flush_flags(CPUM68KState *, int); - -enum { - CC_OP_DYNAMIC, /* Use env->cc_op */ - CC_OP_FLAGS, /* CC_DEST = CVZN, CC_SRC = unused */ - CC_OP_LOGIC, /* CC_DEST = result, CC_SRC = unused */ - CC_OP_ADD, /* CC_DEST = result, CC_SRC = source */ - CC_OP_SUB, /* CC_DEST = result, CC_SRC = source */ - CC_OP_CMPB, /* CC_DEST = result, CC_SRC = source */ - CC_OP_CMPW, /* CC_DEST = result, CC_SRC = source */ - CC_OP_ADDX, /* CC_DEST = result, CC_SRC = source */ - CC_OP_SUBX, /* CC_DEST = result, CC_SRC = source */ - CC_OP_SHIFT, /* CC_DEST = result, CC_SRC = carry */ -}; - -#define CCF_C 0x01 -#define CCF_V 0x02 -#define CCF_Z 0x04 -#define CCF_N 0x08 -#define CCF_X 0x10 - -#define SR_I_SHIFT 8 -#define SR_I 0x0700 -#define SR_M 0x1000 -#define SR_S 0x2000 -#define SR_T 0x8000 - -#define M68K_SSP 0 -#define M68K_USP 1 - -/* CACR fields are implementation defined, but some bits are common. */ -#define M68K_CACR_EUSP 0x10 - -#define MACSR_PAV0 0x100 -#define MACSR_OMC 0x080 -#define MACSR_SU 0x040 -#define MACSR_FI 0x020 -#define MACSR_RT 0x010 -#define MACSR_N 0x008 -#define MACSR_Z 0x004 -#define MACSR_V 0x002 -#define MACSR_EV 0x001 - -void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector); -void m68k_set_macsr(CPUM68KState *env, uint32_t val); -void m68k_switch_sp(CPUM68KState *env); - -#define M68K_FPCR_PREC (1 << 6) - -void do_m68k_semihosting(CPUM68KState *env, int nr); - -/* There are 4 ColdFire core ISA revisions: A, A+, B and C. - Each feature covers the subset of instructions common to the - ISA revisions mentioned. */ - -enum m68k_features { - M68K_FEATURE_CF_ISA_A, - M68K_FEATURE_CF_ISA_B, /* (ISA B or C). */ - M68K_FEATURE_CF_ISA_APLUSC, /* BIT/BITREV, FF1, STRLDSR (ISA A+ or C). */ - M68K_FEATURE_BRAL, /* Long unconditional branch. (ISA A+ or B). */ - M68K_FEATURE_CF_FPU, - M68K_FEATURE_CF_MAC, - M68K_FEATURE_CF_EMAC, - M68K_FEATURE_CF_EMAC_B, /* Revision B EMAC (dual accumulate). */ - M68K_FEATURE_USP, /* User Stack Pointer. (ISA A+, B or C). */ - M68K_FEATURE_EXT_FULL, /* 68020+ full extension word. */ - M68K_FEATURE_WORD_INDEX /* word sized address index registers. */ -}; - -static inline int m68k_feature(CPUM68KState *env, int feature) -{ - return (env->features & (1u << feature)) != 0; -} - -void m68k_cpu_list(FILE *f, fprintf_function cpu_fprintf); - -void register_m68k_insns (CPUM68KState *env); - -#ifdef CONFIG_USER_ONLY -/* Linux uses 8k pages. */ -#define TARGET_PAGE_BITS 13 -#else -/* Smallest TLB entry size is 1k. */ -#define TARGET_PAGE_BITS 10 -#endif - -#define TARGET_PHYS_ADDR_SPACE_BITS 32 -#define TARGET_VIRT_ADDR_SPACE_BITS 32 - -static inline CPUM68KState *cpu_init(struct uc_struct *uc, const char *cpu_model) -{ - M68kCPU *cpu = cpu_m68k_init(uc, cpu_model); - if (cpu == NULL) { - return NULL; - } - return &cpu->env; -} - -#define cpu_exec cpu_m68k_exec -#define cpu_gen_code cpu_m68k_gen_code -#define cpu_signal_handler cpu_m68k_signal_handler -#define cpu_list m68k_cpu_list - -/* MMU modes definitions */ -#define MMU_MODE0_SUFFIX _kernel -#define MMU_MODE1_SUFFIX _user -#define MMU_USER_IDX 1 -static inline int cpu_mmu_index (CPUM68KState *env) -{ - return (env->sr & SR_S) == 0 ? 
1 : 0; -} - -int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, - int mmu_idx); - -#include "exec/cpu-all.h" - -static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc, - target_ulong *cs_base, int *flags) -{ - *pc = env->pc; - *cs_base = 0; - *flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */ - | (env->sr & SR_S) /* Bit 13 */ - | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */ -} - -#include "exec/exec-all.h" - -#endif diff --git a/qemu/target-m68k/helper.c b/qemu/target-m68k/helper.c deleted file mode 100644 index 3ecc496a..00000000 --- a/qemu/target-m68k/helper.c +++ /dev/null @@ -1,799 +0,0 @@ -/* - * m68k op helpers - * - * Copyright (c) 2006-2007 CodeSourcery - * Written by Paul Brook - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "cpu.h" - -#include "exec/helper-proto.h" - -#define SIGNBIT (1u << 31) - -M68kCPU *cpu_m68k_init(struct uc_struct *uc, const char *cpu_model) -{ - M68kCPU *cpu; - CPUM68KState *env; - ObjectClass *oc; - - oc = cpu_class_by_name(uc, TYPE_M68K_CPU, cpu_model); - if (oc == NULL) { - return NULL; - } - cpu = M68K_CPU(uc, object_new(uc, object_class_get_name(oc))); - env = &cpu->env; - - register_m68k_insns(env); - - object_property_set_bool(uc, OBJECT(cpu), true, "realized", NULL); - - return cpu; -} - -void cpu_m68k_flush_flags(CPUM68KState *env, int cc_op) -{ - M68kCPU *cpu = m68k_env_get_cpu(env); - int flags; - uint32_t src; - uint32_t dest; - uint32_t tmp; - -#define HIGHBIT 0x80000000u - -#define SET_NZ(x) do { \ - if ((x) == 0) \ - flags |= CCF_Z; \ - else if ((int32_t)(x) < 0) \ - flags |= CCF_N; \ - } while (0) - -#define SET_FLAGS_SUB(type, utype) do { \ - SET_NZ((type)dest); \ - tmp = dest + src; \ - if ((utype) tmp < (utype) src) \ - flags |= CCF_C; \ - if ((1u << (sizeof(type) * 8 - 1)) & (tmp ^ dest) & (tmp ^ src)) \ - flags |= CCF_V; \ - } while (0) - - flags = 0; - src = env->cc_src; - dest = env->cc_dest; - switch (cc_op) { - case CC_OP_FLAGS: - flags = dest; - break; - case CC_OP_LOGIC: - SET_NZ(dest); - break; - case CC_OP_ADD: - SET_NZ(dest); - if (dest < src) - flags |= CCF_C; - tmp = dest - src; - if (HIGHBIT & (src ^ dest) & ~(tmp ^ src)) - flags |= CCF_V; - break; - case CC_OP_SUB: - SET_FLAGS_SUB(int32_t, uint32_t); - break; - case CC_OP_CMPB: - SET_FLAGS_SUB(int8_t, uint8_t); - break; - case CC_OP_CMPW: - SET_FLAGS_SUB(int16_t, uint16_t); - break; - case CC_OP_ADDX: - SET_NZ(dest); - if (dest <= src) - flags |= CCF_C; - tmp = dest - src - 1; - if (HIGHBIT & (src ^ dest) & ~(tmp ^ src)) - flags |= CCF_V; - break; - case CC_OP_SUBX: - SET_NZ(dest); - tmp = dest + src + 1; - if (tmp <= src) - flags |= CCF_C; - if (HIGHBIT & (tmp ^ dest) & (tmp ^ src)) - flags |= CCF_V; - break; - case CC_OP_SHIFT: - SET_NZ(dest); - if (src) - flags |= CCF_C; - break; - default: - cpu_abort(CPU(cpu), "Bad CC_OP %d", cc_op); - } - env->cc_op = CC_OP_FLAGS; - env->cc_dest = flags; -} - -void HELPER(movec)(CPUM68KState *env, 
uint32_t reg, uint32_t val) -{ - switch (reg) { - case 0x02: /* CACR */ - env->cacr = val; - m68k_switch_sp(env); - break; - case 0x04: case 0x05: case 0x06: case 0x07: /* ACR[0-3] */ - /* TODO: Implement Access Control Registers. */ - break; - case 0x801: /* VBR */ - env->vbr = val; - break; - /* TODO: Implement control registers. */ - default: - qemu_log("Unimplemented control register write 0x%x = 0x%x\n", - reg, val); - helper_raise_exception(env, EXCP_UNSUPPORTED); - } -} - -void HELPER(set_macsr)(CPUM68KState *env, uint32_t val) -{ - uint32_t acc; - int8_t exthigh; - uint8_t extlow; - uint64_t regval; - int i; - if ((env->macsr ^ val) & (MACSR_FI | MACSR_SU)) { - for (i = 0; i < 4; i++) { - regval = env->macc[i]; - exthigh = regval >> 40; - if (env->macsr & MACSR_FI) { - acc = regval >> 8; - extlow = regval; - } else { - acc = regval; - extlow = regval >> 32; - } - if (env->macsr & MACSR_FI) { - regval = (((uint64_t)acc) << 8) | extlow; - regval |= ((uint64_t)((int64_t)exthigh)) << 40; - } else if (env->macsr & MACSR_SU) { - regval = acc | (((int64_t)extlow) << 32); - regval |= ((uint64_t)((int64_t)exthigh)) << 40; - } else { - regval = acc | (((uint64_t)extlow) << 32); - regval |= ((uint64_t)(uint8_t)exthigh) << 40; - } - env->macc[i] = regval; - } - } - env->macsr = val; -} - -void m68k_switch_sp(CPUM68KState *env) -{ - int new_sp; - - env->sp[env->current_sp] = env->aregs[7]; - new_sp = (env->sr & SR_S && env->cacr & M68K_CACR_EUSP) - ? M68K_SSP : M68K_USP; - env->aregs[7] = env->sp[new_sp]; - env->current_sp = new_sp; -} - -#if defined(CONFIG_USER_ONLY) - -int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, - int mmu_idx) -{ - M68kCPU *cpu = M68K_CPU(cs); - - cs->exception_index = EXCP_ACCESS; - cpu->env.mmu.ar = address; - return 1; -} - -#else - -/* MMU */ - -/* TODO: This will need fixing once the MMU is implemented. */ -hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - return addr; -} - -int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, - int mmu_idx) -{ - int prot; - - address &= TARGET_PAGE_MASK; - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE); - return 0; -} - -/* Notify CPU of a pending interrupt. Prioritization and vectoring should - be handled by the interrupt controller. Real hardware only requests - the vector when the interrupt is acknowledged by the CPU. For - simplicitly we calculate it when the interrupt is signalled. */ -void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector) -{ - CPUState *cs = CPU(cpu); - CPUM68KState *env = &cpu->env; - - env->pending_level = level; - env->pending_vector = vector; - if (level) { - cpu_interrupt(cs, CPU_INTERRUPT_HARD); - } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); - } -} - -#endif - -uint32_t HELPER(bitrev)(uint32_t x) -{ - x = ((x >> 1) & 0x55555555u) | ((x << 1) & 0xaaaaaaaau); - x = ((x >> 2) & 0x33333333u) | ((x << 2) & 0xccccccccu); - x = ((x >> 4) & 0x0f0f0f0fu) | ((x << 4) & 0xf0f0f0f0u); - return bswap32(x); -} - -uint32_t HELPER(ff1)(uint32_t x) -{ - int n; - for (n = 32; x; n--) - x >>= 1; - return n; -} - -uint32_t HELPER(sats)(uint32_t val, uint32_t ccr) -{ - /* The result has the opposite sign to the original value. 
*/ - if (ccr & CCF_V) - val = (((int32_t)val) >> 31) ^ SIGNBIT; - return val; -} - -uint32_t HELPER(subx_cc)(CPUM68KState *env, uint32_t op1, uint32_t op2) -{ - uint32_t res; - uint32_t old_flags; - - old_flags = env->cc_dest; - if (env->cc_x) { - env->cc_x = (op1 <= op2); - env->cc_op = CC_OP_SUBX; - res = op1 - (op2 + 1); - } else { - env->cc_x = (op1 < op2); - env->cc_op = CC_OP_SUB; - res = op1 - op2; - } - env->cc_dest = res; - env->cc_src = op2; - cpu_m68k_flush_flags(env, env->cc_op); - /* !Z is sticky. */ - env->cc_dest &= (old_flags | ~CCF_Z); - return res; -} - -uint32_t HELPER(addx_cc)(CPUM68KState *env, uint32_t op1, uint32_t op2) -{ - uint32_t res; - uint32_t old_flags; - - old_flags = env->cc_dest; - if (env->cc_x) { - res = op1 + op2 + 1; - env->cc_x = (res <= op2); - env->cc_op = CC_OP_ADDX; - } else { - res = op1 + op2; - env->cc_x = (res < op2); - env->cc_op = CC_OP_ADD; - } - env->cc_dest = res; - env->cc_src = op2; - cpu_m68k_flush_flags(env, env->cc_op); - /* !Z is sticky. */ - env->cc_dest &= (old_flags | ~CCF_Z); - return res; -} - -uint32_t HELPER(xflag_lt)(uint32_t a, uint32_t b) -{ - return a < b; -} - -void HELPER(set_sr)(CPUM68KState *env, uint32_t val) -{ - env->sr = val & 0xffff; - m68k_switch_sp(env); -} - -uint32_t HELPER(shl_cc)(CPUM68KState *env, uint32_t val, uint32_t shift) -{ - uint32_t result; - uint32_t cf; - - shift &= 63; - if (shift == 0) { - result = val; - cf = env->cc_src & CCF_C; - } else if (shift < 32) { - result = val << shift; - cf = (val >> (32 - shift)) & 1; - } else if (shift == 32) { - result = 0; - cf = val & 1; - } else /* shift > 32 */ { - result = 0; - cf = 0; - } - env->cc_src = cf; - env->cc_x = (cf != 0); - env->cc_dest = result; - return result; -} - -uint32_t HELPER(shr_cc)(CPUM68KState *env, uint32_t val, uint32_t shift) -{ - uint32_t result; - uint32_t cf; - - shift &= 63; - if (shift == 0) { - result = val; - cf = env->cc_src & CCF_C; - } else if (shift < 32) { - result = val >> shift; - cf = (val >> (shift - 1)) & 1; - } else if (shift == 32) { - result = 0; - cf = val >> 31; - } else /* shift > 32 */ { - result = 0; - cf = 0; - } - env->cc_src = cf; - env->cc_x = (cf != 0); - env->cc_dest = result; - return result; -} - -uint32_t HELPER(sar_cc)(CPUM68KState *env, uint32_t val, uint32_t shift) -{ - uint32_t result; - uint32_t cf; - - shift &= 63; - if (shift == 0) { - result = val; - cf = (env->cc_src & CCF_C) != 0; - } else if (shift < 32) { - result = (int32_t)val >> shift; - cf = (val >> (shift - 1)) & 1; - } else /* shift >= 32 */ { - result = (int32_t)val >> 31; - cf = val >> 31; - } - env->cc_src = cf; - env->cc_x = cf; - env->cc_dest = result; - return result; -} - -/* FPU helpers. 
*/ -uint32_t HELPER(f64_to_i32)(CPUM68KState *env, float64 val) -{ - return float64_to_int32(val, &env->fp_status); -} - -float32 HELPER(f64_to_f32)(CPUM68KState *env, float64 val) -{ - return float64_to_float32(val, &env->fp_status); -} - -float64 HELPER(i32_to_f64)(CPUM68KState *env, uint32_t val) -{ - return int32_to_float64(val, &env->fp_status); -} - -float64 HELPER(f32_to_f64)(CPUM68KState *env, float32 val) -{ - return float32_to_float64(val, &env->fp_status); -} - -float64 HELPER(iround_f64)(CPUM68KState *env, float64 val) -{ - return float64_round_to_int(val, &env->fp_status); -} - -float64 HELPER(itrunc_f64)(CPUM68KState *env, float64 val) -{ - return float64_trunc_to_int(val, &env->fp_status); -} - -float64 HELPER(sqrt_f64)(CPUM68KState *env, float64 val) -{ - return float64_sqrt(val, &env->fp_status); -} - -float64 HELPER(abs_f64)(float64 val) -{ - return float64_abs(val); -} - -float64 HELPER(chs_f64)(float64 val) -{ - return float64_chs(val); -} - -float64 HELPER(add_f64)(CPUM68KState *env, float64 a, float64 b) -{ - return float64_add(a, b, &env->fp_status); -} - -float64 HELPER(sub_f64)(CPUM68KState *env, float64 a, float64 b) -{ - return float64_sub(a, b, &env->fp_status); -} - -float64 HELPER(mul_f64)(CPUM68KState *env, float64 a, float64 b) -{ - return float64_mul(a, b, &env->fp_status); -} - -float64 HELPER(div_f64)(CPUM68KState *env, float64 a, float64 b) -{ - return float64_div(a, b, &env->fp_status); -} - -float64 HELPER(sub_cmp_f64)(CPUM68KState *env, float64 a, float64 b) -{ - /* ??? This may incorrectly raise exceptions. */ - /* ??? Should flush denormals to zero. */ - float64 res; - res = float64_sub(a, b, &env->fp_status); - if (float64_is_quiet_nan(res)) { - /* +/-inf compares equal against itself, but sub returns nan. */ - if (!float64_is_quiet_nan(a) - && !float64_is_quiet_nan(b)) { - res = float64_zero; - if (float64_lt_quiet(a, res, &env->fp_status)) - res = float64_chs(res); - } - } - return res; -} - -uint32_t HELPER(compare_f64)(CPUM68KState *env, float64 val) -{ - return float64_compare_quiet(val, float64_zero, &env->fp_status); -} - -/* MAC unit. */ -/* FIXME: The MAC unit implementation is a bit of a mess. Some helpers - take values, others take register numbers and manipulate the contents - in-place. */ -void HELPER(mac_move)(CPUM68KState *env, uint32_t dest, uint32_t src) -{ - uint32_t mask; - env->macc[dest] = env->macc[src]; - mask = MACSR_PAV0 << dest; - if (env->macsr & (MACSR_PAV0 << src)) - env->macsr |= mask; - else - env->macsr &= ~mask; -} - -uint64_t HELPER(macmuls)(CPUM68KState *env, uint32_t op1, uint32_t op2) -{ - int64_t product; - int64_t res; - - product = (uint64_t)op1 * op2; - res = ((int64_t)(((uint64_t)product) << 24)) >> 24; - if (res != product) { - env->macsr |= MACSR_V; - if (env->macsr & MACSR_OMC) { - /* Make sure the accumulate operation overflows. */ - if (product < 0) - res = ~(1ll << 50); - else - res = 1ll << 50; - } - } - return res; -} - -uint64_t HELPER(macmulu)(CPUM68KState *env, uint32_t op1, uint32_t op2) -{ - uint64_t product; - - product = (uint64_t)op1 * op2; - if (product & (0xffffffull << 40)) { - env->macsr |= MACSR_V; - if (env->macsr & MACSR_OMC) { - /* Make sure the accumulate operation overflows. 
*/ - product = 1ll << 50; - } else { - product &= ((1ull << 40) - 1); - } - } - return product; -} - -uint64_t HELPER(macmulf)(CPUM68KState *env, uint32_t op1, uint32_t op2) -{ - uint64_t product; - uint32_t remainder; - - product = (uint64_t)op1 * op2; - if (env->macsr & MACSR_RT) { - remainder = product & 0xffffff; - product >>= 24; - if (remainder > 0x800000) - product++; - else if (remainder == 0x800000) - product += (product & 1); - } else { - product >>= 24; - } - return product; -} - -void HELPER(macsats)(CPUM68KState *env, uint32_t acc) -{ - int64_t tmp; - int64_t result; - tmp = env->macc[acc]; - result = ((int64_t)((uint64_t)tmp << 16) >> 16); - if (result != tmp) { - env->macsr |= MACSR_V; - } - if (env->macsr & MACSR_V) { - env->macsr |= MACSR_PAV0 << acc; - if (env->macsr & MACSR_OMC) { - /* The result is saturated to 32 bits, despite overflow occurring - at 48 bits. Seems weird, but that's what the hardware docs - say. */ - result = (result >> 63) ^ 0x7fffffff; - } - } - env->macc[acc] = result; -} - -void HELPER(macsatu)(CPUM68KState *env, uint32_t acc) -{ - uint64_t val; - - val = env->macc[acc]; - if (val & (0xffffull << 48)) { - env->macsr |= MACSR_V; - } - if (env->macsr & MACSR_V) { - env->macsr |= MACSR_PAV0 << acc; - if (env->macsr & MACSR_OMC) { - if (val > (1ull << 53)) - val = 0; - else - val = (1ull << 48) - 1; - } else { - val &= ((1ull << 48) - 1); - } - } - env->macc[acc] = val; -} - -void HELPER(macsatf)(CPUM68KState *env, uint32_t acc) -{ - int64_t sum; - int64_t result; - - sum = env->macc[acc]; - result = ((int64_t)((uint64_t)sum << 16)) >> 16; - if (result != sum) { - env->macsr |= MACSR_V; - } - if (env->macsr & MACSR_V) { - env->macsr |= MACSR_PAV0 << acc; - if (env->macsr & MACSR_OMC) { - result = (result >> 63) ^ 0x7fffffffffffll; - } - } - env->macc[acc] = result; -} - -void HELPER(mac_set_flags)(CPUM68KState *env, uint32_t acc) -{ - uint64_t val; - val = env->macc[acc]; - if (val == 0) { - env->macsr |= MACSR_Z; - } else if (val & (1ull << 47)) { - env->macsr |= MACSR_N; - } - if (env->macsr & (MACSR_PAV0 << acc)) { - env->macsr |= MACSR_V; - } - if (env->macsr & MACSR_FI) { - val = ((int64_t)val) >> 40; - if (val != 0 && val != -1) - env->macsr |= MACSR_EV; - } else if (env->macsr & MACSR_SU) { - val = ((int64_t)val) >> 32; - if (val != 0 && val != -1) - env->macsr |= MACSR_EV; - } else { - if ((val >> 32) != 0) - env->macsr |= MACSR_EV; - } -} - -void HELPER(flush_flags)(CPUM68KState *env, uint32_t cc_op) -{ - cpu_m68k_flush_flags(env, cc_op); -} - -uint32_t HELPER(get_macf)(CPUM68KState *env, uint64_t val) -{ - int rem; - uint32_t result; - - if (env->macsr & MACSR_SU) { - /* 16-bit rounding. */ - rem = val & 0xffffff; - val = (val >> 24) & 0xffffu; - if (rem > 0x800000) - val++; - else if (rem == 0x800000) - val += (val & 1); - } else if (env->macsr & MACSR_RT) { - /* 32-bit rounding. */ - rem = val & 0xff; - val >>= 8; - if (rem > 0x80) - val++; - else if (rem == 0x80) - val += (val & 1); - } else { - /* No rounding. */ - val >>= 8; - } - if (env->macsr & MACSR_OMC) { - /* Saturate. */ - if (env->macsr & MACSR_SU) { - if (val != (uint16_t) val) { - result = ((val >> 63) ^ 0x7fff) & 0xffff; - } else { - result = val & 0xffff; - } - } else { - if (val != (uint32_t)val) { - result = ((uint32_t)(val >> 63) & 0x7fffffff); - } else { - result = (uint32_t)val; - } - } - } else { - /* No saturation. 
*/ - if (env->macsr & MACSR_SU) { - result = val & 0xffff; - } else { - result = (uint32_t)val; - } - } - return result; -} - -uint32_t HELPER(get_macs)(uint64_t val) -{ - if (val == (int32_t)val) { - return (int32_t)val; - } else { - return (val >> 61) ^ ~SIGNBIT; - } -} - -uint32_t HELPER(get_macu)(uint64_t val) -{ - if ((val >> 32) == 0) { - return (uint32_t)val; - } else { - return 0xffffffffu; - } -} - -uint32_t HELPER(get_mac_extf)(CPUM68KState *env, uint32_t acc) -{ - uint32_t val; - val = env->macc[acc] & 0x00ff; - val = (env->macc[acc] >> 32) & 0xff00; - val |= (env->macc[acc + 1] << 16) & 0x00ff0000; - val |= (env->macc[acc + 1] >> 16) & 0xff000000; - return val; -} - -uint32_t HELPER(get_mac_exti)(CPUM68KState *env, uint32_t acc) -{ - uint32_t val; - val = (env->macc[acc] >> 32) & 0xffff; - val |= (env->macc[acc + 1] >> 16) & 0xffff0000; - return val; -} - -void HELPER(set_mac_extf)(CPUM68KState *env, uint32_t val, uint32_t acc) -{ - int64_t res; - int32_t tmp; - res = env->macc[acc] & 0xffffffff00ull; - tmp = (int16_t)(val & 0xff00); - res |= ((uint64_t)((int64_t)tmp)) << 32; - res |= val & 0xff; - env->macc[acc] = res; - res = env->macc[acc + 1] & 0xffffffff00ull; - tmp = (val & 0xff000000); - res |= ((uint64_t)((int64_t)tmp)) << 16; - res |= (val >> 16) & 0xff; - env->macc[acc + 1] = res; -} - -void HELPER(set_mac_exts)(CPUM68KState *env, uint32_t val, uint32_t acc) -{ - int64_t res; - int32_t tmp; - res = (uint32_t)env->macc[acc]; - tmp = (int16_t)val; - res |= ((uint64_t)((int64_t)tmp)) << 32; - env->macc[acc] = res; - res = (uint32_t)env->macc[acc + 1]; - tmp = val & 0xffff0000; - res |= ((uint64_t)((int64_t)tmp)) << 16; - env->macc[acc + 1] = res; -} - -void HELPER(set_mac_extu)(CPUM68KState *env, uint32_t val, uint32_t acc) -{ - uint64_t res; - res = (uint32_t)env->macc[acc]; - res |= ((uint64_t)(val & 0xffff)) << 32; - env->macc[acc] = res; - res = (uint32_t)env->macc[acc + 1]; - res |= (uint64_t)(val & 0xffff0000) << 16; - env->macc[acc + 1] = res; -} - -void m68k_cpu_exec_enter(CPUState *cs) -{ - M68kCPU *cpu = M68K_CPU(cs->uc, cs); - CPUM68KState *env = &cpu->env; - - env->cc_op = CC_OP_FLAGS; - env->cc_dest = env->sr & 0xf; - env->cc_x = (env->sr >> 4) & 1; -} - -void m68k_cpu_exec_exit(CPUState *cs) -{ - M68kCPU *cpu = M68K_CPU(cs->uc, cs); - CPUM68KState *env = &cpu->env; - - cpu_m68k_flush_flags(env, env->cc_op); - env->cc_op = CC_OP_FLAGS; - env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4); -} diff --git a/qemu/target-m68k/helper.h b/qemu/target-m68k/helper.h deleted file mode 100644 index caaadb3a..00000000 --- a/qemu/target-m68k/helper.h +++ /dev/null @@ -1,52 +0,0 @@ -DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) - -DEF_HELPER_1(bitrev, i32, i32) -DEF_HELPER_1(ff1, i32, i32) -DEF_HELPER_2(sats, i32, i32, i32) -DEF_HELPER_2(divu, void, env, i32) -DEF_HELPER_2(divs, void, env, i32) -DEF_HELPER_3(addx_cc, i32, env, i32, i32) -DEF_HELPER_3(subx_cc, i32, env, i32, i32) -DEF_HELPER_3(shl_cc, i32, env, i32, i32) -DEF_HELPER_3(shr_cc, i32, env, i32, i32) -DEF_HELPER_3(sar_cc, i32, env, i32, i32) -DEF_HELPER_2(xflag_lt, i32, i32, i32) -DEF_HELPER_2(set_sr, void, env, i32) -DEF_HELPER_3(movec, void, env, i32, i32) - -DEF_HELPER_2(f64_to_i32, f32, env, f64) -DEF_HELPER_2(f64_to_f32, f32, env, f64) -DEF_HELPER_2(i32_to_f64, f64, env, i32) -DEF_HELPER_2(f32_to_f64, f64, env, f32) -DEF_HELPER_2(iround_f64, f64, env, f64) -DEF_HELPER_2(itrunc_f64, f64, env, f64) -DEF_HELPER_2(sqrt_f64, f64, env, f64) -DEF_HELPER_1(abs_f64, f64, f64) 
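/* [Editor's note -- not part of the patch.] These DEF_HELPER_<n> entries
   follow QEMU's helper convention: <n> is the argument count, the first
   type after the helper name is the return type, and "env" stands for a
   CPUM68KState * parameter. Under the usual exec/helper-proto.h
   expansion (a sketch, assuming the stock dh_ctype mapping), e.g.
       DEF_HELPER_3(add_f64, f64, env, f64, f64)
   declares roughly
       float64 helper_add_f64(CPUM68KState *env, float64 a, float64 b);
   and exec/helper-gen.h emits the matching gen_helper_add_f64() that
   translate.c uses to call the helper from generated TCG code. */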
-DEF_HELPER_1(chs_f64, f64, f64) -DEF_HELPER_3(add_f64, f64, env, f64, f64) -DEF_HELPER_3(sub_f64, f64, env, f64, f64) -DEF_HELPER_3(mul_f64, f64, env, f64, f64) -DEF_HELPER_3(div_f64, f64, env, f64, f64) -DEF_HELPER_3(sub_cmp_f64, f64, env, f64, f64) -DEF_HELPER_2(compare_f64, i32, env, f64) - -DEF_HELPER_3(mac_move, void, env, i32, i32) -DEF_HELPER_3(macmulf, i64, env, i32, i32) -DEF_HELPER_3(macmuls, i64, env, i32, i32) -DEF_HELPER_3(macmulu, i64, env, i32, i32) -DEF_HELPER_2(macsats, void, env, i32) -DEF_HELPER_2(macsatu, void, env, i32) -DEF_HELPER_2(macsatf, void, env, i32) -DEF_HELPER_2(mac_set_flags, void, env, i32) -DEF_HELPER_2(set_macsr, void, env, i32) -DEF_HELPER_2(get_macf, i32, env, i64) -DEF_HELPER_1(get_macs, i32, i64) -DEF_HELPER_1(get_macu, i32, i64) -DEF_HELPER_2(get_mac_extf, i32, env, i32) -DEF_HELPER_2(get_mac_exti, i32, env, i32) -DEF_HELPER_3(set_mac_extf, void, env, i32, i32) -DEF_HELPER_3(set_mac_exts, void, env, i32, i32) -DEF_HELPER_3(set_mac_extu, void, env, i32, i32) - -DEF_HELPER_2(flush_flags, void, env, i32) -DEF_HELPER_2(raise_exception, void, env, i32) diff --git a/qemu/target-m68k/m68k-qreg.h b/qemu/target-m68k/m68k-qreg.h deleted file mode 100644 index c224d5ec..00000000 --- a/qemu/target-m68k/m68k-qreg.h +++ /dev/null @@ -1,11 +0,0 @@ -enum { -#define DEFO32(name, offset) QREG_##name, -#define DEFR(name, reg, mode) QREG_##name, -#define DEFF64(name, offset) QREG_##name, - QREG_NULL, -#include "qregs.def" - TARGET_NUM_QREGS = 0x100 -#undef DEFO32 -#undef DEFR -#undef DEFF64 -}; diff --git a/qemu/target-m68k/op_helper.c b/qemu/target-m68k/op_helper.c deleted file mode 100644 index 2686e980..00000000 --- a/qemu/target-m68k/op_helper.c +++ /dev/null @@ -1,225 +0,0 @@ -/* - * M68K helper routines - * - * Copyright (c) 2007 CodeSourcery - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#include "cpu.h" -#include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" - -#if defined(CONFIG_USER_ONLY) - -void m68k_cpu_do_interrupt(CPUState *cs) -{ - cs->exception_index = -1; -} - -static inline void do_interrupt_m68k_hardirq(CPUM68KState *env) -{ -} - -#else - -extern int semihosting_enabled; - -/* Try to fill the TLB and return an exception if error. If retaddr is - NULL, it means that the function was called in C code (i.e. 
not - from generated code or from helper.c) */ -void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, - uintptr_t retaddr) -{ - int ret; - - ret = m68k_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); - if (unlikely(ret)) { - if (retaddr) { - /* now we have a real cpu fault */ - cpu_restore_state(cs, retaddr); - } - cpu_loop_exit(cs); - } -} - -static void do_rte(CPUM68KState *env) -{ - uint32_t sp; - uint32_t fmt; - - sp = env->aregs[7]; - fmt = cpu_ldl_kernel(env, sp); - env->pc = cpu_ldl_kernel(env, sp + 4); - sp |= (fmt >> 28) & 3; - env->sr = fmt & 0xffff; - m68k_switch_sp(env); - env->aregs[7] = sp + 8; -} - -static void do_interrupt_all(CPUM68KState *env, int is_hw) -{ - CPUState *cs = CPU(m68k_env_get_cpu(env)); - uint32_t sp; - uint32_t fmt; - uint32_t retaddr; - uint32_t vector; - - fmt = 0; - retaddr = env->pc; - - if (!is_hw) { - switch (cs->exception_index) { - case EXCP_RTE: - /* Return from an exception. */ - do_rte(env); - return; - case EXCP_HALT_INSN: - cs->halted = 1; - cs->exception_index = EXCP_HLT; - cpu_loop_exit(cs); - return; - } - if (cs->exception_index >= EXCP_TRAP0 - && cs->exception_index <= EXCP_TRAP15) { - /* Move the PC after the trap instruction. */ - retaddr += 2; - } - } - - vector = cs->exception_index << 2; - - sp = env->aregs[7]; - - fmt |= 0x40000000; - fmt |= (sp & 3) << 28; - fmt |= vector << 16; - fmt |= env->sr; - - env->sr |= SR_S; - if (is_hw) { - env->sr = (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT); - env->sr &= ~SR_M; - } - m68k_switch_sp(env); - - /* ??? This could cause MMU faults. */ - sp &= ~3; - sp -= 4; - cpu_stl_kernel(env, sp, retaddr); - sp -= 4; - cpu_stl_kernel(env, sp, fmt); - env->aregs[7] = sp; - /* Jump to vector. */ - env->pc = cpu_ldl_kernel(env, env->vbr + vector); -} - -void m68k_cpu_do_interrupt(CPUState *cs) -{ - M68kCPU *cpu = M68K_CPU(cs->uc, cs); - CPUM68KState *env = &cpu->env; - - do_interrupt_all(env, 0); -} - -static inline void do_interrupt_m68k_hardirq(CPUM68KState *env) -{ - do_interrupt_all(env, 1); -} -#endif - -bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - M68kCPU *cpu = M68K_CPU(cs->uc, cs); - CPUM68KState *env = &cpu->env; - - if (interrupt_request & CPU_INTERRUPT_HARD - && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) { - /* Real hardware gets the interrupt vector via an IACK cycle - at this point. Current emulated hardware doesn't rely on - this, so we provide/save the vector when the interrupt is - first signalled. */ - cs->exception_index = env->pending_vector; - do_interrupt_m68k_hardirq(env); - return true; - } - return false; -} - -static void raise_exception(CPUM68KState *env, int tt) -{ - CPUState *cs = CPU(m68k_env_get_cpu(env)); - - cs->exception_index = tt; - cpu_loop_exit(cs); -} - -void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt) -{ - raise_exception(env, tt); -} - -void HELPER(divu)(CPUM68KState *env, uint32_t word) -{ - uint32_t num; - uint32_t den; - uint32_t quot; - uint32_t rem; - uint32_t flags; - - num = env->div1; - den = env->div2; - /* ??? This needs to make sure the throwing location is accurate. 
*/ - if (den == 0) { - raise_exception(env, EXCP_DIV0); - } - quot = num / den; - rem = num % den; - flags = 0; - if (word && quot > 0xffff) - flags |= CCF_V; - if (quot == 0) - flags |= CCF_Z; - else if ((int32_t)quot < 0) - flags |= CCF_N; - env->div1 = quot; - env->div2 = rem; - env->cc_dest = flags; -} - -void HELPER(divs)(CPUM68KState *env, uint32_t word) -{ - int32_t num; - int32_t den; - int32_t quot; - int32_t rem; - int32_t flags; - - num = env->div1; - den = env->div2; - if (den == 0) { - raise_exception(env, EXCP_DIV0); - } - quot = (int64_t)num / den; - rem = (int64_t)num % den; - flags = 0; - if (word && quot != (int16_t)quot) - flags |= CCF_V; - if (quot == 0) - flags |= CCF_Z; - else if (quot < 0) - flags |= CCF_N; - env->div1 = quot; - env->div2 = rem; - env->cc_dest = flags; -} diff --git a/qemu/target-m68k/translate.c b/qemu/target-m68k/translate.c deleted file mode 100644 index 7ada763c..00000000 --- a/qemu/target-m68k/translate.c +++ /dev/null @@ -1,3220 +0,0 @@ -/* - * m68k translation - * - * Copyright (c) 2005-2007 CodeSourcery - * Written by Paul Brook - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "cpu.h" -#include "tcg-op.h" -#include "qemu/log.h" -#include "exec/cpu_ldst.h" - -#include "exec/helper-proto.h" -#include "exec/helper-gen.h" - -#include "exec/gen-icount.h" - -//#define DEBUG_DISPATCH 1 - -/* Fake floating point. 
*/ -#define tcg_gen_mov_f64 tcg_gen_mov_i64 -#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64 -#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64 - -#define DREG(insn, pos) *((TCGv *)tcg_ctx->cpu_dregs[((insn) >> (pos)) & 7]) -#define AREG(insn, pos) *((TCGv *)tcg_ctx->cpu_aregs[((insn) >> (pos)) & 7]) -#define FREG(insn, pos) tcg_ctx->cpu_fregs[((insn) >> (pos)) & 7] -#define MACREG(acc) tcg_ctx->cpu_macc[acc] -#define QREG_SP *((TCGv *)tcg_ctx->cpu_aregs[7]) - -#define IS_NULL_QREG(t) (TCGV_EQUAL(t, tcg_ctx->NULL_QREG)) - -void m68k_tcg_init(struct uc_struct *uc) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - char *p; - int i; - -#define DEFO32(name, offset) if (!uc->init_tcg) { tcg_ctx->QREG_##name = g_malloc0(sizeof(TCGv));} *((TCGv *)tcg_ctx->QREG_##name) = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUM68KState, offset), #name); -#define DEFO64(name, offset) tcg_ctx->QREG_##name = tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, offsetof(CPUM68KState, offset), #name); -#define DEFF64(name, offset) DEFO64(name, offset) -#include "qregs.def" -#undef DEFO32 -#undef DEFO64 -#undef DEFF64 - - // tcg_ctx->QREG_FP_RESULT = tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, offsetof(CPUM68KState, fp_result), "FP_RESULT"); - - tcg_ctx->cpu_halted = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, - 0-offsetof(M68kCPU, env) + - offsetof(CPUState, halted), "HALTED"); - - tcg_ctx->cpu_env = tcg_global_reg_new_ptr(tcg_ctx, TCG_AREG0, "env"); - - p = tcg_ctx->cpu_reg_names; - - for (i = 0; i < 8; i++) { - sprintf(p, "D%d", i); - if (!uc->init_tcg) - tcg_ctx->cpu_dregs[i] = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_dregs[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUM68KState, dregs[i]), p); - p += 3; - sprintf(p, "A%d", i); - if (!uc->init_tcg) - tcg_ctx->cpu_aregs[i] = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_aregs[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUM68KState, aregs[i]), p); - p += 3; - sprintf(p, "F%d", i); - tcg_ctx->cpu_fregs[i] = tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, - offsetof(CPUM68KState, fregs[i]), p); - p += 3; - } - - for (i = 0; i < 4; i++) { - sprintf(p, "ACC%d", i); - tcg_ctx->cpu_macc[i] = tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, - offsetof(CPUM68KState, macc[i]), p); - p += 5; - } - - if (!uc->init_tcg) - tcg_ctx->NULL_QREG = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->NULL_QREG) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, -4, "NULL"); - - if (!uc->init_tcg) - tcg_ctx->store_dummy = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->store_dummy) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, -8, "NULL"); - - uc->init_tcg = true; -} - -/* internal defines */ -typedef struct DisasContext { - CPUM68KState *env; - target_ulong insn_pc; /* Start of the current instruction. 
*/ - target_ulong pc; - int is_jmp; - int cc_op; - int user; - uint32_t fpcr; - struct TranslationBlock *tb; - int singlestep_enabled; - int is_mem; - TCGv_i64 mactmp; - int done_mac; - - // Unicorn engine - struct uc_struct *uc; -} DisasContext; - -#define DISAS_JUMP_NEXT 4 - -#if defined(CONFIG_USER_ONLY) -#define IS_USER(s) 1 -#else -#define IS_USER(s) s->user -#endif - -#define OS_BYTE 0 -#define OS_WORD 1 -#define OS_LONG 2 -#define OS_SINGLE 4 -#define OS_DOUBLE 5 - -typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn); - -#ifdef DEBUG_DISPATCH -#define DISAS_INSN(name) \ - static void real_disas_##name(CPUM68KState *env, DisasContext *s, \ - uint16_t insn); \ - static void disas_##name(CPUM68KState *env, DisasContext *s, \ - uint16_t insn) \ - { \ - qemu_log("Dispatch " #name "\n"); \ - real_disas_##name(s, env, insn); \ - } \ - static void real_disas_##name(CPUM68KState *env, DisasContext *s, \ - uint16_t insn) -#else -#define DISAS_INSN(name) \ - static void disas_##name(CPUM68KState *env, DisasContext *s, \ - uint16_t insn) -#endif - -/* Generate a load from the specified address. Narrow values are - sign extended to full register width. */ -static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv tmp; - int index = IS_USER(s); - s->is_mem = 1; - tmp = tcg_temp_new_i32(tcg_ctx); - switch(opsize) { - case OS_BYTE: - if (sign) - tcg_gen_qemu_ld8s(s->uc, tmp, addr, index); - else - tcg_gen_qemu_ld8u(s->uc, tmp, addr, index); - break; - case OS_WORD: - if (sign) - tcg_gen_qemu_ld16s(s->uc, tmp, addr, index); - else - tcg_gen_qemu_ld16u(s->uc, tmp, addr, index); - break; - case OS_LONG: - case OS_SINGLE: - tcg_gen_qemu_ld32u(s->uc, tmp, addr, index); - break; - default: - g_assert_not_reached(); - } - return tmp; -} - -static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 tmp; - int index = IS_USER(s); - s->is_mem = 1; - tmp = tcg_temp_new_i64(tcg_ctx); - tcg_gen_qemu_ldf64(s->uc, tmp, addr, index); - return tmp; -} - -/* Generate a store. */ -static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val) -{ - int index = IS_USER(s); - s->is_mem = 1; - switch(opsize) { - case OS_BYTE: - tcg_gen_qemu_st8(s->uc, val, addr, index); - break; - case OS_WORD: - tcg_gen_qemu_st16(s->uc, val, addr, index); - break; - case OS_LONG: - case OS_SINGLE: - tcg_gen_qemu_st32(s->uc, val, addr, index); - break; - default: - g_assert_not_reached(); - } -} - -static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val) -{ - int index = IS_USER(s); - s->is_mem = 1; - tcg_gen_qemu_stf64(s->uc, val, addr, index); -} - -typedef enum { - EA_STORE, - EA_LOADU, - EA_LOADS -} ea_what; - -/* Generate an unsigned load if VAL is 0 a signed load if val is -1, - otherwise generate a store. */ -static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val, - ea_what what) -{ - if (what == EA_STORE) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_store(s, opsize, addr, val); - return *(TCGv *)tcg_ctx->store_dummy; - } else { - return gen_load(s, opsize, addr, what == EA_LOADS); - } -} - -/* Read a 32-bit immediate constant. */ -static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s) -{ - uint32_t im; - im = ((uint32_t)cpu_lduw_code(env, s->pc)) << 16; - s->pc += 2; - im |= cpu_lduw_code(env, s->pc); - s->pc += 2; - return im; -} - -/* Calculate and address index. 
*/
-static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv add;
-    int scale;
-
-    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
-    if ((ext & 0x800) == 0) {
-        tcg_gen_ext16s_i32(tcg_ctx, tmp, add);
-        add = tmp;
-    }
-    scale = (ext >> 9) & 3;
-    if (scale != 0) {
-        tcg_gen_shli_i32(tcg_ctx, tmp, add, scale);
-        add = tmp;
-    }
-    return add;
-}
-
-/* Handle a base + index + displacement effective address.
-   A NULL_QREG base means pc-relative. */
-static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, int opsize,
-                            TCGv base)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    uint32_t offset;
-    uint16_t ext;
-    TCGv add;
-    TCGv tmp;
-    uint32_t bd, od;
-
-    offset = s->pc;
-    ext = cpu_lduw_code(env, s->pc);
-    s->pc += 2;
-
-    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
-        return *(TCGv *)tcg_ctx->NULL_QREG;
-
-    if (ext & 0x100) {
-        /* full extension word format */
-        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
-            return *(TCGv *)tcg_ctx->NULL_QREG;
-
-        if ((ext & 0x30) > 0x10) {
-            /* base displacement */
-            if ((ext & 0x30) == 0x20) {
-                bd = (int16_t)cpu_lduw_code(env, s->pc);
-                s->pc += 2;
-            } else {
-                bd = read_im32(env, s);
-            }
-        } else {
-            bd = 0;
-        }
-        tmp = tcg_temp_new(tcg_ctx);
-        if ((ext & 0x44) == 0) {
-            /* pre-index */
-            add = gen_addr_index(s, ext, tmp);
-        } else {
-            add = *(TCGv *)tcg_ctx->NULL_QREG;
-        }
-        if ((ext & 0x80) == 0) {
-            /* base not suppressed */
-            if (IS_NULL_QREG(base)) {
-                base = tcg_const_i32(tcg_ctx, offset + bd);
-                bd = 0;
-            }
-            if (!IS_NULL_QREG(add)) {
-                tcg_gen_add_i32(tcg_ctx, tmp, add, base);
-                add = tmp;
-            } else {
-                add = base;
-            }
-        }
-        if (!IS_NULL_QREG(add)) {
-            if (bd != 0) {
-                tcg_gen_addi_i32(tcg_ctx, tmp, add, bd);
-                add = tmp;
-            }
-        } else {
-            add = tcg_const_i32(tcg_ctx, bd);
-        }
-        if ((ext & 3) != 0) {
-            /* memory indirect */
-            base = gen_load(s, OS_LONG, add, 0);
-            if ((ext & 0x44) == 4) {
-                add = gen_addr_index(s, ext, tmp);
-                tcg_gen_add_i32(tcg_ctx, tmp, add, base);
-                add = tmp;
-            } else {
-                add = base;
-            }
-            if ((ext & 3) > 1) {
-                /* outer displacement */
-                if ((ext & 3) == 2) {
-                    od = (int16_t)cpu_lduw_code(env, s->pc);
-                    s->pc += 2;
-                } else {
-                    od = read_im32(env, s);
-                }
-            } else {
-                od = 0;
-            }
-            if (od != 0) {
-                tcg_gen_addi_i32(tcg_ctx, tmp, add, od);
-                add = tmp;
-            }
-        }
-    } else {
-        /* brief extension word format */
-        tmp = tcg_temp_new(tcg_ctx);
-        add = gen_addr_index(s, ext, tmp);
-        if (!IS_NULL_QREG(base)) {
-            tcg_gen_add_i32(tcg_ctx, tmp, add, base);
-            if ((int8_t)ext)
-                tcg_gen_addi_i32(tcg_ctx, tmp, tmp, (int8_t)ext);
-        } else {
-            tcg_gen_addi_i32(tcg_ctx, tmp, add, offset + (int8_t)ext);
-        }
-        add = tmp;
-    }
-    return add;
-}
-
-/* Update the CPU env CC_OP state. */
-static inline void gen_flush_cc_op(DisasContext *s)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    if (s->cc_op != CC_OP_DYNAMIC)
-        tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_OP, s->cc_op);
-}
-
-/* Evaluate all the CC flags.
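   The translator keeps condition codes lazy: most instructions only record
   an operation kind in s->cc_op plus its operands in QREG_CC_DEST and
   QREG_CC_SRC, and the real CCR bits are materialized by a helper only when
   something consumes them. A minimal self-contained model of that scheme
   (illustrative only, with invented names; this is not code from the patch):

       #include <stdint.h>

       enum { CCF_V = 0x02, CCF_Z = 0x04, CCF_N = 0x08 }; // m68k CCR bits
       enum cc_op { OP_FLAGS, OP_LOGIC, OP_SUB };

       // Instructions just record how the flags could be computed...
       typedef struct { enum cc_op op; uint32_t dest, src, flags; } lazy_cc;

       // ...and a consumer computes them on demand, the way
       // helper_flush_flags() does when gen_flush_flags() emits its call.
       static uint32_t cc_eval(lazy_cc *cc)
       {
           int32_t res = (int32_t)cc->dest;
           switch (cc->op) {
           case OP_LOGIC: // N and Z come from the result alone
               cc->flags = (res == 0 ? CCF_Z : 0) | (res < 0 ? CCF_N : 0);
               break;
           case OP_SUB:   // dest holds the result, src the subtrahend
               cc->flags = (res == 0 ? CCF_Z : 0) | (res < 0 ? CCF_N : 0);
               // V and C would be derived from dest and src here; elided.
               break;
           default:       // OP_FLAGS: already materialized
               break;
           }
           cc->op = OP_FLAGS;
           return cc->flags;
       }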
*/
-static inline void gen_flush_flags(DisasContext *s)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    if (s->cc_op == CC_OP_FLAGS)
-        return;
-    gen_flush_cc_op(s);
-    gen_helper_flush_flags(tcg_ctx, tcg_ctx->cpu_env, *(TCGv *)tcg_ctx->QREG_CC_OP);
-    s->cc_op = CC_OP_FLAGS;
-}
-
-static void gen_logic_cc(DisasContext *s, TCGv val)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, val);
-    s->cc_op = CC_OP_LOGIC;
-}
-
-static void gen_update_cc_add(DisasContext *s, TCGv dest, TCGv src)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, dest);
-    tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_SRC, src);
-}
-
-static inline int opsize_bytes(int opsize)
-{
-    switch (opsize) {
-    case OS_BYTE: return 1;
-    case OS_WORD: return 2;
-    case OS_LONG: return 4;
-    case OS_SINGLE: return 4;
-    case OS_DOUBLE: return 8;
-    default:
-        g_assert_not_reached();
-        return 0;
-    }
-
-    return 0;
-}
-
-/* Assign value to a register. If the width is less than the register width
-   only the low part of the register is set. */
-static void gen_partset_reg(DisasContext *s, int opsize, TCGv reg, TCGv val)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv tmp;
-    switch (opsize) {
-    case OS_BYTE:
-        tcg_gen_andi_i32(tcg_ctx, reg, reg, 0xffffff00);
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_ext8u_i32(tcg_ctx, tmp, val);
-        tcg_gen_or_i32(tcg_ctx, reg, reg, tmp);
-        break;
-    case OS_WORD:
-        tcg_gen_andi_i32(tcg_ctx, reg, reg, 0xffff0000);
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_ext16u_i32(tcg_ctx, tmp, val);
-        tcg_gen_or_i32(tcg_ctx, reg, reg, tmp);
-        break;
-    case OS_LONG:
-    case OS_SINGLE:
-        tcg_gen_mov_i32(tcg_ctx, reg, val);
-        break;
-    default:
-        g_assert_not_reached();
-    }
-}
-
-/* Sign or zero extend a value. */
-static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv tmp;
-
-    switch (opsize) {
-    case OS_BYTE:
-        tmp = tcg_temp_new(tcg_ctx);
-        if (sign)
-            tcg_gen_ext8s_i32(tcg_ctx, tmp, val);
-        else
-            tcg_gen_ext8u_i32(tcg_ctx, tmp, val);
-        break;
-    case OS_WORD:
-        tmp = tcg_temp_new(tcg_ctx);
-        if (sign)
-            tcg_gen_ext16s_i32(tcg_ctx, tmp, val);
-        else
-            tcg_gen_ext16u_i32(tcg_ctx, tmp, val);
-        break;
-    case OS_LONG:
-    case OS_SINGLE:
-        tmp = val;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-    return tmp;
-}
-
-/* Generate code for an "effective address". Does not adjust the base
-   register for autoincrement addressing modes. */
-static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
-                    int opsize)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv reg;
-    TCGv tmp;
-    uint16_t ext;
-    uint32_t offset;
-
-    switch ((insn >> 3) & 7) {
-    case 0: /* Data register direct. */
-    case 1: /* Address register direct. */
-        return *(TCGv *)tcg_ctx->NULL_QREG;
-    case 2: /* Indirect register */
-    case 3: /* Indirect postincrement. */
-        return AREG(insn, 0);
-    case 4: /* Indirect predecrement. */
-        reg = AREG(insn, 0);
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_subi_i32(tcg_ctx, tmp, reg, opsize_bytes(opsize));
-        return tmp;
-    case 5: /* Indirect displacement. */
-        reg = AREG(insn, 0);
-        tmp = tcg_temp_new(tcg_ctx);
-        ext = cpu_lduw_code(env, s->pc);
-        s->pc += 2;
-        tcg_gen_addi_i32(tcg_ctx, tmp, reg, (int16_t)ext);
-        return tmp;
-    case 6: /* Indirect index + displacement. */
-        reg = AREG(insn, 0);
-        return gen_lea_indexed(env, s, opsize, reg);
-    case 7: /* Other */
-        switch (insn & 7) {
-        case 0: /* Absolute short. */
-            offset = cpu_ldsw_code(env, s->pc);
-            s->pc += 2;
-            return tcg_const_i32(tcg_ctx, offset);
-        case 1: /* Absolute long. */
-            offset = read_im32(env, s);
-            return tcg_const_i32(tcg_ctx, offset);
-        case 2: /* pc displacement */
-            offset = s->pc;
-            offset += cpu_ldsw_code(env, s->pc);
-            s->pc += 2;
-            return tcg_const_i32(tcg_ctx, offset);
-        case 3: /* pc index+displacement. */
-            return gen_lea_indexed(env, s, opsize, *(TCGv *)tcg_ctx->NULL_QREG);
-        case 4: /* Immediate. */
-        default:
-            return *(TCGv *)tcg_ctx->NULL_QREG;
-        }
-    }
-    /* Should never happen. */
-    return *(TCGv *)tcg_ctx->NULL_QREG;
-}
-
-/* Helper function for gen_ea. Reuse the computed address between the
-   read/write operands. */
-static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
-                               uint16_t insn, int opsize, TCGv val,
-                               TCGv *addrp, ea_what what)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv tmp;
-
-    if (addrp && what == EA_STORE) {
-        tmp = *addrp;
-    } else {
-        tmp = gen_lea(env, s, insn, opsize);
-        if (IS_NULL_QREG(tmp))
-            return tmp;
-        if (addrp)
-            *addrp = tmp;
-    }
-    return gen_ldst(s, opsize, tmp, val, what);
-}
-
-/* Generate code to load/store a value from/into an EA. If WHAT is EA_STORE
-   this is a write, otherwise it is a read (EA_LOADS sign extends, EA_LOADU
-   zero extends). ADDRP is non-null for read/write operands. */
-static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
-                   int opsize, TCGv val, TCGv *addrp, ea_what what)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv reg;
-    TCGv result;
-    uint32_t offset;
-
-    switch ((insn >> 3) & 7) {
-    case 0: /* Data register direct. */
-        reg = DREG(insn, 0);
-        if (what == EA_STORE) {
-            gen_partset_reg(s, opsize, reg, val);
-            return *(TCGv *)tcg_ctx->store_dummy;
-        } else {
-            return gen_extend(s, reg, opsize, what == EA_LOADS);
-        }
-    case 1: /* Address register direct. */
-        reg = AREG(insn, 0);
-        if (what == EA_STORE) {
-            tcg_gen_mov_i32(tcg_ctx, reg, val);
-            return *(TCGv *)tcg_ctx->store_dummy;
-        } else {
-            return gen_extend(s, reg, opsize, what == EA_LOADS);
-        }
-    case 2: /* Indirect register */
-        reg = AREG(insn, 0);
-        return gen_ldst(s, opsize, reg, val, what);
-    case 3: /* Indirect postincrement. */
-        reg = AREG(insn, 0);
-        result = gen_ldst(s, opsize, reg, val, what);
-        /* ??? This is not exception safe. The instruction may still
-           fault after this point. */
-        if (what == EA_STORE || !addrp)
-            tcg_gen_addi_i32(tcg_ctx, reg, reg, opsize_bytes(opsize));
-        return result;
-    case 4: /* Indirect predecrement. */
-        {
-            TCGv tmp;
-            if (addrp && what == EA_STORE) {
-                tmp = *addrp;
-            } else {
-                tmp = gen_lea(env, s, insn, opsize);
-                if (IS_NULL_QREG(tmp))
-                    return tmp;
-                if (addrp)
-                    *addrp = tmp;
-            }
-            result = gen_ldst(s, opsize, tmp, val, what);
-            /* ??? This is not exception safe. The instruction may still
-               fault after this point. */
-            if (what == EA_STORE || !addrp) {
-                reg = AREG(insn, 0);
-                tcg_gen_mov_i32(tcg_ctx, reg, tmp);
-            }
-        }
-        return result;
-    case 5: /* Indirect displacement. */
-    case 6: /* Indirect index + displacement. */
-        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
-    case 7: /* Other */
-        switch (insn & 7) {
-        case 0: /* Absolute short. */
-        case 1: /* Absolute long. */
-        case 2: /* pc displacement */
-        case 3: /* pc index+displacement. */
-            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
-        case 4: /* Immediate. */
-            /* Sign extend values for consistency. */
-            switch (opsize) {
-            case OS_BYTE:
-                if (what == EA_LOADS) {
-                    offset = cpu_ldsb_code(env, s->pc + 1);
-                } else {
-                    offset = cpu_ldub_code(env, s->pc + 1);
-                }
-                s->pc += 2;
-                break;
-            case OS_WORD:
-                if (what == EA_LOADS) {
-                    offset = cpu_ldsw_code(env, s->pc);
-                } else {
-                    offset = cpu_lduw_code(env, s->pc);
-                }
-                s->pc += 2;
-                break;
-            case OS_LONG:
-                offset = read_im32(env, s);
-                break;
-            default:
-                // Should not happen: for OS_SINGLE
-                return *(TCGv *)tcg_ctx->NULL_QREG;
-            }
-            return tcg_const_i32(tcg_ctx, offset);
-        default:
-            return *(TCGv *)tcg_ctx->NULL_QREG;
-        }
-    }
-    /* Should never happen. */
-    return *(TCGv *)tcg_ctx->NULL_QREG;
-}
-
-/* This generates a conditional branch, clobbering all temporaries. */
-static void gen_jmpcc(DisasContext *s, int cond, int l1)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv tmp;
-
-    /* TODO: Optimize compare/branch pairs rather than always flushing
-       flag state to CC_OP_FLAGS. */
-    gen_flush_flags(s);
-    switch (cond) {
-    case 0: /* T */
-        tcg_gen_br(tcg_ctx, l1);
-        break;
-    case 1: /* F */
-        break;
-    case 2: /* HI (!C && !Z) */
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_C | CCF_Z);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1);
-        break;
-    case 3: /* LS (C || Z) */
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_C | CCF_Z);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1);
-        break;
-    case 4: /* CC (!C) */
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_C);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1);
-        break;
-    case 5: /* CS (C) */
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_C);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1);
-        break;
-    case 6: /* NE (!Z) */
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_Z);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1);
-        break;
-    case 7: /* EQ (Z) */
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_Z);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1);
-        break;
-    case 8: /* VC (!V) */
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_V);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1);
-        break;
-    case 9: /* VS (V) */
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_V);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1);
-        break;
-    case 10: /* PL (!N) */
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_N);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1);
-        break;
-    case 11: /* MI (N) */
-        tmp = tcg_temp_new(tcg_ctx);
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_N);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1);
-        break;
-    case 12: /* GE (!(N ^ V)) */
-        tmp = tcg_temp_new(tcg_ctx);
-        assert(CCF_V == (CCF_N >> 2));
-        tcg_gen_shri_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, 2);
-        tcg_gen_xor_i32(tcg_ctx, tmp, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST);
-        tcg_gen_andi_i32(tcg_ctx, tmp, tmp, CCF_V);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1);
-        break;
-    case 13: /* LT (N ^ V) */
-        tmp = tcg_temp_new(tcg_ctx);
-        assert(CCF_V == (CCF_N >> 2));
-        tcg_gen_shri_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, 2);
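        // The assert above relies on the m68k CCR layout, where CCF_N is
        // 0x08 and CCF_V is 0x02, so CCF_V == CCF_N >> 2. Shifting the
        // flags right by 2 aligns N over V; the XOR below then leaves
        // N ^ V at the V bit position, and a single AND plus branch tests
        // the signed less-than condition. For example, N=1 and V=0 gives
        // (0x08 >> 2) ^ 0x00 = 0x02, so the LT branch is taken.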
-        tcg_gen_xor_i32(tcg_ctx, tmp, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST);
-        tcg_gen_andi_i32(tcg_ctx, tmp, tmp, CCF_V);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1);
-        break;
-    case 14: /* GT (!(Z || (N ^ V))) */
-        tmp = tcg_temp_new(tcg_ctx);
-        assert(CCF_V == (CCF_N >> 2));
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_N);
-        tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 2);
-        tcg_gen_xor_i32(tcg_ctx, tmp, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST);
-        tcg_gen_andi_i32(tcg_ctx, tmp, tmp, CCF_V | CCF_Z);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1);
-        break;
-    case 15: /* LE (Z || (N ^ V)) */
-        tmp = tcg_temp_new(tcg_ctx);
-        assert(CCF_V == (CCF_N >> 2));
-        tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_N);
-        tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 2);
-        tcg_gen_xor_i32(tcg_ctx, tmp, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST);
-        tcg_gen_andi_i32(tcg_ctx, tmp, tmp, CCF_V | CCF_Z);
-        tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1);
-        break;
-    default:
-        /* Should never happen. */
-        abort();
-    }
-}
-
-DISAS_INSN(scc)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    int l1;
-    int cond;
-    TCGv reg;
-
-    l1 = gen_new_label(tcg_ctx);
-    cond = (insn >> 8) & 0xf;
-    reg = DREG(insn, 0);
-    tcg_gen_andi_i32(tcg_ctx, reg, reg, 0xffffff00);
-    /* This is safe because we modify the reg directly, with no other values
-       live. */
-    gen_jmpcc(s, cond ^ 1, l1);
-    tcg_gen_ori_i32(tcg_ctx, reg, reg, 0xff);
-    gen_set_label(tcg_ctx, l1);
-}
-
-/* Force a TB lookup after an instruction that changes the CPU state. */
-static void gen_lookup_tb(DisasContext *s)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    gen_flush_cc_op(s);
-    tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_PC, s->pc);
-    s->is_jmp = DISAS_UPDATE;
-}
-
-/* Generate a jump to an immediate address. */
-static void gen_jmp_im(DisasContext *s, uint32_t dest)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    gen_flush_cc_op(s);
-    tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_PC, dest);
-    s->is_jmp = DISAS_JUMP;
-}
-
-/* Generate a jump to the address in qreg DEST. */
-static void gen_jmp(DisasContext *s, TCGv dest)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    gen_flush_cc_op(s);
-    tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_PC, dest);
-    s->is_jmp = DISAS_JUMP;
-}
-
-static void gen_exception(DisasContext *s, uint32_t where, int nr)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    gen_flush_cc_op(s);
-    gen_jmp_im(s, where);
-    gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, nr));
-}
-
-static inline void gen_addr_fault(DisasContext *s)
-{
-    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
-}
-
-#define SRC_EA(env, result, opsize, op_sign, addrp) do { \
-    result = gen_ea(env, s, insn, opsize, *(TCGv *)tcg_ctx->NULL_QREG, addrp, \
-                    op_sign ? EA_LOADS : EA_LOADU); \
-    if (IS_NULL_QREG(result)) { \
-        gen_addr_fault(s); \
-        return; \
-    } \
-    } while (0)
-
-#define DEST_EA(env, insn, opsize, val, addrp) do { \
-    TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
-    if (IS_NULL_QREG(ea_result)) { \
-        gen_addr_fault(s); \
-        return; \
-    } \
-    } while (0)
-
-/* Generate a direct jump to an immediate address, chaining TBs where possible.
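   When both ends sit on the same guest page, the jump is chained straight
   to the successor TB instead of bouncing through the execution loop; a
   cross-page direct link is avoided because page-granular invalidation
   (for example under self-modifying code) could leave it stale. In outline
   (pseudo-C with approximate names, not code from this patch):

       if (same_page(tb->pc, dest) || same_page(s->pc, dest)) {
           goto_tb(n);          // patchable direct-jump slot n
           QREG_PC = dest;
           exit_tb(tb | n);     // report which slot may be linked
       } else {
           QREG_PC = dest;      // unchained: full TB lookup next time
           exit_tb(0);
       }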
*/ -static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TranslationBlock *tb; - - tb = s->tb; - if (unlikely(s->singlestep_enabled)) { - gen_exception(s, dest, EXCP_DEBUG); - } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) || - (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { - tcg_gen_goto_tb(tcg_ctx, n); - tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_PC, dest); - tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + n); - } else { - gen_jmp_im(s, dest); - tcg_gen_exit_tb(tcg_ctx, 0); - } - s->is_jmp = DISAS_TB_JUMP; -} - -DISAS_INSN(undef_mac) -{ - gen_exception(s, s->pc - 2, EXCP_LINEA); -} - -DISAS_INSN(undef_fpu) -{ - gen_exception(s, s->pc - 2, EXCP_LINEF); -} - -DISAS_INSN(undef) -{ - gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED); -} - -DISAS_INSN(mulw) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - TCGv tmp; - TCGv src; - int sign; - - sign = (insn & 0x100) != 0; - reg = DREG(insn, 9); - tmp = tcg_temp_new(tcg_ctx); - if (sign) - tcg_gen_ext16s_i32(tcg_ctx, tmp, reg); - else - tcg_gen_ext16u_i32(tcg_ctx, tmp, reg); - SRC_EA(env, src, OS_WORD, sign, NULL); - tcg_gen_mul_i32(tcg_ctx, tmp, tmp, src); - tcg_gen_mov_i32(tcg_ctx, reg, tmp); - /* Unlike m68k, coldfire always clears the overflow bit. */ - gen_logic_cc(s, tmp); -} - -DISAS_INSN(divw) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - TCGv tmp; - TCGv src; - int sign; - - sign = (insn & 0x100) != 0; - reg = DREG(insn, 9); - if (sign) { - tcg_gen_ext16s_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_DIV1, reg); - } else { - tcg_gen_ext16u_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_DIV1, reg); - } - SRC_EA(env, src, OS_WORD, sign, NULL); - tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_DIV2, src); - if (sign) { - gen_helper_divs(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 1)); - } else { - gen_helper_divu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 1)); - } - - tmp = tcg_temp_new(tcg_ctx); - src = tcg_temp_new(tcg_ctx); - tcg_gen_ext16u_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_DIV1); - tcg_gen_shli_i32(tcg_ctx, src, *(TCGv *)tcg_ctx->QREG_DIV2, 16); - tcg_gen_or_i32(tcg_ctx, reg, tmp, src); - s->cc_op = CC_OP_FLAGS; -} - -DISAS_INSN(divl) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv num; - TCGv den; - TCGv reg; - uint16_t ext; - - ext = cpu_lduw_code(env, s->pc); - s->pc += 2; - if (ext & 0x87f8) { - gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED); - return; - } - num = DREG(ext, 12); - reg = DREG(ext, 0); - tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_DIV1, num); - SRC_EA(env, den, OS_LONG, 0, NULL); - tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_DIV2, den); - if (ext & 0x0800) { - gen_helper_divs(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 0)); - } else { - gen_helper_divu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 0)); - } - if ((ext & 7) == ((ext >> 12) & 7)) { - /* div */ - tcg_gen_mov_i32 (tcg_ctx, reg, *(TCGv *)tcg_ctx->QREG_DIV1); - } else { - /* rem */ - tcg_gen_mov_i32 (tcg_ctx, reg, *(TCGv *)tcg_ctx->QREG_DIV2); - } - s->cc_op = CC_OP_FLAGS; -} - -DISAS_INSN(addsub) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - TCGv dest; - TCGv src; - TCGv tmp; - TCGv addr; - int add; - - add = (insn & 0x4000) != 0; - reg = DREG(insn, 9); - dest = tcg_temp_new(tcg_ctx); - if (insn & 0x100) { - SRC_EA(env, tmp, OS_LONG, 0, &addr); - src = reg; - } else { - tmp = reg; - SRC_EA(env, src, OS_LONG, 0, NULL); - } - if (add) { - tcg_gen_add_i32(tcg_ctx, dest, tmp, src); - gen_helper_xflag_lt(tcg_ctx, *(TCGv 
*)tcg_ctx->QREG_CC_X, dest, src); - s->cc_op = CC_OP_ADD; - } else { - gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, tmp, src); - tcg_gen_sub_i32(tcg_ctx, dest, tmp, src); - s->cc_op = CC_OP_SUB; - } - gen_update_cc_add(s, dest, src); - if (insn & 0x100) { - DEST_EA(env, insn, OS_LONG, dest, &addr); - } else { - tcg_gen_mov_i32(tcg_ctx, reg, dest); - } -} - - -/* Reverse the order of the bits in REG. */ -DISAS_INSN(bitrev) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - reg = DREG(insn, 0); - gen_helper_bitrev(tcg_ctx, reg, reg); -} - -DISAS_INSN(bitop_reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int opsize; - int op; - TCGv src1; - TCGv src2; - TCGv tmp; - TCGv addr; - TCGv dest; - - if ((insn & 0x38) != 0) - opsize = OS_BYTE; - else - opsize = OS_LONG; - op = (insn >> 6) & 3; - SRC_EA(env, src1, opsize, 0, op ? &addr: NULL); - src2 = DREG(insn, 9); - dest = tcg_temp_new(tcg_ctx); - - gen_flush_flags(s); - tmp = tcg_temp_new(tcg_ctx); - if (opsize == OS_BYTE) - tcg_gen_andi_i32(tcg_ctx, tmp, src2, 7); - else - tcg_gen_andi_i32(tcg_ctx, tmp, src2, 31); - src2 = tmp; - tmp = tcg_temp_new(tcg_ctx); - tcg_gen_shr_i32(tcg_ctx, tmp, src1, src2); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 1); - tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 2); - /* Clear CCF_Z if bit set. */ - tcg_gen_ori_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_Z); - tcg_gen_xor_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, *(TCGv *)tcg_ctx->QREG_CC_DEST, tmp); - - tcg_gen_shl_i32(tcg_ctx, tmp, tcg_const_i32(tcg_ctx, 1), src2); - switch (op) { - case 1: /* bchg */ - tcg_gen_xor_i32(tcg_ctx, dest, src1, tmp); - break; - case 2: /* bclr */ - tcg_gen_not_i32(tcg_ctx, tmp, tmp); - tcg_gen_and_i32(tcg_ctx, dest, src1, tmp); - break; - case 3: /* bset */ - tcg_gen_or_i32(tcg_ctx, dest, src1, tmp); - break; - default: /* btst */ - break; - } - if (op) - DEST_EA(env, insn, opsize, dest, &addr); -} - -DISAS_INSN(sats) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - reg = DREG(insn, 0); - gen_flush_flags(s); - gen_helper_sats(tcg_ctx, reg, reg, *(TCGv *)tcg_ctx->QREG_CC_DEST); - gen_logic_cc(s, reg); -} - -static void gen_push(DisasContext *s, TCGv val) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv tmp; - - tmp = tcg_temp_new(tcg_ctx); - tcg_gen_subi_i32(tcg_ctx, tmp, QREG_SP, 4); - gen_store(s, OS_LONG, tmp, val); - tcg_gen_mov_i32(tcg_ctx, QREG_SP, tmp); -} - -DISAS_INSN(movem) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv addr; - int i; - uint16_t mask; - TCGv reg; - TCGv tmp; - int is_load; - - mask = cpu_lduw_code(env, s->pc); - s->pc += 2; - tmp = gen_lea(env, s, insn, OS_LONG); - if (IS_NULL_QREG(tmp)) { - gen_addr_fault(s); - return; - } - addr = tcg_temp_new(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, addr, tmp); - is_load = ((insn & 0x0400) != 0); - for (i = 0; i < 16; i++, mask >>= 1) { - if (mask & 1) { - if (i < 8) - reg = DREG(i, 0); - else - reg = AREG(i, 0); - if (is_load) { - tmp = gen_load(s, OS_LONG, addr, 0); - tcg_gen_mov_i32(tcg_ctx, reg, tmp); - } else { - gen_store(s, OS_LONG, addr, reg); - } - if (mask != 1) - tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); - } - } -} - -DISAS_INSN(bitop_im) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int opsize; - int op; - TCGv src1; - uint32_t mask; - int bitnum; - TCGv tmp; - TCGv addr; - - if ((insn & 0x38) != 0) - opsize = OS_BYTE; - else - opsize = OS_LONG; - op = (insn >> 6) & 3; - - bitnum = cpu_lduw_code(env, s->pc); - s->pc += 2; - if (bitnum & 0xff00) { - disas_undef(env, s, insn); - return; - } - - SRC_EA(env, 
src1, opsize, 0, op ? &addr: NULL); - - gen_flush_flags(s); - if (opsize == OS_BYTE) - bitnum &= 7; - else - bitnum &= 31; - mask = 1U << bitnum; - - tmp = tcg_temp_new(tcg_ctx); - assert (CCF_Z == (1 << 2)); - if (bitnum > 2) - tcg_gen_shri_i32(tcg_ctx, tmp, src1, bitnum - 2); - else if (bitnum < 2) - tcg_gen_shli_i32(tcg_ctx, tmp, src1, 2 - bitnum); - else - tcg_gen_mov_i32(tcg_ctx, tmp, src1); - tcg_gen_andi_i32(tcg_ctx, tmp, tmp, CCF_Z); - /* Clear CCF_Z if bit set. */ - tcg_gen_ori_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_Z); - tcg_gen_xor_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, *(TCGv *)tcg_ctx->QREG_CC_DEST, tmp); - if (op) { - switch (op) { - case 1: /* bchg */ - tcg_gen_xori_i32(tcg_ctx, tmp, src1, mask); - break; - case 2: /* bclr */ - tcg_gen_andi_i32(tcg_ctx, tmp, src1, ~mask); - break; - case 3: /* bset */ - tcg_gen_ori_i32(tcg_ctx, tmp, src1, mask); - break; - default: /* btst */ - break; - } - DEST_EA(env, insn, opsize, tmp, &addr); - } -} - -DISAS_INSN(arith_im) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int op; - uint32_t im; - TCGv src1; - TCGv dest; - TCGv addr; - - op = (insn >> 9) & 7; - SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr); - im = read_im32(env, s); - dest = tcg_temp_new(tcg_ctx); - switch (op) { - case 0: /* ori */ - tcg_gen_ori_i32(tcg_ctx, dest, src1, im); - gen_logic_cc(s, dest); - break; - case 1: /* andi */ - tcg_gen_andi_i32(tcg_ctx, dest, src1, im); - gen_logic_cc(s, dest); - break; - case 2: /* subi */ - tcg_gen_mov_i32(tcg_ctx, dest, src1); - gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, dest, tcg_const_i32(tcg_ctx, im)); - tcg_gen_subi_i32(tcg_ctx, dest, dest, im); - gen_update_cc_add(s, dest, tcg_const_i32(tcg_ctx, im)); - s->cc_op = CC_OP_SUB; - break; - case 3: /* addi */ - tcg_gen_mov_i32(tcg_ctx, dest, src1); - tcg_gen_addi_i32(tcg_ctx, dest, dest, im); - gen_update_cc_add(s, dest, tcg_const_i32(tcg_ctx, im)); - gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, dest, tcg_const_i32(tcg_ctx, im)); - s->cc_op = CC_OP_ADD; - break; - case 5: /* eori */ - tcg_gen_xori_i32(tcg_ctx, dest, src1, im); - gen_logic_cc(s, dest); - break; - case 6: /* cmpi */ - tcg_gen_mov_i32(tcg_ctx, dest, src1); - tcg_gen_subi_i32(tcg_ctx, dest, dest, im); - gen_update_cc_add(s, dest, tcg_const_i32(tcg_ctx, im)); - s->cc_op = CC_OP_SUB; - break; - default: - abort(); - } - if (op != 6) { - DEST_EA(env, insn, OS_LONG, dest, &addr); - } -} - -DISAS_INSN(byterev) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - - reg = DREG(insn, 0); - tcg_gen_bswap32_i32(tcg_ctx, reg, reg); -} - -DISAS_INSN(move) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv src; - TCGv dest; - int op; - int opsize; - - switch (insn >> 12) { - case 1: /* move.b */ - opsize = OS_BYTE; - break; - case 2: /* move.l */ - opsize = OS_LONG; - break; - case 3: /* move.w */ - opsize = OS_WORD; - break; - default: - abort(); - } - SRC_EA(env, src, opsize, 1, NULL); - op = (insn >> 6) & 7; - if (op == 1) { - /* movea */ - /* The value will already have been sign extended. */ - dest = AREG(insn, 9); - tcg_gen_mov_i32(tcg_ctx, dest, src); - } else { - /* normal move */ - uint16_t dest_ea; - dest_ea = ((insn >> 9) & 7) | (op << 3); - DEST_EA(env, dest_ea, opsize, src, NULL); - /* This will be correct because loads sign extend. 
*/ - gen_logic_cc(s, src); - } -} - -DISAS_INSN(negx) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - - gen_flush_flags(s); - reg = DREG(insn, 0); - gen_helper_subx_cc(tcg_ctx, reg, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 0), reg); -} - -DISAS_INSN(lea) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - TCGv tmp; - - reg = AREG(insn, 9); - tmp = gen_lea(env, s, insn, OS_LONG); - if (IS_NULL_QREG(tmp)) { - gen_addr_fault(s); - return; - } - tcg_gen_mov_i32(tcg_ctx, reg, tmp); -} - -DISAS_INSN(clr) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int opsize; - - switch ((insn >> 6) & 3) { - case 0: /* clr.b */ - opsize = OS_BYTE; - break; - case 1: /* clr.w */ - opsize = OS_WORD; - break; - case 2: /* clr.l */ - opsize = OS_LONG; - break; - default: - abort(); - } - DEST_EA(env, insn, opsize, tcg_const_i32(tcg_ctx, 0), NULL); - gen_logic_cc(s, tcg_const_i32(tcg_ctx, 0)); -} - -static TCGv gen_get_ccr(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv dest; - - gen_flush_flags(s); - dest = tcg_temp_new(tcg_ctx); - tcg_gen_shli_i32(tcg_ctx, dest, *(TCGv *)tcg_ctx->QREG_CC_X, 4); - tcg_gen_or_i32(tcg_ctx, dest, dest, *(TCGv *)tcg_ctx->QREG_CC_DEST); - return dest; -} - -DISAS_INSN(move_from_ccr) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - TCGv ccr; - - ccr = gen_get_ccr(s); - reg = DREG(insn, 0); - gen_partset_reg(s, OS_WORD, reg, ccr); -} - -DISAS_INSN(neg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - TCGv src1; - - reg = DREG(insn, 0); - src1 = tcg_temp_new(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, src1, reg); - tcg_gen_neg_i32(tcg_ctx, reg, src1); - s->cc_op = CC_OP_SUB; - gen_update_cc_add(s, reg, src1); - gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, tcg_const_i32(tcg_ctx, 0), src1); - s->cc_op = CC_OP_SUB; -} - -static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, val & 0xf); - tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, (val & 0x10) >> 4); - if (!ccr_only) { - gen_helper_set_sr(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, val & 0xff00)); - } -} - -static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn, - int ccr_only) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv tmp; - TCGv reg; - - s->cc_op = CC_OP_FLAGS; - if ((insn & 0x38) == 0) - { - tmp = tcg_temp_new(tcg_ctx); - reg = DREG(insn, 0); - tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, reg, 0xf); - tcg_gen_shri_i32(tcg_ctx, tmp, reg, 4); - tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, tmp, 1); - if (!ccr_only) { - gen_helper_set_sr(tcg_ctx, tcg_ctx->cpu_env, reg); - } - } - else if ((insn & 0x3f) == 0x3c) - { - uint16_t val; - val = cpu_lduw_code(env, s->pc); - s->pc += 2; - gen_set_sr_im(s, val, ccr_only); - } - else - disas_undef(env, s, insn); -} - -DISAS_INSN(move_to_ccr) -{ - gen_set_sr(env, s, insn, 1); -} - -DISAS_INSN(not) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - - reg = DREG(insn, 0); - tcg_gen_not_i32(tcg_ctx, reg, reg); - gen_logic_cc(s, reg); -} - -DISAS_INSN(swap) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv src1; - TCGv src2; - TCGv reg; - - src1 = tcg_temp_new(tcg_ctx); - src2 = tcg_temp_new(tcg_ctx); - reg = DREG(insn, 0); - tcg_gen_shli_i32(tcg_ctx, src1, reg, 16); - tcg_gen_shri_i32(tcg_ctx, src2, reg, 16); - tcg_gen_or_i32(tcg_ctx, reg, src1, src2); - gen_logic_cc(s, reg); -} - -DISAS_INSN(pea) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv tmp; - - tmp = 
gen_lea(env, s, insn, OS_LONG); - if (IS_NULL_QREG(tmp)) { - gen_addr_fault(s); - return; - } - gen_push(s, tmp); -} - -DISAS_INSN(ext) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int op; - TCGv reg; - TCGv tmp; - - reg = DREG(insn, 0); - op = (insn >> 6) & 7; - tmp = tcg_temp_new(tcg_ctx); - if (op == 3) - tcg_gen_ext16s_i32(tcg_ctx, tmp, reg); - else - tcg_gen_ext8s_i32(tcg_ctx, tmp, reg); - if (op == 2) - gen_partset_reg(s, OS_WORD, reg, tmp); - else - tcg_gen_mov_i32(tcg_ctx, reg, tmp); - gen_logic_cc(s, tmp); -} - -DISAS_INSN(tst) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int opsize; - TCGv tmp; - - switch ((insn >> 6) & 3) { - case 0: /* tst.b */ - opsize = OS_BYTE; - break; - case 1: /* tst.w */ - opsize = OS_WORD; - break; - case 2: /* tst.l */ - opsize = OS_LONG; - break; - default: - abort(); - } - SRC_EA(env, tmp, opsize, 1, NULL); - gen_logic_cc(s, tmp); -} - -DISAS_INSN(pulse) -{ - /* Implemented as a NOP. */ -} - -DISAS_INSN(illegal) -{ - gen_exception(s, s->pc - 2, EXCP_ILLEGAL); -} - -/* ??? This should be atomic. */ -DISAS_INSN(tas) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv dest; - TCGv src1; - TCGv addr; - - dest = tcg_temp_new(tcg_ctx); - SRC_EA(env, src1, OS_BYTE, 1, &addr); - gen_logic_cc(s, src1); - tcg_gen_ori_i32(tcg_ctx, dest, src1, 0x80); - DEST_EA(env, insn, OS_BYTE, dest, &addr); -} - -DISAS_INSN(mull) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint16_t ext; - TCGv reg; - TCGv src1; - TCGv dest; - - /* The upper 32 bits of the product are discarded, so - muls.l and mulu.l are functionally equivalent. */ - ext = cpu_lduw_code(env, s->pc); - s->pc += 2; - if (ext & 0x87ff) { - gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED); - return; - } - reg = DREG(ext, 12); - SRC_EA(env, src1, OS_LONG, 0, NULL); - dest = tcg_temp_new(tcg_ctx); - tcg_gen_mul_i32(tcg_ctx, dest, src1, reg); - tcg_gen_mov_i32(tcg_ctx, reg, dest); - /* Unlike m68k, coldfire always clears the overflow bit. */ - gen_logic_cc(s, dest); -} - -DISAS_INSN(link) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int16_t offset; - TCGv reg; - TCGv tmp; - - offset = cpu_ldsw_code(env, s->pc); - s->pc += 2; - reg = AREG(insn, 0); - tmp = tcg_temp_new(tcg_ctx); - tcg_gen_subi_i32(tcg_ctx, tmp, QREG_SP, 4); - gen_store(s, OS_LONG, tmp, reg); - if ((insn & 7) != 7) - tcg_gen_mov_i32(tcg_ctx, reg, tmp); - tcg_gen_addi_i32(tcg_ctx, QREG_SP, tmp, offset); -} - -DISAS_INSN(unlk) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv src; - TCGv reg; - TCGv tmp; - - src = tcg_temp_new(tcg_ctx); - reg = AREG(insn, 0); - tcg_gen_mov_i32(tcg_ctx, src, reg); - tmp = gen_load(s, OS_LONG, src, 0); - tcg_gen_mov_i32(tcg_ctx, reg, tmp); - tcg_gen_addi_i32(tcg_ctx, QREG_SP, src, 4); -} - -DISAS_INSN(nop) -{ -} - -DISAS_INSN(rts) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv tmp; - - tmp = gen_load(s, OS_LONG, QREG_SP, 0); - tcg_gen_addi_i32(tcg_ctx, QREG_SP, QREG_SP, 4); - gen_jmp(s, tmp); -} - -DISAS_INSN(jump) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv tmp; - - /* Load the target address first to ensure correct exception - behavior. 
*/ - tmp = gen_lea(env, s, insn, OS_LONG); - if (IS_NULL_QREG(tmp)) { - gen_addr_fault(s); - return; - } - if ((insn & 0x40) == 0) { - /* jsr */ - gen_push(s, tcg_const_i32(tcg_ctx, s->pc)); - } - gen_jmp(s, tmp); -} - -DISAS_INSN(addsubq) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv src1; - TCGv src2; - TCGv dest; - int val; - TCGv addr; - - SRC_EA(env, src1, OS_LONG, 0, &addr); - val = (insn >> 9) & 7; - if (val == 0) - val = 8; - dest = tcg_temp_new(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, dest, src1); - if ((insn & 0x38) == 0x08) { - /* Don't update condition codes if the destination is an - address register. */ - if (insn & 0x0100) { - tcg_gen_subi_i32(tcg_ctx, dest, dest, val); - } else { - tcg_gen_addi_i32(tcg_ctx, dest, dest, val); - } - } else { - src2 = tcg_const_i32(tcg_ctx, val); - if (insn & 0x0100) { - gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, dest, src2); - tcg_gen_subi_i32(tcg_ctx, dest, dest, val); - s->cc_op = CC_OP_SUB; - } else { - tcg_gen_addi_i32(tcg_ctx, dest, dest, val); - gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, dest, src2); - s->cc_op = CC_OP_ADD; - } - gen_update_cc_add(s, dest, src2); - } - DEST_EA(env, insn, OS_LONG, dest, &addr); -} - -DISAS_INSN(tpf) -{ - switch (insn & 7) { - case 2: /* One extension word. */ - s->pc += 2; - break; - case 3: /* Two extension words. */ - s->pc += 4; - break; - case 4: /* No extension words. */ - break; - default: - disas_undef(env, s, insn); - } -} - -DISAS_INSN(branch) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int32_t offset; - uint32_t base; - int op; - int l1; - - base = s->pc; - op = (insn >> 8) & 0xf; - offset = (int8_t)insn; - if (offset == 0) { - offset = cpu_ldsw_code(env, s->pc); - s->pc += 2; - } else if (offset == -1) { - offset = read_im32(env, s); - } - if (op == 1) { - /* bsr */ - gen_push(s, tcg_const_i32(tcg_ctx, s->pc)); - } - gen_flush_cc_op(s); - if (op > 1) { - /* Bcc */ - l1 = gen_new_label(tcg_ctx); - gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1); - gen_jmp_tb(s, 1, base + offset); - gen_set_label(tcg_ctx, l1); - gen_jmp_tb(s, 0, s->pc); - } else { - /* Unconditional branch. 
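   The displacement decoding above follows the m68k Bcc/BSR encoding: the
   low opcode byte is a signed 8-bit displacement, with 0 meaning a 16-bit
   displacement word follows and -1 meaning a 32-bit longword follows on
   cores that support the long form. In every variant the target is
   relative to base, the pc just after the opcode word.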
*/ - gen_jmp_tb(s, 0, base + offset); - } -} - -DISAS_INSN(moveq) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint32_t val; - - val = (int8_t)insn; - tcg_gen_movi_i32(tcg_ctx, DREG(insn, 9), val); - gen_logic_cc(s, tcg_const_i32(tcg_ctx, val)); -} - -DISAS_INSN(mvzs) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int opsize; - TCGv src; - TCGv reg; - - if (insn & 0x40) - opsize = OS_WORD; - else - opsize = OS_BYTE; - SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL); - reg = DREG(insn, 9); - tcg_gen_mov_i32(tcg_ctx, reg, src); - gen_logic_cc(s, src); -} - -DISAS_INSN(or) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - TCGv dest; - TCGv src; - TCGv addr; - - reg = DREG(insn, 9); - dest = tcg_temp_new(tcg_ctx); - if (insn & 0x100) { - SRC_EA(env, src, OS_LONG, 0, &addr); - tcg_gen_or_i32(tcg_ctx, dest, src, reg); - DEST_EA(env, insn, OS_LONG, dest, &addr); - } else { - SRC_EA(env, src, OS_LONG, 0, NULL); - tcg_gen_or_i32(tcg_ctx, dest, src, reg); - tcg_gen_mov_i32(tcg_ctx, reg, dest); - } - gen_logic_cc(s, dest); -} - -DISAS_INSN(suba) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv src; - TCGv reg; - - SRC_EA(env, src, OS_LONG, 0, NULL); - reg = AREG(insn, 9); - tcg_gen_sub_i32(tcg_ctx, reg, reg, src); -} - -DISAS_INSN(subx) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - TCGv src; - - gen_flush_flags(s); - reg = DREG(insn, 9); - src = DREG(insn, 0); - gen_helper_subx_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, src); -} - -DISAS_INSN(mov3q) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv src; - int val; - - val = (insn >> 9) & 7; - if (val == 0) - val = -1; - src = tcg_const_i32(tcg_ctx, val); - gen_logic_cc(s, src); - DEST_EA(env, insn, OS_LONG, src, NULL); -} - -DISAS_INSN(cmp) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int op; - TCGv src; - TCGv reg; - TCGv dest; - int opsize; - - op = (insn >> 6) & 3; - switch (op) { - case 0: /* cmp.b */ - opsize = OS_BYTE; - s->cc_op = CC_OP_CMPB; - break; - case 1: /* cmp.w */ - opsize = OS_WORD; - s->cc_op = CC_OP_CMPW; - break; - case 2: /* cmp.l */ - opsize = OS_LONG; - s->cc_op = CC_OP_SUB; - break; - default: - abort(); - } - SRC_EA(env, src, opsize, 1, NULL); - reg = DREG(insn, 9); - dest = tcg_temp_new(tcg_ctx); - tcg_gen_sub_i32(tcg_ctx, dest, reg, src); - gen_update_cc_add(s, dest, src); -} - -DISAS_INSN(cmpa) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - int opsize; - TCGv src; - TCGv reg; - TCGv dest; - - if (insn & 0x100) { - opsize = OS_LONG; - } else { - opsize = OS_WORD; - } - SRC_EA(env, src, opsize, 1, NULL); - reg = AREG(insn, 9); - dest = tcg_temp_new(tcg_ctx); - tcg_gen_sub_i32(tcg_ctx, dest, reg, src); - gen_update_cc_add(s, dest, src); - s->cc_op = CC_OP_SUB; -} - -DISAS_INSN(eor) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv src; - TCGv reg; - TCGv dest; - TCGv addr; - - SRC_EA(env, src, OS_LONG, 0, &addr); - reg = DREG(insn, 9); - dest = tcg_temp_new(tcg_ctx); - tcg_gen_xor_i32(tcg_ctx, dest, src, reg); - gen_logic_cc(s, dest); - DEST_EA(env, insn, OS_LONG, dest, &addr); -} - -DISAS_INSN(and) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv src; - TCGv reg; - TCGv dest; - TCGv addr; - - reg = DREG(insn, 9); - dest = tcg_temp_new(tcg_ctx); - if (insn & 0x100) { - SRC_EA(env, src, OS_LONG, 0, &addr); - tcg_gen_and_i32(tcg_ctx, dest, src, reg); - DEST_EA(env, insn, OS_LONG, dest, &addr); - } else { - SRC_EA(env, src, OS_LONG, 0, NULL); - tcg_gen_and_i32(tcg_ctx, dest, src, reg); - tcg_gen_mov_i32(tcg_ctx, reg, dest); - } - gen_logic_cc(s, dest); -} - -DISAS_INSN(adda) -{ - TCGContext *tcg_ctx = 
s->uc->tcg_ctx;
-    TCGv src;
-    TCGv reg;
-
-    SRC_EA(env, src, OS_LONG, 0, NULL);
-    reg = AREG(insn, 9);
-    tcg_gen_add_i32(tcg_ctx, reg, reg, src);
-}
-
-DISAS_INSN(addx)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv reg;
-    TCGv src;
-
-    gen_flush_flags(s);
-    reg = DREG(insn, 9);
-    src = DREG(insn, 0);
-    gen_helper_addx_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, src);
-    s->cc_op = CC_OP_FLAGS;
-}
-
-/* TODO: This could be implemented without helper functions. */
-DISAS_INSN(shift_im)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv reg;
-    int tmp;
-    TCGv shift;
-
-    reg = DREG(insn, 0);
-    tmp = (insn >> 9) & 7;
-    if (tmp == 0)
-        tmp = 8;
-    shift = tcg_const_i32(tcg_ctx, tmp);
-    /* No need to flush flags because we know we will set C flag. */
-    if (insn & 0x100) {
-        gen_helper_shl_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift);
-    } else {
-        if (insn & 8) {
-            gen_helper_shr_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift);
-        } else {
-            gen_helper_sar_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift);
-        }
-    }
-    s->cc_op = CC_OP_SHIFT;
-}
-
-DISAS_INSN(shift_reg)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv reg;
-    TCGv shift;
-
-    reg = DREG(insn, 0);
-    shift = DREG(insn, 9);
-    /* Shift by zero leaves C flag unmodified. */
-    gen_flush_flags(s);
-    if (insn & 0x100) {
-        gen_helper_shl_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift);
-    } else {
-        if (insn & 8) {
-            gen_helper_shr_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift);
-        } else {
-            gen_helper_sar_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift);
-        }
-    }
-    s->cc_op = CC_OP_SHIFT;
-}
-
-DISAS_INSN(ff1)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv reg;
-    reg = DREG(insn, 0);
-    gen_logic_cc(s, reg);
-    gen_helper_ff1(tcg_ctx, reg, reg);
-}
-
-static TCGv gen_get_sr(DisasContext *s)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv ccr;
-    TCGv sr;
-
-    ccr = gen_get_ccr(s);
-    sr = tcg_temp_new(tcg_ctx);
-    tcg_gen_andi_i32(tcg_ctx, sr, *(TCGv *)tcg_ctx->QREG_SR, 0xffe0);
-    tcg_gen_or_i32(tcg_ctx, sr, sr, ccr);
-    return sr;
-}
-
-DISAS_INSN(strldsr)
-{
-    uint16_t ext;
-    uint32_t addr;
-
-    addr = s->pc - 2;
-    ext = cpu_lduw_code(env, s->pc);
-    s->pc += 2;
-    if (ext != 0x46FC) {
-        gen_exception(s, addr, EXCP_UNSUPPORTED);
-        return;
-    }
-    ext = cpu_lduw_code(env, s->pc);
-    s->pc += 2;
-    if (IS_USER(s) || (ext & SR_S) == 0) {
-        gen_exception(s, addr, EXCP_PRIVILEGE);
-        return;
-    }
-    gen_push(s, gen_get_sr(s));
-    gen_set_sr_im(s, ext, 0);
-}
-
-DISAS_INSN(move_from_sr)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    TCGv reg;
-    TCGv sr;
-
-    if (IS_USER(s)) {
-        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
-        return;
-    }
-    sr = gen_get_sr(s);
-    reg = DREG(insn, 0);
-    gen_partset_reg(s, OS_WORD, reg, sr);
-}
-
-DISAS_INSN(move_to_sr)
-{
-    if (IS_USER(s)) {
-        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
-        return;
-    }
-    gen_set_sr(env, s, insn, 0);
-    gen_lookup_tb(s);
-}
-
-DISAS_INSN(move_from_usp)
-{
-    if (IS_USER(s)) {
-        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
-        return;
-    }
-    /* TODO: Implement USP. */
-    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
-}
-
-DISAS_INSN(move_to_usp)
-{
-    if (IS_USER(s)) {
-        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
-        return;
-    }
-    /* TODO: Implement USP.
*/ - gen_exception(s, s->pc - 2, EXCP_ILLEGAL); -} - -DISAS_INSN(halt) -{ - gen_exception(s, s->pc, EXCP_HALT_INSN); -} - -DISAS_INSN(stop) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint16_t ext; - - if (IS_USER(s)) { - gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); - return; - } - - ext = cpu_lduw_code(env, s->pc); - s->pc += 2; - - gen_set_sr_im(s, ext, 0); - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_halted, 1); - gen_exception(s, s->pc, EXCP_HLT); -} - -DISAS_INSN(rte) -{ - if (IS_USER(s)) { - gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); - return; - } - gen_exception(s, s->pc - 2, EXCP_RTE); -} - -DISAS_INSN(movec) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint16_t ext; - TCGv reg; - - if (IS_USER(s)) { - gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); - return; - } - - ext = cpu_lduw_code(env, s->pc); - s->pc += 2; - - if (ext & 0x8000) { - reg = AREG(ext, 12); - } else { - reg = DREG(ext, 12); - } - gen_helper_movec(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, ext & 0xfff), reg); - gen_lookup_tb(s); -} - -DISAS_INSN(intouch) -{ - if (IS_USER(s)) { - gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); - return; - } - /* ICache fetch. Implement as no-op. */ -} - -DISAS_INSN(cpushl) -{ - if (IS_USER(s)) { - gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); - return; - } - /* Cache push/invalidate. Implement as no-op. */ -} - -DISAS_INSN(wddata) -{ - gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); -} - -DISAS_INSN(wdebug) -{ - if (IS_USER(s)) { - gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); - return; - } - /* TODO: Implement wdebug. */ - qemu_log("WDEBUG not implemented\n"); - gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED); -} - -DISAS_INSN(trap) -{ - gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf)); -} - -/* ??? FP exceptions are not implemented. Most exceptions are deferred until - immediately before the next FP instruction is executed. */ -DISAS_INSN(fpu) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint16_t ext; - int32_t offset; - int opmode; - TCGv_i64 src; - TCGv_i64 dest; - TCGv_i64 res; - TCGv tmp32; - int round; - int set_dest; - int opsize; - - ext = cpu_lduw_code(env, s->pc); - s->pc += 2; - opmode = ext & 0x7f; - switch ((ext >> 13) & 7) { - case 0: case 2: - break; - case 1: - goto undef; - case 3: /* fmove out */ - src = FREG(ext, 7); - tmp32 = tcg_temp_new_i32(tcg_ctx); - /* fmove */ - /* ??? TODO: Proper behavior on overflow. 
*/ - switch ((ext >> 10) & 7) { - case 0: - opsize = OS_LONG; - gen_helper_f64_to_i32(tcg_ctx, tmp32, tcg_ctx->cpu_env, src); - break; - case 1: - opsize = OS_SINGLE; - gen_helper_f64_to_f32(tcg_ctx, tmp32, tcg_ctx->cpu_env, src); - break; - case 4: - opsize = OS_WORD; - gen_helper_f64_to_i32(tcg_ctx, tmp32, tcg_ctx->cpu_env, src); - break; - case 5: /* OS_DOUBLE */ - tcg_gen_mov_i32(tcg_ctx, tmp32, AREG(insn, 0)); - switch ((insn >> 3) & 7) { - case 2: - case 3: - break; - case 4: - tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, -8); - break; - case 5: - offset = cpu_ldsw_code(env, s->pc); - s->pc += 2; - tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, offset); - break; - default: - goto undef; - } - gen_store64(s, tmp32, src); - switch ((insn >> 3) & 7) { - case 3: - tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, 8); - tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), tmp32); - break; - case 4: - tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), tmp32); - break; - } - tcg_temp_free_i32(tcg_ctx, tmp32); - return; - case 6: - opsize = OS_BYTE; - gen_helper_f64_to_i32(tcg_ctx, tmp32, tcg_ctx->cpu_env, src); - break; - default: - goto undef; - } - DEST_EA(env, insn, opsize, tmp32, NULL); - tcg_temp_free_i32(tcg_ctx, tmp32); - return; - case 4: /* fmove to control register. */ - switch ((ext >> 10) & 7) { - case 4: /* FPCR */ - /* Not implemented. Ignore writes. */ - break; - case 1: /* FPIAR */ - case 2: /* FPSR */ - default: - qemu_log("Unimplemented: fmove to control %d\n", - (ext >> 10) & 7); - goto undef; - } - break; - case 5: /* fmove from control register. */ - switch ((ext >> 10) & 7) { - case 4: /* FPCR */ - /* Not implemented. Always return zero. */ - tmp32 = tcg_const_i32(tcg_ctx, 0); - break; - case 1: /* FPIAR */ - case 2: /* FPSR */ - default: - qemu_log("Unimplemented: fmove from control %d\n", - (ext >> 10) & 7); - goto undef; - } - DEST_EA(env, insn, OS_LONG, tmp32, NULL); - break; - case 6: /* fmovem */ - case 7: - { - TCGv addr; - uint16_t mask; - int i; - if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0) - goto undef; - tmp32 = gen_lea(env, s, insn, OS_LONG); - if (IS_NULL_QREG(tmp32)) { - gen_addr_fault(s); - return; - } - addr = tcg_temp_new_i32(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, addr, tmp32); - mask = 0x80; - for (i = 0; i < 8; i++) { - if (ext & mask) { - s->is_mem = 1; - dest = FREG(i, 0); - if (ext & (1 << 13)) { - /* store */ - tcg_gen_qemu_stf64(s->uc, dest, addr, IS_USER(s)); - } else { - /* load */ - tcg_gen_qemu_ldf64(s->uc, dest, addr, IS_USER(s)); - } - if (ext & (mask - 1)) - tcg_gen_addi_i32(tcg_ctx, addr, addr, 8); - } - mask >>= 1; - } - tcg_temp_free_i32(tcg_ctx, addr); - } - return; - } - if (ext & (1 << 14)) { - /* Source effective address. 
*/ - switch ((ext >> 10) & 7) { - case 0: opsize = OS_LONG; break; - case 1: opsize = OS_SINGLE; break; - case 4: opsize = OS_WORD; break; - case 5: opsize = OS_DOUBLE; break; - case 6: opsize = OS_BYTE; break; - default: - goto undef; - } - if (opsize == OS_DOUBLE) { - tmp32 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, tmp32, AREG(insn, 0)); - switch ((insn >> 3) & 7) { - case 2: - case 3: - break; - case 4: - tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, -8); - break; - case 5: - offset = cpu_ldsw_code(env, s->pc); - s->pc += 2; - tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, offset); - break; - case 7: - offset = cpu_ldsw_code(env, s->pc); - offset += s->pc - 2; - s->pc += 2; - tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, offset); - break; - default: - goto undef; - } - src = gen_load64(s, tmp32); - switch ((insn >> 3) & 7) { - case 3: - tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, 8); - tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), tmp32); - break; - case 4: - tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), tmp32); - break; - } - tcg_temp_free_i32(tcg_ctx, tmp32); - } else { - SRC_EA(env, tmp32, opsize, 1, NULL); - src = tcg_temp_new_i64(tcg_ctx); - switch (opsize) { - case OS_LONG: - case OS_WORD: - case OS_BYTE: - gen_helper_i32_to_f64(tcg_ctx, src, tcg_ctx->cpu_env, tmp32); - break; - case OS_SINGLE: - gen_helper_f32_to_f64(tcg_ctx, src, tcg_ctx->cpu_env, tmp32); - break; - } - } - } else { - /* Source register. */ - src = FREG(ext, 10); - } - dest = FREG(ext, 7); - res = tcg_temp_new_i64(tcg_ctx); - if (opmode != 0x3a) - tcg_gen_mov_f64(tcg_ctx, res, dest); - round = 1; - set_dest = 1; - switch (opmode) { - case 0: case 0x40: case 0x44: /* fmove */ - tcg_gen_mov_f64(tcg_ctx, res, src); - break; - case 1: /* fint */ - gen_helper_iround_f64(tcg_ctx, res, tcg_ctx->cpu_env, src); - round = 0; - break; - case 3: /* fintrz */ - gen_helper_itrunc_f64(tcg_ctx, res, tcg_ctx->cpu_env, src); - round = 0; - break; - case 4: case 0x41: case 0x45: /* fsqrt */ - gen_helper_sqrt_f64(tcg_ctx, res, tcg_ctx->cpu_env, src); - break; - case 0x18: case 0x58: case 0x5c: /* fabs */ - gen_helper_abs_f64(tcg_ctx, res, src); - break; - case 0x1a: case 0x5a: case 0x5e: /* fneg */ - gen_helper_chs_f64(tcg_ctx, res, src); - break; - case 0x20: case 0x60: case 0x64: /* fdiv */ - gen_helper_div_f64(tcg_ctx, res, tcg_ctx->cpu_env, res, src); - break; - case 0x22: case 0x62: case 0x66: /* fadd */ - gen_helper_add_f64(tcg_ctx, res, tcg_ctx->cpu_env, res, src); - break; - case 0x23: case 0x63: case 0x67: /* fmul */ - gen_helper_mul_f64(tcg_ctx, res, tcg_ctx->cpu_env, res, src); - break; - case 0x28: case 0x68: case 0x6c: /* fsub */ - gen_helper_sub_f64(tcg_ctx, res, tcg_ctx->cpu_env, res, src); - break; - case 0x38: /* fcmp */ - gen_helper_sub_cmp_f64(tcg_ctx, res, tcg_ctx->cpu_env, res, src); - set_dest = 0; - round = 0; - break; - case 0x3a: /* ftst */ - tcg_gen_mov_f64(tcg_ctx, res, src); - set_dest = 0; - round = 0; - break; - default: - goto undef; - } - if (ext & (1 << 14)) { - tcg_temp_free_i64(tcg_ctx, src); - } - if (round) { - if (opmode & 0x40) { - if ((opmode & 0x4) != 0) - round = 0; - } else if ((s->fpcr & M68K_FPCR_PREC) == 0) { - round = 0; - } - } - if (round) { - TCGv tmp = tcg_temp_new_i32(tcg_ctx); - gen_helper_f64_to_f32(tcg_ctx, tmp, tcg_ctx->cpu_env, res); - gen_helper_f32_to_f64(tcg_ctx, res, tcg_ctx->cpu_env, tmp); - tcg_temp_free_i32(tcg_ctx, tmp); - } - tcg_gen_mov_f64(tcg_ctx, tcg_ctx->QREG_FP_RESULT, res); - if (set_dest) { - tcg_gen_mov_f64(tcg_ctx, dest, res); - } - tcg_temp_free_i64(tcg_ctx, res); - return; 
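    // The round-trip through f64_to_f32/f32_to_f64 above implements the
    // precision control decided earlier: fsXXX opmodes, or plain opmodes
    // with the FPCR PREC field set, squeeze the double result through
    // single precision so it picks up single-precision rounding and range,
    // while the register file still holds it as a double.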
-undef: - /* FIXME: Is this right for offset addressing modes? */ - s->pc -= 2; - disas_undef_fpu(env, s, insn); -} - -DISAS_INSN(fbcc) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint32_t offset; - uint32_t addr; - TCGv flag; - int l1; - - addr = s->pc; - offset = cpu_ldsw_code(env, s->pc); - s->pc += 2; - if (insn & (1 << 6)) { - offset = (offset << 16) | cpu_lduw_code(env, s->pc); - s->pc += 2; - } - - l1 = gen_new_label(tcg_ctx); - /* TODO: Raise BSUN exception. */ - flag = tcg_temp_new(tcg_ctx); - gen_helper_compare_f64(tcg_ctx, flag, tcg_ctx->cpu_env, tcg_ctx->QREG_FP_RESULT); - /* Jump to l1 if condition is true. */ - switch (insn & 0xf) { - case 0: /* f */ - break; - case 1: /* eq (=0) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_EQ, flag, tcg_const_i32(tcg_ctx, 0), l1); - break; - case 2: /* ogt (=1) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_EQ, flag, tcg_const_i32(tcg_ctx, 1), l1); - break; - case 3: /* oge (=0 or =1) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_LEU, flag, tcg_const_i32(tcg_ctx, 1), l1); - break; - case 4: /* olt (=-1) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_LT, flag, tcg_const_i32(tcg_ctx, 0), l1); - break; - case 5: /* ole (=-1 or =0) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_LE, flag, tcg_const_i32(tcg_ctx, 0), l1); - break; - case 6: /* ogl (=-1 or =1) */ - tcg_gen_andi_i32(tcg_ctx, flag, flag, 1); - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_NE, flag, tcg_const_i32(tcg_ctx, 0), l1); - break; - case 7: /* or (=2) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_EQ, flag, tcg_const_i32(tcg_ctx, 2), l1); - break; - case 8: /* un (<2) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_LT, flag, tcg_const_i32(tcg_ctx, 2), l1); - break; - case 9: /* ueq (=0 or =2) */ - tcg_gen_andi_i32(tcg_ctx, flag, flag, 1); - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_EQ, flag, tcg_const_i32(tcg_ctx, 0), l1); - break; - case 10: /* ugt (>0) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_GT, flag, tcg_const_i32(tcg_ctx, 0), l1); - break; - case 11: /* uge (>=0) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_GE, flag, tcg_const_i32(tcg_ctx, 0), l1); - break; - case 12: /* ult (=-1 or =2) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_GEU, flag, tcg_const_i32(tcg_ctx, 2), l1); - break; - case 13: /* ule (!=1) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_NE, flag, tcg_const_i32(tcg_ctx, 1), l1); - break; - case 14: /* ne (!=0) */ - tcg_gen_brcond_i32(tcg_ctx, TCG_COND_NE, flag, tcg_const_i32(tcg_ctx, 0), l1); - break; - case 15: /* t */ - tcg_gen_br(tcg_ctx, l1); - break; - } - gen_jmp_tb(s, 0, s->pc); - gen_set_label(tcg_ctx, l1); - gen_jmp_tb(s, 1, addr + offset); -} - -DISAS_INSN(frestore) -{ - M68kCPU *cpu = m68k_env_get_cpu(env); - - /* TODO: Implement frestore. */ - cpu_abort(CPU(cpu), "FRESTORE not implemented"); -} - -DISAS_INSN(fsave) -{ - M68kCPU *cpu = m68k_env_get_cpu(env); - - /* TODO: Implement fsave. 
*/ - cpu_abort(CPU(cpu), "FSAVE not implemented"); -} - -static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv tmp = tcg_temp_new(tcg_ctx); - if (s->env->macsr & MACSR_FI) { - if (upper) - tcg_gen_andi_i32(tcg_ctx, tmp, val, 0xffff0000); - else - tcg_gen_shli_i32(tcg_ctx, tmp, val, 16); - } else if (s->env->macsr & MACSR_SU) { - if (upper) - tcg_gen_sari_i32(tcg_ctx, tmp, val, 16); - else - tcg_gen_ext16s_i32(tcg_ctx, tmp, val); - } else { - if (upper) - tcg_gen_shri_i32(tcg_ctx, tmp, val, 16); - else - tcg_gen_ext16u_i32(tcg_ctx, tmp, val); - } - return tmp; -} - -static void gen_mac_clear_flags(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_MACSR, *(TCGv *)tcg_ctx->QREG_MACSR, - ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV)); -} - -DISAS_INSN(mac) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv rx; - TCGv ry; - uint16_t ext; - int acc; - TCGv tmp; - TCGv addr; - TCGv loadval; - int dual; - TCGv saved_flags; - - if (!s->done_mac) { - s->mactmp = tcg_temp_new_i64(tcg_ctx); - s->done_mac = 1; - } - - ext = cpu_lduw_code(env, s->pc); - s->pc += 2; - - acc = ((insn >> 7) & 1) | ((ext >> 3) & 2); - dual = ((insn & 0x30) != 0 && (ext & 3) != 0); - if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) { - disas_undef(env, s, insn); - return; - } - if (insn & 0x30) { - /* MAC with load. */ - tmp = gen_lea(env, s, insn, OS_LONG); - addr = tcg_temp_new(tcg_ctx); - tcg_gen_and_i32(tcg_ctx, addr, tmp, *(TCGv *)tcg_ctx->QREG_MAC_MASK); - /* Load the value now to ensure correct exception behavior. - Perform writeback after reading the MAC inputs. */ - loadval = gen_load(s, OS_LONG, addr, 0); - - acc ^= 1; - rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12); - ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0); - } else { - loadval = addr = *(TCGv *)tcg_ctx->NULL_QREG; - rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9); - ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); - } - - gen_mac_clear_flags(s); -#if 0 - l1 = -1; - /* Disabled because conditional branches clobber temporary vars. */ - if ((s->env->macsr & MACSR_OMC) != 0 && !dual) { - /* Skip the multiply if we know we will ignore it. */ - l1 = gen_new_label(tcg_ctx); - tmp = tcg_temp_new(tcg_ctx); - tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_MACSR, 1 << (acc + 8)); - gen_op_jmp_nz32(tmp, l1); - } -#endif - - if ((ext & 0x0800) == 0) { - /* Word. */ - rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0); - ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0); - } - if (s->env->macsr & MACSR_FI) { - gen_helper_macmulf(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); - } else { - if (s->env->macsr & MACSR_SU) - gen_helper_macmuls(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); - else - gen_helper_macmulu(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); - switch ((ext >> 9) & 3) { - case 1: - tcg_gen_shli_i64(tcg_ctx, s->mactmp, s->mactmp, 1); - break; - case 3: - tcg_gen_shri_i64(tcg_ctx, s->mactmp, s->mactmp, 1); - break; - } - } - - if (dual) { - /* Save the overflow flag from the multiply. */ - saved_flags = tcg_temp_new(tcg_ctx); - tcg_gen_mov_i32(tcg_ctx, saved_flags, *(TCGv *)tcg_ctx->QREG_MACSR); - } else { - saved_flags = *(TCGv *)tcg_ctx->NULL_QREG; - } - -#if 0 - /* Disabled because conditional branches clobber temporary vars. */ - if ((s->env->macsr & MACSR_OMC) != 0 && dual) { - /* Skip the accumulate if the value is already saturated. 
*/ - l1 = gen_new_label(tcg_ctx); - tmp = tcg_temp_new(tcg_ctx); - gen_op_and32(tmp, *(TCGv *)tcg_ctx->QREG_MACSR, tcg_const_i32(tcg_ctx, MACSR_PAV0 << acc)); - gen_op_jmp_nz32(tmp, l1); - } -#endif - - if (insn & 0x100) - tcg_gen_sub_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); - else - tcg_gen_add_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); - - if (s->env->macsr & MACSR_FI) - gen_helper_macsatf(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); - else if (s->env->macsr & MACSR_SU) - gen_helper_macsats(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); - else - gen_helper_macsatu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); - -#if 0 - /* Disabled because conditional branches clobber temporary vars. */ - if (l1 != -1) - gen_set_label(tcg_ctx, l1); -#endif - - if (dual) { - /* Dual accumulate variant. */ - acc = (ext >> 2) & 3; - /* Restore the overflow flag from the multiplier. */ - tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_MACSR, saved_flags); -#if 0 - /* Disabled because conditional branches clobber temporary vars. */ - if ((s->env->macsr & MACSR_OMC) != 0) { - /* Skip the accumulate if the value is already saturated. */ - l1 = gen_new_label(tcg_ctx); - tmp = tcg_temp_new(tcg_ctx); - gen_op_and32(tmp, *(TCGv *)tcg_ctx->QREG_MACSR, tcg_const_i32(tcg_ctx, MACSR_PAV0 << acc)); - gen_op_jmp_nz32(tmp, l1); - } -#endif - if (ext & 2) - tcg_gen_sub_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); - else - tcg_gen_add_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); - if (s->env->macsr & MACSR_FI) - gen_helper_macsatf(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); - else if (s->env->macsr & MACSR_SU) - gen_helper_macsats(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); - else - gen_helper_macsatu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); -#if 0 - /* Disabled because conditional branches clobber temporary vars. */ - if (l1 != -1) - gen_set_label(tcg_ctx, l1); -#endif - } - gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); - - if (insn & 0x30) { - TCGv rw; - rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9); - tcg_gen_mov_i32(tcg_ctx, rw, loadval); - /* FIXME: Should address writeback happen with the masked or - unmasked value? */ - switch ((insn >> 3) & 7) { - case 3: /* Post-increment. */ - tcg_gen_addi_i32(tcg_ctx, AREG(insn, 0), addr, 4); - break; - case 4: /* Pre-decrement. */ - tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); - } - } -} - -DISAS_INSN(from_mac) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv rx; - TCGv_i64 acc; - int accnum; - - rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); - accnum = (insn >> 9) & 3; - acc = MACREG(accnum); - if (s->env->macsr & MACSR_FI) { - gen_helper_get_macf(tcg_ctx, rx, tcg_ctx->cpu_env, acc); - } else if ((s->env->macsr & MACSR_OMC) == 0) { - tcg_gen_trunc_i64_i32(tcg_ctx, rx, acc); - } else if (s->env->macsr & MACSR_SU) { - gen_helper_get_macs(tcg_ctx, rx, acc); - } else { - gen_helper_get_macu(tcg_ctx, rx, acc); - } - if (insn & 0x40) { - tcg_gen_movi_i64(tcg_ctx, acc, 0); - tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_MACSR, *(TCGv *)tcg_ctx->QREG_MACSR, ~(MACSR_PAV0 << accnum)); - } -} - -DISAS_INSN(move_mac) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - /* FIXME: This can be done without a helper. 
*/ - int src; - TCGv dest; - src = insn & 3; - dest = tcg_const_i32(tcg_ctx, (insn >> 9) & 3); - gen_helper_mac_move(tcg_ctx, tcg_ctx->cpu_env, dest, tcg_const_i32(tcg_ctx, src)); - gen_mac_clear_flags(s); - gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, dest); -} - -DISAS_INSN(from_macsr) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - - reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); - tcg_gen_mov_i32(tcg_ctx, reg, *(TCGv *)tcg_ctx->QREG_MACSR); -} - -DISAS_INSN(from_mask) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); - tcg_gen_mov_i32(tcg_ctx, reg, *(TCGv *)tcg_ctx->QREG_MAC_MASK); -} - -DISAS_INSN(from_mext) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv reg; - TCGv acc; - reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); - acc = tcg_const_i32(tcg_ctx, (insn & 0x400) ? 2 : 0); - if (s->env->macsr & MACSR_FI) - gen_helper_get_mac_extf(tcg_ctx, reg, tcg_ctx->cpu_env, acc); - else - gen_helper_get_mac_exti(tcg_ctx, reg, tcg_ctx->cpu_env, acc); -} - -DISAS_INSN(macsr_to_ccr) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, 0); - tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, *(TCGv *)tcg_ctx->QREG_MACSR, 0xf); - s->cc_op = CC_OP_FLAGS; -} - -DISAS_INSN(to_mac) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 acc; - TCGv val; - int accnum; - accnum = (insn >> 9) & 3; - acc = MACREG(accnum); - SRC_EA(env, val, OS_LONG, 0, NULL); - if (s->env->macsr & MACSR_FI) { - tcg_gen_ext_i32_i64(tcg_ctx, acc, val); - tcg_gen_shli_i64(tcg_ctx, acc, acc, 8); - } else if (s->env->macsr & MACSR_SU) { - tcg_gen_ext_i32_i64(tcg_ctx, acc, val); - } else { - tcg_gen_extu_i32_i64(tcg_ctx, acc, val); - } - tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_MACSR, *(TCGv *)tcg_ctx->QREG_MACSR, ~(MACSR_PAV0 << accnum)); - gen_mac_clear_flags(s); - gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, accnum)); -} - -DISAS_INSN(to_macsr) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv val; - SRC_EA(env, val, OS_LONG, 0, NULL); - gen_helper_set_macsr(tcg_ctx, tcg_ctx->cpu_env, val); - gen_lookup_tb(s); -} - -DISAS_INSN(to_mask) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv val; - SRC_EA(env, val, OS_LONG, 0, NULL); - tcg_gen_ori_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_MAC_MASK, val, 0xffff0000); -} - -DISAS_INSN(to_mext) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv val; - TCGv acc; - SRC_EA(env, val, OS_LONG, 0, NULL); - acc = tcg_const_i32(tcg_ctx, (insn & 0x400) ? 2 : 0); - if (s->env->macsr & MACSR_FI) - gen_helper_set_mac_extf(tcg_ctx, tcg_ctx->cpu_env, val, acc); - else if (s->env->macsr & MACSR_SU) - gen_helper_set_mac_exts(tcg_ctx, tcg_ctx->cpu_env, val, acc); - else - gen_helper_set_mac_extu(tcg_ctx, tcg_ctx->cpu_env, val, acc); -} - -static void -register_opcode(TCGContext *tcg_ctx, disas_proc proc, uint16_t opcode, uint16_t mask) -{ - int i; - int from; - int to; - - /* Sanity check. All set bits must be included in the mask. */ - if (opcode & ~mask) { - fprintf(stderr, - "qemu internal error: bogus opcode definition %04x/%04x\n", - opcode, mask); - abort(); - } - /* This could probably be cleverer. For now just optimize the case where - the top bits are known. */ - /* Find the first zero bit in the mask. */ - i = 0x8000; - while ((i & mask) != 0) - i >>= 1; - /* Iterate over all combinations of this and lower bits. 
- */
-    if (i == 0)
-        i = 1;
-    else
-        i <<= 1;
-    from = opcode & ~(i - 1);
-    to = from + i;
-    for (i = from; i < to; i++) {
-        if ((i & mask) == opcode) {
-            tcg_ctx->opcode_table[i] = proc;
-        }
-    }
-}
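
register_opcode() expands every combination of the don't-care bits so that dispatch at translation time is a single table index. A standalone sketch of the same expansion, with toy names (toy_register_opcode, toy_trap, disas_proc_t) and a local table rather than the engine's structures; the pattern/mask pair matches the INSN(trap, ...) entry registered below:

#include <stdio.h>
#include <stdint.h>

typedef void (*disas_proc_t)(uint16_t insn);   /* hypothetical handler type */
static disas_proc_t table[0x10000];

static void toy_register_opcode(disas_proc_t proc, uint16_t opcode, uint16_t mask)
{
    /* Find the first zero bit in the mask, scanning from bit 15 down;
       everything below it is enumerated, and higher don't-care bits are
       caught by the (i & mask) == opcode test, as in register_opcode(). */
    uint32_t bit = 0x8000;
    while (bit & mask)
        bit >>= 1;
    bit = (bit == 0) ? 1 : bit << 1;

    uint32_t from = opcode & ~(bit - 1);
    for (uint32_t i = from; i < from + bit; i++) {
        if ((i & mask) == opcode)
            table[i] = proc;
    }
}

static void toy_trap(uint16_t insn) { (void)insn; }

int main(void)
{
    toy_register_opcode(toy_trap, 0x4e40, 0xfff0); /* low 4 bits don't care */
    int n = 0;
    for (uint32_t i = 0; i < 0x10000; i++)
        n += (table[i] != 0);
    printf("%d slots filled\n", n); /* 16: one per trap vector */
    return 0;
}
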
-
-/* Register m68k opcode handlers.  Order is important.
-   Later insns override earlier ones.  */
-void register_m68k_insns (CPUM68KState *env)
-{
-    TCGContext *tcg_ctx = env->uc->tcg_ctx;
-#define INSN(name, opcode, mask, feature) do { \
-    if (m68k_feature(env, M68K_FEATURE_##feature)) \
-        register_opcode(tcg_ctx, disas_##name, 0x##opcode, 0x##mask); \
-    } while(0)
-    INSN(undef, 0000, 0000, CF_ISA_A);
-    INSN(arith_im, 0080, fff8, CF_ISA_A);
-    INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
-    INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
-    INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
-    INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
-    INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
-    INSN(arith_im, 0280, fff8, CF_ISA_A);
-    INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
-    INSN(arith_im, 0480, fff8, CF_ISA_A);
-    INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
-    INSN(arith_im, 0680, fff8, CF_ISA_A);
-    INSN(bitop_im, 0800, ffc0, CF_ISA_A);
-    INSN(bitop_im, 0840, ffc0, CF_ISA_A);
-    INSN(bitop_im, 0880, ffc0, CF_ISA_A);
-    INSN(bitop_im, 08c0, ffc0, CF_ISA_A);
-    INSN(arith_im, 0a80, fff8, CF_ISA_A);
-    INSN(arith_im, 0c00, ff38, CF_ISA_A);
-    INSN(move, 1000, f000, CF_ISA_A);
-    INSN(move, 2000, f000, CF_ISA_A);
-    INSN(move, 3000, f000, CF_ISA_A);
-    INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
-    INSN(negx, 4080, fff8, CF_ISA_A);
-    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
-    INSN(lea, 41c0, f1c0, CF_ISA_A);
-    INSN(clr, 4200, ff00, CF_ISA_A);
-    INSN(undef, 42c0, ffc0, CF_ISA_A);
-    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
-    INSN(neg, 4480, fff8, CF_ISA_A);
-    INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
-    INSN(not, 4680, fff8, CF_ISA_A);
-    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
-    INSN(pea, 4840, ffc0, CF_ISA_A);
-    INSN(swap, 4840, fff8, CF_ISA_A);
-    INSN(movem, 48c0, fbc0, CF_ISA_A);
-    INSN(ext, 4880, fff8, CF_ISA_A);
-    INSN(ext, 48c0, fff8, CF_ISA_A);
-    INSN(ext, 49c0, fff8, CF_ISA_A);
-    INSN(tst, 4a00, ff00, CF_ISA_A);
-    INSN(tas, 4ac0, ffc0, CF_ISA_B);
-    INSN(halt, 4ac8, ffff, CF_ISA_A);
-    INSN(pulse, 4acc, ffff, CF_ISA_A);
-    INSN(illegal, 4afc, ffff, CF_ISA_A);
-    INSN(mull, 4c00, ffc0, CF_ISA_A);
-    INSN(divl, 4c40, ffc0, CF_ISA_A);
-    INSN(sats, 4c80, fff8, CF_ISA_B);
-    INSN(trap, 4e40, fff0, CF_ISA_A);
-    INSN(link, 4e50, fff8, CF_ISA_A);
-    INSN(unlk, 4e58, fff8, CF_ISA_A);
-    INSN(move_to_usp, 4e60, fff8, USP);
-    INSN(move_from_usp, 4e68, fff8, USP);
-    INSN(nop, 4e71, ffff, CF_ISA_A);
-    INSN(stop, 4e72, ffff, CF_ISA_A);
-    INSN(rte, 4e73, ffff, CF_ISA_A);
-    INSN(rts, 4e75, ffff, CF_ISA_A);
-    INSN(movec, 4e7b, ffff, CF_ISA_A);
-    INSN(jump, 4e80, ffc0, CF_ISA_A);
-    INSN(jump, 4ec0, ffc0, CF_ISA_A);
-    INSN(addsubq, 5180, f1c0, CF_ISA_A);
-    INSN(scc, 50c0, f0f8, CF_ISA_A);
-    INSN(addsubq, 5080, f1c0, CF_ISA_A);
-    INSN(tpf, 51f8, fff8, CF_ISA_A);
-
-    /* Branch instructions. */
-    INSN(branch, 6000, f000, CF_ISA_A);
-    /* Disable long branch instructions, then add back the ones we want. */
-    INSN(undef, 60ff, f0ff, CF_ISA_A); /* All long branches. */
-    INSN(branch, 60ff, f0ff, CF_ISA_B);
-    INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
-    INSN(branch, 60ff, ffff, BRAL);
-
-    INSN(moveq, 7000, f100, CF_ISA_A);
-    INSN(mvzs, 7100, f100, CF_ISA_B);
-    INSN(or, 8000, f000, CF_ISA_A);
-    INSN(divw, 80c0, f0c0, CF_ISA_A);
-    INSN(addsub, 9000, f000, CF_ISA_A);
-    INSN(subx, 9180, f1f8, CF_ISA_A);
-    INSN(suba, 91c0, f1c0, CF_ISA_A);
-
-    INSN(undef_mac, a000, f000, CF_ISA_A);
-    INSN(mac, a000, f100, CF_EMAC);
-    INSN(from_mac, a180, f9b0, CF_EMAC);
-    INSN(move_mac, a110, f9fc, CF_EMAC);
-    INSN(from_macsr, a980, f9f0, CF_EMAC);
-    INSN(from_mask, ad80, fff0, CF_EMAC);
-    INSN(from_mext, ab80, fbf0, CF_EMAC);
-    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
-    INSN(to_mac, a100, f9c0, CF_EMAC);
-    INSN(to_macsr, a900, ffc0, CF_EMAC);
-    INSN(to_mext, ab00, fbc0, CF_EMAC);
-    INSN(to_mask, ad00, ffc0, CF_EMAC);
-
-    INSN(mov3q, a140, f1c0, CF_ISA_B);
-    INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
-    INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
-    INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
-    INSN(cmp, b080, f1c0, CF_ISA_A);
-    INSN(cmpa, b1c0, f1c0, CF_ISA_A);
-    INSN(eor, b180, f1c0, CF_ISA_A);
-    INSN(and, c000, f000, CF_ISA_A);
-    INSN(mulw, c0c0, f0c0, CF_ISA_A);
-    INSN(addsub, d000, f000, CF_ISA_A);
-    INSN(addx, d180, f1f8, CF_ISA_A);
-    INSN(adda, d1c0, f1c0, CF_ISA_A);
-    INSN(shift_im, e080, f0f0, CF_ISA_A);
-    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
-    INSN(undef_fpu, f000, f000, CF_ISA_A);
-    INSN(fpu, f200, ffc0, CF_FPU);
-    INSN(fbcc, f280, ffc0, CF_FPU);
-    INSN(frestore, f340, ffc0, CF_FPU);
-    INSN(fsave, f340, ffc0, CF_FPU);
-    INSN(intouch, f340, ffc0, CF_ISA_A);
-    INSN(cpushl, f428, ff38, CF_ISA_A);
-    INSN(wddata, fb00, ff00, CF_ISA_A);
-    INSN(wdebug, fbc0, ffc0, CF_ISA_A);
-#undef INSN
-}
-
-/* ??? Some of this implementation is not exception safe.  We should always
-   write back the result to memory before setting the condition codes. */
-static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    uint16_t insn;
-
-    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
-        tcg_gen_debug_insn_start(tcg_ctx, s->pc);
-    }
-
-    // Unicorn: end address tells us to stop emulation
-    if (s->pc == s->uc->addr_end) {
-        gen_exception(s, s->pc, EXCP_HLT);
-        return;
-    }
-
-    // Unicorn: trace this instruction on request
-    if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, s->pc)) {
-        gen_uc_tracecode(tcg_ctx, 2, UC_HOOK_CODE_IDX, env->uc, s->pc);
-        // the callback might want to stop emulation immediately
-        check_exit_request(tcg_ctx);
-    }
-
-    insn = cpu_lduw_code(env, s->pc);
-    s->pc += 2;
-
-    ((disas_proc)tcg_ctx->opcode_table[insn])(env, s, insn);
-}
-
-/* generate intermediate code for basic block 'tb'.
*/ -static inline void -gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb, - bool search_pc) -{ - CPUState *cs = CPU(cpu); - CPUM68KState *env = &cpu->env; - DisasContext dc1, *dc = &dc1; - uint16_t *gen_opc_end; - CPUBreakpoint *bp; - int j, lj; - target_ulong pc_start; - int pc_offset; - int num_insns; - int max_insns; - TCGContext *tcg_ctx = env->uc->tcg_ctx; - bool block_full = false; - - /* generate intermediate code */ - pc_start = tb->pc; - - dc->tb = tb; - dc->uc = env->uc; - - gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE; - - dc->env = env; - dc->is_jmp = DISAS_NEXT; - dc->pc = pc_start; - dc->cc_op = CC_OP_DYNAMIC; - dc->singlestep_enabled = cs->singlestep_enabled; - dc->fpcr = env->fpcr; - dc->user = (env->sr & SR_S) == 0; - dc->is_mem = 0; - dc->done_mac = 0; - lj = -1; - num_insns = 0; - max_insns = tb->cflags & CF_COUNT_MASK; - if (max_insns == 0) - max_insns = CF_COUNT_MASK; - - // Unicorn: early check to see if the address of this block is the until address - if (tb->pc == env->uc->addr_end) { - gen_tb_start(tcg_ctx); - gen_exception(dc, dc->pc, EXCP_HLT); - goto done_generating; - } - - // Unicorn: trace this block on request - // Only hook this block if it is not broken from previous translation due to - // full translation cache - if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) { - // save block address to see if we need to patch block size later - env->uc->block_addr = pc_start; - env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1; - gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start); - } else { - env->uc->size_arg = -1; - } - - gen_tb_start(tcg_ctx); - do { - pc_offset = dc->pc - pc_start; - if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { - QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { - if (bp->pc == dc->pc) { - gen_exception(dc, dc->pc, EXCP_DEBUG); - dc->is_jmp = DISAS_JUMP; - break; - } - } - if (dc->is_jmp) - break; - } - if (search_pc) { - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - if (lj < j) { - lj++; - while (lj < j) - tcg_ctx->gen_opc_instr_start[lj++] = 0; - } - tcg_ctx->gen_opc_pc[lj] = dc->pc; - tcg_ctx->gen_opc_instr_start[lj] = 1; - //tcg_ctx.gen_opc_icount[lj] = num_insns; - } - //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) - // gen_io_start(); - dc->insn_pc = dc->pc; - disas_m68k_insn(env, dc); - num_insns++; - } while (!dc->is_jmp && tcg_ctx->gen_opc_ptr < gen_opc_end && - !cs->singlestep_enabled && - (pc_offset) < (TARGET_PAGE_SIZE - 32) && - num_insns < max_insns); - - /* if too long translation, save this info */ - if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns) - block_full = true; - - //if (tb->cflags & CF_LAST_IO) - // gen_io_end(); - if (unlikely(cs->singlestep_enabled)) { - /* Make sure the pc is updated, and raise a debug exception. 
- */
-        if (!dc->is_jmp) {
-            gen_flush_cc_op(dc);
-            tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_PC, dc->pc);
-        }
-        gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, EXCP_DEBUG));
-    } else {
-        switch(dc->is_jmp) {
-        case DISAS_NEXT:
-            gen_flush_cc_op(dc);
-            gen_jmp_tb(dc, 0, dc->pc);
-            break;
-        default:
-        case DISAS_JUMP:
-        case DISAS_UPDATE:
-            gen_flush_cc_op(dc);
-            /* indicate that the hash table must be used to find the next TB */
-            tcg_gen_exit_tb(tcg_ctx, 0);
-            break;
-        case DISAS_TB_JUMP:
-            /* nothing more to generate */
-            break;
-        }
-    }
-
-done_generating:
-    gen_tb_end(tcg_ctx, tb, num_insns);
-    *tcg_ctx->gen_opc_ptr = INDEX_op_end;
-
-    if (search_pc) {
-        j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf;
-        lj++;
-        while (lj <= j)
-            tcg_ctx->gen_opc_instr_start[lj++] = 0;
-    } else {
-        tb->size = dc->pc - pc_start;
-        //tb->icount = num_insns;
-    }
-
-    //optimize_flags();
-    //expand_target_qops();
-
-    env->uc->block_full = block_full;
-}
-
-void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
-{
-    gen_intermediate_code_internal(m68k_env_get_cpu(env), tb, false);
-}
-
-void gen_intermediate_code_pc(CPUM68KState *env, TranslationBlock *tb)
-{
-    gen_intermediate_code_internal(m68k_env_get_cpu(env), tb, true);
-}
-
-void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, int pc_pos)
-{
-    TCGContext *tcg_ctx = env->uc->tcg_ctx;
-    env->pc = tcg_ctx->gen_opc_pc[pc_pos];
-}
diff --git a/qemu/target-m68k/unicorn.c b/qemu/target-m68k/unicorn.c
deleted file mode 100644
index f63d742a..00000000
--- a/qemu/target-m68k/unicorn.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/* Unicorn Emulator Engine */
-/* By Nguyen Anh Quynh , 2015 */
-
-#include "hw/boards.h"
-#include "hw/m68k/m68k.h"
-#include "sysemu/cpus.h"
-#include "unicorn.h"
-#include "cpu.h"
-#include "unicorn_common.h"
-#include "uc_priv.h"
-
-
-const int M68K_REGS_STORAGE_SIZE = offsetof(CPUM68KState, tlb_table);
-
-static void m68k_set_pc(struct uc_struct *uc, uint64_t address)
-{
-    ((CPUM68KState *)uc->current_cpu->env_ptr)->pc = address;
-}
-
-void m68k_release(void* ctx);
-void m68k_release(void* ctx)
-{
-    TCGContext *tcg_ctx;
-    int i;
-
-    release_common(ctx);
-    tcg_ctx = (TCGContext *) ctx;
-    g_free(tcg_ctx->tb_ctx.tbs);
-    g_free(tcg_ctx->QREG_PC);
-    g_free(tcg_ctx->QREG_SR);
-    g_free(tcg_ctx->QREG_CC_OP);
-    g_free(tcg_ctx->QREG_CC_DEST);
-    g_free(tcg_ctx->QREG_CC_SRC);
-    g_free(tcg_ctx->QREG_CC_X);
-    g_free(tcg_ctx->QREG_DIV1);
-    g_free(tcg_ctx->QREG_DIV2);
-    g_free(tcg_ctx->QREG_MACSR);
-    g_free(tcg_ctx->QREG_MAC_MASK);
-    for (i = 0; i < 8; i++) {
-        g_free(tcg_ctx->cpu_dregs[i]);
-        g_free(tcg_ctx->cpu_aregs[i]);
-    }
-    g_free(tcg_ctx->NULL_QREG);
-    g_free(tcg_ctx->store_dummy);
-}
-
-void m68k_reg_reset(struct uc_struct *uc)
-{
-    CPUArchState *env = uc->cpu->env_ptr;
-
-    memset(env->aregs, 0, sizeof(env->aregs));
-    memset(env->dregs, 0, sizeof(env->dregs));
-
-    env->pc = 0;
-}
-
-int m68k_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count)
-{
-    CPUState *mycpu = uc->cpu;
-    int i;
-
-    for (i = 0; i < count; i++) {
-        unsigned int regid = regs[i];
-        void *value = vals[i];
-        if (regid >= UC_M68K_REG_A0 && regid <= UC_M68K_REG_A7)
-            *(int32_t *)value = M68K_CPU(uc, mycpu)->env.aregs[regid - UC_M68K_REG_A0];
-        else if (regid >= UC_M68K_REG_D0 && regid <= UC_M68K_REG_D7)
-            *(int32_t *)value = M68K_CPU(uc, mycpu)->env.dregs[regid - UC_M68K_REG_D0];
-        else {
-            switch(regid) {
-            default: break;
-            case UC_M68K_REG_PC:
-                *(int32_t *)value = M68K_CPU(uc, mycpu)->env.pc;
-                break;
-            }
-        }
-    }
-
-    return 0;
-}
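
m68k_reg_read() above and m68k_reg_write() below are the per-arch callbacks behind the public uc_reg_read()/uc_reg_write() API. A minimal caller, as a sketch with error handling elided:

#include <unicorn/unicorn.h>
#include <stdio.h>

int main(void)
{
    uc_engine *uc;
    uint32_t d0 = 0x1234;

    if (uc_open(UC_ARCH_M68K, UC_MODE_BIG_ENDIAN, &uc) != UC_ERR_OK)
        return 1;

    /* These calls land in m68k_reg_write()/m68k_reg_read(). */
    uc_reg_write(uc, UC_M68K_REG_D0, &d0);
    d0 = 0;
    uc_reg_read(uc, UC_M68K_REG_D0, &d0);
    printf("D0 = 0x%x\n", d0); /* 0x1234 */

    uc_close(uc);
    return 0;
}

Note that writing UC_M68K_REG_PC goes through the quit_request/uc_emu_stop() path below, so a changed program counter takes effect instead of stale translated code.
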
-
-int m68k_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count)
-{
-    CPUState *mycpu = uc->cpu;
-    int i;
-
-    for (i = 0; i < count; i++) {
-        unsigned int regid = regs[i];
-        const void *value = vals[i];
-        if (regid >= UC_M68K_REG_A0 && regid <= UC_M68K_REG_A7)
-            M68K_CPU(uc, mycpu)->env.aregs[regid - UC_M68K_REG_A0] = *(uint32_t *)value;
-        else if (regid >= UC_M68K_REG_D0 && regid <= UC_M68K_REG_D7)
-            M68K_CPU(uc, mycpu)->env.dregs[regid - UC_M68K_REG_D0] = *(uint32_t *)value;
-        else {
-            switch(regid) {
-            default: break;
-            case UC_M68K_REG_PC:
-                M68K_CPU(uc, mycpu)->env.pc = *(uint32_t *)value;
-                // force to quit execution and flush TB
-                uc->quit_request = true;
-                uc_emu_stop(uc);
-                break;
-            }
-        }
-    }
-
-    return 0;
-}
-
-DEFAULT_VISIBILITY
-void m68k_uc_init(struct uc_struct* uc)
-{
-    register_accel_types(uc);
-    m68k_cpu_register_types(uc);
-    dummy_m68k_machine_init(uc);
-    uc->release = m68k_release;
-    uc->reg_read = m68k_reg_read;
-    uc->reg_write = m68k_reg_write;
-    uc->reg_reset = m68k_reg_reset;
-    uc->set_pc = m68k_set_pc;
-    uc_common_init(uc);
-}
diff --git a/qemu/target-mips/Makefile.objs b/qemu/target-mips/Makefile.objs
deleted file mode 100644
index e43e509e..00000000
--- a/qemu/target-mips/Makefile.objs
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-y += translate.o dsp_helper.o op_helper.o lmi_helper.o helper.o cpu.o
-obj-y += msa_helper.o
-obj-y += unicorn.o
diff --git a/qemu/target-mips/cpu.c b/qemu/target-mips/cpu.c
deleted file mode 100644
index 3b0d422f..00000000
--- a/qemu/target-mips/cpu.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * QEMU MIPS CPU
- *
- * Copyright (c) 2012 SUSE LINUX Products GmbH
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see
- *
- */
-
-#include "cpu.h"
-#include "qemu-common.h"
-#include "hw/mips/mips.h"
-
-
-static void mips_cpu_set_pc(CPUState *cs, vaddr value)
-{
-    MIPSCPU *cpu = MIPS_CPU(cs->uc, cs);
-    CPUMIPSState *env = &cpu->env;
-
-    env->active_tc.PC = value & ~(target_ulong)1;
-    if (value & 1) {
-        env->hflags |= MIPS_HFLAG_M16;
-    } else {
-        env->hflags &= ~(MIPS_HFLAG_M16);
-    }
-}
-
-static void mips_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
-{
-    MIPSCPU *cpu = MIPS_CPU(cs->uc, cs);
-    CPUMIPSState *env = &cpu->env;
-
-    env->active_tc.PC = tb->pc;
-    env->hflags &= ~MIPS_HFLAG_BMASK;
-    env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
-}
-
-static bool mips_cpu_has_work(CPUState *cs)
-{
-    MIPSCPU *cpu = MIPS_CPU(cs->uc, cs);
-    CPUMIPSState *env = &cpu->env;
-    bool has_work = false;
-
-    /* It is implementation dependent if non-enabled interrupts
-       wake-up the CPU, however most of the implementations only
-       check for interrupts that can be taken. */
-    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
-        cpu_mips_hw_interrupts_pending(env)) {
-        has_work = true;
-    }
-
-    /* MIPS-MT has the ability to halt the CPU.
*/ - if (env->CP0_Config3 & (1 << CP0C3_MT)) { - /* The QEMU model will issue an _WAKE request whenever the CPUs - should be woken up. */ - if (cs->interrupt_request & CPU_INTERRUPT_WAKE) { - has_work = true; - } - - if (!mips_vpe_active(env)) { - has_work = false; - } - } - return has_work; -} - -/* CPUClass::reset() */ -static void mips_cpu_reset(CPUState *s) -{ - MIPSCPU *cpu = MIPS_CPU(s->uc, s); - MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(s->uc, cpu); - CPUMIPSState *env = &cpu->env; - - mcc->parent_reset(s); - - memset(env, 0, offsetof(CPUMIPSState, mvp)); - tlb_flush(s, 1); - - cpu_state_reset(env); -} - -static int mips_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp) -{ - CPUState *cs = CPU(dev); - MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(uc, dev); - - cpu_reset(cs); - qemu_init_vcpu(cs); - - mcc->parent_realize(uc, dev, errp); - - return 0; -} - -static void mips_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - CPUState *cs = CPU(obj); - MIPSCPU *cpu = MIPS_CPU(uc, obj); - CPUMIPSState *env = &cpu->env; - - cs->env_ptr = env; - cpu_exec_init(env, opaque); - - if (tcg_enabled(uc)) { - mips_tcg_init(uc); - } -} - -static void mips_cpu_class_init(struct uc_struct *uc, ObjectClass *c, void *data) -{ - MIPSCPUClass *mcc = MIPS_CPU_CLASS(uc, c); - CPUClass *cc = CPU_CLASS(uc, c); - DeviceClass *dc = DEVICE_CLASS(uc, c); - - mcc->parent_realize = dc->realize; - dc->realize = mips_cpu_realizefn; - - mcc->parent_reset = cc->reset; - cc->reset = mips_cpu_reset; - - cc->has_work = mips_cpu_has_work; - cc->do_interrupt = mips_cpu_do_interrupt; - cc->cpu_exec_interrupt = mips_cpu_exec_interrupt; - cc->set_pc = mips_cpu_set_pc; - cc->synchronize_from_tb = mips_cpu_synchronize_from_tb; -#ifdef CONFIG_USER_ONLY - cc->handle_mmu_fault = mips_cpu_handle_mmu_fault; -#else - cc->do_unassigned_access = mips_cpu_unassigned_access; - cc->do_unaligned_access = mips_cpu_do_unaligned_access; - cc->get_phys_page_debug = mips_cpu_get_phys_page_debug; -#endif -} - -void mips_cpu_register_types(void *opaque) -{ - const TypeInfo mips_cpu_type_info = { - TYPE_MIPS_CPU, - TYPE_CPU, - - sizeof(MIPSCPUClass), - sizeof(MIPSCPU), - opaque, - - mips_cpu_initfn, - NULL, - NULL, - - NULL, - - mips_cpu_class_init, - NULL, - NULL, - - false, - }; - - type_register_static(opaque, &mips_cpu_type_info); -} diff --git a/qemu/target-mips/cpu.h b/qemu/target-mips/cpu.h deleted file mode 100644 index df4ec2b5..00000000 --- a/qemu/target-mips/cpu.h +++ /dev/null @@ -1,901 +0,0 @@ -#if !defined (__MIPS_CPU_H__) -#define __MIPS_CPU_H__ - -//#define DEBUG_OP - -#define ALIGNED_ONLY -#define TARGET_HAS_ICE 1 - -#define ELF_MACHINE EM_MIPS - -#define CPUArchState struct CPUMIPSState - -#include "config.h" -#include "qemu-common.h" -#include "mips-defs.h" -#include "exec/cpu-defs.h" -#include "fpu/softfloat.h" - -struct CPUMIPSState; - -typedef struct r4k_tlb_t r4k_tlb_t; -struct r4k_tlb_t { - target_ulong VPN; - uint32_t PageMask; - uint_fast8_t ASID; - uint_fast16_t G:1; - uint_fast16_t C0:3; - uint_fast16_t C1:3; - uint_fast16_t V0:1; - uint_fast16_t V1:1; - uint_fast16_t D0:1; - uint_fast16_t D1:1; - uint_fast16_t XI0:1; - uint_fast16_t XI1:1; - uint_fast16_t RI0:1; - uint_fast16_t RI1:1; - uint_fast16_t EHINV:1; - target_ulong PFN[2]; -}; - -#if !defined(CONFIG_USER_ONLY) -typedef struct CPUMIPSTLBContext CPUMIPSTLBContext; -struct CPUMIPSTLBContext { - uint32_t nb_tlb; - uint32_t tlb_in_use; - int (*map_address) (struct CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong address, int rw, 
int access_type); - void (*helper_tlbwi)(struct CPUMIPSState *env); - void (*helper_tlbwr)(struct CPUMIPSState *env); - void (*helper_tlbp)(struct CPUMIPSState *env); - void (*helper_tlbr)(struct CPUMIPSState *env); - void (*helper_tlbinv)(struct CPUMIPSState *env); - void (*helper_tlbinvf)(struct CPUMIPSState *env); - union { - struct { - r4k_tlb_t tlb[MIPS_TLB_MAX]; - } r4k; - } mmu; -}; -#endif - -/* MSA Context */ -#define MSA_WRLEN (128) - -enum CPUMIPSMSADataFormat { - DF_BYTE = 0, - DF_HALF, - DF_WORD, - DF_DOUBLE -}; - -typedef union wr_t wr_t; -union wr_t { - int8_t b[MSA_WRLEN/8]; - int16_t h[MSA_WRLEN/16]; - int32_t w[MSA_WRLEN/32]; - int64_t d[MSA_WRLEN/64]; -}; - -typedef union fpr_t fpr_t; -union fpr_t { - float64 fd; /* ieee double precision */ - float32 fs[2];/* ieee single precision */ - uint64_t d; /* binary double fixed-point */ - uint32_t w[2]; /* binary single fixed-point */ -/* FPU/MSA register mapping is not tested on big-endian hosts. */ - wr_t wr; /* vector data */ -}; -/* define FP_ENDIAN_IDX to access the same location - * in the fpr_t union regardless of the host endianness - */ -#if defined(HOST_WORDS_BIGENDIAN) -# define FP_ENDIAN_IDX 1 -#else -# define FP_ENDIAN_IDX 0 -#endif - -typedef struct CPUMIPSFPUContext CPUMIPSFPUContext; -struct CPUMIPSFPUContext { - /* Floating point registers */ - fpr_t fpr[32]; - float_status fp_status; - /* fpu implementation/revision register (fir) */ - uint32_t fcr0; -#define FCR0_UFRP 28 -#define FCR0_F64 22 -#define FCR0_L 21 -#define FCR0_W 20 -#define FCR0_3D 19 -#define FCR0_PS 18 -#define FCR0_D 17 -#define FCR0_S 16 -#define FCR0_PRID 8 -#define FCR0_REV 0 - /* fcsr */ - uint32_t fcr31; -#define SET_FP_COND(num,env) do { ((env).fcr31) |= ((num) ? ((int)(1U << (((num) + 24) & 0x1f))) : (1 << 23)); } while(0) -#define CLEAR_FP_COND(num,env) do { ((env).fcr31) &= ~((num) ? 
((int)(1U << (((num) + 24) & 0x1f))) : (1 << 23)); } while(0) -#define GET_FP_COND(env) ((((env).fcr31 >> 24) & 0xfe) | (((env).fcr31 >> 23) & 0x1)) -#define GET_FP_CAUSE(reg) (((reg) >> 12) & 0x3f) -#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f) -#define GET_FP_FLAGS(reg) (((reg) >> 2) & 0x1f) -#define SET_FP_CAUSE(reg,v) do { (reg) = ((reg) & ~(0x3f << 12)) | ((v & 0x3f) << 12); } while(0) -#define SET_FP_ENABLE(reg,v) do { (reg) = ((reg) & ~(0x1f << 7)) | ((v & 0x1f) << 7); } while(0) -#define SET_FP_FLAGS(reg,v) do { (reg) = ((reg) & ~(0x1f << 2)) | ((v & 0x1f) << 2); } while(0) -#define UPDATE_FP_FLAGS(reg,v) do { (reg) |= ((v & 0x1f) << 2); } while(0) -#define FP_INEXACT 1 -#define FP_UNDERFLOW 2 -#define FP_OVERFLOW 4 -#define FP_DIV0 8 -#define FP_INVALID 16 -#define FP_UNIMPLEMENTED 32 -}; - -#define NB_MMU_MODES 3 - -typedef struct CPUMIPSMVPContext CPUMIPSMVPContext; -struct CPUMIPSMVPContext { - int32_t CP0_MVPControl; -#define CP0MVPCo_CPA 3 -#define CP0MVPCo_STLB 2 -#define CP0MVPCo_VPC 1 -#define CP0MVPCo_EVP 0 - int32_t CP0_MVPConf0; -#define CP0MVPC0_M 31 -#define CP0MVPC0_TLBS 29 -#define CP0MVPC0_GS 28 -#define CP0MVPC0_PCP 27 -#define CP0MVPC0_PTLBE 16 -#define CP0MVPC0_TCA 15 -#define CP0MVPC0_PVPE 10 -#define CP0MVPC0_PTC 0 - int32_t CP0_MVPConf1; -#define CP0MVPC1_CIM 31 -#define CP0MVPC1_CIF 30 -#define CP0MVPC1_PCX 20 -#define CP0MVPC1_PCP2 10 -#define CP0MVPC1_PCP1 0 -}; - -typedef struct mips_def_t mips_def_t; - -#define MIPS_SHADOW_SET_MAX 16 -#define MIPS_TC_MAX 5 -#define MIPS_FPU_MAX 1 -#define MIPS_DSP_ACC 4 -#define MIPS_KSCRATCH_NUM 6 - -typedef struct TCState TCState; -struct TCState { - target_ulong gpr[32]; - target_ulong PC; - target_ulong HI[MIPS_DSP_ACC]; - target_ulong LO[MIPS_DSP_ACC]; - target_ulong ACX[MIPS_DSP_ACC]; - target_ulong DSPControl; - int32_t CP0_TCStatus; -#define CP0TCSt_TCU3 31 -#define CP0TCSt_TCU2 30 -#define CP0TCSt_TCU1 29 -#define CP0TCSt_TCU0 28 -#define CP0TCSt_TMX 27 -#define CP0TCSt_RNST 23 -#define CP0TCSt_TDS 21 -#define CP0TCSt_DT 20 -#define CP0TCSt_DA 15 -#define CP0TCSt_A 13 -#define CP0TCSt_TKSU 11 -#define CP0TCSt_IXMT 10 -#define CP0TCSt_TASID 0 - int32_t CP0_TCBind; -#define CP0TCBd_CurTC 21 -#define CP0TCBd_TBE 17 -#define CP0TCBd_CurVPE 0 - target_ulong CP0_TCHalt; - target_ulong CP0_TCContext; - target_ulong CP0_TCSchedule; - target_ulong CP0_TCScheFBack; - int32_t CP0_Debug_tcstatus; - target_ulong CP0_UserLocal; - - int32_t msacsr; - -#define MSACSR_FS 24 -#define MSACSR_FS_MASK (1 << MSACSR_FS) -#define MSACSR_NX 18 -#define MSACSR_NX_MASK (1 << MSACSR_NX) -#define MSACSR_CEF 2 -#define MSACSR_CEF_MASK (0xffff << MSACSR_CEF) -#define MSACSR_RM 0 -#define MSACSR_RM_MASK (0x3 << MSACSR_RM) -#define MSACSR_MASK (MSACSR_RM_MASK | MSACSR_CEF_MASK | MSACSR_NX_MASK | \ - MSACSR_FS_MASK) - - float_status msa_fp_status; -}; - -typedef struct CPUMIPSState CPUMIPSState; -struct CPUMIPSState { - TCState active_tc; - CPUMIPSFPUContext active_fpu; - - uint32_t current_tc; - uint32_t current_fpu; - - uint32_t SEGBITS; - uint32_t PABITS; - target_ulong SEGMask; - target_ulong PAMask; - - int32_t msair; -#define MSAIR_ProcID 8 -#define MSAIR_Rev 0 - - int32_t CP0_Index; - /* CP0_MVP* are per MVP registers. 
*/ - int32_t CP0_Random; - int32_t CP0_VPEControl; -#define CP0VPECo_YSI 21 -#define CP0VPECo_GSI 20 -#define CP0VPECo_EXCPT 16 -#define CP0VPECo_TE 15 -#define CP0VPECo_TargTC 0 - int32_t CP0_VPEConf0; -#define CP0VPEC0_M 31 -#define CP0VPEC0_XTC 21 -#define CP0VPEC0_TCS 19 -#define CP0VPEC0_SCS 18 -#define CP0VPEC0_DSC 17 -#define CP0VPEC0_ICS 16 -#define CP0VPEC0_MVP 1 -#define CP0VPEC0_VPA 0 - int32_t CP0_VPEConf1; -#define CP0VPEC1_NCX 20 -#define CP0VPEC1_NCP2 10 -#define CP0VPEC1_NCP1 0 - target_ulong CP0_YQMask; - target_ulong CP0_VPESchedule; - target_ulong CP0_VPEScheFBack; - int32_t CP0_VPEOpt; -#define CP0VPEOpt_IWX7 15 -#define CP0VPEOpt_IWX6 14 -#define CP0VPEOpt_IWX5 13 -#define CP0VPEOpt_IWX4 12 -#define CP0VPEOpt_IWX3 11 -#define CP0VPEOpt_IWX2 10 -#define CP0VPEOpt_IWX1 9 -#define CP0VPEOpt_IWX0 8 -#define CP0VPEOpt_DWX7 7 -#define CP0VPEOpt_DWX6 6 -#define CP0VPEOpt_DWX5 5 -#define CP0VPEOpt_DWX4 4 -#define CP0VPEOpt_DWX3 3 -#define CP0VPEOpt_DWX2 2 -#define CP0VPEOpt_DWX1 1 -#define CP0VPEOpt_DWX0 0 - target_ulong CP0_EntryLo0; - target_ulong CP0_EntryLo1; -#if defined(TARGET_MIPS64) -# define CP0EnLo_RI 63 -# define CP0EnLo_XI 62 -#else -# define CP0EnLo_RI 31 -# define CP0EnLo_XI 30 -#endif - target_ulong CP0_Context; - target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM]; - int32_t CP0_PageMask; - int32_t CP0_PageGrain_rw_bitmask; - int32_t CP0_PageGrain; -#define CP0PG_RIE 31 -#define CP0PG_XIE 30 -#define CP0PG_IEC 27 - int32_t CP0_Wired; - int32_t CP0_SRSConf0_rw_bitmask; - int32_t CP0_SRSConf0; -#define CP0SRSC0_M 31 -#define CP0SRSC0_SRS3 20 -#define CP0SRSC0_SRS2 10 -#define CP0SRSC0_SRS1 0 - int32_t CP0_SRSConf1_rw_bitmask; - int32_t CP0_SRSConf1; -#define CP0SRSC1_M 31 -#define CP0SRSC1_SRS6 20 -#define CP0SRSC1_SRS5 10 -#define CP0SRSC1_SRS4 0 - int32_t CP0_SRSConf2_rw_bitmask; - int32_t CP0_SRSConf2; -#define CP0SRSC2_M 31 -#define CP0SRSC2_SRS9 20 -#define CP0SRSC2_SRS8 10 -#define CP0SRSC2_SRS7 0 - int32_t CP0_SRSConf3_rw_bitmask; - int32_t CP0_SRSConf3; -#define CP0SRSC3_M 31 -#define CP0SRSC3_SRS12 20 -#define CP0SRSC3_SRS11 10 -#define CP0SRSC3_SRS10 0 - int32_t CP0_SRSConf4_rw_bitmask; - int32_t CP0_SRSConf4; -#define CP0SRSC4_SRS15 20 -#define CP0SRSC4_SRS14 10 -#define CP0SRSC4_SRS13 0 - int32_t CP0_HWREna; - target_ulong CP0_BadVAddr; - uint32_t CP0_BadInstr; - uint32_t CP0_BadInstrP; - int32_t CP0_Count; - target_ulong CP0_EntryHi; -#define CP0EnHi_EHINV 10 - int32_t CP0_Compare; - int32_t CP0_Status; -#define CP0St_CU3 31 -#define CP0St_CU2 30 -#define CP0St_CU1 29 -#define CP0St_CU0 28 -#define CP0St_RP 27 -#define CP0St_FR 26 -#define CP0St_RE 25 -#define CP0St_MX 24 -#define CP0St_PX 23 -#define CP0St_BEV 22 -#define CP0St_TS 21 -#define CP0St_SR 20 -#define CP0St_NMI 19 -#define CP0St_IM 8 -#define CP0St_KX 7 -#define CP0St_SX 6 -#define CP0St_UX 5 -#define CP0St_KSU 3 -#define CP0St_ERL 2 -#define CP0St_EXL 1 -#define CP0St_IE 0 - int32_t CP0_IntCtl; -#define CP0IntCtl_IPTI 29 -#define CP0IntCtl_IPPC1 26 -#define CP0IntCtl_VS 5 - int32_t CP0_SRSCtl; -#define CP0SRSCtl_HSS 26 -#define CP0SRSCtl_EICSS 18 -#define CP0SRSCtl_ESS 12 -#define CP0SRSCtl_PSS 6 -#define CP0SRSCtl_CSS 0 - int32_t CP0_SRSMap; -#define CP0SRSMap_SSV7 28 -#define CP0SRSMap_SSV6 24 -#define CP0SRSMap_SSV5 20 -#define CP0SRSMap_SSV4 16 -#define CP0SRSMap_SSV3 12 -#define CP0SRSMap_SSV2 8 -#define CP0SRSMap_SSV1 4 -#define CP0SRSMap_SSV0 0 - int32_t CP0_Cause; -#define CP0Ca_BD 31 -#define CP0Ca_TI 30 -#define CP0Ca_CE 28 -#define CP0Ca_DC 27 -#define CP0Ca_PCI 26 -#define CP0Ca_IV 
23 -#define CP0Ca_WP 22 -#define CP0Ca_IP 8 -#define CP0Ca_IP_mask 0x0000FF00 -#define CP0Ca_EC 2 - target_ulong CP0_EPC; - int32_t CP0_PRid; - int32_t CP0_EBase; - int32_t CP0_Config0; -#define CP0C0_M 31 -#define CP0C0_K23 28 -#define CP0C0_KU 25 -#define CP0C0_MDU 20 -#define CP0C0_MM 17 -#define CP0C0_BM 16 -#define CP0C0_BE 15 -#define CP0C0_AT 13 -#define CP0C0_AR 10 -#define CP0C0_MT 7 -#define CP0C0_VI 3 -#define CP0C0_K0 0 - int32_t CP0_Config1; -#define CP0C1_M 31 -#define CP0C1_MMU 25 -#define CP0C1_IS 22 -#define CP0C1_IL 19 -#define CP0C1_IA 16 -#define CP0C1_DS 13 -#define CP0C1_DL 10 -#define CP0C1_DA 7 -#define CP0C1_C2 6 -#define CP0C1_MD 5 -#define CP0C1_PC 4 -#define CP0C1_WR 3 -#define CP0C1_CA 2 -#define CP0C1_EP 1 -#define CP0C1_FP 0 - int32_t CP0_Config2; -#define CP0C2_M 31 -#define CP0C2_TU 28 -#define CP0C2_TS 24 -#define CP0C2_TL 20 -#define CP0C2_TA 16 -#define CP0C2_SU 12 -#define CP0C2_SS 8 -#define CP0C2_SL 4 -#define CP0C2_SA 0 - int32_t CP0_Config3; -#define CP0C3_M 31 -#define CP0C3_BPG 30 -#define CP0C3_CMCGR 29 -#define CP0C3_MSAP 28 -#define CP0C3_BP 27 -#define CP0C3_BI 26 -#define CP0C3_IPLW 21 -#define CP0C3_MMAR 18 -#define CP0C3_MCU 17 -#define CP0C3_ISA_ON_EXC 16 -#define CP0C3_ISA 14 -#define CP0C3_ULRI 13 -#define CP0C3_RXI 12 -#define CP0C3_DSP2P 11 -#define CP0C3_DSPP 10 -#define CP0C3_LPA 7 -#define CP0C3_VEIC 6 -#define CP0C3_VInt 5 -#define CP0C3_SP 4 -#define CP0C3_CDMM 3 -#define CP0C3_MT 2 -#define CP0C3_SM 1 -#define CP0C3_TL 0 - uint32_t CP0_Config4; - uint32_t CP0_Config4_rw_bitmask; -#define CP0C4_M 31 -#define CP0C4_IE 29 -#define CP0C4_KScrExist 16 -#define CP0C4_MMUExtDef 14 -#define CP0C4_FTLBPageSize 8 -#define CP0C4_FTLBWays 4 -#define CP0C4_FTLBSets 0 -#define CP0C4_MMUSizeExt 0 - uint32_t CP0_Config5; - uint32_t CP0_Config5_rw_bitmask; -#define CP0C5_M 31 -#define CP0C5_K 30 -#define CP0C5_CV 29 -#define CP0C5_EVA 28 -#define CP0C5_MSAEn 27 -#define CP0C5_SBRI 6 -#define CP0C5_UFR 2 -#define CP0C5_NFExists 0 - int32_t CP0_Config6; - int32_t CP0_Config7; - /* XXX: Maybe make LLAddr per-TC? */ - target_ulong lladdr; - target_ulong llval; - target_ulong llnewval; - target_ulong llreg; - target_ulong CP0_LLAddr_rw_bitmask; - int CP0_LLAddr_shift; - target_ulong CP0_WatchLo[8]; - int32_t CP0_WatchHi[8]; - target_ulong CP0_XContext; - int32_t CP0_Framemask; - int32_t CP0_Debug; -#define CP0DB_DBD 31 -#define CP0DB_DM 30 -#define CP0DB_LSNM 28 -#define CP0DB_Doze 27 -#define CP0DB_Halt 26 -#define CP0DB_CNT 25 -#define CP0DB_IBEP 24 -#define CP0DB_DBEP 21 -#define CP0DB_IEXI 20 -#define CP0DB_VER 15 -#define CP0DB_DEC 10 -#define CP0DB_SSt 8 -#define CP0DB_DINT 5 -#define CP0DB_DIB 4 -#define CP0DB_DDBS 3 -#define CP0DB_DDBL 2 -#define CP0DB_DBp 1 -#define CP0DB_DSS 0 - target_ulong CP0_DEPC; - int32_t CP0_Performance0; - int32_t CP0_TagLo; - int32_t CP0_DataLo; - int32_t CP0_TagHi; - int32_t CP0_DataHi; - target_ulong CP0_ErrorEPC; - int32_t CP0_DESAVE; - /* We waste some space so we can handle shadow registers like TCs. */ - TCState tcs[MIPS_SHADOW_SET_MAX]; - CPUMIPSFPUContext fpus[MIPS_FPU_MAX]; - /* QEMU */ - int error_code; -#define EXCP_TLB_NOMATCH 0x1 -#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */ - uint32_t hflags; /* CPU State */ - /* TMASK defines different execution modes */ -#define MIPS_HFLAG_TMASK 0x15807FF -#define MIPS_HFLAG_MODE 0x00007 /* execution modes */ - /* The KSU flags must be the lowest bits in hflags. The flag order - must be the same as defined for CP0 Status. 
This allows to use - the bits as the value of mmu_idx. */ -#define MIPS_HFLAG_KSU 0x00003 /* kernel/supervisor/user mode mask */ -#define MIPS_HFLAG_UM 0x00002 /* user mode flag */ -#define MIPS_HFLAG_SM 0x00001 /* supervisor mode flag */ -#define MIPS_HFLAG_KM 0x00000 /* kernel mode flag */ -#define MIPS_HFLAG_DM 0x00004 /* Debug mode */ -#define MIPS_HFLAG_64 0x00008 /* 64-bit instructions enabled */ -#define MIPS_HFLAG_CP0 0x00010 /* CP0 enabled */ -#define MIPS_HFLAG_FPU 0x00020 /* FPU enabled */ -#define MIPS_HFLAG_F64 0x00040 /* 64-bit FPU enabled */ - /* True if the MIPS IV COP1X instructions can be used. This also - controls the non-COP1X instructions RECIP.S, RECIP.D, RSQRT.S - and RSQRT.D. */ -#define MIPS_HFLAG_COP1X 0x00080 /* COP1X instructions enabled */ -#define MIPS_HFLAG_RE 0x00100 /* Reversed endianness */ -#define MIPS_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */ -#define MIPS_HFLAG_M16 0x00400 /* MIPS16 mode flag */ -#define MIPS_HFLAG_M16_SHIFT 10 - /* If translation is interrupted between the branch instruction and - * the delay slot, record what type of branch it is so that we can - * resume translation properly. It might be possible to reduce - * this from three bits to two. */ -#define MIPS_HFLAG_BMASK_BASE 0x803800 -#define MIPS_HFLAG_B 0x00800 /* Unconditional branch */ -#define MIPS_HFLAG_BC 0x01000 /* Conditional branch */ -#define MIPS_HFLAG_BL 0x01800 /* Likely branch */ -#define MIPS_HFLAG_BR 0x02000 /* branch to register (can't link TB) */ - /* Extra flags about the current pending branch. */ -#define MIPS_HFLAG_BMASK_EXT 0x7C000 -#define MIPS_HFLAG_B16 0x04000 /* branch instruction was 16 bits */ -#define MIPS_HFLAG_BDS16 0x08000 /* branch requires 16-bit delay slot */ -#define MIPS_HFLAG_BDS32 0x10000 /* branch requires 32-bit delay slot */ -#define MIPS_HFLAG_BDS_STRICT 0x20000 /* Strict delay slot size */ -#define MIPS_HFLAG_BX 0x40000 /* branch exchanges execution mode */ -#define MIPS_HFLAG_BMASK (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT) - /* MIPS DSP resources access. */ -#define MIPS_HFLAG_DSP 0x080000 /* Enable access to MIPS DSP resources. */ -#define MIPS_HFLAG_DSPR2 0x100000 /* Enable access to MIPS DSPR2 resources. */ - /* Extra flag about HWREna register. */ -#define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */ -#define MIPS_HFLAG_SBRI 0x400000 /* R6 SDBBP causes RI excpt. in user mode */ -#define MIPS_HFLAG_FBNSLOT 0x800000 /* Forbidden slot */ -#define MIPS_HFLAG_MSA 0x1000000 - target_ulong btarget; /* Jump / branch target */ - target_ulong bcond; /* Branch condition (if needed) */ - - int SYNCI_Step; /* Address step size for SYNCI */ - int CCRes; /* Cycle count resolution/divisor */ - uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */ - uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */ - int insn_flags; /* Supported instruction set */ - - CPU_COMMON - - /* Fields from here on are preserved across CPU reset. 
- */
-    CPUMIPSMVPContext *mvp;
-#if !defined(CONFIG_USER_ONLY)
-    CPUMIPSTLBContext *tlb;
-#endif
-
-    const mips_def_t *cpu_model;
-    //void *irq[8];
-    //QEMUTimer *timer; /* Internal timer */
-
-    // Unicorn engine
-    struct uc_struct *uc;
-};
-
-#include "cpu-qom.h"
-
-#if !defined(CONFIG_USER_ONLY)
-int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
-                        target_ulong address, int rw, int access_type);
-int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
-                           target_ulong address, int rw, int access_type);
-int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
-                     target_ulong address, int rw, int access_type);
-void r4k_helper_tlbwi(CPUMIPSState *env);
-void r4k_helper_tlbwr(CPUMIPSState *env);
-void r4k_helper_tlbp(CPUMIPSState *env);
-void r4k_helper_tlbr(CPUMIPSState *env);
-void r4k_helper_tlbinv(CPUMIPSState *env);
-void r4k_helper_tlbinvf(CPUMIPSState *env);
-
-void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
-                                bool is_write, bool is_exec, int unused,
-                                unsigned size);
-#endif
-
-void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf);
-
-#define cpu_exec cpu_mips_exec
-#define cpu_gen_code cpu_mips_gen_code
-#define cpu_signal_handler cpu_mips_signal_handler
-#define cpu_list mips_cpu_list
-
-extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env);
-extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
-
-#define CPU_SAVE_VERSION 5
-
-/* MMU modes definitions. We carefully match the indices with our
-   hflags layout. */
-#define MMU_MODE0_SUFFIX _kernel
-#define MMU_MODE1_SUFFIX _super
-#define MMU_MODE2_SUFFIX _user
-#define MMU_USER_IDX 2
-static inline int cpu_mmu_index (CPUMIPSState *env)
-{
-    return env->hflags & MIPS_HFLAG_KSU;
-}
-
-static inline int cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
-{
-    int32_t pending;
-    int32_t status;
-    int r;
-
-    if (!(env->CP0_Status & (1 << CP0St_IE)) ||
-        (env->CP0_Status & (1 << CP0St_EXL)) ||
-        (env->CP0_Status & (1 << CP0St_ERL)) ||
-        /* Note that the TCStatus IXMT field is initialized to zero,
-           and only MT capable cores can set it to one.  So we don't
-           need to check for MT capabilities here. */
-        (env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT)) ||
-        (env->hflags & MIPS_HFLAG_DM)) {
-        /* Interrupts are disabled */
-        return 0;
-    }
-
-    pending = env->CP0_Cause & CP0Ca_IP_mask;
-    status = env->CP0_Status & CP0Ca_IP_mask;
-
-    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
-        /* A MIPS configured with a vectorizing external interrupt controller
-           will feed a vector into the Cause pending lines.  The core treats
-           the status lines as a vector level, not as individual masks. */
-        r = pending > status;
-    } else {
-        /* A MIPS configured with compatibility or VInt (Vectored Interrupts)
-           treats the pending lines as individual interrupt lines, the status
-           lines are individual masks. */
-        r = pending & status;
-    }
-    return r;
-}
-
-#include "exec/cpu-all.h"
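
The two branches of cpu_mips_hw_interrupts_pending() implement very different semantics, which is easy to miss at a glance. A tiny standalone illustration with toy register values (the constants below are illustrative, not real CP0 state):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int32_t pending = 0x0400; /* Cause.IP2 asserted (bit 10) */
    int32_t status  = 0x0200; /* Status.IM: only IP1 / level 1 */

    /* VEIC mode: Status.IM holds the current priority level and Cause.IP
       the requested level; take the interrupt when the request exceeds it. */
    printf("VEIC: %d\n", pending > status);        /* 1 */

    /* Compatibility/VInt mode: bitwise AND of individual lines and masks. */
    printf("VInt: %d\n", (pending & status) != 0); /* 0 */
    return 0;
}
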
-
-/* Memory access type :
- * may be needed for precise access rights control and precise exceptions.
- */
-enum {
-    /* 1 bit to define user level / supervisor access */
-    ACCESS_USER = 0x00,
-    ACCESS_SUPER = 0x01,
-    /* 1 bit to indicate direction */
-    ACCESS_STORE = 0x02,
-    /* Type of instruction that generated the access */
-    ACCESS_CODE = 0x10,  /* Code fetch access */
-    ACCESS_INT = 0x20,   /* Integer load/store access */
-    ACCESS_FLOAT = 0x30, /* floating point load/store access */
-};
-
-/* Exceptions */
-enum {
-    EXCP_NONE = -1,
-    EXCP_RESET = 0,
-    EXCP_SRESET,
-    EXCP_DSS,
-    EXCP_DINT,
-    EXCP_DDBL,
-    EXCP_DDBS,
-    EXCP_NMI,
-    EXCP_MCHECK,
-    EXCP_EXT_INTERRUPT, /* 8 */
-    EXCP_DFWATCH,
-    EXCP_DIB,
-    EXCP_IWATCH,
-    EXCP_AdEL,
-    EXCP_AdES,
-    EXCP_TLBF,
-    EXCP_IBE,
-    EXCP_DBp, /* 16 */
-    EXCP_SYSCALL,
-    EXCP_BREAK,
-    EXCP_CpU,
-    EXCP_RI,
-    EXCP_OVERFLOW,
-    EXCP_TRAP,
-    EXCP_FPE,
-    EXCP_DWATCH, /* 24 */
-    EXCP_LTLBL,
-    EXCP_TLBL,
-    EXCP_TLBS,
-    EXCP_DBE,
-    EXCP_THREAD,
-    EXCP_MDMX,
-    EXCP_C2E,
-    EXCP_CACHE, /* 32 */
-    EXCP_DSPDIS,
-    EXCP_MSADIS,
-    EXCP_MSAFPE,
-    EXCP_TLBXI,
-    EXCP_TLBRI,
-
-    EXCP_LAST = EXCP_TLBRI,
-};
-/* Dummy exception for conditional stores. */
-#define EXCP_SC 0x100
-
-/*
- * This is an internally generated WAKE request line.
- * It is driven by the CPU itself. Raised when the MT
- * block wants to wake a VPE from an inactive state and
- * cleared when VPE goes from active to inactive.
- */
-#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0
-
-int cpu_mips_exec(struct uc_struct *uc, CPUMIPSState *s);
-void mips_tcg_init(struct uc_struct *uc);
-MIPSCPU *cpu_mips_init(struct uc_struct *uc, const char *cpu_model);
-int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc);
-
-/* TODO QOM'ify CPU reset and remove */
-void cpu_state_reset(CPUMIPSState *s);
-
-/* mips_timer.c */
-uint32_t cpu_mips_get_random (CPUMIPSState *env);
-uint32_t cpu_mips_get_count (CPUMIPSState *env);
-void cpu_mips_store_count (CPUMIPSState *env, uint32_t value);
-void cpu_mips_store_compare (CPUMIPSState *env, uint32_t value);
-void cpu_mips_start_count(CPUMIPSState *env);
-void cpu_mips_stop_count(CPUMIPSState *env);
-
-/* mips_int.c */
-void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);
-
-/* helper.c */
-int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
-                              int mmu_idx);
-#if !defined(CONFIG_USER_ONLY)
-void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
-hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
-                                   int rw);
-#endif
-target_ulong exception_resume_pc (CPUMIPSState *env);
-
-/* op_helper.c */
-extern unsigned int ieee_rm[];
-int ieee_ex_to_mips(int xcpt);
-
-static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
-                                        target_ulong *cs_base, int *flags)
-{
-    *pc = env->active_tc.PC;
-    *cs_base = 0;
-    *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
-                            MIPS_HFLAG_HWRENA_ULR);
-}
-
-static inline int mips_vpe_active(CPUMIPSState *env)
-{
-    int active = 1;
-
-    /* Check that the VPE is enabled. */
-    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
-        active = 0;
-    }
-    /* Check that the VPE is activated. */
-    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
-        active = 0;
-    }
-
-    /* Now verify that there are active thread contexts in the VPE.
-
-       This assumes the CPU model will internally reschedule threads
-       if the active one goes to sleep.  If there are no threads available
-       the active one will be in a sleeping state, and we can turn off
-       the entire VPE. */
-    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
-        /* TC is not activated.
*/ - active = 0; - } - if (env->active_tc.CP0_TCHalt & 1) { - /* TC is in halt state. */ - active = 0; - } - - return active; -} - -#include "exec/exec-all.h" - -static inline void compute_hflags(CPUMIPSState *env) -{ - env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 | - MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU | - MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2 | - MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA); - if (!(env->CP0_Status & (1 << CP0St_EXL)) && - !(env->CP0_Status & (1 << CP0St_ERL)) && - !(env->hflags & MIPS_HFLAG_DM)) { - env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU; - } -#if defined(TARGET_MIPS64) - if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) || - (env->CP0_Status & (1 << CP0St_PX)) || - (env->CP0_Status & (1 << CP0St_UX))) { - env->hflags |= MIPS_HFLAG_64; - } - - if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) && - !(env->CP0_Status & (1 << CP0St_UX))) { - env->hflags |= MIPS_HFLAG_AWRAP; - } else if (env->insn_flags & ISA_MIPS32R6) { - /* Address wrapping for Supervisor and Kernel is specified in R6 */ - if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) && - !(env->CP0_Status & (1 << CP0St_SX))) || - (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) && - !(env->CP0_Status & (1 << CP0St_KX)))) { - env->hflags |= MIPS_HFLAG_AWRAP; - } - } -#endif - if (((env->CP0_Status & (1 << CP0St_CU0)) && - !(env->insn_flags & ISA_MIPS32R6)) || - !(env->hflags & MIPS_HFLAG_KSU)) { - env->hflags |= MIPS_HFLAG_CP0; - } - if (env->CP0_Status & (1 << CP0St_CU1)) { - env->hflags |= MIPS_HFLAG_FPU; - } - if (env->CP0_Status & (1 << CP0St_FR)) { - env->hflags |= MIPS_HFLAG_F64; - } - if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) && - (env->CP0_Config5 & (1 << CP0C5_SBRI))) { - env->hflags |= MIPS_HFLAG_SBRI; - } - if (env->insn_flags & ASE_DSPR2) { - /* Enables access MIPS DSP resources, now our cpu is DSP ASER2, - so enable to access DSPR2 resources. */ - if (env->CP0_Status & (1 << CP0St_MX)) { - env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2; - } - - } else if (env->insn_flags & ASE_DSP) { - /* Enables access MIPS DSP resources, now our cpu is DSP ASE, - so enable to access DSP resources. */ - if (env->CP0_Status & (1 << CP0St_MX)) { - env->hflags |= MIPS_HFLAG_DSP; - } - - } - if (env->insn_flags & ISA_MIPS32R2) { - if (env->active_fpu.fcr0 & (1 << FCR0_F64)) { - env->hflags |= MIPS_HFLAG_COP1X; - } - } else if (env->insn_flags & ISA_MIPS32) { - if (env->hflags & MIPS_HFLAG_64) { - env->hflags |= MIPS_HFLAG_COP1X; - } - } else if (env->insn_flags & ISA_MIPS4) { - /* All supported MIPS IV CPUs use the XX (CU3) to enable - and disable the MIPS IV extensions to the MIPS III ISA. - Some other MIPS IV CPUs ignore the bit, so the check here - would be too restrictive for them. */ - if (env->CP0_Status & (1U << CP0St_CU3)) { - env->hflags |= MIPS_HFLAG_COP1X; - } - } - if (env->insn_flags & ASE_MSA) { - if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) { - env->hflags |= MIPS_HFLAG_MSA; - } - } -} - -#endif /* !defined (__MIPS_CPU_H__) */ diff --git a/qemu/target-mips/helper.c b/qemu/target-mips/helper.c deleted file mode 100644 index 749584c9..00000000 --- a/qemu/target-mips/helper.c +++ /dev/null @@ -1,826 +0,0 @@ -/* - * MIPS emulation helpers for qemu. 
- * - * Copyright (c) 2004-2005 Jocelyn Mayer - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#include -#include -#include -#include -#include "unicorn/platform.h" -#include - -#include "cpu.h" -#include "exec/cpu_ldst.h" - -enum { - TLBRET_XI = -6, - TLBRET_RI = -5, - TLBRET_DIRTY = -4, - TLBRET_INVALID = -3, - TLBRET_NOMATCH = -2, - TLBRET_BADADDR = -1, - TLBRET_MATCH = 0 -}; - -#if !defined(CONFIG_USER_ONLY) - -/* no MMU emulation */ -int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, - target_ulong address, int rw, int access_type) -{ - *physical = address; - *prot = PAGE_READ | PAGE_WRITE; - return TLBRET_MATCH; -} - -/* fixed mapping MMU emulation */ -int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, - target_ulong address, int rw, int access_type) -{ - if (address <= (int32_t)0x7FFFFFFFUL) { - if (!(env->CP0_Status & (1 << CP0St_ERL))) - *physical = address + 0x40000000UL; - else - *physical = address; - } else if (address <= (int32_t)0xBFFFFFFFUL) - *physical = address & 0x1FFFFFFF; - else - *physical = address; - - *prot = PAGE_READ | PAGE_WRITE; - return TLBRET_MATCH; -} - -/* MIPS32/MIPS64 R4000-style MMU emulation */ -int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, - target_ulong address, int rw, int access_type) -{ - uint8_t ASID = env->CP0_EntryHi & 0xFF; - int i; - - for (i = 0; i < env->tlb->tlb_in_use; i++) { - r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i]; - /* 1k pages are not supported. */ - target_ulong mask = tlb->PageMask | ~(((unsigned int)TARGET_PAGE_MASK) << 1); - target_ulong tag = address & ~mask; - target_ulong VPN = tlb->VPN & ~mask; -#if defined(TARGET_MIPS64) - tag &= env->SEGMask; -#endif - - /* Check ASID, virtual page number & size */ - if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) { - /* TLB match */ - int n = !!(address & mask & ~(mask >> 1)); - /* Check access rights */ - if (!(n ? tlb->V1 : tlb->V0)) { - return TLBRET_INVALID; - } - if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) { - return TLBRET_XI; - } - if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) { - return TLBRET_RI; - } - if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) { - *physical = tlb->PFN[n] | (address & (mask >> 1)); - *prot = PAGE_READ; - if (n ? 
tlb->D1 : tlb->D0) - *prot |= PAGE_WRITE; - return TLBRET_MATCH; - } - return TLBRET_DIRTY; - } - } - return TLBRET_NOMATCH; -} - -static int get_physical_address (CPUMIPSState *env, hwaddr *physical, - int *prot, target_ulong real_address, - int rw, int access_type) -{ - /* User mode can only access useg/xuseg */ - int user_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM; - int supervisor_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_SM; - int kernel_mode = !user_mode && !supervisor_mode; -#if defined(TARGET_MIPS64) - int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; - int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; - int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; -#endif - int ret = TLBRET_MATCH; - /* effective address (modified for KVM T&E kernel segments) */ - target_ulong address = real_address; - -#if 0 - qemu_log("user mode %d h %08x\n", user_mode, env->hflags); -#endif - -#define USEG_LIMIT 0x7FFFFFFFUL -#define KSEG0_BASE 0x80000000UL -#define KSEG1_BASE 0xA0000000UL -#define KSEG2_BASE 0xC0000000UL -#define KSEG3_BASE 0xE0000000UL - -#define KVM_KSEG0_BASE 0x40000000UL -#define KVM_KSEG2_BASE 0x60000000UL - - if (address <= USEG_LIMIT) { - /* useg */ - if (env->CP0_Status & (1 << CP0St_ERL)) { - *physical = address & 0xFFFFFFFF; - *prot = PAGE_READ | PAGE_WRITE; - } else { - ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); - } -#if defined(TARGET_MIPS64) - } else if (address < 0x4000000000000000ULL) { - /* xuseg */ - if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) { - ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); - } else { - ret = TLBRET_BADADDR; - } - } else if (address < 0x8000000000000000ULL) { - /* xsseg */ - if ((supervisor_mode || kernel_mode) && - SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) { - ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); - } else { - ret = TLBRET_BADADDR; - } - } else if (address < 0xC000000000000000ULL) { - /* xkphys */ - if (kernel_mode && KX && - (address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) { - *physical = address & env->PAMask; - *prot = PAGE_READ | PAGE_WRITE; - } else { - ret = TLBRET_BADADDR; - } - } else if (address < 0xFFFFFFFF80000000ULL) { - /* xkseg */ - if (kernel_mode && KX && - address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) { - ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); - } else { - ret = TLBRET_BADADDR; - } -#endif - } else if (address < (int32_t)KSEG1_BASE) { - /* kseg0 */ - if (kernel_mode) { - *physical = address - (int32_t)KSEG0_BASE; - *prot = PAGE_READ | PAGE_WRITE; - } else { - ret = TLBRET_BADADDR; - } - } else if (address < (int32_t)KSEG2_BASE) { - /* kseg1 */ - if (kernel_mode) { - *physical = address - (int32_t)KSEG1_BASE; - *prot = PAGE_READ | PAGE_WRITE; - } else { - ret = TLBRET_BADADDR; - } - } else if (address < (int32_t)KSEG3_BASE) { - /* sseg (kseg2) */ - if (supervisor_mode || kernel_mode) { - ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); - } else { - ret = TLBRET_BADADDR; - } - } else { - /* kseg3 */ - /* XXX: debug segment is not emulated */ - if (kernel_mode) { - ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); - } else { - ret = TLBRET_BADADDR; - } - } -#if 0 - qemu_log(TARGET_FMT_lx " %d %d => %" HWADDR_PRIx " %d (%d)\n", - address, rw, access_type, *physical, *prot, ret); -#endif - - return ret; -} -#endif - -static void raise_mmu_exception(CPUMIPSState 
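get_physical_address() above dispatches on the classic MIPS virtual-address map: useg goes through the TLB, while kseg0 and kseg1 are fixed windows onto the low 512MB of physical memory. A self-contained sketch of just the fixed-window arithmetic, with kseg_translate as an illustrative name (the TLB-mapped segments are left out):

#include <stdint.h>

/* Sketch: kernel-mode translation for the unmapped MIPS32 segments.
   kseg0 (0x80000000..0x9FFFFFFF, cached) and kseg1 (0xA0000000..
   0xBFFFFFFF, uncached) both alias physical 0x00000000..0x1FFFFFFF.
   Returns 0 on success, -1 for addresses that need a TLB lookup. */
static int kseg_translate(uint32_t vaddr, uint32_t *paddr)
{
    if (vaddr >= 0x80000000u && vaddr < 0xA0000000u) {
        *paddr = vaddr - 0x80000000u;   /* kseg0 */
        return 0;
    }
    if (vaddr >= 0xA0000000u && vaddr < 0xC0000000u) {
        *paddr = vaddr - 0xA0000000u;   /* kseg1 */
        return 0;
    }
    return -1;                          /* useg/sseg/kseg3 */
}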
*env, target_ulong address, - int rw, int tlb_error) -{ - CPUState *cs = CPU(mips_env_get_cpu(env)); - int exception = 0, error_code = 0; - - if (rw == MMU_INST_FETCH) { - error_code |= EXCP_INST_NOTAVAIL; - } - - switch (tlb_error) { - default: - case TLBRET_BADADDR: - /* Reference to kernel address from user mode or supervisor mode */ - /* Reference to supervisor address from user mode */ - if (rw == MMU_DATA_STORE) { - exception = EXCP_AdES; - } else { - exception = EXCP_AdEL; - } - break; - case TLBRET_NOMATCH: - /* No TLB match for a mapped address */ - if (rw == MMU_DATA_STORE) { - exception = EXCP_TLBS; - } else { - exception = EXCP_TLBL; - } - error_code |= EXCP_TLB_NOMATCH; - break; - case TLBRET_INVALID: - /* TLB match with no valid bit */ - if (rw == MMU_DATA_STORE) { - exception = EXCP_TLBS; - } else { - exception = EXCP_TLBL; - } - break; - case TLBRET_DIRTY: - /* TLB match but 'D' bit is cleared */ - exception = EXCP_LTLBL; - break; - case TLBRET_XI: - /* Execute-Inhibit Exception */ - if (env->CP0_PageGrain & (1 << CP0PG_IEC)) { - exception = EXCP_TLBXI; - } else { - exception = EXCP_TLBL; - } - break; - case TLBRET_RI: - /* Read-Inhibit Exception */ - if (env->CP0_PageGrain & (1 << CP0PG_IEC)) { - exception = EXCP_TLBRI; - } else { - exception = EXCP_TLBL; - } - break; - } - /* Raise exception */ - env->CP0_BadVAddr = address; - env->CP0_Context = (env->CP0_Context & ~0x007fffff) | - ((address >> 9) & 0x007ffff0); - env->CP0_EntryHi = - (env->CP0_EntryHi & 0xFF) | (address & (((unsigned int)TARGET_PAGE_MASK) << 1)); -#if defined(TARGET_MIPS64) - env->CP0_EntryHi &= env->SEGMask; - env->CP0_XContext = (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | - ((address & 0xC00000000000ULL) >> (55 - env->SEGBITS)) | - ((address & ((1ULL << env->SEGBITS) - 1) & 0xFFFFFFFFFFFFE000ULL) >> 9); -#endif - cs->exception_index = exception; - env->error_code = error_code; -} - -#if !defined(CONFIG_USER_ONLY) -hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); - hwaddr phys_addr; - int prot; - - if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, - ACCESS_INT) != 0) { - return -1; - } - return phys_addr; -} -#endif - -int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, - int mmu_idx) -{ - MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); - CPUMIPSState *env = &cpu->env; -#if !defined(CONFIG_USER_ONLY) - hwaddr physical; - int prot; - int access_type; -#endif - int ret = 0; - -#if 0 - log_cpu_state(cs, 0); -#endif - qemu_log("%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n", - __func__, env->active_tc.PC, address, rw, mmu_idx); - - /* data access */ -#if !defined(CONFIG_USER_ONLY) - /* XXX: put correct access by using cpu_restore_state() - correctly */ - access_type = ACCESS_INT; - ret = get_physical_address(env, &physical, &prot, - address, rw, access_type); - qemu_log("%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx - " prot %d\n", - __func__, address, ret, physical, prot); - if (ret == TLBRET_MATCH) { - if (mmu_idx < 0 || mmu_idx >= NB_MMU_MODES) { - raise_mmu_exception(env, address, rw, ret); - ret = 1; - } else { - tlb_set_page(cs, address & TARGET_PAGE_MASK, - physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, - mmu_idx, TARGET_PAGE_SIZE); - ret = 0; - } - } else if (ret < 0) -#endif - { - raise_mmu_exception(env, address, rw, ret); - ret = 1; - } - - return ret; -} - -#if !defined(CONFIG_USER_ONLY) -hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw) -{ - hwaddr 
physical; - int prot; - int access_type; - int ret = 0; - - /* data access */ - access_type = ACCESS_INT; - ret = get_physical_address(env, &physical, &prot, - address, rw, access_type); - if (ret != TLBRET_MATCH) { - raise_mmu_exception(env, address, rw, ret); - return -1LL; - } else { - return physical; - } -} -#endif - -static const char * const excp_names[EXCP_LAST + 1] = { - "reset", - "soft reset", - "debug single step", - "debug interrupt", - "debug data break load", - "debug data break store", - "non-maskable interrupt", - "machine check", - "interrupt", - "deferred watchpoint", - "debug instruction breakpoint", - "instruction fetch watchpoint", - "address error load", - "address error store", - "TLB refill", - "instruction bus error", - "debug breakpoint", - "syscall", - "break", - "coprocessor unusable", - "reserved instruction", - "arithmetic overflow", - "trap", - "floating point", - "data watchpoint", - "TLB modify", - "TLB load", - "TLB store", - "data bus error", - "thread", - "MDMX", - "precise coprocessor 2", - "cache error", - "DSP disabled", - "MSA disabled", - "MSA floating point", - "TLB execute-inhibit", - "TLB read-inhibit", -}; - -target_ulong exception_resume_pc (CPUMIPSState *env) -{ - target_ulong bad_pc; - target_ulong isa_mode; - - isa_mode = !!(env->hflags & MIPS_HFLAG_M16); - bad_pc = env->active_tc.PC | isa_mode; - if (env->hflags & MIPS_HFLAG_BMASK) { - /* If the exception was raised from a delay slot, come back to - the jump. */ - bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); - } - - return bad_pc; -} - -#if !defined(CONFIG_USER_ONLY) -static void set_hflags_for_handler (CPUMIPSState *env) -{ - /* Exception handlers are entered in 32-bit mode. */ - env->hflags &= ~(MIPS_HFLAG_M16); - /* ...except that microMIPS lets you choose. */ - if (env->insn_flags & ASE_MICROMIPS) { - env->hflags |= (!!(env->CP0_Config3 - & (1 << CP0C3_ISA_ON_EXC)) - << MIPS_HFLAG_M16_SHIFT); - } -} - -static inline void set_badinstr_registers(CPUMIPSState *env) -{ - if (env->hflags & MIPS_HFLAG_M16) { - /* TODO: add BadInstr support for microMIPS */ - return; - } - if (env->CP0_Config3 & (1 << CP0C3_BI)) { - env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC); - } - if ((env->CP0_Config3 & (1 << CP0C3_BP)) && - (env->hflags & MIPS_HFLAG_BMASK)) { - env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4); - } -} -#endif - -void mips_cpu_do_interrupt(CPUState *cs) -{ -#if !defined(CONFIG_USER_ONLY) - MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); - CPUMIPSState *env = &cpu->env; - bool update_badinstr = 0; - target_ulong offset; - int cause = -1; - const char *name; - - if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) { - if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) { - name = "unknown"; - } else { - name = excp_names[cs->exception_index]; - } - - qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " %s exception\n", - __func__, env->active_tc.PC, env->CP0_EPC, name); - } - if (cs->exception_index == EXCP_EXT_INTERRUPT && - (env->hflags & MIPS_HFLAG_DM)) { - cs->exception_index = EXCP_DINT; - } - offset = 0x180; - switch (cs->exception_index) { - case EXCP_DSS: - env->CP0_Debug |= 1 << CP0DB_DSS; - /* Debug single step cannot be raised inside a delay slot and - resume will always occur on the next instruction - (but we assume the pc has always been updated during - code translation). 
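exception_resume_pc() above folds the MIPS16/microMIPS ISA-mode flag into bit 0 of the saved PC and, when the exception hit a branch delay slot, rewinds to the branch itself so it is re-executed on return. A stand-alone sketch of that adjustment (the boolean parameters are illustrative stand-ins for the MIPS_HFLAG_M16/BMASK/B16 tests):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: PC to save in EPC/DEPC/ErrorEPC for an exception. */
static uint64_t resume_pc(uint64_t pc, bool isa16_mode,
                          bool in_delay_slot, bool branch16)
{
    uint64_t bad_pc = pc | (isa16_mode ? 1u : 0u); /* ISA mode in bit 0 */
    if (in_delay_slot)
        bad_pc -= branch16 ? 2 : 4;  /* back up to the branch insn */
    return bad_pc;
}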
*/ - env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16); - goto enter_debug_mode; - case EXCP_DINT: - env->CP0_Debug |= 1 << CP0DB_DINT; - goto set_DEPC; - case EXCP_DIB: - env->CP0_Debug |= 1 << CP0DB_DIB; - goto set_DEPC; - case EXCP_DBp: - env->CP0_Debug |= 1 << CP0DB_DBp; - goto set_DEPC; - case EXCP_DDBS: - env->CP0_Debug |= 1 << CP0DB_DDBS; - goto set_DEPC; - case EXCP_DDBL: - env->CP0_Debug |= 1 << CP0DB_DDBL; - set_DEPC: - env->CP0_DEPC = exception_resume_pc(env); - env->hflags &= ~MIPS_HFLAG_BMASK; - enter_debug_mode: - env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_64 | MIPS_HFLAG_CP0; - env->hflags &= ~(MIPS_HFLAG_KSU); - /* EJTAG probe trap enable is not implemented... */ - if (!(env->CP0_Status & (1 << CP0St_EXL))) - env->CP0_Cause &= ~(1U << CP0Ca_BD); - env->active_tc.PC = (int32_t)0xBFC00480; - set_hflags_for_handler(env); - break; - case EXCP_RESET: - cpu_reset(CPU(cpu)); - break; - case EXCP_SRESET: - env->CP0_Status |= (1 << CP0St_SR); - /* memset CP0_WatchLo which is fixed size array. */ - memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo)); - goto set_error_EPC; - case EXCP_NMI: - env->CP0_Status |= (1 << CP0St_NMI); - set_error_EPC: - env->CP0_ErrorEPC = exception_resume_pc(env); - env->hflags &= ~MIPS_HFLAG_BMASK; - env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV); - env->hflags |= MIPS_HFLAG_64 | MIPS_HFLAG_CP0; - env->hflags &= ~(MIPS_HFLAG_KSU); - if (!(env->CP0_Status & (1 << CP0St_EXL))) - env->CP0_Cause &= ~(1U << CP0Ca_BD); - env->active_tc.PC = (int32_t)0xBFC00000; - set_hflags_for_handler(env); - break; - case EXCP_EXT_INTERRUPT: - cause = 0; - if (env->CP0_Cause & (1 << CP0Ca_IV)) - offset = 0x200; - - if (env->CP0_Config3 & ((1 << CP0C3_VInt) | (1 << CP0C3_VEIC))) { - /* Vectored Interrupts. */ - unsigned int spacing; - unsigned int vector; - unsigned int pending = (env->CP0_Cause & CP0Ca_IP_mask) >> 8; - - pending &= env->CP0_Status >> 8; - /* Compute the Vector Spacing. */ - spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & ((1 << 6) - 1); - spacing <<= 5; - - if (env->CP0_Config3 & (1 << CP0C3_VInt)) { - /* For VInt mode, the MIPS computes the vector internally. */ - for (vector = 7; vector > 0; vector--) { - if (pending & (1 << vector)) { - /* Found it. */ - break; - } - } - } else { - /* For VEIC mode, the external interrupt controller feeds the - vector through the CP0Cause IP lines. 
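In VInt mode the offset above is derived from the highest pending, enabled IP bit and the IntCtl.VS field; the spacing is VS shifted left by 5, i.e. in 32-byte units. A hedged sketch of that computation (vint_offset is an illustrative name):

#include <stdint.h>

/* Sketch: vectored-interrupt handler offset. ip_pending/ip_enabled are
   the 8-bit Cause.IP and Status.IM fields; vs is the IntCtl.VS field. */
static uint32_t vint_offset(uint8_t ip_pending, uint8_t ip_enabled,
                            uint32_t vs)
{
    uint32_t spacing = vs << 5;              /* vector spacing, bytes */
    uint32_t pending = ip_pending & ip_enabled;
    int vector = 7;

    while (vector > 0 && !(pending & (1u << vector)))
        vector--;                            /* highest pending wins  */
    return 0x200 + (uint32_t)vector * spacing;
}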
*/ - vector = pending; - } - offset = 0x200 + vector * spacing; - } - goto set_EPC; - case EXCP_LTLBL: - cause = 1; - update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); - goto set_EPC; - case EXCP_TLBL: - cause = 2; - update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); - if ((env->error_code & EXCP_TLB_NOMATCH) && - !(env->CP0_Status & (1 << CP0St_EXL))) { -#if defined(TARGET_MIPS64) - int R = env->CP0_BadVAddr >> 62; - int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; - int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; - int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; - - if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) && - (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) - offset = 0x080; - else -#endif - offset = 0x000; - } - goto set_EPC; - case EXCP_TLBS: - cause = 3; - update_badinstr = 1; - if ((env->error_code & EXCP_TLB_NOMATCH) && - !(env->CP0_Status & (1 << CP0St_EXL))) { -#if defined(TARGET_MIPS64) - int R = env->CP0_BadVAddr >> 62; - int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; - int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; - int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; - - if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) && - (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) - offset = 0x080; - else -#endif - offset = 0x000; - } - goto set_EPC; - case EXCP_AdEL: - cause = 4; - update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); - goto set_EPC; - case EXCP_AdES: - cause = 5; - update_badinstr = 1; - goto set_EPC; - case EXCP_IBE: - cause = 6; - goto set_EPC; - case EXCP_DBE: - cause = 7; - goto set_EPC; - case EXCP_SYSCALL: - cause = 8; - update_badinstr = 1; - goto set_EPC; - case EXCP_BREAK: - cause = 9; - update_badinstr = 1; - goto set_EPC; - case EXCP_RI: - cause = 10; - update_badinstr = 1; - goto set_EPC; - case EXCP_CpU: - cause = 11; - update_badinstr = 1; - env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) | - (env->error_code << CP0Ca_CE); - goto set_EPC; - case EXCP_OVERFLOW: - cause = 12; - update_badinstr = 1; - goto set_EPC; - case EXCP_TRAP: - cause = 13; - update_badinstr = 1; - goto set_EPC; - case EXCP_MSAFPE: - cause = 14; - update_badinstr = 1; - goto set_EPC; - case EXCP_FPE: - cause = 15; - update_badinstr = 1; - goto set_EPC; - case EXCP_C2E: - cause = 18; - goto set_EPC; - case EXCP_TLBRI: - cause = 19; - update_badinstr = 1; - goto set_EPC; - case EXCP_TLBXI: - cause = 20; - goto set_EPC; - case EXCP_MSADIS: - cause = 21; - update_badinstr = 1; - goto set_EPC; - case EXCP_MDMX: - cause = 22; - goto set_EPC; - case EXCP_DWATCH: - cause = 23; - /* XXX: TODO: manage defered watch exceptions */ - goto set_EPC; - case EXCP_MCHECK: - cause = 24; - goto set_EPC; - case EXCP_THREAD: - cause = 25; - goto set_EPC; - case EXCP_DSPDIS: - cause = 26; - goto set_EPC; - case EXCP_CACHE: - cause = 30; - if (env->CP0_Status & (1 << CP0St_BEV)) { - offset = 0x100; - } else { - offset = 0x20000100; - } - set_EPC: - if (!(env->CP0_Status & (1 << CP0St_EXL))) { - env->CP0_EPC = exception_resume_pc(env); - if (update_badinstr) { - set_badinstr_registers(env); - } - if (env->hflags & MIPS_HFLAG_BMASK) { - env->CP0_Cause |= (1U << CP0Ca_BD); - } else { - env->CP0_Cause &= ~(1U << CP0Ca_BD); - } - env->CP0_Status |= (1 << CP0St_EXL); - env->hflags |= MIPS_HFLAG_64 | MIPS_HFLAG_CP0; - env->hflags &= ~(MIPS_HFLAG_KSU); - } - env->hflags &= ~MIPS_HFLAG_BMASK; - if (env->CP0_Status & (1 << CP0St_BEV)) { - env->active_tc.PC = (int32_t)0xBFC00200; - } else { - env->active_tc.PC = 
(int32_t)(env->CP0_EBase & ~0x3ff); - } - env->active_tc.PC += offset; - set_hflags_for_handler(env); - env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC); - break; - default: - qemu_log("Invalid MIPS exception %d. Exiting\n", cs->exception_index); - printf("Invalid MIPS exception %d. Exiting\n", cs->exception_index); - exit(1); - } - if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) { - qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n" - " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n", - __func__, env->active_tc.PC, env->CP0_EPC, cause, - env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr, - env->CP0_DEPC); - } -#endif - cs->exception_index = EXCP_NONE; -} - -bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request) // qq -{ - if (interrupt_request & CPU_INTERRUPT_HARD) { - MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); - CPUMIPSState *env = &cpu->env; - - if (cpu_mips_hw_interrupts_pending(env)) { - /* Raise it */ - cs->exception_index = EXCP_EXT_INTERRUPT; - env->error_code = 0; - mips_cpu_do_interrupt(cs); - return true; - } - } - return false; -} - -#if !defined(CONFIG_USER_ONLY) -void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra) -{ - MIPSCPU *cpu = mips_env_get_cpu(env); - CPUState *cs; - r4k_tlb_t *tlb; - target_ulong addr; - target_ulong end; - uint8_t ASID = env->CP0_EntryHi & 0xFF; - target_ulong mask; - - tlb = &env->tlb->mmu.r4k.tlb[idx]; - /* The qemu TLB is flushed when the ASID changes, so no need to - flush these entries again. */ - if (tlb->G == 0 && tlb->ASID != ASID) { - return; - } - - if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) { - /* For tlbwr, we can shadow the discarded entry into - a new (fake) TLB entry, as long as the guest can not - tell that it's there. */ - env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb; - env->tlb->tlb_in_use++; - return; - } - - /* 1k pages are not supported. */ - mask = tlb->PageMask | ~(((unsigned int)TARGET_PAGE_MASK) << 1); - if (tlb->V0) { - cs = CPU(cpu); - addr = tlb->VPN & ~mask; -#if defined(TARGET_MIPS64) - if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { - addr |= 0x3FFFFF0000000000ULL; - } -#endif - end = addr | (mask >> 1); - while (addr < end) { - tlb_flush_page(cs, addr); - addr += TARGET_PAGE_SIZE; - } - } - if (tlb->V1) { - cs = CPU(cpu); - addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1); -#if defined(TARGET_MIPS64) - if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { - addr |= 0x3FFFFF0000000000ULL; - } -#endif - end = addr | mask; - while (addr - 1 < end) { - tlb_flush_page(cs, addr); - addr += TARGET_PAGE_SIZE; - } - } -} -#endif diff --git a/qemu/target-mips/mips-defs.h b/qemu/target-mips/mips-defs.h deleted file mode 100644 index 17842274..00000000 --- a/qemu/target-mips/mips-defs.h +++ /dev/null @@ -1,91 +0,0 @@ -#if !defined (__QEMU_MIPS_DEFS_H__) -#define __QEMU_MIPS_DEFS_H__ - -/* If we want to use host float regs... */ -//#define USE_HOST_FLOAT_REGS - -/* Real pages are variable size... */ -#define TARGET_PAGE_BITS 12 -#define MIPS_TLB_MAX 128 - -#if defined(TARGET_MIPS64) -#define TARGET_LONG_BITS 64 -#define TARGET_PHYS_ADDR_SPACE_BITS 36 -#define TARGET_VIRT_ADDR_SPACE_BITS 42 -#else -#define TARGET_LONG_BITS 32 -#define TARGET_PHYS_ADDR_SPACE_BITS 36 -#define TARGET_VIRT_ADDR_SPACE_BITS 32 -#endif - -/* Masks used to mark instructions to indicate which ISA level they - were introduced in. 
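The dispatch that closes mips_cpu_do_interrupt() above picks the handler base from Status.BEV: the ROM vector 0xBFC00200 while boot-time exception vectors are in force, otherwise EBase with its low 10 bits cleared, plus the per-cause offset computed earlier. A compact sketch (handler_pc is an illustrative name):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: general exception handler address (non-reset, non-debug). */
static uint32_t handler_pc(bool bev, uint32_t ebase, uint32_t offset)
{
    uint32_t base = bev ? 0xBFC00200u : (ebase & ~0x3FFu);
    return base + offset;  /* offset: 0x000/0x080/0x100/0x180/0x200+n */
}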
*/ -#define ISA_MIPS1 0x00000001 -#define ISA_MIPS2 0x00000002 -#define ISA_MIPS3 0x00000004 -#define ISA_MIPS4 0x00000008 -#define ISA_MIPS5 0x00000010 -#define ISA_MIPS32 0x00000020 -#define ISA_MIPS32R2 0x00000040 -#define ISA_MIPS64 0x00000080 -#define ISA_MIPS64R2 0x00000100 -#define ISA_MIPS32R3 0x00000200 -#define ISA_MIPS64R3 0x00000400 -#define ISA_MIPS32R5 0x00000800 -#define ISA_MIPS64R5 0x00001000 -#define ISA_MIPS32R6 0x00002000 -#define ISA_MIPS64R6 0x00004000 - -/* MIPS ASEs. */ -#define ASE_MIPS16 0x00010000 -#define ASE_MIPS3D 0x00020000 -#define ASE_MDMX 0x00040000 -#define ASE_DSP 0x00080000 -#define ASE_DSPR2 0x00100000 -#define ASE_MT 0x00200000 -#define ASE_SMARTMIPS 0x00400000 -#define ASE_MICROMIPS 0x00800000 -#define ASE_MSA 0x01000000 - -/* Chip specific instructions. */ -#define INSN_LOONGSON2E 0x20000000 -#define INSN_LOONGSON2F 0x40000000 -#define INSN_VR54XX 0x80000000 - -/* MIPS CPU defines. */ -#define CPU_MIPS1 (ISA_MIPS1) -#define CPU_MIPS2 (CPU_MIPS1 | ISA_MIPS2) -#define CPU_MIPS3 (CPU_MIPS2 | ISA_MIPS3) -#define CPU_MIPS4 (CPU_MIPS3 | ISA_MIPS4) -#define CPU_VR54XX (CPU_MIPS4 | INSN_VR54XX) -#define CPU_LOONGSON2E (CPU_MIPS3 | INSN_LOONGSON2E) -#define CPU_LOONGSON2F (CPU_MIPS3 | INSN_LOONGSON2F) - -#define CPU_MIPS5 (CPU_MIPS4 | ISA_MIPS5) - -/* MIPS Technologies "Release 1" */ -#define CPU_MIPS32 (CPU_MIPS2 | ISA_MIPS32) -#define CPU_MIPS64 (CPU_MIPS5 | CPU_MIPS32 | ISA_MIPS64) - -/* MIPS Technologies "Release 2" */ -#define CPU_MIPS32R2 (CPU_MIPS32 | ISA_MIPS32R2) -#define CPU_MIPS64R2 (CPU_MIPS64 | CPU_MIPS32R2 | ISA_MIPS64R2) - -/* MIPS Technologies "Release 3" */ -#define CPU_MIPS32R3 (CPU_MIPS32R2 | ISA_MIPS32R3) -#define CPU_MIPS64R3 (CPU_MIPS64R2 | CPU_MIPS32R3 | ISA_MIPS64R3) - -/* MIPS Technologies "Release 5" */ -#define CPU_MIPS32R5 (CPU_MIPS32R3 | ISA_MIPS32R5) -#define CPU_MIPS64R5 (CPU_MIPS64R3 | CPU_MIPS32R5 | ISA_MIPS64R5) - -/* MIPS Technologies "Release 6" */ -#define CPU_MIPS32R6 (CPU_MIPS32R5 | ISA_MIPS32R6) -#define CPU_MIPS64R6 (CPU_MIPS64R5 | CPU_MIPS32R6 | ISA_MIPS64R6) - -/* Strictly follow the architecture standard: - - Disallow "special" instruction handling for PMON/SPIM. - Note that we still maintain Count/Compare to match the host clock. */ -//#define MIPS_STRICT_STANDARD 1 - -#endif /* !defined (__QEMU_MIPS_DEFS_H__) */ diff --git a/qemu/target-mips/msa_helper.c b/qemu/target-mips/msa_helper.c deleted file mode 100644 index dccadc46..00000000 --- a/qemu/target-mips/msa_helper.c +++ /dev/null @@ -1,3436 +0,0 @@ -/* - * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU. - * - * Copyright (c) 2014 Imagination Technologies - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
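The insn_flags masks in mips-defs.h above are cumulative: each CPU_MIPS* definition ORs in the level it was built from, so a feature test is a single bitwise AND. A sketch of how a consumer would query them (mask values copied from the header; supports() is an illustrative helper):

#include <stdbool.h>
#include <stdint.h>

#define ISA_MIPS32   0x00000020
#define ISA_MIPS32R2 0x00000040
#define ASE_DSP      0x00080000

/* Because CPU_MIPS32R2 = CPU_MIPS32 | ISA_MIPS32R2 | ..., testing the
   single ISA_MIPS32R2 bit answers "is Release 2 implemented?". */
static bool supports(uint32_t insn_flags, uint32_t required)
{
    return (insn_flags & required) == required;
}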
- */ - -#include "cpu.h" -#include "exec/helper-proto.h" - -/* Data format min and max values */ -#define DF_BITS(df) ((uint64_t)1 << ((df) + 3)) - -#define DF_MAX_INT(df) (int64_t)((1LL << (DF_BITS(df) - 1)) - 1) -#define M_MAX_INT(m) (int64_t)((1LL << ((m) - 1)) - 1) - -#define DF_MIN_INT(df) (int64_t)(-(1LL << (DF_BITS(df) - 1))) -#define M_MIN_INT(m) (int64_t)(-(1LL << ((m) - 1))) - -#define DF_MAX_UINT(df) (uint64_t)((0-1ULL) >> (64 - DF_BITS(df))) -#define M_MAX_UINT(m) (uint64_t)((0-1ULL) >> (64 - (m))) - -#define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df)) -#define SIGNED(x, df) \ - ((((int64_t)x) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df))) - -/* Element-by-element access macros */ -#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) - -static inline void msa_move_v(wr_t *pwd, wr_t *pws) -{ - uint32_t i; - - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - pwd->d[i] = pws->d[i]; - } -} - -#define MSA_FN_IMM8(FUNC, DEST, OPERATION) \ -void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \ - uint32_t i8) \ -{ \ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ - wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ - uint32_t i; \ - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ - DEST = OPERATION; \ - } \ -} - -MSA_FN_IMM8(andi_b, pwd->b[i], pws->b[i] & i8) -MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8) -MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8)) -MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8) - -#define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \ - UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df) -MSA_FN_IMM8(bmnzi_b, pwd->b[i], - BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE)) - -#define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \ - UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df) -MSA_FN_IMM8(bmzi_b, pwd->b[i], - BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE)) - -#define BIT_SELECT(dest, arg1, arg2, df) \ - UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df) -MSA_FN_IMM8(bseli_b, pwd->b[i], - BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE)) - -#undef MSA_FN_IMM8 - -#define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03)) - -void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t imm) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t wx, *pwx = &wx; - uint32_t i; - - switch (df) { - case DF_BYTE: - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { - pwx->b[i] = pws->b[SHF_POS(i, imm)]; - } - break; - case DF_HALF: - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { - pwx->h[i] = pws->h[SHF_POS(i, imm)]; - } - break; - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - pwx->w[i] = pws->w[SHF_POS(i, imm)]; - } - break; - default: - assert(0); - } - msa_move_v(pwd, pwx); -} - -#define MSA_FN_VECTOR(FUNC, DEST, OPERATION) \ -void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \ - uint32_t wt) \ -{ \ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ - wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ - uint32_t i; \ - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ - DEST = OPERATION; \ - } \ -} - -MSA_FN_VECTOR(and_v, pwd->d[i], pws->d[i] & pwt->d[i]) -MSA_FN_VECTOR(or_v, pwd->d[i], pws->d[i] | pwt->d[i]) -MSA_FN_VECTOR(nor_v, pwd->d[i], ~(pws->d[i] | pwt->d[i])) -MSA_FN_VECTOR(xor_v, pwd->d[i], pws->d[i] ^ pwt->d[i]) -MSA_FN_VECTOR(bmnz_v, pwd->d[i], - BIT_MOVE_IF_NOT_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE)) -MSA_FN_VECTOR(bmz_v, pwd->d[i], - BIT_MOVE_IF_ZERO(pwd->d[i], pws->d[i], pwt->d[i], 
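The BMNZ/BMZ/BSEL family above is three views of one per-bit merge: every destination bit is taken from one of two sources according to a mask. A sketch with 8-bit operands standing in for a single MSA byte lane (function names are illustrative):

#include <stdint.h>

static uint8_t bmnz(uint8_t dest, uint8_t src, uint8_t mask)
{   /* bits move from src where mask is 1 (BIT_MOVE_IF_NOT_ZERO) */
    return (uint8_t)((dest & ~mask) | (src & mask));
}

static uint8_t bmz(uint8_t dest, uint8_t src, uint8_t mask)
{   /* bits move from src where mask is 0 (BIT_MOVE_IF_ZERO) */
    return (uint8_t)((dest & mask) | (src & ~mask));
}

static uint8_t bsel(uint8_t sel, uint8_t a, uint8_t b)
{   /* per-bit select: b where sel is 1, else a (BIT_SELECT) */
    return (uint8_t)((a & ~sel) | (b & sel));
}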
DF_DOUBLE)) -MSA_FN_VECTOR(bsel_v, pwd->d[i], - BIT_SELECT(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE)) -#undef BIT_MOVE_IF_NOT_ZERO -#undef BIT_MOVE_IF_ZERO -#undef BIT_SELECT -#undef MSA_FN_VECTOR - -static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return arg1 + arg2; -} - -static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return arg1 - arg2; -} - -static inline int64_t msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return arg1 == arg2 ? -1 : 0; -} - -static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return arg1 <= arg2 ? -1 : 0; -} - -static inline int64_t msa_cle_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - return u_arg1 <= u_arg2 ? -1 : 0; -} - -static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return arg1 < arg2 ? -1 : 0; -} - -static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - return u_arg1 < u_arg2 ? -1 : 0; -} - -static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return arg1 > arg2 ? arg1 : arg2; -} - -static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - return u_arg1 > u_arg2 ? arg1 : arg2; -} - -static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return arg1 < arg2 ? arg1 : arg2; -} - -static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - return u_arg1 < u_arg2 ? 
arg1 : arg2; -} - -#define MSA_BINOP_IMM_DF(helper, func) \ -void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \ - uint32_t wd, uint32_t ws, int32_t u5) \ -{ \ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ - wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ - uint32_t i; \ - \ - switch (df) { \ - case DF_BYTE: \ - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ - pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \ - } \ - break; \ - case DF_HALF: \ - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ - pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \ - } \ - break; \ - case DF_WORD: \ - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ - pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \ - } \ - break; \ - case DF_DOUBLE: \ - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ - pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \ - } \ - break; \ - default: \ - assert(0); \ - } \ -} - -MSA_BINOP_IMM_DF(addvi, addv) -MSA_BINOP_IMM_DF(subvi, subv) -MSA_BINOP_IMM_DF(ceqi, ceq) -MSA_BINOP_IMM_DF(clei_s, cle_s) -MSA_BINOP_IMM_DF(clei_u, cle_u) -MSA_BINOP_IMM_DF(clti_s, clt_s) -MSA_BINOP_IMM_DF(clti_u, clt_u) -MSA_BINOP_IMM_DF(maxi_s, max_s) -MSA_BINOP_IMM_DF(maxi_u, max_u) -MSA_BINOP_IMM_DF(mini_s, min_s) -MSA_BINOP_IMM_DF(mini_u, min_u) -#undef MSA_BINOP_IMM_DF - -void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - int32_t s10) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - uint32_t i; - - switch (df) { - case DF_BYTE: - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { - pwd->b[i] = (int8_t)s10; - } - break; - case DF_HALF: - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { - pwd->h[i] = (int16_t)s10; - } - break; - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - pwd->w[i] = (int32_t)s10; - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - pwd->d[i] = (int64_t)s10; - } - break; - default: - assert(0); - } -} - -/* Data format bit position and unsigned values */ -#define BIT_POSITION(x, df) ((uint64_t)(x) % DF_BITS(df)) - -static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int32_t b_arg2 = BIT_POSITION(arg2, df); - return arg1 << b_arg2; -} - -static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int32_t b_arg2 = BIT_POSITION(arg2, df); - return arg1 >> b_arg2; -} - -static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - int32_t b_arg2 = BIT_POSITION(arg2, df); - return u_arg1 >> b_arg2; -} - -static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int32_t b_arg2 = BIT_POSITION(arg2, df); - return UNSIGNED(arg1 & (~(1LL << b_arg2)), df); -} - -static inline int64_t msa_bset_df(uint32_t df, int64_t arg1, - int64_t arg2) -{ - int32_t b_arg2 = BIT_POSITION(arg2, df); - return UNSIGNED(arg1 | (1LL << b_arg2), df); -} - -static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int32_t b_arg2 = BIT_POSITION(arg2, df); - return UNSIGNED(arg1 ^ (1LL << b_arg2), df); -} - -static inline int64_t msa_binsl_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_dest = UNSIGNED(dest, df); - int32_t sh_d = BIT_POSITION(arg2, df) + 1; - int32_t sh_a = DF_BITS(df) - sh_d; - if (sh_d == DF_BITS(df)) { - return u_arg1; - } else { - return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) | - UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df); - } -} - -static inline int64_t msa_binsr_df(uint32_t df, int64_t dest, int64_t 
arg1, - int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_dest = UNSIGNED(dest, df); - int32_t sh_d = BIT_POSITION(arg2, df) + 1; - int32_t sh_a = DF_BITS(df) - sh_d; - if (sh_d == DF_BITS(df)) { - return u_arg1; - } else { - return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) | - UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df); - } -} - -static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, uint32_t m) -{ - return arg < M_MIN_INT(m+1) ? M_MIN_INT(m+1) : - arg > M_MAX_INT(m+1) ? M_MAX_INT(m+1) : - arg; -} - -static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m) -{ - uint64_t u_arg = UNSIGNED(arg, df); - return u_arg < M_MAX_UINT(m+1) ? u_arg : - M_MAX_UINT(m+1); -} - -static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int32_t b_arg2 = BIT_POSITION(arg2, df); - if (b_arg2 == 0) { - return arg1; - } else { - int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1; - return (arg1 >> b_arg2) + r_bit; - } -} - -static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - int32_t b_arg2 = BIT_POSITION(arg2, df); - if (b_arg2 == 0) { - return u_arg1; - } else { - uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1; - return (u_arg1 >> b_arg2) + r_bit; - } -} - -#define MSA_BINOP_IMMU_DF(helper, func) \ -void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \ - uint32_t ws, uint32_t u5) \ -{ \ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ - wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ - uint32_t i; \ - \ - switch (df) { \ - case DF_BYTE: \ - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ - pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \ - } \ - break; \ - case DF_HALF: \ - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ - pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \ - } \ - break; \ - case DF_WORD: \ - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ - pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \ - } \ - break; \ - case DF_DOUBLE: \ - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ - pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \ - } \ - break; \ - default: \ - assert(0); \ - } \ -} - -MSA_BINOP_IMMU_DF(slli, sll) -MSA_BINOP_IMMU_DF(srai, sra) -MSA_BINOP_IMMU_DF(srli, srl) -MSA_BINOP_IMMU_DF(bclri, bclr) -MSA_BINOP_IMMU_DF(bseti, bset) -MSA_BINOP_IMMU_DF(bnegi, bneg) -MSA_BINOP_IMMU_DF(sat_s, sat_s) -MSA_BINOP_IMMU_DF(sat_u, sat_u) -MSA_BINOP_IMMU_DF(srari, srar) -MSA_BINOP_IMMU_DF(srlri, srlr) -#undef MSA_BINOP_IMMU_DF - -#define MSA_TEROP_IMMU_DF(helper, func) \ -void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \ - uint32_t wd, uint32_t ws, uint32_t u5) \ -{ \ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ - wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ - uint32_t i; \ - \ - switch (df) { \ - case DF_BYTE: \ - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ - pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \ - u5); \ - } \ - break; \ - case DF_HALF: \ - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ - pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \ - u5); \ - } \ - break; \ - case DF_WORD: \ - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ - pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \ - u5); \ - } \ - break; \ - case DF_DOUBLE: \ - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ - pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \ - u5); \ - } \ - break; \ - default: \ - assert(0); \ - } \ -} - -MSA_TEROP_IMMU_DF(binsli, binsl) -MSA_TEROP_IMMU_DF(binsri, 
binsr) -#undef MSA_TEROP_IMMU_DF - -static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; - uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; - return abs_arg1 > abs_arg2 ? arg1 : arg2; -} - -static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; - uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; - return abs_arg1 < abs_arg2 ? arg1 : arg2; -} - -static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; - uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; - return abs_arg1 + abs_arg2; -} - -static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t max_int = (uint64_t)DF_MAX_INT(df); - uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; - uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; - if (abs_arg1 > max_int || abs_arg2 > max_int) { - return (int64_t)max_int; - } else { - return (abs_arg1 < max_int - abs_arg2) ? abs_arg1 + abs_arg2 : max_int; - } -} - -static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int64_t max_int = DF_MAX_INT(df); - int64_t min_int = DF_MIN_INT(df); - if (arg1 < 0) { - return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int; - } else { - return (arg2 < max_int - arg1) ? arg1 + arg2 : max_int; - } -} - -static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) -{ - uint64_t max_uint = DF_MAX_UINT(df); - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - return (u_arg1 < max_uint - u_arg2) ? u_arg1 + u_arg2 : max_uint; -} - -static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - /* signed shift */ - return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1); -} - -static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - /* unsigned shift */ - return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1); -} - -static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - /* signed shift */ - return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1); -} - -static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - /* unsigned shift */ - return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1); -} - -static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int64_t max_int = DF_MAX_INT(df); - int64_t min_int = DF_MIN_INT(df); - if (arg2 > 0) { - return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int; - } else { - return (arg1 < max_int + arg2) ? arg1 - arg2 : max_int; - } -} - -static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0; -} - -static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t max_uint = DF_MAX_UINT(df); - if (arg2 >= 0) { - uint64_t u_arg2 = (uint64_t)arg2; - return (u_arg1 > u_arg2) ? - (int64_t)(u_arg1 - u_arg2) : - 0; - } else { - uint64_t u_arg2 = (uint64_t)(-arg2); - return (u_arg1 < max_uint - u_arg2) ? 
- (int64_t)(u_arg1 + u_arg2) : - (int64_t)max_uint; - } -} - -static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - int64_t max_int = DF_MAX_INT(df); - int64_t min_int = DF_MIN_INT(df); - if (u_arg1 > u_arg2) { - return u_arg1 - u_arg2 < (uint64_t)max_int ? - (int64_t)(u_arg1 - u_arg2) : - max_int; - } else { - return u_arg2 - u_arg1 < (uint64_t)(-min_int) ? - (int64_t)(u_arg1 - u_arg2) : - min_int; - } -} - -static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - /* signed compare */ - return (arg1 < arg2) ? - (uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2); -} - -static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - /* unsigned compare */ - return (u_arg1 < u_arg2) ? - (uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2); -} - -static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return arg1 * arg2; -} - -static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - if (arg1 == DF_MIN_INT(df) && arg2 == -1) { - return DF_MIN_INT(df); - } - return arg2 ? arg1 / arg2 : 0; -} - -static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - return u_arg2 ? u_arg1 / u_arg2 : 0; -} - -static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - if (arg1 == DF_MIN_INT(df) && arg2 == -1) { - return 0; - } - return arg2 ? arg1 % arg2 : 0; -} - -static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - uint64_t u_arg1 = UNSIGNED(arg1, df); - uint64_t u_arg2 = UNSIGNED(arg2, df); - return u_arg2 ? 
u_arg1 % u_arg2 : 0; -} - -#define SIGNED_EVEN(a, df) \ - ((((int64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2)) - -#define UNSIGNED_EVEN(a, df) \ - ((((uint64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2)) - -#define SIGNED_ODD(a, df) \ - ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2)) - -#define UNSIGNED_ODD(a, df) \ - ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2)) - -#define SIGNED_EXTRACT(e, o, a, df) \ - do { \ - e = SIGNED_EVEN(a, df); \ - o = SIGNED_ODD(a, df); \ - } while (0); - -#define UNSIGNED_EXTRACT(e, o, a, df) \ - do { \ - e = UNSIGNED_EVEN(a, df); \ - o = UNSIGNED_ODD(a, df); \ - } while (0); - -static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int64_t even_arg1; - int64_t even_arg2; - int64_t odd_arg1; - int64_t odd_arg2; - SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); - SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); - return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); -} - -static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int64_t even_arg1; - int64_t even_arg2; - int64_t odd_arg1; - int64_t odd_arg2; - UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); - UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); - return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); -} - -#define CONCATENATE_AND_SLIDE(s, k) \ - do { \ - for (i = 0; i < s; i++) { \ - v[i] = pws->b[s * k + i]; \ - v[i + s] = pwd->b[s * k + i]; \ - } \ - for (i = 0; i < s; i++) { \ - pwd->b[s * k + i] = v[i + n]; \ - } \ - } while (0) - -static inline void msa_sld_df(uint32_t df, wr_t *pwd, - wr_t *pws, target_ulong rt) -{ - uint32_t n = rt % DF_ELEMENTS(df); - uint8_t v[64]; - uint32_t i, k; - - switch (df) { - case DF_BYTE: - CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0); - break; - case DF_HALF: - for (k = 0; k < 2; k++) { - CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k); - } - break; - case DF_WORD: - for (k = 0; k < 4; k++) { - CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k); - } - break; - case DF_DOUBLE: - for (k = 0; k < 8; k++) { - CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k); - } - break; - default: - assert(0); - } -} - -static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df); -} - -static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df); -} - -static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df); -} - -static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df); -} - -static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int64_t q_min = DF_MIN_INT(df); - int64_t q_max = DF_MAX_INT(df); - - if (arg1 == q_min && arg2 == q_min) { - return q_max; - } - return (arg1 * arg2) >> (DF_BITS(df) - 1); -} - -static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2) -{ - int64_t q_min = DF_MIN_INT(df); - int64_t q_max = DF_MAX_INT(df); - int64_t r_bit = (int64_t)1 << (DF_BITS(df) - 2); - - if (arg1 == q_min && arg2 == q_min) { - return q_max; - } - return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1); -} - -#define MSA_BINOP_DF(func) \ -void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \ - uint32_t wd, uint32_t ws, uint32_t wt) \ -{ \ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ - wr_t *pws = 
&(env->active_fpu.fpr[ws].wr); \ - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ - uint32_t i; \ - \ - switch (df) { \ - case DF_BYTE: \ - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ - pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], pwt->b[i]); \ - } \ - break; \ - case DF_HALF: \ - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ - pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], pwt->h[i]); \ - } \ - break; \ - case DF_WORD: \ - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ - pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], pwt->w[i]); \ - } \ - break; \ - case DF_DOUBLE: \ - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ - pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], pwt->d[i]); \ - } \ - break; \ - default: \ - assert(0); \ - } \ -} - -MSA_BINOP_DF(sll) -MSA_BINOP_DF(sra) -MSA_BINOP_DF(srl) -MSA_BINOP_DF(bclr) -MSA_BINOP_DF(bset) -MSA_BINOP_DF(bneg) -MSA_BINOP_DF(addv) -MSA_BINOP_DF(subv) -MSA_BINOP_DF(max_s) -MSA_BINOP_DF(max_u) -MSA_BINOP_DF(min_s) -MSA_BINOP_DF(min_u) -MSA_BINOP_DF(max_a) -MSA_BINOP_DF(min_a) -MSA_BINOP_DF(ceq) -MSA_BINOP_DF(clt_s) -MSA_BINOP_DF(clt_u) -MSA_BINOP_DF(cle_s) -MSA_BINOP_DF(cle_u) -MSA_BINOP_DF(add_a) -MSA_BINOP_DF(adds_a) -MSA_BINOP_DF(adds_s) -MSA_BINOP_DF(adds_u) -MSA_BINOP_DF(ave_s) -MSA_BINOP_DF(ave_u) -MSA_BINOP_DF(aver_s) -MSA_BINOP_DF(aver_u) -MSA_BINOP_DF(subs_s) -MSA_BINOP_DF(subs_u) -MSA_BINOP_DF(subsus_u) -MSA_BINOP_DF(subsuu_s) -MSA_BINOP_DF(asub_s) -MSA_BINOP_DF(asub_u) -MSA_BINOP_DF(mulv) -MSA_BINOP_DF(div_s) -MSA_BINOP_DF(div_u) -MSA_BINOP_DF(mod_s) -MSA_BINOP_DF(mod_u) -MSA_BINOP_DF(dotp_s) -MSA_BINOP_DF(dotp_u) -MSA_BINOP_DF(srar) -MSA_BINOP_DF(srlr) -MSA_BINOP_DF(hadd_s) -MSA_BINOP_DF(hadd_u) -MSA_BINOP_DF(hsub_s) -MSA_BINOP_DF(hsub_u) - -MSA_BINOP_DF(mul_q) -MSA_BINOP_DF(mulr_q) -#undef MSA_BINOP_DF - -void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t rt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - - msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]); -} - -static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - return dest + arg1 * arg2; -} - -static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - return dest - arg1 * arg2; -} - -static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - int64_t even_arg1; - int64_t even_arg2; - int64_t odd_arg1; - int64_t odd_arg2; - SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); - SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); - return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); -} - -static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - int64_t even_arg1; - int64_t even_arg2; - int64_t odd_arg1; - int64_t odd_arg2; - UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); - UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); - return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); -} - -static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - int64_t even_arg1; - int64_t even_arg2; - int64_t odd_arg1; - int64_t odd_arg2; - SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); - SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); - return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2)); -} - -static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - int64_t even_arg1; - int64_t even_arg2; - int64_t odd_arg1; - int64_t odd_arg2; - 
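The DOTP/DPADD/DPSUB helpers above treat each element as a pair of half-width lanes: the even (low) and odd (high) halves are sign- or zero-extended, multiplied pairwise, and summed. A sketch for one 32-bit element built from two signed 16-bit lanes (dotp_s_word is an illustrative name):

#include <stdint.h>

/* Sketch: dotp_s on a 32-bit element = two signed 16-bit halves. */
static int32_t dotp_s_word(int32_t a, int32_t b)
{
    int32_t ae = (int16_t)(a & 0xFFFF);  /* even (low) half */
    int32_t ao = (int16_t)(a >> 16);     /* odd (high) half */
    int32_t be = (int16_t)(b & 0xFFFF);
    int32_t bo = (int16_t)(b >> 16);
    return ae * be + ao * bo;            /* dpadd_s adds this to dest */
}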
UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); - UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); - return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2)); -} - -static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - int64_t q_prod, q_ret; - - int64_t q_max = DF_MAX_INT(df); - int64_t q_min = DF_MIN_INT(df); - - q_prod = arg1 * arg2; - q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1); - - return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; -} - -static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - int64_t q_prod, q_ret; - - int64_t q_max = DF_MAX_INT(df); - int64_t q_min = DF_MIN_INT(df); - - q_prod = arg1 * arg2; - q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1); - - return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; -} - -static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - int64_t q_prod, q_ret; - - int64_t q_max = DF_MAX_INT(df); - int64_t q_min = DF_MIN_INT(df); - int64_t r_bit = (int64_t)1 << (DF_BITS(df) - 2); - - q_prod = arg1 * arg2; - q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1); - - return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; -} - -static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1, - int64_t arg2) -{ - int64_t q_prod, q_ret; - - int64_t q_max = DF_MAX_INT(df); - int64_t q_min = DF_MIN_INT(df); - int64_t r_bit = (int64_t)1 << (DF_BITS(df) - 2); - - q_prod = arg1 * arg2; - q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1); - - return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; -} - -#define MSA_TEROP_DF(func) \ -void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \ - uint32_t ws, uint32_t wt) \ -{ \ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ - wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ - uint32_t i; \ - \ - switch (df) { \ - case DF_BYTE: \ - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ - pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \ - pwt->b[i]); \ - } \ - break; \ - case DF_HALF: \ - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ - pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \ - pwt->h[i]); \ - } \ - break; \ - case DF_WORD: \ - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ - pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \ - pwt->w[i]); \ - } \ - break; \ - case DF_DOUBLE: \ - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ - pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \ - pwt->d[i]); \ - } \ - break; \ - default: \ - assert(0); \ - } \ -} - -MSA_TEROP_DF(maddv) -MSA_TEROP_DF(msubv) -MSA_TEROP_DF(dpadd_s) -MSA_TEROP_DF(dpadd_u) -MSA_TEROP_DF(dpsub_s) -MSA_TEROP_DF(dpsub_u) -MSA_TEROP_DF(binsl) -MSA_TEROP_DF(binsr) -MSA_TEROP_DF(madd_q) -MSA_TEROP_DF(msub_q) -MSA_TEROP_DF(maddr_q) -MSA_TEROP_DF(msubr_q) -#undef MSA_TEROP_DF - -static inline void msa_splat_df(uint32_t df, wr_t *pwd, - wr_t *pws, target_ulong rt) -{ - uint32_t n = rt % DF_ELEMENTS(df); - uint32_t i; - - switch (df) { - case DF_BYTE: - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { - pwd->b[i] = pws->b[n]; - } - break; - case DF_HALF: - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { - pwd->h[i] = pws->h[n]; - } - break; - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - pwd->w[i] = pws->w[n]; - } - break; - case DF_DOUBLE: - for (i = 0; i < 
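The *_q helpers above implement fractional (Q-format) arithmetic: products are double-width, shifted back down by DF_BITS-1, and the accumulating forms saturate the result. A hedged sketch of madd_q for the 16-bit (Q15) lane size, using int32_t as the wide type and assuming arithmetic right shift of negatives (madd_q15 is an illustrative name):

#include <stdint.h>

/* Sketch: Q15 multiply-accumulate with saturation (cf. msa_madd_q_df). */
static int16_t madd_q15(int16_t dest, int16_t a, int16_t b)
{
    int32_t prod = (int32_t)a * (int32_t)b;       /* Q30 product   */
    int32_t acc  = (int32_t)dest * 32768 + prod;  /* widen dest    */
    int32_t r    = acc >> 15;                     /* back to Q15   */
    if (r > INT16_MAX) return INT16_MAX;          /* saturate high */
    if (r < INT16_MIN) return INT16_MIN;          /* saturate low  */
    return (int16_t)r;
}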
DF_ELEMENTS(DF_DOUBLE); i++) { - pwd->d[i] = pws->d[n]; - } - break; - default: - assert(0); - } -} - -void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t rt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - - msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]); -} - -#define MSA_DO_B MSA_DO(b) -#define MSA_DO_H MSA_DO(h) -#define MSA_DO_W MSA_DO(w) -#define MSA_DO_D MSA_DO(d) - -#define MSA_LOOP_B MSA_LOOP(B) -#define MSA_LOOP_H MSA_LOOP(H) -#define MSA_LOOP_W MSA_LOOP(W) -#define MSA_LOOP_D MSA_LOOP(D) - -#define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE) -#define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF) -#define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD) -#define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE) - -#define MSA_LOOP(DF) \ - for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \ - MSA_DO_ ## DF \ - } - -#define MSA_FN_DF(FUNC) \ -void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \ - uint32_t ws, uint32_t wt) \ -{ \ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ - wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ - wr_t wx, *pwx = &wx; \ - uint32_t i; \ - switch (df) { \ - case DF_BYTE: \ - MSA_LOOP_B \ - break; \ - case DF_HALF: \ - MSA_LOOP_H \ - break; \ - case DF_WORD: \ - MSA_LOOP_W \ - break; \ - case DF_DOUBLE: \ - MSA_LOOP_D \ - break; \ - default: \ - assert(0); \ - } \ - msa_move_v(pwd, pwx); \ -} - -#define MSA_LOOP_COND(DF) \ - (DF_ELEMENTS(DF) / 2) - -#define Rb(pwr, i) (pwr->b[i]) -#define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE)/2]) -#define Rh(pwr, i) (pwr->h[i]) -#define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF)/2]) -#define Rw(pwr, i) (pwr->w[i]) -#define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD)/2]) -#define Rd(pwr, i) (pwr->d[i]) -#define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE)/2]) - -#define MSA_DO(DF) \ - do { \ - R##DF(pwx, i) = pwt->DF[2*i]; \ - L##DF(pwx, i) = pws->DF[2*i]; \ - } while (0); -MSA_FN_DF(pckev_df) -#undef MSA_DO - -#define MSA_DO(DF) \ - do { \ - R##DF(pwx, i) = pwt->DF[2*i+1]; \ - L##DF(pwx, i) = pws->DF[2*i+1]; \ - } while (0); -MSA_FN_DF(pckod_df) -#undef MSA_DO - -#define MSA_DO(DF) \ - do { \ - pwx->DF[2*i] = L##DF(pwt, i); \ - pwx->DF[2*i+1] = L##DF(pws, i); \ - } while (0); -MSA_FN_DF(ilvl_df) -#undef MSA_DO - -#define MSA_DO(DF) \ - do { \ - pwx->DF[2*i] = R##DF(pwt, i); \ - pwx->DF[2*i+1] = R##DF(pws, i); \ - } while (0); -MSA_FN_DF(ilvr_df) -#undef MSA_DO - -#define MSA_DO(DF) \ - do { \ - pwx->DF[2*i] = pwt->DF[2*i]; \ - pwx->DF[2*i+1] = pws->DF[2*i]; \ - } while (0); -MSA_FN_DF(ilvev_df) -#undef MSA_DO - -#define MSA_DO(DF) \ - do { \ - pwx->DF[2*i] = pwt->DF[2*i+1]; \ - pwx->DF[2*i+1] = pws->DF[2*i+1]; \ - } while (0); -MSA_FN_DF(ilvod_df) -#undef MSA_DO -#undef MSA_LOOP_COND - -#define MSA_LOOP_COND(DF) \ - (DF_ELEMENTS(DF)) - -#define MSA_DO(DF) \ - do { \ - uint32_t n = DF_ELEMENTS(df); \ - uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \ - pwx->DF[i] = \ - (pwd->DF[i] & 0xc0) ? 0 : k < n ? 
pwt->DF[k] : pws->DF[k - n]; \ - } while (0); -MSA_FN_DF(vshf_df) -#undef MSA_DO -#undef MSA_LOOP_COND -#undef MSA_FN_DF - -void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t n) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - - msa_sld_df(df, pwd, pws, n); -} - -void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t n) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - - msa_splat_df(df, pwd, pws, n); -} - -void helper_msa_copy_s_df(CPUMIPSState *env, uint32_t df, uint32_t rd, - uint32_t ws, uint32_t n) -{ - n %= DF_ELEMENTS(df); - - switch (df) { - case DF_BYTE: - env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n]; - break; - case DF_HALF: - env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n]; - break; - case DF_WORD: - env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n]; - break; -#ifdef TARGET_MIPS64 - case DF_DOUBLE: - env->active_tc.gpr[rd] = (int64_t)env->active_fpu.fpr[ws].wr.d[n]; - break; -#endif - default: - assert(0); - } -} - -void helper_msa_copy_u_df(CPUMIPSState *env, uint32_t df, uint32_t rd, - uint32_t ws, uint32_t n) -{ - n %= DF_ELEMENTS(df); - - switch (df) { - case DF_BYTE: - env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n]; - break; - case DF_HALF: - env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n]; - break; - case DF_WORD: - env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n]; - break; -#ifdef TARGET_MIPS64 - case DF_DOUBLE: - env->active_tc.gpr[rd] = (uint64_t)env->active_fpu.fpr[ws].wr.d[n]; - break; -#endif - default: - assert(0); - } -} - -void helper_msa_insert_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t rs_num, uint32_t n) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - target_ulong rs = env->active_tc.gpr[rs_num]; - - switch (df) { - case DF_BYTE: - pwd->b[n] = (int8_t)rs; - break; - case DF_HALF: - pwd->h[n] = (int16_t)rs; - break; - case DF_WORD: - pwd->w[n] = (int32_t)rs; - break; - case DF_DOUBLE: - pwd->d[n] = (int64_t)rs; - break; - default: - assert(0); - } -} - -void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t n) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - - switch (df) { - case DF_BYTE: - pwd->b[n] = (int8_t)pws->b[0]; - break; - case DF_HALF: - pwd->h[n] = (int16_t)pws->h[0]; - break; - case DF_WORD: - pwd->w[n] = (int32_t)pws->w[0]; - break; - case DF_DOUBLE: - pwd->d[n] = (int64_t)pws->d[0]; - break; - default: - assert(0); - } -} - -void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd) -{ - switch (cd) { - case 0: - break; - case 1: - env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK; - /* set float_status rounding mode */ - set_float_rounding_mode( - ieee_rm[(env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM], - &env->active_tc.msa_fp_status); - /* set float_status flush modes */ - set_flush_to_zero( - (env->active_tc.msacsr & MSACSR_FS_MASK) != 0 ? 1 : 0, - &env->active_tc.msa_fp_status); - set_flush_inputs_to_zero( - (env->active_tc.msacsr & MSACSR_FS_MASK) != 0 ? 
1 : 0, - &env->active_tc.msa_fp_status); - /* check exception */ - if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED) - & GET_FP_CAUSE(env->active_tc.msacsr)) { - helper_raise_exception(env, EXCP_MSAFPE); - } - break; - } -} - -target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs) -{ - switch (cs) { - case 0: - return env->msair; - case 1: - return env->active_tc.msacsr & MSACSR_MASK; - } - return 0; -} - -void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - - msa_move_v(pwd, pws); -} - -static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg) -{ - uint64_t x; - - x = UNSIGNED(arg, df); - - x = (x & 0x5555555555555555ULL) + ((x >> 1) & 0x5555555555555555ULL); - x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL); - x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL); - x = (x & 0x00FF00FF00FF00FFULL) + ((x >> 8) & 0x00FF00FF00FF00FFULL); - x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL); - x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32)); - - return x; -} - -static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg) -{ - uint64_t x, y; - int n, c; - - x = UNSIGNED(arg, df); - n = DF_BITS(df); - c = DF_BITS(df) / 2; - - do { - y = x >> c; - if (y != 0) { - n = n - c; - x = y; - } - c = c >> 1; - } while (c != 0); - - return n - x; -} - -static inline int64_t msa_nloc_df(uint32_t df, int64_t arg) -{ - return msa_nlzc_df(df, UNSIGNED((~arg), df)); -} - -void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t rs) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - uint32_t i; - - switch (df) { - case DF_BYTE: - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { - pwd->b[i] = (int8_t)env->active_tc.gpr[rs]; - } - break; - case DF_HALF: - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { - pwd->h[i] = (int16_t)env->active_tc.gpr[rs]; - } - break; - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - pwd->w[i] = (int32_t)env->active_tc.gpr[rs]; - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - pwd->d[i] = (int64_t)env->active_tc.gpr[rs]; - } - break; - default: - assert(0); - } -} - -#define MSA_UNOP_DF(func) \ -void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \ - uint32_t wd, uint32_t ws) \ -{ \ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ - wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ - uint32_t i; \ - \ - switch (df) { \ - case DF_BYTE: \ - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ - pwd->b[i] = msa_ ## func ## _df(df, pws->b[i]); \ - } \ - break; \ - case DF_HALF: \ - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ - pwd->h[i] = msa_ ## func ## _df(df, pws->h[i]); \ - } \ - break; \ - case DF_WORD: \ - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ - pwd->w[i] = msa_ ## func ## _df(df, pws->w[i]); \ - } \ - break; \ - case DF_DOUBLE: \ - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ - pwd->d[i] = msa_ ## func ## _df(df, pws->d[i]); \ - } \ - break; \ - default: \ - assert(0); \ - } \ -} - -MSA_UNOP_DF(nlzc) -MSA_UNOP_DF(nloc) -MSA_UNOP_DF(pcnt) -#undef MSA_UNOP_DF - -#define FLOAT_ONE32 make_float32(0x3f8 << 20) -#define FLOAT_ONE64 make_float64(0x3ffULL << 52) - -#define FLOAT_SNAN16 (float16_default_nan ^ 0x0220) - /* 0x7c20 */ -#define FLOAT_SNAN32 (float32_default_nan ^ 0x00400020) - /* 0x7f800020 */ -#define FLOAT_SNAN64 (float64_default_nan ^ 0x0008000000000020ULL) - /* 0x7ff0000000000020 */ - -static inline void 
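msa_pcnt_df and msa_nlzc_df above are pure bit tricks: a pairwise mask-and-add population count, and a leading-zero count by binary search over the bit width. A self-contained sketch of both (function names hypothetical):

#include <stdint.h>
#include <assert.h>

/* Population count by pairwise mask-and-add, as in msa_pcnt_df:
   each step sums adjacent groups of 1, 2, 4, ... bits. */
static uint64_t pcnt64(uint64_t x)
{
    x = (x & 0x5555555555555555ULL) + ((x >> 1) & 0x5555555555555555ULL);
    x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
    x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL);
    x = (x & 0x00FF00FF00FF00FFULL) + ((x >> 8) & 0x00FF00FF00FF00FFULL);
    x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL);
    return (x & 0x00000000FFFFFFFFULL) + (x >> 32);
}

/* Count leading zeros in an n-bit value by halving the search window,
   mirroring msa_nlzc_df. */
static int nlzc(uint64_t x, int bits)
{
    int n = bits, c = bits / 2;
    do {
        uint64_t y = x >> c;
        if (y != 0) { n -= c; x = y; }
        c >>= 1;
    } while (c != 0);
    return n - (int)x;   /* x has been reduced to 0 or 1 */
}

int main(void)
{
    assert(pcnt64(0xF0F0F0F0F0F0F0F0ULL) == 32);
    assert(nlzc(1, 64) == 63 && nlzc(0, 32) == 32);
    return 0;
}

msa_nloc_df then falls out for free: leading ones of x are leading zeros of ~x within the data format's width.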
clear_msacsr_cause(CPUMIPSState *env) -{ - SET_FP_CAUSE(env->active_tc.msacsr, 0); -} - -static inline void check_msacsr_cause(CPUMIPSState *env) -{ - if ((GET_FP_CAUSE(env->active_tc.msacsr) & - (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 0) { - UPDATE_FP_FLAGS(env->active_tc.msacsr, - GET_FP_CAUSE(env->active_tc.msacsr)); - } else { - helper_raise_exception(env, EXCP_MSAFPE); - } -} - -/* Flush-to-zero use cases for update_msacsr() */ -#define CLEAR_FS_UNDERFLOW 1 -#define CLEAR_IS_INEXACT 2 -#define RECIPROCAL_INEXACT 4 - -static inline int update_msacsr(CPUMIPSState *env, int action, int denormal) -{ - int ieee_ex; - - int c; - int cause; - int enable; - - ieee_ex = get_float_exception_flags(&env->active_tc.msa_fp_status); - - /* QEMU softfloat does not signal all underflow cases */ - if (denormal) { - ieee_ex |= float_flag_underflow; - } - - c = ieee_ex_to_mips(ieee_ex); - enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED; - - /* Set Inexact (I) when flushing inputs to zero */ - if ((ieee_ex & float_flag_input_denormal) && - (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) { - if (action & CLEAR_IS_INEXACT) { - c &= ~FP_INEXACT; - } else { - c |= FP_INEXACT; - } - } - - /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */ - if ((ieee_ex & float_flag_output_denormal) && - (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) { - c |= FP_INEXACT; - if (action & CLEAR_FS_UNDERFLOW) { - c &= ~FP_UNDERFLOW; - } else { - c |= FP_UNDERFLOW; - } - } - - /* Set Inexact (I) when Overflow (O) is not enabled */ - if ((c & FP_OVERFLOW) != 0 && (enable & FP_OVERFLOW) == 0) { - c |= FP_INEXACT; - } - - /* Clear Exact Underflow when Underflow (U) is not enabled */ - if ((c & FP_UNDERFLOW) != 0 && (enable & FP_UNDERFLOW) == 0 && - (c & FP_INEXACT) == 0) { - c &= ~FP_UNDERFLOW; - } - - /* Reciprocal operations set only Inexact when valid and not - divide by zero */ - if ((action & RECIPROCAL_INEXACT) && - (c & (FP_INVALID | FP_DIV0)) == 0) { - c = FP_INEXACT; - } - - cause = c & enable; /* all current enabled exceptions */ - - if (cause == 0) { - /* No enabled exception, update the MSACSR Cause - with all current exceptions */ - SET_FP_CAUSE(env->active_tc.msacsr, - (GET_FP_CAUSE(env->active_tc.msacsr) | c)); - } else { - /* Current exceptions are enabled */ - if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) { - /* Exception(s) will trap, update MSACSR Cause - with all enabled exceptions */ - SET_FP_CAUSE(env->active_tc.msacsr, - (GET_FP_CAUSE(env->active_tc.msacsr) | c)); - } - } - - return c; -} - -static inline int get_enabled_exceptions(const CPUMIPSState *env, int c) -{ - int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED; - return c & enable; -} - -static inline float16 float16_from_float32(int32 a, flag ieee STATUS_PARAM) -{ - float16 f_val; - - f_val = float32_to_float16((float32)a, ieee STATUS_VAR); - f_val = float16_maybe_silence_nan(f_val); - - return a < 0 ? (f_val | (1 << 15)) : f_val; -} - -static inline float32 float32_from_float64(int64 a STATUS_PARAM) -{ - float32 f_val; - - f_val = float64_to_float32((float64)a STATUS_VAR); - f_val = float32_maybe_silence_nan(f_val); - - return a < 0 ? (f_val | (1 << 31)) : f_val; -} - -static inline float32 float32_from_float16(int16_t a, flag ieee STATUS_PARAM) -{ - float32 f_val; - - f_val = float16_to_float32((float16)a, ieee STATUS_VAR); - f_val = float32_maybe_silence_nan(f_val); - - return a < 0 ? 
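The net effect of update_msacsr/check_msacsr_cause above is: IEEE flags reported by softfloat become MSACSR Cause bits; if any set cause is also enabled (Unimplemented is always treated as enabled), the helper raises EXCP_MSAFPE, otherwise the causes are latched into the sticky Flags field. A reduced sketch of that decision, under the assumption that the bit values follow the usual MIPS FCSR cause ordering (I, U, O, Z, V, E from bit 0); the struct and function here are illustrative, not the QEMU API:

#include <stdio.h>

enum {
    FP_INEXACT       = 1 << 0,
    FP_UNDERFLOW     = 1 << 1,
    FP_OVERFLOW      = 1 << 2,
    FP_DIV0          = 1 << 3,
    FP_INVALID       = 1 << 4,
    FP_UNIMPLEMENTED = 1 << 5,
};

struct msacsr { int cause, enable, flags; };

/* Returns 1 if the operation must raise an MSA FP exception, as in
   check_msacsr_cause; otherwise accumulates causes into Flags. */
static int settle_fp_exceptions(struct msacsr *csr, int ieee_causes)
{
    int enable = csr->enable | FP_UNIMPLEMENTED;  /* E always traps */
    csr->cause |= ieee_causes;
    if (csr->cause & enable) {
        return 1;                  /* deliver EXCP_MSAFPE */
    }
    csr->flags |= csr->cause;      /* sticky flags, no trap */
    return 0;
}

int main(void)
{
    struct msacsr csr = { 0, FP_OVERFLOW, 0 };
    printf("%d\n", settle_fp_exceptions(&csr, FP_INEXACT));   /* 0: latched */
    printf("%d\n", settle_fp_exceptions(&csr, FP_OVERFLOW));  /* 1: traps  */
    return 0;
}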
(f_val | (1 << 31)) : f_val; -} - -static inline float64 float64_from_float32(int32 a STATUS_PARAM) -{ - float64 f_val; - - f_val = float32_to_float64((float64)a STATUS_VAR); - f_val = float64_maybe_silence_nan(f_val); - - return a < 0 ? (f_val | (1ULL << 63)) : f_val; -} - -static inline float32 float32_from_q16(int16_t a STATUS_PARAM) -{ - float32 f_val; - - /* conversion as integer and scaling */ - f_val = int32_to_float32(a STATUS_VAR); - f_val = float32_scalbn(f_val, -15 STATUS_VAR); - - return f_val; -} - -static inline float64 float64_from_q32(int32 a STATUS_PARAM) -{ - float64 f_val; - - /* conversion as integer and scaling */ - f_val = int32_to_float64(a STATUS_VAR); - f_val = float64_scalbn(f_val, -31 STATUS_VAR); - - return f_val; -} - -static inline int16_t float32_to_q16(float32 a STATUS_PARAM) -{ - int32 q_val; - int32 q_min = 0xffff8000; - int32 q_max = 0x00007fff; - - int ieee_ex; - - if (float32_is_any_nan(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return 0; - } - - /* scaling */ - a = float32_scalbn(a, 15 STATUS_VAR); - - ieee_ex = get_float_exception_flags(status); - set_float_exception_flags(ieee_ex & (~float_flag_underflow) - STATUS_VAR); - - if (ieee_ex & float_flag_overflow) { - float_raise(float_flag_inexact STATUS_VAR); - return (int32)a < 0 ? q_min : q_max; - } - - /* conversion to int */ - q_val = float32_to_int32(a STATUS_VAR); - - ieee_ex = get_float_exception_flags(status); - set_float_exception_flags(ieee_ex & (~float_flag_underflow) - STATUS_VAR); - - if (ieee_ex & float_flag_invalid) { - set_float_exception_flags(ieee_ex & (~float_flag_invalid) - STATUS_VAR); - float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); - return (int32)a < 0 ? q_min : q_max; - } - - if (q_val < q_min) { - float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); - return (int16_t)q_min; - } - - if (q_max < q_val) { - float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); - return (int16_t)q_max; - } - - return (int16_t)q_val; -} - -static inline int32 float64_to_q32(float64 a STATUS_PARAM) -{ - int64 q_val; - int64 q_min = 0xffffffff80000000LL; - int64 q_max = 0x000000007fffffffLL; - - int ieee_ex; - - if (float64_is_any_nan(a)) { - float_raise(float_flag_invalid STATUS_VAR); - return 0; - } - - /* scaling */ - a = float64_scalbn(a, 31 STATUS_VAR); - - ieee_ex = get_float_exception_flags(status); - set_float_exception_flags(ieee_ex & (~float_flag_underflow) - STATUS_VAR); - - if (ieee_ex & float_flag_overflow) { - float_raise(float_flag_inexact STATUS_VAR); - return (int64)a < 0 ? q_min : q_max; - } - - /* conversion to integer */ - q_val = float64_to_int64(a STATUS_VAR); - - ieee_ex = get_float_exception_flags(status); - set_float_exception_flags(ieee_ex & (~float_flag_underflow) - STATUS_VAR); - - if (ieee_ex & float_flag_invalid) { - set_float_exception_flags(ieee_ex & (~float_flag_invalid) - STATUS_VAR); - float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); - return (int64)a < 0 ? 
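float32_to_q16 above converts to Q15 fixed point by scaling with 2^15, rounding, and saturating to the 16-bit range, with NaN mapped to 0 (plus an Invalid flag). A host-float approximation that ignores the softfloat flag bookkeeping (to_q15 is a hypothetical name):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Host sketch of FTQ's float -> Q15 step: scale by 2^15, round in the
   current mode, clamp to [-32768, 32767]; NaN maps to 0 as in the
   real helper. Infinities are handled by the range checks. */
static int16_t to_q15(float a)
{
    if (isnan(a)) {
        return 0;
    }
    double v = nearbyint((double)a * 32768.0);   /* a * 2^15 */
    if (v < -32768.0) return -32768;             /* saturate */
    if (v > 32767.0) return 32767;
    return (int16_t)v;
}

int main(void)
{
    printf("%d\n", to_q15(0.5f));   /* 16384 */
    printf("%d\n", to_q15(-1.0f));  /* -32768 */
    printf("%d\n", to_q15(2.0f));   /* 32767: out of range, saturated */
    return 0;
}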
q_min : q_max; - } - - if (q_val < q_min) { - float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); - return (int32)q_min; - } - - if (q_max < q_val) { - float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); - return (int32)q_max; - } - - return (int32)q_val; -} - -#define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \ - do { \ - int c; \ - int64_t cond; \ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ - if (!QUIET) { \ - cond = float ## BITS ## _ ## OP(ARG1, ARG2, \ - &env->active_tc.msa_fp_status); \ - } else { \ - cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, \ - &env->active_tc.msa_fp_status); \ - } \ - DEST = cond ? M_MAX_UINT(BITS) : 0; \ - c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \ - \ - if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ - } \ - } while (0) - -#define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \ - do { \ - MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \ - if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \ - DEST = 0; \ - } \ - } while (0) - -#define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \ - do { \ - MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ - if (DEST == 0) { \ - MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \ - } \ - } while (0) - -#define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \ - do { \ - MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ - if (DEST == 0) { \ - MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \ - } \ - } while (0) - -#define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \ - do { \ - MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ - if (DEST == 0) { \ - MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ - if (DEST == 0) { \ - MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \ - } \ - } \ - } while (0) - -#define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \ - do { \ - MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ - if (DEST == 0) { \ - MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \ - } \ - } while (0) - -#define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \ - do { \ - MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ - if (DEST == 0) { \ - MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ - } \ - } while (0) - -#define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \ - do { \ - MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \ - if (DEST == 0) { \ - MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \ - } \ - } while (0) - -static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) -{ - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) -{ - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32, - quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64, - quiet); - } 
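The unordered-compare macros above are compositions of two primitive tests: ULE is "unordered or a <= b", UEQ is "unordered or equal", and NE is the ordered "a < b or b < a". The same compositions can be written with C99's quiet classification macros; a sketch, where fcule/fcueq/fcne are illustrative scalar versions returning 1/0 rather than MSA's all-ones/all-zero lane masks:

#include <math.h>
#include <stdio.h>

/* Quiet composed predicates, matching the macro structure above. */
static int fcule(float a, float b) { return isunordered(a, b) || islessequal(a, b); }
static int fcueq(float a, float b) { return isunordered(a, b) || a == b; }
static int fcne (float a, float b) { return isless(a, b) || isless(b, a); } /* ordered not-equal */

int main(void)
{
    float qnan = NAN;
    printf("%d %d %d\n", fcule(1, 2), fcule(qnan, 2), fcne(qnan, 2)); /* 1 1 0 */
    printf("%d\n", fcueq(qnan, qnan));                                /* 1 */
    return 0;
}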
- break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) -{ - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) -{ - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) -{ - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_COND(pwx->w[i], lt, pws->w[i], pwt->w[i], 32, quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_COND(pwx->d[i], lt, pws->d[i], pwt->d[i], 64, quiet); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) -{ - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_ULT(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_ULT(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) -{ - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_COND(pwx->w[i], le, pws->w[i], pwt->w[i], 32, quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_COND(pwx->d[i], le, pws->d[i], pwt->d[i], 64, quiet); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) -{ - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_ULE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_ULE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -static inline void compare_or(CPUMIPSState *env, 
wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) -{ - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) -{ - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws, - wr_t *pwt, uint32_t df, int quiet) { - wr_t wx, *pwx = &wx; - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_af(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_un(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_eq(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_ueq(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_lt(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_ult(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = 
&(env->active_fpu.fpr[wt].wr); - compare_le(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_ule(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_af(env, pwd, pws, pwt, df, 0); -} - -void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_un(env, pwd, pws, pwt, df, 0); -} - -void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_eq(env, pwd, pws, pwt, df, 0); -} - -void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_ueq(env, pwd, pws, pwt, df, 0); -} - -void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_lt(env, pwd, pws, pwt, df, 0); -} - -void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_ult(env, pwd, pws, pwt, df, 0); -} - -void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_le(env, pwd, pws, pwt, df, 0); -} - -void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_ule(env, pwd, pws, pwt, df, 0); -} - -void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_or(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_une(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_ne(env, pwd, pws, pwt, df, 1); -} - -void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t 
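The only difference between the fc* and fs* helpers is the quiet flag threaded down to the compare macros: fs* uses signaling compares that raise Invalid on any NaN operand, fc* uses quiet ones that raise it only for signaling NaNs. C99 exposes the same split between the relational operators and the is* macros; a host-side sketch, assuming a compiler/libm that honors FENV_ACCESS:

#include <fenv.h>
#include <math.h>
#include <stdio.h>

#pragma STDC FENV_ACCESS ON

int main(void)
{
    float qnan = NAN;   /* quiet NaN */

    feclearexcept(FE_ALL_EXCEPT);
    (void)isless(qnan, 1.0f);              /* quiet compare, like FCLT */
    printf("quiet raised invalid: %d\n", fetestexcept(FE_INVALID) != 0);

    feclearexcept(FE_ALL_EXCEPT);
    (void)(qnan < 1.0f);                   /* signaling compare, like FSLT */
    printf("signaling raised invalid: %d\n", fetestexcept(FE_INVALID) != 0);
    return 0;
}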
ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_or(env, pwd, pws, pwt, df, 0); -} - -void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_une(env, pwd, pws, pwt, df, 0); -} - -void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - compare_ne(env, pwd, pws, pwt, df, 0); -} - -#define float16_is_zero(ARG) 0 -#define float16_is_zero_or_denormal(ARG) 0 - -#define IS_DENORMAL(ARG, BITS) \ - (!float ## BITS ## _is_zero(ARG) \ - && float ## BITS ## _is_zero_or_denormal(ARG)) - -#define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \ - do { \ - int c; \ - \ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ - DEST = float ## BITS ## _ ## OP(ARG1, ARG2, \ - &env->active_tc.msa_fp_status); \ - c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ - \ - if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ - } \ - } while (0) - -void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_BINOP(pwx->w[i], add, pws->w[i], pwt->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_BINOP(pwx->d[i], add, pws->d[i], pwt->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - msa_move_v(pwd, pwx); -} - -void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_BINOP(pwx->w[i], sub, pws->w[i], pwt->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_BINOP(pwx->d[i], sub, pws->d[i], pwt->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - msa_move_v(pwd, pwx); -} - -void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t 
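One recurring idiom in these macros deserves a note: when an enabled exception is detected, the helper does not leave the IEEE result in the destination element. DEST = ((FLOAT_SNAN##BITS >> 6) << 6) | c overwrites it with a signaling-NaN bit pattern whose low six bits carry the cause. A sketch using the 32-bit constant (0x7f800020, per the FLOAT_SNAN32 comment above; poison_result is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

#define FLOAT_SNAN32 0x7f800020u

/* Clear the low six bits of the SNaN pattern and splice in the cause. */
static uint32_t poison_result(int cause)
{
    return ((FLOAT_SNAN32 >> 6) << 6) | (uint32_t)cause;
}

int main(void)
{
    printf("0x%08x\n", poison_result(0x5));  /* 0x7f800005 */
    return 0;
}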
*pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -#define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS) \ - do { \ - int c; \ - \ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ - DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, \ - &env->active_tc.msa_fp_status); \ - c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ - \ - if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ - } \ - } while (0) - -void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i], - pws->w[i], pwt->w[i], 0, 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i], - pws->d[i], pwt->d[i], 0, 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i], - pws->w[i], pwt->w[i], - float_muladd_negate_product, 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i], - pws->d[i], pwt->d[i], - float_muladd_negate_product, 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i], - pwt->w[i] > 0x200 ? 0x200 : - pwt->w[i] < -0x200 ? -0x200 : pwt->w[i], - 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i], - pwt->d[i] > 0x1000 ? 0x1000 : - pwt->d[i] < -0x1000 ? 
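FMADD/FMSUB above are fused: a single rounding via float##BITS##_muladd, with FMSUB negating the product (float_muladd_negate_product) to get wd - ws*wt. Host equivalents with C99 fma(), under hypothetical names:

#include <math.h>
#include <stdio.h>

/* FMADD.D: wd = wd + ws*wt, one rounding (fused). */
static double msa_fmadd(double wd, double ws, double wt)
{
    return fma(ws, wt, wd);
}

/* FMSUB.D: wd = wd - ws*wt, i.e. muladd with the product negated,
   matching float_muladd_negate_product in the helper above. */
static double msa_fmsub(double wd, double ws, double wt)
{
    return fma(-ws, wt, wd);
}

int main(void)
{
    printf("%g %g\n", msa_fmadd(1.0, 2.0, 3.0),   /* 7 */
                      msa_fmsub(1.0, 2.0, 3.0));  /* -5 */
    return 0;
}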
-0x1000 : pwt->d[i], - 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -#define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS) \ - do { \ - int c; \ - \ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ - DEST = float ## BITS ## _ ## OP(ARG, &env->active_tc.msa_fp_status);\ - c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ - \ - if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ - } \ - } while (0) - -void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - /* Half precision floats come in two formats: standard - IEEE and "ARM" format. The latter gains extra exponent - range by omitting the NaN/Inf encodings. */ - flag ieee = 1; - - MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16); - MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32); - MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - msa_move_v(pwd, pwx); -} - -#define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS) \ - do { \ - int c; \ - \ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ - DEST = float ## BITS ## _ ## OP(ARG, &env->active_tc.msa_fp_status);\ - c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \ - \ - if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## XBITS >> 6) << 6) | c; \ - } \ - } while (0) - -void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16); - MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32); - MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS) \ - !float ## BITS ## _is_any_nan(ARG1) \ - && float ## BITS ## _is_quiet_nan(ARG2) - -#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \ - do { \ - int c; \ - \ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ - DEST = float ## BITS ## _ ## OP(ARG1, ARG2, \ - &env->active_tc.msa_fp_status); \ - c = update_msacsr(env, 0, 0); \ - \ - if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ - } \ - } while (0) - -#define FMAXMIN_A(F, G, X, _S, _T, BITS) \ - do { \ - uint## BITS ##_t S = _S, T = _T; \ - uint## BITS ##_t as, at, xs, xt, xd; \ - if (NUMBER_QNAN_PAIR(S, T, BITS)) { \ - T = S; \ - } \ - else if (NUMBER_QNAN_PAIR(T, S, BITS)) { \ - S = T; \ - } \ - as = float## BITS ##_abs(S); \ - at = float## BITS ##_abs(T); \ - MSA_FLOAT_MAXOP(xs, F, S, T, BITS); \ - MSA_FLOAT_MAXOP(xt, G, 
S, T, BITS); \ - MSA_FLOAT_MAXOP(xd, F, as, at, BITS); \ - X = (as == at || xd == float## BITS ##_abs(xs)) ? xs : xt; \ - } while (0) - -void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) { - MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pws->w[i], 32); - } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) { - MSA_FLOAT_MAXOP(pwx->w[i], min, pwt->w[i], pwt->w[i], 32); - } else { - MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pwt->w[i], 32); - } - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) { - MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pws->d[i], 64); - } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) { - MSA_FLOAT_MAXOP(pwx->d[i], min, pwt->d[i], pwt->d[i], 64); - } else { - MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pwt->d[i], 64); - } - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) { - MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pws->w[i], 32); - } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) { - MSA_FLOAT_MAXOP(pwx->w[i], max, pwt->w[i], pwt->w[i], 32); - } else { - MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pwt->w[i], 32); - } - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) { - MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pws->d[i], 64); - } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) { - MSA_FLOAT_MAXOP(pwx->d[i], max, pwt->d[i], pwt->d[i], 64); - } else { - MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pwt->d[i], 64); - } - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws, uint32_t wt) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < 
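NUMBER_QNAN_PAIR encodes the MSA (and IEEE 754-2008 minNum/maxNum) rule that min/max with exactly one quiet-NaN operand returns the number: the helper substitutes the numeric operand for both arguments. A scalar sketch; isnan() cannot distinguish quiet from signaling NaNs, so this is an approximation, and C's own fminf/fmaxf already behave this way:

#include <math.h>
#include <stdio.h>

/* FMIN.W-style minimum: if exactly one side is a NaN, the numeric
   side wins; only NaN-vs-NaN propagates a NaN. */
static float msa_fmin(float a, float b)
{
    if (!isnan(a) && isnan(b)) return a;
    if (isnan(a) && !isnan(b)) return b;
    return a < b ? a : b;   /* both numbers, or both NaN */
}

int main(void)
{
    printf("%g %g\n", msa_fmin(1.0f, NAN), msa_fmin(NAN, 2.0f));  /* 1 2 */
    return 0;
}

FMIN_A/FMAX_A (the FMAXMIN_A macro) apply the same rule to the magnitudes, falling back to the signed compare when the magnitudes tie.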
DF_ELEMENTS(DF_WORD); i++) { - FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df, - uint32_t wd, uint32_t ws) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - if (df == DF_WORD) { - pwd->w[0] = helper_float_class_s(pws->w[0]); - pwd->w[1] = helper_float_class_s(pws->w[1]); - pwd->w[2] = helper_float_class_s(pws->w[2]); - pwd->w[3] = helper_float_class_s(pws->w[3]); - } else { - pwd->d[0] = helper_float_class_d(pws->d[0]); - pwd->d[1] = helper_float_class_d(pws->d[1]); - } -} - -#define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS) \ - do { \ - int c; \ - \ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ - DEST = float ## BITS ## _ ## OP(ARG, &env->active_tc.msa_fp_status);\ - c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \ - \ - if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ - } else if (float ## BITS ## _is_any_nan(ARG)) { \ - DEST = 0; \ - } \ - } while (0) - -void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -#define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS) \ - do { \ - int c; \ - \ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ - DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, \ - &env->active_tc.msa_fp_status); \ - c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) || \ - float ## BITS ## _is_quiet_nan(DEST) ? 
\ - 0 : RECIPROCAL_INEXACT, \ - IS_DENORMAL(DEST, BITS)); \ - \ - if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ - } \ - } while (0) - -void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i], - &env->active_tc.msa_fp_status), 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i], - &env->active_tc.msa_fp_status), 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP(pwx->d[i], round_to_int, pws->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -#define MSA_FLOAT_LOGB(DEST, ARG, BITS) \ - do { \ - int c; \ - \ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ - set_float_rounding_mode(float_round_down, \ - &env->active_tc.msa_fp_status); \ - DEST = float ## BITS ## _ ## log2(ARG, \ - &env->active_tc.msa_fp_status); \ - DEST = float ## BITS ## _ ## round_to_int(DEST, \ - &env->active_tc.msa_fp_status); \ - set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr & \ - MSACSR_RM_MASK) >> MSACSR_RM], \ - &env->active_tc.msa_fp_status); \ - \ - set_float_exception_flags( \ - get_float_exception_flags(&env->active_tc.msa_fp_status) \ - & (~float_flag_inexact), \ - &env->active_tc.msa_fp_status); \ - \ - c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ - \ - if (get_enabled_exceptions(env, c)) { \ - DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ - } \ - } while (0) - -void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - 
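MSA_FLOAT_LOGB computes FLOG2 as log2 under round-toward-minus-infinity, then rounds that to an integral value: in effect floor(log2(x)), which for finite positive inputs is just the IEEE exponent that logb() returns. A host sketch (hypothetical name; assumes a log2 that is exact at powers of two, which correctly rounded libms provide):

#include <math.h>
#include <stdio.h>

/* FLOG2.D as the integral base-2 exponent, i.e. floor(log2(x)). */
static double msa_flog2(double x)
{
    return floor(log2(x));
}

int main(void)
{
    printf("%g %g %g\n", msa_flog2(8.0), msa_flog2(10.0), msa_flog2(0.5));
    /* 3 3 -1 */
    return 0;
}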
-void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - /* Half precision floats come in two formats: standard - IEEE and "ARM" format. The latter gains extra exponent - range by omitting the NaN/Inf encodings. */ - flag ieee = 1; - - MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - msa_move_v(pwd, pwx); -} - -void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - /* Half precision floats come in two formats: standard - IEEE and "ARM" format. The latter gains extra exponent - range by omitting the NaN/Inf encodings. */ - flag ieee = 1; - - MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - msa_move_v(pwd, pwx); -} - -void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64); - } - break; - default: - assert(0); - } - - msa_move_v(pwd, pwx); -} - -void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64); - } - break; - default: - assert(0); - } - - msa_move_v(pwd, pwx); -} - -void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch 
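FFQL/FFQR widen Q15 (and Q31) fixed point via float32_from_q16 defined earlier: convert the element as an integer, then scale by 2^-15 (or 2^-31). The result is exact, since every 16-bit integer fits in a float's 24-bit significand. Sketch (from_q15 is a hypothetical name):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Q15 -> float as in float32_from_q16: integer conversion plus a
   power-of-two scale, both exact here. */
static float from_q15(int16_t q)
{
    return ldexpf((float)q, -15);
}

int main(void)
{
    printf("%g %g\n", from_q15(16384), from_q15(-32768));  /* 0.5 -1 */
    return 0;
}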
(df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -#define float32_from_int32 int32_to_float32 -#define float32_from_uint32 uint32_to_float32 - -#define float64_from_int64 int64_to_float64 -#define float64_from_uint64 uint64_to_float64 - -void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} - -void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, - uint32_t ws) -{ - wr_t wx, *pwx = &wx; - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - wr_t *pws = &(env->active_fpu.fpr[ws].wr); - uint32_t i; - - clear_msacsr_cause(env); - - switch (df) { - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64); - } - break; - default: - assert(0); - } - - check_msacsr_cause(env); - - msa_move_v(pwd, pwx); -} diff --git a/qemu/target-mips/op_helper.c b/qemu/target-mips/op_helper.c deleted file mode 100644 index 0855d8a3..00000000 --- a/qemu/target-mips/op_helper.c +++ /dev/null @@ -1,3713 +0,0 @@ -/* - * MIPS emulation helpers for qemu. - * - * Copyright (c) 2004-2005 Jocelyn Mayer - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */ -#include <stdlib.h> -#include "cpu.h" -#include "qemu/host-utils.h" -#include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" - -#ifndef CONFIG_USER_ONLY -static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global); -#endif - -/*****************************************************************************/ -/* Exceptions processing helpers */ - -static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, - uint32_t exception, - int error_code, - uintptr_t pc) -{ - CPUState *cs = CPU(mips_env_get_cpu(env)); - - if (exception < EXCP_SC) { - qemu_log("%s: %d %d\n", __func__, exception, error_code); - } - cs->exception_index = exception; - env->error_code = error_code; - - if (pc) { - /* now we have a real cpu fault */ - cpu_restore_state(cs, pc); - } - - if (exception == 0x11) { - env->uc->next_pc = env->active_tc.PC + 4; - } - - cpu_loop_exit(cs); -} - -static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env, - uint32_t exception, - uintptr_t pc) -{ - do_raise_exception_err(env, exception, 0, pc); -} - -void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception, - int error_code) -{ - do_raise_exception_err(env, exception, error_code, 0); -} - -void helper_raise_exception(CPUMIPSState *env, uint32_t exception) -{ - do_raise_exception(env, exception, 0); -} - -#if defined(CONFIG_USER_ONLY) -#define HELPER_LD(name, insn, type) \ -static inline type do_##name(CPUMIPSState *env, target_ulong addr, \ - int mem_idx) \ -{ \ - return (type) insn##_raw(addr); \ -} -#else -#define HELPER_LD(name, insn, type) \ -static inline type do_##name(CPUMIPSState *env, target_ulong addr, \ - int mem_idx) \ -{ \ - switch (mem_idx) \ - { \ - case 0: return (type) cpu_##insn##_kernel(env, addr); break; \ - case 1: return (type) cpu_##insn##_super(env, addr); break; \ - default: \ - case 2: return (type) cpu_##insn##_user(env, addr); break; \ - } \ -} -#endif -HELPER_LD(lbu, ldub, uint8_t) -HELPER_LD(lhu, lduw, uint16_t) -HELPER_LD(lw, ldl, int32_t) -HELPER_LD(ld, ldq, int64_t) -#undef HELPER_LD - -#if defined(CONFIG_USER_ONLY) -#define HELPER_ST(name, insn, type) \ -static inline void do_##name(CPUMIPSState *env, target_ulong addr, \ - type val, int mem_idx) \ -{ \ - insn##_raw(addr, val); \ -} -#else -#define HELPER_ST(name, insn, type) \ -static inline void do_##name(CPUMIPSState *env, target_ulong addr, \ - type val, int mem_idx) \ -{ \ - switch (mem_idx) \ - { \ - case 0: cpu_##insn##_kernel(env, addr, val); break; \ - case 1: cpu_##insn##_super(env, addr, val); break; \ - default: \ - case 2: cpu_##insn##_user(env, addr, val); break; \ - } \ -} -#endif -HELPER_ST(sb, stb, uint8_t) -HELPER_ST(sh, stw, uint16_t) -HELPER_ST(sw, stl, uint32_t) -HELPER_ST(sd, stq, uint64_t) -#undef HELPER_ST - -target_ulong helper_clo (target_ulong arg1) -{ - return clo32(arg1); -} - -target_ulong helper_clz (target_ulong arg1) -{ - return clz32(arg1); -} - -#if defined(TARGET_MIPS64) -target_ulong helper_dclo (target_ulong arg1) -{ - return clo64(arg1); -} - -target_ulong helper_dclz (target_ulong arg1) -{ - return clz64(arg1); -} -#endif /* TARGET_MIPS64 */ - -/* 64 bits arithmetic for 32 bits hosts */ -static inline uint64_t get_HILO(CPUMIPSState *env) -{ - return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0]; -} - -static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO) -{ - target_ulong tmp; - env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); - tmp = env->active_tc.HI[0] = (int32_t)(HILO >> 32); - return tmp; -} - -static inline
target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO) -{ - target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); - env->active_tc.HI[0] = (int32_t)(HILO >> 32); - return tmp; -} - -/* Multiplication variants of the vr54xx. */ -target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2)); -} - -target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 * - (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (uint64_t)get_HILO(env) + - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (uint64_t)get_HILO(env) + - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HI_LOT0(env, (uint64_t)get_HILO(env) - - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (uint64_t)get_HILO(env) - - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); -} - -target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 * - (uint64_t)(uint32_t)arg2); -} - -target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 * - (int64_t)(int32_t)arg2); -} - -target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1, - target_ulong arg2) -{ - return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 * - (uint64_t)(uint32_t)arg2); -} - -static inline target_ulong bitswap(target_ulong v) -{ - v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) | - ((v & (target_ulong)0x5555555555555555ULL) << 1); - v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) | - ((v & (target_ulong)0x3333333333333333ULL) << 2); - v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) | - ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4); - return v; -} - -#ifdef TARGET_MIPS64 -target_ulong helper_dbitswap(target_ulong rt) -{ - return bitswap(rt); -} -#endif - -target_ulong helper_bitswap(target_ulong rt) -{ - return (int32_t)bitswap(rt); -} - -#ifndef CONFIG_USER_ONLY - -static inline hwaddr 
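The vr54xx multiply-accumulate helpers above all funnel through get_HILO/set_HI_LOT0/set_HIT0_LO, which view the architectural HI/LO pair as one 64-bit accumulator and sign-extend each 32-bit half back into a target register. A reduced sketch (struct and function names illustrative):

#include <stdint.h>
#include <stdio.h>

/* Model of the 32-bit HI/LO accumulator handling: each half is stored
   sign-extended, as set_HI_LOT0 does with target_ulong registers. */
struct hilo { int64_t hi, lo; };

static uint64_t get_hilo(const struct hilo *r)
{
    return ((uint64_t)(uint32_t)r->hi << 32) | (uint32_t)r->lo;
}

static void set_hilo(struct hilo *r, uint64_t v)
{
    r->lo = (int32_t)(v & 0xFFFFFFFF);   /* low word, sign-extended  */
    r->hi = (int32_t)(v >> 32);          /* high word, sign-extended */
}

int main(void)
{
    struct hilo r = { 0, 0 };
    /* MACC-style accumulate: HILO += (int64_t)rs * rt, here -1 * 3 */
    set_hilo(&r, get_hilo(&r) + (uint64_t)((int64_t)-1 * 3));
    printf("hi=%lld lo=%lld\n", (long long)r.hi, (long long)r.lo);
    /* hi=-1 lo=-3 */
    return 0;
}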
do_translate_address(CPUMIPSState *env, - target_ulong address, - int rw) -{ - hwaddr lladdr; - - lladdr = cpu_mips_translate_address(env, address, rw); - - if (lladdr == -1LL) { - cpu_loop_exit(CPU(mips_env_get_cpu(env))); - } else { - return lladdr; - } -} - -#define HELPER_LD_ATOMIC(name, insn) \ -target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \ -{ \ - env->lladdr = do_translate_address(env, arg, 0); \ - env->llval = do_##insn(env, arg, mem_idx); \ - return env->llval; \ -} -HELPER_LD_ATOMIC(ll, lw) -#ifdef TARGET_MIPS64 -HELPER_LD_ATOMIC(lld, ld) -#endif -#undef HELPER_LD_ATOMIC - -#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \ -target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1, \ - target_ulong arg2, int mem_idx) \ -{ \ - target_long tmp; \ - \ - if (arg2 & almask) { \ - env->CP0_BadVAddr = arg2; \ - helper_raise_exception(env, EXCP_AdES); \ - } \ - if (do_translate_address(env, arg2, 1) == env->lladdr) { \ - tmp = do_##ld_insn(env, arg2, mem_idx); \ - if (tmp == env->llval) { \ - do_##st_insn(env, arg2, arg1, mem_idx); \ - return 1; \ - } \ - } \ - return 0; \ -} -HELPER_ST_ATOMIC(sc, lw, sw, 0x3) -#ifdef TARGET_MIPS64 -HELPER_ST_ATOMIC(scd, ld, sd, 0x7) -#endif -#undef HELPER_ST_ATOMIC -#endif - -#ifdef TARGET_WORDS_BIGENDIAN -#define GET_LMASK(v) ((v) & 3) -#define GET_OFFSET(addr, offset) (addr + (offset)) -#else -#define GET_LMASK(v) (((v) & 3) ^ 3) -#define GET_OFFSET(addr, offset) (addr - (offset)) -#endif - -void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, - int mem_idx) -{ - do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx); - - if (GET_LMASK(arg2) <= 2) - do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx); - - if (GET_LMASK(arg2) <= 1) - do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx); - - if (GET_LMASK(arg2) == 0) - do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx); -} - -void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, - int mem_idx) -{ - do_sb(env, arg2, (uint8_t)arg1, mem_idx); - - if (GET_LMASK(arg2) >= 1) - do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx); - - if (GET_LMASK(arg2) >= 2) - do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx); - - if (GET_LMASK(arg2) == 3) - do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx); -} - -#if defined(TARGET_MIPS64) -/* "half" load and stores. We must do the memory access inline, - or fault handling won't work. 
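helper_swl()/helper_swr() above decompose an unaligned word store into byte stores, with GET_LMASK selecting how many byte lanes fall on this side of the alignment boundary. A standalone sketch of the little-endian SWL path, using a hypothetical swl_le() over a plain byte buffer instead of guest memory:

#include <stdint.h>
#include <stdio.h>

static void swl_le(uint8_t *mem, uint32_t addr, uint32_t val)
{
    int lmask = (addr & 3) ^ 3;            /* GET_LMASK, little-endian variant */
    mem[addr] = (uint8_t)(val >> 24);      /* MSB always lands on the address */
    if (lmask <= 2) mem[addr - 1] = (uint8_t)(val >> 16);
    if (lmask <= 1) mem[addr - 2] = (uint8_t)(val >> 8);
    if (lmask == 0) mem[addr - 3] = (uint8_t)val;
}

int main(void)
{
    uint8_t buf[8] = {0};
    swl_le(buf, 6, 0xAABBCCDDu);   /* lmask=1: writes buf[6], buf[5], buf[4] */
    printf("%02x %02x %02x\n", buf[4], buf[5], buf[6]); /* cc bb aa */
    return 0;
}

Paired with SWR at the other end of the word, this completes an unaligned store without ever crossing the word boundary, which is why fault handling stays simple.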
*/ - -#ifdef TARGET_WORDS_BIGENDIAN -#define GET_LMASK64(v) ((v) & 7) -#else -#define GET_LMASK64(v) (((v) & 7) ^ 7) -#endif - -void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, - int mem_idx) -{ - do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx); - - if (GET_LMASK64(arg2) <= 6) - do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx); - - if (GET_LMASK64(arg2) <= 5) - do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx); - - if (GET_LMASK64(arg2) <= 4) - do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx); - - if (GET_LMASK64(arg2) <= 3) - do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx); - - if (GET_LMASK64(arg2) <= 2) - do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx); - - if (GET_LMASK64(arg2) <= 1) - do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx); - - if (GET_LMASK64(arg2) <= 0) - do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx); -} - -void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, - int mem_idx) -{ - do_sb(env, arg2, (uint8_t)arg1, mem_idx); - - if (GET_LMASK64(arg2) >= 1) - do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx); - - if (GET_LMASK64(arg2) >= 2) - do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx); - - if (GET_LMASK64(arg2) >= 3) - do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx); - - if (GET_LMASK64(arg2) >= 4) - do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx); - - if (GET_LMASK64(arg2) >= 5) - do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx); - - if (GET_LMASK64(arg2) >= 6) - do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx); - - if (GET_LMASK64(arg2) == 7) - do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx); -} -#endif /* TARGET_MIPS64 */ - -static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 }; - -void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, - uint32_t mem_idx) -{ - target_ulong base_reglist = reglist & 0xf; - target_ulong do_r31 = reglist & 0x10; - - if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) { - target_ulong i; - - for (i = 0; i < base_reglist; i++) { - env->active_tc.gpr[multiple_regs[i]] = - (target_long)do_lw(env, addr, mem_idx); - addr += 4; - } - } - - if (do_r31) { - env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx); - } -} - -void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, - uint32_t mem_idx) -{ - target_ulong base_reglist = reglist & 0xf; - target_ulong do_r31 = reglist & 0x10; - - if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) { - target_ulong i; - - for (i = 0; i < base_reglist; i++) { - do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx); - addr += 4; - } - } - - if (do_r31) { - do_sw(env, addr, env->active_tc.gpr[31], mem_idx); - } -} - -#if defined(TARGET_MIPS64) -void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, - uint32_t mem_idx) -{ - target_ulong base_reglist = reglist & 0xf; - target_ulong do_r31 = reglist & 0x10; - - if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) { - target_ulong i; - - for (i = 0; i < base_reglist; i++) { - env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx); - addr += 8; - } - } - - if (do_r31) { - env->active_tc.gpr[31] = do_ld(env, addr, mem_idx); - } -} - -void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, - uint32_t mem_idx) -{ - target_ulong base_reglist = 
reglist & 0xf;
-    target_ulong do_r31 = reglist & 0x10;
-
-    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
-        target_ulong i;
-
-        for (i = 0; i < base_reglist; i++) {
-            do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx);
-            addr += 8;
-        }
-    }
-
-    if (do_r31) {
-        do_sd(env, addr, env->active_tc.gpr[31], mem_idx);
-    }
-}
-#endif
-
-#ifndef CONFIG_USER_ONLY
-/* SMP helpers.  */
-static bool mips_vpe_is_wfi(MIPSCPU *c)
-{
-    CPUState *cpu = CPU(c);
-    CPUMIPSState *env = &c->env;
-
-    /* If the VPE is halted but otherwise active, it means it's waiting for
-       an interrupt.  */
-    return cpu->halted && mips_vpe_active(env);
-}
-
-static inline void mips_vpe_wake(MIPSCPU *c)
-{
-    /* Don't set ->halted = 0 directly, let it be done via cpu_has_work
-       because there might be other conditions that state that c should
-       be sleeping.  */
-    cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
-}
-
-static inline void mips_vpe_sleep(MIPSCPU *cpu)
-{
-    CPUState *cs = CPU(cpu);
-
-    /* The VPE was shut off, really go to bed.
-       Reset any old _WAKE requests.  */
-    cs->halted = 1;
-    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
-}
-
-static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
-{
-    CPUMIPSState *c = &cpu->env;
-
-    /* FIXME: TC reschedule.  */
-    if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
-        mips_vpe_wake(cpu);
-    }
-}
-
-static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
-{
-    CPUMIPSState *c = &cpu->env;
-
-    /* FIXME: TC reschedule.  */
-    if (!mips_vpe_active(c)) {
-        mips_vpe_sleep(cpu);
-    }
-}
-
-/**
- * mips_cpu_map_tc:
- * @env: CPU from which mapping is performed.
- * @tc: Should point to an int with the value of the global TC index.
- *
- * This function will transform @tc into a local index within the
- * returned #CPUMIPSState.
- */
-/* FIXME: This code assumes that all VPEs have the same number of TCs,
-          which depends on runtime setup. Can probably be fixed by
-          walking the list of CPUMIPSStates.  */
-static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
-{
-    MIPSCPU *cpu;
-    CPUState *cs;
-    CPUState *other_cs;
-    int vpe_idx;
-    int tc_idx = *tc;
-
-    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
-        /* Not allowed to address other CPUs.  */
-        *tc = env->current_tc;
-        return env;
-    }
-
-    cs = CPU(mips_env_get_cpu(env));
-    vpe_idx = tc_idx / cs->nr_threads;
-    *tc = tc_idx % cs->nr_threads;
-    other_cs = qemu_get_cpu(env->uc, vpe_idx);
-    if (other_cs == NULL) {
-        return env;
-    }
-    cpu = MIPS_CPU(env->uc, other_cs);
-    return &cpu->env;
-}
-
-/* The per VPE CP0_Status register shares some fields with the per TC
-   CP0_TCStatus registers. These fields are wired to the same registers,
-   so changes to either of them should be reflected on both registers.
-
-   Also, EntryHi shares the bottom 8 bit ASID with TCStatus.
-
-   These helpers synchronize the regs for a given cpu.  */
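mips_cpu_map_tc() above resolves the global TC index carried in VPEControl.TargTC by dividing it by the per-VPE thread count, under the same simplification the FIXME notes (every VPE exposes the same number of TCs). The arithmetic, distilled into a runnable sketch with stand-in types:

#include <stdio.h>

struct tc_ref { int vpe_idx; int local_tc; };

static struct tc_ref map_tc(int global_tc, int tcs_per_vpe)
{
    struct tc_ref r;
    r.vpe_idx  = global_tc / tcs_per_vpe;   /* which VPE holds the TC */
    r.local_tc = global_tc % tcs_per_vpe;   /* index within that VPE */
    return r;
}

int main(void)
{
    struct tc_ref r = map_tc(5, 2);  /* 2 TCs per VPE: TC5 -> VPE 2, local TC 1 */
    printf("vpe=%d tc=%d\n", r.vpe_idx, r.local_tc);
    return 0;
}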
-
-/* Called for updates to CP0_Status.  */
-static void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
-{
-    int32_t tcstatus, *tcst;
-    uint32_t v = cpu->CP0_Status;
-    uint32_t cu, mx, asid, ksu;
-    uint32_t mask = ((1 << CP0TCSt_TCU3)
-                      | (1 << CP0TCSt_TCU2)
-                      | (1 << CP0TCSt_TCU1)
-                      | (1 << CP0TCSt_TCU0)
-                      | (1 << CP0TCSt_TMX)
-                      | (3 << CP0TCSt_TKSU)
-                      | (0xff << CP0TCSt_TASID));
-
-    cu = (v >> CP0St_CU0) & 0xf;
-    mx = (v >> CP0St_MX) & 0x1;
-    ksu = (v >> CP0St_KSU) & 0x3;
-    asid = env->CP0_EntryHi & 0xff;
-
-    tcstatus = cu << CP0TCSt_TCU0;
-    tcstatus |= mx << CP0TCSt_TMX;
-    tcstatus |= ksu << CP0TCSt_TKSU;
-    tcstatus |= asid;
-
-    if (tc == cpu->current_tc) {
-        tcst = &cpu->active_tc.CP0_TCStatus;
-    } else {
-        tcst = &cpu->tcs[tc].CP0_TCStatus;
-    }
-
-    *tcst &= ~mask;
-    *tcst |= tcstatus;
-    compute_hflags(cpu);
-}
-
-/* Called for updates to CP0_TCStatus.  */
-static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
-                             target_ulong v)
-{
-    uint32_t status;
-    uint32_t tcu, tmx, tasid, tksu;
-    uint32_t mask = ((1U << CP0St_CU3)
-                      | (1 << CP0St_CU2)
-                      | (1 << CP0St_CU1)
-                      | (1 << CP0St_CU0)
-                      | (1 << CP0St_MX)
-                      | (3 << CP0St_KSU));
-
-    tcu = (v >> CP0TCSt_TCU0) & 0xf;
-    tmx = (v >> CP0TCSt_TMX) & 0x1;
-    tasid = v & 0xff;
-    tksu = (v >> CP0TCSt_TKSU) & 0x3;
-
-    status = tcu << CP0St_CU0;
-    status |= tmx << CP0St_MX;
-    status |= tksu << CP0St_KSU;
-
-    cpu->CP0_Status &= ~mask;
-    cpu->CP0_Status |= status;
-
-    /* Sync the TASID with EntryHi.  */
-    cpu->CP0_EntryHi &= ~0xff;
-    cpu->CP0_EntryHi |= tasid;
-
-    compute_hflags(cpu);
-}
-
-/* Called for updates to CP0_EntryHi.  */
-static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
-{
-    int32_t *tcst;
-    uint32_t asid, v = cpu->CP0_EntryHi;
-
-    asid = v & 0xff;
-
-    if (tc == cpu->current_tc) {
-        tcst = &cpu->active_tc.CP0_TCStatus;
-    } else {
-        tcst = &cpu->tcs[tc].CP0_TCStatus;
-    }
-
-    *tcst &= ~0xff;
-    *tcst |= asid;
-}
-
-/* CP0 helpers */
-target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
-{
-    return env->mvp->CP0_MVPControl;
-}
-
-target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
-{
-    return env->mvp->CP0_MVPConf0;
-}
-
-target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
-{
-    return env->mvp->CP0_MVPConf1;
-}
-
-target_ulong helper_mfc0_random(CPUMIPSState *env)
-{
-    return (int32_t)cpu_mips_get_random(env);
-}
-
-target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
-{
-    return env->active_tc.CP0_TCStatus;
-}
-
-target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
-{
-    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
-    if (other_tc == other->current_tc)
-        return other->active_tc.CP0_TCStatus;
-    else
-        return other->tcs[other_tc].CP0_TCStatus;
-}
-
-target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
-{
-    return env->active_tc.CP0_TCBind;
-}
-
-target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
-{
-    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
-    if (other_tc == other->current_tc)
-        return other->active_tc.CP0_TCBind;
-    else
-        return other->tcs[other_tc].CP0_TCBind;
-}
-
-target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
-{
-    return env->active_tc.PC;
-}
-
-target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
-{
-    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-
-    if (other_tc == other->current_tc)
-        return other->active_tc.PC;
-    else
-        return other->tcs[other_tc].PC;
-}
-
-target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
-{
-    return
env->active_tc.CP0_TCHalt; -} - -target_ulong helper_mftc0_tchalt(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - return other->active_tc.CP0_TCHalt; - else - return other->tcs[other_tc].CP0_TCHalt; -} - -target_ulong helper_mfc0_tccontext(CPUMIPSState *env) -{ - return env->active_tc.CP0_TCContext; -} - -target_ulong helper_mftc0_tccontext(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - return other->active_tc.CP0_TCContext; - else - return other->tcs[other_tc].CP0_TCContext; -} - -target_ulong helper_mfc0_tcschedule(CPUMIPSState *env) -{ - return env->active_tc.CP0_TCSchedule; -} - -target_ulong helper_mftc0_tcschedule(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - return other->active_tc.CP0_TCSchedule; - else - return other->tcs[other_tc].CP0_TCSchedule; -} - -target_ulong helper_mfc0_tcschefback(CPUMIPSState *env) -{ - return env->active_tc.CP0_TCScheFBack; -} - -target_ulong helper_mftc0_tcschefback(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - return other->active_tc.CP0_TCScheFBack; - else - return other->tcs[other_tc].CP0_TCScheFBack; -} - -target_ulong helper_mfc0_count(CPUMIPSState *env) -{ - return (int32_t)cpu_mips_get_count(env); -} - -target_ulong helper_mftc0_entryhi(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - return other->CP0_EntryHi; -} - -target_ulong helper_mftc0_cause(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - int32_t tccause; - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) { - tccause = other->CP0_Cause; - } else { - tccause = other->CP0_Cause; - } - - return tccause; -} - -target_ulong helper_mftc0_status(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - return other->CP0_Status; -} - -target_ulong helper_mfc0_lladdr(CPUMIPSState *env) -{ - return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift); -} - -target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel) -{ - return (int32_t)env->CP0_WatchLo[sel]; -} - -target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel) -{ - return env->CP0_WatchHi[sel]; -} - -target_ulong helper_mfc0_debug(CPUMIPSState *env) -{ - target_ulong t0 = env->CP0_Debug; - if (env->hflags & MIPS_HFLAG_DM) - t0 |= 1 << CP0DB_DM; - - return t0; -} - -target_ulong helper_mftc0_debug(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - int32_t tcstatus; - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - tcstatus = other->active_tc.CP0_Debug_tcstatus; - else - tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus; - - /* XXX: Might be wrong, check with EJTAG spec. 
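Every helper_mftc0_* above repeats the same split: the running TC's registers live in active_tc, while parked TCs keep theirs in the tcs[] array, so each cross-TC read has to pick the right copy. A distilled sketch of the pattern, with simplified stand-in types rather than the real CPUMIPSState:

#include <stdio.h>

struct tc_state { int CP0_TCHalt; };
struct cpu_state {
    int current_tc;
    struct tc_state active_tc;   /* registers of the TC that is running */
    struct tc_state tcs[4];      /* saved registers of parked TCs */
};

static int read_tchalt(struct cpu_state *cpu, int tc)
{
    return (tc == cpu->current_tc) ? cpu->active_tc.CP0_TCHalt
                                   : cpu->tcs[tc].CP0_TCHalt;
}

int main(void)
{
    struct cpu_state c = { .current_tc = 0, .active_tc = { 1 } };
    c.tcs[2].CP0_TCHalt = 0;
    printf("%d %d\n", read_tchalt(&c, 0), read_tchalt(&c, 2)); /* 1 0 */
    return 0;
}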
*/ - return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | - (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); -} - -#if defined(TARGET_MIPS64) -target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env) -{ - return env->active_tc.PC; -} - -target_ulong helper_dmfc0_tchalt(CPUMIPSState *env) -{ - return env->active_tc.CP0_TCHalt; -} - -target_ulong helper_dmfc0_tccontext(CPUMIPSState *env) -{ - return env->active_tc.CP0_TCContext; -} - -target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env) -{ - return env->active_tc.CP0_TCSchedule; -} - -target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env) -{ - return env->active_tc.CP0_TCScheFBack; -} - -target_ulong helper_dmfc0_lladdr(CPUMIPSState *env) -{ - return env->lladdr >> env->CP0_LLAddr_shift; -} - -target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel) -{ - return env->CP0_WatchLo[sel]; -} -#endif /* TARGET_MIPS64 */ - -void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t index_p = env->CP0_Index & 0x80000000; - uint32_t tlb_index = arg1 & 0x7fffffff; - if (tlb_index < env->tlb->nb_tlb) { - if (env->insn_flags & ISA_MIPS32R6) { - index_p |= arg1 & 0x80000000; - } - env->CP0_Index = index_p | tlb_index; - } -} - -void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t mask = 0; - uint32_t newval; - - if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) - mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) | - (1 << CP0MVPCo_EVP); - if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) - mask |= (1 << CP0MVPCo_STLB); - newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask); - - // TODO: Enable/disable shared TLB, enable/disable VPEs. - - env->mvp->CP0_MVPControl = newval; -} - -void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t mask; - uint32_t newval; - - mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | - (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); - newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask); - - /* Yield scheduler intercept not implemented. */ - /* Gating storage scheduler intercept not implemented. */ - - // TODO: Enable/disable TCs. - - env->CP0_VPEControl = newval; -} - -void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - uint32_t mask; - uint32_t newval; - - mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | - (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); - newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask); - - /* TODO: Enable/disable TCs. */ - - other->CP0_VPEControl = newval; -} - -target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - /* FIXME: Mask away return zero on read bits. */ - return other->CP0_VPEControl; -} - -target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - return other->CP0_VPEConf0; -} - -void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t mask = 0; - uint32_t newval; - - if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { - if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) - mask |= (0xff << CP0VPEC0_XTC); - mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); - } - newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask); - - // TODO: TC exclusive handling due to ERL/EXL. 
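The mtc0 helpers above all funnel through one read-modify-write idiom: a per-register mask selects the bits software may change, and everything else is carried over from the old value, which is how read-only and reserved fields survive a store. A minimal sketch of the idiom:

#include <stdint.h>
#include <stdio.h>

static uint32_t cp0_masked_write(uint32_t old, uint32_t arg, uint32_t mask)
{
    /* keep the protected bits of old, take the writable bits from arg */
    return (old & ~mask) | (arg & mask);
}

int main(void)
{
    /* Only the low byte is writable: the high bits survive the store. */
    printf("%08x\n", cp0_masked_write(0xAABBCC00u, 0xFFFFFFFFu, 0x000000FFu));
    return 0;   /* prints aabbccff */
}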
- - env->CP0_VPEConf0 = newval; -} - -void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - uint32_t mask = 0; - uint32_t newval; - - mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); - newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask); - - /* TODO: TC exclusive handling due to ERL/EXL. */ - other->CP0_VPEConf0 = newval; -} - -void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t mask = 0; - uint32_t newval; - - if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) - mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) | - (0xff << CP0VPEC1_NCP1); - newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask); - - /* UDI not implemented. */ - /* CP2 not implemented. */ - - // TODO: Handle FPU (CP1) binding. - - env->CP0_VPEConf1 = newval; -} - -void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1) -{ - /* Yield qualifier inputs not implemented. */ - env->CP0_YQMask = 0x00000000; -} - -void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_VPEOpt = arg1 & 0x0000ffff; -} - -void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1) -{ - /* Large physaddr (PABITS) not implemented */ - /* 1k pages not implemented */ - target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE)); - env->CP0_EntryLo0 = (arg1 & 0x3FFFFFFF) | (rxi << (CP0EnLo_XI - 30)); -} - -#if defined(TARGET_MIPS64) -void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1) -{ - uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32); - env->CP0_EntryLo0 = (arg1 & 0x3FFFFFFF) | rxi; -} -#endif - -void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t mask = env->CP0_TCStatus_rw_bitmask; - uint32_t newval; - - newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask); - - env->active_tc.CP0_TCStatus = newval; - sync_c0_tcstatus(env, env->current_tc, newval); -} - -void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - other->active_tc.CP0_TCStatus = arg1; - else - other->tcs[other_tc].CP0_TCStatus = arg1; - sync_c0_tcstatus(other, other_tc, arg1); -} - -void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t mask = (1 << CP0TCBd_TBE); - uint32_t newval; - - if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) - mask |= (1 << CP0TCBd_CurVPE); - newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask); - env->active_tc.CP0_TCBind = newval; -} - -void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - uint32_t mask = (1 << CP0TCBd_TBE); - uint32_t newval; - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) - mask |= (1 << CP0TCBd_CurVPE); - if (other_tc == other->current_tc) { - newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask); - other->active_tc.CP0_TCBind = newval; - } else { - newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask); - other->tcs[other_tc].CP0_TCBind = newval; - } -} - -void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1) -{ - env->active_tc.PC = arg1; - env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS); - env->lladdr = 0ULL; - /* MIPS16 not implemented. 
*/ -} - -void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) { - other->active_tc.PC = arg1; - other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS); - other->lladdr = 0ULL; - /* MIPS16 not implemented. */ - } else { - other->tcs[other_tc].PC = arg1; - other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS); - other->lladdr = 0ULL; - /* MIPS16 not implemented. */ - } -} - -void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1) -{ - MIPSCPU *cpu = mips_env_get_cpu(env); - - env->active_tc.CP0_TCHalt = arg1 & 0x1; - - // TODO: Halt TC / Restart (if allocated+active) TC. - if (env->active_tc.CP0_TCHalt & 1) { - mips_tc_sleep(cpu, env->current_tc); - } else { - mips_tc_wake(cpu, env->current_tc); - } -} - -void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - MIPSCPU *other_cpu = mips_env_get_cpu(other); - - // TODO: Halt TC / Restart (if allocated+active) TC. - - if (other_tc == other->current_tc) - other->active_tc.CP0_TCHalt = arg1; - else - other->tcs[other_tc].CP0_TCHalt = arg1; - - if (arg1 & 1) { - mips_tc_sleep(other_cpu, other_tc); - } else { - mips_tc_wake(other_cpu, other_tc); - } -} - -void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1) -{ - env->active_tc.CP0_TCContext = arg1; -} - -void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - other->active_tc.CP0_TCContext = arg1; - else - other->tcs[other_tc].CP0_TCContext = arg1; -} - -void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1) -{ - env->active_tc.CP0_TCSchedule = arg1; -} - -void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - other->active_tc.CP0_TCSchedule = arg1; - else - other->tcs[other_tc].CP0_TCSchedule = arg1; -} - -void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1) -{ - env->active_tc.CP0_TCScheFBack = arg1; -} - -void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - other->active_tc.CP0_TCScheFBack = arg1; - else - other->tcs[other_tc].CP0_TCScheFBack = arg1; -} - -void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1) -{ - /* Large physaddr (PABITS) not implemented */ - /* 1k pages not implemented */ - target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE)); - env->CP0_EntryLo1 = (arg1 & 0x3FFFFFFF) | (rxi << (CP0EnLo_XI - 30)); -} - -#if defined(TARGET_MIPS64) -void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1) -{ - uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32); - env->CP0_EntryLo1 = (arg1 & 0x3FFFFFFF) | rxi; -} -#endif - -void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF); -} - -void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1) -{ - uint64_t mask = arg1 >> 
(TARGET_PAGE_BITS + 1); - if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) || - (mask == 0x0000 || mask == 0x0003 || mask == 0x000F || - mask == 0x003F || mask == 0x00FF || mask == 0x03FF || - mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) { - env->CP0_PageMask = arg1 & (0x1FFFFFFF & (((unsigned int)TARGET_PAGE_MASK) << 1)); - } -} - -void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1) -{ - /* SmartMIPS not implemented */ - /* Large physaddr (PABITS) not implemented */ - /* 1k pages not implemented */ - env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) | - (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask); -} - -void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1) -{ - if (env->insn_flags & ISA_MIPS32R6) { - if (arg1 < env->tlb->nb_tlb) { - env->CP0_Wired = arg1; - } - } else { - env->CP0_Wired = arg1 % env->tlb->nb_tlb; - } -} - -void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask; -} - -void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask; -} - -void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask; -} - -void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask; -} - -void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask; -} - -void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t mask = 0x0000000F; - - if (env->CP0_Config3 & (1 << CP0C3_ULRI)) { - mask |= (1 << 29); - - if (arg1 & (1 << 29)) { - env->hflags |= MIPS_HFLAG_HWRENA_ULR; - } else { - env->hflags &= ~MIPS_HFLAG_HWRENA_ULR; - } - } - - env->CP0_HWREna = arg1 & mask; -} - -void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1) -{ - cpu_mips_store_count(env, arg1); -} - -void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1) -{ - target_ulong old, val, mask; - mask = (((unsigned int)TARGET_PAGE_MASK) << 1) | 0xFF; - if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) { - mask |= 1 << CP0EnHi_EHINV; - } - - /* 1k pages not implemented */ -#if defined(TARGET_MIPS64) - if (env->insn_flags & ISA_MIPS32R6) { - int entryhi_r = extract64(arg1, 62, 2); - int config0_at = extract32(env->CP0_Config0, 13, 2); - bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0; - if ((entryhi_r == 2) || - (entryhi_r == 1 && (no_supervisor || config0_at == 1))) { - /* skip EntryHi.R field if new value is reserved */ - mask &= ~(0x3ull << 62); - } - } - mask &= env->SEGMask; -#endif - old = env->CP0_EntryHi; - val = (arg1 & mask) | (old & ~mask); - env->CP0_EntryHi = val; - if (env->CP0_Config3 & (1 << CP0C3_MT)) { - sync_c0_entryhi(env, env->current_tc); - } - /* If the ASID changes, flush qemu's TLB. 
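The literal list that helper_mtc0_pagemask accepts on R6 cores above (0x0000, 0x0003, 0x000F, ..., 0xFFFF, after shifting the field down) is exactly the set of masks whose low field is a run of ones of even length, i.e. mask+1 is a power of four, since page sizes grow in factors of 4. A sketch of an equivalent predicate, assuming the field has already been shifted down as in the helper:

#include <stdint.h>
#include <stdio.h>

static int pagemask_ok(uint32_t mask)
{
    uint32_t m = mask & 0xFFFFu;
    return ((m & (m + 1)) == 0)            /* contiguous ones from bit 0 */
        && (((m + 1) & 0x55555555u) != 0); /* even-length run: m+1 is 4^k */
}

int main(void)
{
    printf("%d %d %d\n", pagemask_ok(0x0003), pagemask_ok(0x0007),
           pagemask_ok(0xFFFF));           /* prints: 1 0 1 */
    return 0;
}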
*/ - if ((old & 0xFF) != (val & 0xFF)) - cpu_mips_tlb_flush(env, 1); -} - -void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - other->CP0_EntryHi = arg1; - sync_c0_entryhi(other, other_tc); -} - -void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1) -{ - cpu_mips_store_compare(env, arg1); -} - -void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1) -{ - MIPSCPU *cpu = mips_env_get_cpu(env); - uint32_t val, old; - uint32_t mask = env->CP0_Status_rw_bitmask; - - if (env->insn_flags & ISA_MIPS32R6) { - if (extract32(env->CP0_Status, CP0St_KSU, 2) == 0x3) { - mask &= ~(3 << CP0St_KSU); - } - mask &= ~(0x00180000 & arg1); - } - - val = arg1 & mask; - old = env->CP0_Status; - env->CP0_Status = (env->CP0_Status & ~mask) | val; - if (env->CP0_Config3 & (1 << CP0C3_MT)) { - sync_c0_status(env, env, env->current_tc); - } else { - compute_hflags(env); - } - - if (qemu_loglevel_mask(CPU_LOG_EXEC)) { - qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x", - old, old & env->CP0_Cause & CP0Ca_IP_mask, - val, val & env->CP0_Cause & CP0Ca_IP_mask, - env->CP0_Cause); - switch (env->hflags & MIPS_HFLAG_KSU) { - case MIPS_HFLAG_UM: qemu_log(", UM\n"); break; - case MIPS_HFLAG_SM: qemu_log(", SM\n"); break; - case MIPS_HFLAG_KM: qemu_log("\n"); break; - default: - cpu_abort(CPU(cpu), "Invalid MMU mode!\n"); - break; - } - } -} - -void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - other->CP0_Status = arg1 & ~0xf1000018; - sync_c0_status(env, other, other_tc); -} - -void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1) -{ - /* vectored interrupts not implemented, no performance counters. 
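The guard in helper_mtc0_entryhi earlier in this hunk, the (old & 0xFF) != (val & 0xFF) test, exists because QEMU's software TLB only caches translations tagged for the current ASID, so any change to EntryHi.ASID must drop them all. The decision, distilled into a sketch:

#include <stdint.h>
#include <stdio.h>

static int need_tlb_flush(uint32_t old_entryhi, uint32_t new_entryhi)
{
    return (old_entryhi & 0xFF) != (new_entryhi & 0xFF); /* ASID is bits 7:0 */
}

int main(void)
{
    printf("%d %d\n",
           need_tlb_flush(0x00400001u, 0x00800001u),  /* VPN changed only: 0 */
           need_tlb_flush(0x00400001u, 0x00400002u)); /* ASID changed: 1 */
    return 0;
}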
*/ - env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0); -} - -void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1) -{ - uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS); - env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask); -} - -static void mtc0_cause(CPUMIPSState *cpu, target_ulong arg1) -{ - uint32_t mask = 0x00C00300; - uint32_t old = cpu->CP0_Cause; - - if (cpu->insn_flags & ISA_MIPS32R2) { - mask |= 1 << CP0Ca_DC; - } - if (cpu->insn_flags & ISA_MIPS32R6) { - mask &= ~((1 << CP0Ca_WP) & arg1); - } - - cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask); - - if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) { - if (cpu->CP0_Cause & (1 << CP0Ca_DC)) { - cpu_mips_stop_count(cpu); - } else { - cpu_mips_start_count(cpu); - } - } - -#if 0 - int i; - /* Set/reset software interrupts */ - for (i = 0 ; i < 2 ; i++) { - if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) { - cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i))); - } - } -#endif -} - -void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1) -{ - mtc0_cause(env, arg1); -} - -void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - mtc0_cause(other, arg1); -} - -target_ulong helper_mftc0_epc(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - return other->CP0_EPC; -} - -target_ulong helper_mftc0_ebase(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - return other->CP0_EBase; -} - -void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1) -{ - /* vectored interrupts not implemented */ - env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000); -} - -void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000); -} - -target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - switch (idx) { - case 0: return other->CP0_Config0; - case 1: return other->CP0_Config1; - case 2: return other->CP0_Config2; - case 3: return other->CP0_Config3; - /* 4 and 5 are reserved. 
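mtc0_cause() above starts or stops the CP0 Count timer only on an actual edge of the Cause.DC (Disable Count) bit, detected by XORing the old and new values, rather than on every Cause write. A sketch of that edge detection (the bit position 27 is an assumption taken from the R2 Cause layout, not stated in this hunk):

#include <stdint.h>
#include <stdio.h>

#define CAUSE_DC_BIT 27   /* assumption: Cause.DC position on R2 cores */

static void on_cause_write(uint32_t old, uint32_t val)
{
    if ((old ^ val) & (1u << CAUSE_DC_BIT)) {      /* DC actually toggled */
        if (val & (1u << CAUSE_DC_BIT))
            puts("stop count timer");
        else
            puts("start count timer");
    }
}

int main(void)
{
    on_cause_write(0, 1u << CAUSE_DC_BIT);   /* prints: stop count timer */
    on_cause_write(0, 0);                    /* no edge: nothing happens */
    return 0;
}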
*/ - case 6: return other->CP0_Config6; - case 7: return other->CP0_Config7; - default: - break; - } - return 0; -} - -void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007); -} - -void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1) -{ - /* tertiary/secondary caches not implemented */ - env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF); -} - -void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) | - (arg1 & env->CP0_Config4_rw_bitmask); -} - -void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) | - (arg1 & env->CP0_Config5_rw_bitmask); - compute_hflags(env); -} - -void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1) -{ - target_long mask = env->CP0_LLAddr_rw_bitmask; - arg1 = arg1 << env->CP0_LLAddr_shift; - env->lladdr = (env->lladdr & ~mask) | (arg1 & mask); -} - -void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) -{ - /* Watch exceptions for instructions, data loads, data stores - not implemented. */ - env->CP0_WatchLo[sel] = (arg1 & ~0x7); -} - -void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) -{ - env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8); - env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7); -} - -void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1) -{ - target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1; - env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask); -} - -void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_Framemask = arg1; /* XXX */ -} - -void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120); - if (arg1 & (1 << CP0DB_DM)) - env->hflags |= MIPS_HFLAG_DM; - else - env->hflags &= ~MIPS_HFLAG_DM; -} - -void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - /* XXX: Might be wrong, check with EJTAG spec. 
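helper_mfc0_lladdr()/helper_mtc0_lladdr() above expose env->lladdr scaled by CP0_LLAddr_shift: the architectural LLAddr register holds the link-load address right-shifted by a core-specific amount, so reads shift down and writes shift back up. A sketch of the round trip (the shift value 4 is an arbitrary assumption for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t lladdr = 0x80001000u;  /* internal state: full physical address */
    int shift = 4;                  /* assumption: core-specific LLAddr shift */
    uint32_t visible  = (uint32_t)(lladdr >> shift); /* what mfc0 would return */
    uint64_t restored = (uint64_t)visible << shift;  /* what mtc0 would store */
    printf("%08x %llx\n", visible, (unsigned long long)restored);
    return 0;                       /* prints: 08000100 80001000 */
}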
*/ - if (other_tc == other->current_tc) - other->active_tc.CP0_Debug_tcstatus = val; - else - other->tcs[other_tc].CP0_Debug_tcstatus = val; - other->CP0_Debug = (other->CP0_Debug & - ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | - (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); -} - -void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_Performance0 = arg1 & 0x000007ff; -} - -void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_TagLo = arg1 & 0xFFFFFCF6; -} - -void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_DataLo = arg1; /* XXX */ -} - -void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_TagHi = arg1; /* XXX */ -} - -void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1) -{ - env->CP0_DataHi = arg1; /* XXX */ -} - -/* MIPS MT functions */ -target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - return other->active_tc.gpr[sel]; - else - return other->tcs[other_tc].gpr[sel]; -} - -target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - return other->active_tc.LO[sel]; - else - return other->tcs[other_tc].LO[sel]; -} - -target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - return other->active_tc.HI[sel]; - else - return other->tcs[other_tc].HI[sel]; -} - -target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - return other->active_tc.ACX[sel]; - else - return other->tcs[other_tc].ACX[sel]; -} - -target_ulong helper_mftdsp(CPUMIPSState *env) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - return other->active_tc.DSPControl; - else - return other->tcs[other_tc].DSPControl; -} - -void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - other->active_tc.gpr[sel] = arg1; - else - other->tcs[other_tc].gpr[sel] = arg1; -} - -void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - other->active_tc.LO[sel] = arg1; - else - other->tcs[other_tc].LO[sel] = arg1; -} - -void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - other->active_tc.HI[sel] = arg1; - else - other->tcs[other_tc].HI[sel] = arg1; -} - -void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = 
mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - other->active_tc.ACX[sel] = arg1; - else - other->tcs[other_tc].ACX[sel] = arg1; -} - -void helper_mttdsp(CPUMIPSState *env, target_ulong arg1) -{ - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); - - if (other_tc == other->current_tc) - other->active_tc.DSPControl = arg1; - else - other->tcs[other_tc].DSPControl = arg1; -} - -/* MIPS MT functions */ -target_ulong helper_dmt(void) -{ - // TODO - return 0; -} - -target_ulong helper_emt(void) -{ - // TODO - return 0; -} - -target_ulong helper_dvpe(CPUMIPSState *env) -{ - //struct uc_struct *uc = env->uc; - //CPUState *other_cs = uc->cpu; - target_ulong prev = env->mvp->CP0_MVPControl; - - // TODO: #642 SMP groups - /* - CPU_FOREACH(other_cs) { - MIPSCPU *other_cpu = MIPS_CPU(uc, other_cs); - // Turn off all VPEs except the one executing the dvpe. - if (&other_cpu->env != env) { - other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); - mips_vpe_sleep(other_cpu); - } - } - */ - return prev; -} - -target_ulong helper_evpe(CPUMIPSState *env) -{ - //struct uc_struct *uc = env->uc; - //CPUState *other_cs = uc->cpu; - target_ulong prev = env->mvp->CP0_MVPControl; - - // TODO: #642 SMP groups - /* - CPU_FOREACH(other_cs) { - MIPSCPU *other_cpu = MIPS_CPU(uc, other_cs); - - if (&other_cpu->env != env - // If the VPE is WFI, don't disturb its sleep. - && !mips_vpe_is_wfi(other_cpu)) { - // Enable the VPE. - other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); - mips_vpe_wake(other_cpu); // And wake it up. - } - } - */ - return prev; -} -#endif /* !CONFIG_USER_ONLY */ - -void helper_fork(target_ulong arg1, target_ulong arg2) -{ - // arg1 = rt, arg2 = rs - // TODO: store to TC register -} - -target_ulong helper_yield(CPUMIPSState *env, target_ulong arg) -{ - target_long arg1 = arg; - - if (arg1 < 0) { - /* No scheduling policy implemented. */ - if (arg1 != -2) { - if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) && - env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) { - env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); - env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT; - helper_raise_exception(env, EXCP_THREAD); - } - } - } else if (arg1 == 0) { - if (0 /* TODO: TC underflow */) { - env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); - helper_raise_exception(env, EXCP_THREAD); - } else { - // TODO: Deallocate TC - } - } else if (arg1 > 0) { - /* Yield qualifier inputs not implemented. */ - env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); - env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT; - helper_raise_exception(env, EXCP_THREAD); - } - return env->CP0_YQMask; -} - -#ifndef CONFIG_USER_ONLY -/* TLB management */ -static void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global) -{ - MIPSCPU *cpu = mips_env_get_cpu(env); - - /* Flush qemu's TLB and discard all shadowed entries. */ - tlb_flush(CPU(cpu), flush_global); - env->tlb->tlb_in_use = env->tlb->nb_tlb; -} - -static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first) -{ - /* Discard entries from env->tlb[first] onwards. 
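r4k_mips_tlb_flush_extra(), whose body begins just below, trims the shadow region of the software TLB: entries at indices at or above nb_tlb are stale copies kept only so previously mapped pages remain translatable, and tlb_in_use tracks how far that region currently extends. A toy model of the bookkeeping, with printouts standing in for r4k_invalidate_tlb():

#include <stdio.h>

struct soft_tlb { int nb_tlb; int tlb_in_use; };

static void flush_shadow(struct soft_tlb *t, int first)
{
    while (t->tlb_in_use > first) {
        t->tlb_in_use--;   /* stands in for r4k_invalidate_tlb(env, idx, 0) */
        printf("invalidate shadow entry %d\n", t->tlb_in_use);
    }
}

int main(void)
{
    struct soft_tlb t = { .nb_tlb = 16, .tlb_in_use = 18 };
    flush_shadow(&t, t.nb_tlb);   /* drops shadow entries 17 and 16 */
    return 0;
}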
*/ - while (env->tlb->tlb_in_use > first) { - r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0); - } -} - -static void r4k_fill_tlb(CPUMIPSState *env, int idx) -{ - r4k_tlb_t *tlb; - - /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */ - tlb = &env->tlb->mmu.r4k.tlb[idx]; - if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) { - tlb->EHINV = 1; - return; - } - tlb->EHINV = 0; - tlb->VPN = env->CP0_EntryHi & (((unsigned int)TARGET_PAGE_MASK) << 1); -#if defined(TARGET_MIPS64) - tlb->VPN &= env->SEGMask; -#endif - tlb->ASID = env->CP0_EntryHi & 0xFF; - tlb->PageMask = env->CP0_PageMask; - tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; - tlb->V0 = (env->CP0_EntryLo0 & 2) != 0; - tlb->D0 = (env->CP0_EntryLo0 & 4) != 0; - tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7; - tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1; - tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1; - tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12; - tlb->V1 = (env->CP0_EntryLo1 & 2) != 0; - tlb->D1 = (env->CP0_EntryLo1 & 4) != 0; - tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7; - tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1; - tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1; - tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12; -} - -void r4k_helper_tlbinv(CPUMIPSState *env) -{ - int idx; - r4k_tlb_t *tlb; - uint8_t ASID = env->CP0_EntryHi & 0xFF; - - for (idx = 0; idx < env->tlb->nb_tlb; idx++) { - tlb = &env->tlb->mmu.r4k.tlb[idx]; - if (!tlb->G && tlb->ASID == ASID) { - tlb->EHINV = 1; - } - } - cpu_mips_tlb_flush(env, 1); -} - -void r4k_helper_tlbinvf(CPUMIPSState *env) -{ - int idx; - - for (idx = 0; idx < env->tlb->nb_tlb; idx++) { - env->tlb->mmu.r4k.tlb[idx].EHINV = 1; - } - cpu_mips_tlb_flush(env, 1); -} - -void r4k_helper_tlbwi(CPUMIPSState *env) -{ - r4k_tlb_t *tlb; - int idx; - target_ulong VPN; - uint8_t ASID; - bool G, V0, D0, V1, D1; - - idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; - tlb = &env->tlb->mmu.r4k.tlb[idx]; - VPN = env->CP0_EntryHi & (((unsigned int)TARGET_PAGE_MASK) << 1); -#if defined(TARGET_MIPS64) - VPN &= env->SEGMask; -#endif - ASID = env->CP0_EntryHi & 0xff; - G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; - V0 = (env->CP0_EntryLo0 & 2) != 0; - D0 = (env->CP0_EntryLo0 & 4) != 0; - V1 = (env->CP0_EntryLo1 & 2) != 0; - D1 = (env->CP0_EntryLo1 & 4) != 0; - - /* Discard cached TLB entries, unless tlbwi is just upgrading access - permissions on the current entry. */ - if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G || - (tlb->V0 && !V0) || (tlb->D0 && !D0) || - (tlb->V1 && !V1) || (tlb->D1 && !D1)) { - r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); - } - - r4k_invalidate_tlb(env, idx, 0); - r4k_fill_tlb(env, idx); -} - -void r4k_helper_tlbwr(CPUMIPSState *env) -{ - int r = cpu_mips_get_random(env); - - r4k_invalidate_tlb(env, r, 1); - r4k_fill_tlb(env, r); -} - -void r4k_helper_tlbp(CPUMIPSState *env) -{ - r4k_tlb_t *tlb; - target_ulong mask; - target_ulong tag; - target_ulong VPN; - uint8_t ASID; - int i; - - ASID = env->CP0_EntryHi & 0xFF; - for (i = 0; i < env->tlb->nb_tlb; i++) { - tlb = &env->tlb->mmu.r4k.tlb[i]; - /* 1k pages are not supported. */ - mask = tlb->PageMask | ~(((unsigned int)TARGET_PAGE_MASK) << 1); - tag = env->CP0_EntryHi & ~mask; - VPN = tlb->VPN & ~mask; -#if defined(TARGET_MIPS64) - tag &= env->SEGMask; -#endif - /* Check ASID, virtual page number & size */ - if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) { - /* TLB match */ - env->CP0_Index = i; - break; - } - } - if (i == env->tlb->nb_tlb) { - /* No match. 
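r4k_helper_tlbp() above probes every entry with the same predicate: widen the VPN comparison by the entry's PageMask, require ASID equality unless the entry is global, and skip EHINV'd entries. Distilled into a sketch (the 0x1FFF low mask assumes a 4 KB base page, matching ~(TARGET_PAGE_MASK << 1) in the helper):

#include <stdint.h>
#include <stdio.h>

struct tlb_ent { uint32_t VPN, PageMask; uint8_t ASID, G, EHINV; };

static int tlb_match(const struct tlb_ent *e, uint32_t entryhi)
{
    uint32_t mask = e->PageMask | 0x1FFF;  /* assumption: 4K base page size */
    uint32_t tag  = entryhi & ~mask;       /* VPN being looked up */
    uint32_t vpn  = e->VPN & ~mask;        /* VPN stored in the entry */
    uint8_t  asid = entryhi & 0xFF;
    return !e->EHINV && (e->G || e->ASID == asid) && vpn == tag;
}

int main(void)
{
    struct tlb_ent e = { .VPN = 0x00400000, .PageMask = 0, .ASID = 7 };
    printf("%d\n", tlb_match(&e, 0x00400007));  /* 1: same page, same ASID */
    return 0;
}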
Discard any shadow entries, if any of them match. */ - for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) { - tlb = &env->tlb->mmu.r4k.tlb[i]; - /* 1k pages are not supported. */ - mask = tlb->PageMask | ~(((unsigned int)TARGET_PAGE_MASK) << 1); - tag = env->CP0_EntryHi & ~mask; - VPN = tlb->VPN & ~mask; -#if defined(TARGET_MIPS64) - tag &= env->SEGMask; -#endif - /* Check ASID, virtual page number & size */ - if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) { - r4k_mips_tlb_flush_extra (env, i); - break; - } - } - - env->CP0_Index |= 0x80000000; - } -} - -void r4k_helper_tlbr(CPUMIPSState *env) -{ - r4k_tlb_t *tlb; - uint8_t ASID; - int idx; - - ASID = env->CP0_EntryHi & 0xFF; - idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; - tlb = &env->tlb->mmu.r4k.tlb[idx]; - - /* If this will change the current ASID, flush qemu's TLB. */ - if (ASID != tlb->ASID) - cpu_mips_tlb_flush (env, 1); - - r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); - - if (tlb->EHINV) { - env->CP0_EntryHi = 1 << CP0EnHi_EHINV; - env->CP0_PageMask = 0; - env->CP0_EntryLo0 = 0; - env->CP0_EntryLo1 = 0; - } else { - env->CP0_EntryHi = tlb->VPN | tlb->ASID; - env->CP0_PageMask = tlb->PageMask; - env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) | - ((target_ulong)tlb->RI0 << CP0EnLo_RI) | - ((target_ulong)tlb->XI0 << CP0EnLo_XI) | - (tlb->C0 << 3) | (tlb->PFN[0] >> 6); - env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) | - ((target_ulong)tlb->RI1 << CP0EnLo_RI) | - ((target_ulong)tlb->XI1 << CP0EnLo_XI) | - (tlb->C1 << 3) | (tlb->PFN[1] >> 6); - } -} - -void helper_tlbwi(CPUMIPSState *env) -{ - env->tlb->helper_tlbwi(env); -} - -void helper_tlbwr(CPUMIPSState *env) -{ - env->tlb->helper_tlbwr(env); -} - -void helper_tlbp(CPUMIPSState *env) -{ - env->tlb->helper_tlbp(env); -} - -void helper_tlbr(CPUMIPSState *env) -{ - env->tlb->helper_tlbr(env); -} - -void helper_tlbinv(CPUMIPSState *env) -{ - env->tlb->helper_tlbinv(env); -} - -void helper_tlbinvf(CPUMIPSState *env) -{ - env->tlb->helper_tlbinvf(env); -} - -/* Specials */ -target_ulong helper_di(CPUMIPSState *env) -{ - target_ulong t0 = env->CP0_Status; - - env->CP0_Status = t0 & ~(1 << CP0St_IE); - return t0; -} - -target_ulong helper_ei(CPUMIPSState *env) -{ - target_ulong t0 = env->CP0_Status; - - env->CP0_Status = t0 | (1 << CP0St_IE); - return t0; -} - -static void debug_pre_eret(CPUMIPSState *env) -{ - if (qemu_loglevel_mask(CPU_LOG_EXEC)) { - qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, - env->active_tc.PC, env->CP0_EPC); - if (env->CP0_Status & (1 << CP0St_ERL)) - qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC); - if (env->hflags & MIPS_HFLAG_DM) - qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); - qemu_log("\n"); - } -} - -static void debug_post_eret(CPUMIPSState *env) -{ - MIPSCPU *cpu = mips_env_get_cpu(env); - - if (qemu_loglevel_mask(CPU_LOG_EXEC)) { - qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, - env->active_tc.PC, env->CP0_EPC); - if (env->CP0_Status & (1 << CP0St_ERL)) - qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC); - if (env->hflags & MIPS_HFLAG_DM) - qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); - switch (env->hflags & MIPS_HFLAG_KSU) { - case MIPS_HFLAG_UM: qemu_log(", UM\n"); break; - case MIPS_HFLAG_SM: qemu_log(", SM\n"); break; - case MIPS_HFLAG_KM: qemu_log("\n"); break; - default: - cpu_abort(CPU(cpu), "Invalid MMU mode!\n"); - break; - } - } -} - -static void set_pc(CPUMIPSState *env, target_ulong error_pc) -{ - env->active_tc.PC = error_pc & 
~(target_ulong)1; - if (error_pc & 1) { - env->hflags |= MIPS_HFLAG_M16; - } else { - env->hflags &= ~(MIPS_HFLAG_M16); - } -} - -void helper_eret(CPUMIPSState *env) -{ - debug_pre_eret(env); - if (env->CP0_Status & (1 << CP0St_ERL)) { - set_pc(env, env->CP0_ErrorEPC); - env->CP0_Status &= ~(1 << CP0St_ERL); - } else { - set_pc(env, env->CP0_EPC); - env->CP0_Status &= ~(1 << CP0St_EXL); - } - compute_hflags(env); - debug_post_eret(env); - env->lladdr = 1; -} - -void helper_deret(CPUMIPSState *env) -{ - debug_pre_eret(env); - set_pc(env, env->CP0_DEPC); - - env->hflags &= MIPS_HFLAG_DM; - compute_hflags(env); - debug_post_eret(env); - env->lladdr = 1; -} -#endif /* !CONFIG_USER_ONLY */ - -target_ulong helper_rdhwr_cpunum(CPUMIPSState *env) -{ - if ((env->hflags & MIPS_HFLAG_CP0) || - (env->CP0_HWREna & (1 << 0))) - return env->CP0_EBase & 0x3ff; - else - helper_raise_exception(env, EXCP_RI); - - return 0; -} - -target_ulong helper_rdhwr_synci_step(CPUMIPSState *env) -{ - if ((env->hflags & MIPS_HFLAG_CP0) || - (env->CP0_HWREna & (1 << 1))) - return env->SYNCI_Step; - else - helper_raise_exception(env, EXCP_RI); - - return 0; -} - -target_ulong helper_rdhwr_cc(CPUMIPSState *env) -{ - if ((env->hflags & MIPS_HFLAG_CP0) || - (env->CP0_HWREna & (1 << 2))) - return env->CP0_Count; - else - helper_raise_exception(env, EXCP_RI); - - return 0; -} - -target_ulong helper_rdhwr_ccres(CPUMIPSState *env) -{ - if ((env->hflags & MIPS_HFLAG_CP0) || - (env->CP0_HWREna & (1 << 3))) - return env->CCRes; - else - helper_raise_exception(env, EXCP_RI); - - return 0; -} - -void helper_pmon(CPUMIPSState *env, int function) -{ - function /= 2; - switch (function) { - case 2: /* TODO: char inbyte(int waitflag); */ - if (env->active_tc.gpr[4] == 0) - env->active_tc.gpr[2] = -1; - /* Fall through */ - case 11: /* TODO: char inbyte (void); */ - env->active_tc.gpr[2] = -1; - break; - case 3: - case 12: - printf("%c", (char)(env->active_tc.gpr[4] & 0xFF)); - break; - case 17: - break; - case 158: - { - unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4]; - printf("%s", fmt); - } - break; - } -} - -void helper_wait(CPUMIPSState *env) -{ - CPUState *cs = CPU(mips_env_get_cpu(env)); - - cs->halted = 1; - cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); - helper_raise_exception(env, EXCP_HLT); -} - -#if !defined(CONFIG_USER_ONLY) - -void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - int access_type, int is_user, - uintptr_t retaddr) -{ - MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); - CPUMIPSState *env = &cpu->env; - int error_code = 0; - int excp; - - env->CP0_BadVAddr = addr; - - if (access_type == MMU_DATA_STORE) { - excp = EXCP_AdES; - } else { - excp = EXCP_AdEL; - if (access_type == MMU_INST_FETCH) { - error_code |= EXCP_INST_NOTAVAIL; - } - } - - do_raise_exception_err(env, excp, error_code, retaddr); -} - -void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, - uintptr_t retaddr) -{ - int ret; - - ret = mips_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); - if (ret) { - MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); - CPUMIPSState *env = &cpu->env; - - do_raise_exception_err(env, cs->exception_index, - env->error_code, retaddr); - } -} - -void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr, - bool is_write, bool is_exec, int unused, - unsigned size) -{ - MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); - CPUMIPSState *env = &cpu->env; - - /* - * Raising an exception with KVM enabled will crash because it won't be from - * the main execution loop so the longjmp won't have a matching setjmp. 
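The helper_rdhwr_* functions above share one access rule: a hardware register is readable from user mode only when CP0 is usable (kernel/debug context) or the matching HWREna enable bit is set; otherwise a Reserved Instruction exception is raised. The check, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

static int rdhwr_allowed(int cp0_usable, uint32_t hwrena, int regno)
{
    /* kernel mode bypasses HWREna; user mode needs the per-register bit */
    return cp0_usable || (hwrena & (1u << regno));
}

int main(void)
{
    printf("%d %d\n",
           rdhwr_allowed(0, 0x4, 2),    /* user mode, CC enabled: 1 */
           rdhwr_allowed(0, 0x0, 2));   /* user mode, disabled: 0 (-> RI) */
    return 0;
}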
- * Until we can trigger a bus error exception through KVM lets just ignore - * the access. - */ - if (is_exec) { - helper_raise_exception(env, EXCP_IBE); - } else { - helper_raise_exception(env, EXCP_DBE); - } -} -#endif /* !CONFIG_USER_ONLY */ - -/* Complex FPU operations which may need stack space. */ - -#define FLOAT_TWO32 make_float32(1 << 30) -#define FLOAT_TWO64 make_float64(1ULL << 62) -#define FP_TO_INT32_OVERFLOW 0x7fffffff -#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL - -/* convert MIPS rounding mode in FCR31 to IEEE library */ -unsigned int ieee_rm[] = { - float_round_nearest_even, - float_round_to_zero, - float_round_up, - float_round_down -}; - -static inline void restore_rounding_mode(CPUMIPSState *env) -{ - set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], - &env->active_fpu.fp_status); -} - -static inline void restore_flush_mode(CPUMIPSState *env) -{ - set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, - &env->active_fpu.fp_status); -} - -target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg) -{ - target_ulong arg1 = 0; - - switch (reg) { - case 0: - arg1 = (int32_t)env->active_fpu.fcr0; - break; - case 1: - /* UFR Support - Read Status FR */ - if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) { - if (env->CP0_Config5 & (1 << CP0C5_UFR)) { - arg1 = (int32_t) - ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR); - } else { - helper_raise_exception(env, EXCP_RI); - } - } - break; - case 25: - arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1); - break; - case 26: - arg1 = env->active_fpu.fcr31 & 0x0003f07c; - break; - case 28: - arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4); - break; - default: - arg1 = (int32_t)env->active_fpu.fcr31; - break; - } - - return arg1; -} - -void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt) -{ - switch (fs) { - case 1: - /* UFR Alias - Reset Status FR */ - if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) { - return; - } - if (env->CP0_Config5 & (1 << CP0C5_UFR)) { - env->CP0_Status &= ~(1 << CP0St_FR); - compute_hflags(env); - } else { - helper_raise_exception(env, EXCP_RI); - } - break; - case 4: - /* UNFR Alias - Set Status FR */ - if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) { - return; - } - if (env->CP0_Config5 & (1 << CP0C5_UFR)) { - env->CP0_Status |= (1 << CP0St_FR); - compute_hflags(env); - } else { - helper_raise_exception(env, EXCP_RI); - } - break; - case 25: - if ((env->insn_flags & ISA_MIPS32R6) || (arg1 & 0xffffff00)) { - return; - } - env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) | - ((arg1 & 0x1) << 23); - break; - case 26: - if (arg1 & 0x007c0000) - return; - env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c); - break; - case 28: - if (arg1 & 0x007c0000) - return; - env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) | - ((arg1 & 0x4) << 22); - break; - case 31: - if (env->insn_flags & ISA_MIPS32R6) { - uint32_t mask = 0xfefc0000; - env->active_fpu.fcr31 = (arg1 & ~mask) | - (env->active_fpu.fcr31 & mask); - } else if (!(arg1 & 0x007c0000)) { - env->active_fpu.fcr31 = arg1; - } - break; - default: - return; - } - /* set rounding mode */ - restore_rounding_mode(env); - /* set flush-to-zero mode */ - restore_flush_mode(env); - set_float_exception_flags(0, &env->active_fpu.fp_status); - if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31)) - 
do_raise_exception(env, EXCP_FPE, GETPC()); -} - -int ieee_ex_to_mips(int xcpt) -{ - int ret = 0; - if (xcpt) { - if (xcpt & float_flag_invalid) { - ret |= FP_INVALID; - } - if (xcpt & float_flag_overflow) { - ret |= FP_OVERFLOW; - } - if (xcpt & float_flag_underflow) { - ret |= FP_UNDERFLOW; - } - if (xcpt & float_flag_divbyzero) { - ret |= FP_DIV0; - } - if (xcpt & float_flag_inexact) { - ret |= FP_INEXACT; - } - } - return ret; -} - -static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc) -{ - int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status)); - - SET_FP_CAUSE(env->active_fpu.fcr31, tmp); - - if (tmp) { - set_float_exception_flags(0, &env->active_fpu.fp_status); - - if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) { - do_raise_exception(env, EXCP_FPE, pc); - } else { - UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp); - } - } -} - -/* Float support. - Single precision routines have an "s" suffix, double precision a - "d" suffix, 32-bit integer "w", 64-bit integer "l", paired single "ps", - paired single lower "pl", paired single upper "pu". */ - -/* unary operations, modifying fp status */ -uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0) -{ - fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fdt0; -} - -uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0) -{ - fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fst0; -} - -uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0) -{ - uint64_t fdt2; - - fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fdt2; -} - -uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0) -{ - uint64_t fdt2; - - fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fdt2; -} - -uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0) -{ - uint64_t fdt2; - - fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fdt2; -} - -uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint64_t dt2; - - dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - dt2 = FP_TO_INT64_OVERFLOW; - } - update_fcr31(env, GETPC()); - return dt2; -} - -uint64_t helper_float_cvtl_s(CPUMIPSState *env, uint32_t fst0) -{ - uint64_t dt2; - - dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - dt2 = FP_TO_INT64_OVERFLOW; - } - update_fcr31(env, GETPC()); - return dt2; -} - -uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0) -{ - uint32_t fst2; - uint32_t fsth2; - - fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); - fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return ((uint64_t)fsth2 << 32) | fst2; -} - -uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0) -{ - uint32_t wt2; - uint32_t wth2; - int excp, excph; - - wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); - excp = get_float_exception_flags(&env->active_fpu.fp_status); - if (excp & (float_flag_overflow | float_flag_invalid)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - - set_float_exception_flags(0, &env->active_fpu.fp_status); - wth2 = float32_to_int32(fdt0 >> 32,
&env->active_fpu.fp_status); - excph = get_float_exception_flags(&env->active_fpu.fp_status); - if (excph & (float_flag_overflow | float_flag_invalid)) { - wth2 = FP_TO_INT32_OVERFLOW; - } - - set_float_exception_flags(excp | excph, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - - return ((uint64_t)wth2 << 32) | wt2; -} - -uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint32_t fst2; - - fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fst2; -} - -uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0) -{ - uint32_t fst2; - - fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fst2; -} - -uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0) -{ - uint32_t fst2; - - fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fst2; -} - -uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0) -{ - uint32_t wt2; - - wt2 = wt0; - update_fcr31(env, GETPC()); - return wt2; -} - -uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0) -{ - uint32_t wt2; - - wt2 = wth0; - update_fcr31(env, GETPC()); - return wt2; -} - -uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0) -{ - uint32_t wt2; - - wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - return wt2; -} - -uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint32_t wt2; - - wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - update_fcr31(env, GETPC()); - return wt2; -} - -uint64_t helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint64_t dt2; - - set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); - dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - dt2 = FP_TO_INT64_OVERFLOW; - } - update_fcr31(env, GETPC()); - return dt2; -} - -uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0) -{ - uint64_t dt2; - - set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); - dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - dt2 = FP_TO_INT64_OVERFLOW; - } - update_fcr31(env, GETPC()); - return dt2; -} - -uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint32_t wt2; - - set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); - wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - update_fcr31(env, GETPC()); - return wt2; -} - -uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0) -{ - uint32_t wt2; - - set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); - wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if 
(get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - update_fcr31(env, GETPC()); - return wt2; -} - -uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint64_t dt2; - - dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - dt2 = FP_TO_INT64_OVERFLOW; - } - update_fcr31(env, GETPC()); - return dt2; -} - -uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0) -{ - uint64_t dt2; - - dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - dt2 = FP_TO_INT64_OVERFLOW; - } - update_fcr31(env, GETPC()); - return dt2; -} - -uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint32_t wt2; - - wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - update_fcr31(env, GETPC()); - return wt2; -} - -uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0) -{ - uint32_t wt2; - - wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - update_fcr31(env, GETPC()); - return wt2; -} - -uint64_t helper_float_ceill_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint64_t dt2; - - set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); - dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - dt2 = FP_TO_INT64_OVERFLOW; - } - update_fcr31(env, GETPC()); - return dt2; -} - -uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0) -{ - uint64_t dt2; - - set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); - dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - dt2 = FP_TO_INT64_OVERFLOW; - } - update_fcr31(env, GETPC()); - return dt2; -} - -uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint32_t wt2; - - set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); - wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - update_fcr31(env, GETPC()); - return wt2; -} - -uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0) -{ - uint32_t wt2; - - set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); - wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - update_fcr31(env, GETPC()); - return wt2; -} - -uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint64_t dt2; - - set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); - dt2 = float64_to_int64(fdt0, 
&env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - dt2 = FP_TO_INT64_OVERFLOW; - } - update_fcr31(env, GETPC()); - return dt2; -} - -uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0) -{ - uint64_t dt2; - - set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); - dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - dt2 = FP_TO_INT64_OVERFLOW; - } - update_fcr31(env, GETPC()); - return dt2; -} - -uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint32_t wt2; - - set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); - wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - update_fcr31(env, GETPC()); - return wt2; -} - -uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0) -{ - uint32_t wt2; - - set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); - wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); - restore_rounding_mode(env); - if (get_float_exception_flags(&env->active_fpu.fp_status) - & (float_flag_invalid | float_flag_overflow)) { - wt2 = FP_TO_INT32_OVERFLOW; - } - update_fcr31(env, GETPC()); - return wt2; -} - -/* unary operations, not modifying fp status */ -#define FLOAT_UNOP(name) \ -uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \ -{ \ - return float64_ ## name(fdt0); \ -} \ -uint32_t helper_float_ ## name ## _s(uint32_t fst0) \ -{ \ - return float32_ ## name(fst0); \ -} \ -uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \ -{ \ - uint32_t wt0; \ - uint32_t wth0; \ - \ - wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \ - wth0 = float32_ ## name(fdt0 >> 32); \ - return ((uint64_t)wth0 << 32) | wt0; \ -} -FLOAT_UNOP(abs) -FLOAT_UNOP(chs) -#undef FLOAT_UNOP - -#define FLOAT_FMADDSUB(name, bits, muladd_arg) \ -uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \ - uint ## bits ## _t fs, \ - uint ## bits ## _t ft, \ - uint ## bits ## _t fd) \ -{ \ - uint ## bits ## _t fdret; \ - \ - fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg, \ - &env->active_fpu.fp_status); \ - update_fcr31(env, GETPC()); \ - return fdret; \ -} - -FLOAT_FMADDSUB(maddf_s, 32, 0) -FLOAT_FMADDSUB(maddf_d, 64, 0) -FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_product) -FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_product) -#undef FLOAT_FMADDSUB - -#define FLOAT_MINMAX(name, bits, minmaxfunc) \ -uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \ - uint ## bits ## _t fs, \ - uint ## bits ## _t ft) \ -{ \ - uint ## bits ## _t fdret; \ - \ - fdret = float ## bits ## _ ## minmaxfunc(fs, ft, \ - &env->active_fpu.fp_status); \ - update_fcr31(env, GETPC()); \ - return fdret; \ -} - -FLOAT_MINMAX(max_s, 32, maxnum) -FLOAT_MINMAX(max_d, 64, maxnum) -FLOAT_MINMAX(maxa_s, 32, maxnummag) -FLOAT_MINMAX(maxa_d, 64, maxnummag) - -FLOAT_MINMAX(min_s, 32, minnum) -FLOAT_MINMAX(min_d, 64, minnum) -FLOAT_MINMAX(mina_s, 32, minnummag) -FLOAT_MINMAX(mina_d, 64, minnummag) -#undef FLOAT_MINMAX - -#define FLOAT_RINT(name, bits) \ -uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \ - uint ## bits ## _t fs) \ -{ \ - uint ## bits ## _t fdret; \ - \ - 
fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \ - update_fcr31(env, GETPC()); \ - return fdret; \ -} - -FLOAT_RINT(rint_s, 32) -FLOAT_RINT(rint_d, 64) -#undef FLOAT_RINT - -#define FLOAT_CLASS_SIGNALING_NAN 0x001 -#define FLOAT_CLASS_QUIET_NAN 0x002 -#define FLOAT_CLASS_NEGATIVE_INFINITY 0x004 -#define FLOAT_CLASS_NEGATIVE_NORMAL 0x008 -#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010 -#define FLOAT_CLASS_NEGATIVE_ZERO 0x020 -#define FLOAT_CLASS_POSITIVE_INFINITY 0x040 -#define FLOAT_CLASS_POSITIVE_NORMAL 0x080 -#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100 -#define FLOAT_CLASS_POSITIVE_ZERO 0x200 - -#define FLOAT_CLASS(name, bits) \ -uint ## bits ## _t helper_float_ ## name (uint ## bits ## _t arg) \ -{ \ - if (float ## bits ## _is_signaling_nan(arg)) { \ - return FLOAT_CLASS_SIGNALING_NAN; \ - } else if (float ## bits ## _is_quiet_nan(arg)) { \ - return FLOAT_CLASS_QUIET_NAN; \ - } else if (float ## bits ## _is_neg(arg)) { \ - if (float ## bits ## _is_infinity(arg)) { \ - return FLOAT_CLASS_NEGATIVE_INFINITY; \ - } else if (float ## bits ## _is_zero(arg)) { \ - return FLOAT_CLASS_NEGATIVE_ZERO; \ - } else if (float ## bits ## _is_zero_or_denormal(arg)) { \ - return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \ - } else { \ - return FLOAT_CLASS_NEGATIVE_NORMAL; \ - } \ - } else { \ - if (float ## bits ## _is_infinity(arg)) { \ - return FLOAT_CLASS_POSITIVE_INFINITY; \ - } else if (float ## bits ## _is_zero(arg)) { \ - return FLOAT_CLASS_POSITIVE_ZERO; \ - } else if (float ## bits ## _is_zero_or_denormal(arg)) { \ - return FLOAT_CLASS_POSITIVE_SUBNORMAL; \ - } else { \ - return FLOAT_CLASS_POSITIVE_NORMAL; \ - } \ - } \ -} - -FLOAT_CLASS(class_s, 32) -FLOAT_CLASS(class_d, 64) -#undef FLOAT_CLASS - -/* MIPS specific unary operations */ -uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint64_t fdt2; - - fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fdt2; -} - -uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0) -{ - uint32_t fst2; - - fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fst2; -} - -uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint64_t fdt2; - - fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); - fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fdt2; -} - -uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0) -{ - uint32_t fst2; - - fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); - fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fst2; -} - -uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint64_t fdt2; - - fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fdt2; -} - -uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0) -{ - uint32_t fst2; - - fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fst2; -} - -uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0) -{ - uint32_t fst2; - uint32_t fsth2; - - fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); - fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return ((uint64_t)fsth2 << 32) | fst2; -} - -uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0) -{ - uint64_t fdt2; - 
- fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); - fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fdt2; -} - -uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0) -{ - uint32_t fst2; - - fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); - fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return fst2; -} - -uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0) -{ - uint32_t fst2; - uint32_t fsth2; - - fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); - fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status); - fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); - fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return ((uint64_t)fsth2 << 32) | fst2; -} - -#define FLOAT_OP(name, p) void helper_float_##name##_##p(CPUMIPSState *env) - -/* binary operations */ -#define FLOAT_BINOP(name) \ -uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \ - uint64_t fdt0, uint64_t fdt1) \ -{ \ - uint64_t dt2; \ - \ - dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \ - update_fcr31(env, GETPC()); \ - return dt2; \ -} \ - \ -uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \ - uint32_t fst0, uint32_t fst1) \ -{ \ - uint32_t wt2; \ - \ - wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \ - update_fcr31(env, GETPC()); \ - return wt2; \ -} \ - \ -uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \ - uint64_t fdt0, \ - uint64_t fdt1) \ -{ \ - uint32_t fst0 = fdt0 & 0XFFFFFFFF; \ - uint32_t fsth0 = fdt0 >> 32; \ - uint32_t fst1 = fdt1 & 0XFFFFFFFF; \ - uint32_t fsth1 = fdt1 >> 32; \ - uint32_t wt2; \ - uint32_t wth2; \ - \ - wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \ - wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \ - update_fcr31(env, GETPC()); \ - return ((uint64_t)wth2 << 32) | wt2; \ -} - -FLOAT_BINOP(add) -FLOAT_BINOP(sub) -FLOAT_BINOP(mul) -FLOAT_BINOP(div) -#undef FLOAT_BINOP - -#define UNFUSED_FMA(prefix, a, b, c, flags) \ -{ \ - a = prefix##_mul(a, b, &env->active_fpu.fp_status); \ - if ((flags) & float_muladd_negate_c) { \ - a = prefix##_sub(a, c, &env->active_fpu.fp_status); \ - } else { \ - a = prefix##_add(a, c, &env->active_fpu.fp_status); \ - } \ - if ((flags) & float_muladd_negate_result) { \ - a = prefix##_chs(a); \ - } \ -} - -/* FMA based operations */ -#define FLOAT_FMA(name, type) \ -uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \ - uint64_t fdt0, uint64_t fdt1, \ - uint64_t fdt2) \ -{ \ - UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type); \ - update_fcr31(env, GETPC()); \ - return fdt0; \ -} \ - \ -uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \ - uint32_t fst0, uint32_t fst1, \ - uint32_t fst2) \ -{ \ - UNFUSED_FMA(float32, fst0, fst1, fst2, type); \ - update_fcr31(env, GETPC()); \ - return fst0; \ -} \ - \ -uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \ - uint64_t fdt0, uint64_t fdt1, \ - uint64_t fdt2) \ -{ \ - uint32_t fst0 = fdt0 & 0XFFFFFFFF; \ - uint32_t fsth0 = fdt0 >> 32; \ - uint32_t fst1 = fdt1 & 0XFFFFFFFF; \ - uint32_t fsth1 = fdt1 >> 32; \ - uint32_t fst2 = fdt2 & 0XFFFFFFFF; \ - uint32_t fsth2 = fdt2 >> 32; \ - \ - UNFUSED_FMA(float32, fst0, fst1, fst2, type); \ - UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type); \ - update_fcr31(env, GETPC()); \ - return ((uint64_t)fsth0 << 32) | fst0; \ -} -FLOAT_FMA(madd, 0) 
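/*
 * Editor's note (not part of the patch): the UNFUSED_FMA/FLOAT_FMA macros
 * above implement the MADD.fmt family as a separate multiply followed by an
 * add/subtract, so the intermediate product is rounded -- unlike a true
 * fused multiply-add. A minimal standalone sketch of the same flag logic,
 * using host doubles instead of QEMU's softfloat; the NEGATE_* names and
 * unfused_madd() are illustrative stand-ins, not from the source:
 */
#include <stdio.h>

#define NEGATE_C      (1 << 0)   /* stands in for float_muladd_negate_c */
#define NEGATE_RESULT (1 << 1)   /* stands in for float_muladd_negate_result */

static double unfused_madd(double a, double b, double c, int flags)
{
    double r = a * b;                      /* product is rounded here */
    r = (flags & NEGATE_C) ? r - c : r + c;
    if (flags & NEGATE_RESULT) {
        r = -r;                            /* mirrors the final chs step */
    }
    return r;
}

int main(void)
{
    /* madd, msub, nmadd, nmsub with a=2, b=3, c=1 */
    printf("madd  = %g\n", unfused_madd(2, 3, 1, 0));                        /* 7 */
    printf("msub  = %g\n", unfused_madd(2, 3, 1, NEGATE_C));                 /* 5 */
    printf("nmadd = %g\n", unfused_madd(2, 3, 1, NEGATE_RESULT));            /* -7 */
    printf("nmsub = %g\n", unfused_madd(2, 3, 1, NEGATE_C | NEGATE_RESULT)); /* -5 */
    return 0;
}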
-FLOAT_FMA(msub, float_muladd_negate_c) -FLOAT_FMA(nmadd, float_muladd_negate_result) -FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c) -#undef FLOAT_FMA - -/* MIPS specific binary operations */ -uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) -{ - fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status); - fdt2 = float64_chs(float64_sub(fdt2, float64_one, &env->active_fpu.fp_status)); - update_fcr31(env, GETPC()); - return fdt2; -} - -uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2) -{ - fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); - fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status)); - update_fcr31(env, GETPC()); - return fst2; -} - -uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) -{ - uint32_t fst0 = fdt0 & 0XFFFFFFFF; - uint32_t fsth0 = fdt0 >> 32; - uint32_t fst2 = fdt2 & 0XFFFFFFFF; - uint32_t fsth2 = fdt2 >> 32; - - fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); - fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status); - fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status)); - fsth2 = float32_chs(float32_sub(fsth2, float32_one, &env->active_fpu.fp_status)); - update_fcr31(env, GETPC()); - return ((uint64_t)fsth2 << 32) | fst2; -} - -uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) -{ - fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status); - fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status); - fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status)); - update_fcr31(env, GETPC()); - return fdt2; -} - -uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2) -{ - fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); - fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status); - fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status)); - update_fcr31(env, GETPC()); - return fst2; -} - -uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) -{ - uint32_t fst0 = fdt0 & 0XFFFFFFFF; - uint32_t fsth0 = fdt0 >> 32; - uint32_t fst2 = fdt2 & 0XFFFFFFFF; - uint32_t fsth2 = fdt2 >> 32; - - fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); - fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status); - fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status); - fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status); - fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status)); - fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status)); - update_fcr31(env, GETPC()); - return ((uint64_t)fsth2 << 32) | fst2; -} - -uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1) -{ - uint32_t fst0 = fdt0 & 0XFFFFFFFF; - uint32_t fsth0 = fdt0 >> 32; - uint32_t fst1 = fdt1 & 0XFFFFFFFF; - uint32_t fsth1 = fdt1 >> 32; - uint32_t fst2; - uint32_t fsth2; - - fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status); - fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return ((uint64_t)fsth2 << 32) | fst2; -} - -uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1) -{ - uint32_t fst0 = fdt0 & 0XFFFFFFFF; - uint32_t fsth0 = fdt0 >> 32; - uint32_t fst1 = fdt1 & 0XFFFFFFFF; - uint32_t fsth1 = fdt1 >> 32; - uint32_t fst2; - uint32_t fsth2; - - fst2 = float32_mul (fst0, fsth0, 
&env->active_fpu.fp_status); - fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status); - update_fcr31(env, GETPC()); - return ((uint64_t)fsth2 << 32) | fst2; -} - -/* compare operations */ -#define FOP_COND_D(op, cond) \ -void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ - uint64_t fdt1, int cc) \ -{ \ - int c; \ - c = cond; \ - update_fcr31(env, GETPC()); \ - if (c) \ - SET_FP_COND(cc, env->active_fpu); \ - else \ - CLEAR_FP_COND(cc, env->active_fpu); \ -} \ -void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ - uint64_t fdt1, int cc) \ -{ \ - int c; \ - fdt0 = float64_abs(fdt0); \ - fdt1 = float64_abs(fdt1); \ - c = cond; \ - update_fcr31(env, GETPC()); \ - if (c) \ - SET_FP_COND(cc, env->active_fpu); \ - else \ - CLEAR_FP_COND(cc, env->active_fpu); \ -} - -/* NOTE: the comma operator will make "cond" to eval to false, - * but float64_unordered_quiet() is still called. */ -FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0)) -FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)) -FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) -FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) -FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) -FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) -FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) -FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) -/* NOTE: the comma operator will make "cond" to eval to false, - * but float64_unordered() is still called. */ -FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0)) -FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)) -FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)) -FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)) -FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)) -FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)) -FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status)) -FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)) - -#define FOP_COND_S(op, cond) \ -void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ - uint32_t fst1, int cc) \ -{ \ - int c; \ - c = cond; \ - update_fcr31(env, GETPC()); \ - if (c) \ - SET_FP_COND(cc, env->active_fpu); \ - else \ - CLEAR_FP_COND(cc, env->active_fpu); \ -} \ -void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ - uint32_t fst1, int cc) \ -{ \ - int c; \ - fst0 = float32_abs(fst0); \ - fst1 = float32_abs(fst1); \ - c = cond; \ - update_fcr31(env, GETPC()); \ - if (c) \ - SET_FP_COND(cc, env->active_fpu); \ - else \ - CLEAR_FP_COND(cc, env->active_fpu); \ -} - -/* NOTE: the comma operator will make "cond" to eval to false, - * but float32_unordered_quiet() is still called. 
*/ -FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0)) -FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)) -FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)) -FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)) -FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)) -FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)) -FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)) -FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)) -/* NOTE: the comma operator will make "cond" to eval to false, - * but float32_unordered() is still called. */ -FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0)) -FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status)) -FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status)) -FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status)) -FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status)) -FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status)) -FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status)) -FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status)) - -#define FOP_COND_PS(op, condl, condh) \ -void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \ - uint64_t fdt1, int cc) \ -{ \ - uint32_t fst0, fsth0, fst1, fsth1; \ - int ch, cl; \ - fst0 = fdt0 & 0XFFFFFFFF; \ - fsth0 = fdt0 >> 32; \ - fst1 = fdt1 & 0XFFFFFFFF; \ - fsth1 = fdt1 >> 32; \ - cl = condl; \ - ch = condh; \ - update_fcr31(env, GETPC()); \ - if (cl) \ - SET_FP_COND(cc, env->active_fpu); \ - else \ - CLEAR_FP_COND(cc, env->active_fpu); \ - if (ch) \ - SET_FP_COND(cc + 1, env->active_fpu); \ - else \ - CLEAR_FP_COND(cc + 1, env->active_fpu); \ -} \ -void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \ - uint64_t fdt1, int cc) \ -{ \ - uint32_t fst0, fsth0, fst1, fsth1; \ - int ch, cl; \ - fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \ - fsth0 = float32_abs(fdt0 >> 32); \ - fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \ - fsth1 = float32_abs(fdt1 >> 32); \ - cl = condl; \ - ch = condh; \ - update_fcr31(env, GETPC()); \ - if (cl) \ - SET_FP_COND(cc, env->active_fpu); \ - else \ - CLEAR_FP_COND(cc, env->active_fpu); \ - if (ch) \ - SET_FP_COND(cc + 1, env->active_fpu); \ - else \ - CLEAR_FP_COND(cc + 1, env->active_fpu); \ -} - -/* NOTE: the comma operator will make "cond" to eval to false, - * but float32_unordered_quiet() is still called. 
*/ -FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0), - (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0)) -FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), - float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)) -FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status), - float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) -FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status), - float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) -FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status), - float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) -FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status), - float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) -FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status), - float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) -FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status), - float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) -/* NOTE: the comma operator will make "cond" to eval to false, - * but float32_unordered() is still called. */ -FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0), - (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0)) -FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status), - float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)) -FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status), - float32_eq(fsth0, fsth1, &env->active_fpu.fp_status)) -FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status), - float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status)) -FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status), - float32_lt(fsth0, fsth1, &env->active_fpu.fp_status)) -FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status), - float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status)) -FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status), - float32_le(fsth0, fsth1, &env->active_fpu.fp_status)) -FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status), - float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status)) - -/* R6 compare operations */ -#define FOP_CONDN_D(op, cond) \ -uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState * env, uint64_t fdt0, \ - uint64_t fdt1) \ -{ \ - uint64_t c; \ - c = cond; \ - update_fcr31(env, GETPC()); \ - if (c) { \ - return -1; \ - } else { \ - return 0; \ - } \ -} - -/* NOTE: the comma operator will make "cond" to eval to false, - * but float64_unordered_quiet() is still called. 
*/ -FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0)) -FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))) -FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) -/* NOTE: the comma operator will make "cond" to eval to false, - * but float64_unordered() is still called. */ -FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0)) -FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))) -FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(sle, (float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(sor, (float64_le(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_lt(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) -FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0, &env->active_fpu.fp_status) - || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) - -#define FOP_CONDN_S(op, cond) \ -uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState * env, uint32_t fst0, \ - uint32_t fst1) \ -{ \ - uint64_t c; \ - c = cond; \ - update_fcr31(env, GETPC()); \ - if (c) { \ - return -1; \ - } else { \ - return 0; \ - } \ -} - -/* NOTE: the comma operator will make "cond" to eval to false, - * but float32_unordered_quiet() is still called. 
*/ -FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0)) -FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))) -FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) - || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) - || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) - || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) -/* NOTE: the comma operator will make "cond" to eval to false, - * but float32_unordered() is still called. */ -FOP_CONDN_S(saf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0)) -FOP_CONDN_S(sun, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status))) -FOP_CONDN_S(seq, (float32_eq(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) - || float32_eq(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(slt, (float32_lt(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(sult, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) - || float32_lt(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(sle, (float32_le(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(sule, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) - || float32_le(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0, &env->active_fpu.fp_status) - || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) - || float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status) - || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status) - || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(sor, (float32_le(fst1, fst0, &env->active_fpu.fp_status) - || float32_le(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(sune, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) - || float32_lt(fst1, fst0, &env->active_fpu.fp_status) - || float32_lt(fst0, fst1, &env->active_fpu.fp_status))) -FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status) - || float32_lt(fst0, fst1, &env->active_fpu.fp_status))) - -/* MSA */ -/* Data format min and max values */ -#define DF_BITS(df) (1 << ((df) + 3)) - -/* Element-by-element access macros */ -#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) - -void helper_msa_ld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs, - int32_t s10) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - target_ulong addr = env->active_tc.gpr[rs] + (s10 << df); - int i; - - switch (df) { - case DF_BYTE: - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { - pwd->b[i] = do_lbu(env, addr + (i << DF_BYTE), - env->hflags & MIPS_HFLAG_KSU); - } - break; - case DF_HALF: - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { - pwd->h[i] = do_lhu(env, addr + (i << DF_HALF), - env->hflags & MIPS_HFLAG_KSU); - } - break; - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - pwd->w[i] = do_lw(env, addr + (i << DF_WORD), - env->hflags & MIPS_HFLAG_KSU); 
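/*
 * Editor's note (not part of the patch): assuming a 128-bit MSA register
 * (MSA_WRLEN == 128, as defined elsewhere in QEMU), DF_BITS(df) = 8 << df
 * gives the lane width and DF_ELEMENTS(df) the lane count, so the load/store
 * loops here step through 16 bytes, 8 halfwords, 4 words or 2 doublewords,
 * each lane at byte offset i << df from the base address. A standalone
 * sketch of that geometry:
 */
#include <stdio.h>

enum { DF_BYTE, DF_HALF, DF_WORD, DF_DOUBLE };

#define MSA_WRLEN 128
#define DF_BITS(df) (1 << ((df) + 3))
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))

int main(void)
{
    for (int df = DF_BYTE; df <= DF_DOUBLE; df++) {
        printf("df=%d: %2d-bit lanes, %2d per register, stride %d bytes\n",
               df, DF_BITS(df), DF_ELEMENTS(df), 1 << df);
    }
    return 0;
}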
- } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - pwd->d[i] = do_ld(env, addr + (i << DF_DOUBLE), - env->hflags & MIPS_HFLAG_KSU); - } - break; - } -} - -void helper_msa_st_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs, - int32_t s10) -{ - wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - target_ulong addr = env->active_tc.gpr[rs] + (s10 << df); - int i; - - switch (df) { - case DF_BYTE: - for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { - do_sb(env, addr + (i << DF_BYTE), pwd->b[i], - env->hflags & MIPS_HFLAG_KSU); - } - break; - case DF_HALF: - for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { - do_sh(env, addr + (i << DF_HALF), pwd->h[i], - env->hflags & MIPS_HFLAG_KSU); - } - break; - case DF_WORD: - for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - do_sw(env, addr + (i << DF_WORD), pwd->w[i], - env->hflags & MIPS_HFLAG_KSU); - } - break; - case DF_DOUBLE: - for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - do_sd(env, addr + (i << DF_DOUBLE), pwd->d[i], - env->hflags & MIPS_HFLAG_KSU); - } - break; - } -} diff --git a/qemu/target-mips/translate.c b/qemu/target-mips/translate.c deleted file mode 100644 index 9a3ac9e8..00000000 --- a/qemu/target-mips/translate.c +++ /dev/null @@ -1,19748 +0,0 @@ -/* - * MIPS32 emulation for qemu: main translation routines. - * - * Copyright (c) 2004-2005 Jocelyn Mayer - * Copyright (c) 2006 Marius Groeger (FPU operations) - * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support) - * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support) - * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support) - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */ - -#include "cpu.h" -#include "tcg-op.h" -#include "exec/cpu_ldst.h" - -#include "exec/helper-proto.h" -#include "exec/helper-gen.h" - -#include "exec/gen-icount.h" - -#define MIPS_DEBUG_DISAS 0 -//#define MIPS_DEBUG_SIGN_EXTENSIONS - -/* MIPS major opcodes */ -#define MASK_OP_MAJOR(op) (op & (((uint32_t)0x3F) << 26)) - -enum { - /* indirect opcode tables */ - OPC_SPECIAL = (0x00 << 26), - OPC_REGIMM = (0x01 << 26), - OPC_CP0 = (0x10 << 26), - OPC_CP1 = (0x11 << 26), - OPC_CP2 = (0x12 << 26), - OPC_CP3 = (0x13 << 26), - OPC_SPECIAL2 = (0x1C << 26), - OPC_SPECIAL3 = (0x1F << 26), - /* arithmetic with immediate */ - OPC_ADDI = (0x08 << 26), - OPC_ADDIU = (0x09 << 26), - OPC_SLTI = (0x0A << 26), - OPC_SLTIU = (0x0B << 26), - /* logic with immediate */ - OPC_ANDI = (0x0C << 26), - OPC_ORI = (0x0D << 26), - OPC_XORI = (0x0E << 26), - OPC_LUI = (0x0F << 26), - /* arithmetic with immediate */ - OPC_DADDI = (0x18 << 26), - OPC_DADDIU = (0x19 << 26), - /* Jump and branches */ - OPC_J = (0x02 << 26), - OPC_JAL = (0x03 << 26), - OPC_BEQ = (0x04 << 26), /* Unconditional if rs = rt = 0 (B) */ - OPC_BEQL = (0x14 << 26), - OPC_BNE = (0x05 << 26), - OPC_BNEL = (0x15 << 26), - OPC_BLEZ = (0x06 << 26), - OPC_BLEZL = (0x16 << 26), - OPC_BGTZ = (0x07 << 26), - OPC_BGTZL = (0x17 << 26), - OPC_JALX = (0x1D << 26), - OPC_DAUI = (0x1D << 26), - /* Load and stores */ - OPC_LDL = (0x1A << 26), - OPC_LDR = (0x1B << 26), - OPC_LB = (0x20 << 26), - OPC_LH = (0x21 << 26), - OPC_LWL = (0x22 << 26), - OPC_LW = (0x23 << 26), - OPC_LWPC = OPC_LW | 0x5, - OPC_LBU = (0x24 << 26), - OPC_LHU = (0x25 << 26), - OPC_LWR = (0x26 << 26), - OPC_LWU = (0x27 << 26), - OPC_SB = (0x28 << 26), - OPC_SH = (0x29 << 26), - OPC_SWL = (0x2A << 26), - OPC_SW = (0x2B << 26), - OPC_SDL = (0x2C << 26), - OPC_SDR = (0x2D << 26), - OPC_SWR = (0x2E << 26), - OPC_LL = (0x30 << 26), - OPC_LLD = (0x34 << 26), - OPC_LD = (0x37 << 26), - OPC_LDPC = OPC_LD | 0x5, - OPC_SC = (0x38 << 26), - OPC_SCD = (0x3C << 26), - OPC_SD = (0x3F << 26), - /* Floating point load/store */ - OPC_LWC1 = (0x31 << 26), - OPC_LWC2 = (0x32 << 26), - OPC_LDC1 = (0x35 << 26), - OPC_LDC2 = (0x36 << 26), - OPC_SWC1 = (0x39 << 26), - OPC_SWC2 = (0x3A << 26), - OPC_SDC1 = (0x3D << 26), - OPC_SDC2 = (0x3E << 26), - /* Compact Branches */ - OPC_BLEZALC = (0x06 << 26), - OPC_BGEZALC = (0x06 << 26), - OPC_BGEUC = (0x06 << 26), - OPC_BGTZALC = (0x07 << 26), - OPC_BLTZALC = (0x07 << 26), - OPC_BLTUC = (0x07 << 26), - OPC_BOVC = (0x08 << 26), - OPC_BEQZALC = (0x08 << 26), - OPC_BEQC = (0x08 << 26), - OPC_BLEZC = (0x16 << 26), - OPC_BGEZC = (0x16 << 26), - OPC_BGEC = (0x16 << 26), - OPC_BGTZC = (0x17 << 26), - OPC_BLTZC = (0x17 << 26), - OPC_BLTC = (0x17 << 26), - OPC_BNVC = (0x18 << 26), - OPC_BNEZALC = (0x18 << 26), - OPC_BNEC = (0x18 << 26), - OPC_BC = (0x32 << 26), - OPC_BEQZC = (0x36 << 26), - OPC_JIC = (0x36 << 26), - OPC_BALC = (0x3A << 26), - OPC_BNEZC = (0x3E << 26), - OPC_JIALC = (0x3E << 26), - /* MDMX ASE specific */ - OPC_MDMX = (0x1E << 26), - /* MSA ASE, same as MDMX */ - OPC_MSA = OPC_MDMX, - /* Cache and prefetch */ - OPC_CACHE = (0x2F << 26), - OPC_PREF = (0x33 << 26), - /* PC-relative address computation / loads */ - OPC_PCREL = (0x3B << 26), -}; - -/* PC-relative address computation / loads */ -#define MASK_OPC_PCREL_TOP2BITS(op) (MASK_OP_MAJOR(op) | (op & (3 << 19))) -#define MASK_OPC_PCREL_TOP5BITS(op) (MASK_OP_MAJOR(op) | (op & (0x1f << 16))) -enum { - /* Instructions determined by bits 19 and 20 */ - OPC_ADDIUPC = OPC_PCREL | (0 << 19), - R6_OPC_LWPC = OPC_PCREL | 
(1 << 19), - OPC_LWUPC = OPC_PCREL | (2 << 19), - - /* Instructions determined by bits 16 ... 20 */ - OPC_AUIPC = OPC_PCREL | (0x1e << 16), - OPC_ALUIPC = OPC_PCREL | (0x1f << 16), - - /* Other */ - R6_OPC_LDPC = OPC_PCREL | (6 << 18), -}; - -/* MIPS special opcodes */ -#define MASK_SPECIAL(op) MASK_OP_MAJOR(op) | (op & 0x3F) - -enum { - /* Shifts */ - OPC_SLL = 0x00 | OPC_SPECIAL, - /* NOP is SLL r0, r0, 0 */ - /* SSNOP is SLL r0, r0, 1 */ - /* EHB is SLL r0, r0, 3 */ - OPC_SRL = 0x02 | OPC_SPECIAL, /* also ROTR */ - OPC_ROTR = OPC_SRL | (1 << 21), - OPC_SRA = 0x03 | OPC_SPECIAL, - OPC_SLLV = 0x04 | OPC_SPECIAL, - OPC_SRLV = 0x06 | OPC_SPECIAL, /* also ROTRV */ - OPC_ROTRV = OPC_SRLV | (1 << 6), - OPC_SRAV = 0x07 | OPC_SPECIAL, - OPC_DSLLV = 0x14 | OPC_SPECIAL, - OPC_DSRLV = 0x16 | OPC_SPECIAL, /* also DROTRV */ - OPC_DROTRV = OPC_DSRLV | (1 << 6), - OPC_DSRAV = 0x17 | OPC_SPECIAL, - OPC_DSLL = 0x38 | OPC_SPECIAL, - OPC_DSRL = 0x3A | OPC_SPECIAL, /* also DROTR */ - OPC_DROTR = OPC_DSRL | (1 << 21), - OPC_DSRA = 0x3B | OPC_SPECIAL, - OPC_DSLL32 = 0x3C | OPC_SPECIAL, - OPC_DSRL32 = 0x3E | OPC_SPECIAL, /* also DROTR32 */ - OPC_DROTR32 = OPC_DSRL32 | (1 << 21), - OPC_DSRA32 = 0x3F | OPC_SPECIAL, - /* Multiplication / division */ - OPC_MULT = 0x18 | OPC_SPECIAL, - OPC_MULTU = 0x19 | OPC_SPECIAL, - OPC_DIV = 0x1A | OPC_SPECIAL, - OPC_DIVU = 0x1B | OPC_SPECIAL, - OPC_DMULT = 0x1C | OPC_SPECIAL, - OPC_DMULTU = 0x1D | OPC_SPECIAL, - OPC_DDIV = 0x1E | OPC_SPECIAL, - OPC_DDIVU = 0x1F | OPC_SPECIAL, - - /* 2 registers arithmetic / logic */ - OPC_ADD = 0x20 | OPC_SPECIAL, - OPC_ADDU = 0x21 | OPC_SPECIAL, - OPC_SUB = 0x22 | OPC_SPECIAL, - OPC_SUBU = 0x23 | OPC_SPECIAL, - OPC_AND = 0x24 | OPC_SPECIAL, - OPC_OR = 0x25 | OPC_SPECIAL, - OPC_XOR = 0x26 | OPC_SPECIAL, - OPC_NOR = 0x27 | OPC_SPECIAL, - OPC_SLT = 0x2A | OPC_SPECIAL, - OPC_SLTU = 0x2B | OPC_SPECIAL, - OPC_DADD = 0x2C | OPC_SPECIAL, - OPC_DADDU = 0x2D | OPC_SPECIAL, - OPC_DSUB = 0x2E | OPC_SPECIAL, - OPC_DSUBU = 0x2F | OPC_SPECIAL, - /* Jumps */ - OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */ - OPC_JALR = 0x09 | OPC_SPECIAL, /* Also JALR.HB */ - /* Traps */ - OPC_TGE = 0x30 | OPC_SPECIAL, - OPC_TGEU = 0x31 | OPC_SPECIAL, - OPC_TLT = 0x32 | OPC_SPECIAL, - OPC_TLTU = 0x33 | OPC_SPECIAL, - OPC_TEQ = 0x34 | OPC_SPECIAL, - OPC_TNE = 0x36 | OPC_SPECIAL, - /* HI / LO registers load & stores */ - OPC_MFHI = 0x10 | OPC_SPECIAL, - OPC_MTHI = 0x11 | OPC_SPECIAL, - OPC_MFLO = 0x12 | OPC_SPECIAL, - OPC_MTLO = 0x13 | OPC_SPECIAL, - /* Conditional moves */ - OPC_MOVZ = 0x0A | OPC_SPECIAL, - OPC_MOVN = 0x0B | OPC_SPECIAL, - - OPC_SELEQZ = 0x35 | OPC_SPECIAL, - OPC_SELNEZ = 0x37 | OPC_SPECIAL, - - OPC_MOVCI = 0x01 | OPC_SPECIAL, - - /* Special */ - OPC_PMON = 0x05 | OPC_SPECIAL, /* unofficial */ - OPC_SYSCALL = 0x0C | OPC_SPECIAL, - OPC_BREAK = 0x0D | OPC_SPECIAL, - OPC_SPIM = 0x0E | OPC_SPECIAL, /* unofficial */ - OPC_SYNC = 0x0F | OPC_SPECIAL, - - OPC_SPECIAL28_RESERVED = 0x28 | OPC_SPECIAL, - OPC_SPECIAL29_RESERVED = 0x29 | OPC_SPECIAL, - OPC_SPECIAL39_RESERVED = 0x39 | OPC_SPECIAL, - OPC_SPECIAL3D_RESERVED = 0x3D | OPC_SPECIAL, -}; - -/* R6 Multiply and Divide instructions have the same Opcode - and function field as legacy OPC_MULT[U]/OPC_DIV[U] */ -#define MASK_R6_MULDIV(op) (MASK_SPECIAL(op) | (op & (0x7ff))) - -enum { - R6_OPC_MUL = OPC_MULT | (2 << 6), - R6_OPC_MUH = OPC_MULT | (3 << 6), - R6_OPC_MULU = OPC_MULTU | (2 << 6), - R6_OPC_MUHU = OPC_MULTU | (3 << 6), - R6_OPC_DIV = OPC_DIV | (2 << 6), - R6_OPC_MOD = OPC_DIV | (3 << 6), - R6_OPC_DIVU = 
OPC_DIVU | (2 << 6), - R6_OPC_MODU = OPC_DIVU | (3 << 6), - - R6_OPC_DMUL = OPC_DMULT | (2 << 6), - R6_OPC_DMUH = OPC_DMULT | (3 << 6), - R6_OPC_DMULU = OPC_DMULTU | (2 << 6), - R6_OPC_DMUHU = OPC_DMULTU | (3 << 6), - R6_OPC_DDIV = OPC_DDIV | (2 << 6), - R6_OPC_DMOD = OPC_DDIV | (3 << 6), - R6_OPC_DDIVU = OPC_DDIVU | (2 << 6), - R6_OPC_DMODU = OPC_DDIVU | (3 << 6), - - R6_OPC_CLZ = 0x10 | OPC_SPECIAL, - R6_OPC_CLO = 0x11 | OPC_SPECIAL, - R6_OPC_DCLZ = 0x12 | OPC_SPECIAL, - R6_OPC_DCLO = 0x13 | OPC_SPECIAL, - R6_OPC_SDBBP = 0x0e | OPC_SPECIAL, - - OPC_LSA = 0x05 | OPC_SPECIAL, - OPC_DLSA = 0x15 | OPC_SPECIAL, -}; - -/* Multiplication variants of the vr54xx. */ -#define MASK_MUL_VR54XX(op) MASK_SPECIAL(op) | (op & (0x1F << 6)) - -enum { - OPC_VR54XX_MULS = (0x03 << 6) | OPC_MULT, - OPC_VR54XX_MULSU = (0x03 << 6) | OPC_MULTU, - OPC_VR54XX_MACC = (0x05 << 6) | OPC_MULT, - OPC_VR54XX_MACCU = (0x05 << 6) | OPC_MULTU, - OPC_VR54XX_MSAC = (0x07 << 6) | OPC_MULT, - OPC_VR54XX_MSACU = (0x07 << 6) | OPC_MULTU, - OPC_VR54XX_MULHI = (0x09 << 6) | OPC_MULT, - OPC_VR54XX_MULHIU = (0x09 << 6) | OPC_MULTU, - OPC_VR54XX_MULSHI = (0x0B << 6) | OPC_MULT, - OPC_VR54XX_MULSHIU = (0x0B << 6) | OPC_MULTU, - OPC_VR54XX_MACCHI = (0x0D << 6) | OPC_MULT, - OPC_VR54XX_MACCHIU = (0x0D << 6) | OPC_MULTU, - OPC_VR54XX_MSACHI = (0x0F << 6) | OPC_MULT, - OPC_VR54XX_MSACHIU = (0x0F << 6) | OPC_MULTU, -}; - -/* REGIMM (rt field) opcodes */ -#define MASK_REGIMM(op) MASK_OP_MAJOR(op) | (op & (0x1F << 16)) - -enum { - OPC_BLTZ = (0x00 << 16) | OPC_REGIMM, - OPC_BLTZL = (0x02 << 16) | OPC_REGIMM, - OPC_BGEZ = (0x01 << 16) | OPC_REGIMM, - OPC_BGEZL = (0x03 << 16) | OPC_REGIMM, - OPC_BLTZAL = (0x10 << 16) | OPC_REGIMM, - OPC_BLTZALL = (0x12 << 16) | OPC_REGIMM, - OPC_BGEZAL = (0x11 << 16) | OPC_REGIMM, - OPC_BGEZALL = (0x13 << 16) | OPC_REGIMM, - OPC_TGEI = (0x08 << 16) | OPC_REGIMM, - OPC_TGEIU = (0x09 << 16) | OPC_REGIMM, - OPC_TLTI = (0x0A << 16) | OPC_REGIMM, - OPC_TLTIU = (0x0B << 16) | OPC_REGIMM, - OPC_TEQI = (0x0C << 16) | OPC_REGIMM, - OPC_TNEI = (0x0E << 16) | OPC_REGIMM, - OPC_SYNCI = (0x1F << 16) | OPC_REGIMM, - - OPC_DAHI = (0x06 << 16) | OPC_REGIMM, - OPC_DATI = (0x1e << 16) | OPC_REGIMM, -}; - -/* Special2 opcodes */ -#define MASK_SPECIAL2(op) MASK_OP_MAJOR(op) | (op & 0x3F) - -enum { - /* Multiply & xxx operations */ - OPC_MADD = 0x00 | OPC_SPECIAL2, - OPC_MADDU = 0x01 | OPC_SPECIAL2, - OPC_MUL = 0x02 | OPC_SPECIAL2, - OPC_MSUB = 0x04 | OPC_SPECIAL2, - OPC_MSUBU = 0x05 | OPC_SPECIAL2, - /* Loongson 2F */ - OPC_MULT_G_2F = 0x10 | OPC_SPECIAL2, - OPC_DMULT_G_2F = 0x11 | OPC_SPECIAL2, - OPC_MULTU_G_2F = 0x12 | OPC_SPECIAL2, - OPC_DMULTU_G_2F = 0x13 | OPC_SPECIAL2, - OPC_DIV_G_2F = 0x14 | OPC_SPECIAL2, - OPC_DDIV_G_2F = 0x15 | OPC_SPECIAL2, - OPC_DIVU_G_2F = 0x16 | OPC_SPECIAL2, - OPC_DDIVU_G_2F = 0x17 | OPC_SPECIAL2, - OPC_MOD_G_2F = 0x1c | OPC_SPECIAL2, - OPC_DMOD_G_2F = 0x1d | OPC_SPECIAL2, - OPC_MODU_G_2F = 0x1e | OPC_SPECIAL2, - OPC_DMODU_G_2F = 0x1f | OPC_SPECIAL2, - /* Misc */ - OPC_CLZ = 0x20 | OPC_SPECIAL2, - OPC_CLO = 0x21 | OPC_SPECIAL2, - OPC_DCLZ = 0x24 | OPC_SPECIAL2, - OPC_DCLO = 0x25 | OPC_SPECIAL2, - /* Special */ - OPC_SDBBP = 0x3F | OPC_SPECIAL2, -}; - -/* Special3 opcodes */ -#define MASK_SPECIAL3(op) MASK_OP_MAJOR(op) | (op & 0x3F) - -enum { - OPC_EXT = 0x00 | OPC_SPECIAL3, - OPC_DEXTM = 0x01 | OPC_SPECIAL3, - OPC_DEXTU = 0x02 | OPC_SPECIAL3, - OPC_DEXT = 0x03 | OPC_SPECIAL3, - OPC_INS = 0x04 | OPC_SPECIAL3, - OPC_DINSM = 0x05 | OPC_SPECIAL3, - OPC_DINSU = 0x06 | OPC_SPECIAL3, - OPC_DINS = 0x07 | 
OPC_SPECIAL3, - OPC_FORK = 0x08 | OPC_SPECIAL3, - OPC_YIELD = 0x09 | OPC_SPECIAL3, - OPC_BSHFL = 0x20 | OPC_SPECIAL3, - OPC_DBSHFL = 0x24 | OPC_SPECIAL3, - OPC_RDHWR = 0x3B | OPC_SPECIAL3, - - /* Loongson 2E */ - OPC_MULT_G_2E = 0x18 | OPC_SPECIAL3, - OPC_MULTU_G_2E = 0x19 | OPC_SPECIAL3, - OPC_DIV_G_2E = 0x1A | OPC_SPECIAL3, - OPC_DIVU_G_2E = 0x1B | OPC_SPECIAL3, - OPC_DMULT_G_2E = 0x1C | OPC_SPECIAL3, - OPC_DMULTU_G_2E = 0x1D | OPC_SPECIAL3, - OPC_DDIV_G_2E = 0x1E | OPC_SPECIAL3, - OPC_DDIVU_G_2E = 0x1F | OPC_SPECIAL3, - OPC_MOD_G_2E = 0x22 | OPC_SPECIAL3, - OPC_MODU_G_2E = 0x23 | OPC_SPECIAL3, - OPC_DMOD_G_2E = 0x26 | OPC_SPECIAL3, - OPC_DMODU_G_2E = 0x27 | OPC_SPECIAL3, - - /* MIPS DSP Load */ - OPC_LX_DSP = 0x0A | OPC_SPECIAL3, - /* MIPS DSP Arithmetic */ - OPC_ADDU_QB_DSP = 0x10 | OPC_SPECIAL3, - OPC_ADDU_OB_DSP = 0x14 | OPC_SPECIAL3, - OPC_ABSQ_S_PH_DSP = 0x12 | OPC_SPECIAL3, - OPC_ABSQ_S_QH_DSP = 0x16 | OPC_SPECIAL3, - /* OPC_ADDUH_QB_DSP is same as OPC_MULT_G_2E. */ - /* OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, */ - OPC_CMPU_EQ_QB_DSP = 0x11 | OPC_SPECIAL3, - OPC_CMPU_EQ_OB_DSP = 0x15 | OPC_SPECIAL3, - /* MIPS DSP GPR-Based Shift Sub-class */ - OPC_SHLL_QB_DSP = 0x13 | OPC_SPECIAL3, - OPC_SHLL_OB_DSP = 0x17 | OPC_SPECIAL3, - /* MIPS DSP Multiply Sub-class insns */ - /* OPC_MUL_PH_DSP is same as OPC_ADDUH_QB_DSP. */ - /* OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, */ - OPC_DPA_W_PH_DSP = 0x30 | OPC_SPECIAL3, - OPC_DPAQ_W_QH_DSP = 0x34 | OPC_SPECIAL3, - /* DSP Bit/Manipulation Sub-class */ - OPC_INSV_DSP = 0x0C | OPC_SPECIAL3, - OPC_DINSV_DSP = 0x0D | OPC_SPECIAL3, - /* MIPS DSP Append Sub-class */ - OPC_APPEND_DSP = 0x31 | OPC_SPECIAL3, - OPC_DAPPEND_DSP = 0x35 | OPC_SPECIAL3, - /* MIPS DSP Accumulator and DSPControl Access Sub-class */ - OPC_EXTR_W_DSP = 0x38 | OPC_SPECIAL3, - OPC_DEXTR_W_DSP = 0x3C | OPC_SPECIAL3, - - /* R6 */ - R6_OPC_PREF = 0x35 | OPC_SPECIAL3, - R6_OPC_CACHE = 0x25 | OPC_SPECIAL3, - R6_OPC_LL = 0x36 | OPC_SPECIAL3, - R6_OPC_SC = 0x26 | OPC_SPECIAL3, - R6_OPC_LLD = 0x37 | OPC_SPECIAL3, - R6_OPC_SCD = 0x27 | OPC_SPECIAL3, -}; - -/* BSHFL opcodes */ -#define MASK_BSHFL(op) MASK_SPECIAL3(op) | (op & (0x1F << 6)) - -enum { - OPC_WSBH = (0x02 << 6) | OPC_BSHFL, - OPC_SEB = (0x10 << 6) | OPC_BSHFL, - OPC_SEH = (0x18 << 6) | OPC_BSHFL, - OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp */ - OPC_ALIGN_END = (0x0B << 6) | OPC_BSHFL, /* 010.00 to 010.11 */ - OPC_BITSWAP = (0x00 << 6) | OPC_BSHFL /* 00000 */ -}; - -/* DBSHFL opcodes */ -#define MASK_DBSHFL(op) MASK_SPECIAL3(op) | (op & (0x1F << 6)) - -enum { - OPC_DSBH = (0x02 << 6) | OPC_DBSHFL, - OPC_DSHD = (0x05 << 6) | OPC_DBSHFL, - OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp */ - OPC_DALIGN_END = (0x0F << 6) | OPC_DBSHFL, /* 01.000 to 01.111 */ - OPC_DBITSWAP = (0x00 << 6) | OPC_DBSHFL, /* 00000 */ -}; - -/* MIPS DSP REGIMM opcodes */ -enum { - OPC_BPOSGE32 = (0x1C << 16) | OPC_REGIMM, - OPC_BPOSGE64 = (0x1D << 16) | OPC_REGIMM, -}; - -#define MASK_LX(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -/* MIPS DSP Load */ -enum { - OPC_LBUX = (0x06 << 6) | OPC_LX_DSP, - OPC_LHX = (0x04 << 6) | OPC_LX_DSP, - OPC_LWX = (0x00 << 6) | OPC_LX_DSP, - OPC_LDX = (0x08 << 6) | OPC_LX_DSP, -}; - -#define MASK_ADDU_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP Arithmetic Sub-class */ - OPC_ADDQ_PH = (0x0A << 6) | OPC_ADDU_QB_DSP, - OPC_ADDQ_S_PH = (0x0E << 6) | OPC_ADDU_QB_DSP, - OPC_ADDQ_S_W = (0x16 << 6) | OPC_ADDU_QB_DSP, - OPC_ADDU_QB = (0x00 << 6) | OPC_ADDU_QB_DSP, - OPC_ADDU_S_QB = (0x04 << 6) | 
OPC_ADDU_QB_DSP, - OPC_ADDU_PH = (0x08 << 6) | OPC_ADDU_QB_DSP, - OPC_ADDU_S_PH = (0x0C << 6) | OPC_ADDU_QB_DSP, - OPC_SUBQ_PH = (0x0B << 6) | OPC_ADDU_QB_DSP, - OPC_SUBQ_S_PH = (0x0F << 6) | OPC_ADDU_QB_DSP, - OPC_SUBQ_S_W = (0x17 << 6) | OPC_ADDU_QB_DSP, - OPC_SUBU_QB = (0x01 << 6) | OPC_ADDU_QB_DSP, - OPC_SUBU_S_QB = (0x05 << 6) | OPC_ADDU_QB_DSP, - OPC_SUBU_PH = (0x09 << 6) | OPC_ADDU_QB_DSP, - OPC_SUBU_S_PH = (0x0D << 6) | OPC_ADDU_QB_DSP, - OPC_ADDSC = (0x10 << 6) | OPC_ADDU_QB_DSP, - OPC_ADDWC = (0x11 << 6) | OPC_ADDU_QB_DSP, - OPC_MODSUB = (0x12 << 6) | OPC_ADDU_QB_DSP, - OPC_RADDU_W_QB = (0x14 << 6) | OPC_ADDU_QB_DSP, - /* MIPS DSP Multiply Sub-class insns */ - OPC_MULEU_S_PH_QBL = (0x06 << 6) | OPC_ADDU_QB_DSP, - OPC_MULEU_S_PH_QBR = (0x07 << 6) | OPC_ADDU_QB_DSP, - OPC_MULQ_RS_PH = (0x1F << 6) | OPC_ADDU_QB_DSP, - OPC_MULEQ_S_W_PHL = (0x1C << 6) | OPC_ADDU_QB_DSP, - OPC_MULEQ_S_W_PHR = (0x1D << 6) | OPC_ADDU_QB_DSP, - OPC_MULQ_S_PH = (0x1E << 6) | OPC_ADDU_QB_DSP, -}; - -#define OPC_ADDUH_QB_DSP OPC_MULT_G_2E -#define MASK_ADDUH_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP Arithmetic Sub-class */ - OPC_ADDUH_QB = (0x00 << 6) | OPC_ADDUH_QB_DSP, - OPC_ADDUH_R_QB = (0x02 << 6) | OPC_ADDUH_QB_DSP, - OPC_ADDQH_PH = (0x08 << 6) | OPC_ADDUH_QB_DSP, - OPC_ADDQH_R_PH = (0x0A << 6) | OPC_ADDUH_QB_DSP, - OPC_ADDQH_W = (0x10 << 6) | OPC_ADDUH_QB_DSP, - OPC_ADDQH_R_W = (0x12 << 6) | OPC_ADDUH_QB_DSP, - OPC_SUBUH_QB = (0x01 << 6) | OPC_ADDUH_QB_DSP, - OPC_SUBUH_R_QB = (0x03 << 6) | OPC_ADDUH_QB_DSP, - OPC_SUBQH_PH = (0x09 << 6) | OPC_ADDUH_QB_DSP, - OPC_SUBQH_R_PH = (0x0B << 6) | OPC_ADDUH_QB_DSP, - OPC_SUBQH_W = (0x11 << 6) | OPC_ADDUH_QB_DSP, - OPC_SUBQH_R_W = (0x13 << 6) | OPC_ADDUH_QB_DSP, - /* MIPS DSP Multiply Sub-class insns */ - OPC_MUL_PH = (0x0C << 6) | OPC_ADDUH_QB_DSP, - OPC_MUL_S_PH = (0x0E << 6) | OPC_ADDUH_QB_DSP, - OPC_MULQ_S_W = (0x16 << 6) | OPC_ADDUH_QB_DSP, - OPC_MULQ_RS_W = (0x17 << 6) | OPC_ADDUH_QB_DSP, -}; - -#define MASK_ABSQ_S_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP Arithmetic Sub-class */ - OPC_ABSQ_S_QB = (0x01 << 6) | OPC_ABSQ_S_PH_DSP, - OPC_ABSQ_S_PH = (0x09 << 6) | OPC_ABSQ_S_PH_DSP, - OPC_ABSQ_S_W = (0x11 << 6) | OPC_ABSQ_S_PH_DSP, - OPC_PRECEQ_W_PHL = (0x0C << 6) | OPC_ABSQ_S_PH_DSP, - OPC_PRECEQ_W_PHR = (0x0D << 6) | OPC_ABSQ_S_PH_DSP, - OPC_PRECEQU_PH_QBL = (0x04 << 6) | OPC_ABSQ_S_PH_DSP, - OPC_PRECEQU_PH_QBR = (0x05 << 6) | OPC_ABSQ_S_PH_DSP, - OPC_PRECEQU_PH_QBLA = (0x06 << 6) | OPC_ABSQ_S_PH_DSP, - OPC_PRECEQU_PH_QBRA = (0x07 << 6) | OPC_ABSQ_S_PH_DSP, - OPC_PRECEU_PH_QBL = (0x1C << 6) | OPC_ABSQ_S_PH_DSP, - OPC_PRECEU_PH_QBR = (0x1D << 6) | OPC_ABSQ_S_PH_DSP, - OPC_PRECEU_PH_QBLA = (0x1E << 6) | OPC_ABSQ_S_PH_DSP, - OPC_PRECEU_PH_QBRA = (0x1F << 6) | OPC_ABSQ_S_PH_DSP, - /* DSP Bit/Manipulation Sub-class */ - OPC_BITREV = (0x1B << 6) | OPC_ABSQ_S_PH_DSP, - OPC_REPL_QB = (0x02 << 6) | OPC_ABSQ_S_PH_DSP, - OPC_REPLV_QB = (0x03 << 6) | OPC_ABSQ_S_PH_DSP, - OPC_REPL_PH = (0x0A << 6) | OPC_ABSQ_S_PH_DSP, - OPC_REPLV_PH = (0x0B << 6) | OPC_ABSQ_S_PH_DSP, -}; - -#define MASK_CMPU_EQ_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP Arithmetic Sub-class */ - OPC_PRECR_QB_PH = (0x0D << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_PRECRQ_QB_PH = (0x0C << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_PRECR_SRA_PH_W = (0x1E << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_PRECR_SRA_R_PH_W = (0x1F << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_PRECRQ_PH_W = (0x14 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_PRECRQ_RS_PH_W = (0x15 << 6) | OPC_CMPU_EQ_QB_DSP, 
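/*
 * To sketch how these sub-opcode tables are consumed (the 32-bit word
 * below is a hypothetical example, not taken from this file): each
 * MASK_* macro keeps exactly the fields that identify an opcode within
 * its group - the major opcode (bits 31..26), the function field
 * (bits 5..0) and, for the DSP groups, the sa field (bits 10..6) -
 * and clears the operand fields, so a fetched word can be switched on
 * directly against these enum values:
 *
 *     uint32_t insn = 0x7C221811;      // SPECIAL3 major, function 0x11, sa 0
 *     switch (MASK_CMPU_EQ_QB(insn)) {
 *     case OPC_CMPU_EQ_QB:             // (0x00 << 6) | OPC_CMPU_EQ_QB_DSP
 *         ...
 *     }
 */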
- OPC_PRECRQU_S_QB_PH = (0x0F << 6) | OPC_CMPU_EQ_QB_DSP, - /* DSP Compare-Pick Sub-class */ - OPC_CMPU_EQ_QB = (0x00 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMPU_LT_QB = (0x01 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMPU_LE_QB = (0x02 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMPGU_EQ_QB = (0x04 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMPGU_LT_QB = (0x05 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMPGU_LE_QB = (0x06 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMPGDU_EQ_QB = (0x18 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMPGDU_LT_QB = (0x19 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMPGDU_LE_QB = (0x1A << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMP_EQ_PH = (0x08 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMP_LT_PH = (0x09 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_CMP_LE_PH = (0x0A << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_PICK_QB = (0x03 << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_PICK_PH = (0x0B << 6) | OPC_CMPU_EQ_QB_DSP, - OPC_PACKRL_PH = (0x0E << 6) | OPC_CMPU_EQ_QB_DSP, -}; - -#define MASK_SHLL_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP GPR-Based Shift Sub-class */ - OPC_SHLL_QB = (0x00 << 6) | OPC_SHLL_QB_DSP, - OPC_SHLLV_QB = (0x02 << 6) | OPC_SHLL_QB_DSP, - OPC_SHLL_PH = (0x08 << 6) | OPC_SHLL_QB_DSP, - OPC_SHLLV_PH = (0x0A << 6) | OPC_SHLL_QB_DSP, - OPC_SHLL_S_PH = (0x0C << 6) | OPC_SHLL_QB_DSP, - OPC_SHLLV_S_PH = (0x0E << 6) | OPC_SHLL_QB_DSP, - OPC_SHLL_S_W = (0x14 << 6) | OPC_SHLL_QB_DSP, - OPC_SHLLV_S_W = (0x16 << 6) | OPC_SHLL_QB_DSP, - OPC_SHRL_QB = (0x01 << 6) | OPC_SHLL_QB_DSP, - OPC_SHRLV_QB = (0x03 << 6) | OPC_SHLL_QB_DSP, - OPC_SHRL_PH = (0x19 << 6) | OPC_SHLL_QB_DSP, - OPC_SHRLV_PH = (0x1B << 6) | OPC_SHLL_QB_DSP, - OPC_SHRA_QB = (0x04 << 6) | OPC_SHLL_QB_DSP, - OPC_SHRA_R_QB = (0x05 << 6) | OPC_SHLL_QB_DSP, - OPC_SHRAV_QB = (0x06 << 6) | OPC_SHLL_QB_DSP, - OPC_SHRAV_R_QB = (0x07 << 6) | OPC_SHLL_QB_DSP, - OPC_SHRA_PH = (0x09 << 6) | OPC_SHLL_QB_DSP, - OPC_SHRAV_PH = (0x0B << 6) | OPC_SHLL_QB_DSP, - OPC_SHRA_R_PH = (0x0D << 6) | OPC_SHLL_QB_DSP, - OPC_SHRAV_R_PH = (0x0F << 6) | OPC_SHLL_QB_DSP, - OPC_SHRA_R_W = (0x15 << 6) | OPC_SHLL_QB_DSP, - OPC_SHRAV_R_W = (0x17 << 6) | OPC_SHLL_QB_DSP, -}; - -#define MASK_DPA_W_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP Multiply Sub-class insns */ - OPC_DPAU_H_QBL = (0x03 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPAU_H_QBR = (0x07 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPSU_H_QBL = (0x0B << 6) | OPC_DPA_W_PH_DSP, - OPC_DPSU_H_QBR = (0x0F << 6) | OPC_DPA_W_PH_DSP, - OPC_DPA_W_PH = (0x00 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPAX_W_PH = (0x08 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPAQ_S_W_PH = (0x04 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPAQX_S_W_PH = (0x18 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPAQX_SA_W_PH = (0x1A << 6) | OPC_DPA_W_PH_DSP, - OPC_DPS_W_PH = (0x01 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPSX_W_PH = (0x09 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPSQ_S_W_PH = (0x05 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPSQX_S_W_PH = (0x19 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPSQX_SA_W_PH = (0x1B << 6) | OPC_DPA_W_PH_DSP, - OPC_MULSAQ_S_W_PH = (0x06 << 6) | OPC_DPA_W_PH_DSP, - OPC_DPAQ_SA_L_W = (0x0C << 6) | OPC_DPA_W_PH_DSP, - OPC_DPSQ_SA_L_W = (0x0D << 6) | OPC_DPA_W_PH_DSP, - OPC_MAQ_S_W_PHL = (0x14 << 6) | OPC_DPA_W_PH_DSP, - OPC_MAQ_S_W_PHR = (0x16 << 6) | OPC_DPA_W_PH_DSP, - OPC_MAQ_SA_W_PHL = (0x10 << 6) | OPC_DPA_W_PH_DSP, - OPC_MAQ_SA_W_PHR = (0x12 << 6) | OPC_DPA_W_PH_DSP, - OPC_MULSA_W_PH = (0x02 << 6) | OPC_DPA_W_PH_DSP, -}; - -#define MASK_INSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* DSP Bit/Manipulation Sub-class */ - OPC_INSV = (0x00 << 6) | OPC_INSV_DSP, -}; - -#define MASK_APPEND(op) (MASK_SPECIAL3(op) | (op & 
(0x1F << 6))) -enum { - /* MIPS DSP Append Sub-class */ - OPC_APPEND = (0x00 << 6) | OPC_APPEND_DSP, - OPC_PREPEND = (0x01 << 6) | OPC_APPEND_DSP, - OPC_BALIGN = (0x10 << 6) | OPC_APPEND_DSP, -}; - -#define MASK_EXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP Accumulator and DSPControl Access Sub-class */ - OPC_EXTR_W = (0x00 << 6) | OPC_EXTR_W_DSP, - OPC_EXTR_R_W = (0x04 << 6) | OPC_EXTR_W_DSP, - OPC_EXTR_RS_W = (0x06 << 6) | OPC_EXTR_W_DSP, - OPC_EXTR_S_H = (0x0E << 6) | OPC_EXTR_W_DSP, - OPC_EXTRV_S_H = (0x0F << 6) | OPC_EXTR_W_DSP, - OPC_EXTRV_W = (0x01 << 6) | OPC_EXTR_W_DSP, - OPC_EXTRV_R_W = (0x05 << 6) | OPC_EXTR_W_DSP, - OPC_EXTRV_RS_W = (0x07 << 6) | OPC_EXTR_W_DSP, - OPC_EXTP = (0x02 << 6) | OPC_EXTR_W_DSP, - OPC_EXTPV = (0x03 << 6) | OPC_EXTR_W_DSP, - OPC_EXTPDP = (0x0A << 6) | OPC_EXTR_W_DSP, - OPC_EXTPDPV = (0x0B << 6) | OPC_EXTR_W_DSP, - OPC_SHILO = (0x1A << 6) | OPC_EXTR_W_DSP, - OPC_SHILOV = (0x1B << 6) | OPC_EXTR_W_DSP, - OPC_MTHLIP = (0x1F << 6) | OPC_EXTR_W_DSP, - OPC_WRDSP = (0x13 << 6) | OPC_EXTR_W_DSP, - OPC_RDDSP = (0x12 << 6) | OPC_EXTR_W_DSP, -}; - -#define MASK_ABSQ_S_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP Arithmetic Sub-class */ - OPC_PRECEQ_L_PWL = (0x14 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEQ_L_PWR = (0x15 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEQ_PW_QHL = (0x0C << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEQ_PW_QHR = (0x0D << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEQ_PW_QHLA = (0x0E << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEQ_PW_QHRA = (0x0F << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEQU_QH_OBL = (0x04 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEQU_QH_OBR = (0x05 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEQU_QH_OBLA = (0x06 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEQU_QH_OBRA = (0x07 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEU_QH_OBL = (0x1C << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEU_QH_OBR = (0x1D << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEU_QH_OBLA = (0x1E << 6) | OPC_ABSQ_S_QH_DSP, - OPC_PRECEU_QH_OBRA = (0x1F << 6) | OPC_ABSQ_S_QH_DSP, - OPC_ABSQ_S_OB = (0x01 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_ABSQ_S_PW = (0x11 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_ABSQ_S_QH = (0x09 << 6) | OPC_ABSQ_S_QH_DSP, - /* DSP Bit/Manipulation Sub-class */ - OPC_REPL_OB = (0x02 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_REPL_PW = (0x12 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_REPL_QH = (0x0A << 6) | OPC_ABSQ_S_QH_DSP, - OPC_REPLV_OB = (0x03 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_REPLV_PW = (0x13 << 6) | OPC_ABSQ_S_QH_DSP, - OPC_REPLV_QH = (0x0B << 6) | OPC_ABSQ_S_QH_DSP, -}; - -#define MASK_ADDU_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP Multiply Sub-class insns */ - OPC_MULEQ_S_PW_QHL = (0x1C << 6) | OPC_ADDU_OB_DSP, - OPC_MULEQ_S_PW_QHR = (0x1D << 6) | OPC_ADDU_OB_DSP, - OPC_MULEU_S_QH_OBL = (0x06 << 6) | OPC_ADDU_OB_DSP, - OPC_MULEU_S_QH_OBR = (0x07 << 6) | OPC_ADDU_OB_DSP, - OPC_MULQ_RS_QH = (0x1F << 6) | OPC_ADDU_OB_DSP, - /* MIPS DSP Arithmetic Sub-class */ - OPC_RADDU_L_OB = (0x14 << 6) | OPC_ADDU_OB_DSP, - OPC_SUBQ_PW = (0x13 << 6) | OPC_ADDU_OB_DSP, - OPC_SUBQ_S_PW = (0x17 << 6) | OPC_ADDU_OB_DSP, - OPC_SUBQ_QH = (0x0B << 6) | OPC_ADDU_OB_DSP, - OPC_SUBQ_S_QH = (0x0F << 6) | OPC_ADDU_OB_DSP, - OPC_SUBU_OB = (0x01 << 6) | OPC_ADDU_OB_DSP, - OPC_SUBU_S_OB = (0x05 << 6) | OPC_ADDU_OB_DSP, - OPC_SUBU_QH = (0x09 << 6) | OPC_ADDU_OB_DSP, - OPC_SUBU_S_QH = (0x0D << 6) | OPC_ADDU_OB_DSP, - OPC_SUBUH_OB = (0x19 << 6) | OPC_ADDU_OB_DSP, - OPC_SUBUH_R_OB = (0x1B << 6) | OPC_ADDU_OB_DSP, - OPC_ADDQ_PW = (0x12 << 6) | OPC_ADDU_OB_DSP, - OPC_ADDQ_S_PW = (0x16 << 6) | OPC_ADDU_OB_DSP, 
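/*
 * A rough key to the data-format suffixes in these DSP tables, per the
 * MIPS DSP ASE naming: on the 32-bit side, .QB is four 8-bit lanes,
 * .PH two 16-bit lanes and .W one 32-bit lane; on the 64-bit side
 * handled by this group, .OB is eight 8-bit lanes, .QH four 16-bit
 * lanes and .PW two 32-bit lanes. A trailing _S (as in OPC_ADDU_S_OB)
 * marks the saturating variant, and ADDQ vs ADDU distinguishes signed
 * fractional from unsigned arithmetic.
 */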
- OPC_ADDQ_QH = (0x0A << 6) | OPC_ADDU_OB_DSP, - OPC_ADDQ_S_QH = (0x0E << 6) | OPC_ADDU_OB_DSP, - OPC_ADDU_OB = (0x00 << 6) | OPC_ADDU_OB_DSP, - OPC_ADDU_S_OB = (0x04 << 6) | OPC_ADDU_OB_DSP, - OPC_ADDU_QH = (0x08 << 6) | OPC_ADDU_OB_DSP, - OPC_ADDU_S_QH = (0x0C << 6) | OPC_ADDU_OB_DSP, - OPC_ADDUH_OB = (0x18 << 6) | OPC_ADDU_OB_DSP, - OPC_ADDUH_R_OB = (0x1A << 6) | OPC_ADDU_OB_DSP, -}; - -#define MASK_CMPU_EQ_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* DSP Compare-Pick Sub-class */ - OPC_CMP_EQ_PW = (0x10 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMP_LT_PW = (0x11 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMP_LE_PW = (0x12 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMP_EQ_QH = (0x08 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMP_LT_QH = (0x09 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMP_LE_QH = (0x0A << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMPGDU_EQ_OB = (0x18 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMPGDU_LT_OB = (0x19 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMPGDU_LE_OB = (0x1A << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMPGU_EQ_OB = (0x04 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMPGU_LT_OB = (0x05 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMPGU_LE_OB = (0x06 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMPU_EQ_OB = (0x00 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMPU_LT_OB = (0x01 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_CMPU_LE_OB = (0x02 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PACKRL_PW = (0x0E << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PICK_OB = (0x03 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PICK_PW = (0x13 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PICK_QH = (0x0B << 6) | OPC_CMPU_EQ_OB_DSP, - /* MIPS DSP Arithmetic Sub-class */ - OPC_PRECR_OB_QH = (0x0D << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PRECR_SRA_QH_PW = (0x1E << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PRECR_SRA_R_QH_PW = (0x1F << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PRECRQ_OB_QH = (0x0C << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PRECRQ_PW_L = (0x1C << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PRECRQ_QH_PW = (0x14 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PRECRQ_RS_QH_PW = (0x15 << 6) | OPC_CMPU_EQ_OB_DSP, - OPC_PRECRQU_S_OB_QH = (0x0F << 6) | OPC_CMPU_EQ_OB_DSP, -}; - -#define MASK_DAPPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* DSP Append Sub-class */ - OPC_DAPPEND = (0x00 << 6) | OPC_DAPPEND_DSP, - OPC_PREPENDD = (0x03 << 6) | OPC_DAPPEND_DSP, - OPC_PREPENDW = (0x01 << 6) | OPC_DAPPEND_DSP, - OPC_DBALIGN = (0x10 << 6) | OPC_DAPPEND_DSP, -}; - -#define MASK_DEXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP Accumulator and DSPControl Access Sub-class */ - OPC_DMTHLIP = (0x1F << 6) | OPC_DEXTR_W_DSP, - OPC_DSHILO = (0x1A << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTP = (0x02 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTPDP = (0x0A << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTPDPV = (0x0B << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTPV = (0x03 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTR_L = (0x10 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTR_R_L = (0x14 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTR_RS_L = (0x16 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTR_W = (0x00 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTR_R_W = (0x04 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTR_RS_W = (0x06 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTR_S_H = (0x0E << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTRV_L = (0x11 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTRV_R_L = (0x15 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTRV_RS_L = (0x17 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTRV_S_H = (0x0F << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTRV_W = (0x01 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTRV_R_W = (0x05 << 6) | OPC_DEXTR_W_DSP, - OPC_DEXTRV_RS_W = (0x07 << 6) | OPC_DEXTR_W_DSP, - OPC_DSHILOV = (0x1B << 6) | OPC_DEXTR_W_DSP, -}; - -#define MASK_DINSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* DSP 
Bit/Manipulation Sub-class */ - OPC_DINSV = (0x00 << 6) | OPC_DINSV_DSP, -}; - -#define MASK_DPAQ_W_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP Multiply Sub-class insns */ - OPC_DMADD = (0x19 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DMADDU = (0x1D << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DMSUB = (0x1B << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DMSUBU = (0x1F << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DPA_W_QH = (0x00 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DPAQ_S_W_QH = (0x04 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DPAQ_SA_L_PW = (0x0C << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DPAU_H_OBL = (0x03 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DPAU_H_OBR = (0x07 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DPS_W_QH = (0x01 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DPSQ_S_W_QH = (0x05 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DPSQ_SA_L_PW = (0x0D << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DPSU_H_OBL = (0x0B << 6) | OPC_DPAQ_W_QH_DSP, - OPC_DPSU_H_OBR = (0x0F << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MAQ_S_L_PWL = (0x1C << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MAQ_S_L_PWR = (0x1E << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MAQ_S_W_QHLL = (0x14 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MAQ_SA_W_QHLL = (0x10 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MAQ_S_W_QHLR = (0x15 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MAQ_SA_W_QHLR = (0x11 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MAQ_S_W_QHRL = (0x16 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MAQ_SA_W_QHRL = (0x12 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MAQ_S_W_QHRR = (0x17 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MAQ_SA_W_QHRR = (0x13 << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MULSAQ_S_L_PW = (0x0E << 6) | OPC_DPAQ_W_QH_DSP, - OPC_MULSAQ_S_W_QH = (0x06 << 6) | OPC_DPAQ_W_QH_DSP, -}; - -#define MASK_SHLL_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) -enum { - /* MIPS DSP GPR-Based Shift Sub-class */ - OPC_SHLL_PW = (0x10 << 6) | OPC_SHLL_OB_DSP, - OPC_SHLL_S_PW = (0x14 << 6) | OPC_SHLL_OB_DSP, - OPC_SHLLV_OB = (0x02 << 6) | OPC_SHLL_OB_DSP, - OPC_SHLLV_PW = (0x12 << 6) | OPC_SHLL_OB_DSP, - OPC_SHLLV_S_PW = (0x16 << 6) | OPC_SHLL_OB_DSP, - OPC_SHLLV_QH = (0x0A << 6) | OPC_SHLL_OB_DSP, - OPC_SHLLV_S_QH = (0x0E << 6) | OPC_SHLL_OB_DSP, - OPC_SHRA_PW = (0x11 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRA_R_PW = (0x15 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRAV_OB = (0x06 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRAV_R_OB = (0x07 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRAV_PW = (0x13 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRAV_R_PW = (0x17 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRAV_QH = (0x0B << 6) | OPC_SHLL_OB_DSP, - OPC_SHRAV_R_QH = (0x0F << 6) | OPC_SHLL_OB_DSP, - OPC_SHRLV_OB = (0x03 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRLV_QH = (0x1B << 6) | OPC_SHLL_OB_DSP, - OPC_SHLL_OB = (0x00 << 6) | OPC_SHLL_OB_DSP, - OPC_SHLL_QH = (0x08 << 6) | OPC_SHLL_OB_DSP, - OPC_SHLL_S_QH = (0x0C << 6) | OPC_SHLL_OB_DSP, - OPC_SHRA_OB = (0x04 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRA_R_OB = (0x05 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRA_QH = (0x09 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRA_R_QH = (0x0D << 6) | OPC_SHLL_OB_DSP, - OPC_SHRL_OB = (0x01 << 6) | OPC_SHLL_OB_DSP, - OPC_SHRL_QH = (0x19 << 6) | OPC_SHLL_OB_DSP, -}; - -/* Coprocessor 0 (rs field) */ -#define MASK_CP0(op) MASK_OP_MAJOR(op) | (op & (0x1F << 21)) - -enum { - OPC_MFC0 = (0x00 << 21) | OPC_CP0, - OPC_DMFC0 = (0x01 << 21) | OPC_CP0, - OPC_MTC0 = (0x04 << 21) | OPC_CP0, - OPC_DMTC0 = (0x05 << 21) | OPC_CP0, - OPC_MFTR = (0x08 << 21) | OPC_CP0, - OPC_RDPGPR = (0x0A << 21) | OPC_CP0, - OPC_MFMC0 = (0x0B << 21) | OPC_CP0, - OPC_MTTR = (0x0C << 21) | OPC_CP0, - OPC_WRPGPR = (0x0E << 21) | OPC_CP0, - OPC_C0 = (0x10 << 21) | OPC_CP0, - OPC_C0_FIRST = (0x10 << 21) | OPC_CP0, - OPC_C0_LAST = (0x1F << 21) | OPC_CP0, -}; - -/* MFMC0 opcodes */ -#define 
MASK_MFMC0(op) MASK_CP0(op) | (op & 0xFFFF) - -enum { - OPC_DMT = 0x01 | (0 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0, - OPC_EMT = 0x01 | (1 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0, - OPC_DVPE = 0x01 | (0 << 5) | OPC_MFMC0, - OPC_EVPE = 0x01 | (1 << 5) | OPC_MFMC0, - OPC_DI = (0 << 5) | (0x0C << 11) | OPC_MFMC0, - OPC_EI = (1 << 5) | (0x0C << 11) | OPC_MFMC0, -}; - -/* Coprocessor 0 (with rs == C0) */ -#define MASK_C0(op) MASK_CP0(op) | (op & 0x3F) - -enum { - OPC_TLBR = 0x01 | OPC_C0, - OPC_TLBWI = 0x02 | OPC_C0, - OPC_TLBINV = 0x03 | OPC_C0, - OPC_TLBINVF = 0x04 | OPC_C0, - OPC_TLBWR = 0x06 | OPC_C0, - OPC_TLBP = 0x08 | OPC_C0, - OPC_RFE = 0x10 | OPC_C0, - OPC_ERET = 0x18 | OPC_C0, - OPC_DERET = 0x1F | OPC_C0, - OPC_WAIT = 0x20 | OPC_C0, -}; - -/* Coprocessor 1 (rs field) */ -#define MASK_CP1(op) MASK_OP_MAJOR(op) | (op & (0x1F << 21)) - -/* Values for the fmt field in FP instructions */ -enum { - /* 0 - 15 are reserved */ - FMT_S = 16, /* single fp */ - FMT_D = 17, /* double fp */ - FMT_E = 18, /* extended fp */ - FMT_Q = 19, /* quad fp */ - FMT_W = 20, /* 32-bit fixed */ - FMT_L = 21, /* 64-bit fixed */ - FMT_PS = 22, /* paired single fp */ - /* 23 - 31 are reserved */ -}; - -enum { - OPC_MFC1 = (0x00 << 21) | OPC_CP1, - OPC_DMFC1 = (0x01 << 21) | OPC_CP1, - OPC_CFC1 = (0x02 << 21) | OPC_CP1, - OPC_MFHC1 = (0x03 << 21) | OPC_CP1, - OPC_MTC1 = (0x04 << 21) | OPC_CP1, - OPC_DMTC1 = (0x05 << 21) | OPC_CP1, - OPC_CTC1 = (0x06 << 21) | OPC_CP1, - OPC_MTHC1 = (0x07 << 21) | OPC_CP1, - OPC_BC1 = (0x08 << 21) | OPC_CP1, /* bc */ - OPC_BC1ANY2 = (0x09 << 21) | OPC_CP1, - OPC_BC1ANY4 = (0x0A << 21) | OPC_CP1, - OPC_BZ_V = (0x0B << 21) | OPC_CP1, - OPC_BNZ_V = (0x0F << 21) | OPC_CP1, - OPC_S_FMT = (FMT_S << 21) | OPC_CP1, - OPC_D_FMT = (FMT_D << 21) | OPC_CP1, - OPC_E_FMT = (FMT_E << 21) | OPC_CP1, - OPC_Q_FMT = (FMT_Q << 21) | OPC_CP1, - OPC_W_FMT = (FMT_W << 21) | OPC_CP1, - OPC_L_FMT = (FMT_L << 21) | OPC_CP1, - OPC_PS_FMT = (FMT_PS << 21) | OPC_CP1, - OPC_BC1EQZ = (0x09 << 21) | OPC_CP1, - OPC_BC1NEZ = (0x0D << 21) | OPC_CP1, - OPC_BZ_B = (0x18 << 21) | OPC_CP1, - OPC_BZ_H = (0x19 << 21) | OPC_CP1, - OPC_BZ_W = (0x1A << 21) | OPC_CP1, - OPC_BZ_D = (0x1B << 21) | OPC_CP1, - OPC_BNZ_B = (0x1C << 21) | OPC_CP1, - OPC_BNZ_H = (0x1D << 21) | OPC_CP1, - OPC_BNZ_W = (0x1E << 21) | OPC_CP1, - OPC_BNZ_D = (0x1F << 21) | OPC_CP1, -}; - -#define MASK_CP1_FUNC(op) MASK_CP1(op) | (op & 0x3F) -#define MASK_BC1(op) MASK_CP1(op) | (op & (0x3 << 16)) - -enum { - OPC_BC1F = (0x00 << 16) | OPC_BC1, - OPC_BC1T = (0x01 << 16) | OPC_BC1, - OPC_BC1FL = (0x02 << 16) | OPC_BC1, - OPC_BC1TL = (0x03 << 16) | OPC_BC1, -}; - -enum { - OPC_BC1FANY2 = (0x00 << 16) | OPC_BC1ANY2, - OPC_BC1TANY2 = (0x01 << 16) | OPC_BC1ANY2, -}; - -enum { - OPC_BC1FANY4 = (0x00 << 16) | OPC_BC1ANY4, - OPC_BC1TANY4 = (0x01 << 16) | OPC_BC1ANY4, -}; - -#define MASK_CP2(op) MASK_OP_MAJOR(op) | (op & (0x1F << 21)) - -enum { - OPC_MFC2 = (0x00 << 21) | OPC_CP2, - OPC_DMFC2 = (0x01 << 21) | OPC_CP2, - OPC_CFC2 = (0x02 << 21) | OPC_CP2, - OPC_MFHC2 = (0x03 << 21) | OPC_CP2, - OPC_MTC2 = (0x04 << 21) | OPC_CP2, - OPC_DMTC2 = (0x05 << 21) | OPC_CP2, - OPC_CTC2 = (0x06 << 21) | OPC_CP2, - OPC_MTHC2 = (0x07 << 21) | OPC_CP2, - OPC_BC2 = (0x08 << 21) | OPC_CP2, - OPC_BC2EQZ = (0x09 << 21) | OPC_CP2, - OPC_BC2NEZ = (0x0D << 21) | OPC_CP2, -}; - -#define MASK_LMI(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)) | (op & 0x1F)) - -enum { - OPC_PADDSH = (24 << 21) | (0x00) | OPC_CP2, - OPC_PADDUSH = (25 << 21) | (0x00) | OPC_CP2, - OPC_PADDH = (26 << 
21) | (0x00) | OPC_CP2, - OPC_PADDW = (27 << 21) | (0x00) | OPC_CP2, - OPC_PADDSB = (28 << 21) | (0x00) | OPC_CP2, - OPC_PADDUSB = (29 << 21) | (0x00) | OPC_CP2, - OPC_PADDB = (30 << 21) | (0x00) | OPC_CP2, - OPC_PADDD = (31 << 21) | (0x00) | OPC_CP2, - - OPC_PSUBSH = (24 << 21) | (0x01) | OPC_CP2, - OPC_PSUBUSH = (25 << 21) | (0x01) | OPC_CP2, - OPC_PSUBH = (26 << 21) | (0x01) | OPC_CP2, - OPC_PSUBW = (27 << 21) | (0x01) | OPC_CP2, - OPC_PSUBSB = (28 << 21) | (0x01) | OPC_CP2, - OPC_PSUBUSB = (29 << 21) | (0x01) | OPC_CP2, - OPC_PSUBB = (30 << 21) | (0x01) | OPC_CP2, - OPC_PSUBD = (31 << 21) | (0x01) | OPC_CP2, - - OPC_PSHUFH = (24 << 21) | (0x02) | OPC_CP2, - OPC_PACKSSWH = (25 << 21) | (0x02) | OPC_CP2, - OPC_PACKSSHB = (26 << 21) | (0x02) | OPC_CP2, - OPC_PACKUSHB = (27 << 21) | (0x02) | OPC_CP2, - OPC_XOR_CP2 = (28 << 21) | (0x02) | OPC_CP2, - OPC_NOR_CP2 = (29 << 21) | (0x02) | OPC_CP2, - OPC_AND_CP2 = (30 << 21) | (0x02) | OPC_CP2, - OPC_PANDN = (31 << 21) | (0x02) | OPC_CP2, - - OPC_PUNPCKLHW = (24 << 21) | (0x03) | OPC_CP2, - OPC_PUNPCKHHW = (25 << 21) | (0x03) | OPC_CP2, - OPC_PUNPCKLBH = (26 << 21) | (0x03) | OPC_CP2, - OPC_PUNPCKHBH = (27 << 21) | (0x03) | OPC_CP2, - OPC_PINSRH_0 = (28 << 21) | (0x03) | OPC_CP2, - OPC_PINSRH_1 = (29 << 21) | (0x03) | OPC_CP2, - OPC_PINSRH_2 = (30 << 21) | (0x03) | OPC_CP2, - OPC_PINSRH_3 = (31 << 21) | (0x03) | OPC_CP2, - - OPC_PAVGH = (24 << 21) | (0x08) | OPC_CP2, - OPC_PAVGB = (25 << 21) | (0x08) | OPC_CP2, - OPC_PMAXSH = (26 << 21) | (0x08) | OPC_CP2, - OPC_PMINSH = (27 << 21) | (0x08) | OPC_CP2, - OPC_PMAXUB = (28 << 21) | (0x08) | OPC_CP2, - OPC_PMINUB = (29 << 21) | (0x08) | OPC_CP2, - - OPC_PCMPEQW = (24 << 21) | (0x09) | OPC_CP2, - OPC_PCMPGTW = (25 << 21) | (0x09) | OPC_CP2, - OPC_PCMPEQH = (26 << 21) | (0x09) | OPC_CP2, - OPC_PCMPGTH = (27 << 21) | (0x09) | OPC_CP2, - OPC_PCMPEQB = (28 << 21) | (0x09) | OPC_CP2, - OPC_PCMPGTB = (29 << 21) | (0x09) | OPC_CP2, - - OPC_PSLLW = (24 << 21) | (0x0A) | OPC_CP2, - OPC_PSLLH = (25 << 21) | (0x0A) | OPC_CP2, - OPC_PMULLH = (26 << 21) | (0x0A) | OPC_CP2, - OPC_PMULHH = (27 << 21) | (0x0A) | OPC_CP2, - OPC_PMULUW = (28 << 21) | (0x0A) | OPC_CP2, - OPC_PMULHUH = (29 << 21) | (0x0A) | OPC_CP2, - - OPC_PSRLW = (24 << 21) | (0x0B) | OPC_CP2, - OPC_PSRLH = (25 << 21) | (0x0B) | OPC_CP2, - OPC_PSRAW = (26 << 21) | (0x0B) | OPC_CP2, - OPC_PSRAH = (27 << 21) | (0x0B) | OPC_CP2, - OPC_PUNPCKLWD = (28 << 21) | (0x0B) | OPC_CP2, - OPC_PUNPCKHWD = (29 << 21) | (0x0B) | OPC_CP2, - - OPC_ADDU_CP2 = (24 << 21) | (0x0C) | OPC_CP2, - OPC_OR_CP2 = (25 << 21) | (0x0C) | OPC_CP2, - OPC_ADD_CP2 = (26 << 21) | (0x0C) | OPC_CP2, - OPC_DADD_CP2 = (27 << 21) | (0x0C) | OPC_CP2, - OPC_SEQU_CP2 = (28 << 21) | (0x0C) | OPC_CP2, - OPC_SEQ_CP2 = (29 << 21) | (0x0C) | OPC_CP2, - - OPC_SUBU_CP2 = (24 << 21) | (0x0D) | OPC_CP2, - OPC_PASUBUB = (25 << 21) | (0x0D) | OPC_CP2, - OPC_SUB_CP2 = (26 << 21) | (0x0D) | OPC_CP2, - OPC_DSUB_CP2 = (27 << 21) | (0x0D) | OPC_CP2, - OPC_SLTU_CP2 = (28 << 21) | (0x0D) | OPC_CP2, - OPC_SLT_CP2 = (29 << 21) | (0x0D) | OPC_CP2, - - OPC_SLL_CP2 = (24 << 21) | (0x0E) | OPC_CP2, - OPC_DSLL_CP2 = (25 << 21) | (0x0E) | OPC_CP2, - OPC_PEXTRH = (26 << 21) | (0x0E) | OPC_CP2, - OPC_PMADDHW = (27 << 21) | (0x0E) | OPC_CP2, - OPC_SLEU_CP2 = (28 << 21) | (0x0E) | OPC_CP2, - OPC_SLE_CP2 = (29 << 21) | (0x0E) | OPC_CP2, - - OPC_SRL_CP2 = (24 << 21) | (0x0F) | OPC_CP2, - OPC_DSRL_CP2 = (25 << 21) | (0x0F) | OPC_CP2, - OPC_SRA_CP2 = (26 << 21) | (0x0F) | OPC_CP2, - OPC_DSRA_CP2 = (27 << 21) | (0x0F) | OPC_CP2, 
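/*
 * Unlike the DSP groups above, these Loongson multimedia (LMI) opcodes
 * live under the CP2 major opcode and are told apart by the rs field
 * (bits 25..21) plus the low five function bits, which is exactly what
 * MASK_LMI keeps. A decoder is therefore expected to switch on the
 * masked word; a minimal sketch:
 *
 *     switch (MASK_LMI(ctx->opcode)) {
 *     case OPC_PADDB:                  // rs = 30, function bits = 0x00
 *         ...
 *     }
 */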
- OPC_BIADD = (28 << 21) | (0x0F) | OPC_CP2, - OPC_PMOVMSKB = (29 << 21) | (0x0F) | OPC_CP2, -}; - - -#define MASK_CP3(op) MASK_OP_MAJOR(op) | (op & 0x3F) - -enum { - OPC_LWXC1 = 0x00 | OPC_CP3, - OPC_LDXC1 = 0x01 | OPC_CP3, - OPC_LUXC1 = 0x05 | OPC_CP3, - OPC_SWXC1 = 0x08 | OPC_CP3, - OPC_SDXC1 = 0x09 | OPC_CP3, - OPC_SUXC1 = 0x0D | OPC_CP3, - OPC_PREFX = 0x0F | OPC_CP3, - OPC_ALNV_PS = 0x1E | OPC_CP3, - OPC_MADD_S = 0x20 | OPC_CP3, - OPC_MADD_D = 0x21 | OPC_CP3, - OPC_MADD_PS = 0x26 | OPC_CP3, - OPC_MSUB_S = 0x28 | OPC_CP3, - OPC_MSUB_D = 0x29 | OPC_CP3, - OPC_MSUB_PS = 0x2E | OPC_CP3, - OPC_NMADD_S = 0x30 | OPC_CP3, - OPC_NMADD_D = 0x31 | OPC_CP3, - OPC_NMADD_PS= 0x36 | OPC_CP3, - OPC_NMSUB_S = 0x38 | OPC_CP3, - OPC_NMSUB_D = 0x39 | OPC_CP3, - OPC_NMSUB_PS= 0x3E | OPC_CP3, -}; - -/* MSA Opcodes */ -#define MASK_MSA_MINOR(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) -enum { - OPC_MSA_I8_00 = 0x00 | OPC_MSA, - OPC_MSA_I8_01 = 0x01 | OPC_MSA, - OPC_MSA_I8_02 = 0x02 | OPC_MSA, - OPC_MSA_I5_06 = 0x06 | OPC_MSA, - OPC_MSA_I5_07 = 0x07 | OPC_MSA, - OPC_MSA_BIT_09 = 0x09 | OPC_MSA, - OPC_MSA_BIT_0A = 0x0A | OPC_MSA, - OPC_MSA_3R_0D = 0x0D | OPC_MSA, - OPC_MSA_3R_0E = 0x0E | OPC_MSA, - OPC_MSA_3R_0F = 0x0F | OPC_MSA, - OPC_MSA_3R_10 = 0x10 | OPC_MSA, - OPC_MSA_3R_11 = 0x11 | OPC_MSA, - OPC_MSA_3R_12 = 0x12 | OPC_MSA, - OPC_MSA_3R_13 = 0x13 | OPC_MSA, - OPC_MSA_3R_14 = 0x14 | OPC_MSA, - OPC_MSA_3R_15 = 0x15 | OPC_MSA, - OPC_MSA_ELM = 0x19 | OPC_MSA, - OPC_MSA_3RF_1A = 0x1A | OPC_MSA, - OPC_MSA_3RF_1B = 0x1B | OPC_MSA, - OPC_MSA_3RF_1C = 0x1C | OPC_MSA, - OPC_MSA_VEC = 0x1E | OPC_MSA, - - /* MI10 instruction */ - OPC_LD_B = (0x20) | OPC_MSA, - OPC_LD_H = (0x21) | OPC_MSA, - OPC_LD_W = (0x22) | OPC_MSA, - OPC_LD_D = (0x23) | OPC_MSA, - OPC_ST_B = (0x24) | OPC_MSA, - OPC_ST_H = (0x25) | OPC_MSA, - OPC_ST_W = (0x26) | OPC_MSA, - OPC_ST_D = (0x27) | OPC_MSA, -}; - -enum { - /* I5 instruction df(bits 22..21) = _b, _h, _w, _d */ - OPC_ADDVI_df = (0x0 << 23) | OPC_MSA_I5_06, - OPC_CEQI_df = (0x0 << 23) | OPC_MSA_I5_07, - OPC_SUBVI_df = (0x1 << 23) | OPC_MSA_I5_06, - OPC_MAXI_S_df = (0x2 << 23) | OPC_MSA_I5_06, - OPC_CLTI_S_df = (0x2 << 23) | OPC_MSA_I5_07, - OPC_MAXI_U_df = (0x3 << 23) | OPC_MSA_I5_06, - OPC_CLTI_U_df = (0x3 << 23) | OPC_MSA_I5_07, - OPC_MINI_S_df = (0x4 << 23) | OPC_MSA_I5_06, - OPC_CLEI_S_df = (0x4 << 23) | OPC_MSA_I5_07, - OPC_MINI_U_df = (0x5 << 23) | OPC_MSA_I5_06, - OPC_CLEI_U_df = (0x5 << 23) | OPC_MSA_I5_07, - OPC_LDI_df = (0x6 << 23) | OPC_MSA_I5_07, - - /* I8 instruction */ - OPC_ANDI_B = (0x0 << 24) | OPC_MSA_I8_00, - OPC_BMNZI_B = (0x0 << 24) | OPC_MSA_I8_01, - OPC_SHF_B = (0x0 << 24) | OPC_MSA_I8_02, - OPC_ORI_B = (0x1 << 24) | OPC_MSA_I8_00, - OPC_BMZI_B = (0x1 << 24) | OPC_MSA_I8_01, - OPC_SHF_H = (0x1 << 24) | OPC_MSA_I8_02, - OPC_NORI_B = (0x2 << 24) | OPC_MSA_I8_00, - OPC_BSELI_B = (0x2 << 24) | OPC_MSA_I8_01, - OPC_SHF_W = (0x2 << 24) | OPC_MSA_I8_02, - OPC_XORI_B = (0x3 << 24) | OPC_MSA_I8_00, - - /* VEC/2R/2RF instruction */ - OPC_AND_V = (0x00 << 21) | OPC_MSA_VEC, - OPC_OR_V = (0x01 << 21) | OPC_MSA_VEC, - OPC_NOR_V = (0x02 << 21) | OPC_MSA_VEC, - OPC_XOR_V = (0x03 << 21) | OPC_MSA_VEC, - OPC_BMNZ_V = (0x04 << 21) | OPC_MSA_VEC, - OPC_BMZ_V = (0x05 << 21) | OPC_MSA_VEC, - OPC_BSEL_V = (0x06 << 21) | OPC_MSA_VEC, - - OPC_MSA_2R = (0x18 << 21) | OPC_MSA_VEC, - OPC_MSA_2RF = (0x19 << 21) | OPC_MSA_VEC, - - /* 2R instruction df(bits 17..16) = _b, _h, _w, _d */ - OPC_FILL_df = (0x00 << 18) | OPC_MSA_2R, - OPC_PCNT_df = (0x01 << 18) | OPC_MSA_2R, - OPC_NLOC_df = (0x02 << 18) | 
OPC_MSA_2R, - OPC_NLZC_df = (0x03 << 18) | OPC_MSA_2R, - - /* 2RF instruction df(bit 16) = _w, _d */ - OPC_FCLASS_df = (0x00 << 17) | OPC_MSA_2RF, - OPC_FTRUNC_S_df = (0x01 << 17) | OPC_MSA_2RF, - OPC_FTRUNC_U_df = (0x02 << 17) | OPC_MSA_2RF, - OPC_FSQRT_df = (0x03 << 17) | OPC_MSA_2RF, - OPC_FRSQRT_df = (0x04 << 17) | OPC_MSA_2RF, - OPC_FRCP_df = (0x05 << 17) | OPC_MSA_2RF, - OPC_FRINT_df = (0x06 << 17) | OPC_MSA_2RF, - OPC_FLOG2_df = (0x07 << 17) | OPC_MSA_2RF, - OPC_FEXUPL_df = (0x08 << 17) | OPC_MSA_2RF, - OPC_FEXUPR_df = (0x09 << 17) | OPC_MSA_2RF, - OPC_FFQL_df = (0x0A << 17) | OPC_MSA_2RF, - OPC_FFQR_df = (0x0B << 17) | OPC_MSA_2RF, - OPC_FTINT_S_df = (0x0C << 17) | OPC_MSA_2RF, - OPC_FTINT_U_df = (0x0D << 17) | OPC_MSA_2RF, - OPC_FFINT_S_df = (0x0E << 17) | OPC_MSA_2RF, - OPC_FFINT_U_df = (0x0F << 17) | OPC_MSA_2RF, - - /* 3R instruction df(bits 22..21) = _b, _h, _w, d */ - OPC_SLL_df = (0x0 << 23) | OPC_MSA_3R_0D, - OPC_ADDV_df = (0x0 << 23) | OPC_MSA_3R_0E, - OPC_CEQ_df = (0x0 << 23) | OPC_MSA_3R_0F, - OPC_ADD_A_df = (0x0 << 23) | OPC_MSA_3R_10, - OPC_SUBS_S_df = (0x0 << 23) | OPC_MSA_3R_11, - OPC_MULV_df = (0x0 << 23) | OPC_MSA_3R_12, - OPC_DOTP_S_df = (0x0 << 23) | OPC_MSA_3R_13, - OPC_SLD_df = (0x0 << 23) | OPC_MSA_3R_14, - OPC_VSHF_df = (0x0 << 23) | OPC_MSA_3R_15, - OPC_SRA_df = (0x1 << 23) | OPC_MSA_3R_0D, - OPC_SUBV_df = (0x1 << 23) | OPC_MSA_3R_0E, - OPC_ADDS_A_df = (0x1 << 23) | OPC_MSA_3R_10, - OPC_SUBS_U_df = (0x1 << 23) | OPC_MSA_3R_11, - OPC_MADDV_df = (0x1 << 23) | OPC_MSA_3R_12, - OPC_DOTP_U_df = (0x1 << 23) | OPC_MSA_3R_13, - OPC_SPLAT_df = (0x1 << 23) | OPC_MSA_3R_14, - OPC_SRAR_df = (0x1 << 23) | OPC_MSA_3R_15, - OPC_SRL_df = (0x2 << 23) | OPC_MSA_3R_0D, - OPC_MAX_S_df = (0x2 << 23) | OPC_MSA_3R_0E, - OPC_CLT_S_df = (0x2 << 23) | OPC_MSA_3R_0F, - OPC_ADDS_S_df = (0x2 << 23) | OPC_MSA_3R_10, - OPC_SUBSUS_U_df = (0x2 << 23) | OPC_MSA_3R_11, - OPC_MSUBV_df = (0x2 << 23) | OPC_MSA_3R_12, - OPC_DPADD_S_df = (0x2 << 23) | OPC_MSA_3R_13, - OPC_PCKEV_df = (0x2 << 23) | OPC_MSA_3R_14, - OPC_SRLR_df = (0x2 << 23) | OPC_MSA_3R_15, - OPC_BCLR_df = (0x3 << 23) | OPC_MSA_3R_0D, - OPC_MAX_U_df = (0x3 << 23) | OPC_MSA_3R_0E, - OPC_CLT_U_df = (0x3 << 23) | OPC_MSA_3R_0F, - OPC_ADDS_U_df = (0x3 << 23) | OPC_MSA_3R_10, - OPC_SUBSUU_S_df = (0x3 << 23) | OPC_MSA_3R_11, - OPC_DPADD_U_df = (0x3 << 23) | OPC_MSA_3R_13, - OPC_PCKOD_df = (0x3 << 23) | OPC_MSA_3R_14, - OPC_BSET_df = (0x4 << 23) | OPC_MSA_3R_0D, - OPC_MIN_S_df = (0x4 << 23) | OPC_MSA_3R_0E, - OPC_CLE_S_df = (0x4 << 23) | OPC_MSA_3R_0F, - OPC_AVE_S_df = (0x4 << 23) | OPC_MSA_3R_10, - OPC_ASUB_S_df = (0x4 << 23) | OPC_MSA_3R_11, - OPC_DIV_S_df = (0x4 << 23) | OPC_MSA_3R_12, - OPC_DPSUB_S_df = (0x4 << 23) | OPC_MSA_3R_13, - OPC_ILVL_df = (0x4 << 23) | OPC_MSA_3R_14, - OPC_HADD_S_df = (0x4 << 23) | OPC_MSA_3R_15, - OPC_BNEG_df = (0x5 << 23) | OPC_MSA_3R_0D, - OPC_MIN_U_df = (0x5 << 23) | OPC_MSA_3R_0E, - OPC_CLE_U_df = (0x5 << 23) | OPC_MSA_3R_0F, - OPC_AVE_U_df = (0x5 << 23) | OPC_MSA_3R_10, - OPC_ASUB_U_df = (0x5 << 23) | OPC_MSA_3R_11, - OPC_DIV_U_df = (0x5 << 23) | OPC_MSA_3R_12, - OPC_DPSUB_U_df = (0x5 << 23) | OPC_MSA_3R_13, - OPC_ILVR_df = (0x5 << 23) | OPC_MSA_3R_14, - OPC_HADD_U_df = (0x5 << 23) | OPC_MSA_3R_15, - OPC_BINSL_df = (0x6 << 23) | OPC_MSA_3R_0D, - OPC_MAX_A_df = (0x6 << 23) | OPC_MSA_3R_0E, - OPC_AVER_S_df = (0x6 << 23) | OPC_MSA_3R_10, - OPC_MOD_S_df = (0x6 << 23) | OPC_MSA_3R_12, - OPC_ILVEV_df = (0x6 << 23) | OPC_MSA_3R_14, - OPC_HSUB_S_df = (0x6 << 23) | OPC_MSA_3R_15, - OPC_BINSR_df = (0x7 << 23) | 
OPC_MSA_3R_0D, - OPC_MIN_A_df = (0x7 << 23) | OPC_MSA_3R_0E, - OPC_AVER_U_df = (0x7 << 23) | OPC_MSA_3R_10, - OPC_MOD_U_df = (0x7 << 23) | OPC_MSA_3R_12, - OPC_ILVOD_df = (0x7 << 23) | OPC_MSA_3R_14, - OPC_HSUB_U_df = (0x7 << 23) | OPC_MSA_3R_15, - - /* ELM instructions df(bits 21..16) = _b, _h, _w, _d */ - OPC_SLDI_df = (0x0 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_CTCMSA = (0x0 << 22) | (0x3E << 16) | OPC_MSA_ELM, - OPC_SPLATI_df = (0x1 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_CFCMSA = (0x1 << 22) | (0x3E << 16) | OPC_MSA_ELM, - OPC_COPY_S_df = (0x2 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_MOVE_V = (0x2 << 22) | (0x3E << 16) | OPC_MSA_ELM, - OPC_COPY_U_df = (0x3 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_INSERT_df = (0x4 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_INSVE_df = (0x5 << 22) | (0x00 << 16) | OPC_MSA_ELM, - - /* 3RF instruction _df(bit 21) = _w, _d */ - OPC_FCAF_df = (0x0 << 22) | OPC_MSA_3RF_1A, - OPC_FADD_df = (0x0 << 22) | OPC_MSA_3RF_1B, - OPC_FCUN_df = (0x1 << 22) | OPC_MSA_3RF_1A, - OPC_FSUB_df = (0x1 << 22) | OPC_MSA_3RF_1B, - OPC_FCOR_df = (0x1 << 22) | OPC_MSA_3RF_1C, - OPC_FCEQ_df = (0x2 << 22) | OPC_MSA_3RF_1A, - OPC_FMUL_df = (0x2 << 22) | OPC_MSA_3RF_1B, - OPC_FCUNE_df = (0x2 << 22) | OPC_MSA_3RF_1C, - OPC_FCUEQ_df = (0x3 << 22) | OPC_MSA_3RF_1A, - OPC_FDIV_df = (0x3 << 22) | OPC_MSA_3RF_1B, - OPC_FCNE_df = (0x3 << 22) | OPC_MSA_3RF_1C, - OPC_FCLT_df = (0x4 << 22) | OPC_MSA_3RF_1A, - OPC_FMADD_df = (0x4 << 22) | OPC_MSA_3RF_1B, - OPC_MUL_Q_df = (0x4 << 22) | OPC_MSA_3RF_1C, - OPC_FCULT_df = (0x5 << 22) | OPC_MSA_3RF_1A, - OPC_FMSUB_df = (0x5 << 22) | OPC_MSA_3RF_1B, - OPC_MADD_Q_df = (0x5 << 22) | OPC_MSA_3RF_1C, - OPC_FCLE_df = (0x6 << 22) | OPC_MSA_3RF_1A, - OPC_MSUB_Q_df = (0x6 << 22) | OPC_MSA_3RF_1C, - OPC_FCULE_df = (0x7 << 22) | OPC_MSA_3RF_1A, - OPC_FEXP2_df = (0x7 << 22) | OPC_MSA_3RF_1B, - OPC_FSAF_df = (0x8 << 22) | OPC_MSA_3RF_1A, - OPC_FEXDO_df = (0x8 << 22) | OPC_MSA_3RF_1B, - OPC_FSUN_df = (0x9 << 22) | OPC_MSA_3RF_1A, - OPC_FSOR_df = (0x9 << 22) | OPC_MSA_3RF_1C, - OPC_FSEQ_df = (0xA << 22) | OPC_MSA_3RF_1A, - OPC_FTQ_df = (0xA << 22) | OPC_MSA_3RF_1B, - OPC_FSUNE_df = (0xA << 22) | OPC_MSA_3RF_1C, - OPC_FSUEQ_df = (0xB << 22) | OPC_MSA_3RF_1A, - OPC_FSNE_df = (0xB << 22) | OPC_MSA_3RF_1C, - OPC_FSLT_df = (0xC << 22) | OPC_MSA_3RF_1A, - OPC_FMIN_df = (0xC << 22) | OPC_MSA_3RF_1B, - OPC_MULR_Q_df = (0xC << 22) | OPC_MSA_3RF_1C, - OPC_FSULT_df = (0xD << 22) | OPC_MSA_3RF_1A, - OPC_FMIN_A_df = (0xD << 22) | OPC_MSA_3RF_1B, - OPC_MADDR_Q_df = (0xD << 22) | OPC_MSA_3RF_1C, - OPC_FSLE_df = (0xE << 22) | OPC_MSA_3RF_1A, - OPC_FMAX_df = (0xE << 22) | OPC_MSA_3RF_1B, - OPC_MSUBR_Q_df = (0xE << 22) | OPC_MSA_3RF_1C, - OPC_FSULE_df = (0xF << 22) | OPC_MSA_3RF_1A, - OPC_FMAX_A_df = (0xF << 22) | OPC_MSA_3RF_1B, - - /* BIT instruction df(bits 22..16) = _B _H _W _D */ - OPC_SLLI_df = (0x0 << 23) | OPC_MSA_BIT_09, - OPC_SAT_S_df = (0x0 << 23) | OPC_MSA_BIT_0A, - OPC_SRAI_df = (0x1 << 23) | OPC_MSA_BIT_09, - OPC_SAT_U_df = (0x1 << 23) | OPC_MSA_BIT_0A, - OPC_SRLI_df = (0x2 << 23) | OPC_MSA_BIT_09, - OPC_SRARI_df = (0x2 << 23) | OPC_MSA_BIT_0A, - OPC_BCLRI_df = (0x3 << 23) | OPC_MSA_BIT_09, - OPC_SRLRI_df = (0x3 << 23) | OPC_MSA_BIT_0A, - OPC_BSETI_df = (0x4 << 23) | OPC_MSA_BIT_09, - OPC_BNEGI_df = (0x5 << 23) | OPC_MSA_BIT_09, - OPC_BINSLI_df = (0x6 << 23) | OPC_MSA_BIT_09, - OPC_BINSRI_df = (0x7 << 23) | OPC_MSA_BIT_09, -}; - - -#define gen_helper_0e0i(tcg_ctx, name, arg) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg); \ - 
gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); \ - tcg_temp_free_i32(tcg_ctx, helper_tmp); \ - } while(0) - -#define gen_helper_0e1i(tcg_ctx, name, arg1, arg2) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg2); \ - gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, helper_tmp); \ - tcg_temp_free_i32(tcg_ctx, helper_tmp); \ - } while(0) - -#define gen_helper_1e0i(tcg_ctx, name, ret, arg1) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg1); \ - gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, helper_tmp); \ - tcg_temp_free_i32(tcg_ctx, helper_tmp); \ - } while(0) - -#define gen_helper_1e1i(tcg_ctx, name, ret, arg1, arg2) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg2); \ - gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, helper_tmp); \ - tcg_temp_free_i32(tcg_ctx, helper_tmp); \ - } while(0) - -#define gen_helper_0e2i(tcg_ctx, name, arg1, arg2, arg3) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg3); \ - gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, arg2, helper_tmp); \ - tcg_temp_free_i32(tcg_ctx, helper_tmp); \ - } while(0) - -#define gen_helper_1e2i(tcg_ctx, name, ret, arg1, arg2, arg3) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg3); \ - gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, arg2, helper_tmp); \ - tcg_temp_free_i32(tcg_ctx, helper_tmp); \ - } while(0) - -#define gen_helper_0e3i(tcg_ctx, name, arg1, arg2, arg3, arg4) do { \ - TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg4); \ - gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, arg2, arg3, helper_tmp); \ - tcg_temp_free_i32(tcg_ctx, helper_tmp); \ - } while(0) - -typedef struct DisasContext { - struct TranslationBlock *tb; - target_ulong pc, saved_pc; - uint32_t opcode; - int singlestep_enabled; - int insn_flags; - int32_t CP0_Config1; - /* Routine used to access memory */ - int mem_idx; - uint32_t hflags, saved_hflags; - int bstate; - target_ulong btarget; - bool ulri; - int kscrexist; - bool rxi; - int ie; - bool bi; - bool bp; - // Unicorn engine - struct uc_struct *uc; -} DisasContext; - -enum { - BS_NONE = 0, /* We go out of the TB without reaching a branch or an - * exception condition */ - BS_STOP = 1, /* We want to stop translation for any reason */ - BS_BRANCH = 2, /* We reached a branch condition */ - BS_EXCP = 3, /* We reached an exception condition */ -}; - -static const char * const regnames[] = { - "r0", "at", "v0", "v1", "a0", "a1", "a2", "a3", - "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", - "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", - "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra", -}; - -static const char * const regnames_HI[] = { - "HI0", "HI1", "HI2", "HI3", -}; - -static const char * const regnames_LO[] = { - "LO0", "LO1", "LO2", "LO3", -}; - -static const char * const fregnames[] = { - "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", - "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", - "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", - "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", -}; - -static const char * const msaregnames[] = { - "w0.d0", "w0.d1", "w1.d0", "w1.d1", - "w2.d0", "w2.d1", "w3.d0", "w3.d1", - "w4.d0", "w4.d1", "w5.d0", "w5.d1", - "w6.d0", "w6.d1", "w7.d0", "w7.d1", - "w8.d0", "w8.d1", "w9.d0", "w9.d1", - "w10.d0", "w10.d1", "w11.d0", "w11.d1", - "w12.d0", "w12.d1", "w13.d0", "w13.d1", - "w14.d0", "w14.d1", "w15.d0", "w15.d1", - "w16.d0", "w16.d1", "w17.d0", "w17.d1", - "w18.d0", "w18.d1", "w19.d0", "w19.d1", - "w20.d0", "w20.d1", "w21.d0", "w21.d1", - "w22.d0", "w22.d1", 
"w23.d0", "w23.d1", - "w24.d0", "w24.d1", "w25.d0", "w25.d1", - "w26.d0", "w26.d1", "w27.d0", "w27.d1", - "w28.d0", "w28.d1", "w29.d0", "w29.d1", - "w30.d0", "w30.d1", "w31.d0", "w31.d1", -}; - -#define MIPS_DEBUG(fmt, ...) \ - do { \ - if (MIPS_DEBUG_DISAS) { \ - qemu_log_mask(CPU_LOG_TB_IN_ASM, \ - TARGET_FMT_lx ": %08x " fmt "\n", \ - ctx->pc, ctx->opcode , ## __VA_ARGS__); \ - } \ - } while (0) - -#define LOG_DISAS(...) \ - do { \ - if (MIPS_DEBUG_DISAS) { \ - qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \ - } \ - } while (0) - -#define MIPS_INVAL(op) \ - MIPS_DEBUG("Invalid %s %03x %03x %03x", op, ctx->opcode >> 26, \ - ctx->opcode & 0x3F, ((ctx->opcode >> 16) & 0x1F)) - -/* General purpose registers moves. */ -static inline void gen_load_gpr (DisasContext *s, TCGv t, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - if (reg == 0) - tcg_gen_movi_tl(tcg_ctx, t, 0); - else - tcg_gen_mov_tl(tcg_ctx, t, *cpu_gpr[reg]); -} - -static inline void gen_store_gpr (TCGContext *tcg_ctx, TCGv t, int reg) -{ - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - if (reg != 0) - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[reg], t); -} - -/* Moves to/from shadow registers. */ -static inline void gen_load_srsgpr (DisasContext *s, int from, int to) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv t0 = tcg_temp_new(tcg_ctx); - - if (from == 0) - tcg_gen_movi_tl(tcg_ctx, t0, 0); - else { - TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); - TCGv_ptr addr = tcg_temp_new_ptr(tcg_ctx); - - tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl)); - tcg_gen_shri_i32(tcg_ctx, t2, t2, CP0SRSCtl_PSS); - tcg_gen_andi_i32(tcg_ctx, t2, t2, 0xf); - tcg_gen_muli_i32(tcg_ctx, t2, t2, sizeof(target_ulong) * 32); - tcg_gen_ext_i32_ptr(tcg_ctx, addr, t2); - tcg_gen_add_ptr(tcg_ctx, addr, tcg_ctx->cpu_env, addr); - - tcg_gen_ld_tl(tcg_ctx, t0, addr, sizeof(target_ulong) * from); - tcg_temp_free_ptr(tcg_ctx, addr); - tcg_temp_free_i32(tcg_ctx, t2); - } - gen_store_gpr(tcg_ctx, t0, to); - tcg_temp_free(tcg_ctx, t0); -} - -static inline void gen_store_srsgpr (DisasContext *s, int from, int to) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (to != 0) { - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); - TCGv_ptr addr = tcg_temp_new_ptr(tcg_ctx); - - gen_load_gpr(s, t0, from); - tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl)); - tcg_gen_shri_i32(tcg_ctx, t2, t2, CP0SRSCtl_PSS); - tcg_gen_andi_i32(tcg_ctx, t2, t2, 0xf); - tcg_gen_muli_i32(tcg_ctx, t2, t2, sizeof(target_ulong) * 32); - tcg_gen_ext_i32_ptr(tcg_ctx, addr, t2); - tcg_gen_add_ptr(tcg_ctx, addr, tcg_ctx->cpu_env, addr); - - tcg_gen_st_tl(tcg_ctx, t0, addr, sizeof(target_ulong) * to); - tcg_temp_free_ptr(tcg_ctx, addr); - tcg_temp_free_i32(tcg_ctx, t2); - tcg_temp_free(tcg_ctx, t0); - } -} - -/* Floating point register moves. 
*/ -static void gen_load_fpr32(DisasContext *s, TCGv_i32 t, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_trunc_i64_i32(tcg_ctx, t, tcg_ctx->fpu_f64[reg]); -} - -static void gen_store_fpr32(DisasContext *s, TCGv_i32 t, int reg) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_extu_i32_i64(tcg_ctx, t64, t); - tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], tcg_ctx->fpu_f64[reg], t64, 0, 32); - tcg_temp_free_i64(tcg_ctx, t64); -} - -static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - if (ctx->hflags & MIPS_HFLAG_F64) { - TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_shri_i64(tcg_ctx, t64, tcg_ctx->fpu_f64[reg], 32); - tcg_gen_trunc_i64_i32(tcg_ctx, t, t64); - tcg_temp_free_i64(tcg_ctx, t64); - } else { - gen_load_fpr32(ctx, t, reg | 1); - } -} - -static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - if (ctx->hflags & MIPS_HFLAG_F64) { - TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_extu_i32_i64(tcg_ctx, t64, t); - tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], tcg_ctx->fpu_f64[reg], t64, 32, 32); - tcg_temp_free_i64(tcg_ctx, t64); - } else { - gen_store_fpr32(ctx, t, reg | 1); - } -} - -static void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - if (ctx->hflags & MIPS_HFLAG_F64) { - tcg_gen_mov_i64(tcg_ctx, t, tcg_ctx->fpu_f64[reg]); - } else { - tcg_gen_concat32_i64(tcg_ctx, t, tcg_ctx->fpu_f64[reg & ~1], tcg_ctx->fpu_f64[reg | 1]); - } -} - -static void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - if (ctx->hflags & MIPS_HFLAG_F64) { - tcg_gen_mov_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], t); - } else { - TCGv_i64 t0; - tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg & ~1], tcg_ctx->fpu_f64[reg & ~1], t, 0, 32); - t0 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_shri_i64(tcg_ctx, t0, t, 32); - tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg | 1], tcg_ctx->fpu_f64[reg | 1], t0, 0, 32); - tcg_temp_free_i64(tcg_ctx, t0); - } -} - -static inline int get_fp_bit (int cc) -{ - if (cc) - return 24 + cc; - else - return 23; -} - -/* Tests */ -static inline void gen_save_pc(DisasContext *ctx, target_ulong pc) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_PC, pc); -} - -static inline void save_cpu_state (DisasContext *ctx, int do_save_pc) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - LOG_DISAS("hflags %08x saved %08x\n", ctx->hflags, ctx->saved_hflags); - if (do_save_pc && ctx->pc != ctx->saved_pc) { - gen_save_pc(ctx, ctx->pc); - ctx->saved_pc = ctx->pc; - } - if (ctx->hflags != ctx->saved_hflags) { - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->hflags, ctx->hflags); - ctx->saved_hflags = ctx->hflags; - switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) { - case MIPS_HFLAG_BR: - break; - case MIPS_HFLAG_BC: - case MIPS_HFLAG_BL: - case MIPS_HFLAG_B: - tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->btarget, ctx->btarget); - break; - } - } -} - -static inline void restore_cpu_state (CPUMIPSState *env, DisasContext *ctx) -{ - ctx->saved_hflags = ctx->hflags; - switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) { - case MIPS_HFLAG_BR: - break; - case MIPS_HFLAG_BC: - case MIPS_HFLAG_BL: - case MIPS_HFLAG_B: - ctx->btarget = env->btarget; - break; - } -} - -static inline void -generate_exception_err (DisasContext *ctx, int excp, int err) -{ - TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; - TCGv_i32 texcp = tcg_const_i32(tcg_ctx, excp); - TCGv_i32 terr = tcg_const_i32(tcg_ctx, err); - save_cpu_state(ctx, 1); - gen_helper_raise_exception_err(tcg_ctx, tcg_ctx->cpu_env, texcp, terr); - tcg_temp_free_i32(tcg_ctx, terr); - tcg_temp_free_i32(tcg_ctx, texcp); -} - -static inline void -generate_exception (DisasContext *ctx, int excp) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - save_cpu_state(ctx, 1); - gen_helper_0e0i(tcg_ctx, raise_exception, excp); -} - -/* Address computation */ -static inline void gen_op_addr_add (DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - tcg_gen_add_tl(tcg_ctx, ret, arg0, arg1); - -#if defined(TARGET_MIPS64) - if (ctx->hflags & MIPS_HFLAG_AWRAP) { - tcg_gen_ext32s_i64(tcg_ctx, ret, ret); - } -#endif -} - -/* Address computation (translation time) */ -static target_long addr_add(DisasContext *ctx, target_long base, - target_long offset) -{ - target_long sum = (target_long)((target_ulong)base + offset); - -#if defined(TARGET_MIPS64) - if (ctx->hflags & MIPS_HFLAG_AWRAP) { - sum = (int32_t)sum; - } -#endif - return sum; -} - -static inline void check_cp0_enabled(DisasContext *ctx) -{ - if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0))) - generate_exception_err(ctx, EXCP_CpU, 0); -} - -static inline void check_cp1_enabled(DisasContext *ctx) -{ - if (unlikely(!(ctx->hflags & MIPS_HFLAG_FPU))) - generate_exception_err(ctx, EXCP_CpU, 1); -} - -/* Verify that the processor is running with COP1X instructions enabled. - This is associated with the nabla symbol in the MIPS32 and MIPS64 - opcode tables. */ - -static inline void check_cop1x(DisasContext *ctx) -{ - if (unlikely(!(ctx->hflags & MIPS_HFLAG_COP1X))) - generate_exception(ctx, EXCP_RI); -} - -/* Verify that the processor is running with 64-bit floating-point - operations enabled. */ - -static inline void check_cp1_64bitmode(DisasContext *ctx) -{ - if (unlikely(~ctx->hflags & (MIPS_HFLAG_F64 | MIPS_HFLAG_COP1X))) - generate_exception(ctx, EXCP_RI); -} - -/* - * Verify that a floating point register is valid; an operation is not defined - * if bit 0 of any register specification is set and the FR bit in the - * Status register equals zero, since the register numbers specify an - * even-odd pair of adjacent coprocessor general registers. When the FR bit - * in the Status register equals one, both even and odd register numbers - * are valid. This limitation exists only for 64-bit wide (d,l,ps) registers. - * - * Multiple 64-bit wide registers can be checked by calling - * check_cp1_registers(freg1 | freg2 | ... | fregN); - */ -static inline void check_cp1_registers(DisasContext *ctx, int regs) -{ - if (unlikely(!(ctx->hflags & MIPS_HFLAG_F64) && (regs & 1))) - generate_exception(ctx, EXCP_RI); -} - -/* Verify that the processor is running with DSP instructions enabled. - This is enabled by the MX bit (bit 24) of the CP0 Status register. - */ - -static inline void check_dsp(DisasContext *ctx) -{ - if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP))) { - if (ctx->insn_flags & ASE_DSP) { - generate_exception(ctx, EXCP_DSPDIS); - } else { - generate_exception(ctx, EXCP_RI); - } - } -} - -static inline void check_dspr2(DisasContext *ctx) -{ - if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSPR2))) { - if (ctx->insn_flags & ASE_DSP) { - generate_exception(ctx, EXCP_DSPDIS); - } else { - generate_exception(ctx, EXCP_RI); - } - } -} - -/* This code generates a "reserved instruction" exception if the - CPU does not support the instruction set corresponding to flags.
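For example, a caller such as check_insn(ctx, ISA_MIPS32R2) raises EXCP_RI when an R2-only encoding is translated on a pre-R2 core; the ISA_* masks come from mips-defs.h.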
*/ -static inline void check_insn(DisasContext *ctx, int flags) -{ - if (unlikely(!(ctx->insn_flags & flags))) { - generate_exception(ctx, EXCP_RI); - } -} - -/* This code generates a "reserved instruction" exception if the - CPU has the corresponding flag set, which indicates that the instruction - has been removed. */ -static inline void check_insn_opc_removed(DisasContext *ctx, int flags) -{ - if (unlikely(ctx->insn_flags & flags)) { - generate_exception(ctx, EXCP_RI); - } -} - -#ifdef TARGET_MIPS64 -/* This code generates a "reserved instruction" exception if 64-bit - instructions are not enabled. */ -static inline void check_mips_64(DisasContext *ctx) -{ - if (unlikely(!(ctx->hflags & MIPS_HFLAG_64))) - generate_exception(ctx, EXCP_RI); -} -#endif - -/* Define small wrappers for gen_load_fpr* so that we have a uniform - calling interface for 32 and 64-bit FPRs. No sense in changing - all callers for gen_load_fpr32 when we need the CTX parameter for - this one use. */ -#define gen_ldcmp_fpr32(ctx, x, y) gen_load_fpr32(ctx, x, y) -#define gen_ldcmp_fpr64(ctx, x, y) gen_load_fpr64(ctx, x, y) -#define FOP_CONDS(type, abs, fmt, ifmt, bits) \ -static inline void gen_cmp ## type ## _ ## fmt(DisasContext *ctx, int n, \ - int ft, int fs, int cc) \ -{ \ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ - TCGv_i##bits fp0 = tcg_temp_new_i##bits (tcg_ctx); \ - TCGv_i##bits fp1 = tcg_temp_new_i##bits (tcg_ctx); \ - switch (ifmt) { \ - case FMT_PS: \ - check_cp1_64bitmode(ctx); \ - break; \ - case FMT_D: \ - if (abs) { \ - check_cop1x(ctx); \ - } \ - check_cp1_registers(ctx, fs | ft); \ - break; \ - case FMT_S: \ - if (abs) { \ - check_cop1x(ctx); \ - } \ - break; \ - } \ - gen_ldcmp_fpr##bits (ctx, fp0, fs); \ - gen_ldcmp_fpr##bits (ctx, fp1, ft); \ - switch (n) { \ - case 0: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _f, fp0, fp1, cc); break;\ - case 1: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _un, fp0, fp1, cc); break;\ - case 2: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _eq, fp0, fp1, cc); break;\ - case 3: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ueq, fp0, fp1, cc); break;\ - case 4: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _olt, fp0, fp1, cc); break;\ - case 5: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ult, fp0, fp1, cc); break;\ - case 6: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ole, fp0, fp1, cc); break;\ - case 7: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ule, fp0, fp1, cc); break;\ - case 8: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _sf, fp0, fp1, cc); break;\ - case 9: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ngle, fp0, fp1, cc); break;\ - case 10: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _seq, fp0, fp1, cc); break;\ - case 11: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ngl, fp0, fp1, cc); break;\ - case 12: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _lt, fp0, fp1, cc); break;\ - case 13: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _nge, fp0, fp1, cc); break;\ - case 14: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _le, fp0, fp1, cc); break;\ - case 15: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ngt, fp0, fp1, cc); break;\ - default: abort(); \ - } \ - tcg_temp_free_i##bits (tcg_ctx, fp0); \ - tcg_temp_free_i##bits (tcg_ctx, fp1); \ -} - -FOP_CONDS(, 0, d, FMT_D, 64) -FOP_CONDS(abs, 1, d, FMT_D, 64) -FOP_CONDS(, 0, s, FMT_S, 32) -FOP_CONDS(abs, 1, s, FMT_S, 32) -FOP_CONDS(, 0, ps, FMT_PS, 64) -FOP_CONDS(abs, 1, ps, FMT_PS, 64) -#undef 
FOP_CONDS - -#define FOP_CONDNS(fmt, ifmt, bits, STORE) \ -static inline void gen_r6_cmp_ ## fmt(DisasContext * ctx, int n, \ - int ft, int fs, int fd) \ -{ \ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ - TCGv_i ## bits fp0 = tcg_temp_new_i ## bits(tcg_ctx); \ - TCGv_i ## bits fp1 = tcg_temp_new_i ## bits(tcg_ctx); \ - switch (ifmt) { \ - default: break; \ - case FMT_D: \ - check_cp1_registers(ctx, fs | ft | fd); \ - break; \ - } \ - gen_ldcmp_fpr ## bits(ctx, fp0, fs); \ - gen_ldcmp_fpr ## bits(ctx, fp1, ft); \ - switch (n) { \ - case 0: \ - gen_helper_r6_cmp_ ## fmt ## _af(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 1: \ - gen_helper_r6_cmp_ ## fmt ## _un(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 2: \ - gen_helper_r6_cmp_ ## fmt ## _eq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 3: \ - gen_helper_r6_cmp_ ## fmt ## _ueq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 4: \ - gen_helper_r6_cmp_ ## fmt ## _lt(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 5: \ - gen_helper_r6_cmp_ ## fmt ## _ult(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 6: \ - gen_helper_r6_cmp_ ## fmt ## _le(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 7: \ - gen_helper_r6_cmp_ ## fmt ## _ule(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 8: \ - gen_helper_r6_cmp_ ## fmt ## _saf(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 9: \ - gen_helper_r6_cmp_ ## fmt ## _sun(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 10: \ - gen_helper_r6_cmp_ ## fmt ## _seq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 11: \ - gen_helper_r6_cmp_ ## fmt ## _sueq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 12: \ - gen_helper_r6_cmp_ ## fmt ## _slt(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 13: \ - gen_helper_r6_cmp_ ## fmt ## _sult(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 14: \ - gen_helper_r6_cmp_ ## fmt ## _sle(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 15: \ - gen_helper_r6_cmp_ ## fmt ## _sule(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 17: \ - gen_helper_r6_cmp_ ## fmt ## _or(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 18: \ - gen_helper_r6_cmp_ ## fmt ## _une(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 19: \ - gen_helper_r6_cmp_ ## fmt ## _ne(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 25: \ - gen_helper_r6_cmp_ ## fmt ## _sor(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 26: \ - gen_helper_r6_cmp_ ## fmt ## _sune(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - case 27: \ - gen_helper_r6_cmp_ ## fmt ## _sne(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ - break; \ - default: \ - abort(); \ - } \ - STORE; \ - tcg_temp_free_i ## bits (tcg_ctx, fp0); \ - tcg_temp_free_i ## bits (tcg_ctx, fp1); \ -} - -FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd)) -FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd)) -#undef FOP_CONDNS -#undef gen_ldcmp_fpr32 -#undef gen_ldcmp_fpr64 - -/* load/store instructions. 
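The OP_LD_ATOMIC/OP_ST_ATOMIC wrappers below give LL/SC a single shape for both builds: the user-only versions record the link address and loaded value in lladdr/llval and complete a matching SC through an EXCP_SC exception (a mismatch simply writes 0 to rt), while the softmmu versions delegate the whole sequence to helpers.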
*/ -#ifdef CONFIG_USER_ONLY -#define OP_LD_ATOMIC(insn,fname) \ -static inline void op_ld_##insn(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, DisasContext *ctx) \ -{ \ - TCGv t0 = tcg_temp_new(tcg_ctx); \ - tcg_gen_mov_tl(tcg_ctx, t0, arg1); \ - tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \ - tcg_gen_st_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUMIPSState, lladdr)); \ - tcg_gen_st_tl(tcg_ctx, ret, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llval)); \ - tcg_temp_free(tcg_ctx, t0); \ -} -#else -#define OP_LD_ATOMIC(insn,fname) \ -static inline void op_ld_##insn(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, DisasContext *ctx) \ -{ \ - gen_helper_1e1i(tcg_ctx, insn, ret, arg1, ctx->mem_idx); \ -} -#endif -OP_LD_ATOMIC(ll,ld32s); -#if defined(TARGET_MIPS64) -OP_LD_ATOMIC(lld,ld64); -#endif -#undef OP_LD_ATOMIC - -#ifdef CONFIG_USER_ONLY -#define OP_ST_ATOMIC(insn,fname,ldname,almask) \ -static inline void op_st_##insn(DisasContext *s, TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \ -{ \ - TCGContext *tcg_ctx = s->uc->tcg_ctx; \ - TCGv t0 = tcg_temp_new(tcg_ctx); \ - int l1 = gen_new_label(tcg_ctx); \ - int l2 = gen_new_label(tcg_ctx); \ - \ - tcg_gen_andi_tl(tcg_ctx, t0, arg2, almask); \ - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t0, 0, l1); \ - tcg_gen_st_tl(tcg_ctx, arg2, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); \ - generate_exception(ctx, EXCP_AdES); \ - gen_set_label(tcg_ctx, l1); \ - tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUMIPSState, lladdr)); \ - tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, arg2, t0, l2); \ - tcg_gen_movi_tl(tcg_ctx, t0, rt | ((almask << 3) & 0x20)); \ - tcg_gen_st_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llreg)); \ - tcg_gen_st_tl(tcg_ctx, arg1, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llnewval)); \ - gen_helper_0e0i(tcg_ctx, raise_exception, EXCP_SC); \ - gen_set_label(tcg_ctx, l2); \ - tcg_gen_movi_tl(tcg_ctx, t0, 0); \ - gen_store_gpr(tcg_ctx, t0, rt); \ - tcg_temp_free(tcg_ctx, t0); \ -} -#else -#define OP_ST_ATOMIC(insn,fname,ldname,almask) \ -static inline void op_st_##insn(TCGContext *tcg_ctx, TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \ -{ \ - TCGv t0 = tcg_temp_new(tcg_ctx); \ - gen_helper_1e2i(tcg_ctx, insn, t0, arg1, arg2, ctx->mem_idx); \ - gen_store_gpr(tcg_ctx, t0, rt); \ - tcg_temp_free(tcg_ctx, t0); \ -} -#endif -OP_ST_ATOMIC(sc,st32,ld32s,0x3); -#if defined(TARGET_MIPS64) -OP_ST_ATOMIC(scd,st64,ld64,0x7); -#endif -#undef OP_ST_ATOMIC - -static void gen_base_offset_addr (DisasContext *ctx, TCGv addr, - int base, int16_t offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - if (base == 0) { - tcg_gen_movi_tl(tcg_ctx, addr, offset); - } else if (offset == 0) { - gen_load_gpr(ctx, addr, base); - } else { - tcg_gen_movi_tl(tcg_ctx, addr, offset); - gen_op_addr_add(ctx, addr, *cpu_gpr[base], addr); - } -} - -static target_ulong pc_relative_pc (DisasContext *ctx) -{ - target_ulong pc = ctx->pc; - - if (ctx->hflags & MIPS_HFLAG_BMASK) { - int branch_bytes = ctx->hflags & MIPS_HFLAG_BDS16 ? 2 : 4; - - pc -= branch_bytes; - } - - pc &= ~(target_ulong)3; - return pc; -} - -/* Load */ -static void gen_ld(DisasContext *ctx, uint32_t opc, - int rt, int base, int16_t offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *opn = "ld"; - TCGv t0, t1, t2; - - if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)) { - /* Loongson CPU uses a load to zero register for prefetch. - We emulate it as a NOP. 
On other CPU we must perform the - actual memory access. */ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - gen_base_offset_addr(ctx, t0, base, offset); - - switch (opc) { -#if defined(TARGET_MIPS64) - case OPC_LWU: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEUL); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "lwu"; - break; - case OPC_LD: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEQ); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "ld"; - break; - case OPC_LLD: - case R6_OPC_LLD: - save_cpu_state(ctx, 1); - op_ld_lld(tcg_ctx, t0, t0, ctx); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "lld"; - break; - case OPC_LDL: - t1 = tcg_temp_new(tcg_ctx); - tcg_gen_andi_tl(tcg_ctx, t1, t0, 7); -#ifndef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(tcg_ctx, t1, t1, 7); -#endif - tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); - tcg_gen_andi_tl(tcg_ctx, t0, t0, ~7); - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEQ); - tcg_gen_shl_tl(tcg_ctx, t0, t0, t1); - tcg_gen_xori_tl(tcg_ctx, t1, t1, 63); - t2 = tcg_const_tl(tcg_ctx, 0x7fffffffffffffffull); - tcg_gen_shr_tl(tcg_ctx, t2, t2, t1); - gen_load_gpr(ctx, t1, rt); - tcg_gen_and_tl(tcg_ctx, t1, t1, t2); - tcg_temp_free(tcg_ctx, t2); - tcg_gen_or_tl(tcg_ctx, t0, t0, t1); - tcg_temp_free(tcg_ctx, t1); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "ldl"; - break; - case OPC_LDR: - t1 = tcg_temp_new(tcg_ctx); - tcg_gen_andi_tl(tcg_ctx, t1, t0, 7); -#ifdef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(tcg_ctx, t1, t1, 7); -#endif - tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); - tcg_gen_andi_tl(tcg_ctx, t0, t0, ~7); - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEQ); - tcg_gen_shr_tl(tcg_ctx, t0, t0, t1); - tcg_gen_xori_tl(tcg_ctx, t1, t1, 63); - t2 = tcg_const_tl(tcg_ctx, 0xfffffffffffffffeull); - tcg_gen_shl_tl(tcg_ctx, t2, t2, t1); - gen_load_gpr(ctx, t1, rt); - tcg_gen_and_tl(tcg_ctx, t1, t1, t2); - tcg_temp_free(tcg_ctx, t2); - tcg_gen_or_tl(tcg_ctx, t0, t0, t1); - tcg_temp_free(tcg_ctx, t1); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "ldr"; - break; - case OPC_LDPC: - t1 = tcg_const_tl(tcg_ctx, pc_relative_pc(ctx)); - gen_op_addr_add(ctx, t0, t0, t1); - tcg_temp_free(tcg_ctx, t1); - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEQ); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "ldpc"; - break; -#endif - case OPC_LWPC: - t1 = tcg_const_tl(tcg_ctx, pc_relative_pc(ctx)); - gen_op_addr_add(ctx, t0, t0, t1); - tcg_temp_free(tcg_ctx, t1); - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESL); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "lwpc"; - break; - case OPC_LW: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESL); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "lw"; - break; - case OPC_LH: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESW); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "lh"; - break; - case OPC_LHU: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEUW); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "lhu"; - break; - case OPC_LB: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_SB); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "lb"; - break; - case OPC_LBU: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_UB); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "lbu"; - break; - case OPC_LWL: - t1 = tcg_temp_new(tcg_ctx); - tcg_gen_andi_tl(tcg_ctx, t1, t0, 3); -#ifndef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(tcg_ctx, t1, t1, 3); -#endif - tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); - tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3); - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEUL); - 
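
For reference while reading the LWL/LWR (and LDL/LDR) sequences here: the generated code aligns the address, shifts the loaded word so the addressed bytes land at the register's most-significant end, and masks in the surviving bytes of the old rt. A scalar model of big-endian LWL (the helper name is illustrative):

    #include <stdint.h>

    // Scalar model of LWL on a big-endian guest: b is vaddr & 3 (the deleted
    // code xors it with 3 first on little-endian targets).
    static uint32_t lwl(uint32_t mem_word, unsigned b, uint32_t old_rt)
    {
        unsigned shift = 8 * b;
        uint32_t merged = mem_word << shift;            // addressed bytes -> MSB end
        uint32_t keep = 0x7fffffffu >> (31 - shift);    // low bytes of old rt survive
        return merged | (old_rt & keep);                // MIPS64 then sign-extends
    }
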
tcg_gen_shl_tl(tcg_ctx, t0, t0, t1); - tcg_gen_xori_tl(tcg_ctx, t1, t1, 31); - t2 = tcg_const_tl(tcg_ctx, 0x7fffffffull); - tcg_gen_shr_tl(tcg_ctx, t2, t2, t1); - gen_load_gpr(ctx, t1, rt); - tcg_gen_and_tl(tcg_ctx, t1, t1, t2); - tcg_temp_free(tcg_ctx, t2); - tcg_gen_or_tl(tcg_ctx, t0, t0, t1); - tcg_temp_free(tcg_ctx, t1); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "lwl"; - break; - case OPC_LWR: - t1 = tcg_temp_new(tcg_ctx); - tcg_gen_andi_tl(tcg_ctx, t1, t0, 3); -#ifdef TARGET_WORDS_BIGENDIAN - tcg_gen_xori_tl(tcg_ctx, t1, t1, 3); -#endif - tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); - tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3); - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEUL); - tcg_gen_shr_tl(tcg_ctx, t0, t0, t1); - tcg_gen_xori_tl(tcg_ctx, t1, t1, 31); - t2 = tcg_const_tl(tcg_ctx, 0xfffffffeull); - tcg_gen_shl_tl(tcg_ctx, t2, t2, t1); - gen_load_gpr(ctx, t1, rt); - tcg_gen_and_tl(tcg_ctx, t1, t1, t2); - tcg_temp_free(tcg_ctx, t2); - tcg_gen_or_tl(tcg_ctx, t0, t0, t1); - tcg_temp_free(tcg_ctx, t1); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "lwr"; - break; - case OPC_LL: - case R6_OPC_LL: - save_cpu_state(ctx, 1); - op_ld_ll(tcg_ctx, t0, t0, ctx); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "ll"; - break; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]); - tcg_temp_free(tcg_ctx, t0); -} - -/* Store */ -static void gen_st (DisasContext *ctx, uint32_t opc, int rt, - int base, int16_t offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *opn = "st"; - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - - gen_base_offset_addr(ctx, t0, base, offset); - gen_load_gpr(ctx, t1, rt); - switch (opc) { -#if defined(TARGET_MIPS64) - case OPC_SD: - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEQ); - opn = "sd"; - break; - case OPC_SDL: - save_cpu_state(ctx, 1); - gen_helper_0e2i(tcg_ctx, sdl, t1, t0, ctx->mem_idx); - opn = "sdl"; - break; - case OPC_SDR: - save_cpu_state(ctx, 1); - gen_helper_0e2i(tcg_ctx, sdr, t1, t0, ctx->mem_idx); - opn = "sdr"; - break; -#endif - case OPC_SW: - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); - opn = "sw"; - break; - case OPC_SH: - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUW); - opn = "sh"; - break; - case OPC_SB: - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_8); - opn = "sb"; - break; - case OPC_SWL: - save_cpu_state(ctx, 1); - gen_helper_0e2i(tcg_ctx, swl, t1, t0, ctx->mem_idx); - opn = "swl"; - break; - case OPC_SWR: - save_cpu_state(ctx, 1); - gen_helper_0e2i(tcg_ctx, swr, t1, t0, ctx->mem_idx); - opn = "swr"; - break; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - - -/* Store conditional */ -static void gen_st_cond (DisasContext *ctx, uint32_t opc, int rt, - int base, int16_t offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *opn = "st_cond"; - TCGv t0, t1; - -#ifdef CONFIG_USER_ONLY - t0 = tcg_temp_local_new(tcg_ctx); - t1 = tcg_temp_local_new(tcg_ctx); -#else - t0 = tcg_temp_new(tcg_ctx); - t1 = tcg_temp_new(tcg_ctx); -#endif - gen_base_offset_addr(ctx, t0, base, offset); - gen_load_gpr(ctx, t1, rt); - switch (opc) { -#if defined(TARGET_MIPS64) - case OPC_SCD: - case R6_OPC_SCD: - save_cpu_state(ctx, 1); - op_st_scd(tcg_ctx, t1, t0, rt, ctx); - opn = "scd"; - break; -#endif - case 
OPC_SC: - case R6_OPC_SC: - save_cpu_state(ctx, 1); - op_st_sc(tcg_ctx, t1, t0, rt, ctx); - opn = "sc"; - break; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]); - tcg_temp_free(tcg_ctx, t1); - tcg_temp_free(tcg_ctx, t0); -} - -/* Load and store */ -static void gen_flt_ldst (DisasContext *ctx, uint32_t opc, int ft, - int base, int16_t offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *opn = "flt_ldst"; - TCGv t0 = tcg_temp_new(tcg_ctx); - - gen_base_offset_addr(ctx, t0, base, offset); - /* Don't do NOP if destination is zero: we must perform the actual - memory access. */ - switch (opc) { - case OPC_LWC1: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_qemu_ld_i32(ctx->uc, fp0, t0, ctx->mem_idx, MO_TESL); - gen_store_fpr32(ctx, fp0, ft); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "lwc1"; - break; - case OPC_SWC1: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, ft); - tcg_gen_qemu_st_i32(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEUL); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "swc1"; - break; - case OPC_LDC1: - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_qemu_ld_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ); - gen_store_fpr64(ctx, fp0, ft); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "ldc1"; - break; - case OPC_SDC1: - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, ft); - tcg_gen_qemu_st_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "sdc1"; - break; - default: - MIPS_INVAL(opn); - generate_exception(ctx, EXCP_RI); - goto out; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %d(%s)", opn, fregnames[ft], offset, regnames[base]); - out: - tcg_temp_free(tcg_ctx, t0); -} - -static void gen_cop1_ldst(DisasContext *ctx, uint32_t op, int rt, - int rs, int16_t imm) -{ - if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { - check_cp1_enabled(ctx); - gen_flt_ldst(ctx, op, rt, rs, imm); - } else { - generate_exception_err(ctx, EXCP_CpU, 1); - } -} - -/* Arithmetic with immediate operand */ -static void gen_arith_imm(DisasContext *ctx, uint32_t opc, - int rt, int rs, int16_t imm) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */ - const char *opn = "imm arith"; - - if (rt == 0 && opc != OPC_ADDI && opc != OPC_DADDI) { - /* If no destination, treat it as a NOP. - For addi, we must generate the overflow exception when needed. 
*/ - MIPS_DEBUG("NOP"); - return; - } - switch (opc) { - case OPC_ADDI: - { - TCGv t0 = tcg_temp_local_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - TCGv t2 = tcg_temp_new(tcg_ctx); - int l1 = gen_new_label(tcg_ctx); - - gen_load_gpr(ctx, t1, rs); - tcg_gen_addi_tl(tcg_ctx, t0, t1, uimm); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - - tcg_gen_xori_tl(tcg_ctx, t1, t1, ~uimm); - tcg_gen_xori_tl(tcg_ctx, t2, t0, uimm); - tcg_gen_and_tl(tcg_ctx, t1, t1, t2); - tcg_temp_free(tcg_ctx, t2); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); - tcg_temp_free(tcg_ctx, t1); - /* operands of same sign, result different sign */ - generate_exception(ctx, EXCP_OVERFLOW); - gen_set_label(tcg_ctx, l1); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - gen_store_gpr(tcg_ctx, t0, rt); - tcg_temp_free(tcg_ctx, t0); - } - opn = "addi"; - break; - case OPC_ADDIU: - if (rs != 0) { - tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], uimm); - } - opn = "addiu"; - break; -#if defined(TARGET_MIPS64) - case OPC_DADDI: - { - TCGv t0 = tcg_temp_local_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - TCGv t2 = tcg_temp_new(tcg_ctx); - int l1 = gen_new_label(tcg_ctx); - - gen_load_gpr(ctx, t1, rs); - tcg_gen_addi_tl(tcg_ctx, t0, t1, uimm); - - tcg_gen_xori_tl(tcg_ctx, t1, t1, ~uimm); - tcg_gen_xori_tl(tcg_ctx, t2, t0, uimm); - tcg_gen_and_tl(tcg_ctx, t1, t1, t2); - tcg_temp_free(tcg_ctx, t2); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); - tcg_temp_free(tcg_ctx, t1); - /* operands of same sign, result different sign */ - generate_exception(ctx, EXCP_OVERFLOW); - gen_set_label(tcg_ctx, l1); - gen_store_gpr(tcg_ctx, t0, rt); - tcg_temp_free(tcg_ctx, t0); - } - opn = "daddi"; - break; - case OPC_DADDIU: - if (rs != 0) { - tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], uimm); - } - opn = "daddiu"; - break; -#endif - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm); -} - -/* Logic with immediate operand */ -static void gen_logic_imm(DisasContext *ctx, uint32_t opc, - int rt, int rs, int16_t imm) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - target_ulong uimm; - - if (rt == 0) { - /* If no destination, treat it as a NOP. 
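
The ADDI/DADDI cases (and the later ADD/SUB cases) detect signed overflow branchlessly: overflow happened iff the operands agree in sign and the wrapped sum disagrees. An equivalent scalar form of the xor/and test used above (the function name is mine):

    #include <stdint.h>

    // Equivalent to the generated test: (rs ^ ~imm) & (sum ^ imm) is negative
    // exactly when the addends share a sign that the wrapped sum does not.
    static int add_overflows(int32_t a, int32_t b)
    {
        int32_t sum = (int32_t)((uint32_t)a + (uint32_t)b);   // wrap without UB
        return ((~(a ^ b) & (a ^ sum)) >> 31) & 1;
    }
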
*/ - MIPS_DEBUG("NOP"); - return; - } - uimm = (uint16_t)imm; - switch (opc) { - case OPC_ANDI: - if (likely(rs != 0)) - tcg_gen_andi_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm); - else - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], 0); - MIPS_DEBUG("andi %s, %s, " TARGET_FMT_lx, regnames[rt], - regnames[rs], uimm); - break; - case OPC_ORI: - if (rs != 0) - tcg_gen_ori_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm); - else - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], uimm); - MIPS_DEBUG("ori %s, %s, " TARGET_FMT_lx, regnames[rt], - regnames[rs], uimm); - break; - case OPC_XORI: - if (likely(rs != 0)) - tcg_gen_xori_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm); - else - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], uimm); - MIPS_DEBUG("xori %s, %s, " TARGET_FMT_lx, regnames[rt], - regnames[rs], uimm); - break; - case OPC_LUI: - if (rs != 0 && (ctx->insn_flags & ISA_MIPS32R6)) { - /* OPC_AUI */ - tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm << 16); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]); - MIPS_DEBUG("aui %s, %s, %04x", regnames[rt], regnames[rs], imm); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], uimm << 16); - MIPS_DEBUG("lui %s, " TARGET_FMT_lx, regnames[rt], uimm); - } - break; - - default: - MIPS_DEBUG("Unknown logical immediate opcode %08x", opc); - break; - } -} - -/* Set on less than with immediate operand */ -static void gen_slt_imm(DisasContext *ctx, uint32_t opc, - int rt, int rs, int16_t imm) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */ - const char *opn = "imm arith"; - TCGv t0; - - if (rt == 0) { - /* If no destination, treat it as a NOP. */ - MIPS_DEBUG("NOP"); - return; - } - t0 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rs); - switch (opc) { - case OPC_SLTI: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, *cpu_gpr[rt], t0, uimm); - opn = "slti"; - break; - case OPC_SLTIU: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LTU, *cpu_gpr[rt], t0, uimm); - opn = "sltiu"; - break; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm); - tcg_temp_free(tcg_ctx, t0); -} - -/* Shifts with immediate operand */ -static void gen_shift_imm(DisasContext *ctx, uint32_t opc, - int rt, int rs, int16_t imm) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - target_ulong uimm = ((uint16_t)imm) & 0x1f; - const char *opn = "imm shift"; - TCGv t0; - - if (rt == 0) { - /* If no destination, treat it as a NOP. 
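
Under OPC_LUI above, R6 overloads the encoding: with rs != 0 it is AUI, adding the shifted immediate to rs instead of loading it. A scalar sketch (names are illustrative):

    #include <stdint.h>

    // lui/aui as implemented above; both results are sign-extended from 32 bits.
    static int64_t lui(uint16_t imm)
    {
        return (int32_t)((uint32_t)imm << 16);
    }

    static int64_t aui(int64_t rs, uint16_t imm)
    {
        return (int32_t)(uint32_t)((uint64_t)rs + ((uint32_t)imm << 16));
    }
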
*/ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rs); - switch (opc) { - case OPC_SLL: - tcg_gen_shli_tl(tcg_ctx, t0, t0, uimm); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], t0); - opn = "sll"; - break; - case OPC_SRA: - tcg_gen_sari_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); - opn = "sra"; - break; - case OPC_SRL: - if (uimm != 0) { - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); - } else { - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], t0); - } - opn = "srl"; - break; - case OPC_ROTR: - if (uimm != 0) { - TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_trunc_tl_i32(tcg_ctx, t1, t0); - tcg_gen_rotri_i32(tcg_ctx, t1, t1, uimm); - tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rt], t1); - tcg_temp_free_i32(tcg_ctx, t1); - } else { - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], t0); - } - opn = "rotr"; - break; -#if defined(TARGET_MIPS64) - case OPC_DSLL: - tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); - opn = "dsll"; - break; - case OPC_DSRA: - tcg_gen_sari_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); - opn = "dsra"; - break; - case OPC_DSRL: - tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); - opn = "dsrl"; - break; - case OPC_DROTR: - if (uimm != 0) { - tcg_gen_rotri_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); - } else { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rt], t0); - } - opn = "drotr"; - break; - case OPC_DSLL32: - tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm + 32); - opn = "dsll32"; - break; - case OPC_DSRA32: - tcg_gen_sari_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm + 32); - opn = "dsra32"; - break; - case OPC_DSRL32: - tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm + 32); - opn = "dsrl32"; - break; - case OPC_DROTR32: - tcg_gen_rotri_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm + 32); - opn = "drotr32"; - break; -#endif - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm); - tcg_temp_free(tcg_ctx, t0); -} - -/* Arithmetic */ -static void gen_arith(DisasContext *ctx, uint32_t opc, - int rd, int rs, int rt) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "arith"; - - if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB - && opc != OPC_DADD && opc != OPC_DSUB) { - /* If no destination, treat it as a NOP. - For add & sub, we must generate the overflow exception when needed. 
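
A detail worth keeping in mind for the 32-bit shift immediates above: on a 64-bit target the register holds a sign-extended 32-bit value, so SRL must zero-extend before shifting and re-canonicalize afterwards. Scalar model (the name is mine):

    #include <stdint.h>

    // srl rt, rs, sa on a 64-bit core, as generated above.
    static int64_t srl32(int64_t rs_val, unsigned sa)
    {
        uint32_t v = (uint32_t)rs_val;        // ext32u: drop stale upper bits
        return (int32_t)(v >> (sa & 31));     // shift, then ext32s back
    }
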
*/ - MIPS_DEBUG("NOP"); - return; - } - - switch (opc) { - case OPC_ADD: - { - TCGv t0 = tcg_temp_local_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - TCGv t2 = tcg_temp_new(tcg_ctx); - int l1 = gen_new_label(tcg_ctx); - - gen_load_gpr(ctx, t1, rs); - gen_load_gpr(ctx, t2, rt); - tcg_gen_add_tl(tcg_ctx, t0, t1, t2); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - tcg_gen_xor_tl(tcg_ctx, t1, t1, t2); - tcg_gen_xor_tl(tcg_ctx, t2, t0, t2); - tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); - tcg_temp_free(tcg_ctx, t2); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); - tcg_temp_free(tcg_ctx, t1); - /* operands of same sign, result different sign */ - generate_exception(ctx, EXCP_OVERFLOW); - gen_set_label(tcg_ctx, l1); - gen_store_gpr(tcg_ctx, t0, rd); - tcg_temp_free(tcg_ctx, t0); - } - opn = "add"; - break; - case OPC_ADDU: - if (rs != 0 && rt != 0) { - tcg_gen_add_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - } else if (rs == 0 && rt != 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); - } else if (rs != 0 && rt == 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - } - opn = "addu"; - break; - case OPC_SUB: - { - TCGv t0 = tcg_temp_local_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - TCGv t2 = tcg_temp_new(tcg_ctx); - int l1 = gen_new_label(tcg_ctx); - - gen_load_gpr(ctx, t1, rs); - gen_load_gpr(ctx, t2, rt); - tcg_gen_sub_tl(tcg_ctx, t0, t1, t2); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - tcg_gen_xor_tl(tcg_ctx, t2, t1, t2); - tcg_gen_xor_tl(tcg_ctx, t1, t0, t1); - tcg_gen_and_tl(tcg_ctx, t1, t1, t2); - tcg_temp_free(tcg_ctx, t2); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); - tcg_temp_free(tcg_ctx, t1); - /* operands of different sign, first operand and result different sign */ - generate_exception(ctx, EXCP_OVERFLOW); - gen_set_label(tcg_ctx, l1); - gen_store_gpr(tcg_ctx, t0, rd); - tcg_temp_free(tcg_ctx, t0); - } - opn = "sub"; - break; - case OPC_SUBU: - if (rs != 0 && rt != 0) { - tcg_gen_sub_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - } else if (rs == 0 && rt != 0) { - tcg_gen_neg_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - } else if (rs != 0 && rt == 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - } - opn = "subu"; - break; -#if defined(TARGET_MIPS64) - case OPC_DADD: - { - TCGv t0 = tcg_temp_local_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - TCGv t2 = tcg_temp_new(tcg_ctx); - int l1 = gen_new_label(tcg_ctx); - - gen_load_gpr(ctx, t1, rs); - gen_load_gpr(ctx, t2, rt); - tcg_gen_add_tl(tcg_ctx, t0, t1, t2); - tcg_gen_xor_tl(tcg_ctx, t1, t1, t2); - tcg_gen_xor_tl(tcg_ctx, t2, t0, t2); - tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); - tcg_temp_free(tcg_ctx, t2); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); - tcg_temp_free(tcg_ctx, t1); - /* operands of same sign, result different sign */ - generate_exception(ctx, EXCP_OVERFLOW); - gen_set_label(tcg_ctx, l1); - gen_store_gpr(tcg_ctx, t0, rd); - tcg_temp_free(tcg_ctx, t0); - } - opn = "dadd"; - break; - case OPC_DADDU: - if (rs != 0 && rt != 0) { - tcg_gen_add_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); - } else if (rs == 0 && rt != 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); - } else if (rs != 0 && rt == 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], 
*cpu_gpr[rs]); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - } - opn = "daddu"; - break; - case OPC_DSUB: - { - TCGv t0 = tcg_temp_local_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - TCGv t2 = tcg_temp_new(tcg_ctx); - int l1 = gen_new_label(tcg_ctx); - - gen_load_gpr(ctx, t1, rs); - gen_load_gpr(ctx, t2, rt); - tcg_gen_sub_tl(tcg_ctx, t0, t1, t2); - tcg_gen_xor_tl(tcg_ctx, t2, t1, t2); - tcg_gen_xor_tl(tcg_ctx, t1, t0, t1); - tcg_gen_and_tl(tcg_ctx, t1, t1, t2); - tcg_temp_free(tcg_ctx, t2); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); - tcg_temp_free(tcg_ctx, t1); - /* operands of different sign, first operand and result different sign */ - generate_exception(ctx, EXCP_OVERFLOW); - gen_set_label(tcg_ctx, l1); - gen_store_gpr(tcg_ctx, t0, rd); - tcg_temp_free(tcg_ctx, t0); - } - opn = "dsub"; - break; - case OPC_DSUBU: - if (rs != 0 && rt != 0) { - tcg_gen_sub_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); - } else if (rs == 0 && rt != 0) { - tcg_gen_neg_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); - } else if (rs != 0 && rt == 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - } - opn = "dsubu"; - break; -#endif - case OPC_MUL: - if (likely(rs != 0 && rt != 0)) { - tcg_gen_mul_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - } - opn = "mul"; - break; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); -} - -/* Conditional move */ -static void gen_cond_move(DisasContext *ctx, uint32_t opc, - int rd, int rs, int rt) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "cond move"; - TCGv t0, t1, t2; - - if (rd == 0) { - /* If no destination, treat it as a NOP. */ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rt); - t1 = tcg_const_tl(tcg_ctx, 0); - t2 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t2, rs); - switch (opc) { - case OPC_MOVN: - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[rd], t0, t1, t2, *cpu_gpr[rd]); - opn = "movn"; - break; - case OPC_MOVZ: - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *cpu_gpr[rd], t0, t1, t2, *cpu_gpr[rd]); - opn = "movz"; - break; - case OPC_SELNEZ: - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[rd], t0, t1, t2, t1); - opn = "selnez"; - break; - case OPC_SELEQZ: - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *cpu_gpr[rd], t0, t1, t2, t1); - opn = "seleqz"; - break; - } - tcg_temp_free(tcg_ctx, t2); - tcg_temp_free(tcg_ctx, t1); - tcg_temp_free(tcg_ctx, t0); - - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); -} - -/* Logic */ -static void gen_logic(DisasContext *ctx, uint32_t opc, - int rd, int rs, int rt) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "logic"; - - if (rd == 0) { - /* If no destination, treat it as a NOP. 
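
The movcond-based conditional moves above differ only in the fallback operand: pre-R6 MOVN/MOVZ preserve the old destination, while R6 SELNEZ/SELEQZ select zero, so the old rd value is never an input. Scalar equivalents (names are mine):

    #include <stdint.h>

    // The fallback operand is the only difference between the two families.
    static int64_t movn(int64_t old_rd, int64_t rs, int64_t rt)
    {
        return rt != 0 ? rs : old_rd;   // MOVN keeps rd on a false condition
    }

    static int64_t selnez(int64_t rs, int64_t rt)
    {
        return rt != 0 ? rs : 0;        // SELNEZ writes zero instead
    }
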
*/ - MIPS_DEBUG("NOP"); - return; - } - - switch (opc) { - case OPC_AND: - if (likely(rs != 0 && rt != 0)) { - tcg_gen_and_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - } - opn = "and"; - break; - case OPC_NOR: - if (rs != 0 && rt != 0) { - tcg_gen_nor_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); - } else if (rs == 0 && rt != 0) { - tcg_gen_not_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); - } else if (rs != 0 && rt == 0) { - tcg_gen_not_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], ~((target_ulong)0)); - } - opn = "nor"; - break; - case OPC_OR: - if (likely(rs != 0 && rt != 0)) { - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); - } else if (rs == 0 && rt != 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); - } else if (rs != 0 && rt == 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - } - opn = "or"; - break; - case OPC_XOR: - if (likely(rs != 0 && rt != 0)) { - tcg_gen_xor_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); - } else if (rs == 0 && rt != 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); - } else if (rs != 0 && rt == 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - } - opn = "xor"; - break; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); -} - -/* Set on lower than */ -static void gen_slt(DisasContext *ctx, uint32_t opc, - int rd, int rs, int rt) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "slt"; - TCGv t0, t1; - - if (rd == 0) { - /* If no destination, treat it as a NOP. */ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - t1 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - switch (opc) { - case OPC_SLT: - tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, *cpu_gpr[rd], t0, t1); - opn = "slt"; - break; - case OPC_SLTU: - tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LTU, *cpu_gpr[rd], t0, t1); - opn = "sltu"; - break; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -/* Shifts */ -static void gen_shift(DisasContext *ctx, uint32_t opc, - int rd, int rs, int rt) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "shifts"; - TCGv t0, t1; - - if (rd == 0) { - /* If no destination, treat it as a NOP. - For add & sub, we must generate the overflow exception when needed. 
*/ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - t1 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - switch (opc) { - case OPC_SLLV: - tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); - tcg_gen_shl_tl(tcg_ctx, t0, t1, t0); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], t0); - opn = "sllv"; - break; - case OPC_SRAV: - tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); - tcg_gen_sar_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); - opn = "srav"; - break; - case OPC_SRLV: - tcg_gen_ext32u_tl(tcg_ctx, t1, t1); - tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); - tcg_gen_shr_tl(tcg_ctx, t0, t1, t0); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], t0); - opn = "srlv"; - break; - case OPC_ROTRV: - { - TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); - tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); - tcg_gen_andi_i32(tcg_ctx, t2, t2, 0x1f); - tcg_gen_rotr_i32(tcg_ctx, t2, t3, t2); - tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rd], t2); - tcg_temp_free_i32(tcg_ctx, t2); - tcg_temp_free_i32(tcg_ctx, t3); - opn = "rotrv"; - } - break; -#if defined(TARGET_MIPS64) - case OPC_DSLLV: - tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); - tcg_gen_shl_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); - opn = "dsllv"; - break; - case OPC_DSRAV: - tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); - tcg_gen_sar_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); - opn = "dsrav"; - break; - case OPC_DSRLV: - tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); - tcg_gen_shr_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); - opn = "dsrlv"; - break; - case OPC_DROTRV: - tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); - tcg_gen_rotr_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); - opn = "drotrv"; - break; -#endif - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -/* Arithmetic on HI/LO registers */ -static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - TCGv **cpu_HI = (TCGv **)tcg_ctx->cpu_HI; - TCGv **cpu_LO = (TCGv **)tcg_ctx->cpu_LO; - const char *opn = "hilo"; - - if (reg == 0 && (opc == OPC_MFHI || opc == OPC_MFLO)) { - /* Treat as NOP. 
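
The variable-shift cases above mask the count (0x1f, or 0x3f for the doubleword forms) before shifting; that both matches the architected MIPS behaviour and keeps the host-side TCG shift well defined. Scalar sketch (names are mine):

    #include <stdint.h>

    // sllv/dsllv with the same count masking as the generated code.
    static int64_t sllv(int64_t rt_val, int64_t rs_val)
    {
        return (int32_t)((uint32_t)rt_val << (rs_val & 31));
    }

    static int64_t dsllv(int64_t rt_val, int64_t rs_val)
    {
        return (int64_t)((uint64_t)rt_val << (rs_val & 63));
    }
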
*/ - MIPS_DEBUG("NOP"); - return; - } - - if (acc != 0) { - check_dsp(ctx); - } - - switch (opc) { - case OPC_MFHI: -#if defined(TARGET_MIPS64) - if (acc != 0) { - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[reg], *cpu_HI[acc]); - } else -#endif - { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[reg], *cpu_HI[acc]); - } - opn = "mfhi"; - break; - case OPC_MFLO: -#if defined(TARGET_MIPS64) - if (acc != 0) { - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[reg], *cpu_LO[acc]); - } else -#endif - { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[reg], *cpu_LO[acc]); - } - opn = "mflo"; - break; - case OPC_MTHI: - if (reg != 0) { -#if defined(TARGET_MIPS64) - if (acc != 0) { - tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], *cpu_gpr[reg]); - } else -#endif - { - tcg_gen_mov_tl(tcg_ctx, *cpu_HI[acc], *cpu_gpr[reg]); - } - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_HI[acc], 0); - } - opn = "mthi"; - break; - case OPC_MTLO: - if (reg != 0) { -#if defined(TARGET_MIPS64) - if (acc != 0) { - tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], *cpu_gpr[reg]); - } else -#endif - { - tcg_gen_mov_tl(tcg_ctx, *cpu_LO[acc], *cpu_gpr[reg]); - } - } else { - tcg_gen_movi_tl(tcg_ctx, *cpu_LO[acc], 0); - } - opn = "mtlo"; - break; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s", opn, regnames[reg]); -} - -static inline void gen_r6_ld(DisasContext *ctx, target_long addr, int reg, int memidx, - TCGMemOp memop) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv t0 = tcg_const_tl(tcg_ctx, addr); - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, memidx, memop); - gen_store_gpr(tcg_ctx, t0, reg); - tcg_temp_free(tcg_ctx, t0); -} - -static inline void gen_pcrel(DisasContext *ctx, int rs, int16_t imm) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - target_long offset; - target_long addr; - - switch (MASK_OPC_PCREL_TOP2BITS(ctx->opcode)) { - case OPC_ADDIUPC: - if (rs != 0) { - offset = sextract32(ctx->opcode << 2, 0, 21); - addr = addr_add(ctx, ctx->pc, offset); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rs], addr); - } - break; - case R6_OPC_LWPC: - offset = sextract32(ctx->opcode << 2, 0, 21); - addr = addr_add(ctx, ctx->pc, offset); - gen_r6_ld(ctx, addr, rs, ctx->mem_idx, MO_TESL); - break; -#if defined(TARGET_MIPS64) - case OPC_LWUPC: - check_mips_64(ctx); - offset = sextract32(ctx->opcode << 2, 0, 21); - addr = addr_add(ctx, ctx->pc, offset); - gen_r6_ld(ctx, addr, rs, ctx->mem_idx, MO_TEUL); - break; -#endif - default: - switch (MASK_OPC_PCREL_TOP5BITS(ctx->opcode)) { - case OPC_AUIPC: - if (rs != 0) { - offset = ((target_ulong)imm) << 16; - addr = addr_add(ctx, ctx->pc, offset); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rs], addr); - } - break; - case OPC_ALUIPC: - if (rs != 0) { - offset = ((target_ulong)imm) << 16; - addr = ~0xFFFF & addr_add(ctx, ctx->pc, offset); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rs], addr); - } - break; -#if defined(TARGET_MIPS64) - case R6_OPC_LDPC: /* bits 16 and 17 are part of immediate */ - case R6_OPC_LDPC + (1 << 16): - case R6_OPC_LDPC + (2 << 16): - case R6_OPC_LDPC + (3 << 16): - check_mips_64(ctx); - offset = sextract32(ctx->opcode << 3, 0, 21); - addr = addr_add(ctx, (ctx->pc & ~0x7), offset); - gen_r6_ld(ctx, addr, rs, ctx->mem_idx, MO_TEQ); - break; -#endif - default: - MIPS_INVAL("OPC_PCREL"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - } -} - -static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "r6 mul/div"; - TCGv t0, t1; - - if (rd == 0) { 
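
gen_pcrel above forms all the R6 PC-relative addresses from the current pc plus a pre-scaled signed field; ALUIPC additionally clears the low 16 bits of the result. Scalar sketch (names are mine; scaled_off stands for the sign-extended immediate already shifted left by 2):

    #include <stdint.h>

    // ADDIUPC/ALUIPC address formation as in gen_pcrel.
    static uint64_t addiupc(uint64_t pc, int32_t scaled_off)
    {
        return pc + (int64_t)scaled_off;
    }

    static uint64_t aluipc(uint64_t pc, int16_t imm)
    {
        return ~(uint64_t)0xffff & (pc + ((int64_t)imm << 16));
    }
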
- /* Treat as NOP. */ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - t1 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - - switch (opc) { - case R6_OPC_DIV: - { - TCGv t2 = tcg_temp_new(tcg_ctx); - TCGv t3 = tcg_temp_new(tcg_ctx); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - tcg_gen_ext32s_tl(tcg_ctx, t1, t1); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); - tcg_gen_and_tl(tcg_ctx, t2, t2, t3); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); - tcg_gen_or_tl(tcg_ctx, t2, t2, t3); - tcg_gen_movi_tl(tcg_ctx, t3, 0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); - tcg_gen_div_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "div"; - break; - case R6_OPC_MOD: - { - TCGv t2 = tcg_temp_new(tcg_ctx); - TCGv t3 = tcg_temp_new(tcg_ctx); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - tcg_gen_ext32s_tl(tcg_ctx, t1, t1); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); - tcg_gen_and_tl(tcg_ctx, t2, t2, t3); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); - tcg_gen_or_tl(tcg_ctx, t2, t2, t3); - tcg_gen_movi_tl(tcg_ctx, t3, 0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); - tcg_gen_rem_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "mod"; - break; - case R6_OPC_DIVU: - { - TCGv t2 = tcg_const_tl(tcg_ctx, 0); - TCGv t3 = tcg_const_tl(tcg_ctx, 1); - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_ext32u_tl(tcg_ctx, t1, t1); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); - tcg_gen_divu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "divu"; - break; - case R6_OPC_MODU: - { - TCGv t2 = tcg_const_tl(tcg_ctx, 0); - TCGv t3 = tcg_const_tl(tcg_ctx, 1); - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_ext32u_tl(tcg_ctx, t1, t1); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); - tcg_gen_remu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "modu"; - break; - case R6_OPC_MUL: - { - TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); - tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); - tcg_gen_mul_i32(tcg_ctx, t2, t2, t3); - tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rd], t2); - tcg_temp_free_i32(tcg_ctx, t2); - tcg_temp_free_i32(tcg_ctx, t3); - } - opn = "mul"; - break; - case R6_OPC_MUH: - { - TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); - tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); - tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); - tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rd], t3); - tcg_temp_free_i32(tcg_ctx, t2); - tcg_temp_free_i32(tcg_ctx, t3); - } - opn = "muh"; - break; - case R6_OPC_MULU: - { - TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); - tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); - tcg_gen_mul_i32(tcg_ctx, t2, t2, t3); - tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rd], t2); - 
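
The R6 DIV/MOD cases above avoid host-side traps without branching: the setcond/movcond chain detects divide-by-zero and INT_MIN / -1 (both architecturally unpredictable) and substitutes a harmless divisor. Scalar equivalent (the name is mine):

    #include <stdint.h>

    // Same guard the movcond sequence implements: a flagged divisor is
    // replaced by 1, so the host divide cannot fault and num comes back.
    static int32_t r6_div(int32_t num, int32_t den)
    {
        if (den == 0 || (num == INT32_MIN && den == -1))
            den = 1;   // result is unpredictable per R6; any value is legal
        return num / den;
    }
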
tcg_temp_free_i32(tcg_ctx, t2); - tcg_temp_free_i32(tcg_ctx, t3); - } - opn = "mulu"; - break; - case R6_OPC_MUHU: - { - TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); - tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); - tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); - tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rd], t3); - tcg_temp_free_i32(tcg_ctx, t2); - tcg_temp_free_i32(tcg_ctx, t3); - } - opn = "muhu"; - break; -#if defined(TARGET_MIPS64) - case R6_OPC_DDIV: - { - TCGv t2 = tcg_temp_new(tcg_ctx); - TCGv t3 = tcg_temp_new(tcg_ctx); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1ULL << 63); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); - tcg_gen_and_tl(tcg_ctx, t2, t2, t3); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); - tcg_gen_or_tl(tcg_ctx, t2, t2, t3); - tcg_gen_movi_tl(tcg_ctx, t3, 0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); - tcg_gen_div_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "ddiv"; - break; - case R6_OPC_DMOD: - { - TCGv t2 = tcg_temp_new(tcg_ctx); - TCGv t3 = tcg_temp_new(tcg_ctx); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1ULL << 63); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); - tcg_gen_and_tl(tcg_ctx, t2, t2, t3); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); - tcg_gen_or_tl(tcg_ctx, t2, t2, t3); - tcg_gen_movi_tl(tcg_ctx, t3, 0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); - tcg_gen_rem_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "dmod"; - break; - case R6_OPC_DDIVU: - { - TCGv t2 = tcg_const_tl(tcg_ctx, 0); - TCGv t3 = tcg_const_tl(tcg_ctx, 1); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); - tcg_gen_divu_i64(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "ddivu"; - break; - case R6_OPC_DMODU: - { - TCGv t2 = tcg_const_tl(tcg_ctx, 0); - TCGv t3 = tcg_const_tl(tcg_ctx, 1); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); - tcg_gen_remu_i64(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "dmodu"; - break; - case R6_OPC_DMUL: - tcg_gen_mul_i64(tcg_ctx, *cpu_gpr[rd], t0, t1); - opn = "dmul"; - break; - case R6_OPC_DMUH: - { - TCGv t2 = tcg_temp_new(tcg_ctx); - tcg_gen_muls2_i64(tcg_ctx, t2, *cpu_gpr[rd], t0, t1); - tcg_temp_free(tcg_ctx, t2); - } - opn = "dmuh"; - break; - case R6_OPC_DMULU: - tcg_gen_mul_i64(tcg_ctx, *cpu_gpr[rd], t0, t1); - opn = "dmulu"; - break; - case R6_OPC_DMUHU: - { - TCGv t2 = tcg_temp_new(tcg_ctx); - tcg_gen_mulu2_i64(tcg_ctx, t2, *cpu_gpr[rd], t0, t1); - tcg_temp_free(tcg_ctx, t2); - } - opn = "dmuhu"; - break; -#endif - default: - MIPS_INVAL(opn); - generate_exception(ctx, EXCP_RI); - goto out; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s %s", opn, regnames[rs], regnames[rt]); - out: - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -static void gen_muldiv(DisasContext *ctx, uint32_t opc, - int acc, int rs, int rt) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_HI = (TCGv **)tcg_ctx->cpu_HI; - TCGv **cpu_LO = (TCGv **)tcg_ctx->cpu_LO; - const char *opn = "mul/div"; - TCGv t0, t1; - - t0 = tcg_temp_new(tcg_ctx); - t1 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - - if (acc != 0) { - check_dsp(ctx); - } - - switch (opc) { - 
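
gen_muldiv below is the pre-R6 counterpart: OPC_DIV produces both halves in one go, quotient to LO and remainder to HI, with the same guarded divisor. Scalar sketch (names are mine):

    #include <stdint.h>

    typedef struct { int32_t lo, hi; } DivResult;

    // Classic MIPS DIV: LO = quotient, HI = remainder, divisor guarded as above.
    static DivResult mips_div(int32_t num, int32_t den)
    {
        if (den == 0 || (num == INT32_MIN && den == -1))
            den = 1;
        return (DivResult){ .lo = num / den, .hi = num % den };
    }
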
case OPC_DIV: - { - TCGv t2 = tcg_temp_new(tcg_ctx); - TCGv t3 = tcg_temp_new(tcg_ctx); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - tcg_gen_ext32s_tl(tcg_ctx, t1, t1); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); - tcg_gen_and_tl(tcg_ctx, t2, t2, t3); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); - tcg_gen_or_tl(tcg_ctx, t2, t2, t3); - tcg_gen_movi_tl(tcg_ctx, t3, 0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); - tcg_gen_div_tl(tcg_ctx, *cpu_LO[acc], t0, t1); - tcg_gen_rem_tl(tcg_ctx, *cpu_HI[acc], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], *cpu_LO[acc]); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], *cpu_HI[acc]); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "div"; - break; - case OPC_DIVU: - { - TCGv t2 = tcg_const_tl(tcg_ctx, 0); - TCGv t3 = tcg_const_tl(tcg_ctx, 1); - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_ext32u_tl(tcg_ctx, t1, t1); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); - tcg_gen_divu_tl(tcg_ctx, *cpu_LO[acc], t0, t1); - tcg_gen_remu_tl(tcg_ctx, *cpu_HI[acc], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], *cpu_LO[acc]); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], *cpu_HI[acc]); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "divu"; - break; - case OPC_MULT: - { - TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); - tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); - tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); - tcg_gen_ext_i32_tl(tcg_ctx, *cpu_LO[acc], t2); - tcg_gen_ext_i32_tl(tcg_ctx, *cpu_HI[acc], t3); - tcg_temp_free_i32(tcg_ctx, t2); - tcg_temp_free_i32(tcg_ctx, t3); - } - opn = "mult"; - break; - case OPC_MULTU: - { - TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); - tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); - tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); - tcg_gen_ext_i32_tl(tcg_ctx, *cpu_LO[acc], t2); - tcg_gen_ext_i32_tl(tcg_ctx, *cpu_HI[acc], t3); - tcg_temp_free_i32(tcg_ctx, t2); - tcg_temp_free_i32(tcg_ctx, t3); - } - opn = "multu"; - break; -#if defined(TARGET_MIPS64) - case OPC_DDIV: - { - TCGv t2 = tcg_temp_new(tcg_ctx); - TCGv t3 = tcg_temp_new(tcg_ctx); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1ULL << 63); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); - tcg_gen_and_tl(tcg_ctx, t2, t2, t3); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); - tcg_gen_or_tl(tcg_ctx, t2, t2, t3); - tcg_gen_movi_tl(tcg_ctx, t3, 0); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); - tcg_gen_div_tl(tcg_ctx, *cpu_LO[acc], t0, t1); - tcg_gen_rem_tl(tcg_ctx, *cpu_HI[acc], t0, t1); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "ddiv"; - break; - case OPC_DDIVU: - { - TCGv t2 = tcg_const_tl(tcg_ctx, 0); - TCGv t3 = tcg_const_tl(tcg_ctx, 1); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); - tcg_gen_divu_i64(tcg_ctx, *cpu_LO[acc], t0, t1); - tcg_gen_remu_i64(tcg_ctx, *cpu_HI[acc], t0, t1); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } - opn = "ddivu"; - break; - case OPC_DMULT: - tcg_gen_muls2_i64(tcg_ctx, *cpu_LO[acc], *cpu_HI[acc], t0, t1); - opn = "dmult"; - break; - case OPC_DMULTU: - tcg_gen_mulu2_i64(tcg_ctx, *cpu_LO[acc], *cpu_HI[acc], t0, t1); - opn = "dmultu"; - break; -#endif - case OPC_MADD: - { - TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 t3 = 
tcg_temp_new_i64(tcg_ctx); - - tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); - tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); - tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); - tcg_gen_concat_tl_i64(tcg_ctx, t3, *cpu_LO[acc], *cpu_HI[acc]); - tcg_gen_add_i64(tcg_ctx, t2, t2, t3); - tcg_temp_free_i64(tcg_ctx, t3); - tcg_gen_trunc_i64_tl(tcg_ctx, t0, t2); - tcg_gen_shri_i64(tcg_ctx, t2, t2, 32); - tcg_gen_trunc_i64_tl(tcg_ctx, t1, t2); - tcg_temp_free_i64(tcg_ctx, t2); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], t0); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], t1); - } - opn = "madd"; - break; - case OPC_MADDU: - { - TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_ext32u_tl(tcg_ctx, t1, t1); - tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); - tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); - tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); - tcg_gen_concat_tl_i64(tcg_ctx, t3, *cpu_LO[acc], *cpu_HI[acc]); - tcg_gen_add_i64(tcg_ctx, t2, t2, t3); - tcg_temp_free_i64(tcg_ctx, t3); - tcg_gen_trunc_i64_tl(tcg_ctx, t0, t2); - tcg_gen_shri_i64(tcg_ctx, t2, t2, 32); - tcg_gen_trunc_i64_tl(tcg_ctx, t1, t2); - tcg_temp_free_i64(tcg_ctx, t2); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], t0); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], t1); - } - opn = "maddu"; - break; - case OPC_MSUB: - { - TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); - tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); - tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); - tcg_gen_concat_tl_i64(tcg_ctx, t3, *cpu_LO[acc], *cpu_HI[acc]); - tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); - tcg_temp_free_i64(tcg_ctx, t3); - tcg_gen_trunc_i64_tl(tcg_ctx, t0, t2); - tcg_gen_shri_i64(tcg_ctx, t2, t2, 32); - tcg_gen_trunc_i64_tl(tcg_ctx, t1, t2); - tcg_temp_free_i64(tcg_ctx, t2); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], t0); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], t1); - } - opn = "msub"; - break; - case OPC_MSUBU: - { - TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_ext32u_tl(tcg_ctx, t1, t1); - tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); - tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); - tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); - tcg_gen_concat_tl_i64(tcg_ctx, t3, *cpu_LO[acc], *cpu_HI[acc]); - tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); - tcg_temp_free_i64(tcg_ctx, t3); - tcg_gen_trunc_i64_tl(tcg_ctx, t0, t2); - tcg_gen_shri_i64(tcg_ctx, t2, t2, 32); - tcg_gen_trunc_i64_tl(tcg_ctx, t1, t2); - tcg_temp_free_i64(tcg_ctx, t2); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], t0); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], t1); - } - opn = "msubu"; - break; - default: - MIPS_INVAL(opn); - generate_exception(ctx, EXCP_RI); - goto out; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s %s", opn, regnames[rs], regnames[rt]); - out: - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -static void gen_mul_vr54xx (DisasContext *ctx, uint32_t opc, - int rd, int rs, int rt) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *opn = "mul vr54xx"; - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - - switch (opc) { - case OPC_VR54XX_MULS: - gen_helper_muls(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "muls"; - break; - case OPC_VR54XX_MULSU: - gen_helper_mulsu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "mulsu"; - break; - case OPC_VR54XX_MACC: - gen_helper_macc(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn 
= "macc"; - break; - case OPC_VR54XX_MACCU: - gen_helper_maccu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "maccu"; - break; - case OPC_VR54XX_MSAC: - gen_helper_msac(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "msac"; - break; - case OPC_VR54XX_MSACU: - gen_helper_msacu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "msacu"; - break; - case OPC_VR54XX_MULHI: - gen_helper_mulhi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "mulhi"; - break; - case OPC_VR54XX_MULHIU: - gen_helper_mulhiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "mulhiu"; - break; - case OPC_VR54XX_MULSHI: - gen_helper_mulshi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "mulshi"; - break; - case OPC_VR54XX_MULSHIU: - gen_helper_mulshiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "mulshiu"; - break; - case OPC_VR54XX_MACCHI: - gen_helper_macchi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "macchi"; - break; - case OPC_VR54XX_MACCHIU: - gen_helper_macchiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "macchiu"; - break; - case OPC_VR54XX_MSACHI: - gen_helper_msachi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "msachi"; - break; - case OPC_VR54XX_MSACHIU: - gen_helper_msachiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); - opn = "msachiu"; - break; - default: - MIPS_INVAL("mul vr54xx"); - generate_exception(ctx, EXCP_RI); - goto out; - } - gen_store_gpr(tcg_ctx, t0, rd); - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); - - out: - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -static void gen_cl (DisasContext *ctx, uint32_t opc, - int rd, int rs) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "CLx"; - TCGv t0; - - if (rd == 0) { - /* Treat as NOP. */ - MIPS_DEBUG("NOP"); - return; - } - t0 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rs); - switch (opc) { - case OPC_CLO: - case R6_OPC_CLO: - gen_helper_clo(tcg_ctx, *cpu_gpr[rd], t0); - opn = "clo"; - break; - case OPC_CLZ: - case R6_OPC_CLZ: - gen_helper_clz(tcg_ctx, *cpu_gpr[rd], t0); - opn = "clz"; - break; -#if defined(TARGET_MIPS64) - case OPC_DCLO: - case R6_OPC_DCLO: - gen_helper_dclo(tcg_ctx, *cpu_gpr[rd], t0); - opn = "dclo"; - break; - case OPC_DCLZ: - case R6_OPC_DCLZ: - gen_helper_dclz(tcg_ctx, *cpu_gpr[rd], t0); - opn = "dclz"; - break; -#endif - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s", opn, regnames[rd], regnames[rs]); - tcg_temp_free(tcg_ctx, t0); -} - -/* Godson integer instructions */ -static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, - int rd, int rs, int rt) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "loongson"; - TCGv t0, t1; - - if (rd == 0) { - /* Treat as NOP. 
*/ - MIPS_DEBUG("NOP"); - return; - } - - switch (opc) { - case OPC_MULT_G_2E: - case OPC_MULT_G_2F: - case OPC_MULTU_G_2E: - case OPC_MULTU_G_2F: -#if defined(TARGET_MIPS64) - case OPC_DMULT_G_2E: - case OPC_DMULT_G_2F: - case OPC_DMULTU_G_2E: - case OPC_DMULTU_G_2F: -#endif - t0 = tcg_temp_new(tcg_ctx); - t1 = tcg_temp_new(tcg_ctx); - break; - default: - t0 = tcg_temp_local_new(tcg_ctx); - t1 = tcg_temp_local_new(tcg_ctx); - break; - } - - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - - switch (opc) { - case OPC_MULT_G_2E: - case OPC_MULT_G_2F: - tcg_gen_mul_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - opn = "mult.g"; - break; - case OPC_MULTU_G_2E: - case OPC_MULTU_G_2F: - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_ext32u_tl(tcg_ctx, t1, t1); - tcg_gen_mul_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - opn = "multu.g"; - break; - case OPC_DIV_G_2E: - case OPC_DIV_G_2F: - { - int l1 = gen_new_label(tcg_ctx); - int l2 = gen_new_label(tcg_ctx); - int l3 = gen_new_label(tcg_ctx); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - tcg_gen_ext32s_tl(tcg_ctx, t1, t1); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - tcg_gen_br(tcg_ctx, l3); - gen_set_label(tcg_ctx, l1); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, INT_MIN, l2); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1, l2); - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], t0); - tcg_gen_br(tcg_ctx, l3); - gen_set_label(tcg_ctx, l2); - tcg_gen_div_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - gen_set_label(tcg_ctx, l3); - } - opn = "div.g"; - break; - case OPC_DIVU_G_2E: - case OPC_DIVU_G_2F: - { - int l1 = gen_new_label(tcg_ctx); - int l2 = gen_new_label(tcg_ctx); - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_ext32u_tl(tcg_ctx, t1, t1); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - tcg_gen_br(tcg_ctx, l2); - gen_set_label(tcg_ctx, l1); - tcg_gen_divu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - gen_set_label(tcg_ctx, l2); - } - opn = "divu.g"; - break; - case OPC_MOD_G_2E: - case OPC_MOD_G_2F: - { - int l1 = gen_new_label(tcg_ctx); - int l2 = gen_new_label(tcg_ctx); - int l3 = gen_new_label(tcg_ctx); - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_ext32u_tl(tcg_ctx, t1, t1); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, INT_MIN, l2); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1, l2); - gen_set_label(tcg_ctx, l1); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - tcg_gen_br(tcg_ctx, l3); - gen_set_label(tcg_ctx, l2); - tcg_gen_rem_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - gen_set_label(tcg_ctx, l3); - } - opn = "mod.g"; - break; - case OPC_MODU_G_2E: - case OPC_MODU_G_2F: - { - int l1 = gen_new_label(tcg_ctx); - int l2 = gen_new_label(tcg_ctx); - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_ext32u_tl(tcg_ctx, t1, t1); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - tcg_gen_br(tcg_ctx, l2); - gen_set_label(tcg_ctx, l1); - tcg_gen_remu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); - gen_set_label(tcg_ctx, l2); - } - opn = "modu.g"; - break; -#if defined(TARGET_MIPS64) - case OPC_DMULT_G_2E: - case OPC_DMULT_G_2F: - 
tcg_gen_mul_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - opn = "dmult.g"; - break; - case OPC_DMULTU_G_2E: - case OPC_DMULTU_G_2F: - tcg_gen_mul_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - opn = "dmultu.g"; - break; - case OPC_DDIV_G_2E: - case OPC_DDIV_G_2F: - { - int l1 = gen_new_label(tcg_ctx); - int l2 = gen_new_label(tcg_ctx); - int l3 = gen_new_label(tcg_ctx); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - tcg_gen_br(tcg_ctx, l3); - gen_set_label(tcg_ctx, l1); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, -1ULL << 63, l2); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1LL, l2); - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], t0); - tcg_gen_br(tcg_ctx, l3); - gen_set_label(tcg_ctx, l2); - tcg_gen_div_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - gen_set_label(tcg_ctx, l3); - } - opn = "ddiv.g"; - break; - case OPC_DDIVU_G_2E: - case OPC_DDIVU_G_2F: - { - int l1 = gen_new_label(tcg_ctx); - int l2 = gen_new_label(tcg_ctx); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - tcg_gen_br(tcg_ctx, l2); - gen_set_label(tcg_ctx, l1); - tcg_gen_divu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - gen_set_label(tcg_ctx, l2); - } - opn = "ddivu.g"; - break; - case OPC_DMOD_G_2E: - case OPC_DMOD_G_2F: - { - int l1 = gen_new_label(tcg_ctx); - int l2 = gen_new_label(tcg_ctx); - int l3 = gen_new_label(tcg_ctx); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, -1ULL << 63, l2); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1LL, l2); - gen_set_label(tcg_ctx, l1); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - tcg_gen_br(tcg_ctx, l3); - gen_set_label(tcg_ctx, l2); - tcg_gen_rem_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - gen_set_label(tcg_ctx, l3); - } - opn = "dmod.g"; - break; - case OPC_DMODU_G_2E: - case OPC_DMODU_G_2F: - { - int l1 = gen_new_label(tcg_ctx); - int l2 = gen_new_label(tcg_ctx); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - tcg_gen_br(tcg_ctx, l2); - gen_set_label(tcg_ctx, l1); - tcg_gen_remu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - gen_set_label(tcg_ctx, l2); - } - opn = "dmodu.g"; - break; -#endif - } - - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s", opn, regnames[rd], regnames[rs]); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -/* Loongson multimedia instructions */ -static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *opn = "loongson_cp2"; - uint32_t opc, shift_max; - TCGv_i64 t0, t1; - - opc = MASK_LMI(ctx->opcode); - switch (opc) { - case OPC_ADD_CP2: - case OPC_SUB_CP2: - case OPC_DADD_CP2: - case OPC_DSUB_CP2: - t0 = tcg_temp_local_new_i64(tcg_ctx); - t1 = tcg_temp_local_new_i64(tcg_ctx); - break; - default: - t0 = tcg_temp_new_i64(tcg_ctx); - t1 = tcg_temp_new_i64(tcg_ctx); - break; - } - - gen_load_fpr64(ctx, t0, rs); - gen_load_fpr64(ctx, t1, rt); - -#define LMI_HELPER(UP, LO) \ - case OPC_##UP: gen_helper_##LO(tcg_ctx, t0, t0, t1); opn = #LO; break -#define LMI_HELPER_1(UP, LO) \ - case OPC_##UP: gen_helper_##LO(tcg_ctx, t0, t0); opn = #LO; break -#define LMI_DIRECT(UP, LO, OP) \ - case OPC_##UP: tcg_gen_##OP##_i64(tcg_ctx, t0, t0, t1); opn = #LO; break - - switch (opc) { - LMI_HELPER(PADDSH, paddsh); - LMI_HELPER(PADDUSH, paddush); - LMI_HELPER(PADDH, paddh); - LMI_HELPER(PADDW, paddw); - LMI_HELPER(PADDSB, paddsb); - LMI_HELPER(PADDUSB, paddusb); - LMI_HELPER(PADDB, paddb); - - 
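
Unlike the R6 movcond form, gen_loongson_integer above spells the div.g/mod.g special cases out with explicit branches; the resulting semantics are worth stating plainly. Scalar sketch (the name is mine):

    #include <stdint.h>

    // div.g as generated above: x/0 yields 0, INT_MIN/-1 yields INT_MIN,
    // everything else is an ordinary signed division.
    static int32_t div_g(int32_t num, int32_t den)
    {
        if (den == 0)
            return 0;
        if (num == INT32_MIN && den == -1)
            return num;
        return num / den;
    }
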
LMI_HELPER(PSUBSH, psubsh);
-    LMI_HELPER(PSUBUSH, psubush);
-    LMI_HELPER(PSUBH, psubh);
-    LMI_HELPER(PSUBW, psubw);
-    LMI_HELPER(PSUBSB, psubsb);
-    LMI_HELPER(PSUBUSB, psubusb);
-    LMI_HELPER(PSUBB, psubb);
-
-    LMI_HELPER(PSHUFH, pshufh);
-    LMI_HELPER(PACKSSWH, packsswh);
-    LMI_HELPER(PACKSSHB, packsshb);
-    LMI_HELPER(PACKUSHB, packushb);
-
-    LMI_HELPER(PUNPCKLHW, punpcklhw);
-    LMI_HELPER(PUNPCKHHW, punpckhhw);
-    LMI_HELPER(PUNPCKLBH, punpcklbh);
-    LMI_HELPER(PUNPCKHBH, punpckhbh);
-    LMI_HELPER(PUNPCKLWD, punpcklwd);
-    LMI_HELPER(PUNPCKHWD, punpckhwd);
-
-    LMI_HELPER(PAVGH, pavgh);
-    LMI_HELPER(PAVGB, pavgb);
-    LMI_HELPER(PMAXSH, pmaxsh);
-    LMI_HELPER(PMINSH, pminsh);
-    LMI_HELPER(PMAXUB, pmaxub);
-    LMI_HELPER(PMINUB, pminub);
-
-    LMI_HELPER(PCMPEQW, pcmpeqw);
-    LMI_HELPER(PCMPGTW, pcmpgtw);
-    LMI_HELPER(PCMPEQH, pcmpeqh);
-    LMI_HELPER(PCMPGTH, pcmpgth);
-    LMI_HELPER(PCMPEQB, pcmpeqb);
-    LMI_HELPER(PCMPGTB, pcmpgtb);
-
-    LMI_HELPER(PSLLW, psllw);
-    LMI_HELPER(PSLLH, psllh);
-    LMI_HELPER(PSRLW, psrlw);
-    LMI_HELPER(PSRLH, psrlh);
-    LMI_HELPER(PSRAW, psraw);
-    LMI_HELPER(PSRAH, psrah);
-
-    LMI_HELPER(PMULLH, pmullh);
-    LMI_HELPER(PMULHH, pmulhh);
-    LMI_HELPER(PMULHUH, pmulhuh);
-    LMI_HELPER(PMADDHW, pmaddhw);
-
-    LMI_HELPER(PASUBUB, pasubub);
-    LMI_HELPER_1(BIADD, biadd);
-    LMI_HELPER_1(PMOVMSKB, pmovmskb);
-
-    LMI_DIRECT(PADDD, paddd, add);
-    LMI_DIRECT(PSUBD, psubd, sub);
-    LMI_DIRECT(XOR_CP2, xor, xor);
-    LMI_DIRECT(NOR_CP2, nor, nor);
-    LMI_DIRECT(AND_CP2, and, and);
-    LMI_DIRECT(PANDN, pandn, andc);
-    LMI_DIRECT(OR, or, or);
-
-    case OPC_PINSRH_0:
-        tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 0, 16);
-        opn = "pinsrh_0";
-        break;
-    case OPC_PINSRH_1:
-        tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 16, 16);
-        opn = "pinsrh_1";
-        break;
-    case OPC_PINSRH_2:
-        tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 32, 16);
-        opn = "pinsrh_2";
-        break;
-    case OPC_PINSRH_3:
-        tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 48, 16);
-        opn = "pinsrh_3";
-        break;
-
-    case OPC_PEXTRH:
-        tcg_gen_andi_i64(tcg_ctx, t1, t1, 3);
-        tcg_gen_shli_i64(tcg_ctx, t1, t1, 4);
-        tcg_gen_shr_i64(tcg_ctx, t0, t0, t1);
-        tcg_gen_ext16u_i64(tcg_ctx, t0, t0);
-        opn = "pextrh";
-        break;
-
-    case OPC_ADDU_CP2:
-        tcg_gen_add_i64(tcg_ctx, t0, t0, t1);
-        tcg_gen_ext32s_i64(tcg_ctx, t0, t0);
-        opn = "addu";
-        break;
-    case OPC_SUBU_CP2:
-        tcg_gen_sub_i64(tcg_ctx, t0, t0, t1);
-        tcg_gen_ext32s_i64(tcg_ctx, t0, t0);
-        opn = "subu";
-        break;
-
-    case OPC_SLL_CP2:
-        opn = "sll";
-        shift_max = 32;
-        goto do_shift;
-    case OPC_SRL_CP2:
-        opn = "srl";
-        shift_max = 32;
-        goto do_shift;
-    case OPC_SRA_CP2:
-        opn = "sra";
-        shift_max = 32;
-        goto do_shift;
-    case OPC_DSLL_CP2:
-        opn = "dsll";
-        shift_max = 64;
-        goto do_shift;
-    case OPC_DSRL_CP2:
-        opn = "dsrl";
-        shift_max = 64;
-        goto do_shift;
-    case OPC_DSRA_CP2:
-        opn = "dsra";
-        shift_max = 64;
-        goto do_shift;
-    do_shift:
-        /* Make sure shift count isn't TCG undefined behaviour. */
-        tcg_gen_andi_i64(tcg_ctx, t1, t1, shift_max - 1);
-
-        switch (opc) {
-        case OPC_SLL_CP2:
-        case OPC_DSLL_CP2:
-            tcg_gen_shl_i64(tcg_ctx, t0, t0, t1);
-            break;
-        case OPC_SRA_CP2:
-        case OPC_DSRA_CP2:
-            /* Since SRA is UndefinedResult without sign-extended inputs,
-               we can treat SRA and DSRA the same. */
-            tcg_gen_sar_i64(tcg_ctx, t0, t0, t1);
-            break;
-        case OPC_SRL_CP2:
-            /* We want to shift in zeros for SRL; zero-extend first.
*/ - tcg_gen_ext32u_i64(tcg_ctx, t0, t0); - /* FALLTHRU */ - case OPC_DSRL_CP2: - tcg_gen_shr_i64(tcg_ctx, t0, t0, t1); - break; - } - - if (shift_max == 32) { - tcg_gen_ext32s_i64(tcg_ctx, t0, t0); - } - - /* Shifts larger than MAX produce zero. */ - tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LTU, t1, t1, shift_max); - tcg_gen_neg_i64(tcg_ctx, t1, t1); - tcg_gen_and_i64(tcg_ctx, t0, t0, t1); - break; - - case OPC_ADD_CP2: - case OPC_DADD_CP2: - { - TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); - int lab = gen_new_label(tcg_ctx); - - tcg_gen_mov_i64(tcg_ctx, t2, t0); - tcg_gen_add_i64(tcg_ctx, t0, t1, t2); - if (opc == OPC_ADD_CP2) { - tcg_gen_ext32s_i64(tcg_ctx, t0, t0); - } - tcg_gen_xor_i64(tcg_ctx, t1, t1, t2); - tcg_gen_xor_i64(tcg_ctx, t2, t2, t0); - tcg_gen_andc_i64(tcg_ctx, t1, t2, t1); - tcg_temp_free_i64(tcg_ctx, t2); - tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_GE, t1, 0, lab); - generate_exception(ctx, EXCP_OVERFLOW); - gen_set_label(tcg_ctx, lab); - - opn = (opc == OPC_ADD_CP2 ? "add" : "dadd"); - break; - } - - case OPC_SUB_CP2: - case OPC_DSUB_CP2: - { - TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); - int lab = gen_new_label(tcg_ctx); - - tcg_gen_mov_i64(tcg_ctx, t2, t0); - tcg_gen_sub_i64(tcg_ctx, t0, t1, t2); - if (opc == OPC_SUB_CP2) { - tcg_gen_ext32s_i64(tcg_ctx, t0, t0); - } - tcg_gen_xor_i64(tcg_ctx, t1, t1, t2); - tcg_gen_xor_i64(tcg_ctx, t2, t2, t0); - tcg_gen_and_i64(tcg_ctx, t1, t1, t2); - tcg_temp_free_i64(tcg_ctx, t2); - tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_GE, t1, 0, lab); - generate_exception(ctx, EXCP_OVERFLOW); - gen_set_label(tcg_ctx, lab); - - opn = (opc == OPC_SUB_CP2 ? "sub" : "dsub"); - break; - } - - case OPC_PMULUW: - tcg_gen_ext32u_i64(tcg_ctx, t0, t0); - tcg_gen_ext32u_i64(tcg_ctx, t1, t1); - tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); - opn = "pmuluw"; - break; - - case OPC_SEQU_CP2: - case OPC_SEQ_CP2: - case OPC_SLTU_CP2: - case OPC_SLT_CP2: - case OPC_SLEU_CP2: - case OPC_SLE_CP2: - /* ??? Document is unclear: Set FCC[CC]. Does that mean the - FD field is the CC field? 
*/ - default: - MIPS_INVAL(opn); - generate_exception(ctx, EXCP_RI); - return; - } - -#undef LMI_HELPER -#undef LMI_DIRECT - - gen_store_fpr64(ctx, t0, rd); - - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, %s", opn, - fregnames[rd], fregnames[rs], fregnames[rt]); - tcg_temp_free_i64(tcg_ctx, t0); - tcg_temp_free_i64(tcg_ctx, t1); -} - -/* Traps */ -static void gen_trap (DisasContext *ctx, uint32_t opc, - int rs, int rt, int16_t imm) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - int cond; - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - - cond = 0; - /* Load needed operands */ - switch (opc) { - case OPC_TEQ: - case OPC_TGE: - case OPC_TGEU: - case OPC_TLT: - case OPC_TLTU: - case OPC_TNE: - /* Compare two registers */ - if (rs != rt) { - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - cond = 1; - } - break; - case OPC_TEQI: - case OPC_TGEI: - case OPC_TGEIU: - case OPC_TLTI: - case OPC_TLTIU: - case OPC_TNEI: - /* Compare register to immediate */ - if (rs != 0 || imm != 0) { - gen_load_gpr(ctx, t0, rs); - tcg_gen_movi_tl(tcg_ctx, t1, (int32_t)imm); - cond = 1; - } - break; - } - if (cond == 0) { - switch (opc) { - case OPC_TEQ: /* rs == rs */ - case OPC_TEQI: /* r0 == 0 */ - case OPC_TGE: /* rs >= rs */ - case OPC_TGEI: /* r0 >= 0 */ - case OPC_TGEU: /* rs >= rs unsigned */ - case OPC_TGEIU: /* r0 >= 0 unsigned */ - /* Always trap */ - generate_exception(ctx, EXCP_TRAP); - break; - case OPC_TLT: /* rs < rs */ - case OPC_TLTI: /* r0 < 0 */ - case OPC_TLTU: /* rs < rs unsigned */ - case OPC_TLTIU: /* r0 < 0 unsigned */ - case OPC_TNE: /* rs != rs */ - case OPC_TNEI: /* r0 != 0 */ - /* Never trap: treat as NOP. */ - break; - } - } else { - int l1 = gen_new_label(tcg_ctx); - - switch (opc) { - case OPC_TEQ: - case OPC_TEQI: - tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, t0, t1, l1); - break; - case OPC_TGE: - case OPC_TGEI: - tcg_gen_brcond_tl(tcg_ctx, TCG_COND_LT, t0, t1, l1); - break; - case OPC_TGEU: - case OPC_TGEIU: - tcg_gen_brcond_tl(tcg_ctx, TCG_COND_LTU, t0, t1, l1); - break; - case OPC_TLT: - case OPC_TLTI: - tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, t0, t1, l1); - break; - case OPC_TLTU: - case OPC_TLTIU: - tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GEU, t0, t1, l1); - break; - case OPC_TNE: - case OPC_TNEI: - tcg_gen_brcond_tl(tcg_ctx, TCG_COND_EQ, t0, t1, l1); - break; - } - generate_exception(ctx, EXCP_TRAP); - gen_set_label(tcg_ctx, l1); - } - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TranslationBlock *tb; - tb = ctx->tb; - if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) && - likely(!ctx->singlestep_enabled)) { - tcg_gen_goto_tb(tcg_ctx, n); - gen_save_pc(ctx, dest); - tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + n); - } else { - gen_save_pc(ctx, dest); - if (ctx->singlestep_enabled) { - save_cpu_state(ctx, 0); - gen_helper_0e0i(tcg_ctx, raise_exception, EXCP_DEBUG); - } - tcg_gen_exit_tb(tcg_ctx, 0); - } -} - -/* Branches (before delay slot) */ -static void gen_compute_branch (DisasContext *ctx, uint32_t opc, - int insn_bytes, - int rs, int rt, int32_t offset, - int delayslot_size) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - target_ulong btgt = -1; - int blink = 0; - int bcond_compute = 0; - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - - if (ctx->hflags & MIPS_HFLAG_BMASK) { -#ifdef MIPS_DEBUG_DISAS - 
LOG_DISAS("Branch in delay / forbidden slot at PC 0x" - TARGET_FMT_lx "\n", ctx->pc); -#endif - generate_exception(ctx, EXCP_RI); - goto out; - } - - /* Load needed operands */ - switch (opc) { - case OPC_BEQ: - case OPC_BEQL: - case OPC_BNE: - case OPC_BNEL: - /* Compare two registers */ - if (rs != rt) { - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - bcond_compute = 1; - } - btgt = ctx->pc + insn_bytes + offset; - break; - case OPC_BGEZ: - case OPC_BGEZAL: - case OPC_BGEZALL: - case OPC_BGEZL: - case OPC_BGTZ: - case OPC_BGTZL: - case OPC_BLEZ: - case OPC_BLEZL: - case OPC_BLTZ: - case OPC_BLTZAL: - case OPC_BLTZALL: - case OPC_BLTZL: - /* Compare to zero */ - if (rs != 0) { - gen_load_gpr(ctx, t0, rs); - bcond_compute = 1; - } - btgt = ctx->pc + insn_bytes + offset; - break; - case OPC_BPOSGE32: -#if defined(TARGET_MIPS64) - case OPC_BPOSGE64: - tcg_gen_andi_tl(tcg_ctx, t0, *(TCGv *)tcg_ctx->cpu_dspctrl, 0x7F); -#else - tcg_gen_andi_tl(tcg_ctx, t0, *(TCGv *)tcg_ctx->cpu_dspctrl, 0x3F); -#endif - bcond_compute = 1; - btgt = ctx->pc + insn_bytes + offset; - break; - case OPC_J: - case OPC_JAL: - case OPC_JALX: - /* Jump to immediate */ - btgt = ((ctx->pc + insn_bytes) & (int32_t)0xF0000000) | (uint32_t)offset; - break; - case OPC_JR: - case OPC_JALR: - /* Jump to register */ - if (offset != 0 && offset != 16) { - /* Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the - others are reserved. */ - MIPS_INVAL("jump hint"); - generate_exception(ctx, EXCP_RI); - goto out; - } - gen_load_gpr(ctx, *(TCGv *)tcg_ctx->btarget, rs); - break; - default: - MIPS_INVAL("branch/jump"); - generate_exception(ctx, EXCP_RI); - goto out; - } - if (bcond_compute == 0) { - /* No condition to be computed */ - switch (opc) { - case OPC_BEQ: /* rx == rx */ - case OPC_BEQL: /* rx == rx likely */ - case OPC_BGEZ: /* 0 >= 0 */ - case OPC_BGEZL: /* 0 >= 0 likely */ - case OPC_BLEZ: /* 0 <= 0 */ - case OPC_BLEZL: /* 0 <= 0 likely */ - /* Always take */ - ctx->hflags |= MIPS_HFLAG_B; - MIPS_DEBUG("balways"); - break; - case OPC_BGEZAL: /* 0 >= 0 */ - case OPC_BGEZALL: /* 0 >= 0 likely */ - /* Always take and link */ - blink = 31; - ctx->hflags |= MIPS_HFLAG_B; - MIPS_DEBUG("balways and link"); - break; - case OPC_BNE: /* rx != rx */ - case OPC_BGTZ: /* 0 > 0 */ - case OPC_BLTZ: /* 0 < 0 */ - /* Treat as NOP. */ - MIPS_DEBUG("bnever (NOP)"); - goto out; - case OPC_BLTZAL: /* 0 < 0 */ - /* Handle as an unconditional branch to get correct delay - slot checking. 
*/ - blink = 31; - btgt = ctx->pc + insn_bytes + delayslot_size; - ctx->hflags |= MIPS_HFLAG_B; - MIPS_DEBUG("bnever and link"); - break; - case OPC_BLTZALL: /* 0 < 0 likely */ - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[31], ctx->pc + 8); - /* Skip the instruction in the delay slot */ - MIPS_DEBUG("bnever, link and skip"); - ctx->pc += 4; - goto out; - case OPC_BNEL: /* rx != rx likely */ - case OPC_BGTZL: /* 0 > 0 likely */ - case OPC_BLTZL: /* 0 < 0 likely */ - /* Skip the instruction in the delay slot */ - MIPS_DEBUG("bnever and skip"); - ctx->pc += 4; - goto out; - case OPC_J: - ctx->hflags |= MIPS_HFLAG_B; - MIPS_DEBUG("j " TARGET_FMT_lx, btgt); - break; - case OPC_JALX: - ctx->hflags |= MIPS_HFLAG_BX; - /* Fallthrough */ - case OPC_JAL: - blink = 31; - ctx->hflags |= MIPS_HFLAG_B; - MIPS_DEBUG("jal " TARGET_FMT_lx, btgt); - break; - case OPC_JR: - ctx->hflags |= MIPS_HFLAG_BR; - MIPS_DEBUG("jr %s", regnames[rs]); - break; - case OPC_JALR: - blink = rt; - ctx->hflags |= MIPS_HFLAG_BR; - MIPS_DEBUG("jalr %s, %s", regnames[rt], regnames[rs]); - break; - default: - MIPS_INVAL("branch/jump"); - generate_exception(ctx, EXCP_RI); - goto out; - } - } else { - switch (opc) { - case OPC_BEQ: - tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, *(TCGv *)tcg_ctx->bcond, t0, t1); - MIPS_DEBUG("beq %s, %s, " TARGET_FMT_lx, - regnames[rs], regnames[rt], btgt); - goto not_likely; - case OPC_BEQL: - tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, *(TCGv *)tcg_ctx->bcond, t0, t1); - MIPS_DEBUG("beql %s, %s, " TARGET_FMT_lx, - regnames[rs], regnames[rt], btgt); - goto likely; - case OPC_BNE: - tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->bcond, t0, t1); - MIPS_DEBUG("bne %s, %s, " TARGET_FMT_lx, - regnames[rs], regnames[rt], btgt); - goto not_likely; - case OPC_BNEL: - tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->bcond, t0, t1); - MIPS_DEBUG("bnel %s, %s, " TARGET_FMT_lx, - regnames[rs], regnames[rt], btgt); - goto likely; - case OPC_BGEZ: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 0); - MIPS_DEBUG("bgez %s, " TARGET_FMT_lx, regnames[rs], btgt); - goto not_likely; - case OPC_BGEZL: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 0); - MIPS_DEBUG("bgezl %s, " TARGET_FMT_lx, regnames[rs], btgt); - goto likely; - case OPC_BGEZAL: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 0); - MIPS_DEBUG("bgezal %s, " TARGET_FMT_lx, regnames[rs], btgt); - blink = 31; - goto not_likely; - case OPC_BGEZALL: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 0); - blink = 31; - MIPS_DEBUG("bgezall %s, " TARGET_FMT_lx, regnames[rs], btgt); - goto likely; - case OPC_BGTZ: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GT, *(TCGv *)tcg_ctx->bcond, t0, 0); - MIPS_DEBUG("bgtz %s, " TARGET_FMT_lx, regnames[rs], btgt); - goto not_likely; - case OPC_BGTZL: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GT, *(TCGv *)tcg_ctx->bcond, t0, 0); - MIPS_DEBUG("bgtzl %s, " TARGET_FMT_lx, regnames[rs], btgt); - goto likely; - case OPC_BLEZ: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LE, *(TCGv *)tcg_ctx->bcond, t0, 0); - MIPS_DEBUG("blez %s, " TARGET_FMT_lx, regnames[rs], btgt); - goto not_likely; - case OPC_BLEZL: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LE, *(TCGv *)tcg_ctx->bcond, t0, 0); - MIPS_DEBUG("blezl %s, " TARGET_FMT_lx, regnames[rs], btgt); - goto likely; - case OPC_BLTZ: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, *(TCGv *)tcg_ctx->bcond, t0, 0); - MIPS_DEBUG("bltz %s, " TARGET_FMT_lx, regnames[rs], btgt); - goto not_likely; - case 
OPC_BLTZL: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, *(TCGv *)tcg_ctx->bcond, t0, 0); - MIPS_DEBUG("bltzl %s, " TARGET_FMT_lx, regnames[rs], btgt); - goto likely; - case OPC_BPOSGE32: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 32); - MIPS_DEBUG("bposge32 " TARGET_FMT_lx, btgt); - goto not_likely; -#if defined(TARGET_MIPS64) - case OPC_BPOSGE64: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 64); - MIPS_DEBUG("bposge64 " TARGET_FMT_lx, btgt); - goto not_likely; -#endif - case OPC_BLTZAL: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, *(TCGv *)tcg_ctx->bcond, t0, 0); - blink = 31; - MIPS_DEBUG("bltzal %s, " TARGET_FMT_lx, regnames[rs], btgt); - not_likely: - ctx->hflags |= MIPS_HFLAG_BC; - break; - case OPC_BLTZALL: - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, *(TCGv *)tcg_ctx->bcond, t0, 0); - blink = 31; - MIPS_DEBUG("bltzall %s, " TARGET_FMT_lx, regnames[rs], btgt); - likely: - ctx->hflags |= MIPS_HFLAG_BL; - break; - default: - MIPS_INVAL("conditional branch/jump"); - generate_exception(ctx, EXCP_RI); - goto out; - } - } - MIPS_DEBUG("enter ds: link %d cond %02x target " TARGET_FMT_lx, - blink, ctx->hflags, btgt); - - ctx->btarget = btgt; - - switch (delayslot_size) { - case 2: - ctx->hflags |= MIPS_HFLAG_BDS16; - break; - case 4: - ctx->hflags |= MIPS_HFLAG_BDS32; - break; - } - - if (blink > 0) { - int post_delay = insn_bytes + delayslot_size; - int lowbit = !!(ctx->hflags & MIPS_HFLAG_M16); - - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[blink], ctx->pc + post_delay + lowbit); - } - - out: - if (insn_bytes == 2) - ctx->hflags |= MIPS_HFLAG_B16; - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -/* special3 bitfield operations */ -static void gen_bitops (DisasContext *ctx, uint32_t opc, int rt, - int rs, int lsb, int msb) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t1, rs); - switch (opc) { - case OPC_EXT: - if (lsb + msb > 31) - goto fail; - tcg_gen_shri_tl(tcg_ctx, t0, t1, lsb); - if (msb != 31) { - tcg_gen_andi_tl(tcg_ctx, t0, t0, (1U << (msb + 1)) - 1); - } else { - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - } - break; -#if defined(TARGET_MIPS64) - case OPC_DEXTM: - tcg_gen_shri_tl(tcg_ctx, t0, t1, lsb); - if (msb != 31) { - tcg_gen_andi_tl(tcg_ctx, t0, t0, (1ULL << (msb + 1 + 32)) - 1); - } - break; - case OPC_DEXTU: - tcg_gen_shri_tl(tcg_ctx, t0, t1, lsb + 32); - tcg_gen_andi_tl(tcg_ctx, t0, t0, (1ULL << (msb + 1)) - 1); - break; - case OPC_DEXT: - tcg_gen_shri_tl(tcg_ctx, t0, t1, lsb); - tcg_gen_andi_tl(tcg_ctx, t0, t0, (1ULL << (msb + 1)) - 1); - break; -#endif - case OPC_INS: - if (lsb > msb) - goto fail; - gen_load_gpr(ctx, t0, rt); - tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb, msb - lsb + 1); - tcg_gen_ext32s_tl(tcg_ctx, t0, t0); - break; -#if defined(TARGET_MIPS64) - case OPC_DINSM: - gen_load_gpr(ctx, t0, rt); - tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb, msb + 32 - lsb + 1); - break; - case OPC_DINSU: - gen_load_gpr(ctx, t0, rt); - tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb + 32, msb - lsb + 1); - break; - case OPC_DINS: - gen_load_gpr(ctx, t0, rt); - tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb, msb - lsb + 1); - break; -#endif - default: -fail: - MIPS_INVAL("bitops"); - generate_exception(ctx, EXCP_RI); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); - return; - } - gen_store_gpr(tcg_ctx, t0, rt); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -static void gen_bshfl (DisasContext *ctx, uint32_t 
op2, int rt, int rd) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - TCGv t0; - - if (rd == 0) { - /* If no destination, treat it as a NOP. */ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rt); - switch (op2) { - case OPC_WSBH: - { - TCGv t1 = tcg_temp_new(tcg_ctx); - - tcg_gen_shri_tl(tcg_ctx, t1, t0, 8); - tcg_gen_andi_tl(tcg_ctx, t1, t1, 0x00FF00FF); - tcg_gen_shli_tl(tcg_ctx, t0, t0, 8); - tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x00FF00FF); - tcg_gen_or_tl(tcg_ctx, t0, t0, t1); - tcg_temp_free(tcg_ctx, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], t0); - } - break; - case OPC_SEB: - tcg_gen_ext8s_tl(tcg_ctx, *cpu_gpr[rd], t0); - break; - case OPC_SEH: - tcg_gen_ext16s_tl(tcg_ctx, *cpu_gpr[rd], t0); - break; -#if defined(TARGET_MIPS64) - case OPC_DSBH: - { - TCGv t1 = tcg_temp_new(tcg_ctx); - - tcg_gen_shri_tl(tcg_ctx, t1, t0, 8); - tcg_gen_andi_tl(tcg_ctx, t1, t1, 0x00FF00FF00FF00FFULL); - tcg_gen_shli_tl(tcg_ctx, t0, t0, 8); - tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x00FF00FF00FF00FFULL); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_temp_free(tcg_ctx, t1); - } - break; - case OPC_DSHD: - { - TCGv t1 = tcg_temp_new(tcg_ctx); - - tcg_gen_shri_tl(tcg_ctx, t1, t0, 16); - tcg_gen_andi_tl(tcg_ctx, t1, t1, 0x0000FFFF0000FFFFULL); - tcg_gen_shli_tl(tcg_ctx, t0, t0, 16); - tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x0000FFFF0000FFFFULL); - tcg_gen_or_tl(tcg_ctx, t0, t0, t1); - tcg_gen_shri_tl(tcg_ctx, t1, t0, 32); - tcg_gen_shli_tl(tcg_ctx, t0, t0, 32); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_temp_free(tcg_ctx, t1); - } - break; -#endif - default: - MIPS_INVAL("bsfhl"); - generate_exception(ctx, EXCP_RI); - tcg_temp_free(tcg_ctx, t0); - return; - } - tcg_temp_free(tcg_ctx, t0); -} - -#ifndef CONFIG_USER_ONLY -/* CP0 (MMU and control) */ -static inline void gen_mfc0_load32 (DisasContext *ctx, TCGv arg, target_ulong off) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, off); - tcg_gen_ext_i32_tl(tcg_ctx, arg, t0); - tcg_temp_free_i32(tcg_ctx, t0); -} - -static inline void gen_mfc0_load64 (DisasContext *ctx, TCGv arg, target_ulong off) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, off); - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); -} - -static inline void gen_mtc0_store32 (DisasContext *ctx, TCGv arg, target_ulong off) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_trunc_tl_i32(tcg_ctx, t0, arg); - tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, off); - tcg_temp_free_i32(tcg_ctx, t0); -} - -static inline void gen_mtc0_store64 (DisasContext *ctx, TCGv arg, target_ulong off) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, off); -} - -static inline void gen_mfc0_unimplemented(DisasContext *ctx, TCGv arg) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - if (ctx->insn_flags & ISA_MIPS32R6) { - tcg_gen_movi_tl(tcg_ctx, arg, 0); - } else { - tcg_gen_movi_tl(tcg_ctx, arg, ~0); - } -} - -#define CP0_CHECK(c) \ - do { \ - if (!(c)) { \ - goto cp0_unimplemented; \ - } \ - } while (0) - -static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *rn = "invalid"; - - if (sel != 0) - check_insn(ctx, ISA_MIPS32); - - switch (reg) { - case 0: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, 
arg, offsetof(CPUMIPSState, CP0_Index)); - rn = "Index"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_mvpcontrol(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "MVPControl"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_mvpconf0(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "MVPConf0"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_mvpconf1(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "MVPConf1"; - break; - default: - goto cp0_unimplemented; - } - break; - case 1: - switch (sel) { - case 0: - CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); - gen_helper_mfc0_random(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "Random"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEControl)); - rn = "VPEControl"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf0)); - rn = "VPEConf0"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf1)); - rn = "VPEConf1"; - break; - case 4: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load64(ctx, arg, offsetof(CPUMIPSState, CP0_YQMask)); - rn = "YQMask"; - break; - case 5: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load64(ctx, arg, offsetof(CPUMIPSState, CP0_VPESchedule)); - rn = "VPESchedule"; - break; - case 6: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load64(ctx, arg, offsetof(CPUMIPSState, CP0_VPEScheFBack)); - rn = "VPEScheFBack"; - break; - case 7: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEOpt)); - rn = "VPEOpt"; - break; - default: - goto cp0_unimplemented; - } - break; - case 2: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0)); -#if defined(TARGET_MIPS64) - if (ctx->rxi) { - TCGv tmp = tcg_temp_new(tcg_ctx); - tcg_gen_andi_tl(tcg_ctx, tmp, arg, (3ull << 62)); - tcg_gen_shri_tl(tcg_ctx, tmp, tmp, 32); - tcg_gen_or_tl(tcg_ctx, arg, arg, tmp); - tcg_temp_free(tcg_ctx, tmp); - } -#endif - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - rn = "EntryLo0"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_tcstatus(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCStatus"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_tcbind(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCBind"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_tcrestart(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCRestart"; - break; - case 4: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_tchalt(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCHalt"; - break; - case 5: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_tccontext(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCContext"; - break; - case 6: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_tcschedule(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCSchedule"; - break; - case 7: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_tcschefback(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCScheFBack"; - break; - default: - goto cp0_unimplemented; - } - break; - case 3: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1)); -#if defined(TARGET_MIPS64) - if (ctx->rxi) { - TCGv tmp = tcg_temp_new(tcg_ctx); - tcg_gen_andi_tl(tcg_ctx, tmp, arg, (3ull << 62)); - tcg_gen_shri_tl(tcg_ctx, tmp, tmp, 32); - 
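/* The andi/shri pair above isolates EntryLo1's RI/XI bits (63:62) and
 * shifts them down by 32; the tcg_gen_or_tl that follows mirrors them
 * into bits 31:30 so they survive the final 32-bit sign extension of
 * this MFC0 view. */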
tcg_gen_or_tl(tcg_ctx, arg, arg, tmp); - tcg_temp_free(tcg_ctx, tmp); - } -#endif - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - rn = "EntryLo1"; - break; - default: - goto cp0_unimplemented; - } - break; - case 4: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_Context)); - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - rn = "Context"; - break; - case 1: -// gen_helper_mfc0_contextconfig(arg); /* SmartMIPS ASE */ - rn = "ContextConfig"; - goto cp0_unimplemented; -// break; - case 2: - CP0_CHECK(ctx->ulri); - tcg_gen_ld32s_tl(tcg_ctx, arg, tcg_ctx->cpu_env, - offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); - rn = "UserLocal"; - break; - default: - goto cp0_unimplemented; - } - break; - case 5: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PageMask)); - rn = "PageMask"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PageGrain)); - rn = "PageGrain"; - break; - default: - goto cp0_unimplemented; - } - break; - case 6: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Wired)); - rn = "Wired"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf0)); - rn = "SRSConf0"; - break; - case 2: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf1)); - rn = "SRSConf1"; - break; - case 3: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf2)); - rn = "SRSConf2"; - break; - case 4: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf3)); - rn = "SRSConf3"; - break; - case 5: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf4)); - rn = "SRSConf4"; - break; - default: - goto cp0_unimplemented; - } - break; - case 7: - switch (sel) { - case 0: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_HWREna)); - rn = "HWREna"; - break; - default: - goto cp0_unimplemented; - } - break; - case 8: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - rn = "BadVAddr"; - break; - case 1: - CP0_CHECK(ctx->bi); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_BadInstr)); - rn = "BadInstr"; - break; - case 2: - CP0_CHECK(ctx->bp); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrP)); - rn = "BadInstrP"; - break; - default: - goto cp0_unimplemented; - } - break; - case 9: - switch (sel) { - case 0: - /* Mark as an IO operation because we read the time. */ - //if (use_icount) - // gen_io_start(); - gen_helper_mfc0_count(tcg_ctx, arg, tcg_ctx->cpu_env); - //if (use_icount) { - // gen_io_end(); - //} - /* Break the TB to be able to take timer interrupts immediately - after reading count. 
*/ - ctx->bstate = BS_STOP; - rn = "Count"; - break; - /* 6,7 are implementation dependent */ - default: - goto cp0_unimplemented; - } - break; - case 10: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryHi)); - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - rn = "EntryHi"; - break; - default: - goto cp0_unimplemented; - } - break; - case 11: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Compare)); - rn = "Compare"; - break; - /* 6,7 are implementation dependent */ - default: - goto cp0_unimplemented; - } - break; - case 12: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Status)); - rn = "Status"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_IntCtl)); - rn = "IntCtl"; - break; - case 2: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSCtl)); - rn = "SRSCtl"; - break; - case 3: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); - rn = "SRSMap"; - break; - default: - goto cp0_unimplemented; - } - break; - case 13: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Cause)); - rn = "Cause"; - break; - default: - goto cp0_unimplemented; - } - break; - case 14: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - rn = "EPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 15: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PRid)); - rn = "PRid"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_EBase)); - rn = "EBase"; - break; - default: - goto cp0_unimplemented; - } - break; - case 16: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config0)); - rn = "Config"; - break; - case 1: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config1)); - rn = "Config1"; - break; - case 2: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config2)); - rn = "Config2"; - break; - case 3: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config3)); - rn = "Config3"; - break; - case 4: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config4)); - rn = "Config4"; - break; - case 5: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config5)); - rn = "Config5"; - break; - /* 6,7 are implementation dependent */ - case 6: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config6)); - rn = "Config6"; - break; - case 7: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config7)); - rn = "Config7"; - break; - default: - goto cp0_unimplemented; - } - break; - case 17: - switch (sel) { - case 0: - gen_helper_mfc0_lladdr(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "LLAddr"; - break; - default: - goto cp0_unimplemented; - } - break; - case 18: - switch (sel) { - case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: - gen_helper_1e0i(tcg_ctx, mfc0_watchlo, arg, sel); - rn = "WatchLo"; - break; - default: - goto cp0_unimplemented; - } - break; - case 19: - switch (sel) { - case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: - gen_helper_1e0i(tcg_ctx, mfc0_watchhi, arg, sel); - rn = "WatchHi"; - break; - default: - goto cp0_unimplemented; - } - break; - case 20: - switch (sel) { - case 0: -#if defined(TARGET_MIPS64) - check_insn(ctx, 
ISA_MIPS3); - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_XContext)); - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - rn = "XContext"; - break; -#endif - default: - goto cp0_unimplemented; - } - break; - case 21: - /* Officially reserved, but sel 0 is used for R1x000 framemask */ - CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Framemask)); - rn = "Framemask"; - break; - default: - goto cp0_unimplemented; - } - break; - case 22: - tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ - rn = "'Diagnostic"; /* implementation dependent */ - break; - case 23: - switch (sel) { - case 0: - gen_helper_mfc0_debug(tcg_ctx, arg, tcg_ctx->cpu_env); /* EJTAG support */ - rn = "Debug"; - break; - case 1: -// gen_helper_mfc0_tracecontrol(arg); /* PDtrace support */ - rn = "TraceControl"; -// break; - case 2: -// gen_helper_mfc0_tracecontrol2(arg); /* PDtrace support */ - rn = "TraceControl2"; -// break; - case 3: -// gen_helper_mfc0_usertracedata(arg); /* PDtrace support */ - rn = "UserTraceData"; -// break; - case 4: -// gen_helper_mfc0_tracebpc(arg); /* PDtrace support */ - rn = "TraceBPC"; -// break; - default: - goto cp0_unimplemented; - } - break; - case 24: - switch (sel) { - case 0: - /* EJTAG support */ - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - rn = "DEPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 25: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Performance0)); - rn = "Performance0"; - break; - case 1: -// gen_helper_mfc0_performance1(arg); - rn = "Performance1"; -// break; - case 2: -// gen_helper_mfc0_performance2(arg); - rn = "Performance2"; -// break; - case 3: -// gen_helper_mfc0_performance3(arg); - rn = "Performance3"; -// break; - case 4: -// gen_helper_mfc0_performance4(arg); - rn = "Performance4"; -// break; - case 5: -// gen_helper_mfc0_performance5(arg); - rn = "Performance5"; -// break; - case 6: -// gen_helper_mfc0_performance6(arg); - rn = "Performance6"; -// break; - case 7: -// gen_helper_mfc0_performance7(arg); - rn = "Performance7"; -// break; - default: - goto cp0_unimplemented; - } - break; - case 26: - tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ - rn = "ECC"; - break; - case 27: - switch (sel) { - case 0: case 1: case 2: case 3: - tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ - rn = "CacheErr"; - break; - default: - goto cp0_unimplemented; - } - break; - case 28: - switch (sel) { - case 0: - case 2: - case 4: - case 6: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_TagLo)); - rn = "TagLo"; - break; - case 1: - case 3: - case 5: - case 7: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DataLo)); - rn = "DataLo"; - break; - default: - goto cp0_unimplemented; - } - break; - case 29: - switch (sel) { - case 0: - case 2: - case 4: - case 6: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_TagHi)); - rn = "TagHi"; - break; - case 1: - case 3: - case 5: - case 7: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DataHi)); - rn = "DataHi"; - break; - default: - goto cp0_unimplemented; - } - break; - case 30: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - rn = "ErrorEPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 31: - switch (sel) { - case 0: - /* EJTAG support */ 
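Every arm of the gen_mfc0 dispatch above is addressed by the (reg, sel) pair decoded from the instruction word. For reference, a hedged sketch of where those two fields live in the MIPS32 MFC0 encoding (the helper name is ours, not the source's):

    #include <stdint.h>

    /* MFC0 rt, rd, sel: rd names one of 32 CP0 registers and the low
     * three bits pick among up to eight selects of that register. */
    static void mfc0_fields(uint32_t insn, int *reg, int *sel)
    {
        *reg = (insn >> 11) & 0x1f;   /* rd field, bits 15:11 */
        *sel = insn & 0x7;            /* sel field, bits 2:0  */
    }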
- gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); - rn = "DESAVE"; - break; - case 2: case 3: case 4: case 5: case 6: case 7: - CP0_CHECK(ctx->kscrexist & (1 << sel)); - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, - offsetof(CPUMIPSState, CP0_KScratch[sel-2])); - tcg_gen_ext32s_tl(tcg_ctx, arg, arg); - rn = "KScratch"; - break; - default: - goto cp0_unimplemented; - } - break; - default: - goto cp0_unimplemented; - } - (void)rn; /* avoid a compiler warning */ - LOG_DISAS("mfc0 %s (reg %d sel %d)\n", rn, reg, sel); - return; - -cp0_unimplemented: - LOG_DISAS("mfc0 %s (reg %d sel %d)\n", rn, reg, sel); - gen_mfc0_unimplemented(ctx, arg); -} - -static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *rn = "invalid"; - - if (sel != 0) - check_insn(ctx, ISA_MIPS32); - - //if (use_icount) - // gen_io_start(); - - switch (reg) { - case 0: - switch (sel) { - case 0: - gen_helper_mtc0_index(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Index"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_mvpcontrol(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "MVPControl"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - /* ignored */ - rn = "MVPConf0"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - /* ignored */ - rn = "MVPConf1"; - break; - default: - goto cp0_unimplemented; - } - break; - case 1: - switch (sel) { - case 0: - /* ignored */ - rn = "Random"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "VPEControl"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "VPEConf0"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_vpeconf1(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "VPEConf1"; - break; - case 4: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_yqmask(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "YQMask"; - break; - case 5: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mtc0_store64(ctx, arg, offsetof(CPUMIPSState, CP0_VPESchedule)); - rn = "VPESchedule"; - break; - case 6: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mtc0_store64(ctx, arg, offsetof(CPUMIPSState, CP0_VPEScheFBack)); - rn = "VPEScheFBack"; - break; - case 7: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_vpeopt(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "VPEOpt"; - break; - default: - goto cp0_unimplemented; - } - break; - case 2: - switch (sel) { - case 0: - gen_helper_mtc0_entrylo0(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "EntryLo0"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCStatus"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCBind"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCRestart"; - break; - case 4: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCHalt"; - break; - case 5: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCContext"; - break; - case 6: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCSchedule"; - break; - case 7: - CP0_CHECK(ctx->insn_flags & ASE_MT); - 
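Where a CP0 write has no side effects, the store helpers do all the work; gen_mtc0_store64, used for VPESchedule and VPEScheFBack above, is just sign-extend-then-store. A one-line sketch of that rule (illustrative name, not the source's):

    #include <stdint.h>

    /* ext32s_tl followed by st_tl: the 32-bit MTC0 value is kept
     * sign-extended in the 64-bit architectural register. */
    static void mtc0_store64_rule(int64_t *cp0_reg, int64_t arg)
    {
        *cp0_reg = (int64_t)(int32_t)arg;
    }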
gen_helper_mtc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCScheFBack"; - break; - default: - goto cp0_unimplemented; - } - break; - case 3: - switch (sel) { - case 0: - gen_helper_mtc0_entrylo1(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "EntryLo1"; - break; - default: - goto cp0_unimplemented; - } - break; - case 4: - switch (sel) { - case 0: - gen_helper_mtc0_context(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Context"; - break; - case 1: -// gen_helper_mtc0_contextconfig(tcg_ctx->cpu_env, arg); /* SmartMIPS ASE */ - rn = "ContextConfig"; - goto cp0_unimplemented; -// break; - case 2: - CP0_CHECK(ctx->ulri); - tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, - offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); - rn = "UserLocal"; - break; - default: - goto cp0_unimplemented; - } - break; - case 5: - switch (sel) { - case 0: - gen_helper_mtc0_pagemask(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "PageMask"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_pagegrain(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "PageGrain"; - break; - default: - goto cp0_unimplemented; - } - break; - case 6: - switch (sel) { - case 0: - gen_helper_mtc0_wired(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Wired"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsconf0(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "SRSConf0"; - break; - case 2: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsconf1(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "SRSConf1"; - break; - case 3: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsconf2(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "SRSConf2"; - break; - case 4: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsconf3(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "SRSConf3"; - break; - case 5: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsconf4(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "SRSConf4"; - break; - default: - goto cp0_unimplemented; - } - break; - case 7: - switch (sel) { - case 0: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_hwrena(tcg_ctx, tcg_ctx->cpu_env, arg); - ctx->bstate = BS_STOP; - rn = "HWREna"; - break; - default: - goto cp0_unimplemented; - } - break; - case 8: - switch (sel) { - case 0: - /* ignored */ - rn = "BadVAddr"; - break; - case 1: - /* ignored */ - rn = "BadInstr"; - break; - case 2: - /* ignored */ - rn = "BadInstrP"; - break; - default: - goto cp0_unimplemented; - } - break; - case 9: - switch (sel) { - case 0: - gen_helper_mtc0_count(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Count"; - break; - /* 6,7 are implementation dependent */ - default: - goto cp0_unimplemented; - } - break; - case 10: - switch (sel) { - case 0: - gen_helper_mtc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "EntryHi"; - break; - default: - goto cp0_unimplemented; - } - break; - case 11: - switch (sel) { - case 0: - gen_helper_mtc0_compare(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Compare"; - break; - /* 6,7 are implementation dependent */ - default: - goto cp0_unimplemented; - } - break; - case 12: - switch (sel) { - case 0: - save_cpu_state(ctx, 1); - gen_helper_mtc0_status(tcg_ctx, tcg_ctx->cpu_env, arg); - /* BS_STOP isn't good enough here, hflags may have changed. 
*/ - gen_save_pc(ctx, ctx->pc + 4); - ctx->bstate = BS_EXCP; - rn = "Status"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_intctl(tcg_ctx, tcg_ctx->cpu_env, arg); - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "IntCtl"; - break; - case 2: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsctl(tcg_ctx, tcg_ctx->cpu_env, arg); - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "SRSCtl"; - break; - case 3: - check_insn(ctx, ISA_MIPS32R2); - gen_mtc0_store32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "SRSMap"; - break; - default: - goto cp0_unimplemented; - } - break; - case 13: - switch (sel) { - case 0: - save_cpu_state(ctx, 1); - gen_helper_mtc0_cause(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Cause"; - break; - default: - goto cp0_unimplemented; - } - break; - case 14: - switch (sel) { - case 0: - gen_mtc0_store64(ctx, arg, offsetof(CPUMIPSState, CP0_EPC)); - rn = "EPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 15: - switch (sel) { - case 0: - /* ignored */ - rn = "PRid"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_ebase(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "EBase"; - break; - default: - goto cp0_unimplemented; - } - break; - case 16: - switch (sel) { - case 0: - gen_helper_mtc0_config0(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Config"; - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - case 1: - /* ignored, read only */ - rn = "Config1"; - break; - case 2: - gen_helper_mtc0_config2(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Config2"; - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - case 3: - /* ignored, read only */ - rn = "Config3"; - break; - case 4: - gen_helper_mtc0_config4(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Config4"; - ctx->bstate = BS_STOP; - break; - case 5: - gen_helper_mtc0_config5(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Config5"; - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - /* 6,7 are implementation dependent */ - case 6: - /* ignored */ - rn = "Config6"; - break; - case 7: - /* ignored */ - rn = "Config7"; - break; - default: - rn = "Invalid config selector"; - goto cp0_unimplemented; - } - break; - case 17: - switch (sel) { - case 0: - gen_helper_mtc0_lladdr(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "LLAddr"; - break; - default: - goto cp0_unimplemented; - } - break; - case 18: - switch (sel) { - case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: - gen_helper_0e1i(tcg_ctx, mtc0_watchlo, arg, sel); - rn = "WatchLo"; - break; - default: - goto cp0_unimplemented; - } - break; - case 19: - switch (sel) { - case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: - gen_helper_0e1i(tcg_ctx, mtc0_watchhi, arg, sel); - rn = "WatchHi"; - break; - default: - goto cp0_unimplemented; - } - break; - case 20: - switch (sel) { - case 0: -#if defined(TARGET_MIPS64) - check_insn(ctx, ISA_MIPS3); - gen_helper_mtc0_xcontext(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "XContext"; - break; -#endif - default: - goto cp0_unimplemented; - } - break; - case 21: - /* Officially reserved, but sel 0 is used for R1x000 framemask */ - CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); - switch (sel) { - case 0: - gen_helper_mtc0_framemask(tcg_ctx, 
tcg_ctx->cpu_env, arg); - rn = "Framemask"; - break; - default: - goto cp0_unimplemented; - } - break; - case 22: - /* ignored */ - rn = "Diagnostic"; /* implementation dependent */ - break; - case 23: - switch (sel) { - case 0: - gen_helper_mtc0_debug(tcg_ctx, tcg_ctx->cpu_env, arg); /* EJTAG support */ - /* BS_STOP isn't good enough here, hflags may have changed. */ - gen_save_pc(ctx, ctx->pc + 4); - ctx->bstate = BS_EXCP; - rn = "Debug"; - break; - case 1: -// gen_helper_mtc0_tracecontrol(tcg_ctx->cpu_env, arg); /* PDtrace support */ - rn = "TraceControl"; - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; -// break; - case 2: -// gen_helper_mtc0_tracecontrol2(tcg_ctx->cpu_env, arg); /* PDtrace support */ - rn = "TraceControl2"; - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; -// break; - case 3: - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; -// gen_helper_mtc0_usertracedata(tcg_ctx->cpu_env, arg); /* PDtrace support */ - rn = "UserTraceData"; - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; -// break; - case 4: -// gen_helper_mtc0_tracebpc(tcg_ctx->cpu_env, arg); /* PDtrace support */ - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "TraceBPC"; -// break; - default: - goto cp0_unimplemented; - } - break; - case 24: - switch (sel) { - case 0: - /* EJTAG support */ - gen_mtc0_store64(ctx, arg, offsetof(CPUMIPSState, CP0_DEPC)); - rn = "DEPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 25: - switch (sel) { - case 0: - gen_helper_mtc0_performance0(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Performance0"; - break; - case 1: -// gen_helper_mtc0_performance1(arg); - rn = "Performance1"; -// break; - case 2: -// gen_helper_mtc0_performance2(arg); - rn = "Performance2"; -// break; - case 3: -// gen_helper_mtc0_performance3(arg); - rn = "Performance3"; -// break; - case 4: -// gen_helper_mtc0_performance4(arg); - rn = "Performance4"; -// break; - case 5: -// gen_helper_mtc0_performance5(arg); - rn = "Performance5"; -// break; - case 6: -// gen_helper_mtc0_performance6(arg); - rn = "Performance6"; -// break; - case 7: -// gen_helper_mtc0_performance7(arg); - rn = "Performance7"; -// break; - default: - goto cp0_unimplemented; - } - break; - case 26: - /* ignored */ - rn = "ECC"; - break; - case 27: - switch (sel) { - case 0: case 1: case 2: case 3: - /* ignored */ - rn = "CacheErr"; - break; - default: - goto cp0_unimplemented; - } - break; - case 28: - switch (sel) { - case 0: - case 2: - case 4: - case 6: - gen_helper_mtc0_taglo(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TagLo"; - break; - case 1: - case 3: - case 5: - case 7: - gen_helper_mtc0_datalo(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "DataLo"; - break; - default: - goto cp0_unimplemented; - } - break; - case 29: - switch (sel) { - case 0: - case 2: - case 4: - case 6: - gen_helper_mtc0_taghi(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TagHi"; - break; - case 1: - case 3: - case 5: - case 7: - gen_helper_mtc0_datahi(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "DataHi"; - break; - default: - rn = "invalid sel"; - goto cp0_unimplemented; - } - break; - case 30: - switch (sel) { - case 0: - gen_mtc0_store64(ctx, arg, offsetof(CPUMIPSState, CP0_ErrorEPC)); - rn = "ErrorEPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 31: - switch (sel) { - case 0: - /* EJTAG support */ - 
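A pattern worth naming in the mtc0 cases above: a write that can merely change execution mode stops translation after the current instruction (BS_STOP), while a write that changes hflags immediately, such as Status or Debug, saves the PC and leaves through the exception path (BS_EXCP), because the already-translated tail of the block would be stale. A condensed, illustrative sketch of that policy (enum and function names are ours):

    enum tb_end { KEEP_GOING, STOP_TB, EXIT_VIA_EXCP };

    static enum tb_end mtc0_end_policy(int changes_hflags_now,
                                       int may_change_mode)
    {
        if (changes_hflags_now) {
            return EXIT_VIA_EXCP;   /* e.g. Status, Debug */
        }
        if (may_change_mode) {
            return STOP_TB;         /* e.g. Config0, IntCtl, SRSCtl */
        }
        return KEEP_GOING;          /* plain data registers */
    }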
gen_mtc0_store32(ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); - rn = "DESAVE"; - break; - case 2: case 3: case 4: case 5: case 6: case 7: - CP0_CHECK(ctx->kscrexist & (1 << sel)); - tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, - offsetof(CPUMIPSState, CP0_KScratch[sel-2])); - rn = "KScratch"; - break; - default: - goto cp0_unimplemented; - } - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - default: - goto cp0_unimplemented; - } - (void)rn; /* avoid a compiler warning */ - LOG_DISAS("mtc0 %s (reg %d sel %d)\n", rn, reg, sel); - /* For simplicity assume that all writes can cause interrupts. */ - //if (use_icount) { - // gen_io_end(); - // ctx->bstate = BS_STOP; - //} - return; - -cp0_unimplemented: - LOG_DISAS("mtc0 %s (reg %d sel %d)\n", rn, reg, sel); -} - -#if defined(TARGET_MIPS64) -static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *rn = "invalid"; - - if (sel != 0) - check_insn(ctx, ISA_MIPS64); - - switch (reg) { - case 0: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Index)); - rn = "Index"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_mvpcontrol(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "MVPControl"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_mvpconf0(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "MVPConf0"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_mvpconf1(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "MVPConf1"; - break; - default: - goto cp0_unimplemented; - } - break; - case 1: - switch (sel) { - case 0: - CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); - gen_helper_mfc0_random(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "Random"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEControl)); - rn = "VPEControl"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf0)); - rn = "VPEConf0"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf1)); - rn = "VPEConf1"; - break; - case 4: - CP0_CHECK(ctx->insn_flags & ASE_MT); - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_YQMask)); - rn = "YQMask"; - break; - case 5: - CP0_CHECK(ctx->insn_flags & ASE_MT); - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule)); - rn = "VPESchedule"; - break; - case 6: - CP0_CHECK(ctx->insn_flags & ASE_MT); - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); - rn = "VPEScheFBack"; - break; - case 7: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEOpt)); - rn = "VPEOpt"; - break; - default: - goto cp0_unimplemented; - } - break; - case 2: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0)); - rn = "EntryLo0"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_tcstatus(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCStatus"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mfc0_tcbind(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCBind"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_dmfc0_tcrestart(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCRestart"; - break; - case 4: - CP0_CHECK(ctx->insn_flags 
& ASE_MT); - gen_helper_dmfc0_tchalt(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCHalt"; - break; - case 5: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_dmfc0_tccontext(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCContext"; - break; - case 6: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_dmfc0_tcschedule(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCSchedule"; - break; - case 7: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_dmfc0_tcschefback(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "TCScheFBack"; - break; - default: - goto cp0_unimplemented; - } - break; - case 3: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1)); - rn = "EntryLo1"; - break; - default: - goto cp0_unimplemented; - } - break; - case 4: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_Context)); - rn = "Context"; - break; - case 1: -// gen_helper_dmfc0_contextconfig(arg); /* SmartMIPS ASE */ - rn = "ContextConfig"; - goto cp0_unimplemented; -// break; - case 2: - CP0_CHECK(ctx->ulri); - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, - offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); - rn = "UserLocal"; - break; - default: - goto cp0_unimplemented; - } - break; - case 5: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PageMask)); - rn = "PageMask"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PageGrain)); - rn = "PageGrain"; - break; - default: - goto cp0_unimplemented; - } - break; - case 6: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Wired)); - rn = "Wired"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf0)); - rn = "SRSConf0"; - break; - case 2: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf1)); - rn = "SRSConf1"; - break; - case 3: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf2)); - rn = "SRSConf2"; - break; - case 4: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf3)); - rn = "SRSConf3"; - break; - case 5: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf4)); - rn = "SRSConf4"; - break; - default: - goto cp0_unimplemented; - } - break; - case 7: - switch (sel) { - case 0: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_HWREna)); - rn = "HWREna"; - break; - default: - goto cp0_unimplemented; - } - break; - case 8: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); - rn = "BadVAddr"; - break; - case 1: - CP0_CHECK(ctx->bi); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_BadInstr)); - rn = "BadInstr"; - break; - case 2: - CP0_CHECK(ctx->bp); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrP)); - rn = "BadInstrP"; - break; - default: - goto cp0_unimplemented; - } - break; - case 9: - switch (sel) { - case 0: - /* Mark as an IO operation because we read the time. */ - //if (use_icount) - // gen_io_start(); - gen_helper_mfc0_count(tcg_ctx, arg, tcg_ctx->cpu_env); - //if (use_icount) { - // gen_io_end(); - //} - /* Break the TB to be able to take timer interrupts immediately - after reading count. 
*/ - ctx->bstate = BS_STOP; - rn = "Count"; - break; - /* 6,7 are implementation dependent */ - default: - goto cp0_unimplemented; - } - break; - case 10: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryHi)); - rn = "EntryHi"; - break; - default: - goto cp0_unimplemented; - } - break; - case 11: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Compare)); - rn = "Compare"; - break; - /* 6,7 are implementation dependent */ - default: - goto cp0_unimplemented; - } - break; - case 12: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Status)); - rn = "Status"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_IntCtl)); - rn = "IntCtl"; - break; - case 2: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSCtl)); - rn = "SRSCtl"; - break; - case 3: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); - rn = "SRSMap"; - break; - default: - goto cp0_unimplemented; - } - break; - case 13: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Cause)); - rn = "Cause"; - break; - default: - goto cp0_unimplemented; - } - break; - case 14: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); - rn = "EPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 15: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PRid)); - rn = "PRid"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_EBase)); - rn = "EBase"; - break; - default: - goto cp0_unimplemented; - } - break; - case 16: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config0)); - rn = "Config"; - break; - case 1: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config1)); - rn = "Config1"; - break; - case 2: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config2)); - rn = "Config2"; - break; - case 3: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config3)); - rn = "Config3"; - break; - case 4: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config4)); - rn = "Config4"; - break; - case 5: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config5)); - rn = "Config5"; - break; - /* 6,7 are implementation dependent */ - case 6: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config6)); - rn = "Config6"; - break; - case 7: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config7)); - rn = "Config7"; - break; - default: - goto cp0_unimplemented; - } - break; - case 17: - switch (sel) { - case 0: - gen_helper_dmfc0_lladdr(tcg_ctx, arg, tcg_ctx->cpu_env); - rn = "LLAddr"; - break; - default: - goto cp0_unimplemented; - } - break; - case 18: - switch (sel) { - case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: - gen_helper_1e0i(tcg_ctx, dmfc0_watchlo, arg, sel); - rn = "WatchLo"; - break; - default: - goto cp0_unimplemented; - } - break; - case 19: - switch (sel) { - case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: - gen_helper_1e0i(tcg_ctx, mfc0_watchhi, arg, sel); - rn = "WatchHi"; - break; - default: - goto cp0_unimplemented; - } - break; - case 20: - switch (sel) { - case 0: - check_insn(ctx, ISA_MIPS3); - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_XContext)); - rn = 
"XContext"; - break; - default: - goto cp0_unimplemented; - } - break; - case 21: - /* Officially reserved, but sel 0 is used for R1x000 framemask */ - CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Framemask)); - rn = "Framemask"; - break; - default: - goto cp0_unimplemented; - } - break; - case 22: - tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ - rn = "'Diagnostic"; /* implementation dependent */ - break; - case 23: - switch (sel) { - case 0: - gen_helper_mfc0_debug(tcg_ctx, arg, tcg_ctx->cpu_env); /* EJTAG support */ - rn = "Debug"; - break; - case 1: -// gen_helper_dmfc0_tracecontrol(arg, tcg_ctx->cpu_env); /* PDtrace support */ - rn = "TraceControl"; -// break; - case 2: -// gen_helper_dmfc0_tracecontrol2(arg, tcg_ctx->cpu_env); /* PDtrace support */ - rn = "TraceControl2"; -// break; - case 3: -// gen_helper_dmfc0_usertracedata(arg, tcg_ctx->cpu_env); /* PDtrace support */ - rn = "UserTraceData"; -// break; - case 4: -// gen_helper_dmfc0_tracebpc(arg, tcg_ctx->cpu_env); /* PDtrace support */ - rn = "TraceBPC"; -// break; - default: - goto cp0_unimplemented; - } - break; - case 24: - switch (sel) { - case 0: - /* EJTAG support */ - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); - rn = "DEPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 25: - switch (sel) { - case 0: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Performance0)); - rn = "Performance0"; - break; - case 1: -// gen_helper_dmfc0_performance1(arg); - rn = "Performance1"; -// break; - case 2: -// gen_helper_dmfc0_performance2(arg); - rn = "Performance2"; -// break; - case 3: -// gen_helper_dmfc0_performance3(arg); - rn = "Performance3"; -// break; - case 4: -// gen_helper_dmfc0_performance4(arg); - rn = "Performance4"; -// break; - case 5: -// gen_helper_dmfc0_performance5(arg); - rn = "Performance5"; -// break; - case 6: -// gen_helper_dmfc0_performance6(arg); - rn = "Performance6"; -// break; - case 7: -// gen_helper_dmfc0_performance7(arg); - rn = "Performance7"; -// break; - default: - goto cp0_unimplemented; - } - break; - case 26: - tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ - rn = "ECC"; - break; - case 27: - switch (sel) { - /* ignored */ - case 0: case 1: case 2: case 3: - tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ - rn = "CacheErr"; - break; - default: - goto cp0_unimplemented; - } - break; - case 28: - switch (sel) { - case 0: - case 2: - case 4: - case 6: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_TagLo)); - rn = "TagLo"; - break; - case 1: - case 3: - case 5: - case 7: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DataLo)); - rn = "DataLo"; - break; - default: - goto cp0_unimplemented; - } - break; - case 29: - switch (sel) { - case 0: - case 2: - case 4: - case 6: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_TagHi)); - rn = "TagHi"; - break; - case 1: - case 3: - case 5: - case 7: - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DataHi)); - rn = "DataHi"; - break; - default: - goto cp0_unimplemented; - } - break; - case 30: - switch (sel) { - case 0: - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); - rn = "ErrorEPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 31: - switch (sel) { - case 0: - /* EJTAG support */ - gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); - rn = "DESAVE"; - break; - case 2: case 3: case 4: case 5: case 6: 
case 7: - CP0_CHECK(ctx->kscrexist & (1 << sel)); - tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, - offsetof(CPUMIPSState, CP0_KScratch[sel-2])); - rn = "KScratch"; - break; - default: - goto cp0_unimplemented; - } - break; - default: - goto cp0_unimplemented; - } - (void)rn; /* avoid a compiler warning */ - LOG_DISAS("dmfc0 %s (reg %d sel %d)\n", rn, reg, sel); - return; - -cp0_unimplemented: - LOG_DISAS("dmfc0 %s (reg %d sel %d)\n", rn, reg, sel); - gen_mfc0_unimplemented(ctx, arg); -} - -static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *rn = "invalid"; - - if (sel != 0) - check_insn(ctx, ISA_MIPS64); - - //if (use_icount) - // gen_io_start(); - - switch (reg) { - case 0: - switch (sel) { - case 0: - gen_helper_mtc0_index(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Index"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_mvpcontrol(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "MVPControl"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - /* ignored */ - rn = "MVPConf0"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - /* ignored */ - rn = "MVPConf1"; - break; - default: - goto cp0_unimplemented; - } - break; - case 1: - switch (sel) { - case 0: - /* ignored */ - rn = "Random"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "VPEControl"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "VPEConf0"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_vpeconf1(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "VPEConf1"; - break; - case 4: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_yqmask(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "YQMask"; - break; - case 5: - CP0_CHECK(ctx->insn_flags & ASE_MT); - tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule)); - rn = "VPESchedule"; - break; - case 6: - CP0_CHECK(ctx->insn_flags & ASE_MT); - tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); - rn = "VPEScheFBack"; - break; - case 7: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_vpeopt(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "VPEOpt"; - break; - default: - goto cp0_unimplemented; - } - break; - case 2: - switch (sel) { - case 0: - gen_helper_dmtc0_entrylo0(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "EntryLo0"; - break; - case 1: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCStatus"; - break; - case 2: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCBind"; - break; - case 3: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCRestart"; - break; - case 4: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCHalt"; - break; - case 5: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCContext"; - break; - case 6: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCSchedule"; - break; - case 7: - CP0_CHECK(ctx->insn_flags & ASE_MT); - gen_helper_mtc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TCScheFBack"; - break; - default: - goto cp0_unimplemented; - } - break; - case 3: - 
switch (sel) { - case 0: - gen_helper_dmtc0_entrylo1(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "EntryLo1"; - break; - default: - goto cp0_unimplemented; - } - break; - case 4: - switch (sel) { - case 0: - gen_helper_mtc0_context(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Context"; - break; - case 1: -// gen_helper_mtc0_contextconfig(tcg_ctx->cpu_env, arg); /* SmartMIPS ASE */ - rn = "ContextConfig"; - goto cp0_unimplemented; -// break; - case 2: - CP0_CHECK(ctx->ulri); - tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, - offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); - rn = "UserLocal"; - break; - default: - goto cp0_unimplemented; - } - break; - case 5: - switch (sel) { - case 0: - gen_helper_mtc0_pagemask(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "PageMask"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_pagegrain(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "PageGrain"; - break; - default: - goto cp0_unimplemented; - } - break; - case 6: - switch (sel) { - case 0: - gen_helper_mtc0_wired(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Wired"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsconf0(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "SRSConf0"; - break; - case 2: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsconf1(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "SRSConf1"; - break; - case 3: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsconf2(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "SRSConf2"; - break; - case 4: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsconf3(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "SRSConf3"; - break; - case 5: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsconf4(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "SRSConf4"; - break; - default: - goto cp0_unimplemented; - } - break; - case 7: - switch (sel) { - case 0: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_hwrena(tcg_ctx, tcg_ctx->cpu_env, arg); - ctx->bstate = BS_STOP; - rn = "HWREna"; - break; - default: - goto cp0_unimplemented; - } - break; - case 8: - switch (sel) { - case 0: - /* ignored */ - rn = "BadVAddr"; - break; - case 1: - /* ignored */ - rn = "BadInstr"; - break; - case 2: - /* ignored */ - rn = "BadInstrP"; - break; - default: - goto cp0_unimplemented; - } - break; - case 9: - switch (sel) { - case 0: - gen_helper_mtc0_count(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Count"; - break; - /* 6,7 are implementation dependent */ - default: - goto cp0_unimplemented; - } - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - case 10: - switch (sel) { - case 0: - gen_helper_mtc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "EntryHi"; - break; - default: - goto cp0_unimplemented; - } - break; - case 11: - switch (sel) { - case 0: - gen_helper_mtc0_compare(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Compare"; - break; - /* 6,7 are implementation dependent */ - default: - goto cp0_unimplemented; - } - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - case 12: - switch (sel) { - case 0: - save_cpu_state(ctx, 1); - gen_helper_mtc0_status(tcg_ctx, tcg_ctx->cpu_env, arg); - /* BS_STOP isn't good enough here, hflags may have changed. 
*/ - gen_save_pc(ctx, ctx->pc + 4); - ctx->bstate = BS_EXCP; - rn = "Status"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_intctl(tcg_ctx, tcg_ctx->cpu_env, arg); - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "IntCtl"; - break; - case 2: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_srsctl(tcg_ctx, tcg_ctx->cpu_env, arg); - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "SRSCtl"; - break; - case 3: - check_insn(ctx, ISA_MIPS32R2); - gen_mtc0_store32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "SRSMap"; - break; - default: - goto cp0_unimplemented; - } - break; - case 13: - switch (sel) { - case 0: - save_cpu_state(ctx, 1); - /* Mark as an IO operation because we may trigger a software - interrupt. */ - //if (use_icount) { - // gen_io_start(); - //} - gen_helper_mtc0_cause(tcg_ctx, tcg_ctx->cpu_env, arg); - //if (use_icount) { - // gen_io_end(); - //} - /* Stop translation as we may have triggered an interrupt */ - ctx->bstate = BS_STOP; - rn = "Cause"; - break; - default: - goto cp0_unimplemented; - } - break; - case 14: - switch (sel) { - case 0: - tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); - rn = "EPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 15: - switch (sel) { - case 0: - /* ignored */ - rn = "PRid"; - break; - case 1: - check_insn(ctx, ISA_MIPS32R2); - gen_helper_mtc0_ebase(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "EBase"; - break; - default: - goto cp0_unimplemented; - } - break; - case 16: - switch (sel) { - case 0: - gen_helper_mtc0_config0(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Config"; - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - case 1: - /* ignored, read only */ - rn = "Config1"; - break; - case 2: - gen_helper_mtc0_config2(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Config2"; - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - case 3: - /* ignored */ - rn = "Config3"; - break; - case 4: - /* currently ignored */ - rn = "Config4"; - break; - case 5: - gen_helper_mtc0_config5(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Config5"; - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - /* 6,7 are implementation dependent */ - default: - rn = "Invalid config selector"; - goto cp0_unimplemented; - } - break; - case 17: - switch (sel) { - case 0: - gen_helper_mtc0_lladdr(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "LLAddr"; - break; - default: - goto cp0_unimplemented; - } - break; - case 18: - switch (sel) { - case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: - gen_helper_0e1i(tcg_ctx, mtc0_watchlo, arg, sel); - rn = "WatchLo"; - break; - default: - goto cp0_unimplemented; - } - break; - case 19: - switch (sel) { - case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: - gen_helper_0e1i(tcg_ctx, mtc0_watchhi, arg, sel); - rn = "WatchHi"; - break; - default: - goto cp0_unimplemented; - } - break; - case 20: - switch (sel) { - case 0: - check_insn(ctx, ISA_MIPS3); - gen_helper_mtc0_xcontext(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "XContext"; - break; - default: - goto cp0_unimplemented; - } - break; - case 21: - /* Officially reserved, but sel 0 is used for R1x000 framemask */ - CP0_CHECK(!(ctx->insn_flags &
ISA_MIPS32R6)); - switch (sel) { - case 0: - gen_helper_mtc0_framemask(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Framemask"; - break; - default: - goto cp0_unimplemented; - } - break; - case 22: - /* ignored */ - rn = "Diagnostic"; /* implementation dependent */ - break; - case 23: - switch (sel) { - case 0: - gen_helper_mtc0_debug(tcg_ctx, tcg_ctx->cpu_env, arg); /* EJTAG support */ - /* BS_STOP isn't good enough here, hflags may have changed. */ - gen_save_pc(ctx, ctx->pc + 4); - ctx->bstate = BS_EXCP; - rn = "Debug"; - break; - case 1: -// gen_helper_mtc0_tracecontrol(tcg_ctx->cpu_env, arg); /* PDtrace support */ - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "TraceControl"; -// break; - case 2: -// gen_helper_mtc0_tracecontrol2(tcg_ctx->cpu_env, arg); /* PDtrace support */ - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "TraceControl2"; -// break; - case 3: -// gen_helper_mtc0_usertracedata(tcg_ctx->cpu_env, arg); /* PDtrace support */ - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "UserTraceData"; -// break; - case 4: -// gen_helper_mtc0_tracebpc(tcg_ctx->cpu_env, arg); /* PDtrace support */ - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - rn = "TraceBPC"; -// break; - default: - goto cp0_unimplemented; - } - break; - case 24: - switch (sel) { - case 0: - /* EJTAG support */ - tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); - rn = "DEPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 25: - switch (sel) { - case 0: - gen_helper_mtc0_performance0(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "Performance0"; - break; - case 1: -// gen_helper_mtc0_performance1(tcg_ctx->cpu_env, arg); - rn = "Performance1"; -// break; - case 2: -// gen_helper_mtc0_performance2(tcg_ctx->cpu_env, arg); - rn = "Performance2"; -// break; - case 3: -// gen_helper_mtc0_performance3(tcg_ctx->cpu_env, arg); - rn = "Performance3"; -// break; - case 4: -// gen_helper_mtc0_performance4(tcg_ctx->cpu_env, arg); - rn = "Performance4"; -// break; - case 5: -// gen_helper_mtc0_performance5(tcg_ctx->cpu_env, arg); - rn = "Performance5"; -// break; - case 6: -// gen_helper_mtc0_performance6(tcg_ctx->cpu_env, arg); - rn = "Performance6"; -// break; - case 7: -// gen_helper_mtc0_performance7(tcg_ctx->cpu_env, arg); - rn = "Performance7"; -// break; - default: - goto cp0_unimplemented; - } - break; - case 26: - /* ignored */ - rn = "ECC"; - break; - case 27: - switch (sel) { - case 0: case 1: case 2: case 3: - /* ignored */ - rn = "CacheErr"; - break; - default: - goto cp0_unimplemented; - } - break; - case 28: - switch (sel) { - case 0: - case 2: - case 4: - case 6: - gen_helper_mtc0_taglo(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TagLo"; - break; - case 1: - case 3: - case 5: - case 7: - gen_helper_mtc0_datalo(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "DataLo"; - break; - default: - goto cp0_unimplemented; - } - break; - case 29: - switch (sel) { - case 0: - case 2: - case 4: - case 6: - gen_helper_mtc0_taghi(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "TagHi"; - break; - case 1: - case 3: - case 5: - case 7: - gen_helper_mtc0_datahi(tcg_ctx, tcg_ctx->cpu_env, arg); - rn = "DataHi"; - break; - default: - rn = "invalid sel"; - goto cp0_unimplemented; - } - break; - case 30: - switch (sel) { - case 0: - tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); - 
rn = "ErrorEPC"; - break; - default: - goto cp0_unimplemented; - } - break; - case 31: - switch (sel) { - case 0: - /* EJTAG support */ - gen_mtc0_store32(ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); - rn = "DESAVE"; - break; - case 2: case 3: case 4: case 5: case 6: case 7: - CP0_CHECK(ctx->kscrexist & (1 << sel)); - tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, - offsetof(CPUMIPSState, CP0_KScratch[sel-2])); - rn = "KScratch"; - break; - default: - goto cp0_unimplemented; - } - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - default: - goto cp0_unimplemented; - } - (void)rn; /* avoid a compiler warning */ - LOG_DISAS("dmtc0 %s (reg %d sel %d)\n", rn, reg, sel); - /* For simplicity assume that all writes can cause interrupts. */ - //if (use_icount) { - // gen_io_end(); - // ctx->bstate = BS_STOP; - //} - return; - -cp0_unimplemented: - LOG_DISAS("dmtc0 %s (reg %d sel %d)\n", rn, reg, sel); -} -#endif /* TARGET_MIPS64 */ - -static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, - int u, int sel, int h) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - TCGv t0 = tcg_temp_local_new(tcg_ctx); - - if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 && - ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) != - (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE)))) - tcg_gen_movi_tl(tcg_ctx, t0, -1); - else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) > - (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC))) - tcg_gen_movi_tl(tcg_ctx, t0, -1); - else if (u == 0) { - switch (rt) { - case 1: - switch (sel) { - case 1: - gen_helper_mftc0_vpecontrol(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - case 2: - gen_helper_mftc0_vpeconf0(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - default: - goto die; - break; - } - break; - case 2: - switch (sel) { - case 1: - gen_helper_mftc0_tcstatus(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - case 2: - gen_helper_mftc0_tcbind(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - case 3: - gen_helper_mftc0_tcrestart(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - case 4: - gen_helper_mftc0_tchalt(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - case 5: - gen_helper_mftc0_tccontext(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - case 6: - gen_helper_mftc0_tcschedule(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - case 7: - gen_helper_mftc0_tcschefback(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - default: - gen_mfc0(ctx, t0, rt, sel); - break; - } - break; - case 10: - switch (sel) { - case 0: - gen_helper_mftc0_entryhi(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - default: - gen_mfc0(ctx, t0, rt, sel); - break; - } - case 12: - switch (sel) { - case 0: - gen_helper_mftc0_status(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - default: - gen_mfc0(ctx, t0, rt, sel); - break; - } - case 13: - switch (sel) { - case 0: - gen_helper_mftc0_cause(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - default: - goto die; - break; - } - break; - case 14: - switch (sel) { - case 0: - gen_helper_mftc0_epc(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - default: - goto die; - break; - } - break; - case 15: - switch (sel) { - case 1: - gen_helper_mftc0_ebase(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - default: - goto die; - break; - } - break; - case 16: - switch (sel) { - case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: - gen_helper_mftc0_configx(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_const_tl(tcg_ctx, sel)); - break; - default: - goto die; - break; - } - break; - case 23: - switch (sel) 
{ - case 0: - gen_helper_mftc0_debug(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - default: - gen_mfc0(ctx, t0, rt, sel); - break; - } - break; - default: - gen_mfc0(ctx, t0, rt, sel); - } - } else switch (sel) { - /* GPR registers. */ - case 0: - gen_helper_1e0i(tcg_ctx, mftgpr, t0, rt); - break; - /* Auxiliary CPU registers */ - case 1: - switch (rt) { - case 0: - gen_helper_1e0i(tcg_ctx, mftlo, t0, 0); - break; - case 1: - gen_helper_1e0i(tcg_ctx, mfthi, t0, 0); - break; - case 2: - gen_helper_1e0i(tcg_ctx, mftacx, t0, 0); - break; - case 4: - gen_helper_1e0i(tcg_ctx, mftlo, t0, 1); - break; - case 5: - gen_helper_1e0i(tcg_ctx, mfthi, t0, 1); - break; - case 6: - gen_helper_1e0i(tcg_ctx, mftacx, t0, 1); - break; - case 8: - gen_helper_1e0i(tcg_ctx, mftlo, t0, 2); - break; - case 9: - gen_helper_1e0i(tcg_ctx, mfthi, t0, 2); - break; - case 10: - gen_helper_1e0i(tcg_ctx, mftacx, t0, 2); - break; - case 12: - gen_helper_1e0i(tcg_ctx, mftlo, t0, 3); - break; - case 13: - gen_helper_1e0i(tcg_ctx, mfthi, t0, 3); - break; - case 14: - gen_helper_1e0i(tcg_ctx, mftacx, t0, 3); - break; - case 16: - gen_helper_mftdsp(tcg_ctx, t0, tcg_ctx->cpu_env); - break; - default: - goto die; - } - break; - /* Floating point (COP1). */ - case 2: - /* XXX: For now we support only a single FPU context. */ - if (h == 0) { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, rt); - tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); - tcg_temp_free_i32(tcg_ctx, fp0); - } else { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32h(ctx, fp0, rt); - tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); - tcg_temp_free_i32(tcg_ctx, fp0); - } - break; - case 3: - /* XXX: For now we support only a single FPU context. */ - gen_helper_1e0i(tcg_ctx, cfc1, t0, rt); - break; - /* COP2: Not implemented. 
*/ - case 4: - case 5: - /* fall through */ - default: - goto die; - } - LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h); - gen_store_gpr(tcg_ctx, t0, rd); - tcg_temp_free(tcg_ctx, t0); - return; - -die: - tcg_temp_free(tcg_ctx, t0); - LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h); - generate_exception(ctx, EXCP_RI); -} - -static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt, - int u, int sel, int h) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); - TCGv t0 = tcg_temp_local_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rt); - if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 && - ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) != - (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE)))) - /* NOP */ ; - else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) > - (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC))) - /* NOP */ ; - else if (u == 0) { - switch (rd) { - case 1: - switch (sel) { - case 1: - gen_helper_mttc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - case 2: - gen_helper_mttc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - default: - goto die; - break; - } - break; - case 2: - switch (sel) { - case 1: - gen_helper_mttc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - case 2: - gen_helper_mttc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - case 3: - gen_helper_mttc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - case 4: - gen_helper_mttc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - case 5: - gen_helper_mttc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - case 6: - gen_helper_mttc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - case 7: - gen_helper_mttc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - default: - gen_mtc0(ctx, t0, rd, sel); - break; - } - break; - case 10: - switch (sel) { - case 0: - gen_helper_mttc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - default: - gen_mtc0(ctx, t0, rd, sel); - break; - } - case 12: - switch (sel) { - case 0: - gen_helper_mttc0_status(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - default: - gen_mtc0(ctx, t0, rd, sel); - break; - } - case 13: - switch (sel) { - case 0: - gen_helper_mttc0_cause(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - default: - goto die; - break; - } - break; - case 15: - switch (sel) { - case 1: - gen_helper_mttc0_ebase(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - default: - goto die; - break; - } - break; - case 23: - switch (sel) { - case 0: - gen_helper_mttc0_debug(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - default: - gen_mtc0(ctx, t0, rd, sel); - break; - } - break; - default: - gen_mtc0(ctx, t0, rd, sel); - } - } else switch (sel) { - /* GPR registers. 
*/ - case 0: - gen_helper_0e1i(tcg_ctx, mttgpr, t0, rd); - break; - /* Auxiliary CPU registers */ - case 1: - switch (rd) { - case 0: - gen_helper_0e1i(tcg_ctx, mttlo, t0, 0); - break; - case 1: - gen_helper_0e1i(tcg_ctx, mtthi, t0, 0); - break; - case 2: - gen_helper_0e1i(tcg_ctx, mttacx, t0, 0); - break; - case 4: - gen_helper_0e1i(tcg_ctx, mttlo, t0, 1); - break; - case 5: - gen_helper_0e1i(tcg_ctx, mtthi, t0, 1); - break; - case 6: - gen_helper_0e1i(tcg_ctx, mttacx, t0, 1); - break; - case 8: - gen_helper_0e1i(tcg_ctx, mttlo, t0, 2); - break; - case 9: - gen_helper_0e1i(tcg_ctx, mtthi, t0, 2); - break; - case 10: - gen_helper_0e1i(tcg_ctx, mttacx, t0, 2); - break; - case 12: - gen_helper_0e1i(tcg_ctx, mttlo, t0, 3); - break; - case 13: - gen_helper_0e1i(tcg_ctx, mtthi, t0, 3); - break; - case 14: - gen_helper_0e1i(tcg_ctx, mttacx, t0, 3); - break; - case 16: - gen_helper_mttdsp(tcg_ctx, tcg_ctx->cpu_env, t0); - break; - default: - goto die; - } - break; - /* Floating point (COP1). */ - case 2: - /* XXX: For now we support only a single FPU context. */ - if (h == 0) { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); - gen_store_fpr32(ctx, fp0, rd); - tcg_temp_free_i32(tcg_ctx, fp0); - } else { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); - gen_store_fpr32h(ctx, fp0, rd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - break; - case 3: - /* XXX: For now we support only a single FPU context. */ - save_cpu_state(ctx, 1); - { - TCGv_i32 fs_tmp = tcg_const_i32(tcg_ctx, rd); - - gen_helper_0e2i(tcg_ctx, ctc1, t0, fs_tmp, rt); - tcg_temp_free_i32(tcg_ctx, fs_tmp); - } - /* Stop translation as we may have changed hflags */ - ctx->bstate = BS_STOP; - break; - /* COP2: Not implemented. */ - case 4: - case 5: - /* fall through */ - default: - goto die; - } - LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h); - tcg_temp_free(tcg_ctx, t0); - return; - -die: - tcg_temp_free(tcg_ctx, t0); - LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h); - generate_exception(ctx, EXCP_RI); -} - -static void gen_cp0 (CPUMIPSState *env, DisasContext *ctx, uint32_t opc, int rt, int rd) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "ldst"; - - check_cp0_enabled(ctx); - switch (opc) { - case OPC_MFC0: - if (rt == 0) { - /* Treat as NOP. */ - return; - } - gen_mfc0(ctx, *cpu_gpr[rt], rd, ctx->opcode & 0x7); - opn = "mfc0"; - break; - case OPC_MTC0: - { - TCGv t0 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rt); - gen_mtc0(ctx, t0, rd, ctx->opcode & 0x7); - tcg_temp_free(tcg_ctx, t0); - } - opn = "mtc0"; - break; -#if defined(TARGET_MIPS64) - case OPC_DMFC0: - check_insn(ctx, ISA_MIPS3); - if (rt == 0) { - /* Treat as NOP. */ - return; - } - gen_dmfc0(ctx, *cpu_gpr[rt], rd, ctx->opcode & 0x7); - opn = "dmfc0"; - break; - case OPC_DMTC0: - check_insn(ctx, ISA_MIPS3); - { - TCGv t0 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rt); - gen_dmtc0(ctx, t0, rd, ctx->opcode & 0x7); - tcg_temp_free(tcg_ctx, t0); - } - opn = "dmtc0"; - break; -#endif - case OPC_MFTR: - check_insn(ctx, ASE_MT); - if (rd == 0) { - /* Treat as NOP. 
*/ - return; - } - gen_mftr(env, ctx, rt, rd, (ctx->opcode >> 5) & 1, - ctx->opcode & 0x7, (ctx->opcode >> 4) & 1); - opn = "mftr"; - break; - case OPC_MTTR: - check_insn(ctx, ASE_MT); - gen_mttr(env, ctx, rd, rt, (ctx->opcode >> 5) & 1, - ctx->opcode & 0x7, (ctx->opcode >> 4) & 1); - opn = "mttr"; - break; - case OPC_TLBWI: - opn = "tlbwi"; - if (!env->tlb->helper_tlbwi) - goto die; - gen_helper_tlbwi(tcg_ctx, tcg_ctx->cpu_env); - break; - case OPC_TLBINV: - opn = "tlbinv"; - if (ctx->ie >= 2) { - if (!env->tlb->helper_tlbinv) { - goto die; - } - gen_helper_tlbinv(tcg_ctx, tcg_ctx->cpu_env); - } /* treat as nop if TLBINV not supported */ - break; - case OPC_TLBINVF: - opn = "tlbinvf"; - if (ctx->ie >= 2) { - if (!env->tlb->helper_tlbinvf) { - goto die; - } - gen_helper_tlbinvf(tcg_ctx, tcg_ctx->cpu_env); - } /* treat as nop if TLBINV not supported */ - break; - case OPC_TLBWR: - opn = "tlbwr"; - if (!env->tlb->helper_tlbwr) - goto die; - gen_helper_tlbwr(tcg_ctx, tcg_ctx->cpu_env); - break; - case OPC_TLBP: - opn = "tlbp"; - if (!env->tlb->helper_tlbp) - goto die; - gen_helper_tlbp(tcg_ctx, tcg_ctx->cpu_env); - break; - case OPC_TLBR: - opn = "tlbr"; - if (!env->tlb->helper_tlbr) - goto die; - gen_helper_tlbr(tcg_ctx, tcg_ctx->cpu_env); - break; - case OPC_ERET: - opn = "eret"; - check_insn(ctx, ISA_MIPS2); - if ((ctx->insn_flags & ISA_MIPS32R6) && - (ctx->hflags & MIPS_HFLAG_BMASK)) { - MIPS_DEBUG("CTI in delay / forbidden slot"); - goto die; - } - gen_helper_eret(tcg_ctx, tcg_ctx->cpu_env); - ctx->bstate = BS_EXCP; - break; - case OPC_DERET: - opn = "deret"; - check_insn(ctx, ISA_MIPS32); - if ((ctx->insn_flags & ISA_MIPS32R6) && - (ctx->hflags & MIPS_HFLAG_BMASK)) { - MIPS_DEBUG("CTI in delay / forbidden slot"); - goto die; - } - if (!(ctx->hflags & MIPS_HFLAG_DM)) { - MIPS_INVAL(opn); - generate_exception(ctx, EXCP_RI); - } else { - gen_helper_deret(tcg_ctx, tcg_ctx->cpu_env); - ctx->bstate = BS_EXCP; - } - break; - case OPC_WAIT: - opn = "wait"; - check_insn(ctx, ISA_MIPS3 | ISA_MIPS32); - if ((ctx->insn_flags & ISA_MIPS32R6) && - (ctx->hflags & MIPS_HFLAG_BMASK)) { - MIPS_DEBUG("CTI in delay / forbidden slot"); - goto die; - } - /* If we get an exception, we want to restart at next instruction */ - ctx->pc += 4; - save_cpu_state(ctx, 1); - ctx->pc -= 4; - gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env); - ctx->bstate = BS_EXCP; - break; - default: - die: - MIPS_INVAL(opn); - generate_exception(ctx, EXCP_RI); - return; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s %d", opn, regnames[rt], rd); -} -#endif /* !CONFIG_USER_ONLY */ - -/* CP1 Branches (before delay slot) */ -static void gen_compute_branch1(DisasContext *ctx, uint32_t op, - int32_t cc, int32_t offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - target_ulong btarget; - const char *opn = "cp1 cond branch"; - TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); - - if ((ctx->insn_flags & ISA_MIPS32R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) { - MIPS_DEBUG("CTI in delay / forbidden slot"); - generate_exception(ctx, EXCP_RI); - goto out; - } - - if (cc != 0) - check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); - - btarget = ctx->pc + 4 + offset; - - switch (op) { - case OPC_BC1F: - tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); - tcg_gen_not_i32(tcg_ctx, t0, t0); - tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); - tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); - opn = "bc1f"; - goto not_likely; - case OPC_BC1FL: - tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); - tcg_gen_not_i32(tcg_ctx, t0, 
t0); - tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); - tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); - opn = "bc1fl"; - goto likely; - case OPC_BC1T: - tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); - tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); - tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); - opn = "bc1t"; - goto not_likely; - case OPC_BC1TL: - tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); - tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); - tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); - opn = "bc1tl"; - likely: - ctx->hflags |= MIPS_HFLAG_BL; - break; - case OPC_BC1FANY2: - { - TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); - tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+1)); - tcg_gen_nand_i32(tcg_ctx, t0, t0, t1); - tcg_temp_free_i32(tcg_ctx, t1); - tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); - tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); - } - opn = "bc1any2f"; - goto not_likely; - case OPC_BC1TANY2: - { - TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); - tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+1)); - tcg_gen_or_i32(tcg_ctx, t0, t0, t1); - tcg_temp_free_i32(tcg_ctx, t1); - tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); - tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); - } - opn = "bc1any2t"; - goto not_likely; - case OPC_BC1FANY4: - { - TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); - tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+1)); - tcg_gen_and_i32(tcg_ctx, t0, t0, t1); - tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+2)); - tcg_gen_and_i32(tcg_ctx, t0, t0, t1); - tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+3)); - tcg_gen_nand_i32(tcg_ctx, t0, t0, t1); - tcg_temp_free_i32(tcg_ctx, t1); - tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); - tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); - } - opn = "bc1any4f"; - goto not_likely; - case OPC_BC1TANY4: - { - TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); - tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+1)); - tcg_gen_or_i32(tcg_ctx, t0, t0, t1); - tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+2)); - tcg_gen_or_i32(tcg_ctx, t0, t0, t1); - tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+3)); - tcg_gen_or_i32(tcg_ctx, t0, t0, t1); - tcg_temp_free_i32(tcg_ctx, t1); - tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); - tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); - } - opn = "bc1any4t"; - not_likely: - ctx->hflags |= MIPS_HFLAG_BC; - break; - default: - MIPS_INVAL(opn); - generate_exception (ctx, EXCP_RI); - goto out; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s: cond %02x target " TARGET_FMT_lx, opn, - ctx->hflags, btarget); - ctx->btarget = btarget; - ctx->hflags |= MIPS_HFLAG_BDS32; - out: - tcg_temp_free_i32(tcg_ctx, t0); -} - -/* R6 CP1 Branches */ -static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, - int32_t ft, int32_t offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - target_ulong btarget; - const char *opn = "cp1 cond branch"; - TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); - - if (ctx->hflags & MIPS_HFLAG_BMASK) { -#ifdef MIPS_DEBUG_DISAS - LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx - "\n", ctx->pc); -#endif - 
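/*
 * [editor's sketch] The BC1F/BC1T/BC1FANYx/BC1TANYx cases above all reduce
 * to probing condition-code bits of FCR31: FCC0 sits at bit 23 and
 * FCC1..FCC7 at bits 25..31, which is the layout get_fp_bit() encodes. A
 * self-contained model of the branch condition under that assumption;
 * fcc_branch_taken() is an illustrative helper, not part of the patch:
 */
#include <stdbool.h>
#include <stdint.h>

static int get_fp_bit(int cc)
{
    return cc ? 24 + cc : 23;           /* FCC0 = bit 23, FCCn = bit 24+n */
}

/* width is 1 (BC1F/BC1T), 2 (BC1xANY2) or 4 (BC1xANY4); on_true selects the
 * BC1T flavour. BC1TANYn takes the branch if any FCC bit is set (the OR
 * chain above); BC1FANYn if any FCC bit is clear (the NAND above). */
static bool fcc_branch_taken(uint32_t fcr31, int cc, bool on_true, int width)
{
    uint32_t and_all = 1, or_all = 0;
    for (int i = 0; i < width; i++) {
        uint32_t b = (fcr31 >> get_fp_bit(cc + i)) & 1;
        and_all &= b;
        or_all |= b;
    }
    return on_true ? (or_all != 0) : (and_all == 0);
}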
generate_exception(ctx, EXCP_RI); - goto out; - } - - gen_load_fpr64(ctx, t0, ft); - tcg_gen_andi_i64(tcg_ctx, t0, t0, 1); - - btarget = addr_add(ctx, ctx->pc + 4, offset); - - switch (op) { - case OPC_BC1EQZ: - tcg_gen_xori_i64(tcg_ctx, t0, t0, 1); - opn = "bc1eqz"; - ctx->hflags |= MIPS_HFLAG_BC; - break; - case OPC_BC1NEZ: - /* t0 already set */ - opn = "bc1nez"; - ctx->hflags |= MIPS_HFLAG_BC; - break; - default: - MIPS_INVAL(opn); - generate_exception(ctx, EXCP_RI); - goto out; - } - - tcg_gen_trunc_i64_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); - - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s: cond %02x target " TARGET_FMT_lx, opn, - ctx->hflags, btarget); - ctx->btarget = btarget; - ctx->hflags |= MIPS_HFLAG_BDS32; - -out: - tcg_temp_free_i64(tcg_ctx, t0); -} - -/* Coprocessor 1 (FPU) */ - -#define FOP(func, fmt) (((fmt) << 21) | (func)) - -enum fopcode { - OPC_ADD_S = FOP(0, FMT_S), - OPC_SUB_S = FOP(1, FMT_S), - OPC_MUL_S = FOP(2, FMT_S), - OPC_DIV_S = FOP(3, FMT_S), - OPC_SQRT_S = FOP(4, FMT_S), - OPC_ABS_S = FOP(5, FMT_S), - OPC_MOV_S = FOP(6, FMT_S), - OPC_NEG_S = FOP(7, FMT_S), - OPC_ROUND_L_S = FOP(8, FMT_S), - OPC_TRUNC_L_S = FOP(9, FMT_S), - OPC_CEIL_L_S = FOP(10, FMT_S), - OPC_FLOOR_L_S = FOP(11, FMT_S), - OPC_ROUND_W_S = FOP(12, FMT_S), - OPC_TRUNC_W_S = FOP(13, FMT_S), - OPC_CEIL_W_S = FOP(14, FMT_S), - OPC_FLOOR_W_S = FOP(15, FMT_S), - OPC_SEL_S = FOP(16, FMT_S), - OPC_MOVCF_S = FOP(17, FMT_S), - OPC_MOVZ_S = FOP(18, FMT_S), - OPC_MOVN_S = FOP(19, FMT_S), - OPC_SELEQZ_S = FOP(20, FMT_S), - OPC_RECIP_S = FOP(21, FMT_S), - OPC_RSQRT_S = FOP(22, FMT_S), - OPC_SELNEZ_S = FOP(23, FMT_S), - OPC_MADDF_S = FOP(24, FMT_S), - OPC_MSUBF_S = FOP(25, FMT_S), - OPC_RINT_S = FOP(26, FMT_S), - OPC_CLASS_S = FOP(27, FMT_S), - OPC_MIN_S = FOP(28, FMT_S), - OPC_RECIP2_S = FOP(28, FMT_S), - OPC_MINA_S = FOP(29, FMT_S), - OPC_RECIP1_S = FOP(29, FMT_S), - OPC_MAX_S = FOP(30, FMT_S), - OPC_RSQRT1_S = FOP(30, FMT_S), - OPC_MAXA_S = FOP(31, FMT_S), - OPC_RSQRT2_S = FOP(31, FMT_S), - OPC_CVT_D_S = FOP(33, FMT_S), - OPC_CVT_W_S = FOP(36, FMT_S), - OPC_CVT_L_S = FOP(37, FMT_S), - OPC_CVT_PS_S = FOP(38, FMT_S), - OPC_CMP_F_S = FOP (48, FMT_S), - OPC_CMP_UN_S = FOP (49, FMT_S), - OPC_CMP_EQ_S = FOP (50, FMT_S), - OPC_CMP_UEQ_S = FOP (51, FMT_S), - OPC_CMP_OLT_S = FOP (52, FMT_S), - OPC_CMP_ULT_S = FOP (53, FMT_S), - OPC_CMP_OLE_S = FOP (54, FMT_S), - OPC_CMP_ULE_S = FOP (55, FMT_S), - OPC_CMP_SF_S = FOP (56, FMT_S), - OPC_CMP_NGLE_S = FOP (57, FMT_S), - OPC_CMP_SEQ_S = FOP (58, FMT_S), - OPC_CMP_NGL_S = FOP (59, FMT_S), - OPC_CMP_LT_S = FOP (60, FMT_S), - OPC_CMP_NGE_S = FOP (61, FMT_S), - OPC_CMP_LE_S = FOP (62, FMT_S), - OPC_CMP_NGT_S = FOP (63, FMT_S), - - OPC_ADD_D = FOP(0, FMT_D), - OPC_SUB_D = FOP(1, FMT_D), - OPC_MUL_D = FOP(2, FMT_D), - OPC_DIV_D = FOP(3, FMT_D), - OPC_SQRT_D = FOP(4, FMT_D), - OPC_ABS_D = FOP(5, FMT_D), - OPC_MOV_D = FOP(6, FMT_D), - OPC_NEG_D = FOP(7, FMT_D), - OPC_ROUND_L_D = FOP(8, FMT_D), - OPC_TRUNC_L_D = FOP(9, FMT_D), - OPC_CEIL_L_D = FOP(10, FMT_D), - OPC_FLOOR_L_D = FOP(11, FMT_D), - OPC_ROUND_W_D = FOP(12, FMT_D), - OPC_TRUNC_W_D = FOP(13, FMT_D), - OPC_CEIL_W_D = FOP(14, FMT_D), - OPC_FLOOR_W_D = FOP(15, FMT_D), - OPC_SEL_D = FOP(16, FMT_D), - OPC_MOVCF_D = FOP(17, FMT_D), - OPC_MOVZ_D = FOP(18, FMT_D), - OPC_MOVN_D = FOP(19, FMT_D), - OPC_SELEQZ_D = FOP(20, FMT_D), - OPC_RECIP_D = FOP(21, FMT_D), - OPC_RSQRT_D = FOP(22, FMT_D), - OPC_SELNEZ_D = FOP(23, FMT_D), - OPC_MADDF_D = FOP(24, FMT_D), - OPC_MSUBF_D = FOP(25, FMT_D), - OPC_RINT_D = FOP(26, 
FMT_D), - OPC_CLASS_D = FOP(27, FMT_D), - OPC_MIN_D = FOP(28, FMT_D), - OPC_RECIP2_D = FOP(28, FMT_D), - OPC_MINA_D = FOP(29, FMT_D), - OPC_RECIP1_D = FOP(29, FMT_D), - OPC_MAX_D = FOP(30, FMT_D), - OPC_RSQRT1_D = FOP(30, FMT_D), - OPC_MAXA_D = FOP(31, FMT_D), - OPC_RSQRT2_D = FOP(31, FMT_D), - OPC_CVT_S_D = FOP(32, FMT_D), - OPC_CVT_W_D = FOP(36, FMT_D), - OPC_CVT_L_D = FOP(37, FMT_D), - OPC_CMP_F_D = FOP (48, FMT_D), - OPC_CMP_UN_D = FOP (49, FMT_D), - OPC_CMP_EQ_D = FOP (50, FMT_D), - OPC_CMP_UEQ_D = FOP (51, FMT_D), - OPC_CMP_OLT_D = FOP (52, FMT_D), - OPC_CMP_ULT_D = FOP (53, FMT_D), - OPC_CMP_OLE_D = FOP (54, FMT_D), - OPC_CMP_ULE_D = FOP (55, FMT_D), - OPC_CMP_SF_D = FOP (56, FMT_D), - OPC_CMP_NGLE_D = FOP (57, FMT_D), - OPC_CMP_SEQ_D = FOP (58, FMT_D), - OPC_CMP_NGL_D = FOP (59, FMT_D), - OPC_CMP_LT_D = FOP (60, FMT_D), - OPC_CMP_NGE_D = FOP (61, FMT_D), - OPC_CMP_LE_D = FOP (62, FMT_D), - OPC_CMP_NGT_D = FOP (63, FMT_D), - - OPC_CVT_S_W = FOP(32, FMT_W), - OPC_CVT_D_W = FOP(33, FMT_W), - OPC_CVT_S_L = FOP(32, FMT_L), - OPC_CVT_D_L = FOP(33, FMT_L), - OPC_CVT_PS_PW = FOP(38, FMT_W), - - OPC_ADD_PS = FOP(0, FMT_PS), - OPC_SUB_PS = FOP(1, FMT_PS), - OPC_MUL_PS = FOP(2, FMT_PS), - OPC_DIV_PS = FOP(3, FMT_PS), - OPC_ABS_PS = FOP(5, FMT_PS), - OPC_MOV_PS = FOP(6, FMT_PS), - OPC_NEG_PS = FOP(7, FMT_PS), - OPC_MOVCF_PS = FOP(17, FMT_PS), - OPC_MOVZ_PS = FOP(18, FMT_PS), - OPC_MOVN_PS = FOP(19, FMT_PS), - OPC_ADDR_PS = FOP(24, FMT_PS), - OPC_MULR_PS = FOP(26, FMT_PS), - OPC_RECIP2_PS = FOP(28, FMT_PS), - OPC_RECIP1_PS = FOP(29, FMT_PS), - OPC_RSQRT1_PS = FOP(30, FMT_PS), - OPC_RSQRT2_PS = FOP(31, FMT_PS), - - OPC_CVT_S_PU = FOP(32, FMT_PS), - OPC_CVT_PW_PS = FOP(36, FMT_PS), - OPC_CVT_S_PL = FOP(40, FMT_PS), - OPC_PLL_PS = FOP(44, FMT_PS), - OPC_PLU_PS = FOP(45, FMT_PS), - OPC_PUL_PS = FOP(46, FMT_PS), - OPC_PUU_PS = FOP(47, FMT_PS), - OPC_CMP_F_PS = FOP (48, FMT_PS), - OPC_CMP_UN_PS = FOP (49, FMT_PS), - OPC_CMP_EQ_PS = FOP (50, FMT_PS), - OPC_CMP_UEQ_PS = FOP (51, FMT_PS), - OPC_CMP_OLT_PS = FOP (52, FMT_PS), - OPC_CMP_ULT_PS = FOP (53, FMT_PS), - OPC_CMP_OLE_PS = FOP (54, FMT_PS), - OPC_CMP_ULE_PS = FOP (55, FMT_PS), - OPC_CMP_SF_PS = FOP (56, FMT_PS), - OPC_CMP_NGLE_PS = FOP (57, FMT_PS), - OPC_CMP_SEQ_PS = FOP (58, FMT_PS), - OPC_CMP_NGL_PS = FOP (59, FMT_PS), - OPC_CMP_LT_PS = FOP (60, FMT_PS), - OPC_CMP_NGE_PS = FOP (61, FMT_PS), - OPC_CMP_LE_PS = FOP (62, FMT_PS), - OPC_CMP_NGT_PS = FOP (63, FMT_PS), -}; - -enum r6_f_cmp_op { - R6_OPC_CMP_AF_S = FOP(0, FMT_W), - R6_OPC_CMP_UN_S = FOP(1, FMT_W), - R6_OPC_CMP_EQ_S = FOP(2, FMT_W), - R6_OPC_CMP_UEQ_S = FOP(3, FMT_W), - R6_OPC_CMP_LT_S = FOP(4, FMT_W), - R6_OPC_CMP_ULT_S = FOP(5, FMT_W), - R6_OPC_CMP_LE_S = FOP(6, FMT_W), - R6_OPC_CMP_ULE_S = FOP(7, FMT_W), - R6_OPC_CMP_SAF_S = FOP(8, FMT_W), - R6_OPC_CMP_SUN_S = FOP(9, FMT_W), - R6_OPC_CMP_SEQ_S = FOP(10, FMT_W), - R6_OPC_CMP_SEUQ_S = FOP(11, FMT_W), - R6_OPC_CMP_SLT_S = FOP(12, FMT_W), - R6_OPC_CMP_SULT_S = FOP(13, FMT_W), - R6_OPC_CMP_SLE_S = FOP(14, FMT_W), - R6_OPC_CMP_SULE_S = FOP(15, FMT_W), - R6_OPC_CMP_OR_S = FOP(17, FMT_W), - R6_OPC_CMP_UNE_S = FOP(18, FMT_W), - R6_OPC_CMP_NE_S = FOP(19, FMT_W), - R6_OPC_CMP_SOR_S = FOP(25, FMT_W), - R6_OPC_CMP_SUNE_S = FOP(26, FMT_W), - R6_OPC_CMP_SNE_S = FOP(27, FMT_W), - - R6_OPC_CMP_AF_D = FOP(0, FMT_L), - R6_OPC_CMP_UN_D = FOP(1, FMT_L), - R6_OPC_CMP_EQ_D = FOP(2, FMT_L), - R6_OPC_CMP_UEQ_D = FOP(3, FMT_L), - R6_OPC_CMP_LT_D = FOP(4, FMT_L), - R6_OPC_CMP_ULT_D = FOP(5, FMT_L), - R6_OPC_CMP_LE_D = FOP(6, FMT_L), - R6_OPC_CMP_ULE_D = 
FOP(7, FMT_L), - R6_OPC_CMP_SAF_D = FOP(8, FMT_L), - R6_OPC_CMP_SUN_D = FOP(9, FMT_L), - R6_OPC_CMP_SEQ_D = FOP(10, FMT_L), - R6_OPC_CMP_SEUQ_D = FOP(11, FMT_L), - R6_OPC_CMP_SLT_D = FOP(12, FMT_L), - R6_OPC_CMP_SULT_D = FOP(13, FMT_L), - R6_OPC_CMP_SLE_D = FOP(14, FMT_L), - R6_OPC_CMP_SULE_D = FOP(15, FMT_L), - R6_OPC_CMP_OR_D = FOP(17, FMT_L), - R6_OPC_CMP_UNE_D = FOP(18, FMT_L), - R6_OPC_CMP_NE_D = FOP(19, FMT_L), - R6_OPC_CMP_SOR_D = FOP(25, FMT_L), - R6_OPC_CMP_SUNE_D = FOP(26, FMT_L), - R6_OPC_CMP_SNE_D = FOP(27, FMT_L), -}; -static void gen_cp1 (DisasContext *ctx, uint32_t opc, int rt, int fs) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *opn = "cp1 move"; - TCGv t0 = tcg_temp_new(tcg_ctx); - - switch (opc) { - case OPC_MFC1: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); - tcg_temp_free_i32(tcg_ctx, fp0); - } - gen_store_gpr(tcg_ctx, t0, rt); - opn = "mfc1"; - break; - case OPC_MTC1: - gen_load_gpr(ctx, t0, rt); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); - gen_store_fpr32(ctx, fp0, fs); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "mtc1"; - break; - case OPC_CFC1: - gen_helper_1e0i(tcg_ctx, cfc1, t0, fs); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "cfc1"; - break; - case OPC_CTC1: - gen_load_gpr(ctx, t0, rt); - save_cpu_state(ctx, 1); - { - TCGv_i32 fs_tmp = tcg_const_i32(tcg_ctx, fs); - - gen_helper_0e2i(tcg_ctx, ctc1, t0, fs_tmp, rt); - tcg_temp_free_i32(tcg_ctx, fs_tmp); - } - /* Stop translation as we may have changed hflags */ - ctx->bstate = BS_STOP; - opn = "ctc1"; - break; -#if defined(TARGET_MIPS64) - case OPC_DMFC1: - gen_load_fpr64(ctx, t0, fs); - gen_store_gpr(tcg_ctx, t0, rt); - opn = "dmfc1"; - break; - case OPC_DMTC1: - gen_load_gpr(ctx, t0, rt); - gen_store_fpr64(ctx, t0, fs); - opn = "dmtc1"; - break; -#endif - case OPC_MFHC1: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32h(ctx, fp0, fs); - tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); - tcg_temp_free_i32(tcg_ctx, fp0); - } - gen_store_gpr(tcg_ctx, t0, rt); - opn = "mfhc1"; - break; - case OPC_MTHC1: - gen_load_gpr(ctx, t0, rt); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); - gen_store_fpr32h(ctx, fp0, fs); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "mthc1"; - break; - default: - MIPS_INVAL(opn); - generate_exception (ctx, EXCP_RI); - goto out; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s %s", opn, regnames[rt], fregnames[fs]); - - out: - tcg_temp_free(tcg_ctx, t0); -} - -static void gen_movci (DisasContext *ctx, int rd, int rs, int cc, int tf) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - int l1; - TCGCond cond; - TCGv_i32 t0; - - if (rd == 0) { - /* Treat as NOP. 
*/ - return; - } - - if (tf) - cond = TCG_COND_EQ; - else - cond = TCG_COND_NE; - - l1 = gen_new_label(tcg_ctx); - t0 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1U << (get_fp_bit(cc) & 0x1f)); - tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); - tcg_temp_free_i32(tcg_ctx, t0); - if (rs == 0) { - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); - } else { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); - } - gen_set_label(tcg_ctx, l1); -} - -static inline void gen_movcf_s (DisasContext *ctx, int fs, int fd, int cc, int tf) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - int cond; - TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); - int l1 = gen_new_label(tcg_ctx); - - if (tf) - cond = TCG_COND_EQ; - else - cond = TCG_COND_NE; - - tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1U << (get_fp_bit(cc) & 0x1f)); - tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); - gen_load_fpr32(ctx, t0, fs); - gen_store_fpr32(ctx, t0, fd); - gen_set_label(tcg_ctx, l1); - tcg_temp_free_i32(tcg_ctx, t0); -} - -static inline void gen_movcf_d (DisasContext *ctx, int fs, int fd, int cc, int tf) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - int cond; - TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp0; - int l1 = gen_new_label(tcg_ctx); - - if (tf) - cond = TCG_COND_EQ; - else - cond = TCG_COND_NE; - - tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1U << (get_fp_bit(cc) & 0x1f)); - tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); - tcg_temp_free_i32(tcg_ctx, t0); - fp0 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - gen_set_label(tcg_ctx, l1); -} - -static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd, - int cc, int tf) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - int cond; - TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); - int l1 = gen_new_label(tcg_ctx); - int l2 = gen_new_label(tcg_ctx); - - if (tf) - cond = TCG_COND_EQ; - else - cond = TCG_COND_NE; - - tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1U << (get_fp_bit(cc) & 0x1f)); - tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); - gen_load_fpr32(ctx, t0, fs); - gen_store_fpr32(ctx, t0, fd); - gen_set_label(tcg_ctx, l1); - - tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1U << (get_fp_bit(cc+1) & 0x1f)); - tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l2); - gen_load_fpr32h(ctx, t0, fs); - gen_store_fpr32h(ctx, t0, fd); - tcg_temp_free_i32(tcg_ctx, t0); - gen_set_label(tcg_ctx, l2); -} - -static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft, - int fs) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0); - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fd); - gen_load_fpr32(ctx, fp1, ft); - gen_load_fpr32(ctx, fp2, fs); - - switch (op1) { - case OPC_SEL_S: - tcg_gen_andi_i32(tcg_ctx, fp0, fp0, 1); - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, fp0, fp0, t1, fp1, fp2); - break; - case OPC_SELEQZ_S: - tcg_gen_andi_i32(tcg_ctx, fp1, fp1, 1); - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, fp0, fp1, t1, fp2, t1); - break; - case OPC_SELNEZ_S: - tcg_gen_andi_i32(tcg_ctx, fp1, fp1, 1); - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, fp0, fp1, t1, fp2, t1); - break; - default: - MIPS_INVAL("gen_sel_s"); - generate_exception (ctx, EXCP_RI); - break; - } - - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp2); - tcg_temp_free_i32(tcg_ctx, fp1); - tcg_temp_free_i32(tcg_ctx, fp0); - 
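/*
 * [editor's sketch] What the three movcond cases in gen_sel_s (and the
 * i64 twins in gen_sel_d below) compute, written out on raw FPR bit
 * patterns. Only bit 0 of the selector register is consulted, exactly as
 * the tcg_gen_andi/tcg_gen_movcond pairs do; sel32() is an illustrative
 * helper, not part of the patch:
 */
#include <stdint.h>

enum sel_kind { SEL_BIT, SEL_EQZ, SEL_NEZ };

static uint32_t sel32(enum sel_kind k, uint32_t fd, uint32_t fs, uint32_t ft)
{
    switch (k) {
    case SEL_BIT: return (fd & 1) ? ft : fs; /* SEL.S: fd.bit0 picks ft/fs */
    case SEL_EQZ: return (ft & 1) ? 0 : fs;  /* SELEQZ.S: fs if ft.bit0==0 */
    case SEL_NEZ: return (ft & 1) ? fs : 0;  /* SELNEZ.S: fs if ft.bit0!=0 */
    }
    return 0;
}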
tcg_temp_free_i32(tcg_ctx, t1); -} - -static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft, - int fs) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv_i64 t1 = tcg_const_i64(tcg_ctx, 0); - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fd); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fs); - - switch (op1) { - case OPC_SEL_D: - tcg_gen_andi_i64(tcg_ctx, fp0, fp0, 1); - tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, fp0, fp0, t1, fp1, fp2); - break; - case OPC_SELEQZ_D: - tcg_gen_andi_i64(tcg_ctx, fp1, fp1, 1); - tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, fp0, fp1, t1, fp2, t1); - break; - case OPC_SELNEZ_D: - tcg_gen_andi_i64(tcg_ctx, fp1, fp1, 1); - tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, fp0, fp1, t1, fp2, t1); - break; - default: - MIPS_INVAL("gen_sel_d"); - generate_exception (ctx, EXCP_RI); - break; - } - - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - tcg_temp_free_i64(tcg_ctx, fp1); - tcg_temp_free_i64(tcg_ctx, fp0); - tcg_temp_free_i64(tcg_ctx, t1); -} - -static void gen_farith (DisasContext *ctx, enum fopcode op1, - int ft, int fs, int fd, int cc) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "farith"; - const char *condnames[] = { - "c.f", - "c.un", - "c.eq", - "c.ueq", - "c.olt", - "c.ult", - "c.ole", - "c.ule", - "c.sf", - "c.ngle", - "c.seq", - "c.ngl", - "c.lt", - "c.nge", - "c.le", - "c.ngt", - }; - const char *condnames_abs[] = { - "cabs.f", - "cabs.un", - "cabs.eq", - "cabs.ueq", - "cabs.olt", - "cabs.ult", - "cabs.ole", - "cabs.ule", - "cabs.sf", - "cabs.ngle", - "cabs.seq", - "cabs.ngl", - "cabs.lt", - "cabs.nge", - "cabs.le", - "cabs.ngt", - }; - enum { BINOP, CMPOP, OTHEROP } optype = OTHEROP; - uint32_t func = ctx->opcode & 0x3f; - - switch (op1) { - case OPC_ADD_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_helper_float_add_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i32(tcg_ctx, fp1); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "add.s"; - optype = BINOP; - break; - case OPC_SUB_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_helper_float_sub_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i32(tcg_ctx, fp1); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "sub.s"; - optype = BINOP; - break; - case OPC_MUL_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_helper_float_mul_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i32(tcg_ctx, fp1); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "mul.s"; - optype = BINOP; - break; - case OPC_DIV_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_helper_float_div_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i32(tcg_ctx, fp1); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "div.s"; - optype = BINOP; - break; - case OPC_SQRT_S: - { - TCGv_i32 fp0 = 
tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_sqrt_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "sqrt.s"; - break; - case OPC_ABS_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_abs_s(tcg_ctx, fp0, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "abs.s"; - break; - case OPC_MOV_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "mov.s"; - break; - case OPC_NEG_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_chs_s(tcg_ctx, fp0, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "neg.s"; - break; - case OPC_ROUND_L_S: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_roundl_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); - tcg_temp_free_i32(tcg_ctx, fp32); - gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(tcg_ctx, fp64); - } - opn = "round.l.s"; - break; - case OPC_TRUNC_L_S: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_truncl_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); - tcg_temp_free_i32(tcg_ctx, fp32); - gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(tcg_ctx, fp64); - } - opn = "trunc.l.s"; - break; - case OPC_CEIL_L_S: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_ceill_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); - tcg_temp_free_i32(tcg_ctx, fp32); - gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(tcg_ctx, fp64); - } - opn = "ceil.l.s"; - break; - case OPC_FLOOR_L_S: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_floorl_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); - tcg_temp_free_i32(tcg_ctx, fp32); - gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(tcg_ctx, fp64); - } - opn = "floor.l.s"; - break; - case OPC_ROUND_W_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_roundw_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "round.w.s"; - break; - case OPC_TRUNC_W_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_truncw_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "trunc.w.s"; - break; - case OPC_CEIL_W_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_ceilw_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "ceil.w.s"; - break; - case OPC_FLOOR_W_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_floorw_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "floor.w.s"; - break; - case 
OPC_SEL_S: - check_insn(ctx, ISA_MIPS32R6); - gen_sel_s(ctx, op1, fd, ft, fs); - opn = "sel.s"; - break; - case OPC_SELEQZ_S: - check_insn(ctx, ISA_MIPS32R6); - gen_sel_s(ctx, op1, fd, ft, fs); - opn = "seleqz.s"; - break; - case OPC_SELNEZ_S: - check_insn(ctx, ISA_MIPS32R6); - gen_sel_s(ctx, op1, fd, ft, fs); - opn = "selnez.s"; - break; - case OPC_MOVCF_S: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - gen_movcf_s(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); - opn = "movcf.s"; - break; - case OPC_MOVZ_S: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - { - int l1 = gen_new_label(tcg_ctx); - TCGv_i32 fp0; - - if (ft != 0) { - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[ft], 0, l1); - } - fp0 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - gen_set_label(tcg_ctx, l1); - } - opn = "movz.s"; - break; - case OPC_MOVN_S: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - { - int l1 = gen_new_label(tcg_ctx); - TCGv_i32 fp0; - - if (ft != 0) { - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, *cpu_gpr[ft], 0, l1); - fp0 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - gen_set_label(tcg_ctx, l1); - } - } - opn = "movn.s"; - break; - case OPC_RECIP_S: - check_cop1x(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_recip_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "recip.s"; - break; - case OPC_RSQRT_S: - check_cop1x(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_rsqrt_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "rsqrt.s"; - break; - case OPC_MADDF_S: - check_insn(ctx, ISA_MIPS32R6); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_load_fpr32(ctx, fp2, fd); - gen_helper_float_maddf_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(tcg_ctx, fp2); - tcg_temp_free_i32(tcg_ctx, fp1); - tcg_temp_free_i32(tcg_ctx, fp0); - opn = "maddf.s"; - } - break; - case OPC_MSUBF_S: - check_insn(ctx, ISA_MIPS32R6); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_load_fpr32(ctx, fp2, fd); - gen_helper_float_msubf_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(tcg_ctx, fp2); - tcg_temp_free_i32(tcg_ctx, fp1); - tcg_temp_free_i32(tcg_ctx, fp0); - opn = "msubf.s"; - } - break; - case OPC_RINT_S: - check_insn(ctx, ISA_MIPS32R6); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_rint_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - opn = "rint.s"; - } - break; - case OPC_CLASS_S: - check_insn(ctx, ISA_MIPS32R6); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_class_s(tcg_ctx, fp0, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - opn = "class.s"; - } - break; - case OPC_MIN_S: /* OPC_RECIP2_S */ - if 
(ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_MIN_S */ - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_helper_float_min_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1); - gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(tcg_ctx, fp2); - tcg_temp_free_i32(tcg_ctx, fp1); - tcg_temp_free_i32(tcg_ctx, fp0); - opn = "min.s"; - } else { - /* OPC_RECIP2_S */ - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_helper_float_recip2_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i32(tcg_ctx, fp1); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "recip2.s"; - } - break; - case OPC_MINA_S: /* OPC_RECIP1_S */ - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_MINA_S */ - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_helper_float_mina_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1); - gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(tcg_ctx, fp2); - tcg_temp_free_i32(tcg_ctx, fp1); - tcg_temp_free_i32(tcg_ctx, fp0); - opn = "mina.s"; - } else { - /* OPC_RECIP1_S */ - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_recip1_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "recip1.s"; - } - break; - case OPC_MAX_S: /* OPC_RSQRT1_S */ - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_MAX_S */ - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_helper_float_max_s(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); - gen_store_fpr32(ctx, fp1, fd); - tcg_temp_free_i32(tcg_ctx, fp1); - tcg_temp_free_i32(tcg_ctx, fp0); - opn = "max.s"; - } else { - /* OPC_RSQRT1_S */ - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_rsqrt1_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "rsqrt1.s"; - } - break; - case OPC_MAXA_S: /* OPC_RSQRT2_S */ - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_MAXA_S */ - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_helper_float_maxa_s(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); - gen_store_fpr32(ctx, fp1, fd); - tcg_temp_free_i32(tcg_ctx, fp1); - tcg_temp_free_i32(tcg_ctx, fp0); - opn = "maxa.s"; - } else { - /* OPC_RSQRT2_S */ - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_helper_float_rsqrt2_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i32(tcg_ctx, fp1); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "rsqrt2.s"; - } - break; - case OPC_CVT_D_S: - check_cp1_registers(ctx, fd); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr32(ctx, fp32, fs); - 
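/*
 * [editor's sketch] FOP(28..31, fmt) is intentionally overloaded in the
 * cases just above: on R6 cores the encodings are MIN/MINA/MAX/MAXA, while
 * pre-R6 the very same values are the MIPS-3D RECIP2/RECIP1/RSQRT1/RSQRT2
 * ops, and only ctx->insn_flags & ISA_MIPS32R6 decides which path runs. A
 * compressed model of that dispatch (names are illustrative):
 */
#include <stdbool.h>

static const char *fop28_31(unsigned func, bool is_r6)
{
    static const char *r6ops[]  = { "min", "mina", "max", "maxa" };
    static const char *legacy[] = { "recip2", "recip1", "rsqrt1", "rsqrt2" };
    if (func < 28 || func > 31) {
        return "other";
    }
    return is_r6 ? r6ops[func - 28] : legacy[func - 28];
}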
gen_helper_float_cvtd_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); - tcg_temp_free_i32(tcg_ctx, fp32); - gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(tcg_ctx, fp64); - } - opn = "cvt.d.s"; - break; - case OPC_CVT_W_S: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_cvtw_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "cvt.w.s"; - break; - case OPC_CVT_L_S: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_cvtl_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); - tcg_temp_free_i32(tcg_ctx, fp32); - gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(tcg_ctx, fp64); - } - opn = "cvt.l.s"; - break; - case OPC_CVT_PS_S: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - TCGv_i32 fp32_0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp32_1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp32_0, fs); - gen_load_fpr32(ctx, fp32_1, ft); - tcg_gen_concat_i32_i64(tcg_ctx, fp64, fp32_1, fp32_0); - tcg_temp_free_i32(tcg_ctx, fp32_1); - tcg_temp_free_i32(tcg_ctx, fp32_0); - gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(tcg_ctx, fp64); - } - opn = "cvt.ps.s"; - break; - case OPC_CMP_F_S: - case OPC_CMP_UN_S: - case OPC_CMP_EQ_S: - case OPC_CMP_UEQ_S: - case OPC_CMP_OLT_S: - case OPC_CMP_ULT_S: - case OPC_CMP_OLE_S: - case OPC_CMP_ULE_S: - case OPC_CMP_SF_S: - case OPC_CMP_NGLE_S: - case OPC_CMP_SEQ_S: - case OPC_CMP_NGL_S: - case OPC_CMP_LT_S: - case OPC_CMP_NGE_S: - case OPC_CMP_LE_S: - case OPC_CMP_NGT_S: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - if (ctx->opcode & (1 << 6)) { - gen_cmpabs_s(ctx, func-48, ft, fs, cc); - opn = condnames_abs[func-48]; - } else { - gen_cmp_s(ctx, func-48, ft, fs, cc); - opn = condnames[func-48]; - } - break; - case OPC_ADD_D: - check_cp1_registers(ctx, fs | ft | fd); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_add_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "add.d"; - optype = BINOP; - break; - case OPC_SUB_D: - check_cp1_registers(ctx, fs | ft | fd); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_sub_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "sub.d"; - optype = BINOP; - break; - case OPC_MUL_D: - check_cp1_registers(ctx, fs | ft | fd); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_mul_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "mul.d"; - optype = BINOP; - break; - case OPC_DIV_D: - check_cp1_registers(ctx, fs | ft | fd); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_div_d(tcg_ctx, fp0, 
tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "div.d"; - optype = BINOP; - break; - case OPC_SQRT_D: - check_cp1_registers(ctx, fs | fd); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_sqrt_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "sqrt.d"; - break; - case OPC_ABS_D: - check_cp1_registers(ctx, fs | fd); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_abs_d(tcg_ctx, fp0, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "abs.d"; - break; - case OPC_MOV_D: - check_cp1_registers(ctx, fs | fd); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "mov.d"; - break; - case OPC_NEG_D: - check_cp1_registers(ctx, fs | fd); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_chs_d(tcg_ctx, fp0, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "neg.d"; - break; - case OPC_ROUND_L_D: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_roundl_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "round.l.d"; - break; - case OPC_TRUNC_L_D: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_truncl_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "trunc.l.d"; - break; - case OPC_CEIL_L_D: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_ceill_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "ceil.l.d"; - break; - case OPC_FLOOR_L_D: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_floorl_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "floor.l.d"; - break; - case OPC_ROUND_W_D: - check_cp1_registers(ctx, fs); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_roundw_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); - tcg_temp_free_i64(tcg_ctx, fp64); - gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(tcg_ctx, fp32); - } - opn = "round.w.d"; - break; - case OPC_TRUNC_W_D: - check_cp1_registers(ctx, fs); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_truncw_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); - tcg_temp_free_i64(tcg_ctx, fp64); - gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(tcg_ctx, fp32); - } - opn = "trunc.w.d"; - break; - case OPC_CEIL_W_D: - check_cp1_registers(ctx, fs); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_ceilw_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); - tcg_temp_free_i64(tcg_ctx, fp64); - 
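/* The double-precision cases OR their operand numbers into a single
 * check_cp1_registers(ctx, fs | ft | fd) call: with a 32-bit FPU
 * (FR=0) doubles live in even/odd register pairs, so any odd register
 * number is invalid and one parity test catches them all.  Presumed
 * shape of that check, inferred from how it is called in these cases:
 *
 *     if (!(ctx->hflags & MIPS_HFLAG_F64) && ((fs | ft | fd) & 1)) {
 *         generate_exception(ctx, EXCP_RI);
 *     }
 */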
gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(tcg_ctx, fp32); - } - opn = "ceil.w.d"; - break; - case OPC_FLOOR_W_D: - check_cp1_registers(ctx, fs); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_floorw_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); - tcg_temp_free_i64(tcg_ctx, fp64); - gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(tcg_ctx, fp32); - } - opn = "floor.w.d"; - break; - case OPC_SEL_D: - check_insn(ctx, ISA_MIPS32R6); - gen_sel_d(ctx, op1, fd, ft, fs); - opn = "sel.d"; - break; - case OPC_SELEQZ_D: - check_insn(ctx, ISA_MIPS32R6); - gen_sel_d(ctx, op1, fd, ft, fs); - opn = "seleqz.d"; - break; - case OPC_SELNEZ_D: - check_insn(ctx, ISA_MIPS32R6); - gen_sel_d(ctx, op1, fd, ft, fs); - opn = "selnez.d"; - break; - case OPC_MOVCF_D: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - gen_movcf_d(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); - opn = "movcf.d"; - break; - case OPC_MOVZ_D: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - { - int l1 = gen_new_label(tcg_ctx); - TCGv_i64 fp0; - - if (ft != 0) { - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[ft], 0, l1); - } - fp0 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - gen_set_label(tcg_ctx, l1); - } - opn = "movz.d"; - break; - case OPC_MOVN_D: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - { - int l1 = gen_new_label(tcg_ctx); - TCGv_i64 fp0; - - if (ft != 0) { - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, *cpu_gpr[ft], 0, l1); - fp0 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - gen_set_label(tcg_ctx, l1); - } - } - opn = "movn.d"; - break; - case OPC_RECIP_D: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_recip_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "recip.d"; - break; - case OPC_RSQRT_D: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_rsqrt_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "rsqrt.d"; - break; - case OPC_MADDF_D: - check_insn(ctx, ISA_MIPS32R6); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fd); - gen_helper_float_maddf_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - tcg_temp_free_i64(tcg_ctx, fp1); - tcg_temp_free_i64(tcg_ctx, fp0); - opn = "maddf.d"; - } - break; - case OPC_MSUBF_D: - check_insn(ctx, ISA_MIPS32R6); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fd); - gen_helper_float_msubf_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - tcg_temp_free_i64(tcg_ctx, fp1); - tcg_temp_free_i64(tcg_ctx, fp0); - opn = "msubf.d"; - } - break; - case OPC_RINT_D: - check_insn(ctx, ISA_MIPS32R6); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - 
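/* The OPC_MOVZ_D / OPC_MOVN_D cases above (like their .s twins) build
 * a conditional FPR move out of a plain forward branch rather than a
 * TCG conditional-move op: the copy is skipped whenever GPR[ft] fails
 * the test.  A minimal sketch of the movz shape, using this file's
 * helpers:
 *
 *     int l1 = gen_new_label(tcg_ctx);
 *     tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[ft], 0, l1);
 *     // fall through only when GPR[ft] == 0: copy fs into fd here
 *     gen_set_label(tcg_ctx, l1);
 */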
gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_rint_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - opn = "rint.d"; - } - break; - case OPC_CLASS_D: - check_insn(ctx, ISA_MIPS32R6); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_class_d(tcg_ctx, fp0, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - opn = "class.d"; - } - break; - case OPC_MIN_D: /* OPC_RECIP2_D */ - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_MIN_D */ - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_min_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); - gen_store_fpr64(ctx, fp1, fd); - tcg_temp_free_i64(tcg_ctx, fp1); - tcg_temp_free_i64(tcg_ctx, fp0); - opn = "min.d"; - } else { - /* OPC_RECIP2_D */ - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_recip2_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "recip2.d"; - } - break; - case OPC_MINA_D: /* OPC_RECIP1_D */ - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_MINA_D */ - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_mina_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); - gen_store_fpr64(ctx, fp1, fd); - tcg_temp_free_i64(tcg_ctx, fp1); - tcg_temp_free_i64(tcg_ctx, fp0); - opn = "mina.d"; - } else { - /* OPC_RECIP1_D */ - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_recip1_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "recip1.d"; - } - break; - case OPC_MAX_D: /* OPC_RSQRT1_D */ - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_MAX_D */ - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_max_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); - gen_store_fpr64(ctx, fp1, fd); - tcg_temp_free_i64(tcg_ctx, fp1); - tcg_temp_free_i64(tcg_ctx, fp0); - opn = "max.d"; - } else { - /* OPC_RSQRT1_D */ - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_rsqrt1_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "rsqrt1.d"; - } - break; - case OPC_MAXA_D: /* OPC_RSQRT2_D */ - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_MAXA_D */ - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_maxa_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); - gen_store_fpr64(ctx, fp1, fd); - tcg_temp_free_i64(tcg_ctx, fp1); - tcg_temp_free_i64(tcg_ctx, fp0); - opn = "maxa.d"; - } else { - /* OPC_RSQRT2_D */ - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_rsqrt2_d(tcg_ctx, fp0, tcg_ctx->cpu_env, 
fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "rsqrt2.d"; - } - break; - case OPC_CMP_F_D: - case OPC_CMP_UN_D: - case OPC_CMP_EQ_D: - case OPC_CMP_UEQ_D: - case OPC_CMP_OLT_D: - case OPC_CMP_ULT_D: - case OPC_CMP_OLE_D: - case OPC_CMP_ULE_D: - case OPC_CMP_SF_D: - case OPC_CMP_NGLE_D: - case OPC_CMP_SEQ_D: - case OPC_CMP_NGL_D: - case OPC_CMP_LT_D: - case OPC_CMP_NGE_D: - case OPC_CMP_LE_D: - case OPC_CMP_NGT_D: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - if (ctx->opcode & (1 << 6)) { - gen_cmpabs_d(ctx, func-48, ft, fs, cc); - opn = condnames_abs[func-48]; - } else { - gen_cmp_d(ctx, func-48, ft, fs, cc); - opn = condnames[func-48]; - } - break; - case OPC_CVT_S_D: - check_cp1_registers(ctx, fs); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_cvts_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); - tcg_temp_free_i64(tcg_ctx, fp64); - gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(tcg_ctx, fp32); - } - opn = "cvt.s.d"; - break; - case OPC_CVT_W_D: - check_cp1_registers(ctx, fs); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_cvtw_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); - tcg_temp_free_i64(tcg_ctx, fp64); - gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(tcg_ctx, fp32); - } - opn = "cvt.w.d"; - break; - case OPC_CVT_L_D: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_cvtl_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "cvt.l.d"; - break; - case OPC_CVT_S_W: - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_cvts_w(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "cvt.s.w"; - break; - case OPC_CVT_D_W: - check_cp1_registers(ctx, fd); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr32(ctx, fp32, fs); - gen_helper_float_cvtd_w(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); - tcg_temp_free_i32(tcg_ctx, fp32); - gen_store_fpr64(ctx, fp64, fd); - tcg_temp_free_i64(tcg_ctx, fp64); - } - opn = "cvt.d.w"; - break; - case OPC_CVT_S_L: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); - TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp64, fs); - gen_helper_float_cvts_l(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); - tcg_temp_free_i64(tcg_ctx, fp64); - gen_store_fpr32(ctx, fp32, fd); - tcg_temp_free_i32(tcg_ctx, fp32); - } - opn = "cvt.s.l"; - break; - case OPC_CVT_D_L: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_cvtd_l(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "cvt.d.l"; - break; - case OPC_CVT_PS_PW: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_cvtps_pw(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "cvt.ps.pw"; - break; - case OPC_ADD_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = 
tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_add_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "add.ps"; - break; - case OPC_SUB_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_sub_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "sub.ps"; - break; - case OPC_MUL_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_mul_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "mul.ps"; - break; - case OPC_ABS_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_abs_ps(tcg_ctx, fp0, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "abs.ps"; - break; - case OPC_MOV_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "mov.ps"; - break; - case OPC_NEG_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_chs_ps(tcg_ctx, fp0, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "neg.ps"; - break; - case OPC_MOVCF_PS: - check_cp1_64bitmode(ctx); - gen_movcf_ps(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); - opn = "movcf.ps"; - break; - case OPC_MOVZ_PS: - check_cp1_64bitmode(ctx); - { - int l1 = gen_new_label(tcg_ctx); - TCGv_i64 fp0; - - if (ft != 0) - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[ft], 0, l1); - fp0 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - gen_set_label(tcg_ctx, l1); - } - opn = "movz.ps"; - break; - case OPC_MOVN_PS: - check_cp1_64bitmode(ctx); - { - int l1 = gen_new_label(tcg_ctx); - TCGv_i64 fp0; - - if (ft != 0) { - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, *cpu_gpr[ft], 0, l1); - fp0 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - gen_set_label(tcg_ctx, l1); - } - } - opn = "movn.ps"; - break; - case OPC_ADDR_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, ft); - gen_load_fpr64(ctx, fp1, fs); - gen_helper_float_addr_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "addr.ps"; - break; - case OPC_MULR_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, ft); - gen_load_fpr64(ctx, fp1, fs); - gen_helper_float_mulr_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, 
fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "mulr.ps"; - break; - case OPC_RECIP2_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_recip2_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "recip2.ps"; - break; - case OPC_RECIP1_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_recip1_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "recip1.ps"; - break; - case OPC_RSQRT1_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_rsqrt1_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "rsqrt1.ps"; - break; - case OPC_RSQRT2_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_helper_float_rsqrt2_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "rsqrt2.ps"; - break; - case OPC_CVT_S_PU: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32h(ctx, fp0, fs); - gen_helper_float_cvts_pu(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "cvt.s.pu"; - break; - case OPC_CVT_PW_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_helper_float_cvtpw_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "cvt.pw.ps"; - break; - case OPC_CVT_S_PL: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_helper_float_cvts_pl(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "cvt.s.pl"; - break; - case OPC_PLL_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_store_fpr32h(ctx, fp0, fd); - gen_store_fpr32(ctx, fp1, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - tcg_temp_free_i32(tcg_ctx, fp1); - } - opn = "pll.ps"; - break; - case OPC_PLU_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32h(ctx, fp1, ft); - gen_store_fpr32(ctx, fp1, fd); - gen_store_fpr32h(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - tcg_temp_free_i32(tcg_ctx, fp1); - } - opn = "plu.ps"; - break; - case OPC_PUL_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32h(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_store_fpr32(ctx, fp1, fd); - gen_store_fpr32h(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - tcg_temp_free_i32(tcg_ctx, fp1); - } - opn = "pul.ps"; - 
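/* The pll/plu/pul/puu group shuffles the 32-bit halves of two
 * paired-single registers into fd; the second letter picks the half of
 * fs, the third the half of ft (L = lower, U = upper), which is what
 * the gen_load_fpr32/gen_load_fpr32h pairs in these cases implement:
 *
 *     pll.ps fd, fs, ft   // fd.hi = fs.lo, fd.lo = ft.lo
 *     plu.ps fd, fs, ft   // fd.hi = fs.lo, fd.lo = ft.hi
 *     pul.ps fd, fs, ft   // fd.hi = fs.hi, fd.lo = ft.lo
 *     puu.ps fd, fs, ft   // fd.hi = fs.hi, fd.lo = ft.hi
 */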
break; - case OPC_PUU_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32h(ctx, fp0, fs); - gen_load_fpr32h(ctx, fp1, ft); - gen_store_fpr32(ctx, fp1, fd); - gen_store_fpr32h(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - tcg_temp_free_i32(tcg_ctx, fp1); - } - opn = "puu.ps"; - break; - case OPC_CMP_F_PS: - case OPC_CMP_UN_PS: - case OPC_CMP_EQ_PS: - case OPC_CMP_UEQ_PS: - case OPC_CMP_OLT_PS: - case OPC_CMP_ULT_PS: - case OPC_CMP_OLE_PS: - case OPC_CMP_ULE_PS: - case OPC_CMP_SF_PS: - case OPC_CMP_NGLE_PS: - case OPC_CMP_SEQ_PS: - case OPC_CMP_NGL_PS: - case OPC_CMP_LT_PS: - case OPC_CMP_NGE_PS: - case OPC_CMP_LE_PS: - case OPC_CMP_NGT_PS: - if (ctx->opcode & (1 << 6)) { - gen_cmpabs_ps(ctx, func-48, ft, fs, cc); - opn = condnames_abs[func-48]; - } else { - gen_cmp_ps(ctx, func-48, ft, fs, cc); - opn = condnames[func-48]; - } - break; - default: - MIPS_INVAL(opn); - generate_exception (ctx, EXCP_RI); - return; - } - (void)opn; /* avoid a compiler warning */ - switch (optype) { - case BINOP: - MIPS_DEBUG("%s %s, %s, %s", opn, fregnames[fd], fregnames[fs], fregnames[ft]); - break; - case CMPOP: - MIPS_DEBUG("%s %s,%s", opn, fregnames[fs], fregnames[ft]); - break; - default: - MIPS_DEBUG("%s %s,%s", opn, fregnames[fd], fregnames[fs]); - break; - } -} - -/* Coprocessor 3 (FPU) */ -static void gen_flt3_ldst (DisasContext *ctx, uint32_t opc, - int fd, int fs, int base, int index) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "extended float load/store"; - int store = 0; - TCGv t0 = tcg_temp_new(tcg_ctx); - - if (base == 0) { - gen_load_gpr(ctx, t0, index); - } else if (index == 0) { - gen_load_gpr(ctx, t0, base); - } else { - gen_op_addr_add(ctx, t0, *cpu_gpr[base], *cpu_gpr[index]); - } - /* Don't do NOP if destination is zero: we must perform the actual - memory access. 
*/ - switch (opc) { - case OPC_LWXC1: - check_cop1x(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESL); - tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); - gen_store_fpr32(ctx, fp0, fd); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "lwxc1"; - break; - case OPC_LDXC1: - check_cop1x(ctx); - check_cp1_registers(ctx, fd); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_qemu_ld_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "ldxc1"; - break; - case OPC_LUXC1: - check_cp1_64bitmode(ctx); - tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x7); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_qemu_ld_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ); - gen_store_fpr64(ctx, fp0, fd); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "luxc1"; - break; - case OPC_SWXC1: - check_cop1x(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - gen_load_fpr32(ctx, fp0, fs); - tcg_gen_qemu_st_i32(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEUL); - tcg_temp_free_i32(tcg_ctx, fp0); - } - opn = "swxc1"; - store = 1; - break; - case OPC_SDXC1: - check_cop1x(ctx); - check_cp1_registers(ctx, fs); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - tcg_gen_qemu_st_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "sdxc1"; - store = 1; - break; - case OPC_SUXC1: - check_cp1_64bitmode(ctx); - tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x7); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - gen_load_fpr64(ctx, fp0, fs); - tcg_gen_qemu_st_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ); - tcg_temp_free_i64(tcg_ctx, fp0); - } - opn = "suxc1"; - store = 1; - break; - } - tcg_temp_free(tcg_ctx, t0); - (void)opn; (void)store; /* avoid compiler warnings */ - MIPS_DEBUG("%s %s, %s(%s)", opn, fregnames[store ? 
fs : fd], - regnames[index], regnames[base]); -} - -static void gen_flt3_arith (DisasContext *ctx, uint32_t opc, - int fd, int fr, int fs, int ft) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *opn = "flt3_arith"; - - switch (opc) { - case OPC_ALNV_PS: - check_cp1_64bitmode(ctx); - { - TCGv t0 = tcg_temp_local_new(tcg_ctx); - TCGv_i32 fp = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fph = tcg_temp_new_i32(tcg_ctx); - int l1 = gen_new_label(tcg_ctx); - int l2 = gen_new_label(tcg_ctx); - - gen_load_gpr(ctx, t0, fr); - tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x7); - - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, 0, l1); - gen_load_fpr32(ctx, fp, fs); - gen_load_fpr32h(ctx, fph, fs); - gen_store_fpr32(ctx, fp, fd); - gen_store_fpr32h(ctx, fph, fd); - tcg_gen_br(tcg_ctx, l2); - gen_set_label(tcg_ctx, l1); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, 4, l2); - tcg_temp_free(tcg_ctx, t0); -#ifdef TARGET_WORDS_BIGENDIAN - gen_load_fpr32(ctx, fp, fs); - gen_load_fpr32h(ctx, fph, ft); - gen_store_fpr32h(ctx, fp, fd); - gen_store_fpr32(ctx, fph, fd); -#else - gen_load_fpr32h(ctx, fph, fs); - gen_load_fpr32(ctx, fp, ft); - gen_store_fpr32(ctx, fph, fd); - gen_store_fpr32h(ctx, fp, fd); -#endif - gen_set_label(tcg_ctx, l2); - tcg_temp_free_i32(tcg_ctx, fp); - tcg_temp_free_i32(tcg_ctx, fph); - } - opn = "alnv.ps"; - break; - case OPC_MADD_S: - check_cop1x(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_load_fpr32(ctx, fp2, fr); - gen_helper_float_madd_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i32(tcg_ctx, fp0); - tcg_temp_free_i32(tcg_ctx, fp1); - gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(tcg_ctx, fp2); - } - opn = "madd.s"; - break; - case OPC_MADD_D: - check_cop1x(ctx); - check_cp1_registers(ctx, fd | fs | ft | fr); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fr); - gen_helper_float_madd_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(tcg_ctx, fp0); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - } - opn = "madd.d"; - break; - case OPC_MADD_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fr); - gen_helper_float_madd_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(tcg_ctx, fp0); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - } - opn = "madd.ps"; - break; - case OPC_MSUB_S: - check_cop1x(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_load_fpr32(ctx, fp2, fr); - gen_helper_float_msub_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i32(tcg_ctx, fp0); - tcg_temp_free_i32(tcg_ctx, fp1); - gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(tcg_ctx, fp2); - } - opn = "msub.s"; - break; - case OPC_MSUB_D: - check_cop1x(ctx); - check_cp1_registers(ctx, fd | fs | ft | fr); - { - 
TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fr); - gen_helper_float_msub_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(tcg_ctx, fp0); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - } - opn = "msub.d"; - break; - case OPC_MSUB_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fr); - gen_helper_float_msub_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(tcg_ctx, fp0); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - } - opn = "msub.ps"; - break; - case OPC_NMADD_S: - check_cop1x(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_load_fpr32(ctx, fp2, fr); - gen_helper_float_nmadd_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i32(tcg_ctx, fp0); - tcg_temp_free_i32(tcg_ctx, fp1); - gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(tcg_ctx, fp2); - } - opn = "nmadd.s"; - break; - case OPC_NMADD_D: - check_cop1x(ctx); - check_cp1_registers(ctx, fd | fs | ft | fr); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fr); - gen_helper_float_nmadd_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(tcg_ctx, fp0); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - } - opn = "nmadd.d"; - break; - case OPC_NMADD_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fr); - gen_helper_float_nmadd_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(tcg_ctx, fp0); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - } - opn = "nmadd.ps"; - break; - case OPC_NMSUB_S: - check_cop1x(ctx); - { - TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); - - gen_load_fpr32(ctx, fp0, fs); - gen_load_fpr32(ctx, fp1, ft); - gen_load_fpr32(ctx, fp2, fr); - gen_helper_float_nmsub_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i32(tcg_ctx, fp0); - tcg_temp_free_i32(tcg_ctx, fp1); - gen_store_fpr32(ctx, fp2, fd); - tcg_temp_free_i32(tcg_ctx, fp2); - } - opn = "nmsub.s"; - break; - case OPC_NMSUB_D: - check_cop1x(ctx); - check_cp1_registers(ctx, fd | fs | ft | fr); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fr); - gen_helper_float_nmsub_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - 
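/* Semantics of this COP1X fused family, per the MIPS architecture
 * manuals; fr rides in through fp2, which also receives the result:
 *
 *     madd.fmt  fd, fr, fs, ft   // fd =   (fs * ft) + fr
 *     msub.fmt  fd, fr, fs, ft   // fd =   (fs * ft) - fr
 *     nmadd.fmt fd, fr, fs, ft   // fd = -((fs * ft) + fr)
 *     nmsub.fmt fd, fr, fs, ft   // fd = -((fs * ft) - fr)
 */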
tcg_temp_free_i64(tcg_ctx, fp0); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - } - opn = "nmsub.d"; - break; - case OPC_NMSUB_PS: - check_cp1_64bitmode(ctx); - { - TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); - - gen_load_fpr64(ctx, fp0, fs); - gen_load_fpr64(ctx, fp1, ft); - gen_load_fpr64(ctx, fp2, fr); - gen_helper_float_nmsub_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); - tcg_temp_free_i64(tcg_ctx, fp0); - tcg_temp_free_i64(tcg_ctx, fp1); - gen_store_fpr64(ctx, fp2, fd); - tcg_temp_free_i64(tcg_ctx, fp2); - } - opn = "nmsub.ps"; - break; - default: - MIPS_INVAL(opn); - generate_exception (ctx, EXCP_RI); - return; - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s, %s, %s", opn, fregnames[fd], fregnames[fr], - fregnames[fs], fregnames[ft]); -} - -static void gen_rdhwr(DisasContext *ctx, int rt, int rd) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv t0; - -#if !defined(CONFIG_USER_ONLY) - /* The Linux kernel will emulate rdhwr if it's not supported natively. - Therefore only check the ISA in system mode. */ - check_insn(ctx, ISA_MIPS32R2); -#endif - t0 = tcg_temp_new(tcg_ctx); - - switch (rd) { - case 0: - save_cpu_state(ctx, 1); - gen_helper_rdhwr_cpunum(tcg_ctx, t0, tcg_ctx->cpu_env); - gen_store_gpr(tcg_ctx, t0, rt); - break; - case 1: - save_cpu_state(ctx, 1); - gen_helper_rdhwr_synci_step(tcg_ctx, t0, tcg_ctx->cpu_env); - gen_store_gpr(tcg_ctx, t0, rt); - break; - case 2: - save_cpu_state(ctx, 1); - gen_helper_rdhwr_cc(tcg_ctx, t0, tcg_ctx->cpu_env); - gen_store_gpr(tcg_ctx, t0, rt); - break; - case 3: - save_cpu_state(ctx, 1); - gen_helper_rdhwr_ccres(tcg_ctx, t0, tcg_ctx->cpu_env); - gen_store_gpr(tcg_ctx, t0, rt); - break; - case 29: -#if defined(CONFIG_USER_ONLY) - tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, - offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); - gen_store_gpr(tcg_ctx, t0, rt); - break; -#else - if ((ctx->hflags & MIPS_HFLAG_CP0) || - (ctx->hflags & MIPS_HFLAG_HWRENA_ULR)) { - tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, - offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); - gen_store_gpr(tcg_ctx, t0, rt); - } else { - generate_exception(ctx, EXCP_RI); - } - break; -#endif - default: /* Invalid */ - MIPS_INVAL("rdhwr"); - generate_exception(ctx, EXCP_RI); - break; - } - tcg_temp_free(tcg_ctx, t0); -} - -static void gen_branch(DisasContext *ctx, int insn_bytes) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - if (ctx->hflags & MIPS_HFLAG_BMASK) { - int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK; - /* Branches completion */ - ctx->hflags &= ~MIPS_HFLAG_BMASK; - ctx->bstate = BS_BRANCH; - save_cpu_state(ctx, 0); - /* FIXME: Need to clear can_do_io. 
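 *
 * For context, a sketch of the state machine around this point: a
 * branch records its kind in ctx->hflags (the MIPS_HFLAG_BMASK bits)
 * and its target in ctx->btarget; once the delay slot has been
 * translated, this function clears those bits, marks the block
 * finished (BS_BRANCH) and emits the goto_tb/exit_tb for the
 * recorded target.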
*/ - switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) { - case MIPS_HFLAG_FBNSLOT: - MIPS_DEBUG("forbidden slot"); - gen_goto_tb(ctx, 0, ctx->pc + insn_bytes); - break; - case MIPS_HFLAG_B: - /* unconditional branch */ - MIPS_DEBUG("unconditional branch"); - if (proc_hflags & MIPS_HFLAG_BX) { - tcg_gen_xori_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, MIPS_HFLAG_M16); - } - gen_goto_tb(ctx, 0, ctx->btarget); - break; - case MIPS_HFLAG_BL: - /* blikely taken case */ - MIPS_DEBUG("blikely branch taken"); - gen_goto_tb(ctx, 0, ctx->btarget); - break; - case MIPS_HFLAG_BC: - /* Conditional branch */ - MIPS_DEBUG("conditional branch"); - { - int l1 = gen_new_label(tcg_ctx); - - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->bcond, 0, l1); - gen_goto_tb(ctx, 1, ctx->pc + insn_bytes); - gen_set_label(tcg_ctx, l1); - gen_goto_tb(ctx, 0, ctx->btarget); - } - break; - case MIPS_HFLAG_BR: - /* unconditional branch to register */ - MIPS_DEBUG("branch to register"); - if (ctx->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); - - tcg_gen_andi_tl(tcg_ctx, t0, *(TCGv *)tcg_ctx->btarget, 0x1); - tcg_gen_trunc_tl_i32(tcg_ctx, t1, t0); - tcg_temp_free(tcg_ctx, t0); - tcg_gen_andi_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, ~(uint32_t)MIPS_HFLAG_M16); - tcg_gen_shli_i32(tcg_ctx, t1, t1, MIPS_HFLAG_M16_SHIFT); - tcg_gen_or_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, t1); - tcg_temp_free_i32(tcg_ctx, t1); - - tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_PC, *(TCGv *)tcg_ctx->btarget, ~(target_ulong)0x1); - } else { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_PC, *(TCGv *)tcg_ctx->btarget); - } - if (ctx->singlestep_enabled) { - save_cpu_state(ctx, 0); - gen_helper_0e0i(tcg_ctx, raise_exception, EXCP_DEBUG); - } - tcg_gen_exit_tb(tcg_ctx, 0); - break; - default: - MIPS_DEBUG("unknown branch"); - break; - } - } -} - -/* ISA extensions (ASEs) */ -/* MIPS16 extension to MIPS32 */ - -/* MIPS16 major opcodes */ -enum { - M16_OPC_ADDIUSP = 0x00, - M16_OPC_ADDIUPC = 0x01, - M16_OPC_B = 0x02, - M16_OPC_JAL = 0x03, - M16_OPC_BEQZ = 0x04, - M16_OPC_BNEQZ = 0x05, - M16_OPC_SHIFT = 0x06, - M16_OPC_LD = 0x07, - M16_OPC_RRIA = 0x08, - M16_OPC_ADDIU8 = 0x09, - M16_OPC_SLTI = 0x0a, - M16_OPC_SLTIU = 0x0b, - M16_OPC_I8 = 0x0c, - M16_OPC_LI = 0x0d, - M16_OPC_CMPI = 0x0e, - M16_OPC_SD = 0x0f, - M16_OPC_LB = 0x10, - M16_OPC_LH = 0x11, - M16_OPC_LWSP = 0x12, - M16_OPC_LW = 0x13, - M16_OPC_LBU = 0x14, - M16_OPC_LHU = 0x15, - M16_OPC_LWPC = 0x16, - M16_OPC_LWU = 0x17, - M16_OPC_SB = 0x18, - M16_OPC_SH = 0x19, - M16_OPC_SWSP = 0x1a, - M16_OPC_SW = 0x1b, - M16_OPC_RRR = 0x1c, - M16_OPC_RR = 0x1d, - M16_OPC_EXTEND = 0x1e, - M16_OPC_I64 = 0x1f -}; - -/* I8 funct field */ -enum { - I8_BTEQZ = 0x0, - I8_BTNEZ = 0x1, - I8_SWRASP = 0x2, - I8_ADJSP = 0x3, - I8_SVRS = 0x4, - I8_MOV32R = 0x5, - I8_MOVR32 = 0x7 -}; - -/* RRR f field */ -enum { - RRR_DADDU = 0x0, - RRR_ADDU = 0x1, - RRR_DSUBU = 0x2, - RRR_SUBU = 0x3 -}; - -/* RR funct field */ -enum { - RR_JR = 0x00, - RR_SDBBP = 0x01, - RR_SLT = 0x02, - RR_SLTU = 0x03, - RR_SLLV = 0x04, - RR_BREAK = 0x05, - RR_SRLV = 0x06, - RR_SRAV = 0x07, - RR_DSRL = 0x08, - RR_CMP = 0x0a, - RR_NEG = 0x0b, - RR_AND = 0x0c, - RR_OR = 0x0d, - RR_XOR = 0x0e, - RR_NOT = 0x0f, - RR_MFHI = 0x10, - RR_CNVT = 0x11, - RR_MFLO = 0x12, - RR_DSRA = 0x13, - RR_DSLLV = 0x14, - RR_DSRLV = 0x16, - RR_DSRAV = 0x17, - RR_MULT = 0x18, - RR_MULTU = 0x19, - RR_DIV = 0x1a, - RR_DIVU = 0x1b, - RR_DMULT = 0x1c, - RR_DMULTU = 0x1d, - RR_DDIV 
= 0x1e, - RR_DDIVU = 0x1f -}; - -/* I64 funct field */ -enum { - I64_LDSP = 0x0, - I64_SDSP = 0x1, - I64_SDRASP = 0x2, - I64_DADJSP = 0x3, - I64_LDPC = 0x4, - I64_DADDIU5 = 0x5, - I64_DADDIUPC = 0x6, - I64_DADDIUSP = 0x7 -}; - -/* RR ry field for CNVT */ -enum { - RR_RY_CNVT_ZEB = 0x0, - RR_RY_CNVT_ZEH = 0x1, - RR_RY_CNVT_ZEW = 0x2, - RR_RY_CNVT_SEB = 0x4, - RR_RY_CNVT_SEH = 0x5, - RR_RY_CNVT_SEW = 0x6, -}; - -static int xlat (int r) -{ - static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; - - return map[r]; -} - -static void gen_mips16_save (DisasContext *ctx, - int xsregs, int aregs, - int do_ra, int do_s0, int do_s1, - int framesize) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - int args, astatic; - - switch (aregs) { - case 0: - case 1: - case 2: - case 3: - case 11: - args = 0; - break; - case 4: - case 5: - case 6: - case 7: - args = 1; - break; - case 8: - case 9: - case 10: - args = 2; - break; - case 12: - case 13: - args = 3; - break; - case 14: - args = 4; - break; - default: - generate_exception(ctx, EXCP_RI); - return; - } - - switch (args) { - case 4: - gen_base_offset_addr(ctx, t0, 29, 12); - gen_load_gpr(ctx, t1, 7); - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); - /* Fall through */ - case 3: - gen_base_offset_addr(ctx, t0, 29, 8); - gen_load_gpr(ctx, t1, 6); - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); - /* Fall through */ - case 2: - gen_base_offset_addr(ctx, t0, 29, 4); - gen_load_gpr(ctx, t1, 5); - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); - /* Fall through */ - case 1: - gen_base_offset_addr(ctx, t0, 29, 0); - gen_load_gpr(ctx, t1, 4); - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); - } - - gen_load_gpr(ctx, t0, 29); - -#define DECR_AND_STORE(reg) do { \ - tcg_gen_subi_tl(tcg_ctx, t0, t0, 4); \ - gen_load_gpr(ctx, t1, reg); \ - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); \ - } while (0) - - if (do_ra) { - DECR_AND_STORE(31); - } - - switch (xsregs) { - case 7: - DECR_AND_STORE(30); - /* Fall through */ - case 6: - DECR_AND_STORE(23); - /* Fall through */ - case 5: - DECR_AND_STORE(22); - /* Fall through */ - case 4: - DECR_AND_STORE(21); - /* Fall through */ - case 3: - DECR_AND_STORE(20); - /* Fall through */ - case 2: - DECR_AND_STORE(19); - /* Fall through */ - case 1: - DECR_AND_STORE(18); - } - - if (do_s1) { - DECR_AND_STORE(17); - } - if (do_s0) { - DECR_AND_STORE(16); - } - - switch (aregs) { - case 0: - case 4: - case 8: - case 12: - case 14: - astatic = 0; - break; - case 1: - case 5: - case 9: - case 13: - astatic = 1; - break; - case 2: - case 6: - case 10: - astatic = 2; - break; - case 3: - case 7: - astatic = 3; - break; - case 11: - astatic = 4; - break; - default: - generate_exception(ctx, EXCP_RI); - return; - } - - if (astatic > 0) { - DECR_AND_STORE(7); - if (astatic > 1) { - DECR_AND_STORE(6); - if (astatic > 2) { - DECR_AND_STORE(5); - if (astatic > 3) { - DECR_AND_STORE(4); - } - } - } - } -#undef DECR_AND_STORE - - tcg_gen_subi_tl(tcg_ctx, *cpu_gpr[29], *cpu_gpr[29], framesize); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -static void gen_mips16_restore (DisasContext *ctx, - int xsregs, int aregs, - int do_ra, int do_s0, int do_s1, - int framesize) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - int astatic; - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - - 
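/* Mirror image of gen_mips16_save() above: t0 starts at sp + framesize
 * and DECR_AND_LOAD walks back down in exactly the order the SAVE side
 * stored (ra, then s8/s7..s2 as selected by xsregs, then s1 and s0,
 * then the static argument registers a3..a0), before the frame is
 * popped with sp += framesize at the end. */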
tcg_gen_addi_tl(tcg_ctx, t0, *cpu_gpr[29], framesize); - -#define DECR_AND_LOAD(reg) do { \ - tcg_gen_subi_tl(tcg_ctx, t0, t0, 4); \ - tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TESL); \ - gen_store_gpr(tcg_ctx, t1, reg); \ - } while (0) - - if (do_ra) { - DECR_AND_LOAD(31); - } - - switch (xsregs) { - case 7: - DECR_AND_LOAD(30); - /* Fall through */ - case 6: - DECR_AND_LOAD(23); - /* Fall through */ - case 5: - DECR_AND_LOAD(22); - /* Fall through */ - case 4: - DECR_AND_LOAD(21); - /* Fall through */ - case 3: - DECR_AND_LOAD(20); - /* Fall through */ - case 2: - DECR_AND_LOAD(19); - /* Fall through */ - case 1: - DECR_AND_LOAD(18); - } - - if (do_s1) { - DECR_AND_LOAD(17); - } - if (do_s0) { - DECR_AND_LOAD(16); - } - - switch (aregs) { - case 0: - case 4: - case 8: - case 12: - case 14: - astatic = 0; - break; - case 1: - case 5: - case 9: - case 13: - astatic = 1; - break; - case 2: - case 6: - case 10: - astatic = 2; - break; - case 3: - case 7: - astatic = 3; - break; - case 11: - astatic = 4; - break; - default: - generate_exception(ctx, EXCP_RI); - return; - } - - if (astatic > 0) { - DECR_AND_LOAD(7); - if (astatic > 1) { - DECR_AND_LOAD(6); - if (astatic > 2) { - DECR_AND_LOAD(5); - if (astatic > 3) { - DECR_AND_LOAD(4); - } - } - } - } -#undef DECR_AND_LOAD - - tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[29], *cpu_gpr[29], framesize); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -static void gen_addiupc (DisasContext *ctx, int rx, int imm, - int is_64_bit, int extended) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - TCGv t0; - - if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) { - generate_exception(ctx, EXCP_RI); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - - tcg_gen_movi_tl(tcg_ctx, t0, pc_relative_pc(ctx)); - tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rx], t0, imm); - if (!is_64_bit) { - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); - } - - tcg_temp_free(tcg_ctx, t0); -} - -#if defined(TARGET_MIPS64) -static void decode_i64_mips16 (DisasContext *ctx, - int ry, int funct, int16_t offset, - int extended) -{ - switch (funct) { - case I64_LDSP: - check_mips_64(ctx); - offset = extended ? offset : offset << 3; - gen_ld(ctx, OPC_LD, ry, 29, offset); - break; - case I64_SDSP: - check_mips_64(ctx); - offset = extended ? offset : offset << 3; - gen_st(ctx, OPC_SD, ry, 29, offset); - break; - case I64_SDRASP: - check_mips_64(ctx); - offset = extended ? offset : (ctx->opcode & 0xff) << 3; - gen_st(ctx, OPC_SD, 31, 29, offset); - break; - case I64_DADJSP: - check_mips_64(ctx); - offset = extended ? offset : ((int8_t)ctx->opcode) << 3; - gen_arith_imm(ctx, OPC_DADDIU, 29, 29, offset); - break; - case I64_LDPC: - if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) { - generate_exception(ctx, EXCP_RI); - } else { - offset = extended ? offset : offset << 3; - gen_ld(ctx, OPC_LDPC, ry, 0, offset); - } - break; - case I64_DADDIU5: - check_mips_64(ctx); - offset = extended ? offset : ((int8_t)(offset << 3)) >> 3; - gen_arith_imm(ctx, OPC_DADDIU, ry, ry, offset); - break; - case I64_DADDIUPC: - check_mips_64(ctx); - offset = extended ? offset : offset << 2; - gen_addiupc(ctx, ry, offset, 1, extended); - break; - case I64_DADDIUSP: - check_mips_64(ctx); - offset = extended ? 
offset : offset << 2; - gen_arith_imm(ctx, OPC_DADDIU, ry, 29, offset); - break; - } -} -#endif - -static int decode_extended_mips16_opc (CPUMIPSState *env, DisasContext *ctx) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - int extend = cpu_lduw_code(env, ctx->pc + 2); - int op, rx, ry, funct, sa; - int16_t imm, offset; - - ctx->opcode = (ctx->opcode << 16) | extend; - op = (ctx->opcode >> 11) & 0x1f; - sa = (ctx->opcode >> 22) & 0x1f; - funct = (ctx->opcode >> 8) & 0x7; - rx = xlat((ctx->opcode >> 8) & 0x7); - ry = xlat((ctx->opcode >> 5) & 0x7); - offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11 - | ((ctx->opcode >> 21) & 0x3f) << 5 - | (ctx->opcode & 0x1f)); - - /* The extended opcodes cleverly reuse the opcodes from their 16-bit - counterparts. */ - switch (op) { - case M16_OPC_ADDIUSP: - gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm); - break; - case M16_OPC_ADDIUPC: - gen_addiupc(ctx, rx, imm, 0, 1); - break; - case M16_OPC_B: - gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, (uint32_t)offset << 1, 0); - /* No delay slot, so just process as a normal instruction */ - break; - case M16_OPC_BEQZ: - gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, (uint16_t)offset << 1, 0); - /* No delay slot, so just process as a normal instruction */ - break; - case M16_OPC_BNEQZ: - gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, (uint16_t)offset << 1, 0); - /* No delay slot, so just process as a normal instruction */ - break; - case M16_OPC_SHIFT: - switch (ctx->opcode & 0x3) { - case 0x0: - gen_shift_imm(ctx, OPC_SLL, rx, ry, sa); - break; - case 0x1: -#if defined(TARGET_MIPS64) - check_mips_64(ctx); - gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa); -#else - generate_exception(ctx, EXCP_RI); -#endif - break; - case 0x2: - gen_shift_imm(ctx, OPC_SRL, rx, ry, sa); - break; - case 0x3: - gen_shift_imm(ctx, OPC_SRA, rx, ry, sa); - break; - } - break; -#if defined(TARGET_MIPS64) - case M16_OPC_LD: - check_mips_64(ctx); - gen_ld(ctx, OPC_LD, ry, rx, offset); - break; -#endif - case M16_OPC_RRIA: - imm = ctx->opcode & 0xf; - imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4; - imm = imm | ((ctx->opcode >> 16) & 0xf) << 11; - imm = (int16_t) (imm << 1) >> 1; - if ((ctx->opcode >> 4) & 0x1) { -#if defined(TARGET_MIPS64) - check_mips_64(ctx); - gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm); -#else - generate_exception(ctx, EXCP_RI); -#endif - } else { - gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm); - } - break; - case M16_OPC_ADDIU8: - gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm); - break; - case M16_OPC_SLTI: - gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm); - break; - case M16_OPC_SLTIU: - gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm); - break; - case M16_OPC_I8: - switch (funct) { - case I8_BTEQZ: - gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, (uint16_t)offset << 1, 0); - break; - case I8_BTNEZ: - gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, (uint16_t)offset << 1, 0); - break; - case I8_SWRASP: - gen_st(ctx, OPC_SW, 31, 29, imm); - break; - case I8_ADJSP: - gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm); - break; - case I8_SVRS: - { - int xsregs = (ctx->opcode >> 24) & 0x7; - int aregs = (ctx->opcode >> 16) & 0xf; - int do_ra = (ctx->opcode >> 6) & 0x1; - int do_s0 = (ctx->opcode >> 5) & 0x1; - int do_s1 = (ctx->opcode >> 4) & 0x1; - int framesize = (((ctx->opcode >> 20) & 0xf) << 4 - | (ctx->opcode & 0xf)) << 3; - - if (ctx->opcode & (1 << 7)) { - gen_mips16_save(ctx, xsregs, aregs, - do_ra, do_s0, do_s1, - framesize); - } else { - gen_mips16_restore(ctx, xsregs, aregs, - do_ra, do_s0, do_s1, - framesize); 
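/* In this extended SVRS form the 8-bit frame size is split across the
 * EXTEND halfword and counts 8-byte units, hence the reconstruction
 * decoded above:
 *
 *     framesize = ((((ctx->opcode >> 20) & 0xf) << 4)
 *                  | (ctx->opcode & 0xf)) << 3;   // 0..2040 bytes
 *
 * The plain 16-bit form, handled later in decode_mips16_opc(), only
 * has the low 4-bit field, with 0 standing for a 128-byte frame. */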
- } - } - break; - default: - generate_exception(ctx, EXCP_RI); - break; - } - break; - case M16_OPC_LI: - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rx], (uint16_t) imm); - break; - case M16_OPC_CMPI: - tcg_gen_xori_tl(tcg_ctx, *cpu_gpr[24], *cpu_gpr[rx], (uint16_t) imm); - break; -#if defined(TARGET_MIPS64) - case M16_OPC_SD: - gen_st(ctx, OPC_SD, ry, rx, offset); - break; -#endif - case M16_OPC_LB: - gen_ld(ctx, OPC_LB, ry, rx, offset); - break; - case M16_OPC_LH: - gen_ld(ctx, OPC_LH, ry, rx, offset); - break; - case M16_OPC_LWSP: - gen_ld(ctx, OPC_LW, rx, 29, offset); - break; - case M16_OPC_LW: - gen_ld(ctx, OPC_LW, ry, rx, offset); - break; - case M16_OPC_LBU: - gen_ld(ctx, OPC_LBU, ry, rx, offset); - break; - case M16_OPC_LHU: - gen_ld(ctx, OPC_LHU, ry, rx, offset); - break; - case M16_OPC_LWPC: - gen_ld(ctx, OPC_LWPC, rx, 0, offset); - break; -#if defined(TARGET_MIPS64) - case M16_OPC_LWU: - gen_ld(ctx, OPC_LWU, ry, rx, offset); - break; -#endif - case M16_OPC_SB: - gen_st(ctx, OPC_SB, ry, rx, offset); - break; - case M16_OPC_SH: - gen_st(ctx, OPC_SH, ry, rx, offset); - break; - case M16_OPC_SWSP: - gen_st(ctx, OPC_SW, rx, 29, offset); - break; - case M16_OPC_SW: - gen_st(ctx, OPC_SW, ry, rx, offset); - break; -#if defined(TARGET_MIPS64) - case M16_OPC_I64: - decode_i64_mips16(ctx, ry, funct, offset, 1); - break; -#endif - default: - generate_exception(ctx, EXCP_RI); - break; - } - - return 4; -} - -static int decode_mips16_opc (CPUMIPSState *env, DisasContext *ctx, bool *insn_need_patch) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - int rx, ry; - int sa; - int op, cnvt_op, op1, offset; - int funct; - int n_bytes; - - op = (ctx->opcode >> 11) & 0x1f; - sa = (ctx->opcode >> 2) & 0x7; - sa = sa == 0 ? 8 : sa; - rx = xlat((ctx->opcode >> 8) & 0x7); - cnvt_op = (ctx->opcode >> 5) & 0x7; - ry = xlat((ctx->opcode >> 5) & 0x7); - op1 = offset = ctx->opcode & 0x1f; - - n_bytes = 2; - - // Unicorn: trace this instruction on request - if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, ctx->pc)) { - gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_CODE_IDX, env->uc, ctx->pc); - *insn_need_patch = true; - // the callback might want to stop emulation immediately - check_exit_request(tcg_ctx); - } - - switch (op) { - case M16_OPC_ADDIUSP: - { - int16_t imm = ((uint8_t) ctx->opcode) << 2; - - gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm); - } - break; - case M16_OPC_ADDIUPC: - gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0); - break; - case M16_OPC_B: - offset = (ctx->opcode & 0x7ff) << 1; - offset = (int16_t)(offset << 4) >> 4; - gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset, 0); - /* No delay slot, so just process as a normal instruction */ - break; - case M16_OPC_JAL: - offset = cpu_lduw_code(env, ctx->pc + 2); - offset = (((ctx->opcode & 0x1f) << 21) - | ((ctx->opcode >> 5) & 0x1f) << 16 - | offset) << 2; - op = ((ctx->opcode >> 10) & 0x1) ? 
OPC_JALX : OPC_JAL; - gen_compute_branch(ctx, op, 4, rx, ry, offset, 2); - n_bytes = 4; - break; - case M16_OPC_BEQZ: - gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0, - ((uint8_t)ctx->opcode) << 1, 0); - /* No delay slot, so just process as a normal instruction */ - break; - case M16_OPC_BNEQZ: - gen_compute_branch(ctx, OPC_BNE, 2, rx, 0, - ((uint8_t)ctx->opcode) << 1, 0); - /* No delay slot, so just process as a normal instruction */ - break; - case M16_OPC_SHIFT: - switch (ctx->opcode & 0x3) { - case 0x0: - gen_shift_imm(ctx, OPC_SLL, rx, ry, sa); - break; - case 0x1: -#if defined(TARGET_MIPS64) - check_mips_64(ctx); - gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa); -#else - generate_exception(ctx, EXCP_RI); -#endif - break; - case 0x2: - gen_shift_imm(ctx, OPC_SRL, rx, ry, sa); - break; - case 0x3: - gen_shift_imm(ctx, OPC_SRA, rx, ry, sa); - break; - } - break; -#if defined(TARGET_MIPS64) - case M16_OPC_LD: - check_mips_64(ctx); - gen_ld(ctx, OPC_LD, ry, rx, offset << 3); - break; -#endif - case M16_OPC_RRIA: - { - int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4; - - if ((ctx->opcode >> 4) & 1) { -#if defined(TARGET_MIPS64) - check_mips_64(ctx); - gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm); -#else - generate_exception(ctx, EXCP_RI); -#endif - } else { - gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm); - } - } - break; - case M16_OPC_ADDIU8: - { - int16_t imm = (int8_t) ctx->opcode; - - gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm); - } - break; - case M16_OPC_SLTI: - { - int16_t imm = (uint8_t) ctx->opcode; - gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm); - } - break; - case M16_OPC_SLTIU: - { - int16_t imm = (uint8_t) ctx->opcode; - gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm); - } - break; - case M16_OPC_I8: - { - int reg32; - - funct = (ctx->opcode >> 8) & 0x7; - switch (funct) { - case I8_BTEQZ: - gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0, - ((uint8_t)ctx->opcode) << 1, 0); - break; - case I8_BTNEZ: - gen_compute_branch(ctx, OPC_BNE, 2, 24, 0, - ((uint8_t)ctx->opcode) << 1, 0); - break; - case I8_SWRASP: - gen_st(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2); - break; - case I8_ADJSP: - gen_arith_imm(ctx, OPC_ADDIU, 29, 29, - ((uint8_t)ctx->opcode) << 3); - break; - case I8_SVRS: - { - int do_ra = ctx->opcode & (1 << 6); - int do_s0 = ctx->opcode & (1 << 5); - int do_s1 = ctx->opcode & (1 << 4); - int framesize = ctx->opcode & 0xf; - - if (framesize == 0) { - framesize = 128; - } else { - framesize = framesize << 3; - } - - if (ctx->opcode & (1 << 7)) { - gen_mips16_save(ctx, 0, 0, - do_ra, do_s0, do_s1, framesize); - } else { - gen_mips16_restore(ctx, 0, 0, - do_ra, do_s0, do_s1, framesize); - } - } - break; - case I8_MOV32R: - { - int rz = xlat(ctx->opcode & 0x7); - - reg32 = (((ctx->opcode >> 3) & 0x3) << 3) | - ((ctx->opcode >> 5) & 0x7); - gen_arith(ctx, OPC_ADDU, reg32, rz, 0); - } - break; - case I8_MOVR32: - reg32 = ctx->opcode & 0x1f; - gen_arith(ctx, OPC_ADDU, ry, reg32, 0); - break; - default: - generate_exception(ctx, EXCP_RI); - break; - } - } - break; - case M16_OPC_LI: - { - int16_t imm = (uint8_t) ctx->opcode; - - gen_arith_imm(ctx, OPC_ADDIU, rx, 0, imm); - } - break; - case M16_OPC_CMPI: - { - int16_t imm = (uint8_t) ctx->opcode; - gen_logic_imm(ctx, OPC_XORI, 24, rx, imm); - } - break; -#if defined(TARGET_MIPS64) - case M16_OPC_SD: - check_mips_64(ctx); - gen_st(ctx, OPC_SD, ry, rx, offset << 3); - break; -#endif - case M16_OPC_LB: - gen_ld(ctx, OPC_LB, ry, rx, offset); - break; - case M16_OPC_LH: - gen_ld(ctx, OPC_LH, ry, rx, offset << 1); - break; - case M16_OPC_LWSP: - 
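/* MIPS16 load/store immediates are stored in units of the access size,
 * so the decoder rescales them: the 5-bit offset is used as-is for
 * byte ops, << 1 for halfwords and << 2 for words, and the sp/pc
 * relative forms scale a full 8-bit immediate the same way (e.g. the
 * lwsp case below uses ((uint8_t)ctx->opcode) << 2).  The extended
 * encodings handled earlier carry a real 16-bit offset and need no
 * scaling. */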
gen_ld(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2); - break; - case M16_OPC_LW: - gen_ld(ctx, OPC_LW, ry, rx, offset << 2); - break; - case M16_OPC_LBU: - gen_ld(ctx, OPC_LBU, ry, rx, offset); - break; - case M16_OPC_LHU: - gen_ld(ctx, OPC_LHU, ry, rx, offset << 1); - break; - case M16_OPC_LWPC: - gen_ld(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2); - break; -#if defined (TARGET_MIPS64) - case M16_OPC_LWU: - check_mips_64(ctx); - gen_ld(ctx, OPC_LWU, ry, rx, offset << 2); - break; -#endif - case M16_OPC_SB: - gen_st(ctx, OPC_SB, ry, rx, offset); - break; - case M16_OPC_SH: - gen_st(ctx, OPC_SH, ry, rx, offset << 1); - break; - case M16_OPC_SWSP: - gen_st(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2); - break; - case M16_OPC_SW: - gen_st(ctx, OPC_SW, ry, rx, offset << 2); - break; - case M16_OPC_RRR: - { - int rz = xlat((ctx->opcode >> 2) & 0x7); - int mips32_op; - - switch (ctx->opcode & 0x3) { - case RRR_ADDU: - mips32_op = OPC_ADDU; - break; - case RRR_SUBU: - mips32_op = OPC_SUBU; - break; -#if defined(TARGET_MIPS64) - case RRR_DADDU: - mips32_op = OPC_DADDU; - check_mips_64(ctx); - break; - case RRR_DSUBU: - mips32_op = OPC_DSUBU; - check_mips_64(ctx); - break; -#endif - default: - generate_exception(ctx, EXCP_RI); - goto done; - } - - gen_arith(ctx, mips32_op, rz, rx, ry); - done: - ; - } - break; - case M16_OPC_RR: - switch (op1) { - case RR_JR: - { - int nd = (ctx->opcode >> 7) & 0x1; - int link = (ctx->opcode >> 6) & 0x1; - int ra = (ctx->opcode >> 5) & 0x1; - - if (link) { - op = OPC_JALR; - } else { - op = OPC_JR; - } - - gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0, - (nd ? 0 : 2)); - } - break; - case RR_SDBBP: - /* XXX: not clear which exception should be raised - * when in debug mode... - */ - check_insn(ctx, ISA_MIPS32); - if (!(ctx->hflags & MIPS_HFLAG_DM)) { - generate_exception(ctx, EXCP_DBp); - } else { - generate_exception(ctx, EXCP_DBp); - } - break; - case RR_SLT: - gen_slt(ctx, OPC_SLT, 24, rx, ry); - break; - case RR_SLTU: - gen_slt(ctx, OPC_SLTU, 24, rx, ry); - break; - case RR_BREAK: - generate_exception(ctx, EXCP_BREAK); - break; - case RR_SLLV: - gen_shift(ctx, OPC_SLLV, ry, rx, ry); - break; - case RR_SRLV: - gen_shift(ctx, OPC_SRLV, ry, rx, ry); - break; - case RR_SRAV: - gen_shift(ctx, OPC_SRAV, ry, rx, ry); - break; -#if defined (TARGET_MIPS64) - case RR_DSRL: - check_mips_64(ctx); - gen_shift_imm(ctx, OPC_DSRL, ry, ry, sa); - break; -#endif - case RR_CMP: - gen_logic(ctx, OPC_XOR, 24, rx, ry); - break; - case RR_NEG: - gen_arith(ctx, OPC_SUBU, rx, 0, ry); - break; - case RR_AND: - gen_logic(ctx, OPC_AND, rx, rx, ry); - break; - case RR_OR: - gen_logic(ctx, OPC_OR, rx, rx, ry); - break; - case RR_XOR: - gen_logic(ctx, OPC_XOR, rx, rx, ry); - break; - case RR_NOT: - gen_logic(ctx, OPC_NOR, rx, ry, 0); - break; - case RR_MFHI: - gen_HILO(ctx, OPC_MFHI, 0, rx); - break; - case RR_CNVT: - switch (cnvt_op) { - case RR_RY_CNVT_ZEB: - tcg_gen_ext8u_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); - break; - case RR_RY_CNVT_ZEH: - tcg_gen_ext16u_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); - break; - case RR_RY_CNVT_SEB: - tcg_gen_ext8s_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); - break; - case RR_RY_CNVT_SEH: - tcg_gen_ext16s_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); - break; -#if defined (TARGET_MIPS64) - case RR_RY_CNVT_ZEW: - check_mips_64(ctx); - tcg_gen_ext32u_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); - break; - case RR_RY_CNVT_SEW: - check_mips_64(ctx); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); - break; -#endif - default: - 
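-            /* cnvt_op patterns with no sign- or zero-extension defined
-             * above are reserved encodings; EXCP_RI is how the core
-             * reports a Reserved Instruction fault for them.
-             */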
generate_exception(ctx, EXCP_RI); - break; - } - break; - case RR_MFLO: - gen_HILO(ctx, OPC_MFLO, 0, rx); - break; -#if defined (TARGET_MIPS64) - case RR_DSRA: - check_mips_64(ctx); - gen_shift_imm(ctx, OPC_DSRA, ry, ry, sa); - break; - case RR_DSLLV: - check_mips_64(ctx); - gen_shift(ctx, OPC_DSLLV, ry, rx, ry); - break; - case RR_DSRLV: - check_mips_64(ctx); - gen_shift(ctx, OPC_DSRLV, ry, rx, ry); - break; - case RR_DSRAV: - check_mips_64(ctx); - gen_shift(ctx, OPC_DSRAV, ry, rx, ry); - break; -#endif - case RR_MULT: - gen_muldiv(ctx, OPC_MULT, 0, rx, ry); - break; - case RR_MULTU: - gen_muldiv(ctx, OPC_MULTU, 0, rx, ry); - break; - case RR_DIV: - gen_muldiv(ctx, OPC_DIV, 0, rx, ry); - break; - case RR_DIVU: - gen_muldiv(ctx, OPC_DIVU, 0, rx, ry); - break; -#if defined (TARGET_MIPS64) - case RR_DMULT: - check_mips_64(ctx); - gen_muldiv(ctx, OPC_DMULT, 0, rx, ry); - break; - case RR_DMULTU: - check_mips_64(ctx); - gen_muldiv(ctx, OPC_DMULTU, 0, rx, ry); - break; - case RR_DDIV: - check_mips_64(ctx); - gen_muldiv(ctx, OPC_DDIV, 0, rx, ry); - break; - case RR_DDIVU: - check_mips_64(ctx); - gen_muldiv(ctx, OPC_DDIVU, 0, rx, ry); - break; -#endif - default: - generate_exception(ctx, EXCP_RI); - break; - } - break; - case M16_OPC_EXTEND: - decode_extended_mips16_opc(env, ctx); - n_bytes = 4; - break; -#if defined(TARGET_MIPS64) - case M16_OPC_I64: - funct = (ctx->opcode >> 8) & 0x7; - decode_i64_mips16(ctx, ry, funct, offset, 0); - break; -#endif - default: - generate_exception(ctx, EXCP_RI); - break; - } - - return n_bytes; -} - -/* microMIPS extension to MIPS32/MIPS64 */ - -/* - * microMIPS32/microMIPS64 major opcodes - * - * 1. MIPS Architecture for Programmers Volume II-B: - * The microMIPS32 Instruction Set (Revision 3.05) - * - * Table 6.2 microMIPS32 Encoding of Major Opcode Field - * - * 2. MIPS Architecture For Programmers Volume II-A: - * The MIPS64 Instruction Set (Revision 3.51) - */ - -enum { - POOL32A = 0x00, - POOL16A = 0x01, - LBU16 = 0x02, - MOVE16 = 0x03, - ADDI32 = 0x04, - LBU32 = 0x05, - SB32 = 0x06, - LB32 = 0x07, - - POOL32B = 0x08, - POOL16B = 0x09, - LHU16 = 0x0a, - ANDI16 = 0x0b, - ADDIU32 = 0x0c, - LHU32 = 0x0d, - SH32 = 0x0e, - LH32 = 0x0f, - - POOL32I = 0x10, - POOL16C = 0x11, - LWSP16 = 0x12, - POOL16D = 0x13, - ORI32 = 0x14, - POOL32F = 0x15, - POOL32S = 0x16, /* MIPS64 */ - DADDIU32 = 0x17, /* MIPS64 */ - - /* 0x1f is reserved */ - POOL32C = 0x18, - LWGP16 = 0x19, - LW16 = 0x1a, - POOL16E = 0x1b, - XORI32 = 0x1c, - JALS32 = 0x1d, - ADDIUPC = 0x1e, - - /* 0x20 is reserved */ - RES_20 = 0x20, - POOL16F = 0x21, - SB16 = 0x22, - BEQZ16 = 0x23, - SLTI32 = 0x24, - BEQ32 = 0x25, - SWC132 = 0x26, - LWC132 = 0x27, - - /* 0x28 and 0x29 are reserved */ - RES_28 = 0x28, - RES_29 = 0x29, - SH16 = 0x2a, - BNEZ16 = 0x2b, - SLTIU32 = 0x2c, - BNE32 = 0x2d, - SDC132 = 0x2e, - LDC132 = 0x2f, - - /* 0x30 and 0x31 are reserved */ - RES_30 = 0x30, - RES_31 = 0x31, - SWSP16 = 0x32, - B16 = 0x33, - ANDI32 = 0x34, - J32 = 0x35, - SD32 = 0x36, /* MIPS64 */ - LD32 = 0x37, /* MIPS64 */ - - /* 0x38 and 0x39 are reserved */ - RES_38 = 0x38, - RES_39 = 0x39, - SW16 = 0x3a, - LI16 = 0x3b, - JALX32 = 0x3c, - JAL32 = 0x3d, - SW32 = 0x3e, - LW32 = 0x3f -}; - -/* POOL32A encoding of minor opcode field */ - -enum { - /* These opcodes are distinguished only by bits 9..6; those bits are - * what are recorded below. 
*/ - SLL32 = 0x0, - SRL32 = 0x1, - SRA = 0x2, - ROTR = 0x3, - - SLLV = 0x0, - SRLV = 0x1, - SRAV = 0x2, - ROTRV = 0x3, - ADD = 0x4, - ADDU32 = 0x5, - SUB = 0x6, - SUBU32 = 0x7, - MUL = 0x8, - AND = 0x9, - OR32 = 0xa, - NOR = 0xb, - XOR32 = 0xc, - SLT = 0xd, - SLTU = 0xe, - - MOVN = 0x0, - MOVZ = 0x1, - LWXS = 0x4, - - /* The following can be distinguished by their lower 6 bits. */ - INS = 0x0c, - EXT = 0x2c, - POOL32AXF = 0x3c -}; - -/* POOL32AXF encoding of minor opcode field extension */ - -/* - * 1. MIPS Architecture for Programmers Volume II-B: - * The microMIPS32 Instruction Set (Revision 3.05) - * - * Table 6.5 POOL32Axf Encoding of Minor Opcode Extension Field - * - * 2. MIPS Architecture for Programmers VolumeIV-e: - * The MIPS DSP Application-Specific Extension - * to the microMIPS32 Architecture (Revision 2.34) - * - * Table 5.5 POOL32Axf Encoding of Minor Opcode Extension Field - */ - -enum { - /* bits 11..6 */ - TEQ = 0x00, - TGE = 0x08, - TGEU = 0x10, - TLT = 0x20, - TLTU = 0x28, - TNE = 0x30, - - MFC0 = 0x03, - MTC0 = 0x0b, - - /* begin of microMIPS32 DSP */ - - /* bits 13..12 for 0x01 */ - MFHI_ACC = 0x0, - MFLO_ACC = 0x1, - MTHI_ACC = 0x2, - MTLO_ACC = 0x3, - - /* bits 13..12 for 0x2a */ - MADD_ACC = 0x0, - MADDU_ACC = 0x1, - MSUB_ACC = 0x2, - MSUBU_ACC = 0x3, - - /* bits 13..12 for 0x32 */ - MULT_ACC = 0x0, - MULTU_ACC = 0x1, - - /* end of microMIPS32 DSP */ - - /* bits 15..12 for 0x2c */ - SEB = 0x2, - SEH = 0x3, - CLO = 0x4, - CLZ = 0x5, - RDHWR = 0x6, - WSBH = 0x7, - MULT = 0x8, - MULTU = 0x9, - DIV = 0xa, - DIVU = 0xb, - MADD = 0xc, - MADDU = 0xd, - MSUB = 0xe, - MSUBU = 0xf, - - /* bits 15..12 for 0x34 */ - MFC2 = 0x4, - MTC2 = 0x5, - MFHC2 = 0x8, - MTHC2 = 0x9, - CFC2 = 0xc, - CTC2 = 0xd, - - /* bits 15..12 for 0x3c */ - JALR = 0x0, - JR = 0x0, /* alias */ - JALR_HB = 0x1, - JALRS = 0x4, - JALRS_HB = 0x5, - - /* bits 15..12 for 0x05 */ - RDPGPR = 0xe, - WRPGPR = 0xf, - - /* bits 15..12 for 0x0d */ - TLBP = 0x0, - TLBR = 0x1, - TLBWI = 0x2, - TLBWR = 0x3, - WAIT = 0x9, - IRET = 0xd, - DERET = 0xe, - ERET = 0xf, - - /* bits 15..12 for 0x15 */ - DMT = 0x0, - DVPE = 0x1, - EMT = 0x2, - EVPE = 0x3, - - /* bits 15..12 for 0x1d */ - DI = 0x4, - EI = 0x5, - - /* bits 15..12 for 0x2d */ - SYNC = 0x6, - SYSCALL = 0x8, - SDBBP = 0xd, - - /* bits 15..12 for 0x35 */ - MFHI32 = 0x0, - MFLO32 = 0x1, - MTHI32 = 0x2, - MTLO32 = 0x3, -}; - -/* POOL32B encoding of minor opcode field (bits 15..12) */ - -enum { - LWC2 = 0x0, - LWP = 0x1, - LDP = 0x4, - LWM32 = 0x5, - CACHE = 0x6, - LDM = 0x7, - SWC2 = 0x8, - SWP = 0x9, - SDP = 0xc, - SWM32 = 0xd, - SDM = 0xf -}; - -/* POOL32C encoding of minor opcode field (bits 15..12) */ - -enum { - LWL = 0x0, - SWL = 0x8, - LWR = 0x1, - SWR = 0x9, - PREF = 0x2, - /* 0xa is reserved */ - LL = 0x3, - SC = 0xb, - LDL = 0x4, - SDL = 0xc, - LDR = 0x5, - SDR = 0xd, - /* 0x6 is reserved */ - LWU = 0xe, - LLD = 0x7, - SCD = 0xf -}; - -/* POOL32F encoding of minor opcode field (bits 5..0) */ - -enum { - /* These are the bit 7..6 values */ - ADD_FMT = 0x0, - MOVN_FMT = 0x0, - - SUB_FMT = 0x1, - MOVZ_FMT = 0x1, - - MUL_FMT = 0x2, - - DIV_FMT = 0x3, - - /* These are the bit 8..6 values */ - RSQRT2_FMT = 0x0, - MOVF_FMT = 0x0, - - LWXC1 = 0x1, - MOVT_FMT = 0x1, - - PLL_PS = 0x2, - SWXC1 = 0x2, - - PLU_PS = 0x3, - LDXC1 = 0x3, - - PUL_PS = 0x4, - SDXC1 = 0x4, - RECIP2_FMT = 0x4, - - PUU_PS = 0x5, - LUXC1 = 0x5, - - CVT_PS_S = 0x6, - SUXC1 = 0x6, - ADDR_PS = 0x6, - PREFX = 0x6, - - MULR_PS = 0x7, - - MADD_S = 0x01, - MADD_D = 0x09, - MADD_PS = 0x11, - ALNV_PS = 
0x19, - MSUB_S = 0x21, - MSUB_D = 0x29, - MSUB_PS = 0x31, - - NMADD_S = 0x02, - NMADD_D = 0x0a, - NMADD_PS = 0x12, - NMSUB_S = 0x22, - NMSUB_D = 0x2a, - NMSUB_PS = 0x32, - - POOL32FXF = 0x3b, - - CABS_COND_FMT = 0x1c, /* MIPS3D */ - C_COND_FMT = 0x3c -}; - -/* POOL32Fxf encoding of minor opcode extension field */ - -enum { - CVT_L = 0x04, - RSQRT_FMT = 0x08, - FLOOR_L = 0x0c, - CVT_PW_PS = 0x1c, - CVT_W = 0x24, - SQRT_FMT = 0x28, - FLOOR_W = 0x2c, - CVT_PS_PW = 0x3c, - CFC1 = 0x40, - RECIP_FMT = 0x48, - CEIL_L = 0x4c, - CTC1 = 0x60, - CEIL_W = 0x6c, - MFC1 = 0x80, - CVT_S_PL = 0x84, - TRUNC_L = 0x8c, - MTC1 = 0xa0, - CVT_S_PU = 0xa4, - TRUNC_W = 0xac, - MFHC1 = 0xc0, - ROUND_L = 0xcc, - MTHC1 = 0xe0, - ROUND_W = 0xec, - - MOV_FMT = 0x01, - MOVF = 0x05, - ABS_FMT = 0x0d, - RSQRT1_FMT = 0x1d, - MOVT = 0x25, - NEG_FMT = 0x2d, - CVT_D = 0x4d, - RECIP1_FMT = 0x5d, - CVT_S = 0x6d -}; - -/* POOL32I encoding of minor opcode field (bits 25..21) */ - -enum { - BLTZ = 0x00, - BLTZAL = 0x01, - BGEZ = 0x02, - BGEZAL = 0x03, - BLEZ = 0x04, - BNEZC = 0x05, - BGTZ = 0x06, - BEQZC = 0x07, - TLTI = 0x08, - TGEI = 0x09, - TLTIU = 0x0a, - TGEIU = 0x0b, - TNEI = 0x0c, - LUI = 0x0d, - TEQI = 0x0e, - SYNCI = 0x10, - BLTZALS = 0x11, - BGEZALS = 0x13, - BC2F = 0x14, - BC2T = 0x15, - BPOSGE64 = 0x1a, - BPOSGE32 = 0x1b, - /* These overlap and are distinguished by bit16 of the instruction */ - BC1F = 0x1c, - BC1T = 0x1d, - BC1ANY2F = 0x1c, - BC1ANY2T = 0x1d, - BC1ANY4F = 0x1e, - BC1ANY4T = 0x1f -}; - -/* POOL16A encoding of minor opcode field */ - -enum { - ADDU16 = 0x0, - SUBU16 = 0x1 -}; - -/* POOL16B encoding of minor opcode field */ - -enum { - SLL16 = 0x0, - SRL16 = 0x1 -}; - -/* POOL16C encoding of minor opcode field */ - -enum { - NOT16 = 0x00, - XOR16 = 0x04, - AND16 = 0x08, - OR16 = 0x0c, - LWM16 = 0x10, - SWM16 = 0x14, - JR16 = 0x18, - JRC16 = 0x1a, - JALR16 = 0x1c, - JALR16S = 0x1e, - MFHI16 = 0x20, - MFLO16 = 0x24, - BREAK16 = 0x28, - SDBBP16 = 0x2c, - JRADDIUSP = 0x30 -}; - -/* POOL16D encoding of minor opcode field */ - -enum { - ADDIUS5 = 0x0, - ADDIUSP = 0x1 -}; - -/* POOL16E encoding of minor opcode field */ - -enum { - ADDIUR2 = 0x0, - ADDIUR1SP = 0x1 -}; - -static int mmreg (int r) -{ - static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; - - return map[r]; -} - -/* Used for 16-bit store instructions. 
*/ -static int mmreg2 (int r) -{ - static const int map[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; - - return map[r]; -} - -#define uMIPS_RD(op) ((op >> 7) & 0x7) -#define uMIPS_RS(op) ((op >> 4) & 0x7) -#define uMIPS_RS2(op) uMIPS_RS(op) -#define uMIPS_RS1(op) ((op >> 1) & 0x7) -#define uMIPS_RD5(op) ((op >> 5) & 0x1f) -#define uMIPS_RS5(op) (op & 0x1f) - -/* Signed immediate */ -#define SIMM(op, start, width) \ - ((int32_t)(((op >> start) & ((~0U) >> (32-width))) \ - << (32-width)) \ - >> (32-width)) -/* Zero-extended immediate */ -#define ZIMM(op, start, width) ((op >> start) & ((~0U) >> (32-width))) - -static void gen_addiur1sp(DisasContext *ctx) -{ - int rd = mmreg(uMIPS_RD(ctx->opcode)); - - gen_arith_imm(ctx, OPC_ADDIU, rd, 29, ((ctx->opcode >> 1) & 0x3f) << 2); -} - -static void gen_addiur2(DisasContext *ctx) -{ - static const int decoded_imm[] = { 1, 4, 8, 12, 16, 20, 24, -1 }; - int rd = mmreg(uMIPS_RD(ctx->opcode)); - int rs = mmreg(uMIPS_RS(ctx->opcode)); - - gen_arith_imm(ctx, OPC_ADDIU, rd, rs, decoded_imm[ZIMM(ctx->opcode, 1, 3)]); -} - -static void gen_addiusp(DisasContext *ctx) -{ - int encoded = ZIMM(ctx->opcode, 1, 9); - int decoded; - - if (encoded <= 1) { - decoded = 256 + encoded; - } else if (encoded <= 255) { - decoded = encoded; - } else if (encoded <= 509) { - decoded = encoded - 512; - } else { - decoded = encoded - 768; - } - - gen_arith_imm(ctx, OPC_ADDIU, 29, 29, decoded << 2); -} - -static void gen_addius5(DisasContext *ctx) -{ - int imm = SIMM(ctx->opcode, 1, 4); - int rd = (ctx->opcode >> 5) & 0x1f; - - gen_arith_imm(ctx, OPC_ADDIU, rd, rd, imm); -} - -static void gen_andi16(DisasContext *ctx) -{ - static const int decoded_imm[] = { 128, 1, 2, 3, 4, 7, 8, 15, 16, - 31, 32, 63, 64, 255, 32768, 65535 }; - int rd = mmreg(uMIPS_RD(ctx->opcode)); - int rs = mmreg(uMIPS_RS(ctx->opcode)); - int encoded = ZIMM(ctx->opcode, 0, 4); - - gen_logic_imm(ctx, OPC_ANDI, rd, rs, decoded_imm[encoded]); -} - -static void gen_ldst_multiple (DisasContext *ctx, uint32_t opc, int reglist, - int base, int16_t offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *opn = "ldst_multiple"; - TCGv t0, t1; - TCGv_i32 t2; - - if (ctx->hflags & MIPS_HFLAG_BMASK) { - generate_exception(ctx, EXCP_RI); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - - gen_base_offset_addr(ctx, t0, base, offset); - - t1 = tcg_const_tl(tcg_ctx, reglist); - t2 = tcg_const_i32(tcg_ctx, ctx->mem_idx); - - save_cpu_state(ctx, 1); - switch (opc) { - case LWM32: - gen_helper_lwm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); - opn = "lwm"; - break; - case SWM32: - gen_helper_swm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); - opn = "swm"; - break; -#ifdef TARGET_MIPS64 - case LDM: - gen_helper_ldm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); - opn = "ldm"; - break; - case SDM: - gen_helper_sdm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); - opn = "sdm"; - break; -#endif - } - (void)opn; - MIPS_DEBUG("%s, %x, %d(%s)", opn, reglist, offset, regnames[base]); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); - tcg_temp_free_i32(tcg_ctx, t2); -} - - -static void gen_pool16c_insn(DisasContext *ctx) -{ - int rd = mmreg((ctx->opcode >> 3) & 0x7); - int rs = mmreg(ctx->opcode & 0x7); - - switch (((ctx->opcode) >> 4) & 0x3f) { - case NOT16 + 0: - case NOT16 + 1: - case NOT16 + 2: - case NOT16 + 3: - gen_logic(ctx, OPC_NOR, rd, rs, 0); - break; - case XOR16 + 0: - case XOR16 + 1: - case XOR16 + 2: - case XOR16 + 3: - gen_logic(ctx, OPC_XOR, rd, rd, rs); - break; - case AND16 + 0: - case AND16 + 1: - case AND16 + 2: - case AND16 + 3: - 
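-    /* The switch above matches bits 9..4 of the opcode, a 6-bit field,
-     * while the two-operand ALU forms are defined only by bits 9..6; the
-     * "+ 0 .. + 3" fan-out absorbs the low two bits, which overlap the
-     * register fields. Illustrative check: AND16 = 0x08, so field values
-     * 0x08..0x0b all select the same AND operation.
-     */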
gen_logic(ctx, OPC_AND, rd, rd, rs); - break; - case OR16 + 0: - case OR16 + 1: - case OR16 + 2: - case OR16 + 3: - gen_logic(ctx, OPC_OR, rd, rd, rs); - break; - case LWM16 + 0: - case LWM16 + 1: - case LWM16 + 2: - case LWM16 + 3: - { - static const int lwm_convert[] = { 0x11, 0x12, 0x13, 0x14 }; - int offset = ZIMM(ctx->opcode, 0, 4); - - gen_ldst_multiple(ctx, LWM32, lwm_convert[(ctx->opcode >> 4) & 0x3], - 29, offset << 2); - } - break; - case SWM16 + 0: - case SWM16 + 1: - case SWM16 + 2: - case SWM16 + 3: - { - static const int swm_convert[] = { 0x11, 0x12, 0x13, 0x14 }; - int offset = ZIMM(ctx->opcode, 0, 4); - - gen_ldst_multiple(ctx, SWM32, swm_convert[(ctx->opcode >> 4) & 0x3], - 29, offset << 2); - } - break; - case JR16 + 0: - case JR16 + 1: - { - int reg = ctx->opcode & 0x1f; - - gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 4); - } - break; - case JRC16 + 0: - case JRC16 + 1: - { - int reg = ctx->opcode & 0x1f; - gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 0); - /* Let normal delay slot handling in our caller take us - to the branch target. */ - } - break; - case JALR16 + 0: - case JALR16 + 1: - gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 4); - ctx->hflags |= MIPS_HFLAG_BDS_STRICT; - break; - case JALR16S + 0: - case JALR16S + 1: - gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 2); - ctx->hflags |= MIPS_HFLAG_BDS_STRICT; - break; - case MFHI16 + 0: - case MFHI16 + 1: - gen_HILO(ctx, OPC_MFHI, 0, uMIPS_RS5(ctx->opcode)); - break; - case MFLO16 + 0: - case MFLO16 + 1: - gen_HILO(ctx, OPC_MFLO, 0, uMIPS_RS5(ctx->opcode)); - break; - case BREAK16: - generate_exception(ctx, EXCP_BREAK); - break; - case SDBBP16: - /* XXX: not clear which exception should be raised - * when in debug mode... - */ - check_insn(ctx, ISA_MIPS32); - if (!(ctx->hflags & MIPS_HFLAG_DM)) { - generate_exception(ctx, EXCP_DBp); - } else { - generate_exception(ctx, EXCP_DBp); - } - break; - case JRADDIUSP + 0: - case JRADDIUSP + 1: - { - int imm = ZIMM(ctx->opcode, 0, 5); - gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0); - gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2); - /* Let normal delay slot handling in our caller take us - to the branch target. 
*/ - } - break; - default: - generate_exception(ctx, EXCP_RI); - break; - } -} - -static void gen_ldxs (DisasContext *ctx, int base, int index, int rd) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, base); - - if (index != 0) { - gen_load_gpr(ctx, t1, index); - tcg_gen_shli_tl(tcg_ctx, t1, t1, 2); - gen_op_addr_add(ctx, t0, t1, t0); - } - - tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TESL); - gen_store_gpr(tcg_ctx, t1, rd); - - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -static void gen_ldst_pair (DisasContext *ctx, uint32_t opc, int rd, - int base, int16_t offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - const char *opn = "ldst_pair"; - TCGv t0, t1; - - if (ctx->hflags & MIPS_HFLAG_BMASK || rd == 31) { - generate_exception(ctx, EXCP_RI); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - t1 = tcg_temp_new(tcg_ctx); - - gen_base_offset_addr(ctx, t0, base, offset); - - switch (opc) { - case LWP: - if (rd == base) { - generate_exception(ctx, EXCP_RI); - return; - } - tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TESL); - gen_store_gpr(tcg_ctx, t1, rd); - tcg_gen_movi_tl(tcg_ctx, t1, 4); - gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TESL); - gen_store_gpr(tcg_ctx, t1, rd+1); - opn = "lwp"; - break; - case SWP: - gen_load_gpr(ctx, t1, rd); - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); - tcg_gen_movi_tl(tcg_ctx, t1, 4); - gen_op_addr_add(ctx, t0, t0, t1); - gen_load_gpr(ctx, t1, rd+1); - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); - opn = "swp"; - break; -#ifdef TARGET_MIPS64 - case LDP: - if (rd == base) { - generate_exception(ctx, EXCP_RI); - return; - } - tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEQ); - gen_store_gpr(tcg_ctx, t1, rd); - tcg_gen_movi_tl(tcg_ctx, t1, 8); - gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEQ); - gen_store_gpr(tcg_ctx, t1, rd+1); - opn = "ldp"; - break; - case SDP: - gen_load_gpr(ctx, t1, rd); - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEQ); - tcg_gen_movi_tl(tcg_ctx, t1, 8); - gen_op_addr_add(ctx, t0, t0, t1); - gen_load_gpr(ctx, t1, rd+1); - tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEQ); - opn = "sdp"; - break; -#endif - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s, %s, %d(%s)", opn, regnames[rd], offset, regnames[base]); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); -} - -static void gen_pool32axf (CPUMIPSState *env, DisasContext *ctx, int rt, int rs) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - int extension = (ctx->opcode >> 6) & 0x3f; - int minor = (ctx->opcode >> 12) & 0xf; - uint32_t mips32_op; - - switch (extension) { - case TEQ: - mips32_op = OPC_TEQ; - goto do_trap; - case TGE: - mips32_op = OPC_TGE; - goto do_trap; - case TGEU: - mips32_op = OPC_TGEU; - goto do_trap; - case TLT: - mips32_op = OPC_TLT; - goto do_trap; - case TLTU: - mips32_op = OPC_TLTU; - goto do_trap; - case TNE: - mips32_op = OPC_TNE; - do_trap: - gen_trap(ctx, mips32_op, rs, rt, -1); - break; -#ifndef CONFIG_USER_ONLY - case MFC0: - case MFC0 + 32: - check_cp0_enabled(ctx); - if (rt == 0) { - /* Treat as NOP. 
*/ - break; - } - gen_mfc0(ctx, *cpu_gpr[rt], rs, (ctx->opcode >> 11) & 0x7); - break; - case MTC0: - case MTC0 + 32: - check_cp0_enabled(ctx); - { - TCGv t0 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rt); - gen_mtc0(ctx, t0, rs, (ctx->opcode >> 11) & 0x7); - tcg_temp_free(tcg_ctx, t0); - } - break; -#endif - case 0x2a: - switch (minor & 3) { - case MADD_ACC: - gen_muldiv(ctx, OPC_MADD, (ctx->opcode >> 14) & 3, rs, rt); - break; - case MADDU_ACC: - gen_muldiv(ctx, OPC_MADDU, (ctx->opcode >> 14) & 3, rs, rt); - break; - case MSUB_ACC: - gen_muldiv(ctx, OPC_MSUB, (ctx->opcode >> 14) & 3, rs, rt); - break; - case MSUBU_ACC: - gen_muldiv(ctx, OPC_MSUBU, (ctx->opcode >> 14) & 3, rs, rt); - break; - default: - goto pool32axf_invalid; - } - break; - case 0x32: - switch (minor & 3) { - case MULT_ACC: - gen_muldiv(ctx, OPC_MULT, (ctx->opcode >> 14) & 3, rs, rt); - break; - case MULTU_ACC: - gen_muldiv(ctx, OPC_MULTU, (ctx->opcode >> 14) & 3, rs, rt); - break; - default: - goto pool32axf_invalid; - } - break; - case 0x2c: - switch (minor) { - case SEB: - gen_bshfl(ctx, OPC_SEB, rs, rt); - break; - case SEH: - gen_bshfl(ctx, OPC_SEH, rs, rt); - break; - case CLO: - mips32_op = OPC_CLO; - goto do_cl; - case CLZ: - mips32_op = OPC_CLZ; - do_cl: - check_insn(ctx, ISA_MIPS32); - gen_cl(ctx, mips32_op, rt, rs); - break; - case RDHWR: - gen_rdhwr(ctx, rt, rs); - break; - case WSBH: - gen_bshfl(ctx, OPC_WSBH, rs, rt); - break; - case MULT: - mips32_op = OPC_MULT; - goto do_mul; - case MULTU: - mips32_op = OPC_MULTU; - goto do_mul; - case DIV: - mips32_op = OPC_DIV; - goto do_div; - case DIVU: - mips32_op = OPC_DIVU; - goto do_div; - do_div: - check_insn(ctx, ISA_MIPS32); - gen_muldiv(ctx, mips32_op, 0, rs, rt); - break; - case MADD: - mips32_op = OPC_MADD; - goto do_mul; - case MADDU: - mips32_op = OPC_MADDU; - goto do_mul; - case MSUB: - mips32_op = OPC_MSUB; - goto do_mul; - case MSUBU: - mips32_op = OPC_MSUBU; - do_mul: - check_insn(ctx, ISA_MIPS32); - gen_muldiv(ctx, mips32_op, 0, rs, rt); - break; - default: - goto pool32axf_invalid; - } - break; - case 0x34: - switch (minor) { - case MFC2: - case MTC2: - case MFHC2: - case MTHC2: - case CFC2: - case CTC2: - generate_exception_err(ctx, EXCP_CpU, 2); - break; - default: - goto pool32axf_invalid; - } - break; - case 0x3c: - switch (minor) { - case JALR: - case JALR_HB: - gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 4); - ctx->hflags |= MIPS_HFLAG_BDS_STRICT; - break; - case JALRS: - case JALRS_HB: - gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 2); - ctx->hflags |= MIPS_HFLAG_BDS_STRICT; - break; - default: - goto pool32axf_invalid; - } - break; - case 0x05: - switch (minor) { - case RDPGPR: - check_cp0_enabled(ctx); - check_insn(ctx, ISA_MIPS32R2); - gen_load_srsgpr(ctx, rt, rs); - break; - case WRPGPR: - check_cp0_enabled(ctx); - check_insn(ctx, ISA_MIPS32R2); - gen_store_srsgpr(ctx, rt, rs); - break; - default: - goto pool32axf_invalid; - } - break; -#ifndef CONFIG_USER_ONLY - case 0x0d: - switch (minor) { - case TLBP: - mips32_op = OPC_TLBP; - goto do_cp0; - case TLBR: - mips32_op = OPC_TLBR; - goto do_cp0; - case TLBWI: - mips32_op = OPC_TLBWI; - goto do_cp0; - case TLBWR: - mips32_op = OPC_TLBWR; - goto do_cp0; - case WAIT: - mips32_op = OPC_WAIT; - goto do_cp0; - case DERET: - mips32_op = OPC_DERET; - goto do_cp0; - case ERET: - mips32_op = OPC_ERET; - do_cp0: - gen_cp0(env, ctx, mips32_op, rt, rs); - break; - default: - goto pool32axf_invalid; - } - break; - case 0x1d: - switch (minor) { - case DI: - check_cp0_enabled(ctx); - { - 
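-            /* di returns the previous CP0 Status value (stored to rs
-             * below) while clearing Status.IE; interrupt state feeds the
-             * translation flags, so the TB must end afterwards (BS_STOP).
-             */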
TCGv t0 = tcg_temp_new(tcg_ctx); - - save_cpu_state(ctx, 1); - gen_helper_di(tcg_ctx, t0, tcg_ctx->cpu_env); - gen_store_gpr(tcg_ctx, t0, rs); - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - tcg_temp_free(tcg_ctx, t0); - } - break; - case EI: - check_cp0_enabled(ctx); - { - TCGv t0 = tcg_temp_new(tcg_ctx); - - save_cpu_state(ctx, 1); - gen_helper_ei(tcg_ctx, t0, tcg_ctx->cpu_env); - gen_store_gpr(tcg_ctx, t0, rs); - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - tcg_temp_free(tcg_ctx, t0); - } - break; - default: - goto pool32axf_invalid; - } - break; -#endif - case 0x2d: - switch (minor) { - case SYNC: - /* NOP */ - break; - case SYSCALL: - generate_exception(ctx, EXCP_SYSCALL); - ctx->bstate = BS_STOP; - break; - case SDBBP: - check_insn(ctx, ISA_MIPS32); - if (!(ctx->hflags & MIPS_HFLAG_DM)) { - generate_exception(ctx, EXCP_DBp); - } else { - generate_exception(ctx, EXCP_DBp); - } - break; - default: - goto pool32axf_invalid; - } - break; - case 0x01: - switch (minor & 3) { - case MFHI_ACC: - gen_HILO(ctx, OPC_MFHI, minor >> 2, rs); - break; - case MFLO_ACC: - gen_HILO(ctx, OPC_MFLO, minor >> 2, rs); - break; - case MTHI_ACC: - gen_HILO(ctx, OPC_MTHI, minor >> 2, rs); - break; - case MTLO_ACC: - gen_HILO(ctx, OPC_MTLO, minor >> 2, rs); - break; - default: - goto pool32axf_invalid; - } - break; - case 0x35: - switch (minor) { - case MFHI32: - gen_HILO(ctx, OPC_MFHI, 0, rs); - break; - case MFLO32: - gen_HILO(ctx, OPC_MFLO, 0, rs); - break; - case MTHI32: - gen_HILO(ctx, OPC_MTHI, 0, rs); - break; - case MTLO32: - gen_HILO(ctx, OPC_MTLO, 0, rs); - break; - default: - goto pool32axf_invalid; - } - break; - default: - pool32axf_invalid: - MIPS_INVAL("pool32axf"); - generate_exception(ctx, EXCP_RI); - break; - } -} - -/* Values for microMIPS fmt field. Variable-width, depending on which - formats the instruction supports. 
*/ - -enum { - FMT_SD_S = 0, - FMT_SD_D = 1, - - FMT_SDPS_S = 0, - FMT_SDPS_D = 1, - FMT_SDPS_PS = 2, - - FMT_SWL_S = 0, - FMT_SWL_W = 1, - FMT_SWL_L = 2, - - FMT_DWL_D = 0, - FMT_DWL_W = 1, - FMT_DWL_L = 2 -}; - -static void gen_pool32fxf(DisasContext *ctx, int rt, int rs) -{ - int extension = (ctx->opcode >> 6) & 0x3ff; - uint32_t mips32_op; - -#define FLOAT_1BIT_FMT(opc, fmt) (fmt << 8) | opc -#define FLOAT_2BIT_FMT(opc, fmt) (fmt << 7) | opc -#define COND_FLOAT_MOV(opc, cond) (cond << 7) | opc - - switch (extension) { - case FLOAT_1BIT_FMT(CFC1, 0): - mips32_op = OPC_CFC1; - goto do_cp1; - case FLOAT_1BIT_FMT(CTC1, 0): - mips32_op = OPC_CTC1; - goto do_cp1; - case FLOAT_1BIT_FMT(MFC1, 0): - mips32_op = OPC_MFC1; - goto do_cp1; - case FLOAT_1BIT_FMT(MTC1, 0): - mips32_op = OPC_MTC1; - goto do_cp1; - case FLOAT_1BIT_FMT(MFHC1, 0): - mips32_op = OPC_MFHC1; - goto do_cp1; - case FLOAT_1BIT_FMT(MTHC1, 0): - mips32_op = OPC_MTHC1; - do_cp1: - gen_cp1(ctx, mips32_op, rt, rs); - break; - - /* Reciprocal square root */ - case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_S): - mips32_op = OPC_RSQRT_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_D): - mips32_op = OPC_RSQRT_D; - goto do_unaryfp; - - /* Square root */ - case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_S): - mips32_op = OPC_SQRT_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_D): - mips32_op = OPC_SQRT_D; - goto do_unaryfp; - - /* Reciprocal */ - case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_S): - mips32_op = OPC_RECIP_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_D): - mips32_op = OPC_RECIP_D; - goto do_unaryfp; - - /* Floor */ - case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_S): - mips32_op = OPC_FLOOR_L_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_D): - mips32_op = OPC_FLOOR_L_D; - goto do_unaryfp; - case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_S): - mips32_op = OPC_FLOOR_W_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_D): - mips32_op = OPC_FLOOR_W_D; - goto do_unaryfp; - - /* Ceiling */ - case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_S): - mips32_op = OPC_CEIL_L_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_D): - mips32_op = OPC_CEIL_L_D; - goto do_unaryfp; - case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_S): - mips32_op = OPC_CEIL_W_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_D): - mips32_op = OPC_CEIL_W_D; - goto do_unaryfp; - - /* Truncation */ - case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_S): - mips32_op = OPC_TRUNC_L_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_D): - mips32_op = OPC_TRUNC_L_D; - goto do_unaryfp; - case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_S): - mips32_op = OPC_TRUNC_W_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_D): - mips32_op = OPC_TRUNC_W_D; - goto do_unaryfp; - - /* Round */ - case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_S): - mips32_op = OPC_ROUND_L_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_D): - mips32_op = OPC_ROUND_L_D; - goto do_unaryfp; - case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_S): - mips32_op = OPC_ROUND_W_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_D): - mips32_op = OPC_ROUND_W_D; - goto do_unaryfp; - - /* Integer to floating-point conversion */ - case FLOAT_1BIT_FMT(CVT_L, FMT_SD_S): - mips32_op = OPC_CVT_L_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(CVT_L, FMT_SD_D): - mips32_op = OPC_CVT_L_D; - goto do_unaryfp; - case FLOAT_1BIT_FMT(CVT_W, FMT_SD_S): - mips32_op = OPC_CVT_W_S; - goto do_unaryfp; - case FLOAT_1BIT_FMT(CVT_W, FMT_SD_D): - mips32_op = OPC_CVT_W_D; - goto do_unaryfp; - - /* Paired-foo conversions */ - case 
FLOAT_1BIT_FMT(CVT_S_PL, 0): - mips32_op = OPC_CVT_S_PL; - goto do_unaryfp; - case FLOAT_1BIT_FMT(CVT_S_PU, 0): - mips32_op = OPC_CVT_S_PU; - goto do_unaryfp; - case FLOAT_1BIT_FMT(CVT_PW_PS, 0): - mips32_op = OPC_CVT_PW_PS; - goto do_unaryfp; - case FLOAT_1BIT_FMT(CVT_PS_PW, 0): - mips32_op = OPC_CVT_PS_PW; - goto do_unaryfp; - - /* Floating-point moves */ - case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_S): - mips32_op = OPC_MOV_S; - goto do_unaryfp; - case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_D): - mips32_op = OPC_MOV_D; - goto do_unaryfp; - case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_PS): - mips32_op = OPC_MOV_PS; - goto do_unaryfp; - - /* Absolute value */ - case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_S): - mips32_op = OPC_ABS_S; - goto do_unaryfp; - case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_D): - mips32_op = OPC_ABS_D; - goto do_unaryfp; - case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_PS): - mips32_op = OPC_ABS_PS; - goto do_unaryfp; - - /* Negation */ - case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_S): - mips32_op = OPC_NEG_S; - goto do_unaryfp; - case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_D): - mips32_op = OPC_NEG_D; - goto do_unaryfp; - case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_PS): - mips32_op = OPC_NEG_PS; - goto do_unaryfp; - - /* Reciprocal square root step */ - case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_S): - mips32_op = OPC_RSQRT1_S; - goto do_unaryfp; - case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_D): - mips32_op = OPC_RSQRT1_D; - goto do_unaryfp; - case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_PS): - mips32_op = OPC_RSQRT1_PS; - goto do_unaryfp; - - /* Reciprocal step */ - case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_S): - mips32_op = OPC_RECIP1_S; - goto do_unaryfp; - case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_D): - mips32_op = OPC_RECIP1_S; - goto do_unaryfp; - case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_PS): - mips32_op = OPC_RECIP1_PS; - goto do_unaryfp; - - /* Conversions from double */ - case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_S): - mips32_op = OPC_CVT_D_S; - goto do_unaryfp; - case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_W): - mips32_op = OPC_CVT_D_W; - goto do_unaryfp; - case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_L): - mips32_op = OPC_CVT_D_L; - goto do_unaryfp; - - /* Conversions from single */ - case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_D): - mips32_op = OPC_CVT_S_D; - goto do_unaryfp; - case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_W): - mips32_op = OPC_CVT_S_W; - goto do_unaryfp; - case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_L): - mips32_op = OPC_CVT_S_L; - do_unaryfp: - gen_farith(ctx, mips32_op, -1, rs, rt, 0); - break; - - /* Conditional moves on floating-point codes */ - case COND_FLOAT_MOV(MOVT, 0): - case COND_FLOAT_MOV(MOVT, 1): - case COND_FLOAT_MOV(MOVT, 2): - case COND_FLOAT_MOV(MOVT, 3): - case COND_FLOAT_MOV(MOVT, 4): - case COND_FLOAT_MOV(MOVT, 5): - case COND_FLOAT_MOV(MOVT, 6): - case COND_FLOAT_MOV(MOVT, 7): - gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 1); - break; - case COND_FLOAT_MOV(MOVF, 0): - case COND_FLOAT_MOV(MOVF, 1): - case COND_FLOAT_MOV(MOVF, 2): - case COND_FLOAT_MOV(MOVF, 3): - case COND_FLOAT_MOV(MOVF, 4): - case COND_FLOAT_MOV(MOVF, 5): - case COND_FLOAT_MOV(MOVF, 6): - case COND_FLOAT_MOV(MOVF, 7): - gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 0); - break; - default: - MIPS_INVAL("pool32fxf"); - generate_exception(ctx, EXCP_RI); - break; - } -} - -static void decode_micromips32_opc (CPUMIPSState *env, DisasContext *ctx, - uint16_t insn_hw1) -{ - int32_t offset; - uint16_t insn; - int rt, rs, rd, rr; - int16_t imm; - uint32_t op, minor, mips32_op; - uint32_t cond, fmt, cc; - - insn = cpu_lduw_code(env, ctx->pc + 2); - ctx->opcode = 
(ctx->opcode << 16) | insn; - - rt = (ctx->opcode >> 21) & 0x1f; - rs = (ctx->opcode >> 16) & 0x1f; - rd = (ctx->opcode >> 11) & 0x1f; - rr = (ctx->opcode >> 6) & 0x1f; - imm = (int16_t) ctx->opcode; - - op = (ctx->opcode >> 26) & 0x3f; - switch (op) { - case POOL32A: - minor = ctx->opcode & 0x3f; - switch (minor) { - case 0x00: - minor = (ctx->opcode >> 6) & 0xf; - switch (minor) { - case SLL32: - mips32_op = OPC_SLL; - goto do_shifti; - case SRA: - mips32_op = OPC_SRA; - goto do_shifti; - case SRL32: - mips32_op = OPC_SRL; - goto do_shifti; - case ROTR: - mips32_op = OPC_ROTR; - do_shifti: - gen_shift_imm(ctx, mips32_op, rt, rs, rd); - break; - default: - goto pool32a_invalid; - } - break; - case 0x10: - minor = (ctx->opcode >> 6) & 0xf; - switch (minor) { - /* Arithmetic */ - case ADD: - mips32_op = OPC_ADD; - goto do_arith; - case ADDU32: - mips32_op = OPC_ADDU; - goto do_arith; - case SUB: - mips32_op = OPC_SUB; - goto do_arith; - case SUBU32: - mips32_op = OPC_SUBU; - goto do_arith; - case MUL: - mips32_op = OPC_MUL; - do_arith: - gen_arith(ctx, mips32_op, rd, rs, rt); - break; - /* Shifts */ - case SLLV: - mips32_op = OPC_SLLV; - goto do_shift; - case SRLV: - mips32_op = OPC_SRLV; - goto do_shift; - case SRAV: - mips32_op = OPC_SRAV; - goto do_shift; - case ROTRV: - mips32_op = OPC_ROTRV; - do_shift: - gen_shift(ctx, mips32_op, rd, rs, rt); - break; - /* Logical operations */ - case AND: - mips32_op = OPC_AND; - goto do_logic; - case OR32: - mips32_op = OPC_OR; - goto do_logic; - case NOR: - mips32_op = OPC_NOR; - goto do_logic; - case XOR32: - mips32_op = OPC_XOR; - do_logic: - gen_logic(ctx, mips32_op, rd, rs, rt); - break; - /* Set less than */ - case SLT: - mips32_op = OPC_SLT; - goto do_slt; - case SLTU: - mips32_op = OPC_SLTU; - do_slt: - gen_slt(ctx, mips32_op, rd, rs, rt); - break; - default: - goto pool32a_invalid; - } - break; - case 0x18: - minor = (ctx->opcode >> 6) & 0xf; - switch (minor) { - /* Conditional moves */ - case MOVN: - mips32_op = OPC_MOVN; - goto do_cmov; - case MOVZ: - mips32_op = OPC_MOVZ; - do_cmov: - gen_cond_move(ctx, mips32_op, rd, rs, rt); - break; - case LWXS: - gen_ldxs(ctx, rs, rt, rd); - break; - default: - goto pool32a_invalid; - } - break; - case INS: - gen_bitops(ctx, OPC_INS, rt, rs, rr, rd); - return; - case EXT: - gen_bitops(ctx, OPC_EXT, rt, rs, rr, rd); - return; - case POOL32AXF: - gen_pool32axf(env, ctx, rt, rs); - break; - case 0x07: - generate_exception(ctx, EXCP_BREAK); - break; - default: - pool32a_invalid: - MIPS_INVAL("pool32a"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case POOL32B: - minor = (ctx->opcode >> 12) & 0xf; - switch (minor) { - case CACHE: - check_cp0_enabled(ctx); - /* Treat as no-op. */ - break; - case LWC2: - case SWC2: - /* COP2: Not implemented. 
*/ - generate_exception_err(ctx, EXCP_CpU, 2); - break; - case LWP: - case SWP: -#ifdef TARGET_MIPS64 - case LDP: - case SDP: -#endif - gen_ldst_pair(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12)); - break; - case LWM32: - case SWM32: -#ifdef TARGET_MIPS64 - case LDM: - case SDM: -#endif - gen_ldst_multiple(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12)); - break; - default: - MIPS_INVAL("pool32b"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case POOL32F: - if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { - minor = ctx->opcode & 0x3f; - check_cp1_enabled(ctx); - switch (minor) { - case ALNV_PS: - mips32_op = OPC_ALNV_PS; - goto do_madd; - case MADD_S: - mips32_op = OPC_MADD_S; - goto do_madd; - case MADD_D: - mips32_op = OPC_MADD_D; - goto do_madd; - case MADD_PS: - mips32_op = OPC_MADD_PS; - goto do_madd; - case MSUB_S: - mips32_op = OPC_MSUB_S; - goto do_madd; - case MSUB_D: - mips32_op = OPC_MSUB_D; - goto do_madd; - case MSUB_PS: - mips32_op = OPC_MSUB_PS; - goto do_madd; - case NMADD_S: - mips32_op = OPC_NMADD_S; - goto do_madd; - case NMADD_D: - mips32_op = OPC_NMADD_D; - goto do_madd; - case NMADD_PS: - mips32_op = OPC_NMADD_PS; - goto do_madd; - case NMSUB_S: - mips32_op = OPC_NMSUB_S; - goto do_madd; - case NMSUB_D: - mips32_op = OPC_NMSUB_D; - goto do_madd; - case NMSUB_PS: - mips32_op = OPC_NMSUB_PS; - do_madd: - gen_flt3_arith(ctx, mips32_op, rd, rr, rs, rt); - break; - case CABS_COND_FMT: - cond = (ctx->opcode >> 6) & 0xf; - cc = (ctx->opcode >> 13) & 0x7; - fmt = (ctx->opcode >> 10) & 0x3; - switch (fmt) { - case 0x0: - gen_cmpabs_s(ctx, cond, rt, rs, cc); - break; - case 0x1: - gen_cmpabs_d(ctx, cond, rt, rs, cc); - break; - case 0x2: - gen_cmpabs_ps(ctx, cond, rt, rs, cc); - break; - default: - goto pool32f_invalid; - } - break; - case C_COND_FMT: - cond = (ctx->opcode >> 6) & 0xf; - cc = (ctx->opcode >> 13) & 0x7; - fmt = (ctx->opcode >> 10) & 0x3; - switch (fmt) { - case 0x0: - gen_cmp_s(ctx, cond, rt, rs, cc); - break; - case 0x1: - gen_cmp_d(ctx, cond, rt, rs, cc); - break; - case 0x2: - gen_cmp_ps(ctx, cond, rt, rs, cc); - break; - default: - goto pool32f_invalid; - } - break; - case POOL32FXF: - gen_pool32fxf(ctx, rt, rs); - break; - case 0x00: - /* PLL foo */ - switch ((ctx->opcode >> 6) & 0x7) { - case PLL_PS: - mips32_op = OPC_PLL_PS; - goto do_ps; - case PLU_PS: - mips32_op = OPC_PLU_PS; - goto do_ps; - case PUL_PS: - mips32_op = OPC_PUL_PS; - goto do_ps; - case PUU_PS: - mips32_op = OPC_PUU_PS; - goto do_ps; - case CVT_PS_S: - mips32_op = OPC_CVT_PS_S; - do_ps: - gen_farith(ctx, mips32_op, rt, rs, rd, 0); - break; - default: - goto pool32f_invalid; - } - break; - case 0x08: - /* [LS][WDU]XC1 */ - switch ((ctx->opcode >> 6) & 0x7) { - case LWXC1: - mips32_op = OPC_LWXC1; - goto do_ldst_cp1; - case SWXC1: - mips32_op = OPC_SWXC1; - goto do_ldst_cp1; - case LDXC1: - mips32_op = OPC_LDXC1; - goto do_ldst_cp1; - case SDXC1: - mips32_op = OPC_SDXC1; - goto do_ldst_cp1; - case LUXC1: - mips32_op = OPC_LUXC1; - goto do_ldst_cp1; - case SUXC1: - mips32_op = OPC_SUXC1; - do_ldst_cp1: - gen_flt3_ldst(ctx, mips32_op, rd, rd, rt, rs); - break; - default: - goto pool32f_invalid; - } - break; - case 0x18: - /* 3D insns */ - fmt = (ctx->opcode >> 9) & 0x3; - switch ((ctx->opcode >> 6) & 0x7) { - case RSQRT2_FMT: - switch (fmt) { - case FMT_SDPS_S: - mips32_op = OPC_RSQRT2_S; - goto do_3d; - case FMT_SDPS_D: - mips32_op = OPC_RSQRT2_D; - goto do_3d; - case FMT_SDPS_PS: - mips32_op = OPC_RSQRT2_PS; - goto do_3d; - default: - goto pool32f_invalid; - } - break; - case 
RECIP2_FMT: - switch (fmt) { - case FMT_SDPS_S: - mips32_op = OPC_RECIP2_S; - goto do_3d; - case FMT_SDPS_D: - mips32_op = OPC_RECIP2_D; - goto do_3d; - case FMT_SDPS_PS: - mips32_op = OPC_RECIP2_PS; - goto do_3d; - default: - goto pool32f_invalid; - } - break; - case ADDR_PS: - mips32_op = OPC_ADDR_PS; - goto do_3d; - case MULR_PS: - mips32_op = OPC_MULR_PS; - do_3d: - gen_farith(ctx, mips32_op, rt, rs, rd, 0); - break; - default: - goto pool32f_invalid; - } - break; - case 0x20: - /* MOV[FT].fmt and PREFX */ - cc = (ctx->opcode >> 13) & 0x7; - fmt = (ctx->opcode >> 9) & 0x3; - switch ((ctx->opcode >> 6) & 0x7) { - case MOVF_FMT: - switch (fmt) { - case FMT_SDPS_S: - gen_movcf_s(ctx, rs, rt, cc, 0); - break; - case FMT_SDPS_D: - gen_movcf_d(ctx, rs, rt, cc, 0); - break; - case FMT_SDPS_PS: - gen_movcf_ps(ctx, rs, rt, cc, 0); - break; - default: - goto pool32f_invalid; - } - break; - case MOVT_FMT: - switch (fmt) { - case FMT_SDPS_S: - gen_movcf_s(ctx, rs, rt, cc, 1); - break; - case FMT_SDPS_D: - gen_movcf_d(ctx, rs, rt, cc, 1); - break; - case FMT_SDPS_PS: - gen_movcf_ps(ctx, rs, rt, cc, 1); - break; - default: - goto pool32f_invalid; - } - break; - case PREFX: - break; - default: - goto pool32f_invalid; - } - break; -#define FINSN_3ARG_SDPS(prfx) \ - switch ((ctx->opcode >> 8) & 0x3) { \ - case FMT_SDPS_S: \ - mips32_op = OPC_##prfx##_S; \ - goto do_fpop; \ - case FMT_SDPS_D: \ - mips32_op = OPC_##prfx##_D; \ - goto do_fpop; \ - case FMT_SDPS_PS: \ - mips32_op = OPC_##prfx##_PS; \ - goto do_fpop; \ - default: \ - goto pool32f_invalid; \ - } - case 0x30: - /* regular FP ops */ - switch ((ctx->opcode >> 6) & 0x3) { - case ADD_FMT: - FINSN_3ARG_SDPS(ADD); - break; - case SUB_FMT: - FINSN_3ARG_SDPS(SUB); - break; - case MUL_FMT: - FINSN_3ARG_SDPS(MUL); - break; - case DIV_FMT: - fmt = (ctx->opcode >> 8) & 0x3; - if (fmt == 1) { - mips32_op = OPC_DIV_D; - } else if (fmt == 0) { - mips32_op = OPC_DIV_S; - } else { - goto pool32f_invalid; - } - goto do_fpop; - default: - goto pool32f_invalid; - } - break; - case 0x38: - /* cmovs */ - switch ((ctx->opcode >> 6) & 0x3) { - case MOVN_FMT: - FINSN_3ARG_SDPS(MOVN); - break; - case MOVZ_FMT: - FINSN_3ARG_SDPS(MOVZ); - break; - default: - goto pool32f_invalid; - } - break; - do_fpop: - gen_farith(ctx, mips32_op, rt, rs, rd, 0); - break; - default: - pool32f_invalid: - MIPS_INVAL("pool32f"); - generate_exception(ctx, EXCP_RI); - break; - } - } else { - generate_exception_err(ctx, EXCP_CpU, 1); - } - break; - case POOL32I: - minor = (ctx->opcode >> 21) & 0x1f; - switch (minor) { - case BLTZ: - gen_compute_branch(ctx, OPC_BLTZ, 4, rs, -1, imm << 1, 4); - break; - case BLTZAL: - gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 4); - ctx->hflags |= MIPS_HFLAG_BDS_STRICT; - break; - case BLTZALS: - gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 2); - ctx->hflags |= MIPS_HFLAG_BDS_STRICT; - break; - case BGEZ: - gen_compute_branch(ctx, OPC_BGEZ, 4, rs, -1, imm << 1, 4); - break; - case BGEZAL: - gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 4); - ctx->hflags |= MIPS_HFLAG_BDS_STRICT; - break; - case BGEZALS: - gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 2); - ctx->hflags |= MIPS_HFLAG_BDS_STRICT; - break; - case BLEZ: - gen_compute_branch(ctx, OPC_BLEZ, 4, rs, -1, imm << 1, 4); - break; - case BGTZ: - gen_compute_branch(ctx, OPC_BGTZ, 4, rs, -1, imm << 1, 4); - break; - - /* Traps */ - case TLTI: - mips32_op = OPC_TLTI; - goto do_trapi; - case TGEI: - mips32_op = OPC_TGEI; - goto do_trapi; - case TLTIU: - mips32_op 
= OPC_TLTIU; - goto do_trapi; - case TGEIU: - mips32_op = OPC_TGEIU; - goto do_trapi; - case TNEI: - mips32_op = OPC_TNEI; - goto do_trapi; - case TEQI: - mips32_op = OPC_TEQI; - do_trapi: - gen_trap(ctx, mips32_op, rs, -1, imm); - break; - - case BNEZC: - case BEQZC: - gen_compute_branch(ctx, minor == BNEZC ? OPC_BNE : OPC_BEQ, - 4, rs, 0, imm << 1, 0); - /* Compact branches don't have a delay slot, so just let - the normal delay slot handling take us to the branch - target. */ - break; - case LUI: - gen_logic_imm(ctx, OPC_LUI, rs, -1, imm); - break; - case SYNCI: - /* Break the TB to be able to sync copied instructions - immediately */ - ctx->bstate = BS_STOP; - break; - case BC2F: - case BC2T: - /* COP2: Not implemented. */ - generate_exception_err(ctx, EXCP_CpU, 2); - break; - case BC1F: - mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1FANY2 : OPC_BC1F; - goto do_cp1branch; - case BC1T: - mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1TANY2 : OPC_BC1T; - goto do_cp1branch; - case BC1ANY4F: - mips32_op = OPC_BC1FANY4; - goto do_cp1mips3d; - case BC1ANY4T: - mips32_op = OPC_BC1TANY4; - do_cp1mips3d: - check_cop1x(ctx); - check_insn(ctx, ASE_MIPS3D); - /* Fall through */ - do_cp1branch: - if (env->CP0_Config1 & (1 << CP0C1_FP)) { - check_cp1_enabled(ctx); - gen_compute_branch1(ctx, mips32_op, - (ctx->opcode >> 18) & 0x7, imm << 1); - } else { - generate_exception_err(ctx, EXCP_CpU, 1); - } - break; - case BPOSGE64: - case BPOSGE32: - /* MIPS DSP: not implemented */ - /* Fall through */ - default: - MIPS_INVAL("pool32i"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case POOL32C: - minor = (ctx->opcode >> 12) & 0xf; - switch (minor) { - case LWL: - mips32_op = OPC_LWL; - goto do_ld_lr; - case SWL: - mips32_op = OPC_SWL; - goto do_st_lr; - case LWR: - mips32_op = OPC_LWR; - goto do_ld_lr; - case SWR: - mips32_op = OPC_SWR; - goto do_st_lr; -#if defined(TARGET_MIPS64) - case LDL: - mips32_op = OPC_LDL; - goto do_ld_lr; - case SDL: - mips32_op = OPC_SDL; - goto do_st_lr; - case LDR: - mips32_op = OPC_LDR; - goto do_ld_lr; - case SDR: - mips32_op = OPC_SDR; - goto do_st_lr; - case LWU: - mips32_op = OPC_LWU; - goto do_ld_lr; - case LLD: - mips32_op = OPC_LLD; - goto do_ld_lr; -#endif - case LL: - mips32_op = OPC_LL; - goto do_ld_lr; - do_ld_lr: - gen_ld(ctx, mips32_op, rt, rs, SIMM(ctx->opcode, 0, 12)); - break; - do_st_lr: - gen_st(ctx, mips32_op, rt, rs, SIMM(ctx->opcode, 0, 12)); - break; - case SC: - gen_st_cond(ctx, OPC_SC, rt, rs, SIMM(ctx->opcode, 0, 12)); - break; -#if defined(TARGET_MIPS64) - case SCD: - gen_st_cond(ctx, OPC_SCD, rt, rs, SIMM(ctx->opcode, 0, 12)); - break; -#endif - case PREF: - /* Treat as no-op */ - break; - default: - MIPS_INVAL("pool32c"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case ADDI32: - mips32_op = OPC_ADDI; - goto do_addi; - case ADDIU32: - mips32_op = OPC_ADDIU; - do_addi: - gen_arith_imm(ctx, mips32_op, rt, rs, imm); - break; - - /* Logical operations */ - case ORI32: - mips32_op = OPC_ORI; - goto do_logici; - case XORI32: - mips32_op = OPC_XORI; - goto do_logici; - case ANDI32: - mips32_op = OPC_ANDI; - do_logici: - gen_logic_imm(ctx, mips32_op, rt, rs, imm); - break; - - /* Set less than immediate */ - case SLTI32: - mips32_op = OPC_SLTI; - goto do_slti; - case SLTIU32: - mips32_op = OPC_SLTIU; - do_slti: - gen_slt_imm(ctx, mips32_op, rt, rs, imm); - break; - case JALX32: - offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; - gen_compute_branch(ctx, OPC_JALX, 4, rt, rs, offset, 4); - ctx->hflags |= 
MIPS_HFLAG_BDS_STRICT; - break; - case JALS32: - offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 1; - gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, offset, 2); - ctx->hflags |= MIPS_HFLAG_BDS_STRICT; - break; - case BEQ32: - gen_compute_branch(ctx, OPC_BEQ, 4, rt, rs, imm << 1, 4); - break; - case BNE32: - gen_compute_branch(ctx, OPC_BNE, 4, rt, rs, imm << 1, 4); - break; - case J32: - gen_compute_branch(ctx, OPC_J, 4, rt, rs, - (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4); - break; - case JAL32: - gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, - (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4); - ctx->hflags |= MIPS_HFLAG_BDS_STRICT; - break; - /* Floating point (COP1) */ - case LWC132: - mips32_op = OPC_LWC1; - goto do_cop1; - case LDC132: - mips32_op = OPC_LDC1; - goto do_cop1; - case SWC132: - mips32_op = OPC_SWC1; - goto do_cop1; - case SDC132: - mips32_op = OPC_SDC1; - do_cop1: - gen_cop1_ldst(ctx, mips32_op, rt, rs, imm); - break; - case ADDIUPC: - { - int reg = mmreg(ZIMM(ctx->opcode, 23, 3)); - int offset = SIMM(ctx->opcode, 0, 23) << 2; - - gen_addiupc(ctx, reg, offset, 0, 0); - } - break; - /* Loads and stores */ - case LB32: - mips32_op = OPC_LB; - goto do_ld; - case LBU32: - mips32_op = OPC_LBU; - goto do_ld; - case LH32: - mips32_op = OPC_LH; - goto do_ld; - case LHU32: - mips32_op = OPC_LHU; - goto do_ld; - case LW32: - mips32_op = OPC_LW; - goto do_ld; -#ifdef TARGET_MIPS64 - case LD32: - mips32_op = OPC_LD; - goto do_ld; - case SD32: - mips32_op = OPC_SD; - goto do_st; -#endif - case SB32: - mips32_op = OPC_SB; - goto do_st; - case SH32: - mips32_op = OPC_SH; - goto do_st; - case SW32: - mips32_op = OPC_SW; - goto do_st; - do_ld: - gen_ld(ctx, mips32_op, rt, rs, imm); - break; - do_st: - gen_st(ctx, mips32_op, rt, rs, imm); - break; - default: - generate_exception(ctx, EXCP_RI); - break; - } -} - -static int decode_micromips_opc (CPUMIPSState *env, DisasContext *ctx, bool *insn_need_patch) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - uint32_t op; - - /* make sure instructions are on a halfword boundary */ - if (ctx->pc & 0x1) { - env->CP0_BadVAddr = ctx->pc; - generate_exception(ctx, EXCP_AdEL); - ctx->bstate = BS_STOP; - return 2; - } - - // Unicorn: trace this instruction on request - if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, ctx->pc)) { - gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_CODE_IDX, env->uc, ctx->pc); - *insn_need_patch = true; - // the callback might want to stop emulation immediately - check_exit_request(tcg_ctx); - } - - op = (ctx->opcode >> 10) & 0x3f; - /* Enforce properly-sized instructions in a delay slot */ - if (ctx->hflags & MIPS_HFLAG_BDS_STRICT) { - switch (op & 0x7) { /* MSB-3..MSB-5 */ - case 0: - /* POOL32A, POOL32B, POOL32I, POOL32C */ - case 4: - /* ADDI32, ADDIU32, ORI32, XORI32, SLTI32, SLTIU32, ANDI32, JALX32 */ - case 5: - /* LBU32, LHU32, POOL32F, JALS32, BEQ32, BNE32, J32, JAL32 */ - case 6: - /* SB32, SH32, ADDIUPC, SWC132, SDC132, SW32 */ - case 7: - /* LB32, LH32, LWC132, LDC132, LW32 */ - if (ctx->hflags & MIPS_HFLAG_BDS16) { - generate_exception(ctx, EXCP_RI); - /* Just stop translation; the user is confused. */ - ctx->bstate = BS_STOP; - return 2; - } - break; - case 1: - /* POOL16A, POOL16B, POOL16C, LWGP16, POOL16F */ - case 2: - /* LBU16, LHU16, LWSP16, LW16, SB16, SH16, SWSP16, SW16 */ - case 3: - /* MOVE16, ANDI16, POOL16D, POOL16E, BEQZ16, BNEZ16, B16, LI16 */ - if (ctx->hflags & MIPS_HFLAG_BDS32) { - generate_exception(ctx, EXCP_RI); - /* Just stop translation; the user is confused. 
*/ - ctx->bstate = BS_STOP; - return 2; - } - break; - } - } - - switch (op) { - case POOL16A: - { - int rd = mmreg(uMIPS_RD(ctx->opcode)); - int rs1 = mmreg(uMIPS_RS1(ctx->opcode)); - int rs2 = mmreg(uMIPS_RS2(ctx->opcode)); - uint32_t opc = 0; - - switch (ctx->opcode & 0x1) { - case ADDU16: - opc = OPC_ADDU; - break; - case SUBU16: - opc = OPC_SUBU; - break; - } - - gen_arith(ctx, opc, rd, rs1, rs2); - } - break; - case POOL16B: - { - int rd = mmreg(uMIPS_RD(ctx->opcode)); - int rs = mmreg(uMIPS_RS(ctx->opcode)); - int amount = (ctx->opcode >> 1) & 0x7; - uint32_t opc = 0; - amount = amount == 0 ? 8 : amount; - - switch (ctx->opcode & 0x1) { - case SLL16: - opc = OPC_SLL; - break; - case SRL16: - opc = OPC_SRL; - break; - } - - gen_shift_imm(ctx, opc, rd, rs, amount); - } - break; - case POOL16C: - gen_pool16c_insn(ctx); - break; - case LWGP16: - { - int rd = mmreg(uMIPS_RD(ctx->opcode)); - int rb = 28; /* GP */ - int16_t offset = SIMM(ctx->opcode, 0, 7) << 2; - - gen_ld(ctx, OPC_LW, rd, rb, offset); - } - break; - case POOL16F: - if (ctx->opcode & 1) { - generate_exception(ctx, EXCP_RI); - } else { - /* MOVEP */ - int enc_dest = uMIPS_RD(ctx->opcode); - int enc_rt = uMIPS_RS2(ctx->opcode); - int enc_rs = uMIPS_RS1(ctx->opcode); - int rd, rs, re, rt; - static const int rd_enc[] = { 5, 5, 6, 4, 4, 4, 4, 4 }; - static const int re_enc[] = { 6, 7, 7, 21, 22, 5, 6, 7 }; - static const int rs_rt_enc[] = { 0, 17, 2, 3, 16, 18, 19, 20 }; - - rd = rd_enc[enc_dest]; - re = re_enc[enc_dest]; - rs = rs_rt_enc[enc_rs]; - rt = rs_rt_enc[enc_rt]; - - gen_arith_imm(ctx, OPC_ADDIU, rd, rs, 0); - gen_arith_imm(ctx, OPC_ADDIU, re, rt, 0); - } - break; - case LBU16: - { - int rd = mmreg(uMIPS_RD(ctx->opcode)); - int rb = mmreg(uMIPS_RS(ctx->opcode)); - int16_t offset = ZIMM(ctx->opcode, 0, 4); - offset = (offset == 0xf ? 
-1 : offset); - - gen_ld(ctx, OPC_LBU, rd, rb, offset); - } - break; - case LHU16: - { - int rd = mmreg(uMIPS_RD(ctx->opcode)); - int rb = mmreg(uMIPS_RS(ctx->opcode)); - int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1; - - gen_ld(ctx, OPC_LHU, rd, rb, offset); - } - break; - case LWSP16: - { - int rd = (ctx->opcode >> 5) & 0x1f; - int rb = 29; /* SP */ - int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2; - - gen_ld(ctx, OPC_LW, rd, rb, offset); - } - break; - case LW16: - { - int rd = mmreg(uMIPS_RD(ctx->opcode)); - int rb = mmreg(uMIPS_RS(ctx->opcode)); - int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2; - - gen_ld(ctx, OPC_LW, rd, rb, offset); - } - break; - case SB16: - { - int rd = mmreg2(uMIPS_RD(ctx->opcode)); - int rb = mmreg(uMIPS_RS(ctx->opcode)); - int16_t offset = ZIMM(ctx->opcode, 0, 4); - - gen_st(ctx, OPC_SB, rd, rb, offset); - } - break; - case SH16: - { - int rd = mmreg2(uMIPS_RD(ctx->opcode)); - int rb = mmreg(uMIPS_RS(ctx->opcode)); - int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1; - - gen_st(ctx, OPC_SH, rd, rb, offset); - } - break; - case SWSP16: - { - int rd = (ctx->opcode >> 5) & 0x1f; - int rb = 29; /* SP */ - int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2; - - gen_st(ctx, OPC_SW, rd, rb, offset); - } - break; - case SW16: - { - int rd = mmreg2(uMIPS_RD(ctx->opcode)); - int rb = mmreg(uMIPS_RS(ctx->opcode)); - int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2; - - gen_st(ctx, OPC_SW, rd, rb, offset); - } - break; - case MOVE16: - { - int rd = uMIPS_RD5(ctx->opcode); - int rs = uMIPS_RS5(ctx->opcode); - - gen_arith_imm(ctx, OPC_ADDIU, rd, rs, 0); - } - break; - case ANDI16: - gen_andi16(ctx); - break; - case POOL16D: - switch (ctx->opcode & 0x1) { - case ADDIUS5: - gen_addius5(ctx); - break; - case ADDIUSP: - gen_addiusp(ctx); - break; - } - break; - case POOL16E: - switch (ctx->opcode & 0x1) { - case ADDIUR2: - gen_addiur2(ctx); - break; - case ADDIUR1SP: - gen_addiur1sp(ctx); - break; - } - break; - case B16: - gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, - SIMM(ctx->opcode, 0, 10) << 1, 4); - break; - case BNEZ16: - case BEQZ16: - gen_compute_branch(ctx, op == BNEZ16 ? OPC_BNE : OPC_BEQ, 2, - mmreg(uMIPS_RD(ctx->opcode)), - 0, SIMM(ctx->opcode, 0, 7) << 1, 4); - break; - case LI16: - { - int reg = mmreg(uMIPS_RD(ctx->opcode)); - int imm = ZIMM(ctx->opcode, 0, 7); - - imm = (imm == 0x7f ? -1 : imm); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[reg], imm); - } - break; - case RES_20: - case RES_28: - case RES_29: - case RES_30: - case RES_31: - case RES_38: - case RES_39: - generate_exception(ctx, EXCP_RI); - break; - default: - decode_micromips32_opc (env, ctx, op); - return 4; - } - - return 2; -} - -/* SmartMIPS extension to MIPS32 */ - -#if defined(TARGET_MIPS64) - -/* MDMX extension to MIPS64 */ - -#endif - -/* MIPSDSP functions. 
*/ -static void gen_mipsdsp_ld(DisasContext *ctx, uint32_t opc, - int rd, int base, int offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "ldx"; - TCGv t0; - - check_dsp(ctx); - t0 = tcg_temp_new(tcg_ctx); - - if (base == 0) { - gen_load_gpr(ctx, t0, offset); - } else if (offset == 0) { - gen_load_gpr(ctx, t0, base); - } else { - gen_op_addr_add(ctx, t0, *cpu_gpr[base], *cpu_gpr[offset]); - } - - switch (opc) { - case OPC_LBUX: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_UB); - gen_store_gpr(tcg_ctx, t0, rd); - opn = "lbux"; - break; - case OPC_LHX: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESW); - gen_store_gpr(tcg_ctx, t0, rd); - opn = "lhx"; - break; - case OPC_LWX: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESL); - gen_store_gpr(tcg_ctx, t0, rd); - opn = "lwx"; - break; -#if defined(TARGET_MIPS64) - case OPC_LDX: - tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEQ); - gen_store_gpr(tcg_ctx, t0, rd); - opn = "ldx"; - break; -#endif - } - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s %s, %s(%s)", opn, - regnames[rd], regnames[offset], regnames[base]); - tcg_temp_free(tcg_ctx, t0); -} - -static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, - int ret, int v1, int v2) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "mipsdsp arith"; - TCGv v1_t; - TCGv v2_t; - - if (ret == 0) { - /* Treat as NOP. */ - MIPS_DEBUG("NOP"); - return; - } - - v1_t = tcg_temp_new(tcg_ctx); - v2_t = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, v1_t, v1); - gen_load_gpr(ctx, v2_t, v2); - - switch (op1) { - /* OPC_MULT_G_2E is equal OPC_ADDUH_QB_DSP */ - case OPC_MULT_G_2E: - check_dspr2(ctx); - switch (op2) { - case OPC_ADDUH_QB: - gen_helper_adduh_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_ADDUH_R_QB: - gen_helper_adduh_r_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_ADDQH_PH: - gen_helper_addqh_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_ADDQH_R_PH: - gen_helper_addqh_r_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_ADDQH_W: - gen_helper_addqh_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_ADDQH_R_W: - gen_helper_addqh_r_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SUBUH_QB: - gen_helper_subuh_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SUBUH_R_QB: - gen_helper_subuh_r_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SUBQH_PH: - gen_helper_subqh_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SUBQH_R_PH: - gen_helper_subqh_r_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SUBQH_W: - gen_helper_subqh_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SUBQH_R_W: - gen_helper_subqh_r_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - } - break; - case OPC_ABSQ_S_PH_DSP: - switch (op2) { - case OPC_ABSQ_S_QB: - check_dspr2(ctx); - gen_helper_absq_s_qb(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); - break; - case OPC_ABSQ_S_PH: - check_dsp(ctx); - gen_helper_absq_s_ph(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); - break; - case OPC_ABSQ_S_W: - check_dsp(ctx); - gen_helper_absq_s_w(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); - break; - case OPC_PRECEQ_W_PHL: - check_dsp(ctx); - tcg_gen_andi_tl(tcg_ctx, *cpu_gpr[ret], v2_t, 0xFFFF0000); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret]); - break; - case OPC_PRECEQ_W_PHR: - check_dsp(ctx); - 
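-        /* preceq.w.phr widens the right (low) Q15 halfword into a Q31
-         * word: mask the low 16 bits, shift them into the upper half,
-         * then sign-extend. Illustrative input 0xdead8001 -> 0x80010000.
-         */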
tcg_gen_andi_tl(tcg_ctx, *cpu_gpr[ret], v2_t, 0x0000FFFF); - tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], 16); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret]); - break; - case OPC_PRECEQU_PH_QBL: - check_dsp(ctx); - gen_helper_precequ_ph_qbl(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEQU_PH_QBR: - check_dsp(ctx); - gen_helper_precequ_ph_qbr(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEQU_PH_QBLA: - check_dsp(ctx); - gen_helper_precequ_ph_qbla(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEQU_PH_QBRA: - check_dsp(ctx); - gen_helper_precequ_ph_qbra(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEU_PH_QBL: - check_dsp(ctx); - gen_helper_preceu_ph_qbl(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEU_PH_QBR: - check_dsp(ctx); - gen_helper_preceu_ph_qbr(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEU_PH_QBLA: - check_dsp(ctx); - gen_helper_preceu_ph_qbla(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEU_PH_QBRA: - check_dsp(ctx); - gen_helper_preceu_ph_qbra(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - } - break; - case OPC_ADDU_QB_DSP: - switch (op2) { - case OPC_ADDQ_PH: - check_dsp(ctx); - gen_helper_addq_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDQ_S_PH: - check_dsp(ctx); - gen_helper_addq_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDQ_S_W: - check_dsp(ctx); - gen_helper_addq_s_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDU_QB: - check_dsp(ctx); - gen_helper_addu_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDU_S_QB: - check_dsp(ctx); - gen_helper_addu_s_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDU_PH: - check_dspr2(ctx); - gen_helper_addu_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDU_S_PH: - check_dspr2(ctx); - gen_helper_addu_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBQ_PH: - check_dsp(ctx); - gen_helper_subq_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBQ_S_PH: - check_dsp(ctx); - gen_helper_subq_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBQ_S_W: - check_dsp(ctx); - gen_helper_subq_s_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBU_QB: - check_dsp(ctx); - gen_helper_subu_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBU_S_QB: - check_dsp(ctx); - gen_helper_subu_s_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBU_PH: - check_dspr2(ctx); - gen_helper_subu_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBU_S_PH: - check_dspr2(ctx); - gen_helper_subu_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDSC: - check_dsp(ctx); - gen_helper_addsc(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDWC: - check_dsp(ctx); - gen_helper_addwc(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MODSUB: - check_dsp(ctx); - gen_helper_modsub(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_RADDU_W_QB: - check_dsp(ctx); - gen_helper_raddu_w_qb(tcg_ctx, *cpu_gpr[ret], v1_t); - break; - } - break; - case OPC_CMPU_EQ_QB_DSP: - switch (op2) { - case OPC_PRECR_QB_PH: - check_dspr2(ctx); - gen_helper_precr_qb_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_PRECRQ_QB_PH: - 
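/*
 * PRECR.QB.PH (DSPr2, above) and PRECRQ.QB.PH (below) both pack the four
 * halfwords of rs:rt into four bytes; PRECR keeps the low byte of each
 * halfword, while PRECRQ keeps the high byte, i.e. it truncates each Q15
 * value toward Q7. A sketch of the PRECRQ packing in plain C (a model of
 * the helper's semantics, not its actual signature):
 *
 *   static uint32_t precrq_qb_ph(uint32_t rs, uint32_t rt)
 *   {
 *       return (((rs >> 16) & 0xFF00u) << 16)  // rs[31:24] -> rd[31:24]
 *            | (((rs >> 8) & 0x00FFu) << 16)   // rs[15:8]  -> rd[23:16]
 *            | ((rt >> 16) & 0xFF00u)          // rt[31:24] -> rd[15:8]
 *            | ((rt >> 8) & 0x00FFu);          // rt[15:8]  -> rd[7:0]
 *   }
 */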
check_dsp(ctx); - gen_helper_precrq_qb_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_PRECR_SRA_PH_W: - check_dspr2(ctx); - { - TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, v2); - gen_helper_precr_sra_ph_w(tcg_ctx, *cpu_gpr[ret], sa_t, v1_t, - *cpu_gpr[ret]); - tcg_temp_free_i32(tcg_ctx, sa_t); - break; - } - case OPC_PRECR_SRA_R_PH_W: - check_dspr2(ctx); - { - TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, v2); - gen_helper_precr_sra_r_ph_w(tcg_ctx, *cpu_gpr[ret], sa_t, v1_t, - *cpu_gpr[ret]); - tcg_temp_free_i32(tcg_ctx, sa_t); - break; - } - case OPC_PRECRQ_PH_W: - check_dsp(ctx); - gen_helper_precrq_ph_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_PRECRQ_RS_PH_W: - check_dsp(ctx); - gen_helper_precrq_rs_ph_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_PRECRQU_S_QB_PH: - check_dsp(ctx); - gen_helper_precrqu_s_qb_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - } - break; -#ifdef TARGET_MIPS64 - case OPC_ABSQ_S_QH_DSP: - switch (op2) { - case OPC_PRECEQ_L_PWL: - check_dsp(ctx); - tcg_gen_andi_tl(tcg_ctx, *cpu_gpr[ret], v2_t, 0xFFFFFFFF00000000ull); - break; - case OPC_PRECEQ_L_PWR: - check_dsp(ctx); - tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[ret], v2_t, 32); - break; - case OPC_PRECEQ_PW_QHL: - check_dsp(ctx); - gen_helper_preceq_pw_qhl(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEQ_PW_QHR: - check_dsp(ctx); - gen_helper_preceq_pw_qhr(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEQ_PW_QHLA: - check_dsp(ctx); - gen_helper_preceq_pw_qhla(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEQ_PW_QHRA: - check_dsp(ctx); - gen_helper_preceq_pw_qhra(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEQU_QH_OBL: - check_dsp(ctx); - gen_helper_precequ_qh_obl(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEQU_QH_OBR: - check_dsp(ctx); - gen_helper_precequ_qh_obr(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEQU_QH_OBLA: - check_dsp(ctx); - gen_helper_precequ_qh_obla(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEQU_QH_OBRA: - check_dsp(ctx); - gen_helper_precequ_qh_obra(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEU_QH_OBL: - check_dsp(ctx); - gen_helper_preceu_qh_obl(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEU_QH_OBR: - check_dsp(ctx); - gen_helper_preceu_qh_obr(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEU_QH_OBLA: - check_dsp(ctx); - gen_helper_preceu_qh_obla(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_PRECEU_QH_OBRA: - check_dsp(ctx); - gen_helper_preceu_qh_obra(tcg_ctx, *cpu_gpr[ret], v2_t); - break; - case OPC_ABSQ_S_OB: - check_dspr2(ctx); - gen_helper_absq_s_ob(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); - break; - case OPC_ABSQ_S_PW: - check_dsp(ctx); - gen_helper_absq_s_pw(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); - break; - case OPC_ABSQ_S_QH: - check_dsp(ctx); - gen_helper_absq_s_qh(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); - break; - } - break; - case OPC_ADDU_OB_DSP: - switch (op2) { - case OPC_RADDU_L_OB: - check_dsp(ctx); - gen_helper_raddu_l_ob(tcg_ctx, *cpu_gpr[ret], v1_t); - break; - case OPC_SUBQ_PW: - check_dsp(ctx); - gen_helper_subq_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBQ_S_PW: - check_dsp(ctx); - gen_helper_subq_s_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBQ_QH: - check_dsp(ctx); - gen_helper_subq_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBQ_S_QH: - check_dsp(ctx); - 
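/*
 * The _S_ (saturating) vector ops in this switch, like the SUBQ.S.QH
 * emitted below, are delegated to runtime helpers and get cpu_env as an
 * extra argument because they must also raise the ouflag bits in
 * DSPControl when a lane saturates. Per Q15 lane the helper work boils
 * down to a clamp-and-flag pattern (a sketch, not the real helper code):
 *
 *   static int16_t sat_q15_sub(int16_t a, int16_t b, int *ouflag)
 *   {
 *       int32_t r = (int32_t)a - (int32_t)b;
 *       if (r > 32767)  { r = 32767;  *ouflag = 1; }  // clamp high
 *       if (r < -32768) { r = -32768; *ouflag = 1; }  // clamp low
 *       return (int16_t)r;
 *   }
 */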
gen_helper_subq_s_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBU_OB: - check_dsp(ctx); - gen_helper_subu_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBU_S_OB: - check_dsp(ctx); - gen_helper_subu_s_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBU_QH: - check_dspr2(ctx); - gen_helper_subu_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBU_S_QH: - check_dspr2(ctx); - gen_helper_subu_s_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SUBUH_OB: - check_dspr2(ctx); - gen_helper_subuh_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SUBUH_R_OB: - check_dspr2(ctx); - gen_helper_subuh_r_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_ADDQ_PW: - check_dsp(ctx); - gen_helper_addq_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDQ_S_PW: - check_dsp(ctx); - gen_helper_addq_s_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDQ_QH: - check_dsp(ctx); - gen_helper_addq_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDQ_S_QH: - check_dsp(ctx); - gen_helper_addq_s_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDU_OB: - check_dsp(ctx); - gen_helper_addu_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDU_S_OB: - check_dsp(ctx); - gen_helper_addu_s_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDU_QH: - check_dspr2(ctx); - gen_helper_addu_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDU_S_QH: - check_dspr2(ctx); - gen_helper_addu_s_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_ADDUH_OB: - check_dspr2(ctx); - gen_helper_adduh_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_ADDUH_R_OB: - check_dspr2(ctx); - gen_helper_adduh_r_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - } - break; - case OPC_CMPU_EQ_OB_DSP: - switch (op2) { - case OPC_PRECR_OB_QH: - check_dspr2(ctx); - gen_helper_precr_ob_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_PRECR_SRA_QH_PW: - check_dspr2(ctx); - { - TCGv_i32 ret_t = tcg_const_i32(tcg_ctx, ret); - gen_helper_precr_sra_qh_pw(tcg_ctx, v2_t, v1_t, v2_t, ret_t); - tcg_temp_free_i32(tcg_ctx, ret_t); - break; - } - case OPC_PRECR_SRA_R_QH_PW: - check_dspr2(ctx); - { - TCGv_i32 sa_v = tcg_const_i32(tcg_ctx, ret); - gen_helper_precr_sra_r_qh_pw(tcg_ctx, v2_t, v1_t, v2_t, sa_v); - tcg_temp_free_i32(tcg_ctx, sa_v); - break; - } - case OPC_PRECRQ_OB_QH: - check_dsp(ctx); - gen_helper_precrq_ob_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_PRECRQ_PW_L: - check_dsp(ctx); - gen_helper_precrq_pw_l(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_PRECRQ_QH_PW: - check_dsp(ctx); - gen_helper_precrq_qh_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_PRECRQ_RS_QH_PW: - check_dsp(ctx); - gen_helper_precrq_rs_qh_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_PRECRQU_S_OB_QH: - check_dsp(ctx); - gen_helper_precrqu_s_ob_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - } - break; -#endif - } - - tcg_temp_free(tcg_ctx, v1_t); - tcg_temp_free(tcg_ctx, v2_t); - - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s", opn); -} - -static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, - int ret, int v1, int v2) -{ - TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - uint32_t op2; - const char *opn = "mipsdsp shift"; - TCGv t0; - TCGv v1_t; - TCGv v2_t; - - if (ret == 0) { - /* Treat as NOP. */ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - v1_t = tcg_temp_new(tcg_ctx); - v2_t = tcg_temp_new(tcg_ctx); - - tcg_gen_movi_tl(tcg_ctx, t0, v1); - gen_load_gpr(ctx, v1_t, v1); - gen_load_gpr(ctx, v2_t, v2); - - switch (opc) { - case OPC_SHLL_QB_DSP: - { - op2 = MASK_SHLL_QB(ctx->opcode); - switch (op2) { - case OPC_SHLL_QB: - check_dsp(ctx); - gen_helper_shll_qb(tcg_ctx, *cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SHLLV_QB: - check_dsp(ctx); - gen_helper_shll_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SHLL_PH: - check_dsp(ctx); - gen_helper_shll_ph(tcg_ctx, *cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SHLLV_PH: - check_dsp(ctx); - gen_helper_shll_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SHLL_S_PH: - check_dsp(ctx); - gen_helper_shll_s_ph(tcg_ctx, *cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SHLLV_S_PH: - check_dsp(ctx); - gen_helper_shll_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SHLL_S_W: - check_dsp(ctx); - gen_helper_shll_s_w(tcg_ctx, *cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SHLLV_S_W: - check_dsp(ctx); - gen_helper_shll_s_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_SHRL_QB: - check_dsp(ctx); - gen_helper_shrl_qb(tcg_ctx, *cpu_gpr[ret], t0, v2_t); - break; - case OPC_SHRLV_QB: - check_dsp(ctx); - gen_helper_shrl_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SHRL_PH: - check_dspr2(ctx); - gen_helper_shrl_ph(tcg_ctx, *cpu_gpr[ret], t0, v2_t); - break; - case OPC_SHRLV_PH: - check_dspr2(ctx); - gen_helper_shrl_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SHRA_QB: - check_dspr2(ctx); - gen_helper_shra_qb(tcg_ctx, *cpu_gpr[ret], t0, v2_t); - break; - case OPC_SHRA_R_QB: - check_dspr2(ctx); - gen_helper_shra_r_qb(tcg_ctx, *cpu_gpr[ret], t0, v2_t); - break; - case OPC_SHRAV_QB: - check_dspr2(ctx); - gen_helper_shra_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SHRAV_R_QB: - check_dspr2(ctx); - gen_helper_shra_r_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SHRA_PH: - check_dsp(ctx); - gen_helper_shra_ph(tcg_ctx, *cpu_gpr[ret], t0, v2_t); - break; - case OPC_SHRA_R_PH: - check_dsp(ctx); - gen_helper_shra_r_ph(tcg_ctx, *cpu_gpr[ret], t0, v2_t); - break; - case OPC_SHRAV_PH: - check_dsp(ctx); - gen_helper_shra_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SHRAV_R_PH: - check_dsp(ctx); - gen_helper_shra_r_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_SHRA_R_W: - check_dsp(ctx); - gen_helper_shra_r_w(tcg_ctx, *cpu_gpr[ret], t0, v2_t); - break; - case OPC_SHRAV_R_W: - check_dsp(ctx); - gen_helper_shra_r_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - default: /* Invalid */ - MIPS_INVAL("MASK SHLL.QB"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - } -#ifdef TARGET_MIPS64 - case OPC_SHLL_OB_DSP: - op2 = MASK_SHLL_OB(ctx->opcode); - switch (op2) { - case OPC_SHLL_PW: - check_dsp(ctx); - gen_helper_shll_pw(tcg_ctx, *cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_SHLLV_PW: - check_dsp(ctx); - gen_helper_shll_pw(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); - break; - case OPC_SHLL_S_PW: - check_dsp(ctx); - gen_helper_shll_s_pw(tcg_ctx, 
*cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_SHLLV_S_PW: - check_dsp(ctx); - gen_helper_shll_s_pw(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); - break; - case OPC_SHLL_OB: - check_dsp(ctx); - gen_helper_shll_ob(tcg_ctx, *cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_SHLLV_OB: - check_dsp(ctx); - gen_helper_shll_ob(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); - break; - case OPC_SHLL_QH: - check_dsp(ctx); - gen_helper_shll_qh(tcg_ctx, *cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_SHLLV_QH: - check_dsp(ctx); - gen_helper_shll_qh(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); - break; - case OPC_SHLL_S_QH: - check_dsp(ctx); - gen_helper_shll_s_qh(tcg_ctx, *cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_SHLLV_S_QH: - check_dsp(ctx); - gen_helper_shll_s_qh(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); - break; - case OPC_SHRA_OB: - check_dspr2(ctx); - gen_helper_shra_ob(tcg_ctx, *cpu_gpr[ret], v2_t, t0); - break; - case OPC_SHRAV_OB: - check_dspr2(ctx); - gen_helper_shra_ob(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); - break; - case OPC_SHRA_R_OB: - check_dspr2(ctx); - gen_helper_shra_r_ob(tcg_ctx, *cpu_gpr[ret], v2_t, t0); - break; - case OPC_SHRAV_R_OB: - check_dspr2(ctx); - gen_helper_shra_r_ob(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); - break; - case OPC_SHRA_PW: - check_dsp(ctx); - gen_helper_shra_pw(tcg_ctx, *cpu_gpr[ret], v2_t, t0); - break; - case OPC_SHRAV_PW: - check_dsp(ctx); - gen_helper_shra_pw(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); - break; - case OPC_SHRA_R_PW: - check_dsp(ctx); - gen_helper_shra_r_pw(tcg_ctx, *cpu_gpr[ret], v2_t, t0); - break; - case OPC_SHRAV_R_PW: - check_dsp(ctx); - gen_helper_shra_r_pw(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); - break; - case OPC_SHRA_QH: - check_dsp(ctx); - gen_helper_shra_qh(tcg_ctx, *cpu_gpr[ret], v2_t, t0); - break; - case OPC_SHRAV_QH: - check_dsp(ctx); - gen_helper_shra_qh(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); - break; - case OPC_SHRA_R_QH: - check_dsp(ctx); - gen_helper_shra_r_qh(tcg_ctx, *cpu_gpr[ret], v2_t, t0); - break; - case OPC_SHRAV_R_QH: - check_dsp(ctx); - gen_helper_shra_r_qh(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); - break; - case OPC_SHRL_OB: - check_dsp(ctx); - gen_helper_shrl_ob(tcg_ctx, *cpu_gpr[ret], v2_t, t0); - break; - case OPC_SHRLV_OB: - check_dsp(ctx); - gen_helper_shrl_ob(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); - break; - case OPC_SHRL_QH: - check_dspr2(ctx); - gen_helper_shrl_qh(tcg_ctx, *cpu_gpr[ret], v2_t, t0); - break; - case OPC_SHRLV_QH: - check_dspr2(ctx); - gen_helper_shrl_qh(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); - break; - default: /* Invalid */ - MIPS_INVAL("MASK SHLL.OB"); - generate_exception(ctx, EXCP_RI); - break; - } - break; -#endif - } - - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, v1_t); - tcg_temp_free(tcg_ctx, v2_t); - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s", opn); -} - -static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, - int ret, int v1, int v2, int check_ret) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "mipsdsp multiply"; - TCGv_i32 t0; - TCGv v1_t; - TCGv v2_t; - - if ((ret == 0) && (check_ret == 1)) { - /* Treat as NOP. 
*/ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new_i32(tcg_ctx); - v1_t = tcg_temp_new(tcg_ctx); - v2_t = tcg_temp_new(tcg_ctx); - - tcg_gen_movi_i32(tcg_ctx, t0, ret); - gen_load_gpr(ctx, v1_t, v1); - gen_load_gpr(ctx, v2_t, v2); - - switch (op1) { - /* OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have - * the same mask and op1. */ - case OPC_MULT_G_2E: - check_dspr2(ctx); - switch (op2) { - case OPC_MUL_PH: - gen_helper_mul_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MUL_S_PH: - gen_helper_mul_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULQ_S_W: - gen_helper_mulq_s_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULQ_RS_W: - gen_helper_mulq_rs_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - } - break; - case OPC_DPA_W_PH_DSP: - switch (op2) { - case OPC_DPAU_H_QBL: - check_dsp(ctx); - gen_helper_dpau_h_qbl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPAU_H_QBR: - check_dsp(ctx); - gen_helper_dpau_h_qbr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPSU_H_QBL: - check_dsp(ctx); - gen_helper_dpsu_h_qbl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPSU_H_QBR: - check_dsp(ctx); - gen_helper_dpsu_h_qbr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPA_W_PH: - check_dspr2(ctx); - gen_helper_dpa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPAX_W_PH: - check_dspr2(ctx); - gen_helper_dpax_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPAQ_S_W_PH: - check_dsp(ctx); - gen_helper_dpaq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPAQX_S_W_PH: - check_dspr2(ctx); - gen_helper_dpaqx_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPAQX_SA_W_PH: - check_dspr2(ctx); - gen_helper_dpaqx_sa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPS_W_PH: - check_dspr2(ctx); - gen_helper_dps_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPSX_W_PH: - check_dspr2(ctx); - gen_helper_dpsx_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPSQ_S_W_PH: - check_dsp(ctx); - gen_helper_dpsq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPSQX_S_W_PH: - check_dspr2(ctx); - gen_helper_dpsqx_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPSQX_SA_W_PH: - check_dspr2(ctx); - gen_helper_dpsqx_sa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULSAQ_S_W_PH: - check_dsp(ctx); - gen_helper_mulsaq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPAQ_SA_L_W: - check_dsp(ctx); - gen_helper_dpaq_sa_l_w(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_DPSQ_SA_L_W: - check_dsp(ctx); - gen_helper_dpsq_sa_l_w(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MAQ_S_W_PHL: - check_dsp(ctx); - gen_helper_maq_s_w_phl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MAQ_S_W_PHR: - check_dsp(ctx); - gen_helper_maq_s_w_phr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MAQ_SA_W_PHL: - check_dsp(ctx); - gen_helper_maq_sa_w_phl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MAQ_SA_W_PHR: - check_dsp(ctx); - gen_helper_maq_sa_w_phr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULSA_W_PH: - check_dspr2(ctx); - gen_helper_mulsa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); - break; - } - break; 
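/*
 * The DPA/DPS/MAQ family handled above accumulates into one of the four
 * 64-bit HI/LO accumulator pairs, which is why these helpers receive the
 * accumulator index in t0 (plus cpu_env) instead of writing a GPR.
 * DPA.W.PH, for example, is a two-lane signed dot product added to the
 * accumulator; a simplified model that ignores the DSPControl side
 * effects of the real helper, assuming <stdint.h> types:
 *
 *   static int64_t dpa_w_ph(int64_t acc, uint32_t rs, uint32_t rt)
 *   {
 *       int16_t rs_hi = (int16_t)(rs >> 16), rt_hi = (int16_t)(rt >> 16);
 *       int16_t rs_lo = (int16_t)rs, rt_lo = (int16_t)rt;
 *       return acc + (int32_t)rs_hi * rt_hi + (int32_t)rs_lo * rt_lo;
 *   }
 */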
-#ifdef TARGET_MIPS64 - case OPC_DPAQ_W_QH_DSP: - { - int ac = ret & 0x03; - tcg_gen_movi_i32(tcg_ctx, t0, ac); - - switch (op2) { - case OPC_DMADD: - check_dsp(ctx); - gen_helper_dmadd(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DMADDU: - check_dsp(ctx); - gen_helper_dmaddu(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DMSUB: - check_dsp(ctx); - gen_helper_dmsub(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DMSUBU: - check_dsp(ctx); - gen_helper_dmsubu(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DPA_W_QH: - check_dspr2(ctx); - gen_helper_dpa_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DPAQ_S_W_QH: - check_dsp(ctx); - gen_helper_dpaq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DPAQ_SA_L_PW: - check_dsp(ctx); - gen_helper_dpaq_sa_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DPAU_H_OBL: - check_dsp(ctx); - gen_helper_dpau_h_obl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DPAU_H_OBR: - check_dsp(ctx); - gen_helper_dpau_h_obr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DPS_W_QH: - check_dspr2(ctx); - gen_helper_dps_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DPSQ_S_W_QH: - check_dsp(ctx); - gen_helper_dpsq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DPSQ_SA_L_PW: - check_dsp(ctx); - gen_helper_dpsq_sa_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DPSU_H_OBL: - check_dsp(ctx); - gen_helper_dpsu_h_obl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DPSU_H_OBR: - check_dsp(ctx); - gen_helper_dpsu_h_obr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MAQ_S_L_PWL: - check_dsp(ctx); - gen_helper_maq_s_l_pwl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MAQ_S_L_PWR: - check_dsp(ctx); - gen_helper_maq_s_l_pwr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MAQ_S_W_QHLL: - check_dsp(ctx); - gen_helper_maq_s_w_qhll(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MAQ_SA_W_QHLL: - check_dsp(ctx); - gen_helper_maq_sa_w_qhll(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MAQ_S_W_QHLR: - check_dsp(ctx); - gen_helper_maq_s_w_qhlr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MAQ_SA_W_QHLR: - check_dsp(ctx); - gen_helper_maq_sa_w_qhlr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MAQ_S_W_QHRL: - check_dsp(ctx); - gen_helper_maq_s_w_qhrl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MAQ_SA_W_QHRL: - check_dsp(ctx); - gen_helper_maq_sa_w_qhrl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MAQ_S_W_QHRR: - check_dsp(ctx); - gen_helper_maq_s_w_qhrr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MAQ_SA_W_QHRR: - check_dsp(ctx); - gen_helper_maq_sa_w_qhrr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MULSAQ_S_L_PW: - check_dsp(ctx); - gen_helper_mulsaq_s_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - case OPC_MULSAQ_S_W_QH: - check_dsp(ctx); - gen_helper_mulsaq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); - break; - } - } - break; -#endif - case OPC_ADDU_QB_DSP: - switch (op2) { - case OPC_MULEU_S_PH_QBL: - check_dsp(ctx); - gen_helper_muleu_s_ph_qbl(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULEU_S_PH_QBR: - check_dsp(ctx); - gen_helper_muleu_s_ph_qbr(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, 
tcg_ctx->cpu_env); - break; - case OPC_MULQ_RS_PH: - check_dsp(ctx); - gen_helper_mulq_rs_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULEQ_S_W_PHL: - check_dsp(ctx); - gen_helper_muleq_s_w_phl(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULEQ_S_W_PHR: - check_dsp(ctx); - gen_helper_muleq_s_w_phr(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULQ_S_PH: - check_dspr2(ctx); - gen_helper_mulq_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - } - break; -#ifdef TARGET_MIPS64 - case OPC_ADDU_OB_DSP: - switch (op2) { - case OPC_MULEQ_S_PW_QHL: - check_dsp(ctx); - gen_helper_muleq_s_pw_qhl(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULEQ_S_PW_QHR: - check_dsp(ctx); - gen_helper_muleq_s_pw_qhr(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULEU_S_QH_OBL: - check_dsp(ctx); - gen_helper_muleu_s_qh_obl(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULEU_S_QH_OBR: - check_dsp(ctx); - gen_helper_muleu_s_qh_obr(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_MULQ_RS_QH: - check_dsp(ctx); - gen_helper_mulq_rs_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - } - break; -#endif - } - - tcg_temp_free_i32(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, v1_t); - tcg_temp_free(tcg_ctx, v2_t); - - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s", opn); - -} - -static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, - int ret, int val) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "mipsdsp Bit/ Manipulation"; - int16_t imm; - TCGv t0; - TCGv val_t; - - if (ret == 0) { - /* Treat as NOP. 
*/ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - val_t = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, val_t, val); - - switch (op1) { - case OPC_ABSQ_S_PH_DSP: - switch (op2) { - case OPC_BITREV: - check_dsp(ctx); - gen_helper_bitrev(tcg_ctx, *cpu_gpr[ret], val_t); - break; - case OPC_REPL_QB: - check_dsp(ctx); - { - target_long result; - imm = (ctx->opcode >> 16) & 0xFF; - result = (uint32_t)imm << 24 | - (uint32_t)imm << 16 | - (uint32_t)imm << 8 | - (uint32_t)imm; - result = (int32_t)result; - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[ret], result); - } - break; - case OPC_REPLV_QB: - check_dsp(ctx); - tcg_gen_ext8u_tl(tcg_ctx, *cpu_gpr[ret], val_t); - tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 8); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); - tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 16); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret]); - break; - case OPC_REPL_PH: - check_dsp(ctx); - { - imm = (ctx->opcode >> 16) & 0x03FF; - imm = (int16_t)(imm << 6) >> 6; - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[ret], \ - (target_long)((int32_t)((uint32_t)imm << 16) | \ - (uint16_t)imm)); - } - break; - case OPC_REPLV_PH: - check_dsp(ctx); - tcg_gen_ext16u_tl(tcg_ctx, *cpu_gpr[ret], val_t); - tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 16); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret]); - break; - } - break; -#ifdef TARGET_MIPS64 - case OPC_ABSQ_S_QH_DSP: - switch (op2) { - case OPC_REPL_OB: - check_dsp(ctx); - { - target_long temp; - - imm = (ctx->opcode >> 16) & 0xFF; - temp = ((uint64_t)imm << 8) | (uint64_t)imm; - temp = (temp << 16) | temp; - temp = (temp << 32) | temp; - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[ret], temp); - break; - } - case OPC_REPL_PW: - check_dsp(ctx); - { - target_long temp; - - imm = (ctx->opcode >> 16) & 0x03FF; - imm = (int16_t)(imm << 6) >> 6; - temp = ((target_long)imm << 32) \ - | ((target_long)imm & 0xFFFFFFFF); - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[ret], temp); - break; - } - case OPC_REPL_QH: - check_dsp(ctx); - { - target_long temp; - - imm = (ctx->opcode >> 16) & 0x03FF; - imm = (int16_t)(imm << 6) >> 6; - - temp = ((uint64_t)(uint16_t)imm << 48) | - ((uint64_t)(uint16_t)imm << 32) | - ((uint64_t)(uint16_t)imm << 16) | - (uint64_t)(uint16_t)imm; - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[ret], temp); - break; - } - case OPC_REPLV_OB: - check_dsp(ctx); - tcg_gen_ext8u_tl(tcg_ctx, *cpu_gpr[ret], val_t); - tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 8); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); - tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 16); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); - tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 32); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); - break; - case OPC_REPLV_PW: - check_dsp(ctx); - tcg_gen_ext32u_i64(tcg_ctx, *cpu_gpr[ret], val_t); - tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 32); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); - break; - case OPC_REPLV_QH: - check_dsp(ctx); - tcg_gen_ext16u_tl(tcg_ctx, *cpu_gpr[ret], val_t); - tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 16); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); - tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 32); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); - break; - } - break; -#endif - } - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, val_t); - - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s", 
opn); -} - -static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, - uint32_t op1, uint32_t op2, - int ret, int v1, int v2, int check_ret) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "mipsdsp add compare pick"; - TCGv t1; - TCGv v1_t; - TCGv v2_t; - - if ((ret == 0) && (check_ret == 1)) { - /* Treat as NOP. */ - MIPS_DEBUG("NOP"); - return; - } - - t1 = tcg_temp_new(tcg_ctx); - v1_t = tcg_temp_new(tcg_ctx); - v2_t = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, v1_t, v1); - gen_load_gpr(ctx, v2_t, v2); - - switch (op1) { - case OPC_CMPU_EQ_QB_DSP: - switch (op2) { - case OPC_CMPU_EQ_QB: - check_dsp(ctx); - gen_helper_cmpu_eq_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMPU_LT_QB: - check_dsp(ctx); - gen_helper_cmpu_lt_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMPU_LE_QB: - check_dsp(ctx); - gen_helper_cmpu_le_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMPGU_EQ_QB: - check_dsp(ctx); - gen_helper_cmpgu_eq_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_CMPGU_LT_QB: - check_dsp(ctx); - gen_helper_cmpgu_lt_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_CMPGU_LE_QB: - check_dsp(ctx); - gen_helper_cmpgu_le_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_CMPGDU_EQ_QB: - check_dspr2(ctx); - gen_helper_cmpgu_eq_qb(tcg_ctx, t1, v1_t, v2_t); - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[ret], t1); - tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); - tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); - tcg_gen_or_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, t1); - break; - case OPC_CMPGDU_LT_QB: - check_dspr2(ctx); - gen_helper_cmpgu_lt_qb(tcg_ctx, t1, v1_t, v2_t); - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[ret], t1); - tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); - tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); - tcg_gen_or_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, t1); - break; - case OPC_CMPGDU_LE_QB: - check_dspr2(ctx); - gen_helper_cmpgu_le_qb(tcg_ctx, t1, v1_t, v2_t); - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[ret], t1); - tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); - tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); - tcg_gen_or_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, t1); - break; - case OPC_CMP_EQ_PH: - check_dsp(ctx); - gen_helper_cmp_eq_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMP_LT_PH: - check_dsp(ctx); - gen_helper_cmp_lt_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMP_LE_PH: - check_dsp(ctx); - gen_helper_cmp_le_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_PICK_QB: - check_dsp(ctx); - gen_helper_pick_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_PICK_PH: - check_dsp(ctx); - gen_helper_pick_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_PACKRL_PH: - check_dsp(ctx); - gen_helper_packrl_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - } - break; -#ifdef TARGET_MIPS64 - case OPC_CMPU_EQ_OB_DSP: - switch (op2) { - case OPC_CMP_EQ_PW: - check_dsp(ctx); - gen_helper_cmp_eq_pw(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMP_LT_PW: - check_dsp(ctx); - gen_helper_cmp_lt_pw(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMP_LE_PW: - check_dsp(ctx); - gen_helper_cmp_le_pw(tcg_ctx, v1_t, v2_t, 
tcg_ctx->cpu_env); - break; - case OPC_CMP_EQ_QH: - check_dsp(ctx); - gen_helper_cmp_eq_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMP_LT_QH: - check_dsp(ctx); - gen_helper_cmp_lt_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMP_LE_QH: - check_dsp(ctx); - gen_helper_cmp_le_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMPGDU_EQ_OB: - check_dspr2(ctx); - gen_helper_cmpgdu_eq_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMPGDU_LT_OB: - check_dspr2(ctx); - gen_helper_cmpgdu_lt_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMPGDU_LE_OB: - check_dspr2(ctx); - gen_helper_cmpgdu_le_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMPGU_EQ_OB: - check_dsp(ctx); - gen_helper_cmpgu_eq_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_CMPGU_LT_OB: - check_dsp(ctx); - gen_helper_cmpgu_lt_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_CMPGU_LE_OB: - check_dsp(ctx); - gen_helper_cmpgu_le_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_CMPU_EQ_OB: - check_dsp(ctx); - gen_helper_cmpu_eq_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMPU_LT_OB: - check_dsp(ctx); - gen_helper_cmpu_lt_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_CMPU_LE_OB: - check_dsp(ctx); - gen_helper_cmpu_le_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_PACKRL_PW: - check_dsp(ctx); - gen_helper_packrl_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); - break; - case OPC_PICK_OB: - check_dsp(ctx); - gen_helper_pick_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_PICK_PW: - check_dsp(ctx); - gen_helper_pick_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - case OPC_PICK_QH: - check_dsp(ctx); - gen_helper_pick_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); - break; - } - break; -#endif - } - - tcg_temp_free(tcg_ctx, t1); - tcg_temp_free(tcg_ctx, v1_t); - tcg_temp_free(tcg_ctx, v2_t); - - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s", opn); -} - -static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx, - uint32_t op1, int rt, int rs, int sa) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "mipsdsp append/dappend"; - TCGv t0; - - check_dspr2(ctx); - - if (rt == 0) { - /* Treat as NOP. 
*/ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rs); - - switch (op1) { - case OPC_APPEND_DSP: - switch (MASK_APPEND(ctx->opcode)) { - case OPC_APPEND: - if (sa != 0) { - tcg_gen_deposit_tl(tcg_ctx, *cpu_gpr[rt], t0, *cpu_gpr[rt], sa, 32 - sa); - } - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]); - break; - case OPC_PREPEND: - if (sa != 0) { - tcg_gen_ext32u_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]); - tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], sa); - tcg_gen_shli_tl(tcg_ctx, t0, t0, 32 - sa); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], t0); - } - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]); - break; - case OPC_BALIGN: - sa &= 3; - if (sa != 0 && sa != 2) { - tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], 8 * sa); - tcg_gen_ext32u_tl(tcg_ctx, t0, t0); - tcg_gen_shri_tl(tcg_ctx, t0, t0, 8 * (4 - sa)); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], t0); - } - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]); - break; - default: /* Invalid */ - MIPS_INVAL("MASK APPEND"); - generate_exception(ctx, EXCP_RI); - break; - } - break; -#ifdef TARGET_MIPS64 - case OPC_DAPPEND_DSP: - switch (MASK_DAPPEND(ctx->opcode)) { - case OPC_DAPPEND: - if (sa != 0) { - tcg_gen_deposit_tl(tcg_ctx, *cpu_gpr[rt], t0, *cpu_gpr[rt], sa, 64 - sa); - } - break; - case OPC_PREPENDD: - tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], 0x20 | sa); - tcg_gen_shli_tl(tcg_ctx, t0, t0, 64 - (0x20 | sa)); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], t0); - break; - case OPC_PREPENDW: - if (sa != 0) { - tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], sa); - tcg_gen_shli_tl(tcg_ctx, t0, t0, 64 - sa); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], t0); - } - break; - case OPC_DBALIGN: - sa &= 7; - if (sa != 0 && sa != 2 && sa != 4) { - tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], 8 * sa); - tcg_gen_shri_tl(tcg_ctx, t0, t0, 8 * (8 - sa)); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], t0); - } - break; - default: /* Invalid */ - MIPS_INVAL("MASK DAPPEND"); - generate_exception(ctx, EXCP_RI); - break; - } - break; -#endif - } - tcg_temp_free(tcg_ctx, t0); - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s", opn); -} - -static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, - int ret, int v1, int v2, int check_ret) - -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - const char *opn = "mipsdsp accumulator"; - TCGv t0; - TCGv t1; - TCGv v1_t; - TCGv v2_t; - int16_t imm; - - if ((ret == 0) && (check_ret == 1)) { - /* Treat as NOP. 
*/ - MIPS_DEBUG("NOP"); - return; - } - - t0 = tcg_temp_new(tcg_ctx); - t1 = tcg_temp_new(tcg_ctx); - v1_t = tcg_temp_new(tcg_ctx); - v2_t = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, v1_t, v1); - gen_load_gpr(ctx, v2_t, v2); - - switch (op1) { - case OPC_EXTR_W_DSP: - check_dsp(ctx); - switch (op2) { - case OPC_EXTR_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_extr_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_EXTR_R_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_extr_r_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_EXTR_RS_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_extr_rs_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_EXTR_S_H: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_extr_s_h(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_EXTRV_S_H: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_extr_s_h(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_EXTRV_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_extr_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_EXTRV_R_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_extr_r_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_EXTRV_RS_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_extr_rs_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_EXTP: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_extp(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_EXTPV: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_extp(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_EXTPDP: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_extpdp(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_EXTPDPV: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_extpdp(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_SHILO: - imm = (ctx->opcode >> 20) & 0x3F; - tcg_gen_movi_tl(tcg_ctx, t0, ret); - tcg_gen_movi_tl(tcg_ctx, t1, imm); - gen_helper_shilo(tcg_ctx, t0, t1, tcg_ctx->cpu_env); - break; - case OPC_SHILOV: - tcg_gen_movi_tl(tcg_ctx, t0, ret); - gen_helper_shilo(tcg_ctx, t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_MTHLIP: - tcg_gen_movi_tl(tcg_ctx, t0, ret); - gen_helper_mthlip(tcg_ctx, t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_WRDSP: - imm = (ctx->opcode >> 11) & 0x3FF; - tcg_gen_movi_tl(tcg_ctx, t0, imm); - gen_helper_wrdsp(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); - break; - case OPC_RDDSP: - imm = (ctx->opcode >> 16) & 0x03FF; - tcg_gen_movi_tl(tcg_ctx, t0, imm); - gen_helper_rddsp(tcg_ctx, *cpu_gpr[ret], t0, tcg_ctx->cpu_env); - break; - } - break; -#ifdef TARGET_MIPS64 - case OPC_DEXTR_W_DSP: - check_dsp(ctx); - switch (op2) { - case OPC_DMTHLIP: - tcg_gen_movi_tl(tcg_ctx, t0, ret); - gen_helper_dmthlip(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); - break; - case OPC_DSHILO: - { - int shift = (ctx->opcode >> 19) & 0x7F; - int ac = (ctx->opcode >> 11) & 0x03; - tcg_gen_movi_tl(tcg_ctx, t0, shift); - tcg_gen_movi_tl(tcg_ctx, t1, ac); - gen_helper_dshilo(tcg_ctx, t0, t1, tcg_ctx->cpu_env); - break; - } - case OPC_DSHILOV: - { - int ac = (ctx->opcode >> 11) & 0x03; - tcg_gen_movi_tl(tcg_ctx, t0, ac); - gen_helper_dshilo(tcg_ctx, v1_t, t0, 
tcg_ctx->cpu_env); - break; - } - case OPC_DEXTP: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - - gen_helper_dextp(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_DEXTPV: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_dextp(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_DEXTPDP: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_dextpdp(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_DEXTPDPV: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_dextpdp(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_DEXTR_L: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_dextr_l(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_DEXTR_R_L: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_dextr_r_l(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_DEXTR_RS_L: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_dextr_rs_l(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_DEXTR_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_dextr_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_DEXTR_R_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_dextr_r_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_DEXTR_RS_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_dextr_rs_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_DEXTR_S_H: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - tcg_gen_movi_tl(tcg_ctx, t1, v1); - gen_helper_dextr_s_h(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); - break; - case OPC_DEXTRV_S_H: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_dextr_s_h(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_DEXTRV_L: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_dextr_l(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_DEXTRV_R_L: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_dextr_r_l(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_DEXTRV_RS_L: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_dextr_rs_l(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_DEXTRV_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_dextr_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_DEXTRV_R_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_dextr_r_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - case OPC_DEXTRV_RS_W: - tcg_gen_movi_tl(tcg_ctx, t0, v2); - gen_helper_dextr_rs_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); - break; - } - break; -#endif - } - - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); - tcg_temp_free(tcg_ctx, v1_t); - tcg_temp_free(tcg_ctx, v2_t); - - (void)opn; /* avoid a compiler warning */ - MIPS_DEBUG("%s", opn); -} - -/* End MIPSDSP functions. 
*/ - -/* Compact Branches */ -static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, - int rs, int rt, int32_t offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - int bcond_compute = 0; - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - - if (ctx->hflags & MIPS_HFLAG_BMASK) { -#ifdef MIPS_DEBUG_DISAS - LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx - "\n", ctx->pc); -#endif - generate_exception(ctx, EXCP_RI); - goto out; - } - - /* Load needed operands and calculate btarget */ - switch (opc) { - /* compact branch */ - case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */ - case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - bcond_compute = 1; - ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); - if (rs <= rt && rs == 0) { - /* OPC_BEQZALC, OPC_BNEZALC */ - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[31], ctx->pc + 4); - } - break; - case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */ - case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */ - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - bcond_compute = 1; - ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); - break; - case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */ - case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */ - if (rs == 0 || rs == rt) { - /* OPC_BLEZALC, OPC_BGEZALC */ - /* OPC_BGTZALC, OPC_BLTZALC */ - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[31], ctx->pc + 4); - } - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - bcond_compute = 1; - ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); - break; - case OPC_BC: - case OPC_BALC: - ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); - break; - case OPC_BEQZC: - case OPC_BNEZC: - if (rs != 0) { - /* OPC_BEQZC, OPC_BNEZC */ - gen_load_gpr(ctx, t0, rs); - bcond_compute = 1; - ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); - } else { - /* OPC_JIC, OPC_JIALC */ - TCGv tbase = tcg_temp_new(tcg_ctx); - TCGv toffset = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, tbase, rt); - tcg_gen_movi_tl(tcg_ctx, toffset, offset); - gen_op_addr_add(ctx, *(TCGv *)tcg_ctx->btarget, tbase, toffset); - tcg_temp_free(tcg_ctx, tbase); - tcg_temp_free(tcg_ctx, toffset); - } - break; - default: - MIPS_INVAL("Compact branch/jump"); - generate_exception(ctx, EXCP_RI); - goto out; - } - - if (bcond_compute == 0) { - /* Unconditional compact branch */ - switch (opc) { - case OPC_JIALC: - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[31], ctx->pc + 4); - /* Fallthrough */ - case OPC_JIC: - ctx->hflags |= MIPS_HFLAG_BR; - break; - case OPC_BALC: - tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[31], ctx->pc + 4); - /* Fallthrough */ - case OPC_BC: - ctx->hflags |= MIPS_HFLAG_B; - break; - default: - MIPS_INVAL("Compact branch/jump"); - generate_exception(ctx, EXCP_RI); - goto out; - } - - /* Generating branch here as compact branches don't have delay slot */ - gen_branch(ctx, 4); - } else { - /* Conditional compact branch */ - int fs = gen_new_label(tcg_ctx); - save_cpu_state(ctx, 0); - - switch (opc) { - case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */ - if (rs == 0 && rt != 0) { - /* OPC_BLEZALC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE), t1, 0, fs); - } else if (rs != 0 && rt != 0 && rs == rt) { - /* OPC_BGEZALC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t1, 0, fs); - } else { - /* OPC_BGEUC */ - tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GEU), t0, t1, fs); - } - break; - case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */ - if (rs == 0 && rt != 0) { - /* OPC_BGTZALC */ - 
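/*
 * Every conditional case in this switch uses the same inversion trick:
 * branch to the local label fs when the condition does NOT hold, fall
 * through into the goto_tb for the taken target, and resume at fs on the
 * not-taken path. The generated control flow, sketched as plain C rather
 * than TCG ops:
 *
 *   if (!(rt > 0))      // tcg_invert_cond(TCG_COND_GT)
 *       goto fs;        // not taken: skip the taken path
 *   goto btarget;       // taken: emitted by gen_goto_tb()
 *   fs: ;               // execution continues here
 */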
tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT), t1, 0, fs); - } else if (rs != 0 && rt != 0 && rs == rt) { - /* OPC_BLTZALC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t1, 0, fs); - } else { - /* OPC_BLTUC */ - tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LTU), t0, t1, fs); - } - break; - case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */ - if (rs == 0 && rt != 0) { - /* OPC_BLEZC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE), t1, 0, fs); - } else if (rs != 0 && rt != 0 && rs == rt) { - /* OPC_BGEZC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t1, 0, fs); - } else { - /* OPC_BGEC */ - tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t0, t1, fs); - } - break; - case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */ - if (rs == 0 && rt != 0) { - /* OPC_BGTZC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT), t1, 0, fs); - } else if (rs != 0 && rt != 0 && rs == rt) { - /* OPC_BLTZC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t1, 0, fs); - } else { - /* OPC_BLTC */ - tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t0, t1, fs); - } - break; - case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */ - case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ - if (rs >= rt) { - /* OPC_BOVC, OPC_BNVC */ - TCGv t2 = tcg_temp_new(tcg_ctx); - TCGv t3 = tcg_temp_new(tcg_ctx); - TCGv t4 = tcg_temp_new(tcg_ctx); - TCGv input_overflow = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - tcg_gen_ext32s_tl(tcg_ctx, t2, t0); - tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, input_overflow, t2, t0); - tcg_gen_ext32s_tl(tcg_ctx, t3, t1); - tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, t4, t3, t1); - tcg_gen_or_tl(tcg_ctx, input_overflow, input_overflow, t4); - - tcg_gen_add_tl(tcg_ctx, t4, t2, t3); - tcg_gen_ext32s_tl(tcg_ctx, t4, t4); - tcg_gen_xor_tl(tcg_ctx, t2, t2, t3); - tcg_gen_xor_tl(tcg_ctx, t3, t4, t3); - tcg_gen_andc_tl(tcg_ctx, t2, t3, t2); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, t4, t2, 0); - tcg_gen_or_tl(tcg_ctx, t4, t4, input_overflow); - if (opc == OPC_BOVC) { - /* OPC_BOVC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t4, 0, fs); - } else { - /* OPC_BNVC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t4, 0, fs); - } - tcg_temp_free(tcg_ctx, input_overflow); - tcg_temp_free(tcg_ctx, t4); - tcg_temp_free(tcg_ctx, t3); - tcg_temp_free(tcg_ctx, t2); - } else if (rs < rt && rs == 0) { - /* OPC_BEQZALC, OPC_BNEZALC */ - if (opc == OPC_BEQZALC) { - /* OPC_BEQZALC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t1, 0, fs); - } else { - /* OPC_BNEZALC */ - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t1, 0, fs); - } - } else { - /* OPC_BEQC, OPC_BNEC */ - if (opc == OPC_BEQC) { - /* OPC_BEQC */ - tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t0, t1, fs); - } else { - /* OPC_BNEC */ - tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t0, t1, fs); - } - } - break; - case OPC_BEQZC: - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t0, 0, fs); - break; - case OPC_BNEZC: - tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t0, 0, fs); - break; - default: - MIPS_INVAL("Compact conditional branch/jump"); - generate_exception(ctx, EXCP_RI); - goto out; - } - - /* Generating branch here as compact branches don't have delay slot */ - gen_goto_tb(ctx, 1, ctx->btarget); - gen_set_label(tcg_ctx, fs); - - ctx->hflags |= MIPS_HFLAG_FBNSLOT; - MIPS_DEBUG("Compact conditional branch"); - } - -out: - tcg_temp_free(tcg_ctx, t0); - 
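/*
 * Note on the BOVC/BNVC case above: 32-bit signed-add overflow is computed
 * without a flags register as ((sum ^ b) & ~(a ^ b)) < 0, i.e. the operands
 * agreed in sign but the sum does not, while input_overflow separately
 * flags operands that were not proper sign-extended 32-bit values. The same
 * predicate as a standalone sketch in portable C:
 *
 *   static int add32_overflows(int32_t a, int32_t b)
 *   {
 *       uint32_t ua = (uint32_t)a, ub = (uint32_t)b;
 *       uint32_t sum = ua + ub;                    // wraps mod 2^32
 *       return (int32_t)((sum ^ ub) & ~(ua ^ ub)) < 0;
 *   }
 */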
tcg_temp_free(tcg_ctx, t1); -} - -static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - int rs, rt, rd, sa; - uint32_t op1, op2; - - rs = (ctx->opcode >> 21) & 0x1f; - rt = (ctx->opcode >> 16) & 0x1f; - rd = (ctx->opcode >> 11) & 0x1f; - sa = (ctx->opcode >> 6) & 0x1f; - - op1 = MASK_SPECIAL(ctx->opcode); - switch (op1) { - case OPC_LSA: - if (rd != 0) { - int imm2 = extract32(ctx->opcode, 6, 3); - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - tcg_gen_shli_tl(tcg_ctx, t0, t0, imm2 + 1); - tcg_gen_add_tl(tcg_ctx, t0, t0, t1); - tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], t0); - tcg_temp_free(tcg_ctx, t1); - tcg_temp_free(tcg_ctx, t0); - } - break; - case OPC_MULT: case OPC_MULTU: case OPC_DIV: case OPC_DIVU: - op2 = MASK_R6_MULDIV(ctx->opcode); - switch (op2) { - case R6_OPC_MUL: - case R6_OPC_MUH: - case R6_OPC_MULU: - case R6_OPC_MUHU: - case R6_OPC_DIV: - case R6_OPC_MOD: - case R6_OPC_DIVU: - case R6_OPC_MODU: - gen_r6_muldiv(ctx, op2, rd, rs, rt); - break; - default: - MIPS_INVAL("special_r6 muldiv"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_SELEQZ: - case OPC_SELNEZ: - gen_cond_move(ctx, op1, rd, rs, rt); - break; - case R6_OPC_CLO: - case R6_OPC_CLZ: - if (rt == 0 && sa == 1) { - /* Major opcode and function fields are shared with preR6 MFHI/MTHI. - We additionally need to check the other fields. */ - gen_cl(ctx, op1, rd, rs); - } else { - generate_exception(ctx, EXCP_RI); - } - break; - case R6_OPC_SDBBP: - if (ctx->hflags & MIPS_HFLAG_SBRI) { - generate_exception(ctx, EXCP_RI); - } else { - generate_exception(ctx, EXCP_DBp); - } - break; -#if defined(TARGET_MIPS64) - case OPC_DLSA: - check_mips_64(ctx); - if (rd != 0) { - int imm2 = extract32(ctx->opcode, 6, 3); - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rs); - gen_load_gpr(ctx, t1, rt); - tcg_gen_shli_tl(tcg_ctx, t0, t0, imm2 + 1); - tcg_gen_add_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); - tcg_temp_free(tcg_ctx, t1); - tcg_temp_free(tcg_ctx, t0); - } - break; - case R6_OPC_DCLO: - case R6_OPC_DCLZ: - if (rt == 0 && sa == 1) { - /* Major opcode and function fields are shared with preR6 MFHI/MTHI. 
- We additionally need to check the other fields. */ - check_mips_64(ctx); - gen_cl(ctx, op1, rd, rs); - } else { - generate_exception(ctx, EXCP_RI); - } - break; - case OPC_DMULT: case OPC_DMULTU: case OPC_DDIV: case OPC_DDIVU: - op2 = MASK_R6_MULDIV(ctx->opcode); - switch (op2) { - case R6_OPC_DMUL: - case R6_OPC_DMUH: - case R6_OPC_DMULU: - case R6_OPC_DMUHU: - case R6_OPC_DDIV: - case R6_OPC_DMOD: - case R6_OPC_DDIVU: - case R6_OPC_DMODU: - check_mips_64(ctx); - gen_r6_muldiv(ctx, op2, rd, rs, rt); - break; - default: - MIPS_INVAL("special_r6 muldiv"); - generate_exception(ctx, EXCP_RI); - break; - } - break; -#endif - default: /* Invalid */ - MIPS_INVAL("special_r6"); - generate_exception(ctx, EXCP_RI); - break; - } -} - -static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx) -{ - int rs, rt, rd, sa; - uint32_t op1; - - rs = (ctx->opcode >> 21) & 0x1f; - rt = (ctx->opcode >> 16) & 0x1f; - rd = (ctx->opcode >> 11) & 0x1f; - sa = (ctx->opcode >> 6) & 0x1f; - - op1 = MASK_SPECIAL(ctx->opcode); - switch (op1) { - case OPC_MOVN: /* Conditional move */ - case OPC_MOVZ: - check_insn(ctx, ISA_MIPS4 | ISA_MIPS32 | - INSN_LOONGSON2E | INSN_LOONGSON2F); - gen_cond_move(ctx, op1, rd, rs, rt); - break; - case OPC_MFHI: /* Move from HI/LO */ - case OPC_MFLO: - gen_HILO(ctx, op1, rs & 3, rd); - break; - case OPC_MTHI: - case OPC_MTLO: /* Move to HI/LO */ - gen_HILO(ctx, op1, rd & 3, rs); - break; - case OPC_MOVCI: - check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); - if (env->CP0_Config1 & (1 << CP0C1_FP)) { - check_cp1_enabled(ctx); - gen_movci(ctx, rd, rs, (ctx->opcode >> 18) & 0x7, - (ctx->opcode >> 16) & 1); - } else { - generate_exception_err(ctx, EXCP_CpU, 1); - } - break; - case OPC_MULT: - case OPC_MULTU: - if (sa) { - check_insn(ctx, INSN_VR54XX); - op1 = MASK_MUL_VR54XX(ctx->opcode); - gen_mul_vr54xx(ctx, op1, rd, rs, rt); - } else { - gen_muldiv(ctx, op1, rd & 3, rs, rt); - } - break; - case OPC_DIV: - case OPC_DIVU: - gen_muldiv(ctx, op1, 0, rs, rt); - break; -#if defined(TARGET_MIPS64) - case OPC_DMULT: case OPC_DMULTU: case OPC_DDIV: case OPC_DDIVU: - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_muldiv(ctx, op1, 0, rs, rt); - break; -#endif - case OPC_JR: - gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4); - break; - case OPC_SPIM: -#ifdef MIPS_STRICT_STANDARD - MIPS_INVAL("SPIM"); - generate_exception(ctx, EXCP_RI); -#else - /* Implemented as RI exception for now. 
*/ - MIPS_INVAL("spim (unofficial)"); - generate_exception(ctx, EXCP_RI); -#endif - break; - default: /* Invalid */ - MIPS_INVAL("special_legacy"); - generate_exception(ctx, EXCP_RI); - break; - } -} - -static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - int rs, rt, rd, sa; - uint32_t op1; - - rs = (ctx->opcode >> 21) & 0x1f; - rt = (ctx->opcode >> 16) & 0x1f; - rd = (ctx->opcode >> 11) & 0x1f; - sa = (ctx->opcode >> 6) & 0x1f; - - op1 = MASK_SPECIAL(ctx->opcode); - switch (op1) { - case OPC_SLL: /* Shift with immediate */ - if (sa == 5 && rd == 0 && - rs == 0 && rt == 0) { /* PAUSE */ - if ((ctx->insn_flags & ISA_MIPS32R6) && - (ctx->hflags & MIPS_HFLAG_BMASK)) { - MIPS_DEBUG("CTI in delay / forbidden slot"); - generate_exception(ctx, EXCP_RI); - break; - } - } - /* Fallthrough */ - case OPC_SRA: - gen_shift_imm(ctx, op1, rd, rt, sa); - break; - case OPC_SRL: - switch ((ctx->opcode >> 21) & 0x1f) { - case 1: - /* rotr is decoded as srl on non-R2 CPUs */ - if (ctx->insn_flags & ISA_MIPS32R2) { - op1 = OPC_ROTR; - } - /* Fallthrough */ - case 0: - gen_shift_imm(ctx, op1, rd, rt, sa); - break; - default: - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_ADD: case OPC_ADDU: case OPC_SUB: case OPC_SUBU: - gen_arith(ctx, op1, rd, rs, rt); - break; - case OPC_SLLV: /* Shifts */ - case OPC_SRAV: - gen_shift(ctx, op1, rd, rs, rt); - break; - case OPC_SRLV: - switch ((ctx->opcode >> 6) & 0x1f) { - case 1: - /* rotrv is decoded as srlv on non-R2 CPUs */ - if (ctx->insn_flags & ISA_MIPS32R2) { - op1 = OPC_ROTRV; - } - /* Fallthrough */ - case 0: - gen_shift(ctx, op1, rd, rs, rt); - break; - default: - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_SLT: /* Set on less than */ - case OPC_SLTU: - gen_slt(ctx, op1, rd, rs, rt); - break; - case OPC_AND: /* Logic*/ - case OPC_OR: - case OPC_NOR: - case OPC_XOR: - gen_logic(ctx, op1, rd, rs, rt); - break; - case OPC_JALR: - gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4); - break; - case OPC_TGE: case OPC_TGEU: case OPC_TLT: case OPC_TLTU: case OPC_TEQ: - case OPC_TNE: - gen_trap(ctx, op1, rs, rt, -1); - break; - case OPC_LSA: /* OPC_PMON */ - if ((ctx->insn_flags & ISA_MIPS32R6) || - (env->CP0_Config3 & (1 << CP0C3_MSAP))) { - decode_opc_special_r6(env, ctx); - } else { - /* Pmon entry point, also R4010 selsl */ -#ifdef MIPS_STRICT_STANDARD - MIPS_INVAL("PMON / selsl"); - generate_exception(ctx, EXCP_RI); -#else - gen_helper_0e0i(tcg_ctx, pmon, sa); -#endif - } - break; - case OPC_SYSCALL: - generate_exception(ctx, EXCP_SYSCALL); - ctx->bstate = BS_STOP; - break; - case OPC_BREAK: - generate_exception(ctx, EXCP_BREAK); - break; - case OPC_SYNC: - /* Treat as NOP. 
*/ - break; - -#if defined(TARGET_MIPS64) - /* MIPS64 specific opcodes */ - case OPC_DSLL: - case OPC_DSRA: - case OPC_DSLL32: - case OPC_DSRA32: - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_shift_imm(ctx, op1, rd, rt, sa); - break; - case OPC_DSRL: - switch ((ctx->opcode >> 21) & 0x1f) { - case 1: - /* drotr is decoded as dsrl on non-R2 CPUs */ - if (ctx->insn_flags & ISA_MIPS32R2) { - op1 = OPC_DROTR; - } - /* Fallthrough */ - case 0: - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_shift_imm(ctx, op1, rd, rt, sa); - break; - default: - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_DSRL32: - switch ((ctx->opcode >> 21) & 0x1f) { - case 1: - /* drotr32 is decoded as dsrl32 on non-R2 CPUs */ - if (ctx->insn_flags & ISA_MIPS32R2) { - op1 = OPC_DROTR32; - } - /* Fallthrough */ - case 0: - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_shift_imm(ctx, op1, rd, rt, sa); - break; - default: - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_DADD: case OPC_DADDU: case OPC_DSUB: case OPC_DSUBU: - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_arith(ctx, op1, rd, rs, rt); - break; - case OPC_DSLLV: - case OPC_DSRAV: - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_shift(ctx, op1, rd, rs, rt); - break; - case OPC_DSRLV: - switch ((ctx->opcode >> 6) & 0x1f) { - case 1: - /* drotrv is decoded as dsrlv on non-R2 CPUs */ - if (ctx->insn_flags & ISA_MIPS32R2) { - op1 = OPC_DROTRV; - } - /* Fallthrough */ - case 0: - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_shift(ctx, op1, rd, rs, rt); - break; - default: - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_DLSA: - if ((ctx->insn_flags & ISA_MIPS32R6) || - (env->CP0_Config3 & (1 << CP0C3_MSAP))) { - decode_opc_special_r6(env, ctx); - } - break; -#endif - default: - if (ctx->insn_flags & ISA_MIPS32R6) { - decode_opc_special_r6(env, ctx); - } else { - decode_opc_special_legacy(env, ctx); - } - } -} - -static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx) -{ - int rs, rt, rd; - uint32_t op1; - - check_insn_opc_removed(ctx, ISA_MIPS32R6); - - rs = (ctx->opcode >> 21) & 0x1f; - rt = (ctx->opcode >> 16) & 0x1f; - rd = (ctx->opcode >> 11) & 0x1f; - - op1 = MASK_SPECIAL2(ctx->opcode); - switch (op1) { - case OPC_MADD: case OPC_MADDU: - case OPC_MSUB: case OPC_MSUBU: - check_insn(ctx, ISA_MIPS32); - gen_muldiv(ctx, op1, rd & 3, rs, rt); - break; - case OPC_MUL: - gen_arith(ctx, op1, rd, rs, rt); - break; - case OPC_DIV_G_2F: - case OPC_DIVU_G_2F: - case OPC_MULT_G_2F: - case OPC_MULTU_G_2F: - case OPC_MOD_G_2F: - case OPC_MODU_G_2F: - check_insn(ctx, INSN_LOONGSON2F); - gen_loongson_integer(ctx, op1, rd, rs, rt); - break; - case OPC_CLO: - case OPC_CLZ: - check_insn(ctx, ISA_MIPS32); - gen_cl(ctx, op1, rd, rs); - break; - case OPC_SDBBP: - /* XXX: not clear which exception should be raised - * when in debug mode... - */ - check_insn(ctx, ISA_MIPS32); - if (!(ctx->hflags & MIPS_HFLAG_DM)) { - generate_exception(ctx, EXCP_DBp); - } else { - generate_exception(ctx, EXCP_DBp); - } - /* Treat as NOP. 
*/ - break; -#if defined(TARGET_MIPS64) - case OPC_DCLO: - case OPC_DCLZ: - check_insn(ctx, ISA_MIPS64); - check_mips_64(ctx); - gen_cl(ctx, op1, rd, rs); - break; - case OPC_DMULT_G_2F: - case OPC_DMULTU_G_2F: - case OPC_DDIV_G_2F: - case OPC_DDIVU_G_2F: - case OPC_DMOD_G_2F: - case OPC_DMODU_G_2F: - check_insn(ctx, INSN_LOONGSON2F); - gen_loongson_integer(ctx, op1, rd, rs, rt); - break; -#endif - default: /* Invalid */ - MIPS_INVAL("special2_legacy"); - generate_exception(ctx, EXCP_RI); - break; - } -} - -static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - int rs, rt, rd, sa; - uint32_t op1, op2; - int16_t imm; - - rs = (ctx->opcode >> 21) & 0x1f; - rt = (ctx->opcode >> 16) & 0x1f; - rd = (ctx->opcode >> 11) & 0x1f; - sa = (ctx->opcode >> 6) & 0x1f; - imm = (int16_t)ctx->opcode >> 7; - - op1 = MASK_SPECIAL3(ctx->opcode); - switch (op1) { - case R6_OPC_PREF: - if (rt >= 24) { - /* hint codes 24-31 are reserved and signal RI */ - generate_exception(ctx, EXCP_RI); - } - /* Treat as NOP. */ - break; - case R6_OPC_CACHE: - /* Treat as NOP. */ - break; - case R6_OPC_SC: - gen_st_cond(ctx, op1, rt, rs, imm); - break; - case R6_OPC_LL: - gen_ld(ctx, op1, rt, rs, imm); - break; - case OPC_BSHFL: - { - TCGv t0; - if (rd == 0) { - /* Treat as NOP. */ - break; - } - t0 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rt); - - op2 = MASK_BSHFL(ctx->opcode); - switch (op2) { - case OPC_ALIGN: case OPC_ALIGN_END: - sa &= 3; - if (sa == 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], t0); - } else { - TCGv t1 = tcg_temp_new(tcg_ctx); - TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); - gen_load_gpr(ctx, t1, rs); - tcg_gen_concat_tl_i64(tcg_ctx, t2, t1, t0); - tcg_gen_shri_i64(tcg_ctx, t2, t2, 8 * (4 - sa)); -#if defined(TARGET_MIPS64) - tcg_gen_ext32s_i64(tcg_ctx, *cpu_gpr[rd], t2); -#else - tcg_gen_trunc_i64_i32(tcg_ctx, *cpu_gpr[rd], t2); -#endif - tcg_temp_free_i64(tcg_ctx, t2); - tcg_temp_free(tcg_ctx, t1); - } - break; - case OPC_BITSWAP: - gen_helper_bitswap(tcg_ctx, *cpu_gpr[rd], t0); - break; - } - tcg_temp_free(tcg_ctx, t0); - } - break; -#if defined(TARGET_MIPS64) - case R6_OPC_SCD: - gen_st_cond(ctx, op1, rt, rs, imm); - break; - case R6_OPC_LLD: - gen_ld(ctx, op1, rt, rs, imm); - break; - case OPC_DBSHFL: - check_mips_64(ctx); - { - TCGv t0; - if (rd == 0) { - /* Treat as NOP. 
*/ - break; - } - t0 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rt); - - op2 = MASK_DBSHFL(ctx->opcode); - switch (op2) { - case OPC_DALIGN: case OPC_DALIGN_END: - sa &= 7; - if (sa == 0) { - tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], t0); - } else { - TCGv t1 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t1, rs); - tcg_gen_shli_tl(tcg_ctx, t0, t0, 8 * sa); - tcg_gen_shri_tl(tcg_ctx, t1, t1, 8 * (8 - sa)); - tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); - tcg_temp_free(tcg_ctx, t1); - } - break; - case OPC_DBITSWAP: - gen_helper_dbitswap(tcg_ctx, *cpu_gpr[rd], t0); - break; - } - tcg_temp_free(tcg_ctx, t0); - } - break; -#endif - default: /* Invalid */ - MIPS_INVAL("special3_r6"); - generate_exception(ctx, EXCP_RI); - break; - } -} - -static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - int rs, rt, rd; - uint32_t op1, op2; - - rs = (ctx->opcode >> 21) & 0x1f; - rt = (ctx->opcode >> 16) & 0x1f; - rd = (ctx->opcode >> 11) & 0x1f; - - op1 = MASK_SPECIAL3(ctx->opcode); - switch (op1) { - case OPC_DIV_G_2E: case OPC_DIVU_G_2E: - case OPC_MOD_G_2E: case OPC_MODU_G_2E: - case OPC_MULT_G_2E: case OPC_MULTU_G_2E: - /* OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have - * the same mask and op1. */ - if ((ctx->insn_flags & ASE_DSPR2) && (op1 == OPC_MULT_G_2E)) { - op2 = MASK_ADDUH_QB(ctx->opcode); - switch (op2) { - case OPC_ADDUH_QB: - case OPC_ADDUH_R_QB: - case OPC_ADDQH_PH: - case OPC_ADDQH_R_PH: - case OPC_ADDQH_W: - case OPC_ADDQH_R_W: - case OPC_SUBUH_QB: - case OPC_SUBUH_R_QB: - case OPC_SUBQH_PH: - case OPC_SUBQH_R_PH: - case OPC_SUBQH_W: - case OPC_SUBQH_R_W: - gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); - break; - case OPC_MUL_PH: - case OPC_MUL_S_PH: - case OPC_MULQ_S_W: - case OPC_MULQ_RS_W: - gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); - break; - default: - MIPS_INVAL("MASK ADDUH.QB"); - generate_exception(ctx, EXCP_RI); - break; - } - } else if (ctx->insn_flags & INSN_LOONGSON2E) { - gen_loongson_integer(ctx, op1, rd, rs, rt); - } else { - generate_exception(ctx, EXCP_RI); - } - break; - case OPC_LX_DSP: - op2 = MASK_LX(ctx->opcode); - switch (op2) { -#if defined(TARGET_MIPS64) - case OPC_LDX: -#endif - case OPC_LBUX: - case OPC_LHX: - case OPC_LWX: - gen_mipsdsp_ld(ctx, op2, rd, rs, rt); - break; - default: /* Invalid */ - MIPS_INVAL("MASK LX"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_ABSQ_S_PH_DSP: - op2 = MASK_ABSQ_S_PH(ctx->opcode); - switch (op2) { - case OPC_ABSQ_S_QB: - case OPC_ABSQ_S_PH: - case OPC_ABSQ_S_W: - case OPC_PRECEQ_W_PHL: - case OPC_PRECEQ_W_PHR: - case OPC_PRECEQU_PH_QBL: - case OPC_PRECEQU_PH_QBR: - case OPC_PRECEQU_PH_QBLA: - case OPC_PRECEQU_PH_QBRA: - case OPC_PRECEU_PH_QBL: - case OPC_PRECEU_PH_QBR: - case OPC_PRECEU_PH_QBLA: - case OPC_PRECEU_PH_QBRA: - gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); - break; - case OPC_BITREV: - case OPC_REPL_QB: - case OPC_REPLV_QB: - case OPC_REPL_PH: - case OPC_REPLV_PH: - gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt); - break; - default: - MIPS_INVAL("MASK ABSQ_S.PH"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_ADDU_QB_DSP: - op2 = MASK_ADDU_QB(ctx->opcode); - switch (op2) { - case OPC_ADDQ_PH: - case OPC_ADDQ_S_PH: - case OPC_ADDQ_S_W: - case OPC_ADDU_QB: - case OPC_ADDU_S_QB: - case OPC_ADDU_PH: - case OPC_ADDU_S_PH: - case OPC_SUBQ_PH: - case OPC_SUBQ_S_PH: - case OPC_SUBQ_S_W: - case OPC_SUBU_QB: - case OPC_SUBU_S_QB: - case OPC_SUBU_PH: - 
case OPC_SUBU_S_PH: - case OPC_ADDSC: - case OPC_ADDWC: - case OPC_MODSUB: - case OPC_RADDU_W_QB: - gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); - break; - case OPC_MULEU_S_PH_QBL: - case OPC_MULEU_S_PH_QBR: - case OPC_MULQ_RS_PH: - case OPC_MULEQ_S_W_PHL: - case OPC_MULEQ_S_W_PHR: - case OPC_MULQ_S_PH: - gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); - break; - default: /* Invalid */ - MIPS_INVAL("MASK ADDU.QB"); - generate_exception(ctx, EXCP_RI); - break; - - } - break; - case OPC_CMPU_EQ_QB_DSP: - op2 = MASK_CMPU_EQ_QB(ctx->opcode); - switch (op2) { - case OPC_PRECR_SRA_PH_W: - case OPC_PRECR_SRA_R_PH_W: - gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd); - break; - case OPC_PRECR_QB_PH: - case OPC_PRECRQ_QB_PH: - case OPC_PRECRQ_PH_W: - case OPC_PRECRQ_RS_PH_W: - case OPC_PRECRQU_S_QB_PH: - gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); - break; - case OPC_CMPU_EQ_QB: - case OPC_CMPU_LT_QB: - case OPC_CMPU_LE_QB: - case OPC_CMP_EQ_PH: - case OPC_CMP_LT_PH: - case OPC_CMP_LE_PH: - gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0); - break; - case OPC_CMPGU_EQ_QB: - case OPC_CMPGU_LT_QB: - case OPC_CMPGU_LE_QB: - case OPC_CMPGDU_EQ_QB: - case OPC_CMPGDU_LT_QB: - case OPC_CMPGDU_LE_QB: - case OPC_PICK_QB: - case OPC_PICK_PH: - case OPC_PACKRL_PH: - gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1); - break; - default: /* Invalid */ - MIPS_INVAL("MASK CMPU.EQ.QB"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_SHLL_QB_DSP: - gen_mipsdsp_shift(ctx, op1, rd, rs, rt); - break; - case OPC_DPA_W_PH_DSP: - op2 = MASK_DPA_W_PH(ctx->opcode); - switch (op2) { - case OPC_DPAU_H_QBL: - case OPC_DPAU_H_QBR: - case OPC_DPSU_H_QBL: - case OPC_DPSU_H_QBR: - case OPC_DPA_W_PH: - case OPC_DPAX_W_PH: - case OPC_DPAQ_S_W_PH: - case OPC_DPAQX_S_W_PH: - case OPC_DPAQX_SA_W_PH: - case OPC_DPS_W_PH: - case OPC_DPSX_W_PH: - case OPC_DPSQ_S_W_PH: - case OPC_DPSQX_S_W_PH: - case OPC_DPSQX_SA_W_PH: - case OPC_MULSAQ_S_W_PH: - case OPC_DPAQ_SA_L_W: - case OPC_DPSQ_SA_L_W: - case OPC_MAQ_S_W_PHL: - case OPC_MAQ_S_W_PHR: - case OPC_MAQ_SA_W_PHL: - case OPC_MAQ_SA_W_PHR: - case OPC_MULSA_W_PH: - gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); - break; - default: /* Invalid */ - MIPS_INVAL("MASK DPAW.PH"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_INSV_DSP: - op2 = MASK_INSV(ctx->opcode); - switch (op2) { - case OPC_INSV: - check_dsp(ctx); - { - TCGv t0, t1; - - if (rt == 0) { - MIPS_DEBUG("NOP"); - break; - } - - t0 = tcg_temp_new(tcg_ctx); - t1 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rt); - gen_load_gpr(ctx, t1, rs); - - gen_helper_insv(tcg_ctx, *cpu_gpr[rt], tcg_ctx->cpu_env, t1, t0); - - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); - break; - } - default: /* Invalid */ - MIPS_INVAL("MASK INSV"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_APPEND_DSP: - gen_mipsdsp_append(env, ctx, op1, rt, rs, rd); - break; - case OPC_EXTR_W_DSP: - op2 = MASK_EXTR_W(ctx->opcode); - switch (op2) { - case OPC_EXTR_W: - case OPC_EXTR_R_W: - case OPC_EXTR_RS_W: - case OPC_EXTR_S_H: - case OPC_EXTRV_S_H: - case OPC_EXTRV_W: - case OPC_EXTRV_R_W: - case OPC_EXTRV_RS_W: - case OPC_EXTP: - case OPC_EXTPV: - case OPC_EXTPDP: - case OPC_EXTPDPV: - gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1); - break; - case OPC_RDDSP: - gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 1); - break; - case OPC_SHILO: - case OPC_SHILOV: - case OPC_MTHLIP: - case OPC_WRDSP: - gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0); - break; - default: 
/* Invalid */ - MIPS_INVAL("MASK EXTR.W"); - generate_exception(ctx, EXCP_RI); - break; - } - break; -#if defined(TARGET_MIPS64) - case OPC_DDIV_G_2E: case OPC_DDIVU_G_2E: - case OPC_DMULT_G_2E: case OPC_DMULTU_G_2E: - case OPC_DMOD_G_2E: case OPC_DMODU_G_2E: - check_insn(ctx, INSN_LOONGSON2E); - gen_loongson_integer(ctx, op1, rd, rs, rt); - break; - case OPC_ABSQ_S_QH_DSP: - op2 = MASK_ABSQ_S_QH(ctx->opcode); - switch (op2) { - case OPC_PRECEQ_L_PWL: - case OPC_PRECEQ_L_PWR: - case OPC_PRECEQ_PW_QHL: - case OPC_PRECEQ_PW_QHR: - case OPC_PRECEQ_PW_QHLA: - case OPC_PRECEQ_PW_QHRA: - case OPC_PRECEQU_QH_OBL: - case OPC_PRECEQU_QH_OBR: - case OPC_PRECEQU_QH_OBLA: - case OPC_PRECEQU_QH_OBRA: - case OPC_PRECEU_QH_OBL: - case OPC_PRECEU_QH_OBR: - case OPC_PRECEU_QH_OBLA: - case OPC_PRECEU_QH_OBRA: - case OPC_ABSQ_S_OB: - case OPC_ABSQ_S_PW: - case OPC_ABSQ_S_QH: - gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); - break; - case OPC_REPL_OB: - case OPC_REPL_PW: - case OPC_REPL_QH: - case OPC_REPLV_OB: - case OPC_REPLV_PW: - case OPC_REPLV_QH: - gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt); - break; - default: /* Invalid */ - MIPS_INVAL("MASK ABSQ_S.QH"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_ADDU_OB_DSP: - op2 = MASK_ADDU_OB(ctx->opcode); - switch (op2) { - case OPC_RADDU_L_OB: - case OPC_SUBQ_PW: - case OPC_SUBQ_S_PW: - case OPC_SUBQ_QH: - case OPC_SUBQ_S_QH: - case OPC_SUBU_OB: - case OPC_SUBU_S_OB: - case OPC_SUBU_QH: - case OPC_SUBU_S_QH: - case OPC_SUBUH_OB: - case OPC_SUBUH_R_OB: - case OPC_ADDQ_PW: - case OPC_ADDQ_S_PW: - case OPC_ADDQ_QH: - case OPC_ADDQ_S_QH: - case OPC_ADDU_OB: - case OPC_ADDU_S_OB: - case OPC_ADDU_QH: - case OPC_ADDU_S_QH: - case OPC_ADDUH_OB: - case OPC_ADDUH_R_OB: - gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); - break; - case OPC_MULEQ_S_PW_QHL: - case OPC_MULEQ_S_PW_QHR: - case OPC_MULEU_S_QH_OBL: - case OPC_MULEU_S_QH_OBR: - case OPC_MULQ_RS_QH: - gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); - break; - default: /* Invalid */ - MIPS_INVAL("MASK ADDU.OB"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_CMPU_EQ_OB_DSP: - op2 = MASK_CMPU_EQ_OB(ctx->opcode); - switch (op2) { - case OPC_PRECR_SRA_QH_PW: - case OPC_PRECR_SRA_R_QH_PW: - /* Return value is rt. 
*/ - gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd); - break; - case OPC_PRECR_OB_QH: - case OPC_PRECRQ_OB_QH: - case OPC_PRECRQ_PW_L: - case OPC_PRECRQ_QH_PW: - case OPC_PRECRQ_RS_QH_PW: - case OPC_PRECRQU_S_OB_QH: - gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); - break; - case OPC_CMPU_EQ_OB: - case OPC_CMPU_LT_OB: - case OPC_CMPU_LE_OB: - case OPC_CMP_EQ_QH: - case OPC_CMP_LT_QH: - case OPC_CMP_LE_QH: - case OPC_CMP_EQ_PW: - case OPC_CMP_LT_PW: - case OPC_CMP_LE_PW: - gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0); - break; - case OPC_CMPGDU_EQ_OB: - case OPC_CMPGDU_LT_OB: - case OPC_CMPGDU_LE_OB: - case OPC_CMPGU_EQ_OB: - case OPC_CMPGU_LT_OB: - case OPC_CMPGU_LE_OB: - case OPC_PACKRL_PW: - case OPC_PICK_OB: - case OPC_PICK_PW: - case OPC_PICK_QH: - gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1); - break; - default: /* Invalid */ - MIPS_INVAL("MASK CMPU_EQ.OB"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_DAPPEND_DSP: - gen_mipsdsp_append(env, ctx, op1, rt, rs, rd); - break; - case OPC_DEXTR_W_DSP: - op2 = MASK_DEXTR_W(ctx->opcode); - switch (op2) { - case OPC_DEXTP: - case OPC_DEXTPDP: - case OPC_DEXTPDPV: - case OPC_DEXTPV: - case OPC_DEXTR_L: - case OPC_DEXTR_R_L: - case OPC_DEXTR_RS_L: - case OPC_DEXTR_W: - case OPC_DEXTR_R_W: - case OPC_DEXTR_RS_W: - case OPC_DEXTR_S_H: - case OPC_DEXTRV_L: - case OPC_DEXTRV_R_L: - case OPC_DEXTRV_RS_L: - case OPC_DEXTRV_S_H: - case OPC_DEXTRV_W: - case OPC_DEXTRV_R_W: - case OPC_DEXTRV_RS_W: - gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1); - break; - case OPC_DMTHLIP: - case OPC_DSHILO: - case OPC_DSHILOV: - gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0); - break; - default: /* Invalid */ - MIPS_INVAL("MASK EXTR.W"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_DPAQ_W_QH_DSP: - op2 = MASK_DPAQ_W_QH(ctx->opcode); - switch (op2) { - case OPC_DPAU_H_OBL: - case OPC_DPAU_H_OBR: - case OPC_DPSU_H_OBL: - case OPC_DPSU_H_OBR: - case OPC_DPA_W_QH: - case OPC_DPAQ_S_W_QH: - case OPC_DPS_W_QH: - case OPC_DPSQ_S_W_QH: - case OPC_MULSAQ_S_W_QH: - case OPC_DPAQ_SA_L_PW: - case OPC_DPSQ_SA_L_PW: - case OPC_MULSAQ_S_L_PW: - gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); - break; - case OPC_MAQ_S_W_QHLL: - case OPC_MAQ_S_W_QHLR: - case OPC_MAQ_S_W_QHRL: - case OPC_MAQ_S_W_QHRR: - case OPC_MAQ_SA_W_QHLL: - case OPC_MAQ_SA_W_QHLR: - case OPC_MAQ_SA_W_QHRL: - case OPC_MAQ_SA_W_QHRR: - case OPC_MAQ_S_L_PWL: - case OPC_MAQ_S_L_PWR: - case OPC_DMADD: - case OPC_DMADDU: - case OPC_DMSUB: - case OPC_DMSUBU: - gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); - break; - default: /* Invalid */ - MIPS_INVAL("MASK DPAQ.W.QH"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_DINSV_DSP: - op2 = MASK_INSV(ctx->opcode); - switch (op2) { - case OPC_DINSV: - { - TCGv t0, t1; - - if (rt == 0) { - MIPS_DEBUG("NOP"); - break; - } - check_dsp(ctx); - - t0 = tcg_temp_new(tcg_ctx); - t1 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rt); - gen_load_gpr(ctx, t1, rs); - - gen_helper_dinsv(tcg_ctx, *cpu_gpr[rt], tcg_ctx->cpu_env, t1, t0); - - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); - break; - } - default: /* Invalid */ - MIPS_INVAL("MASK DINSV"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_SHLL_OB_DSP: - gen_mipsdsp_shift(ctx, op1, rd, rs, rt); - break; -#endif - default: /* Invalid */ - MIPS_INVAL("special3_legacy"); - generate_exception(ctx, EXCP_RI); - break; - } -} - -static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) -{ - 
TCGContext *tcg_ctx = env->uc->tcg_ctx; - int rs, rt, rd, sa; - uint32_t op1, op2; - - rs = (ctx->opcode >> 21) & 0x1f; - rt = (ctx->opcode >> 16) & 0x1f; - rd = (ctx->opcode >> 11) & 0x1f; - sa = (ctx->opcode >> 6) & 0x1f; - - op1 = MASK_SPECIAL3(ctx->opcode); - switch (op1) { - case OPC_EXT: - case OPC_INS: - check_insn(ctx, ISA_MIPS32R2); - gen_bitops(ctx, op1, rt, rs, sa, rd); - break; - case OPC_BSHFL: - op2 = MASK_BSHFL(ctx->opcode); - switch (op2) { - case OPC_ALIGN: case OPC_ALIGN_END: - case OPC_BITSWAP: - check_insn(ctx, ISA_MIPS32R6); - decode_opc_special3_r6(env, ctx); - break; - default: - check_insn(ctx, ISA_MIPS32R2); - gen_bshfl(ctx, op2, rt, rd); - break; - } - break; -#if defined(TARGET_MIPS64) - case OPC_DEXTM: case OPC_DEXTU: case OPC_DEXT: - case OPC_DINSM: case OPC_DINSU: case OPC_DINS: - check_insn(ctx, ISA_MIPS64R2); - check_mips_64(ctx); - gen_bitops(ctx, op1, rt, rs, sa, rd); - break; - case OPC_DBSHFL: - op2 = MASK_DBSHFL(ctx->opcode); - switch (op2) { - case OPC_DALIGN: case OPC_DALIGN_END: - case OPC_DBITSWAP: - check_insn(ctx, ISA_MIPS32R6); - decode_opc_special3_r6(env, ctx); - break; - default: - check_insn(ctx, ISA_MIPS64R2); - check_mips_64(ctx); - op2 = MASK_DBSHFL(ctx->opcode); - gen_bshfl(ctx, op2, rt, rd); - break; - } - break; -#endif - case OPC_RDHWR: - gen_rdhwr(ctx, rt, rd); - break; - case OPC_FORK: - check_insn(ctx, ASE_MT); - { - TCGv t0 = tcg_temp_new(tcg_ctx); - TCGv t1 = tcg_temp_new(tcg_ctx); - - gen_load_gpr(ctx, t0, rt); - gen_load_gpr(ctx, t1, rs); - gen_helper_fork(tcg_ctx, t0, t1); - tcg_temp_free(tcg_ctx, t0); - tcg_temp_free(tcg_ctx, t1); - } - break; - case OPC_YIELD: - check_insn(ctx, ASE_MT); - { - TCGv t0 = tcg_temp_new(tcg_ctx); - - save_cpu_state(ctx, 1); - gen_load_gpr(ctx, t0, rs); - gen_helper_yield(tcg_ctx, t0, tcg_ctx->cpu_env, t0); - gen_store_gpr(tcg_ctx, t0, rd); - tcg_temp_free(tcg_ctx, t0); - } - break; - default: - if (ctx->insn_flags & ISA_MIPS32R6) { - decode_opc_special3_r6(env, ctx); - } else { - decode_opc_special3_legacy(env, ctx); - } - } -} - -/* MIPS SIMD Architecture (MSA) */ -static inline int check_msa_access(DisasContext *ctx) -{ - if (unlikely((ctx->hflags & MIPS_HFLAG_FPU) && - !(ctx->hflags & MIPS_HFLAG_F64))) { - generate_exception(ctx, EXCP_RI); - return 0; - } - - if (unlikely(!(ctx->hflags & MIPS_HFLAG_MSA))) { - if (ctx->insn_flags & ASE_MSA) { - generate_exception(ctx, EXCP_MSADIS); - return 0; - } else { - generate_exception(ctx, EXCP_RI); - return 0; - } - } - return 1; -} - -static void gen_check_zero_element(CPUMIPSState *env, TCGv tresult, uint8_t df, uint8_t wt) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - /* generates tcg ops to check if any element is 0 */ - /* Note this function only works with MSA_WRLEN = 128 */ - uint64_t eval_zero_or_big = 0; - uint64_t eval_big = 0; - TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); - switch (df) { - case DF_BYTE: - eval_zero_or_big = 0x0101010101010101ULL; - eval_big = 0x8080808080808080ULL; - break; - case DF_HALF: - eval_zero_or_big = 0x0001000100010001ULL; - eval_big = 0x8000800080008000ULL; - break; - case DF_WORD: - eval_zero_or_big = 0x0000000100000001ULL; - eval_big = 0x8000000080000000ULL; - break; - case DF_DOUBLE: - eval_zero_or_big = 0x0000000000000001ULL; - eval_big = 0x8000000000000000ULL; - break; - } - tcg_gen_subi_i64(tcg_ctx, t0, tcg_ctx->msa_wr_d[wt<<1], eval_zero_or_big); - tcg_gen_andc_i64(tcg_ctx, t0, t0, tcg_ctx->msa_wr_d[wt<<1]); - tcg_gen_andi_i64(tcg_ctx, t0, t0, eval_big); - 
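Aside: the subtract/andc/and sequence just above, and the mirrored sequence for the upper doubleword that follows, implement the classic "is any element zero" bit trick on each 64-bit half of the 128-bit MSA register. A minimal scalar sketch of the same trick for the byte case (the helper name any_byte_zero is hypothetical, used only for illustration):

    #include <stdint.h>
    #include <stdbool.h>

    static bool any_byte_zero(uint64_t lane)
    {
        const uint64_t ones  = 0x0101010101010101ULL; /* eval_zero_or_big */
        const uint64_t signs = 0x8080808080808080ULL; /* eval_big */
        /* lane - ones borrows into the top bit of every byte that was zero;
         * & ~lane discards bytes whose own top bit was already set. */
        return ((lane - ones) & ~lane & signs) != 0;
    }
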
tcg_gen_subi_i64(tcg_ctx, t1, tcg_ctx->msa_wr_d[(wt<<1)+1], eval_zero_or_big); - tcg_gen_andc_i64(tcg_ctx, t1, t1, tcg_ctx->msa_wr_d[(wt<<1)+1]); - tcg_gen_andi_i64(tcg_ctx, t1, t1, eval_big); - tcg_gen_or_i64(tcg_ctx, t0, t0, t1); - /* if all bits are zero then all elements are not zero */ - /* if some bit is non-zero then some element is zero */ - tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, t0, t0, 0); - tcg_gen_trunc_i64_tl(tcg_ctx, tresult, t0); - tcg_temp_free_i64(tcg_ctx, t0); - tcg_temp_free_i64(tcg_ctx, t1); -} - -static void gen_msa_branch(CPUMIPSState *env, DisasContext *ctx, uint32_t op1) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - uint8_t df = (ctx->opcode >> 21) & 0x3; - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - int64_t s16 = (int16_t)ctx->opcode; - - check_msa_access(ctx); - - if (ctx->insn_flags & ISA_MIPS32R6 && ctx->hflags & MIPS_HFLAG_BMASK) { - MIPS_DEBUG("CTI in delay / forbidden slot"); - generate_exception(ctx, EXCP_RI); - return; - } - switch (op1) { - case OPC_BZ_V: - case OPC_BNZ_V: - { - TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_or_i64(tcg_ctx, t0, tcg_ctx->msa_wr_d[wt<<1], tcg_ctx->msa_wr_d[(wt<<1)+1]); - tcg_gen_setcondi_i64(tcg_ctx, (op1 == OPC_BZ_V) ? - TCG_COND_EQ : TCG_COND_NE, t0, t0, 0); - tcg_gen_trunc_i64_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); - tcg_temp_free_i64(tcg_ctx, t0); - } - break; - case OPC_BZ_B: - case OPC_BZ_H: - case OPC_BZ_W: - case OPC_BZ_D: - gen_check_zero_element(env, *(TCGv *)tcg_ctx->bcond, df, wt); - break; - case OPC_BNZ_B: - case OPC_BNZ_H: - case OPC_BNZ_W: - case OPC_BNZ_D: - gen_check_zero_element(env, *(TCGv *)tcg_ctx->bcond, df, wt); - tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, *(TCGv *)tcg_ctx->bcond, *(TCGv *)tcg_ctx->bcond, 0); - break; - } - - ctx->btarget = ctx->pc + (int64_t)((uint64_t)s16 << 2) + 4; - - ctx->hflags |= MIPS_HFLAG_BC; - ctx->hflags |= MIPS_HFLAG_BDS32; -} - -static void gen_msa_i8(CPUMIPSState *env, DisasContext *ctx) -{ -#define MASK_MSA_I8(op) (MASK_MSA_MINOR(op) | (op & (0x03 << 24))) - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint8_t i8 = (ctx->opcode >> 16) & 0xff; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); - TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); - TCGv_i32 ti8 = tcg_const_i32(tcg_ctx, i8); - - switch (MASK_MSA_I8(ctx->opcode)) { - case OPC_ANDI_B: - gen_helper_msa_andi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); - break; - case OPC_ORI_B: - gen_helper_msa_ori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); - break; - case OPC_NORI_B: - gen_helper_msa_nori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); - break; - case OPC_XORI_B: - gen_helper_msa_xori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); - break; - case OPC_BMNZI_B: - gen_helper_msa_bmnzi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); - break; - case OPC_BMZI_B: - gen_helper_msa_bmzi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); - break; - case OPC_BSELI_B: - gen_helper_msa_bseli_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); - break; - case OPC_SHF_B: - case OPC_SHF_H: - case OPC_SHF_W: - { - uint8_t df = (ctx->opcode >> 24) & 0x3; - if (df == DF_DOUBLE) { - generate_exception(ctx, EXCP_RI); - } else { - TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); - gen_helper_msa_shf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, ti8); - tcg_temp_free_i32(tcg_ctx, tdf); - } - } - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - break; - } - - tcg_temp_free_i32(tcg_ctx, twd); - tcg_temp_free_i32(tcg_ctx, tws); - 
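Aside: gen_msa_branch above computes the branch target as a PC-relative word offset taken past the 4-byte delay slot (ctx->btarget = ctx->pc + (s16 << 2) + 4). A standalone sketch of the same computation (msa_branch_target is a hypothetical name, not from this source):

    #include <stdint.h>

    /* Sign-extend the low 16 opcode bits, scale words to bytes, and
     * step past the delay slot, mirroring the btarget computation. */
    static uint64_t msa_branch_target(uint64_t pc, uint32_t opcode)
    {
        int64_t s16 = (int16_t)opcode;
        return pc + (uint64_t)(s16 << 2) + 4;
    }
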
tcg_temp_free_i32(tcg_ctx, ti8); -} - -static void gen_msa_i5(CPUMIPSState *env, DisasContext *ctx) -{ -#define MASK_MSA_I5(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint8_t df = (ctx->opcode >> 21) & 0x3; - int8_t s5 = (int8_t) sextract32(ctx->opcode, 16, 5); - uint8_t u5 = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); - TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); - TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); - TCGv_i32 timm = tcg_temp_new_i32(tcg_ctx); - tcg_gen_movi_i32(tcg_ctx, timm, u5); - - switch (MASK_MSA_I5(ctx->opcode)) { - case OPC_ADDVI_df: - gen_helper_msa_addvi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_SUBVI_df: - gen_helper_msa_subvi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_MAXI_S_df: - tcg_gen_movi_i32(tcg_ctx, timm, s5); - gen_helper_msa_maxi_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_MAXI_U_df: - gen_helper_msa_maxi_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_MINI_S_df: - tcg_gen_movi_i32(tcg_ctx, timm, s5); - gen_helper_msa_mini_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_MINI_U_df: - gen_helper_msa_mini_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_CEQI_df: - tcg_gen_movi_i32(tcg_ctx, timm, s5); - gen_helper_msa_ceqi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLTI_S_df: - tcg_gen_movi_i32(tcg_ctx, timm, s5); - gen_helper_msa_clti_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLTI_U_df: - gen_helper_msa_clti_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLEI_S_df: - tcg_gen_movi_i32(tcg_ctx, timm, s5); - gen_helper_msa_clei_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLEI_U_df: - gen_helper_msa_clei_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); - break; - case OPC_LDI_df: - { - int32_t s10 = sextract32(ctx->opcode, 11, 10); - tcg_gen_movi_i32(tcg_ctx, timm, s10); - gen_helper_msa_ldi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, timm); - } - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - break; - } - - tcg_temp_free_i32(tcg_ctx, tdf); - tcg_temp_free_i32(tcg_ctx, twd); - tcg_temp_free_i32(tcg_ctx, tws); - tcg_temp_free_i32(tcg_ctx, timm); -} - -static void gen_msa_bit(CPUMIPSState *env, DisasContext *ctx) -{ -#define MASK_MSA_BIT(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint8_t dfm = (ctx->opcode >> 16) & 0x7f; - uint32_t df = 0, m = 0; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tdf; - TCGv_i32 tm; - TCGv_i32 twd; - TCGv_i32 tws; - - if ((dfm & 0x40) == 0x00) { - m = dfm & 0x3f; - df = DF_DOUBLE; - } else if ((dfm & 0x60) == 0x40) { - m = dfm & 0x1f; - df = DF_WORD; - } else if ((dfm & 0x70) == 0x60) { - m = dfm & 0x0f; - df = DF_HALF; - } else if ((dfm & 0x78) == 0x70) { - m = dfm & 0x7; - df = DF_BYTE; - } else { - generate_exception(ctx, EXCP_RI); - return; - } - - tdf = tcg_const_i32(tcg_ctx, df); - tm = tcg_const_i32(tcg_ctx, m); - twd = tcg_const_i32(tcg_ctx, wd); - tws = tcg_const_i32(tcg_ctx, ws); - - switch (MASK_MSA_BIT(ctx->opcode)) { - case OPC_SLLI_df: - gen_helper_msa_slli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRAI_df: - 
gen_helper_msa_srai_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRLI_df: - gen_helper_msa_srli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_BCLRI_df: - gen_helper_msa_bclri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_BSETI_df: - gen_helper_msa_bseti_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_BNEGI_df: - gen_helper_msa_bnegi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_BINSLI_df: - gen_helper_msa_binsli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_BINSRI_df: - gen_helper_msa_binsri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_SAT_S_df: - gen_helper_msa_sat_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_SAT_U_df: - gen_helper_msa_sat_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRARI_df: - gen_helper_msa_srari_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRLRI_df: - gen_helper_msa_srlri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - break; - } - - tcg_temp_free_i32(tcg_ctx, tdf); - tcg_temp_free_i32(tcg_ctx, tm); - tcg_temp_free_i32(tcg_ctx, twd); - tcg_temp_free_i32(tcg_ctx, tws); -} - -static void gen_msa_3r(CPUMIPSState *env, DisasContext *ctx) -{ -#define MASK_MSA_3R(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint8_t df = (ctx->opcode >> 21) & 0x3; - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); - TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); - TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); - TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); - - switch (MASK_MSA_3R(ctx->opcode)) { - case OPC_SLL_df: - gen_helper_msa_sll_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ADDV_df: - gen_helper_msa_addv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_CEQ_df: - gen_helper_msa_ceq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ADD_A_df: - gen_helper_msa_add_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SUBS_S_df: - gen_helper_msa_subs_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MULV_df: - gen_helper_msa_mulv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SLD_df: - gen_helper_msa_sld_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_VSHF_df: - gen_helper_msa_vshf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SRA_df: - gen_helper_msa_sra_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SUBV_df: - gen_helper_msa_subv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ADDS_A_df: - gen_helper_msa_adds_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SUBS_U_df: - gen_helper_msa_subs_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MADDV_df: - gen_helper_msa_maddv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SPLAT_df: - gen_helper_msa_splat_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SRAR_df: - gen_helper_msa_srar_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SRL_df: - gen_helper_msa_srl_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - 
break; - case OPC_MAX_S_df: - gen_helper_msa_max_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_CLT_S_df: - gen_helper_msa_clt_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ADDS_S_df: - gen_helper_msa_adds_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SUBSUS_U_df: - gen_helper_msa_subsus_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MSUBV_df: - gen_helper_msa_msubv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_PCKEV_df: - gen_helper_msa_pckev_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SRLR_df: - gen_helper_msa_srlr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_BCLR_df: - gen_helper_msa_bclr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MAX_U_df: - gen_helper_msa_max_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_CLT_U_df: - gen_helper_msa_clt_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ADDS_U_df: - gen_helper_msa_adds_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_SUBSUU_S_df: - gen_helper_msa_subsuu_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_PCKOD_df: - gen_helper_msa_pckod_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_BSET_df: - gen_helper_msa_bset_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MIN_S_df: - gen_helper_msa_min_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_CLE_S_df: - gen_helper_msa_cle_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_AVE_S_df: - gen_helper_msa_ave_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ASUB_S_df: - gen_helper_msa_asub_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_DIV_S_df: - gen_helper_msa_div_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ILVL_df: - gen_helper_msa_ilvl_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_BNEG_df: - gen_helper_msa_bneg_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MIN_U_df: - gen_helper_msa_min_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_CLE_U_df: - gen_helper_msa_cle_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_AVE_U_df: - gen_helper_msa_ave_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ASUB_U_df: - gen_helper_msa_asub_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_DIV_U_df: - gen_helper_msa_div_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ILVR_df: - gen_helper_msa_ilvr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_BINSL_df: - gen_helper_msa_binsl_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MAX_A_df: - gen_helper_msa_max_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_AVER_S_df: - gen_helper_msa_aver_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MOD_S_df: - gen_helper_msa_mod_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ILVEV_df: - gen_helper_msa_ilvev_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_BINSR_df: - gen_helper_msa_binsr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MIN_A_df: - gen_helper_msa_min_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - 
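Aside: the BIT-format decoder earlier in this hunk (gen_msa_bit) unpacks the 7-bit dfm field by the length of its leading-ones prefix: the prefix selects the element width, the remaining bits give the bit index m. A minimal sketch of that decode, assuming the usual DF_BYTE..DF_DOUBLE values 0..3 (msa_decode_dfm is a hypothetical name):

    #include <stdint.h>

    /* Returns 0 on success, -1 for the reserved encodings that raise RI. */
    static int msa_decode_dfm(uint8_t dfm, uint32_t *df, uint32_t *m)
    {
        if ((dfm & 0x40) == 0x00)      { *df = 3; *m = dfm & 0x3f; } /* DF_DOUBLE */
        else if ((dfm & 0x60) == 0x40) { *df = 2; *m = dfm & 0x1f; } /* DF_WORD   */
        else if ((dfm & 0x70) == 0x60) { *df = 1; *m = dfm & 0x0f; } /* DF_HALF   */
        else if ((dfm & 0x78) == 0x70) { *df = 0; *m = dfm & 0x07; } /* DF_BYTE   */
        else return -1;
        return 0;
    }
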
case OPC_AVER_U_df: - gen_helper_msa_aver_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MOD_U_df: - gen_helper_msa_mod_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_ILVOD_df: - gen_helper_msa_ilvod_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - - case OPC_DOTP_S_df: - case OPC_DOTP_U_df: - case OPC_DPADD_S_df: - case OPC_DPADD_U_df: - case OPC_DPSUB_S_df: - case OPC_HADD_S_df: - case OPC_DPSUB_U_df: - case OPC_HADD_U_df: - case OPC_HSUB_S_df: - case OPC_HSUB_U_df: - if (df == DF_BYTE) { - generate_exception(ctx, EXCP_RI); - } - switch (MASK_MSA_3R(ctx->opcode)) { - case OPC_DOTP_S_df: - gen_helper_msa_dotp_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_DOTP_U_df: - gen_helper_msa_dotp_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_DPADD_S_df: - gen_helper_msa_dpadd_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_DPADD_U_df: - gen_helper_msa_dpadd_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_DPSUB_S_df: - gen_helper_msa_dpsub_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_HADD_S_df: - gen_helper_msa_hadd_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_DPSUB_U_df: - gen_helper_msa_dpsub_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_HADD_U_df: - gen_helper_msa_hadd_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_HSUB_S_df: - gen_helper_msa_hsub_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_HSUB_U_df: - gen_helper_msa_hsub_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - } - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - break; - } - tcg_temp_free_i32(tcg_ctx, twd); - tcg_temp_free_i32(tcg_ctx, tws); - tcg_temp_free_i32(tcg_ctx, twt); - tcg_temp_free_i32(tcg_ctx, tdf); -} - -static void gen_msa_elm_3e(CPUMIPSState *env, DisasContext *ctx) -{ -#define MASK_MSA_ELM_DF3E(op) (MASK_MSA_MINOR(op) | (op & (0x3FF << 16))) - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint8_t source = (ctx->opcode >> 11) & 0x1f; - uint8_t dest = (ctx->opcode >> 6) & 0x1f; - TCGv telm = tcg_temp_new(tcg_ctx); - TCGv_i32 tsr = tcg_const_i32(tcg_ctx, source); - TCGv_i32 tdt = tcg_const_i32(tcg_ctx, dest); - - switch (MASK_MSA_ELM_DF3E(ctx->opcode)) { - case OPC_CTCMSA: - gen_load_gpr(ctx, telm, source); - gen_helper_msa_ctcmsa(tcg_ctx, tcg_ctx->cpu_env, telm, tdt); - break; - case OPC_CFCMSA: - gen_helper_msa_cfcmsa(tcg_ctx, telm, tcg_ctx->cpu_env, tsr); - gen_store_gpr(tcg_ctx, telm, dest); - break; - case OPC_MOVE_V: - gen_helper_msa_move_v(tcg_ctx, tcg_ctx->cpu_env, tdt, tsr); - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - break; - } - - tcg_temp_free(tcg_ctx, telm); - tcg_temp_free_i32(tcg_ctx, tdt); - tcg_temp_free_i32(tcg_ctx, tsr); -} - -static void gen_msa_elm_df(CPUMIPSState *env, DisasContext *ctx, uint32_t df, - uint32_t n) -{ -#define MASK_MSA_ELM(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); - TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); - TCGv_i32 tn = tcg_const_i32(tcg_ctx, n); - TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); - - switch (MASK_MSA_ELM(ctx->opcode)) { - case OPC_SLDI_df: - gen_helper_msa_sldi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, 
tws, tn); - break; - case OPC_SPLATI_df: - gen_helper_msa_splati_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); - break; - case OPC_INSVE_df: - gen_helper_msa_insve_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); - break; - case OPC_COPY_S_df: - case OPC_COPY_U_df: - case OPC_INSERT_df: -#if !defined(TARGET_MIPS64) - /* Double format valid only for MIPS64 */ - if (df == DF_DOUBLE) { - generate_exception(ctx, EXCP_RI); - break; - } -#endif - switch (MASK_MSA_ELM(ctx->opcode)) { - case OPC_COPY_S_df: - gen_helper_msa_copy_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); - break; - case OPC_COPY_U_df: - gen_helper_msa_copy_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); - break; - case OPC_INSERT_df: - gen_helper_msa_insert_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); - break; - } - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - } - tcg_temp_free_i32(tcg_ctx, twd); - tcg_temp_free_i32(tcg_ctx, tws); - tcg_temp_free_i32(tcg_ctx, tn); - tcg_temp_free_i32(tcg_ctx, tdf); -} - -static void gen_msa_elm(CPUMIPSState *env, DisasContext *ctx) -{ - uint8_t dfn = (ctx->opcode >> 16) & 0x3f; - uint32_t df = 0, n = 0; - - if ((dfn & 0x30) == 0x00) { - n = dfn & 0x0f; - df = DF_BYTE; - } else if ((dfn & 0x38) == 0x20) { - n = dfn & 0x07; - df = DF_HALF; - } else if ((dfn & 0x3c) == 0x30) { - n = dfn & 0x03; - df = DF_WORD; - } else if ((dfn & 0x3e) == 0x38) { - n = dfn & 0x01; - df = DF_DOUBLE; - } else if (dfn == 0x3E) { - /* CTCMSA, CFCMSA, MOVE.V */ - gen_msa_elm_3e(env, ctx); - return; - } else { - generate_exception(ctx, EXCP_RI); - return; - } - - gen_msa_elm_df(env, ctx, df, n); -} - -static void gen_msa_3rf(CPUMIPSState *env, DisasContext *ctx) -{ -#define MASK_MSA_3RF(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint8_t df = (ctx->opcode >> 21) & 0x1; - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); - TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); - TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); - TCGv_i32 tdf = tcg_temp_new_i32(tcg_ctx); - - /* adjust df value for floating-point instruction */ - tcg_gen_movi_i32(tcg_ctx, tdf, df + 2); - - switch (MASK_MSA_3RF(ctx->opcode)) { - case OPC_FCAF_df: - gen_helper_msa_fcaf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FADD_df: - gen_helper_msa_fadd_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCUN_df: - gen_helper_msa_fcun_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUB_df: - gen_helper_msa_fsub_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCOR_df: - gen_helper_msa_fcor_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCEQ_df: - gen_helper_msa_fceq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMUL_df: - gen_helper_msa_fmul_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCUNE_df: - gen_helper_msa_fcune_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCUEQ_df: - gen_helper_msa_fcueq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FDIV_df: - gen_helper_msa_fdiv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCNE_df: - gen_helper_msa_fcne_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCLT_df: - gen_helper_msa_fclt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; 
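Aside: the tdf setup at the top of gen_msa_3rf (df + 2) and the df + 1 overrides in the fixed-point *_Q cases below map the single 3RF format bit onto the shared data-format enumeration: floating-point ops come in W/D flavours, fixed-point Q ops in H/W. A sketch, assuming DF_HALF=1, DF_WORD=2, DF_DOUBLE=3 (msa_3rf_df is a hypothetical name):

    #include <stdbool.h>

    static int msa_3rf_df(int df_bit, bool fixed_point_q)
    {
        /* float:  0 -> DF_WORD (2), 1 -> DF_DOUBLE (3)
         * Q ops:  0 -> DF_HALF (1), 1 -> DF_WORD   (2) */
        return fixed_point_q ? df_bit + 1 : df_bit + 2;
    }
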
- case OPC_FMADD_df: - gen_helper_msa_fmadd_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MUL_Q_df: - tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); - gen_helper_msa_mul_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCULT_df: - gen_helper_msa_fcult_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMSUB_df: - gen_helper_msa_fmsub_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MADD_Q_df: - tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); - gen_helper_msa_madd_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCLE_df: - gen_helper_msa_fcle_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MSUB_Q_df: - tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); - gen_helper_msa_msub_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCULE_df: - gen_helper_msa_fcule_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FEXP2_df: - gen_helper_msa_fexp2_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSAF_df: - gen_helper_msa_fsaf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FEXDO_df: - gen_helper_msa_fexdo_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUN_df: - gen_helper_msa_fsun_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSOR_df: - gen_helper_msa_fsor_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSEQ_df: - gen_helper_msa_fseq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FTQ_df: - gen_helper_msa_ftq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUNE_df: - gen_helper_msa_fsune_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUEQ_df: - gen_helper_msa_fsueq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSNE_df: - gen_helper_msa_fsne_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSLT_df: - gen_helper_msa_fslt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMIN_df: - gen_helper_msa_fmin_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MULR_Q_df: - tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); - gen_helper_msa_mulr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSULT_df: - gen_helper_msa_fsult_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMIN_A_df: - gen_helper_msa_fmin_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MADDR_Q_df: - tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); - gen_helper_msa_maddr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSLE_df: - gen_helper_msa_fsle_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMAX_df: - gen_helper_msa_fmax_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_MSUBR_Q_df: - tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); - gen_helper_msa_msubr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSULE_df: - gen_helper_msa_fsule_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMAX_A_df: - gen_helper_msa_fmax_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - break; - } - - tcg_temp_free_i32(tcg_ctx, twd); - tcg_temp_free_i32(tcg_ctx, tws); - tcg_temp_free_i32(tcg_ctx, twt); - tcg_temp_free_i32(tcg_ctx, tdf); -} - -static void gen_msa_2r(CPUMIPSState *env, 
DisasContext *ctx) -{ -#define MASK_MSA_2R(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ - (op & (0x7 << 18))) - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - uint8_t df = (ctx->opcode >> 16) & 0x3; - TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); - TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); - TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); - TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); - - switch (MASK_MSA_2R(ctx->opcode)) { - case OPC_FILL_df: -#if !defined(TARGET_MIPS64) - /* Double format valid only for MIPS64 */ - if (df == DF_DOUBLE) { - generate_exception(ctx, EXCP_RI); - break; - } -#endif - gen_helper_msa_fill_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); /* trs */ - break; - case OPC_PCNT_df: - gen_helper_msa_pcnt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_NLOC_df: - gen_helper_msa_nloc_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_NLZC_df: - gen_helper_msa_nlzc_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - break; - } - - tcg_temp_free_i32(tcg_ctx, twd); - tcg_temp_free_i32(tcg_ctx, tws); - tcg_temp_free_i32(tcg_ctx, twt); - tcg_temp_free_i32(tcg_ctx, tdf); -} - -static void gen_msa_2rf(CPUMIPSState *env, DisasContext *ctx) -{ -#define MASK_MSA_2RF(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ - (op & (0xf << 17))) - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - uint8_t df = (ctx->opcode >> 16) & 0x1; - TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); - TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); - TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); - /* adjust df value for floating-point instruction */ - TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df + 2); - - switch (MASK_MSA_2RF(ctx->opcode)) { - case OPC_FCLASS_df: - gen_helper_msa_fclass_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FTRUNC_S_df: - gen_helper_msa_ftrunc_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FTRUNC_U_df: - gen_helper_msa_ftrunc_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FSQRT_df: - gen_helper_msa_fsqrt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FRSQRT_df: - gen_helper_msa_frsqrt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FRCP_df: - gen_helper_msa_frcp_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FRINT_df: - gen_helper_msa_frint_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FLOG2_df: - gen_helper_msa_flog2_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FEXUPL_df: - gen_helper_msa_fexupl_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FEXUPR_df: - gen_helper_msa_fexupr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FFQL_df: - gen_helper_msa_ffql_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FFQR_df: - gen_helper_msa_ffqr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FTINT_S_df: - gen_helper_msa_ftint_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FTINT_U_df: - gen_helper_msa_ftint_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FFINT_S_df: - gen_helper_msa_ffint_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - case OPC_FFINT_U_df: - 
gen_helper_msa_ffint_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); - break; - } - - tcg_temp_free_i32(tcg_ctx, twd); - tcg_temp_free_i32(tcg_ctx, tws); - tcg_temp_free_i32(tcg_ctx, twt); - tcg_temp_free_i32(tcg_ctx, tdf); -} - -static void gen_msa_vec_v(CPUMIPSState *env, DisasContext *ctx) -{ -#define MASK_MSA_VEC(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21))) - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); - TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); - TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); - - switch (MASK_MSA_VEC(ctx->opcode)) { - case OPC_AND_V: - gen_helper_msa_and_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); - break; - case OPC_OR_V: - gen_helper_msa_or_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); - break; - case OPC_NOR_V: - gen_helper_msa_nor_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); - break; - case OPC_XOR_V: - gen_helper_msa_xor_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); - break; - case OPC_BMNZ_V: - gen_helper_msa_bmnz_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); - break; - case OPC_BMZ_V: - gen_helper_msa_bmz_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); - break; - case OPC_BSEL_V: - gen_helper_msa_bsel_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - break; - } - - tcg_temp_free_i32(tcg_ctx, twd); - tcg_temp_free_i32(tcg_ctx, tws); - tcg_temp_free_i32(tcg_ctx, twt); -} - -static void gen_msa_vec(CPUMIPSState *env, DisasContext *ctx) -{ - switch (MASK_MSA_VEC(ctx->opcode)) { - case OPC_AND_V: - case OPC_OR_V: - case OPC_NOR_V: - case OPC_XOR_V: - case OPC_BMNZ_V: - case OPC_BMZ_V: - case OPC_BSEL_V: - gen_msa_vec_v(env, ctx); - break; - case OPC_MSA_2R: - gen_msa_2r(env, ctx); - break; - case OPC_MSA_2RF: - gen_msa_2rf(env, ctx); - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - break; - } -} - -static void gen_msa(CPUMIPSState *env, DisasContext *ctx) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - uint32_t opcode = ctx->opcode; - check_insn(ctx, ASE_MSA); - check_msa_access(ctx); - - switch (MASK_MSA_MINOR(opcode)) { - case OPC_MSA_I8_00: - case OPC_MSA_I8_01: - case OPC_MSA_I8_02: - gen_msa_i8(env, ctx); - break; - case OPC_MSA_I5_06: - case OPC_MSA_I5_07: - gen_msa_i5(env, ctx); - break; - case OPC_MSA_BIT_09: - case OPC_MSA_BIT_0A: - gen_msa_bit(env, ctx); - break; - case OPC_MSA_3R_0D: - case OPC_MSA_3R_0E: - case OPC_MSA_3R_0F: - case OPC_MSA_3R_10: - case OPC_MSA_3R_11: - case OPC_MSA_3R_12: - case OPC_MSA_3R_13: - case OPC_MSA_3R_14: - case OPC_MSA_3R_15: - gen_msa_3r(env, ctx); - break; - case OPC_MSA_ELM: - gen_msa_elm(env, ctx); - break; - case OPC_MSA_3RF_1A: - case OPC_MSA_3RF_1B: - case OPC_MSA_3RF_1C: - gen_msa_3rf(env, ctx); - break; - case OPC_MSA_VEC: - gen_msa_vec(env, ctx); - break; - case OPC_LD_B: - case OPC_LD_H: - case OPC_LD_W: - case OPC_LD_D: - case OPC_ST_B: - case OPC_ST_H: - case OPC_ST_W: - case OPC_ST_D: - { - int32_t s10 = sextract32(ctx->opcode, 16, 10); - uint8_t rs = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - uint8_t df = (ctx->opcode >> 0) & 0x3; - - TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); - TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); - TCGv_i32 trs = tcg_const_i32(tcg_ctx, rs); - TCGv_i32 ts10 = tcg_const_i32(tcg_ctx, s10); - - switch (MASK_MSA_MINOR(opcode)) { - case OPC_LD_B: - case OPC_LD_H: - case 
OPC_LD_W: - case OPC_LD_D: - gen_helper_msa_ld_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, trs, ts10); - break; - case OPC_ST_B: - case OPC_ST_H: - case OPC_ST_W: - case OPC_ST_D: - gen_helper_msa_st_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, trs, ts10); - break; - } - - tcg_temp_free_i32(tcg_ctx, twd); - tcg_temp_free_i32(tcg_ctx, tdf); - tcg_temp_free_i32(tcg_ctx, trs); - tcg_temp_free_i32(tcg_ctx, ts10); - } - break; - default: - MIPS_INVAL("MSA instruction"); - generate_exception(ctx, EXCP_RI); - break; - } -} - -// Unicorn: trace this instruction on request -static void hook_insn(CPUMIPSState *env, DisasContext *ctx, bool *insn_need_patch, int *insn_patch_offset, int offset_value) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; - if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, ctx->pc)) { - gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_CODE_IDX, env->uc, ctx->pc); - *insn_need_patch = true; - // the callback might want to stop emulation immediately - check_exit_request(tcg_ctx); - *insn_patch_offset = offset_value; - } -} - -static void decode_opc (CPUMIPSState *env, DisasContext *ctx, bool *insn_need_patch, int *insn_patch_offset) -{ - TCGContext *tcg_ctx = ctx->uc->tcg_ctx; -#if defined(TARGET_MIPS64) - TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; -#endif - int32_t offset; - int rs, rt, rd, sa; - uint32_t op, op1; - int16_t imm; - - /* make sure instructions are on a word boundary */ - if (ctx->pc & 0x3) { - env->CP0_BadVAddr = ctx->pc; - generate_exception_err(ctx, EXCP_AdEL, EXCP_INST_NOTAVAIL); - return; - } - - /* Handle blikely not taken case */ - if ((ctx->hflags & MIPS_HFLAG_BMASK_BASE) == MIPS_HFLAG_BL) { - int l1 = gen_new_label(tcg_ctx); - - MIPS_DEBUG("blikely condition (" TARGET_FMT_lx ")", ctx->pc + 4); - tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->bcond, 0, l1); - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->hflags, ctx->hflags & ~MIPS_HFLAG_BMASK); - gen_goto_tb(ctx, 1, ctx->pc + 4); - gen_set_label(tcg_ctx, l1); - hook_insn(env, ctx, insn_need_patch, insn_patch_offset, 14); - } else { - hook_insn(env, ctx, insn_need_patch, insn_patch_offset, 1); - } - - if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { - tcg_gen_debug_insn_start(tcg_ctx, ctx->pc); - } - - op = MASK_OP_MAJOR(ctx->opcode); - rs = (ctx->opcode >> 21) & 0x1f; - rt = (ctx->opcode >> 16) & 0x1f; - rd = (ctx->opcode >> 11) & 0x1f; - sa = (ctx->opcode >> 6) & 0x1f; - imm = (int16_t)ctx->opcode; - switch (op) { - case OPC_SPECIAL: - decode_opc_special(env, ctx); - break; - case OPC_SPECIAL2: - decode_opc_special2_legacy(env, ctx); - break; - case OPC_SPECIAL3: - decode_opc_special3(env, ctx); - break; - case OPC_REGIMM: - op1 = MASK_REGIMM(ctx->opcode); - switch (op1) { - case OPC_BLTZL: /* REGIMM branches */ - case OPC_BGEZL: - case OPC_BLTZALL: - case OPC_BGEZALL: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - case OPC_BLTZ: - case OPC_BGEZ: - gen_compute_branch(ctx, op1, 4, rs, -1, (uint32_t)imm << 2, 4); - break; - case OPC_BLTZAL: - case OPC_BGEZAL: - if (ctx->insn_flags & ISA_MIPS32R6) { - if (rs == 0) { - /* OPC_NAL, OPC_BAL */ - gen_compute_branch(ctx, op1, 4, 0, -1, (uint32_t)imm << 2, 4); - } else { - generate_exception(ctx, EXCP_RI); - } - } else { - gen_compute_branch(ctx, op1, 4, rs, -1, (uint32_t)imm << 2, 4); - } - break; - case OPC_TGEI: case OPC_TGEIU: case OPC_TLTI: case OPC_TLTIU: case OPC_TEQI: /* REGIMM traps */ - case OPC_TNEI: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - gen_trap(ctx, op1, rs, -1, imm); - break; - case OPC_SYNCI: - check_insn(ctx, ISA_MIPS32R2); - /* 
Break the TB to be able to sync copied instructions - immediately */ - ctx->bstate = BS_STOP; - break; - case OPC_BPOSGE32: /* MIPS DSP branch */ -#if defined(TARGET_MIPS64) - case OPC_BPOSGE64: -#endif - check_dsp(ctx); - gen_compute_branch(ctx, op1, 4, -1, -2, (uint32_t)imm << 2, 4); - break; -#if defined(TARGET_MIPS64) - case OPC_DAHI: - check_insn(ctx, ISA_MIPS32R6); - check_mips_64(ctx); - if (rs != 0) { - tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rs], *cpu_gpr[rs], (int64_t)imm << 32); - } - MIPS_DEBUG("dahi %s, %04x", regnames[rs], imm); - break; - case OPC_DATI: - check_insn(ctx, ISA_MIPS32R6); - check_mips_64(ctx); - if (rs != 0) { - tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rs], *cpu_gpr[rs], (int64_t)imm << 48); - } - MIPS_DEBUG("dati %s, %04x", regnames[rs], imm); - break; -#endif - default: /* Invalid */ - MIPS_INVAL("regimm"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_CP0: - check_cp0_enabled(ctx); - op1 = MASK_CP0(ctx->opcode); - switch (op1) { - case OPC_MFC0: - case OPC_MTC0: - case OPC_MFTR: - case OPC_MTTR: -#if defined(TARGET_MIPS64) - case OPC_DMFC0: - case OPC_DMTC0: -#endif -#ifndef CONFIG_USER_ONLY - gen_cp0(env, ctx, op1, rt, rd); -#endif /* !CONFIG_USER_ONLY */ - break; - case OPC_C0_FIRST: case OPC_C0_LAST: -#ifndef CONFIG_USER_ONLY - gen_cp0(env, ctx, MASK_C0(ctx->opcode), rt, rd); -#endif /* !CONFIG_USER_ONLY */ - break; - case OPC_MFMC0: -#ifndef CONFIG_USER_ONLY - { - uint32_t op2; - TCGv t0 = tcg_temp_new(tcg_ctx); - - op2 = MASK_MFMC0(ctx->opcode); - switch (op2) { - case OPC_DMT: - check_insn(ctx, ASE_MT); - gen_helper_dmt(tcg_ctx, t0); - gen_store_gpr(tcg_ctx, t0, rt); - break; - case OPC_EMT: - check_insn(ctx, ASE_MT); - gen_helper_emt(tcg_ctx, t0); - gen_store_gpr(tcg_ctx, t0, rt); - break; - case OPC_DVPE: - check_insn(ctx, ASE_MT); - gen_helper_dvpe(tcg_ctx, t0, tcg_ctx->cpu_env); - gen_store_gpr(tcg_ctx, t0, rt); - break; - case OPC_EVPE: - check_insn(ctx, ASE_MT); - gen_helper_evpe(tcg_ctx, t0, tcg_ctx->cpu_env); - gen_store_gpr(tcg_ctx, t0, rt); - break; - case OPC_DI: - check_insn(ctx, ISA_MIPS32R2); - save_cpu_state(ctx, 1); - gen_helper_di(tcg_ctx, t0, tcg_ctx->cpu_env); - gen_store_gpr(tcg_ctx, t0, rt); - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - case OPC_EI: - check_insn(ctx, ISA_MIPS32R2); - save_cpu_state(ctx, 1); - gen_helper_ei(tcg_ctx, t0, tcg_ctx->cpu_env); - gen_store_gpr(tcg_ctx, t0, rt); - /* Stop translation as we may have switched the execution mode */ - ctx->bstate = BS_STOP; - break; - default: /* Invalid */ - MIPS_INVAL("mfmc0"); - generate_exception(ctx, EXCP_RI); - break; - } - tcg_temp_free(tcg_ctx, t0); - } -#endif /* !CONFIG_USER_ONLY */ - break; - case OPC_RDPGPR: - check_insn(ctx, ISA_MIPS32R2); - gen_load_srsgpr(ctx, rt, rd); - break; - case OPC_WRPGPR: - check_insn(ctx, ISA_MIPS32R2); - gen_store_srsgpr(ctx, rt, rd); - break; - default: - MIPS_INVAL("cp0"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC, OPC_ADDI */ - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_BOVC, OPC_BEQZALC, OPC_BEQC */ - gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); - } else { - /* OPC_ADDI */ - /* Arithmetic with immediate opcode */ - gen_arith_imm(ctx, op, rt, rs, imm); - } - break; - case OPC_ADDIU: - gen_arith_imm(ctx, op, rt, rs, imm); - break; - case OPC_SLTI: /* Set on less than with immediate opcode */ - case OPC_SLTIU: - gen_slt_imm(ctx, op, rt, rs, imm); - break; - case OPC_ANDI: /* Arithmetic 
with immediate opcode */ - case OPC_LUI: /* OPC_AUI */ - case OPC_ORI: - case OPC_XORI: - gen_logic_imm(ctx, op, rt, rs, imm); - break; - case OPC_J: case OPC_JAL: /* Jump */ - offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; - gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); - break; - /* Branch */ - case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC, OPC_BLEZL */ - if (ctx->insn_flags & ISA_MIPS32R6) { - if (rt == 0) { - generate_exception(ctx, EXCP_RI); - break; - } - /* OPC_BLEZC, OPC_BGEZC, OPC_BGEC */ - gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); - } else { - /* OPC_BLEZL */ - gen_compute_branch(ctx, op, 4, rs, rt, (uint32_t)imm << 2, 4); - } - break; - case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC, OPC_BGTZL */ - if (ctx->insn_flags & ISA_MIPS32R6) { - if (rt == 0) { - generate_exception(ctx, EXCP_RI); - break; - } - /* OPC_BGTZC, OPC_BLTZC, OPC_BLTC */ - gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); - } else { - /* OPC_BGTZL */ - gen_compute_branch(ctx, op, 4, rs, rt, (uint32_t)imm << 2, 4); - } - break; - case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC, OPC_BLEZ */ - if (rt == 0) { - /* OPC_BLEZ */ - gen_compute_branch(ctx, op, 4, rs, rt, (uint32_t)imm << 2, 4); - } else { - check_insn(ctx, ISA_MIPS32R6); - /* OPC_BLEZALC, OPC_BGEZALC, OPC_BGEUC */ - gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); - } - break; - case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC, OPC_BGTZ */ - if (rt == 0) { - /* OPC_BGTZ */ - gen_compute_branch(ctx, op, 4, rs, rt, (uint32_t)imm << 2, 4); - } else { - check_insn(ctx, ISA_MIPS32R6); - /* OPC_BGTZALC, OPC_BLTZALC, OPC_BLTUC */ - gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); - } - break; - case OPC_BEQL: - case OPC_BNEL: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - case OPC_BEQ: - case OPC_BNE: - gen_compute_branch(ctx, op, 4, rs, rt, (uint32_t)imm << 2, 4); - break; - case OPC_LWL: /* Load and stores */ - case OPC_LWR: - case OPC_LL: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - case OPC_LB: case OPC_LH: - case OPC_LW: case OPC_LBU: case OPC_LHU: - gen_ld(ctx, op, rt, rs, imm); - break; - case OPC_SWL: - case OPC_SWR: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - case OPC_SB: case OPC_SH: - case OPC_SW: - gen_st(ctx, op, rt, rs, imm); - break; - case OPC_SC: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - gen_st_cond(ctx, op, rt, rs, imm); - break; - case OPC_CACHE: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - check_cp0_enabled(ctx); - check_insn(ctx, ISA_MIPS3 | ISA_MIPS32); - /* Treat as NOP. */ - break; - case OPC_PREF: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); - /* Treat as NOP. */ - break; - - /* Floating point (COP1). 
*/ - case OPC_LWC1: - case OPC_LDC1: - case OPC_SWC1: - case OPC_SDC1: - gen_cop1_ldst(ctx, op, rt, rs, imm); - break; - - case OPC_CP1: - op1 = MASK_CP1(ctx->opcode); - - switch (op1) { - case OPC_MFHC1: - case OPC_MTHC1: - check_cp1_enabled(ctx); - check_insn(ctx, ISA_MIPS32R2); - case OPC_MFC1: - case OPC_CFC1: - case OPC_MTC1: - case OPC_CTC1: - check_cp1_enabled(ctx); - gen_cp1(ctx, op1, rt, rd); - break; -#if defined(TARGET_MIPS64) - case OPC_DMFC1: - case OPC_DMTC1: - check_cp1_enabled(ctx); - check_insn(ctx, ISA_MIPS3); - gen_cp1(ctx, op1, rt, rd); - break; -#endif - case OPC_BC1EQZ: /* OPC_BC1ANY2 */ - check_cp1_enabled(ctx); - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_BC1EQZ */ - gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode), - rt, ((uint16_t)imm) << 2); - } else { - /* OPC_BC1ANY2 */ - check_cop1x(ctx); - check_insn(ctx, ASE_MIPS3D); - gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), - (rt >> 2) & 0x7, ((uint32_t)imm) << 2); - } - break; - case OPC_BC1NEZ: - check_cp1_enabled(ctx); - check_insn(ctx, ISA_MIPS32R6); - gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode), - rt, ((uint16_t)imm) << 2); - break; - case OPC_BC1ANY4: - check_cp1_enabled(ctx); - check_insn_opc_removed(ctx, ISA_MIPS32R6); - check_cop1x(ctx); - check_insn(ctx, ASE_MIPS3D); - /* fall through */ - case OPC_BC1: - check_cp1_enabled(ctx); - check_insn_opc_removed(ctx, ISA_MIPS32R6); - gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), - (rt >> 2) & 0x7, (uint32_t)imm << 2); - break; - case OPC_PS_FMT: - check_cp1_enabled(ctx); - check_insn_opc_removed(ctx, ISA_MIPS32R6); - case OPC_S_FMT: - case OPC_D_FMT: - check_cp1_enabled(ctx); - gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, - (imm >> 8) & 0x7); - break; - case OPC_W_FMT: - case OPC_L_FMT: - { - int r6_op = ctx->opcode & FOP(0x3f, 0x1f); - check_cp1_enabled(ctx); - if (ctx->insn_flags & ISA_MIPS32R6) { - switch (r6_op) { - case R6_OPC_CMP_AF_S: - case R6_OPC_CMP_UN_S: - case R6_OPC_CMP_EQ_S: - case R6_OPC_CMP_UEQ_S: - case R6_OPC_CMP_LT_S: - case R6_OPC_CMP_ULT_S: - case R6_OPC_CMP_LE_S: - case R6_OPC_CMP_ULE_S: - case R6_OPC_CMP_SAF_S: - case R6_OPC_CMP_SUN_S: - case R6_OPC_CMP_SEQ_S: - case R6_OPC_CMP_SEUQ_S: - case R6_OPC_CMP_SLT_S: - case R6_OPC_CMP_SULT_S: - case R6_OPC_CMP_SLE_S: - case R6_OPC_CMP_SULE_S: - case R6_OPC_CMP_OR_S: - case R6_OPC_CMP_UNE_S: - case R6_OPC_CMP_NE_S: - case R6_OPC_CMP_SOR_S: - case R6_OPC_CMP_SUNE_S: - case R6_OPC_CMP_SNE_S: - gen_r6_cmp_s(ctx, ctx->opcode & 0x1f, rt, rd, sa); - break; - case R6_OPC_CMP_AF_D: - case R6_OPC_CMP_UN_D: - case R6_OPC_CMP_EQ_D: - case R6_OPC_CMP_UEQ_D: - case R6_OPC_CMP_LT_D: - case R6_OPC_CMP_ULT_D: - case R6_OPC_CMP_LE_D: - case R6_OPC_CMP_ULE_D: - case R6_OPC_CMP_SAF_D: - case R6_OPC_CMP_SUN_D: - case R6_OPC_CMP_SEQ_D: - case R6_OPC_CMP_SEUQ_D: - case R6_OPC_CMP_SLT_D: - case R6_OPC_CMP_SULT_D: - case R6_OPC_CMP_SLE_D: - case R6_OPC_CMP_SULE_D: - case R6_OPC_CMP_OR_D: - case R6_OPC_CMP_UNE_D: - case R6_OPC_CMP_NE_D: - case R6_OPC_CMP_SOR_D: - case R6_OPC_CMP_SUNE_D: - case R6_OPC_CMP_SNE_D: - gen_r6_cmp_d(ctx, ctx->opcode & 0x1f, rt, rd, sa); - break; - default: - gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, - (imm >> 8) & 0x7); - break; - } - } else { - gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, - (imm >> 8) & 0x7); - } - break; - } - case OPC_BZ_V: - case OPC_BNZ_V: - case OPC_BZ_B: - case OPC_BZ_H: - case OPC_BZ_W: - case OPC_BZ_D: - case OPC_BNZ_B: - case OPC_BNZ_H: - case OPC_BNZ_W: - case OPC_BNZ_D: - check_insn(ctx, ASE_MSA); - 
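/* note: MSA compare-with-zero branches (BZ.V/BNZ.V, BZ.df/BNZ.df) are encoded under the COP1 major opcode, which is why they are dispatched from here. */ -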
gen_msa_branch(env, ctx, op1); - break; - default: - MIPS_INVAL("cp1"); - generate_exception(ctx, EXCP_RI); - break; - } - break; - - /* Compact branches [R6] and COP2 [non-R6] */ - case OPC_BC: /* OPC_LWC2 */ - case OPC_BALC: /* OPC_SWC2 */ - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_BC, OPC_BALC */ - gen_compute_compact_branch(ctx, op, 0, 0, - sextract32(ctx->opcode << 2, 0, 28)); - } else { - /* OPC_LWC2, OPC_SWC2 */ - /* COP2: Not implemented. */ - generate_exception_err(ctx, EXCP_CpU, 2); - } - break; - case OPC_BEQZC: /* OPC_JIC, OPC_LDC2 */ - case OPC_BNEZC: /* OPC_JIALC, OPC_SDC2 */ - if (ctx->insn_flags & ISA_MIPS32R6) { - if (rs != 0) { - /* OPC_BEQZC, OPC_BNEZC */ - gen_compute_compact_branch(ctx, op, rs, 0, - sextract32(ctx->opcode << 2, 0, 23)); - } else { - /* OPC_JIC, OPC_JIALC */ - gen_compute_compact_branch(ctx, op, 0, rt, imm); - } - } else { - /* OPC_LWC2, OPC_SWC2 */ - /* COP2: Not implemented. */ - generate_exception_err(ctx, EXCP_CpU, 2); - } - break; - case OPC_CP2: - check_insn(ctx, INSN_LOONGSON2F); - /* Note that these instructions use different fields. */ - gen_loongson_multimedia(ctx, sa, rd, rt); - break; - - case OPC_CP3: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { - check_cp1_enabled(ctx); - op1 = MASK_CP3(ctx->opcode); - switch (op1) { - case OPC_LWXC1: - case OPC_LDXC1: - case OPC_LUXC1: - case OPC_SWXC1: - case OPC_SDXC1: - case OPC_SUXC1: - gen_flt3_ldst(ctx, op1, sa, rd, rs, rt); - break; - case OPC_PREFX: - /* Treat as NOP. */ - break; - case OPC_ALNV_PS: - case OPC_MADD_S: - case OPC_MADD_D: - case OPC_MADD_PS: - case OPC_MSUB_S: - case OPC_MSUB_D: - case OPC_MSUB_PS: - case OPC_NMADD_S: - case OPC_NMADD_D: - case OPC_NMADD_PS: - case OPC_NMSUB_S: - case OPC_NMSUB_D: - case OPC_NMSUB_PS: - gen_flt3_arith(ctx, op1, sa, rs, rd, rt); - break; - default: - MIPS_INVAL("cp3"); - generate_exception (ctx, EXCP_RI); - break; - } - } else { - generate_exception_err(ctx, EXCP_CpU, 1); - } - break; - -#if defined(TARGET_MIPS64) - /* MIPS64 opcodes */ - case OPC_LDL: case OPC_LDR: - case OPC_LLD: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - case OPC_LWU: - case OPC_LD: - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_ld(ctx, op, rt, rs, imm); - break; - case OPC_SDL: case OPC_SDR: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - case OPC_SD: - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_st(ctx, op, rt, rs, imm); - break; - case OPC_SCD: - check_insn_opc_removed(ctx, ISA_MIPS32R6); - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_st_cond(ctx, op, rt, rs, imm); - break; - case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */ - if (ctx->insn_flags & ISA_MIPS32R6) { - /* OPC_BNVC, OPC_BNEZALC, OPC_BNEC */ - gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); - } else { - /* OPC_DADDI */ - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_arith_imm(ctx, op, rt, rs, imm); - } - break; - case OPC_DADDIU: - check_insn(ctx, ISA_MIPS3); - check_mips_64(ctx); - gen_arith_imm(ctx, op, rt, rs, imm); - break; -#else - case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ - if (ctx->insn_flags & ISA_MIPS32R6) { - gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); - } else { - MIPS_INVAL("major opcode"); - generate_exception(ctx, EXCP_RI); - } - break; -#endif - case OPC_DAUI: /* OPC_JALX */ - if (ctx->insn_flags & ISA_MIPS32R6) { -#if defined(TARGET_MIPS64) - /* OPC_DAUI */ - check_mips_64(ctx); - if (rt != 0) { - TCGv t0 = tcg_temp_new(tcg_ctx); - gen_load_gpr(ctx, t0, rs); - 
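/* DAUI: rt = rs + (immediate << 16), i.e. the 16-bit immediate lands in bits 31..16 of the addend. */ -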
tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rt], t0, (uint32_t)imm << 16); - tcg_temp_free(tcg_ctx, t0); - } - MIPS_DEBUG("daui %s, %s, %04x", regnames[rt], regnames[rs], imm); -#else - generate_exception(ctx, EXCP_RI); - MIPS_INVAL("major opcode"); -#endif - } else { - /* OPC_JALX */ - check_insn(ctx, ASE_MIPS16 | ASE_MICROMIPS); - offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; - gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); - } - break; - case OPC_MSA: /* OPC_MDMX */ - /* MDMX: Not implemented. */ - gen_msa(env, ctx); - break; - case OPC_PCREL: - check_insn(ctx, ISA_MIPS32R6); - gen_pcrel(ctx, rs, imm); - break; - default: /* Invalid */ - MIPS_INVAL("major opcode"); - generate_exception(ctx, EXCP_RI); - break; - } -} - -static inline void -gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb, - bool search_pc) -{ - CPUState *cs = CPU(cpu); - CPUMIPSState *env = &cpu->env; - DisasContext ctx; - target_ulong pc_start; - uint16_t *gen_opc_end; - CPUBreakpoint *bp; - int j, lj = -1; - int num_insns; - int max_insns; - int insn_bytes; - int is_slot = 0; - TCGContext *tcg_ctx = env->uc->tcg_ctx; - TCGArg *save_opparam_ptr = NULL; - bool block_full = false; - - if (search_pc) - qemu_log("search pc %d\n", search_pc); - - pc_start = tb->pc; - gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE; - ctx.uc = env->uc; - ctx.pc = pc_start; - ctx.saved_pc = -1; - ctx.singlestep_enabled = cs->singlestep_enabled; - ctx.insn_flags = env->insn_flags; - ctx.CP0_Config1 = env->CP0_Config1; - ctx.tb = tb; - ctx.bstate = BS_NONE; - ctx.kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff; - ctx.rxi = (env->CP0_Config3 >> CP0C3_RXI) & 1; - ctx.ie = (env->CP0_Config4 >> CP0C4_IE) & 3; - ctx.bi = (env->CP0_Config3 >> CP0C3_BI) & 1; - ctx.bp = (env->CP0_Config3 >> CP0C3_BP) & 1; - /* Restore delay slot state from the tb context. */ - ctx.hflags = (uint32_t)tb->flags; /* FIXME: maybe use 64 bits here? */ - ctx.ulri = env->CP0_Config3 & (1 << CP0C3_ULRI); - restore_cpu_state(env, &ctx); -#ifdef CONFIG_USER_ONLY - ctx.mem_idx = MIPS_HFLAG_UM; -#else - ctx.mem_idx = ctx.hflags & MIPS_HFLAG_KSU; -#endif - num_insns = 0; - max_insns = tb->cflags & CF_COUNT_MASK; - if (max_insns == 0) - max_insns = CF_COUNT_MASK; - LOG_DISAS("\ntb %p idx %d hflags %04x\n", tb, ctx.mem_idx, ctx.hflags); - - // Unicorn: early check to see if the address of this block is the until address - if (tb->pc == env->uc->addr_end) { - gen_tb_start(tcg_ctx); - gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env); - ctx.bstate = BS_EXCP; - goto done_generating; - } - - // Unicorn: trace this block on request - // Only hook this block if it is not broken from previous translation due to - // full translation cache - if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) { - // save block address to see if we need to patch block size later - env->uc->block_addr = pc_start; - env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1; - gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start); - } else { - env->uc->size_arg = -1; - } - - gen_tb_start(tcg_ctx); - while (ctx.bstate == BS_NONE) { - // printf(">>> mips pc = %x\n", ctx.pc); - if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { - QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { - if (bp->pc == ctx.pc) { - save_cpu_state(&ctx, 1); - ctx.bstate = BS_BRANCH; - gen_helper_0e0i(tcg_ctx, raise_exception, EXCP_DEBUG); - /* Include the breakpoint location or the tb won't - * be flushed when it must be. 
*/ - ctx.pc += 4; - goto done_generating; - } - } - } - - if (search_pc) { - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - if (lj < j) { - lj++; - while (lj < j) - tcg_ctx->gen_opc_instr_start[lj++] = 0; - } - tcg_ctx->gen_opc_pc[lj] = ctx.pc; - tcg_ctx->gen_opc_hflags[lj] = ctx.hflags & MIPS_HFLAG_BMASK; - tcg_ctx->gen_opc_btarget[lj] = ctx.btarget; - tcg_ctx->gen_opc_instr_start[lj] = 1; - tcg_ctx->gen_opc_icount[lj] = num_insns; - } - //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) - // gen_io_start(); - - // Unicorn: end address tells us to stop emulation - if (ctx.pc == ctx.uc->addr_end) { - gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env); - ctx.bstate = BS_EXCP; - break; - } else { - bool insn_need_patch = false; - int insn_patch_offset = 1; - - // Unicorn: save param buffer - if (HOOK_EXISTS(env->uc, UC_HOOK_CODE)) - save_opparam_ptr = tcg_ctx->gen_opparam_ptr; - - is_slot = ctx.hflags & MIPS_HFLAG_BMASK; - - if (!(ctx.hflags & MIPS_HFLAG_M16)) { - ctx.opcode = cpu_ldl_code(env, ctx.pc); - insn_bytes = 4; - decode_opc(env, &ctx, &insn_need_patch, &insn_patch_offset); - } else if (ctx.insn_flags & ASE_MICROMIPS) { - ctx.opcode = cpu_lduw_code(env, ctx.pc); - insn_bytes = decode_micromips_opc(env, &ctx, &insn_need_patch); - } else if (ctx.insn_flags & ASE_MIPS16) { - ctx.opcode = cpu_lduw_code(env, ctx.pc); - insn_bytes = decode_mips16_opc(env, &ctx, &insn_need_patch); - } else { - generate_exception(&ctx, EXCP_RI); - ctx.bstate = BS_STOP; - break; - } - - // Unicorn: patch the callback for the instruction size - if (insn_need_patch) { - /* - int i; - for (i = 0; i < 30; i++) - printf("[%u] = %x\n", i, *(save_opparam_ptr + i)); - printf("\n"); - */ - *(save_opparam_ptr + insn_patch_offset) = insn_bytes; - } - } - - if (ctx.hflags & MIPS_HFLAG_BMASK) { - if (!(ctx.hflags & (MIPS_HFLAG_BDS16 | MIPS_HFLAG_BDS32 | - MIPS_HFLAG_FBNSLOT))) { - /* force to generate branch as there is neither delay nor - forbidden slot */ - is_slot = 1; - } - } - - if (is_slot) { - gen_branch(&ctx, insn_bytes); - } - ctx.pc += insn_bytes; - - num_insns++; - - /* Execute a branch and its delay slot as a single instruction. - This is what GDB expects and is consistent with what the - hardware does (e.g. if a delay slot instruction faults, the - reported PC is the PC of the branch). 
*/ - if (cs->singlestep_enabled && (ctx.hflags & MIPS_HFLAG_BMASK) == 0) { - break; - } - - if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) - break; - - if (tcg_ctx->gen_opc_ptr >= gen_opc_end) { - break; - } - - if (num_insns >= max_insns) - break; - - //if (singlestep) - // break; - } - - if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns) { - block_full = true; - } - - //if (tb->cflags & CF_LAST_IO) { - // gen_io_end(); - //} - if (cs->singlestep_enabled && ctx.bstate != BS_BRANCH) { - save_cpu_state(&ctx, ctx.bstate != BS_EXCP); - gen_helper_0e0i(tcg_ctx, raise_exception, EXCP_DEBUG); - } else { - switch (ctx.bstate) { - case BS_STOP: - gen_goto_tb(&ctx, 0, ctx.pc); - env->uc->next_pc = ctx.pc; - break; - case BS_NONE: - save_cpu_state(&ctx, 0); - gen_goto_tb(&ctx, 0, ctx.pc); - break; - case BS_EXCP: - tcg_gen_exit_tb(tcg_ctx, 0); - break; - case BS_BRANCH: - default: - break; - } - } -done_generating: - gen_tb_end(tcg_ctx, tb, num_insns); - *tcg_ctx->gen_opc_ptr = INDEX_op_end; - if (search_pc) { - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - lj++; - while (lj <= j) - tcg_ctx->gen_opc_instr_start[lj++] = 0; - } else { - tb->size = ctx.pc - pc_start; - tb->icount = num_insns; - } - - env->uc->block_full = block_full; -} - -void gen_intermediate_code (CPUMIPSState *env, struct TranslationBlock *tb) -{ - gen_intermediate_code_internal(mips_env_get_cpu(env), tb, false); -} - -void gen_intermediate_code_pc (CPUMIPSState *env, struct TranslationBlock *tb) -{ - gen_intermediate_code_internal(mips_env_get_cpu(env), tb, true); -} - -#if 0 -static void fpu_dump_state(CPUMIPSState *env, FILE *f, fprintf_function fpu_fprintf, - int flags) -{ - int i; - int is_fpu64 = !!(env->hflags & MIPS_HFLAG_F64); - -#define printfpr(fp) \ - do { \ - if (is_fpu64) \ - fpu_fprintf(f, "w:%08x d:%016" PRIx64 \ - " fd:%13g fs:%13g psu: %13g\n", \ - (fp)->w[FP_ENDIAN_IDX], (fp)->d, \ - (double)(fp)->fd, \ - (double)(fp)->fs[FP_ENDIAN_IDX], \ - (double)(fp)->fs[!FP_ENDIAN_IDX]); \ - else { \ - fpr_t tmp; \ - tmp.w[FP_ENDIAN_IDX] = (fp)->w[FP_ENDIAN_IDX]; \ - tmp.w[!FP_ENDIAN_IDX] = ((fp) + 1)->w[FP_ENDIAN_IDX]; \ - fpu_fprintf(f, "w:%08x d:%016" PRIx64 \ - " fd:%13g fs:%13g psu:%13g\n", \ - tmp.w[FP_ENDIAN_IDX], tmp.d, \ - (double)tmp.fd, \ - (double)tmp.fs[FP_ENDIAN_IDX], \ - (double)tmp.fs[!FP_ENDIAN_IDX]); \ - } \ - } while(0) - - - fpu_fprintf(f, "CP1 FCR0 0x%08x FCR31 0x%08x SR.FR %d fp_status 0x%02x\n", - env->active_fpu.fcr0, env->active_fpu.fcr31, is_fpu64, - get_float_exception_flags(&env->active_fpu.fp_status)); - for (i = 0; i < 32; (is_fpu64) ? i++ : (i += 2)) { - fpu_fprintf(f, "%3s: ", fregnames[i]); - printfpr(&env->active_fpu.fpr[i]); - } - -#undef printfpr -} -#endif - -#if defined(TARGET_MIPS64) && defined(MIPS_DEBUG_SIGN_EXTENSIONS) -/* Debug help: The architecture requires 32bit code to maintain proper - sign-extended values on 64bit machines. 
*/ - -#define SIGN_EXT_P(val) ((((val) & ~0x7fffffff) == 0) || (((val) & ~0x7fffffff) == ~0x7fffffff)) - -static void -cpu_mips_check_sign_extensions (CPUMIPSState *env, FILE *f, - fprintf_function cpu_fprintf, - int flags) -{ - int i; - - if (!SIGN_EXT_P(env->active_tc.PC)) - cpu_fprintf(f, "BROKEN: pc=0x" TARGET_FMT_lx "\n", env->active_tc.PC); - if (!SIGN_EXT_P(env->active_tc.HI[0])) - cpu_fprintf(f, "BROKEN: HI=0x" TARGET_FMT_lx "\n", env->active_tc.HI[0]); - if (!SIGN_EXT_P(env->active_tc.LO[0])) - cpu_fprintf(f, "BROKEN: LO=0x" TARGET_FMT_lx "\n", env->active_tc.LO[0]); - if (!SIGN_EXT_P(env->btarget)) - cpu_fprintf(f, "BROKEN: btarget=0x" TARGET_FMT_lx "\n", env->btarget); - - for (i = 0; i < 32; i++) { - if (!SIGN_EXT_P(env->active_tc.gpr[i])) - cpu_fprintf(f, "BROKEN: %s=0x" TARGET_FMT_lx "\n", regnames[i], env->active_tc.gpr[i]); - } - - if (!SIGN_EXT_P(env->CP0_EPC)) - cpu_fprintf(f, "BROKEN: EPC=0x" TARGET_FMT_lx "\n", env->CP0_EPC); - if (!SIGN_EXT_P(env->lladdr)) - cpu_fprintf(f, "BROKEN: LLAddr=0x" TARGET_FMT_lx "\n", env->lladdr); -} -#endif - -void mips_tcg_init(struct uc_struct *uc) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - TCGv **cpu_gpr; - int i; - - tcg_ctx->cpu_env = tcg_global_reg_new_ptr(uc->tcg_ctx, TCG_AREG0, "env"); - - if (!uc->init_tcg) { - for (i = 0; i < 32; i++) { - tcg_ctx->cpu_gpr[i] = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_gpr[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUMIPSState, active_tc.gpr[i]), - regnames[i]); - } - } - - cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; - TCGV_UNUSED(*cpu_gpr[0]); - - for (i = 0; i < 32; i++) { - int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]); - tcg_ctx->msa_wr_d[i * 2] = - tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, off, msaregnames[i * 2]); - /* The scalar floating-point unit (FPU) registers are mapped on - * the MSA vector registers. 
*/ - tcg_ctx->fpu_f64[i] = tcg_ctx->msa_wr_d[i * 2]; - off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]); - tcg_ctx->msa_wr_d[i * 2 + 1] = - tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, off, msaregnames[i * 2 + 1]); - } - - if (!uc->init_tcg) - tcg_ctx->cpu_PC = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_PC) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUMIPSState, active_tc.PC), "PC"); - - if (!uc->init_tcg) { - for (i = 0; i < MIPS_DSP_ACC; i++) { - tcg_ctx->cpu_HI[i] = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_HI[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUMIPSState, active_tc.HI[i]), - regnames_HI[i]); - tcg_ctx->cpu_LO[i] = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_LO[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUMIPSState, active_tc.LO[i]), - regnames_LO[i]); - } - } - - if (!uc->init_tcg) - tcg_ctx->cpu_dspctrl = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_dspctrl) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUMIPSState, active_tc.DSPControl), - "DSPControl"); - - if (!uc->init_tcg) - tcg_ctx->bcond = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->bcond) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUMIPSState, bcond), "bcond"); - - if (!uc->init_tcg) - tcg_ctx->btarget = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->btarget) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUMIPSState, btarget), "btarget"); - - tcg_ctx->hflags = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, - offsetof(CPUMIPSState, hflags), "hflags"); - - //tcg_ctx->fpu_fcr0 = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, - // offsetof(CPUMIPSState, active_fpu.fcr0), - // "fcr0"); - tcg_ctx->fpu_fcr31 = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, - offsetof(CPUMIPSState, active_fpu.fcr31), - "fcr31"); - uc->init_tcg = true; -} - -#include "translate_init.c" - -MIPSCPU *cpu_mips_init(struct uc_struct *uc, const char *cpu_model) -{ - MIPSCPU *cpu; - CPUMIPSState *env; - const mips_def_t *def; - - def = cpu_mips_find_by_name(cpu_model); - if (!def) - return NULL; - cpu = MIPS_CPU(uc, object_new(uc, TYPE_MIPS_CPU)); - env = &cpu->env; - env->cpu_model = def; - -#ifndef CONFIG_USER_ONLY - mmu_init(env, def); -#endif - fpu_init(env, def); - mvp_init(env, def); - - object_property_set_bool(uc, OBJECT(cpu), true, "realized", NULL); - - return cpu; -} - -void cpu_state_reset(CPUMIPSState *env) -{ - MIPSCPU *cpu = mips_env_get_cpu(env); - CPUState *cs = CPU(cpu); - - /* Reset registers to their default values */ - env->CP0_PRid = env->cpu_model->CP0_PRid; - env->CP0_Config0 = env->cpu_model->CP0_Config0; -#ifdef TARGET_WORDS_BIGENDIAN - env->CP0_Config0 |= (1 << CP0C0_BE); -#endif - env->CP0_Config1 = env->cpu_model->CP0_Config1; - env->CP0_Config2 = env->cpu_model->CP0_Config2; - env->CP0_Config3 = env->cpu_model->CP0_Config3; - env->CP0_Config4 = env->cpu_model->CP0_Config4; - env->CP0_Config4_rw_bitmask = env->cpu_model->CP0_Config4_rw_bitmask; - env->CP0_Config5 = env->cpu_model->CP0_Config5; - env->CP0_Config5_rw_bitmask = env->cpu_model->CP0_Config5_rw_bitmask; - env->CP0_Config6 = env->cpu_model->CP0_Config6; - env->CP0_Config7 = env->cpu_model->CP0_Config7; - env->CP0_LLAddr_rw_bitmask = env->cpu_model->CP0_LLAddr_rw_bitmask - << env->cpu_model->CP0_LLAddr_shift; - env->CP0_LLAddr_shift = env->cpu_model->CP0_LLAddr_shift; - env->SYNCI_Step = env->cpu_model->SYNCI_Step; - env->CCRes = env->cpu_model->CCRes; - env->CP0_Status_rw_bitmask = env->cpu_model->CP0_Status_rw_bitmask; - env->CP0_TCStatus_rw_bitmask = 
env->cpu_model->CP0_TCStatus_rw_bitmask; - env->CP0_SRSCtl = env->cpu_model->CP0_SRSCtl; - env->current_tc = 0; - env->SEGBITS = env->cpu_model->SEGBITS; - env->SEGMask = (target_ulong)((1ULL << env->cpu_model->SEGBITS) - 1); -#if defined(TARGET_MIPS64) - if (env->cpu_model->insn_flags & ISA_MIPS3) { - env->SEGMask |= 3ULL << 62; - } -#endif - env->PABITS = env->cpu_model->PABITS; - env->PAMask = (target_ulong)((1ULL << env->cpu_model->PABITS) - 1); - env->CP0_SRSConf0_rw_bitmask = env->cpu_model->CP0_SRSConf0_rw_bitmask; - env->CP0_SRSConf0 = env->cpu_model->CP0_SRSConf0; - env->CP0_SRSConf1_rw_bitmask = env->cpu_model->CP0_SRSConf1_rw_bitmask; - env->CP0_SRSConf1 = env->cpu_model->CP0_SRSConf1; - env->CP0_SRSConf2_rw_bitmask = env->cpu_model->CP0_SRSConf2_rw_bitmask; - env->CP0_SRSConf2 = env->cpu_model->CP0_SRSConf2; - env->CP0_SRSConf3_rw_bitmask = env->cpu_model->CP0_SRSConf3_rw_bitmask; - env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3; - env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask; - env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4; - env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask; - env->CP0_PageGrain = env->cpu_model->CP0_PageGrain; - env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0; - env->msair = env->cpu_model->MSAIR; - env->insn_flags = env->cpu_model->insn_flags; - -#if defined(CONFIG_USER_ONLY) - env->CP0_Status = (MIPS_HFLAG_UM << CP0St_KSU); -# ifdef TARGET_MIPS64 - /* Enable 64-bit register mode. */ - env->CP0_Status |= (1 << CP0St_PX); -# endif -# ifdef TARGET_ABI_MIPSN64 - /* Enable 64-bit address mode. */ - env->CP0_Status |= (1 << CP0St_UX); -# endif - /* Enable access to the CPUNum, SYNCI_Step, CC, and CCRes RDHWR - hardware registers. */ - env->CP0_HWREna |= 0x0000000F; - if (env->CP0_Config1 & (1 << CP0C1_FP)) { - env->CP0_Status |= (1 << CP0St_CU1); - } - if (env->CP0_Config3 & (1 << CP0C3_DSPP)) { - env->CP0_Status |= (1 << CP0St_MX); - } -# if defined(TARGET_MIPS64) - /* For MIPS64, init FR bit to 1 if FPU unit is there and bit is writable. */ - if ((env->CP0_Config1 & (1 << CP0C1_FP)) && - (env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) { - env->CP0_Status |= (1 << CP0St_FR); - } -# endif -#else - if (env->hflags & MIPS_HFLAG_BMASK) { - /* If the exception was raised from a delay slot, - come back to the jump. */ - env->CP0_ErrorEPC = env->active_tc.PC - 4; - } else { - env->CP0_ErrorEPC = env->active_tc.PC; - } - env->active_tc.PC = (int32_t)0xBFC00000; - env->CP0_Random = env->tlb->nb_tlb - 1; - env->tlb->tlb_in_use = env->tlb->nb_tlb; - env->CP0_Wired = 0; - env->CP0_EBase = (cs->cpu_index & 0x3FF); - env->CP0_EBase |= 0x80000000; - env->CP0_Status = (1 << CP0St_BEV) | (1 << CP0St_ERL); - /* vectored interrupts not implemented, timer on int 7, - no performance counters. */ - env->CP0_IntCtl = 0xe0000000; - { - int i; - - for (i = 0; i < 7; i++) { - env->CP0_WatchLo[i] = 0; - env->CP0_WatchHi[i] = 0x80000000; - } - env->CP0_WatchLo[7] = 0; - env->CP0_WatchHi[7] = 0; - } - /* Count register increments in debug mode, EJTAG version 1 */ - env->CP0_Debug = (1 << CP0DB_CNT) | (0x1 << CP0DB_VER); - - cpu_mips_store_count(env, 1); - - if (env->CP0_Config3 & (1 << CP0C3_MT)) { - int i; - - /* Only TC0 on VPE 0 starts as active. */ - for (i = 0; i < ARRAY_SIZE(env->tcs); i++) { - env->tcs[i].CP0_TCBind = cs->cpu_index << CP0TCBd_CurVPE; - env->tcs[i].CP0_TCHalt = 1; - } - env->active_tc.CP0_TCHalt = 1; - cs->halted = 1; - - if (cs->cpu_index == 0) { - /* VPE0 starts up enabled. 
*/ - env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); - env->CP0_VPEConf0 |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); - - /* TC0 starts up unhalted. */ - cs->halted = 0; - env->active_tc.CP0_TCHalt = 0; - env->tcs[0].CP0_TCHalt = 0; - /* With thread 0 active. */ - env->active_tc.CP0_TCStatus = (1 << CP0TCSt_A); - env->tcs[0].CP0_TCStatus = (1 << CP0TCSt_A); - } - } - if (env->CP0_Config1 & (1 << CP0C1_FP)) { - env->CP0_Status |= (1 << CP0St_CU1); - } -#endif - if ((env->insn_flags & ISA_MIPS32R6) && - (env->active_fpu.fcr0 & (1 << FCR0_F64))) { - /* Status.FR = 0 mode in 64-bit FPU not allowed in R6 */ - env->CP0_Status |= (1 << CP0St_FR); - } - - /* MSA */ - if (env->CP0_Config3 & (1 << CP0C3_MSAP)) { - msa_reset(env); - } - - compute_hflags(env); - cs->exception_index = EXCP_NONE; -} - -void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, int pc_pos) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - env->active_tc.PC = tcg_ctx->gen_opc_pc[pc_pos]; - env->hflags &= ~MIPS_HFLAG_BMASK; - env->hflags |= tcg_ctx->gen_opc_hflags[pc_pos]; - switch (env->hflags & MIPS_HFLAG_BMASK_BASE) { - case MIPS_HFLAG_BR: - break; - case MIPS_HFLAG_BC: - case MIPS_HFLAG_BL: - case MIPS_HFLAG_B: - env->btarget = tcg_ctx->gen_opc_btarget[pc_pos]; - break; - } -} diff --git a/qemu/target-mips/translate_init.c b/qemu/target-mips/translate_init.c deleted file mode 100644 index 7178d008..00000000 --- a/qemu/target-mips/translate_init.c +++ /dev/null @@ -1,948 +0,0 @@ -/* - * MIPS emulation for qemu: CPU initialisation routines. - * - * Copyright (c) 2004-2005 Jocelyn Mayer - * Copyright (c) 2007 Herve Poussineau - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -/* CPU / CPU family specific config register values. */ - -/* Have config1, uncached coherency */ -#define MIPS_CONFIG0 \ - ((1U << CP0C0_M) | (0x2 << CP0C0_K0)) - -/* Have config2, no coprocessor2 attached, no MDMX support attached, - no performance counters, watch registers present, - no code compression, EJTAG present, no FPU */ -#define MIPS_CONFIG1 \ -((1U << CP0C1_M) | \ - (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \ - (1 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \ - (0 << CP0C1_FP)) - -/* Have config3, no tertiary/secondary caches implemented */ -#define MIPS_CONFIG2 \ -((1U << CP0C2_M)) - -/* No config4, no DSP ASE, no large physaddr (PABITS), - no external interrupt controller, no vectored interrupts, - no 1kb pages, no SmartMIPS ASE, no trace logic */ -#define MIPS_CONFIG3 \ -((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \ - (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \ - (0 << CP0C3_SM) | (0 << CP0C3_TL)) -#define MIPS_CONFIG4 \ -((0 << CP0C4_M)) - -#define MIPS_CONFIG5 \ -((0 << CP0C5_M)) - -/* MMU types, the first four entries have the same layout as the - CP0C0_MT field. 
*/ -enum mips_mmu_types { - MMU_TYPE_NONE, - MMU_TYPE_R4000, - MMU_TYPE_RESERVED, - MMU_TYPE_FMT, - MMU_TYPE_R3000, - MMU_TYPE_R6000, - MMU_TYPE_R8000 -}; - -struct mips_def_t { - const char *name; - int32_t CP0_PRid; - int32_t CP0_Config0; - int32_t CP0_Config1; - int32_t CP0_Config2; - int32_t CP0_Config3; - int32_t CP0_Config4; - int32_t CP0_Config4_rw_bitmask; - int32_t CP0_Config5; - int32_t CP0_Config5_rw_bitmask; - int32_t CP0_Config6; - int32_t CP0_Config7; - target_ulong CP0_LLAddr_rw_bitmask; - int CP0_LLAddr_shift; - int32_t SYNCI_Step; - int32_t CCRes; - int32_t CP0_Status_rw_bitmask; - int32_t CP0_TCStatus_rw_bitmask; - int32_t CP0_SRSCtl; - int32_t CP1_fcr0; - int32_t MSAIR; - int32_t SEGBITS; - int32_t PABITS; - int32_t CP0_SRSConf0_rw_bitmask; - int32_t CP0_SRSConf0; - int32_t CP0_SRSConf1_rw_bitmask; - int32_t CP0_SRSConf1; - int32_t CP0_SRSConf2_rw_bitmask; - int32_t CP0_SRSConf2; - int32_t CP0_SRSConf3_rw_bitmask; - int32_t CP0_SRSConf3; - int32_t CP0_SRSConf4_rw_bitmask; - int32_t CP0_SRSConf4; - int32_t CP0_PageGrain_rw_bitmask; - int32_t CP0_PageGrain; - int insn_flags; - enum mips_mmu_types mmu_type; -}; - -/*****************************************************************************/ -/* MIPS CPU definitions */ -static const mips_def_t mips_defs[] = -{ - { - "4Kc", - 0x00018000, - MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (15 << CP0C1_MMU) | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (0 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3, - 0,0, - 0,0, - 0, - 0, - 0, - 4, - 32, - 2, - 0x1278FF17, - 0, - 0, - 0, - 0, - 32, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS32, - MMU_TYPE_R4000, - }, - { - "4Km", - 0x00018300, - /* Config1 implemented, fixed mapping MMU, - no virtual icache, uncached coherency. 
*/ - MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT), - MIPS_CONFIG1 | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (1 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3, - 0,0, - 0,0, - 0, - 0, - - 0, - 4, - 32, - 2, - 0x1258FF17, - 0, - - 0, - 0, - 0, - 32, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS32 | ASE_MIPS16, - MMU_TYPE_FMT, - }, - { - "4KEcR1", - 0x00018400, - MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (15 << CP0C1_MMU) | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (0 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3, - 0,0, - 0,0, - 0, - 0, - 0, - 4, - 32, - 2, - 0x1278FF17, - 0, - 0, - 0, - 0, - 32, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS32, - MMU_TYPE_R4000, - }, - { - "4KEmR1", - 0x00018500, - MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT), - MIPS_CONFIG1 | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (1 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3, - 0,0, - 0,0, - 0, - 0, - 0, - 4, - 32, - 2, - 0x1258FF17, - 0, - 0, - 0, - 0, - 32, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS32 | ASE_MIPS16, - MMU_TYPE_FMT, - }, - { - "4KEc", - 0x00019000, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (15 << CP0C1_MMU) | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (0 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3 | (0 << CP0C3_VInt), - 0,0, - 0,0, - 0, - 0, - 0, - 4, - 32, - 2, - 0x1278FF17, - 0, - 0, - 0, - 0, - 32, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS32R2, - MMU_TYPE_R4000, - }, - { - "4KEm", - 0x00019100, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | - (MMU_TYPE_FMT << CP0C0_MT), - MIPS_CONFIG1 | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (1 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3, - 0,0, - 0,0, - 0, - 0, - 0, - 4, - 32, - 2, - 0x1258FF17, - 0, - 0, - 0, - 0, - 32, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS32R2 | ASE_MIPS16, - MMU_TYPE_FMT, - }, - { - "24Kc", - 0x00019300, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (15 << CP0C1_MMU) | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (1 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3 | (0 << CP0C3_VInt), - 0,0, - 0,0, - 0, - 0, - 0, - 4, - 32, - 2, - /* No DSP implemented. */ - 0x1278FF1F, - 0, - 0, - 0, - 32, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS32R2 | ASE_MIPS16, - MMU_TYPE_R4000, - }, - { - "24Kf", - 0x00019300, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (1 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3 | (0 << CP0C3_VInt), - 0,0, - 0,0, - 0, - 0, - 0, - 4, - 32, - 2, - /* No DSP implemented. 
*/ - 0x3678FF1F, - 0, - 0, - (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | - (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID), - 0, - 32, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS32R2 | ASE_MIPS16, - MMU_TYPE_R4000, - }, - { - "34Kf", - 0x00019500, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (1 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3 | (1 << CP0C3_VInt) | (1 << CP0C3_MT) | - (1 << CP0C3_DSPP), - 0, - 0, - 32, - 2, - 0x3778FF1F, - (0 << CP0TCSt_TCU3) | (0 << CP0TCSt_TCU2) | - (1 << CP0TCSt_TCU1) | (1 << CP0TCSt_TCU0) | - (0 << CP0TCSt_TMX) | (1 << CP0TCSt_DT) | - (1 << CP0TCSt_DA) | (1 << CP0TCSt_A) | - (0x3 << CP0TCSt_TKSU) | (1 << CP0TCSt_IXMT) | - (0xff << CP0TCSt_TASID), - (0xf << CP0SRSCtl_HSS), - 0, - 32, - 32, - (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | - (1 << FCR0_D) | (1 << FCR0_S) | (0x95 << FCR0_PRID), - 0x3fffffff, - (1U << CP0SRSC0_M) | (0x3fe << CP0SRSC0_SRS3) | - (0x3fe << CP0SRSC0_SRS2) | (0x3fe << CP0SRSC0_SRS1), - 0x3fffffff, - (1U << CP0SRSC1_M) | (0x3fe << CP0SRSC1_SRS6) | - (0x3fe << CP0SRSC1_SRS5) | (0x3fe << CP0SRSC1_SRS4), - 0x3fffffff, - (1U << CP0SRSC2_M) | (0x3fe << CP0SRSC2_SRS9) | - (0x3fe << CP0SRSC2_SRS8) | (0x3fe << CP0SRSC2_SRS7), - 0x3fffffff, - (1U << CP0SRSC3_M) | (0x3fe << CP0SRSC3_SRS12) | - (0x3fe << CP0SRSC3_SRS11) | (0x3fe << CP0SRSC3_SRS10), - 0x3fffffff, - (0x3fe << CP0SRSC4_SRS15) | - (0x3fe << CP0SRSC4_SRS14) | (0x3fe << CP0SRSC4_SRS13), - 0,0, - CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_MT, - MMU_TYPE_R4000, - }, - { - "74Kf", - 0x00019700, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (1 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3 | (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) | - (0 << CP0C3_VInt), - 0,0, - 0,0, - 0, - 0, - 0, - 4, - 32, - 2, - 0x3778FF1F, - 0, - 0, - (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | - (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID), - 0, - 32, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSPR2, - MMU_TYPE_R4000, - }, - { - /* A generic CPU providing MIPS32 Release 5 features. - FIXME: Eventually this should be replaced by a real CPU model. */ - "mips32r5-generic", - 0x00019700, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | - (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | - (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | - (1 << CP0C1_CA), - MIPS_CONFIG2, - MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_MSAP), - MIPS_CONFIG4 | (1U << CP0C4_M), - 0, - MIPS_CONFIG5 | (1 << CP0C5_UFR), - (0 << CP0C5_M) | (1 << CP0C5_K) | - (1 << CP0C5_CV) | (0 << CP0C5_EVA) | - (1 << CP0C5_MSAEn) | (1 << CP0C5_UFR) | - (0 << CP0C5_NFExists), - 0, - 0, - 0, - 4, - 32, - 2, - 0x3778FF1F, - 0, - 0, - (1 << FCR0_UFRP) | (1 << FCR0_F64) | (1 << FCR0_L) | - (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | - (0x93 << FCR0_PRID), - 0, - 32, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS32R5 | ASE_MIPS16 | ASE_MSA, - MMU_TYPE_R4000, - }, -#if defined(TARGET_MIPS64) - { - "R4000", - 0x00000400, - /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. 
*/ - (1 << 17) | (0x1 << 9) | (0x1 << 6) | (0x2 << CP0C0_K0), - /* Note: Config1 is only used internally, the R4000 has only Config0. */ - (1 << CP0C1_FP) | (47 << CP0C1_MMU), - 0, - 0, - 0,0, - 0,0, - 0, - 0, - 0xFFFFFFFF, - 4, - 16, - 2, - 0x3678FFFF, - 0, - 0, - /* The R4000 has a full 64bit FPU but doesn't use the fcr0 bits. */ - (0x5 << FCR0_PRID) | (0x0 << FCR0_REV), - 0, - 40, - 36, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS3, - MMU_TYPE_R4000, - }, - { - "VR5432", - 0x00005400, - /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */ - (1 << 17) | (0x1 << 9) | (0x1 << 6) | (0x2 << CP0C0_K0), - (1 << CP0C1_FP) | (47 << CP0C1_MMU), - 0, - 0, - 0,0, - 0,0, - 0, - 0, - 0xFFFFFFFFL, - 4, - 16, - 2, - 0x3678FFFF, - 0, - 0, - /* The VR5432 has a full 64bit FPU but doesn't use the fcr0 bits. */ - (0x54 << FCR0_PRID) | (0x0 << FCR0_REV), - 0, - 40, - 32, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_VR54XX, - MMU_TYPE_R4000, - }, - { - "5Kc", - 0x00018100, - MIPS_CONFIG0 | (0x2 << CP0C0_AT) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (31 << CP0C1_MMU) | - (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | - (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | - (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), - MIPS_CONFIG2, - MIPS_CONFIG3, - 0,0, - 0,0, - 0, - 0, - 0, - 4, - 32, - 2, - 0x32F8FFFF, - 0, - 0, - 0, - 0, - 42, - 36, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS64, - MMU_TYPE_R4000, - }, - { - "5Kf", - 0x00018100, - MIPS_CONFIG0 | (0x2 << CP0C0_AT) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) | - (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | - (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | - (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), - MIPS_CONFIG2, - MIPS_CONFIG3, - 0,0, - 0,0, - 0, - 0, - - 0, - 4, - 32, - 2, - 0x36F8FFFF, - 0, - 0, - /* The 5Kf has F64 / L / W but doesn't use the fcr0 bits. */ - (1 << FCR0_D) | (1 << FCR0_S) | - (0x81 << FCR0_PRID) | (0x0 << FCR0_REV), - 0, - 42, - 36, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS64, - MMU_TYPE_R4000, - }, - { - "20Kc", - /* We emulate a later version of the 20Kc, earlier ones had a broken - WAIT instruction. */ - 0x000182a0, - MIPS_CONFIG0 | (0x2 << CP0C0_AT) | - (MMU_TYPE_R4000 << CP0C0_MT) | (1 << CP0C0_VI), - MIPS_CONFIG1 | (1 << CP0C1_FP) | (47 << CP0C1_MMU) | - (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | - (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | - (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), - MIPS_CONFIG2, - MIPS_CONFIG3, - 0,0, - 0,0, - 0, - 0, - 0, - 0, - 32, - 1, - 0x36FBFFFF, - 0, - 0, - /* The 20Kc has F64 / L / W but doesn't use the fcr0 bits. */ - (1 << FCR0_3D) | (1 << FCR0_PS) | - (1 << FCR0_D) | (1 << FCR0_S) | - (0x82 << FCR0_PRID) | (0x0 << FCR0_REV), - 0, - 40, - 36, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS64 | ASE_MIPS3D, - MMU_TYPE_R4000, - }, - { - /* A generic CPU providing MIPS64 Release 2 features. - FIXME: Eventually this should be replaced by a real CPU model.
*/ - "MIPS64R2-generic", - 0x00010000, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) | - (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | - (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | - (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), - MIPS_CONFIG2, - MIPS_CONFIG3 | (1 << CP0C3_LPA), - 0,0, - 0,0, - 0, - 0, - 0, - 0, - 32, - 2, - 0x36FBFFFF, - 0, - 0, - (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) | - (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | - (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), - 0, - 42, - /* The architectural limit is 59, but we have hardcoded 36 bit - in some places... - 59, */ /* the architectural limit */ - 36, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS64R2 | ASE_MIPS3D, - MMU_TYPE_R4000, - }, - { - /* A generic CPU supporting MIPS64 Release 6 ISA. - FIXME: Support IEEE 754-2008 FP and misaligned memory accesses. - Eventually this should be replaced by a real CPU model. */ - "MIPS64R6-generic", - 0x00010000, - MIPS_CONFIG0 | (0x2 << CP0C0_AR) | (0x2 << CP0C0_AT) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) | - (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | - (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | - (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), - MIPS_CONFIG2, - MIPS_CONFIG3 | (1 << CP0C3_RXI) | (1 << CP0C3_BP) | - (1 << CP0C3_BI) | (1 << CP0C3_ULRI) | (1U << CP0C3_M), - MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) | - (3 << CP0C4_IE) | (1 << CP0C4_M), - 0, - 0, - (1 << CP0C5_SBRI), - 0, - 0, - 0, - 0, - 32, - 2, - 0x30D8FFFF, - 0, - 0, - (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | - (1 << FCR0_D) | (1 << FCR0_S) | (0x00 << FCR0_PRID) | - (0x0 << FCR0_REV), - 0, - 42, - /* The architectural limit is 59, but we have hardcoded 36 bit - in some places... - 59, */ /* the architectural limit */ - 36, - 0,0, 0,0, 0,0, 0,0, 0,0, - (1 << CP0PG_IEC) | (1 << CP0PG_XIE) | - (1U << CP0PG_RIE), - 0, - CPU_MIPS64R6, - MMU_TYPE_R4000, - }, - { - "Loongson-2E", - 0x6302, - /*64KB I-cache and d-cache. 4 way with 32 bit cache line size*/ - (0x1<<17) | (0x1<<16) | (0x1<<11) | (0x1<<8) | (0x1<<5) | - (0x1<<4) | (0x1<<1), - /* Note: Config1 is only used internally, Loongson-2E has only Config0. */ - (1 << CP0C1_FP) | (47 << CP0C1_MMU), - 0, - 0, - 0,0, - 0,0, - 0, - 0, - 0, - 0, - 16, - 2, - 0x35D0FFFF, - 0, - 0, - (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), - 0, - 40, - 40, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_LOONGSON2E, - MMU_TYPE_R4000, - }, - { - "Loongson-2F", - 0x6303, - /*64KB I-cache and d-cache. 4 way with 32 bit cache line size*/ - (0x1<<17) | (0x1<<16) | (0x1<<11) | (0x1<<8) | (0x1<<5) | - (0x1<<4) | (0x1<<1), - /* Note: Config1 is only used internally, Loongson-2F has only Config0. */ - (1 << CP0C1_FP) | (47 << CP0C1_MMU), - 0, - 0, - 0,0, - 0,0, - 0, - 0, - 0, - 0, - 16, - 2, - 0xF5D0FF1F, /*bit5:7 not writable*/ - 0, - 0, - (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), - 0, - 40, - 40, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_LOONGSON2F, - MMU_TYPE_R4000, - }, - { - /* A generic CPU providing MIPS64 ASE DSP 2 features. - FIXME: Eventually this should be replaced by a real CPU model. 
*/ - "mips64dspr2", - 0x00010000, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | - (MMU_TYPE_R4000 << CP0C0_MT), - MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) | - (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | - (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | - (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), - MIPS_CONFIG2, - MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_DSP2P) | - (1 << CP0C3_DSPP) | (1 << CP0C3_LPA), - 0,0, - 0,0, - 0, - 0, - 0, - 0, - 32, - 2, - 0x37FBFFFF, - 0, - 0, - (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) | - (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | - (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), - 0, - 42, - /* The architectural limit is 59, but we have hardcoded 36 bit - in some places... - 59, */ /* the architectural limit */ - 36, - 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, - CPU_MIPS64R2 | ASE_DSP | ASE_DSPR2, - MMU_TYPE_R4000, - }, - -#endif -}; - -static const mips_def_t *cpu_mips_find_by_name (const char *name) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(mips_defs); i++) { - if (strcasecmp(name, mips_defs[i].name) == 0) { - return &mips_defs[i]; - } - } - return NULL; -} - -void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(mips_defs); i++) { - (*cpu_fprintf)(f, "MIPS '%s'\n", - mips_defs[i].name); - } -} - -#ifndef CONFIG_USER_ONLY -static void no_mmu_init (CPUMIPSState *env, const mips_def_t *def) -{ - env->tlb->nb_tlb = 1; - env->tlb->map_address = &no_mmu_map_address; -} - -static void fixed_mmu_init (CPUMIPSState *env, const mips_def_t *def) -{ - env->tlb->nb_tlb = 1; - env->tlb->map_address = &fixed_mmu_map_address; -} - -static void r4k_mmu_init (CPUMIPSState *env, const mips_def_t *def) -{ - env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63); - env->tlb->map_address = &r4k_map_address; - env->tlb->helper_tlbwi = r4k_helper_tlbwi; - env->tlb->helper_tlbwr = r4k_helper_tlbwr; - env->tlb->helper_tlbp = r4k_helper_tlbp; - env->tlb->helper_tlbr = r4k_helper_tlbr; - env->tlb->helper_tlbinv = r4k_helper_tlbinv; - env->tlb->helper_tlbinvf = r4k_helper_tlbinvf; -} - -static void mmu_init (CPUMIPSState *env, const mips_def_t *def) -{ - MIPSCPU *cpu = mips_env_get_cpu(env); - - env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext)); - - switch (def->mmu_type) { - case MMU_TYPE_NONE: - no_mmu_init(env, def); - break; - case MMU_TYPE_R4000: - r4k_mmu_init(env, def); - break; - case MMU_TYPE_FMT: - fixed_mmu_init(env, def); - break; - case MMU_TYPE_R3000: - case MMU_TYPE_R6000: - case MMU_TYPE_R8000: - default: - cpu_abort(CPU(cpu), "MMU type not supported\n"); - } -} -#endif /* CONFIG_USER_ONLY */ - -static void fpu_init (CPUMIPSState *env, const mips_def_t *def) -{ - int i; - - for (i = 0; i < MIPS_FPU_MAX; i++) - env->fpus[i].fcr0 = def->CP1_fcr0; - - memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu)); -} - -static void mvp_init (CPUMIPSState *env, const mips_def_t *def) -{ - env->mvp = g_malloc0(sizeof(CPUMIPSMVPContext)); - - /* MVPConf1 implemented, TLB sharable, no gating storage support, - programmable cache partitioning implemented, number of allocatable - and sharable TLB entries, MVP has allocatable TCs, 2 VPEs - implemented, 5 TCs implemented. */ - env->mvp->CP0_MVPConf0 = (1U << CP0MVPC0_M) | (1 << CP0MVPC0_TLBS) | - (0 << CP0MVPC0_GS) | (1 << CP0MVPC0_PCP) | -// TODO: actually do 2 VPEs. 
-// (1 << CP0MVPC0_TCA) | (0x1 << CP0MVPC0_PVPE) | -// (0x04 << CP0MVPC0_PTC); - (1 << CP0MVPC0_TCA) | (0x0 << CP0MVPC0_PVPE) | - (0x00 << CP0MVPC0_PTC); -#if !defined(CONFIG_USER_ONLY) - /* Usermode has no TLB support */ - env->mvp->CP0_MVPConf0 |= (env->tlb->nb_tlb << CP0MVPC0_PTLBE); -#endif - - /* Allocatable CP1 have media extensions, allocatable CP1 have FP support, - no UDI implemented, no CP2 implemented, 1 CP1 implemented. */ - env->mvp->CP0_MVPConf1 = (1U << CP0MVPC1_CIM) | (1 << CP0MVPC1_CIF) | - (0x0 << CP0MVPC1_PCX) | (0x0 << CP0MVPC1_PCP2) | - (0x1 << CP0MVPC1_PCP1); -} - -static void msa_reset(CPUMIPSState *env) -{ -#ifdef CONFIG_USER_ONLY - /* MSA access enabled */ - env->CP0_Config5 |= 1 << CP0C5_MSAEn; - env->CP0_Status |= (1 << CP0St_CU1) | (1 << CP0St_FR); -#endif - - /* MSA CSR: - - non-signaling floating point exception mode off (NX bit is 0) - - Cause, Enables, and Flags are all 0 - - round to nearest / ties to even (RM bits are 0) */ - env->active_tc.msacsr = 0; - - /* tininess detected after rounding.*/ - set_float_detect_tininess(float_tininess_after_rounding, - &env->active_tc.msa_fp_status); - - /* clear float_status exception flags */ - set_float_exception_flags(0, &env->active_tc.msa_fp_status); - - /* set float_status rounding mode */ - set_float_rounding_mode(float_round_nearest_even, - &env->active_tc.msa_fp_status); - - /* set float_status flush modes */ - set_flush_to_zero(0, &env->active_tc.msa_fp_status); - set_flush_inputs_to_zero(0, &env->active_tc.msa_fp_status); - - /* clear float_status nan mode */ - set_default_nan_mode(0, &env->active_tc.msa_fp_status); -} diff --git a/qemu/target-mips/unicorn.c b/qemu/target-mips/unicorn.c deleted file mode 100644 index 67233413..00000000 --- a/qemu/target-mips/unicorn.c +++ /dev/null @@ -1,169 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh , 2015 */ - -#include "hw/boards.h" -#include "hw/mips/mips.h" -#include "sysemu/cpus.h" -#include "unicorn.h" -#include "cpu.h" -#include "unicorn_common.h" -#include "uc_priv.h" - -#ifdef TARGET_MIPS64 -const int MIPS64_REGS_STORAGE_SIZE = offsetof(CPUMIPSState, tlb_table); -#else // MIPS32 -const int MIPS_REGS_STORAGE_SIZE = offsetof(CPUMIPSState, tlb_table); -#endif - -#ifdef TARGET_MIPS64 -typedef uint64_t mipsreg_t; -#else -typedef uint32_t mipsreg_t; -#endif - -static uint64_t mips_mem_redirect(uint64_t address) -{ - // kseg0 range masks off high address bit - if (address >= 0x80000000 && address <= 0x9fffffff) - return address & 0x7fffffff; - - // kseg1 range masks off top 3 address bits - if (address >= 0xa0000000 && address <= 0xbfffffff) { - return address & 0x1fffffff; - } - - // no redirect - return address; -} - -static void mips_set_pc(struct uc_struct *uc, uint64_t address) -{ - ((CPUMIPSState *)uc->current_cpu->env_ptr)->active_tc.PC = address; -} - - -void mips_release(void *ctx); -void mips_release(void *ctx) -{ - MIPSCPU* cpu; - int i; - TCGContext *tcg_ctx = (TCGContext *) ctx; - release_common(ctx); - cpu = MIPS_CPU(tcg_ctx->uc, tcg_ctx->uc->cpu); - g_free(cpu->env.tlb); - g_free(cpu->env.mvp); - - for (i = 0; i < MIPS_DSP_ACC; i++) { - g_free(tcg_ctx->cpu_HI[i]); - g_free(tcg_ctx->cpu_LO[i]); - } - - for (i = 0; i < 32; i++) { - g_free(tcg_ctx->cpu_gpr[i]); - } - - g_free(tcg_ctx->cpu_PC); - g_free(tcg_ctx->btarget); - g_free(tcg_ctx->bcond); - g_free(tcg_ctx->cpu_dspctrl); - - g_free(tcg_ctx->tb_ctx.tbs); -} - -void mips_reg_reset(struct uc_struct *uc) -{ - CPUArchState *env; - (void)uc; - env = uc->cpu->env_ptr; - 
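// zero all 32 general-purpose registers of the active thread context, then reset PC -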
memset(env->active_tc.gpr, 0, sizeof(env->active_tc.gpr)); - - env->active_tc.PC = 0; -} - -int mips_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - void *value = vals[i]; - if (regid >= UC_MIPS_REG_0 && regid <= UC_MIPS_REG_31) - *(mipsreg_t *)value = MIPS_CPU(uc, mycpu)->env.active_tc.gpr[regid - UC_MIPS_REG_0]; - else { - switch(regid) { - default: break; - case UC_MIPS_REG_PC: - *(mipsreg_t *)value = MIPS_CPU(uc, mycpu)->env.active_tc.PC; - break; - case UC_MIPS_REG_CP0_CONFIG3: - *(mipsreg_t *)value = MIPS_CPU(uc, mycpu)->env.CP0_Config3; - break; - case UC_MIPS_REG_CP0_USERLOCAL: - *(mipsreg_t *)value = MIPS_CPU(uc, mycpu)->env.active_tc.CP0_UserLocal; - break; - } - } - } - - return 0; -} - -int mips_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - const void *value = vals[i]; - if (regid >= UC_MIPS_REG_0 && regid <= UC_MIPS_REG_31) - MIPS_CPU(uc, mycpu)->env.active_tc.gpr[regid - UC_MIPS_REG_0] = *(mipsreg_t *)value; - else { - switch(regid) { - default: break; - case UC_MIPS_REG_PC: - MIPS_CPU(uc, mycpu)->env.active_tc.PC = *(mipsreg_t *)value; - // force to quit execution and flush TB - uc->quit_request = true; - uc_emu_stop(uc); - break; - case UC_MIPS_REG_CP0_CONFIG3: - MIPS_CPU(uc, mycpu)->env.CP0_Config3 = *(mipsreg_t *)value; - break; - case UC_MIPS_REG_CP0_USERLOCAL: - MIPS_CPU(uc, mycpu)->env.active_tc.CP0_UserLocal = *(mipsreg_t *)value; - break; - } - } - } - - return 0; -} - -DEFAULT_VISIBILITY -#ifdef TARGET_MIPS64 -#ifdef TARGET_WORDS_BIGENDIAN - void mips64_uc_init(struct uc_struct* uc) -#else - void mips64el_uc_init(struct uc_struct* uc) -#endif -#else // if TARGET_MIPS -#ifdef TARGET_WORDS_BIGENDIAN - void mips_uc_init(struct uc_struct* uc) -#else - void mipsel_uc_init(struct uc_struct* uc) -#endif -#endif -{ - register_accel_types(uc); - mips_cpu_register_types(uc); - mips_machine_init(uc); - uc->reg_read = mips_reg_read; - uc->reg_write = mips_reg_write; - uc->reg_reset = mips_reg_reset; - uc->release = mips_release; - uc->set_pc = mips_set_pc; - uc->mem_redirect = mips_mem_redirect; - uc_common_init(uc); -} diff --git a/qemu/target-mips/unicorn.h b/qemu/target-mips/unicorn.h deleted file mode 100644 index b1c6cac8..00000000 --- a/qemu/target-mips/unicorn.h +++ /dev/null @@ -1,23 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh , 2015 */ - -#ifndef UC_QEMU_TARGET_MIPS_H -#define UC_QEMU_TARGET_MIPS_H - -// functions to read & write registers -int mips_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); -int mips_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); - -void mips_reg_reset(struct uc_struct *uc); - -void mips_uc_init(struct uc_struct* uc); -void mipsel_uc_init(struct uc_struct* uc); -void mips64_uc_init(struct uc_struct* uc); -void mips64el_uc_init(struct uc_struct* uc); - -extern const int MIPS_REGS_STORAGE_SIZE_mips; -extern const int MIPS_REGS_STORAGE_SIZE_mipsel; -extern const int MIPS64_REGS_STORAGE_SIZE_mips64; -extern const int MIPS64_REGS_STORAGE_SIZE_mips64el; - -#endif diff --git a/qemu/target-sparc/Makefile.objs b/qemu/target-sparc/Makefile.objs deleted file mode 100644 index a04ffad4..00000000 --- a/qemu/target-sparc/Makefile.objs +++ /dev/null @@ -1,7 +0,0 @@ -obj-y += translate.o helper.o cpu.o 
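-# fop/cc/win/mmu/ldst helpers are common; the obj-$(TARGET_SPARC*) lines below select the per-ISA helpers and the Unicorn glue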
-obj-y += fop_helper.o cc_helper.o win_helper.o mmu_helper.o ldst_helper.o -obj-$(TARGET_SPARC) += int32_helper.o -obj-$(TARGET_SPARC64) += int64_helper.o -obj-$(TARGET_SPARC64) += vis_helper.o -obj-$(TARGET_SPARC) += unicorn.o -obj-$(TARGET_SPARC64) += unicorn64.o diff --git a/qemu/target-sparc/TODO b/qemu/target-sparc/TODO deleted file mode 100644 index b8c727e8..00000000 --- a/qemu/target-sparc/TODO +++ /dev/null @@ -1,88 +0,0 @@ -TODO-list: - -CPU common: -- Unimplemented features/bugs: - - Delay slot handling may fail sometimes (branch end of page, delay - slot next page) - - Atomical instructions - - CPU features should match real CPUs (also ASI selection) -- Optimizations/improvements: - - Condition code/branch handling like x86, also for FPU? - - Remove remaining explicit alignment checks - - Global register for regwptr, so that windowed registers can be - accessed directly - - Improve Sparc32plus addressing - - NPC/PC static optimisations (use JUMP_TB when possible)? (Is this - obsolete?) - - Synthetic instructions - - MMU model dependent on CPU model - - Select ASI helper at translation time (on V9 only if known) - - KQemu/KVM support for VM only - - Hardware breakpoint/watchpoint support - - Cache emulation mode - - Reverse-endian pages - - Faster FPU emulation - - Busy loop detection - -Sparc32 CPUs: -- Unimplemented features/bugs: - - Sun4/Sun4c MMUs - - Some V8 ASIs - -Sparc64 CPUs: -- Unimplemented features/bugs: - - Interrupt handling - - Secondary address space, other MMU functions - - Many V9/UA2005/UA2007 ASIs - - Rest of V9 instructions, missing VIS instructions - - IG/MG/AG vs. UA2007 globals - - Full hypervisor support - - SMP/CMT - - Sun4v CPUs - -Sun4: -- To be added - -Sun4c: -- A lot of unimplemented features -- Maybe split from Sun4m - -Sun4m: -- Unimplemented features/bugs: - - Hardware devices do not match real boards - - Floppy does not work - - CS4231: merge with cs4231a, add DMA - - Add cg6, bwtwo - - Arbitrary resolution support - - PCI for MicroSparc-IIe - - JavaStation machines - - SBus slot probing, FCode ROM support - - SMP probing support - - Interrupt routing does not match real HW - - SuSE 7.3 keyboard sometimes unresponsive - - Gentoo 2004.1 SMP does not work - - SS600MP ledma -> lebuffer - - Type 5 keyboard - - Less fixed hardware choices - - DBRI audio (Am7930) - - BPP parallel - - Diagnostic switch - - ESP PIO mode - -Sun4d: -- A lot of unimplemented features: - - SBI - - IO-unit -- Maybe split from Sun4m - -Sun4u: -- Unimplemented features/bugs: - - Interrupt controller - - PCI/IOMMU support (Simba, JIO, Tomatillo, Psycho, Schizo, Safari...) - - SMP - - Happy Meal Ethernet, flash, I2C, GPIO - - A lot of real machine types - -Sun4v: -- A lot of unimplemented features - - A lot of real machine types diff --git a/qemu/target-sparc/cpu-qom.h b/qemu/target-sparc/cpu-qom.h deleted file mode 100644 index 6374fe85..00000000 --- a/qemu/target-sparc/cpu-qom.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * QEMU SPARC CPU - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - * - */ -#ifndef QEMU_SPARC_CPU_QOM_H -#define QEMU_SPARC_CPU_QOM_H - -#include "qom/cpu.h" -#include "cpu.h" - -#ifdef TARGET_SPARC64 -#define TYPE_SPARC_CPU "sparc64-cpu" -#else -#define TYPE_SPARC_CPU "sparc-cpu" -#endif - -#define SPARC_CPU_CLASS(uc, klass) \ - OBJECT_CLASS_CHECK(uc, SPARCCPUClass, (klass), TYPE_SPARC_CPU) -#define SPARC_CPU(uc, obj) ((SPARCCPU *)obj) -#define SPARC_CPU_GET_CLASS(uc, obj) \ - OBJECT_GET_CLASS(uc, SPARCCPUClass, (obj), TYPE_SPARC_CPU) - -/** - * SPARCCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. - * - * A SPARC CPU model. - */ -typedef struct SPARCCPUClass { - /*< private >*/ - CPUClass parent_class; - /*< public >*/ - - DeviceRealize parent_realize; - void (*parent_reset)(CPUState *cpu); -} SPARCCPUClass; - -/** - * SPARCCPU: - * @env: #CPUSPARCState - * - * A SPARC CPU. - */ -typedef struct SPARCCPU { - /*< private >*/ - CPUState parent_obj; - /*< public >*/ - - CPUSPARCState env; -} SPARCCPU; - -static inline SPARCCPU *sparc_env_get_cpu(CPUSPARCState *env) -{ - return container_of(env, SPARCCPU, env); -} - -#define ENV_GET_CPU(e) CPU(sparc_env_get_cpu(e)) - -#define ENV_OFFSET offsetof(SPARCCPU, env) - -void sparc_cpu_do_interrupt(CPUState *cpu); -void sparc_cpu_dump_state(CPUState *cpu, FILE *f, - fprintf_function cpu_fprintf, int flags); -hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -int sparc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); -int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu, - vaddr addr, int is_write, - int is_user, uintptr_t retaddr); - -#endif diff --git a/qemu/target-sparc/cpu.c b/qemu/target-sparc/cpu.c deleted file mode 100644 index 9c9c5ead..00000000 --- a/qemu/target-sparc/cpu.c +++ /dev/null @@ -1,923 +0,0 @@ -/* - * Sparc CPU init helpers - * - * Copyright (c) 2003-2005 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
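sparc_env_get_cpu() above is the usual QEMU container_of() idiom: given a pointer to the embedded env member, subtract the member's offset to recover the enclosing SPARCCPU. A standalone sketch of that pointer arithmetic, with hypothetical Inner/Outer types standing in for CPUSPARCState/SPARCCPU:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    typedef struct { int pc; } Inner;            /* stands in for CPUSPARCState */
    typedef struct { int id; Inner env; } Outer; /* stands in for SPARCCPU */

    static Outer *outer_from_env(Inner *env)
    {
        return container_of(env, Outer, env);
    }

    int main(void)
    {
        Outer cpu = { 7, { 42 } };
        Inner *env = &cpu.env;

        /* Recovers &cpu from &cpu.env, exactly like sparc_env_get_cpu(). */
        printf("id = %d\n", outer_from_env(env)->id); /* prints 7 */
        return 0;
    }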
- */ - -#include "cpu.h" -#include "hw/sparc/sparc.h" - -//#define DEBUG_FEATURES - -static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model); - -/* CPUClass::reset() */ -static void sparc_cpu_reset(CPUState *s) -{ - SPARCCPU *cpu = SPARC_CPU(s->uc, s); - SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(s->uc, cpu); - CPUSPARCState *env = &cpu->env; - - scc->parent_reset(s); - - memset(env, 0, offsetof(CPUSPARCState, version)); - tlb_flush(s, 1); - env->cwp = 0; -#ifndef TARGET_SPARC64 - env->wim = 1; -#endif - env->regwptr = env->regbase + (env->cwp * 16); - CC_OP = CC_OP_FLAGS; -#if defined(CONFIG_USER_ONLY) -#ifdef TARGET_SPARC64 - env->cleanwin = env->nwindows - 2; - env->cansave = env->nwindows - 2; - env->pstate = PS_RMO | PS_PEF | PS_IE; - env->asi = 0x82; /* Primary no-fault */ -#endif -#else -#if !defined(TARGET_SPARC64) - env->psret = 0; - env->psrs = 1; - env->psrps = 1; -#endif -#ifdef TARGET_SPARC64 - env->pstate = PS_PRIV|PS_RED|PS_PEF|PS_AG; - env->hpstate = cpu_has_hypervisor(env) ? HS_PRIV : 0; - env->tl = env->maxtl; - cpu_tsptr(env)->tt = TT_POWER_ON_RESET; - env->lsu = 0; -#else - env->mmuregs[0] &= ~(MMU_E | MMU_NF); - env->mmuregs[0] |= env->def->mmu_bm; -#endif - env->pc = 0; - env->npc = env->pc + 4; -#endif - env->cache_control = 0; -} - -static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - if (interrupt_request & CPU_INTERRUPT_HARD) { - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); - CPUSPARCState *env = &cpu->env; - - if (cpu_interrupts_enabled(env) && env->interrupt_index > 0) { - int pil = env->interrupt_index & 0xf; - int type = env->interrupt_index & 0xf0; - - if (type != TT_EXTINT || cpu_pil_allowed(env, pil)) { - cs->exception_index = env->interrupt_index; - sparc_cpu_do_interrupt(cs); - return true; - } - } - } - return false; -} - -static int cpu_sparc_register(struct uc_struct *uc, SPARCCPU *cpu, const char *cpu_model) -{ - CPUClass *cc = CPU_GET_CLASS(uc, cpu); - CPUSPARCState *env = &cpu->env; - char *s = g_strdup(cpu_model); - char *featurestr, *name = strtok(s, ","); - sparc_def_t def1, *def = &def1; - Error *err = NULL; - - if (cpu_sparc_find_by_name(def, name) < 0) { - g_free(s); - return -1; - } - - env->def = g_new0(sparc_def_t, 1); - memcpy(env->def, def, sizeof(*def)); - - featurestr = strtok(NULL, ","); - cc->parse_features(CPU(cpu), featurestr, &err); - g_free(s); - if (err) { - //error_report("%s", error_get_pretty(err)); - error_free(err); - return -1; - } - - env->version = def->iu_version; - env->fsr = def->fpu_version; - env->nwindows = def->nwindows; -#if !defined(TARGET_SPARC64) - env->mmuregs[0] |= def->mmu_version; - cpu_sparc_set_id(env, 0); - env->mxccregs[7] |= def->mxcc_version; -#else - env->mmu_version = def->mmu_version; - env->maxtl = def->maxtl; - env->version |= def->maxtl << 8; - env->version |= def->nwindows - 1; -#endif - return 0; -} - -SPARCCPU *cpu_sparc_init(struct uc_struct *uc, const char *cpu_model) -{ - SPARCCPU *cpu; - - cpu = SPARC_CPU(uc, object_new(uc, TYPE_SPARC_CPU)); - - if (cpu_sparc_register(uc, cpu, cpu_model) < 0) { - object_unref(uc, OBJECT(cpu)); - return NULL; - } - - object_property_set_bool(uc, OBJECT(cpu), true, "realized", NULL); - - return cpu; -} - -void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu) -{ -#if !defined(TARGET_SPARC64) - env->mxccregs[7] = ((cpu + 8) & 0xf) << 24; -#endif -} - -static const sparc_def_t sparc_defs[] = { -#ifdef TARGET_SPARC64 - { - "Fujitsu Sparc64", - ((0x04ULL << 48) | (0x02ULL << 32) | (0ULL << 24)), - 0x00000000, - 
mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 4, - 4, - }, - { - "Fujitsu Sparc64 III", - ((0x04ULL << 48) | (0x03ULL << 32) | (0ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 5, - 4, - }, - { - "Fujitsu Sparc64 IV", - ((0x04ULL << 48) | (0x04ULL << 32) | (0ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "Fujitsu Sparc64 V", - ((0x04ULL << 48) | (0x05ULL << 32) | (0x51ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "TI UltraSparc I", - ((0x17ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "TI UltraSparc II", - ((0x17ULL << 48) | (0x11ULL << 32) | (0x20ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "TI UltraSparc IIi", - ((0x17ULL << 48) | (0x12ULL << 32) | (0x91ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "TI UltraSparc IIe", - ((0x17ULL << 48) | (0x13ULL << 32) | (0x14ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "Sun UltraSparc III", - ((0x3eULL << 48) | (0x14ULL << 32) | (0x34ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "Sun UltraSparc III Cu", - ((0x3eULL << 48) | (0x15ULL << 32) | (0x41ULL << 24)), - 0x00000000, - mmu_us_3, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "Sun UltraSparc IIIi", - ((0x3eULL << 48) | (0x16ULL << 32) | (0x34ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "Sun UltraSparc IV", - ((0x3eULL << 48) | (0x18ULL << 32) | (0x31ULL << 24)), - 0x00000000, - mmu_us_4, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "Sun UltraSparc IV+", - ((0x3eULL << 48) | (0x19ULL << 32) | (0x22ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES | CPU_FEATURE_CMT, - 8, - 5, - }, - { - "Sun UltraSparc IIIi+", - ((0x3eULL << 48) | (0x22ULL << 32) | (0ULL << 24)), - 0x00000000, - mmu_us_3, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, - { - "Sun UltraSparc T1", - /* defined in sparc_ifu_fdp.v and ctu.h */ - ((0x3eULL << 48) | (0x23ULL << 32) | (0x02ULL << 24)), - 0x00000000, - mmu_sun4v, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT - | CPU_FEATURE_GL, - 8, - 6, - }, - { - "Sun UltraSparc T2", - /* defined in tlu_asi_ctl.v and n2_revid_cust.v */ - ((0x3eULL << 48) | (0x24ULL << 32) | (0x02ULL << 24)), - 0x00000000, - mmu_sun4v, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT - | CPU_FEATURE_GL, - 8, - 6, - }, - { - "NEC UltraSparc I", - ((0x22ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)), - 0x00000000, - mmu_us_12, - 0,0,0,0,0,0, - CPU_DEFAULT_FEATURES, - 8, - 5, - }, -#else - { - "Fujitsu MB86904", - 0x04 << 24, /* Impl 0, ver 4 */ - 4 << 17, /* FPU version 4 (Meiko) */ - 0x04 << 24, /* Impl 0, ver 4 */ - 0x00004000, - 0x00ffffc0, - 0x000000ff, - 0x00016fff, - 0x00ffffff, - 0, - CPU_DEFAULT_FEATURES, - 8, - 0, - }, - { - "Fujitsu MB86907", - 0x05 << 24, /* Impl 0, ver 5 */ - 4 << 17, /* FPU version 4 (Meiko) */ - 0x05 << 24, /* Impl 0, ver 5 */ - 0x00004000, - 0xffffffc0, - 0x000000ff, - 0x00016fff, - 0xffffffff, - 0, - CPU_DEFAULT_FEATURES, - 8, - 0, - }, - { - "TI MicroSparc I", - 0x41000000, - 4 << 17, - 0x41000000, - 0x00004000, - 0x007ffff0, - 0x0000003f, - 0x00016fff, - 
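Each 64-bit iu_version in the SPARC64 half of this table is a pre-packed V9 VER register: manufacturer in bits 63:48, implementation in 47:32, mask revision in 31:24; cpu_sparc_register() later ORs maxtl into bits 15:8 and nwindows - 1 into the maxwin field. A sketch that packs the "TI UltraSparc IIi" entry and pulls the fields back out (field layout taken from the V9 VER definition, not from this patch):

    #include <stdint.h>
    #include <stdio.h>

    /* V9 VER register layout used by the iu_version entries above. */
    static uint64_t pack_ver(uint64_t manuf, uint64_t impl, uint64_t mask,
                             uint64_t maxtl, uint64_t maxwin)
    {
        return (manuf << 48) | (impl << 32) | (mask << 24) |
               (maxtl << 8) | maxwin;
    }

    int main(void)
    {
        /* "TI UltraSparc IIi": manuf 0x17, impl 0x12, mask 0x91, plus
         * maxtl = 5 and nwindows - 1 = 7 filled in at registration time. */
        uint64_t ver   = pack_ver(0x17, 0x12, 0x91, 5, 7);
        uint64_t manuf = (ver >> 48) & 0xffff;
        uint64_t impl  = (ver >> 32) & 0xffff;
        uint64_t mask  = (ver >> 24) & 0xff;

        printf("ver = 0x%016llx\n", (unsigned long long)ver);
        printf("manuf=0x%llx impl=0x%llx mask=0x%llx\n",
               (unsigned long long)manuf, (unsigned long long)impl,
               (unsigned long long)mask);
        return 0;
    }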
0x0000003f, - 0, - CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL | - CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | - CPU_FEATURE_FMUL, - 7, - 0, - }, - { - "TI MicroSparc II", - 0x42000000, - 4 << 17, - 0x02000000, - 0x00004000, - 0x00ffffc0, - 0x000000ff, - 0x00016fff, - 0x00ffffff, - 0, - CPU_DEFAULT_FEATURES, - 8, - 0, - }, - { - "TI MicroSparc IIep", - 0x42000000, - 4 << 17, - 0x04000000, - 0x00004000, - 0x00ffffc0, - 0x000000ff, - 0x00016bff, - 0x00ffffff, - 0, - CPU_DEFAULT_FEATURES, - 8, - 0, - }, - { - "TI SuperSparc 40", /* STP1020NPGA */ - 0x41000000, /* SuperSPARC 2.x */ - 0 << 17, - 0x00000800, /* SuperSPARC 2.x, no MXCC */ - 0x00002000, - 0xffffffc0, - 0x0000ffff, - 0xffffffff, - 0xffffffff, - 0, - CPU_DEFAULT_FEATURES, - 8, - 0, - }, - { - "TI SuperSparc 50", /* STP1020PGA */ - 0x40000000, /* SuperSPARC 3.x */ - 0 << 17, - 0x01000800, /* SuperSPARC 3.x, no MXCC */ - 0x00002000, - 0xffffffc0, - 0x0000ffff, - 0xffffffff, - 0xffffffff, - 0, - CPU_DEFAULT_FEATURES, - 8, - 0, - }, - { - "TI SuperSparc 51", - 0x40000000, /* SuperSPARC 3.x */ - 0 << 17, - 0x01000000, /* SuperSPARC 3.x, MXCC */ - 0x00002000, - 0xffffffc0, - 0x0000ffff, - 0xffffffff, - 0xffffffff, - 0x00000104, - CPU_DEFAULT_FEATURES, - 8, - 0, - }, - { - "TI SuperSparc 60", /* STP1020APGA */ - 0x40000000, /* SuperSPARC 3.x */ - 0 << 17, - 0x01000800, /* SuperSPARC 3.x, no MXCC */ - 0x00002000, - 0xffffffc0, - 0x0000ffff, - 0xffffffff, - 0xffffffff, - 0, - CPU_DEFAULT_FEATURES, - 8, - 0, - }, - { - "TI SuperSparc 61", - 0x44000000, /* SuperSPARC 3.x */ - 0 << 17, - 0x01000000, /* SuperSPARC 3.x, MXCC */ - 0x00002000, - 0xffffffc0, - 0x0000ffff, - 0xffffffff, - 0xffffffff, - 0x00000104, - CPU_DEFAULT_FEATURES, - 8, - 0, - }, - { - "TI SuperSparc II", - 0x40000000, /* SuperSPARC II 1.x */ - 0 << 17, - 0x08000000, /* SuperSPARC II 1.x, MXCC */ - 0x00002000, - 0xffffffc0, - 0x0000ffff, - 0xffffffff, - 0xffffffff, - 0x00000104, - CPU_DEFAULT_FEATURES, - 8, - 0, - }, - { - "LEON2", - 0xf2000000, - 4 << 17, /* FPU version 4 (Meiko) */ - 0xf2000000, - 0x00004000, - 0x007ffff0, - 0x0000003f, - 0xffffffff, - 0xffffffff, - 0, - CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN, - 8, - 0, - }, - { - "LEON3", - 0xf3000000, - 4 << 17, /* FPU version 4 (Meiko) */ - 0xf3000000, - 0x00000000, - 0xfffffffc, - 0x000000ff, - 0xffffffff, - 0xffffffff, - 0, - CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN | - CPU_FEATURE_ASR17 | CPU_FEATURE_CACHE_CTRL | CPU_FEATURE_POWERDOWN | - CPU_FEATURE_CASA, - 8, - 0, - }, -#endif -}; - -static const char * const feature_name[] = { - "float", - "float128", - "swap", - "mul", - "div", - "flush", - "fsqrt", - "fmul", - "vis1", - "vis2", - "fsmuld", - "hypv", - "cmt", - "gl", -}; - -#if 0 -static void print_features(FILE *f, fprintf_function cpu_fprintf, - uint32_t features, const char *prefix) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(feature_name); i++) { - if (feature_name[i] && (features & (1 << i))) { - if (prefix) { - (*cpu_fprintf)(f, "%s", prefix); - } - (*cpu_fprintf)(f, "%s ", feature_name[i]); - } - } -} -#endif - -static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(feature_name); i++) { - if (feature_name[i] && !strcmp(flagname, feature_name[i])) { - *features |= 1 << i; - return; - } - } - //error_report("CPU feature %s not found", flagname); -} - -static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *name) -{ - unsigned int i; - const sparc_def_t *def = NULL; - - for (i 
= 0; i < ARRAY_SIZE(sparc_defs); i++) { - if (strcasecmp(name, sparc_defs[i].name) == 0) { - def = &sparc_defs[i]; - } - } - if (!def) { - return -1; - } - memcpy(cpu_def, def, sizeof(*def)); - return 0; -} - -static void sparc_cpu_parse_features(CPUState *cs, char *features, - Error **errp) -{ - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); - sparc_def_t *cpu_def = cpu->env.def; - char *featurestr; - uint32_t plus_features = 0; - uint32_t minus_features = 0; - uint64_t iu_version; - uint32_t fpu_version, mmu_version, nwindows; - - featurestr = features ? strtok(features, ",") : NULL; - while (featurestr) { - char *val; - - if (featurestr[0] == '+') { - add_flagname_to_bitmaps(featurestr + 1, &plus_features); - } else if (featurestr[0] == '-') { - add_flagname_to_bitmaps(featurestr + 1, &minus_features); - } else if ((val = strchr(featurestr, '='))) { - *val = 0; val++; - if (!strcmp(featurestr, "iu_version")) { - char *err; - - iu_version = strtoll(val, &err, 0); - if (!*val || *err) { - error_setg(errp, "bad numerical value %s", val); - return; - } - cpu_def->iu_version = iu_version; -#ifdef DEBUG_FEATURES - fprintf(stderr, "iu_version %" PRIx64 "\n", iu_version); -#endif - } else if (!strcmp(featurestr, "fpu_version")) { - char *err; - - fpu_version = strtol(val, &err, 0); - if (!*val || *err) { - error_setg(errp, "bad numerical value %s", val); - return; - } - cpu_def->fpu_version = fpu_version; -#ifdef DEBUG_FEATURES - fprintf(stderr, "fpu_version %x\n", fpu_version); -#endif - } else if (!strcmp(featurestr, "mmu_version")) { - char *err; - - mmu_version = strtol(val, &err, 0); - if (!*val || *err) { - error_setg(errp, "bad numerical value %s", val); - return; - } - cpu_def->mmu_version = mmu_version; -#ifdef DEBUG_FEATURES - fprintf(stderr, "mmu_version %x\n", mmu_version); -#endif - } else if (!strcmp(featurestr, "nwindows")) { - char *err; - - nwindows = strtol(val, &err, 0); - if (!*val || *err || nwindows > MAX_NWINDOWS || - nwindows < MIN_NWINDOWS) { - error_setg(errp, "bad numerical value %s", val); - return; - } - cpu_def->nwindows = nwindows; -#ifdef DEBUG_FEATURES - fprintf(stderr, "nwindows %d\n", nwindows); -#endif - } else { - error_setg(errp, "unrecognized feature %s", featurestr); - return; - } - } else { - error_setg(errp, "feature string `%s' not in format " - "(+feature|-feature|feature=xyz)", featurestr); - return; - } - featurestr = strtok(NULL, ","); - } - cpu_def->features |= plus_features; - cpu_def->features &= ~minus_features; -#ifdef DEBUG_FEATURES - print_features(stderr, fprintf, cpu_def->features, NULL); -#endif -} - -#if 0 -void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) { - (*cpu_fprintf)(f, "Sparc %16s IU " TARGET_FMT_lx - " FPU %08x MMU %08x NWINS %d ", - sparc_defs[i].name, - sparc_defs[i].iu_version, - sparc_defs[i].fpu_version, - sparc_defs[i].mmu_version, - sparc_defs[i].nwindows); - print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES & - ~sparc_defs[i].features, "-"); - print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES & - sparc_defs[i].features, "+"); - (*cpu_fprintf)(f, "\n"); - } - (*cpu_fprintf)(f, "Default CPU feature flags (use '-' to remove): "); - print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES, NULL); - (*cpu_fprintf)(f, "\n"); - (*cpu_fprintf)(f, "Available CPU feature flags (use '+' to add): "); - print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES, NULL); - (*cpu_fprintf)(f, "\n"); - (*cpu_fprintf)(f, "Numerical features (use '=' to set): iu_version " - 
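sparc_cpu_parse_features() above accumulates two bitmaps, plus_features and minus_features, and only applies them at the end: features |= plus; features &= ~minus. A condensed sketch of that flag handling, with a shortened name table for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static const char *const names[] = { "float", "swap", "mul", "div", "vis1" };

    static uint32_t flag_bit(const char *name)
    {
        for (unsigned i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
            if (!strcmp(name, names[i])) {
                return 1u << i;
            }
        }
        return 0; /* the original silently ignores unknown flag names */
    }

    int main(void)
    {
        const char *args[] = { "+vis1", "-mul" };
        uint32_t features = 0x0f; /* pretend default: float,swap,mul,div */
        uint32_t plus = 0, minus = 0;

        for (unsigned i = 0; i < 2; i++) {
            if (args[i][0] == '+') {
                plus |= flag_bit(args[i] + 1);
            } else if (args[i][0] == '-') {
                minus |= flag_bit(args[i] + 1);
            }
        }
        features |= plus;     /* same order as the original: add, then remove */
        features &= ~minus;
        printf("features = 0x%x\n", features); /* 0x1b: vis1 on, mul off */
        return 0;
    }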
"fpu_version mmu_version nwindows\n"); -} - -static void cpu_print_cc(FILE *f, fprintf_function cpu_fprintf, - uint32_t cc) -{ - cpu_fprintf(f, "%c%c%c%c", cc & PSR_NEG ? 'N' : '-', - cc & PSR_ZERO ? 'Z' : '-', cc & PSR_OVF ? 'V' : '-', - cc & PSR_CARRY ? 'C' : '-'); -} - -#ifdef TARGET_SPARC64 -#define REGS_PER_LINE 4 -#else -#define REGS_PER_LINE 8 -#endif - -void sparc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, - int flags) -{ - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; - int i, x; - - cpu_fprintf(f, "pc: " TARGET_FMT_lx " npc: " TARGET_FMT_lx "\n", env->pc, - env->npc); - - for (i = 0; i < 8; i++) { - if (i % REGS_PER_LINE == 0) { - cpu_fprintf(f, "%%g%d-%d:", i, i + REGS_PER_LINE - 1); - } - cpu_fprintf(f, " " TARGET_FMT_lx, env->gregs[i]); - if (i % REGS_PER_LINE == REGS_PER_LINE - 1) { - cpu_fprintf(f, "\n"); - } - } - for (x = 0; x < 3; x++) { - for (i = 0; i < 8; i++) { - if (i % REGS_PER_LINE == 0) { - cpu_fprintf(f, "%%%c%d-%d: ", - x == 0 ? 'o' : (x == 1 ? 'l' : 'i'), - i, i + REGS_PER_LINE - 1); - } - cpu_fprintf(f, TARGET_FMT_lx " ", env->regwptr[i + x * 8]); - if (i % REGS_PER_LINE == REGS_PER_LINE - 1) { - cpu_fprintf(f, "\n"); - } - } - } - - for (i = 0; i < TARGET_DPREGS; i++) { - if ((i & 3) == 0) { - cpu_fprintf(f, "%%f%02d: ", i * 2); - } - cpu_fprintf(f, " %016" PRIx64, env->fpr[i].ll); - if ((i & 3) == 3) { - cpu_fprintf(f, "\n"); - } - } -#ifdef TARGET_SPARC64 - cpu_fprintf(f, "pstate: %08x ccr: %02x (icc: ", env->pstate, - (unsigned)cpu_get_ccr(env)); - cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << PSR_CARRY_SHIFT); - cpu_fprintf(f, " xcc: "); - cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << (PSR_CARRY_SHIFT - 4)); - cpu_fprintf(f, ") asi: %02x tl: %d pil: %x\n", env->asi, env->tl, - env->psrpil); - cpu_fprintf(f, "cansave: %d canrestore: %d otherwin: %d wstate: %d " - "cleanwin: %d cwp: %d\n", - env->cansave, env->canrestore, env->otherwin, env->wstate, - env->cleanwin, env->nwindows - 1 - env->cwp); - cpu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx " fprs: " - TARGET_FMT_lx "\n", env->fsr, env->y, env->fprs); -#else - cpu_fprintf(f, "psr: %08x (icc: ", cpu_get_psr(env)); - cpu_print_cc(f, cpu_fprintf, cpu_get_psr(env)); - cpu_fprintf(f, " SPE: %c%c%c) wim: %08x\n", env->psrs ? 'S' : '-', - env->psrps ? 'P' : '-', env->psret ? 
'E' : '-', - env->wim); - cpu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx "\n", - env->fsr, env->y); -#endif - cpu_fprintf(f, "\n"); -} -#endif - -static void sparc_cpu_set_pc(CPUState *cs, vaddr value) -{ - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); - - cpu->env.pc = value; - cpu->env.npc = value + 4; -} - -static void sparc_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) -{ - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); - - cpu->env.pc = tb->pc; - cpu->env.npc = tb->cs_base; -} - -static bool sparc_cpu_has_work(CPUState *cs) -{ - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); - CPUSPARCState *env = &cpu->env; - - return (cs->interrupt_request & CPU_INTERRUPT_HARD) && - cpu_interrupts_enabled(env); -} - -static int sparc_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp) -{ - SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(uc, dev); -#if defined(CONFIG_USER_ONLY) - SPARCCPU *cpu = SPARC_CPU(uc, dev); - CPUSPARCState *env = &cpu->env; - - if ((env->def->features & CPU_FEATURE_FLOAT)) { - env->def->features |= CPU_FEATURE_FLOAT128; - } -#endif - - qemu_init_vcpu(CPU(dev)); - - scc->parent_realize(uc, dev, errp); - - return 0; -} - -static void sparc_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - CPUState *cs = CPU(obj); - SPARCCPU *cpu = SPARC_CPU(uc, obj); - CPUSPARCState *env = &cpu->env; - - cs->env_ptr = env; - cpu_exec_init(env, opaque); - - if (tcg_enabled(uc)) { - gen_intermediate_code_init(env); - } -} - -static void sparc_cpu_uninitfn(struct uc_struct *uc, Object *obj, void *opaque) -{ - SPARCCPU *cpu = SPARC_CPU(uc, obj); - CPUSPARCState *env = &cpu->env; - - g_free(env->def); -} - -static void sparc_cpu_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) -{ - SPARCCPUClass *scc = SPARC_CPU_CLASS(uc, oc); - CPUClass *cc = CPU_CLASS(uc, oc); - DeviceClass *dc = DEVICE_CLASS(uc, oc); - - scc->parent_realize = dc->realize; - dc->realize = sparc_cpu_realizefn; - - scc->parent_reset = cc->reset; - cc->reset = sparc_cpu_reset; - - cc->parse_features = sparc_cpu_parse_features; - cc->has_work = sparc_cpu_has_work; - cc->do_interrupt = sparc_cpu_do_interrupt; - cc->cpu_exec_interrupt = sparc_cpu_exec_interrupt; - //cc->dump_state = sparc_cpu_dump_state; -#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) - cc->memory_rw_debug = sparc_cpu_memory_rw_debug; -#endif - cc->set_pc = sparc_cpu_set_pc; - cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb; -#ifdef CONFIG_USER_ONLY - cc->handle_mmu_fault = sparc_cpu_handle_mmu_fault; -#else - cc->do_unassigned_access = sparc_cpu_unassigned_access; - cc->do_unaligned_access = sparc_cpu_do_unaligned_access; - cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug; -#endif -} - -void sparc_cpu_register_types(void *opaque) -{ - const TypeInfo sparc_cpu_type_info = { - TYPE_SPARC_CPU, - TYPE_CPU, - - sizeof(SPARCCPUClass), - sizeof(SPARCCPU), - opaque, - - sparc_cpu_initfn, - NULL, - sparc_cpu_uninitfn, - - NULL, - - sparc_cpu_class_init, - NULL, - NULL, - - false, - }; - - //printf(">>> sparc_cpu_register_types\n"); - type_register_static(opaque, &sparc_cpu_type_info); -} diff --git a/qemu/target-sparc/helper.h b/qemu/target-sparc/helper.h deleted file mode 100644 index 503e1e5c..00000000 --- a/qemu/target-sparc/helper.h +++ /dev/null @@ -1,177 +0,0 @@ -DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) -DEF_HELPER_1(power_down, void, env) - -#ifndef TARGET_SPARC64 -DEF_HELPER_1(rett, void, env) -DEF_HELPER_2(wrpsr, void, env, tl) -DEF_HELPER_1(rdpsr, tl, env) -#else -DEF_HELPER_2(wrpil, void, 
env, tl) -DEF_HELPER_2(wrpstate, void, env, tl) -DEF_HELPER_1(done, void, env) -DEF_HELPER_1(retry, void, env) -DEF_HELPER_1(flushw, void, env) -DEF_HELPER_1(saved, void, env) -DEF_HELPER_1(restored, void, env) -DEF_HELPER_1(rdccr, tl, env) -DEF_HELPER_2(wrccr, void, env, tl) -DEF_HELPER_1(rdcwp, tl, env) -DEF_HELPER_2(wrcwp, void, env, tl) -DEF_HELPER_FLAGS_2(array8, TCG_CALL_NO_RWG_SE, tl, tl, tl) -DEF_HELPER_1(popc, tl, tl) -DEF_HELPER_4(ldda_asi, void, env, tl, int, int) -DEF_HELPER_5(ldf_asi, void, env, tl, int, int, int) -DEF_HELPER_5(stf_asi, void, env, tl, int, int, int) -DEF_HELPER_5(casx_asi, tl, env, tl, tl, tl, i32) -DEF_HELPER_2(set_softint, void, env, i64) -DEF_HELPER_2(clear_softint, void, env, i64) -DEF_HELPER_2(write_softint, void, env, i64) -DEF_HELPER_2(tick_set_count, void, ptr, i64) -DEF_HELPER_1(tick_get_count, i64, ptr) -DEF_HELPER_2(tick_set_limit, void, ptr, i64) -#endif -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) -DEF_HELPER_5(cas_asi, tl, env, tl, tl, tl, i32) -#endif -DEF_HELPER_3(check_align, void, env, tl, i32) -DEF_HELPER_1(debug, void, env) -DEF_HELPER_1(save, void, env) -DEF_HELPER_1(restore, void, env) -DEF_HELPER_3(udiv, tl, env, tl, tl) -DEF_HELPER_3(udiv_cc, tl, env, tl, tl) -DEF_HELPER_3(sdiv, tl, env, tl, tl) -DEF_HELPER_3(sdiv_cc, tl, env, tl, tl) -DEF_HELPER_3(taddcctv, tl, env, tl, tl) -DEF_HELPER_3(tsubcctv, tl, env, tl, tl) -#ifdef TARGET_SPARC64 -DEF_HELPER_3(sdivx, s64, env, s64, s64) -DEF_HELPER_3(udivx, i64, env, i64, i64) -#endif -DEF_HELPER_3(ldqf, void, env, tl, int) -DEF_HELPER_3(stqf, void, env, tl, int) -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) -DEF_HELPER_5(ld_asi, i64, env, tl, int, int, int) -DEF_HELPER_5(st_asi, void, env, tl, i64, int, int) -#endif -DEF_HELPER_2(ldfsr, void, env, i32) -DEF_HELPER_FLAGS_1(fabss, TCG_CALL_NO_RWG_SE, f32, f32) -DEF_HELPER_2(fsqrts, f32, env, f32) -DEF_HELPER_2(fsqrtd, f64, env, f64) -DEF_HELPER_3(fcmps, void, env, f32, f32) -DEF_HELPER_3(fcmpd, void, env, f64, f64) -DEF_HELPER_3(fcmpes, void, env, f32, f32) -DEF_HELPER_3(fcmped, void, env, f64, f64) -DEF_HELPER_1(fsqrtq, void, env) -DEF_HELPER_1(fcmpq, void, env) -DEF_HELPER_1(fcmpeq, void, env) -#ifdef TARGET_SPARC64 -DEF_HELPER_2(ldxfsr, void, env, i64) -DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_NO_RWG_SE, f64, f64) -DEF_HELPER_3(fcmps_fcc1, void, env, f32, f32) -DEF_HELPER_3(fcmps_fcc2, void, env, f32, f32) -DEF_HELPER_3(fcmps_fcc3, void, env, f32, f32) -DEF_HELPER_3(fcmpd_fcc1, void, env, f64, f64) -DEF_HELPER_3(fcmpd_fcc2, void, env, f64, f64) -DEF_HELPER_3(fcmpd_fcc3, void, env, f64, f64) -DEF_HELPER_3(fcmpes_fcc1, void, env, f32, f32) -DEF_HELPER_3(fcmpes_fcc2, void, env, f32, f32) -DEF_HELPER_3(fcmpes_fcc3, void, env, f32, f32) -DEF_HELPER_3(fcmped_fcc1, void, env, f64, f64) -DEF_HELPER_3(fcmped_fcc2, void, env, f64, f64) -DEF_HELPER_3(fcmped_fcc3, void, env, f64, f64) -DEF_HELPER_1(fabsq, void, env) -DEF_HELPER_1(fcmpq_fcc1, void, env) -DEF_HELPER_1(fcmpq_fcc2, void, env) -DEF_HELPER_1(fcmpq_fcc3, void, env) -DEF_HELPER_1(fcmpeq_fcc1, void, env) -DEF_HELPER_1(fcmpeq_fcc2, void, env) -DEF_HELPER_1(fcmpeq_fcc3, void, env) -#endif -DEF_HELPER_2(raise_exception, noreturn, env, int) -#define F_HELPER_0_1(name) DEF_HELPER_1(f ## name, void, env) - -DEF_HELPER_3(faddd, f64, env, f64, f64) -DEF_HELPER_3(fsubd, f64, env, f64, f64) -DEF_HELPER_3(fmuld, f64, env, f64, f64) -DEF_HELPER_3(fdivd, f64, env, f64, f64) -F_HELPER_0_1(addq) -F_HELPER_0_1(subq) -F_HELPER_0_1(mulq) -F_HELPER_0_1(divq) - -DEF_HELPER_3(fadds, f32, env, f32, 
f32) -DEF_HELPER_3(fsubs, f32, env, f32, f32) -DEF_HELPER_3(fmuls, f32, env, f32, f32) -DEF_HELPER_3(fdivs, f32, env, f32, f32) - -DEF_HELPER_3(fsmuld, f64, env, f32, f32) -DEF_HELPER_3(fdmulq, void, env, f64, f64) - -DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_NO_RWG_SE, f32, f32) -DEF_HELPER_2(fitod, f64, env, s32) -DEF_HELPER_2(fitoq, void, env, s32) - -DEF_HELPER_2(fitos, f32, env, s32) - -#ifdef TARGET_SPARC64 -DEF_HELPER_FLAGS_1(fnegd, TCG_CALL_NO_RWG_SE, f64, f64) -DEF_HELPER_1(fnegq, void, env) -DEF_HELPER_2(fxtos, f32, env, s64) -DEF_HELPER_2(fxtod, f64, env, s64) -DEF_HELPER_2(fxtoq, void, env, s64) -#endif -DEF_HELPER_2(fdtos, f32, env, f64) -DEF_HELPER_2(fstod, f64, env, f32) -DEF_HELPER_1(fqtos, f32, env) -DEF_HELPER_2(fstoq, void, env, f32) -DEF_HELPER_1(fqtod, f64, env) -DEF_HELPER_2(fdtoq, void, env, f64) -DEF_HELPER_2(fstoi, s32, env, f32) -DEF_HELPER_2(fdtoi, s32, env, f64) -DEF_HELPER_1(fqtoi, s32, env) -#ifdef TARGET_SPARC64 -DEF_HELPER_2(fstox, s64, env, f32) -DEF_HELPER_2(fdtox, s64, env, f64) -DEF_HELPER_1(fqtox, s64, env) - -DEF_HELPER_FLAGS_2(fpmerge, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(fmul8x16, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(fmul8x16al, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(fmul8x16au, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(fmul8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(fmul8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(fmuld8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(fmuld8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(fexpand, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_3(pdist, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) -DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_NO_RWG_SE, i32, i64, i64) -DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) -DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_NO_RWG_SE, i32, i64, i64) -DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) -#define VIS_HELPER(name) \ - DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_NO_RWG_SE, \ - i64, i64, i64) \ - DEF_HELPER_FLAGS_2(f ## name ## 16s, TCG_CALL_NO_RWG_SE, \ - i32, i32, i32) \ - DEF_HELPER_FLAGS_2(f ## name ## 32, TCG_CALL_NO_RWG_SE, \ - i64, i64, i64) \ - DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \ - i32, i32, i32) - -VIS_HELPER(padd) -VIS_HELPER(psub) -#define VIS_CMPHELPER(name) \ - DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \ - i64, i64, i64) \ - DEF_HELPER_FLAGS_2(f##name##32, TCG_CALL_NO_RWG_SE, \ - i64, i64, i64) -VIS_CMPHELPER(cmpgt) -VIS_CMPHELPER(cmpeq) -VIS_CMPHELPER(cmple) -VIS_CMPHELPER(cmpne) -#endif -#undef F_HELPER_0_1 -#undef VIS_HELPER -#undef VIS_CMPHELPER -DEF_HELPER_1(compute_psr, void, env) -DEF_HELPER_1(compute_C_icc, i32, env) diff --git a/qemu/target-sparc/ldst_helper.c b/qemu/target-sparc/ldst_helper.c deleted file mode 100644 index 94f75d02..00000000 --- a/qemu/target-sparc/ldst_helper.c +++ /dev/null @@ -1,2460 +0,0 @@ -/* - * Helpers for loads and stores - * - * Copyright (c) 2003-2005 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "cpu.h" -#include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" - -//#define DEBUG_MMU -//#define DEBUG_MXCC -//#define DEBUG_UNALIGNED -//#define DEBUG_UNASSIGNED -//#define DEBUG_ASI -//#define DEBUG_CACHE_CONTROL - -#ifdef DEBUG_MMU -#define DPRINTF_MMU(fmt, ...) \ - do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0) -#else -#define DPRINTF_MMU(fmt, ...) do {} while (0) -#endif - -#ifdef DEBUG_MXCC -#define DPRINTF_MXCC(fmt, ...) \ - do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0) -#else -#define DPRINTF_MXCC(fmt, ...) do {} while (0) -#endif - -#ifdef DEBUG_ASI -#define DPRINTF_ASI(fmt, ...) \ - do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0) -#endif - -#ifdef DEBUG_CACHE_CONTROL -#define DPRINTF_CACHE_CONTROL(fmt, ...) \ - do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0) -#else -#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0) -#endif - -#ifdef TARGET_SPARC64 -#ifndef TARGET_ABI32 -#define AM_CHECK(env1) ((env1)->pstate & PS_AM) -#else -#define AM_CHECK(env1) (1) -#endif -#endif - -#define QT0 (env->qt0) -#define QT1 (env->qt1) - -#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) -/* Calculates TSB pointer value for fault page size 8k or 64k */ -static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register, - uint64_t tag_access_register, - int page_size) -{ - uint64_t tsb_base = tsb_register & ~0x1fffULL; - int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0; - int tsb_size = tsb_register & 0xf; - - /* discard lower 13 bits which hold tag access context */ - uint64_t tag_access_va = tag_access_register & ~0x1fffULL; - - /* now reorder bits */ - uint64_t tsb_base_mask = ~0x1fffULL; - uint64_t va = tag_access_va; - - /* move va bits to correct position */ - if (page_size == 8*1024) { - va >>= 9; - } else if (page_size == 64*1024) { - va >>= 12; - } - - if (tsb_size) { - tsb_base_mask <<= tsb_size; - } - - /* calculate tsb_base mask and adjust va if split is in use */ - if (tsb_split) { - if (page_size == 8*1024) { - va &= ~(1ULL << (13 + tsb_size)); - } else if (page_size == 64*1024) { - va |= (1ULL << (13 + tsb_size)); - } - tsb_base_mask <<= 1; - } - - return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL; -} - -/* Calculates tag target register value by reordering bits - in tag access register */ -static uint64_t ultrasparc_tag_target(uint64_t tag_access_register) -{ - return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22); -} - -static void replace_tlb_entry(SparcTLBEntry *tlb, - uint64_t tlb_tag, uint64_t tlb_tte, - CPUSPARCState *env1) -{ - target_ulong mask, size, va, offset; - - /* flush page range if translation is valid */ - if (TTE_IS_VALID(tlb->tte)) { - CPUState *cs = CPU(sparc_env_get_cpu(env1)); - - mask = 0xffffffffffffe000ULL; - mask <<= 3 * ((tlb->tte >> 61) & 3); - size = ~mask + 1; - - va = tlb->tag & mask; - - for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) { - tlb_flush_page(cs, va + offset); - } - } - - tlb->tag = tlb_tag; - tlb->tte = tlb_tte; -} - -static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr, - const char *strmmu, CPUSPARCState *env1) -{ - unsigned int i; - target_ulong mask; - uint64_t context; - - int is_demap_context = (demap_addr >> 6) & 1; - - /* demap context */ - switch ((demap_addr >> 4) & 3) { - case 0: /* primary */ - context = 
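The ultrasparc_tag_target() helper deleted here builds the MMU tag-target value purely by bit reordering: the 13-bit context from the tag-access register moves up to bits 63:48, and the VA bits above bit 21 drop into the low bits. A worked sketch with an arbitrary example value:

    #include <stdint.h>
    #include <stdio.h>

    /* Same reordering as the removed ultrasparc_tag_target(). */
    static uint64_t tag_target(uint64_t tag_access)
    {
        return ((tag_access & 0x1fff) << 48) | (tag_access >> 22);
    }

    int main(void)
    {
        /* Tag access register = VA with the context in the low 13 bits. */
        uint64_t va  = 0x0000123456780000ULL;
        uint64_t ctx = 0x42;

        uint64_t t = tag_target(va | ctx);
        printf("tag_target = 0x%016llx\n", (unsigned long long)t);
        /* context is now in bits 63:48, VA >> 22 in the low bits */
        return 0;
    }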
env1->dmmu.mmu_primary_context; - break; - case 1: /* secondary */ - context = env1->dmmu.mmu_secondary_context; - break; - case 2: /* nucleus */ - context = 0; - break; - case 3: /* reserved */ - default: - return; - } - - for (i = 0; i < 64; i++) { - if (TTE_IS_VALID(tlb[i].tte)) { - - if (is_demap_context) { - /* will remove non-global entries matching context value */ - if (TTE_IS_GLOBAL(tlb[i].tte) || - !tlb_compare_context(&tlb[i], context)) { - continue; - } - } else { - /* demap page - will remove any entry matching VA */ - mask = 0xffffffffffffe000ULL; - mask <<= 3 * ((tlb[i].tte >> 61) & 3); - - if (!compare_masked(demap_addr, tlb[i].tag, mask)) { - continue; - } - - /* entry should be global or matching context value */ - if (!TTE_IS_GLOBAL(tlb[i].tte) && - !tlb_compare_context(&tlb[i], context)) { - continue; - } - } - - replace_tlb_entry(&tlb[i], 0, 0, env1); -#ifdef DEBUG_MMU - DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i); - dump_mmu(stdout, fprintf, env1); -#endif - } - } -} - -static void replace_tlb_1bit_lru(SparcTLBEntry *tlb, - uint64_t tlb_tag, uint64_t tlb_tte, - const char *strmmu, CPUSPARCState *env1) -{ - unsigned int i, replace_used; - - /* Try replacing invalid entry */ - for (i = 0; i < 64; i++) { - if (!TTE_IS_VALID(tlb[i].tte)) { - replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); -#ifdef DEBUG_MMU - DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i); - dump_mmu(stdout, fprintf, env1); -#endif - return; - } - } - - /* All entries are valid, try replacing unlocked entry */ - - for (replace_used = 0; replace_used < 2; ++replace_used) { - - /* Used entries are not replaced on first pass */ - - for (i = 0; i < 64; i++) { - if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) { - - replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); -#ifdef DEBUG_MMU - DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n", - strmmu, (replace_used ? "used" : "unused"), i); - dump_mmu(stdout, fprintf, env1); -#endif - return; - } - } - - /* Now reset used bit and search for unused entries again */ - - for (i = 0; i < 64; i++) { - TTE_SET_UNUSED(tlb[i].tte); - } - } - -#ifdef DEBUG_MMU - DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu); -#endif - /* error state? 
*/ -} - -#endif - -void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align) -{ - if (addr & align) { -#ifdef DEBUG_UNALIGNED - printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx - "\n", addr, env->pc); -#endif - helper_raise_exception(env, TT_UNALIGNED); - } -} - -#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \ - defined(DEBUG_MXCC) -static void dump_mxcc(CPUSPARCState *env) -{ - printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 - "\n", - env->mxccdata[0], env->mxccdata[1], - env->mxccdata[2], env->mxccdata[3]); - printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 - "\n" - " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 - "\n", - env->mxccregs[0], env->mxccregs[1], - env->mxccregs[2], env->mxccregs[3], - env->mxccregs[4], env->mxccregs[5], - env->mxccregs[6], env->mxccregs[7]); -} -#endif - -#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \ - && defined(DEBUG_ASI) -static void dump_asi(const char *txt, target_ulong addr, int asi, int size, - uint64_t r1) -{ - switch (size) { - case 1: - DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt, - addr, asi, r1 & 0xff); - break; - case 2: - DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt, - addr, asi, r1 & 0xffff); - break; - case 4: - DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt, - addr, asi, r1 & 0xffffffff); - break; - case 8: - DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt, - addr, asi, r1); - break; - } -} -#endif - -#ifndef TARGET_SPARC64 -#ifndef CONFIG_USER_ONLY - - -/* Leon3 cache control */ - -static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr, - uint64_t val, int size) -{ - DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n", - addr, val, size); - - if (size != 4) { - DPRINTF_CACHE_CONTROL("32bits only\n"); - return; - } - - switch (addr) { - case 0x00: /* Cache control */ - - /* These values must always be read as zeros */ - val &= ~CACHE_CTRL_FD; - val &= ~CACHE_CTRL_FI; - val &= ~CACHE_CTRL_IB; - val &= ~CACHE_CTRL_IP; - val &= ~CACHE_CTRL_DP; - - env->cache_control = val; - break; - case 0x04: /* Instruction cache configuration */ - case 0x08: /* Data cache configuration */ - /* Read Only */ - break; - default: - DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr); - break; - }; -} - -static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr, - int size) -{ - uint64_t ret = 0; - - if (size != 4) { - DPRINTF_CACHE_CONTROL("32bits only\n"); - return 0; - } - - switch (addr) { - case 0x00: /* Cache control */ - ret = env->cache_control; - break; - - /* Configuration registers are read and only always keep those - predefined values */ - - case 0x04: /* Instruction cache configuration */ - ret = 0x10220000; - break; - case 0x08: /* Data cache configuration */ - ret = 0x18220000; - break; - default: - DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr); - break; - }; - DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n", - addr, ret, size); - return ret; -} - -uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, - int sign) -{ - CPUState *cs = CPU(sparc_env_get_cpu(env)); - uint64_t ret = 0; -#if defined(DEBUG_MXCC) || defined(DEBUG_ASI) - uint32_t last_addr = addr; -#endif - - helper_check_align(env, addr, size - 1); - switch (asi) { - case 2: /* SuperSparc MXCC registers and Leon3 cache control */ - switch (addr) 
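The Leon3 store handler above models "read as zero" control bits: the flush and flush-pending flags (FD, FI, IB, IP, DP) are stripped before the value is latched, so a later load of the cache control register never sees them set. A minimal sketch of that masking; the bit positions below follow the LEON3 cache control register layout but should be checked against cpu.h rather than taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed LEON3 positions; verify against the real CACHE_CTRL_* masks. */
    #define CTRL_FD (1u << 22) /* flush data cache */
    #define CTRL_FI (1u << 21) /* flush instruction cache */
    #define CTRL_IB (1u << 16) /* instruction burst fetch */
    #define CTRL_IP (1u << 15) /* I-cache flush pending */
    #define CTRL_DP (1u << 14) /* D-cache flush pending */

    static uint32_t cache_control;

    static void cache_control_store(uint32_t val)
    {
        /* These bits must always read back as zero. */
        val &= ~(CTRL_FD | CTRL_FI | CTRL_IB | CTRL_IP | CTRL_DP);
        cache_control = val;
    }

    int main(void)
    {
        cache_control_store(0xffffffffu);
        printf("0x%08x\n", cache_control); /* flush/pending bits are clear */
        return 0;
    }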
{ - case 0x00: /* Leon3 Cache Control */ - case 0x08: /* Leon3 Instruction Cache config */ - case 0x0C: /* Leon3 Date Cache config */ - if (env->def->features & CPU_FEATURE_CACHE_CTRL) { - ret = leon3_cache_control_ld(env, addr, size); - } - break; - case 0x01c00a00: /* MXCC control register */ - if (size == 8) { - ret = env->mxccregs[3]; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00a04: /* MXCC control register */ - if (size == 4) { - ret = env->mxccregs[3]; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00c00: /* Module reset register */ - if (size == 8) { - ret = env->mxccregs[5]; - /* should we do something here? */ - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00f00: /* MBus port address register */ - if (size == 8) { - ret = env->mxccregs[7]; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - default: - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented address, size: %d\n", addr, - size); - break; - } - DPRINTF_MXCC("asi = %d, size = %d, sign = %d, " - "addr = %08x -> ret = %" PRIx64 "," - "addr = %08x\n", asi, size, sign, last_addr, ret, addr); -#ifdef DEBUG_MXCC - dump_mxcc(env); -#endif - break; - case 3: /* MMU probe */ - case 0x18: /* LEON3 MMU probe */ - { - int mmulev; - - mmulev = (addr >> 8) & 15; - if (mmulev > 4) { - ret = 0; - } else { - ret = mmu_probe(env, addr, mmulev); - } - DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n", - addr, mmulev, ret); - } - break; - case 4: /* read MMU regs */ - case 0x19: /* LEON3 read MMU regs */ - { - int reg = (addr >> 8) & 0x1f; - - ret = env->mmuregs[reg]; - if (reg == 3) { /* Fault status cleared on read */ - env->mmuregs[3] = 0; - } else if (reg == 0x13) { /* Fault status read */ - ret = env->mmuregs[3]; - } else if (reg == 0x14) { /* Fault address read */ - ret = env->mmuregs[4]; - } - DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret); - } - break; - case 5: /* Turbosparc ITLB Diagnostic */ - case 6: /* Turbosparc DTLB Diagnostic */ - case 7: /* Turbosparc IOTLB Diagnostic */ - break; - case 9: /* Supervisor code access */ - switch (size) { - case 1: - ret = cpu_ldub_code(env, addr); - break; - case 2: - ret = cpu_lduw_code(env, addr); - break; - default: - case 4: - ret = cpu_ldl_code(env, addr); - break; - case 8: - ret = cpu_ldq_code(env, addr); - break; - } - break; - case 0xa: /* User data access */ - switch (size) { - case 1: - ret = cpu_ldub_user(env, addr); - break; - case 2: - ret = cpu_lduw_user(env, addr); - break; - default: - case 4: - ret = cpu_ldl_user(env, addr); - break; - case 8: - ret = cpu_ldq_user(env, addr); - break; - } - break; - case 0xb: /* Supervisor data access */ - case 0x80: - switch (size) { - case 1: - ret = cpu_ldub_kernel(env, addr); - break; - case 2: - ret = cpu_lduw_kernel(env, addr); - break; - default: - case 4: - ret = cpu_ldl_kernel(env, addr); - break; - case 8: - ret = cpu_ldq_kernel(env, addr); - break; - } - break; - case 0xc: /* I-cache tag */ - case 0xd: /* I-cache data */ - case 0xe: /* D-cache tag */ - case 0xf: /* D-cache data */ - break; - case 0x20: /* MMU passthrough */ - case 0x1c: /* LEON MMU passthrough */ - switch (size) { - case 1: - ret = ldub_phys(cs->as, addr); - break; - case 2: - ret = lduw_phys(cs->as, addr); - break; - default: - case 4: - ret = ldl_phys(cs->as, 
addr); - break; - case 8: - ret = ldq_phys(cs->as, addr); - break; - } - break; - /* MMU passthrough, 0x100000000 to 0xfffffffff */ - case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: case 0x26: case 0x27: - case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2c: case 0x2d: case 0x2e: case 0x2f: - switch (size) { - case 1: - ret = ldub_phys(cs->as, (hwaddr)addr - | ((hwaddr)(asi & 0xf) << 32)); - break; - case 2: - ret = lduw_phys(cs->as, (hwaddr)addr - | ((hwaddr)(asi & 0xf) << 32)); - break; - default: - case 4: - ret = ldl_phys(cs->as, (hwaddr)addr - | ((hwaddr)(asi & 0xf) << 32)); - break; - case 8: - ret = ldq_phys(cs->as, (hwaddr)addr - | ((hwaddr)(asi & 0xf) << 32)); - break; - } - break; - case 0x30: /* Turbosparc secondary cache diagnostic */ - case 0x31: /* Turbosparc RAM snoop */ - case 0x32: /* Turbosparc page table descriptor diagnostic */ - case 0x39: /* data cache diagnostic register */ - ret = 0; - break; - case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */ - { - int reg = (addr >> 8) & 3; - - switch (reg) { - case 0: /* Breakpoint Value (Addr) */ - ret = env->mmubpregs[reg]; - break; - case 1: /* Breakpoint Mask */ - ret = env->mmubpregs[reg]; - break; - case 2: /* Breakpoint Control */ - ret = env->mmubpregs[reg]; - break; - case 3: /* Breakpoint Status */ - ret = env->mmubpregs[reg]; - env->mmubpregs[reg] = 0ULL; - break; - } - DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg, - ret); - } - break; - case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ - ret = env->mmubpctrv; - break; - case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ - ret = env->mmubpctrc; - break; - case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ - ret = env->mmubpctrs; - break; - case 0x4c: /* SuperSPARC MMU Breakpoint Action */ - ret = env->mmubpaction; - break; - case 8: /* User code access, XXX */ - default: - cpu_unassigned_access(cs, addr, false, false, asi, size); - ret = 0; - break; - } - if (sign) { - switch (size) { - case 1: - ret = (int8_t) ret; - break; - case 2: - ret = (int16_t) ret; - break; - case 4: - ret = (int32_t) ret; - break; - default: - break; - } - } -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - return ret; -} - -void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi, - int size) -{ - SPARCCPU *cpu = sparc_env_get_cpu(env); - CPUState *cs = CPU(cpu); - - helper_check_align(env, addr, size - 1); - switch (asi) { - case 2: /* SuperSparc MXCC registers and Leon3 cache control */ - switch (addr) { - case 0x00: /* Leon3 Cache Control */ - case 0x08: /* Leon3 Instruction Cache config */ - case 0x0C: /* Leon3 Date Cache config */ - if (env->def->features & CPU_FEATURE_CACHE_CTRL) { - leon3_cache_control_st(env, addr, val, size); - } - break; - - case 0x01c00000: /* MXCC stream data register 0 */ - if (size == 8) { - env->mxccdata[0] = val; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00008: /* MXCC stream data register 1 */ - if (size == 8) { - env->mxccdata[1] = val; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00010: /* MXCC stream data register 2 */ - if (size == 8) { - env->mxccdata[2] = val; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00018: /* MXCC stream data register 3 */ - if (size == 8) { - env->mxccdata[3] = val; - } else { - 
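For the passthrough ASIs 0x21 through 0x2f handled above, the low nibble of the ASI supplies physical-address bits 35:32, extending the 32-bit virtual address to a 36-bit physical one (0x1_0000_0000 through 0xF_FFFF_FFFF). The composition is just an OR of the shifted nibble:

    #include <stdint.h>
    #include <stdio.h>

    /* ASI 0x21..0x2f passthrough: ASI low nibble = PA bits 35:32. */
    static uint64_t passthrough_pa(uint32_t addr, int asi)
    {
        return (uint64_t)addr | ((uint64_t)(asi & 0xf) << 32);
    }

    int main(void)
    {
        /* ASI 0x2c, VA 0x1000 -> PA 0xc_0000_1000 */
        printf("0x%llx\n", (unsigned long long)passthrough_pa(0x1000, 0x2c));
        return 0;
    }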
qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00100: /* MXCC stream source */ - if (size == 8) { - env->mxccregs[0] = val; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - env->mxccdata[0] = ldq_phys(cs->as, - (env->mxccregs[0] & 0xffffffffULL) + - 0); - env->mxccdata[1] = ldq_phys(cs->as, - (env->mxccregs[0] & 0xffffffffULL) + - 8); - env->mxccdata[2] = ldq_phys(cs->as, - (env->mxccregs[0] & 0xffffffffULL) + - 16); - env->mxccdata[3] = ldq_phys(cs->as, - (env->mxccregs[0] & 0xffffffffULL) + - 24); - break; - case 0x01c00200: /* MXCC stream destination */ - if (size == 8) { - env->mxccregs[1] = val; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 0, - env->mxccdata[0]); - stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 8, - env->mxccdata[1]); - stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16, - env->mxccdata[2]); - stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24, - env->mxccdata[3]); - break; - case 0x01c00a00: /* MXCC control register */ - if (size == 8) { - env->mxccregs[3] = val; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00a04: /* MXCC control register */ - if (size == 4) { - env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL) - | val; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00e00: /* MXCC error register */ - /* writing a 1 bit clears the error */ - if (size == 8) { - env->mxccregs[6] &= ~val; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - case 0x01c00f00: /* MBus port address register */ - if (size == 8) { - env->mxccregs[7] = val; - } else { - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented access size: %d\n", addr, - size); - } - break; - default: - qemu_log_mask(LOG_UNIMP, - "%08x: unimplemented address, size: %d\n", addr, - size); - break; - } - DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n", - asi, size, addr, val); -#ifdef DEBUG_MXCC - dump_mxcc(env); -#endif - break; - case 3: /* MMU flush */ - case 0x18: /* LEON3 MMU flush */ - { - int mmulev; - - mmulev = (addr >> 8) & 15; - DPRINTF_MMU("mmu flush level %d\n", mmulev); - switch (mmulev) { - case 0: /* flush page */ - tlb_flush_page(CPU(cpu), addr & 0xfffff000); - break; - case 1: /* flush segment (256k) */ - case 2: /* flush region (16M) */ - case 3: /* flush context (4G) */ - case 4: /* flush entire */ - tlb_flush(CPU(cpu), 1); - break; - default: - break; - } -#ifdef DEBUG_MMU - dump_mmu(stdout, fprintf, env); -#endif - } - break; - case 4: /* write MMU regs */ - case 0x19: /* LEON3 write MMU regs */ - { - int reg = (addr >> 8) & 0x1f; - uint32_t oldreg; - - oldreg = env->mmuregs[reg]; - switch (reg) { - case 0: /* Control Register */ - env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) | - (val & 0x00ffffff); - /* Mappings generated during no-fault mode or MMU - disabled mode are invalid in normal mode */ - if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) != - (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm))) { - tlb_flush(CPU(cpu), 1); - } - break; - case 1: /* Context Table Pointer Register */ - env->mmuregs[reg] = val & env->def->mmu_ctpr_mask; - break; - case 2: /* Context Register */ - env->mmuregs[reg] = val & 
env->def->mmu_cxr_mask; - if (oldreg != env->mmuregs[reg]) { - /* we flush when the MMU context changes because - QEMU has no MMU context support */ - tlb_flush(CPU(cpu), 1); - } - break; - case 3: /* Synchronous Fault Status Register with Clear */ - case 4: /* Synchronous Fault Address Register */ - break; - case 0x10: /* TLB Replacement Control Register */ - env->mmuregs[reg] = val & env->def->mmu_trcr_mask; - break; - case 0x13: /* Synchronous Fault Status Register with Read - and Clear */ - env->mmuregs[3] = val & env->def->mmu_sfsr_mask; - break; - case 0x14: /* Synchronous Fault Address Register */ - env->mmuregs[4] = val; - break; - default: - env->mmuregs[reg] = val; - break; - } - if (oldreg != env->mmuregs[reg]) { - DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n", - reg, oldreg, env->mmuregs[reg]); - } -#ifdef DEBUG_MMU - dump_mmu(stdout, fprintf, env); -#endif - } - break; - case 5: /* Turbosparc ITLB Diagnostic */ - case 6: /* Turbosparc DTLB Diagnostic */ - case 7: /* Turbosparc IOTLB Diagnostic */ - break; - case 0xa: /* User data access */ - switch (size) { - case 1: - cpu_stb_user(env, addr, val); - break; - case 2: - cpu_stw_user(env, addr, val); - break; - default: - case 4: - cpu_stl_user(env, addr, val); - break; - case 8: - cpu_stq_user(env, addr, val); - break; - } - break; - case 0xb: /* Supervisor data access */ - case 0x80: - switch (size) { - case 1: - cpu_stb_kernel(env, addr, val); - break; - case 2: - cpu_stw_kernel(env, addr, val); - break; - default: - case 4: - cpu_stl_kernel(env, addr, val); - break; - case 8: - cpu_stq_kernel(env, addr, val); - break; - } - break; - case 0xc: /* I-cache tag */ - case 0xd: /* I-cache data */ - case 0xe: /* D-cache tag */ - case 0xf: /* D-cache data */ - case 0x10: /* I/D-cache flush page */ - case 0x11: /* I/D-cache flush segment */ - case 0x12: /* I/D-cache flush region */ - case 0x13: /* I/D-cache flush context */ - case 0x14: /* I/D-cache flush user */ - break; - case 0x17: /* Block copy, sta access */ - { - /* val = src - addr = dst - copy 32 bytes */ - unsigned int i; - uint32_t src = val & ~3, dst = addr & ~3, temp; - - for (i = 0; i < 32; i += 4, src += 4, dst += 4) { - temp = cpu_ldl_kernel(env, src); - cpu_stl_kernel(env, dst, temp); - } - } - break; - case 0x1f: /* Block fill, stda access */ - { - /* addr = dst - fill 32 bytes with val */ - unsigned int i; - uint32_t dst = addr & 7; - - for (i = 0; i < 32; i += 8, dst += 8) { - cpu_stq_kernel(env, dst, val); - } - } - break; - case 0x20: /* MMU passthrough */ - case 0x1c: /* LEON MMU passthrough */ - { - switch (size) { - case 1: - stb_phys(cs->as, addr, val); - break; - case 2: - stw_phys(cs->as, addr, val); - break; - case 4: - default: - stl_phys(cs->as, addr, val); - break; - case 8: - stq_phys(cs->as, addr, val); - break; - } - } - break; - /* MMU passthrough, 0x100000000 to 0xfffffffff */ - case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: case 0x26: case 0x27: - case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2c: case 0x2d: case 0x2e: case 0x2f: - { - switch (size) { - case 1: - stb_phys(cs->as, (hwaddr)addr - | ((hwaddr)(asi & 0xf) << 32), val); - break; - case 2: - stw_phys(cs->as, (hwaddr)addr - | ((hwaddr)(asi & 0xf) << 32), val); - break; - case 4: - default: - stl_phys(cs->as, (hwaddr)addr - | ((hwaddr)(asi & 0xf) << 32), val); - break; - case 8: - stq_phys(cs->as, (hwaddr)addr - | ((hwaddr)(asi & 0xf) << 32), val); - break; - } - } - break; - case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */ - case 0x31: /* 
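ASI 0x17 ("block copy, sta access") above moves a fixed 32-byte line: the source address comes from the store value, the destination from the store address, both word-aligned first. The equivalent host-side loop, with a plain byte array standing in for the guest address space that cpu_ldl_kernel()/cpu_stl_kernel() would touch:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* 32-byte block copy in 4-byte units, as the ASI 0x17 handler does. */
    static void block_copy32(uint8_t *mem, uint32_t dst, uint32_t src)
    {
        src &= ~3u;
        dst &= ~3u;
        for (unsigned i = 0; i < 32; i += 4) {
            uint32_t w;
            memcpy(&w, mem + src + i, 4);
            memcpy(mem + dst + i, &w, 4);
        }
    }

    int main(void)
    {
        uint8_t mem[128] = "0123456789abcdef0123456789abcdef";
        block_copy32(mem, 64, 0);
        printf("%.32s\n", (char *)mem + 64);
        return 0;
    }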
store buffer data, Ross RT620 I-cache flush or - Turbosparc snoop RAM */ - case 0x32: /* store buffer control or Turbosparc page table - descriptor diagnostic */ - case 0x36: /* I-cache flash clear */ - case 0x37: /* D-cache flash clear */ - break; - case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/ - { - int reg = (addr >> 8) & 3; - - switch (reg) { - case 0: /* Breakpoint Value (Addr) */ - env->mmubpregs[reg] = (val & 0xfffffffffULL); - break; - case 1: /* Breakpoint Mask */ - env->mmubpregs[reg] = (val & 0xfffffffffULL); - break; - case 2: /* Breakpoint Control */ - env->mmubpregs[reg] = (val & 0x7fULL); - break; - case 3: /* Breakpoint Status */ - env->mmubpregs[reg] = (val & 0xfULL); - break; - } - DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg, - env->mmuregs[reg]); - } - break; - case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ - env->mmubpctrv = val & 0xffffffff; - break; - case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ - env->mmubpctrc = val & 0x3; - break; - case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ - env->mmubpctrs = val & 0x3; - break; - case 0x4c: /* SuperSPARC MMU Breakpoint Action */ - env->mmubpaction = val & 0x1fff; - break; - case 8: /* User code access, XXX */ - case 9: /* Supervisor code access, XXX */ - default: - cpu_unassigned_access(CPU(sparc_env_get_cpu(env)), - addr, true, false, asi, size); - break; - } -#ifdef DEBUG_ASI - dump_asi("write", addr, asi, size, val); -#endif -} - -#endif /* CONFIG_USER_ONLY */ -#else /* TARGET_SPARC64 */ - -/* returns true if access using this ASI is to have address translated by MMU - otherwise access is to raw physical address */ -static inline int is_translating_asi(int asi) -{ -#ifdef TARGET_SPARC64 - /* Ultrasparc IIi translating asi - - note this list is defined by cpu implementation - */ - if( (asi >= 0x04 && asi <= 0x11) || - (asi >= 0x16 && asi <= 0x19) || - (asi >= 0x1E && asi <= 0x1F) || - (asi >= 0x24 && asi <= 0x2C) || - (asi >= 0x70 && asi <= 0x73) || - (asi >= 0x78 && asi <= 0x79) || - (asi >= 0x80 && asi <= 0xFF) ) - { - return 1; - } - else - { - return 0; - } -#else - /* TODO: check sparc32 bits */ - return 0; -#endif -} - -static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr) -{ -#ifdef TARGET_SPARC64 - if (AM_CHECK(env1)) { - addr &= 0xffffffffULL; - } -#endif - return addr; -} - -static inline target_ulong asi_address_mask(CPUSPARCState *env, - int asi, target_ulong addr) -{ - if (is_translating_asi(asi)) { - return address_mask(env, addr); - } else { - return addr; - } -} - -#ifdef CONFIG_USER_ONLY -uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, - int sign) -{ - uint64_t ret = 0; -#if defined(DEBUG_ASI) - target_ulong last_addr = addr; -#endif - - if (asi < 0x80) { - helper_raise_exception(env, TT_PRIV_ACT); - } - - helper_check_align(env, addr, size - 1); - addr = asi_address_mask(env, asi, addr); - - switch (asi) { - case 0x82: /* Primary no-fault */ - case 0x8a: /* Primary no-fault LE */ - if (page_check_range(addr, size, PAGE_READ) == -1) { -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - return 0; - } - /* Fall through */ - case 0x80: /* Primary */ - case 0x88: /* Primary LE */ - { - switch (size) { - case 1: - ret = ldub_raw(addr); - break; - case 2: - ret = lduw_raw(addr); - break; - case 4: - ret = ldl_raw(addr); - break; - default: - case 8: - ret = ldq_raw(addr); - break; - } - } - break; - case 0x83: /* Secondary no-fault */ - case 0x8b: /* Secondary no-fault LE 
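
is_translating_asi() above is a pure range test over the UltraSPARC IIi ASI map, and asi_address_mask() only applies the 32-bit address mask to ASIs that pass it. The same pair, condensed (the AM flag is passed in directly here rather than read from CPU state):

#include <stdbool.h>
#include <stdint.h>

/* UltraSPARC IIi ASIs whose accesses go through the MMU. */
static bool asi_is_translating(int asi)
{
    return (asi >= 0x04 && asi <= 0x11) ||
           (asi >= 0x16 && asi <= 0x19) ||
           (asi >= 0x1e && asi <= 0x1f) ||
           (asi >= 0x24 && asi <= 0x2c) ||
           (asi >= 0x70 && asi <= 0x73) ||
           (asi >= 0x78 && asi <= 0x79) ||
           (asi >= 0x80 && asi <= 0xff);
}

/* With AM (address masking) enabled, translated ASIs see 32-bit addresses. */
static uint64_t mask_asi_address(bool am_enabled, int asi, uint64_t addr)
{
    return (am_enabled && asi_is_translating(asi)) ? (addr & 0xffffffffULL)
                                                   : addr;
}
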
*/ - if (page_check_range(addr, size, PAGE_READ) == -1) { -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - return 0; - } - /* Fall through */ - case 0x81: /* Secondary */ - case 0x89: /* Secondary LE */ - /* XXX */ - break; - default: - break; - } - - /* Convert from little endian */ - switch (asi) { - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - case 0x8a: /* Primary no-fault LE */ - case 0x8b: /* Secondary no-fault LE */ - switch (size) { - case 2: - ret = bswap16(ret); - break; - case 4: - ret = bswap32(ret); - break; - case 8: - ret = bswap64(ret); - break; - default: - break; - } - default: - break; - } - - /* Convert to signed number */ - if (sign) { - switch (size) { - case 1: - ret = (int8_t) ret; - break; - case 2: - ret = (int16_t) ret; - break; - case 4: - ret = (int32_t) ret; - break; - default: - break; - } - } -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - return ret; -} - -void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, - int asi, int size) -{ -#ifdef DEBUG_ASI - dump_asi("write", addr, asi, size, val); -#endif - if (asi < 0x80) { - helper_raise_exception(env, TT_PRIV_ACT); - } - - helper_check_align(env, addr, size - 1); - addr = asi_address_mask(env, asi, addr); - - /* Convert to little endian */ - switch (asi) { - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - switch (size) { - case 2: - val = bswap16(val); - break; - case 4: - val = bswap32(val); - break; - case 8: - val = bswap64(val); - break; - default: - break; - } - default: - break; - } - - switch (asi) { - case 0x80: /* Primary */ - case 0x88: /* Primary LE */ - { - switch (size) { - case 1: - stb_raw(addr, val); - break; - case 2: - stw_raw(addr, val); - break; - case 4: - stl_raw(addr, val); - break; - case 8: - default: - stq_raw(addr, val); - break; - } - } - break; - case 0x81: /* Secondary */ - case 0x89: /* Secondary LE */ - /* XXX */ - return; - - case 0x82: /* Primary no-fault, RO */ - case 0x83: /* Secondary no-fault, RO */ - case 0x8a: /* Primary no-fault LE, RO */ - case 0x8b: /* Secondary no-fault LE, RO */ - default: - helper_raise_exception(env, TT_DATA_ACCESS); - return; - } -} - -#else /* CONFIG_USER_ONLY */ - -uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, - int sign) -{ - CPUState *cs = CPU(sparc_env_get_cpu(env)); - uint64_t ret = 0; -#if defined(DEBUG_ASI) - target_ulong last_addr = addr; -#endif - - asi &= 0xff; - - if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) - || (cpu_has_hypervisor(env) - && asi >= 0x30 && asi < 0x80 - && !(env->hpstate & HS_PRIV))) { - helper_raise_exception(env, TT_PRIV_ACT); - } - - helper_check_align(env, addr, size - 1); - addr = asi_address_mask(env, asi, addr); - - /* process nonfaulting loads first */ - if ((asi & 0xf6) == 0x82) { - int mmu_idx; - - /* secondary space access has lowest asi bit equal to 1 */ - if (env->pstate & PS_PRIV) { - mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX; - } else { - mmu_idx = (asi & 1) ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX; - } - - if (cpu_get_phys_page_nofault(env, addr, mmu_idx) == (0-1ULL)) { -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - /* env->exception_index is set in get_physical_address_data(). 
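
Both ASI load paths end with the same two fixups: byte-swap the loaded value when the ASI is little-endian, then sign-extend when a signed load was requested. Factored into helpers (GCC/Clang bswap builtins stand in for QEMU's bswap16/32/64):

#include <stdint.h>

/* Byte-swap by access size; size 1 and unknown sizes pass through. */
static uint64_t fixup_le(uint64_t v, int size)
{
    switch (size) {
    case 2: return __builtin_bswap16((uint16_t)v);
    case 4: return __builtin_bswap32((uint32_t)v);
    case 8: return __builtin_bswap64(v);
    default: return v;
    }
}

/* Sign-extend a 1/2/4-byte value into 64 bits; 8-byte values pass through. */
static uint64_t fixup_sign(uint64_t v, int size)
{
    switch (size) {
    case 1: return (uint64_t)(int64_t)(int8_t)v;
    case 2: return (uint64_t)(int64_t)(int16_t)v;
    case 4: return (uint64_t)(int64_t)(int32_t)v;
    default: return v;
    }
}
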
*/ - helper_raise_exception(env, cs->exception_index); - } - - /* convert nonfaulting load ASIs to normal load ASIs */ - asi &= ~0x02; - } - - switch (asi) { - case 0x10: /* As if user primary */ - case 0x11: /* As if user secondary */ - case 0x18: /* As if user primary LE */ - case 0x19: /* As if user secondary LE */ - case 0x80: /* Primary */ - case 0x81: /* Secondary */ - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - case 0xe2: /* UA2007 Primary block init */ - case 0xe3: /* UA2007 Secondary block init */ - if ((asi & 0x80) && (env->pstate & PS_PRIV)) { - if (cpu_hypervisor_mode(env)) { - switch (size) { - case 1: - ret = cpu_ldub_hypv(env, addr); - break; - case 2: - ret = cpu_lduw_hypv(env, addr); - break; - case 4: - ret = cpu_ldl_hypv(env, addr); - break; - default: - case 8: - ret = cpu_ldq_hypv(env, addr); - break; - } - } else { - /* secondary space access has lowest asi bit equal to 1 */ - if (asi & 1) { - switch (size) { - case 1: - ret = cpu_ldub_kernel_secondary(env, addr); - break; - case 2: - ret = cpu_lduw_kernel_secondary(env, addr); - break; - case 4: - ret = cpu_ldl_kernel_secondary(env, addr); - break; - default: - case 8: - ret = cpu_ldq_kernel_secondary(env, addr); - break; - } - } else { - switch (size) { - case 1: - ret = cpu_ldub_kernel(env, addr); - break; - case 2: - ret = cpu_lduw_kernel(env, addr); - break; - case 4: - ret = cpu_ldl_kernel(env, addr); - break; - default: - case 8: - ret = cpu_ldq_kernel(env, addr); - break; - } - } - } - } else { - /* secondary space access has lowest asi bit equal to 1 */ - if (asi & 1) { - switch (size) { - case 1: - ret = cpu_ldub_user_secondary(env, addr); - break; - case 2: - ret = cpu_lduw_user_secondary(env, addr); - break; - case 4: - ret = cpu_ldl_user_secondary(env, addr); - break; - default: - case 8: - ret = cpu_ldq_user_secondary(env, addr); - break; - } - } else { - switch (size) { - case 1: - ret = cpu_ldub_user(env, addr); - break; - case 2: - ret = cpu_lduw_user(env, addr); - break; - case 4: - ret = cpu_ldl_user(env, addr); - break; - default: - case 8: - ret = cpu_ldq_user(env, addr); - break; - } - } - } - break; - case 0x14: /* Bypass */ - case 0x15: /* Bypass, non-cacheable */ - case 0x1c: /* Bypass LE */ - case 0x1d: /* Bypass, non-cacheable LE */ - { - switch (size) { - case 1: - ret = ldub_phys(cs->as, addr); - break; - case 2: - ret = lduw_phys(cs->as, addr); - break; - case 4: - ret = ldl_phys(cs->as, addr); - break; - default: - case 8: - ret = ldq_phys(cs->as, addr); - break; - } - break; - } - case 0x24: /* Nucleus quad LDD 128 bit atomic */ - case 0x2c: /* Nucleus quad LDD 128 bit atomic LE - Only ldda allowed */ - helper_raise_exception(env, TT_ILL_INSN); - return 0; - case 0x04: /* Nucleus */ - case 0x0c: /* Nucleus Little Endian (LE) */ - { - switch (size) { - case 1: - ret = cpu_ldub_nucleus(env, addr); - break; - case 2: - ret = cpu_lduw_nucleus(env, addr); - break; - case 4: - ret = cpu_ldl_nucleus(env, addr); - break; - default: - case 8: - ret = cpu_ldq_nucleus(env, addr); - break; - } - break; - } - case 0x4a: /* UPA config */ - /* XXX */ - break; - case 0x45: /* LSU */ - ret = env->lsu; - break; - case 0x50: /* I-MMU regs */ - { - int reg = (addr >> 3) & 0xf; - - if (reg == 0) { - /* I-TSB Tag Target register */ - ret = ultrasparc_tag_target(env->immu.tag_access); - } else { - ret = env->immuregs[reg]; - } - - break; - } - case 0x51: /* I-MMU 8k TSB pointer */ - { - /* env->immuregs[5] holds I-MMU TSB register value - env->immuregs[6] holds I-MMU Tag Access register 
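
The nonfaulting-load test (asi & 0xf6) == 0x82 matches exactly the four no-fault ASIs, and clearing bit 1 afterwards maps each onto its faulting counterpart. A small self-check of that bit trick:

#include <assert.h>

int main(void)
{
    /* 0x82/0x83 primary/secondary no-fault, 0x8a/0x8b their LE variants */
    int nofault[] = { 0x82, 0x83, 0x8a, 0x8b };
    int normal[]  = { 0x80, 0x81, 0x88, 0x89 };

    for (int i = 0; i < 4; i++) {
        assert((nofault[i] & 0xf6) == 0x82);        /* classified no-fault */
        assert((nofault[i] & ~0x02) == normal[i]);  /* maps to normal ASI */
        assert((normal[i] & 0xf6) != 0x82);         /* normal ASIs excluded */
    }
    return 0;
}
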
value */ - ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access, - 8*1024); - break; - } - case 0x52: /* I-MMU 64k TSB pointer */ - { - /* env->immuregs[5] holds I-MMU TSB register value - env->immuregs[6] holds I-MMU Tag Access register value */ - ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access, - 64*1024); - break; - } - case 0x55: /* I-MMU data access */ - { - int reg = (addr >> 3) & 0x3f; - - ret = env->itlb[reg].tte; - break; - } - case 0x56: /* I-MMU tag read */ - { - int reg = (addr >> 3) & 0x3f; - - ret = env->itlb[reg].tag; - break; - } - case 0x58: /* D-MMU regs */ - { - int reg = (addr >> 3) & 0xf; - - if (reg == 0) { - /* D-TSB Tag Target register */ - ret = ultrasparc_tag_target(env->dmmu.tag_access); - } else { - ret = env->dmmuregs[reg]; - } - break; - } - case 0x59: /* D-MMU 8k TSB pointer */ - { - /* env->dmmuregs[5] holds D-MMU TSB register value - env->dmmuregs[6] holds D-MMU Tag Access register value */ - ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access, - 8*1024); - break; - } - case 0x5a: /* D-MMU 64k TSB pointer */ - { - /* env->dmmuregs[5] holds D-MMU TSB register value - env->dmmuregs[6] holds D-MMU Tag Access register value */ - ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access, - 64*1024); - break; - } - case 0x5d: /* D-MMU data access */ - { - int reg = (addr >> 3) & 0x3f; - - ret = env->dtlb[reg].tte; - break; - } - case 0x5e: /* D-MMU tag read */ - { - int reg = (addr >> 3) & 0x3f; - - ret = env->dtlb[reg].tag; - break; - } - case 0x48: /* Interrupt dispatch, RO */ - break; - case 0x49: /* Interrupt data receive */ - ret = env->ivec_status; - break; - case 0x7f: /* Incoming interrupt vector, RO */ - { - int reg = (addr >> 4) & 0x3; - if (reg < 3) { - ret = env->ivec_data[reg]; - } - break; - } - case 0x46: /* D-cache data */ - case 0x47: /* D-cache tag access */ - case 0x4b: /* E-cache error enable */ - case 0x4c: /* E-cache asynchronous fault status */ - case 0x4d: /* E-cache asynchronous fault address */ - case 0x4e: /* E-cache tag data */ - case 0x66: /* I-cache instruction access */ - case 0x67: /* I-cache tag access */ - case 0x6e: /* I-cache predecode */ - case 0x6f: /* I-cache LRU etc. 
*/ - case 0x76: /* E-cache tag */ - case 0x7e: /* E-cache tag */ - break; - case 0x5b: /* D-MMU data pointer */ - case 0x54: /* I-MMU data in, WO */ - case 0x57: /* I-MMU demap, WO */ - case 0x5c: /* D-MMU data in, WO */ - case 0x5f: /* D-MMU demap, WO */ - case 0x77: /* Interrupt vector, WO */ - default: - cpu_unassigned_access(cs, addr, false, false, 1, size); - ret = 0; - break; - } - - /* Convert from little endian */ - switch (asi) { - case 0x0c: /* Nucleus Little Endian (LE) */ - case 0x18: /* As if user primary LE */ - case 0x19: /* As if user secondary LE */ - case 0x1c: /* Bypass LE */ - case 0x1d: /* Bypass, non-cacheable LE */ - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - switch(size) { - case 2: - ret = bswap16(ret); - break; - case 4: - ret = bswap32(ret); - break; - case 8: - ret = bswap64(ret); - break; - default: - break; - } - default: - break; - } - - /* Convert to signed number */ - if (sign) { - switch (size) { - case 1: - ret = (int8_t) ret; - break; - case 2: - ret = (int16_t) ret; - break; - case 4: - ret = (int32_t) ret; - break; - default: - break; - } - } -#ifdef DEBUG_ASI - dump_asi("read ", last_addr, asi, size, ret); -#endif - return ret; -} - -void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, - int asi, int size) -{ - SPARCCPU *cpu = sparc_env_get_cpu(env); - CPUState *cs = CPU(cpu); - -#ifdef DEBUG_ASI - dump_asi("write", addr, asi, size, val); -#endif - - asi &= 0xff; - - if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) - || (cpu_has_hypervisor(env) - && asi >= 0x30 && asi < 0x80 - && !(env->hpstate & HS_PRIV))) { - helper_raise_exception(env, TT_PRIV_ACT); - } - - helper_check_align(env, addr, size - 1); - addr = asi_address_mask(env, asi, addr); - - /* Convert to little endian */ - switch (asi) { - case 0x0c: /* Nucleus Little Endian (LE) */ - case 0x18: /* As if user primary LE */ - case 0x19: /* As if user secondary LE */ - case 0x1c: /* Bypass LE */ - case 0x1d: /* Bypass, non-cacheable LE */ - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - switch (size) { - case 2: - val = bswap16(val); - break; - case 4: - val = bswap32(val); - break; - case 8: - val = bswap64(val); - break; - default: - break; - } - default: - break; - } - - switch (asi) { - case 0x10: /* As if user primary */ - case 0x11: /* As if user secondary */ - case 0x18: /* As if user primary LE */ - case 0x19: /* As if user secondary LE */ - case 0x80: /* Primary */ - case 0x81: /* Secondary */ - case 0x88: /* Primary LE */ - case 0x89: /* Secondary LE */ - case 0xe2: /* UA2007 Primary block init */ - case 0xe3: /* UA2007 Secondary block init */ - if ((asi & 0x80) && (env->pstate & PS_PRIV)) { - if (cpu_hypervisor_mode(env)) { - switch (size) { - case 1: - cpu_stb_hypv(env, addr, val); - break; - case 2: - cpu_stw_hypv(env, addr, val); - break; - case 4: - cpu_stl_hypv(env, addr, val); - break; - case 8: - default: - cpu_stq_hypv(env, addr, val); - break; - } - } else { - /* secondary space access has lowest asi bit equal to 1 */ - if (asi & 1) { - switch (size) { - case 1: - cpu_stb_kernel_secondary(env, addr, val); - break; - case 2: - cpu_stw_kernel_secondary(env, addr, val); - break; - case 4: - cpu_stl_kernel_secondary(env, addr, val); - break; - case 8: - default: - cpu_stq_kernel_secondary(env, addr, val); - break; - } - } else { - switch (size) { - case 1: - cpu_stb_kernel(env, addr, val); - break; - case 2: - cpu_stw_kernel(env, addr, val); - break; - case 4: - cpu_stl_kernel(env, addr, val); - break; - case 8: - default: - 
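
Across these handlers the lowest ASI bit selects the secondary address space and PSTATE.PRIV selects kernel versus user, which together pick one of four softmmu indexes. The selection rule on its own (enum values are illustrative placeholders for QEMU's MMU index constants):

#include <stdbool.h>

enum {
    MMU_USER_IDX_SK, MMU_USER_SECONDARY_IDX_SK,
    MMU_KERNEL_IDX_SK, MMU_KERNEL_SECONDARY_IDX_SK,
};

/* asi bit 0 = secondary address space; priv = PSTATE.PRIV */
static int select_mmu_idx(int asi, bool priv)
{
    bool secondary = asi & 1;

    if (priv)
        return secondary ? MMU_KERNEL_SECONDARY_IDX_SK : MMU_KERNEL_IDX_SK;
    return secondary ? MMU_USER_SECONDARY_IDX_SK : MMU_USER_IDX_SK;
}
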
cpu_stq_kernel(env, addr, val); - break; - } - } - } - } else { - /* secondary space access has lowest asi bit equal to 1 */ - if (asi & 1) { - switch (size) { - case 1: - cpu_stb_user_secondary(env, addr, val); - break; - case 2: - cpu_stw_user_secondary(env, addr, val); - break; - case 4: - cpu_stl_user_secondary(env, addr, val); - break; - case 8: - default: - cpu_stq_user_secondary(env, addr, val); - break; - } - } else { - switch (size) { - case 1: - cpu_stb_user(env, addr, val); - break; - case 2: - cpu_stw_user(env, addr, val); - break; - case 4: - cpu_stl_user(env, addr, val); - break; - case 8: - default: - cpu_stq_user(env, addr, val); - break; - } - } - } - break; - case 0x14: /* Bypass */ - case 0x15: /* Bypass, non-cacheable */ - case 0x1c: /* Bypass LE */ - case 0x1d: /* Bypass, non-cacheable LE */ - { - switch (size) { - case 1: - stb_phys(cs->as, addr, val); - break; - case 2: - stw_phys(cs->as, addr, val); - break; - case 4: - stl_phys(cs->as, addr, val); - break; - case 8: - default: - stq_phys(cs->as, addr, val); - break; - } - } - return; - case 0x24: /* Nucleus quad LDD 128 bit atomic */ - case 0x2c: /* Nucleus quad LDD 128 bit atomic LE - Only ldda allowed */ - helper_raise_exception(env, TT_ILL_INSN); - return; - case 0x04: /* Nucleus */ - case 0x0c: /* Nucleus Little Endian (LE) */ - { - switch (size) { - case 1: - cpu_stb_nucleus(env, addr, val); - break; - case 2: - cpu_stw_nucleus(env, addr, val); - break; - case 4: - cpu_stl_nucleus(env, addr, val); - break; - default: - case 8: - cpu_stq_nucleus(env, addr, val); - break; - } - break; - } - - case 0x4a: /* UPA config */ - /* XXX */ - return; - case 0x45: /* LSU */ - { - uint64_t oldreg; - - oldreg = env->lsu; - env->lsu = val & (DMMU_E | IMMU_E); - /* Mappings generated during D/I MMU disabled mode are - invalid in normal mode */ - if (oldreg != env->lsu) { - DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n", - oldreg, env->lsu); -#ifdef DEBUG_MMU - dump_mmu(stdout, fprintf, env); -#endif - tlb_flush(CPU(cpu), 1); - } - return; - } - case 0x50: /* I-MMU regs */ - { - int reg = (addr >> 3) & 0xf; - uint64_t oldreg; - - oldreg = env->immuregs[reg]; - switch (reg) { - case 0: /* RO */ - return; - case 1: /* Not in I-MMU */ - case 2: - return; - case 3: /* SFSR */ - if ((val & 1) == 0) { - val = 0; /* Clear SFSR */ - } - env->immu.sfsr = val; - break; - case 4: /* RO */ - return; - case 5: /* TSB access */ - DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016" - PRIx64 "\n", env->immu.tsb, val); - env->immu.tsb = val; - break; - case 6: /* Tag access */ - env->immu.tag_access = val; - break; - case 7: - case 8: - return; - default: - break; - } - - if (oldreg != env->immuregs[reg]) { - DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" - PRIx64 "\n", reg, oldreg, env->immuregs[reg]); - } -#ifdef DEBUG_MMU - dump_mmu(stdout, fprintf, env); -#endif - return; - } - case 0x54: /* I-MMU data in */ - replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env); - return; - case 0x55: /* I-MMU data access */ - { - /* TODO: auto demap */ - - unsigned int i = (addr >> 3) & 0x3f; - - replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env); - -#ifdef DEBUG_MMU - DPRINTF_MMU("immu data access replaced entry [%i]\n", i); - dump_mmu(stdout, fprintf, env); -#endif - return; - } - case 0x57: /* I-MMU demap */ - demap_tlb(env->itlb, addr, "immu", env); - return; - case 0x58: /* D-MMU regs */ - { - int reg = (addr >> 3) & 0xf; - uint64_t oldreg; - - oldreg = env->dmmuregs[reg]; - switch 
(reg) { - case 0: /* RO */ - case 4: - return; - case 3: /* SFSR */ - if ((val & 1) == 0) { - val = 0; /* Clear SFSR, Fault address */ - env->dmmu.sfar = 0; - } - env->dmmu.sfsr = val; - break; - case 1: /* Primary context */ - env->dmmu.mmu_primary_context = val; - /* can be optimized to only flush MMU_USER_IDX - and MMU_KERNEL_IDX entries */ - tlb_flush(CPU(cpu), 1); - break; - case 2: /* Secondary context */ - env->dmmu.mmu_secondary_context = val; - /* can be optimized to only flush MMU_USER_SECONDARY_IDX - and MMU_KERNEL_SECONDARY_IDX entries */ - tlb_flush(CPU(cpu), 1); - break; - case 5: /* TSB access */ - DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016" - PRIx64 "\n", env->dmmu.tsb, val); - env->dmmu.tsb = val; - break; - case 6: /* Tag access */ - env->dmmu.tag_access = val; - break; - case 7: /* Virtual Watchpoint */ - case 8: /* Physical Watchpoint */ - default: - env->dmmuregs[reg] = val; - break; - } - - if (oldreg != env->dmmuregs[reg]) { - DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" - PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]); - } -#ifdef DEBUG_MMU - dump_mmu(stdout, fprintf, env); -#endif - return; - } - case 0x5c: /* D-MMU data in */ - replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env); - return; - case 0x5d: /* D-MMU data access */ - { - unsigned int i = (addr >> 3) & 0x3f; - - replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env); - -#ifdef DEBUG_MMU - DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i); - dump_mmu(stdout, fprintf, env); -#endif - return; - } - case 0x5f: /* D-MMU demap */ - demap_tlb(env->dtlb, addr, "dmmu", env); - return; - case 0x49: /* Interrupt data receive */ - env->ivec_status = val & 0x20; - return; - case 0x46: /* D-cache data */ - case 0x47: /* D-cache tag access */ - case 0x4b: /* E-cache error enable */ - case 0x4c: /* E-cache asynchronous fault status */ - case 0x4d: /* E-cache asynchronous fault address */ - case 0x4e: /* E-cache tag data */ - case 0x66: /* I-cache instruction access */ - case 0x67: /* I-cache tag access */ - case 0x6e: /* I-cache predecode */ - case 0x6f: /* I-cache LRU etc. 
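
As the comments above note, writing the D-MMU primary or secondary context register flushes the entire TLB because QEMU's softmmu TLB carries no SPARC context tags; only the affected MMU indexes would strictly need it. The shape of that trade-off, reduced to a sketch (tlb_flush_all is a stand-in):

#include <stdint.h>
#include <stdio.h>

static uint64_t primary_ctx;

static void tlb_flush_all(void) { puts("tlb: full flush"); }

/* Untagged-TLB context switch: any context change invalidates everything. */
static void write_primary_context(uint64_t val)
{
    primary_ctx = val;
    tlb_flush_all();   /* could be narrowed to USER/KERNEL indexes only */
}

int main(void) { write_primary_context(1); return 0; }
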
*/ - case 0x76: /* E-cache tag */ - case 0x7e: /* E-cache tag */ - return; - case 0x51: /* I-MMU 8k TSB pointer, RO */ - case 0x52: /* I-MMU 64k TSB pointer, RO */ - case 0x56: /* I-MMU tag read, RO */ - case 0x59: /* D-MMU 8k TSB pointer, RO */ - case 0x5a: /* D-MMU 64k TSB pointer, RO */ - case 0x5b: /* D-MMU data pointer, RO */ - case 0x5e: /* D-MMU tag read, RO */ - case 0x48: /* Interrupt dispatch, RO */ - case 0x7f: /* Incoming interrupt vector, RO */ - case 0x82: /* Primary no-fault, RO */ - case 0x83: /* Secondary no-fault, RO */ - case 0x8a: /* Primary no-fault LE, RO */ - case 0x8b: /* Secondary no-fault LE, RO */ - default: - cpu_unassigned_access(cs, addr, true, false, 1, size); - return; - } -} -#endif /* CONFIG_USER_ONLY */ - -void helper_ldda_asi(CPUSPARCState *env, target_ulong addr, int asi, int rd) -{ - if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) - || (cpu_has_hypervisor(env) - && asi >= 0x30 && asi < 0x80 - && !(env->hpstate & HS_PRIV))) { - helper_raise_exception(env, TT_PRIV_ACT); - } - - addr = asi_address_mask(env, asi, addr); - - switch (asi) { -#if !defined(CONFIG_USER_ONLY) - case 0x24: /* Nucleus quad LDD 128 bit atomic */ - case 0x2c: /* Nucleus quad LDD 128 bit atomic LE */ - helper_check_align(env, addr, 0xf); - if (rd == 0) { - env->gregs[1] = cpu_ldq_nucleus(env, addr + 8); - if (asi == 0x2c) { - bswap64s(&env->gregs[1]); - } - } else if (rd < 8) { - env->gregs[rd] = cpu_ldq_nucleus(env, addr); - env->gregs[rd + 1] = cpu_ldq_nucleus(env, addr + 8); - if (asi == 0x2c) { - bswap64s(&env->gregs[rd]); - bswap64s(&env->gregs[rd + 1]); - } - } else { - env->regwptr[rd] = cpu_ldq_nucleus(env, addr); - env->regwptr[rd + 1] = cpu_ldq_nucleus(env, addr + 8); - if (asi == 0x2c) { - bswap64s(&env->regwptr[rd]); - bswap64s(&env->regwptr[rd + 1]); - } - } - break; -#endif - default: - helper_check_align(env, addr, 0x3); - if (rd == 0) { - env->gregs[1] = helper_ld_asi(env, addr + 4, asi, 4, 0); - } else if (rd < 8) { - env->gregs[rd] = helper_ld_asi(env, addr, asi, 4, 0); - env->gregs[rd + 1] = helper_ld_asi(env, addr + 4, asi, 4, 0); - } else { - env->regwptr[rd] = helper_ld_asi(env, addr, asi, 4, 0); - env->regwptr[rd + 1] = helper_ld_asi(env, addr + 4, asi, 4, 0); - } - break; - } -} - -void helper_ldf_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, - int rd) -{ - unsigned int i; - target_ulong val; - - helper_check_align(env, addr, 3); - addr = asi_address_mask(env, asi, addr); - - switch (asi) { - case 0xf0: /* UA2007/JPS1 Block load primary */ - case 0xf1: /* UA2007/JPS1 Block load secondary */ - case 0xf8: /* UA2007/JPS1 Block load primary LE */ - case 0xf9: /* UA2007/JPS1 Block load secondary LE */ - if (rd & 7) { - helper_raise_exception(env, TT_ILL_INSN); - return; - } - helper_check_align(env, addr, 0x3f); - for (i = 0; i < 8; i++, rd += 2, addr += 8) { - env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi & 0x8f, 8, 0); - } - return; - - case 0x16: /* UA2007 Block load primary, user privilege */ - case 0x17: /* UA2007 Block load secondary, user privilege */ - case 0x1e: /* UA2007 Block load primary LE, user privilege */ - case 0x1f: /* UA2007 Block load secondary LE, user privilege */ - case 0x70: /* JPS1 Block load primary, user privilege */ - case 0x71: /* JPS1 Block load secondary, user privilege */ - case 0x78: /* JPS1 Block load primary LE, user privilege */ - case 0x79: /* JPS1 Block load secondary LE, user privilege */ - if (rd & 7) { - helper_raise_exception(env, TT_ILL_INSN); - return; - } - helper_check_align(env, addr, 0x3f); - for 
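
helper_ldda_asi() above also encodes a SPARC register-file quirk: ldda with rd == 0 still loads the odd word into %g1 while the even word, destined for the always-zero %g0, is simply dropped. The pair-update rule in isolation (toy register file; load64 is a dummy):

#include <stdint.h>

static uint64_t gregs[8];   /* %g0..%g7; %g0 is never written */

static uint64_t load64(uint64_t addr) { return addr * 0x0101u; } /* dummy */

/* ldda-style pair load into globals: the %g0 half is discarded. */
static void ldda_pair(uint64_t addr, int rd)
{
    if (rd == 0) {
        gregs[1] = load64(addr + 8);   /* only the odd half lands */
    } else {
        gregs[rd] = load64(addr);
        gregs[rd + 1] = load64(addr + 8);
    }
}
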
(i = 0; i < 8; i++, rd += 2, addr += 8) { - env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi & 0x19, 8, 0); - } - return; - - default: - break; - } - - switch (size) { - default: - case 4: - val = helper_ld_asi(env, addr, asi, size, 0); - if (rd & 1) { - env->fpr[rd / 2].l.lower = val; - } else { - env->fpr[rd / 2].l.upper = val; - } - break; - case 8: - env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi, size, 0); - break; - case 16: - env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi, 8, 0); - env->fpr[rd / 2 + 1].ll = helper_ld_asi(env, addr + 8, asi, 8, 0); - break; - } -} - -void helper_stf_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, - int rd) -{ - unsigned int i; - target_ulong val; - - addr = asi_address_mask(env, asi, addr); - - switch (asi) { - case 0xe0: /* UA2007/JPS1 Block commit store primary (cache flush) */ - case 0xe1: /* UA2007/JPS1 Block commit store secondary (cache flush) */ - case 0xf0: /* UA2007/JPS1 Block store primary */ - case 0xf1: /* UA2007/JPS1 Block store secondary */ - case 0xf8: /* UA2007/JPS1 Block store primary LE */ - case 0xf9: /* UA2007/JPS1 Block store secondary LE */ - if (rd & 7) { - helper_raise_exception(env, TT_ILL_INSN); - return; - } - helper_check_align(env, addr, 0x3f); - for (i = 0; i < 8; i++, rd += 2, addr += 8) { - helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi & 0x8f, 8); - } - - return; - case 0x16: /* UA2007 Block load primary, user privilege */ - case 0x17: /* UA2007 Block load secondary, user privilege */ - case 0x1e: /* UA2007 Block load primary LE, user privilege */ - case 0x1f: /* UA2007 Block load secondary LE, user privilege */ - case 0x70: /* JPS1 Block store primary, user privilege */ - case 0x71: /* JPS1 Block store secondary, user privilege */ - case 0x78: /* JPS1 Block load primary LE, user privilege */ - case 0x79: /* JPS1 Block load secondary LE, user privilege */ - if (rd & 7) { - helper_raise_exception(env, TT_ILL_INSN); - return; - } - helper_check_align(env, addr, 0x3f); - for (i = 0; i < 8; i++, rd += 2, addr += 8) { - helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi & 0x19, 8); - } - - return; - case 0xd2: /* 16-bit floating point load primary */ - case 0xd3: /* 16-bit floating point load secondary */ - case 0xda: /* 16-bit floating point load primary, LE */ - case 0xdb: /* 16-bit floating point load secondary, LE */ - helper_check_align(env, addr, 1); - /* Fall through */ - case 0xd0: /* 8-bit floating point load primary */ - case 0xd1: /* 8-bit floating point load secondary */ - case 0xd8: /* 8-bit floating point load primary, LE */ - case 0xd9: /* 8-bit floating point load secondary, LE */ - val = env->fpr[rd / 2].l.lower; - helper_st_asi(env, addr, val, asi & 0x8d, ((asi & 2) >> 1) + 1); - return; - default: - helper_check_align(env, addr, 3); - break; - } - - switch (size) { - default: - case 4: - if (rd & 1) { - val = env->fpr[rd / 2].l.lower; - } else { - val = env->fpr[rd / 2].l.upper; - } - helper_st_asi(env, addr, val, asi, size); - break; - case 8: - helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi, size); - break; - case 16: - helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi, 8); - helper_st_asi(env, addr + 8, env->fpr[rd / 2 + 1].ll, asi, 8); - break; - } -} - -target_ulong helper_casx_asi(CPUSPARCState *env, target_ulong addr, - target_ulong val1, target_ulong val2, - uint32_t asi) -{ - target_ulong ret; - - ret = helper_ld_asi(env, addr, asi, 8, 0); - if (val2 == ret) { - helper_st_asi(env, addr, val1, asi, 8); - } - return ret; -} -#endif /* TARGET_SPARC64 */ - -#if 
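
helper_casx_asi() at the end of this hunk composes compare-and-swap from an ASI load plus a conditional ASI store, returning the old memory value either way. The same control flow against ordinary memory (no atomicity is claimed here, only the semantics):

#include <stdint.h>
#include <assert.h>

/* casx semantics: if *addr == cmp, store newval; always return old value. */
static uint64_t casx(uint64_t *addr, uint64_t newval, uint64_t cmp)
{
    uint64_t old = *addr;
    if (old == cmp)
        *addr = newval;
    return old;
}

int main(void)
{
    uint64_t cell = 5;
    assert(casx(&cell, 9, 5) == 5 && cell == 9);  /* swap happened */
    assert(casx(&cell, 7, 5) == 9 && cell == 9);  /* mismatch: no store */
    return 0;
}
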
!defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) -target_ulong helper_cas_asi(CPUSPARCState *env, target_ulong addr, - target_ulong val1, target_ulong val2, uint32_t asi) -{ - target_ulong ret; - - val2 &= 0xffffffffUL; - ret = helper_ld_asi(env, addr, asi, 4, 0); - ret &= 0xffffffffUL; - if (val2 == ret) { - helper_st_asi(env, addr, val1 & 0xffffffffUL, asi, 4); - } - return ret; -} -#endif /* !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) */ - -void helper_ldqf(CPUSPARCState *env, target_ulong addr, int mem_idx) -{ - /* XXX add 128 bit load */ - CPU_QuadU u; - - helper_check_align(env, addr, 7); -#if !defined(CONFIG_USER_ONLY) - switch (mem_idx) { - case MMU_USER_IDX: - u.ll.upper = cpu_ldq_user(env, addr); - u.ll.lower = cpu_ldq_user(env, addr + 8); - QT0 = u.q; - break; - case MMU_KERNEL_IDX: - u.ll.upper = cpu_ldq_kernel(env, addr); - u.ll.lower = cpu_ldq_kernel(env, addr + 8); - QT0 = u.q; - break; -#ifdef TARGET_SPARC64 - case MMU_HYPV_IDX: - u.ll.upper = cpu_ldq_hypv(env, addr); - u.ll.lower = cpu_ldq_hypv(env, addr + 8); - QT0 = u.q; - break; -#endif - default: - DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx); - break; - } -#else - u.ll.upper = ldq_raw(address_mask(env, addr)); - u.ll.lower = ldq_raw(address_mask(env, addr + 8)); - QT0 = u.q; -#endif -} - -void helper_stqf(CPUSPARCState *env, target_ulong addr, int mem_idx) -{ - /* XXX add 128 bit store */ - CPU_QuadU u; - - helper_check_align(env, addr, 7); -#if !defined(CONFIG_USER_ONLY) - switch (mem_idx) { - case MMU_USER_IDX: - u.q = QT0; - cpu_stq_user(env, addr, u.ll.upper); - cpu_stq_user(env, addr + 8, u.ll.lower); - break; - case MMU_KERNEL_IDX: - u.q = QT0; - cpu_stq_kernel(env, addr, u.ll.upper); - cpu_stq_kernel(env, addr + 8, u.ll.lower); - break; -#ifdef TARGET_SPARC64 - case MMU_HYPV_IDX: - u.q = QT0; - cpu_stq_hypv(env, addr, u.ll.upper); - cpu_stq_hypv(env, addr + 8, u.ll.lower); - break; -#endif - default: - DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx); - break; - } -#else - u.q = QT0; - stq_raw(address_mask(env, addr), u.ll.upper); - stq_raw(address_mask(env, addr + 8), u.ll.lower); -#endif -} - -#if !defined(CONFIG_USER_ONLY) -#ifndef TARGET_SPARC64 -void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr, - bool is_write, bool is_exec, int is_asi, - unsigned size) -{ - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); - CPUSPARCState *env = &cpu->env; - int fault_type; - -#ifdef DEBUG_UNASSIGNED - if (is_asi) { - printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx - " asi 0x%02x from " TARGET_FMT_lx "\n", - is_exec ? "exec" : is_write ? "write" : "read", size, - size == 1 ? "" : "s", addr, is_asi, env->pc); - } else { - printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx - " from " TARGET_FMT_lx "\n", - is_exec ? "exec" : is_write ? "write" : "read", size, - size == 1 ? 
"" : "s", addr, env->pc); - } -#endif - /* Don't overwrite translation and access faults */ - fault_type = (env->mmuregs[3] & 0x1c) >> 2; - if ((fault_type > 4) || (fault_type == 0)) { - env->mmuregs[3] = 0; /* Fault status register */ - if (is_asi) { - env->mmuregs[3] |= 1 << 16; - } - if (env->psrs) { - env->mmuregs[3] |= 1 << 5; - } - if (is_exec) { - env->mmuregs[3] |= 1 << 6; - } - if (is_write) { - env->mmuregs[3] |= 1 << 7; - } - env->mmuregs[3] |= (5 << 2) | 2; - /* SuperSPARC will never place instruction fault addresses in the FAR */ - if (!is_exec) { - env->mmuregs[4] = addr; /* Fault address register */ - } - } - /* overflow (same type fault was not read before another fault) */ - if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) { - env->mmuregs[3] |= 1; - } - - if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) { - if (is_exec) { - helper_raise_exception(env, TT_CODE_ACCESS); - } else { - helper_raise_exception(env, TT_DATA_ACCESS); - } - } - - /* flush neverland mappings created during no-fault mode, - so the sequential MMU faults report proper fault types */ - if (env->mmuregs[0] & MMU_NF) { - tlb_flush(cs, 1); - } -} -#else -void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr, - bool is_write, bool is_exec, int is_asi, - unsigned size) -{ - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); - CPUSPARCState *env = &cpu->env; - -#ifdef DEBUG_UNASSIGNED - printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx - "\n", addr, env->pc); -#endif - - if (is_exec) { - helper_raise_exception(env, TT_CODE_ACCESS); - } else { - helper_raise_exception(env, TT_DATA_ACCESS); - } -} -#endif -#endif - -#if !defined(CONFIG_USER_ONLY) -void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, - vaddr addr, int is_write, - int is_user, uintptr_t retaddr) -{ - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); - CPUSPARCState *env = &cpu->env; - -#ifdef DEBUG_UNALIGNED - printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx - "\n", addr, env->pc); -#endif - if (retaddr) { - cpu_restore_state(CPU(cpu), retaddr); - } - helper_raise_exception(env, TT_UNALIGNED); -} - -/* try to fill the TLB and return an exception if error. If retaddr is - NULL, it means that the function was called in C code (i.e. 
not - from generated code or from helper.c) */ -/* XXX: fix it to restore all registers */ -void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, - uintptr_t retaddr) -{ - int ret; - - ret = sparc_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); - if (ret) { - if (retaddr) { - cpu_restore_state(cs, retaddr); - } - cpu_loop_exit(cs); - } -} -#endif diff --git a/qemu/target-sparc/unicorn.c b/qemu/target-sparc/unicorn.c deleted file mode 100644 index 8db2b52e..00000000 --- a/qemu/target-sparc/unicorn.c +++ /dev/null @@ -1,151 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh , 2015 */ - -#include "hw/boards.h" -#include "hw/sparc/sparc.h" -#include "sysemu/cpus.h" -#include "unicorn.h" -#include "cpu.h" -#include "unicorn_common.h" -#include "uc_priv.h" - - -const int SPARC_REGS_STORAGE_SIZE = offsetof(CPUSPARCState, tlb_table); - -static bool sparc_stop_interrupt(int intno) -{ - switch(intno) { - default: - return false; - case TT_ILL_INSN: - return true; - } -} - -static void sparc_set_pc(struct uc_struct *uc, uint64_t address) -{ - ((CPUSPARCState *)uc->current_cpu->env_ptr)->pc = address; - ((CPUSPARCState *)uc->current_cpu->env_ptr)->npc = address + 4; -} - -void sparc_release(void *ctx); -void sparc_release(void *ctx) -{ - int i; - TCGContext *tcg_ctx = (TCGContext *) ctx; - release_common(ctx); - g_free(tcg_ctx->cpu_wim); - g_free(tcg_ctx->cpu_cond); - g_free(tcg_ctx->cpu_cc_src); - g_free(tcg_ctx->cpu_cc_src2); - g_free(tcg_ctx->cpu_cc_dst); - g_free(tcg_ctx->cpu_fsr); - g_free(tcg_ctx->sparc_cpu_pc); - g_free(tcg_ctx->cpu_npc); - g_free(tcg_ctx->cpu_y); - g_free(tcg_ctx->cpu_tbr); - - for (i = 0; i < 8; i++) { - g_free(tcg_ctx->cpu_gregs[i]); - } - for (i = 0; i < 32; i++) { - g_free(tcg_ctx->cpu_gpr[i]); - } - - g_free(tcg_ctx->cpu_PC); - g_free(tcg_ctx->btarget); - g_free(tcg_ctx->bcond); - g_free(tcg_ctx->cpu_dspctrl); - - g_free(tcg_ctx->tb_ctx.tbs); -} - -void sparc_reg_reset(struct uc_struct *uc) -{ - CPUArchState *env = uc->cpu->env_ptr; - - memset(env->gregs, 0, sizeof(env->gregs)); - memset(env->fpr, 0, sizeof(env->fpr)); - memset(env->regbase, 0, sizeof(env->regbase)); - - env->pc = 0; - env->npc = 0; - env->regwptr = env->regbase; -} - -int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - void *value = vals[i]; - if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) - *(int32_t *)value = SPARC_CPU(uc, mycpu)->env.gregs[regid - UC_SPARC_REG_G0]; - else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) - *(int32_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[regid - UC_SPARC_REG_O0]; - else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) - *(int32_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[8 + regid - UC_SPARC_REG_L0]; - else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) - *(int32_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[16 + regid - UC_SPARC_REG_I0]; - else { - switch(regid) { - default: break; - case UC_SPARC_REG_PC: - *(int32_t *)value = SPARC_CPU(uc, mycpu)->env.pc; - break; - } - } - } - - return 0; -} - -int sparc_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - const void *value = vals[i]; - if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) - SPARC_CPU(uc, mycpu)->env.gregs[regid - UC_SPARC_REG_G0] = 
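
sparc_reg_read()/sparc_reg_write() above map Unicorn's flat UC_SPARC_REG_* identifiers onto the windowed register file: globals come from gregs[], while outs, locals and ins occupy regwptr[0..7], [8..15] and [16..23]. From the binding side the mapping is invisible; a minimal caller might look like this:

#include <unicorn/unicorn.h>
#include <stdio.h>

int main(void)
{
    uc_engine *uc;
    uint32_t g1 = 0, o0 = 0;

    if (uc_open(UC_ARCH_SPARC, UC_MODE_SPARC32 | UC_MODE_BIG_ENDIAN, &uc))
        return 1;

    /* G1 comes from env.gregs[1], O0 from env.regwptr[0] internally. */
    uc_reg_read(uc, UC_SPARC_REG_G1, &g1);
    uc_reg_read(uc, UC_SPARC_REG_O0, &o0);
    printf("g1=%u o0=%u\n", g1, o0);

    uc_close(uc);
    return 0;
}
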
*(uint32_t *)value; - else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) - SPARC_CPU(uc, mycpu)->env.regwptr[regid - UC_SPARC_REG_O0] = *(uint32_t *)value; - else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) - SPARC_CPU(uc, mycpu)->env.regwptr[8 + regid - UC_SPARC_REG_L0] = *(uint32_t *)value; - else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) - SPARC_CPU(uc, mycpu)->env.regwptr[16 + regid - UC_SPARC_REG_I0] = *(uint32_t *)value; - else { - switch(regid) { - default: break; - case UC_SPARC_REG_PC: - SPARC_CPU(uc, mycpu)->env.pc = *(uint32_t *)value; - SPARC_CPU(uc, mycpu)->env.npc = *(uint32_t *)value + 4; - // force to quit execution and flush TB - uc->quit_request = true; - uc_emu_stop(uc); - break; - } - } - } - - return 0; -} - -DEFAULT_VISIBILITY -void sparc_uc_init(struct uc_struct* uc) -{ - register_accel_types(uc); - sparc_cpu_register_types(uc); - leon3_machine_init(uc); - uc->release = sparc_release; - uc->reg_read = sparc_reg_read; - uc->reg_write = sparc_reg_write; - uc->reg_reset = sparc_reg_reset; - uc->set_pc = sparc_set_pc; - uc->stop_interrupt = sparc_stop_interrupt; - uc_common_init(uc); -} diff --git a/qemu/target-sparc/unicorn64.c b/qemu/target-sparc/unicorn64.c deleted file mode 100644 index e6f07a33..00000000 --- a/qemu/target-sparc/unicorn64.c +++ /dev/null @@ -1,115 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh , 2015 */ - -#include "hw/boards.h" -#include "hw/sparc/sparc.h" -#include "sysemu/cpus.h" -#include "unicorn.h" -#include "cpu.h" -#include "unicorn_common.h" -#include "uc_priv.h" - - -const int SPARC64_REGS_STORAGE_SIZE = offsetof(CPUSPARCState, tlb_table); - -static bool sparc_stop_interrupt(int intno) -{ - switch(intno) { - default: - return false; - case TT_ILL_INSN: - return true; - } -} - -static void sparc_set_pc(struct uc_struct *uc, uint64_t address) -{ - ((CPUSPARCState *)uc->current_cpu->env_ptr)->pc = address; - ((CPUSPARCState *)uc->current_cpu->env_ptr)->npc = address + 4; -} - -void sparc_reg_reset(struct uc_struct *uc) -{ - CPUArchState *env = uc->cpu->env_ptr; - - memset(env->gregs, 0, sizeof(env->gregs)); - memset(env->fpr, 0, sizeof(env->fpr)); - memset(env->regbase, 0, sizeof(env->regbase)); - - env->pc = 0; - env->npc = 0; - env->regwptr = env->regbase; -} - -int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - void *value = vals[i]; - if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) - *(int64_t *)value = SPARC_CPU(uc, mycpu)->env.gregs[regid - UC_SPARC_REG_G0]; - else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) - *(int64_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[regid - UC_SPARC_REG_O0]; - else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) - *(int64_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[8 + regid - UC_SPARC_REG_L0]; - else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) - *(int64_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[16 + regid - UC_SPARC_REG_I0]; - else { - switch(regid) { - default: break; - case UC_SPARC_REG_PC: - *(int64_t *)value = SPARC_CPU(uc, mycpu)->env.pc; - break; - } - } - } - - return 0; -} - -int sparc_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count) -{ - CPUState *mycpu = uc->cpu; - int i; - - for (i = 0; i < count; i++) { - unsigned int regid = regs[i]; - const void *value = vals[i]; - if (regid >= UC_SPARC_REG_G0 && regid <= 
UC_SPARC_REG_G7) - SPARC_CPU(uc, mycpu)->env.gregs[regid - UC_SPARC_REG_G0] = *(uint64_t *)value; - else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) - SPARC_CPU(uc, mycpu)->env.regwptr[regid - UC_SPARC_REG_O0] = *(uint64_t *)value; - else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) - SPARC_CPU(uc, mycpu)->env.regwptr[8 + regid - UC_SPARC_REG_L0] = *(uint64_t *)value; - else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) - SPARC_CPU(uc, mycpu)->env.regwptr[16 + regid - UC_SPARC_REG_I0] = *(uint64_t *)value; - else { - switch(regid) { - default: break; - case UC_SPARC_REG_PC: - SPARC_CPU(uc, mycpu)->env.pc = *(uint64_t *)value; - SPARC_CPU(uc, mycpu)->env.npc = *(uint64_t *)value + 4; - break; - } - } - } - - return 0; -} - -DEFAULT_VISIBILITY -void sparc64_uc_init(struct uc_struct* uc) -{ - register_accel_types(uc); - sparc_cpu_register_types(uc); - sun4u_machine_init(uc); - uc->reg_read = sparc_reg_read; - uc->reg_write = sparc_reg_write; - uc->reg_reset = sparc_reg_reset; - uc->set_pc = sparc_set_pc; - uc->stop_interrupt = sparc_stop_interrupt; - uc_common_init(uc); -} diff --git a/qemu/target/arm/README b/qemu/target/arm/README new file mode 100644 index 00000000..545e30c5 --- /dev/null +++ b/qemu/target/arm/README @@ -0,0 +1,5 @@ +code under arm/ is from arm-softmmu/target/arm/*.inc.c +code under aarch64/ is from aarch64-softmmu/target/aarch64/*.inc.c + +WARNING: these files are auto-generated by scripts/decodetree.py; DO NOT modify them. + diff --git a/qemu/target/arm/arm-powerctl.c b/qemu/target/arm/arm-powerctl.c new file mode 100644 index 00000000..ede54ecb --- /dev/null +++ b/qemu/target/arm/arm-powerctl.c @@ -0,0 +1,356 @@ +/* + * QEMU support -- ARM Power Control specific functions. + * + * Copyright (c) 2016 Jean-Christophe Dubois + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "cpu-qom.h" +#include "internals.h" +#include "arm-powerctl.h" +#include "qemu/log.h" + +#ifndef DEBUG_ARM_POWERCTL +#define DEBUG_ARM_POWERCTL 0 +#endif + +#define DPRINTF(fmt, args...) 
\ + do { \ + if (DEBUG_ARM_POWERCTL) { \ + fprintf(stderr, "[ARM]%s: " fmt , __func__, ##args); \ + } \ + } while (0) + +CPUState *arm_get_cpu_by_id(uint64_t id) +{ + CPUState *cpu; + + DPRINTF("cpu %" PRId64 "\n", id); + + CPU_FOREACH(cpu) { + ARMCPU *armcpu = ARM_CPU(cpu); + + if (armcpu->mp_affinity == id) { + return cpu; + } + } + + qemu_log_mask(LOG_GUEST_ERROR, + "[ARM]%s: Requesting unknown CPU %" PRId64 "\n", + __func__, id); + + return NULL; +} + +struct CpuOnInfo { + uint64_t entry; + uint64_t context_id; + uint32_t target_el; + bool target_aa64; +}; + + +static void arm_set_cpu_on_async_work(CPUState *target_cpu_state, + run_on_cpu_data data) +{ + ARMCPU *target_cpu = ARM_CPU(target_cpu_state); + struct CpuOnInfo *info = (struct CpuOnInfo *) data.host_ptr; + + /* Initialize the cpu we are turning on */ + cpu_reset(target_cpu_state); + target_cpu_state->halted = 0; + + if (info->target_aa64) { + if ((info->target_el < 3) && arm_feature(&target_cpu->env, + ARM_FEATURE_EL3)) { + /* + * As target mode is AArch64, we need to set lower + * exception level (the requested level 2) to AArch64 + */ + target_cpu->env.cp15.scr_el3 |= SCR_RW; + } + + if ((info->target_el < 2) && arm_feature(&target_cpu->env, + ARM_FEATURE_EL2)) { + /* + * As target mode is AArch64, we need to set lower + * exception level (the requested level 1) to AArch64 + */ + target_cpu->env.cp15.hcr_el2 |= HCR_RW; + } + + target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true); + } else { + /* We are requested to boot in AArch32 mode */ + static const uint32_t mode_for_el[] = { 0, + ARM_CPU_MODE_SVC, + ARM_CPU_MODE_HYP, + ARM_CPU_MODE_SVC }; + + cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M, + CPSRWriteRaw); + } + + if (info->target_el == 3) { + /* Processor is in secure mode */ + target_cpu->env.cp15.scr_el3 &= ~SCR_NS; + } else { + /* Processor is not in secure mode */ + target_cpu->env.cp15.scr_el3 |= SCR_NS; + + /* Set NSACR.{CP11,CP10} so NS can access the FPU */ + target_cpu->env.cp15.nsacr |= 3 << 10; + + /* + * If QEMU is providing the equivalent of EL3 firmware, then we need + * to make sure a CPU targeting EL2 comes out of reset with a + * functional HVC insn. + */ + if (arm_feature(&target_cpu->env, ARM_FEATURE_EL3) + && info->target_el == 2) { + target_cpu->env.cp15.scr_el3 |= SCR_HCE; + } + } + + /* We check if the started CPU is now at the correct level */ + assert(info->target_el == arm_current_el(&target_cpu->env)); + + if (info->target_aa64) { + target_cpu->env.xregs[0] = info->context_id; + } else { + target_cpu->env.regs[0] = info->context_id; + } + + /* CP15 update requires rebuilding hflags */ + arm_rebuild_hflags(&target_cpu->env); + + /* Start the new CPU at the requested address */ + cpu_set_pc(target_cpu_state, info->entry); + + g_free(info); + + /* Finally set the power status */ + target_cpu->power_state = PSCI_ON; +} + +int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, + uint32_t target_el, bool target_aa64) +{ + CPUState *target_cpu_state; + ARMCPU *target_cpu; + struct CpuOnInfo *info; + + DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64 + "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry, + context_id); + + /* requested EL level need to be in the 1 to 3 range */ + assert((target_el > 0) && (target_el < 4)); + + if (target_aa64 && (entry & 3)) { + /* + * if we are booting in AArch64 mode then "entry" needs to be 4 bytes + * aligned. 
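
arm_set_cpu_on() opens with pure parameter validation: the requested EL must be 1 to 3 (the source asserts this) and an AArch64 entry point must be 4-byte aligned. Folded into a single predicate (the constants are stand-ins for the QEMU_ARM_POWERCTL_* codes defined in the header below):

#include <stdint.h>
#include <stdbool.h>

#define PWR_OK             0
#define PWR_INVALID_PARAM -2  /* stand-in for QEMU_ARM_POWERCTL_INVALID_PARAM */

/* First gate of arm_set_cpu_on(): EL range and entry alignment. */
static int validate_cpu_on(uint64_t entry, uint32_t target_el, bool aa64)
{
    if (target_el < 1 || target_el > 3)
        return PWR_INVALID_PARAM;
    if (aa64 && (entry & 3))    /* AArch64 entry must be 4-byte aligned */
        return PWR_INVALID_PARAM;
    return PWR_OK;
}
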
+ */ + return QEMU_ARM_POWERCTL_INVALID_PARAM; + } + + /* Retrieve the cpu we are powering up */ + target_cpu_state = arm_get_cpu_by_id(cpuid); + if (!target_cpu_state) { + /* The cpu was not found */ + return QEMU_ARM_POWERCTL_INVALID_PARAM; + } + + target_cpu = ARM_CPU(target_cpu_state); + if (target_cpu->power_state == PSCI_ON) { + qemu_log_mask(LOG_GUEST_ERROR, + "[ARM]%s: CPU %" PRId64 " is already on\n", + __func__, cpuid); + return QEMU_ARM_POWERCTL_ALREADY_ON; + } + + /* + * The newly brought CPU is requested to enter the exception level + * "target_el" and be in the requested mode (AArch64 or AArch32). + */ + + if (((target_el == 3) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) || + ((target_el == 2) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL2))) { + /* + * The CPU does not support requested level + */ + return QEMU_ARM_POWERCTL_INVALID_PARAM; + } + + if (!target_aa64 && arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64)) { + /* + * For now we don't support booting an AArch64 CPU in AArch32 mode + * TODO: We should add this support later + */ + qemu_log_mask(LOG_UNIMP, + "[ARM]%s: Starting AArch64 CPU %" PRId64 + " in AArch32 mode is not supported yet\n", + __func__, cpuid); + return QEMU_ARM_POWERCTL_INVALID_PARAM; + } + + /* + * If another CPU has powered the target on we are in the state + * ON_PENDING and additional attempts to power on the CPU should + * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI + * spec) + */ + if (target_cpu->power_state == PSCI_ON_PENDING) { + qemu_log_mask(LOG_GUEST_ERROR, + "[ARM]%s: CPU %" PRId64 " is already powering on\n", + __func__, cpuid); + return QEMU_ARM_POWERCTL_ON_PENDING; + } + + /* To avoid racing with a CPU we are just kicking off we do the + * final bit of preparation for the work in the target CPUs + * context. 
+ */ + info = g_new(struct CpuOnInfo, 1); + info->entry = entry; + info->context_id = context_id; + info->target_el = target_el; + info->target_aa64 = target_aa64; + + async_run_on_cpu(target_cpu_state, arm_set_cpu_on_async_work, + RUN_ON_CPU_HOST_PTR(info)); + + /* We are good to go */ + return QEMU_ARM_POWERCTL_RET_SUCCESS; +} + +static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state, + run_on_cpu_data data) +{ + ARMCPU *target_cpu = ARM_CPU(target_cpu_state); + + /* Initialize the cpu we are turning on */ + cpu_reset(target_cpu_state); + target_cpu_state->halted = 0; + + /* Finally set the power status */ + target_cpu->power_state = PSCI_ON; +} + +int arm_set_cpu_on_and_reset(uint64_t cpuid) +{ + CPUState *target_cpu_state; + ARMCPU *target_cpu; + + /* Retrieve the cpu we are powering up */ + target_cpu_state = arm_get_cpu_by_id(cpuid); + if (!target_cpu_state) { + /* The cpu was not found */ + return QEMU_ARM_POWERCTL_INVALID_PARAM; + } + + target_cpu = ARM_CPU(target_cpu_state); + if (target_cpu->power_state == PSCI_ON) { + qemu_log_mask(LOG_GUEST_ERROR, + "[ARM]%s: CPU %" PRId64 " is already on\n", + __func__, cpuid); + return QEMU_ARM_POWERCTL_ALREADY_ON; + } + + /* + * If another CPU has powered the target on we are in the state + * ON_PENDING and additional attempts to power on the CPU should + * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI + * spec) + */ + if (target_cpu->power_state == PSCI_ON_PENDING) { + qemu_log_mask(LOG_GUEST_ERROR, + "[ARM]%s: CPU %" PRId64 " is already powering on\n", + __func__, cpuid); + return QEMU_ARM_POWERCTL_ON_PENDING; + } + + async_run_on_cpu(target_cpu_state, arm_set_cpu_on_and_reset_async_work, + RUN_ON_CPU_NULL); + + /* We are good to go */ + return QEMU_ARM_POWERCTL_RET_SUCCESS; +} + +static void arm_set_cpu_off_async_work(CPUState *target_cpu_state, + run_on_cpu_data data) +{ + ARMCPU *target_cpu = ARM_CPU(target_cpu_state); + + target_cpu->power_state = PSCI_OFF; + target_cpu_state->halted = 1; + target_cpu_state->exception_index = EXCP_HLT; +} + +int arm_set_cpu_off(uint64_t cpuid) +{ + CPUState *target_cpu_state; + ARMCPU *target_cpu; + + DPRINTF("cpu %" PRId64 "\n", cpuid); + + /* change to the cpu we are powering up */ + target_cpu_state = arm_get_cpu_by_id(cpuid); + if (!target_cpu_state) { + return QEMU_ARM_POWERCTL_INVALID_PARAM; + } + target_cpu = ARM_CPU(target_cpu_state); + if (target_cpu->power_state == PSCI_OFF) { + qemu_log_mask(LOG_GUEST_ERROR, + "[ARM]%s: CPU %" PRId64 " is already off\n", + __func__, cpuid); + return QEMU_ARM_POWERCTL_IS_OFF; + } + + /* Queue work to run under the target vCPUs context */ + async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work, + RUN_ON_CPU_NULL); + + return QEMU_ARM_POWERCTL_RET_SUCCESS; +} + +static void arm_reset_cpu_async_work(CPUState *target_cpu_state, + run_on_cpu_data data) +{ + /* Reset the cpu */ + cpu_reset(target_cpu_state); +} + +int arm_reset_cpu(uint64_t cpuid) +{ + CPUState *target_cpu_state; + ARMCPU *target_cpu; + + DPRINTF("cpu %" PRId64 "\n", cpuid); + + /* change to the cpu we are resetting */ + target_cpu_state = arm_get_cpu_by_id(cpuid); + if (!target_cpu_state) { + return QEMU_ARM_POWERCTL_INVALID_PARAM; + } + target_cpu = ARM_CPU(target_cpu_state); + + if (target_cpu->power_state == PSCI_OFF) { + qemu_log_mask(LOG_GUEST_ERROR, + "[ARM]%s: CPU %" PRId64 " is off\n", + __func__, cpuid); + return QEMU_ARM_POWERCTL_IS_OFF; + } + + /* Queue work to run under the target vCPUs context */ + async_run_on_cpu(target_cpu_state, 
arm_reset_cpu_async_work, + RUN_ON_CPU_NULL); + + return QEMU_ARM_POWERCTL_RET_SUCCESS; +} diff --git a/qemu/target/arm/arm-powerctl.h b/qemu/target/arm/arm-powerctl.h new file mode 100644 index 00000000..37c8a04f --- /dev/null +++ b/qemu/target/arm/arm-powerctl.h @@ -0,0 +1,93 @@ +/* + * QEMU support -- ARM Power Control specific functions. + * + * Copyright (c) 2016 Jean-Christophe Dubois + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_ARM_POWERCTL_H +#define QEMU_ARM_POWERCTL_H + +#include "kvm-consts.h" + +#define QEMU_ARM_POWERCTL_RET_SUCCESS QEMU_PSCI_RET_SUCCESS +#define QEMU_ARM_POWERCTL_INVALID_PARAM QEMU_PSCI_RET_INVALID_PARAMS +#define QEMU_ARM_POWERCTL_ALREADY_ON QEMU_PSCI_RET_ALREADY_ON +#define QEMU_ARM_POWERCTL_IS_OFF QEMU_PSCI_RET_DENIED +#define QEMU_ARM_POWERCTL_ON_PENDING QEMU_PSCI_RET_ON_PENDING + +/* + * arm_get_cpu_by_id: + * @cpuid: the id of the CPU whose state we want to retrieve + * + * Retrieve a CPUState object from its CPU ID provided in @cpuid. + * + * Returns: a pointer to the CPUState structure of the requested CPU. + */ +CPUState *arm_get_cpu_by_id(uint64_t cpuid); + +/* + * arm_set_cpu_on: + * @cpuid: the id of the CPU we want to start/wake up. + * @entry: the address the CPU shall start from. + * @context_id: the value to put in r0/x0. + * @target_el: The desired exception level. + * @target_aa64: 1 if the requested mode is AArch64. 0 otherwise. + * + * Start the cpu designated by @cpuid at exception level @target_el. The mode + * shall be AArch64 if @target_aa64 is set to 1. Otherwise the mode is + * AArch32. The CPU shall start at @entry with @context_id in r0/x0. + * + * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success. + * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided. + * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU was already started. + * QEMU_ARM_POWERCTL_ON_PENDING if the CPU is still powering up + */ +int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, + uint32_t target_el, bool target_aa64); + +/* + * arm_set_cpu_off: + * @cpuid: the id of the CPU we want to stop/shut down. + * + * Stop the cpu designated by @cpuid. + * + * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success. + * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided. + * QEMU_ARM_POWERCTL_IS_OFF if the CPU is already off + */ + +int arm_set_cpu_off(uint64_t cpuid); + +/* + * arm_reset_cpu: + * @cpuid: the id of the CPU we want to reset. + * + * Reset the cpu designated by @cpuid. + * + * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success. + * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided. + * QEMU_ARM_POWERCTL_IS_OFF if the CPU is off + */ +int arm_reset_cpu(uint64_t cpuid); + +/* + * arm_set_cpu_on_and_reset: + * @cpuid: the id of the CPU we want to start + * + * Start the cpu designated by @cpuid and put it through its normal + * CPU reset process. The CPU will start in the way it is architected + * to start after a power-on reset. + * + * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success. + * QEMU_ARM_POWERCTL_INVALID_PARAM if there is no CPU with that ID. + * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU is already on. + * QEMU_ARM_POWERCTL_ON_PENDING if the CPU is already partway through + * powering on. 
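
Taken together, the header describes a small power state machine: OFF, then ON_PENDING while a CPU_ON request is in flight, then ON, with duplicate requests rejected. A hypothetical PSCI-style caller (the cpuid and EL values are illustrative):

/* Hypothetical caller of the API declared above, inside the QEMU tree. */
#include "arm-powerctl.h"

static int boot_secondary(uint64_t entry, uint64_t ctx)
{
    int ret = arm_set_cpu_on(1 /* cpuid */, entry, ctx,
                             1 /* target EL */, true /* AArch64 */);
    switch (ret) {
    case QEMU_ARM_POWERCTL_RET_SUCCESS:
        return 0;                       /* reset + start queued on target vCPU */
    case QEMU_ARM_POWERCTL_ON_PENDING:  /* racing CPU_ON, see PSCI 6.6 */
    case QEMU_ARM_POWERCTL_ALREADY_ON:
    default:
        return ret;                     /* caller maps this to a PSCI error */
    }
}
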
+ */ +int arm_set_cpu_on_and_reset(uint64_t cpuid); + +#endif diff --git a/qemu/target/arm/arm-semi.c b/qemu/target/arm/arm-semi.c new file mode 100644 index 00000000..873e8ea3 --- /dev/null +++ b/qemu/target/arm/arm-semi.c @@ -0,0 +1,1022 @@ +/* + * Arm "Angel" semihosting syscalls + * + * Copyright (c) 2005, 2007 CodeSourcery. + * Copyright (c) 2019 Linaro + * Written by Paul Brook. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * ARM Semihosting is documented in: + * Semihosting for AArch32 and AArch64 Release 2.0 + * https://static.docs.arm.com/100863/0200/semihosting.pdf + */ + +#include "qemu/osdep.h" + +#include "cpu.h" +//#include "hw/semihosting/semihost.h" +//#include "hw/semihosting/console.h" +#include "qemu/log.h" +#include "exec/gdbstub.h" +#include "qemu/cutils.h" + +#define TARGET_SYS_OPEN 0x01 +#define TARGET_SYS_CLOSE 0x02 +#define TARGET_SYS_WRITEC 0x03 +#define TARGET_SYS_WRITE0 0x04 +#define TARGET_SYS_WRITE 0x05 +#define TARGET_SYS_READ 0x06 +#define TARGET_SYS_READC 0x07 +#define TARGET_SYS_ISTTY 0x09 +#define TARGET_SYS_SEEK 0x0a +#define TARGET_SYS_FLEN 0x0c +#define TARGET_SYS_TMPNAM 0x0d +#define TARGET_SYS_REMOVE 0x0e +#define TARGET_SYS_RENAME 0x0f +#define TARGET_SYS_CLOCK 0x10 +#define TARGET_SYS_TIME 0x11 +#define TARGET_SYS_SYSTEM 0x12 +#define TARGET_SYS_ERRNO 0x13 +#define TARGET_SYS_GET_CMDLINE 0x15 +#define TARGET_SYS_HEAPINFO 0x16 +#define TARGET_SYS_EXIT 0x18 +#define TARGET_SYS_SYNCCACHE 0x19 +#define TARGET_SYS_EXIT_EXTENDED 0x20 + +/* ADP_Stopped_ApplicationExit is used for exit(0), + * anything else is implemented as exit(1) */ +#define ADP_Stopped_ApplicationExit (0x20026) + +#ifndef O_BINARY +#define O_BINARY 0 +#endif + +#define GDB_O_RDONLY 0x000 +#define GDB_O_WRONLY 0x001 +#define GDB_O_RDWR 0x002 +#define GDB_O_APPEND 0x008 +#define GDB_O_CREAT 0x200 +#define GDB_O_TRUNC 0x400 +#define GDB_O_BINARY 0 + +static int gdb_open_modeflags[12] = { + GDB_O_RDONLY, + GDB_O_RDONLY | GDB_O_BINARY, + GDB_O_RDWR, + GDB_O_RDWR | GDB_O_BINARY, + GDB_O_WRONLY | GDB_O_CREAT | GDB_O_TRUNC, + GDB_O_WRONLY | GDB_O_CREAT | GDB_O_TRUNC | GDB_O_BINARY, + GDB_O_RDWR | GDB_O_CREAT | GDB_O_TRUNC, + GDB_O_RDWR | GDB_O_CREAT | GDB_O_TRUNC | GDB_O_BINARY, + GDB_O_WRONLY | GDB_O_CREAT | GDB_O_APPEND, + GDB_O_WRONLY | GDB_O_CREAT | GDB_O_APPEND | GDB_O_BINARY, + GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND, + GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND | GDB_O_BINARY +}; + +static int open_modeflags[12] = { + O_RDONLY, + O_RDONLY | O_BINARY, + O_RDWR, + O_RDWR | O_BINARY, + O_WRONLY | O_CREAT | O_TRUNC, + O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, + O_RDWR | O_CREAT | O_TRUNC, + O_RDWR | O_CREAT | O_TRUNC | O_BINARY, + O_WRONLY | O_CREAT | O_APPEND, + O_WRONLY | O_CREAT | O_APPEND | O_BINARY, + O_RDWR | O_CREAT | O_APPEND, + O_RDWR | O_CREAT | O_APPEND | O_BINARY +}; + +typedef enum GuestFDType { + GuestFDUnused = 0, + GuestFDHost = 1, + GuestFDGDB = 2, + GuestFDFeatureFile = 3, +} GuestFDType; + +/* + * 
+/*
+ * Guest file descriptors are integer indexes into an array of
+ * these structures (we will dynamically resize as necessary).
+ */
+typedef struct GuestFD {
+    GuestFDType type;
+    union {
+        int hostfd;
+        target_ulong featurefile_offset;
+    };
+} GuestFD;
+
+static GArray *guestfd_array;
+
+/*
+ * Allocate a new guest file descriptor and return it; if we
+ * couldn't allocate a new fd then return -1.
+ * This is a fairly simplistic implementation because we don't
+ * expect that most semihosting guest programs will make very
+ * heavy use of opening and closing fds.
+ */
+static int alloc_guestfd(void)
+{
+    guint i;
+
+    if (!guestfd_array) {
+        /* New entries zero-initialized, i.e. type GuestFDUnused */
+        guestfd_array = g_array_new(FALSE, TRUE, sizeof(GuestFD));
+    }
+
+    /* SYS_OPEN should return nonzero handle on success. Start guestfd from 1 */
+    for (i = 1; i < guestfd_array->len; i++) {
+        GuestFD *gf = &g_array_index(guestfd_array, GuestFD, i);
+
+        if (gf->type == GuestFDUnused) {
+            return i;
+        }
+    }
+
+    /* All elements already in use: expand the array */
+    g_array_set_size(guestfd_array, i + 1);
+    return i;
+}
+
+/*
+ * Look up the guestfd in the data structure; return NULL
+ * for out of bounds, but don't check whether the slot is unused.
+ * This is used internally by the other guestfd functions.
+ */
+static GuestFD *do_get_guestfd(int guestfd)
+{
+    if (!guestfd_array) {
+        return NULL;
+    }
+
+    if (guestfd <= 0 || guestfd >= guestfd_array->len) {
+        return NULL;
+    }
+
+    return &g_array_index(guestfd_array, GuestFD, guestfd);
+}
+
+/*
+ * Associate the specified guest fd (which must have been
+ * allocated via alloc_guestfd() and not previously used) with
+ * the specified host/gdb fd.
+ */
+static void associate_guestfd(int guestfd, int hostfd)
+{
+    GuestFD *gf = do_get_guestfd(guestfd);
+
+    assert(gf);
+    gf->type = use_gdb_syscalls() ? GuestFDGDB : GuestFDHost;
+    gf->hostfd = hostfd;
+}
+
+/*
+ * Deallocate the specified guest file descriptor. This doesn't
+ * close the host fd, it merely undoes the work of alloc_guestfd().
+ */
+static void dealloc_guestfd(int guestfd)
+{
+    GuestFD *gf = do_get_guestfd(guestfd);
+
+    assert(gf);
+    gf->type = GuestFDUnused;
+}
+
+/*
+ * Given a guest file descriptor, get the associated struct.
+ * If the fd is not valid, return NULL. This is the function
+ * used by the various semihosting calls to validate a handle
+ * from the guest.
+ * Note: calling alloc_guestfd() or dealloc_guestfd() will
+ * invalidate any GuestFD* obtained by calling this function.
+ */
+static GuestFD *get_guestfd(int guestfd)
+{
+    GuestFD *gf = do_get_guestfd(guestfd);
+
+    if (!gf || gf->type == GuestFDUnused) {
+        return NULL;
+    }
+    return gf;
+}
+
+/*
+ * The semihosting API has no concept of its errno being thread-safe,
+ * as the API design predates SMP CPUs and was intended as a simple
+ * real-hardware set of debug functionality. For QEMU, we make the
+ * errno be per-thread in linux-user mode; in softmmu it is a simple
+ * global, and we assume that the guest takes care of avoiding any races.
+ */
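+
+/*
+ * Illustration (an aside, not part of the original patch): the errno
+ * protocol as the guest sees it. A failing call latches the host errno
+ * into syscall_err via set_swi_errno(); the guest later fetches it with
+ * SYS_ERRNO:
+ *
+ *     ret = semi_call(TARGET_SYS_REMOVE, args);    // -1, errno latched
+ *     err = semi_call(TARGET_SYS_ERRNO, NULL);     // returns syscall_err
+ *
+ * where semi_call() is the hypothetical guest-side wrapper sketched
+ * near do_arm_semihosting() below.
+ */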
+static target_ulong syscall_err;
+
+#include "exec/softmmu-semi.h"
+
+static inline uint32_t set_swi_errno(CPUARMState *env, uint32_t code)
+{
+    if (code == (uint32_t)-1) {
+        syscall_err = errno;
+    }
+    return code;
+}
+
+static inline uint32_t get_swi_errno(CPUARMState *env)
+{
+    return syscall_err;
+}
+
+static target_ulong arm_semi_syscall_len;
+
+static void arm_semi_cb(CPUState *cs, target_ulong ret, target_ulong err)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    target_ulong reg0 = is_a64(env) ? env->xregs[0] : env->regs[0];
+
+    if (ret == (target_ulong)-1) {
+        errno = err;
+        set_swi_errno(env, -1);
+        reg0 = ret;
+    } else {
+        /* Fixup syscalls that use nonstandard return conventions. */
+        switch (reg0) {
+        case TARGET_SYS_WRITE:
+        case TARGET_SYS_READ:
+            reg0 = arm_semi_syscall_len - ret;
+            break;
+        case TARGET_SYS_SEEK:
+            reg0 = 0;
+            break;
+        default:
+            reg0 = ret;
+            break;
+        }
+    }
+    if (is_a64(env)) {
+        env->xregs[0] = reg0;
+    } else {
+        env->regs[0] = reg0;
+    }
+}
+
+static target_ulong arm_flen_buf(ARMCPU *cpu)
+{
+    /* Return an address in target memory of 64 bytes where the remote
+     * gdb should write its stat struct. (The format of this structure
+     * is defined by GDB's remote protocol and is not target-specific.)
+     * We put this on the guest's stack just below SP.
+     */
+    CPUARMState *env = &cpu->env;
+    target_ulong sp;
+
+    if (is_a64(env)) {
+        sp = env->xregs[31];
+    } else {
+        sp = env->regs[13];
+    }
+
+    return sp - 64;
+}
+
+static void arm_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    /* The size is always stored in big-endian order, extract
+       the value. We assume the size always fits in 32 bits. */
+    uint32_t size;
+    cpu_memory_rw_debug(cs, arm_flen_buf(cpu) + 32, (uint8_t *)&size, 4, 0);
+    size = be32_to_cpu(size);
+    if (is_a64(env)) {
+        env->xregs[0] = size;
+    } else {
+        env->regs[0] = size;
+    }
+    errno = err;
+    set_swi_errno(env, -1);
+}
+
+static int arm_semi_open_guestfd;
+
+static void arm_semi_open_cb(CPUState *cs, target_ulong ret, target_ulong err)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    if (ret == (target_ulong)-1) {
+        errno = err;
+        set_swi_errno(env, -1);
+        dealloc_guestfd(arm_semi_open_guestfd);
+    } else {
+        associate_guestfd(arm_semi_open_guestfd, ret);
+        ret = arm_semi_open_guestfd;
+    }
+
+    if (is_a64(env)) {
+        env->xregs[0] = ret;
+    } else {
+        env->regs[0] = ret;
+    }
+}
+
+static target_ulong arm_gdb_syscall(ARMCPU *cpu, gdb_syscall_complete_cb cb,
+                                    const char *fmt, ...)
+{
+    va_list va;
+    CPUARMState *env = &cpu->env;
+
+    va_start(va, fmt);
+    gdb_do_syscallv(cb, fmt, va);
+    va_end(va);
+
+    /*
+     * FIXME: in softmmu mode, the gdbstub will schedule our callback
+     * to occur, but will not actually call it to complete the syscall
+     * until after this function has returned and we are back in the
+     * CPU main loop. Therefore callers of this function must not
+     * do anything with its return value, because it is not necessarily
+     * the result of the syscall, but could just be the old value of X0.
+     * The only thing safe to do with this is that the callers of
+     * do_arm_semihosting() will write it straight back into X0.
+     * (In linux-user mode, the callback will have happened before
+     * gdb_do_syscallv() returns.)
+     *
+     * We should tidy this up so neither this function nor
+     * do_arm_semihosting() returns a value, so the mistake of
+     * doing something with the return value is not possible to make.
+     */
+
+    return is_a64(env) ?
env->xregs[0] : env->regs[0]; +} + +/* + * Types for functions implementing various semihosting calls + * for specific types of guest file descriptor. These must all + * do the work and return the required return value for the guest, + * setting the guest errno if appropriate. + */ +typedef uint32_t sys_closefn(ARMCPU *cpu, GuestFD *gf); +typedef uint32_t sys_writefn(ARMCPU *cpu, GuestFD *gf, + target_ulong buf, uint32_t len); +typedef uint32_t sys_readfn(ARMCPU *cpu, GuestFD *gf, + target_ulong buf, uint32_t len); +typedef uint32_t sys_isattyfn(ARMCPU *cpu, GuestFD *gf); +typedef uint32_t sys_seekfn(ARMCPU *cpu, GuestFD *gf, + target_ulong offset); +typedef uint32_t sys_flenfn(ARMCPU *cpu, GuestFD *gf); + +static uint32_t host_closefn(ARMCPU *cpu, GuestFD *gf) +{ + CPUARMState *env = &cpu->env; + + /* + * Only close the underlying host fd if it's one we opened on behalf + * of the guest in SYS_OPEN. + */ + if (gf->hostfd == STDIN_FILENO || + gf->hostfd == STDOUT_FILENO || + gf->hostfd == STDERR_FILENO) { + return 0; + } + return set_swi_errno(env, close(gf->hostfd)); +} + +static uint32_t host_writefn(ARMCPU *cpu, GuestFD *gf, + target_ulong buf, uint32_t len) +{ + uint32_t ret; + CPUARMState *env = &cpu->env; + char *s = lock_user(VERIFY_READ, buf, len, 1); + if (!s) { + /* Return bytes not written on error */ + return len; + } + ret = set_swi_errno(env, write(gf->hostfd, s, len)); + unlock_user(s, buf, 0); + if (ret == (uint32_t)-1) { + ret = 0; + } + /* Return bytes not written */ + return len - ret; +} + +static uint32_t host_readfn(ARMCPU *cpu, GuestFD *gf, + target_ulong buf, uint32_t len) +{ + uint32_t ret; + CPUARMState *env = &cpu->env; + char *s = lock_user(VERIFY_WRITE, buf, len, 0); + if (!s) { + /* return bytes not read */ + return len; + } + do { + ret = set_swi_errno(env, read(gf->hostfd, s, len)); + } while (ret == -1 && errno == EINTR); + unlock_user(s, buf, len); + if (ret == (uint32_t)-1) { + ret = 0; + } + /* Return bytes not read */ + return len - ret; +} + +static uint32_t host_isattyfn(ARMCPU *cpu, GuestFD *gf) +{ + return isatty(gf->hostfd); +} + +static uint32_t host_seekfn(ARMCPU *cpu, GuestFD *gf, target_ulong offset) +{ + CPUARMState *env = &cpu->env; + uint32_t ret = set_swi_errno(env, lseek(gf->hostfd, offset, SEEK_SET)); + if (ret == (uint32_t)-1) { + return -1; + } + return 0; +} + +static uint32_t host_flenfn(ARMCPU *cpu, GuestFD *gf) +{ + CPUARMState *env = &cpu->env; + struct stat buf; + uint32_t ret = set_swi_errno(env, fstat(gf->hostfd, &buf)); + if (ret == (uint32_t)-1) { + return -1; + } + return buf.st_size; +} + +static uint32_t gdb_closefn(ARMCPU *cpu, GuestFD *gf) +{ + return arm_gdb_syscall(cpu, arm_semi_cb, "close,%x", gf->hostfd); +} + +static uint32_t gdb_writefn(ARMCPU *cpu, GuestFD *gf, + target_ulong buf, uint32_t len) +{ + arm_semi_syscall_len = len; + return arm_gdb_syscall(cpu, arm_semi_cb, "write,%x,%x,%x", + gf->hostfd, buf, len); +} + +static uint32_t gdb_readfn(ARMCPU *cpu, GuestFD *gf, + target_ulong buf, uint32_t len) +{ + arm_semi_syscall_len = len; + return arm_gdb_syscall(cpu, arm_semi_cb, "read,%x,%x,%x", + gf->hostfd, buf, len); +} + +static uint32_t gdb_isattyfn(ARMCPU *cpu, GuestFD *gf) +{ + return arm_gdb_syscall(cpu, arm_semi_cb, "isatty,%x", gf->hostfd); +} + +static uint32_t gdb_seekfn(ARMCPU *cpu, GuestFD *gf, target_ulong offset) +{ + return arm_gdb_syscall(cpu, arm_semi_cb, "lseek,%x,%x,0", + gf->hostfd, offset); +} + +static uint32_t gdb_flenfn(ARMCPU *cpu, GuestFD *gf) +{ + return arm_gdb_syscall(cpu, 
arm_semi_flen_cb, "fstat,%x,%x", + gf->hostfd, arm_flen_buf(cpu)); +} + +#define SHFB_MAGIC_0 0x53 +#define SHFB_MAGIC_1 0x48 +#define SHFB_MAGIC_2 0x46 +#define SHFB_MAGIC_3 0x42 + +/* Feature bits reportable in feature byte 0 */ +#define SH_EXT_EXIT_EXTENDED (1 << 0) +#define SH_EXT_STDOUT_STDERR (1 << 1) + +static const uint8_t featurefile_data[] = { + SHFB_MAGIC_0, + SHFB_MAGIC_1, + SHFB_MAGIC_2, + SHFB_MAGIC_3, + SH_EXT_EXIT_EXTENDED | SH_EXT_STDOUT_STDERR, /* Feature byte 0 */ +}; + +static void init_featurefile_guestfd(int guestfd) +{ + GuestFD *gf = do_get_guestfd(guestfd); + + assert(gf); + gf->type = GuestFDFeatureFile; + gf->featurefile_offset = 0; +} + +static uint32_t featurefile_closefn(ARMCPU *cpu, GuestFD *gf) +{ + /* Nothing to do */ + return 0; +} + +static uint32_t featurefile_writefn(ARMCPU *cpu, GuestFD *gf, + target_ulong buf, uint32_t len) +{ + /* This fd can never be open for writing */ + CPUARMState *env = &cpu->env; + + errno = EBADF; + return set_swi_errno(env, -1); +} + +static uint32_t featurefile_readfn(ARMCPU *cpu, GuestFD *gf, + target_ulong buf, uint32_t len) +{ + uint32_t i; + CPUARMState *env = &cpu->env; + char *s; + + s = lock_user(VERIFY_WRITE, buf, len, 0); + if (!s) { + return len; + } + + for (i = 0; i < len; i++) { + if (gf->featurefile_offset >= sizeof(featurefile_data)) { + break; + } + s[i] = featurefile_data[gf->featurefile_offset]; + gf->featurefile_offset++; + } + + unlock_user(s, buf, len); + + /* Return number of bytes not read */ + return len - i; +} + +static uint32_t featurefile_isattyfn(ARMCPU *cpu, GuestFD *gf) +{ + return 0; +} + +static uint32_t featurefile_seekfn(ARMCPU *cpu, GuestFD *gf, + target_ulong offset) +{ + gf->featurefile_offset = offset; + return 0; +} + +static uint32_t featurefile_flenfn(ARMCPU *cpu, GuestFD *gf) +{ + return sizeof(featurefile_data); +} + +typedef struct GuestFDFunctions { + sys_closefn *closefn; + sys_writefn *writefn; + sys_readfn *readfn; + sys_isattyfn *isattyfn; + sys_seekfn *seekfn; + sys_flenfn *flenfn; +} GuestFDFunctions; + +static const GuestFDFunctions guestfd_fns[] = { + [GuestFDHost] = { + .closefn = host_closefn, + .writefn = host_writefn, + .readfn = host_readfn, + .isattyfn = host_isattyfn, + .seekfn = host_seekfn, + .flenfn = host_flenfn, + }, + [GuestFDGDB] = { + .closefn = gdb_closefn, + .writefn = gdb_writefn, + .readfn = gdb_readfn, + .isattyfn = gdb_isattyfn, + .seekfn = gdb_seekfn, + .flenfn = gdb_flenfn, + }, + [GuestFDFeatureFile] = { + .closefn = featurefile_closefn, + .writefn = featurefile_writefn, + .readfn = featurefile_readfn, + .isattyfn = featurefile_isattyfn, + .seekfn = featurefile_seekfn, + .flenfn = featurefile_flenfn, + }, +}; + +/* Read the input value from the argument block; fail the semihosting + * call if the memory read fails. + */ +#define GET_ARG(n) do { \ + if (is_a64(env)) { \ + if (get_user_u64(arg ## n, args + (n) * 8)) { \ + errno = EFAULT; \ + return set_swi_errno(env, -1); \ + } \ + } else { \ + if (get_user_u32(arg ## n, args + (n) * 4)) { \ + errno = EFAULT; \ + return set_swi_errno(env, -1); \ + } \ + } \ +} while (0) + +#define SET_ARG(n, val) \ + (is_a64(env) ? \ + put_user_u64(val, args + (n) * 8) : \ + put_user_u32(val, args + (n) * 4)) + +/* + * Do a semihosting call. + * + * The specification always says that the "return register" either + * returns a specific value or is corrupted, so we don't need to + * report to our caller whether we are returning a value or trying to + * leave the register unchanged. 
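+ * (The call interface: the guest places the operation number in r0/W0
+ * and a pointer to the argument block in r1/X1; see the top of
+ * do_arm_semihosting() below.)
+ *
+ * As a guest-side sketch -- an illustrative aside, not part of the
+ * original patch -- assuming A32 state and GCC-style inline assembly,
+ * where SVC #0x123456 is the architected A32 semihosting trap:
+ *
+ *     static inline uint32_t semi_call(uint32_t op, void *argblock)
+ *     {
+ *         register uint32_t r0 __asm__("r0") = op;
+ *         register void *r1 __asm__("r1") = argblock;
+ *         __asm__ volatile("svc #0x123456" : "+r"(r0) : "r"(r1) : "memory");
+ *         return r0;
+ *     }
+ *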
We use 0xdeadbeef as the return value + * when there isn't a defined return value for the call. + */ +target_ulong do_arm_semihosting(CPUARMState *env) +{ + ARMCPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + target_ulong args; + target_ulong arg0, arg1, arg2, arg3; + char * s; + int nr; + uint32_t ret; + uint32_t len; + GuestFD *gf; + + if (is_a64(env)) { + /* Note that the syscall number is in W0, not X0 */ + nr = env->xregs[0] & 0xffffffffU; + args = env->xregs[1]; + } else { + nr = env->regs[0]; + args = env->regs[1]; + } + + switch (nr) { + case TARGET_SYS_OPEN: + { + int guestfd; + + GET_ARG(0); + GET_ARG(1); + GET_ARG(2); + s = lock_user_string(arg0); + if (!s) { + errno = EFAULT; + return set_swi_errno(env, -1); + } + if (arg1 >= 12) { + unlock_user(s, arg0, 0); + errno = EINVAL; + return set_swi_errno(env, -1); + } + + guestfd = alloc_guestfd(); + if (guestfd < 0) { + unlock_user(s, arg0, 0); + errno = EMFILE; + return set_swi_errno(env, -1); + } + + if (strcmp(s, ":tt") == 0) { + int result_fileno; + + /* + * We implement SH_EXT_STDOUT_STDERR, so: + * open for read == stdin + * open for write == stdout + * open for append == stderr + */ + if (arg1 < 4) { + result_fileno = STDIN_FILENO; + } else if (arg1 < 8) { + result_fileno = STDOUT_FILENO; + } else { + result_fileno = STDERR_FILENO; + } + associate_guestfd(guestfd, result_fileno); + unlock_user(s, arg0, 0); + return guestfd; + } + if (strcmp(s, ":semihosting-features") == 0) { + unlock_user(s, arg0, 0); + /* We must fail opens for modes other than 0 ('r') or 1 ('rb') */ + if (arg1 != 0 && arg1 != 1) { + dealloc_guestfd(guestfd); + errno = EACCES; + return set_swi_errno(env, -1); + } + init_featurefile_guestfd(guestfd); + return guestfd; + } + + if (use_gdb_syscalls()) { + arm_semi_open_guestfd = guestfd; + ret = arm_gdb_syscall(cpu, arm_semi_open_cb, "open,%s,%x,1a4", arg0, + (int)arg2+1, gdb_open_modeflags[arg1]); + } else { + ret = set_swi_errno(env, open(s, open_modeflags[arg1], 0644)); + if (ret == (uint32_t)-1) { + dealloc_guestfd(guestfd); + } else { + associate_guestfd(guestfd, ret); + ret = guestfd; + } + } + unlock_user(s, arg0, 0); + return ret; + } + case TARGET_SYS_CLOSE: + GET_ARG(0); + + gf = get_guestfd(arg0); + if (!gf) { + errno = EBADF; + return set_swi_errno(env, -1); + } + + ret = guestfd_fns[gf->type].closefn(cpu, gf); + dealloc_guestfd(arg0); + return ret; + case TARGET_SYS_WRITEC: + qemu_semihosting_console_outc(env, args); + return 0xdeadbeef; + case TARGET_SYS_WRITE0: + return qemu_semihosting_console_outs(env, args); + case TARGET_SYS_WRITE: + GET_ARG(0); + GET_ARG(1); + GET_ARG(2); + len = arg2; + + gf = get_guestfd(arg0); + if (!gf) { + errno = EBADF; + return set_swi_errno(env, -1); + } + + return guestfd_fns[gf->type].writefn(cpu, gf, arg1, len); + case TARGET_SYS_READ: + GET_ARG(0); + GET_ARG(1); + GET_ARG(2); + len = arg2; + + gf = get_guestfd(arg0); + if (!gf) { + errno = EBADF; + return set_swi_errno(env, -1); + } + + return guestfd_fns[gf->type].readfn(cpu, gf, arg1, len); + case TARGET_SYS_READC: + return qemu_semihosting_console_inc(env); + case TARGET_SYS_ISTTY: + GET_ARG(0); + + gf = get_guestfd(arg0); + if (!gf) { + errno = EBADF; + return set_swi_errno(env, -1); + } + + return guestfd_fns[gf->type].isattyfn(cpu, gf); + case TARGET_SYS_SEEK: + GET_ARG(0); + GET_ARG(1); + + gf = get_guestfd(arg0); + if (!gf) { + errno = EBADF; + return set_swi_errno(env, -1); + } + + return guestfd_fns[gf->type].seekfn(cpu, gf, arg1); + case TARGET_SYS_FLEN: + GET_ARG(0); + + gf = 
get_guestfd(arg0); + if (!gf) { + errno = EBADF; + return set_swi_errno(env, -1); + } + + return guestfd_fns[gf->type].flenfn(cpu, gf); + case TARGET_SYS_TMPNAM: + qemu_log_mask(LOG_UNIMP, "%s: SYS_TMPNAM not implemented", __func__); + return -1; + case TARGET_SYS_REMOVE: + GET_ARG(0); + GET_ARG(1); + if (use_gdb_syscalls()) { + ret = arm_gdb_syscall(cpu, arm_semi_cb, "unlink,%s", + arg0, (int)arg1+1); + } else { + s = lock_user_string(arg0); + if (!s) { + errno = EFAULT; + return set_swi_errno(env, -1); + } + ret = set_swi_errno(env, remove(s)); + unlock_user(s, arg0, 0); + } + return ret; + case TARGET_SYS_RENAME: + GET_ARG(0); + GET_ARG(1); + GET_ARG(2); + GET_ARG(3); + if (use_gdb_syscalls()) { + return arm_gdb_syscall(cpu, arm_semi_cb, "rename,%s,%s", + arg0, (int)arg1+1, arg2, (int)arg3+1); + } else { + char *s2; + s = lock_user_string(arg0); + s2 = lock_user_string(arg2); + if (!s || !s2) { + errno = EFAULT; + ret = set_swi_errno(env, -1); + } else { + ret = set_swi_errno(env, rename(s, s2)); + } + if (s2) + unlock_user(s2, arg2, 0); + if (s) + unlock_user(s, arg0, 0); + return ret; + } + case TARGET_SYS_CLOCK: + return clock() / (CLOCKS_PER_SEC / 100); + case TARGET_SYS_TIME: + return set_swi_errno(env, time(NULL)); + case TARGET_SYS_SYSTEM: + GET_ARG(0); + GET_ARG(1); + if (use_gdb_syscalls()) { + return arm_gdb_syscall(cpu, arm_semi_cb, "system,%s", + arg0, (int)arg1+1); + } else { + s = lock_user_string(arg0); + if (!s) { + errno = EFAULT; + return set_swi_errno(env, -1); + } + ret = set_swi_errno(env, system(s)); + unlock_user(s, arg0, 0); + return ret; + } + case TARGET_SYS_ERRNO: + return get_swi_errno(env); + case TARGET_SYS_GET_CMDLINE: + { + /* Build a command-line from the original argv. + * + * The inputs are: + * * arg0, pointer to a buffer of at least the size + * specified in arg1. + * * arg1, size of the buffer pointed to by arg0 in + * bytes. + * + * The outputs are: + * * arg0, pointer to null-terminated string of the + * command line. + * * arg1, length of the string pointed to by arg0. + */ + + char *output_buffer; + size_t input_size; + size_t output_size; + int status = 0; + const char *cmdline; + GET_ARG(0); + GET_ARG(1); + input_size = arg1; + /* Compute the size of the output string. */ + cmdline = semihosting_get_cmdline(); + if (cmdline == NULL) { + cmdline = ""; /* Default to an empty line. */ + } + output_size = strlen(cmdline) + 1; /* Count terminating 0. */ + + if (output_size > input_size) { + /* Not enough space to store command-line arguments. */ + errno = E2BIG; + return set_swi_errno(env, -1); + } + + /* Adjust the command-line length. */ + if (SET_ARG(1, output_size - 1)) { + /* Couldn't write back to argument block */ + errno = EFAULT; + return set_swi_errno(env, -1); + } + + /* Lock the buffer on the ARM side. */ + output_buffer = lock_user(VERIFY_WRITE, arg0, output_size, 0); + if (!output_buffer) { + errno = EFAULT; + return set_swi_errno(env, -1); + } + + /* Copy the command-line arguments. */ + pstrcpy(output_buffer, output_size, cmdline); + /* Unlock the buffer on the ARM side. */ + unlock_user(output_buffer, arg0, output_size); + + return status; + } + case TARGET_SYS_HEAPINFO: + { + target_ulong retvals[4]; + target_ulong limit; + int i; + + GET_ARG(0); + + limit = ram_size; + /* TODO: Make this use the limit of the loaded application. */ + retvals[0] = limit / 2; + retvals[1] = limit; + retvals[2] = limit; /* Stack base */ + retvals[3] = 0; /* Stack limit. 
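+
+                A guest-side sketch (hypothetical, not part of the
+                original patch) of consuming the four-word block written
+                by the loop below, with block being the uint32_t guest
+                pointer passed in arg0:
+
+                    heap_base   = block[0];
+                    heap_limit  = block[1];
+                    stack_base  = block[2];
+                    stack_limit = block[3];
+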
+        */
+
+        for (i = 0; i < ARRAY_SIZE(retvals); i++) {
+            bool fail;
+
+            if (is_a64(env)) {
+                fail = put_user_u64(retvals[i], arg0 + i * 8);
+            } else {
+                fail = put_user_u32(retvals[i], arg0 + i * 4);
+            }
+
+            if (fail) {
+                /* Couldn't write back to argument block */
+                errno = EFAULT;
+                return set_swi_errno(env, -1);
+            }
+        }
+        return 0;
+    }
+    case TARGET_SYS_EXIT:
+    case TARGET_SYS_EXIT_EXTENDED:
+        if (nr == TARGET_SYS_EXIT_EXTENDED || is_a64(env)) {
+            /*
+             * The A64 version of SYS_EXIT takes a parameter block,
+             * so the application-exit type can return a subcode which
+             * is the exit status code from the application.
+             * SYS_EXIT_EXTENDED is a new-in-v2.0 optional function
+             * which allows A32/T32 guests to also provide a status code.
+             */
+            GET_ARG(0);
+            GET_ARG(1);
+
+            if (arg0 == ADP_Stopped_ApplicationExit) {
+                ret = arg1;
+            } else {
+                ret = 1;
+            }
+        } else {
+            /*
+             * The A32/T32 version of SYS_EXIT specifies only
+             * Stopped_ApplicationExit as normal exit, but does not
+             * allow the guest to specify the exit status code.
+             * Everything else is considered an error.
+             */
+            ret = (args == ADP_Stopped_ApplicationExit) ? 0 : 1;
+        }
+        gdb_exit(env, ret);
+        exit(ret);
+    case TARGET_SYS_SYNCCACHE:
+        /*
+         * Clean the D-cache and invalidate the I-cache for the specified
+         * virtual address range. This is a nop for us since we don't
+         * implement caches. This is only present on A64.
+         */
+        if (is_a64(env)) {
+            return 0;
+        }
+        /* fall through -- invalid for A32/T32 */
+    default:
+        fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr);
+        cpu_dump_state(cs, stderr, 0);
+        abort();
+    }
+}
diff --git a/qemu/target-arm/arm_ldst.h b/qemu/target/arm/arm_ldst.h
similarity index 67%
rename from qemu/target-arm/arm_ldst.h
rename to qemu/target/arm/arm_ldst.h
index b1ece017..709a2ed3 100644
--- a/qemu/target-arm/arm_ldst.h
+++ b/qemu/target/arm/arm_ldst.h
@@ -20,29 +20,31 @@
 #ifndef ARM_LDST_H
 #define ARM_LDST_H
 
-#include "exec/cpu_ldst.h"
+#include "exec/translator.h"
 #include "qemu/bswap.h"
 
+#include
+
 /* Load an instruction and return it in the standard little-endian order */
 static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
-                                    bool do_swap)
+                                    bool sctlr_b)
 {
-    uint32_t insn = cpu_ldl_code(env, addr);
-    if (do_swap) {
-        return bswap32(insn);
-    }
-    return insn;
+    TCGContext *tcg_ctx = env->uc->tcg_ctx;
+
+    return translator_ldl_swap(tcg_ctx, env, addr, bswap_code(sctlr_b));
 }
 
 /* Ditto, for a halfword (Thumb) instruction */
 static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
-                                     bool do_swap)
+                                     bool sctlr_b)
 {
-    uint16_t insn = cpu_lduw_code(env, addr);
-    if (do_swap) {
-        return bswap16(insn);
+    TCGContext *tcg_ctx = env->uc->tcg_ctx;
+    /* In big-endian (BE32) mode, adjacent Thumb instructions have been swapped
+       within each word. Undo that now. */
+    if (sctlr_b) {
+        addr ^= 2;
     }
-    return insn;
+    return translator_lduw_swap(tcg_ctx, env, addr, bswap_code(sctlr_b));
 }
 
 #endif
diff --git a/qemu/target/arm/cpu-param.h b/qemu/target/arm/cpu-param.h
new file mode 100644
index 00000000..208858c7
--- /dev/null
+++ b/qemu/target/arm/cpu-param.h
@@ -0,0 +1,30 @@
+/*
+ * ARM cpu parameters for qemu.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef ARM_CPU_PARAM_H
+#define ARM_CPU_PARAM_H 1
+
+#ifdef TARGET_AARCH64
+# define TARGET_LONG_BITS 64
+# define TARGET_PHYS_ADDR_SPACE_BITS 48
+# define TARGET_VIRT_ADDR_SPACE_BITS 48
+#else
+# define TARGET_LONG_BITS 32
+# define TARGET_PHYS_ADDR_SPACE_BITS 40
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+
+/*
+ * ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6
+ * have to support 1K tiny pages.
+ */
+# define TARGET_PAGE_BITS_VARY
+# define TARGET_PAGE_BITS_MIN 10
+
+#define NB_MMU_MODES 12
+
+#endif
diff --git a/qemu/target/arm/cpu-qom.h b/qemu/target/arm/cpu-qom.h
new file mode 100644
index 00000000..963a628d
--- /dev/null
+++ b/qemu/target/arm/cpu-qom.h
@@ -0,0 +1,90 @@
+/*
+ * QEMU ARM CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>
+ *
+ */
+#ifndef QEMU_ARM_CPU_QOM_H
+#define QEMU_ARM_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+
+struct arm_boot_info;
+
+#define TYPE_ARM_CPU "arm-cpu"
+
+#define ARM_CPU(obj) ((ARMCPU *)obj)
+#define ARM_CPU_CLASS(klass) ((ARMCPUClass *)klass)
+#define ARM_CPU_GET_CLASS(obj) (&((ARMCPU *)obj)->cc)
+
+#define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU
+
+typedef struct ARMCPUInfo ARMCPUInfo;
+
+/**
+ * ARMCPUClass:
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An ARM CPU model.
+ */
+typedef struct ARMCPUClass {
+    /*< private >*/
+    CPUClass parent_class;
+    /*< public >*/
+
+    const ARMCPUInfo *info;
+    void (*parent_reset)(CPUState *cpu);
+} ARMCPUClass;
+
+typedef struct ARMCPU ARMCPU;
+
+#define TYPE_AARCH64_CPU "aarch64-cpu"
+#define AARCH64_CPU_CLASS(klass) \
+    OBJECT_CLASS_CHECK(AArch64CPUClass, (klass), TYPE_AARCH64_CPU)
+#define AARCH64_CPU_GET_CLASS(obj) \
+    OBJECT_GET_CLASS(AArch64CPUClass, (obj), TYPE_AARCH64_CPU)
+
+typedef struct AArch64CPUClass {
+    /*< private >*/
+    ARMCPUClass parent_class;
+    /*< public >*/
+} AArch64CPUClass;
+
+void register_cp_regs_for_features(ARMCPU *cpu);
+
+/* Callback functions for the generic timer's timers. */
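+/*
+ * (Orientation note, added as an aside and based on upstream QEMU's
+ * timer naming: ptimer/vtimer are the EL1 physical and virtual timers,
+ * htimer/hvtimer the EL2 physical and virtual timers, and stimer the
+ * secure physical timer.)
+ */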
+void arm_gt_ptimer_cb(void *opaque);
+void arm_gt_vtimer_cb(void *opaque);
+void arm_gt_htimer_cb(void *opaque);
+void arm_gt_stimer_cb(void *opaque);
+void arm_gt_hvtimer_cb(void *opaque);
+
+#define ARM_AFF0_SHIFT 0
+#define ARM_AFF0_MASK  (0xFFULL << ARM_AFF0_SHIFT)
+#define ARM_AFF1_SHIFT 8
+#define ARM_AFF1_MASK  (0xFFULL << ARM_AFF1_SHIFT)
+#define ARM_AFF2_SHIFT 16
+#define ARM_AFF2_MASK  (0xFFULL << ARM_AFF2_SHIFT)
+#define ARM_AFF3_SHIFT 32
+#define ARM_AFF3_MASK  (0xFFULL << ARM_AFF3_SHIFT)
+#define ARM_DEFAULT_CPUS_PER_CLUSTER 8
+
+#define ARM32_AFFINITY_MASK (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK)
+#define ARM64_AFFINITY_MASK \
+    (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK|ARM_AFF3_MASK)
+#define ARM64_AFFINITY_INVALID (~ARM64_AFFINITY_MASK)
+
+#endif
diff --git a/qemu/target/arm/cpu.c b/qemu/target/arm/cpu.c
new file mode 100644
index 00000000..36ef844c
--- /dev/null
+++ b/qemu/target/arm/cpu.c
@@ -0,0 +1,2092 @@
+/*
+ * QEMU ARM CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>
+ *
+ */
+
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "sysemu/sysemu.h"
+#include "fpu/softfloat.h"
+
+#include
+
+static void arm_cpu_set_pc(CPUState *cs, vaddr value)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+
+    if (is_a64(env)) {
+        env->pc = value;
+        env->thumb = 0;
+    } else {
+        env->regs[15] = value & ~1;
+        env->thumb = value & 1;
+    }
+}
+
+static void arm_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+
+    /*
+     * It's OK to look at env for the current mode here, because it's
+     * never possible for an AArch64 TB to chain to an AArch32 TB.
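+     * (Contrast with arm_cpu_set_pc() above, which does decode the
+     * interworking bit: e.g. setting PC = 0x1001 in AArch32 enters
+     * Thumb state at 0x1000, whereas here tb->pc is trusted as-is
+     * because the TB was translated for a known state.)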
+ */ + if (is_a64(env)) { + env->pc = tb->pc; + } else { + env->regs[15] = tb->pc; + } +} + +static bool arm_cpu_has_work(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + + return (cpu->power_state != PSCI_OFF) + && cs->interrupt_request & + (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD + | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ + | CPU_INTERRUPT_EXITTB); +} + +static void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, + void *opaque) +{ + ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1); + + entry->hook = hook; + entry->opaque = opaque; + + QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node); +} + +static void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, + void *opaque) +{ + ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1); + + entry->hook = hook; + entry->opaque = opaque; + + QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node); +} + +static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque) +{ + /* Reset a single ARMCPRegInfo register */ + ARMCPRegInfo *ri = value; + ARMCPU *cpu = opaque; + + if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) { + return; + } + + if (ri->resetfn) { + ri->resetfn(&cpu->env, ri); + return; + } + + /* A zero offset is never possible as it would be regs[0] + * so we use it to indicate that reset is being handled elsewhere. + * This is basically only used for fields in non-core coprocessors + * (like the pxa2xx ones). + */ + if (!ri->fieldoffset) { + return; + } + + if (cpreg_field_is_64bit(ri)) { + CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue; + } else { + CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue; + } +} + +static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque) +{ + /* Purely an assertion check: we've already done reset once, + * so now check that running the reset for the cpreg doesn't + * change its value. This traps bugs where two different cpregs + * both try to reset the same state field but to different values. + */ + ARMCPRegInfo *ri = value; +#ifndef NDEBUG + ARMCPU *cpu = opaque; + uint64_t oldvalue, newvalue; +#endif + + if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) { + return; + } + +#ifndef NDEBUG + oldvalue = read_raw_cp_reg(&cpu->env, ri); +#endif + cp_reg_reset(key, value, opaque); +#ifndef NDEBUG + newvalue = read_raw_cp_reg(&cpu->env, ri); +#endif + assert(oldvalue == newvalue); +} + +static void arm_cpu_reset(CPUState *dev) +{ + CPUState *s = CPU(dev); + ARMCPU *cpu = ARM_CPU(s); + ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu); + CPUARMState *env = &cpu->env; + + acc->parent_reset(dev); + + memset(env, 0, offsetof(CPUARMState, end_reset_fields)); + + g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu); + g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu); + + env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid; + env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0; + env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1; + env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2; + + cpu->power_state = cpu->start_powered_off ? 
PSCI_OFF : PSCI_ON; + s->halted = cpu->start_powered_off; + + if (arm_feature(env, ARM_FEATURE_IWMMXT)) { + env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q'; + } + + if (arm_feature(env, ARM_FEATURE_AARCH64)) { + /* 64 bit CPUs always start in 64 bit mode */ + env->aarch64 = 1; + /* Reset into the highest available EL */ + if (arm_feature(env, ARM_FEATURE_EL3)) { + env->pstate = PSTATE_MODE_EL3h; + } else if (arm_feature(env, ARM_FEATURE_EL2)) { + env->pstate = PSTATE_MODE_EL2h; + } else { + env->pstate = PSTATE_MODE_EL1h; + } + /* + * Unicorn: Hack to force to enable EL2/EL3 for aarch64 so that we can + * use the full 64bits virtual address space. + * + * See cpu_aarch64_init for details. + */ + env->pstate = PSTATE_MODE_EL1h; + env->pc = cpu->rvbar; + } + + /* + * If the highest available EL is EL2, AArch32 will start in Hyp + * mode; otherwise it starts in SVC. Note that if we start in + * AArch64 then these values in the uncached_cpsr will be ignored. + */ + if (arm_feature(env, ARM_FEATURE_EL2) && + !arm_feature(env, ARM_FEATURE_EL3)) { + env->uncached_cpsr = ARM_CPU_MODE_HYP; + } else { + env->uncached_cpsr = ARM_CPU_MODE_SVC; + } + env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F; + + if (arm_feature(env, ARM_FEATURE_M)) { + uint32_t initial_msp; /* Loaded from 0x0 */ + uint32_t initial_pc; /* Loaded from 0x4 */ + // uint8_t *rom; + uint32_t vecbase; + + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + env->v7m.secure = true; + } else { + /* This bit resets to 0 if security is supported, but 1 if + * it is not. The bit is not present in v7M, but we set it + * here so we can avoid having to make checks on it conditional + * on ARM_FEATURE_V8 (we don't let the guest see the bit). + */ + env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK; + /* + * Set NSACR to indicate "NS access permitted to everything"; + * this avoids having to have all the tests of it being + * conditional on ARM_FEATURE_M_SECURITY. Note also that from + * v8.1M the guest-visible value of NSACR in a CPU without the + * Security Extension is 0xcff. + */ + env->v7m.nsacr = 0xcff; + } + + /* In v7M the reset value of this bit is IMPDEF, but ARM recommends + * that it resets to 1, so QEMU always does that rather than making + * it dependent on CPU model. In v8M it is RES1. + */ + env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK; + env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK; + if (arm_feature(env, ARM_FEATURE_V8)) { + /* in v8M the NONBASETHRDENA bit [0] is RES1 */ + env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK; + env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK; + } + if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { + env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK; + env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK; + } + + if (cpu_isar_feature(aa32_vfp_simd, cpu)) { + env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK; + env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK | + R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK; + } + /* Unlike A/R profile, M profile defines the reset LR value */ + env->regs[14] = 0xffffffff; + + env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80; + + /* Load the initial SP and PC from offset 0 and 4 in the vector table */ + vecbase = env->v7m.vecbase[env->v7m.secure]; +#if 0 + rom = rom_ptr(vecbase, 8); + if (rom) { + /* Address zero is covered by ROM which hasn't yet been + * copied into physical memory. 
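+             *
+             * (Vector-table background, noted as an aside: in v7-M/v8-M,
+             * word 0 of the table holds the initial main stack pointer and
+             * word 1 the reset vector, whose bit 0 selects Thumb state --
+             * hence the masking applied to initial_msp and initial_pc
+             * below.)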
+ */ + initial_msp = ldl_p(rom); + initial_pc = ldl_p(rom + 4); + } else +#endif + { + /* Address zero not covered by a ROM blob, or the ROM blob + * is in non-modifiable memory and this is a second reset after + * it got copied into memory. In the latter case, rom_ptr + * will return a NULL pointer and we should use ldl_phys instead. + */ +#ifdef UNICORN_ARCH_POSTFIX + initial_msp = glue(ldl_phys, UNICORN_ARCH_POSTFIX)(s->uc, s->as, vecbase); + initial_pc = glue(ldl_phys, UNICORN_ARCH_POSTFIX)(s->uc, s->as, vecbase + 4); +#else + initial_msp = ldl_phys(s->uc, s->as, vecbase); + initial_pc = ldl_phys(s->uc, s->as, vecbase + 4); +#endif + } + + env->regs[13] = initial_msp & 0xFFFFFFFC; + env->regs[15] = initial_pc & ~1; + env->thumb = initial_pc & 1; + } + + /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently + * executing as AArch32 then check if highvecs are enabled and + * adjust the PC accordingly. + */ + if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { + env->regs[15] = 0xFFFF0000; + } + + /* M profile requires that reset clears the exclusive monitor; + * A profile does not, but clearing it makes more sense than having it + * set with an exclusive access on address zero. + */ + arm_clear_exclusive(env); + + env->vfp.xregs[ARM_VFP_FPEXC] = 0; + + if (arm_feature(env, ARM_FEATURE_PMSA)) { + if (cpu->pmsav7_dregion > 0) { + if (arm_feature(env, ARM_FEATURE_V8)) { + memset(env->pmsav8.rbar[M_REG_NS], 0, + sizeof(*env->pmsav8.rbar[M_REG_NS]) + * cpu->pmsav7_dregion); + memset(env->pmsav8.rlar[M_REG_NS], 0, + sizeof(*env->pmsav8.rlar[M_REG_NS]) + * cpu->pmsav7_dregion); + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + memset(env->pmsav8.rbar[M_REG_S], 0, + sizeof(*env->pmsav8.rbar[M_REG_S]) + * cpu->pmsav7_dregion); + memset(env->pmsav8.rlar[M_REG_S], 0, + sizeof(*env->pmsav8.rlar[M_REG_S]) + * cpu->pmsav7_dregion); + } + } else if (arm_feature(env, ARM_FEATURE_V7)) { + memset(env->pmsav7.drbar, 0, + sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion); + memset(env->pmsav7.drsr, 0, + sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion); + memset(env->pmsav7.dracr, 0, + sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion); + } + } + env->pmsav7.rnr[M_REG_NS] = 0; + env->pmsav7.rnr[M_REG_S] = 0; + env->pmsav8.mair0[M_REG_NS] = 0; + env->pmsav8.mair0[M_REG_S] = 0; + env->pmsav8.mair1[M_REG_NS] = 0; + env->pmsav8.mair1[M_REG_S] = 0; + } + + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + if (cpu->sau_sregion > 0) { + memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion); + memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion); + } + env->sau.rnr = 0; + /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what + * the Cortex-M33 does. 
+         */
+        env->sau.ctrl = 0;
+    }
+
+    set_flush_to_zero(1, &env->vfp.standard_fp_status);
+    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
+    set_default_nan_mode(1, &env->vfp.standard_fp_status);
+    set_float_detect_tininess(float_tininess_before_rounding,
+                              &env->vfp.fp_status);
+    set_float_detect_tininess(float_tininess_before_rounding,
+                              &env->vfp.standard_fp_status);
+    set_float_detect_tininess(float_tininess_before_rounding,
+                              &env->vfp.fp_status_f16);
+
+    hw_breakpoint_update_all(cpu);
+    hw_watchpoint_update_all(cpu);
+    arm_rebuild_hflags(env);
+}
+
+static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
+                                     unsigned int target_el,
+                                     unsigned int cur_el, bool secure,
+                                     uint64_t hcr_el2)
+{
+    CPUARMState *env = cs->env_ptr;
+    bool pstate_unmasked;
+    bool unmasked = false;
+
+    /*
+     * Don't take exceptions if they target a lower EL.
+     * This check should catch any exceptions that would not be taken
+     * but left pending.
+     */
+    if (cur_el > target_el) {
+        return false;
+    }
+
+    switch (excp_idx) {
+    case EXCP_FIQ:
+        pstate_unmasked = !(env->daif & PSTATE_F);
+        break;
+
+    case EXCP_IRQ:
+        pstate_unmasked = !(env->daif & PSTATE_I);
+        break;
+
+    case EXCP_VFIQ:
+        if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
+            /* VFIQs are only taken when hypervised and non-secure. */
+            return false;
+        }
+        return !(env->daif & PSTATE_F);
+    case EXCP_VIRQ:
+        if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
+            /* VIRQs are only taken when hypervised and non-secure. */
+            return false;
+        }
+        return !(env->daif & PSTATE_I);
+    default:
+        g_assert_not_reached();
+    }
+
+    /*
+     * Use the target EL, current execution state and SCR/HCR settings to
+     * determine whether the corresponding CPSR bit is used to mask the
+     * interrupt.
+     */
+    if ((target_el > cur_el) && (target_el != 1)) {
+        /* Exceptions targeting a higher EL may not be maskable */
+        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+            /*
+             * 64-bit masking rules are simple: exceptions to EL3
+             * can't be masked, and exceptions to EL2 can only be
+             * masked from Secure state. The HCR and SCR settings
+             * don't affect the masking logic, only the interrupt routing.
+             */
+            if (target_el == 3 || !secure) {
+                unmasked = true;
+            }
+        } else {
+            /*
+             * The old 32-bit-only environment has a more complicated
+             * masking setup. HCR and SCR bits not only affect interrupt
+             * routing but also change the behaviour of masking.
+             */
+            bool hcr, scr;
+
+            switch (excp_idx) {
+            case EXCP_FIQ:
+                /*
+                 * If FIQs are routed to EL3 or EL2 then there are cases where
+                 * we override the CPSR.F in determining if the exception is
+                 * masked or not. If neither of these are set then we fall back
+                 * to the CPSR.F setting; otherwise we further assess the state
+                 * below.
+                 */
+                hcr = hcr_el2 & HCR_FMO;
+                scr = (env->cp15.scr_el3 & SCR_FIQ);
+
+                /*
+                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
+                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
+                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
+                 * when non-secure, but only when FIQs are routed only to EL3.
+                 */
+                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
+                break;
+            case EXCP_IRQ:
+                /*
+                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
+                 * we may override the CPSR.I masking when in non-secure state.
+                 * The SCR.IRQ setting has already been taken into consideration
+                 * when setting the target EL, so it does not have a further
+                 * effect here.
+                 */
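+                /*
+                 * (Net effect, noted as an aside: for IRQs only HCR.IMO can
+                 * force the unmask decision below, since scr stays false
+                 * here and SCR.IRQ never overrides CPSR.I.)
+                 */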
+                hcr = hcr_el2 & HCR_IMO;
+                scr = false;
+                break;
+            default:
+                g_assert_not_reached();
+            }
+
+            if ((scr || hcr) && !secure) {
+                unmasked = true;
+            }
+        }
+    }
+
+    /*
+     * The PSTATE bits only mask the interrupt if we have not overridden the
+     * ability above.
+     */
+    return unmasked || pstate_unmasked;
+}
+
+bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    CPUClass *cc = CPU_GET_CLASS(cs);
+    CPUARMState *env = cs->env_ptr;
+    uint32_t cur_el = arm_current_el(env);
+    bool secure = arm_is_secure(env);
+    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+    uint32_t target_el;
+    uint32_t excp_idx;
+
+    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
+
+    if (interrupt_request & CPU_INTERRUPT_FIQ) {
+        excp_idx = EXCP_FIQ;
+        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        excp_idx = EXCP_IRQ;
+        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
+        excp_idx = EXCP_VIRQ;
+        target_el = 1;
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
+        excp_idx = EXCP_VFIQ;
+        target_el = 1;
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+    return false;
+
+ found:
+    cs->exception_index = excp_idx;
+    env->exception.target_el = target_el;
+    cc->do_interrupt(cs);
+    return true;
+}
+
+#if !defined(TARGET_AARCH64)
+static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    CPUClass *cc = CPU_GET_CLASS(cs);
+    // ARMCPU *cpu = ARM_CPU(cs);
+    // CPUARMState *env = &cpu->env;
+    bool ret = false;
+
+    /* ARMv7-M interrupt masking works differently than -A or -R.
+     * There is no FIQ/IRQ distinction. Instead of I and F bits
+     * masking FIQ and IRQ interrupts, an exception is taken only
+     * if it is higher priority than the current execution priority
+     * (which depends on state like BASEPRI, FAULTMASK and the
+     * currently active exception).
+     */
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        // && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
+        cs->exception_index = EXCP_IRQ;
+        cc->do_interrupt(cs);
+        ret = true;
+    }
+    return ret;
+}
+#endif
+
+void arm_cpu_update_virq(ARMCPU *cpu)
+{
+    /*
+     * Update the interrupt level for VIRQ, which is the logical OR of
+     * the HCR_EL2.VI bit and the input line level from the GIC.
+     */
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
+        (env->irq_line_state & CPU_INTERRUPT_VIRQ);
+
+    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
+        if (new_state) {
+            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
+        }
+    }
+}
+
+void arm_cpu_update_vfiq(ARMCPU *cpu)
+{
+    /*
+     * Update the interrupt level for VFIQ, which is the logical OR of
+     * the HCR_EL2.VF bit and the input line level from the GIC.
+     */
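+    /*
+     * (Example, as an aside: a hypervisor setting HCR_EL2.VF raises a
+     * virtual FIQ for the EL1 guest even with no vFIQ line asserted from
+     * the GIC; clearing both the bit and the line level lowers it again
+     * via cpu_reset_interrupt().)
+     */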
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
+        (env->irq_line_state & CPU_INTERRUPT_VFIQ);
+
+    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
+        if (new_state) {
+            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
+        }
+    }
+}
+
+static inline void set_feature(CPUARMState *env, int feature)
+{
+    env->features |= 1ULL << feature;
+}
+
+static inline void unset_feature(CPUARMState *env, int feature)
+{
+    env->features &= ~(1ULL << feature);
+}
+
+static uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
+{
+    uint32_t Aff1 = idx / clustersz;
+    uint32_t Aff0 = idx % clustersz;
+    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
+}
+
+static void cpreg_hashtable_data_destroy(gpointer data)
+{
+    /*
+     * Destroy function for cpu->cp_regs hashtable data entries.
+     * We must free the name string because it was g_strdup()ed in
+     * add_cpreg_to_hashtable(). It's OK to cast away the 'const'
+     * from r->name because we know we definitely allocated it.
+     */
+    ARMCPRegInfo *r = data;
+
+    g_free((void *)r->name);
+    g_free(r);
+}
+
+void arm_cpu_initfn(struct uc_struct *uc, CPUState *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    CPUARMState *env = &cpu->env;
+
+    env->uc = uc;
+    cpu_set_cpustate_pointers(cpu);
+    cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
+                                         g_free, cpreg_hashtable_data_destroy);
+
+    QLIST_INIT(&cpu->pre_el_change_hooks);
+    QLIST_INIT(&cpu->el_change_hooks);
+
+    /* DTB consumers generally don't in fact care what the 'compatible'
+     * string is, so always provide some string and trust that a hypothetical
+     * picky DTB consumer will also provide a helpful error message.
+     */
+    cpu->psci_version = 1; /* By default assume PSCI v0.1 */
+
+    cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
+}
+
+unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
+{
+    /*
+     * The exact approach to calculating guest ticks is:
+     *
+     *     muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz,
+     *              NANOSECONDS_PER_SECOND);
+     *
+     * We don't do that. Rather we intentionally use integer division
+     * truncation below and in the caller for the conversion of host monotonic
+     * time to guest ticks to provide the exact inverse for the semantics of
+     * the QEMUTimer scale factor. QEMUTimer's scale factor is an integer, so
+     * it loses precision when representing frequencies where
+     * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq) > 0` holds. Failing to
+     * provide an exact inverse leads to scheduling timers with negative
+     * periods, which in turn leads to sticky behaviour in the guest.
+     *
+     * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor
+     * cannot become zero. For example, a 62.5 MHz CNTFRQ yields a 16 ns
+     * period.
+     */
+    return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ?
+        NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
+}
+
+void arm_cpu_post_init(CPUState *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    /* M profile implies PMSA. We have to do this here rather than
+     * in realize with the other feature-implication checks because
+     * we look at the PMSA bit to see if we should add some properties.
+     */
+    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+        set_feature(&cpu->env, ARM_FEATURE_PMSA);
+    }
+
+    if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
+        cpu->has_pmu = true;
+    }
+
+    /*
+     * Allow user to turn off VFP and Neon support, but only for TCG --
+     * KVM does not currently allow us to lie to the guest about its
+     * ID/feature registers, so the guest always sees what the host has.
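+     * (When has_vfp ends up false, arm_cpu_realizefn() below reflects that
+     * in the ID registers -- e.g. MVFR0.FPSP/FPDP are cleared and
+     * ID_AA64PFR0.FP is set to 0xf, "not implemented" -- so guests that
+     * probe the ID registers see no floating point.)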
+ */ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) + ? cpu_isar_feature(aa64_fp_simd, cpu) + : cpu_isar_feature(aa32_vfp, cpu)) { + cpu->has_vfp = true; + } + + if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) { + cpu->has_neon = true; + } +} + +static void arm_cpu_finalize_features(ARMCPU *cpu) +{ +#if 0 + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + arm_cpu_sve_finalize(cpu); + } +#endif +} + +void arm_cpu_realizefn(struct uc_struct *uc, CPUState *dev) +{ + CPUState *cs = CPU(dev); + ARMCPU *cpu = ARM_CPU(dev); + CPUARMState *env = &cpu->env; +#ifndef NDEBUG + bool no_aa32 = false; +#endif + +#if 0 + /* The NVIC and M-profile CPU are two halves of a single piece of + * hardware; trying to use one without the other is a command line + * error and will result in segfaults if not caught here. + */ + if (arm_feature(env, ARM_FEATURE_M)) { + if (!env->nvic) { + return; + } + } else { + if (env->nvic) { + return; + } + } + + if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { + if (!cpu->gt_cntfrq_hz) { + return; + } + } +#endif + + cpu_exec_realizefn(cs); + + arm_cpu_finalize_features(cpu); + + if (arm_feature(env, ARM_FEATURE_AARCH64) && + cpu->has_vfp != cpu->has_neon) { + /* + * This is an architectural requirement for AArch64; AArch32 is + * more flexible and permits VFP-no-Neon and Neon-no-VFP. + */ + // error_setg(errp, "AArch64 CPUs must have both VFP and Neon or neither"); + return; + } + + if (!cpu->has_vfp) { + uint64_t t; + uint32_t u; + + t = cpu->isar.id_aa64isar1; + FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0, t); + cpu->isar.id_aa64isar1 = t; + + t = cpu->isar.id_aa64pfr0; + FIELD_DP64(t, ID_AA64PFR0, FP, 0xf, t); + cpu->isar.id_aa64pfr0 = t; + + u = cpu->isar.id_isar6; + FIELD_DP32(u, ID_ISAR6, JSCVT, 0, u); + cpu->isar.id_isar6 = u; + + u = cpu->isar.mvfr0; + FIELD_DP32(u, MVFR0, FPSP, 0, u); + FIELD_DP32(u, MVFR0, FPDP, 0, u); + FIELD_DP32(u, MVFR0, FPTRAP, 0, u); + FIELD_DP32(u, MVFR0, FPDIVIDE, 0, u); + FIELD_DP32(u, MVFR0, FPSQRT, 0, u); + FIELD_DP32(u, MVFR0, FPSHVEC, 0, u); + FIELD_DP32(u, MVFR0, FPROUND, 0, u); + cpu->isar.mvfr0 = u; + + u = cpu->isar.mvfr1; + FIELD_DP32(u, MVFR1, FPFTZ, 0, u); + FIELD_DP32(u, MVFR1, FPDNAN, 0, u); + FIELD_DP32(u, MVFR1, FPHP, 0, u); + cpu->isar.mvfr1 = u; + + u = cpu->isar.mvfr2; + FIELD_DP32(u, MVFR2, FPMISC, 0, u); + cpu->isar.mvfr2 = u; + } + + if (!cpu->has_neon) { + uint64_t t; + uint32_t u; + + unset_feature(env, ARM_FEATURE_NEON); + + t = cpu->isar.id_aa64isar0; + FIELD_DP64(t, ID_AA64ISAR0, DP, 0, t); + cpu->isar.id_aa64isar0 = t; + + t = cpu->isar.id_aa64isar1; + FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0, t); + cpu->isar.id_aa64isar1 = t; + + t = cpu->isar.id_aa64pfr0; + FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf, t); + cpu->isar.id_aa64pfr0 = t; + + u = cpu->isar.id_isar5; + FIELD_DP32(u, ID_ISAR5, RDM, 0, u); + FIELD_DP32(u, ID_ISAR5, VCMA, 0, u); + cpu->isar.id_isar5 = u; + + u = cpu->isar.id_isar6; + FIELD_DP32(u, ID_ISAR6, DP, 0, u); + FIELD_DP32(u, ID_ISAR6, FHM, 0, u); + cpu->isar.id_isar6 = u; + + u = cpu->isar.mvfr1; + FIELD_DP32(u, MVFR1, SIMDLS, 0, u); + FIELD_DP32(u, MVFR1, SIMDINT, 0, u); + FIELD_DP32(u, MVFR1, SIMDSP, 0, u); + FIELD_DP32(u, MVFR1, SIMDHP, 0, u); + cpu->isar.mvfr1 = u; + + u = cpu->isar.mvfr2; + FIELD_DP32(u, MVFR2, SIMDMISC, 0, u); + cpu->isar.mvfr2 = u; + } + + if (!cpu->has_neon && !cpu->has_vfp) { + uint64_t t; + uint32_t u; + + t = cpu->isar.id_aa64isar0; + FIELD_DP64(t, ID_AA64ISAR0, FHM, 0, t); + cpu->isar.id_aa64isar0 = t; + + t = cpu->isar.id_aa64isar1; + FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0, 
t); + cpu->isar.id_aa64isar1 = t; + + u = cpu->isar.mvfr0; + FIELD_DP32(u, MVFR0, SIMDREG, 0, u); + cpu->isar.mvfr0 = u; + + /* Despite the name, this field covers both VFP and Neon */ + u = cpu->isar.mvfr1; + FIELD_DP32(u, MVFR1, SIMDFMAC, 0, u); + cpu->isar.mvfr1 = u; + } + + if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) { + uint32_t u; + + unset_feature(env, ARM_FEATURE_THUMB_DSP); + + u = cpu->isar.id_isar1; + FIELD_DP32(u, ID_ISAR1, EXTEND, 1, u); + cpu->isar.id_isar1 = u; + + u = cpu->isar.id_isar2; + FIELD_DP32(u, ID_ISAR2, MULTU, 1, u); + FIELD_DP32(u, ID_ISAR2, MULTS, 1, u); + cpu->isar.id_isar2 = u; + + u = cpu->isar.id_isar3; + FIELD_DP32(u, ID_ISAR3, SIMD, 1, u); + FIELD_DP32(u, ID_ISAR3, SATURATE, 0, u); + cpu->isar.id_isar3 = u; + } + + /* Some features automatically imply others: */ + if (arm_feature(env, ARM_FEATURE_V8)) { + if (arm_feature(env, ARM_FEATURE_M)) { + set_feature(env, ARM_FEATURE_V7); + } else { + set_feature(env, ARM_FEATURE_V7VE); + } + } + + /* + * There exist AArch64 cpus without AArch32 support. When KVM + * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN. + * Similarly, we cannot check ID_AA64PFR0 without AArch64 support. + * As a general principle, we also do not make ID register + * consistency checks anywhere unless using TCG, because only + * for TCG would a consistency-check failure be a QEMU bug. + */ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { +#ifndef NDEBUG + no_aa32 = !cpu_isar_feature(aa64_aa32, cpu); +#else + cpu_isar_feature(aa64_aa32, cpu); +#endif + } + + if (arm_feature(env, ARM_FEATURE_V7VE)) { + /* v7 Virtualization Extensions. In real hardware this implies + * EL2 and also the presence of the Security Extensions. + * For QEMU, for backwards-compatibility we implement some + * CPUs or CPU configs which have no actual EL2 or EL3 but do + * include the various other features that V7VE implies. + * Presence of EL2 itself is ARM_FEATURE_EL2, and of the + * Security Extensions is ARM_FEATURE_EL3. + */ + assert(no_aa32 || cpu_isar_feature(aa32_arm_div, cpu)); + set_feature(env, ARM_FEATURE_LPAE); + set_feature(env, ARM_FEATURE_V7); + } + if (arm_feature(env, ARM_FEATURE_V7)) { + set_feature(env, ARM_FEATURE_VAPA); + set_feature(env, ARM_FEATURE_THUMB2); + set_feature(env, ARM_FEATURE_MPIDR); + if (!arm_feature(env, ARM_FEATURE_M)) { + set_feature(env, ARM_FEATURE_V6K); + } else { + set_feature(env, ARM_FEATURE_V6); + } + + /* Always define VBAR for V7 CPUs even if it doesn't exist in + * non-EL3 configs. This is needed by some legacy boards. + */ + set_feature(env, ARM_FEATURE_VBAR); + } + if (arm_feature(env, ARM_FEATURE_V6K)) { + set_feature(env, ARM_FEATURE_V6); + set_feature(env, ARM_FEATURE_MVFR); + } + if (arm_feature(env, ARM_FEATURE_V6)) { + set_feature(env, ARM_FEATURE_V5); + if (!arm_feature(env, ARM_FEATURE_M)) { + assert(no_aa32 || cpu_isar_feature(aa32_jazelle, cpu)); + set_feature(env, ARM_FEATURE_AUXCR); + } + } + if (arm_feature(env, ARM_FEATURE_V5)) { + set_feature(env, ARM_FEATURE_V4T); + } + if (arm_feature(env, ARM_FEATURE_LPAE)) { + set_feature(env, ARM_FEATURE_V7MP); + set_feature(env, ARM_FEATURE_PXN); + } + if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { + set_feature(env, ARM_FEATURE_CBAR); + } + if (arm_feature(env, ARM_FEATURE_THUMB2) && + !arm_feature(env, ARM_FEATURE_M)) { + set_feature(env, ARM_FEATURE_THUMB_DSP); + } + + /* + * We rely on no XScale CPU having VFP so we can use the same bits in the + * TB flags field for VECSTRIDE and XSCALE_CPAR. 
+ */ + assert(arm_feature(&cpu->env, ARM_FEATURE_AARCH64) || + !cpu_isar_feature(aa32_vfp_simd, cpu) || + !arm_feature(env, ARM_FEATURE_XSCALE)); + +#if 0 + if (arm_feature(env, ARM_FEATURE_V7) && + !arm_feature(env, ARM_FEATURE_M) && + !arm_feature(env, ARM_FEATURE_PMSA)) { + /* v7VMSA drops support for the old ARMv5 tiny pages, so we + * can use 4K pages. + */ + pagebits = 12; + } else { + /* For CPUs which might have tiny 1K pages, or which have an + * MPU and might have small region sizes, stick with 1K pages. + */ + pagebits = 10; + } + + if (!set_preferred_target_page_bits(cpu->uc, pagebits)) { + /* This can only ever happen for hotplugging a CPU, or if + * the board code incorrectly creates a CPU which it has + * promised via minimum_page_size that it will not. + */ + // error_setg(errp, "This CPU requires a smaller page size than the " + // "system is using"); + return; + } +#endif + + /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it. + * We don't support setting cluster ID ([16..23]) (known as Aff2 + * in later ARM ARM versions), or any of the higher affinity level fields, + * so these bits always RAZ. + */ + if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) { + cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index, + ARM_DEFAULT_CPUS_PER_CLUSTER); + } + + if (cpu->reset_hivecs) { + cpu->reset_sctlr |= (1 << 13); + } + + if (cpu->cfgend) { + if (arm_feature(&cpu->env, ARM_FEATURE_V7)) { + cpu->reset_sctlr |= SCTLR_EE; + } else { + cpu->reset_sctlr |= SCTLR_B; + } + } + + if (!cpu->has_el3) { + /* If the has_el3 CPU property is disabled then we need to disable the + * feature. + */ + unset_feature(env, ARM_FEATURE_EL3); + + /* Disable the security extension feature bits in the processor feature + * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12]. + */ + cpu->id_pfr1 &= ~0xf0; + cpu->isar.id_aa64pfr0 &= ~0xf000; + } + + if (!cpu->has_el2) { + unset_feature(env, ARM_FEATURE_EL2); + } + + if (!cpu->has_pmu) { + unset_feature(env, ARM_FEATURE_PMU); + } + if (arm_feature(env, ARM_FEATURE_PMU)) { + pmu_init(cpu); + + arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0); + arm_register_el_change_hook(cpu, &pmu_post_el_change, 0); + } else { + FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0, cpu->isar.id_aa64dfr0); + FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0, cpu->isar.id_dfr0); + cpu->pmceid0 = 0; + cpu->pmceid1 = 0; + } + + if (!arm_feature(env, ARM_FEATURE_EL2)) { + /* Disable the hypervisor feature bits in the processor feature + * registers if we don't have EL2. These are id_pfr1[15:12] and + * id_aa64pfr0_el1[11:8]. + */ + cpu->isar.id_aa64pfr0 &= ~0xf00; + cpu->id_pfr1 &= ~0xf000; + } + + /* MPU can be configured out of a PMSA CPU either by setting has-mpu + * to false or by setting pmsav7-dregion to 0. 
+ */ + if (!cpu->has_mpu) { + cpu->pmsav7_dregion = 0; + } + if (cpu->pmsav7_dregion == 0) { + cpu->has_mpu = false; + } + + if (arm_feature(env, ARM_FEATURE_PMSA) && + arm_feature(env, ARM_FEATURE_V7)) { + uint32_t nr = cpu->pmsav7_dregion; + + if (nr > 0xff) { + // error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr); + return; + } + + if (nr) { + if (arm_feature(env, ARM_FEATURE_V8)) { + /* PMSAv8 */ + env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr); + env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr); + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr); + env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr); + } + } else { + env->pmsav7.drbar = g_new0(uint32_t, nr); + env->pmsav7.drsr = g_new0(uint32_t, nr); + env->pmsav7.dracr = g_new0(uint32_t, nr); + } + } + } + + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + uint32_t nr = cpu->sau_sregion; + + if (nr > 0xff) { + // error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr); + return; + } + + if (nr) { + env->sau.rbar = g_new0(uint32_t, nr); + env->sau.rlar = g_new0(uint32_t, nr); + } + } + + if (arm_feature(env, ARM_FEATURE_EL3)) { + set_feature(env, ARM_FEATURE_VBAR); + } + + register_cp_regs_for_features(cpu); + + unsigned int smp_cpus = 1; + + if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) { + cs->num_ases = 2; + + if (!cpu->secure_memory) { + cpu->secure_memory = cs->memory; + } + cpu_address_space_init(cs, ARMASIdx_S, cpu->secure_memory); + } else { + cs->num_ases = 1; + } + cpu_address_space_init(cs, ARMASIdx_NS, cs->memory); + + /* No core_count specified, default to smp_cpus. */ + if (cpu->core_count == -1) { + cpu->core_count = smp_cpus; + } + + cpu_reset(cs); +} + +/* CPU models. These are not needed for the AArch64 linux-user build. */ +#if !defined(TARGET_AARCH64) + +static void arm926_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); + cpu->midr = 0x41069265; + cpu->reset_fpsid = 0x41011090; + cpu->ctr = 0x1dd20d2; + cpu->reset_sctlr = 0x00090078; + + /* + * ARMv5 does not have the ID_ISAR registers, but we can still + * set the field to indicate Jazelle support within QEMU. + */ + FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1, cpu->isar.id_isar1); + /* + * Similarly, we need to set MVFR0 fields to enable vfp and short vector + * support even though ARMv5 doesn't have this register. 
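+     * (In MVFR0, FPSHVEC advertises short-vector support, while FPSP and
+     * FPDP advertise single- and double-precision arithmetic respectively.)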
+ */ + FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1, cpu->isar.mvfr0); + FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1, cpu->isar.mvfr0); + FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1, cpu->isar.mvfr0); +} + +static void arm946_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_PMSA); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x41059461; + cpu->ctr = 0x0f004006; + cpu->reset_sctlr = 0x00000078; +} + +static void arm1026_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_AUXCR); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); + cpu->midr = 0x4106a262; + cpu->reset_fpsid = 0x410110a0; + cpu->ctr = 0x1dd20d2; + cpu->reset_sctlr = 0x00090078; + cpu->reset_auxcr = 1; + + /* + * ARMv5 does not have the ID_ISAR registers, but we can still + * set the field to indicate Jazelle support within QEMU. + */ + FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1, cpu->isar.id_isar1); + /* + * Similarly, we need to set MVFR0 fields to enable vfp and short vector + * support even though ARMv5 doesn't have this register. + */ + FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1, cpu->isar.mvfr0); + FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1, cpu->isar.mvfr0); + FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1, cpu->isar.mvfr0); + + { + /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */ + ARMCPRegInfo ifar = { + .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns), + .resetvalue = 0 + }; + define_one_arm_cp_reg(cpu, &ifar); + } +} + +static void arm1136_r2_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an + * older core than plain "arm1136". In particular this does not + * have the v6K features. + * These ID register values are correct for 1136 but may be wrong + * for 1136_r2 (in particular r0p2 does not actually implement most + * of the ID registers). 
+ */ + + set_feature(&cpu->env, ARM_FEATURE_V6); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); + set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); + cpu->midr = 0x4107b362; + cpu->reset_fpsid = 0x410120b4; + cpu->isar.mvfr0 = 0x11111111; + cpu->isar.mvfr1 = 0x00000000; + cpu->ctr = 0x1dd20d2; + cpu->reset_sctlr = 0x00050078; + cpu->id_pfr0 = 0x111; + cpu->id_pfr1 = 0x1; + cpu->isar.id_dfr0 = 0x2; + cpu->id_afr0 = 0x3; + cpu->isar.id_mmfr0 = 0x01130003; + cpu->isar.id_mmfr1 = 0x10030302; + cpu->isar.id_mmfr2 = 0x01222110; + cpu->isar.id_isar0 = 0x00140011; + cpu->isar.id_isar1 = 0x12002111; + cpu->isar.id_isar2 = 0x11231111; + cpu->isar.id_isar3 = 0x01102131; + cpu->isar.id_isar4 = 0x141; + cpu->reset_auxcr = 7; +} + +static void arm1136_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V6K); + set_feature(&cpu->env, ARM_FEATURE_V6); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); + set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); + cpu->midr = 0x4117b363; + cpu->reset_fpsid = 0x410120b4; + cpu->isar.mvfr0 = 0x11111111; + cpu->isar.mvfr1 = 0x00000000; + cpu->ctr = 0x1dd20d2; + cpu->reset_sctlr = 0x00050078; + cpu->id_pfr0 = 0x111; + cpu->id_pfr1 = 0x1; + cpu->isar.id_dfr0 = 0x2; + cpu->id_afr0 = 0x3; + cpu->isar.id_mmfr0 = 0x01130003; + cpu->isar.id_mmfr1 = 0x10030302; + cpu->isar.id_mmfr2 = 0x01222110; + cpu->isar.id_isar0 = 0x00140011; + cpu->isar.id_isar1 = 0x12002111; + cpu->isar.id_isar2 = 0x11231111; + cpu->isar.id_isar3 = 0x01102131; + cpu->isar.id_isar4 = 0x141; + cpu->reset_auxcr = 7; +} + +static void arm1176_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V6K); + set_feature(&cpu->env, ARM_FEATURE_VAPA); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); + set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); + set_feature(&cpu->env, ARM_FEATURE_EL3); + cpu->midr = 0x410fb767; + cpu->reset_fpsid = 0x410120b5; + cpu->isar.mvfr0 = 0x11111111; + cpu->isar.mvfr1 = 0x00000000; + cpu->ctr = 0x1dd20d2; + cpu->reset_sctlr = 0x00050078; + cpu->id_pfr0 = 0x111; + cpu->id_pfr1 = 0x11; + cpu->isar.id_dfr0 = 0x33; + cpu->id_afr0 = 0; + cpu->isar.id_mmfr0 = 0x01130003; + cpu->isar.id_mmfr1 = 0x10030302; + cpu->isar.id_mmfr2 = 0x01222100; + cpu->isar.id_isar0 = 0x0140011; + cpu->isar.id_isar1 = 0x12002111; + cpu->isar.id_isar2 = 0x11231121; + cpu->isar.id_isar3 = 0x01102131; + cpu->isar.id_isar4 = 0x01141; + cpu->reset_auxcr = 7; +} + +static void arm11mpcore_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V6K); + set_feature(&cpu->env, ARM_FEATURE_VAPA); + set_feature(&cpu->env, ARM_FEATURE_MPIDR); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x410fb022; + cpu->reset_fpsid = 0x410120b4; + cpu->isar.mvfr0 = 0x11111111; + cpu->isar.mvfr1 = 0x00000000; + cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ + cpu->id_pfr0 = 0x111; + cpu->id_pfr1 = 0x1; + cpu->isar.id_dfr0 = 0; + cpu->id_afr0 = 0x2; + cpu->isar.id_mmfr0 = 0x01100103; + cpu->isar.id_mmfr1 = 0x10020302; + cpu->isar.id_mmfr2 = 0x01222000; + cpu->isar.id_isar0 = 0x00100011; + cpu->isar.id_isar1 = 0x12002111; + cpu->isar.id_isar2 = 0x11221011; + cpu->isar.id_isar3 = 0x01102131; + cpu->isar.id_isar4 = 0x141; + cpu->reset_auxcr = 1; +} 
+ +static void cortex_m0_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + set_feature(&cpu->env, ARM_FEATURE_V6); + set_feature(&cpu->env, ARM_FEATURE_M); + + cpu->midr = 0x410cc200; +} + +static void cortex_m3_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_M); + set_feature(&cpu->env, ARM_FEATURE_M_MAIN); + cpu->midr = 0x410fc231; + cpu->pmsav7_dregion = 8; + cpu->id_pfr0 = 0x00000030; + cpu->id_pfr1 = 0x00000200; + cpu->isar.id_dfr0 = 0x00100000; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00000030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x00000000; + cpu->isar.id_mmfr3 = 0x00000000; + cpu->isar.id_isar0 = 0x01141110; + cpu->isar.id_isar1 = 0x02111000; + cpu->isar.id_isar2 = 0x21112231; + cpu->isar.id_isar3 = 0x01111110; + cpu->isar.id_isar4 = 0x01310102; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; +} + +static void cortex_m4_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_M); + set_feature(&cpu->env, ARM_FEATURE_M_MAIN); + set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); + cpu->midr = 0x410fc240; /* r0p0 */ + cpu->pmsav7_dregion = 8; + cpu->isar.mvfr0 = 0x10110021; + cpu->isar.mvfr1 = 0x11000011; + cpu->isar.mvfr2 = 0x00000000; + cpu->id_pfr0 = 0x00000030; + cpu->id_pfr1 = 0x00000200; + cpu->isar.id_dfr0 = 0x00100000; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00000030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x00000000; + cpu->isar.id_mmfr3 = 0x00000000; + cpu->isar.id_isar0 = 0x01141110; + cpu->isar.id_isar1 = 0x02111000; + cpu->isar.id_isar2 = 0x21112231; + cpu->isar.id_isar3 = 0x01111110; + cpu->isar.id_isar4 = 0x01310102; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; +} + +static void cortex_m7_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_M); + set_feature(&cpu->env, ARM_FEATURE_M_MAIN); + set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); + cpu->midr = 0x411fc272; /* r1p2 */ + cpu->pmsav7_dregion = 8; + cpu->isar.mvfr0 = 0x10110221; + cpu->isar.mvfr1 = 0x12000011; + cpu->isar.mvfr2 = 0x00000040; + cpu->id_pfr0 = 0x00000030; + cpu->id_pfr1 = 0x00000200; + cpu->isar.id_dfr0 = 0x00100000; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00100030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x01000000; + cpu->isar.id_mmfr3 = 0x00000000; + cpu->isar.id_isar0 = 0x01101110; + cpu->isar.id_isar1 = 0x02112000; + cpu->isar.id_isar2 = 0x20232231; + cpu->isar.id_isar3 = 0x01111131; + cpu->isar.id_isar4 = 0x01310132; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; +} + +static void cortex_m33_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_M); + set_feature(&cpu->env, ARM_FEATURE_M_MAIN); + set_feature(&cpu->env, ARM_FEATURE_M_SECURITY); + set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); + cpu->midr = 0x410fd213; /* r0p3 */ + cpu->pmsav7_dregion = 16; + cpu->sau_sregion = 8; + cpu->isar.mvfr0 = 0x10110021; + cpu->isar.mvfr1 = 0x11000011; + cpu->isar.mvfr2 = 0x00000040; + cpu->id_pfr0 = 0x00000030; + cpu->id_pfr1 = 0x00000210; + cpu->isar.id_dfr0 = 0x00200000; + cpu->id_afr0 = 0x00000000; + 
cpu->isar.id_mmfr0 = 0x00101F40; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x01000000; + cpu->isar.id_mmfr3 = 0x00000000; + cpu->isar.id_isar0 = 0x01101110; + cpu->isar.id_isar1 = 0x02212000; + cpu->isar.id_isar2 = 0x20232232; + cpu->isar.id_isar3 = 0x01111131; + cpu->isar.id_isar4 = 0x01310132; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; + cpu->clidr = 0x00000000; + cpu->ctr = 0x8000c000; +} + +static void arm_v7m_class_init(struct uc_struct *uc, CPUClass *oc, void *data) +{ + ARMCPUClass *acc = ARM_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + + acc->info = data; + cc->do_interrupt = arm_v7m_cpu_do_interrupt; + + cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt; +} + +static ARMCPRegInfo cortexr5_cp_reginfo[] = { + /* Dummy the TCM region regs for the moment */ + { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST }, + { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST }, + { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5, + .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP }, + REGINFO_SENTINEL +}; + +static void cortex_r5_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_V7MP); + set_feature(&cpu->env, ARM_FEATURE_PMSA); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->midr = 0x411fc153; /* r1p3 */ + cpu->id_pfr0 = 0x0131; + cpu->id_pfr1 = 0x001; + cpu->isar.id_dfr0 = 0x010400; + cpu->id_afr0 = 0x0; + cpu->isar.id_mmfr0 = 0x0210030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x01200000; + cpu->isar.id_mmfr3 = 0x0211; + cpu->isar.id_isar0 = 0x02101111; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232141; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x0010142; + cpu->isar.id_isar5 = 0x0; + cpu->isar.id_isar6 = 0x0; + cpu->mp_is_up = true; + cpu->pmsav7_dregion = 16; + define_arm_cp_regs(cpu, cortexr5_cp_reginfo); +} + +static void cortex_r5f_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + cortex_r5_initfn(uc, obj); + cpu->isar.mvfr0 = 0x10110221; + cpu->isar.mvfr1 = 0x00000011; +} + +static const ARMCPRegInfo cortexa8_cp_reginfo[] = { + { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static void cortex_a8_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_EL3); + cpu->midr = 0x410fc080; + cpu->reset_fpsid = 0x410330c0; + cpu->isar.mvfr0 = 0x11110222; + cpu->isar.mvfr1 = 0x00011111; + cpu->ctr = 0x82048004; + cpu->reset_sctlr = 0x00c50078; + cpu->id_pfr0 = 0x1031; + cpu->id_pfr1 = 0x11; + cpu->isar.id_dfr0 = 0x400; + cpu->id_afr0 = 0; + cpu->isar.id_mmfr0 = 0x31100003; + cpu->isar.id_mmfr1 = 0x20000000; + cpu->isar.id_mmfr2 = 0x01202000; + cpu->isar.id_mmfr3 = 0x11; + cpu->isar.id_isar0 = 0x00101111; + cpu->isar.id_isar1 = 0x12112111; + cpu->isar.id_isar2 = 0x21232031; + cpu->isar.id_isar3 = 0x11112131; + cpu->isar.id_isar4 = 
0x00111142; + cpu->isar.dbgdidr = 0x15141000; + cpu->clidr = (1 << 27) | (2 << 24) | 3; + cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ + cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */ + cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */ + cpu->reset_auxcr = 2; + define_arm_cp_regs(cpu, cortexa8_cp_reginfo); +} + +static const ARMCPRegInfo cortexa9_cp_reginfo[] = { + /* power_control should be set to maximum latency. Again, + * default to 0 and set by private hook + */ + { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) }, + { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) }, + { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) }, + { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, + /* TLB lockdown control */ + { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2, + .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP }, + { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4, + .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP }, + { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2, + .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, + { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2, + .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, + { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2, + .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST }, + REGINFO_SENTINEL +}; + +static void cortex_a9_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); + set_feature(&cpu->env, ARM_FEATURE_EL3); + /* Note that A9 supports the MP extensions even for + * A9UP and single-core A9MP (which are both different + * and valid configurations; we don't model A9UP). + */ + set_feature(&cpu->env, ARM_FEATURE_V7MP); + set_feature(&cpu->env, ARM_FEATURE_CBAR); + cpu->midr = 0x410fc090; + cpu->reset_fpsid = 0x41033090; + cpu->isar.mvfr0 = 0x11110222; + cpu->isar.mvfr1 = 0x01111111; + cpu->ctr = 0x80038003; + cpu->reset_sctlr = 0x00c50078; + cpu->id_pfr0 = 0x1031; + cpu->id_pfr1 = 0x11; + cpu->isar.id_dfr0 = 0x000; + cpu->id_afr0 = 0; + cpu->isar.id_mmfr0 = 0x00100103; + cpu->isar.id_mmfr1 = 0x20000000; + cpu->isar.id_mmfr2 = 0x01230000; + cpu->isar.id_mmfr3 = 0x00002111; + cpu->isar.id_isar0 = 0x00101111; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232041; + cpu->isar.id_isar3 = 0x11112131; + cpu->isar.id_isar4 = 0x00111142; + cpu->isar.dbgdidr = 0x35141000; + cpu->clidr = (1 << 27) | (1 << 24) | 3; + cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ + cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */ + define_arm_cp_regs(cpu, cortexa9_cp_reginfo); +} + +uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ +#if 0 + MachineState *ms = MACHINE(qdev_get_machine()); + + /* Linux wants the number of processors from here. + * Might as well set the interrupt-controller bit too. 
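+     * With the machine model compiled out in this port, the live return
+     * after the #endif reports a single processor and sets only the
+     * interrupt-controller bit (bit 23).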
+ */ + return ((ms->smp.cpus - 1) << 24) | (1 << 23); +#endif + return (1 << 23); +} + +static ARMCPRegInfo cortexa15_cp_reginfo[] = { + { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2, + .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read, + .writefn = arm_cp_write_ignore }, + { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static void cortex_a7_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V7VE); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->midr = 0x410fc075; + cpu->reset_fpsid = 0x41023075; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x11111111; + cpu->ctr = 0x84448003; + cpu->reset_sctlr = 0x00c50078; + cpu->id_pfr0 = 0x00001131; + cpu->id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x02010555; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x10101105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01240000; + cpu->isar.id_mmfr3 = 0x02102211; + /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but + * table 4-41 gives 0x02101110, which includes the arm div insns. + */ + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232041; + cpu->isar.id_isar3 = 0x11112131; + cpu->isar.id_isar4 = 0x10011142; + cpu->isar.dbgdidr = 0x3515f005; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ + cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ + cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ + define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */ +} + +static void cortex_a15_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V7VE); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->midr = 0x412fc0f1; + cpu->reset_fpsid = 0x410430f0; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x11111111; + cpu->ctr = 0x8444c004; + cpu->reset_sctlr = 0x00c50078; + cpu->id_pfr0 = 0x00001131; + cpu->id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x02010555; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x20000000; + cpu->isar.id_mmfr2 = 0x01240000; + cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232041; + cpu->isar.id_isar3 = 0x11112131; + cpu->isar.id_isar4 = 0x10011142; + cpu->isar.dbgdidr = 0x3515f021; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ + cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ + cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ + define_arm_cp_regs(cpu, cortexa15_cp_reginfo); +} + +static void ti925t_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + 
set_feature(&cpu->env, ARM_FEATURE_V4T); + set_feature(&cpu->env, ARM_FEATURE_OMAPCP); + cpu->midr = ARM_CPUID_TI925T; + cpu->ctr = 0x5109149; + cpu->reset_sctlr = 0x00000070; +} + +static void sa1100_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_STRONGARM); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x4401A11B; + cpu->reset_sctlr = 0x00000070; +} + +static void sa1110_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + set_feature(&cpu->env, ARM_FEATURE_STRONGARM); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x6901B119; + cpu->reset_sctlr = 0x00000070; +} + +static void pxa250_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + cpu->midr = 0x69052100; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa255_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + cpu->midr = 0x69052d00; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa260_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + cpu->midr = 0x69052903; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa261_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + cpu->midr = 0x69052d05; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa262_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + cpu->midr = 0x69052d06; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270a0_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054110; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270a1_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054111; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270b0_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054112; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270b1_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054113; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270c0_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, 
ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054114; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270c5_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054117; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +#ifndef TARGET_AARCH64 +/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host); + * otherwise, a CPU with as many features enabled as our emulation supports. + * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c; + * this only needs to handle 32 bits. + */ +static void arm_max_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + { + cortex_a15_initfn(uc, obj); + + /* old-style VFP short-vector support */ + FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1, cpu->isar.mvfr0); + } +} +#endif + +#endif /* !defined(TARGET_AARCH64) */ + +struct ARMCPUInfo { + const char *name; + void (*initfn)(struct uc_struct *uc, CPUState *obj); + void (*class_init)(struct uc_struct *uc, CPUClass *oc, void *data); +}; + +#if !defined(TARGET_AARCH64) +static struct ARMCPUInfo arm_cpus[] = { + { "arm926", arm926_initfn }, + { "arm946", arm946_initfn }, + { "arm1026", arm1026_initfn }, + /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an + * older core than plain "arm1136". In particular this does not + * have the v6K features. + */ + { "arm1136-r2", arm1136_r2_initfn }, + { "arm1136", arm1136_initfn }, + { "arm1176", arm1176_initfn }, + { "arm11mpcore", arm11mpcore_initfn }, + { "cortex-m0", cortex_m0_initfn, arm_v7m_class_init }, + { "cortex-m3", cortex_m3_initfn, arm_v7m_class_init }, + { "cortex-m4", cortex_m4_initfn, arm_v7m_class_init }, + { "cortex-m7", cortex_m7_initfn, arm_v7m_class_init }, + { "cortex-m33", cortex_m33_initfn, arm_v7m_class_init }, + { "cortex-r5", cortex_r5_initfn }, + { "cortex-r5f", cortex_r5f_initfn }, + { "cortex-a7", cortex_a7_initfn }, + { "cortex-a8", cortex_a8_initfn }, + { "cortex-a9", cortex_a9_initfn }, + { "cortex-a15", cortex_a15_initfn }, + { "ti925t", ti925t_initfn }, + { "sa1100", sa1100_initfn }, + { "sa1110", sa1110_initfn }, + { "pxa250", pxa250_initfn }, + { "pxa255", pxa255_initfn }, + { "pxa260", pxa260_initfn }, + { "pxa261", pxa261_initfn }, + { "pxa262", pxa262_initfn }, + /* "pxa270" is an alias for "pxa270-a0" */ + { "pxa270", pxa270a0_initfn }, + { "pxa270-a0", pxa270a0_initfn }, + { "pxa270-a1", pxa270a1_initfn }, + { "pxa270-b0", pxa270b0_initfn }, + { "pxa270-b1", pxa270b1_initfn }, + { "pxa270-c0", pxa270c0_initfn }, + { "pxa270-c5", pxa270c5_initfn }, + { "max", arm_max_initfn }, +}; +#endif + +void arm_cpu_class_init(struct uc_struct *uc, CPUClass *oc) +{ + ARMCPUClass *acc = ARM_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(acc); + + /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */ + acc->parent_reset = cc->reset; + /* overwrite the CPUClass->reset to arch reset: arm_cpu_reset(). 
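+     * arm_cpu_reset() can then chain back to the saved parent_reset,
+     * i.e. cpu_common_reset().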
+     */
+    cc->reset = arm_cpu_reset;
+
+    cc->has_work = arm_cpu_has_work;
+    cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
+    cc->set_pc = arm_cpu_set_pc;
+    cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
+    cc->do_interrupt = arm_cpu_do_interrupt;
+    cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
+    cc->asidx_from_attrs = arm_asidx_from_attrs;
+    cc->tcg_initialize = arm_translate_init;
+    cc->tlb_fill = arm_cpu_tlb_fill;
+    cc->debug_excp_handler = arm_debug_excp_handler;
+    cc->do_unaligned_access = arm_cpu_do_unaligned_access;
+}
+
+static void arm_cpu_instance_init(CPUState *obj)
+{
+#if 0
+    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
+
+    acc->info->initfn(obj);
+#endif
+    arm_cpu_post_init(obj);
+}
+
+ARMCPU *cpu_arm_init(struct uc_struct *uc)
+{
+#if !defined(TARGET_AARCH64)
+    int i;
+    char *cpu_model;
+#endif
+    ARMCPU *cpu;
+    CPUState *cs;
+    CPUClass *cc;
+
+#if !defined(TARGET_AARCH64)
+    if (uc->mode & UC_MODE_MCLASS) {
+        cpu_model = "cortex-m33";
+    } else if (uc->mode & UC_MODE_ARM926) {
+        cpu_model = "arm926";
+    } else if (uc->mode & UC_MODE_ARM946) {
+        cpu_model = "arm946";
+    } else if (uc->mode & UC_MODE_ARM1176) {
+        cpu_model = "arm1176";
+    } else {
+        cpu_model = "cortex-a15";
+    }
+#endif
+
+    cpu = calloc(1, sizeof(*cpu));
+    if (cpu == NULL) {
+        return NULL;
+    }
+
+    cs = (CPUState *)cpu;
+    cc = (CPUClass *)&cpu->cc;
+    cs->cc = cc;
+    cs->uc = uc;
+    uc->cpu = (CPUState *)cpu;
+
+    /* init CPUClass */
+    cpu_class_init(uc, cc);
+
+    /* init ARMCPUClass */
+    arm_cpu_class_init(uc, cc);
+
+    /* init CPUState */
+    cpu_common_initfn(uc, cs);
+
+    /* init ARMCPU */
+    arm_cpu_initfn(uc, cs);
+
+#if !defined(TARGET_AARCH64)
+    /* init ARM types */
+    for (i = 0; i < ARRAY_SIZE(arm_cpus); i++) {
+        if (strcmp(cpu_model, arm_cpus[i].name) == 0) {
+            if (arm_cpus[i].class_init) {
+                arm_cpus[i].class_init(uc, cc, uc);
+            }
+            if (arm_cpus[i].initfn) {
+                arm_cpus[i].initfn(uc, cs);
+            }
+            break;
+        }
+    }
+    if (i == ARRAY_SIZE(arm_cpus)) {
+        free(cpu);
+        return NULL;
+    }
+#endif
+
+    /* postinit ARMCPU */
+    arm_cpu_instance_init(cs);
+
+    /* realize ARMCPU */
+    arm_cpu_realizefn(uc, cs);
+
+    // init address space
+    cpu_address_space_init(cs, 0, cs->memory);
+
+    qemu_init_vcpu(cs);
+
+    return cpu;
+}
diff --git a/qemu/target/arm/cpu.h b/qemu/target/arm/cpu.h
new file mode 100644
index 00000000..2e734a62
--- /dev/null
+++ b/qemu/target/arm/cpu.h
@@ -0,0 +1,3785 @@
+/*
+ * ARM virtual CPU header
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ARM_CPU_H
+#define ARM_CPU_H
+
+#include "hw/registerfields.h"
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+
+struct uc_struct;
+
+/* ARM processors have a weak memory model */
+#define TCG_GUEST_DEFAULT_MO (0)
+
+#define EXCP_UDEF 1 /* undefined instruction */
+#define EXCP_SWI 2 /* software interrupt */
+#define EXCP_PREFETCH_ABORT 3
+#define EXCP_DATA_ABORT 4
+#define EXCP_IRQ 5
+#define EXCP_FIQ 6
+#define EXCP_BKPT 7
+#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
+#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
+#define EXCP_HVC 11 /* HyperVisor Call */
+#define EXCP_HYP_TRAP 12
+#define EXCP_SMC 13 /* Secure Monitor Call */
+#define EXCP_VIRQ 14
+#define EXCP_VFIQ 15
+#define EXCP_SEMIHOST 16 /* semihosting call */
+#define EXCP_NOCP 17 /* v7M NOCP UsageFault */
+#define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */
+#define EXCP_STKOF 19 /* v8M STKOF UsageFault */
+#define EXCP_LAZYFP 20 /* v7M fault during lazy FP stacking */
+#define EXCP_LSERR 21 /* v8M LSERR SecureFault */
+#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
+/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
+
+#define ARMV7M_EXCP_RESET 1
+#define ARMV7M_EXCP_NMI 2
+#define ARMV7M_EXCP_HARD 3
+#define ARMV7M_EXCP_MEM 4
+#define ARMV7M_EXCP_BUS 5
+#define ARMV7M_EXCP_USAGE 6
+#define ARMV7M_EXCP_SECURE 7
+#define ARMV7M_EXCP_SVC 11
+#define ARMV7M_EXCP_DEBUG 12
+#define ARMV7M_EXCP_PENDSV 14
+#define ARMV7M_EXCP_SYSTICK 15
+
+/* For M profile, some registers are banked secure vs non-secure;
+ * these are represented as a 2-element array where the first element
+ * is the non-secure copy and the second is the secure copy.
+ * When the CPU does not implement the security extension, only the
+ * first element is used.
+ * This means that the copy for the current security state can be
+ * accessed via env->registerfield[env->v7m.secure] (whether the security
+ * extension is implemented or not).
+ */
+enum {
+    M_REG_NS = 0,
+    M_REG_S = 1,
+    M_REG_NUM_BANKS = 2,
+};
+
+/* ARM-specific interrupt pending bits. */
+#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
+
+/* The usual mapping for an AArch64 system register to its AArch32
+ * counterpart is for the 32 bit world to have access to the lower
+ * half only (with writes leaving the upper half untouched). It's
+ * therefore useful to be able to pass TCG the offset of the least
+ * significant half of a uint64_t struct member.
+ */
+#ifdef HOST_WORDS_BIGENDIAN
+#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
+#define offsetofhigh32(S, M) offsetof(S, M)
+#else
+#define offsetoflow32(S, M) offsetof(S, M)
+#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
+#endif
+
+/* Meanings of the ARMCPU object's four inbound GPIO lines */
+#define ARM_CPU_IRQ 0
+#define ARM_CPU_FIQ 1
+#define ARM_CPU_VIRQ 2
+#define ARM_CPU_VFIQ 3
+
+/* ARM-specific extra insn start words:
+ * 1: Conditional execution bits
+ * 2: Partial exception syndrome for data aborts
+ */
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
+/* The 2nd extra word holding syndrome info for data aborts does not use
+ * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
+ * help the sleb128 encoder do a better job.
+ * When restoring the CPU state, we shift it back up.
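+ * (Net effect: only syndrome bits [25:14] are stored: the value is
+ * masked with ARM_INSN_START_WORD2_MASK, then shifted right by
+ * ARM_INSN_START_WORD2_SHIFT.)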
+ */ +#define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1) +#define ARM_INSN_START_WORD2_SHIFT 14 + +/* We currently assume float and double are IEEE single and double + precision respectively. + Doing runtime conversions is tricky because VFP registers may contain + integer values (eg. as the result of a FTOSI instruction). + s<2n> maps to the least significant half of d + s<2n+1> maps to the most significant half of d + */ + +/* CPU state for each instance of a generic timer (in cp15 c14) */ +typedef struct ARMGenericTimer { + uint64_t cval; /* Timer CompareValue register */ + uint64_t ctl; /* Timer Control register */ +} ARMGenericTimer; + +#define GTIMER_PHYS 0 +#define GTIMER_VIRT 1 +#define GTIMER_HYP 2 +#define GTIMER_SEC 3 +#define GTIMER_HYPVIRT 4 +#define NUM_GTIMERS 5 + +typedef struct { + uint64_t raw_tcr; + uint32_t mask; + uint32_t base_mask; +} TCR; + +/* Define a maximum sized vector register. + * For 32-bit, this is a 128-bit NEON/AdvSIMD register. + * For 64-bit, this is a 2048-bit SVE register. + * + * Note that the mapping between S, D, and Q views of the register bank + * differs between AArch64 and AArch32. + * In AArch32: + * Qn = regs[n].d[1]:regs[n].d[0] + * Dn = regs[n / 2].d[n & 1] + * Sn = regs[n / 4].d[n % 4 / 2], + * bits 31..0 for even n, and bits 63..32 for odd n + * (and regs[16] to regs[31] are inaccessible) + * In AArch64: + * Zn = regs[n].d[*] + * Qn = regs[n].d[1]:regs[n].d[0] + * Dn = regs[n].d[0] + * Sn = regs[n].d[0] bits 31..0 + * Hn = regs[n].d[0] bits 15..0 + * + * This corresponds to the architecturally defined mapping between + * the two execution states, and means we do not need to explicitly + * map these registers when changing states. + * + * Align the data for use with TCG host vector operations. + */ + +#ifdef TARGET_AARCH64 +# define ARM_MAX_VQ 16 +//void arm_cpu_sve_finalize(ARMCPU *cpu); +#else +# define ARM_MAX_VQ 1 +//static inline void arm_cpu_sve_finalize(ARMCPU *cpu) { } +#endif + +typedef struct ARMVectorReg { + uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16); +} ARMVectorReg; + +#ifdef TARGET_AARCH64 +/* In AArch32 mode, predicate registers do not exist at all. */ +typedef struct ARMPredicateReg { + uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16); +} ARMPredicateReg; + +/* In AArch32 mode, PAC keys do not exist at all. */ +typedef struct ARMPACKey { + uint64_t lo, hi; +} ARMPACKey; +#endif + + +typedef struct CPUARMState { + /* Regs for current mode. */ + uint32_t regs[16]; + + /* 32/64 switch only happens when taking and returning from + * exceptions so the overlap semantics are taken care of then + * instead of having a complicated union. + */ + /* Regs for A64 mode. */ + uint64_t xregs[32]; + uint64_t pc; + /* PSTATE isn't an architectural register for ARMv8. However, it is + * convenient for us to assemble the underlying state into a 32 bit format + * identical to the architectural format used for the SPSR. (This is also + * what the Linux kernel's 'pstate' field in signal handlers and KVM's + * 'pstate' register are.) 
Of the PSTATE bits: + * NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same + * semantics as for AArch32, as described in the comments on each field) + * nRW (also known as M[4]) is kept, inverted, in env->aarch64 + * DAIF (exception masks) are kept in env->daif + * BTYPE is kept in env->btype + * all other bits are stored in their correct places in env->pstate + */ + uint32_t pstate; + uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */ + + /* Cached TBFLAGS state. See below for which bits are included. */ + uint32_t hflags; + + /* Frequently accessed CPSR bits are stored separately for efficiency. + This contains all the other bits. Use cpsr_{read,write} to access + the whole CPSR. */ + uint32_t uncached_cpsr; + uint32_t spsr; + + /* Banked registers. */ + uint64_t banked_spsr[8]; + uint32_t banked_r13[8]; + uint32_t banked_r14[8]; + + /* These hold r8-r12. */ + uint32_t usr_regs[5]; + uint32_t fiq_regs[5]; + + /* cpsr flag cache for faster execution */ + uint32_t CF; /* 0 or 1 */ + uint32_t VF; /* V is the bit 31. All other bits are undefined */ + uint32_t NF; /* N is bit 31. All other bits are undefined. */ + uint32_t ZF; /* Z set if zero. */ + uint32_t QF; /* 0 or 1 */ + uint32_t GE; /* cpsr[19:16] */ + uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */ + uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */ + uint32_t btype; /* BTI branch type. spsr[11:10]. */ + uint64_t daif; /* exception masks, in the bits they are in PSTATE */ + + uint64_t elr_el[4]; /* AArch64 exception link regs */ + uint64_t sp_el[4]; /* AArch64 banked stack pointers */ + + /* System control coprocessor (cp15) */ + struct { + uint32_t c0_cpuid; + union { /* Cache size selection */ + struct { + uint64_t _unused_csselr0; + uint64_t csselr_ns; + uint64_t _unused_csselr1; + uint64_t csselr_s; + }; + uint64_t csselr_el[4]; + }; + union { /* System control register. */ + struct { + uint64_t _unused_sctlr; + uint64_t sctlr_ns; + uint64_t hsctlr; + uint64_t sctlr_s; + }; + uint64_t sctlr_el[4]; + }; + uint64_t cpacr_el1; /* Architectural feature access control register */ + uint64_t cptr_el[4]; /* ARMv8 feature trap registers */ + uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */ + uint64_t sder; /* Secure debug enable register. */ + uint32_t nsacr; /* Non-secure access control register. */ + union { /* MMU translation table base 0. */ + struct { + uint64_t _unused_ttbr0_0; + uint64_t ttbr0_ns; + uint64_t _unused_ttbr0_1; + uint64_t ttbr0_s; + }; + uint64_t ttbr0_el[4]; + }; + union { /* MMU translation table base 1. */ + struct { + uint64_t _unused_ttbr1_0; + uint64_t ttbr1_ns; + uint64_t _unused_ttbr1_1; + uint64_t ttbr1_s; + }; + uint64_t ttbr1_el[4]; + }; + uint64_t vttbr_el2; /* Virtualization Translation Table Base. */ + /* MMU translation table base control. */ + TCR tcr_el[4]; + TCR vtcr_el2; /* Virtualization Translation Control. */ + uint32_t c2_data; /* MPU data cacheable bits. */ + uint32_t c2_insn; /* MPU instruction cacheable bits. */ + union { /* MMU domain access control register + * MPU write buffer control. + */ + struct { + uint64_t dacr_ns; + uint64_t dacr_s; + }; + struct { + uint64_t dacr32_el2; + }; + }; + uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */ + uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */ + uint64_t hcr_el2; /* Hypervisor configuration register */ + uint64_t scr_el3; /* Secure configuration register. */ + union { /* Fault status registers. 
*/ + struct { + uint64_t ifsr_ns; + uint64_t ifsr_s; + }; + struct { + uint64_t ifsr32_el2; + }; + }; + union { + struct { + uint64_t _unused_dfsr; + uint64_t dfsr_ns; + uint64_t hsr; + uint64_t dfsr_s; + }; + uint64_t esr_el[4]; + }; + uint32_t c6_region[8]; /* MPU base/size registers. */ + union { /* Fault address registers. */ + struct { + uint64_t _unused_far0; +#ifdef HOST_WORDS_BIGENDIAN + uint32_t ifar_ns; + uint32_t dfar_ns; + uint32_t ifar_s; + uint32_t dfar_s; +#else + uint32_t dfar_ns; + uint32_t ifar_ns; + uint32_t dfar_s; + uint32_t ifar_s; +#endif + uint64_t _unused_far3; + }; + uint64_t far_el[4]; + }; + uint64_t hpfar_el2; + uint64_t hstr_el2; + union { /* Translation result. */ + struct { + uint64_t _unused_par_0; + uint64_t par_ns; + uint64_t _unused_par_1; + uint64_t par_s; + }; + uint64_t par_el[4]; + }; + + uint32_t c9_insn; /* Cache lockdown registers. */ + uint32_t c9_data; + uint64_t c9_pmcr; /* performance monitor control register */ + uint64_t c9_pmcnten; /* perf monitor counter enables */ + uint64_t c9_pmovsr; /* perf monitor overflow status */ + uint64_t c9_pmuserenr; /* perf monitor user enable */ + uint64_t c9_pmselr; /* perf monitor counter selection register */ + uint64_t c9_pminten; /* perf monitor interrupt enables */ + union { /* Memory attribute redirection */ + struct { +#ifdef HOST_WORDS_BIGENDIAN + uint64_t _unused_mair_0; + uint32_t mair1_ns; + uint32_t mair0_ns; + uint64_t _unused_mair_1; + uint32_t mair1_s; + uint32_t mair0_s; +#else + uint64_t _unused_mair_0; + uint32_t mair0_ns; + uint32_t mair1_ns; + uint64_t _unused_mair_1; + uint32_t mair0_s; + uint32_t mair1_s; +#endif + }; + uint64_t mair_el[4]; + }; + union { /* vector base address register */ + struct { + uint64_t _unused_vbar; + uint64_t vbar_ns; + uint64_t hvbar; + uint64_t vbar_s; + }; + uint64_t vbar_el[4]; + }; + uint32_t mvbar; /* (monitor) vector base address register */ + struct { /* FCSE PID. */ + uint32_t fcseidr_ns; + uint32_t fcseidr_s; + }; + union { /* Context ID. */ + struct { + uint64_t _unused_contextidr_0; + uint64_t contextidr_ns; + uint64_t _unused_contextidr_1; + uint64_t contextidr_s; + }; + uint64_t contextidr_el[4]; + }; + union { /* User RW Thread register. */ + struct { + uint64_t tpidrurw_ns; + uint64_t tpidrprw_ns; + uint64_t htpidr; + uint64_t _tpidr_el3; + }; + uint64_t tpidr_el[4]; + }; + /* The secure banks of these registers don't map anywhere */ + uint64_t tpidrurw_s; + uint64_t tpidrprw_s; + uint64_t tpidruro_s; + + union { /* User RO Thread register. */ + uint64_t tpidruro_ns; + uint64_t tpidrro_el[1]; + }; + uint64_t c14_cntfrq; /* Counter Frequency register */ + uint64_t c14_cntkctl; /* Timer Control register */ + uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */ + uint64_t cntvoff_el2; /* Counter Virtual Offset register */ + ARMGenericTimer c14_timer[NUM_GTIMERS]; + uint32_t c15_cpar; /* XScale Coprocessor Access Register */ + uint32_t c15_ticonfig; /* TI925T configuration byte. */ + uint32_t c15_i_max; /* Maximum D-cache dirty line index. */ + uint32_t c15_i_min; /* Minimum D-cache dirty line index. */ + uint32_t c15_threadid; /* TI debugger thread-ID. */ + uint32_t c15_config_base_address; /* SCU base address. 
*/ + uint32_t c15_diagnostic; /* diagnostic register */ + uint32_t c15_power_diagnostic; + uint32_t c15_power_control; /* power control */ + uint64_t dbgbvr[16]; /* breakpoint value registers */ + uint64_t dbgbcr[16]; /* breakpoint control registers */ + uint64_t dbgwvr[16]; /* watchpoint value registers */ + uint64_t dbgwcr[16]; /* watchpoint control registers */ + uint64_t mdscr_el1; + uint64_t oslsr_el1; /* OS Lock Status */ + uint64_t mdcr_el2; + uint64_t mdcr_el3; + /* Stores the architectural value of the counter *the last time it was + * updated* by pmccntr_op_start. Accesses should always be surrounded + * by pmccntr_op_start/pmccntr_op_finish to guarantee the latest + * architecturally-correct value is being read/set. + */ + uint64_t c15_ccnt; + /* Stores the delta between the architectural value and the underlying + * cycle count during normal operation. It is used to update c15_ccnt + * to be the correct architectural value before accesses. During + * accesses, c15_ccnt_delta contains the underlying count being used + * for the access, after which it reverts to the delta value in + * pmccntr_op_finish. + */ + uint64_t c15_ccnt_delta; + uint64_t c14_pmevcntr[31]; + uint64_t c14_pmevcntr_delta[31]; + uint64_t c14_pmevtyper[31]; + uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */ + uint64_t vpidr_el2; /* Virtualization Processor ID Register */ + uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */ + } cp15; + + struct { + /* M profile has up to 4 stack pointers: + * a Main Stack Pointer and a Process Stack Pointer for each + * of the Secure and Non-Secure states. (If the CPU doesn't support + * the security extension then it has only two SPs.) + * In QEMU we always store the currently active SP in regs[13], + * and the non-active SP for the current security state in + * v7m.other_sp. The stack pointers for the inactive security state + * are stored in other_ss_msp and other_ss_psp. + * switch_v7m_security_state() is responsible for rearranging them + * when we change security state. + */ + uint32_t other_sp; + uint32_t other_ss_msp; + uint32_t other_ss_psp; + uint32_t vecbase[M_REG_NUM_BANKS]; + uint32_t basepri[M_REG_NUM_BANKS]; + uint32_t control[M_REG_NUM_BANKS]; + uint32_t ccr[M_REG_NUM_BANKS]; /* Configuration and Control */ + uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */ + uint32_t hfsr; /* HardFault Status */ + uint32_t dfsr; /* Debug Fault Status Register */ + uint32_t sfsr; /* Secure Fault Status Register */ + uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */ + uint32_t bfar; /* BusFault Address */ + uint32_t sfar; /* Secure Fault Address Register */ + unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */ + int exception; + uint32_t primask[M_REG_NUM_BANKS]; + uint32_t faultmask[M_REG_NUM_BANKS]; + uint32_t aircr; /* only holds r/w state if security extn implemented */ + uint32_t secure; /* Is CPU in Secure state? 
(not guest visible) */
+        uint32_t csselr[M_REG_NUM_BANKS];
+        uint32_t scr[M_REG_NUM_BANKS];
+        uint32_t msplim[M_REG_NUM_BANKS];
+        uint32_t psplim[M_REG_NUM_BANKS];
+        uint32_t fpcar[M_REG_NUM_BANKS];
+        uint32_t fpccr[M_REG_NUM_BANKS];
+        uint32_t fpdscr[M_REG_NUM_BANKS];
+        uint32_t cpacr[M_REG_NUM_BANKS];
+        uint32_t nsacr;
+    } v7m;
+
+    /* Information associated with an exception about to be taken:
+     * code which raises an exception must set cs->exception_index and
+     * the relevant parts of this structure; the cpu_do_interrupt function
+     * will then set the guest-visible registers as part of the exception
+     * entry process.
+     */
+    struct {
+        uint32_t syndrome; /* AArch64 format syndrome register */
+        uint32_t fsr; /* AArch32 format fault status register info */
+        uint64_t vaddress; /* virtual addr associated with exception, if any */
+        uint32_t target_el; /* EL the exception should be targeted for */
+        /* If we implement EL2 we will also need to store information
+         * about the intermediate physical address for stage 2 faults.
+         */
+    } exception;
+
+    /* Information associated with an SError */
+    struct {
+        uint8_t pending;
+        uint8_t has_esr;
+        uint64_t esr;
+    } serror;
+
+    /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */
+    uint32_t irq_line_state;
+
+    /* Thumb-2 EE state. */
+    uint32_t teecr;
+    uint32_t teehbr;
+
+    /* VFP coprocessor state. */
+    struct {
+        ARMVectorReg zregs[32];
+
+#ifdef TARGET_AARCH64
+        /* Store FFR as pregs[16] to make it easier to treat as any other. */
+#define FFR_PRED_NUM 16
+        ARMPredicateReg pregs[17];
+        /* Scratch space for aa64 sve predicate temporary. */
+        ARMPredicateReg preg_tmp;
+#endif
+
+        /* We store these fpcsr fields separately for convenience. */
+        uint32_t qc[4] QEMU_ALIGNED(16);
+        int vec_len;
+        int vec_stride;
+
+        uint32_t xregs[16];
+
+        /* Scratch space for aa32 neon expansion. */
+        uint32_t scratch[8];
+
+        /* There are a number of distinct float control structures:
+         *
+         * fp_status: is the "normal" fp status.
+         * fp_status_f16: used for half-precision calculations
+         * standard_fp_status: the ARM "Standard FPSCR Value"
+         *
+         * Half-precision operations are governed by a separate
+         * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
+         * status structure to control this.
+         *
+         * The "Standard FPSCR" (ie default-NaN, flush-to-zero,
+         * round-to-nearest) is used by any operations (generally
+         * Neon) which the architecture defines as controlled by the
+         * standard FPSCR value rather than the FPSCR.
+         *
+         * To avoid having to transfer exception bits around, we simply
+         * say that the FPSCR cumulative exception flags are the logical
+         * OR of the flags in the three fp statuses. This relies on the
+         * only thing which needs to read the exception flags being
+         * an explicit FPSCR read.
+         */
+        float_status fp_status;
+        float_status fp_status_f16;
+        float_status standard_fp_status;
+
+        /* ZCR_EL[1-3] */
+        uint64_t zcr_el[4];
+    } vfp;
+    uint64_t exclusive_addr;
+    uint64_t exclusive_val;
+    uint64_t exclusive_high;
+
+    /* iwMMXt coprocessor state. */
+    struct {
+        uint64_t regs[16];
+        uint64_t val;
+
+        uint32_t cregs[16];
+    } iwmmxt;
+
+#ifdef TARGET_AARCH64
+    struct {
+        ARMPACKey apia;
+        ARMPACKey apib;
+        ARMPACKey apda;
+        ARMPACKey apdb;
+        ARMPACKey apga;
+    } keys;
+#endif
+
+    /* Fields up to this point are cleared by a CPU reset */
+#ifndef _MSC_VER
+    struct {} end_reset_fields;
+#else
+    int end_reset_fields;
+#endif
+
+    /* Fields after this point are preserved across CPU reset. */
+
+    /* Internal CPU feature flags.
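+     * One bit per ARM_FEATURE_* enum value: queried with arm_feature()
+     * and populated by the set_feature()/unset_feature() calls in
+     * arm_cpu_realizefn() and the per-model init functions above.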
*/ + uint64_t features; + + /* PMSAv7 MPU */ + struct { + uint32_t *drbar; + uint32_t *drsr; + uint32_t *dracr; + uint32_t rnr[M_REG_NUM_BANKS]; + } pmsav7; + + /* PMSAv8 MPU */ + struct { + /* The PMSAv8 implementation also shares some PMSAv7 config + * and state: + * pmsav7.rnr (region number register) + * pmsav7_dregion (number of configured regions) + */ + uint32_t *rbar[M_REG_NUM_BANKS]; + uint32_t *rlar[M_REG_NUM_BANKS]; + uint32_t mair0[M_REG_NUM_BANKS]; + uint32_t mair1[M_REG_NUM_BANKS]; + } pmsav8; + + /* v8M SAU */ + struct { + uint32_t *rbar; + uint32_t *rlar; + uint32_t rnr; + uint32_t ctrl; + } sau; + + void *nvic; + const struct arm_boot_info *boot_info; + /* Store GICv3CPUState to access from this struct */ + void *gicv3state; + + struct CPUBreakpoint *cpu_breakpoint[16]; + struct CPUWatchpoint *cpu_watchpoint[16]; + + // Unicorn engine + struct uc_struct *uc; +} CPUARMState; + +/** + * ARMELChangeHookFn: + * type of a function which can be registered via arm_register_el_change_hook() + * to get callbacks when the CPU changes its exception level or mode. + */ +typedef void ARMELChangeHookFn(ARMCPU *cpu, void *opaque); +typedef struct ARMELChangeHook ARMELChangeHook; +struct ARMELChangeHook { + ARMELChangeHookFn *hook; + void *opaque; + QLIST_ENTRY(ARMELChangeHook) node; +}; + +/* These values map onto the return values for + * QEMU_PSCI_0_2_FN_AFFINITY_INFO */ +typedef enum ARMPSCIState { + PSCI_ON = 0, + PSCI_OFF = 1, + PSCI_ON_PENDING = 2 +} ARMPSCIState; + +typedef struct ARMISARegisters ARMISARegisters; + +/** + * ARMCPU: + * @env: #CPUARMState + * + * An ARM CPU core. + */ +struct ARMCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUNegativeOffsetState neg; + CPUARMState env; + + /* Coprocessor information */ + GHashTable *cp_regs; + /* For marshalling (mostly coprocessor) register state between the + * kernel and QEMU (for KVM) and between two QEMUs (for migration), + * we use these arrays. + */ + /* List of register indexes managed via these arrays; (full KVM style + * 64 bit indexes, not CPRegInfo 32 bit indexes) + */ + uint64_t *cpreg_indexes; + /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */ + uint64_t *cpreg_values; + /* Length of the indexes, values, reset_values arrays */ + int32_t cpreg_array_len; + /* These are used only for migration: incoming data arrives in + * these fields and is sanity checked in post_load before copying + * to the working data structures above. + */ + uint64_t *cpreg_vmstate_indexes; + uint64_t *cpreg_vmstate_values; + int32_t cpreg_vmstate_array_len; + + /* Timers used by the generic (architected) timer */ + // QEMUTimer *gt_timer[NUM_GTIMERS]; + /* + * Timer used by the PMU. Its state is restored after migration by + * pmu_op_finish() - it does not need other handling during migration + */ + // QEMUTimer *pmu_timer; + + /* GPIO outputs for generic timer */ + //qemu_irq gt_timer_outputs[NUM_GTIMERS]; + /* GPIO output for GICv3 maintenance interrupt signal */ + //qemu_irq gicv3_maintenance_interrupt; + /* GPIO output for the PMU interrupt */ + //qemu_irq pmu_interrupt; + + /* MemoryRegion to use for secure physical accesses */ + MemoryRegion *secure_memory; + + /* For v8M, pointer to the IDAU interface provided by board/SoC */ + void *idau; + + /* PSCI version for this CPU + * Bits[31:16] = Major Version + * Bits[15:0] = Minor Version + */ + uint32_t psci_version; + + /* Should CPU start in PSCI powered-off state? 
*/ + bool start_powered_off; + + /* Current power state, access guarded by BQL */ + ARMPSCIState power_state; + + /* CPU has virtualization extension */ + bool has_el2; + /* CPU has security extension */ + bool has_el3; + /* CPU has PMU (Performance Monitor Unit) */ + bool has_pmu; + /* CPU has VFP */ + bool has_vfp; + /* CPU has Neon */ + bool has_neon; + /* CPU has M-profile DSP extension */ + bool has_dsp; + + /* CPU has memory protection unit */ + bool has_mpu; + /* PMSAv7 MPU number of supported regions */ + uint32_t pmsav7_dregion; + /* v8M SAU number of supported regions */ + uint32_t sau_sregion; + + /* PSCI conduit used to invoke PSCI methods + * 0 - disabled, 1 - smc, 2 - hvc + */ + uint32_t psci_conduit; + + /* For v8M, initial value of the Secure VTOR */ + uint32_t init_svtor; + + /* Uniprocessor system with MP extensions */ + bool mp_is_up; + + /* True if we tried kvm_arm_host_cpu_features() during CPU instance_init + * and the probe failed (so we need to report the error in realize) + */ + bool host_cpu_probe_failed; + + /* Specify the number of cores in this CPU cluster. Used for the L2CTLR + * register. + */ + int32_t core_count; + + /* The instance init functions for implementation-specific subclasses + * set these fields to specify the implementation-dependent values of + * various constant registers and reset values of non-constant + * registers. + * Some of these might become QOM properties eventually. + * Field names match the official register names as defined in the + * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix + * is used for reset values of non-constant registers; no reset_ + * prefix means a constant register. + * Some of these registers are split out into a substructure that + * is shared with the translators to control the ISA. + * + * Note that if you add an ID register to the ARMISARegisters struct + * you need to also update the 32-bit and 64-bit versions of the + * kvm_arm_get_host_cpu_features() function to correctly populate the + * field by reading the value from the KVM vCPU. + */ + struct ARMISARegisters { + uint32_t id_isar0; + uint32_t id_isar1; + uint32_t id_isar2; + uint32_t id_isar3; + uint32_t id_isar4; + uint32_t id_isar5; + uint32_t id_isar6; + uint32_t id_mmfr0; + uint32_t id_mmfr1; + uint32_t id_mmfr2; + uint32_t id_mmfr3; + uint32_t id_mmfr4; + uint32_t mvfr0; + uint32_t mvfr1; + uint32_t mvfr2; + uint32_t id_dfr0; + uint32_t dbgdidr; + uint64_t id_aa64isar0; + uint64_t id_aa64isar1; + uint64_t id_aa64pfr0; + uint64_t id_aa64pfr1; + uint64_t id_aa64mmfr0; + uint64_t id_aa64mmfr1; + uint64_t id_aa64mmfr2; + uint64_t id_aa64dfr0; + uint64_t id_aa64dfr1; + } isar; + uint32_t midr; + uint32_t revidr; + uint32_t reset_fpsid; + uint32_t ctr; + uint32_t reset_sctlr; + uint32_t id_pfr0; + uint32_t id_pfr1; + uint64_t pmceid0; + uint64_t pmceid1; + uint32_t id_afr0; + uint64_t id_aa64afr0; + uint64_t id_aa64afr1; + uint32_t clidr; + uint64_t mp_affinity; /* MP ID without feature bits */ + /* The elements of this array are the CCSIDR values for each cache, + * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc. 
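+     * The guest selects which entry is visible through CCSIDR via the
+     * CSSELR register (banked as csselr in cp15 above); the per-model
+     * reset values are filled in by the initfns earlier in this patch,
+     * e.g. cortex-a15 fills ccsidr[0..2].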
+ */ + uint64_t ccsidr[16]; + uint64_t reset_cbar; + uint32_t reset_auxcr; + bool reset_hivecs; + /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */ + uint32_t dcz_blocksize; + uint64_t rvbar; + + /* Configurable aspects of GIC cpu interface (which is part of the CPU) */ + int gic_num_lrs; /* number of list registers */ + int gic_vpribits; /* number of virtual priority bits */ + int gic_vprebits; /* number of virtual preemption bits */ + + /* Whether the cfgend input is high (i.e. this CPU should reset into + * big-endian mode). This setting isn't used directly: instead it modifies + * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the + * architecture version. + */ + bool cfgend; + + QLIST_HEAD(, ARMELChangeHook) pre_el_change_hooks; + QLIST_HEAD(, ARMELChangeHook) el_change_hooks; + + int32_t node_id; /* NUMA node this CPU belongs to */ + + /* Used to synchronize KVM and QEMU in-kernel device levels */ + uint8_t device_irq_level; + + /* Used to set the maximum vector length the cpu will support. */ + uint32_t sve_max_vq; + + /* + * In sve_vq_map each set bit is a supported vector length of + * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector + * length in quadwords. + * + * While processing properties during initialization, corresponding + * sve_vq_init bits are set for bits in sve_vq_map that have been + * set by properties. + */ + DECLARE_BITMAP(sve_vq_map, ARM_MAX_VQ); + DECLARE_BITMAP(sve_vq_init, ARM_MAX_VQ); + + /* Generic timer counter frequency, in Hz */ + uint64_t gt_cntfrq_hz; + + struct ARMCPUClass cc; +}; + +unsigned int gt_cntfrq_period_ns(ARMCPU *cpu); + +void arm_cpu_do_interrupt(CPUState *cpu); +void arm_v7m_cpu_do_interrupt(CPUState *cpu); +bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req); + +hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, + MemTxAttrs *attrs); + +/* + * Helpers to dynamically generate XML descriptions of the sysregs + * and SVE registers. Each returns the number of registers in its set. + */ +int arm_gen_dynamic_sysreg_xml(CPUState *cpu, int base_reg); +int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg); + +#ifdef TARGET_AARCH64 +void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); +void aarch64_sve_change_el(CPUARMState *env, int old_el, + int new_el, bool el0_a64); +void aarch64_add_sve_properties(void *obj); + +/* + * SVE registers are encoded in KVM's memory in an endianness-invariant format. + * The byte at offset i from the start of the in-memory representation contains + * the bits [(7 + 8 * i) : (8 * i)] of the register value. As this means the + * lowest offsets are stored in the lowest memory addresses, then that nearly + * matches QEMU's representation, which is to use an array of host-endian + * uint64_t's, where the lower offsets are at the lower indices. To complete + * the translation we just need to byte swap the uint64_t's on big-endian hosts.
+ */ +static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) +{ +#ifdef HOST_WORDS_BIGENDIAN + int i; + + for (i = 0; i < nr; ++i) { + dst[i] = bswap64(src[i]); + } + + return dst; +#else + return src; +#endif +} + +#else +static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } +static inline void aarch64_sve_change_el(CPUARMState *env, int o, + int n, bool a) +{ } +static inline void aarch64_add_sve_properties(void *obj) { } +#endif + +target_ulong do_arm_semihosting(CPUARMState *env); + +void aarch64_sync_32_to_64(CPUARMState *env); +void aarch64_sync_64_to_32(CPUARMState *env); + +int fp_exception_el(CPUARMState *env, int cur_el); +int sve_exception_el(CPUARMState *env, int cur_el); +uint32_t sve_zcr_len_for_el(CPUARMState *env, int el); + +static inline bool is_a64(CPUARMState *env) +{ + return env->aarch64; +} + +/* You can call this signal handler from your SIGBUS and SIGSEGV + signal handlers to inform the virtual CPU of exceptions. Non-zero + is returned if the signal was handled by the virtual CPU. */ +int cpu_arm_signal_handler(int host_signum, void *pinfo, + void *puc); + +/** + * pmu_op_start/finish + * @env: CPUARMState + * + * Convert all PMU counters between their delta form (the typical mode when + * they are enabled) and the guest-visible values. These two calls must + * surround any action which might affect the counters. + */ +void pmu_op_start(CPUARMState *env); +void pmu_op_finish(CPUARMState *env); + +/* + * Called when a PMU counter is due to overflow + */ +void arm_pmu_timer_cb(void *opaque); + +/** + * Functions to register as EL change hooks for PMU mode filtering + */ +void pmu_pre_el_change(ARMCPU *cpu, void *ignored); +void pmu_post_el_change(ARMCPU *cpu, void *ignored); + +/* + * pmu_init + * @cpu: ARMCPU + * + * Initialize the CPU's PMCEID[01]_EL0 registers and associated internal state + * for the current configuration + */ +void pmu_init(ARMCPU *cpu); + +/* SCTLR bit meanings. Several bits have been reused in newer + * versions of the architecture; in that case we define constants + * for both old and new bit meanings. Code which tests against those + * bits should probably check or otherwise arrange that the CPU + * is the architectural version it expects.
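As that note says, several SCTLR bit positions change meaning across architecture versions, so a test against a dual-meaning bit should be qualified by an architecture check. A hedged sketch using bit 7, which is SCTLR_B (big-endian) up to v6 but SCTLR_ITD (IT-disable) from v8 on; both constants and arm_feature() are defined elsewhere in this header:

    /* Only trust bit 7 as the IT-disable control on a v8 CPU. */
    static inline bool example_it_disabled(CPUARMState *env, uint64_t sctlr)
    {
        return arm_feature(env, ARM_FEATURE_V8) && (sctlr & SCTLR_ITD);
    }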
+ */ +#define SCTLR_M (1U << 0) +#define SCTLR_A (1U << 1) +#define SCTLR_C (1U << 2) +#define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */ +#define SCTLR_nTLSMD_32 (1U << 3) /* v8.2-LSMAOC, AArch32 only */ +#define SCTLR_SA (1U << 3) /* AArch64 only */ +#define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */ +#define SCTLR_LSMAOE_32 (1U << 4) /* v8.2-LSMAOC, AArch32 only */ +#define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */ +#define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */ +#define SCTLR_CP15BEN (1U << 5) /* v7 onward */ +#define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */ +#define SCTLR_nAA (1U << 6) /* when v8.4-LSE is implemented */ +#define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */ +#define SCTLR_ITD (1U << 7) /* v8 onward */ +#define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */ +#define SCTLR_SED (1U << 8) /* v8 onward */ +#define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */ +#define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */ +#define SCTLR_F (1U << 10) /* up to v6 */ +#define SCTLR_SW (1U << 10) /* v7 */ +#define SCTLR_EnRCTX (1U << 10) /* in v8.0-PredInv */ +#define SCTLR_Z (1U << 11) /* in v7, RES1 in v8 */ +#define SCTLR_EOS (1U << 11) /* v8.5-ExS */ +#define SCTLR_I (1U << 12) +#define SCTLR_V (1U << 13) /* AArch32 only */ +#define SCTLR_EnDB (1U << 13) /* v8.3, AArch64 only */ +#define SCTLR_RR (1U << 14) /* up to v7 */ +#define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */ +#define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */ +#define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */ +#define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */ +#define SCTLR_nTWI (1U << 16) /* v8 onward */ +#define SCTLR_HA (1U << 17) /* up to v7, RES0 in v8 */ +#define SCTLR_BR (1U << 17) /* PMSA only */ +#define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */ +#define SCTLR_nTWE (1U << 18) /* v8 onward */ +#define SCTLR_WXN (1U << 19) +#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */ +#define SCTLR_UWXN (1U << 20) /* v7 onward, AArch32 only */ +#define SCTLR_FI (1U << 21) /* up to v7, v8 RES0 */ +#define SCTLR_IESB (1U << 21) /* v8.2-IESB, AArch64 only */ +#define SCTLR_U (1U << 22) /* up to v6, RAO in v7 */ +#define SCTLR_EIS (1U << 22) /* v8.5-ExS */ +#define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */ +#define SCTLR_SPAN (1U << 23) /* v8.1-PAN */ +#define SCTLR_VE (1U << 24) /* up to v7 */ +#define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */ +#define SCTLR_EE (1U << 25) +#define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */ +#define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */ +#define SCTLR_NMFI (1U << 27) /* up to v7, RAZ in v7VE and v8 */ +#define SCTLR_EnDA (1U << 27) /* v8.3, AArch64 only */ +#define SCTLR_TRE (1U << 28) /* AArch32 only */ +#define SCTLR_nTLSMD_64 (1U << 28) /* v8.2-LSMAOC, AArch64 only */ +#define SCTLR_AFE (1U << 29) /* AArch32 only */ +#define SCTLR_LSMAOE_64 (1U << 29) /* v8.2-LSMAOC, AArch64 only */ +#define SCTLR_TE (1U << 30) /* AArch32 only */ +#define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */ +#define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */ +#define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */ +#define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */ +#define SCTLR_ITFSB (1ULL << 37) /* v8.5-MemTag */ +#define SCTLR_TCF0 (3ULL << 38) /* v8.5-MemTag */ +#define SCTLR_TCF (3ULL << 40) /* v8.5-MemTag */ +#define SCTLR_ATA0 (1ULL << 42) /* v8.5-MemTag */ +#define SCTLR_ATA (1ULL << 43) /* v8.5-MemTag */ +#define SCTLR_DSSBS (1ULL << 44) /* v8.5 */ + +#define CPTR_TCPAC (1U << 31) +#define 
CPTR_TTA (1U << 20) +#define CPTR_TFP (1U << 10) +#define CPTR_TZ (1U << 8) /* CPTR_EL2 */ +#define CPTR_EZ (1U << 8) /* CPTR_EL3 */ + +#define MDCR_EPMAD (1U << 21) +#define MDCR_EDAD (1U << 20) +#define MDCR_SPME (1U << 17) /* MDCR_EL3 */ +#define MDCR_HPMD (1U << 17) /* MDCR_EL2 */ +#define MDCR_SDD (1U << 16) +#define MDCR_SPD (3U << 14) +#define MDCR_TDRA (1U << 11) +#define MDCR_TDOSA (1U << 10) +#define MDCR_TDA (1U << 9) +#define MDCR_TDE (1U << 8) +#define MDCR_HPME (1U << 7) +#define MDCR_TPM (1U << 6) +#define MDCR_TPMCR (1U << 5) +#define MDCR_HPMN (0x1fU) + +/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */ +#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD) + +#define CPSR_M (0x1fU) +#define CPSR_T (1U << 5) +#define CPSR_F (1U << 6) +#define CPSR_I (1U << 7) +#define CPSR_A (1U << 8) +#define CPSR_E (1U << 9) +#define CPSR_IT_2_7 (0xfc00U) +#define CPSR_GE (0xfU << 16) +#define CPSR_IL (1U << 20) +#define CPSR_PAN (1U << 22) +#define CPSR_J (1U << 24) +#define CPSR_IT_0_1 (3U << 25) +#define CPSR_Q (1U << 27) +#define CPSR_V (1U << 28) +#define CPSR_C (1U << 29) +#define CPSR_Z (1U << 30) +#define CPSR_N (1U << 31) +#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V) +#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F) + +#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7) +#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \ + | CPSR_NZCV) +/* Bits writable in user mode. */ +#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE) +/* Execution state bits. MRS read as zero, MSR writes ignored. */ +#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL) + +/* Bit definitions for M profile XPSR. Most are the same as CPSR. */ +#define XPSR_EXCP 0x1ffU +#define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */ +#define XPSR_IT_2_7 CPSR_IT_2_7 +#define XPSR_GE CPSR_GE +#define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */ +#define XPSR_T (1U << 24) /* Not the same as CPSR_T ! */ +#define XPSR_IT_0_1 CPSR_IT_0_1 +#define XPSR_Q CPSR_Q +#define XPSR_V CPSR_V +#define XPSR_C CPSR_C +#define XPSR_Z CPSR_Z +#define XPSR_N CPSR_N +#define XPSR_NZCV CPSR_NZCV +#define XPSR_IT CPSR_IT + +#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */ +#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */ +#define TTBCR_PD0 (1U << 4) +#define TTBCR_PD1 (1U << 5) +#define TTBCR_EPD0 (1U << 7) +#define TTBCR_IRGN0 (3U << 8) +#define TTBCR_ORGN0 (3U << 10) +#define TTBCR_SH0 (3U << 12) +#define TTBCR_T1SZ (3U << 16) +#define TTBCR_A1 (1U << 22) +#define TTBCR_EPD1 (1U << 23) +#define TTBCR_IRGN1 (3U << 24) +#define TTBCR_ORGN1 (3U << 26) +#define TTBCR_SH1 (1U << 28) +#define TTBCR_EAE (1U << 31) + +/* Bit definitions for ARMv8 SPSR (PSTATE) format. + * Only these are valid when in AArch64 mode; in + * AArch32 mode SPSRs are basically CPSR-format. 
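One easy-to-miss detail in the layout above is that the 8-bit ITSTATE field is split across two CPSR homes: IT[1:0] at bits 26:25 and IT[7:2] at bits 15:10. A small sketch of reassembling it, consistent with the condexec_bits handling in xpsr_write() below:

    /* Reassemble the 8-bit ITSTATE value from its two CPSR fields. */
    static inline uint32_t example_cpsr_itstate(uint32_t cpsr)
    {
        return ((cpsr & CPSR_IT_0_1) >> 25) | ((cpsr & CPSR_IT_2_7) >> 8);
    }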
+ */ +#define PSTATE_SP (1U) +#define PSTATE_M (0xFU) +#define PSTATE_nRW (1U << 4) +#define PSTATE_F (1U << 6) +#define PSTATE_I (1U << 7) +#define PSTATE_A (1U << 8) +#define PSTATE_D (1U << 9) +#define PSTATE_BTYPE (3U << 10) +#define PSTATE_IL (1U << 20) +#define PSTATE_SS (1U << 21) +#define PSTATE_PAN (1U << 22) +#define PSTATE_UAO (1U << 23) +#define PSTATE_V (1U << 28) +#define PSTATE_C (1U << 29) +#define PSTATE_Z (1U << 30) +#define PSTATE_N (1U << 31) +#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V) +#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F) +#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE) +/* Mode values for AArch64 */ +#define PSTATE_MODE_EL3h 13 +#define PSTATE_MODE_EL3t 12 +#define PSTATE_MODE_EL2h 9 +#define PSTATE_MODE_EL2t 8 +#define PSTATE_MODE_EL1h 5 +#define PSTATE_MODE_EL1t 4 +#define PSTATE_MODE_EL0t 0 + +/* Write a new value to v7m.exception, thus transitioning into or out + * of Handler mode; this may result in a change of active stack pointer. + */ +void write_v7m_exception(CPUARMState *env, uint32_t new_exc); + +/* Map EL and handler into a PSTATE_MODE. */ +static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler) +{ + return (el << 2) | handler; +} + +/* Return the current PSTATE value. For the moment we don't support 32<->64 bit + * interprocessing, so we don't attempt to sync with the cpsr state used by + * the 32 bit decoder. + */ +static inline uint32_t pstate_read(CPUARMState *env) +{ + int ZF; + + ZF = (env->ZF == 0); + return (env->NF & 0x80000000) | (ZF << 30) + | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) + | env->pstate | env->daif | (env->btype << 10); +} + +static inline void pstate_write(CPUARMState *env, uint32_t val) +{ + env->ZF = (~val) & PSTATE_Z; + env->NF = val; + env->CF = (val >> 29) & 1; + env->VF = (val << 3) & 0x80000000; + env->daif = val & PSTATE_DAIF; + env->btype = (val >> 10) & 3; + env->pstate = val & ~CACHED_PSTATE_BITS; +} + +/* Return the current CPSR value. */ +uint32_t cpsr_read(CPUARMState *env); + +typedef enum CPSRWriteType { + CPSRWriteByInstr = 0, /* from guest MSR or CPS */ + CPSRWriteExceptionReturn = 1, /* from guest exception return insn */ + CPSRWriteRaw = 2, /* trust values, do not switch reg banks */ + CPSRWriteByGDBStub = 3, /* from the GDB stub */ +} CPSRWriteType; + +/* Set the CPSR. Note that some bits of mask must be all-set or all-clear.*/ +void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, + CPSRWriteType write_type); + +/* Return the current xPSR value. */ +static inline uint32_t xpsr_read(CPUARMState *env) +{ + int ZF; + ZF = (env->ZF == 0); + return (env->NF & 0x80000000) | (ZF << 30) + | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) + | (env->thumb << 24) | ((env->condexec_bits & 3) << 25) + | ((env->condexec_bits & 0xfc) << 8) + | (env->GE << 16) + | env->v7m.exception; +} + +/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. 
*/ +static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) +{ + if (mask & XPSR_NZCV) { + env->ZF = (~val) & XPSR_Z; + env->NF = val; + env->CF = (val >> 29) & 1; + env->VF = (val << 3) & 0x80000000; + } + if (mask & XPSR_Q) { + env->QF = ((val & XPSR_Q) != 0); + } + if (mask & XPSR_GE) { + env->GE = (val & XPSR_GE) >> 16; + } + if (mask & XPSR_T) { + env->thumb = ((val & XPSR_T) != 0); + } + if (mask & XPSR_IT_0_1) { + env->condexec_bits &= ~3; + env->condexec_bits |= (val >> 25) & 3; + } + if (mask & XPSR_IT_2_7) { + env->condexec_bits &= 3; + env->condexec_bits |= (val >> 8) & 0xfc; + } + if (mask & XPSR_EXCP) { + /* Note that this only happens on exception exit */ + write_v7m_exception(env, val & XPSR_EXCP); + } +} + +#define HCR_VM (1ULL << 0) +#define HCR_SWIO (1ULL << 1) +#define HCR_PTW (1ULL << 2) +#define HCR_FMO (1ULL << 3) +#define HCR_IMO (1ULL << 4) +#define HCR_AMO (1ULL << 5) +#define HCR_VF (1ULL << 6) +#define HCR_VI (1ULL << 7) +#define HCR_VSE (1ULL << 8) +#define HCR_FB (1ULL << 9) +#define HCR_BSU_MASK (3ULL << 10) +#define HCR_DC (1ULL << 12) +#define HCR_TWI (1ULL << 13) +#define HCR_TWE (1ULL << 14) +#define HCR_TID0 (1ULL << 15) +#define HCR_TID1 (1ULL << 16) +#define HCR_TID2 (1ULL << 17) +#define HCR_TID3 (1ULL << 18) +#define HCR_TSC (1ULL << 19) +#define HCR_TIDCP (1ULL << 20) +#define HCR_TACR (1ULL << 21) +#define HCR_TSW (1ULL << 22) +#define HCR_TPCP (1ULL << 23) +#define HCR_TPU (1ULL << 24) +#define HCR_TTLB (1ULL << 25) +#define HCR_TVM (1ULL << 26) +#define HCR_TGE (1ULL << 27) +#define HCR_TDZ (1ULL << 28) +#define HCR_HCD (1ULL << 29) +#define HCR_TRVM (1ULL << 30) +#define HCR_RW (1ULL << 31) +#define HCR_CD (1ULL << 32) +#define HCR_ID (1ULL << 33) +#define HCR_E2H (1ULL << 34) +#define HCR_TLOR (1ULL << 35) +#define HCR_TERR (1ULL << 36) +#define HCR_TEA (1ULL << 37) +#define HCR_MIOCNCE (1ULL << 38) +/* RES0 bit 39 */ +#define HCR_APK (1ULL << 40) +#define HCR_API (1ULL << 41) +#define HCR_NV (1ULL << 42) +#define HCR_NV1 (1ULL << 43) +#define HCR_AT (1ULL << 44) +#define HCR_NV2 (1ULL << 45) +#define HCR_FWB (1ULL << 46) +#define HCR_FIEN (1ULL << 47) +/* RES0 bit 48 */ +#define HCR_TID4 (1ULL << 49) +#define HCR_TICAB (1ULL << 50) +#define HCR_AMVOFFEN (1ULL << 51) +#define HCR_TOCU (1ULL << 52) +#define HCR_ENSCXT (1ULL << 53) +#define HCR_TTLBIS (1ULL << 54) +#define HCR_TTLBOS (1ULL << 55) +#define HCR_ATA (1ULL << 56) +#define HCR_DCT (1ULL << 57) +#define HCR_TID5 (1ULL << 58) +#define HCR_TWEDEN (1ULL << 59) +#define HCR_TWEDEL MAKE_64BIT_MASK(60, 4) + +#define SCR_NS (1U << 0) +#define SCR_IRQ (1U << 1) +#define SCR_FIQ (1U << 2) +#define SCR_EA (1U << 3) +#define SCR_FW (1U << 4) +#define SCR_AW (1U << 5) +#define SCR_NET (1U << 6) +#define SCR_SMD (1U << 7) +#define SCR_HCE (1U << 8) +#define SCR_SIF (1U << 9) +#define SCR_RW (1U << 10) +#define SCR_ST (1U << 11) +#define SCR_TWI (1U << 12) +#define SCR_TWE (1U << 13) +#define SCR_TLOR (1U << 14) +#define SCR_TERR (1U << 15) +#define SCR_APK (1U << 16) +#define SCR_API (1U << 17) +#define SCR_EEL2 (1U << 18) +#define SCR_EASE (1U << 19) +#define SCR_NMEA (1U << 20) +#define SCR_FIEN (1U << 21) +#define SCR_ENSCXT (1U << 25) +#define SCR_ATA (1U << 26) + +/* Return the current FPSCR value. */ +uint32_t vfp_get_fpscr(CPUARMState *env); +void vfp_set_fpscr(CPUARMState *env, uint32_t val); + +/* FPCR, Floating Point Control Register + * FPSR, Floating Point Status Register + * + * For A64 the FPSCR is split into two logically distinct registers, + * FPCR and FPSR.
However since they still use non-overlapping bits + * we store the underlying state in fpscr and just mask on read/write. + */ +#define FPSR_MASK 0xf800009f +#define FPCR_MASK 0x07ff9f00 + +#define FPCR_IOE (1 << 8) /* Invalid Operation exception trap enable */ +#define FPCR_DZE (1 << 9) /* Divide by Zero exception trap enable */ +#define FPCR_OFE (1 << 10) /* Overflow exception trap enable */ +#define FPCR_UFE (1 << 11) /* Underflow exception trap enable */ +#define FPCR_IXE (1 << 12) /* Inexact exception trap enable */ +#define FPCR_IDE (1 << 15) /* Input Denormal exception trap enable */ +#define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */ +#define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */ +#define FPCR_DN (1 << 25) /* Default NaN enable bit */ +#define FPCR_QC (1 << 27) /* Cumulative saturation bit */ + +static inline uint32_t vfp_get_fpsr(CPUARMState *env) +{ + return vfp_get_fpscr(env) & FPSR_MASK; +} + +static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val) +{ + uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK); + vfp_set_fpscr(env, new_fpscr); +} + +static inline uint32_t vfp_get_fpcr(CPUARMState *env) +{ + return vfp_get_fpscr(env) & FPCR_MASK; +} + +static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val) +{ + uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK); + vfp_set_fpscr(env, new_fpscr); +} + +enum arm_cpu_mode { + ARM_CPU_MODE_USR = 0x10, + ARM_CPU_MODE_FIQ = 0x11, + ARM_CPU_MODE_IRQ = 0x12, + ARM_CPU_MODE_SVC = 0x13, + ARM_CPU_MODE_MON = 0x16, + ARM_CPU_MODE_ABT = 0x17, + ARM_CPU_MODE_HYP = 0x1a, + ARM_CPU_MODE_UND = 0x1b, + ARM_CPU_MODE_SYS = 0x1f +}; + +/* VFP system registers. */ +#define ARM_VFP_FPSID 0 +#define ARM_VFP_FPSCR 1 +#define ARM_VFP_MVFR2 5 +#define ARM_VFP_MVFR1 6 +#define ARM_VFP_MVFR0 7 +#define ARM_VFP_FPEXC 8 +#define ARM_VFP_FPINST 9 +#define ARM_VFP_FPINST2 10 + +/* iwMMXt coprocessor control registers. 
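A short worked example of the mask-based split implemented just above: because FPCR_MASK (bits 8-12, 15 and 16-26) and FPSR_MASK (bits 0-4, 7 and 27-31) select disjoint bits of the underlying fpscr, updating the control half cannot disturb the status half:

    /* Enable Default NaN mode via the FPCR view; status flags such as
     * the cumulative saturation bit FPCR_QC (bit 27, in the FPSR view)
     * are left untouched because the two masks do not overlap. */
    static inline void example_enable_default_nan(CPUARMState *env)
    {
        vfp_set_fpcr(env, vfp_get_fpcr(env) | FPCR_DN);
    }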
*/ +#define ARM_IWMMXT_wCID 0 +#define ARM_IWMMXT_wCon 1 +#define ARM_IWMMXT_wCSSF 2 +#define ARM_IWMMXT_wCASF 3 +#define ARM_IWMMXT_wCGR0 8 +#define ARM_IWMMXT_wCGR1 9 +#define ARM_IWMMXT_wCGR2 10 +#define ARM_IWMMXT_wCGR3 11 + +/* V7M CCR bits */ +FIELD(V7M_CCR, NONBASETHRDENA, 0, 1) +FIELD(V7M_CCR, USERSETMPEND, 1, 1) +FIELD(V7M_CCR, UNALIGN_TRP, 3, 1) +FIELD(V7M_CCR, DIV_0_TRP, 4, 1) +FIELD(V7M_CCR, BFHFNMIGN, 8, 1) +FIELD(V7M_CCR, STKALIGN, 9, 1) +FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1) +FIELD(V7M_CCR, DC, 16, 1) +FIELD(V7M_CCR, IC, 17, 1) +FIELD(V7M_CCR, BP, 18, 1) + +/* V7M SCR bits */ +FIELD(V7M_SCR, SLEEPONEXIT, 1, 1) +FIELD(V7M_SCR, SLEEPDEEP, 2, 1) +FIELD(V7M_SCR, SLEEPDEEPS, 3, 1) +FIELD(V7M_SCR, SEVONPEND, 4, 1) + +/* V7M AIRCR bits */ +FIELD(V7M_AIRCR, VECTRESET, 0, 1) +FIELD(V7M_AIRCR, VECTCLRACTIVE, 1, 1) +FIELD(V7M_AIRCR, SYSRESETREQ, 2, 1) +FIELD(V7M_AIRCR, SYSRESETREQS, 3, 1) +FIELD(V7M_AIRCR, PRIGROUP, 8, 3) +FIELD(V7M_AIRCR, BFHFNMINS, 13, 1) +FIELD(V7M_AIRCR, PRIS, 14, 1) +FIELD(V7M_AIRCR, ENDIANNESS, 15, 1) +FIELD(V7M_AIRCR, VECTKEY, 16, 16) + +/* V7M CFSR bits for MMFSR */ +FIELD(V7M_CFSR, IACCVIOL, 0, 1) +FIELD(V7M_CFSR, DACCVIOL, 1, 1) +FIELD(V7M_CFSR, MUNSTKERR, 3, 1) +FIELD(V7M_CFSR, MSTKERR, 4, 1) +FIELD(V7M_CFSR, MLSPERR, 5, 1) +FIELD(V7M_CFSR, MMARVALID, 7, 1) + +/* V7M CFSR bits for BFSR */ +FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1) +FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1) +FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1) +FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1) +FIELD(V7M_CFSR, STKERR, 8 + 4, 1) +FIELD(V7M_CFSR, LSPERR, 8 + 5, 1) +FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1) + +/* V7M CFSR bits for UFSR */ +FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1) +FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1) +FIELD(V7M_CFSR, INVPC, 16 + 2, 1) +FIELD(V7M_CFSR, NOCP, 16 + 3, 1) +FIELD(V7M_CFSR, STKOF, 16 + 4, 1) +FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1) +FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1) + +/* V7M CFSR bit masks covering all of the subregister bits */ +FIELD(V7M_CFSR, MMFSR, 0, 8) +FIELD(V7M_CFSR, BFSR, 8, 8) +FIELD(V7M_CFSR, UFSR, 16, 16) + +/* V7M HFSR bits */ +FIELD(V7M_HFSR, VECTTBL, 1, 1) +FIELD(V7M_HFSR, FORCED, 30, 1) +FIELD(V7M_HFSR, DEBUGEVT, 31, 1) + +/* V7M DFSR bits */ +FIELD(V7M_DFSR, HALTED, 0, 1) +FIELD(V7M_DFSR, BKPT, 1, 1) +FIELD(V7M_DFSR, DWTTRAP, 2, 1) +FIELD(V7M_DFSR, VCATCH, 3, 1) +FIELD(V7M_DFSR, EXTERNAL, 4, 1) + +/* V7M SFSR bits */ +FIELD(V7M_SFSR, INVEP, 0, 1) +FIELD(V7M_SFSR, INVIS, 1, 1) +FIELD(V7M_SFSR, INVER, 2, 1) +FIELD(V7M_SFSR, AUVIOL, 3, 1) +FIELD(V7M_SFSR, INVTRAN, 4, 1) +FIELD(V7M_SFSR, LSPERR, 5, 1) +FIELD(V7M_SFSR, SFARVALID, 6, 1) +FIELD(V7M_SFSR, LSERR, 7, 1) + +/* v7M MPU_CTRL bits */ +FIELD(V7M_MPU_CTRL, ENABLE, 0, 1) +FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1) +FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1) + +/* v7M CLIDR bits */ +FIELD(V7M_CLIDR, CTYPE_ALL, 0, 21) +FIELD(V7M_CLIDR, LOUIS, 21, 3) +FIELD(V7M_CLIDR, LOC, 24, 3) +FIELD(V7M_CLIDR, LOUU, 27, 3) +FIELD(V7M_CLIDR, ICB, 30, 2) + +FIELD(V7M_CSSELR, IND, 0, 1) +FIELD(V7M_CSSELR, LEVEL, 1, 3) +/* We use the combination of InD and Level to index into cpu->ccsidr[]; + * define a mask for this and check that it doesn't permit running off + * the end of the array. 
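The FIELD() definitions above come from QEMU's hw/registerfields.h, which also generates R_<REG>_<FIELD>_MASK/_SHIFT constants and the FIELD_EX32()/FIELD_DP32() accessors. A minimal sketch of reading and updating CFSR sub-fields that way, assuming those upstream helpers are available here:

    /* If MMFSR.MMARVALID is set in a CFSR image, also flag DACCVIOL. */
    static inline uint32_t example_update_cfsr(uint32_t cfsr)
    {
        if (FIELD_EX32(cfsr, V7M_CFSR, MMARVALID)) {
            cfsr = FIELD_DP32(cfsr, V7M_CFSR, DACCVIOL, 1);
        }
        return cfsr;
    }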
+ */ +FIELD(V7M_CSSELR, INDEX, 0, 4) + +/* v7M FPCCR bits */ +FIELD(V7M_FPCCR, LSPACT, 0, 1) +FIELD(V7M_FPCCR, USER, 1, 1) +FIELD(V7M_FPCCR, S, 2, 1) +FIELD(V7M_FPCCR, THREAD, 3, 1) +FIELD(V7M_FPCCR, HFRDY, 4, 1) +FIELD(V7M_FPCCR, MMRDY, 5, 1) +FIELD(V7M_FPCCR, BFRDY, 6, 1) +FIELD(V7M_FPCCR, SFRDY, 7, 1) +FIELD(V7M_FPCCR, MONRDY, 8, 1) +FIELD(V7M_FPCCR, SPLIMVIOL, 9, 1) +FIELD(V7M_FPCCR, UFRDY, 10, 1) +FIELD(V7M_FPCCR, RES0, 11, 15) +FIELD(V7M_FPCCR, TS, 26, 1) +FIELD(V7M_FPCCR, CLRONRETS, 27, 1) +FIELD(V7M_FPCCR, CLRONRET, 28, 1) +FIELD(V7M_FPCCR, LSPENS, 29, 1) +FIELD(V7M_FPCCR, LSPEN, 30, 1) +FIELD(V7M_FPCCR, ASPEN, 31, 1) +/* These bits are banked. Others are non-banked and live in the M_REG_S bank */ +#define R_V7M_FPCCR_BANKED_MASK \ + (R_V7M_FPCCR_LSPACT_MASK | \ + R_V7M_FPCCR_USER_MASK | \ + R_V7M_FPCCR_THREAD_MASK | \ + R_V7M_FPCCR_MMRDY_MASK | \ + R_V7M_FPCCR_SPLIMVIOL_MASK | \ + R_V7M_FPCCR_UFRDY_MASK | \ + R_V7M_FPCCR_ASPEN_MASK) + +/* + * System register ID fields. + */ +FIELD(MIDR_EL1, REVISION, 0, 4) +FIELD(MIDR_EL1, PARTNUM, 4, 12) +FIELD(MIDR_EL1, ARCHITECTURE, 16, 4) +FIELD(MIDR_EL1, VARIANT, 20, 4) +FIELD(MIDR_EL1, IMPLEMENTER, 24, 8) + +FIELD(ID_ISAR0, SWAP, 0, 4) +FIELD(ID_ISAR0, BITCOUNT, 4, 4) +FIELD(ID_ISAR0, BITFIELD, 8, 4) +FIELD(ID_ISAR0, CMPBRANCH, 12, 4) +FIELD(ID_ISAR0, COPROC, 16, 4) +FIELD(ID_ISAR0, DEBUG, 20, 4) +FIELD(ID_ISAR0, DIVIDE, 24, 4) + +FIELD(ID_ISAR1, ENDIAN, 0, 4) +FIELD(ID_ISAR1, EXCEPT, 4, 4) +FIELD(ID_ISAR1, EXCEPT_AR, 8, 4) +FIELD(ID_ISAR1, EXTEND, 12, 4) +FIELD(ID_ISAR1, IFTHEN, 16, 4) +FIELD(ID_ISAR1, IMMEDIATE, 20, 4) +FIELD(ID_ISAR1, INTERWORK, 24, 4) +FIELD(ID_ISAR1, JAZELLE, 28, 4) + +FIELD(ID_ISAR2, LOADSTORE, 0, 4) +FIELD(ID_ISAR2, MEMHINT, 4, 4) +FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4) +FIELD(ID_ISAR2, MULT, 12, 4) +FIELD(ID_ISAR2, MULTS, 16, 4) +FIELD(ID_ISAR2, MULTU, 20, 4) +FIELD(ID_ISAR2, PSR_AR, 24, 4) +FIELD(ID_ISAR2, REVERSAL, 28, 4) + +FIELD(ID_ISAR3, SATURATE, 0, 4) +FIELD(ID_ISAR3, SIMD, 4, 4) +FIELD(ID_ISAR3, SVC, 8, 4) +FIELD(ID_ISAR3, SYNCHPRIM, 12, 4) +FIELD(ID_ISAR3, TABBRANCH, 16, 4) +FIELD(ID_ISAR3, T32COPY, 20, 4) +FIELD(ID_ISAR3, TRUENOP, 24, 4) +FIELD(ID_ISAR3, T32EE, 28, 4) + +FIELD(ID_ISAR4, UNPRIV, 0, 4) +FIELD(ID_ISAR4, WITHSHIFTS, 4, 4) +FIELD(ID_ISAR4, WRITEBACK, 8, 4) +FIELD(ID_ISAR4, SMC, 12, 4) +FIELD(ID_ISAR4, BARRIER, 16, 4) +FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4) +FIELD(ID_ISAR4, PSR_M, 24, 4) +FIELD(ID_ISAR4, SWP_FRAC, 28, 4) + +FIELD(ID_ISAR5, SEVL, 0, 4) +FIELD(ID_ISAR5, AES, 4, 4) +FIELD(ID_ISAR5, SHA1, 8, 4) +FIELD(ID_ISAR5, SHA2, 12, 4) +FIELD(ID_ISAR5, CRC32, 16, 4) +FIELD(ID_ISAR5, RDM, 24, 4) +FIELD(ID_ISAR5, VCMA, 28, 4) + +FIELD(ID_ISAR6, JSCVT, 0, 4) +FIELD(ID_ISAR6, DP, 4, 4) +FIELD(ID_ISAR6, FHM, 8, 4) +FIELD(ID_ISAR6, SB, 12, 4) +FIELD(ID_ISAR6, SPECRES, 16, 4) + +FIELD(ID_MMFR3, CMAINTVA, 0, 4) +FIELD(ID_MMFR3, CMAINTSW, 4, 4) +FIELD(ID_MMFR3, BPMAINT, 8, 4) +FIELD(ID_MMFR3, MAINTBCST, 12, 4) +FIELD(ID_MMFR3, PAN, 16, 4) +FIELD(ID_MMFR3, COHWALK, 20, 4) +FIELD(ID_MMFR3, CMEMSZ, 24, 4) +FIELD(ID_MMFR3, SUPERSEC, 28, 4) + +FIELD(ID_MMFR4, SPECSEI, 0, 4) +FIELD(ID_MMFR4, AC2, 4, 4) +FIELD(ID_MMFR4, XNX, 8, 4) +FIELD(ID_MMFR4, CNP, 12, 4) +FIELD(ID_MMFR4, HPDS, 16, 4) +FIELD(ID_MMFR4, LSM, 20, 4) +FIELD(ID_MMFR4, CCIDX, 24, 4) +FIELD(ID_MMFR4, EVT, 28, 4) + +FIELD(ID_AA64ISAR0, AES, 4, 4) +FIELD(ID_AA64ISAR0, SHA1, 8, 4) +FIELD(ID_AA64ISAR0, SHA2, 12, 4) +FIELD(ID_AA64ISAR0, CRC32, 16, 4) +FIELD(ID_AA64ISAR0, ATOMIC, 20, 4) +FIELD(ID_AA64ISAR0, RDM, 28, 4) +#ifdef _MSC_VER +/* warning C4309: 
'initializing': truncation of constant value. + enums are 32-bit in MSVC. */ +#define R_ID_AA64ISAR0_SHA3_SHIFT 32 +#define R_ID_AA64ISAR0_SHA3_LENGTH 4 +#define R_ID_AA64ISAR0_SHA3_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_SHA3_SHIFT, R_ID_AA64ISAR0_SHA3_LENGTH) +#define R_ID_AA64ISAR0_SM3_SHIFT 36 +#define R_ID_AA64ISAR0_SM3_LENGTH 4 +#define R_ID_AA64ISAR0_SM3_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_SM3_SHIFT, R_ID_AA64ISAR0_SM3_LENGTH) +#define R_ID_AA64ISAR0_SM4_SHIFT 40 +#define R_ID_AA64ISAR0_SM4_LENGTH 4 +#define R_ID_AA64ISAR0_SM4_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_SM4_SHIFT, R_ID_AA64ISAR0_SM4_LENGTH) +#define R_ID_AA64ISAR0_DP_SHIFT 44 +#define R_ID_AA64ISAR0_DP_LENGTH 4 +#define R_ID_AA64ISAR0_DP_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_DP_SHIFT, R_ID_AA64ISAR0_DP_LENGTH) +#define R_ID_AA64ISAR0_FHM_SHIFT 48 +#define R_ID_AA64ISAR0_FHM_LENGTH 4 +#define R_ID_AA64ISAR0_FHM_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_FHM_SHIFT, R_ID_AA64ISAR0_FHM_LENGTH) +#define R_ID_AA64ISAR0_TS_SHIFT 52 +#define R_ID_AA64ISAR0_TS_LENGTH 4 +#define R_ID_AA64ISAR0_TS_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_TS_SHIFT, R_ID_AA64ISAR0_TS_LENGTH) +#define R_ID_AA64ISAR0_TLB_SHIFT 56 +#define R_ID_AA64ISAR0_TLB_LENGTH 4 +#define R_ID_AA64ISAR0_TLB_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_TLB_SHIFT, R_ID_AA64ISAR0_TLB_LENGTH) +#define R_ID_AA64ISAR0_RNDR_SHIFT 60 +#define R_ID_AA64ISAR0_RNDR_LENGTH 4 +#define R_ID_AA64ISAR0_RNDR_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR0_RNDR_SHIFT, R_ID_AA64ISAR0_RNDR_LENGTH) +#else +FIELD(ID_AA64ISAR0, SHA3, 32, 4) +FIELD(ID_AA64ISAR0, SM3, 36, 4) +FIELD(ID_AA64ISAR0, SM4, 40, 4) +FIELD(ID_AA64ISAR0, DP, 44, 4) +FIELD(ID_AA64ISAR0, FHM, 48, 4) +FIELD(ID_AA64ISAR0, TS, 52, 4) +FIELD(ID_AA64ISAR0, TLB, 56, 4) +FIELD(ID_AA64ISAR0, RNDR, 60, 4) +#endif + +FIELD(ID_AA64ISAR1, DPB, 0, 4) +FIELD(ID_AA64ISAR1, APA, 4, 4) +FIELD(ID_AA64ISAR1, API, 8, 4) +FIELD(ID_AA64ISAR1, JSCVT, 12, 4) +FIELD(ID_AA64ISAR1, FCMA, 16, 4) +FIELD(ID_AA64ISAR1, LRCPC, 20, 4) +FIELD(ID_AA64ISAR1, GPA, 24, 4) +FIELD(ID_AA64ISAR1, GPI, 28, 4) +#ifdef _MSC_VER +#define R_ID_AA64ISAR1_FRINTTS_SHIFT 32 +#define R_ID_AA64ISAR1_FRINTTS_LENGTH 4 +#define R_ID_AA64ISAR1_FRINTTS_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR1_FRINTTS_SHIFT, R_ID_AA64ISAR1_FRINTTS_LENGTH) +#define R_ID_AA64ISAR1_SB_SHIFT 36 +#define R_ID_AA64ISAR1_SB_LENGTH 4 +#define R_ID_AA64ISAR1_SB_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR1_SB_SHIFT, R_ID_AA64ISAR1_SB_LENGTH) +#define R_ID_AA64ISAR1_SPECRES_SHIFT 40 +#define R_ID_AA64ISAR1_SPECRES_LENGTH 4 +#define R_ID_AA64ISAR1_SPECRES_MASK MAKE_64BIT_MASK(R_ID_AA64ISAR1_SPECRES_SHIFT, R_ID_AA64ISAR1_SPECRES_LENGTH) +#else +FIELD(ID_AA64ISAR1, FRINTTS, 32, 4) +FIELD(ID_AA64ISAR1, SB, 36, 4) +FIELD(ID_AA64ISAR1, SPECRES, 40, 4) +#endif + +FIELD(ID_AA64PFR0, EL0, 0, 4) +FIELD(ID_AA64PFR0, EL1, 4, 4) +FIELD(ID_AA64PFR0, EL2, 8, 4) +FIELD(ID_AA64PFR0, EL3, 12, 4) +FIELD(ID_AA64PFR0, FP, 16, 4) +FIELD(ID_AA64PFR0, ADVSIMD, 20, 4) +FIELD(ID_AA64PFR0, GIC, 24, 4) +FIELD(ID_AA64PFR0, RAS, 28, 4) +#ifdef _MSC_VER +#define R_ID_AA64PFR0_SVE_SHIFT 32 +#define R_ID_AA64PFR0_SVE_LENGTH 4 +#define R_ID_AA64PFR0_SVE_MASK MAKE_64BIT_MASK(R_ID_AA64PFR0_SVE_SHIFT, R_ID_AA64PFR0_SVE_LENGTH) +#else +FIELD(ID_AA64PFR0, SVE, 32, 4) +#endif + +FIELD(ID_AA64PFR1, BT, 0, 4) +FIELD(ID_AA64PFR1, SSBS, 4, 4) +FIELD(ID_AA64PFR1, MTE, 8, 4) +FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4) + +FIELD(ID_AA64MMFR0, PARANGE, 0, 4) +FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4) +FIELD(ID_AA64MMFR0, BIGEND, 8, 4) +FIELD(ID_AA64MMFR0, SNSMEM, 12, 4) +FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4)
+FIELD(ID_AA64MMFR0, TGRAN16, 20, 4) +FIELD(ID_AA64MMFR0, TGRAN64, 24, 4) +FIELD(ID_AA64MMFR0, TGRAN4, 28, 4) +#ifdef _MSC_VER +#define R_ID_AA64MMFR0_TGRAN16_2_SHIFT 32 +#define R_ID_AA64MMFR0_TGRAN16_2_LENGTH 4 +#define R_ID_AA64MMFR0_TGRAN16_2_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR0_TGRAN16_2_SHIFT, R_ID_AA64MMFR0_TGRAN16_2_LENGTH) +#define R_ID_AA64MMFR0_TGRAN64_2_SHIFT 36 +#define R_ID_AA64MMFR0_TGRAN64_2_LENGTH 4 +#define R_ID_AA64MMFR0_TGRAN64_2_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR0_TGRAN64_2_SHIFT, R_ID_AA64MMFR0_TGRAN64_2_LENGTH) +#define R_ID_AA64MMFR0_TGRAN4_2_SHIFT 40 +#define R_ID_AA64MMFR0_TGRAN4_2_LENGTH 4 +#define R_ID_AA64MMFR0_TGRAN4_2_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR0_TGRAN4_2_SHIFT, R_ID_AA64MMFR0_TGRAN4_2_LENGTH) +#define R_ID_AA64MMFR0_EXS_SHIFT 44 +#define R_ID_AA64MMFR0_EXS_LENGTH 4 +#define R_ID_AA64MMFR0_EXS_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR0_EXS_SHIFT, R_ID_AA64MMFR0_EXS_LENGTH) +#else +FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4) +FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4) +FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4) +FIELD(ID_AA64MMFR0, EXS, 44, 4) +#endif + +FIELD(ID_AA64MMFR1, HAFDBS, 0, 4) +FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4) +FIELD(ID_AA64MMFR1, VH, 8, 4) +FIELD(ID_AA64MMFR1, HPDS, 12, 4) +FIELD(ID_AA64MMFR1, LO, 16, 4) +FIELD(ID_AA64MMFR1, PAN, 20, 4) +FIELD(ID_AA64MMFR1, SPECSEI, 24, 4) +FIELD(ID_AA64MMFR1, XNX, 28, 4) + +FIELD(ID_AA64MMFR2, CNP, 0, 4) +FIELD(ID_AA64MMFR2, UAO, 4, 4) +FIELD(ID_AA64MMFR2, LSM, 8, 4) +FIELD(ID_AA64MMFR2, IESB, 12, 4) +FIELD(ID_AA64MMFR2, VARANGE, 16, 4) +FIELD(ID_AA64MMFR2, CCIDX, 20, 4) +FIELD(ID_AA64MMFR2, NV, 24, 4) +FIELD(ID_AA64MMFR2, ST, 28, 4) +#ifdef _MSC_VER +#define R_ID_AA64MMFR2_AT_SHIFT 32 +#define R_ID_AA64MMFR2_AT_LENGTH 4 +#define R_ID_AA64MMFR2_AT_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_AT_SHIFT, R_ID_AA64MMFR2_AT_LENGTH) +#define R_ID_AA64MMFR2_IDS_SHIFT 36 +#define R_ID_AA64MMFR2_IDS_LENGTH 4 +#define R_ID_AA64MMFR2_IDS_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_IDS_SHIFT, R_ID_AA64MMFR2_IDS_LENGTH) +#define R_ID_AA64MMFR2_FWB_SHIFT 40 +#define R_ID_AA64MMFR2_FWB_LENGTH 4 +#define R_ID_AA64MMFR2_FWB_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_FWB_SHIFT, R_ID_AA64MMFR2_FWB_LENGTH) +#define R_ID_AA64MMFR2_TTL_SHIFT 48 +#define R_ID_AA64MMFR2_TTL_LENGTH 4 +#define R_ID_AA64MMFR2_TTL_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_TTL_SHIFT, R_ID_AA64MMFR2_TTL_LENGTH) +#define R_ID_AA64MMFR2_BBM_SHIFT 52 +#define R_ID_AA64MMFR2_BBM_LENGTH 4 +#define R_ID_AA64MMFR2_BBM_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_BBM_SHIFT, R_ID_AA64MMFR2_BBM_LENGTH) +#define R_ID_AA64MMFR2_EVT_SHIFT 56 +#define R_ID_AA64MMFR2_EVT_LENGTH 4 +#define R_ID_AA64MMFR2_EVT_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_EVT_SHIFT, R_ID_AA64MMFR2_EVT_LENGTH) +#define R_ID_AA64MMFR2_E0PD_SHIFT 60 +#define R_ID_AA64MMFR2_E0PD_LENGTH 4 +#define R_ID_AA64MMFR2_E0PD_MASK MAKE_64BIT_MASK(R_ID_AA64MMFR2_E0PD_SHIFT, R_ID_AA64MMFR2_E0PD_LENGTH) +#else +FIELD(ID_AA64MMFR2, AT, 32, 4) +FIELD(ID_AA64MMFR2, IDS, 36, 4) +FIELD(ID_AA64MMFR2, FWB, 40, 4) +FIELD(ID_AA64MMFR2, TTL, 48, 4) +FIELD(ID_AA64MMFR2, BBM, 52, 4) +FIELD(ID_AA64MMFR2, EVT, 56, 4) +FIELD(ID_AA64MMFR2, E0PD, 60, 4) +#endif + +FIELD(ID_AA64DFR0, DEBUGVER, 0, 4) +FIELD(ID_AA64DFR0, TRACEVER, 4, 4) +FIELD(ID_AA64DFR0, PMUVER, 8, 4) +FIELD(ID_AA64DFR0, BRPS, 12, 4) +FIELD(ID_AA64DFR0, WRPS, 20, 4) +FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4) +#ifdef _MSC_VER +#define R_ID_AA64DFR0_PMSVER_SHIFT 32 +#define R_ID_AA64DFR0_PMSVER_LENGTH 4 +#define R_ID_AA64DFR0_PMSVER_MASK MAKE_64BIT_MASK(R_ID_AA64DFR0_PMSVER_SHIFT, R_ID_AA64DFR0_PMSVER_LENGTH) +#define 
R_ID_AA64DFR0_DOUBLELOCK_SHIFT 36 +#define R_ID_AA64DFR0_DOUBLELOCK_LENGTH 4 +#define R_ID_AA64DFR0_DOUBLELOCK_MASK MAKE_64BIT_MASK(R_ID_AA64DFR0_DOUBLELOCK_SHIFT, R_ID_AA64DFR0_DOUBLELOCK_LENGTH) +#define R_ID_AA64DFR0_TRACEFILT_SHIFT 40 +#define R_ID_AA64DFR0_TRACEFILT_LENGTH 4 +#define R_ID_AA64DFR0_TRACEFILT_MASK MAKE_64BIT_MASK(R_ID_AA64DFR0_TRACEFILT_SHIFT, R_ID_AA64DFR0_TRACEFILT_LENGTH) +#else +FIELD(ID_AA64DFR0, PMSVER, 32, 4) +FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4) +FIELD(ID_AA64DFR0, TRACEFILT, 40, 4) +#endif + +FIELD(ID_DFR0, COPDBG, 0, 4) +FIELD(ID_DFR0, COPSDBG, 4, 4) +FIELD(ID_DFR0, MMAPDBG, 8, 4) +FIELD(ID_DFR0, COPTRC, 12, 4) +FIELD(ID_DFR0, MMAPTRC, 16, 4) +FIELD(ID_DFR0, MPROFDBG, 20, 4) +FIELD(ID_DFR0, PERFMON, 24, 4) +FIELD(ID_DFR0, TRACEFILT, 28, 4) + +FIELD(DBGDIDR, SE_IMP, 12, 1) +FIELD(DBGDIDR, NSUHD_IMP, 14, 1) +FIELD(DBGDIDR, VERSION, 16, 4) +FIELD(DBGDIDR, CTX_CMPS, 20, 4) +FIELD(DBGDIDR, BRPS, 24, 4) +FIELD(DBGDIDR, WRPS, 28, 4) + +FIELD(MVFR0, SIMDREG, 0, 4) +FIELD(MVFR0, FPSP, 4, 4) +FIELD(MVFR0, FPDP, 8, 4) +FIELD(MVFR0, FPTRAP, 12, 4) +FIELD(MVFR0, FPDIVIDE, 16, 4) +FIELD(MVFR0, FPSQRT, 20, 4) +FIELD(MVFR0, FPSHVEC, 24, 4) +FIELD(MVFR0, FPROUND, 28, 4) + +FIELD(MVFR1, FPFTZ, 0, 4) +FIELD(MVFR1, FPDNAN, 4, 4) +FIELD(MVFR1, SIMDLS, 8, 4) +FIELD(MVFR1, SIMDINT, 12, 4) +FIELD(MVFR1, SIMDSP, 16, 4) +FIELD(MVFR1, SIMDHP, 20, 4) +FIELD(MVFR1, FPHP, 24, 4) +FIELD(MVFR1, SIMDFMAC, 28, 4) + +FIELD(MVFR2, SIMDMISC, 0, 4) +FIELD(MVFR2, FPMISC, 4, 4) + +QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK); + +/* If adding a feature bit which corresponds to a Linux ELF + * HWCAP bit, remember to update the feature-bit-to-hwcap + * mapping in linux-user/elfload.c:get_elf_hwcap(). + */ +enum arm_features { + ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */ + ARM_FEATURE_XSCALE, /* Intel XScale extensions. */ + ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */ + ARM_FEATURE_V6, + ARM_FEATURE_V6K, + ARM_FEATURE_V7, + ARM_FEATURE_THUMB2, + ARM_FEATURE_PMSA, /* no MMU; may have Memory Protection Unit */ + ARM_FEATURE_NEON, + ARM_FEATURE_M, /* Microcontroller profile. */ + ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. 
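The _MSC_VER fallbacks above and in the preceding ID-register blocks exist because FIELD() declares its constants through an enum, and enums are 32-bit in MSVC, so field definitions at bit 32 and above truncate (warning C4309); the manual #defines bypass the enum and stay in 64-bit arithmetic. For reference, a sketch of the MAKE_64BIT_MASK helper they rely on, assumed to match its upstream QEMU definition in qemu/bitops.h:

    /* A (length)-bit mask whose lowest set bit is at position (shift),
     * computed entirely in unsigned 64-bit arithmetic. */
    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))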
*/ + ARM_FEATURE_THUMB2EE, + ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */ + ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */ + ARM_FEATURE_V4T, + ARM_FEATURE_V5, + ARM_FEATURE_STRONGARM, + ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */ + ARM_FEATURE_GENERIC_TIMER, + ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */ + ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */ + ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */ + ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */ + ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */ + ARM_FEATURE_MPIDR, /* has cp15 MPIDR */ + ARM_FEATURE_PXN, /* has Privileged Execute Never bit */ + ARM_FEATURE_LPAE, /* has Large Physical Address Extension */ + ARM_FEATURE_V8, + ARM_FEATURE_AARCH64, /* supports 64 bit mode */ + ARM_FEATURE_CBAR, /* has cp15 CBAR */ + ARM_FEATURE_CRC, /* ARMv8 CRC instructions */ + ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */ + ARM_FEATURE_EL2, /* has EL2 Virtualization support */ + ARM_FEATURE_EL3, /* has EL3 Secure monitor support */ + ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */ + ARM_FEATURE_PMU, /* has PMU support */ + ARM_FEATURE_VBAR, /* has cp15 VBAR */ + ARM_FEATURE_M_SECURITY, /* M profile Security Extension */ + ARM_FEATURE_M_MAIN, /* M profile Main Extension */ +}; + +static inline int arm_feature(CPUARMState *env, int feature) +{ + return (env->features & (1ULL << feature)) != 0; +} + +/* Return true if exception levels below EL3 are in secure state, + * or would be following an exception return to that level. + * Unlike arm_is_secure() (which is always a question about the + * _current_ state of the CPU) this doesn't care about the current + * EL or mode. + */ +static inline bool arm_is_secure_below_el3(CPUARMState *env) +{ + if (arm_feature(env, ARM_FEATURE_EL3)) { + return !(env->cp15.scr_el3 & SCR_NS); + } else { + /* If EL3 is not supported then the secure state is implementation + * defined, in which case QEMU defaults to non-secure. + */ + return false; + } +} + +/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */ +static inline bool arm_is_el3_or_mon(CPUARMState *env) +{ + if (arm_feature(env, ARM_FEATURE_EL3)) { + if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) { + /* CPU currently in AArch64 state and EL3 */ + return true; + } else if (!is_a64(env) && + (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { + /* CPU currently in AArch32 state and monitor mode */ + return true; + } + } + return false; +} + +/* Return true if the processor is in secure state */ +static inline bool arm_is_secure(CPUARMState *env) +{ + if (arm_is_el3_or_mon(env)) { + return true; + } + return arm_is_secure_below_el3(env); +} + +/** + * arm_hcr_el2_eff(): Return the effective value of HCR_EL2. + * E.g. when in secure state, fields in HCR_EL2 are suppressed, + * "for all purposes other than a direct read or write access of HCR_EL2." + * Not included here is HCR_RW. + */ +uint64_t arm_hcr_el2_eff(CPUARMState *env); + +/* Return true if the specified exception level is running in AArch64 state. */ +static inline bool arm_el_is_aa64(CPUARMState *env, int el) +{ + /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want, + * and if we're not in EL0 then the state of EL0 isn't well defined.) 
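arm_feature() above is the read side of the simple 64-bit bitmap kept in env->features; the write side is not part of this header, but upstream QEMU keeps an equivalent static set_feature() in target/arm/cpu.c, sketched here for completeness:

    /* Setter pairing with arm_feature(): just the matching bit-set. */
    static inline void example_set_feature(CPUARMState *env, int feature)
    {
        env->features |= 1ULL << feature;
    }

    /* e.g. example_set_feature(env, ARM_FEATURE_V8);
     *      if (arm_feature(env, ARM_FEATURE_V8)) { ... } */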
+ */ + assert(el >= 1 && el <= 3); + bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64); + + /* The highest exception level is always at the maximum supported + * register width, and then lower levels have a register width controlled + * by bits in the SCR or HCR registers. + */ + if (el == 3) { + return aa64; + } + + if (arm_feature(env, ARM_FEATURE_EL3)) { + aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW); + } + + if (el == 2) { + return aa64; + } + + if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) { + aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW); + } + + return aa64; +} + +/* Function for determining whether guest cp register reads and writes should + * access the secure or non-secure bank of a cp register. When EL3 is + * operating in AArch32 state, the NS-bit determines whether the secure + * instance of a cp register should be used. When EL3 is AArch64 (or if + * it doesn't exist at all) then there is no register banking, and all + * accesses are to the non-secure version. + */ +static inline bool access_secure_reg(CPUARMState *env) +{ + bool ret = (arm_feature(env, ARM_FEATURE_EL3) && + !arm_el_is_aa64(env, 3) && + !(env->cp15.scr_el3 & SCR_NS)); + + return ret; +} + +/* Macros for accessing a specified CP register bank */ +#define A32_BANKED_REG_GET(_env, _regname, _secure) \ + ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns) + +#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \ + do { \ + if (_secure) { \ + (_env)->cp15._regname##_s = (_val); \ + } else { \ + (_env)->cp15._regname##_ns = (_val); \ + } \ + } while (0) + +/* Macros for automatically accessing a specific CP register bank depending on + * the current secure state of the system. These macros are not intended for + * supporting instruction translation reads/writes as these are dependent + * solely on the SCR.NS bit and not the mode. + */ +#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \ + A32_BANKED_REG_GET((_env), _regname, \ + (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3))) + +#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \ + A32_BANKED_REG_SET((_env), _regname, \ + (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \ + (_val)) + +void arm_cpu_list(void); +uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, + uint32_t cur_el, bool secure); + +/* Interface between CPU and Interrupt controller. */ +bool armv7m_nvic_can_take_pending_exception(void *opaque); + +/** + * armv7m_nvic_set_pending: mark the specified exception as pending + * @opaque: the NVIC + * @irq: the exception number to mark pending + * @secure: false for non-banked exceptions or for the nonsecure + * version of a banked exception, true for the secure version of a banked + * exception. + * + * Marks the specified exception as pending. Note that we will assert() + * if @secure is true and @irq does not specify one of the fixed set + * of architecturally banked exceptions. + */ +void armv7m_nvic_set_pending(void *opaque, int irq, bool secure); +/** + * armv7m_nvic_set_pending_derived: mark this derived exception as pending + * @opaque: the NVIC + * @irq: the exception number to mark pending + * @secure: false for non-banked exceptions or for the nonsecure + * version of a banked exception, true for the secure version of a banked + * exception. + * + * Similar to armv7m_nvic_set_pending(), but specifically for derived + * exceptions (exceptions generated in the course of trying to take + * a different exception).
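A brief usage sketch for the banked-register macros above, assuming the cp15 struct declares _s/_ns banks for dacr as upstream QEMU does (that part of the struct is outside this excerpt):

    /* Read DACR from the bank implied by the current security state. */
    static inline uint64_t example_current_dacr(CPUARMState *env)
    {
        return A32_BANKED_CURRENT_REG_GET(env, dacr);
    }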
+ */ +void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure); +/** + * armv7m_nvic_set_pending_lazyfp: mark this lazy FP exception as pending + * @opaque: the NVIC + * @irq: the exception number to mark pending + * @secure: false for non-banked exceptions or for the nonsecure + * version of a banked exception, true for the secure version of a banked + * exception. + * + * Similar to armv7m_nvic_set_pending(), but specifically for exceptions + * generated in the course of lazy stacking of FP registers. + */ +void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure); +/** + * armv7m_nvic_get_pending_irq_info: return highest priority pending + * exception, and whether it targets Secure state + * @opaque: the NVIC + * @pirq: set to pending exception number + * @ptargets_secure: set to whether pending exception targets Secure + * + * This function writes the number of the highest priority pending + * exception (the one which would be made active by + * armv7m_nvic_acknowledge_irq()) to @pirq, and sets @ptargets_secure + * to true if the current highest priority pending exception should + * be taken to Secure state, false for NS. + */ +void armv7m_nvic_get_pending_irq_info(void *opaque, int *pirq, + bool *ptargets_secure); +/** + * armv7m_nvic_acknowledge_irq: make highest priority pending exception active + * @opaque: the NVIC + * + * Move the current highest priority pending exception from the pending + * state to the active state, and update v7m.exception to indicate that + * it is the exception currently being handled. + */ +void armv7m_nvic_acknowledge_irq(void *opaque); +/** + * armv7m_nvic_complete_irq: complete specified interrupt or exception + * @opaque: the NVIC + * @irq: the exception number to complete + * @secure: true if this exception was secure + * + * Returns: -1 if the irq was not active + * 1 if completing this irq brought us back to base (no active irqs) + * 0 if there is still an irq active after this one was completed + * (Ignoring -1, this is the same as the RETTOBASE value before completion.) + */ +int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure); +/** + * armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure) + * @opaque: the NVIC + * @irq: the exception number to mark pending + * @secure: false for non-banked exceptions or for the nonsecure + * version of a banked exception, true for the secure version of a banked + * exception. + * + * Return whether an exception is "ready", i.e. whether the exception is + * enabled and is configured at a priority which would allow it to + * interrupt the current execution priority. This controls whether the + * RDY bit for it in the FPCCR is set. + */ +bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure); +/** + * armv7m_nvic_raw_execution_priority: return the raw execution priority + * @opaque: the NVIC + * + * Returns: the raw execution priority as defined by the v8M architecture. + * This is the execution priority minus the effects of AIRCR.PRIS, + * and minus any PRIMASK/FAULTMASK/BASEPRI priority boosting. + * (v8M ARM ARM I_PKLD.) + */ +int armv7m_nvic_raw_execution_priority(void *opaque); +/** + * armv7m_nvic_neg_prio_requested: return true if the requested execution + * priority is negative for the specified security state. + * @opaque: the NVIC + * @secure: the security state to test + * This corresponds to the pseudocode IsReqExecPriNeg(). 
+ */ +bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure); + +/* Interface for defining coprocessor registers. + * Registers are defined in tables of arm_cp_reginfo structs + * which are passed to define_arm_cp_regs(). + */ + +/* When looking up a coprocessor register we look for it + * via an integer which encodes all of: + * coprocessor number + * Crn, Crm, opc1, opc2 fields + * 32 or 64 bit register (ie is it accessed via MRC/MCR + * or via MRRC/MCRR?) + * non-secure/secure bank (AArch32 only) + * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field. + * (In this case crn and opc2 should be zero.) + * For AArch64, there is no 32/64 bit size distinction; + * instead all registers have a 2 bit op0, 3 bit op1 and op2, + * and 4 bit CRn and CRm. The encoding patterns are chosen + * to be easy to convert to and from the KVM encodings, and also + * so that the hashtable can contain both AArch32 and AArch64 + * registers (to allow for interprocessing where we might run + * 32 bit code on a 64 bit core). + */ +/* This bit is private to our hashtable cpreg; in KVM register + * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64 + * in the upper bits of the 64 bit ID. + */ +#define CP_REG_AA64_SHIFT 28 +#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT) + +/* To enable banking of coprocessor registers depending on ns-bit we + * add a bit to distinguish between secure and non-secure cpregs in the + * hashtable. + */ +#define CP_REG_NS_SHIFT 29 +#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT) + +#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2) \ + ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) | \ + ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2)) + +#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \ + (CP_REG_AA64_MASK | \ + ((cp) << CP_REG_ARM_COPROC_SHIFT) | \ + ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \ + ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \ + ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \ + ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \ + ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT)) + +#if 0 +/* Convert a full 64 bit KVM register ID to the truncated 32 bit + * version used as a key for the coprocessor register hashtable + */ +static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid) +{ + uint32_t cpregid = kvmid; + if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) { + cpregid |= CP_REG_AA64_MASK; + } else { + if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) { + cpregid |= (1 << 15); + } + + /* KVM is always non-secure so add the NS flag on AArch32 register + * entries. + */ + cpregid |= 1 << CP_REG_NS_SHIFT; + } + return cpregid; +} + +/* Convert a truncated 32 bit hashtable key into the full + * 64 bit KVM register ID. + */ +static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid) +{ + uint64_t kvmid; + + if (cpregid & CP_REG_AA64_MASK) { + kvmid = cpregid & ~CP_REG_AA64_MASK; + kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64; + } else { + kvmid = cpregid & ~(1 << 15); + if (cpregid & (1 << 15)) { + kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM; + } else { + kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM; + } + } + return kvmid; +} +#endif + +/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a + * special-behaviour cp reg and bits [11..8] indicate what behaviour + * it has. Otherwise it is a simple cp reg, where CONST indicates that + * TCG can assume the value to be constant (ie load at translate time) + * and 64BIT indicates a 64 bit wide coprocessor register. 
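To make the key encoding concrete: the AArch32, non-secure view of SCTLR is cp15 with crn=1, crm=0, opc1=0, opc2=0, accessed via MRC/MCR (so is64=0). A hedged sketch of building that key and looking it up (get_arm_cp_reginfo() is declared near the end of this header):

    static inline const ARMCPRegInfo *example_lookup_sctlr(ARMCPU *cpu)
    {
        /* ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2) */
        uint32_t key = ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0);
        return get_arm_cp_reginfo(cpu->cp_regs, key);
    }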
SUPPRESS_TB_END + * indicates that the TB should not be ended after a write to this register + * (the default is that the TB ends after cp writes). OVERRIDE permits + * a register definition to override a previous definition for the + * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the + * old must have the OVERRIDE bit set. + * ALIAS indicates that this register is an alias view of some underlying + * state which is also visible via another register, and that the other + * register is handling migration and reset; registers marked ALIAS will not be + * migrated but may have their state set by syncing of register state from KVM. + * NO_RAW indicates that this register has no underlying state and does not + * support raw access for state saving/loading; it will not be used for either + * migration or KVM state synchronization. (Typically this is for "registers" + * which are actually used as instructions for cache maintenance and so on.) + * IO indicates that this register does I/O and therefore its accesses + * need to be surrounded by gen_io_start()/gen_io_end(). In particular, + * registers which implement clocks or timers require this. + * RAISES_EXC is for when the read or write hook might raise an exception; + * the generated code will synchronize the CPU state before calling the hook + * so that it is safe for the hook to call raise_exception(). + * NEWEL is for writes to registers that might change the exception + * level - typically on older ARM chips. For those cases we need to + * re-read the new el when recomputing the translation flags. + */ +#define ARM_CP_SPECIAL 0x0001 +#define ARM_CP_CONST 0x0002 +#define ARM_CP_64BIT 0x0004 +#define ARM_CP_SUPPRESS_TB_END 0x0008 +#define ARM_CP_OVERRIDE 0x0010 +#define ARM_CP_ALIAS 0x0020 +#define ARM_CP_IO 0x0040 +#define ARM_CP_NO_RAW 0x0080 +#define ARM_CP_NOP (ARM_CP_SPECIAL | 0x0100) +#define ARM_CP_WFI (ARM_CP_SPECIAL | 0x0200) +#define ARM_CP_NZCV (ARM_CP_SPECIAL | 0x0300) +#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | 0x0400) +#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | 0x0500) +#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA +#define ARM_CP_FPU 0x1000 +#define ARM_CP_SVE 0x2000 +#define ARM_CP_NO_GDB 0x4000 +#define ARM_CP_RAISES_EXC 0x8000 +#define ARM_CP_NEWEL 0x10000 +/* Used only as a terminator for ARMCPRegInfo lists */ +#define ARM_CP_SENTINEL 0xfffff +/* Mask of only the flag bits in a type field */ +#define ARM_CP_FLAG_MASK 0x1f0ff + +/* Valid values for ARMCPRegInfo state field, indicating which of + * the AArch32 and AArch64 execution states this register is visible in. + * If the reginfo doesn't explicitly specify then it is AArch32 only. + * If the reginfo is declared to be visible in both states then a second + * reginfo is synthesised for the AArch32 view of the AArch64 register, + * such that the AArch32 view is the lower 32 bits of the AArch64 one. + * Note that we rely on the values of these enums as we iterate through + * the various states in some places. + */ +enum { + ARM_CP_STATE_AA32 = 0, + ARM_CP_STATE_AA64 = 1, + ARM_CP_STATE_BOTH = 2, +}; + +/* ARM CP register secure state flags. These flags identify security state + * attributes for a given CP register entry. + * The existence of both or neither secure and non-secure flags indicates that + * the register has both a secure and non-secure hash entry. A single one of + * these flags causes the register to only be hashed for the specified + * security state. 
+ * Although definitions may have any combination of the S/NS bits, each + * registered entry will only have one to identify whether the entry is secure + * or non-secure. + */ +enum { + ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */ + ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */ +}; + +/* Return true if cptype is a valid type field. This is used to try to + * catch errors where the sentinel has been accidentally left off the end + * of a list of registers. + */ +static inline bool cptype_valid(int cptype) +{ + return ((cptype & ~ARM_CP_FLAG_MASK) == 0) + || ((cptype & ARM_CP_SPECIAL) && + ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL)); +} + +/* Access rights: + * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM + * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and + * PL2 (hyp). The other level which has Read and Write bits is Secure PL1 + * (ie any of the privileged modes in Secure state, or Monitor mode). + * If a register is accessible in one privilege level it's always accessible + * in higher privilege levels too. Since "Secure PL1" also follows this rule + * (ie anything visible in PL2 is visible in S-PL1, some things are only + * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the + * terminology a little and call this PL3. + * In AArch64 things are somewhat simpler as the PLx bits line up exactly + * with the ELx exception levels. + * + * If access permissions for a register are more complex than can be + * described with these bits, then use a laxer set of restrictions, and + * do the more restrictive/complex check inside a helper function. + */ +#define PL3_R 0x80 +#define PL3_W 0x40 +#define PL2_R (0x20 | PL3_R) +#define PL2_W (0x10 | PL3_W) +#define PL1_R (0x08 | PL2_R) +#define PL1_W (0x04 | PL2_W) +#define PL0_R (0x02 | PL1_R) +#define PL0_W (0x01 | PL1_W) + +/* + * For user-mode some registers are accessible to EL0 via a kernel + * trap-and-emulate ABI. In this case we define the read permissions + * as actually being PL0_R. However some bits of any given register + * may still be masked. + */ +#define PL0U_R PL1_R + +#define PL3_RW (PL3_R | PL3_W) +#define PL2_RW (PL2_R | PL2_W) +#define PL1_RW (PL1_R | PL1_W) +#define PL0_RW (PL0_R | PL0_W) + +/* Return the highest implemented Exception Level */ +static inline int arm_highest_el(CPUARMState *env) +{ + if (arm_feature(env, ARM_FEATURE_EL3)) { + return 3; + } + if (arm_feature(env, ARM_FEATURE_EL2)) { + return 2; + } + return 1; +} + +/* Return true if a v7M CPU is in Handler mode */ +static inline bool arm_v7m_is_handler_mode(CPUARMState *env) +{ + return env->v7m.exception != 0; +} + +/* Return the current Exception Level (as per ARMv8; note that this differs + * from the ARMv7 Privilege Level). 
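Because the PL*_R/W values nest, each bit position in the access byte corresponds to (privilege level * 2 + is_read), which is exactly how cp_access_ok() near the end of this header consumes it. A worked check (assert() is already used elsewhere in this header):

    static inline void example_access_bits(void)
    {
        /* PL1_RW expands to 0xfc: R/W from EL1 upward, no EL0 access. */
        assert(((PL1_RW >> (1 * 2 + 1)) & 1) == 1); /* EL1 read: allowed */
        assert(((PL1_RW >> (0 * 2 + 1)) & 1) == 0); /* EL0 read: denied */
    }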
+ */ +static inline int arm_current_el(CPUARMState *env) +{ + if (arm_feature(env, ARM_FEATURE_M)) { + return arm_v7m_is_handler_mode(env) || + !(env->v7m.control[env->v7m.secure] & 1); + } + + if (is_a64(env)) { + return extract32(env->pstate, 2, 2); + } + + switch (env->uncached_cpsr & 0x1f) { + case ARM_CPU_MODE_USR: + return 0; + case ARM_CPU_MODE_HYP: + return 2; + case ARM_CPU_MODE_MON: + return 3; + default: + if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { + /* If EL3 is 32-bit then all secure privileged modes run in + * EL3 + */ + return 3; + } + + return 1; + } +} + +typedef struct ARMCPRegInfo ARMCPRegInfo; + +typedef enum CPAccessResult { + /* Access is permitted */ + CP_ACCESS_OK = 0, + /* Access fails due to a configurable trap or enable which would + * result in a categorized exception syndrome giving information about + * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6, + * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or + * PL1 if in EL0, otherwise to the current EL). + */ + CP_ACCESS_TRAP = 1, + /* Access fails and results in an exception syndrome 0x0 ("uncategorized"). + * Note that this is not a catch-all case -- the set of cases which may + * result in this failure is specifically defined by the architecture. + */ + CP_ACCESS_TRAP_UNCATEGORIZED = 2, + /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */ + CP_ACCESS_TRAP_EL2 = 3, + CP_ACCESS_TRAP_EL3 = 4, + /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */ + CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5, + CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6, + /* Access fails and results in an exception syndrome for an FP access, + * trapped directly to EL2 or EL3 + */ + CP_ACCESS_TRAP_FP_EL2 = 7, + CP_ACCESS_TRAP_FP_EL3 = 8, +} CPAccessResult; + +/* Access functions for coprocessor registers. These cannot fail and + * may not raise exceptions. + */ +typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque); +typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque, + uint64_t value); +/* Access permission check functions for coprocessor registers. */ +typedef CPAccessResult CPAccessFn(CPUARMState *env, + const ARMCPRegInfo *opaque, + bool isread); +/* Hook function for register reset */ +typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); + +#define CP_ANY 0xff + +/* Definition of an ARM coprocessor register */ +struct ARMCPRegInfo { + /* Name of register (useful mainly for debugging, need not be unique) */ + const char *name; + /* Location of register: coprocessor number and (crn,crm,opc1,opc2) + * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a + * 'wildcard' field -- any value of that field in the MRC/MCR insn + * will be decoded to this register. The register read and write + * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2 + * used by the program, so it is possible to register a wildcard and + * then behave differently on read/write if necessary. + * For 64 bit registers, only crm and opc1 are relevant; crn and opc2 + * must both be zero. + * For AArch64-visible registers, opc0 is also used. + * Since there are no "coprocessors" in AArch64, cp is purely used as a + * way to distinguish (for KVM's benefit) guest-visible system registers + * from demuxed ones provided to preserve the "no side effects on + * KVM register read/write from QEMU" semantics. 
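A hedged sketch of how the CPAccessFn hook above is typically shaped: consult the current EL and some trap control, and return one of the CPAccessResult codes (the use of HCR_TIDCP as the trap control here is illustrative only):

    static CPAccessResult example_access_check(CPUARMState *env,
                                               const ARMCPRegInfo *ri,
                                               bool isread)
    {
        if (arm_current_el(env) < 2 && (env->cp15.hcr_el2 & HCR_TIDCP)) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_OK;
    }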
cp==0x13 is guest + * visible (to match KVM's encoding); cp==0 will be converted to + * cp==0x13 when the ARMCPRegInfo is registered, for convenience. + */ + uint8_t cp; + uint8_t crn; + uint8_t crm; + uint8_t opc0; + uint8_t opc1; + uint8_t opc2; + /* Execution state in which this register is visible: ARM_CP_STATE_* */ + int state; + /* Register type: ARM_CP_* bits/values */ + int type; + /* Access rights: PL*_[RW] */ + int access; + /* Security state: ARM_CP_SECSTATE_* bits/values */ + int secure; + /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when + * this register was defined: can be used to hand data through to the + * register read/write functions, since they are passed the ARMCPRegInfo*. + */ + void *opaque; + /* Value of this register, if it is ARM_CP_CONST. Otherwise, if + * fieldoffset is non-zero, the reset value of the register. + */ + uint64_t resetvalue; + /* Offset of the field in CPUARMState for this register. + * + * This is not needed if either: + * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs + * 2. both readfn and writefn are specified + */ + ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */ + + /* Offsets of the secure and non-secure fields in CPUARMState for the + * register if it is banked. These fields are only used during the static + * registration of a register. During hashing the bank associated + * with a given security state is copied to fieldoffset which is used from + * there on out. + * + * It is expected that register definitions use either fieldoffset or + * bank_fieldoffsets in the definition but not both. It is also expected + * that both bank offsets are set when defining a banked register. This + * use indicates that a register is banked. + */ + ptrdiff_t bank_fieldoffsets[2]; + + /* Function for making any access checks for this register in addition to + * those specified by the 'access' permissions bits. If NULL, no extra + * checks required. The access check is performed at runtime, not at + * translate time. + */ + CPAccessFn *accessfn; + /* Function for handling reads of this register. If NULL, then reads + * will be done by loading from the offset into CPUARMState specified + * by fieldoffset. + */ + CPReadFn *readfn; + /* Function for handling writes of this register. If NULL, then writes + * will be done by writing to the offset into CPUARMState specified + * by fieldoffset. + */ + CPWriteFn *writefn; + /* Function for doing a "raw" read; used when we need to copy + * coprocessor state to the kernel for KVM or out for + * migration. This only needs to be provided if there is also a + * readfn and it has side effects (for instance clear-on-read bits). + */ + CPReadFn *raw_readfn; + /* Function for doing a "raw" write; used when we need to copy KVM + * kernel coprocessor state into userspace, or for inbound + * migration. This only needs to be provided if there is also a + * writefn and it masks out "unwritable" bits or has write-one-to-clear + * or similar behaviour. + */ + CPWriteFn *raw_writefn; + /* Function for resetting the register. If NULL, then reset will be done + * by writing resetvalue to the field specified in fieldoffset. If + * fieldoffset is 0 then no reset will be done. + */ + CPResetFn *resetfn; + + /* + * "Original" writefn and readfn. + * For ARMv8.1-VHE register aliases, we overwrite the read/write + * accessor functions of various EL1/EL0 to perform the runtime + * check for which sysreg should actually be modified, and then + * forwards the operation. 
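+ * (For instance, with HCR_EL2.E2H set, an EL2 access to SCTLR_EL1 is
+ * transparently redirected to the SCTLR_EL2 state.)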
Before overwriting the accessors, + * the original function is copied here, so that accesses that + * really do go to the EL1/EL0 version proceed normally. + * (The corresponding EL2 register is linked via opaque.) + */ + CPReadFn *orig_readfn; + CPWriteFn *orig_writefn; +}; + +/* Macros which are lvalues for the field in CPUARMState for the + * ARMCPRegInfo *ri. + */ +#define CPREG_FIELD32(env, ri) \ + (*(uint32_t *)((char *)(env) + (ri)->fieldoffset)) +#define CPREG_FIELD64(env, ri) \ + (*(uint64_t *)((char *)(env) + (ri)->fieldoffset)) + +#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL } + +void define_arm_cp_regs_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *regs, void *opaque); +void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *regs, void *opaque); +static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs) +{ + define_arm_cp_regs_with_opaque(cpu, regs, 0); +} +static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs) +{ + define_one_arm_cp_reg_with_opaque(cpu, regs, 0); +} +const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp); + +/* + * Definition of an ARM co-processor register as viewed from + * userspace. This is used for presenting sanitised versions of + * registers to userspace when emulating the Linux AArch64 CPU + * ID/feature ABI (advertised as HWCAP_CPUID). + */ +typedef struct ARMCPRegUserSpaceInfo { + /* Name of register */ + const char *name; + + /* Is the name actually a glob pattern */ + bool is_glob; + + /* Only some bits are exported to user space */ + uint64_t exported_bits; + + /* Fixed bits are applied after the mask */ + uint64_t fixed_bits; +} ARMCPRegUserSpaceInfo; + +#define REGUSERINFO_SENTINEL { .name = NULL } + +void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods); + +/* CPWriteFn that can be used to implement writes-ignored behaviour */ +void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value); +/* CPReadFn that can be used for read-as-zero behaviour */ +uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri); + +/* CPResetFn that does nothing, for use if no reset is required even + * if fieldoffset is non zero. + */ +void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque); + +/* Return true if this reginfo struct's field in the cpu state struct + * is 64 bits wide. + */ +static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri) +{ + return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT); +} + +static inline bool cp_access_ok(int current_el, + const ARMCPRegInfo *ri, int isread) +{ + return (ri->access >> ((current_el * 2) + isread)) & 1; +} + +/* Raw read of a coprocessor register (as needed for migration, etc) */ +uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri); + +/** + * write_list_to_cpustate + * @cpu: ARMCPU + * + * For each register listed in the ARMCPU cpreg_indexes list, write + * its value from the cpreg_values list into the ARMCPUState structure. + * This updates TCG's working data structures from KVM data or + * from incoming migration state. + * + * Returns: true if all register values were updated correctly, + * false if some register was unknown or could not be written. + * Note that we do not stop early on failure -- we will attempt + * writing all registers in the list. 
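+ *
+ * A caller is expected to check the result; as a hypothetical sketch:
+ *     if (!write_list_to_cpustate(cpu)) {
+ *         return -EINVAL; // an unknown or unwritable register
+ *     }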
+ */ +bool write_list_to_cpustate(ARMCPU *cpu); + +/** + * write_cpustate_to_list: + * @cpu: ARMCPU + * @kvm_sync: true if this is for syncing back to KVM + * + * For each register listed in the ARMCPU cpreg_indexes list, write + * its value from the ARMCPUState structure into the cpreg_values list. + * This is used to copy info from TCG's working data structures into + * KVM or for outbound migration. + * + * @kvm_sync is true if we are doing this in order to sync the + * register state back to KVM. In this case we will only update + * values in the list if the previous list->cpustate sync actually + * successfully wrote the CPU state. Otherwise we will keep the value + * that is in the list. + * + * Returns: true if all register values were read correctly, + * false if some register was unknown or could not be read. + * Note that we do not stop early on failure -- we will attempt + * reading all registers in the list. + */ +bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); + +#define ARM_CPUID_TI915T 0x54029152 +#define ARM_CPUID_TI925T 0x54029252 + +#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU +#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX) +#define CPU_RESOLVING_TYPE TYPE_ARM_CPU + +#define cpu_signal_handler cpu_arm_signal_handler +#define cpu_list arm_cpu_list + +/* ARM has the following "translation regimes" (as the ARM ARM calls them): + * + * If EL3 is 64-bit: + * + NonSecure EL1 & 0 stage 1 + * + NonSecure EL1 & 0 stage 2 + * + NonSecure EL2 + * + NonSecure EL2 & 0 (ARMv8.1-VHE) + * + Secure EL1 & 0 + * + Secure EL3 + * If EL3 is 32-bit: + * + NonSecure PL1 & 0 stage 1 + * + NonSecure PL1 & 0 stage 2 + * + NonSecure PL2 + * + Secure PL0 + * + Secure PL1 + * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.) + * + * For QEMU, an mmu_idx is not quite the same as a translation regime because: + * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes, + * because they may differ in access permissions even if the VA->PA map is + * the same + * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2 + * translation, which means that we have one mmu_idx that deals with two + * concatenated translation regimes [this sort of combined s1+2 TLB is + * architecturally permitted] + * 3. we don't need to allocate an mmu_idx to translations that we won't be + * handling via the TLB. The only way to do a stage 1 translation without + * the immediate stage 2 translation is via the ATS or AT system insns, + * which can be slow-pathed and always do a page table walk. + * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3" + * translation regimes, because they map reasonably well to each other + * and they can't both be active at the same time. + * 5. we want to be able to use the TLB for accesses done as part of a + * stage1 page table walk, rather than having to walk the stage2 page + * table over and over. + * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access + * Never (PAN) bit within PSTATE. + * + * This gives us the following list of cases: + * + * NS EL0 EL1&0 stage 1+2 (aka NS PL0) + * NS EL1 EL1&0 stage 1+2 (aka NS PL1) + * NS EL1 EL1&0 stage 1+2 +PAN + * NS EL0 EL2&0 + * NS EL2 EL2&0 +PAN + * NS EL2 (aka NS PL2) + * S EL0 EL1&0 (aka S PL0) + * S EL1 EL1&0 (not used if EL3 is 32 bit) + * S EL1 EL1&0 +PAN + * S EL3 (aka S PL1) + * NS EL1&0 stage 2 + * + * for a total of 12 different mmu_idx. + * + * R profile CPUs have an MPU, but can use the same set of MMU indexes + * as A profile. 
They only need to distinguish NS EL0 and NS EL1 (and + * NS EL2 if we ever model a Cortex-R52). + * + * M profile CPUs are rather different as they do not have a true MMU. + * They have the following different MMU indexes: + * User + * Privileged + * User, execution priority negative (ie the MPU HFNMIENA bit may apply) + * Privileged, execution priority negative (ditto) + * If the CPU supports the v8M Security Extension then there are also: + * Secure User + * Secure Privileged + * Secure User, execution priority negative + * Secure Privileged, execution priority negative + * + * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code + * are not quite the same -- different CPU types (most notably M profile + * vs A/R profile) would like to use MMU indexes with different semantics, + * but since we don't ever need to use all of those in a single CPU we + * can avoid setting NB_MMU_MODES to more than 8. The lower bits of + * ARMMMUIdx are the core TLB mmu index, and the higher bits are always + * the same for any particular CPU. + * Variables of type ARMMUIdx are always full values, and the core + * index values are in variables of type 'int'. + * + * Our enumeration includes at the end some entries which are not "true" + * mmu_idx values in that they don't have corresponding TLBs and are only + * valid for doing slow path page table walks. + * + * The constant names here are patterned after the general style of the names + * of the AT/ATS operations. + * The values used are carefully arranged to make mmu_idx => EL lookup easy. + * For M profile we arrange them to have a bit for priv, a bit for negpri + * and a bit for secure. + */ +#define ARM_MMU_IDX_A 0x10 /* A profile */ +#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */ +#define ARM_MMU_IDX_M 0x40 /* M profile */ + +/* Meanings of the bits for M profile mmu idx values */ +#define ARM_MMU_IDX_M_PRIV 0x1 +#define ARM_MMU_IDX_M_NEGPRI 0x2 +#define ARM_MMU_IDX_M_S 0x4 /* Secure */ + +#define ARM_MMU_IDX_TYPE_MASK \ + (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB) +#define ARM_MMU_IDX_COREIDX_MASK 0xf + +typedef enum ARMMMUIdx { + /* + * A-profile. + */ + ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A, + ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A, + + ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A, + ARMMMUIdx_E10_1_PAN = 3 | ARM_MMU_IDX_A, + + ARMMMUIdx_E2 = 4 | ARM_MMU_IDX_A, + ARMMMUIdx_E20_2 = 5 | ARM_MMU_IDX_A, + ARMMMUIdx_E20_2_PAN = 6 | ARM_MMU_IDX_A, + + ARMMMUIdx_SE10_0 = 7 | ARM_MMU_IDX_A, + ARMMMUIdx_SE10_1 = 8 | ARM_MMU_IDX_A, + ARMMMUIdx_SE10_1_PAN = 9 | ARM_MMU_IDX_A, + ARMMMUIdx_SE3 = 10 | ARM_MMU_IDX_A, + + ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A, + + /* + * These are not allocated TLBs and are used only for AT system + * instructions or for the first stage of an S12 page table walk. + */ + ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB, + ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB, + ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB, + + /* + * M-profile. 
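+ * Each value is ARM_MMU_IDX_M OR-ed with the PRIV/NEGPRI/S bits defined
+ * above; e.g. ARMMMUIdx_MSPrivNegPri is 0x40 | 0x1 | 0x2 | 0x4 == 0x47.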
+ */ + ARMMMUIdx_MUser = ARM_MMU_IDX_M, + ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV, + ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI, + ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI, + ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S, + ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S, + ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S, + ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S, +} ARMMMUIdx; + +/* + * Bit macros for the core-mmu-index values for each index, + * for use when calling tlb_flush_by_mmuidx() and friends. + */ +#define TO_CORE_BIT(NAME) \ + ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK) + +typedef enum ARMMMUIdxBit { + TO_CORE_BIT(E10_0), + TO_CORE_BIT(E20_0), + TO_CORE_BIT(E10_1), + TO_CORE_BIT(E10_1_PAN), + TO_CORE_BIT(E2), + TO_CORE_BIT(E20_2), + TO_CORE_BIT(E20_2_PAN), + TO_CORE_BIT(SE10_0), + TO_CORE_BIT(SE10_1), + TO_CORE_BIT(SE10_1_PAN), + TO_CORE_BIT(SE3), + TO_CORE_BIT(Stage2), + + TO_CORE_BIT(MUser), + TO_CORE_BIT(MPriv), + TO_CORE_BIT(MUserNegPri), + TO_CORE_BIT(MPrivNegPri), + TO_CORE_BIT(MSUser), + TO_CORE_BIT(MSPriv), + TO_CORE_BIT(MSUserNegPri), + TO_CORE_BIT(MSPrivNegPri), +} ARMMMUIdxBit; + +#undef TO_CORE_BIT + +#define MMU_USER_IDX 0 + +/* Indexes used when registering address spaces with cpu_address_space_init */ +typedef enum ARMASIdx { + ARMASIdx_NS = 0, + ARMASIdx_S = 1, +} ARMASIdx; + +/* Return the Exception Level targeted by debug exceptions. */ +static inline int arm_debug_target_el(CPUARMState *env) +{ + bool secure = arm_is_secure(env); + bool route_to_el2 = false; + + if (arm_feature(env, ARM_FEATURE_EL2) && !secure) { + route_to_el2 = env->cp15.hcr_el2 & HCR_TGE || + env->cp15.mdcr_el2 & MDCR_TDE; + } + + if (route_to_el2) { + return 2; + } else if (arm_feature(env, ARM_FEATURE_EL3) && + !arm_el_is_aa64(env, 3) && secure) { + return 3; + } else { + return 1; + } +} + +static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu) +{ + /* If all the CLIDR.Ctypem bits are 0 there are no caches, and + * CSSELR is RAZ/WI. + */ + return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0; +} + +/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */ +static inline bool aa64_generate_debug_exceptions(CPUARMState *env) +{ + int cur_el = arm_current_el(env); + int debug_el; + + if (cur_el == 3) { + return false; + } + + /* MDCR_EL3.SDD disables debug events from Secure state */ + if (arm_is_secure_below_el3(env) + && extract32(env->cp15.mdcr_el3, 16, 1)) { + return false; + } + + /* + * Same EL to same EL debug exceptions need MDSCR_KDE enabled + * while not masking the (D)ebug bit in DAIF. + */ + debug_el = arm_debug_target_el(env); + + if (cur_el == debug_el) { + return extract32(env->cp15.mdscr_el1, 13, 1) + && !(env->daif & PSTATE_D); + } + + /* Otherwise the debug target needs to be a higher EL */ + return debug_el > cur_el; +} + +static inline bool aa32_generate_debug_exceptions(CPUARMState *env) +{ + int el = arm_current_el(env); + + if (el == 0 && arm_el_is_aa64(env, 1)) { + return aa64_generate_debug_exceptions(env); + } + + if (arm_is_secure(env)) { + int spd; + + if (el == 0 && (env->cp15.sder & 1)) { + /* SDER.SUIDEN means debug exceptions from Secure EL0 + * are always enabled. Otherwise they are controlled by + * SDCR.SPD like those from other Secure ELs. + */ + return true; + } + + spd = extract32(env->cp15.mdcr_el3, 14, 2); + switch (spd) { + case 1: + /* SPD == 0b01 is reserved, but behaves as 0b00. 
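Fall through.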
*/
+        case 0:
+            /* For 0b00 we return true if external secure invasive debug
+             * is enabled. On real hardware this is controlled by external
+             * signals to the core. QEMU always permits debug, and behaves
+             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
+             */
+            return true;
+        case 2:
+            return false;
+        case 3:
+            return true;
+        }
+    }
+
+    return el != 2;
+}
+
+/* Return true if debugging exceptions are currently enabled.
+ * This corresponds to what in ARM ARM pseudocode would be
+ *    if UsingAArch32() then
+ *        return AArch32.GenerateDebugExceptions()
+ *    else
+ *        return AArch64.GenerateDebugExceptions()
+ * We choose to push the if() down into this function for clarity,
+ * since the pseudocode has it at all callsites except for the one in
+ * CheckSoftwareStep(), where it is elided because both branches would
+ * always return the same value.
+ */
+static inline bool arm_generate_debug_exceptions(CPUARMState *env)
+{
+    if (env->aarch64) {
+        return aa64_generate_debug_exceptions(env);
+    } else {
+        return aa32_generate_debug_exceptions(env);
+    }
+}
+
+/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
+ * implicitly means this always returns false in pre-v8 CPUs.)
+ */
+static inline bool arm_singlestep_active(CPUARMState *env)
+{
+    return extract32(env->cp15.mdscr_el1, 0, 1)
+        && arm_el_is_aa64(env, arm_debug_target_el(env))
+        && arm_generate_debug_exceptions(env);
+}
+
+static inline bool arm_sctlr_b(CPUARMState *env)
+{
+    return
+        /* We need not implement SCTLR.ITD in user-mode emulation, so
+         * let linux-user ignore the fact that it conflicts with SCTLR_B.
+         * This lets people run BE32 binaries with "-cpu any".
+         */
+        !arm_feature(env, ARM_FEATURE_V7) &&
+        (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
+}
+
+uint64_t arm_sctlr(CPUARMState *env, int el);
+
+static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
+                                                  bool sctlr_b)
+{
+    /* In 32-bit mode, endianness is determined by the CPSR's E bit */
+    return env->uncached_cpsr & CPSR_E;
+}
+
+static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
+{
+    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
+}
+
+/* Return true if the processor is in big-endian mode. */
+static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
+{
+    if (!is_a64(env)) {
+        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
+    } else {
+        int cur_el = arm_current_el(env);
+        uint64_t sctlr = arm_sctlr(env, cur_el);
+        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
+    }
+}
+
+typedef CPUARMState CPUArchState;
+typedef ARMCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+/*
+ * Bit usage in the TB flags field: bit 31 indicates whether we are
+ * in 32 or 64 bit mode. The meaning of the other bits depends on that.
+ * We put flags which are shared between 32 and 64 bit mode at the top
+ * of the word, and flags which apply to only one mode at the bottom.
+ *
+ *  31          20    18    14          9              0
+ * +--------------+-----+-----+----------+--------------+
+ * |              |     |   TBFLAG_A32   |              |
+ * |              |     +-----+----------+  TBFLAG_AM32 |
+ * |  TBFLAG_ANY  |           |TBFLAG_M32|              |
+ * |              |         +-+----------+--------------|
+ * |              |         |      TBFLAG_A64           |
+ * +--------------+---------+---------------------------+
+ *  31          20        15                           0
+ *
+ * Unless otherwise noted, these bits are cached in env->hflags.
+ */
+FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
+FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
+FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1)     /* Not cached.
*/ +FIELD(TBFLAG_ANY, BE_DATA, 28, 1) +FIELD(TBFLAG_ANY, MMUIDX, 24, 4) +/* Target EL if we take a floating-point-disabled exception */ +FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2) +/* For A-profile only, target EL for debug exceptions. */ +FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2) + +/* + * Bit usage when in AArch32 state, both A- and M-profile. + */ +FIELD(TBFLAG_AM32, CONDEXEC, 0, 8) /* Not cached. */ +FIELD(TBFLAG_AM32, THUMB, 8, 1) /* Not cached. */ + +/* + * Bit usage when in AArch32 state, for A-profile only. + */ +FIELD(TBFLAG_A32, VECLEN, 9, 3) /* Not cached. */ +FIELD(TBFLAG_A32, VECSTRIDE, 12, 2) /* Not cached. */ +/* + * We store the bottom two bits of the CPAR as TB flags and handle + * checks on the other bits at runtime. This shares the same bits as + * VECSTRIDE, which is OK as no XScale CPU has VFP. + * Not cached, because VECLEN+VECSTRIDE are not cached. + */ +FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2) +FIELD(TBFLAG_A32, VFPEN, 14, 1) /* Partially cached, minus FPEXC. */ +FIELD(TBFLAG_A32, SCTLR_B, 15, 1) +FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1) +/* + * Indicates whether cp register reads and writes by guest code should access + * the secure or nonsecure bank of banked registers; note that this is not + * the same thing as the current security state of the processor! + */ +FIELD(TBFLAG_A32, NS, 17, 1) + +/* + * Bit usage when in AArch32 state, for M-profile only. + */ +/* Handler (ie not Thread) mode */ +FIELD(TBFLAG_M32, HANDLER, 9, 1) +/* Whether we should generate stack-limit checks */ +FIELD(TBFLAG_M32, STACKCHECK, 10, 1) +/* Set if FPCCR.LSPACT is set */ +FIELD(TBFLAG_M32, LSPACT, 11, 1) /* Not cached. */ +/* Set if we must create a new FP context */ +FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1) /* Not cached. */ +/* Set if FPCCR.S does not match current security state */ +FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1) /* Not cached. */ + +/* + * Bit usage when in AArch64 state + */ +FIELD(TBFLAG_A64, TBII, 0, 2) +FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2) +FIELD(TBFLAG_A64, ZCR_LEN, 4, 4) +FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1) +FIELD(TBFLAG_A64, BT, 9, 1) +FIELD(TBFLAG_A64, BTYPE, 10, 2) /* Not cached. */ +FIELD(TBFLAG_A64, TBID, 12, 2) +FIELD(TBFLAG_A64, UNPRIV, 14, 1) + +/** + * cpu_mmu_index: + * @env: The cpu environment + * @ifetch: True for code access, false for data access. + * + * Return the core mmu index for the current translation regime. + * This function is used by generic TCG code paths. + */ +static inline int cpu_mmu_index(CPUARMState *env, bool ifetch) +{ + return FIELD_EX32(env->hflags, TBFLAG_ANY, MMUIDX); +} + +static inline bool bswap_code(bool sctlr_b) +{ + /* All code access in ARM is little endian, and there are no loaders + * doing swaps that need to be reversed + */ + return 0; +} + +void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *flags); + +enum { + QEMU_PSCI_CONDUIT_DISABLED = 0, + QEMU_PSCI_CONDUIT_SMC = 1, + QEMU_PSCI_CONDUIT_HVC = 2, +}; + +/* Return the address space index to use for a memory access */ +static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) +{ + return attrs.secure ? ARMASIdx_S : ARMASIdx_NS; +} + +/* Return the AddressSpace to use for a memory access + * (which depends on whether the access is S or NS, and whether + * the board gave us a separate AddressSpace for S accesses). 
+ */ +static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs) +{ + return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs)); +} + +/** + * arm_rebuild_hflags: + * Rebuild the cached TBFLAGS for arbitrary changed processor state. + */ +void arm_rebuild_hflags(CPUARMState *env); + +/** + * aa32_vfp_dreg: + * Return a pointer to the Dn register within env in 32-bit mode. + */ +static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno) +{ + return &env->vfp.zregs[regno >> 1].d[regno & 1]; +} + +/** + * aa32_vfp_qreg: + * Return a pointer to the Qn register within env in 32-bit mode. + */ +static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno) +{ + return &env->vfp.zregs[regno].d[0]; +} + +/** + * aa64_vfp_qreg: + * Return a pointer to the Qn register within env in 64-bit mode. + */ +static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno) +{ + return &env->vfp.zregs[regno].d[0]; +} + +/* Shared between translate-sve.c and sve_helper.c. */ +extern const uint64_t pred_esz_masks[4]; + +/* + * Naming convention for isar_feature functions: + * Functions which test 32-bit ID registers should have _aa32_ in + * their name. Functions which test 64-bit ID registers should have + * _aa64_ in their name. These must only be used in code where we + * know for certain that the CPU has AArch32 or AArch64 respectively + * or where the correct answer for a CPU which doesn't implement that + * CPU state is "false" (eg when generating A32 or A64 code, if adding + * system registers that are specific to that CPU state, for "should + * we let this system register bit be set" tests where the 32-bit + * flavour of the register doesn't have the bit, and so on). + * Functions which simply ask "does this feature exist at all" have + * _any_ in their name, and always return the logical OR of the _aa64_ + * and the _aa32_ function. + */ + +/* + * 32-bit feature tests via id registers. 
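+ * Each test below is a threshold check on one ID register field; e.g.
+ * FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0 means some divide
+ * support exists, while > 1 means the A32 encodings are present too.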
+ */ +static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0; +} + +static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; +} + +static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; +} + +static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; +} + +static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1; +} + +static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0; +} + +static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0; +} + +static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0; +} + +static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0; +} + +static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0; +} + +static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0; +} + +static inline bool isar_feature_aa32_dp(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0; +} + +static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0; +} + +static inline bool isar_feature_aa32_sb(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0; +} + +static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0; +} + +static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) +{ + /* + * This is a placeholder for use by VCMA until the rest of + * the ARMv8.2-FP16 extension is implemented for aa32 mode. + * At which point we can properly set and check MVFR1.FPHP. + */ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; +} + +static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id) +{ + /* + * Return true if either VFP or SIMD is implemented. + * In this case, a minimum of VFP w/ D0-D15. 
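+ * (MVFR0.SIMDReg == 1 means D0-D15 are implemented, == 2 means D0-D31;
+ * compare isar_feature_aa32_simd_r32() below.)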
+ */ + return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) > 0; +} + +static inline bool isar_feature_aa32_simd_r32(const ARMISARegisters *id) +{ + /* Return true if D16-D31 are implemented */ + return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) >= 2; +} + +static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0; +} + +static inline bool isar_feature_aa32_fpsp_v2(const ARMISARegisters *id) +{ + /* Return true if CPU supports single precision floating point, VFPv2 */ + return FIELD_EX32(id->mvfr0, MVFR0, FPSP) > 0; +} + +static inline bool isar_feature_aa32_fpsp_v3(const ARMISARegisters *id) +{ + /* Return true if CPU supports single precision floating point, VFPv3 */ + return FIELD_EX32(id->mvfr0, MVFR0, FPSP) >= 2; +} + +static inline bool isar_feature_aa32_fpdp_v2(const ARMISARegisters *id) +{ + /* Return true if CPU supports double precision floating point, VFPv2 */ + return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0; +} + +static inline bool isar_feature_aa32_fpdp_v3(const ARMISARegisters *id) +{ + /* Return true if CPU supports double precision floating point, VFPv3 */ + return FIELD_EX32(id->mvfr0, MVFR0, FPDP) >= 2; +} + +static inline bool isar_feature_aa32_vfp(const ARMISARegisters *id) +{ + return isar_feature_aa32_fpsp_v2(id) || isar_feature_aa32_fpdp_v2(id); +} + +/* + * We always set the FP and SIMD FP16 fields to indicate identical + * levels of support (assuming SIMD is implemented at all), so + * we only need one set of accessors. + */ +static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 0; +} + +static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 1; +} + +/* + * Note that this ID register field covers both VFP and Neon FMAC, + * so should usually be tested in combination with some other + * check that confirms the presence of whichever of VFP or Neon is + * relevant, to avoid accidentally enabling a Neon feature on + * a VFP-no-Neon core or vice-versa. 
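+ * A combined check might look like (sketch):
+ *     isar_feature_aa32_simdfmac(id) && isar_feature_aa32_fpsp_v2(id)
+ * for the VFP single-precision VFMA case.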
+ */ +static inline bool isar_feature_aa32_simdfmac(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr1, MVFR1, SIMDFMAC) != 0; +} + +static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 1; +} + +static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 2; +} + +static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 3; +} + +static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id) +{ + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 4; +} + +static inline bool isar_feature_aa32_pan(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0; +} + +static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2; +} + +static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id) +{ + /* 0xf means "non-standard IMPDEF PMU" */ + return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 && + FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; +} + +static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id) +{ + /* 0xf means "non-standard IMPDEF PMU" */ + return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 && + FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; +} + +static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0; +} + +static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0; +} + +static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0; +} + +/* + * 64-bit feature tests via id registers. 
+ */ +static inline bool isar_feature_aa64_aes(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0; +} + +static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1; +} + +static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0; +} + +static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0; +} + +static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1; +} + +static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0; +} + +static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0; +} + +static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0; +} + +static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0; +} + +static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0; +} + +static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0; +} + +static inline bool isar_feature_aa64_dp(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0; +} + +static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0; +} + +static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0; +} + +static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2; +} + +static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0; +} + +static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0; +} + +static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; +} + +static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id) +{ + uint64_t apa, api, gpa, gpi; + + /* + * Note that while QEMU will only implement the architected algorithm + * QARMA, and thus APA+GPA, the host cpu for kvm may use implementation + * defined algorithms, and thus API+GPI, and this predicate controls + * migration of the 128-bit keys. 
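+ * The FIELD_DP64() calls below each deposit 0xf into one field of a
+ * zero word, building per-field masks; the final test is then simply
+ * "is any of APA, API, GPA or GPI non-zero".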
+ */ + FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf, apa) + FIELD_DP64(0, ID_AA64ISAR1, API, 0xf, api) + FIELD_DP64(0, ID_AA64ISAR1, GPA, 0xf, gpa) + FIELD_DP64(0, ID_AA64ISAR1, GPI, 0xf, gpi) + + return (id->id_aa64isar1 & (apa | api | gpa | gpi)) != 0; +} + +static inline bool isar_feature_aa64_sb(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0; +} + +static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0; +} + +static inline bool isar_feature_aa64_frint(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0; +} + +static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0; +} + +static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2; +} + +static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id) +{ + /* We always set the AdvSIMD and FP fields identically. */ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) != 0xf; +} + +static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id) +{ + /* We always set the AdvSIMD and FP fields identically wrt FP16. */ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; +} + +static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2; +} + +static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; +} + +static inline bool isar_feature_aa64_vh(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0; +} + +static inline bool isar_feature_aa64_lor(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0; +} + +static inline bool isar_feature_aa64_pan(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0; +} + +static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2; +} + +static inline bool isar_feature_aa64_uao(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0; +} + +static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0; +} + +static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && + FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; +} + +static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 && + FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; +} + +static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0; +} + +static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2; +} + +static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0; +} + +/* + * Feature tests for "does this exist in either 32-bit or 64-bit?" 
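+ * Each _any_ helper below ORs the corresponding _aa64_ and _aa32_
+ * tests, and is normally reached through the cpu_isar_feature()
+ * macro, e.g. cpu_isar_feature(any_fp16, cpu).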
+ */
+static inline bool isar_feature_any_fp16(const ARMISARegisters *id)
+{
+    return isar_feature_aa64_fp16(id) || isar_feature_aa32_fp16_arith(id);
+}
+
+static inline bool isar_feature_any_predinv(const ARMISARegisters *id)
+{
+    return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id);
+}
+
+static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id)
+{
+    return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id);
+}
+
+static inline bool isar_feature_any_pmu_8_4(const ARMISARegisters *id)
+{
+    return isar_feature_aa64_pmu_8_4(id) || isar_feature_aa32_pmu_8_4(id);
+}
+
+static inline bool isar_feature_any_ccidx(const ARMISARegisters *id)
+{
+    return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id);
+}
+
+/*
+ * Forward to the above feature tests given an ARMCPU pointer.
+ */
+#define cpu_isar_feature(name, cpu) isar_feature_##name(&cpu->isar)
+
+#endif
diff --git a/qemu/target/arm/cpu64.c b/qemu/target/arm/cpu64.c
new file mode 100644
index 00000000..52933554
--- /dev/null
+++ b/qemu/target/arm/cpu64.c
@@ -0,0 +1,388 @@
+/*
+ * QEMU AArch64 CPU
+ *
+ * Copyright (c) 2013 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include
+
+void arm_cpu_realizefn(struct uc_struct *uc, CPUState *dev);
+void arm_cpu_class_init(struct uc_struct *uc, CPUClass *oc);
+void arm_cpu_post_init(CPUState *obj);
+void arm_cpu_initfn(struct uc_struct *uc, CPUState *obj);
+ARMCPU *cpu_arm_init(struct uc_struct *uc);
+
+static inline void set_feature(CPUARMState *env, int feature)
+{
+    env->features |= 1ULL << feature;
+}
+
+static void aarch64_a57_initfn(struct uc_struct *uc, CPUState *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    set_feature(&cpu->env, ARM_FEATURE_V8);
+    set_feature(&cpu->env, ARM_FEATURE_NEON);
+    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+    set_feature(&cpu->env, ARM_FEATURE_EL2);
+    set_feature(&cpu->env, ARM_FEATURE_EL3);
+    set_feature(&cpu->env, ARM_FEATURE_PMU);
+    cpu->midr = 0x411fd070;
+    cpu->revidr = 0x00000000;
+    cpu->reset_fpsid = 0x41034070;
+    cpu->isar.mvfr0 = 0x10110222;
+    cpu->isar.mvfr1 = 0x12111111;
+    cpu->isar.mvfr2 = 0x00000043;
+    cpu->ctr = 0x8444c004;
+    cpu->reset_sctlr = 0x00c50838;
+    cpu->id_pfr0 = 0x00000131;
+    cpu->id_pfr1 = 0x00011011;
+    cpu->isar.id_dfr0 = 0x03010066;
+    cpu->id_afr0 = 0x00000000;
+    cpu->isar.id_mmfr0 = 0x10101105;
+    cpu->isar.id_mmfr1 = 0x40000000;
+    cpu->isar.id_mmfr2 = 0x01260000;
+    cpu->isar.id_mmfr3 = 0x02102211;
+    cpu->isar.id_isar0 = 0x02101110;
+    cpu->isar.id_isar1 = 0x13112111;
+    cpu->isar.id_isar2 = 0x21232042;
+    cpu->isar.id_isar3 = 0x01112131;
+    cpu->isar.id_isar4 = 0x00011142;
+    cpu->isar.id_isar5 = 0x00011121;
+    cpu->isar.id_isar6 = 0;
+    cpu->isar.id_aa64pfr0 = 0x00002222;
+    cpu->isar.id_aa64dfr0 = 0x10305106;
+    cpu->isar.id_aa64isar0 = 0x00011120;
+
cpu->isar.id_aa64mmfr0 = 0x00001124; + cpu->isar.dbgdidr = 0x3516d000; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ + cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */ + cpu->dcz_blocksize = 4; /* 64 bytes */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; +} + +static void aarch64_a53_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->midr = 0x410fd034; + cpu->revidr = 0x00000000; + cpu->reset_fpsid = 0x41034070; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + cpu->ctr = 0x84448004; /* L1Ip = VIPT */ + cpu->reset_sctlr = 0x00c50838; + cpu->id_pfr0 = 0x00000131; + cpu->id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x03010066; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x10101105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_isar6 = 0; + cpu->isar.id_aa64pfr0 = 0x00002222; + cpu->isar.id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ + cpu->isar.dbgdidr = 0x3516d000; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ + cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */ + cpu->dcz_blocksize = 4; /* 64 bytes */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; +} + +static void aarch64_a72_initfn(struct uc_struct *uc, CPUState *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + cpu->midr = 0x410fd083; + cpu->revidr = 0x00000000; + cpu->reset_fpsid = 0x41034080; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; + cpu->ctr = 0x8444c004; + cpu->reset_sctlr = 0x00c50838; + cpu->id_pfr0 = 0x00000131; + cpu->id_pfr1 = 0x00011011; + cpu->isar.id_dfr0 = 0x03010066; + cpu->id_afr0 = 0x00000000; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_aa64pfr0 = 0x00002222; + cpu->isar.id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64mmfr0 = 0x00001124; + cpu->isar.dbgdidr = 0x3516d000; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 
dcache */ + cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ + cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */ + cpu->dcz_blocksize = 4; /* 64 bytes */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; +} + +/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host); + * otherwise, a CPU with as many features enabled as our emulation supports. + * The version of '-cpu max' for qemu-system-arm is defined in cpu.c; + * this only needs to handle 64 bits. + */ +static void aarch64_max_initfn(struct uc_struct *uc, CPUState *obj) +{ + + uint64_t t; + uint32_t u; + ARMCPU *cpu = ARM_CPU(obj); + + aarch64_a57_initfn(uc, obj); + + /* + * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real + * one and try to apply errata workarounds or use impdef features we + * don't provide. + * An IMPLEMENTER field of 0 means "reserved for software use"; + * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers + * to see which features are present"; + * the VARIANT, PARTNUM and REVISION fields are all implementation + * defined and we choose to define PARTNUM just in case guest + * code needs to distinguish this QEMU CPU from other software + * implementations, though this shouldn't be needed. + */ + FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0, t); + FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf ,t); + FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q', t); + FIELD_DP64(t, MIDR_EL1, VARIANT, 0, t); + FIELD_DP64(t, MIDR_EL1, REVISION, 0, t); + cpu->midr = t; + + t = cpu->isar.id_aa64isar0; + FIELD_DP64(t, ID_AA64ISAR0, AES, 2, t); /* AES + PMULL */ + FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1, t); + FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2, t); /* SHA512 */ + FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1, t); + FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2, t); + FIELD_DP64(t, ID_AA64ISAR0, RDM, 1, t); + FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1, t); + FIELD_DP64(t, ID_AA64ISAR0, SM3, 1, t); + FIELD_DP64(t, ID_AA64ISAR0, SM4, 1, t); + FIELD_DP64(t, ID_AA64ISAR0, DP, 1, t); + FIELD_DP64(t, ID_AA64ISAR0, FHM, 1, t); + FIELD_DP64(t, ID_AA64ISAR0, TS, 2, t); /* v8.5-CondM */ + FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1, t); + cpu->isar.id_aa64isar0 = t; + + t = cpu->isar.id_aa64isar1; + FIELD_DP64(t, ID_AA64ISAR1, DPB, 2, t); + FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1, t); + FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1, t); + FIELD_DP64(t, ID_AA64ISAR1, APA, 1, t); /* PAuth, architected only */ + FIELD_DP64(t, ID_AA64ISAR1, API, 0, t); + FIELD_DP64(t, ID_AA64ISAR1, GPA, 1, t); + FIELD_DP64(t, ID_AA64ISAR1, GPI, 0, t); + FIELD_DP64(t, ID_AA64ISAR1, SB, 1, t); + FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1, t); + FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1, t); + FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2, t); /* ARMv8.4-RCPC */ + cpu->isar.id_aa64isar1 = t; + + t = cpu->isar.id_aa64pfr0; + FIELD_DP64(t, ID_AA64PFR0, SVE, 1, t); + FIELD_DP64(t, ID_AA64PFR0, FP, 1, t); + FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1, t); + cpu->isar.id_aa64pfr0 = t; + + t = cpu->isar.id_aa64pfr1; + FIELD_DP64(t, ID_AA64PFR1, BT, 1, t); + cpu->isar.id_aa64pfr1 = t; + + t = cpu->isar.id_aa64mmfr1; + FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1, t); /* HPD */ + FIELD_DP64(t, ID_AA64MMFR1, LO, 1, t); + FIELD_DP64(t, ID_AA64MMFR1, VH, 1, t); + FIELD_DP64(t, ID_AA64MMFR1, PAN, 2, t); /* ATS1E1 */ + FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2, t); /* VMID16 */ + cpu->isar.id_aa64mmfr1 = t; + + t = cpu->isar.id_aa64mmfr2; + FIELD_DP64(t, ID_AA64MMFR2, UAO, 1, t); + FIELD_DP64(t, ID_AA64MMFR2, CNP, 1, t); /* TTCNP */ + cpu->isar.id_aa64mmfr2 = t; + + /* Replicate the same data to the 
32-bit id registers. */
+    u = cpu->isar.id_isar5;
+    FIELD_DP32(u, ID_ISAR5, AES, 2, u); /* AES + PMULL */
+    FIELD_DP32(u, ID_ISAR5, SHA1, 1, u);
+    FIELD_DP32(u, ID_ISAR5, SHA2, 1, u);
+    FIELD_DP32(u, ID_ISAR5, CRC32, 1, u);
+    FIELD_DP32(u, ID_ISAR5, RDM, 1, u);
+    FIELD_DP32(u, ID_ISAR5, VCMA, 1, u);
+    cpu->isar.id_isar5 = u;
+
+    u = cpu->isar.id_isar6;
+    FIELD_DP32(u, ID_ISAR6, JSCVT, 1, u);
+    FIELD_DP32(u, ID_ISAR6, DP, 1, u);
+    FIELD_DP32(u, ID_ISAR6, FHM, 1, u);
+    FIELD_DP32(u, ID_ISAR6, SB, 1, u);
+    FIELD_DP32(u, ID_ISAR6, SPECRES, 1, u);
+    cpu->isar.id_isar6 = u;
+
+    u = cpu->isar.id_mmfr3;
+    FIELD_DP32(u, ID_MMFR3, PAN, 2, u); /* ATS1E1 */
+    cpu->isar.id_mmfr3 = u;
+
+    u = cpu->isar.id_mmfr4;
+    FIELD_DP32(u, ID_MMFR4, HPDS, 1, u); /* AA32HPD */
+    FIELD_DP32(u, ID_MMFR4, AC2, 1, u); /* ACTLR2, HACTLR2 */
+    FIELD_DP32(u, ID_MMFR4, CNP, 1, u); /* TTCNP */
+    cpu->isar.id_mmfr4 = u;
+
+    /* Note: id_aa64dfr0 is 64 bits wide, so use the 64-bit temporary
+     * here; the 32-bit scratch variable would silently truncate it.
+     */
+    t = cpu->isar.id_aa64dfr0;
+    FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5, t); /* v8.4-PMU */
+    cpu->isar.id_aa64dfr0 = t;
+
+    u = cpu->isar.id_dfr0;
+    FIELD_DP32(u, ID_DFR0, PERFMON, 5, u); /* v8.4-PMU */
+    cpu->isar.id_dfr0 = u;
+}
+
+struct ARMCPUInfo {
+    const char *name;
+    void (*initfn)(struct uc_struct *uc, CPUState *obj);
+};
+
+static const ARMCPUInfo aarch64_cpus[] = {
+    { .name = "cortex-a57", .initfn = aarch64_a57_initfn },
+    { .name = "cortex-a53", .initfn = aarch64_a53_initfn },
+    { .name = "cortex-a72", .initfn = aarch64_a72_initfn },
+    { .name = "max", .initfn = aarch64_max_initfn },
+};
+
+ARMCPU *cpu_aarch64_init(struct uc_struct *uc)
+{
+    int i;
+    const char *cpu_model = "cortex-a72";
+    ARMCPU *cpu;
+    CPUState *cs;
+    CPUClass *cc;
+
+    cpu = calloc(1, sizeof(*cpu));
+    if (cpu == NULL) {
+        return NULL;
+    }
+
+    cs = (CPUState *)cpu;
+    cc = (CPUClass *)&cpu->cc;
+    cs->cc = cc;
+    cs->uc = uc;
+    uc->cpu = (CPUState *)cpu;
+
+    /* init CPUClass */
+    cpu_class_init(uc, cc);
+
+    /* init ARMCPUClass */
+    arm_cpu_class_init(uc, cc);
+
+    /* init CPUState */
+    cpu_common_initfn(uc, cs);
+
+    /* init ARMCPU */
+    arm_cpu_initfn(uc, cs);
+
+    for (i = 0; i < ARRAY_SIZE(aarch64_cpus); i++) {
+        if (strcmp(cpu_model, aarch64_cpus[i].name) == 0) {
+            if (aarch64_cpus[i].initfn) {
+                aarch64_cpus[i].initfn(uc, cs);
+            }
+            break;
+        }
+    }
+    if (i == ARRAY_SIZE(aarch64_cpus)) {
+        free(cpu);
+        return NULL;
+    }
+
+    /* postinit ARMCPU */
+    arm_cpu_post_init(cs);
+
+    /*
+     * Unicorn: hack to force EL2/EL3 on for aarch64 so that we can use
+     * the full 64-bit virtual address space.
+     *
+     * While EL2/EL3 are enabled but we run within EL1, we get something
+     * like "x86 flat mode", though aarch64 only allows a maximum of
+     * 52 bits of virtual address space.
+     */
+    ARM_CPU(cs)->has_el2 = true;
+    ARM_CPU(cs)->has_el3 = true;
+
+    /* realize ARMCPU */
+    arm_cpu_realizefn(uc, cs);
+
+    /* init address space */
+    cpu_address_space_init(cs, 0, cs->memory);
+
+    qemu_init_vcpu(cs);
+
+    return cpu;
+}
diff --git a/qemu/target/arm/crypto_helper.c b/qemu/target/arm/crypto_helper.c
new file mode 100644
index 00000000..117be6f8
--- /dev/null
+++ b/qemu/target/arm/crypto_helper.c
@@ -0,0 +1,695 @@
+/*
+ * crypto_helper.c - emulate v8 Crypto Extensions instructions
+ *
+ * Copyright (C) 2013 - 2018 Linaro Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ */ + +#include "qemu/osdep.h" + +#include "cpu.h" +#include "exec/helper-proto.h" +#include "crypto/aes.h" + +union CRYPTO_STATE { + uint8_t bytes[16]; + uint32_t words[4]; + uint64_t l[2]; +}; + +#ifdef HOST_WORDS_BIGENDIAN +#define CR_ST_BYTE(state, i) (state.bytes[(15 - (i)) ^ 8]) +#define CR_ST_WORD(state, i) (state.words[(3 - (i)) ^ 2]) +#else +#define CR_ST_BYTE(state, i) (state.bytes[i]) +#define CR_ST_WORD(state, i) (state.words[i]) +#endif + +void HELPER(crypto_aese)(void *vd, void *vm, uint32_t decrypt) +{ + static uint8_t const * const sbox[2] = { AES_sbox, AES_isbox }; + static uint8_t const * const shift[2] = { AES_shifts, AES_ishifts }; + uint64_t *rd = vd; + uint64_t *rm = vm; + union CRYPTO_STATE rk = { .l = { rm[0], rm[1] } }; + union CRYPTO_STATE st = { .l = { rd[0], rd[1] } }; + int i; + + assert(decrypt < 2); + + /* xor state vector with round key */ + rk.l[0] ^= st.l[0]; + rk.l[1] ^= st.l[1]; + + /* combine ShiftRows operation and sbox substitution */ + for (i = 0; i < 16; i++) { + CR_ST_BYTE(st, i) = sbox[decrypt][CR_ST_BYTE(rk, shift[decrypt][i])]; + } + + rd[0] = st.l[0]; + rd[1] = st.l[1]; +} + +void HELPER(crypto_aesmc)(void *vd, void *vm, uint32_t decrypt) +{ + static uint32_t const mc[][256] = { { + /* MixColumns lookup table */ + 0x00000000, 0x03010102, 0x06020204, 0x05030306, + 0x0c040408, 0x0f05050a, 0x0a06060c, 0x0907070e, + 0x18080810, 0x1b090912, 0x1e0a0a14, 0x1d0b0b16, + 0x140c0c18, 0x170d0d1a, 0x120e0e1c, 0x110f0f1e, + 0x30101020, 0x33111122, 0x36121224, 0x35131326, + 0x3c141428, 0x3f15152a, 0x3a16162c, 0x3917172e, + 0x28181830, 0x2b191932, 0x2e1a1a34, 0x2d1b1b36, + 0x241c1c38, 0x271d1d3a, 0x221e1e3c, 0x211f1f3e, + 0x60202040, 0x63212142, 0x66222244, 0x65232346, + 0x6c242448, 0x6f25254a, 0x6a26264c, 0x6927274e, + 0x78282850, 0x7b292952, 0x7e2a2a54, 0x7d2b2b56, + 0x742c2c58, 0x772d2d5a, 0x722e2e5c, 0x712f2f5e, + 0x50303060, 0x53313162, 0x56323264, 0x55333366, + 0x5c343468, 0x5f35356a, 0x5a36366c, 0x5937376e, + 0x48383870, 0x4b393972, 0x4e3a3a74, 0x4d3b3b76, + 0x443c3c78, 0x473d3d7a, 0x423e3e7c, 0x413f3f7e, + 0xc0404080, 0xc3414182, 0xc6424284, 0xc5434386, + 0xcc444488, 0xcf45458a, 0xca46468c, 0xc947478e, + 0xd8484890, 0xdb494992, 0xde4a4a94, 0xdd4b4b96, + 0xd44c4c98, 0xd74d4d9a, 0xd24e4e9c, 0xd14f4f9e, + 0xf05050a0, 0xf35151a2, 0xf65252a4, 0xf55353a6, + 0xfc5454a8, 0xff5555aa, 0xfa5656ac, 0xf95757ae, + 0xe85858b0, 0xeb5959b2, 0xee5a5ab4, 0xed5b5bb6, + 0xe45c5cb8, 0xe75d5dba, 0xe25e5ebc, 0xe15f5fbe, + 0xa06060c0, 0xa36161c2, 0xa66262c4, 0xa56363c6, + 0xac6464c8, 0xaf6565ca, 0xaa6666cc, 0xa96767ce, + 0xb86868d0, 0xbb6969d2, 0xbe6a6ad4, 0xbd6b6bd6, + 0xb46c6cd8, 0xb76d6dda, 0xb26e6edc, 0xb16f6fde, + 0x907070e0, 0x937171e2, 0x967272e4, 0x957373e6, + 0x9c7474e8, 0x9f7575ea, 0x9a7676ec, 0x997777ee, + 0x887878f0, 0x8b7979f2, 0x8e7a7af4, 0x8d7b7bf6, + 0x847c7cf8, 0x877d7dfa, 0x827e7efc, 0x817f7ffe, + 0x9b80801b, 0x98818119, 0x9d82821f, 0x9e83831d, + 0x97848413, 0x94858511, 0x91868617, 0x92878715, + 0x8388880b, 0x80898909, 0x858a8a0f, 0x868b8b0d, + 0x8f8c8c03, 0x8c8d8d01, 0x898e8e07, 0x8a8f8f05, + 0xab90903b, 0xa8919139, 0xad92923f, 0xae93933d, + 0xa7949433, 0xa4959531, 0xa1969637, 0xa2979735, + 0xb398982b, 0xb0999929, 0xb59a9a2f, 0xb69b9b2d, + 0xbf9c9c23, 0xbc9d9d21, 0xb99e9e27, 0xba9f9f25, + 0xfba0a05b, 0xf8a1a159, 0xfda2a25f, 0xfea3a35d, + 0xf7a4a453, 0xf4a5a551, 0xf1a6a657, 0xf2a7a755, + 0xe3a8a84b, 0xe0a9a949, 0xe5aaaa4f, 0xe6abab4d, + 0xefacac43, 0xecadad41, 0xe9aeae47, 0xeaafaf45, + 0xcbb0b07b, 0xc8b1b179, 0xcdb2b27f, 0xceb3b37d, + 0xc7b4b473, 0xc4b5b571, 
0xc1b6b677, 0xc2b7b775, + 0xd3b8b86b, 0xd0b9b969, 0xd5baba6f, 0xd6bbbb6d, + 0xdfbcbc63, 0xdcbdbd61, 0xd9bebe67, 0xdabfbf65, + 0x5bc0c09b, 0x58c1c199, 0x5dc2c29f, 0x5ec3c39d, + 0x57c4c493, 0x54c5c591, 0x51c6c697, 0x52c7c795, + 0x43c8c88b, 0x40c9c989, 0x45caca8f, 0x46cbcb8d, + 0x4fcccc83, 0x4ccdcd81, 0x49cece87, 0x4acfcf85, + 0x6bd0d0bb, 0x68d1d1b9, 0x6dd2d2bf, 0x6ed3d3bd, + 0x67d4d4b3, 0x64d5d5b1, 0x61d6d6b7, 0x62d7d7b5, + 0x73d8d8ab, 0x70d9d9a9, 0x75dadaaf, 0x76dbdbad, + 0x7fdcdca3, 0x7cdddda1, 0x79dedea7, 0x7adfdfa5, + 0x3be0e0db, 0x38e1e1d9, 0x3de2e2df, 0x3ee3e3dd, + 0x37e4e4d3, 0x34e5e5d1, 0x31e6e6d7, 0x32e7e7d5, + 0x23e8e8cb, 0x20e9e9c9, 0x25eaeacf, 0x26ebebcd, + 0x2fececc3, 0x2cededc1, 0x29eeeec7, 0x2aefefc5, + 0x0bf0f0fb, 0x08f1f1f9, 0x0df2f2ff, 0x0ef3f3fd, + 0x07f4f4f3, 0x04f5f5f1, 0x01f6f6f7, 0x02f7f7f5, + 0x13f8f8eb, 0x10f9f9e9, 0x15fafaef, 0x16fbfbed, + 0x1ffcfce3, 0x1cfdfde1, 0x19fefee7, 0x1affffe5, + }, { + /* Inverse MixColumns lookup table */ + 0x00000000, 0x0b0d090e, 0x161a121c, 0x1d171b12, + 0x2c342438, 0x27392d36, 0x3a2e3624, 0x31233f2a, + 0x58684870, 0x5365417e, 0x4e725a6c, 0x457f5362, + 0x745c6c48, 0x7f516546, 0x62467e54, 0x694b775a, + 0xb0d090e0, 0xbbdd99ee, 0xa6ca82fc, 0xadc78bf2, + 0x9ce4b4d8, 0x97e9bdd6, 0x8afea6c4, 0x81f3afca, + 0xe8b8d890, 0xe3b5d19e, 0xfea2ca8c, 0xf5afc382, + 0xc48cfca8, 0xcf81f5a6, 0xd296eeb4, 0xd99be7ba, + 0x7bbb3bdb, 0x70b632d5, 0x6da129c7, 0x66ac20c9, + 0x578f1fe3, 0x5c8216ed, 0x41950dff, 0x4a9804f1, + 0x23d373ab, 0x28de7aa5, 0x35c961b7, 0x3ec468b9, + 0x0fe75793, 0x04ea5e9d, 0x19fd458f, 0x12f04c81, + 0xcb6bab3b, 0xc066a235, 0xdd71b927, 0xd67cb029, + 0xe75f8f03, 0xec52860d, 0xf1459d1f, 0xfa489411, + 0x9303e34b, 0x980eea45, 0x8519f157, 0x8e14f859, + 0xbf37c773, 0xb43ace7d, 0xa92dd56f, 0xa220dc61, + 0xf66d76ad, 0xfd607fa3, 0xe07764b1, 0xeb7a6dbf, + 0xda595295, 0xd1545b9b, 0xcc434089, 0xc74e4987, + 0xae053edd, 0xa50837d3, 0xb81f2cc1, 0xb31225cf, + 0x82311ae5, 0x893c13eb, 0x942b08f9, 0x9f2601f7, + 0x46bde64d, 0x4db0ef43, 0x50a7f451, 0x5baafd5f, + 0x6a89c275, 0x6184cb7b, 0x7c93d069, 0x779ed967, + 0x1ed5ae3d, 0x15d8a733, 0x08cfbc21, 0x03c2b52f, + 0x32e18a05, 0x39ec830b, 0x24fb9819, 0x2ff69117, + 0x8dd64d76, 0x86db4478, 0x9bcc5f6a, 0x90c15664, + 0xa1e2694e, 0xaaef6040, 0xb7f87b52, 0xbcf5725c, + 0xd5be0506, 0xdeb30c08, 0xc3a4171a, 0xc8a91e14, + 0xf98a213e, 0xf2872830, 0xef903322, 0xe49d3a2c, + 0x3d06dd96, 0x360bd498, 0x2b1ccf8a, 0x2011c684, + 0x1132f9ae, 0x1a3ff0a0, 0x0728ebb2, 0x0c25e2bc, + 0x656e95e6, 0x6e639ce8, 0x737487fa, 0x78798ef4, + 0x495ab1de, 0x4257b8d0, 0x5f40a3c2, 0x544daacc, + 0xf7daec41, 0xfcd7e54f, 0xe1c0fe5d, 0xeacdf753, + 0xdbeec879, 0xd0e3c177, 0xcdf4da65, 0xc6f9d36b, + 0xafb2a431, 0xa4bfad3f, 0xb9a8b62d, 0xb2a5bf23, + 0x83868009, 0x888b8907, 0x959c9215, 0x9e919b1b, + 0x470a7ca1, 0x4c0775af, 0x51106ebd, 0x5a1d67b3, + 0x6b3e5899, 0x60335197, 0x7d244a85, 0x7629438b, + 0x1f6234d1, 0x146f3ddf, 0x097826cd, 0x02752fc3, + 0x335610e9, 0x385b19e7, 0x254c02f5, 0x2e410bfb, + 0x8c61d79a, 0x876cde94, 0x9a7bc586, 0x9176cc88, + 0xa055f3a2, 0xab58faac, 0xb64fe1be, 0xbd42e8b0, + 0xd4099fea, 0xdf0496e4, 0xc2138df6, 0xc91e84f8, + 0xf83dbbd2, 0xf330b2dc, 0xee27a9ce, 0xe52aa0c0, + 0x3cb1477a, 0x37bc4e74, 0x2aab5566, 0x21a65c68, + 0x10856342, 0x1b886a4c, 0x069f715e, 0x0d927850, + 0x64d90f0a, 0x6fd40604, 0x72c31d16, 0x79ce1418, + 0x48ed2b32, 0x43e0223c, 0x5ef7392e, 0x55fa3020, + 0x01b79aec, 0x0aba93e2, 0x17ad88f0, 0x1ca081fe, + 0x2d83bed4, 0x268eb7da, 0x3b99acc8, 0x3094a5c6, + 0x59dfd29c, 0x52d2db92, 0x4fc5c080, 0x44c8c98e, + 0x75ebf6a4, 0x7ee6ffaa, 
0x63f1e4b8, 0x68fcedb6, + 0xb1670a0c, 0xba6a0302, 0xa77d1810, 0xac70111e, + 0x9d532e34, 0x965e273a, 0x8b493c28, 0x80443526, + 0xe90f427c, 0xe2024b72, 0xff155060, 0xf418596e, + 0xc53b6644, 0xce366f4a, 0xd3217458, 0xd82c7d56, + 0x7a0ca137, 0x7101a839, 0x6c16b32b, 0x671bba25, + 0x5638850f, 0x5d358c01, 0x40229713, 0x4b2f9e1d, + 0x2264e947, 0x2969e049, 0x347efb5b, 0x3f73f255, + 0x0e50cd7f, 0x055dc471, 0x184adf63, 0x1347d66d, + 0xcadc31d7, 0xc1d138d9, 0xdcc623cb, 0xd7cb2ac5, + 0xe6e815ef, 0xede51ce1, 0xf0f207f3, 0xfbff0efd, + 0x92b479a7, 0x99b970a9, 0x84ae6bbb, 0x8fa362b5, + 0xbe805d9f, 0xb58d5491, 0xa89a4f83, 0xa397468d, + } }; + + uint64_t *rd = vd; + uint64_t *rm = vm; + union CRYPTO_STATE st = { .l = { rm[0], rm[1] } }; + int i; + + assert(decrypt < 2); + + for (i = 0; i < 16; i += 4) { + CR_ST_WORD(st, i >> 2) = + mc[decrypt][CR_ST_BYTE(st, i)] ^ + rol32(mc[decrypt][CR_ST_BYTE(st, i + 1)], 8) ^ + rol32(mc[decrypt][CR_ST_BYTE(st, i + 2)], 16) ^ + rol32(mc[decrypt][CR_ST_BYTE(st, i + 3)], 24); + } + + rd[0] = st.l[0]; + rd[1] = st.l[1]; +} + +/* + * SHA-1 logical functions + */ + +static uint32_t cho(uint32_t x, uint32_t y, uint32_t z) +{ + return (x & (y ^ z)) ^ z; +} + +static uint32_t par(uint32_t x, uint32_t y, uint32_t z) +{ + return x ^ y ^ z; +} + +static uint32_t maj(uint32_t x, uint32_t y, uint32_t z) +{ + return (x & y) | ((x | y) & z); +} + +void HELPER(crypto_sha1_3reg)(void *vd, void *vn, void *vm, uint32_t op) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; + union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + + if (op == 3) { /* sha1su0 */ + d.l[0] ^= d.l[1] ^ m.l[0]; + d.l[1] ^= n.l[0] ^ m.l[1]; + } else { + int i; + + for (i = 0; i < 4; i++) { + uint32_t t = 0; + + switch (op) { + case 0: /* sha1c */ + t = cho(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3)); + break; + case 1: /* sha1p */ + t = par(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3)); + break; + case 2: /* sha1m */ + t = maj(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3)); + break; + default: + g_assert_not_reached(); + } + t += rol32(CR_ST_WORD(d, 0), 5) + CR_ST_WORD(n, 0) + + CR_ST_WORD(m, i); + + CR_ST_WORD(n, 0) = CR_ST_WORD(d, 3); + CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2); + CR_ST_WORD(d, 2) = ror32(CR_ST_WORD(d, 1), 2); + CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0); + CR_ST_WORD(d, 0) = t; + } + } + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} + +void HELPER(crypto_sha1h)(void *vd, void *vm) +{ + uint64_t *rd = vd; + uint64_t *rm = vm; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + + CR_ST_WORD(m, 0) = ror32(CR_ST_WORD(m, 0), 2); + CR_ST_WORD(m, 1) = CR_ST_WORD(m, 2) = CR_ST_WORD(m, 3) = 0; + + rd[0] = m.l[0]; + rd[1] = m.l[1]; +} + +void HELPER(crypto_sha1su1)(void *vd, void *vm) +{ + uint64_t *rd = vd; + uint64_t *rm = vm; + union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + + CR_ST_WORD(d, 0) = rol32(CR_ST_WORD(d, 0) ^ CR_ST_WORD(m, 1), 1); + CR_ST_WORD(d, 1) = rol32(CR_ST_WORD(d, 1) ^ CR_ST_WORD(m, 2), 1); + CR_ST_WORD(d, 2) = rol32(CR_ST_WORD(d, 2) ^ CR_ST_WORD(m, 3), 1); + CR_ST_WORD(d, 3) = rol32(CR_ST_WORD(d, 3) ^ CR_ST_WORD(d, 0), 1); + + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} + +/* + * The SHA-256 logical functions, according to + * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf + */ + +static uint32_t S0(uint32_t x) +{ + return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22); +} + +static uint32_t 
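+/*
+ * Naming note (FIPS 180-4 terms): S0/S1 here are the big-sigma round
+ * functions Sigma0(x) = ROTR^2 ^ ROTR^13 ^ ROTR^22 and
+ * Sigma1(x) = ROTR^6 ^ ROTR^11 ^ ROTR^25, while the lower-case s0/s1
+ * below are the small-sigma message-schedule functions
+ * sigma0(x) = ROTR^7 ^ ROTR^18 ^ SHR^3 and
+ * sigma1(x) = ROTR^17 ^ ROTR^19 ^ SHR^10.
+ */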
S1(uint32_t x) +{ + return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25); +} + +static uint32_t s0(uint32_t x) +{ + return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); +} + +static uint32_t s1(uint32_t x) +{ + return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); +} + +void HELPER(crypto_sha256h)(void *vd, void *vn, void *vm) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; + union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + int i; + + for (i = 0; i < 4; i++) { + uint32_t t = cho(CR_ST_WORD(n, 0), CR_ST_WORD(n, 1), CR_ST_WORD(n, 2)) + + CR_ST_WORD(n, 3) + S1(CR_ST_WORD(n, 0)) + + CR_ST_WORD(m, i); + + CR_ST_WORD(n, 3) = CR_ST_WORD(n, 2); + CR_ST_WORD(n, 2) = CR_ST_WORD(n, 1); + CR_ST_WORD(n, 1) = CR_ST_WORD(n, 0); + CR_ST_WORD(n, 0) = CR_ST_WORD(d, 3) + t; + + t += maj(CR_ST_WORD(d, 0), CR_ST_WORD(d, 1), CR_ST_WORD(d, 2)) + + S0(CR_ST_WORD(d, 0)); + + CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2); + CR_ST_WORD(d, 2) = CR_ST_WORD(d, 1); + CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0); + CR_ST_WORD(d, 0) = t; + } + + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} + +void HELPER(crypto_sha256h2)(void *vd, void *vn, void *vm) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; + union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + int i; + + for (i = 0; i < 4; i++) { + uint32_t t = cho(CR_ST_WORD(d, 0), CR_ST_WORD(d, 1), CR_ST_WORD(d, 2)) + + CR_ST_WORD(d, 3) + S1(CR_ST_WORD(d, 0)) + + CR_ST_WORD(m, i); + + CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2); + CR_ST_WORD(d, 2) = CR_ST_WORD(d, 1); + CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0); + CR_ST_WORD(d, 0) = CR_ST_WORD(n, 3 - i) + t; + } + + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} + +void HELPER(crypto_sha256su0)(void *vd, void *vm) +{ + uint64_t *rd = vd; + uint64_t *rm = vm; + union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + + CR_ST_WORD(d, 0) += s0(CR_ST_WORD(d, 1)); + CR_ST_WORD(d, 1) += s0(CR_ST_WORD(d, 2)); + CR_ST_WORD(d, 2) += s0(CR_ST_WORD(d, 3)); + CR_ST_WORD(d, 3) += s0(CR_ST_WORD(m, 0)); + + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} + +void HELPER(crypto_sha256su1)(void *vd, void *vn, void *vm) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; + union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + + CR_ST_WORD(d, 0) += s1(CR_ST_WORD(m, 2)) + CR_ST_WORD(n, 1); + CR_ST_WORD(d, 1) += s1(CR_ST_WORD(m, 3)) + CR_ST_WORD(n, 2); + CR_ST_WORD(d, 2) += s1(CR_ST_WORD(d, 0)) + CR_ST_WORD(n, 3); + CR_ST_WORD(d, 3) += s1(CR_ST_WORD(d, 1)) + CR_ST_WORD(m, 0); + + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} + +/* + * The SHA-512 logical functions (same as above but using 64-bit operands) + */ + +static uint64_t cho512(uint64_t x, uint64_t y, uint64_t z) +{ + return (x & (y ^ z)) ^ z; +} + +static uint64_t maj512(uint64_t x, uint64_t y, uint64_t z) +{ + return (x & y) | ((x | y) & z); +} + +static uint64_t S0_512(uint64_t x) +{ + return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39); +} + +static uint64_t S1_512(uint64_t x) +{ + return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41); +} + +static uint64_t s0_512(uint64_t x) +{ + return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); +} + +static uint64_t s1_512(uint64_t x) +{ + return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); +} + +void HELPER(crypto_sha512h)(void *vd, void *vn, void *vm) 
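+/*
+ * This helper and the next three back the ARMv8.2-SHA512 instructions
+ * (SHA512H, SHA512H2, SHA512SU0, SHA512SU1). Each 128-bit operand holds
+ * two 64-bit hash words, so each call advances two rounds' worth of the
+ * Sigma1/Ch (respectively Sigma0/Maj and schedule-sigma) work.
+ */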
+{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + uint64_t d0 = rd[0]; + uint64_t d1 = rd[1]; + + d1 += S1_512(rm[1]) + cho512(rm[1], rn[0], rn[1]); + d0 += S1_512(d1 + rm[0]) + cho512(d1 + rm[0], rm[1], rn[0]); + + rd[0] = d0; + rd[1] = d1; +} + +void HELPER(crypto_sha512h2)(void *vd, void *vn, void *vm) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + uint64_t d0 = rd[0]; + uint64_t d1 = rd[1]; + + d1 += S0_512(rm[0]) + maj512(rn[0], rm[1], rm[0]); + d0 += S0_512(d1) + maj512(d1, rm[0], rm[1]); + + rd[0] = d0; + rd[1] = d1; +} + +void HELPER(crypto_sha512su0)(void *vd, void *vn) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t d0 = rd[0]; + uint64_t d1 = rd[1]; + + d0 += s0_512(rd[1]); + d1 += s0_512(rn[0]); + + rd[0] = d0; + rd[1] = d1; +} + +void HELPER(crypto_sha512su1)(void *vd, void *vn, void *vm) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + + rd[0] += s1_512(rn[0]) + rm[0]; + rd[1] += s1_512(rn[1]) + rm[1]; +} + +void HELPER(crypto_sm3partw1)(void *vd, void *vn, void *vm) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; + union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + uint32_t t; + + t = CR_ST_WORD(d, 0) ^ CR_ST_WORD(n, 0) ^ ror32(CR_ST_WORD(m, 1), 17); + CR_ST_WORD(d, 0) = t ^ ror32(t, 17) ^ ror32(t, 9); + + t = CR_ST_WORD(d, 1) ^ CR_ST_WORD(n, 1) ^ ror32(CR_ST_WORD(m, 2), 17); + CR_ST_WORD(d, 1) = t ^ ror32(t, 17) ^ ror32(t, 9); + + t = CR_ST_WORD(d, 2) ^ CR_ST_WORD(n, 2) ^ ror32(CR_ST_WORD(m, 3), 17); + CR_ST_WORD(d, 2) = t ^ ror32(t, 17) ^ ror32(t, 9); + + t = CR_ST_WORD(d, 3) ^ CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(d, 0), 17); + CR_ST_WORD(d, 3) = t ^ ror32(t, 17) ^ ror32(t, 9); + + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} + +void HELPER(crypto_sm3partw2)(void *vd, void *vn, void *vm) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; + union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + uint32_t t = CR_ST_WORD(n, 0) ^ ror32(CR_ST_WORD(m, 0), 25); + + CR_ST_WORD(d, 0) ^= t; + CR_ST_WORD(d, 1) ^= CR_ST_WORD(n, 1) ^ ror32(CR_ST_WORD(m, 1), 25); + CR_ST_WORD(d, 2) ^= CR_ST_WORD(n, 2) ^ ror32(CR_ST_WORD(m, 2), 25); + CR_ST_WORD(d, 3) ^= CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(m, 3), 25) ^ + ror32(t, 17) ^ ror32(t, 2) ^ ror32(t, 26); + + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} + +void HELPER(crypto_sm3tt)(void *vd, void *vn, void *vm, uint32_t imm2, + uint32_t opcode) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; + union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + uint32_t t = 0; + + assert(imm2 < 4); + + if (opcode == 0 || opcode == 2) { + /* SM3TT1A, SM3TT2A */ + t = par(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1)); + } else if (opcode == 1) { + /* SM3TT1B */ + t = maj(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1)); + } else if (opcode == 3) { + /* SM3TT2B */ + t = cho(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1)); + } else { + g_assert_not_reached(); + } + + t += CR_ST_WORD(d, 0) + CR_ST_WORD(m, imm2); + + CR_ST_WORD(d, 0) = CR_ST_WORD(d, 1); + + if (opcode < 2) { + /* SM3TT1A, SM3TT1B */ + t += CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(d, 3), 20); + + CR_ST_WORD(d, 1) = ror32(CR_ST_WORD(d, 2), 23); + } else { + /* SM3TT2A, SM3TT2B */ + t += 
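+        /*
+         * The TT2 variants add the Vn word here and then apply SM3's P0
+         * permutation, P0(X) = X ^ (X <<< 9) ^ (X <<< 17), to the sum.
+         */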
CR_ST_WORD(n, 3); + t ^= rol32(t, 9) ^ rol32(t, 17); + + CR_ST_WORD(d, 1) = ror32(CR_ST_WORD(d, 2), 13); + } + + CR_ST_WORD(d, 2) = CR_ST_WORD(d, 3); + CR_ST_WORD(d, 3) = t; + + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} + +static uint8_t const sm4_sbox[] = { + 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, + 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, + 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, + 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, + 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, + 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, + 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, + 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, + 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, + 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, + 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, + 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, + 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, + 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, + 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, + 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, + 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, + 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, + 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, + 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, + 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, + 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, + 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, + 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, + 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, + 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, + 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, + 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, + 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, + 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, + 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, + 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48, +}; + +void HELPER(crypto_sm4e)(void *vd, void *vn) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + union CRYPTO_STATE d = { .l = { rd[0], rd[1] } }; + union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; + uint32_t t, i; + + for (i = 0; i < 4; i++) { + t = CR_ST_WORD(d, (i + 1) % 4) ^ + CR_ST_WORD(d, (i + 2) % 4) ^ + CR_ST_WORD(d, (i + 3) % 4) ^ + CR_ST_WORD(n, i); + + t = sm4_sbox[t & 0xff] | + sm4_sbox[(t >> 8) & 0xff] << 8 | + sm4_sbox[(t >> 16) & 0xff] << 16 | + sm4_sbox[(t >> 24) & 0xff] << 24; + + CR_ST_WORD(d, i) ^= t ^ rol32(t, 2) ^ rol32(t, 10) ^ rol32(t, 18) ^ + rol32(t, 24); + } + + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} + +void HELPER(crypto_sm4ekey)(void *vd, void *vn, void* vm) +{ + uint64_t *rd = vd; + uint64_t *rn = vn; + uint64_t *rm = vm; + union CRYPTO_STATE d; + union CRYPTO_STATE n = { .l = { rn[0], rn[1] } }; + union CRYPTO_STATE m = { .l = { rm[0], rm[1] } }; + uint32_t t, i; + + d = n; + for (i = 0; i < 4; i++) { + t = CR_ST_WORD(d, (i + 1) % 4) ^ + CR_ST_WORD(d, (i + 2) % 4) ^ + CR_ST_WORD(d, (i + 3) % 4) ^ + CR_ST_WORD(m, i); + + t = sm4_sbox[t & 0xff] | + sm4_sbox[(t >> 8) & 0xff] << 8 | + sm4_sbox[(t >> 16) & 0xff] << 16 | + sm4_sbox[(t >> 24) & 0xff] << 24; + + CR_ST_WORD(d, i) ^= t ^ rol32(t, 13) ^ rol32(t, 23); + } + + rd[0] = d.l[0]; + rd[1] = d.l[1]; +} diff --git a/qemu/target/arm/debug_helper.c b/qemu/target/arm/debug_helper.c new file mode 100644 index 00000000..96dcb7cf --- /dev/null +++ b/qemu/target/arm/debug_helper.c @@ -0,0 +1,333 @@ +/* + * ARM debug helpers. + * + * This code is licensed under the GNU GPL v2 or later. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" + +/* Return true if the linked breakpoint entry lbn passes its checks */ +static bool linked_bp_matches(ARMCPU *cpu, int lbn) +{ + CPUARMState *env = &cpu->env; + uint64_t bcr = env->cp15.dbgbcr[lbn]; + int brps = arm_num_brps(cpu); + int ctx_cmps = arm_num_ctx_cmps(cpu); + int bt; + uint32_t contextidr; + uint64_t hcr_el2; + + /* + * Links to unimplemented or non-context aware breakpoints are + * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or + * as if linked to an UNKNOWN context-aware breakpoint (in which + * case DBGWCR_EL1.LBN must indicate that breakpoint). + * We choose the former. + */ + if (lbn >= brps || lbn < (brps - ctx_cmps)) { + return false; + } + + bcr = env->cp15.dbgbcr[lbn]; + + if (extract64(bcr, 0, 1) == 0) { + /* Linked breakpoint disabled : generate no events */ + return false; + } + + bt = extract64(bcr, 20, 4); + hcr_el2 = arm_hcr_el2_eff(env); + + switch (bt) { + case 3: /* linked context ID match */ + switch (arm_current_el(env)) { + default: + /* Context matches never fire in AArch64 EL3 */ + return false; + case 2: + if (!(hcr_el2 & HCR_E2H)) { + /* Context matches never fire in EL2 without E2H enabled. */ + return false; + } + contextidr = env->cp15.contextidr_el[2]; + break; + case 1: + contextidr = env->cp15.contextidr_el[1]; + break; + case 0: + if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + contextidr = env->cp15.contextidr_el[2]; + } else { + contextidr = env->cp15.contextidr_el[1]; + } + break; + } + break; + + case 7: /* linked contextidr_el1 match */ + contextidr = env->cp15.contextidr_el[1]; + break; + case 13: /* linked contextidr_el2 match */ + contextidr = env->cp15.contextidr_el[2]; + break; + + case 9: /* linked VMID match (reserved if no EL2) */ + case 11: /* linked context ID and VMID match (reserved if no EL2) */ + case 15: /* linked full context ID match */ + default: + /* + * Links to Unlinked context breakpoints must generate no + * events; we choose to do the same for reserved values too. + */ + return false; + } + + /* + * We match the whole register even if this is AArch32 using the + * short descriptor format (in which case it holds both PROCID and ASID), + * since we don't implement the optional v7 context ID masking. + */ + return contextidr == (uint32_t)env->cp15.dbgbvr[lbn]; +} + +static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) +{ + CPUARMState *env = &cpu->env; + uint64_t cr; + int pac, hmc, ssc, wt, lbn; + /* + * Note that for watchpoints the check is against the CPU security + * state, not the S/NS attribute on the offending data access. + */ + bool is_secure = arm_is_secure(env); + int access_el = arm_current_el(env); + + if (is_wp) { + CPUWatchpoint *wp = env->cpu_watchpoint[n]; + + if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) { + return false; + } + cr = env->cp15.dbgwcr[n]; + if (wp->hitattrs.user) { + /* + * The LDRT/STRT/LDT/STT "unprivileged access" instructions should + * match watchpoints as if they were accesses done at EL0, even if + * the CPU is at EL1 or higher. + */ + access_el = 0; + } + } else { + uint64_t pc = is_a64(env) ? 
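+            /* AArch64 keeps the PC in env->pc; AArch32 exposes it as r15. */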
env->pc : env->regs[15]; + + if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) { + return false; + } + cr = env->cp15.dbgbcr[n]; + } + /* + * The WATCHPOINT_HIT flag guarantees us that the watchpoint is + * enabled and that the address and access type match; for breakpoints + * we know the address matched; check the remaining fields, including + * linked breakpoints. We rely on WCR and BCR having the same layout + * for the LBN, SSC, HMC, PAC/PMC and is-linked fields. + * Note that some combinations of {PAC, HMC, SSC} are reserved and + * must act either like some valid combination or as if the watchpoint + * were disabled. We choose the former, and use this together with + * the fact that EL3 must always be Secure and EL2 must always be + * Non-Secure to simplify the code slightly compared to the full + * table in the ARM ARM. + */ + pac = extract64(cr, 1, 2); + hmc = extract64(cr, 13, 1); + ssc = extract64(cr, 14, 2); + + switch (ssc) { + case 0: + break; + case 1: + case 3: + if (is_secure) { + return false; + } + break; + case 2: + if (!is_secure) { + return false; + } + break; + } + + switch (access_el) { + case 3: + case 2: + if (!hmc) { + return false; + } + break; + case 1: + if (extract32(pac, 0, 1) == 0) { + return false; + } + break; + case 0: + if (extract32(pac, 1, 1) == 0) { + return false; + } + break; + default: + g_assert_not_reached(); + } + + wt = extract64(cr, 20, 1); + lbn = extract64(cr, 16, 4); + + if (wt && !linked_bp_matches(cpu, lbn)) { + return false; + } + + return true; +} + +static bool check_watchpoints(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; + int n; + + /* + * If watchpoints are disabled globally or we can't take debug + * exceptions here then watchpoint firings are ignored. + */ + if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 + || !arm_generate_debug_exceptions(env)) { + return false; + } + + for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) { + if (bp_wp_matches(cpu, n, true)) { + return true; + } + } + return false; +} + +static bool check_breakpoints(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; + int n; + + /* + * If breakpoints are disabled globally or we can't take debug + * exceptions here then breakpoint firings are ignored. + */ + if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 + || !arm_generate_debug_exceptions(env)) { + return false; + } + + for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) { + if (bp_wp_matches(cpu, n, false)) { + return true; + } + } + return false; +} + +void HELPER(check_breakpoints)(CPUARMState *env) +{ + ARMCPU *cpu = env_archcpu(env); + + if (check_breakpoints(cpu)) { + HELPER(exception_internal(env, EXCP_DEBUG)); + } +} + +bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) +{ + /* + * Called by core code when a CPU watchpoint fires; need to check if this + * is also an architectural watchpoint match. + */ + ARMCPU *cpu = ARM_CPU(cs); + + return check_watchpoints(cpu); +} + +void arm_debug_excp_handler(CPUState *cs) +{ + /* + * Called by core code when a watchpoint or breakpoint fires; + * need to check which one and raise the appropriate exception. 
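+     * Watchpoint hits arrive with cs->watchpoint_hit already set by the
+     * core memory code and become a data abort with a watchpoint
+     * syndrome; architectural breakpoint hits become a prefetch abort
+     * with a breakpoint syndrome, after GDB breakpoints get first claim
+     * on the address.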
+ */ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + CPUWatchpoint *wp_hit = cs->watchpoint_hit; + + if (wp_hit) { + if (wp_hit->flags & BP_CPU) { + bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0; + bool same_el = arm_debug_target_el(env) == arm_current_el(env); + + cs->watchpoint_hit = NULL; + + env->exception.fsr = arm_debug_exception_fsr(env); + env->exception.vaddress = wp_hit->hitaddr; + raise_exception(env, EXCP_DATA_ABORT, + syn_watchpoint(same_el, 0, wnr), + arm_debug_target_el(env)); + } + } else { + uint64_t pc = is_a64(env) ? env->pc : env->regs[15]; + bool same_el = (arm_debug_target_el(env) == arm_current_el(env)); + + /* + * (1) GDB breakpoints should be handled first. + * (2) Do not raise a CPU exception if no CPU breakpoint has fired, + * since singlestep is also done by generating a debug internal + * exception. + */ + if (cpu_breakpoint_test(cs, pc, BP_GDB) + || !cpu_breakpoint_test(cs, pc, BP_CPU)) { + return; + } + + env->exception.fsr = arm_debug_exception_fsr(env); + /* + * FAR is UNKNOWN: clear vaddress to avoid potentially exposing + * values to the guest that it shouldn't be able to see at its + * exception/security level. + */ + env->exception.vaddress = 0; + raise_exception(env, EXCP_PREFETCH_ABORT, + syn_breakpoint(same_el), + arm_debug_target_el(env)); + } +} + +vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + /* + * In BE32 system mode, target memory is stored byteswapped (on a + * little-endian host system), and by the time we reach here (via an + * opcode helper) the addresses of subword accesses have been adjusted + * to account for that, which means that watchpoints will not match. + * Undo the adjustment here. + */ + if (arm_sctlr_b(env)) { + if (len == 1) { + addr ^= 3; + } else if (len == 2) { + addr ^= 2; + } + } + + return addr; +} diff --git a/qemu/target/arm/decode-a32-uncond.inc.c b/qemu/target/arm/decode-a32-uncond.inc.c new file mode 100644 index 00000000..3b6de817 --- /dev/null +++ b/qemu/target/arm/decode-a32-uncond.inc.c @@ -0,0 +1,301 @@ +/* This file is autogenerated by scripts/decodetree.py. 
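+ *
+ * Layout of the generated code: one arg_* struct per argument set,
+ * typedef aliases plus a trans_* callback prototype per decode pattern
+ * (the bodies live in translate.c), one extract function per format,
+ * and a disas_a32_uncond() switch over fixed opcode bits that fills a
+ * union of arg_* structs and returns true once a trans_* callback
+ * accepts the instruction.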
*/ + +typedef struct { + int A; + int F; + int I; + int M; + int imod; + int mode; +} arg_cps; + +typedef struct { + int pu; + int rn; + int w; +} arg_rfe; + +typedef struct { + int E; +} arg_setend; + +typedef struct { + int mode; + int pu; + int w; +} arg_srs; + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wredundant-decls" +# ifdef __clang__ +# pragma GCC diagnostic ignored "-Wtypedef-redefinition" +# endif +#endif + +typedef arg_i arg_BLX_i; +static bool trans_BLX_i(DisasContext *ctx, arg_BLX_i *a); +typedef arg_rfe arg_RFE; +static bool trans_RFE(DisasContext *ctx, arg_RFE *a); +typedef arg_srs arg_SRS; +static bool trans_SRS(DisasContext *ctx, arg_SRS *a); +typedef arg_cps arg_CPS; +static bool trans_CPS(DisasContext *ctx, arg_CPS *a); +typedef arg_empty arg_CLREX; +static bool trans_CLREX(DisasContext *ctx, arg_CLREX *a); +typedef arg_empty arg_DSB; +static bool trans_DSB(DisasContext *ctx, arg_DSB *a); +typedef arg_empty arg_DMB; +static bool trans_DMB(DisasContext *ctx, arg_DMB *a); +typedef arg_empty arg_ISB; +static bool trans_ISB(DisasContext *ctx, arg_ISB *a); +typedef arg_empty arg_SB; +static bool trans_SB(DisasContext *ctx, arg_SB *a); +typedef arg_setend arg_SETEND; +static bool trans_SETEND(DisasContext *ctx, arg_SETEND *a); +typedef arg_empty arg_PLD; +static bool trans_PLD(DisasContext *ctx, arg_PLD *a); +typedef arg_empty arg_PLDW; +static bool trans_PLDW(DisasContext *ctx, arg_PLDW *a); +typedef arg_empty arg_PLI; +static bool trans_PLI(DisasContext *ctx, arg_PLI *a); + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +# pragma GCC diagnostic pop +#endif + +static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_0(DisasContext *ctx, arg_i *a, uint32_t insn) +{ + a->imm = times_2(ctx, deposit32(extract32(insn, 24, 1), 1, 31, sextract32(insn, 0, 24))); +} + +static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_1(DisasContext *ctx, arg_rfe *a, uint32_t insn) +{ + a->pu = extract32(insn, 23, 2); + a->w = extract32(insn, 21, 1); + a->rn = extract32(insn, 16, 4); +} + +static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_2(DisasContext *ctx, arg_srs *a, uint32_t insn) +{ + a->pu = extract32(insn, 23, 2); + a->w = extract32(insn, 21, 1); + a->mode = extract32(insn, 0, 5); +} + +static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_3(DisasContext *ctx, arg_cps *a, uint32_t insn) +{ + a->imod = extract32(insn, 18, 2); + a->M = extract32(insn, 17, 1); + a->A = extract32(insn, 8, 1); + a->I = extract32(insn, 7, 1); + a->F = extract32(insn, 6, 1); + a->mode = extract32(insn, 0, 5); +} + +static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_4(DisasContext *ctx, arg_empty *a, uint32_t insn) +{ +} + +static void disas_a32_uncond_extract_disas_a32_uncond_Fmt_5(DisasContext *ctx, arg_setend *a, uint32_t insn) +{ + a->E = extract32(insn, 9, 1); +} + +static bool disas_a32_uncond(DisasContext *ctx, uint32_t insn) +{ + union { + arg_cps f_cps; + arg_empty f_empty; + arg_i f_i; + arg_rfe f_rfe; + arg_setend f_setend; + arg_srs f_srs; + } u; + + switch ((insn >> 25) & 0x7f) { + case 0x78: + /* 1111000. ........ ........ ........ */ + switch (insn & 0x01f1fc20) { + case 0x01000000: + /* 11110001 0000...0 000000.. ..0..... */ + disas_a32_uncond_extract_disas_a32_uncond_Fmt_3(ctx, &u.f_cps, insn); + switch ((insn >> 9) & 0x1) { + case 0x0: + /* 11110001 0000...0 0000000. ..0..... 
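+                 * (dots mark don't-care bits; the path comment under each
+                 * pattern cites the .decode source line it was generated from)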
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:43 */ + if (trans_CPS(ctx, &u.f_cps)) return true; + return false; + } + return false; + case 0x01010000: + /* 11110001 0000...1 000000.. ..0..... */ + disas_a32_uncond_extract_disas_a32_uncond_Fmt_5(ctx, &u.f_setend, insn); + switch (insn & 0x000e01df) { + case 0x00000000: + /* 11110001 00000001 000000.0 00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:56 */ + if (trans_SETEND(ctx, &u.f_setend)) return true; + return false; + } + return false; + } + return false; + case 0x7a: + /* 1111010. ........ ........ ........ */ + disas_a32_uncond_extract_disas_a32_uncond_Fmt_4(ctx, &u.f_empty, insn); + switch (insn & 0x01700000) { + case 0x00100000: + /* 11110100 .001.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:73 */ + if (trans_PLDW(ctx, &u.f_empty)) return true; + return false; + case 0x00500000: + /* 11110100 .101.... ........ ........ */ + switch ((insn >> 12) & 0xf) { + case 0xf: + /* 11110100 .101.... 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:62 */ + if (trans_PLI(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x01100000: + /* 11110101 .001.... ........ ........ */ + switch ((insn >> 12) & 0xf) { + case 0xf: + /* 11110101 .001.... 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:61 */ + if (trans_PLDW(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x01500000: + /* 11110101 .101.... ........ ........ */ + switch ((insn >> 12) & 0xf) { + case 0xf: + /* 11110101 .101.... 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:60 */ + if (trans_PLD(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x01700000: + /* 11110101 .111.... ........ ........ */ + switch (insn & 0x008ffff0) { + case 0x000ff010: + /* 11110101 01111111 11110000 0001.... */ + switch (insn & 0x0000000f) { + case 0x0000000f: + /* 11110101 01111111 11110000 00011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:49 */ + if (trans_CLREX(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x000ff040: + /* 11110101 01111111 11110000 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:50 */ + if (trans_DSB(ctx, &u.f_empty)) return true; + return false; + case 0x000ff050: + /* 11110101 01111111 11110000 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:51 */ + if (trans_DMB(ctx, &u.f_empty)) return true; + return false; + case 0x000ff060: + /* 11110101 01111111 11110000 0110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:52 */ + if (trans_ISB(ctx, &u.f_empty)) return true; + return false; + case 0x000ff070: + /* 11110101 01111111 11110000 0111.... 
*/ + switch (insn & 0x0000000f) { + case 0x00000000: + /* 11110101 01111111 11110000 01110000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:53 */ + if (trans_SB(ctx, &u.f_empty)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0x7b: + /* 1111011. ........ ........ ........ */ + disas_a32_uncond_extract_disas_a32_uncond_Fmt_4(ctx, &u.f_empty, insn); + switch (insn & 0x01700010) { + case 0x00100000: + /* 11110110 .001.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:74 */ + if (trans_PLDW(ctx, &u.f_empty)) return true; + return false; + case 0x00500000: + /* 11110110 .101.... ........ ...0.... */ + switch ((insn >> 12) & 0xf) { + case 0xf: + /* 11110110 .101.... 1111.... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:66 */ + if (trans_PLI(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x01100000: + /* 11110111 .001.... ........ ...0.... */ + switch ((insn >> 12) & 0xf) { + case 0xf: + /* 11110111 .001.... 1111.... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:65 */ + if (trans_PLDW(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x01500000: + /* 11110111 .101.... ........ ...0.... */ + switch ((insn >> 12) & 0xf) { + case 0xf: + /* 11110111 .101.... 1111.... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:64 */ + if (trans_PLD(ctx, &u.f_empty)) return true; + return false; + } + return false; + } + return false; + case 0x7c: + /* 1111100. ........ ........ ........ */ + switch (insn & 0x0050ffe0) { + case 0x00100a00: + /* 1111100. .0.1.... 00001010 000..... */ + disas_a32_uncond_extract_disas_a32_uncond_Fmt_1(ctx, &u.f_rfe, insn); + switch (insn & 0x0000001f) { + case 0x00000000: + /* 1111100. .0.1.... 00001010 00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:41 */ + if (trans_RFE(ctx, &u.f_rfe)) return true; + return false; + } + return false; + case 0x00400500: + /* 1111100. .1.0.... 00000101 000..... */ + disas_a32_uncond_extract_disas_a32_uncond_Fmt_2(ctx, &u.f_srs, insn); + switch ((insn >> 16) & 0xf) { + case 0xd: + /* 1111100. .1.01101 00000101 000..... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:42 */ + if (trans_SRS(ctx, &u.f_srs)) return true; + return false; + } + return false; + } + return false; + case 0x7d: + /* 1111101. ........ ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32-uncond.decode:33 */ + disas_a32_uncond_extract_disas_a32_uncond_Fmt_0(ctx, &u.f_i, insn); + if (trans_BLX_i(ctx, &u.f_i)) return true; + return false; + } + return false; +} diff --git a/qemu/target/arm/decode-a32.inc.c b/qemu/target/arm/decode-a32.inc.c new file mode 100644 index 00000000..cf6c644a --- /dev/null +++ b/qemu/target/arm/decode-a32.inc.c @@ -0,0 +1,3375 @@ +/* This file is autogenerated by scripts/decodetree.py. 
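+ *
+ * Conditional-A32 counterpart of the decoder above, using the same
+ * arg-struct/trans_* scheme. translate.c dispatches into it roughly as
+ *
+ *     if (!disas_a32(ctx, insn)) {
+ *         // fall through to the remaining legacy decoders
+ *     }
+ *
+ * (an illustrative sketch; the real caller also tries the VFP decoder
+ * before giving up on the instruction).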
*/ + +typedef struct { + int lsb; + int msb; + int rd; + int rn; +} arg_bfi; + +typedef struct { + int lsb; + int rd; + int rn; + int widthm1; +} arg_bfx; + +typedef struct { + int rn; + int rt; + int rt2; +} arg_disas_a3226; + +typedef struct { +#ifdef _MSC_VER + int dummy; +#endif +} arg_empty; + +typedef struct { + int imm; +} arg_i; + +typedef struct { + int imm; + int rn; + int rt; + int rt2; +} arg_ldrex; + +typedef struct { + int b; + int i; + int list; + int rn; + int u; + int w; +} arg_ldst_block; + +typedef struct { + int imm; + int p; + int rn; + int rt; + int u; + int w; +} arg_ldst_ri; + +typedef struct { + int p; + int rm; + int rn; + int rt; + int shimm; + int shtype; + int u; + int w; +} arg_ldst_rr; + +typedef struct { + int r; + int rd; + int sysm; +} arg_mrs_bank; + +typedef struct { + int r; + int rd; +} arg_mrs_reg; + +typedef struct { + int r; + int rn; + int sysm; +} arg_msr_bank; + +typedef struct { + int imm; + int mask; + int r; + int rot; +} arg_msr_i; + +typedef struct { + int mask; + int r; + int rn; +} arg_msr_reg; + +typedef struct { + int imm; + int rd; + int rm; + int rn; + int tb; +} arg_pkh; + +typedef struct { + int rm; +} arg_r; + +typedef struct { + int imm; + int rd; +} arg_ri; + +typedef struct { + int rd; + int rm; +} arg_rr; + +typedef struct { + int rd; + int rm; + int rn; +} arg_rrr; + +typedef struct { + int rd; + int rm; + int rn; + int rot; +} arg_rrr_rot; + +typedef struct { + int ra; + int rd; + int rm; + int rn; +} arg_rrrr; + +typedef struct { + int imm; + int rd; + int rn; + int rot; + int s; +} arg_s_rri_rot; + +typedef struct { + int rd; + int rm; + int rn; + int s; + int shim; + int shty; +} arg_s_rrr_shi; + +typedef struct { + int rd; + int rm; + int rn; + int rs; + int s; + int shty; +} arg_s_rrr_shr; + +typedef struct { + int ra; + int rd; + int rm; + int rn; + int s; +} arg_s_rrrr; + +typedef struct { + int imm; + int rd; + int rn; + int satimm; + int sh; +} arg_sat; + +typedef struct { + int imm; + int rd; + int rn; + int rt; + int rt2; +} arg_strex; + +typedef arg_s_rrr_shi arg_AND_rrri; +static bool trans_AND_rrri(DisasContext *ctx, arg_AND_rrri *a); +typedef arg_s_rrr_shi arg_EOR_rrri; +static bool trans_EOR_rrri(DisasContext *ctx, arg_EOR_rrri *a); +typedef arg_s_rrr_shi arg_SUB_rrri; +static bool trans_SUB_rrri(DisasContext *ctx, arg_SUB_rrri *a); +typedef arg_s_rrr_shi arg_RSB_rrri; +static bool trans_RSB_rrri(DisasContext *ctx, arg_RSB_rrri *a); +typedef arg_s_rrr_shi arg_ADD_rrri; +static bool trans_ADD_rrri(DisasContext *ctx, arg_ADD_rrri *a); +typedef arg_s_rrr_shi arg_ADC_rrri; +static bool trans_ADC_rrri(DisasContext *ctx, arg_ADC_rrri *a); +typedef arg_s_rrr_shi arg_SBC_rrri; +static bool trans_SBC_rrri(DisasContext *ctx, arg_SBC_rrri *a); +typedef arg_s_rrr_shi arg_RSC_rrri; +static bool trans_RSC_rrri(DisasContext *ctx, arg_RSC_rrri *a); +typedef arg_s_rrr_shi arg_TST_xrri; +static bool trans_TST_xrri(DisasContext *ctx, arg_TST_xrri *a); +typedef arg_s_rrr_shi arg_TEQ_xrri; +static bool trans_TEQ_xrri(DisasContext *ctx, arg_TEQ_xrri *a); +typedef arg_s_rrr_shi arg_CMP_xrri; +static bool trans_CMP_xrri(DisasContext *ctx, arg_CMP_xrri *a); +typedef arg_s_rrr_shi arg_CMN_xrri; +static bool trans_CMN_xrri(DisasContext *ctx, arg_CMN_xrri *a); +typedef arg_s_rrr_shi arg_ORR_rrri; +static bool trans_ORR_rrri(DisasContext *ctx, arg_ORR_rrri *a); +typedef arg_s_rrr_shi arg_MOV_rxri; +static bool trans_MOV_rxri(DisasContext *ctx, arg_MOV_rxri *a); +typedef arg_s_rrr_shi arg_BIC_rrri; +static bool trans_BIC_rrri(DisasContext 
*ctx, arg_BIC_rrri *a); +typedef arg_s_rrr_shi arg_MVN_rxri; +static bool trans_MVN_rxri(DisasContext *ctx, arg_MVN_rxri *a); +typedef arg_ri arg_MOVW; +static bool trans_MOVW(DisasContext *ctx, arg_MOVW *a); +typedef arg_ri arg_MOVT; +static bool trans_MOVT(DisasContext *ctx, arg_MOVT *a); +typedef arg_s_rrr_shr arg_AND_rrrr; +static bool trans_AND_rrrr(DisasContext *ctx, arg_AND_rrrr *a); +typedef arg_s_rrr_shr arg_EOR_rrrr; +static bool trans_EOR_rrrr(DisasContext *ctx, arg_EOR_rrrr *a); +typedef arg_s_rrr_shr arg_SUB_rrrr; +static bool trans_SUB_rrrr(DisasContext *ctx, arg_SUB_rrrr *a); +typedef arg_s_rrr_shr arg_RSB_rrrr; +static bool trans_RSB_rrrr(DisasContext *ctx, arg_RSB_rrrr *a); +typedef arg_s_rrr_shr arg_ADD_rrrr; +static bool trans_ADD_rrrr(DisasContext *ctx, arg_ADD_rrrr *a); +typedef arg_s_rrr_shr arg_ADC_rrrr; +static bool trans_ADC_rrrr(DisasContext *ctx, arg_ADC_rrrr *a); +typedef arg_s_rrr_shr arg_SBC_rrrr; +static bool trans_SBC_rrrr(DisasContext *ctx, arg_SBC_rrrr *a); +typedef arg_s_rrr_shr arg_RSC_rrrr; +static bool trans_RSC_rrrr(DisasContext *ctx, arg_RSC_rrrr *a); +typedef arg_s_rrr_shr arg_TST_xrrr; +static bool trans_TST_xrrr(DisasContext *ctx, arg_TST_xrrr *a); +typedef arg_s_rrr_shr arg_TEQ_xrrr; +static bool trans_TEQ_xrrr(DisasContext *ctx, arg_TEQ_xrrr *a); +typedef arg_s_rrr_shr arg_CMP_xrrr; +static bool trans_CMP_xrrr(DisasContext *ctx, arg_CMP_xrrr *a); +typedef arg_s_rrr_shr arg_CMN_xrrr; +static bool trans_CMN_xrrr(DisasContext *ctx, arg_CMN_xrrr *a); +typedef arg_s_rrr_shr arg_ORR_rrrr; +static bool trans_ORR_rrrr(DisasContext *ctx, arg_ORR_rrrr *a); +typedef arg_s_rrr_shr arg_MOV_rxrr; +static bool trans_MOV_rxrr(DisasContext *ctx, arg_MOV_rxrr *a); +typedef arg_s_rrr_shr arg_BIC_rrrr; +static bool trans_BIC_rrrr(DisasContext *ctx, arg_BIC_rrrr *a); +typedef arg_s_rrr_shr arg_MVN_rxrr; +static bool trans_MVN_rxrr(DisasContext *ctx, arg_MVN_rxrr *a); +typedef arg_s_rri_rot arg_AND_rri; +static bool trans_AND_rri(DisasContext *ctx, arg_AND_rri *a); +typedef arg_s_rri_rot arg_EOR_rri; +static bool trans_EOR_rri(DisasContext *ctx, arg_EOR_rri *a); +typedef arg_s_rri_rot arg_SUB_rri; +static bool trans_SUB_rri(DisasContext *ctx, arg_SUB_rri *a); +typedef arg_s_rri_rot arg_RSB_rri; +static bool trans_RSB_rri(DisasContext *ctx, arg_RSB_rri *a); +typedef arg_s_rri_rot arg_ADD_rri; +static bool trans_ADD_rri(DisasContext *ctx, arg_ADD_rri *a); +typedef arg_s_rri_rot arg_ADC_rri; +static bool trans_ADC_rri(DisasContext *ctx, arg_ADC_rri *a); +typedef arg_s_rri_rot arg_SBC_rri; +static bool trans_SBC_rri(DisasContext *ctx, arg_SBC_rri *a); +typedef arg_s_rri_rot arg_RSC_rri; +static bool trans_RSC_rri(DisasContext *ctx, arg_RSC_rri *a); +typedef arg_s_rri_rot arg_TST_xri; +static bool trans_TST_xri(DisasContext *ctx, arg_TST_xri *a); +typedef arg_s_rri_rot arg_TEQ_xri; +static bool trans_TEQ_xri(DisasContext *ctx, arg_TEQ_xri *a); +typedef arg_s_rri_rot arg_CMP_xri; +static bool trans_CMP_xri(DisasContext *ctx, arg_CMP_xri *a); +typedef arg_s_rri_rot arg_CMN_xri; +static bool trans_CMN_xri(DisasContext *ctx, arg_CMN_xri *a); +typedef arg_s_rri_rot arg_ORR_rri; +static bool trans_ORR_rri(DisasContext *ctx, arg_ORR_rri *a); +typedef arg_s_rri_rot arg_MOV_rxi; +static bool trans_MOV_rxi(DisasContext *ctx, arg_MOV_rxi *a); +typedef arg_s_rri_rot arg_BIC_rri; +static bool trans_BIC_rri(DisasContext *ctx, arg_BIC_rri *a); +typedef arg_s_rri_rot arg_MVN_rxi; +static bool trans_MVN_rxi(DisasContext *ctx, arg_MVN_rxi *a); +typedef arg_s_rrrr arg_MUL; +static bool 
trans_MUL(DisasContext *ctx, arg_MUL *a); +typedef arg_s_rrrr arg_MLA; +static bool trans_MLA(DisasContext *ctx, arg_MLA *a); +typedef arg_rrrr arg_UMAAL; +static bool trans_UMAAL(DisasContext *ctx, arg_UMAAL *a); +typedef arg_rrrr arg_MLS; +static bool trans_MLS(DisasContext *ctx, arg_MLS *a); +typedef arg_s_rrrr arg_UMULL; +static bool trans_UMULL(DisasContext *ctx, arg_UMULL *a); +typedef arg_s_rrrr arg_UMLAL; +static bool trans_UMLAL(DisasContext *ctx, arg_UMLAL *a); +typedef arg_s_rrrr arg_SMULL; +static bool trans_SMULL(DisasContext *ctx, arg_SMULL *a); +typedef arg_s_rrrr arg_SMLAL; +static bool trans_SMLAL(DisasContext *ctx, arg_SMLAL *a); +typedef arg_rrr arg_QADD; +static bool trans_QADD(DisasContext *ctx, arg_QADD *a); +typedef arg_rrr arg_QSUB; +static bool trans_QSUB(DisasContext *ctx, arg_QSUB *a); +typedef arg_rrr arg_QDADD; +static bool trans_QDADD(DisasContext *ctx, arg_QDADD *a); +typedef arg_rrr arg_QDSUB; +static bool trans_QDSUB(DisasContext *ctx, arg_QDSUB *a); +typedef arg_rrrr arg_SMLABB; +static bool trans_SMLABB(DisasContext *ctx, arg_SMLABB *a); +typedef arg_rrrr arg_SMLABT; +static bool trans_SMLABT(DisasContext *ctx, arg_SMLABT *a); +typedef arg_rrrr arg_SMLATB; +static bool trans_SMLATB(DisasContext *ctx, arg_SMLATB *a); +typedef arg_rrrr arg_SMLATT; +static bool trans_SMLATT(DisasContext *ctx, arg_SMLATT *a); +typedef arg_rrrr arg_SMLAWB; +static bool trans_SMLAWB(DisasContext *ctx, arg_SMLAWB *a); +typedef arg_rrrr arg_SMULWB; +static bool trans_SMULWB(DisasContext *ctx, arg_SMULWB *a); +typedef arg_rrrr arg_SMLAWT; +static bool trans_SMLAWT(DisasContext *ctx, arg_SMLAWT *a); +typedef arg_rrrr arg_SMULWT; +static bool trans_SMULWT(DisasContext *ctx, arg_SMULWT *a); +typedef arg_rrrr arg_SMLALBB; +static bool trans_SMLALBB(DisasContext *ctx, arg_SMLALBB *a); +typedef arg_rrrr arg_SMLALBT; +static bool trans_SMLALBT(DisasContext *ctx, arg_SMLALBT *a); +typedef arg_rrrr arg_SMLALTB; +static bool trans_SMLALTB(DisasContext *ctx, arg_SMLALTB *a); +typedef arg_rrrr arg_SMLALTT; +static bool trans_SMLALTT(DisasContext *ctx, arg_SMLALTT *a); +typedef arg_rrrr arg_SMULBB; +static bool trans_SMULBB(DisasContext *ctx, arg_SMULBB *a); +typedef arg_rrrr arg_SMULBT; +static bool trans_SMULBT(DisasContext *ctx, arg_SMULBT *a); +typedef arg_rrrr arg_SMULTB; +static bool trans_SMULTB(DisasContext *ctx, arg_SMULTB *a); +typedef arg_rrrr arg_SMULTT; +static bool trans_SMULTT(DisasContext *ctx, arg_SMULTT *a); +typedef arg_empty arg_YIELD; +static bool trans_YIELD(DisasContext *ctx, arg_YIELD *a); +typedef arg_empty arg_WFE; +static bool trans_WFE(DisasContext *ctx, arg_WFE *a); +typedef arg_empty arg_WFI; +static bool trans_WFI(DisasContext *ctx, arg_WFI *a); +typedef arg_empty arg_NOP; +static bool trans_NOP(DisasContext *ctx, arg_NOP *a); +typedef arg_msr_i arg_MSR_imm; +static bool trans_MSR_imm(DisasContext *ctx, arg_MSR_imm *a); +typedef arg_rrr arg_CRC32B; +static bool trans_CRC32B(DisasContext *ctx, arg_CRC32B *a); +typedef arg_rrr arg_CRC32H; +static bool trans_CRC32H(DisasContext *ctx, arg_CRC32H *a); +typedef arg_rrr arg_CRC32W; +static bool trans_CRC32W(DisasContext *ctx, arg_CRC32W *a); +typedef arg_rrr arg_CRC32CB; +static bool trans_CRC32CB(DisasContext *ctx, arg_CRC32CB *a); +typedef arg_rrr arg_CRC32CH; +static bool trans_CRC32CH(DisasContext *ctx, arg_CRC32CH *a); +typedef arg_rrr arg_CRC32CW; +static bool trans_CRC32CW(DisasContext *ctx, arg_CRC32CW *a); +typedef arg_mrs_bank arg_MRS_bank; +static bool trans_MRS_bank(DisasContext *ctx, arg_MRS_bank *a); 
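+/*
+ * Each typedef/prototype pair here aliases a shared argument struct to a
+ * per-pattern name; the trans_* bodies are supplied by translate.c. A
+ * typical shape (illustrative sketch, not the real implementation):
+ *
+ *     static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
+ *     {
+ *         store_reg(s, a->rd, tcg_const_i32(a->imm)); // write imm16 to Rd
+ *         return true;                                // pattern accepted
+ *     }
+ */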
+typedef arg_msr_bank arg_MSR_bank; +static bool trans_MSR_bank(DisasContext *ctx, arg_MSR_bank *a); +typedef arg_mrs_reg arg_MRS_reg; +static bool trans_MRS_reg(DisasContext *ctx, arg_MRS_reg *a); +typedef arg_msr_reg arg_MSR_reg; +static bool trans_MSR_reg(DisasContext *ctx, arg_MSR_reg *a); +typedef arg_r arg_BX; +static bool trans_BX(DisasContext *ctx, arg_BX *a); +typedef arg_r arg_BXJ; +static bool trans_BXJ(DisasContext *ctx, arg_BXJ *a); +typedef arg_r arg_BLX_r; +static bool trans_BLX_r(DisasContext *ctx, arg_BLX_r *a); +typedef arg_rr arg_CLZ; +static bool trans_CLZ(DisasContext *ctx, arg_CLZ *a); +typedef arg_empty arg_ERET; +static bool trans_ERET(DisasContext *ctx, arg_ERET *a); +typedef arg_i arg_HLT; +static bool trans_HLT(DisasContext *ctx, arg_HLT *a); +typedef arg_i arg_BKPT; +static bool trans_BKPT(DisasContext *ctx, arg_BKPT *a); +typedef arg_i arg_HVC; +static bool trans_HVC(DisasContext *ctx, arg_HVC *a); +typedef arg_i arg_SMC; +static bool trans_SMC(DisasContext *ctx, arg_SMC *a); +typedef arg_ldst_rr arg_STRH_rr; +static bool trans_STRH_rr(DisasContext *ctx, arg_STRH_rr *a); +typedef arg_ldst_rr arg_LDRD_rr; +static bool trans_LDRD_rr(DisasContext *ctx, arg_LDRD_rr *a); +typedef arg_ldst_rr arg_STRD_rr; +static bool trans_STRD_rr(DisasContext *ctx, arg_STRD_rr *a); +typedef arg_ldst_rr arg_LDRH_rr; +static bool trans_LDRH_rr(DisasContext *ctx, arg_LDRH_rr *a); +typedef arg_ldst_rr arg_LDRSB_rr; +static bool trans_LDRSB_rr(DisasContext *ctx, arg_LDRSB_rr *a); +typedef arg_ldst_rr arg_LDRSH_rr; +static bool trans_LDRSH_rr(DisasContext *ctx, arg_LDRSH_rr *a); +typedef arg_ldst_rr arg_STRHT_rr; +static bool trans_STRHT_rr(DisasContext *ctx, arg_STRHT_rr *a); +typedef arg_ldst_rr arg_LDRHT_rr; +static bool trans_LDRHT_rr(DisasContext *ctx, arg_LDRHT_rr *a); +typedef arg_ldst_rr arg_LDRSBT_rr; +static bool trans_LDRSBT_rr(DisasContext *ctx, arg_LDRSBT_rr *a); +typedef arg_ldst_rr arg_LDRSHT_rr; +static bool trans_LDRSHT_rr(DisasContext *ctx, arg_LDRSHT_rr *a); +typedef arg_ldst_rr arg_STR_rr; +static bool trans_STR_rr(DisasContext *ctx, arg_STR_rr *a); +typedef arg_ldst_rr arg_STRB_rr; +static bool trans_STRB_rr(DisasContext *ctx, arg_STRB_rr *a); +typedef arg_ldst_rr arg_LDR_rr; +static bool trans_LDR_rr(DisasContext *ctx, arg_LDR_rr *a); +typedef arg_ldst_rr arg_LDRB_rr; +static bool trans_LDRB_rr(DisasContext *ctx, arg_LDRB_rr *a); +typedef arg_ldst_rr arg_STRT_rr; +static bool trans_STRT_rr(DisasContext *ctx, arg_STRT_rr *a); +typedef arg_ldst_rr arg_STRBT_rr; +static bool trans_STRBT_rr(DisasContext *ctx, arg_STRBT_rr *a); +typedef arg_ldst_rr arg_LDRT_rr; +static bool trans_LDRT_rr(DisasContext *ctx, arg_LDRT_rr *a); +typedef arg_ldst_rr arg_LDRBT_rr; +static bool trans_LDRBT_rr(DisasContext *ctx, arg_LDRBT_rr *a); +typedef arg_ldst_ri arg_STRH_ri; +static bool trans_STRH_ri(DisasContext *ctx, arg_STRH_ri *a); +typedef arg_ldst_ri arg_LDRD_ri_a32; +static bool trans_LDRD_ri_a32(DisasContext *ctx, arg_LDRD_ri_a32 *a); +typedef arg_ldst_ri arg_STRD_ri_a32; +static bool trans_STRD_ri_a32(DisasContext *ctx, arg_STRD_ri_a32 *a); +typedef arg_ldst_ri arg_LDRH_ri; +static bool trans_LDRH_ri(DisasContext *ctx, arg_LDRH_ri *a); +typedef arg_ldst_ri arg_LDRSB_ri; +static bool trans_LDRSB_ri(DisasContext *ctx, arg_LDRSB_ri *a); +typedef arg_ldst_ri arg_LDRSH_ri; +static bool trans_LDRSH_ri(DisasContext *ctx, arg_LDRSH_ri *a); +typedef arg_ldst_ri arg_STRHT_ri; +static bool trans_STRHT_ri(DisasContext *ctx, arg_STRHT_ri *a); +typedef arg_ldst_ri arg_LDRHT_ri; +static bool 
trans_LDRHT_ri(DisasContext *ctx, arg_LDRHT_ri *a); +typedef arg_ldst_ri arg_LDRSBT_ri; +static bool trans_LDRSBT_ri(DisasContext *ctx, arg_LDRSBT_ri *a); +typedef arg_ldst_ri arg_LDRSHT_ri; +static bool trans_LDRSHT_ri(DisasContext *ctx, arg_LDRSHT_ri *a); +typedef arg_ldst_ri arg_STR_ri; +static bool trans_STR_ri(DisasContext *ctx, arg_STR_ri *a); +typedef arg_ldst_ri arg_STRB_ri; +static bool trans_STRB_ri(DisasContext *ctx, arg_STRB_ri *a); +typedef arg_ldst_ri arg_LDR_ri; +static bool trans_LDR_ri(DisasContext *ctx, arg_LDR_ri *a); +typedef arg_ldst_ri arg_LDRB_ri; +static bool trans_LDRB_ri(DisasContext *ctx, arg_LDRB_ri *a); +typedef arg_ldst_ri arg_STRT_ri; +static bool trans_STRT_ri(DisasContext *ctx, arg_STRT_ri *a); +typedef arg_ldst_ri arg_STRBT_ri; +static bool trans_STRBT_ri(DisasContext *ctx, arg_STRBT_ri *a); +typedef arg_ldst_ri arg_LDRT_ri; +static bool trans_LDRT_ri(DisasContext *ctx, arg_LDRT_ri *a); +typedef arg_ldst_ri arg_LDRBT_ri; +static bool trans_LDRBT_ri(DisasContext *ctx, arg_LDRBT_ri *a); +typedef arg_disas_a3226 arg_SWP; +static bool trans_SWP(DisasContext *ctx, arg_SWP *a); +typedef arg_disas_a3226 arg_SWPB; +static bool trans_SWPB(DisasContext *ctx, arg_SWPB *a); +typedef arg_strex arg_STREX; +static bool trans_STREX(DisasContext *ctx, arg_STREX *a); +typedef arg_strex arg_STREXD_a32; +static bool trans_STREXD_a32(DisasContext *ctx, arg_STREXD_a32 *a); +typedef arg_strex arg_STREXB; +static bool trans_STREXB(DisasContext *ctx, arg_STREXB *a); +typedef arg_strex arg_STREXH; +static bool trans_STREXH(DisasContext *ctx, arg_STREXH *a); +typedef arg_strex arg_STLEX; +static bool trans_STLEX(DisasContext *ctx, arg_STLEX *a); +typedef arg_strex arg_STLEXD_a32; +static bool trans_STLEXD_a32(DisasContext *ctx, arg_STLEXD_a32 *a); +typedef arg_strex arg_STLEXB; +static bool trans_STLEXB(DisasContext *ctx, arg_STLEXB *a); +typedef arg_strex arg_STLEXH; +static bool trans_STLEXH(DisasContext *ctx, arg_STLEXH *a); +typedef arg_ldrex arg_STL; +static bool trans_STL(DisasContext *ctx, arg_STL *a); +typedef arg_ldrex arg_STLB; +static bool trans_STLB(DisasContext *ctx, arg_STLB *a); +typedef arg_ldrex arg_STLH; +static bool trans_STLH(DisasContext *ctx, arg_STLH *a); +typedef arg_ldrex arg_LDREX; +static bool trans_LDREX(DisasContext *ctx, arg_LDREX *a); +typedef arg_ldrex arg_LDREXD_a32; +static bool trans_LDREXD_a32(DisasContext *ctx, arg_LDREXD_a32 *a); +typedef arg_ldrex arg_LDREXB; +static bool trans_LDREXB(DisasContext *ctx, arg_LDREXB *a); +typedef arg_ldrex arg_LDREXH; +static bool trans_LDREXH(DisasContext *ctx, arg_LDREXH *a); +typedef arg_ldrex arg_LDAEX; +static bool trans_LDAEX(DisasContext *ctx, arg_LDAEX *a); +typedef arg_ldrex arg_LDAEXD_a32; +static bool trans_LDAEXD_a32(DisasContext *ctx, arg_LDAEXD_a32 *a); +typedef arg_ldrex arg_LDAEXB; +static bool trans_LDAEXB(DisasContext *ctx, arg_LDAEXB *a); +typedef arg_ldrex arg_LDAEXH; +static bool trans_LDAEXH(DisasContext *ctx, arg_LDAEXH *a); +typedef arg_ldrex arg_LDA; +static bool trans_LDA(DisasContext *ctx, arg_LDA *a); +typedef arg_ldrex arg_LDAB; +static bool trans_LDAB(DisasContext *ctx, arg_LDAB *a); +typedef arg_ldrex arg_LDAH; +static bool trans_LDAH(DisasContext *ctx, arg_LDAH *a); +typedef arg_rrrr arg_USADA8; +static bool trans_USADA8(DisasContext *ctx, arg_USADA8 *a); +typedef arg_bfx arg_SBFX; +static bool trans_SBFX(DisasContext *ctx, arg_SBFX *a); +typedef arg_bfx arg_UBFX; +static bool trans_UBFX(DisasContext *ctx, arg_UBFX *a); +typedef arg_bfi arg_BFCI; +static bool 
trans_BFCI(DisasContext *ctx, arg_BFCI *a); +typedef arg_empty arg_UDF; +static bool trans_UDF(DisasContext *ctx, arg_UDF *a); +typedef arg_rrr arg_SADD16; +static bool trans_SADD16(DisasContext *ctx, arg_SADD16 *a); +typedef arg_rrr arg_SASX; +static bool trans_SASX(DisasContext *ctx, arg_SASX *a); +typedef arg_rrr arg_SSAX; +static bool trans_SSAX(DisasContext *ctx, arg_SSAX *a); +typedef arg_rrr arg_SSUB16; +static bool trans_SSUB16(DisasContext *ctx, arg_SSUB16 *a); +typedef arg_rrr arg_SADD8; +static bool trans_SADD8(DisasContext *ctx, arg_SADD8 *a); +typedef arg_rrr arg_SSUB8; +static bool trans_SSUB8(DisasContext *ctx, arg_SSUB8 *a); +typedef arg_rrr arg_QADD16; +static bool trans_QADD16(DisasContext *ctx, arg_QADD16 *a); +typedef arg_rrr arg_QASX; +static bool trans_QASX(DisasContext *ctx, arg_QASX *a); +typedef arg_rrr arg_QSAX; +static bool trans_QSAX(DisasContext *ctx, arg_QSAX *a); +typedef arg_rrr arg_QSUB16; +static bool trans_QSUB16(DisasContext *ctx, arg_QSUB16 *a); +typedef arg_rrr arg_QADD8; +static bool trans_QADD8(DisasContext *ctx, arg_QADD8 *a); +typedef arg_rrr arg_QSUB8; +static bool trans_QSUB8(DisasContext *ctx, arg_QSUB8 *a); +typedef arg_rrr arg_SHADD16; +static bool trans_SHADD16(DisasContext *ctx, arg_SHADD16 *a); +typedef arg_rrr arg_SHASX; +static bool trans_SHASX(DisasContext *ctx, arg_SHASX *a); +typedef arg_rrr arg_SHSAX; +static bool trans_SHSAX(DisasContext *ctx, arg_SHSAX *a); +typedef arg_rrr arg_SHSUB16; +static bool trans_SHSUB16(DisasContext *ctx, arg_SHSUB16 *a); +typedef arg_rrr arg_SHADD8; +static bool trans_SHADD8(DisasContext *ctx, arg_SHADD8 *a); +typedef arg_rrr arg_SHSUB8; +static bool trans_SHSUB8(DisasContext *ctx, arg_SHSUB8 *a); +typedef arg_rrr arg_UADD16; +static bool trans_UADD16(DisasContext *ctx, arg_UADD16 *a); +typedef arg_rrr arg_UASX; +static bool trans_UASX(DisasContext *ctx, arg_UASX *a); +typedef arg_rrr arg_USAX; +static bool trans_USAX(DisasContext *ctx, arg_USAX *a); +typedef arg_rrr arg_USUB16; +static bool trans_USUB16(DisasContext *ctx, arg_USUB16 *a); +typedef arg_rrr arg_UADD8; +static bool trans_UADD8(DisasContext *ctx, arg_UADD8 *a); +typedef arg_rrr arg_USUB8; +static bool trans_USUB8(DisasContext *ctx, arg_USUB8 *a); +typedef arg_rrr arg_UQADD16; +static bool trans_UQADD16(DisasContext *ctx, arg_UQADD16 *a); +typedef arg_rrr arg_UQASX; +static bool trans_UQASX(DisasContext *ctx, arg_UQASX *a); +typedef arg_rrr arg_UQSAX; +static bool trans_UQSAX(DisasContext *ctx, arg_UQSAX *a); +typedef arg_rrr arg_UQSUB16; +static bool trans_UQSUB16(DisasContext *ctx, arg_UQSUB16 *a); +typedef arg_rrr arg_UQADD8; +static bool trans_UQADD8(DisasContext *ctx, arg_UQADD8 *a); +typedef arg_rrr arg_UQSUB8; +static bool trans_UQSUB8(DisasContext *ctx, arg_UQSUB8 *a); +typedef arg_rrr arg_UHADD16; +static bool trans_UHADD16(DisasContext *ctx, arg_UHADD16 *a); +typedef arg_rrr arg_UHASX; +static bool trans_UHASX(DisasContext *ctx, arg_UHASX *a); +typedef arg_rrr arg_UHSAX; +static bool trans_UHSAX(DisasContext *ctx, arg_UHSAX *a); +typedef arg_rrr arg_UHSUB16; +static bool trans_UHSUB16(DisasContext *ctx, arg_UHSUB16 *a); +typedef arg_rrr arg_UHADD8; +static bool trans_UHADD8(DisasContext *ctx, arg_UHADD8 *a); +typedef arg_rrr arg_UHSUB8; +static bool trans_UHSUB8(DisasContext *ctx, arg_UHSUB8 *a); +typedef arg_pkh arg_PKH; +static bool trans_PKH(DisasContext *ctx, arg_PKH *a); +typedef arg_sat arg_SSAT; +static bool trans_SSAT(DisasContext *ctx, arg_SSAT *a); +typedef arg_sat arg_USAT; +static bool trans_USAT(DisasContext *ctx, 
arg_USAT *a); +typedef arg_sat arg_SSAT16; +static bool trans_SSAT16(DisasContext *ctx, arg_SSAT16 *a); +typedef arg_sat arg_USAT16; +static bool trans_USAT16(DisasContext *ctx, arg_USAT16 *a); +typedef arg_rrr_rot arg_SXTAB16; +static bool trans_SXTAB16(DisasContext *ctx, arg_SXTAB16 *a); +typedef arg_rrr_rot arg_SXTAB; +static bool trans_SXTAB(DisasContext *ctx, arg_SXTAB *a); +typedef arg_rrr_rot arg_SXTAH; +static bool trans_SXTAH(DisasContext *ctx, arg_SXTAH *a); +typedef arg_rrr_rot arg_UXTAB16; +static bool trans_UXTAB16(DisasContext *ctx, arg_UXTAB16 *a); +typedef arg_rrr_rot arg_UXTAB; +static bool trans_UXTAB(DisasContext *ctx, arg_UXTAB *a); +typedef arg_rrr_rot arg_UXTAH; +static bool trans_UXTAH(DisasContext *ctx, arg_UXTAH *a); +typedef arg_rrr arg_SEL; +static bool trans_SEL(DisasContext *ctx, arg_SEL *a); +typedef arg_rr arg_REV; +static bool trans_REV(DisasContext *ctx, arg_REV *a); +typedef arg_rr arg_REV16; +static bool trans_REV16(DisasContext *ctx, arg_REV16 *a); +typedef arg_rr arg_REVSH; +static bool trans_REVSH(DisasContext *ctx, arg_REVSH *a); +typedef arg_rr arg_RBIT; +static bool trans_RBIT(DisasContext *ctx, arg_RBIT *a); +typedef arg_rrrr arg_SMLAD; +static bool trans_SMLAD(DisasContext *ctx, arg_SMLAD *a); +typedef arg_rrrr arg_SMLADX; +static bool trans_SMLADX(DisasContext *ctx, arg_SMLADX *a); +typedef arg_rrrr arg_SMLSD; +static bool trans_SMLSD(DisasContext *ctx, arg_SMLSD *a); +typedef arg_rrrr arg_SMLSDX; +static bool trans_SMLSDX(DisasContext *ctx, arg_SMLSDX *a); +typedef arg_rrr arg_SDIV; +static bool trans_SDIV(DisasContext *ctx, arg_SDIV *a); +typedef arg_rrr arg_UDIV; +static bool trans_UDIV(DisasContext *ctx, arg_UDIV *a); +typedef arg_rrrr arg_SMLALD; +static bool trans_SMLALD(DisasContext *ctx, arg_SMLALD *a); +typedef arg_rrrr arg_SMLALDX; +static bool trans_SMLALDX(DisasContext *ctx, arg_SMLALDX *a); +typedef arg_rrrr arg_SMLSLD; +static bool trans_SMLSLD(DisasContext *ctx, arg_SMLSLD *a); +typedef arg_rrrr arg_SMLSLDX; +static bool trans_SMLSLDX(DisasContext *ctx, arg_SMLSLDX *a); +typedef arg_rrrr arg_SMMLA; +static bool trans_SMMLA(DisasContext *ctx, arg_SMMLA *a); +typedef arg_rrrr arg_SMMLAR; +static bool trans_SMMLAR(DisasContext *ctx, arg_SMMLAR *a); +typedef arg_rrrr arg_SMMLS; +static bool trans_SMMLS(DisasContext *ctx, arg_SMMLS *a); +typedef arg_rrrr arg_SMMLSR; +static bool trans_SMMLSR(DisasContext *ctx, arg_SMMLSR *a); +typedef arg_ldst_block arg_STM; +static bool trans_STM(DisasContext *ctx, arg_STM *a); +typedef arg_ldst_block arg_LDM_a32; +static bool trans_LDM_a32(DisasContext *ctx, arg_LDM_a32 *a); +typedef arg_i arg_B; +static bool trans_B(DisasContext *ctx, arg_B *a); +typedef arg_i arg_BL; +static bool trans_BL(DisasContext *ctx, arg_BL *a); +typedef arg_i arg_SVC; +static bool trans_SVC(DisasContext *ctx, arg_SVC *a); + +static void disas_a32_extract_S_xri_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->imm = extract32(insn, 0, 8); + a->rot = times_2(ctx, extract32(insn, 8, 4)); + a->rd = 0; + a->s = 1; +} + +static void disas_a32_extract_S_xrr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->shim = extract32(insn, 7, 5); + a->shty = extract32(insn, 5, 2); + a->rm = extract32(insn, 0, 4); + a->s = 1; + a->rd = 0; +} + +static void disas_a32_extract_S_xrr_shr(DisasContext *ctx, arg_s_rrr_shr *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rs = extract32(insn, 8, 4); + a->shty = extract32(insn, 5, 2); + a->rm = 
extract32(insn, 0, 4); + a->rd = 0; + a->s = 1; +} + +static void disas_a32_extract_bfx(DisasContext *ctx, arg_bfx *a, uint32_t insn) +{ + a->widthm1 = extract32(insn, 16, 5); + a->rd = extract32(insn, 12, 4); + a->lsb = extract32(insn, 7, 5); + a->rn = extract32(insn, 0, 4); +} + +static void disas_a32_extract_branch(DisasContext *ctx, arg_i *a, uint32_t insn) +{ + a->imm = times_4(ctx, sextract32(insn, 0, 24)); +} + +static void disas_a32_extract_disas_a32_Fmt_16(DisasContext *ctx, arg_empty *a, uint32_t insn) +{ +} + +static void disas_a32_extract_disas_a32_Fmt_20(DisasContext *ctx, arg_mrs_bank *a, uint32_t insn) +{ + a->r = extract32(insn, 22, 1); + a->rd = extract32(insn, 12, 4); + a->sysm = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 8, 1)); +} + +static void disas_a32_extract_disas_a32_Fmt_21(DisasContext *ctx, arg_msr_bank *a, uint32_t insn) +{ + a->r = extract32(insn, 22, 1); + a->rn = extract32(insn, 0, 4); + a->sysm = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 8, 1)); +} + +static void disas_a32_extract_disas_a32_Fmt_22(DisasContext *ctx, arg_mrs_reg *a, uint32_t insn) +{ + a->r = extract32(insn, 22, 1); + a->rd = extract32(insn, 12, 4); +} + +static void disas_a32_extract_disas_a32_Fmt_23(DisasContext *ctx, arg_msr_reg *a, uint32_t insn) +{ + a->r = extract32(insn, 22, 1); + a->mask = extract32(insn, 16, 4); + a->rn = extract32(insn, 0, 4); +} + +static void disas_a32_extract_disas_a32_Fmt_24(DisasContext *ctx, arg_i *a, uint32_t insn) +{ + a->imm = extract32(insn, 0, 4); +} + +static void disas_a32_extract_disas_a32_Fmt_42(DisasContext *ctx, arg_bfi *a, uint32_t insn) +{ + a->msb = extract32(insn, 16, 5); + a->rd = extract32(insn, 12, 4); + a->lsb = extract32(insn, 7, 5); + a->rn = extract32(insn, 0, 4); +} + +static void disas_a32_extract_disas_a32_Fmt_43(DisasContext *ctx, arg_pkh *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 12, 4); + a->imm = extract32(insn, 7, 5); + a->tb = extract32(insn, 6, 1); + a->rm = extract32(insn, 0, 4); +} + +static void disas_a32_extract_disas_a32_Fmt_48(DisasContext *ctx, arg_ldst_block *a, uint32_t insn) +{ + a->b = extract32(insn, 24, 1); + a->i = extract32(insn, 23, 1); + a->u = extract32(insn, 22, 1); + a->w = extract32(insn, 21, 1); + a->rn = extract32(insn, 16, 4); + a->list = extract32(insn, 0, 16); +} + +static void disas_a32_extract_disas_a32_Fmt_50(DisasContext *ctx, arg_i *a, uint32_t insn) +{ + a->imm = extract32(insn, 0, 24); +} + +static void disas_a32_extract_i16(DisasContext *ctx, arg_i *a, uint32_t insn) +{ + a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 8, 12)); +} + +static void disas_a32_extract_ldrex(DisasContext *ctx, arg_ldrex *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->imm = 0; + a->rt2 = 15; +} + +static void disas_a32_extract_ldst_ri12_p0w1(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->imm = extract32(insn, 0, 12); + a->p = 0; + a->w = 1; +} + +static void disas_a32_extract_ldst_ri12_p1w(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->w = extract32(insn, 21, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->imm = extract32(insn, 0, 12); + a->p = 1; +} + +static void disas_a32_extract_ldst_ri12_pw0(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rn = extract32(insn,
16, 4); + a->rt = extract32(insn, 12, 4); + a->imm = extract32(insn, 0, 12); + a->p = 0; + a->w = 0; +} + +static void disas_a32_extract_ldst_ri8_p0w1(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 8, 4)); + a->p = 0; + a->w = 1; +} + +static void disas_a32_extract_ldst_ri8_p1w(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->w = extract32(insn, 21, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 8, 4)); + a->p = 1; +} + +static void disas_a32_extract_ldst_ri8_pw0(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 8, 4)); + a->p = 0; + a->w = 0; +} + +static void disas_a32_extract_ldst_rr_p0w1(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rm = extract32(insn, 0, 4); + a->p = 0; + a->w = 1; + a->shimm = 0; + a->shtype = 0; +} + +static void disas_a32_extract_ldst_rr_p1w(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->w = extract32(insn, 21, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rm = extract32(insn, 0, 4); + a->p = 1; + a->shimm = 0; + a->shtype = 0; +} + +static void disas_a32_extract_ldst_rr_pw0(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rm = extract32(insn, 0, 4); + a->p = 0; + a->w = 0; + a->shimm = 0; + a->shtype = 0; +} + +static void disas_a32_extract_ldst_rs_p0w1(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->shimm = extract32(insn, 7, 5); + a->shtype = extract32(insn, 5, 2); + a->rm = extract32(insn, 0, 4); + a->p = 0; + a->w = 1; +} + +static void disas_a32_extract_ldst_rs_p1w(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->w = extract32(insn, 21, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->shimm = extract32(insn, 7, 5); + a->shtype = extract32(insn, 5, 2); + a->rm = extract32(insn, 0, 4); + a->p = 1; +} + +static void disas_a32_extract_ldst_rs_pw0(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->shimm = extract32(insn, 7, 5); + a->shtype = extract32(insn, 5, 2); + a->rm = extract32(insn, 0, 4); + a->p = 0; + a->w = 0; +} + +static void disas_a32_extract_mov16(DisasContext *ctx, arg_ri *a, uint32_t insn) +{ + a->rd = extract32(insn, 12, 4); + a->imm = deposit32(extract32(insn, 0, 12), 12, 20, extract32(insn, 16, 4)); +} + +static void disas_a32_extract_msr_i(DisasContext *ctx, arg_msr_i *a, uint32_t insn) +{ + a->mask = extract32(insn, 16, 4); + a->rot = extract32(insn, 8, 4); + a->imm = extract32(insn, 0, 8); +} + +static void disas_a32_extract_rd0mn(DisasContext *ctx, arg_rrrr *a, uint32_t insn) +{ + a->rd = extract32(insn, 16, 4); + a->rm = extract32(insn, 8, 4); + a->rn = extract32(insn, 0, 4); + a->ra = 0; +} +
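Every extractor in this generated file is built from the same three bit-field helpers that QEMU's decodetree generator emits calls to: extract32(), sextract32() and deposit32() from qemu/include/qemu/bitops.h. As a sketch of their semantics only, the standalone program below re-implements them locally and reproduces the exact expression the ldst_ri8 extractors use to reassemble the split imm4h:imm4l immediate of the halfword load/store encodings; the sample instruction word is made up for the demonstration and is not a complete valid encoding.

#include <stdint.h>
#include <stdio.h>

/* Local mirrors of qemu/include/qemu/bitops.h (illustration only). */
static uint32_t extract32(uint32_t value, int start, int length)
{
    /* Return the 'length'-bit field starting at bit 'start'. */
    return (value >> start) & (~0U >> (32 - length));
}

static int32_t sextract32(uint32_t value, int start, int length)
{
    /* Shift the field up to bit 31, then arithmetic-shift back down
     * so the field's top bit becomes the sign. */
    return (int32_t)(value << (32 - length - start)) >> (32 - length);
}

static uint32_t deposit32(uint32_t value, int start, int length, uint32_t fieldval)
{
    /* Overwrite the 'length'-bit field at 'start' with 'fieldval'. */
    uint32_t mask = (~0U >> (32 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    /* Made-up word with imm4h = 0xA (bits 8-11) and imm4l = 0x5 (bits 0-3). */
    uint32_t insn = 0x00000A05;

    /* Same expression as the generated ldst_ri8 extractors: take imm4l,
     * then deposit imm4h into everything from bit 4 upward.  The window
     * width of 28 is harmless because the deposited field is only 4 bits. */
    uint32_t imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 8, 4));
    printf("imm = 0x%02x\n", imm);                          /* prints 0xa5 */

    /* sextract32 is what disas_a32_extract_branch() feeds to times_4():
     * a 24-bit two's-complement branch offset comes back sign-extended. */
    printf("offset = %d\n", sextract32(0x00ffffff, 0, 24)); /* prints -1 */
    return 0;
}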
+static void disas_a32_extract_rdamn(DisasContext *ctx, arg_rrrr *a, uint32_t insn) +{ + a->rd = extract32(insn, 16, 4); + a->ra = extract32(insn, 12, 4); + a->rm = extract32(insn, 8, 4); + a->rn = extract32(insn, 0, 4); +} + +static void disas_a32_extract_rdm(DisasContext *ctx, arg_rr *a, uint32_t insn) +{ + a->rd = extract32(insn, 12, 4); + a->rm = extract32(insn, 0, 4); +} + +static void disas_a32_extract_rdmn(DisasContext *ctx, arg_rrr *a, uint32_t insn) +{ + a->rd = extract32(insn, 16, 4); + a->rm = extract32(insn, 8, 4); + a->rn = extract32(insn, 0, 4); +} + +static void disas_a32_extract_rm(DisasContext *ctx, arg_r *a, uint32_t insn) +{ + a->rm = extract32(insn, 0, 4); +} + +static void disas_a32_extract_rndm(DisasContext *ctx, arg_rrr *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 12, 4); + a->rm = extract32(insn, 0, 4); +} + +static void disas_a32_extract_rrr_rot(DisasContext *ctx, arg_rrr_rot *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 12, 4); + a->rot = extract32(insn, 10, 2); + a->rm = extract32(insn, 0, 4); +} + +static void disas_a32_extract_s_rd0mn(DisasContext *ctx, arg_s_rrrr *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rd = extract32(insn, 16, 4); + a->rm = extract32(insn, 8, 4); + a->rn = extract32(insn, 0, 4); + a->ra = 0; +} + +static void disas_a32_extract_s_rdamn(DisasContext *ctx, arg_s_rrrr *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rd = extract32(insn, 16, 4); + a->ra = extract32(insn, 12, 4); + a->rm = extract32(insn, 8, 4); + a->rn = extract32(insn, 0, 4); +} + +static void disas_a32_extract_s_rri_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 12, 4); + a->imm = extract32(insn, 0, 8); + a->rot = times_2(ctx, extract32(insn, 8, 4)); +} + +static void disas_a32_extract_s_rrr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 12, 4); + a->shim = extract32(insn, 7, 5); + a->shty = extract32(insn, 5, 2); + a->rm = extract32(insn, 0, 4); +} + +static void disas_a32_extract_s_rrr_shr(DisasContext *ctx, arg_s_rrr_shr *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 12, 4); + a->rs = extract32(insn, 8, 4); + a->shty = extract32(insn, 5, 2); + a->rm = extract32(insn, 0, 4); +} + +static void disas_a32_extract_s_rxi_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rd = extract32(insn, 12, 4); + a->imm = extract32(insn, 0, 8); + a->rot = times_2(ctx, extract32(insn, 8, 4)); + a->rn = 0; +} + +static void disas_a32_extract_s_rxr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rd = extract32(insn, 12, 4); + a->shim = extract32(insn, 7, 5); + a->shty = extract32(insn, 5, 2); + a->rm = extract32(insn, 0, 4); + a->rn = 0; +} + +static void disas_a32_extract_s_rxr_shr(DisasContext *ctx, arg_s_rrr_shr *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rd = extract32(insn, 12, 4); + a->rs = extract32(insn, 8, 4); + a->shty = extract32(insn, 5, 2); + a->rm = extract32(insn, 0, 4); + a->rn = 0; +} + +static void disas_a32_extract_sat(DisasContext *ctx, arg_sat *a, uint32_t insn) +{ + a->satimm = extract32(insn, 16, 5); + a->rd = extract32(insn, 12, 4); + a->imm = extract32(insn, 7, 5); + a->sh = 
extract32(insn, 6, 1); + a->rn = extract32(insn, 0, 4); +} + +static void disas_a32_extract_sat16(DisasContext *ctx, arg_sat *a, uint32_t insn) +{ + a->satimm = extract32(insn, 16, 4); + a->rd = extract32(insn, 12, 4); + a->rn = extract32(insn, 0, 4); + a->imm = 0; + a->sh = 0; +} + +static void disas_a32_extract_stl(DisasContext *ctx, arg_ldrex *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 0, 4); + a->imm = 0; + a->rt2 = 15; +} + +static void disas_a32_extract_strex(DisasContext *ctx, arg_strex *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 12, 4); + a->rt = extract32(insn, 0, 4); + a->imm = 0; + a->rt2 = 15; +} + +static void disas_a32_extract_swp(DisasContext *ctx, arg_disas_a3226 *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rt2 = extract32(insn, 0, 4); +} + +static bool disas_a32(DisasContext *ctx, uint32_t insn) +{ + union { + arg_bfi f_bfi; + arg_bfx f_bfx; + arg_disas_a3226 f_disas_a3226; + arg_empty f_empty; + arg_i f_i; + arg_ldrex f_ldrex; + arg_ldst_block f_ldst_block; + arg_ldst_ri f_ldst_ri; + arg_ldst_rr f_ldst_rr; + arg_mrs_bank f_mrs_bank; + arg_mrs_reg f_mrs_reg; + arg_msr_bank f_msr_bank; + arg_msr_i f_msr_i; + arg_msr_reg f_msr_reg; + arg_pkh f_pkh; + arg_r f_r; + arg_ri f_ri; + arg_rr f_rr; + arg_rrr f_rrr; + arg_rrr_rot f_rrr_rot; + arg_rrrr f_rrrr; + arg_s_rri_rot f_s_rri_rot; + arg_s_rrr_shi f_s_rrr_shi; + arg_s_rrr_shr f_s_rrr_shr; + arg_s_rrrr f_s_rrrr; + arg_sat f_sat; + arg_strex f_strex; + } u; + + switch ((insn >> 25) & 0x7) { + case 0x0: + /* ....000. ........ ........ ........ */ + switch (insn & 0x01000010) { + case 0x00000000: + /* ....0000 ........ ........ ...0.... */ + disas_a32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); + switch ((insn >> 21) & 0x7) { + case 0x0: + /* ....0000 000..... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:60 */ + if (trans_AND_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x1: + /* ....0000 001..... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:61 */ + if (trans_EOR_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x2: + /* ....0000 010..... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:62 */ + if (trans_SUB_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x3: + /* ....0000 011..... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:63 */ + if (trans_RSB_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x4: + /* ....0000 100..... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:64 */ + if (trans_ADD_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x5: + /* ....0000 101..... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:65 */ + if (trans_ADC_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x6: + /* ....0000 110..... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:66 */ + if (trans_SBC_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x7: + /* ....0000 111..... ........ ...0.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:67 */ + if (trans_RSC_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + case 0x00000010: + /* ....0000 ........ ........ ...1.... */ + switch (insn & 0x00600080) { + case 0x00000000: + /* ....0000 .00..... ........ 0..1.... */ + disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....0000 000..... ........ 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:92 */ + if (trans_AND_rrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + case 0x1: + /* ....0000 100..... ........ 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:96 */ + if (trans_ADD_rrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + case 0x00000080: + /* ....0000 .00..... ........ 1..1.... */ + switch ((insn >> 5) & 0x3) { + case 0x0: + /* ....0000 .00..... ........ 1001.... */ + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....0000 000..... ........ 1001.... */ + disas_a32_extract_s_rd0mn(ctx, &u.f_s_rrrr, insn); + switch ((insn >> 12) & 0xf) { + case 0x0: + /* ....0000 000..... 0000.... 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:144 */ + if (trans_MUL(ctx, &u.f_s_rrrr)) return true; + return false; + } + return false; + case 0x1: + /* ....0000 100..... ........ 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:148 */ + disas_a32_extract_s_rdamn(ctx, &u.f_s_rrrr, insn); + if (trans_UMULL(ctx, &u.f_s_rrrr)) return true; + return false; + } + return false; + case 0x1: + /* ....0000 .00..... ........ 1011.... */ + disas_a32_extract_ldst_rr_pw0(ctx, &u.f_ldst_rr, insn); + switch (insn & 0x00100f00) { + case 0x00000000: + /* ....0000 .000.... ....0000 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:249 */ + if (trans_STRH_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x00100000: + /* ....0000 .001.... ....0000 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:258 */ + if (trans_LDRH_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x2: + /* ....0000 .00..... ........ 1101.... */ + disas_a32_extract_ldst_rr_pw0(ctx, &u.f_ldst_rr, insn); + switch (insn & 0x00100f00) { + case 0x00000000: + /* ....0000 .000.... ....0000 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:252 */ + if (trans_LDRD_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x00100000: + /* ....0000 .001.... ....0000 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:261 */ + if (trans_LDRSB_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x3: + /* ....0000 .00..... ........ 1111.... */ + disas_a32_extract_ldst_rr_pw0(ctx, &u.f_ldst_rr, insn); + switch (insn & 0x00100f00) { + case 0x00000000: + /* ....0000 .000.... ....0000 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:255 */ + if (trans_STRD_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x00100000: + /* ....0000 .001.... ....0000 1111.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:264 */ + if (trans_LDRSH_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + } + return false; + case 0x00200000: + /* ....0000 .01..... ........ 0..1.... */ + disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....0000 001..... ........ 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:93 */ + if (trans_EOR_rrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + case 0x1: + /* ....0000 101..... ........ 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:97 */ + if (trans_ADC_rrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + case 0x00200080: + /* ....0000 .01..... ........ 1..1.... */ + switch ((insn >> 5) & 0x3) { + case 0x0: + /* ....0000 .01..... ........ 1001.... */ + disas_a32_extract_s_rdamn(ctx, &u.f_s_rrrr, insn); + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....0000 001..... ........ 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:145 */ + if (trans_MLA(ctx, &u.f_s_rrrr)) return true; + return false; + case 0x1: + /* ....0000 101..... ........ 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:149 */ + if (trans_UMLAL(ctx, &u.f_s_rrrr)) return true; + return false; + } + return false; + case 0x1: + /* ....0000 .01..... ........ 1011.... */ + disas_a32_extract_ldst_rr_p0w1(ctx, &u.f_ldst_rr, insn); + switch (insn & 0x00100f00) { + case 0x00000000: + /* ....0000 .010.... ....0000 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:272 */ + if (trans_STRHT_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x00100000: + /* ....0000 .011.... ....0000 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:273 */ + if (trans_LDRHT_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x2: + /* ....0000 .01..... ........ 1101.... */ + disas_a32_extract_ldst_rr_p0w1(ctx, &u.f_ldst_rr, insn); + switch (insn & 0x00100f00) { + case 0x00100000: + /* ....0000 .011.... ....0000 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:274 */ + if (trans_LDRSBT_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x3: + /* ....0000 .01..... ........ 1111.... */ + disas_a32_extract_ldst_rr_p0w1(ctx, &u.f_ldst_rr, insn); + switch (insn & 0x00100f00) { + case 0x00100000: + /* ....0000 .011.... ....0000 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:275 */ + if (trans_LDRSHT_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + } + return false; + case 0x00400000: + /* ....0000 .10..... ........ 0..1.... */ + disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....0000 010..... ........ 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:94 */ + if (trans_SUB_rrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + case 0x1: + /* ....0000 110..... ........ 0..1.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:98 */ + if (trans_SBC_rrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + case 0x00400080: + /* ....0000 .10..... ........ 1..1.... */ + switch ((insn >> 5) & 0x3) { + case 0x0: + /* ....0000 .10..... ........ 1001.... */ + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....0000 010..... ........ 1001.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0000 0100.... ........ 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:146 */ + if (trans_UMAAL(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x1: + /* ....0000 110..... ........ 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:150 */ + disas_a32_extract_s_rdamn(ctx, &u.f_s_rrrr, insn); + if (trans_SMULL(ctx, &u.f_s_rrrr)) return true; + return false; + } + return false; + case 0x1: + /* ....0000 .10..... ........ 1011.... */ + disas_a32_extract_ldst_ri8_pw0(ctx, &u.f_ldst_ri, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0000 .100.... ........ 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:310 */ + if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* ....0000 .101.... ........ 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:319 */ + if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x2: + /* ....0000 .10..... ........ 1101.... */ + disas_a32_extract_ldst_ri8_pw0(ctx, &u.f_ldst_ri, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0000 .100.... ........ 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:313 */ + if (trans_LDRD_ri_a32(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* ....0000 .101.... ........ 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:322 */ + if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x3: + /* ....0000 .10..... ........ 1111.... */ + disas_a32_extract_ldst_ri8_pw0(ctx, &u.f_ldst_ri, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0000 .100.... ........ 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:316 */ + if (trans_STRD_ri_a32(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* ....0000 .101.... ........ 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:325 */ + if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + } + return false; + case 0x00600000: + /* ....0000 .11..... ........ 0..1.... */ + disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....0000 011..... ........ 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:95 */ + if (trans_RSB_rrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + case 0x1: + /* ....0000 111..... ........ 0..1.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:99 */ + if (trans_RSC_rrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + case 0x00600080: + /* ....0000 .11..... ........ 1..1.... */ + switch ((insn >> 5) & 0x3) { + case 0x0: + /* ....0000 .11..... ........ 1001.... */ + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....0000 011..... ........ 1001.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0000 0110.... ........ 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:147 */ + if (trans_MLS(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x1: + /* ....0000 111..... ........ 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:151 */ + disas_a32_extract_s_rdamn(ctx, &u.f_s_rrrr, insn); + if (trans_SMLAL(ctx, &u.f_s_rrrr)) return true; + return false; + } + return false; + case 0x1: + /* ....0000 .11..... ........ 1011.... */ + disas_a32_extract_ldst_ri8_p0w1(ctx, &u.f_ldst_ri, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0000 .110.... ........ 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:333 */ + if (trans_STRHT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* ....0000 .111.... ........ 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:334 */ + if (trans_LDRHT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x2: + /* ....0000 .11..... ........ 1101.... */ + disas_a32_extract_ldst_ri8_p0w1(ctx, &u.f_ldst_ri, insn); + switch ((insn >> 20) & 0x1) { + case 0x1: + /* ....0000 .111.... ........ 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:335 */ + if (trans_LDRSBT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x3: + /* ....0000 .11..... ........ 1111.... */ + disas_a32_extract_ldst_ri8_p0w1(ctx, &u.f_ldst_ri, insn); + switch ((insn >> 20) & 0x1) { + case 0x1: + /* ....0000 .111.... ........ 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:336 */ + if (trans_LDRSHT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0x01000000: + /* ....0001 ........ ........ ...0.... */ + switch (insn & 0x00a00000) { + case 0x00000000: + /* ....0001 0.0..... ........ ...0.... */ + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0001 0.00.... ........ ...0.... */ + switch ((insn >> 5) & 0x7) { + case 0x0: + /* ....0001 0.00.... ........ 0000.... */ + switch (insn & 0x00000e0f) { + case 0x00000000: + /* ....0001 0.00.... ....000. 00000000 */ + disas_a32_extract_disas_a32_Fmt_22(ctx, &u.f_mrs_reg, insn); + switch (insn & 0x000f0100) { + case 0x000f0000: + /* ....0001 0.001111 ....0000 00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:226 */ + if (trans_MRS_reg(ctx, &u.f_mrs_reg)) return true; + return false; + } + return false; + case 0x00000200: + /* ....0001 0.00.... ....001. 
00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:223 */ + disas_a32_extract_disas_a32_Fmt_20(ctx, &u.f_mrs_bank, insn); + if (trans_MRS_bank(ctx, &u.f_mrs_bank)) return true; + return false; + } + return false; + case 0x2: + /* ....0001 0.00.... ........ 0100.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00400f00) { + case 0x00000000: + /* ....0001 0000.... ....0000 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:207 */ + if (trans_CRC32B(ctx, &u.f_rrr)) return true; + return false; + case 0x00000200: + /* ....0001 0000.... ....0010 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:210 */ + if (trans_CRC32CB(ctx, &u.f_rrr)) return true; + return false; + case 0x00400000: + /* ....0001 0100.... ....0000 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:209 */ + if (trans_CRC32W(ctx, &u.f_rrr)) return true; + return false; + case 0x00400200: + /* ....0001 0100.... ....0010 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:212 */ + if (trans_CRC32CW(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x4: + /* ....0001 0.00.... ........ 1000.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch ((insn >> 22) & 0x1) { + case 0x0: + /* ....0001 0000.... ........ 1000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:164 */ + if (trans_SMLABB(ctx, &u.f_rrrr)) return true; + return false; + case 0x1: + /* ....0001 0100.... ........ 1000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:172 */ + if (trans_SMLALBB(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x5: + /* ....0001 0.00.... ........ 1010.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch ((insn >> 22) & 0x1) { + case 0x0: + /* ....0001 0000.... ........ 1010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:166 */ + if (trans_SMLATB(ctx, &u.f_rrrr)) return true; + return false; + case 0x1: + /* ....0001 0100.... ........ 1010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:174 */ + if (trans_SMLALTB(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x6: + /* ....0001 0.00.... ........ 1100.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch ((insn >> 22) & 0x1) { + case 0x0: + /* ....0001 0000.... ........ 1100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:165 */ + if (trans_SMLABT(ctx, &u.f_rrrr)) return true; + return false; + case 0x1: + /* ....0001 0100.... ........ 1100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:173 */ + if (trans_SMLALBT(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x7: + /* ....0001 0.00.... ........ 1110.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch ((insn >> 22) & 0x1) { + case 0x0: + /* ....0001 0000.... ........ 1110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:167 */ + if (trans_SMLATT(ctx, &u.f_rrrr)) return true; + return false; + case 0x1: + /* ....0001 0100.... ........ 1110.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:175 */ + if (trans_SMLALTT(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* ....0001 0.01.... ........ ...0.... */ + disas_a32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); + switch (insn & 0x0040f000) { + case 0x00000000: + /* ....0001 0001.... 0000.... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:68 */ + if (trans_TST_xrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x00400000: + /* ....0001 0101.... 0000.... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:70 */ + if (trans_CMP_xrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + } + return false; + case 0x00200000: + /* ....0001 0.1..... ........ ...0.... */ + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0001 0.10.... ........ ...0.... */ + switch ((insn >> 5) & 0x7) { + case 0x0: + /* ....0001 0.10.... ........ 0000.... */ + switch ((insn >> 9) & 0x7f) { + case 0x78: + /* ....0001 0.10.... 1111000. 0000.... */ + disas_a32_extract_disas_a32_Fmt_23(ctx, &u.f_msr_reg, insn); + switch ((insn >> 8) & 0x1) { + case 0x0: + /* ....0001 0.10.... 11110000 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:227 */ + if (trans_MSR_reg(ctx, &u.f_msr_reg)) return true; + return false; + } + return false; + case 0x79: + /* ....0001 0.10.... 1111001. 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:224 */ + disas_a32_extract_disas_a32_Fmt_21(ctx, &u.f_msr_bank, insn); + if (trans_MSR_bank(ctx, &u.f_msr_bank)) return true; + return false; + } + return false; + case 0x1: + /* ....0001 0.10.... ........ 0010.... */ + disas_a32_extract_rm(ctx, &u.f_r, insn); + switch (insn & 0x004fff00) { + case 0x000fff00: + /* ....0001 00101111 11111111 0010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:230 */ + if (trans_BXJ(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x2: + /* ....0001 0.10.... ........ 0100.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00400f00) { + case 0x00000000: + /* ....0001 0010.... ....0000 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:208 */ + if (trans_CRC32H(ctx, &u.f_rrr)) return true; + return false; + case 0x00000200: + /* ....0001 0010.... ....0010 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:211 */ + if (trans_CRC32CH(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x3: + /* ....0001 0.10.... ........ 0110.... */ + disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); + switch (insn & 0x004fff0f) { + case 0x0040000e: + /* ....0001 01100000 00000000 01101110 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:235 */ + if (trans_ERET(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x4: + /* ....0001 0.10.... ........ 1000.... */ + switch ((insn >> 22) & 0x1) { + case 0x0: + /* ....0001 0010.... ........ 1000.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:168 */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + if (trans_SMLAWB(ctx, &u.f_rrrr)) return true; + return false; + case 0x1: + /* ....0001 0110.... ........ 1000.... */ + disas_a32_extract_rd0mn(ctx, &u.f_rrrr, insn); + switch ((insn >> 12) & 0xf) { + case 0x0: + /* ....0001 0110.... 0000.... 1000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:176 */ + if (trans_SMULBB(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + } + return false; + case 0x5: + /* ....0001 0.10.... ........ 1010.... */ + disas_a32_extract_rd0mn(ctx, &u.f_rrrr, insn); + switch (insn & 0x0040f000) { + case 0x00000000: + /* ....0001 0010.... 0000.... 1010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:169 */ + if (trans_SMULWB(ctx, &u.f_rrrr)) return true; + return false; + case 0x00400000: + /* ....0001 0110.... 0000.... 1010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:178 */ + if (trans_SMULTB(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x6: + /* ....0001 0.10.... ........ 1100.... */ + switch ((insn >> 22) & 0x1) { + case 0x0: + /* ....0001 0010.... ........ 1100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:170 */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + if (trans_SMLAWT(ctx, &u.f_rrrr)) return true; + return false; + case 0x1: + /* ....0001 0110.... ........ 1100.... */ + disas_a32_extract_rd0mn(ctx, &u.f_rrrr, insn); + switch ((insn >> 12) & 0xf) { + case 0x0: + /* ....0001 0110.... 0000.... 1100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:177 */ + if (trans_SMULBT(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + } + return false; + case 0x7: + /* ....0001 0.10.... ........ 1110.... */ + disas_a32_extract_rd0mn(ctx, &u.f_rrrr, insn); + switch (insn & 0x0040f000) { + case 0x00000000: + /* ....0001 0010.... 0000.... 1110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:171 */ + if (trans_SMULWT(ctx, &u.f_rrrr)) return true; + return false; + case 0x00400000: + /* ....0001 0110.... 0000.... 1110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:179 */ + if (trans_SMULTT(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* ....0001 0.11.... ........ ...0.... */ + disas_a32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); + switch (insn & 0x0040f000) { + case 0x00000000: + /* ....0001 0011.... 0000.... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:69 */ + if (trans_TEQ_xrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x00400000: + /* ....0001 0111.... 0000.... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:71 */ + if (trans_CMN_xrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + } + return false; + case 0x00800000: + /* ....0001 1.0..... ........ ...0.... */ + disas_a32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); + switch ((insn >> 22) & 0x1) { + case 0x0: + /* ....0001 100..... ........ ...0.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:72 */ + if (trans_ORR_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x1: + /* ....0001 110..... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:74 */ + if (trans_BIC_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + case 0x00a00000: + /* ....0001 1.1..... ........ ...0.... */ + disas_a32_extract_s_rxr_shi(ctx, &u.f_s_rrr_shi, insn); + switch (insn & 0x004f0000) { + case 0x00000000: + /* ....0001 101.0000 ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:73 */ + if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x00400000: + /* ....0001 111.0000 ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:75 */ + if (trans_MVN_rxri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + } + return false; + case 0x01000010: + /* ....0001 ........ ........ ...1.... */ + switch (insn & 0x00400080) { + case 0x00000000: + /* ....0001 .0...... ........ 0..1.... */ + switch (insn & 0x00a00000) { + case 0x00000000: + /* ....0001 000..... ........ 0..1.... */ + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0001 0000.... ........ 0..1.... */ + switch ((insn >> 5) & 0x3) { + case 0x2: + /* ....0001 0000.... ........ 0101.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch ((insn >> 8) & 0xf) { + case 0x0: + /* ....0001 0000.... ....0000 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:157 */ + if (trans_QADD(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x3: + /* ....0001 0000.... ........ 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:237 */ + disas_a32_extract_i16(ctx, &u.f_i, insn); + if (trans_HLT(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x1: + /* ....0001 0001.... ........ 0..1.... */ + disas_a32_extract_S_xrr_shr(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 12) & 0xf) { + case 0x0: + /* ....0001 0001.... 0000.... 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:100 */ + if (trans_TST_xrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + } + return false; + case 0x00200000: + /* ....0001 001..... ........ 0..1.... */ + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0001 0010.... ........ 0..1.... */ + switch ((insn >> 5) & 0x3) { + case 0x0: + /* ....0001 0010.... ........ 0001.... */ + disas_a32_extract_rm(ctx, &u.f_r, insn); + switch ((insn >> 8) & 0xfff) { + case 0xfff: + /* ....0001 00101111 11111111 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:229 */ + if (trans_BX(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x1: + /* ....0001 0010.... ........ 0011.... */ + disas_a32_extract_rm(ctx, &u.f_r, insn); + switch ((insn >> 8) & 0xfff) { + case 0xfff: + /* ....0001 00101111 11111111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:231 */ + if (trans_BLX_r(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x2: + /* ....0001 0010.... ........ 0101.... 
*/ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch ((insn >> 8) & 0xf) { + case 0x0: + /* ....0001 0010.... ....0000 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:158 */ + if (trans_QSUB(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x3: + /* ....0001 0010.... ........ 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:238 */ + disas_a32_extract_i16(ctx, &u.f_i, insn); + if (trans_BKPT(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x1: + /* ....0001 0011.... ........ 0..1.... */ + disas_a32_extract_S_xrr_shr(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 12) & 0xf) { + case 0x0: + /* ....0001 0011.... 0000.... 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:101 */ + if (trans_TEQ_xrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + } + return false; + case 0x00800000: + /* ....0001 100..... ........ 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:104 */ + disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); + if (trans_ORR_rrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + case 0x00a00000: + /* ....0001 101..... ........ 0..1.... */ + disas_a32_extract_s_rxr_shr(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 16) & 0xf) { + case 0x0: + /* ....0001 101.0000 ........ 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:105 */ + if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + } + return false; + case 0x00000080: + /* ....0001 .0...... ........ 1..1.... */ + switch (insn & 0x00100f60) { + case 0x00000000: + /* ....0001 .0.0.... ....0000 1001.... */ + disas_a32_extract_swp(ctx, &u.f_disas_a3226, insn); + switch (insn & 0x00a00000) { + case 0x00000000: + /* ....0001 0000.... ....0000 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:364 */ + if (trans_SWP(ctx, &u.f_disas_a3226)) return true; + return false; + } + return false; + case 0x00000020: + /* ....0001 .0.0.... ....0000 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:250 */ + disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); + if (trans_STRH_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x00000040: + /* ....0001 .0.0.... ....0000 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:253 */ + disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); + if (trans_LDRD_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x00000060: + /* ....0001 .0.0.... ....0000 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:256 */ + disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); + if (trans_STRD_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x00000c00: + /* ....0001 .0.0.... ....1100 1001.... */ + disas_a32_extract_stl(ctx, &u.f_ldrex, insn); + switch (insn & 0x00a0f000) { + case 0x0080f000: + /* ....0001 1000.... 11111100 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:388 */ + if (trans_STL(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x00000e00: + /* ....0001 .0.0.... ....1110 1001.... 
*/ + disas_a32_extract_strex(ctx, &u.f_strex, insn); + switch (insn & 0x00a00000) { + case 0x00800000: + /* ....0001 1000.... ....1110 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:383 */ + if (trans_STLEX(ctx, &u.f_strex)) return true; + return false; + case 0x00a00000: + /* ....0001 1010.... ....1110 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:384 */ + if (trans_STLEXD_a32(ctx, &u.f_strex)) return true; + return false; + } + return false; + case 0x00000f00: + /* ....0001 .0.0.... ....1111 1001.... */ + disas_a32_extract_strex(ctx, &u.f_strex, insn); + switch (insn & 0x00a00000) { + case 0x00800000: + /* ....0001 1000.... ....1111 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:378 */ + if (trans_STREX(ctx, &u.f_strex)) return true; + return false; + case 0x00a00000: + /* ....0001 1010.... ....1111 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:379 */ + if (trans_STREXD_a32(ctx, &u.f_strex)) return true; + return false; + } + return false; + case 0x00100020: + /* ....0001 .0.1.... ....0000 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:259 */ + disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); + if (trans_LDRH_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x00100040: + /* ....0001 .0.1.... ....0000 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:262 */ + disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); + if (trans_LDRSB_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x00100060: + /* ....0001 .0.1.... ....0000 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:265 */ + disas_a32_extract_ldst_rr_p1w(ctx, &u.f_ldst_rr, insn); + if (trans_LDRSH_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x00100c00: + /* ....0001 .0.1.... ....1100 1001.... */ + disas_a32_extract_ldrex(ctx, &u.f_ldrex, insn); + switch (insn & 0x00a0000f) { + case 0x0080000f: + /* ....0001 1001.... ....1100 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:402 */ + if (trans_LDA(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x00100e00: + /* ....0001 .0.1.... ....1110 1001.... */ + disas_a32_extract_ldrex(ctx, &u.f_ldrex, insn); + switch (insn & 0x00a0000f) { + case 0x0080000f: + /* ....0001 1001.... ....1110 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:397 */ + if (trans_LDAEX(ctx, &u.f_ldrex)) return true; + return false; + case 0x00a0000f: + /* ....0001 1011.... ....1110 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:398 */ + if (trans_LDAEXD_a32(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x00100f00: + /* ....0001 .0.1.... ....1111 1001.... */ + disas_a32_extract_ldrex(ctx, &u.f_ldrex, insn); + switch (insn & 0x00a0000f) { + case 0x0080000f: + /* ....0001 1001.... ....1111 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:392 */ + if (trans_LDREX(ctx, &u.f_ldrex)) return true; + return false; + case 0x00a0000f: + /* ....0001 1011.... 
....1111 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:393 */ + if (trans_LDREXD_a32(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + } + return false; + case 0x00400000: + /* ....0001 .1...... ........ 0..1.... */ + switch (insn & 0x00a00000) { + case 0x00000000: + /* ....0001 010..... ........ 0..1.... */ + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0001 0100.... ........ 0..1.... */ + switch ((insn >> 5) & 0x3) { + case 0x2: + /* ....0001 0100.... ........ 0101.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch ((insn >> 8) & 0xf) { + case 0x0: + /* ....0001 0100.... ....0000 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:159 */ + if (trans_QDADD(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x3: + /* ....0001 0100.... ........ 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:239 */ + disas_a32_extract_i16(ctx, &u.f_i, insn); + if (trans_HVC(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x1: + /* ....0001 0101.... ........ 0..1.... */ + disas_a32_extract_S_xrr_shr(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 12) & 0xf) { + case 0x0: + /* ....0001 0101.... 0000.... 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:102 */ + if (trans_CMP_xrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + } + return false; + case 0x00200000: + /* ....0001 011..... ........ 0..1.... */ + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0001 0110.... ........ 0..1.... */ + switch (insn & 0x00000f60) { + case 0x00000040: + /* ....0001 0110.... ....0000 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:160 */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_QDSUB(ctx, &u.f_rrr)) return true; + return false; + case 0x00000060: + /* ....0001 0110.... ....0000 0111.... */ + disas_a32_extract_disas_a32_Fmt_24(ctx, &u.f_i, insn); + switch ((insn >> 12) & 0xff) { + case 0x0: + /* ....0001 01100000 00000000 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:240 */ + if (trans_SMC(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x00000f00: + /* ....0001 0110.... ....1111 0001.... */ + disas_a32_extract_rdm(ctx, &u.f_rr, insn); + switch ((insn >> 16) & 0xf) { + case 0xf: + /* ....0001 01101111 ....1111 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:233 */ + if (trans_CLZ(ctx, &u.f_rr)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* ....0001 0111.... ........ 0..1.... */ + disas_a32_extract_S_xrr_shr(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 12) & 0xf) { + case 0x0: + /* ....0001 0111.... 0000.... 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:103 */ + if (trans_CMN_xrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + } + return false; + case 0x00800000: + /* ....0001 110..... ........ 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:106 */ + disas_a32_extract_s_rrr_shr(ctx, &u.f_s_rrr_shr, insn); + if (trans_BIC_rrrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + case 0x00a00000: + /* ....0001 111..... ........ 0..1.... 
*/ + disas_a32_extract_s_rxr_shr(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 16) & 0xf) { + case 0x0: + /* ....0001 111.0000 ........ 0..1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:107 */ + if (trans_MVN_rxrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + } + return false; + case 0x00400080: + /* ....0001 .1...... ........ 1..1.... */ + switch (insn & 0x00100060) { + case 0x00000000: + /* ....0001 .1.0.... ........ 1001.... */ + switch (insn & 0x00a00f00) { + case 0x00000000: + /* ....0001 0100.... ....0000 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:365 */ + disas_a32_extract_swp(ctx, &u.f_disas_a3226, insn); + if (trans_SWPB(ctx, &u.f_disas_a3226)) return true; + return false; + case 0x00800c00: + /* ....0001 1100.... ....1100 1001.... */ + disas_a32_extract_stl(ctx, &u.f_ldrex, insn); + switch ((insn >> 12) & 0xf) { + case 0xf: + /* ....0001 1100.... 11111100 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:389 */ + if (trans_STLB(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x00800e00: + /* ....0001 1100.... ....1110 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:385 */ + disas_a32_extract_strex(ctx, &u.f_strex, insn); + if (trans_STLEXB(ctx, &u.f_strex)) return true; + return false; + case 0x00800f00: + /* ....0001 1100.... ....1111 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:380 */ + disas_a32_extract_strex(ctx, &u.f_strex, insn); + if (trans_STREXB(ctx, &u.f_strex)) return true; + return false; + case 0x00a00c00: + /* ....0001 1110.... ....1100 1001.... */ + disas_a32_extract_stl(ctx, &u.f_ldrex, insn); + switch ((insn >> 12) & 0xf) { + case 0xf: + /* ....0001 1110.... 11111100 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:390 */ + if (trans_STLH(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x00a00e00: + /* ....0001 1110.... ....1110 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:386 */ + disas_a32_extract_strex(ctx, &u.f_strex, insn); + if (trans_STLEXH(ctx, &u.f_strex)) return true; + return false; + case 0x00a00f00: + /* ....0001 1110.... ....1111 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:381 */ + disas_a32_extract_strex(ctx, &u.f_strex, insn); + if (trans_STREXH(ctx, &u.f_strex)) return true; + return false; + } + return false; + case 0x00000020: + /* ....0001 .1.0.... ........ 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:311 */ + disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); + if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x00000040: + /* ....0001 .1.0.... ........ 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:314 */ + disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); + if (trans_LDRD_ri_a32(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x00000060: + /* ....0001 .1.0.... ........ 1111.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:317 */ + disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); + if (trans_STRD_ri_a32(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x00100000: + /* ....0001 .1.1.... ........ 1001.... */ + disas_a32_extract_ldrex(ctx, &u.f_ldrex, insn); + switch (insn & 0x00a00f0f) { + case 0x00800c0f: + /* ....0001 1101.... ....1100 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:403 */ + if (trans_LDAB(ctx, &u.f_ldrex)) return true; + return false; + case 0x00800e0f: + /* ....0001 1101.... ....1110 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:399 */ + if (trans_LDAEXB(ctx, &u.f_ldrex)) return true; + return false; + case 0x00800f0f: + /* ....0001 1101.... ....1111 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:394 */ + if (trans_LDREXB(ctx, &u.f_ldrex)) return true; + return false; + case 0x00a00c0f: + /* ....0001 1111.... ....1100 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:404 */ + if (trans_LDAH(ctx, &u.f_ldrex)) return true; + return false; + case 0x00a00e0f: + /* ....0001 1111.... ....1110 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:400 */ + if (trans_LDAEXH(ctx, &u.f_ldrex)) return true; + return false; + case 0x00a00f0f: + /* ....0001 1111.... ....1111 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:395 */ + if (trans_LDREXH(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x00100020: + /* ....0001 .1.1.... ........ 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:320 */ + disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); + if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x00100040: + /* ....0001 .1.1.... ........ 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:323 */ + disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x00100060: + /* ....0001 .1.1.... ........ 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:326 */ + disas_a32_extract_ldst_ri8_p1w(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0x1: + /* ....001. ........ ........ ........ */ + switch ((insn >> 21) & 0xf) { + case 0x0: + /* ....0010 000..... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:120 */ + disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_AND_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x1: + /* ....0010 001..... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:121 */ + disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_EOR_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x2: + /* ....0010 010..... ........ ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:122 */ + disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x3: + /* ....0010 011..... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:123 */ + disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_RSB_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x4: + /* ....0010 100..... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:124 */ + disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x5: + /* ....0010 101..... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:125 */ + disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_ADC_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x6: + /* ....0010 110..... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:126 */ + disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_SBC_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x7: + /* ....0010 111..... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:127 */ + disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_RSC_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x8: + /* ....0011 000..... ........ ........ */ + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0011 0000.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:80 */ + disas_a32_extract_mov16(ctx, &u.f_ri, insn); + if (trans_MOVW(ctx, &u.f_ri)) return true; + return false; + case 0x1: + /* ....0011 0001.... ........ ........ */ + disas_a32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); + switch ((insn >> 12) & 0xf) { + case 0x0: + /* ....0011 0001.... 0000.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:128 */ + if (trans_TST_xri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + } + return false; + case 0x9: + /* ....0011 001..... ........ ........ */ + switch (insn & 0x0010f000) { + case 0x0000f000: + /* ....0011 0010.... 1111.... ........ */ + if ((insn & 0x000f0000) == 0x00000000) { + /* ....0011 00100000 1111.... ........ */ + if ((insn & 0x000000ff) == 0x00000001) { + /* ....0011 00100000 1111.... 00000001 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:188 */ + disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); + if (trans_YIELD(ctx, &u.f_empty)) return true; + } + if ((insn & 0x000000ff) == 0x00000002) { + /* ....0011 00100000 1111.... 00000010 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:189 */ + disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); + if (trans_WFE(ctx, &u.f_empty)) return true; + } + if ((insn & 0x000000ff) == 0x00000003) { + /* ....0011 00100000 1111.... 
00000011 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:190 */ + disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); + if (trans_WFI(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:198 */ + disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:201 */ + disas_a32_extract_msr_i(ctx, &u.f_msr_i, insn); + u.f_msr_i.r = 0; + if (trans_MSR_imm(ctx, &u.f_msr_i)) return true; + return false; + case 0x00100000: + /* ....0011 0011.... 0000.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:129 */ + disas_a32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_TEQ_xri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0xa: + /* ....0011 010..... ........ ........ */ + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0011 0100.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:81 */ + disas_a32_extract_mov16(ctx, &u.f_ri, insn); + if (trans_MOVT(ctx, &u.f_ri)) return true; + return false; + case 0x1: + /* ....0011 0101.... ........ ........ */ + disas_a32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); + switch ((insn >> 12) & 0xf) { + case 0x0: + /* ....0011 0101.... 0000.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:130 */ + if (trans_CMP_xri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + } + return false; + case 0xb: + /* ....0011 011..... ........ ........ */ + switch (insn & 0x0010f000) { + case 0x0000f000: + /* ....0011 0110.... 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:203 */ + disas_a32_extract_msr_i(ctx, &u.f_msr_i, insn); + u.f_msr_i.r = 1; + if (trans_MSR_imm(ctx, &u.f_msr_i)) return true; + return false; + case 0x00100000: + /* ....0011 0111.... 0000.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:131 */ + disas_a32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_CMN_xri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0xc: + /* ....0011 100..... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:132 */ + disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_ORR_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0xd: + /* ....0011 101..... ........ ........ */ + disas_a32_extract_s_rxi_rot(ctx, &u.f_s_rri_rot, insn); + switch ((insn >> 16) & 0xf) { + case 0x0: + /* ....0011 101.0000 ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:133 */ + if (trans_MOV_rxi(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0xe: + /* ....0011 110..... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:134 */ + disas_a32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_BIC_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0xf: + /* ....0011 111..... ........ ........ 
*/ + disas_a32_extract_s_rxi_rot(ctx, &u.f_s_rri_rot, insn); + switch ((insn >> 16) & 0xf) { + case 0x0: + /* ....0011 111.0000 ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:135 */ + if (trans_MVN_rxi(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + } + return false; + case 0x2: + /* ....010. ........ ........ ........ */ + switch (insn & 0x01500000) { + case 0x00000000: + /* ....0100 .0.0.... ........ ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* ....0100 .000.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:344 */ + disas_a32_extract_ldst_ri12_pw0(ctx, &u.f_ldst_ri, insn); + if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* ....0100 .010.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:355 */ + disas_a32_extract_ldst_ri12_p0w1(ctx, &u.f_ldst_ri, insn); + if (trans_STRT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x00100000: + /* ....0100 .0.1.... ........ ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* ....0100 .001.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:349 */ + disas_a32_extract_ldst_ri12_pw0(ctx, &u.f_ldst_ri, insn); + if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* ....0100 .011.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:357 */ + disas_a32_extract_ldst_ri12_p0w1(ctx, &u.f_ldst_ri, insn); + if (trans_LDRT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x00400000: + /* ....0100 .1.0.... ........ ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* ....0100 .100.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:346 */ + disas_a32_extract_ldst_ri12_pw0(ctx, &u.f_ldst_ri, insn); + if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* ....0100 .110.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:356 */ + disas_a32_extract_ldst_ri12_p0w1(ctx, &u.f_ldst_ri, insn); + if (trans_STRBT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x00500000: + /* ....0100 .1.1.... ........ ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* ....0100 .101.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:351 */ + disas_a32_extract_ldst_ri12_pw0(ctx, &u.f_ldst_ri, insn); + if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* ....0100 .111.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:358 */ + disas_a32_extract_ldst_ri12_p0w1(ctx, &u.f_ldst_ri, insn); + if (trans_LDRBT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x01000000: + /* ....0101 .0.0.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:343 */ + disas_a32_extract_ldst_ri12_p1w(ctx, &u.f_ldst_ri, insn); + if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x01100000: + /* ....0101 .0.1.... ........ ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:348 */ + disas_a32_extract_ldst_ri12_p1w(ctx, &u.f_ldst_ri, insn); + if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x01400000: + /* ....0101 .1.0.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:345 */ + disas_a32_extract_ldst_ri12_p1w(ctx, &u.f_ldst_ri, insn); + if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x01500000: + /* ....0101 .1.1.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:350 */ + disas_a32_extract_ldst_ri12_p1w(ctx, &u.f_ldst_ri, insn); + if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x3: + /* ....011. ........ ........ ........ */ + switch (insn & 0x01400010) { + case 0x00000000: + /* ....0110 .0...... ........ ...0.... */ + switch ((insn >> 20) & 0x3) { + case 0x0: + /* ....0110 .000.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:284 */ + disas_a32_extract_ldst_rs_pw0(ctx, &u.f_ldst_rr, insn); + if (trans_STR_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x1: + /* ....0110 .001.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:289 */ + disas_a32_extract_ldst_rs_pw0(ctx, &u.f_ldst_rr, insn); + if (trans_LDR_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x2: + /* ....0110 .010.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:297 */ + disas_a32_extract_ldst_rs_p0w1(ctx, &u.f_ldst_rr, insn); + if (trans_STRT_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x3: + /* ....0110 .011.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:299 */ + disas_a32_extract_ldst_rs_p0w1(ctx, &u.f_ldst_rr, insn); + if (trans_LDRT_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x00000010: + /* ....0110 .0...... ........ ...1.... */ + switch (insn & 0x00a00020) { + case 0x00000000: + /* ....0110 000..... ........ ..01.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00100fc0) { + case 0x00100f00: + /* ....0110 0001.... ....1111 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:426 */ + if (trans_SADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f40: + /* ....0110 0001.... ....1111 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:428 */ + if (trans_SSAX(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f80: + /* ....0110 0001.... ....1111 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:430 */ + if (trans_SADD8(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00000020: + /* ....0110 000..... ........ ..11.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00100fc0) { + case 0x00100f00: + /* ....0110 0001.... ....1111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:427 */ + if (trans_SASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f40: + /* ....0110 0001.... ....1111 0111.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:429 */ + if (trans_SSUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100fc0: + /* ....0110 0001.... ....1111 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:431 */ + if (trans_SSUB8(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00200000: + /* ....0110 001..... ........ ..01.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00100fc0) { + case 0x00000f00: + /* ....0110 0010.... ....1111 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:433 */ + if (trans_QADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00000f40: + /* ....0110 0010.... ....1111 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:435 */ + if (trans_QSAX(ctx, &u.f_rrr)) return true; + return false; + case 0x00000f80: + /* ....0110 0010.... ....1111 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:437 */ + if (trans_QADD8(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f00: + /* ....0110 0011.... ....1111 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:440 */ + if (trans_SHADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f40: + /* ....0110 0011.... ....1111 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:442 */ + if (trans_SHSAX(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f80: + /* ....0110 0011.... ....1111 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:444 */ + if (trans_SHADD8(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00200020: + /* ....0110 001..... ........ ..11.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00100fc0) { + case 0x00000f00: + /* ....0110 0010.... ....1111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:434 */ + if (trans_QASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00000f40: + /* ....0110 0010.... ....1111 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:436 */ + if (trans_QSUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00000fc0: + /* ....0110 0010.... ....1111 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:438 */ + if (trans_QSUB8(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f00: + /* ....0110 0011.... ....1111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:441 */ + if (trans_SHASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f40: + /* ....0110 0011.... ....1111 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:443 */ + if (trans_SHSUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100fc0: + /* ....0110 0011.... ....1111 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:445 */ + if (trans_SHSUB8(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00800000: + /* ....0110 100..... ........ ..01.... 
*/ + disas_a32_extract_disas_a32_Fmt_43(ctx, &u.f_pkh, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0110 1000.... ........ ..01.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:470 */ + if (trans_PKH(ctx, &u.f_pkh)) return true; + return false; + } + return false; + case 0x00800020: + /* ....0110 100..... ........ ..11.... */ + switch (insn & 0x001003c0) { + case 0x00000040: + /* ....0110 1000.... ......00 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:484 */ + disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); + if (trans_SXTAB16(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x00000380: + /* ....0110 1000.... ......11 1011.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch ((insn >> 10) & 0x3) { + case 0x3: + /* ....0110 1000.... ....1111 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:491 */ + if (trans_SEL(ctx, &u.f_rrr)) return true; + return false; + } + return false; + } + return false; + case 0x00a00000: + /* ....0110 101..... ........ ..01.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:476 */ + disas_a32_extract_sat(ctx, &u.f_sat, insn); + if (trans_SSAT(ctx, &u.f_sat)) return true; + return false; + case 0x00a00020: + /* ....0110 101..... ........ ..11.... */ + switch (insn & 0x001003c0) { + case 0x00000040: + /* ....0110 1010.... ......00 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:485 */ + disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); + if (trans_SXTAB(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x00000300: + /* ....0110 1010.... ......11 0011.... */ + disas_a32_extract_sat16(ctx, &u.f_sat, insn); + switch ((insn >> 10) & 0x3) { + case 0x3: + /* ....0110 1010.... ....1111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:479 */ + if (trans_SSAT16(ctx, &u.f_sat)) return true; + return false; + } + return false; + case 0x00100040: + /* ....0110 1011.... ......00 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:486 */ + disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); + if (trans_SXTAH(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x00100300: + /* ....0110 1011.... ......11 0011.... */ + disas_a32_extract_rdm(ctx, &u.f_rr, insn); + switch (insn & 0x000f0c00) { + case 0x000f0c00: + /* ....0110 10111111 ....1111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:492 */ + if (trans_REV(ctx, &u.f_rr)) return true; + return false; + } + return false; + case 0x00100380: + /* ....0110 1011.... ......11 1011.... */ + disas_a32_extract_rdm(ctx, &u.f_rr, insn); + switch (insn & 0x000f0c00) { + case 0x000f0c00: + /* ....0110 10111111 ....1111 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:493 */ + if (trans_REV16(ctx, &u.f_rr)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0x00400000: + /* ....0110 .1...... ........ ...0.... */ + switch ((insn >> 20) & 0x3) { + case 0x0: + /* ....0110 .100.... ........ ...0.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:286 */ + disas_a32_extract_ldst_rs_pw0(ctx, &u.f_ldst_rr, insn); + if (trans_STRB_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x1: + /* ....0110 .101.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:291 */ + disas_a32_extract_ldst_rs_pw0(ctx, &u.f_ldst_rr, insn); + if (trans_LDRB_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x2: + /* ....0110 .110.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:298 */ + disas_a32_extract_ldst_rs_p0w1(ctx, &u.f_ldst_rr, insn); + if (trans_STRBT_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x3: + /* ....0110 .111.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:300 */ + disas_a32_extract_ldst_rs_p0w1(ctx, &u.f_ldst_rr, insn); + if (trans_LDRBT_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x00400010: + /* ....0110 .1...... ........ ...1.... */ + switch (insn & 0x00a00020) { + case 0x00000000: + /* ....0110 010..... ........ ..01.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00100fc0) { + case 0x00100f00: + /* ....0110 0101.... ....1111 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:447 */ + if (trans_UADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f40: + /* ....0110 0101.... ....1111 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:449 */ + if (trans_USAX(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f80: + /* ....0110 0101.... ....1111 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:451 */ + if (trans_UADD8(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00000020: + /* ....0110 010..... ........ ..11.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00100fc0) { + case 0x00100f00: + /* ....0110 0101.... ....1111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:448 */ + if (trans_UASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f40: + /* ....0110 0101.... ....1111 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:450 */ + if (trans_USUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100fc0: + /* ....0110 0101.... ....1111 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:452 */ + if (trans_USUB8(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00200000: + /* ....0110 011..... ........ ..01.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00100fc0) { + case 0x00000f00: + /* ....0110 0110.... ....1111 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:454 */ + if (trans_UQADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00000f40: + /* ....0110 0110.... ....1111 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:456 */ + if (trans_UQSAX(ctx, &u.f_rrr)) return true; + return false; + case 0x00000f80: + /* ....0110 0110.... ....1111 1001.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:458 */ + if (trans_UQADD8(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f00: + /* ....0110 0111.... ....1111 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:461 */ + if (trans_UHADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f40: + /* ....0110 0111.... ....1111 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:463 */ + if (trans_UHSAX(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f80: + /* ....0110 0111.... ....1111 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:465 */ + if (trans_UHADD8(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00200020: + /* ....0110 011..... ........ ..11.... */ + disas_a32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00100fc0) { + case 0x00000f00: + /* ....0110 0110.... ....1111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:455 */ + if (trans_UQASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00000f40: + /* ....0110 0110.... ....1111 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:457 */ + if (trans_UQSUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00000fc0: + /* ....0110 0110.... ....1111 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:459 */ + if (trans_UQSUB8(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f00: + /* ....0110 0111.... ....1111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:462 */ + if (trans_UHASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00100f40: + /* ....0110 0111.... ....1111 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:464 */ + if (trans_UHSUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100fc0: + /* ....0110 0111.... ....1111 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:466 */ + if (trans_UHSUB8(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00800020: + /* ....0110 110..... ........ ..11.... */ + disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); + switch (insn & 0x001003c0) { + case 0x00000040: + /* ....0110 1100.... ......00 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:487 */ + if (trans_UXTAB16(ctx, &u.f_rrr_rot)) return true; + return false; + } + return false; + case 0x00a00000: + /* ....0110 111..... ........ ..01.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:477 */ + disas_a32_extract_sat(ctx, &u.f_sat, insn); + if (trans_USAT(ctx, &u.f_sat)) return true; + return false; + case 0x00a00020: + /* ....0110 111..... ........ ..11.... */ + switch (insn & 0x001003c0) { + case 0x00000040: + /* ....0110 1110.... ......00 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:488 */ + disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); + if (trans_UXTAB(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x00000300: + /* ....0110 1110.... ......11 0011.... 
*/ + disas_a32_extract_sat16(ctx, &u.f_sat, insn); + switch ((insn >> 10) & 0x3) { + case 0x3: + /* ....0110 1110.... ....1111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:480 */ + if (trans_USAT16(ctx, &u.f_sat)) return true; + return false; + } + return false; + case 0x00100040: + /* ....0110 1111.... ......00 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:489 */ + disas_a32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); + if (trans_UXTAH(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x00100300: + /* ....0110 1111.... ......11 0011.... */ + disas_a32_extract_rdm(ctx, &u.f_rr, insn); + switch (insn & 0x000f0c00) { + case 0x000f0c00: + /* ....0110 11111111 ....1111 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:495 */ + if (trans_RBIT(ctx, &u.f_rr)) return true; + return false; + } + return false; + case 0x00100380: + /* ....0110 1111.... ......11 1011.... */ + disas_a32_extract_rdm(ctx, &u.f_rr, insn); + switch (insn & 0x000f0c00) { + case 0x000f0c00: + /* ....0110 11111111 ....1111 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:494 */ + if (trans_REVSH(ctx, &u.f_rr)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0x01000000: + /* ....0111 .0...... ........ ...0.... */ + disas_a32_extract_ldst_rs_p1w(ctx, &u.f_ldst_rr, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0111 .0.0.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:285 */ + if (trans_STR_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x1: + /* ....0111 .0.1.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:290 */ + if (trans_LDR_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x01000010: + /* ....0111 .0...... ........ ...1.... */ + switch (insn & 0x00a00060) { + case 0x00000000: + /* ....0111 000..... ........ .001.... */ + switch (insn & 0x00100080) { + case 0x00000000: + /* ....0111 0000.... ........ 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:501 */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + if (trans_SMLAD(ctx, &u.f_rrrr)) return true; + return false; + case 0x00100000: + /* ....0111 0001.... ........ 0001.... */ + disas_a32_extract_rdmn(ctx, &u.f_rrr, insn); + switch ((insn >> 12) & 0xf) { + case 0xf: + /* ....0111 0001.... 1111.... 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:506 */ + if (trans_SDIV(ctx, &u.f_rrr)) return true; + return false; + } + return false; + } + return false; + case 0x00000020: + /* ....0111 000..... ........ .011.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch (insn & 0x00100080) { + case 0x00000000: + /* ....0111 0000.... ........ 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:502 */ + if (trans_SMLADX(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x00000040: + /* ....0111 000..... ........ .101.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch (insn & 0x00100080) { + case 0x00000000: + /* ....0111 0000.... ........ 0101.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:503 */ + if (trans_SMLSD(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x00000060: + /* ....0111 000..... ........ .111.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch (insn & 0x00100080) { + case 0x00000000: + /* ....0111 0000.... ........ 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:504 */ + if (trans_SMLSDX(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x00200000: + /* ....0111 001..... ........ .001.... */ + disas_a32_extract_rdmn(ctx, &u.f_rrr, insn); + switch (insn & 0x0010f080) { + case 0x0010f000: + /* ....0111 0011.... 1111.... 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:507 */ + if (trans_UDIV(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00800000: + /* ....0111 100..... ........ .001.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch (insn & 0x00100080) { + case 0x00000000: + /* ....0111 1000.... ........ 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:409 */ + if (trans_USADA8(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x00a00040: + /* ....0111 101..... ........ .101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:414 */ + disas_a32_extract_bfx(ctx, &u.f_bfx, insn); + if (trans_SBFX(ctx, &u.f_bfx)) return true; + return false; + } + return false; + case 0x01400000: + /* ....0111 .1...... ........ ...0.... */ + disas_a32_extract_ldst_rs_p1w(ctx, &u.f_ldst_rr, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....0111 .1.0.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:287 */ + if (trans_STRB_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x1: + /* ....0111 .1.1.... ........ ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:292 */ + if (trans_LDRB_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x01400010: + /* ....0111 .1...... ........ ...1.... */ + switch (insn & 0x00a00060) { + case 0x00000000: + /* ....0111 010..... ........ .001.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch (insn & 0x00100080) { + case 0x00000000: + /* ....0111 0100.... ........ 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:509 */ + if (trans_SMLALD(ctx, &u.f_rrrr)) return true; + return false; + case 0x00100000: + /* ....0111 0101.... ........ 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:514 */ + if (trans_SMMLA(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x00000020: + /* ....0111 010..... ........ .011.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch (insn & 0x00100080) { + case 0x00000000: + /* ....0111 0100.... ........ 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:510 */ + if (trans_SMLALDX(ctx, &u.f_rrrr)) return true; + return false; + case 0x00100000: + /* ....0111 0101.... ........ 0011.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:515 */ + if (trans_SMMLAR(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x00000040: + /* ....0111 010..... ........ .101.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch (insn & 0x00100080) { + case 0x00000000: + /* ....0111 0100.... ........ 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:511 */ + if (trans_SMLSLD(ctx, &u.f_rrrr)) return true; + return false; + case 0x00100080: + /* ....0111 0101.... ........ 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:516 */ + if (trans_SMMLS(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x00000060: + /* ....0111 010..... ........ .111.... */ + disas_a32_extract_rdamn(ctx, &u.f_rrrr, insn); + switch (insn & 0x00100080) { + case 0x00000000: + /* ....0111 0100.... ........ 0111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:512 */ + if (trans_SMLSLDX(ctx, &u.f_rrrr)) return true; + return false; + case 0x00100080: + /* ....0111 0101.... ........ 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:517 */ + if (trans_SMMLSR(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + case 0x00800000: + /* ....0111 110..... ........ .001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:418 */ + disas_a32_extract_disas_a32_Fmt_42(ctx, &u.f_bfi, insn); + if (trans_BFCI(ctx, &u.f_bfi)) return true; + return false; + case 0x00a00040: + /* ....0111 111..... ........ .101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:415 */ + disas_a32_extract_bfx(ctx, &u.f_bfx, insn); + if (trans_UBFX(ctx, &u.f_bfx)) return true; + return false; + case 0x00a00060: + /* ....0111 111..... ........ .111.... */ + disas_a32_extract_disas_a32_Fmt_16(ctx, &u.f_empty, insn); + switch (insn & 0xf0100080) { + case 0xe0100080: + /* 11100111 1111.... ........ 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:422 */ + if (trans_UDF(ctx, &u.f_empty)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0x4: + /* ....100. ........ ........ ........ */ + disas_a32_extract_disas_a32_Fmt_48(ctx, &u.f_ldst_block, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....100. ...0.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:521 */ + if (trans_STM(ctx, &u.f_ldst_block)) return true; + return false; + case 0x1: + /* ....100. ...1.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:522 */ + if (trans_LDM_a32(ctx, &u.f_ldst_block)) return true; + return false; + } + return false; + case 0x5: + /* ....101. ........ ........ ........ */ + disas_a32_extract_branch(ctx, &u.f_i, insn); + switch ((insn >> 24) & 0x1) { + case 0x0: + /* ....1010 ........ ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:529 */ + if (trans_B(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* ....1011 ........ ........ ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:530 */ + if (trans_BL(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x7: + /* ....111. ........ ........ ........ */ + disas_a32_extract_disas_a32_Fmt_50(ctx, &u.f_i, insn); + switch ((insn >> 24) & 0x1) { + case 0x1: + /* ....1111 ........ ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/a32.decode:534 */ + if (trans_SVC(ctx, &u.f_i)) return true; + return false; + } + return false; + } + return false; +} diff --git a/qemu/target/arm/decode-sve.inc.c b/qemu/target/arm/decode-sve.inc.c new file mode 100644 index 00000000..9740f1aa --- /dev/null +++ b/qemu/target/arm/decode-sve.inc.c @@ -0,0 +1,5080 @@ +/* This file is autogenerated by scripts/decodetree.py. */ + +typedef struct { + int esz; + int imm1; + int imm2; + int rd; +} arg_disas_sve25; + +typedef struct { + int esz; + int imm; + int rd; + int rm; +} arg_disas_sve26; + +typedef struct { + int imm; + int rd; +} arg_disas_sve27; + +typedef struct { + int rd; + int rn; +} arg_disas_sve28; + +typedef struct { + int dbm; + int rd; +} arg_disas_sve29; + +typedef struct { + int esz; + int h; + int rd; + int rn; + int u; +} arg_disas_sve30; + +typedef struct { + int pg; + int rn; +} arg_disas_sve31; + +typedef struct { +#ifdef _MSC_VER + int dummy; +#endif +} arg_disas_sve32; + +typedef struct { + int rd; +} arg_disas_sve33; + +typedef struct { + int pg; + int rd; + int s; +} arg_disas_sve34; + +typedef struct { + int rn; +} arg_disas_sve35; + +typedef struct { + int ne; + int rm; + int rn; + int sf; +} arg_disas_sve36; + +typedef struct { + int eq; + int esz; + int rd; + int rm; + int rn; + int sf; + int u; +} arg_disas_sve37; + +typedef struct { + int esz; + int imm; + int rd; +} arg_disas_sve38; + +typedef struct { + int ra; + int rd; + int rm; + int rn; + int sz; + int u; +} arg_disas_sve39; + +typedef struct { + int index; + int ra; + int rd; + int rm; + int rn; + int sz; + int u; +} arg_disas_sve40; + +typedef struct { + int esz; + int pg; + int rd; + int rm; + int rn; + int rot; +} arg_disas_sve41; + +typedef struct { + int esz; + int pg; + int ra; + int rd; + int rm; + int rn; + int rot; +} arg_disas_sve42; + +typedef struct { + int esz; + int index; + int ra; + int rd; + int rm; + int rn; + int rot; +} arg_disas_sve43; + +typedef struct { + int esz; + int index; + int ra; + int rd; + int rm; + int rn; + int sub; +} arg_disas_sve44; + +typedef struct { + int esz; + int index; + int rd; + int rm; + int rn; +} arg_disas_sve45; + +typedef struct { + int esz; + int imm; + int rd; + int rm; + int rn; +} arg_disas_sve46; + +typedef struct { + int rm; +} arg_disas_sve47; + +typedef struct { + int d; + int esz; + int imm; + int pat; + int rd; + int rn; + int u; +} arg_incdec2_cnt; + +typedef struct { + int d; + int esz; + int pg; + int rd; + int rn; + int u; +} arg_incdec2_pred; + +typedef struct { + int d; + int esz; + int imm; + int pat; + int rd; + int u; +} arg_incdec_cnt; + +typedef struct { + int d; + int esz; + int pg; + int rd; + int u; +} arg_incdec_pred; + +typedef struct { + int esz; + int pat; + int rd; + int s; +} arg_ptrue; + +typedef struct { + int esz; + int pg; + int rd; + int rn; +} arg_rpr_esz; + +typedef struct { + int pg; + int rd; + int rn; + int s; +} arg_rpr_s; + +typedef struct { + int esz; + int imm; + int pg; + int rd; + int rn; +} arg_rpri_esz; + +typedef struct { + int esz; + int ff; + int imm; + int msz; + int pg; + int rd; + int rn; + int u; +} 
arg_rpri_gather_load; + +typedef struct { + int dtype; + int imm; + int nreg; + int pg; + int rd; + int rn; +} arg_rpri_load; + +typedef struct { + int esz; + int imm; + int msz; + int pg; + int rd; + int rn; +} arg_rpri_scatter_store; + +typedef struct { + int esz; + int imm; + int msz; + int nreg; + int pg; + int rd; + int rn; +} arg_rpri_store; + +typedef struct { + int esz; + int pg; + int rd; + int rm; + int rn; +} arg_rprr_esz; + +typedef struct { + int esz; + int ff; + int msz; + int pg; + int rd; + int rm; + int rn; + int scale; + int u; + int xs; +} arg_rprr_gather_load; + +typedef struct { + int dtype; + int nreg; + int pg; + int rd; + int rm; + int rn; +} arg_rprr_load; + +typedef struct { + int pg; + int rd; + int rm; + int rn; + int s; +} arg_rprr_s; + +typedef struct { + int esz; + int msz; + int pg; + int rd; + int rm; + int rn; + int scale; + int xs; +} arg_rprr_scatter_store; + +typedef struct { + int esz; + int msz; + int nreg; + int pg; + int rd; + int rm; + int rn; +} arg_rprr_store; + +typedef struct { + int esz; + int pg; + int ra; + int rd; + int rm; + int rn; +} arg_rprrr_esz; + +typedef struct { + int dbm; + int rd; + int rn; +} arg_rr_dbm; + +typedef struct { + int esz; + int rd; + int rn; +} arg_rr_esz; + +typedef struct { + int imm; + int rd; + int rn; +} arg_rri; + +typedef struct { + int esz; + int imm; + int rd; + int rn; +} arg_rri_esz; + +typedef struct { + int esz; + int rd; + int rm; + int rn; +} arg_rrr_esz; + +typedef struct { + int imm; + int rd; + int rm; + int rn; +} arg_rrri; + +typedef arg_rprr_esz arg_ORR_zpzz; +static bool trans_ORR_zpzz(DisasContext *ctx, arg_ORR_zpzz *a); +typedef arg_rprr_esz arg_EOR_zpzz; +static bool trans_EOR_zpzz(DisasContext *ctx, arg_EOR_zpzz *a); +typedef arg_rprr_esz arg_AND_zpzz; +static bool trans_AND_zpzz(DisasContext *ctx, arg_AND_zpzz *a); +typedef arg_rprr_esz arg_BIC_zpzz; +static bool trans_BIC_zpzz(DisasContext *ctx, arg_BIC_zpzz *a); +typedef arg_rprr_esz arg_ADD_zpzz; +static bool trans_ADD_zpzz(DisasContext *ctx, arg_ADD_zpzz *a); +typedef arg_rprr_esz arg_SUB_zpzz; +static bool trans_SUB_zpzz(DisasContext *ctx, arg_SUB_zpzz *a); +typedef arg_rprr_esz arg_SMAX_zpzz; +static bool trans_SMAX_zpzz(DisasContext *ctx, arg_SMAX_zpzz *a); +typedef arg_rprr_esz arg_UMAX_zpzz; +static bool trans_UMAX_zpzz(DisasContext *ctx, arg_UMAX_zpzz *a); +typedef arg_rprr_esz arg_SMIN_zpzz; +static bool trans_SMIN_zpzz(DisasContext *ctx, arg_SMIN_zpzz *a); +typedef arg_rprr_esz arg_UMIN_zpzz; +static bool trans_UMIN_zpzz(DisasContext *ctx, arg_UMIN_zpzz *a); +typedef arg_rprr_esz arg_SABD_zpzz; +static bool trans_SABD_zpzz(DisasContext *ctx, arg_SABD_zpzz *a); +typedef arg_rprr_esz arg_UABD_zpzz; +static bool trans_UABD_zpzz(DisasContext *ctx, arg_UABD_zpzz *a); +typedef arg_rprr_esz arg_MUL_zpzz; +static bool trans_MUL_zpzz(DisasContext *ctx, arg_MUL_zpzz *a); +typedef arg_rprr_esz arg_SMULH_zpzz; +static bool trans_SMULH_zpzz(DisasContext *ctx, arg_SMULH_zpzz *a); +typedef arg_rprr_esz arg_UMULH_zpzz; +static bool trans_UMULH_zpzz(DisasContext *ctx, arg_UMULH_zpzz *a); +typedef arg_rprr_esz arg_SDIV_zpzz; +static bool trans_SDIV_zpzz(DisasContext *ctx, arg_SDIV_zpzz *a); +typedef arg_rprr_esz arg_UDIV_zpzz; +static bool trans_UDIV_zpzz(DisasContext *ctx, arg_UDIV_zpzz *a); +typedef arg_rpr_esz arg_ORV; +static bool trans_ORV(DisasContext *ctx, arg_ORV *a); +typedef arg_rpr_esz arg_EORV; +static bool trans_EORV(DisasContext *ctx, arg_EORV *a); +typedef arg_rpr_esz arg_ANDV; +static bool trans_ANDV(DisasContext *ctx, arg_ANDV 
*a); +typedef arg_rpr_esz arg_MOVPRFX_z; +static bool trans_MOVPRFX_z(DisasContext *ctx, arg_MOVPRFX_z *a); +typedef arg_rpr_esz arg_MOVPRFX_m; +static bool trans_MOVPRFX_m(DisasContext *ctx, arg_MOVPRFX_m *a); +typedef arg_rpr_esz arg_UADDV; +static bool trans_UADDV(DisasContext *ctx, arg_UADDV *a); +typedef arg_rpr_esz arg_SADDV; +static bool trans_SADDV(DisasContext *ctx, arg_SADDV *a); +typedef arg_rpr_esz arg_SMAXV; +static bool trans_SMAXV(DisasContext *ctx, arg_SMAXV *a); +typedef arg_rpr_esz arg_UMAXV; +static bool trans_UMAXV(DisasContext *ctx, arg_UMAXV *a); +typedef arg_rpr_esz arg_SMINV; +static bool trans_SMINV(DisasContext *ctx, arg_SMINV *a); +typedef arg_rpr_esz arg_UMINV; +static bool trans_UMINV(DisasContext *ctx, arg_UMINV *a); +typedef arg_rpri_esz arg_ASR_zpzi; +static bool trans_ASR_zpzi(DisasContext *ctx, arg_ASR_zpzi *a); +typedef arg_rpri_esz arg_LSR_zpzi; +static bool trans_LSR_zpzi(DisasContext *ctx, arg_LSR_zpzi *a); +typedef arg_rpri_esz arg_LSL_zpzi; +static bool trans_LSL_zpzi(DisasContext *ctx, arg_LSL_zpzi *a); +typedef arg_rpri_esz arg_ASRD; +static bool trans_ASRD(DisasContext *ctx, arg_ASRD *a); +typedef arg_rprr_esz arg_ASR_zpzz; +static bool trans_ASR_zpzz(DisasContext *ctx, arg_ASR_zpzz *a); +typedef arg_rprr_esz arg_LSR_zpzz; +static bool trans_LSR_zpzz(DisasContext *ctx, arg_LSR_zpzz *a); +typedef arg_rprr_esz arg_LSL_zpzz; +static bool trans_LSL_zpzz(DisasContext *ctx, arg_LSL_zpzz *a); +typedef arg_rprr_esz arg_ASR_zpzw; +static bool trans_ASR_zpzw(DisasContext *ctx, arg_ASR_zpzw *a); +typedef arg_rprr_esz arg_LSR_zpzw; +static bool trans_LSR_zpzw(DisasContext *ctx, arg_LSR_zpzw *a); +typedef arg_rprr_esz arg_LSL_zpzw; +static bool trans_LSL_zpzw(DisasContext *ctx, arg_LSL_zpzw *a); +typedef arg_rpr_esz arg_CLS; +static bool trans_CLS(DisasContext *ctx, arg_CLS *a); +typedef arg_rpr_esz arg_CLZ; +static bool trans_CLZ(DisasContext *ctx, arg_CLZ *a); +typedef arg_rpr_esz arg_CNT_zpz; +static bool trans_CNT_zpz(DisasContext *ctx, arg_CNT_zpz *a); +typedef arg_rpr_esz arg_CNOT; +static bool trans_CNOT(DisasContext *ctx, arg_CNOT *a); +typedef arg_rpr_esz arg_NOT_zpz; +static bool trans_NOT_zpz(DisasContext *ctx, arg_NOT_zpz *a); +typedef arg_rpr_esz arg_FABS; +static bool trans_FABS(DisasContext *ctx, arg_FABS *a); +typedef arg_rpr_esz arg_FNEG; +static bool trans_FNEG(DisasContext *ctx, arg_FNEG *a); +typedef arg_rpr_esz arg_ABS; +static bool trans_ABS(DisasContext *ctx, arg_ABS *a); +typedef arg_rpr_esz arg_NEG; +static bool trans_NEG(DisasContext *ctx, arg_NEG *a); +typedef arg_rpr_esz arg_SXTB; +static bool trans_SXTB(DisasContext *ctx, arg_SXTB *a); +typedef arg_rpr_esz arg_UXTB; +static bool trans_UXTB(DisasContext *ctx, arg_UXTB *a); +typedef arg_rpr_esz arg_SXTH; +static bool trans_SXTH(DisasContext *ctx, arg_SXTH *a); +typedef arg_rpr_esz arg_UXTH; +static bool trans_UXTH(DisasContext *ctx, arg_UXTH *a); +typedef arg_rpr_esz arg_SXTW; +static bool trans_SXTW(DisasContext *ctx, arg_SXTW *a); +typedef arg_rpr_esz arg_UXTW; +static bool trans_UXTW(DisasContext *ctx, arg_UXTW *a); +typedef arg_rprr_esz arg_FCMGE_ppzz; +static bool trans_FCMGE_ppzz(DisasContext *ctx, arg_FCMGE_ppzz *a); +typedef arg_rprr_esz arg_FCMGT_ppzz; +static bool trans_FCMGT_ppzz(DisasContext *ctx, arg_FCMGT_ppzz *a); +typedef arg_rprr_esz arg_FCMEQ_ppzz; +static bool trans_FCMEQ_ppzz(DisasContext *ctx, arg_FCMEQ_ppzz *a); +typedef arg_rprr_esz arg_FCMNE_ppzz; +static bool trans_FCMNE_ppzz(DisasContext *ctx, arg_FCMNE_ppzz *a); +typedef arg_rprr_esz arg_FCMUO_ppzz; +static 
bool trans_FCMUO_ppzz(DisasContext *ctx, arg_FCMUO_ppzz *a); +typedef arg_rprr_esz arg_FACGE_ppzz; +static bool trans_FACGE_ppzz(DisasContext *ctx, arg_FACGE_ppzz *a); +typedef arg_rprr_esz arg_FACGT_ppzz; +static bool trans_FACGT_ppzz(DisasContext *ctx, arg_FACGT_ppzz *a); +typedef arg_rprrr_esz arg_MLA; +static bool trans_MLA(DisasContext *ctx, arg_MLA *a); +typedef arg_rprrr_esz arg_MLS; +static bool trans_MLS(DisasContext *ctx, arg_MLS *a); +typedef arg_rrr_esz arg_ADD_zzz; +static bool trans_ADD_zzz(DisasContext *ctx, arg_ADD_zzz *a); +typedef arg_rrr_esz arg_SUB_zzz; +static bool trans_SUB_zzz(DisasContext *ctx, arg_SUB_zzz *a); +typedef arg_rrr_esz arg_SQADD_zzz; +static bool trans_SQADD_zzz(DisasContext *ctx, arg_SQADD_zzz *a); +typedef arg_rrr_esz arg_UQADD_zzz; +static bool trans_UQADD_zzz(DisasContext *ctx, arg_UQADD_zzz *a); +typedef arg_rrr_esz arg_SQSUB_zzz; +static bool trans_SQSUB_zzz(DisasContext *ctx, arg_SQSUB_zzz *a); +typedef arg_rrr_esz arg_UQSUB_zzz; +static bool trans_UQSUB_zzz(DisasContext *ctx, arg_UQSUB_zzz *a); +typedef arg_rrr_esz arg_AND_zzz; +static bool trans_AND_zzz(DisasContext *ctx, arg_AND_zzz *a); +typedef arg_rrr_esz arg_ORR_zzz; +static bool trans_ORR_zzz(DisasContext *ctx, arg_ORR_zzz *a); +typedef arg_rrr_esz arg_EOR_zzz; +static bool trans_EOR_zzz(DisasContext *ctx, arg_EOR_zzz *a); +typedef arg_rrr_esz arg_BIC_zzz; +static bool trans_BIC_zzz(DisasContext *ctx, arg_BIC_zzz *a); +typedef arg_disas_sve25 arg_INDEX_ii; +static bool trans_INDEX_ii(DisasContext *ctx, arg_INDEX_ii *a); +typedef arg_disas_sve26 arg_INDEX_ir; +static bool trans_INDEX_ir(DisasContext *ctx, arg_INDEX_ir *a); +typedef arg_rri_esz arg_INDEX_ri; +static bool trans_INDEX_ri(DisasContext *ctx, arg_INDEX_ri *a); +typedef arg_rrr_esz arg_INDEX_rr; +static bool trans_INDEX_rr(DisasContext *ctx, arg_INDEX_rr *a); +typedef arg_rri arg_ADDVL; +static bool trans_ADDVL(DisasContext *ctx, arg_ADDVL *a); +typedef arg_rri arg_ADDPL; +static bool trans_ADDPL(DisasContext *ctx, arg_ADDPL *a); +typedef arg_disas_sve27 arg_RDVL; +static bool trans_RDVL(DisasContext *ctx, arg_RDVL *a); +typedef arg_rri_esz arg_ASR_zzi; +static bool trans_ASR_zzi(DisasContext *ctx, arg_ASR_zzi *a); +typedef arg_rri_esz arg_LSR_zzi; +static bool trans_LSR_zzi(DisasContext *ctx, arg_LSR_zzi *a); +typedef arg_rri_esz arg_LSL_zzi; +static bool trans_LSL_zzi(DisasContext *ctx, arg_LSL_zzi *a); +typedef arg_rrr_esz arg_ASR_zzw; +static bool trans_ASR_zzw(DisasContext *ctx, arg_ASR_zzw *a); +typedef arg_rrr_esz arg_LSR_zzw; +static bool trans_LSR_zzw(DisasContext *ctx, arg_LSR_zzw *a); +typedef arg_rrr_esz arg_LSL_zzw; +static bool trans_LSL_zzw(DisasContext *ctx, arg_LSL_zzw *a); +typedef arg_rrri arg_ADR_s32; +static bool trans_ADR_s32(DisasContext *ctx, arg_ADR_s32 *a); +typedef arg_rrri arg_ADR_u32; +static bool trans_ADR_u32(DisasContext *ctx, arg_ADR_u32 *a); +typedef arg_rrri arg_ADR_p32; +static bool trans_ADR_p32(DisasContext *ctx, arg_ADR_p32 *a); +typedef arg_rrri arg_ADR_p64; +static bool trans_ADR_p64(DisasContext *ctx, arg_ADR_p64 *a); +typedef arg_disas_sve28 arg_MOVPRFX; +static bool trans_MOVPRFX(DisasContext *ctx, arg_MOVPRFX *a); +typedef arg_rr_esz arg_FEXPA; +static bool trans_FEXPA(DisasContext *ctx, arg_FEXPA *a); +typedef arg_rrr_esz arg_FTSSEL; +static bool trans_FTSSEL(DisasContext *ctx, arg_FTSSEL *a); +typedef arg_incdec_cnt arg_CNT_r; +static bool trans_CNT_r(DisasContext *ctx, arg_CNT_r *a); +typedef arg_incdec_cnt arg_INCDEC_r; +static bool trans_INCDEC_r(DisasContext *ctx, arg_INCDEC_r 
*a); +typedef arg_incdec_cnt arg_SINCDEC_r_32; +static bool trans_SINCDEC_r_32(DisasContext *ctx, arg_SINCDEC_r_32 *a); +typedef arg_incdec_cnt arg_SINCDEC_r_64; +static bool trans_SINCDEC_r_64(DisasContext *ctx, arg_SINCDEC_r_64 *a); +typedef arg_incdec2_cnt arg_INCDEC_v; +static bool trans_INCDEC_v(DisasContext *ctx, arg_INCDEC_v *a); +typedef arg_incdec2_cnt arg_SINCDEC_v; +static bool trans_SINCDEC_v(DisasContext *ctx, arg_SINCDEC_v *a); +typedef arg_rr_dbm arg_ORR_zzi; +static bool trans_ORR_zzi(DisasContext *ctx, arg_ORR_zzi *a); +typedef arg_rr_dbm arg_EOR_zzi; +static bool trans_EOR_zzi(DisasContext *ctx, arg_EOR_zzi *a); +typedef arg_rr_dbm arg_AND_zzi; +static bool trans_AND_zzi(DisasContext *ctx, arg_AND_zzi *a); +typedef arg_disas_sve29 arg_DUPM; +static bool trans_DUPM(DisasContext *ctx, arg_DUPM *a); +typedef arg_rpri_esz arg_FCPY; +static bool trans_FCPY(DisasContext *ctx, arg_FCPY *a); +typedef arg_rpri_esz arg_CPY_m_i; +static bool trans_CPY_m_i(DisasContext *ctx, arg_CPY_m_i *a); +typedef arg_rpri_esz arg_CPY_z_i; +static bool trans_CPY_z_i(DisasContext *ctx, arg_CPY_z_i *a); +typedef arg_rrri arg_EXT; +static bool trans_EXT(DisasContext *ctx, arg_EXT *a); +typedef arg_rr_esz arg_DUP_s; +static bool trans_DUP_s(DisasContext *ctx, arg_DUP_s *a); +typedef arg_rri arg_DUP_x; +static bool trans_DUP_x(DisasContext *ctx, arg_DUP_x *a); +typedef arg_rrr_esz arg_INSR_f; +static bool trans_INSR_f(DisasContext *ctx, arg_INSR_f *a); +typedef arg_rrr_esz arg_INSR_r; +static bool trans_INSR_r(DisasContext *ctx, arg_INSR_r *a); +typedef arg_rr_esz arg_REV_v; +static bool trans_REV_v(DisasContext *ctx, arg_REV_v *a); +typedef arg_rrr_esz arg_TBL; +static bool trans_TBL(DisasContext *ctx, arg_TBL *a); +typedef arg_disas_sve30 arg_UNPK; +static bool trans_UNPK(DisasContext *ctx, arg_UNPK *a); +typedef arg_rrr_esz arg_ZIP1_p; +static bool trans_ZIP1_p(DisasContext *ctx, arg_ZIP1_p *a); +typedef arg_rrr_esz arg_ZIP2_p; +static bool trans_ZIP2_p(DisasContext *ctx, arg_ZIP2_p *a); +typedef arg_rrr_esz arg_UZP1_p; +static bool trans_UZP1_p(DisasContext *ctx, arg_UZP1_p *a); +typedef arg_rrr_esz arg_UZP2_p; +static bool trans_UZP2_p(DisasContext *ctx, arg_UZP2_p *a); +typedef arg_rrr_esz arg_TRN1_p; +static bool trans_TRN1_p(DisasContext *ctx, arg_TRN1_p *a); +typedef arg_rrr_esz arg_TRN2_p; +static bool trans_TRN2_p(DisasContext *ctx, arg_TRN2_p *a); +typedef arg_rr_esz arg_REV_p; +static bool trans_REV_p(DisasContext *ctx, arg_REV_p *a); +typedef arg_rr_esz arg_PUNPKLO; +static bool trans_PUNPKLO(DisasContext *ctx, arg_PUNPKLO *a); +typedef arg_rr_esz arg_PUNPKHI; +static bool trans_PUNPKHI(DisasContext *ctx, arg_PUNPKHI *a); +typedef arg_rrr_esz arg_ZIP1_z; +static bool trans_ZIP1_z(DisasContext *ctx, arg_ZIP1_z *a); +typedef arg_rrr_esz arg_ZIP2_z; +static bool trans_ZIP2_z(DisasContext *ctx, arg_ZIP2_z *a); +typedef arg_rrr_esz arg_UZP1_z; +static bool trans_UZP1_z(DisasContext *ctx, arg_UZP1_z *a); +typedef arg_rrr_esz arg_UZP2_z; +static bool trans_UZP2_z(DisasContext *ctx, arg_UZP2_z *a); +typedef arg_rrr_esz arg_TRN1_z; +static bool trans_TRN1_z(DisasContext *ctx, arg_TRN1_z *a); +typedef arg_rrr_esz arg_TRN2_z; +static bool trans_TRN2_z(DisasContext *ctx, arg_TRN2_z *a); +typedef arg_rpr_esz arg_COMPACT; +static bool trans_COMPACT(DisasContext *ctx, arg_COMPACT *a); +typedef arg_rprr_esz arg_CLASTA_z; +static bool trans_CLASTA_z(DisasContext *ctx, arg_CLASTA_z *a); +typedef arg_rprr_esz arg_CLASTB_z; +static bool trans_CLASTB_z(DisasContext *ctx, arg_CLASTB_z *a); +typedef arg_rpr_esz 
arg_CLASTA_v; +static bool trans_CLASTA_v(DisasContext *ctx, arg_CLASTA_v *a); +typedef arg_rpr_esz arg_CLASTB_v; +static bool trans_CLASTB_v(DisasContext *ctx, arg_CLASTB_v *a); +typedef arg_rpr_esz arg_CLASTA_r; +static bool trans_CLASTA_r(DisasContext *ctx, arg_CLASTA_r *a); +typedef arg_rpr_esz arg_CLASTB_r; +static bool trans_CLASTB_r(DisasContext *ctx, arg_CLASTB_r *a); +typedef arg_rpr_esz arg_LASTA_v; +static bool trans_LASTA_v(DisasContext *ctx, arg_LASTA_v *a); +typedef arg_rpr_esz arg_LASTB_v; +static bool trans_LASTB_v(DisasContext *ctx, arg_LASTB_v *a); +typedef arg_rpr_esz arg_LASTA_r; +static bool trans_LASTA_r(DisasContext *ctx, arg_LASTA_r *a); +typedef arg_rpr_esz arg_LASTB_r; +static bool trans_LASTB_r(DisasContext *ctx, arg_LASTB_r *a); +typedef arg_rpr_esz arg_CPY_m_v; +static bool trans_CPY_m_v(DisasContext *ctx, arg_CPY_m_v *a); +typedef arg_rpr_esz arg_CPY_m_r; +static bool trans_CPY_m_r(DisasContext *ctx, arg_CPY_m_r *a); +typedef arg_rpr_esz arg_REVB; +static bool trans_REVB(DisasContext *ctx, arg_REVB *a); +typedef arg_rpr_esz arg_REVH; +static bool trans_REVH(DisasContext *ctx, arg_REVH *a); +typedef arg_rpr_esz arg_REVW; +static bool trans_REVW(DisasContext *ctx, arg_REVW *a); +typedef arg_rpr_esz arg_RBIT; +static bool trans_RBIT(DisasContext *ctx, arg_RBIT *a); +typedef arg_rprr_esz arg_SPLICE; +static bool trans_SPLICE(DisasContext *ctx, arg_SPLICE *a); +typedef arg_rprr_esz arg_SEL_zpzz; +static bool trans_SEL_zpzz(DisasContext *ctx, arg_SEL_zpzz *a); +typedef arg_rprr_esz arg_CMPHS_ppzz; +static bool trans_CMPHS_ppzz(DisasContext *ctx, arg_CMPHS_ppzz *a); +typedef arg_rprr_esz arg_CMPHI_ppzz; +static bool trans_CMPHI_ppzz(DisasContext *ctx, arg_CMPHI_ppzz *a); +typedef arg_rprr_esz arg_CMPGE_ppzz; +static bool trans_CMPGE_ppzz(DisasContext *ctx, arg_CMPGE_ppzz *a); +typedef arg_rprr_esz arg_CMPGT_ppzz; +static bool trans_CMPGT_ppzz(DisasContext *ctx, arg_CMPGT_ppzz *a); +typedef arg_rprr_esz arg_CMPEQ_ppzz; +static bool trans_CMPEQ_ppzz(DisasContext *ctx, arg_CMPEQ_ppzz *a); +typedef arg_rprr_esz arg_CMPNE_ppzz; +static bool trans_CMPNE_ppzz(DisasContext *ctx, arg_CMPNE_ppzz *a); +typedef arg_rprr_esz arg_CMPEQ_ppzw; +static bool trans_CMPEQ_ppzw(DisasContext *ctx, arg_CMPEQ_ppzw *a); +typedef arg_rprr_esz arg_CMPNE_ppzw; +static bool trans_CMPNE_ppzw(DisasContext *ctx, arg_CMPNE_ppzw *a); +typedef arg_rprr_esz arg_CMPGE_ppzw; +static bool trans_CMPGE_ppzw(DisasContext *ctx, arg_CMPGE_ppzw *a); +typedef arg_rprr_esz arg_CMPGT_ppzw; +static bool trans_CMPGT_ppzw(DisasContext *ctx, arg_CMPGT_ppzw *a); +typedef arg_rprr_esz arg_CMPLT_ppzw; +static bool trans_CMPLT_ppzw(DisasContext *ctx, arg_CMPLT_ppzw *a); +typedef arg_rprr_esz arg_CMPLE_ppzw; +static bool trans_CMPLE_ppzw(DisasContext *ctx, arg_CMPLE_ppzw *a); +typedef arg_rprr_esz arg_CMPHS_ppzw; +static bool trans_CMPHS_ppzw(DisasContext *ctx, arg_CMPHS_ppzw *a); +typedef arg_rprr_esz arg_CMPHI_ppzw; +static bool trans_CMPHI_ppzw(DisasContext *ctx, arg_CMPHI_ppzw *a); +typedef arg_rprr_esz arg_CMPLO_ppzw; +static bool trans_CMPLO_ppzw(DisasContext *ctx, arg_CMPLO_ppzw *a); +typedef arg_rprr_esz arg_CMPLS_ppzw; +static bool trans_CMPLS_ppzw(DisasContext *ctx, arg_CMPLS_ppzw *a); +typedef arg_rpri_esz arg_CMPHS_ppzi; +static bool trans_CMPHS_ppzi(DisasContext *ctx, arg_CMPHS_ppzi *a); +typedef arg_rpri_esz arg_CMPHI_ppzi; +static bool trans_CMPHI_ppzi(DisasContext *ctx, arg_CMPHI_ppzi *a); +typedef arg_rpri_esz arg_CMPLO_ppzi; +static bool trans_CMPLO_ppzi(DisasContext *ctx, arg_CMPLO_ppzi *a); +typedef 
arg_rpri_esz arg_CMPLS_ppzi; +static bool trans_CMPLS_ppzi(DisasContext *ctx, arg_CMPLS_ppzi *a); +typedef arg_rpri_esz arg_CMPGE_ppzi; +static bool trans_CMPGE_ppzi(DisasContext *ctx, arg_CMPGE_ppzi *a); +typedef arg_rpri_esz arg_CMPGT_ppzi; +static bool trans_CMPGT_ppzi(DisasContext *ctx, arg_CMPGT_ppzi *a); +typedef arg_rpri_esz arg_CMPLT_ppzi; +static bool trans_CMPLT_ppzi(DisasContext *ctx, arg_CMPLT_ppzi *a); +typedef arg_rpri_esz arg_CMPLE_ppzi; +static bool trans_CMPLE_ppzi(DisasContext *ctx, arg_CMPLE_ppzi *a); +typedef arg_rpri_esz arg_CMPEQ_ppzi; +static bool trans_CMPEQ_ppzi(DisasContext *ctx, arg_CMPEQ_ppzi *a); +typedef arg_rpri_esz arg_CMPNE_ppzi; +static bool trans_CMPNE_ppzi(DisasContext *ctx, arg_CMPNE_ppzi *a); +typedef arg_rprr_s arg_AND_pppp; +static bool trans_AND_pppp(DisasContext *ctx, arg_AND_pppp *a); +typedef arg_rprr_s arg_BIC_pppp; +static bool trans_BIC_pppp(DisasContext *ctx, arg_BIC_pppp *a); +typedef arg_rprr_s arg_EOR_pppp; +static bool trans_EOR_pppp(DisasContext *ctx, arg_EOR_pppp *a); +typedef arg_rprr_s arg_SEL_pppp; +static bool trans_SEL_pppp(DisasContext *ctx, arg_SEL_pppp *a); +typedef arg_rprr_s arg_ORR_pppp; +static bool trans_ORR_pppp(DisasContext *ctx, arg_ORR_pppp *a); +typedef arg_rprr_s arg_ORN_pppp; +static bool trans_ORN_pppp(DisasContext *ctx, arg_ORN_pppp *a); +typedef arg_rprr_s arg_NOR_pppp; +static bool trans_NOR_pppp(DisasContext *ctx, arg_NOR_pppp *a); +typedef arg_rprr_s arg_NAND_pppp; +static bool trans_NAND_pppp(DisasContext *ctx, arg_NAND_pppp *a); +typedef arg_disas_sve31 arg_PTEST; +static bool trans_PTEST(DisasContext *ctx, arg_PTEST *a); +typedef arg_ptrue arg_PTRUE; +static bool trans_PTRUE(DisasContext *ctx, arg_PTRUE *a); +typedef arg_disas_sve32 arg_SETFFR; +static bool trans_SETFFR(DisasContext *ctx, arg_SETFFR *a); +typedef arg_disas_sve33 arg_PFALSE; +static bool trans_PFALSE(DisasContext *ctx, arg_PFALSE *a); +typedef arg_disas_sve34 arg_RDFFR_p; +static bool trans_RDFFR_p(DisasContext *ctx, arg_RDFFR_p *a); +typedef arg_disas_sve33 arg_RDFFR; +static bool trans_RDFFR(DisasContext *ctx, arg_RDFFR *a); +typedef arg_disas_sve35 arg_WRFFR; +static bool trans_WRFFR(DisasContext *ctx, arg_WRFFR *a); +typedef arg_rr_esz arg_PFIRST; +static bool trans_PFIRST(DisasContext *ctx, arg_PFIRST *a); +typedef arg_rr_esz arg_PNEXT; +static bool trans_PNEXT(DisasContext *ctx, arg_PNEXT *a); +typedef arg_rprr_s arg_BRKPA; +static bool trans_BRKPA(DisasContext *ctx, arg_BRKPA *a); +typedef arg_rprr_s arg_BRKPB; +static bool trans_BRKPB(DisasContext *ctx, arg_BRKPB *a); +typedef arg_rpr_s arg_BRKA_z; +static bool trans_BRKA_z(DisasContext *ctx, arg_BRKA_z *a); +typedef arg_rpr_s arg_BRKB_z; +static bool trans_BRKB_z(DisasContext *ctx, arg_BRKB_z *a); +typedef arg_rpr_s arg_BRKA_m; +static bool trans_BRKA_m(DisasContext *ctx, arg_BRKA_m *a); +typedef arg_rpr_s arg_BRKB_m; +static bool trans_BRKB_m(DisasContext *ctx, arg_BRKB_m *a); +typedef arg_rpr_s arg_BRKN; +static bool trans_BRKN(DisasContext *ctx, arg_BRKN *a); +typedef arg_rpr_esz arg_CNTP; +static bool trans_CNTP(DisasContext *ctx, arg_CNTP *a); +typedef arg_incdec_pred arg_INCDECP_r; +static bool trans_INCDECP_r(DisasContext *ctx, arg_INCDECP_r *a); +typedef arg_incdec2_pred arg_INCDECP_z; +static bool trans_INCDECP_z(DisasContext *ctx, arg_INCDECP_z *a); +typedef arg_incdec_pred arg_SINCDECP_r_32; +static bool trans_SINCDECP_r_32(DisasContext *ctx, arg_SINCDECP_r_32 *a); +typedef arg_incdec_pred arg_SINCDECP_r_64; +static bool trans_SINCDECP_r_64(DisasContext *ctx, 
arg_SINCDECP_r_64 *a); +typedef arg_incdec2_pred arg_SINCDECP_z; +static bool trans_SINCDECP_z(DisasContext *ctx, arg_SINCDECP_z *a); +typedef arg_disas_sve36 arg_CTERM; +static bool trans_CTERM(DisasContext *ctx, arg_CTERM *a); +typedef arg_disas_sve37 arg_WHILE; +static bool trans_WHILE(DisasContext *ctx, arg_WHILE *a); +typedef arg_disas_sve38 arg_FDUP; +static bool trans_FDUP(DisasContext *ctx, arg_FDUP *a); +typedef arg_disas_sve38 arg_DUP_i; +static bool trans_DUP_i(DisasContext *ctx, arg_DUP_i *a); +typedef arg_rri_esz arg_ADD_zzi; +static bool trans_ADD_zzi(DisasContext *ctx, arg_ADD_zzi *a); +typedef arg_rri_esz arg_SUB_zzi; +static bool trans_SUB_zzi(DisasContext *ctx, arg_SUB_zzi *a); +typedef arg_rri_esz arg_SUBR_zzi; +static bool trans_SUBR_zzi(DisasContext *ctx, arg_SUBR_zzi *a); +typedef arg_rri_esz arg_SQADD_zzi; +static bool trans_SQADD_zzi(DisasContext *ctx, arg_SQADD_zzi *a); +typedef arg_rri_esz arg_UQADD_zzi; +static bool trans_UQADD_zzi(DisasContext *ctx, arg_UQADD_zzi *a); +typedef arg_rri_esz arg_SQSUB_zzi; +static bool trans_SQSUB_zzi(DisasContext *ctx, arg_SQSUB_zzi *a); +typedef arg_rri_esz arg_UQSUB_zzi; +static bool trans_UQSUB_zzi(DisasContext *ctx, arg_UQSUB_zzi *a); +typedef arg_rri_esz arg_SMAX_zzi; +static bool trans_SMAX_zzi(DisasContext *ctx, arg_SMAX_zzi *a); +typedef arg_rri_esz arg_UMAX_zzi; +static bool trans_UMAX_zzi(DisasContext *ctx, arg_UMAX_zzi *a); +typedef arg_rri_esz arg_SMIN_zzi; +static bool trans_SMIN_zzi(DisasContext *ctx, arg_SMIN_zzi *a); +typedef arg_rri_esz arg_UMIN_zzi; +static bool trans_UMIN_zzi(DisasContext *ctx, arg_UMIN_zzi *a); +typedef arg_rri_esz arg_MUL_zzi; +static bool trans_MUL_zzi(DisasContext *ctx, arg_MUL_zzi *a); +typedef arg_disas_sve39 arg_DOT_zzz; +static bool trans_DOT_zzz(DisasContext *ctx, arg_DOT_zzz *a); +typedef arg_disas_sve40 arg_DOT_zzx; +static bool trans_DOT_zzx(DisasContext *ctx, arg_DOT_zzx *a); +typedef arg_disas_sve41 arg_FCADD; +static bool trans_FCADD(DisasContext *ctx, arg_FCADD *a); +typedef arg_disas_sve42 arg_FCMLA_zpzzz; +static bool trans_FCMLA_zpzzz(DisasContext *ctx, arg_FCMLA_zpzzz *a); +typedef arg_disas_sve43 arg_FCMLA_zzxz; +static bool trans_FCMLA_zzxz(DisasContext *ctx, arg_FCMLA_zzxz *a); +typedef arg_disas_sve44 arg_FMLA_zzxz; +static bool trans_FMLA_zzxz(DisasContext *ctx, arg_FMLA_zzxz *a); +typedef arg_disas_sve45 arg_FMUL_zzx; +static bool trans_FMUL_zzx(DisasContext *ctx, arg_FMUL_zzx *a); +typedef arg_rpr_esz arg_FADDV; +static bool trans_FADDV(DisasContext *ctx, arg_FADDV *a); +typedef arg_rpr_esz arg_FMAXNMV; +static bool trans_FMAXNMV(DisasContext *ctx, arg_FMAXNMV *a); +typedef arg_rpr_esz arg_FMINNMV; +static bool trans_FMINNMV(DisasContext *ctx, arg_FMINNMV *a); +typedef arg_rpr_esz arg_FMAXV; +static bool trans_FMAXV(DisasContext *ctx, arg_FMAXV *a); +typedef arg_rpr_esz arg_FMINV; +static bool trans_FMINV(DisasContext *ctx, arg_FMINV *a); +typedef arg_rr_esz arg_FRECPE; +static bool trans_FRECPE(DisasContext *ctx, arg_FRECPE *a); +typedef arg_rr_esz arg_FRSQRTE; +static bool trans_FRSQRTE(DisasContext *ctx, arg_FRSQRTE *a); +typedef arg_rpr_esz arg_FCMGE_ppz0; +static bool trans_FCMGE_ppz0(DisasContext *ctx, arg_FCMGE_ppz0 *a); +typedef arg_rpr_esz arg_FCMGT_ppz0; +static bool trans_FCMGT_ppz0(DisasContext *ctx, arg_FCMGT_ppz0 *a); +typedef arg_rpr_esz arg_FCMLT_ppz0; +static bool trans_FCMLT_ppz0(DisasContext *ctx, arg_FCMLT_ppz0 *a); +typedef arg_rpr_esz arg_FCMLE_ppz0; +static bool trans_FCMLE_ppz0(DisasContext *ctx, arg_FCMLE_ppz0 *a); +typedef arg_rpr_esz 
arg_FCMEQ_ppz0; +static bool trans_FCMEQ_ppz0(DisasContext *ctx, arg_FCMEQ_ppz0 *a); +typedef arg_rpr_esz arg_FCMNE_ppz0; +static bool trans_FCMNE_ppz0(DisasContext *ctx, arg_FCMNE_ppz0 *a); +typedef arg_rprr_esz arg_FADDA; +static bool trans_FADDA(DisasContext *ctx, arg_FADDA *a); +typedef arg_rrr_esz arg_FADD_zzz; +static bool trans_FADD_zzz(DisasContext *ctx, arg_FADD_zzz *a); +typedef arg_rrr_esz arg_FSUB_zzz; +static bool trans_FSUB_zzz(DisasContext *ctx, arg_FSUB_zzz *a); +typedef arg_rrr_esz arg_FMUL_zzz; +static bool trans_FMUL_zzz(DisasContext *ctx, arg_FMUL_zzz *a); +typedef arg_rrr_esz arg_FTSMUL; +static bool trans_FTSMUL(DisasContext *ctx, arg_FTSMUL *a); +typedef arg_rrr_esz arg_FRECPS; +static bool trans_FRECPS(DisasContext *ctx, arg_FRECPS *a); +typedef arg_rrr_esz arg_FRSQRTS; +static bool trans_FRSQRTS(DisasContext *ctx, arg_FRSQRTS *a); +typedef arg_rprr_esz arg_FADD_zpzz; +static bool trans_FADD_zpzz(DisasContext *ctx, arg_FADD_zpzz *a); +typedef arg_rprr_esz arg_FSUB_zpzz; +static bool trans_FSUB_zpzz(DisasContext *ctx, arg_FSUB_zpzz *a); +typedef arg_rprr_esz arg_FMUL_zpzz; +static bool trans_FMUL_zpzz(DisasContext *ctx, arg_FMUL_zpzz *a); +typedef arg_rprr_esz arg_FMAXNM_zpzz; +static bool trans_FMAXNM_zpzz(DisasContext *ctx, arg_FMAXNM_zpzz *a); +typedef arg_rprr_esz arg_FMINNM_zpzz; +static bool trans_FMINNM_zpzz(DisasContext *ctx, arg_FMINNM_zpzz *a); +typedef arg_rprr_esz arg_FMAX_zpzz; +static bool trans_FMAX_zpzz(DisasContext *ctx, arg_FMAX_zpzz *a); +typedef arg_rprr_esz arg_FMIN_zpzz; +static bool trans_FMIN_zpzz(DisasContext *ctx, arg_FMIN_zpzz *a); +typedef arg_rprr_esz arg_FABD; +static bool trans_FABD(DisasContext *ctx, arg_FABD *a); +typedef arg_rprr_esz arg_FSCALE; +static bool trans_FSCALE(DisasContext *ctx, arg_FSCALE *a); +typedef arg_rprr_esz arg_FMULX; +static bool trans_FMULX(DisasContext *ctx, arg_FMULX *a); +typedef arg_rprr_esz arg_FDIV; +static bool trans_FDIV(DisasContext *ctx, arg_FDIV *a); +typedef arg_rpri_esz arg_FADD_zpzi; +static bool trans_FADD_zpzi(DisasContext *ctx, arg_FADD_zpzi *a); +typedef arg_rpri_esz arg_FSUB_zpzi; +static bool trans_FSUB_zpzi(DisasContext *ctx, arg_FSUB_zpzi *a); +typedef arg_rpri_esz arg_FMUL_zpzi; +static bool trans_FMUL_zpzi(DisasContext *ctx, arg_FMUL_zpzi *a); +typedef arg_rpri_esz arg_FSUBR_zpzi; +static bool trans_FSUBR_zpzi(DisasContext *ctx, arg_FSUBR_zpzi *a); +typedef arg_rpri_esz arg_FMAXNM_zpzi; +static bool trans_FMAXNM_zpzi(DisasContext *ctx, arg_FMAXNM_zpzi *a); +typedef arg_rpri_esz arg_FMINNM_zpzi; +static bool trans_FMINNM_zpzi(DisasContext *ctx, arg_FMINNM_zpzi *a); +typedef arg_rpri_esz arg_FMAX_zpzi; +static bool trans_FMAX_zpzi(DisasContext *ctx, arg_FMAX_zpzi *a); +typedef arg_rpri_esz arg_FMIN_zpzi; +static bool trans_FMIN_zpzi(DisasContext *ctx, arg_FMIN_zpzi *a); +typedef arg_disas_sve46 arg_FTMAD; +static bool trans_FTMAD(DisasContext *ctx, arg_FTMAD *a); +typedef arg_rprrr_esz arg_FMLA_zpzzz; +static bool trans_FMLA_zpzzz(DisasContext *ctx, arg_FMLA_zpzzz *a); +typedef arg_rprrr_esz arg_FMLS_zpzzz; +static bool trans_FMLS_zpzzz(DisasContext *ctx, arg_FMLS_zpzzz *a); +typedef arg_rprrr_esz arg_FNMLA_zpzzz; +static bool trans_FNMLA_zpzzz(DisasContext *ctx, arg_FNMLA_zpzzz *a); +typedef arg_rprrr_esz arg_FNMLS_zpzzz; +static bool trans_FNMLS_zpzzz(DisasContext *ctx, arg_FNMLS_zpzzz *a); +typedef arg_rpr_esz arg_FCVT_sh; +static bool trans_FCVT_sh(DisasContext *ctx, arg_FCVT_sh *a); +typedef arg_rpr_esz arg_FCVT_hs; +static bool trans_FCVT_hs(DisasContext *ctx, arg_FCVT_hs *a); 
+typedef arg_rpr_esz arg_FCVT_dh; +static bool trans_FCVT_dh(DisasContext *ctx, arg_FCVT_dh *a); +typedef arg_rpr_esz arg_FCVT_hd; +static bool trans_FCVT_hd(DisasContext *ctx, arg_FCVT_hd *a); +typedef arg_rpr_esz arg_FCVT_ds; +static bool trans_FCVT_ds(DisasContext *ctx, arg_FCVT_ds *a); +typedef arg_rpr_esz arg_FCVT_sd; +static bool trans_FCVT_sd(DisasContext *ctx, arg_FCVT_sd *a); +typedef arg_rpr_esz arg_FCVTZS_hh; +static bool trans_FCVTZS_hh(DisasContext *ctx, arg_FCVTZS_hh *a); +typedef arg_rpr_esz arg_FCVTZU_hh; +static bool trans_FCVTZU_hh(DisasContext *ctx, arg_FCVTZU_hh *a); +typedef arg_rpr_esz arg_FCVTZS_hs; +static bool trans_FCVTZS_hs(DisasContext *ctx, arg_FCVTZS_hs *a); +typedef arg_rpr_esz arg_FCVTZU_hs; +static bool trans_FCVTZU_hs(DisasContext *ctx, arg_FCVTZU_hs *a); +typedef arg_rpr_esz arg_FCVTZS_hd; +static bool trans_FCVTZS_hd(DisasContext *ctx, arg_FCVTZS_hd *a); +typedef arg_rpr_esz arg_FCVTZU_hd; +static bool trans_FCVTZU_hd(DisasContext *ctx, arg_FCVTZU_hd *a); +typedef arg_rpr_esz arg_FCVTZS_ss; +static bool trans_FCVTZS_ss(DisasContext *ctx, arg_FCVTZS_ss *a); +typedef arg_rpr_esz arg_FCVTZU_ss; +static bool trans_FCVTZU_ss(DisasContext *ctx, arg_FCVTZU_ss *a); +typedef arg_rpr_esz arg_FCVTZS_ds; +static bool trans_FCVTZS_ds(DisasContext *ctx, arg_FCVTZS_ds *a); +typedef arg_rpr_esz arg_FCVTZU_ds; +static bool trans_FCVTZU_ds(DisasContext *ctx, arg_FCVTZU_ds *a); +typedef arg_rpr_esz arg_FCVTZS_sd; +static bool trans_FCVTZS_sd(DisasContext *ctx, arg_FCVTZS_sd *a); +typedef arg_rpr_esz arg_FCVTZU_sd; +static bool trans_FCVTZU_sd(DisasContext *ctx, arg_FCVTZU_sd *a); +typedef arg_rpr_esz arg_FCVTZS_dd; +static bool trans_FCVTZS_dd(DisasContext *ctx, arg_FCVTZS_dd *a); +typedef arg_rpr_esz arg_FCVTZU_dd; +static bool trans_FCVTZU_dd(DisasContext *ctx, arg_FCVTZU_dd *a); +typedef arg_rpr_esz arg_FRINTN; +static bool trans_FRINTN(DisasContext *ctx, arg_FRINTN *a); +typedef arg_rpr_esz arg_FRINTP; +static bool trans_FRINTP(DisasContext *ctx, arg_FRINTP *a); +typedef arg_rpr_esz arg_FRINTM; +static bool trans_FRINTM(DisasContext *ctx, arg_FRINTM *a); +typedef arg_rpr_esz arg_FRINTZ; +static bool trans_FRINTZ(DisasContext *ctx, arg_FRINTZ *a); +typedef arg_rpr_esz arg_FRINTA; +static bool trans_FRINTA(DisasContext *ctx, arg_FRINTA *a); +typedef arg_rpr_esz arg_FRINTX; +static bool trans_FRINTX(DisasContext *ctx, arg_FRINTX *a); +typedef arg_rpr_esz arg_FRINTI; +static bool trans_FRINTI(DisasContext *ctx, arg_FRINTI *a); +typedef arg_rpr_esz arg_FRECPX; +static bool trans_FRECPX(DisasContext *ctx, arg_FRECPX *a); +typedef arg_rpr_esz arg_FSQRT; +static bool trans_FSQRT(DisasContext *ctx, arg_FSQRT *a); +typedef arg_rpr_esz arg_SCVTF_hh; +static bool trans_SCVTF_hh(DisasContext *ctx, arg_SCVTF_hh *a); +typedef arg_rpr_esz arg_SCVTF_sh; +static bool trans_SCVTF_sh(DisasContext *ctx, arg_SCVTF_sh *a); +typedef arg_rpr_esz arg_SCVTF_dh; +static bool trans_SCVTF_dh(DisasContext *ctx, arg_SCVTF_dh *a); +typedef arg_rpr_esz arg_SCVTF_ss; +static bool trans_SCVTF_ss(DisasContext *ctx, arg_SCVTF_ss *a); +typedef arg_rpr_esz arg_SCVTF_sd; +static bool trans_SCVTF_sd(DisasContext *ctx, arg_SCVTF_sd *a); +typedef arg_rpr_esz arg_SCVTF_ds; +static bool trans_SCVTF_ds(DisasContext *ctx, arg_SCVTF_ds *a); +typedef arg_rpr_esz arg_SCVTF_dd; +static bool trans_SCVTF_dd(DisasContext *ctx, arg_SCVTF_dd *a); +typedef arg_rpr_esz arg_UCVTF_hh; +static bool trans_UCVTF_hh(DisasContext *ctx, arg_UCVTF_hh *a); +typedef arg_rpr_esz arg_UCVTF_sh; +static bool trans_UCVTF_sh(DisasContext *ctx, 
arg_UCVTF_sh *a); +typedef arg_rpr_esz arg_UCVTF_dh; +static bool trans_UCVTF_dh(DisasContext *ctx, arg_UCVTF_dh *a); +typedef arg_rpr_esz arg_UCVTF_ss; +static bool trans_UCVTF_ss(DisasContext *ctx, arg_UCVTF_ss *a); +typedef arg_rpr_esz arg_UCVTF_sd; +static bool trans_UCVTF_sd(DisasContext *ctx, arg_UCVTF_sd *a); +typedef arg_rpr_esz arg_UCVTF_ds; +static bool trans_UCVTF_ds(DisasContext *ctx, arg_UCVTF_ds *a); +typedef arg_rpr_esz arg_UCVTF_dd; +static bool trans_UCVTF_dd(DisasContext *ctx, arg_UCVTF_dd *a); +typedef arg_rri arg_LDR_pri; +static bool trans_LDR_pri(DisasContext *ctx, arg_LDR_pri *a); +typedef arg_rri arg_LDR_zri; +static bool trans_LDR_zri(DisasContext *ctx, arg_LDR_zri *a); +typedef arg_rpri_load arg_LD1R_zpri; +static bool trans_LD1R_zpri(DisasContext *ctx, arg_LD1R_zpri *a); +typedef arg_rprr_gather_load arg_LD1_zprz; +static bool trans_LD1_zprz(DisasContext *ctx, arg_LD1_zprz *a); +typedef arg_rpri_gather_load arg_LD1_zpiz; +static bool trans_LD1_zpiz(DisasContext *ctx, arg_LD1_zpiz *a); +typedef arg_rprr_load arg_LD_zprr; +static bool trans_LD_zprr(DisasContext *ctx, arg_LD_zprr *a); +typedef arg_rprr_load arg_LDFF1_zprr; +static bool trans_LDFF1_zprr(DisasContext *ctx, arg_LDFF1_zprr *a); +typedef arg_rpri_load arg_LD_zpri; +static bool trans_LD_zpri(DisasContext *ctx, arg_LD_zpri *a); +typedef arg_rpri_load arg_LDNF1_zpri; +static bool trans_LDNF1_zpri(DisasContext *ctx, arg_LDNF1_zpri *a); +typedef arg_rprr_load arg_LD1RQ_zprr; +static bool trans_LD1RQ_zprr(DisasContext *ctx, arg_LD1RQ_zprr *a); +typedef arg_rpri_load arg_LD1RQ_zpri; +static bool trans_LD1RQ_zpri(DisasContext *ctx, arg_LD1RQ_zpri *a); +typedef arg_disas_sve32 arg_PRF; +static bool trans_PRF(DisasContext *ctx, arg_PRF *a); +typedef arg_disas_sve47 arg_PRF_rr; +static bool trans_PRF_rr(DisasContext *ctx, arg_PRF_rr *a); +typedef arg_rri arg_STR_pri; +static bool trans_STR_pri(DisasContext *ctx, arg_STR_pri *a); +typedef arg_rri arg_STR_zri; +static bool trans_STR_zri(DisasContext *ctx, arg_STR_zri *a); +typedef arg_rpri_store arg_ST_zpri; +static bool trans_ST_zpri(DisasContext *ctx, arg_ST_zpri *a); +typedef arg_rprr_store arg_ST_zprr; +static bool trans_ST_zprr(DisasContext *ctx, arg_ST_zprr *a); +typedef arg_rprr_scatter_store arg_ST1_zprz; +static bool trans_ST1_zprz(DisasContext *ctx, arg_ST1_zprz *a); +typedef arg_rpri_scatter_store arg_ST1_zpiz; +static bool trans_ST1_zpiz(DisasContext *ctx, arg_ST1_zpiz *a); + +static void disas_sve_extract_disas_sve_Fmt_55(DisasContext *ctx, arg_disas_sve25 *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->imm2 = sextract32(insn, 16, 5); + a->imm1 = sextract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_56(DisasContext *ctx, arg_disas_sve26 *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rm = extract32(insn, 16, 5); + a->imm = sextract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_57(DisasContext *ctx, arg_rri_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->imm = sextract32(insn, 16, 5); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_58(DisasContext *ctx, arg_disas_sve27 *a, uint32_t insn) +{ + a->imm = sextract32(insn, 5, 6); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_59(DisasContext *ctx, arg_disas_sve28 *a, uint32_t insn) +{ + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + 
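[Reviewer note, not part of the patch] Every disas_sve_extract_* helper in this generated file unpacks fixed bit-fields of the 32-bit instruction word with QEMU's extract32/sextract32/deposit32 primitives. Where an immediate is split across the encoding (as in pd_rn_i9/rd_rn_i9 below), the signed high part is read with sextract32 and deposited above the low bits with a length that reaches the top of the word, so the sign extension survives in the assembled value. The following is a minimal standalone sketch of those semantics, with helpers re-implemented here to mirror QEMU's bitops.h (they are not taken from this patch) and a made-up sample instruction word:

/*
 * Standalone illustration only. The three helpers mirror the semantics of
 * QEMU's extract32/sextract32/deposit32; the sample encoding is hypothetical.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static inline uint32_t extract32(uint32_t value, int start, int length)
{
    /* unsigned field: shift down, then mask to 'length' bits */
    return (value >> start) & (~0U >> (32 - length));
}

static inline int32_t sextract32(uint32_t value, int start, int length)
{
    /* signed field: shift the field up to the sign bit, then
     * arithmetic-shift back down to sign-extend it */
    return (int32_t)(value << (32 - length - start)) >> (32 - length);
}

static inline uint32_t deposit32(uint32_t value, int start, int length,
                                 uint32_t fieldval)
{
    /* overwrite 'length' bits of 'value' at 'start' with 'fieldval' */
    uint32_t mask = (~0U >> (32 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    uint32_t insn = 0x04f12345;  /* hypothetical instruction word */

    /* same field arithmetic as disas_sve_extract_pd_rn_i9 in this file:
     * imm = insn[12:10] with the signed field insn[21:16] deposited above
     * it; length 29 runs to bit 31, preserving the sign extension */
    int32_t imm = deposit32(extract32(insn, 10, 3), 3, 29,
                            sextract32(insn, 16, 6));

    printf("rn=%" PRIu32 " rd=%" PRIu32 " imm=%" PRId32 "\n",
           extract32(insn, 5, 5), extract32(insn, 0, 4), imm);
    return 0;
}

Compiled on its own, this prints the three decoded fields. In the generated decoder the same arithmetic runs inline, which is why decodetree emits one dedicated extractor per argument set instead of a generic field parser.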
+static void disas_sve_extract_disas_sve_Fmt_60(DisasContext *ctx, arg_disas_sve29 *a, uint32_t insn) +{ + a->dbm = extract32(insn, 5, 13); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_61(DisasContext *ctx, arg_rrri *a, uint32_t insn) +{ + a->rm = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); + a->imm = deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 16, 5)); +} + +static void disas_sve_extract_disas_sve_Fmt_62(DisasContext *ctx, arg_rri *a, uint32_t insn) +{ + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->imm = deposit32(extract32(insn, 16, 5), 5, 27, extract32(insn, 22, 2)); +} + +static void disas_sve_extract_disas_sve_Fmt_63(DisasContext *ctx, arg_disas_sve30 *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->u = extract32(insn, 17, 1); + a->h = extract32(insn, 16, 1); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_64(DisasContext *ctx, arg_disas_sve31 *a, uint32_t insn) +{ + a->pg = extract32(insn, 10, 4); + a->rn = extract32(insn, 5, 4); +} + +static void disas_sve_extract_disas_sve_Fmt_65(DisasContext *ctx, arg_ptrue *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->s = extract32(insn, 16, 1); + a->pat = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_disas_sve_Fmt_66(DisasContext *ctx, arg_disas_sve32 *a, uint32_t insn) +{ +} + +static void disas_sve_extract_disas_sve_Fmt_67(DisasContext *ctx, arg_disas_sve33 *a, uint32_t insn) +{ + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_disas_sve_Fmt_68(DisasContext *ctx, arg_disas_sve34 *a, uint32_t insn) +{ + a->s = extract32(insn, 22, 1); + a->pg = extract32(insn, 5, 4); + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_disas_sve_Fmt_69(DisasContext *ctx, arg_disas_sve35 *a, uint32_t insn) +{ + a->rn = extract32(insn, 5, 4); +} + +static void disas_sve_extract_disas_sve_Fmt_70(DisasContext *ctx, arg_disas_sve36 *a, uint32_t insn) +{ + a->sf = extract32(insn, 22, 1); + a->rm = extract32(insn, 16, 5); + a->rn = extract32(insn, 5, 5); + a->ne = extract32(insn, 4, 1); +} + +static void disas_sve_extract_disas_sve_Fmt_71(DisasContext *ctx, arg_disas_sve37 *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rm = extract32(insn, 16, 5); + a->sf = extract32(insn, 12, 1); + a->u = extract32(insn, 11, 1); + a->rn = extract32(insn, 5, 5); + a->eq = extract32(insn, 4, 1); + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_disas_sve_Fmt_72(DisasContext *ctx, arg_disas_sve38 *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->imm = extract32(insn, 5, 8); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_73(DisasContext *ctx, arg_disas_sve38 *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rd = extract32(insn, 0, 5); + a->imm = expand_imm_sh8s(ctx, extract32(insn, 5, 9)); +} + +static void disas_sve_extract_disas_sve_Fmt_74(DisasContext *ctx, arg_disas_sve39 *a, uint32_t insn) +{ + a->sz = extract32(insn, 22, 1); + a->rm = extract32(insn, 16, 5); + a->u = extract32(insn, 10, 1); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->ra = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_75(DisasContext *ctx, arg_disas_sve40 *a, uint32_t insn) +{ + a->index = extract32(insn, 19, 2); + a->rm = extract32(insn, 16, 3); + a->u = extract32(insn, 10, 1); 
+ a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->sz = 0; + a->ra = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_76(DisasContext *ctx, arg_disas_sve40 *a, uint32_t insn) +{ + a->index = extract32(insn, 20, 1); + a->rm = extract32(insn, 16, 4); + a->u = extract32(insn, 10, 1); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->sz = 1; + a->ra = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_77(DisasContext *ctx, arg_disas_sve41 *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rot = extract32(insn, 16, 1); + a->pg = extract32(insn, 10, 3); + a->rm = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_78(DisasContext *ctx, arg_disas_sve42 *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rm = extract32(insn, 16, 5); + a->rot = extract32(insn, 13, 2); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->ra = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_79(DisasContext *ctx, arg_disas_sve43 *a, uint32_t insn) +{ + a->index = extract32(insn, 19, 2); + a->rm = extract32(insn, 16, 3); + a->rot = extract32(insn, 10, 2); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->ra = extract32(insn, 0, 5); + a->esz = 1; +} + +static void disas_sve_extract_disas_sve_Fmt_80(DisasContext *ctx, arg_disas_sve43 *a, uint32_t insn) +{ + a->index = extract32(insn, 20, 1); + a->rm = extract32(insn, 16, 4); + a->rot = extract32(insn, 10, 2); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->ra = extract32(insn, 0, 5); + a->esz = 2; +} + +static void disas_sve_extract_disas_sve_Fmt_81(DisasContext *ctx, arg_disas_sve44 *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 3); + a->sub = extract32(insn, 10, 1); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->ra = extract32(insn, 0, 5); + a->index = deposit32(extract32(insn, 19, 2), 2, 30, extract32(insn, 22, 1)); + a->esz = 1; +} + +static void disas_sve_extract_disas_sve_Fmt_82(DisasContext *ctx, arg_disas_sve44 *a, uint32_t insn) +{ + a->index = extract32(insn, 19, 2); + a->rm = extract32(insn, 16, 3); + a->sub = extract32(insn, 10, 1); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->ra = extract32(insn, 0, 5); + a->esz = 2; +} + +static void disas_sve_extract_disas_sve_Fmt_83(DisasContext *ctx, arg_disas_sve44 *a, uint32_t insn) +{ + a->index = extract32(insn, 20, 1); + a->rm = extract32(insn, 16, 4); + a->sub = extract32(insn, 10, 1); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->ra = extract32(insn, 0, 5); + a->esz = 3; +} + +static void disas_sve_extract_disas_sve_Fmt_84(DisasContext *ctx, arg_disas_sve45 *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->index = deposit32(extract32(insn, 19, 2), 2, 30, extract32(insn, 22, 1)); + a->esz = 1; +} + +static void disas_sve_extract_disas_sve_Fmt_85(DisasContext *ctx, arg_disas_sve45 *a, uint32_t insn) +{ + a->index = extract32(insn, 19, 2); + a->rm = extract32(insn, 16, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->esz = 2; +} + +static void disas_sve_extract_disas_sve_Fmt_86(DisasContext *ctx, arg_disas_sve45 *a, uint32_t insn) +{ + a->index = extract32(insn, 20, 1); + a->rm = extract32(insn, 16, 4); + a->rn = 
extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->esz = 3; +} + +static void disas_sve_extract_disas_sve_Fmt_87(DisasContext *ctx, arg_disas_sve46 *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->imm = extract32(insn, 16, 3); + a->rm = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_disas_sve_Fmt_88(DisasContext *ctx, arg_rpri_load *a, uint32_t insn) +{ + a->imm = extract32(insn, 16, 6); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->dtype = deposit32(extract32(insn, 13, 2), 2, 30, extract32(insn, 23, 2)); + a->nreg = 0; +} + +static void disas_sve_extract_disas_sve_Fmt_89(DisasContext *ctx, arg_disas_sve47 *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 5); +} + +static void disas_sve_extract_incdec2_cnt(DisasContext *ctx, arg_incdec2_cnt *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pat = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->imm = plus1(ctx, extract32(insn, 16, 4)); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_incdec2_pred(DisasContext *ctx, arg_incdec2_pred *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pg = extract32(insn, 5, 4); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_incdec_cnt(DisasContext *ctx, arg_incdec_cnt *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pat = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->imm = plus1(ctx, extract32(insn, 16, 4)); +} + +static void disas_sve_extract_incdec_pred(DisasContext *ctx, arg_incdec_pred *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pg = extract32(insn, 5, 4); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_pd_pg_pn_pm_s(DisasContext *ctx, arg_rprr_s *a, uint32_t insn) +{ + a->s = extract32(insn, 22, 1); + a->rm = extract32(insn, 16, 4); + a->pg = extract32(insn, 10, 4); + a->rn = extract32(insn, 5, 4); + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_pd_pg_pn_s(DisasContext *ctx, arg_rpr_s *a, uint32_t insn) +{ + a->s = extract32(insn, 22, 1); + a->pg = extract32(insn, 10, 4); + a->rn = extract32(insn, 5, 4); + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_pd_pg_pn_s0(DisasContext *ctx, arg_rpr_s *a, uint32_t insn) +{ + a->pg = extract32(insn, 10, 4); + a->rn = extract32(insn, 5, 4); + a->rd = extract32(insn, 0, 4); + a->s = 0; +} + +static void disas_sve_extract_pd_pg_rn(DisasContext *ctx, arg_rpr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_pd_pg_rn_i5(DisasContext *ctx, arg_rpri_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->imm = sextract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_pd_pg_rn_i7(DisasContext *ctx, arg_rpri_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->imm = extract32(insn, 14, 7); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_pd_pg_rn_rm(DisasContext *ctx, arg_rprr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rm = extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = 
extract32(insn, 0, 4); +} + +static void disas_sve_extract_pd_pn(DisasContext *ctx, arg_rr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rn = extract32(insn, 5, 4); + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_pd_pn_e0(DisasContext *ctx, arg_rr_esz *a, uint32_t insn) +{ + a->rn = extract32(insn, 5, 4); + a->rd = extract32(insn, 0, 4); + a->esz = 0; +} + +static void disas_sve_extract_pd_pn_pm(DisasContext *ctx, arg_rrr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rm = extract32(insn, 16, 4); + a->rn = extract32(insn, 5, 4); + a->rd = extract32(insn, 0, 4); +} + +static void disas_sve_extract_pd_rn_i9(DisasContext *ctx, arg_rri *a, uint32_t insn) +{ + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 4); + a->imm = deposit32(extract32(insn, 10, 3), 3, 29, sextract32(insn, 16, 6)); +} + +static void disas_sve_extract_rd_pg4_pn(DisasContext *ctx, arg_rpr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pg = extract32(insn, 10, 4); + a->rn = extract32(insn, 5, 4); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rd_pg4_rn_rm(DisasContext *ctx, arg_rprr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rm = extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 4); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rd_pg_rn(DisasContext *ctx, arg_rpr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rd_pg_rn_e0(DisasContext *ctx, arg_rpr_esz *a, uint32_t insn) +{ + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->esz = 0; +} + +static void disas_sve_extract_rd_rn(DisasContext *ctx, arg_rr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rd_rn_i6(DisasContext *ctx, arg_rri *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 5); + a->imm = sextract32(insn, 5, 6); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rd_rn_i9(DisasContext *ctx, arg_rri *a, uint32_t insn) +{ + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->imm = deposit32(extract32(insn, 10, 3), 3, 29, sextract32(insn, 16, 6)); +} + +static void disas_sve_extract_rd_rn_msz_rm(DisasContext *ctx, arg_rrri *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 5); + a->imm = extract32(insn, 10, 2); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rd_rn_rm(DisasContext *ctx, arg_rrr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rm = extract32(insn, 16, 5); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rd_rn_rm_e0(DisasContext *ctx, arg_rrr_esz *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 5); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->esz = 0; +} + +static void disas_sve_extract_rd_rn_tszimm(DisasContext *ctx, arg_rri_esz *a, uint32_t insn) +{ + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->esz = tszimm_esz(ctx, deposit32(extract32(insn, 16, 5), 5, 27, extract32(insn, 22, 2))); +} + +static void disas_sve_extract_rda_pg_rn_rm(DisasContext *ctx, arg_rprrr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rm = 
extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->ra = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdm_pg_rn(DisasContext *ctx, arg_rprr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->rm = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdn_dbm(DisasContext *ctx, arg_rr_dbm *a, uint32_t insn) +{ + a->dbm = extract32(insn, 5, 13); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdn_i1(DisasContext *ctx, arg_rpri_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pg = extract32(insn, 10, 3); + a->imm = extract32(insn, 5, 1); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdn_i8s(DisasContext *ctx, arg_rri_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->imm = sextract32(insn, 5, 8); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdn_i8u(DisasContext *ctx, arg_rri_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->imm = extract32(insn, 5, 8); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdn_pg4(DisasContext *ctx, arg_rpri_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pg = extract32(insn, 16, 4); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdn_pg_ra_rm(DisasContext *ctx, arg_rprrr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rm = extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->ra = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdn_pg_rm(DisasContext *ctx, arg_rprr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->pg = extract32(insn, 10, 3); + a->rm = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdn_pg_rm_ra(DisasContext *ctx, arg_rprrr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->ra = extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->rm = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdn_pg_tszimm(DisasContext *ctx, arg_rpri_esz *a, uint32_t insn) +{ + a->pg = extract32(insn, 10, 3); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); + a->esz = tszimm_esz(ctx, deposit32(extract32(insn, 5, 5), 5, 27, extract32(insn, 22, 2))); +} + +static void disas_sve_extract_rdn_rm(DisasContext *ctx, arg_rrr_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rm = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rdn_sh_i8u(DisasContext *ctx, arg_rri_esz *a, uint32_t insn) +{ + a->esz = extract32(insn, 22, 2); + a->rd = extract32(insn, 0, 5); + a->rn = extract32(insn, 0, 5); + a->imm = expand_imm_sh8u(ctx, extract32(insn, 5, 9)); +} + +static void disas_sve_extract_rpri_g_load(DisasContext *ctx, arg_rpri_gather_load *a, uint32_t insn) +{ + a->msz = extract32(insn, 23, 2); + a->imm = extract32(insn, 16, 5); + a->u = extract32(insn, 14, 1); + a->ff = extract32(insn, 13, 1); + a->pg = extract32(insn, 10, 3); + a->rn = 
extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rpri_load_dt(DisasContext *ctx, arg_rpri_load *a, uint32_t insn) +{ + a->dtype = extract32(insn, 21, 4); + a->imm = sextract32(insn, 16, 4); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rpri_load_msz(DisasContext *ctx, arg_rpri_load *a, uint32_t insn) +{ + a->imm = sextract32(insn, 16, 4); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->dtype = msz_dtype(ctx, extract32(insn, 23, 2)); +} + +static void disas_sve_extract_rpri_scatter_store(DisasContext *ctx, arg_rpri_scatter_store *a, uint32_t insn) +{ + a->msz = extract32(insn, 23, 2); + a->imm = extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rpri_store_msz(DisasContext *ctx, arg_rpri_store *a, uint32_t insn) +{ + a->msz = extract32(insn, 23, 2); + a->imm = sextract32(insn, 16, 4); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rprr_g_load_sc(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) +{ + a->scale = extract32(insn, 21, 1); + a->rm = extract32(insn, 16, 5); + a->ff = extract32(insn, 13, 1); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->xs = 2; +} + +static void disas_sve_extract_rprr_g_load_u(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 5); + a->u = extract32(insn, 14, 1); + a->ff = extract32(insn, 13, 1); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->xs = 2; +} + +static void disas_sve_extract_rprr_g_load_u_sc(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) +{ + a->scale = extract32(insn, 21, 1); + a->rm = extract32(insn, 16, 5); + a->u = extract32(insn, 14, 1); + a->ff = extract32(insn, 13, 1); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->xs = 2; +} + +static void disas_sve_extract_rprr_g_load_xs_sc(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) +{ + a->xs = extract32(insn, 22, 1); + a->scale = extract32(insn, 21, 1); + a->rm = extract32(insn, 16, 5); + a->ff = extract32(insn, 13, 1); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rprr_g_load_xs_u(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) +{ + a->xs = extract32(insn, 22, 1); + a->rm = extract32(insn, 16, 5); + a->u = extract32(insn, 14, 1); + a->ff = extract32(insn, 13, 1); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rprr_g_load_xs_u_sc(DisasContext *ctx, arg_rprr_gather_load *a, uint32_t insn) +{ + a->xs = extract32(insn, 22, 1); + a->scale = extract32(insn, 21, 1); + a->rm = extract32(insn, 16, 5); + a->u = extract32(insn, 14, 1); + a->ff = extract32(insn, 13, 1); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rprr_load_dt(DisasContext *ctx, arg_rprr_load *a, uint32_t insn) +{ + a->dtype = extract32(insn, 21, 4); + a->rm = extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); 
+ a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rprr_load_msz(DisasContext *ctx, arg_rprr_load *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->dtype = msz_dtype(ctx, extract32(insn, 23, 2)); +} + +static void disas_sve_extract_rprr_scatter_store(DisasContext *ctx, arg_rprr_scatter_store *a, uint32_t insn) +{ + a->msz = extract32(insn, 23, 2); + a->rm = extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rprr_store(DisasContext *ctx, arg_rprr_store *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); +} + +static void disas_sve_extract_rprr_store_esz_n0(DisasContext *ctx, arg_rprr_store *a, uint32_t insn) +{ + a->esz = extract32(insn, 21, 2); + a->rm = extract32(insn, 16, 5); + a->pg = extract32(insn, 10, 3); + a->rn = extract32(insn, 5, 5); + a->rd = extract32(insn, 0, 5); + a->nreg = 0; +} + +bool disas_sve(DisasContext *ctx, uint32_t insn) +{ + union { + arg_disas_sve25 f_disas_sve25; + arg_disas_sve26 f_disas_sve26; + arg_disas_sve27 f_disas_sve27; + arg_disas_sve28 f_disas_sve28; + arg_disas_sve29 f_disas_sve29; + arg_disas_sve30 f_disas_sve30; + arg_disas_sve31 f_disas_sve31; + arg_disas_sve32 f_disas_sve32; + arg_disas_sve33 f_disas_sve33; + arg_disas_sve34 f_disas_sve34; + arg_disas_sve35 f_disas_sve35; + arg_disas_sve36 f_disas_sve36; + arg_disas_sve37 f_disas_sve37; + arg_disas_sve38 f_disas_sve38; + arg_disas_sve39 f_disas_sve39; + arg_disas_sve40 f_disas_sve40; + arg_disas_sve41 f_disas_sve41; + arg_disas_sve42 f_disas_sve42; + arg_disas_sve43 f_disas_sve43; + arg_disas_sve44 f_disas_sve44; + arg_disas_sve45 f_disas_sve45; + arg_disas_sve46 f_disas_sve46; + arg_disas_sve47 f_disas_sve47; + arg_incdec2_cnt f_incdec2_cnt; + arg_incdec2_pred f_incdec2_pred; + arg_incdec_cnt f_incdec_cnt; + arg_incdec_pred f_incdec_pred; + arg_ptrue f_ptrue; + arg_rpr_esz f_rpr_esz; + arg_rpr_s f_rpr_s; + arg_rpri_esz f_rpri_esz; + arg_rpri_gather_load f_rpri_gather_load; + arg_rpri_load f_rpri_load; + arg_rpri_scatter_store f_rpri_scatter_store; + arg_rpri_store f_rpri_store; + arg_rprr_esz f_rprr_esz; + arg_rprr_gather_load f_rprr_gather_load; + arg_rprr_load f_rprr_load; + arg_rprr_s f_rprr_s; + arg_rprr_scatter_store f_rprr_scatter_store; + arg_rprr_store f_rprr_store; + arg_rprrr_esz f_rprrr_esz; + arg_rr_dbm f_rr_dbm; + arg_rr_esz f_rr_esz; + arg_rri f_rri; + arg_rri_esz f_rri_esz; + arg_rrr_esz f_rrr_esz; + arg_rrri f_rrri; + } u; + + switch ((insn >> 25) & 0x7f) { + case 0x2: + /* 0000010. ........ ........ ........ */ + switch (insn & 0x01200000) { + case 0x00000000: + /* 00000100 ..0..... ........ ........ */ + switch ((insn >> 13) & 0x7) { + case 0x0: + /* 00000100 ..0..... 000..... ........ */ + switch ((insn >> 16) & 0x1f) { + case 0x0: + /* 00000100 ..000000 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:245 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_ADD_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x1: + /* 00000100 ..000001 000..... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:246 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_SUB_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x3: + /* 00000100 ..000011 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:247 */ + disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); + if (trans_SUB_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x8: + /* 00000100 ..001000 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:250 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_SMAX_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x9: + /* 00000100 ..001001 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:251 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_UMAX_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0xa: + /* 00000100 ..001010 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:252 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_SMIN_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0xb: + /* 00000100 ..001011 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:253 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_UMIN_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0xc: + /* 00000100 ..001100 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:254 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_SABD_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0xd: + /* 00000100 ..001101 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:255 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_UABD_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x10: + /* 00000100 ..010000 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:258 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_MUL_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x12: + /* 00000100 ..010010 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:259 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_SMULH_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x13: + /* 00000100 ..010011 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:260 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_UMULH_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x14: + /* 00000100 ..010100 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:262 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_SDIV_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x15: + /* 00000100 ..010101 000..... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:263 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_UDIV_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x16: + /* 00000100 ..010110 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:264 */ + disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); + if (trans_SDIV_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x17: + /* 00000100 ..010111 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:265 */ + disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); + if (trans_UDIV_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x18: + /* 00000100 ..011000 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:239 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_ORR_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x19: + /* 00000100 ..011001 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:240 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_EOR_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x1a: + /* 00000100 ..011010 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:241 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_AND_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x1b: + /* 00000100 ..011011 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:242 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_BIC_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + } + return false; + case 0x1: + /* 00000100 ..0..... 001..... ........ */ + disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 16) & 0x1f) { + case 0x0: + /* 00000100 ..000000 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:281 */ + if (trans_SADDV(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x1: + /* 00000100 ..000001 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:280 */ + if (trans_UADDV(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x8: + /* 00000100 ..001000 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:284 */ + if (trans_SMAXV(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x9: + /* 00000100 ..001001 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:285 */ + if (trans_UMAXV(ctx, &u.f_rpr_esz)) return true; + return false; + case 0xa: + /* 00000100 ..001010 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:286 */ + if (trans_SMINV(ctx, &u.f_rpr_esz)) return true; + return false; + case 0xb: + /* 00000100 ..001011 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:287 */ + if (trans_UMINV(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x10: + /* 00000100 ..010000 001..... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:275 */ + if (trans_MOVPRFX_z(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x11: + /* 00000100 ..010001 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:276 */ + if (trans_MOVPRFX_m(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x18: + /* 00000100 ..011000 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:270 */ + if (trans_ORV(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x19: + /* 00000100 ..011001 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:271 */ + if (trans_EORV(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x1a: + /* 00000100 ..011010 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:272 */ + if (trans_ANDV(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x2: + /* 00000100 ..0..... 010..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:352 */ + disas_sve_extract_rda_pg_rn_rm(ctx, &u.f_rprrr_esz, insn); + if (trans_MLA(ctx, &u.f_rprrr_esz)) return true; + return false; + case 0x3: + /* 00000100 ..0..... 011..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:353 */ + disas_sve_extract_rda_pg_rn_rm(ctx, &u.f_rprrr_esz, insn); + if (trans_MLS(ctx, &u.f_rprrr_esz)) return true; + return false; + case 0x4: + /* 00000100 ..0..... 100..... ........ */ + switch ((insn >> 16) & 0x1f) { + case 0x0: + /* 00000100 ..000000 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:292 */ + disas_sve_extract_rdn_pg_tszimm(ctx, &u.f_rpri_esz, insn); + u.f_rpri_esz.imm = tszimm_shr(ctx, deposit32(extract32(insn, 5, 5), 5, 27, extract32(insn, 22, 2))); + if (trans_ASR_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x1: + /* 00000100 ..000001 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:294 */ + disas_sve_extract_rdn_pg_tszimm(ctx, &u.f_rpri_esz, insn); + u.f_rpri_esz.imm = tszimm_shr(ctx, deposit32(extract32(insn, 5, 5), 5, 27, extract32(insn, 22, 2))); + if (trans_LSR_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x3: + /* 00000100 ..000011 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:296 */ + disas_sve_extract_rdn_pg_tszimm(ctx, &u.f_rpri_esz, insn); + u.f_rpri_esz.imm = tszimm_shl(ctx, deposit32(extract32(insn, 5, 5), 5, 27, extract32(insn, 22, 2))); + if (trans_LSL_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x4: + /* 00000100 ..000100 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:298 */ + disas_sve_extract_rdn_pg_tszimm(ctx, &u.f_rpri_esz, insn); + u.f_rpri_esz.imm = tszimm_shr(ctx, deposit32(extract32(insn, 5, 5), 5, 27, extract32(insn, 22, 2))); + if (trans_ASRD(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x10: + /* 00000100 ..010000 100..... ........ 
+                    */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:302 */
+                    disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                    if (trans_ASR_zpzz(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x11:
+                    /* 00000100 ..010001 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:303 */
+                    disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                    if (trans_LSR_zpzz(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x13:
+                    /* 00000100 ..010011 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:304 */
+                    disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                    if (trans_LSL_zpzz(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x14:
+                    /* 00000100 ..010100 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:305 */
+                    disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn);
+                    if (trans_ASR_zpzz(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x15:
+                    /* 00000100 ..010101 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:306 */
+                    disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn);
+                    if (trans_LSR_zpzz(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x17:
+                    /* 00000100 ..010111 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:307 */
+                    disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn);
+                    if (trans_LSL_zpzz(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x18:
+                    /* 00000100 ..011000 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:311 */
+                    disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                    if (trans_ASR_zpzw(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x19:
+                    /* 00000100 ..011001 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:312 */
+                    disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                    if (trans_LSR_zpzw(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x1b:
+                    /* 00000100 ..011011 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:313 */
+                    disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                    if (trans_LSL_zpzw(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x5:
+                /* 00000100 ..0..... 101..... ........ */
+                disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                switch ((insn >> 16) & 0x1f) {
+                case 0x10:
+                    /* 00000100 ..010000 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:331 */
+                    if (trans_SXTB(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x11:
+                    /* 00000100 ..010001 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:332 */
+                    if (trans_UXTB(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x12:
+                    /* 00000100 ..010010 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:333 */
+                    if (trans_SXTH(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x13:
+                    /* 00000100 ..010011 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:334 */
+                    if (trans_UXTH(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x14:
+                    /* 00000100 ..010100 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:335 */
+                    if (trans_SXTW(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x15:
+                    /* 00000100 ..010101 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:336 */
+                    if (trans_UXTW(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x16:
+                    /* 00000100 ..010110 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:329 */
+                    if (trans_ABS(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x17:
+                    /* 00000100 ..010111 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:330 */
+                    if (trans_NEG(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x18:
+                    /* 00000100 ..011000 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:319 */
+                    if (trans_CLS(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x19:
+                    /* 00000100 ..011001 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:320 */
+                    if (trans_CLZ(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x1a:
+                    /* 00000100 ..011010 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:321 */
+                    if (trans_CNT_zpz(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x1b:
+                    /* 00000100 ..011011 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:322 */
+                    if (trans_CNOT(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x1c:
+                    /* 00000100 ..011100 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:324 */
+                    if (trans_FABS(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x1d:
+                    /* 00000100 ..011101 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:325 */
+                    if (trans_FNEG(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x1e:
+                    /* 00000100 ..011110 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:323 */
+                    if (trans_NOT_zpz(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x6:
+                /* 00000100 ..0..... 110..... ........ */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:356 */
+                disas_sve_extract_rdn_pg_ra_rm(ctx, &u.f_rprrr_esz, insn);
+                if (trans_MLA(ctx, &u.f_rprrr_esz)) return true;
+                return false;
+            case 0x7:
+                /* 00000100 ..0..... 111..... ........ */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:357 */
+                disas_sve_extract_rdn_pg_ra_rm(ctx, &u.f_rprrr_esz, insn);
+                if (trans_MLS(ctx, &u.f_rprrr_esz)) return true;
+                return false;
+            }
+            return false;
+        case 0x00200000:
+            /* 00000100 ..1..... ........ ........ */
+            switch ((insn >> 12) & 0xf) {
+            case 0x0:
+                /* 00000100 ..1..... 0000.... ........ */
+                disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                switch ((insn >> 10) & 0x3) {
+                case 0x0:
+                    /* 00000100 ..1..... 000000.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:362 */
+                    if (trans_ADD_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 00000100 ..1..... 000001.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:363 */
+                    if (trans_SUB_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x1:
+                /* 00000100 ..1..... 0001.... ........ */
+                disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                switch ((insn >> 10) & 0x3) {
+                case 0x0:
+                    /* 00000100 ..1..... 000100.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:364 */
+                    if (trans_SQADD_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 00000100 ..1..... 000101.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:365 */
+                    if (trans_UQADD_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x2:
+                    /* 00000100 ..1..... 000110.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:366 */
+                    if (trans_SQSUB_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x3:
+                    /* 00000100 ..1..... 000111.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:367 */
+                    if (trans_UQSUB_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x3:
+                /* 00000100 ..1..... 0011.... ........ */
+                disas_sve_extract_rd_rn_rm_e0(ctx, &u.f_rrr_esz, insn);
+                switch (insn & 0x00c00c00) {
+                case 0x00000000:
+                    /* 00000100 001..... 001100.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:372 */
+                    if (trans_AND_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x00400000:
+                    /* 00000100 011..... 001100.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:373 */
+                    if (trans_ORR_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x00800000:
+                    /* 00000100 101..... 001100.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:374 */
+                    if (trans_EOR_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x00c00000:
+                    /* 00000100 111..... 001100.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:375 */
+                    if (trans_BIC_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x4:
+                /* 00000100 ..1..... 0100.... ........ */
+                switch ((insn >> 10) & 0x3) {
+                case 0x0:
+                    /* 00000100 ..1..... 010000.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:380 */
+                    disas_sve_extract_disas_sve_Fmt_55(ctx, &u.f_disas_sve25, insn);
+                    if (trans_INDEX_ii(ctx, &u.f_disas_sve25)) return true;
+                    return false;
+                case 0x1:
+                    /* 00000100 ..1..... 010001.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:386 */
+                    disas_sve_extract_disas_sve_Fmt_57(ctx, &u.f_rri_esz, insn);
+                    if (trans_INDEX_ri(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                case 0x2:
+                    /* 00000100 ..1..... 010010.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:383 */
+                    disas_sve_extract_disas_sve_Fmt_56(ctx, &u.f_disas_sve26, insn);
+                    if (trans_INDEX_ir(ctx, &u.f_disas_sve26)) return true;
+                    return false;
+                case 0x3:
+                    /* 00000100 ..1..... 010011.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:389 */
+                    disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                    if (trans_INDEX_rr(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x5:
+                /* 00000100 ..1..... 0101.... ........ */
+                switch (insn & 0x00c00800) {
+                case 0x00000000:
+                    /* 00000100 001..... 01010... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:394 */
+                    disas_sve_extract_rd_rn_i6(ctx, &u.f_rri, insn);
+                    if (trans_ADDVL(ctx, &u.f_rri)) return true;
+                    return false;
+                case 0x00400000:
+                    /* 00000100 011..... 01010... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:395 */
+                    disas_sve_extract_rd_rn_i6(ctx, &u.f_rri, insn);
+                    if (trans_ADDPL(ctx, &u.f_rri)) return true;
+                    return false;
+                case 0x00800000:
+                    /* 00000100 101..... 01010... ........ */
+                    disas_sve_extract_disas_sve_Fmt_58(ctx, &u.f_disas_sve27, insn);
+                    switch ((insn >> 16) & 0x1f) {
+                    case 0x1f:
+                        /* 00000100 10111111 01010... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:398 */
+                        if (trans_RDVL(ctx, &u.f_disas_sve27)) return true;
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            case 0x8:
+                /* 00000100 ..1..... 1000.... ........ */
+                disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                switch ((insn >> 10) & 0x3) {
+                case 0x0:
+                    /* 00000100 ..1..... 100000.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:412 */
+                    if (trans_ASR_zzw(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 00000100 ..1..... 100001.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:413 */
+                    if (trans_LSR_zzw(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x3:
+                    /* 00000100 ..1..... 100011.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:414 */
+                    if (trans_LSL_zzw(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x9:
+                /* 00000100 ..1..... 1001.... ........ */
+                disas_sve_extract_rd_rn_tszimm(ctx, &u.f_rri_esz, insn);
+                switch ((insn >> 10) & 0x3) {
+                case 0x0:
+                    /* 00000100 ..1..... 100100.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:403 */
+                    u.f_rri_esz.imm = tszimm_shr(ctx, deposit32(extract32(insn, 16, 5), 5, 27, extract32(insn, 22, 2)));
+                    if (trans_ASR_zzi(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 00000100 ..1..... 100101.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:405 */
+                    u.f_rri_esz.imm = tszimm_shr(ctx, deposit32(extract32(insn, 16, 5), 5, 27, extract32(insn, 22, 2)));
+                    if (trans_LSR_zzi(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                case 0x3:
+                    /* 00000100 ..1..... 100111.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:407 */
+                    u.f_rri_esz.imm = tszimm_shl(ctx, deposit32(extract32(insn, 16, 5), 5, 27, extract32(insn, 22, 2)));
+                    if (trans_LSL_zzi(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0xa:
+                /* 00000100 ..1..... 1010.... ........ */
+                disas_sve_extract_rd_rn_msz_rm(ctx, &u.f_rrri, insn);
+                switch ((insn >> 22) & 0x3) {
+                case 0x0:
+                    /* 00000100 001..... 1010.... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:419 */
+                    if (trans_ADR_s32(ctx, &u.f_rrri)) return true;
+                    return false;
+                case 0x1:
+                    /* 00000100 011..... 1010.... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:420 */
+                    if (trans_ADR_u32(ctx, &u.f_rrri)) return true;
+                    return false;
+                case 0x2:
+                    /* 00000100 101..... 1010.... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:421 */
+                    if (trans_ADR_p32(ctx, &u.f_rrri)) return true;
+                    return false;
+                case 0x3:
+                    /* 00000100 111..... 1010.... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:422 */
+                    if (trans_ADR_p64(ctx, &u.f_rrri)) return true;
+                    return false;
+                }
+                return false;
+            case 0xb:
+                /* 00000100 ..1..... 1011.... ........ */
+                switch ((insn >> 10) & 0x3) {
+                case 0x0:
+                    /* 00000100 ..1..... 101100.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:435 */
+                    disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                    if (trans_FTSSEL(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x2:
+                    /* 00000100 ..1..... 101110.. ........ */
+                    disas_sve_extract_rd_rn(ctx, &u.f_rr_esz, insn);
+                    switch ((insn >> 16) & 0x1f) {
+                    case 0x0:
+                        /* 00000100 ..100000 101110.. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:431 */
+                        if (trans_FEXPA(ctx, &u.f_rr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x3:
+                    /* 00000100 ..1..... 101111.. ........ */
+                    disas_sve_extract_disas_sve_Fmt_59(ctx, &u.f_disas_sve28, insn);
+                    switch (insn & 0x00df0000) {
+                    case 0x00000000:
+                        /* 00000100 00100000 101111.. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:427 */
+                        if (trans_MOVPRFX(ctx, &u.f_disas_sve28)) return true;
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            case 0xc:
+                /* 00000100 ..1..... 1100.... ........ */
+                disas_sve_extract_incdec2_cnt(ctx, &u.f_incdec2_cnt, insn);
+                switch ((insn >> 20) & 0x1) {
+                case 0x0:
+                    /* 00000100 ..10.... 1100.... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:455 */
+                    u.f_incdec2_cnt.d = extract32(insn, 11, 1);
+                    u.f_incdec2_cnt.u = extract32(insn, 10, 1);
+                    if (trans_SINCDEC_v(ctx, &u.f_incdec2_cnt)) return true;
+                    return false;
+                case 0x1:
+                    /* 00000100 ..11.... 1100.... ........ */
+                    switch ((insn >> 11) & 0x1) {
+                    case 0x0:
+                        /* 00000100 ..11.... 11000... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:451 */
+                        u.f_incdec2_cnt.d = extract32(insn, 10, 1);
+                        u.f_incdec2_cnt.u = 1;
+                        if (trans_INCDEC_v(ctx, &u.f_incdec2_cnt)) return true;
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            case 0xe:
+                /* 00000100 ..1..... 1110.... ........ */
+                disas_sve_extract_incdec_cnt(ctx, &u.f_incdec_cnt, insn);
+                switch (insn & 0x00100800) {
+                case 0x00000000:
+                    /* 00000100 ..10.... 11100... ........ */
+                    switch ((insn >> 10) & 0x1) {
+                    case 0x0:
+                        /* 00000100 ..10.... 111000.. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:440 */
+                        u.f_incdec_cnt.d = 0;
+                        u.f_incdec_cnt.u = 1;
+                        if (trans_CNT_r(ctx, &u.f_incdec_cnt)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x00100000:
+                    /* 00000100 ..11.... 11100... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:443 */
+                    u.f_incdec_cnt.d = extract32(insn, 10, 1);
+                    u.f_incdec_cnt.u = 1;
+                    if (trans_INCDEC_r(ctx, &u.f_incdec_cnt)) return true;
+                    return false;
+                }
+                return false;
+            case 0xf:
+                /* 00000100 ..1..... 1111.... ........ */
+                disas_sve_extract_incdec_cnt(ctx, &u.f_incdec_cnt, insn);
+                switch ((insn >> 20) & 0x1) {
+                case 0x0:
+                    /* 00000100 ..10.... 1111.... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:446 */
+                    u.f_incdec_cnt.d = extract32(insn, 11, 1);
+                    u.f_incdec_cnt.u = extract32(insn, 10, 1);
+                    if (trans_SINCDEC_r_32(ctx, &u.f_incdec_cnt)) return true;
+                    return false;
+                case 0x1:
+                    /* 00000100 ..11.... 1111.... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:447 */
+                    u.f_incdec_cnt.d = extract32(insn, 11, 1);
+                    u.f_incdec_cnt.u = extract32(insn, 10, 1);
+                    if (trans_SINCDEC_r_64(ctx, &u.f_incdec_cnt)) return true;
+                    return false;
+                }
+                return false;
+            }
+            return false;
+        case 0x01000000:
+            /* 00000101 ..0..... ........ ........ */
+            switch ((insn >> 20) & 0x1) {
+            case 0x0:
+                /* 00000101 ..00.... ........ ........ */
+                switch (insn & 0x00cc0000) {
+                case 0x00000000:
+                    /* 00000101 000000.. ........ ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:460 */
+                    disas_sve_extract_rdn_dbm(ctx, &u.f_rr_dbm, insn);
+                    if (trans_ORR_zzi(ctx, &u.f_rr_dbm)) return true;
+                    return false;
+                case 0x00400000:
+                    /* 00000101 010000.. ........ ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:461 */
+                    disas_sve_extract_rdn_dbm(ctx, &u.f_rr_dbm, insn);
+                    if (trans_EOR_zzi(ctx, &u.f_rr_dbm)) return true;
+                    return false;
+                case 0x00800000:
+                    /* 00000101 100000.. ........ ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:462 */
+                    disas_sve_extract_rdn_dbm(ctx, &u.f_rr_dbm, insn);
+                    if (trans_AND_zzi(ctx, &u.f_rr_dbm)) return true;
+                    return false;
+                case 0x00c00000:
+                    /* 00000101 110000.. ........ ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:465 */
+                    disas_sve_extract_disas_sve_Fmt_60(ctx, &u.f_disas_sve29, insn);
+                    if (trans_DUPM(ctx, &u.f_disas_sve29)) return true;
+                    return false;
+                }
+                return false;
+            case 0x1:
+                /* 00000101 ..01.... ........ ........ */
+                disas_sve_extract_rdn_pg4(ctx, &u.f_rpri_esz, insn);
+                switch ((insn >> 14) & 0x3) {
+                case 0x0:
+                    /* 00000101 ..01.... 00...... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:474 */
+                    u.f_rpri_esz.imm = expand_imm_sh8s(ctx, extract32(insn, 5, 9));
+                    if (trans_CPY_z_i(ctx, &u.f_rpri_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 00000101 ..01.... 01...... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:473 */
+                    u.f_rpri_esz.imm = expand_imm_sh8s(ctx, extract32(insn, 5, 9));
+                    if (trans_CPY_m_i(ctx, &u.f_rpri_esz)) return true;
+                    return false;
+                case 0x3:
+                    /* 00000101 ..01.... 11...... ........ */
+                    switch ((insn >> 13) & 0x1) {
+                    case 0x0:
+                        /* 00000101 ..01.... 110..... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:470 */
+                        u.f_rpri_esz.imm = extract32(insn, 5, 8);
+                        if (trans_FCPY(ctx, &u.f_rpri_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            }
+            return false;
+        case 0x01200000:
+            /* 00000101 ..1..... ........ ........ */
+            switch ((insn >> 14) & 0x3) {
+            case 0x0:
+                /* 00000101 ..1..... 00...... ........ */
+                switch ((insn >> 13) & 0x1) {
+                case 0x0:
+                    /* 00000101 ..1..... 000..... ........ */
+                    disas_sve_extract_disas_sve_Fmt_61(ctx, &u.f_rrri, insn);
+                    switch ((insn >> 22) & 0x3) {
+                    case 0x0:
+                        /* 00000101 001..... 000..... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:479 */
+                        if (trans_EXT(ctx, &u.f_rrri)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x1:
+                    /* 00000101 ..1..... 001..... ........ */
+                    switch ((insn >> 10) & 0x7) {
+                    case 0x0:
+                        /* 00000101 ..1..... 001000.. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:488 */
+                        disas_sve_extract_disas_sve_Fmt_62(ctx, &u.f_rri, insn);
+                        if (trans_DUP_x(ctx, &u.f_rri)) return true;
+                        return false;
+                    case 0x4:
+                        /* 00000101 ..1..... 001100.. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:501 */
+                        disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                        if (trans_TBL(ctx, &u.f_rrr_esz)) return true;
+                        return false;
+                    case 0x6:
+                        /* 00000101 ..1..... 001110.. ........ */
+                        switch ((insn >> 18) & 0x7) {
+                        case 0x0:
+                            /* 00000101 ..1000.. 001110.. ........ */
+                            disas_sve_extract_rd_rn(ctx, &u.f_rr_esz, insn);
+                            switch ((insn >> 16) & 0x3) {
+                            case 0x0:
+                                /* 00000101 ..100000 001110.. ........ */
+                                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:485 */
+                                if (trans_DUP_s(ctx, &u.f_rr_esz)) return true;
+                                return false;
+                            }
+                            return false;
+                        case 0x1:
+                            /* 00000101 ..1001.. 001110.. ........ */
+                            disas_sve_extract_rdn_rm(ctx, &u.f_rrr_esz, insn);
+                            switch ((insn >> 16) & 0x3) {
+                            case 0x0:
+                                /* 00000101 ..100100 001110.. ........ */
+                                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:495 */
+                                if (trans_INSR_r(ctx, &u.f_rrr_esz)) return true;
+                                return false;
+                            }
+                            return false;
+                        case 0x4:
+                            /* 00000101 ..1100.. 001110.. ........ */
+                            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:504 */
+                            disas_sve_extract_disas_sve_Fmt_63(ctx, &u.f_disas_sve30, insn);
+                            if (trans_UNPK(ctx, &u.f_disas_sve30)) return true;
+                            return false;
+                        case 0x5:
+                            /* 00000101 ..1101.. 001110.. ........ */
+                            disas_sve_extract_rdn_rm(ctx, &u.f_rrr_esz, insn);
+                            switch ((insn >> 16) & 0x3) {
+                            case 0x0:
+                                /* 00000101 ..110100 001110.. ........ */
+                                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:492 */
+                                if (trans_INSR_f(ctx, &u.f_rrr_esz)) return true;
+                                return false;
+                            }
+                            return false;
+                        case 0x6:
+                            /* 00000101 ..1110.. 001110.. ........ */
+                            disas_sve_extract_rd_rn(ctx, &u.f_rr_esz, insn);
+                            switch ((insn >> 16) & 0x3) {
+                            case 0x0:
+                                /* 00000101 ..111000 001110.. ........ */
+                                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:498 */
+                                if (trans_REV_v(ctx, &u.f_rr_esz)) return true;
+                                return false;
+                            }
+                            return false;
+                        }
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            case 0x1:
+                /* 00000101 ..1..... 01...... ........ */
+                switch ((insn >> 10) & 0xf) {
+                case 0x0:
+                    /* 00000101 ..1..... 010000.. ........ */
+                    switch (insn & 0x00100210) {
+                    case 0x00000000:
+                        /* 00000101 ..10.... 0100000. ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:509 */
+                        disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn);
+                        if (trans_ZIP1_p(ctx, &u.f_rrr_esz)) return true;
+                        return false;
+                    case 0x00100000:
+                        /* 00000101 ..11.... 0100000. ...0.... */
+                        switch ((insn >> 16) & 0xf) {
+                        case 0x0:
+                            /* 00000101 ..110000 0100000. ...0.... */
+                            disas_sve_extract_pd_pn_e0(ctx, &u.f_rr_esz, insn);
+                            switch ((insn >> 22) & 0x3) {
+                            case 0x0:
+                                /* 00000101 00110000 0100000. ...0.... */
+                                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:520 */
+                                if (trans_PUNPKLO(ctx, &u.f_rr_esz)) return true;
+                                return false;
+                            }
+                            return false;
+                        case 0x1:
+                            /* 00000101 ..110001 0100000. ...0.... */
+                            disas_sve_extract_pd_pn_e0(ctx, &u.f_rr_esz, insn);
+                            switch ((insn >> 22) & 0x3) {
+                            case 0x0:
+                                /* 00000101 00110001 0100000. ...0.... */
+                                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:521 */
+                                if (trans_PUNPKHI(ctx, &u.f_rr_esz)) return true;
+                                return false;
+                            }
+                            return false;
+                        case 0x4:
+                            /* 00000101 ..110100 0100000. ...0.... */
+                            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:517 */
+                            disas_sve_extract_pd_pn(ctx, &u.f_rr_esz, insn);
+                            if (trans_REV_p(ctx, &u.f_rr_esz)) return true;
+                            return false;
+                        }
+                        return false;
+                    }
+                    return false;
+                case 0x1:
+                    /* 00000101 ..1..... 010001.. ........ */
+                    disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn);
+                    switch (insn & 0x00100210) {
+                    case 0x00000000:
+                        /* 00000101 ..10.... 0100010. ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:510 */
+                        if (trans_ZIP2_p(ctx, &u.f_rrr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x2:
+                    /* 00000101 ..1..... 010010.. ........ */
+                    disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn);
+                    switch (insn & 0x00100210) {
+                    case 0x00000000:
+                        /* 00000101 ..10.... 0100100. ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:511 */
+                        if (trans_UZP1_p(ctx, &u.f_rrr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x3:
+                    /* 00000101 ..1..... 010011.. ........ */
+                    disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn);
+                    switch (insn & 0x00100210) {
+                    case 0x00000000:
+                        /* 00000101 ..10.... 0100110. ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:512 */
+                        if (trans_UZP2_p(ctx, &u.f_rrr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x4:
+                    /* 00000101 ..1..... 010100.. ........ */
+                    disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn);
+                    switch (insn & 0x00100210) {
+                    case 0x00000000:
+                        /* 00000101 ..10.... 0101000. ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:513 */
+                        if (trans_TRN1_p(ctx, &u.f_rrr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x5:
+                    /* 00000101 ..1..... 010101.. ........ */
+                    disas_sve_extract_pd_pn_pm(ctx, &u.f_rrr_esz, insn);
+                    switch (insn & 0x00100210) {
+                    case 0x00000000:
+                        /* 00000101 ..10.... 0101010. ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:514 */
+                        if (trans_TRN2_p(ctx, &u.f_rrr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x8:
+                    /* 00000101 ..1..... 011000.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:526 */
+                    disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                    if (trans_ZIP1_z(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x9:
+                    /* 00000101 ..1..... 011001.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:527 */
+                    disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                    if (trans_ZIP2_z(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0xa:
+                    /* 00000101 ..1..... 011010.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:528 */
+                    disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                    if (trans_UZP1_z(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0xb:
+                    /* 00000101 ..1..... 011011.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:529 */
+                    disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                    if (trans_UZP2_z(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0xc:
+                    /* 00000101 ..1..... 011100.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:530 */
+                    disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                    if (trans_TRN1_z(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0xd:
+                    /* 00000101 ..1..... 011101.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:531 */
+                    disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                    if (trans_TRN2_z(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x2:
+                /* 00000101 ..1..... 10...... ........ */
+                switch (insn & 0x001f2000) {
+                case 0x00000000:
+                    /* 00000101 ..100000 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:560 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_CPY_m_v(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00002000:
+                    /* 00000101 ..100000 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:556 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_LASTA_r(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00010000:
+                    /* 00000101 ..100001 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:537 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_COMPACT(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00012000:
+                    /* 00000101 ..100001 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:557 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_LASTB_r(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00020000:
+                    /* 00000101 ..100010 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:552 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_LASTA_v(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00030000:
+                    /* 00000101 ..100011 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:553 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_LASTB_v(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00040000:
+                    /* 00000101 ..100100 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:567 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_REVB(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00050000:
+                    /* 00000101 ..100101 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:568 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_REVH(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00060000:
+                    /* 00000101 ..100110 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:569 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_REVW(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00070000:
+                    /* 00000101 ..100111 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:570 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_RBIT(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00080000:
+                    /* 00000101 ..101000 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:540 */
+                    disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                    if (trans_CLASTA_z(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x00082000:
+                    /* 00000101 ..101000 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:563 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_CPY_m_r(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00090000:
+                    /* 00000101 ..101001 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:541 */
+                    disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                    if (trans_CLASTB_z(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x000a0000:
+                    /* 00000101 ..101010 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:544 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_CLASTA_v(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x000b0000:
+                    /* 00000101 ..101011 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:545 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_CLASTB_v(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x000c0000:
+                    /* 00000101 ..101100 100..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:573 */
+                    disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                    if (trans_SPLICE(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x00102000:
+                    /* 00000101 ..110000 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:548 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_CLASTA_r(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x00112000:
+                    /* 00000101 ..110001 101..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:549 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_CLASTB_r(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x3:
+                /* 00000101 ..1..... 11...... ........ */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:578 */
+                disas_sve_extract_rd_pg4_rn_rm(ctx, &u.f_rprr_esz, insn);
+                if (trans_SEL_zpzz(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            }
+            return false;
+        }
+        return false;
+    case 0x12:
+        /* 0010010. ........ ........ ........ */
+        switch (insn & 0x01200000) {
+        case 0x00000000:
+            /* 00100100 ..0..... ........ ........ */
+            disas_sve_extract_pd_pg_rn_rm(ctx, &u.f_rprr_esz, insn);
+            switch (insn & 0x0000e010) {
+            case 0x00000000:
+                /* 00100100 ..0..... 000..... ...0.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:583 */
+                if (trans_CMPHS_ppzz(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x00000010:
+                /* 00100100 ..0..... 000..... ...1.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:584 */
+                if (trans_CMPHI_ppzz(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x00002000:
+                /* 00100100 ..0..... 001..... ...0.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:592 */
+                if (trans_CMPEQ_ppzw(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x00002010:
+                /* 00100100 ..0..... 001..... ...1.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:593 */
+                if (trans_CMPNE_ppzw(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x00004000:
+                /* 00100100 ..0..... 010..... ...0.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:594 */
+                if (trans_CMPGE_ppzw(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x00004010:
+                /* 00100100 ..0..... 010..... ...1.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:595 */
+                if (trans_CMPGT_ppzw(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x00006000:
+                /* 00100100 ..0..... 011..... ...0.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:596 */
+                if (trans_CMPLT_ppzw(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x00006010:
+                /* 00100100 ..0..... 011..... ...1.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:597 */
+                if (trans_CMPLE_ppzw(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x00008000:
+                /* 00100100 ..0..... 100..... ...0.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:585 */
+                if (trans_CMPGE_ppzz(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x00008010:
+                /* 00100100 ..0..... 100..... ...1.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:586 */
+                if (trans_CMPGT_ppzz(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x0000a000:
+                /* 00100100 ..0..... 101..... ...0.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:587 */
+                if (trans_CMPEQ_ppzz(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x0000a010:
+                /* 00100100 ..0..... 101..... ...1.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:588 */
+                if (trans_CMPNE_ppzz(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x0000c000:
+                /* 00100100 ..0..... 110..... ...0.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:598 */
+                if (trans_CMPHS_ppzw(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x0000c010:
+                /* 00100100 ..0..... 110..... ...1.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:599 */
+                if (trans_CMPHI_ppzw(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x0000e000:
+                /* 00100100 ..0..... 111..... ...0.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:600 */
+                if (trans_CMPLO_ppzw(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            case 0x0000e010:
+                /* 00100100 ..0..... 111..... ...1.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:601 */
+                if (trans_CMPLS_ppzw(ctx, &u.f_rprr_esz)) return true;
+                return false;
+            }
+            return false;
+        case 0x00200000:
+            /* 00100100 ..1..... ........ ........ */
+            disas_sve_extract_pd_pg_rn_i7(ctx, &u.f_rpri_esz, insn);
+            switch (insn & 0x00002010) {
+            case 0x00000000:
+                /* 00100100 ..1..... ..0..... ...0.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:606 */
+                if (trans_CMPHS_ppzi(ctx, &u.f_rpri_esz)) return true;
+                return false;
+            case 0x00000010:
+                /* 00100100 ..1..... ..0..... ...1.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:607 */
+                if (trans_CMPHI_ppzi(ctx, &u.f_rpri_esz)) return true;
+                return false;
+            case 0x00002000:
+                /* 00100100 ..1..... ..1..... ...0.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:608 */
+                if (trans_CMPLO_ppzi(ctx, &u.f_rpri_esz)) return true;
+                return false;
+            case 0x00002010:
+                /* 00100100 ..1..... ..1..... ...1.... */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:609 */
+                if (trans_CMPLS_ppzi(ctx, &u.f_rpri_esz)) return true;
+                return false;
+            }
+            return false;
+        case 0x01000000:
+            /* 00100101 ..0..... ........ ........ */
+            switch (insn & 0x0000c010) {
+            case 0x00000000:
+                /* 00100101 ..0..... 00...... ...0.... */
+                disas_sve_extract_pd_pg_rn_i5(ctx, &u.f_rpri_esz, insn);
+                switch ((insn >> 13) & 0x1) {
+                case 0x0:
+                    /* 00100101 ..0..... 000..... ...0.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:614 */
+                    if (trans_CMPGE_ppzi(ctx, &u.f_rpri_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 00100101 ..0..... 001..... ...0.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:616 */
+                    if (trans_CMPLT_ppzi(ctx, &u.f_rpri_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x00000010:
+                /* 00100101 ..0..... 00...... ...1.... */
+                disas_sve_extract_pd_pg_rn_i5(ctx, &u.f_rpri_esz, insn);
+                switch ((insn >> 13) & 0x1) {
+                case 0x0:
+                    /* 00100101 ..0..... 000..... ...1.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:615 */
+                    if (trans_CMPGT_ppzi(ctx, &u.f_rpri_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 00100101 ..0..... 001..... ...1.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:617 */
+                    if (trans_CMPLE_ppzi(ctx, &u.f_rpri_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x00004000:
+                /* 00100101 ..0..... 01...... ...0.... */
+                switch (insn & 0x00900200) {
+                case 0x00000000:
+                    /* 00100101 0.00.... 01....0. ...0.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:624 */
+                    disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn);
+                    if (trans_AND_pppp(ctx, &u.f_rprr_s)) return true;
+                    return false;
+                case 0x00000200:
+                    /* 00100101 0.00.... 01....1. ...0.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:626 */
+                    disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn);
+                    if (trans_EOR_pppp(ctx, &u.f_rprr_s)) return true;
+                    return false;
+                case 0x00100000:
+                    /* 00100101 0.01.... 01....0. ...0.... */
+                    disas_sve_extract_pd_pg_pn_s(ctx, &u.f_rpr_s, insn);
+                    switch ((insn >> 16) & 0xf) {
+                    case 0x0:
+                        /* 00100101 0.010000 01....0. ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:669 */
+                        if (trans_BRKA_z(ctx, &u.f_rpr_s)) return true;
+                        return false;
+                    case 0x8:
+                        /* 00100101 0.011000 01....0. ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:675 */
+                        if (trans_BRKN(ctx, &u.f_rpr_s)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x00800000:
+                    /* 00100101 1.00.... 01....0. ...0.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:628 */
+                    disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn);
+                    if (trans_ORR_pppp(ctx, &u.f_rprr_s)) return true;
+                    return false;
+                case 0x00800200:
+                    /* 00100101 1.00.... 01....1. ...0.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:630 */
+                    disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn);
+                    if (trans_NOR_pppp(ctx, &u.f_rprr_s)) return true;
+                    return false;
+                case 0x00900000:
+                    /* 00100101 1.01.... 01....0. ...0.... */
+                    disas_sve_extract_pd_pg_pn_s(ctx, &u.f_rpr_s, insn);
+                    switch ((insn >> 16) & 0xf) {
+                    case 0x0:
+                        /* 00100101 1.010000 01....0. ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:670 */
+                        if (trans_BRKB_z(ctx, &u.f_rpr_s)) return true;
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            case 0x00004010:
+                /* 00100101 ..0..... 01...... ...1.... */
+                switch (insn & 0x00900200) {
+                case 0x00000000:
+                    /* 00100101 0.00.... 01....0. ...1.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:625 */
+                    disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn);
+                    if (trans_BIC_pppp(ctx, &u.f_rprr_s)) return true;
+                    return false;
+                case 0x00000200:
+                    /* 00100101 0.00.... 01....1. ...1.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:627 */
+                    disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn);
+                    if (trans_SEL_pppp(ctx, &u.f_rprr_s)) return true;
+                    return false;
+                case 0x00100000:
+                    /* 00100101 0.01.... 01....0. ...1.... */
+                    disas_sve_extract_pd_pg_pn_s0(ctx, &u.f_rpr_s, insn);
+                    switch (insn & 0x004f0000) {
+                    case 0x00000000:
+                        /* 00100101 00010000 01....0. ...1.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:671 */
+                        if (trans_BRKA_m(ctx, &u.f_rpr_s)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x00800000:
+                    /* 00100101 1.00.... 01....0. ...1.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:629 */
+                    disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn);
+                    if (trans_ORN_pppp(ctx, &u.f_rprr_s)) return true;
+                    return false;
+                case 0x00800200:
+                    /* 00100101 1.00.... 01....1. ...1.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:631 */
+                    disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn);
+                    if (trans_NAND_pppp(ctx, &u.f_rprr_s)) return true;
+                    return false;
+                case 0x00900000:
+                    /* 00100101 1.01.... 01....0. ...1.... */
+                    disas_sve_extract_pd_pg_pn_s0(ctx, &u.f_rpr_s, insn);
+                    switch (insn & 0x004f0000) {
+                    case 0x00000000:
+                        /* 00100101 10010000 01....0. ...1.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:672 */
+                        if (trans_BRKB_m(ctx, &u.f_rpr_s)) return true;
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            case 0x00008000:
+                /* 00100101 ..0..... 10...... ...0.... */
+                disas_sve_extract_pd_pg_rn_i5(ctx, &u.f_rpri_esz, insn);
+                switch ((insn >> 13) & 0x1) {
+                case 0x0:
+                    /* 00100101 ..0..... 100..... ...0.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:618 */
+                    if (trans_CMPEQ_ppzi(ctx, &u.f_rpri_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x00008010:
+                /* 00100101 ..0..... 10...... ...1.... */
+                disas_sve_extract_pd_pg_rn_i5(ctx, &u.f_rpri_esz, insn);
+                switch ((insn >> 13) & 0x1) {
+                case 0x0:
+                    /* 00100101 ..0..... 100..... ...1.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:619 */
+                    if (trans_CMPNE_ppzi(ctx, &u.f_rpri_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x0000c000:
+                /* 00100101 ..0..... 11...... ...0.... */
+                switch ((insn >> 20) & 0x1) {
+                case 0x0:
+                    /* 00100101 ..00.... 11...... ...0.... */
+                    disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn);
+                    switch (insn & 0x00800200) {
+                    case 0x00000000:
+                        /* 00100101 0.00.... 11....0. ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:665 */
+                        if (trans_BRKPA(ctx, &u.f_rprr_s)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x1:
+                    /* 00100101 ..01.... 11...... ...0.... */
+                    switch ((insn >> 17) & 0x7) {
+                    case 0x0:
+                        /* 00100101 ..01000. 11...... ...0.... */
+                        disas_sve_extract_disas_sve_Fmt_64(ctx, &u.f_disas_sve31, insn);
+                        switch (insn & 0x00c1020f) {
+                        case 0x00400000:
+                            /* 00100101 01010000 11....0. ...00000 */
+                            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:636 */
+                            if (trans_PTEST(ctx, &u.f_disas_sve31)) return true;
+                            return false;
+                        }
+                        return false;
+                    case 0x4:
+                        /* 00100101 ..01100. 11...... ...0.... */
+                        switch ((insn >> 10) & 0xf) {
+                        case 0x0:
+                            /* 00100101 ..01100. 110000.. ...0.... */
+                            disas_sve_extract_pd_pn_e0(ctx, &u.f_rr_esz, insn);
+                            switch (insn & 0x00c10200) {
+                            case 0x00400000:
+                                /* 00100101 01011000 1100000. ...0.... */
+                                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:657 */
+                                if (trans_PFIRST(ctx, &u.f_rr_esz)) return true;
+                                return false;
+                            }
+                            return false;
+                        case 0x1:
+                            /* 00100101 ..01100. 110001.. ...0.... */
+                            disas_sve_extract_pd_pn(ctx, &u.f_rr_esz, insn);
+                            switch (insn & 0x00010200) {
+                            case 0x00010000:
+                                /* 00100101 ..011001 1100010. ...0.... */
+                                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:660 */
+                                if (trans_PNEXT(ctx, &u.f_rr_esz)) return true;
+                                return false;
+                            }
+                            return false;
+                        case 0x8:
+                            /* 00100101 ..01100. 111000.. ...0.... */
+                            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:639 */
+                            disas_sve_extract_disas_sve_Fmt_65(ctx, &u.f_ptrue, insn);
+                            if (trans_PTRUE(ctx, &u.f_ptrue)) return true;
+                            return false;
+                        case 0x9:
+                            /* 00100101 ..01100. 111001.. ...0.... */
+                            disas_sve_extract_disas_sve_Fmt_67(ctx, &u.f_disas_sve33, insn);
+                            switch (insn & 0x00c103e0) {
+                            case 0x00000000:
+                                /* 00100101 00011000 11100100 0000.... */
+                                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:645 */
+                                if (trans_PFALSE(ctx, &u.f_disas_sve33)) return true;
+                                return false;
+                            }
+                            return false;
+                        case 0xc:
+                            /* 00100101 ..01100. 111100.. ...0.... */
+                            switch (insn & 0x00810200) {
+                            case 0x00000000:
+                                /* 00100101 0.011000 1111000. ...0.... */
+                                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:648 */
+                                disas_sve_extract_disas_sve_Fmt_68(ctx, &u.f_disas_sve34, insn);
+                                if (trans_RDFFR_p(ctx, &u.f_disas_sve34)) return true;
+                                return false;
+                            case 0x00010000:
+                                /* 00100101 0.011001 1111000. ...0.... */
+                                disas_sve_extract_disas_sve_Fmt_67(ctx, &u.f_disas_sve33, insn);
+                                switch (insn & 0x004001e0) {
+                                case 0x00000000:
+                                    /* 00100101 00011001 11110000 0000.... */
+                                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:651 */
+                                    if (trans_RDFFR(ctx, &u.f_disas_sve33)) return true;
+                                    return false;
+                                }
+                                return false;
+                            }
+                            return false;
+                        }
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            case 0x0000c010:
+                /* 00100101 ..0..... 11...... ...1.... */
+                disas_sve_extract_pd_pg_pn_pm_s(ctx, &u.f_rprr_s, insn);
+                switch (insn & 0x00900200) {
+                case 0x00000000:
+                    /* 00100101 0.00.... 11....0. ...1.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:666 */
+                    if (trans_BRKPB(ctx, &u.f_rprr_s)) return true;
+                    return false;
+                }
+                return false;
+            }
+            return false;
+        case 0x01200000:
+            /* 00100101 ..1..... ........ ........ */
+            switch ((insn >> 14) & 0x3) {
+            case 0x0:
+                /* 00100101 ..1..... 00...... ........ */
+                switch (insn & 0x00002400) {
+                case 0x00000400:
+                    /* 00100101 ..1..... 000..1.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:701 */
+                    disas_sve_extract_disas_sve_Fmt_71(ctx, &u.f_disas_sve37, insn);
+                    if (trans_WHILE(ctx, &u.f_disas_sve37)) return true;
+                    return false;
+                case 0x00002000:
+                    /* 00100101 ..1..... 001..0.. ........ */
+                    disas_sve_extract_disas_sve_Fmt_70(ctx, &u.f_disas_sve36, insn);
+                    switch (insn & 0x0080180f) {
+                    case 0x00800000:
+                        /* 00100101 1.1..... 001000.. ....0000 */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:698 */
+                        if (trans_CTERM(ctx, &u.f_disas_sve36)) return true;
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            case 0x2:
+                /* 00100101 ..1..... 10...... ........ */
+                switch (insn & 0x001c0200) {
+                case 0x00000000:
+                    /* 00100101 ..1000.. 10....0. ........ */
+                    disas_sve_extract_rd_pg4_pn(ctx, &u.f_rpr_esz, insn);
+                    switch ((insn >> 16) & 0x3) {
+                    case 0x0:
+                        /* 00100101 ..100000 10....0. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:680 */
+                        if (trans_CNTP(ctx, &u.f_rpr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x00080000:
+                    /* 00100101 ..1010.. 10....0. ........ */
+                    switch ((insn >> 10) & 0xf) {
+                    case 0x0:
+                        /* 00100101 ..1010.. 1000000. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:693 */
+                        disas_sve_extract_incdec2_pred(ctx, &u.f_incdec2_pred, insn);
+                        u.f_incdec2_pred.d = extract32(insn, 17, 1);
+                        u.f_incdec2_pred.u = extract32(insn, 16, 1);
+                        if (trans_SINCDECP_z(ctx, &u.f_incdec2_pred)) return true;
+                        return false;
+                    case 0x2:
+                        /* 00100101 ..1010.. 1000100. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:689 */
+                        disas_sve_extract_incdec_pred(ctx, &u.f_incdec_pred, insn);
+                        u.f_incdec_pred.d = extract32(insn, 17, 1);
+                        u.f_incdec_pred.u = extract32(insn, 16, 1);
+                        if (trans_SINCDECP_r_32(ctx, &u.f_incdec_pred)) return true;
+                        return false;
+                    case 0x3:
+                        /* 00100101 ..1010.. 1000110. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:690 */
+                        disas_sve_extract_incdec_pred(ctx, &u.f_incdec_pred, insn);
+                        u.f_incdec_pred.d = extract32(insn, 17, 1);
+                        u.f_incdec_pred.u = extract32(insn, 16, 1);
+                        if (trans_SINCDECP_r_64(ctx, &u.f_incdec_pred)) return true;
+                        return false;
+                    case 0x4:
+                        /* 00100101 ..1010.. 1001000. ........ */
+                        disas_sve_extract_disas_sve_Fmt_69(ctx, &u.f_disas_sve35, insn);
+                        switch (insn & 0x00c3001f) {
+                        case 0x00000000:
+                            /* 00100101 00101000 1001000. ...00000 */
+                            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:654 */
+                            if (trans_WRFFR(ctx, &u.f_disas_sve35)) return true;
+                            return false;
+                        }
+                        return false;
+                    }
+                    return false;
+                case 0x000c0000:
+                    /* 00100101 ..1011.. 10....0. ........ */
+                    switch (insn & 0x00023c00) {
+                    case 0x00000000:
+                        /* 00100101 ..10110. 1000000. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:686 */
+                        disas_sve_extract_incdec2_pred(ctx, &u.f_incdec2_pred, insn);
+                        u.f_incdec2_pred.d = extract32(insn, 16, 1);
+                        u.f_incdec2_pred.u = 1;
+                        if (trans_INCDECP_z(ctx, &u.f_incdec2_pred)) return true;
+                        return false;
+                    case 0x00000800:
+                        /* 00100101 ..10110. 1000100. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:683 */
+                        disas_sve_extract_incdec_pred(ctx, &u.f_incdec_pred, insn);
+                        u.f_incdec_pred.d = extract32(insn, 16, 1);
+                        u.f_incdec_pred.u = 1;
+                        if (trans_INCDECP_r(ctx, &u.f_incdec_pred)) return true;
+                        return false;
+                    case 0x00001000:
+                        /* 00100101 ..10110. 1001000. ........ */
+                        disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn);
+                        switch (insn & 0x00c101ff) {
+                        case 0x00000000:
+                            /* 00100101 00101100 10010000 00000000 */
+                            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:642 */
+                            if (trans_SETFFR(ctx, &u.f_disas_sve32)) return true;
+                            return false;
+                        }
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            case 0x3:
+                /* 00100101 ..1..... 11...... ........ */
+                switch ((insn >> 16) & 0x1f) {
+                case 0x0:
+                    /* 00100101 ..100000 11...... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:712 */
+                    disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn);
+                    if (trans_ADD_zzi(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 00100101 ..100001 11...... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:713 */
+                    disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn);
+                    if (trans_SUB_zzi(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                case 0x3:
+                    /* 00100101 ..100011 11...... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:714 */
+                    disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn);
+                    if (trans_SUBR_zzi(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                case 0x4:
+                    /* 00100101 ..100100 11...... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:715 */
+                    disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn);
+                    if (trans_SQADD_zzi(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                case 0x5:
+                    /* 00100101 ..100101 11...... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:716 */
+                    disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn);
+                    if (trans_UQADD_zzi(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                case 0x6:
+                    /* 00100101 ..100110 11...... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:717 */
+                    disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn);
+                    if (trans_SQSUB_zzi(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                case 0x7:
+                    /* 00100101 ..100111 11...... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:718 */
+                    disas_sve_extract_rdn_sh_i8u(ctx, &u.f_rri_esz, insn);
+                    if (trans_UQSUB_zzi(ctx, &u.f_rri_esz)) return true;
+                    return false;
+                case 0x8:
+                    /* 00100101 ..101000 11...... ........ */
+                    disas_sve_extract_rdn_i8s(ctx, &u.f_rri_esz, insn);
+                    switch ((insn >> 13) & 0x1) {
+                    case 0x0:
+                        /* 00100101 ..101000 110..... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:721 */
+                        if (trans_SMAX_zzi(ctx, &u.f_rri_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x9:
+                    /* 00100101 ..101001 11...... ........ */
+                    disas_sve_extract_rdn_i8u(ctx, &u.f_rri_esz, insn);
+                    switch ((insn >> 13) & 0x1) {
+                    case 0x0:
+                        /* 00100101 ..101001 110..... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:722 */
+                        if (trans_UMAX_zzi(ctx, &u.f_rri_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0xa:
+                    /* 00100101 ..101010 11...... ........ */
+                    disas_sve_extract_rdn_i8s(ctx, &u.f_rri_esz, insn);
+                    switch ((insn >> 13) & 0x1) {
+                    case 0x0:
+                        /* 00100101 ..101010 110..... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:723 */
+                        if (trans_SMIN_zzi(ctx, &u.f_rri_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0xb:
+                    /* 00100101 ..101011 11...... ........ */
+                    disas_sve_extract_rdn_i8u(ctx, &u.f_rri_esz, insn);
+                    switch ((insn >> 13) & 0x1) {
+                    case 0x0:
+                        /* 00100101 ..101011 110..... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:724 */
+                        if (trans_UMIN_zzi(ctx, &u.f_rri_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x10:
+                    /* 00100101 ..110000 11...... ........ */
+                    disas_sve_extract_rdn_i8s(ctx, &u.f_rri_esz, insn);
+                    switch ((insn >> 13) & 0x1) {
+                    case 0x0:
+                        /* 00100101 ..110000 110..... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:727 */
+                        if (trans_MUL_zzi(ctx, &u.f_rri_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x18:
+                    /* 00100101 ..111000 11...... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:709 */
+                    disas_sve_extract_disas_sve_Fmt_73(ctx, &u.f_disas_sve38, insn);
+                    if (trans_DUP_i(ctx, &u.f_disas_sve38)) return true;
+                    return false;
+                case 0x19:
+                    /* 00100101 ..111001 11...... ........ */
+                    disas_sve_extract_disas_sve_Fmt_72(ctx, &u.f_disas_sve38, insn);
+                    switch ((insn >> 13) & 0x1) {
+                    case 0x0:
+                        /* 00100101 ..111001 110..... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:706 */
+                        if (trans_FDUP(ctx, &u.f_disas_sve38)) return true;
+                        return false;
+                    }
+                    return false;
+                }
+                return false;
+            }
+            return false;
+        }
+        return false;
+    case 0x22:
+        /* 0100010. ........ ........ ........ */
+        switch (insn & 0x01a0f800) {
+        case 0x00800000:
+            /* 01000100 1.0..... 00000... ........ */
+            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:730 */
+            disas_sve_extract_disas_sve_Fmt_74(ctx, &u.f_disas_sve39, insn);
+            if (trans_DOT_zzz(ctx, &u.f_disas_sve39)) return true;
+            return false;
+        case 0x00a00000:
+            /* 01000100 1.1..... 00000... ........ */
+            switch ((insn >> 22) & 0x1) {
+            case 0x0:
+                /* 01000100 101..... 00000... ........ */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:733 */
+                disas_sve_extract_disas_sve_Fmt_75(ctx, &u.f_disas_sve40, insn);
+                if (trans_DOT_zzx(ctx, &u.f_disas_sve40)) return true;
+                return false;
+            case 0x1:
+                /* 01000100 111..... 00000... ........ */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:735 */
+                disas_sve_extract_disas_sve_Fmt_76(ctx, &u.f_disas_sve40, insn);
+                if (trans_DOT_zzx(ctx, &u.f_disas_sve40)) return true;
+                return false;
+            }
+            return false;
+        }
+        return false;
+    case 0x32:
+        /* 0110010. ........ ........ ........ */
+        switch (insn & 0x01208000) {
+        case 0x00000000:
+            /* 01100100 ..0..... 0....... ........ */
+            /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:743 */
+            disas_sve_extract_disas_sve_Fmt_78(ctx, &u.f_disas_sve42, insn);
+            if (trans_FCMLA_zpzzz(ctx, &u.f_disas_sve42)) return true;
+            return false;
+        case 0x00008000:
+            /* 01100100 ..0..... 1....... ........ */
+            disas_sve_extract_disas_sve_Fmt_77(ctx, &u.f_disas_sve41, insn);
+            switch (insn & 0x001e6000) {
+            case 0x00000000:
+                /* 01100100 ..00000. 100..... ........ */
+                /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:739 */
+                if (trans_FCADD(ctx, &u.f_disas_sve41)) return true;
+                return false;
+            }
+            return false;
+        case 0x00200000:
+            /* 01100100 ..1..... 0....... ........ */
+            switch (insn & 0x00807000) {
+            case 0x00000000:
+                /* 01100100 0.1..... 0000.... ........ */
+                disas_sve_extract_disas_sve_Fmt_81(ctx, &u.f_disas_sve44, insn);
+                switch ((insn >> 11) & 0x1) {
+                case 0x0:
+                    /* 01100100 0.1..... 00000... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:755 */
+                    if (trans_FMLA_zzxz(ctx, &u.f_disas_sve44)) return true;
+                    return false;
+                }
+                return false;
+            case 0x00002000:
+                /* 01100100 0.1..... 0010.... ........ */
+                disas_sve_extract_disas_sve_Fmt_84(ctx, &u.f_disas_sve45, insn);
+                switch ((insn >> 10) & 0x3) {
+                case 0x0:
+                    /* 01100100 0.1..... 001000.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:765 */
+                    if (trans_FMUL_zzx(ctx, &u.f_disas_sve45)) return true;
+                    return false;
+                }
+                return false;
+            case 0x00800000:
+                /* 01100100 1.1..... 0000.... ........ */
+                switch (insn & 0x00400800) {
+                case 0x00000000:
+                    /* 01100100 101..... 00000... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:757 */
+                    disas_sve_extract_disas_sve_Fmt_82(ctx, &u.f_disas_sve44, insn);
+                    if (trans_FMLA_zzxz(ctx, &u.f_disas_sve44)) return true;
+                    return false;
+                case 0x00400000:
+                    /* 01100100 111..... 00000... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:759 */
+                    disas_sve_extract_disas_sve_Fmt_83(ctx, &u.f_disas_sve44, insn);
+                    if (trans_FMLA_zzxz(ctx, &u.f_disas_sve44)) return true;
+                    return false;
+                }
+                return false;
+            case 0x00801000:
+                /* 01100100 1.1..... 0001.... ........ */
+                switch ((insn >> 22) & 0x1) {
+                case 0x0:
+                    /* 01100100 101..... 0001.... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:747 */
+                    disas_sve_extract_disas_sve_Fmt_79(ctx, &u.f_disas_sve43, insn);
+                    if (trans_FCMLA_zzxz(ctx, &u.f_disas_sve43)) return true;
+                    return false;
+                case 0x1:
+                    /* 01100100 111..... 0001.... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:749 */
+                    disas_sve_extract_disas_sve_Fmt_80(ctx, &u.f_disas_sve43, insn);
+                    if (trans_FCMLA_zzxz(ctx, &u.f_disas_sve43)) return true;
+                    return false;
+                }
+                return false;
+            case 0x00802000:
+                /* 01100100 1.1..... 0010.... ........ */
+                switch (insn & 0x00400c00) {
+                case 0x00000000:
+                    /* 01100100 101..... 001000.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:767 */
+                    disas_sve_extract_disas_sve_Fmt_85(ctx, &u.f_disas_sve45, insn);
+                    if (trans_FMUL_zzx(ctx, &u.f_disas_sve45)) return true;
+                    return false;
+                case 0x00400000:
+                    /* 01100100 111..... 001000.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:768 */
+                    disas_sve_extract_disas_sve_Fmt_86(ctx, &u.f_disas_sve45, insn);
+                    if (trans_FMUL_zzx(ctx, &u.f_disas_sve45)) return true;
+                    return false;
+                }
+                return false;
+            }
+            return false;
+        case 0x01000000:
+            /* 01100101 ..0..... 0....... ........ */
+            switch ((insn >> 13) & 0x3) {
+            case 0x0:
+                /* 01100101 ..0..... 000..... ........ */
+                disas_sve_extract_rd_rn_rm(ctx, &u.f_rrr_esz, insn);
+                switch ((insn >> 10) & 0x7) {
+                case 0x0:
+                    /* 01100101 ..0..... 000000.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:800 */
+                    if (trans_FADD_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 01100101 ..0..... 000001.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:801 */
+                    if (trans_FSUB_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x2:
+                    /* 01100101 ..0..... 000010.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:802 */
+                    if (trans_FMUL_zzz(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x3:
+                    /* 01100101 ..0..... 000011.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:803 */
+                    if (trans_FTSMUL(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x6:
+                    /* 01100101 ..0..... 000110.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:804 */
+                    if (trans_FRECPS(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                case 0x7:
+                    /* 01100101 ..0..... 000111.. ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:805 */
+                    if (trans_FRSQRTS(ctx, &u.f_rrr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x1:
+                /* 01100101 ..0..... 001..... ........ */
+                switch ((insn >> 16) & 0x1f) {
+                case 0x0:
+                    /* 01100101 ..000000 001..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:772 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_FADDV(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x4:
+                    /* 01100101 ..000100 001..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:773 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_FMAXNMV(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x5:
+                    /* 01100101 ..000101 001..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:774 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_FMINNMV(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x6:
+                    /* 01100101 ..000110 001..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:775 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_FMAXV(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0x7:
+                    /* 01100101 ..000111 001..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:776 */
+                    disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    if (trans_FMINV(ctx, &u.f_rpr_esz)) return true;
+                    return false;
+                case 0xe:
+                    /* 01100101 ..001110 001..... ........ */
+                    disas_sve_extract_rd_rn(ctx, &u.f_rr_esz, insn);
+                    switch ((insn >> 10) & 0x7) {
+                    case 0x4:
+                        /* 01100101 ..001110 001100.. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:780 */
+                        if (trans_FRECPE(ctx, &u.f_rr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0xf:
+                    /* 01100101 ..001111 001..... ........ */
+                    disas_sve_extract_rd_rn(ctx, &u.f_rr_esz, insn);
+                    switch ((insn >> 10) & 0x7) {
+                    case 0x4:
+                        /* 01100101 ..001111 001100.. ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:781 */
+                        if (trans_FRSQRTE(ctx, &u.f_rr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x10:
+                    /* 01100101 ..010000 001..... ........ */
+                    disas_sve_extract_pd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    switch ((insn >> 4) & 0x1) {
+                    case 0x0:
+                        /* 01100101 ..010000 001..... ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:785 */
+                        if (trans_FCMGE_ppz0(ctx, &u.f_rpr_esz)) return true;
+                        return false;
+                    case 0x1:
+                        /* 01100101 ..010000 001..... ...1.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:786 */
+                        if (trans_FCMGT_ppz0(ctx, &u.f_rpr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x11:
+                    /* 01100101 ..010001 001..... ........ */
+                    disas_sve_extract_pd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    switch ((insn >> 4) & 0x1) {
+                    case 0x0:
+                        /* 01100101 ..010001 001..... ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:787 */
+                        if (trans_FCMLT_ppz0(ctx, &u.f_rpr_esz)) return true;
+                        return false;
+                    case 0x1:
+                        /* 01100101 ..010001 001..... ...1.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:788 */
+                        if (trans_FCMLE_ppz0(ctx, &u.f_rpr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x12:
+                    /* 01100101 ..010010 001..... ........ */
+                    disas_sve_extract_pd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    switch ((insn >> 4) & 0x1) {
+                    case 0x0:
+                        /* 01100101 ..010010 001..... ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:789 */
+                        if (trans_FCMEQ_ppz0(ctx, &u.f_rpr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x13:
+                    /* 01100101 ..010011 001..... ........ */
+                    disas_sve_extract_pd_pg_rn(ctx, &u.f_rpr_esz, insn);
+                    switch ((insn >> 4) & 0x1) {
+                    case 0x0:
+                        /* 01100101 ..010011 001..... ...0.... */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:790 */
+                        if (trans_FCMNE_ppz0(ctx, &u.f_rpr_esz)) return true;
+                        return false;
+                    }
+                    return false;
+                case 0x18:
+                    /* 01100101 ..011000 001..... ........ */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:795 */
+                    disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                    if (trans_FADDA(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x2:
+                /* 01100101 ..0..... 010..... ........ */
+                disas_sve_extract_pd_pg_rn_rm(ctx, &u.f_rprr_esz, insn);
+                switch ((insn >> 4) & 0x1) {
+                case 0x0:
+                    /* 01100101 ..0..... 010..... ...0.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:341 */
+                    if (trans_FCMGE_ppzz(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 01100101 ..0..... 010..... ...1.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:342 */
+                    if (trans_FCMGT_ppzz(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                }
+                return false;
+            case 0x3:
+                /* 01100101 ..0..... 011..... ........ */
+                disas_sve_extract_pd_pg_rn_rm(ctx, &u.f_rprr_esz, insn);
+                switch ((insn >> 4) & 0x1) {
+                case 0x0:
+                    /* 01100101 ..0..... 011..... ...0.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:343 */
+                    if (trans_FCMEQ_ppzz(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                case 0x1:
+                    /* 01100101 ..0..... 011..... ...1.... */
+                    /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:344 */
+                    if (trans_FCMNE_ppzz(ctx, &u.f_rprr_esz)) return true;
+                    return false;
+                }
+                return false;
+            }
+            return false;
+        case 0x01008000:
+            /* 01100101 ..0..... 1....... ........ */
+            switch ((insn >> 13) & 0x3) {
+            case 0x0:
+                /* 01100101 ..0..... 100..... ........ */
+                switch ((insn >> 19) & 0x3) {
+                case 0x0:
+                    /* 01100101 ..000... 100..... ........ */
+                    switch ((insn >> 16) & 0x7) {
+                    case 0x0:
+                        /* 01100101 ..000000 100..... ........ */
+                        /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:810 */
+                        disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn);
+                        if (trans_FADD_zpzz(ctx, &u.f_rprr_esz)) return true;
+                        return false;
+                    case 0x1:
+                        /* 01100101 ..000001 100..... ........
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:811 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_FSUB_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x2: + /* 01100101 ..000010 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:812 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_FMUL_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x3: + /* 01100101 ..000011 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:813 */ + disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); + if (trans_FSUB_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x4: + /* 01100101 ..000100 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:814 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_FMAXNM_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x5: + /* 01100101 ..000101 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:815 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_FMINNM_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x6: + /* 01100101 ..000110 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:816 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_FMAX_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x7: + /* 01100101 ..000111 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:817 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_FMIN_zpzz(ctx, &u.f_rprr_esz)) return true; + return false; + } + return false; + case 0x1: + /* 01100101 ..001... 100..... ........ */ + switch ((insn >> 16) & 0x7) { + case 0x0: + /* 01100101 ..001000 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:818 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_FABD(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x1: + /* 01100101 ..001001 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:819 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_FSCALE(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x2: + /* 01100101 ..001010 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:820 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_FMULX(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x4: + /* 01100101 ..001100 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:821 */ + disas_sve_extract_rdm_pg_rn(ctx, &u.f_rprr_esz, insn); + if (trans_FDIV(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x5: + /* 01100101 ..001101 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:822 */ + disas_sve_extract_rdn_pg_rm(ctx, &u.f_rprr_esz, insn); + if (trans_FDIV(ctx, &u.f_rprr_esz)) return true; + return false; + } + return false; + case 0x2: + /* 01100101 ..010... 100..... ........ 
*/ + disas_sve_extract_disas_sve_Fmt_87(ctx, &u.f_disas_sve46, insn); + switch ((insn >> 10) & 0x7) { + case 0x0: + /* 01100101 ..010... 100000.. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:835 */ + if (trans_FTMAD(ctx, &u.f_disas_sve46)) return true; + return false; + } + return false; + case 0x3: + /* 01100101 ..011... 100..... ........ */ + disas_sve_extract_rdn_i1(ctx, &u.f_rpri_esz, insn); + switch (insn & 0x000703c0) { + case 0x00000000: + /* 01100101 ..011000 100...00 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:825 */ + if (trans_FADD_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x00010000: + /* 01100101 ..011001 100...00 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:826 */ + if (trans_FSUB_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x00020000: + /* 01100101 ..011010 100...00 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:827 */ + if (trans_FMUL_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x00030000: + /* 01100101 ..011011 100...00 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:828 */ + if (trans_FSUBR_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x00040000: + /* 01100101 ..011100 100...00 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:829 */ + if (trans_FMAXNM_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x00050000: + /* 01100101 ..011101 100...00 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:830 */ + if (trans_FMINNM_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x00060000: + /* 01100101 ..011110 100...00 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:831 */ + if (trans_FMAX_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + case 0x00070000: + /* 01100101 ..011111 100...00 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:832 */ + if (trans_FMIN_zpzi(ctx, &u.f_rpri_esz)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* 01100101 ..0..... 101..... ........ */ + switch ((insn >> 16) & 0x1f) { + case 0x0: + /* 01100101 ..000000 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:880 */ + disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); + if (trans_FRINTN(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x1: + /* 01100101 ..000001 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:881 */ + disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); + if (trans_FRINTP(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x2: + /* 01100101 ..000010 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:882 */ + disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); + if (trans_FRINTM(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 ..000011 101..... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:883 */ + disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); + if (trans_FRINTZ(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x4: + /* 01100101 ..000100 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:884 */ + disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); + if (trans_FRINTA(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x6: + /* 01100101 ..000110 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:885 */ + disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); + if (trans_FRINTX(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x7: + /* 01100101 ..000111 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:886 */ + disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); + if (trans_FRINTI(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x8: + /* 01100101 ..001000 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x2: + /* 01100101 10001000 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:856 */ + if (trans_FCVT_sh(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 11001000 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:858 */ + if (trans_FCVT_dh(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x9: + /* 01100101 ..001001 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x2: + /* 01100101 10001001 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:857 */ + if (trans_FCVT_hs(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 11001001 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:859 */ + if (trans_FCVT_hd(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0xa: + /* 01100101 ..001010 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x3: + /* 01100101 11001010 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:860 */ + if (trans_FCVT_ds(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0xb: + /* 01100101 ..001011 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x3: + /* 01100101 11001011 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:861 */ + if (trans_FCVT_sd(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0xc: + /* 01100101 ..001100 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:889 */ + disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); + if (trans_FRECPX(ctx, &u.f_rpr_esz)) return true; + return false; + case 0xd: + /* 01100101 ..001101 101..... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:890 */ + disas_sve_extract_rd_pg_rn(ctx, &u.f_rpr_esz, insn); + if (trans_FSQRT(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x10: + /* 01100101 ..010000 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x3: + /* 01100101 11010000 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:897 */ + if (trans_SCVTF_sd(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x11: + /* 01100101 ..010001 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x3: + /* 01100101 11010001 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:905 */ + if (trans_UCVTF_sd(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x12: + /* 01100101 ..010010 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01010010 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:893 */ + if (trans_SCVTF_hh(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x13: + /* 01100101 ..010011 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01010011 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:901 */ + if (trans_UCVTF_hh(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x14: + /* 01100101 ..010100 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01010100 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:894 */ + if (trans_SCVTF_sh(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x2: + /* 01100101 10010100 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:896 */ + if (trans_SCVTF_ss(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 11010100 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:898 */ + if (trans_SCVTF_ds(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x15: + /* 01100101 ..010101 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01010101 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:902 */ + if (trans_UCVTF_sh(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x2: + /* 01100101 10010101 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:904 */ + if (trans_UCVTF_ss(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 11010101 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:906 */ + if (trans_UCVTF_ds(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x16: + /* 01100101 ..010110 101..... ........ 
*/ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01010110 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:895 */ + if (trans_SCVTF_dh(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 11010110 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:899 */ + if (trans_SCVTF_dd(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x17: + /* 01100101 ..010111 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01010111 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:903 */ + if (trans_UCVTF_dh(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 11010111 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:907 */ + if (trans_UCVTF_dd(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x18: + /* 01100101 ..011000 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x3: + /* 01100101 11011000 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:872 */ + if (trans_FCVTZS_ds(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x19: + /* 01100101 ..011001 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x3: + /* 01100101 11011001 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:873 */ + if (trans_FCVTZU_ds(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x1a: + /* 01100101 ..011010 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01011010 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:864 */ + if (trans_FCVTZS_hh(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x1b: + /* 01100101 ..011011 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01011011 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:865 */ + if (trans_FCVTZU_hh(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x1c: + /* 01100101 ..011100 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01011100 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:866 */ + if (trans_FCVTZS_hs(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x2: + /* 01100101 10011100 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:870 */ + if (trans_FCVTZS_ss(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 11011100 101..... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:874 */ + if (trans_FCVTZS_sd(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x1d: + /* 01100101 ..011101 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01011101 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:867 */ + if (trans_FCVTZU_hs(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x2: + /* 01100101 10011101 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:871 */ + if (trans_FCVTZU_ss(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 11011101 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:875 */ + if (trans_FCVTZU_sd(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x1e: + /* 01100101 ..011110 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01011110 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:868 */ + if (trans_FCVTZS_hd(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 11011110 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:876 */ + if (trans_FCVTZS_dd(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + case 0x1f: + /* 01100101 ..011111 101..... ........ */ + disas_sve_extract_rd_pg_rn_e0(ctx, &u.f_rpr_esz, insn); + switch ((insn >> 22) & 0x3) { + case 0x1: + /* 01100101 01011111 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:869 */ + if (trans_FCVTZU_hd(ctx, &u.f_rpr_esz)) return true; + return false; + case 0x3: + /* 01100101 11011111 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:877 */ + if (trans_FCVTZU_dd(ctx, &u.f_rpr_esz)) return true; + return false; + } + return false; + } + return false; + case 0x2: + /* 01100101 ..0..... 110..... ........ */ + disas_sve_extract_pd_pg_rn_rm(ctx, &u.f_rprr_esz, insn); + switch ((insn >> 4) & 0x1) { + case 0x0: + /* 01100101 ..0..... 110..... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:345 */ + if (trans_FCMUO_ppzz(ctx, &u.f_rprr_esz)) return true; + return false; + case 0x1: + /* 01100101 ..0..... 110..... ...1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:346 */ + if (trans_FACGE_ppzz(ctx, &u.f_rprr_esz)) return true; + return false; + } + return false; + case 0x3: + /* 01100101 ..0..... 111..... ........ */ + disas_sve_extract_pd_pg_rn_rm(ctx, &u.f_rprr_esz, insn); + switch ((insn >> 4) & 0x1) { + case 0x1: + /* 01100101 ..0..... 111..... ...1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:347 */ + if (trans_FACGT_ppzz(ctx, &u.f_rprr_esz)) return true; + return false; + } + return false; + } + return false; + case 0x01200000: + /* 01100101 ..1..... 0....... ........ */ + disas_sve_extract_rda_pg_rn_rm(ctx, &u.f_rprrr_esz, insn); + switch ((insn >> 13) & 0x3) { + case 0x0: + /* 01100101 ..1..... 000..... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:840 */ + if (trans_FMLA_zpzzz(ctx, &u.f_rprrr_esz)) return true; + return false; + case 0x1: + /* 01100101 ..1..... 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:841 */ + if (trans_FMLS_zpzzz(ctx, &u.f_rprrr_esz)) return true; + return false; + case 0x2: + /* 01100101 ..1..... 010..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:842 */ + if (trans_FNMLA_zpzzz(ctx, &u.f_rprrr_esz)) return true; + return false; + case 0x3: + /* 01100101 ..1..... 011..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:843 */ + if (trans_FNMLS_zpzzz(ctx, &u.f_rprrr_esz)) return true; + return false; + } + return false; + case 0x01208000: + /* 01100101 ..1..... 1....... ........ */ + disas_sve_extract_rdn_pg_rm_ra(ctx, &u.f_rprrr_esz, insn); + switch ((insn >> 13) & 0x3) { + case 0x0: + /* 01100101 ..1..... 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:848 */ + if (trans_FMLA_zpzzz(ctx, &u.f_rprrr_esz)) return true; + return false; + case 0x1: + /* 01100101 ..1..... 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:849 */ + if (trans_FMLS_zpzzz(ctx, &u.f_rprrr_esz)) return true; + return false; + case 0x2: + /* 01100101 ..1..... 110..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:850 */ + if (trans_FNMLA_zpzzz(ctx, &u.f_rprrr_esz)) return true; + return false; + case 0x3: + /* 01100101 ..1..... 111..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:851 */ + if (trans_FNMLS_zpzzz(ctx, &u.f_rprrr_esz)) return true; + return false; + } + return false; + } + return false; + case 0x42: + /* 1000010. ........ ........ ........ */ + switch ((insn >> 15) & 0x1) { + case 0x0: + /* 1000010. ........ 0....... ........ */ + switch ((insn >> 23) & 0x3) { + case 0x0: + /* 10000100 0....... 0....... ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 10000100 0.0..... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:923 */ + disas_sve_extract_rprr_g_load_xs_u(ctx, &u.f_rprr_gather_load, insn); + u.f_rprr_gather_load.esz = 2; + u.f_rprr_gather_load.msz = 0; + u.f_rprr_gather_load.scale = 0; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + case 0x1: + /* 10000100 0.1..... 0....... ........ */ + disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); + switch ((insn >> 4) & 0x1) { + case 0x0: + /* 10000100 0.1..... 0....... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:970 */ + if (trans_PRF(ctx, &u.f_disas_sve32)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* 10000100 1....... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:925 */ + disas_sve_extract_rprr_g_load_xs_u_sc(ctx, &u.f_rprr_gather_load, insn); + u.f_rprr_gather_load.esz = 2; + u.f_rprr_gather_load.msz = 1; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + case 0x2: + /* 10000101 0....... 0....... ........ 
*/ + disas_sve_extract_rprr_g_load_xs_sc(ctx, &u.f_rprr_gather_load, insn); + switch ((insn >> 14) & 0x1) { + case 0x1: + /* 10000101 0....... 01...... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:927 */ + u.f_rprr_gather_load.esz = 2; + u.f_rprr_gather_load.msz = 2; + u.f_rprr_gather_load.u = 1; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + } + return false; + case 0x3: + /* 10000101 1....... 0....... ........ */ + switch ((insn >> 22) & 0x1) { + case 0x0: + /* 10000101 10...... 0....... ........ */ + switch ((insn >> 13) & 0x3) { + case 0x0: + /* 10000101 10...... 000..... ........ */ + disas_sve_extract_pd_rn_i9(ctx, &u.f_rri, insn); + switch ((insn >> 4) & 0x1) { + case 0x0: + /* 10000101 10...... 000..... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:912 */ + if (trans_LDR_pri(ctx, &u.f_rri)) return true; + return false; + } + return false; + case 0x2: + /* 10000101 10...... 010..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:915 */ + disas_sve_extract_rd_rn_i9(ctx, &u.f_rri, insn); + if (trans_LDR_zri(ctx, &u.f_rri)) return true; + return false; + } + return false; + case 0x1: + /* 10000101 11...... 0....... ........ */ + disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); + switch ((insn >> 4) & 0x1) { + case 0x0: + /* 10000101 11...... 0....... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:976 */ + if (trans_PRF(ctx, &u.f_disas_sve32)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0x1: + /* 1000010. ........ 1....... ........ */ + switch ((insn >> 22) & 0x1) { + case 0x0: + /* 1000010. .0...... 1....... ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 1000010. .00..... 1....... ........ */ + switch (insn & 0x00006010) { + case 0x00004000: + /* 1000010. .00..... 110..... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:979 */ + disas_sve_extract_disas_sve_Fmt_89(ctx, &u.f_disas_sve47, insn); + if (trans_PRF_rr(ctx, &u.f_disas_sve47)) return true; + return false; + case 0x00006000: + /* 1000010. .00..... 111..... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:973 */ + disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); + if (trans_PRF(ctx, &u.f_disas_sve32)) return true; + return false; + } + return false; + case 0x1: + /* 1000010. .01..... 1....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:931 */ + disas_sve_extract_rpri_g_load(ctx, &u.f_rpri_gather_load, insn); + u.f_rpri_gather_load.esz = 2; + if (trans_LD1_zpiz(ctx, &u.f_rpri_gather_load)) return true; + return false; + } + return false; + case 0x1: + /* 1000010. .1...... 1....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:918 */ + disas_sve_extract_disas_sve_Fmt_88(ctx, &u.f_rpri_load, insn); + if (trans_LD1R_zpri(ctx, &u.f_rpri_load)) return true; + return false; + } + return false; + } + return false; + case 0x52: + /* 1010010. ........ ........ ........ */ + switch ((insn >> 13) & 0x7) { + case 0x0: + /* 1010010. ........ 000..... ........ 
*/ + disas_sve_extract_rprr_load_msz(ctx, &u.f_rprr_load, insn); + switch ((insn >> 21) & 0x3) { + case 0x0: + /* 1010010. .00..... 000..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:961 */ + u.f_rprr_load.nreg = 0; + if (trans_LD1RQ_zprr(ctx, &u.f_rprr_load)) return true; + return false; + } + return false; + case 0x1: + /* 1010010. ........ 001..... ........ */ + disas_sve_extract_rpri_load_msz(ctx, &u.f_rpri_load, insn); + switch ((insn >> 20) & 0x7) { + case 0x0: + /* 1010010. .000.... 001..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:966 */ + u.f_rpri_load.nreg = 0; + if (trans_LD1RQ_zpri(ctx, &u.f_rpri_load)) return true; + return false; + } + return false; + case 0x2: + /* 1010010. ........ 010..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:937 */ + disas_sve_extract_rprr_load_dt(ctx, &u.f_rprr_load, insn); + u.f_rprr_load.nreg = 0; + if (trans_LD_zprr(ctx, &u.f_rprr_load)) return true; + return false; + case 0x3: + /* 1010010. ........ 011..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:940 */ + disas_sve_extract_rprr_load_dt(ctx, &u.f_rprr_load, insn); + u.f_rprr_load.nreg = 0; + if (trans_LDFF1_zprr(ctx, &u.f_rprr_load)) return true; + return false; + case 0x5: + /* 1010010. ........ 101..... ........ */ + disas_sve_extract_rpri_load_dt(ctx, &u.f_rpri_load, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* 1010010. ...0.... 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:943 */ + u.f_rpri_load.nreg = 0; + if (trans_LD_zpri(ctx, &u.f_rpri_load)) return true; + return false; + case 0x1: + /* 1010010. ...1.... 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:946 */ + u.f_rpri_load.nreg = 0; + if (trans_LDNF1_zpri(ctx, &u.f_rpri_load)) return true; + return false; + } + return false; + case 0x6: + /* 1010010. ........ 110..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:952 */ + disas_sve_extract_rprr_load_msz(ctx, &u.f_rprr_load, insn); + u.f_rprr_load.nreg = extract32(insn, 21, 2); + if (trans_LD_zprr(ctx, &u.f_rprr_load)) return true; + return false; + case 0x7: + /* 1010010. ........ 111..... ........ */ + disas_sve_extract_rpri_load_msz(ctx, &u.f_rpri_load, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* 1010010. ...0.... 111..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:958 */ + u.f_rpri_load.nreg = extract32(insn, 21, 2); + if (trans_LD_zpri(ctx, &u.f_rpri_load)) return true; + return false; + } + return false; + } + return false; + case 0x62: + /* 1100010. ........ ........ ........ */ + switch ((insn >> 15) & 0x1) { + case 0x0: + /* 1100010. ........ 0....... ........ */ + switch ((insn >> 23) & 0x3) { + case 0x0: + /* 11000100 0....... 0....... ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 11000100 0.0..... 0....... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:985 */ + disas_sve_extract_rprr_g_load_xs_u(ctx, &u.f_rprr_gather_load, insn); + u.f_rprr_gather_load.esz = 3; + u.f_rprr_gather_load.msz = 0; + u.f_rprr_gather_load.scale = 0; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + case 0x1: + /* 11000100 0.1..... 0....... ........ */ + disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); + switch ((insn >> 4) & 0x1) { + case 0x0: + /* 11000100 0.1..... 0....... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1013 */ + if (trans_PRF(ctx, &u.f_disas_sve32)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* 11000100 1....... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:987 */ + disas_sve_extract_rprr_g_load_xs_u_sc(ctx, &u.f_rprr_gather_load, insn); + u.f_rprr_gather_load.esz = 3; + u.f_rprr_gather_load.msz = 1; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + case 0x2: + /* 11000101 0....... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:989 */ + disas_sve_extract_rprr_g_load_xs_u_sc(ctx, &u.f_rprr_gather_load, insn); + u.f_rprr_gather_load.esz = 3; + u.f_rprr_gather_load.msz = 2; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + case 0x3: + /* 11000101 1....... 0....... ........ */ + disas_sve_extract_rprr_g_load_xs_sc(ctx, &u.f_rprr_gather_load, insn); + switch ((insn >> 14) & 0x1) { + case 0x1: + /* 11000101 1....... 01...... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:991 */ + u.f_rprr_gather_load.esz = 3; + u.f_rprr_gather_load.msz = 3; + u.f_rprr_gather_load.u = 1; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* 1100010. ........ 1....... ........ */ + switch ((insn >> 22) & 0x1) { + case 0x0: + /* 1100010. .0...... 1....... ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 1100010. .00..... 1....... ........ */ + disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); + switch (insn & 0x00006010) { + case 0x00006000: + /* 1100010. .00..... 111..... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1016 */ + if (trans_PRF(ctx, &u.f_disas_sve32)) return true; + return false; + } + return false; + case 0x1: + /* 1100010. .01..... 1....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1006 */ + disas_sve_extract_rpri_g_load(ctx, &u.f_rpri_gather_load, insn); + u.f_rpri_gather_load.esz = 3; + if (trans_LD1_zpiz(ctx, &u.f_rpri_gather_load)) return true; + return false; + } + return false; + case 0x1: + /* 1100010. .1...... 1....... ........ */ + switch ((insn >> 23) & 0x3) { + case 0x0: + /* 11000100 01...... 1....... ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 11000100 010..... 1....... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:996 */ + disas_sve_extract_rprr_g_load_u(ctx, &u.f_rprr_gather_load, insn); + u.f_rprr_gather_load.esz = 3; + u.f_rprr_gather_load.msz = 0; + u.f_rprr_gather_load.scale = 0; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + case 0x1: + /* 11000100 011..... 1....... ........ */ + disas_sve_extract_disas_sve_Fmt_66(ctx, &u.f_disas_sve32, insn); + switch ((insn >> 4) & 0x1) { + case 0x0: + /* 11000100 011..... 1....... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1010 */ + if (trans_PRF(ctx, &u.f_disas_sve32)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* 11000100 11...... 1....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:998 */ + disas_sve_extract_rprr_g_load_u_sc(ctx, &u.f_rprr_gather_load, insn); + u.f_rprr_gather_load.esz = 3; + u.f_rprr_gather_load.msz = 1; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + case 0x2: + /* 11000101 01...... 1....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1000 */ + disas_sve_extract_rprr_g_load_u_sc(ctx, &u.f_rprr_gather_load, insn); + u.f_rprr_gather_load.esz = 3; + u.f_rprr_gather_load.msz = 2; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + case 0x3: + /* 11000101 11...... 1....... ........ */ + disas_sve_extract_rprr_g_load_sc(ctx, &u.f_rprr_gather_load, insn); + switch ((insn >> 14) & 0x1) { + case 0x1: + /* 11000101 11...... 11...... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1002 */ + u.f_rprr_gather_load.esz = 3; + u.f_rprr_gather_load.msz = 3; + u.f_rprr_gather_load.u = 1; + if (trans_LD1_zprz(ctx, &u.f_rprr_gather_load)) return true; + return false; + } + return false; + } + return false; + } + return false; + } + return false; + case 0x72: + /* 1110010. ........ ........ ........ */ + switch ((insn >> 13) & 0x7) { + case 0x0: + /* 1110010. ........ 000..... ........ */ + disas_sve_extract_pd_rn_i9(ctx, &u.f_rri, insn); + switch (insn & 0x01c00010) { + case 0x01800000: + /* 11100101 10...... 000..... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1021 */ + if (trans_STR_pri(ctx, &u.f_rri)) return true; + return false; + } + return false; + case 0x2: + /* 1110010. ........ 010..... ........ */ + switch ((insn >> 23) & 0x3) { + case 0x0: + /* 11100100 0....... 010..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1034 */ + disas_sve_extract_rprr_store_esz_n0(ctx, &u.f_rprr_store, insn); + u.f_rprr_store.msz = 0; + if (trans_ST_zprr(ctx, &u.f_rprr_store)) return true; + return false; + case 0x1: + /* 11100100 1....... 010..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1036 */ + disas_sve_extract_rprr_store_esz_n0(ctx, &u.f_rprr_store, insn); + u.f_rprr_store.msz = 1; + if (trans_ST_zprr(ctx, &u.f_rprr_store)) return true; + return false; + case 0x2: + /* 11100101 0....... 010..... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1038 */ + disas_sve_extract_rprr_store_esz_n0(ctx, &u.f_rprr_store, insn); + u.f_rprr_store.msz = 2; + if (trans_ST_zprr(ctx, &u.f_rprr_store)) return true; + return false; + case 0x3: + /* 11100101 1....... 010..... ........ */ + switch ((insn >> 22) & 0x1) { + case 0x0: + /* 11100101 10...... 010..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1024 */ + disas_sve_extract_rd_rn_i9(ctx, &u.f_rri, insn); + if (trans_STR_zri(ctx, &u.f_rri)) return true; + return false; + case 0x1: + /* 11100101 11...... 010..... ........ */ + disas_sve_extract_rprr_store(ctx, &u.f_rprr_store, insn); + switch ((insn >> 21) & 0x1) { + case 0x1: + /* 11100101 111..... 010..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1040 */ + u.f_rprr_store.msz = 3; + u.f_rprr_store.esz = 3; + u.f_rprr_store.nreg = 0; + if (trans_ST_zprr(ctx, &u.f_rprr_store)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0x3: + /* 1110010. ........ 011..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1050 */ + disas_sve_extract_rprr_store(ctx, &u.f_rprr_store, insn); + u.f_rprr_store.msz = extract32(insn, 23, 2); + u.f_rprr_store.nreg = extract32(insn, 21, 2); + u.f_rprr_store.esz = extract32(insn, 23, 2); + if (trans_ST_zprr(ctx, &u.f_rprr_store)) return true; + return false; + case 0x4: + /* 1110010. ........ 100..... ........ */ + disas_sve_extract_rprr_scatter_store(ctx, &u.f_rprr_scatter_store, insn); + switch ((insn >> 21) & 0x3) { + case 0x0: + /* 1110010. .00..... 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1092 */ + u.f_rprr_scatter_store.xs = 0; + u.f_rprr_scatter_store.esz = 3; + u.f_rprr_scatter_store.scale = 0; + if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; + return false; + case 0x1: + /* 1110010. .01..... 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1086 */ + u.f_rprr_scatter_store.xs = 0; + u.f_rprr_scatter_store.esz = 3; + u.f_rprr_scatter_store.scale = 1; + if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; + return false; + case 0x2: + /* 1110010. .10..... 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1062 */ + u.f_rprr_scatter_store.xs = 0; + u.f_rprr_scatter_store.esz = 2; + u.f_rprr_scatter_store.scale = 0; + if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; + return false; + case 0x3: + /* 1110010. .11..... 100..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1055 */ + u.f_rprr_scatter_store.xs = 0; + u.f_rprr_scatter_store.esz = 2; + u.f_rprr_scatter_store.scale = 1; + if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; + return false; + } + return false; + case 0x5: + /* 1110010. ........ 101..... ........ */ + switch ((insn >> 21) & 0x3) { + case 0x0: + /* 1110010. .00..... 101..... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1073 */ + disas_sve_extract_rprr_scatter_store(ctx, &u.f_rprr_scatter_store, insn); + u.f_rprr_scatter_store.xs = 2; + u.f_rprr_scatter_store.esz = 3; + u.f_rprr_scatter_store.scale = 0; + if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; + return false; + case 0x1: + /* 1110010. .01..... 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1069 */ + disas_sve_extract_rprr_scatter_store(ctx, &u.f_rprr_scatter_store, insn); + u.f_rprr_scatter_store.xs = 2; + u.f_rprr_scatter_store.esz = 3; + u.f_rprr_scatter_store.scale = 1; + if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; + return false; + case 0x2: + /* 1110010. .10..... 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1077 */ + disas_sve_extract_rpri_scatter_store(ctx, &u.f_rpri_scatter_store, insn); + u.f_rpri_scatter_store.esz = 3; + if (trans_ST1_zpiz(ctx, &u.f_rpri_scatter_store)) return true; + return false; + case 0x3: + /* 1110010. .11..... 101..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1081 */ + disas_sve_extract_rpri_scatter_store(ctx, &u.f_rpri_scatter_store, insn); + u.f_rpri_scatter_store.esz = 2; + if (trans_ST1_zpiz(ctx, &u.f_rpri_scatter_store)) return true; + return false; + } + return false; + case 0x6: + /* 1110010. ........ 110..... ........ */ + disas_sve_extract_rprr_scatter_store(ctx, &u.f_rprr_scatter_store, insn); + switch ((insn >> 21) & 0x3) { + case 0x0: + /* 1110010. .00..... 110..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1094 */ + u.f_rprr_scatter_store.xs = 1; + u.f_rprr_scatter_store.esz = 3; + u.f_rprr_scatter_store.scale = 0; + if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; + return false; + case 0x1: + /* 1110010. .01..... 110..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1088 */ + u.f_rprr_scatter_store.xs = 1; + u.f_rprr_scatter_store.esz = 3; + u.f_rprr_scatter_store.scale = 1; + if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; + return false; + case 0x2: + /* 1110010. .10..... 110..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1064 */ + u.f_rprr_scatter_store.xs = 1; + u.f_rprr_scatter_store.esz = 2; + u.f_rprr_scatter_store.scale = 0; + if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; + return false; + case 0x3: + /* 1110010. .11..... 110..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1057 */ + u.f_rprr_scatter_store.xs = 1; + u.f_rprr_scatter_store.esz = 2; + u.f_rprr_scatter_store.scale = 1; + if (trans_ST1_zprz(ctx, &u.f_rprr_scatter_store)) return true; + return false; + } + return false; + case 0x7: + /* 1110010. ........ 111..... ........ */ + disas_sve_extract_rpri_store_msz(ctx, &u.f_rpri_store, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* 1110010. ...0.... 111..... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1028 */ + u.f_rpri_store.esz = extract32(insn, 21, 2); + u.f_rpri_store.nreg = 0; + if (trans_ST_zpri(ctx, &u.f_rpri_store)) return true; + return false; + case 0x1: + /* 1110010. ...1.... 111..... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/sve.decode:1045 */ + u.f_rpri_store.nreg = extract32(insn, 21, 2); + u.f_rpri_store.esz = extract32(insn, 23, 2); + if (trans_ST_zpri(ctx, &u.f_rpri_store)) return true; + return false; + } + return false; + } + return false; + } + return false; +} diff --git a/qemu/target/arm/decode-t16.inc.c b/qemu/target/arm/decode-t16.inc.c new file mode 100644 index 00000000..71a6c0ce --- /dev/null +++ b/qemu/target/arm/decode-t16.inc.c @@ -0,0 +1,1141 @@ +/* This file is autogenerated by scripts/decodetree.py. */ + +typedef struct { + int F; + int I; + int im; +} arg_disas_t1616; + +typedef struct { + int cond_mask; +} arg_disas_t1617; + +typedef struct { + int imm; + int nz; + int rn; +} arg_disas_t1618; + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wredundant-decls" +# ifdef __clang__ +# pragma GCC diagnostic ignored "-Wtypedef-redefinition" +# endif +#endif + +typedef arg_s_rrr_shi arg_AND_rrri; +static bool trans_AND_rrri(DisasContext *ctx, arg_AND_rrri *a); +typedef arg_s_rrr_shi arg_EOR_rrri; +static bool trans_EOR_rrri(DisasContext *ctx, arg_EOR_rrri *a); +typedef arg_s_rrr_shr arg_MOV_rxrr; +static bool trans_MOV_rxrr(DisasContext *ctx, arg_MOV_rxrr *a); +typedef arg_s_rrr_shi arg_ADC_rrri; +static bool trans_ADC_rrri(DisasContext *ctx, arg_ADC_rrri *a); +typedef arg_s_rrr_shi arg_SBC_rrri; +static bool trans_SBC_rrri(DisasContext *ctx, arg_SBC_rrri *a); +typedef arg_s_rrr_shi arg_TST_xrri; +static bool trans_TST_xrri(DisasContext *ctx, arg_TST_xrri *a); +typedef arg_s_rri_rot arg_RSB_rri; +static bool trans_RSB_rri(DisasContext *ctx, arg_RSB_rri *a); +typedef arg_s_rrr_shi arg_CMP_xrri; +static bool trans_CMP_xrri(DisasContext *ctx, arg_CMP_xrri *a); +typedef arg_s_rrr_shi arg_CMN_xrri; +static bool trans_CMN_xrri(DisasContext *ctx, arg_CMN_xrri *a); +typedef arg_s_rrr_shi arg_ORR_rrri; +static bool trans_ORR_rrri(DisasContext *ctx, arg_ORR_rrri *a); +typedef arg_s_rrrr arg_MUL; +static bool trans_MUL(DisasContext *ctx, arg_MUL *a); +typedef arg_s_rrr_shi arg_BIC_rrri; +static bool trans_BIC_rrri(DisasContext *ctx, arg_BIC_rrri *a); +typedef arg_s_rrr_shi arg_MVN_rxri; +static bool trans_MVN_rxri(DisasContext *ctx, arg_MVN_rxri *a); +typedef arg_ldst_rr arg_STR_rr; +static bool trans_STR_rr(DisasContext *ctx, arg_STR_rr *a); +typedef arg_ldst_rr arg_STRH_rr; +static bool trans_STRH_rr(DisasContext *ctx, arg_STRH_rr *a); +typedef arg_ldst_rr arg_STRB_rr; +static bool trans_STRB_rr(DisasContext *ctx, arg_STRB_rr *a); +typedef arg_ldst_rr arg_LDRSB_rr; +static bool trans_LDRSB_rr(DisasContext *ctx, arg_LDRSB_rr *a); +typedef arg_ldst_rr arg_LDR_rr; +static bool trans_LDR_rr(DisasContext *ctx, arg_LDR_rr *a); +typedef arg_ldst_rr arg_LDRH_rr; +static bool trans_LDRH_rr(DisasContext *ctx, arg_LDRH_rr *a); +typedef arg_ldst_rr arg_LDRB_rr; +static bool trans_LDRB_rr(DisasContext *ctx, arg_LDRB_rr *a); +typedef arg_ldst_rr arg_LDRSH_rr; +static bool trans_LDRSH_rr(DisasContext *ctx, arg_LDRSH_rr *a); +typedef arg_ldst_ri arg_STR_ri; +static bool trans_STR_ri(DisasContext *ctx, arg_STR_ri *a); +typedef arg_ldst_ri arg_LDR_ri; +static bool trans_LDR_ri(DisasContext *ctx, arg_LDR_ri *a); +typedef arg_ldst_ri arg_STRB_ri; +static bool trans_STRB_ri(DisasContext *ctx, arg_STRB_ri *a); +typedef arg_ldst_ri arg_LDRB_ri; +static bool trans_LDRB_ri(DisasContext *ctx, arg_LDRB_ri *a); +typedef arg_ldst_ri arg_STRH_ri; +static bool trans_STRH_ri(DisasContext *ctx, 
arg_STRH_ri *a); +typedef arg_ldst_ri arg_LDRH_ri; +static bool trans_LDRH_ri(DisasContext *ctx, arg_LDRH_ri *a); +typedef arg_ri arg_ADR; +static bool trans_ADR(DisasContext *ctx, arg_ADR *a); +typedef arg_s_rri_rot arg_ADD_rri; +static bool trans_ADD_rri(DisasContext *ctx, arg_ADD_rri *a); +typedef arg_ldst_block arg_STM; +static bool trans_STM(DisasContext *ctx, arg_STM *a); +typedef arg_ldst_block arg_LDM_t16; +static bool trans_LDM_t16(DisasContext *ctx, arg_LDM_t16 *a); +typedef arg_s_rrr_shi arg_MOV_rxri; +static bool trans_MOV_rxri(DisasContext *ctx, arg_MOV_rxri *a); +typedef arg_s_rrr_shi arg_ADD_rrri; +static bool trans_ADD_rrri(DisasContext *ctx, arg_ADD_rrri *a); +typedef arg_s_rrr_shi arg_SUB_rrri; +static bool trans_SUB_rrri(DisasContext *ctx, arg_SUB_rrri *a); +typedef arg_s_rri_rot arg_SUB_rri; +static bool trans_SUB_rri(DisasContext *ctx, arg_SUB_rri *a); +typedef arg_s_rri_rot arg_MOV_rxi; +static bool trans_MOV_rxi(DisasContext *ctx, arg_MOV_rxi *a); +typedef arg_s_rri_rot arg_CMP_xri; +static bool trans_CMP_xri(DisasContext *ctx, arg_CMP_xri *a); +typedef arg_r arg_BX; +static bool trans_BX(DisasContext *ctx, arg_BX *a); +typedef arg_r arg_BLX_r; +static bool trans_BLX_r(DisasContext *ctx, arg_BLX_r *a); +typedef arg_r arg_BXNS; +static bool trans_BXNS(DisasContext *ctx, arg_BXNS *a); +typedef arg_r arg_BLXNS; +static bool trans_BLXNS(DisasContext *ctx, arg_BLXNS *a); +typedef arg_rrr_rot arg_SXTAH; +static bool trans_SXTAH(DisasContext *ctx, arg_SXTAH *a); +typedef arg_rrr_rot arg_SXTAB; +static bool trans_SXTAB(DisasContext *ctx, arg_SXTAB *a); +typedef arg_rrr_rot arg_UXTAH; +static bool trans_UXTAH(DisasContext *ctx, arg_UXTAH *a); +typedef arg_rrr_rot arg_UXTAB; +static bool trans_UXTAB(DisasContext *ctx, arg_UXTAB *a); +typedef arg_setend arg_SETEND; +static bool trans_SETEND(DisasContext *ctx, arg_SETEND *a); +typedef arg_cps arg_CPS; +static bool trans_CPS(DisasContext *ctx, arg_CPS *a); +typedef arg_disas_t1616 arg_CPS_v7m; +static bool trans_CPS_v7m(DisasContext *ctx, arg_CPS_v7m *a); +typedef arg_rr arg_REV; +static bool trans_REV(DisasContext *ctx, arg_REV *a); +typedef arg_rr arg_REV16; +static bool trans_REV16(DisasContext *ctx, arg_REV16 *a); +typedef arg_rr arg_REVSH; +static bool trans_REVSH(DisasContext *ctx, arg_REVSH *a); +typedef arg_empty arg_YIELD; +static bool trans_YIELD(DisasContext *ctx, arg_YIELD *a); +typedef arg_empty arg_WFE; +static bool trans_WFE(DisasContext *ctx, arg_WFE *a); +typedef arg_empty arg_WFI; +static bool trans_WFI(DisasContext *ctx, arg_WFI *a); +typedef arg_empty arg_NOP; +static bool trans_NOP(DisasContext *ctx, arg_NOP *a); +typedef arg_disas_t1617 arg_IT; +static bool trans_IT(DisasContext *ctx, arg_IT *a); +typedef arg_i arg_HLT; +static bool trans_HLT(DisasContext *ctx, arg_HLT *a); +typedef arg_i arg_BKPT; +static bool trans_BKPT(DisasContext *ctx, arg_BKPT *a); +typedef arg_disas_t1618 arg_CBZ; +static bool trans_CBZ(DisasContext *ctx, arg_CBZ *a); +typedef arg_empty arg_UDF; +static bool trans_UDF(DisasContext *ctx, arg_UDF *a); +typedef arg_i arg_SVC; +static bool trans_SVC(DisasContext *ctx, arg_SVC *a); +typedef arg_ci arg_B_cond_thumb; +static bool trans_B_cond_thumb(DisasContext *ctx, arg_B_cond_thumb *a); +typedef arg_i arg_B; +static bool trans_B(DisasContext *ctx, arg_B *a); +typedef arg_i arg_BLX_suffix; +static bool trans_BLX_suffix(DisasContext *ctx, arg_BLX_suffix *a); +typedef arg_i arg_BL_BLX_prefix; +static bool trans_BL_BLX_prefix(DisasContext *ctx, arg_BL_BLX_prefix *a); +typedef arg_i 
arg_BL_suffix; +static bool trans_BL_suffix(DisasContext *ctx, arg_BL_suffix *a); + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +# pragma GCC diagnostic pop +#endif + +static void disas_t16_extract_addsub_2h(DisasContext *ctx, arg_s_rrr_shi *a, uint16_t insn) +{ + a->rm = extract32(insn, 3, 4); + a->rd = deposit32(extract32(insn, 0, 3), 3, 29, extract32(insn, 7, 1)); + a->rn = deposit32(extract32(insn, 0, 3), 3, 29, extract32(insn, 7, 1)); + a->shim = 0; + a->shty = 0; +} + +static void disas_t16_extract_addsub_2i(DisasContext *ctx, arg_s_rri_rot *a, uint16_t insn) +{ + a->imm = extract32(insn, 6, 3); + a->rn = extract32(insn, 3, 3); + a->rd = extract32(insn, 0, 3); + a->s = t16_setflags(ctx); + a->rot = 0; +} + +static void disas_t16_extract_addsub_3(DisasContext *ctx, arg_s_rrr_shi *a, uint16_t insn) +{ + a->rm = extract32(insn, 6, 3); + a->rn = extract32(insn, 3, 3); + a->rd = extract32(insn, 0, 3); + a->s = t16_setflags(ctx); + a->shim = 0; + a->shty = 0; +} + +static void disas_t16_extract_addsub_sp_i(DisasContext *ctx, arg_s_rri_rot *a, uint16_t insn) +{ + a->s = 0; + a->rd = 13; + a->rn = 13; + a->rot = 0; + a->imm = times_4(ctx, extract32(insn, 0, 7)); +} + +static void disas_t16_extract_arith_1i(DisasContext *ctx, arg_s_rri_rot *a, uint16_t insn) +{ + a->rd = extract32(insn, 8, 3); + a->imm = extract32(insn, 0, 8); + a->rot = 0; + a->rn = extract32(insn, 8, 3); +} + +static void disas_t16_extract_branchr(DisasContext *ctx, arg_r *a, uint16_t insn) +{ + a->rm = extract32(insn, 3, 4); +} + +static void disas_t16_extract_disas_t16_Fmt_10(DisasContext *ctx, arg_ri *a, uint16_t insn) +{ + a->rd = extract32(insn, 8, 3); + a->imm = times_4(ctx, extract32(insn, 0, 8)); +} + +static void disas_t16_extract_disas_t16_Fmt_11(DisasContext *ctx, arg_s_rri_rot *a, uint16_t insn) +{ + a->rd = extract32(insn, 8, 3); + a->rn = 13; + a->s = 0; + a->rot = 0; + a->imm = times_4(ctx, extract32(insn, 0, 8)); +} + +static void disas_t16_extract_disas_t16_Fmt_21(DisasContext *ctx, arg_setend *a, uint16_t insn) +{ + a->E = extract32(insn, 3, 1); +} + +static void disas_t16_extract_disas_t16_Fmt_22(DisasContext *ctx, arg_cps *a, uint16_t insn) +{ + a->A = extract32(insn, 2, 1); + a->I = extract32(insn, 1, 1); + a->F = extract32(insn, 0, 1); + a->mode = 0; + a->M = 0; + a->imod = plus_2(ctx, extract32(insn, 4, 1)); +} + +static void disas_t16_extract_disas_t16_Fmt_23(DisasContext *ctx, arg_disas_t1616 *a, uint16_t insn) +{ + a->im = extract32(insn, 4, 1); + a->I = extract32(insn, 1, 1); + a->F = extract32(insn, 0, 1); +} + +static void disas_t16_extract_disas_t16_Fmt_25(DisasContext *ctx, arg_empty *a, uint16_t insn) +{ +} + +static void disas_t16_extract_disas_t16_Fmt_26(DisasContext *ctx, arg_disas_t1617 *a, uint16_t insn) +{ + a->cond_mask = extract32(insn, 0, 8); +} + +static void disas_t16_extract_disas_t16_Fmt_27(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = extract32(insn, 0, 6); +} + +static void disas_t16_extract_disas_t16_Fmt_28(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = extract32(insn, 0, 8); +} + +static void disas_t16_extract_disas_t16_Fmt_29(DisasContext *ctx, arg_disas_t1618 *a, uint16_t insn) +{ + a->nz = extract32(insn, 11, 1); + a->rn = extract32(insn, 0, 3); + a->imm = times_2(ctx, deposit32(extract32(insn, 3, 5), 5, 27, extract32(insn, 9, 1))); +} + +static void disas_t16_extract_disas_t16_Fmt_3(DisasContext *ctx, arg_s_rri_rot *a, uint16_t insn) +{ + a->rn = extract32(insn, 3, 3); + a->rd = extract32(insn, 0, 3); + a->s = t16_setflags(ctx); + a->imm = 0; + 
a->rot = 0; +} + +static void disas_t16_extract_disas_t16_Fmt_30(DisasContext *ctx, arg_ldst_block *a, uint16_t insn) +{ + a->i = 0; + a->b = 1; + a->u = 0; + a->w = 1; + a->rn = 13; + a->list = t16_push_list(ctx, extract32(insn, 0, 9)); +} + +static void disas_t16_extract_disas_t16_Fmt_31(DisasContext *ctx, arg_ldst_block *a, uint16_t insn) +{ + a->i = 1; + a->b = 0; + a->u = 0; + a->w = 1; + a->rn = 13; + a->list = t16_pop_list(ctx, extract32(insn, 0, 9)); +} + +static void disas_t16_extract_disas_t16_Fmt_32(DisasContext *ctx, arg_ci *a, uint16_t insn) +{ + a->cond = extract32(insn, 8, 4); + a->imm = times_2(ctx, sextract32(insn, 0, 8)); +} + +static void disas_t16_extract_disas_t16_Fmt_33(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = times_2(ctx, sextract32(insn, 0, 11)); +} + +static void disas_t16_extract_disas_t16_Fmt_34(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = extract32(insn, 0, 11); +} + +static void disas_t16_extract_disas_t16_Fmt_35(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = sextract32(insn, 0, 11); +} + +static void disas_t16_extract_disas_t16_Fmt_4(DisasContext *ctx, arg_s_rrrr *a, uint16_t insn) +{ + a->rn = extract32(insn, 3, 3); + a->rd = extract32(insn, 0, 3); + a->s = t16_setflags(ctx); + a->rm = extract32(insn, 0, 3); + a->ra = 0; +} + +static void disas_t16_extract_extend(DisasContext *ctx, arg_rrr_rot *a, uint16_t insn) +{ + a->rm = extract32(insn, 3, 3); + a->rd = extract32(insn, 0, 3); + a->rn = 15; + a->rot = 0; +} + +static void disas_t16_extract_ldst_ri_1(DisasContext *ctx, arg_ldst_ri *a, uint16_t insn) +{ + a->imm = extract32(insn, 6, 5); + a->rn = extract32(insn, 3, 3); + a->rt = extract32(insn, 0, 3); + a->p = 1; + a->w = 0; + a->u = 1; +} + +static void disas_t16_extract_ldst_ri_2(DisasContext *ctx, arg_ldst_ri *a, uint16_t insn) +{ + a->rn = extract32(insn, 3, 3); + a->rt = extract32(insn, 0, 3); + a->p = 1; + a->w = 0; + a->u = 1; + a->imm = times_2(ctx, extract32(insn, 6, 5)); +} + +static void disas_t16_extract_ldst_ri_4(DisasContext *ctx, arg_ldst_ri *a, uint16_t insn) +{ + a->rn = extract32(insn, 3, 3); + a->rt = extract32(insn, 0, 3); + a->p = 1; + a->w = 0; + a->u = 1; + a->imm = times_4(ctx, extract32(insn, 6, 5)); +} + +static void disas_t16_extract_ldst_rr(DisasContext *ctx, arg_ldst_rr *a, uint16_t insn) +{ + a->rm = extract32(insn, 6, 3); + a->rn = extract32(insn, 3, 3); + a->rt = extract32(insn, 0, 3); + a->p = 1; + a->w = 0; + a->u = 1; + a->shimm = 0; + a->shtype = 0; +} + +static void disas_t16_extract_ldst_spec_i(DisasContext *ctx, arg_ldst_ri *a, uint16_t insn) +{ + a->rt = extract32(insn, 8, 3); + a->p = 1; + a->w = 0; + a->u = 1; + a->imm = times_4(ctx, extract32(insn, 0, 8)); +} + +static void disas_t16_extract_ldstm(DisasContext *ctx, arg_ldst_block *a, uint16_t insn) +{ + a->rn = extract32(insn, 8, 3); + a->list = extract32(insn, 0, 8); + a->i = 1; + a->b = 0; + a->u = 0; + a->w = 1; +} + +static void disas_t16_extract_lll_noshr(DisasContext *ctx, arg_s_rrr_shi *a, uint16_t insn) +{ + a->rm = extract32(insn, 3, 3); + a->rd = extract32(insn, 0, 3); + a->s = t16_setflags(ctx); + a->rn = extract32(insn, 0, 3); + a->shim = 0; + a->shty = 0; +} + +static void disas_t16_extract_lxl_shr(DisasContext *ctx, arg_s_rrr_shr *a, uint16_t insn) +{ + a->rs = extract32(insn, 3, 3); + a->rd = extract32(insn, 0, 3); + a->s = t16_setflags(ctx); + a->rm = extract32(insn, 0, 3); + a->rn = 0; +} + +static void disas_t16_extract_rdm(DisasContext *ctx, arg_rr *a, uint16_t insn) +{ + a->rm = extract32(insn, 3, 3); + 
a->rd = extract32(insn, 0, 3); +} + +static void disas_t16_extract_shift_i(DisasContext *ctx, arg_s_rrr_shi *a, uint16_t insn) +{ + a->shim = extract32(insn, 6, 5); + a->rm = extract32(insn, 3, 3); + a->rd = extract32(insn, 0, 3); + a->s = t16_setflags(ctx); + a->rn = extract32(insn, 0, 3); +} + +static void disas_t16_extract_xll_noshr(DisasContext *ctx, arg_s_rrr_shi *a, uint16_t insn) +{ + a->rm = extract32(insn, 3, 3); + a->rn = extract32(insn, 0, 3); + a->s = 1; + a->rd = 0; + a->shim = 0; + a->shty = 0; +} + +static bool disas_t16(DisasContext *ctx, uint16_t insn) +{ + union { + arg_ci f_ci; + arg_cps f_cps; + arg_disas_t1616 f_disas_t1616; + arg_disas_t1617 f_disas_t1617; + arg_disas_t1618 f_disas_t1618; + arg_empty f_empty; + arg_i f_i; + arg_ldst_block f_ldst_block; + arg_ldst_ri f_ldst_ri; + arg_ldst_rr f_ldst_rr; + arg_r f_r; + arg_ri f_ri; + arg_rr f_rr; + arg_rrr_rot f_rrr_rot; + arg_s_rri_rot f_s_rri_rot; + arg_s_rrr_shi f_s_rrr_shi; + arg_s_rrr_shr f_s_rrr_shr; + arg_s_rrrr f_s_rrrr; + arg_setend f_setend; + } u; + + switch ((insn >> 12) & 0xf) { + case 0x0: + /* 0000.... ........ */ + disas_t16_extract_shift_i(ctx, &u.f_s_rrr_shi, insn); + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 00000... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:137 */ + u.f_s_rrr_shi.shty = 0; + if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x1: + /* 00001... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:138 */ + u.f_s_rrr_shi.shty = 1; + if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + case 0x1: + /* 0001.... ........ */ + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 00010... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:139 */ + disas_t16_extract_shift_i(ctx, &u.f_s_rrr_shi, insn); + u.f_s_rrr_shi.shty = 2; + if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x1: + /* 00011... ........ */ + switch ((insn >> 9) & 0x3) { + case 0x0: + /* 0001100. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:146 */ + disas_t16_extract_addsub_3(ctx, &u.f_s_rrr_shi, insn); + if (trans_ADD_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x1: + /* 0001101. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:147 */ + disas_t16_extract_addsub_3(ctx, &u.f_s_rrr_shi, insn); + if (trans_SUB_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x2: + /* 0001110. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:154 */ + disas_t16_extract_addsub_2i(ctx, &u.f_s_rri_rot, insn); + if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x3: + /* 0001111. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:155 */ + disas_t16_extract_addsub_2i(ctx, &u.f_s_rri_rot, insn); + if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + } + return false; + case 0x2: + /* 0010.... ........ */ + disas_t16_extract_arith_1i(ctx, &u.f_s_rri_rot, insn); + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 00100... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:163 */ + u.f_s_rri_rot.s = t16_setflags(ctx); + if (trans_MOV_rxi(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x1: + /* 00101... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:164 */ + u.f_s_rri_rot.s = 1; + if (trans_CMP_xri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0x3: + /* 0011.... ........ */ + disas_t16_extract_arith_1i(ctx, &u.f_s_rri_rot, insn); + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 00110... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:165 */ + u.f_s_rri_rot.s = t16_setflags(ctx); + if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x1: + /* 00111... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:166 */ + u.f_s_rri_rot.s = t16_setflags(ctx); + if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0x4: + /* 0100.... ........ */ + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 01000... ........ */ + switch ((insn >> 8) & 0x7) { + case 0x0: + /* 01000000 ........ */ + switch ((insn >> 6) & 0x3) { + case 0x0: + /* 01000000 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:53 */ + disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); + if (trans_AND_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x1: + /* 01000000 01...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:54 */ + disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); + if (trans_EOR_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x2: + /* 01000000 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:55 */ + disas_t16_extract_lxl_shr(ctx, &u.f_s_rrr_shr, insn); + u.f_s_rrr_shr.shty = 0; + if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + case 0x3: + /* 01000000 11...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:56 */ + disas_t16_extract_lxl_shr(ctx, &u.f_s_rrr_shr, insn); + u.f_s_rrr_shr.shty = 1; + if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + case 0x1: + /* 01000001 ........ */ + switch ((insn >> 6) & 0x3) { + case 0x0: + /* 01000001 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:57 */ + disas_t16_extract_lxl_shr(ctx, &u.f_s_rrr_shr, insn); + u.f_s_rrr_shr.shty = 2; + if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + case 0x1: + /* 01000001 01...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:58 */ + disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); + if (trans_ADC_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x2: + /* 01000001 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:59 */ + disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); + if (trans_SBC_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x3: + /* 01000001 11...... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:60 */ + disas_t16_extract_lxl_shr(ctx, &u.f_s_rrr_shr, insn); + u.f_s_rrr_shr.shty = 3; + if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + case 0x2: + /* 01000010 ........ */ + switch ((insn >> 6) & 0x3) { + case 0x0: + /* 01000010 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:61 */ + disas_t16_extract_xll_noshr(ctx, &u.f_s_rrr_shi, insn); + if (trans_TST_xrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x1: + /* 01000010 01...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:62 */ + disas_t16_extract_disas_t16_Fmt_3(ctx, &u.f_s_rri_rot, insn); + if (trans_RSB_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x2: + /* 01000010 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:63 */ + disas_t16_extract_xll_noshr(ctx, &u.f_s_rrr_shi, insn); + if (trans_CMP_xrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x3: + /* 01000010 11...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:64 */ + disas_t16_extract_xll_noshr(ctx, &u.f_s_rrr_shi, insn); + if (trans_CMN_xrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + case 0x3: + /* 01000011 ........ */ + switch ((insn >> 6) & 0x3) { + case 0x0: + /* 01000011 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:65 */ + disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); + if (trans_ORR_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x1: + /* 01000011 01...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:66 */ + disas_t16_extract_disas_t16_Fmt_4(ctx, &u.f_s_rrrr, insn); + if (trans_MUL(ctx, &u.f_s_rrrr)) return true; + return false; + case 0x2: + /* 01000011 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:67 */ + disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); + if (trans_BIC_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x3: + /* 01000011 11...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:68 */ + disas_t16_extract_lll_noshr(ctx, &u.f_s_rrr_shi, insn); + if (trans_MVN_rxri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + case 0x4: + /* 01000100 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:174 */ + disas_t16_extract_addsub_2h(ctx, &u.f_s_rrr_shi, insn); + u.f_s_rrr_shi.s = 0; + if (trans_ADD_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x5: + /* 01000101 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:175 */ + disas_t16_extract_addsub_2h(ctx, &u.f_s_rrr_shi, insn); + u.f_s_rrr_shi.s = 1; + if (trans_CMP_xrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x6: + /* 01000110 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:176 */ + disas_t16_extract_addsub_2h(ctx, &u.f_s_rrr_shi, insn); + u.f_s_rrr_shi.s = 0; + if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x7: + /* 01000111 ........ 
*/ + disas_t16_extract_branchr(ctx, &u.f_r, insn); + switch (insn & 0x00000087) { + case 0x00000000: + /* 01000111 0....000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:191 */ + if (trans_BX(ctx, &u.f_r)) return true; + return false; + case 0x00000004: + /* 01000111 0....100 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:193 */ + if (trans_BXNS(ctx, &u.f_r)) return true; + return false; + case 0x00000080: + /* 01000111 1....000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:192 */ + if (trans_BLX_r(ctx, &u.f_r)) return true; + return false; + case 0x00000084: + /* 01000111 1....100 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:194 */ + if (trans_BLXNS(ctx, &u.f_r)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* 01001... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:118 */ + disas_t16_extract_ldst_spec_i(ctx, &u.f_ldst_ri, insn); + u.f_ldst_ri.rn = 15; + if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x5: + /* 0101.... ........ */ + disas_t16_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); + switch ((insn >> 9) & 0x7) { + case 0x0: + /* 0101000. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:75 */ + if (trans_STR_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x1: + /* 0101001. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:76 */ + if (trans_STRH_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x2: + /* 0101010. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:77 */ + if (trans_STRB_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x3: + /* 0101011. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:78 */ + if (trans_LDRSB_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x4: + /* 0101100. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:79 */ + if (trans_LDR_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x5: + /* 0101101. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:80 */ + if (trans_LDRH_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x6: + /* 0101110. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:81 */ + if (trans_LDRB_rr(ctx, &u.f_ldst_rr)) return true; + return false; + case 0x7: + /* 0101111. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:82 */ + if (trans_LDRSH_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x6: + /* 0110.... ........ */ + disas_t16_extract_ldst_ri_4(ctx, &u.f_ldst_ri, insn); + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 01100... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:93 */ + if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* 01101... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:94 */ + if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x7: + /* 0111.... ........ */ + disas_t16_extract_ldst_ri_1(ctx, &u.f_ldst_ri, insn); + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 01110... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:95 */ + if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* 01111... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:96 */ + if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x8: + /* 1000.... ........ */ + disas_t16_extract_ldst_ri_2(ctx, &u.f_ldst_ri, insn); + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 10000... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:104 */ + if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* 10001... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:105 */ + if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x9: + /* 1001.... ........ */ + disas_t16_extract_ldst_spec_i(ctx, &u.f_ldst_ri, insn); + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 10010... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:113 */ + u.f_ldst_ri.rn = 13; + if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x1: + /* 10011... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:114 */ + u.f_ldst_ri.rn = 13; + if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0xa: + /* 1010.... ........ */ + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 10100... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:122 */ + disas_t16_extract_disas_t16_Fmt_10(ctx, &u.f_ri, insn); + if (trans_ADR(ctx, &u.f_ri)) return true; + return false; + case 0x1: + /* 10101... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:123 */ + disas_t16_extract_disas_t16_Fmt_11(ctx, &u.f_s_rri_rot, insn); + if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0xb: + /* 1011.... ........ */ + switch ((insn >> 10) & 0x1) { + case 0x0: + /* 1011.0.. ........ */ + switch ((insn >> 8) & 0x1) { + case 0x0: + /* 1011.0.0 ........ */ + switch (insn & 0x00000a80) { + case 0x00000000: + /* 10110000 0....... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:184 */ + disas_t16_extract_addsub_sp_i(ctx, &u.f_s_rri_rot, insn); + if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x00000080: + /* 10110000 1....... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:185 */ + disas_t16_extract_addsub_sp_i(ctx, &u.f_s_rri_rot, insn); + if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x00000200: + /* 10110010 0....... */ + disas_t16_extract_extend(ctx, &u.f_rrr_rot, insn); + switch ((insn >> 6) & 0x1) { + case 0x0: + /* 10110010 00...... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:200 */ + if (trans_SXTAH(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x1: + /* 10110010 01...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:201 */ + if (trans_SXTAB(ctx, &u.f_rrr_rot)) return true; + return false; + } + return false; + case 0x00000280: + /* 10110010 1....... */ + disas_t16_extract_extend(ctx, &u.f_rrr_rot, insn); + switch ((insn >> 6) & 0x1) { + case 0x0: + /* 10110010 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:202 */ + if (trans_UXTAH(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x1: + /* 10110010 11...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:203 */ + if (trans_UXTAB(ctx, &u.f_rrr_rot)) return true; + return false; + } + return false; + case 0x00000a00: + /* 10111010 0....... */ + disas_t16_extract_rdm(ctx, &u.f_rr, insn); + switch ((insn >> 6) & 0x1) { + case 0x0: + /* 10111010 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:219 */ + if (trans_REV(ctx, &u.f_rr)) return true; + return false; + case 0x1: + /* 10111010 01...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:220 */ + if (trans_REV16(ctx, &u.f_rr)) return true; + return false; + } + return false; + case 0x00000a80: + /* 10111010 1....... */ + switch ((insn >> 6) & 0x1) { + case 0x0: + /* 10111010 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:246 */ + disas_t16_extract_disas_t16_Fmt_27(ctx, &u.f_i, insn); + if (trans_HLT(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* 10111010 11...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:221 */ + disas_t16_extract_rdm(ctx, &u.f_rr, insn); + if (trans_REVSH(ctx, &u.f_rr)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* 1011.0.1 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:248 */ + disas_t16_extract_disas_t16_Fmt_29(ctx, &u.f_disas_t1618, insn); + if (trans_CBZ(ctx, &u.f_disas_t1618)) return true; + return false; + } + return false; + case 0x1: + /* 1011.1.. ........ */ + switch (insn & 0x00000a00) { + case 0x00000000: + /* 1011010. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:255 */ + disas_t16_extract_disas_t16_Fmt_30(ctx, &u.f_ldst_block, insn); + if (trans_STM(ctx, &u.f_ldst_block)) return true; + return false; + case 0x00000200: + /* 1011011. ........ */ + switch ((insn >> 5) & 0xf) { + case 0x2: + /* 10110110 010..... */ + disas_t16_extract_disas_t16_Fmt_21(ctx, &u.f_setend, insn); + switch (insn & 0x00000017) { + case 0x00000010: + /* 10110110 0101.000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:209 */ + if (trans_SETEND(ctx, &u.f_setend)) return true; + return false; + } + return false; + case 0x3: + /* 10110110 011..... */ + switch ((insn >> 3) & 0x1) { + case 0x0: + /* 10110110 011.0... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:211 */ + disas_t16_extract_disas_t16_Fmt_22(ctx, &u.f_cps, insn); + if (trans_CPS(ctx, &u.f_cps)) return true; + if ((insn & 0x00000004) == 0x00000000) { + /* 10110110 011.00.. 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:212 */ + disas_t16_extract_disas_t16_Fmt_23(ctx, &u.f_disas_t1616, insn); + if (trans_CPS_v7m(ctx, &u.f_disas_t1616)) return true; + } + return false; + } + return false; + } + return false; + case 0x00000800: + /* 1011110. ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:257 */ + disas_t16_extract_disas_t16_Fmt_31(ctx, &u.f_ldst_block, insn); + if (trans_LDM_t16(ctx, &u.f_ldst_block)) return true; + return false; + case 0x00000a00: + /* 1011111. ........ */ + switch ((insn >> 8) & 0x1) { + case 0x0: + /* 10111110 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:247 */ + disas_t16_extract_disas_t16_Fmt_28(ctx, &u.f_i, insn); + if (trans_BKPT(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* 10111111 ........ */ + if ((insn & 0x0000000f) == 0x00000000) { + /* 10111111 ....0000 */ + if ((insn & 0x000000f0) == 0x00000010) { + /* 10111111 00010000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:227 */ + disas_t16_extract_disas_t16_Fmt_25(ctx, &u.f_empty, insn); + if (trans_YIELD(ctx, &u.f_empty)) return true; + } + if ((insn & 0x000000f0) == 0x00000020) { + /* 10111111 00100000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:228 */ + disas_t16_extract_disas_t16_Fmt_25(ctx, &u.f_empty, insn); + if (trans_WFE(ctx, &u.f_empty)) return true; + } + if ((insn & 0x000000f0) == 0x00000030) { + /* 10111111 00110000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:229 */ + disas_t16_extract_disas_t16_Fmt_25(ctx, &u.f_empty, insn); + if (trans_WFI(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:237 */ + disas_t16_extract_disas_t16_Fmt_25(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:239 */ + disas_t16_extract_disas_t16_Fmt_26(ctx, &u.f_disas_t1617, insn); + if (trans_IT(ctx, &u.f_disas_t1617)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0xc: + /* 1100.... ........ */ + disas_t16_extract_ldstm(ctx, &u.f_ldst_block, insn); + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 11000... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:130 */ + if (trans_STM(ctx, &u.f_ldst_block)) return true; + return false; + case 0x1: + /* 11001... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:131 */ + if (trans_LDM_t16(ctx, &u.f_ldst_block)) return true; + return false; + } + return false; + case 0xd: + /* 1101.... ........ */ + if ((insn & 0x00000f00) == 0x00000e00) { + /* 11011110 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:265 */ + disas_t16_extract_disas_t16_Fmt_25(ctx, &u.f_empty, insn); + if (trans_UDF(ctx, &u.f_empty)) return true; + } + if ((insn & 0x00000f00) == 0x00000f00) { + /* 11011111 ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:266 */ + disas_t16_extract_disas_t16_Fmt_28(ctx, &u.f_i, insn); + if (trans_SVC(ctx, &u.f_i)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:267 */ + disas_t16_extract_disas_t16_Fmt_32(ctx, &u.f_ci, insn); + if (trans_B_cond_thumb(ctx, &u.f_ci)) return true; + return false; + case 0xe: + /* 1110.... ........ */ + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 11100... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:274 */ + disas_t16_extract_disas_t16_Fmt_33(ctx, &u.f_i, insn); + if (trans_B(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* 11101... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:279 */ + disas_t16_extract_disas_t16_Fmt_34(ctx, &u.f_i, insn); + if (trans_BLX_suffix(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0xf: + /* 1111.... ........ */ + switch ((insn >> 11) & 0x1) { + case 0x0: + /* 11110... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:280 */ + disas_t16_extract_disas_t16_Fmt_35(ctx, &u.f_i, insn); + if (trans_BL_BLX_prefix(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* 11111... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t16.decode:281 */ + disas_t16_extract_disas_t16_Fmt_34(ctx, &u.f_i, insn); + if (trans_BL_suffix(ctx, &u.f_i)) return true; + return false; + } + return false; + } + return false; +} diff --git a/qemu/target/arm/decode-t32.inc.c b/qemu/target/arm/decode-t32.inc.c new file mode 100644 index 00000000..7e630be1 --- /dev/null +++ b/qemu/target/arm/decode-t32.inc.c @@ -0,0 +1,3017 @@ +/* This file is autogenerated by scripts/decodetree.py. 
*/ + +typedef struct { + int cond; + int imm; +} arg_ci; + +typedef struct { + int rd; + int sysm; +} arg_disas_t3227; + +typedef struct { + int mask; + int rn; + int sysm; +} arg_disas_t3228; + +typedef struct { + int A; + int T; + int rd; + int rn; +} arg_disas_t3230; + +typedef struct { + int imm; + int p; + int rn; + int rt; + int rt2; + int u; + int w; +} arg_ldst_ri2; + +typedef struct { + int rm; + int rn; +} arg_tbranch; + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wredundant-decls" +# ifdef __clang__ +# pragma GCC diagnostic ignored "-Wtypedef-redefinition" +# endif +#endif + +typedef arg_s_rrr_shi arg_TST_xrri; +static bool trans_TST_xrri(DisasContext *ctx, arg_TST_xrri *a); +typedef arg_s_rrr_shi arg_AND_rrri; +static bool trans_AND_rrri(DisasContext *ctx, arg_AND_rrri *a); +typedef arg_s_rrr_shi arg_BIC_rrri; +static bool trans_BIC_rrri(DisasContext *ctx, arg_BIC_rrri *a); +typedef arg_s_rrr_shi arg_MOV_rxri; +static bool trans_MOV_rxri(DisasContext *ctx, arg_MOV_rxri *a); +typedef arg_s_rrr_shi arg_ORR_rrri; +static bool trans_ORR_rrri(DisasContext *ctx, arg_ORR_rrri *a); +typedef arg_s_rrr_shi arg_MVN_rxri; +static bool trans_MVN_rxri(DisasContext *ctx, arg_MVN_rxri *a); +typedef arg_s_rrr_shi arg_ORN_rrri; +static bool trans_ORN_rrri(DisasContext *ctx, arg_ORN_rrri *a); +typedef arg_s_rrr_shi arg_TEQ_xrri; +static bool trans_TEQ_xrri(DisasContext *ctx, arg_TEQ_xrri *a); +typedef arg_s_rrr_shi arg_EOR_rrri; +static bool trans_EOR_rrri(DisasContext *ctx, arg_EOR_rrri *a); +typedef arg_pkh arg_PKH; +static bool trans_PKH(DisasContext *ctx, arg_PKH *a); +typedef arg_s_rrr_shi arg_CMN_xrri; +static bool trans_CMN_xrri(DisasContext *ctx, arg_CMN_xrri *a); +typedef arg_s_rrr_shi arg_ADD_rrri; +static bool trans_ADD_rrri(DisasContext *ctx, arg_ADD_rrri *a); +typedef arg_s_rrr_shi arg_ADC_rrri; +static bool trans_ADC_rrri(DisasContext *ctx, arg_ADC_rrri *a); +typedef arg_s_rrr_shi arg_SBC_rrri; +static bool trans_SBC_rrri(DisasContext *ctx, arg_SBC_rrri *a); +typedef arg_s_rrr_shi arg_CMP_xrri; +static bool trans_CMP_xrri(DisasContext *ctx, arg_CMP_xrri *a); +typedef arg_s_rrr_shi arg_SUB_rrri; +static bool trans_SUB_rrri(DisasContext *ctx, arg_SUB_rrri *a); +typedef arg_s_rrr_shi arg_RSB_rrri; +static bool trans_RSB_rrri(DisasContext *ctx, arg_RSB_rrri *a); +typedef arg_s_rrr_shr arg_MOV_rxrr; +static bool trans_MOV_rxrr(DisasContext *ctx, arg_MOV_rxrr *a); +typedef arg_s_rri_rot arg_TST_xri; +static bool trans_TST_xri(DisasContext *ctx, arg_TST_xri *a); +typedef arg_s_rri_rot arg_AND_rri; +static bool trans_AND_rri(DisasContext *ctx, arg_AND_rri *a); +typedef arg_s_rri_rot arg_BIC_rri; +static bool trans_BIC_rri(DisasContext *ctx, arg_BIC_rri *a); +typedef arg_s_rri_rot arg_MOV_rxi; +static bool trans_MOV_rxi(DisasContext *ctx, arg_MOV_rxi *a); +typedef arg_s_rri_rot arg_ORR_rri; +static bool trans_ORR_rri(DisasContext *ctx, arg_ORR_rri *a); +typedef arg_s_rri_rot arg_MVN_rxi; +static bool trans_MVN_rxi(DisasContext *ctx, arg_MVN_rxi *a); +typedef arg_s_rri_rot arg_ORN_rri; +static bool trans_ORN_rri(DisasContext *ctx, arg_ORN_rri *a); +typedef arg_s_rri_rot arg_TEQ_xri; +static bool trans_TEQ_xri(DisasContext *ctx, arg_TEQ_xri *a); +typedef arg_s_rri_rot arg_EOR_rri; +static bool trans_EOR_rri(DisasContext *ctx, arg_EOR_rri *a); +typedef arg_s_rri_rot arg_CMN_xri; +static bool trans_CMN_xri(DisasContext *ctx, arg_CMN_xri *a); +typedef arg_s_rri_rot arg_ADD_rri; +static bool trans_ADD_rri(DisasContext *ctx, 
arg_ADD_rri *a); +typedef arg_s_rri_rot arg_ADC_rri; +static bool trans_ADC_rri(DisasContext *ctx, arg_ADC_rri *a); +typedef arg_s_rri_rot arg_SBC_rri; +static bool trans_SBC_rri(DisasContext *ctx, arg_SBC_rri *a); +typedef arg_s_rri_rot arg_CMP_xri; +static bool trans_CMP_xri(DisasContext *ctx, arg_CMP_xri *a); +typedef arg_s_rri_rot arg_SUB_rri; +static bool trans_SUB_rri(DisasContext *ctx, arg_SUB_rri *a); +typedef arg_s_rri_rot arg_RSB_rri; +static bool trans_RSB_rri(DisasContext *ctx, arg_RSB_rri *a); +typedef arg_ri arg_ADR; +static bool trans_ADR(DisasContext *ctx, arg_ADR *a); +typedef arg_ri arg_MOVW; +static bool trans_MOVW(DisasContext *ctx, arg_MOVW *a); +typedef arg_ri arg_MOVT; +static bool trans_MOVT(DisasContext *ctx, arg_MOVT *a); +typedef arg_sat arg_SSAT16; +static bool trans_SSAT16(DisasContext *ctx, arg_SSAT16 *a); +typedef arg_sat arg_SSAT; +static bool trans_SSAT(DisasContext *ctx, arg_SSAT *a); +typedef arg_sat arg_USAT16; +static bool trans_USAT16(DisasContext *ctx, arg_USAT16 *a); +typedef arg_sat arg_USAT; +static bool trans_USAT(DisasContext *ctx, arg_USAT *a); +typedef arg_bfx arg_SBFX; +static bool trans_SBFX(DisasContext *ctx, arg_SBFX *a); +typedef arg_bfx arg_UBFX; +static bool trans_UBFX(DisasContext *ctx, arg_UBFX *a); +typedef arg_bfi arg_BFCI; +static bool trans_BFCI(DisasContext *ctx, arg_BFCI *a); +typedef arg_s_rrrr arg_MUL; +static bool trans_MUL(DisasContext *ctx, arg_MUL *a); +typedef arg_s_rrrr arg_MLA; +static bool trans_MLA(DisasContext *ctx, arg_MLA *a); +typedef arg_rrrr arg_MLS; +static bool trans_MLS(DisasContext *ctx, arg_MLS *a); +typedef arg_s_rrrr arg_SMULL; +static bool trans_SMULL(DisasContext *ctx, arg_SMULL *a); +typedef arg_s_rrrr arg_UMULL; +static bool trans_UMULL(DisasContext *ctx, arg_UMULL *a); +typedef arg_s_rrrr arg_SMLAL; +static bool trans_SMLAL(DisasContext *ctx, arg_SMLAL *a); +typedef arg_s_rrrr arg_UMLAL; +static bool trans_UMLAL(DisasContext *ctx, arg_UMLAL *a); +typedef arg_rrrr arg_UMAAL; +static bool trans_UMAAL(DisasContext *ctx, arg_UMAAL *a); +typedef arg_rrrr arg_SMULWB; +static bool trans_SMULWB(DisasContext *ctx, arg_SMULWB *a); +typedef arg_rrrr arg_SMLAWB; +static bool trans_SMLAWB(DisasContext *ctx, arg_SMLAWB *a); +typedef arg_rrrr arg_SMULWT; +static bool trans_SMULWT(DisasContext *ctx, arg_SMULWT *a); +typedef arg_rrrr arg_SMLAWT; +static bool trans_SMLAWT(DisasContext *ctx, arg_SMLAWT *a); +typedef arg_rrrr arg_SMULBB; +static bool trans_SMULBB(DisasContext *ctx, arg_SMULBB *a); +typedef arg_rrrr arg_SMLABB; +static bool trans_SMLABB(DisasContext *ctx, arg_SMLABB *a); +typedef arg_rrrr arg_SMULBT; +static bool trans_SMULBT(DisasContext *ctx, arg_SMULBT *a); +typedef arg_rrrr arg_SMLABT; +static bool trans_SMLABT(DisasContext *ctx, arg_SMLABT *a); +typedef arg_rrrr arg_SMULTB; +static bool trans_SMULTB(DisasContext *ctx, arg_SMULTB *a); +typedef arg_rrrr arg_SMLATB; +static bool trans_SMLATB(DisasContext *ctx, arg_SMLATB *a); +typedef arg_rrrr arg_SMULTT; +static bool trans_SMULTT(DisasContext *ctx, arg_SMULTT *a); +typedef arg_rrrr arg_SMLATT; +static bool trans_SMLATT(DisasContext *ctx, arg_SMLATT *a); +typedef arg_rrrr arg_SMLALBB; +static bool trans_SMLALBB(DisasContext *ctx, arg_SMLALBB *a); +typedef arg_rrrr arg_SMLALBT; +static bool trans_SMLALBT(DisasContext *ctx, arg_SMLALBT *a); +typedef arg_rrrr arg_SMLALTB; +static bool trans_SMLALTB(DisasContext *ctx, arg_SMLALTB *a); +typedef arg_rrrr arg_SMLALTT; +static bool trans_SMLALTT(DisasContext *ctx, arg_SMLALTT *a); +typedef arg_rrrr arg_USADA8; 
+static bool trans_USADA8(DisasContext *ctx, arg_USADA8 *a); +typedef arg_rrrr arg_SMLAD; +static bool trans_SMLAD(DisasContext *ctx, arg_SMLAD *a); +typedef arg_rrrr arg_SMLADX; +static bool trans_SMLADX(DisasContext *ctx, arg_SMLADX *a); +typedef arg_rrrr arg_SMLSD; +static bool trans_SMLSD(DisasContext *ctx, arg_SMLSD *a); +typedef arg_rrrr arg_SMLSDX; +static bool trans_SMLSDX(DisasContext *ctx, arg_SMLSDX *a); +typedef arg_rrrr arg_SMLALD; +static bool trans_SMLALD(DisasContext *ctx, arg_SMLALD *a); +typedef arg_rrrr arg_SMLALDX; +static bool trans_SMLALDX(DisasContext *ctx, arg_SMLALDX *a); +typedef arg_rrrr arg_SMLSLD; +static bool trans_SMLSLD(DisasContext *ctx, arg_SMLSLD *a); +typedef arg_rrrr arg_SMLSLDX; +static bool trans_SMLSLDX(DisasContext *ctx, arg_SMLSLDX *a); +typedef arg_rrrr arg_SMMLA; +static bool trans_SMMLA(DisasContext *ctx, arg_SMMLA *a); +typedef arg_rrrr arg_SMMLAR; +static bool trans_SMMLAR(DisasContext *ctx, arg_SMMLAR *a); +typedef arg_rrrr arg_SMMLS; +static bool trans_SMMLS(DisasContext *ctx, arg_SMMLS *a); +typedef arg_rrrr arg_SMMLSR; +static bool trans_SMMLSR(DisasContext *ctx, arg_SMMLSR *a); +typedef arg_rrr arg_SDIV; +static bool trans_SDIV(DisasContext *ctx, arg_SDIV *a); +typedef arg_rrr arg_UDIV; +static bool trans_UDIV(DisasContext *ctx, arg_UDIV *a); +typedef arg_rrr arg_QADD; +static bool trans_QADD(DisasContext *ctx, arg_QADD *a); +typedef arg_rrr arg_QSUB; +static bool trans_QSUB(DisasContext *ctx, arg_QSUB *a); +typedef arg_rrr arg_QDADD; +static bool trans_QDADD(DisasContext *ctx, arg_QDADD *a); +typedef arg_rrr arg_QDSUB; +static bool trans_QDSUB(DisasContext *ctx, arg_QDSUB *a); +typedef arg_rrr arg_CRC32B; +static bool trans_CRC32B(DisasContext *ctx, arg_CRC32B *a); +typedef arg_rrr arg_CRC32H; +static bool trans_CRC32H(DisasContext *ctx, arg_CRC32H *a); +typedef arg_rrr arg_CRC32W; +static bool trans_CRC32W(DisasContext *ctx, arg_CRC32W *a); +typedef arg_rrr arg_CRC32CB; +static bool trans_CRC32CB(DisasContext *ctx, arg_CRC32CB *a); +typedef arg_rrr arg_CRC32CH; +static bool trans_CRC32CH(DisasContext *ctx, arg_CRC32CH *a); +typedef arg_rrr arg_CRC32CW; +static bool trans_CRC32CW(DisasContext *ctx, arg_CRC32CW *a); +typedef arg_rrr arg_SEL; +static bool trans_SEL(DisasContext *ctx, arg_SEL *a); +typedef arg_rr arg_REV; +static bool trans_REV(DisasContext *ctx, arg_REV *a); +typedef arg_rr arg_REV16; +static bool trans_REV16(DisasContext *ctx, arg_REV16 *a); +typedef arg_rr arg_RBIT; +static bool trans_RBIT(DisasContext *ctx, arg_RBIT *a); +typedef arg_rr arg_REVSH; +static bool trans_REVSH(DisasContext *ctx, arg_REVSH *a); +typedef arg_rr arg_CLZ; +static bool trans_CLZ(DisasContext *ctx, arg_CLZ *a); +typedef arg_empty arg_YIELD; +static bool trans_YIELD(DisasContext *ctx, arg_YIELD *a); +typedef arg_empty arg_WFE; +static bool trans_WFE(DisasContext *ctx, arg_WFE *a); +typedef arg_empty arg_WFI; +static bool trans_WFI(DisasContext *ctx, arg_WFI *a); +typedef arg_empty arg_NOP; +static bool trans_NOP(DisasContext *ctx, arg_NOP *a); +typedef arg_cps arg_CPS; +static bool trans_CPS(DisasContext *ctx, arg_CPS *a); +typedef arg_empty arg_CLREX; +static bool trans_CLREX(DisasContext *ctx, arg_CLREX *a); +typedef arg_empty arg_DSB; +static bool trans_DSB(DisasContext *ctx, arg_DSB *a); +typedef arg_empty arg_DMB; +static bool trans_DMB(DisasContext *ctx, arg_DMB *a); +typedef arg_empty arg_ISB; +static bool trans_ISB(DisasContext *ctx, arg_ISB *a); +typedef arg_empty arg_SB; +static bool trans_SB(DisasContext *ctx, arg_SB *a); +typedef 
arg_mrs_bank arg_MRS_bank; +static bool trans_MRS_bank(DisasContext *ctx, arg_MRS_bank *a); +typedef arg_mrs_reg arg_MRS_reg; +static bool trans_MRS_reg(DisasContext *ctx, arg_MRS_reg *a); +typedef arg_disas_t3227 arg_MRS_v7m; +static bool trans_MRS_v7m(DisasContext *ctx, arg_MRS_v7m *a); +typedef arg_msr_bank arg_MSR_bank; +static bool trans_MSR_bank(DisasContext *ctx, arg_MSR_bank *a); +typedef arg_msr_reg arg_MSR_reg; +static bool trans_MSR_reg(DisasContext *ctx, arg_MSR_reg *a); +typedef arg_disas_t3228 arg_MSR_v7m; +static bool trans_MSR_v7m(DisasContext *ctx, arg_MSR_v7m *a); +typedef arg_r arg_BXJ; +static bool trans_BXJ(DisasContext *ctx, arg_BXJ *a); +typedef arg_empty arg_ERET; +static bool trans_ERET(DisasContext *ctx, arg_ERET *a); +typedef arg_i arg_SMC; +static bool trans_SMC(DisasContext *ctx, arg_SMC *a); +typedef arg_i arg_HVC; +static bool trans_HVC(DisasContext *ctx, arg_HVC *a); +typedef arg_empty arg_UDF; +static bool trans_UDF(DisasContext *ctx, arg_UDF *a); +typedef arg_ci arg_B_cond_thumb; +static bool trans_B_cond_thumb(DisasContext *ctx, arg_B_cond_thumb *a); +typedef arg_ldst_rr arg_STRB_rr; +static bool trans_STRB_rr(DisasContext *ctx, arg_STRB_rr *a); +typedef arg_ldst_ri arg_STRB_ri; +static bool trans_STRB_ri(DisasContext *ctx, arg_STRB_ri *a); +typedef arg_ldst_ri arg_STRBT_ri; +static bool trans_STRBT_ri(DisasContext *ctx, arg_STRBT_ri *a); +typedef arg_ldst_rr arg_STRH_rr; +static bool trans_STRH_rr(DisasContext *ctx, arg_STRH_rr *a); +typedef arg_ldst_ri arg_STRH_ri; +static bool trans_STRH_ri(DisasContext *ctx, arg_STRH_ri *a); +typedef arg_ldst_ri arg_STRHT_ri; +static bool trans_STRHT_ri(DisasContext *ctx, arg_STRHT_ri *a); +typedef arg_ldst_rr arg_STR_rr; +static bool trans_STR_rr(DisasContext *ctx, arg_STR_rr *a); +typedef arg_ldst_ri arg_STR_ri; +static bool trans_STR_ri(DisasContext *ctx, arg_STR_ri *a); +typedef arg_ldst_ri arg_STRT_ri; +static bool trans_STRT_ri(DisasContext *ctx, arg_STRT_ri *a); +typedef arg_ldst_ri arg_LDRB_ri; +static bool trans_LDRB_ri(DisasContext *ctx, arg_LDRB_ri *a); +typedef arg_ldst_ri arg_LDRBT_ri; +static bool trans_LDRBT_ri(DisasContext *ctx, arg_LDRBT_ri *a); +typedef arg_ldst_rr arg_LDRB_rr; +static bool trans_LDRB_rr(DisasContext *ctx, arg_LDRB_rr *a); +typedef arg_ldst_ri arg_LDRH_ri; +static bool trans_LDRH_ri(DisasContext *ctx, arg_LDRH_ri *a); +typedef arg_ldst_ri arg_LDRHT_ri; +static bool trans_LDRHT_ri(DisasContext *ctx, arg_LDRHT_ri *a); +typedef arg_ldst_rr arg_LDRH_rr; +static bool trans_LDRH_rr(DisasContext *ctx, arg_LDRH_rr *a); +typedef arg_ldst_ri arg_LDR_ri; +static bool trans_LDR_ri(DisasContext *ctx, arg_LDR_ri *a); +typedef arg_ldst_ri arg_LDRT_ri; +static bool trans_LDRT_ri(DisasContext *ctx, arg_LDRT_ri *a); +typedef arg_ldst_rr arg_LDR_rr; +static bool trans_LDR_rr(DisasContext *ctx, arg_LDR_rr *a); +typedef arg_ldst_ri arg_LDRSB_ri; +static bool trans_LDRSB_ri(DisasContext *ctx, arg_LDRSB_ri *a); +typedef arg_ldst_ri arg_LDRSBT_ri; +static bool trans_LDRSBT_ri(DisasContext *ctx, arg_LDRSBT_ri *a); +typedef arg_ldst_rr arg_LDRSB_rr; +static bool trans_LDRSB_rr(DisasContext *ctx, arg_LDRSB_rr *a); +typedef arg_ldst_ri arg_LDRSH_ri; +static bool trans_LDRSH_ri(DisasContext *ctx, arg_LDRSH_ri *a); +typedef arg_ldst_ri arg_LDRSHT_ri; +static bool trans_LDRSHT_ri(DisasContext *ctx, arg_LDRSHT_ri *a); +typedef arg_ldst_rr arg_LDRSH_rr; +static bool trans_LDRSH_rr(DisasContext *ctx, arg_LDRSH_rr *a); +typedef arg_ldst_ri2 arg_STRD_ri_t32; +static bool trans_STRD_ri_t32(DisasContext *ctx, 
arg_STRD_ri_t32 *a); +typedef arg_ldst_ri2 arg_LDRD_ri_t32; +static bool trans_LDRD_ri_t32(DisasContext *ctx, arg_LDRD_ri_t32 *a); +typedef arg_empty arg_SG; +static bool trans_SG(DisasContext *ctx, arg_SG *a); +typedef arg_disas_t3230 arg_TT; +static bool trans_TT(DisasContext *ctx, arg_TT *a); +typedef arg_strex arg_STREX; +static bool trans_STREX(DisasContext *ctx, arg_STREX *a); +typedef arg_strex arg_STREXB; +static bool trans_STREXB(DisasContext *ctx, arg_STREXB *a); +typedef arg_strex arg_STREXH; +static bool trans_STREXH(DisasContext *ctx, arg_STREXH *a); +typedef arg_strex arg_STREXD_t32; +static bool trans_STREXD_t32(DisasContext *ctx, arg_STREXD_t32 *a); +typedef arg_strex arg_STLEX; +static bool trans_STLEX(DisasContext *ctx, arg_STLEX *a); +typedef arg_strex arg_STLEXB; +static bool trans_STLEXB(DisasContext *ctx, arg_STLEXB *a); +typedef arg_strex arg_STLEXH; +static bool trans_STLEXH(DisasContext *ctx, arg_STLEXH *a); +typedef arg_strex arg_STLEXD_t32; +static bool trans_STLEXD_t32(DisasContext *ctx, arg_STLEXD_t32 *a); +typedef arg_ldrex arg_STL; +static bool trans_STL(DisasContext *ctx, arg_STL *a); +typedef arg_ldrex arg_STLB; +static bool trans_STLB(DisasContext *ctx, arg_STLB *a); +typedef arg_ldrex arg_STLH; +static bool trans_STLH(DisasContext *ctx, arg_STLH *a); +typedef arg_ldrex arg_LDREX; +static bool trans_LDREX(DisasContext *ctx, arg_LDREX *a); +typedef arg_ldrex arg_LDREXB; +static bool trans_LDREXB(DisasContext *ctx, arg_LDREXB *a); +typedef arg_ldrex arg_LDREXH; +static bool trans_LDREXH(DisasContext *ctx, arg_LDREXH *a); +typedef arg_ldrex arg_LDREXD_t32; +static bool trans_LDREXD_t32(DisasContext *ctx, arg_LDREXD_t32 *a); +typedef arg_ldrex arg_LDAEX; +static bool trans_LDAEX(DisasContext *ctx, arg_LDAEX *a); +typedef arg_ldrex arg_LDAEXB; +static bool trans_LDAEXB(DisasContext *ctx, arg_LDAEXB *a); +typedef arg_ldrex arg_LDAEXH; +static bool trans_LDAEXH(DisasContext *ctx, arg_LDAEXH *a); +typedef arg_ldrex arg_LDAEXD_t32; +static bool trans_LDAEXD_t32(DisasContext *ctx, arg_LDAEXD_t32 *a); +typedef arg_ldrex arg_LDA; +static bool trans_LDA(DisasContext *ctx, arg_LDA *a); +typedef arg_ldrex arg_LDAB; +static bool trans_LDAB(DisasContext *ctx, arg_LDAB *a); +typedef arg_ldrex arg_LDAH; +static bool trans_LDAH(DisasContext *ctx, arg_LDAH *a); +typedef arg_tbranch arg_TBB; +static bool trans_TBB(DisasContext *ctx, arg_TBB *a); +typedef arg_tbranch arg_TBH; +static bool trans_TBH(DisasContext *ctx, arg_TBH *a); +typedef arg_rrr arg_SADD8; +static bool trans_SADD8(DisasContext *ctx, arg_SADD8 *a); +typedef arg_rrr arg_QADD8; +static bool trans_QADD8(DisasContext *ctx, arg_QADD8 *a); +typedef arg_rrr arg_SHADD8; +static bool trans_SHADD8(DisasContext *ctx, arg_SHADD8 *a); +typedef arg_rrr arg_UADD8; +static bool trans_UADD8(DisasContext *ctx, arg_UADD8 *a); +typedef arg_rrr arg_UQADD8; +static bool trans_UQADD8(DisasContext *ctx, arg_UQADD8 *a); +typedef arg_rrr arg_UHADD8; +static bool trans_UHADD8(DisasContext *ctx, arg_UHADD8 *a); +typedef arg_rrr arg_SADD16; +static bool trans_SADD16(DisasContext *ctx, arg_SADD16 *a); +typedef arg_rrr arg_QADD16; +static bool trans_QADD16(DisasContext *ctx, arg_QADD16 *a); +typedef arg_rrr arg_SHADD16; +static bool trans_SHADD16(DisasContext *ctx, arg_SHADD16 *a); +typedef arg_rrr arg_UADD16; +static bool trans_UADD16(DisasContext *ctx, arg_UADD16 *a); +typedef arg_rrr arg_UQADD16; +static bool trans_UQADD16(DisasContext *ctx, arg_UQADD16 *a); +typedef arg_rrr arg_UHADD16; +static bool trans_UHADD16(DisasContext *ctx, 
arg_UHADD16 *a); +typedef arg_rrr arg_SASX; +static bool trans_SASX(DisasContext *ctx, arg_SASX *a); +typedef arg_rrr arg_QASX; +static bool trans_QASX(DisasContext *ctx, arg_QASX *a); +typedef arg_rrr arg_SHASX; +static bool trans_SHASX(DisasContext *ctx, arg_SHASX *a); +typedef arg_rrr arg_UASX; +static bool trans_UASX(DisasContext *ctx, arg_UASX *a); +typedef arg_rrr arg_UQASX; +static bool trans_UQASX(DisasContext *ctx, arg_UQASX *a); +typedef arg_rrr arg_UHASX; +static bool trans_UHASX(DisasContext *ctx, arg_UHASX *a); +typedef arg_rrr arg_SSUB8; +static bool trans_SSUB8(DisasContext *ctx, arg_SSUB8 *a); +typedef arg_rrr arg_QSUB8; +static bool trans_QSUB8(DisasContext *ctx, arg_QSUB8 *a); +typedef arg_rrr arg_SHSUB8; +static bool trans_SHSUB8(DisasContext *ctx, arg_SHSUB8 *a); +typedef arg_rrr arg_USUB8; +static bool trans_USUB8(DisasContext *ctx, arg_USUB8 *a); +typedef arg_rrr arg_UQSUB8; +static bool trans_UQSUB8(DisasContext *ctx, arg_UQSUB8 *a); +typedef arg_rrr arg_UHSUB8; +static bool trans_UHSUB8(DisasContext *ctx, arg_UHSUB8 *a); +typedef arg_rrr arg_SSUB16; +static bool trans_SSUB16(DisasContext *ctx, arg_SSUB16 *a); +typedef arg_rrr arg_QSUB16; +static bool trans_QSUB16(DisasContext *ctx, arg_QSUB16 *a); +typedef arg_rrr arg_SHSUB16; +static bool trans_SHSUB16(DisasContext *ctx, arg_SHSUB16 *a); +typedef arg_rrr arg_USUB16; +static bool trans_USUB16(DisasContext *ctx, arg_USUB16 *a); +typedef arg_rrr arg_UQSUB16; +static bool trans_UQSUB16(DisasContext *ctx, arg_UQSUB16 *a); +typedef arg_rrr arg_UHSUB16; +static bool trans_UHSUB16(DisasContext *ctx, arg_UHSUB16 *a); +typedef arg_rrr arg_SSAX; +static bool trans_SSAX(DisasContext *ctx, arg_SSAX *a); +typedef arg_rrr arg_QSAX; +static bool trans_QSAX(DisasContext *ctx, arg_QSAX *a); +typedef arg_rrr arg_SHSAX; +static bool trans_SHSAX(DisasContext *ctx, arg_SHSAX *a); +typedef arg_rrr arg_USAX; +static bool trans_USAX(DisasContext *ctx, arg_USAX *a); +typedef arg_rrr arg_UQSAX; +static bool trans_UQSAX(DisasContext *ctx, arg_UQSAX *a); +typedef arg_rrr arg_UHSAX; +static bool trans_UHSAX(DisasContext *ctx, arg_UHSAX *a); +typedef arg_rrr_rot arg_SXTAH; +static bool trans_SXTAH(DisasContext *ctx, arg_SXTAH *a); +typedef arg_rrr_rot arg_UXTAH; +static bool trans_UXTAH(DisasContext *ctx, arg_UXTAH *a); +typedef arg_rrr_rot arg_SXTAB16; +static bool trans_SXTAB16(DisasContext *ctx, arg_SXTAB16 *a); +typedef arg_rrr_rot arg_UXTAB16; +static bool trans_UXTAB16(DisasContext *ctx, arg_UXTAB16 *a); +typedef arg_rrr_rot arg_SXTAB; +static bool trans_SXTAB(DisasContext *ctx, arg_SXTAB *a); +typedef arg_rrr_rot arg_UXTAB; +static bool trans_UXTAB(DisasContext *ctx, arg_UXTAB *a); +typedef arg_ldst_block arg_STM_t32; +static bool trans_STM_t32(DisasContext *ctx, arg_STM_t32 *a); +typedef arg_ldst_block arg_LDM_t32; +static bool trans_LDM_t32(DisasContext *ctx, arg_LDM_t32 *a); +typedef arg_rfe arg_RFE; +static bool trans_RFE(DisasContext *ctx, arg_RFE *a); +typedef arg_srs arg_SRS; +static bool trans_SRS(DisasContext *ctx, arg_SRS *a); +typedef arg_i arg_B; +static bool trans_B(DisasContext *ctx, arg_B *a); +typedef arg_i arg_BL; +static bool trans_BL(DisasContext *ctx, arg_BL *a); +typedef arg_i arg_BLX_i; +static bool trans_BLX_i(DisasContext *ctx, arg_BLX_i *a); + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +# pragma GCC diagnostic pop +#endif + +static void disas_t32_extract_S_xri_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->imm = t32_expandimm_imm(ctx, 
deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); + a->rot = t32_expandimm_rot(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); + a->s = 1; + a->rd = 0; +} + +static void disas_t32_extract_S_xrr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->shty = extract32(insn, 4, 2); + a->rm = extract32(insn, 0, 4); + a->shim = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); + a->s = 1; + a->rd = 0; +} + +static void disas_t32_extract_bfi(DisasContext *ctx, arg_bfi *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->msb = extract32(insn, 0, 5); + a->lsb = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); +} + +static void disas_t32_extract_bfx(DisasContext *ctx, arg_bfx *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->widthm1 = extract32(insn, 0, 5); + a->lsb = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); +} + +static void disas_t32_extract_branch24(DisasContext *ctx, arg_i *a, uint32_t insn) +{ + a->imm = t32_branch24(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 0, 11), 11, 21, extract32(insn, 16, 10)), 21, 11, extract32(insn, 11, 1)), 22, 10, extract32(insn, 13, 1)), 23, 9, sextract32(insn, 26, 1))); +} + +static void disas_t32_extract_disas_t32_Fmt_10(DisasContext *ctx, arg_ri *a, uint32_t insn) +{ + a->rd = extract32(insn, 8, 4); + a->imm = negate(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); +} + +static void disas_t32_extract_disas_t32_Fmt_22(DisasContext *ctx, arg_empty *a, uint32_t insn) +{ +} + +static void disas_t32_extract_disas_t32_Fmt_23(DisasContext *ctx, arg_cps *a, uint32_t insn) +{ + a->imod = extract32(insn, 9, 2); + a->M = extract32(insn, 8, 1); + a->A = extract32(insn, 7, 1); + a->I = extract32(insn, 6, 1); + a->F = extract32(insn, 5, 1); + a->mode = extract32(insn, 0, 5); +} + +static void disas_t32_extract_disas_t32_Fmt_24(DisasContext *ctx, arg_mrs_bank *a, uint32_t insn) +{ + a->r = extract32(insn, 20, 1); + a->rd = extract32(insn, 8, 4); + a->sysm = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 4, 1)); +} + +static void disas_t32_extract_disas_t32_Fmt_25(DisasContext *ctx, arg_mrs_reg *a, uint32_t insn) +{ + a->r = extract32(insn, 20, 1); + a->rd = extract32(insn, 8, 4); +} + +static void disas_t32_extract_disas_t32_Fmt_26(DisasContext *ctx, arg_disas_t3227 *a, uint32_t insn) +{ + a->rd = extract32(insn, 8, 4); + a->sysm = extract32(insn, 0, 8); +} + +static void disas_t32_extract_disas_t32_Fmt_27(DisasContext *ctx, arg_msr_bank *a, uint32_t insn) +{ + a->r = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->sysm = deposit32(extract32(insn, 8, 4), 4, 28, extract32(insn, 4, 1)); +} + +static void disas_t32_extract_disas_t32_Fmt_28(DisasContext *ctx, arg_msr_reg *a, uint32_t insn) +{ + a->r = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->mask = extract32(insn, 8, 4); +} + +static void disas_t32_extract_disas_t32_Fmt_29(DisasContext *ctx, arg_disas_t3228 *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->mask = extract32(insn, 10, 2); + a->sysm = extract32(insn, 0, 8); +} + +static void disas_t32_extract_disas_t32_Fmt_3(DisasContext *ctx, arg_pkh *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + 
a->tb = extract32(insn, 5, 1); + a->rm = extract32(insn, 0, 4); + a->imm = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); +} + +static void disas_t32_extract_disas_t32_Fmt_30(DisasContext *ctx, arg_r *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 4); +} + +static void disas_t32_extract_disas_t32_Fmt_31(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) +{ + a->imm = extract32(insn, 0, 8); + a->rot = 0; + a->s = 1; + a->rd = 15; + a->rn = 14; +} + +static void disas_t32_extract_disas_t32_Fmt_32(DisasContext *ctx, arg_i *a, uint32_t insn) +{ + a->imm = extract32(insn, 16, 4); +} + +static void disas_t32_extract_disas_t32_Fmt_33(DisasContext *ctx, arg_i *a, uint32_t insn) +{ + a->imm = deposit32(extract32(insn, 0, 12), 12, 20, extract32(insn, 16, 4)); +} + +static void disas_t32_extract_disas_t32_Fmt_34(DisasContext *ctx, arg_ci *a, uint32_t insn) +{ + a->cond = extract32(insn, 22, 4); + a->imm = times_2(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 0, 11), 11, 21, extract32(insn, 16, 6)), 17, 15, extract32(insn, 13, 1)), 18, 14, extract32(insn, 11, 1)), 19, 13, sextract32(insn, 26, 1))); +} + +static void disas_t32_extract_disas_t32_Fmt_4(DisasContext *ctx, arg_s_rrr_shr *a, uint32_t insn) +{ + a->shty = extract32(insn, 21, 2); + a->s = extract32(insn, 20, 1); + a->rm = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->rs = extract32(insn, 0, 4); + a->rn = 0; +} + +static void disas_t32_extract_disas_t32_Fmt_48(DisasContext *ctx, arg_disas_t3230 *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->A = extract32(insn, 7, 1); + a->T = extract32(insn, 6, 1); +} + +static void disas_t32_extract_disas_t32_Fmt_9(DisasContext *ctx, arg_ri *a, uint32_t insn) +{ + a->rd = extract32(insn, 8, 4); + a->imm = deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1)); +} + +static void disas_t32_extract_ldrex_0(DisasContext *ctx, arg_ldrex *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rt2 = 15; + a->imm = 0; +} + +static void disas_t32_extract_ldrex_d(DisasContext *ctx, arg_ldrex *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rt2 = extract32(insn, 8, 4); + a->imm = 0; +} + +static void disas_t32_extract_ldrex_i(DisasContext *ctx, arg_ldrex *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rt2 = 15; + a->imm = times_4(ctx, extract32(insn, 0, 8)); +} + +static void disas_t32_extract_ldst_ri_idx(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->p = extract32(insn, 10, 1); + a->u = extract32(insn, 9, 1); + a->imm = extract32(insn, 0, 8); + a->w = 1; +} + +static void disas_t32_extract_ldst_ri_lit(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rt = extract32(insn, 12, 4); + a->imm = extract32(insn, 0, 12); + a->p = 1; + a->w = 0; + a->rn = 15; +} + +static void disas_t32_extract_ldst_ri_neg(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->imm = extract32(insn, 0, 8); + a->p = 1; + a->w = 0; + a->u = 0; +} + +static void disas_t32_extract_ldst_ri_pos(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->imm = extract32(insn, 0, 12); + a->p = 1; + a->w = 0; + a->u = 1; 
+} + +static void disas_t32_extract_ldst_ri_unp(DisasContext *ctx, arg_ldst_ri *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->imm = extract32(insn, 0, 8); + a->p = 1; + a->w = 0; + a->u = 1; +} + +static void disas_t32_extract_ldst_rr(DisasContext *ctx, arg_ldst_rr *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->shimm = extract32(insn, 4, 2); + a->rm = extract32(insn, 0, 4); + a->p = 1; + a->w = 0; + a->u = 1; + a->shtype = 0; +} + +static void disas_t32_extract_ldstd_ri8(DisasContext *ctx, arg_ldst_ri2 *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rt2 = extract32(insn, 8, 4); + a->imm = times_4(ctx, extract32(insn, 0, 8)); +} + +static void disas_t32_extract_ldstm(DisasContext *ctx, arg_ldst_block *a, uint32_t insn) +{ + a->w = extract32(insn, 21, 1); + a->rn = extract32(insn, 16, 4); + a->list = extract32(insn, 0, 16); + a->u = 0; +} + +static void disas_t32_extract_mov16(DisasContext *ctx, arg_ri *a, uint32_t insn) +{ + a->rd = extract32(insn, 8, 4); + a->imm = deposit32(deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1)), 12, 20, extract32(insn, 16, 4)); +} + +static void disas_t32_extract_rdm(DisasContext *ctx, arg_rr *a, uint32_t insn) +{ + a->rd = extract32(insn, 8, 4); + a->rm = extract32(insn, 0, 4); +} + +static void disas_t32_extract_rfe(DisasContext *ctx, arg_rfe *a, uint32_t insn) +{ + a->w = extract32(insn, 21, 1); + a->rn = extract32(insn, 16, 4); +} + +static void disas_t32_extract_rn0dm(DisasContext *ctx, arg_rrrr *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->rm = extract32(insn, 0, 4); + a->ra = 0; +} + +static void disas_t32_extract_rnadm(DisasContext *ctx, arg_rrrr *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->ra = extract32(insn, 12, 4); + a->rd = extract32(insn, 8, 4); + a->rm = extract32(insn, 0, 4); +} + +static void disas_t32_extract_rndm(DisasContext *ctx, arg_rrr *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->rm = extract32(insn, 0, 4); +} + +static void disas_t32_extract_rrr_rot(DisasContext *ctx, arg_rrr_rot *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->rot = extract32(insn, 4, 2); + a->rm = extract32(insn, 0, 4); +} + +static void disas_t32_extract_s0_rn0dm(DisasContext *ctx, arg_s_rrrr *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->rm = extract32(insn, 0, 4); + a->ra = 0; + a->s = 0; +} + +static void disas_t32_extract_s0_rnadm(DisasContext *ctx, arg_s_rrrr *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->ra = extract32(insn, 12, 4); + a->rd = extract32(insn, 8, 4); + a->rm = extract32(insn, 0, 4); + a->s = 0; +} + +static void disas_t32_extract_s0_rri_12(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->imm = deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1)); + a->rot = 0; + a->s = 0; +} + +static void disas_t32_extract_s_rri_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->imm = t32_expandimm_imm(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 
3)), 11, 21, extract32(insn, 26, 1))); + a->rot = t32_expandimm_rot(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); +} + +static void disas_t32_extract_s_rrr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->shty = extract32(insn, 4, 2); + a->rm = extract32(insn, 0, 4); + a->shim = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); +} + +static void disas_t32_extract_s_rxi_rot(DisasContext *ctx, arg_s_rri_rot *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rd = extract32(insn, 8, 4); + a->imm = t32_expandimm_imm(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); + a->rot = t32_expandimm_rot(ctx, deposit32(deposit32(extract32(insn, 0, 8), 8, 24, extract32(insn, 12, 3)), 11, 21, extract32(insn, 26, 1))); + a->rn = 0; +} + +static void disas_t32_extract_s_rxr_shi(DisasContext *ctx, arg_s_rrr_shi *a, uint32_t insn) +{ + a->s = extract32(insn, 20, 1); + a->rd = extract32(insn, 8, 4); + a->shty = extract32(insn, 4, 2); + a->rm = extract32(insn, 0, 4); + a->shim = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); + a->rn = 0; +} + +static void disas_t32_extract_sat(DisasContext *ctx, arg_sat *a, uint32_t insn) +{ + a->sh = extract32(insn, 21, 1); + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->satimm = extract32(insn, 0, 5); + a->imm = deposit32(extract32(insn, 6, 2), 2, 30, extract32(insn, 12, 3)); +} + +static void disas_t32_extract_sat16(DisasContext *ctx, arg_sat *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rd = extract32(insn, 8, 4); + a->satimm = extract32(insn, 0, 5); + a->sh = 0; + a->imm = 0; +} + +static void disas_t32_extract_srs(DisasContext *ctx, arg_srs *a, uint32_t insn) +{ + a->w = extract32(insn, 21, 1); + a->mode = extract32(insn, 0, 5); +} + +static void disas_t32_extract_strex_0(DisasContext *ctx, arg_strex *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rd = extract32(insn, 0, 4); + a->rt2 = 15; + a->imm = 0; +} + +static void disas_t32_extract_strex_d(DisasContext *ctx, arg_strex *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rt2 = extract32(insn, 8, 4); + a->rd = extract32(insn, 0, 4); + a->imm = 0; +} + +static void disas_t32_extract_strex_i(DisasContext *ctx, arg_strex *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->rd = extract32(insn, 8, 4); + a->rt2 = 15; + a->imm = times_4(ctx, extract32(insn, 0, 8)); +} + +static void disas_t32_extract_tbranch(DisasContext *ctx, arg_tbranch *a, uint32_t insn) +{ + a->rn = extract32(insn, 16, 4); + a->rm = extract32(insn, 0, 4); +} + +static bool disas_t32(DisasContext *ctx, uint32_t insn) +{ + union { + arg_bfi f_bfi; + arg_bfx f_bfx; + arg_ci f_ci; + arg_cps f_cps; + arg_disas_t3227 f_disas_t3227; + arg_disas_t3228 f_disas_t3228; + arg_disas_t3230 f_disas_t3230; + arg_empty f_empty; + arg_i f_i; + arg_ldrex f_ldrex; + arg_ldst_block f_ldst_block; + arg_ldst_ri f_ldst_ri; + arg_ldst_ri2 f_ldst_ri2; + arg_ldst_rr f_ldst_rr; + arg_mrs_bank f_mrs_bank; + arg_mrs_reg f_mrs_reg; + arg_msr_bank f_msr_bank; + arg_msr_reg f_msr_reg; + arg_pkh f_pkh; + arg_r f_r; + arg_rfe f_rfe; + arg_ri f_ri; + arg_rr f_rr; + arg_rrr f_rrr; + arg_rrr_rot f_rrr_rot; + arg_rrrr f_rrrr; + arg_s_rri_rot 
f_s_rri_rot; + arg_s_rrr_shi f_s_rrr_shi; + arg_s_rrr_shr f_s_rrr_shr; + arg_s_rrrr f_s_rrrr; + arg_sat f_sat; + arg_srs f_srs; + arg_strex f_strex; + arg_tbranch f_tbranch; + } u; + + switch ((insn >> 27) & 0x1f) { + case 0x1d: + /* 11101... ........ ........ ........ */ + switch (insn & 0x07400000) { + case 0x00000000: + /* 11101000 .0...... ........ ........ */ + switch (insn & 0x00900000) { + case 0x00000000: + /* 11101000 00.0.... ........ ........ */ + disas_t32_extract_srs(ctx, &u.f_srs, insn); + switch ((insn >> 5) & 0x7fff) { + case 0x6e00: + /* 11101000 00.01101 11000000 000..... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:621 */ + u.f_srs.pu = 2; + if (trans_SRS(ctx, &u.f_srs)) return true; + return false; + } + return false; + case 0x00100000: + /* 11101000 00.1.... ........ ........ */ + disas_t32_extract_rfe(ctx, &u.f_rfe, insn); + switch (insn & 0x0000ffff) { + case 0x0000c000: + /* 11101000 00.1.... 11000000 00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:615 */ + u.f_rfe.pu = 2; + if (trans_RFE(ctx, &u.f_rfe)) return true; + return false; + } + return false; + case 0x00800000: + /* 11101000 10.0.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:607 */ + disas_t32_extract_ldstm(ctx, &u.f_ldst_block, insn); + u.f_ldst_block.i = 1; + u.f_ldst_block.b = 0; + if (trans_STM_t32(ctx, &u.f_ldst_block)) return true; + return false; + case 0x00900000: + /* 11101000 10.1.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:609 */ + disas_t32_extract_ldstm(ctx, &u.f_ldst_block, insn); + u.f_ldst_block.i = 1; + u.f_ldst_block.b = 0; + if (trans_LDM_t32(ctx, &u.f_ldst_block)) return true; + return false; + } + return false; + case 0x00400000: + /* 11101000 .1...... ........ ........ */ + switch ((insn >> 20) & 0x3) { + case 0x0: + /* 11101000 .100.... ........ ........ */ + switch ((insn >> 23) & 0x1) { + case 0x0: + /* 11101000 0100.... ........ ........ */ + if ((insn & 0x0000f03f) == 0x0000f000) { + /* 11101000 0100.... 1111.... ..000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:512 */ + disas_t32_extract_disas_t32_Fmt_48(ctx, &u.f_disas_t3230, insn); + if (trans_TT(ctx, &u.f_disas_t3230)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:513 */ + disas_t32_extract_strex_i(ctx, &u.f_strex, insn); + if (trans_STREX(ctx, &u.f_strex)) return true; + return false; + case 0x1: + /* 11101000 1100.... ........ ........ */ + switch ((insn >> 4) & 0xf) { + case 0x4: + /* 11101000 1100.... ........ 0100.... */ + disas_t32_extract_strex_0(ctx, &u.f_strex, insn); + switch ((insn >> 8) & 0xf) { + case 0xf: + /* 11101000 1100.... ....1111 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:515 */ + if (trans_STREXB(ctx, &u.f_strex)) return true; + return false; + } + return false; + case 0x5: + /* 11101000 1100.... ........ 0101.... */ + disas_t32_extract_strex_0(ctx, &u.f_strex, insn); + switch ((insn >> 8) & 0xf) { + case 0xf: + /* 11101000 1100.... ....1111 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:516 */ + if (trans_STREXH(ctx, &u.f_strex)) return true; + return false; + } + return false; + case 0x7: + /* 11101000 1100.... ........ 0111.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:517 */ + disas_t32_extract_strex_d(ctx, &u.f_strex, insn); + if (trans_STREXD_t32(ctx, &u.f_strex)) return true; + return false; + case 0x8: + /* 11101000 1100.... ........ 1000.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1100.... ....1111 10001111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:525 */ + if (trans_STLB(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x9: + /* 11101000 1100.... ........ 1001.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1100.... ....1111 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:526 */ + if (trans_STLH(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0xa: + /* 11101000 1100.... ........ 1010.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1100.... ....1111 10101111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:524 */ + if (trans_STL(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0xc: + /* 11101000 1100.... ........ 1100.... */ + disas_t32_extract_strex_0(ctx, &u.f_strex, insn); + switch ((insn >> 8) & 0xf) { + case 0xf: + /* 11101000 1100.... ....1111 1100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:520 */ + if (trans_STLEXB(ctx, &u.f_strex)) return true; + return false; + } + return false; + case 0xd: + /* 11101000 1100.... ........ 1101.... */ + disas_t32_extract_strex_0(ctx, &u.f_strex, insn); + switch ((insn >> 8) & 0xf) { + case 0xf: + /* 11101000 1100.... ....1111 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:521 */ + if (trans_STLEXH(ctx, &u.f_strex)) return true; + return false; + } + return false; + case 0xe: + /* 11101000 1100.... ........ 1110.... */ + disas_t32_extract_strex_0(ctx, &u.f_strex, insn); + switch ((insn >> 8) & 0xf) { + case 0xf: + /* 11101000 1100.... ....1111 1110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:519 */ + if (trans_STLEX(ctx, &u.f_strex)) return true; + return false; + } + return false; + case 0xf: + /* 11101000 1100.... ........ 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:522 */ + disas_t32_extract_strex_d(ctx, &u.f_strex, insn); + if (trans_STLEXD_t32(ctx, &u.f_strex)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* 11101000 .101.... ........ ........ */ + switch ((insn >> 23) & 0x1) { + case 0x0: + /* 11101000 0101.... ........ ........ */ + disas_t32_extract_ldrex_i(ctx, &u.f_ldrex, insn); + switch ((insn >> 8) & 0xf) { + case 0xf: + /* 11101000 0101.... ....1111 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:528 */ + if (trans_LDREX(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x1: + /* 11101000 1101.... ........ ........ */ + switch ((insn >> 4) & 0xf) { + case 0x0: + /* 11101000 1101.... ........ 0000.... */ + disas_t32_extract_tbranch(ctx, &u.f_tbranch, insn); + switch ((insn >> 8) & 0xff) { + case 0xf0: + /* 11101000 1101.... 
11110000 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:545 */ + if (trans_TBB(ctx, &u.f_tbranch)) return true; + return false; + } + return false; + case 0x1: + /* 11101000 1101.... ........ 0001.... */ + disas_t32_extract_tbranch(ctx, &u.f_tbranch, insn); + switch ((insn >> 8) & 0xff) { + case 0xf0: + /* 11101000 1101.... 11110000 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:546 */ + if (trans_TBH(ctx, &u.f_tbranch)) return true; + return false; + } + return false; + case 0x4: + /* 11101000 1101.... ........ 0100.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1101.... ....1111 01001111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:529 */ + if (trans_LDREXB(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x5: + /* 11101000 1101.... ........ 0101.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1101.... ....1111 01011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:530 */ + if (trans_LDREXH(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x7: + /* 11101000 1101.... ........ 0111.... */ + disas_t32_extract_ldrex_d(ctx, &u.f_ldrex, insn); + switch (insn & 0x0000000f) { + case 0x0000000f: + /* 11101000 1101.... ........ 01111111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:531 */ + if (trans_LDREXD_t32(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x8: + /* 11101000 1101.... ........ 1000.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1101.... ....1111 10001111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:539 */ + if (trans_LDAB(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0x9: + /* 11101000 1101.... ........ 1001.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1101.... ....1111 10011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:540 */ + if (trans_LDAH(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0xa: + /* 11101000 1101.... ........ 1010.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1101.... ....1111 10101111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:538 */ + if (trans_LDA(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0xc: + /* 11101000 1101.... ........ 1100.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1101.... ....1111 11001111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:534 */ + if (trans_LDAEXB(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0xd: + /* 11101000 1101.... ........ 1101.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1101.... 
....1111 11011111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:535 */ + if (trans_LDAEXH(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0xe: + /* 11101000 1101.... ........ 1110.... */ + disas_t32_extract_ldrex_0(ctx, &u.f_ldrex, insn); + switch (insn & 0x00000f0f) { + case 0x00000f0f: + /* 11101000 1101.... ....1111 11101111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:533 */ + if (trans_LDAEX(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + case 0xf: + /* 11101000 1101.... ........ 1111.... */ + disas_t32_extract_ldrex_d(ctx, &u.f_ldrex, insn); + switch (insn & 0x0000000f) { + case 0x0000000f: + /* 11101000 1101.... ........ 11111111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:536 */ + if (trans_LDAEXD_t32(ctx, &u.f_ldrex)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0x2: + /* 11101000 .110.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:483 */ + disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); + u.f_ldst_ri2.w = 1; + u.f_ldst_ri2.p = 0; + if (trans_STRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; + return false; + case 0x3: + /* 11101000 .111.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:484 */ + disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); + u.f_ldst_ri2.w = 1; + u.f_ldst_ri2.p = 0; + if (trans_LDRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; + return false; + } + return false; + case 0x01000000: + /* 11101001 .0...... ........ ........ */ + switch (insn & 0x00900000) { + case 0x00000000: + /* 11101001 00.0.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:608 */ + disas_t32_extract_ldstm(ctx, &u.f_ldst_block, insn); + u.f_ldst_block.i = 0; + u.f_ldst_block.b = 1; + if (trans_STM_t32(ctx, &u.f_ldst_block)) return true; + return false; + case 0x00100000: + /* 11101001 00.1.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:610 */ + disas_t32_extract_ldstm(ctx, &u.f_ldst_block, insn); + u.f_ldst_block.i = 0; + u.f_ldst_block.b = 1; + if (trans_LDM_t32(ctx, &u.f_ldst_block)) return true; + return false; + case 0x00800000: + /* 11101001 10.0.... ........ ........ */ + disas_t32_extract_srs(ctx, &u.f_srs, insn); + switch ((insn >> 5) & 0x7fff) { + case 0x6e00: + /* 11101001 10.01101 11000000 000..... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:622 */ + u.f_srs.pu = 1; + if (trans_SRS(ctx, &u.f_srs)) return true; + return false; + } + return false; + case 0x00900000: + /* 11101001 10.1.... ........ ........ */ + disas_t32_extract_rfe(ctx, &u.f_rfe, insn); + switch (insn & 0x0000ffff) { + case 0x0000c000: + /* 11101001 10.1.... 11000000 00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:616 */ + u.f_rfe.pu = 1; + if (trans_RFE(ctx, &u.f_rfe)) return true; + return false; + } + return false; + } + return false; + case 0x01400000: + /* 11101001 .1...... ........ ........ */ + switch ((insn >> 20) & 0x3) { + case 0x0: + /* 11101001 .100.... ........ ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:486 */ + disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); + u.f_ldst_ri2.w = 0; + u.f_ldst_ri2.p = 1; + if (trans_STRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; + return false; + case 0x1: + /* 11101001 .101.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:487 */ + disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); + u.f_ldst_ri2.w = 0; + u.f_ldst_ri2.p = 1; + if (trans_LDRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; + return false; + case 0x2: + /* 11101001 .110.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:489 */ + disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); + u.f_ldst_ri2.w = 1; + u.f_ldst_ri2.p = 1; + if (trans_STRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; + return false; + case 0x3: + /* 11101001 .111.... ........ ........ */ + if ((insn & 0x008fffff) == 0x000fe97f) { + /* 11101001 01111111 11101001 01111111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:491 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_SG(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:492 */ + disas_t32_extract_ldstd_ri8(ctx, &u.f_ldst_ri2, insn); + u.f_ldst_ri2.w = 1; + u.f_ldst_ri2.p = 1; + if (trans_LDRD_ri_t32(ctx, &u.f_ldst_ri2)) return true; + return false; + } + return false; + case 0x02000000: + /* 11101010 .0...... ........ ........ */ + switch (insn & 0x00a08000) { + case 0x00000000: + /* 11101010 000..... 0....... ........ */ + if ((insn & 0x00100f00) == 0x00100f00) { + /* 11101010 0001.... 0...1111 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:61 */ + disas_t32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_TST_xrri(ctx, &u.f_s_rrr_shi)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:62 */ + disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_AND_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x00200000: + /* 11101010 001..... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:64 */ + disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_BIC_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x00800000: + /* 11101010 100..... 0....... ........ */ + if ((insn & 0x00100f00) == 0x00100f00) { + /* 11101010 1001.... 0...1111 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:74 */ + disas_t32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_TEQ_xrri(ctx, &u.f_s_rrr_shi)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:75 */ + disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_EOR_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + case 0x02400000: + /* 11101010 .1...... ........ ........ */ + switch (insn & 0x00a08000) { + case 0x00000000: + /* 11101010 010..... 0....... ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11101010 010.1111 0....... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:66 */ + disas_t32_extract_s_rxr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_MOV_rxri(ctx, &u.f_s_rrr_shi)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:67 */ + disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_ORR_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x00200000: + /* 11101010 011..... 0....... ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11101010 011.1111 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:70 */ + disas_t32_extract_s_rxr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_MVN_rxri(ctx, &u.f_s_rrr_shi)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:71 */ + disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_ORN_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x00800000: + /* 11101010 110..... 0....... ........ */ + disas_t32_extract_disas_t32_Fmt_3(ctx, &u.f_pkh, insn); + switch (insn & 0x00100010) { + case 0x00000000: + /* 11101010 1100.... 0....... ...0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:77 */ + if (trans_PKH(ctx, &u.f_pkh)) return true; + return false; + } + return false; + } + return false; + case 0x03000000: + /* 11101011 .0...... ........ ........ */ + switch (insn & 0x00a08000) { + case 0x00000000: + /* 11101011 000..... 0....... ........ */ + if ((insn & 0x00100f00) == 0x00100f00) { + /* 11101011 0001.... 0...1111 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:80 */ + disas_t32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_CMN_xrri(ctx, &u.f_s_rrr_shi)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:81 */ + disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_ADD_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x00a00000: + /* 11101011 101..... 0....... ........ */ + if ((insn & 0x00100f00) == 0x00100f00) { + /* 11101011 1011.... 0...1111 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:86 */ + disas_t32_extract_S_xrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_CMP_xrri(ctx, &u.f_s_rrr_shi)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:87 */ + disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); + if (trans_SUB_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + case 0x03400000: + /* 11101011 .1...... ........ ........ */ + disas_t32_extract_s_rrr_shi(ctx, &u.f_s_rrr_shi, insn); + switch (insn & 0x00a08000) { + case 0x00000000: + /* 11101011 010..... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:83 */ + if (trans_ADC_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x00200000: + /* 11101011 011..... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:84 */ + if (trans_SBC_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + case 0x00800000: + /* 11101011 110..... 0....... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:89 */ + if (trans_RSB_rrri(ctx, &u.f_s_rrr_shi)) return true; + return false; + } + return false; + } + return false; + case 0x1e: + /* 11110... ........ ........ ........ */ + switch ((insn >> 15) & 0x1) { + case 0x0: + /* 11110... ........ 0....... ........ */ + switch ((insn >> 22) & 0xf) { + case 0x0: + /* 11110.00 00...... 0....... ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 11110.00 000..... 0....... ........ */ + if ((insn & 0x00100f00) == 0x00100f00) { + /* 11110.00 0001.... 0...1111 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:109 */ + disas_t32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_TST_xri(ctx, &u.f_s_rri_rot)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:110 */ + disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_AND_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x1: + /* 11110.00 001..... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:112 */ + disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_BIC_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0x1: + /* 11110.00 01...... 0....... ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 11110.00 010..... 0....... ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11110.00 010.1111 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:114 */ + disas_t32_extract_s_rxi_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_MOV_rxi(ctx, &u.f_s_rri_rot)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:115 */ + disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_ORR_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x1: + /* 11110.00 011..... 0....... ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11110.00 011.1111 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:118 */ + disas_t32_extract_s_rxi_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_MVN_rxi(ctx, &u.f_s_rri_rot)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:119 */ + disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_ORN_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0x2: + /* 11110.00 10...... 0....... ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 11110.00 100..... 0....... ........ */ + if ((insn & 0x00100f00) == 0x00100f00) { + /* 11110.00 1001.... 0...1111 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:122 */ + disas_t32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_TEQ_xri(ctx, &u.f_s_rri_rot)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:123 */ + disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_EOR_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0x4: + /* 11110.01 00...... 0....... ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 11110.01 000..... 0....... ........ */ + if ((insn & 0x00100f00) == 0x00100f00) { + /* 11110.01 0001.... 0...1111 ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:126 */ + disas_t32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_CMN_xri(ctx, &u.f_s_rri_rot)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:127 */ + disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0x5: + /* 11110.01 01...... 0....... ........ */ + disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 11110.01 010..... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:129 */ + if (trans_ADC_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + case 0x1: + /* 11110.01 011..... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:130 */ + if (trans_SBC_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0x6: + /* 11110.01 10...... 0....... ........ */ + switch ((insn >> 21) & 0x1) { + case 0x1: + /* 11110.01 101..... 0....... ........ */ + if ((insn & 0x00100f00) == 0x00100f00) { + /* 11110.01 1011.... 0...1111 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:132 */ + disas_t32_extract_S_xri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_CMP_xri(ctx, &u.f_s_rri_rot)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:133 */ + disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0x7: + /* 11110.01 11...... 0....... ........ */ + disas_t32_extract_s_rri_rot(ctx, &u.f_s_rri_rot, insn); + switch ((insn >> 21) & 0x1) { + case 0x0: + /* 11110.01 110..... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:135 */ + if (trans_RSB_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0x8: + /* 11110.10 00...... 0....... ........ */ + switch ((insn >> 20) & 0x3) { + case 0x0: + /* 11110.10 0000.... 0....... ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11110.10 00001111 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:145 */ + disas_t32_extract_disas_t32_Fmt_9(ctx, &u.f_ri, insn); + if (trans_ADR(ctx, &u.f_ri)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:147 */ + disas_t32_extract_s0_rri_12(ctx, &u.f_s_rri_rot, insn); + if (trans_ADD_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0x9: + /* 11110.10 01...... 0....... ........ */ + disas_t32_extract_mov16(ctx, &u.f_ri, insn); + switch ((insn >> 20) & 0x3) { + case 0x0: + /* 11110.10 0100.... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:161 */ + if (trans_MOVW(ctx, &u.f_ri)) return true; + return false; + } + return false; + case 0xa: + /* 11110.10 10...... 0....... ........ */ + switch ((insn >> 20) & 0x3) { + case 0x2: + /* 11110.10 1010.... 0....... ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11110.10 10101111 0....... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:150 */ + disas_t32_extract_disas_t32_Fmt_10(ctx, &u.f_ri, insn); + if (trans_ADR(ctx, &u.f_ri)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:152 */ + disas_t32_extract_s0_rri_12(ctx, &u.f_s_rri_rot, insn); + if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; + return false; + } + return false; + case 0xb: + /* 11110.10 11...... 0....... ........ */ + disas_t32_extract_mov16(ctx, &u.f_ri, insn); + switch ((insn >> 20) & 0x3) { + case 0x0: + /* 11110.10 1100.... 0....... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:162 */ + if (trans_MOVT(ctx, &u.f_ri)) return true; + return false; + } + return false; + case 0xc: + /* 11110.11 00...... 0....... ........ */ + switch (insn & 0x04100020) { + case 0x00000000: + /* 11110011 00.0.... 0....... ..0..... */ + if ((insn & 0x002070c0) == 0x00200000) { + /* 11110011 0010.... 0000.... 000..... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:172 */ + disas_t32_extract_sat16(ctx, &u.f_sat, insn); + if (trans_SSAT16(ctx, &u.f_sat)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:173 */ + disas_t32_extract_sat(ctx, &u.f_sat, insn); + if (trans_SSAT(ctx, &u.f_sat)) return true; + return false; + } + return false; + case 0xd: + /* 11110.11 01...... 0....... ........ */ + switch (insn & 0x04300020) { + case 0x00000000: + /* 11110011 0100.... 0....... ..0..... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:185 */ + disas_t32_extract_bfx(ctx, &u.f_bfx, insn); + if (trans_SBFX(ctx, &u.f_bfx)) return true; + return false; + case 0x00200000: + /* 11110011 0110.... 0....... ..0..... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:189 */ + disas_t32_extract_bfi(ctx, &u.f_bfi, insn); + if (trans_BFCI(ctx, &u.f_bfi)) return true; + return false; + } + return false; + case 0xe: + /* 11110.11 10...... 0....... ........ */ + switch (insn & 0x04100020) { + case 0x00000000: + /* 11110011 10.0.... 0....... ..0..... */ + if ((insn & 0x002070c0) == 0x00200000) { + /* 11110011 1010.... 0000.... 000..... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:176 */ + disas_t32_extract_sat16(ctx, &u.f_sat, insn); + if (trans_USAT16(ctx, &u.f_sat)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:177 */ + disas_t32_extract_sat(ctx, &u.f_sat, insn); + if (trans_USAT(ctx, &u.f_sat)) return true; + return false; + } + return false; + case 0xf: + /* 11110.11 11...... 0....... ........ */ + disas_t32_extract_bfx(ctx, &u.f_bfx, insn); + switch (insn & 0x04300020) { + case 0x00000000: + /* 11110011 1100.... 0....... ..0..... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:186 */ + if (trans_UBFX(ctx, &u.f_bfx)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* 11110... ........ 1....... ........ */ + switch (insn & 0x00005000) { + case 0x00000000: + /* 11110... ........ 10.0.... ........ */ + if ((insn & 0x03800000) == 0x03800000) { + /* 11110.11 1....... 10.0.... ........ */ + if ((insn & 0x047f2f00) == 0x002f0000) { + /* 11110011 10101111 10000000 ........ 
*/ + if ((insn & 0x000000ff) == 0x00000001) { + /* 11110011 10101111 10000000 00000001 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:297 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_YIELD(ctx, &u.f_empty)) return true; + } + if ((insn & 0x000000ff) == 0x00000002) { + /* 11110011 10101111 10000000 00000010 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:298 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_WFE(ctx, &u.f_empty)) return true; + } + if ((insn & 0x000000ff) == 0x00000003) { + /* 11110011 10101111 10000000 00000011 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:299 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_WFI(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:307 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + if ((insn & 0x047f2800) == 0x002f0000) { + /* 11110011 10101111 10000... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:311 */ + disas_t32_extract_disas_t32_Fmt_23(ctx, &u.f_cps, insn); + if (trans_CPS(ctx, &u.f_cps)) return true; + } + if ((insn & 0x047f2f80) == 0x003f0f00) { + /* 11110011 10111111 10001111 0....... */ + if ((insn & 0x0000007f) == 0x0000002f) { + /* 11110011 10111111 10001111 00101111 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:316 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_CLREX(ctx, &u.f_empty)) return true; + } + if ((insn & 0x00000070) == 0x00000040) { + /* 11110011 10111111 10001111 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:317 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_DSB(ctx, &u.f_empty)) return true; + } + if ((insn & 0x00000070) == 0x00000050) { + /* 11110011 10111111 10001111 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:318 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_DMB(ctx, &u.f_empty)) return true; + } + if ((insn & 0x00000070) == 0x00000060) { + /* 11110011 10111111 10001111 0110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:319 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_ISB(ctx, &u.f_empty)) return true; + } + if ((insn & 0x0000007f) == 0x00000070) { + /* 11110011 10111111 10001111 01110000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:320 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_SB(ctx, &u.f_empty)) return true; + } + } + if ((insn & 0x04602000) == 0x00600000) { + /* 11110011 111..... 1000.... ........ */ + if ((insn & 0x000000ef) == 0x00000020) { + /* 11110011 111..... 1000.... 001.0000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:325 */ + disas_t32_extract_disas_t32_Fmt_24(ctx, &u.f_mrs_bank, insn); + if (trans_MRS_bank(ctx, &u.f_mrs_bank)) return true; + } + if ((insn & 0x000f00ff) == 0x000f0000) { + /* 11110011 111.1111 1000.... 
00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:327 */ + disas_t32_extract_disas_t32_Fmt_25(ctx, &u.f_mrs_reg, insn); + if (trans_MRS_reg(ctx, &u.f_mrs_reg)) return true; + } + if ((insn & 0x001f0000) == 0x000f0000) { + /* 11110011 11101111 1000.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:328 */ + disas_t32_extract_disas_t32_Fmt_26(ctx, &u.f_disas_t3227, insn); + if (trans_MRS_v7m(ctx, &u.f_disas_t3227)) return true; + } + } + if ((insn & 0x04602000) == 0x00000000) { + /* 11110011 100..... 1000.... ........ */ + if ((insn & 0x000000ef) == 0x00000020) { + /* 11110011 100..... 1000.... 001.0000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:331 */ + disas_t32_extract_disas_t32_Fmt_27(ctx, &u.f_msr_bank, insn); + if (trans_MSR_bank(ctx, &u.f_msr_bank)) return true; + } + if ((insn & 0x000000ff) == 0x00000000) { + /* 11110011 100..... 1000.... 00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:333 */ + disas_t32_extract_disas_t32_Fmt_28(ctx, &u.f_msr_reg, insn); + if (trans_MSR_reg(ctx, &u.f_msr_reg)) return true; + } + if ((insn & 0x00100300) == 0x00000000) { + /* 11110011 1000.... 1000..00 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:334 */ + disas_t32_extract_disas_t32_Fmt_29(ctx, &u.f_disas_t3228, insn); + if (trans_MSR_v7m(ctx, &u.f_disas_t3228)) return true; + } + } + if ((insn & 0x04702fff) == 0x00400f00) { + /* 11110011 1100.... 10001111 00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:336 */ + disas_t32_extract_disas_t32_Fmt_30(ctx, &u.f_r, insn); + if (trans_BXJ(ctx, &u.f_r)) return true; + } + if ((insn & 0x047f2f00) == 0x005e0f00) { + /* 11110011 11011110 10001111 ........ */ + if ((insn & 0x000000ff) == 0x00000000) { + /* 11110011 11011110 10001111 00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:341 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_ERET(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:342 */ + disas_t32_extract_disas_t32_Fmt_31(ctx, &u.f_s_rri_rot, insn); + if (trans_SUB_rri(ctx, &u.f_s_rri_rot)) return true; + } + if ((insn & 0x04702fff) == 0x04700000) { + /* 11110111 1111.... 10000000 00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:345 */ + disas_t32_extract_disas_t32_Fmt_32(ctx, &u.f_i, insn); + if (trans_SMC(ctx, &u.f_i)) return true; + } + if ((insn & 0x04702000) == 0x04600000) { + /* 11110111 1110.... 1000.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:346 */ + disas_t32_extract_disas_t32_Fmt_33(ctx, &u.f_i, insn); + if (trans_HVC(ctx, &u.f_i)) return true; + } + if ((insn & 0x04702000) == 0x04702000) { + /* 11110111 1111.... 1010.... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:348 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_UDF(ctx, &u.f_empty)) return true; + } + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:350 */ + disas_t32_extract_disas_t32_Fmt_34(ctx, &u.f_ci, insn); + if (trans_B_cond_thumb(ctx, &u.f_ci)) return true; + return false; + case 0x00001000: + /* 11110... ........ 10.1.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:629 */ + disas_t32_extract_branch24(ctx, &u.f_i, insn); + if (trans_B(ctx, &u.f_i)) return true; + return false; + case 0x00004000: + /* 11110... ........ 11.0.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:631 */ + disas_t32_extract_branch24(ctx, &u.f_i, insn); + if (trans_BLX_i(ctx, &u.f_i)) return true; + return false; + case 0x00005000: + /* 11110... ........ 11.1.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:630 */ + disas_t32_extract_branch24(ctx, &u.f_i, insn); + if (trans_BL(ctx, &u.f_i)) return true; + return false; + } + return false; + } + return false; + case 0x1f: + /* 11111... ........ ........ ........ */ + switch ((insn >> 24) & 0x7) { + case 0x0: + /* 11111000 ........ ........ ........ */ + switch ((insn >> 20) & 0x7) { + case 0x0: + /* 11111000 .000.... ........ ........ */ + switch ((insn >> 23) & 0x1) { + case 0x0: + /* 11111000 0000.... ........ ........ */ + switch (insn & 0x00000900) { + case 0x00000000: + /* 11111000 0000.... ....0..0 ........ */ + disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); + switch (insn & 0x000006c0) { + case 0x00000000: + /* 11111000 0000.... ....0000 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:368 */ + if (trans_STRB_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x00000800: + /* 11111000 0000.... ....1..0 ........ */ + switch ((insn >> 9) & 0x3) { + case 0x2: + /* 11111000 0000.... ....1100 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:370 */ + disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); + if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x3: + /* 11111000 0000.... ....1110 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:371 */ + disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); + if (trans_STRBT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x00000900: + /* 11111000 0000.... ....1..1 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:369 */ + disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); + if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x1: + /* 11111000 1000.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:372 */ + disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); + if (trans_STRB_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x1: + /* 11111000 .001.... ........ ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11111000 .0011111 ........ ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111000 .0011111 1111.... ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:389 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:390 */ + disas_t32_extract_ldst_ri_lit(ctx, &u.f_ldst_ri, insn); + if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800000) == 0x00800000) { + /* 11111000 1001.... ........ ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111000 1001.... 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:393 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:394 */ + disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); + if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800900) == 0x00000900) { + /* 11111000 0001.... ....1..1 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:396 */ + disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); + if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800f00) == 0x00000c00) { + /* 11111000 0001.... ....1100 ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111000 0001.... 11111100 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:398 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:399 */ + disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); + if (trans_LDRB_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800f00) == 0x00000e00) { + /* 11111000 0001.... ....1110 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:401 */ + disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); + if (trans_LDRBT_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800fc0) == 0x00000000) { + /* 11111000 0001.... ....0000 00...... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111000 0001.... 11110000 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:403 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:404 */ + disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); + if (trans_LDRB_rr(ctx, &u.f_ldst_rr)) return true; + } + return false; + case 0x2: + /* 11111000 .010.... ........ ........ */ + switch ((insn >> 23) & 0x1) { + case 0x0: + /* 11111000 0010.... ........ ........ */ + switch (insn & 0x00000900) { + case 0x00000000: + /* 11111000 0010.... ....0..0 ........ */ + disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); + switch (insn & 0x000006c0) { + case 0x00000000: + /* 11111000 0010.... ....0000 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:374 */ + if (trans_STRH_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x00000800: + /* 11111000 0010.... ....1..0 ........ */ + switch ((insn >> 9) & 0x3) { + case 0x2: + /* 11111000 0010.... ....1100 ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:376 */ + disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); + if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x3: + /* 11111000 0010.... ....1110 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:377 */ + disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); + if (trans_STRHT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x00000900: + /* 11111000 0010.... ....1..1 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:375 */ + disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); + if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x1: + /* 11111000 1010.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:378 */ + disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); + if (trans_STRH_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x3: + /* 11111000 .011.... ........ ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11111000 .0111111 ........ ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111000 .0111111 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:409 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:410 */ + disas_t32_extract_ldst_ri_lit(ctx, &u.f_ldst_ri, insn); + if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800000) == 0x00800000) { + /* 11111000 1011.... ........ ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111000 1011.... 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:413 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:414 */ + disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); + if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800900) == 0x00000900) { + /* 11111000 0011.... ....1..1 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:416 */ + disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); + if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800f00) == 0x00000c00) { + /* 11111000 0011.... ....1100 ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111000 0011.... 11111100 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:418 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:419 */ + disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); + if (trans_LDRH_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800f00) == 0x00000e00) { + /* 11111000 0011.... ....1110 ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:421 */ + disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); + if (trans_LDRHT_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800fc0) == 0x00000000) { + /* 11111000 0011.... ....0000 00...... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111000 0011.... 11110000 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:423 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:424 */ + disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); + if (trans_LDRH_rr(ctx, &u.f_ldst_rr)) return true; + } + return false; + case 0x4: + /* 11111000 .100.... ........ ........ */ + switch ((insn >> 23) & 0x1) { + case 0x0: + /* 11111000 0100.... ........ ........ */ + switch (insn & 0x00000900) { + case 0x00000000: + /* 11111000 0100.... ....0..0 ........ */ + disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); + switch (insn & 0x000006c0) { + case 0x00000000: + /* 11111000 0100.... ....0000 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:380 */ + if (trans_STR_rr(ctx, &u.f_ldst_rr)) return true; + return false; + } + return false; + case 0x00000800: + /* 11111000 0100.... ....1..0 ........ */ + switch ((insn >> 9) & 0x3) { + case 0x2: + /* 11111000 0100.... ....1100 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:382 */ + disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); + if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + case 0x3: + /* 11111000 0100.... ....1110 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:383 */ + disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); + if (trans_STRT_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x00000900: + /* 11111000 0100.... ....1..1 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:381 */ + disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); + if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x1: + /* 11111000 1100.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:384 */ + disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); + if (trans_STR_ri(ctx, &u.f_ldst_ri)) return true; + return false; + } + return false; + case 0x5: + /* 11111000 .101.... ........ ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11111000 .1011111 ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:428 */ + disas_t32_extract_ldst_ri_lit(ctx, &u.f_ldst_ri, insn); + if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800000) == 0x00800000) { + /* 11111000 1101.... ........ ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:429 */ + disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); + if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800900) == 0x00000900) { + /* 11111000 0101.... ....1..1 ........ 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:430 */ + disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); + if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800f00) == 0x00000c00) { + /* 11111000 0101.... ....1100 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:431 */ + disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); + if (trans_LDR_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800f00) == 0x00000e00) { + /* 11111000 0101.... ....1110 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:432 */ + disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); + if (trans_LDRT_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800fc0) == 0x00000000) { + /* 11111000 0101.... ....0000 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:433 */ + disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); + if (trans_LDR_rr(ctx, &u.f_ldst_rr)) return true; + } + return false; + } + return false; + case 0x1: + /* 11111001 ........ ........ ........ */ + switch ((insn >> 20) & 0x7) { + case 0x1: + /* 11111001 .001.... ........ ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11111001 .0011111 ........ ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111001 .0011111 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:438 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:439 */ + disas_t32_extract_ldst_ri_lit(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800000) == 0x00800000) { + /* 11111001 1001.... ........ ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111001 1001.... 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:442 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:443 */ + disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800900) == 0x00000900) { + /* 11111001 0001.... ....1..1 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:445 */ + disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800f00) == 0x00000c00) { + /* 11111001 0001.... ....1100 ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111001 0001.... 11111100 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:447 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:448 */ + disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSB_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800f00) == 0x00000e00) { + /* 11111001 0001.... ....1110 ........ 
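The ldst_ri_* extractor suffixes in this load/store block appear to track the T32 addressing forms: _lit is the literal form (Rn == 1111, tested first), _pos the positive 12-bit immediate (bit 23 set), _idx the pre/post-indexed writeback form, _neg the negative 8-bit immediate, _unp the unprivileged LDRT/STRT-style variant, and _rr the register-offset form. A hypothetical helper mirroring the masks used above, assuming the surrounding opcode bits already matched:

    #include <stdint.h>

    // Classify the short-immediate subforms via the 1PUW nibble at bits 11..8.
    static const char *t32_ldst_form(uint32_t insn)
    {
        uint32_t puw = (insn >> 8) & 0xf;       // valid when bit 11 is set
        if ((insn >> 23) & 1)    return "imm12 positive";    // _pos
        if (puw == 0xc)          return "imm8 negative";     // _neg
        if (puw == 0xe)          return "unprivileged";      // _unp
        if ((puw & 0x9) == 0x9)  return "pre/post indexed";  // _idx
        if ((insn & 0xfc0) == 0) return "register offset";   // _rr
        return "unallocated here";
    }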
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:450 */ + disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSBT_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800fc0) == 0x00000000) { + /* 11111001 0001.... ....0000 00...... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111001 0001.... 11110000 00...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:452 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:453 */ + disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); + if (trans_LDRSB_rr(ctx, &u.f_ldst_rr)) return true; + } + return false; + case 0x3: + /* 11111001 .011.... ........ ........ */ + if ((insn & 0x000f0000) == 0x000f0000) { + /* 11111001 .0111111 ........ ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111001 .0111111 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:459 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:460 */ + disas_t32_extract_ldst_ri_lit(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800000) == 0x00800000) { + /* 11111001 1011.... ........ ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111001 1011.... 1111.... ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:463 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:464 */ + disas_t32_extract_ldst_ri_pos(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800900) == 0x00000900) { + /* 11111001 0011.... ....1..1 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:466 */ + disas_t32_extract_ldst_ri_idx(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800f00) == 0x00000c00) { + /* 11111001 0011.... ....1100 ........ */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111001 0011.... 11111100 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:468 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:469 */ + disas_t32_extract_ldst_ri_neg(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSH_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800f00) == 0x00000e00) { + /* 11111001 0011.... ....1110 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:471 */ + disas_t32_extract_ldst_ri_unp(ctx, &u.f_ldst_ri, insn); + if (trans_LDRSHT_ri(ctx, &u.f_ldst_ri)) return true; + } + if ((insn & 0x00800fc0) == 0x00000000) { + /* 11111001 0011.... ....0000 00...... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111001 0011.... 11110000 00...... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:473 */ + disas_t32_extract_disas_t32_Fmt_22(ctx, &u.f_empty, insn); + if (trans_NOP(ctx, &u.f_empty)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:474 */ + disas_t32_extract_ldst_rr(ctx, &u.f_ldst_rr, insn); + if (trans_LDRSH_rr(ctx, &u.f_ldst_rr)) return true; + } + return false; + } + return false; + case 0x2: + /* 11111010 ........ ........ ........ */ + switch (insn & 0x0080f0c0) { + case 0x0000f000: + /* 11111010 0....... 1111.... 00...... */ + disas_t32_extract_disas_t32_Fmt_4(ctx, &u.f_s_rrr_shr, insn); + switch ((insn >> 4) & 0x3) { + case 0x0: + /* 11111010 0....... 1111.... 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:93 */ + if (trans_MOV_rxrr(ctx, &u.f_s_rrr_shr)) return true; + return false; + } + return false; + case 0x0000f080: + /* 11111010 0....... 1111.... 10...... */ + disas_t32_extract_rrr_rot(ctx, &u.f_rrr_rot, insn); + switch ((insn >> 20) & 0x7) { + case 0x0: + /* 11111010 0000.... 1111.... 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:596 */ + if (trans_SXTAH(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x1: + /* 11111010 0001.... 1111.... 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:597 */ + if (trans_UXTAH(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x2: + /* 11111010 0010.... 1111.... 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:598 */ + if (trans_SXTAB16(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x3: + /* 11111010 0011.... 1111.... 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:599 */ + if (trans_UXTAB16(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x4: + /* 11111010 0100.... 1111.... 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:600 */ + if (trans_SXTAB(ctx, &u.f_rrr_rot)) return true; + return false; + case 0x5: + /* 11111010 0101.... 1111.... 10...... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:601 */ + if (trans_UXTAB(ctx, &u.f_rrr_rot)) return true; + return false; + } + return false; + case 0x0080f000: + /* 11111010 1....... 1111.... 00...... */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00700030) { + case 0x00000000: + /* 11111010 1000.... 1111.... 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:550 */ + if (trans_SADD8(ctx, &u.f_rrr)) return true; + return false; + case 0x00000010: + /* 11111010 1000.... 1111.... 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:551 */ + if (trans_QADD8(ctx, &u.f_rrr)) return true; + return false; + case 0x00000020: + /* 11111010 1000.... 1111.... 0010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:552 */ + if (trans_SHADD8(ctx, &u.f_rrr)) return true; + return false; + case 0x00100000: + /* 11111010 1001.... 1111.... 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:557 */ + if (trans_SADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100010: + /* 11111010 1001.... 1111.... 0001.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:558 */ + if (trans_QADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100020: + /* 11111010 1001.... 1111.... 0010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:559 */ + if (trans_SHADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00200000: + /* 11111010 1010.... 1111.... 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:564 */ + if (trans_SASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00200010: + /* 11111010 1010.... 1111.... 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:565 */ + if (trans_QASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00200020: + /* 11111010 1010.... 1111.... 0010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:566 */ + if (trans_SHASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00400000: + /* 11111010 1100.... 1111.... 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:571 */ + if (trans_SSUB8(ctx, &u.f_rrr)) return true; + return false; + case 0x00400010: + /* 11111010 1100.... 1111.... 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:572 */ + if (trans_QSUB8(ctx, &u.f_rrr)) return true; + return false; + case 0x00400020: + /* 11111010 1100.... 1111.... 0010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:573 */ + if (trans_SHSUB8(ctx, &u.f_rrr)) return true; + return false; + case 0x00500000: + /* 11111010 1101.... 1111.... 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:578 */ + if (trans_SSUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00500010: + /* 11111010 1101.... 1111.... 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:579 */ + if (trans_QSUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00500020: + /* 11111010 1101.... 1111.... 0010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:580 */ + if (trans_SHSUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00600000: + /* 11111010 1110.... 1111.... 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:585 */ + if (trans_SSAX(ctx, &u.f_rrr)) return true; + return false; + case 0x00600010: + /* 11111010 1110.... 1111.... 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:586 */ + if (trans_QSAX(ctx, &u.f_rrr)) return true; + return false; + case 0x00600020: + /* 11111010 1110.... 1111.... 0010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:587 */ + if (trans_SHSAX(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x0080f040: + /* 11111010 1....... 1111.... 01...... */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + switch (insn & 0x00700030) { + case 0x00000000: + /* 11111010 1000.... 1111.... 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:553 */ + if (trans_UADD8(ctx, &u.f_rrr)) return true; + return false; + case 0x00000010: + /* 11111010 1000.... 1111.... 0101.... 
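For reading the extract helpers used throughout these files, the two QEMU bitops primitives behave as sketched below. This is a reference rendering of their documented semantics, not the project's implementation, which lives in QEMU's include/qemu/bitops.h with extra assertions:

    #include <stdint.h>

    static inline uint32_t extract32_sketch(uint32_t value, int start, int length)
    {
        // 'length' bits of 'value' beginning at bit 'start' (1 <= length <= 32).
        return (value >> start) & (~0U >> (32 - length));
    }

    static inline uint32_t deposit32_sketch(uint32_t value, int start, int length,
                                            uint32_t fieldval)
    {
        // 'value' with the bits at [start, start+length) replaced by 'fieldval'.
        uint32_t mask = (~0U >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

Nested calls of the two are how the generator reassembles operand fields that an encoding splits across the instruction word.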
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:554 */ + if (trans_UQADD8(ctx, &u.f_rrr)) return true; + return false; + case 0x00000020: + /* 11111010 1000.... 1111.... 0110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:555 */ + if (trans_UHADD8(ctx, &u.f_rrr)) return true; + return false; + case 0x00100000: + /* 11111010 1001.... 1111.... 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:560 */ + if (trans_UADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100010: + /* 11111010 1001.... 1111.... 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:561 */ + if (trans_UQADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00100020: + /* 11111010 1001.... 1111.... 0110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:562 */ + if (trans_UHADD16(ctx, &u.f_rrr)) return true; + return false; + case 0x00200000: + /* 11111010 1010.... 1111.... 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:567 */ + if (trans_UASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00200010: + /* 11111010 1010.... 1111.... 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:568 */ + if (trans_UQASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00200020: + /* 11111010 1010.... 1111.... 0110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:569 */ + if (trans_UHASX(ctx, &u.f_rrr)) return true; + return false; + case 0x00400000: + /* 11111010 1100.... 1111.... 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:574 */ + if (trans_USUB8(ctx, &u.f_rrr)) return true; + return false; + case 0x00400010: + /* 11111010 1100.... 1111.... 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:575 */ + if (trans_UQSUB8(ctx, &u.f_rrr)) return true; + return false; + case 0x00400020: + /* 11111010 1100.... 1111.... 0110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:576 */ + if (trans_UHSUB8(ctx, &u.f_rrr)) return true; + return false; + case 0x00500000: + /* 11111010 1101.... 1111.... 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:581 */ + if (trans_USUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00500010: + /* 11111010 1101.... 1111.... 0101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:582 */ + if (trans_UQSUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00500020: + /* 11111010 1101.... 1111.... 0110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:583 */ + if (trans_UHSUB16(ctx, &u.f_rrr)) return true; + return false; + case 0x00600000: + /* 11111010 1110.... 1111.... 0100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:588 */ + if (trans_USAX(ctx, &u.f_rrr)) return true; + return false; + case 0x00600010: + /* 11111010 1110.... 1111.... 0101.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:589 */ + if (trans_UQSAX(ctx, &u.f_rrr)) return true; + return false; + case 0x00600020: + /* 11111010 1110.... 1111.... 0110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:590 */ + if (trans_UHSAX(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x0080f080: + /* 11111010 1....... 1111.... 10...... */ + switch (insn & 0x00700030) { + case 0x00000000: + /* 11111010 1000.... 1111.... 1000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:262 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_QADD(ctx, &u.f_rrr)) return true; + return false; + case 0x00000010: + /* 11111010 1000.... 1111.... 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:264 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_QDADD(ctx, &u.f_rrr)) return true; + return false; + case 0x00000020: + /* 11111010 1000.... 1111.... 1010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:263 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_QSUB(ctx, &u.f_rrr)) return true; + return false; + case 0x00000030: + /* 11111010 1000.... 1111.... 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:265 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_QDSUB(ctx, &u.f_rrr)) return true; + return false; + case 0x00100000: + /* 11111010 1001.... 1111.... 1000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:277 */ + disas_t32_extract_rdm(ctx, &u.f_rr, insn); + if (trans_REV(ctx, &u.f_rr)) return true; + return false; + case 0x00100010: + /* 11111010 1001.... 1111.... 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:278 */ + disas_t32_extract_rdm(ctx, &u.f_rr, insn); + if (trans_REV16(ctx, &u.f_rr)) return true; + return false; + case 0x00100020: + /* 11111010 1001.... 1111.... 1010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:279 */ + disas_t32_extract_rdm(ctx, &u.f_rr, insn); + if (trans_RBIT(ctx, &u.f_rr)) return true; + return false; + case 0x00100030: + /* 11111010 1001.... 1111.... 1011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:280 */ + disas_t32_extract_rdm(ctx, &u.f_rr, insn); + if (trans_REVSH(ctx, &u.f_rr)) return true; + return false; + case 0x00200000: + /* 11111010 1010.... 1111.... 1000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:274 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_SEL(ctx, &u.f_rrr)) return true; + return false; + case 0x00300000: + /* 11111010 1011.... 1111.... 1000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:281 */ + disas_t32_extract_rdm(ctx, &u.f_rr, insn); + if (trans_CLZ(ctx, &u.f_rr)) return true; + return false; + case 0x00400000: + /* 11111010 1100.... 1111.... 1000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:267 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_CRC32B(ctx, &u.f_rrr)) return true; + return false; + case 0x00400010: + /* 11111010 1100.... 1111.... 1001.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:268 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_CRC32H(ctx, &u.f_rrr)) return true; + return false; + case 0x00400020: + /* 11111010 1100.... 1111.... 1010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:269 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_CRC32W(ctx, &u.f_rrr)) return true; + return false; + case 0x00500000: + /* 11111010 1101.... 1111.... 1000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:270 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_CRC32CB(ctx, &u.f_rrr)) return true; + return false; + case 0x00500010: + /* 11111010 1101.... 1111.... 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:271 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_CRC32CH(ctx, &u.f_rrr)) return true; + return false; + case 0x00500020: + /* 11111010 1101.... 1111.... 1010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:272 */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + if (trans_CRC32CW(ctx, &u.f_rrr)) return true; + return false; + } + return false; + } + return false; + case 0x3: + /* 11111011 ........ ........ ........ */ + switch (insn & 0x00f000f0) { + case 0x00000000: + /* 11111011 0000.... ........ 0000.... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111011 0000.... 1111.... 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:201 */ + disas_t32_extract_s0_rn0dm(ctx, &u.f_s_rrrr, insn); + if (trans_MUL(ctx, &u.f_s_rrrr)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:202 */ + disas_t32_extract_s0_rnadm(ctx, &u.f_s_rrrr, insn); + if (trans_MLA(ctx, &u.f_s_rrrr)) return true; + return false; + case 0x00000010: + /* 11111011 0000.... ........ 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:204 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_MLS(ctx, &u.f_rrrr)) return true; + return false; + case 0x00100000: + /* 11111011 0001.... ........ 0000.... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111011 0001.... 1111.... 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:219 */ + disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); + if (trans_SMULBB(ctx, &u.f_rrrr)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:220 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLABB(ctx, &u.f_rrrr)) return true; + return false; + case 0x00100010: + /* 11111011 0001.... ........ 0001.... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111011 0001.... 1111.... 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:223 */ + disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); + if (trans_SMULBT(ctx, &u.f_rrrr)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:224 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLABT(ctx, &u.f_rrrr)) return true; + return false; + case 0x00100020: + /* 11111011 0001.... ........ 0010.... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111011 0001.... 1111.... 0010.... 
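One idiom worth flagging in the multiply group here: overlapping encodings are attempted most-specific first. Ra == 0b1111 turns MLA's encoding into MUL, so the generated code first tries trans_MUL under the (insn & 0x0000f000) == 0x0000f000 guard and only falls through to trans_MLA when that match, or the trans function itself, declines. In miniature, with the extractor calls elided:

    // Overlap resolution, most specific pattern first (sketch).
    if ((insn & 0x0000f000) == 0x0000f000) {   // Ra == 15: MUL form
        if (trans_MUL(ctx, &u.f_s_rrrr)) {
            return true;                       // specific pattern taken
        }
        // trans_MUL declined: fall through to the general pattern.
    }
    return trans_MLA(ctx, &u.f_s_rrrr);        // MLA covers Ra != 15

The same shape recurs for SMULxx versus SMLAxx and SMULWx versus SMLAWx in the cases nearby.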
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:227 */ + disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); + if (trans_SMULTB(ctx, &u.f_rrrr)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:228 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLATB(ctx, &u.f_rrrr)) return true; + return false; + case 0x00100030: + /* 11111011 0001.... ........ 0011.... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111011 0001.... 1111.... 0011.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:231 */ + disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); + if (trans_SMULTT(ctx, &u.f_rrrr)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:232 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLATT(ctx, &u.f_rrrr)) return true; + return false; + case 0x00200000: + /* 11111011 0010.... ........ 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:242 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLAD(ctx, &u.f_rrrr)) return true; + return false; + case 0x00200010: + /* 11111011 0010.... ........ 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:243 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLADX(ctx, &u.f_rrrr)) return true; + return false; + case 0x00300000: + /* 11111011 0011.... ........ 0000.... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111011 0011.... 1111.... 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:211 */ + disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); + if (trans_SMULWB(ctx, &u.f_rrrr)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:212 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLAWB(ctx, &u.f_rrrr)) return true; + return false; + case 0x00300010: + /* 11111011 0011.... ........ 0001.... */ + if ((insn & 0x0000f000) == 0x0000f000) { + /* 11111011 0011.... 1111.... 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:215 */ + disas_t32_extract_rn0dm(ctx, &u.f_rrrr, insn); + if (trans_SMULWT(ctx, &u.f_rrrr)) return true; + } + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:216 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLAWT(ctx, &u.f_rrrr)) return true; + return false; + case 0x00400000: + /* 11111011 0100.... ........ 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:244 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLSD(ctx, &u.f_rrrr)) return true; + return false; + case 0x00400010: + /* 11111011 0100.... ........ 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:245 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLSDX(ctx, &u.f_rrrr)) return true; + return false; + case 0x00500000: + /* 11111011 0101.... ........ 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:252 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMMLA(ctx, &u.f_rrrr)) return true; + return false; + case 0x00500010: + /* 11111011 0101.... ........ 0001.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:253 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMMLAR(ctx, &u.f_rrrr)) return true; + return false; + case 0x00600000: + /* 11111011 0110.... ........ 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:254 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMMLS(ctx, &u.f_rrrr)) return true; + return false; + case 0x00600010: + /* 11111011 0110.... ........ 0001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:255 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMMLSR(ctx, &u.f_rrrr)) return true; + return false; + case 0x00700000: + /* 11111011 0111.... ........ 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:240 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_USADA8(ctx, &u.f_rrrr)) return true; + return false; + case 0x00800000: + /* 11111011 1000.... ........ 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:205 */ + disas_t32_extract_s0_rnadm(ctx, &u.f_s_rrrr, insn); + if (trans_SMULL(ctx, &u.f_s_rrrr)) return true; + return false; + case 0x009000f0: + /* 11111011 1001.... ........ 1111.... */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + switch ((insn >> 12) & 0xf) { + case 0xf: + /* 11111011 1001.... 1111.... 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:257 */ + if (trans_SDIV(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00a00000: + /* 11111011 1010.... ........ 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:206 */ + disas_t32_extract_s0_rnadm(ctx, &u.f_s_rrrr, insn); + if (trans_UMULL(ctx, &u.f_s_rrrr)) return true; + return false; + case 0x00b000f0: + /* 11111011 1011.... ........ 1111.... */ + disas_t32_extract_rndm(ctx, &u.f_rrr, insn); + switch ((insn >> 12) & 0xf) { + case 0xf: + /* 11111011 1011.... 1111.... 1111.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:258 */ + if (trans_UDIV(ctx, &u.f_rrr)) return true; + return false; + } + return false; + case 0x00c00000: + /* 11111011 1100.... ........ 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:207 */ + disas_t32_extract_s0_rnadm(ctx, &u.f_s_rrrr, insn); + if (trans_SMLAL(ctx, &u.f_s_rrrr)) return true; + return false; + case 0x00c00080: + /* 11111011 1100.... ........ 1000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:234 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLALBB(ctx, &u.f_rrrr)) return true; + return false; + case 0x00c00090: + /* 11111011 1100.... ........ 1001.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:235 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLALBT(ctx, &u.f_rrrr)) return true; + return false; + case 0x00c000a0: + /* 11111011 1100.... ........ 1010.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:236 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLALTB(ctx, &u.f_rrrr)) return true; + return false; + case 0x00c000b0: + /* 11111011 1100.... ........ 1011.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:237 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLALTT(ctx, &u.f_rrrr)) return true; + return false; + case 0x00c000c0: + /* 11111011 1100.... ........ 1100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:247 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLALD(ctx, &u.f_rrrr)) return true; + return false; + case 0x00c000d0: + /* 11111011 1100.... ........ 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:248 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLALDX(ctx, &u.f_rrrr)) return true; + return false; + case 0x00d000c0: + /* 11111011 1101.... ........ 1100.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:249 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLSLD(ctx, &u.f_rrrr)) return true; + return false; + case 0x00d000d0: + /* 11111011 1101.... ........ 1101.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:250 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_SMLSLDX(ctx, &u.f_rrrr)) return true; + return false; + case 0x00e00000: + /* 11111011 1110.... ........ 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:208 */ + disas_t32_extract_s0_rnadm(ctx, &u.f_s_rrrr, insn); + if (trans_UMLAL(ctx, &u.f_s_rrrr)) return true; + return false; + case 0x00e00060: + /* 11111011 1110.... ........ 0110.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/t32.decode:209 */ + disas_t32_extract_rnadm(ctx, &u.f_rrrr, insn); + if (trans_UMAAL(ctx, &u.f_rrrr)) return true; + return false; + } + return false; + } + return false; + } + return false; +} diff --git a/qemu/target/arm/decode-vfp-uncond.inc.c b/qemu/target/arm/decode-vfp-uncond.inc.c new file mode 100644 index 00000000..95f9da2a --- /dev/null +++ b/qemu/target/arm/decode-vfp-uncond.inc.c @@ -0,0 +1,225 @@ +/* This file is autogenerated by scripts/decodetree.py. 
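Every file in this generated set follows the same contract, sketched here as a reading aid: one arg_* struct per operand format, one extract helper per encoding variant, and one trans_* predicate per instruction, implemented by the hand-written translate code that includes this file. A hand-written miniature with hypothetical names:

    typedef struct { int vd, vm; } arg_example;

    // Provided by the including translator; may reject, e.g. on a missing
    // CPU feature, in which case decoding falls through or ends in UNDEF.
    static bool trans_EXAMPLE(DisasContext *ctx, arg_example *a);

    static bool decode_example(DisasContext *ctx, uint32_t insn)
    {
        arg_example a;
        if ((insn & 0xff000f00) != 0xee000a00) {  // hypothetical fixed bits
            return false;                         // not this pattern
        }
        a.vd = extract32(insn, 12, 4);
        a.vm = extract32(insn, 0, 4);
        return trans_EXAMPLE(ctx, &a);
    }

In the VFP extractors below, the nested deposit32/extract32 pairs rebuild register numbers: the single-precision shape deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)) yields (Vm << 1) | M, while the double-precision shape deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)) yields (M << 4) | Vm.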
*/ + +typedef struct { + int vd; + int vm; + int vn; +} arg_disas_vfp_uncond0; + +typedef struct { + int cc; + int dp; + int vd; + int vm; + int vn; +} arg_disas_vfp_uncond1; + +typedef struct { + int dp; + int rm; + int vd; + int vm; +} arg_disas_vfp_uncond2; + +typedef struct { + int dp; + int op; + int rm; + int vd; + int vm; +} arg_disas_vfp_uncond3; + +typedef arg_disas_vfp_uncond1 arg_VSEL; +static bool trans_VSEL(DisasContext *ctx, arg_VSEL *a); +typedef arg_disas_vfp_uncond0 arg_VMAXNM_sp; +static bool trans_VMAXNM_sp(DisasContext *ctx, arg_VMAXNM_sp *a); +typedef arg_disas_vfp_uncond0 arg_VMINNM_sp; +static bool trans_VMINNM_sp(DisasContext *ctx, arg_VMINNM_sp *a); +typedef arg_disas_vfp_uncond0 arg_VMAXNM_dp; +static bool trans_VMAXNM_dp(DisasContext *ctx, arg_VMAXNM_dp *a); +typedef arg_disas_vfp_uncond0 arg_VMINNM_dp; +static bool trans_VMINNM_dp(DisasContext *ctx, arg_VMINNM_dp *a); +typedef arg_disas_vfp_uncond2 arg_VRINT; +static bool trans_VRINT(DisasContext *ctx, arg_VRINT *a); +typedef arg_disas_vfp_uncond3 arg_VCVT; +static bool trans_VCVT(DisasContext *ctx, arg_VCVT *a); + +static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_2(DisasContext *ctx, arg_disas_vfp_uncond1 *a, uint32_t insn) +{ + a->cc = extract32(insn, 20, 2); + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); + a->vn = deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 4)); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->dp = 0; +} + +static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_3(DisasContext *ctx, arg_disas_vfp_uncond1 *a, uint32_t insn) +{ + a->cc = extract32(insn, 20, 2); + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); + a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); + a->dp = 1; +} + +static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_4(DisasContext *ctx, arg_disas_vfp_uncond2 *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 2); + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->dp = 0; +} + +static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_5(DisasContext *ctx, arg_disas_vfp_uncond2 *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 2); + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); + a->dp = 1; +} + +static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_6(DisasContext *ctx, arg_disas_vfp_uncond3 *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 2); + a->op = extract32(insn, 7, 1); + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->dp = 0; +} + +static void disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_7(DisasContext *ctx, arg_disas_vfp_uncond3 *a, uint32_t insn) +{ + a->rm = extract32(insn, 16, 2); + a->op = extract32(insn, 7, 1); + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->dp = 1; +} + +static void disas_vfp_uncond_extract_vfp_dnm_d(DisasContext *ctx, arg_disas_vfp_uncond0 *a, uint32_t insn) +{ + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); + a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); + a->vd = 
deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); +} + +static void disas_vfp_uncond_extract_vfp_dnm_s(DisasContext *ctx, arg_disas_vfp_uncond0 *a, uint32_t insn) +{ + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); + a->vn = deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 4)); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); +} + +static bool disas_vfp_uncond(DisasContext *ctx, uint32_t insn) +{ + union { + arg_disas_vfp_uncond0 f_disas_vfp_uncond0; + arg_disas_vfp_uncond1 f_disas_vfp_uncond1; + arg_disas_vfp_uncond2 f_disas_vfp_uncond2; + arg_disas_vfp_uncond3 f_disas_vfp_uncond3; + } u; + + switch (insn & 0xff800f50) { + case 0xfe000a00: + /* 11111110 0....... ....1010 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:47 */ + disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_2(ctx, &u.f_disas_vfp_uncond1, insn); + if (trans_VSEL(ctx, &u.f_disas_vfp_uncond1)) return true; + return false; + case 0xfe000b00: + /* 11111110 0....... ....1011 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:49 */ + disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_3(ctx, &u.f_disas_vfp_uncond1, insn); + if (trans_VSEL(ctx, &u.f_disas_vfp_uncond1)) return true; + return false; + case 0xfe800a00: + /* 11111110 1....... ....1010 .0.0.... */ + disas_vfp_uncond_extract_vfp_dnm_s(ctx, &u.f_disas_vfp_uncond0, insn); + switch ((insn >> 20) & 0x3) { + case 0x0: + /* 11111110 1.00.... ....1010 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:52 */ + if (trans_VMAXNM_sp(ctx, &u.f_disas_vfp_uncond0)) return true; + return false; + } + return false; + case 0xfe800a40: + /* 11111110 1....... ....1010 .1.0.... */ + switch ((insn >> 20) & 0x3) { + case 0x0: + /* 11111110 1.00.... ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:53 */ + disas_vfp_uncond_extract_vfp_dnm_s(ctx, &u.f_disas_vfp_uncond0, insn); + if (trans_VMINNM_sp(ctx, &u.f_disas_vfp_uncond0)) return true; + return false; + case 0x3: + /* 11111110 1.11.... ....1010 .1.0.... */ + switch ((insn >> 18) & 0x3) { + case 0x2: + /* 11111110 1.1110.. ....1010 .1.0.... */ + disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_4(ctx, &u.f_disas_vfp_uncond2, insn); + switch ((insn >> 7) & 0x1) { + case 0x0: + /* 11111110 1.1110.. ....1010 01.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:58 */ + if (trans_VRINT(ctx, &u.f_disas_vfp_uncond2)) return true; + return false; + } + return false; + case 0x3: + /* 11111110 1.1111.. ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:64 */ + disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_6(ctx, &u.f_disas_vfp_uncond3, insn); + if (trans_VCVT(ctx, &u.f_disas_vfp_uncond3)) return true; + return false; + } + return false; + } + return false; + case 0xfe800b00: + /* 11111110 1....... ....1011 .0.0.... */ + disas_vfp_uncond_extract_vfp_dnm_d(ctx, &u.f_disas_vfp_uncond0, insn); + switch ((insn >> 20) & 0x3) { + case 0x0: + /* 11111110 1.00.... ....1011 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:55 */ + if (trans_VMAXNM_dp(ctx, &u.f_disas_vfp_uncond0)) return true; + return false; + } + return false; + case 0xfe800b40: + /* 11111110 1....... 
....1011 .1.0.... */ + switch ((insn >> 20) & 0x3) { + case 0x0: + /* 11111110 1.00.... ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:56 */ + disas_vfp_uncond_extract_vfp_dnm_d(ctx, &u.f_disas_vfp_uncond0, insn); + if (trans_VMINNM_dp(ctx, &u.f_disas_vfp_uncond0)) return true; + return false; + case 0x3: + /* 11111110 1.11.... ....1011 .1.0.... */ + switch ((insn >> 18) & 0x3) { + case 0x2: + /* 11111110 1.1110.. ....1011 .1.0.... */ + disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_5(ctx, &u.f_disas_vfp_uncond2, insn); + switch ((insn >> 7) & 0x1) { + case 0x0: + /* 11111110 1.1110.. ....1011 01.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:60 */ + if (trans_VRINT(ctx, &u.f_disas_vfp_uncond2)) return true; + return false; + } + return false; + case 0x3: + /* 11111110 1.1111.. ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp-uncond.decode:66 */ + disas_vfp_uncond_extract_disas_vfp_uncond_Fmt_7(ctx, &u.f_disas_vfp_uncond3, insn); + if (trans_VCVT(ctx, &u.f_disas_vfp_uncond3)) return true; + return false; + } + return false; + } + return false; + } + return false; +} diff --git a/qemu/target/arm/decode-vfp.inc.c b/qemu/target/arm/decode-vfp.inc.c new file mode 100644 index 00000000..b04ab8b0 --- /dev/null +++ b/qemu/target/arm/decode-vfp.inc.c @@ -0,0 +1,1250 @@ +/* This file is autogenerated by scripts/decodetree.py. */ + +typedef struct { + int vd; + int vm; + int vn; +} arg_disas_vfp0; + +typedef struct { + int vd; + int vm; +} arg_disas_vfp1; + +typedef struct { + int imm; + int vd; +} arg_disas_vfp10; + +typedef struct { + int e; + int vd; + int vm; + int z; +} arg_disas_vfp11; + +typedef struct { + int t; + int vd; + int vm; +} arg_disas_vfp12; + +typedef struct { + int s; + int vd; + int vm; +} arg_disas_vfp13; + +typedef struct { + int imm; + int opc; + int vd; +} arg_disas_vfp14; + +typedef struct { + int rz; + int s; + int vd; + int vm; +} arg_disas_vfp15; + +typedef struct { + int l; + int rn; +} arg_disas_vfp16; + +typedef struct { + int index; + int rt; + int size; + int u; + int vn; +} arg_disas_vfp2; + +typedef struct { + int index; + int rt; + int size; + int vn; +} arg_disas_vfp3; + +typedef struct { + int b; + int e; + int q; + int rt; + int vn; +} arg_disas_vfp4; + +typedef struct { + int l; + int reg; + int rt; +} arg_disas_vfp5; + +typedef struct { + int l; + int rt; + int vn; +} arg_disas_vfp6; + +typedef struct { + int op; + int rt; + int rt2; + int vm; +} arg_disas_vfp7; + +typedef struct { + int imm; + int l; + int rn; + int u; + int vd; +} arg_disas_vfp8; + +typedef struct { + int imm; + int l; + int p; + int rn; + int u; + int vd; + int w; +} arg_disas_vfp9; + +typedef arg_disas_vfp2 arg_VMOV_to_gp; +static bool trans_VMOV_to_gp(DisasContext *ctx, arg_VMOV_to_gp *a); +typedef arg_disas_vfp3 arg_VMOV_from_gp; +static bool trans_VMOV_from_gp(DisasContext *ctx, arg_VMOV_from_gp *a); +typedef arg_disas_vfp4 arg_VDUP; +static bool trans_VDUP(DisasContext *ctx, arg_VDUP *a); +typedef arg_disas_vfp5 arg_VMSR_VMRS; +static bool trans_VMSR_VMRS(DisasContext *ctx, arg_VMSR_VMRS *a); +typedef arg_disas_vfp6 arg_VMOV_single; +static bool trans_VMOV_single(DisasContext *ctx, arg_VMOV_single *a); +typedef arg_disas_vfp7 arg_VMOV_64_sp; +static bool trans_VMOV_64_sp(DisasContext *ctx, arg_VMOV_64_sp *a); +typedef arg_disas_vfp7 arg_VMOV_64_dp; +static bool trans_VMOV_64_dp(DisasContext *ctx, 
arg_VMOV_64_dp *a); +typedef arg_disas_vfp8 arg_VLDR_VSTR_sp; +static bool trans_VLDR_VSTR_sp(DisasContext *ctx, arg_VLDR_VSTR_sp *a); +typedef arg_disas_vfp8 arg_VLDR_VSTR_dp; +static bool trans_VLDR_VSTR_dp(DisasContext *ctx, arg_VLDR_VSTR_dp *a); +typedef arg_disas_vfp9 arg_VLDM_VSTM_sp; +static bool trans_VLDM_VSTM_sp(DisasContext *ctx, arg_VLDM_VSTM_sp *a); +typedef arg_disas_vfp9 arg_VLDM_VSTM_dp; +static bool trans_VLDM_VSTM_dp(DisasContext *ctx, arg_VLDM_VSTM_dp *a); +typedef arg_disas_vfp0 arg_VMLA_sp; +static bool trans_VMLA_sp(DisasContext *ctx, arg_VMLA_sp *a); +typedef arg_disas_vfp0 arg_VMLA_dp; +static bool trans_VMLA_dp(DisasContext *ctx, arg_VMLA_dp *a); +typedef arg_disas_vfp0 arg_VMLS_sp; +static bool trans_VMLS_sp(DisasContext *ctx, arg_VMLS_sp *a); +typedef arg_disas_vfp0 arg_VMLS_dp; +static bool trans_VMLS_dp(DisasContext *ctx, arg_VMLS_dp *a); +typedef arg_disas_vfp0 arg_VNMLS_sp; +static bool trans_VNMLS_sp(DisasContext *ctx, arg_VNMLS_sp *a); +typedef arg_disas_vfp0 arg_VNMLS_dp; +static bool trans_VNMLS_dp(DisasContext *ctx, arg_VNMLS_dp *a); +typedef arg_disas_vfp0 arg_VNMLA_sp; +static bool trans_VNMLA_sp(DisasContext *ctx, arg_VNMLA_sp *a); +typedef arg_disas_vfp0 arg_VNMLA_dp; +static bool trans_VNMLA_dp(DisasContext *ctx, arg_VNMLA_dp *a); +typedef arg_disas_vfp0 arg_VMUL_sp; +static bool trans_VMUL_sp(DisasContext *ctx, arg_VMUL_sp *a); +typedef arg_disas_vfp0 arg_VMUL_dp; +static bool trans_VMUL_dp(DisasContext *ctx, arg_VMUL_dp *a); +typedef arg_disas_vfp0 arg_VNMUL_sp; +static bool trans_VNMUL_sp(DisasContext *ctx, arg_VNMUL_sp *a); +typedef arg_disas_vfp0 arg_VNMUL_dp; +static bool trans_VNMUL_dp(DisasContext *ctx, arg_VNMUL_dp *a); +typedef arg_disas_vfp0 arg_VADD_sp; +static bool trans_VADD_sp(DisasContext *ctx, arg_VADD_sp *a); +typedef arg_disas_vfp0 arg_VADD_dp; +static bool trans_VADD_dp(DisasContext *ctx, arg_VADD_dp *a); +typedef arg_disas_vfp0 arg_VSUB_sp; +static bool trans_VSUB_sp(DisasContext *ctx, arg_VSUB_sp *a); +typedef arg_disas_vfp0 arg_VSUB_dp; +static bool trans_VSUB_dp(DisasContext *ctx, arg_VSUB_dp *a); +typedef arg_disas_vfp0 arg_VDIV_sp; +static bool trans_VDIV_sp(DisasContext *ctx, arg_VDIV_sp *a); +typedef arg_disas_vfp0 arg_VDIV_dp; +static bool trans_VDIV_dp(DisasContext *ctx, arg_VDIV_dp *a); +typedef arg_disas_vfp0 arg_VFMA_sp; +static bool trans_VFMA_sp(DisasContext *ctx, arg_VFMA_sp *a); +typedef arg_disas_vfp0 arg_VFMS_sp; +static bool trans_VFMS_sp(DisasContext *ctx, arg_VFMS_sp *a); +typedef arg_disas_vfp0 arg_VFNMA_sp; +static bool trans_VFNMA_sp(DisasContext *ctx, arg_VFNMA_sp *a); +typedef arg_disas_vfp0 arg_VFNMS_sp; +static bool trans_VFNMS_sp(DisasContext *ctx, arg_VFNMS_sp *a); +typedef arg_disas_vfp0 arg_VFMA_dp; +static bool trans_VFMA_dp(DisasContext *ctx, arg_VFMA_dp *a); +typedef arg_disas_vfp0 arg_VFMS_dp; +static bool trans_VFMS_dp(DisasContext *ctx, arg_VFMS_dp *a); +typedef arg_disas_vfp0 arg_VFNMA_dp; +static bool trans_VFNMA_dp(DisasContext *ctx, arg_VFNMA_dp *a); +typedef arg_disas_vfp0 arg_VFNMS_dp; +static bool trans_VFNMS_dp(DisasContext *ctx, arg_VFNMS_dp *a); +typedef arg_disas_vfp10 arg_VMOV_imm_sp; +static bool trans_VMOV_imm_sp(DisasContext *ctx, arg_VMOV_imm_sp *a); +typedef arg_disas_vfp10 arg_VMOV_imm_dp; +static bool trans_VMOV_imm_dp(DisasContext *ctx, arg_VMOV_imm_dp *a); +typedef arg_disas_vfp1 arg_VMOV_reg_sp; +static bool trans_VMOV_reg_sp(DisasContext *ctx, arg_VMOV_reg_sp *a); +typedef arg_disas_vfp1 arg_VMOV_reg_dp; +static bool trans_VMOV_reg_dp(DisasContext *ctx, arg_VMOV_reg_dp 
*a); +typedef arg_disas_vfp1 arg_VABS_sp; +static bool trans_VABS_sp(DisasContext *ctx, arg_VABS_sp *a); +typedef arg_disas_vfp1 arg_VABS_dp; +static bool trans_VABS_dp(DisasContext *ctx, arg_VABS_dp *a); +typedef arg_disas_vfp1 arg_VNEG_sp; +static bool trans_VNEG_sp(DisasContext *ctx, arg_VNEG_sp *a); +typedef arg_disas_vfp1 arg_VNEG_dp; +static bool trans_VNEG_dp(DisasContext *ctx, arg_VNEG_dp *a); +typedef arg_disas_vfp1 arg_VSQRT_sp; +static bool trans_VSQRT_sp(DisasContext *ctx, arg_VSQRT_sp *a); +typedef arg_disas_vfp1 arg_VSQRT_dp; +static bool trans_VSQRT_dp(DisasContext *ctx, arg_VSQRT_dp *a); +typedef arg_disas_vfp11 arg_VCMP_sp; +static bool trans_VCMP_sp(DisasContext *ctx, arg_VCMP_sp *a); +typedef arg_disas_vfp11 arg_VCMP_dp; +static bool trans_VCMP_dp(DisasContext *ctx, arg_VCMP_dp *a); +typedef arg_disas_vfp12 arg_VCVT_f32_f16; +static bool trans_VCVT_f32_f16(DisasContext *ctx, arg_VCVT_f32_f16 *a); +typedef arg_disas_vfp12 arg_VCVT_f64_f16; +static bool trans_VCVT_f64_f16(DisasContext *ctx, arg_VCVT_f64_f16 *a); +typedef arg_disas_vfp12 arg_VCVT_f16_f32; +static bool trans_VCVT_f16_f32(DisasContext *ctx, arg_VCVT_f16_f32 *a); +typedef arg_disas_vfp12 arg_VCVT_f16_f64; +static bool trans_VCVT_f16_f64(DisasContext *ctx, arg_VCVT_f16_f64 *a); +typedef arg_disas_vfp1 arg_VRINTR_sp; +static bool trans_VRINTR_sp(DisasContext *ctx, arg_VRINTR_sp *a); +typedef arg_disas_vfp1 arg_VRINTR_dp; +static bool trans_VRINTR_dp(DisasContext *ctx, arg_VRINTR_dp *a); +typedef arg_disas_vfp1 arg_VRINTZ_sp; +static bool trans_VRINTZ_sp(DisasContext *ctx, arg_VRINTZ_sp *a); +typedef arg_disas_vfp1 arg_VRINTZ_dp; +static bool trans_VRINTZ_dp(DisasContext *ctx, arg_VRINTZ_dp *a); +typedef arg_disas_vfp1 arg_VRINTX_sp; +static bool trans_VRINTX_sp(DisasContext *ctx, arg_VRINTX_sp *a); +typedef arg_disas_vfp1 arg_VRINTX_dp; +static bool trans_VRINTX_dp(DisasContext *ctx, arg_VRINTX_dp *a); +typedef arg_disas_vfp1 arg_VCVT_sp; +static bool trans_VCVT_sp(DisasContext *ctx, arg_VCVT_sp *a); +typedef arg_disas_vfp1 arg_VCVT_dp; +static bool trans_VCVT_dp(DisasContext *ctx, arg_VCVT_dp *a); +typedef arg_disas_vfp13 arg_VCVT_int_sp; +static bool trans_VCVT_int_sp(DisasContext *ctx, arg_VCVT_int_sp *a); +typedef arg_disas_vfp13 arg_VCVT_int_dp; +static bool trans_VCVT_int_dp(DisasContext *ctx, arg_VCVT_int_dp *a); +typedef arg_disas_vfp1 arg_VJCVT; +static bool trans_VJCVT(DisasContext *ctx, arg_VJCVT *a); +typedef arg_disas_vfp14 arg_VCVT_fix_sp; +static bool trans_VCVT_fix_sp(DisasContext *ctx, arg_VCVT_fix_sp *a); +typedef arg_disas_vfp14 arg_VCVT_fix_dp; +static bool trans_VCVT_fix_dp(DisasContext *ctx, arg_VCVT_fix_dp *a); +typedef arg_disas_vfp15 arg_VCVT_sp_int; +static bool trans_VCVT_sp_int(DisasContext *ctx, arg_VCVT_sp_int *a); +typedef arg_disas_vfp15 arg_VCVT_dp_int; +static bool trans_VCVT_dp_int(DisasContext *ctx, arg_VCVT_dp_int *a); +typedef arg_disas_vfp16 arg_VLLDM_VLSTM; +static bool trans_VLLDM_VLSTM(DisasContext *ctx, arg_VLLDM_VLSTM *a); + +static void disas_vfp_extract_disas_vfp_Fmt_10(DisasContext *ctx, arg_disas_vfp3 *a, uint32_t insn) +{ + a->rt = extract32(insn, 12, 4); + a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); + a->size = 1; + a->index = deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 21, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_11(DisasContext *ctx, arg_disas_vfp3 *a, uint32_t insn) +{ + a->index = extract32(insn, 21, 1); + a->rt = extract32(insn, 12, 4); + a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 
7, 1)); + a->size = 2; +} + +static void disas_vfp_extract_disas_vfp_Fmt_12(DisasContext *ctx, arg_disas_vfp4 *a, uint32_t insn) +{ + a->b = extract32(insn, 22, 1); + a->q = extract32(insn, 21, 1); + a->rt = extract32(insn, 12, 4); + a->e = extract32(insn, 5, 1); + a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_13(DisasContext *ctx, arg_disas_vfp5 *a, uint32_t insn) +{ + a->l = extract32(insn, 20, 1); + a->reg = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); +} + +static void disas_vfp_extract_disas_vfp_Fmt_14(DisasContext *ctx, arg_disas_vfp6 *a, uint32_t insn) +{ + a->l = extract32(insn, 20, 1); + a->rt = extract32(insn, 12, 4); + a->vn = deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 4)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_15(DisasContext *ctx, arg_disas_vfp7 *a, uint32_t insn) +{ + a->op = extract32(insn, 20, 1); + a->rt2 = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_16(DisasContext *ctx, arg_disas_vfp7 *a, uint32_t insn) +{ + a->op = extract32(insn, 20, 1); + a->rt2 = extract32(insn, 16, 4); + a->rt = extract32(insn, 12, 4); + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_17(DisasContext *ctx, arg_disas_vfp8 *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->l = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->imm = extract32(insn, 0, 8); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_18(DisasContext *ctx, arg_disas_vfp8 *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->l = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->imm = extract32(insn, 0, 8); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_19(DisasContext *ctx, arg_disas_vfp9 *a, uint32_t insn) +{ + a->w = extract32(insn, 21, 1); + a->l = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->imm = extract32(insn, 0, 8); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->p = 0; + a->u = 1; +} + +static void disas_vfp_extract_disas_vfp_Fmt_20(DisasContext *ctx, arg_disas_vfp9 *a, uint32_t insn) +{ + a->w = extract32(insn, 21, 1); + a->l = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->imm = extract32(insn, 0, 8); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); + a->p = 0; + a->u = 1; +} + +static void disas_vfp_extract_disas_vfp_Fmt_21(DisasContext *ctx, arg_disas_vfp9 *a, uint32_t insn) +{ + a->l = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->imm = extract32(insn, 0, 8); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->p = 1; + a->u = 0; + a->w = 1; +} + +static void disas_vfp_extract_disas_vfp_Fmt_22(DisasContext *ctx, arg_disas_vfp9 *a, uint32_t insn) +{ + a->l = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); + a->imm = extract32(insn, 0, 8); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); + a->p = 1; + a->u = 0; + a->w = 1; +} + +static void disas_vfp_extract_disas_vfp_Fmt_23(DisasContext *ctx, arg_disas_vfp10 *a, uint32_t insn) +{ + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->imm = 
deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 16, 4)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_24(DisasContext *ctx, arg_disas_vfp10 *a, uint32_t insn) +{ + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); + a->imm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 16, 4)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_25(DisasContext *ctx, arg_disas_vfp11 *a, uint32_t insn) +{ + a->z = extract32(insn, 16, 1); + a->e = extract32(insn, 7, 1); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_26(DisasContext *ctx, arg_disas_vfp11 *a, uint32_t insn) +{ + a->z = extract32(insn, 16, 1); + a->e = extract32(insn, 7, 1); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_27(DisasContext *ctx, arg_disas_vfp12 *a, uint32_t insn) +{ + a->t = extract32(insn, 7, 1); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_28(DisasContext *ctx, arg_disas_vfp12 *a, uint32_t insn) +{ + a->t = extract32(insn, 7, 1); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_29(DisasContext *ctx, arg_disas_vfp12 *a, uint32_t insn) +{ + a->t = extract32(insn, 7, 1); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_30(DisasContext *ctx, arg_disas_vfp13 *a, uint32_t insn) +{ + a->s = extract32(insn, 7, 1); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_31(DisasContext *ctx, arg_disas_vfp13 *a, uint32_t insn) +{ + a->s = extract32(insn, 7, 1); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_32(DisasContext *ctx, arg_disas_vfp14 *a, uint32_t insn) +{ + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->imm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); + a->opc = deposit32(deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 1)), 2, 30, extract32(insn, 18, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_33(DisasContext *ctx, arg_disas_vfp14 *a, uint32_t insn) +{ + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); + a->imm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); + a->opc = deposit32(deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 1)), 2, 30, extract32(insn, 18, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_34(DisasContext *ctx, arg_disas_vfp15 *a, uint32_t insn) +{ + a->s = extract32(insn, 16, 1); + a->rz = extract32(insn, 7, 1); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); +} + +static void 
disas_vfp_extract_disas_vfp_Fmt_35(DisasContext *ctx, arg_disas_vfp15 *a, uint32_t insn) +{ + a->s = extract32(insn, 16, 1); + a->rz = extract32(insn, 7, 1); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_36(DisasContext *ctx, arg_disas_vfp16 *a, uint32_t insn) +{ + a->l = extract32(insn, 20, 1); + a->rn = extract32(insn, 16, 4); +} + +static void disas_vfp_extract_disas_vfp_Fmt_6(DisasContext *ctx, arg_disas_vfp2 *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rt = extract32(insn, 12, 4); + a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); + a->size = 0; + a->index = deposit32(extract32(insn, 5, 2), 2, 30, extract32(insn, 21, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_7(DisasContext *ctx, arg_disas_vfp2 *a, uint32_t insn) +{ + a->u = extract32(insn, 23, 1); + a->rt = extract32(insn, 12, 4); + a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); + a->size = 1; + a->index = deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 21, 1)); +} + +static void disas_vfp_extract_disas_vfp_Fmt_8(DisasContext *ctx, arg_disas_vfp2 *a, uint32_t insn) +{ + a->index = extract32(insn, 21, 1); + a->rt = extract32(insn, 12, 4); + a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); + a->size = 2; + a->u = 0; +} + +static void disas_vfp_extract_disas_vfp_Fmt_9(DisasContext *ctx, arg_disas_vfp3 *a, uint32_t insn) +{ + a->rt = extract32(insn, 12, 4); + a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); + a->size = 0; + a->index = deposit32(extract32(insn, 5, 2), 2, 30, extract32(insn, 21, 1)); +} + +static void disas_vfp_extract_vfp_dm_dd(DisasContext *ctx, arg_disas_vfp1 *a, uint32_t insn) +{ + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); +} + +static void disas_vfp_extract_vfp_dm_ds(DisasContext *ctx, arg_disas_vfp1 *a, uint32_t insn) +{ + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); +} + +static void disas_vfp_extract_vfp_dm_sd(DisasContext *ctx, arg_disas_vfp1 *a, uint32_t insn) +{ + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); +} + +static void disas_vfp_extract_vfp_dm_ss(DisasContext *ctx, arg_disas_vfp1 *a, uint32_t insn) +{ + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); +} + +static void disas_vfp_extract_vfp_dnm_d(DisasContext *ctx, arg_disas_vfp0 *a, uint32_t insn) +{ + a->vm = deposit32(extract32(insn, 0, 4), 4, 28, extract32(insn, 5, 1)); + a->vn = deposit32(extract32(insn, 16, 4), 4, 28, extract32(insn, 7, 1)); + a->vd = deposit32(extract32(insn, 12, 4), 4, 28, extract32(insn, 22, 1)); +} + +static void disas_vfp_extract_vfp_dnm_s(DisasContext *ctx, arg_disas_vfp0 *a, uint32_t insn) +{ + a->vm = deposit32(extract32(insn, 5, 1), 1, 31, extract32(insn, 0, 4)); + a->vn = deposit32(extract32(insn, 7, 1), 1, 31, extract32(insn, 16, 4)); + a->vd = deposit32(extract32(insn, 22, 1), 1, 31, extract32(insn, 12, 4)); +} + +static bool disas_vfp(DisasContext *ctx, uint32_t insn) +{ + union { + arg_disas_vfp0 f_disas_vfp0; + arg_disas_vfp1 
f_disas_vfp1; + arg_disas_vfp10 f_disas_vfp10; + arg_disas_vfp11 f_disas_vfp11; + arg_disas_vfp12 f_disas_vfp12; + arg_disas_vfp13 f_disas_vfp13; + arg_disas_vfp14 f_disas_vfp14; + arg_disas_vfp15 f_disas_vfp15; + arg_disas_vfp16 f_disas_vfp16; + arg_disas_vfp2 f_disas_vfp2; + arg_disas_vfp3 f_disas_vfp3; + arg_disas_vfp4 f_disas_vfp4; + arg_disas_vfp5 f_disas_vfp5; + arg_disas_vfp6 f_disas_vfp6; + arg_disas_vfp7 f_disas_vfp7; + arg_disas_vfp8 f_disas_vfp8; + arg_disas_vfp9 f_disas_vfp9; + } u; + + switch (insn & 0x0f000f00) { + case 0x0c000a00: + /* ....1100 ........ ....1010 ........ */ + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....1100 0....... ....1010 ........ */ + switch (insn & 0x006000d0) { + case 0x00200000: + /* ....1100 001..... ....1010 00.0.... */ + disas_vfp_extract_disas_vfp_Fmt_36(ctx, &u.f_disas_vfp16, insn); + switch (insn & 0xf000f02f) { + case 0xe0000000: + /* 11101100 001..... 00001010 00000000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:217 */ + if (trans_VLLDM_VLSTM(ctx, &u.f_disas_vfp16)) return true; + return false; + } + return false; + case 0x00400010: + /* ....1100 010..... ....1010 00.1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:79 */ + disas_vfp_extract_disas_vfp_Fmt_15(ctx, &u.f_disas_vfp7, insn); + if (trans_VMOV_64_sp(ctx, &u.f_disas_vfp7)) return true; + return false; + } + return false; + case 0x1: + /* ....1100 1....... ....1010 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:95 */ + disas_vfp_extract_disas_vfp_Fmt_19(ctx, &u.f_disas_vfp9, insn); + if (trans_VLDM_VSTM_sp(ctx, &u.f_disas_vfp9)) return true; + return false; + } + return false; + case 0x0c000b00: + /* ....1100 ........ ....1011 ........ */ + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....1100 0....... ....1011 ........ */ + disas_vfp_extract_disas_vfp_Fmt_16(ctx, &u.f_disas_vfp7, insn); + switch (insn & 0x006000d0) { + case 0x00400010: + /* ....1100 010..... ....1011 00.1.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:80 */ + if (trans_VMOV_64_dp(ctx, &u.f_disas_vfp7)) return true; + return false; + } + return false; + case 0x1: + /* ....1100 1....... ....1011 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:97 */ + disas_vfp_extract_disas_vfp_Fmt_20(ctx, &u.f_disas_vfp9, insn); + if (trans_VLDM_VSTM_dp(ctx, &u.f_disas_vfp9)) return true; + return false; + } + return false; + case 0x0d000a00: + /* ....1101 ........ ....1010 ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* ....1101 ..0..... ....1010 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:84 */ + disas_vfp_extract_disas_vfp_Fmt_17(ctx, &u.f_disas_vfp8, insn); + if (trans_VLDR_VSTR_sp(ctx, &u.f_disas_vfp8)) return true; + return false; + case 0x1: + /* ....1101 ..1..... ....1010 ........ */ + disas_vfp_extract_disas_vfp_Fmt_21(ctx, &u.f_disas_vfp9, insn); + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....1101 0.1..... ....1010 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:100 */ + if (trans_VLDM_VSTM_sp(ctx, &u.f_disas_vfp9)) return true; + return false; + } + return false; + } + return false; + case 0x0d000b00: + /* ....1101 ........ ....1011 ........ */ + switch ((insn >> 21) & 0x1) { + case 0x0: + /* ....1101 ..0..... ....1011 ........ 
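(VLDR/VSTR, double-precision)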
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:85 */ + disas_vfp_extract_disas_vfp_Fmt_18(ctx, &u.f_disas_vfp8, insn); + if (trans_VLDR_VSTR_dp(ctx, &u.f_disas_vfp8)) return true; + return false; + case 0x1: + /* ....1101 ..1..... ....1011 ........ */ + disas_vfp_extract_disas_vfp_Fmt_22(ctx, &u.f_disas_vfp9, insn); + switch ((insn >> 23) & 0x1) { + case 0x0: + /* ....1101 0.1..... ....1011 ........ */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:102 */ + if (trans_VLDM_VSTM_dp(ctx, &u.f_disas_vfp9)) return true; + return false; + } + return false; + } + return false; + case 0x0e000a00: + /* ....1110 ........ ....1010 ........ */ + switch (insn & 0x00a00050) { + case 0x00000000: + /* ....1110 0.0..... ....1010 .0.0.... */ + disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....1110 0.00.... ....1010 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:106 */ + if (trans_VMLA_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x1: + /* ....1110 0.01.... ....1010 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:112 */ + if (trans_VNMLS_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + } + return false; + case 0x00000010: + /* ....1110 0.0..... ....1010 .0.1.... */ + disas_vfp_extract_disas_vfp_Fmt_14(ctx, &u.f_disas_vfp6, insn); + switch (insn & 0x0040002f) { + case 0x00000000: + /* ....1110 000..... ....1010 .0010000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:77 */ + if (trans_VMOV_single(ctx, &u.f_disas_vfp6)) return true; + return false; + } + return false; + case 0x00000040: + /* ....1110 0.0..... ....1010 .1.0.... */ + disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....1110 0.00.... ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:109 */ + if (trans_VMLS_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x1: + /* ....1110 0.01.... ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:115 */ + if (trans_VNMLA_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + } + return false; + case 0x00200000: + /* ....1110 0.1..... ....1010 .0.0.... */ + disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....1110 0.10.... ....1010 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:118 */ + if (trans_VMUL_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x1: + /* ....1110 0.11.... ....1010 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:124 */ + if (trans_VADD_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + } + return false; + case 0x00200040: + /* ....1110 0.1..... ....1010 .1.0.... */ + disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....1110 0.10.... ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:121 */ + if (trans_VNMUL_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x1: + /* ....1110 0.11.... ....1010 .1.0.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:127 */ + if (trans_VSUB_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + } + return false; + case 0x00800000: + /* ....1110 1.0..... ....1010 .0.0.... */ + disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....1110 1.00.... ....1010 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:130 */ + if (trans_VDIV_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x1: + /* ....1110 1.01.... ....1010 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:135 */ + if (trans_VFNMA_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + } + return false; + case 0x00800040: + /* ....1110 1.0..... ....1010 .1.0.... */ + disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); + switch ((insn >> 20) & 0x1) { + case 0x1: + /* ....1110 1.01.... ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:136 */ + if (trans_VFNMS_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + } + return false; + case 0x00a00000: + /* ....1110 1.1..... ....1010 .0.0.... */ + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....1110 1.10.... ....1010 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:133 */ + disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); + if (trans_VFMA_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x1: + /* ....1110 1.11.... ....1010 .0.0.... */ + disas_vfp_extract_disas_vfp_Fmt_23(ctx, &u.f_disas_vfp10, insn); + switch (insn & 0x000000a0) { + case 0x00000000: + /* ....1110 1.11.... ....1010 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:143 */ + if (trans_VMOV_imm_sp(ctx, &u.f_disas_vfp10)) return true; + return false; + } + return false; + } + return false; + case 0x00a00010: + /* ....1110 1.1..... ....1010 .0.1.... */ + disas_vfp_extract_disas_vfp_Fmt_13(ctx, &u.f_disas_vfp5, insn); + switch (insn & 0x004000af) { + case 0x00400000: + /* ....1110 111..... ....1010 00010000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:76 */ + if (trans_VMSR_VMRS(ctx, &u.f_disas_vfp5)) return true; + return false; + } + return false; + case 0x00a00040: + /* ....1110 1.1..... ....1010 .1.0.... */ + switch ((insn >> 20) & 0x1) { + case 0x0: + /* ....1110 1.10.... ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:134 */ + disas_vfp_extract_vfp_dnm_s(ctx, &u.f_disas_vfp0, insn); + if (trans_VFMS_sp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x1: + /* ....1110 1.11.... ....1010 .1.0.... */ + switch (insn & 0x000a0000) { + case 0x00000000: + /* ....1110 1.110.0. ....1010 .1.0.... */ + switch ((insn >> 18) & 0x1) { + case 0x0: + /* ....1110 1.11000. ....1010 .1.0.... */ + disas_vfp_extract_vfp_dm_ss(ctx, &u.f_disas_vfp1, insn); + switch (insn & 0x00010080) { + case 0x00000000: + /* ....1110 1.110000 ....1010 01.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:148 */ + if (trans_VMOV_reg_sp(ctx, &u.f_disas_vfp1)) return true; + return false; + case 0x00000080: + /* ....1110 1.110000 ....1010 11.0.... 
*/ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:151 */ + if (trans_VABS_sp(ctx, &u.f_disas_vfp1)) return true; + return false; + case 0x00010000: + /* ....1110 1.110001 ....1010 01.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:154 */ + if (trans_VNEG_sp(ctx, &u.f_disas_vfp1)) return true; + return false; + case 0x00010080: + /* ....1110 1.110001 ....1010 11.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:157 */ + if (trans_VSQRT_sp(ctx, &u.f_disas_vfp1)) return true; + return false; + } + return false; + case 0x1: + /* ....1110 1.11010. ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:160 */ + disas_vfp_extract_disas_vfp_Fmt_25(ctx, &u.f_disas_vfp11, insn); + if (trans_VCMP_sp(ctx, &u.f_disas_vfp11)) return true; + return false; + } + return false; + case 0x00020000: + /* ....1110 1.110.1. ....1010 .1.0.... */ + switch (insn & 0x00050000) { + case 0x00000000: + /* ....1110 1.110010 ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:166 */ + disas_vfp_extract_disas_vfp_Fmt_27(ctx, &u.f_disas_vfp12, insn); + if (trans_VCVT_f32_f16(ctx, &u.f_disas_vfp12)) return true; + return false; + case 0x00010000: + /* ....1110 1.110011 ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:173 */ + disas_vfp_extract_disas_vfp_Fmt_27(ctx, &u.f_disas_vfp12, insn); + if (trans_VCVT_f16_f32(ctx, &u.f_disas_vfp12)) return true; + return false; + case 0x00040000: + /* ....1110 1.110110 ....1010 .1.0.... */ + disas_vfp_extract_vfp_dm_ss(ctx, &u.f_disas_vfp1, insn); + switch ((insn >> 7) & 0x1) { + case 0x0: + /* ....1110 1.110110 ....1010 01.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:178 */ + if (trans_VRINTR_sp(ctx, &u.f_disas_vfp1)) return true; + return false; + case 0x1: + /* ....1110 1.110110 ....1010 11.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:181 */ + if (trans_VRINTZ_sp(ctx, &u.f_disas_vfp1)) return true; + return false; + } + return false; + case 0x00050000: + /* ....1110 1.110111 ....1010 .1.0.... */ + switch ((insn >> 7) & 0x1) { + case 0x0: + /* ....1110 1.110111 ....1010 01.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:184 */ + disas_vfp_extract_vfp_dm_ss(ctx, &u.f_disas_vfp1, insn); + if (trans_VRINTX_sp(ctx, &u.f_disas_vfp1)) return true; + return false; + case 0x1: + /* ....1110 1.110111 ....1010 11.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:189 */ + disas_vfp_extract_vfp_dm_ds(ctx, &u.f_disas_vfp1, insn); + if (trans_VCVT_sp(ctx, &u.f_disas_vfp1)) return true; + return false; + } + return false; + } + return false; + case 0x00080000: + /* ....1110 1.111.0. ....1010 .1.0.... */ + switch ((insn >> 18) & 0x1) { + case 0x0: + /* ....1110 1.11100. ....1010 .1.0.... */ + disas_vfp_extract_disas_vfp_Fmt_30(ctx, &u.f_disas_vfp13, insn); + switch ((insn >> 16) & 0x1) { + case 0x0: + /* ....1110 1.111000 ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:193 */ + if (trans_VCVT_int_sp(ctx, &u.f_disas_vfp13)) return true; + return false; + } + return false; + case 0x1: + /* ....1110 1.11110. 
....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:212 */ + disas_vfp_extract_disas_vfp_Fmt_34(ctx, &u.f_disas_vfp15, insn); + if (trans_VCVT_sp_int(ctx, &u.f_disas_vfp15)) return true; + return false; + } + return false; + case 0x000a0000: + /* ....1110 1.111.1. ....1010 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:206 */ + disas_vfp_extract_disas_vfp_Fmt_32(ctx, &u.f_disas_vfp14, insn); + if (trans_VCVT_fix_sp(ctx, &u.f_disas_vfp14)) return true; + return false; + } + return false; + } + return false; + } + return false; + case 0x0e000b00: + /* ....1110 ........ ....1011 ........ */ + switch (insn & 0x00100010) { + case 0x00000000: + /* ....1110 ...0.... ....1011 ...0.... */ + disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); + switch (insn & 0x00a00040) { + case 0x00000000: + /* ....1110 0.00.... ....1011 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:107 */ + if (trans_VMLA_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00000040: + /* ....1110 0.00.... ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:110 */ + if (trans_VMLS_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00200000: + /* ....1110 0.10.... ....1011 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:119 */ + if (trans_VMUL_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00200040: + /* ....1110 0.10.... ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:122 */ + if (trans_VNMUL_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00800000: + /* ....1110 1.00.... ....1011 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:131 */ + if (trans_VDIV_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00a00000: + /* ....1110 1.10.... ....1011 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:138 */ + if (trans_VFMA_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00a00040: + /* ....1110 1.10.... ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:139 */ + if (trans_VFMS_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + } + return false; + case 0x00000010: + /* ....1110 ...0.... ....1011 ...1.... */ + switch (insn & 0x0080000f) { + case 0x00000000: + /* ....1110 0..0.... ....1011 ...10000 */ + switch ((insn >> 22) & 0x1) { + case 0x0: + /* ....1110 00.0.... ....1011 ...10000 */ + switch ((insn >> 5) & 0x1) { + case 0x0: + /* ....1110 00.0.... ....1011 ..010000 */ + disas_vfp_extract_disas_vfp_Fmt_11(ctx, &u.f_disas_vfp3, insn); + switch ((insn >> 6) & 0x1) { + case 0x0: + /* ....1110 00.0.... ....1011 .0010000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:70 */ + if (trans_VMOV_from_gp(ctx, &u.f_disas_vfp3)) return true; + return false; + } + return false; + case 0x1: + /* ....1110 00.0.... 
....1011 ..110000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:68 */ + disas_vfp_extract_disas_vfp_Fmt_10(ctx, &u.f_disas_vfp3, insn); + if (trans_VMOV_from_gp(ctx, &u.f_disas_vfp3)) return true; + return false; + } + return false; + case 0x1: + /* ....1110 01.0.... ....1011 ...10000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:66 */ + disas_vfp_extract_disas_vfp_Fmt_9(ctx, &u.f_disas_vfp3, insn); + if (trans_VMOV_from_gp(ctx, &u.f_disas_vfp3)) return true; + return false; + } + return false; + case 0x00800000: + /* ....1110 1..0.... ....1011 ...10000 */ + disas_vfp_extract_disas_vfp_Fmt_12(ctx, &u.f_disas_vfp4, insn); + switch ((insn >> 6) & 0x1) { + case 0x0: + /* ....1110 1..0.... ....1011 .0.10000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:73 */ + if (trans_VDUP(ctx, &u.f_disas_vfp4)) return true; + return false; + } + return false; + } + return false; + case 0x00100000: + /* ....1110 ...1.... ....1011 ...0.... */ + switch (insn & 0x00a00040) { + case 0x00000000: + /* ....1110 0.01.... ....1011 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:113 */ + disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); + if (trans_VNMLS_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00000040: + /* ....1110 0.01.... ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:116 */ + disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); + if (trans_VNMLA_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00200000: + /* ....1110 0.11.... ....1011 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:125 */ + disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); + if (trans_VADD_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00200040: + /* ....1110 0.11.... ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:128 */ + disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); + if (trans_VSUB_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00800000: + /* ....1110 1.01.... ....1011 .0.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:140 */ + disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); + if (trans_VFNMA_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00800040: + /* ....1110 1.01.... ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:141 */ + disas_vfp_extract_vfp_dnm_d(ctx, &u.f_disas_vfp0, insn); + if (trans_VFNMS_dp(ctx, &u.f_disas_vfp0)) return true; + return false; + case 0x00a00000: + /* ....1110 1.11.... ....1011 .0.0.... */ + disas_vfp_extract_disas_vfp_Fmt_24(ctx, &u.f_disas_vfp10, insn); + switch (insn & 0x000000a0) { + case 0x00000000: + /* ....1110 1.11.... ....1011 0000.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:145 */ + if (trans_VMOV_imm_dp(ctx, &u.f_disas_vfp10)) return true; + return false; + } + return false; + case 0x00a00040: + /* ....1110 1.11.... ....1011 .1.0.... */ + switch (insn & 0x000a0000) { + case 0x00000000: + /* ....1110 1.110.0. ....1011 .1.0.... */ + switch ((insn >> 18) & 0x1) { + case 0x0: + /* ....1110 1.11000. ....1011 .1.0.... 
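(the VMOV_reg/VABS/VNEG/VSQRT double-precision group)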
*/ + disas_vfp_extract_vfp_dm_dd(ctx, &u.f_disas_vfp1, insn); + switch (insn & 0x00010080) { + case 0x00000000: + /* ....1110 1.110000 ....1011 01.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:149 */ + if (trans_VMOV_reg_dp(ctx, &u.f_disas_vfp1)) return true; + return false; + case 0x00000080: + /* ....1110 1.110000 ....1011 11.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:152 */ + if (trans_VABS_dp(ctx, &u.f_disas_vfp1)) return true; + return false; + case 0x00010000: + /* ....1110 1.110001 ....1011 01.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:155 */ + if (trans_VNEG_dp(ctx, &u.f_disas_vfp1)) return true; + return false; + case 0x00010080: + /* ....1110 1.110001 ....1011 11.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:158 */ + if (trans_VSQRT_dp(ctx, &u.f_disas_vfp1)) return true; + return false; + } + return false; + case 0x1: + /* ....1110 1.11010. ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:162 */ + disas_vfp_extract_disas_vfp_Fmt_26(ctx, &u.f_disas_vfp11, insn); + if (trans_VCMP_dp(ctx, &u.f_disas_vfp11)) return true; + return false; + } + return false; + case 0x00020000: + /* ....1110 1.110.1. ....1011 .1.0.... */ + switch (insn & 0x00050000) { + case 0x00000000: + /* ....1110 1.110010 ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:168 */ + disas_vfp_extract_disas_vfp_Fmt_28(ctx, &u.f_disas_vfp12, insn); + if (trans_VCVT_f64_f16(ctx, &u.f_disas_vfp12)) return true; + return false; + case 0x00010000: + /* ....1110 1.110011 ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:175 */ + disas_vfp_extract_disas_vfp_Fmt_29(ctx, &u.f_disas_vfp12, insn); + if (trans_VCVT_f16_f64(ctx, &u.f_disas_vfp12)) return true; + return false; + case 0x00040000: + /* ....1110 1.110110 ....1011 .1.0.... */ + disas_vfp_extract_vfp_dm_dd(ctx, &u.f_disas_vfp1, insn); + switch ((insn >> 7) & 0x1) { + case 0x0: + /* ....1110 1.110110 ....1011 01.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:179 */ + if (trans_VRINTR_dp(ctx, &u.f_disas_vfp1)) return true; + return false; + case 0x1: + /* ....1110 1.110110 ....1011 11.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:182 */ + if (trans_VRINTZ_dp(ctx, &u.f_disas_vfp1)) return true; + return false; + } + return false; + case 0x00050000: + /* ....1110 1.110111 ....1011 .1.0.... */ + switch ((insn >> 7) & 0x1) { + case 0x0: + /* ....1110 1.110111 ....1011 01.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:185 */ + disas_vfp_extract_vfp_dm_dd(ctx, &u.f_disas_vfp1, insn); + if (trans_VRINTX_dp(ctx, &u.f_disas_vfp1)) return true; + return false; + case 0x1: + /* ....1110 1.110111 ....1011 11.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:190 */ + disas_vfp_extract_vfp_dm_sd(ctx, &u.f_disas_vfp1, insn); + if (trans_VCVT_dp(ctx, &u.f_disas_vfp1)) return true; + return false; + } + return false; + } + return false; + case 0x00080000: + /* ....1110 1.111.0. ....1011 .1.0.... */ + switch ((insn >> 18) & 0x1) { + case 0x0: + /* ....1110 1.11100. ....1011 .1.0.... 
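(VCVT_int_dp and VJCVT)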
*/ + switch ((insn >> 16) & 0x1) { + case 0x0: + /* ....1110 1.111000 ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:195 */ + disas_vfp_extract_disas_vfp_Fmt_31(ctx, &u.f_disas_vfp13, insn); + if (trans_VCVT_int_dp(ctx, &u.f_disas_vfp13)) return true; + return false; + case 0x1: + /* ....1110 1.111001 ....1011 .1.0.... */ + disas_vfp_extract_vfp_dm_sd(ctx, &u.f_disas_vfp1, insn); + switch ((insn >> 7) & 0x1) { + case 0x1: + /* ....1110 1.111001 ....1011 11.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:199 */ + if (trans_VJCVT(ctx, &u.f_disas_vfp1)) return true; + return false; + } + return false; + } + return false; + case 0x1: + /* ....1110 1.11110. ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:214 */ + disas_vfp_extract_disas_vfp_Fmt_35(ctx, &u.f_disas_vfp15, insn); + if (trans_VCVT_dp_int(ctx, &u.f_disas_vfp15)) return true; + return false; + } + return false; + case 0x000a0000: + /* ....1110 1.111.1. ....1011 .1.0.... */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:208 */ + disas_vfp_extract_disas_vfp_Fmt_33(ctx, &u.f_disas_vfp14, insn); + if (trans_VCVT_fix_dp(ctx, &u.f_disas_vfp14)) return true; + return false; + } + return false; + } + return false; + case 0x00100010: + /* ....1110 ...1.... ....1011 ...1.... */ + switch (insn & 0x0040000f) { + case 0x00000000: + /* ....1110 .0.1.... ....1011 ...10000 */ + switch ((insn >> 5) & 0x1) { + case 0x0: + /* ....1110 .0.1.... ....1011 ..010000 */ + disas_vfp_extract_disas_vfp_Fmt_8(ctx, &u.f_disas_vfp2, insn); + switch (insn & 0x00800040) { + case 0x00000000: + /* ....1110 00.1.... ....1011 .0010000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:63 */ + if (trans_VMOV_to_gp(ctx, &u.f_disas_vfp2)) return true; + return false; + } + return false; + case 0x1: + /* ....1110 .0.1.... ....1011 ..110000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:61 */ + disas_vfp_extract_disas_vfp_Fmt_7(ctx, &u.f_disas_vfp2, insn); + if (trans_VMOV_to_gp(ctx, &u.f_disas_vfp2)) return true; + return false; + } + return false; + case 0x00400000: + /* ....1110 .1.1.... ....1011 ...10000 */ + /* /mnt/c/Users/me/Documents/projects/unicorn2/tmp/tmp/qemu-5.0.0/target/arm/vfp.decode:59 */ + disas_vfp_extract_disas_vfp_Fmt_6(ctx, &u.f_disas_vfp2, insn); + if (trans_VMOV_to_gp(ctx, &u.f_disas_vfp2)) return true; + return false; + } + return false; + } + return false; + } + return false; +} diff --git a/qemu/target/arm/helper-a64.c b/qemu/target/arm/helper-a64.c new file mode 100644 index 00000000..8b698824 --- /dev/null +++ b/qemu/target/arm/helper-a64.c @@ -0,0 +1,1155 @@ +/* + * AArch64 specific helpers + * + * Copyright (c) 2013 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "qemu/host-utils.h" +#include "qemu/log.h" +#include "qemu/bitops.h" +#include "internals.h" +#include "qemu/crc32c.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "qemu/int128.h" +#include "qemu/atomic128.h" +#include "tcg/tcg.h" +#include "fpu/softfloat.h" + +#include <zlib.h> + +/* C2.4.7 Multiply and divide */ +/* special cases for 0 and LLONG_MIN are mandated by the standard */ +uint64_t HELPER(udiv64)(uint64_t num, uint64_t den) +{ + if (den == 0) { + return 0; + } + return num / den; +} + +int64_t HELPER(sdiv64)(int64_t num, int64_t den) +{ + if (den == 0) { + return 0; + } + if (num == LLONG_MIN && den == -1) { + return LLONG_MIN; + } + return num / den; +} + +uint64_t HELPER(rbit64)(uint64_t x) +{ + return revbit64(x); +} + +void HELPER(msr_i_spsel)(CPUARMState *env, uint32_t imm) +{ + update_spsel(env, imm); +} + +static void daif_check(CPUARMState *env, uint32_t op, + uint32_t imm, uintptr_t ra) +{ + /* DAIF update to PSTATE. This is OK from EL0 only if UMA is set. */ + if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { + raise_exception_ra(env, EXCP_UDEF, + syn_aa64_sysregtrap(0, extract32(op, 0, 3), + extract32(op, 3, 3), 4, + imm, 0x1f, 0), + exception_target_el(env), ra); + } +} + +void HELPER(msr_i_daifset)(CPUARMState *env, uint32_t imm) +{ + daif_check(env, 0x1e, imm, GETPC()); + env->daif |= (imm << 6) & PSTATE_DAIF; +} + +void HELPER(msr_i_daifclear)(CPUARMState *env, uint32_t imm) +{ + daif_check(env, 0x1f, imm, GETPC()); + env->daif &= ~((imm << 6) & PSTATE_DAIF); +} + +/* Convert a softfloat float_relation_ (as returned by + * the float*_compare functions) to the correct ARM + * NZCV flag state. 
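+ * The mapping implemented below: equal -> Z|C, less -> N, greater -> C, + * unordered (any NaN operand) -> C|V, matching the AArch64 FCMP flag encodings.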
+ */ +static inline uint32_t float_rel_to_flags(int res) +{ + uint64_t flags; + switch (res) { + case float_relation_equal: + flags = PSTATE_Z | PSTATE_C; + break; + case float_relation_less: + flags = PSTATE_N; + break; + case float_relation_greater: + flags = PSTATE_C; + break; + case float_relation_unordered: + default: + flags = PSTATE_C | PSTATE_V; + break; + } + return flags; +} + +uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status) +{ + return float_rel_to_flags(float16_compare_quiet(x, y, fp_status)); +} + +uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status) +{ + return float_rel_to_flags(float16_compare(x, y, fp_status)); +} + +uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status) +{ + return float_rel_to_flags(float32_compare_quiet(x, y, fp_status)); +} + +uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status) +{ + return float_rel_to_flags(float32_compare(x, y, fp_status)); +} + +uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status) +{ + return float_rel_to_flags(float64_compare_quiet(x, y, fp_status)); +} + +uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status) +{ + return float_rel_to_flags(float64_compare(x, y, fp_status)); +} + +float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float32_squash_input_denormal(a, fpst); + b = float32_squash_input_denormal(b, fpst); + + if ((float32_is_zero(a) && float32_is_infinity(b)) || + (float32_is_infinity(a) && float32_is_zero(b))) { + /* 2.0 with the sign bit set to sign(A) XOR sign(B) */ + return make_float32((1U << 30) | + ((float32_val(a) ^ float32_val(b)) & (1U << 31))); + } + return float32_mul(a, b, fpst); +} + +float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float64_squash_input_denormal(a, fpst); + b = float64_squash_input_denormal(b, fpst); + + if ((float64_is_zero(a) && float64_is_infinity(b)) || + (float64_is_infinity(a) && float64_is_zero(b))) { + /* 2.0 with the sign bit set to sign(A) XOR sign(B) */ + return make_float64((1ULL << 62) | + ((float64_val(a) ^ float64_val(b)) & (1ULL << 63))); + } + return float64_mul(a, b, fpst); +} + +uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices, + uint32_t rn, uint32_t numregs) +{ + /* Helper function for SIMD TBL and TBX. We have to do the table + * lookup part for the 64 bits worth of indices we're passed in. + * result is the initial results vector (either zeroes for TBL + * or some guest values for TBX), rn the register number where + * the table starts, and numregs the number of registers in the table. + * We return the results of the lookups. + */ + int shift; + + for (shift = 0; shift < 64; shift += 8) { + int index = extract64(indices, shift, 8); + if (index < 16 * numregs) { + /* Convert index (a byte offset into the virtual table + * which is a series of 128-bit vectors concatenated) + * into the correct register element plus a bit offset + * into that element, bearing in mind that the table + * can wrap around from V31 to V0. 
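+ * For example, with numregs = 2 and rn = 31, index 16 wraps to byte 0 + * of V0: elt = (31 * 2 + (16 >> 3)) % 64 = 0.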
+ */ + int elt = (rn * 2 + (index >> 3)) % 64; + int bitidx = (index & 7) * 8; + uint64_t *q = aa64_vfp_qreg(env, elt >> 1); + uint64_t val = extract64(q[elt & 1], bitidx, 8); + + result = deposit64(result, shift, 8, val); + } + } + return result; +} + +/* 64bit/double versions of the neon float compare functions */ +uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float64_eq_quiet(a, b, fpst); +} + +uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float64_le(b, a, fpst); +} + +uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float64_lt(b, a, fpst); +} + +/* Reciprocal step and sqrt step. Note that unlike the A32/T32 + * versions, these do a fully fused multiply-add or + * multiply-add-and-halve. + */ +#define float16_two make_float16(0x4000) +#define float16_three make_float16(0x4200) +#define float16_one_point_five make_float16(0x3e00) + +#define float32_two make_float32(0x40000000) +#define float32_three make_float32(0x40400000) +#define float32_one_point_five make_float32(0x3fc00000) + +#define float64_two make_float64(0x4000000000000000ULL) +#define float64_three make_float64(0x4008000000000000ULL) +#define float64_one_point_five make_float64(0x3FF8000000000000ULL) + +uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float16_squash_input_denormal(a, fpst); + b = float16_squash_input_denormal(b, fpst); + + a = float16_chs(a); + if ((float16_is_infinity(a) && float16_is_zero(b)) || + (float16_is_infinity(b) && float16_is_zero(a))) { + return float16_two; + } + return float16_muladd(a, b, float16_two, 0, fpst); +} + +float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float32_squash_input_denormal(a, fpst); + b = float32_squash_input_denormal(b, fpst); + + a = float32_chs(a); + if ((float32_is_infinity(a) && float32_is_zero(b)) || + (float32_is_infinity(b) && float32_is_zero(a))) { + return float32_two; + } + return float32_muladd(a, b, float32_two, 0, fpst); +} + +float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float64_squash_input_denormal(a, fpst); + b = float64_squash_input_denormal(b, fpst); + + a = float64_chs(a); + if ((float64_is_infinity(a) && float64_is_zero(b)) || + (float64_is_infinity(b) && float64_is_zero(a))) { + return float64_two; + } + return float64_muladd(a, b, float64_two, 0, fpst); +} + +uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float16_squash_input_denormal(a, fpst); + b = float16_squash_input_denormal(b, fpst); + + a = float16_chs(a); + if ((float16_is_infinity(a) && float16_is_zero(b)) || + (float16_is_infinity(b) && float16_is_zero(a))) { + return float16_one_point_five; + } + return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst); +} + +float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float32_squash_input_denormal(a, fpst); + b = float32_squash_input_denormal(b, fpst); + + a = float32_chs(a); + if ((float32_is_infinity(a) && float32_is_zero(b)) || + (float32_is_infinity(b) && float32_is_zero(a))) { + return float32_one_point_five; + } + return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst); +} + +float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp) +{ + 
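/* As in the f16/f32 variants above: a is negated, so the fused muladd + * computes (3 - a * b) / 2 in a single rounding; this is the AArch64 + * FRSQRTS reciprocal square root step. */ + 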
float_status *fpst = fpstp; + + a = float64_squash_input_denormal(a, fpst); + b = float64_squash_input_denormal(b, fpst); + + a = float64_chs(a); + if ((float64_is_infinity(a) && float64_is_zero(b)) || + (float64_is_infinity(b) && float64_is_zero(a))) { + return float64_one_point_five; + } + return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst); +} + +/* Pairwise long add: add pairs of adjacent elements into + * double-width elements in the result (eg _s8 is an 8x8->16 op) + */ +uint64_t HELPER(neon_addlp_s8)(uint64_t a) +{ + uint64_t nsignmask = 0x0080008000800080ULL; + uint64_t wsignmask = 0x8000800080008000ULL; + uint64_t elementmask = 0x00ff00ff00ff00ffULL; + uint64_t tmp1, tmp2; + uint64_t res, signres; + + /* Extract odd elements, sign extend each to a 16 bit field */ + tmp1 = a & elementmask; + tmp1 ^= nsignmask; + tmp1 |= wsignmask; + tmp1 = (tmp1 - nsignmask) ^ wsignmask; + /* Ditto for the even elements */ + tmp2 = (a >> 8) & elementmask; + tmp2 ^= nsignmask; + tmp2 |= wsignmask; + tmp2 = (tmp2 - nsignmask) ^ wsignmask; + + /* calculate the result by summing bits 0..14, 16..22, etc, + * and then adjusting the sign bits 15, 23, etc manually. + * This ensures the addition can't overflow the 16 bit field. + */ + signres = (tmp1 ^ tmp2) & wsignmask; + res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask); + res ^= signres; + + return res; +} + +uint64_t HELPER(neon_addlp_u8)(uint64_t a) +{ + uint64_t tmp; + + tmp = a & 0x00ff00ff00ff00ffULL; + tmp += (a >> 8) & 0x00ff00ff00ff00ffULL; + return tmp; +} + +uint64_t HELPER(neon_addlp_s16)(uint64_t a) +{ + int32_t reslo, reshi; + + reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16); + reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48); + + return (uint32_t)reslo | (((uint64_t)reshi) << 32); +} + +uint64_t HELPER(neon_addlp_u16)(uint64_t a) +{ + uint64_t tmp; + + tmp = a & 0x0000ffff0000ffffULL; + tmp += (a >> 16) & 0x0000ffff0000ffffULL; + return tmp; +} + +/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */ +uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp) +{ + float_status *fpst = fpstp; + uint16_t val16, sbit; + int16_t exp; + + if (float16_is_any_nan(a)) { + float16 nan = a; + if (float16_is_signaling_nan(a, fpst)) { + float_raise(float_flag_invalid, fpst); + nan = float16_silence_nan(a, fpst); + } + if (fpst->default_nan_mode) { + nan = float16_default_nan(fpst); + } + return nan; + } + + a = float16_squash_input_denormal(a, fpst); + + val16 = float16_val(a); + sbit = 0x8000 & val16; + exp = extract32(val16, 10, 5); + + if (exp == 0) { + return make_float16(deposit32(sbit, 10, 5, 0x1e)); + } else { + return make_float16(deposit32(sbit, 10, 5, ~exp)); + } +} + +float32 HELPER(frecpx_f32)(float32 a, void *fpstp) +{ + float_status *fpst = fpstp; + uint32_t val32, sbit; + int32_t exp; + + if (float32_is_any_nan(a)) { + float32 nan = a; + if (float32_is_signaling_nan(a, fpst)) { + float_raise(float_flag_invalid, fpst); + nan = float32_silence_nan(a, fpst); + } + if (fpst->default_nan_mode) { + nan = float32_default_nan(fpst); + } + return nan; + } + + a = float32_squash_input_denormal(a, fpst); + + val32 = float32_val(a); + sbit = 0x80000000ULL & val32; + exp = extract32(val32, 23, 8); + + if (exp == 0) { + return make_float32(sbit | (0xfe << 23)); + } else { + return make_float32(sbit | (~exp & 0xff) << 23); + } +} + +float64 HELPER(frecpx_f64)(float64 a, void *fpstp) +{ + float_status *fpst = fpstp; + uint64_t val64, sbit; + int64_t exp; + + if (float64_is_any_nan(a)) { + float64 
nan = a; + if (float64_is_signaling_nan(a, fpst)) { + float_raise(float_flag_invalid, fpst); + nan = float64_silence_nan(a, fpst); + } + if (fpst->default_nan_mode) { + nan = float64_default_nan(fpst); + } + return nan; + } + + a = float64_squash_input_denormal(a, fpst); + + val64 = float64_val(a); + sbit = 0x8000000000000000ULL & val64; + exp = extract64(float64_val(a), 52, 11); + + if (exp == 0) { + return make_float64(sbit | (0x7feULL << 52)); + } else { + return make_float64(sbit | (~exp & 0x7ffULL) << 52); + } +} + +float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env) +{ + /* Von Neumann rounding is implemented by using round-to-zero + * and then setting the LSB of the result if Inexact was raised. + */ + float32 r; + float_status *fpst = &env->vfp.fp_status; + float_status tstat = *fpst; + int exflags; + + set_float_rounding_mode(float_round_to_zero, &tstat); + set_float_exception_flags(0, &tstat); + r = float64_to_float32(a, &tstat); + exflags = get_float_exception_flags(&tstat); + if (exflags & float_flag_inexact) { + r = make_float32(float32_val(r) | 1); + } + exflags |= get_float_exception_flags(fpst); + set_float_exception_flags(exflags, fpst); + return r; +} + +/* 64-bit versions of the CRC helpers. Note that although the operation + * (and the prototypes of crc32c() and crc32()) means that only the bottom + * 32 bits of the accumulator and result are used, we pass and return + * uint64_t for convenience of the generated code. Unlike the 32-bit + * instruction set versions, val may genuinely have 64 bits of data in it. + * The upper bytes of val (above the number specified by 'bytes') must have + * been zeroed out by the caller. + */ +uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes) +{ + uint8_t buf[8]; + + stq_le_p(buf, val); + + return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; +} + +uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes) +{ + uint8_t buf[8]; + + stq_le_p(buf, val); + + /* Linux crc32c converts the output to one's complement. 
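The XOR below undoes that inversion so the helper returns the plain accumulator value that the CRC32CX instruction defines. 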
*/ + return crc32c(acc, buf, bytes) ^ 0xffffffff; +} + +uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr, + uint64_t new_lo, uint64_t new_hi) +{ + Int128 cmpv = int128_make128(env->exclusive_val, env->exclusive_high); + Int128 newv = int128_make128(new_lo, new_hi); + Int128 oldv; + uintptr_t ra = GETPC(); + uint64_t o0, o1; + bool success; + int mem_idx = cpu_mmu_index(env, false); + TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx); + + o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra); + o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra); + oldv = int128_make128(o0, o1); + + success = int128_eq(oldv, cmpv); + if (success) { + helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra); + helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra); + } + + return !success; +} + +uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr, + uint64_t new_lo, uint64_t new_hi) +{ + Int128 oldv, cmpv, newv; + uintptr_t ra = GETPC(); + bool success; + int mem_idx; + TCGMemOpIdx oi; + + assert(HAVE_CMPXCHG128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + + cmpv = int128_make128(env->exclusive_val, env->exclusive_high); + newv = int128_make128(new_lo, new_hi); + oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra); + + success = int128_eq(oldv, cmpv); + return !success; +} + +uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr, + uint64_t new_lo, uint64_t new_hi) +{ + /* + * High and low need to be switched here because this is not actually a + * 128bit store but two doublewords stored consecutively + */ + Int128 cmpv = int128_make128(env->exclusive_high, env->exclusive_val); + Int128 newv = int128_make128(new_hi, new_lo); + Int128 oldv; + uintptr_t ra = GETPC(); + uint64_t o0, o1; + bool success; + int mem_idx = cpu_mmu_index(env, false); + TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); + TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx); + + o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra); + o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra); + oldv = int128_make128(o0, o1); + + success = int128_eq(oldv, cmpv); + if (success) { + helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra); + helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra); + } + + return !success; +} + +uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr, + uint64_t new_lo, uint64_t new_hi) +{ + Int128 oldv, cmpv, newv; + uintptr_t ra = GETPC(); + bool success; + int mem_idx; + TCGMemOpIdx oi; + + assert(HAVE_CMPXCHG128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); + + /* + * High and low need to be switched here because this is not actually a + * 128bit store but two doublewords stored consecutively + */ + cmpv = int128_make128(env->exclusive_high, env->exclusive_val); + newv = int128_make128(new_hi, new_lo); + oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); + + success = int128_eq(oldv, cmpv); + return !success; +} + +/* Writes back the old data into Rs. 
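CASP compares {Xs, Xs+1} against the 128-bit value at [addr], stores {new_lo, new_hi} on a match, and always loads the old memory value back into the register pair. 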
*/ +void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, + uint64_t new_lo, uint64_t new_hi) +{ + Int128 oldv, cmpv, newv; + uintptr_t ra = GETPC(); + int mem_idx; + TCGMemOpIdx oi; + + assert(HAVE_CMPXCHG128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + + cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]); + newv = int128_make128(new_lo, new_hi); + oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra); + + env->xregs[rs] = int128_getlo(oldv); + env->xregs[rs + 1] = int128_gethi(oldv); +} + +void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, + uint64_t new_hi, uint64_t new_lo) +{ + Int128 oldv, cmpv, newv; + uintptr_t ra = GETPC(); + int mem_idx; + TCGMemOpIdx oi; + + assert(HAVE_CMPXCHG128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + + cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]); + newv = int128_make128(new_lo, new_hi); + oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); + + env->xregs[rs + 1] = int128_getlo(oldv); + env->xregs[rs] = int128_gethi(oldv); +} + +/* + * AdvSIMD half-precision + */ + +#define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix)) + +#define ADVSIMD_HALFOP(name) \ +uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \ +{ \ + float_status *fpst = fpstp; \ + return float16_ ## name(a, b, fpst); \ +} + +ADVSIMD_HALFOP(add) +ADVSIMD_HALFOP(sub) +ADVSIMD_HALFOP(mul) +ADVSIMD_HALFOP(div) +ADVSIMD_HALFOP(min) +ADVSIMD_HALFOP(max) +ADVSIMD_HALFOP(minnum) +ADVSIMD_HALFOP(maxnum) + +#define ADVSIMD_TWOHALFOP(name) \ +uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, void *fpstp) \ +{ \ + float16 a1, a2, b1, b2; \ + uint32_t r1, r2; \ + float_status *fpst = fpstp; \ + a1 = extract32(two_a, 0, 16); \ + a2 = extract32(two_a, 16, 16); \ + b1 = extract32(two_b, 0, 16); \ + b2 = extract32(two_b, 16, 16); \ + r1 = float16_ ## name(a1, b1, fpst); \ + r2 = float16_ ## name(a2, b2, fpst); \ + return deposit32(r1, 16, 16, r2); \ +} + +ADVSIMD_TWOHALFOP(add) +ADVSIMD_TWOHALFOP(sub) +ADVSIMD_TWOHALFOP(mul) +ADVSIMD_TWOHALFOP(div) +ADVSIMD_TWOHALFOP(min) +ADVSIMD_TWOHALFOP(max) +ADVSIMD_TWOHALFOP(minnum) +ADVSIMD_TWOHALFOP(maxnum) + +/* Data processing - scalar floating-point and advanced SIMD */ +static float16 float16_mulx(float16 a, float16 b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float16_squash_input_denormal(a, fpst); + b = float16_squash_input_denormal(b, fpst); + + if ((float16_is_zero(a) && float16_is_infinity(b)) || + (float16_is_infinity(a) && float16_is_zero(b))) { + /* 2.0 with the sign bit set to sign(A) XOR sign(B) */ + return make_float16((1U << 14) | + ((float16_val(a) ^ float16_val(b)) & (1U << 15))); + } + return float16_mul(a, b, fpst); +} + +ADVSIMD_HALFOP(mulx) +ADVSIMD_TWOHALFOP(mulx) + +/* fused multiply-accumulate */ +uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c, + void *fpstp) +{ + float_status *fpst = fpstp; + return float16_muladd(a, b, c, 0, fpst); +} + +uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b, + uint32_t two_c, void *fpstp) +{ + float_status *fpst = fpstp; + float16 a1, a2, b1, b2, c1, c2; + uint32_t r1, r2; + a1 = extract32(two_a, 0, 16); + a2 = extract32(two_a, 16, 16); + b1 = extract32(two_b, 0, 16); + b2 = extract32(two_b, 16, 16); + c1 = extract32(two_c, 0, 16); + c2 = extract32(two_c, 16, 16); + r1 = float16_muladd(a1, b1, c1, 0, 
fpst); + r2 = float16_muladd(a2, b2, c2, 0, fpst); + return deposit32(r1, 16, 16, r2); +} + +/* + * Floating point comparisons produce an integer result. Softfloat + * routines return float_relation types which we convert to the 0/-1 + * Neon requires. + */ + +#define ADVSIMD_CMPRES(test) (test) ? 0xffff : 0 + +uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + int compare = float16_compare_quiet(a, b, fpst); + return ADVSIMD_CMPRES(compare == float_relation_equal); +} + +uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + int compare = float16_compare(a, b, fpst); + return ADVSIMD_CMPRES(compare == float_relation_greater || + compare == float_relation_equal); +} + +uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + int compare = float16_compare(a, b, fpst); + return ADVSIMD_CMPRES(compare == float_relation_greater); +} + +uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + float16 f0 = float16_abs(a); + float16 f1 = float16_abs(b); + int compare = float16_compare(f0, f1, fpst); + return ADVSIMD_CMPRES(compare == float_relation_greater || + compare == float_relation_equal); +} + +uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + float16 f0 = float16_abs(a); + float16 f1 = float16_abs(b); + int compare = float16_compare(f0, f1, fpst); + return ADVSIMD_CMPRES(compare == float_relation_greater); +} + +/* round to integral */ +uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status) +{ + return float16_round_to_int(x, fp_status); +} + +uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status) +{ + int old_flags = get_float_exception_flags(fp_status), new_flags; + float16 ret; + + ret = float16_round_to_int(x, fp_status); + + /* Suppress any inexact exceptions the conversion produced */ + if (!(old_flags & float_flag_inexact)) { + new_flags = get_float_exception_flags(fp_status); + set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); + } + + return ret; +} + +/* + * Half-precision floating point conversion functions + * + * There are a multitude of conversion functions with various + * different rounding modes. This is dealt with by the calling code + * setting the mode appropriately before calling the helper. + */ + +uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp) +{ + float_status *fpst = fpstp; + + /* Invalid if we are passed a NaN */ + if (float16_is_any_nan(a)) { + float_raise(float_flag_invalid, fpst); + return 0; + } + return float16_to_int16(a, fpst); +} + +uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp) +{ + float_status *fpst = fpstp; + + /* Invalid if we are passed a NaN */ + if (float16_is_any_nan(a)) { + float_raise(float_flag_invalid, fpst); + return 0; + } + return float16_to_uint16(a, fpst); +} + +static int el_from_spsr(uint32_t spsr) +{ + /* Return the exception level that this SPSR is requesting a return to, + * or -1 if it is invalid (an illegal return) + */ + if (spsr & PSTATE_nRW) { + switch (spsr & CPSR_M) { + case ARM_CPU_MODE_USR: + return 0; + case ARM_CPU_MODE_HYP: + return 2; + case ARM_CPU_MODE_FIQ: + case ARM_CPU_MODE_IRQ: + case ARM_CPU_MODE_SVC: + case ARM_CPU_MODE_ABT: + case ARM_CPU_MODE_UND: + case ARM_CPU_MODE_SYS: + return 1; + case ARM_CPU_MODE_MON: + /* Returning to Mon from AArch64 is never possible, + * so this is an illegal return. 
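+ * (Mon exists only when EL3 is AArch32, and no EL can be executing + * AArch64 while EL3 is AArch32.)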
+ */ + default: + return -1; + } + } else { + if (extract32(spsr, 1, 1)) { + /* Return with reserved M[1] bit set */ + return -1; + } + if (extract32(spsr, 0, 4) == 1) { + /* return to EL0 with M[0] bit set */ + return -1; + } + return extract32(spsr, 2, 2); + } +} + +void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) +{ + int cur_el = arm_current_el(env); + unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el); + uint32_t mask, spsr = env->banked_spsr[spsr_idx]; + int new_el; + bool return_to_aa64 = (spsr & PSTATE_nRW) == 0; + + aarch64_save_sp(env, cur_el); + + arm_clear_exclusive(env); + + /* We must squash the PSTATE.SS bit to zero unless both of the + * following hold: + * 1. debug exceptions are currently disabled + * 2. singlestep will be active in the EL we return to + * We check 1 here and 2 after we've done the pstate/cpsr write() to + * transition to the EL we're going to. + */ + if (arm_generate_debug_exceptions(env)) { + spsr &= ~PSTATE_SS; + } + + new_el = el_from_spsr(spsr); + if (new_el == -1) { + goto illegal_return; + } + if (new_el > cur_el + || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) { + /* Disallow return to an EL which is unimplemented or higher + * than the current one. + */ + goto illegal_return; + } + + if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) { + /* Return to an EL which is configured for a different register width */ + goto illegal_return; + } + + if (new_el == 2 && arm_is_secure_below_el3(env)) { + /* Return to the non-existent secure-EL2 */ + goto illegal_return; + } + + if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) { + goto illegal_return; + } + + arm_call_pre_el_change_hook(env_archcpu(env)); + + if (!return_to_aa64) { + env->aarch64 = 0; + /* We do a raw CPSR write because aarch64_sync_64_to_32() + * will sort the register banks out for us, and we've already + * caught all the bad-mode cases in el_from_spsr(). + */ + mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar); + cpsr_write(env, spsr, mask, CPSRWriteRaw); + if (!arm_singlestep_active(env)) { + env->uncached_cpsr &= ~PSTATE_SS; + } + aarch64_sync_64_to_32(env); + + if (spsr & CPSR_T) { + env->regs[15] = new_pc & ~0x1; + } else { + env->regs[15] = new_pc & ~0x3; + } + helper_rebuild_hflags_a32(env, new_el); + qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to " + "AArch32 EL%d PC 0x%" PRIx32 "\n", + cur_el, new_el, env->regs[15]); + } else { + int tbii; + + env->aarch64 = 1; + spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar); + pstate_write(env, spsr); + if (!arm_singlestep_active(env)) { + env->pstate &= ~PSTATE_SS; + } + aarch64_restore_sp(env, new_el); + helper_rebuild_hflags_a64(env, new_el); + + /* + * Apply TBI to the exception return address. We had to delay this + * until after we selected the new EL, so that we could select the + * correct TBI+TBID bits. This is made easier by waiting until after + * the hflags rebuild, since we can pull the composite TBII field + * from there. + */ + tbii = FIELD_EX32(env->hflags, TBFLAG_A64, TBII); + if ((tbii >> extract64(new_pc, 55, 1)) & 1) { + /* TBI is enabled. 
*/ + int core_mmu_idx = cpu_mmu_index(env, false); + if (regime_has_2_ranges(core_to_aa64_mmu_idx(core_mmu_idx))) { + new_pc = sextract64(new_pc, 0, 56); + } else { + new_pc = extract64(new_pc, 0, 56); + } + } + env->pc = new_pc; + + qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to " + "AArch64 EL%d PC 0x%" PRIx64 "\n", + cur_el, new_el, env->pc); + } + + /* + * Note that cur_el can never be 0. If new_el is 0, then + * el0_a64 is return_to_aa64, else el0_a64 is ignored. + */ + aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64); + + arm_call_el_change_hook(env_archcpu(env)); + + return; + +illegal_return: + /* Illegal return events of various kinds have architecturally + * mandated behaviour: + * restore NZCV and DAIF from SPSR_ELx + * set PSTATE.IL + * restore PC from ELR_ELx + * no change to exception level, execution state or stack pointer + */ + env->pstate |= PSTATE_IL; + env->pc = new_pc; + spsr &= PSTATE_NZCV | PSTATE_DAIF; + spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF); + pstate_write(env, spsr); + if (!arm_singlestep_active(env)) { + env->pstate &= ~PSTATE_SS; + } + qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: " + "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc); +} + +/* + * Square Root and Reciprocal square root + */ + +uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp) +{ + float_status *s = fpstp; + + return float16_sqrt(a, s); +} + +void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) +{ + /* + * Implement DC ZVA, which zeroes a fixed-length block of memory. + * Note that we do not implement the (architecturally mandated) + * alignment fault for attempts to use this on Device memory + * (which matches the usual QEMU behaviour of not implementing either + * alignment faults or any memory attribute handling). + */ + + struct uc_struct *uc = env->uc; + ARMCPU *cpu = env_archcpu(env); + uint64_t blocklen = 4 << cpu->dcz_blocksize; + uint64_t vaddr = vaddr_in & ~(blocklen - 1); + /* + * Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than + * the block size so we might have to do more than one TLB lookup. + * We know that in fact for any v8 CPU the page size is at least 4K + * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only + * 1K as an artefact of legacy v5 subpage support being present in the + * same QEMU executable. So in practice the hostaddr[] array has + * two entries, given the current setting of TARGET_PAGE_BITS_MIN. + */ + int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE); + void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)]; + int try, i; + unsigned mmu_idx = cpu_mmu_index(env, false); + TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + + assert(maxidx <= ARRAY_SIZE(hostaddr)); + + for (try = 0; try < 2; try++) { + + for (i = 0; i < maxidx; i++) { + hostaddr[i] = tlb_vaddr_to_host(env, + vaddr + TARGET_PAGE_SIZE * i, + 1, mmu_idx); + if (!hostaddr[i]) { + break; + } + } + if (i == maxidx) { + /* + * If it's all in the TLB it's fair game for just writing to; + * we know we don't need to update dirty status, etc. + */ + for (i = 0; i < maxidx - 1; i++) { + memset(hostaddr[i], 0, TARGET_PAGE_SIZE); + } + memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE)); + return; + } + /* + * OK, try a store and see if we can populate the tlb. This + * might cause an exception if the memory isn't writable, + * in which case we will longjmp out of here. We must for + * this purpose use the actual register value passed to us + * so that we get the fault address right. 
+         */
+        helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
+        /* Now we can populate the other TLB entries, if any */
+        for (i = 0; i < maxidx; i++) {
+            uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
+            if (va != (vaddr_in & TARGET_PAGE_MASK)) {
+                helper_ret_stb_mmu(env, va, 0, oi, GETPC());
+            }
+        }
+    }
+
+    /*
+     * Slow path (probably attempt to do this to an I/O device or
+     * similar, or clearing of a block of code we have translations
+     * cached for). Just do a series of byte writes as the architecture
+     * demands. It's not worth trying to use a cpu_physical_memory_map(),
+     * memset(), unmap() sequence here because:
+     *  + we'd need to account for the blocksize being larger than a page
+     *  + the direct-RAM access case is almost always going to be dealt
+     *    with in the fastpath code above, so there's no speed benefit
+     *  + we would have to deal with the map returning NULL because the
+     *    bounce buffer was in use
+     */
+    for (i = 0; i < blocklen; i++) {
+        helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
+    }
+}
diff --git a/qemu/target/arm/helper-a64.h b/qemu/target/arm/helper-a64.h
new file mode 100644
index 00000000..3df7c185
--- /dev/null
+++ b/qemu/target/arm/helper-a64.h
@@ -0,0 +1,105 @@
+/*
+ * AArch64 specific helper definitions
+ *
+ * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ +DEF_HELPER_FLAGS_2(udiv64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(sdiv64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_2(msr_i_spsel, void, env, i32) +DEF_HELPER_2(msr_i_daifset, void, env, i32) +DEF_HELPER_2(msr_i_daifclear, void, env, i32) +DEF_HELPER_3(vfp_cmph_a64, i64, f16, f16, ptr) +DEF_HELPER_3(vfp_cmpeh_a64, i64, f16, f16, ptr) +DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr) +DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr) +DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr) +DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr) +DEF_HELPER_FLAGS_5(simd_tbl, TCG_CALL_NO_RWG_SE, i64, env, i64, i64, i32, i32) +DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr) +DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr) +DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) +DEF_HELPER_FLAGS_3(neon_cge_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) +DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) +DEF_HELPER_FLAGS_3(recpsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, ptr) +DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr) +DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr) +DEF_HELPER_FLAGS_3(rsqrtsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, ptr) +DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr) +DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr) +DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(neon_addlp_u8, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(neon_addlp_u16, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, ptr) +DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, ptr) +DEF_HELPER_FLAGS_2(frecpx_f16, TCG_CALL_NO_RWG, f16, f16, ptr) +DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env) +DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) +DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) +DEF_HELPER_FLAGS_4(paired_cmpxchg64_le, TCG_CALL_NO_WG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(paired_cmpxchg64_le_parallel, TCG_CALL_NO_WG, + i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(paired_cmpxchg64_be, TCG_CALL_NO_WG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(paired_cmpxchg64_be_parallel, TCG_CALL_NO_WG, + i64, env, i64, i64, i64) +DEF_HELPER_5(casp_le_parallel, void, env, i32, i64, i64, i64) +DEF_HELPER_5(casp_be_parallel, void, env, i32, i64, i64, i64) +DEF_HELPER_FLAGS_3(advsimd_maxh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) +DEF_HELPER_FLAGS_3(advsimd_minh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) +DEF_HELPER_FLAGS_3(advsimd_maxnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) +DEF_HELPER_FLAGS_3(advsimd_minnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) +DEF_HELPER_3(advsimd_addh, f16, f16, f16, ptr) +DEF_HELPER_3(advsimd_subh, f16, f16, f16, ptr) +DEF_HELPER_3(advsimd_mulh, f16, f16, f16, ptr) +DEF_HELPER_3(advsimd_divh, f16, f16, f16, ptr) +DEF_HELPER_3(advsimd_ceq_f16, i32, f16, f16, ptr) +DEF_HELPER_3(advsimd_cge_f16, i32, f16, f16, ptr) +DEF_HELPER_3(advsimd_cgt_f16, i32, f16, f16, ptr) +DEF_HELPER_3(advsimd_acge_f16, i32, f16, f16, ptr) +DEF_HELPER_3(advsimd_acgt_f16, i32, f16, f16, ptr) +DEF_HELPER_3(advsimd_mulxh, f16, f16, f16, ptr) +DEF_HELPER_4(advsimd_muladdh, f16, f16, f16, f16, ptr) +DEF_HELPER_3(advsimd_add2h, i32, i32, i32, ptr) +DEF_HELPER_3(advsimd_sub2h, i32, i32, i32, ptr) 
+DEF_HELPER_3(advsimd_mul2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_div2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_max2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_min2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_maxnum2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_minnum2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_mulx2h, i32, i32, i32, ptr)
+DEF_HELPER_4(advsimd_muladd2h, i32, i32, i32, i32, ptr)
+DEF_HELPER_2(advsimd_rinth_exact, f16, f16, ptr)
+DEF_HELPER_2(advsimd_rinth, f16, f16, ptr)
+DEF_HELPER_2(advsimd_f16tosinth, i32, f16, ptr)
+DEF_HELPER_2(advsimd_f16touinth, i32, f16, ptr)
+DEF_HELPER_2(sqrt_f16, f16, f16, ptr)
+
+DEF_HELPER_2(exception_return, void, env, i64)
+DEF_HELPER_FLAGS_2(dc_zva, TCG_CALL_NO_WG, void, env, i64)
+
+DEF_HELPER_FLAGS_3(pacia, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(pacib, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(pacda, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(pacdb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(pacga, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(autia, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(autib, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
+DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
diff --git a/qemu/target/arm/helper-sve.h b/qemu/target/arm/helper-sve.h
new file mode 100644
index 00000000..2f472791
--- /dev/null
+++ b/qemu/target/arm/helper-sve.h
@@ -0,0 +1,1578 @@
+/*
+ * AArch64 SVE specific helper definitions
+ *
+ * Copyright (c) 2018 Linaro, Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +DEF_HELPER_FLAGS_2(sve_predtest1, TCG_CALL_NO_WG, i32, i64, i64) +DEF_HELPER_FLAGS_3(sve_predtest, TCG_CALL_NO_WG, i32, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_pfirst, TCG_CALL_NO_WG, i32, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_pnext, TCG_CALL_NO_WG, i32, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_and_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_and_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_and_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_and_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_eor_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_eor_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_eor_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_eor_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_orr_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_orr_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_orr_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_orr_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_bic_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_bic_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_bic_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_bic_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_add_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_add_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_add_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_add_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_sub_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sub_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sub_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sub_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_smax_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_smax_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_smax_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_smax_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_umax_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_umax_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_umax_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_umax_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_smin_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_smin_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_smin_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_smin_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_umin_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_umin_zpzz_h, TCG_CALL_NO_RWG, + 
void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_umin_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_umin_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_sabd_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sabd_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sabd_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sabd_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_uabd_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_uabd_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_uabd_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_uabd_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_mul_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_mul_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_mul_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_mul_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_smulh_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_smulh_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_smulh_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_smulh_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_umulh_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_umulh_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_umulh_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_umulh_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_udiv_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_udiv_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_asr_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_asr_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_asr_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_asr_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_lsr_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_lsr_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_lsr_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_lsr_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_lsl_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_lsl_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_lsl_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_lsl_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_sel_zpzz_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sel_zpzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) 
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_asr_zpzw_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_asr_zpzw_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_asr_zpzw_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_lsr_zpzw_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_lsr_zpzw_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_lsr_zpzw_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_lsl_zpzw_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_lsl_zpzw_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_lsl_zpzw_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_orv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_orv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_orv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_orv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_eorv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_eorv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_eorv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_eorv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_andv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_andv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_andv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_andv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_saddv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_saddv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_saddv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_uaddv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_uaddv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_uaddv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_uaddv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_smaxv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_smaxv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_smaxv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_smaxv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_umaxv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_umaxv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_umaxv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_umaxv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_sminv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_sminv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_sminv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_sminv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_uminv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_uminv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_uminv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_uminv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_clr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_clr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) 
+DEF_HELPER_FLAGS_3(sve_clr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_clr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_movz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_movz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_movz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_movz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_asr_zpzi_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_asr_zpzi_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_asr_zpzi_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_asr_zpzi_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_lsr_zpzi_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_lsr_zpzi_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_lsr_zpzi_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_lsr_zpzi_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_lsl_zpzi_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_lsl_zpzi_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_lsl_zpzi_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_lsl_zpzi_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_asrd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_asrd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_asrd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_asrd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_cls_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cls_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_clz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_clz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_clz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_clz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_cnt_zpz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cnt_zpz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cnt_zpz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cnt_zpz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_cnot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cnot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cnot_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cnot_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_fabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_fneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_not_zpz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_not_zpz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_not_zpz_s, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_not_zpz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_sxtb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_sxtb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_sxtb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_uxtb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_uxtb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_uxtb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_sxth_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_sxth_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_uxth_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_uxth_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_sxtw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_uxtw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_abs_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_abs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_abs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_abs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_neg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_neg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_neg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_neg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_mla_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_mla_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_mla_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_mla_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_mls_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_mls_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_mls_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_mls_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_index_b, TCG_CALL_NO_RWG, void, ptr, i32, i32, i32) +DEF_HELPER_FLAGS_4(sve_index_h, TCG_CALL_NO_RWG, void, ptr, i32, i32, i32) +DEF_HELPER_FLAGS_4(sve_index_s, TCG_CALL_NO_RWG, void, ptr, i32, i32, i32) +DEF_HELPER_FLAGS_4(sve_index_d, TCG_CALL_NO_RWG, void, ptr, i64, i64, i32) + +DEF_HELPER_FLAGS_4(sve_asr_zzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_asr_zzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_asr_zzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_lsr_zzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_lsr_zzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_lsr_zzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_lsl_zzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_lsl_zzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_lsl_zzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_adr_p32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_adr_p64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_adr_s32, TCG_CALL_NO_RWG, void, ptr, ptr, 
ptr, i32) +DEF_HELPER_FLAGS_4(sve_adr_u32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_fexpa_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fexpa_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fexpa_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_ftssel_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ftssel_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ftssel_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_sqaddi_b, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32) +DEF_HELPER_FLAGS_4(sve_sqaddi_h, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32) +DEF_HELPER_FLAGS_4(sve_sqaddi_s, TCG_CALL_NO_RWG, void, ptr, ptr, s64, i32) +DEF_HELPER_FLAGS_4(sve_sqaddi_d, TCG_CALL_NO_RWG, void, ptr, ptr, s64, i32) + +DEF_HELPER_FLAGS_4(sve_uqaddi_b, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32) +DEF_HELPER_FLAGS_4(sve_uqaddi_h, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32) +DEF_HELPER_FLAGS_4(sve_uqaddi_s, TCG_CALL_NO_RWG, void, ptr, ptr, s64, i32) +DEF_HELPER_FLAGS_4(sve_uqaddi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_uqsubi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_5(sve_cpy_m_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_5(sve_cpy_m_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_5(sve_cpy_m_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_5(sve_cpy_m_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(sve_cpy_z_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_cpy_z_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_cpy_z_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_cpy_z_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(sve_ext, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_insr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_insr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_insr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_insr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_3(sve_rev_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_rev_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_rev_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_rev_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_tbl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_sunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_sunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_sunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_uunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_uunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_uunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_zip_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_uzp_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_trn_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sve_rev_p, TCG_CALL_NO_RWG, void, ptr, ptr, i32) 
+DEF_HELPER_FLAGS_3(sve_punpk_p, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_zip_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_zip_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_zip_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_zip_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_uzp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_uzp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_uzp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_uzp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_trn_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_trn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_trn_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_trn_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_compact_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_compact_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_2(sve_last_active_element, TCG_CALL_NO_RWG, s32, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_revb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_revb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_revb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_revh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_rbit_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_splice, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) 
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_d, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_d, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_d, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_d, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_d, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_d, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmple_ppzw_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_b, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmple_ppzw_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_h, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmple_ppzw_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_s, TCG_CALL_NO_RWG, + i32, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) 
+DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmple_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmple_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmple_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmple_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_and_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_bic_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_eor_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sel_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_orr_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_orn_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_nor_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_nand_pppp, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + 
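For orientation (this note and the sketch are editorial, not part of the patch): each DEF_HELPER_FLAGS_n line in this header is expanded several times by QEMU's helper machinery, once into a C prototype via exec/helper-proto.h, once into a gen_helper_*() call emitter via exec/helper-gen.h, and once into the TCG registration table via exec/helper-tcg.h. A minimal sketch of the prototype form for one of the predicate-logic declarations above, assuming the usual dh_ctype mappings (ptr becomes void *, i32 becomes uint32_t; the parameter names are illustrative):

    /* DEF_HELPER_FLAGS_5(sve_and_pppp, TCG_CALL_NO_RWG,
     *                    void, ptr, ptr, ptr, ptr, i32)
     * yields a prototype equivalent to: */
    void helper_sve_and_pppp(void *vd, void *vn, void *vm, void *vg,
                             uint32_t desc);

The trailing i32 is the simd descriptor, which packs the vector length (and any immediate data) so the helper needs no extra arguments; TCG_CALL_NO_RWG tells the optimizer the call neither reads nor writes TCG globals.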
+DEF_HELPER_FLAGS_5(sve_brkpa, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_brkpb, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_brkpas, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_brkpbs, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_brka_z, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_brkb_z, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_brka_m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_brkb_m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_brkas_z, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_brkbs_z, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_brkas_m, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_brkbs_m, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_brkn, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_brkns, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_cntp, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_while, TCG_CALL_NO_RWG, i32, ptr, i32, i32) + +DEF_HELPER_FLAGS_4(sve_subri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_subri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_subri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_subri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(sve_smaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_smaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_smaxi_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_smaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(sve_smini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_smini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_smini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_smini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(sve_umaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_umaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_umaxi_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_umaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(sve_umini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_umini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_umini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(sve_umini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_5(gvec_recps_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_recps_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_recps_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_rsqrts_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_rsqrts_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_rsqrts_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_faddv_h, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_faddv_s, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_faddv_d, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_fmaxnmv_h, TCG_CALL_NO_RWG, + i64, ptr, ptr, 
ptr, i32) +DEF_HELPER_FLAGS_4(sve_fmaxnmv_s, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fmaxnmv_d, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_fminnmv_h, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fminnmv_s, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fminnmv_d, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_fmaxv_h, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fmaxv_s, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fmaxv_d, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_fminv_h, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fminv_s, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_fminv_d, TCG_CALL_NO_RWG, + i64, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fadda_h, TCG_CALL_NO_RWG, + i64, i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fadda_s, TCG_CALL_NO_RWG, + i64, i64, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fadda_d, TCG_CALL_NO_RWG, + i64, i64, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fcmge0_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmge0_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmge0_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fcmgt0_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmgt0_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmgt0_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fcmlt0_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmlt0_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmlt0_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fcmle0_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmle0_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmle0_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fcmeq0_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmeq0_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmeq0_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fcmne0_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmne0_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcmne0_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fadd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fadd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fadd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fsub_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fsub_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fsub_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fmul_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmul_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmul_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fdiv_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fdiv_s, 
TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fdiv_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fmin_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmin_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmin_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fmax_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmax_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmax_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fminnum_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fminnum_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fminnum_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fmaxnum_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmaxnum_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmaxnum_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fabd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fabd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fabd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fscalbn_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fscalbn_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fscalbn_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fmulx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmulx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmulx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fadds_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fadds_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fadds_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fsubs_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fsubs_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fsubs_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fmuls_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmuls_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmuls_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fsubrs_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fsubrs_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fsubrs_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fmaxnms_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmaxnms_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmaxnms_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fminnms_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fminnms_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) 
+DEF_HELPER_FLAGS_6(sve_fminnms_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fmaxs_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmaxs_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmaxs_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fmins_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmins_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fmins_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fcvt_sh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvt_dh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvt_hs, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvt_ds, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvt_hd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvt_sd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fcvtzs_hh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzs_hs, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzs_ss, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzs_ds, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzs_hd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzs_sd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzs_dd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fcvtzu_hh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzu_hs, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzu_ss, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzu_ds, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzu_hd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzu_sd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fcvtzu_dd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_frint_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_frint_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_frint_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_frintx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_frintx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_frintx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_frecpx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_frecpx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_frecpx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_fsqrt_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fsqrt_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_fsqrt_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_scvt_hh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_scvt_sh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_scvt_dh, TCG_CALL_NO_RWG, + void, 
ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_scvt_ss, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_scvt_sd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_scvt_ds, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_scvt_dd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_ucvt_hh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_ucvt_sh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_ucvt_dh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_ucvt_ss, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_ucvt_sd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_ucvt_ds, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_ucvt_dd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fcmge_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcmge_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcmge_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fcmgt_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcmgt_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcmgt_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fcmeq_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcmeq_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcmeq_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fcmne_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcmne_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcmne_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fcmuo_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcmuo_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcmuo_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_facge_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_facge_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_facge_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_facgt_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_facgt_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_facgt_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve_fcadd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcadd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sve_fcadd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_fmls_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fmls_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fmls_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32) + 
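
Each DEF_HELPER_FLAGS_N line in this header does double duty: it declares a C prototype for the helper and tells TCG's optimizer how the call behaves (TCG_CALL_NO_RWG promises the helper neither reads nor writes TCG globals; the TCG_CALL_NO_WG used by the memory helpers further down still permits reads). As a hedged sketch, assuming QEMU's usual type mapping (ptr to void *, i32 to uint32_t), one of the declarations above boils down to a prototype like the following; the parameter roles are inferred from SVE helper conventions, not spelled out in this header:

/* Sketch of the prototype behind
 *   DEF_HELPER_FLAGS_6(sve_fadd_h, TCG_CALL_NO_RWG,
 *                      void, ptr, ptr, ptr, ptr, ptr, i32)
 * vd/vn/vm: destination and source Z registers, vg: governing predicate,
 * fpst: float_status used to accumulate FP flags, desc: packed vector
 * length. The _h/_s/_d suffix picks the 16/32/64-bit element width. */
void helper_sve_fadd_h(void *vd, void *vn, void *vm, void *vg,
                       void *fpst, uint32_t desc);
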
+DEF_HELPER_FLAGS_3(sve_fnmla_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fnmla_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fnmla_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_fnmls_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fnmls_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fnmls_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_3(sve_fcmla_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fcmla_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32) +DEF_HELPER_FLAGS_3(sve_fcmla_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32) + +DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ld1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ld1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ld1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ld1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ld1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ld1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + 
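
In the block above, the helper names encode three things: the memory element width, the vector element width the value is widened to, and whether the widening zero- or sign-extends (so sve_ld1bdu_r loads bytes and zero-extends them into 64-bit lanes, while sve_ld1bds_r sign-extends). A self-contained plain-C stand-in for those semantics, using a deliberately simplified one-bit-per-lane predicate layout rather than SVE's one-bit-per-byte encoding:

#include <stddef.h>
#include <stdint.h>

/* Illustration of "ld1bdu": 8-bit memory elements zero-extended into
 * 64-bit vector lanes; inactive lanes are zeroed, as SVE contiguous
 * loads require. Not the helper from the tree. */
static void ld1bdu_sketch(uint64_t *zd, const uint8_t *mem,
                          const uint8_t *pg, size_t lanes)
{
    for (size_t i = 0; i < lanes; i++) {
        if (pg[i / 8] & (1u << (i % 8))) {
            zd[i] = (uint64_t)mem[i];   /* zero-extend 8 -> 64 bits */
        } else {
            zd[i] = 0;                  /* predicated-off lanes become zero */
        }
    }
}
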
+DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldff1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldnf1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) 
+DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_st1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_st1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_st1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_st1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_st1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_st1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_st1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + 
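
Note the different shape of these memory helpers compared with the arithmetic ones: they receive the CPU state, the governing predicate, the guest virtual address, and a descriptor, but no pointer to the data register. The register number travels packed inside desc and the helper digs the Z register out of env, which it needs anyway because the guest access may fault; that is also why these are only TCG_CALL_NO_WG, since a faulting path may still read global state. (The ldff/ldnf variants declared above differ only in fault handling: elements after the first report failure by trimming the first-fault register rather than trapping.) A hedged sketch of the calling pattern; the desc field layout and register file shape are illustrative assumptions, not QEMU's actual encoding:

#include <stdint.h>

/* Stand-in types: the real CPUARMState and descriptor encoding are
 * QEMU-internal details, assumed here for illustration only. */
typedef struct {
    uint64_t zregs[32][32];              /* 32 vector regs, up to 2048-bit */
} CPUStateSketch;

static void st1bb_sketch(CPUStateSketch *env, const uint8_t *pg,
                         uint8_t *mem, uint32_t desc)
{
    unsigned rd    = desc & 0x1f;        /* assumed: Zt number in low bits */
    unsigned oprsz = (desc >> 5) & 0xff; /* assumed: vector size in bytes */
    const uint8_t *zt = (const uint8_t *)env->zregs[rd];

    for (unsigned i = 0; i < oprsz; i++) {
        if (pg[i / 8] & (1u << (i % 8))) {   /* one predicate bit per byte */
            mem[i] = zt[i];                  /* store active bytes only */
        }
    }
}
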
+DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_st1hs_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st1hd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st1hs_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st1hd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve_st1sd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhsu_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldss_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldss_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldbss_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhss_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhss_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_ldbsu_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhsu_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhsu_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldss_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldss_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldbss_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhss_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhss_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_ldbdu_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhdu_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhdu_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsdu_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsdu_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_lddd_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_lddd_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldbds_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhds_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhds_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsds_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsds_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_ldbdu_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhdu_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhdu_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss, TCG_CALL_NO_WG, + void, 
env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsdu_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_lddd_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_lddd_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldbds_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhds_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhds_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsds_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsds_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_ldbdu_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhdu_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhdu_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsdu_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsdu_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_lddd_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_lddd_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldbds_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhds_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldhds_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsds_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldsds_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffss_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffss_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffbss_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhss_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhss_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_ldffbsu_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffss_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffss_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffbss_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhss_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhss_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zsu, TCG_CALL_NO_WG, + void, 
env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffdd_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffdd_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffbds_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhds_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhds_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsds_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsds_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_ldffbdu_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffdd_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffdd_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffbds_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhds_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhds_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsds_le_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsds_be_zss, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_ldffbdu_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffdd_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffdd_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffbds_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhds_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffhds_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsds_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_ldffsds_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_stbs_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_sths_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_sths_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_stss_le_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_6(sve_stss_be_zsu, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_6(sve_stbs_zss, 
TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sths_le_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sths_be_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_le_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_be_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbd_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_le_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_be_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_le_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbd_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_le_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_be_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_le_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbd_zd, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_le_zd, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_be_zd, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_le_zd, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zd, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zd, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zd, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/qemu/target/arm/helper.c b/qemu/target/arm/helper.c
new file mode 100644
index 00000000..9789455b
--- /dev/null
+++ b/qemu/target/arm/helper.c
@@ -0,0 +1,11968 @@
+/*
+ * ARM generic helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "cpu.h" +#include "internals.h" +#include "exec/helper-proto.h" +#include "qemu/host-utils.h" +#include "qemu/bitops.h" +#include "qemu/crc32c.h" +#include "exec/exec-all.h" +#include "sysemu/cpus.h" +#include "sysemu/tcg.h" +#include "qemu/range.h" +#include "qemu/guest-random.h" +#include "arm_ldst.h" +#include "exec/cpu_ldst.h" +#include "kvm-consts.h" + +#ifdef TARGET_AARCH64 +#include +#endif + +#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ + +static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, + target_ulong *page_size_ptr, + ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); + +static void switch_mode(CPUARMState *env, int mode); + +static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + assert(ri->fieldoffset); + if (cpreg_field_is_64bit(ri)) { + return CPREG_FIELD64(env, ri); + } else { + return CPREG_FIELD32(env, ri); + } +} + +static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + assert(ri->fieldoffset); + if (cpreg_field_is_64bit(ri)) { + CPREG_FIELD64(env, ri) = value; + } else { + CPREG_FIELD32(env, ri) = value; + } +} + +static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return (char *)env + ri->fieldoffset; +} + +uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Raw read of a coprocessor register (as needed for migration, etc). */ + if (ri->type & ARM_CP_CONST) { + return ri->resetvalue; + } else if (ri->raw_readfn) { + return ri->raw_readfn(env, ri); + } else if (ri->readfn) { + return ri->readfn(env, ri); + } else { + return raw_read(env, ri); + } +} + +/* + * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but + * they are accessible when EL3 is using AArch64 regardless of EL3.NS. + * + * access_el3_aa32ns: Used to check AArch32 register views. + * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views. + */ +static CPAccessResult access_el3_aa32ns(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + bool secure = arm_is_secure_below_el3(env); + + assert(!arm_el_is_aa64(env, 3)); + if (secure) { + return CP_ACCESS_TRAP_UNCATEGORIZED; + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + if (!arm_el_is_aa64(env, 3)) { + return access_el3_aa32ns(env, ri, isread); + } + return CP_ACCESS_OK; +} + +/* Some secure-only AArch32 registers trap to EL3 if used from + * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). + * Note that an access from Secure EL1 can only happen if EL3 is AArch64. + * We assume that the .access field is set to PL1_RW. 
+ */ +static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 3) { + return CP_ACCESS_OK; + } + if (arm_is_secure_below_el3(env)) { + return CP_ACCESS_TRAP_EL3; + } + /* This will be EL1 NS and EL2 NS, which just UNDEF */ + return CP_ACCESS_TRAP_UNCATEGORIZED; +} + +/* Check for traps to "powerdown debug" registers, which are controlled + * by MDCR.TDOSA + */ +static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) || + (env->cp15.mdcr_el2 & MDCR_TDE) || + (arm_hcr_el2_eff(env) & HCR_TGE); + + if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +/* Check for traps to "debug ROM" registers, which are controlled + * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. + */ +static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) || + (env->cp15.mdcr_el2 & MDCR_TDE) || + (arm_hcr_el2_eff(env) & HCR_TGE); + + if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +/* Check for traps to general debug registers, which are controlled + * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. + */ +static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) || + (env->cp15.mdcr_el2 & MDCR_TDE) || + (arm_hcr_el2_eff(env) & HCR_TGE); + + if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +/* Check for traps to performance monitor registers, which are controlled + * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. + */ +static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + + if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) + && !arm_is_secure_below_el3(env)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */ +static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1) { + uint64_t trap = isread ? HCR_TRVM : HCR_TVM; + if (arm_hcr_el2_eff(env) & trap) { + return CP_ACCESS_TRAP_EL2; + } + } + return CP_ACCESS_OK; +} + +/* Check for traps from EL1 due to HCR_EL2.TSW. */ +static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; +} + +/* Check for traps from EL1 due to HCR_EL2.TACR. 
*/ +static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; +} + +/* Check for traps from EL1 due to HCR_EL2.TTLB. */ +static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; +} + +static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + raw_write(env, ri, value); + tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ +} + +static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + if (raw_read(env, ri) != value) { + /* Unlike real hardware the qemu TLB uses virtual addresses, + * not modified virtual addresses, so this causes a TLB flush. + */ + tlb_flush(CPU(cpu)); + raw_write(env, ri, value); + } +} + +static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) + && !extended_addresses_enabled(env)) { + /* For VMSA (when not using the LPAE long descriptor page table + * format) this register includes the ASID, so do a TLB flush. + * For PMSA it is purely a process ID and no action is needed. + */ + tlb_flush(CPU(cpu)); + } + raw_write(env, ri, value); +} + +/* IS variants of TLB operations must affect all cores */ +static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_all_cpus_synced(cs); +} + +static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_all_cpus_synced(cs); +} + +static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + struct uc_struct *uc = env->uc; + CPUState *cs = env_cpu(env); + + tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); +} + +static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + struct uc_struct *uc = env->uc; + CPUState *cs = env_cpu(env); + + tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); +} + +/* + * Non-IS variants of TLB operations are upgraded to + * IS versions if we are at NS EL1 and HCR_EL2.FB is set to + * force broadcast of these operations. 
+ */ +static bool tlb_force_broadcast(CPUARMState *env) +{ + return (env->cp15.hcr_el2 & HCR_FB) && + arm_current_el(env) == 1 && arm_is_secure_below_el3(env); +} + +static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate all (TLBIALL) */ + CPUState *cs = env_cpu(env); + + if (tlb_force_broadcast(env)) { + tlb_flush_all_cpus_synced(cs); + } else { + tlb_flush(cs); + } +} + +static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + struct uc_struct *uc = env->uc; + /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ + CPUState *cs = env_cpu(env); + + value &= TARGET_PAGE_MASK; + if (tlb_force_broadcast(env)) { + tlb_flush_page_all_cpus_synced(cs, value); + } else { + tlb_flush_page(cs, value); + } +} + +static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by ASID (TLBIASID) */ + CPUState *cs = env_cpu(env); + + if (tlb_force_broadcast(env)) { + tlb_flush_all_cpus_synced(cs); + } else { + tlb_flush(cs); + } +} + +static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + struct uc_struct *uc = env->uc; + /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ + CPUState *cs = env_cpu(env); + + value &= TARGET_PAGE_MASK; + if (tlb_force_broadcast(env)) { + tlb_flush_page_all_cpus_synced(cs, value); + } else { + tlb_flush_page(cs, value); + } +} + +static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_by_mmuidx(cs, + ARMMMUIdxBit_E10_1 | + ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E10_0 | + ARMMMUIdxBit_Stage2); +} + +static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, + ARMMMUIdxBit_E10_1 | + ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E10_0 | + ARMMMUIdxBit_Stage2); +} + +static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by IPA. This has to invalidate any structures that + * contain only stage 2 translation information, but does not need + * to apply to structures that contain combined stage 1 and stage 2 + * translation information. + * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 
+ */ + CPUState *cs = env_cpu(env); + uint64_t pageaddr; + + if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { + return; + } + + pageaddr = sextract64(value << 12, 0, 40); + + tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); +} + +static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr; + + if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { + return; + } + + pageaddr = sextract64(value << 12, 0, 40); + + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + ARMMMUIdxBit_Stage2); +} + +static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); +} + +static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); +} + +static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); + + tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2); +} + +static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); + + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + ARMMMUIdxBit_E2); +} + +static const ARMCPRegInfo cp_reginfo[] = { + /* Define the secure and non-secure FCSE identifier CP registers + * separately because there is no secure bank in V8 (no _EL3). This allows + * the secure register to be properly reset and migrated. There is also no + * v8 EL1 version of the register so the non-secure instance stands alone. + */ + { .name = "FCSEIDR", + .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, + .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, + .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns), + .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, + { .name = "FCSEIDR_S", + .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, + .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, + .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), + .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, + /* Define the secure and non-secure context identifier CP registers + * separately because there is no secure bank in V8 (no _EL3). This allows + * the secure register to be properly reset and migrated. In the + * non-secure case, the 32-bit register will have reset and migration + * disabled during registration as it is handled by the 64-bit instance. 
+ */ + { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .secure = ARM_CP_SECSTATE_NS, + .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), + .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, + { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .secure = ARM_CP_SECSTATE_S, + .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), + .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo not_v8_cp_reginfo[] = { + /* NB: Some of these registers exist in v8 but with more precise + * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). + */ + /* MMU Domain access control / MPU write buffer control */ + { .name = "DACR", + .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY, + .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, + .writefn = dacr_write, .raw_writefn = raw_write, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), + offsetoflow32(CPUARMState, cp15.dacr_ns) } }, + /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. + * For v6 and v5, these mappings are overly broad. + */ + { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, + .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, + { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, + .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, + { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, + .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, + { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, + .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, + /* Cache maintenance ops; some of this space may be overridden later. */ + { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, + .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, + .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo not_v6_cp_reginfo[] = { + /* Not all pre-v6 cores implemented this WFI, so this is slightly + * over-broad. + */ + { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, + .access = PL1_W, .type = ARM_CP_WFI }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo not_v7_cp_reginfo[] = { + /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which + * is UNPREDICTABLE; we choose to NOP as most implementations do). + */ + { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, + .access = PL1_W, .type = ARM_CP_WFI }, + /* L1 cache lockdown. Not architectural in v6 and earlier but in practice + * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and + * OMAPCP will override this space. 
+ */ + { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), + .resetvalue = 0 }, + { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), + .resetvalue = 0 }, + /* v6 doesn't have the cache ID registers but Linux reads them anyway */ + { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, + .resetvalue = 0 }, + /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; + * implementing it as RAZ means the "debug architecture version" bits + * will read as a reserved value, which should cause Linux to not try + * to use the debug hardware. + */ + { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* MMU TLB control. Note that the wildcarding means we cover not just + * the unified TLB ops but also the dside/iside/inner-shareable variants. + */ + { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, + .type = ARM_CP_NO_RAW }, + { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, + .type = ARM_CP_NO_RAW }, + { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, + .type = ARM_CP_NO_RAW }, + { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, + .type = ARM_CP_NO_RAW }, + { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, + .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, + { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, + .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, + REGINFO_SENTINEL +}; + +static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint32_t mask = 0; + + /* In ARMv8 most bits of CPACR_EL1 are RES0. */ + if (!arm_feature(env, ARM_FEATURE_V8)) { + /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. + * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. + * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. + */ + if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { + /* VFP coprocessor: cp10 & cp11 [23:20] */ + mask |= (1 << 31) | (1 << 30) | (0xf << 20); + + if (!arm_feature(env, ARM_FEATURE_NEON)) { + /* ASEDIS [31] bit is RAO/WI */ + value |= (1 << 31); + } + + /* VFPv3 and upwards with NEON implement 32 double precision + * registers (D0-D31). + */ + if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) { + /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ + value |= (1 << 30); + } + } + value &= mask; + } + + /* + * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 + * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. + */ + if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && + !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { + value &= ~(0xf << 20); + value |= env->cp15.cpacr_el1 & (0xf << 20); + } + + env->cp15.cpacr_el1 = value; +} + +static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* + * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 + * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 
+ */ + uint64_t value = env->cp15.cpacr_el1; + + if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && + !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { + value &= ~(0xf << 20); + } + return value; +} + + +static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Call cpacr_write() so that we reset with the correct RAO bits set + * for our CPU features. + */ + cpacr_write(env, ri, 0); +} + +static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_feature(env, ARM_FEATURE_V8)) { + /* Check if CPACR accesses are to be trapped to EL2 */ + if (arm_current_el(env) == 1 && + (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) { + return CP_ACCESS_TRAP_EL2; + /* Check if CPACR accesses are to be trapped to EL3 */ + } else if (arm_current_el(env) < 3 && + (env->cp15.cptr_el[3] & CPTR_TCPAC)) { + return CP_ACCESS_TRAP_EL3; + } + } + + return CP_ACCESS_OK; +} + +static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* Check if CPTR accesses are set to trap to EL3 */ + if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { + return CP_ACCESS_TRAP_EL3; + } + + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo v6_cp_reginfo[] = { + /* prefetch by MVA in v6, NOP in v7 */ + { .name = "MVA_prefetch", + .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NOP }, + /* We need to break the TB after ISB to execute self-modifying code + * correctly and also to take any pending interrupts immediately. + * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. + */ + { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, + .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, + { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, + .access = PL0_W, .type = ARM_CP_NOP }, + { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, + .access = PL0_W, .type = ARM_CP_NOP }, + { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), + offsetof(CPUARMState, cp15.ifar_ns) }, + .resetvalue = 0, }, + /* Watchpoint Fault Address Register : should actually only be present + * for 1136, 1176, 11MPCore. + */ + { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, + { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, + .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), + .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, + REGINFO_SENTINEL +}; + +/* Definitions for the PMU registers */ +#define PMCRN_MASK 0xf800 +#define PMCRN_SHIFT 11 +#define PMCRLC 0x40 +#define PMCRDP 0x20 +#define PMCRX 0x10 +#define PMCRD 0x8 +#define PMCRC 0x4 +#define PMCRP 0x2 +#define PMCRE 0x1 +/* + * Mask of PMCR bits writeable by guest (not including WO bits like C, P, + * which can be written as 1 to trigger behaviour but which stay RAZ). 
+ */ +#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE) + +#define PMXEVTYPER_P 0x80000000 +#define PMXEVTYPER_U 0x40000000 +#define PMXEVTYPER_NSK 0x20000000 +#define PMXEVTYPER_NSU 0x10000000 +#define PMXEVTYPER_NSH 0x08000000 +#define PMXEVTYPER_M 0x04000000 +#define PMXEVTYPER_MT 0x02000000 +#define PMXEVTYPER_EVTCOUNT 0x0000ffff +#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \ + PMXEVTYPER_NSU | PMXEVTYPER_NSH | \ + PMXEVTYPER_M | PMXEVTYPER_MT | \ + PMXEVTYPER_EVTCOUNT) + +#define PMCCFILTR 0xf8000000 +#define PMCCFILTR_M PMXEVTYPER_M +#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M) + +static inline uint32_t pmu_num_counters(CPUARMState *env) +{ + return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT; +} + +/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ +static inline uint64_t pmu_counter_mask(CPUARMState *env) +{ + return (1 << 31) | ((1 << pmu_num_counters(env)) - 1); +} + +typedef struct pm_event { + uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ + /* If the event is supported on this CPU (used to generate PMCEID[01]) */ + bool (*supported)(CPUARMState *); + /* + * Retrieve the current count of the underlying event. The programmed + * counters hold a difference from the return value from this function + */ + uint64_t (*get_count)(CPUARMState *); + /* + * Return how many nanoseconds it will take (at a minimum) for count events + * to occur. A negative value indicates the counter will never overflow, or + * that the counter has otherwise arranged for the overflow bit to be set + * and the PMU interrupt to be raised on overflow. + */ + int64_t (*ns_per_count)(uint64_t); +} pm_event; + +static bool event_always_supported(CPUARMState *env) +{ + return true; +} + +static uint64_t swinc_get_count(CPUARMState *env) +{ + /* + * SW_INCR events are written directly to the pmevcntr's by writes to + * PMSWINC, so there is no underlying count maintained by the PMU itself + */ + return 0; +} + +static int64_t swinc_ns_per(uint64_t ignored) +{ + return -1; +} + +/* + * Return the underlying cycle count for the PMU cycle counters. If we're in + * usermode, simply return 0. 
+ */
+static uint64_t cycles_get_count(CPUARMState *env)
+{
+    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
+}
+
+static int64_t cycles_ns_per(uint64_t cycles)
+{
+    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
+}
+
+static bool instructions_supported(CPUARMState *env)
+{
+    return false;
+}
+
+static uint64_t instructions_get_count(CPUARMState *env)
+{
+    return 0;
+}
+
+static int64_t instructions_ns_per(uint64_t icount)
+{
+    return cpu_icount_to_ns((int64_t)icount);
+}
+
+static bool pmu_8_1_events_supported(CPUARMState *env)
+{
+    /* For events which are supported in any v8.1 PMU */
+    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
+}
+
+static bool pmu_8_4_events_supported(CPUARMState *env)
+{
+    /* For events which are supported in any v8.4 PMU */
+    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
+}
+
+static uint64_t zero_event_get_count(CPUARMState *env)
+{
+    /* For events which on QEMU never fire, so their count is always zero */
+    return 0;
+}
+
+static int64_t zero_event_ns_per(uint64_t cycles)
+{
+    /* An event which never fires can never overflow */
+    return -1;
+}
+
+static const pm_event pm_events[] = {
+    { .number = 0x000, /* SW_INCR */
+      .supported = event_always_supported,
+      .get_count = swinc_get_count,
+      .ns_per_count = swinc_ns_per,
+    },
+    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
+      .supported = instructions_supported,
+      .get_count = instructions_get_count,
+      .ns_per_count = instructions_ns_per,
+    },
+    { .number = 0x011, /* CPU_CYCLES, Cycle */
+      .supported = event_always_supported,
+      .get_count = cycles_get_count,
+      .ns_per_count = cycles_ns_per,
+    },
+    { .number = 0x023, /* STALL_FRONTEND */
+      .supported = pmu_8_1_events_supported,
+      .get_count = zero_event_get_count,
+      .ns_per_count = zero_event_ns_per,
+    },
+    { .number = 0x024, /* STALL_BACKEND */
+      .supported = pmu_8_1_events_supported,
+      .get_count = zero_event_get_count,
+      .ns_per_count = zero_event_ns_per,
+    },
+    { .number = 0x03c, /* STALL */
+      .supported = pmu_8_4_events_supported,
+      .get_count = zero_event_get_count,
+      .ns_per_count = zero_event_ns_per,
+    },
+};
+
+/*
+ * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
+ * events (i.e. the statistical profiling extension), this implementation
+ * should first be updated to something sparse instead of the current
+ * supported_event_map[] array.
+ */
+#define MAX_EVENT_ID 0x3c
+#define UNSUPPORTED_EVENT UINT16_MAX
+static uint16_t supported_event_map[MAX_EVENT_ID + 1];
+
+/*
+ * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
+ * of ARM event numbers to indices in our pm_events array.
+ *
+ * Note: Events in the 0x40XX range are not currently supported.
+ */ +void pmu_init(ARMCPU *cpu) +{ + unsigned int i; + + /* + * Empty supported_event_map and cpu->pmceid[01] before adding supported + * events to them + */ + for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) { + supported_event_map[i] = UNSUPPORTED_EVENT; + } + cpu->pmceid0 = 0; + cpu->pmceid1 = 0; + + for (i = 0; i < ARRAY_SIZE(pm_events); i++) { + const pm_event *cnt = &pm_events[i]; + assert(cnt->number <= MAX_EVENT_ID); + /* We do not currently support events in the 0x40xx range */ + assert(cnt->number <= 0x3f); + + if (cnt->supported(&cpu->env)) { + supported_event_map[cnt->number] = i; + uint64_t event_mask = 1ULL << (cnt->number & 0x1f); + if (cnt->number & 0x20) { + cpu->pmceid1 |= event_mask; + } else { + cpu->pmceid0 |= event_mask; + } + } + } +} + +/* + * Check at runtime whether a PMU event is supported for the current machine + */ +static bool event_supported(uint16_t number) +{ + if (number > MAX_EVENT_ID) { + return false; + } + return supported_event_map[number] != UNSUPPORTED_EVENT; +} + +static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* Performance monitor registers user accessibility is controlled + * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable + * trapping to EL2 or EL3 for other accesses. + */ + int el = arm_current_el(env); + + if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { + return CP_ACCESS_TRAP; + } + if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) + && !arm_is_secure_below_el3(env)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { + return CP_ACCESS_TRAP_EL3; + } + + return CP_ACCESS_OK; +} + +static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* ER: event counter read trap control */ + if (arm_feature(env, ARM_FEATURE_V8) + && arm_current_el(env) == 0 + && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 + && isread) { + return CP_ACCESS_OK; + } + + return pmreg_access(env, ri, isread); +} + +static CPAccessResult pmreg_access_swinc(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* SW: software increment write trap control */ + if (arm_feature(env, ARM_FEATURE_V8) + && arm_current_el(env) == 0 + && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 + && !isread) { + return CP_ACCESS_OK; + } + + return pmreg_access(env, ri, isread); +} + +static CPAccessResult pmreg_access_selr(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* ER: event counter read trap control */ + if (arm_feature(env, ARM_FEATURE_V8) + && arm_current_el(env) == 0 + && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { + return CP_ACCESS_OK; + } + + return pmreg_access(env, ri, isread); +} + +static CPAccessResult pmreg_access_ccntr(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* CR: cycle counter read trap control */ + if (arm_feature(env, ARM_FEATURE_V8) + && arm_current_el(env) == 0 + && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 + && isread) { + return CP_ACCESS_OK; + } + + return pmreg_access(env, ri, isread); +} + +/* Returns true if the counter (pass 31 for PMCCNTR) should count events using + * the current EL, security state, and register configuration. 
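+ *
+ * The answer is the conjunction of three checks computed below: the
+ * counter is enabled (PMCR.E or MDCR_EL2.HPME, plus its PMCNTENSET bit),
+ * it is not prohibited (MDCR_EL2.HPMD / MDCR_EL3.SPME / PMCR.DP for the
+ * current EL and security state), and it is not filtered out by the
+ * P/U/NSK/NSU/NSH/M bits of its PMEVTYPER (or PMCCFILTR) register.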
+ */ +static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) +{ + uint64_t filter; + bool e, p, u, nsk, nsu, nsh, m; + bool enabled, prohibited, filtered; + bool secure = arm_is_secure(env); + int el = arm_current_el(env); + uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; + + if (!arm_feature(env, ARM_FEATURE_PMU)) { + return false; + } + + if (!arm_feature(env, ARM_FEATURE_EL2) || + (counter < hpmn || counter == 31)) { + e = env->cp15.c9_pmcr & PMCRE; + } else { + e = env->cp15.mdcr_el2 & MDCR_HPME; + } + enabled = e && (env->cp15.c9_pmcnten & (1ULL << counter)); + + if (!secure) { + if (el == 2 && (counter < (hpmn & 0x7) || counter == 31)) { + prohibited = env->cp15.mdcr_el2 & MDCR_HPMD; + } else { + prohibited = false; + } + } else { + prohibited = arm_feature(env, ARM_FEATURE_EL3) && + (env->cp15.mdcr_el3 & MDCR_SPME); + } + + if (prohibited && counter == 31) { + prohibited = env->cp15.c9_pmcr & PMCRDP; + } + + if (counter == 31) { + filter = env->cp15.pmccfiltr_el0; + } else { + filter = env->cp15.c14_pmevtyper[counter]; + } + + p = filter & PMXEVTYPER_P; + u = filter & PMXEVTYPER_U; + nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK); + nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU); + nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH); + m = arm_el_is_aa64(env, 1) && + arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M); + + if (el == 0) { + filtered = secure ? u : u != nsu; + } else if (el == 1) { + filtered = secure ? p : p != nsk; + } else if (el == 2) { + filtered = !nsh; + } else { /* EL3 */ + filtered = m != p; + } + + if (counter != 31) { + /* + * If not checking PMCCNTR, ensure the counter is setup to an event we + * support + */ + uint16_t event = filter & PMXEVTYPER_EVTCOUNT; + if (!event_supported(event)) { + return false; + } + } + + return enabled && !prohibited && !filtered; +} + +/* + * Ensure c15_ccnt is the guest-visible count so that operations such as + * enabling/disabling the counter or filtering, modifying the count itself, + * etc. can be done logically. This is essentially a no-op if the counter is + * not enabled at the time of the call. + */ +static void pmccntr_op_start(CPUARMState *env) +{ + uint64_t cycles = cycles_get_count(env); + + if (pmu_counter_enabled(env, 31)) { + uint64_t eff_cycles = cycles; + if (env->cp15.c9_pmcr & PMCRD) { + /* Increment once every 64 processor clock cycles */ + eff_cycles /= 64; + } + + uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta; + + uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \ + 1ull << 63 : 1ull << 31; + if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { + env->cp15.c9_pmovsr |= (1 << 31); + } + + env->cp15.c15_ccnt = new_pmccntr; + } + env->cp15.c15_ccnt_delta = cycles; +} + +/* + * If PMCCNTR is enabled, recalculate the delta between the clock and the + * guest-visible count. A call to pmccntr_op_finish should follow every call to + * pmccntr_op_start. 
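+ *
+ * Register handlers therefore bracket their access with the pair, as
+ * pmccntr_write() further below does:
+ *
+ *     pmccntr_op_start(env);
+ *     env->cp15.c15_ccnt = value;
+ *     pmccntr_op_finish(env);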
+ */ +static void pmccntr_op_finish(CPUARMState *env) +{ +#if 0 + if (pmu_counter_enabled(env, 31)) { + /* Calculate when the counter will next overflow */ + uint64_t remaining_cycles = -env->cp15.c15_ccnt; + if (!(env->cp15.c9_pmcr & PMCRLC)) { + remaining_cycles = (uint32_t)remaining_cycles; + } + int64_t overflow_in = cycles_ns_per(remaining_cycles); + + if (overflow_in > 0) { + int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + overflow_in; + ARMCPU *cpu = env_archcpu(env); + timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + } + + uint64_t prev_cycles = env->cp15.c15_ccnt_delta; + if (env->cp15.c9_pmcr & PMCRD) { + /* Increment once every 64 processor clock cycles */ + prev_cycles /= 64; + } + env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; + } +#endif +} + +static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) +{ + + uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; + uint64_t count = 0; + if (event_supported(event)) { + uint16_t event_idx = supported_event_map[event]; + count = pm_events[event_idx].get_count(env); + } + + if (pmu_counter_enabled(env, counter)) { + uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; + + if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) { + env->cp15.c9_pmovsr |= (1ULL << counter); + } + env->cp15.c14_pmevcntr[counter] = new_pmevcntr; + } + env->cp15.c14_pmevcntr_delta[counter] = count; +} + +static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) +{ +#if 0 + if (pmu_counter_enabled(env, counter)) { + uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; + uint16_t event_idx = supported_event_map[event]; + uint64_t delta = UINT32_MAX - + (uint32_t)env->cp15.c14_pmevcntr[counter] + 1; + int64_t overflow_in = pm_events[event_idx].ns_per_count(delta); + + if (overflow_in > 0) { + int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + overflow_in; + ARMCPU *cpu = env_archcpu(env); + timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + } + + env->cp15.c14_pmevcntr_delta[counter] -= + env->cp15.c14_pmevcntr[counter]; + } +#endif +} + +void pmu_op_start(CPUARMState *env) +{ + unsigned int i; + pmccntr_op_start(env); + for (i = 0; i < pmu_num_counters(env); i++) { + pmevcntr_op_start(env, i); + } +} + +void pmu_op_finish(CPUARMState *env) +{ + unsigned int i; + pmccntr_op_finish(env); + for (i = 0; i < pmu_num_counters(env); i++) { + pmevcntr_op_finish(env, i); + } +} + +void pmu_pre_el_change(ARMCPU *cpu, void *ignored) +{ + pmu_op_start(&cpu->env); +} + +void pmu_post_el_change(ARMCPU *cpu, void *ignored) +{ + pmu_op_finish(&cpu->env); +} + +void arm_pmu_timer_cb(void *opaque) +{ + ARMCPU *cpu = opaque; + + /* + * Update all the counter values based on the current underlying counts, + * triggering interrupts to be raised, if necessary. pmu_op_finish() also + * has the effect of setting the cpu->pmu_timer to the next earliest time a + * counter may expire. 
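+     *
+     * (In this import the timer re-arming bodies of pmccntr_op_finish()
+     * and pmevcntr_op_finish() are compiled out with "#if 0", so this
+     * callback currently only resynchronises the counter values.)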
+ */ + pmu_op_start(&cpu->env); + pmu_op_finish(&cpu->env); +} + +static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmu_op_start(env); + + if (value & PMCRC) { + /* The counter has been reset */ + env->cp15.c15_ccnt = 0; + } + + if (value & PMCRP) { + unsigned int i; + for (i = 0; i < pmu_num_counters(env); i++) { + env->cp15.c14_pmevcntr[i] = 0; + } + } + + env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK; + env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK); + + pmu_op_finish(env); +} + +static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + unsigned int i; + for (i = 0; i < pmu_num_counters(env); i++) { + /* Increment a counter's count iff: */ + if ((value & (1ULL << i)) && /* counter's bit is set */ + /* counter is enabled and not filtered */ + pmu_counter_enabled(env, i) && + /* counter is SW_INCR */ + (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { + pmevcntr_op_start(env, i); + + /* + * Detect if this write causes an overflow since we can't predict + * PMSWINC overflows like we can for other events + */ + uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; + + if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) { + env->cp15.c9_pmovsr |= (1ULL << i); + } + + env->cp15.c14_pmevcntr[i] = new_pmswinc; + + pmevcntr_op_finish(env, i); + } + } +} + +static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint64_t ret; + pmccntr_op_start(env); + ret = env->cp15.c15_ccnt; + pmccntr_op_finish(env); + return ret; +} + +static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and + * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the + * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are + * accessed. 
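+     *
+     * In particular, a SEL value of 31 makes PMXEVTYPER an alias of
+     * PMCCFILTR (see pmevtyper_write() below), while selections at or
+     * above pmu_num_counters() but below 31 behave as RAZ/WI.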
+ */ + env->cp15.c9_pmselr = value & 0x1f; +} + +static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmccntr_op_start(env); + env->cp15.c15_ccnt = value; + pmccntr_op_finish(env); +} + +static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint64_t cur_val = pmccntr_read(env, NULL); + + pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); +} + +static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmccntr_op_start(env); + env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; + pmccntr_op_finish(env); +} + +static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmccntr_op_start(env); + /* M is not accessible from AArch32 */ + env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | + (value & PMCCFILTR); + pmccntr_op_finish(env); +} + +static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* M is not visible in AArch32 */ + return env->cp15.pmccfiltr_el0 & PMCCFILTR; +} + +static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= pmu_counter_mask(env); + env->cp15.c9_pmcnten |= value; +} + +static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= pmu_counter_mask(env); + env->cp15.c9_pmcnten &= ~value; +} + +static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= pmu_counter_mask(env); + env->cp15.c9_pmovsr &= ~value; +} + +static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= pmu_counter_mask(env); + env->cp15.c9_pmovsr |= value; +} + +static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value, const uint8_t counter) +{ + if (counter == 31) { + pmccfiltr_write(env, ri, value); + } else if (counter < pmu_num_counters(env)) { + pmevcntr_op_start(env, counter); + + /* + * If this counter's event type is changing, store the current + * underlying count for the new type in c14_pmevcntr_delta[counter] so + * pmevcntr_op_finish has the correct baseline when it converts back to + * a delta. + */ + uint16_t old_event = env->cp15.c14_pmevtyper[counter] & + PMXEVTYPER_EVTCOUNT; + uint16_t new_event = value & PMXEVTYPER_EVTCOUNT; + if (old_event != new_event) { + uint64_t count = 0; + if (event_supported(new_event)) { + uint16_t event_idx = supported_event_map[new_event]; + count = pm_events[event_idx].get_count(env); + } + env->cp15.c14_pmevcntr_delta[counter] = count; + } + + env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; + pmevcntr_op_finish(env, counter); + } + /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when + * PMSELR value is equal to or greater than the number of implemented + * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. + */ +} + +static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri, + const uint8_t counter) +{ + if (counter == 31) { + return env->cp15.pmccfiltr_el0; + } else if (counter < pmu_num_counters(env)) { + return env->cp15.c14_pmevtyper[counter]; + } else { + /* + * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER + * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write(). 
+         */
+        return 0;
+    }
+}
+
+static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+                              uint64_t value)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    pmevtyper_write(env, ri, value, counter);
+}
+
+static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+                               uint64_t value)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    env->cp15.c14_pmevtyper[counter] = value;
+
+    /*
+     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
+     * pmu_op_finish calls when loading saved state for a migration. Because
+     * we're potentially updating the type of event here, the value written to
+     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
+     * different counter type. Therefore, we need to set this value to the
+     * current count for the counter type we're writing so that pmu_op_finish
+     * has the correct count for its calculation.
+     */
+    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
+    if (event_supported(event)) {
+        uint16_t event_idx = supported_event_map[event];
+        env->cp15.c14_pmevcntr_delta[counter] =
+            pm_events[event_idx].get_count(env);
+    }
+}
+
+static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+    return pmevtyper_read(env, ri, counter);
+}
+
+static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
+static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
+static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                           uint64_t value, uint8_t counter)
+{
+    if (counter < pmu_num_counters(env)) {
+        pmevcntr_op_start(env, counter);
+        env->cp15.c14_pmevcntr[counter] = value;
+        pmevcntr_op_finish(env, counter);
+    }
+    /*
+     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+     * are CONSTRAINED UNPREDICTABLE.
+     */
+}
+
+static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
+                              uint8_t counter)
+{
+    if (counter < pmu_num_counters(env)) {
+        uint64_t ret;
+        pmevcntr_op_start(env, counter);
+        ret = env->cp15.c14_pmevcntr[counter];
+        pmevcntr_op_finish(env, counter);
+        return ret;
+    } else {
+        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+         * are CONSTRAINED UNPREDICTABLE.
*/ + return 0; + } +} + +static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + pmevcntr_write(env, ri, value, counter); +} + +static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + return pmevcntr_read(env, ri, counter); +} + +static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + assert(counter < pmu_num_counters(env)); + env->cp15.c14_pmevcntr[counter] = value; + pmevcntr_write(env, ri, value, counter); +} + +static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + assert(counter < pmu_num_counters(env)); + return env->cp15.c14_pmevcntr[counter]; +} + +static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); +} + +static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); +} + +static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (arm_feature(env, ARM_FEATURE_V8)) { + env->cp15.c9_pmuserenr = value & 0xf; + } else { + env->cp15.c9_pmuserenr = value & 1; + } +} + +static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* We have no event counters so only the C bit can be changed */ + value &= pmu_counter_mask(env); + env->cp15.c9_pminten |= value; +} + +static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= pmu_counter_mask(env); + env->cp15.c9_pminten &= ~value; +} + +static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Note that even though the AArch64 view of this register has bits + * [10:0] all RES0 we can only mask the bottom 5, to comply with the + * architectural requirements for bits which are RES0 only in some + * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 + * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) + */ + raw_write(env, ri, value & ~0x1FULL); +} + +static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + /* Begin with base v8.0 state. */ + uint32_t valid_mask = 0x3fff; + ARMCPU *cpu = env_archcpu(env); + + if (arm_el_is_aa64(env, 3)) { + value |= SCR_FW | SCR_AW; /* these two bits are RES1. */ + valid_mask &= ~SCR_NET; + } else { + valid_mask &= ~(SCR_RW | SCR_ST); + } + + if (!arm_feature(env, ARM_FEATURE_EL2)) { + valid_mask &= ~SCR_HCE; + + /* On ARMv7, SMD (or SCD as it is called in v7) is only + * supported if EL2 exists. The bit is UNK/SBZP when + * EL2 is unavailable. In QEMU ARMv7, we force it to always zero + * when EL2 is unavailable. + * On ARMv8, this bit is always available. + */ + if (arm_feature(env, ARM_FEATURE_V7) && + !arm_feature(env, ARM_FEATURE_V8)) { + valid_mask &= ~SCR_SMD; + } + } + if (cpu_isar_feature(aa64_lor, cpu)) { + valid_mask |= SCR_TLOR; + } + if (cpu_isar_feature(aa64_pauth, cpu)) { + valid_mask |= SCR_API | SCR_APK; + } + + /* Clear all-context RES0 bits. 
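+     *
+     * For example, on a core without EL2 the SCR_HCE bit (and, on v7
+     * cores without v8, SCR_SMD) has been removed from valid_mask above,
+     * so it reads back as zero however the guest writes it.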
*/ + value &= valid_mask; + raw_write(env, ri, value); +} + +static CPAccessResult access_aa64_tid2(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) { + return CP_ACCESS_TRAP_EL2; + } + + return CP_ACCESS_OK; +} + +static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + ARMCPU *cpu = env_archcpu(env); + + /* Acquire the CSSELR index from the bank corresponding to the CCSIDR + * bank + */ + uint32_t index = A32_BANKED_REG_GET(env, csselr, + ri->secure & ARM_CP_SECSTATE_S); + + return cpu->ccsidr[index]; +} + +static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + raw_write(env, ri, value & 0xf); +} + +static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + CPUState *cs = env_cpu(env); + uint64_t hcr_el2 = arm_hcr_el2_eff(env); + uint64_t ret = 0; + bool allow_virt = (arm_current_el(env) == 1 && + (!arm_is_secure_below_el3(env) || + (env->cp15.scr_el3 & SCR_EEL2))); + + if (allow_virt && (hcr_el2 & HCR_IMO)) { + if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { + ret |= CPSR_I; + } + } else { + if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + ret |= CPSR_I; + } + } + + if (allow_virt && (hcr_el2 & HCR_FMO)) { + if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { + ret |= CPSR_F; + } + } else { + if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { + ret |= CPSR_F; + } + } + + /* External aborts are not possible in QEMU so A bit is always clear */ + return ret; +} + +static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { + return CP_ACCESS_TRAP_EL2; + } + + return CP_ACCESS_OK; +} + +static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_feature(env, ARM_FEATURE_V8)) { + return access_aa64_tid1(env, ri, isread); + } + + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo v7_cp_reginfo[] = { + /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ + { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, + .access = PL1_W, .type = ARM_CP_NOP }, + /* Performance monitors are implementation defined in v7, + * but with an ARM recommended set of registers, which we + * follow. + * + * Performance registers fall into three categories: + * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) + * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) + * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) + * For the cases controlled by PMUSERENR we must set .access to PL0_RW + * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
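+     *
+     * Concretely, in the table below: PMINTENSET/PMINTENCLR are case (a)
+     * and use PL1_RW with access_tpm; PMUSERENR is case (b) with
+     * PL0_R | PL1_RW; the remaining counter and event registers are case
+     * (c), gated by pmreg_access() or one of its PMUSERENR-bit-specific
+     * variants such as pmreg_access_swinc().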
+ */ + { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, + .access = PL0_RW, .type = ARM_CP_ALIAS, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), + .writefn = pmcntenset_write, + .accessfn = pmreg_access, + .raw_writefn = raw_write }, + { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, + .access = PL0_RW, .accessfn = pmreg_access, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, + .writefn = pmcntenset_write, .raw_writefn = raw_write }, + { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, + .access = PL0_RW, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), + .accessfn = pmreg_access, + .writefn = pmcntenclr_write, + .type = ARM_CP_ALIAS }, + { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, + .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), + .writefn = pmcntenclr_write }, + { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, + .access = PL0_RW, .type = ARM_CP_IO, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), + .accessfn = pmreg_access, + .writefn = pmovsr_write, + .raw_writefn = raw_write }, + { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, + .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), + .writefn = pmovsr_write, + .raw_writefn = raw_write }, + { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, + .access = PL0_W, .accessfn = pmreg_access_swinc, + .type = ARM_CP_NO_RAW | ARM_CP_IO, + .writefn = pmswinc_write }, + { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, + .access = PL0_W, .accessfn = pmreg_access_swinc, + .type = ARM_CP_NO_RAW | ARM_CP_IO, + .writefn = pmswinc_write }, + { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, + .access = PL0_RW, .type = ARM_CP_ALIAS, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), + .accessfn = pmreg_access_selr, .writefn = pmselr_write, + .raw_writefn = raw_write}, + { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, + .access = PL0_RW, .accessfn = pmreg_access_selr, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), + .writefn = pmselr_write, .raw_writefn = raw_write, }, + { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, + .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, + .readfn = pmccntr_read, .writefn = pmccntr_write32, + .accessfn = pmreg_access_ccntr }, + { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, + .access = PL0_RW, .accessfn = pmreg_access_ccntr, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), + .readfn = pmccntr_read, .writefn = pmccntr_write, + .raw_readfn = raw_read, .raw_writefn = raw_write, }, + { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, + .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, + .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .resetvalue = 0, }, + { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, 
.opc2 = 7, + .writefn = pmccfiltr_write, .raw_writefn = raw_write, + .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), + .resetvalue = 0, }, + { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, + .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = pmreg_access, + .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, + { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, + .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = pmreg_access, + .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, + { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, + .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = pmreg_access_xevcntr, + .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, + { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, + .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = pmreg_access_xevcntr, + .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, + { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, + .access = PL0_R | PL1_RW, .accessfn = access_tpm, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), + .resetvalue = 0, + .writefn = pmuserenr_write, .raw_writefn = raw_write }, + { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, + .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), + .resetvalue = 0, + .writefn = pmuserenr_write, .raw_writefn = raw_write }, + { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tpm, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), + .resetvalue = 0, + .writefn = pmintenset_write, .raw_writefn = raw_write }, + { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tpm, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), + .writefn = pmintenset_write, .raw_writefn = raw_write, + .resetvalue = 0x0 }, + { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tpm, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), + .writefn = pmintenclr_write, }, + { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tpm, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), + .writefn = pmintenclr_write }, + { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, + .access = PL1_R, + .accessfn = access_aa64_tid2, + .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, + { .name = "CSSELR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, + .access = PL1_RW, + .accessfn = access_aa64_tid2, + .writefn = csselr_write, .resetvalue = 0, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), + offsetof(CPUARMState, cp15.csselr_ns) } }, + /* Auxiliary ID register: this actually has an IMPDEF value but for now + * just RAZ for all cores: + */ + { 
.name = "AIDR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid1, + .resetvalue = 0 }, + /* Auxiliary fault status registers: these also are IMPDEF, and we + * choose to RAZ/WI for all cores. + */ + { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* MAIR can just read-as-written because we don't implement caches + * and so don't need to care about memory attributes. + */ + { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), + .resetvalue = 0 }, + { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, + .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), + .resetvalue = 0 }, + /* For non-long-descriptor page tables these are PRRR and NMRR; + * regardless they still act as reads-as-written for QEMU. + */ + /* MAIR0/1 are defined separately from their 64-bit counterpart which + * allows them to assign the correct fieldoffset based on the endianness + * handled in the field definitions. + */ + { .name = "MAIR0", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), + offsetof(CPUARMState, cp15.mair0_ns) }, + .resetfn = arm_cp_reset_ignore }, + { .name = "MAIR1", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), + offsetof(CPUARMState, cp15.mair1_ns) }, + .resetfn = arm_cp_reset_ignore }, + { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, + /* 32 bit ITLB invalidates */ + { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiall_write }, + { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimva_write }, + { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiasid_write }, + /* 32 bit DTLB invalidates */ + { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiall_write }, + { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimva_write }, + { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiasid_write }, + /* 32 bit TLB invalidates */ + { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 
= 0, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiall_write }, + { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimva_write }, + { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiasid_write }, + { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimvaa_write }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo v7mp_cp_reginfo[] = { + /* 32 bit TLB invalidates, Inner Shareable */ + { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiall_is_write }, + { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimva_is_write }, + { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiasid_is_write }, + { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimvaa_is_write }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo pmovsset_cp_reginfo[] = { + /* PMOVSSET is not implemented in v7 before v7ve */ + { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, + .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), + .writefn = pmovsset_write, + .raw_writefn = raw_write }, + { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, + .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), + .writefn = pmovsset_write, + .raw_writefn = raw_write }, + REGINFO_SENTINEL +}; + +static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= 1; + env->teecr = value; +} + +static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 0 && (env->teecr & 1)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo t2ee_cp_reginfo[] = { + { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), + .resetvalue = 0, + .writefn = teecr_write }, + { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, + .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), + .accessfn = teehbr_access, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo v6k_cp_reginfo[] = { + { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, + .access = PL0_RW, + .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, + { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL0_RW, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), + offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, + .resetfn = arm_cp_reset_ignore }, + { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, + .opc0 
= 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
+    .access = PL0_R|PL1_W,
+    .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
+    .resetvalue = 0},
+  { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
+    .access = PL0_R|PL1_W,
+    .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
+                           offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
+    .resetfn = arm_cp_reset_ignore },
+  { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
+    .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
+    .access = PL1_RW,
+    .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
+  { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
+    .access = PL1_RW,
+    .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
+                           offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
+    .resetvalue = 0 },
+  REGINFO_SENTINEL
+};
+
+static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
+     * Writable only at the highest implemented exception level.
+     */
+    int el = arm_current_el(env);
+    uint64_t hcr;
+    uint32_t cntkctl;
+
+    switch (el) {
+    case 0:
+        hcr = arm_hcr_el2_eff(env);
+        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+            cntkctl = env->cp15.cnthctl_el2;
+        } else {
+            cntkctl = env->cp15.c14_cntkctl;
+        }
+        if (!extract32(cntkctl, 0, 2)) {
+            return CP_ACCESS_TRAP;
+        }
+        break;
+    case 1:
+        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
+            arm_is_secure_below_el3(env)) {
+            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
+            return CP_ACCESS_TRAP_UNCATEGORIZED;
+        }
+        break;
+    case 2:
+    case 3:
+        break;
+    }
+
+    if (!isread && el < arm_highest_el(env)) {
+        return CP_ACCESS_TRAP_UNCATEGORIZED;
+    }
+
+    return CP_ACCESS_OK;
+}
+
+static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
+                                        bool isread)
+{
+    unsigned int cur_el = arm_current_el(env);
+    bool secure = arm_is_secure(env);
+    uint64_t hcr = arm_hcr_el2_eff(env);
+
+    switch (cur_el) {
+    case 0:
+        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
+        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
+                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
+        }
+
+        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
+        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
+            return CP_ACCESS_TRAP;
+        }
+
+        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
+        if (hcr & HCR_E2H) {
+            if (timeridx == GTIMER_PHYS &&
+                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
+                return CP_ACCESS_TRAP_EL2;
+            }
+        } else {
+            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
+            if (arm_feature(env, ARM_FEATURE_EL2) &&
+                timeridx == GTIMER_PHYS && !secure &&
+                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
+                return CP_ACCESS_TRAP_EL2;
+            }
+        }
+        break;
+
+    case 1:
+        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
+        if (arm_feature(env, ARM_FEATURE_EL2) &&
+            timeridx == GTIMER_PHYS && !secure &&
+            (hcr & HCR_E2H
+             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
+             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
+            return CP_ACCESS_TRAP_EL2;
+        }
+        break;
+    }
+    return CP_ACCESS_OK;
+}
+
+static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
+                                      bool isread)
+{
+    unsigned int cur_el = arm_current_el(env);
+    bool secure = arm_is_secure(env);
+    uint64_t hcr = arm_hcr_el2_eff(env);
+
+    switch (cur_el) {
+    case 0:
+        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
+            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
+                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
+        }
+
+        /*
+         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
+         * EL0 if EL0[PV]TEN is zero.
+         */
+        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
+            return CP_ACCESS_TRAP;
+        }
+        /* fall through */
+
+    case 1:
+        if (arm_feature(env, ARM_FEATURE_EL2) &&
+            timeridx == GTIMER_PHYS && !secure) {
+            if (hcr & HCR_E2H) {
+                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
+                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
+                    return CP_ACCESS_TRAP_EL2;
+                }
+            } else {
+                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
+                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
+                    return CP_ACCESS_TRAP_EL2;
+                }
+            }
+        }
+        break;
+    }
+    return CP_ACCESS_OK;
+}
+
+static CPAccessResult gt_pct_access(CPUARMState *env,
+                                    const ARMCPRegInfo *ri,
+                                    bool isread)
+{
+    return gt_counter_access(env, GTIMER_PHYS, isread);
+}
+
+static CPAccessResult gt_vct_access(CPUARMState *env,
+                                    const ARMCPRegInfo *ri,
+                                    bool isread)
+{
+    return gt_counter_access(env, GTIMER_VIRT, isread);
+}
+
+static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    return gt_timer_access(env, GTIMER_PHYS, isread);
+}
+
+static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    return gt_timer_access(env, GTIMER_VIRT, isread);
+}
+
+static CPAccessResult gt_stimer_access(CPUARMState *env,
+                                       const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    /* The AArch64 register view of the secure physical timer is
+     * always accessible from EL3, and configurably accessible from
+     * Secure EL1.
+     */
+    switch (arm_current_el(env)) {
+    case 1:
+        if (!arm_is_secure(env)) {
+            return CP_ACCESS_TRAP;
+        }
+        if (!(env->cp15.scr_el3 & SCR_ST)) {
+            return CP_ACCESS_TRAP_EL3;
+        }
+        return CP_ACCESS_OK;
+    case 0:
+    case 2:
+        return CP_ACCESS_TRAP;
+    case 3:
+        return CP_ACCESS_OK;
+    default:
+        g_assert_not_reached();
+        // never reach here
+        return CP_ACCESS_OK;
+    }
+}
+
+static uint64_t gt_get_countervalue(CPUARMState *env)
+{
+    ARMCPU *cpu = env_archcpu(env);
+
+    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
+}
+
+static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
+{
+#if 0
+    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
+
+    if (gt->ctl & 1) {
+        /* Timer enabled: calculate and set current ISTATUS, irq, and
+         * reset timer to when ISTATUS next has to change
+         */
+        uint64_t offset = timeridx == GTIMER_VIRT ?
+                          cpu->env.cp15.cntvoff_el2 : 0;
+        uint64_t count = gt_get_countervalue(&cpu->env);
+        /* Note that this must be unsigned 64 bit arithmetic: */
+        int istatus = count - offset >= gt->cval;
+        uint64_t nexttick;
+        int irqstate;
+
+        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
+
+        irqstate = (istatus && !(gt->ctl & 2));
+        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
+
+        if (istatus) {
+            /* Next transition is when count rolls back over to zero */
+            nexttick = UINT64_MAX;
+        } else {
+            /* Next transition is when we hit cval */
+            nexttick = gt->cval + offset;
+        }
+        /* Note that the desired next expiry time might be beyond the
+         * signed-64-bit range of a QEMUTimer -- in this case we just
+         * set the timer for as far in the future as possible. When the
+         * timer expires we will reset the timer for any remaining period.
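+         *
+         * The guard below compares the tick count against INT64_MAX
+         * divided by the tick period in nanoseconds, i.e. the largest
+         * count whose nanosecond conversion still fits a signed 64-bit
+         * timer deadline.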
+ */ + if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { + timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); + } else { + timer_mod(cpu->gt_timer[timeridx], nexttick); + } + trace_arm_gt_recalc(timeridx, irqstate, nexttick); + } else { + /* Timer disabled: ISTATUS and timer output always clear */ + gt->ctl &= ~4; + qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); + timer_del(cpu->gt_timer[timeridx]); + trace_arm_gt_recalc_disabled(timeridx); + } +#endif +} + +static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx) +{ +#if 0 + ARMCPU *cpu = env_archcpu(env); + + timer_del(cpu->gt_timer[timeridx]); +#endif +} + +static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_get_countervalue(env); +} + +static uint64_t gt_virt_cnt_offset(CPUARMState *env) +{ + uint64_t hcr; + + switch (arm_current_el(env)) { + case 2: + hcr = arm_hcr_el2_eff(env); + if (hcr & HCR_E2H) { + return 0; + } + break; + case 0: + hcr = arm_hcr_el2_eff(env); + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + return 0; + } + break; + } + + return env->cp15.cntvoff_el2; +} + +static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_get_countervalue(env) - gt_virt_cnt_offset(env); +} + +static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx, + uint64_t value) +{ +#if 0 + trace_arm_gt_cval_write(timeridx, value); + env->cp15.c14_timer[timeridx].cval = value; + gt_recalc_timer(env_archcpu(env), timeridx); +#endif +} + +static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx) +{ + uint64_t offset = 0; + + switch (timeridx) { + case GTIMER_VIRT: + case GTIMER_HYPVIRT: + offset = gt_virt_cnt_offset(env); + break; + } + + return (uint32_t)(env->cp15.c14_timer[timeridx].cval - + (gt_get_countervalue(env) - offset)); +} + +static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx, + uint64_t value) +{ + uint64_t offset = 0; + + switch (timeridx) { + case GTIMER_VIRT: + case GTIMER_HYPVIRT: + offset = gt_virt_cnt_offset(env); + break; + } + + env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + + sextract64(value, 0, 32); + gt_recalc_timer(env_archcpu(env), timeridx); +} + +static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx, + uint64_t value) +{ +#if 0 + ARMCPU *cpu = env_archcpu(env); + uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; + + env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); + if ((oldval ^ value) & 1) { + /* Enable toggled */ + gt_recalc_timer(cpu, timeridx); + } else if ((oldval ^ value) & 2) { + /* IMASK toggled: don't need to recalculate, + * just set the interrupt line based on ISTATUS + */ + int irqstate = (oldval & 4) && !(value & 2); + + qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); + } +#endif +} + +static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + gt_timer_reset(env, ri, GTIMER_PHYS); +} + +static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_cval_write(env, ri, GTIMER_PHYS, value); +} + +static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_tval_read(env, ri, GTIMER_PHYS); +} + +static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_tval_write(env, ri, GTIMER_PHYS, value); +} + +static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + 
gt_ctl_write(env, ri, GTIMER_PHYS, value); +} + +static int gt_phys_redir_timeridx(CPUARMState *env) +{ + switch (arm_mmu_idx(env)) { + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + return GTIMER_HYP; + default: + return GTIMER_PHYS; + } +} + +static int gt_virt_redir_timeridx(CPUARMState *env) +{ + switch (arm_mmu_idx(env)) { + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + return GTIMER_HYPVIRT; + default: + return GTIMER_VIRT; + } +} + +static uint64_t gt_phys_redir_cval_read(CPUARMState *env, + const ARMCPRegInfo *ri) +{ + int timeridx = gt_phys_redir_timeridx(env); + return env->cp15.c14_timer[timeridx].cval; +} + +static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int timeridx = gt_phys_redir_timeridx(env); + gt_cval_write(env, ri, timeridx, value); +} + +static uint64_t gt_phys_redir_tval_read(CPUARMState *env, + const ARMCPRegInfo *ri) +{ + int timeridx = gt_phys_redir_timeridx(env); + return gt_tval_read(env, ri, timeridx); +} + +static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int timeridx = gt_phys_redir_timeridx(env); + gt_tval_write(env, ri, timeridx, value); +} + +static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, + const ARMCPRegInfo *ri) +{ + int timeridx = gt_phys_redir_timeridx(env); + return env->cp15.c14_timer[timeridx].ctl; +} + +static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int timeridx = gt_phys_redir_timeridx(env); + gt_ctl_write(env, ri, timeridx, value); +} + +static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + gt_timer_reset(env, ri, GTIMER_VIRT); +} + +static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_cval_write(env, ri, GTIMER_VIRT, value); +} + +static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_tval_read(env, ri, GTIMER_VIRT); +} + +static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_tval_write(env, ri, GTIMER_VIRT, value); +} + +static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_ctl_write(env, ri, GTIMER_VIRT, value); +} + +static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + raw_write(env, ri, value); + gt_recalc_timer(cpu, GTIMER_VIRT); +} + +static uint64_t gt_virt_redir_cval_read(CPUARMState *env, + const ARMCPRegInfo *ri) +{ + int timeridx = gt_virt_redir_timeridx(env); + return env->cp15.c14_timer[timeridx].cval; +} + +static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int timeridx = gt_virt_redir_timeridx(env); + gt_cval_write(env, ri, timeridx, value); +} + +static uint64_t gt_virt_redir_tval_read(CPUARMState *env, + const ARMCPRegInfo *ri) +{ + int timeridx = gt_virt_redir_timeridx(env); + return gt_tval_read(env, ri, timeridx); +} + +static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int timeridx = gt_virt_redir_timeridx(env); + gt_tval_write(env, ri, timeridx, value); +} + +static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, + const ARMCPRegInfo *ri) +{ + int timeridx = gt_virt_redir_timeridx(env); + return env->cp15.c14_timer[timeridx].ctl; +} + +static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) 
+{ + int timeridx = gt_virt_redir_timeridx(env); + gt_ctl_write(env, ri, timeridx, value); +} + +static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + gt_timer_reset(env, ri, GTIMER_HYP); +} + +static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_cval_write(env, ri, GTIMER_HYP, value); +} + +static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_tval_read(env, ri, GTIMER_HYP); +} + +static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_tval_write(env, ri, GTIMER_HYP, value); +} + +static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_ctl_write(env, ri, GTIMER_HYP, value); +} + +static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + gt_timer_reset(env, ri, GTIMER_SEC); +} + +static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_cval_write(env, ri, GTIMER_SEC, value); +} + +static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_tval_read(env, ri, GTIMER_SEC); +} + +static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_tval_write(env, ri, GTIMER_SEC, value); +} + +static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_ctl_write(env, ri, GTIMER_SEC, value); +} + +static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + gt_timer_reset(env, ri, GTIMER_HYPVIRT); +} + +static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_cval_write(env, ri, GTIMER_HYPVIRT, value); +} + +static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_tval_read(env, ri, GTIMER_HYPVIRT); +} + +static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_tval_write(env, ri, GTIMER_HYPVIRT, value); +} + +static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); +} + +void arm_gt_ptimer_cb(void *opaque) +{ + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_PHYS); +} + +void arm_gt_vtimer_cb(void *opaque) +{ + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_VIRT); +} + +void arm_gt_htimer_cb(void *opaque) +{ + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_HYP); +} + +void arm_gt_stimer_cb(void *opaque) +{ + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_SEC); +} + +void arm_gt_hvtimer_cb(void *opaque) +{ + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_HYPVIRT); +} + +static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) +{ + ARMCPU *cpu = env_archcpu(env); + + cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; +} + +static const ARMCPRegInfo generic_timer_cp_reginfo[] = { + /* Note that CNTFRQ is purely reads-as-written for the benefit + * of software; writing it doesn't actually change the timer frequency. + * Our reset value matches the fixed frequency we implement the timer at. 
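+     *
+     * (gt_get_countervalue() above derives its tick rate from
+     * gt_cntfrq_period_ns(), which is based on the CPU's gt_cntfrq_hz
+     * property rather than on this register, so a guest write here only
+     * changes what subsequently reads back.)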
+ */ + { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, + .type = ARM_CP_ALIAS, + .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), + }, + { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, + .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), + .resetfn = arm_gt_cntfrq_reset, + }, + /* overall control: mostly access permissions */ + { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), + .resetvalue = 0, + }, + /* per-timer control */ + { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, + .secure = ARM_CP_SECSTATE_NS, + .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, + .accessfn = gt_ptimer_access, + .fieldoffset = offsetoflow32(CPUARMState, + cp15.c14_timer[GTIMER_PHYS].ctl), + .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, + .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, + }, + { .name = "CNTP_CTL_S", + .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, + .secure = ARM_CP_SECSTATE_S, + .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, + .accessfn = gt_ptimer_access, + .fieldoffset = offsetoflow32(CPUARMState, + cp15.c14_timer[GTIMER_SEC].ctl), + .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, + }, + { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, + .type = ARM_CP_IO, .access = PL0_RW, + .accessfn = gt_ptimer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), + .resetvalue = 0, + .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, + .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, + }, + { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, + .accessfn = gt_vtimer_access, + .fieldoffset = offsetoflow32(CPUARMState, + cp15.c14_timer[GTIMER_VIRT].ctl), + .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, + .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, + }, + { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, + .type = ARM_CP_IO, .access = PL0_RW, + .accessfn = gt_vtimer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), + .resetvalue = 0, + .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, + .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, + }, + /* TimerValue views: a 32 bit downcounting view of the underlying state */ + { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, + .secure = ARM_CP_SECSTATE_NS, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, + .accessfn = gt_ptimer_access, + .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, + }, + { .name = "CNTP_TVAL_S", + .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, + .secure = ARM_CP_SECSTATE_S, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, + .accessfn = gt_ptimer_access, + .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, + }, + { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, + .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, + 
.readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, + }, + { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, + .accessfn = gt_vtimer_access, + .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, + }, + { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, + .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, + .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, + }, + /* The counter itself */ + { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, + .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = gt_pct_access, + .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, + }, + { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, + .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = gt_pct_access, .readfn = gt_cnt_read, + }, + { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, + .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = gt_vct_access, + .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, + }, + { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, + .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, + }, + /* Comparison value, indicating when the timer goes off */ + { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, + .secure = ARM_CP_SECSTATE_NS, + .access = PL0_RW, + .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), + .accessfn = gt_ptimer_access, + .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, + .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, + }, + { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, + .secure = ARM_CP_SECSTATE_S, + .access = PL0_RW, + .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), + .accessfn = gt_ptimer_access, + .writefn = gt_sec_cval_write, .raw_writefn = raw_write, + }, + { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, + .access = PL0_RW, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), + .resetvalue = 0, .accessfn = gt_ptimer_access, + .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, + .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, + }, + { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, + .access = PL0_RW, + .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), + .accessfn = gt_vtimer_access, + .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, + .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, + }, + { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, + .access = PL0_RW, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), + .resetvalue = 0, .accessfn = gt_vtimer_access, + .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, + .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, + }, + /* Secure timer -- this is actually 
restricted to only EL3 + * and configurably Secure-EL1 via the accessfn. + */ + { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, + .accessfn = gt_stimer_access, + .readfn = gt_sec_tval_read, + .writefn = gt_sec_tval_write, + .resetfn = gt_sec_timer_reset, + }, + { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, + .type = ARM_CP_IO, .access = PL1_RW, + .accessfn = gt_stimer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), + .resetvalue = 0, + .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, + }, + { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, + .type = ARM_CP_IO, .access = PL1_RW, + .accessfn = gt_stimer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), + .writefn = gt_sec_cval_write, .raw_writefn = raw_write, + }, + REGINFO_SENTINEL +}; + +static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + if (arm_feature(env, ARM_FEATURE_LPAE)) { + raw_write(env, ri, value); + } else if (arm_feature(env, ARM_FEATURE_V7)) { + raw_write(env, ri, value & 0xfffff6ff); + } else { + raw_write(env, ri, value & 0xfffff1ff); + } +} + +/* get_phys_addr() isn't present for user-mode-only targets */ + +static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (ri->opc2 & 4) { + /* The ATS12NSO* operations must trap to EL3 if executed in + * Secure EL1 (which can only happen if EL3 is AArch64). + * They are simply UNDEF if executed from NS EL1. + * They function normally from EL2 or EL3. + */ + if (arm_current_el(env) == 1) { + if (arm_is_secure_below_el3(env)) { + return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; + } + return CP_ACCESS_TRAP_UNCATEGORIZED; + } + } + return CP_ACCESS_OK; +} + +static uint64_t do_ats_write(CPUARMState *env, uint64_t value, + MMUAccessType access_type, ARMMMUIdx mmu_idx) +{ + hwaddr phys_addr; + target_ulong page_size; + int prot; + bool ret; + uint64_t par64; + bool format64 = false; + MemTxAttrs attrs = { 0 }; + ARMMMUFaultInfo fi = { 0 }; + ARMCacheAttrs cacheattrs = { 0 }; + + ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, + &prot, &page_size, &fi, &cacheattrs); + + if (ret) { + /* + * Some kinds of translation fault must cause exceptions rather + * than being reported in the PAR. + */ + int current_el = arm_current_el(env); + int target_el; + uint32_t syn, fsr, fsc; + bool take_exc = false; + + if (fi.s1ptw && current_el == 1 && !arm_is_secure(env) + && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { + /* + * Synchronous stage 2 fault on an access made as part of the + * translation table walk for AT S1E0* or AT S1E1* insn + * executed from NS EL1. If this is a synchronous external abort + * and SCR_EL3.EA == 1, then we take a synchronous external abort + * to EL3. Otherwise the fault is taken as an exception to EL2, + * and HPFAR_EL2 holds the faulting IPA. 
+ */ + if (fi.type == ARMFault_SyncExternalOnWalk && + (env->cp15.scr_el3 & SCR_EA)) { + target_el = 3; + } else { + env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; + target_el = 2; + } + take_exc = true; + } else if (fi.type == ARMFault_SyncExternalOnWalk) { + /* + * Synchronous external aborts during a translation table walk + * are taken as Data Abort exceptions. + */ + if (fi.stage2) { + if (current_el == 3) { + target_el = 3; + } else { + target_el = 2; + } + } else { + target_el = exception_target_el(env); + } + take_exc = true; + } + + if (take_exc) { + /* Construct FSR and FSC using same logic as arm_deliver_fault() */ + if (target_el == 2 || arm_el_is_aa64(env, target_el) || + arm_s1_regime_using_lpae_format(env, mmu_idx)) { + fsr = arm_fi_to_lfsc(&fi); + fsc = extract32(fsr, 0, 6); + } else { + fsr = arm_fi_to_sfsc(&fi); + fsc = 0x3f; + } + /* + * Report exception with ESR indicating a fault due to a + * translation table walk for a cache maintenance instruction. + */ + syn = syn_data_abort_no_iss(current_el == target_el, + fi.ea, 1, fi.s1ptw, 1, fsc); + env->exception.vaddress = value; + env->exception.fsr = fsr; + raise_exception(env, EXCP_DATA_ABORT, syn, target_el); + } + } + + if (is_a64(env)) { + format64 = true; + } else if (arm_feature(env, ARM_FEATURE_LPAE)) { + /* + * ATS1Cxx: + * * TTBCR.EAE determines whether the result is returned using the + * 32-bit or the 64-bit PAR format + * * Instructions executed in Hyp mode always use the 64bit format + * + * ATS1S2NSOxx uses the 64bit format if any of the following is true: + * * The Non-secure TTBCR.EAE bit is set to 1 + * * The implementation includes EL2, and the value of HCR.VM is 1 + * + * (Note that HCR.DC makes HCR.VM behave as if it is 1.) + * + * ATS1Hx always uses the 64bit format. + */ + format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); + + if (arm_feature(env, ARM_FEATURE_EL2)) { + if (mmu_idx == ARMMMUIdx_E10_0 || + mmu_idx == ARMMMUIdx_E10_1 || + mmu_idx == ARMMMUIdx_E10_1_PAN) { + format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); + } else { + format64 |= arm_current_el(env) == 2; + } + } + } + + if (format64) { + /* Create a 64-bit PAR */ + par64 = (1 << 11); /* LPAE bit always set */ + if (!ret) { + par64 |= phys_addr & ~0xfffULL; + if (!attrs.secure) { + par64 |= (1 << 9); /* NS */ + } + par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ + par64 |= cacheattrs.shareability << 7; /* SH */ + } else { + uint32_t fsr = arm_fi_to_lfsc(&fi); + + par64 |= 1; /* F */ + par64 |= (fsr & 0x3f) << 1; /* FS */ + if (fi.stage2) { + par64 |= (1 << 9); /* S */ + } + if (fi.s1ptw) { + par64 |= (1 << 8); /* PTW */ + } + } + } else { + /* fsr is a DFSR/IFSR value for the short descriptor + * translation table format (with WnR always clear). + * Convert it to a 32-bit PAR. + */ + if (!ret) { + /* We do not set any attribute bits in the PAR */ + if (page_size == (1 << 24) + && arm_feature(env, ARM_FEATURE_V7)) { + par64 = (phys_addr & 0xff000000) | (1 << 1); + } else { + par64 = phys_addr & 0xfffff000; + } + if (!attrs.secure) { + par64 |= (1 << 9); /* NS */ + } + } else { + uint32_t fsr = arm_fi_to_sfsc(&fi); + + par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | + ((fsr & 0xf) << 1) | 1; + } + } + return par64; +} + +static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; + uint64_t par64; + ARMMMUIdx mmu_idx = 0; + int el = arm_current_el(env); + bool secure = arm_is_secure_below_el3(env); + + switch (ri->opc2 & 6) { + case 0: + /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ + switch (el) { + case 3: + mmu_idx = ARMMMUIdx_SE3; + break; + case 2: + g_assert(!secure); /* TODO: ARMv8.4-SecEL2 */ + /* fall through */ + case 1: + if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { + mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN + : ARMMMUIdx_Stage1_E1_PAN); + } else { + mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; + } + break; + default: + g_assert_not_reached(); + break; + } + break; + case 2: + /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ + switch (el) { + case 3: + mmu_idx = ARMMMUIdx_SE10_0; + break; + case 2: + mmu_idx = ARMMMUIdx_Stage1_E0; + break; + case 1: + mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; + break; + default: + g_assert_not_reached(); + break; + } + break; + case 4: + /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ + mmu_idx = ARMMMUIdx_E10_1; + break; + case 6: + /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ + mmu_idx = ARMMMUIdx_E10_0; + break; + default: + g_assert_not_reached(); + break; + } + + par64 = do_ats_write(env, value, access_type, mmu_idx); + + A32_BANKED_CURRENT_REG_SET(env, par, par64); +} + +static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; + uint64_t par64; + + par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2); + + A32_BANKED_CURRENT_REG_SET(env, par, par64); +} + +static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; + ARMMMUIdx mmu_idx = 0; + int secure = arm_is_secure_below_el3(env); + + switch (ri->opc2 & 6) { + case 0: + switch (ri->opc1) { + case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ + if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { + mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN + : ARMMMUIdx_Stage1_E1_PAN); + } else { + mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; + } + break; + case 4: /* AT S1E2R, AT S1E2W */ + mmu_idx = ARMMMUIdx_E2; + break; + case 6: /* AT S1E3R, AT S1E3W */ + mmu_idx = ARMMMUIdx_SE3; + break; + default: + g_assert_not_reached(); + break; + } + break; + case 2: /* AT S1E0R, AT S1E0W */ + mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; + break; + case 4: /* AT S12E1R, AT S12E1W */ + mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1; + break; + case 6: /* AT S12E0R, AT S12E0W */ + mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0; + break; + default: + g_assert_not_reached(); + break; + } + + env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); +} + +static const ARMCPRegInfo vapa_cp_reginfo[] = { + { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .resetvalue = 0, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), + offsetoflow32(CPUARMState, cp15.par_ns) }, + .writefn = par_write }, + /* This underdecoding is safe because the reginfo is NO_RAW. 
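+     * A single entry with .opc2 = CP_ANY matches every ATS* encoding in
+     * this crn/crm space; ats_write() decodes ri->opc2 to select the
+     * translation regime and the access type.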
*/ + { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_W, .accessfn = ats_access, + .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, + REGINFO_SENTINEL +}; + +/* Return basic MPU access permission bits. */ +static uint32_t simple_mpu_ap_bits(uint32_t val) +{ + uint32_t ret; + uint32_t mask; + int i; + ret = 0; + mask = 3; + for (i = 0; i < 16; i += 2) { + ret |= (val >> i) & mask; + mask <<= 2; + } + return ret; +} + +/* Pad basic MPU access permission bits to extended format. */ +static uint32_t extended_mpu_ap_bits(uint32_t val) +{ + uint32_t ret; + uint32_t mask; + int i; + ret = 0; + mask = 3; + for (i = 0; i < 16; i += 2) { + ret |= (val & mask) << i; + mask <<= 2; + } + return ret; +} + +static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); +} + +static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); +} + +static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); +} + +static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); +} + +static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); + + if (!u32p) { + return 0; + } + + u32p += env->pmsav7.rnr[M_REG_NS]; + return *u32p; +} + +static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); + + if (!u32p) { + return; + } + + u32p += env->pmsav7.rnr[M_REG_NS]; + tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ + *u32p = value; +} + +static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + uint32_t nrgs = cpu->pmsav7_dregion; + + if (value >= nrgs) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMSAv7 RGNR write >= # supported regions, %" PRIu32 + " > %" PRIu32 "\n", (uint32_t)value, nrgs); + return; + } + + raw_write(env, ri, value); +} + +static const ARMCPRegInfo pmsav7_cp_reginfo[] = { + /* Reset for all these registers is handled in arm_cpu_reset(), + * because the PMSAv7 is also used by M-profile CPUs, which do + * not register cpregs but still need the state to be reset. 
+ */ + { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_NO_RAW, + .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), + .readfn = pmsav7_read, .writefn = pmsav7_write, + .resetfn = arm_cp_reset_ignore }, + { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, + .access = PL1_RW, .type = ARM_CP_NO_RAW, + .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), + .readfn = pmsav7_read, .writefn = pmsav7_write, + .resetfn = arm_cp_reset_ignore }, + { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, + .access = PL1_RW, .type = ARM_CP_NO_RAW, + .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), + .readfn = pmsav7_read, .writefn = pmsav7_write, + .resetfn = arm_cp_reset_ignore }, + { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), + .writefn = pmsav7_rgnr_write, + .resetfn = arm_cp_reset_ignore }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo pmsav5_cp_reginfo[] = { + { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), + .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, + { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .type = ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), + .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, + { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), + .resetvalue = 0, }, + { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), + .resetvalue = 0, }, + { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, + { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, + /* Protection region base and size registers */ + { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, + .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, + { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, + .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, + { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, + .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, + { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, + .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, + { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, + .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, + { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, + .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) }, + { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, + .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, + .fieldoffset = 
offsetof(CPUARMState, cp15.c6_region[6]) },
+    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
+      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
+    REGINFO_SENTINEL
+};
+
+static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                 uint64_t value)
+{
+    TCR *tcr = raw_ptr(env, ri);
+    int maskshift = extract32(value, 0, 3);
+
+    if (!arm_feature(env, ARM_FEATURE_V8)) {
+        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
+            /* Pre-ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
+             * using Long-descriptor translation table format */
+            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
+        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
+            /* In an implementation that includes the Security Extensions
+             * TTBCR has additional fields PD0 [4] and PD1 [5] for
+             * Short-descriptor translation table format.
+             */
+            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
+        } else {
+            value &= TTBCR_N;
+        }
+    }
+
+    /* Update the masks corresponding to the TCR bank being written.
+     * Note that we always calculate mask and base_mask, but
+     * they are only used for short-descriptor tables (i.e. if EAE is 0);
+     * for long-descriptor tables the TCR fields are used differently
+     * and the mask and base_mask values are meaningless.
+     */
+    tcr->raw_tcr = value;
+    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
+    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
+}
+
+static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                             uint64_t value)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    TCR *tcr = raw_ptr(env, ri);
+
+    if (arm_feature(env, ARM_FEATURE_LPAE)) {
+        /* With LPAE the TTBCR could result in a change of ASID
+         * via the TTBCR.A1 bit, so do a TLB flush.
+         */
+        tlb_flush(CPU(cpu));
+    }
+    /* Preserve the high half of TCR_EL1, set via TTBCR2. */
+    value = deposit64(tcr->raw_tcr, 0, 32, value);
+    vmsa_ttbcr_raw_write(env, ri, value);
+}
+
+static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    TCR *tcr = raw_ptr(env, ri);
+
+    /* Reset both the TCR and the masks corresponding to the bank of
+     * the TCR being reset.
+     */
+    tcr->raw_tcr = 0;
+    tcr->mask = 0;
+    tcr->base_mask = 0xffffc000u;
+}
+
+static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                uint64_t value)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    TCR *tcr = raw_ptr(env, ri);
+
+    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
+    tlb_flush(CPU(cpu));
+    tcr->raw_tcr = value;
+}
+
+static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                            uint64_t value)
+{
+    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
+    if (cpreg_field_is_64bit(ri) &&
+        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
+        ARMCPU *cpu = env_archcpu(env);
+        tlb_flush(CPU(cpu));
+    }
+    raw_write(env, ri, value);
+}
+
+static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                                    uint64_t value)
+{
+    /*
+     * If we are running with E2&0 regime, then an ASID is active.
+     * Flush if that might be changing. Note we're not checking
+     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
+     * holds the active ASID, only checking the field that might.
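+     * (The field compared is bits [63:48], which is where TTBRx_EL2
+     * holds the ASID when HCR_EL2.E2H is set.)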
+ */ + if (extract64(raw_read(env, ri) ^ value, 48, 16) && + (arm_hcr_el2_eff(env) & HCR_E2H)) { + tlb_flush_by_mmuidx(env_cpu(env), + ARMMMUIdxBit_E20_2 | + ARMMMUIdxBit_E20_2_PAN | + ARMMMUIdxBit_E20_0); + } + raw_write(env, ri, value); +} + +static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + + /* + * A change in VMID to the stage2 page table (Stage2) invalidates + * the combined stage 1&2 tlbs (EL10_1 and EL10_0). + */ + if (raw_read(env, ri) != value) { + tlb_flush_by_mmuidx(cs, + ARMMMUIdxBit_E10_1 | + ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E10_0 | + ARMMMUIdxBit_Stage2); + raw_write(env, ri, value); + } +} + +static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { + { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), + offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, + { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), + offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, + { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), + offsetof(CPUARMState, cp15.dfar_ns) } }, + { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), + .resetvalue = 0, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo vmsa_cp_reginfo[] = { + { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, + { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .writefn = vmsa_ttbr_write, .resetvalue = 0, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), + offsetof(CPUARMState, cp15.ttbr0_ns) } }, + { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .writefn = vmsa_ttbr_write, .resetvalue = 0, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), + offsetof(CPUARMState, cp15.ttbr1_ns) } }, + { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .writefn = vmsa_tcr_el12_write, + .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, + .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, + { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, + .raw_writefn = vmsa_ttbcr_raw_write, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), + offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, + REGINFO_SENTINEL +}; + +/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing + * qemu tlbs nor adjusting cached masks. 
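+ * (TTBCR2 only maps the high half of TCR_EL1; the short-descriptor
+ * mask and base_mask are derived from the low half in
+ * vmsa_ttbcr_raw_write().)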
+ */ +static const ARMCPRegInfo ttbcr2_reginfo = { + .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_ALIAS, + .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]), + offsetofhigh32(CPUARMState, cp15.tcr_el[1]) }, +}; + +static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c15_ticonfig = value & 0xe7; + /* The OS_TYPE bit in this register changes the reported CPUID! */ + env->cp15.c0_cpuid = (value & (1 << 5)) ? + ARM_CPUID_TI915T : ARM_CPUID_TI925T; +} + +static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c15_threadid = value & 0xffff; +} + +static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Wait-for-interrupt (deprecated) */ + cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); +} + +static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* On OMAP there are registers indicating the max/min index of dcache lines + * containing a dirty line; cache flush operations have to reset these. + */ + env->cp15.c15_i_max = 0x000; + env->cp15.c15_i_min = 0xff0; +} + +static const ARMCPRegInfo omap_cp_reginfo[] = { + { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, + .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), + .resetvalue = 0, }, + { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_NOP }, + { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, + .writefn = omap_ticonfig_write }, + { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, + { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .resetvalue = 0xff0, + .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, + { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, + .writefn = omap_threadid_write }, + { .name = "TI925T_STATUS", .cp = 15, .crn = 15, + .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, + .type = ARM_CP_NO_RAW, + .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, + /* TODO: Peripheral port remap register: + * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller + * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), + * when MMU is off. 
+ */ + { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, + .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, + .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, + .writefn = omap_cachemaint_write }, + { .name = "C9", .cp = 15, .crn = 9, + .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, + .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c15_cpar = value & 0x3fff; +} + +static const ARMCPRegInfo xscale_cp_reginfo[] = { + { .name = "XSCALE_CPAR", + .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, + .writefn = xscale_cpar_write, }, + { .name = "XSCALE_AUXCR", + .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), + .resetvalue = 0, }, + /* XScale specific cache-lockdown: since we have no cache we NOP these + * and hope the guest does not really rely on cache behaviour. + */ + { .name = "XSCALE_LOCK_ICACHE_LINE", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NOP }, + { .name = "XSCALE_UNLOCK_ICACHE", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NOP }, + { .name = "XSCALE_DCACHE_LOCK", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_NOP }, + { .name = "XSCALE_UNLOCK_DCACHE", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NOP }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { + /* RAZ/WI the whole crn=15 space, when we don't have a more specific + * implementation of this implementation-defined space. + * Ideally this should eventually disappear in favour of actually + * implementing the correct behaviour for all cores. 
+ */ + { .name = "C15_IMPDEF", .cp = 15, .crn = 15, + .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, + .access = PL1_RW, + .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, + .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { + /* Cache status: RAZ because we have no cache so it's always clean */ + { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, + .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { + /* We never have a a block transfer operation in progress */ + { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, + .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, + .resetvalue = 0 }, + /* The cache ops themselves: these all NOP for QEMU */ + { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, + .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, + .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, + .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, + .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, + .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, + .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { + /* The cache test-and-clean instructions always return (1 << 30) + * to indicate that there are no dirty cache lines. + */ + { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, + .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, + .resetvalue = (1 << 30) }, + { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, + .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, + .resetvalue = (1 << 30) }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo strongarm_cp_reginfo[] = { + /* Ignore ReadBuffer accesses */ + { .name = "C9_READBUFFER", .cp = 15, .crn = 9, + .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, + .access = PL1_RW, .resetvalue = 0, + .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, + REGINFO_SENTINEL +}; + +static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + ARMCPU *cpu = env_archcpu(env); + unsigned int cur_el = arm_current_el(env); + bool secure = arm_is_secure(env); + + if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { + return env->cp15.vpidr_el2; + } + return raw_read(env, ri); +} + +static uint64_t mpidr_read_val(CPUARMState *env) +{ + ARMCPU *cpu = env_archcpu(env); + uint64_t mpidr = cpu->mp_affinity; + + if (arm_feature(env, ARM_FEATURE_V7MP)) { + mpidr |= (1U << 31); + /* Cores which are uniprocessor (non-coherent) + * but still implement the MP extensions set + * bit 30. (For instance, Cortex-R5). 
+ */ + if (cpu->mp_is_up) { + mpidr |= (1u << 30); + } + } + return mpidr; +} + +static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + unsigned int cur_el = arm_current_el(env); + bool secure = arm_is_secure(env); + + if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { + return env->cp15.vmpidr_el2; + } + return mpidr_read_val(env); +} + +static const ARMCPRegInfo lpae_cp_reginfo[] = { + /* NOP AMAIR0/1 */ + { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ + { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, + .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), + offsetof(CPUARMState, cp15.par_ns)} }, + { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_64BIT | ARM_CP_ALIAS, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), + offsetof(CPUARMState, cp15.ttbr0_ns) }, + .writefn = vmsa_ttbr_write, }, + { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_64BIT | ARM_CP_ALIAS, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), + offsetof(CPUARMState, cp15.ttbr1_ns) }, + .writefn = vmsa_ttbr_write, }, + REGINFO_SENTINEL +}; + +static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return vfp_get_fpcr(env); +} + +static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + vfp_set_fpcr(env, value); +} + +static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return vfp_get_fpsr(env); +} + +static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + vfp_set_fpsr(env, value); +} + +static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->daif = value & PSTATE_DAIF; +} + +static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return env->pstate & PSTATE_PAN; +} + +static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); +} + +static const ARMCPRegInfo pan_reginfo = { + .name = "PAN", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, + .type = ARM_CP_NO_RAW, .access = PL1_RW, + .readfn = aa64_pan_read, .writefn = aa64_pan_write +}; + +static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return env->pstate & PSTATE_UAO; +} + +static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); +} + +static const ARMCPRegInfo uao_reginfo = { + .name = "UAO", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, + .type = ARM_CP_NO_RAW, .access = PL1_RW, + .readfn = aa64_uao_read, .writefn = aa64_uao_write +}; + +static CPAccessResult 
aa64_cacheop_poc_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* Cache invalidate/clean to Point of Coherency or Persistence... */ + switch (arm_current_el(env)) { + case 0: + /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ + if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { + return CP_ACCESS_TRAP; + } + /* fall through */ + case 1: + /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */ + if (arm_hcr_el2_eff(env) & HCR_TPCP) { + return CP_ACCESS_TRAP_EL2; + } + break; + } + return CP_ACCESS_OK; +} + +static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* Cache invalidate/clean to Point of Unification... */ + switch (arm_current_el(env)) { + case 0: + /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ + if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { + return CP_ACCESS_TRAP; + } + /* fall through */ + case 1: + /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */ + if (arm_hcr_el2_eff(env) & HCR_TPU) { + return CP_ACCESS_TRAP_EL2; + } + break; + } + return CP_ACCESS_OK; +} + +/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions + * Page D4-1736 (DDI0487A.b) + */ + +static int vae1_tlbmask(CPUARMState *env) +{ + /* Since we exclude secure first, we may read HCR_EL2 directly. */ + if (arm_is_secure_below_el3(env)) { + return ARMMMUIdxBit_SE10_1 | + ARMMMUIdxBit_SE10_1_PAN | + ARMMMUIdxBit_SE10_0; + } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) + == (HCR_E2H | HCR_TGE)) { + return ARMMMUIdxBit_E20_2 | + ARMMMUIdxBit_E20_2_PAN | + ARMMMUIdxBit_E20_0; + } else { + return ARMMMUIdxBit_E10_1 | + ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E10_0; + } +} + +static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = vae1_tlbmask(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); +} + +static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = vae1_tlbmask(env); + + if (tlb_force_broadcast(env)) { + tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); + } else { + tlb_flush_by_mmuidx(cs, mask); + } +} + +static int alle1_tlbmask(CPUARMState *env) +{ + /* + * Note that the 'ALL' scope must invalidate both stage 1 and + * stage 2 translations, whereas most other scopes only invalidate + * stage 1 translations. 
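+     * That is why ARMMMUIdxBit_Stage2 is included below when EL2 exists.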
+ */ + if (arm_is_secure_below_el3(env)) { + return ARMMMUIdxBit_SE10_1 | + ARMMMUIdxBit_SE10_1_PAN | + ARMMMUIdxBit_SE10_0; + } else if (arm_feature(env, ARM_FEATURE_EL2)) { + return ARMMMUIdxBit_E10_1 | + ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E10_0 | + ARMMMUIdxBit_Stage2; + } else { + return ARMMMUIdxBit_E10_1 | + ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E10_0; + } +} + +static int e2_tlbmask(CPUARMState *env) +{ + /* TODO: ARMv8.4-SecEL2 */ + return ARMMMUIdxBit_E20_0 | + ARMMMUIdxBit_E20_2 | + ARMMMUIdxBit_E20_2_PAN | + ARMMMUIdxBit_E2; +} + +static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = alle1_tlbmask(env); + + tlb_flush_by_mmuidx(cs, mask); +} + +static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = e2_tlbmask(env); + + tlb_flush_by_mmuidx(cs, mask); +} + +static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + + tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3); +} + +static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = alle1_tlbmask(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); +} + +static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = e2_tlbmask(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); +} + +static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3); +} + +static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by VA, EL2 + * Currently handles both VAE2 and VALE2, since we don't support + * flush-last-level-only. + */ + CPUState *cs = env_cpu(env); + int mask = e2_tlbmask(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + tlb_flush_page_by_mmuidx(cs, pageaddr, mask); +} + +static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by VA, EL3 + * Currently handles both VAE3 and VALE3, since we don't support + * flush-last-level-only. + */ + ARMCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3); +} + +static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = vae1_tlbmask(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); +} + +static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by VA, EL1&0 (AArch64 version). + * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, + * since we don't support flush-for-specific-ASID-only or + * flush-last-level-only. 
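+     * The operand holds VA[55:12] in bits [43:0], so
+     * sextract64(value << 12, 0, 56) below recovers the sign-extended
+     * page address to flush.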
+ */ + CPUState *cs = env_cpu(env); + int mask = vae1_tlbmask(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + if (tlb_force_broadcast(env)) { + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); + } else { + tlb_flush_page_by_mmuidx(cs, pageaddr, mask); + } +} + +static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + ARMMMUIdxBit_E2); +} + +static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + ARMMMUIdxBit_SE3); +} + +static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by IPA. This has to invalidate any structures that + * contain only stage 2 translation information, but does not need + * to apply to structures that contain combined stage 1 and stage 2 + * translation information. + * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. + */ + ARMCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + uint64_t pageaddr; + + if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { + return; + } + + pageaddr = sextract64(value << 12, 0, 48); + + tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); +} + +static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr; + + if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { + return; + } + + pageaddr = sextract64(value << 12, 0, 48); + + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + ARMMMUIdxBit_Stage2); +} + +static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int cur_el = arm_current_el(env); + + if (cur_el < 2) { + uint64_t hcr = arm_hcr_el2_eff(env); + + if (cur_el == 0) { + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { + return CP_ACCESS_TRAP_EL2; + } + } else { + if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { + return CP_ACCESS_TRAP; + } + if (hcr & HCR_TDZ) { + return CP_ACCESS_TRAP_EL2; + } + } + } else if (hcr & HCR_TDZ) { + return CP_ACCESS_TRAP_EL2; + } + } + return CP_ACCESS_OK; +} + +static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + ARMCPU *cpu = env_archcpu(env); + int dzp_bit = 1 << 4; + + /* DZP indicates whether DC ZVA access is allowed */ + if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { + dzp_bit = 0; + } + return cpu->dcz_blocksize | dzp_bit; +} + +static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (!(env->pstate & PSTATE_SP)) { + /* Access to SP_EL0 is undefined if it's being used as + * the stack pointer. 
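+         * (PSTATE.SP == 0 means we are currently running on SP_EL0,
+         * so an MRS/MSR access to SP_EL0 must UNDEF.)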
+ */ + return CP_ACCESS_TRAP_UNCATEGORIZED; + } + return CP_ACCESS_OK; +} + +static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return env->pstate & PSTATE_SP; +} + +static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) +{ + update_spsel(env, val); +} + +static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + + if (raw_read(env, ri) == value) { + /* Skip the TLB flush if nothing actually changed; Linux likes + * to do a lot of pointless SCTLR writes. + */ + return; + } + + if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { + /* M bit is RAZ/WI for PMSA with no MPU implemented */ + value &= ~SCTLR_M; + } + + raw_write(env, ri, value); + /* ??? Lots of these bits are not implemented. */ + /* This may enable/disable the MMU, so do a TLB flush. */ + tlb_flush(CPU(cpu)); + + if (ri->type & ARM_CP_SUPPRESS_TB_END) { + /* + * Normally we would always end the TB on an SCTLR write; see the + * comment in ARMCPRegInfo sctlr initialization below for why Xscale + * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild + * of hflags from the translator, so do it here. + */ + arm_rebuild_hflags(env); + } +} + +static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { + return CP_ACCESS_TRAP_FP_EL2; + } + if (env->cp15.cptr_el[3] & CPTR_TFP) { + return CP_ACCESS_TRAP_FP_EL3; + } + return CP_ACCESS_OK; +} + +static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; +} + +static const ARMCPRegInfo v8_cp_reginfo[] = { + /* Minimal set of EL0-visible registers. This will need to be expanded + * significantly for system emulation of AArch64 CPUs. 
+ */ + { .name = "NZCV", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, + .access = PL0_RW, .type = ARM_CP_NZCV }, + { .name = "DAIF", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, + .type = ARM_CP_NO_RAW, + .access = PL0_RW, .accessfn = aa64_daif_access, + .fieldoffset = offsetof(CPUARMState, daif), + .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, + { .name = "FPCR", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, + .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, + .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, + { .name = "FPSR", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, + .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, + .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, + { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, + .access = PL0_R, .type = ARM_CP_NO_RAW, + .readfn = aa64_dczid_read }, + { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, + .access = PL0_W, .type = ARM_CP_DC_ZVA, + /* Avoid overhead of an access check that always passes in user-mode */ + .accessfn = aa64_zva_access, + }, + { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, + .access = PL1_R, .type = ARM_CP_CURRENTEL }, + /* Cache ops: all NOPs since we don't emulate caches */ + { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NOP, + .accessfn = aa64_cacheop_pou_access }, + { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NOP, + .accessfn = aa64_cacheop_pou_access }, + { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, + .access = PL0_W, .type = ARM_CP_NOP, + .accessfn = aa64_cacheop_pou_access }, + { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, + .access = PL1_W, .accessfn = aa64_cacheop_poc_access, + .type = ARM_CP_NOP }, + { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, + .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, + { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, + .access = PL0_W, .type = ARM_CP_NOP, + .accessfn = aa64_cacheop_poc_access }, + { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, + .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, + { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, + .access = PL0_W, .type = ARM_CP_NOP, + .accessfn = aa64_cacheop_pou_access }, + { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, + .access = PL0_W, .type = ARM_CP_NOP, + .accessfn = aa64_cacheop_poc_access }, + { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, + .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, + /* TLBI operations */ + { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, + .access = PL1_W, .accessfn = access_ttlb, .type = 
ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vmalle1is_write }, + { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vmalle1is_write }, + { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vmalle1_write }, + { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1_write }, + { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vmalle1_write }, + { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1_write }, + { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1_write }, + { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, + .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1_write }, + { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ipas2e1is_write }, + { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ipas2e1is_write }, + { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_alle1is_write }, + { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_alle1is_write }, + { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ipas2e1_write }, + { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, + .access = PL2_W, .type = 
ARM_CP_NO_RAW, + .writefn = tlbi_aa64_ipas2e1_write }, + { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_alle1_write }, + { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_alle1is_write }, + /* 64 bit address translation operations */ + { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ + { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, + .access = PL1_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), + .writefn = par_write }, + /* TLB invalidate last level of translation table walk */ + { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimva_is_write }, + { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimvaa_is_write }, + { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimva_write }, + { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimvaa_write }, + { 
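+    /* Hyp-mode TLB invalidate by VA, last level of translation table walk */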
.name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbimva_hyp_write }, + { .name = "TLBIMVALHIS", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbimva_hyp_is_write }, + { .name = "TLBIIPAS2", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2_write }, + { .name = "TLBIIPAS2IS", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2_is_write }, + { .name = "TLBIIPAS2L", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2_write }, + { .name = "TLBIIPAS2LIS", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2_is_write }, + /* 32 bit cache operations */ + { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, + { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, + .type = ARM_CP_NOP, .access = PL1_W }, + { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, + { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, + { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, + .type = ARM_CP_NOP, .access = PL1_W }, + { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, + .type = ARM_CP_NOP, .access = PL1_W }, + { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, + { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, + { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, + { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, + { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, + { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, + { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, + .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, + /* MMU Domain access control / MPU write buffer control */ + { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, + .writefn = dacr_write, .raw_writefn = raw_write, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), + offsetoflow32(CPUARMState, cp15.dacr_ns) } }, + { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, + { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = 
offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, + /* We rely on the access checks not allowing the guest to write to the + * state field when SPSel indicates that it's being used as the stack + * pointer. + */ + { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, + .access = PL1_RW, .accessfn = sp_el0_access, + .type = ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, + { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, + { .name = "SPSel", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, + .type = ARM_CP_NO_RAW, + .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, + { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, + .type = ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), + .access = PL2_RW, .accessfn = fpexc32_access }, + { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, + .access = PL2_RW, .resetvalue = 0, + .writefn = dacr_write, .raw_writefn = raw_write, + .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, + { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, + .access = PL2_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, + { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, + { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, + { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, + { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, + { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, + .resetvalue = 0, + .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, + { .name = "SDCR", .type = ARM_CP_ALIAS, + .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, + .access = PL1_RW, .accessfn = access_trap_aa32s_el1, + .writefn = sdcr_write, + .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, + REGINFO_SENTINEL +}; + +/* Used to describe the behaviour of EL2 regs when EL2 does not exist. 
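+ * All of these behave as RAZ/WI (mostly via ARM_CP_CONST with a zero + * reset value), so reads return 0 and writes are ignored rather than + * UNDEFing.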
*/ +static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { + { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, + .access = PL2_RW, + .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, + { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, + .access = PL2_RW, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, + .access = PL2_RW, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, + .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "VTTBR", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 6, .crm = 2, + .access = PL2_RW, .accessfn = access_el3_aa32ns, + .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, + .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CNTVOFF_EL2", .state = 
ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, + .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, + .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, + .access = PL2_RW, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, + .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "HIFAR", .state = ARM_CP_STATE_AA32, + .type = ARM_CP_CONST, + .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, + .access = PL2_RW, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +/* Ditto, but for registers which exist in ARMv8 but not v7 */ +static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { + { .name = "HCR2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, + .access = PL2_RW, + .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) +{ + ARMCPU *cpu = env_archcpu(env); + + if (arm_feature(env, ARM_FEATURE_V8)) { + valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */ + } else { + valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */ + } + + if (arm_feature(env, ARM_FEATURE_EL3)) { + valid_mask &= ~HCR_HCD; + } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { + /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. + * However, if we're using the SMC PSCI conduit then QEMU is + * effectively acting like EL3 firmware and so the guest at + * EL2 should retain the ability to prevent EL1 from being + * able to make SMC calls into the ersatz firmware, so in + * that case HCR.TSC should be read/write. + */ + valid_mask &= ~HCR_TSC; + } + + if (arm_feature(env, ARM_FEATURE_AARCH64)) { + if (cpu_isar_feature(aa64_vh, cpu)) { + valid_mask |= HCR_E2H; + } + if (cpu_isar_feature(aa64_lor, cpu)) { + valid_mask |= HCR_TLOR; + } + if (cpu_isar_feature(aa64_pauth, cpu)) { + valid_mask |= HCR_API | HCR_APK; + } + } + + /* Clear RES0 bits. 
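+ * e.g. for a v8.0 CPU with none of the optional features above, + * valid_mask covers roughly bits [33:0], so a write of ~0ULL keeps + * only those low bits.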
*/ + value &= valid_mask; + + /* These bits change the MMU setup: + * HCR_VM enables stage 2 translation + * HCR_PTW forbids certain page-table setups + * HCR_DC disables stage1 and enables stage2 translation + */ + if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { + tlb_flush(CPU(cpu)); + } + env->cp15.hcr_el2 = value; + + /* + * Updates to VI and VF require us to update the status of + * virtual interrupts, which are the logical OR of these bits + * and the state of the input lines from the GIC. (This requires + * that we have the iothread lock, which is done by marking the + * reginfo structs as ARM_CP_IO.) + * Note that if a write to HCR pends a VIRQ or VFIQ it is never + * possible for it to be taken immediately, because VIRQ and + * VFIQ are masked unless running at EL0 or EL1, and HCR + * can only be written at EL2. + */ + arm_cpu_update_virq(cpu); + arm_cpu_update_vfiq(cpu); +} + +static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + do_hcr_write(env, value, 0); +} + +static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ + value = deposit64(env->cp15.hcr_el2, 32, 32, value); + do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32)); +} + +static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Handle HCR write, i.e. write to low half of HCR_EL2 */ + value = deposit64(env->cp15.hcr_el2, 0, 32, value); + do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); +} + +/* + * Return the effective value of HCR_EL2. + * Bits that are not included here: + * RW (read from SCR_EL3.RW as needed) + */ +uint64_t arm_hcr_el2_eff(CPUARMState *env) +{ + uint64_t ret = env->cp15.hcr_el2; + + if (arm_is_secure_below_el3(env)) { + /* + * "This register has no effect if EL2 is not enabled in the + * current Security state". This is ARMv8.4-SecEL2 speak for + * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). + * + * Prior to that, the language was "In an implementation that + * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves + * as if this field is 0 for all purposes other than a direct + * read or write access of HCR_EL2". With lots of enumeration + * on a per-field basis. In current QEMU, this condition + * is arm_is_secure_below_el3. + * + * Since the v8.4 language applies to the entire register, and + * appears to be backward compatible, use that. + */ + return 0; + } + + /* + * For a cpu that supports both aarch64 and aarch32, we can set bits + * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32. + * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32. + */ + if (!arm_el_is_aa64(env, 2)) { + uint64_t aa32_valid; + + /* + * These bits are up-to-date as of ARMv8.6. + * For HCR, it's easiest to list just the 2 bits that are invalid. + * For HCR2, list those that are valid. + */ + aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ); + aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE | + HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS); + ret &= aa32_valid; + } + + if (ret & HCR_TGE) { + /* These bits are up-to-date as of ARMv8.6. 
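+ * With TGE set, most EL1 trap controls have no effect; without E2H, + * FMO/IMO/AMO additionally read as 1 so that physical interrupts are + * taken to EL2.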
*/ + if (ret & HCR_E2H) { + ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | + HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | + HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | + HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE | + HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT | + HCR_TTLBIS | HCR_TTLBOS | HCR_TID5); + } else { + ret |= HCR_FMO | HCR_IMO | HCR_AMO; + } + ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | + HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | + HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | + HCR_TLOR); + } + + return ret; +} + +static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * For A-profile AArch32 EL3, if NSACR.CP10 + * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. + */ + if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && + !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { + value &= ~(0x3 << 10); + value |= env->cp15.cptr_el[2] & (0x3 << 10); + } + env->cp15.cptr_el[2] = value; +} + +static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* + * For A-profile AArch32 EL3, if NSACR.CP10 + * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. + */ + uint64_t value = env->cp15.cptr_el[2]; + + if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && + !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { + value |= 0x3 << 10; + } + return value; +} + +static const ARMCPRegInfo el2_cp_reginfo[] = { + { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_IO, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, + .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), + .writefn = hcr_write }, + { .name = "HCR", .state = ARM_CP_STATE_AA32, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, + .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), + .writefn = hcr_writelow }, + { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, + .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, + { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, + .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, + { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, + .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, + { .name = "HIFAR", .state = ARM_CP_STATE_AA32, + .type = ARM_CP_ALIAS, + .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, + .access = PL2_RW, + .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, + { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, + { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, + .access = PL2_RW, .writefn = vbar_write, + .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), + .resetvalue = 0 }, + { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, + .access = PL3_RW, .type = ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, + { .name = "CPTR_EL2", 
.state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, + .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), + .readfn = cptr_el2_read, .writefn = cptr_el2_write }, + { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, + .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), + .resetvalue = 0 }, + { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_ALIAS, + .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, + { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ + { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, + .access = PL2_RW, .writefn = vmsa_tcr_el12_write, + /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */ + .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, + { .name = "VTCR", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, + .type = ARM_CP_ALIAS, + .access = PL2_RW, .accessfn = access_el3_aa32ns, + .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, + { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, + .access = PL2_RW, + /* no .writefn needed as this can't cause an ASID change; + * no .raw_writefn or .resetfn needed as we never use mask/base_mask + */ + .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, + { .name = "VTTBR", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 6, .crm = 2, + .type = ARM_CP_64BIT | ARM_CP_ALIAS, + .access = PL2_RW, .accessfn = access_el3_aa32ns, + .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), + .writefn = vttbr_write }, + { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, + .access = PL2_RW, .writefn = vttbr_write, + .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, + { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, + .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, + .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, + { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, + .access = PL2_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, + { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, + .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write, + .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, + { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, + .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, 
cp15.ttbr0_el[2]) }, + { .name = "TLBIALLNSNH", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiall_nsnh_write }, + { .name = "TLBIALLNSNHIS", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiall_nsnh_is_write }, + { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiall_hyp_write }, + { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiall_hyp_is_write }, + { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbimva_hyp_write }, + { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbimva_hyp_is_write }, + { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbi_aa64_alle2_write }, + { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbi_aa64_vae2_write }, + { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae2_write }, + { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_alle2is_write }, + { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbi_aa64_vae2is_write }, + { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae2is_write }, + /* Unlike the other EL2-related AT operations, these must + * UNDEF from EL3 if EL2 is not implemented, which is why we + * define them here rather than with the rest of the AT ops. + */ + { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, + .access = PL2_W, .accessfn = at_s1e2_access, + .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, + { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, + .access = PL2_W, .accessfn = at_s1e2_access, + .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, + /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE + * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 + * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose + * to behave as if SCR.NS was 1. + */ + { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, + .access = PL2_W, + .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, + { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, + .access = PL2_W, + .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, + { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, + /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the + * reset values as IMPDEF. 
We choose to reset to 3 to comply with + * both ARMv7 and ARMv8. + */ + .access = PL2_RW, .resetvalue = 3, + .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, + { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, + .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, + .writefn = gt_cntvoff_write, + .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, + { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, + .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, + .writefn = gt_cntvoff_write, + .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, + { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), + .type = ARM_CP_IO, .access = PL2_RW, + .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, + { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), + .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, + .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, + { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, + .resetfn = gt_hyp_timer_reset, + .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, + { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, + .type = ARM_CP_IO, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), + .resetvalue = 0, + .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, + /* The only field of MDCR_EL2 that has a defined architectural reset value + * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we + * don't implement any PMU event counters, so using zero as a reset + * value for MDCR_EL2 is okay + */ + { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, + .access = PL2_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, + { .name = "HPFAR", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, + .access = PL2_RW, .accessfn = access_el3_aa32ns, + .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, + { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, + { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, + .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo el2_v8_cp_reginfo[] = { + { .name = "HCR2", .state = ARM_CP_STATE_AA32, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, + .access = PL2_RW, + .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), + .writefn = hcr_writehigh }, + REGINFO_SENTINEL +}; + +static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. + * At Secure EL1 it traps to EL3. + */ + if (arm_current_el(env) == 3) { + return CP_ACCESS_OK; + } + if (arm_is_secure_below_el3(env)) { + return CP_ACCESS_TRAP_EL3; + } + /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. 
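+ * CP_ACCESS_TRAP_UNCATEGORIZED below reports the write as an UNDEF + * rather than a configurable trap, which matches that behaviour.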
*/ + if (isread) { + return CP_ACCESS_OK; + } + return CP_ACCESS_TRAP_UNCATEGORIZED; +} + +static const ARMCPRegInfo el3_cp_reginfo[] = { + { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, + .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), + .resetvalue = 0, .writefn = scr_write }, + { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL, + .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, + .access = PL1_RW, .accessfn = access_trap_aa32s_el1, + .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), + .writefn = scr_write }, + { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, + .access = PL3_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.sder) }, + { .name = "SDER", + .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, + .access = PL3_RW, .resetvalue = 0, + .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, + { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_trap_aa32s_el1, + .writefn = vbar_write, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, + { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, + .access = PL3_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, + { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, + .access = PL3_RW, + /* no .writefn needed as this can't cause an ASID change; + * we must provide a .raw_writefn and .resetfn because we handle + * reset and migration for the AArch32 TTBCR(S), which might be + * using mask and base_mask. + */ + .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, + .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, + { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, + .access = PL3_RW, + .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, + { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, + .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, + { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, + .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, + { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_ALIAS, + .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, + .access = PL3_RW, + .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, + { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, + .access = PL3_RW, .writefn = vbar_write, + .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), + .resetvalue = 0 }, + { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, + .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, + { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, + .access = PL3_RW, .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, + { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, + .access = PL3_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "AFSR0_EL3", .state = 
ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, + .access = PL3_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, + .access = PL3_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_alle3is_write }, + { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae3is_write }, + { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae3is_write }, + { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_alle3_write }, + { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae3_write }, + { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae3_write }, + REGINFO_SENTINEL +}; + +static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) +{ +#if 0 + struct E2HAlias { + uint32_t src_key, dst_key, new_key; + const char *src_name, *dst_name, *new_name; + bool (*feature)(const ARMISARegisters *id); + }; + +#define K(op0, op1, crn, crm, op2) \ + ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) + + static const struct E2HAlias aliases[] = { + { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), + "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, + { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), + "CPACR", "CPTR_EL2", "CPACR_EL12" }, + { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), + "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, + { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), + "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, + { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), + "TCR_EL1", "TCR_EL2", "TCR_EL12" }, + { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), + "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, + { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), + "ELR_EL1", "ELR_EL2", "ELR_EL12" }, + { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), + "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, + { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), + "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, + { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), + "ESR_EL1", "ESR_EL2", "ESR_EL12" }, + { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), + "FAR_EL1", "FAR_EL2", "FAR_EL12" }, + { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), + "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, + { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), + "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, + { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0), + "VBAR", "VBAR_EL2", "VBAR_EL12" }, + { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), + "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, + { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), + "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" }, + + /* + * Note that redirection of ZCR is mentioned in the description + * of 
ZCR_EL2, and aliasing in the description of ZCR_EL1, but + * not in the summary table. + */ + { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), + "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, + + /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ + /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ + }; +#undef K + + size_t i; + + for (i = 0; i < ARRAY_SIZE(aliases); i++) { + const struct E2HAlias *a = &aliases[i]; + ARMCPRegInfo *src_reg, *dst_reg; + + if (a->feature && !a->feature(&cpu->isar)) { + continue; + } + + src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key); + dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key); + g_assert(src_reg != NULL); + g_assert(dst_reg != NULL); + + /* Cross-compare names to detect typos in the keys. */ + g_assert(strcmp(src_reg->name, a->src_name) == 0); + g_assert(strcmp(dst_reg->name, a->dst_name) == 0); + + /* None of the core system registers use opaque; we will. */ + g_assert(src_reg->opaque == NULL); + + /* Create alias before redirection so we dup the right data. */ + if (a->new_key) { + ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); + uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t)); + bool ok; + + new_reg->name = a->new_name; + new_reg->type |= ARM_CP_ALIAS; + /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ + new_reg->access &= PL2_RW | PL3_RW; + + ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg); + g_assert(ok); + } + + src_reg->opaque = dst_reg; + src_reg->orig_readfn = src_reg->readfn ?: raw_read; + src_reg->orig_writefn = src_reg->writefn ?: raw_write; + if (!src_reg->raw_readfn) { + src_reg->raw_readfn = raw_read; + } + if (!src_reg->raw_writefn) { + src_reg->raw_writefn = raw_write; + } + src_reg->readfn = el2_e2h_read; + src_reg->writefn = el2_e2h_write; + } +#endif +} + +static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int cur_el = arm_current_el(env); + + if (cur_el < 2) { + uint64_t hcr = arm_hcr_el2_eff(env); + + if (cur_el == 0) { + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { + return CP_ACCESS_TRAP_EL2; + } + } else { + if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { + return CP_ACCESS_TRAP; + } + if (hcr & HCR_TID2) { + return CP_ACCESS_TRAP_EL2; + } + } + } else if (hcr & HCR_TID2) { + return CP_ACCESS_TRAP_EL2; + } + } + + return CP_ACCESS_OK; +} + +static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Writes to OSLAR_EL1 may update the OS lock status, which can be + * read via a bit in OSLSR_EL1. + */ + int oslock; + + if (ri->state == ARM_CP_STATE_AA32) { + oslock = (value == 0xC5ACCE55); + } else { + oslock = value & 1; + } + + env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); +} + +static const ARMCPRegInfo debug_cp_reginfo[] = { + /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped + * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; + * unlike DBGDRAR it is never accessible from EL0. + * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 + * accessor. 
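+ * (DBGDRAR bits [1:0] are its Valid field, so a RAZ value reads as + * "address not valid", which is an architecturally legal setting.)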
+ */ + { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .accessfn = access_tdra, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, + .access = PL1_R, .accessfn = access_tdra, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .accessfn = access_tdra, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ + { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), + .resetvalue = 0 }, + /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. + * We don't implement the configurable EL0 access. + */ + { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, + .type = ARM_CP_ALIAS, + .access = PL1_R, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, + { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, + .access = PL1_W, .type = ARM_CP_NO_RAW, + .accessfn = access_tdosa, + .writefn = oslar_write }, + { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, + .access = PL1_R, .resetvalue = 10, + .accessfn = access_tdosa, + .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, + /* Dummy OSDLR_EL1: 32-bit Linux will read this */ + { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, + .access = PL1_RW, .accessfn = access_tdosa, + .type = ARM_CP_NOP }, + /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't + * implement vector catch debug events yet. + */ + { .name = "DBGVCR", + .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tda, + .type = ARM_CP_NOP }, + /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor + * to save and restore a 32-bit guest's DBGVCR) + */ + { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, + .access = PL2_RW, .accessfn = access_tda, + .type = ARM_CP_NOP }, + /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications + * Channel but Linux may try to access this register. The 32-bit + * alias is DBGDCCINT. + */ + { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tda, + .type = ARM_CP_NOP }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { + /* 64 bit access versions of the (dummy) debug registers */ + { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, + .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, + .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +/* Return the exception level to which exceptions should be taken + * via SVEAccessTrap. If an exception should be routed through + * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should + * take care of raising that exception. + * C.f. the ARM pseudocode function CheckSVEEnabled. 
+ */ +int sve_exception_el(CPUARMState *env, int el) +{ + uint64_t hcr_el2 = arm_hcr_el2_eff(env); + + if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { + bool disabled = false; + + /* The CPACR.ZEN controls traps to EL1: + * 0, 2 : trap EL0 and EL1 accesses + * 1 : trap only EL0 accesses + * 3 : trap no accesses + */ + if (!extract32(env->cp15.cpacr_el1, 16, 1)) { + disabled = true; + } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { + disabled = el == 0; + } + if (disabled) { + /* route_to_el2 */ + return hcr_el2 & HCR_TGE ? 2 : 1; + } + + /* Check CPACR.FPEN. */ + if (!extract32(env->cp15.cpacr_el1, 20, 1)) { + disabled = true; + } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { + disabled = el == 0; + } + if (disabled) { + return 0; + } + } + + /* CPTR_EL2. Since TZ and TFP are positive, + * they will be zero when EL2 is not present. + */ + if (el <= 2 && !arm_is_secure_below_el3(env)) { + if (env->cp15.cptr_el[2] & CPTR_TZ) { + return 2; + } + if (env->cp15.cptr_el[2] & CPTR_TFP) { + return 0; + } + } + + /* CPTR_EL3. Since EZ is negative we must check for EL3. */ + if (arm_feature(env, ARM_FEATURE_EL3) + && !(env->cp15.cptr_el[3] & CPTR_EZ)) { + return 3; + } + return 0; +} + +static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) +{ + uint32_t end_len; + + end_len = start_len &= 0xf; + if (!test_bit(start_len, cpu->sve_vq_map)) { + end_len = find_last_bit(cpu->sve_vq_map, start_len); + assert(end_len < start_len); + } + return end_len; +} + +/* + * Given that SVE is enabled, return the vector length for EL. + */ +uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) +{ + ARMCPU *cpu = env_archcpu(env); + uint32_t zcr_len = cpu->sve_max_vq - 1; + + if (el <= 1) { + zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); + } + if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) { + zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); + } + if (arm_feature(env, ARM_FEATURE_EL3)) { + zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); + } + + return sve_zcr_get_valid_len(cpu, zcr_len); +} + +static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int cur_el = arm_current_el(env); + int old_len = sve_zcr_len_for_el(env, cur_el); + int new_len; + + /* Bits other than [3:0] are RAZ/WI. */ + QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16); + raw_write(env, ri, value & 0xf); + + /* + * Because we arrived here, we know both FP and SVE are enabled; + * otherwise we would have trapped access to the ZCR_ELn register. 
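+ * As a worked example, on a CPU supporting every vector length, + * dropping ZCR_EL1.LEN from 3 to 1 shrinks the effective VQ from 4 + * to 2, so aarch64_sve_narrow_vq() trims state to 2 quadwords + * (256-bit vectors).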
+ */ + new_len = sve_zcr_len_for_el(env, cur_el); + if (new_len < old_len) { + aarch64_sve_narrow_vq(env, new_len + 1); + } +} + +static const ARMCPRegInfo zcr_el1_reginfo = { + .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_SVE, + .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), + .writefn = zcr_write, .raw_writefn = raw_write +}; + +static const ARMCPRegInfo zcr_el2_reginfo = { + .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_SVE, + .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), + .writefn = zcr_write, .raw_writefn = raw_write +}; + +static const ARMCPRegInfo zcr_no_el2_reginfo = { + .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, + .access = PL2_RW, .type = ARM_CP_SVE, + .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore +}; + +static const ARMCPRegInfo zcr_el3_reginfo = { + .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, + .access = PL3_RW, .type = ARM_CP_SVE, + .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), + .writefn = zcr_write, .raw_writefn = raw_write +}; + +void hw_watchpoint_update(ARMCPU *cpu, int n) +{ + CPUARMState *env = &cpu->env; + vaddr len = 0; + vaddr wvr = env->cp15.dbgwvr[n]; + uint64_t wcr = env->cp15.dbgwcr[n]; + int mask; + int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; + + if (env->cpu_watchpoint[n]) { + cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); + env->cpu_watchpoint[n] = NULL; + } + + if (!extract64(wcr, 0, 1)) { + /* E bit clear : watchpoint disabled */ + return; + } + + switch (extract64(wcr, 3, 2)) { + case 0: + /* LSC 00 is reserved and must behave as if the wp is disabled */ + return; + case 1: + flags |= BP_MEM_READ; + break; + case 2: + flags |= BP_MEM_WRITE; + break; + case 3: + flags |= BP_MEM_ACCESS; + break; + } + + /* Attempts to use both MASK and BAS fields simultaneously are + * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, + * thus generating a watchpoint for every byte in the masked region. + */ + mask = extract64(wcr, 24, 4); + if (mask == 1 || mask == 2) { + /* Reserved values of MASK; we must act as if the mask value was + * some non-reserved value, or as if the watchpoint were disabled. + * We choose the latter. + */ + return; + } else if (mask) { + /* Watchpoint covers an aligned area up to 2GB in size */ + len = 1ULL << mask; + /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE + * whether the watchpoint fires when the unmasked bits match; we opt + * to generate the exceptions. + */ + wvr &= ~(len - 1); + } else { + /* Watchpoint covers bytes defined by the byte address select bits */ + int bas = extract64(wcr, 5, 8); + int basstart; + + if (extract64(wvr, 2, 1)) { + /* Deprecated case of an only 4-aligned address. BAS[7:4] are + * ignored, and BAS[3:0] define which bytes to watch. + */ + bas &= 0xf; + } + + if (bas == 0) { + /* This must act as if the watchpoint is disabled */ + return; + } + + /* The BAS bits are supposed to be programmed to indicate a contiguous + * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether + * we fire for each byte in the word/doubleword addressed by the WVR. + * We choose to ignore any non-zero bits after the first range of 1s. 
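+ * For example, BAS = 0b00111100 gives basstart = 2 and + * len = cto32(0b1111) = 4: a 4-byte watchpoint starting at wvr + 2.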
*/ + basstart = ctz32(bas); + len = cto32(bas >> basstart); + wvr += basstart; + } + + cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, + &env->cpu_watchpoint[n]); +} + +void hw_watchpoint_update_all(ARMCPU *cpu) +{ + int i; + CPUARMState *env = &cpu->env; + + /* Completely clear out existing QEMU watchpoints and our array, to + * avoid possible stale entries following migration load. + */ + cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); + memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); + + for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { + hw_watchpoint_update(cpu, i); + } +} + +static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + /* Bits [63:49] are hardwired to the value of bit [48]; that is, the + * register reads and behaves as if values written are sign extended. + * Bits [1:0] are RES0. + */ + value = sextract64(value, 0, 49) & ~3ULL; + + raw_write(env, ri, value); + hw_watchpoint_update(cpu, i); +} + +static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + raw_write(env, ri, value); + hw_watchpoint_update(cpu, i); +} + +void hw_breakpoint_update(ARMCPU *cpu, int n) +{ + CPUARMState *env = &cpu->env; + uint64_t bvr = env->cp15.dbgbvr[n]; + uint64_t bcr = env->cp15.dbgbcr[n]; + vaddr addr; + int bt; + int flags = BP_CPU; + + if (env->cpu_breakpoint[n]) { + cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); + env->cpu_breakpoint[n] = NULL; + } + + if (!extract64(bcr, 0, 1)) { + /* E bit clear : breakpoint disabled */ + return; + } + + bt = extract64(bcr, 20, 4); + + switch (bt) { + case 4: /* unlinked address mismatch (reserved if AArch64) */ + case 5: /* linked address mismatch (reserved if AArch64) */ + qemu_log_mask(LOG_UNIMP, + "arm: address mismatch breakpoint types not implemented\n"); + return; + case 0: /* unlinked address match */ + case 1: /* linked address match */ + { + /* Bits [63:49] are hardwired to the value of bit [48]; that is, + * we behave as if the register was sign extended. Bits [1:0] are + * RES0. The BAS field is used to allow setting breakpoints on 16 + * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether + * a bp will fire if the addresses covered by the bp and the addresses + * covered by the insn overlap but the insn doesn't start at the + * start of the bp address range. We choose to require the insn and + * the bp to have the same address. The constraints on writing to + * BAS enforced in dbgbcr_write mean we have only four cases: + * 0b0000 => no breakpoint + * 0b0011 => breakpoint on addr + * 0b1100 => breakpoint on addr + 2 + * 0b1111 => breakpoint on addr + * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 
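+ * (dbgbcr_write below mirrors BAS[2] into BAS[3] and BAS[0] into + * BAS[1], which is what restricts us to these four cases.)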
+ */ + int bas = extract64(bcr, 5, 4); + addr = sextract64(bvr, 0, 49) & ~3ULL; + if (bas == 0) { + return; + } + if (bas == 0xc) { + addr += 2; + } + break; + } + case 2: /* unlinked context ID match */ + case 8: /* unlinked VMID match (reserved if no EL2) */ + case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ + qemu_log_mask(LOG_UNIMP, + "arm: unlinked context breakpoint types not implemented\n"); + return; + case 9: /* linked VMID match (reserved if no EL2) */ + case 11: /* linked context ID and VMID match (reserved if no EL2) */ + case 3: /* linked context ID match */ + default: + /* We must generate no events for Linked context matches (unless + * they are linked to by some other bp/wp, which is handled in + * updates for the linking bp/wp). We choose to also generate no events + * for reserved values. + */ + return; + } + + cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); +} + +void hw_breakpoint_update_all(ARMCPU *cpu) +{ + int i; + CPUARMState *env = &cpu->env; + + /* Completely clear out existing QEMU breakpoints and our array, to + * avoid possible stale entries following migration load. + */ + cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); + memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); + + for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { + hw_breakpoint_update(cpu, i); + } +} + +static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + raw_write(env, ri, value); + hw_breakpoint_update(cpu, i); +} + +static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only + * copy of BAS[0]. + */ + value = deposit64(value, 6, 1, extract64(value, 5, 1)); + value = deposit64(value, 8, 1, extract64(value, 7, 1)); + + raw_write(env, ri, value); + hw_breakpoint_update(cpu, i); +} + +static void define_debug_regs(ARMCPU *cpu) +{ + /* Define v7 and v8 architectural debug registers. + * These are just dummy implementations for now. + */ + int i; +#ifndef NDEBUG + int wrps, brps, ctx_cmps; +#else + int wrps, brps; +#endif + ARMCPRegInfo dbgdidr = { + .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, + }; + + /* Note that all these register fields hold "number of Xs minus 1". 
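+ * (The arm_num_brps/wrps/ctx_cmps helpers already add the 1 back, so + * brps and wrps below are the real counts.)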
*/ + brps = arm_num_brps(cpu); + wrps = arm_num_wrps(cpu); +#ifndef NDEBUG + ctx_cmps = arm_num_ctx_cmps(cpu); +#else + arm_num_ctx_cmps(cpu); +#endif + + assert(ctx_cmps <= brps); + + define_one_arm_cp_reg(cpu, &dbgdidr); + define_arm_cp_regs(cpu, debug_cp_reginfo); + + if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { + define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); + } + + for (i = 0; i < brps; i++) { + ARMCPRegInfo dbgregs[] = { + { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), + .writefn = dbgbvr_write, .raw_writefn = raw_write + }, + { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), + .writefn = dbgbcr_write, .raw_writefn = raw_write + }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, dbgregs); + } + + for (i = 0; i < wrps; i++) { + ARMCPRegInfo dbgregs[] = { + { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), + .writefn = dbgwvr_write, .raw_writefn = raw_write + }, + { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), + .writefn = dbgwcr_write, .raw_writefn = raw_write + }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, dbgregs); + } +} + +static void define_pmu_regs(ARMCPU *cpu) +{ + /* + * v7 performance monitor control register: same implementor + * field as main ID register, and we implement four counters in + * addition to the cycle count register. 
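+ * (pmcrn below is that counter count; the guest discovers it via + * PMCR.N, hence pmcrn << PMCRN_SHIFT in the PMCR_EL0 reset value.)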
+ */ + unsigned int i, pmcrn = 4; + ARMCPRegInfo pmcr = { + .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, + .access = PL0_RW, + .type = ARM_CP_IO | ARM_CP_ALIAS, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), + .accessfn = pmreg_access, .writefn = pmcr_write, + .raw_writefn = raw_write, + }; + ARMCPRegInfo pmcr64 = { + .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, + .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), + .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) | + PMCRLC, + .writefn = pmcr_write, .raw_writefn = raw_write, + }; + define_one_arm_cp_reg(cpu, &pmcr); + define_one_arm_cp_reg(cpu, &pmcr64); + for (i = 0; i < pmcrn; i++) { + char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); + char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); + char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); + char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); + ARMCPRegInfo pmev_regs[] = { + { .name = pmevcntr_name, .cp = 15, .crn = 14, + .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, + .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, + .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, + .accessfn = pmreg_access }, + { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), + .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_IO, + .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, + .raw_readfn = pmevcntr_rawread, + .raw_writefn = pmevcntr_rawwrite }, + { .name = pmevtyper_name, .cp = 15, .crn = 14, + .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, + .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, + .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, + .accessfn = pmreg_access }, + { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), + .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_IO, + .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, + .raw_writefn = pmevtyper_rawwrite }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, pmev_regs); + g_free(pmevcntr_name); + g_free(pmevcntr_el0_name); + g_free(pmevtyper_name); + g_free(pmevtyper_el0_name); + } + if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { + ARMCPRegInfo v81_pmu_regs[] = { + { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .resetvalue = extract64(cpu->pmceid0, 32, 32) }, + { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .resetvalue = extract64(cpu->pmceid1, 32, 32) }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, v81_pmu_regs); + } + if (cpu_isar_feature(any_pmu_8_4, cpu)) { + static const ARMCPRegInfo v84_pmmir = { + .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, + .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .resetvalue = 0 + }; + define_one_arm_cp_reg(cpu, &v84_pmmir); + } +} + +/* We don't know until after realize whether there's a GICv3 + * attached, and that is what registers the gicv3 sysregs. 
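The crm/opc2 arithmetic in the PMEVCNTR/PMEVTYPER loop above is easy to misread, so here is a stand-alone sketch that reproduces it: event counter n lands at CRm = 8 + (n >> 3) (12 + (n >> 3) for the TYPER bank) with opc2 = n & 7.

#include <stdio.h>

int main(void)
{
    /* Same encoding arithmetic as the pmev_regs loop above. */
    for (int n = 0; n < 31; n++) {
        int cntr_crm  =  8 | (3 & (n >> 3));   /* CRm 8..11  */
        int typer_crm = 12 | (3 & (n >> 3));   /* CRm 12..15 */
        int opc2 = n & 7;
        printf("PMEVCNTR%-2d crm=%-2d opc2=%d | PMEVTYPER%-2d crm=%-2d opc2=%d\n",
               n, cntr_crm, opc2, n, typer_crm, opc2);
    }
    return 0;
}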
+ * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 + * at runtime. + */ +static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + ARMCPU *cpu = env_archcpu(env); + uint64_t pfr1 = cpu->id_pfr1; + + if (env->gicv3state) { + pfr1 |= 1 << 28; + } + return pfr1; +} + +static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + ARMCPU *cpu = env_archcpu(env); + uint64_t pfr0 = cpu->isar.id_aa64pfr0; + + if (env->gicv3state) { + pfr0 |= 1 << 24; + } + return pfr0; +} + +/* Shared logic between LORID and the rest of the LOR* registers. + * Secure state has already been dealt with. + */ +static CPAccessResult access_lor_ns(CPUARMState *env) +{ + int el = arm_current_el(env); + + if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_is_secure_below_el3(env)) { + /* Access ok in secure mode. */ + return CP_ACCESS_OK; + } + return access_lor_ns(env); +} + +static CPAccessResult access_lor_other(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) +{ + if (arm_is_secure_below_el3(env)) { + /* Access denied in secure mode. */ + return CP_ACCESS_TRAP; + } + return access_lor_ns(env); +} + +/* + * A trivial implementation of ARMv8.1-LOR leaves all of these + * registers fixed at 0, which indicates that there are zero + * supported Limited Ordering regions. + */ +static const ARMCPRegInfo lor_reginfo[] = { + { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, + .access = PL1_RW, .accessfn = access_lor_other, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, + .access = PL1_RW, .accessfn = access_lor_other, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, + .access = PL1_RW, .accessfn = access_lor_other, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, + .access = PL1_RW, .accessfn = access_lor_other, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, + .access = PL1_R, .accessfn = access_lorid, + .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +#ifdef TARGET_AARCH64 +static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + + if (el < 2 && + arm_feature(env, ARM_FEATURE_EL2) && + !(arm_hcr_el2_eff(env) & HCR_APK)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && + arm_feature(env, ARM_FEATURE_EL3) && + !(env->cp15.scr_el3 & SCR_APK)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo pauth_reginfo[] = { + { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, keys.apda.lo) }, + { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState,
keys.apda.hi) }, + { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) }, + { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) }, + { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, keys.apga.lo) }, + { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, keys.apga.hi) }, + { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, keys.apia.lo) }, + { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, keys.apia.hi) }, + { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, keys.apib.lo) }, + { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, + .access = PL1_RW, .accessfn = access_pauth, + .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, + REGINFO_SENTINEL +}; + +static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint64_t ret; + + /* Success sets NZCV = 0000. */ + env->NF = env->CF = env->VF = 0, env->ZF = 1; + + if (qemu_guest_getrandom(&ret, sizeof(ret)) < 0) { + /* + * ??? Failed, for unknown reasons in the crypto subsystem. + * The best we can do is log the reason and return the + * timed-out indication to the guest. There is no reason + * we know to expect this failure to be transitory, so the + * guest may well hang retrying the operation. + */ + //qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s", + // ri->name, error_get_pretty(err)); + + env->ZF = 0; /* NZCV = 0100 */ + return 0; + } + return ret; +} + +/* We do not support re-seeding, so the two registers operate the same.
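A note on the flag writes in rndr_readfn above: QEMU keeps the flags in separate fields, with N and V held in bit 31 of NF/VF, C held directly in CF, and Z set exactly when ZF == 0. Assuming that convention, this sketch shows why ZF = 1 means "success, NZCV = 0000" while ZF = 0 yields the 0b0100 timed-out indication.

#include <stdint.h>
#include <stdio.h>

struct flags { uint32_t NF, ZF, CF, VF; };   /* mirrors CPUARMState's fields */

static unsigned nzcv(const struct flags *f)
{
    return ((f->NF >> 31) << 3) |   /* N: bit 31 of NF */
           ((f->ZF == 0)  << 2) |   /* Z: set when ZF == 0 */
           ((f->CF &  1)  << 1) |   /* C: stored directly */
           (f->VF >> 31);           /* V: bit 31 of VF */
}

int main(void)
{
    struct flags ok   = { .NF = 0, .ZF = 1, .CF = 0, .VF = 0 };
    struct flags fail = { .NF = 0, .ZF = 0, .CF = 0, .VF = 0 };
    printf("success NZCV=%#x, failure NZCV=%#x\n", nzcv(&ok), nzcv(&fail));
    return 0;   /* success NZCV=0, failure NZCV=0x4 (0b0100) */
}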
*/ +static const ARMCPRegInfo rndr_reginfo[] = { + { .name = "RNDR", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, + .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0, + .access = PL0_R, .readfn = rndr_readfn }, + { .name = "RNDRRS", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, + .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1, + .access = PL0_R, .readfn = rndr_readfn }, + REGINFO_SENTINEL +}; + +static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + struct uc_struct *uc = env->uc; + /* CTR_EL0 System register -> DminLine, bits [19:16] */ + uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); + uint64_t vaddr_in = (uint64_t) value; + uint64_t vaddr = vaddr_in & ~(dline_size - 1); + void *haddr; + int mem_idx = cpu_mmu_index(env, false); + + /* This won't be crossing page boundaries */ + haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); + if (haddr) { + + ram_addr_t offset; + MemoryRegion *mr; + + /* RCU lock is already being held */ + mr = memory_region_from_host(uc, haddr, &offset); + if (mr) { + // memory_region_do_writeback(mr, offset, dline_size); FIXME + } + } +} + +static const ARMCPRegInfo dcpop_reg[] = { + { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1, + .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, + .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo dcpodp_reg[] = { + { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1, + .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, + .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, + REGINFO_SENTINEL +}; + +#endif + +static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + + if (el == 0) { + uint64_t sctlr = arm_sctlr(env, el); + if (!(sctlr & SCTLR_EnRCTX)) { + return CP_ACCESS_TRAP; + } + } else if (el == 1) { + uint64_t hcr = arm_hcr_el2_eff(env); + if (hcr & HCR_NV) { + return CP_ACCESS_TRAP_EL2; + } + } + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo predinv_reginfo[] = { + { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4, + .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, + { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5, + .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, + { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7, + .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, + /* + * Note the AArch32 opcodes have a different OPC1. 
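The line-size arithmetic in dccvap_writefn above deserves a worked example: CTR_EL0.DminLine (bits [19:16]) encodes log2 of the line size in 4-byte words, so the byte size is 4 << DminLine, and the target address is aligned down to a line boundary before the host lookup. A stand-alone sketch, with an assumed DminLine of 4 (64-byte lines):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t ctr = 4u << 16;                       /* DminLine = 4 */
    uint64_t dline_size = 4u << ((ctr >> 16) & 0xF);
    uint64_t vaddr_in = 0x1000f3ULL;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1); /* align down */

    printf("line=%llu bytes, %#llx -> %#llx\n",
           (unsigned long long)dline_size,
           (unsigned long long)vaddr_in,
           (unsigned long long)vaddr);
    return 0;   /* line=64 bytes, 0x1000f3 -> 0x1000c0 */
}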
+ */ + { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4, + .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, + { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5, + .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, + { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, + .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, + REGINFO_SENTINEL +}; + +static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Read the high 32 bits of the current CCSIDR */ + return extract64(ccsidr_read(env, ri), 32, 32); +} + +static const ARMCPRegInfo ccsidr2_reginfo[] = { + { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2, + .access = PL1_R, + .accessfn = access_aa64_tid2, + .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW }, + REGINFO_SENTINEL +}; + +static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) { + return CP_ACCESS_TRAP_EL2; + } + + return CP_ACCESS_OK; +} + +static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_feature(env, ARM_FEATURE_V8)) { + return access_aa64_tid3(env, ri, isread); + } + + return CP_ACCESS_OK; +} + +static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) { + return CP_ACCESS_TRAP_EL2; + } + + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo jazelle_regs[] = { + { .name = "JIDR", + .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, + .access = PL1_R, .accessfn = access_jazelle, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "JOSCR", + .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "JMCR", + .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo vhe_reginfo[] = { + { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) }, + { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, + .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, + .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) }, + { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2, + .fieldoffset = + offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval), + .type = ARM_CP_IO, .access = PL2_RW, + .writefn = gt_hv_cval_write, .raw_writefn = raw_write }, + { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, + .resetfn = gt_hv_timer_reset, + .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write }, + { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH, + .type = ARM_CP_IO, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1, + .access = PL2_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl), + .writefn = gt_hv_ctl_write, .raw_writefn = raw_write }, + { .name = "CNTP_CTL_EL02", .state = 
ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_ALIAS, + .access = PL2_RW, .accessfn = e2h_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), + .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, + { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_ALIAS, + .access = PL2_RW, .accessfn = e2h_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), + .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, + { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, + .access = PL2_RW, .accessfn = e2h_access, + .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write }, + { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, + .access = PL2_RW, .accessfn = e2h_access, + .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write }, + { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, + .type = ARM_CP_IO | ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), + .access = PL2_RW, .accessfn = e2h_access, + .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, + { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, + .type = ARM_CP_IO | ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), + .access = PL2_RW, .accessfn = e2h_access, + .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo ats1e1_reginfo[] = { + { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo ats1cp_reginfo[] = { + { .name = "ATS1CPRP", + .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write }, + { .name = "ATS1CPWP", + .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write }, + REGINFO_SENTINEL +}; + +/* + * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and + * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field + * is non-zero, which is never for ARMv7, optionally in ARMv8 + * and mandatorily for ARMv8.2 and up. + * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's + * implementation is RAZ/WI we can ignore this detail, as we + * do for ACTLR. 
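The ACTLR2/HACTLR2 definitions below (like the LOR* block earlier) follow the RAZ/WI-via-ARM_CP_CONST idiom: the register reads back its resetvalue and writes are dropped. A toy model of that behaviour, with hypothetical names standing in for the real reginfo machinery:

#include <assert.h>
#include <stdint.h>

struct toy_reg { uint64_t resetvalue; };    /* stand-in for ARMCPRegInfo */

static uint64_t toy_read(const struct toy_reg *r) { return r->resetvalue; }
static void toy_write(struct toy_reg *r, uint64_t v) { (void)r; (void)v; }

int main(void)
{
    struct toy_reg actlr2 = { .resetvalue = 0 };
    toy_write(&actlr2, 0xdeadbeefULL);  /* write is ignored */
    assert(toy_read(&actlr2) == 0);     /* read-as-zero */
    return 0;
}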
+ */ +static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { + { .name = "ACTLR2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3, + .access = PL1_RW, .accessfn = access_tacr, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "HACTLR2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + REGINFO_SENTINEL +}; + +void register_cp_regs_for_features(ARMCPU *cpu) +{ + /* Register all the coprocessor registers based on feature bits */ + CPUARMState *env = &cpu->env; + if (arm_feature(env, ARM_FEATURE_M)) { + /* M profile has no coprocessor registers */ + return; + } + + define_arm_cp_regs(cpu, cp_reginfo); + if (!arm_feature(env, ARM_FEATURE_V8)) { + /* Must go early as it is full of wildcards that may be + * overridden by later definitions. + */ + define_arm_cp_regs(cpu, not_v8_cp_reginfo); + } + + if (arm_feature(env, ARM_FEATURE_V6)) { + /* The ID registers all have impdef reset values */ + ARMCPRegInfo v6_idregs[] = { + { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->id_pfr0 }, + /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know + * the value of the GIC field until after we define these regs. + */ + { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_NO_RAW, + .accessfn = access_aa32_tid3, + .readfn = id_pfr1_read, + .writefn = arm_cp_write_ignore }, + { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_dfr0 }, + { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->id_afr0 }, + { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_mmfr0 }, + { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_mmfr1 }, + { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_mmfr2 }, + { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_mmfr3 }, + { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_isar0 }, + { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_isar1 }, + { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = 
access_aa32_tid3, + .resetvalue = cpu->isar.id_isar2 }, + { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_isar3 }, + { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_isar4 }, + { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_isar5 }, + { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_mmfr4 }, + { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa32_tid3, + .resetvalue = cpu->isar.id_isar6 }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, v6_idregs); + define_arm_cp_regs(cpu, v6_cp_reginfo); + } else { + define_arm_cp_regs(cpu, not_v6_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V6K)) { + define_arm_cp_regs(cpu, v6k_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V7MP) && + !arm_feature(env, ARM_FEATURE_PMSA)) { + define_arm_cp_regs(cpu, v7mp_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V7VE)) { + define_arm_cp_regs(cpu, pmovsset_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V7)) { + ARMCPRegInfo clidr = { + .name = "CLIDR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid2, + .resetvalue = cpu->clidr + }; + define_one_arm_cp_reg(cpu, &clidr); + define_arm_cp_regs(cpu, v7_cp_reginfo); + define_debug_regs(cpu); + define_pmu_regs(cpu); + } else { + define_arm_cp_regs(cpu, not_v7_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V8)) { + /* AArch64 ID registers, which all have impdef reset values. + * Note that within the ID register ranges the unused slots + * must all RAZ, not UNDEF; future architecture versions may + * define new registers here. + */ + ARMCPRegInfo v8_idregs[] = { + /* + * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system + * emulation because we don't know the right value for the + * GIC field until after we define these regs. 
+ */ + { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, + .access = PL1_R, + .type = ARM_CP_NO_RAW, + .accessfn = access_aa64_tid3, + .readfn = id_aa64pfr0_read, + .writefn = arm_cp_write_ignore + }, + { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.id_aa64pfr1}, + { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + /* At present, only SVEver == 0 is defined anyway. */ + .resetvalue = 0 }, + { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.id_aa64dfr0 }, + { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.id_aa64dfr1 }, + { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->id_aa64afr0 }, + { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->id_aa64afr1 }, + { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, + .access = PL1_R, .type = 
ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.id_aa64isar0 }, + { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.id_aa64isar1 }, + { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.id_aa64mmfr0 }, + { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.id_aa64mmfr1 }, + { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.id_aa64mmfr2 }, + { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, + .access = PL1_R, .type = 
ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.mvfr0 }, + { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.mvfr1 }, + { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = cpu->isar.mvfr2 }, + { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid3, + .resetvalue = 0 }, + { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .resetvalue = extract64(cpu->pmceid0, 0, 32) }, + { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .resetvalue = cpu->pmceid0 }, + { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .resetvalue = extract64(cpu->pmceid1, 0, 32) }, + { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .resetvalue = cpu->pmceid1 }, + REGINFO_SENTINEL + }; + /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ + if (!arm_feature(env, ARM_FEATURE_EL3) && + !arm_feature(env, ARM_FEATURE_EL2)) { + ARMCPRegInfo rvbar = { + .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, + .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar + }; + define_one_arm_cp_reg(cpu, &rvbar); + } + define_arm_cp_regs(cpu, v8_idregs); + define_arm_cp_regs(cpu, v8_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_EL2)) { + uint64_t vmpidr_def = mpidr_read_val(env); + ARMCPRegInfo vpidr_regs[] = { + { .name = "VPIDR", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, + .access = PL2_RW, .accessfn = access_el3_aa32ns, + .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, + .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, + { .name = "VPIDR_EL2", .state 
= ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, + .access = PL2_RW, .resetvalue = cpu->midr, + .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, + { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, + .access = PL2_RW, .accessfn = access_el3_aa32ns, + .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, + .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, + { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, + .access = PL2_RW, + .resetvalue = vmpidr_def, + .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, vpidr_regs); + define_arm_cp_regs(cpu, el2_cp_reginfo); + if (arm_feature(env, ARM_FEATURE_V8)) { + define_arm_cp_regs(cpu, el2_v8_cp_reginfo); + } + /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ + if (!arm_feature(env, ARM_FEATURE_EL3)) { + ARMCPRegInfo rvbar = { + .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, + .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar + }; + define_one_arm_cp_reg(cpu, &rvbar); + } + } else { + /* If EL2 is missing but higher ELs are enabled, we need to + * register the no_el2 reginfos. + */ + if (arm_feature(env, ARM_FEATURE_EL3)) { + /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value + * of MIDR_EL1 and MPIDR_EL1. + */ + ARMCPRegInfo vpidr_regs[] = { + { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, + .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, + .type = ARM_CP_CONST, .resetvalue = cpu->midr, + .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, + { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, + .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, + .type = ARM_CP_NO_RAW, + .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, vpidr_regs); + define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); + if (arm_feature(env, ARM_FEATURE_V8)) { + define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); + } + } + } + if (arm_feature(env, ARM_FEATURE_EL3)) { + define_arm_cp_regs(cpu, el3_cp_reginfo); + ARMCPRegInfo el3_regs[] = { + { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, + .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, + { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, + .access = PL3_RW, + .raw_writefn = raw_write, .writefn = sctlr_write, + .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), + .resetvalue = cpu->reset_sctlr }, + REGINFO_SENTINEL + }; + + define_arm_cp_regs(cpu, el3_regs); + } + /* The behaviour of NSACR is sufficiently various that we don't + * try to describe it in a single reginfo: + * if EL3 is 64 bit, then trap to EL3 from S EL1, + * reads as constant 0xc00 from NS EL1 and NS EL2 + * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 + * if v7 without EL3, register doesn't exist + * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 + */ + if (arm_feature(env, ARM_FEATURE_EL3)) { + if (arm_feature(env, ARM_FEATURE_AARCH64)) { + ARMCPRegInfo nsacr = { + .name = "NSACR", .type = ARM_CP_CONST, + .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, + .access = PL1_RW, .accessfn = nsacr_access, + .resetvalue = 
0xc00 + }; + define_one_arm_cp_reg(cpu, &nsacr); + } else { + ARMCPRegInfo nsacr = { + .name = "NSACR", + .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, + .access = PL3_RW | PL1_R, + .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.nsacr) + }; + define_one_arm_cp_reg(cpu, &nsacr); + } + } else { + if (arm_feature(env, ARM_FEATURE_V8)) { + ARMCPRegInfo nsacr = { + .name = "NSACR", .type = ARM_CP_CONST, + .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, + .access = PL1_R, + .resetvalue = 0xc00 + }; + define_one_arm_cp_reg(cpu, &nsacr); + } + } + + if (arm_feature(env, ARM_FEATURE_PMSA)) { + if (arm_feature(env, ARM_FEATURE_V6)) { + /* PMSAv6 not implemented */ + assert(arm_feature(env, ARM_FEATURE_V7)); + define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); + define_arm_cp_regs(cpu, pmsav7_cp_reginfo); + } else { + define_arm_cp_regs(cpu, pmsav5_cp_reginfo); + } + } else { + define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); + define_arm_cp_regs(cpu, vmsa_cp_reginfo); + /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */ + if (cpu_isar_feature(aa32_hpd, cpu)) { + define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); + } + } + if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { + define_arm_cp_regs(cpu, t2ee_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { + define_arm_cp_regs(cpu, generic_timer_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_VAPA)) { + define_arm_cp_regs(cpu, vapa_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { + define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { + define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { + define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_OMAPCP)) { + define_arm_cp_regs(cpu, omap_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_STRONGARM)) { + define_arm_cp_regs(cpu, strongarm_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + define_arm_cp_regs(cpu, xscale_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { + define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_LPAE)) { + define_arm_cp_regs(cpu, lpae_cp_reginfo); + } + if (cpu_isar_feature(aa32_jazelle, cpu)) { + define_arm_cp_regs(cpu, jazelle_regs); + } + /* Slightly awkwardly, the OMAP and StrongARM cores need all of + * cp15 crn=0 to be writes-ignored, whereas for other cores they should + * be read-only (ie write causes UNDEF exception). + */ + { + ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { + /* Pre-v8 MIDR space. + * Note that the MIDR isn't a simple constant register because + * of the TI925 behaviour where writes to another register can + * cause the MIDR value to change. + * + * Unimplemented registers in the c15 0 0 0 space default to + * MIDR. Define MIDR first as this entire space, then CTR, TCMTR + * and friends override accordingly. + */ + { .name = "MIDR", + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .resetvalue = cpu->midr, + .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, + .readfn = midr_read, + .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), + .type = ARM_CP_OVERRIDE }, + /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ.
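The NSACR comment above enumerates four behaviours; condensing them into a table makes the two nested if/else ladders easier to audit. A sketch (descriptive strings only, not the emulation itself):

#include <stdbool.h>
#include <stdio.h>

static const char *nsacr_variant(bool have_el3, bool el3_is_aa64, bool v8)
{
    if (have_el3) {
        return el3_is_aa64
            ? "trap S EL1 to EL3; constant 0xc00 at NS EL1/EL2"
            : "RW at EL3, RO at NS EL1/EL2";
    }
    return v8 ? "constant 0xc00 at NS EL1/EL2" : "register doesn't exist";
}

int main(void)
{
    printf("EL3 64-bit : %s\n", nsacr_variant(true, true, true));
    printf("EL3 32-bit : %s\n", nsacr_variant(true, false, true));
    printf("v8, no EL3 : %s\n", nsacr_variant(false, false, true));
    printf("v7, no EL3 : %s\n", nsacr_variant(false, false, false));
    return 0;
}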
*/ + { .name = "DUMMY", + .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DUMMY", + .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DUMMY", + .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DUMMY", + .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DUMMY", + .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL + }; + ARMCPRegInfo id_v8_midr_cp_reginfo[] = { + { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, + .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, + .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), + .readfn = midr_read }, + /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ + { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, + .access = PL1_R, .resetvalue = cpu->midr }, + { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, + .access = PL1_R, .resetvalue = cpu->midr }, + { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, + .access = PL1_R, + .accessfn = access_aa64_tid1, + .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, + REGINFO_SENTINEL + }; + ARMCPRegInfo id_cp_reginfo[] = { + /* These are common to v8 and pre-v8 */ + { .name = "CTR", + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_R, .accessfn = ctr_el0_access, + .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, + { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, + .access = PL0_R, .accessfn = ctr_el0_access, + .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, + /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ + { .name = "TCMTR", + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_R, + .accessfn = access_aa32_tid1, + .type = ARM_CP_CONST, .resetvalue = 0 }, + REGINFO_SENTINEL + }; + /* TLBTR is specific to VMSA */ + ARMCPRegInfo id_tlbtr_reginfo = { + .name = "TLBTR", + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, + .access = PL1_R, + .accessfn = access_aa32_tid1, + .type = ARM_CP_CONST, .resetvalue = 0, + }; + /* MPUIR is specific to PMSA V6+ */ + ARMCPRegInfo id_mpuir_reginfo = { + .name = "MPUIR", + .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, + .access = PL1_R, .type = ARM_CP_CONST, + .resetvalue = cpu->pmsav7_dregion << 8 + }; + ARMCPRegInfo crn0_wi_reginfo = { + .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, + .type = ARM_CP_NOP | ARM_CP_OVERRIDE + }; + if (arm_feature(env, ARM_FEATURE_OMAPCP) || + arm_feature(env, ARM_FEATURE_STRONGARM)) { + ARMCPRegInfo *r; + /* Register the blanket "writes ignored" value first to cover the + * whole space. Then update the specific ID registers to allow write + * access, so that they ignore writes rather than causing them to + * UNDEF. 
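The access-widening quirk handled just below iterates the reginfo arrays by hand; the pattern relies on REGINFO_SENTINEL terminating every list. A toy version of that sentinel walk, with hypothetical types standing in for the real ones:

#include <stdio.h>

#define TOY_SENTINEL 0xffff            /* plays the role of ARM_CP_SENTINEL */

struct toy_reginfo { const char *name; int type; int access; };

int main(void)
{
    struct toy_reginfo regs[] = {
        { .name = "MIDR",  .type = 0, .access = 1 /* RO */ },
        { .name = "DUMMY", .type = 0, .access = 1 /* RO */ },
        { .type = TOY_SENTINEL },      /* terminator */
    };
    /* Widen every real entry to RW, as the OMAP/StrongARM code does. */
    for (struct toy_reginfo *r = regs; r->type != TOY_SENTINEL; r++) {
        r->access = 3;                 /* stand-in for PL1_RW */
        printf("%s widened to RW\n", r->name);
    }
    return 0;
}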
+ */ + define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); + for (r = id_pre_v8_midr_cp_reginfo; + r->type != ARM_CP_SENTINEL; r++) { + r->access = PL1_RW; + } + for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { + r->access = PL1_RW; + } + id_mpuir_reginfo.access = PL1_RW; + id_tlbtr_reginfo.access = PL1_RW; + } + if (arm_feature(env, ARM_FEATURE_V8)) { + define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); + } else { + define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); + } + define_arm_cp_regs(cpu, id_cp_reginfo); + if (!arm_feature(env, ARM_FEATURE_PMSA)) { + define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); + } else if (arm_feature(env, ARM_FEATURE_V7)) { + define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); + } + } + + if (arm_feature(env, ARM_FEATURE_MPIDR)) { + ARMCPRegInfo mpidr_cp_reginfo[] = { + { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, + .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, mpidr_cp_reginfo); + } + + if (arm_feature(env, ARM_FEATURE_AUXCR)) { + ARMCPRegInfo auxcr_reginfo[] = { + { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tacr, + .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, + { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, + .access = PL3_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, auxcr_reginfo); + if (cpu_isar_feature(aa32_ac2, cpu)) { + define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo); + } + } + + if (arm_feature(env, ARM_FEATURE_CBAR)) { + /* + * CBAR is IMPDEF, but common on Arm Cortex-A implementations. + * There are two flavours: + * (1) older 32-bit only cores have a simple 32-bit CBAR + * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a + * 32-bit register visible to AArch32 at a different encoding + * to the "flavour 1" register and with the bits rearranged to + * be able to squash a 64-bit address into the 32-bit view. + * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but + * in future if we support AArch32-only configs of some of the + * AArch64 cores we might need to add a specific feature flag + * to indicate cores with "flavour 2" CBAR. + */ + if (arm_feature(env, ARM_FEATURE_AARCH64)) { + /* 32 bit view is [31:18] 0...0 [43:32]. 
*/ + uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) + | extract64(cpu->reset_cbar, 32, 12); + ARMCPRegInfo cbar_reginfo[] = { + { .name = "CBAR", + .type = ARM_CP_CONST, + .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, + .access = PL1_R, .resetvalue = cbar32 }, + { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_CONST, + .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, + .access = PL1_R, .resetvalue = cpu->reset_cbar }, + REGINFO_SENTINEL + }; + /* We don't implement a r/w 64 bit CBAR currently */ + assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); + define_arm_cp_regs(cpu, cbar_reginfo); + } else { + ARMCPRegInfo cbar = { + .name = "CBAR", + .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, + .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, + .fieldoffset = offsetof(CPUARMState, + cp15.c15_config_base_address) + }; + if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { + cbar.access = PL1_R; + cbar.fieldoffset = 0; + cbar.type = ARM_CP_CONST; + } + define_one_arm_cp_reg(cpu, &cbar); + } + } + + if (arm_feature(env, ARM_FEATURE_VBAR)) { + ARMCPRegInfo vbar_cp_reginfo[] = { + { .name = "VBAR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .writefn = vbar_write, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), + offsetof(CPUARMState, cp15.vbar_ns) }, + .resetvalue = 0 }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, vbar_cp_reginfo); + } + + /* Generic registers whose values depend on the implementation */ + { + ARMCPRegInfo sctlr = { + .name = "SCTLR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), + offsetof(CPUARMState, cp15.sctlr_ns) }, + .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, + .raw_writefn = raw_write, + }; + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + /* Normally we would always end the TB on an SCTLR write, but Linux + * arch/arm/mach-pxa/sleep.S expects two instructions following + * an MMU enable to execute from cache. Imitate this behaviour. 
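The cbar32 expression above packs a 44-bit physical base into the 32-bit AArch32 view: bits [31:18] stay in place and bits [43:32] drop into [11:0]. A worked example with a hypothetical reset_cbar value:

#include <stdint.h>
#include <stdio.h>

static uint64_t ext(uint64_t v, int start, int len)
{
    return (v >> start) & ((1ULL << len) - 1);
}

int main(void)
{
    uint64_t reset_cbar = 0x0000008102400000ULL;   /* hypothetical base */
    uint32_t cbar32 = (uint32_t)((ext(reset_cbar, 18, 14) << 18)
                                 | ext(reset_cbar, 32, 12));
    printf("CBAR_EL1=%#llx -> CBAR=%#010x\n",
           (unsigned long long)reset_cbar, cbar32);
    return 0;   /* CBAR_EL1=0x8102400000 -> CBAR=0x02400081 */
}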
+ */ + sctlr.type |= ARM_CP_SUPPRESS_TB_END; + } + define_one_arm_cp_reg(cpu, &sctlr); + } + + if (cpu_isar_feature(aa64_lor, cpu)) { + define_arm_cp_regs(cpu, lor_reginfo); + } + if (cpu_isar_feature(aa64_pan, cpu)) { + define_one_arm_cp_reg(cpu, &pan_reginfo); + } + if (cpu_isar_feature(aa64_ats1e1, cpu)) { + define_arm_cp_regs(cpu, ats1e1_reginfo); + } + if (cpu_isar_feature(aa32_ats1e1, cpu)) { + define_arm_cp_regs(cpu, ats1cp_reginfo); + } + if (cpu_isar_feature(aa64_uao, cpu)) { + define_one_arm_cp_reg(cpu, &uao_reginfo); + } + + if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { + define_arm_cp_regs(cpu, vhe_reginfo); + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); + if (arm_feature(env, ARM_FEATURE_EL2)) { + define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); + } else { + define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); + } + if (arm_feature(env, ARM_FEATURE_EL3)) { + define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); + } + } + +#ifdef TARGET_AARCH64 + if (cpu_isar_feature(aa64_pauth, cpu)) { + define_arm_cp_regs(cpu, pauth_reginfo); + } + if (cpu_isar_feature(aa64_rndr, cpu)) { + define_arm_cp_regs(cpu, rndr_reginfo); + } + /* Data Cache clean instructions up to PoP */ + if (cpu_isar_feature(aa64_dcpop, cpu)) { + define_one_arm_cp_reg(cpu, dcpop_reg); + + if (cpu_isar_feature(aa64_dcpodp, cpu)) { + define_one_arm_cp_reg(cpu, dcpodp_reg); + } + } +#endif + + if (cpu_isar_feature(any_predinv, cpu)) { + define_arm_cp_regs(cpu, predinv_reginfo); + } + + if (cpu_isar_feature(any_ccidx, cpu)) { + define_arm_cp_regs(cpu, ccsidr2_reginfo); + } + + /* + * Register redirections and aliases must be done last, + * after the registers from the other extensions have been defined. + */ + if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { + define_arm_vh_e2h_redirects_aliases(cpu); + } +} + +static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, + void *opaque, int state, int secstate, + int crm, int opc1, int opc2, + const char *name) +{ + /* Private utility function for define_one_arm_cp_reg_with_opaque(): + * add a single reginfo struct to the hash table. + */ + uint32_t *key = g_new(uint32_t, 1); + ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); + int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; + int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; + + r2->name = g_strdup(name); + /* Reset the secure state to the specific incoming state. This is + * necessary as the register may have been defined with both states. + */ + r2->secure = secstate; + + if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { + /* Register is banked (using both entries in array). + * Overwriting fieldoffset as the array is only used to define + * banked registers but later only fieldoffset is used. + */ + r2->fieldoffset = r->bank_fieldoffsets[ns]; + } + + if (state == ARM_CP_STATE_AA32) { + if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { + /* If the register is banked then we don't need to migrate or + * reset the 32-bit instance in certain cases: + * + * 1) If the register has both 32-bit and 64-bit instances then we + * can count on the 64-bit instance taking care of the + * non-secure bank. + * 2) If ARMv8 is enabled then we can count on a 64-bit version + * taking care of the secure bank. This requires that separate + * 32 and 64-bit definitions are provided. 
+ */ + if ((r->state == ARM_CP_STATE_BOTH && ns) || + (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { + r2->type |= ARM_CP_ALIAS; + } + } else if ((secstate != r->secure) && !ns) { + /* The register is not banked so we only want to allow migration of + * the non-secure instance. + */ + r2->type |= ARM_CP_ALIAS; + } + + if (r->state == ARM_CP_STATE_BOTH) { + /* We assume it is a cp15 register if the .cp field is left unset. + */ + if (r2->cp == 0) { + r2->cp = 15; + } + +#ifdef HOST_WORDS_BIGENDIAN + if (r2->fieldoffset) { + r2->fieldoffset += sizeof(uint32_t); + } +#endif + } + } + if (state == ARM_CP_STATE_AA64) { + /* To allow abbreviation of ARMCPRegInfo + * definitions, we treat cp == 0 as equivalent to + * the value for "standard guest-visible sysreg". + * STATE_BOTH definitions are also always "standard + * sysreg" in their AArch64 view (the .cp value may + * be non-zero for the benefit of the AArch32 view). + */ + if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { + r2->cp = CP_REG_ARM64_SYSREG_CP; + } + *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, + r2->opc0, opc1, opc2); + } else { + *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); + } + if (opaque) { + r2->opaque = opaque; + } + /* reginfo passed to helpers is correct for the actual access, + * and is never ARM_CP_STATE_BOTH: + */ + r2->state = state; + /* Make sure reginfo passed to helpers for wildcarded regs + * has the correct crm/opc1/opc2 for this reg, not CP_ANY: + */ + r2->crm = crm; + r2->opc1 = opc1; + r2->opc2 = opc2; + /* By convention, for wildcarded registers only the first + * entry is used for migration; the others are marked as + * ALIAS so we don't try to transfer the register + * multiple times. Special registers (ie NOP/WFI) are + * never migratable and not even raw-accessible. + */ + if ((r->type & ARM_CP_SPECIAL)) { + r2->type |= ARM_CP_NO_RAW; + } + if (((r->crm == CP_ANY) && crm != 0) || + ((r->opc1 == CP_ANY) && opc1 != 0) || + ((r->opc2 == CP_ANY) && opc2 != 0)) { + r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; + } + + /* Check that raw accesses are either forbidden or handled. Note that + * we can't assert this earlier because the setup of fieldoffset for + * banked registers has to be done first. + */ + if (!(r2->type & ARM_CP_NO_RAW)) { + // assert(!raw_accessors_invalid(r2)); + } + + /* Overriding of an existing definition must be explicitly + * requested. + */ + if (!(r->type & ARM_CP_OVERRIDE)) { + ARMCPRegInfo *oldreg; + oldreg = g_hash_table_lookup(cpu->cp_regs, key); + if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { + fprintf(stderr, "Register redefined: cp=%d %d bit " + "crn=%d crm=%d opc1=%d opc2=%d, " + "was %s, now %s\n", r2->cp, 32 + 32 * is64, + r2->crn, r2->crm, r2->opc1, r2->opc2, + oldreg->name, r2->name); + g_assert_not_reached(); + } + } + g_hash_table_insert(cpu->cp_regs, key, r2); +} + + +void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *r, void *opaque) +{ + /* Define implementations of coprocessor registers. + * We store these in a hashtable because typically + * there are less than 150 registers in a space which + * is 16*16*16*8*8 = 262144 in size. + * Wildcarding is supported for the crm, opc1 and opc2 fields. + * If a register is defined twice then the second definition is + * used, so this can be used to define some generic registers and + * then override them with implementation specific variations. 
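The hash-table keying used by add_cpreg_to_hashtable above hinges on every (cp, is64, ns, crn, crm, opc1, opc2) tuple mapping to a distinct 32-bit key. The field layout in this sketch is illustrative only, not the actual ENCODE_CP_REG definition, but it shows the idea: disjoint bit ranges keep wildcard-expanded registers from colliding.

#include <stdint.h>
#include <stdio.h>

/* Illustrative packing; widths chosen so the fields cannot overlap. */
static uint32_t encode_cp_reg(int cp, int is64, int ns,
                              int crn, int crm, int opc1, int opc2)
{
    return ((uint32_t)ns   << 29) | ((uint32_t)cp  << 16) |
           ((uint32_t)is64 << 15) | ((uint32_t)crn << 11) |
           ((uint32_t)crm  <<  7) | ((uint32_t)opc1 << 3) |
           (uint32_t)opc2;
}

int main(void)
{
    /* e.g. a cp15, 32-bit, non-secure reg at crn=1, crm=0, opc1=0, opc2=0 */
    printf("key = %#010x\n", encode_cp_reg(15, 0, 1, 1, 0, 0, 0));
    return 0;
}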
+ * At least one of the original and the second definition should + * include ARM_CP_OVERRIDE in its type bits -- this is just a guard + * against accidental use. + * + * The state field defines whether the register is to be + * visible in the AArch32 or AArch64 execution state. If the + * state is set to ARM_CP_STATE_BOTH then we synthesise a + * reginfo structure for the AArch32 view, which sees the lower + * 32 bits of the 64 bit register. + * + * Only registers visible in AArch64 may set r->opc0; opc0 cannot + * be wildcarded. AArch64 registers are always considered to be 64 + * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of + * the register, if any. + */ + int crm, opc1, opc2, state; + int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; + int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; + int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; + int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; + int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; + int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; + /* 64 bit registers have only CRm and Opc1 fields */ + assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); + /* op0 only exists in the AArch64 encodings */ + assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); + /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ + assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); + /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 + * encodes a minimum access level for the register. We roll this + * runtime check into our general permission check code, so check + * here that the reginfo's specified permissions are strict enough + * to encompass the generic architectural permission check. + */ +#ifndef NDEBUG + if (r->state != ARM_CP_STATE_AA32) { + int mask = 0; + switch (r->opc1) { + case 0: + /* min_EL EL1, but some accessible to EL0 via kernel ABI */ + mask = PL0U_R | PL1_RW; + break; + case 1: case 2: + /* min_EL EL1 */ + mask = PL1_RW; + break; + case 3: + /* min_EL EL0 */ + mask = PL0_RW; + break; + case 4: + case 5: + /* min_EL EL2 */ + mask = PL2_RW; + break; + case 6: + /* min_EL EL3 */ + mask = PL3_RW; + break; + case 7: + /* min_EL EL1, secure mode only (we don't check the latter) */ + mask = PL1_RW; + break; + default: + /* broken reginfo with out-of-range opc1 */ + assert(false); + break; + } + /* assert our permissions are not too lax (stricter is fine) */ + assert((r->access & ~mask) == 0); + } +#endif + + /* Check that the register definition has enough info to handle + * reads and writes if they are permitted. + */ + if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { + if (r->access & PL3_R) { + assert((r->fieldoffset || + (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || + r->readfn); + } + if (r->access & PL3_W) { + assert((r->fieldoffset || + (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || + r->writefn); + } + } + /* Bad type field probably means missing sentinel at end of reg list */ + assert(cptype_valid(r->type)); + for (crm = crmmin; crm <= crmmax; crm++) { + for (opc1 = opc1min; opc1 <= opc1max; opc1++) { + for (opc2 = opc2min; opc2 <= opc2max; opc2++) { + for (state = ARM_CP_STATE_AA32; + state <= ARM_CP_STATE_AA64; state++) { + if (r->state != state && r->state != ARM_CP_STATE_BOTH) { + continue; + } + if (state == ARM_CP_STATE_AA32) { + /* Under AArch32 CP registers can be common + * (same for secure and non-secure world) or banked. 
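+                         *
+                         * For example (mirroring the default case below),
+                         * a banked definition with the placeholder name
+                         * "FOO" and no explicit secstate is inserted
+                         * twice: as "FOO_S" with ARM_CP_SECSTATE_S and
+                         * as "FOO" with ARM_CP_SECSTATE_NS.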
+                         */
+                        char *name;
+
+                        switch (r->secure) {
+                        case ARM_CP_SECSTATE_S:
+                        case ARM_CP_SECSTATE_NS:
+                            add_cpreg_to_hashtable(cpu, r, opaque, state,
+                                                   r->secure, crm, opc1, opc2,
+                                                   r->name);
+                            break;
+                        default:
+                            name = g_strdup_printf("%s_S", r->name);
+                            add_cpreg_to_hashtable(cpu, r, opaque, state,
+                                                   ARM_CP_SECSTATE_S,
+                                                   crm, opc1, opc2, name);
+                            g_free(name);
+                            add_cpreg_to_hashtable(cpu, r, opaque, state,
+                                                   ARM_CP_SECSTATE_NS,
+                                                   crm, opc1, opc2, r->name);
+                            break;
+                        }
+                    } else {
+                        /* AArch64 registers get mapped to non-secure instance
+                         * of AArch32 */
+                        add_cpreg_to_hashtable(cpu, r, opaque, state,
+                                               ARM_CP_SECSTATE_NS,
+                                               crm, opc1, opc2, r->name);
+                    }
+                }
+            }
+        }
+    }
+}
+
+void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
+                                    const ARMCPRegInfo *regs, void *opaque)
+{
+    /* Define a whole list of registers */
+    const ARMCPRegInfo *r;
+    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
+        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
+    }
+}
+
+/*
+ * Modify ARMCPRegInfo for access from userspace.
+ *
+ * This is a data driven modification directed by
+ * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
+ * user-space cannot alter any values and dynamic values pertaining to
+ * execution state are hidden from user space view anyway.
+ */
+void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
+{
+    const ARMCPRegUserSpaceInfo *m;
+    ARMCPRegInfo *r;
+
+    for (m = mods; m->name; m++) {
+        GPatternSpec *pat = NULL;
+        if (m->is_glob) {
+            pat = g_pattern_spec_new(m->name);
+        }
+        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
+            if (pat && g_pattern_match_string(pat, r->name)) {
+                r->type = ARM_CP_CONST;
+                r->access = PL0U_R;
+                r->resetvalue = 0;
+                /* continue */
+            } else if (strcmp(r->name, m->name) == 0) {
+                r->type = ARM_CP_CONST;
+                r->access = PL0U_R;
+                r->resetvalue &= m->exported_bits;
+                r->resetvalue |= m->fixed_bits;
+                break;
+            }
+        }
+        if (pat) {
+            g_pattern_spec_free(pat);
+        }
+    }
+}
+
+const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
+{
+    return g_hash_table_lookup(cpregs, &encoded_cp);
+}
+
+void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
+                         uint64_t value)
+{
+    /* Helper coprocessor write function for write-ignore registers */
+}
+
+uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    /* Helper coprocessor read function for read-as-zero registers */
+    return 0;
+}
+
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
+{
+    /* Helper coprocessor reset function for do-nothing-on-reset registers */
+}
+
+static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
+{
+    /* Return true if it is not valid for us to switch to
+     * this CPU mode (ie all the UNPREDICTABLE cases in
+     * the ARM ARM CPSRWriteByInstr pseudocode).
+     */
+
+    /* Changes to or from Hyp via MSR and CPS are illegal. */
+    if (write_type == CPSRWriteByInstr &&
+        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
+         mode == ARM_CPU_MODE_HYP)) {
+        return 1;
+    }
+
+    switch (mode) {
+    case ARM_CPU_MODE_USR:
+        return 0;
+    case ARM_CPU_MODE_SYS:
+    case ARM_CPU_MODE_SVC:
+    case ARM_CPU_MODE_ABT:
+    case ARM_CPU_MODE_UND:
+    case ARM_CPU_MODE_IRQ:
+    case ARM_CPU_MODE_FIQ:
+        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
+         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
+         */
+        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
+         * and CPS are treated as illegal mode changes.
+ */ + if (write_type == CPSRWriteByInstr && + (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && + (arm_hcr_el2_eff(env) & HCR_TGE)) { + return 1; + } + return 0; + case ARM_CPU_MODE_HYP: + return !arm_feature(env, ARM_FEATURE_EL2) + || arm_current_el(env) < 2 || arm_is_secure_below_el3(env); + case ARM_CPU_MODE_MON: + return arm_current_el(env) < 3; + default: + return 1; + } +} + +uint32_t cpsr_read(CPUARMState *env) +{ + int ZF; + ZF = (env->ZF == 0); + return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | + (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) + | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) + | ((env->condexec_bits & 0xfc) << 8) + | (env->GE << 16) | (env->daif & CPSR_AIF); +} + +void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, + CPSRWriteType write_type) +{ + uint32_t changed_daif; + + if (mask & CPSR_NZCV) { + env->ZF = (~val) & CPSR_Z; + env->NF = val; + env->CF = (val >> 29) & 1; + env->VF = (val << 3) & 0x80000000; + } + if (mask & CPSR_Q) + env->QF = ((val & CPSR_Q) != 0); + if (mask & CPSR_T) + env->thumb = ((val & CPSR_T) != 0); + if (mask & CPSR_IT_0_1) { + env->condexec_bits &= ~3; + env->condexec_bits |= (val >> 25) & 3; + } + if (mask & CPSR_IT_2_7) { + env->condexec_bits &= 3; + env->condexec_bits |= (val >> 8) & 0xfc; + } + if (mask & CPSR_GE) { + env->GE = (val >> 16) & 0xf; + } + + /* In a V7 implementation that includes the security extensions but does + * not include Virtualization Extensions the SCR.FW and SCR.AW bits control + * whether non-secure software is allowed to change the CPSR_F and CPSR_A + * bits respectively. + * + * In a V8 implementation, it is permitted for privileged software to + * change the CPSR A/F bits regardless of the SCR.AW/FW bits. + */ + if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && + arm_feature(env, ARM_FEATURE_EL3) && + !arm_feature(env, ARM_FEATURE_EL2) && + !arm_is_secure(env)) { + + changed_daif = (env->daif ^ val) & mask; + + if (changed_daif & CPSR_A) { + /* Check to see if we are allowed to change the masking of async + * abort exceptions from a non-secure state. + */ + if (!(env->cp15.scr_el3 & SCR_AW)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Ignoring attempt to switch CPSR_A flag from " + "non-secure world with SCR.AW bit clear\n"); + mask &= ~CPSR_A; + } + } + + if (changed_daif & CPSR_F) { + /* Check to see if we are allowed to change the masking of FIQ + * exceptions from a non-secure state. + */ + if (!(env->cp15.scr_el3 & SCR_FW)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Ignoring attempt to switch CPSR_F flag from " + "non-secure world with SCR.FW bit clear\n"); + mask &= ~CPSR_F; + } + + /* Check whether non-maskable FIQ (NMFI) support is enabled. + * If this bit is set software is not allowed to mask + * FIQs, but is allowed to set CPSR_F to 0. + */ + if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && + (val & CPSR_F)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Ignoring attempt to enable CPSR_F flag " + "(non-maskable FIQ [NMFI] support enabled)\n"); + mask &= ~CPSR_F; + } + } + } + + env->daif &= ~(CPSR_AIF & mask); + env->daif |= val & CPSR_AIF & mask; + + if (write_type != CPSRWriteRaw && + ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { + if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { + /* Note that we can only get here in USR mode if this is a + * gdb stub write; for this case we follow the architectural + * behaviour for guest writes in USR mode of ignoring an attempt + * to switch mode. 
(Those are caught by translate.c for writes + * triggered by guest instructions.) + */ + mask &= ~CPSR_M; + } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { + /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in + * v7, and has defined behaviour in v8: + * + leave CPSR.M untouched + * + allow changes to the other CPSR fields + * + set PSTATE.IL + * For user changes via the GDB stub, we don't set PSTATE.IL, + * as this would be unnecessarily harsh for a user error. + */ + mask &= ~CPSR_M; + if (write_type != CPSRWriteByGDBStub && + arm_feature(env, ARM_FEATURE_V8)) { + mask |= CPSR_IL; + val |= CPSR_IL; + } + qemu_log_mask(LOG_GUEST_ERROR, + "Illegal AArch32 mode switch attempt from %s to %s\n", + aarch32_mode_name(env->uncached_cpsr), + aarch32_mode_name(val)); + } else { + qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", + write_type == CPSRWriteExceptionReturn ? + "Exception return from AArch32" : + "AArch32 mode switch from", + aarch32_mode_name(env->uncached_cpsr), + aarch32_mode_name(val), env->regs[15]); + switch_mode(env, val & CPSR_M); + } + } + mask &= ~CACHED_CPSR_BITS; + env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); +} + +/* Sign/zero extend */ +uint32_t HELPER(sxtb16)(uint32_t x) +{ + uint32_t res; + res = (uint16_t)(int8_t)x; + res |= (uint32_t)(int8_t)(x >> 16) << 16; + return res; +} + +uint32_t HELPER(uxtb16)(uint32_t x) +{ + uint32_t res; + res = (uint16_t)(uint8_t)x; + res |= (uint32_t)(uint8_t)(x >> 16) << 16; + return res; +} + +int32_t HELPER(sdiv)(int32_t num, int32_t den) +{ + if (den == 0) + return 0; + if (num == INT_MIN && den == -1) + return INT_MIN; + return num / den; +} + +uint32_t HELPER(udiv)(uint32_t num, uint32_t den) +{ + if (den == 0) + return 0; + return num / den; +} + +uint32_t HELPER(rbit)(uint32_t x) +{ + return revbit32(x); +} + +static void switch_mode(CPUARMState *env, int mode) +{ + int old_mode; + int i; + + old_mode = env->uncached_cpsr & CPSR_M; + if (mode == old_mode) + return; + + if (old_mode == ARM_CPU_MODE_FIQ) { + memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); + memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); + } else if (mode == ARM_CPU_MODE_FIQ) { + memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); + memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); + } + + i = bank_number(old_mode); + env->banked_r13[i] = env->regs[13]; + env->banked_spsr[i] = env->spsr; + + i = bank_number(mode); + env->regs[13] = env->banked_r13[i]; + env->spsr = env->banked_spsr[i]; + + env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; + env->regs[14] = env->banked_r14[r14_bank_number(mode)]; +} + +/* Physical Interrupt Target EL Lookup Table + * + * [ From ARM ARM section G1.13.4 (Table G1-15) ] + * + * The below multi-dimensional table is used for looking up the target + * exception level given numerous condition criteria. Specifically, the + * target EL is based on SCR and HCR routing controls as well as the + * currently executing EL and secure state. + * + * Dimensions: + * target_el_table[2][2][2][2][2][4] + * | | | | | +--- Current EL + * | | | | +------ Non-secure(0)/Secure(1) + * | | | +--------- HCR mask override + * | | +------------ SCR exec state control + * | +--------------- SCR mask override + * +------------------ 32-bit(0)/64-bit(1) EL3 + * + * The table values are as such: + * 0-3 = EL0-EL3 + * -1 = Cannot occur + * + * The ARM ARM target EL table includes entries indicating that an "exception + * is not taken". 
The two cases where this is applicable are: + * 1) An exception is taken from EL3 but the SCR does not have the exception + * routed to EL3. + * 2) An exception is taken from EL2 but the HCR does not have the exception + * routed to EL2. + * In these two cases, the below table contain a target of EL1. This value is + * returned as it is expected that the consumer of the table data will check + * for "target EL >= current EL" to ensure the exception is not taken. + * + * SCR HCR + * 64 EA AMO From + * BIT IRQ IMO Non-secure Secure + * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 + */ +static const int8_t target_el_table[2][2][2][2][2][4] = { + {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, + {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, + {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, + {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, + {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, + {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, + {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, + {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, + {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, + {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, + {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, + {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, + {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, + {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, + {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, + {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, +}; + +/* + * Determine the target EL for physical exceptions + */ +uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, + uint32_t cur_el, bool secure) +{ + CPUARMState *env = cs->env_ptr; + bool rw; + bool scr; + bool hcr; + int target_el; + /* Is the highest EL AArch64? */ + bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); + uint64_t hcr_el2; + + if (arm_feature(env, ARM_FEATURE_EL3)) { + rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); + } else { + /* Either EL2 is the highest EL (and so the EL2 register width + * is given by is64); or there is no EL2 or EL3, in which case + * the value of 'rw' does not affect the table lookup anyway. + */ + rw = is64; + } + + hcr_el2 = arm_hcr_el2_eff(env); + switch (excp_idx) { + case EXCP_IRQ: + scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); + hcr = hcr_el2 & HCR_IMO; + break; + case EXCP_FIQ: + scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); + hcr = hcr_el2 & HCR_FMO; + break; + default: + scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); + hcr = hcr_el2 & HCR_AMO; + break; + }; + + /* + * For these purposes, TGE and AMO/IMO/FMO both force the + * interrupt to EL2. Fold TGE into the bit extracted above. + */ + hcr |= (hcr_el2 & HCR_TGE) != 0; + + /* Perform a table-lookup for the target EL given the current state */ + target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; + + assert(target_el > 0); + + return target_el; +} + +/* + * Function used to synchronize QEMU's AArch64 register set with AArch32 + * register set. This is necessary when switching between AArch32 and AArch64 + * execution state. + */ +void aarch64_sync_32_to_64(CPUARMState *env) +{ + int i; + uint32_t mode = env->uncached_cpsr & CPSR_M; + + /* We can blanket copy R[0:7] to X[0:7] */ + for (i = 0; i < 8; i++) { + env->xregs[i] = env->regs[i]; + } + + /* + * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. + * Otherwise, they come from the banked user regs. 
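+     *
+     * As a reading aid, the remainder of this function implements the
+     * following mapping (a summary of the assignments below, not an
+     * extra transformation):
+     *   x13/x14  <- SP_usr/LR_usr     x15      <- SP_hyp
+     *   x16/x17  <- LR_irq/SP_irq     x18/x19  <- LR_svc/SP_svc
+     *   x20/x21  <- LR_abt/SP_abt     x22/x23  <- LR_und/SP_und
+     *   x24..x30 <- r8_fiq..r14_fiq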
+ */ + if (mode == ARM_CPU_MODE_FIQ) { + for (i = 8; i < 13; i++) { + env->xregs[i] = env->usr_regs[i - 8]; + } + } else { + for (i = 8; i < 13; i++) { + env->xregs[i] = env->regs[i]; + } + } + + /* + * Registers x13-x23 are the various mode SP and FP registers. Registers + * r13 and r14 are only copied if we are in that mode, otherwise we copy + * from the mode banked register. + */ + if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { + env->xregs[13] = env->regs[13]; + env->xregs[14] = env->regs[14]; + } else { + env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; + /* HYP is an exception in that it is copied from r14 */ + if (mode == ARM_CPU_MODE_HYP) { + env->xregs[14] = env->regs[14]; + } else { + env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; + } + } + + if (mode == ARM_CPU_MODE_HYP) { + env->xregs[15] = env->regs[13]; + } else { + env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; + } + + if (mode == ARM_CPU_MODE_IRQ) { + env->xregs[16] = env->regs[14]; + env->xregs[17] = env->regs[13]; + } else { + env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; + env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; + } + + if (mode == ARM_CPU_MODE_SVC) { + env->xregs[18] = env->regs[14]; + env->xregs[19] = env->regs[13]; + } else { + env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; + env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; + } + + if (mode == ARM_CPU_MODE_ABT) { + env->xregs[20] = env->regs[14]; + env->xregs[21] = env->regs[13]; + } else { + env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; + env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; + } + + if (mode == ARM_CPU_MODE_UND) { + env->xregs[22] = env->regs[14]; + env->xregs[23] = env->regs[13]; + } else { + env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; + env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; + } + + /* + * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ + * mode, then we can copy from r8-r14. Otherwise, we copy from the + * FIQ bank for r8-r14. + */ + if (mode == ARM_CPU_MODE_FIQ) { + for (i = 24; i < 31; i++) { + env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ + } + } else { + for (i = 24; i < 29; i++) { + env->xregs[i] = env->fiq_regs[i - 24]; + } + env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; + env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; + } + + env->pc = env->regs[15]; +} + +/* + * Function used to synchronize QEMU's AArch32 register set with AArch64 + * register set. This is necessary when switching between AArch32 and AArch64 + * execution state. + */ +void aarch64_sync_64_to_32(CPUARMState *env) +{ + int i; + uint32_t mode = env->uncached_cpsr & CPSR_M; + + /* We can blanket copy X[0:7] to R[0:7] */ + for (i = 0; i < 8; i++) { + env->regs[i] = env->xregs[i]; + } + + /* + * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. + * Otherwise, we copy x8-x12 into the banked user regs. + */ + if (mode == ARM_CPU_MODE_FIQ) { + for (i = 8; i < 13; i++) { + env->usr_regs[i - 8] = env->xregs[i]; + } + } else { + for (i = 8; i < 13; i++) { + env->regs[i] = env->xregs[i]; + } + } + + /* + * Registers r13 & r14 depend on the current mode. + * If we are in a given mode, we copy the corresponding x registers to r13 + * and r14. Otherwise, we copy the x register to the banked r13 and r14 + * for the mode. 
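+     *
+     * In other words, this is simply the inverse of the mapping performed
+     * by aarch64_sync_32_to_64() above (e.g. x16/x17 are written back to
+     * LR_irq/SP_irq, and x24..x30 to r8_fiq..r14_fiq).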
+     */
+    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
+        env->regs[13] = env->xregs[13];
+        env->regs[14] = env->xregs[14];
+    } else {
+        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
+
+        /*
+         * HYP is an exception in that it does not have its own banked r14 but
+         * shares the USR r14
+         */
+        if (mode == ARM_CPU_MODE_HYP) {
+            env->regs[14] = env->xregs[14];
+        } else {
+            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
+        }
+    }
+
+    if (mode == ARM_CPU_MODE_HYP) {
+        env->regs[13] = env->xregs[15];
+    } else {
+        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
+    }
+
+    if (mode == ARM_CPU_MODE_IRQ) {
+        env->regs[14] = env->xregs[16];
+        env->regs[13] = env->xregs[17];
+    } else {
+        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
+        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
+    }
+
+    if (mode == ARM_CPU_MODE_SVC) {
+        env->regs[14] = env->xregs[18];
+        env->regs[13] = env->xregs[19];
+    } else {
+        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
+        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
+    }
+
+    if (mode == ARM_CPU_MODE_ABT) {
+        env->regs[14] = env->xregs[20];
+        env->regs[13] = env->xregs[21];
+    } else {
+        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
+        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
+    }
+
+    if (mode == ARM_CPU_MODE_UND) {
+        env->regs[14] = env->xregs[22];
+        env->regs[13] = env->xregs[23];
+    } else {
+        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
+        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
+    }
+
+    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+     * mode, then we can copy to r8-r14. Otherwise, we copy to the
+     * FIQ bank for r8-r14.
+     */
+    if (mode == ARM_CPU_MODE_FIQ) {
+        for (i = 24; i < 31; i++) {
+            env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
+        }
+    } else {
+        for (i = 24; i < 29; i++) {
+            env->fiq_regs[i - 24] = env->xregs[i];
+        }
+        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
+        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
+    }
+
+    env->regs[15] = env->pc;
+}
+
+static void take_aarch32_exception(CPUARMState *env, int new_mode,
+                                   uint32_t mask, uint32_t offset,
+                                   uint32_t newpc)
+{
+    int new_el;
+
+    /* Change the CPU state so as to actually take the exception. */
+    switch_mode(env, new_mode);
+
+    /*
+     * For exceptions taken to AArch32 we must clear the SS bit in both
+     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
+     */
+    env->uncached_cpsr &= ~PSTATE_SS;
+    env->spsr = cpsr_read(env);
+    /* Clear IT bits. */
+    env->condexec_bits = 0;
+    /* Switch to the new mode, and to the correct instruction set. */
+    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
+
+    /* This must be after mode switching. */
+    new_el = arm_current_el(env);
+
+    /* Set new mode endianness */
+    env->uncached_cpsr &= ~CPSR_E;
+    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
+        env->uncached_cpsr |= CPSR_E;
+    }
+    /* J and IL must always be cleared for exception entry */
+    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
+    env->daif |= mask;
+
+    if (new_mode == ARM_CPU_MODE_HYP) {
+        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
+        env->elr_el[2] = env->regs[15];
+    } else {
+        /* CPSR.PAN is normally preserved unless... */
+        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
+            switch (new_el) {
+            case 3:
+                if (!arm_is_secure_below_el3(env)) {
+                    /* ...
the target is EL3, from non-secure state. */ + env->uncached_cpsr &= ~CPSR_PAN; + break; + } + /* ... the target is EL3, from secure state ... */ + /* fall through */ + case 1: + /* ... the target is EL1 and SCTLR.SPAN is 0. */ + if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) { + env->uncached_cpsr |= CPSR_PAN; + } + break; + } + } + /* + * this is a lie, as there was no c1_sys on V4T/V5, but who cares + * and we should just guard the thumb mode on V4 + */ + if (arm_feature(env, ARM_FEATURE_V4T)) { + env->thumb = + (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; + } + env->regs[14] = env->regs[15] + offset; + } + env->regs[15] = newpc; + arm_rebuild_hflags(env); +} + +static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) +{ + /* + * Handle exception entry to Hyp mode; this is sufficiently + * different to entry to other AArch32 modes that we handle it + * separately here. + * + * The vector table entry used is always the 0x14 Hyp mode entry point, + * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp. + * The offset applied to the preferred return address is always zero + * (see DDI0487C.a section G1.12.3). + * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. + */ + uint32_t addr, mask; + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + switch (cs->exception_index) { + case EXCP_UDEF: + addr = 0x04; + break; + case EXCP_SWI: + addr = 0x14; + break; + case EXCP_BKPT: + /* Fall through to prefetch abort. */ + case EXCP_PREFETCH_ABORT: + env->cp15.ifar_s = env->exception.vaddress; + qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n", + (uint32_t)env->exception.vaddress); + addr = 0x0c; + break; + case EXCP_DATA_ABORT: + env->cp15.dfar_s = env->exception.vaddress; + qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n", + (uint32_t)env->exception.vaddress); + addr = 0x10; + break; + case EXCP_IRQ: + addr = 0x18; + break; + case EXCP_FIQ: + addr = 0x1c; + break; + case EXCP_HVC: + addr = 0x08; + break; + case EXCP_HYP_TRAP: + addr = 0x14; + break; + default: + cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); + } + + if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { + if (!arm_feature(env, ARM_FEATURE_V8)) { + /* + * QEMU syndrome values are v8-style. v7 has the IL bit + * UNK/SBZP for "field not valid" cases, where v8 uses RES1. + * If this is a v7 CPU, squash the IL bit in those cases. 
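+             * (ARM_EL_IL is the syndrome IL flag, bit 25; the masking
+             * below clears only that single bit.)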
+ */ + if (cs->exception_index == EXCP_PREFETCH_ABORT || + (cs->exception_index == EXCP_DATA_ABORT && + !(env->exception.syndrome & ARM_EL_ISV)) || + syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { + env->exception.syndrome &= ~ARM_EL_IL; + } + } + env->cp15.esr_el[2] = env->exception.syndrome; + } + + if (arm_current_el(env) != 2 && addr < 0x14) { + addr = 0x14; + } + + mask = 0; + if (!(env->cp15.scr_el3 & SCR_EA)) { + mask |= CPSR_A; + } + if (!(env->cp15.scr_el3 & SCR_IRQ)) { + mask |= CPSR_I; + } + if (!(env->cp15.scr_el3 & SCR_FIQ)) { + mask |= CPSR_F; + } + + addr += env->cp15.hvbar; + + take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); +} + +static void arm_cpu_do_interrupt_aarch32_qemu(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint32_t addr; + uint32_t mask; + int new_mode; + uint32_t offset; + uint32_t moe; + + /* If this is a debug exception we must update the DBGDSCR.MOE bits */ + switch (syn_get_ec(env->exception.syndrome)) { + case EC_BREAKPOINT: + case EC_BREAKPOINT_SAME_EL: + moe = 1; + break; + case EC_WATCHPOINT: + case EC_WATCHPOINT_SAME_EL: + moe = 10; + break; + case EC_AA32_BKPT: + moe = 3; + break; + case EC_VECTORCATCH: + moe = 5; + break; + default: + moe = 0; + break; + } + + if (moe) { + env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); + } + + if (env->exception.target_el == 2) { + arm_cpu_do_interrupt_aarch32_hyp(cs); + return; + } + + switch (cs->exception_index) { + case EXCP_UDEF: + new_mode = ARM_CPU_MODE_UND; + addr = 0x04; + mask = CPSR_I; + if (env->thumb) + offset = 2; + else + offset = 4; + break; + case EXCP_SWI: + new_mode = ARM_CPU_MODE_SVC; + addr = 0x08; + mask = CPSR_I; + /* The PC already points to the next instruction. */ + offset = 0; + break; + case EXCP_BKPT: + /* Fall through to prefetch abort. */ + case EXCP_PREFETCH_ABORT: + A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); + A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); + qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", + env->exception.fsr, (uint32_t)env->exception.vaddress); + new_mode = ARM_CPU_MODE_ABT; + addr = 0x0c; + mask = CPSR_A | CPSR_I; + offset = 4; + break; + case EXCP_DATA_ABORT: + A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); + A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); + qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", + env->exception.fsr, + (uint32_t)env->exception.vaddress); + new_mode = ARM_CPU_MODE_ABT; + addr = 0x10; + mask = CPSR_A | CPSR_I; + offset = 8; + break; + case EXCP_IRQ: + new_mode = ARM_CPU_MODE_IRQ; + addr = 0x18; + /* Disable IRQ and imprecise data aborts. */ + mask = CPSR_A | CPSR_I; + offset = 4; + if (env->cp15.scr_el3 & SCR_IRQ) { + /* IRQ routed to monitor mode */ + new_mode = ARM_CPU_MODE_MON; + mask |= CPSR_F; + } + break; + case EXCP_FIQ: + new_mode = ARM_CPU_MODE_FIQ; + addr = 0x1c; + /* Disable FIQ, IRQ and imprecise data aborts. */ + mask = CPSR_A | CPSR_I | CPSR_F; + if (env->cp15.scr_el3 & SCR_FIQ) { + /* FIQ routed to monitor mode */ + new_mode = ARM_CPU_MODE_MON; + } + offset = 4; + break; + case EXCP_VIRQ: + new_mode = ARM_CPU_MODE_IRQ; + addr = 0x18; + /* Disable IRQ and imprecise data aborts. */ + mask = CPSR_A | CPSR_I; + offset = 4; + break; + case EXCP_VFIQ: + new_mode = ARM_CPU_MODE_FIQ; + addr = 0x1c; + /* Disable FIQ, IRQ and imprecise data aborts. 
*/ + mask = CPSR_A | CPSR_I | CPSR_F; + offset = 4; + break; + case EXCP_SMC: + new_mode = ARM_CPU_MODE_MON; + addr = 0x08; + mask = CPSR_A | CPSR_I | CPSR_F; + offset = 0; + break; + default: + cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); + return; /* Never happens. Keep compiler happy. */ + } + + if (new_mode == ARM_CPU_MODE_MON) { + addr += env->cp15.mvbar; + } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { + /* High vectors. When enabled, base address cannot be remapped. */ + addr += 0xffff0000; + } else { + /* ARM v7 architectures provide a vector base address register to remap + * the interrupt vector table. + * This register is only followed in non-monitor mode, and is banked. + * Note: only bits 31:5 are valid. + */ + addr += A32_BANKED_CURRENT_REG_GET(env, vbar); + } + + if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { + env->cp15.scr_el3 &= ~SCR_NS; + } + + take_aarch32_exception(env, new_mode, mask, offset, addr); +} + +/* Handle exception entry to a target EL which is using AArch64 */ +static void arm_cpu_do_interrupt_aarch64_qemu(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + unsigned int new_el = env->exception.target_el; + target_ulong addr = env->cp15.vbar_el[new_el]; + unsigned int new_mode = aarch64_pstate_mode(new_el, true); + unsigned int old_mode; + unsigned int cur_el = arm_current_el(env); + + /* + * Note that new_el can never be 0. If cur_el is 0, then + * el0_a64 is is_a64(), else el0_a64 is ignored. + */ + aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); + + if (cur_el < new_el) { + /* Entry vector offset depends on whether the implemented EL + * immediately lower than the target level is using AArch32 or AArch64 + */ + bool is_aa64 = false; + uint64_t hcr; + + switch (new_el) { + case 3: + is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; + break; + case 2: + hcr = arm_hcr_el2_eff(env); + if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { + is_aa64 = (hcr & HCR_RW) != 0; + break; + } + /* fall through */ + case 1: + is_aa64 = is_a64(env); + break; + default: + g_assert_not_reached(); + break; + } + + if (is_aa64) { + addr += 0x400; + } else { + addr += 0x600; + } + } else if (pstate_read(env) & PSTATE_SP) { + addr += 0x200; + } + + switch (cs->exception_index) { + case EXCP_PREFETCH_ABORT: + case EXCP_DATA_ABORT: + env->cp15.far_el[new_el] = env->exception.vaddress; + qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", + env->cp15.far_el[new_el]); + /* fall through */ + case EXCP_BKPT: + case EXCP_UDEF: + case EXCP_SWI: + case EXCP_HVC: + case EXCP_HYP_TRAP: + case EXCP_SMC: + if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) { + /* + * QEMU internal FP/SIMD syndromes from AArch32 include the + * TA and coproc fields which are only exposed if the exception + * is taken to AArch32 Hyp mode. Mask them out to get a valid + * AArch64 format syndrome. 
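+             * (MAKE_64BIT_MASK(0, 20) is 0xfffff, so the statement below
+             * zeroes syndrome bits [19:0] while leaving EC (bits [31:26])
+             * and IL (bit 25) intact.)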
+ */ + env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); + } + env->cp15.esr_el[new_el] = env->exception.syndrome; + break; + case EXCP_IRQ: + case EXCP_VIRQ: + addr += 0x80; + break; + case EXCP_FIQ: + case EXCP_VFIQ: + addr += 0x100; + break; + default: + cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); + } + + if (is_a64(env)) { + old_mode = pstate_read(env); + aarch64_save_sp(env, arm_current_el(env)); + env->elr_el[new_el] = env->pc; + } else { + old_mode = cpsr_read(env); + env->elr_el[new_el] = env->regs[15]; + + aarch64_sync_32_to_64(env); + + env->condexec_bits = 0; + } + env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; + + qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", + env->elr_el[new_el]); + + if (cpu_isar_feature(aa64_pan, cpu)) { + /* The value of PSTATE.PAN is normally preserved, except when ... */ + new_mode |= old_mode & PSTATE_PAN; + switch (new_el) { + case 2: + /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */ + if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) + != (HCR_E2H | HCR_TGE)) { + break; + } + /* fall through */ + case 1: + /* ... the target is EL1 ... */ + /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */ + if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { + new_mode |= PSTATE_PAN; + } + break; + } + } + + pstate_write(env, PSTATE_DAIF | new_mode); + env->aarch64 = 1; + aarch64_restore_sp(env, new_el); + helper_rebuild_hflags_a64(env, new_el); + + env->pc = addr; + + qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", + new_el, env->pc, pstate_read(env)); +} + +/* Handle a CPU exception for A and R profile CPUs. + * Do any appropriate logging, handle PSCI calls, and then hand off + * to the AArch64-entry or AArch32-entry function depending on the + * target exception level's register width. + */ +void arm_cpu_do_interrupt(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + unsigned int new_el = env->exception.target_el; + + assert(!arm_feature(env, ARM_FEATURE_M)); + + if (arm_is_psci_call(cpu, cs->exception_index)) { + arm_handle_psci_call(cpu); + return; + } + + /* + * Semihosting semantics depend on the register width of the code + * that caused the exception, not the target exception level, so + * must be handled here. + */ + if (cs->exception_index == EXCP_SEMIHOST) { + // handle_semihosting(cs); + return; + } + + /* Hooks may change global state so BQL should be held, also the + * BQL needs to be held for any modification of + * cs->interrupt_request. + */ + arm_call_pre_el_change_hook(cpu); + + assert(!excp_is_internal(cs->exception_index)); + if (arm_el_is_aa64(env, new_el)) { + arm_cpu_do_interrupt_aarch64_qemu(cs); + } else { + arm_cpu_do_interrupt_aarch32_qemu(cs); + } + + arm_call_el_change_hook(cpu); + + cs->interrupt_request |= CPU_INTERRUPT_EXITTB; +} + +/* Return the exception level which controls this address translation regime */ +static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + switch (mmu_idx) { + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + case ARMMMUIdx_Stage2: + case ARMMMUIdx_E2: + return 2; + case ARMMMUIdx_SE3: + return 3; + case ARMMMUIdx_SE10_0: + return arm_el_is_aa64(env, 3) ? 
1 : 3; + case ARMMMUIdx_SE10_1: + case ARMMMUIdx_SE10_1_PAN: + case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_Stage1_E1: + case ARMMMUIdx_Stage1_E1_PAN: + case ARMMMUIdx_E10_0: + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + case ARMMMUIdx_MPrivNegPri: + case ARMMMUIdx_MUserNegPri: + case ARMMMUIdx_MPriv: + case ARMMMUIdx_MUser: + case ARMMMUIdx_MSPrivNegPri: + case ARMMMUIdx_MSUserNegPri: + case ARMMMUIdx_MSPriv: + case ARMMMUIdx_MSUser: + return 1; + default: + g_assert_not_reached(); + // never reach here + return 1; + } +} + +uint64_t arm_sctlr(CPUARMState *env, int el) +{ + /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ + if (el == 0) { + ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); + el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1); + } + return env->cp15.sctlr_el[el]; +} + +/* Return the SCTLR value which controls this address translation regime */ +static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; +} + +/* Return true if the specified stage of address translation is disabled */ +static inline bool regime_translation_disabled(CPUARMState *env, + ARMMMUIdx mmu_idx) +{ + if (arm_feature(env, ARM_FEATURE_M)) { + switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & + (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { + case R_V7M_MPU_CTRL_ENABLE_MASK: + /* Enabled, but not for HardFault and NMI */ + return mmu_idx & ARM_MMU_IDX_M_NEGPRI; + case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: + /* Enabled for all cases */ + return false; + case 0: + default: + /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but + * we warned about that in armv7m_nvic.c when the guest set it. + */ + return true; + } + } + + if (mmu_idx == ARMMMUIdx_Stage2) { + /* HCR.DC means HCR.VM behaves as 1 */ + return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0; + } + + if (env->cp15.hcr_el2 & HCR_TGE) { + /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ + if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { + return true; + } + } + + if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { + /* HCR.DC means SCTLR_EL1.M behaves as 0 */ + return true; + } + + return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; +} + +static inline bool regime_translation_big_endian(CPUARMState *env, + ARMMMUIdx mmu_idx) +{ + return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; +} + +/* Return the TTBR associated with this translation regime */ +static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, + int ttbrn) +{ + if (mmu_idx == ARMMMUIdx_Stage2) { + return env->cp15.vttbr_el2; + } + if (ttbrn == 0) { + return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; + } else { + return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; + } +} + +/* Return the TCR controlling this translation regime */ +static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + if (mmu_idx == ARMMMUIdx_Stage2) { + return &env->cp15.vtcr_el2; + } + return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; +} + +/* Convert a possible stage1+2 MMU index into the appropriate + * stage 1 MMU index + */ +static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) +{ + switch (mmu_idx) { + case ARMMMUIdx_E10_0: + return ARMMMUIdx_Stage1_E0; + case ARMMMUIdx_E10_1: + return ARMMMUIdx_Stage1_E1; + case ARMMMUIdx_E10_1_PAN: + return ARMMMUIdx_Stage1_E1_PAN; + default: + return mmu_idx; + } +} + +/* Return true if the translation regime is using LPAE format page tables */ +static inline bool 
regime_using_lpae_format(CPUARMState *env, + ARMMMUIdx mmu_idx) +{ + int el = regime_el(env, mmu_idx); + if (el == 2 || arm_el_is_aa64(env, el)) { + return true; + } + if (arm_feature(env, ARM_FEATURE_LPAE) + && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { + return true; + } + return false; +} + +/* Returns true if the stage 1 translation regime is using LPAE format page + * tables. Used when raising alignment exceptions, whose FSR changes depending + * on whether the long or short descriptor format is in use. */ +bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + mmu_idx = stage_1_mmu_idx(mmu_idx); + + return regime_using_lpae_format(env, mmu_idx); +} + +static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + switch (mmu_idx) { + case ARMMMUIdx_SE10_0: + case ARMMMUIdx_E20_0: + case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_MUser: + case ARMMMUIdx_MSUser: + case ARMMMUIdx_MUserNegPri: + case ARMMMUIdx_MSUserNegPri: + return true; + default: + return false; + case ARMMMUIdx_E10_0: + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + g_assert_not_reached(); + // never reach here + return false; + } +} + +/* Translate section/page access permissions to page + * R/W protection flags + * + * @env: CPUARMState + * @mmu_idx: MMU index indicating required translation regime + * @ap: The 3-bit access permissions (AP[2:0]) + * @domain_prot: The 2-bit domain access permissions + */ +static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, + int ap, int domain_prot) +{ + bool is_user = regime_is_user(env, mmu_idx); + + if (domain_prot == 3) { + return PAGE_READ | PAGE_WRITE; + } + + switch (ap) { + case 0: + if (arm_feature(env, ARM_FEATURE_V7)) { + return 0; + } + switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { + case SCTLR_S: + return is_user ? 0 : PAGE_READ; + case SCTLR_R: + return PAGE_READ; + default: + return 0; + } + case 1: + return is_user ? 0 : PAGE_READ | PAGE_WRITE; + case 2: + if (is_user) { + return PAGE_READ; + } else { + return PAGE_READ | PAGE_WRITE; + } + case 3: + return PAGE_READ | PAGE_WRITE; + case 4: /* Reserved. */ + return 0; + case 5: + return is_user ? 0 : PAGE_READ; + case 6: + return PAGE_READ; + case 7: + if (!arm_feature(env, ARM_FEATURE_V6K)) { + return 0; + } + return PAGE_READ; + default: + g_assert_not_reached(); + // never reach here + return PAGE_READ; + } +} + +/* Translate section/page access permissions to page + * R/W protection flags. + * + * @ap: The 2-bit simple AP (AP[2:1]) + * @is_user: TRUE if accessing from PL0 + */ +static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) +{ + switch (ap) { + case 0: + return is_user ? 0 : PAGE_READ | PAGE_WRITE; + case 1: + return PAGE_READ | PAGE_WRITE; + case 2: + return is_user ? 
0 : PAGE_READ; + case 3: + return PAGE_READ; + default: + g_assert_not_reached(); + // never reach here + return PAGE_READ; + } +} + +static inline int +simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) +{ + return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); +} + +/* Translate S2 section/page access permissions to protection flags + * + * @env: CPUARMState + * @s2ap: The 2-bit stage2 access permissions (S2AP) + * @xn: XN (execute-never) bit + */ +static int get_S2prot(CPUARMState *env, int s2ap, int xn) +{ + int prot = 0; + + if (s2ap & 1) { + prot |= PAGE_READ; + } + if (s2ap & 2) { + prot |= PAGE_WRITE; + } + if (!xn) { + if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { + prot |= PAGE_EXEC; + } + } + return prot; +} + +/* Translate section/page access permissions to protection flags + * + * @env: CPUARMState + * @mmu_idx: MMU index indicating required translation regime + * @is_aa64: TRUE if AArch64 + * @ap: The 2-bit simple AP (AP[2:1]) + * @ns: NS (non-secure) bit + * @xn: XN (execute-never) bit + * @pxn: PXN (privileged execute-never) bit + */ +static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, + int ap, int ns, int xn, int pxn) +{ + bool is_user = regime_is_user(env, mmu_idx); + int prot_rw, user_rw; + bool have_wxn; + int wxn = 0; + + assert(mmu_idx != ARMMMUIdx_Stage2); + + user_rw = simple_ap_to_rw_prot_is_user(ap, true); + if (is_user) { + prot_rw = user_rw; + } else { + if (user_rw && regime_is_pan(env, mmu_idx)) { + /* PAN forbids data accesses but doesn't affect insn fetch */ + prot_rw = 0; + } else { + prot_rw = simple_ap_to_rw_prot_is_user(ap, false); + } + } + + if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { + return prot_rw; + } + + /* TODO have_wxn should be replaced with + * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) + * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE + * compatible processors have EL2, which is required for [U]WXN. + */ + have_wxn = arm_feature(env, ARM_FEATURE_LPAE); + + if (have_wxn) { + wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; + } + + if (is_aa64) { + if (regime_has_2_ranges(mmu_idx) && !is_user) { + xn = pxn || (user_rw & PAGE_WRITE); + } + } else if (arm_feature(env, ARM_FEATURE_V7)) { + switch (regime_el(env, mmu_idx)) { + case 1: + case 3: + if (is_user) { + xn = xn || !(user_rw & PAGE_READ); + } else { + int uwxn = 0; + if (have_wxn) { + uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; + } + xn = xn || !(prot_rw & PAGE_READ) || pxn || + (uwxn && (user_rw & PAGE_WRITE)); + } + break; + case 2: + break; + } + } else { + xn = wxn = 0; + } + + if (xn || (wxn && (prot_rw & PAGE_WRITE))) { + return prot_rw; + } + return prot_rw | PAGE_EXEC; +} + +static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, + uint32_t *table, uint32_t address) +{ + /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ + TCR *tcr = regime_tcr(env, mmu_idx); + + if (address & tcr->mask) { + if (tcr->raw_tcr & TTBCR_PD1) { + /* Translation table walk disabled for TTBR1 */ + return false; + } + *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; + } else { + if (tcr->raw_tcr & TTBCR_PD0) { + /* Translation table walk disabled for TTBR0 */ + return false; + } + *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; + } + *table |= (address >> 18) & 0x3ffc; + return true; +} + +/* Translate a S1 pagetable walk through S2 if needed. 
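+ * For instance, when stage 2 is active every descriptor address produced
+ * by a stage 1 walk is itself translated through get_phys_addr_lpae() at
+ * ARMMMUIdx_Stage2 below, so one guest access may need several memory
+ * reads to complete the nested walk.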
*/ +static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, + hwaddr addr, MemTxAttrs txattrs, + ARMMMUFaultInfo *fi) +{ + if (arm_mmu_idx_is_stage1_of_2(mmu_idx) && + !regime_translation_disabled(env, ARMMMUIdx_Stage2)) { + target_ulong s2size; + hwaddr s2pa; + int s2prot; + int ret; + ARMCacheAttrs cacheattrs = { 0 }; + ARMCacheAttrs *pcacheattrs = NULL; + + if (env->cp15.hcr_el2 & HCR_PTW) { + /* + * PTW means we must fault if this S1 walk touches S2 Device + * memory; otherwise we don't care about the attributes and can + * save the S2 translation the effort of computing them. + */ + pcacheattrs = &cacheattrs; + } + + ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa, + &txattrs, &s2prot, &s2size, fi, pcacheattrs); + if (ret) { + assert(fi->type != ARMFault_None); + fi->s2addr = addr; + fi->stage2 = true; + fi->s1ptw = true; + return ~0; + } + if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) { + /* Access was to Device memory: generate Permission fault */ + fi->type = ARMFault_Permission; + fi->s2addr = addr; + fi->stage2 = true; + fi->s1ptw = true; + return ~0; + } + addr = s2pa; + } + return addr; +} + +/* All loads done in the course of a page table walk go through here. */ +static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, + ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) +{ + struct uc_struct *uc = cs->uc; + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + MemTxAttrs attrs = { 0 }; + MemTxResult result = MEMTX_OK; + AddressSpace *as; + uint32_t data; + + attrs.secure = is_secure; + as = arm_addressspace(cs, attrs); + addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); + if (fi->s1ptw) { + return 0; + } + if (regime_translation_big_endian(env, mmu_idx)) { +#ifdef UNICORN_ARCH_POSTFIX + data = glue(address_space_ldl_be, UNICORN_ARCH_POSTFIX)(uc, as, addr, attrs, &result); +#else + data = address_space_ldl_be(uc, as, addr, attrs, &result); +#endif + } else { +#ifdef UNICORN_ARCH_POSTFIX + data = glue(address_space_ldl_le, UNICORN_ARCH_POSTFIX)(uc, as, addr, attrs, &result); +#else + data = address_space_ldl_le(uc, as, addr, attrs, &result); +#endif + } + if (result == MEMTX_OK) { + return data; + } + fi->type = ARMFault_SyncExternalOnWalk; + fi->ea = arm_extabort_type(result); + return 0; +} + +static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, + ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + MemTxAttrs attrs = { 0 }; + MemTxResult result = MEMTX_OK; + AddressSpace *as; + uint64_t data; + + attrs.secure = is_secure; + as = arm_addressspace(cs, attrs); + addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); + if (fi->s1ptw) { + return 0; + } + if (regime_translation_big_endian(env, mmu_idx)) { +#ifdef UNICORN_ARCH_POSTFIX + data = glue(address_space_ldq_be, UNICORN_ARCH_POSTFIX)(cs->uc, as, addr, attrs, &result); +#else + data = address_space_ldq_be(cs->uc, as, addr, attrs, &result); +#endif + } else { +#ifdef UNICORN_ARCH_POSTFIX + data = glue(address_space_ldq_le, UNICORN_ARCH_POSTFIX)(cs->uc, as, addr, attrs, &result); +#else + data = address_space_ldq_le(cs->uc, as, addr, attrs, &result); +#endif + } + if (result == MEMTX_OK) { + return data; + } + fi->type = ARMFault_SyncExternalOnWalk; + fi->ea = arm_extabort_type(result); + return 0; +} + +static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, int *prot, + target_ulong *page_size, + ARMMMUFaultInfo *fi) +{ + 
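+    /*
+     * Worked example of the short-descriptor walk below, for a
+     * hypothetical VA of 0x12345678 resolving through a coarse L2
+     * table to a 4k page:
+     *   l1 offset = (VA >> 18) & 0x3ffc = 0x48c   (VA[31:20] * 4)
+     *   l2 offset = (VA >> 10) & 0x3fc  = 0x114   (VA[19:12] * 4)
+     *   phys addr = (l2desc & 0xfffff000) | (VA & 0xfff)
+     */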
CPUState *cs = env_cpu(env); + int level = 1; + uint32_t table; + uint32_t desc; + int type; + int ap; + int domain = 0; + int domain_prot; + hwaddr phys_addr; + uint32_t dacr; + + /* Pagetable walk. */ + /* Lookup l1 descriptor. */ + if (!get_level1_table_address(env, mmu_idx, &table, address)) { + /* Section translation fault if page walk is disabled by PD0 or PD1 */ + fi->type = ARMFault_Translation; + goto do_fault; + } + desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), + mmu_idx, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + type = (desc & 3); + domain = (desc >> 5) & 0x0f; + if (regime_el(env, mmu_idx) == 1) { + dacr = env->cp15.dacr_ns; + } else { + dacr = env->cp15.dacr_s; + } + domain_prot = (dacr >> (domain * 2)) & 3; + if (type == 0) { + /* Section translation fault. */ + fi->type = ARMFault_Translation; + goto do_fault; + } + if (type != 2) { + level = 2; + } + if (domain_prot == 0 || domain_prot == 2) { + fi->type = ARMFault_Domain; + goto do_fault; + } + if (type == 2) { + /* 1Mb section. */ + phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); + ap = (desc >> 10) & 3; + *page_size = 1024 * 1024; + } else { + /* Lookup l2 entry. */ + if (type == 1) { + /* Coarse pagetable. */ + table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); + } else { + /* Fine pagetable. */ + table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); + } + desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), + mmu_idx, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + switch (desc & 3) { + case 0: /* Page translation fault. */ + fi->type = ARMFault_Translation; + goto do_fault; + case 1: /* 64k page. */ + phys_addr = (desc & 0xffff0000) | (address & 0xffff); + ap = (desc >> (4 + ((address >> 13) & 6))) & 3; + *page_size = 0x10000; + break; + case 2: /* 4k page. */ + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + ap = (desc >> (4 + ((address >> 9) & 6))) & 3; + *page_size = 0x1000; + break; + case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ + if (type == 1) { + /* ARMv6/XScale extended small page format */ + if (arm_feature(env, ARM_FEATURE_XSCALE) + || arm_feature(env, ARM_FEATURE_V6)) { + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + *page_size = 0x1000; + } else { + /* UNPREDICTABLE in ARMv5; we choose to take a + * page translation fault. + */ + fi->type = ARMFault_Translation; + goto do_fault; + } + } else { + phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); + *page_size = 0x400; + } + ap = (desc >> 4) & 3; + break; + default: + /* Never happens, but compiler isn't smart enough to tell. */ + abort(); + } + } + *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); + *prot |= *prot ? PAGE_EXEC : 0; + if (!(*prot & (1 << access_type))) { + /* Access permission fault. */ + fi->type = ARMFault_Permission; + goto do_fault; + } + *phys_ptr = phys_addr; + return false; +do_fault: + fi->domain = domain; + fi->level = level; + return true; +} + +static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, + target_ulong *page_size, ARMMMUFaultInfo *fi) +{ + CPUState *cs = env_cpu(env); + int level = 1; + uint32_t table; + uint32_t desc; + uint32_t xn; + uint32_t pxn = 0; + int type; + int ap; + int domain = 0; + int domain_prot; + hwaddr phys_addr; + uint32_t dacr; + bool ns; + + /* Pagetable walk. */ + /* Lookup l1 descriptor. 
*/ + if (!get_level1_table_address(env, mmu_idx, &table, address)) { + /* Section translation fault if page walk is disabled by PD0 or PD1 */ + fi->type = ARMFault_Translation; + goto do_fault; + } + desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), + mmu_idx, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + type = (desc & 3); + if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { + /* Section translation fault, or attempt to use the encoding + * which is Reserved on implementations without PXN. + */ + fi->type = ARMFault_Translation; + goto do_fault; + } + if ((type == 1) || !(desc & (1 << 18))) { + /* Page or Section. */ + domain = (desc >> 5) & 0x0f; + } + if (regime_el(env, mmu_idx) == 1) { + dacr = env->cp15.dacr_ns; + } else { + dacr = env->cp15.dacr_s; + } + if (type == 1) { + level = 2; + } + domain_prot = (dacr >> (domain * 2)) & 3; + if (domain_prot == 0 || domain_prot == 2) { + /* Section or Page domain fault */ + fi->type = ARMFault_Domain; + goto do_fault; + } + if (type != 1) { + if (desc & (1 << 18)) { + /* Supersection. */ + phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); + phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; + phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; + *page_size = 0x1000000; + } else { + /* Section. */ + phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); + *page_size = 0x100000; + } + ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); + xn = desc & (1 << 4); + pxn = desc & 1; + ns = extract32(desc, 19, 1); + } else { + if (arm_feature(env, ARM_FEATURE_PXN)) { + pxn = (desc >> 2) & 1; + } + ns = extract32(desc, 3, 1); + /* Lookup l2 entry. */ + table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); + desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), + mmu_idx, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); + switch (desc & 3) { + case 0: /* Page translation fault. */ + fi->type = ARMFault_Translation; + goto do_fault; + case 1: /* 64k page. */ + phys_addr = (desc & 0xffff0000) | (address & 0xffff); + xn = desc & (1 << 15); + *page_size = 0x10000; + break; + case 2: case 3: /* 4k page. */ + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + xn = desc & 1; + *page_size = 0x1000; + break; + default: + /* Never happens, but compiler isn't smart enough to tell. */ + abort(); + } + } + if (domain_prot == 3) { + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + } else { + if (pxn && !regime_is_user(env, mmu_idx)) { + xn = 1; + } + if (xn && access_type == MMU_INST_FETCH) { + fi->type = ARMFault_Permission; + goto do_fault; + } + + if (arm_feature(env, ARM_FEATURE_V6K) && + (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { + /* The simplified model uses AP[0] as an access control bit. */ + if ((ap & 1) == 0) { + /* Access flag fault. */ + fi->type = ARMFault_AccessFlag; + goto do_fault; + } + *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); + } else { + *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); + } + if (*prot && !xn) { + *prot |= PAGE_EXEC; + } + if (!(*prot & (1 << access_type))) { + /* Access permission fault. */ + fi->type = ARMFault_Permission; + goto do_fault; + } + } + if (ns) { + /* The NS bit will (as required by the architecture) have no effect if + * the CPU doesn't support TZ or this is a non-secure translation + * regime, because the attribute will already be non-secure. 
+ */ + attrs->secure = false; + } + *phys_ptr = phys_addr; + return false; +do_fault: + fi->domain = domain; + fi->level = level; + return true; +} + +/* + * check_s2_mmu_setup + * @cpu: ARMCPU + * @is_aa64: True if the translation regime is in AArch64 state + * @startlevel: Suggested starting level + * @inputsize: Bitsize of IPAs + * @stride: Page-table stride (See the ARM ARM) + * + * Returns true if the suggested S2 translation parameters are OK and + * false otherwise. + */ +static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, + int inputsize, int stride) +{ + const int grainsize = stride + 3; + int startsizecheck; + + /* Negative levels are never allowed. */ + if (level < 0) { + return false; + } + + startsizecheck = inputsize - ((3 - level) * stride + grainsize); + if (startsizecheck < 1 || startsizecheck > stride + 4) { + return false; + } + + if (is_aa64) { + CPUARMState *env = &cpu->env; + unsigned int pamax = arm_pamax(cpu); + + switch (stride) { + case 13: /* 64KB Pages. */ + if (level == 0 || (level == 1 && pamax <= 42)) { + return false; + } + break; + case 11: /* 16KB Pages. */ + if (level == 0 || (level == 1 && pamax <= 40)) { + return false; + } + break; + case 9: /* 4KB Pages. */ + if (level == 0 && pamax <= 42) { + return false; + } + break; + default: + g_assert_not_reached(); + break; + } + + /* Inputsize checks. */ + if (inputsize > pamax && + (arm_el_is_aa64(env, 1) || inputsize > 40)) { + /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ + return false; + } + } else { + /* AArch32 only supports 4KB pages. Assert on that. */ + assert(stride == 9); + + if (level == 0) { + return false; + } + } + return true; +} + +/* Translate from the 4-bit stage 2 representation of + * memory attributes (without cache-allocation hints) to + * the 8-bit representation of the stage 1 MAIR registers + * (which includes allocation hints). + * + * ref: shared/translation/attrs/S2AttrDecode() + * .../S2ConvertAttrsHints() + */ +static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) +{ + uint8_t hiattr = extract32(s2attrs, 2, 2); + uint8_t loattr = extract32(s2attrs, 0, 2); + uint8_t hihint = 0, lohint = 0; + + if (hiattr != 0) { /* normal memory */ + if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ + hiattr = loattr = 1; /* non-cacheable */ + } else { + if (hiattr != 1) { /* Write-through or write-back */ + hihint = 3; /* RW allocate */ + } + if (loattr != 1) { /* Write-through or write-back */ + lohint = 3; /* RW allocate */ + } + } + } + + return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; +} + +static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) +{ + if (regime_has_2_ranges(mmu_idx)) { + return extract64(tcr, 37, 2); + } else if (mmu_idx == ARMMMUIdx_Stage2) { + return 0; /* VTCR_EL2 */ + } else { + /* Replicate the single TBI bit so we always have 2 bits. */ + return extract32(tcr, 20, 1) * 3; + } +} + +static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) +{ + if (regime_has_2_ranges(mmu_idx)) { + return extract64(tcr, 51, 2); + } else if (mmu_idx == ARMMMUIdx_Stage2) { + return 0; /* VTCR_EL2 */ + } else { + /* Replicate the single TBID bit so we always have 2 bits. 
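+         * e.g. extract32(tcr, 29, 1) * 3 maps the single TBID bit to
+         * either 0b00 or 0b11, mirroring the TBI case above.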
*/ + return extract32(tcr, 29, 1) * 3; + } +} + +ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, + ARMMMUIdx mmu_idx, bool data) +{ + uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; + bool epd, hpd, using16k, using64k; + int select, tsz, tbi; + + if (!regime_has_2_ranges(mmu_idx)) { + select = 0; + tsz = extract32(tcr, 0, 6); + using64k = extract32(tcr, 14, 1); + using16k = extract32(tcr, 15, 1); + if (mmu_idx == ARMMMUIdx_Stage2) { + /* VTCR_EL2 */ + hpd = false; + } else { + hpd = extract32(tcr, 24, 1); + } + epd = false; + } else { + /* + * Bit 55 is always between the two regions, and is canonical for + * determining if address tagging is enabled. + */ + select = extract64(va, 55, 1); + if (!select) { + tsz = extract32(tcr, 0, 6); + epd = extract32(tcr, 7, 1); + using64k = extract32(tcr, 14, 1); + using16k = extract32(tcr, 15, 1); + hpd = extract64(tcr, 41, 1); + } else { + int tg = extract32(tcr, 30, 2); + using16k = tg == 1; + using64k = tg == 3; + tsz = extract32(tcr, 16, 6); + epd = extract32(tcr, 23, 1); + hpd = extract64(tcr, 42, 1); + } + } + tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */ + tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */ + + /* Present TBI as a composite with TBID. */ + tbi = aa64_va_parameter_tbi(tcr, mmu_idx); + if (!data) { + tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); + } + tbi = (tbi >> select) & 1; + + return (ARMVAParameters) { + .tsz = tsz, + .select = select, + .tbi = tbi, + .epd = epd, + .hpd = hpd, + .using16k = using16k, + .using64k = using64k, + }; +} + +static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, + ARMMMUIdx mmu_idx) +{ + uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; + uint32_t el = regime_el(env, mmu_idx); + int select, tsz; + bool epd, hpd; + + if (mmu_idx == ARMMMUIdx_Stage2) { + /* VTCR */ + bool sext = extract32(tcr, 4, 1); + bool sign = extract32(tcr, 3, 1); + + /* + * If the sign-extend bit is not the same as t0sz[3], the result + * is unpredictable. Flag this as a guest error. + */ + if (sign != sext) { + qemu_log_mask(LOG_GUEST_ERROR, + "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); + } + tsz = sextract32(tcr, 0, 4) + 8; + select = 0; + hpd = false; + epd = false; + } else if (el == 2) { + /* HTCR */ + tsz = extract32(tcr, 0, 3); + select = 0; + hpd = extract64(tcr, 24, 1); + epd = false; + } else { + int t0sz = extract32(tcr, 0, 3); + int t1sz = extract32(tcr, 16, 3); + + if (t1sz == 0) { + select = va > (0xffffffffu >> t0sz); + } else { + /* Note that we will detect errors later. */ + select = va >= ~(0xffffffffu >> t1sz); + } + if (!select) { + tsz = t0sz; + epd = extract32(tcr, 7, 1); + hpd = extract64(tcr, 41, 1); + } else { + tsz = t1sz; + epd = extract32(tcr, 23, 1); + hpd = extract64(tcr, 42, 1); + } + /* For aarch32, hpd0 is not enabled without t2e as well. */ + hpd &= extract32(tcr, 6, 1); + } + + return (ARMVAParameters) { + .tsz = tsz, + .select = select, + .epd = epd, + .hpd = hpd, + }; +} + +static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, + target_ulong *page_size_ptr, + ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) +{ + ARMCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + /* Read an LPAE long-descriptor translation table. 
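+ * The walk below proceeds in three steps: fetch the VA range
+ * parameters via aa64_va_parameters()/aa32_va_parameters(), validate
+ * the top address bits and pick a TTBR and starting level, then loop
+ * one descriptor per level, ORing inherited table attributes, until a
+ * block or page descriptor yields the output address.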
*/ + ARMFaultType fault_type = ARMFault_Translation; + uint32_t level; + ARMVAParameters param; + uint64_t ttbr; + hwaddr descaddr, indexmask, indexmask_grainsize; + uint32_t tableattrs; + target_ulong page_size; + uint32_t attrs; + int32_t stride; + int addrsize, inputsize; + TCR *tcr = regime_tcr(env, mmu_idx); + int ap, ns, xn, pxn; + uint32_t el = regime_el(env, mmu_idx); + uint64_t descaddrmask; + bool aarch64 = arm_el_is_aa64(env, el); + bool guarded = false; + + /* TODO: This code does not support shareability levels. */ + if (aarch64) { + param = aa64_va_parameters(env, address, mmu_idx, + access_type != MMU_INST_FETCH); + level = 0; + addrsize = 64 - 8 * param.tbi; + inputsize = 64 - param.tsz; + } else { + param = aa32_va_parameters(env, address, mmu_idx); + level = 1; + addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); + inputsize = addrsize - param.tsz; + } + + /* + * We determined the region when collecting the parameters, but we + * have not yet validated that the address is valid for the region. + * Extract the top bits and verify that they all match select. + * + * For aa32, if inputsize == addrsize, then we have selected the + * region by exclusion in aa32_va_parameters and there is no more + * validation to do here. + */ + if (inputsize < addrsize) { + target_ulong top_bits = sextract64(address, inputsize, + addrsize - inputsize); +#ifdef _MSC_VER + if (param.select != (0 - top_bits)) { +#else + if (-top_bits != param.select) { +#endif + /* The gap between the two regions is a Translation fault */ + fault_type = ARMFault_Translation; + goto do_fault; + } + } + + if (param.using64k) { + stride = 13; + } else if (param.using16k) { + stride = 11; + } else { + stride = 9; + } + + /* Note that QEMU ignores shareability and cacheability attributes, + * so we don't need to do anything with the SH, ORGN, IRGN fields + * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the + * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently + * implement any ASID-like capability so we can ignore it (instead + * we will always flush the TLB any time the ASID is changed). + */ + ttbr = regime_ttbr(env, mmu_idx, param.select); + + /* Here we should have set up all the parameters for the translation: + * inputsize, ttbr, epd, stride, tbi + */ + + if (param.epd) { + /* Translation table walk disabled => Translation fault on TLB miss + * Note: This is always 0 on 64-bit EL2 and EL3. + */ + goto do_fault; + } + + if (mmu_idx != ARMMMUIdx_Stage2) { + /* The starting level depends on the virtual address size (which can + * be up to 48 bits) and the translation granule size. It indicates + * the number of strides (stride bits at a time) needed to + * consume the bits of the input address. In the pseudocode this is: + * level = 4 - RoundUp((inputsize - grainsize) / stride) + * where their 'inputsize' is our 'inputsize', 'grainsize' is + * our 'stride + 3' and 'stride' is our 'stride'. 
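+ * (For instance, a 4KB granule gives stride = 9 and grainsize = 12,
+ * so a 48-bit inputsize starts the walk at level 0.)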
+ * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: + * = 4 - (inputsize - stride - 3 + stride - 1) / stride + * = 4 - (inputsize - 4) / stride; + */ + level = 4 - (inputsize - 4) / stride; + } else { + /* For stage 2 translations the starting level is specified by the + * VTCR_EL2.SL0 field (whose interpretation depends on the page size) + */ + uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); + uint32_t startlevel; + bool ok; + + if (!aarch64 || stride == 9) { + /* AArch32 or 4KB pages */ + startlevel = 2 - sl0; + } else { + /* 16KB or 64KB pages */ + startlevel = 3 - sl0; + } + + /* Check that the starting level is valid. */ + ok = check_s2_mmu_setup(cpu, aarch64, startlevel, + inputsize, stride); + if (!ok) { + fault_type = ARMFault_Translation; + goto do_fault; + } + level = startlevel; + } + + indexmask_grainsize = (1ULL << (stride + 3)) - 1; + indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; + + /* Now we can extract the actual base address from the TTBR */ + descaddr = extract64(ttbr, 0, 48); + /* + * We rely on this masking to clear the RES0 bits at the bottom of the TTBR + * and also to mask out CnP (bit 0) which could validly be non-zero. + */ + descaddr &= ~indexmask; + + /* The address field in the descriptor goes up to bit 39 for ARMv7 + * but up to bit 47 for ARMv8, but we use the descaddrmask + * up to bit 39 for AArch32, because we don't need other bits in that case + * to construct next descriptor address (anyway they should be all zeroes). + */ + descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & + ~indexmask_grainsize; + + /* Secure accesses start with the page table in secure memory and + * can be downgraded to non-secure at any step. Non-secure accesses + * remain non-secure. We implement this by just ORing in the NSTable/NS + * bits at each step. + */ + tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); + for (;;) { + uint64_t descriptor; + bool nstable; + + descaddr |= (address >> (stride * (4 - level))) & indexmask; + descaddr &= ~7ULL; + nstable = extract32(tableattrs, 4, 1); + descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); + if (fi->type != ARMFault_None) { + goto do_fault; + } + + if (!(descriptor & 1) || + (!(descriptor & 2) && (level == 3))) { + /* Invalid, or the Reserved level 3 encoding */ + goto do_fault; + } + descaddr = descriptor & descaddrmask; + + if ((descriptor & 2) && (level < 3)) { + /* Table entry. The top five bits are attributes which may + * propagate down through lower levels of the table (and + * which are all arranged so that 0 means "no effect", so + * we can gather them up by ORing in the bits at each level). + */ + tableattrs |= extract64(descriptor, 59, 5); + level++; + indexmask = indexmask_grainsize; + continue; + } + /* Block entry at level 1 or 2, or page entry at level 3. + * These are basically the same thing, although the number + * of bits we pull in from the vaddr varies. + */ + page_size = (1ULL << ((stride * (4 - level)) + 3)); + descaddr |= (address & (page_size - 1)); + /* Extract attributes from the descriptor */ + attrs = extract64(descriptor, 2, 10) + | (extract64(descriptor, 52, 12) << 10); + + if (mmu_idx == ARMMMUIdx_Stage2) { + /* Stage 2 table descriptors do not include any attribute fields */ + break; + } + /* Merge in attributes from table descriptors */ + attrs |= nstable << 3; /* NS */ + guarded = extract64(descriptor, 50, 1); /* GP */ + if (param.hpd) { + /* HPD disables all the table attributes except NSTable. 
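+ * (With hierarchical permissions disabled, the APTable/XNTable/
+ * PXNTable bits accumulated in tableattrs must not be folded into
+ * attrs, hence the early break before the merging below; the NS
+ * table attribute was already applied above.)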
*/ + break; + } + attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ + /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 + * means "force PL1 access only", which means forcing AP[1] to 0. + */ + attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */ + attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */ + break; + } + /* Here descaddr is the final physical address, and attributes + * are all in attrs. + */ + fault_type = ARMFault_AccessFlag; + if ((attrs & (1 << 8)) == 0) { + /* Access flag */ + goto do_fault; + } + + ap = extract32(attrs, 4, 2); + xn = extract32(attrs, 12, 1); + + if (mmu_idx == ARMMMUIdx_Stage2) { + ns = true; + *prot = get_S2prot(env, ap, xn); + } else { + ns = extract32(attrs, 3, 1); + pxn = extract32(attrs, 11, 1); + *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); + } + + fault_type = ARMFault_Permission; + if (!(*prot & (1 << access_type))) { + goto do_fault; + } + + if (ns) { + /* The NS bit will (as required by the architecture) have no effect if + * the CPU doesn't support TZ or this is a non-secure translation + * regime, because the attribute will already be non-secure. + */ + txattrs->secure = false; + } + /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */ + if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) { + txattrs->target_tlb_bit0 = true; + } + + if (cacheattrs != NULL) { + if (mmu_idx == ARMMMUIdx_Stage2) { + cacheattrs->attrs = convert_stage2_attrs(env, + extract32(attrs, 0, 4)); + } else { + /* Index into MAIR registers for cache attributes */ + uint8_t attrindx = extract32(attrs, 0, 3); + uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; + assert(attrindx <= 7); + cacheattrs->attrs = extract64(mair, attrindx * 8, 8); + } + cacheattrs->shareability = extract32(attrs, 6, 2); + } + + *phys_ptr = descaddr; + *page_size_ptr = page_size; + return false; + +do_fault: + fi->type = fault_type; + fi->level = level; + /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ + fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2); + return true; +} + +static inline void get_phys_addr_pmsav7_default(CPUARMState *env, + ARMMMUIdx mmu_idx, + int32_t address, int *prot) +{ +#define XRANGE(a, x, y) (a >=x && a <= y) + if (!arm_feature(env, ARM_FEATURE_M)) { + *prot = PAGE_READ | PAGE_WRITE; + if (XRANGE(address, 0xF0000000, 0xFFFFFFFF)) { + if (regime_sctlr(env, mmu_idx) & SCTLR_V) { + /* hivecs execing is ok */ + *prot |= PAGE_EXEC; + } + } else if (XRANGE(address, 0x00000000, 0x7FFFFFFF)) { + *prot |= PAGE_EXEC; + } + } else { + /* Default system address map for M profile cores. + * The architecture specifies which regions are execute-never; + * at the MPU level no other checks are defined. 
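+ * (Concretely, in the ranges below the Code, SRAM and RAM spaces are
+ * mapped read/write/execute, while the Peripheral, Device and System
+ * spaces are read/write but execute-never.)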
+ */ + if (XRANGE(address, 0x00000000, 0x1fffffff) || /* ROM */ + XRANGE(address, 0x20000000, 0x3fffffff) || /* SRAM */ + XRANGE(address, 0x60000000, 0x7fffffff) || /* RAM */ + XRANGE(address, 0x80000000, 0x9fffffff)) /* RAM */ + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + + else if (XRANGE(address, 0x40000000, 0x5fffffff) || /* Peripheral */ + XRANGE(address, 0xa0000000, 0xbfffffff) || /* Device */ + XRANGE(address, 0xc0000000, 0xdfffffff) || /* Device */ + XRANGE(address, 0xe0000000, 0xffffffff)) /* System */ + *prot = PAGE_READ | PAGE_WRITE; + } +#undef XRANGE +} + +static bool pmsav7_use_background_region(ARMCPU *cpu, + ARMMMUIdx mmu_idx, bool is_user) +{ + /* Return true if we should use the default memory map as a + * "background" region if there are no hits against any MPU regions. + */ + CPUARMState *env = &cpu->env; + + if (is_user) { + return false; + } + + if (arm_feature(env, ARM_FEATURE_M)) { + return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] + & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; + } else { + return regime_sctlr(env, mmu_idx) & SCTLR_BR; + } +} + +static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) +{ + /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ + return arm_feature(env, ARM_FEATURE_M) && + extract32(address, 20, 12) == 0xe00; +} + +static inline bool m_is_system_region(CPUARMState *env, uint32_t address) +{ + /* True if address is in the M profile system region + * 0xe0000000 - 0xffffffff + */ + return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; +} + +static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, int *prot, + target_ulong *page_size, + ARMMMUFaultInfo *fi) +{ + struct uc_struct *uc = env->uc; + ARMCPU *cpu = env_archcpu(env); + int n; + bool is_user = regime_is_user(env, mmu_idx); + + *phys_ptr = address; + *page_size = TARGET_PAGE_SIZE; + *prot = 0; + + if (regime_translation_disabled(env, mmu_idx) || + m_is_ppb_region(env, address)) { + /* MPU disabled or M profile PPB access: use default memory map. + * The other case which uses the default memory map in the + * v7M ARM ARM pseudocode is exception vector reads from the vector + * table. In QEMU those accesses are done in arm_v7m_load_vector(), + * which always does a direct read using address_space_ldl(), rather + * than going via this function, so we don't need to check that here. + */ + get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); + } else { /* MPU enabled */ + for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { + /* region search */ + uint32_t base = env->pmsav7.drbar[n]; + uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); + uint32_t rmask; + bool srdis = false; + + if (!(env->pmsav7.drsr[n] & 0x1)) { + continue; + } + + if (!rsize) { + qemu_log_mask(LOG_GUEST_ERROR, + "DRSR[%d]: Rsize field cannot be 0\n", n); + continue; + } + rsize++; + rmask = (1ull << rsize) - 1; + + if (base & rmask) { + qemu_log_mask(LOG_GUEST_ERROR, + "DRBAR[%d]: 0x%" PRIx32 " misaligned " + "to DRSR region size, mask = 0x%" PRIx32 "\n", + n, base, rmask); + continue; + } + + if (address < base || address > base + rmask) { + /* + * Address not in this region. We must check whether the + * region covers addresses in the same page as our address. 
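+ * (That is, the MPU region and the emulated TLB page may overlap
+ * only partially.)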
+ * In that case we must not report a size that covers the + * whole page for a subsequent hit against a different MPU + * region or the background region, because it would result in + * incorrect TLB hits for subsequent accesses to addresses that + * are in this MPU region. + */ + if (ranges_overlap(base, rmask, + address & TARGET_PAGE_MASK, + TARGET_PAGE_SIZE)) { + *page_size = 1; + } + continue; + } + + /* Region matched */ + + if (rsize >= 8) { /* no subregions for regions < 256 bytes */ + int i, snd; + uint32_t srdis_mask; + + rsize -= 3; /* sub region size (power of 2) */ + snd = ((address - base) >> rsize) & 0x7; + srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); + + srdis_mask = srdis ? 0x3 : 0x0; + for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { + /* This will check in groups of 2, 4 and then 8, whether + * the subregion bits are consistent. rsize is incremented + * back up to give the region size, considering consistent + * adjacent subregions as one region. Stop testing if rsize + * is already big enough for an entire QEMU page. + */ + int snd_rounded = snd & ~(i - 1); + uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], + snd_rounded + 8, i); + if (srdis_mask ^ srdis_multi) { + break; + } + srdis_mask = (srdis_mask << i) | srdis_mask; + rsize++; + } + } + if (srdis) { + continue; + } + if (rsize < TARGET_PAGE_BITS) { + *page_size = 1ULL << rsize; + } + break; + } + + if (n == -1) { /* no hits */ + if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { + /* background fault */ + fi->type = ARMFault_Background; + return true; + } + get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); + } else { /* a MPU hit! */ + uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); + uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); + + if (m_is_system_region(env, address)) { + /* System space is always execute never */ + xn = 1; + } + + if (is_user) { /* User mode AP bit decoding */ + switch (ap) { + case 0: + case 1: + case 5: + break; /* no access */ + case 3: + *prot |= PAGE_WRITE; + /* fall through */ + case 2: + case 6: + *prot |= PAGE_READ | PAGE_EXEC; + break; + case 7: + /* for v7M, same as 6; for R profile a reserved value */ + if (arm_feature(env, ARM_FEATURE_M)) { + *prot |= PAGE_READ | PAGE_EXEC; + break; + } + /* fall through */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "DRACR[%d]: Bad value for AP bits: 0x%" + PRIx32 "\n", n, ap); + } + } else { /* Priv. mode AP bits decoding */ + switch (ap) { + case 0: + break; /* no access */ + case 1: + case 2: + case 3: + *prot |= PAGE_WRITE; + /* fall through */ + case 5: + case 6: + *prot |= PAGE_READ | PAGE_EXEC; + break; + case 7: + /* for v7M, same as 6; for R profile a reserved value */ + if (arm_feature(env, ARM_FEATURE_M)) { + *prot |= PAGE_READ | PAGE_EXEC; + break; + } + /* fall through */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "DRACR[%d]: Bad value for AP bits: 0x%" + PRIx32 "\n", n, ap); + } + } + + /* execute never */ + if (xn) { + *prot &= ~PAGE_EXEC; + } + } + } + + fi->type = ARMFault_Permission; + fi->level = 1; + return !(*prot & (1 << access_type)); +} + +void v8m_security_lookup(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + V8M_SAttributes *sattrs) +{ +#if 0 + /* Look up the security attributes for this address. Compare the + * pseudocode SecurityCheck() function. + * We assume the caller has zero-initialized *sattrs. 
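+ * Note that in this port the body is compiled out with #if 0
+ * (presumably because the IDAU interface is not wired up), so the
+ * lookup is a no-op and *sattrs keeps its zero-initialized (Secure,
+ * no region) defaults.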
+ */ + ARMCPU *cpu = env_archcpu(env); + int r; + bool idau_exempt = false, idau_ns = true, idau_nsc = true; + int idau_region = IREGION_NOTVALID; + uint32_t addr_page_base = address & TARGET_PAGE_MASK; + uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); + + if (cpu->idau) { + IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); + IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); + + iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, + &idau_nsc); + } + + if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { + /* 0xf0000000..0xffffffff is always S for insn fetches */ + return; + } + + if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { + sattrs->ns = !regime_is_secure(env, mmu_idx); + return; + } + + if (idau_region != IREGION_NOTVALID) { + sattrs->irvalid = true; + sattrs->iregion = idau_region; + } + + switch (env->sau.ctrl & 3) { + case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ + break; + case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ + sattrs->ns = true; + break; + default: /* SAU.ENABLE == 1 */ + for (r = 0; r < cpu->sau_sregion; r++) { + if (env->sau.rlar[r] & 1) { + uint32_t base = env->sau.rbar[r] & ~0x1f; + uint32_t limit = env->sau.rlar[r] | 0x1f; + + if (base <= address && limit >= address) { + if (base > addr_page_base || limit < addr_page_limit) { + sattrs->subpage = true; + } + if (sattrs->srvalid) { + /* If we hit in more than one region then we must report + * as Secure, not NS-Callable, with no valid region + * number info. + */ + sattrs->ns = false; + sattrs->nsc = false; + sattrs->sregion = 0; + sattrs->srvalid = false; + break; + } else { + if (env->sau.rlar[r] & 2) { + sattrs->nsc = true; + } else { + sattrs->ns = true; + } + sattrs->srvalid = true; + sattrs->sregion = r; + } + } else { + /* + * Address not in this region. We must check whether the + * region covers addresses in the same page as our address. + * In that case we must not report a size that covers the + * whole page for a subsequent hit against a different MPU + * region or the background region, because it would result + * in incorrect TLB hits for subsequent accesses to + * addresses that are in this MPU region. + */ + if (limit >= base && + ranges_overlap(base, limit - base + 1, + addr_page_base, + TARGET_PAGE_SIZE)) { + sattrs->subpage = true; + } + } + } + } + break; + } + + /* + * The IDAU will override the SAU lookup results if it specifies + * higher security than the SAU does. + */ + if (!idau_ns) { + if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { + sattrs->ns = false; + sattrs->nsc = idau_nsc; + } + } +#endif +} + +bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, MemTxAttrs *txattrs, + int *prot, bool *is_subpage, + ARMMMUFaultInfo *fi, uint32_t *mregion) +{ + struct uc_struct *uc = env->uc; + /* Perform a PMSAv8 MPU lookup (without also doing the SAU check + * that a full phys-to-virt translation does). + * mregion is (if not NULL) set to the region number which matched, + * or -1 if no region number is returned (MPU off, address did not + * hit a region, address hit in multiple regions). + * We set is_subpage to true if the region hit doesn't cover the + * entire TARGET_PAGE the address is within. 
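+ * (Callers turn is_subpage into a 1-byte page size so that the
+ * resulting TLB entry cannot be reused for neighbouring addresses
+ * within the same page.)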
+ */ + ARMCPU *cpu = env_archcpu(env); + bool is_user = regime_is_user(env, mmu_idx); + uint32_t secure = regime_is_secure(env, mmu_idx); + int n; + int matchregion = -1; + bool hit = false; + uint32_t addr_page_base = address & TARGET_PAGE_MASK; + uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); + + *is_subpage = false; + *phys_ptr = address; + *prot = 0; + if (mregion) { + *mregion = -1; + } + + /* Unlike the ARM ARM pseudocode, we don't need to check whether this + * was an exception vector read from the vector table (which is always + * done using the default system address map), because those accesses + * are done in arm_v7m_load_vector(), which always does a direct + * read using address_space_ldl(), rather than going via this function. + */ + if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ + hit = true; + } else if (m_is_ppb_region(env, address)) { + hit = true; + } else { + if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { + hit = true; + } + + for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { + /* region search */ + /* Note that the base address is bits [31:5] from the register + * with bits [4:0] all zeroes, but the limit address is bits + * [31:5] from the register with bits [4:0] all ones. + */ + uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; + uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; + + if (!(env->pmsav8.rlar[secure][n] & 0x1)) { + /* Region disabled */ + continue; + } + + if (address < base || address > limit) { + /* + * Address not in this region. We must check whether the + * region covers addresses in the same page as our address. + * In that case we must not report a size that covers the + * whole page for a subsequent hit against a different MPU + * region or the background region, because it would result in + * incorrect TLB hits for subsequent accesses to addresses that + * are in this MPU region. + */ + if (limit >= base && + ranges_overlap(base, limit - base + 1, + addr_page_base, + TARGET_PAGE_SIZE)) { + *is_subpage = true; + } + continue; + } + + if (base > addr_page_base || limit < addr_page_limit) { + *is_subpage = true; + } + + if (matchregion != -1) { + /* Multiple regions match -- always a failure (unlike + * PMSAv7 where highest-numbered-region wins) + */ + fi->type = ARMFault_Permission; + fi->level = 1; + return true; + } + + matchregion = n; + hit = true; + } + } + + if (!hit) { + /* background fault */ + fi->type = ARMFault_Background; + return true; + } + + if (matchregion == -1) { + /* hit using the background region */ + get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); + } else { + uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); + uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); + + if (m_is_system_region(env, address)) { + /* System space is always execute never */ + xn = 1; + } + + *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); + if (*prot && !xn) { + *prot |= PAGE_EXEC; + } + /* We don't need to look the attribute up in the MAIR0/MAIR1 + * registers because that only tells us about cacheability. 
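+ * (QEMU does not model cacheability at all, so only the permission
+ * bits computed above affect emulation.)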
+ */ + if (mregion) { + *mregion = matchregion; + } + } + + fi->type = ARMFault_Permission; + fi->level = 1; + return !(*prot & (1 << access_type)); +} + + +static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, MemTxAttrs *txattrs, + int *prot, target_ulong *page_size, + ARMMMUFaultInfo *fi) +{ + struct uc_struct *uc = env->uc; + uint32_t secure = regime_is_secure(env, mmu_idx); + V8M_SAttributes sattrs = { 0 }; + bool ret; + bool mpu_is_subpage; + + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); + if (access_type == MMU_INST_FETCH) { + /* Instruction fetches always use the MMU bank and the + * transaction attribute determined by the fetch address, + * regardless of CPU state. This is painful for QEMU + * to handle, because it would mean we need to encode + * into the mmu_idx not just the (user, negpri) information + * for the current security state but also that for the + * other security state, which would balloon the number + * of mmu_idx values needed alarmingly. + * Fortunately we can avoid this because it's not actually + * possible to arbitrarily execute code from memory with + * the wrong security attribute: it will always generate + * an exception of some kind or another, apart from the + * special case of an NS CPU executing an SG instruction + * in S&NSC memory. So we always just fail the translation + * here and sort things out in the exception handler + * (including possibly emulating an SG instruction). + */ + if (sattrs.ns != !secure) { + if (sattrs.nsc) { + fi->type = ARMFault_QEMU_NSCExec; + } else { + fi->type = ARMFault_QEMU_SFault; + } + *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; + *phys_ptr = address; + *prot = 0; + return true; + } + } else { + /* For data accesses we always use the MMU bank indicated + * by the current CPU state, but the security attributes + * might downgrade a secure access to nonsecure. + */ + if (sattrs.ns) { + txattrs->secure = false; + } else if (!secure) { + /* NS access to S memory must fault. + * Architecturally we should first check whether the + * MPU information for this address indicates that we + * are doing an unaligned access to Device memory, which + * should generate a UsageFault instead. QEMU does not + * currently check for that kind of unaligned access though. + * If we added it we would need to do so as a special case + * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). + */ + fi->type = ARMFault_QEMU_SFault; + *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; + *phys_ptr = address; + *prot = 0; + return true; + } + } + } + + ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, + txattrs, prot, &mpu_is_subpage, fi, NULL); + *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; + return ret; +} + +static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, int *prot, + ARMMMUFaultInfo *fi) +{ + int n; + uint32_t mask; + uint32_t base; + bool is_user = regime_is_user(env, mmu_idx); + + if (regime_translation_disabled(env, mmu_idx)) { + /* MPU disabled. 
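+ * (Everything is then a flat identity mapping with full
+ * read/write/execute permission, as set just below.)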
*/ + *phys_ptr = address; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return false; + } + + *phys_ptr = address; + for (n = 7; n >= 0; n--) { + base = env->cp15.c6_region[n]; + if ((base & 1) == 0) { + continue; + } + mask = 1 << ((base >> 1) & 0x1f); + /* Keep this shift separate from the above to avoid an + (undefined) << 32. */ + mask = (mask << 1) - 1; + if (((base ^ address) & ~mask) == 0) { + break; + } + } + if (n < 0) { + fi->type = ARMFault_Background; + return true; + } + + if (access_type == MMU_INST_FETCH) { + mask = env->cp15.pmsav5_insn_ap; + } else { + mask = env->cp15.pmsav5_data_ap; + } + mask = (mask >> (n * 4)) & 0xf; + switch (mask) { + case 0: + fi->type = ARMFault_Permission; + fi->level = 1; + return true; + case 1: + if (is_user) { + fi->type = ARMFault_Permission; + fi->level = 1; + return true; + } + *prot = PAGE_READ | PAGE_WRITE; + break; + case 2: + *prot = PAGE_READ; + if (!is_user) { + *prot |= PAGE_WRITE; + } + break; + case 3: + *prot = PAGE_READ | PAGE_WRITE; + break; + case 5: + if (is_user) { + fi->type = ARMFault_Permission; + fi->level = 1; + return true; + } + *prot = PAGE_READ; + break; + case 6: + *prot = PAGE_READ; + break; + default: + /* Bad permission. */ + fi->type = ARMFault_Permission; + fi->level = 1; + return true; + } + *prot |= PAGE_EXEC; + return false; +} + +/* Combine either inner or outer cacheability attributes for normal + * memory, according to table D4-42 and pseudocode procedure + * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). + * + * NB: only stage 1 includes allocation hints (RW bits), leading to + * some asymmetry. + */ +static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) +{ + if (s1 == 4 || s2 == 4) { + /* non-cacheable has precedence */ + return 4; + } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { + /* stage 1 write-through takes precedence */ + return s1; + } else if (extract32(s2, 2, 2) == 2) { + /* stage 2 write-through takes precedence, but the allocation hint + * is still taken from stage 1 + */ + return (2 << 2) | extract32(s1, 0, 2); + } else { /* write-back */ + return s1; + } +} + +/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 + * and CombineS1S2Desc() + * + * @s1: Attributes from stage 1 walk + * @s2: Attributes from stage 2 walk + */ +static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) +{ + uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); + uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); + ARMCacheAttrs ret; + + /* Combine shareability attributes (table D4-43) */ + if (s1.shareability == 2 || s2.shareability == 2) { + /* if either are outer-shareable, the result is outer-shareable */ + ret.shareability = 2; + } else if (s1.shareability == 3 || s2.shareability == 3) { + /* if either are inner-shareable, the result is inner-shareable */ + ret.shareability = 3; + } else { + /* both non-shareable */ + ret.shareability = 0; + } + + /* Combine memory type and cacheability attributes */ + if (s1hi == 0 || s2hi == 0) { + /* Device has precedence over normal */ + if (s1lo == 0 || s2lo == 0) { + /* nGnRnE has precedence over anything */ + ret.attrs = 0; + } else if (s1lo == 4 || s2lo == 4) { + /* non-Reordering has precedence over Reordering */ + ret.attrs = 4; /* nGnRE */ + } else if (s1lo == 8 || s2lo == 8) { + /* non-Gathering has precedence over Gathering */ + ret.attrs = 8; /* nGRE */ + } else { + ret.attrs = 0xc; /* GRE */ + } + + /* Any location for which the 
resultant memory type is any + * type of Device memory is always treated as Outer Shareable. + */ + ret.shareability = 2; + } else { /* Normal memory */ + /* Outer/inner cacheability combine independently */ + ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 + | combine_cacheattr_nibble(s1lo, s2lo); + + if (ret.attrs == 0x44) { + /* Any location for which the resultant memory type is Normal + * Inner Non-cacheable, Outer Non-cacheable is always treated + * as Outer Shareable. + */ + ret.shareability = 2; + } + } + + return ret; +} + + +/* get_phys_addr - get the physical address for this virtual address + * + * Find the physical address corresponding to the given virtual address, + * by doing a translation table walk on MMU based systems or using the + * MPU state on MPU based systems. + * + * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, + * prot and page_size may not be filled in, and the populated fsr value provides + * information on why the translation aborted, in the format of a + * DFSR/IFSR fault register, with the following caveats: + * * we honour the short vs long DFSR format differences. + * * the WnR bit is never set (the caller must do this). + * * for PSMAv5 based systems we don't bother to return a full FSR format + * value. + * + * @env: CPUARMState + * @address: virtual address to get physical address for + * @access_type: 0 for read, 1 for write, 2 for execute + * @mmu_idx: MMU index indicating required translation regime + * @phys_ptr: set to the physical address corresponding to the virtual address + * @attrs: set to the memory transaction attributes to use + * @prot: set to the permissions for the page containing phys_ptr + * @page_size: set to the size of the page containing phys_ptr + * @fi: set to fault info if the translation fails + * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes + */ +bool get_phys_addr(CPUARMState *env, target_ulong address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, + target_ulong *page_size, + ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) +{ + struct uc_struct *uc = env->uc; + if (mmu_idx == ARMMMUIdx_E10_0 || + mmu_idx == ARMMMUIdx_E10_1 || + mmu_idx == ARMMMUIdx_E10_1_PAN) { + /* Call ourselves recursively to do the stage 1 and then stage 2 + * translations. + */ + if (arm_feature(env, ARM_FEATURE_EL2)) { + hwaddr ipa; + int s2_prot; + int ret; + ARMCacheAttrs cacheattrs2 = { 0 }; + + ret = get_phys_addr(env, address, access_type, + stage_1_mmu_idx(mmu_idx), &ipa, attrs, + prot, page_size, fi, cacheattrs); + + /* If S1 fails or S2 is disabled, return early. */ + if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) { + *phys_ptr = ipa; + return ret; + } + + /* S1 is done. Now do S2 translation. */ + ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2, + phys_ptr, attrs, &s2_prot, + page_size, fi, + cacheattrs != NULL ? &cacheattrs2 : NULL); + fi->s2addr = ipa; + /* Combine the S1 and S2 perms. */ + *prot &= s2_prot; + + /* Combine the S1 and S2 cache attributes, if needed */ + if (!ret && cacheattrs != NULL) { + if (env->cp15.hcr_el2 & HCR_DC) { + /* + * HCR.DC forces the first stage attributes to + * Normal Non-Shareable, + * Inner Write-Back Read-Allocate Write-Allocate, + * Outer Write-Back Read-Allocate Write-Allocate. 
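+ * (In MAIR encoding that is attrs == 0xff with shareability 0, which
+ * is what gets assigned below.)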
+ */ + cacheattrs->attrs = 0xff; + cacheattrs->shareability = 0; + } + *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); + } + + return ret; + } else { + /* + * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. + */ + mmu_idx = stage_1_mmu_idx(mmu_idx); + } + } + + /* The page table entries may downgrade secure to non-secure, but + * cannot upgrade an non-secure translation regime's attributes + * to secure. + */ + attrs->secure = regime_is_secure(env, mmu_idx); + attrs->user = regime_is_user(env, mmu_idx); + + /* Fast Context Switch Extension. This doesn't exist at all in v8. + * In v7 and earlier it affects all stage 1 translations. + */ + if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 + && !arm_feature(env, ARM_FEATURE_V8)) { + if (regime_el(env, mmu_idx) == 3) { + address += env->cp15.fcseidr_s; + } else { + address += env->cp15.fcseidr_ns; + } + } + + if (arm_feature(env, ARM_FEATURE_PMSA)) { + bool ret; + *page_size = TARGET_PAGE_SIZE; + + if (arm_feature(env, ARM_FEATURE_V8)) { + /* PMSAv8 */ + ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, + phys_ptr, attrs, prot, page_size, fi); + } else if (arm_feature(env, ARM_FEATURE_V7)) { + /* PMSAv7 */ + ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, + phys_ptr, prot, page_size, fi); + } else { + /* Pre-v7 MPU */ + ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, + phys_ptr, prot, fi); + } + qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 + " mmu_idx %u -> %s (prot %c%c%c)\n", + access_type == MMU_DATA_LOAD ? "reading" : + (access_type == MMU_DATA_STORE ? "writing" : "execute"), + (uint32_t)address, mmu_idx, + ret ? "Miss" : "Hit", + *prot & PAGE_READ ? 'r' : '-', + *prot & PAGE_WRITE ? 'w' : '-', + *prot & PAGE_EXEC ? 'x' : '-'); + + return ret; + } + + /* Definitely a real MMU, not an MPU */ + + if (regime_translation_disabled(env, mmu_idx)) { + /* + * MMU disabled. S1 addresses within aa64 translation regimes are + * still checked for bounds -- see AArch64.TranslateAddressS1Off. + */ + if (mmu_idx != ARMMMUIdx_Stage2) { + int r_el = regime_el(env, mmu_idx); + if (arm_el_is_aa64(env, r_el)) { + int pamax = arm_pamax(env_archcpu(env)); + uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr; + int addrtop, tbi; + + tbi = aa64_va_parameter_tbi(tcr, mmu_idx); + if (access_type == MMU_INST_FETCH) { + tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); + } + tbi = (tbi >> extract64(address, 55, 1)) & 1; + addrtop = (tbi ? 55 : 63); + + if (extract64(address, pamax, addrtop - pamax + 1) != 0) { + fi->type = ARMFault_AddressSize; + fi->level = 0; + fi->stage2 = false; + return 1; + } + + /* + * When TBI is disabled, we've just validated that all of the + * bits above PAMax are zero, so logically we only need to + * clear the top byte for TBI. But it's clearer to follow + * the pseudocode set of addrdesc.paddress. 
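+ * (Hence the unconditional truncation of address to 52 bits below,
+ * mirroring AArch64.TranslateAddressS1Off instead of special-casing
+ * the TBI-disabled path.)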
+ */ + address = extract64(address, 0, 52); + } + } + *phys_ptr = address; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + *page_size = TARGET_PAGE_SIZE; + return 0; + } + + if (regime_using_lpae_format(env, mmu_idx)) { + return get_phys_addr_lpae(env, address, access_type, mmu_idx, + phys_ptr, attrs, prot, page_size, + fi, cacheattrs); + } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { + return get_phys_addr_v6(env, address, access_type, mmu_idx, + phys_ptr, attrs, prot, page_size, fi); + } else { + return get_phys_addr_v5(env, address, access_type, mmu_idx, + phys_ptr, prot, page_size, fi); + } +} + +hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, + MemTxAttrs *attrs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + hwaddr phys_addr; + target_ulong page_size; + int prot; + bool ret; + ARMMMUFaultInfo fi = { 0 }; + ARMMMUIdx mmu_idx = arm_mmu_idx(env); + + *attrs = (MemTxAttrs) { 0 }; + + ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, + attrs, &prot, &page_size, &fi, NULL); + + if (ret) { + return -1; + } + return phys_addr; +} + +/* Note that signed overflow is undefined in C. The following routines are + careful to use unsigned types where modulo arithmetic is required. + Failure to do so _will_ break on newer gcc. */ + +/* Signed saturating arithmetic. */ + +/* Perform 16-bit signed saturating addition. */ +static inline uint16_t add16_sat(uint16_t a, uint16_t b) +{ + uint16_t res; + + res = a + b; + if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { + if (a & 0x8000) + res = 0x8000; + else + res = 0x7fff; + } + return res; +} + +/* Perform 8-bit signed saturating addition. */ +static inline uint8_t add8_sat(uint8_t a, uint8_t b) +{ + uint8_t res; + + res = a + b; + if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { + if (a & 0x80) + res = 0x80; + else + res = 0x7f; + } + return res; +} + +/* Perform 16-bit signed saturating subtraction. */ +static inline uint16_t sub16_sat(uint16_t a, uint16_t b) +{ + uint16_t res; + + res = a - b; + if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { + if (a & 0x8000) + res = 0x8000; + else + res = 0x7fff; + } + return res; +} + +/* Perform 8-bit signed saturating subtraction. */ +static inline uint8_t sub8_sat(uint8_t a, uint8_t b) +{ + uint8_t res; + + res = a - b; + if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { + if (a & 0x80) + res = 0x80; + else + res = 0x7f; + } + return res; +} + +#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); +#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); +#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); +#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); +#define PFX q + +#include "op_addsub.h" + +/* Unsigned saturating arithmetic. */ +static inline uint16_t add16_usat(uint16_t a, uint16_t b) +{ + uint16_t res; + res = a + b; + if (res < a) + res = 0xffff; + return res; +} + +static inline uint16_t sub16_usat(uint16_t a, uint16_t b) +{ + if (a > b) + return a - b; + else + return 0; +} + +static inline uint8_t add8_usat(uint8_t a, uint8_t b) +{ + uint8_t res; + res = a + b; + if (res < a) + res = 0xff; + return res; +} + +static inline uint8_t sub8_usat(uint8_t a, uint8_t b) +{ + if (a > b) + return a - b; + else + return 0; +} + +#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); +#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); +#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); +#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); +#define PFX uq + +#include "op_addsub.h" + +/* Signed modulo arithmetic. 
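+ * For example, SARITH16(a, b, 0, +) below computes
+ *     sum = (int32_t)(int16_t)(a) + (int32_t)(int16_t)(b);
+ * stores the low 16 bits as lane 0 of the result, and sets both GE
+ * bits of the lane when sum >= 0, which is the semantics of the
+ * ARMv6 SADD16/SSUB16 family of parallel instructions.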
*/ +#define SARITH16(a, b, n, op) do { \ + int32_t sum; \ + sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ + RESULT(sum, n, 16); \ + if (sum >= 0) \ + ge |= 3 << (n * 2); \ + } while(0) + +#define SARITH8(a, b, n, op) do { \ + int32_t sum; \ + sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ + RESULT(sum, n, 8); \ + if (sum >= 0) \ + ge |= 1 << n; \ + } while(0) + + +#define ADD16(a, b, n) SARITH16(a, b, n, +) +#define SUB16(a, b, n) SARITH16(a, b, n, -) +#define ADD8(a, b, n) SARITH8(a, b, n, +) +#define SUB8(a, b, n) SARITH8(a, b, n, -) +#define PFX s +#define ARITH_GE + +#include "op_addsub.h" + +/* Unsigned modulo arithmetic. */ +#define ADD16(a, b, n) do { \ + uint32_t sum; \ + sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ + RESULT(sum, n, 16); \ + if ((sum >> 16) == 1) \ + ge |= 3 << (n * 2); \ + } while(0) + +#define ADD8(a, b, n) do { \ + uint32_t sum; \ + sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ + RESULT(sum, n, 8); \ + if ((sum >> 8) == 1) \ + ge |= 1 << n; \ + } while(0) + +#define SUB16(a, b, n) do { \ + uint32_t sum; \ + sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ + RESULT(sum, n, 16); \ + if ((sum >> 16) == 0) \ + ge |= 3 << (n * 2); \ + } while(0) + +#define SUB8(a, b, n) do { \ + uint32_t sum; \ + sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ + RESULT(sum, n, 8); \ + if ((sum >> 8) == 0) \ + ge |= 1 << n; \ + } while(0) + +#define PFX u +#define ARITH_GE + +#include "op_addsub.h" + +/* Halved signed arithmetic. */ +#define ADD16(a, b, n) \ + RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) +#define SUB16(a, b, n) \ + RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) +#define ADD8(a, b, n) \ + RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) +#define SUB8(a, b, n) \ + RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) +#define PFX sh + +#include "op_addsub.h" + +/* Halved unsigned arithmetic. */ +#define ADD16(a, b, n) \ + RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) +#define SUB16(a, b, n) \ + RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) +#define ADD8(a, b, n) \ + RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) +#define SUB8(a, b, n) \ + RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) +#define PFX uh + +#include "op_addsub.h" + +static inline uint8_t do_usad(uint8_t a, uint8_t b) +{ + if (a > b) + return a - b; + else + return b - a; +} + +/* Unsigned sum of absolute byte differences. */ +uint32_t HELPER(usad8)(uint32_t a, uint32_t b) +{ + uint32_t sum; + sum = do_usad(a, b); + sum += do_usad(a >> 8, b >> 8); + sum += do_usad(a >> 16, b >>16); + sum += do_usad(a >> 24, b >> 24); + return sum; +} + +/* For ARMv6 SEL instruction. */ +uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) +{ + uint32_t mask; + + mask = 0; + if (flags & 1) + mask |= 0xff; + if (flags & 2) + mask |= 0xff00; + if (flags & 4) + mask |= 0xff0000; + if (flags & 8) + mask |= 0xff000000; + return (a & mask) | (b & ~mask); +} + +/* CRC helpers. + * The upper bytes of val (above the number specified by 'bytes') must have + * been zeroed out by the caller. 
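+ * For CRC32B, for instance, bytes == 1 and only buf[0] contributes.
+ * The ^ 0xffffffff before and after the crc32() call cancel zlib's
+ * internal pre- and post-inversion, since the ARM instructions
+ * operate on the raw polynomial value.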
+ */ +uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) +{ + uint8_t buf[4]; + + stl_le_p(buf, val); + + return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; +} + +uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) +{ + uint8_t buf[4]; + + stl_le_p(buf, val); + + /* Linux crc32c converts the output to one's complement. */ + return crc32c(acc, buf, bytes) ^ 0xffffffff; +} + +/* Return the exception level to which FP-disabled exceptions should + * be taken, or 0 if FP is enabled. + */ +int fp_exception_el(CPUARMState *env, int cur_el) +{ + return 0; +} + +/* Return the exception level we're running at if this is our mmu_idx */ +int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) +{ + if (mmu_idx & ARM_MMU_IDX_M) { + return mmu_idx & ARM_MMU_IDX_M_PRIV; + } + + switch (mmu_idx) { + case ARMMMUIdx_E10_0: + case ARMMMUIdx_E20_0: + case ARMMMUIdx_SE10_0: + return 0; + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + case ARMMMUIdx_SE10_1: + case ARMMMUIdx_SE10_1_PAN: + return 1; + case ARMMMUIdx_E2: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + return 2; + case ARMMMUIdx_SE3: + return 3; + default: + g_assert_not_reached(); + // never reach here + return 0; + } +} + +ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) +{ + if (arm_feature(env, ARM_FEATURE_M)) { + return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); + } + + /* See ARM pseudo-function ELIsInHost. */ + switch (el) { + case 0: + if (arm_is_secure_below_el3(env)) { + return ARMMMUIdx_SE10_0; + } + if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE) + && arm_el_is_aa64(env, 2)) { + return ARMMMUIdx_E20_0; + } + return ARMMMUIdx_E10_0; + case 1: + if (arm_is_secure_below_el3(env)) { + if (env->pstate & PSTATE_PAN) { + return ARMMMUIdx_SE10_1_PAN; + } + return ARMMMUIdx_SE10_1; + } + if (env->pstate & PSTATE_PAN) { + return ARMMMUIdx_E10_1_PAN; + } + return ARMMMUIdx_E10_1; + case 2: + /* TODO: ARMv8.4-SecEL2 */ + /* Note that TGE does not apply at EL2. 
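+ * (Only HCR_EL2.E2H is checked below: when E2H is set and EL2 is
+ * AArch64, EL2 runs in the EL2&0 regime; otherwise the plain EL2
+ * regime applies.)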
*/ + if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) { + if (env->pstate & PSTATE_PAN) { + return ARMMMUIdx_E20_2_PAN; + } + return ARMMMUIdx_E20_2; + } + return ARMMMUIdx_E2; + case 3: + return ARMMMUIdx_SE3; + default: + g_assert_not_reached(); + // never reach here + return 0; + } +} + +ARMMMUIdx arm_mmu_idx(CPUARMState *env) +{ + return arm_mmu_idx_el(env, arm_current_el(env)); +} + +ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) +{ + return stage_1_mmu_idx(arm_mmu_idx(env)); +} + +static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el, + ARMMMUIdx mmu_idx, uint32_t flags) +{ + FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el, flags); + FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, + arm_to_core_mmu_idx(mmu_idx), flags); + + if (arm_singlestep_active(env)) { + FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1, flags); + } + return flags; +} + +static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el, + ARMMMUIdx mmu_idx, uint32_t flags) +{ + bool sctlr_b = arm_sctlr_b(env); + + if (sctlr_b) { + FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1, flags); + } + if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) { + FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1, flags); + } + FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env), flags); + + return rebuild_hflags_common(env, fp_el, mmu_idx, flags); +} + +static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el, + ARMMMUIdx mmu_idx) +{ + uint32_t flags = 0; + + if (arm_v7m_is_handler_mode(env)) { + FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1, flags); + } + + /* + * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN + * is suppressing them because the requested execution priority + * is less than 0. + */ + if (arm_feature(env, ARM_FEATURE_V8) && + !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) && + (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) { + FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1, flags); + } + + return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); +} + +static uint32_t rebuild_hflags_aprofile(CPUARMState *env) +{ + int flags = 0; + + FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, + arm_debug_target_el(env), flags); + return flags; +} + +static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el, + ARMMMUIdx mmu_idx) +{ + uint32_t flags = rebuild_hflags_aprofile(env); + + if (arm_el_is_aa64(env, 1)) { + FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1, flags); + } + + if (arm_current_el(env) < 2 && env->cp15.hstr_el2 && + (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { + FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1, flags); + } + + return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); +} + +static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, + ARMMMUIdx mmu_idx) +{ + uint32_t flags = rebuild_hflags_aprofile(env); + ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); + uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; + uint64_t sctlr; + int tbii, tbid; + + FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1, flags); + + /* Get control bits for tagged addresses. */ + tbid = aa64_va_parameter_tbi(tcr, mmu_idx); + tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx); + + FIELD_DP32(flags, TBFLAG_A64, TBII, tbii, flags); + FIELD_DP32(flags, TBFLAG_A64, TBID, tbid, flags); + + if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { + int sve_el = sve_exception_el(env, el); + uint32_t zcr_len; + + /* + * If SVE is disabled, but FP is enabled, + * then the effective len is 0. 
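+ * (This follows the architectural rule, quoted from DDI0584 next to
+ * aarch64_sve_change_el() below, that ZCR_ELx.LEN has an effective
+ * value of 0 while SVE is disabled or trapped.)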
+ */ + if (sve_el != 0 && fp_el == 0) { + zcr_len = 0; + } else { + zcr_len = sve_zcr_len_for_el(env, el); + } + FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el, flags); + FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len, flags); + } + + sctlr = regime_sctlr(env, stage1); + + if (arm_cpu_data_is_big_endian_a64(el, sctlr)) { + FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1, flags); + } + + if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) { + /* + * In order to save space in flags, we record only whether + * pauth is "inactive", meaning all insns are implemented as + * a nop, or "active" when some action must be performed. + * The decision of which action to take is left to a helper. + */ + if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) { + FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1, flags); + } + } + + if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { + /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */ + if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) { + FIELD_DP32(flags, TBFLAG_A64, BT, 1, flags); + } + } + + /* Compute the condition for using AccType_UNPRIV for LDTR et al. */ + if (!(env->pstate & PSTATE_UAO)) { + switch (mmu_idx) { + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + case ARMMMUIdx_SE10_1: + case ARMMMUIdx_SE10_1_PAN: + /* TODO: ARMv8.3-NV */ + FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1, flags); + break; + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + /* TODO: ARMv8.4-SecEL2 */ + /* + * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is + * gated by HCR_EL2. == '11', and so is LDTR. + */ + if (env->cp15.hcr_el2 & HCR_TGE) { + FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1, flags); + } + break; + default: + break; + } + } + + return rebuild_hflags_common(env, fp_el, mmu_idx, flags); +} + +static uint32_t rebuild_hflags_internal(CPUARMState *env) +{ + int el = arm_current_el(env); + int fp_el = fp_exception_el(env, el); + ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); + + if (is_a64(env)) { + return rebuild_hflags_a64(env, el, fp_el, mmu_idx); + } else if (arm_feature(env, ARM_FEATURE_M)) { + return rebuild_hflags_m32(env, fp_el, mmu_idx); + } else { + return rebuild_hflags_a32(env, fp_el, mmu_idx); + } +} + +void arm_rebuild_hflags(CPUARMState *env) +{ + env->hflags = rebuild_hflags_internal(env); +} + +/* + * If we have triggered a EL state change we can't rely on the + * translator having passed it to us, we need to recompute. + */ +void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env) +{ + int el = arm_current_el(env); + int fp_el = fp_exception_el(env, el); + ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); + env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx); +} + +void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el) +{ + int fp_el = fp_exception_el(env, el); + ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); + + env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx); +} + +/* + * If we have triggered a EL state change we can't rely on the + * translator having passed it to us, we need to recompute. 
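+ * (That is why the *_newel helpers re-derive the EL with
+ * arm_current_el() rather than taking it as an argument like the
+ * plain variants do.)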
+ */ +void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env) +{ + int el = arm_current_el(env); + int fp_el = fp_exception_el(env, el); + ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); + env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); +} + +void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el) +{ + int fp_el = fp_exception_el(env, el); + ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); + + env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); +} + +void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el) +{ + int fp_el = fp_exception_el(env, el); + ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); + + env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx); +} + +static inline void assert_hflags_rebuild_correctly(CPUARMState *env) +{ +#ifdef CONFIG_DEBUG_TCG + uint32_t env_flags_current = env->hflags; + uint32_t env_flags_rebuilt = rebuild_hflags_internal(env); + + if (unlikely(env_flags_current != env_flags_rebuilt)) { + fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n", + env_flags_current, env_flags_rebuilt); + abort(); + } +#endif +} + +void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *pflags) +{ + uint32_t flags = env->hflags; + uint32_t pstate_for_ss; + + *cs_base = 0; + assert_hflags_rebuild_correctly(env); + + if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) { + *pc = env->pc; + if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { + FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype, flags); + } + pstate_for_ss = env->pstate; + } else { + *pc = env->regs[15]; + + if (arm_feature(env, ARM_FEATURE_M)) { + if (arm_feature(env, ARM_FEATURE_M_SECURITY) && + FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) + != env->v7m.secure) { + FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1, flags); + } + + if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && + (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || + (env->v7m.secure && + !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { + /* + * ASPEN is set, but FPCA/SFPA indicate that there is no + * active FP context; we must create a new FP context before + * executing any FP insn. + */ + FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1, flags); + } + + bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; + if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { + FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1, flags); + } + } else { + /* + * Note that XSCALE_CPAR shares bits with VECSTRIDE. + * Note that VECLEN+VECSTRIDE are RES0 for M-profile. + */ + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + FIELD_DP32(flags, TBFLAG_A32, + XSCALE_CPAR, env->cp15.c15_cpar, flags); + } else { + FIELD_DP32(flags, TBFLAG_A32, VECLEN, + env->vfp.vec_len, flags); + FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, + env->vfp.vec_stride, flags); + } + if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { + FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1, flags); + } + } + + FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb, flags); + FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits, flags); + pstate_for_ss = env->uncached_cpsr; + } + + /* + * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine + * states defined in the ARM ARM for software singlestep: + * SS_ACTIVE PSTATE.SS State + * 0 x Inactive (the TB flag for SS is always 0) + * 1 0 Active-pending + * 1 1 Active-not-pending + * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB. 
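+ * (The conjunction below therefore flags exactly the
+ * Active-not-pending state: SS_ACTIVE from the cached hflags, and
+ * PSTATE.SS from the live pstate or CPSR.)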
+ */ + if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) && + (pstate_for_ss & PSTATE_SS)) { + FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1, flags); + } + + *pflags = flags; +} + +#ifdef TARGET_AARCH64 +/* + * The manual says that when SVE is enabled and VQ is widened the + * implementation is allowed to zero the previously inaccessible + * portion of the registers. The corollary to that is that when + * SVE is enabled and VQ is narrowed we are also allowed to zero + * the now inaccessible portion of the registers. + * + * The intent of this is that no predicate bit beyond VQ is ever set. + * Which means that some operations on predicate registers themselves + * may operate on full uint64_t or even unrolled across the maximum + * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally + * may well be cheaper than conditionals to restrict the operation + * to the relevant portion of a uint16_t[16]. + */ +void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) +{ + int i, j; + uint64_t pmask; + + assert(vq >= 1 && vq <= ARM_MAX_VQ); + assert(vq <= env_archcpu(env)->sve_max_vq); + + /* Zap the high bits of the zregs. */ + for (i = 0; i < 32; i++) { + memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); + } + + /* Zap the high bits of the pregs and ffr. */ + pmask = 0; + if (vq & 3) { + pmask = ~(0xffffffffffffffffULL << (16 * (vq & 3))); + } + for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) { + for (i = 0; i < 17; ++i) { + env->vfp.pregs[i].p[j] &= pmask; + } + pmask = 0; + } +} + +/* + * Notice a change in SVE vector size when changing EL. + */ +void aarch64_sve_change_el(CPUARMState *env, int old_el, + int new_el, bool el0_a64) +{ + ARMCPU *cpu = env_archcpu(env); + int old_len, new_len; + bool old_a64, new_a64; + + /* Nothing to do if no SVE. */ + if (!cpu_isar_feature(aa64_sve, cpu)) { + return; + } + + /* Nothing to do if FP is disabled in either EL. */ + if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) { + return; + } + + /* + * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped + * at ELx, or not available because the EL is in AArch32 state, then + * for all purposes other than a direct read, the ZCR_ELx.LEN field + * has an effective value of 0". + * + * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). + * If we ignore aa32 state, we would fail to see the vq4->vq0 transition + * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that + * we already have the correct register contents when encountering the + * vq0->vq0 transition between EL0->EL1. + */ + old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; + old_len = (old_a64 && !sve_exception_el(env, old_el) + ? sve_zcr_len_for_el(env, old_el) : 0); + new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; + new_len = (new_a64 && !sve_exception_el(env, new_el) + ? sve_zcr_len_for_el(env, new_el) : 0); + + /* When changing vector length, clear inaccessible state. 
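/*
 * Worked instance (editor's sketch, hypothetical helper name): each vector
 * quadword contributes 16 predicate bits, so in aarch64_sve_narrow_vq()
 * above the first retained uint64_t of a preg keeps only 16 * (vq & 3)
 * low bits when vq is not a multiple of 4.
 */
static inline uint64_t preg_keep_mask_sketch(unsigned vq)
{
    /* e.g. vq == 2: ~(~0ULL << 32) == 0x00000000ffffffffULL */
    return (vq & 3) ? ~(~0ULL << (16 * (vq & 3))) : 0;
}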
*/ + if (new_len < old_len) { + aarch64_sve_narrow_vq(env, new_len + 1); + } +} +#endif diff --git a/qemu/target-arm/helper.h b/qemu/target/arm/helper.h similarity index 61% rename from qemu/target-arm/helper.h rename to qemu/target/arm/helper.h index 6427c18c..9782f698 100644 --- a/qemu/target-arm/helper.h +++ b/qemu/target/arm/helper.h @@ -1,7 +1,5 @@ DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) -DEF_HELPER_FLAGS_1(clz_arm, TCG_CALL_NO_RWG_SE, i32, i32) - DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32) @@ -10,7 +8,6 @@ DEF_HELPER_3(add_saturate, i32, env, i32, i32) DEF_HELPER_3(sub_saturate, i32, env, i32, i32) DEF_HELPER_3(add_usaturate, i32, env, i32, i32) DEF_HELPER_3(sub_usaturate, i32, env, i32, i32) -DEF_HELPER_2(double_saturate, i32, env, s32) DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32) DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32) @@ -50,34 +47,57 @@ DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32) DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) DEF_HELPER_2(exception_internal, void, env, i32) -DEF_HELPER_3(exception_with_syndrome, void, env, i32, i32) -DEF_HELPER_1(wfi, void, env) +DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32) +DEF_HELPER_2(exception_bkpt_insn, void, env, i32) +DEF_HELPER_1(setend, void, env) +DEF_HELPER_2(wfi, void, env, i32) DEF_HELPER_1(wfe, void, env) +DEF_HELPER_1(yield, void, env) DEF_HELPER_1(pre_hvc, void, env) DEF_HELPER_2(pre_smc, void, env, i32) +DEF_HELPER_1(check_breakpoints, void, env) + DEF_HELPER_3(cpsr_write, void, env, i32, i32) +DEF_HELPER_2(cpsr_write_eret, void, env, i32) DEF_HELPER_1(cpsr_read, i32, env) DEF_HELPER_3(v7m_msr, void, env, i32, i32) DEF_HELPER_2(v7m_mrs, i32, env, i32) -DEF_HELPER_3(access_check_cp_reg, void, env, ptr, i32) +DEF_HELPER_2(v7m_bxns, void, env, i32) +DEF_HELPER_2(v7m_blxns, void, env, i32) + +DEF_HELPER_3(v7m_tt, i32, env, i32, i32) + +DEF_HELPER_1(v7m_preserve_fp_state, void, env) + +DEF_HELPER_2(v7m_vlstm, void, env, i32) +DEF_HELPER_2(v7m_vlldm, void, env, i32) + +DEF_HELPER_2(v8m_stackcheck, void, env, i32) + +DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32) DEF_HELPER_3(set_cp_reg, void, env, ptr, i32) DEF_HELPER_2(get_cp_reg, i32, env, ptr) DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64) DEF_HELPER_2(get_cp_reg64, i64, env, ptr) -DEF_HELPER_3(msr_i_pstate, void, env, i32, i32) -DEF_HELPER_1(clear_pstate_ss, void, env) -DEF_HELPER_1(exception_return, void, env) - DEF_HELPER_2(get_r13_banked, i32, env, i32) DEF_HELPER_3(set_r13_banked, void, env, i32, i32) +DEF_HELPER_3(mrs_banked, i32, env, i32, i32) +DEF_HELPER_4(msr_banked, void, env, i32, i32, i32) + DEF_HELPER_2(get_user_reg, i32, env, i32) DEF_HELPER_3(set_user_reg, void, env, i32, i32) +DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int) +DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int) +DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int) + DEF_HELPER_1(vfp_get_fpscr, i32, env) DEF_HELPER_2(vfp_set_fpscr, void, env, i32) @@ -111,19 +131,25 @@ DEF_HELPER_3(vfp_cmped, void, f64, f64, env) DEF_HELPER_2(vfp_fcvtds, f64, f32, env) DEF_HELPER_2(vfp_fcvtsd, f32, f64, env) +DEF_HELPER_2(vfp_uitoh, f16, i32, ptr) DEF_HELPER_2(vfp_uitos, 
f32, i32, ptr) DEF_HELPER_2(vfp_uitod, f64, i32, ptr) +DEF_HELPER_2(vfp_sitoh, f16, i32, ptr) DEF_HELPER_2(vfp_sitos, f32, i32, ptr) DEF_HELPER_2(vfp_sitod, f64, i32, ptr) +DEF_HELPER_2(vfp_touih, i32, f16, ptr) DEF_HELPER_2(vfp_touis, i32, f32, ptr) DEF_HELPER_2(vfp_touid, i32, f64, ptr) +DEF_HELPER_2(vfp_touizh, i32, f16, ptr) DEF_HELPER_2(vfp_touizs, i32, f32, ptr) DEF_HELPER_2(vfp_touizd, i32, f64, ptr) -DEF_HELPER_2(vfp_tosis, i32, f32, ptr) -DEF_HELPER_2(vfp_tosid, i32, f64, ptr) -DEF_HELPER_2(vfp_tosizs, i32, f32, ptr) -DEF_HELPER_2(vfp_tosizd, i32, f64, ptr) +DEF_HELPER_2(vfp_tosih, s32, f16, ptr) +DEF_HELPER_2(vfp_tosis, s32, f32, ptr) +DEF_HELPER_2(vfp_tosid, s32, f64, ptr) +DEF_HELPER_2(vfp_tosizh, s32, f16, ptr) +DEF_HELPER_2(vfp_tosizs, s32, f32, ptr) +DEF_HELPER_2(vfp_tosizd, s32, f64, ptr) DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, ptr) DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, ptr) @@ -133,6 +159,12 @@ DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, ptr) DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, ptr) DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, ptr) DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_touhh, i32, f16, i32, ptr) +DEF_HELPER_3(vfp_toshh, i32, f16, i32, ptr) +DEF_HELPER_3(vfp_toulh, i32, f16, i32, ptr) +DEF_HELPER_3(vfp_toslh, i32, f16, i32, ptr) +DEF_HELPER_3(vfp_touqh, i64, f16, i32, ptr) +DEF_HELPER_3(vfp_tosqh, i64, f16, i32, ptr) DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr) DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr) DEF_HELPER_3(vfp_tosqs, i64, f32, i32, ptr) @@ -157,29 +189,33 @@ DEF_HELPER_3(vfp_sqtod, f64, i64, i32, ptr) DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr) DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr) DEF_HELPER_3(vfp_uqtod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_sltoh, f16, i32, i32, ptr) +DEF_HELPER_3(vfp_ultoh, f16, i32, i32, ptr) +DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, ptr) +DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, ptr) -DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, env) +DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, ptr) DEF_HELPER_FLAGS_2(set_neon_rmode, TCG_CALL_NO_RWG, i32, i32, env) -DEF_HELPER_2(vfp_fcvt_f16_to_f32, f32, i32, env) -DEF_HELPER_2(vfp_fcvt_f32_to_f16, i32, f32, env) -DEF_HELPER_2(neon_fcvt_f16_to_f32, f32, i32, env) -DEF_HELPER_2(neon_fcvt_f32_to_f16, i32, f32, env) -DEF_HELPER_FLAGS_2(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, i32, env) -DEF_HELPER_FLAGS_2(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, i32, f64, env) +DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, ptr, i32) +DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, ptr, i32) +DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, ptr, i32) +DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, ptr, i32) DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr) DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr) DEF_HELPER_3(recps_f32, f32, f32, f32, env) DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env) +DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, ptr) DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, ptr) DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, ptr) +DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, ptr) DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, ptr) DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, ptr) DEF_HELPER_2(recpe_u32, i32, i32, ptr) DEF_HELPER_FLAGS_2(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32, ptr) -DEF_HELPER_5(neon_tbl, i32, env, i32, i32, i32, i32) 
+DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i32, i32, i32, ptr, i32) DEF_HELPER_3(shl_cc, i32, env, i32, i32) DEF_HELPER_3(shr_cc, i32, env, i32, i32) @@ -191,6 +227,11 @@ DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, ptr) DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr) DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr) +DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env) +DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, ptr) + +DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32) + /* neon_helper.c */ DEF_HELPER_FLAGS_3(neon_qadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(neon_qadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32) @@ -249,18 +290,6 @@ DEF_HELPER_2(neon_cge_s16, i32, i32, i32) DEF_HELPER_2(neon_cge_u32, i32, i32, i32) DEF_HELPER_2(neon_cge_s32, i32, i32, i32) -DEF_HELPER_2(neon_min_u8, i32, i32, i32) -DEF_HELPER_2(neon_min_s8, i32, i32, i32) -DEF_HELPER_2(neon_min_u16, i32, i32, i32) -DEF_HELPER_2(neon_min_s16, i32, i32, i32) -DEF_HELPER_2(neon_min_u32, i32, i32, i32) -DEF_HELPER_2(neon_min_s32, i32, i32, i32) -DEF_HELPER_2(neon_max_u8, i32, i32, i32) -DEF_HELPER_2(neon_max_s8, i32, i32, i32) -DEF_HELPER_2(neon_max_u16, i32, i32, i32) -DEF_HELPER_2(neon_max_s16, i32, i32, i32) -DEF_HELPER_2(neon_max_u32, i32, i32, i32) -DEF_HELPER_2(neon_max_s32, i32, i32, i32) DEF_HELPER_2(neon_pmin_u8, i32, i32, i32) DEF_HELPER_2(neon_pmin_s8, i32, i32, i32) DEF_HELPER_2(neon_pmin_u16, i32, i32, i32) @@ -277,14 +306,8 @@ DEF_HELPER_2(neon_abd_s16, i32, i32, i32) DEF_HELPER_2(neon_abd_u32, i32, i32, i32) DEF_HELPER_2(neon_abd_s32, i32, i32, i32) -DEF_HELPER_2(neon_shl_u8, i32, i32, i32) -DEF_HELPER_2(neon_shl_s8, i32, i32, i32) DEF_HELPER_2(neon_shl_u16, i32, i32, i32) DEF_HELPER_2(neon_shl_s16, i32, i32, i32) -DEF_HELPER_2(neon_shl_u32, i32, i32, i32) -DEF_HELPER_2(neon_shl_s32, i32, i32, i32) -DEF_HELPER_2(neon_shl_u64, i64, i64, i64) -DEF_HELPER_2(neon_shl_s64, i64, i64, i64) DEF_HELPER_2(neon_rshl_u8, i32, i32, i32) DEF_HELPER_2(neon_rshl_s8, i32, i32, i32) DEF_HELPER_2(neon_rshl_u16, i32, i32, i32) @@ -322,8 +345,6 @@ DEF_HELPER_2(neon_sub_u8, i32, i32, i32) DEF_HELPER_2(neon_sub_u16, i32, i32, i32) DEF_HELPER_2(neon_mul_u8, i32, i32, i32) DEF_HELPER_2(neon_mul_u16, i32, i32, i32) -DEF_HELPER_2(neon_mul_p8, i32, i32, i32) -DEF_HELPER_2(neon_mull_p8, i64, i32, i32) DEF_HELPER_2(neon_tst_u8, i32, i32, i32) DEF_HELPER_2(neon_tst_u16, i32, i32, i32) @@ -332,8 +353,6 @@ DEF_HELPER_2(neon_ceq_u8, i32, i32, i32) DEF_HELPER_2(neon_ceq_u16, i32, i32, i32) DEF_HELPER_2(neon_ceq_u32, i32, i32, i32) -DEF_HELPER_1(neon_abs_s8, i32, i32) -DEF_HELPER_1(neon_abs_s16, i32, i32) DEF_HELPER_1(neon_clz_u8, i32, i32) DEF_HELPER_1(neon_clz_u16, i32, i32) DEF_HELPER_1(neon_cls_s8, i32, i32) @@ -344,8 +363,12 @@ DEF_HELPER_FLAGS_1(neon_rbit_u8, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32) DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32) +DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32) +DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32) DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32) DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32) +DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32) +DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32) DEF_HELPER_1(neon_narrow_u8, i32, i64) DEF_HELPER_1(neon_narrow_u16, i32, i64) @@ -502,43 +525,180 @@ DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32) DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32) DEF_HELPER_3(iwmmxt_muladdswl, i64, 
i64, i32, i32) -DEF_HELPER_3(neon_unzip8, void, env, i32, i32) -DEF_HELPER_3(neon_unzip16, void, env, i32, i32) -DEF_HELPER_3(neon_qunzip8, void, env, i32, i32) -DEF_HELPER_3(neon_qunzip16, void, env, i32, i32) -DEF_HELPER_3(neon_qunzip32, void, env, i32, i32) -DEF_HELPER_3(neon_zip8, void, env, i32, i32) -DEF_HELPER_3(neon_zip16, void, env, i32, i32) -DEF_HELPER_3(neon_qzip8, void, env, i32, i32) -DEF_HELPER_3(neon_qzip16, void, env, i32, i32) -DEF_HELPER_3(neon_qzip32, void, env, i32, i32) +DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_4(crypto_aese, void, env, i32, i32, i32) -DEF_HELPER_4(crypto_aesmc, void, env, i32, i32, i32) +DEF_HELPER_FLAGS_3(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_5(crypto_sha1_3reg, void, env, i32, i32, i32, i32) -DEF_HELPER_3(crypto_sha1h, void, env, i32, i32) -DEF_HELPER_3(crypto_sha1su1, void, env, i32, i32) +DEF_HELPER_FLAGS_4(crypto_sha1_3reg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_2(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_4(crypto_sha256h, void, env, i32, i32, i32) -DEF_HELPER_4(crypto_sha256h2, void, env, i32, i32, i32) -DEF_HELPER_3(crypto_sha256su0, void, env, i32, i32) -DEF_HELPER_4(crypto_sha256su1, void, env, i32, i32, i32) +DEF_HELPER_FLAGS_3(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) +DEF_HELPER_FLAGS_3(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) +DEF_HELPER_FLAGS_2(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_3(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) -DEF_HELPER_FLAGS_3(crc32_arm, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) +DEF_HELPER_FLAGS_3(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) +DEF_HELPER_FLAGS_3(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) +DEF_HELPER_FLAGS_2(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_3(crypto_sha512su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_5(crypto_sm3tt, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32) +DEF_HELPER_FLAGS_3(crypto_sm3partw1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) +DEF_HELPER_FLAGS_3(crypto_sm3partw2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_2(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_3(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) -DEF_HELPER_2(dc_zva, void, env, i64) -DEF_HELPER_FLAGS_2(neon_pmull_64_lo, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(neon_pmull_64_hi, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) 
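/*
 * Editor's note (illustration, not part of this header): every DEF_HELPER_*
 * line is expanded by exec/helper-proto.h into a C prototype and by
 * exec/helper-gen.h into a gen_helper_*() emitter for the translator; the
 * flags (e.g. TCG_CALL_NO_RWG_SE) tell TCG the call does not read or write
 * guest memory and has no side effects, so it can be moved or elided.
 * Roughly, the crc32 declaration above corresponds to:
 *
 *   uint32_t helper_crc32(uint32_t acc, uint32_t val, uint32_t bytes);
 */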
+DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) -#ifdef TARGET_ARM -#define helper_clz helper_clz_arm -#define gen_helper_clz gen_helper_clz_arm -#define helper_crc32 helper_crc32_arm -#define gen_helper_crc32 gen_helper_crc32_arm -#endif +DEF_HELPER_FLAGS_4(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_sdot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_udot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sdot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_udot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_fcmlah, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fcmlah_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fcmlas, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fcmlas_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fcmlad, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG, + void, 
ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, ptr) +DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, ptr) +DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, ptr) +DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, ptr) + +DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) #ifdef TARGET_AARCH64 #include "helper-a64.h" +#include "helper-sve.h" #endif diff --git a/qemu/target/arm/internals.h b/qemu/target/arm/internals.h new file mode 100644 index 00000000..9aba020d --- /dev/null +++ b/qemu/target/arm/internals.h @@ -0,0 +1,1228 @@ +/* + * QEMU ARM CPU -- internal functions and types + * + * Copyright (c) 2014 Linaro Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * <http://www.gnu.org/licenses/>. + * + * This header defines functions, types, etc. which need to be shared + * between different source files within target/arm/ but which are + * private to it and not required by the rest of QEMU. + */ + +#ifndef TARGET_ARM_INTERNALS_H +#define TARGET_ARM_INTERNALS_H + +#include "hw/registerfields.h" +struct uc_struct; + +/* register banks for CPU modes */ +#define BANK_USRSYS 0 +#define BANK_SVC 1 +#define BANK_ABT 2 +#define BANK_UND 3 +#define BANK_IRQ 4 +#define BANK_FIQ 5 +#define BANK_HYP 6 +#define BANK_MON 7 + +static inline bool excp_is_internal(int excp) +{ + /* Return true if this exception number represents a QEMU-internal + * exception that will not be passed to the guest. + */ + return excp == EXCP_INTERRUPT + || excp == EXCP_HLT + || excp == EXCP_DEBUG + || excp == EXCP_HALTED + || excp == EXCP_EXCEPTION_EXIT + || excp == EXCP_KERNEL_TRAP + || excp == EXCP_SEMIHOST; +} + +/* Scale factor for generic timers, i.e. number of ns per tick. + * This gives a 62.5MHz timer. + */ +#define GTIMER_SCALE 16 + +/* Bit definitions for the v7M CONTROL register */ +FIELD(V7M_CONTROL, NPRIV, 0, 1) +FIELD(V7M_CONTROL, SPSEL, 1, 1) +FIELD(V7M_CONTROL, FPCA, 2, 1) +FIELD(V7M_CONTROL, SFPA, 3, 1) + +/* Bit definitions for v7M exception return payload */ +FIELD(V7M_EXCRET, ES, 0, 1) +FIELD(V7M_EXCRET, RES0, 1, 1) +FIELD(V7M_EXCRET, SPSEL, 2, 1) +FIELD(V7M_EXCRET, MODE, 3, 1) +FIELD(V7M_EXCRET, FTYPE, 4, 1) +FIELD(V7M_EXCRET, DCRS, 5, 1) +FIELD(V7M_EXCRET, S, 6, 1) +FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */ + +/* Minimum value which is a magic number for exception return */ +#define EXC_RETURN_MIN_MAGIC 0xff000000 +/* Minimum number which is a magic number for function or exception return + * when using v8M security extension + */ +#define FNC_RETURN_MIN_MAGIC 0xfefffffe + +/* We use a few fake FSR values for internal purposes in M profile. + * M profile cores don't have A/R format FSRs, but currently our + * get_phys_addr() code assumes A/R profile and reports failures via + * an A/R format FSR value. We then translate that into the proper + * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt(). + * Mostly the FSR values we use for this are those defined for v7PMSA, + * since we share some of that codepath. A few kinds of fault are + * only for M profile and have no A/R equivalent, though, so we have + * to pick a value from the reserved range (which we never otherwise + * generate) to use for these. + * These values will never be visible to the guest. + */ +#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */ +#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */ + +/** + * raise_exception: Raise the specified exception. + * Raise a guest exception with the specified value, syndrome register + * and target exception level. This should be called from helper functions, + * and never returns because we will longjump back up to the CPU main loop. + */ +void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp, + uint32_t syndrome, uint32_t target_el); + +/* + * Similarly, but also use unwinding to restore cpu state. + */ +void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp, + uint32_t syndrome, uint32_t target_el, + uintptr_t ra); + +/* + * For AArch64, map a given EL to an index in the banked_spsr array.
+ * Note that this mapping and the AArch32 mapping defined in bank_number() + * must agree such that the AArch64<->AArch32 SPSRs have the architecturally + * mandated mapping between each other. + */ +static inline unsigned int aarch64_banked_spsr_index(unsigned int el) +{ + static const unsigned int map[4] = { + [1] = BANK_SVC, /* EL1. */ + [2] = BANK_HYP, /* EL2. */ + [3] = BANK_MON, /* EL3. */ + }; + assert(el >= 1 && el <= 3); + return map[el]; +} + +/* Map CPU modes onto saved register banks. */ +static inline int bank_number(int mode) +{ + switch (mode) { + case ARM_CPU_MODE_USR: + case ARM_CPU_MODE_SYS: + return BANK_USRSYS; + case ARM_CPU_MODE_SVC: + return BANK_SVC; + case ARM_CPU_MODE_ABT: + return BANK_ABT; + case ARM_CPU_MODE_UND: + return BANK_UND; + case ARM_CPU_MODE_IRQ: + return BANK_IRQ; + case ARM_CPU_MODE_FIQ: + return BANK_FIQ; + case ARM_CPU_MODE_HYP: + return BANK_HYP; + case ARM_CPU_MODE_MON: + return BANK_MON; + } + g_assert_not_reached(); + // never reach + return BANK_MON; +} + +/** + * r14_bank_number: Map CPU mode onto register bank for r14 + * + * Given an AArch32 CPU mode, return the index into the saved register + * banks to use for the R14 (LR) in that mode. This is the same as + * bank_number(), except for the special case of Hyp mode, where + * R14 is shared with USR and SYS, unlike its R13 and SPSR. + * This should be used as the index into env->banked_r14[], and + * bank_number() used for the index into env->banked_r13[] and + * env->banked_spsr[]. + */ +static inline int r14_bank_number(int mode) +{ + return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode); +} + +void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); +void arm_translate_init(struct uc_struct *uc); + +enum arm_fprounding { + FPROUNDING_TIEEVEN, + FPROUNDING_POSINF, + FPROUNDING_NEGINF, + FPROUNDING_ZERO, + FPROUNDING_TIEAWAY, + FPROUNDING_ODD +}; + +int arm_rmode_to_sf(int rmode); + +static inline void aarch64_save_sp(CPUARMState *env, int el) +{ + if (env->pstate & PSTATE_SP) { + env->sp_el[el] = env->xregs[31]; + } else { + env->sp_el[0] = env->xregs[31]; + } +} + +static inline void aarch64_restore_sp(CPUARMState *env, int el) +{ + if (env->pstate & PSTATE_SP) { + env->xregs[31] = env->sp_el[el]; + } else { + env->xregs[31] = env->sp_el[0]; + } +} + +static inline void update_spsel(CPUARMState *env, uint32_t imm) +{ + unsigned int cur_el = arm_current_el(env); + /* Update PSTATE SPSel bit; this requires us to update the + * working stack pointer in xregs[31]. + */ + if (!((imm ^ env->pstate) & PSTATE_SP)) { + return; + } + aarch64_save_sp(env, cur_el); + env->pstate = deposit32(env->pstate, 0, 1, imm); + + /* We rely on illegal updates to SPsel from EL0 to get trapped + * at translation time. + */ + assert(cur_el >= 1 && cur_el <= 3); + aarch64_restore_sp(env, cur_el); +} + +/* + * arm_pamax + * @cpu: ARMCPU + * + * Returns the implementation defined bit-width of physical addresses. + * The ARMv8 reference manuals refer to this as PAMax(). + */ +static inline unsigned int arm_pamax(ARMCPU *cpu) +{ + static const unsigned int pamax_map[] = { + [0] = 32, + [1] = 36, + [2] = 40, + [3] = 42, + [4] = 44, + [5] = 48, + }; + unsigned int parange = + FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); + + /* id_aa64mmfr0 is a read-only register so values outside of the + * supported mappings can be considered an implementation error. */ + assert(parange < ARRAY_SIZE(pamax_map)); + return pamax_map[parange]; +} + +/* Return true if extended addresses are enabled. 
+ * This is always the case if our translation regime is 64 bit, + * but depends on TTBCR.EAE for 32 bit. + */ +static inline bool extended_addresses_enabled(CPUARMState *env) +{ + TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1]; + return arm_el_is_aa64(env, 1) || + (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE)); +} + +/* Valid Syndrome Register EC field values */ +enum arm_exception_class { + EC_UNCATEGORIZED = 0x00, + EC_WFX_TRAP = 0x01, + EC_CP15RTTRAP = 0x03, + EC_CP15RRTTRAP = 0x04, + EC_CP14RTTRAP = 0x05, + EC_CP14DTTRAP = 0x06, + EC_ADVSIMDFPACCESSTRAP = 0x07, + EC_FPIDTRAP = 0x08, + EC_PACTRAP = 0x09, + EC_CP14RRTTRAP = 0x0c, + EC_BTITRAP = 0x0d, + EC_ILLEGALSTATE = 0x0e, + EC_AA32_SVC = 0x11, + EC_AA32_HVC = 0x12, + EC_AA32_SMC = 0x13, + EC_AA64_SVC = 0x15, + EC_AA64_HVC = 0x16, + EC_AA64_SMC = 0x17, + EC_SYSTEMREGISTERTRAP = 0x18, + EC_SVEACCESSTRAP = 0x19, + EC_INSNABORT = 0x20, + EC_INSNABORT_SAME_EL = 0x21, + EC_PCALIGNMENT = 0x22, + EC_DATAABORT = 0x24, + EC_DATAABORT_SAME_EL = 0x25, + EC_SPALIGNMENT = 0x26, + EC_AA32_FPTRAP = 0x28, + EC_AA64_FPTRAP = 0x2c, + EC_SERROR = 0x2f, + EC_BREAKPOINT = 0x30, + EC_BREAKPOINT_SAME_EL = 0x31, + EC_SOFTWARESTEP = 0x32, + EC_SOFTWARESTEP_SAME_EL = 0x33, + EC_WATCHPOINT = 0x34, + EC_WATCHPOINT_SAME_EL = 0x35, + EC_AA32_BKPT = 0x38, + EC_VECTORCATCH = 0x3a, + EC_AA64_BKPT = 0x3c, +}; + +#define ARM_EL_EC_SHIFT 26 +#define ARM_EL_IL_SHIFT 25 +#define ARM_EL_ISV_SHIFT 24 +#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT) +#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT) + +static inline uint32_t syn_get_ec(uint32_t syn) +{ + return syn >> ARM_EL_EC_SHIFT; +} + +/* Utility functions for constructing various kinds of syndrome value. + * Note that in general we follow the AArch64 syndrome values; in a + * few cases the value in HSR for exceptions taken to AArch32 Hyp + * mode differs slightly, and we fix this up when populating HSR in + * arm_cpu_do_interrupt_aarch32_hyp(). + * The exception is FP/SIMD access traps -- these report extra information + * when taking an exception to AArch32. For those we include the extra coproc + * and TA fields, and mask them out when taking the exception to AArch64. + */ +static inline uint32_t syn_uncategorized(void) +{ + return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL; +} + +static inline uint32_t syn_aa64_svc(uint32_t imm16) +{ + return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa64_hvc(uint32_t imm16) +{ + return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa64_smc(uint32_t imm16) +{ + return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit) +{ + return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff) + | (is_16bit ? 0 : ARM_EL_IL); +} + +static inline uint32_t syn_aa32_hvc(uint32_t imm16) +{ + return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa32_smc(void) +{ + return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL; +} + +static inline uint32_t syn_aa64_bkpt(uint32_t imm16) +{ + return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit) +{ + return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff) + | (is_16bit ? 
0 : ARM_EL_IL); +} + +static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2, + int crn, int crm, int rt, + int isread) +{ + return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL + | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5) + | (crm << 1) | isread; +} + +static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2, + int crn, int crm, int rt, int isread, + bool is_16bit) +{ + return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT) + | (is_16bit ? 0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14) + | (crn << 10) | (rt << 5) | (crm << 1) | isread; +} + +static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2, + int crn, int crm, int rt, int isread, + bool is_16bit) +{ + return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT) + | (is_16bit ? 0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14) + | (crn << 10) | (rt << 5) | (crm << 1) | isread; +} + +static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm, + int rt, int rt2, int isread, + bool is_16bit) +{ + return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT) + | (is_16bit ? 0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | (opc1 << 16) + | (rt2 << 10) | (rt << 5) | (crm << 1) | isread; +} + +static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm, + int rt, int rt2, int isread, + bool is_16bit) +{ + return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT) + | (is_16bit ? 0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | (opc1 << 16) + | (rt2 << 10) | (rt << 5) | (crm << 1) | isread; +} + +static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit) +{ + /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */ + return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) + | (is_16bit ? 0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | 0xa; +} + +static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit) +{ + /* AArch32 SIMD trap: TA == 1 coproc == 0 */ + return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) + | (is_16bit ? 0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | (1 << 5); +} + +static inline uint32_t syn_sve_access_trap(void) +{ + return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT; +} + +static inline uint32_t syn_pactrap(void) +{ + return EC_PACTRAP << ARM_EL_EC_SHIFT; +} + +static inline uint32_t syn_btitrap(int btype) +{ + return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype; +} + +static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc) +{ + return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc; +} + +static inline uint32_t syn_data_abort_no_iss(int same_el, + int ea, int cm, int s1ptw, + int wnr, int fsc) +{ + return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | ARM_EL_IL + | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc; +} + +static inline uint32_t syn_data_abort_with_iss(int same_el, + int sas, int sse, int srt, + int sf, int ar, + int ea, int cm, int s1ptw, + int wnr, int fsc, + bool is_16bit) +{ + return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | (is_16bit ? 
0 : ARM_EL_IL) + | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16) + | (sf << 15) | (ar << 14) + | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc; +} + +static inline uint32_t syn_swstep(int same_el, int isv, int ex) +{ + return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22; +} + +static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr) +{ + return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22; +} + +static inline uint32_t syn_breakpoint(int same_el) +{ + return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | ARM_EL_IL | 0x22; +} + +static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit) +{ + return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) | + (is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) | + (cv << 24) | (cond << 20) | ti; +} + +/* Update a QEMU watchpoint based on the information the guest has set in the + * DBGWCR_EL1 and DBGWVR_EL1 registers. + */ +void hw_watchpoint_update(ARMCPU *cpu, int n); +/* Update the QEMU watchpoints for every guest watchpoint. This does a + * complete delete-and-reinstate of the QEMU watchpoint list and so is + * suitable for use after migration or on reset. + */ +void hw_watchpoint_update_all(ARMCPU *cpu); +/* Update a QEMU breakpoint based on the information the guest has set in the + * DBGBCR_EL1 and DBGBVR_EL1 registers. + */ +void hw_breakpoint_update(ARMCPU *cpu, int n); +/* Update the QEMU breakpoints for every guest breakpoint. This does a + * complete delete-and-reinstate of the QEMU breakpoint list and so is + * suitable for use after migration or on reset. + */ +void hw_breakpoint_update_all(ARMCPU *cpu); + +/* Callback function for checking if a watchpoint should trigger. */ +bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp); + +/* Adjust addresses (in BE32 mode) before testing against watchpoint + * addresses. + */ +vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len); + +/* Callback function for when a watchpoint or breakpoint triggers. */ +void arm_debug_excp_handler(CPUState *cs); + +/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */ +bool arm_is_psci_call(ARMCPU *cpu, int excp_type); +/* Actually handle a PSCI call */ +void arm_handle_psci_call(ARMCPU *cpu); + +/** + * arm_clear_exclusive: clear the exclusive monitor + * @env: CPU env + * Clear the CPU's exclusive monitor, like the guest CLREX instruction. + */ +static inline void arm_clear_exclusive(CPUARMState *env) +{ + env->exclusive_addr = -1; +} + +/** + * ARMFaultType: type of an ARM MMU fault + * This corresponds to the v8A pseudocode's Fault enumeration, + * with extensions for QEMU internal conditions. 
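/*
 * Worked example (editor's sketch, not from the QEMU sources): composing a
 * syndrome with the builders above. A write data abort taken from a lower
 * EL, without ISS, with fault status code 0x05:
 *
 *   syn_data_abort_no_iss(0, 0, 0, 0, 1, 0x05)
 *       == (EC_DATAABORT << 26) | ARM_EL_IL | (1 << 6) | 0x05
 *
 * and syn_get_ec() applied to that value recovers EC_DATAABORT (0x24).
 */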
+ */ +typedef enum ARMFaultType { + ARMFault_None, + ARMFault_AccessFlag, + ARMFault_Alignment, + ARMFault_Background, + ARMFault_Domain, + ARMFault_Permission, + ARMFault_Translation, + ARMFault_AddressSize, + ARMFault_SyncExternal, + ARMFault_SyncExternalOnWalk, + ARMFault_SyncParity, + ARMFault_SyncParityOnWalk, + ARMFault_AsyncParity, + ARMFault_AsyncExternal, + ARMFault_Debug, + ARMFault_TLBConflict, + ARMFault_Lockdown, + ARMFault_Exclusive, + ARMFault_ICacheMaint, + ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */ + ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */ +} ARMFaultType; + +/** + * ARMMMUFaultInfo: Information describing an ARM MMU Fault + * @type: Type of fault + * @level: Table walk level (for translation, access flag and permission faults) + * @domain: Domain of the fault address (for non-LPAE CPUs only) + * @s2addr: Address that caused a fault at stage 2 + * @stage2: True if we faulted at stage 2 + * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk + * @ea: True if we should set the EA (external abort type) bit in syndrome + */ +typedef struct ARMMMUFaultInfo ARMMMUFaultInfo; +struct ARMMMUFaultInfo { + ARMFaultType type; + target_ulong s2addr; + int level; + int domain; + bool stage2; + bool s1ptw; + bool ea; +}; + +/** + * arm_fi_to_sfsc: Convert fault info struct to short-format FSC + * Compare pseudocode EncodeSDFSC(), though unlike that function + * we set up a whole FSR-format code including domain field and + * putting the high bit of the FSC into bit 10. + */ +static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi) +{ + uint32_t fsc = 0; + + switch (fi->type) { + case ARMFault_None: + return 0; + case ARMFault_AccessFlag: + fsc = fi->level == 1 ? 0x3 : 0x6; + break; + case ARMFault_Alignment: + fsc = 0x1; + break; + case ARMFault_Permission: + fsc = fi->level == 1 ? 0xd : 0xf; + break; + case ARMFault_Domain: + fsc = fi->level == 1 ? 0x9 : 0xb; + break; + case ARMFault_Translation: + fsc = fi->level == 1 ? 0x5 : 0x7; + break; + case ARMFault_SyncExternal: + fsc = 0x8 | (fi->ea << 12); + break; + case ARMFault_SyncExternalOnWalk: + fsc = fi->level == 1 ? 0xc : 0xe; + fsc |= (fi->ea << 12); + break; + case ARMFault_SyncParity: + fsc = 0x409; + break; + case ARMFault_SyncParityOnWalk: + fsc = fi->level == 1 ? 0x40c : 0x40e; + break; + case ARMFault_AsyncParity: + fsc = 0x408; + break; + case ARMFault_AsyncExternal: + fsc = 0x406 | (fi->ea << 12); + break; + case ARMFault_Debug: + fsc = 0x2; + break; + case ARMFault_TLBConflict: + fsc = 0x400; + break; + case ARMFault_Lockdown: + fsc = 0x404; + break; + case ARMFault_Exclusive: + fsc = 0x405; + break; + case ARMFault_ICacheMaint: + fsc = 0x4; + break; + case ARMFault_Background: + fsc = 0x0; + break; + case ARMFault_QEMU_NSCExec: + fsc = M_FAKE_FSR_NSC_EXEC; + break; + case ARMFault_QEMU_SFault: + fsc = M_FAKE_FSR_SFAULT; + break; + default: + /* Other faults can't occur in a context that requires a + * short-format status code. + */ + g_assert_not_reached(); + break; + } + + fsc |= (fi->domain << 4); + return fsc; +} + +/** + * arm_fi_to_lfsc: Convert fault info struct to long-format FSC + * Compare pseudocode EncodeLDFSC(), though unlike that function + * we fill in also the LPAE bit 9 of a DFSR format. 
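/*
 * Worked instance (editor's sketch, not from the QEMU sources): a level-1
 * translation fault in domain 3 encodes as
 *
 *   ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 1, .domain = 3 };
 *   arm_fi_to_sfsc(&fi)  ==  0x5 | (3 << 4)  ==  0x35
 *
 * matching the classic VMSAv7 short-descriptor FSR encoding.
 */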
+ */ +static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi) +{ + uint32_t fsc = 0; + + switch (fi->type) { + case ARMFault_None: + return 0; + case ARMFault_AddressSize: + fsc = fi->level & 3; + break; + case ARMFault_AccessFlag: + fsc = (fi->level & 3) | (0x2 << 2); + break; + case ARMFault_Permission: + fsc = (fi->level & 3) | (0x3 << 2); + break; + case ARMFault_Translation: + fsc = (fi->level & 3) | (0x1 << 2); + break; + case ARMFault_SyncExternal: + fsc = 0x10 | (fi->ea << 12); + break; + case ARMFault_SyncExternalOnWalk: + fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12); + break; + case ARMFault_SyncParity: + fsc = 0x18; + break; + case ARMFault_SyncParityOnWalk: + fsc = (fi->level & 3) | (0x7 << 2); + break; + case ARMFault_AsyncParity: + fsc = 0x19; + break; + case ARMFault_AsyncExternal: + fsc = 0x11 | (fi->ea << 12); + break; + case ARMFault_Alignment: + fsc = 0x21; + break; + case ARMFault_Debug: + fsc = 0x22; + break; + case ARMFault_TLBConflict: + fsc = 0x30; + break; + case ARMFault_Lockdown: + fsc = 0x34; + break; + case ARMFault_Exclusive: + fsc = 0x35; + break; + default: + /* Other faults can't occur in a context that requires a + * long-format status code. + */ + g_assert_not_reached(); + break; + } + + fsc |= 1 << 9; + return fsc; +} + +static inline bool arm_extabort_type(MemTxResult result) +{ + /* The EA bit in syndromes and fault status registers is an + * IMPDEF classification of external aborts. ARM implementations + * usually use this to indicate AXI bus Decode error (0) or + * Slave error (1); in QEMU we follow that. + */ + return result != MEMTX_DECODE_ERROR; +} + +bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + +static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx) +{ + return mmu_idx & ARM_MMU_IDX_COREIDX_MASK; +} + +static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx) +{ + if (arm_feature(env, ARM_FEATURE_M)) { + return mmu_idx | ARM_MMU_IDX_M; + } else { + return mmu_idx | ARM_MMU_IDX_A; + } +} + +static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx) +{ + /* AArch64 is always a-profile. */ + return mmu_idx | ARM_MMU_IDX_A; +} + +int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx); + +/* + * Return the MMU index for a v7M CPU with all relevant information + * manually specified. + */ +ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env, + bool secstate, bool priv, bool negpri); + +/* + * Return the MMU index for a v7M CPU in the specified security and + * privilege state. 
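/*
 * Illustration (editor's sketch, not from the QEMU sources): the core/ARM
 * mmu_idx conversions above are a round trip through the low bits; for an
 * A-profile env:
 *
 *   int core = arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
 *   core_to_arm_mmu_idx(env, core) == ARMMMUIdx_E10_0   // ARM_MMU_IDX_A re-added
 */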
+ */ +ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env, + bool secstate, bool priv); + +/* Return the MMU index for a v7M CPU in the specified security state */ +ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate); + +/* Return true if the stage 1 translation regime is using LPAE format page + * tables */ +bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx); + +/* Raise a data fault alignment exception for the specified virtual address */ +void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); + +/* arm_cpu_do_transaction_failed: handle a memory system error response + * (eg "no device/memory present at address") by raising an external abort + * exception + */ +void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr); + +/* Call any registered EL change hooks */ +static inline void arm_call_pre_el_change_hook(ARMCPU *cpu) +{ + ARMELChangeHook *hook, *next; + QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) { + hook->hook(cpu, hook->opaque); + } +} +static inline void arm_call_el_change_hook(ARMCPU *cpu) +{ + ARMELChangeHook *hook, *next; + QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) { + hook->hook(cpu, hook->opaque); + } +} + +/* Return true if this address translation regime has two ranges. */ +static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx) +{ + switch (mmu_idx) { + case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_Stage1_E1: + case ARMMMUIdx_Stage1_E1_PAN: + case ARMMMUIdx_E10_0: + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + case ARMMMUIdx_SE10_0: + case ARMMMUIdx_SE10_1: + case ARMMMUIdx_SE10_1_PAN: + return true; + default: + return false; + } +} + +/* Return true if this address translation regime is secure */ +static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + switch (mmu_idx) { + case ARMMMUIdx_E10_0: + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_Stage1_E1: + case ARMMMUIdx_Stage1_E1_PAN: + case ARMMMUIdx_E2: + case ARMMMUIdx_Stage2: + case ARMMMUIdx_MPrivNegPri: + case ARMMMUIdx_MUserNegPri: + case ARMMMUIdx_MPriv: + case ARMMMUIdx_MUser: + return false; + case ARMMMUIdx_SE3: + case ARMMMUIdx_SE10_0: + case ARMMMUIdx_SE10_1: + case ARMMMUIdx_SE10_1_PAN: + case ARMMMUIdx_MSPrivNegPri: + case ARMMMUIdx_MSUserNegPri: + case ARMMMUIdx_MSPriv: + case ARMMMUIdx_MSUser: + return true; + default: + g_assert_not_reached(); + // never reach here + return true; + } +} + +static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx) +{ + switch (mmu_idx) { + case ARMMMUIdx_Stage1_E1_PAN: + case ARMMMUIdx_E10_1_PAN: + case ARMMMUIdx_E20_2_PAN: + case ARMMMUIdx_SE10_1_PAN: + return true; + default: + return false; + } +} + +/* Return the FSR value for a debug exception (watchpoint, hardware + * breakpoint or BKPT insn) targeting the specified exception level. 
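/*
 * Illustration (editor's sketch, following the shape of aa64_va_parameters(),
 * not a verbatim excerpt): in a two-range (TTBR0/TTBR1-style) regime the
 * translation table is selected by the top VA bit:
 *
 *   unsigned select = regime_has_2_ranges(mmu_idx)
 *                     ? extract64(va, 55, 1)   // bit 55 picks TTBR1 vs TTBR0
 *                     : 0;                     // single range: TTBR0 only
 */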
+ */ +static inline uint32_t arm_debug_exception_fsr(CPUARMState *env) +{ + ARMMMUFaultInfo fi = { .type = ARMFault_Debug }; + int target_el = arm_debug_target_el(env); + bool using_lpae = false; + + if (target_el == 2 || arm_el_is_aa64(env, target_el)) { + using_lpae = true; + } else { + if (arm_feature(env, ARM_FEATURE_LPAE) && + (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) { + using_lpae = true; + } + } + + if (using_lpae) { + return arm_fi_to_lfsc(&fi); + } else { + return arm_fi_to_sfsc(&fi); + } +} + +/** + * arm_num_brps: Return number of implemented breakpoints. + * Note that the ID register BRPS field is "number of bps - 1", + * and we return the actual number of breakpoints. + */ +static inline int arm_num_brps(ARMCPU *cpu) +{ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1; + } else { + return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1; + } +} + +/** + * arm_num_wrps: Return number of implemented watchpoints. + * Note that the ID register WRPS field is "number of wps - 1", + * and we return the actual number of watchpoints. + */ +static inline int arm_num_wrps(ARMCPU *cpu) +{ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1; + } else { + return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1; + } +} + +/** + * arm_num_ctx_cmps: Return number of implemented context comparators. + * Note that the ID register CTX_CMPS field is "number of cmps - 1", + * and we return the actual number of comparators. + */ +static inline int arm_num_ctx_cmps(ARMCPU *cpu) +{ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1; + } else { + return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1; + } +} + +/* Note make_memop_idx reserves 4 bits for mmu_idx, and MO_BSWAP is bit 3. + * Thus a TCGMemOpIdx, without any MO_ALIGN bits, fits in 8 bits. + */ +#define MEMOPIDX_SHIFT 8 + +/** + * v7m_using_psp: Return true if using process stack pointer + * Return true if the CPU is currently using the process stack + * pointer, or false if it is using the main stack pointer. + */ +static inline bool v7m_using_psp(CPUARMState *env) +{ + /* Handler mode always uses the main stack; for thread mode + * the CONTROL.SPSEL bit determines the answer. + * Note that in v7M it is not possible to be in Handler mode with + * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both. + */ + return !arm_v7m_is_handler_mode(env) && + env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; +} + +/** + * v7m_sp_limit: Return SP limit for current CPU state + * Return the SP limit value for the current CPU security state + * and stack pointer. + */ +static inline uint32_t v7m_sp_limit(CPUARMState *env) +{ + if (v7m_using_psp(env)) { + return env->v7m.psplim[env->v7m.secure]; + } else { + return env->v7m.msplim[env->v7m.secure]; + } +} + +/** + * v7m_cpacr_pass: + * Return true if the v7M CPACR permits access to the FPU for the specified + * security state and privilege level. 
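/*
 * Illustration (editor's sketch, not a verbatim excerpt): a typical consumer
 * of v7m_sp_limit() is the v8M stack-limit check (helper v8m_stackcheck,
 * declared in helper.h); schematically, before committing a stack push:
 *
 *   if (new_sp < v7m_sp_limit(env)) {
 *       // v8M: raise a UsageFault with the STKOF (stack overflow) bit set
 *   }
 */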
+ */ +static inline bool v7m_cpacr_pass(CPUARMState *env, + bool is_secure, bool is_priv) +{ + switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) { + case 0: + case 2: /* UNPREDICTABLE: we treat like 0 */ + return false; + case 1: + return is_priv; + case 3: + return true; + default: + g_assert_not_reached(); + // never reach here + return true; + } +} + +/** + * aarch32_mode_name(): Return name of the AArch32 CPU mode + * @psr: Program Status Register indicating CPU mode + * + * Returns, for debug logging purposes, a printable representation + * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by + * the low bits of the specified PSR. + */ +static inline const char *aarch32_mode_name(uint32_t psr) +{ + static const char cpu_mode_names[16][4] = { + "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt", + "???", "???", "hyp", "und", "???", "???", "???", "sys" + }; + + return cpu_mode_names[psr & 0xf]; +} + +/** + * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request + * + * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following + * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit. + * Must be called with the iothread lock held. + */ +void arm_cpu_update_virq(ARMCPU *cpu); + +/** + * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request + * + * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following + * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit. + * Must be called with the iothread lock held. + */ +void arm_cpu_update_vfiq(ARMCPU *cpu); + +/** + * arm_mmu_idx_el: + * @env: The cpu environment + * @el: The EL to use. + * + * Return the full ARMMMUIdx for the translation regime for EL. + */ +ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el); + +/** + * arm_mmu_idx: + * @env: The cpu environment + * + * Return the full ARMMMUIdx for the current translation regime. + */ +ARMMMUIdx arm_mmu_idx(CPUARMState *env); + +/** + * arm_stage1_mmu_idx: + * @env: The cpu environment + * + * Return the ARMMMUIdx for the stage1 traversal for the current regime. + */ +ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env); + +/** + * arm_mmu_idx_is_stage1_of_2: + * @mmu_idx: The ARMMMUIdx to test + * + * Return true if @mmu_idx is a NOTLB mmu_idx that is the + * first stage of a two stage regime. 
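/*
 * Worked instance (editor's note): a CPSR value of 0x600001d3 has mode bits
 * 0x13 (Supervisor), whose low nibble is 0x3, so
 * aarch32_mode_name(0x600001d3) returns "svc".
 */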
+ */ +static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx) +{ + switch (mmu_idx) { + case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_Stage1_E1: + case ARMMMUIdx_Stage1_E1_PAN: + return true; + default: + return false; + } +} + +static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features, + const ARMISARegisters *id) +{ + uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV; + + if ((features >> ARM_FEATURE_V4T) & 1) { + valid |= CPSR_T; + } + if ((features >> ARM_FEATURE_V5) & 1) { + valid |= CPSR_Q; /* V5TE in reality*/ + } + if ((features >> ARM_FEATURE_V6) & 1) { + valid |= CPSR_E | CPSR_GE; + } + if ((features >> ARM_FEATURE_THUMB2) & 1) { + valid |= CPSR_IT; + } + if (isar_feature_aa32_jazelle(id)) { + valid |= CPSR_J; + } + if (isar_feature_aa32_pan(id)) { + valid |= CPSR_PAN; + } + + return valid; +} + +static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id) +{ + uint32_t valid; + + valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV; + if (isar_feature_aa64_bti(id)) { + valid |= PSTATE_BTYPE; + } + if (isar_feature_aa64_pan(id)) { + valid |= PSTATE_PAN; + } + if (isar_feature_aa64_uao(id)) { + valid |= PSTATE_UAO; + } + + return valid; +} + +/* + * Parameters of a given virtual address, as extracted from the + * translation control register (TCR) for a given regime. + */ +typedef struct ARMVAParameters { + unsigned tsz : 8; + unsigned select : 1; + bool tbi : 1; + bool epd : 1; + bool hpd : 1; + bool using16k : 1; + bool using64k : 1; +} ARMVAParameters; + +ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, + ARMMMUIdx mmu_idx, bool data); + +static inline int exception_target_el(CPUARMState *env) +{ + int target_el = MAX(1, arm_current_el(env)); + + /* + * No such thing as secure EL1 if EL3 is aarch32, + * so update the target EL to EL3 in this case. + */ + if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) { + target_el = 3; + } + + return target_el; +} + +/* Security attributes for an address, as returned by v8m_security_lookup. 
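/*
 * Illustration (editor's sketch, not a verbatim excerpt): these masks are
 * applied before committing a new CPSR/PSTATE so that bits outside the
 * implemented feature set stay RES0; schematically, in a CPSR write path:
 *
 *   uint32_t mask = aarch32_cpsr_valid_mask(env->features,
 *                                           &env_archcpu(env)->isar);
 *   val &= mask;   // discard bits this CPU does not implement
 */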
 */ +typedef struct V8M_SAttributes { + bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */ + bool ns; + bool nsc; + uint8_t sregion; + bool srvalid; + uint8_t iregion; + bool irvalid; +} V8M_SAttributes; + +void v8m_security_lookup(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + V8M_SAttributes *sattrs); + +bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, MemTxAttrs *txattrs, + int *prot, bool *is_subpage, + ARMMMUFaultInfo *fi, uint32_t *mregion); + +/* Cacheability and shareability attributes for a memory access */ +typedef struct ARMCacheAttrs { + unsigned int attrs:8; /* as in the MAIR register encoding */ + unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */ +} ARMCacheAttrs; + +bool get_phys_addr(CPUARMState *env, target_ulong address, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, + target_ulong *page_size, + ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); + +void arm_log_exception(int idx); + +#endif diff --git a/qemu/target-arm/iwmmxt_helper.c b/qemu/target/arm/iwmmxt_helper.c similarity index 73% rename from qemu/target-arm/iwmmxt_helper.c rename to qemu/target/arm/iwmmxt_helper.c index a5069144..24244d01 100644 --- a/qemu/target-arm/iwmmxt_helper.c +++ b/qemu/target/arm/iwmmxt_helper.c @@ -19,40 +19,38 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ -#include <stdlib.h> -#include <stdio.h> +#include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" /* iwMMXt macros extracted from GNU gdb. */ /* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */ -#define SIMD8_SET( v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n))) -#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n))) -#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n))) -#define SIMD64_SET(v, n) ((v != 0) << (32 + (n))) +#define SIMD8_SET(v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n))) +#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n))) +#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n))) +#define SIMD64_SET(v, n) ((v != 0) << (32 + (n))) /* Flags to pass as "n" above. */ -#define SIMD_NBIT -1 -#define SIMD_ZBIT -2 -#define SIMD_CBIT -3 -#define SIMD_VBIT -4 +#define SIMD_NBIT -1 +#define SIMD_ZBIT -2 +#define SIMD_CBIT -3 +#define SIMD_VBIT -4 /* Various status bit macros. */ -#define NBIT8(x) ((x) & 0x80) -#define NBIT16(x) ((x) & 0x8000) -#define NBIT32(x) ((x) & 0x80000000) -#define NBIT64(x) ((x) & 0x8000000000000000ULL) -#define ZBIT8(x) (((x) & 0xff) == 0) -#define ZBIT16(x) (((x) & 0xffff) == 0) -#define ZBIT32(x) (((x) & 0xffffffff) == 0) -#define ZBIT64(x) (x == 0) +#define NBIT8(x) ((x) & 0x80) +#define NBIT16(x) ((x) & 0x8000) +#define NBIT32(x) ((x) & 0x80000000) +#define NBIT64(x) ((x) & 0x8000000000000000ULL) +#define ZBIT8(x) (((x) & 0xff) == 0) +#define ZBIT16(x) (((x) & 0xffff) == 0) +#define ZBIT32(x) (((x) & 0xffffffff) == 0) +#define ZBIT64(x) (x == 0) /* Sign extension macros.
*/ -#define EXTEND8H(a) ((uint16_t) (int8_t) (a)) -#define EXTEND8(a) ((uint32_t) (int8_t) (a)) -#define EXTEND16(a) ((uint32_t) (int16_t) (a)) -#define EXTEND16S(a) ((int32_t) (int16_t) (a)) -#define EXTEND32(a) ((uint64_t) (int32_t) (a)) +#define EXTEND8H(a) ((uint16_t) (int8_t) (a)) +#define EXTEND8(a) ((uint32_t) (int8_t) (a)) +#define EXTEND16(a) ((uint32_t) (int16_t) (a)) +#define EXTEND16S(a) ((int32_t) (int16_t) (a)) +#define EXTEND32(a) ((uint64_t) (int32_t) (a)) uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b) { @@ -161,141 +159,141 @@ uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b) #define NZBIT64(x) \ SIMD64_SET(NBIT64(x), SIMD_NBIT) | \ SIMD64_SET(ZBIT64(x), SIMD_ZBIT) -#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \ +#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ -{ \ - a = \ - (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \ - (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \ - (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \ - (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ - NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ - NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ - NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ +{ \ + a = \ + (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \ + (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \ + (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \ + (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ + NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ + NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ return a; \ -} \ +} \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ -{ \ - a = \ - (((a >> SH0) & 0xffff) << 0) | \ - (((b >> SH0) & 0xffff) << 16) | \ - (((a >> SH2) & 0xffff) << 32) | \ - (((b >> SH2) & 0xffff) << 48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | \ - NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \ +{ \ + a = \ + (((a >> SH0) & 0xffff) << 0) | \ + (((b >> SH0) & 0xffff) << 16) | \ + (((a >> SH2) & 0xffff) << 32) | \ + (((b >> SH2) & 0xffff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | \ + NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \ return a; \ -} \ +} \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ -{ \ - a = \ - (((a >> SH0) & 0xffffffff) << 0) | \ - (((b >> SH0) & 0xffffffff) << 32); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ +{ \ + a = \ + (((a >> SH0) & 0xffffffff) << 0) | \ + (((b >> SH0) & 0xffffffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ return a; \ -} \ +} \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState *env, \ uint64_t x) \ -{ \ - x = \ - (((x >> SH0) & 0xff) << 0) | \ - (((x >> SH1) & 0xff) << 16) | \ - (((x >> SH2) & 0xff) << 32) | \ - (((x >> SH3) & 0xff) << 48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ +{ \ + x = \ + (((x >> SH0) & 0xff) << 0) | \ + (((x >> SH1) & 0xff) << 16) | \ + (((x >> SH2) & 0xff) << 32) | \ + (((x 
>> SH3) & 0xff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ return x; \ -} \ +} \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState *env, \ uint64_t x) \ -{ \ - x = \ - (((x >> SH0) & 0xffff) << 0) | \ - (((x >> SH2) & 0xffff) << 32); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ +{ \ + x = \ + (((x >> SH0) & 0xffff) << 0) | \ + (((x >> SH2) & 0xffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ return x; \ -} \ +} \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState *env, \ uint64_t x) \ -{ \ - x = (((x >> SH0) & 0xffffffff) << 0); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ +{ \ + x = (((x >> SH0) & 0xffffffff) << 0); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ return x; \ -} \ +} \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState *env, \ uint64_t x) \ -{ \ - x = \ - ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \ - ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \ - ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \ - ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ +{ \ + x = \ + ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \ + ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \ + ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \ + ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ return x; \ -} \ +} \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState *env, \ uint64_t x) \ -{ \ - x = \ - ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \ - ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ +{ \ + x = \ + ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \ + ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ return x; \ -} \ +} \ uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState *env, \ uint64_t x) \ -{ \ - x = EXTEND32((x >> SH0) & 0xffffffff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ +{ \ + x = EXTEND32((x >> SH0) & 0xffffffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ return x; \ } IWMMXT_OP_UNPACK(l, 0, 8, 16, 24) IWMMXT_OP_UNPACK(h, 32, 40, 48, 56) -#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \ +#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \ uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ -{ \ - a = \ - CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \ - CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \ - CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \ - CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ - NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ - NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ - NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ +{ \ + a = \ + CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \ + CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \ + CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \ + CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + 
NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ + NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ + NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ return a; \ -} \ +} \ uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ -{ \ - a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \ - CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \ - NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \ +{ \ + a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \ + CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \ + NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \ return a; \ -} \ +} \ uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState *env, \ uint64_t a, uint64_t b) \ -{ \ - a = CMP(0, Tl, O, 0xffffffff) | \ - CMP(32, Tl, O, 0xffffffff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ +{ \ + a = CMP(0, Tl, O, 0xffffffff) | \ + CMP(32, Tl, O, 0xffffffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ return a; \ } #define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ diff --git a/qemu/target-arm/kvm-consts.h b/qemu/target/arm/kvm-consts.h similarity index 76% rename from qemu/target-arm/kvm-consts.h rename to qemu/target/arm/kvm-consts.h index aea12f1b..aad28258 100644 --- a/qemu/target-arm/kvm-consts.h +++ b/qemu/target/arm/kvm-consts.h @@ -15,14 +15,15 @@ #define ARM_KVM_CONSTS_H #ifdef CONFIG_KVM -#include "qemu/compiler.h" #include <linux/kvm.h> #include <linux/psci.h> #define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(X != Y) #else - -#define MISMATCH_CHECK(X, Y) + +#define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(0) + #endif #define CP_REG_SIZE_SHIFT 52 @@ -32,12 +33,12 @@ #define CP_REG_ARM 0x4000000000000000ULL #define CP_REG_ARCH_MASK 0xff00000000000000ULL -MISMATCH_CHECK(CP_REG_SIZE_SHIFT, KVM_REG_SIZE_SHIFT) -MISMATCH_CHECK(CP_REG_SIZE_MASK, KVM_REG_SIZE_MASK) -MISMATCH_CHECK(CP_REG_SIZE_U32, KVM_REG_SIZE_U32) -MISMATCH_CHECK(CP_REG_SIZE_U64, KVM_REG_SIZE_U64) -MISMATCH_CHECK(CP_REG_ARM, KVM_REG_ARM) -MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK) +MISMATCH_CHECK(CP_REG_SIZE_SHIFT, KVM_REG_SIZE_SHIFT); +MISMATCH_CHECK(CP_REG_SIZE_MASK, KVM_REG_SIZE_MASK); +MISMATCH_CHECK(CP_REG_SIZE_U32, KVM_REG_SIZE_U32); +MISMATCH_CHECK(CP_REG_SIZE_U64, KVM_REG_SIZE_U64); +MISMATCH_CHECK(CP_REG_ARM, KVM_REG_ARM); +MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK); #define QEMU_PSCI_0_1_FN_BASE 0x95c1ba5e #define QEMU_PSCI_0_1_FN(n) (QEMU_PSCI_0_1_FN_BASE + (n)) @@ -46,10 +47,10 @@ MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK) #define QEMU_PSCI_0_1_FN_CPU_ON QEMU_PSCI_0_1_FN(2) #define QEMU_PSCI_0_1_FN_MIGRATE QEMU_PSCI_0_1_FN(3) -MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_SUSPEND, KVM_PSCI_FN_CPU_SUSPEND) -MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_OFF, KVM_PSCI_FN_CPU_OFF) -MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_ON, KVM_PSCI_FN_CPU_ON) -MISMATCH_CHECK(QEMU_PSCI_0_1_FN_MIGRATE, KVM_PSCI_FN_MIGRATE) +MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_SUSPEND, KVM_PSCI_FN_CPU_SUSPEND); +MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_OFF, KVM_PSCI_FN_CPU_OFF); +MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_ON, KVM_PSCI_FN_CPU_ON); +MISMATCH_CHECK(QEMU_PSCI_0_1_FN_MIGRATE, KVM_PSCI_FN_MIGRATE); #define QEMU_PSCI_0_2_FN_BASE 0x84000000 #define QEMU_PSCI_0_2_FN(n) (QEMU_PSCI_0_2_FN_BASE + (n)) @@ -76,13 +77,13 @@
MISMATCH_CHECK(QEMU_PSCI_0_1_FN_MIGRATE, KVM_PSCI_FN_MIGRATE) #define QEMU_PSCI_0_2_FN64_AFFINITY_INFO QEMU_PSCI_0_2_FN64(4) #define QEMU_PSCI_0_2_FN64_MIGRATE QEMU_PSCI_0_2_FN64(5) -MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_SUSPEND, PSCI_0_2_FN_CPU_SUSPEND) -MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_OFF, PSCI_0_2_FN_CPU_OFF) -MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_ON, PSCI_0_2_FN_CPU_ON) -MISMATCH_CHECK(QEMU_PSCI_0_2_FN_MIGRATE, PSCI_0_2_FN_MIGRATE) -MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_SUSPEND, PSCI_0_2_FN64_CPU_SUSPEND) -MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_ON, PSCI_0_2_FN64_CPU_ON) -MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE) +MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_SUSPEND, PSCI_0_2_FN_CPU_SUSPEND); +MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_OFF, PSCI_0_2_FN_CPU_OFF); +MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_ON, PSCI_0_2_FN_CPU_ON); +MISMATCH_CHECK(QEMU_PSCI_0_2_FN_MIGRATE, PSCI_0_2_FN_MIGRATE); +MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_SUSPEND, PSCI_0_2_FN64_CPU_SUSPEND); +MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_ON, PSCI_0_2_FN64_CPU_ON); +MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE); /* PSCI v0.2 return values used by TCG emulation of PSCI */ @@ -92,9 +93,9 @@ MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE) /* We implement version 0.2 only */ #define QEMU_PSCI_0_2_RET_VERSION_0_2 2 -MISMATCH_CHECK(QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED, PSCI_0_2_TOS_MP) +MISMATCH_CHECK(QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED, PSCI_0_2_TOS_MP); MISMATCH_CHECK(QEMU_PSCI_0_2_RET_VERSION_0_2, - (PSCI_VERSION_MAJOR(0) | PSCI_VERSION_MINOR(2))) + (PSCI_VERSION_MAJOR(0) | PSCI_VERSION_MINOR(2))); /* PSCI return values (inclusive of all PSCI versions) */ #define QEMU_PSCI_RET_SUCCESS 0 @@ -107,15 +108,15 @@ MISMATCH_CHECK(QEMU_PSCI_0_2_RET_VERSION_0_2, #define QEMU_PSCI_RET_NOT_PRESENT -7 #define QEMU_PSCI_RET_DISABLED -8 -MISMATCH_CHECK(QEMU_PSCI_RET_SUCCESS, PSCI_RET_SUCCESS) -MISMATCH_CHECK(QEMU_PSCI_RET_NOT_SUPPORTED, PSCI_RET_NOT_SUPPORTED) -MISMATCH_CHECK(QEMU_PSCI_RET_INVALID_PARAMS, PSCI_RET_INVALID_PARAMS) -MISMATCH_CHECK(QEMU_PSCI_RET_DENIED, PSCI_RET_DENIED) -MISMATCH_CHECK(QEMU_PSCI_RET_ALREADY_ON, PSCI_RET_ALREADY_ON) -MISMATCH_CHECK(QEMU_PSCI_RET_ON_PENDING, PSCI_RET_ON_PENDING) -MISMATCH_CHECK(QEMU_PSCI_RET_INTERNAL_FAILURE, PSCI_RET_INTERNAL_FAILURE) -MISMATCH_CHECK(QEMU_PSCI_RET_NOT_PRESENT, PSCI_RET_NOT_PRESENT) -MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED) +MISMATCH_CHECK(QEMU_PSCI_RET_SUCCESS, PSCI_RET_SUCCESS); +MISMATCH_CHECK(QEMU_PSCI_RET_NOT_SUPPORTED, PSCI_RET_NOT_SUPPORTED); +MISMATCH_CHECK(QEMU_PSCI_RET_INVALID_PARAMS, PSCI_RET_INVALID_PARAMS); +MISMATCH_CHECK(QEMU_PSCI_RET_DENIED, PSCI_RET_DENIED); +MISMATCH_CHECK(QEMU_PSCI_RET_ALREADY_ON, PSCI_RET_ALREADY_ON); +MISMATCH_CHECK(QEMU_PSCI_RET_ON_PENDING, PSCI_RET_ON_PENDING); +MISMATCH_CHECK(QEMU_PSCI_RET_INTERNAL_FAILURE, PSCI_RET_INTERNAL_FAILURE); +MISMATCH_CHECK(QEMU_PSCI_RET_NOT_PRESENT, PSCI_RET_NOT_PRESENT); +MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED); /* Note that KVM uses overlapping values for AArch32 and AArch64 * target CPU numbers. 
AArch32 targets: @@ -127,6 +128,8 @@ MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED) #define QEMU_KVM_ARM_TARGET_AEM_V8 0 #define QEMU_KVM_ARM_TARGET_FOUNDATION_V8 1 #define QEMU_KVM_ARM_TARGET_CORTEX_A57 2 +#define QEMU_KVM_ARM_TARGET_XGENE_POTENZA 3 +#define QEMU_KVM_ARM_TARGET_CORTEX_A53 4 /* There's no kernel define for this: sentinel value which * matches no KVM target value for either 64 or 32 bit @@ -134,12 +137,14 @@ MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED) #define QEMU_KVM_ARM_TARGET_NONE UINT_MAX #ifdef TARGET_AARCH64 -MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_AEM_V8, KVM_ARM_TARGET_AEM_V8) -MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_FOUNDATION_V8, KVM_ARM_TARGET_FOUNDATION_V8) -MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A57, KVM_ARM_TARGET_CORTEX_A57) +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_AEM_V8, KVM_ARM_TARGET_AEM_V8); +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_FOUNDATION_V8, KVM_ARM_TARGET_FOUNDATION_V8); +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A57, KVM_ARM_TARGET_CORTEX_A57); +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_XGENE_POTENZA, KVM_ARM_TARGET_XGENE_POTENZA); +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A53, KVM_ARM_TARGET_CORTEX_A53); #else -MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A15, KVM_ARM_TARGET_CORTEX_A15) -MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A7, KVM_ARM_TARGET_CORTEX_A7) +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A15, KVM_ARM_TARGET_CORTEX_A15); +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A7, KVM_ARM_TARGET_CORTEX_A7); #endif #define CP_REG_ARM64 0x6000000000000000ULL @@ -161,20 +166,20 @@ MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A7, KVM_ARM_TARGET_CORTEX_A7) #define CP_REG_ARM64_SYSREG_CP (CP_REG_ARM64_SYSREG >> CP_REG_ARM_COPROC_SHIFT) #ifdef TARGET_AARCH64 -MISMATCH_CHECK(CP_REG_ARM64, KVM_REG_ARM64) -MISMATCH_CHECK(CP_REG_ARM_COPROC_MASK, KVM_REG_ARM_COPROC_MASK) -MISMATCH_CHECK(CP_REG_ARM_COPROC_SHIFT, KVM_REG_ARM_COPROC_SHIFT) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG, KVM_REG_ARM64_SYSREG) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_MASK, KVM_REG_ARM64_SYSREG_OP0_MASK) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_SHIFT, KVM_REG_ARM64_SYSREG_OP0_SHIFT) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_MASK, KVM_REG_ARM64_SYSREG_OP1_MASK) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_SHIFT, KVM_REG_ARM64_SYSREG_OP1_SHIFT) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_MASK, KVM_REG_ARM64_SYSREG_CRN_MASK) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_SHIFT, KVM_REG_ARM64_SYSREG_CRN_SHIFT) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_MASK, KVM_REG_ARM64_SYSREG_CRM_MASK) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_SHIFT, KVM_REG_ARM64_SYSREG_CRM_SHIFT) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_MASK, KVM_REG_ARM64_SYSREG_OP2_MASK) -MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_SHIFT, KVM_REG_ARM64_SYSREG_OP2_SHIFT) +MISMATCH_CHECK(CP_REG_ARM64, KVM_REG_ARM64); +MISMATCH_CHECK(CP_REG_ARM_COPROC_MASK, KVM_REG_ARM_COPROC_MASK); +MISMATCH_CHECK(CP_REG_ARM_COPROC_SHIFT, KVM_REG_ARM_COPROC_SHIFT); +MISMATCH_CHECK(CP_REG_ARM64_SYSREG, KVM_REG_ARM64_SYSREG); +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_MASK, KVM_REG_ARM64_SYSREG_OP0_MASK); +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_SHIFT, KVM_REG_ARM64_SYSREG_OP0_SHIFT); +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_MASK, KVM_REG_ARM64_SYSREG_OP1_MASK); +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_SHIFT, KVM_REG_ARM64_SYSREG_OP1_SHIFT); +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_MASK, KVM_REG_ARM64_SYSREG_CRN_MASK); +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_SHIFT, KVM_REG_ARM64_SYSREG_CRN_SHIFT); +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_MASK, KVM_REG_ARM64_SYSREG_CRM_MASK); 
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_SHIFT, KVM_REG_ARM64_SYSREG_CRM_SHIFT); +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_MASK, KVM_REG_ARM64_SYSREG_OP2_MASK); +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_SHIFT, KVM_REG_ARM64_SYSREG_OP2_SHIFT); #endif #undef MISMATCH_CHECK diff --git a/qemu/target/arm/m_helper.c b/qemu/target/arm/m_helper.c new file mode 100644 index 00000000..7fd9d219 --- /dev/null +++ b/qemu/target/arm/m_helper.c @@ -0,0 +1,2658 @@ +/* + * ARM generic helpers. + * + * This code is licensed under the GNU GPL v2 or later. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "cpu.h" +#include "internals.h" +#include "exec/helper-proto.h" +#include "qemu/host-utils.h" +#include "qemu/bitops.h" +#include "qemu/crc32c.h" +#include "exec/exec-all.h" +#include "sysemu/cpus.h" +#include "qemu/range.h" +#include "qemu/guest-random.h" +#include "arm_ldst.h" +#include "exec/cpu_ldst.h" + +static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask, + uint32_t reg, uint32_t val) +{ + /* Only APSR is actually writable */ + if (!(reg & 4)) { + uint32_t apsrmask = 0; + + if (mask & 8) { + apsrmask |= XPSR_NZCV | XPSR_Q; + } + if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { + apsrmask |= XPSR_GE; + } + xpsr_write(env, val, apsrmask); + } +} + +static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el) +{ + uint32_t mask = 0; + + if ((reg & 1) && el) { + mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */ + } + if (!(reg & 4)) { + mask |= XPSR_NZCV | XPSR_Q; /* APSR */ + if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) { + mask |= XPSR_GE; + } + } + /* EPSR reads as zero */ + return xpsr_read(env) & mask; +} + +static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure) +{ + uint32_t value = env->v7m.control[secure]; + + if (!secure) { + /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */ + value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK; + } + return value; +} + +/* + * What kind of stack write are we doing? This affects how exceptions + * generated during the stacking are treated. 
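+ * STACK_NORMAL pends any derived exception, STACK_IGNFAULTS only updates + * the fault status registers, and STACK_LAZYFP marks writes performed for + * lazy floating-point state preservation.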
+ */ +typedef enum StackingMode { + STACK_NORMAL, + STACK_IGNFAULTS, + STACK_LAZYFP, +} StackingMode; + +static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value, + ARMMMUIdx mmu_idx, StackingMode mode) +{ + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + MemTxAttrs attrs = { 0 }; + MemTxResult txres; + target_ulong page_size; + hwaddr physaddr; + int prot; + ARMMMUFaultInfo fi = { 0 }; + bool secure = mmu_idx & ARM_MMU_IDX_M_S; + // int exc; + // bool exc_secure; + + if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr, + &attrs, &prot, &page_size, &fi, NULL)) { + /* MPU/SAU lookup failed */ + if (fi.type == ARMFault_QEMU_SFault) { + if (mode == STACK_LAZYFP) { + qemu_log_mask(CPU_LOG_INT, + "...SecureFault with SFSR.LSPERR " + "during lazy stacking\n"); + env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK; + } else { + qemu_log_mask(CPU_LOG_INT, + "...SecureFault with SFSR.AUVIOL " + "during stacking\n"); + env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; + } + env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK; + env->v7m.sfar = addr; + // exc = ARMV7M_EXCP_SECURE; + // exc_secure = false; + } else { + if (mode == STACK_LAZYFP) { + qemu_log_mask(CPU_LOG_INT, + "...MemManageFault with CFSR.MLSPERR\n"); + env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK; + } else { + qemu_log_mask(CPU_LOG_INT, + "...MemManageFault with CFSR.MSTKERR\n"); + env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK; + } + // exc = ARMV7M_EXCP_MEM; + // exc_secure = secure; + } + goto pend_fault; + } +#ifdef UNICORN_ARCH_POSTFIX + glue(address_space_stl_le, UNICORN_ARCH_POSTFIX)(cs->uc, arm_addressspace(cs, attrs), physaddr, value, +#else + address_space_stl_le(cs->uc, arm_addressspace(cs, attrs), physaddr, value, +#endif + attrs, &txres); + if (txres != MEMTX_OK) { + /* BusFault trying to write the data */ + if (mode == STACK_LAZYFP) { + qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n"); + env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK; + } else { + qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n"); + env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK; + } + // exc = ARMV7M_EXCP_BUS; + // exc_secure = false; + goto pend_fault; + } + return true; + +pend_fault: + /* + * By pending the exception at this point we are making + * the IMPDEF choice "overridden exceptions pended" (see the + * MergeExcInfo() pseudocode). The other choice would be to not + * pend them now and then make a choice about which to throw away + * later if we have two derived exceptions. + * The only case when we must not pend the exception but instead + * throw it away is if we are doing the push of the callee registers + * and we've already generated a derived exception (this is indicated + * by the caller passing STACK_IGNFAULTS). Even in this case we will + * still update the fault status registers. 
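+ * (In this Unicorn port the NVIC pend calls below are stubbed out; only + * the fault status register updates take effect.)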
+ */ + switch (mode) { + case STACK_NORMAL: + // armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure); + break; + case STACK_LAZYFP: + // armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure); + break; + case STACK_IGNFAULTS: + break; + } + return false; +} + +void armv7m_nvic_set_pending(void *opaque, int irq, bool secure) +{ +} + +static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, + ARMMMUIdx mmu_idx) +{ + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + MemTxAttrs attrs = { 0 }; + MemTxResult txres; + target_ulong page_size; + hwaddr physaddr; + int prot; + ARMMMUFaultInfo fi = { 0 }; + bool secure = mmu_idx & ARM_MMU_IDX_M_S; + int exc; + bool exc_secure; + uint32_t value; + + if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, + &attrs, &prot, &page_size, &fi, NULL)) { + /* MPU/SAU lookup failed */ + if (fi.type == ARMFault_QEMU_SFault) { + qemu_log_mask(CPU_LOG_INT, + "...SecureFault with SFSR.AUVIOL during unstack\n"); + env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; + env->v7m.sfar = addr; + exc = ARMV7M_EXCP_SECURE; + exc_secure = false; + } else { + qemu_log_mask(CPU_LOG_INT, + "...MemManageFault with CFSR.MUNSTKERR\n"); + env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK; + exc = ARMV7M_EXCP_MEM; + exc_secure = secure; + } + goto pend_fault; + } + +#ifdef UNICORN_ARCH_POSTFIX + value = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->uc, arm_addressspace(cs, attrs), physaddr, +#else + value = address_space_ldl(cs->uc, arm_addressspace(cs, attrs), physaddr, +#endif + attrs, &txres); + if (txres != MEMTX_OK) { + /* BusFault trying to read the data */ + qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n"); + env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK; + exc = ARMV7M_EXCP_BUS; + exc_secure = false; + goto pend_fault; + } + + *dest = value; + return true; + +pend_fault: + /* + * By pending the exception at this point we are making + * the IMPDEF choice "overridden exceptions pended" (see the + * MergeExcInfo() pseudocode). The other choice would be to not + * pend them now and then make a choice about which to throw away + * later if we have two derived exceptions. + */ + armv7m_nvic_set_pending(env->nvic, exc, exc_secure); + return false; +} + +void HELPER(v7m_preserve_fp_state)(CPUARMState *env) +{ + /* + * Preserve FP state (because LSPACT was set and we are about + * to execute an FP instruction). This corresponds to the + * PreserveFPState() pseudocode. + * We may throw an exception if the stacking fails. 
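+ * The registers are written to the address that was recorded in FPCAR + * when the original exception frame was laid out.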
+ */ + ARMCPU *cpu = env_archcpu(env); + bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; + bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK); + bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK); + bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK; + uint32_t fpcar = env->v7m.fpcar[is_secure]; + bool stacked_ok = true; + bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK); + bool take_exception; + + /* Check the background context had access to the FPU */ + if (!v7m_cpacr_pass(env, is_secure, is_priv)) { + // armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure); + env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK; + stacked_ok = false; + } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) { + // armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S); + env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK; + stacked_ok = false; + } + + if (!splimviol && stacked_ok) { + /* We only stack if the stack limit wasn't violated */ + int i; + ARMMMUIdx mmu_idx; + + mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri); + for (i = 0; i < (ts ? 32 : 16); i += 2) { + uint64_t dn = *aa32_vfp_dreg(env, i / 2); + uint32_t faddr = fpcar + 4 * i; + uint32_t slo = extract64(dn, 0, 32); + uint32_t shi = extract64(dn, 32, 32); + + if (i >= 16) { + faddr += 8; /* skip the slot for the FPSCR */ + } + stacked_ok = stacked_ok && + v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) && + v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP); + } + + stacked_ok = stacked_ok && + v7m_stack_write(cpu, fpcar + 0x40, + vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP); + } + + /* + * We definitely pended an exception, but it's possible that it + * might not be able to be taken now. If its priority permits us + * to take it now, then we must not update the LSPACT or FP regs, + * but instead jump out to take the exception immediately. + * If it's just pending and won't be taken until the current + * handler exits, then we do update LSPACT and the FP regs. + */ + // take_exception = !stacked_ok && + // armv7m_nvic_can_take_pending_exception(env->nvic); + /* In Unicorn we assume armv7m_nvic_can_take_pending_exception() + always returns false. */ + take_exception = false; + + if (take_exception) { + raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC()); + } + + env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK; + + if (ts) { + /* Clear s0 to s31 and the FPSCR */ + int i; + + for (i = 0; i < 32; i += 2) { + *aa32_vfp_dreg(env, i / 2) = 0; + } + vfp_set_fpscr(env, 0); + } + /* + * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them + * unchanged. + */ +} + +/* + * Write to v7M CONTROL.SPSEL bit for the specified security bank. + * This may change the current stack pointer between Main and Process + * stack pointers if it is done for the CONTROL register for the current + * security state. + */ +static void write_v7m_control_spsel_for_secstate(CPUARMState *env, + bool new_spsel, + bool secstate) +{ + bool old_is_psp = v7m_using_psp(env); + + env->v7m.control[secstate] = + deposit32(env->v7m.control[secstate], + R_V7M_CONTROL_SPSEL_SHIFT, + R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); + + if (secstate == env->v7m.secure) { + bool new_is_psp = v7m_using_psp(env); + uint32_t tmp; + + if (old_is_psp != new_is_psp) { + tmp = env->v7m.other_sp; + env->v7m.other_sp = env->regs[13]; + env->regs[13] = tmp; + } + } +} + +/* + * Write to v7M CONTROL.SPSEL bit.
This may change the current + * stack pointer between Main and Process stack pointers. + */ +static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) +{ + write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure); +} + +void write_v7m_exception(CPUARMState *env, uint32_t new_exc) +{ + /* + * Write a new value to v7m.exception, thus transitioning into or out + * of Handler mode; this may result in a change of active stack pointer. + */ + bool new_is_psp, old_is_psp = v7m_using_psp(env); + uint32_t tmp; + + env->v7m.exception = new_exc; + + new_is_psp = v7m_using_psp(env); + + if (old_is_psp != new_is_psp) { + tmp = env->v7m.other_sp; + env->v7m.other_sp = env->regs[13]; + env->regs[13] = tmp; + } +} + +/* Switch M profile security state between NS and S */ +static void switch_v7m_security_state(CPUARMState *env, bool new_secstate) +{ + uint32_t new_ss_msp, new_ss_psp; + + if (env->v7m.secure == new_secstate) { + return; + } + + /* + * All the banked state is accessed by looking at env->v7m.secure + * except for the stack pointer; rearrange the SP appropriately. + */ + new_ss_msp = env->v7m.other_ss_msp; + new_ss_psp = env->v7m.other_ss_psp; + + if (v7m_using_psp(env)) { + env->v7m.other_ss_psp = env->regs[13]; + env->v7m.other_ss_msp = env->v7m.other_sp; + } else { + env->v7m.other_ss_msp = env->regs[13]; + env->v7m.other_ss_psp = env->v7m.other_sp; + } + + env->v7m.secure = new_secstate; + + if (v7m_using_psp(env)) { + env->regs[13] = new_ss_psp; + env->v7m.other_sp = new_ss_msp; + } else { + env->regs[13] = new_ss_msp; + env->v7m.other_sp = new_ss_psp; + } +} + +void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) +{ + /* + * Handle v7M BXNS: + * - if the return value is a magic value, do exception return (like BX) + * - otherwise bit 0 of the return value is the target security state + */ + uint32_t min_magic; + + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + /* Covers FNC_RETURN and EXC_RETURN magic */ + min_magic = FNC_RETURN_MIN_MAGIC; + } else { + /* EXC_RETURN magic only */ + min_magic = EXC_RETURN_MIN_MAGIC; + } + + if (dest >= min_magic) { + /* + * This is an exception return magic value; put it where + * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT. + * Note that if we ever add gen_ss_advance() singlestep support to + * M profile this should count as an "instruction execution complete" + * event (compare gen_bx_excret_final_code()). + */ + env->regs[15] = dest & ~1; + env->thumb = dest & 1; + HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT); + /* notreached */ + } + + /* translate.c should have made BXNS UNDEF unless we're secure */ + assert(env->v7m.secure); + + if (!(dest & 1)) { + env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; + } + switch_v7m_security_state(env, dest & 1); + env->thumb = 1; + env->regs[15] = dest & ~1; + arm_rebuild_hflags(env); +} + +void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) +{ + /* + * Handle v7M BLXNS: + * - bit 0 of the destination address is the target security state + */ + + /* At this point regs[15] is the address just after the BLXNS */ + uint32_t nextinst = env->regs[15] | 1; + uint32_t sp = env->regs[13] - 8; + uint32_t saved_psr; + + /* translate.c will have made BLXNS UNDEF unless we're secure */ + assert(env->v7m.secure); + + if (dest & 1) { + /* + * Target is Secure, so this is just a normal BLX, + * except that the low bit doesn't indicate Thumb/not. 
+ */ + env->regs[14] = nextinst; + env->thumb = 1; + env->regs[15] = dest & ~1; + return; + } + + /* Target is non-secure: first push a stack frame */ + if (!QEMU_IS_ALIGNED(sp, 8)) { + qemu_log_mask(LOG_GUEST_ERROR, + "BLXNS with misaligned SP is UNPREDICTABLE\n"); + } + + if (sp < v7m_sp_limit(env)) { + raise_exception(env, EXCP_STKOF, 0, 1); + } + + saved_psr = env->v7m.exception; + if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) { + saved_psr |= XPSR_SFPA; + } + + /* Note that these stores can throw exceptions on MPU faults */ + cpu_stl_data_ra(env, sp, nextinst, GETPC()); + cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC()); + + env->regs[13] = sp; + env->regs[14] = 0xfeffffff; + if (arm_v7m_is_handler_mode(env)) { + /* + * Write a dummy value to IPSR, to avoid leaking the current secure + * exception number to non-secure code. This is guaranteed not + * to cause write_v7m_exception() to actually change stacks. + */ + write_v7m_exception(env, 1); + } + env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; + switch_v7m_security_state(env, 0); + env->thumb = 1; + env->regs[15] = dest; + arm_rebuild_hflags(env); +} + +static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, + bool spsel) +{ + /* + * Return a pointer to the location where we currently store the + * stack pointer for the requested security state and thread mode. + * This pointer will become invalid if the CPU state is updated + * such that the stack pointers are switched around (eg changing + * the SPSEL control bit). + * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). + * Unlike that pseudocode, we require the caller to pass us in the + * SPSEL control bit value; this is because we also use this + * function in handling of pushing of the callee-saves registers + * part of the v8M stack frame (pseudocode PushCalleeStack()), + * and in the tailchain codepath the SPSEL bit comes from the exception + * return magic LR value from the previous exception. The pseudocode + * opencodes the stack-selection in PushCalleeStack(), but we prefer + * to make this utility function generic enough to do the job. + */ + bool want_psp = threadmode && spsel; + + if (secure == env->v7m.secure) { + if (want_psp == v7m_using_psp(env)) { + return &env->regs[13]; + } else { + return &env->v7m.other_sp; + } + } else { + if (want_psp) { + return &env->v7m.other_ss_psp; + } else { + return &env->v7m.other_ss_msp; + } + } +} + +#if 0 +static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, + uint32_t *pvec) +{ + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + MemTxResult result; + uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4; + uint32_t vector_entry; + MemTxAttrs attrs = { 0 }; + ARMMMUIdx mmu_idx; + // bool exc_secure; + + mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true); + + /* + * We don't do a get_phys_addr() here because the rules for vector + * loads are special: they always use the default memory map, and + * the default memory map permits reads from all addresses. + * Since there's no easy way to pass through to pmsav8_mpu_lookup() + * that we want this special case which would always say "yes", + * we just do the SAU lookup here followed by a direct physical load. 
+ */ + attrs.secure = targets_secure; + attrs.user = false; + + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + V8M_SAttributes sattrs = { 0 }; + + v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); + if (sattrs.ns) { + attrs.secure = false; + } else if (!targets_secure) { + /* + * NS access to S memory: the underlying exception which we escalate + * to HardFault is SecureFault, which always targets Secure. + */ + // exc_secure = true; + goto load_fail; + } + } + +#ifdef UNICORN_ARCH_POSTFIX + vector_entry = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->uc, arm_addressspace(cs, attrs), addr, +#else + vector_entry = address_space_ldl(cs->uc, arm_addressspace(cs, attrs), addr, +#endif + attrs, &result); + if (result != MEMTX_OK) { + /* + * Underlying exception is BusFault: its target security state + * depends on BFHFNMINS. + */ + // exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK); + goto load_fail; + } + *pvec = vector_entry; + return true; + +load_fail: + /* + * All vector table fetch fails are reported as HardFault, with + * HFSR.VECTTBL and .FORCED set. (FORCED is set because + * technically the underlying exception is a SecureFault or BusFault + * that is escalated to HardFault.) This is a terminal exception, + * so we will either take the HardFault immediately or else enter + * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()). + * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are + * secure); otherwise it targets the same security state as the + * underlying exception. + */ + if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { + // exc_secure = true; + } + env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK; + // armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure); + return false; +} +#endif + +static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr) +{ + /* + * Return the integrity signature value for the callee-saves + * stack frame section. @lr is the exception return payload/LR value + * whose FType bit forms bit 0 of the signature if FP is present. + */ + uint32_t sig = 0xfefa125a; + + if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) + || (lr & R_V7M_EXCRET_FTYPE_MASK)) { + sig |= 1; + } + return sig; +} + +#if 0 +static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, + bool ignore_faults) +{ + /* + * For v8M, push the callee-saves register part of the stack frame. + * Compare the v8M pseudocode PushCalleeStack(). + * In the tailchaining case this may not be the current stack. + */ + CPUARMState *env = &cpu->env; + uint32_t *frame_sp_p; + uint32_t frameptr; + ARMMMUIdx mmu_idx; + bool stacked_ok; + uint32_t limit; + bool want_psp; + uint32_t sig; + StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL; + + if (dotailchain) { + bool mode = lr & R_V7M_EXCRET_MODE_MASK; + bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) || + !mode; + + mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); + frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, + lr & R_V7M_EXCRET_SPSEL_MASK); + want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK); + if (want_psp) { + limit = env->v7m.psplim[M_REG_S]; + } else { + limit = env->v7m.msplim[M_REG_S]; + } + } else { + mmu_idx = arm_mmu_idx(env); + frame_sp_p = &env->regs[13]; + limit = v7m_sp_limit(env); + } + + frameptr = *frame_sp_p - 0x28; + if (frameptr < limit) { + /* + * Stack limit failure: set SP to the limit value, and generate + * STKOF UsageFault. 
Stack pushes below the limit must not be + * performed. It is IMPDEF whether pushes above the limit are + * performed; we choose not to. + */ + qemu_log_mask(CPU_LOG_INT, + "...STKOF during callee-saves register stacking\n"); + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, + env->v7m.secure); + *frame_sp_p = limit; + return true; + } + + /* + * Write as much of the stack frame as we can. A write failure may + * cause us to pend a derived exception. + */ + sig = v7m_integrity_sig(env, lr); + stacked_ok = + v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) && + v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) && + v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) && + v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) && + v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) && + v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) && + v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) && + v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) && + v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode); + + /* Update SP regardless of whether any of the stack accesses failed. */ + *frame_sp_p = frameptr; + + return !stacked_ok; +} +#endif + +static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain, + bool ignore_stackfaults) +{ + return; // FIXME +#if 0 + /* + * Do the "take the exception" parts of exception entry, + * but not the pushing of state to the stack. This is + * similar to the pseudocode ExceptionTaken() function. + */ + CPUARMState *env = &cpu->env; + uint32_t addr; + bool targets_secure; + int exc; + bool push_failed = false; + + armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); + qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n", + targets_secure ? "secure" : "nonsecure", exc); + + if (dotailchain) { + /* Sanitize LR FType and PREFIX bits */ + if (!cpu_isar_feature(aa32_vfp_simd, cpu)) { + lr |= R_V7M_EXCRET_FTYPE_MASK; + } + lr = deposit32(lr, 24, 8, 0xff); + } + + if (arm_feature(env, ARM_FEATURE_V8)) { + if (arm_feature(env, ARM_FEATURE_M_SECURITY) && + (lr & R_V7M_EXCRET_S_MASK)) { + /* + * The background code (the owner of the registers in the + * exception frame) is Secure. This means it may either already + * have or now needs to push callee-saves registers. + */ + if (targets_secure) { + if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) { + /* + * We took an exception from Secure to NonSecure + * (which means the callee-saved registers got stacked) + * and are now tailchaining to a Secure exception. + * Clear DCRS so eventual return from this Secure + * exception unstacks the callee-saved registers. + */ + lr &= ~R_V7M_EXCRET_DCRS_MASK; + } + } else { + /* + * We're going to a non-secure exception; push the + * callee-saves registers to the stack now, if they're + * not already saved. 
+ */ + if (lr & R_V7M_EXCRET_DCRS_MASK && + !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) { + push_failed = v7m_push_callee_stack(cpu, lr, dotailchain, + ignore_stackfaults); + } + lr |= R_V7M_EXCRET_DCRS_MASK; + } + } + + lr &= ~R_V7M_EXCRET_ES_MASK; + if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) { + lr |= R_V7M_EXCRET_ES_MASK; + } + lr &= ~R_V7M_EXCRET_SPSEL_MASK; + if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) { + lr |= R_V7M_EXCRET_SPSEL_MASK; + } + + /* + * Clear registers if necessary to prevent non-secure exception + * code being able to see register values from secure code. + * Where register values become architecturally UNKNOWN we leave + * them with their previous values. + */ + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + if (!targets_secure) { + /* + * Always clear the caller-saved registers (they have been + * pushed to the stack earlier in v7m_push_stack()). + * Clear callee-saved registers if the background code is + * Secure (in which case these regs were saved in + * v7m_push_callee_stack()). + */ + int i; + + for (i = 0; i < 13; i++) { + /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */ + if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) { + env->regs[i] = 0; + } + } + /* Clear EAPSR */ + xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT); + } + } + } + + if (push_failed && !ignore_stackfaults) { + /* + * Derived exception on callee-saves register stacking: + * we might now want to take a different exception which + * targets a different security state, so try again from the top. + */ + qemu_log_mask(CPU_LOG_INT, + "...derived exception on callee-saves register stacking"); + v7m_exception_taken(cpu, lr, true, true); + return; + } + + if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) { + /* Vector load failed: derived exception */ + qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load"); + v7m_exception_taken(cpu, lr, true, true); + return; + } + + /* + * Now we've done everything that might cause a derived exception + * we can go ahead and activate whichever exception we're going to + * take (which might now be the derived exception). + */ + armv7m_nvic_acknowledge_irq(env->nvic); + + /* Switch to target security state -- must do this before writing SPSEL */ + switch_v7m_security_state(env, targets_secure); + write_v7m_control_spsel(env, 0); + arm_clear_exclusive(env); + /* Clear SFPA and FPCA (has no effect if no FPU) */ + env->v7m.control[M_REG_S] &= + ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK); + /* Clear IT bits */ + env->condexec_bits = 0; + env->regs[14] = lr; + env->regs[15] = addr & 0xfffffffe; + env->thumb = addr & 1; + arm_rebuild_hflags(env); +#endif +} + +bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure) +{ + return false; +} + +static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr, + bool apply_splim) +{ +#if 0 + /* + * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR + * that we will need later in order to do lazy FP reg stacking. + */ + bool is_secure = env->v7m.secure; + void *nvic = env->nvic; + /* + * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits + * are banked and we want to update the bit in the bank for the + * current security state; and in one case we want to specifically + * update the NS banked version of a bit even if we are secure. 
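+ * LSPACT is the key bit here: once it is set, the next FP instruction + * causes v7m_preserve_fp_state() to write the state out lazily.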
+ */ + uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S]; + uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS]; + uint32_t *fpccr = &env->v7m.fpccr[is_secure]; + bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy; + + env->v7m.fpcar[is_secure] = frameptr & ~0x7; + + if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) { + bool splimviol; + uint32_t splim = v7m_sp_limit(env); + bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) && + (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK); + + splimviol = !ign && frameptr < splim; + *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol); + } + + *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1); + + *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure); + + *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0); + + *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD, + !arm_v7m_is_handler_mode(env)); + + hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false); + *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy); + + bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false); + *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy); + + mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure); + *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy); + + ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false); + *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy); + + monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false); + *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy); + + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true); + *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy); + + sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false); + *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy); + } +#endif +} + +void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr) +{ + /* fptr is the value of Rn, the frame pointer we store the FP regs to */ + bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; + bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK; + uintptr_t ra = GETPC(); + + assert(env->v7m.secure); + + if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) { + return; + } + + /* Check access to the coprocessor is permitted */ + if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) { + raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC()); + } + + if (lspact) { + /* LSPACT should not be active when there is active FP state */ + raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC()); + } + + if (fptr & 7) { + raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC()); + } + + /* + * Note that we do not use v7m_stack_write() here, because the + * accesses should not set the FSR bits for stacking errors if they + * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK + * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions + * and longjmp out. + */ + if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) { + bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK; + int i; + + for (i = 0; i < (ts ? 
32 : 16); i += 2) { + uint64_t dn = *aa32_vfp_dreg(env, i / 2); + uint32_t faddr = fptr + 4 * i; + uint32_t slo = extract64(dn, 0, 32); + uint32_t shi = extract64(dn, 32, 32); + + if (i >= 16) { + faddr += 8; /* skip the slot for the FPSCR */ + } + cpu_stl_data_ra(env, faddr, slo, ra); + cpu_stl_data_ra(env, faddr + 4, shi, ra); + } + cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra); + + /* + * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to + * leave them unchanged, matching our choice in v7m_preserve_fp_state. + */ + if (ts) { + for (i = 0; i < 32; i += 2) { + *aa32_vfp_dreg(env, i / 2) = 0; + } + vfp_set_fpscr(env, 0); + } + } else { + v7m_update_fpccr(env, fptr, false); + } + + env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK; +} + +void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr) +{ + uintptr_t ra = GETPC(); + + /* fptr is the value of Rn, the frame pointer we load the FP regs from */ + assert(env->v7m.secure); + + if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) { + return; + } + + /* Check access to the coprocessor is permitted */ + if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) { + raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC()); + } + + if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) { + /* State in FP is still valid */ + env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK; + } else { + bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK; + int i; + uint32_t fpscr; + + if (fptr & 7) { + raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC()); + } + + for (i = 0; i < (ts ? 32 : 16); i += 2) { + uint32_t slo, shi; + uint64_t dn; + uint32_t faddr = fptr + 4 * i; + + if (i >= 16) { + faddr += 8; /* skip the slot for the FPSCR */ + } + + slo = cpu_ldl_data_ra(env, faddr, ra); + shi = cpu_ldl_data_ra(env, faddr + 4, ra); + + dn = (uint64_t) shi << 32 | slo; + *aa32_vfp_dreg(env, i / 2) = dn; + } + fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra); + vfp_set_fpscr(env, fpscr); + } + + env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK; +} + +static bool v7m_push_stack(ARMCPU *cpu) +{ + /* + * Do the "set up stack frame" part of exception entry, + * similar to pseudocode PushStack(). + * Return true if we generate a derived exception (and so + * should ignore further stack faults trying to process + * that derived exception.) + */ + bool stacked_ok = true, limitviol = false; + CPUARMState *env = &cpu->env; + uint32_t xpsr = xpsr_read(env); + uint32_t frameptr = env->regs[13]; + ARMMMUIdx mmu_idx = arm_mmu_idx(env); + uint32_t framesize; + bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1); + + if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) && + (env->v7m.secure || nsacr_cp10)) { + if (env->v7m.secure && + env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) { + framesize = 0xa8; + } else { + framesize = 0x68; + } + } else { + framesize = 0x20; + } + + /* Align stack pointer if the guest wants that */ + if ((frameptr & 4) && + (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) { + frameptr -= 4; + xpsr |= XPSR_SPREALIGN; + } + + xpsr &= ~XPSR_SFPA; + if (env->v7m.secure && + (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) { + xpsr |= XPSR_SFPA; + } + + frameptr -= framesize; + + if (arm_feature(env, ARM_FEATURE_V8)) { + uint32_t limit = v7m_sp_limit(env); + + if (frameptr < limit) { + /* + * Stack limit failure: set SP to the limit value, and generate + * STKOF UsageFault. Stack pushes below the limit must not be + * performed. 
It is IMPDEF whether pushes above the limit are + * performed; we choose not to. + */ + qemu_log_mask(CPU_LOG_INT, + "...STKOF during stacking\n"); + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, + env->v7m.secure); + env->regs[13] = limit; + /* + * We won't try to perform any further memory accesses but + * we must continue through the following code to check for + * permission faults during FPU state preservation, and we + * must update FPCCR if lazy stacking is enabled. + */ + limitviol = true; + stacked_ok = false; + } + } + + /* + * Write as much of the stack frame as we can. If we fail a stack + * write this will result in a derived exception being pended + * (which may be taken in preference to the one we started with + * if it has higher priority). + */ + stacked_ok = stacked_ok && + v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) && + v7m_stack_write(cpu, frameptr + 4, env->regs[1], + mmu_idx, STACK_NORMAL) && + v7m_stack_write(cpu, frameptr + 8, env->regs[2], + mmu_idx, STACK_NORMAL) && + v7m_stack_write(cpu, frameptr + 12, env->regs[3], + mmu_idx, STACK_NORMAL) && + v7m_stack_write(cpu, frameptr + 16, env->regs[12], + mmu_idx, STACK_NORMAL) && + v7m_stack_write(cpu, frameptr + 20, env->regs[14], + mmu_idx, STACK_NORMAL) && + v7m_stack_write(cpu, frameptr + 24, env->regs[15], + mmu_idx, STACK_NORMAL) && + v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL); + + if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) { + /* FPU is active, try to save its registers */ + bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; + bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK; + + if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) { + qemu_log_mask(CPU_LOG_INT, + "...SecureFault because LSPACT and FPCA both set\n"); + env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); + } else if (!env->v7m.secure && !nsacr_cp10) { + qemu_log_mask(CPU_LOG_INT, + "...Secure UsageFault with CFSR.NOCP because " + "NSACR.CP10 prevents stacking FP regs\n"); + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S); + env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK; + } else { + if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) { + /* Lazy stacking disabled, save registers now */ + int i; + bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure, + arm_current_el(env) != 0); + + if (stacked_ok && !cpacr_pass) { + /* + * Take UsageFault if CPACR forbids access. The pseudocode + * here does a full CheckCPEnabled() but we know the NSACR + * check can never fail as we have already handled that. + */ + qemu_log_mask(CPU_LOG_INT, + "...UsageFault with CFSR.NOCP because " + "CPACR.CP10 prevents stacking FP regs\n"); + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, + env->v7m.secure); + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK; + stacked_ok = false; + } + + for (i = 0; i < ((framesize == 0xa8) ? 
32 : 16); i += 2) {
+                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+                    uint32_t faddr = frameptr + 0x20 + 4 * i;
+                    uint32_t slo = extract64(dn, 0, 32);
+                    uint32_t shi = extract64(dn, 32, 32);
+
+                    if (i >= 16) {
+                        faddr += 8; /* skip the slot for the FPSCR */
+                    }
+                    stacked_ok = stacked_ok &&
+                        v7m_stack_write(cpu, faddr, slo,
+                                        mmu_idx, STACK_NORMAL) &&
+                        v7m_stack_write(cpu, faddr + 4, shi,
+                                        mmu_idx, STACK_NORMAL);
+                }
+                stacked_ok = stacked_ok &&
+                    v7m_stack_write(cpu, frameptr + 0x60,
+                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
+                if (cpacr_pass) {
+                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
+                        *aa32_vfp_dreg(env, i / 2) = 0;
+                    }
+                    vfp_set_fpscr(env, 0);
+                }
+            } else {
+                /* Lazy stacking enabled, save necessary info to stack later */
+                v7m_update_fpccr(env, frameptr + 0x20, true);
+            }
+        }
+    }
+
+    /*
+     * If we broke a stack limit then SP was already updated earlier;
+     * otherwise we update SP regardless of whether any of the stack
+     * accesses failed or we took some other kind of fault.
+     */
+    if (!limitviol) {
+        env->regs[13] = frameptr;
+    }
+
+    return !stacked_ok;
+}
+
+static void do_v7m_exception_exit(ARMCPU *cpu)
+{
+    return; // FIXME
+    CPUARMState *env = &cpu->env;
+    uint32_t excret;
+    uint32_t xpsr, xpsr_mask;
+    bool ufault = false;
+    bool sfault = false;
+    bool return_to_sp_process;
+    bool return_to_handler;
+    bool rettobase = false;
+    bool exc_secure = false;
+    bool return_to_secure;
+    bool ftype;
+    bool restore_s16_s31;
+
+    /*
+     * If we're not in Handler mode then jumps to magic exception-exit
+     * addresses don't have magic behaviour. However for the v8M
+     * security extensions the magic secure-function-return has to
+     * work in thread mode too, so to avoid doing an extra check in
+     * the generated code we allow exception-exit magic to also cause the
+     * internal exception and bring us here in thread mode. Correct code
+     * will never try to do this (the following insn fetch will always
+     * fault) so the overhead of having taken an unnecessary exception
+     * doesn't matter.
+     */
+    if (!arm_v7m_is_handler_mode(env)) {
+        return;
+    }
+
+    /*
+     * In the spec pseudocode ExceptionReturn() is called directly
+     * from BXWritePC() and gets the full target PC value including
+     * bit zero. In QEMU's implementation we treat it as a normal
+     * jump-to-register (which is then caught later on), and so split
+     * the target value up between env->regs[15] and env->thumb in
+     * gen_bx(). Reconstitute it.
+     */
+    excret = env->regs[15];
+    if (env->thumb) {
+        excret |= 1;
+    }
+
+    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
+                  " previous exception %d\n",
+                  excret, env->v7m.exception);
+
+    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
+        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
+                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
+                      excret);
+    }
+
+    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
+
+    if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
+                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
+                      "if FPU not present\n",
+                      excret);
+        ftype = true;
+    }
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        /*
+         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
+         * we pick which FAULTMASK to clear.
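+         * For reference, in the v8M EXC_RETURN encoding the bits this
+         * function tests are: ES = bit 0, SPSEL = bit 2, MODE = bit 3,
+         * FTYPE = bit 4, DCRS = bit 5 and S = bit 6; bits [31:7] are RES1.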
+ */ + if (!env->v7m.secure && + ((excret & R_V7M_EXCRET_ES_MASK) || + !(excret & R_V7M_EXCRET_DCRS_MASK))) { + sfault = 1; + /* For all other purposes, treat ES as 0 (R_HXSR) */ + excret &= ~R_V7M_EXCRET_ES_MASK; + } + exc_secure = excret & R_V7M_EXCRET_ES_MASK; + } + + if (env->v7m.exception != ARMV7M_EXCP_NMI) { + /* + * Auto-clear FAULTMASK on return from other than NMI. + * If the security extension is implemented then this only + * happens if the raw execution priority is >= 0; the + * value of the ES bit in the exception return value indicates + * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.) + */ + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + // if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) { + // env->v7m.faultmask[exc_secure] = 0; + // } + } else { + env->v7m.faultmask[M_REG_NS] = 0; + } + } + +#if 0 + switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception, + exc_secure)) { + case -1: + /* attempt to exit an exception that isn't active */ + ufault = true; + break; + case 0: + /* still an irq active now */ + break; + case 1: + /* + * We returned to base exception level, no nesting. + * (In the pseudocode this is written using "NestedActivation != 1" + * where we have 'rettobase == false'.) + */ + rettobase = true; + break; + default: + g_assert_not_reached(); + } +#endif + + return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK); + return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK; + return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && + (excret & R_V7M_EXCRET_S_MASK); + + if (arm_feature(env, ARM_FEATURE_V8)) { + if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) { + /* + * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP); + * we choose to take the UsageFault. + */ + if ((excret & R_V7M_EXCRET_S_MASK) || + (excret & R_V7M_EXCRET_ES_MASK) || + !(excret & R_V7M_EXCRET_DCRS_MASK)) { + ufault = true; + } + } + if (excret & R_V7M_EXCRET_RES0_MASK) { + ufault = true; + } + } else { + /* For v7M we only recognize certain combinations of the low bits */ + switch (excret & 0xf) { + case 1: /* Return to Handler */ + break; + case 13: /* Return to Thread using Process stack */ + case 9: /* Return to Thread using Main stack */ + /* + * We only need to check NONBASETHRDENA for v7M, because in + * v8M this bit does not exist (it is RES1). + */ + if (!rettobase && + !(env->v7m.ccr[env->v7m.secure] & + R_V7M_CCR_NONBASETHRDENA_MASK)) { + ufault = true; + } + break; + default: + ufault = true; + } + } + + /* + * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in + * Handler mode (and will be until we write the new XPSR.Interrupt + * field) this does not switch around the current stack pointer. + * We must do this before we do any kind of tailchaining, including + * for the derived exceptions on integrity check failures, or we will + * give the guest an incorrect EXCRET.SPSEL value on exception entry. + */ + write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure); + + /* + * Clear scratch FP values left in caller saved registers; this + * must happen before any kind of tail chaining. 
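+     * (The scratch state cleared below is s0..s15 plus FPSCR, i.e. the
+     * caller-saved part of the FP register file that the basic FP frame
+     * stacks.)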
+ */ + if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) && + (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) { + if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) { + env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); + qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " + "stackframe: error during lazy state deactivation\n"); + v7m_exception_taken(cpu, excret, true, false); + return; + } else { + /* Clear s0..s15 and FPSCR */ + int i; + + for (i = 0; i < 16; i += 2) { + *aa32_vfp_dreg(env, i / 2) = 0; + } + vfp_set_fpscr(env, 0); + } + } + + if (sfault) { + env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); + qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " + "stackframe: failed EXC_RETURN.ES validity check\n"); + v7m_exception_taken(cpu, excret, true, false); + return; + } + + if (ufault) { + /* + * Bad exception return: instead of popping the exception + * stack, directly take a usage fault on the current stack. + */ + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); + qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " + "stackframe: failed exception return integrity check\n"); + v7m_exception_taken(cpu, excret, true, false); + return; + } + + /* + * Tailchaining: if there is currently a pending exception that + * is high enough priority to preempt execution at the level we're + * about to return to, then just directly take that exception now, + * avoiding an unstack-and-then-stack. Note that now we have + * deactivated the previous exception by calling armv7m_nvic_complete_irq() + * our current execution priority is already the execution priority we are + * returning to -- none of the state we would unstack or set based on + * the EXCRET value affects it. + */ + // if (armv7m_nvic_can_take_pending_exception(env->nvic)) { + // qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n"); + // v7m_exception_taken(cpu, excret, true, false); + // return; + // } + + switch_v7m_security_state(env, return_to_secure); + + { + /* + * The stack pointer we should be reading the exception frame from + * depends on bits in the magic exception return type value (and + * for v8M isn't necessarily the stack pointer we will eventually + * end up resuming execution with). Get a pointer to the location + * in the CPU state struct where the SP we need is currently being + * stored; we will use and modify it in place. + * We use this limited C variable scope so we don't accidentally + * use 'frame_sp_p' after we do something that makes it invalid. + */ + uint32_t *frame_sp_p = get_v7m_sp_ptr(env, + return_to_secure, + !return_to_handler, + return_to_sp_process); + uint32_t frameptr = *frame_sp_p; + bool pop_ok = true; + ARMMMUIdx mmu_idx; + bool return_to_priv = return_to_handler || + !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK); + + mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure, + return_to_priv); + + if (!QEMU_IS_ALIGNED(frameptr, 8) && + arm_feature(env, ARM_FEATURE_V8)) { + qemu_log_mask(LOG_GUEST_ERROR, + "M profile exception return with non-8-aligned SP " + "for destination state is UNPREDICTABLE\n"); + } + + /* Do we need to pop callee-saved registers? 
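+         * That is the case on a return to Secure state where the
+         * callee-saved frame (integrity signature, a reserved word and
+         * r4-r11, 0x28 bytes in all) was stacked, i.e. when excret has
+         * ES clear or DCRS clear.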
*/ + if (return_to_secure && + ((excret & R_V7M_EXCRET_ES_MASK) == 0 || + (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) { + uint32_t actual_sig; + + pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx); + + if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) { + /* Take a SecureFault on the current stack */ + env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); + qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " + "stackframe: failed exception return integrity " + "signature check\n"); + v7m_exception_taken(cpu, excret, true, false); + return; + } + + pop_ok = pop_ok && + v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) && + v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) && + v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) && + v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) && + v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) && + v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) && + v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) && + v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx); + + frameptr += 0x28; + } + + /* Pop registers */ + pop_ok = pop_ok && + v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) && + v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) && + v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) && + v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) && + v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) && + v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) && + v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) && + v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx); + + if (!pop_ok) { + /* + * v7m_stack_read() pended a fault, so take it (as a tail + * chained exception on the same stack frame) + */ + qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n"); + v7m_exception_taken(cpu, excret, true, false); + return; + } + + /* + * Returning from an exception with a PC with bit 0 set is defined + * behaviour on v8M (bit 0 is ignored), but for v7M it was specified + * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore + * the lsbit, and there are several RTOSes out there which incorrectly + * assume the r15 in the stack frame should be a Thumb-style "lsbit + * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but + * complain about the badly behaved guest. + */ + if (env->regs[15] & 1) { + env->regs[15] &= ~1U; + if (!arm_feature(env, ARM_FEATURE_V8)) { + qemu_log_mask(LOG_GUEST_ERROR, + "M profile return from interrupt with misaligned " + "PC is UNPREDICTABLE on v7M\n"); + } + } + + if (arm_feature(env, ARM_FEATURE_V8)) { + /* + * For v8M we have to check whether the xPSR exception field + * matches the EXCRET value for return to handler/thread + * before we commit to changing the SP and xPSR. + */ + bool will_be_handler = (xpsr & XPSR_EXCP) != 0; + if (return_to_handler != will_be_handler) { + /* + * Take an INVPC UsageFault on the current stack. + * By this point we will have switched to the security state + * for the background state, so this UsageFault will target + * that state. 
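+                 * (The mismatch caught here is an EXCRET that claims a
+                 * return to Thread mode while the unstacked xPSR has a
+                 * nonzero exception number, or vice versa.)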
+ */ + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, + env->v7m.secure); + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; + qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " + "stackframe: failed exception return integrity " + "check\n"); + v7m_exception_taken(cpu, excret, true, false); + return; + } + } + + if (!ftype) { + /* FP present and we need to handle it */ + if (!return_to_secure && + (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) { + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); + env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK; + qemu_log_mask(CPU_LOG_INT, + "...taking SecureFault on existing stackframe: " + "Secure LSPACT set but exception return is " + "not to secure state\n"); + v7m_exception_taken(cpu, excret, true, false); + return; + } + + restore_s16_s31 = return_to_secure && + (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK); + + if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) { + /* State in FPU is still valid, just clear LSPACT */ + env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK; + } else { + int i; + uint32_t fpscr; + bool cpacr_pass, nsacr_pass; + + cpacr_pass = v7m_cpacr_pass(env, return_to_secure, + return_to_priv); + nsacr_pass = return_to_secure || + extract32(env->v7m.nsacr, 10, 1); + + if (!cpacr_pass) { + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, + return_to_secure); + env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK; + qemu_log_mask(CPU_LOG_INT, + "...taking UsageFault on existing " + "stackframe: CPACR.CP10 prevents unstacking " + "FP regs\n"); + v7m_exception_taken(cpu, excret, true, false); + return; + } else if (!nsacr_pass) { + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true); + env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK; + qemu_log_mask(CPU_LOG_INT, + "...taking Secure UsageFault on existing " + "stackframe: NSACR.CP10 prevents unstacking " + "FP regs\n"); + v7m_exception_taken(cpu, excret, true, false); + return; + } + + for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) { + uint32_t slo, shi; + uint64_t dn; + uint32_t faddr = frameptr + 0x20 + 4 * i; + + if (i >= 16) { + faddr += 8; /* Skip the slot for the FPSCR */ + } + + pop_ok = pop_ok && + v7m_stack_read(cpu, &slo, faddr, mmu_idx) && + v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx); + + if (!pop_ok) { + break; + } + + dn = (uint64_t)shi << 32 | slo; + *aa32_vfp_dreg(env, i / 2) = dn; + } + pop_ok = pop_ok && + v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx); + if (pop_ok) { + vfp_set_fpscr(env, fpscr); + } + if (!pop_ok) { + /* + * These regs are 0 if security extension present; + * otherwise merely UNKNOWN. We zero always. + */ + for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) { + *aa32_vfp_dreg(env, i / 2) = 0; + } + vfp_set_fpscr(env, 0); + } + } + } + FIELD_DP32(env->v7m.control[M_REG_S], + V7M_CONTROL, FPCA, !ftype, env->v7m.control[M_REG_S]); + + /* Commit to consuming the stack frame */ + frameptr += 0x20; + if (!ftype) { + frameptr += 0x48; + if (restore_s16_s31) { + frameptr += 0x40; + } + } + /* + * Undo stack alignment (the SPREALIGN bit indicates that the original + * pre-exception SP was not 8-aligned and we added a padding word to + * align it, so we undo this by ORing in the bit that increases it + * from the current 8-aligned value to the 8-unaligned value. (Adding 4 + * would work too but a logical OR is how the pseudocode specifies it.) 
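+         * As a concrete example (addresses for illustration only): a
+         * pre-exception SP of 0x20001ffc is 4- but not 8-aligned, so
+         * entry stacked a basic 0x20 byte frame at 0x20001fd8 and set
+         * SPREALIGN; at this point frameptr has been advanced back to
+         * 0x20001ff8, and ORing in bit 2 restores the original
+         * 0x20001ffc.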
+ */ + if (xpsr & XPSR_SPREALIGN) { + frameptr |= 4; + } + *frame_sp_p = frameptr; + } + + xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA); + if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) { + xpsr_mask &= ~XPSR_GE; + } + /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */ + xpsr_write(env, xpsr, xpsr_mask); + + if (env->v7m.secure) { + bool sfpa = xpsr & XPSR_SFPA; + + FIELD_DP32(env->v7m.control[M_REG_S], + V7M_CONTROL, SFPA, sfpa, env->v7m.control[M_REG_S]); + } + + /* + * The restored xPSR exception field will be zero if we're + * resuming in Thread mode. If that doesn't match what the + * exception return excret specified then this is a UsageFault. + * v7M requires we make this check here; v8M did it earlier. + */ + if (return_to_handler != arm_v7m_is_handler_mode(env)) { + /* + * Take an INVPC UsageFault by pushing the stack again; + * we know we're v7M so this is never a Secure UsageFault. + */ + bool ignore_stackfaults; + + assert(!arm_feature(env, ARM_FEATURE_V8)); + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; + ignore_stackfaults = v7m_push_stack(cpu); + qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " + "failed exception return integrity check\n"); + v7m_exception_taken(cpu, excret, false, ignore_stackfaults); + return; + } + + /* Otherwise, we have a successful exception exit. */ + arm_clear_exclusive(env); + arm_rebuild_hflags(env); + qemu_log_mask(CPU_LOG_INT, "...successful exception return\n"); +} + +static bool do_v7m_function_return(ARMCPU *cpu) +{ + /* + * v8M security extensions magic function return. + * We may either: + * (1) throw an exception (longjump) + * (2) return true if we successfully handled the function return + * (3) return false if we failed a consistency check and have + * pended a UsageFault that needs to be taken now + * + * At this point the magic return value is split between env->regs[15] + * and env->thumb. We don't bother to reconstitute it because we don't + * need it (all values are handled the same way). + */ + CPUARMState *env = &cpu->env; + uint32_t newpc, newpsr, newpsr_exc; + + qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n"); + + { + bool threadmode, spsel; + TCGMemOpIdx oi; + ARMMMUIdx mmu_idx; + uint32_t *frame_sp_p; + uint32_t frameptr; + + /* Pull the return address and IPSR from the Secure stack */ + threadmode = !arm_v7m_is_handler_mode(env); + spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; + + frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); + frameptr = *frame_sp_p; + + /* + * These loads may throw an exception (for MPU faults). We want to + * do them as secure, so work out what MMU index that is. 
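+         * (The secure function-return frame read here is just two words:
+         * the return address at [frameptr] and the partial xPSR at
+         * [frameptr + 4], which is why the stack pointer is advanced by
+         * 8 once the checks below pass.)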
+ */ + mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); + oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); + newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); + newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); + + /* Consistency checks on new IPSR */ + newpsr_exc = newpsr & XPSR_EXCP; + if (!((env->v7m.exception == 0 && newpsr_exc == 0) || + (env->v7m.exception == 1 && newpsr_exc != 0))) { + /* Pend the fault and tell our caller to take it */ + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, + env->v7m.secure); + qemu_log_mask(CPU_LOG_INT, + "...taking INVPC UsageFault: " + "IPSR consistency check failed\n"); + return false; + } + + *frame_sp_p = frameptr + 8; + } + + /* This invalidates frame_sp_p */ + switch_v7m_security_state(env, true); + env->v7m.exception = newpsr_exc; + env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; + if (newpsr & XPSR_SFPA) { + env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK; + } + xpsr_write(env, 0, XPSR_IT); + env->thumb = newpc & 1; + env->regs[15] = newpc & ~1; + arm_rebuild_hflags(env); + + qemu_log_mask(CPU_LOG_INT, "...function return successful\n"); + return true; +} + +static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, + uint32_t addr, uint16_t *insn) +{ + /* + * Load a 16-bit portion of a v7M instruction, returning true on success, + * or false on failure (in which case we will have pended the appropriate + * exception). + * We need to do the instruction fetch's MPU and SAU checks + * like this because there is no MMU index that would allow + * doing the load with a single function call. Instead we must + * first check that the security attributes permit the load + * and that they don't mismatch on the two halves of the instruction, + * and then we do the load as a secure load (ie using the security + * attributes of the address, not the CPU, as architecturally required). + */ + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + V8M_SAttributes sattrs = { 0 }; + MemTxAttrs attrs = { 0 }; + ARMMMUFaultInfo fi = { 0 }; + MemTxResult txres; + target_ulong page_size; + hwaddr physaddr; + int prot; + + v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); + if (!sattrs.nsc || sattrs.ns) { + /* + * This must be the second half of the insn, and it straddles a + * region boundary with the second half not being S&NSC. 
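+         * (When this check fails we can assume it is the second half:
+         * the first half is at the address whose S&NSC attribute caused
+         * us to trap here in the first place.)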
+ */ + env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); + qemu_log_mask(CPU_LOG_INT, + "...really SecureFault with SFSR.INVEP\n"); + return false; + } + if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, + &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { + /* the MPU lookup failed */ + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); + qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n"); + return false; + } +#ifdef UNICORN_ARCH_POSTFIX + *insn = glue(address_space_lduw_le, UNICORN_ARCH_POSTFIX)(cs->uc, arm_addressspace(cs, attrs), physaddr, +#else + *insn = address_space_lduw_le(cs->uc, arm_addressspace(cs, attrs), physaddr, +#endif + attrs, &txres); + if (txres != MEMTX_OK) { + env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); + qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n"); + return false; + } + return true; +} + +static bool v7m_handle_execute_nsc(ARMCPU *cpu) +{ + /* + * Check whether this attempt to execute code in a Secure & NS-Callable + * memory region is for an SG instruction; if so, then emulate the + * effect of the SG instruction and return true. Otherwise pend + * the correct kind of exception and return false. + */ + CPUARMState *env = &cpu->env; + ARMMMUIdx mmu_idx; + uint16_t insn; + + /* + * We should never get here unless get_phys_addr_pmsav8() caused + * an exception for NS executing in S&NSC memory. + */ + assert(!env->v7m.secure); + assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); + + /* We want to do the MPU lookup as secure; work out what mmu_idx that is */ + mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); + + if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) { + return false; + } + + if (!env->thumb) { + goto gen_invep; + } + + if (insn != 0xe97f) { + /* + * Not an SG instruction first half (we choose the IMPDEF + * early-SG-check option). + */ + goto gen_invep; + } + + if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) { + return false; + } + + if (insn != 0xe97f) { + /* + * Not an SG instruction second half (yes, both halves of the SG + * insn have the same hex value) + */ + goto gen_invep; + } + + /* + * OK, we have confirmed that we really have an SG instruction. + * We know we're NS in S memory so don't need to repeat those checks. + */ + qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32 + ", executing it\n", env->regs[15]); + env->regs[14] &= ~1; + env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; + switch_v7m_security_state(env, true); + xpsr_write(env, 0, XPSR_IT); + env->regs[15] += 4; + arm_rebuild_hflags(env); + return true; + +gen_invep: + env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); + qemu_log_mask(CPU_LOG_INT, + "...really SecureFault with SFSR.INVEP\n"); + return false; +} + +void arm_v7m_cpu_do_interrupt(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint32_t lr; + bool ignore_stackfaults; + + // arm_log_exception(cs->exception_index); + + /* + * For exceptions we just mark as pending on the NVIC, and let that + * handle it. 
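+     * The switch below therefore mostly just records the appropriate
+     * fault status bits (CFSR/SFSR, plus BFAR/MMFAR where valid) and
+     * pends the corresponding exception for the right security state.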
+ */ + switch (cs->exception_index) { + case EXCP_UDEF: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK; + break; + case EXCP_NOCP: + { + /* + * NOCP might be directed to something other than the current + * security state if this fault is because of NSACR; we indicate + * the target security state using exception.target_el. + */ + int target_secstate; + + if (env->exception.target_el == 3) { + target_secstate = M_REG_S; + } else { + target_secstate = env->v7m.secure; + } + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate); + env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK; + break; + } + case EXCP_INVSTATE: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK; + break; + case EXCP_STKOF: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; + break; + case EXCP_LSERR: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); + env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK; + break; + case EXCP_UNALIGNED: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK; + break; + case EXCP_SWI: + /* The PC already points to the next instruction. */ + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure); + break; + case EXCP_PREFETCH_ABORT: + case EXCP_DATA_ABORT: + /* + * Note that for M profile we don't have a guest facing FSR, but + * the env->exception.fsr will be populated by the code that + * raises the fault, in the A profile short-descriptor format. + */ + switch (env->exception.fsr & 0xf) { + case M_FAKE_FSR_NSC_EXEC: + /* + * Exception generated when we try to execute code at an address + * which is marked as Secure & Non-Secure Callable and the CPU + * is in the Non-Secure state. The only instruction which can + * be executed like this is SG (and that only if both halves of + * the SG instruction have the same security attributes.) + * Everything else must generate an INVEP SecureFault, so we + * emulate the SG instruction here. + */ + if (v7m_handle_execute_nsc(cpu)) { + return; + } + break; + case M_FAKE_FSR_SFAULT: + /* + * Various flavours of SecureFault for attempts to execute or + * access data in the wrong security state. 
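+             * Roughly: INVTRAN for a Secure-to-NS transition done without
+             * the proper instruction, INVEP for an NS jump to a Secure
+             * address that is not a valid SG entry point, and AUVIOL for
+             * an NS data access to Secure memory; the inner switch below
+             * picks between them.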
+ */ + switch (cs->exception_index) { + case EXCP_PREFETCH_ABORT: + if (env->v7m.secure) { + env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK; + qemu_log_mask(CPU_LOG_INT, + "...really SecureFault with SFSR.INVTRAN\n"); + } else { + env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; + qemu_log_mask(CPU_LOG_INT, + "...really SecureFault with SFSR.INVEP\n"); + } + break; + case EXCP_DATA_ABORT: + /* This must be an NS access to S memory */ + env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; + qemu_log_mask(CPU_LOG_INT, + "...really SecureFault with SFSR.AUVIOL\n"); + break; + } + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); + break; + case 0x8: /* External Abort */ + switch (cs->exception_index) { + case EXCP_PREFETCH_ABORT: + env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; + qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n"); + break; + case EXCP_DATA_ABORT: + env->v7m.cfsr[M_REG_NS] |= + (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK); + env->v7m.bfar = env->exception.vaddress; + qemu_log_mask(CPU_LOG_INT, + "...with CFSR.PRECISERR and BFAR 0x%x\n", + env->v7m.bfar); + break; + } + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); + break; + default: + /* + * All other FSR values are either MPU faults or "can't happen + * for M profile" cases. + */ + switch (cs->exception_index) { + case EXCP_PREFETCH_ABORT: + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; + qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n"); + break; + case EXCP_DATA_ABORT: + env->v7m.cfsr[env->v7m.secure] |= + (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK); + env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress; + qemu_log_mask(CPU_LOG_INT, + "...with CFSR.DACCVIOL and MMFAR 0x%x\n", + env->v7m.mmfar[env->v7m.secure]); + break; + } + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, + env->v7m.secure); + break; + } + break; + case EXCP_SEMIHOST: + qemu_log_mask(CPU_LOG_INT, + "...handling as semihosting call 0x%x\n", + env->regs[0]); + // env->regs[0] = do_arm_semihosting(env); FIXME + env->regs[15] += env->thumb ? 2 : 4; + return; + case EXCP_BKPT: + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false); + break; + case EXCP_IRQ: + break; + case EXCP_EXCEPTION_EXIT: + if (env->regs[15] < EXC_RETURN_MIN_MAGIC) { + /* Must be v8M security extension function return */ + assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC); + assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); + if (do_v7m_function_return(cpu)) { + return; + } + } else { + do_v7m_exception_exit(cpu); + return; + } + break; + case EXCP_LAZYFP: + /* + * We already pended the specific exception in the NVIC in the + * v7m_preserve_fp_state() helper function. + */ + break; + default: + cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); + return; /* Never happens. Keep compiler happy. */ + } + + if (arm_feature(env, ARM_FEATURE_V8)) { + lr = R_V7M_EXCRET_RES1_MASK | + R_V7M_EXCRET_DCRS_MASK; + /* + * The S bit indicates whether we should return to Secure + * or NonSecure (ie our current state). + * The ES bit indicates whether we're taking this exception + * to Secure or NonSecure (ie our target state). We set it + * later, in v7m_exception_taken(). + * The SPSEL bit is also set in v7m_exception_taken() for v8M. + * This corresponds to the ARM ARM pseudocode for v8M setting + * some LR bits in PushStack() and some in ExceptionTaken(); + * the distinction matters for the tailchain cases where we + * can take an exception without pushing the stack. 
+         */
+        if (env->v7m.secure) {
+            lr |= R_V7M_EXCRET_S_MASK;
+        }
+    } else {
+        lr = R_V7M_EXCRET_RES1_MASK |
+            R_V7M_EXCRET_S_MASK |
+            R_V7M_EXCRET_DCRS_MASK |
+            R_V7M_EXCRET_ES_MASK;
+        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
+            lr |= R_V7M_EXCRET_SPSEL_MASK;
+        }
+    }
+    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
+        lr |= R_V7M_EXCRET_FTYPE_MASK;
+    }
+    if (!arm_v7m_is_handler_mode(env)) {
+        lr |= R_V7M_EXCRET_MODE_MASK;
+    }
+
+    ignore_stackfaults = v7m_push_stack(cpu);
+    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
+}
+
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
+{
+    unsigned el = arm_current_el(env);
+
+    /* First handle registers which unprivileged code can read */
+    if (reg >= 0 && reg <= 7) {
+        return v7m_mrs_xpsr(env, reg, el);
+    } else {
+        switch (reg) {
+        case 20: /* CONTROL */
+            return v7m_mrs_control(env, env->v7m.secure);
+        case 0x94: /* CONTROL_NS */
+            /*
+             * We have to handle this here because unprivileged Secure code
+             * can read the NS CONTROL register.
+             */
+            if (!env->v7m.secure) {
+                return 0;
+            }
+            return env->v7m.control[M_REG_NS] |
+                (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
+        }
+    }
+
+    if (el == 0) {
+        return 0; /* unprivileged reads others as zero */
+    }
+
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        switch (reg) {
+        case 0x88: /* MSP_NS */
+            if (!env->v7m.secure) {
+                return 0;
+            }
+            return env->v7m.other_ss_msp;
+        case 0x89: /* PSP_NS */
+            if (!env->v7m.secure) {
+                return 0;
+            }
+            return env->v7m.other_ss_psp;
+        case 0x8a: /* MSPLIM_NS */
+            if (!env->v7m.secure) {
+                return 0;
+            }
+            return env->v7m.msplim[M_REG_NS];
+        case 0x8b: /* PSPLIM_NS */
+            if (!env->v7m.secure) {
+                return 0;
+            }
+            return env->v7m.psplim[M_REG_NS];
+        case 0x90: /* PRIMASK_NS */
+            if (!env->v7m.secure) {
+                return 0;
+            }
+            return env->v7m.primask[M_REG_NS];
+        case 0x91: /* BASEPRI_NS */
+            if (!env->v7m.secure) {
+                return 0;
+            }
+            return env->v7m.basepri[M_REG_NS];
+        case 0x93: /* FAULTMASK_NS */
+            if (!env->v7m.secure) {
+                return 0;
+            }
+            return env->v7m.faultmask[M_REG_NS];
+        case 0x98: /* SP_NS */
+        {
+            /*
+             * This gives the non-secure SP selected based on whether we're
+             * currently in handler mode or not, using the NS CONTROL.SPSEL.
+             */
+            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
+
+            if (!env->v7m.secure) {
+                return 0;
+            }
+            if (!arm_v7m_is_handler_mode(env) && spsel) {
+                return env->v7m.other_ss_psp;
+            } else {
+                return env->v7m.other_ss_msp;
+            }
+        }
+        default:
+            break;
+        }
+    }
+
+    switch (reg) {
+    case 8: /* MSP */
+        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
+    case 9: /* PSP */
+        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
+    case 10: /* MSPLIM */
+        if (!arm_feature(env, ARM_FEATURE_V8)) {
+            goto bad_reg;
+        }
+        return env->v7m.msplim[env->v7m.secure];
+    case 11: /* PSPLIM */
+        if (!arm_feature(env, ARM_FEATURE_V8)) {
+            goto bad_reg;
+        }
+        return env->v7m.psplim[env->v7m.secure];
+    case 16: /* PRIMASK */
+        return env->v7m.primask[env->v7m.secure];
+    case 17: /* BASEPRI */
+    case 18: /* BASEPRI_MAX */
+        return env->v7m.basepri[env->v7m.secure];
+    case 19: /* FAULTMASK */
+        return env->v7m.faultmask[env->v7m.secure];
+    default:
+    bad_reg:
+        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
+                      " register %d\n", reg);
+        return 0;
+    }
+}
+
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
+{
+    /*
+     * We're passed bits [11..0] of the instruction; extract
+     * SYSm and the mask bits.
+ * Invalid combinations of SYSm and mask are UNPREDICTABLE; + * we choose to treat them as if the mask bits were valid. + * NB that the pseudocode 'mask' variable is bits [11..10], + * whereas ours is [11..8]. + */ + uint32_t mask = extract32(maskreg, 8, 4); + uint32_t reg = extract32(maskreg, 0, 8); + int cur_el = arm_current_el(env); + + if (cur_el == 0 && reg > 7 && reg != 20) { + /* + * only xPSR sub-fields and CONTROL.SFPA may be written by + * unprivileged code + */ + return; + } + + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + switch (reg) { + case 0x88: /* MSP_NS */ + if (!env->v7m.secure) { + return; + } + env->v7m.other_ss_msp = val; + return; + case 0x89: /* PSP_NS */ + if (!env->v7m.secure) { + return; + } + env->v7m.other_ss_psp = val; + return; + case 0x8a: /* MSPLIM_NS */ + if (!env->v7m.secure) { + return; + } + env->v7m.msplim[M_REG_NS] = val & ~7; + return; + case 0x8b: /* PSPLIM_NS */ + if (!env->v7m.secure) { + return; + } + env->v7m.psplim[M_REG_NS] = val & ~7; + return; + case 0x90: /* PRIMASK_NS */ + if (!env->v7m.secure) { + return; + } + env->v7m.primask[M_REG_NS] = val & 1; + return; + case 0x91: /* BASEPRI_NS */ + if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) { + return; + } + env->v7m.basepri[M_REG_NS] = val & 0xff; + return; + case 0x93: /* FAULTMASK_NS */ + if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) { + return; + } + env->v7m.faultmask[M_REG_NS] = val & 1; + return; + case 0x94: /* CONTROL_NS */ + if (!env->v7m.secure) { + return; + } + write_v7m_control_spsel_for_secstate(env, + val & R_V7M_CONTROL_SPSEL_MASK, + M_REG_NS); + if (arm_feature(env, ARM_FEATURE_M_MAIN)) { + env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK; + env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK; + } + /* + * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0, + * RES0 if the FPU is not present, and is stored in the S bank + */ + if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) && + extract32(env->v7m.nsacr, 10, 1)) { + env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK; + env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK; + } + return; + case 0x98: /* SP_NS */ + { + /* + * This gives the non-secure SP selected based on whether we're + * currently in handler mode or not, using the NS CONTROL.SPSEL. + */ + bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; + bool is_psp = !arm_v7m_is_handler_mode(env) && spsel; + uint32_t limit; + + if (!env->v7m.secure) { + return; + } + + limit = is_psp ? 
env->v7m.psplim[false] : env->v7m.msplim[false]; + + if (val < limit) { + CPUState *cs = env_cpu(env); + + cpu_restore_state(cs, GETPC(), true); + raise_exception(env, EXCP_STKOF, 0, 1); + } + + if (is_psp) { + env->v7m.other_ss_psp = val; + } else { + env->v7m.other_ss_msp = val; + } + return; + } + default: + break; + } + } + + if (reg >= 0 && reg <= 7) { + v7m_msr_xpsr(env, mask, reg, val); + } else { + switch (reg) { + case 8: /* MSP */ + if (v7m_using_psp(env)) { + env->v7m.other_sp = val; + } else { + env->regs[13] = val; + } + break; + case 9: /* PSP */ + if (v7m_using_psp(env)) { + env->regs[13] = val; + } else { + env->v7m.other_sp = val; + } + break; + case 10: /* MSPLIM */ + if (!arm_feature(env, ARM_FEATURE_V8)) { + goto bad_reg; + } + env->v7m.msplim[env->v7m.secure] = val & ~7; + break; + case 11: /* PSPLIM */ + if (!arm_feature(env, ARM_FEATURE_V8)) { + goto bad_reg; + } + env->v7m.psplim[env->v7m.secure] = val & ~7; + break; + case 16: /* PRIMASK */ + env->v7m.primask[env->v7m.secure] = val & 1; + break; + case 17: /* BASEPRI */ + if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { + goto bad_reg; + } + env->v7m.basepri[env->v7m.secure] = val & 0xff; + break; + case 18: /* BASEPRI_MAX */ + if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { + goto bad_reg; + } + val &= 0xff; + if (val != 0 && (val < env->v7m.basepri[env->v7m.secure] + || env->v7m.basepri[env->v7m.secure] == 0)) { + env->v7m.basepri[env->v7m.secure] = val; + } + break; + case 19: /* FAULTMASK */ + if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { + goto bad_reg; + } + env->v7m.faultmask[env->v7m.secure] = val & 1; + break; + case 20: /* CONTROL */ + /* + * Writing to the SPSEL bit only has an effect if we are in + * thread mode; other bits can be updated by any privileged code. + * write_v7m_control_spsel() deals with updating the SPSEL bit in + * env->v7m.control, so we only need update the others. + * For v7M, we must just ignore explicit writes to SPSEL in handler + * mode; for v8M the write is permitted but will have no effect. + * All these bits are writes-ignored from non-privileged code, + * except for SFPA. + */ + if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) || + !arm_v7m_is_handler_mode(env))) { + write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); + } + if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) { + env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK; + env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK; + } + if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { + /* + * SFPA is RAZ/WI from NS or if no FPU. + * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present. + * Both are stored in the S bank. + */ + if (env->v7m.secure) { + env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; + env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK; + } + if (cur_el > 0 && + (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) || + extract32(env->v7m.nsacr, 10, 1))) { + env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK; + env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK; + } + } + break; + default: +bad_reg: + qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special" + " register %d\n", reg); + return; + } + } +} + +uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) +{ + /* Implement the TT instruction. op is bits [7:6] of the insn. 
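+     * Bit 0 of op selects the unprivileged variants (TTT/TTAT) and
+     * bit 1 the alternate-security variants (TTA/TTAT). The response
+     * word assembled at the end packs MREGION into bits [7:0], SREGION
+     * into [15:8], then MRVALID, SRVALID, R, RW, NSR, NSRW, S and
+     * IRVALID as single flag bits [16..23], with IREGION in [31:24].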
*/ + bool forceunpriv = op & 1; + bool alt = op & 2; + V8M_SAttributes sattrs = { 0 }; + uint32_t tt_resp; + bool r, rw, nsr, nsrw, mrvalid; + int prot; + ARMMMUFaultInfo fi = { 0 }; + MemTxAttrs attrs = { 0 }; + hwaddr phys_addr; + ARMMMUIdx mmu_idx; + uint32_t mregion; + bool targetpriv; + bool targetsec = env->v7m.secure; + bool is_subpage; + + /* + * Work out what the security state and privilege level we're + * interested in is... + */ + if (alt) { + targetsec = !targetsec; + } + + if (forceunpriv) { + targetpriv = false; + } else { + targetpriv = arm_v7m_is_handler_mode(env) || + !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK); + } + + /* ...and then figure out which MMU index this is */ + mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv); + + /* + * We know that the MPU and SAU don't care about the access type + * for our purposes beyond that we don't want to claim to be + * an insn fetch, so we arbitrarily call this a read. + */ + + /* + * MPU region info only available for privileged or if + * inspecting the other MPU state. + */ + if (arm_current_el(env) != 0 || alt) { + /* We can ignore the return value as prot is always set */ + pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, + &phys_addr, &attrs, &prot, &is_subpage, + &fi, &mregion); + if (mregion == -1) { + mrvalid = false; + mregion = 0; + } else { + mrvalid = true; + } + r = prot & PAGE_READ; + rw = prot & PAGE_WRITE; + } else { + r = false; + rw = false; + mrvalid = false; + mregion = 0; + } + + if (env->v7m.secure) { + v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); + nsr = sattrs.ns && r; + nsrw = sattrs.ns && rw; + } else { + sattrs.ns = true; + nsr = false; + nsrw = false; + } + + tt_resp = (sattrs.iregion << 24) | + (sattrs.irvalid << 23) | + ((!sattrs.ns) << 22) | + (nsrw << 21) | + (nsr << 20) | + (rw << 19) | + (r << 18) | + (sattrs.srvalid << 17) | + (mrvalid << 16) | + (sattrs.sregion << 8) | + mregion; + + return tt_resp; +} + +ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env, + bool secstate, bool priv, bool negpri) +{ + ARMMMUIdx mmu_idx = ARM_MMU_IDX_M; + + if (priv) { + mmu_idx |= ARM_MMU_IDX_M_PRIV; + } + + if (negpri) { + mmu_idx |= ARM_MMU_IDX_M_NEGPRI; + } + + if (secstate) { + mmu_idx |= ARM_MMU_IDX_M_S; + } + + return mmu_idx; +} + +ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env, + bool secstate, bool priv) +{ + bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate); + + return arm_v7m_mmu_idx_all(env, secstate, priv, negpri); +} + +/* Return the MMU index for a v7M CPU in the specified security state */ +ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) +{ + bool priv = arm_current_el(env) != 0; + + return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv); +} diff --git a/qemu/target-arm/neon_helper.c b/qemu/target/arm/neon_helper.c similarity index 86% rename from qemu/target-arm/neon_helper.c rename to qemu/target/arm/neon_helper.c index fdae9d45..0c2828e6 100644 --- a/qemu/target-arm/neon_helper.c +++ b/qemu/target/arm/neon_helper.c @@ -6,17 +6,16 @@ * * This code is licensed under the GNU GPL v2. 
*/ -#include -#include +#include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" +#include "fpu/softfloat.h" #define SIGNBIT (uint32_t)0x80000000 #define SIGNBIT64 ((uint64_t)1 << 63) -#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] |= CPSR_Q +#define SET_QC() env->vfp.qc[0] = 1 #define NEON_TYPE1(name, type) \ typedef struct \ @@ -582,12 +581,6 @@ NEON_VOP(cge_u32, neon_u32, 1) #undef NEON_FN #define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2 -NEON_VOP(min_s8, neon_s8, 4) -NEON_VOP(min_u8, neon_u8, 4) -NEON_VOP(min_s16, neon_s16, 2) -NEON_VOP(min_u16, neon_u16, 2) -NEON_VOP(min_s32, neon_s32, 1) -NEON_VOP(min_u32, neon_u32, 1) NEON_POP(pmin_s8, neon_s8, 4) NEON_POP(pmin_u8, neon_u8, 4) NEON_POP(pmin_s16, neon_s16, 2) @@ -595,12 +588,6 @@ NEON_POP(pmin_u16, neon_u16, 2) #undef NEON_FN #define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2 -NEON_VOP(max_s8, neon_s8, 4) -NEON_VOP(max_u8, neon_u8, 4) -NEON_VOP(max_s16, neon_s16, 2) -NEON_VOP(max_u16, neon_u16, 2) -NEON_VOP(max_s32, neon_s32, 1) -NEON_VOP(max_u32, neon_u32, 1) NEON_POP(pmax_s8, neon_s8, 4) NEON_POP(pmax_u8, neon_u8, 4) NEON_POP(pmax_s16, neon_s16, 2) @@ -608,7 +595,7 @@ NEON_POP(pmax_u16, neon_u16, 2) #undef NEON_FN #define NEON_FN(dest, src1, src2) \ - dest = (src1 > src2) ? ((int64_t)src1 - (int64_t)src2) : ((int64_t)src2 - (int64_t)src1) + dest = (src1 > src2) ? (src1 - src2) : (src2 - src1) NEON_VOP(abd_s8, neon_s8, 4) NEON_VOP(abd_u8, neon_u8, 4) NEON_VOP(abd_s16, neon_s16, 2) @@ -628,24 +615,9 @@ NEON_VOP(abd_u32, neon_u32, 1) } else { \ dest = src1 << tmp; \ }} while (0) -NEON_VOP(shl_u8, neon_u8, 4) NEON_VOP(shl_u16, neon_u16, 2) -NEON_VOP(shl_u32, neon_u32, 1) #undef NEON_FN -uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop) -{ - int8_t shift = (int8_t)shiftop; - if (shift >= 64 || shift <= -64) { - val = 0; - } else if (shift < 0) { - val >>= -shift; - } else { - val <<= shift; - } - return val; -} - #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ @@ -658,27 +630,9 @@ uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop) } else { \ dest = src1 << tmp; \ }} while (0) -NEON_VOP(shl_s8, neon_s8, 4) NEON_VOP(shl_s16, neon_s16, 2) -NEON_VOP(shl_s32, neon_s32, 1) #undef NEON_FN -uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop) -{ - int8_t shift = (int8_t)shiftop; - uint64_t val = valop; - if (shift >= 64) { - val = 0; - } else if (shift <= -64) { - val >>= 63; - } else if (shift < 0) { - val >>= -shift; - } else { - val <<= shift; - } - return val; -} - #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ @@ -688,7 +642,7 @@ uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop) } else if (tmp < 0) { \ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ } else { \ - dest = (uint64_t)src1 << tmp; \ + dest = src1 << tmp; \ }} while (0) NEON_VOP(rshl_s8, neon_s8, 4) NEON_VOP(rshl_s16, neon_s16, 2) @@ -698,7 +652,7 @@ NEON_VOP(rshl_s16, neon_s16, 2) * intermediate 64 bit accumulator. 
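  * (For example, rounding val = 0x7fffffff right by one requires
  * computing 0x7fffffff + 1 before shifting, which would overflow a
  * 32 bit signed accumulator.)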
*/ uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop) { - uint32_t dest; + int32_t dest; int32_t val = (int32_t)valop; int8_t shift = (int8_t)shiftop; if ((shift >= 32) || (shift <= -32)) { @@ -707,7 +661,7 @@ uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop) int64_t big_dest = ((int64_t)val + (1ULL << (-1 - shift))); dest = big_dest >> -shift; } else { - dest = (uint32_t)val << shift; + dest = val << shift; } return dest; } @@ -732,7 +686,7 @@ uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop) val >>= 1; } } else { - val = ((uint64_t)val) << shift; + val <<= shift; } return val; } @@ -855,7 +809,7 @@ uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) if (tmp >= (ssize_t)sizeof(src1) * 8) { \ if (src1) { \ SET_QC(); \ - dest = (uint32_t)(1U << (sizeof(src1) * 8 - 1)); \ + dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ if (src1 > 0) { \ dest--; \ } \ @@ -867,7 +821,7 @@ uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) } else if (tmp < 0) { \ dest = src1 >> -tmp; \ } else { \ - dest = (uint32_t)src1 << tmp; \ + dest = src1 << tmp; \ if ((dest >> tmp) != src1) { \ SET_QC(); \ dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ @@ -884,7 +838,7 @@ NEON_VOP_ENV(qshl_s32, neon_s32, 1) uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) { int8_t shift = (uint8_t)shiftop; - uint64_t val = valop; + int64_t val = valop; if (shift >= 64) { if (val) { SET_QC(); @@ -1052,7 +1006,7 @@ uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop if (tmp >= (ssize_t)sizeof(src1) * 8) { \ if (src1) { \ SET_QC(); \ - dest = (uint32_t)(1U << (sizeof(src1) * 8 - 1)); \ + dest = (1 << (sizeof(src1) * 8 - 1)); \ if (src1 > 0) { \ dest--; \ } \ @@ -1064,10 +1018,10 @@ uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop } else if (tmp < 0) { \ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ } else { \ - dest = ((uint64_t)src1) << tmp; \ + dest = src1 << tmp; \ if ((dest >> tmp) != src1) { \ SET_QC(); \ - dest = (uint32_t)(1U << (sizeof(src1) * 8 - 1)); \ + dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ if (src1 > 0) { \ dest--; \ } \ @@ -1133,7 +1087,7 @@ uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shift } } else { int64_t tmp = val; - val = (uint64_t)val << (shift & 0x3f); + val <<= shift; if ((val >> shift) != tmp) { SET_QC(); val = (tmp >> 63) ^ ~SIGNBIT64; @@ -1170,65 +1124,11 @@ NEON_VOP(sub_u8, neon_u8, 4) NEON_VOP(sub_u16, neon_u16, 2) #undef NEON_FN -#define NEON_FN(dest, src1, src2) dest = (int64_t)src1 * src2 +#define NEON_FN(dest, src1, src2) dest = src1 * src2 NEON_VOP(mul_u8, neon_u8, 4) NEON_VOP(mul_u16, neon_u16, 2) #undef NEON_FN -/* Polynomial multiplication is like integer multiplication except the - partial products are XORed, not added. 
*/ -uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2) -{ - uint32_t mask; - uint32_t result; - result = 0; - while (op1) { - mask = 0; - if (op1 & 1) - mask |= 0xff; - if (op1 & (1 << 8)) - mask |= (0xff << 8); - if (op1 & (1 << 16)) - mask |= (0xff << 16); - if (op1 & (1 << 24)) - mask |= (0xff << 24); - result ^= op2 & mask; - op1 = (op1 >> 1) & 0x7f7f7f7f; - op2 = (op2 << 1) & 0xfefefefe; - } - return result; -} - -uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2) -{ - uint64_t result = 0; - uint64_t mask; - uint64_t op2ex = op2; - op2ex = (op2ex & 0xff) | - ((op2ex & 0xff00) << 8) | - ((op2ex & 0xff0000) << 16) | - ((op2ex & 0xff000000) << 24); - while (op1) { - mask = 0; - if (op1 & 1) { - mask |= 0xffff; - } - if (op1 & (1 << 8)) { - mask |= (0xffffU << 16); - } - if (op1 & (1 << 16)) { - mask |= (0xffffULL << 32); - } - if (op1 & (1 << 24)) { - mask |= (0xffffULL << 48); - } - result ^= op2ex & mask; - op1 = (op1 >> 1) & 0x7f7f7f7f; - op2ex <<= 1; - } - return result; -} - #define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0 NEON_VOP(tst_u8, neon_u8, 4) NEON_VOP(tst_u16, neon_u16, 2) @@ -1241,11 +1141,6 @@ NEON_VOP(ceq_u16, neon_u16, 2) NEON_VOP(ceq_u32, neon_u32, 1) #undef NEON_FN -#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src -NEON_VOP1(abs_s8, neon_s8, 4) -NEON_VOP1(abs_s16, neon_s16, 2) -#undef NEON_FN - /* Count Leading Sign/Zero Bits. */ static inline int do_clz8(uint8_t x) { @@ -1767,7 +1662,7 @@ uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b) #define DO_MULL(dest, x, y, type1, type2) do { \ type1 tmp_x = x; \ type1 tmp_y = y; \ - dest = (type2)((int64_t)tmp_x * (int64_t)tmp_y); \ + dest = (type2)((type2)tmp_x * (type2)tmp_y); \ } while(0) uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b) @@ -1824,20 +1719,34 @@ uint64_t HELPER(neon_negl_u16)(uint64_t x) { uint16_t tmp; uint64_t result; - result = (uint16_t)(0-x); - tmp = 0-(x >> 16); +#ifdef _MSC_VER + result = (uint16_t)(0 - x); + tmp = 0 - (x >> 16); result |= (uint64_t)tmp << 16; - tmp = 0-(x >> 32); + tmp = 0 - (x >> 32); result |= (uint64_t)tmp << 32; - tmp = 0-(x >> 48); + tmp = 0 - (x >> 48); +#else + result = (uint16_t)-x; + tmp = -(x >> 16); + result |= (uint64_t)tmp << 16; + tmp = -(x >> 32); + result |= (uint64_t)tmp << 32; + tmp = -(x >> 48); +#endif result |= (uint64_t)tmp << 48; return result; } uint64_t HELPER(neon_negl_u32)(uint64_t x) { - uint32_t low = 0-x; - uint32_t high = 0-(x >> 32); +#ifdef _MSC_VER + uint32_t low = 0 - x; + uint32_t high = 0 - (x >> 32); +#else + uint32_t low = -x; + uint32_t high = -(x >> 32); +#endif return low | ((uint64_t)high << 32); } @@ -1925,7 +1834,11 @@ uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x) SET_QC(); x = ~SIGNBIT; } else if ((int32_t)x < 0) { - x = 0-x; +#ifdef _MSC_VER + x = 0 - x; +#else + x = -x; +#endif } return x; } @@ -1936,7 +1849,11 @@ uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x) SET_QC(); x = ~SIGNBIT; } else { - x = 0-x; +#ifdef _MSC_VER + x = 0 - x; +#else + x = -x; +#endif } return x; } @@ -1947,7 +1864,11 @@ uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x) SET_QC(); x = ~SIGNBIT64; } else if ((int64_t)x < 0) { - x = 0-x; +#ifdef _MSC_VER + x = 0 - x; +#else + x = -x; +#endif } return x; } @@ -1958,7 +1879,11 @@ uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x) SET_QC(); x = ~SIGNBIT64; } else { - x = 0-x; +#ifdef _MSC_VER + x = 0 - x; +#else + x = -x; +#endif } return x; } @@ -2028,12 +1953,12 @@ uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t 
b, void *fpstp) #define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1)) -void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +void HELPER(neon_qunzip8)(void *vd, void *vm) { - uint64_t zm0 = float64_val(env->vfp.regs[rm]); - uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); - uint64_t zd0 = float64_val(env->vfp.regs[rd]); - uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t *rd = vd, *rm = vm; + uint64_t zd0 = rd[0], zd1 = rd[1]; + uint64_t zm0 = rm[0], zm1 = rm[1]; + uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8) | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24) | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40) @@ -2050,18 +1975,19 @@ void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24) | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40) | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56); - env->vfp.regs[rm] = make_float64(m0); - env->vfp.regs[rm + 1] = make_float64(m1); - env->vfp.regs[rd] = make_float64(d0); - env->vfp.regs[rd + 1] = make_float64(d1); + + rm[0] = m0; + rm[1] = m1; + rd[0] = d0; + rd[1] = d1; } -void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +void HELPER(neon_qunzip16)(void *vd, void *vm) { - uint64_t zm0 = float64_val(env->vfp.regs[rm]); - uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); - uint64_t zd0 = float64_val(env->vfp.regs[rd]); - uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t *rd = vd, *rm = vm; + uint64_t zd0 = rd[0], zd1 = rd[1]; + uint64_t zm0 = rm[0], zm1 = rm[1]; + uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16) | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48); uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16) @@ -2070,32 +1996,35 @@ void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48); uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16) | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48); - env->vfp.regs[rm] = make_float64(m0); - env->vfp.regs[rm + 1] = make_float64(m1); - env->vfp.regs[rd] = make_float64(d0); - env->vfp.regs[rd + 1] = make_float64(d1); + + rm[0] = m0; + rm[1] = m1; + rd[0] = d0; + rd[1] = d1; } -void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) +void HELPER(neon_qunzip32)(void *vd, void *vm) { - uint64_t zm0 = float64_val(env->vfp.regs[rm]); - uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); - uint64_t zd0 = float64_val(env->vfp.regs[rd]); - uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t *rd = vd, *rm = vm; + uint64_t zd0 = rd[0], zd1 = rd[1]; + uint64_t zm0 = rm[0], zm1 = rm[1]; + uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32); uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32); uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32); uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32); - env->vfp.regs[rm] = make_float64(m0); - env->vfp.regs[rm + 1] = make_float64(m1); - env->vfp.regs[rd] = make_float64(d0); - env->vfp.regs[rd + 1] = make_float64(d1); + + rm[0] = m0; + rm[1] = m1; + rd[0] = d0; + rd[1] = d1; } -void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +void HELPER(neon_unzip8)(void *vd, void *vm) { - uint64_t zm = float64_val(env->vfp.regs[rm]); - uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t *rd = vd, *rm = vm; + uint64_t zd = rd[0], zm = rm[0]; + uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8) | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24) | (ELEM(zm, 0, 
8) << 32) | (ELEM(zm, 2, 8) << 40) @@ -2104,28 +2033,31 @@ void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24) | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40) | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56); - env->vfp.regs[rm] = make_float64(m0); - env->vfp.regs[rd] = make_float64(d0); + + rm[0] = m0; + rd[0] = d0; } -void HELPER(neon_unzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +void HELPER(neon_unzip16)(void *vd, void *vm) { - uint64_t zm = float64_val(env->vfp.regs[rm]); - uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t *rd = vd, *rm = vm; + uint64_t zd = rd[0], zm = rm[0]; + uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16) | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48); uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16) | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48); - env->vfp.regs[rm] = make_float64(m0); - env->vfp.regs[rd] = make_float64(d0); + + rm[0] = m0; + rd[0] = d0; } -void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +void HELPER(neon_qzip8)(void *vd, void *vm) { - uint64_t zm0 = float64_val(env->vfp.regs[rm]); - uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); - uint64_t zd0 = float64_val(env->vfp.regs[rd]); - uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t *rd = vd, *rm = vm; + uint64_t zd0 = rd[0], zd1 = rd[1]; + uint64_t zm0 = rm[0], zm1 = rm[1]; + uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8) | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24) | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40) @@ -2142,18 +2074,19 @@ void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24) | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40) | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56); - env->vfp.regs[rm] = make_float64(m0); - env->vfp.regs[rm + 1] = make_float64(m1); - env->vfp.regs[rd] = make_float64(d0); - env->vfp.regs[rd + 1] = make_float64(d1); + + rm[0] = m0; + rm[1] = m1; + rd[0] = d0; + rd[1] = d1; } -void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +void HELPER(neon_qzip16)(void *vd, void *vm) { - uint64_t zm0 = float64_val(env->vfp.regs[rm]); - uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); - uint64_t zd0 = float64_val(env->vfp.regs[rd]); - uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t *rd = vd, *rm = vm; + uint64_t zd0 = rd[0], zd1 = rd[1]; + uint64_t zm0 = rm[0], zm1 = rm[1]; + uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16) | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48); uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16) @@ -2162,32 +2095,35 @@ void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48); uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16) | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48); - env->vfp.regs[rm] = make_float64(m0); - env->vfp.regs[rm + 1] = make_float64(m1); - env->vfp.regs[rd] = make_float64(d0); - env->vfp.regs[rd + 1] = make_float64(d1); + + rm[0] = m0; + rm[1] = m1; + rd[0] = d0; + rd[1] = d1; } -void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) +void HELPER(neon_qzip32)(void *vd, void *vm) { - uint64_t zm0 = float64_val(env->vfp.regs[rm]); - uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); - uint64_t zd0 = float64_val(env->vfp.regs[rd]); - uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t *rd = vd, *rm = vm; + uint64_t zd0 = rd[0], zd1 = rd[1]; + 
uint64_t zm0 = rm[0], zm1 = rm[1]; + uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32); uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32); uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32); uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32); - env->vfp.regs[rm] = make_float64(m0); - env->vfp.regs[rm + 1] = make_float64(m1); - env->vfp.regs[rd] = make_float64(d0); - env->vfp.regs[rd + 1] = make_float64(d1); + + rm[0] = m0; + rm[1] = m1; + rd[0] = d0; + rd[1] = d1; } -void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +void HELPER(neon_zip8)(void *vd, void *vm) { - uint64_t zm = float64_val(env->vfp.regs[rm]); - uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t *rd = vd, *rm = vm; + uint64_t zd = rd[0], zm = rm[0]; + uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8) | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24) | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40) @@ -2196,48 +2132,21 @@ void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm) | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24) | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40) | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56); - env->vfp.regs[rm] = make_float64(m0); - env->vfp.regs[rd] = make_float64(d0); + + rm[0] = m0; + rd[0] = d0; } -void HELPER(neon_zip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +void HELPER(neon_zip16)(void *vd, void *vm) { - uint64_t zm = float64_val(env->vfp.regs[rm]); - uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t *rd = vd, *rm = vm; + uint64_t zd = rd[0], zm = rm[0]; + uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16) | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48); uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16) | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48); - env->vfp.regs[rm] = make_float64(m0); - env->vfp.regs[rd] = make_float64(d0); -} -/* Helper function for 64 bit polynomial multiply case: - * perform PolynomialMult(op1, op2) and return either the top or - * bottom half of the 128 bit result. - */ -uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2) -{ - int bitnum; - uint64_t res = 0; - - for (bitnum = 0; bitnum < 64; bitnum++) { - if (op1 & (1ULL << bitnum)) { - res ^= op2 << bitnum; - } - } - return res; -} -uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2) -{ - int bitnum; - uint64_t res = 0; - - /* bit 0 of op1 can't influence the high 64 bits at all */ - for (bitnum = 1; bitnum < 64; bitnum++) { - if (op1 & (1ULL << bitnum)) { - res ^= op2 >> (64 - bitnum); - } - } - return res; + rm[0] = m0; + rd[0] = d0; } diff --git a/qemu/target-arm/op_addsub.h b/qemu/target/arm/op_addsub.h similarity index 100% rename from qemu/target-arm/op_addsub.h rename to qemu/target/arm/op_addsub.h diff --git a/qemu/target/arm/op_helper.c b/qemu/target/arm/op_helper.c new file mode 100644 index 00000000..7a1923c7 --- /dev/null +++ b/qemu/target/arm/op_helper.c @@ -0,0 +1,934 @@ +/* + * ARM helper routines + * + * Copyright (c) 2005-2007 CodeSourcery, LLC + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "internals.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" + +#define SIGNBIT (uint32_t)0x80000000 +#define SIGNBIT64 ((uint64_t)1 << 63) + +static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp, + uint32_t syndrome, uint32_t target_el) +{ + CPUState *cs = env_cpu(env); + + if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) { + /* + * Redirect NS EL1 exceptions to NS EL2. These are reported with + * their original syndrome register value, with the exception of + * SIMD/FP access traps, which are reported as uncategorized + * (see DDI0478C.a D1.10.4) + */ + target_el = 2; + if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) { + syndrome = syn_uncategorized(); + } + } + + assert(!excp_is_internal(excp)); + cs->exception_index = excp; + env->exception.syndrome = syndrome; + env->exception.target_el = target_el; + + return cs; +} + +void raise_exception(CPUARMState *env, uint32_t excp, + uint32_t syndrome, uint32_t target_el) +{ + CPUState *cs = do_raise_exception(env, excp, syndrome, target_el); + cpu_loop_exit(cs); +} + +void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome, + uint32_t target_el, uintptr_t ra) +{ + CPUState *cs = do_raise_exception(env, excp, syndrome, target_el); + cpu_loop_exit_restore(cs, ra); +} + +uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn, + uint32_t maxindex) +{ + uint32_t val, shift; + uint64_t *table = vn; + + val = 0; + for (shift = 0; shift < 32; shift += 8) { + uint32_t index = (ireg >> shift) & 0xff; + if (index < maxindex) { + uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff; + val |= tmp << shift; + } else { + val |= def & (0xff << shift); + } + } + return val; +} + +void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue) +{ + /* + * Perform the v8M stack limit check for SP updates from translated code, + * raising an exception if the limit is breached. + */ + if (newvalue < v7m_sp_limit(env)) { + CPUState *cs = env_cpu(env); + + /* + * Stack limit exceptions are a rare case, so rather than syncing + * PC/condbits before the call, we use cpu_restore_state() to + * get them right before raising the exception. + */ + cpu_restore_state(cs, GETPC(), true); + raise_exception(env, EXCP_STKOF, 0, 1); + } +} + +uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a + b; + if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) + env->QF = 1; + return res; +} + +uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a + b; + if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) { + env->QF = 1; + res = ~(((int32_t)a >> 31) ^ SIGNBIT); + } + return res; +} + +uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a - b; + if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) { + env->QF = 1; + res = ~(((int32_t)a >> 31) ^ SIGNBIT); + } + return res; +} + +uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a + b; + if (res < a) { + env->QF = 1; + res = ~0; + } + return res; +} + +uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a - b; + if (res > a) { + env->QF = 1; + res = 0; + } + return res; +} + +/* Signed saturation. 
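+ * For example, with do_ssat() as defined below:
+ *   do_ssat(env, 200, 7)  -> 127  (0x7f) and sets QF
+ *   do_ssat(env, -200, 7) -> -128 (0xffffff80) and sets QF
+ * i.e. the value is clamped to the signed range representable in
+ * (shift + 1) bits.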
*/ +static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift) +{ + int32_t top; + uint32_t mask; + + top = val >> shift; + mask = (1u << shift) - 1; + if (top > 0) { + env->QF = 1; + return mask; + } else if (top < -1) { + env->QF = 1; + return ~mask; + } + return val; +} + +/* Unsigned saturation. */ +static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift) +{ + uint32_t max; + + max = (1u << shift) - 1; + if (val < 0) { + env->QF = 1; + return 0; + } else if (val > max) { + env->QF = 1; + return max; + } + return val; +} + +/* Signed saturate. */ +uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift) +{ + return do_ssat(env, x, shift); +} + +/* Dual halfword signed saturate. */ +uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift) +{ + uint32_t res; + + res = (uint16_t)do_ssat(env, (int16_t)x, shift); + res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16; + return res; +} + +/* Unsigned saturate. */ +uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift) +{ + return do_usat(env, x, shift); +} + +/* Dual halfword unsigned saturate. */ +uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift) +{ + uint32_t res; + + res = (uint16_t)do_usat(env, (int16_t)x, shift); + res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16; + return res; +} + +void HELPER(setend)(CPUARMState *env) +{ + env->uncached_cpsr ^= CPSR_E; + arm_rebuild_hflags(env); +} + +/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped. + * The function returns the target EL (1-3) if the instruction is to be trapped; + * otherwise it returns 0 indicating it is not trapped. + */ +static inline int check_wfx_trap(CPUARMState *env, bool is_wfe) +{ + int cur_el = arm_current_el(env); + uint64_t mask; + + if (arm_feature(env, ARM_FEATURE_M)) { + /* M profile cores can never trap WFI/WFE. */ + return 0; + } + + /* If we are currently in EL0 then we need to check if SCTLR is set up for + * WFx instructions being trapped to EL1. These trap bits don't exist in v7. + */ + if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) { + int target_el; + + mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI; + if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) { + /* Secure EL0 and Secure PL1 is at EL3 */ + target_el = 3; + } else { + target_el = 1; + } + + if (!(env->cp15.sctlr_el[target_el] & mask)) { + return target_el; + } + } + + /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it + * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the + * bits will be zero indicating no trap. + */ + if (cur_el < 2) { + mask = is_wfe ? HCR_TWE : HCR_TWI; + if (arm_hcr_el2_eff(env) & mask) { + return 2; + } + } + + /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */ + if (cur_el < 3) { + mask = (is_wfe) ? SCR_TWE : SCR_TWI; + if (env->cp15.scr_el3 & mask) { + return 3; + } + } + + return 0; +} + +void HELPER(wfi)(CPUARMState *env, uint32_t insn_len) +{ + CPUState *cs = env_cpu(env); + int target_el = check_wfx_trap(env, false); + + if (cpu_has_work(cs)) { + /* Don't bother to go into our "low power state" if + * we would just wake up immediately. 
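+         * Otherwise the CPU really halts: EXCP_HLT with cs->halted set
+         * makes the outer execution loop wait for an interrupt. When the
+         * instruction is trapped instead, the PC is first wound back by
+         * insn_len below so that ELR points at the WFI itself.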
+ */ + return; + } + + if (target_el) { + if (env->aarch64) { + env->pc -= insn_len; + } else { + env->regs[15] -= insn_len; + } + + raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2), + target_el); + } + + cs->exception_index = EXCP_HLT; + cs->halted = 1; + cpu_loop_exit(cs); +} + +void HELPER(wfe)(CPUARMState *env) +{ + /* This is a hint instruction that is semantically different + * from YIELD even though we currently implement it identically. + * Don't actually halt the CPU, just yield back to top + * level loop. This is not going into a "low power state" + * (ie halting until some event occurs), so we never take + * a configurable trap to a different exception level. + */ + HELPER(yield)(env); +} + +void HELPER(yield)(CPUARMState *env) +{ + CPUState *cs = env_cpu(env); + + /* This is a non-trappable hint instruction that generally indicates + * that the guest is currently busy-looping. Yield control back to the + * top level loop so that a more deserving VCPU has a chance to run. + */ + cs->exception_index = EXCP_YIELD; + cpu_loop_exit(cs); +} + +/* Raise an internal-to-QEMU exception. This is limited to only + * those EXCP values which are special cases for QEMU to interrupt + * execution and not to be used for exceptions which are passed to + * the guest (those must all have syndrome information and thus should + * use exception_with_syndrome). + */ +void HELPER(exception_internal)(CPUARMState *env, uint32_t excp) +{ + CPUState *cs = env_cpu(env); + + assert(excp_is_internal(excp)); + cs->exception_index = excp; + cpu_loop_exit(cs); +} + +/* Raise an exception with the specified syndrome register value */ +void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp, + uint32_t syndrome, uint32_t target_el) +{ + raise_exception(env, excp, syndrome, target_el); +} + +/* Raise an EXCP_BKPT with the specified syndrome register value, + * targeting the correct exception level for debug exceptions. + */ +void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome) +{ + int debug_el = arm_debug_target_el(env); + int cur_el = arm_current_el(env); + + /* FSR will only be used if the debug target EL is AArch32. */ + env->exception.fsr = arm_debug_exception_fsr(env); + /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing + * values to the guest that it shouldn't be able to see at its + * exception/security level. + */ + env->exception.vaddress = 0; + /* + * Other kinds of architectural debug exception are ignored if + * they target an exception level below the current one (in QEMU + * this is checked by arm_generate_debug_exceptions()). Breakpoint + * instructions are special because they always generate an exception + * to somewhere: if they can't go to the configured debug exception + * level they are taken to the current exception level. + */ + if (debug_el < cur_el) { + debug_el = cur_el; + } + raise_exception(env, EXCP_BKPT, syndrome, debug_el); +} + +uint32_t HELPER(cpsr_read)(CPUARMState *env) +{ + /* + * We store the ARMv8 PSTATE.SS bit in env->uncached_cpsr. + * This is convenient for populating SPSR_ELx, but must be + * hidden from aarch32 mode, where it is not visible. + * + * TODO: ARMv8.4-DIT -- need to move SS somewhere else. + */ + return cpsr_read(env) & ~(CPSR_EXEC | PSTATE_SS); +} + +void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask) +{ + cpsr_write(env, val, mask, CPSRWriteByInstr); + /* TODO: Not all cpsr bits are relevant to hflags. 
*/ + arm_rebuild_hflags(env); +} + +/* Write the CPSR for a 32-bit exception return */ +void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) +{ + uint32_t mask; + + arm_call_pre_el_change_hook(env_archcpu(env)); + + mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar); + cpsr_write(env, val, mask, CPSRWriteExceptionReturn); + + /* Generated code has already stored the new PC value, but + * without masking out its low bits, because which bits need + * masking depends on whether we're returning to Thumb or ARM + * state. Do the masking now. + */ + env->regs[15] &= (env->thumb ? ~1 : ~3); + arm_rebuild_hflags(env); + + arm_call_el_change_hook(env_archcpu(env)); +} + +/* Access to user mode registers from privileged modes. */ +uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno) +{ + uint32_t val; + + if (regno == 13) { + val = env->banked_r13[BANK_USRSYS]; + } else if (regno == 14) { + val = env->banked_r14[BANK_USRSYS]; + } else if (regno >= 8 + && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { + val = env->usr_regs[regno - 8]; + } else { + val = env->regs[regno]; + } + return val; +} + +void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val) +{ + if (regno == 13) { + env->banked_r13[BANK_USRSYS] = val; + } else if (regno == 14) { + env->banked_r14[BANK_USRSYS] = val; + } else if (regno >= 8 + && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { + env->usr_regs[regno - 8] = val; + } else { + env->regs[regno] = val; + } +} + +void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val) +{ + if ((env->uncached_cpsr & CPSR_M) == mode) { + env->regs[13] = val; + } else { + env->banked_r13[bank_number(mode)] = val; + } +} + +uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode) +{ + if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) { + /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF. + * Other UNPREDICTABLE and UNDEF cases were caught at translate time. + */ + raise_exception(env, EXCP_UDEF, syn_uncategorized(), + exception_target_el(env)); + } + + if ((env->uncached_cpsr & CPSR_M) == mode) { + return env->regs[13]; + } else { + return env->banked_r13[bank_number(mode)]; + } +} + +static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode, + uint32_t regno) +{ + /* Raise an exception if the requested access is one of the UNPREDICTABLE + * cases; otherwise return. This broadly corresponds to the pseudocode + * BankedRegisterAccessValid() and SPSRAccessValid(), + * except that we have already handled some cases at translate time. 
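+ * Two illustrative UNPREDICTABLE cases rejected below: accessing the
+ * bank of the mode we are already in (curmode == tgtmode), and touching
+ * SPSR_hyp or r13_hyp from anywhere other than Monitor mode.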
+ */ + int curmode = env->uncached_cpsr & CPSR_M; + + if (regno == 17) { + /* ELR_Hyp: a special case because access from tgtmode is OK */ + if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) { + goto undef; + } + return; + } + + if (curmode == tgtmode) { + goto undef; + } + + if (tgtmode == ARM_CPU_MODE_USR) { + if (regno >= 8 && regno <= 12) { + if (curmode != ARM_CPU_MODE_FIQ) { + goto undef; + } + } else { + switch (regno) { + case 13: + if (curmode == ARM_CPU_MODE_SYS) { + goto undef; + } + break; + case 14: + if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) { + goto undef; + } + break; + default: + break; + } + } + } + + if (tgtmode == ARM_CPU_MODE_HYP) { + /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */ + if (curmode != ARM_CPU_MODE_MON) { + goto undef; + } + } + + return; + +undef: + raise_exception(env, EXCP_UDEF, syn_uncategorized(), + exception_target_el(env)); +} + +void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode, + uint32_t regno) +{ + msr_mrs_banked_exc_checks(env, tgtmode, regno); + + if (regno >= 8 && regno <= 12) { + switch (tgtmode) { + case ARM_CPU_MODE_USR: + env->usr_regs[regno - 8] = value; + break; + case ARM_CPU_MODE_FIQ: + env->fiq_regs[regno - 8] = value; + break; + default: + // g_assert_not_reached(); + break; + } + } else { + switch (regno) { + case 16: /* SPSRs */ + env->banked_spsr[bank_number(tgtmode)] = value; + break; + case 17: /* ELR_Hyp */ + env->elr_el[2] = value; + break; + case 13: + env->banked_r13[bank_number(tgtmode)] = value; + break; + case 14: + env->banked_r14[r14_bank_number(tgtmode)] = value; + break; + default: + g_assert_not_reached(); + } + } +} + +uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno) +{ + msr_mrs_banked_exc_checks(env, tgtmode, regno); + + if (regno >= 8 && regno <= 12) { + switch (tgtmode) { + case ARM_CPU_MODE_USR: + return env->usr_regs[regno - 8]; + case ARM_CPU_MODE_FIQ: + return env->fiq_regs[regno - 8]; + default: + g_assert_not_reached(); + // never reach here + return 0; + } + } else { + switch (regno) { + case 16: /* SPSRs */ + return env->banked_spsr[bank_number(tgtmode)]; + case 17: /* ELR_Hyp */ + return env->elr_el[2]; + case 13: + return env->banked_r13[bank_number(tgtmode)]; + case 14: + return env->banked_r14[r14_bank_number(tgtmode)]; + default: + g_assert_not_reached(); + // never reach here + return 0; + } + } +} + +void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome, + uint32_t isread) +{ + const ARMCPRegInfo *ri = rip; + int target_el = 0; + + if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14 + && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) { + raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env)); + } + + /* + * Check for an EL2 trap due to HSTR_EL2. We expect EL0 accesses + * to sysregs non accessible at EL0 to have UNDEF-ed already. 
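+ * As a concrete example: for a 32-bit cp15 register with crn == 9,
+ * HSTR_EL2.T9 (bit 9) is what gates the access below; T4 and T14 are
+ * RES0 and therefore masked out.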
+ */
+    if (!is_a64(env) && arm_current_el(env) < 2 && ri->cp == 15 &&
+        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+        uint32_t mask = 1 << ri->crn;
+
+        if (ri->type & ARM_CP_64BIT) {
+            mask = 1 << ri->crm;
+        }
+
+        /* T4 and T14 are RES0 */
+        mask &= ~((1 << 4) | (1 << 14));
+
+        if (env->cp15.hstr_el2 & mask) {
+            target_el = 2;
+            goto except;
+        }
+    }
+
+    if (!ri->accessfn) {
+        return;
+    }
+
+    switch (ri->accessfn(env, ri, isread)) {
+    case CP_ACCESS_OK:
+        return;
+    case CP_ACCESS_TRAP:
+        target_el = exception_target_el(env);
+        break;
+    case CP_ACCESS_TRAP_EL2:
+        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
+         * a bug in the access function.
+         */
+        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
+        target_el = 2;
+        break;
+    case CP_ACCESS_TRAP_EL3:
+        target_el = 3;
+        break;
+    case CP_ACCESS_TRAP_UNCATEGORIZED:
+        target_el = exception_target_el(env);
+        syndrome = syn_uncategorized();
+        break;
+    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
+        target_el = 2;
+        syndrome = syn_uncategorized();
+        break;
+    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
+        target_el = 3;
+        syndrome = syn_uncategorized();
+        break;
+    case CP_ACCESS_TRAP_FP_EL2:
+        target_el = 2;
+        /* Since we are an implementation that takes exceptions on a trapped
+         * conditional insn only if the insn has passed its condition code
+         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
+         * (which is also the required value for AArch64 traps).
+         */
+        syndrome = syn_fp_access_trap(1, 0xe, false);
+        break;
+    case CP_ACCESS_TRAP_FP_EL3:
+        target_el = 3;
+        syndrome = syn_fp_access_trap(1, 0xe, false);
+        break;
+    default:
+        g_assert_not_reached();
+        break;
+    }
+
+except:
+    raise_exception(env, EXCP_UDEF, syndrome, target_el);
+}
+
+/* The ARM_CP_IO case needs no special handling here, so these four
+ * accessors reduce to plain writefn/readfn calls.
+ */
+void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
+{
+    const ARMCPRegInfo *ri = rip;
+
+    ri->writefn(env, ri, value);
+}
+
+uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
+{
+    const ARMCPRegInfo *ri = rip;
+
+    return ri->readfn(env, ri);
+}
+
+void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
+{
+    const ARMCPRegInfo *ri = rip;
+
+    ri->writefn(env, ri, value);
+}
+
+uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
+{
+    const ARMCPRegInfo *ri = rip;
+
+    return ri->readfn(env, ri);
+}
+
+void HELPER(pre_hvc)(CPUARMState *env)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    int cur_el = arm_current_el(env);
+    /* FIXME: Use actual secure state. */
+    bool secure = false;
+    bool undef;
+
+    if (arm_is_psci_call(cpu, EXCP_HVC)) {
+        /* If PSCI is enabled and this looks like a valid PSCI call then
+         * that overrides the architecturally mandated HVC behaviour.
+         */
+        return;
+    }
+
+    if (!arm_feature(env, ARM_FEATURE_EL2)) {
+        /* If EL2 doesn't exist, HVC always UNDEFs */
+        undef = true;
+    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
+        /* EL3.HCE has priority over EL2.HCD. */
+        undef = !(env->cp15.scr_el3 & SCR_HCE);
+    } else {
+        undef = env->cp15.hcr_el2 & HCR_HCD;
+    }
+
+    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
+     * For ARMv8/AArch64, HVC is allowed in EL3.
+     * Note that we've already trapped HVC from EL0 at translation
+     * time.
+ */
+    if (secure && (!is_a64(env) || cur_el == 1)) {
+        undef = true;
+    }
+
+    if (undef) {
+        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+                        exception_target_el(env));
+    }
+}
+
+void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    int cur_el = arm_current_el(env);
+    bool secure = arm_is_secure(env);
+    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;
+
+    /*
+     * SMC behaviour is summarized in the following table.
+     * This helper handles the "Trap to EL2" and "Undef insn" cases.
+     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
+     * helper.
+     *
+     *  -> ARM_FEATURE_EL3 and !SMD
+     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
+     *
+     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
+     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
+     *  Conduit not SMC          Trap to EL2         Trap to EL3
+     *
+     *  -> ARM_FEATURE_EL3 and SMD
+     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
+     *
+     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
+     *  Conduit SMC, inval call  Trap to EL2         Undef insn
+     *  Conduit not SMC          Trap to EL2         Undef insn
+     *
+     *  -> !ARM_FEATURE_EL3
+     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
+     *
+     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
+     *  Conduit SMC, inval call  Trap to EL2         Undef insn
+     *  Conduit not SMC          Undef insn          Undef insn
+     */
+
+    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
+     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
+     * extensions, SMD only applies to NS state.
+     * On ARMv7 without the Virtualization extensions, the SMD bit
+     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
+     * so we need not special case this here.
+     */
+    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
+                                                     : smd_flag && !secure;
+
+    if (!arm_feature(env, ARM_FEATURE_EL3) &&
+        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+        /* If we have no EL3 then SMC always UNDEFs and can't be
+         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
+         * firmware within QEMU, and we want an EL2 guest to be able
+         * to forbid its EL1 from making PSCI calls into QEMU's
+         * "firmware" via HCR.TSC, so for these purposes treat
+         * PSCI-via-SMC as implying an EL3.
+         * This handles the very last line of the previous table.
+         */
+        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+                        exception_target_el(env));
+    }
+
+    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
+        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
+         * We also want an EL2 guest to be able to forbid its EL1 from
+         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
+         * This handles all the "Trap to EL2" cases of the previous table.
+         */
+        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
+    }
+
+    /* Catch the two remaining "Undef insn" cases of the previous table:
+     * - PSCI conduit is SMC but we don't have a valid PSCI call,
+     * - We don't have EL3 or SMD is set.
+     */
+    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
+        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
+        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+                        exception_target_el(env));
+    }
+}
+
+/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
+   The only way to do that in TCG is a conditional branch, which clobbers
+   all our temporaries.  For now implement these as helper functions. */
+
+/* Similarly for variable shift instructions.
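+   For example, with shl_cc() below (only the bottom byte of the shift
+   register is significant, hence "i & 0xff"):
+     shl_cc(env, 0x80000001, 1)  -> 0x00000002 with CF = 1
+     shl_cc(env, x, 32)          -> 0 with CF = x & 1
+     shl_cc(env, x, 33)          -> 0 with CF = 0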
*/ + +uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i) +{ + int shift = i & 0xff; + if (shift >= 32) { + if (shift == 32) + env->CF = x & 1; + else + env->CF = 0; + return 0; + } else if (shift != 0) { + env->CF = (x >> (32 - shift)) & 1; + return x << shift; + } + return x; +} + +uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i) +{ + int shift = i & 0xff; + if (shift >= 32) { + if (shift == 32) + env->CF = (x >> 31) & 1; + else + env->CF = 0; + return 0; + } else if (shift != 0) { + env->CF = (x >> (shift - 1)) & 1; + return x >> shift; + } + return x; +} + +uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i) +{ + int shift = i & 0xff; + if (shift >= 32) { + env->CF = (x >> 31) & 1; + return (int32_t)x >> 31; + } else if (shift != 0) { + env->CF = (x >> (shift - 1)) & 1; + return (int32_t)x >> shift; + } + return x; +} + +uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i) +{ + int shift1, shift; + shift1 = i & 0xff; + shift = shift1 & 0x1f; + if (shift == 0) { + if (shift1 != 0) + env->CF = (x >> 31) & 1; + return x; + } else { + env->CF = (x >> (shift - 1)) & 1; + return ((uint32_t)x >> shift) | (x << (32 - shift)); + } +} diff --git a/qemu/target/arm/pauth_helper.c b/qemu/target/arm/pauth_helper.c new file mode 100644 index 00000000..b9096303 --- /dev/null +++ b/qemu/target/arm/pauth_helper.c @@ -0,0 +1,494 @@ +/* + * ARM v8.3-PAuth Operations + * + * Copyright (c) 2019 Linaro, Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "exec/helper-proto.h" +#include "tcg/tcg-gvec-desc.h" + + +static uint64_t pac_cell_shuffle(uint64_t i) +{ + uint64_t o = 0; + + o |= extract64(i, 52, 4); + o |= extract64(i, 24, 4) << 4; + o |= extract64(i, 44, 4) << 8; + o |= extract64(i, 0, 4) << 12; + + o |= extract64(i, 28, 4) << 16; + o |= extract64(i, 48, 4) << 20; + o |= extract64(i, 4, 4) << 24; + o |= extract64(i, 40, 4) << 28; + + o |= extract64(i, 32, 4) << 32; + o |= extract64(i, 12, 4) << 36; + o |= extract64(i, 56, 4) << 40; + o |= extract64(i, 20, 4) << 44; + + o |= extract64(i, 8, 4) << 48; + o |= extract64(i, 36, 4) << 52; + o |= extract64(i, 16, 4) << 56; + o |= extract64(i, 60, 4) << 60; + + return o; +} + +static uint64_t pac_cell_inv_shuffle(uint64_t i) +{ + uint64_t o = 0; + + o |= extract64(i, 12, 4); + o |= extract64(i, 24, 4) << 4; + o |= extract64(i, 48, 4) << 8; + o |= extract64(i, 36, 4) << 12; + + o |= extract64(i, 56, 4) << 16; + o |= extract64(i, 44, 4) << 20; + o |= extract64(i, 4, 4) << 24; + o |= extract64(i, 16, 4) << 28; + + o |= i & MAKE_64BIT_MASK(32, 4); + o |= extract64(i, 52, 4) << 36; + o |= extract64(i, 28, 4) << 40; + o |= extract64(i, 8, 4) << 44; + + o |= extract64(i, 20, 4) << 48; + o |= extract64(i, 0, 4) << 52; + o |= extract64(i, 40, 4) << 56; + o |= i & MAKE_64BIT_MASK(60, 4); + + return o; +} + +static uint64_t pac_sub(uint64_t i) +{ + static const uint8_t sub[16] = { + 0xb, 0x6, 0x8, 0xf, 0xc, 0x0, 0x9, 0xe, + 0x3, 0x7, 0x4, 0x5, 0xd, 0x2, 0x1, 0xa, + }; + uint64_t o = 0; + int b; + + for (b = 0; b < 64; b += 4) { + o |= (uint64_t)sub[(i >> b) & 0xf] << b; + } + return o; +} + +static uint64_t pac_inv_sub(uint64_t i) +{ + static const uint8_t inv_sub[16] = { + 0x5, 0xe, 0xd, 0x8, 0xa, 0xb, 0x1, 0x9, + 0x2, 0x6, 0xf, 0x0, 0x4, 0xc, 0x7, 0x3, + }; + uint64_t o = 0; + int b; + + for (b = 0; b < 64; b += 4) { + o |= (uint64_t)inv_sub[(i >> b) & 0xf] << b; + } + return o; +} + +static int rot_cell(int cell, int n) +{ + /* 4-bit rotate left by n. 
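+ * The nibble is duplicated into bits [7:4] so that a single extract32()
+ * produces the rotation; e.g. rot_cell(0x8, 1): 0x8 -> 0x88, and
+ * extract32(0x88, 3, 4) == 0x1.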
*/ + cell |= cell << 4; + return extract32(cell, 4 - n, 4); +} + +static uint64_t pac_mult(uint64_t i) +{ + uint64_t o = 0; + int b; + + for (b = 0; b < 4 * 4; b += 4) { + int i0, i4, i8, ic, t0, t1, t2, t3; + + i0 = extract64(i, b, 4); + i4 = extract64(i, b + 4 * 4, 4); + i8 = extract64(i, b + 8 * 4, 4); + ic = extract64(i, b + 12 * 4, 4); + + t0 = rot_cell(i8, 1) ^ rot_cell(i4, 2) ^ rot_cell(i0, 1); + t1 = rot_cell(ic, 1) ^ rot_cell(i4, 1) ^ rot_cell(i0, 2); + t2 = rot_cell(ic, 2) ^ rot_cell(i8, 1) ^ rot_cell(i0, 1); + t3 = rot_cell(ic, 1) ^ rot_cell(i8, 2) ^ rot_cell(i4, 1); + + o |= (uint64_t)t3 << b; + o |= (uint64_t)t2 << (b + 4 * 4); + o |= (uint64_t)t1 << (b + 8 * 4); + o |= (uint64_t)t0 << (b + 12 * 4); + } + return o; +} + +static uint64_t tweak_cell_rot(uint64_t cell) +{ + return (cell >> 1) | (((cell ^ (cell >> 1)) & 1) << 3); +} + +static uint64_t tweak_shuffle(uint64_t i) +{ + uint64_t o = 0; + + o |= extract64(i, 16, 4) << 0; + o |= extract64(i, 20, 4) << 4; + o |= tweak_cell_rot(extract64(i, 24, 4)) << 8; + o |= extract64(i, 28, 4) << 12; + + o |= tweak_cell_rot(extract64(i, 44, 4)) << 16; + o |= extract64(i, 8, 4) << 20; + o |= extract64(i, 12, 4) << 24; + o |= tweak_cell_rot(extract64(i, 32, 4)) << 28; + + o |= extract64(i, 48, 4) << 32; + o |= extract64(i, 52, 4) << 36; + o |= extract64(i, 56, 4) << 40; + o |= tweak_cell_rot(extract64(i, 60, 4)) << 44; + + o |= tweak_cell_rot(extract64(i, 0, 4)) << 48; + o |= extract64(i, 4, 4) << 52; + o |= tweak_cell_rot(extract64(i, 40, 4)) << 56; + o |= tweak_cell_rot(extract64(i, 36, 4)) << 60; + + return o; +} + +static uint64_t tweak_cell_inv_rot(uint64_t cell) +{ + return ((cell << 1) & 0xf) | ((cell & 1) ^ (cell >> 3)); +} + +static uint64_t tweak_inv_shuffle(uint64_t i) +{ + uint64_t o = 0; + + o |= tweak_cell_inv_rot(extract64(i, 48, 4)); + o |= extract64(i, 52, 4) << 4; + o |= extract64(i, 20, 4) << 8; + o |= extract64(i, 24, 4) << 12; + + o |= extract64(i, 0, 4) << 16; + o |= extract64(i, 4, 4) << 20; + o |= tweak_cell_inv_rot(extract64(i, 8, 4)) << 24; + o |= extract64(i, 12, 4) << 28; + + o |= tweak_cell_inv_rot(extract64(i, 28, 4)) << 32; + o |= tweak_cell_inv_rot(extract64(i, 60, 4)) << 36; + o |= tweak_cell_inv_rot(extract64(i, 56, 4)) << 40; + o |= tweak_cell_inv_rot(extract64(i, 16, 4)) << 44; + + o |= extract64(i, 32, 4) << 48; + o |= extract64(i, 36, 4) << 52; + o |= extract64(i, 40, 4) << 56; + o |= tweak_cell_inv_rot(extract64(i, 44, 4)) << 60; + + return o; +} + +static uint64_t pauth_computepac(uint64_t data, uint64_t modifier, + ARMPACKey key) +{ + static const uint64_t RC[5] = { + 0x0000000000000000ull, + 0x13198A2E03707344ull, + 0xA4093822299F31D0ull, + 0x082EFA98EC4E6C89ull, + 0x452821E638D01377ull, + }; + const uint64_t alpha = 0xC0AC29B7C97C50DDull; + /* + * Note that in the ARM pseudocode, key0 contains bits <127:64> + * and key1 contains bits <63:0> of the 128-bit key. 
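+     * The computation itself is the QARMA-64 block cipher that the ARMv8.3
+     * pseudocode ComputePAC() specifies: RC[] above are its round constants
+     * and alpha its reflection constant.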
+ */ + uint64_t key0 = key.hi, key1 = key.lo; + uint64_t workingval, runningmod, roundkey, modk0; + int i; + + modk0 = (key0 << 63) | ((key0 >> 1) ^ (key0 >> 63)); + runningmod = modifier; + workingval = data ^ key0; + + for (i = 0; i <= 4; ++i) { + roundkey = key1 ^ runningmod; + workingval ^= roundkey; + workingval ^= RC[i]; + if (i > 0) { + workingval = pac_cell_shuffle(workingval); + workingval = pac_mult(workingval); + } + workingval = pac_sub(workingval); + runningmod = tweak_shuffle(runningmod); + } + roundkey = modk0 ^ runningmod; + workingval ^= roundkey; + workingval = pac_cell_shuffle(workingval); + workingval = pac_mult(workingval); + workingval = pac_sub(workingval); + workingval = pac_cell_shuffle(workingval); + workingval = pac_mult(workingval); + workingval ^= key1; + workingval = pac_cell_inv_shuffle(workingval); + workingval = pac_inv_sub(workingval); + workingval = pac_mult(workingval); + workingval = pac_cell_inv_shuffle(workingval); + workingval ^= key0; + workingval ^= runningmod; + for (i = 0; i <= 4; ++i) { + workingval = pac_inv_sub(workingval); + if (i < 4) { + workingval = pac_mult(workingval); + workingval = pac_cell_inv_shuffle(workingval); + } + runningmod = tweak_inv_shuffle(runningmod); + roundkey = key1 ^ runningmod; + workingval ^= RC[4 - i]; + workingval ^= roundkey; + workingval ^= alpha; + } + workingval ^= modk0; + + return workingval; +} + +static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier, + ARMPACKey *key, bool data) +{ + ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); + ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); + uint64_t pac, ext_ptr, ext, test; + int bot_bit, top_bit; + + /* If tagged pointers are in use, use ptr<55>, otherwise ptr<63>. */ + if (param.tbi) { + ext = sextract64(ptr, 55, 1); + } else { + ext = sextract64(ptr, 63, 1); + } + + /* Build a pointer with known good extension bits. */ + top_bit = 64 - 8 * param.tbi; + bot_bit = 64 - param.tsz; + ext_ptr = deposit64(ptr, bot_bit, top_bit - bot_bit, ext); + + pac = pauth_computepac(ext_ptr, modifier, *key); + + /* + * Check if the ptr has good extension bits and corrupt the + * pointer authentication code if not. + */ + test = sextract64(ptr, bot_bit, top_bit - bot_bit); + if (test != 0 && test != -1) { + pac ^= MAKE_64BIT_MASK(top_bit - 1, 1); + } + + /* + * Preserve the determination between upper and lower at bit 55, + * and insert pointer authentication code. + */ + if (param.tbi) { + ptr &= ~MAKE_64BIT_MASK(bot_bit, 55 - bot_bit + 1); + pac &= MAKE_64BIT_MASK(bot_bit, 54 - bot_bit + 1); + } else { + ptr &= MAKE_64BIT_MASK(0, bot_bit); + pac &= ~(MAKE_64BIT_MASK(55, 1) | MAKE_64BIT_MASK(0, bot_bit)); + } + ext &= MAKE_64BIT_MASK(55, 1); + return pac | ext | ptr; +} + +static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param) +{ + /* Note that bit 55 is used whether or not the regime has 2 ranges. 
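+ * For example, with param.tbi == 0 and param.tsz == 16 the PAC occupies
+ * bits <63:48>, and each of those bits is replaced with a copy of bit 55
+ * to recover the canonical address.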
*/ + uint64_t extfield = sextract64(ptr, 55, 1); + int bot_pac_bit = 64 - param.tsz; + int top_pac_bit = 64 - 8 * param.tbi; + + return deposit64(ptr, bot_pac_bit, top_pac_bit - bot_pac_bit, extfield); +} + +static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier, + ARMPACKey *key, bool data, int keynumber) +{ + ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); + ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); + int bot_bit, top_bit; + uint64_t pac, orig_ptr, test; + + orig_ptr = pauth_original_ptr(ptr, param); + pac = pauth_computepac(orig_ptr, modifier, *key); + bot_bit = 64 - param.tsz; + top_bit = 64 - 8 * param.tbi; + + test = (pac ^ ptr) & ~MAKE_64BIT_MASK(55, 1); + if (unlikely(extract64(test, bot_bit, top_bit - bot_bit))) { + int error_code = (keynumber << 1) | (keynumber ^ 1); + if (param.tbi) { + return deposit64(orig_ptr, 53, 2, error_code); + } else { + return deposit64(orig_ptr, 61, 2, error_code); + } + } + return orig_ptr; +} + +static uint64_t pauth_strip(CPUARMState *env, uint64_t ptr, bool data) +{ + ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); + ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); + + return pauth_original_ptr(ptr, param); +} + +static void QEMU_NORETURN pauth_trap(CPUARMState *env, int target_el, + uintptr_t ra) +{ + raise_exception_ra(env, EXCP_UDEF, syn_pactrap(), target_el, ra); +} + +static void pauth_check_trap(CPUARMState *env, int el, uintptr_t ra) +{ + if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) { + uint64_t hcr = arm_hcr_el2_eff(env); + bool trap = !(hcr & HCR_API); + if (el == 0) { + /* Trap only applies to EL1&0 regime. */ + trap &= (hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE); + } + /* FIXME: ARMv8.3-NV: HCR_NV trap takes precedence for ERETA[AB]. 
*/ + if (trap) { + pauth_trap(env, 2, ra); + } + } + if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) { + if (!(env->cp15.scr_el3 & SCR_API)) { + pauth_trap(env, 3, ra); + } + } +} + +static bool pauth_key_enabled(CPUARMState *env, int el, uint32_t bit) +{ + return (arm_sctlr(env, el) & bit) != 0; +} + +uint64_t HELPER(pacia)(CPUARMState *env, uint64_t x, uint64_t y) +{ + int el = arm_current_el(env); + if (!pauth_key_enabled(env, el, SCTLR_EnIA)) { + return x; + } + pauth_check_trap(env, el, GETPC()); + return pauth_addpac(env, x, y, &env->keys.apia, false); +} + +uint64_t HELPER(pacib)(CPUARMState *env, uint64_t x, uint64_t y) +{ + int el = arm_current_el(env); + if (!pauth_key_enabled(env, el, SCTLR_EnIB)) { + return x; + } + pauth_check_trap(env, el, GETPC()); + return pauth_addpac(env, x, y, &env->keys.apib, false); +} + +uint64_t HELPER(pacda)(CPUARMState *env, uint64_t x, uint64_t y) +{ + int el = arm_current_el(env); + if (!pauth_key_enabled(env, el, SCTLR_EnDA)) { + return x; + } + pauth_check_trap(env, el, GETPC()); + return pauth_addpac(env, x, y, &env->keys.apda, true); +} + +uint64_t HELPER(pacdb)(CPUARMState *env, uint64_t x, uint64_t y) +{ + int el = arm_current_el(env); + if (!pauth_key_enabled(env, el, SCTLR_EnDB)) { + return x; + } + pauth_check_trap(env, el, GETPC()); + return pauth_addpac(env, x, y, &env->keys.apdb, true); +} + +uint64_t HELPER(pacga)(CPUARMState *env, uint64_t x, uint64_t y) +{ + uint64_t pac; + + pauth_check_trap(env, arm_current_el(env), GETPC()); + pac = pauth_computepac(x, y, env->keys.apga); + + return pac & 0xffffffff00000000ull; +} + +uint64_t HELPER(autia)(CPUARMState *env, uint64_t x, uint64_t y) +{ + int el = arm_current_el(env); + if (!pauth_key_enabled(env, el, SCTLR_EnIA)) { + return x; + } + pauth_check_trap(env, el, GETPC()); + return pauth_auth(env, x, y, &env->keys.apia, false, 0); +} + +uint64_t HELPER(autib)(CPUARMState *env, uint64_t x, uint64_t y) +{ + int el = arm_current_el(env); + if (!pauth_key_enabled(env, el, SCTLR_EnIB)) { + return x; + } + pauth_check_trap(env, el, GETPC()); + return pauth_auth(env, x, y, &env->keys.apib, false, 1); +} + +uint64_t HELPER(autda)(CPUARMState *env, uint64_t x, uint64_t y) +{ + int el = arm_current_el(env); + if (!pauth_key_enabled(env, el, SCTLR_EnDA)) { + return x; + } + pauth_check_trap(env, el, GETPC()); + return pauth_auth(env, x, y, &env->keys.apda, true, 0); +} + +uint64_t HELPER(autdb)(CPUARMState *env, uint64_t x, uint64_t y) +{ + int el = arm_current_el(env); + if (!pauth_key_enabled(env, el, SCTLR_EnDB)) { + return x; + } + pauth_check_trap(env, el, GETPC()); + return pauth_auth(env, x, y, &env->keys.apdb, true, 1); +} + +uint64_t HELPER(xpaci)(CPUARMState *env, uint64_t a) +{ + return pauth_strip(env, a, false); +} + +uint64_t HELPER(xpacd)(CPUARMState *env, uint64_t a) +{ + return pauth_strip(env, a, true); +} diff --git a/qemu/target/arm/psci.c b/qemu/target/arm/psci.c new file mode 100644 index 00000000..4d04fa97 --- /dev/null +++ b/qemu/target/arm/psci.c @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2014 - Linaro + * Author: Rob Herring + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "internals.h" + +bool arm_is_psci_call(ARMCPU *cpu, int excp_type) +{ + /* Return true if the r0/x0 value indicates a PSCI call and + * the exception type matches the configured PSCI conduit. This is + * called before the SMC/HVC instruction is executed, to decide whether + * we should treat it as a PSCI call or with the architecturally + * defined behaviour for an SMC or HVC (which might be UNDEF or trap + * to EL2 or to EL3). + */ + + switch (excp_type) { + case EXCP_HVC: + if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_HVC) { + return false; + } + break; + case EXCP_SMC: + if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { + return false; + } + break; + default: + return false; + } + + return false; +} + +void arm_handle_psci_call(ARMCPU *cpu) +{ +} diff --git a/qemu/target/arm/sve_helper.c b/qemu/target/arm/sve_helper.c new file mode 100644 index 00000000..2abbeba5 --- /dev/null +++ b/qemu/target/arm/sve_helper.c @@ -0,0 +1,5374 @@ +/* + * ARM SVE Operations + * + * Copyright (c) 2018 Linaro, Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "exec/helper-proto.h" +#include "tcg/tcg-gvec-desc.h" +#include "fpu/softfloat.h" +#include "tcg/tcg.h" + + +/* Note that vector data is stored in host-endian 64-bit chunks, + so addressing units smaller than that needs a host-endian fixup. */ +#ifdef HOST_WORDS_BIGENDIAN +#define H1(x) ((x) ^ 7) +#define H1_2(x) ((x) ^ 6) +#define H1_4(x) ((x) ^ 4) +#define H2(x) ((x) ^ 3) +#define H4(x) ((x) ^ 1) +#else +#define H1(x) (x) +#define H1_2(x) (x) +#define H1_4(x) (x) +#define H2(x) (x) +#define H4(x) (x) +#endif + +/* Return a value for NZCV as per the ARM PredTest pseudofunction. + * + * The return value has bit 31 set if N is set, bit 1 set if Z is clear, + * and bit 0 set if C is set. Compare the definitions of these variables + * within CPUARMState. + */ + +/* For no G bits set, NZCV = C. */ +#define PREDTEST_INIT 1 + +/* This is an iterative function, called for each Pd and Pg word + * moving forward. + */ +static uint32_t iter_predtest_fwd(uint64_t d, uint64_t g, uint32_t flags) +{ + if (likely(g)) { + /* Compute N from first D & G. + Use bit 2 to signal first G bit seen. */ + if (!(flags & 4)) { +#ifdef _MSC_VER + flags |= ((d & (g & (0 - g))) != 0) << 31; +#else + flags |= ((d & (g & -g)) != 0) << 31; +#endif + flags |= 4; + } + + /* Accumulate Z from each D & G. */ + flags |= ((d & g) != 0) << 1; + + /* Compute C from last !(D & G). Replace previous. 
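+ * A worked example: a single active element that is set, D == G == 1,
+ * gives iter_predtest_fwd(1, 1, PREDTEST_INIT) == 0x80000006, i.e.
+ * N set (first active element true), Z clear (some element true) and
+ * C clear (last active element true); bit 2 is only the internal
+ * "first G seen" marker.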
*/ + flags = deposit32(flags, 0, 1, (d & pow2floor(g)) == 0); + } + return flags; +} + +/* This is an iterative function, called for each Pd and Pg word + * moving backward. + */ +static uint32_t iter_predtest_bwd(uint64_t d, uint64_t g, uint32_t flags) +{ + if (likely(g)) { + /* Compute C from first (i.e last) !(D & G). + Use bit 2 to signal first G bit seen. */ + if (!(flags & 4)) { + flags += 4 - 1; /* add bit 2, subtract C from PREDTEST_INIT */ + flags |= (d & pow2floor(g)) == 0; + } + + /* Accumulate Z from each D & G. */ + flags |= ((d & g) != 0) << 1; + + /* Compute N from last (i.e first) D & G. Replace previous. */ +#ifdef _MSC_VER + flags = deposit32(flags, 31, 1, (d & (g & (0 - g))) != 0); +#else + flags = deposit32(flags, 31, 1, (d & (g & -g)) != 0); +#endif + } + return flags; +} + +/* The same for a single word predicate. */ +uint32_t HELPER(sve_predtest1)(uint64_t d, uint64_t g) +{ + return iter_predtest_fwd(d, g, PREDTEST_INIT); +} + +/* The same for a multi-word predicate. */ +uint32_t HELPER(sve_predtest)(void *vd, void *vg, uint32_t words) +{ + uint32_t flags = PREDTEST_INIT; + uint64_t *d = vd, *g = vg; + uintptr_t i = 0; + + do { + flags = iter_predtest_fwd(d[i], g[i], flags); + } while (++i < words); + + return flags; +} + +/* Expand active predicate bits to bytes, for byte elements. + * for (i = 0; i < 256; ++i) { + * unsigned long m = 0; + * for (j = 0; j < 8; j++) { + * if ((i >> j) & 1) { + * m |= 0xfful << (j << 3); + * } + * } + * printf("0x%016lx,\n", m); + * } + */ +static inline uint64_t expand_pred_b(uint8_t byte) +{ + static const uint64_t word[256] = { + 0x0000000000000000, 0x00000000000000ff, 0x000000000000ff00, + 0x000000000000ffff, 0x0000000000ff0000, 0x0000000000ff00ff, + 0x0000000000ffff00, 0x0000000000ffffff, 0x00000000ff000000, + 0x00000000ff0000ff, 0x00000000ff00ff00, 0x00000000ff00ffff, + 0x00000000ffff0000, 0x00000000ffff00ff, 0x00000000ffffff00, + 0x00000000ffffffff, 0x000000ff00000000, 0x000000ff000000ff, + 0x000000ff0000ff00, 0x000000ff0000ffff, 0x000000ff00ff0000, + 0x000000ff00ff00ff, 0x000000ff00ffff00, 0x000000ff00ffffff, + 0x000000ffff000000, 0x000000ffff0000ff, 0x000000ffff00ff00, + 0x000000ffff00ffff, 0x000000ffffff0000, 0x000000ffffff00ff, + 0x000000ffffffff00, 0x000000ffffffffff, 0x0000ff0000000000, + 0x0000ff00000000ff, 0x0000ff000000ff00, 0x0000ff000000ffff, + 0x0000ff0000ff0000, 0x0000ff0000ff00ff, 0x0000ff0000ffff00, + 0x0000ff0000ffffff, 0x0000ff00ff000000, 0x0000ff00ff0000ff, + 0x0000ff00ff00ff00, 0x0000ff00ff00ffff, 0x0000ff00ffff0000, + 0x0000ff00ffff00ff, 0x0000ff00ffffff00, 0x0000ff00ffffffff, + 0x0000ffff00000000, 0x0000ffff000000ff, 0x0000ffff0000ff00, + 0x0000ffff0000ffff, 0x0000ffff00ff0000, 0x0000ffff00ff00ff, + 0x0000ffff00ffff00, 0x0000ffff00ffffff, 0x0000ffffff000000, + 0x0000ffffff0000ff, 0x0000ffffff00ff00, 0x0000ffffff00ffff, + 0x0000ffffffff0000, 0x0000ffffffff00ff, 0x0000ffffffffff00, + 0x0000ffffffffffff, 0x00ff000000000000, 0x00ff0000000000ff, + 0x00ff00000000ff00, 0x00ff00000000ffff, 0x00ff000000ff0000, + 0x00ff000000ff00ff, 0x00ff000000ffff00, 0x00ff000000ffffff, + 0x00ff0000ff000000, 0x00ff0000ff0000ff, 0x00ff0000ff00ff00, + 0x00ff0000ff00ffff, 0x00ff0000ffff0000, 0x00ff0000ffff00ff, + 0x00ff0000ffffff00, 0x00ff0000ffffffff, 0x00ff00ff00000000, + 0x00ff00ff000000ff, 0x00ff00ff0000ff00, 0x00ff00ff0000ffff, + 0x00ff00ff00ff0000, 0x00ff00ff00ff00ff, 0x00ff00ff00ffff00, + 0x00ff00ff00ffffff, 0x00ff00ffff000000, 0x00ff00ffff0000ff, + 0x00ff00ffff00ff00, 0x00ff00ffff00ffff, 0x00ff00ffffff0000, + 
0x00ff00ffffff00ff, 0x00ff00ffffffff00, 0x00ff00ffffffffff, + 0x00ffff0000000000, 0x00ffff00000000ff, 0x00ffff000000ff00, + 0x00ffff000000ffff, 0x00ffff0000ff0000, 0x00ffff0000ff00ff, + 0x00ffff0000ffff00, 0x00ffff0000ffffff, 0x00ffff00ff000000, + 0x00ffff00ff0000ff, 0x00ffff00ff00ff00, 0x00ffff00ff00ffff, + 0x00ffff00ffff0000, 0x00ffff00ffff00ff, 0x00ffff00ffffff00, + 0x00ffff00ffffffff, 0x00ffffff00000000, 0x00ffffff000000ff, + 0x00ffffff0000ff00, 0x00ffffff0000ffff, 0x00ffffff00ff0000, + 0x00ffffff00ff00ff, 0x00ffffff00ffff00, 0x00ffffff00ffffff, + 0x00ffffffff000000, 0x00ffffffff0000ff, 0x00ffffffff00ff00, + 0x00ffffffff00ffff, 0x00ffffffffff0000, 0x00ffffffffff00ff, + 0x00ffffffffffff00, 0x00ffffffffffffff, 0xff00000000000000, + 0xff000000000000ff, 0xff0000000000ff00, 0xff0000000000ffff, + 0xff00000000ff0000, 0xff00000000ff00ff, 0xff00000000ffff00, + 0xff00000000ffffff, 0xff000000ff000000, 0xff000000ff0000ff, + 0xff000000ff00ff00, 0xff000000ff00ffff, 0xff000000ffff0000, + 0xff000000ffff00ff, 0xff000000ffffff00, 0xff000000ffffffff, + 0xff0000ff00000000, 0xff0000ff000000ff, 0xff0000ff0000ff00, + 0xff0000ff0000ffff, 0xff0000ff00ff0000, 0xff0000ff00ff00ff, + 0xff0000ff00ffff00, 0xff0000ff00ffffff, 0xff0000ffff000000, + 0xff0000ffff0000ff, 0xff0000ffff00ff00, 0xff0000ffff00ffff, + 0xff0000ffffff0000, 0xff0000ffffff00ff, 0xff0000ffffffff00, + 0xff0000ffffffffff, 0xff00ff0000000000, 0xff00ff00000000ff, + 0xff00ff000000ff00, 0xff00ff000000ffff, 0xff00ff0000ff0000, + 0xff00ff0000ff00ff, 0xff00ff0000ffff00, 0xff00ff0000ffffff, + 0xff00ff00ff000000, 0xff00ff00ff0000ff, 0xff00ff00ff00ff00, + 0xff00ff00ff00ffff, 0xff00ff00ffff0000, 0xff00ff00ffff00ff, + 0xff00ff00ffffff00, 0xff00ff00ffffffff, 0xff00ffff00000000, + 0xff00ffff000000ff, 0xff00ffff0000ff00, 0xff00ffff0000ffff, + 0xff00ffff00ff0000, 0xff00ffff00ff00ff, 0xff00ffff00ffff00, + 0xff00ffff00ffffff, 0xff00ffffff000000, 0xff00ffffff0000ff, + 0xff00ffffff00ff00, 0xff00ffffff00ffff, 0xff00ffffffff0000, + 0xff00ffffffff00ff, 0xff00ffffffffff00, 0xff00ffffffffffff, + 0xffff000000000000, 0xffff0000000000ff, 0xffff00000000ff00, + 0xffff00000000ffff, 0xffff000000ff0000, 0xffff000000ff00ff, + 0xffff000000ffff00, 0xffff000000ffffff, 0xffff0000ff000000, + 0xffff0000ff0000ff, 0xffff0000ff00ff00, 0xffff0000ff00ffff, + 0xffff0000ffff0000, 0xffff0000ffff00ff, 0xffff0000ffffff00, + 0xffff0000ffffffff, 0xffff00ff00000000, 0xffff00ff000000ff, + 0xffff00ff0000ff00, 0xffff00ff0000ffff, 0xffff00ff00ff0000, + 0xffff00ff00ff00ff, 0xffff00ff00ffff00, 0xffff00ff00ffffff, + 0xffff00ffff000000, 0xffff00ffff0000ff, 0xffff00ffff00ff00, + 0xffff00ffff00ffff, 0xffff00ffffff0000, 0xffff00ffffff00ff, + 0xffff00ffffffff00, 0xffff00ffffffffff, 0xffffff0000000000, + 0xffffff00000000ff, 0xffffff000000ff00, 0xffffff000000ffff, + 0xffffff0000ff0000, 0xffffff0000ff00ff, 0xffffff0000ffff00, + 0xffffff0000ffffff, 0xffffff00ff000000, 0xffffff00ff0000ff, + 0xffffff00ff00ff00, 0xffffff00ff00ffff, 0xffffff00ffff0000, + 0xffffff00ffff00ff, 0xffffff00ffffff00, 0xffffff00ffffffff, + 0xffffffff00000000, 0xffffffff000000ff, 0xffffffff0000ff00, + 0xffffffff0000ffff, 0xffffffff00ff0000, 0xffffffff00ff00ff, + 0xffffffff00ffff00, 0xffffffff00ffffff, 0xffffffffff000000, + 0xffffffffff0000ff, 0xffffffffff00ff00, 0xffffffffff00ffff, + 0xffffffffffff0000, 0xffffffffffff00ff, 0xffffffffffffff00, + 0xffffffffffffffff, + }; + return word[byte]; +} + +/* Similarly for half-word elements. 
+ * for (i = 0; i < 256; ++i) { + * unsigned long m = 0; + * if (i & 0xaa) { + * continue; + * } + * for (j = 0; j < 8; j += 2) { + * if ((i >> j) & 1) { + * m |= 0xfffful << (j << 3); + * } + * } + * printf("[0x%x] = 0x%016lx,\n", i, m); + * } + */ +static inline uint64_t expand_pred_h(uint8_t byte) +{ + static const uint64_t word[] = { + [0x01] = 0x000000000000ffff, [0x04] = 0x00000000ffff0000, + [0x05] = 0x00000000ffffffff, [0x10] = 0x0000ffff00000000, + [0x11] = 0x0000ffff0000ffff, [0x14] = 0x0000ffffffff0000, + [0x15] = 0x0000ffffffffffff, [0x40] = 0xffff000000000000, + [0x41] = 0xffff00000000ffff, [0x44] = 0xffff0000ffff0000, + [0x45] = 0xffff0000ffffffff, [0x50] = 0xffffffff00000000, + [0x51] = 0xffffffff0000ffff, [0x54] = 0xffffffffffff0000, + [0x55] = 0xffffffffffffffff, + }; + return word[byte & 0x55]; +} + +/* Similarly for single word elements. */ +static inline uint64_t expand_pred_s(uint8_t byte) +{ + static const uint64_t word[] = { + [0x01] = 0x00000000ffffffffull, + [0x10] = 0xffffffff00000000ull, + [0x11] = 0xffffffffffffffffull, + }; + return word[byte & 0x11]; +} + +/* Swap 16-bit words within a 32-bit word. */ +static inline uint32_t hswap32(uint32_t h) +{ + return rol32(h, 16); +} + +/* Swap 16-bit words within a 64-bit word. */ +static inline uint64_t hswap64(uint64_t h) +{ + uint64_t m = 0x0000ffff0000ffffull; + h = rol64(h, 32); + return ((h & m) << 16) | ((h >> 16) & m); +} + +/* Swap 32-bit words within a 64-bit word. */ +static inline uint64_t wswap64(uint64_t h) +{ + return rol64(h, 32); +} + +#define LOGICAL_PPPP(NAME, FUNC) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ +{ \ + uintptr_t opr_sz = simd_oprsz(desc); \ + uint64_t *d = vd, *n = vn, *m = vm, *g = vg; \ + uintptr_t i; \ + for (i = 0; i < opr_sz / 8; ++i) { \ + d[i] = FUNC(n[i], m[i], g[i]); \ + } \ +} + +#define DO_AND(N, M, G) (((N) & (M)) & (G)) +#define DO_BIC(N, M, G) (((N) & ~(M)) & (G)) +#define DO_EOR(N, M, G) (((N) ^ (M)) & (G)) +#define DO_ORR(N, M, G) (((N) | (M)) & (G)) +#define DO_ORN(N, M, G) (((N) | ~(M)) & (G)) +#define DO_NOR(N, M, G) (~((N) | (M)) & (G)) +#define DO_NAND(N, M, G) (~((N) & (M)) & (G)) +#define DO_SEL(N, M, G) (((N) & (G)) | ((M) & ~(G))) + +LOGICAL_PPPP(sve_and_pppp, DO_AND) +LOGICAL_PPPP(sve_bic_pppp, DO_BIC) +LOGICAL_PPPP(sve_eor_pppp, DO_EOR) +LOGICAL_PPPP(sve_sel_pppp, DO_SEL) +LOGICAL_PPPP(sve_orr_pppp, DO_ORR) +LOGICAL_PPPP(sve_orn_pppp, DO_ORN) +LOGICAL_PPPP(sve_nor_pppp, DO_NOR) +LOGICAL_PPPP(sve_nand_pppp, DO_NAND) + +#undef DO_AND +#undef DO_BIC +#undef DO_EOR +#undef DO_ORR +#undef DO_ORN +#undef DO_NOR +#undef DO_NAND +#undef DO_SEL +#undef LOGICAL_PPPP + +/* Fully general three-operand expander, controlled by a predicate. + * This is complicated by the host-endian storage of the register file. + */ +/* ??? I don't expect the compiler could ever vectorize this itself. + * With some tables we can convert bit masks to byte masks, and with + * extra care wrt byte/word ordering we could use gcc generic vectors + * and do 16 bytes at a time. 
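+ * A note on the predicate stride below: SVE predicates hold one bit per
+ * byte of vector, so after each element the macro advances with
+ * "pg >>= sizeof(TYPE)"; for wider elements only every sizeof(TYPE)-th
+ * predicate bit is significant.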
+ */ +#define DO_ZPZZ(NAME, TYPE, H, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc); \ + for (i = 0; i < opr_sz; ) { \ + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ + do { \ + if (pg & 1) { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + TYPE mm = *(TYPE *)((char *)vm + H(i)); \ + *(TYPE *)((char *)vd + H(i)) = OP(nn, mm); \ + } \ + i += sizeof(TYPE), pg >>= sizeof(TYPE); \ + } while (i & 15); \ + } \ +} + +/* Similarly, specialized for 64-bit operands. */ +#define DO_ZPZZ_D(NAME, TYPE, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; \ + TYPE *d = vd, *n = vn, *m = vm; \ + uint8_t *pg = vg; \ + for (i = 0; i < opr_sz; i += 1) { \ + if (pg[H1(i)] & 1) { \ + TYPE nn = n[i], mm = m[i]; \ + d[i] = OP(nn, mm); \ + } \ + } \ +} + +#define DO_AND(N, M) (N & M) +#define DO_EOR(N, M) (N ^ M) +#define DO_ORR(N, M) (N | M) +#define DO_BIC(N, M) (N & ~M) +#define DO_ADD(N, M) (N + M) +#define DO_SUB(N, M) (N - M) +#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M)) +#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N)) +#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N)) +#define DO_MUL(N, M) (N * M) + + +/* + * We must avoid the C undefined behaviour cases: division by + * zero and signed division of INT_MIN by -1. Both of these + * have architecturally defined required results for Arm. + * We special case all signed divisions by -1 to avoid having + * to deduce the minimum integer for the type involved. + */ +#define DO_SDIV(N, M) (unlikely(M == 0) ? 0 : unlikely(M == -1) ? -N : N / M) +#define DO_UDIV(N, M) (unlikely(M == 0) ? 0 : N / M) + +DO_ZPZZ(sve_and_zpzz_b, uint8_t, H1, DO_AND) +DO_ZPZZ(sve_and_zpzz_h, uint16_t, H1_2, DO_AND) +DO_ZPZZ(sve_and_zpzz_s, uint32_t, H1_4, DO_AND) +DO_ZPZZ_D(sve_and_zpzz_d, uint64_t, DO_AND) + +DO_ZPZZ(sve_orr_zpzz_b, uint8_t, H1, DO_ORR) +DO_ZPZZ(sve_orr_zpzz_h, uint16_t, H1_2, DO_ORR) +DO_ZPZZ(sve_orr_zpzz_s, uint32_t, H1_4, DO_ORR) +DO_ZPZZ_D(sve_orr_zpzz_d, uint64_t, DO_ORR) + +DO_ZPZZ(sve_eor_zpzz_b, uint8_t, H1, DO_EOR) +DO_ZPZZ(sve_eor_zpzz_h, uint16_t, H1_2, DO_EOR) +DO_ZPZZ(sve_eor_zpzz_s, uint32_t, H1_4, DO_EOR) +DO_ZPZZ_D(sve_eor_zpzz_d, uint64_t, DO_EOR) + +DO_ZPZZ(sve_bic_zpzz_b, uint8_t, H1, DO_BIC) +DO_ZPZZ(sve_bic_zpzz_h, uint16_t, H1_2, DO_BIC) +DO_ZPZZ(sve_bic_zpzz_s, uint32_t, H1_4, DO_BIC) +DO_ZPZZ_D(sve_bic_zpzz_d, uint64_t, DO_BIC) + +DO_ZPZZ(sve_add_zpzz_b, uint8_t, H1, DO_ADD) +DO_ZPZZ(sve_add_zpzz_h, uint16_t, H1_2, DO_ADD) +DO_ZPZZ(sve_add_zpzz_s, uint32_t, H1_4, DO_ADD) +DO_ZPZZ_D(sve_add_zpzz_d, uint64_t, DO_ADD) + +DO_ZPZZ(sve_sub_zpzz_b, uint8_t, H1, DO_SUB) +DO_ZPZZ(sve_sub_zpzz_h, uint16_t, H1_2, DO_SUB) +DO_ZPZZ(sve_sub_zpzz_s, uint32_t, H1_4, DO_SUB) +DO_ZPZZ_D(sve_sub_zpzz_d, uint64_t, DO_SUB) + +DO_ZPZZ(sve_smax_zpzz_b, int8_t, H1, DO_MAX) +DO_ZPZZ(sve_smax_zpzz_h, int16_t, H1_2, DO_MAX) +DO_ZPZZ(sve_smax_zpzz_s, int32_t, H1_4, DO_MAX) +DO_ZPZZ_D(sve_smax_zpzz_d, int64_t, DO_MAX) + +DO_ZPZZ(sve_umax_zpzz_b, uint8_t, H1, DO_MAX) +DO_ZPZZ(sve_umax_zpzz_h, uint16_t, H1_2, DO_MAX) +DO_ZPZZ(sve_umax_zpzz_s, uint32_t, H1_4, DO_MAX) +DO_ZPZZ_D(sve_umax_zpzz_d, uint64_t, DO_MAX) + +DO_ZPZZ(sve_smin_zpzz_b, int8_t, H1, DO_MIN) +DO_ZPZZ(sve_smin_zpzz_h, int16_t, H1_2, DO_MIN) +DO_ZPZZ(sve_smin_zpzz_s, int32_t, H1_4, DO_MIN) +DO_ZPZZ_D(sve_smin_zpzz_d, int64_t, DO_MIN) + +DO_ZPZZ(sve_umin_zpzz_b, uint8_t, H1, DO_MIN) +DO_ZPZZ(sve_umin_zpzz_h, uint16_t, H1_2, DO_MIN) 
+DO_ZPZZ(sve_umin_zpzz_s, uint32_t, H1_4, DO_MIN)
+DO_ZPZZ_D(sve_umin_zpzz_d, uint64_t, DO_MIN)
+
+DO_ZPZZ(sve_sabd_zpzz_b, int8_t, H1, DO_ABD)
+DO_ZPZZ(sve_sabd_zpzz_h, int16_t, H1_2, DO_ABD)
+DO_ZPZZ(sve_sabd_zpzz_s, int32_t, H1_4, DO_ABD)
+DO_ZPZZ_D(sve_sabd_zpzz_d, int64_t, DO_ABD)
+
+DO_ZPZZ(sve_uabd_zpzz_b, uint8_t, H1, DO_ABD)
+DO_ZPZZ(sve_uabd_zpzz_h, uint16_t, H1_2, DO_ABD)
+DO_ZPZZ(sve_uabd_zpzz_s, uint32_t, H1_4, DO_ABD)
+DO_ZPZZ_D(sve_uabd_zpzz_d, uint64_t, DO_ABD)
+
+/* Because the computation type is at least twice as large as required,
+   these work for both signed and unsigned source types.  */
+static inline uint8_t do_mulh_b(int32_t n, int32_t m)
+{
+    return (n * m) >> 8;
+}
+
+static inline uint16_t do_mulh_h(int32_t n, int32_t m)
+{
+    return (n * m) >> 16;
+}
+
+static inline uint32_t do_mulh_s(int64_t n, int64_t m)
+{
+    return (n * m) >> 32;
+}
+
+static inline uint64_t do_smulh_d(uint64_t n, uint64_t m)
+{
+    uint64_t lo, hi;
+    muls64(&lo, &hi, n, m);
+    return hi;
+}
+
+static inline uint64_t do_umulh_d(uint64_t n, uint64_t m)
+{
+    uint64_t lo, hi;
+    mulu64(&lo, &hi, n, m);
+    return hi;
+}
+
+DO_ZPZZ(sve_mul_zpzz_b, uint8_t, H1, DO_MUL)
+DO_ZPZZ(sve_mul_zpzz_h, uint16_t, H1_2, DO_MUL)
+DO_ZPZZ(sve_mul_zpzz_s, uint32_t, H1_4, DO_MUL)
+DO_ZPZZ_D(sve_mul_zpzz_d, uint64_t, DO_MUL)
+
+DO_ZPZZ(sve_smulh_zpzz_b, int8_t, H1, do_mulh_b)
+DO_ZPZZ(sve_smulh_zpzz_h, int16_t, H1_2, do_mulh_h)
+DO_ZPZZ(sve_smulh_zpzz_s, int32_t, H1_4, do_mulh_s)
+DO_ZPZZ_D(sve_smulh_zpzz_d, uint64_t, do_smulh_d)
+
+DO_ZPZZ(sve_umulh_zpzz_b, uint8_t, H1, do_mulh_b)
+DO_ZPZZ(sve_umulh_zpzz_h, uint16_t, H1_2, do_mulh_h)
+DO_ZPZZ(sve_umulh_zpzz_s, uint32_t, H1_4, do_mulh_s)
+DO_ZPZZ_D(sve_umulh_zpzz_d, uint64_t, do_umulh_d)
+
+DO_ZPZZ(sve_sdiv_zpzz_s, int32_t, H1_4, DO_SDIV)
+DO_ZPZZ_D(sve_sdiv_zpzz_d, int64_t, DO_SDIV)
+
+DO_ZPZZ(sve_udiv_zpzz_s, uint32_t, H1_4, DO_UDIV)
+DO_ZPZZ_D(sve_udiv_zpzz_d, uint64_t, DO_UDIV)
+
+/* Note that all bits of the shift are significant
+   and not modulo the element size.  */
+#define DO_ASR(N, M) (N >> MIN(M, sizeof(N) * 8 - 1))
+#define DO_LSR(N, M) (M < sizeof(N) * 8 ? N >> M : 0)
+#define DO_LSL(N, M) (M < sizeof(N) * 8 ? N << M : 0)
+
+DO_ZPZZ(sve_asr_zpzz_b, int8_t, H1, DO_ASR)
+DO_ZPZZ(sve_lsr_zpzz_b, uint8_t, H1, DO_LSR)
+DO_ZPZZ(sve_lsl_zpzz_b, uint8_t, H1, DO_LSL)
+
+DO_ZPZZ(sve_asr_zpzz_h, int16_t, H1_2, DO_ASR)
+DO_ZPZZ(sve_lsr_zpzz_h, uint16_t, H1_2, DO_LSR)
+DO_ZPZZ(sve_lsl_zpzz_h, uint16_t, H1_2, DO_LSL)
+
+DO_ZPZZ(sve_asr_zpzz_s, int32_t, H1_4, DO_ASR)
+DO_ZPZZ(sve_lsr_zpzz_s, uint32_t, H1_4, DO_LSR)
+DO_ZPZZ(sve_lsl_zpzz_s, uint32_t, H1_4, DO_LSL)
+
+DO_ZPZZ_D(sve_asr_zpzz_d, int64_t, DO_ASR)
+DO_ZPZZ_D(sve_lsr_zpzz_d, uint64_t, DO_LSR)
+DO_ZPZZ_D(sve_lsl_zpzz_d, uint64_t, DO_LSL)
+
+#undef DO_ZPZZ
+#undef DO_ZPZZ_D
+
+/* Three-operand expander, controlled by a predicate, in which the
+ * third operand is "wide".  That is, for D = N op M, the same 64-bit
+ * value of M is used with all of the narrower values of N.
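+ *
+ * For example, with byte elements every 64-bit lane of M shifts all
+ * eight N bytes that share that lane, which is why the inner loop
+ * below reloads mm only once per eight bytes (the while (i & 7) test).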
+ */ +#define DO_ZPZW(NAME, TYPE, TYPEW, H, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc); \ + for (i = 0; i < opr_sz; ) { \ + uint8_t pg = *(uint8_t *)((char *)vg + H1(i >> 3)); \ + TYPEW mm = *(TYPEW *)((char *)vm + i); \ + do { \ + if (pg & 1) { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + *(TYPE *)((char *)vd + H(i)) = OP(nn, mm); \ + } \ + i += sizeof(TYPE), pg >>= sizeof(TYPE); \ + } while (i & 7); \ + } \ +} + +DO_ZPZW(sve_asr_zpzw_b, int8_t, uint64_t, H1, DO_ASR) +DO_ZPZW(sve_lsr_zpzw_b, uint8_t, uint64_t, H1, DO_LSR) +DO_ZPZW(sve_lsl_zpzw_b, uint8_t, uint64_t, H1, DO_LSL) + +DO_ZPZW(sve_asr_zpzw_h, int16_t, uint64_t, H1_2, DO_ASR) +DO_ZPZW(sve_lsr_zpzw_h, uint16_t, uint64_t, H1_2, DO_LSR) +DO_ZPZW(sve_lsl_zpzw_h, uint16_t, uint64_t, H1_2, DO_LSL) + +DO_ZPZW(sve_asr_zpzw_s, int32_t, uint64_t, H1_4, DO_ASR) +DO_ZPZW(sve_lsr_zpzw_s, uint32_t, uint64_t, H1_4, DO_LSR) +DO_ZPZW(sve_lsl_zpzw_s, uint32_t, uint64_t, H1_4, DO_LSL) + +#undef DO_ZPZW + +/* Fully general two-operand expander, controlled by a predicate. + */ +#define DO_ZPZ(NAME, TYPE, H, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc); \ + for (i = 0; i < opr_sz; ) { \ + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ + do { \ + if (pg & 1) { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + *(TYPE *)((char *)vd + H(i)) = OP(nn); \ + } \ + i += sizeof(TYPE), pg >>= sizeof(TYPE); \ + } while (i & 15); \ + } \ +} + +/* Similarly, specialized for 64-bit operands. */ +#define DO_ZPZ_D(NAME, TYPE, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; \ + TYPE *d = vd, *n = vn; \ + uint8_t *pg = vg; \ + for (i = 0; i < opr_sz; i += 1) { \ + if (pg[H1(i)] & 1) { \ + TYPE nn = n[i]; \ + d[i] = OP(nn); \ + } \ + } \ +} + +#define DO_CLS_B(N) (clrsb32(N) - 24) +#define DO_CLS_H(N) (clrsb32(N) - 16) + +DO_ZPZ(sve_cls_b, int8_t, H1, DO_CLS_B) +DO_ZPZ(sve_cls_h, int16_t, H1_2, DO_CLS_H) +DO_ZPZ(sve_cls_s, int32_t, H1_4, clrsb32) +DO_ZPZ_D(sve_cls_d, int64_t, clrsb64) + +#define DO_CLZ_B(N) (clz32(N) - 24) +#define DO_CLZ_H(N) (clz32(N) - 16) + +DO_ZPZ(sve_clz_b, uint8_t, H1, DO_CLZ_B) +DO_ZPZ(sve_clz_h, uint16_t, H1_2, DO_CLZ_H) +DO_ZPZ(sve_clz_s, uint32_t, H1_4, clz32) +DO_ZPZ_D(sve_clz_d, uint64_t, clz64) + +DO_ZPZ(sve_cnt_zpz_b, uint8_t, H1, ctpop8) +DO_ZPZ(sve_cnt_zpz_h, uint16_t, H1_2, ctpop16) +DO_ZPZ(sve_cnt_zpz_s, uint32_t, H1_4, ctpop32) +DO_ZPZ_D(sve_cnt_zpz_d, uint64_t, ctpop64) + +#define DO_CNOT(N) (N == 0) + +DO_ZPZ(sve_cnot_b, uint8_t, H1, DO_CNOT) +DO_ZPZ(sve_cnot_h, uint16_t, H1_2, DO_CNOT) +DO_ZPZ(sve_cnot_s, uint32_t, H1_4, DO_CNOT) +DO_ZPZ_D(sve_cnot_d, uint64_t, DO_CNOT) + +#ifdef _MSC_VER +#define DO_FABS16(N) (N & ((uint16_t)-1 >> 1)) +#define DO_FABS32(N) (N & ((uint32_t)-1 >> 1)) +#define DO_FABS64(N) (N & ((uint64_t)-1 >> 1)) + +DO_ZPZ(sve_fabs_h, uint16_t, H1_2, DO_FABS16) +DO_ZPZ(sve_fabs_s, uint32_t, H1_4, DO_FABS32) +DO_ZPZ_D(sve_fabs_d, uint64_t, DO_FABS64) +#else +#define DO_FABS(N) (N & ((__typeof(N))-1 >> 1)) + +DO_ZPZ(sve_fabs_h, uint16_t, H1_2, DO_FABS) +DO_ZPZ(sve_fabs_s, uint32_t, H1_4, DO_FABS) +DO_ZPZ_D(sve_fabs_d, uint64_t, DO_FABS) +#endif + +#ifdef _MSC_VER +#define DO_FNEG16(N) (N ^ ~((uint16_t)-1 >> 1)) +#define DO_FNEG32(N) (N ^ ~((uint32_t)-1 >> 1)) +#define DO_FNEG64(N) (N ^ ~((uint64_t)-1 >> 1)) + +DO_ZPZ(sve_fneg_h, uint16_t, H1_2, DO_FNEG16) +DO_ZPZ(sve_fneg_s, 
uint32_t, H1_4, DO_FNEG32) +DO_ZPZ_D(sve_fneg_d, uint64_t, DO_FNEG64) +#else +#define DO_FNEG(N) (N ^ ~((__typeof(N))-1 >> 1)) + +DO_ZPZ(sve_fneg_h, uint16_t, H1_2, DO_FNEG) +DO_ZPZ(sve_fneg_s, uint32_t, H1_4, DO_FNEG) +DO_ZPZ_D(sve_fneg_d, uint64_t, DO_FNEG) +#endif + +#define DO_NOT(N) (~N) + +DO_ZPZ(sve_not_zpz_b, uint8_t, H1, DO_NOT) +DO_ZPZ(sve_not_zpz_h, uint16_t, H1_2, DO_NOT) +DO_ZPZ(sve_not_zpz_s, uint32_t, H1_4, DO_NOT) +DO_ZPZ_D(sve_not_zpz_d, uint64_t, DO_NOT) + +#define DO_SXTB(N) ((int8_t)N) +#define DO_SXTH(N) ((int16_t)N) +#define DO_SXTS(N) ((int32_t)N) +#define DO_UXTB(N) ((uint8_t)N) +#define DO_UXTH(N) ((uint16_t)N) +#define DO_UXTS(N) ((uint32_t)N) + +DO_ZPZ(sve_sxtb_h, uint16_t, H1_2, DO_SXTB) +DO_ZPZ(sve_sxtb_s, uint32_t, H1_4, DO_SXTB) +DO_ZPZ(sve_sxth_s, uint32_t, H1_4, DO_SXTH) +DO_ZPZ_D(sve_sxtb_d, uint64_t, DO_SXTB) +DO_ZPZ_D(sve_sxth_d, uint64_t, DO_SXTH) +DO_ZPZ_D(sve_sxtw_d, uint64_t, DO_SXTS) + +DO_ZPZ(sve_uxtb_h, uint16_t, H1_2, DO_UXTB) +DO_ZPZ(sve_uxtb_s, uint32_t, H1_4, DO_UXTB) +DO_ZPZ(sve_uxth_s, uint32_t, H1_4, DO_UXTH) +DO_ZPZ_D(sve_uxtb_d, uint64_t, DO_UXTB) +DO_ZPZ_D(sve_uxth_d, uint64_t, DO_UXTH) +DO_ZPZ_D(sve_uxtw_d, uint64_t, DO_UXTS) + +#ifdef _MSC_VER +#define DO_ABS(N) (N < 0 ? (0 - N) : N) +#else +#define DO_ABS(N) (N < 0 ? -N : N) +#endif + +DO_ZPZ(sve_abs_b, int8_t, H1, DO_ABS) +DO_ZPZ(sve_abs_h, int16_t, H1_2, DO_ABS) +DO_ZPZ(sve_abs_s, int32_t, H1_4, DO_ABS) +DO_ZPZ_D(sve_abs_d, int64_t, DO_ABS) + +#ifdef _MSC_VER +#define DO_NEG(N) (0 - N) +#else +#define DO_NEG(N) (-N) +#endif + +DO_ZPZ(sve_neg_b, uint8_t, H1, DO_NEG) +DO_ZPZ(sve_neg_h, uint16_t, H1_2, DO_NEG) +DO_ZPZ(sve_neg_s, uint32_t, H1_4, DO_NEG) +DO_ZPZ_D(sve_neg_d, uint64_t, DO_NEG) + +DO_ZPZ(sve_revb_h, uint16_t, H1_2, bswap16) +DO_ZPZ(sve_revb_s, uint32_t, H1_4, bswap32) +DO_ZPZ_D(sve_revb_d, uint64_t, bswap64) + +DO_ZPZ(sve_revh_s, uint32_t, H1_4, hswap32) +DO_ZPZ_D(sve_revh_d, uint64_t, hswap64) + +DO_ZPZ_D(sve_revw_d, uint64_t, wswap64) + +DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8) +DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16) +DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32) +DO_ZPZ_D(sve_rbit_d, uint64_t, revbit64) + +/* Three-operand expander, unpredicated, in which the third operand is "wide". + */ +#define DO_ZZW(NAME, TYPE, TYPEW, H, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc); \ + for (i = 0; i < opr_sz; ) { \ + TYPEW mm = *(TYPEW *)((char *)vm + i); \ + do { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + *(TYPE *)((char *)vd + H(i)) = OP(nn, mm); \ + i += sizeof(TYPE); \ + } while (i & 7); \ + } \ +} + +DO_ZZW(sve_asr_zzw_b, int8_t, uint64_t, H1, DO_ASR) +DO_ZZW(sve_lsr_zzw_b, uint8_t, uint64_t, H1, DO_LSR) +DO_ZZW(sve_lsl_zzw_b, uint8_t, uint64_t, H1, DO_LSL) + +DO_ZZW(sve_asr_zzw_h, int16_t, uint64_t, H1_2, DO_ASR) +DO_ZZW(sve_lsr_zzw_h, uint16_t, uint64_t, H1_2, DO_LSR) +DO_ZZW(sve_lsl_zzw_h, uint16_t, uint64_t, H1_2, DO_LSL) + +DO_ZZW(sve_asr_zzw_s, int32_t, uint64_t, H1_4, DO_ASR) +DO_ZZW(sve_lsr_zzw_s, uint32_t, uint64_t, H1_4, DO_LSR) +DO_ZZW(sve_lsl_zzw_s, uint32_t, uint64_t, H1_4, DO_LSL) + +#undef DO_ZZW + +#undef DO_CLS_B +#undef DO_CLS_H +#undef DO_CLZ_B +#undef DO_CLZ_H +#undef DO_CNOT +#undef DO_FABS +#undef DO_FNEG +#undef DO_ABS +#undef DO_NEG +#undef DO_ZPZ +#undef DO_ZPZ_D + +/* Two-operand reduction expander, controlled by a predicate. + * The difference between TYPERED and TYPERET has to do with + * sign-extension. E.g. 
for SMAX, TYPERED must be signed, + * but TYPERET must be unsigned so that e.g. a 32-bit value + * is not sign-extended to the ABI uint64_t return type. + */ +/* ??? If we were to vectorize this by hand the reduction ordering + * would change. For integer operands, this is perfectly fine. + */ +#define DO_VPZ(NAME, TYPEELT, TYPERED, TYPERET, H, INIT, OP) \ +uint64_t HELPER(NAME)(void *vn, void *vg, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc); \ + TYPERED ret = INIT; \ + for (i = 0; i < opr_sz; ) { \ + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ + do { \ + if (pg & 1) { \ + TYPEELT nn = *(TYPEELT *)((char *)vn + H(i)); \ + ret = OP(ret, nn); \ + } \ + i += sizeof(TYPEELT), pg >>= sizeof(TYPEELT); \ + } while (i & 15); \ + } \ + return (TYPERET)ret; \ +} + +#define DO_VPZ_D(NAME, TYPEE, TYPER, INIT, OP) \ +uint64_t HELPER(NAME)(void *vn, void *vg, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; \ + TYPEE *n = vn; \ + uint8_t *pg = vg; \ + TYPER ret = INIT; \ + for (i = 0; i < opr_sz; i += 1) { \ + if (pg[H1(i)] & 1) { \ + TYPEE nn = n[i]; \ + ret = OP(ret, nn); \ + } \ + } \ + return ret; \ +} + +DO_VPZ(sve_orv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_ORR) +DO_VPZ(sve_orv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_ORR) +DO_VPZ(sve_orv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_ORR) +DO_VPZ_D(sve_orv_d, uint64_t, uint64_t, 0, DO_ORR) + +DO_VPZ(sve_eorv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_EOR) +DO_VPZ(sve_eorv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_EOR) +DO_VPZ(sve_eorv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_EOR) +DO_VPZ_D(sve_eorv_d, uint64_t, uint64_t, 0, DO_EOR) + +DO_VPZ(sve_andv_b, uint8_t, uint8_t, uint8_t, H1, -1, DO_AND) +DO_VPZ(sve_andv_h, uint16_t, uint16_t, uint16_t, H1_2, -1, DO_AND) +DO_VPZ(sve_andv_s, uint32_t, uint32_t, uint32_t, H1_4, -1, DO_AND) +DO_VPZ_D(sve_andv_d, uint64_t, uint64_t, -1, DO_AND) + +DO_VPZ(sve_saddv_b, int8_t, uint64_t, uint64_t, H1, 0, DO_ADD) +DO_VPZ(sve_saddv_h, int16_t, uint64_t, uint64_t, H1_2, 0, DO_ADD) +DO_VPZ(sve_saddv_s, int32_t, uint64_t, uint64_t, H1_4, 0, DO_ADD) + +DO_VPZ(sve_uaddv_b, uint8_t, uint64_t, uint64_t, H1, 0, DO_ADD) +DO_VPZ(sve_uaddv_h, uint16_t, uint64_t, uint64_t, H1_2, 0, DO_ADD) +DO_VPZ(sve_uaddv_s, uint32_t, uint64_t, uint64_t, H1_4, 0, DO_ADD) +DO_VPZ_D(sve_uaddv_d, uint64_t, uint64_t, 0, DO_ADD) + +DO_VPZ(sve_smaxv_b, int8_t, int8_t, uint8_t, H1, INT8_MIN, DO_MAX) +DO_VPZ(sve_smaxv_h, int16_t, int16_t, uint16_t, H1_2, INT16_MIN, DO_MAX) +DO_VPZ(sve_smaxv_s, int32_t, int32_t, uint32_t, H1_4, INT32_MIN, DO_MAX) +DO_VPZ_D(sve_smaxv_d, int64_t, int64_t, INT64_MIN, DO_MAX) + +DO_VPZ(sve_umaxv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_MAX) +DO_VPZ(sve_umaxv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_MAX) +DO_VPZ(sve_umaxv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_MAX) +DO_VPZ_D(sve_umaxv_d, uint64_t, uint64_t, 0, DO_MAX) + +DO_VPZ(sve_sminv_b, int8_t, int8_t, uint8_t, H1, INT8_MAX, DO_MIN) +DO_VPZ(sve_sminv_h, int16_t, int16_t, uint16_t, H1_2, INT16_MAX, DO_MIN) +DO_VPZ(sve_sminv_s, int32_t, int32_t, uint32_t, H1_4, INT32_MAX, DO_MIN) +DO_VPZ_D(sve_sminv_d, int64_t, int64_t, INT64_MAX, DO_MIN) + +DO_VPZ(sve_uminv_b, uint8_t, uint8_t, uint8_t, H1, -1, DO_MIN) +DO_VPZ(sve_uminv_h, uint16_t, uint16_t, uint16_t, H1_2, -1, DO_MIN) +DO_VPZ(sve_uminv_s, uint32_t, uint32_t, uint32_t, H1_4, -1, DO_MIN) +DO_VPZ_D(sve_uminv_d, uint64_t, uint64_t, -1, DO_MIN) + +#undef DO_VPZ +#undef DO_VPZ_D + +/* Two vector operand, one scalar operand, unpredicated. 
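+ * The scalar is truncated to the element type and applied elementwise;
+ * e.g. sve_subri_b below computes d[i] = (uint8_t)s64 - n[i], the
+ * reversed subtract.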
 */
+#define DO_ZZI(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint64_t s64, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE); \
+    TYPE s = s64, *d = vd, *n = vn; \
+    for (i = 0; i < opr_sz; ++i) { \
+        d[i] = OP(n[i], s); \
+    } \
+}
+
+#define DO_SUBR(X, Y) (Y - X)
+
+DO_ZZI(sve_subri_b, uint8_t, DO_SUBR)
+DO_ZZI(sve_subri_h, uint16_t, DO_SUBR)
+DO_ZZI(sve_subri_s, uint32_t, DO_SUBR)
+DO_ZZI(sve_subri_d, uint64_t, DO_SUBR)
+
+DO_ZZI(sve_smaxi_b, int8_t, DO_MAX)
+DO_ZZI(sve_smaxi_h, int16_t, DO_MAX)
+DO_ZZI(sve_smaxi_s, int32_t, DO_MAX)
+DO_ZZI(sve_smaxi_d, int64_t, DO_MAX)
+
+DO_ZZI(sve_smini_b, int8_t, DO_MIN)
+DO_ZZI(sve_smini_h, int16_t, DO_MIN)
+DO_ZZI(sve_smini_s, int32_t, DO_MIN)
+DO_ZZI(sve_smini_d, int64_t, DO_MIN)
+
+DO_ZZI(sve_umaxi_b, uint8_t, DO_MAX)
+DO_ZZI(sve_umaxi_h, uint16_t, DO_MAX)
+DO_ZZI(sve_umaxi_s, uint32_t, DO_MAX)
+DO_ZZI(sve_umaxi_d, uint64_t, DO_MAX)
+
+DO_ZZI(sve_umini_b, uint8_t, DO_MIN)
+DO_ZZI(sve_umini_h, uint16_t, DO_MIN)
+DO_ZZI(sve_umini_s, uint32_t, DO_MIN)
+DO_ZZI(sve_umini_d, uint64_t, DO_MIN)
+
+#undef DO_ZZI
+
+#undef DO_AND
+#undef DO_ORR
+#undef DO_EOR
+#undef DO_BIC
+#undef DO_ADD
+#undef DO_SUB
+#undef DO_MAX
+#undef DO_MIN
+#undef DO_ABD
+#undef DO_MUL
+#undef DO_SDIV
+#undef DO_UDIV
+#undef DO_ASR
+#undef DO_LSR
+#undef DO_LSL
+#undef DO_SUBR
+
+/* Similar to the ARM LastActiveElement pseudocode function, except the
+   result is multiplied by the element size.  This includes the not found
+   indication; e.g. not found for esz=3 is -8.  */
+static intptr_t last_active_element(uint64_t *g, intptr_t words, intptr_t esz)
+{
+    uint64_t mask = pred_esz_masks[esz];
+    intptr_t i = words;
+
+    do {
+        uint64_t this_g = g[--i] & mask;
+        if (this_g) {
+            return i * 64 + (63 - clz64(this_g));
+        }
+    } while (i > 0);
+    return (intptr_t)-1 << esz;
+}
+
+uint32_t HELPER(sve_pfirst)(void *vd, void *vg, uint32_t words)
+{
+    uint32_t flags = PREDTEST_INIT;
+    uint64_t *d = vd, *g = vg;
+    intptr_t i = 0;
+
+    do {
+        uint64_t this_d = d[i];
+        uint64_t this_g = g[i];
+
+        if (this_g) {
+            if (!(flags & 4)) {
+                /* Set in D the first bit of G.  */
+#ifdef _MSC_VER
+                this_d |= this_g & (0 - this_g);
+#else
+                this_d |= this_g & -this_g;
+#endif
+                d[i] = this_d;
+            }
+            flags = iter_predtest_fwd(this_d, this_g, flags);
+        }
+    } while (++i < words);
+
+    return flags;
+}
+
+uint32_t HELPER(sve_pnext)(void *vd, void *vg, uint32_t pred_desc)
+{
+    intptr_t words = extract32(pred_desc, 0, SIMD_OPRSZ_BITS);
+    intptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2);
+    uint32_t flags = PREDTEST_INIT;
+    uint64_t *d = vd, *g = vg, esz_mask;
+    intptr_t i, next;
+
+    next = last_active_element(vd, words, esz) + (1ULL << esz);
+    esz_mask = pred_esz_masks[esz];
+
+    /* Similar to the pseudocode for pnext, but scaled by ESZ
+       so that we find the correct bit.  */
+    if (next < words * 64) {
+        uint64_t mask = -1;
+
+        if (next & 63) {
+            mask = ~((1ull << (next & 63)) - 1);
+            next &= -64;
+        }
+        do {
+            uint64_t this_g = g[next / 64] & esz_mask & mask;
+            if (this_g != 0) {
+                next = (next & -64) + ctz64(this_g);
+                break;
+            }
+            next += 64;
+            mask = -1;
+        } while (next < words * 64);
+    }
+
+    i = 0;
+    do {
+        uint64_t this_d = 0;
+        if (i == next / 64) {
+            this_d = 1ull << (next & 63);
+        }
+        d[i] = this_d;
+        flags = iter_predtest_fwd(this_d, g[i] & esz_mask, flags);
+    } while (++i < words);
+
+    return flags;
+}
+
+/* Store zero into every active element of Zd.  We will use this for two
+ * and three-operand predicated instructions for which logic dictates a
+ * zero result.
In particular, logical shift by element size, which is + * otherwise undefined on the host. + * + * For element sizes smaller than uint64_t, we use tables to expand + * the N bits of the controlling predicate to a byte mask, and clear + * those bytes. + */ +void HELPER(sve_clr_b)(void *vd, void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd; + uint8_t *pg = vg; + for (i = 0; i < opr_sz; i += 1) { + d[i] &= ~expand_pred_b(pg[H1(i)]); + } +} + +void HELPER(sve_clr_h)(void *vd, void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd; + uint8_t *pg = vg; + for (i = 0; i < opr_sz; i += 1) { + d[i] &= ~expand_pred_h(pg[H1(i)]); + } +} + +void HELPER(sve_clr_s)(void *vd, void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd; + uint8_t *pg = vg; + for (i = 0; i < opr_sz; i += 1) { + d[i] &= ~expand_pred_s(pg[H1(i)]); + } +} + +void HELPER(sve_clr_d)(void *vd, void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd; + uint8_t *pg = vg; + for (i = 0; i < opr_sz; i += 1) { + if (pg[H1(i)] & 1) { + d[i] = 0; + } + } +} + +/* Copy Zn into Zd, and store zero into inactive elements. */ +void HELPER(sve_movz_b)(void *vd, void *vn, void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn; + uint8_t *pg = vg; + for (i = 0; i < opr_sz; i += 1) { + d[i] = n[i] & expand_pred_b(pg[H1(i)]); + } +} + +void HELPER(sve_movz_h)(void *vd, void *vn, void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn; + uint8_t *pg = vg; + for (i = 0; i < opr_sz; i += 1) { + d[i] = n[i] & expand_pred_h(pg[H1(i)]); + } +} + +void HELPER(sve_movz_s)(void *vd, void *vn, void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn; + uint8_t *pg = vg; + for (i = 0; i < opr_sz; i += 1) { + d[i] = n[i] & expand_pred_s(pg[H1(i)]); + } +} + +void HELPER(sve_movz_d)(void *vd, void *vn, void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn; + uint8_t *pg = vg; + for (i = 0; i < opr_sz; i += 1) { +#ifdef _MSC_VER + d[i] = n[i] & ((uint64_t)0 - (uint64_t)(pg[H1(i)] & 1)); +#else + d[i] = n[i] & -(uint64_t)(pg[H1(i)] & 1); +#endif + } +} + +/* Three-operand expander, immediate operand, controlled by a predicate. + */ +#define DO_ZPZI(NAME, TYPE, H, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc); \ + TYPE imm = simd_data(desc); \ + for (i = 0; i < opr_sz; ) { \ + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ + do { \ + if (pg & 1) { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + *(TYPE *)((char *)vd + H(i)) = OP(nn, imm); \ + } \ + i += sizeof(TYPE), pg >>= sizeof(TYPE); \ + } while (i & 15); \ + } \ +} + +/* Similarly, specialized for 64-bit operands. */ +#define DO_ZPZI_D(NAME, TYPE, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; \ + TYPE *d = vd, *n = vn; \ + TYPE imm = simd_data(desc); \ + uint8_t *pg = vg; \ + for (i = 0; i < opr_sz; i += 1) { \ + if (pg[H1(i)] & 1) { \ + TYPE nn = n[i]; \ + d[i] = OP(nn, imm); \ + } \ + } \ +} + +#define DO_SHR(N, M) (N >> M) +#define DO_SHL(N, M) (N << M) + +/* Arithmetic shift right for division. This rounds negative numbers + toward zero as per signed division. Therefore before shifting, + when N is negative, add 2**M-1. 
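+
+   Worked example: for N = -7, M = 2, plain N >> 2 yields -2 (rounding
+   toward minus infinity), while (-7 + 3) >> 2 yields -1, matching the
+   C quotient -7 / 4.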
 */
+#ifdef _MSC_VER
+    /* MSVC has no __typeof; compute the bias in int64_t, which is wide
+       enough for every element size instantiated below.  */
+    #define DO_ASRD(N, M) ((N + (N < 0 ? ((int64_t)1 << M) - 1 : 0)) >> M)
+#else
+    #define DO_ASRD(N, M) ((N + (N < 0 ? ((__typeof(N))1 << M) - 1 : 0)) >> M)
+#endif
+
+DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR)
+DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR)
+DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR)
+DO_ZPZI_D(sve_asr_zpzi_d, int64_t, DO_SHR)
+
+DO_ZPZI(sve_lsr_zpzi_b, uint8_t, H1, DO_SHR)
+DO_ZPZI(sve_lsr_zpzi_h, uint16_t, H1_2, DO_SHR)
+DO_ZPZI(sve_lsr_zpzi_s, uint32_t, H1_4, DO_SHR)
+DO_ZPZI_D(sve_lsr_zpzi_d, uint64_t, DO_SHR)
+
+DO_ZPZI(sve_lsl_zpzi_b, uint8_t, H1, DO_SHL)
+DO_ZPZI(sve_lsl_zpzi_h, uint16_t, H1_2, DO_SHL)
+DO_ZPZI(sve_lsl_zpzi_s, uint32_t, H1_4, DO_SHL)
+DO_ZPZI_D(sve_lsl_zpzi_d, uint64_t, DO_SHL)
+
+DO_ZPZI(sve_asrd_b, int8_t, H1, DO_ASRD)
+DO_ZPZI(sve_asrd_h, int16_t, H1_2, DO_ASRD)
+DO_ZPZI(sve_asrd_s, int32_t, H1_4, DO_ASRD)
+DO_ZPZI_D(sve_asrd_d, int64_t, DO_ASRD)
+
+#undef DO_SHR
+#undef DO_SHL
+#undef DO_ASRD
+#undef DO_ZPZI
+#undef DO_ZPZI_D
+
+/* Fully general four-operand expander, controlled by a predicate.
+ */
+#define DO_ZPZZZ(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *va, void *vn, void *vm, \
+                  void *vg, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    for (i = 0; i < opr_sz; ) { \
+        uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \
+        do { \
+            if (pg & 1) { \
+                TYPE nn = *(TYPE *)((char *)vn + H(i)); \
+                TYPE mm = *(TYPE *)((char *)vm + H(i)); \
+                TYPE aa = *(TYPE *)((char *)va + H(i)); \
+                *(TYPE *)((char *)vd + H(i)) = OP(aa, nn, mm); \
+            } \
+            i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+        } while (i & 15); \
+    } \
+}
+
+/* Similarly, specialized for 64-bit operands.  */
+#define DO_ZPZZZ_D(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *va, void *vn, void *vm, \
+                  void *vg, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
+    TYPE *d = vd, *a = va, *n = vn, *m = vm; \
+    uint8_t *pg = vg; \
+    for (i = 0; i < opr_sz; i += 1) { \
+        if (pg[H1(i)] & 1) { \
+            TYPE aa = a[i], nn = n[i], mm = m[i]; \
+            d[i] = OP(aa, nn, mm); \
+        } \
+    } \
+}
+
+#define DO_MLA(A, N, M) (A + N * M)
+#define DO_MLS(A, N, M) (A - N * M)
+
+DO_ZPZZZ(sve_mla_b, uint8_t, H1, DO_MLA)
+DO_ZPZZZ(sve_mls_b, uint8_t, H1, DO_MLS)
+
+DO_ZPZZZ(sve_mla_h, uint16_t, H1_2, DO_MLA)
+DO_ZPZZZ(sve_mls_h, uint16_t, H1_2, DO_MLS)
+
+DO_ZPZZZ(sve_mla_s, uint32_t, H1_4, DO_MLA)
+DO_ZPZZZ(sve_mls_s, uint32_t, H1_4, DO_MLS)
+
+DO_ZPZZZ_D(sve_mla_d, uint64_t, DO_MLA)
+DO_ZPZZZ_D(sve_mls_d, uint64_t, DO_MLS)
+
+#undef DO_MLA
+#undef DO_MLS
+#undef DO_ZPZZZ
+#undef DO_ZPZZZ_D
+
+void HELPER(sve_index_b)(void *vd, uint32_t start,
+                         uint32_t incr, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    uint8_t *d = vd;
+    for (i = 0; i < opr_sz; i += 1) {
+        d[H1(i)] = start + i * incr;
+    }
+}
+
+void HELPER(sve_index_h)(void *vd, uint32_t start,
+                         uint32_t incr, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 2;
+    uint16_t *d = vd;
+    for (i = 0; i < opr_sz; i += 1) {
+        d[H2(i)] = start + i * incr;
+    }
+}
+
+void HELPER(sve_index_s)(void *vd, uint32_t start,
+                         uint32_t incr, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 4;
+    uint32_t *d = vd;
+    for (i = 0; i < opr_sz; i += 1) {
+        d[H4(i)] = start + i * incr;
+    }
+}
+
+void HELPER(sve_index_d)(void *vd, uint64_t start,
+                         uint64_t incr, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd;
+    for (i = 0; i < opr_sz; i += 1) {
+        d[i] = start + i * incr;
+    }
+}
+
+void HELPER(sve_adr_p32)(void *vd, void *vn, void *vm, uint32_t
desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 4; + uint32_t sh = simd_data(desc); + uint32_t *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i += 1) { + d[i] = n[i] + (m[i] << sh); + } +} + +void HELPER(sve_adr_p64)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t sh = simd_data(desc); + uint64_t *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i += 1) { + d[i] = n[i] + (m[i] << sh); + } +} + +void HELPER(sve_adr_s32)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t sh = simd_data(desc); + uint64_t *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i += 1) { + d[i] = n[i] + ((uint64_t)(int32_t)m[i] << sh); + } +} + +void HELPER(sve_adr_u32)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t sh = simd_data(desc); + uint64_t *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i += 1) { + d[i] = n[i] + ((uint64_t)(uint32_t)m[i] << sh); + } +} + +void HELPER(sve_fexpa_h)(void *vd, void *vn, uint32_t desc) +{ + /* These constants are cut-and-paste directly from the ARM pseudocode. */ + static const uint16_t coeff[] = { + 0x0000, 0x0016, 0x002d, 0x0045, 0x005d, 0x0075, 0x008e, 0x00a8, + 0x00c2, 0x00dc, 0x00f8, 0x0114, 0x0130, 0x014d, 0x016b, 0x0189, + 0x01a8, 0x01c8, 0x01e8, 0x0209, 0x022b, 0x024e, 0x0271, 0x0295, + 0x02ba, 0x02e0, 0x0306, 0x032e, 0x0356, 0x037f, 0x03a9, 0x03d4, + }; + intptr_t i, opr_sz = simd_oprsz(desc) / 2; + uint16_t *d = vd, *n = vn; + + for (i = 0; i < opr_sz; i++) { + uint16_t nn = n[i]; + intptr_t idx = extract32(nn, 0, 5); + uint16_t exp = extract32(nn, 5, 5); + d[i] = coeff[idx] | (exp << 10); + } +} + +void HELPER(sve_fexpa_s)(void *vd, void *vn, uint32_t desc) +{ + /* These constants are cut-and-paste directly from the ARM pseudocode. */ + static const uint32_t coeff[] = { + 0x000000, 0x0164d2, 0x02cd87, 0x043a29, + 0x05aac3, 0x071f62, 0x08980f, 0x0a14d5, + 0x0b95c2, 0x0d1adf, 0x0ea43a, 0x1031dc, + 0x11c3d3, 0x135a2b, 0x14f4f0, 0x16942d, + 0x1837f0, 0x19e046, 0x1b8d3a, 0x1d3eda, + 0x1ef532, 0x20b051, 0x227043, 0x243516, + 0x25fed7, 0x27cd94, 0x29a15b, 0x2b7a3a, + 0x2d583f, 0x2f3b79, 0x3123f6, 0x3311c4, + 0x3504f3, 0x36fd92, 0x38fbaf, 0x3aff5b, + 0x3d08a4, 0x3f179a, 0x412c4d, 0x4346cd, + 0x45672a, 0x478d75, 0x49b9be, 0x4bec15, + 0x4e248c, 0x506334, 0x52a81e, 0x54f35b, + 0x5744fd, 0x599d16, 0x5bfbb8, 0x5e60f5, + 0x60ccdf, 0x633f89, 0x65b907, 0x68396a, + 0x6ac0c7, 0x6d4f30, 0x6fe4ba, 0x728177, + 0x75257d, 0x77d0df, 0x7a83b3, 0x7d3e0c, + }; + intptr_t i, opr_sz = simd_oprsz(desc) / 4; + uint32_t *d = vd, *n = vn; + + for (i = 0; i < opr_sz; i++) { + uint32_t nn = n[i]; + intptr_t idx = extract32(nn, 0, 6); + uint32_t exp = extract32(nn, 6, 8); + d[i] = coeff[idx] | (exp << 23); + } +} + +void HELPER(sve_fexpa_d)(void *vd, void *vn, uint32_t desc) +{ + /* These constants are cut-and-paste directly from the ARM pseudocode. 
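+     * Each entry is the 52-bit fraction field of 2.0^(i/64), so the
+     * construction below, coeff[idx] | (exp << 52), assembles the
+     * double 2.0^(exp - 1023) * 2.0^(idx/64) using integer ops only.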
*/ + static const uint64_t coeff[] = { + 0x0000000000000ull, 0x02C9A3E778061ull, 0x059B0D3158574ull, + 0x0874518759BC8ull, 0x0B5586CF9890Full, 0x0E3EC32D3D1A2ull, + 0x11301D0125B51ull, 0x1429AAEA92DE0ull, 0x172B83C7D517Bull, + 0x1A35BEB6FCB75ull, 0x1D4873168B9AAull, 0x2063B88628CD6ull, + 0x2387A6E756238ull, 0x26B4565E27CDDull, 0x29E9DF51FDEE1ull, + 0x2D285A6E4030Bull, 0x306FE0A31B715ull, 0x33C08B26416FFull, + 0x371A7373AA9CBull, 0x3A7DB34E59FF7ull, 0x3DEA64C123422ull, + 0x4160A21F72E2Aull, 0x44E086061892Dull, 0x486A2B5C13CD0ull, + 0x4BFDAD5362A27ull, 0x4F9B2769D2CA7ull, 0x5342B569D4F82ull, + 0x56F4736B527DAull, 0x5AB07DD485429ull, 0x5E76F15AD2148ull, + 0x6247EB03A5585ull, 0x6623882552225ull, 0x6A09E667F3BCDull, + 0x6DFB23C651A2Full, 0x71F75E8EC5F74ull, 0x75FEB564267C9ull, + 0x7A11473EB0187ull, 0x7E2F336CF4E62ull, 0x82589994CCE13ull, + 0x868D99B4492EDull, 0x8ACE5422AA0DBull, 0x8F1AE99157736ull, + 0x93737B0CDC5E5ull, 0x97D829FDE4E50ull, 0x9C49182A3F090ull, + 0xA0C667B5DE565ull, 0xA5503B23E255Dull, 0xA9E6B5579FDBFull, + 0xAE89F995AD3ADull, 0xB33A2B84F15FBull, 0xB7F76F2FB5E47ull, + 0xBCC1E904BC1D2ull, 0xC199BDD85529Cull, 0xC67F12E57D14Bull, + 0xCB720DCEF9069ull, 0xD072D4A07897Cull, 0xD5818DCFBA487ull, + 0xDA9E603DB3285ull, 0xDFC97337B9B5Full, 0xE502EE78B3FF6ull, + 0xEA4AFA2A490DAull, 0xEFA1BEE615A27ull, 0xF50765B6E4540ull, + 0xFA7C1819E90D8ull, + }; + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn; + + for (i = 0; i < opr_sz; i++) { + uint64_t nn = n[i]; + intptr_t idx = extract32(nn, 0, 6); + uint64_t exp = extract32(nn, 6, 11); + d[i] = coeff[idx] | (exp << 52); + } +} + +void HELPER(sve_ftssel_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 2; + uint16_t *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i += 1) { + uint16_t nn = n[i]; + uint16_t mm = m[i]; + if (mm & 1) { + nn = float16_one; + } + d[i] = nn ^ (mm & 2) << 14; + } +} + +void HELPER(sve_ftssel_s)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 4; + uint32_t *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i += 1) { + uint32_t nn = n[i]; + uint32_t mm = m[i]; + if (mm & 1) { + nn = float32_one; + } + d[i] = nn ^ (mm & 2) << 30; + } +} + +void HELPER(sve_ftssel_d)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i += 1) { + uint64_t nn = n[i]; + uint64_t mm = m[i]; + if (mm & 1) { + nn = float64_one; + } + d[i] = nn ^ (mm & 2) << 62; + } +} + +/* + * Signed saturating addition with scalar operand. 
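+ *
+ * The sub-64-bit cases compute in a wider integer type and clamp.  The
+ * 64-bit case has no wider type, so overflow is read from the sign bits
+ * instead: (r ^ ai) & ~(ai ^ b) is negative exactly when ai and b share
+ * a sign that the wrapped result r does not.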
+ */ + +void HELPER(sve_sqaddi_b)(void *d, void *a, int32_t b, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + + for (i = 0; i < oprsz; i += sizeof(int8_t)) { + int r = *(int8_t *)((char *)a + i) + b; + if (r > INT8_MAX) { + r = INT8_MAX; + } else if (r < INT8_MIN) { + r = INT8_MIN; + } + *(int8_t *)((char *)d + i) = r; + } +} + +void HELPER(sve_sqaddi_h)(void *d, void *a, int32_t b, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + + for (i = 0; i < oprsz; i += sizeof(int16_t)) { + int r = *(int16_t *)((char *)a + i) + b; + if (r > INT16_MAX) { + r = INT16_MAX; + } else if (r < INT16_MIN) { + r = INT16_MIN; + } + *(int16_t *)((char *)d + i) = r; + } +} + +void HELPER(sve_sqaddi_s)(void *d, void *a, int64_t b, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + + for (i = 0; i < oprsz; i += sizeof(int32_t)) { + int64_t r = *(int32_t *)((char *)a + i) + b; + if (r > INT32_MAX) { + r = INT32_MAX; + } else if (r < INT32_MIN) { + r = INT32_MIN; + } + *(int32_t *)((char *)d + i) = r; + } +} + +void HELPER(sve_sqaddi_d)(void *d, void *a, int64_t b, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + + for (i = 0; i < oprsz; i += sizeof(int64_t)) { + int64_t ai = *(int64_t *)((char *)a + i); + int64_t r = ai + b; + if (((r ^ ai) & ~(ai ^ b)) < 0) { + /* Signed overflow. */ + r = (r < 0 ? INT64_MAX : INT64_MIN); + } + *(int64_t *)((char *)d + i) = r; + } +} + +/* + * Unsigned saturating addition with scalar operand. + */ + +void HELPER(sve_uqaddi_b)(void *d, void *a, int32_t b, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + int r = *(uint8_t *)((char *)a + i) + b; + if (r > UINT8_MAX) { + r = UINT8_MAX; + } else if (r < 0) { + r = 0; + } + *(uint8_t *)((char *)d + i) = r; + } +} + +void HELPER(sve_uqaddi_h)(void *d, void *a, int32_t b, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + int r = *(uint16_t *)((char *)a + i) + b; + if (r > UINT16_MAX) { + r = UINT16_MAX; + } else if (r < 0) { + r = 0; + } + *(uint16_t *)((char *)d + i) = r; + } +} + +void HELPER(sve_uqaddi_s)(void *d, void *a, int64_t b, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + int64_t r = *(uint32_t *)((char *)a + i) + b; + if (r > UINT32_MAX) { + r = UINT32_MAX; + } else if (r < 0) { + r = 0; + } + *(uint32_t *)((char *)d + i) = r; + } +} + +void HELPER(sve_uqaddi_d)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t r = *(uint64_t *)((char *)a + i) + b; + if (r < b) { + r = UINT64_MAX; + } + *(uint64_t *)((char *)d + i) = r; + } +} + +void HELPER(sve_uqsubi_d)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { + uint64_t ai = *(uint64_t *)((char *)a + i); + *(uint64_t *)((char *)d + i) = (ai < b ? 0 : ai - b); + } +} + +/* Two operand predicated copy immediate with merge. All valid immediates + * can fit within 17 signed bits in the simd_data field. 
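+ *
+ * The immediate is replicated across a 64-bit lane with dup_const and
+ * then blended in via the expand_pred_* byte masks, so the merge is
+ * simply d = (mm & pp) | (nn & ~pp), one uint64_t at a time.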
+ */
+void HELPER(sve_cpy_m_b)(void *vd, void *vn, void *vg,
+                         uint64_t mm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn;
+    uint8_t *pg = vg;
+
+    mm = dup_const(MO_8, mm);
+    for (i = 0; i < opr_sz; i += 1) {
+        uint64_t nn = n[i];
+        uint64_t pp = expand_pred_b(pg[H1(i)]);
+        d[i] = (mm & pp) | (nn & ~pp);
+    }
+}
+
+void HELPER(sve_cpy_m_h)(void *vd, void *vn, void *vg,
+                         uint64_t mm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn;
+    uint8_t *pg = vg;
+
+    mm = dup_const(MO_16, mm);
+    for (i = 0; i < opr_sz; i += 1) {
+        uint64_t nn = n[i];
+        uint64_t pp = expand_pred_h(pg[H1(i)]);
+        d[i] = (mm & pp) | (nn & ~pp);
+    }
+}
+
+void HELPER(sve_cpy_m_s)(void *vd, void *vn, void *vg,
+                         uint64_t mm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn;
+    uint8_t *pg = vg;
+
+    mm = dup_const(MO_32, mm);
+    for (i = 0; i < opr_sz; i += 1) {
+        uint64_t nn = n[i];
+        uint64_t pp = expand_pred_s(pg[H1(i)]);
+        d[i] = (mm & pp) | (nn & ~pp);
+    }
+}
+
+void HELPER(sve_cpy_m_d)(void *vd, void *vn, void *vg,
+                         uint64_t mm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn;
+    uint8_t *pg = vg;
+
+    for (i = 0; i < opr_sz; i += 1) {
+        uint64_t nn = n[i];
+        d[i] = (pg[H1(i)] & 1 ? mm : nn);
+    }
+}
+
+void HELPER(sve_cpy_z_b)(void *vd, void *vg, uint64_t val, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd;
+    uint8_t *pg = vg;
+
+    val = dup_const(MO_8, val);
+    for (i = 0; i < opr_sz; i += 1) {
+        d[i] = val & expand_pred_b(pg[H1(i)]);
+    }
+}
+
+void HELPER(sve_cpy_z_h)(void *vd, void *vg, uint64_t val, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd;
+    uint8_t *pg = vg;
+
+    val = dup_const(MO_16, val);
+    for (i = 0; i < opr_sz; i += 1) {
+        d[i] = val & expand_pred_h(pg[H1(i)]);
+    }
+}
+
+void HELPER(sve_cpy_z_s)(void *vd, void *vg, uint64_t val, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd;
+    uint8_t *pg = vg;
+
+    val = dup_const(MO_32, val);
+    for (i = 0; i < opr_sz; i += 1) {
+        d[i] = val & expand_pred_s(pg[H1(i)]);
+    }
+}
+
+void HELPER(sve_cpy_z_d)(void *vd, void *vg, uint64_t val, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd;
+    uint8_t *pg = vg;
+
+    for (i = 0; i < opr_sz; i += 1) {
+        d[i] = (pg[H1(i)] & 1 ? val : 0);
+    }
+}
+
+/* Big-endian hosts need to frob the byte indices.  If the copy
+ * happens to be 8-byte aligned, then no frobbing necessary.
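+ *
+ * The dispatch below ORs both addresses and the length together; the
+ * low three bits of that value select the widest unit (8, 4, 2 or 1
+ * bytes) to which everything is aligned, and hence which H-macro
+ * correction applies.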
+ */ +static void swap_memmove(void *vd, void *vs, size_t n) +{ + uintptr_t d = (uintptr_t)vd; + uintptr_t s = (uintptr_t)vs; + uintptr_t o = (d | s | n) & 7; + size_t i; + +#ifndef HOST_WORDS_BIGENDIAN + o = 0; +#endif + switch (o) { + case 0: + memmove(vd, vs, n); + break; + + case 4: + if (d < s || d >= s + n) { + for (i = 0; i < n; i += 4) { + *(uint32_t *)H1_4(d + i) = *(uint32_t *)H1_4(s + i); + } + } else { + for (i = n; i > 0; ) { + i -= 4; + *(uint32_t *)H1_4(d + i) = *(uint32_t *)H1_4(s + i); + } + } + break; + + case 2: + case 6: + if (d < s || d >= s + n) { + for (i = 0; i < n; i += 2) { + *(uint16_t *)H1_2(d + i) = *(uint16_t *)H1_2(s + i); + } + } else { + for (i = n; i > 0; ) { + i -= 2; + *(uint16_t *)H1_2(d + i) = *(uint16_t *)H1_2(s + i); + } + } + break; + + default: + if (d < s || d >= s + n) { + for (i = 0; i < n; i++) { + *(uint8_t *)H1(d + i) = *(uint8_t *)H1(s + i); + } + } else { + for (i = n; i > 0; ) { + i -= 1; + *(uint8_t *)H1(d + i) = *(uint8_t *)H1(s + i); + } + } + break; + } +} + +/* Similarly for memset of 0. */ +static void swap_memzero(void *vd, size_t n) +{ + uintptr_t d = (uintptr_t)vd; + uintptr_t o = (d | n) & 7; + size_t i; + + /* Usually, the first bit of a predicate is set, so N is 0. */ + if (likely(n == 0)) { + return; + } + +#ifndef HOST_WORDS_BIGENDIAN + o = 0; +#endif + switch (o) { + case 0: + memset(vd, 0, n); + break; + + case 4: + for (i = 0; i < n; i += 4) { + *(uint32_t *)H1_4(d + i) = 0; + } + break; + + case 2: + case 6: + for (i = 0; i < n; i += 2) { + *(uint16_t *)H1_2(d + i) = 0; + } + break; + + default: + for (i = 0; i < n; i++) { + *(uint8_t *)H1(d + i) = 0; + } + break; + } +} + +void HELPER(sve_ext)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t opr_sz = simd_oprsz(desc); + size_t n_ofs = simd_data(desc); + size_t n_siz = opr_sz - n_ofs; + + if (vd != vm) { + swap_memmove(vd, (char *)vn + n_ofs, n_siz); + swap_memmove((char *)vd + n_siz, vm, n_ofs); + } else if (vd != vn) { + swap_memmove((char *)vd + n_siz, vd, n_ofs); + swap_memmove(vd, (char *)vn + n_ofs, n_siz); + } else { + /* vd == vn == vm. Need temp space. 
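+         * A single temporary is enough: only the low n_ofs bytes of
+         * the M input are still live once the N part has been moved.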
 */
+        ARMVectorReg tmp;
+        swap_memmove(&tmp, vm, n_ofs);
+        swap_memmove(vd, (char *)vd + n_ofs, n_siz);
+        memcpy((char *)vd + n_siz, &tmp, n_ofs);
+    }
+}
+
+#define DO_INSR(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, uint64_t val, uint32_t desc) \
+{ \
+    intptr_t opr_sz = simd_oprsz(desc); \
+    swap_memmove((char *)vd + sizeof(TYPE), vn, opr_sz - sizeof(TYPE)); \
+    *(TYPE *)((char *)vd + H(0)) = val; \
+}
+
+DO_INSR(sve_insr_b, uint8_t, H1)
+DO_INSR(sve_insr_h, uint16_t, H1_2)
+DO_INSR(sve_insr_s, uint32_t, H1_4)
+DO_INSR(sve_insr_d, uint64_t, )
+
+#undef DO_INSR
+
+void HELPER(sve_rev_b)(void *vd, void *vn, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
+        uint64_t f = *(uint64_t *)((char *)vn + i);
+        uint64_t b = *(uint64_t *)((char *)vn + j);
+        *(uint64_t *)((char *)vd + i) = bswap64(b);
+        *(uint64_t *)((char *)vd + j) = bswap64(f);
+    }
+}
+
+void HELPER(sve_rev_h)(void *vd, void *vn, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
+        uint64_t f = *(uint64_t *)((char *)vn + i);
+        uint64_t b = *(uint64_t *)((char *)vn + j);
+        *(uint64_t *)((char *)vd + i) = hswap64(b);
+        *(uint64_t *)((char *)vd + j) = hswap64(f);
+    }
+}
+
+void HELPER(sve_rev_s)(void *vd, void *vn, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
+        uint64_t f = *(uint64_t *)((char *)vn + i);
+        uint64_t b = *(uint64_t *)((char *)vn + j);
+        *(uint64_t *)((char *)vd + i) = rol64(b, 32);
+        *(uint64_t *)((char *)vd + j) = rol64(f, 32);
+    }
+}
+
+void HELPER(sve_rev_d)(void *vd, void *vn, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
+        uint64_t f = *(uint64_t *)((char *)vn + i);
+        uint64_t b = *(uint64_t *)((char *)vn + j);
+        *(uint64_t *)((char *)vd + i) = b;
+        *(uint64_t *)((char *)vd + j) = f;
+    }
+}
+
+#define DO_TBL(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    uintptr_t elem = opr_sz / sizeof(TYPE); \
+    TYPE *d = vd, *n = vn, *m = vm; \
+    ARMVectorReg tmp; \
+    if (unlikely(vd == vn)) { \
+        n = memcpy(&tmp, vn, opr_sz); \
+    } \
+    for (i = 0; i < elem; i++) { \
+        TYPE j = m[H(i)]; \
+        d[H(i)] = j < elem ? n[H(j)] : 0; \
+    } \
+}
+
+DO_TBL(sve_tbl_b, uint8_t, H1)
+DO_TBL(sve_tbl_h, uint16_t, H2)
+DO_TBL(sve_tbl_s, uint32_t, H4)
+DO_TBL(sve_tbl_d, uint64_t, )
+
+#undef DO_TBL
+
+#define DO_UNPK(NAME, TYPED, TYPES, HD, HS) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+    intptr_t i, opr_sz = simd_oprsz(desc); \
+    TYPED *d = vd; \
+    TYPES *n = vn; \
+    ARMVectorReg tmp; \
+    if (unlikely((char *)vn - (char *)vd < opr_sz)) { \
+        n = memcpy(&tmp, n, opr_sz / 2); \
+    } \
+    for (i = 0; i < opr_sz / sizeof(TYPED); i++) { \
+        d[HD(i)] = n[HS(i)]; \
+    } \
+}
+
+DO_UNPK(sve_sunpk_h, int16_t, int8_t, H2, H1)
+DO_UNPK(sve_sunpk_s, int32_t, int16_t, H4, H2)
+DO_UNPK(sve_sunpk_d, int64_t, int32_t, , H4)
+
+DO_UNPK(sve_uunpk_h, uint16_t, uint8_t, H2, H1)
+DO_UNPK(sve_uunpk_s, uint32_t, uint16_t, H4, H2)
+DO_UNPK(sve_uunpk_d, uint64_t, uint32_t, , H4)
+
+#undef DO_UNPK
+
+/* Mask of bits included in the even numbered predicates of width esz.
+ * We also use this for expand_bits/compress_bits, and so extend the
+ * same pattern out to 16-bit units.
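+ *
+ * For instance expand_bits(x, 0) sends bit k of a 32-bit input to bit
+ * 2k with a zero interleaved above it, e.g. 0x00000000ffffffffull
+ * expands to 0x5555555555555555ull.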
+ */ +static const uint64_t even_bit_esz_masks[5] = { + 0x5555555555555555ull, + 0x3333333333333333ull, + 0x0f0f0f0f0f0f0f0full, + 0x00ff00ff00ff00ffull, + 0x0000ffff0000ffffull, +}; + +/* Zero-extend units of 2**N bits to units of 2**(N+1) bits. + * For N==0, this corresponds to the operation that in qemu/bitops.h + * we call half_shuffle64; this algorithm is from Hacker's Delight, + * section 7-2 Shuffling Bits. + */ +static uint64_t expand_bits(uint64_t x, int n) +{ + int i; + + x &= 0xffffffffu; + for (i = 4; i >= n; i--) { + int sh = 1 << i; + x = ((x << sh) | x) & even_bit_esz_masks[i]; + } + return x; +} + +/* Compress units of 2**(N+1) bits to units of 2**N bits. + * For N==0, this corresponds to the operation that in qemu/bitops.h + * we call half_unshuffle64; this algorithm is from Hacker's Delight, + * section 7-2 Shuffling Bits, where it is called an inverse half shuffle. + */ +static uint64_t compress_bits(uint64_t x, int n) +{ + int i; + + for (i = n; i <= 4; i++) { + int sh = 1 << i; + x &= even_bit_esz_masks[i]; + x = (x >> sh) | x; + } + return x & 0xffffffffu; +} + +void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); + intptr_t high = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1); + uint64_t *d = vd; + intptr_t i; + + if (oprsz <= 8) { + uint64_t nn = *(uint64_t *)vn; + uint64_t mm = *(uint64_t *)vm; + int half = 4 * oprsz; + + nn = extract64(nn, high * half, half); + mm = extract64(mm, high * half, half); + nn = expand_bits(nn, esz); + mm = expand_bits(mm, esz); + d[0] = nn + (mm << (1 << esz)); + } else { + ARMPredicateReg tmp_n, tmp_m; + + /* We produce output faster than we consume input. + Therefore we must be mindful of possible overlap. 
*/ + if (((char *)vn - (char *)vd) < (uintptr_t)oprsz) { + vn = memcpy(&tmp_n, vn, oprsz); + } + if (((char *)vm - (char *)vd) < (uintptr_t)oprsz) { + vm = memcpy(&tmp_m, vm, oprsz); + } + if (high) { + high = oprsz >> 1; + } + + if ((high & 3) == 0) { + uint32_t *n = vn, *m = vm; + high >>= 2; + + for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) { + uint64_t nn = n[H4(high + i)]; + uint64_t mm = m[H4(high + i)]; + + nn = expand_bits(nn, esz); + mm = expand_bits(mm, esz); + d[i] = nn + (mm << (1 << esz)); + } + } else { + uint8_t *n = vn, *m = vm; + uint16_t *d16 = vd; + + for (i = 0; i < oprsz / 2; i++) { + uint16_t nn = n[H1(high + i)]; + uint16_t mm = m[H1(high + i)]; + + nn = expand_bits(nn, esz); + mm = expand_bits(mm, esz); + d16[H2(i)] = nn + (mm << (1 << esz)); + } + } + } +} + +void HELPER(sve_uzp_p)(void *vd, void *vn, void *vm, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); + int odd = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1) << esz; + uint64_t *d = vd, *n = vn, *m = vm; + uint64_t l, h; + intptr_t i; + + if (oprsz <= 8) { + l = compress_bits(n[0] >> odd, esz); + h = compress_bits(m[0] >> odd, esz); + d[0] = extract64(l + (h << (4 * oprsz)), 0, 8 * oprsz); + } else { + ARMPredicateReg tmp_m; + intptr_t oprsz_16 = oprsz / 16; + + if (((char *)vm - (char *)vd) < (uintptr_t)oprsz) { + m = memcpy(&tmp_m, vm, oprsz); + } + + for (i = 0; i < oprsz_16; i++) { + l = n[2 * i + 0]; + h = n[2 * i + 1]; + l = compress_bits(l >> odd, esz); + h = compress_bits(h >> odd, esz); + d[i] = l + (h << 32); + } + + /* For VL which is not a power of 2, the results from M do not + align nicely with the uint64_t for D. Put the aligned results + from M into TMP_M and then copy it into place afterward. */ + if (oprsz & 15) { + d[i] = compress_bits(n[2 * i] >> odd, esz); + + for (i = 0; i < oprsz_16; i++) { + l = m[2 * i + 0]; + h = m[2 * i + 1]; + l = compress_bits(l >> odd, esz); + h = compress_bits(h >> odd, esz); + tmp_m.p[i] = l + (h << 32); + } + tmp_m.p[i] = compress_bits(m[2 * i] >> odd, esz); + + swap_memmove((char *)vd + oprsz / 2, &tmp_m, oprsz / 2); + } else { + for (i = 0; i < oprsz_16; i++) { + l = m[2 * i + 0]; + h = m[2 * i + 1]; + l = compress_bits(l >> odd, esz); + h = compress_bits(h >> odd, esz); + d[oprsz_16 + i] = l + (h << 32); + } + } + } +} + +void HELPER(sve_trn_p)(void *vd, void *vn, void *vm, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + uintptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); + bool odd = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1); + uint64_t *d = vd, *n = vn, *m = vm; + uint64_t mask; + int shr, shl; + intptr_t i; + + shl = 1 << esz; + shr = 0; + mask = even_bit_esz_masks[esz]; + if (odd) { + mask <<= shl; + shr = shl; + shl = 0; + } + + for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) { + uint64_t nn = (n[i] & mask) >> shr; + uint64_t mm = (m[i] & mask) << shl; + d[i] = nn + mm; + } +} + +/* Reverse units of 2**N bits. 
*/ +static uint64_t reverse_bits_64(uint64_t x, int n) +{ + int i, sh; + + x = bswap64(x); + for (i = 2, sh = 4; i >= n; i--, sh >>= 1) { + uint64_t mask = even_bit_esz_masks[i]; + x = ((x & mask) << sh) | ((x >> sh) & mask); + } + return x; +} + +static uint8_t reverse_bits_8(uint8_t x, int n) +{ + static const uint8_t mask[3] = { 0x55, 0x33, 0x0f }; + int i, sh; + + for (i = 2, sh = 4; i >= n; i--, sh >>= 1) { + x = ((x & mask[i]) << sh) | ((x >> sh) & mask[i]); + } + return x; +} + +void HELPER(sve_rev_p)(void *vd, void *vn, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + int esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); + intptr_t i, oprsz_2 = oprsz / 2; + + if (oprsz <= 8) { + uint64_t l = *(uint64_t *)vn; + l = reverse_bits_64(l << (64 - 8 * oprsz), esz); + *(uint64_t *)vd = l; + } else if ((oprsz & 15) == 0) { + for (i = 0; i < oprsz_2; i += 8) { + intptr_t ih = oprsz - 8 - i; + uint64_t l = reverse_bits_64(*(uint64_t *)((char *)vn + i), esz); + uint64_t h = reverse_bits_64(*(uint64_t *)((char *)vn + ih), esz); + *(uint64_t *)((char *)vd + i) = h; + *(uint64_t *)((char *)vd + ih) = l; + } + } else { + for (i = 0; i < oprsz_2; i += 1) { + intptr_t il = H1(i); + intptr_t ih = H1(oprsz - 1 - i); + uint8_t l = reverse_bits_8(*(uint8_t *)((char *)vn + il), esz); + uint8_t h = reverse_bits_8(*(uint8_t *)((char *)vn + ih), esz); + *(uint8_t *)((char *)vd + il) = h; + *(uint8_t *)((char *)vd + ih) = l; + } + } +} + +void HELPER(sve_punpk_p)(void *vd, void *vn, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + intptr_t high = extract32(pred_desc, SIMD_DATA_SHIFT + 2, 1); + uint64_t *d = vd; + intptr_t i; + + if (oprsz <= 8) { + uint64_t nn = *(uint64_t *)vn; + int half = 4 * oprsz; + + nn = extract64(nn, high * half, half); + nn = expand_bits(nn, 0); + d[0] = nn; + } else { + ARMPredicateReg tmp_n; + + /* We produce output faster than we consume input. + Therefore we must be mindful of possible overlap. */ + if (((char *)vn - (char *)vd) < (uintptr_t)oprsz) { + vn = memcpy(&tmp_n, vn, oprsz); + } + if (high) { + high = oprsz >> 1; + } + + if ((high & 3) == 0) { + uint32_t *n = vn; + high >>= 2; + + for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) { + uint64_t nn = n[H4(high + i)]; + d[i] = expand_bits(nn, 0); + } + } else { + uint16_t *d16 = vd; + uint8_t *n = vn; + + for (i = 0; i < oprsz / 2; i++) { + uint16_t nn = n[H1(high + i)]; + d16[H2(i)] = expand_bits(nn, 0); + } + } + } +} + +#define DO_ZIP(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ +{ \ + intptr_t oprsz = simd_oprsz(desc); \ + intptr_t i, oprsz_2 = oprsz / 2; \ + ARMVectorReg tmp_n, tmp_m; \ + /* We produce output faster than we consume input. \ + Therefore we must be mindful of possible overlap. 
*/ \ + if (unlikely(((char *)vn - (char *)vd) < (uintptr_t)oprsz)) { \ + vn = memcpy(&tmp_n, vn, oprsz_2); \ + } \ + if (unlikely(((char *)vm - (char *)vd) < (uintptr_t)oprsz)) { \ + vm = memcpy(&tmp_m, vm, oprsz_2); \ + } \ + for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \ + *(TYPE *)((char *)vd + H(2 * i + 0)) = *(TYPE *)((char *)vn + H(i)); \ + *(TYPE *)((char *)vd + H(2 * i + sizeof(TYPE))) = *(TYPE *)((char *)vm + H(i)); \ + } \ +} + +DO_ZIP(sve_zip_b, uint8_t, H1) +DO_ZIP(sve_zip_h, uint16_t, H1_2) +DO_ZIP(sve_zip_s, uint32_t, H1_4) +DO_ZIP(sve_zip_d, uint64_t, ) + +#define DO_UZP(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ +{ \ + intptr_t oprsz = simd_oprsz(desc); \ + intptr_t oprsz_2 = oprsz / 2; \ + intptr_t odd_ofs = simd_data(desc); \ + intptr_t i; \ + ARMVectorReg tmp_m; \ + if (unlikely(((char *)vm - (char *)vd) < (uintptr_t)oprsz)) { \ + vm = memcpy(&tmp_m, vm, oprsz); \ + } \ + for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \ + *(TYPE *)((char *)vd + H(i)) = *(TYPE *)((char *)vn + H(2 * i + odd_ofs)); \ + } \ + for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \ + *(TYPE *)((char *)vd + H(oprsz_2 + i)) = *(TYPE *)((char *)vm + H(2 * i + odd_ofs)); \ + } \ +} + +DO_UZP(sve_uzp_b, uint8_t, H1) +DO_UZP(sve_uzp_h, uint16_t, H1_2) +DO_UZP(sve_uzp_s, uint32_t, H1_4) +DO_UZP(sve_uzp_d, uint64_t, ) + +#define DO_TRN(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ +{ \ + intptr_t oprsz = simd_oprsz(desc); \ + intptr_t odd_ofs = simd_data(desc); \ + intptr_t i; \ + for (i = 0; i < oprsz; i += 2 * sizeof(TYPE)) { \ + TYPE ae = *(TYPE *)((char *)vn + H(i + odd_ofs)); \ + TYPE be = *(TYPE *)((char *)vm + H(i + odd_ofs)); \ + *(TYPE *)((char *)vd + H(i + 0)) = ae; \ + *(TYPE *)((char *)vd + H(i + sizeof(TYPE))) = be; \ + } \ +} + +DO_TRN(sve_trn_b, uint8_t, H1) +DO_TRN(sve_trn_h, uint16_t, H1_2) +DO_TRN(sve_trn_s, uint32_t, H1_4) +DO_TRN(sve_trn_d, uint64_t, ) + +#undef DO_ZIP +#undef DO_UZP +#undef DO_TRN + +void HELPER(sve_compact_s)(void *vd, void *vn, void *vg, uint32_t desc) +{ + intptr_t i, j, opr_sz = simd_oprsz(desc) / 4; + uint32_t *d = vd, *n = vn; + uint8_t *pg = vg; + + for (i = j = 0; i < opr_sz; i++) { + if (pg[H1(i / 2)] & (i & 1 ? 0x10 : 0x01)) { + d[H4(j)] = n[H4(i)]; + j++; + } + } + for (; j < opr_sz; j++) { + d[H4(j)] = 0; + } +} + +void HELPER(sve_compact_d)(void *vd, void *vn, void *vg, uint32_t desc) +{ + intptr_t i, j, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn; + uint8_t *pg = vg; + + for (i = j = 0; i < opr_sz; i++) { + if (pg[H1(i)] & 1) { + d[j] = n[i]; + j++; + } + } + for (; j < opr_sz; j++) { + d[j] = 0; + } +} + +/* Similar to the ARM LastActiveElement pseudocode function, except the + * result is multiplied by the element size. This includes the not found + * indication; e.g. not found for esz=3 is -8. + */ +int32_t HELPER(sve_last_active_element)(void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + intptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); + + return last_active_element(vg, DIV_ROUND_UP(oprsz, 8), esz); +} + +void HELPER(sve_splice)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) +{ + intptr_t opr_sz = simd_oprsz(desc) / 8; + int esz = simd_data(desc); + uint64_t pg, first_g, last_g, len, mask = pred_esz_masks[esz]; + intptr_t i, first_i, last_i; + ARMVectorReg tmp; + + first_i = last_i = 0; + first_g = last_g = 0; + + /* Find the extent of the active elements within VG. 
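+     * The scan runs from the high word down: the first non-zero word
+     * seen fixes last_g/last_i, while first_g/first_i keep being
+     * overwritten and so finish describing the lowest active word.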
*/ + for (i = QEMU_ALIGN_UP(opr_sz, 8) - 8; i >= 0; i -= 8) { + pg = *(uint64_t *)((char *)vg + i) & mask; + if (pg) { + if (last_g == 0) { + last_g = pg; + last_i = i; + } + first_g = pg; + first_i = i; + } + } + + len = 0; + if (first_g != 0) { + first_i = first_i * 8 + ctz64(first_g); + last_i = last_i * 8 + 63 - clz64(last_g); + len = last_i - first_i + (1ULL << esz); + if (vd == vm) { + vm = memcpy(&tmp, vm, opr_sz * 8); + } + swap_memmove(vd, (char *)vn + first_i, len); + } + swap_memmove((char *)vd + len, vm, opr_sz * 8 - len); +} + +void HELPER(sve_sel_zpzz_b)(void *vd, void *vn, void *vm, + void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn, *m = vm; + uint8_t *pg = vg; + + for (i = 0; i < opr_sz; i += 1) { + uint64_t nn = n[i], mm = m[i]; + uint64_t pp = expand_pred_b(pg[H1(i)]); + d[i] = (nn & pp) | (mm & ~pp); + } +} + +void HELPER(sve_sel_zpzz_h)(void *vd, void *vn, void *vm, + void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn, *m = vm; + uint8_t *pg = vg; + + for (i = 0; i < opr_sz; i += 1) { + uint64_t nn = n[i], mm = m[i]; + uint64_t pp = expand_pred_h(pg[H1(i)]); + d[i] = (nn & pp) | (mm & ~pp); + } +} + +void HELPER(sve_sel_zpzz_s)(void *vd, void *vn, void *vm, + void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn, *m = vm; + uint8_t *pg = vg; + + for (i = 0; i < opr_sz; i += 1) { + uint64_t nn = n[i], mm = m[i]; + uint64_t pp = expand_pred_s(pg[H1(i)]); + d[i] = (nn & pp) | (mm & ~pp); + } +} + +void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm, + void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn, *m = vm; + uint8_t *pg = vg; + + for (i = 0; i < opr_sz; i += 1) { + uint64_t nn = n[i], mm = m[i]; + d[i] = (pg[H1(i)] & 1 ? nn : mm); + } +} + +/* Two operand comparison controlled by a predicate. + * ??? It is very tempting to want to be able to expand this inline + * with x86 instructions, e.g. + * + * vcmpeqw zm, zn, %ymm0 + * vpmovmskb %ymm0, %eax + * and $0x5555, %eax + * and pg, %eax + * + * or even aarch64, e.g. + * + * // mask = 4000 1000 0400 0100 0040 0010 0004 0001 + * cmeq v0.8h, zn, zm + * and v0.8h, v0.8h, mask + * addv h0, v0.8h + * and v0.8b, pg + * + * However, coming up with an abstraction that allows vector inputs and + * a scalar output, and also handles the byte-ordering of sub-uint64_t + * scalar outputs, is tricky. 
+ */ +#define DO_CMP_PPZZ(NAME, TYPE, OP, H, MASK) \ +uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ +{ \ + intptr_t opr_sz = simd_oprsz(desc); \ + uint32_t flags = PREDTEST_INIT; \ + intptr_t i = opr_sz; \ + do { \ + uint64_t out = 0, pg; \ + do { \ + i -= sizeof(TYPE), out <<= sizeof(TYPE); \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + TYPE mm = *(TYPE *)((char *)vm + H(i)); \ + out |= nn OP mm; \ + } while (i & 63); \ + pg = *(uint64_t *)((char *)vg + (i >> 3)) & MASK; \ + out &= pg; \ + *(uint64_t *)((char *)vd + (i >> 3)) = out; \ + flags = iter_predtest_bwd(out, pg, flags); \ + } while (i > 0); \ + return flags; \ +} + +#define DO_CMP_PPZZ_B(NAME, TYPE, OP) \ + DO_CMP_PPZZ(NAME, TYPE, OP, H1, 0xffffffffffffffffull) +#define DO_CMP_PPZZ_H(NAME, TYPE, OP) \ + DO_CMP_PPZZ(NAME, TYPE, OP, H1_2, 0x5555555555555555ull) +#define DO_CMP_PPZZ_S(NAME, TYPE, OP) \ + DO_CMP_PPZZ(NAME, TYPE, OP, H1_4, 0x1111111111111111ull) +#define DO_CMP_PPZZ_D(NAME, TYPE, OP) \ + DO_CMP_PPZZ(NAME, TYPE, OP, , 0x0101010101010101ull) + +DO_CMP_PPZZ_B(sve_cmpeq_ppzz_b, uint8_t, ==) +DO_CMP_PPZZ_H(sve_cmpeq_ppzz_h, uint16_t, ==) +DO_CMP_PPZZ_S(sve_cmpeq_ppzz_s, uint32_t, ==) +DO_CMP_PPZZ_D(sve_cmpeq_ppzz_d, uint64_t, ==) + +DO_CMP_PPZZ_B(sve_cmpne_ppzz_b, uint8_t, !=) +DO_CMP_PPZZ_H(sve_cmpne_ppzz_h, uint16_t, !=) +DO_CMP_PPZZ_S(sve_cmpne_ppzz_s, uint32_t, !=) +DO_CMP_PPZZ_D(sve_cmpne_ppzz_d, uint64_t, !=) + +DO_CMP_PPZZ_B(sve_cmpgt_ppzz_b, int8_t, >) +DO_CMP_PPZZ_H(sve_cmpgt_ppzz_h, int16_t, >) +DO_CMP_PPZZ_S(sve_cmpgt_ppzz_s, int32_t, >) +DO_CMP_PPZZ_D(sve_cmpgt_ppzz_d, int64_t, >) + +DO_CMP_PPZZ_B(sve_cmpge_ppzz_b, int8_t, >=) +DO_CMP_PPZZ_H(sve_cmpge_ppzz_h, int16_t, >=) +DO_CMP_PPZZ_S(sve_cmpge_ppzz_s, int32_t, >=) +DO_CMP_PPZZ_D(sve_cmpge_ppzz_d, int64_t, >=) + +DO_CMP_PPZZ_B(sve_cmphi_ppzz_b, uint8_t, >) +DO_CMP_PPZZ_H(sve_cmphi_ppzz_h, uint16_t, >) +DO_CMP_PPZZ_S(sve_cmphi_ppzz_s, uint32_t, >) +DO_CMP_PPZZ_D(sve_cmphi_ppzz_d, uint64_t, >) + +DO_CMP_PPZZ_B(sve_cmphs_ppzz_b, uint8_t, >=) +DO_CMP_PPZZ_H(sve_cmphs_ppzz_h, uint16_t, >=) +DO_CMP_PPZZ_S(sve_cmphs_ppzz_s, uint32_t, >=) +DO_CMP_PPZZ_D(sve_cmphs_ppzz_d, uint64_t, >=) + +#undef DO_CMP_PPZZ_B +#undef DO_CMP_PPZZ_H +#undef DO_CMP_PPZZ_S +#undef DO_CMP_PPZZ_D +#undef DO_CMP_PPZZ + +/* Similar, but the second source is "wide". 
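+ * Each 64-bit lane of M is compared against every narrower N element
+ * sharing that lane, so mm is reloaded only once per eight bytes.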
*/ +#define DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H, MASK) \ +uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ +{ \ + intptr_t opr_sz = simd_oprsz(desc); \ + uint32_t flags = PREDTEST_INIT; \ + intptr_t i = opr_sz; \ + do { \ + uint64_t out = 0, pg; \ + do { \ + TYPEW mm = *(TYPEW *)((char *)vm + i - 8); \ + do { \ + i -= sizeof(TYPE), out <<= sizeof(TYPE); \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + out |= nn OP mm; \ + } while (i & 7); \ + } while (i & 63); \ + pg = *(uint64_t *)((char *)vg + (i >> 3)) & MASK; \ + out &= pg; \ + *(uint64_t *)((char *)vd + (i >> 3)) = out; \ + flags = iter_predtest_bwd(out, pg, flags); \ + } while (i > 0); \ + return flags; \ +} + +#define DO_CMP_PPZW_B(NAME, TYPE, TYPEW, OP) \ + DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1, 0xffffffffffffffffull) +#define DO_CMP_PPZW_H(NAME, TYPE, TYPEW, OP) \ + DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1_2, 0x5555555555555555ull) +#define DO_CMP_PPZW_S(NAME, TYPE, TYPEW, OP) \ + DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1_4, 0x1111111111111111ull) + +DO_CMP_PPZW_B(sve_cmpeq_ppzw_b, int8_t, uint64_t, ==) +DO_CMP_PPZW_H(sve_cmpeq_ppzw_h, int16_t, uint64_t, ==) +DO_CMP_PPZW_S(sve_cmpeq_ppzw_s, int32_t, uint64_t, ==) + +DO_CMP_PPZW_B(sve_cmpne_ppzw_b, int8_t, uint64_t, !=) +DO_CMP_PPZW_H(sve_cmpne_ppzw_h, int16_t, uint64_t, !=) +DO_CMP_PPZW_S(sve_cmpne_ppzw_s, int32_t, uint64_t, !=) + +DO_CMP_PPZW_B(sve_cmpgt_ppzw_b, int8_t, int64_t, >) +DO_CMP_PPZW_H(sve_cmpgt_ppzw_h, int16_t, int64_t, >) +DO_CMP_PPZW_S(sve_cmpgt_ppzw_s, int32_t, int64_t, >) + +DO_CMP_PPZW_B(sve_cmpge_ppzw_b, int8_t, int64_t, >=) +DO_CMP_PPZW_H(sve_cmpge_ppzw_h, int16_t, int64_t, >=) +DO_CMP_PPZW_S(sve_cmpge_ppzw_s, int32_t, int64_t, >=) + +DO_CMP_PPZW_B(sve_cmphi_ppzw_b, uint8_t, uint64_t, >) +DO_CMP_PPZW_H(sve_cmphi_ppzw_h, uint16_t, uint64_t, >) +DO_CMP_PPZW_S(sve_cmphi_ppzw_s, uint32_t, uint64_t, >) + +DO_CMP_PPZW_B(sve_cmphs_ppzw_b, uint8_t, uint64_t, >=) +DO_CMP_PPZW_H(sve_cmphs_ppzw_h, uint16_t, uint64_t, >=) +DO_CMP_PPZW_S(sve_cmphs_ppzw_s, uint32_t, uint64_t, >=) + +DO_CMP_PPZW_B(sve_cmplt_ppzw_b, int8_t, int64_t, <) +DO_CMP_PPZW_H(sve_cmplt_ppzw_h, int16_t, int64_t, <) +DO_CMP_PPZW_S(sve_cmplt_ppzw_s, int32_t, int64_t, <) + +DO_CMP_PPZW_B(sve_cmple_ppzw_b, int8_t, int64_t, <=) +DO_CMP_PPZW_H(sve_cmple_ppzw_h, int16_t, int64_t, <=) +DO_CMP_PPZW_S(sve_cmple_ppzw_s, int32_t, int64_t, <=) + +DO_CMP_PPZW_B(sve_cmplo_ppzw_b, uint8_t, uint64_t, <) +DO_CMP_PPZW_H(sve_cmplo_ppzw_h, uint16_t, uint64_t, <) +DO_CMP_PPZW_S(sve_cmplo_ppzw_s, uint32_t, uint64_t, <) + +DO_CMP_PPZW_B(sve_cmpls_ppzw_b, uint8_t, uint64_t, <=) +DO_CMP_PPZW_H(sve_cmpls_ppzw_h, uint16_t, uint64_t, <=) +DO_CMP_PPZW_S(sve_cmpls_ppzw_s, uint32_t, uint64_t, <=) + +#undef DO_CMP_PPZW_B +#undef DO_CMP_PPZW_H +#undef DO_CMP_PPZW_S +#undef DO_CMP_PPZW + +/* Similar, but the second source is immediate. 
*/ +#define DO_CMP_PPZI(NAME, TYPE, OP, H, MASK) \ +uint32_t HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ +{ \ + intptr_t opr_sz = simd_oprsz(desc); \ + uint32_t flags = PREDTEST_INIT; \ + TYPE mm = simd_data(desc); \ + intptr_t i = opr_sz; \ + do { \ + uint64_t out = 0, pg; \ + do { \ + i -= sizeof(TYPE), out <<= sizeof(TYPE); \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + out |= nn OP mm; \ + } while (i & 63); \ + pg = *(uint64_t *)((char *)vg + (i >> 3)) & MASK; \ + out &= pg; \ + *(uint64_t *)((char *)vd + (i >> 3)) = out; \ + flags = iter_predtest_bwd(out, pg, flags); \ + } while (i > 0); \ + return flags; \ +} + +#define DO_CMP_PPZI_B(NAME, TYPE, OP) \ + DO_CMP_PPZI(NAME, TYPE, OP, H1, 0xffffffffffffffffull) +#define DO_CMP_PPZI_H(NAME, TYPE, OP) \ + DO_CMP_PPZI(NAME, TYPE, OP, H1_2, 0x5555555555555555ull) +#define DO_CMP_PPZI_S(NAME, TYPE, OP) \ + DO_CMP_PPZI(NAME, TYPE, OP, H1_4, 0x1111111111111111ull) +#define DO_CMP_PPZI_D(NAME, TYPE, OP) \ + DO_CMP_PPZI(NAME, TYPE, OP, , 0x0101010101010101ull) + +DO_CMP_PPZI_B(sve_cmpeq_ppzi_b, uint8_t, ==) +DO_CMP_PPZI_H(sve_cmpeq_ppzi_h, uint16_t, ==) +DO_CMP_PPZI_S(sve_cmpeq_ppzi_s, uint32_t, ==) +DO_CMP_PPZI_D(sve_cmpeq_ppzi_d, uint64_t, ==) + +DO_CMP_PPZI_B(sve_cmpne_ppzi_b, uint8_t, !=) +DO_CMP_PPZI_H(sve_cmpne_ppzi_h, uint16_t, !=) +DO_CMP_PPZI_S(sve_cmpne_ppzi_s, uint32_t, !=) +DO_CMP_PPZI_D(sve_cmpne_ppzi_d, uint64_t, !=) + +DO_CMP_PPZI_B(sve_cmpgt_ppzi_b, int8_t, >) +DO_CMP_PPZI_H(sve_cmpgt_ppzi_h, int16_t, >) +DO_CMP_PPZI_S(sve_cmpgt_ppzi_s, int32_t, >) +DO_CMP_PPZI_D(sve_cmpgt_ppzi_d, int64_t, >) + +DO_CMP_PPZI_B(sve_cmpge_ppzi_b, int8_t, >=) +DO_CMP_PPZI_H(sve_cmpge_ppzi_h, int16_t, >=) +DO_CMP_PPZI_S(sve_cmpge_ppzi_s, int32_t, >=) +DO_CMP_PPZI_D(sve_cmpge_ppzi_d, int64_t, >=) + +DO_CMP_PPZI_B(sve_cmphi_ppzi_b, uint8_t, >) +DO_CMP_PPZI_H(sve_cmphi_ppzi_h, uint16_t, >) +DO_CMP_PPZI_S(sve_cmphi_ppzi_s, uint32_t, >) +DO_CMP_PPZI_D(sve_cmphi_ppzi_d, uint64_t, >) + +DO_CMP_PPZI_B(sve_cmphs_ppzi_b, uint8_t, >=) +DO_CMP_PPZI_H(sve_cmphs_ppzi_h, uint16_t, >=) +DO_CMP_PPZI_S(sve_cmphs_ppzi_s, uint32_t, >=) +DO_CMP_PPZI_D(sve_cmphs_ppzi_d, uint64_t, >=) + +DO_CMP_PPZI_B(sve_cmplt_ppzi_b, int8_t, <) +DO_CMP_PPZI_H(sve_cmplt_ppzi_h, int16_t, <) +DO_CMP_PPZI_S(sve_cmplt_ppzi_s, int32_t, <) +DO_CMP_PPZI_D(sve_cmplt_ppzi_d, int64_t, <) + +DO_CMP_PPZI_B(sve_cmple_ppzi_b, int8_t, <=) +DO_CMP_PPZI_H(sve_cmple_ppzi_h, int16_t, <=) +DO_CMP_PPZI_S(sve_cmple_ppzi_s, int32_t, <=) +DO_CMP_PPZI_D(sve_cmple_ppzi_d, int64_t, <=) + +DO_CMP_PPZI_B(sve_cmplo_ppzi_b, uint8_t, <) +DO_CMP_PPZI_H(sve_cmplo_ppzi_h, uint16_t, <) +DO_CMP_PPZI_S(sve_cmplo_ppzi_s, uint32_t, <) +DO_CMP_PPZI_D(sve_cmplo_ppzi_d, uint64_t, <) + +DO_CMP_PPZI_B(sve_cmpls_ppzi_b, uint8_t, <=) +DO_CMP_PPZI_H(sve_cmpls_ppzi_h, uint16_t, <=) +DO_CMP_PPZI_S(sve_cmpls_ppzi_s, uint32_t, <=) +DO_CMP_PPZI_D(sve_cmpls_ppzi_d, uint64_t, <=) + +#undef DO_CMP_PPZI_B +#undef DO_CMP_PPZI_H +#undef DO_CMP_PPZI_S +#undef DO_CMP_PPZI_D +#undef DO_CMP_PPZI + +/* Similar to the ARM LastActive pseudocode function. */ +static bool last_active_pred(void *vd, void *vg, intptr_t oprsz) +{ + intptr_t i; + + for (i = QEMU_ALIGN_UP(oprsz, 8) - 8; i >= 0; i -= 8) { + uint64_t pg = *(uint64_t *)((char *)vg + i); + if (pg) { + return (pow2floor(pg) & *(uint64_t *)((char *)vd + i)) != 0; + } + } + return 0; +} + +/* Compute a mask into RETB that is true for all G, up to and including + * (if after) or excluding (if !after) the first G & N. + * Return true if BRK found. 
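+ *
+ * Worked example, with values chosen purely for illustration:
+ * given g = 0xf0 and n = 0x40,
+ *   b = g & n         = 0x40   first guard-true, pred-true bit
+ *   b = b & -b        = 0x40   isolate the lowest such bit
+ *   after:  b | (b-1) = 0x7f   true up to and including the break
+ *   before: b - 1     = 0x3f   true up to but excluding the break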
+ */
+static bool compute_brk(uint64_t *retb, uint64_t n, uint64_t g,
+                        bool brk, bool after)
+{
+    uint64_t b;
+
+    if (brk) {
+        b = 0;
+    } else if ((g & n) == 0) {
+        /* For all G, no N are set; break not found. */
+        b = g;
+    } else {
+        /* Break somewhere in N. Locate it. */
+        b = g & n; /* guard true, pred true */
+#ifdef _MSC_VER
+        b = b & (0 - b); /* first such */
+#else
+        b = b & -b; /* first such */
+#endif
+        if (after) {
+            b = b | (b - 1); /* break after same */
+        } else {
+            b = b - 1; /* break before same */
+        }
+        brk = true;
+    }
+
+    *retb = b;
+    return brk;
+}
+
+/* Compute a zeroing BRK. */
+static void compute_brk_z(uint64_t *d, uint64_t *n, uint64_t *g,
+                          intptr_t oprsz, bool after)
+{
+    bool brk = false;
+    intptr_t i;
+
+    for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
+        uint64_t this_b, this_g = g[i];
+
+        brk = compute_brk(&this_b, n[i], this_g, brk, after);
+        d[i] = this_b & this_g;
+    }
+}
+
+/* Likewise, but also compute flags. */
+static uint32_t compute_brks_z(uint64_t *d, uint64_t *n, uint64_t *g,
+                               intptr_t oprsz, bool after)
+{
+    uint32_t flags = PREDTEST_INIT;
+    bool brk = false;
+    intptr_t i;
+
+    for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
+        uint64_t this_b, this_d, this_g = g[i];
+
+        brk = compute_brk(&this_b, n[i], this_g, brk, after);
+        d[i] = this_d = this_b & this_g;
+        flags = iter_predtest_fwd(this_d, this_g, flags);
+    }
+    return flags;
+}
+
+/* Compute a merging BRK. */
+static void compute_brk_m(uint64_t *d, uint64_t *n, uint64_t *g,
+                          intptr_t oprsz, bool after)
+{
+    bool brk = false;
+    intptr_t i;
+
+    for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
+        uint64_t this_b, this_g = g[i];
+
+        brk = compute_brk(&this_b, n[i], this_g, brk, after);
+        d[i] = (this_b & this_g) | (d[i] & ~this_g);
+    }
+}
+
+/* Likewise, but also compute flags. */
+static uint32_t compute_brks_m(uint64_t *d, uint64_t *n, uint64_t *g,
+                               intptr_t oprsz, bool after)
+{
+    uint32_t flags = PREDTEST_INIT;
+    bool brk = false;
+    intptr_t i;
+
+    for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
+        uint64_t this_b, this_d = d[i], this_g = g[i];
+
+        brk = compute_brk(&this_b, n[i], this_g, brk, after);
+        d[i] = this_d = (this_b & this_g) | (this_d & ~this_g);
+        flags = iter_predtest_fwd(this_d, this_g, flags);
+    }
+    return flags;
+}
+
+static uint32_t do_zero(ARMPredicateReg *d, intptr_t oprsz)
+{
+    /* It is quicker to zero the whole predicate than loop on OPRSZ.
+     * The compiler should turn this into 4 64-bit integer stores.
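+     * (sizeof(ARMPredicateReg) is 32 bytes: the maximum 2048-bit
+     * vector carries one predicate bit per byte, i.e. 256 bits.)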
+ */ + memset(d, 0, sizeof(ARMPredicateReg)); + return PREDTEST_INIT; +} + +void HELPER(sve_brkpa)(void *vd, void *vn, void *vm, void *vg, + uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + if (last_active_pred(vn, vg, oprsz)) { + compute_brk_z(vd, vm, vg, oprsz, true); + } else { + do_zero(vd, oprsz); + } +} + +uint32_t HELPER(sve_brkpas)(void *vd, void *vn, void *vm, void *vg, + uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + if (last_active_pred(vn, vg, oprsz)) { + return compute_brks_z(vd, vm, vg, oprsz, true); + } else { + return do_zero(vd, oprsz); + } +} + +void HELPER(sve_brkpb)(void *vd, void *vn, void *vm, void *vg, + uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + if (last_active_pred(vn, vg, oprsz)) { + compute_brk_z(vd, vm, vg, oprsz, false); + } else { + do_zero(vd, oprsz); + } +} + +uint32_t HELPER(sve_brkpbs)(void *vd, void *vn, void *vm, void *vg, + uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + if (last_active_pred(vn, vg, oprsz)) { + return compute_brks_z(vd, vm, vg, oprsz, false); + } else { + return do_zero(vd, oprsz); + } +} + +void HELPER(sve_brka_z)(void *vd, void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + compute_brk_z(vd, vn, vg, oprsz, true); +} + +uint32_t HELPER(sve_brkas_z)(void *vd, void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + return compute_brks_z(vd, vn, vg, oprsz, true); +} + +void HELPER(sve_brkb_z)(void *vd, void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + compute_brk_z(vd, vn, vg, oprsz, false); +} + +uint32_t HELPER(sve_brkbs_z)(void *vd, void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + return compute_brks_z(vd, vn, vg, oprsz, false); +} + +void HELPER(sve_brka_m)(void *vd, void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + compute_brk_m(vd, vn, vg, oprsz, true); +} + +uint32_t HELPER(sve_brkas_m)(void *vd, void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + return compute_brks_m(vd, vn, vg, oprsz, true); +} + +void HELPER(sve_brkb_m)(void *vd, void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + compute_brk_m(vd, vn, vg, oprsz, false); +} + +uint32_t HELPER(sve_brkbs_m)(void *vd, void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + return compute_brks_m(vd, vn, vg, oprsz, false); +} + +void HELPER(sve_brkn)(void *vd, void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + + if (!last_active_pred(vn, vg, oprsz)) { + do_zero(vd, oprsz); + } +} + +/* As if PredTest(Ones(PL), D, esz). 
*/ +static uint32_t predtest_ones(ARMPredicateReg *d, intptr_t oprsz, + uint64_t esz_mask) +{ + uint32_t flags = PREDTEST_INIT; + intptr_t i; + + for (i = 0; i < oprsz / 8; i++) { + flags = iter_predtest_fwd(d->p[i], esz_mask, flags); + } + if (oprsz & 7) { + uint64_t mask = ~(0xffffffffffffffffULL << (8 * (oprsz & 7))); + flags = iter_predtest_fwd(d->p[i], esz_mask & mask, flags); + } + return flags; +} + +uint32_t HELPER(sve_brkns)(void *vd, void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + + if (last_active_pred(vn, vg, oprsz)) { + return predtest_ones(vd, oprsz, -1); + } else { + return do_zero(vd, oprsz); + } +} + +uint64_t HELPER(sve_cntp)(void *vn, void *vg, uint32_t pred_desc) +{ + intptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + intptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); + uint64_t *n = vn, *g = vg, sum = 0, mask = pred_esz_masks[esz]; + intptr_t i; + + for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) { + uint64_t t = n[i] & g[i] & mask; + sum += ctpop64(t); + } + return sum; +} + +uint32_t HELPER(sve_while)(void *vd, uint32_t count, uint32_t pred_desc) +{ + uintptr_t oprsz = extract32(pred_desc, 0, SIMD_OPRSZ_BITS) + 2; + intptr_t esz = extract32(pred_desc, SIMD_DATA_SHIFT, 2); + uint64_t esz_mask = pred_esz_masks[esz]; + ARMPredicateReg *d = vd; + uint32_t flags; + intptr_t i; + + /* Begin with a zero predicate register. */ + flags = do_zero(d, oprsz); + if (count == 0) { + return flags; + } + + /* Set all of the requested bits. */ + for (i = 0; i < count / 64; ++i) { + d->p[i] = esz_mask; + } + if (count & 63) { + d->p[i] = MAKE_64BIT_MASK(0, count & 63) & esz_mask; + } + + return predtest_ones(d, oprsz, esz_mask); +} + +/* Recursive reduction on a function; + * C.f. the ARM ARM function ReducePredicated. + * + * While it would be possible to write this without the DATA temporary, + * it is much simpler to process the predicate register this way. + * The recursion is bounded to depth 7 (128 fp16 elements), so there's + * little to gain with a more complex non-recursive form. + */ +#define DO_REDUCE(NAME, TYPE, H, FUNC, IDENT) \ +static TYPE NAME##_reduce(TYPE *data, float_status *status, uintptr_t n) \ +{ \ + if (n == 1) { \ + return *data; \ + } else { \ + uintptr_t half = n / 2; \ + TYPE lo = NAME##_reduce(data, status, half); \ + TYPE hi = NAME##_reduce(data + half, status, half); \ + return TYPE##_##FUNC(lo, hi, status); \ + } \ +} \ +uint64_t HELPER(NAME)(void *vn, void *vg, void *vs, uint32_t desc) \ +{ \ + uintptr_t i, oprsz = simd_oprsz(desc), maxsz = simd_maxsz(desc); \ + TYPE data[sizeof(ARMVectorReg) / sizeof(TYPE)]; \ + for (i = 0; i < oprsz; ) { \ + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); \ + do { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + *(TYPE *)((char *)data + i) = (pg & 1 ? nn : IDENT); \ + i += sizeof(TYPE), pg >>= sizeof(TYPE); \ + } while (i & 15); \ + } \ + for (; i < maxsz; i += sizeof(TYPE)) { \ + *(TYPE *)((char *)data + i) = IDENT; \ + } \ + return NAME##_reduce(data, vs, maxsz / sizeof(TYPE)); \ +} + +DO_REDUCE(sve_faddv_h, float16, H1_2, add, float16_zero) +DO_REDUCE(sve_faddv_s, float32, H1_4, add, float32_zero) +DO_REDUCE(sve_faddv_d, float64, , add, float64_zero) + +/* Identity is floatN_default_nan, without the function call. 
*/ +DO_REDUCE(sve_fminnmv_h, float16, H1_2, minnum, 0x7E00) +DO_REDUCE(sve_fminnmv_s, float32, H1_4, minnum, 0x7FC00000) +DO_REDUCE(sve_fminnmv_d, float64, , minnum, 0x7FF8000000000000ULL) + +DO_REDUCE(sve_fmaxnmv_h, float16, H1_2, maxnum, 0x7E00) +DO_REDUCE(sve_fmaxnmv_s, float32, H1_4, maxnum, 0x7FC00000) +DO_REDUCE(sve_fmaxnmv_d, float64, , maxnum, 0x7FF8000000000000ULL) + +DO_REDUCE(sve_fminv_h, float16, H1_2, min, float16_infinity) +DO_REDUCE(sve_fminv_s, float32, H1_4, min, float32_infinity) +DO_REDUCE(sve_fminv_d, float64, , min, float64_infinity) + +DO_REDUCE(sve_fmaxv_h, float16, H1_2, max, float16_chs(float16_infinity)) +DO_REDUCE(sve_fmaxv_s, float32, H1_4, max, float32_chs(float32_infinity)) +DO_REDUCE(sve_fmaxv_d, float64, , max, float64_chs(float64_infinity)) + +#undef DO_REDUCE + +uint64_t HELPER(sve_fadda_h)(uint64_t nn, void *vm, void *vg, + void *status, uint32_t desc) +{ + intptr_t i = 0, opr_sz = simd_oprsz(desc); + float16 result = nn; + + do { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (pg & 1) { + float16 mm = *(float16 *)((char *)vm + H1_2(i)); + result = float16_add(result, mm, status); + } + i += sizeof(float16), pg >>= sizeof(float16); + } while (i & 15); + } while (i < opr_sz); + + return result; +} + +uint64_t HELPER(sve_fadda_s)(uint64_t nn, void *vm, void *vg, + void *status, uint32_t desc) +{ + intptr_t i = 0, opr_sz = simd_oprsz(desc); + float32 result = nn; + + do { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (pg & 1) { + float32 mm = *(float32 *)((char *)vm + H1_2(i)); + result = float32_add(result, mm, status); + } + i += sizeof(float32), pg >>= sizeof(float32); + } while (i & 15); + } while (i < opr_sz); + + return result; +} + +uint64_t HELPER(sve_fadda_d)(uint64_t nn, void *vm, void *vg, + void *status, uint32_t desc) +{ + intptr_t i = 0, opr_sz = simd_oprsz(desc) / 8; + uint64_t *m = vm; + uint8_t *pg = vg; + + for (i = 0; i < opr_sz; i++) { + if (pg[H1(i)] & 1) { + nn = float64_add(nn, m[i], status); + } + } + + return nn; +} + +/* Fully general three-operand expander, controlled by a predicate, + * With the extra float_status parameter. 
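+ *
+ * A sketch of the iteration pattern used below: the loops walk
+ * backward, g[(i - 1) >> 6] is the 64-bit predicate word containing
+ * the guard bit for byte i - 1, and once i has been decremented to
+ * an element's byte offset, bit (i & 63) of that word is the
+ * element's guard bit.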
+ */ +#define DO_ZPZZ_FP(NAME, TYPE, H, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \ + void *status, uint32_t desc) \ +{ \ + intptr_t i = simd_oprsz(desc); \ + uint64_t *g = vg; \ + do { \ + uint64_t pg = g[(i - 1) >> 6]; \ + do { \ + i -= sizeof(TYPE); \ + if (likely((pg >> (i & 63)) & 1)) { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + TYPE mm = *(TYPE *)((char *)vm + H(i)); \ + *(TYPE *)((char *)vd + H(i)) = OP(nn, mm, status); \ + } \ + } while (i & 63); \ + } while (i != 0); \ +} + +DO_ZPZZ_FP(sve_fadd_h, uint16_t, H1_2, float16_add) +DO_ZPZZ_FP(sve_fadd_s, uint32_t, H1_4, float32_add) +DO_ZPZZ_FP(sve_fadd_d, uint64_t, , float64_add) + +DO_ZPZZ_FP(sve_fsub_h, uint16_t, H1_2, float16_sub) +DO_ZPZZ_FP(sve_fsub_s, uint32_t, H1_4, float32_sub) +DO_ZPZZ_FP(sve_fsub_d, uint64_t, , float64_sub) + +DO_ZPZZ_FP(sve_fmul_h, uint16_t, H1_2, float16_mul) +DO_ZPZZ_FP(sve_fmul_s, uint32_t, H1_4, float32_mul) +DO_ZPZZ_FP(sve_fmul_d, uint64_t, , float64_mul) + +DO_ZPZZ_FP(sve_fdiv_h, uint16_t, H1_2, float16_div) +DO_ZPZZ_FP(sve_fdiv_s, uint32_t, H1_4, float32_div) +DO_ZPZZ_FP(sve_fdiv_d, uint64_t, , float64_div) + +DO_ZPZZ_FP(sve_fmin_h, uint16_t, H1_2, float16_min) +DO_ZPZZ_FP(sve_fmin_s, uint32_t, H1_4, float32_min) +DO_ZPZZ_FP(sve_fmin_d, uint64_t, , float64_min) + +DO_ZPZZ_FP(sve_fmax_h, uint16_t, H1_2, float16_max) +DO_ZPZZ_FP(sve_fmax_s, uint32_t, H1_4, float32_max) +DO_ZPZZ_FP(sve_fmax_d, uint64_t, , float64_max) + +DO_ZPZZ_FP(sve_fminnum_h, uint16_t, H1_2, float16_minnum) +DO_ZPZZ_FP(sve_fminnum_s, uint32_t, H1_4, float32_minnum) +DO_ZPZZ_FP(sve_fminnum_d, uint64_t, , float64_minnum) + +DO_ZPZZ_FP(sve_fmaxnum_h, uint16_t, H1_2, float16_maxnum) +DO_ZPZZ_FP(sve_fmaxnum_s, uint32_t, H1_4, float32_maxnum) +DO_ZPZZ_FP(sve_fmaxnum_d, uint64_t, , float64_maxnum) + +static inline float16 abd_h(float16 a, float16 b, float_status *s) +{ + return float16_abs(float16_sub(a, b, s)); +} + +static inline float32 abd_s(float32 a, float32 b, float_status *s) +{ + return float32_abs(float32_sub(a, b, s)); +} + +static inline float64 abd_d(float64 a, float64 b, float_status *s) +{ + return float64_abs(float64_sub(a, b, s)); +} + +DO_ZPZZ_FP(sve_fabd_h, uint16_t, H1_2, abd_h) +DO_ZPZZ_FP(sve_fabd_s, uint32_t, H1_4, abd_s) +DO_ZPZZ_FP(sve_fabd_d, uint64_t, , abd_d) + +static inline float64 scalbn_d(float64 a, int64_t b, float_status *s) +{ + int b_int = MIN(MAX(b, INT_MIN), INT_MAX); + return float64_scalbn(a, b_int, s); +} + +DO_ZPZZ_FP(sve_fscalbn_h, int16_t, H1_2, float16_scalbn) +DO_ZPZZ_FP(sve_fscalbn_s, int32_t, H1_4, float32_scalbn) +DO_ZPZZ_FP(sve_fscalbn_d, int64_t, , scalbn_d) + +DO_ZPZZ_FP(sve_fmulx_h, uint16_t, H1_2, helper_advsimd_mulxh) +DO_ZPZZ_FP(sve_fmulx_s, uint32_t, H1_4, helper_vfp_mulxs) +DO_ZPZZ_FP(sve_fmulx_d, uint64_t, , helper_vfp_mulxd) + +#undef DO_ZPZZ_FP + +/* Three-operand expander, with one scalar operand, controlled by + * a predicate, with the extra float_status parameter. 
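+ *
+ * Note that "TYPE mm = scalar" below truncates the uint64_t scalar
+ * to the element type; e.g. sve_fadds_h only consumes the low 16
+ * bits of the incoming scalar.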
+ */ +#define DO_ZPZS_FP(NAME, TYPE, H, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, uint64_t scalar, \ + void *status, uint32_t desc) \ +{ \ + intptr_t i = simd_oprsz(desc); \ + uint64_t *g = vg; \ + TYPE mm = scalar; \ + do { \ + uint64_t pg = g[(i - 1) >> 6]; \ + do { \ + i -= sizeof(TYPE); \ + if (likely((pg >> (i & 63)) & 1)) { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + *(TYPE *)((char *)vd + H(i)) = OP(nn, mm, status); \ + } \ + } while (i & 63); \ + } while (i != 0); \ +} + +DO_ZPZS_FP(sve_fadds_h, float16, H1_2, float16_add) +DO_ZPZS_FP(sve_fadds_s, float32, H1_4, float32_add) +DO_ZPZS_FP(sve_fadds_d, float64, , float64_add) + +DO_ZPZS_FP(sve_fsubs_h, float16, H1_2, float16_sub) +DO_ZPZS_FP(sve_fsubs_s, float32, H1_4, float32_sub) +DO_ZPZS_FP(sve_fsubs_d, float64, , float64_sub) + +DO_ZPZS_FP(sve_fmuls_h, float16, H1_2, float16_mul) +DO_ZPZS_FP(sve_fmuls_s, float32, H1_4, float32_mul) +DO_ZPZS_FP(sve_fmuls_d, float64, , float64_mul) + +static inline float16 subr_h(float16 a, float16 b, float_status *s) +{ + return float16_sub(b, a, s); +} + +static inline float32 subr_s(float32 a, float32 b, float_status *s) +{ + return float32_sub(b, a, s); +} + +static inline float64 subr_d(float64 a, float64 b, float_status *s) +{ + return float64_sub(b, a, s); +} + +DO_ZPZS_FP(sve_fsubrs_h, float16, H1_2, subr_h) +DO_ZPZS_FP(sve_fsubrs_s, float32, H1_4, subr_s) +DO_ZPZS_FP(sve_fsubrs_d, float64, , subr_d) + +DO_ZPZS_FP(sve_fmaxnms_h, float16, H1_2, float16_maxnum) +DO_ZPZS_FP(sve_fmaxnms_s, float32, H1_4, float32_maxnum) +DO_ZPZS_FP(sve_fmaxnms_d, float64, , float64_maxnum) + +DO_ZPZS_FP(sve_fminnms_h, float16, H1_2, float16_minnum) +DO_ZPZS_FP(sve_fminnms_s, float32, H1_4, float32_minnum) +DO_ZPZS_FP(sve_fminnms_d, float64, , float64_minnum) + +DO_ZPZS_FP(sve_fmaxs_h, float16, H1_2, float16_max) +DO_ZPZS_FP(sve_fmaxs_s, float32, H1_4, float32_max) +DO_ZPZS_FP(sve_fmaxs_d, float64, , float64_max) + +DO_ZPZS_FP(sve_fmins_h, float16, H1_2, float16_min) +DO_ZPZS_FP(sve_fmins_s, float32, H1_4, float32_min) +DO_ZPZS_FP(sve_fmins_d, float64, , float64_min) + +/* Fully general two-operand expander, controlled by a predicate, + * With the extra float_status parameter. + */ +#define DO_ZPZ_FP(NAME, TYPE, H, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \ +{ \ + intptr_t i = simd_oprsz(desc); \ + uint64_t *g = vg; \ + do { \ + uint64_t pg = g[(i - 1) >> 6]; \ + do { \ + i -= sizeof(TYPE); \ + if (likely((pg >> (i & 63)) & 1)) { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + *(TYPE *)((char *)vd + H(i)) = OP(nn, status); \ + } \ + } while (i & 63); \ + } while (i != 0); \ +} + +/* SVE fp16 conversions always use IEEE mode. Like AdvSIMD, they ignore + * FZ16. When converting from fp16, this affects flushing input denormals; + * when converting to fp16, this affects flushing output denormals. 
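+ * Hence the helpers below save the relevant flag
+ * (flush_inputs_to_zero when converting from fp16, flush_to_zero
+ * when converting to it), clear it around the conversion, and then
+ * restore the saved value.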
+ */ +static inline float32 sve_f16_to_f32(float16 f, float_status *fpst) +{ + flag save = get_flush_inputs_to_zero(fpst); + float32 ret; + + set_flush_inputs_to_zero(false, fpst); + ret = float16_to_float32(f, true, fpst); + set_flush_inputs_to_zero(save, fpst); + return ret; +} + +static inline float64 sve_f16_to_f64(float16 f, float_status *fpst) +{ + flag save = get_flush_inputs_to_zero(fpst); + float64 ret; + + set_flush_inputs_to_zero(false, fpst); + ret = float16_to_float64(f, true, fpst); + set_flush_inputs_to_zero(save, fpst); + return ret; +} + +static inline float16 sve_f32_to_f16(float32 f, float_status *fpst) +{ + flag save = get_flush_to_zero(fpst); + float16 ret; + + set_flush_to_zero(false, fpst); + ret = float32_to_float16(f, true, fpst); + set_flush_to_zero(save, fpst); + return ret; +} + +static inline float16 sve_f64_to_f16(float64 f, float_status *fpst) +{ + flag save = get_flush_to_zero(fpst); + float16 ret; + + set_flush_to_zero(false, fpst); + ret = float64_to_float16(f, true, fpst); + set_flush_to_zero(save, fpst); + return ret; +} + +static inline int16_t vfp_float16_to_int16_rtz(float16 f, float_status *s) +{ + if (float16_is_any_nan(f)) { + float_raise(float_flag_invalid, s); + return 0; + } + return float16_to_int16_round_to_zero(f, s); +} + +static inline int64_t vfp_float16_to_int64_rtz(float16 f, float_status *s) +{ + if (float16_is_any_nan(f)) { + float_raise(float_flag_invalid, s); + return 0; + } + return float16_to_int64_round_to_zero(f, s); +} + +static inline int64_t vfp_float32_to_int64_rtz(float32 f, float_status *s) +{ + if (float32_is_any_nan(f)) { + float_raise(float_flag_invalid, s); + return 0; + } + return float32_to_int64_round_to_zero(f, s); +} + +static inline int64_t vfp_float64_to_int64_rtz(float64 f, float_status *s) +{ + if (float64_is_any_nan(f)) { + float_raise(float_flag_invalid, s); + return 0; + } + return float64_to_int64_round_to_zero(f, s); +} + +static inline uint16_t vfp_float16_to_uint16_rtz(float16 f, float_status *s) +{ + if (float16_is_any_nan(f)) { + float_raise(float_flag_invalid, s); + return 0; + } + return float16_to_uint16_round_to_zero(f, s); +} + +static inline uint64_t vfp_float16_to_uint64_rtz(float16 f, float_status *s) +{ + if (float16_is_any_nan(f)) { + float_raise(float_flag_invalid, s); + return 0; + } + return float16_to_uint64_round_to_zero(f, s); +} + +static inline uint64_t vfp_float32_to_uint64_rtz(float32 f, float_status *s) +{ + if (float32_is_any_nan(f)) { + float_raise(float_flag_invalid, s); + return 0; + } + return float32_to_uint64_round_to_zero(f, s); +} + +static inline uint64_t vfp_float64_to_uint64_rtz(float64 f, float_status *s) +{ + if (float64_is_any_nan(f)) { + float_raise(float_flag_invalid, s); + return 0; + } + return float64_to_uint64_round_to_zero(f, s); +} + +DO_ZPZ_FP(sve_fcvt_sh, uint32_t, H1_4, sve_f32_to_f16) +DO_ZPZ_FP(sve_fcvt_hs, uint32_t, H1_4, sve_f16_to_f32) +DO_ZPZ_FP(sve_fcvt_dh, uint64_t, , sve_f64_to_f16) +DO_ZPZ_FP(sve_fcvt_hd, uint64_t, , sve_f16_to_f64) +DO_ZPZ_FP(sve_fcvt_ds, uint64_t, , float64_to_float32) +DO_ZPZ_FP(sve_fcvt_sd, uint64_t, , float32_to_float64) + +DO_ZPZ_FP(sve_fcvtzs_hh, uint16_t, H1_2, vfp_float16_to_int16_rtz) +DO_ZPZ_FP(sve_fcvtzs_hs, uint32_t, H1_4, helper_vfp_tosizh) +DO_ZPZ_FP(sve_fcvtzs_ss, uint32_t, H1_4, helper_vfp_tosizs) +DO_ZPZ_FP(sve_fcvtzs_hd, uint64_t, , vfp_float16_to_int64_rtz) +DO_ZPZ_FP(sve_fcvtzs_sd, uint64_t, , vfp_float32_to_int64_rtz) +DO_ZPZ_FP(sve_fcvtzs_ds, uint64_t, , helper_vfp_tosizd) +DO_ZPZ_FP(sve_fcvtzs_dd, 
uint64_t, , vfp_float64_to_int64_rtz) + +DO_ZPZ_FP(sve_fcvtzu_hh, uint16_t, H1_2, vfp_float16_to_uint16_rtz) +DO_ZPZ_FP(sve_fcvtzu_hs, uint32_t, H1_4, helper_vfp_touizh) +DO_ZPZ_FP(sve_fcvtzu_ss, uint32_t, H1_4, helper_vfp_touizs) +DO_ZPZ_FP(sve_fcvtzu_hd, uint64_t, , vfp_float16_to_uint64_rtz) +DO_ZPZ_FP(sve_fcvtzu_sd, uint64_t, , vfp_float32_to_uint64_rtz) +DO_ZPZ_FP(sve_fcvtzu_ds, uint64_t, , helper_vfp_touizd) +DO_ZPZ_FP(sve_fcvtzu_dd, uint64_t, , vfp_float64_to_uint64_rtz) + +DO_ZPZ_FP(sve_frint_h, uint16_t, H1_2, helper_advsimd_rinth) +DO_ZPZ_FP(sve_frint_s, uint32_t, H1_4, helper_rints) +DO_ZPZ_FP(sve_frint_d, uint64_t, , helper_rintd) + +DO_ZPZ_FP(sve_frintx_h, uint16_t, H1_2, float16_round_to_int) +DO_ZPZ_FP(sve_frintx_s, uint32_t, H1_4, float32_round_to_int) +DO_ZPZ_FP(sve_frintx_d, uint64_t, , float64_round_to_int) + +DO_ZPZ_FP(sve_frecpx_h, uint16_t, H1_2, helper_frecpx_f16) +DO_ZPZ_FP(sve_frecpx_s, uint32_t, H1_4, helper_frecpx_f32) +DO_ZPZ_FP(sve_frecpx_d, uint64_t, , helper_frecpx_f64) + +DO_ZPZ_FP(sve_fsqrt_h, uint16_t, H1_2, float16_sqrt) +DO_ZPZ_FP(sve_fsqrt_s, uint32_t, H1_4, float32_sqrt) +DO_ZPZ_FP(sve_fsqrt_d, uint64_t, , float64_sqrt) + +DO_ZPZ_FP(sve_scvt_hh, uint16_t, H1_2, int16_to_float16) +DO_ZPZ_FP(sve_scvt_sh, uint32_t, H1_4, int32_to_float16) +DO_ZPZ_FP(sve_scvt_ss, uint32_t, H1_4, int32_to_float32) +DO_ZPZ_FP(sve_scvt_sd, uint64_t, , int32_to_float64) +DO_ZPZ_FP(sve_scvt_dh, uint64_t, , int64_to_float16) +DO_ZPZ_FP(sve_scvt_ds, uint64_t, , int64_to_float32) +DO_ZPZ_FP(sve_scvt_dd, uint64_t, , int64_to_float64) + +DO_ZPZ_FP(sve_ucvt_hh, uint16_t, H1_2, uint16_to_float16) +DO_ZPZ_FP(sve_ucvt_sh, uint32_t, H1_4, uint32_to_float16) +DO_ZPZ_FP(sve_ucvt_ss, uint32_t, H1_4, uint32_to_float32) +DO_ZPZ_FP(sve_ucvt_sd, uint64_t, , uint32_to_float64) +DO_ZPZ_FP(sve_ucvt_dh, uint64_t, , uint64_to_float16) +DO_ZPZ_FP(sve_ucvt_ds, uint64_t, , uint64_to_float32) +DO_ZPZ_FP(sve_ucvt_dd, uint64_t, , uint64_to_float64) + +#undef DO_ZPZ_FP + +/* 4-operand predicated multiply-add. This requires 7 operands to pass + * "properly", so we need to encode some of the registers into DESC. 
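+ *
+ * Layout sketch of the data field, as decoded by the extract32()
+ * calls below: five bits each for Zd, Zn, Zm and Za starting at
+ * SIMD_DATA_SHIFT, plus (for FCMLA further down) two more bits for
+ * the rotation; the QEMU_BUILD_BUG_ON lines check that this still
+ * fits within the 32-bit descriptor.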
+ */ +QEMU_BUILD_BUG_ON(SIMD_DATA_SHIFT + 20 > 32); + +static void do_fmla_zpzzz_h(CPUARMState *env, void *vg, uint32_t desc, + uint16_t neg1, uint16_t neg3) +{ + intptr_t i = simd_oprsz(desc); + unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); + unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); + unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); + unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); + void *vd = &env->vfp.zregs[rd]; + void *vn = &env->vfp.zregs[rn]; + void *vm = &env->vfp.zregs[rm]; + void *va = &env->vfp.zregs[ra]; + uint64_t *g = vg; + + do { + uint64_t pg = g[(i - 1) >> 6]; + do { + i -= 2; + if (likely((pg >> (i & 63)) & 1)) { + float16 e1, e2, e3, r; + + e1 = *(uint16_t *)((char *)vn + H1_2(i)) ^ neg1; + e2 = *(uint16_t *)((char *)vm + H1_2(i)); + e3 = *(uint16_t *)((char *)va + H1_2(i)) ^ neg3; + r = float16_muladd(e1, e2, e3, 0, &env->vfp.fp_status_f16); + *(uint16_t *)((char *)vd + H1_2(i)) = r; + } + } while (i & 63); + } while (i != 0); +} + +void HELPER(sve_fmla_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_h(env, vg, desc, 0, 0); +} + +void HELPER(sve_fmls_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_h(env, vg, desc, 0x8000, 0); +} + +void HELPER(sve_fnmla_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_h(env, vg, desc, 0x8000, 0x8000); +} + +void HELPER(sve_fnmls_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_h(env, vg, desc, 0, 0x8000); +} + +static void do_fmla_zpzzz_s(CPUARMState *env, void *vg, uint32_t desc, + uint32_t neg1, uint32_t neg3) +{ + intptr_t i = simd_oprsz(desc); + unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); + unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); + unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); + unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); + void *vd = &env->vfp.zregs[rd]; + void *vn = &env->vfp.zregs[rn]; + void *vm = &env->vfp.zregs[rm]; + void *va = &env->vfp.zregs[ra]; + uint64_t *g = vg; + + do { + uint64_t pg = g[(i - 1) >> 6]; + do { + i -= 4; + if (likely((pg >> (i & 63)) & 1)) { + float32 e1, e2, e3, r; + + e1 = *(uint32_t *)((char *)vn + H1_4(i)) ^ neg1; + e2 = *(uint32_t *)((char *)vm + H1_4(i)); + e3 = *(uint32_t *)((char *)va + H1_4(i)) ^ neg3; + r = float32_muladd(e1, e2, e3, 0, &env->vfp.fp_status); + *(uint32_t *)((char *)vd + H1_4(i)) = r; + } + } while (i & 63); + } while (i != 0); +} + +void HELPER(sve_fmla_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_s(env, vg, desc, 0, 0); +} + +void HELPER(sve_fmls_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_s(env, vg, desc, 0x80000000, 0); +} + +void HELPER(sve_fnmla_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_s(env, vg, desc, 0x80000000, 0x80000000); +} + +void HELPER(sve_fnmls_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_s(env, vg, desc, 0, 0x80000000); +} + +static void do_fmla_zpzzz_d(CPUARMState *env, void *vg, uint32_t desc, + uint64_t neg1, uint64_t neg3) +{ + intptr_t i = simd_oprsz(desc); + unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); + unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); + unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); + unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); + void *vd = &env->vfp.zregs[rd]; + void *vn = &env->vfp.zregs[rn]; + void *vm = &env->vfp.zregs[rm]; + void *va = &env->vfp.zregs[ra]; + uint64_t *g = vg; + + do { + uint64_t pg = g[(i - 1) >> 6]; + do { + i 
-= 8; + if (likely((pg >> (i & 63)) & 1)) { + float64 e1, e2, e3, r; + + e1 = *(uint64_t *)((char *)vn + i) ^ neg1; + e2 = *(uint64_t *)((char *)vm + i); + e3 = *(uint64_t *)((char *)va + i) ^ neg3; + r = float64_muladd(e1, e2, e3, 0, &env->vfp.fp_status); + *(uint64_t *)((char *)vd + i) = r; + } + } while (i & 63); + } while (i != 0); +} + +void HELPER(sve_fmla_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_d(env, vg, desc, 0, 0); +} + +void HELPER(sve_fmls_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_d(env, vg, desc, INT64_MIN, 0); +} + +void HELPER(sve_fnmla_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_d(env, vg, desc, INT64_MIN, INT64_MIN); +} + +void HELPER(sve_fnmls_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc) +{ + do_fmla_zpzzz_d(env, vg, desc, 0, INT64_MIN); +} + +/* Two operand floating-point comparison controlled by a predicate. + * Unlike the integer version, we are not allowed to optimistically + * compare operands, since the comparison may have side effects wrt + * the FPSR. + */ +#define DO_FPCMP_PPZZ(NAME, TYPE, H, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \ + void *status, uint32_t desc) \ +{ \ + intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \ + uint64_t *d = vd, *g = vg; \ + do { \ + uint64_t out = 0, pg = g[j]; \ + do { \ + i -= sizeof(TYPE), out <<= sizeof(TYPE); \ + if (likely((pg >> (i & 63)) & 1)) { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + TYPE mm = *(TYPE *)((char *)vm + H(i)); \ + out |= OP(TYPE, nn, mm, status); \ + } \ + } while (i & 63); \ + d[j--] = out; \ + } while (i > 0); \ +} + +#define DO_FPCMP_PPZZ_H(NAME, OP) \ + DO_FPCMP_PPZZ(NAME##_h, float16, H1_2, OP) +#define DO_FPCMP_PPZZ_S(NAME, OP) \ + DO_FPCMP_PPZZ(NAME##_s, float32, H1_4, OP) +#define DO_FPCMP_PPZZ_D(NAME, OP) \ + DO_FPCMP_PPZZ(NAME##_d, float64, , OP) + +#define DO_FPCMP_PPZZ_ALL(NAME, OP) \ + DO_FPCMP_PPZZ_H(NAME, OP) \ + DO_FPCMP_PPZZ_S(NAME, OP) \ + DO_FPCMP_PPZZ_D(NAME, OP) + +#define DO_FCMGE(TYPE, X, Y, ST) TYPE##_compare(Y, X, ST) <= 0 +#define DO_FCMGT(TYPE, X, Y, ST) TYPE##_compare(Y, X, ST) < 0 +#define DO_FCMLE(TYPE, X, Y, ST) TYPE##_compare(X, Y, ST) <= 0 +#define DO_FCMLT(TYPE, X, Y, ST) TYPE##_compare(X, Y, ST) < 0 +#define DO_FCMEQ(TYPE, X, Y, ST) TYPE##_compare_quiet(X, Y, ST) == 0 +#define DO_FCMNE(TYPE, X, Y, ST) TYPE##_compare_quiet(X, Y, ST) != 0 +#define DO_FCMUO(TYPE, X, Y, ST) \ + TYPE##_compare_quiet(X, Y, ST) == float_relation_unordered +#define DO_FACGE(TYPE, X, Y, ST) \ + TYPE##_compare(TYPE##_abs(Y), TYPE##_abs(X), ST) <= 0 +#define DO_FACGT(TYPE, X, Y, ST) \ + TYPE##_compare(TYPE##_abs(Y), TYPE##_abs(X), ST) < 0 + +DO_FPCMP_PPZZ_ALL(sve_fcmge, DO_FCMGE) +DO_FPCMP_PPZZ_ALL(sve_fcmgt, DO_FCMGT) +DO_FPCMP_PPZZ_ALL(sve_fcmeq, DO_FCMEQ) +DO_FPCMP_PPZZ_ALL(sve_fcmne, DO_FCMNE) +DO_FPCMP_PPZZ_ALL(sve_fcmuo, DO_FCMUO) +DO_FPCMP_PPZZ_ALL(sve_facge, DO_FACGE) +DO_FPCMP_PPZZ_ALL(sve_facgt, DO_FACGT) + +#undef DO_FPCMP_PPZZ_ALL +#undef DO_FPCMP_PPZZ_D +#undef DO_FPCMP_PPZZ_S +#undef DO_FPCMP_PPZZ_H +#undef DO_FPCMP_PPZZ + +/* One operand floating-point comparison against zero, controlled + * by a predicate. 
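+ *
+ * The DO_FCM* macros above are reused with Y = 0, the positive-zero
+ * bit pattern of TYPE; e.g. sve_fcmge0 expands to
+ * TYPE##_compare(0, nn, status) <= 0, i.e. nn >= +0.0.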
+ */ +#define DO_FPCMP_PPZ0(NAME, TYPE, H, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, \ + void *status, uint32_t desc) \ +{ \ + intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \ + uint64_t *d = vd, *g = vg; \ + do { \ + uint64_t out = 0, pg = g[j]; \ + do { \ + i -= sizeof(TYPE), out <<= sizeof(TYPE); \ + if ((pg >> (i & 63)) & 1) { \ + TYPE nn = *(TYPE *)((char *)vn + H(i)); \ + out |= OP(TYPE, nn, 0, status); \ + } \ + } while (i & 63); \ + d[j--] = out; \ + } while (i > 0); \ +} + +#define DO_FPCMP_PPZ0_H(NAME, OP) \ + DO_FPCMP_PPZ0(NAME##_h, float16, H1_2, OP) +#define DO_FPCMP_PPZ0_S(NAME, OP) \ + DO_FPCMP_PPZ0(NAME##_s, float32, H1_4, OP) +#define DO_FPCMP_PPZ0_D(NAME, OP) \ + DO_FPCMP_PPZ0(NAME##_d, float64, , OP) + +#define DO_FPCMP_PPZ0_ALL(NAME, OP) \ + DO_FPCMP_PPZ0_H(NAME, OP) \ + DO_FPCMP_PPZ0_S(NAME, OP) \ + DO_FPCMP_PPZ0_D(NAME, OP) + +DO_FPCMP_PPZ0_ALL(sve_fcmge0, DO_FCMGE) +DO_FPCMP_PPZ0_ALL(sve_fcmgt0, DO_FCMGT) +DO_FPCMP_PPZ0_ALL(sve_fcmle0, DO_FCMLE) +DO_FPCMP_PPZ0_ALL(sve_fcmlt0, DO_FCMLT) +DO_FPCMP_PPZ0_ALL(sve_fcmeq0, DO_FCMEQ) +DO_FPCMP_PPZ0_ALL(sve_fcmne0, DO_FCMNE) + +/* FP Trig Multiply-Add. */ + +void HELPER(sve_ftmad_h)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) +{ + static const float16 coeff[16] = { + 0x3c00, 0xb155, 0x2030, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x3c00, 0xb800, 0x293a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + }; + intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float16); + intptr_t x = simd_data(desc); + float16 *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i++) { + float16 mm = m[i]; + intptr_t xx = x; + if (float16_is_neg(mm)) { + mm = float16_abs(mm); + xx += 8; + } + d[i] = float16_muladd(n[i], mm, coeff[xx], 0, vs); + } +} + +void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) +{ + static const float32 coeff[16] = { + 0x3f800000, 0xbe2aaaab, 0x3c088886, 0xb95008b9, + 0x36369d6d, 0x00000000, 0x00000000, 0x00000000, + 0x3f800000, 0xbf000000, 0x3d2aaaa6, 0xbab60705, + 0x37cd37cc, 0x00000000, 0x00000000, 0x00000000, + }; + intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float32); + intptr_t x = simd_data(desc); + float32 *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i++) { + float32 mm = m[i]; + intptr_t xx = x; + if (float32_is_neg(mm)) { + mm = float32_abs(mm); + xx += 8; + } + d[i] = float32_muladd(n[i], mm, coeff[xx], 0, vs); + } +} + +void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) +{ + static const float64 coeff[16] = { + 0x3ff0000000000000ull, 0xbfc5555555555543ull, + 0x3f8111111110f30cull, 0xbf2a01a019b92fc6ull, + 0x3ec71de351f3d22bull, 0xbe5ae5e2b60f7b91ull, + 0x3de5d8408868552full, 0x0000000000000000ull, + 0x3ff0000000000000ull, 0xbfe0000000000000ull, + 0x3fa5555555555536ull, 0xbf56c16c16c13a0bull, + 0x3efa01a019b1e8d8ull, 0xbe927e4f7282f468ull, + 0x3e21ee96d2641b13ull, 0xbda8f76380fbb401ull, + }; + intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float64); + intptr_t x = simd_data(desc); + float64 *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i++) { + float64 mm = m[i]; + intptr_t xx = x; + if (float64_is_neg(mm)) { + mm = float64_abs(mm); + xx += 8; + } + d[i] = float64_muladd(n[i], mm, coeff[xx], 0, vs); + } +} + +/* + * FP Complex Add + */ + +void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg, + void *vs, uint32_t desc) +{ + intptr_t j, i = simd_oprsz(desc); + uint64_t *g = vg; + float16 neg_imag = float16_set_sign(0, simd_data(desc)); + float16 neg_real = float16_chs(neg_imag); + + do { + uint64_t pg = g[(i - 
1) >> 6]; + do { + float16 e0, e1, e2, e3; + + /* I holds the real index; J holds the imag index. */ + j = i - sizeof(float16); + i -= 2 * sizeof(float16); + + e0 = *(float16 *)((char *)vn + H1_2(i)); + e1 = *(float16 *)((char *)vm + H1_2(j)) ^ neg_real; + e2 = *(float16 *)((char *)vn + H1_2(j)); + e3 = *(float16 *)((char *)vm + H1_2(i)) ^ neg_imag; + + if (likely((pg >> (i & 63)) & 1)) { + *(float16 *)((char *)vd + H1_2(i)) = float16_add(e0, e1, vs); + } + if (likely((pg >> (j & 63)) & 1)) { + *(float16 *)((char *)vd + H1_2(j)) = float16_add(e2, e3, vs); + } + } while (i & 63); + } while (i != 0); +} + +void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg, + void *vs, uint32_t desc) +{ + intptr_t j, i = simd_oprsz(desc); + uint64_t *g = vg; + float32 neg_imag = float32_set_sign(0, simd_data(desc)); + float32 neg_real = float32_chs(neg_imag); + + do { + uint64_t pg = g[(i - 1) >> 6]; + do { + float32 e0, e1, e2, e3; + + /* I holds the real index; J holds the imag index. */ + j = i - sizeof(float32); + i -= 2 * sizeof(float32); + + e0 = *(float32 *)((char *)vn + H1_2(i)); + e1 = *(float32 *)((char *)vm + H1_2(j)) ^ neg_real; + e2 = *(float32 *)((char *)vn + H1_2(j)); + e3 = *(float32 *)((char *)vm + H1_2(i)) ^ neg_imag; + + if (likely((pg >> (i & 63)) & 1)) { + *(float32 *)((char *)vd + H1_2(i)) = float32_add(e0, e1, vs); + } + if (likely((pg >> (j & 63)) & 1)) { + *(float32 *)((char *)vd + H1_2(j)) = float32_add(e2, e3, vs); + } + } while (i & 63); + } while (i != 0); +} + +void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg, + void *vs, uint32_t desc) +{ + intptr_t j, i = simd_oprsz(desc); + uint64_t *g = vg; + float64 neg_imag = float64_set_sign(0, simd_data(desc)); + float64 neg_real = float64_chs(neg_imag); + + do { + uint64_t pg = g[(i - 1) >> 6]; + do { + float64 e0, e1, e2, e3; + + /* I holds the real index; J holds the imag index. */ + j = i - sizeof(float64); + i -= 2 * sizeof(float64); + + e0 = *(float64 *)((char *)vn + H1_2(i)); + e1 = *(float64 *)((char *)vm + H1_2(j)) ^ neg_real; + e2 = *(float64 *)((char *)vn + H1_2(j)); + e3 = *(float64 *)((char *)vm + H1_2(i)) ^ neg_imag; + + if (likely((pg >> (i & 63)) & 1)) { + *(float64 *)((char *)vd + H1_2(i)) = float64_add(e0, e1, vs); + } + if (likely((pg >> (j & 63)) & 1)) { + *(float64 *)((char *)vd + H1_2(j)) = float64_add(e2, e3, vs); + } + } while (i & 63); + } while (i != 0); +} + +/* + * FP Complex Multiply + */ + +QEMU_BUILD_BUG_ON(SIMD_DATA_SHIFT + 22 > 32); + +void HELPER(sve_fcmla_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc) +{ + intptr_t j, i = simd_oprsz(desc); + unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); + unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); + unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); + unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); + unsigned rot = extract32(desc, SIMD_DATA_SHIFT + 20, 2); + bool flip = rot & 1; + float16 neg_imag, neg_real; + void *vd = &env->vfp.zregs[rd]; + void *vn = &env->vfp.zregs[rn]; + void *vm = &env->vfp.zregs[rm]; + void *va = &env->vfp.zregs[ra]; + uint64_t *g = vg; + + neg_imag = float16_set_sign(0, (rot & 2) != 0); + neg_real = float16_set_sign(0, rot == 1 || rot == 2); + + do { + uint64_t pg = g[(i - 1) >> 6]; + do { + float16 e1, e2, e3, e4, nr, ni, mr, mi, d; + + /* I holds the real index; J holds the imag index. 
*/ + j = i - sizeof(float16); + i -= 2 * sizeof(float16); + + nr = *(float16 *)((char *)vn + H1_2(i)); + ni = *(float16 *)((char *)vn + H1_2(j)); + mr = *(float16 *)((char *)vm + H1_2(i)); + mi = *(float16 *)((char *)vm + H1_2(j)); + + e2 = (flip ? ni : nr); + e1 = (flip ? mi : mr) ^ neg_real; + e4 = e2; + e3 = (flip ? mr : mi) ^ neg_imag; + + if (likely((pg >> (i & 63)) & 1)) { + d = *(float16 *)((char *)va + H1_2(i)); + d = float16_muladd(e2, e1, d, 0, &env->vfp.fp_status_f16); + *(float16 *)((char *)vd + H1_2(i)) = d; + } + if (likely((pg >> (j & 63)) & 1)) { + d = *(float16 *)((char *)va + H1_2(j)); + d = float16_muladd(e4, e3, d, 0, &env->vfp.fp_status_f16); + *(float16 *)((char *)vd + H1_2(j)) = d; + } + } while (i & 63); + } while (i != 0); +} + +void HELPER(sve_fcmla_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc) +{ + intptr_t j, i = simd_oprsz(desc); + unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); + unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); + unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); + unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); + unsigned rot = extract32(desc, SIMD_DATA_SHIFT + 20, 2); + bool flip = rot & 1; + float32 neg_imag, neg_real; + void *vd = &env->vfp.zregs[rd]; + void *vn = &env->vfp.zregs[rn]; + void *vm = &env->vfp.zregs[rm]; + void *va = &env->vfp.zregs[ra]; + uint64_t *g = vg; + + neg_imag = float32_set_sign(0, (rot & 2) != 0); + neg_real = float32_set_sign(0, rot == 1 || rot == 2); + + do { + uint64_t pg = g[(i - 1) >> 6]; + do { + float32 e1, e2, e3, e4, nr, ni, mr, mi, d; + + /* I holds the real index; J holds the imag index. */ + j = i - sizeof(float32); + i -= 2 * sizeof(float32); + + nr = *(float32 *)((char *)vn + H1_2(i)); + ni = *(float32 *)((char *)vn + H1_2(j)); + mr = *(float32 *)((char *)vm + H1_2(i)); + mi = *(float32 *)((char *)vm + H1_2(j)); + + e2 = (flip ? ni : nr); + e1 = (flip ? mi : mr) ^ neg_real; + e4 = e2; + e3 = (flip ? mr : mi) ^ neg_imag; + + if (likely((pg >> (i & 63)) & 1)) { + d = *(float32 *)((char *)va + H1_2(i)); + d = float32_muladd(e2, e1, d, 0, &env->vfp.fp_status); + *(float32 *)((char *)vd + H1_2(i)) = d; + } + if (likely((pg >> (j & 63)) & 1)) { + d = *(float32 *)((char *)va + H1_2(j)); + d = float32_muladd(e4, e3, d, 0, &env->vfp.fp_status); + *(float32 *)((char *)vd + H1_2(j)) = d; + } + } while (i & 63); + } while (i != 0); +} + +void HELPER(sve_fcmla_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc) +{ + intptr_t j, i = simd_oprsz(desc); + unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5); + unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5); + unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5); + unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5); + unsigned rot = extract32(desc, SIMD_DATA_SHIFT + 20, 2); + bool flip = rot & 1; + float64 neg_imag, neg_real; + void *vd = &env->vfp.zregs[rd]; + void *vn = &env->vfp.zregs[rn]; + void *vm = &env->vfp.zregs[rm]; + void *va = &env->vfp.zregs[ra]; + uint64_t *g = vg; + + neg_imag = float64_set_sign(0, (rot & 2) != 0); + neg_real = float64_set_sign(0, rot == 1 || rot == 2); + + do { + uint64_t pg = g[(i - 1) >> 6]; + do { + float64 e1, e2, e3, e4, nr, ni, mr, mi, d; + + /* I holds the real index; J holds the imag index. */ + j = i - sizeof(float64); + i -= 2 * sizeof(float64); + + nr = *(float64 *)((char *)vn + H1_2(i)); + ni = *(float64 *)((char *)vn + H1_2(j)); + mr = *(float64 *)((char *)vm + H1_2(i)); + mi = *(float64 *)((char *)vm + H1_2(j)); + + e2 = (flip ? ni : nr); + e1 = (flip ? 
mi : mr) ^ neg_real;
+            e4 = e2;
+            e3 = (flip ? mr : mi) ^ neg_imag;
+
+            if (likely((pg >> (i & 63)) & 1)) {
+                d = *(float64 *)((char *)va + H1_2(i));
+                d = float64_muladd(e2, e1, d, 0, &env->vfp.fp_status);
+                *(float64 *)((char *)vd + H1_2(i)) = d;
+            }
+            if (likely((pg >> (j & 63)) & 1)) {
+                d = *(float64 *)((char *)va + H1_2(j));
+                d = float64_muladd(e4, e3, d, 0, &env->vfp.fp_status);
+                *(float64 *)((char *)vd + H1_2(j)) = d;
+            }
+        } while (i & 63);
+    } while (i != 0);
+}
+
+/*
+ * Load contiguous data, protected by a governing predicate.
+ */
+
+/*
+ * Load elements into @vd, controlled by @vg, from @host + @mem_ofs.
+ * Memory is valid through @host + @mem_max. The register element
+ * indices are inferred from @mem_ofs, as modified by the types for
+ * which the helper is built. Return the @mem_ofs of the first element
+ * not loaded (which is @mem_max if they are all loaded).
+ *
+ * For softmmu, we have fully validated the guest page. For user-only,
+ * we cannot fully validate without taking the mmap lock, but since we
+ * know the access is within one host page, if any access is valid they
+ * all must be valid. However, when @vg is all false, it may be that
+ * no access is valid.
+ */
+typedef intptr_t sve_ld1_host_fn(void *vd, void *vg, void *host,
+                                 intptr_t mem_ofs, intptr_t mem_max);
+
+/*
+ * Load one element into @vd + @reg_off from (@env, @vaddr, @ra).
+ * The controlling predicate is known to be true.
+ */
+typedef void sve_ld1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off,
+                            target_ulong vaddr, TCGMemOpIdx oi, uintptr_t ra);
+typedef sve_ld1_tlb_fn sve_st1_tlb_fn;
+
+/*
+ * Generate the above primitives.
+ */
+
+#define DO_LD_HOST(NAME, H, TYPEE, TYPEM, HOST) \
+static intptr_t sve_##NAME##_host(void *vd, void *vg, void *host, \
+                                  intptr_t mem_off, const intptr_t mem_max) \
+{ \
+    intptr_t reg_off = mem_off * (sizeof(TYPEE) / sizeof(TYPEM)); \
+    uint64_t *pg = vg; \
+    while (mem_off + sizeof(TYPEM) <= mem_max) { \
+        TYPEM val = 0; \
+        if (likely((pg[reg_off >> 6] >> (reg_off & 63)) & 1)) { \
+            val = HOST((char *)host + mem_off); \
+        } \
+        *(TYPEE *)((char *)vd + H(reg_off)) = val; \
+        mem_off += sizeof(TYPEM), reg_off += sizeof(TYPEE); \
+    } \
+    return mem_off; \
+}
+
+#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, HOST, MOEND, TLB) \
+static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
+                             target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \
+{ \
+    TYPEM val = TLB(env, addr, oi, ra); \
+    *(TYPEE *)((char *)vd + H(reg_off)) = val; \
+}
+
+#define DO_LD_PRIM_1(NAME, H, TE, TM) \
+    DO_LD_HOST(NAME, H, TE, TM, ldub_p) \
+    DO_LD_TLB(NAME, H, TE, TM, ldub_p, 0, helper_ret_ldub_mmu)
+
+DO_LD_PRIM_1(ld1bb, H1, uint8_t, uint8_t)
+DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t)
+DO_LD_PRIM_1(ld1bhs, H1_2, uint16_t, int8_t)
+DO_LD_PRIM_1(ld1bsu, H1_4, uint32_t, uint8_t)
+DO_LD_PRIM_1(ld1bss, H1_4, uint32_t, int8_t)
+DO_LD_PRIM_1(ld1bdu, , uint64_t, uint8_t)
+DO_LD_PRIM_1(ld1bds, , uint64_t, int8_t)
+
+#define DO_LD_PRIM_2(NAME, end, MOEND, H, TE, TM, PH, PT) \
+    DO_LD_HOST(NAME##_##end, H, TE, TM, PH##_##end##_p) \
+    DO_LD_TLB(NAME##_##end, H, TE, TM, PH##_##end##_p, \
+              MOEND, helper_##end##_##PT##_mmu)
+
+DO_LD_PRIM_2(ld1hh, le, MO_LE, H1_2, uint16_t, uint16_t, lduw, lduw)
+DO_LD_PRIM_2(ld1hsu, le, MO_LE, H1_4, uint32_t, uint16_t, lduw, lduw)
+DO_LD_PRIM_2(ld1hss, le, MO_LE, H1_4, uint32_t, int16_t, lduw, lduw)
+DO_LD_PRIM_2(ld1hdu, le, MO_LE, , uint64_t, uint16_t, lduw, lduw)
+DO_LD_PRIM_2(ld1hds, le, MO_LE, , uint64_t, int16_t, lduw, lduw)
+
+DO_LD_PRIM_2(ld1ss, le, MO_LE, H1_4, uint32_t, uint32_t, ldl, ldul)
+DO_LD_PRIM_2(ld1sdu, le, MO_LE, , uint64_t, uint32_t, ldl, ldul)
+DO_LD_PRIM_2(ld1sds, le, MO_LE, , uint64_t, int32_t, ldl, ldul)
+
+DO_LD_PRIM_2(ld1dd, le, MO_LE, , uint64_t, uint64_t, ldq, ldq)
+
+DO_LD_PRIM_2(ld1hh, be, MO_BE, H1_2, uint16_t, uint16_t, lduw, lduw)
+DO_LD_PRIM_2(ld1hsu, be, MO_BE, H1_4, uint32_t, uint16_t, lduw, lduw)
+DO_LD_PRIM_2(ld1hss, be, MO_BE, H1_4, uint32_t, int16_t, lduw, lduw)
+DO_LD_PRIM_2(ld1hdu, be, MO_BE, , uint64_t, uint16_t, lduw, lduw)
+DO_LD_PRIM_2(ld1hds, be, MO_BE, , uint64_t, int16_t, lduw, lduw)
+
+DO_LD_PRIM_2(ld1ss, be, MO_BE, H1_4, uint32_t, uint32_t, ldl, ldul)
+DO_LD_PRIM_2(ld1sdu, be, MO_BE, , uint64_t, uint32_t, ldl, ldul)
+DO_LD_PRIM_2(ld1sds, be, MO_BE, , uint64_t, int32_t, ldl, ldul)
+
+DO_LD_PRIM_2(ld1dd, be, MO_BE, , uint64_t, uint64_t, ldq, ldq)
+
+#undef DO_LD_TLB
+#undef DO_LD_HOST
+#undef DO_LD_PRIM_1
+#undef DO_LD_PRIM_2
+
+/*
+ * Skip through a sequence of inactive elements in the guarding predicate @vg,
+ * beginning at @reg_off bounded by @reg_max. Return the offset of the first
+ * active element >= @reg_off, or @reg_max if there were no active elements
+ * at all.
+ */
+static intptr_t find_next_active(uint64_t *vg, intptr_t reg_off,
+                                 intptr_t reg_max, int esz)
+{
+    uint64_t pg_mask = pred_esz_masks[esz];
+    uint64_t pg = (vg[reg_off >> 6] & pg_mask) >> (reg_off & 63);
+
+    /* In normal usage, the first element is active. */
+    if (likely(pg & 1)) {
+        return reg_off;
+    }
+
+    if (pg == 0) {
+        reg_off &= -64;
+        do {
+            reg_off += 64;
+            if (unlikely(reg_off >= reg_max)) {
+                /* The entire predicate was false. */
+                return reg_max;
+            }
+            pg = vg[reg_off >> 6] & pg_mask;
+        } while (pg == 0);
+    }
+    reg_off += ctz64(pg);
+
+    /* We should never see an out of range predicate bit set. */
+    tcg_debug_assert(reg_off < reg_max);
+    return reg_off;
+}
+
+/*
+ * Return the maximum offset <= @mem_max which is still within the page
+ * referenced by @base + @mem_off.
+ */
+static intptr_t max_for_page(struct uc_struct *uc, target_ulong base, intptr_t mem_off,
+                             intptr_t mem_max)
+{
+    target_ulong addr = base + mem_off;
+    intptr_t split = -(intptr_t)(addr | TARGET_PAGE_MASK);
+    return MIN(split, mem_max - mem_off) + mem_off;
+}
+
+/* These are normally defined only for CONFIG_USER_ONLY;
+ * here they are no-op stubs. */
+static inline void set_helper_retaddr(uintptr_t ra) { }
+static inline void clear_helper_retaddr(void) { }
+
+/*
+ * Test whether tlb_vaddr_to_host produced a usable host pointer;
+ * it returns NULL when the page is not in the TLB or is not backed
+ * by ordinary host RAM.
+ */
+static inline bool test_host_page(void *host)
+{
+    return likely(host != NULL);
+}
+
+/*
+ * Common helper for all contiguous one-register predicated loads.
+ */
+static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
+                      uint32_t desc, const uintptr_t retaddr,
+                      const int esz, const int msz,
+                      sve_ld1_host_fn *host_fn,
+                      sve_ld1_tlb_fn *tlb_fn)
+{
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const int mmu_idx = get_mmuidx(oi);
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
+    void *vd = &env->vfp.zregs[rd];
+    const int diffsz = esz - msz;
+    const intptr_t reg_max = simd_oprsz(desc);
+    const intptr_t mem_max = reg_max >> diffsz;
+    ARMVectorReg scratch;
+    void *host;
+    intptr_t split, reg_off, mem_off;
+
+    /* Find the first active element. */
+    reg_off = find_next_active(vg, 0, reg_max, esz);
+    if (unlikely(reg_off == reg_max)) {
+        /* The entire predicate was false; no load occurs.
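+         * The destination is still zeroed: SVE contiguous loads use
+         * zeroing predication, so inactive elements read back as 0.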
+         */
+        memset(vd, 0, reg_max);
+        return;
+    }
+    mem_off = reg_off >> diffsz;
+    set_helper_retaddr(retaddr);
+
+    /*
+     * If the (remaining) load is entirely within a single page, then:
+     * For softmmu, if the tlb hits, no faults will occur;
+     * For user-only, either the first load will fault or none will.
+     * We can thus perform the load directly to the destination and
+     * Vd will be unmodified on any exception path.
+     */
+    split = max_for_page(env->uc, addr, mem_off, mem_max);
+    if (likely(split == mem_max)) {
+        host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
+        if (test_host_page(host)) {
+            mem_off = host_fn(vd, vg, (char *)host - mem_off, mem_off, mem_max);
+            tcg_debug_assert(mem_off == mem_max);
+            clear_helper_retaddr();
+            /* After having taken any fault, zero leading inactive elements. */
+            swap_memzero(vd, reg_off);
+            return;
+        }
+    }
+
+    /*
+     * Perform the predicated read into a temporary, thus ensuring
+     * if the load of the last element faults, Vd is not modified.
+     */
+    memset(&scratch, 0, reg_max);
+    goto start;
+    while (1) {
+        reg_off = find_next_active(vg, reg_off, reg_max, esz);
+        if (reg_off >= reg_max) {
+            break;
+        }
+        mem_off = reg_off >> diffsz;
+        split = max_for_page(env->uc, addr, mem_off, mem_max);
+
+    start:
+        if (split - mem_off >= (1ULL << msz)) {
+            /* At least one whole element on this page. */
+            host = tlb_vaddr_to_host(env, addr + mem_off,
+                                     MMU_DATA_LOAD, mmu_idx);
+            if (host) {
+                mem_off = host_fn(&scratch, vg, (char *)host - mem_off,
+                                  mem_off, split);
+                reg_off = mem_off << diffsz;
+                continue;
+            }
+        }
+
+        /*
+         * Perform one normal read. This may fault, longjmping out to the
+         * main loop in order to raise an exception. It may succeed, and
+         * as a side-effect load the TLB entry for the next round. Finally,
+         * in the extremely unlikely case we're performing this operation
+         * on I/O memory, it may succeed but not bring in the TLB entry.
+         * But even then we have still made forward progress.
+         */
+        tlb_fn(env, &scratch, reg_off, addr + mem_off, oi, retaddr);
+        reg_off += 1ULL << esz;
+    }
+
+    clear_helper_retaddr();
+    memcpy(vd, &scratch, reg_max);
+}
+
+#define DO_LD1_1(NAME, ESZ) \
+void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
+                            target_ulong addr, uint32_t desc) \
+{ \
+    sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, 0, \
+              sve_##NAME##_host, sve_##NAME##_tlb); \
+}
+
+#define DO_LD1_2(NAME, ESZ, MSZ) \
+void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \
+                               target_ulong addr, uint32_t desc) \
+{ \
+    sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \
+              sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
+} \
+void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \
+                               target_ulong addr, uint32_t desc) \
+{ \
+    sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \
+              sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
+}
+
+DO_LD1_1(ld1bb, 0)
+DO_LD1_1(ld1bhu, 1)
+DO_LD1_1(ld1bhs, 1)
+DO_LD1_1(ld1bsu, 2)
+DO_LD1_1(ld1bss, 2)
+DO_LD1_1(ld1bdu, 3)
+DO_LD1_1(ld1bds, 3)
+
+DO_LD1_2(ld1hh, 1, 1)
+DO_LD1_2(ld1hsu, 2, 1)
+DO_LD1_2(ld1hss, 2, 1)
+DO_LD1_2(ld1hdu, 3, 1)
+DO_LD1_2(ld1hds, 3, 1)
+
+DO_LD1_2(ld1ss, 2, 2)
+DO_LD1_2(ld1sdu, 3, 2)
+DO_LD1_2(ld1sds, 3, 2)
+
+DO_LD1_2(ld1dd, 3, 3)
+
+#undef DO_LD1_1
+#undef DO_LD1_2
+
+/*
+ * Common helpers for all contiguous 2,3,4-register predicated loads.
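+ *
+ * Element k of register slice j (j < N) is read from
+ * addr + (k * N + j) * size, i.e. the N destination registers are
+ * de-interleaved from consecutive memory; the values are collected
+ * in scratch registers and written back only after every element
+ * has been loaded without faulting.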
+ */ +static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr, + uint32_t desc, int size, uintptr_t ra, + sve_ld1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); + intptr_t i, oprsz = simd_oprsz(desc); + ARMVectorReg scratch[2] = { 0 }; + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; ) { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (pg & 1) { + tlb_fn(env, &scratch[0], i, addr, oi, ra); + tlb_fn(env, &scratch[1], i, addr + size, oi, ra); + } + i += size, pg >>= size; + addr += 2 * size; + } while (i & 15); + } + clear_helper_retaddr(); + + /* Wait until all exceptions have been raised to write back. */ + memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz); + memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz); +} + +static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr, + uint32_t desc, int size, uintptr_t ra, + sve_ld1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); + intptr_t i, oprsz = simd_oprsz(desc); + ARMVectorReg scratch[3] = { 0 }; + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; ) { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (pg & 1) { + tlb_fn(env, &scratch[0], i, addr, oi, ra); + tlb_fn(env, &scratch[1], i, addr + size, oi, ra); + tlb_fn(env, &scratch[2], i, addr + 2 * size, oi, ra); + } + i += size, pg >>= size; + addr += 3 * size; + } while (i & 15); + } + clear_helper_retaddr(); + + /* Wait until all exceptions have been raised to write back. */ + memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz); + memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz); + memcpy(&env->vfp.zregs[(rd + 2) & 31], &scratch[2], oprsz); +} + +static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr, + uint32_t desc, int size, uintptr_t ra, + sve_ld1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); + intptr_t i, oprsz = simd_oprsz(desc); + ARMVectorReg scratch[4] = { 0 }; + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; ) { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (pg & 1) { + tlb_fn(env, &scratch[0], i, addr, oi, ra); + tlb_fn(env, &scratch[1], i, addr + size, oi, ra); + tlb_fn(env, &scratch[2], i, addr + 2 * size, oi, ra); + tlb_fn(env, &scratch[3], i, addr + 3 * size, oi, ra); + } + i += size, pg >>= size; + addr += 4 * size; + } while (i & 15); + } + clear_helper_retaddr(); + + /* Wait until all exceptions have been raised to write back. 
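+     * If any element faults, the longjmp out of tlb_fn above skips
+     * these memcpys, leaving the architectural registers unmodified.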
*/ + memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz); + memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz); + memcpy(&env->vfp.zregs[(rd + 2) & 31], &scratch[2], oprsz); + memcpy(&env->vfp.zregs[(rd + 3) & 31], &scratch[3], oprsz); +} + +#define DO_LDN_1(N) \ +void QEMU_FLATTEN HELPER(sve_ld##N##bb_r) \ + (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ +{ \ + sve_ld##N##_r(env, vg, addr, desc, 1, GETPC(), sve_ld1bb_tlb); \ +} + +#define DO_LDN_2(N, SUFF, SIZE) \ +void QEMU_FLATTEN HELPER(sve_ld##N##SUFF##_le_r) \ + (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ +{ \ + sve_ld##N##_r(env, vg, addr, desc, SIZE, GETPC(), \ + sve_ld1##SUFF##_le_tlb); \ +} \ +void QEMU_FLATTEN HELPER(sve_ld##N##SUFF##_be_r) \ + (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ +{ \ + sve_ld##N##_r(env, vg, addr, desc, SIZE, GETPC(), \ + sve_ld1##SUFF##_be_tlb); \ +} + +DO_LDN_1(2) +DO_LDN_1(3) +DO_LDN_1(4) + +DO_LDN_2(2, hh, 2) +DO_LDN_2(3, hh, 2) +DO_LDN_2(4, hh, 2) + +DO_LDN_2(2, ss, 4) +DO_LDN_2(3, ss, 4) +DO_LDN_2(4, ss, 4) + +DO_LDN_2(2, dd, 8) +DO_LDN_2(3, dd, 8) +DO_LDN_2(4, dd, 8) + +#undef DO_LDN_1 +#undef DO_LDN_2 + +/* + * Load contiguous data, first-fault and no-fault. + * + * For user-only, one could argue that we should hold the mmap_lock during + * the operation so that there is no race between page_check_range and the + * load operation. However, unmapping pages out from under a running thread + * is extraordinarily unlikely. This theoretical race condition also affects + * linux-user/ in its get_user/put_user macros. + * + * TODO: Construct some helpers, written in assembly, that interact with + * handle_cpu_signal to produce memory ops which can properly report errors + * without racing. + */ + +/* Fault on byte I. All bits in FFR from I are cleared. The vector + * result from I is CONSTRAINED UNPREDICTABLE; we choose the MERGE + * option, which leaves subsequent data unchanged. + */ +static void record_fault(CPUARMState *env, uintptr_t i, uintptr_t oprsz) +{ + uint64_t *ffr = env->vfp.pregs[FFR_PRED_NUM].p; + + if (i & 63) { + ffr[i / 64] &= MAKE_64BIT_MASK(0, i & 63); + i = ROUND_UP(i, 64); + } + for (; i < oprsz; i += 64) { + ffr[i / 64] = 0; + } +} + +/* + * Common helper for all contiguous first-fault loads. + */ +static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr, + uint32_t desc, const uintptr_t retaddr, + const int esz, const int msz, + sve_ld1_host_fn *host_fn, + sve_ld1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const int mmu_idx = get_mmuidx(oi); + const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); + void *vd = &env->vfp.zregs[rd]; + const int diffsz = esz - msz; + const intptr_t reg_max = simd_oprsz(desc); + const intptr_t mem_max = reg_max >> diffsz; + intptr_t split, reg_off, mem_off; + void *host; + + /* Skip to the first active element. */ + reg_off = find_next_active(vg, 0, reg_max, esz); + if (unlikely(reg_off == reg_max)) { + /* The entire predicate was false; no load occurs. */ + memset(vd, 0, reg_max); + return; + } + mem_off = reg_off >> diffsz; + set_helper_retaddr(retaddr); + + /* + * If the (remaining) load is entirely within a single page, then: + * For softmmu, and the tlb hits, then no faults will occur; + * For user-only, either the first load will fault or none will. + * We can thus perform the load directly to the destination and + * Vd will be unmodified on any exception path. 
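+ * (A fault on the first active element of a first-fault load is still + * taken normally; only faults on later elements are suppressed.)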
+ */ + split = max_for_page(env->uc, addr, mem_off, mem_max); + if (likely(split == mem_max)) { + host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx); + if (test_host_page(host)) { + mem_off = host_fn(vd, vg, (char *)host - mem_off, mem_off, mem_max); + tcg_debug_assert(mem_off == mem_max); + clear_helper_retaddr(); + /* After any fault, zero any leading inactive elements. */ + swap_memzero(vd, reg_off); + return; + } + } + + /* + * Perform one normal read, which will fault or not. + * But it is likely to bring the page into the tlb. + */ + tlb_fn(env, vd, reg_off, addr + mem_off, oi, retaddr); + + /* After any fault, zero any leading predicated false elts. */ + swap_memzero(vd, reg_off); + mem_off += 1ULL << msz; + reg_off += 1ULL << esz; + + /* Try again to read the balance of the page. */ + split = max_for_page(env->uc, addr, mem_off - 1, mem_max); + if (split >= (1ULL << msz)) { + host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx); + if (host) { + mem_off = host_fn(vd, vg, (char *)host - mem_off, mem_off, split); + reg_off = mem_off << diffsz; + } + } + + clear_helper_retaddr(); + record_fault(env, reg_off, reg_max); +} + +/* + * Common helper for all contiguous no-fault loads. + */ +static void sve_ldnf1_r(CPUARMState *env, void *vg, const target_ulong addr, + uint32_t desc, const int esz, const int msz, + sve_ld1_host_fn *host_fn) +{ + const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); + void *vd = &env->vfp.zregs[rd]; + const int diffsz = esz - msz; + const intptr_t reg_max = simd_oprsz(desc); + const intptr_t mem_max = reg_max >> diffsz; + const int mmu_idx = cpu_mmu_index(env, false); + intptr_t split, reg_off, mem_off; + void *host; + + /* There will be no fault, so we may modify in advance. */ + memset(vd, 0, reg_max); + + /* Skip to the first active element. */ + reg_off = find_next_active(vg, 0, reg_max, esz); + if (unlikely(reg_off == reg_max)) { + /* The entire predicate was false; no load occurs. */ + return; + } + mem_off = reg_off >> diffsz; + + /* + * If the address is not in the TLB, we have no way to bring the + * entry into the TLB without also risking a fault. Note that + * the corollary is that we never load from an address not in RAM. + * + * This last is out of spec, in a weird corner case. + * Per the MemNF/MemSingleNF pseudocode, a NF load from Device memory + * must not actually hit the bus -- it returns UNKNOWN data instead. + * But if you map non-RAM with Normal memory attributes and do a NF + * load then it should access the bus. (Nobody ought actually do this + * in the real world, obviously.) + * + * Then there are the annoying special cases with watchpoints... + * TODO: Add a form of non-faulting loads using cc->tlb_fill(probe=true). 
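+ * Until then, tlb_vaddr_to_host() below returns NULL for any page that is + * not already resident, and record_fault() clears the FFR from the first + * element that could not be loaded.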
+ */ + host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx); + split = max_for_page(env->uc, addr, mem_off, mem_max); + if (host && split >= (1ULL << msz)) { + mem_off = host_fn(vd, vg, (char *)host - mem_off, mem_off, split); + reg_off = mem_off << diffsz; + } + + record_fault(env, reg_off, reg_max); +} + +#define DO_LDFF1_LDNF1_1(PART, ESZ) \ +void HELPER(sve_ldff1##PART##_r)(CPUARMState *env, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sve_ldff1_r(env, vg, addr, desc, GETPC(), ESZ, 0, \ + sve_ld1##PART##_host, sve_ld1##PART##_tlb); \ +} \ +void HELPER(sve_ldnf1##PART##_r)(CPUARMState *env, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sve_ldnf1_r(env, vg, addr, desc, ESZ, 0, sve_ld1##PART##_host); \ +} + +#define DO_LDFF1_LDNF1_2(PART, ESZ, MSZ) \ +void HELPER(sve_ldff1##PART##_le_r)(CPUARMState *env, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sve_ldff1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \ + sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \ +} \ +void HELPER(sve_ldnf1##PART##_le_r)(CPUARMState *env, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sve_ldnf1_r(env, vg, addr, desc, ESZ, MSZ, sve_ld1##PART##_le_host); \ +} \ +void HELPER(sve_ldff1##PART##_be_r)(CPUARMState *env, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sve_ldff1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \ + sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \ +} \ +void HELPER(sve_ldnf1##PART##_be_r)(CPUARMState *env, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sve_ldnf1_r(env, vg, addr, desc, ESZ, MSZ, sve_ld1##PART##_be_host); \ +} + +DO_LDFF1_LDNF1_1(bb, 0) +DO_LDFF1_LDNF1_1(bhu, 1) +DO_LDFF1_LDNF1_1(bhs, 1) +DO_LDFF1_LDNF1_1(bsu, 2) +DO_LDFF1_LDNF1_1(bss, 2) +DO_LDFF1_LDNF1_1(bdu, 3) +DO_LDFF1_LDNF1_1(bds, 3) + +DO_LDFF1_LDNF1_2(hh, 1, 1) +DO_LDFF1_LDNF1_2(hsu, 2, 1) +DO_LDFF1_LDNF1_2(hss, 2, 1) +DO_LDFF1_LDNF1_2(hdu, 3, 1) +DO_LDFF1_LDNF1_2(hds, 3, 1) + +DO_LDFF1_LDNF1_2(ss, 2, 2) +DO_LDFF1_LDNF1_2(sdu, 3, 2) +DO_LDFF1_LDNF1_2(sds, 3, 2) + +DO_LDFF1_LDNF1_2(dd, 3, 3) + +#undef DO_LDFF1_LDNF1_1 +#undef DO_LDFF1_LDNF1_2 + +/* + * Store contiguous data, protected by a governing predicate. 
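+ * Unlike the loads above there is no scratch staging: each active element + * is written straight to memory, so a faulting element can leave earlier + * elements already stored.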
+ */ + +#define DO_ST_TLB(NAME, H, TYPEM, HOST, MOEND, TLB) \ +static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \ + target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \ +{ \ + TLB(env, addr, *(TYPEM *)((char *)vd + H(reg_off)), oi, ra); \ +} + +DO_ST_TLB(st1bb, H1, uint8_t, stb_p, 0, helper_ret_stb_mmu) +DO_ST_TLB(st1bh, H1_2, uint16_t, stb_p, 0, helper_ret_stb_mmu) +DO_ST_TLB(st1bs, H1_4, uint32_t, stb_p, 0, helper_ret_stb_mmu) +DO_ST_TLB(st1bd, , uint64_t, stb_p, 0, helper_ret_stb_mmu) + +DO_ST_TLB(st1hh_le, H1_2, uint16_t, stw_le_p, MO_LE, helper_le_stw_mmu) +DO_ST_TLB(st1hs_le, H1_4, uint32_t, stw_le_p, MO_LE, helper_le_stw_mmu) +DO_ST_TLB(st1hd_le, , uint64_t, stw_le_p, MO_LE, helper_le_stw_mmu) + +DO_ST_TLB(st1ss_le, H1_4, uint32_t, stl_le_p, MO_LE, helper_le_stl_mmu) +DO_ST_TLB(st1sd_le, , uint64_t, stl_le_p, MO_LE, helper_le_stl_mmu) + +DO_ST_TLB(st1dd_le, , uint64_t, stq_le_p, MO_LE, helper_le_stq_mmu) + +DO_ST_TLB(st1hh_be, H1_2, uint16_t, stw_be_p, MO_BE, helper_be_stw_mmu) +DO_ST_TLB(st1hs_be, H1_4, uint32_t, stw_be_p, MO_BE, helper_be_stw_mmu) +DO_ST_TLB(st1hd_be, , uint64_t, stw_be_p, MO_BE, helper_be_stw_mmu) + +DO_ST_TLB(st1ss_be, H1_4, uint32_t, stl_be_p, MO_BE, helper_be_stl_mmu) +DO_ST_TLB(st1sd_be, , uint64_t, stl_be_p, MO_BE, helper_be_stl_mmu) + +DO_ST_TLB(st1dd_be, , uint64_t, stq_be_p, MO_BE, helper_be_stq_mmu) + +#undef DO_ST_TLB + +/* + * Common helpers for all contiguous 1,2,3,4-register predicated stores. + */ +static void sve_st1_r(CPUARMState *env, void *vg, target_ulong addr, + uint32_t desc, const uintptr_t ra, + const int esize, const int msize, + sve_st1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); + intptr_t i, oprsz = simd_oprsz(desc); + void *vd = &env->vfp.zregs[rd]; + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; ) { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (pg & 1) { + tlb_fn(env, vd, i, addr, oi, ra); + } + i += esize, pg >>= esize; + addr += msize; + } while (i & 15); + } + clear_helper_retaddr(); +} + +static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr, + uint32_t desc, const uintptr_t ra, + const int esize, const int msize, + sve_st1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); + intptr_t i, oprsz = simd_oprsz(desc); + void *d1 = &env->vfp.zregs[rd]; + void *d2 = &env->vfp.zregs[(rd + 1) & 31]; + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; ) { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (pg & 1) { + tlb_fn(env, d1, i, addr, oi, ra); + tlb_fn(env, d2, i, addr + msize, oi, ra); + } + i += esize, pg >>= esize; + addr += 2 * msize; + } while (i & 15); + } + clear_helper_retaddr(); +} + +static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr, + uint32_t desc, const uintptr_t ra, + const int esize, const int msize, + sve_st1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); + intptr_t i, oprsz = simd_oprsz(desc); + void *d1 = &env->vfp.zregs[rd]; + void *d2 = &env->vfp.zregs[(rd + 1) & 31]; + void *d3 = &env->vfp.zregs[(rd + 2) & 31]; + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; ) { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (pg & 1) { 
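+ /* Active element: store the corresponding slice of each register. */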
+ tlb_fn(env, d1, i, addr, oi, ra); + tlb_fn(env, d2, i, addr + msize, oi, ra); + tlb_fn(env, d3, i, addr + 2 * msize, oi, ra); + } + i += esize, pg >>= esize; + addr += 3 * msize; + } while (i & 15); + } + clear_helper_retaddr(); +} + +static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr, + uint32_t desc, const uintptr_t ra, + const int esize, const int msize, + sve_st1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); + intptr_t i, oprsz = simd_oprsz(desc); + void *d1 = &env->vfp.zregs[rd]; + void *d2 = &env->vfp.zregs[(rd + 1) & 31]; + void *d3 = &env->vfp.zregs[(rd + 2) & 31]; + void *d4 = &env->vfp.zregs[(rd + 3) & 31]; + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; ) { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (pg & 1) { + tlb_fn(env, d1, i, addr, oi, ra); + tlb_fn(env, d2, i, addr + msize, oi, ra); + tlb_fn(env, d3, i, addr + 2 * msize, oi, ra); + tlb_fn(env, d4, i, addr + 3 * msize, oi, ra); + } + i += esize, pg >>= esize; + addr += 4 * msize; + } while (i & 15); + } + clear_helper_retaddr(); +} + +#define DO_STN_1(N, NAME, ESIZE) \ +void QEMU_FLATTEN HELPER(sve_st##N##NAME##_r) \ + (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ +{ \ + sve_st##N##_r(env, vg, addr, desc, GETPC(), ESIZE, 1, \ + sve_st1##NAME##_tlb); \ +} + +#define DO_STN_2(N, NAME, ESIZE, MSIZE) \ +void QEMU_FLATTEN HELPER(sve_st##N##NAME##_le_r) \ + (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ +{ \ + sve_st##N##_r(env, vg, addr, desc, GETPC(), ESIZE, MSIZE, \ + sve_st1##NAME##_le_tlb); \ +} \ +void QEMU_FLATTEN HELPER(sve_st##N##NAME##_be_r) \ + (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ +{ \ + sve_st##N##_r(env, vg, addr, desc, GETPC(), ESIZE, MSIZE, \ + sve_st1##NAME##_be_tlb); \ +} + +DO_STN_1(1, bb, 1) +DO_STN_1(1, bh, 2) +DO_STN_1(1, bs, 4) +DO_STN_1(1, bd, 8) +DO_STN_1(2, bb, 1) +DO_STN_1(3, bb, 1) +DO_STN_1(4, bb, 1) + +DO_STN_2(1, hh, 2, 2) +DO_STN_2(1, hs, 4, 2) +DO_STN_2(1, hd, 8, 2) +DO_STN_2(2, hh, 2, 2) +DO_STN_2(3, hh, 2, 2) +DO_STN_2(4, hh, 2, 2) + +DO_STN_2(1, ss, 4, 4) +DO_STN_2(1, sd, 8, 4) +DO_STN_2(2, ss, 4, 4) +DO_STN_2(3, ss, 4, 4) +DO_STN_2(4, ss, 4, 4) + +DO_STN_2(1, dd, 8, 8) +DO_STN_2(2, dd, 8, 8) +DO_STN_2(3, dd, 8, 8) +DO_STN_2(4, dd, 8, 8) + +#undef DO_STN_1 +#undef DO_STN_2 + +/* + * Loads with a vector index. + */ + +/* + * Load the element at @reg + @reg_ofs, sign or zero-extend as needed. 
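+ * These accessors read one 32-bit or 64-bit index element from the offset + * vector and widen it to target_ulong for the address computation.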
+ */ +typedef target_ulong zreg_off_fn(void *reg, intptr_t reg_ofs); + +static target_ulong off_zsu_s(void *reg, intptr_t reg_ofs) +{ + return *(uint32_t *)((char *)reg + H1_4(reg_ofs)); +} + +static target_ulong off_zss_s(void *reg, intptr_t reg_ofs) +{ + return *(int32_t *)((char *)reg + H1_4(reg_ofs)); +} + +static target_ulong off_zsu_d(void *reg, intptr_t reg_ofs) +{ + return (uint32_t)*(uint64_t *)((char *)reg + reg_ofs); +} + +static target_ulong off_zss_d(void *reg, intptr_t reg_ofs) +{ + return (int32_t)*(uint64_t *)((char *)reg + reg_ofs); +} + +static target_ulong off_zd_d(void *reg, intptr_t reg_ofs) +{ + return *(uint64_t *)((char *)reg + reg_ofs); +} + +static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm, + target_ulong base, uint32_t desc, uintptr_t ra, + zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); + intptr_t i, oprsz = simd_oprsz(desc); + ARMVectorReg scratch = { 0 }; + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; ) { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (likely(pg & 1)) { + target_ulong off = off_fn(vm, i); + tlb_fn(env, &scratch, i, base + (off << scale), oi, ra); + } + i += 4, pg >>= 4; + } while (i & 15); + } + clear_helper_retaddr(); + + /* Wait until all exceptions have been raised to write back. */ + memcpy(vd, &scratch, oprsz); +} + +static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm, + target_ulong base, uint32_t desc, uintptr_t ra, + zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); + intptr_t i, oprsz = simd_oprsz(desc) / 8; + ARMVectorReg scratch = { 0 }; + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; i++) { + uint8_t pg = *(uint8_t *)((char *)vg + H1(i)); + if (likely(pg & 1)) { + target_ulong off = off_fn(vm, i * 8); + tlb_fn(env, &scratch, i * 8, base + (off << scale), oi, ra); + } + } + clear_helper_retaddr(); + + /* Wait until all exceptions have been raised to write back. 
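+ * As with the contiguous forms, the gather result is staged in a scratch + * register so that a fault leaves Vd untouched.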
*/ + memcpy(vd, &scratch, oprsz * 8); +} + +#define DO_LD1_ZPZ_S(MEM, OFS) \ +void QEMU_FLATTEN HELPER(sve_ld##MEM##_##OFS) \ + (CPUARMState *env, void *vd, void *vg, void *vm, \ + target_ulong base, uint32_t desc) \ +{ \ + sve_ld1_zs(env, vd, vg, vm, base, desc, GETPC(), \ + off_##OFS##_s, sve_ld1##MEM##_tlb); \ +} + +#define DO_LD1_ZPZ_D(MEM, OFS) \ +void QEMU_FLATTEN HELPER(sve_ld##MEM##_##OFS) \ + (CPUARMState *env, void *vd, void *vg, void *vm, \ + target_ulong base, uint32_t desc) \ +{ \ + sve_ld1_zd(env, vd, vg, vm, base, desc, GETPC(), \ + off_##OFS##_d, sve_ld1##MEM##_tlb); \ +} + +DO_LD1_ZPZ_S(bsu, zsu) +DO_LD1_ZPZ_S(bsu, zss) +DO_LD1_ZPZ_D(bdu, zsu) +DO_LD1_ZPZ_D(bdu, zss) +DO_LD1_ZPZ_D(bdu, zd) + +DO_LD1_ZPZ_S(bss, zsu) +DO_LD1_ZPZ_S(bss, zss) +DO_LD1_ZPZ_D(bds, zsu) +DO_LD1_ZPZ_D(bds, zss) +DO_LD1_ZPZ_D(bds, zd) + +DO_LD1_ZPZ_S(hsu_le, zsu) +DO_LD1_ZPZ_S(hsu_le, zss) +DO_LD1_ZPZ_D(hdu_le, zsu) +DO_LD1_ZPZ_D(hdu_le, zss) +DO_LD1_ZPZ_D(hdu_le, zd) + +DO_LD1_ZPZ_S(hsu_be, zsu) +DO_LD1_ZPZ_S(hsu_be, zss) +DO_LD1_ZPZ_D(hdu_be, zsu) +DO_LD1_ZPZ_D(hdu_be, zss) +DO_LD1_ZPZ_D(hdu_be, zd) + +DO_LD1_ZPZ_S(hss_le, zsu) +DO_LD1_ZPZ_S(hss_le, zss) +DO_LD1_ZPZ_D(hds_le, zsu) +DO_LD1_ZPZ_D(hds_le, zss) +DO_LD1_ZPZ_D(hds_le, zd) + +DO_LD1_ZPZ_S(hss_be, zsu) +DO_LD1_ZPZ_S(hss_be, zss) +DO_LD1_ZPZ_D(hds_be, zsu) +DO_LD1_ZPZ_D(hds_be, zss) +DO_LD1_ZPZ_D(hds_be, zd) + +DO_LD1_ZPZ_S(ss_le, zsu) +DO_LD1_ZPZ_S(ss_le, zss) +DO_LD1_ZPZ_D(sdu_le, zsu) +DO_LD1_ZPZ_D(sdu_le, zss) +DO_LD1_ZPZ_D(sdu_le, zd) + +DO_LD1_ZPZ_S(ss_be, zsu) +DO_LD1_ZPZ_S(ss_be, zss) +DO_LD1_ZPZ_D(sdu_be, zsu) +DO_LD1_ZPZ_D(sdu_be, zss) +DO_LD1_ZPZ_D(sdu_be, zd) + +DO_LD1_ZPZ_D(sds_le, zsu) +DO_LD1_ZPZ_D(sds_le, zss) +DO_LD1_ZPZ_D(sds_le, zd) + +DO_LD1_ZPZ_D(sds_be, zsu) +DO_LD1_ZPZ_D(sds_be, zss) +DO_LD1_ZPZ_D(sds_be, zd) + +DO_LD1_ZPZ_D(dd_le, zsu) +DO_LD1_ZPZ_D(dd_le, zss) +DO_LD1_ZPZ_D(dd_le, zd) + +DO_LD1_ZPZ_D(dd_be, zsu) +DO_LD1_ZPZ_D(dd_be, zss) +DO_LD1_ZPZ_D(dd_be, zd) + +#undef DO_LD1_ZPZ_S +#undef DO_LD1_ZPZ_D + +/* First fault loads with a vector index. */ + +/* Load one element into VD+REG_OFF from (ENV,VADDR) without faulting. + * The controlling predicate is known to be true. Return true if the + * load was successful. 
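+ * A false return means only that the page is not present in the TLB; no + * exception is raised.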
+ */ +typedef bool sve_ld1_nf_fn(CPUARMState *env, void *vd, intptr_t reg_off, + target_ulong vaddr, int mmu_idx); + +#ifdef _MSC_VER +#define DO_LD_NF(NAME, H, TYPEE, TYPEM, HOST) \ +static bool sve_ld##NAME##_nf(CPUARMState *env, void *vd, intptr_t reg_off, \ + target_ulong addr, int mmu_idx) \ +{ \ + struct uc_struct *uc = env->uc; \ + target_ulong next_page = 0ULL - (addr | TARGET_PAGE_MASK); \ + if (likely(next_page - addr >= sizeof(TYPEM))) { \ + void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx); \ + if (likely(host)) { \ + TYPEM val = HOST(host); \ + *(TYPEE *)((char *)vd + H(reg_off)) = val; \ + return true; \ + } \ + } \ + return false; \ +} +#else +#define DO_LD_NF(NAME, H, TYPEE, TYPEM, HOST) \ +static bool sve_ld##NAME##_nf(CPUARMState *env, void *vd, intptr_t reg_off, \ + target_ulong addr, int mmu_idx) \ +{ \ + struct uc_struct *uc = env->uc; \ + target_ulong next_page = -(addr | TARGET_PAGE_MASK); \ + if (likely(next_page - addr >= sizeof(TYPEM))) { \ + void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx); \ + if (likely(host)) { \ + TYPEM val = HOST(host); \ + *(TYPEE *)((char *)vd + H(reg_off)) = val; \ + return true; \ + } \ + } \ + return false; \ +} +#endif + +DO_LD_NF(bsu, H1_4, uint32_t, uint8_t, ldub_p) +DO_LD_NF(bss, H1_4, uint32_t, int8_t, ldsb_p) +DO_LD_NF(bdu, , uint64_t, uint8_t, ldub_p) +DO_LD_NF(bds, , uint64_t, int8_t, ldsb_p) + +DO_LD_NF(hsu_le, H1_4, uint32_t, uint16_t, lduw_le_p) +DO_LD_NF(hss_le, H1_4, uint32_t, int16_t, ldsw_le_p) +DO_LD_NF(hsu_be, H1_4, uint32_t, uint16_t, lduw_be_p) +DO_LD_NF(hss_be, H1_4, uint32_t, int16_t, ldsw_be_p) +DO_LD_NF(hdu_le, , uint64_t, uint16_t, lduw_le_p) +DO_LD_NF(hds_le, , uint64_t, int16_t, ldsw_le_p) +DO_LD_NF(hdu_be, , uint64_t, uint16_t, lduw_be_p) +DO_LD_NF(hds_be, , uint64_t, int16_t, ldsw_be_p) + +DO_LD_NF(ss_le, H1_4, uint32_t, uint32_t, ldl_le_p) +DO_LD_NF(ss_be, H1_4, uint32_t, uint32_t, ldl_be_p) +DO_LD_NF(sdu_le, , uint64_t, uint32_t, ldl_le_p) +DO_LD_NF(sds_le, , uint64_t, int32_t, ldl_le_p) +DO_LD_NF(sdu_be, , uint64_t, uint32_t, ldl_be_p) +DO_LD_NF(sds_be, , uint64_t, int32_t, ldl_be_p) + +DO_LD_NF(dd_le, , uint64_t, uint64_t, ldq_le_p) +DO_LD_NF(dd_be, , uint64_t, uint64_t, ldq_be_p) + +/* + * Common helper for all gather first-faulting loads. + */ +static inline void sve_ldff1_zs(CPUARMState *env, void *vd, void *vg, void *vm, + target_ulong base, uint32_t desc, uintptr_t ra, + zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn, + sve_ld1_nf_fn *nonfault_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const int mmu_idx = get_mmuidx(oi); + const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); + intptr_t reg_off, reg_max = simd_oprsz(desc); + target_ulong addr; + + /* Skip to the first true predicate. */ + reg_off = find_next_active(vg, 0, reg_max, MO_32); + if (likely(reg_off < reg_max)) { + /* Perform one normal read, which will fault or not. */ + set_helper_retaddr(ra); + addr = off_fn(vm, reg_off); + addr = base + (addr << scale); + tlb_fn(env, vd, reg_off, addr, oi, ra); + + /* The rest of the reads will be non-faulting. */ + clear_helper_retaddr(); + } + + /* After any fault, zero the leading predicated false elements. 
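+ * (At this point reg_off still indexes the first active element, so all + * prior elements are predicated false.)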
*/ + swap_memzero(vd, reg_off); + + while (likely((reg_off += 4) < reg_max)) { + uint64_t pg = *(uint64_t *)((char *)vg + (reg_off >> 6) * 8); + if (likely((pg >> (reg_off & 63)) & 1)) { + addr = off_fn(vm, reg_off); + addr = base + (addr << scale); + if (!nonfault_fn(env, vd, reg_off, addr, mmu_idx)) { + record_fault(env, reg_off, reg_max); + break; + } + } else { + *(uint32_t *)((char *)vd + H1_4(reg_off)) = 0; + } + } +} + +static inline void sve_ldff1_zd(CPUARMState *env, void *vd, void *vg, void *vm, + target_ulong base, uint32_t desc, uintptr_t ra, + zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn, + sve_ld1_nf_fn *nonfault_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const int mmu_idx = get_mmuidx(oi); + const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); + intptr_t reg_off, reg_max = simd_oprsz(desc); + target_ulong addr; + + /* Skip to the first true predicate. */ + reg_off = find_next_active(vg, 0, reg_max, MO_64); + if (likely(reg_off < reg_max)) { + /* Perform one normal read, which will fault or not. */ + set_helper_retaddr(ra); + addr = off_fn(vm, reg_off); + addr = base + (addr << scale); + tlb_fn(env, vd, reg_off, addr, oi, ra); + + /* The rest of the reads will be non-faulting. */ + clear_helper_retaddr(); + } + + /* After any fault, zero the leading predicated false elements. */ + swap_memzero(vd, reg_off); + + while (likely((reg_off += 8) < reg_max)) { + uint8_t pg = *(uint8_t *)((char *)vg + H1(reg_off >> 3)); + if (likely(pg & 1)) { + addr = off_fn(vm, reg_off); + addr = base + (addr << scale); + if (!nonfault_fn(env, vd, reg_off, addr, mmu_idx)) { + record_fault(env, reg_off, reg_max); + break; + } + } else { + *(uint64_t *)((char *)vd + reg_off) = 0; + } + } +} + +#define DO_LDFF1_ZPZ_S(MEM, OFS) \ +void HELPER(sve_ldff##MEM##_##OFS) \ + (CPUARMState *env, void *vd, void *vg, void *vm, \ + target_ulong base, uint32_t desc) \ +{ \ + sve_ldff1_zs(env, vd, vg, vm, base, desc, GETPC(), \ + off_##OFS##_s, sve_ld1##MEM##_tlb, sve_ld##MEM##_nf); \ +} + +#define DO_LDFF1_ZPZ_D(MEM, OFS) \ +void HELPER(sve_ldff##MEM##_##OFS) \ + (CPUARMState *env, void *vd, void *vg, void *vm, \ + target_ulong base, uint32_t desc) \ +{ \ + sve_ldff1_zd(env, vd, vg, vm, base, desc, GETPC(), \ + off_##OFS##_d, sve_ld1##MEM##_tlb, sve_ld##MEM##_nf); \ +} + +DO_LDFF1_ZPZ_S(bsu, zsu) +DO_LDFF1_ZPZ_S(bsu, zss) +DO_LDFF1_ZPZ_D(bdu, zsu) +DO_LDFF1_ZPZ_D(bdu, zss) +DO_LDFF1_ZPZ_D(bdu, zd) + +DO_LDFF1_ZPZ_S(bss, zsu) +DO_LDFF1_ZPZ_S(bss, zss) +DO_LDFF1_ZPZ_D(bds, zsu) +DO_LDFF1_ZPZ_D(bds, zss) +DO_LDFF1_ZPZ_D(bds, zd) + +DO_LDFF1_ZPZ_S(hsu_le, zsu) +DO_LDFF1_ZPZ_S(hsu_le, zss) +DO_LDFF1_ZPZ_D(hdu_le, zsu) +DO_LDFF1_ZPZ_D(hdu_le, zss) +DO_LDFF1_ZPZ_D(hdu_le, zd) + +DO_LDFF1_ZPZ_S(hsu_be, zsu) +DO_LDFF1_ZPZ_S(hsu_be, zss) +DO_LDFF1_ZPZ_D(hdu_be, zsu) +DO_LDFF1_ZPZ_D(hdu_be, zss) +DO_LDFF1_ZPZ_D(hdu_be, zd) + +DO_LDFF1_ZPZ_S(hss_le, zsu) +DO_LDFF1_ZPZ_S(hss_le, zss) +DO_LDFF1_ZPZ_D(hds_le, zsu) +DO_LDFF1_ZPZ_D(hds_le, zss) +DO_LDFF1_ZPZ_D(hds_le, zd) + +DO_LDFF1_ZPZ_S(hss_be, zsu) +DO_LDFF1_ZPZ_S(hss_be, zss) +DO_LDFF1_ZPZ_D(hds_be, zsu) +DO_LDFF1_ZPZ_D(hds_be, zss) +DO_LDFF1_ZPZ_D(hds_be, zd) + +DO_LDFF1_ZPZ_S(ss_le, zsu) +DO_LDFF1_ZPZ_S(ss_le, zss) +DO_LDFF1_ZPZ_D(sdu_le, zsu) +DO_LDFF1_ZPZ_D(sdu_le, zss) +DO_LDFF1_ZPZ_D(sdu_le, zd) + +DO_LDFF1_ZPZ_S(ss_be, zsu) +DO_LDFF1_ZPZ_S(ss_be, zss) +DO_LDFF1_ZPZ_D(sdu_be, zsu) +DO_LDFF1_ZPZ_D(sdu_be, zss) +DO_LDFF1_ZPZ_D(sdu_be, zd) + +DO_LDFF1_ZPZ_D(sds_le, zsu) +DO_LDFF1_ZPZ_D(sds_le, zss) 
+DO_LDFF1_ZPZ_D(sds_le, zd) + +DO_LDFF1_ZPZ_D(sds_be, zsu) +DO_LDFF1_ZPZ_D(sds_be, zss) +DO_LDFF1_ZPZ_D(sds_be, zd) + +DO_LDFF1_ZPZ_D(dd_le, zsu) +DO_LDFF1_ZPZ_D(dd_le, zss) +DO_LDFF1_ZPZ_D(dd_le, zd) + +DO_LDFF1_ZPZ_D(dd_be, zsu) +DO_LDFF1_ZPZ_D(dd_be, zss) +DO_LDFF1_ZPZ_D(dd_be, zd) + +/* Stores with a vector index. */ + +static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm, + target_ulong base, uint32_t desc, uintptr_t ra, + zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); + intptr_t i, oprsz = simd_oprsz(desc); + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; ) { + uint16_t pg = *(uint16_t *)((char *)vg + H1_2(i >> 3)); + do { + if (likely(pg & 1)) { + target_ulong off = off_fn(vm, i); + tlb_fn(env, vd, i, base + (off << scale), oi, ra); + } + i += 4, pg >>= 4; + } while (i & 15); + } + clear_helper_retaddr(); +} + +static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm, + target_ulong base, uint32_t desc, uintptr_t ra, + zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn) +{ + const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT); + const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2); + intptr_t i, oprsz = simd_oprsz(desc) / 8; + + set_helper_retaddr(ra); + for (i = 0; i < oprsz; i++) { + uint8_t pg = *(uint8_t *)((char *)vg + H1(i)); + if (likely(pg & 1)) { + target_ulong off = off_fn(vm, i * 8); + tlb_fn(env, vd, i * 8, base + (off << scale), oi, ra); + } + } + clear_helper_retaddr(); +} + +#define DO_ST1_ZPZ_S(MEM, OFS) \ +void QEMU_FLATTEN HELPER(sve_st##MEM##_##OFS) \ + (CPUARMState *env, void *vd, void *vg, void *vm, \ + target_ulong base, uint32_t desc) \ +{ \ + sve_st1_zs(env, vd, vg, vm, base, desc, GETPC(), \ + off_##OFS##_s, sve_st1##MEM##_tlb); \ +} + +#define DO_ST1_ZPZ_D(MEM, OFS) \ +void QEMU_FLATTEN HELPER(sve_st##MEM##_##OFS) \ + (CPUARMState *env, void *vd, void *vg, void *vm, \ + target_ulong base, uint32_t desc) \ +{ \ + sve_st1_zd(env, vd, vg, vm, base, desc, GETPC(), \ + off_##OFS##_d, sve_st1##MEM##_tlb); \ +} + +DO_ST1_ZPZ_S(bs, zsu) +DO_ST1_ZPZ_S(hs_le, zsu) +DO_ST1_ZPZ_S(hs_be, zsu) +DO_ST1_ZPZ_S(ss_le, zsu) +DO_ST1_ZPZ_S(ss_be, zsu) + +DO_ST1_ZPZ_S(bs, zss) +DO_ST1_ZPZ_S(hs_le, zss) +DO_ST1_ZPZ_S(hs_be, zss) +DO_ST1_ZPZ_S(ss_le, zss) +DO_ST1_ZPZ_S(ss_be, zss) + +DO_ST1_ZPZ_D(bd, zsu) +DO_ST1_ZPZ_D(hd_le, zsu) +DO_ST1_ZPZ_D(hd_be, zsu) +DO_ST1_ZPZ_D(sd_le, zsu) +DO_ST1_ZPZ_D(sd_be, zsu) +DO_ST1_ZPZ_D(dd_le, zsu) +DO_ST1_ZPZ_D(dd_be, zsu) + +DO_ST1_ZPZ_D(bd, zss) +DO_ST1_ZPZ_D(hd_le, zss) +DO_ST1_ZPZ_D(hd_be, zss) +DO_ST1_ZPZ_D(sd_le, zss) +DO_ST1_ZPZ_D(sd_be, zss) +DO_ST1_ZPZ_D(dd_le, zss) +DO_ST1_ZPZ_D(dd_be, zss) + +DO_ST1_ZPZ_D(bd, zd) +DO_ST1_ZPZ_D(hd_le, zd) +DO_ST1_ZPZ_D(hd_be, zd) +DO_ST1_ZPZ_D(sd_le, zd) +DO_ST1_ZPZ_D(sd_be, zd) +DO_ST1_ZPZ_D(dd_le, zd) +DO_ST1_ZPZ_D(dd_be, zd) + +#undef DO_ST1_ZPZ_S +#undef DO_ST1_ZPZ_D diff --git a/qemu/target/arm/tlb_helper.c b/qemu/target/arm/tlb_helper.c new file mode 100644 index 00000000..e19d6c17 --- /dev/null +++ b/qemu/target/arm/tlb_helper.c @@ -0,0 +1,187 @@ +/* + * ARM TLB (Translation lookaside buffer) helpers. + * + * This code is licensed under the GNU GPL v2 or later. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" +#include "exec/exec-all.h" + +static inline uint32_t merge_syn_data_abort(uint32_t template_syn, + unsigned int target_el, + bool same_el, bool ea, + bool s1ptw, bool is_write, + int fsc) +{ + uint32_t syn; + + /* + * ISV is only set for data aborts routed to EL2 and + * never for stage-1 page table walks faulting on stage 2. + * + * Furthermore, ISV is only set for certain kinds of load/stores. + * If the template syndrome does not have ISV set, we should leave + * it cleared. + * + * See ARMv8 specs, D7-1974: + * ISS encoding for an exception from a Data Abort, the + * ISV field. + */ + if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) { + syn = syn_data_abort_no_iss(same_el, + ea, 0, s1ptw, is_write, fsc); + } else { + /* + * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template + * syndrome created at translation time. + * Now we create the runtime syndrome with the remaining fields. + */ + syn = syn_data_abort_with_iss(same_el, + 0, 0, 0, 0, 0, + ea, 0, s1ptw, is_write, fsc, + true); + /* Merge the runtime syndrome with the template syndrome. */ + syn |= template_syn; + } + return syn; +} + +static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, ARMMMUFaultInfo *fi) +{ + CPUARMState *env = &cpu->env; + int target_el; + bool same_el; + uint32_t syn, exc, fsr, fsc; + ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx); + + target_el = exception_target_el(env); + if (fi->stage2) { + target_el = 2; + env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4; + } + same_el = (arm_current_el(env) == target_el); + + if (target_el == 2 || arm_el_is_aa64(env, target_el) || + arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) { + /* + * LPAE format fault status register : bottom 6 bits are + * status code in the same form as needed for syndrome + */ + fsr = arm_fi_to_lfsc(fi); + fsc = extract32(fsr, 0, 6); + } else { + fsr = arm_fi_to_sfsc(fi); + /* + * Short format FSR : this fault will never actually be reported + * to an EL that uses a syndrome register. Use a (currently) + * reserved FSR code in case the constructed syndrome does leak + * into the guest somehow. 
*/ + fsc = 0x3f; + } + + if (access_type == MMU_INST_FETCH) { + syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc); + exc = EXCP_PREFETCH_ABORT; + } else { + syn = merge_syn_data_abort(env->exception.syndrome, target_el, + same_el, fi->ea, fi->s1ptw, + access_type == MMU_DATA_STORE, + fsc); + if (access_type == MMU_DATA_STORE + && arm_feature(env, ARM_FEATURE_V6)) { + fsr |= (1 << 11); + } + exc = EXCP_DATA_ABORT; + } + + env->exception.vaddress = addr; + env->exception.fsr = fsr; + raise_exception(env, exc, syn, target_el); +} + +/* Raise a data fault alignment exception for the specified virtual address */ +void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + ARMCPU *cpu = ARM_CPU(cs); + ARMMMUFaultInfo fi = { 0 }; + + /* now we have a real cpu fault */ + cpu_restore_state(cs, retaddr, true); + + fi.type = ARMFault_Alignment; + arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi); +} + +/* + * arm_cpu_do_transaction_failed: handle a memory system error response + * (eg "no device/memory present at address") by raising an external abort + * exception + */ +void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr) +{ + ARMCPU *cpu = ARM_CPU(cs); + ARMMMUFaultInfo fi = { 0 }; + + /* now we have a real cpu fault */ + cpu_restore_state(cs, retaddr, true); + + fi.ea = arm_extabort_type(response); + fi.type = ARMFault_SyncExternal; + arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi); +} + +bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + struct uc_struct *uc = cs->uc; + ARMCPU *cpu = ARM_CPU(cs); + + hwaddr phys_addr; + target_ulong page_size; + int prot, ret; + MemTxAttrs attrs = { 0 }; + ARMMMUFaultInfo fi = { 0 }; + + /* + * Walk the page table and (if the mapping exists) add the page + * to the TLB. On success, return true. Otherwise, if probing, + * return false. Otherwise populate fsr with ARM DFSR/IFSR fault + * register format, and signal the fault. + */ + ret = get_phys_addr(&cpu->env, address, access_type, + core_to_arm_mmu_idx(&cpu->env, mmu_idx), + &phys_addr, &attrs, &prot, &page_size, &fi, NULL); + if (likely(!ret)) { + /* + * Map a single [sub]page. Regions smaller than our declared + * target page size are handled specially, so for those we + * pass in the exact addresses. + */ + if (page_size >= TARGET_PAGE_SIZE) { + phys_addr &= TARGET_PAGE_MASK; + address &= TARGET_PAGE_MASK; + } + tlb_set_page_with_attrs(cs, address, phys_addr, attrs, + prot, mmu_idx, page_size); + return true; + } else if (probe) { + return false; + } else { + /* now we have a real cpu fault */ + cpu_restore_state(cs, retaddr, true); + arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi); + } +} diff --git a/qemu/target-arm/translate-a64.c b/qemu/target/arm/translate-a64.c similarity index 61% rename from qemu/target-arm/translate-a64.c rename to qemu/target/arm/translate-a64.c index 52337b9f..a9eb1ca0 100644 --- a/qemu/target-arm/translate-a64.c +++ b/qemu/target/arm/translate-a64.c @@ -16,29 +16,26 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ -#include <stdarg.h> -#include <stdlib.h> -#include <stdio.h> -#include <string.h> -#include "unicorn/platform.h" +#include "qemu/osdep.h" #include "cpu.h" -#include "tcg-op.h" +#include "exec/exec-all.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" #include "qemu/log.h" #include "arm_ldst.h" #include "translate.h" #include "internals.h" #include "qemu/host-utils.h" +#include "exec/gen-icount.h" + #include "exec/helper-proto.h" #include "exec/helper-gen.h" -#include "exec/gen-icount.h" - -#ifdef CONFIG_USER_ONLY -static TCGv_i64 cpu_exclusive_test; -static TCGv_i32 cpu_exclusive_info; -#endif +#include "translate-a64.h" +#include "qemu/atomic128.h" +#include "kvm-consts.h" static const char *regnames[] = { "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", @@ -66,108 +63,206 @@ typedef struct AArch64DecodeTable { } AArch64DecodeTable; /* Function prototype for gen_ functions for calling Neon helpers */ -typedef void NeonGenOneOpEnvFn(TCGContext *t, TCGv_i32, TCGv_ptr, TCGv_i32); -typedef void NeonGenTwoOpFn(TCGContext *t, TCGv_i32, TCGv_i32, TCGv_i32); -typedef void NeonGenTwoOpEnvFn(TCGContext *t, TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32); -typedef void NeonGenTwo64OpFn(TCGContext *t, TCGv_i64, TCGv_i64, TCGv_i64); -typedef void NeonGenTwo64OpEnvFn(TCGContext *t, TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64); -typedef void NeonGenNarrowFn(TCGContext *t, TCGv_i32, TCGv_i64); -typedef void NeonGenNarrowEnvFn(TCGContext *t, TCGv_i32, TCGv_ptr, TCGv_i64); -typedef void NeonGenWidenFn(TCGContext *t, TCGv_i64, TCGv_i32); -typedef void NeonGenTwoSingleOPFn(TCGContext *t, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); -typedef void NeonGenTwoDoubleOPFn(TCGContext *t, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr); -typedef void NeonGenOneOpFn(TCGContext *t, TCGv_i64, TCGv_i64); -typedef void CryptoTwoOpEnvFn(TCGContext *t, TCGv_ptr, TCGv_i32, TCGv_i32); -typedef void CryptoThreeOpEnvFn(TCGContext *t, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32); +typedef void NeonGenOneOpEnvFn(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i32); +typedef void NeonGenTwoOpFn(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32); +typedef void NeonGenTwoOpEnvFn(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32); +typedef void NeonGenTwo64OpFn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64); +typedef void NeonGenTwo64OpEnvFn(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64); +typedef void NeonGenNarrowFn(TCGContext *, TCGv_i32, TCGv_i64); +typedef void NeonGenNarrowEnvFn(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i64); +typedef void NeonGenWidenFn(TCGContext *, TCGv_i64, TCGv_i32); +typedef void NeonGenTwoSingleOPFn(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); +typedef void NeonGenTwoDoubleOPFn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr); +typedef void NeonGenOneOpFn(TCGContext *, TCGv_i64, TCGv_i64); +typedef void CryptoTwoOpFn(TCGContext *, TCGv_ptr, TCGv_ptr); +typedef void CryptoThreeOpIntFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i32); +typedef void CryptoThreeOpFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr); +typedef void AtomicThreeOpFn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp); /* initialize TCG globals.
*/ void a64_translate_init(struct uc_struct *uc) { - TCGContext *tcg_ctx = uc->tcg_ctx; int i; + TCGContext *tcg_ctx = uc->tcg_ctx; - tcg_ctx->cpu_pc = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + tcg_ctx->cpu_pc_arm64 = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, pc), "pc"); for (i = 0; i < 32; i++) { - tcg_ctx->cpu_X[i] = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + tcg_ctx->cpu_X[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, xregs[i]), regnames[i]); } - tcg_ctx->cpu_NF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, NF), "NF"); - tcg_ctx->cpu_ZF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, ZF), "ZF"); - tcg_ctx->cpu_CF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, CF), "CF"); - tcg_ctx->cpu_VF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, VF), "VF"); - - tcg_ctx->cpu_exclusive_addr = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUARMState, exclusive_addr), "exclusive_addr"); - tcg_ctx->cpu_exclusive_val = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUARMState, exclusive_val), "exclusive_val"); - tcg_ctx->cpu_exclusive_high = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + tcg_ctx->cpu_exclusive_high = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, exclusive_high), "exclusive_high"); -#ifdef CONFIG_USER_ONLY - cpu_exclusive_test = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUARMState, exclusive_test), "exclusive_test"); - cpu_exclusive_info = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, - offsetof(CPUARMState, exclusive_info), "exclusive_info"); -#endif } -#if 0 -void aarch64_cpu_dump_state(CPUState *cs, FILE *f, - fprintf_function cpu_fprintf, int flags) +/* + * Return the core mmu_idx to use for A64 "unprivileged load/store" insns + */ +static int get_a64_user_mem_index(DisasContext *s) { - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint32_t psr = pstate_read(env); - int i; + /* + * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL, + * which is the usual mmu_idx for this cpu state. + */ + ARMMMUIdx useridx = s->mmu_idx; - cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n", - env->pc, env->xregs[31]); - for (i = 0; i < 31; i++) { - cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]); - if ((i % 4) == 3) { - cpu_fprintf(f, "\n"); - } else { - cpu_fprintf(f, " "); + if (s->unpriv) { + /* + * We have pre-computed the condition for AccType_UNPRIV. + * Therefore we should never get here with a mmu_idx for + * which we do not know the corresponding user mmu_idx. + */ + switch (useridx) { + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + useridx = ARMMMUIdx_E10_0; + break; + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + useridx = ARMMMUIdx_E20_0; + break; + case ARMMMUIdx_SE10_1: + case ARMMMUIdx_SE10_1_PAN: + useridx = ARMMMUIdx_SE10_0; + break; + default: + g_assert_not_reached(); } } - cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n", - psr, - psr & PSTATE_N ? 'N' : '-', - psr & PSTATE_Z ? 'Z' : '-', - psr & PSTATE_C ? 'C' : '-', - psr & PSTATE_V ? 
'V' : '-'); - cpu_fprintf(f, "\n"); + return arm_to_core_mmu_idx(useridx); +} - if (flags & CPU_DUMP_FPU) { - int numvfpregs = 32; - for (i = 0; i < numvfpregs; i += 2) { - uint64_t vlo = float64_val(env->vfp.regs[i * 2]); - uint64_t vhi = float64_val(env->vfp.regs[(i * 2) + 1]); - cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 " ", - i, vhi, vlo); - vlo = float64_val(env->vfp.regs[(i + 1) * 2]); - vhi = float64_val(env->vfp.regs[((i + 1) * 2) + 1]); - cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "\n", - i + 1, vhi, vlo); - } - cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n", - vfp_get_fpcr(env), vfp_get_fpsr(env)); +static void reset_btype(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (s->btype != 0) { + TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); + tcg_gen_st_i32(tcg_ctx, zero, tcg_ctx->cpu_env, offsetof(CPUARMState, btype)); + tcg_temp_free_i32(tcg_ctx, zero); + s->btype = 0; } } -#endif -void gen_a64_set_pc_im(DisasContext *s, uint64_t val) +static void set_btype(DisasContext *s, int val) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_pc, val); + TCGv_i32 tcg_val; + + /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */ + tcg_debug_assert(val >= 1 && val <= 3); + + tcg_val = tcg_const_i32(tcg_ctx, val); + tcg_gen_st_i32(tcg_ctx, tcg_val, tcg_ctx->cpu_env, offsetof(CPUARMState, btype)); + tcg_temp_free_i32(tcg_ctx, tcg_val); + s->btype = -1; } -static void gen_exception_internal(DisasContext *s, int excp) +void gen_a64_set_pc_im(TCGContext *tcg_ctx, uint64_t val) +{ + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_pc_arm64, val); +} + +/* + * Handle Top Byte Ignore (TBI) bits. + * + * If address tagging is enabled via the TCR TBI bits: + * + for EL2 and EL3 there is only one TBI bit, and if it is set + * then the address is zero-extended, clearing bits [63:56] + * + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0 + * and TBI1 controls addresses with bit 55 == 1. + * If the appropriate TBI bit is set for the address then + * the address is sign-extended from bit 55 into bits [63:56] + * + * Here we have concatenated TBI{1,0} into tbi. + */ +static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst, + TCGv_i64 src, int tbi) { TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (tbi == 0) { + /* Load unmodified address */ + tcg_gen_mov_i64(tcg_ctx, dst, src); + } else if (!regime_has_2_ranges(s->mmu_idx)) { + /* Force tag byte to all zero */ + tcg_gen_extract_i64(tcg_ctx, dst, src, 0, 56); + } else { + /* Sign-extend from bit 55. */ + tcg_gen_sextract_i64(tcg_ctx, dst, src, 0, 56); + + if (tbi != 3) { + TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); + + /* + * The two TBI bits differ. + * If tbi0, then !tbi1: only use the extension if positive. + * If !tbi0, then tbi1: only use the extension if negative. + */ + tcg_gen_movcond_i64(tcg_ctx, tbi == 1 ? TCG_COND_GE : TCG_COND_LT, + dst, dst, tcg_zero, dst, src); + tcg_temp_free_i64(tcg_ctx, tcg_zero); + } + } +} + +static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src) +{ + /* + * If address tagging is enabled for instructions via the TCR TBI bits, + * then loading an address into the PC will clear out any tag. + */ + gen_top_byte_ignore(s, s->uc->tcg_ctx->cpu_pc_arm64, src, s->tbii); +} + +/* + * Return a "clean" address for ADDR according to TBID. + * This is always a fresh temporary, as we need to be able to + * increment this independently of a dirty write-back address.
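+ * Unicorn builds only the system (softmmu) configuration, so the plain + * copy in the body below suffices; the TBI byte is dropped via the TLB + * during translation.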
+ */ +static TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 clean = new_tmp_a64(s); + /* + * In order to get the correct value in the FAR_ELx register, + * we must present the memory subsystem with the "dirty" address + * including the TBI. In system mode we can make this work via + * the TLB, dropping the TBI during translation. But for user-only + * mode we don't have that option, and must remove the top byte now. + */ + tcg_gen_mov_i64(tcg_ctx, clean, addr); + return clean; +} + +typedef struct DisasCompare64 { + TCGCond cond; + TCGv_i64 value; +} DisasCompare64; + +static void a64_test_cc(TCGContext *tcg_ctx, DisasCompare64 *c64, int cc) +{ + DisasCompare c32; + + arm_test_cc(tcg_ctx, &c32, cc); + + /* Sign-extend the 32-bit value so that the GE/LT comparisons work + * properly. The NE/EQ comparisons are also fine with this choice. */ + c64->cond = c32.cond; + c64->value = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext_i32_i64(tcg_ctx, c64->value, c32.value); + + arm_free_cc(tcg_ctx, &c32); +} + +static void a64_free_cc(TCGContext *tcg_ctx, DisasCompare64 *c64) +{ + tcg_temp_free_i64(tcg_ctx, c64->value); +} + +static void gen_exception_internal(TCGContext *tcg_ctx, int excp) +{ TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); assert(excp_is_internal(excp)); @@ -175,42 +270,33 @@ static void gen_exception_internal(DisasContext *s, int excp) tcg_temp_free_i32(tcg_ctx, tcg_excp); } -static void gen_exception(DisasContext *s, int excp, uint32_t syndrome) +static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); - TCGv_i32 tcg_syn = tcg_const_i32(tcg_ctx, syndrome); + gen_a64_set_pc_im(tcg_ctx, pc); + gen_exception_internal(tcg_ctx, excp); + s->base.is_jmp = DISAS_NORETURN; +} - gen_helper_exception_with_syndrome(tcg_ctx, tcg_ctx->cpu_env, tcg_excp, tcg_syn); +static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp, + uint32_t syndrome, uint32_t target_el) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_a64_set_pc_im(tcg_ctx, pc); + gen_exception(tcg_ctx, excp, syndrome, target_el); + s->base.is_jmp = DISAS_NORETURN; +} + +static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_syn; + + gen_a64_set_pc_im(tcg_ctx, s->pc_curr); + tcg_syn = tcg_const_i32(tcg_ctx, syndrome); + gen_helper_exception_bkpt_insn(tcg_ctx, tcg_ctx->cpu_env, tcg_syn); tcg_temp_free_i32(tcg_ctx, tcg_syn); - tcg_temp_free_i32(tcg_ctx, tcg_excp); -} - -static void gen_exception_internal_insn(DisasContext *s, int offset, int excp) -{ - gen_a64_set_pc_im(s, s->pc - offset); - gen_exception_internal(s, excp); - s->is_jmp = DISAS_EXC; -} - -static void gen_exception_insn(DisasContext *s, int offset, int excp, - uint32_t syndrome) -{ - gen_a64_set_pc_im(s, s->pc - offset); - gen_exception(s, excp, syndrome); - s->is_jmp = DISAS_EXC; -} - -static void gen_ss_advance(DisasContext *s) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - /* If the singlestep state is Active-not-pending, advance to - * Active-pending. - */ - if (s->ss_active) { - s->pstate_ss = 0; - gen_helper_clear_pstate_ss(tcg_ctx, tcg_ctx->cpu_env); - } + s->base.is_jmp = DISAS_NORETURN; } static void gen_step_complete_exception(DisasContext *s) @@ -225,21 +311,23 @@ static void gen_step_complete_exception(DisasContext *s) * of the exception, and our syndrome information is always correct. 
*/ gen_ss_advance(s); - gen_exception(s, EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex)); - s->is_jmp = DISAS_EXC; + gen_swstep_exception(s, 1, s->is_ldex); + s->base.is_jmp = DISAS_NORETURN; } static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest) { + struct uc_struct *uc = s->uc; /* No direct tb linking with singlestep (either QEMU's or the ARM * debug architecture kind) or deterministic io */ - if (s->singlestep_enabled || s->ss_active || (s->tb->cflags & CF_LAST_IO)) { + if (s->base.singlestep_enabled || s->ss_active || + (tb_cflags(s->base.tb) & CF_LAST_IO)) { return false; } /* Only link tbs from inside the same guest page */ - if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) { + if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) { return false; } @@ -248,50 +336,39 @@ static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest) static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest) { - TranslationBlock *tb; TCGContext *tcg_ctx = s->uc->tcg_ctx; + TranslationBlock *tb; - tb = s->tb; + tb = s->base.tb; if (use_goto_tb(s, n, dest)) { tcg_gen_goto_tb(tcg_ctx, n); - gen_a64_set_pc_im(s, dest); - tcg_gen_exit_tb(tcg_ctx, (intptr_t)tb + n); - s->is_jmp = DISAS_TB_JUMP; + gen_a64_set_pc_im(tcg_ctx, dest); + tcg_gen_exit_tb(tcg_ctx, tb, n); + s->base.is_jmp = DISAS_NORETURN; } else { - gen_a64_set_pc_im(s, dest); + gen_a64_set_pc_im(tcg_ctx, dest); if (s->ss_active) { gen_step_complete_exception(s); - } else if (s->singlestep_enabled) { - gen_exception_internal(s, EXCP_DEBUG); + } else if (s->base.singlestep_enabled) { + gen_exception_internal(tcg_ctx, EXCP_DEBUG); } else { - tcg_gen_exit_tb(tcg_ctx, 0); - s->is_jmp = DISAS_TB_JUMP; + tcg_gen_lookup_and_goto_ptr(tcg_ctx); + s->base.is_jmp = DISAS_NORETURN; } } } -static void unallocated_encoding(DisasContext *s) +void unallocated_encoding(DisasContext *s) { /* Unallocated and reserved encodings are uncategorized */ - gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized()); + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), + default_exception_el(s)); } -#define unsupported_encoding(s, insn) \ - do { \ - qemu_log_mask(LOG_UNIMP, \ - "%s:%d: unsupported instruction encoding 0x%08x " \ - "at pc=%016" PRIx64 "\n", \ - __FILE__, __LINE__, insn, s->pc - 4); \ - unallocated_encoding(s); \ - } while (0); - static void init_tmp_a64_array(DisasContext *s) { #ifdef CONFIG_DEBUG_TCG - int i; - for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) { - TCGV_UNUSED_I64(s->tmp_a64[i]); - } + memset(s->tmp_a64, 0, sizeof(s->tmp_a64)); #endif s->tmp_a64_count = 0; } @@ -306,14 +383,14 @@ static void free_tmp_a64(DisasContext *s) init_tmp_a64_array(s); } -static TCGv_i64 new_tmp_a64(DisasContext *s) +TCGv_i64 new_tmp_a64(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; assert(s->tmp_a64_count < TMP_A64_MAX); return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64(tcg_ctx); } -static TCGv_i64 new_tmp_a64_zero(DisasContext *s) +TCGv_i64 new_tmp_a64_zero(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 t = new_tmp_a64(s); @@ -333,31 +410,29 @@ static TCGv_i64 new_tmp_a64_zero(DisasContext *s) * * In instruction register encoding 31 can refer to ZR (zero register) or * the SP (stack pointer) depending on context. In QEMU's case we map SP - * to tcg_ctx->cpu_X[31] and ZR accesses to a temporary which can be discarded. + * to cpu_X[31] and ZR accesses to a temporary which can be discarded. * This is the point of the _sp forms. 
*/ -static TCGv_i64 cpu_reg(DisasContext *s, int reg) +TCGv_i64 cpu_reg(DisasContext *s, int reg) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; if (reg == 31) { return new_tmp_a64_zero(s); } else { - return tcg_ctx->cpu_X[reg]; + return s->uc->tcg_ctx->cpu_X[reg]; } } /* register access for when 31 == SP */ -static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg) +TCGv_i64 cpu_reg_sp(DisasContext *s, int reg) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; - return tcg_ctx->cpu_X[reg]; + return s->uc->tcg_ctx->cpu_X[reg]; } /* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64 * representing the register contents. This TCGv is an auto-freed * temporary so it need not be explicitly freed, and may be modified. */ -static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf) +TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 v = new_tmp_a64(s); @@ -373,7 +448,7 @@ static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf) return v; } -static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) +TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 v = new_tmp_a64(s); @@ -385,68 +460,20 @@ static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) return v; } -/* We should have at some point before trying to access an FP register - * done the necessary access check, so assert that - * (a) we did the check and - * (b) we didn't then just plough ahead anyway if it failed. - * Print the instruction pattern in the abort message so we can figure - * out what we need to fix if a user encounters this problem in the wild. - */ -static inline void assert_fp_access_checked(DisasContext *s) -{ -#ifdef CONFIG_DEBUG_TCG - if (unlikely(!s->fp_access_checked || !s->cpacr_fpen)) { - fprintf(stderr, "target-arm: FP access check missing for " - "instruction 0x%08x\n", s->insn); - abort(); - } -#endif -} - -/* Return the offset into CPUARMState of an element of specified - * size, 'element' places in from the least significant end of - * the FP/vector register Qn. - */ -static inline int vec_reg_offset(DisasContext *s, int regno, - int element, TCGMemOp size) -{ - int offs = offsetof(CPUARMState, vfp.regs[regno * 2]); -#ifdef HOST_WORDS_BIGENDIAN - /* This is complicated slightly because vfp.regs[2n] is - * still the low half and vfp.regs[2n+1] the high half - * of the 128 bit vector, even on big endian systems. - * Calculate the offset assuming a fully bigendian 128 bits, - * then XOR to account for the order of the two 64 bit halves. - */ - offs += (16 - ((element + 1) * (1 << size))); - offs ^= 8; -#else - offs += element * (1 << size); -#endif - assert_fp_access_checked(s); - return offs; -} - /* Return the offset into CPUARMState of a slice (from * the least significant end) of FP register Qn (ie * Dn, Sn, Hn or Bn). 
* (Note that this is not the same mapping as for A32; see cpu.h) */ -static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size) +static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size) { - int offs = offsetof(CPUARMState, vfp.regs[regno * 2]); -#ifdef HOST_WORDS_BIGENDIAN - offs += (8 - (1 << size)); -#endif - assert_fp_access_checked(s); - return offs; + return vec_reg_offset(s, regno, 0, size); } /* Offset of the high half of the 128 bit vector Qn */ static inline int fp_reg_hi_offset(DisasContext *s, int regno) { - assert_fp_access_checked(s); - return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]); + return vec_reg_offset(s, regno, 1, MO_64); } /* Convenience accessors for reading and writing single and double @@ -473,14 +500,41 @@ static TCGv_i32 read_fp_sreg(DisasContext *s, int reg) return v; } -static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v) +static TCGv_i32 read_fp_hreg(DisasContext *s, int reg) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); + TCGv_i32 v = tcg_temp_new_i32(tcg_ctx); - tcg_gen_st_i64(tcg_ctx, v, tcg_ctx->cpu_env, fp_reg_offset(s, reg, MO_64)); - tcg_gen_st_i64(tcg_ctx, tcg_zero, tcg_ctx->cpu_env, fp_reg_hi_offset(s, reg)); - tcg_temp_free_i64(tcg_ctx, tcg_zero); + tcg_gen_ld16u_i32(tcg_ctx, v, tcg_ctx->cpu_env, fp_reg_offset(s, reg, MO_16)); + return v; +} + +/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64). + * If SVE is not enabled, then there are only 128 bits in the vector. + */ +static void clear_vec_high(DisasContext *s, bool is_q, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned ofs = fp_reg_offset(s, rd, MO_64); + unsigned vsz = vec_full_reg_size(s); + + if (!is_q) { + TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); + tcg_gen_st_i64(tcg_ctx, tcg_zero, tcg_ctx->cpu_env, ofs + 8); + tcg_temp_free_i64(tcg_ctx, tcg_zero); + } + if (vsz > 16) { + tcg_gen_gvec_dup8i(tcg_ctx, ofs + 16, vsz - 16, vsz - 16, 0); + } +} + +void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned ofs = fp_reg_offset(s, reg, MO_64); + + tcg_gen_st_i64(tcg_ctx, v, tcg_ctx->cpu_env, ofs); + clear_vec_high(s, false, reg); } static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v) @@ -493,32 +547,132 @@ static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v) tcg_temp_free_i64(tcg_ctx, tmp); } -static TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx) +TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx, bool is_f16) { TCGv_ptr statusptr = tcg_temp_new_ptr(tcg_ctx); int offset; - /* In A64 all instructions (both FP and Neon) use the FPCR; - * there is no equivalent of the A32 Neon "standard FPSCR value" - * and all operations use vfp.fp_status. + /* In A64 all instructions (both FP and Neon) use the FPCR; there + * is no equivalent of the A32 Neon "standard FPSCR value". + * However half-precision operations operate under a different + * FZ16 flag and use vfp.fp_status_f16 instead of vfp.fp_status. */ - offset = offsetof(CPUARMState, vfp.fp_status); + if (is_f16) { + offset = offsetof(CPUARMState, vfp.fp_status_f16); + } else { + offset = offsetof(CPUARMState, vfp.fp_status); + } tcg_gen_addi_ptr(tcg_ctx, statusptr, tcg_ctx->cpu_env, offset); return statusptr; } +/* Expand a 2-operand AdvSIMD vector operation using an expander function. 
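+ * (These wrappers convert vector register numbers into CPUARMState offsets + * for the generic gvec expanders.)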
*/ +static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn, + GVecGen2Fn *gvec_fn, int vece) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gvec_fn(tcg_ctx, vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), + is_q ? 16 : 8, vec_full_reg_size(s)); +} + +/* Expand a 2-operand + immediate AdvSIMD vector operation using + * an expander function. + */ +static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn, + int64_t imm, GVecGen2iFn *gvec_fn, int vece) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gvec_fn(tcg_ctx, vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), + imm, is_q ? 16 : 8, vec_full_reg_size(s)); +} + +/* Expand a 3-operand AdvSIMD vector operation using an expander function. */ +static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm, + GVecGen3Fn *gvec_fn, int vece) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gvec_fn(tcg_ctx, vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s)); +} + +/* Expand a 4-operand AdvSIMD vector operation using an expander function. */ +static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm, + int rx, GVecGen4Fn *gvec_fn, int vece) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gvec_fn(tcg_ctx, vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx), + is_q ? 16 : 8, vec_full_reg_size(s)); +} + +/* Expand a 2-operand + immediate AdvSIMD vector operation using + * an op descriptor. + */ +static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd, + int rn, int64_t imm, const GVecGen2i *gvec_op) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_gvec_2i(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), + is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op); +} + +/* Expand a 3-operand AdvSIMD vector operation using an op descriptor. */ +static void gen_gvec_op3(DisasContext *s, bool is_q, int rd, + int rn, int rm, const GVecGen3 *gvec_op) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_gvec_3(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), is_q ? 16 : 8, + vec_full_reg_size(s), gvec_op); +} + +/* Expand a 3-operand operation using an out-of-line helper. */ +static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd, + int rn, int rm, int data, gen_helper_gvec_3 *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + is_q ? 16 : 8, vec_full_reg_size(s), data, fn); +} + +/* Expand a 3-operand + env pointer operation using + * an out-of-line helper. + */ +static void gen_gvec_op3_env(DisasContext *s, bool is_q, int rd, + int rn, int rm, gen_helper_gvec_3_ptr *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), tcg_ctx->cpu_env, + is_q ? 16 : 8, vec_full_reg_size(s), 0, fn); +} + +/* Expand a 3-operand + fpstatus pointer + simd data value operation using + * an out-of-line helper. 
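/* A hypothetical use of the expander wrappers above: an integer vector
 * ADD (Vd = Vn + Vm) could be emitted by passing the stock gvec
 * expander to gen_gvec_fn3, which fills in the register offsets plus
 * the 8/16-byte operation size and the full register size for tail
 * zeroing. emit_vec_add is illustrative only; it assumes this fork's
 * tcg_gen_gvec_add matches the GVecGen3Fn signature. */
static void emit_vec_add(DisasContext *s, bool is_q,
                         int rd, int rn, int rm, int vece)
{
    gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, vece);
}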
+ */ +static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn, + int rm, bool is_fp16, int data, + gen_helper_gvec_3_ptr *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, is_fp16); + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), fpst, + is_q ? 16 : 8, vec_full_reg_size(s), data, fn); + tcg_temp_free_ptr(tcg_ctx, fpst); +} + /* Set ZF and NF based on a 64 bit result. This is alas fiddlier * than the 32 bit equivalent. */ static inline void gen_set_NZ64(TCGContext *tcg_ctx, TCGv_i64 result) { - TCGv_i64 flag = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, flag, result, 0); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_ZF, flag); - tcg_gen_shri_i64(tcg_ctx, flag, result, 32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_NF, flag); - tcg_temp_free_i64(tcg_ctx, flag); + tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF, result); + tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); } /* Set NZCV as for a logical operation: NZ as per result, CV cleared. */ @@ -527,17 +681,16 @@ static inline void gen_logic_CC(TCGContext *tcg_ctx, int sf, TCGv_i64 result) if (sf) { gen_set_NZ64(tcg_ctx, result); } else { - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_ZF, result); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_NF, result); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_ZF, result); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_ZF); } tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, 0); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0); } /* dest = T0 + T1; compute C, N, V and Z flags */ -static void gen_add_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +static void gen_add_CC(TCGContext *tcg_ctx, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sf) { TCGv_i64 result, flag, tmp; result = tcg_temp_new_i64(tcg_ctx); @@ -547,7 +700,7 @@ static void gen_add_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv tcg_gen_movi_i64(tcg_ctx, tmp, 0); tcg_gen_add2_i64(tcg_ctx, result, flag, t0, tmp, t1, tmp); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, flag); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, flag); gen_set_NZ64(tcg_ctx, result); @@ -555,8 +708,7 @@ static void gen_add_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv tcg_gen_xor_i64(tcg_ctx, tmp, t0, t1); tcg_gen_andc_i64(tcg_ctx, flag, flag, tmp); tcg_temp_free_i64(tcg_ctx, tmp); - tcg_gen_shri_i64(tcg_ctx, flag, flag, 32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, flag); + tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, flag); tcg_gen_mov_i64(tcg_ctx, dest, result); tcg_temp_free_i64(tcg_ctx, result); @@ -568,8 +720,8 @@ static void gen_add_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); tcg_gen_movi_i32(tcg_ctx, tmp, 0); - tcg_gen_trunc_i64_i32(tcg_ctx, t0_32, t0); - tcg_gen_trunc_i64_i32(tcg_ctx, t1_32, t1); + tcg_gen_extrl_i64_i32(tcg_ctx, t0_32, t0); + tcg_gen_extrl_i64_i32(tcg_ctx, t1_32, t1); tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0_32, tmp, t1_32, tmp); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0_32); @@ -584,9 +736,8 @@ static void gen_add_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv } /* dest = T0 - T1; compute C, N, V and Z flags */ -static void gen_sub_CC(DisasContext *s, 
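/* C model of the reworked gen_set_NZ64. QEMU stores the flags as
 * 32-bit fields where Z is set iff ZF == 0 and N is bit 31 of NF, so
 * splitting the 64-bit result into halves and OR-ing them yields both
 * flags in two TCG ops instead of the old setcond/shift/trunc chain. */
#include <stdint.h>

static void set_nz64(uint64_t result, uint32_t *zf, uint32_t *nf)
{
    *zf = (uint32_t)result;            /* low half */
    *nf = (uint32_t)(result >> 32);    /* high half; bit 31 is the sign */
    *zf |= *nf;                        /* zero iff all 64 bits are zero */
}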
int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +static void gen_sub_CC(TCGContext *tcg_ctx, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sf) { /* 64 bit arithmetic */ TCGv_i64 result, flag, tmp; @@ -598,15 +749,14 @@ static void gen_sub_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv gen_set_NZ64(tcg_ctx, result); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_GEU, flag, t0, t1); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, flag); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, flag); tcg_gen_xor_i64(tcg_ctx, flag, result, t0); tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_xor_i64(tcg_ctx, tmp, t0, t1); tcg_gen_and_i64(tcg_ctx, flag, flag, tmp); tcg_temp_free_i64(tcg_ctx, tmp); - tcg_gen_shri_i64(tcg_ctx, flag, flag, 32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, flag); + tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, flag); tcg_gen_mov_i64(tcg_ctx, dest, result); tcg_temp_free_i64(tcg_ctx, flag); tcg_temp_free_i64(tcg_ctx, result); @@ -616,8 +766,8 @@ static void gen_sub_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv TCGv_i32 t1_32 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tmp; - tcg_gen_trunc_i64_i32(tcg_ctx, t0_32, t0); - tcg_gen_trunc_i64_i32(tcg_ctx, t1_32, t1); + tcg_gen_extrl_i64_i32(tcg_ctx, t0_32, t0); + tcg_gen_extrl_i64_i32(tcg_ctx, t1_32, t1); tcg_gen_sub_i32(tcg_ctx, tcg_ctx->cpu_NF, t0_32, t1_32); tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); tcg_gen_setcond_i32(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_CF, t0_32, t1_32); @@ -633,9 +783,8 @@ static void gen_sub_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv } /* dest = T0 + T1 + CF; do not compute flags. */ -static void gen_adc(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +static void gen_adc(TCGContext *tcg_ctx, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 flag = tcg_temp_new_i64(tcg_ctx); tcg_gen_extu_i32_i64(tcg_ctx, flag, tcg_ctx->cpu_CF); tcg_gen_add_i64(tcg_ctx, dest, t0, t1); @@ -648,9 +797,8 @@ static void gen_adc(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i6 } /* dest = T0 + T1 + CF; compute C, N, V and Z flags. 
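/* C model of the 64-bit SUBS path above: carry means "no borrow"
 * (unsigned t0 >= t1, the TCG_COND_GEU setcond) and overflow comes
 * from the sign bits, matching the xor/and sequence on 'flag'. */
#include <stdbool.h>
#include <stdint.h>

static uint64_t subs64(uint64_t t0, uint64_t t1, bool *cf, bool *vf)
{
    uint64_t result = t0 - t1;
    *cf = t0 >= t1;
    *vf = (((result ^ t0) & (t0 ^ t1)) >> 63) & 1;
    return result;
}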
*/ -static void gen_adc_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +static void gen_adc_CC(TCGContext *tcg_ctx, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; if (sf) { TCGv_i64 result, cf_64, vf_64, tmp; result = tcg_temp_new_i64(tcg_ctx); @@ -661,14 +809,13 @@ static void gen_adc_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv tcg_gen_extu_i32_i64(tcg_ctx, cf_64, tcg_ctx->cpu_CF); tcg_gen_add2_i64(tcg_ctx, result, cf_64, t0, tmp, cf_64, tmp); tcg_gen_add2_i64(tcg_ctx, result, cf_64, result, cf_64, t1, tmp); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, cf_64); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, cf_64); gen_set_NZ64(tcg_ctx, result); tcg_gen_xor_i64(tcg_ctx, vf_64, result, t0); tcg_gen_xor_i64(tcg_ctx, tmp, t0, t1); tcg_gen_andc_i64(tcg_ctx, vf_64, vf_64, tmp); - tcg_gen_shri_i64(tcg_ctx, vf_64, vf_64, 32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, vf_64); + tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, vf_64); tcg_gen_mov_i64(tcg_ctx, dest, result); @@ -682,8 +829,8 @@ static void gen_adc_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv t1_32 = tcg_temp_new_i32(tcg_ctx); tmp = tcg_const_i32(tcg_ctx, 0); - tcg_gen_trunc_i64_i32(tcg_ctx, t0_32, t0); - tcg_gen_trunc_i64_i32(tcg_ctx, t1_32, t1); + tcg_gen_extrl_i64_i32(tcg_ctx, t0_32, t0); + tcg_gen_extrl_i64_i32(tcg_ctx, t1_32, t1); tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0_32, tmp, tcg_ctx->cpu_CF, tmp); tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t1_32, tmp); @@ -707,26 +854,51 @@ static void gen_adc_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv * Store from GPR register to memory. */ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source, - TCGv_i64 tcg_addr, int size, int memidx) + TCGv_i64 tcg_addr, int size, int memidx, + bool iss_valid, + unsigned int iss_srt, + bool iss_sf, bool iss_ar) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; g_assert(size <= 3); - tcg_gen_qemu_st_i64(s->uc, source, tcg_addr, memidx, MO_TE + size); + tcg_gen_qemu_st_i64(tcg_ctx, source, tcg_addr, memidx, s->be_data + size); + + if (iss_valid) { + uint32_t syn; + + syn = syn_data_abort_with_iss(0, + size, + false, + iss_srt, + iss_sf, + iss_ar, + 0, 0, 0, 0, 0, false); + disas_set_insn_syndrome(s, syn); + } } static void do_gpr_st(DisasContext *s, TCGv_i64 source, - TCGv_i64 tcg_addr, int size) + TCGv_i64 tcg_addr, int size, + bool iss_valid, + unsigned int iss_srt, + bool iss_sf, bool iss_ar) { - do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s)); + do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s), + iss_valid, iss_srt, iss_sf, iss_ar); } /* * Load from memory to GPR register */ -static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, - int size, bool is_signed, bool extend, int memidx) +static void do_gpr_ld_memidx(DisasContext *s, + TCGv_i64 dest, TCGv_i64 tcg_addr, + int size, bool is_signed, + bool extend, int memidx, + bool iss_valid, unsigned int iss_srt, + bool iss_sf, bool iss_ar) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGMemOp memop = MO_TE + size; + MemOp memop = s->be_data + size; g_assert(size <= 3); @@ -734,19 +906,36 @@ static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, memop += MO_SIGN; } - tcg_gen_qemu_ld_i64(s->uc, dest, tcg_addr, memidx, memop); + tcg_gen_qemu_ld_i64(tcg_ctx, dest, tcg_addr, memidx, memop); if (extend && is_signed) { g_assert(size < 3); 
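/* C model of gen_adc_CC's 64-bit path: the two tcg_gen_add2_i64 calls
 * correspond to adding the carry-in and then t1, accumulating the
 * carry-out; at most one of the two partial carries can be set. */
#include <stdbool.h>
#include <stdint.h>

static uint64_t adcs64(uint64_t t0, uint64_t t1, bool cin,
                       bool *cf, bool *vf)
{
    uint64_t r1 = t0 + cin;
    uint64_t result = r1 + t1;
    *cf = (r1 < t0) | (result < r1);
    *vf = (((result ^ t0) & ~(t0 ^ t1)) >> 63) & 1;   /* xor/andc above */
    return result;
}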
tcg_gen_ext32u_i64(tcg_ctx, dest, dest); } + + if (iss_valid) { + uint32_t syn; + + syn = syn_data_abort_with_iss(0, + size, + is_signed, + iss_srt, + iss_sf, + iss_ar, + 0, 0, 0, 0, 0, false); + disas_set_insn_syndrome(s, syn); + } } -static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, - int size, bool is_signed, bool extend) +static void do_gpr_ld(DisasContext *s, + TCGv_i64 dest, TCGv_i64 tcg_addr, + int size, bool is_signed, bool extend, + bool iss_valid, unsigned int iss_srt, + bool iss_sf, bool iss_ar) { do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend, - get_mem_index(s)); + get_mem_index(s), + iss_valid, iss_srt, iss_sf, iss_ar); } /* @@ -759,13 +948,18 @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size) TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_offset(s, srcidx, MO_64)); if (size < 4) { - tcg_gen_qemu_st_i64(s->uc, tmp, tcg_addr, get_mem_index(s), MO_TE + size); + tcg_gen_qemu_st_i64(tcg_ctx, tmp, tcg_addr, get_mem_index(s), + s->be_data + size); } else { + bool be = s->be_data == MO_BE; TCGv_i64 tcg_hiaddr = tcg_temp_new_i64(tcg_ctx); - tcg_gen_qemu_st_i64(s->uc, tmp, tcg_addr, get_mem_index(s), MO_TEQ); - tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_hi_offset(s, srcidx)); + tcg_gen_addi_i64(tcg_ctx, tcg_hiaddr, tcg_addr, 8); - tcg_gen_qemu_st_i64(s->uc, tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_st_i64(tcg_ctx, tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s), + s->be_data | MO_Q); + tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_hi_offset(s, srcidx)); + tcg_gen_qemu_st_i64(tcg_ctx, tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s), + s->be_data | MO_Q); tcg_temp_free_i64(tcg_ctx, tcg_hiaddr); } @@ -783,17 +977,21 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) TCGv_i64 tmphi; if (size < 4) { - TCGMemOp memop = MO_TE + size; + MemOp memop = s->be_data + size; tmphi = tcg_const_i64(tcg_ctx, 0); - tcg_gen_qemu_ld_i64(s->uc, tmplo, tcg_addr, get_mem_index(s), memop); + tcg_gen_qemu_ld_i64(tcg_ctx, tmplo, tcg_addr, get_mem_index(s), memop); } else { + bool be = s->be_data == MO_BE; TCGv_i64 tcg_hiaddr; + tmphi = tcg_temp_new_i64(tcg_ctx); tcg_hiaddr = tcg_temp_new_i64(tcg_ctx); - tcg_gen_qemu_ld_i64(s->uc, tmplo, tcg_addr, get_mem_index(s), MO_TEQ); tcg_gen_addi_i64(tcg_ctx, tcg_hiaddr, tcg_addr, 8); - tcg_gen_qemu_ld_i64(s->uc, tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ); + tcg_gen_qemu_ld_i64(tcg_ctx, tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s), + s->be_data | MO_Q); + tcg_gen_qemu_ld_i64(tcg_ctx, tmphi, be ? 
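/* Sketch of the 128-bit store ordering in do_fp_st: the quad value is
 * written as two 64-bit accesses, and for a big-endian guest the high
 * half must land at the lower address. 'store64' stands in for the
 * tcg_gen_qemu_st_i64 calls. */
#include <stdbool.h>
#include <stdint.h>

static void store_q128(uint64_t lo, uint64_t hi, uint64_t addr, bool be,
                       void (*store64)(uint64_t val, uint64_t addr))
{
    uint64_t hiaddr = addr + 8;
    store64(lo, be ? hiaddr : addr);
    store64(hi, be ? addr : hiaddr);
}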
tcg_addr : tcg_hiaddr, get_mem_index(s), + s->be_data | MO_Q); tcg_temp_free_i64(tcg_ctx, tcg_hiaddr); } @@ -802,6 +1000,8 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) tcg_temp_free_i64(tcg_ctx, tmplo); tcg_temp_free_i64(tcg_ctx, tmphi); + + clear_vec_high(s, true, destidx); } /* @@ -818,7 +1018,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) /* Get value of an element within a vector register */ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, - int element, TCGMemOp memop) + int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); @@ -851,7 +1051,7 @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, } static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, - int element, TCGMemOp memop) + int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); @@ -879,11 +1079,10 @@ static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, /* Set value of an element within a vector register */ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, - int element, TCGMemOp memop) + int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); - CPUState *cs; switch (memop) { case MO_8: tcg_gen_st8_i64(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); @@ -898,15 +1097,12 @@ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, tcg_gen_st_i64(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); break; default: - cs = CPU(s->uc->cpu); - cs->exception_index = EXCP_UDEF; - cpu_loop_exit(cs); - break; + g_assert_not_reached(); } } static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, - int destidx, int element, TCGMemOp memop) + int destidx, int element, MemOp memop) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); @@ -925,41 +1121,27 @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, } } -/* Clear the high 64 bits of a 128 bit vector (in general non-quad - * vector ops all need to do this). 
- */ -static void clear_vec_high(DisasContext *s, int rd) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); - - write_vec_element(s, tcg_zero, rd, 1, MO_64); - tcg_temp_free_i64(tcg_ctx, tcg_zero); -} - /* Store from vector register to memory */ static void do_vec_st(DisasContext *s, int srcidx, int element, - TCGv_i64 tcg_addr, int size) + TCGv_i64 tcg_addr, int size, MemOp endian) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGMemOp memop = MO_TE + size; TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); read_vec_element(s, tcg_tmp, srcidx, element, size); - tcg_gen_qemu_st_i64(s->uc, tcg_tmp, tcg_addr, get_mem_index(s), memop); + tcg_gen_qemu_st_i64(tcg_ctx, tcg_tmp, tcg_addr, get_mem_index(s), endian | size); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } /* Load from memory to vector register */ static void do_vec_ld(DisasContext *s, int destidx, int element, - TCGv_i64 tcg_addr, int size) + TCGv_i64 tcg_addr, int size, MemOp endian) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGMemOp memop = MO_TE + size; TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); - tcg_gen_qemu_ld_i64(s->uc, tcg_tmp, tcg_addr, get_mem_index(s), memop); + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_tmp, tcg_addr, get_mem_index(s), endian | size); write_vec_element(s, tcg_tmp, destidx, element, size); tcg_temp_free_i64(tcg_ctx, tcg_tmp); @@ -977,14 +1159,28 @@ static inline bool fp_access_check(DisasContext *s) assert(!s->fp_access_checked); s->fp_access_checked = true; - if (s->cpacr_fpen) { + if (!s->fp_excp_el) { return true; } - gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false)); + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); return false; } +/* Check that SVE access is enabled. If it is, return true. + * If not, emit code to generate an appropriate exception and return false. + */ +bool sve_access_check(DisasContext *s) +{ + if (s->sve_excp_el) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(), + s->sve_excp_el); + return false; + } + return fp_access_check(s); +} + /* * This utility function is for doing register extension with an * optional shift. You will likely want to pass a temporary for the @@ -1072,12 +1268,14 @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table, } /* - * the instruction disassembly implemented here matches - * the instruction encoding classifications in chapter 3 (C3) - * of the ARM Architecture Reference Manual (DDI0487A_a) + * The instruction disassembly implemented here matches + * the instruction encoding classifications in chapter C4 + * of the ARM Architecture Reference Manual (DDI0487B_a); + * classification names and decode diagrams here should generally + * match up with those in the manual. 
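/* How the checks above are meant to be used by a decoder: bail out
 * before emitting any FP/SVE-dependent code, so the trap (if any) is
 * raised at the exception level recorded at translate time.
 * disas_some_fp_insn is a hypothetical caller, not part of the patch. */
static void disas_some_fp_insn(DisasContext *s, uint32_t insn)
{
    if (!fp_access_check(s)) {
        return;    /* exception already generated; emit nothing */
    }
    /* ... emit the actual FP operation here ... */
}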
*/ -/* C3.2.7 Unconditional branch (immediate) +/* Unconditional branch (immediate) * 31 30 26 25 0 * +----+-----------+-------------------------------------+ * | op | 0 0 1 0 1 | imm26 | @@ -1086,18 +1284,19 @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table, static void disas_uncond_b_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4; + uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4; if (insn & (1U << 31)) { - /* C5.6.26 BL Branch with link */ - tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, 30), s->pc); + /* BL Branch with link */ + tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, 30), s->base.pc_next); } - /* C5.6.20 B Branch / C5.6.26 BL Branch with link */ + /* B Branch / BL Branch with link */ + reset_btype(s); gen_goto_tb(s, 0, addr); } -/* C3.2.1 Compare & branch (immediate) +/* Compare and branch (immediate) * 31 30 25 24 23 5 4 0 * +----+-------------+----+---------------------+--------+ * | sf | 0 1 1 0 1 0 | op | imm19 | Rt | @@ -1108,26 +1307,27 @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn) TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, op, rt; uint64_t addr; - int label_match; + TCGLabel *label_match; TCGv_i64 tcg_cmp; sf = extract32(insn, 31, 1); op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */ rt = extract32(insn, 0, 5); - addr = s->pc + sextract32(insn, 5, 19) * 4 - 4; + addr = s->pc_curr + sextract32(insn, 5, 19) * 4; tcg_cmp = read_cpu_reg(s, rt, sf); label_match = gen_new_label(tcg_ctx); + reset_btype(s); tcg_gen_brcondi_i64(tcg_ctx, op ? TCG_COND_NE : TCG_COND_EQ, tcg_cmp, 0, label_match); - gen_goto_tb(s, 0, s->pc); + gen_goto_tb(s, 0, s->base.pc_next); gen_set_label(tcg_ctx, label_match); gen_goto_tb(s, 1, addr); } -/* C3.2.5 Test & branch (immediate) +/* Test and branch (immediate) * 31 30 25 24 23 19 18 5 4 0 * +----+-------------+----+-------+-------------+------+ * | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt | @@ -1138,26 +1338,28 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn) TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int bit_pos, op, rt; uint64_t addr; - int label_match; + TCGLabel *label_match; TCGv_i64 tcg_cmp; bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5); op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */ - addr = s->pc + sextract32(insn, 5, 14) * 4 - 4; + addr = s->pc_curr + sextract32(insn, 5, 14) * 4; rt = extract32(insn, 0, 5); tcg_cmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_andi_i64(tcg_ctx, tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos)); label_match = gen_new_label(tcg_ctx); + + reset_btype(s); tcg_gen_brcondi_i64(tcg_ctx, op ? 
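/* Model of the branch-target arithmetic after the switch from s->pc
 * (next insn) to s->pc_curr (current insn): imm26 is a signed word
 * offset from the branch itself, so the old "- 4" fixup disappears. */
#include <stdint.h>

static int64_t sext_field(uint32_t v, int pos, int len)
{
    return ((int64_t)((uint64_t)v << (64 - pos - len))) >> (64 - len);
}

static uint64_t b_imm_target(uint64_t pc_curr, uint32_t insn)
{
    return pc_curr + sext_field(insn, 0, 26) * 4;   /* B/BL */
}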
TCG_COND_NE : TCG_COND_EQ, tcg_cmp, 0, label_match); tcg_temp_free_i64(tcg_ctx, tcg_cmp); - gen_goto_tb(s, 0, s->pc); + gen_goto_tb(s, 0, s->base.pc_next); gen_set_label(tcg_ctx, label_match); gen_goto_tb(s, 1, addr); } -/* C3.2.2 / C5.6.19 Conditional branch (immediate) +/* Conditional branch (immediate) * 31 25 24 23 5 4 3 0 * +---------------+----+---------------------+----+------+ * | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond | @@ -1173,14 +1375,15 @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn) unallocated_encoding(s); return; } - addr = s->pc + sextract32(insn, 5, 19) * 4 - 4; + addr = s->pc_curr + sextract32(insn, 5, 19) * 4; cond = extract32(insn, 0, 4); + reset_btype(s); if (cond < 0x0e) { /* genuinely conditional branches */ - int label_match = gen_new_label(tcg_ctx); + TCGLabel *label_match = gen_new_label(tcg_ctx); arm_gen_test_cc(tcg_ctx, cond, label_match); - gen_goto_tb(s, 0, s->pc); + gen_goto_tb(s, 0, s->base.pc_next); gen_set_label(tcg_ctx, label_match); gen_goto_tb(s, 1, addr); } else { @@ -1189,10 +1392,11 @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn) } } -/* C5.6.68 HINT */ +/* HINT instruction group, including various allocated HINTs */ static void handle_hint(DisasContext *s, uint32_t insn, unsigned int op1, unsigned int op2, unsigned int crm) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int selector = crm << 3 | op2; if (op1 != 3) { @@ -1201,22 +1405,102 @@ static void handle_hint(DisasContext *s, uint32_t insn, } switch (selector) { - case 0: /* NOP */ - return; - case 3: /* WFI */ - s->is_jmp = DISAS_WFI; - return; - case 1: /* YIELD */ - case 2: /* WFE */ - s->is_jmp = DISAS_WFE; - return; - case 4: /* SEV */ - case 5: /* SEVL */ + case 0: // 0b00000: /* NOP */ + break; + case 3: // 0b00011: /* WFI */ + s->base.is_jmp = DISAS_WFI; + break; + case 1: // 0b00001: /* YIELD */ + /* When running in MTTCG we don't generate jumps to the yield and + * WFE helpers as it won't affect the scheduling of other vCPUs. + * If we wanted to more completely model WFE/SEV so we don't busy + * spin unnecessarily we would need to do something more involved. 
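/* Model of the TBZ/TBNZ decode above: the tested bit number is the
 * 6-bit concatenation b5:b40, so one encoding covers bits 0..63 of
 * either a W or X view of Rt. */
#include <stdint.h>

static unsigned tbz_bit_pos(uint32_t insn)
{
    unsigned b5  = (insn >> 31) & 1;
    unsigned b40 = (insn >> 19) & 0x1f;
    return (b5 << 5) | b40;
}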
+ */ + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + s->base.is_jmp = DISAS_YIELD; + } + break; + case 2: // 0b00010: /* WFE */ + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + s->base.is_jmp = DISAS_WFE; + } + break; + case 4: // 0b00100: /* SEV */ + case 5: // 0b00101: /* SEVL */ /* we treat all as NOP at least for now */ - return; + break; + case 7: // 0b00111: /* XPACLRI */ + if (s->pauth_active) { + gen_helper_xpaci(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30]); + } + break; + case 8: // 0b01000: /* PACIA1716 */ + if (s->pauth_active) { + gen_helper_pacia(tcg_ctx, tcg_ctx->cpu_X[17], tcg_ctx->cpu_env, tcg_ctx->cpu_X[17], tcg_ctx->cpu_X[16]); + } + break; + case 0xa: // 0b01010: /* PACIB1716 */ + if (s->pauth_active) { + gen_helper_pacib(tcg_ctx, tcg_ctx->cpu_X[17], tcg_ctx->cpu_env, tcg_ctx->cpu_X[17], tcg_ctx->cpu_X[16]); + } + break; + case 0xc: // 0b01100: /* AUTIA1716 */ + if (s->pauth_active) { + gen_helper_autia(tcg_ctx, tcg_ctx->cpu_X[17], tcg_ctx->cpu_env, tcg_ctx->cpu_X[17], tcg_ctx->cpu_X[16]); + } + break; + case 0xe: // 0b01110: /* AUTIB1716 */ + if (s->pauth_active) { + gen_helper_autib(tcg_ctx, tcg_ctx->cpu_X[17], tcg_ctx->cpu_env, tcg_ctx->cpu_X[17], tcg_ctx->cpu_X[16]); + } + break; + case 0x18: // 0b11000: /* PACIAZ */ + if (s->pauth_active) { + gen_helper_pacia(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], + new_tmp_a64_zero(s)); + } + break; + case 0x19: // 0b11001: /* PACIASP */ + if (s->pauth_active) { + gen_helper_pacia(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], tcg_ctx->cpu_X[31]); + } + break; + case 0x1a: // 0b11010: /* PACIBZ */ + if (s->pauth_active) { + gen_helper_pacib(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], + new_tmp_a64_zero(s)); + } + break; + case 0x1b: // 0b11011: /* PACIBSP */ + if (s->pauth_active) { + gen_helper_pacib(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], tcg_ctx->cpu_X[31]); + } + break; + case 0x1c: // 0b11100: /* AUTIAZ */ + if (s->pauth_active) { + gen_helper_autia(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], + new_tmp_a64_zero(s)); + } + break; + case 0x1d: // 0b11101: /* AUTIASP */ + if (s->pauth_active) { + gen_helper_autia(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], tcg_ctx->cpu_X[31]); + } + break; + case 0x1e: // 0b11110: /* AUTIBZ */ + if (s->pauth_active) { + gen_helper_autib(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], + new_tmp_a64_zero(s)); + } + break; + case 0x1f: // 0b11111: /* AUTIBSP */ + if (s->pauth_active) { + gen_helper_autib(tcg_ctx, tcg_ctx->cpu_X[30], tcg_ctx->cpu_env, tcg_ctx->cpu_X[30], tcg_ctx->cpu_X[31]); + } + break; default: /* default specified as NOP equivalent */ - return; + break; } } @@ -1230,6 +1514,9 @@ static void gen_clrex(DisasContext *s, uint32_t insn) static void handle_sync(DisasContext *s, uint32_t insn, unsigned int op1, unsigned int op2, unsigned int crm) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGBar bar; + if (op1 != 3) { unallocated_encoding(s); return; @@ -1241,41 +1528,178 @@ static void handle_sync(DisasContext *s, uint32_t insn, return; case 4: /* DSB */ case 5: /* DMB */ - case 6: /* ISB */ - /* We don't emulate caches so barriers are no-ops */ + switch (crm & 3) { + case 1: /* MBReqTypes_Reads */ + bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST; + break; + case 2: /* MBReqTypes_Writes */ + bar = TCG_BAR_SC | TCG_MO_ST_ST; + break; + default: /* MBReqTypes_All */ + bar = TCG_BAR_SC | TCG_MO_ALL; + 
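/* Model of the HINT-space dispatch above: the selector is CRm:op2
 * (the binary comments on the case labels). The pointer-auth hints
 * are architecturally NOPs when FEAT_PAuth is absent or inactive,
 * which is why every pauth case is guarded by s->pauth_active. */
static unsigned hint_selector(unsigned crm, unsigned op2)
{
    return (crm << 3) | op2;    /* e.g. 0b11001 == PACIASP */
}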
break; + } + tcg_gen_mb(tcg_ctx, bar); return; + case 6: /* ISB */ + /* We need to break the TB after this insn to execute + * self-modified code correctly and also to take + * any pending interrupts immediately. + */ + reset_btype(s); + gen_goto_tb(s, 0, s->base.pc_next); + return; + + case 7: /* SB */ + if (crm != 0 || !dc_isar_feature(aa64_sb, s)) { + goto do_unallocated; + } + /* + * TODO: There is no speculation barrier opcode for TCG; + * MB and end the TB instead. + */ + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); + gen_goto_tb(s, 0, s->base.pc_next); + return; + default: + do_unallocated: unallocated_encoding(s); return; } } -/* C5.6.130 MSR (immediate) - move immediate to processor state field */ +static void gen_xaflag(TCGContext *tcg_ctx) +{ + TCGv_i32 z = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, z, tcg_ctx->cpu_ZF, 0); + + /* + * (!C & !Z) << 31 + * (!(C | Z)) << 31 + * ~((C | Z) << 31) + * ~-(C | Z) + * (C | Z) - 1 + */ + tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, z); + tcg_gen_subi_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_NF, 1); + + /* !(Z & C) */ + tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_ZF, z, tcg_ctx->cpu_CF); + tcg_gen_xori_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, 1); + + /* (!C & Z) << 31 -> -(Z & ~C) */ + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, z, tcg_ctx->cpu_CF); + tcg_gen_neg_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF); + + /* C | Z */ + tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, z); + + tcg_temp_free_i32(tcg_ctx, z); +} + +static void gen_axflag(TCGContext *tcg_ctx) +{ + tcg_gen_sari_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, 31); /* V ? -1 : 0 */ + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, tcg_ctx->cpu_VF); /* C & !V */ + + /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */ + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, tcg_ctx->cpu_VF); + + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_NF, 0); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0); +} + +/* MSR (immediate) - move immediate to processor state field */ static void handle_msr_i(DisasContext *s, uint32_t insn, unsigned int op1, unsigned int op2, unsigned int crm) { TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1; int op = op1 << 3 | op2; + + /* End the TB by default, chaining is ok.
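/* Model of the DMB/DSB option decode at the top of this hunk: CRm's
 * low two bits pick the required ordering, mapped onto TCG barrier
 * flags; every variant is sequentially consistent (TCG_BAR_SC). */
static int dmb_tcg_flags(unsigned crm)
{
    switch (crm & 3) {
    case 1:  return TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST; /* loads  */
    case 2:  return TCG_BAR_SC | TCG_MO_ST_ST;                /* stores */
    default: return TCG_BAR_SC | TCG_MO_ALL;                  /* full   */
    }
}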
*/ + s->base.is_jmp = DISAS_TOO_MANY; + switch (op) { + case 0x00: /* CFINV */ + if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) { + goto do_unallocated; + } + tcg_gen_xori_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, 1); + s->base.is_jmp = DISAS_NEXT; + break; + + case 0x01: /* XAFlag */ + if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) { + goto do_unallocated; + } + gen_xaflag(tcg_ctx); + s->base.is_jmp = DISAS_NEXT; + break; + + case 0x02: /* AXFlag */ + if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) { + goto do_unallocated; + } + gen_axflag(tcg_ctx); + s->base.is_jmp = DISAS_NEXT; + break; + + case 0x03: /* UAO */ + if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) { + goto do_unallocated; + } + if (crm & 1) { + set_pstate_bits(tcg_ctx, PSTATE_UAO); + } else { + clear_pstate_bits(tcg_ctx, PSTATE_UAO); + } + t1 = tcg_const_i32(tcg_ctx, s->current_el); + gen_helper_rebuild_hflags_a64(tcg_ctx, tcg_ctx->cpu_env, t1); + tcg_temp_free_i32(tcg_ctx, t1); + break; + + case 0x04: /* PAN */ + if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) { + goto do_unallocated; + } + if (crm & 1) { + set_pstate_bits(tcg_ctx, PSTATE_PAN); + } else { + clear_pstate_bits(tcg_ctx, PSTATE_PAN); + } + t1 = tcg_const_i32(tcg_ctx, s->current_el); + gen_helper_rebuild_hflags_a64(tcg_ctx, tcg_ctx->cpu_env, t1); + tcg_temp_free_i32(tcg_ctx, t1); + break; + case 0x05: /* SPSel */ if (s->current_el == 0) { - unallocated_encoding(s); - return; + goto do_unallocated; } - /* fall through */ - case 0x1e: /* DAIFSet */ - case 0x1f: /* DAIFClear */ - { - TCGv_i32 tcg_imm = tcg_const_i32(tcg_ctx, crm); - TCGv_i32 tcg_op = tcg_const_i32(tcg_ctx, op); - gen_a64_set_pc_im(s, s->pc - 4); - gen_helper_msr_i_pstate(tcg_ctx, tcg_ctx->cpu_env, tcg_op, tcg_imm); - tcg_temp_free_i32(tcg_ctx, tcg_imm); - tcg_temp_free_i32(tcg_ctx, tcg_op); - s->is_jmp = DISAS_UPDATE; + t1 = tcg_const_i32(tcg_ctx, crm & PSTATE_SP); + gen_helper_msr_i_spsel(tcg_ctx, tcg_ctx->cpu_env, t1); + tcg_temp_free_i32(tcg_ctx, t1); break; - } + + case 0x1e: /* DAIFSet */ + t1 = tcg_const_i32(tcg_ctx, crm); + gen_helper_msr_i_daifset(tcg_ctx, tcg_ctx->cpu_env, t1); + tcg_temp_free_i32(tcg_ctx, t1); + break; + + case 0x1f: /* DAIFClear */ + t1 = tcg_const_i32(tcg_ctx, crm); + gen_helper_msr_i_daifclear(tcg_ctx, tcg_ctx->cpu_env, t1); + tcg_temp_free_i32(tcg_ctx, t1); + /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */ + s->base.is_jmp = DISAS_UPDATE; + break; + default: + do_unallocated: unallocated_encoding(s); return; } @@ -1304,12 +1728,11 @@ static void gen_get_nzcv(TCGContext *tcg_ctx, TCGv_i64 tcg_rt) } static void gen_set_nzcv(TCGContext *tcg_ctx, TCGv_i64 tcg_rt) - { TCGv_i32 nzcv = tcg_temp_new_i32(tcg_ctx); /* take NZCV from R[t] */ - tcg_gen_trunc_i64_i32(tcg_ctx, nzcv, tcg_rt); + tcg_gen_extrl_i64_i32(tcg_ctx, nzcv, tcg_rt); /* bit 31, N */ tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_NF, nzcv, (1U << 31)); @@ -1325,10 +1748,10 @@ static void gen_set_nzcv(TCGContext *tcg_ctx, TCGv_i64 tcg_rt) tcg_temp_free_i32(tcg_ctx, nzcv); } -/* C5.6.129 MRS - move from system register - * C5.6.131 MSR (register) - move to system register - * C5.6.204 SYS - * C5.6.205 SYSL +/* MRS - move from system register + * MSR (register) - move to system register + * SYS + * SYSL * These are all essentially the same insn in 'read' and 'write' * versions, with varying op0 fields. */ @@ -1366,16 +1789,24 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, * runtime; this may result in an exception. 
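/* C model of gen_xaflag (FEAT_FlagM2), using QEMU's flag layout:
 * Z is set iff ZF == 0, N/V live in bit 31 of NF/VF, C is 0/1 in CF.
 * The conversion is N = !C & !Z, Z = Z & C, C = C | Z, V = !C & Z. */
#include <stdint.h>

static void xaflag(uint32_t *nf, uint32_t *zf, uint32_t *cf, uint32_t *vf)
{
    uint32_t z = (*zf == 0);      /* architected Z flag */
    *nf = (*cf | z) - 1;          /* -1 (bit 31 set) iff !C && !Z */
    *zf = (z & *cf) ^ 1;          /* 0 (i.e. Z set) iff Z && C    */
    *vf = -(z & ~*cf);            /* bit 31 set iff Z && !C       */
    *cf = *cf | z;
}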
*/ TCGv_ptr tmpptr; - TCGv_i32 tcg_syn; + TCGv_i32 tcg_syn, tcg_isread; uint32_t syndrome; - gen_a64_set_pc_im(s, s->pc - 4); + gen_a64_set_pc_im(tcg_ctx, s->pc_curr); tmpptr = tcg_const_ptr(tcg_ctx, ri); syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); tcg_syn = tcg_const_i32(tcg_ctx, syndrome); - gen_helper_access_check_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tcg_syn); + tcg_isread = tcg_const_i32(tcg_ctx, isread); + gen_helper_access_check_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tcg_syn, tcg_isread); tcg_temp_free_ptr(tcg_ctx, tmpptr); tcg_temp_free_i32(tcg_ctx, tcg_syn); + tcg_temp_free_i32(tcg_ctx, tcg_isread); + } else if (ri->type & ARM_CP_RAISES_EXC) { + /* + * The readfn or writefn might raise an exception; + * synchronize the CPU state in case it does. + */ + gen_a64_set_pc_im(tcg_ctx, s->pc_curr); } /* Handle special cases first */ @@ -1399,12 +1830,17 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, return; case ARM_CP_DC_ZVA: /* Writes clear the aligned block of memory which rt points into. */ - tcg_rt = cpu_reg(s, rt); + tcg_rt = clean_data_tbi(s, cpu_reg(s, rt)); gen_helper_dc_zva(tcg_ctx, tcg_ctx->cpu_env, tcg_rt); return; default: break; } + if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) { + return; + } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { + return; + } tcg_rt = cpu_reg(s, rt); @@ -1433,16 +1869,28 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, } } + if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { + /* I/O operations must end the TB here (whether read or write) */ + s->base.is_jmp = DISAS_UPDATE; + } if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { - /* We default to ending the TB on a coprocessor register write, + /* + * A write to any coprocessor register that ends a TB + * must rebuild the hflags for the next TB. + */ + TCGv_i32 tcg_el = tcg_const_i32(tcg_ctx, s->current_el); + gen_helper_rebuild_hflags_a64(tcg_ctx, tcg_ctx->cpu_env, tcg_el); + tcg_temp_free_i32(tcg_ctx, tcg_el); + /* + * We default to ending the TB on a coprocessor register write, * but allow this to be suppressed by the register definition * (usually only necessary to work around guest bugs). */ - s->is_jmp = DISAS_UPDATE; + s->base.is_jmp = DISAS_UPDATE; } } -/* C3.2.4 System +/* System * 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0 * +---------------------+---+-----+-----+-------+-------+-----+------+ * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt | @@ -1465,13 +1913,13 @@ static void disas_system(DisasContext *s, uint32_t insn) return; } switch (crn) { - case 2: /* C5.6.68 HINT */ + case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */ handle_hint(s, insn, op1, op2, crm); break; case 3: /* CLREX, DSB, DMB, ISB */ handle_sync(s, insn, op1, op2, crm); break; - case 4: /* C5.6.130 MSR (immediate) */ + case 4: /* MSR (immediate) */ handle_msr_i(s, insn, op1, op2, crm); break; default: @@ -1483,7 +1931,7 @@ static void disas_system(DisasContext *s, uint32_t insn) handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt); } -/* C3.2.3 Exception generation +/* Exception generation * * 31 24 23 21 20 5 4 2 1 0 * +-----------------+-----+------------------------+-----+----+ @@ -1506,11 +1954,12 @@ static void disas_exc(DisasContext *s, uint32_t insn) * instruction works properly.
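/* The lookup above is keyed on the full system-register coordinates.
 * An AArch64 register is named by the tuple (op0, op1, CRn, CRm, op2);
 * QEMU packs these into one integer key for its cpregs hash table.
 * The packing below is only illustrative; the real macro is a QEMU
 * internal and also encodes the coprocessor space. */
#include <stdint.h>

static uint32_t sysreg_key(unsigned op0, unsigned op1,
                           unsigned crn, unsigned crm, unsigned op2)
{
    return (op0 << 14) | (op1 << 11) | (crn << 7) | (crm << 3) | op2;
}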
*/ switch (op2_ll) { - case 1: + case 1: /* SVC */ gen_ss_advance(s); - gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16)); + gen_exception_insn(s, s->base.pc_next, EXCP_SWI, + syn_aa64_svc(imm16), default_exception_el(s)); break; - case 2: + case 2: /* HVC */ if (s->current_el == 0) { unallocated_encoding(s); break; @@ -1518,22 +1967,24 @@ static void disas_exc(DisasContext *s, uint32_t insn) /* The pre HVC helper handles cases when HVC gets trapped * as an undefined insn by runtime configuration. */ - gen_a64_set_pc_im(s, s->pc - 4); + gen_a64_set_pc_im(tcg_ctx, s->pc_curr); gen_helper_pre_hvc(tcg_ctx, tcg_ctx->cpu_env); gen_ss_advance(s); - gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16)); + gen_exception_insn(s, s->base.pc_next, EXCP_HVC, + syn_aa64_hvc(imm16), 2); break; - case 3: + case 3: /* SMC */ if (s->current_el == 0) { unallocated_encoding(s); break; } - gen_a64_set_pc_im(s, s->pc - 4); + gen_a64_set_pc_im(tcg_ctx, s->pc_curr); tmp = tcg_const_i32(tcg_ctx, syn_aa64_smc(imm16)); gen_helper_pre_smc(tcg_ctx, tcg_ctx->cpu_env, tmp); tcg_temp_free_i32(tcg_ctx, tmp); gen_ss_advance(s); - gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16)); + gen_exception_insn(s, s->base.pc_next, EXCP_SMC, + syn_aa64_smc(imm16), 3); break; default: unallocated_encoding(s); @@ -1546,14 +1997,35 @@ static void disas_exc(DisasContext *s, uint32_t insn) break; } /* BRK */ - gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16)); + gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16)); break; case 2: if (op2_ll != 0) { unallocated_encoding(s); break; } - /* HLT */ + /* HLT. This has two purposes. + * Architecturally, it is an external halting debug instruction. + * Since QEMU doesn't implement external debug, we treat this as + * it is required for halting debug disabled: it will UNDEF. + * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction. + */ +#if 0 + if (semihosting_enabled() && imm16 == 0xf000) { + /* In system mode, don't allow userspace access to semihosting, + * to provide some semblance of security (and for consistency + * with our 32-bit semihosting). 
+ */ + if (s->current_el == 0) { + unsupported_encoding(s, insn); + break; + } + gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST); + } else { + unsupported_encoding(s, insn); + } +#endif + unsupported_encoding(s, insn); break; case 5: @@ -1570,7 +2042,7 @@ static void disas_exc(DisasContext *s, uint32_t insn) } } -/* C3.2.7 Unconditional branch (register) +/* Unconditional branch (register) * 31 25 24 21 20 16 15 10 9 5 4 0 * +---------------+-------+-------+-------+------+-------+ * | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 | @@ -1580,6 +2052,9 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int opc, op2, op3, rn, op4; + unsigned btype_mod = 2; /* 0: BR, 1: BLR, 2: other */ + TCGv_i64 dst; + TCGv_i64 modifier; opc = extract32(insn, 21, 4); op2 = extract32(insn, 16, 5); @@ -1587,44 +2062,175 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) rn = extract32(insn, 5, 5); op4 = extract32(insn, 0, 5); - if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) { - unallocated_encoding(s); - return; + if (op2 != 0x1f) { + goto do_unallocated; } switch (opc) { case 0: /* BR */ - case 2: /* RET */ - tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_pc, cpu_reg(s, rn)); - break; case 1: /* BLR */ - tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_pc, cpu_reg(s, rn)); - tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, 30), s->pc); + case 2: /* RET */ + btype_mod = opc; + switch (op3) { + case 0: + /* BR, BLR, RET */ + if (op4 != 0) { + goto do_unallocated; + } + dst = cpu_reg(s, rn); + break; + + case 2: + case 3: + if (!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + if (opc == 2) { + /* RETAA, RETAB */ + if (rn != 0x1f || op4 != 0x1f) { + goto do_unallocated; + } + rn = 30; + modifier = tcg_ctx->cpu_X[31]; + } else { + /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */ + if (op4 != 0x1f) { + goto do_unallocated; + } + modifier = new_tmp_a64_zero(s); + } + if (s->pauth_active) { + dst = new_tmp_a64(s); + if (op3 == 2) { + gen_helper_autia(tcg_ctx, dst, tcg_ctx->cpu_env, cpu_reg(s, rn), modifier); + } else { + gen_helper_autib(tcg_ctx, dst, tcg_ctx->cpu_env, cpu_reg(s, rn), modifier); + } + } else { + dst = cpu_reg(s, rn); + } + break; + + default: + goto do_unallocated; + } + gen_a64_set_pc(s, dst); + /* BLR also needs to load return address */ + if (opc == 1) { + tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, 30), s->base.pc_next); + } break; + + case 8: /* BRAA */ + case 9: /* BLRAA */ + if (!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + if ((op3 & ~1) != 2) { + goto do_unallocated; + } + btype_mod = opc & 1; + if (s->pauth_active) { + dst = new_tmp_a64(s); + modifier = cpu_reg_sp(s, op4); + if (op3 == 2) { + gen_helper_autia(tcg_ctx, dst, tcg_ctx->cpu_env, cpu_reg(s, rn), modifier); + } else { + gen_helper_autib(tcg_ctx, dst, tcg_ctx->cpu_env, cpu_reg(s, rn), modifier); + } + } else { + dst = cpu_reg(s, rn); + } + gen_a64_set_pc(s, dst); + /* BLRAA also needs to load return address */ + if (opc == 9) { + tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, 30), s->base.pc_next); + } + break; + case 4: /* ERET */ if (s->current_el == 0) { - unallocated_encoding(s); - return; + goto do_unallocated; } - gen_helper_exception_return(tcg_ctx, tcg_ctx->cpu_env); - s->is_jmp = DISAS_JUMP; + switch (op3) { + case 0: /* ERET */ + if (op4 != 0) { + goto do_unallocated; + } + dst = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, + offsetof(CPUARMState, elr_el[s->current_el])); + break; + + case 2: /* ERETAA */ + case 3: /* ERETAB */ + if 
(!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + if (rn != 0x1f || op4 != 0x1f) { + goto do_unallocated; + } + dst = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, + offsetof(CPUARMState, elr_el[s->current_el])); + if (s->pauth_active) { + modifier = tcg_ctx->cpu_X[31]; + if (op3 == 2) { + gen_helper_autia(tcg_ctx, dst, tcg_ctx->cpu_env, dst, modifier); + } else { + gen_helper_autib(tcg_ctx, dst, tcg_ctx->cpu_env, dst, modifier); + } + } + break; + + default: + goto do_unallocated; + } + + gen_helper_exception_return(tcg_ctx, tcg_ctx->cpu_env, dst); + tcg_temp_free_i64(tcg_ctx, dst); + /* Must exit loop to check un-masked IRQs */ + s->base.is_jmp = DISAS_EXIT; return; + case 5: /* DRPS */ - if (rn != 0x1f) { - unallocated_encoding(s); + if (op3 != 0 || op4 != 0 || rn != 0x1f) { + goto do_unallocated; } else { unsupported_encoding(s, insn); } return; + default: + do_unallocated: unallocated_encoding(s); return; } - s->is_jmp = DISAS_JUMP; + switch (btype_mod) { + case 0: /* BR */ + if (dc_isar_feature(aa64_bti, s)) { + /* BR to {x16,x17} or !guard -> 1, else 3. */ + set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3); + } + break; + + case 1: /* BLR */ + if (dc_isar_feature(aa64_bti, s)) { + /* BLR sets BTYPE to 2, regardless of source guarded page. */ + set_btype(s, 2); + } + break; + + default: /* RET or none of the above. */ + /* BTYPE will be set to 0 by normal end-of-insn processing. */ + break; + } + + s->base.is_jmp = DISAS_JUMP; } -/* C3.2 Branches, exception generating and system instructions */ +/* Branches, exception generating and system instructions */ static void disas_b_exc_sys(DisasContext *s, uint32_t insn) { switch (extract32(insn, 25, 7)) { @@ -1643,7 +2249,11 @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn) break; case 0x6a: /* Exception generation / System */ if (insn & (1 << 24)) { - disas_system(s, insn); + if (extract32(insn, 22, 2) == 0) { + disas_system(s, insn); + } else { + unallocated_encoding(s); + } } else { disas_exc(s, insn); } @@ -1664,52 +2274,56 @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn) * mandated semantics, but it works for typical guest code sequences * and avoids having to monitor regular stores. * - * In system emulation mode only one CPU will be running at once, so - * this sequence is effectively atomic. In user emulation mode we - * throw an exception and handle the atomic operation elsewhere. + * The store exclusive uses the atomic cmpxchg primitives to avoid + * races in multi-threaded linux-user and when MTTCG softmmu is + * enabled. */ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv_i64 addr, int size, bool is_pair) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); - TCGMemOp memop = MO_TE + size; + int idx = get_mem_index(s); + MemOp memop = s->be_data; g_assert(size <= 3); - tcg_gen_qemu_ld_i64(s->uc, tmp, addr, get_mem_index(s), memop); - if (is_pair) { - TCGv_i64 addr2 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 hitmp = tcg_temp_new_i64(tcg_ctx); - g_assert(size >= 2); - tcg_gen_addi_i64(tcg_ctx, addr2, addr, 1ULL << size); - tcg_gen_qemu_ld_i64(s->uc, hitmp, addr2, get_mem_index(s), memop); - tcg_temp_free_i64(tcg_ctx, addr2); - tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_high, hitmp); - tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt2), hitmp); - tcg_temp_free_i64(tcg_ctx, hitmp); + if (size == 2) { + /* The pair must be single-copy atomic for the doubleword. 
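/* Model of the BTYPE bookkeeping at the end of disas_uncond_b_reg:
 * with FEAT_BTI, a BR via x16/x17 or from an unguarded page records
 * BTYPE 1, any other BR records 3, BLR always 2, and RET leaves it to
 * be cleared by normal end-of-insn processing. */
static int btype_after_branch(int btype_mod, int rn, bool guarded_page)
{
    switch (btype_mod) {
    case 0:  return (rn == 16 || rn == 17 || !guarded_page) ? 1 : 3;
    case 1:  return 2;
    default: return 0;
    }
}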
*/ + memop |= MO_64 | MO_ALIGN; + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, addr, idx, memop); + if (s->be_data == MO_LE) { + tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val, 0, 32); + tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt2), tcg_ctx->cpu_exclusive_val, 32, 32); + } else { + tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val, 32, 32); + tcg_gen_extract_i64(tcg_ctx, cpu_reg(s, rt2), tcg_ctx->cpu_exclusive_val, 0, 32); + } + } else { + /* The pair must be single-copy atomic for *each* doubleword, not + the entire quadword, however it must be quadword aligned. */ + memop |= MO_64; + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, addr, idx, + memop | MO_ALIGN_16); + + TCGv_i64 addr2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_addi_i64(tcg_ctx, addr2, addr, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_exclusive_high, addr2, idx, memop); + tcg_temp_free_i64(tcg_ctx, addr2); + + tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val); + tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt2), tcg_ctx->cpu_exclusive_high); + } + } else { + memop |= size | MO_ALIGN; + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, addr, idx, memop); + tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt), tcg_ctx->cpu_exclusive_val); } - - tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, tmp); - tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt), tmp); - - tcg_temp_free_i64(tcg_ctx, tmp); tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, addr); } -#ifdef CONFIG_USER_ONLY static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, TCGv_i64 addr, int size, int is_pair) -{ - tcg_gen_mov_i64(tcg_ctx, cpu_exclusive_test, addr); - tcg_gen_movi_i32(tcg_ctx, cpu_exclusive_info, - size | is_pair << 2 | (rd << 4) | (rt << 9) | (rt2 << 14)); - gen_exception_internal_insn(s, 4, EXCP_STREX); -} -#else -static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, - TCGv_i64 inaddr, int size, int is_pair) { TCGContext *tcg_ctx = s->uc->tcg_ctx; /* if (env->exclusive_addr == addr && env->exclusive_val == [addr] @@ -1724,58 +2338,187 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, * } * env->exclusive_addr = -1; */ - int fail_label = gen_new_label(tcg_ctx); - int done_label = gen_new_label(tcg_ctx); - TCGv_i64 addr = tcg_temp_local_new_i64(tcg_ctx); + TCGLabel *fail_label = gen_new_label(tcg_ctx); + TCGLabel *done_label = gen_new_label(tcg_ctx); TCGv_i64 tmp; - /* Copy input into a local temp so it is not trashed when the - * basic block ends at the branch insn. 
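/* Model of the LDXP writeback above for the 32-bit pair (size == 2):
 * one 64-bit single-copy-atomic load is split between the two
 * destination registers according to guest endianness, which is what
 * the tcg_gen_extract_i64 pair does. */
#include <stdbool.h>
#include <stdint.h>

static void ldxp32_split(uint64_t val, bool be, uint64_t *rt, uint64_t *rt2)
{
    uint32_t lo = (uint32_t)val, hi = (uint32_t)(val >> 32);
    *rt  = be ? hi : lo;    /* Rt gets the lower-addressed word */
    *rt2 = be ? lo : hi;
}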
- */ - tcg_gen_mov_i64(tcg_ctx, addr, inaddr); tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, addr, tcg_ctx->cpu_exclusive_addr, fail_label); tmp = tcg_temp_new_i64(tcg_ctx); - tcg_gen_qemu_ld_i64(s->uc, tmp, addr, get_mem_index(s), MO_TE + size); - tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, tmp, tcg_ctx->cpu_exclusive_val, fail_label); + if (is_pair) { + if (size == 2) { + if (s->be_data == MO_LE) { + tcg_gen_concat32_i64(tcg_ctx, tmp, cpu_reg(s, rt), cpu_reg(s, rt2)); + } else { + tcg_gen_concat32_i64(tcg_ctx, tmp, cpu_reg(s, rt2), cpu_reg(s, rt)); + } + tcg_gen_atomic_cmpxchg_i64(tcg_ctx, tmp, tcg_ctx->cpu_exclusive_addr, + tcg_ctx->cpu_exclusive_val, tmp, + get_mem_index(s), + MO_64 | MO_ALIGN | s->be_data); + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, tmp, tmp, tcg_ctx->cpu_exclusive_val); + } else if (tb_cflags(s->base.tb) & CF_PARALLEL) { + if (!HAVE_CMPXCHG128) { + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + s->base.is_jmp = DISAS_NORETURN; + } else if (s->be_data == MO_LE) { + gen_helper_paired_cmpxchg64_le_parallel(tcg_ctx, tmp, tcg_ctx->cpu_env, + tcg_ctx->cpu_exclusive_addr, + cpu_reg(s, rt), + cpu_reg(s, rt2)); + } else { + gen_helper_paired_cmpxchg64_be_parallel(tcg_ctx, tmp, tcg_ctx->cpu_env, + tcg_ctx->cpu_exclusive_addr, + cpu_reg(s, rt), + cpu_reg(s, rt2)); + } + } else if (s->be_data == MO_LE) { + gen_helper_paired_cmpxchg64_le(tcg_ctx, tmp, tcg_ctx->cpu_env, tcg_ctx->cpu_exclusive_addr, + cpu_reg(s, rt), cpu_reg(s, rt2)); + } else { + gen_helper_paired_cmpxchg64_be(tcg_ctx, tmp, tcg_ctx->cpu_env, tcg_ctx->cpu_exclusive_addr, + cpu_reg(s, rt), cpu_reg(s, rt2)); + } + } else { + tcg_gen_atomic_cmpxchg_i64(tcg_ctx, tmp, tcg_ctx->cpu_exclusive_addr, tcg_ctx->cpu_exclusive_val, + cpu_reg(s, rt), get_mem_index(s), + size | MO_ALIGN | s->be_data); + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, tmp, tmp, tcg_ctx->cpu_exclusive_val); + } + tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rd), tmp); tcg_temp_free_i64(tcg_ctx, tmp); - - if (is_pair) { - TCGv_i64 addrhi = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 tmphi = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_addi_i64(tcg_ctx, addrhi, addr, 1ULL << size); - tcg_gen_qemu_ld_i64(s->uc, tmphi, addrhi, get_mem_index(s), MO_TE + size); - tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, tmphi, tcg_ctx->cpu_exclusive_high, fail_label); - - tcg_temp_free_i64(tcg_ctx, tmphi); - tcg_temp_free_i64(tcg_ctx, addrhi); - } - - /* We seem to still have the exclusive monitor, so do the store */ - tcg_gen_qemu_st_i64(s->uc, cpu_reg(s, rt), addr, get_mem_index(s), MO_TE + size); - if (is_pair) { - TCGv_i64 addrhi = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_addi_i64(tcg_ctx, addrhi, addr, 1ULL << size); - tcg_gen_qemu_st_i64(s->uc, cpu_reg(s, rt2), addrhi, - get_mem_index(s), MO_TE + size); - tcg_temp_free_i64(tcg_ctx, addrhi); - } - - tcg_temp_free_i64(tcg_ctx, addr); - - tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, rd), 0); tcg_gen_br(tcg_ctx, done_label); + gen_set_label(tcg_ctx, fail_label); tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, rd), 1); gen_set_label(tcg_ctx, done_label); tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1); - } -#endif -/* C3.3.6 Load/store exclusive +static void gen_compare_and_swap(DisasContext *s, int rs, int rt, + int rn, int size) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_rs = cpu_reg(s, rs); + TCGv_i64 tcg_rt = cpu_reg(s, rt); + int memidx = get_mem_index(s); + TCGv_i64 clean_addr; + + if (rn == 31) { + gen_check_sp_alignment(s); + } + clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); + tcg_gen_atomic_cmpxchg_i64(tcg_ctx, tcg_rs, 
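/* C11 model of the cmpxchg-based store-exclusive above: the store
 * succeeds only if memory still holds the value observed by the
 * load-exclusive, so the exclusive monitor is emulated without
 * stopping other vCPUs. Returns the 0/1 status written to Rd. */
#include <stdatomic.h>
#include <stdint.h>

static int stxr64(_Atomic uint64_t *addr, uint64_t exclusive_val,
                  uint64_t new_val)
{
    uint64_t expected = exclusive_val;
    return atomic_compare_exchange_strong(addr, &expected, new_val) ? 0 : 1;
}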
clean_addr, tcg_rs, tcg_rt, memidx, + size | MO_ALIGN | s->be_data); +} + +static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, + int rn, int size) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 s1 = cpu_reg(s, rs); + TCGv_i64 s2 = cpu_reg(s, rs + 1); + TCGv_i64 t1 = cpu_reg(s, rt); + TCGv_i64 t2 = cpu_reg(s, rt + 1); + TCGv_i64 clean_addr; + int memidx = get_mem_index(s); + + if (rn == 31) { + gen_check_sp_alignment(s); + } + clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); + + if (size == 2) { + TCGv_i64 cmp = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); + + if (s->be_data == MO_LE) { + tcg_gen_concat32_i64(tcg_ctx, val, t1, t2); + tcg_gen_concat32_i64(tcg_ctx, cmp, s1, s2); + } else { + tcg_gen_concat32_i64(tcg_ctx, val, t2, t1); + tcg_gen_concat32_i64(tcg_ctx, cmp, s2, s1); + } + + tcg_gen_atomic_cmpxchg_i64(tcg_ctx, cmp, clean_addr, cmp, val, memidx, + MO_64 | MO_ALIGN | s->be_data); + tcg_temp_free_i64(tcg_ctx, val); + + if (s->be_data == MO_LE) { + tcg_gen_extr32_i64(tcg_ctx, s1, s2, cmp); + } else { + tcg_gen_extr32_i64(tcg_ctx, s2, s1, cmp); + } + tcg_temp_free_i64(tcg_ctx, cmp); + } else if (tb_cflags(s->base.tb) & CF_PARALLEL) { + if (HAVE_CMPXCHG128) { + TCGv_i32 tcg_rs = tcg_const_i32(tcg_ctx, rs); + if (s->be_data == MO_LE) { + gen_helper_casp_le_parallel(tcg_ctx, tcg_ctx->cpu_env, tcg_rs, + clean_addr, t1, t2); + } else { + gen_helper_casp_be_parallel(tcg_ctx, tcg_ctx->cpu_env, tcg_rs, + clean_addr, t1, t2); + } + tcg_temp_free_i32(tcg_ctx, tcg_rs); + } else { + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + s->base.is_jmp = DISAS_NORETURN; + } + } else { + TCGv_i64 d1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 d2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 a2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 c1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 c2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); + + /* Load the two words, in memory order. */ + tcg_gen_qemu_ld_i64(tcg_ctx, d1, clean_addr, memidx, + MO_64 | MO_ALIGN_16 | s->be_data); + tcg_gen_addi_i64(tcg_ctx, a2, clean_addr, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, d2, a2, memidx, MO_64 | s->be_data); + + /* Compare the two words, also in memory order. */ + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_EQ, c1, d1, s1); + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_EQ, c2, d2, s2); + tcg_gen_and_i64(tcg_ctx, c2, c2, c1); + + /* If compare equal, write back new data, else write back old data. */ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, c1, c2, zero, t1, d1); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, c2, c2, zero, t2, d2); + tcg_gen_qemu_st_i64(tcg_ctx, c1, clean_addr, memidx, MO_64 | s->be_data); + tcg_gen_qemu_st_i64(tcg_ctx, c2, a2, memidx, MO_64 | s->be_data); + tcg_temp_free_i64(tcg_ctx, a2); + tcg_temp_free_i64(tcg_ctx, c1); + tcg_temp_free_i64(tcg_ctx, c2); + tcg_temp_free_i64(tcg_ctx, zero); + + /* Write back the data from memory to Rs. */ + tcg_gen_mov_i64(tcg_ctx, s1, d1); + tcg_gen_mov_i64(tcg_ctx, s2, d2); + tcg_temp_free_i64(tcg_ctx, d1); + tcg_temp_free_i64(tcg_ctx, d2); + } +} + +/* Update the Sixty-Four bit (SF) register size. This logic is derived + * from the ARMv8 specs for LDR (Shared decode for all encodings). + */ +static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc) +{ + int opc0 = extract32(opc, 0, 1); + int regsize; + + if (is_signed) { + regsize = opc0 ? 32 : 64; + } else { + regsize = size == 3 ?
64 : 32; + } + return regsize == 64; +} + +/* Load/store exclusive * * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0 * +-----+-------------+----+---+----+------+----+-------+------+------+ @@ -1787,9 +2530,6 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, * o2: 0 -> exclusive, 1 -> not * o1: 0 -> single register, 1 -> register pair * o0: 1 -> load-acquire/store-release, 0 -> not - * - * o0 == 0 AND o2 == 1 is un-allocated - * o1 == 1 is un-allocated except for 32 and 64 bit sizes */ static void disas_ldst_excl(DisasContext *s, uint32_t insn) { @@ -1797,57 +2537,130 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rt2 = extract32(insn, 10, 5); - int is_lasr = extract32(insn, 15, 1); int rs = extract32(insn, 16, 5); - int is_pair = extract32(insn, 21, 1); - int is_store = !extract32(insn, 22, 1); - int is_excl = !extract32(insn, 23, 1); + int is_lasr = extract32(insn, 15, 1); + int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr; int size = extract32(insn, 30, 2); - TCGv_i64 tcg_addr; + TCGv_i64 clean_addr; - if ((!is_excl && !is_lasr) || - (is_pair && size < 2)) { - unallocated_encoding(s); + switch (o2_L_o1_o0) { + case 0x0: /* STXR */ + case 0x1: /* STLXR */ + if (rn == 31) { + gen_check_sp_alignment(s); + } + if (is_lasr) { + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); + } + clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); + gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false); return; - } - if (rn == 31) { - gen_check_sp_alignment(s); - } - tcg_addr = read_cpu_reg_sp(s, rn, 1); - - /* Note that since TCG is single threaded load-acquire/store-release - * semantics require no extra if (is_lasr) { ... } handling. - */ - - if (is_excl) { - if (!is_store) { - s->is_ldex = true; - gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair); - } else { - gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair); + case 0x4: /* LDXR */ + case 0x5: /* LDAXR */ + if (rn == 31) { + gen_check_sp_alignment(s); } - } else { - TCGv_i64 tcg_rt = cpu_reg(s, rt); - if (is_store) { - do_gpr_st(s, tcg_rt, tcg_addr, size); - } else { - do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false); + clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); + s->is_ldex = true; + gen_load_exclusive(s, rt, rt2, clean_addr, size, false); + if (is_lasr) { + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); } - if (is_pair) { - TCGv_i64 tcg_rt2 = cpu_reg(s, rt); - tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, 1ULL << size); - if (is_store) { - do_gpr_st(s, tcg_rt2, tcg_addr, size); - } else { - do_gpr_ld(s, tcg_rt2, tcg_addr, size, false, false); + return; + + case 0x8: /* STLLR */ + if (!dc_isar_feature(aa64_lor, s)) { + break; + } + /* StoreLORelease is the same as Store-Release for QEMU. */ + /* fall through */ + case 0x9: /* STLR */ + /* Generate ISS for non-exclusive accesses including LASR. */ + if (rn == 31) { + gen_check_sp_alignment(s); + } + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); + clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); + do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, + disas_ldst_compute_iss_sf(size, false, 0), is_lasr); + return; + + case 0xc: /* LDLAR */ + if (!dc_isar_feature(aa64_lor, s)) { + break; + } + /* LoadLOAcquire is the same as Load-Acquire for QEMU. */ + /* fall through */ + case 0xd: /* LDAR */ + /* Generate ISS for non-exclusive accesses including LASR. 
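/* Model of the dispatch key used by disas_ldst_excl above: the bits
 * o2 (exclusive), L (load), o1 (pair) and o0 (acquire/release) are
 * folded into one 4-bit switch value, o2:L:o1:o0. */
#include <stdint.h>

static unsigned ldst_excl_key(uint32_t insn)
{
    unsigned is_lasr = (insn >> 15) & 1;          /* o0 */
    return (((insn >> 21) & 7) << 1) | is_lasr;   /* o2:L:o1:o0 */
}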
*/ + if (rn == 31) { + gen_check_sp_alignment(s); + } + clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); + do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt, + disas_ldst_compute_iss_sf(size, false, 0), is_lasr); + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); + return; + + case 0x2: case 0x3: /* CASP / STXP */ + if (size & 2) { /* STXP / STLXP */ + if (rn == 31) { + gen_check_sp_alignment(s); } + if (is_lasr) { + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); + } + clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); + gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true); + return; } + if (rt2 == 31 + && ((rt | rs) & 1) == 0 + && dc_isar_feature(aa64_atomics, s)) { + /* CASP / CASPL */ + gen_compare_and_swap_pair(s, rs, rt, rn, size | 2); + return; + } + break; + + case 0x6: case 0x7: /* CASPA / LDXP */ + if (size & 2) { /* LDXP / LDAXP */ + if (rn == 31) { + gen_check_sp_alignment(s); + } + clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); + s->is_ldex = true; + gen_load_exclusive(s, rt, rt2, clean_addr, size, true); + if (is_lasr) { + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); + } + return; + } + if (rt2 == 31 + && ((rt | rs) & 1) == 0 + && dc_isar_feature(aa64_atomics, s)) { + /* CASPA / CASPAL */ + gen_compare_and_swap_pair(s, rs, rt, rn, size | 2); + return; + } + break; + + case 0xa: /* CAS */ + case 0xb: /* CASL */ + case 0xe: /* CASA */ + case 0xf: /* CASAL */ + if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) { + gen_compare_and_swap(s, rs, rt, rn, size); + return; + } + break; } + unallocated_encoding(s); } /* - * C3.3.5 Load register (literal) + * Load register (literal) * * 31 30 29 27 26 25 24 23 5 4 0 * +-----+-------+---+-----+-------------------+-------+ @@ -1863,12 +2676,12 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rt = extract32(insn, 0, 5); - int64_t imm = (int32_t)(((uint32_t)sextract32(insn, 5, 19)) << 2); + int64_t imm = sextract32(insn, 5, 19) << 2; bool is_vector = extract32(insn, 26, 1); int opc = extract32(insn, 30, 2); bool is_signed = false; int size = 2; - TCGv_i64 tcg_rt, tcg_addr; + TCGv_i64 tcg_rt, clean_addr; if (is_vector) { if (opc == 3) { @@ -1890,25 +2703,29 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) tcg_rt = cpu_reg(s, rt); - tcg_addr = tcg_const_i64(tcg_ctx, (s->pc - 4) + imm); + clean_addr = tcg_const_i64(tcg_ctx, s->pc_curr + imm); if (is_vector) { - do_fp_ld(s, rt, tcg_addr, size); + do_fp_ld(s, rt, clean_addr, size); } else { - do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false); + /* Only unsigned 32bit loads target 32bit registers. 
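 * For the literal form, opc fully determines the width: opc == 0 is
 * LDR Wt, opc == 1 is LDR Xt, opc == 2 is LDRSW Xt and opc == 3 is
 * PRFM (no register write), so the ISS decode collapses to a single
 * test, restating the rule above:
 *
 *     iss_sf = (opc != 0);    only opc == 0 writes a W register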
*/ + bool iss_sf = opc != 0; + + do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false, + true, rt, iss_sf, false); } - tcg_temp_free_i64(tcg_ctx, tcg_addr); + tcg_temp_free_i64(tcg_ctx, clean_addr); } /* - * C5.6.80 LDNP (Load Pair - non-temporal hint) - * C5.6.81 LDP (Load Pair - non vector) - * C5.6.82 LDPSW (Load Pair Signed Word - non vector) - * C5.6.176 STNP (Store Pair - non-temporal hint) - * C5.6.177 STP (Store Pair - non vector) - * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint) - * C6.3.165 LDP (Load Pair of SIMD&FP) - * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint) - * C6.3.284 STP (Store Pair of SIMD&FP) + * LDNP (Load Pair - non-temporal hint) + * LDP (Load Pair - non vector) + * LDPSW (Load Pair Signed Word - non vector) + * STNP (Store Pair - non-temporal hint) + * STP (Store Pair - non vector) + * LDNP (Load Pair of SIMD&FP - non-temporal hint) + * LDP (Load Pair of SIMD&FP) + * STNP (Store Pair of SIMD&FP - non-temporal hint) + * STP (Store Pair of SIMD&FP) * * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0 * +-----+-------+---+---+-------+---+-----------------------------+ @@ -1943,7 +2760,8 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) bool postindex = false; bool wback = false; - TCGv_i64 tcg_addr; /* calculated address */ + TCGv_i64 clean_addr, dirty_addr; + int size; if (opc == 3) { @@ -1999,56 +2817,63 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) gen_check_sp_alignment(s); } - tcg_addr = read_cpu_reg_sp(s, rn, 1); - + dirty_addr = read_cpu_reg_sp(s, rn, 1); if (!postindex) { - tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, offset); + tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset); } + clean_addr = clean_data_tbi(s, dirty_addr); if (is_vector) { if (is_load) { - do_fp_ld(s, rt, tcg_addr, size); + do_fp_ld(s, rt, clean_addr, size); } else { - do_fp_st(s, rt, tcg_addr, size); + do_fp_st(s, rt, clean_addr, size); + } + tcg_gen_addi_i64(tcg_ctx, clean_addr, clean_addr, 1ULL << size); + if (is_load) { + do_fp_ld(s, rt2, clean_addr, size); + } else { + do_fp_st(s, rt2, clean_addr, size); } } else { TCGv_i64 tcg_rt = cpu_reg(s, rt); - if (is_load) { - do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false); - } else { - do_gpr_st(s, tcg_rt, tcg_addr, size); - } - } - tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, 1ULL << size); - if (is_vector) { - if (is_load) { - do_fp_ld(s, rt2, tcg_addr, size); - } else { - do_fp_st(s, rt2, tcg_addr, size); - } - } else { TCGv_i64 tcg_rt2 = cpu_reg(s, rt2); + if (is_load) { - do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false); + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + /* Do not modify tcg_rt before recognizing any exception + * from the second load. 
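 * Otherwise a fault on the second access would leave Rt already
 * clobbered and the pair could not be restarted. The shape below,
 * as a sketch with tcg_ctx and the ISS arguments elided:
 *
 *     tmp = ld(addr);               may fault, nothing committed
 *     rt2 = ld(addr + (1 << size)); may fault, rt still intact
 *     rt  = tmp;                    commit after both loads succeed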
+ */ + do_gpr_ld(s, tmp, clean_addr, size, is_signed, false, + false, 0, false, false); + tcg_gen_addi_i64(tcg_ctx, clean_addr, clean_addr, 1ULL << size); + do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false, + false, 0, false, false); + + tcg_gen_mov_i64(tcg_ctx, tcg_rt, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); } else { - do_gpr_st(s, tcg_rt2, tcg_addr, size); + do_gpr_st(s, tcg_rt, clean_addr, size, + false, 0, false, false); + tcg_gen_addi_i64(tcg_ctx, clean_addr, clean_addr, 1ULL << size); + do_gpr_st(s, tcg_rt2, clean_addr, size, + false, 0, false, false); } } if (wback) { if (postindex) { - tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, offset - (1ULL << size)); - } else { - tcg_gen_subi_i64(tcg_ctx, tcg_addr, tcg_addr, 1ULL << size); + tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset); } - tcg_gen_mov_i64(tcg_ctx, cpu_reg_sp(s, rn), tcg_addr); + tcg_gen_mov_i64(tcg_ctx, cpu_reg_sp(s, rn), dirty_addr); } } /* - * C3.3.8 Load/store (immediate post-indexed) - * C3.3.9 Load/store (immediate pre-indexed) - * C3.3.12 Load/store (unscaled immediate) + * Load/store (immediate post-indexed) + * Load/store (immediate pre-indexed) + * Load/store (unscaled immediate) * * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0 * +----+-------+---+-----+-----+---+--------+-----+------+------+ @@ -2061,24 +2886,25 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32 */ -static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn) +static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, + int opc, + int size, + int rt, + bool is_vector) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int imm9 = sextract32(insn, 12, 9); - int opc = extract32(insn, 22, 2); - int size = extract32(insn, 30, 2); int idx = extract32(insn, 10, 2); bool is_signed = false; bool is_store = false; bool is_extended = false; bool is_unpriv = (idx == 2); - bool is_vector = extract32(insn, 26, 1); + bool iss_valid = !is_vector; bool post_index; bool writeback; - TCGv_i64 tcg_addr; + TCGv_i64 clean_addr, dirty_addr; if (is_vector) { size |= (opc & 2) << 1; @@ -2093,7 +2919,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn) } else { if (size == 3 && opc == 2) { /* PRFM - prefetch */ - if (is_unpriv) { + if (idx != 0) { unallocated_encoding(s); return; } @@ -2104,8 +2930,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn) return; } is_store = (opc == 0); - is_signed = opc & (1<<1); - is_extended = (size < 3) && (opc & 1); + is_signed = extract32(opc, 1, 1); + is_extended = (size < 3) && extract32(opc, 0, 1); } switch (idx) { @@ -2122,46 +2948,52 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn) post_index = false; writeback = true; break; + default: + g_assert_not_reached(); } if (rn == 31) { gen_check_sp_alignment(s); } - tcg_addr = read_cpu_reg_sp(s, rn, 1); + dirty_addr = read_cpu_reg_sp(s, rn, 1); if (!post_index) { - tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, imm9); + tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, imm9); } + clean_addr = clean_data_tbi(s, dirty_addr); if (is_vector) { if (is_store) { - do_fp_st(s, rt, tcg_addr, size); + do_fp_st(s, rt, clean_addr, size); } else { - do_fp_ld(s, rt, tcg_addr, size); + do_fp_ld(s, rt, clean_addr, size); } } else { TCGv_i64 tcg_rt = cpu_reg(s, rt); - int memidx = is_unpriv ? 
MMU_USER_IDX : get_mem_index(s); + int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s); + bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); if (is_store) { - do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx); + do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx, + iss_valid, rt, iss_sf, false); } else { - do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size, - is_signed, is_extended, memidx); + do_gpr_ld_memidx(s, tcg_rt, clean_addr, size, + is_signed, is_extended, memidx, + iss_valid, rt, iss_sf, false); } } if (writeback) { TCGv_i64 tcg_rn = cpu_reg_sp(s, rn); if (post_index) { - tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, imm9); + tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, imm9); } - tcg_gen_mov_i64(tcg_ctx, tcg_rn, tcg_addr); + tcg_gen_mov_i64(tcg_ctx, tcg_rn, dirty_addr); } } /* - * C3.3.10 Load/store (register offset) + * Load/store (register offset) * * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+ @@ -2181,23 +3013,22 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn) * Rn: address register or SP for base * Rm: offset register or ZR for offset */ -static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn) +static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn, + int opc, + int size, + int rt, + bool is_vector) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int shift = extract32(insn, 12, 1); int rm = extract32(insn, 16, 5); - int opc = extract32(insn, 22, 2); int opt = extract32(insn, 13, 3); - int size = extract32(insn, 30, 2); bool is_signed = false; bool is_store = false; bool is_extended = false; - bool is_vector = extract32(insn, 26, 1); - TCGv_i64 tcg_rm; - TCGv_i64 tcg_addr; + TCGv_i64 tcg_rm, clean_addr, dirty_addr; if (extract32(opt, 1, 1) == 0) { unallocated_encoding(s); @@ -2231,31 +3062,36 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn) if (rn == 31) { gen_check_sp_alignment(s); } - tcg_addr = read_cpu_reg_sp(s, rn, 1); + dirty_addr = read_cpu_reg_sp(s, rn, 1); tcg_rm = read_cpu_reg(s, rm, 1); ext_and_shift_reg(tcg_ctx, tcg_rm, tcg_rm, opt, shift ? 
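/* (the shift amount is either 0 or log2 of the access size, nothing
 * in between; opt<1> == 0 was rejected above, leaving UXTW 010,
 * LSL/UXTX 011, SXTW 110 and SXTX 111 as the extend choices; this
 * restates the shared decode, it adds no behaviour) */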
size : 0); - tcg_gen_add_i64(tcg_ctx, tcg_addr, tcg_addr, tcg_rm); + tcg_gen_add_i64(tcg_ctx, dirty_addr, dirty_addr, tcg_rm); + clean_addr = clean_data_tbi(s, dirty_addr); if (is_vector) { if (is_store) { - do_fp_st(s, rt, tcg_addr, size); + do_fp_st(s, rt, clean_addr, size); } else { - do_fp_ld(s, rt, tcg_addr, size); + do_fp_ld(s, rt, clean_addr, size); } } else { TCGv_i64 tcg_rt = cpu_reg(s, rt); + bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); if (is_store) { - do_gpr_st(s, tcg_rt, tcg_addr, size); + do_gpr_st(s, tcg_rt, clean_addr, size, + true, rt, iss_sf, false); } else { - do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended); + do_gpr_ld(s, tcg_rt, clean_addr, size, + is_signed, is_extended, + true, rt, iss_sf, false); } } } /* - * C3.3.13 Load/store (unsigned immediate) + * Load/store (unsigned immediate) * * 31 30 29 27 26 25 24 23 22 21 10 9 5 * +----+-------+---+-----+-----+------------+-------+------+ @@ -2271,18 +3107,18 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn) * Rn: base address register (inc SP) * Rt: target register */ -static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn) +static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn, + int opc, + int size, + int rt, + bool is_vector) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); unsigned int imm12 = extract32(insn, 10, 12); - bool is_vector = extract32(insn, 26, 1); - int size = extract32(insn, 30, 2); - int opc = extract32(insn, 22, 2); unsigned int offset; - TCGv_i64 tcg_addr; + TCGv_i64 clean_addr, dirty_addr; bool is_store; bool is_signed = false; @@ -2315,58 +3151,321 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn) if (rn == 31) { gen_check_sp_alignment(s); } - tcg_addr = read_cpu_reg_sp(s, rn, 1); + dirty_addr = read_cpu_reg_sp(s, rn, 1); offset = imm12 << size; - tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, offset); + tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset); + clean_addr = clean_data_tbi(s, dirty_addr); if (is_vector) { if (is_store) { - do_fp_st(s, rt, tcg_addr, size); + do_fp_st(s, rt, clean_addr, size); } else { - do_fp_ld(s, rt, tcg_addr, size); + do_fp_ld(s, rt, clean_addr, size); } } else { TCGv_i64 tcg_rt = cpu_reg(s, rt); + bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); if (is_store) { - do_gpr_st(s, tcg_rt, tcg_addr, size); + do_gpr_st(s, tcg_rt, clean_addr, size, + true, rt, iss_sf, false); } else { - do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended); + do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended, + true, rt, iss_sf, false); } } } +/* Atomic memory operations + * + * 31 30 27 26 24 22 21 16 15 12 10 5 0 + * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+ + * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn | Rt | + * +------+-------+---+-----+-----+--------+----+-----+-----+----+-----+ + * + * Rt: the result register + * Rn: base address or SP + * Rs: the source register for the operation + * V: vector flag (always 0 as of v8.3) + * A: acquire flag + * R: release flag + */ +static void disas_ldst_atomic(DisasContext *s, uint32_t insn, + int size, int rt, bool is_vector) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rs = extract32(insn, 16, 5); + int rn = extract32(insn, 5, 5); + int o3_opc = extract32(insn, 12, 4); + bool r = extract32(insn, 22, 1); + bool a = extract32(insn, 23, 1); + TCGv_i64 tcg_rs, clean_addr; + AtomicThreeOpFn *fn; + 
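/*
 * Each LSE read-modify-write op below maps one-to-one onto a TCG
 * atomic fetch-op; only LDCLR needs a twist, since the architecture
 * clears the bits set in Rs (a BIC on memory) while TCG provides
 * fetch-and. A sketch of the two shapes:
 *
 *     rt = atomic_fetch_add(mem, rs);     LDADD
 *     rt = atomic_fetch_and(mem, ~rs);    LDCLR, Rs inverted first
 *
 * The TCG primitives are full barriers, which is why the A and R
 * bits can be ignored for these ops (see the comment further down).
 */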
+ if (is_vector || !dc_isar_feature(aa64_atomics, s)) { + unallocated_encoding(s); + return; + } + switch (o3_opc) { + case 000: /* LDADD */ + fn = tcg_gen_atomic_fetch_add_i64; + break; + case 001: /* LDCLR */ + fn = tcg_gen_atomic_fetch_and_i64; + break; + case 002: /* LDEOR */ + fn = tcg_gen_atomic_fetch_xor_i64; + break; + case 003: /* LDSET */ + fn = tcg_gen_atomic_fetch_or_i64; + break; + case 004: /* LDSMAX */ + fn = tcg_gen_atomic_fetch_smax_i64; + break; + case 005: /* LDSMIN */ + fn = tcg_gen_atomic_fetch_smin_i64; + break; + case 006: /* LDUMAX */ + fn = tcg_gen_atomic_fetch_umax_i64; + break; + case 007: /* LDUMIN */ + fn = tcg_gen_atomic_fetch_umin_i64; + break; + case 010: /* SWP */ + fn = tcg_gen_atomic_xchg_i64; + break; + case 014: /* LDAPR, LDAPRH, LDAPRB */ + if (!dc_isar_feature(aa64_rcpc_8_3, s) || + rs != 31 || a != 1 || r != 0) { + unallocated_encoding(s); + return; + } + break; + default: + unallocated_encoding(s); + return; + } + + if (rn == 31) { + gen_check_sp_alignment(s); + } + clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); + + if (o3_opc == 014) { + /* + * LDAPR* are a special case because they are a simple load, not a + * fetch-and-do-something op. + * The architectural consistency requirements here are weaker than + * full load-acquire (we only need "load-acquire processor consistent"), + * but we choose to implement them as full LDAQ. + */ + do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, + true, rt, disas_ldst_compute_iss_sf(size, false, 0), true); + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); + return; + } + + tcg_rs = read_cpu_reg(s, rs, true); + + if (o3_opc == 1) { /* LDCLR */ + tcg_gen_not_i64(tcg_ctx, tcg_rs, tcg_rs); + } + + /* The tcg atomic primitives are all full barriers. Therefore we + * can ignore the Acquire and Release bits of this instruction. + */ + fn(tcg_ctx, cpu_reg(s, rt), clean_addr, tcg_rs, get_mem_index(s), + s->be_data | size | MO_ALIGN); +} + +/* + * PAC memory operations + * + * 31 30 27 26 24 22 21 12 11 10 5 0 + * +------+-------+---+-----+-----+---+--------+---+---+----+-----+ + * | size | 1 1 1 | V | 0 0 | M S | 1 | imm9 | W | 1 | Rn | Rt | + * +------+-------+---+-----+-----+---+--------+---+---+----+-----+ + * + * Rt: the result register + * Rn: base address or SP + * V: vector flag (always 0 as of v8.3) + * M: clear for key DA, set for key DB + * W: pre-indexing flag + * S: sign for imm9. + */ +static void disas_ldst_pac(DisasContext *s, uint32_t insn, + int size, int rt, bool is_vector) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rn = extract32(insn, 5, 5); + bool is_wback = extract32(insn, 11, 1); + bool use_key_a = !extract32(insn, 23, 1); + int offset; + TCGv_i64 clean_addr, dirty_addr, tcg_rt; + + if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) { + unallocated_encoding(s); + return; + } + + if (rn == 31) { + gen_check_sp_alignment(s); + } + dirty_addr = read_cpu_reg_sp(s, rn, 1); + + if (s->pauth_active) { + if (use_key_a) { + gen_helper_autda(tcg_ctx, dirty_addr, tcg_ctx->cpu_env, dirty_addr, tcg_ctx->cpu_X[31]); + } else { + gen_helper_autdb(tcg_ctx, dirty_addr, tcg_ctx->cpu_env, dirty_addr, tcg_ctx->cpu_X[31]); + } + } + + /* Form the 10-bit signed, scaled offset. */ + offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9); + offset = sextract32(offset << size, 0, 10 + size); + tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset); + + /* Note that "clean" and "dirty" here refer to TBI not PAC. 
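 * The ordering matters: the pointer was authenticated while still
 * dirty, autda/autdb checked and stripped the PAC bits above, and
 * only now is the top byte removed for the actual access, while
 * dirty_addr keeps the architectural value for writeback; roughly:
 *
 *     dirty = autd[ab](Rn) + offset;    PAC checked and removed
 *     clean = clean_data_tbi(dirty);    used for the load only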
*/ + clean_addr = clean_data_tbi(s, dirty_addr); + + tcg_rt = cpu_reg(s, rt); + do_gpr_ld(s, tcg_rt, clean_addr, size, /* is_signed */ false, + /* extend */ false, /* iss_valid */ !is_wback, + /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false); + + if (is_wback) { + tcg_gen_mov_i64(tcg_ctx, cpu_reg_sp(s, rn), dirty_addr); + } +} + +/* + * LDAPR/STLR (unscaled immediate) + * + * 31 30 24 22 21 12 10 5 0 + * +------+-------------+-----+---+--------+-----+----+-----+ + * | size | 0 1 1 0 0 1 | opc | 0 | imm9 | 0 0 | Rn | Rt | + * +------+-------------+-----+---+--------+-----+----+-----+ + * + * Rt: source or destination register + * Rn: base register + * imm9: unscaled immediate offset + * opc: 00: STLUR*, 01/10/11: various LDAPUR* + * size: size of load/store + */ +static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rt = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int offset = sextract32(insn, 12, 9); + int opc = extract32(insn, 22, 2); + int size = extract32(insn, 30, 2); + TCGv_i64 clean_addr, dirty_addr; + bool is_store = false; + bool is_signed = false; + bool extend = false; + bool iss_sf; + + if (!dc_isar_feature(aa64_rcpc_8_4, s)) { + unallocated_encoding(s); + return; + } + + switch (opc) { + case 0: /* STLURB */ + is_store = true; + break; + case 1: /* LDAPUR* */ + break; + case 2: /* LDAPURS* 64-bit variant */ + if (size == 3) { + unallocated_encoding(s); + return; + } + is_signed = true; + break; + case 3: /* LDAPURS* 32-bit variant */ + if (size > 1) { + unallocated_encoding(s); + return; + } + is_signed = true; + extend = true; /* zero-extend 32->64 after signed load */ + break; + default: + g_assert_not_reached(); + } + + iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc); + + if (rn == 31) { + gen_check_sp_alignment(s); + } + + dirty_addr = read_cpu_reg_sp(s, rn, 1); + tcg_gen_addi_i64(tcg_ctx, dirty_addr, dirty_addr, offset); + clean_addr = clean_data_tbi(s, dirty_addr); + + if (is_store) { + /* Store-Release semantics */ + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); + do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true); + } else { + /* + * Load-AcquirePC semantics; we implement as the slightly more + * restrictive Load-Acquire. 
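 * The over-approximation is always sound: RCpc only relaxes the
 * ordering between an earlier Store-Release and a later
 * Load-AcquirePC to a different address, so treating LDAPUR* as a
 * full acquire adds ordering and never removes any, e.g.
 *
 *     STLR w0, [x1]; LDAPUR w2, [x3]    may reorder architecturally
 *     STLR w0, [x1]; LDAR   w2, [x3]    never reorders, still valid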
+ */ + do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, is_signed, extend, + true, rt, iss_sf, true); + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); + } +} + /* Load/store register (all forms) */ static void disas_ldst_reg(DisasContext *s, uint32_t insn) { + int rt = extract32(insn, 0, 5); + int opc = extract32(insn, 22, 2); + bool is_vector = extract32(insn, 26, 1); + int size = extract32(insn, 30, 2); + switch (extract32(insn, 24, 2)) { case 0: - if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) { - disas_ldst_reg_roffset(s, insn); - } else { + if (extract32(insn, 21, 1) == 0) { /* Load/store register (unscaled immediate) * Load/store immediate pre/post-indexed * Load/store register unprivileged */ - disas_ldst_reg_imm9(s, insn); + disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector); + return; + } + switch (extract32(insn, 10, 2)) { + case 0: + disas_ldst_atomic(s, insn, size, rt, is_vector); + return; + case 2: + disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector); + return; + default: + disas_ldst_pac(s, insn, size, rt, is_vector); + return; } break; case 1: - disas_ldst_reg_unsigned_imm(s, insn); - break; - default: - unallocated_encoding(s); - break; + disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector); + return; } + unallocated_encoding(s); } -/* C3.3.1 AdvSIMD load/store multiple structures +/* AdvSIMD load/store multiple structures * * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0 * +---+---+---------------+---+-------------+--------+------+------+------+ * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt | * +---+---+---------------+---+-------------+--------+------+------+------+ * - * C3.3.2 AdvSIMD load/store multiple structures (post-indexed) + * AdvSIMD load/store multiple structures (post-indexed) * * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0 * +---+---+---------------+---+---+---------+--------+------+------+------+ @@ -2382,15 +3481,17 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) TCGContext *tcg_ctx = s->uc->tcg_ctx; int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); + int rm = extract32(insn, 16, 5); int size = extract32(insn, 10, 2); int opcode = extract32(insn, 12, 4); bool is_store = !extract32(insn, 22, 1); bool is_postidx = extract32(insn, 23, 1); bool is_q = extract32(insn, 30, 1); - TCGv_i64 tcg_addr, tcg_rn; + TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; + MemOp endian = s->be_data; - int ebytes = 1 << size; - int elements = (is_q ? 128 : 64) / (8 << size); + int ebytes; /* bytes per element */ + int elements; /* elements per vector */ int rpt; /* num iterations */ int selem; /* structure elements */ int r; @@ -2400,6 +3501,11 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) return; } + if (!is_postidx && rm != 0) { + unallocated_encoding(s); + return; + } + /* From the shared decode logic */ switch (opcode) { case 0x0: @@ -2449,57 +3555,72 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) gen_check_sp_alignment(s); } + /* For our purposes, bytes are always little-endian. */ + if (size == 0) { + endian = MO_LE; + } + + /* Consecutive little-endian elements from a single register + * can be promoted to a larger little-endian operation. + */ + if (selem == 1 && endian == MO_LE) { + size = 3; + } + ebytes = 1 << size; + elements = (is_q ? 
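/* (e.g. LD1 {v0.16b}, [x0] becomes two 8-byte little-endian loads
 * rather than sixteen byte loads: selem == 1 and MO_LE promote size
 * to 3, so ebytes == 8 and this division yields one or two elements
 * per register) */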
16 : 8) / ebytes; + tcg_rn = cpu_reg_sp(s, rn); - tcg_addr = tcg_temp_new_i64(tcg_ctx); - tcg_gen_mov_i64(tcg_ctx, tcg_addr, tcg_rn); + clean_addr = clean_data_tbi(s, tcg_rn); + tcg_ebytes = tcg_const_i64(tcg_ctx, ebytes); for (r = 0; r < rpt; r++) { int e; for (e = 0; e < elements; e++) { - int tt = (rt + r) % 32; int xs; for (xs = 0; xs < selem; xs++) { + int tt = (rt + r + xs) % 32; if (is_store) { - do_vec_st(s, tt, e, tcg_addr, size); + do_vec_st(s, tt, e, clean_addr, size, endian); } else { - do_vec_ld(s, tt, e, tcg_addr, size); - - /* For non-quad operations, setting a slice of the low - * 64 bits of the register clears the high 64 bits (in - * the ARM ARM pseudocode this is implicit in the fact - * that 'rval' is a 64 bit wide variable). We optimize - * by noticing that we only need to do this the first - * time we touch a register. - */ - if (!is_q && e == 0 && (r == 0 || xs == selem - 1)) { - clear_vec_high(s, tt); - } + do_vec_ld(s, tt, e, clean_addr, size, endian); } - tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, ebytes); - tt = (tt + 1) % 32; + tcg_gen_add_i64(tcg_ctx, clean_addr, clean_addr, tcg_ebytes); } } } + tcg_temp_free_i64(tcg_ctx, tcg_ebytes); + + if (!is_store) { + /* For non-quad operations, setting a slice of the low + * 64 bits of the register clears the high 64 bits (in + * the ARM ARM pseudocode this is implicit in the fact + * that 'rval' is a 64 bit wide variable). + * For quad operations, we might still need to zero the + * high bits of SVE. + */ + for (r = 0; r < rpt * selem; r++) { + int tt = (rt + r) % 32; + clear_vec_high(s, is_q, tt); + } + } if (is_postidx) { - int rm = extract32(insn, 16, 5); if (rm == 31) { - tcg_gen_mov_i64(tcg_ctx, tcg_rn, tcg_addr); + tcg_gen_addi_i64(tcg_ctx, tcg_rn, tcg_rn, rpt * elements * selem * ebytes); } else { tcg_gen_add_i64(tcg_ctx, tcg_rn, tcg_rn, cpu_reg(s, rm)); } } - tcg_temp_free_i64(tcg_ctx, tcg_addr); } -/* C3.3.3 AdvSIMD load/store single structure +/* AdvSIMD load/store single structure * * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0 * +---+---+---------------+-----+-----------+-----+---+------+------+------+ * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt | * +---+---+---------------+-----+-----------+-----+---+------+------+------+ * - * C3.3.4 AdvSIMD load/store single structure (post-indexed) + * AdvSIMD load/store single structure (post-indexed) * * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0 * +---+---+---------------+-----+-----------+-----+---+------+------+------+ @@ -2519,6 +3640,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) TCGContext *tcg_ctx = s->uc->tcg_ctx; int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); + int rm = extract32(insn, 16, 5); int size = extract32(insn, 10, 2); int S = extract32(insn, 12, 1); int opc = extract32(insn, 13, 3); @@ -2532,7 +3654,16 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) bool replicate = false; int index = is_q << 3 | S << 2 | size; int ebytes, xs; - TCGv_i64 tcg_addr, tcg_rn; + TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; + + if (extract32(insn, 31, 1)) { + unallocated_encoding(s); + return; + } + if (!is_postidx && rm != 0) { + unallocated_encoding(s); + return; + } switch (scale) { case 3: @@ -2583,67 +3714,43 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) } tcg_rn = cpu_reg_sp(s, rn); - tcg_addr = tcg_temp_new_i64(tcg_ctx); - tcg_gen_mov_i64(tcg_ctx, tcg_addr, tcg_rn); + clean_addr = clean_data_tbi(s, tcg_rn); + tcg_ebytes = 
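/* In the replicate (LD1R-style) path of the loop below, the old
 * multiply-by-0x0101..01 broadcast trick is gone: tcg_gen_gvec_dup_i64
 * duplicates the loaded element across the vector directly, with
 * (is_q + 1) * 8 as the active width in bytes, and the gvec op also
 * zeroes the high part of the register for the non-quad case. */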
tcg_const_i64(tcg_ctx, ebytes); for (xs = 0; xs < selem; xs++) { if (replicate) { /* Load and replicate to all elements */ - uint64_t mulconst; TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); - tcg_gen_qemu_ld_i64(s->uc, tcg_tmp, tcg_addr, - get_mem_index(s), MO_TE + scale); - switch (scale) { - case 0: - mulconst = 0x0101010101010101ULL; - break; - case 1: - mulconst = 0x0001000100010001ULL; - break; - case 2: - mulconst = 0x0000000100000001ULL; - break; - case 3: - mulconst = 0; - break; - default: - g_assert_not_reached(); - } - if (mulconst) { - tcg_gen_muli_i64(tcg_ctx, tcg_tmp, tcg_tmp, mulconst); - } - write_vec_element(s, tcg_tmp, rt, 0, MO_64); - if (is_q) { - write_vec_element(s, tcg_tmp, rt, 1, MO_64); - } else { - clear_vec_high(s, rt); - } + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_tmp, clean_addr, + get_mem_index(s), s->be_data + scale); + tcg_gen_gvec_dup_i64(tcg_ctx, scale, vec_full_reg_offset(s, rt), + (is_q + 1) * 8, vec_full_reg_size(s), + tcg_tmp); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } else { /* Load/store one element per register */ if (is_load) { - do_vec_ld(s, rt, index, tcg_addr, MO_TE + scale); + do_vec_ld(s, rt, index, clean_addr, scale, s->be_data); } else { - do_vec_st(s, rt, index, tcg_addr, MO_TE + scale); + do_vec_st(s, rt, index, clean_addr, scale, s->be_data); } } - tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, ebytes); + tcg_gen_add_i64(tcg_ctx, clean_addr, clean_addr, tcg_ebytes); rt = (rt + 1) % 32; } + tcg_temp_free_i64(tcg_ctx, tcg_ebytes); if (is_postidx) { - int rm = extract32(insn, 16, 5); if (rm == 31) { - tcg_gen_mov_i64(tcg_ctx, tcg_rn, tcg_addr); + tcg_gen_addi_i64(tcg_ctx, tcg_rn, tcg_rn, selem * ebytes); } else { tcg_gen_add_i64(tcg_ctx, tcg_rn, tcg_rn, cpu_reg(s, rm)); } } - tcg_temp_free_i64(tcg_ctx, tcg_addr); } -/* C3.3 Loads and stores */ +/* Loads and stores */ static void disas_ldst(DisasContext *s, uint32_t insn) { switch (extract32(insn, 24, 6)) { @@ -2667,13 +3774,21 @@ static void disas_ldst(DisasContext *s, uint32_t insn) case 0x0d: /* AdvSIMD load/store single structure */ disas_ldst_single_struct(s, insn); break; + case 0x19: /* LDAPR/STLR (unscaled immediate) */ + if (extract32(insn, 10, 2) != 0 || + extract32(insn, 21, 1) != 0) { + unallocated_encoding(s); + break; + } + disas_ldst_ldapr_stlr(s, insn); + break; default: unallocated_encoding(s); break; } } -/* C3.4.6 PC-rel. addressing +/* PC-rel. 
addressing * 31 30 29 28 24 23 5 4 0 * +----+-------+-----------+-------------------+------+ * | op | immlo | 1 0 0 0 0 | immhi | Rd | @@ -2684,25 +3799,26 @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn) TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int page, rd; uint64_t base; - int64_t offset; + uint64_t offset; page = extract32(insn, 31, 1); /* SignExtend(immhi:immlo) -> offset */ - offset = (int64_t)((uint64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2); + offset = sextract64(insn, 5, 19); + offset = offset << 2 | extract32(insn, 29, 2); rd = extract32(insn, 0, 5); - base = s->pc - 4; + base = s->pc_curr; if (page) { /* ADRP (page based) */ base &= ~0xfff; - offset = ((uint64_t)offset) << 12; + offset <<= 12; } tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, rd), base + offset); } /* - * C3.4.1 Add/subtract (immediate) + * Add/subtract (immediate) * * 31 30 29 28 24 23 22 21 10 9 5 4 0 * +--+--+--+-----------+-----+-------------+-----+-----+ @@ -2750,9 +3866,9 @@ static void disas_add_sub_imm(DisasContext *s, uint32_t insn) } else { TCGv_i64 tcg_imm = tcg_const_i64(tcg_ctx, imm); if (sub_op) { - gen_sub_CC(s, is_64bit, tcg_result, tcg_rn, tcg_imm); + gen_sub_CC(tcg_ctx, is_64bit, tcg_result, tcg_rn, tcg_imm); } else { - gen_add_CC(s, is_64bit, tcg_result, tcg_rn, tcg_imm); + gen_add_CC(tcg_ctx, is_64bit, tcg_result, tcg_rn, tcg_imm); } tcg_temp_free_i64(tcg_ctx, tcg_imm); } @@ -2792,8 +3908,8 @@ static inline uint64_t bitmask64(unsigned int length) * value (ie should cause a guest UNDEF exception), and true if they are * valid, in which case the decoded bit pattern is written to result. */ -static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, - unsigned int imms, unsigned int immr) +bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, + unsigned int imms, unsigned int immr) { uint64_t mask; unsigned e, levels, s, r; @@ -2843,14 +3959,17 @@ static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, * by r within the element (which is e bits wide)... */ mask = bitmask64(s + 1); - mask = (mask >> r) | (mask << ((e - r) & 0x3f) ); + if (r) { + mask = (mask >> r) | (mask << (e - r)); + mask &= bitmask64(e); + } /* ...then replicate the element over the whole 64 bit value */ mask = bitfield_replicate(mask, e); *result = mask; return true; } -/* C3.4.4 Logical (immediate) +/* Logical (immediate) * 31 30 29 28 23 22 21 16 15 10 9 5 4 0 * +----+-----+-------------+---+------+------+------+------+ * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd | @@ -2924,7 +4043,7 @@ static void disas_logic_imm(DisasContext *s, uint32_t insn) } /* - * C3.4.5 Move wide (immediate) + * Move wide (immediate) * * 31 30 29 28 23 22 21 20 5 4 0 * +--+-----+-------------+-----+----------------+------+ @@ -2977,7 +4096,7 @@ static void disas_movw_imm(DisasContext *s, uint32_t insn) } } -/* C3.4.2 Bitfield +/* Bitfield * 31 30 29 28 23 22 21 16 15 10 9 5 4 0 * +----+-----+-------------+---+------+------+------+------+ * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd | @@ -3004,39 +4123,59 @@ static void disas_bitfield(DisasContext *s, uint32_t insn) } tcg_rd = cpu_reg(s, rd); - tcg_tmp = read_cpu_reg(s, rn, sf); - /* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */ + /* Suppress the zero-extend for !sf. Since RI and SI are constrained + to be smaller than bitsize, we'll never reference data outside the + low 32-bits anyway. 
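 *
 * With the extract/deposit primitives the common cases then collapse
 * to single TCG ops instead of the old shift+deposit+shift sequence,
 * roughly:
 *
 *     UBFX, LSR, UXTB/H:   rd = extract(rn, ri, si - ri + 1);
 *     SBFX, ASR, SXTB/H/W: rd = sextract(rn, ri, si - ri + 1);
 *     BFXIL:               rd = deposit(rd, rn >> ri, 0, len);
 *
 * and only the BFM/BFXIL family still reads the old value of Rd.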
*/ + tcg_tmp = read_cpu_reg(s, rn, 1); - if (opc != 1) { /* SBFM or UBFM */ - tcg_gen_movi_i64(tcg_ctx, tcg_rd, 0); - } - - /* do the bit move operation */ + /* Recognize simple(r) extractions. */ if (si >= ri) { /* Wd = Wn */ + len = (si - ri) + 1; + if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */ + tcg_gen_sextract_i64(tcg_ctx, tcg_rd, tcg_tmp, ri, len); + goto done; + } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */ + tcg_gen_extract_i64(tcg_ctx, tcg_rd, tcg_tmp, ri, len); + return; + } + /* opc == 1, BFXIL fall through to deposit */ tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_tmp, ri); pos = 0; - len = (si - ri) + 1; } else { - /* Wd<32+s-r,32-r> = Wn */ - pos = bitsize - ri; + /* Handle the ri > si case with a deposit + * Wd<32+s-r,32-r> = Wn + */ len = si + 1; + pos = (bitsize - ri) & (bitsize - 1); } - tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp, pos, len); - - if (opc == 0) { /* SBFM - sign extend the destination field */ - tcg_gen_shli_i64(tcg_ctx, tcg_rd, tcg_rd, 64 - (pos + len)); - tcg_gen_sari_i64(tcg_ctx, tcg_rd, tcg_rd, 64 - (pos + len)); + if (opc == 0 && len < ri) { + /* SBFM: sign extend the destination field from len to fill + the balance of the word. Let the deposit below insert all + of those sign bits. */ + tcg_gen_sextract_i64(tcg_ctx, tcg_tmp, tcg_tmp, 0, len); + len = ri; } + if (opc == 1) { /* BFM, BFXIL */ + tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp, pos, len); + } else { + /* SBFM or UBFM: We start with zero, and we haven't modified + any bits outside bitsize, therefore the zero-extension + below is unneeded. */ + tcg_gen_deposit_z_i64(tcg_ctx, tcg_rd, tcg_tmp, pos, len); + return; + } + + done: if (!sf) { /* zero extend final result */ tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); } } -/* C3.4.3 Extract +/* Extract * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0 * +----+------+-------------+---+----+------+--------+------+------+ * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd | @@ -3064,17 +4203,7 @@ static void disas_extract(DisasContext *s, uint32_t insn) tcg_rd = cpu_reg(s, rd); - if (imm) { - /* OPTME: we can special case rm==rn as a rotate */ - tcg_rm = read_cpu_reg(s, rm, sf); - tcg_rn = read_cpu_reg(s, rn, sf); - tcg_gen_shri_i64(tcg_ctx, tcg_rm, tcg_rm, imm); - tcg_gen_shli_i64(tcg_ctx, tcg_rn, tcg_rn, bitsize - imm); - tcg_gen_or_i64(tcg_ctx, tcg_rd, tcg_rm, tcg_rn); - if (!sf) { - tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); - } - } else { + if (unlikely(imm == 0)) { /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts, * so an extract from bit 0 is a special case. */ @@ -3083,12 +4212,33 @@ static void disas_extract(DisasContext *s, uint32_t insn) } else { tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, cpu_reg(s, rm)); } - } + } else { + tcg_rm = cpu_reg(s, rm); + tcg_rn = cpu_reg(s, rn); + if (sf) { + /* Specialization to ROR happens in EXTRACT2. 
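 * EXTR is the generic funnel shift, rd = (rn:rm) >> imm with the low
 * half taken from Rm, so rm == rn degenerates to a rotate right;
 * extract2 lets the backend pick that specialization up itself,
 * while the 32-bit path below performs the rotri explicitly:
 *
 *     rd = extract2(lo = rm, hi = rn, imm);    64-bit case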
*/ + tcg_gen_extract2_i64(tcg_ctx, tcg_rd, tcg_rm, tcg_rn, imm); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_extrl_i64_i32(tcg_ctx, t0, tcg_rm); + if (rm == rn) { + tcg_gen_rotri_i32(tcg_ctx, t0, t0, imm); + } else { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, t1, tcg_rn); + tcg_gen_extract2_i32(tcg_ctx, t0, t0, t1, imm); + tcg_temp_free_i32(tcg_ctx, t1); + } + tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } + } } } -/* C3.4 Data processing - immediate */ +/* Data processing - immediate */ static void disas_data_proc_imm(DisasContext *s, uint32_t insn) { switch (extract32(insn, 23, 6)) { @@ -3144,8 +4294,8 @@ static void shift_reg(TCGContext *tcg_ctx, TCGv_i64 dst, TCGv_i64 src, int sf, TCGv_i32 t0, t1; t0 = tcg_temp_new_i32(tcg_ctx); t1 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, t0, src); - tcg_gen_trunc_i64_i32(tcg_ctx, t1, shift_amount); + tcg_gen_extrl_i64_i32(tcg_ctx, t0, src); + tcg_gen_extrl_i64_i32(tcg_ctx, t1, shift_amount); tcg_gen_rotr_i32(tcg_ctx, t0, t0, t1); tcg_gen_extu_i32_i64(tcg_ctx, dst, t0); tcg_temp_free_i32(tcg_ctx, t0); @@ -3182,7 +4332,7 @@ static void shift_reg_imm(TCGContext *tcg_ctx, TCGv_i64 dst, TCGv_i64 src, int s } } -/* C3.5.10 Logical (shifted register) +/* Logical (shifted register) * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0 * +----+-----+-----------+-------+---+------+--------+------+------+ * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd | @@ -3274,7 +4424,7 @@ static void disas_logic_reg(DisasContext *s, uint32_t insn) } /* - * C3.5.1 Add/subtract (extended register) + * Add/subtract (extended register) * * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0| * +--+--+--+-----------+-----+--+-------+------+------+----+----+ @@ -3298,6 +4448,7 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) int imm3 = extract32(insn, 10, 3); int option = extract32(insn, 13, 3); int rm = extract32(insn, 16, 5); + int opt = extract32(insn, 22, 2); bool setflags = extract32(insn, 29, 1); bool sub_op = extract32(insn, 30, 1); bool sf = extract32(insn, 31, 1); @@ -3306,7 +4457,7 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) TCGv_i64 tcg_rd; TCGv_i64 tcg_result; - if (imm3 > 4) { + if (imm3 > 4 || opt != 0) { unallocated_encoding(s); return; } @@ -3332,9 +4483,9 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) } } else { if (sub_op) { - gen_sub_CC(s, sf, tcg_result, tcg_rn, tcg_rm); + gen_sub_CC(tcg_ctx, sf, tcg_result, tcg_rn, tcg_rm); } else { - gen_add_CC(s, sf, tcg_result, tcg_rn, tcg_rm); + gen_add_CC(tcg_ctx, sf, tcg_result, tcg_rn, tcg_rm); } } @@ -3348,7 +4499,7 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) } /* - * C3.5.2 Add/subtract (shifted register) + * Add/subtract (shifted register) * * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0 * +--+--+--+-----------+-----+--+-------+---------+------+------+ @@ -3397,9 +4548,9 @@ static void disas_add_sub_reg(DisasContext *s, uint32_t insn) } } else { if (sub_op) { - gen_sub_CC(s, sf, tcg_result, tcg_rn, tcg_rm); + gen_sub_CC(tcg_ctx, sf, tcg_result, tcg_rn, tcg_rm); } else { - gen_add_CC(s, sf, tcg_result, tcg_rn, tcg_rm); + gen_add_CC(tcg_ctx, sf, tcg_result, tcg_rn, tcg_rm); } } @@ -3412,13 +4563,12 @@ static void disas_add_sub_reg(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_ctx, tcg_result); } -/* C3.5.9 Data-processing (3 source) - - 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0 - 
+--+------+-----------+------+------+----+------+------+------+ - |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd | - +--+------+-----------+------+------+----+------+------+------+ - +/* Data-processing (3 source) + * + * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0 + * +--+------+-----------+------+------+----+------+------+------+ + * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd | + * +--+------+-----------+------+------+----+------+------+------+ */ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) { @@ -3512,12 +4662,11 @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_ctx, tcg_tmp); } -/* C3.5.3 - Add/subtract (with carry) - * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0 - * +--+--+--+------------------------+------+---------+------+-----+ - * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd | - * +--+--+--+------------------------+------+---------+------+-----+ - * [000000] +/* Add/subtract (with carry) + * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0 + * +--+--+--+------------------------+------+-------------+------+-----+ + * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | 0 0 0 0 0 0 | Rn | Rd | + * +--+--+--+------------------------+------+-------------+------+-----+ */ static void disas_adc_sbc(DisasContext *s, uint32_t insn) @@ -3526,11 +4675,6 @@ static void disas_adc_sbc(DisasContext *s, uint32_t insn) unsigned int sf, op, setflags, rm, rn, rd; TCGv_i64 tcg_y, tcg_rn, tcg_rd; - if (extract32(insn, 10, 6) != 0) { - unallocated_encoding(s); - return; - } - sf = extract32(insn, 31, 1); op = extract32(insn, 30, 1); setflags = extract32(insn, 29, 1); @@ -3549,13 +4693,93 @@ static void disas_adc_sbc(DisasContext *s, uint32_t insn) } if (setflags) { - gen_adc_CC(s, sf, tcg_rd, tcg_rn, tcg_y); + gen_adc_CC(tcg_ctx, sf, tcg_rd, tcg_rn, tcg_y); } else { - gen_adc(s, sf, tcg_rd, tcg_rn, tcg_y); + gen_adc(tcg_ctx, sf, tcg_rd, tcg_rn, tcg_y); } } -/* C3.5.4 - C3.5.5 Conditional compare (immediate / register) +/* + * Rotate right into flags + * 31 30 29 21 15 10 5 4 0 + * +--+--+--+-----------------+--------+-----------+------+--+------+ + * |sf|op| S| 1 1 0 1 0 0 0 0 | imm6 | 0 0 0 0 1 | Rn |o2| mask | + * +--+--+--+-----------------+--------+-----------+------+--+------+ + */ +static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int mask = extract32(insn, 0, 4); + int o2 = extract32(insn, 4, 1); + int rn = extract32(insn, 5, 5); + int imm6 = extract32(insn, 15, 6); + int sf_op_s = extract32(insn, 29, 3); + TCGv_i64 tcg_rn; + TCGv_i32 nzcv; + + if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) { + unallocated_encoding(s); + return; + } + + tcg_rn = read_cpu_reg(s, rn, 1); + tcg_gen_rotri_i64(tcg_ctx, tcg_rn, tcg_rn, imm6); + + nzcv = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, nzcv, tcg_rn); + + if (mask & 8) { /* N */ + tcg_gen_shli_i32(tcg_ctx, tcg_ctx->cpu_NF, nzcv, 31 - 3); + } + if (mask & 4) { /* Z */ + tcg_gen_not_i32(tcg_ctx, tcg_ctx->cpu_ZF, nzcv); + tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, 4); + } + if (mask & 2) { /* C */ + tcg_gen_extract_i32(tcg_ctx, tcg_ctx->cpu_CF, nzcv, 1, 1); + } + if (mask & 1) { /* V */ + tcg_gen_shli_i32(tcg_ctx, tcg_ctx->cpu_VF, nzcv, 31 - 0); + } + + tcg_temp_free_i32(tcg_ctx, nzcv); +} + +/* + * Evaluate into flags + * 31 30 29 21 15 14 10 5 4 0 + * +--+--+--+-----------------+---------+----+---------+------+--+------+ + * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 
| sz | 0 0 1 0 | Rn |o3| mask | + * +--+--+--+-----------------+---------+----+---------+------+--+------+ + */ +static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int o3_mask = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int o2 = extract32(insn, 15, 6); + int sz = extract32(insn, 14, 1); + int sf_op_s = extract32(insn, 29, 3); + TCGv_i32 tmp; + int shift; + + if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd || + !dc_isar_feature(aa64_condm_4, s)) { + unallocated_encoding(s); + return; + } + shift = sz ? 16 : 24; /* SETF16 or SETF8 */ + + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, tmp, cpu_reg(s, rn)); + tcg_gen_shli_i32(tcg_ctx, tcg_ctx->cpu_NF, tmp, shift); + tcg_gen_shli_i32(tcg_ctx, tcg_ctx->cpu_VF, tmp, shift - 1); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +/* Conditional compare (immediate / register) * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+ * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv | @@ -3566,8 +4790,9 @@ static void disas_cc(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, op, y, cond, rn, nzcv, is_imm; - int label_continue = -1; + TCGv_i32 tcg_t0, tcg_t1, tcg_t2; TCGv_i64 tcg_tmp, tcg_y, tcg_rn; + DisasCompare c; if (!extract32(insn, 29, 1)) { unallocated_encoding(s); @@ -3585,19 +4810,13 @@ static void disas_cc(DisasContext *s, uint32_t insn) rn = extract32(insn, 5, 5); nzcv = extract32(insn, 0, 4); - if (cond < 0x0e) { /* not always */ - int label_match = gen_new_label(tcg_ctx); - label_continue = gen_new_label(tcg_ctx); - arm_gen_test_cc(tcg_ctx, cond, label_match); - /* nomatch: */ - tcg_tmp = tcg_temp_new_i64(tcg_ctx); - tcg_gen_movi_i64(tcg_ctx, tcg_tmp, nzcv << 28); - gen_set_nzcv(tcg_ctx, tcg_tmp); - tcg_temp_free_i64(tcg_ctx, tcg_tmp); - tcg_gen_br(tcg_ctx, label_continue); - gen_set_label(tcg_ctx, label_match); - } - /* match, or condition is always */ + /* Set T0 = !COND. */ + tcg_t0 = tcg_temp_new_i32(tcg_ctx); + arm_test_cc(tcg_ctx, &c, cond); + tcg_gen_setcondi_i32(tcg_ctx, tcg_invert_cond(c.cond), tcg_t0, c.value, 0); + arm_free_cc(tcg_ctx, &c); + + /* Load the arguments for the new comparison. */ if (is_imm) { tcg_y = new_tmp_a64(s); tcg_gen_movi_i64(tcg_ctx, tcg_y, y); @@ -3606,20 +4825,67 @@ static void disas_cc(DisasContext *s, uint32_t insn) } tcg_rn = cpu_reg(s, rn); + /* Set the flags for the new comparison. */ tcg_tmp = tcg_temp_new_i64(tcg_ctx); if (op) { - gen_sub_CC(s, sf, tcg_tmp, tcg_rn, tcg_y); + gen_sub_CC(tcg_ctx, sf, tcg_tmp, tcg_rn, tcg_y); } else { - gen_add_CC(s, sf, tcg_tmp, tcg_rn, tcg_y); + gen_add_CC(tcg_ctx, sf, tcg_tmp, tcg_rn, tcg_y); } tcg_temp_free_i64(tcg_ctx, tcg_tmp); - if (cond < 0x0e) { /* continue */ - gen_set_label(tcg_ctx, label_continue); + /* If COND was false, force the flags to #nzcv. Compute two masks + * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0). + * For tcg hosts that support ANDC, we can make do with just T1. + * In either case, allow the tcg optimizer to delete any unused mask. 
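 *
 * With T0 = !COND, T1 = -T0 and T2 = T0 - 1, each flag then folds
 * both outcomes branch-free; for N, as a sketch:
 *
 *     nzcv & 8 set:    NF |= T1;     forced negative when !COND
 *     nzcv & 8 clear:  NF &= ~T1;    forced non-negative when !COND
 *
 * and when COND held, T1 == 0 and T2 == -1, so the freshly computed
 * flags pass through untouched.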
+ */ + tcg_t1 = tcg_temp_new_i32(tcg_ctx); + tcg_t2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_neg_i32(tcg_ctx, tcg_t1, tcg_t0); + tcg_gen_subi_i32(tcg_ctx, tcg_t2, tcg_t0, 1); + + if (nzcv & 8) { /* N */ + tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_NF, tcg_t1); + } else { + if (TCG_TARGET_HAS_andc_i32) { + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_NF, tcg_t1); + } else { + tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_NF, tcg_t2); + } } + if (nzcv & 4) { /* Z */ + if (TCG_TARGET_HAS_andc_i32) { + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, tcg_t1); + } else { + tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, tcg_t2); + } + } else { + tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, tcg_t0); + } + if (nzcv & 2) { /* C */ + tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, tcg_t0); + } else { + if (TCG_TARGET_HAS_andc_i32) { + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, tcg_t1); + } else { + tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, tcg_t2); + } + } + if (nzcv & 1) { /* V */ + tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tcg_t1); + } else { + if (TCG_TARGET_HAS_andc_i32) { + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tcg_t1); + } else { + tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tcg_t2); + } + } + tcg_temp_free_i32(tcg_ctx, tcg_t0); + tcg_temp_free_i32(tcg_ctx, tcg_t1); + tcg_temp_free_i32(tcg_ctx, tcg_t2); } -/* C3.5.6 Conditional select +/* Conditional select * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0 * +----+----+---+-----------------+------+------+-----+------+------+ * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd | @@ -3629,7 +4895,8 @@ static void disas_cond_select(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, else_inv, rm, cond, else_inc, rn, rd; - TCGv_i64 tcg_rd, tcg_src; + TCGv_i64 tcg_rd, zero; + DisasCompare64 c; if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) { /* S == 1 or op2<1> == 1 */ @@ -3644,48 +4911,35 @@ static void disas_cond_select(DisasContext *s, uint32_t insn) rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); - if (rd == 31) { - /* silly no-op write; until we use movcond we must special-case - * this to avoid a dead temporary across basic blocks. - */ - return; - } - tcg_rd = cpu_reg(s, rd); - if (cond >= 0x0e) { /* condition "always" */ - tcg_src = read_cpu_reg(s, rn, sf); - tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_src); + a64_test_cc(tcg_ctx, &c, cond); + zero = tcg_const_i64(tcg_ctx, 0); + + if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) { + /* CSET & CSETM. */ + tcg_gen_setcond_i64(tcg_ctx, tcg_invert_cond(c.cond), tcg_rd, c.value, zero); + if (else_inv) { + tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rd); + } } else { - /* OPTME: we could use movcond here, at the cost of duplicating - * a lot of the arm_gen_test_cc() logic. 
- */ - int label_match = gen_new_label(tcg_ctx); - int label_continue = gen_new_label(tcg_ctx); - - arm_gen_test_cc(tcg_ctx, cond, label_match); - /* nomatch: */ - tcg_src = cpu_reg(s, rm); - + TCGv_i64 t_true = cpu_reg(s, rn); + TCGv_i64 t_false = read_cpu_reg(s, rm, 1); if (else_inv && else_inc) { - tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_src); + tcg_gen_neg_i64(tcg_ctx, t_false, t_false); } else if (else_inv) { - tcg_gen_not_i64(tcg_ctx, tcg_rd, tcg_src); + tcg_gen_not_i64(tcg_ctx, t_false, t_false); } else if (else_inc) { - tcg_gen_addi_i64(tcg_ctx, tcg_rd, tcg_src, 1); - } else { - tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_src); + tcg_gen_addi_i64(tcg_ctx, t_false, t_false, 1); } - if (!sf) { - tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); - } - tcg_gen_br(tcg_ctx, label_continue); - /* match: */ - gen_set_label(tcg_ctx, label_match); - tcg_src = read_cpu_reg(s, rn, sf); - tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_src); - /* continue: */ - gen_set_label(tcg_ctx, label_continue); + tcg_gen_movcond_i64(tcg_ctx, c.cond, tcg_rd, c.value, zero, t_true, t_false); + } + + tcg_temp_free_i64(tcg_ctx, zero); + a64_free_cc(tcg_ctx, &c); + + if (!sf) { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); } } @@ -3698,11 +4952,11 @@ static void handle_clz(DisasContext *s, unsigned int sf, tcg_rn = cpu_reg(s, rn); if (sf) { - gen_helper_clz64(tcg_ctx, tcg_rd, tcg_rn); + tcg_gen_clzi_i64(tcg_ctx, tcg_rd, tcg_rn, 64); } else { TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); - gen_helper_clz(tcg_ctx, tcg_tmp32, tcg_tmp32); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); + tcg_gen_clzi_i32(tcg_ctx, tcg_tmp32, tcg_tmp32, 32); tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_tmp32); tcg_temp_free_i32(tcg_ctx, tcg_tmp32); } @@ -3717,11 +4971,11 @@ static void handle_cls(DisasContext *s, unsigned int sf, tcg_rn = cpu_reg(s, rn); if (sf) { - gen_helper_cls64(tcg_ctx, tcg_rd, tcg_rn); + tcg_gen_clrsb_i64(tcg_ctx, tcg_rd, tcg_rn); } else { TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); - gen_helper_cls32(tcg_ctx, tcg_tmp32, tcg_tmp32); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); + tcg_gen_clrsb_i32(tcg_ctx, tcg_tmp32, tcg_tmp32); tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_tmp32); tcg_temp_free_i32(tcg_ctx, tcg_tmp32); } @@ -3739,14 +4993,14 @@ static void handle_rbit(DisasContext *s, unsigned int sf, gen_helper_rbit64(tcg_ctx, tcg_rd, tcg_rn); } else { TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); gen_helper_rbit(tcg_ctx, tcg_tmp32, tcg_tmp32); tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_tmp32); tcg_temp_free_i32(tcg_ctx, tcg_tmp32); } } -/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */ +/* REV with sf==1, opcode==3 ("REV64") */ static void handle_rev64(DisasContext *s, unsigned int sf, unsigned int rn, unsigned int rd) { @@ -3758,8 +5012,8 @@ static void handle_rev64(DisasContext *s, unsigned int sf, tcg_gen_bswap64_i64(tcg_ctx, cpu_reg(s, rd), cpu_reg(s, rn)); } -/* C5.6.149 REV with sf==0, opcode==2 - * C5.6.151 REV32 (sf==1, opcode==2) +/* REV with sf==0, opcode==2 + * REV32 (sf==1, opcode==2) */ static void handle_rev32(DisasContext *s, unsigned int sf, unsigned int rn, unsigned int rd) @@ -3785,7 +5039,7 @@ static void handle_rev32(DisasContext *s, unsigned int sf, } } -/* C5.6.150 REV16 (opcode==1) */ +/* REV16 (opcode==1) */ static void handle_rev16(DisasContext *s, unsigned int sf, unsigned int rn, 
unsigned int rd) { @@ -3793,30 +5047,19 @@ static void handle_rev16(DisasContext *s, unsigned int sf, TCGv_i64 tcg_rd = cpu_reg(s, rd); TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); + TCGv_i64 mask = tcg_const_i64(tcg_ctx, sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff); - tcg_gen_andi_i64(tcg_ctx, tcg_tmp, tcg_rn, 0xffff); - tcg_gen_bswap16_i64(tcg_ctx, tcg_rd, tcg_tmp); - - tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_rn, 16); - tcg_gen_andi_i64(tcg_ctx, tcg_tmp, tcg_tmp, 0xffff); - tcg_gen_bswap16_i64(tcg_ctx, tcg_tmp, tcg_tmp); - tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp, 16, 16); - - if (sf) { - tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_rn, 32); - tcg_gen_andi_i64(tcg_ctx, tcg_tmp, tcg_tmp, 0xffff); - tcg_gen_bswap16_i64(tcg_ctx, tcg_tmp, tcg_tmp); - tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp, 32, 16); - - tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_rn, 48); - tcg_gen_bswap16_i64(tcg_ctx, tcg_tmp, tcg_tmp); - tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp, 48, 16); - } + tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_rn, 8); + tcg_gen_and_i64(tcg_ctx, tcg_rd, tcg_rn, mask); + tcg_gen_and_i64(tcg_ctx, tcg_tmp, tcg_tmp, mask); + tcg_gen_shli_i64(tcg_ctx, tcg_rd, tcg_rd, 8); + tcg_gen_or_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp); + tcg_temp_free_i64(tcg_ctx, mask); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } -/* C3.5.7 Data-processing (1 source) +/* Data-processing (1 source) * 31 30 29 28 21 20 16 15 10 9 5 4 0 * +----+---+---+-----------------+---------+--------+------+------+ * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd | @@ -3824,38 +5067,198 @@ static void handle_rev16(DisasContext *s, unsigned int sf, */ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) { - unsigned int sf, opcode, rn, rd; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned int sf, opcode, opcode2, rn, rd; + TCGv_i64 tcg_rd; - if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) { + if (extract32(insn, 29, 1)) { unallocated_encoding(s); return; } sf = extract32(insn, 31, 1); opcode = extract32(insn, 10, 6); + opcode2 = extract32(insn, 16, 5); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); - switch (opcode) { - case 0: /* RBIT */ +#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7)) + + switch (MAP(sf, opcode2, opcode)) { + case MAP(0, 0x00, 0x00): /* RBIT */ + case MAP(1, 0x00, 0x00): handle_rbit(s, sf, rn, rd); break; - case 1: /* REV16 */ + case MAP(0, 0x00, 0x01): /* REV16 */ + case MAP(1, 0x00, 0x01): handle_rev16(s, sf, rn, rd); break; - case 2: /* REV32 */ + case MAP(0, 0x00, 0x02): /* REV/REV32 */ + case MAP(1, 0x00, 0x02): handle_rev32(s, sf, rn, rd); break; - case 3: /* REV64 */ + case MAP(1, 0x00, 0x03): /* REV64 */ handle_rev64(s, sf, rn, rd); break; - case 4: /* CLZ */ + case MAP(0, 0x00, 0x04): /* CLZ */ + case MAP(1, 0x00, 0x04): handle_clz(s, sf, rn, rd); break; - case 5: /* CLS */ + case MAP(0, 0x00, 0x05): /* CLS */ + case MAP(1, 0x00, 0x05): handle_cls(s, sf, rn, rd); break; + case MAP(1, 0x01, 0x00): /* PACIA */ + if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_pacia(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); + } else if (!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + break; + case MAP(1, 0x01, 0x01): /* PACIB */ + if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_pacib(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); + } else if (!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + break; + case MAP(1, 0x01, 0x02): /* PACDA */ + if 
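/* MAP(SF, O2, O1) packs the three decode fields into one switch key,
 * bit 0 = sf, bits 1..6 = opcode, bits 7..11 = opcode2; this row,
 * MAP(1, 0x01, 0x02), is the sf = 1, opcode2 = 1, opcode = 2 entry,
 * i.e. PACDA */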
(s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_pacda(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); + } else if (!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + break; + case MAP(1, 0x01, 0x03): /* PACDB */ + if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_pacdb(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); + } else if (!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + break; + case MAP(1, 0x01, 0x04): /* AUTIA */ + if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_autia(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); + } else if (!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + break; + case MAP(1, 0x01, 0x05): /* AUTIB */ + if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_autib(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); + } else if (!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + break; + case MAP(1, 0x01, 0x06): /* AUTDA */ + if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_autda(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); + } else if (!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + break; + case MAP(1, 0x01, 0x07): /* AUTDB */ + if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_autdb(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, cpu_reg_sp(s, rn)); + } else if (!dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + break; + case MAP(1, 0x01, 0x08): /* PACIZA */ + if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { + goto do_unallocated; + } else if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_pacia(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); + } + break; + case MAP(1, 0x01, 0x09): /* PACIZB */ + if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { + goto do_unallocated; + } else if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_pacib(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); + } + break; + case MAP(1, 0x01, 0x0a): /* PACDZA */ + if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { + goto do_unallocated; + } else if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_pacda(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); + } + break; + case MAP(1, 0x01, 0x0b): /* PACDZB */ + if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { + goto do_unallocated; + } else if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_pacdb(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); + } + break; + case MAP(1, 0x01, 0x0c): /* AUTIZA */ + if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { + goto do_unallocated; + } else if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_autia(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); + } + break; + case MAP(1, 0x01, 0x0d): /* AUTIZB */ + if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { + goto do_unallocated; + } else if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_autib(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); + } + break; + case MAP(1, 0x01, 0x0e): /* AUTDZA */ + if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { + goto do_unallocated; + } else if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_autda(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); + } + break; + case MAP(1, 0x01, 0x0f): /* AUTDZB */ + if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { + goto do_unallocated; + } else if (s->pauth_active) { + 
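/* Same three-way pattern as every PAC/AUT row above: without the
 * pauth feature the encoding is unallocated; with the feature
 * present but the keys disabled it executes as a NOP, hence no
 * else clause; only with pauth active does the helper rewrite Rd,
 * the Z forms passing a zero modifier:
 *
 *     if (!feature || rn != 31) goto do_unallocated;
 *     else if (pauth_active)    rd = autdb(rd, 0);
 */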
tcg_rd = cpu_reg(s, rd); + gen_helper_autdb(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd, new_tmp_a64_zero(s)); + } + break; + case MAP(1, 0x01, 0x10): /* XPACI */ + if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { + goto do_unallocated; + } else if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_xpaci(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd); + } + break; + case MAP(1, 0x01, 0x11): /* XPACD */ + if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { + goto do_unallocated; + } else if (s->pauth_active) { + tcg_rd = cpu_reg(s, rd); + gen_helper_xpacd(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rd); + } + break; + default: + do_unallocated: + unallocated_encoding(s); + break; } + +#undef MAP } static void handle_div(DisasContext *s, bool is_signed, unsigned int sf, @@ -3886,7 +5289,7 @@ static void handle_div(DisasContext *s, bool is_signed, unsigned int sf, } } -/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */ +/* LSLV, LSRV, ASRV, RORV */ static void handle_shift_reg(DisasContext *s, enum a64_shift_type shift_type, unsigned int sf, unsigned int rm, unsigned int rn, unsigned int rd) @@ -3910,7 +5313,7 @@ static void handle_crc32(DisasContext *s, TCGv_i64 tcg_acc, tcg_val; TCGv_i32 tcg_bytes; - if (!arm_dc_feature(s, ARM_FEATURE_CRC) + if (!dc_isar_feature(aa64_crc32, s) || (sf == 1 && sz != 3) || (sf == 0 && sz == 3)) { unallocated_encoding(s); @@ -3950,7 +5353,7 @@ static void handle_crc32(DisasContext *s, tcg_temp_free_i32(tcg_ctx, tcg_bytes); } -/* C3.5.8 Data-processing (2 source) +/* Data-processing (2 source) * 31 30 29 28 21 20 16 15 10 9 5 4 0 * +----+---+---+-----------------+------+--------+------+------+ * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd | @@ -3958,6 +5361,7 @@ static void handle_crc32(DisasContext *s, */ static void disas_data_proc_2src(DisasContext *s, uint32_t insn) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int sf, rm, opcode, rn, rd; sf = extract32(insn, 31, 1); rm = extract32(insn, 16, 5); @@ -3989,6 +5393,13 @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn) case 11: /* RORV */ handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd); break; + case 12: /* PACGA */ + if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) { + goto do_unallocated; + } + gen_helper_pacga(tcg_ctx, cpu_reg(s, rd), tcg_ctx->cpu_env, + cpu_reg(s, rn), cpu_reg_sp(s, rm)); + break; case 16: case 17: case 18: @@ -4004,66 +5415,108 @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn) break; } default: + do_unallocated: unallocated_encoding(s); break; } } -/* C3.5 Data processing - register */ +/* + * Data processing - register + * 31 30 29 28 25 21 20 16 10 0 + * +--+---+--+---+-------+-----+-------+-------+---------+ + * | |op0| |op1| 1 0 1 | op2 | | op3 | | + * +--+---+--+---+-------+-----+-------+-------+---------+ + */ static void disas_data_proc_reg(DisasContext *s, uint32_t insn) { - switch (extract32(insn, 24, 5)) { - case 0x0a: /* Logical (shifted register) */ - disas_logic_reg(s, insn); - break; - case 0x0b: /* Add/subtract */ - if (insn & (1 << 21)) { /* (extended register) */ - disas_add_sub_ext_reg(s, insn); + int op0 = extract32(insn, 30, 1); + int op1 = extract32(insn, 28, 1); + int op2 = extract32(insn, 21, 4); + int op3 = extract32(insn, 10, 6); + + if (!op1) { + if (op2 & 8) { + if (op2 & 1) { + /* Add/sub (extended register) */ + disas_add_sub_ext_reg(s, insn); + } else { + /* Add/sub (shifted register) */ + disas_add_sub_reg(s, insn); + } } else { - disas_add_sub_reg(s, insn); + /* Logical (shifted register) 
*/ + disas_logic_reg(s, insn); } - break; - case 0x1b: /* Data-processing (3 source) */ - disas_data_proc_3src(s, insn); - break; - case 0x1a: - switch (extract32(insn, 21, 3)) { - case 0x0: /* Add/subtract (with carry) */ + return; + } + + switch (op2) { + case 0x0: + switch (op3) { + case 0x00: /* Add/subtract (with carry) */ disas_adc_sbc(s, insn); break; - case 0x2: /* Conditional compare */ - disas_cc(s, insn); /* both imm and reg forms */ + + case 0x01: /* Rotate right into flags */ + case 0x21: + disas_rotate_right_into_flags(s, insn); break; - case 0x4: /* Conditional select */ - disas_cond_select(s, insn); - break; - case 0x6: /* Data-processing */ - if (insn & (1 << 30)) { /* (1 source) */ - disas_data_proc_1src(s, insn); - } else { /* (2 source) */ - disas_data_proc_2src(s, insn); - } + + case 0x02: /* Evaluate into flags */ + case 0x12: + case 0x22: + case 0x32: + disas_evaluate_into_flags(s, insn); break; + default: - unallocated_encoding(s); - break; + goto do_unallocated; } break; + + case 0x2: /* Conditional compare */ + disas_cc(s, insn); /* both imm and reg forms */ + break; + + case 0x4: /* Conditional select */ + disas_cond_select(s, insn); + break; + + case 0x6: /* Data-processing */ + if (op0) { /* (1 source) */ + disas_data_proc_1src(s, insn); + } else { /* (2 source) */ + disas_data_proc_2src(s, insn); + } + break; + case 0x8: /* (3 source) */ + case 0x9: /* (3 source) */ + case 0xa: /* (3 source) */ + case 0xb: /* (3 source) */ + case 0xc: /* (3 source) */ + case 0xd: /* (3 source) */ + case 0xe: /* (3 source) */ + case 0xf: /* (3 source) */ + disas_data_proc_3src(s, insn); + break; + default: + do_unallocated: unallocated_encoding(s); break; } } -static void handle_fp_compare(DisasContext *s, bool is_double, +static void handle_fp_compare(DisasContext *s, int size, unsigned int rn, unsigned int rm, bool cmp_with_zero, bool signal_all_nans) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_flags = tcg_temp_new_i64(tcg_ctx); - TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx); + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, size == MO_16); - if (is_double) { + if (size == MO_64) { TCGv_i64 tcg_vn, tcg_vm; tcg_vn = read_fp_dreg(s, rn); @@ -4080,19 +5533,35 @@ static void handle_fp_compare(DisasContext *s, bool is_double, tcg_temp_free_i64(tcg_ctx, tcg_vn); tcg_temp_free_i64(tcg_ctx, tcg_vm); } else { - TCGv_i32 tcg_vn, tcg_vm; + TCGv_i32 tcg_vn = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_vm = tcg_temp_new_i32(tcg_ctx); - tcg_vn = read_fp_sreg(s, rn); + read_vec_element_i32(s, tcg_vn, rn, 0, size); if (cmp_with_zero) { - tcg_vm = tcg_const_i32(tcg_ctx, 0); + tcg_gen_movi_i32(tcg_ctx, tcg_vm, 0); } else { - tcg_vm = read_fp_sreg(s, rm); + read_vec_element_i32(s, tcg_vm, rm, 0, size); } - if (signal_all_nans) { - gen_helper_vfp_cmpes_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); - } else { - gen_helper_vfp_cmps_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); + + switch (size) { + case MO_32: + if (signal_all_nans) { + gen_helper_vfp_cmpes_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); + } else { + gen_helper_vfp_cmps_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); + } + break; + case MO_16: + if (signal_all_nans) { + gen_helper_vfp_cmpeh_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); + } else { + gen_helper_vfp_cmph_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); + } + break; + default: + g_assert_not_reached(); } + tcg_temp_free_i32(tcg_ctx, tcg_vn); tcg_temp_free_i32(tcg_ctx, tcg_vm); } @@ -4104,7 +5573,7 @@ static void handle_fp_compare(DisasContext *s, bool is_double, 
tcg_temp_free_i64(tcg_ctx, tcg_flags); } -/* C3.6.22 Floating point compare +/* Floating point compare * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 | @@ -4113,16 +5582,35 @@ static void handle_fp_compare(DisasContext *s, bool is_double, static void disas_fp_compare(DisasContext *s, uint32_t insn) { unsigned int mos, type, rm, op, rn, opc, op2r; + int size; mos = extract32(insn, 29, 3); - type = extract32(insn, 22, 2); /* 0 = single, 1 = double */ + type = extract32(insn, 22, 2); rm = extract32(insn, 16, 5); op = extract32(insn, 14, 2); rn = extract32(insn, 5, 5); opc = extract32(insn, 3, 2); op2r = extract32(insn, 0, 3); - if (mos || op || op2r || type > 1) { + if (mos || op || op2r) { + unallocated_encoding(s); + return; + } + + switch (type) { + case 0: + size = MO_32; + break; + case 1: + size = MO_64; + break; + case 3: + size = MO_16; + if (dc_isar_feature(aa64_fp16, s)) { + break; + } + /* fallthru */ + default: unallocated_encoding(s); return; } @@ -4131,10 +5619,10 @@ static void disas_fp_compare(DisasContext *s, uint32_t insn) return; } - handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2); + handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2); } -/* C3.6.23 Floating point conditional compare +/* Floating point conditional compare * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv | @@ -4145,17 +5633,36 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn) TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int mos, type, rm, cond, rn, op, nzcv; TCGv_i64 tcg_flags; - int label_continue = -1; + TCGLabel *label_continue = NULL; + int size; mos = extract32(insn, 29, 3); - type = extract32(insn, 22, 2); /* 0 = single, 1 = double */ + type = extract32(insn, 22, 2); rm = extract32(insn, 16, 5); cond = extract32(insn, 12, 4); rn = extract32(insn, 5, 5); op = extract32(insn, 4, 1); nzcv = extract32(insn, 0, 4); - if (mos || type > 1) { + if (mos) { + unallocated_encoding(s); + return; + } + + switch (type) { + case 0: + size = MO_32; + break; + case 1: + size = MO_64; + break; + case 3: + size = MO_16; + if (dc_isar_feature(aa64_fp16, s)) { + break; + } + /* fallthru */ + default: unallocated_encoding(s); return; } @@ -4165,7 +5672,7 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn) } if (cond < 0x0e) { /* not always */ - int label_match = gen_new_label(tcg_ctx); + TCGLabel *label_match = gen_new_label(tcg_ctx); label_continue = gen_new_label(tcg_ctx); arm_gen_test_cc(tcg_ctx, cond, label_match); /* nomatch: */ @@ -4176,29 +5683,14 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn) gen_set_label(tcg_ctx, label_match); } - handle_fp_compare(s, type, rn, rm, false, op); + handle_fp_compare(s, size, rn, rm, false, op); if (cond < 0x0e) { gen_set_label(tcg_ctx, label_continue); } } -/* copy src FP register to dst FP register; type specifies single or double */ -static void gen_mov_fp2fp(DisasContext *s, int type, int dst, int src) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - if (type) { - TCGv_i64 v = read_fp_dreg(s, src); - write_fp_dreg(s, dst, v); - tcg_temp_free_i64(tcg_ctx, v); - } else { - TCGv_i32 v = read_fp_sreg(s, src); - write_fp_sreg(s, dst, v); - tcg_temp_free_i32(tcg_ctx, v); - } -} - -/* C3.6.24 Floating point conditional select +/* Floating point 
conditional select * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 * +---+---+---+-----------+------+---+------+------+-----+------+------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd | @@ -4208,16 +5700,36 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; unsigned int mos, type, rm, cond, rn, rd; - int label_continue = -1; + TCGv_i64 t_true, t_false, t_zero; + DisasCompare64 c; + MemOp sz; mos = extract32(insn, 29, 3); - type = extract32(insn, 22, 2); /* 0 = single, 1 = double */ + type = extract32(insn, 22, 2); rm = extract32(insn, 16, 5); cond = extract32(insn, 12, 4); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); - if (mos || type > 1) { + if (mos) { + unallocated_encoding(s); + return; + } + + switch (type) { + case 0: + sz = MO_32; + break; + case 1: + sz = MO_64; + break; + case 3: + sz = MO_16; + if (dc_isar_feature(aa64_fp16, s)) { + break; + } + /* fallthru */ + default: unallocated_encoding(s); return; } @@ -4226,47 +5738,46 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn) return; } - if (cond < 0x0e) { /* not always */ - int label_match = gen_new_label(tcg_ctx); - label_continue = gen_new_label(tcg_ctx); - arm_gen_test_cc(tcg_ctx, cond, label_match); - /* nomatch: */ - gen_mov_fp2fp(s, type, rd, rm); - tcg_gen_br(tcg_ctx, label_continue); - gen_set_label(tcg_ctx, label_match); - } + /* Zero extend sreg & hreg inputs to 64 bits now. */ + t_true = tcg_temp_new_i64(tcg_ctx); + t_false = tcg_temp_new_i64(tcg_ctx); + read_vec_element(s, t_true, rn, 0, sz); + read_vec_element(s, t_false, rm, 0, sz); - gen_mov_fp2fp(s, type, rd, rn); + a64_test_cc(tcg_ctx, &c, cond); + t_zero = tcg_const_i64(tcg_ctx, 0); + tcg_gen_movcond_i64(tcg_ctx, c.cond, t_true, c.value, t_zero, t_true, t_false); + tcg_temp_free_i64(tcg_ctx, t_zero); + tcg_temp_free_i64(tcg_ctx, t_false); + a64_free_cc(tcg_ctx, &c); - if (cond < 0x0e) { /* continue */ - gen_set_label(tcg_ctx, label_continue); - } + /* Note that sregs & hregs write back zeros to the high bits, + and we've already done the zero-extension. 
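Writing the result back through write_fp_dreg below therefore leaves the unused high bits of an S- or H-register destination correctly zeroed, so no per-size write helper is needed.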
*/ + write_fp_dreg(s, rd, t_true); + tcg_temp_free_i64(tcg_ctx, t_true); } -/* C3.6.25 Floating-point data-processing (1 source) - single precision */ -static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) +/* Floating-point data-processing (1 source) - half precision */ +static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TCGv_ptr fpst; - TCGv_i32 tcg_op; - TCGv_i32 tcg_res; - - fpst = get_fpstatus_ptr(tcg_ctx); - tcg_op = read_fp_sreg(s, rn); - tcg_res = tcg_temp_new_i32(tcg_ctx); + TCGv_ptr fpst = NULL; + TCGv_i32 tcg_op = read_fp_hreg(s, rn); + TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); switch (opcode) { case 0x0: /* FMOV */ tcg_gen_mov_i32(tcg_ctx, tcg_res, tcg_op); break; case 0x1: /* FABS */ - gen_helper_vfp_abss(tcg_ctx, tcg_res, tcg_op); + tcg_gen_andi_i32(tcg_ctx, tcg_res, tcg_op, 0x7fff); break; case 0x2: /* FNEG */ - gen_helper_vfp_negs(tcg_ctx, tcg_res, tcg_op); + tcg_gen_xori_i32(tcg_ctx, tcg_res, tcg_op, 0x8000); break; case 0x3: /* FSQRT */ - gen_helper_vfp_sqrts(tcg_ctx, tcg_res, tcg_op, tcg_ctx->cpu_env); + fpst = get_fpstatus_ptr(tcg_ctx, true); + gen_helper_sqrt_f16(tcg_ctx, tcg_res, tcg_op, fpst); break; case 0x8: /* FRINTN */ case 0x9: /* FRINTP */ @@ -4275,19 +5786,22 @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) case 0xc: /* FRINTA */ { TCGv_i32 tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(opcode & 7)); + fpst = get_fpstatus_ptr(tcg_ctx, true); - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - gen_helper_rints(tcg_ctx, tcg_res, tcg_op, fpst); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + gen_helper_advsimd_rinth(tcg_ctx, tcg_res, tcg_op, fpst); - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); tcg_temp_free_i32(tcg_ctx, tcg_rmode); break; } case 0xe: /* FRINTX */ - gen_helper_rints_exact(tcg_ctx, tcg_res, tcg_op, fpst); + fpst = get_fpstatus_ptr(tcg_ctx, true); + gen_helper_advsimd_rinth_exact(tcg_ctx, tcg_res, tcg_op, fpst); break; case 0xf: /* FRINTI */ - gen_helper_rints(tcg_ctx, tcg_res, tcg_op, fpst); + fpst = get_fpstatus_ptr(tcg_ctx, true); + gen_helper_advsimd_rinth(tcg_ctx, tcg_res, tcg_op, fpst); break; default: abort(); @@ -4295,64 +5809,162 @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) write_fp_sreg(s, rd, tcg_res); - tcg_temp_free_ptr(tcg_ctx, fpst); + if (fpst) { + tcg_temp_free_ptr(tcg_ctx, fpst); + } tcg_temp_free_i32(tcg_ctx, tcg_op); tcg_temp_free_i32(tcg_ctx, tcg_res); } -/* C3.6.25 Floating-point data-processing (1 source) - double precision */ -static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) +/* Floating-point data-processing (1 source) - single precision */ +static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; + void (*gen_fpst)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_ptr) = NULL; + TCGv_i32 tcg_op, tcg_res; TCGv_ptr fpst; - TCGv_i64 tcg_op; - TCGv_i64 tcg_res; + int rmode = -1; - fpst = get_fpstatus_ptr(tcg_ctx); - tcg_op = read_fp_dreg(s, rn); - tcg_res = tcg_temp_new_i64(tcg_ctx); + tcg_op = read_fp_sreg(s, rn); + tcg_res = tcg_temp_new_i32(tcg_ctx); switch (opcode) { case 0x0: /* FMOV */ - tcg_gen_mov_i64(tcg_ctx, tcg_res, tcg_op); - break; + tcg_gen_mov_i32(tcg_ctx, tcg_res, tcg_op); + goto done; case 0x1: /* FABS */ - gen_helper_vfp_absd(tcg_ctx, 
tcg_res, tcg_op); - break; + gen_helper_vfp_abss(tcg_ctx, tcg_res, tcg_op); + goto done; case 0x2: /* FNEG */ - gen_helper_vfp_negd(tcg_ctx, tcg_res, tcg_op); - break; + gen_helper_vfp_negs(tcg_ctx, tcg_res, tcg_op); + goto done; case 0x3: /* FSQRT */ - gen_helper_vfp_sqrtd(tcg_ctx, tcg_res, tcg_op, tcg_ctx->cpu_env); - break; + gen_helper_vfp_sqrts(tcg_ctx, tcg_res, tcg_op, tcg_ctx->cpu_env); + goto done; case 0x8: /* FRINTN */ case 0x9: /* FRINTP */ case 0xa: /* FRINTM */ case 0xb: /* FRINTZ */ case 0xc: /* FRINTA */ - { - TCGv_i32 tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(opcode & 7)); - - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - gen_helper_rintd(tcg_ctx, tcg_res, tcg_op, fpst); - - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - tcg_temp_free_i32(tcg_ctx, tcg_rmode); + rmode = arm_rmode_to_sf(opcode & 7); + gen_fpst = gen_helper_rints; break; - } case 0xe: /* FRINTX */ - gen_helper_rintd_exact(tcg_ctx, tcg_res, tcg_op, fpst); + gen_fpst = gen_helper_rints_exact; break; case 0xf: /* FRINTI */ - gen_helper_rintd(tcg_ctx, tcg_res, tcg_op, fpst); + gen_fpst = gen_helper_rints; + break; + case 0x10: /* FRINT32Z */ + rmode = float_round_to_zero; + gen_fpst = gen_helper_frint32_s; + break; + case 0x11: /* FRINT32X */ + gen_fpst = gen_helper_frint32_s; + break; + case 0x12: /* FRINT64Z */ + rmode = float_round_to_zero; + gen_fpst = gen_helper_frint64_s; + break; + case 0x13: /* FRINT64X */ + gen_fpst = gen_helper_frint64_s; break; default: - abort(); + g_assert_not_reached(); } - write_fp_dreg(s, rd, tcg_res); - + fpst = get_fpstatus_ptr(tcg_ctx, false); + if (rmode >= 0) { + TCGv_i32 tcg_rmode = tcg_const_i32(tcg_ctx, rmode); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + gen_fpst(tcg_ctx, tcg_res, tcg_op, fpst); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + } else { + gen_fpst(tcg_ctx, tcg_res, tcg_op, fpst); + } tcg_temp_free_ptr(tcg_ctx, fpst); + + done: + write_fp_sreg(s, rd, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_op); + tcg_temp_free_i32(tcg_ctx, tcg_res); +} + +/* Floating-point data-processing (1 source) - double precision */ +static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + void (*gen_fpst)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_ptr) = NULL; + TCGv_i64 tcg_op, tcg_res; + TCGv_ptr fpst; + int rmode = -1; + + switch (opcode) { + case 0x0: /* FMOV */ + gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0); + return; + } + + tcg_op = read_fp_dreg(s, rn); + tcg_res = tcg_temp_new_i64(tcg_ctx); + + switch (opcode) { + case 0x1: /* FABS */ + gen_helper_vfp_absd(tcg_ctx, tcg_res, tcg_op); + goto done; + case 0x2: /* FNEG */ + gen_helper_vfp_negd(tcg_ctx, tcg_res, tcg_op); + goto done; + case 0x3: /* FSQRT */ + gen_helper_vfp_sqrtd(tcg_ctx, tcg_res, tcg_op, tcg_ctx->cpu_env); + goto done; + case 0x8: /* FRINTN */ + case 0x9: /* FRINTP */ + case 0xa: /* FRINTM */ + case 0xb: /* FRINTZ */ + case 0xc: /* FRINTA */ + rmode = arm_rmode_to_sf(opcode & 7); + gen_fpst = gen_helper_rintd; + break; + case 0xe: /* FRINTX */ + gen_fpst = gen_helper_rintd_exact; + break; + case 0xf: /* FRINTI */ + gen_fpst = gen_helper_rintd; + break; + case 0x10: /* FRINT32Z */ + rmode = float_round_to_zero; + gen_fpst = gen_helper_frint32_d; + break; + case 0x11: /* FRINT32X */ + gen_fpst = gen_helper_frint32_d; + break; + case 0x12: /* FRINT64Z */ + rmode = float_round_to_zero; + gen_fpst = 
gen_helper_frint64_d; + break; + case 0x13: /* FRINT64X */ + gen_fpst = gen_helper_frint64_d; + break; + default: + g_assert_not_reached(); + } + + fpst = get_fpstatus_ptr(tcg_ctx, false); + if (rmode >= 0) { + TCGv_i32 tcg_rmode = tcg_const_i32(tcg_ctx, rmode); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + gen_fpst(tcg_ctx, tcg_res, tcg_op, fpst); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + } else { + gen_fpst(tcg_ctx, tcg_res, tcg_op, fpst); + } + tcg_temp_free_ptr(tcg_ctx, fpst); + + done: + write_fp_dreg(s, rd, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_op); tcg_temp_free_i64(tcg_ctx, tcg_res); } @@ -4374,10 +5986,15 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, } else { /* Single to half */ TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); - gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + TCGv_i32 ahp = get_ahp_flag(tcg_ctx); + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); + + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_rd, tcg_rn, fpst, ahp); /* write_fp_sreg is OK here because top half of tcg_rd is zero */ write_fp_sreg(s, rd, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rd); + tcg_temp_free_i32(tcg_ctx, ahp); + tcg_temp_free_ptr(tcg_ctx, fpst); } tcg_temp_free_i32(tcg_ctx, tcg_rn); break; @@ -4390,9 +6007,13 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, /* Double to single */ gen_helper_vfp_fcvtsd(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); } else { + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); + TCGv_i32 ahp = get_ahp_flag(tcg_ctx); /* Double to half */ - gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tcg_rd, tcg_rn, fpst, ahp); /* write_fp_sreg is OK here because top half of tcg_rd is zero */ + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, ahp); } write_fp_sreg(s, rd, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rd); @@ -4402,21 +6023,25 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, case 0x3: { TCGv_i32 tcg_rn = read_fp_sreg(s, rn); + TCGv_ptr tcg_fpst = get_fpstatus_ptr(tcg_ctx, false); + TCGv_i32 tcg_ahp = get_ahp_flag(tcg_ctx); tcg_gen_ext16u_i32(tcg_ctx, tcg_rn, tcg_rn); if (dtype == 0) { /* Half to single */ TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); - gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); write_fp_sreg(s, rd, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rd); } else { /* Half to double */ TCGv_i64 tcg_rd = tcg_temp_new_i64(tcg_ctx); - gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); write_fp_dreg(s, rd, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rd); } tcg_temp_free_i32(tcg_ctx, tcg_rn); + tcg_temp_free_ptr(tcg_ctx, tcg_fpst); + tcg_temp_free_i32(tcg_ctx, tcg_ahp); break; } default: @@ -4424,7 +6049,7 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, } } -/* C3.6.25 Floating point data-processing (1 source) +/* Floating point data-processing (1 source) * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0 * +---+---+---+-----------+------+---+--------+-----------+------+------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd | @@ -4432,11 +6057,17 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, */ static void disas_fp_1src(DisasContext *s, uint32_t insn) { + int mos = extract32(insn, 29, 3); int type = 
extract32(insn, 22, 2); int opcode = extract32(insn, 15, 6); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); + if (mos) { + unallocated_encoding(s); + return; + } + switch (opcode) { case 0x4: case 0x5: case 0x7: { @@ -4453,36 +6084,66 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn) handle_fp_fcvt(s, opcode, rd, rn, dtype, type); break; } - case 0x0: case 0x1: case 0x2: case 0x3: - case 0x8: case 0x9: case 0xa: case 0xb: case 0xc: - case 0xe: case 0xf: + + case 0x10: /* FRINT{32,64}{X,Z} */ + case 0x11: /* FRINT{32,64}{X,Z} */ + case 0x12: /* FRINT{32,64}{X,Z} */ + case 0x13: /* FRINT{32,64}{X,Z} */ + if (type > 1 || !dc_isar_feature(aa64_frint, s)) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0x0: + case 0x1: + case 0x2: + case 0x3: + + case 0x8: + case 0x9: + case 0xa: + case 0xb: + case 0xc: + + case 0xe: + case 0xf: /* 32-to-32 and 64-to-64 ops */ switch (type) { case 0: if (!fp_access_check(s)) { return; } - handle_fp_1src_single(s, opcode, rd, rn); break; case 1: if (!fp_access_check(s)) { return; } - handle_fp_1src_double(s, opcode, rd, rn); break; + case 3: + if (!dc_isar_feature(aa64_fp16, s)) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + handle_fp_1src_half(s, opcode, rd, rn); + break; default: unallocated_encoding(s); } break; + default: unallocated_encoding(s); break; } } -/* C3.6.26 Floating-point data-processing (2 source) - single precision */ +/* Floating-point data-processing (2 source) - single precision */ static void handle_fp_2src_single(DisasContext *s, int opcode, int rd, int rn, int rm) { @@ -4493,7 +6154,7 @@ static void handle_fp_2src_single(DisasContext *s, int opcode, TCGv_ptr fpst; tcg_res = tcg_temp_new_i32(tcg_ctx); - fpst = get_fpstatus_ptr(tcg_ctx); + fpst = get_fpstatus_ptr(tcg_ctx, false); tcg_op1 = read_fp_sreg(s, rn); tcg_op2 = read_fp_sreg(s, rm); @@ -4536,7 +6197,7 @@ static void handle_fp_2src_single(DisasContext *s, int opcode, tcg_temp_free_i32(tcg_ctx, tcg_res); } -/* C3.6.26 Floating-point data-processing (2 source) - double precision */ +/* Floating-point data-processing (2 source) - double precision */ static void handle_fp_2src_double(DisasContext *s, int opcode, int rd, int rn, int rm) { @@ -4547,7 +6208,7 @@ static void handle_fp_2src_double(DisasContext *s, int opcode, TCGv_ptr fpst; tcg_res = tcg_temp_new_i64(tcg_ctx); - fpst = get_fpstatus_ptr(tcg_ctx); + fpst = get_fpstatus_ptr(tcg_ctx, false); tcg_op1 = read_fp_dreg(s, rn); tcg_op2 = read_fp_dreg(s, rm); @@ -4590,7 +6251,63 @@ static void handle_fp_2src_double(DisasContext *s, int opcode, tcg_temp_free_i64(tcg_ctx, tcg_res); } -/* C3.6.26 Floating point data-processing (2 source) +/* Floating-point data-processing (2 source) - half precision */ +static void handle_fp_2src_half(DisasContext *s, int opcode, + int rd, int rn, int rm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_op1; + TCGv_i32 tcg_op2; + TCGv_i32 tcg_res; + TCGv_ptr fpst; + + tcg_res = tcg_temp_new_i32(tcg_ctx); + fpst = get_fpstatus_ptr(tcg_ctx, true); + tcg_op1 = read_fp_hreg(s, rn); + tcg_op2 = read_fp_hreg(s, rm); + + switch (opcode) { + case 0x0: /* FMUL */ + gen_helper_advsimd_mulh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x1: /* FDIV */ + gen_helper_advsimd_divh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x2: /* FADD */ + gen_helper_advsimd_addh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x3: /* FSUB */ + gen_helper_advsimd_subh(tcg_ctx, tcg_res, tcg_op1, 
tcg_op2, fpst); + break; + case 0x4: /* FMAX */ + gen_helper_advsimd_maxh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x5: /* FMIN */ + gen_helper_advsimd_minh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x6: /* FMAXNM */ + gen_helper_advsimd_maxnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x7: /* FMINNM */ + gen_helper_advsimd_minnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x8: /* FNMUL */ + gen_helper_advsimd_mulh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + tcg_gen_xori_i32(tcg_ctx, tcg_res, tcg_res, 0x8000); + break; + default: + g_assert_not_reached(); + } + + write_fp_sreg(s, rd, tcg_res); + + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, tcg_op1); + tcg_temp_free_i32(tcg_ctx, tcg_op2); + tcg_temp_free_i32(tcg_ctx, tcg_res); +} + +/* Floating point data-processing (2 source) * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 * +---+---+---+-----------+------+---+------+--------+-----+------+------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd | @@ -4598,13 +6315,14 @@ static void handle_fp_2src_double(DisasContext *s, int opcode, */ static void disas_fp_2src(DisasContext *s, uint32_t insn) { + int mos = extract32(insn, 29, 3); int type = extract32(insn, 22, 2); int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rm = extract32(insn, 16, 5); int opcode = extract32(insn, 12, 4); - if (opcode > 8) { + if (opcode > 8 || mos) { unallocated_encoding(s); return; } @@ -4622,19 +6340,29 @@ static void disas_fp_2src(DisasContext *s, uint32_t insn) } handle_fp_2src_double(s, opcode, rd, rn, rm); break; + case 3: + if (!dc_isar_feature(aa64_fp16, s)) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_fp_2src_half(s, opcode, rd, rn, rm); + break; default: unallocated_encoding(s); } } -/* C3.6.27 Floating-point data-processing (3 source) - single precision */ +/* Floating-point data-processing (3 source) - single precision */ static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1, int rd, int rn, int rm, int ra) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 tcg_op1, tcg_op2, tcg_op3; TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); - TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx); + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); tcg_op1 = read_fp_sreg(s, rn); tcg_op2 = read_fp_sreg(s, rm); @@ -4666,14 +6394,14 @@ static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1, tcg_temp_free_i32(tcg_ctx, tcg_res); } -/* C3.6.27 Floating-point data-processing (3 source) - double precision */ +/* Floating-point data-processing (3 source) - double precision */ static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1, int rd, int rn, int rm, int ra) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i64 tcg_op1, tcg_op2, tcg_op3; TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); - TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx); + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); tcg_op1 = read_fp_dreg(s, rn); tcg_op2 = read_fp_dreg(s, rm); @@ -4705,7 +6433,46 @@ static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1, tcg_temp_free_i64(tcg_ctx, tcg_res); } -/* C3.6.27 Floating point data-processing (3 source) +/* Floating-point data-processing (3 source) - half precision */ +static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1, + int rd, int rn, int rm, int ra) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_op1, tcg_op2, tcg_op3; + TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); + TCGv_ptr 
fpst = get_fpstatus_ptr(tcg_ctx, true); + + tcg_op1 = read_fp_hreg(s, rn); + tcg_op2 = read_fp_hreg(s, rm); + tcg_op3 = read_fp_hreg(s, ra); + + /* These are fused multiply-add, and must be done as one + * floating point operation with no rounding between the + * multiplication and addition steps. + * NB that doing the negations here as separate steps is + * correct: an input NaN should come out with its sign bit + * flipped if it is a negated input. + */ + if (o1 == true) { + tcg_gen_xori_i32(tcg_ctx, tcg_op3, tcg_op3, 0x8000); + } + + if (o0 != o1) { + tcg_gen_xori_i32(tcg_ctx, tcg_op1, tcg_op1, 0x8000); + } + + gen_helper_advsimd_muladdh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst); + + write_fp_sreg(s, rd, tcg_res); + + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, tcg_op1); + tcg_temp_free_i32(tcg_ctx, tcg_op2); + tcg_temp_free_i32(tcg_ctx, tcg_op3); + tcg_temp_free_i32(tcg_ctx, tcg_res); +} + +/* Floating point data-processing (3 source) * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0 * +---+---+---+-----------+------+----+------+----+------+------+------+ * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd | @@ -4713,6 +6480,7 @@ static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1, */ static void disas_fp_3src(DisasContext *s, uint32_t insn) { + int mos = extract32(insn, 29, 3); int type = extract32(insn, 22, 2); int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); @@ -4721,6 +6489,11 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn) bool o0 = extract32(insn, 15, 1); bool o1 = extract32(insn, 21, 1); + if (mos) { + unallocated_encoding(s); + return; + } + switch (type) { case 0: if (!fp_access_check(s)) { @@ -4734,12 +6507,22 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn) } handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra); break; + case 3: + if (!dc_isar_feature(aa64_fp16, s)) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra); + break; default: unallocated_encoding(s); } } -/* C3.6.28 Floating point immediate +/* Floating point immediate * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0 * +---+---+---+-----------+------+---+------------+-------+------+------+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd | @@ -4749,12 +6532,33 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); + int imm5 = extract32(insn, 5, 5); int imm8 = extract32(insn, 13, 8); - int is_double = extract32(insn, 22, 2); + int type = extract32(insn, 22, 2); + int mos = extract32(insn, 29, 3); uint64_t imm; TCGv_i64 tcg_res; + MemOp sz; - if (is_double > 1) { + if (mos || imm5) { + unallocated_encoding(s); + return; + } + + switch (type) { + case 0: + sz = MO_32; + break; + case 1: + sz = MO_64; + break; + case 3: + sz = MO_16; + if (dc_isar_feature(aa64_fp16, s)) { + break; + } + /* fallthru */ + default: unallocated_encoding(s); return; } @@ -4763,22 +6567,7 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn) return; } - /* The imm8 encodes the sign bit, enough bits to represent - * an exponent in the range 01....1xx to 10....0xx, - * and the most significant 4 bits of the mantissa; see - * VFPExpandImm() in the v8 ARM ARM. - */ - if (is_double) { - imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | - (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) | - extract32(imm8, 0, 6); - imm <<= 48; - } else { - imm = (extract32(imm8, 7, 1) ?
0x8000 : 0) | - (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) | - (extract32(imm8, 0, 6) << 3); - imm <<= 16; - } + imm = vfp_expand_imm(sz, imm8); tcg_res = tcg_const_i64(tcg_ctx, imm); write_fp_dreg(s, rd, tcg_res); @@ -4795,11 +6584,11 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, { TCGContext *tcg_ctx = s->uc->tcg_ctx; bool is_signed = !(opcode & 1); - bool is_double = type; TCGv_ptr tcg_fpstatus; - TCGv_i32 tcg_shift; + TCGv_i32 tcg_shift, tcg_single; + TCGv_i64 tcg_double; - tcg_fpstatus = get_fpstatus_ptr(tcg_ctx); + tcg_fpstatus = get_fpstatus_ptr(tcg_ctx, type == 3); tcg_shift = tcg_const_i32(tcg_ctx, 64 - scale); @@ -4817,8 +6606,9 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_int = tcg_extend; } - if (is_double) { - TCGv_i64 tcg_double = tcg_temp_new_i64(tcg_ctx); + switch (type) { + case 1: /* float64 */ + tcg_double = tcg_temp_new_i64(tcg_ctx); if (is_signed) { gen_helper_vfp_sqtod(tcg_ctx, tcg_double, tcg_int, tcg_shift, tcg_fpstatus); @@ -4828,8 +6618,10 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, } write_fp_dreg(s, rd, tcg_double); tcg_temp_free_i64(tcg_ctx, tcg_double); - } else { - TCGv_i32 tcg_single = tcg_temp_new_i32(tcg_ctx); + break; + + case 0: /* float32 */ + tcg_single = tcg_temp_new_i32(tcg_ctx); if (is_signed) { gen_helper_vfp_sqtos(tcg_ctx, tcg_single, tcg_int, tcg_shift, tcg_fpstatus); @@ -4839,6 +6631,23 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, } write_fp_sreg(s, rd, tcg_single); tcg_temp_free_i32(tcg_ctx, tcg_single); + break; + + case 3: /* float16 */ + tcg_single = tcg_temp_new_i32(tcg_ctx); + if (is_signed) { + gen_helper_vfp_sqtoh(tcg_ctx, tcg_single, tcg_int, + tcg_shift, tcg_fpstatus); + } else { + gen_helper_vfp_uqtoh(tcg_ctx, tcg_single, tcg_int, + tcg_shift, tcg_fpstatus); + } + write_fp_sreg(s, rd, tcg_single); + tcg_temp_free_i32(tcg_ctx, tcg_single); + break; + + default: + g_assert_not_reached(); } } else { TCGv_i64 tcg_int = cpu_reg(s, rd); @@ -4853,10 +6662,11 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); - if (is_double) { - TCGv_i64 tcg_double = read_fp_dreg(s, rn); + switch (type) { + case 1: /* float64 */ + tcg_double = read_fp_dreg(s, rn); if (is_signed) { if (!sf) { gen_helper_vfp_tosld(tcg_ctx, tcg_int, tcg_double, @@ -4874,9 +6684,14 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } } + if (!sf) { + tcg_gen_ext32u_i64(tcg_ctx, tcg_int, tcg_int); + } tcg_temp_free_i64(tcg_ctx, tcg_double); - } else { - TCGv_i32 tcg_single = read_fp_sreg(s, rn); + break; + + case 0: /* float32 */ + tcg_single = read_fp_sreg(s, rn); if (sf) { if (is_signed) { gen_helper_vfp_tosqs(tcg_ctx, tcg_int, tcg_single, @@ -4898,21 +6713,46 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_temp_free_i32(tcg_ctx, tcg_dest); } tcg_temp_free_i32(tcg_ctx, tcg_single); + break; + + case 3: /* float16 */ + tcg_single = read_fp_sreg(s, rn); + if (sf) { + if (is_signed) { + gen_helper_vfp_tosqh(tcg_ctx, tcg_int, tcg_single, + tcg_shift, tcg_fpstatus); + } else { + gen_helper_vfp_touqh(tcg_ctx, tcg_int, tcg_single, + tcg_shift, tcg_fpstatus); + } + } else { + TCGv_i32 tcg_dest = tcg_temp_new_i32(tcg_ctx); + if (is_signed) { + 
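/* For float16 with a 32-bit destination there is no direct
 * 64-bit conversion here: the toslh/toulh helpers produce a
 * 32-bit result in a temporary, which is then zero-extended
 * into the 64-bit integer register, mirroring the float32 path
 * above. The sf (64-bit) path instead converts directly with
 * tosqh/touqh.
 */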
gen_helper_vfp_toslh(tcg_ctx, tcg_dest, tcg_single, + tcg_shift, tcg_fpstatus); + } else { + gen_helper_vfp_toulh(tcg_ctx, tcg_dest, tcg_single, + tcg_shift, tcg_fpstatus); + } + tcg_gen_extu_i32_i64(tcg_ctx, tcg_int, tcg_dest); + tcg_temp_free_i32(tcg_ctx, tcg_dest); + } + tcg_temp_free_i32(tcg_ctx, tcg_single); + break; + + default: + g_assert_not_reached(); } - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_rmode); - - if (!sf) { - tcg_gen_ext32u_i64(tcg_ctx, tcg_int, tcg_int); - } } tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_shift); } -/* C3.6.29 Floating point <-> fixed point conversions +/* Floating point <-> fixed point conversions * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+ * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd | @@ -4930,8 +6770,21 @@ static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn) bool sf = extract32(insn, 31, 1); bool itof; - if (sbit || (type > 1) - || (!sf && scale < 32)) { + if (sbit || (!sf && scale < 32)) { + unallocated_encoding(s); + return; + } + + switch (type) { + case 0: /* float32 */ + case 1: /* float64 */ + break; + case 3: /* float16 */ + if (dc_isar_feature(aa64_fp16, s)) { + break; + } + /* fallthru */ + default: unallocated_encoding(s); return; } @@ -4966,32 +6819,34 @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof) if (itof) { TCGv_i64 tcg_rn = cpu_reg(s, rn); + TCGv_i64 tmp; switch (type) { case 0: - { /* 32 bit */ - TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + tmp = tcg_temp_new_i64(tcg_ctx); tcg_gen_ext32u_i64(tcg_ctx, tmp, tcg_rn); - tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_offset(s, rd, MO_64)); - tcg_gen_movi_i64(tcg_ctx, tmp, 0); - tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_hi_offset(s, rd)); + write_fp_dreg(s, rd, tmp); tcg_temp_free_i64(tcg_ctx, tmp); break; - } case 1: - { /* 64 bit */ - TCGv_i64 tmp = tcg_const_i64(tcg_ctx, 0); - tcg_gen_st_i64(tcg_ctx, tcg_rn, tcg_ctx->cpu_env, fp_reg_offset(s, rd, MO_64)); - tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_hi_offset(s, rd)); - tcg_temp_free_i64(tcg_ctx, tmp); + write_fp_dreg(s, rd, tcg_rn); break; - } case 2: /* 64 bit to top half. 
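This is the FMOV Vd.D[1], Xn form; only the high doubleword of the low 128 bits is written, and the clear_vec_high call below zeroes any bits above 127 for SVE.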
*/ tcg_gen_st_i64(tcg_ctx, tcg_rn, tcg_ctx->cpu_env, fp_reg_hi_offset(s, rd)); + clear_vec_high(s, true, rd); break; + case 3: + /* 16 bit */ + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext16u_i64(tcg_ctx, tmp, tcg_rn); + write_fp_dreg(s, rd, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + break; + default: + g_assert_not_reached(); } } else { TCGv_i64 tcg_rd = cpu_reg(s, rd); @@ -5009,11 +6864,36 @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof) /* 64 bits from top half */ tcg_gen_ld_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, fp_reg_hi_offset(s, rn)); break; + case 3: + /* 16 bit */ + tcg_gen_ld16u_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, fp_reg_offset(s, rn, MO_16)); + break; + default: + g_assert_not_reached(); } } } -/* C3.6.30 Floating point <-> integer conversions +static void handle_fjcvtzs(DisasContext *s, int rd, int rn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 t = read_fp_dreg(s, rn); + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, false); + + gen_helper_fjcvtzs(tcg_ctx, t, t, fpstatus); + + tcg_temp_free_ptr(tcg_ctx, fpstatus); + + tcg_gen_ext32u_i64(tcg_ctx, cpu_reg(s, rd), t); + tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_ZF, t); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, 0); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_NF, 0); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0); + + tcg_temp_free_i64(tcg_ctx, t); +} + +/* Floating point <-> integer conversions * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+ * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd | @@ -5028,49 +6908,80 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn) int type = extract32(insn, 22, 2); bool sbit = extract32(insn, 29, 1); bool sf = extract32(insn, 31, 1); + bool itof = false; if (sbit) { - unallocated_encoding(s); - return; + goto do_unallocated; } - if (opcode > 5) { - /* FMOV */ - bool itof = opcode & 1; - - if (rmode >= 2) { - unallocated_encoding(s); - return; + switch (opcode) { + case 2: /* SCVTF */ + case 3: /* UCVTF */ + itof = true; + /* fallthru */ + case 4: /* FCVTAS */ + case 5: /* FCVTAU */ + if (rmode != 0) { + goto do_unallocated; } - - switch (sf << 3 | type << 1 | rmode) { - case 0x0: /* 32 bit */ - case 0xa: /* 64 bit */ - case 0xd: /* 64 bit to top half of quad */ + /* fallthru */ + case 0: /* FCVT[NPMZ]S */ + case 1: /* FCVT[NPMZ]U */ + switch (type) { + case 0: /* float32 */ + case 1: /* float64 */ + break; + case 3: /* float16 */ + if (!dc_isar_feature(aa64_fp16, s)) { + goto do_unallocated; + } break; default: - /* all other sf/type/rmode combinations are invalid */ - unallocated_encoding(s); - break; + goto do_unallocated; } - - if (!fp_access_check(s)) { - return; - } - handle_fmov(s, rd, rn, type, itof); - } else { - /* actual FP conversions */ - bool itof = extract32(opcode, 1, 1); - - if (type > 1 || (rmode != 0 && opcode > 1)) { - unallocated_encoding(s); - return; - } - if (!fp_access_check(s)) { return; } handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type); + break; + + default: + switch (sf << 7 | type << 5 | rmode << 3 | opcode) { + case 0x66: // 0b01100110: /* FMOV half <-> 32-bit int */ + case 0x67: // 0b01100111: + case 0xe6: // 0b11100110: /* FMOV half <-> 64-bit int */ + case 0xe7: // 0b11100111: + if (!dc_isar_feature(aa64_fp16, s)) { + goto do_unallocated; + } + /* fallthru */ + case 6: // 0b00000110: /* FMOV 32-bit */ + case 7: // 0b00000111: + case 0xa6: // 0b10100110: /* FMOV 64-bit */ + case 0xa7: // 
0b10100111: + case 0xce: // 0b11001110: /* FMOV top half of 128-bit */ + case 0xcf: // 0b11001111: + if (!fp_access_check(s)) { + return; + } + itof = opcode & 1; + handle_fmov(s, rd, rn, type, itof); + break; + + case 0x3e: // 0b00111110: /* FJCVTZS */ + if (!dc_isar_feature(aa64_jscvt, s)) { + goto do_unallocated; + } else if (fp_access_check(s)) { + handle_fjcvtzs(s, rd, rn); + } + break; + + default: + do_unallocated: + unallocated_encoding(s); + return; + } + break; } } @@ -5149,7 +7060,7 @@ static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right, tcg_temp_free_i64(tcg_ctx, tcg_tmp); } -/* C3.6.1 EXT +/* EXT * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0 * +---+---+-------------+-----+---+------+---+------+---+------+------+ * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd | @@ -5221,9 +7132,10 @@ static void disas_simd_ext(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_ctx, tcg_resl); write_vec_element(s, tcg_resh, rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_resh); + clear_vec_high(s, true, rd); } -/* C3.6.2 TBL/TBX +/* TBL/TBX * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+ * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd | @@ -5290,9 +7202,10 @@ static void disas_simd_tb(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_ctx, tcg_resl); write_vec_element(s, tcg_resh, rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_resh); + clear_vec_high(s, true, rd); } -/* C3.6.3 ZIP/UZP/TRN +/* ZIP/UZP/TRN * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0 * +---+---+-------------+------+---+------+---+------------------+------+ * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd | @@ -5380,33 +7293,83 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_ctx, tcg_resl); write_vec_element(s, tcg_resh, rd, 1, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_resh); + clear_vec_high(s, true, rd); } -static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2, - int opc, bool is_min, TCGv_ptr fpst) +/* + * do_reduction_op helper + * + * This mirrors the Reduce() pseudocode in the ARM ARM. It is + * important for correct NaN propagation that we do these + * operations in exactly the order specified by the pseudocode. + * + * This is a recursive function, TCG temps should be freed by the + * calling function once it is done with the values. + */ +static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn, + int esize, int size, int vmap, TCGv_ptr fpst) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - /* Helper function for disas_simd_across_lanes: do a single precision - * min/max operation on the specified two inputs, - * and return the result in tcg_elt1. - */ - if (opc == 0xc) { - if (is_min) { - gen_helper_vfp_minnums(tcg_ctx, tcg_elt1, tcg_elt1, tcg_elt2, fpst); - } else { - gen_helper_vfp_maxnums(tcg_ctx, tcg_elt1, tcg_elt1, tcg_elt2, fpst); - } + if (esize == size) { + int element; + MemOp msize = esize == 16 ? 
MO_16 : MO_32; + TCGv_i32 tcg_elem; + + /* We should have one register left here */ + assert(ctpop8(vmap) == 1); + element = ctz32(vmap); + assert(element < 8); + + tcg_elem = tcg_temp_new_i32(tcg_ctx); + read_vec_element_i32(s, tcg_elem, rn, element, msize); + return tcg_elem; } else { - assert(opc == 0xf); - if (is_min) { - gen_helper_vfp_mins(tcg_ctx, tcg_elt1, tcg_elt1, tcg_elt2, fpst); - } else { - gen_helper_vfp_maxs(tcg_ctx, tcg_elt1, tcg_elt1, tcg_elt2, fpst); + int bits = size / 2; + int shift = ctpop8(vmap) / 2; + int vmap_lo = (vmap >> shift) & vmap; + int vmap_hi = (vmap & ~vmap_lo); + TCGv_i32 tcg_hi, tcg_lo, tcg_res; + + tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst); + tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst); + tcg_res = tcg_temp_new_i32(tcg_ctx); + + switch (fpopcode) { + case 0x0c: /* fmaxnmv half-precision */ + gen_helper_advsimd_maxnumh(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); + break; + case 0x0f: /* fmaxv half-precision */ + gen_helper_advsimd_maxh(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); + break; + case 0x1c: /* fminnmv half-precision */ + gen_helper_advsimd_minnumh(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); + break; + case 0x1f: /* fminv half-precision */ + gen_helper_advsimd_minh(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); + break; + case 0x2c: /* fmaxnmv */ + gen_helper_vfp_maxnums(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); + break; + case 0x2f: /* fmaxv */ + gen_helper_vfp_maxs(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); + break; + case 0x3c: /* fminnmv */ + gen_helper_vfp_minnums(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); + break; + case 0x3f: /* fminv */ + gen_helper_vfp_mins(tcg_ctx, tcg_res, tcg_lo, tcg_hi, fpst); + break; + default: + g_assert_not_reached(); } + + tcg_temp_free_i32(tcg_ctx, tcg_hi); + tcg_temp_free_i32(tcg_ctx, tcg_lo); + return tcg_res; } } -/* C3.6.4 AdvSIMD across lanes +/* AdvSIMD across lanes * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 * +---+---+---+-----------+------+-----------+--------+-----+------+------+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd | @@ -5445,16 +7408,21 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) break; case 0xc: /* FMAXNMV, FMINNMV */ case 0xf: /* FMAXV, FMINV */ - if (!is_u || !is_q || extract32(size, 0, 1)) { - unallocated_encoding(s); - return; - } - /* Bit 1 of size field encodes min vs max, and actual size is always - * 32 bits: adjust the size variable so following code can rely on it + /* Bit 1 of size field encodes min vs max and the actual size + * depends on the encoding of the U bit. If not set (and FP16 + * enabled) then we do half-precision float instead of single + * precision. */ is_min = extract32(size, 1, 1); is_fp = true; - size = 2; + if (!is_u && dc_isar_feature(aa64_fp16, s)) { + size = 1; + } else if (!is_u || !is_q || extract32(size, 0, 1)) { + unallocated_encoding(s); + return; + } else { + size = 2; + } break; default: unallocated_encoding(s); @@ -5495,15 +7463,18 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) tcg_gen_add_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); break; case 0x0a: /* SMAXV / UMAXV */ - tcg_gen_movcond_i64(tcg_ctx, is_u ? TCG_COND_GEU : TCG_COND_GE, - tcg_res, - tcg_res, tcg_elt, tcg_res, tcg_elt); + if (is_u) { + tcg_gen_umax_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); + } else { + tcg_gen_smax_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); + } break; case 0x1a: /* SMINV / UMINV */ - tcg_gen_movcond_i64(tcg_ctx, is_u ? 
TCG_COND_LEU : TCG_COND_LE, - tcg_res, - tcg_res, tcg_elt, tcg_res, tcg_elt); - break; + if (is_u) { + tcg_gen_umin_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); + } else { + tcg_gen_smin_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); + } break; default: g_assert_not_reached(); @@ -5511,38 +7482,18 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) } } else { - /* Floating point ops which work on 32 bit (single) intermediates. + /* Floating point vector reduction ops which work across 32 + * bit (single) or 16 bit (half-precision) intermediates. * Note that correct NaN propagation requires that we do these * operations in exactly the order specified by the pseudocode. */ - TCGv_i32 tcg_elt1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 tcg_elt2 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 tcg_elt3 = tcg_temp_new_i32(tcg_ctx); - TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx); - - assert(esize == 32); - assert(elements == 4); - - read_vec_element(s, tcg_elt, rn, 0, MO_32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_elt1, tcg_elt); - read_vec_element(s, tcg_elt, rn, 1, MO_32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_elt2, tcg_elt); - - do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst); - - read_vec_element(s, tcg_elt, rn, 2, MO_32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_elt2, tcg_elt); - read_vec_element(s, tcg_elt, rn, 3, MO_32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_elt3, tcg_elt); - - do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst); - - do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst); - - tcg_gen_extu_i32_i64(tcg_ctx, tcg_res, tcg_elt1); - tcg_temp_free_i32(tcg_ctx, tcg_elt1); - tcg_temp_free_i32(tcg_ctx, tcg_elt2); - tcg_temp_free_i32(tcg_ctx, tcg_elt3); + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, size == MO_16); + int fpopcode = opcode | is_min << 4 | is_u << 5; + int vmap = (1 << elements) - 1; + TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize, + (is_q ? 128 : 64), vmap, fpst); + tcg_gen_extu_i32_i64(tcg_ctx, tcg_res, tcg_res32); + tcg_temp_free_i32(tcg_ctx, tcg_res32); tcg_temp_free_ptr(tcg_ctx, fpst); } @@ -5574,7 +7525,7 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_ctx, tcg_res); } -/* C6.3.31 DUP (Element, Vector) +/* DUP (Element, Vector) * * 31 30 29 21 20 16 15 10 9 5 4 0 * +---+---+-------------------+--------+-------------+------+------+ @@ -5588,10 +7539,7 @@ static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn, { TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = ctz32(imm5); - int esize = 8 << (size & 0x1f); - int elements = (is_q ? 128 : 64) / esize; - int index, i; - TCGv_i64 tmp; + int index; if (size > 3 || (size == 3 && !is_q)) { unallocated_encoding(s); @@ -5603,22 +7551,12 @@ static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn, } index = imm5 >> (size + 1); - - tmp = tcg_temp_new_i64(tcg_ctx); - read_vec_element(s, tmp, rn, index, size); - - for (i = 0; i < elements; i++) { - write_vec_element(s, tmp, rd, i, size); - } - - if (!is_q) { - clear_vec_high(s, rd); - } - - tcg_temp_free_i64(tcg_ctx, tmp); + tcg_gen_gvec_dup_mem(tcg_ctx, size, vec_full_reg_offset(s, rd), + vec_reg_offset(s, rn, index, size), + is_q ? 
16 : 8, vec_full_reg_size(s)); } -/* C6.3.31 DUP (element, scalar) +/* DUP (element, scalar) * 31 21 20 16 15 10 9 5 4 0 * +-----------------------+--------+-------------+------+------+ * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd | @@ -5652,7 +7590,7 @@ static void handle_simd_dupes(DisasContext *s, int rd, int rn, tcg_temp_free_i64(tcg_ctx, tmp); } -/* C6.3.32 DUP (General) +/* DUP (General) * * 31 30 29 21 20 16 15 10 9 5 4 0 * +---+---+-------------------+--------+-------------+------+------+ @@ -5664,10 +7602,9 @@ static void handle_simd_dupes(DisasContext *s, int rd, int rn, static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn, int imm5) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = ctz32(imm5); - int esize = 8 << (size & 0x1f); - int elements = (is_q ? 128 : 64)/esize; - int i = 0; + uint32_t dofs, oprsz, maxsz; if (size > 3 || ((size == 3) && !is_q)) { unallocated_encoding(s); @@ -5678,15 +7615,14 @@ static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn, return; } - for (i = 0; i < elements; i++) { - write_vec_element(s, cpu_reg(s, rn), rd, i, size); - } - if (!is_q) { - clear_vec_high(s, rd); - } + dofs = vec_full_reg_offset(s, rd); + oprsz = is_q ? 16 : 8; + maxsz = vec_full_reg_size(s); + + tcg_gen_gvec_dup_i64(tcg_ctx, size, dofs, oprsz, maxsz, cpu_reg(s, rn)); } -/* C6.3.150 INS (Element) +/* INS (Element) * * 31 21 20 16 15 14 11 10 9 5 4 0 * +-----------------------+--------+------------+---+------+------+ @@ -5722,10 +7658,13 @@ static void handle_simd_inse(DisasContext *s, int rd, int rn, write_vec_element(s, tmp, rd, dst_index, size); tcg_temp_free_i64(tcg_ctx, tmp); + + /* INS is considered a 128-bit write for SVE. */ + clear_vec_high(s, true, rd); } -/* C6.3.151 INS (General) +/* INS (General) * * 31 21 20 16 15 10 9 5 4 0 * +-----------------------+--------+-------------+------+------+ @@ -5751,11 +7690,14 @@ static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5) idx = extract32(imm5, 1 + size, 4 - size); write_vec_element(s, cpu_reg(s, rn), rd, idx, size); + + /* INS is considered a 128-bit write for SVE. 
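Because SVE vector registers can be wider than 128 bits, even this single-element insert must zero everything above bit 127, which is what the clear_vec_high call below does.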
*/ + clear_vec_high(s, true, rd); } /* - * C6.3.321 UMOV (General) - * C6.3.237 SMOV (General) + * UMOV (General) + * SMOV (General) * * 31 30 29 21 20 16 15 12 10 9 5 4 0 * +---+---+-------------------+--------+-------------+------+------+ @@ -5801,7 +7743,7 @@ static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed, } } -/* C3.6.5 AdvSIMD copy +/* AdvSIMD copy * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0 * +---+---+----+-----------------+------+---+------+---+------+------+ * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd | @@ -5853,7 +7795,7 @@ static void disas_simd_copy(DisasContext *s, uint32_t insn) } } -/* C3.6.6 AdvSIMD modified immediate +/* AdvSIMD modified immediate * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0 * +---+---+----+---------------------+-----+-------+----+---+-------+------+ * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd | @@ -5864,6 +7806,8 @@ static void disas_simd_copy(DisasContext *s, uint32_t insn) * MVNI - move inverted (shifted) imm into register * ORR - bitwise OR of (shifted) imm with register * BIC - bitwise clear of (shifted) imm with register + * With ARMv8.2 we also have: + * FMOV half-precision */ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) { @@ -5877,12 +7821,13 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) bool is_neg = extract32(insn, 29, 1); bool is_q = extract32(insn, 30, 1); uint64_t imm = 0; - TCGv_i64 tcg_rd, tcg_imm; - int i; if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) { - unallocated_encoding(s); - return; + /* Check for FMOV (vector, immediate) - half-precision */ + if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) { + unallocated_encoding(s); + return; + } } if (!fp_access_check(s)) { @@ -5940,54 +7885,50 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) imm |= 0x4000000000000000ULL; } } else { - imm = (abcdefgh & 0x3f) << 19; - if (abcdefgh & 0x80) { - imm |= 0x80000000; - } - if (abcdefgh & 0x40) { - imm |= 0x3e000000; + if (o2) { + /* FMOV (vector, immediate) - half-precision */ + imm = vfp_expand_imm(MO_16, abcdefgh); + /* now duplicate across the lanes */ + imm = bitfield_replicate(imm, 16); } else { - imm |= 0x40000000; + imm = (abcdefgh & 0x3f) << 19; + if (abcdefgh & 0x80) { + imm |= 0x80000000; + } + if (abcdefgh & 0x40) { + imm |= 0x3e000000; + } else { + imm |= 0x40000000; + } + imm |= (imm << 32); } - imm |= (imm << 32); } } break; + default: + fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1); + g_assert_not_reached(); } if (cmode_3_1 != 7 && is_neg) { imm = ~imm; } - tcg_imm = tcg_const_i64(tcg_ctx, imm); - tcg_rd = new_tmp_a64(s); - - for (i = 0; i < 2; i++) { - int foffs = i ? fp_reg_hi_offset(s, rd) : fp_reg_offset(s, rd, MO_64); - - if (i == 1 && !is_q) { - /* non-quad ops clear high half of vector */ - tcg_gen_movi_i64(tcg_ctx, tcg_rd, 0); - } else if ((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9) { - tcg_gen_ld_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, foffs); - if (is_neg) { - /* AND (BIC) */ - tcg_gen_and_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_imm); - } else { - /* ORR */ - tcg_gen_or_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_imm); - } + if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) { + /* MOVI or MVNI, with MVNI negation handled above. */ + tcg_gen_gvec_dup64i(tcg_ctx, vec_full_reg_offset(s, rd), is_q ? 16 : 8, + vec_full_reg_size(s), imm); + } else { + /* ORR or BIC, with BIC negation to AND handled above. 
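By this point imm has already been replicated across 64 bits (and inverted for BIC), so either form reduces to a single gvec immediate op, an AND or OR at 64-bit element size, over the whole vector.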
*/ + if (is_neg) { + gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64); } else { - /* MOVI */ - tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_imm); + gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64); } - tcg_gen_st_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, foffs); } - - tcg_temp_free_i64(tcg_ctx, tcg_imm); } -/* C3.6.7 AdvSIMD scalar copy +/* AdvSIMD scalar copy * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0 * +-----+----+-----------------+------+---+------+---+------+------+ * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd | @@ -6010,7 +7951,7 @@ static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn) handle_simd_dupes(s, rd, rn, imm5); } -/* C3.6.8 AdvSIMD scalar pairwise +/* AdvSIMD scalar pairwise * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 * +-----+---+-----------+------+-----------+--------+-----+------+------+ * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd | @@ -6042,31 +7983,37 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) return; } - TCGV_UNUSED_PTR(fpst); + fpst = NULL; break; case 0xc: /* FMAXNMP */ case 0xd: /* FADDP */ case 0xf: /* FMAXP */ case 0x2c: /* FMINNMP */ case 0x2f: /* FMINP */ - /* FP op, size[0] is 32 or 64 bit */ + /* FP op, size[0] is 32 or 64 bit*/ if (!u) { - unallocated_encoding(s); - return; + if (!dc_isar_feature(aa64_fp16, s)) { + unallocated_encoding(s); + return; + } else { + size = MO_16; + } + } else { + size = extract32(size, 0, 1) ? MO_64 : MO_32; } + if (!fp_access_check(s)) { return; } - size = extract32(size, 0, 1) ? 3 : 2; - fpst = get_fpstatus_ptr(tcg_ctx); + fpst = get_fpstatus_ptr(tcg_ctx, size == MO_16); break; default: unallocated_encoding(s); return; } - if (size == 3) { + if (size == MO_64) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); @@ -6107,27 +8054,49 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); - read_vec_element_i32(s, tcg_op1, rn, 0, MO_32); - read_vec_element_i32(s, tcg_op2, rn, 1, MO_32); + read_vec_element_i32(s, tcg_op1, rn, 0, size); + read_vec_element_i32(s, tcg_op2, rn, 1, size); - switch (opcode) { - case 0xc: /* FMAXNMP */ - gen_helper_vfp_maxnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); - break; - case 0xd: /* FADDP */ - gen_helper_vfp_adds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); - break; - case 0xf: /* FMAXP */ - gen_helper_vfp_maxs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); - break; - case 0x2c: /* FMINNMP */ - gen_helper_vfp_minnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); - break; - case 0x2f: /* FMINP */ - gen_helper_vfp_mins(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); - break; - default: - g_assert_not_reached(); + if (size == MO_16) { + switch (opcode) { + case 0xc: /* FMAXNMP */ + gen_helper_advsimd_maxnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0xd: /* FADDP */ + gen_helper_advsimd_addh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0xf: /* FMAXP */ + gen_helper_advsimd_maxh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x2c: /* FMINNMP */ + gen_helper_advsimd_minnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x2f: /* FMINP */ + gen_helper_advsimd_minh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + default: + g_assert_not_reached(); + } + } else { + switch (opcode) { + case 0xc: /* FMAXNMP */ + gen_helper_vfp_maxnums(tcg_ctx, tcg_res, tcg_op1, 
tcg_op2, fpst); + break; + case 0xd: /* FADDP */ + gen_helper_vfp_adds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0xf: /* FMAXP */ + gen_helper_vfp_maxs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x2c: /* FMINNMP */ + gen_helper_vfp_minnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x2f: /* FMINP */ + gen_helper_vfp_mins(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + default: + g_assert_not_reached(); + } } write_fp_sreg(s, rd, tcg_res); @@ -6137,7 +8106,7 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) tcg_temp_free_i32(tcg_ctx, tcg_res); } - if (!TCGV_IS_UNUSED_PTR(fpst)) { + if (fpst) { tcg_temp_free_ptr(tcg_ctx, fpst); } } @@ -6148,13 +8117,12 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) * This code is handles the common shifting code and is used by both * the vector and scalar code. */ -static void handle_shri_with_rndacc(DisasContext *s, TCGv_i64 tcg_res, TCGv_i64 tcg_src, +static void handle_shri_with_rndacc(TCGContext *tcg_ctx, TCGv_i64 tcg_res, TCGv_i64 tcg_src, TCGv_i64 tcg_rnd, bool accumulate, bool is_u, int size, int shift) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; bool extended_result = false; - bool round = !TCGV_IS_UNUSED_I64(tcg_rnd); + bool round = tcg_rnd != NULL; int ext_lshift = 0; TCGv_i64 tcg_src_hi; @@ -6231,32 +8199,6 @@ static void handle_shri_with_rndacc(DisasContext *s, TCGv_i64 tcg_res, TCGv_i64 } } -/* Common SHL/SLI - Shift left with an optional insert */ -static void handle_shli_with_ins(TCGContext *tcg_ctx, TCGv_i64 tcg_res, TCGv_i64 tcg_src, - bool insert, int shift) -{ - if (insert) { /* SLI */ - tcg_gen_deposit_i64(tcg_ctx, tcg_res, tcg_res, tcg_src, shift, 64 - shift); - } else { /* SHL */ - tcg_gen_shli_i64(tcg_ctx, tcg_res, tcg_src, shift); - } -} - -/* SRI: shift right with insert */ -static void handle_shri_with_ins(TCGContext *tcg_ctx, TCGv_i64 tcg_res, TCGv_i64 tcg_src, - int size, int shift) -{ - int esize = 8 << size; - - /* shift count same as element size is valid but does nothing; - * special case to avoid potential shift by 64. - */ - if (shift != esize) { - tcg_gen_shri_i64(tcg_ctx, tcg_src, tcg_src, shift); - tcg_gen_deposit_i64(tcg_ctx, tcg_res, tcg_res, tcg_src, 0, esize - shift); - } -} - /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */ static void handle_scalar_simd_shri(DisasContext *s, bool is_u, int immh, int immb, @@ -6301,16 +8243,23 @@ static void handle_scalar_simd_shri(DisasContext *s, uint64_t round_const = 1ULL << (shift - 1); tcg_round = tcg_const_i64(tcg_ctx, round_const); } else { - TCGV_UNUSED_I64(tcg_round); + tcg_round = NULL; } tcg_rn = read_fp_dreg(s, rn); tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64(tcg_ctx); if (insert) { - handle_shri_with_ins(tcg_ctx, tcg_rd, tcg_rn, size, shift); + /* shift count same as element size is valid but does nothing; + * special case to avoid potential shift by 64. + */ + int esize = 8 << size; + if (shift != esize) { + tcg_gen_shri_i64(tcg_ctx, tcg_rn, tcg_rn, shift); + tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_rn, 0, esize - shift); + } } else { - handle_shri_with_rndacc(s, tcg_rd, tcg_rn, tcg_round, + handle_shri_with_rndacc(tcg_ctx, tcg_rd, tcg_rn, tcg_round, accumulate, is_u, size, shift); } @@ -6347,7 +8296,11 @@ static void handle_scalar_simd_shli(DisasContext *s, bool insert, tcg_rn = read_fp_dreg(s, rn); tcg_rd = insert ? 
read_fp_dreg(s, rd) : tcg_temp_new_i64(tcg_ctx); - handle_shli_with_ins(tcg_ctx, tcg_rd, tcg_rn, insert, shift); + if (insert) { + tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift); + } else { + tcg_gen_shli_i64(tcg_ctx, tcg_rd, tcg_rn, shift); + } write_fp_dreg(s, rd, tcg_rd); @@ -6369,7 +8322,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, int shift = (2 * esize) - immhb; int elements = is_scalar ? 1 : (64 / esize); bool round = extract32(opcode, 0, 1); - TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); + MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); TCGv_i64 tcg_rn, tcg_rd, tcg_round; TCGv_i32 tcg_rd_narrowed; TCGv_i64 tcg_final; @@ -6419,12 +8372,12 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, uint64_t round_const = 1ULL << (shift - 1); tcg_round = tcg_const_i64(tcg_ctx, round_const); } else { - TCGV_UNUSED_I64(tcg_round); + tcg_round = NULL; } for (i = 0; i < elements; i++) { read_vec_element(s, tcg_rn, rn, i, ldop); - handle_shri_with_rndacc(s, tcg_rd, tcg_rn, tcg_round, + handle_shri_with_rndacc(tcg_ctx, tcg_rd, tcg_rn, tcg_round, false, is_u_shift, size+1, shift); narrowfn(tcg_ctx, tcg_rd_narrowed, tcg_ctx->cpu_env, tcg_rd); tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_rd_narrowed); @@ -6432,7 +8385,6 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, } if (!is_q) { - clear_vec_high(s, rd); write_vec_element(s, tcg_final, rd, 0, MO_64); } else { write_vec_element(s, tcg_final, rd, 1, MO_64); @@ -6445,7 +8397,8 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, tcg_temp_free_i64(tcg_ctx, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rd_narrowed); tcg_temp_free_i64(tcg_ctx, tcg_final); - return; + + clear_vec_high(s, is_q, rd); } /* SQSHLU, UQSHL, SQSHL: saturating left shifts */ @@ -6510,10 +8463,7 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, tcg_temp_free_i64(tcg_ctx, tcg_op); } tcg_temp_free_i64(tcg_ctx, tcg_shift); - - if (!is_q) { - clear_vec_high(s, rd); - } + clear_vec_high(s, is_q, rd); } else { TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, shift); static NeonGenTwoOpEnvFn * const fns[2][2][3] = { @@ -6532,7 +8482,7 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, } }; NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size]; - TCGMemOp memop = scalar ? size : MO_32; + MemOp memop = scalar ? size : MO_32; int maxpass = scalar ? 1 : is_q ? 4 : 2; for (pass = 0; pass < maxpass; pass++) { @@ -6562,8 +8512,8 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, } tcg_temp_free_i32(tcg_ctx, tcg_shift); - if (!is_q && !scalar) { - clear_vec_high(s, rd); + if (!scalar) { + clear_vec_high(s, is_q, rd); } } } @@ -6574,23 +8524,28 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, int fracbits, int size) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - bool is_double = size == 3 ? true : false; - TCGv_ptr tcg_fpst = get_fpstatus_ptr(tcg_ctx); - TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, fracbits); - TCGv_i64 tcg_int = tcg_temp_new_i64(tcg_ctx); - TCGMemOp mop = size | (is_signed ? MO_SIGN : 0); + TCGv_ptr tcg_fpst = get_fpstatus_ptr(tcg_ctx, size == MO_16); + TCGv_i32 tcg_shift = NULL; + + MemOp mop = size | (is_signed ? 
MO_SIGN : 0); int pass; - for (pass = 0; pass < elements; pass++) { - read_vec_element(s, tcg_int, rn, pass, mop); + if (fracbits || size == MO_64) { + tcg_shift = tcg_const_i32(tcg_ctx, fracbits); + } + + if (size == MO_64) { + TCGv_i64 tcg_int64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_double = tcg_temp_new_i64(tcg_ctx); + + for (pass = 0; pass < elements; pass++) { + read_vec_element(s, tcg_int64, rn, pass, mop); - if (is_double) { - TCGv_i64 tcg_double = tcg_temp_new_i64(tcg_ctx); if (is_signed) { - gen_helper_vfp_sqtod(tcg_ctx, tcg_double, tcg_int, + gen_helper_vfp_sqtod(tcg_ctx, tcg_double, tcg_int64, tcg_shift, tcg_fpst); } else { - gen_helper_vfp_uqtod(tcg_ctx, tcg_double, tcg_int, + gen_helper_vfp_uqtod(tcg_ctx, tcg_double, tcg_int64, tcg_shift, tcg_fpst); } if (elements == 1) { @@ -6598,32 +8553,74 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, } else { write_vec_element(s, tcg_double, rd, pass, MO_64); } - tcg_temp_free_i64(tcg_ctx, tcg_double); - } else { - TCGv_i32 tcg_single = tcg_temp_new_i32(tcg_ctx); - if (is_signed) { - gen_helper_vfp_sqtos(tcg_ctx, tcg_single, tcg_int, - tcg_shift, tcg_fpst); - } else { - gen_helper_vfp_uqtos(tcg_ctx, tcg_single, tcg_int, - tcg_shift, tcg_fpst); - } - if (elements == 1) { - write_fp_sreg(s, rd, tcg_single); - } else { - write_vec_element_i32(s, tcg_single, rd, pass, MO_32); - } - tcg_temp_free_i32(tcg_ctx, tcg_single); } + + tcg_temp_free_i64(tcg_ctx, tcg_int64); + tcg_temp_free_i64(tcg_ctx, tcg_double); + + } else { + TCGv_i32 tcg_int32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_float = tcg_temp_new_i32(tcg_ctx); + + for (pass = 0; pass < elements; pass++) { + read_vec_element_i32(s, tcg_int32, rn, pass, mop); + + switch (size) { + case MO_32: + if (fracbits) { + if (is_signed) { + gen_helper_vfp_sltos(tcg_ctx, tcg_float, tcg_int32, + tcg_shift, tcg_fpst); + } else { + gen_helper_vfp_ultos(tcg_ctx, tcg_float, tcg_int32, + tcg_shift, tcg_fpst); + } + } else { + if (is_signed) { + gen_helper_vfp_sitos(tcg_ctx, tcg_float, tcg_int32, tcg_fpst); + } else { + gen_helper_vfp_uitos(tcg_ctx, tcg_float, tcg_int32, tcg_fpst); + } + } + break; + case MO_16: + if (fracbits) { + if (is_signed) { + gen_helper_vfp_sltoh(tcg_ctx, tcg_float, tcg_int32, + tcg_shift, tcg_fpst); + } else { + gen_helper_vfp_ultoh(tcg_ctx, tcg_float, tcg_int32, + tcg_shift, tcg_fpst); + } + } else { + if (is_signed) { + gen_helper_vfp_sitoh(tcg_ctx, tcg_float, tcg_int32, tcg_fpst); + } else { + gen_helper_vfp_uitoh(tcg_ctx, tcg_float, tcg_int32, tcg_fpst); + } + } + break; + default: + g_assert_not_reached(); + } + + if (elements == 1) { + write_fp_sreg(s, rd, tcg_float); + } else { + write_vec_element_i32(s, tcg_float, rd, pass, size); + } + } + + tcg_temp_free_i32(tcg_ctx, tcg_int32); + tcg_temp_free_i32(tcg_ctx, tcg_float); } - if (!is_double && elements == 2) { - clear_vec_high(s, rd); - } - - tcg_temp_free_i64(tcg_ctx, tcg_int); tcg_temp_free_ptr(tcg_ctx, tcg_fpst); - tcg_temp_free_i32(tcg_ctx, tcg_shift); + if (tcg_shift) { + tcg_temp_free_i32(tcg_ctx, tcg_shift); + } + + clear_vec_high(s, elements << size == 16, rd); } /* UCVTF/SCVTF - Integer to FP conversion */ @@ -6632,13 +8629,26 @@ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar, int immh, int immb, int opcode, int rn, int rd) { - bool is_double = extract32(immh, 3, 1); - int size = is_double ? MO_64 : MO_32; - int elements; + int size, elements, fracbits; int immhb = immh << 3 | immb; - int fracbits = (is_double ? 
128 : 64) - immhb; - if (!extract32(immh, 2, 2)) { + if (immh & 8) { + size = MO_64; + if (!is_scalar && !is_q) { + unallocated_encoding(s); + return; + } + } else if (immh & 4) { + size = MO_32; + } else if (immh & 2) { + size = MO_16; + if (!dc_isar_feature(aa64_fp16, s)) { + unallocated_encoding(s); + return; + } + } else { + /* immh == 0 would be a failure of the decode logic */ + g_assert(immh == 1); unallocated_encoding(s); return; } @@ -6646,20 +8656,14 @@ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar, if (is_scalar) { elements = 1; } else { - elements = is_double ? 2 : is_q ? 4 : 2; - if (is_double && !is_q) { - unallocated_encoding(s); - return; - } + elements = (8 << is_q) >> size; } + fracbits = (16 << size) - immhb; if (!fp_access_check(s)) { return; } - /* immh == 0 would be a failure of the decode logic */ - g_assert(immh); - handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size); } @@ -6669,19 +8673,28 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, int immh, int immb, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - bool is_double = extract32(immh, 3, 1); int immhb = immh << 3 | immb; - int fracbits = (is_double ? 128 : 64) - immhb; - int pass; + int pass, size, fracbits; TCGv_ptr tcg_fpstatus; TCGv_i32 tcg_rmode, tcg_shift; - if (!extract32(immh, 2, 2)) { - unallocated_encoding(s); - return; - } - - if (!is_scalar && !is_q && is_double) { + if (immh & 0x8) { + size = MO_64; + if (!is_scalar && !is_q) { + unallocated_encoding(s); + return; + } + } else if (immh & 0x4) { + size = MO_32; + } else if (immh & 0x2) { + size = MO_16; + if (!dc_isar_feature(aa64_fp16, s)) { + unallocated_encoding(s); + return; + } + } else { + /* Should have split out AdvSIMD modified immediate earlier. */ + assert(immh == 1); unallocated_encoding(s); return; } @@ -6693,11 +8706,12 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, assert(!(is_scalar && is_q)); tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(FPROUNDING_ZERO)); - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - tcg_fpstatus = get_fpstatus_ptr(tcg_ctx); + tcg_fpstatus = get_fpstatus_ptr(tcg_ctx, size == MO_16); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); + fracbits = (16 << size) - immhb; tcg_shift = tcg_const_i32(tcg_ctx, fracbits); - if (is_double) { + if (size == MO_64) { int maxpass = is_scalar ? 1 : 2; for (pass = 0; pass < maxpass; pass++) { @@ -6712,39 +8726,54 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, write_vec_element(s, tcg_op, rd, pass, MO_64); tcg_temp_free_i64(tcg_ctx, tcg_op); } - if (!is_q) { - clear_vec_high(s, rd); - } + clear_vec_high(s, is_q, rd); } else { - int maxpass = is_scalar ? 1 : is_q ? 4 : 2; + void (*fn)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); + int maxpass = is_scalar ? 
1 : ((8 << is_q) >> size); + + switch (size) { + case MO_16: + if (is_u) { + fn = gen_helper_vfp_touhh; + } else { + fn = gen_helper_vfp_toshh; + } + break; + case MO_32: + if (is_u) { + fn = gen_helper_vfp_touls; + } else { + fn = gen_helper_vfp_tosls; + } + break; + default: + g_assert_not_reached(); + } + for (pass = 0; pass < maxpass; pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); - read_vec_element_i32(s, tcg_op, rn, pass, MO_32); - if (is_u) { - gen_helper_vfp_touls(tcg_ctx, tcg_op, tcg_op, tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_tosls(tcg_ctx, tcg_op, tcg_op, tcg_shift, tcg_fpstatus); - } + read_vec_element_i32(s, tcg_op, rn, pass, size); + fn(tcg_ctx, tcg_op, tcg_op, tcg_shift, tcg_fpstatus); if (is_scalar) { write_fp_sreg(s, rd, tcg_op); } else { - write_vec_element_i32(s, tcg_op, rd, pass, MO_32); + write_vec_element_i32(s, tcg_op, rd, pass, size); } tcg_temp_free_i32(tcg_ctx, tcg_op); } - if (!is_q && !is_scalar) { - clear_vec_high(s, rd); + if (!is_scalar) { + clear_vec_high(s, is_q, rd); } } tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_shift); - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_rmode); } -/* C3.6.9 AdvSIMD scalar shift by immediate +/* AdvSIMD scalar shift by immediate * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0 * +-----+---+-------------+------+------+--------+---+------+------+ * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd | @@ -6819,7 +8848,7 @@ static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn) } } -/* C3.6.10 AdvSIMD scalar three different +/* AdvSIMD scalar three different * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 * +-----+---+-----------+------+---+------+--------+-----+------+------+ * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd | @@ -6890,13 +8919,10 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_ctx, tcg_op2); tcg_temp_free_i64(tcg_ctx, tcg_res); } else { - TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); - TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_op1 = read_fp_hreg(s, rn); + TCGv_i32 tcg_op2 = read_fp_hreg(s, rm); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); - read_vec_element_i32(s, tcg_op1, rn, 0, MO_16); - read_vec_element_i32(s, tcg_op2, rm, 0, MO_16); - gen_helper_neon_mull_s16(tcg_ctx, tcg_res, tcg_op1, tcg_op2); gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_res, tcg_res); @@ -6935,8 +8961,7 @@ static void handle_3same_64(DisasContext *s, int opcode, bool u, /* Handle 64x64->64 opcodes which are shared between the scalar * and vector 3-same groups. We cover every opcode where size == 3 * is valid in either the three-reg-same (integer, not pairwise) - * or scalar-three-reg-same groups. (Some opcodes are not yet - * implemented.) + * or scalar-three-reg-same groups. */ TCGCond cond; @@ -6972,16 +8997,13 @@ static void handle_3same_64(DisasContext *s, int opcode, bool u, cond = TCG_COND_EQ; goto do_cmop; } - /* CMTST : test is "if (X & Y != 0)". 
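/*
 * [Editor's note -- annotation, not part of the patch] gen_cmtst_i64()
 * below keeps the per-lane semantics of the removed and/setcond/neg
 * sequence: the destination lane becomes all ones when (rn & rm) != 0 and
 * all zeros otherwise. In plain C:
 */
#include <stdint.h>

static uint64_t model_cmtst64(uint64_t rn, uint64_t rm)
{
    return (rn & rm) ? ~UINT64_C(0) : UINT64_C(0);
}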
*/ - tcg_gen_and_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); - tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, tcg_rd, tcg_rd, 0); - tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rd); + gen_cmtst_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); break; case 0x8: /* SSHL, USHL */ if (u) { - gen_helper_neon_shl_u64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); + gen_ushl_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); } else { - gen_helper_neon_shl_s64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); + gen_sshl_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); } break; case 0x9: /* SQSHL, UQSHL */ @@ -7026,7 +9048,7 @@ static void handle_3same_float(DisasContext *s, int size, int elements, { TCGContext *tcg_ctx = s->uc->tcg_ctx; int pass; - TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx); + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); for (pass = 0; pass < elements; pass++) { if (size) { @@ -7203,13 +9225,10 @@ static void handle_3same_float(DisasContext *s, int size, int elements, tcg_temp_free_ptr(tcg_ctx, fpst); - if ((elements << size) < 4) { - /* scalar, or non-quad vector op */ - clear_vec_high(s, rd); - } + clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd); } -/* C3.6.11 AdvSIMD scalar three same +/* AdvSIMD scalar three same * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0 * +-----+---+-----------+------+---+------+--------+---+------+------+ * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd | @@ -7301,7 +9320,7 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn) * OPTME: special-purpose helpers would avoid doing some * unnecessary work in the helper for the 8 and 16 bit cases. */ - NeonGenTwoOpEnvFn *genenvfn; + NeonGenTwoOpEnvFn *genenvfn = NULL; TCGv_i32 tcg_rn = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_rm = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_rd32 = tcg_temp_new_i32(tcg_ctx); @@ -7376,6 +9395,186 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_ctx, tcg_rd); } +/* AdvSIMD scalar three same FP16 + * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0 + * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+ + * | 0 1 | U | 1 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd | + * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+ + * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400 + * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400 + */ +static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s, + uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int opcode = extract32(insn, 11, 3); + int rm = extract32(insn, 16, 5); + bool u = extract32(insn, 29, 1); + bool a = extract32(insn, 23, 1); + int fpopcode = opcode | (a << 3) | (u << 4); + TCGv_ptr fpst; + TCGv_i32 tcg_op1; + TCGv_i32 tcg_op2; + TCGv_i32 tcg_res; + + switch (fpopcode) { + case 0x03: /* FMULX */ + case 0x04: /* FCMEQ (reg) */ + case 0x07: /* FRECPS */ + case 0x0f: /* FRSQRTS */ + case 0x14: /* FCMGE (reg) */ + case 0x15: /* FACGE */ + case 0x1a: /* FABD */ + case 0x1c: /* FCMGT (reg) */ + case 0x1d: /* FACGT */ + break; + default: + unallocated_encoding(s); + return; + } + + if (!dc_isar_feature(aa64_fp16, s)) { + unallocated_encoding(s); + } + + if (!fp_access_check(s)) { + return; + } + + fpst = get_fpstatus_ptr(tcg_ctx, true); + + tcg_op1 = read_fp_hreg(s, rn); + tcg_op2 = read_fp_hreg(s, rm); + tcg_res = tcg_temp_new_i32(tcg_ctx); + + switch (fpopcode) { + case 0x03: /* FMULX */ + gen_helper_advsimd_mulxh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 
0x04: /* FCMEQ (reg) */ + gen_helper_advsimd_ceq_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x07: /* FRECPS */ + gen_helper_recpsf_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x0f: /* FRSQRTS */ + gen_helper_rsqrtsf_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x14: /* FCMGE (reg) */ + gen_helper_advsimd_cge_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x15: /* FACGE */ + gen_helper_advsimd_acge_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x1a: /* FABD */ + gen_helper_advsimd_subh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + tcg_gen_andi_i32(tcg_ctx, tcg_res, tcg_res, 0x7fff); + break; + case 0x1c: /* FCMGT (reg) */ + gen_helper_advsimd_cgt_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x1d: /* FACGT */ + gen_helper_advsimd_acgt_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + default: + g_assert_not_reached(); + } + + write_fp_sreg(s, rd, tcg_res); + + + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_op1); + tcg_temp_free_i32(tcg_ctx, tcg_op2); + tcg_temp_free_ptr(tcg_ctx, fpst); +} + +/* AdvSIMD scalar three same extra + * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0 + * +-----+---+-----------+------+---+------+---+--------+---+----+----+ + * | 0 1 | U | 1 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd | + * +-----+---+-----------+------+---+------+---+--------+---+----+----+ + */ +static void disas_simd_scalar_three_reg_same_extra(DisasContext *s, + uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int opcode = extract32(insn, 11, 4); + int rm = extract32(insn, 16, 5); + int size = extract32(insn, 22, 2); + bool u = extract32(insn, 29, 1); + TCGv_i32 ele1, ele2, ele3; + TCGv_i64 res; + bool feature; + + switch (u * 16 + opcode) { + case 0x10: /* SQRDMLAH (vector) */ + case 0x11: /* SQRDMLSH (vector) */ + if (size != 1 && size != 2) { + unallocated_encoding(s); + return; + } + feature = dc_isar_feature(aa64_rdm, s); + break; + default: + unallocated_encoding(s); + return; + } + if (!feature) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + + /* Do a single operation on the lowest element in the vector. + * We use the standard Neon helpers and rely on 0 OP 0 == 0 + * with no side effects for all these operations. + * OPTME: special-purpose helpers would avoid doing some + * unnecessary work in the helper for the 16 bit cases. 
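/*
 * [Editor's note -- annotation, not part of the patch] The
 * gen_helper_neon_qrdmlah_s16() call used below performs a saturating
 * rounding doubling multiply-accumulate that returns the high half. A
 * plain-C sketch of the 16-bit arithmetic, modelled on the QEMU helper
 * (the model_* name and the omission of the QC saturation flag are the
 * editor's simplifications):
 */
#include <stdint.h>

static int16_t model_sqrdmlah_s16(int16_t rn, int16_t rm, int16_t rd)
{
    int32_t ret = (int32_t)rn * rm;              /* widening multiply    */
    ret = ((int32_t)rd << 15) + ret + (1 << 14); /* accumulate + round   */
    ret >>= 15;                                  /* keep the high half   */
    if (ret != (int16_t)ret) {                   /* saturate on overflow */
        ret = (ret < 0) ? -0x8000 : 0x7fff;
    }
    return (int16_t)ret;
}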
+ */ + ele1 = tcg_temp_new_i32(tcg_ctx); + ele2 = tcg_temp_new_i32(tcg_ctx); + ele3 = tcg_temp_new_i32(tcg_ctx); + + read_vec_element_i32(s, ele1, rn, 0, size); + read_vec_element_i32(s, ele2, rm, 0, size); + read_vec_element_i32(s, ele3, rd, 0, size); + + switch (opcode) { + case 0x0: /* SQRDMLAH */ + if (size == 1) { + gen_helper_neon_qrdmlah_s16(tcg_ctx, ele3, tcg_ctx->cpu_env, ele1, ele2, ele3); + } else { + gen_helper_neon_qrdmlah_s32(tcg_ctx, ele3, tcg_ctx->cpu_env, ele1, ele2, ele3); + } + break; + case 0x1: /* SQRDMLSH */ + if (size == 1) { + gen_helper_neon_qrdmlsh_s16(tcg_ctx, ele3, tcg_ctx->cpu_env, ele1, ele2, ele3); + } else { + gen_helper_neon_qrdmlsh_s32(tcg_ctx, ele3, tcg_ctx->cpu_env, ele1, ele2, ele3); + } + break; + default: + g_assert_not_reached(); + } + tcg_temp_free_i32(tcg_ctx, ele1); + tcg_temp_free_i32(tcg_ctx, ele2); + + res = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, res, ele3); + tcg_temp_free_i32(tcg_ctx, ele3); + + write_fp_dreg(s, rd, res); + tcg_temp_free_i64(tcg_ctx, res); +} + static void handle_2misc_64(DisasContext *s, int opcode, bool u, TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus) @@ -7392,9 +9591,9 @@ static void handle_2misc_64(DisasContext *s, int opcode, bool u, switch (opcode) { case 0x4: /* CLS, CLZ */ if (u) { - gen_helper_clz64(tcg_ctx, tcg_rd, tcg_rn); + tcg_gen_clzi_i64(tcg_ctx, tcg_rd, tcg_rn, 64); } else { - gen_helper_cls64(tcg_ctx, tcg_rd, tcg_rn); + tcg_gen_clrsb_i64(tcg_ctx, tcg_rd, tcg_rn); } break; case 0x5: /* NOT */ @@ -7430,11 +9629,7 @@ static void handle_2misc_64(DisasContext *s, int opcode, bool u, if (u) { tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rn); } else { - TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); - tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rn); - tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero, - tcg_rn, tcg_rd); - tcg_temp_free_i64(tcg_ctx, tcg_zero); + tcg_gen_abs_i64(tcg_ctx, tcg_rd, tcg_rn); } break; case 0x2f: /* FABS */ @@ -7479,6 +9674,14 @@ static void handle_2misc_64(DisasContext *s, int opcode, bool u, case 0x59: /* FRINTX */ gen_helper_rintd_exact(tcg_ctx, tcg_rd, tcg_rn, tcg_fpstatus); break; + case 0x1e: /* FRINT32Z */ + case 0x5e: /* FRINT32X */ + gen_helper_frint32_d(tcg_ctx, tcg_rd, tcg_rn, tcg_fpstatus); + break; + case 0x1f: /* FRINT64Z */ + case 0x5f: /* FRINT64X */ + gen_helper_frint64_d(tcg_ctx, tcg_rd, tcg_rn, tcg_fpstatus); + break; default: g_assert_not_reached(); } @@ -7489,20 +9692,20 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, int size, int rn, int rd) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - bool is_double = (size == 3); + bool is_double = (size == MO_64); TCGv_ptr fpst; if (!fp_access_check(s)) { return; } - fpst = get_fpstatus_ptr(tcg_ctx); + fpst = get_fpstatus_ptr(tcg_ctx, size == MO_16); if (is_double) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); - NeonGenTwoDoubleOPFn *genfn; + NeonGenTwoDoubleOPFn *genfn = NULL; bool swap = false; int pass; @@ -7535,49 +9738,70 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, } write_vec_element(s, tcg_res, rd, pass, MO_64); } - if (is_scalar) { - clear_vec_high(s, rd); - } - tcg_temp_free_i64(tcg_ctx, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_zero); tcg_temp_free_i64(tcg_ctx, tcg_op); + + clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_zero = tcg_const_i32(tcg_ctx, 0); TCGv_i32 tcg_res = 
tcg_temp_new_i32(tcg_ctx); - NeonGenTwoSingleOPFn *genfn; + NeonGenTwoSingleOPFn *genfn = NULL; bool swap = false; int pass, maxpasses; - switch (opcode) { - case 0x2e: /* FCMLT (zero) */ - swap = true; - /* fall through */ - case 0x2c: /* FCMGT (zero) */ - genfn = gen_helper_neon_cgt_f32; - break; - case 0x2d: /* FCMEQ (zero) */ - genfn = gen_helper_neon_ceq_f32; - break; - case 0x6d: /* FCMLE (zero) */ - swap = true; - /* fall through */ - case 0x6c: /* FCMGE (zero) */ - genfn = gen_helper_neon_cge_f32; - break; - default: - g_assert_not_reached(); + if (size == MO_16) { + switch (opcode) { + case 0x2e: /* FCMLT (zero) */ + swap = true; + /* fall through */ + case 0x2c: /* FCMGT (zero) */ + genfn = gen_helper_advsimd_cgt_f16; + break; + case 0x2d: /* FCMEQ (zero) */ + genfn = gen_helper_advsimd_ceq_f16; + break; + case 0x6d: /* FCMLE (zero) */ + swap = true; + /* fall through */ + case 0x6c: /* FCMGE (zero) */ + genfn = gen_helper_advsimd_cge_f16; + break; + default: + g_assert_not_reached(); + } + } else { + switch (opcode) { + case 0x2e: /* FCMLT (zero) */ + swap = true; + /* fall through */ + case 0x2c: /* FCMGT (zero) */ + genfn = gen_helper_neon_cgt_f32; + break; + case 0x2d: /* FCMEQ (zero) */ + genfn = gen_helper_neon_ceq_f32; + break; + case 0x6d: /* FCMLE (zero) */ + swap = true; + /* fall through */ + case 0x6c: /* FCMGE (zero) */ + genfn = gen_helper_neon_cge_f32; + break; + default: + g_assert_not_reached(); + } } if (is_scalar) { maxpasses = 1; } else { - maxpasses = is_q ? 4 : 2; + int vector_size = 8 << is_q; + maxpasses = vector_size >> size; } for (pass = 0; pass < maxpasses; pass++) { - read_vec_element_i32(s, tcg_op, rn, pass, MO_32); + read_vec_element_i32(s, tcg_op, rn, pass, size); if (swap) { genfn(tcg_ctx, tcg_res, tcg_zero, tcg_op, fpst); } else { @@ -7586,14 +9810,14 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, if (is_scalar) { write_fp_sreg(s, rd, tcg_res); } else { - write_vec_element_i32(s, tcg_res, rd, pass, MO_32); + write_vec_element_i32(s, tcg_res, rd, pass, size); } } tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_zero); tcg_temp_free_i32(tcg_ctx, tcg_op); - if (!is_q && !is_scalar) { - clear_vec_high(s, rd); + if (!is_scalar) { + clear_vec_high(s, is_q, rd); } } @@ -7606,7 +9830,7 @@ static void handle_2misc_reciprocal(DisasContext *s, int opcode, { TCGContext *tcg_ctx = s->uc->tcg_ctx; bool is_double = (size == 3); - TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx); + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); if (is_double) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); @@ -7630,12 +9854,9 @@ static void handle_2misc_reciprocal(DisasContext *s, int opcode, } write_vec_element(s, tcg_res, rd, pass, MO_64); } - if (is_scalar) { - clear_vec_high(s, rd); - } - tcg_temp_free_i64(tcg_ctx, tcg_res); tcg_temp_free_i64(tcg_ctx, tcg_op); + clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); @@ -7675,8 +9896,8 @@ static void handle_2misc_reciprocal(DisasContext *s, int opcode, } tcg_temp_free_i32(tcg_ctx, tcg_res); tcg_temp_free_i32(tcg_ctx, tcg_op); - if (!is_q && !is_scalar) { - clear_vec_high(s, rd); + if (!is_scalar) { + clear_vec_high(s, is_q, rd); } } tcg_temp_free_ptr(tcg_ctx, fpst); @@ -7717,7 +9938,7 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar, static NeonGenNarrowFn * const xtnfns[3] = { gen_helper_neon_narrow_u8, gen_helper_neon_narrow_u16, - tcg_gen_trunc_i64_i32, + tcg_gen_extrl_i64_i32, }; static 
NeonGenNarrowEnvFn * const sqxtunfns[3] = { gen_helper_neon_unarrow_sat8, @@ -7751,14 +9972,17 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar, } else { TCGv_i32 tcg_lo = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_hi = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_lo, tcg_op); - gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_lo, tcg_lo, tcg_ctx->cpu_env); - tcg_gen_shri_i64(tcg_ctx, tcg_op, tcg_op, 32); - tcg_gen_trunc_i64_i32(tcg_ctx, tcg_hi, tcg_op); - gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_hi, tcg_hi, tcg_ctx->cpu_env); + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); + TCGv_i32 ahp = get_ahp_flag(tcg_ctx); + + tcg_gen_extr_i64_i32(tcg_ctx, tcg_lo, tcg_hi, tcg_op); + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_lo, tcg_lo, fpst, ahp); + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_hi, tcg_hi, fpst, ahp); tcg_gen_deposit_i32(tcg_ctx, tcg_res[pass], tcg_lo, tcg_hi, 16, 16); tcg_temp_free_i32(tcg_ctx, tcg_lo); tcg_temp_free_i32(tcg_ctx, tcg_hi); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, ahp); } break; case 0x56: /* FCVTXN, FCVTXN2 */ @@ -7785,9 +10009,7 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar, write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32); tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); } - if (!is_q) { - clear_vec_high(s, rd); - } + clear_vec_high(s, is_q, rd); } /* Remaining saturating accumulating ops */ @@ -7813,12 +10035,9 @@ static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u, } write_vec_element(s, tcg_rd, rd, pass, MO_64); } - if (is_scalar) { - clear_vec_high(s, rd); - } - tcg_temp_free_i64(tcg_ctx, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_rn); + clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_rn = tcg_temp_new_i32(tcg_ctx); TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); @@ -7876,17 +10095,13 @@ static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u, } write_vec_element_i32(s, tcg_rd, rd, pass, MO_32); } - - if (!is_q) { - clear_vec_high(s, rd); - } - tcg_temp_free_i32(tcg_ctx, tcg_rd); tcg_temp_free_i32(tcg_ctx, tcg_rn); + clear_vec_high(s, is_q, rd); } } -/* C3.6.12 AdvSIMD scalar two reg misc +/* AdvSIMD scalar two reg misc * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 * +-----+---+-----------+------+-----------+--------+-----+------+------+ * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | @@ -7944,8 +10159,18 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) } handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd); return; - case 0x0c: case 0x0d: case 0x0e: case 0x0f: - case 0x16: case 0x17: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: + case 0xc: + case 0xd: + case 0xe: + case 0xf: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1a: + case 0x1b: + case 0x1c: + case 0x1d: case 0x1f: /* Floating point: U, size[1] and opcode indicate operation; * size[0] indicates single or double precision. 
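/*
 * [Editor's note -- annotation, not part of the patch] A pattern worth
 * calling out in the FCVT paths below: helper_set_rmode() installs a new
 * FP rounding mode and returns the previous one into the same temporary,
 * so issuing the call a second time with that temporary restores the
 * original mode. Schematically:
 *
 *     tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(FPROUNDING_ZERO));
 *     gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus);
 *     ...emit the conversion...
 *     gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus);
 *
 * The patch also retargets these calls from cpu_env to the fpstatus
 * pointer, which appears to track a signature change of the helper in
 * this tree.
 */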
@@ -8021,11 +10246,11 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) if (is_fcvt) { tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); - tcg_fpstatus = get_fpstatus_ptr(tcg_ctx); + tcg_fpstatus = get_fpstatus_ptr(tcg_ctx, false); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); } else { - TCGV_UNUSED_I32(tcg_rmode); - TCGV_UNUSED_PTR(tcg_fpstatus); + tcg_rmode = NULL; + tcg_fpstatus = NULL; } if (size == 3) { @@ -8087,7 +10312,7 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) } if (is_fcvt) { - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_rmode); tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); } @@ -8102,26 +10327,21 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, int immhb = immh << 3 | immb; int shift = 2 * (8 << size) - immhb; bool accumulate = false; - bool round = false; - bool insert = false; int dsize = is_q ? 128 : 64; int esize = 8 << size; int elements = dsize/esize; - TCGMemOp memop = size | (is_u ? 0 : MO_SIGN); + MemOp memop = size | (is_u ? 0 : MO_SIGN); TCGv_i64 tcg_rn = new_tmp_a64(s); TCGv_i64 tcg_rd = new_tmp_a64(s); TCGv_i64 tcg_round; + uint64_t round_const; int i; if (extract32(immh, 3, 1) && !is_q) { unallocated_encoding(s); return; } - - if (size > 3 && !is_q) { - unallocated_encoding(s); - return; - } + tcg_debug_assert(size <= 3); if (!fp_access_check(s)) { return; @@ -8129,93 +10349,99 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, switch (opcode) { case 0x02: /* SSRA / USRA (accumulate) */ - accumulate = true; - break; + if (is_u) { + /* Shift count same as element size produces zero to add. */ + if (shift == 8 << size) { + goto done; + } + gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]); + } else { + /* Shift count same as element size produces all sign to add. */ + if (shift == 8 << size) { + shift -= 1; + } + gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]); + } + return; + case 0x08: /* SRI */ + /* Shift count same as element size is valid but does nothing. */ + if (shift == 8 << size) { + goto done; + } + gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]); + return; + + case 0x00: /* SSHR / USHR */ + if (is_u) { + if (shift == 8 << size) { + /* Shift count the same size as element size produces zero. */ + tcg_gen_gvec_dup8i(tcg_ctx, vec_full_reg_offset(s, rd), + is_q ? 16 : 8, vec_full_reg_size(s), 0); + } else { + gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size); + } + } else { + /* Shift count the same size as element size produces all sign. 
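/*
 * [Editor's note -- annotation, not part of the patch] The
 * shift == 8 << size special cases above exist because a C shift by the
 * full operand width is undefined behaviour, while the architected result
 * is well defined: USHR by esize produces zero, and SSHR by esize produces
 * all copies of the sign bit, i.e. the same result as shifting by
 * esize - 1. A plain-C check of the signed case for 8-bit lanes:
 */
#include <assert.h>
#include <stdint.h>

static void model_sshr_by_esize_check(void)
{
    int8_t x = -5;
    assert((int8_t)(x >> 7) == -1);  /* SSHR #8 == SSHR #7: all sign bits */
    x = 5;
    assert((int8_t)(x >> 7) == 0);
}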
*/ + if (shift == 8 << size) { + shift -= 1; + } + gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_sari, size); + } + return; + case 0x04: /* SRSHR / URSHR (rounding) */ - round = true; break; case 0x06: /* SRSRA / URSRA (accum + rounding) */ - accumulate = round = true; - break; - case 0x08: /* SRI */ - insert = true; + accumulate = true; break; + default: + g_assert_not_reached(); } - if (round) { - uint64_t round_const = 1ULL << (shift - 1); - tcg_round = tcg_const_i64(tcg_ctx, round_const); - } else { - TCGV_UNUSED_I64(tcg_round); - } + round_const = 1ULL << (shift - 1); + tcg_round = tcg_const_i64(tcg_ctx, round_const); for (i = 0; i < elements; i++) { read_vec_element(s, tcg_rn, rn, i, memop); - if (accumulate || insert) { + if (accumulate) { read_vec_element(s, tcg_rd, rd, i, memop); } - if (insert) { - handle_shri_with_ins(tcg_ctx, tcg_rd, tcg_rn, size, shift); - } else { - handle_shri_with_rndacc(s, tcg_rd, tcg_rn, tcg_round, - accumulate, is_u, size, shift); - } + handle_shri_with_rndacc(tcg_ctx, tcg_rd, tcg_rn, tcg_round, + accumulate, is_u, size, shift); write_vec_element(s, tcg_rd, rd, i, size); } + tcg_temp_free_i64(tcg_ctx, tcg_round); - if (!is_q) { - clear_vec_high(s, rd); - } - - if (round) { - tcg_temp_free_i64(tcg_ctx, tcg_round); - } + done: + clear_vec_high(s, is_q, rd); } /* SHL/SLI - Vector shift left */ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert, - int immh, int immb, int opcode, int rn, int rd) + int immh, int immb, int opcode, int rn, int rd) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; int size = 32 - clz32(immh) - 1; int immhb = immh << 3 | immb; int shift = immhb - (8 << size); - int dsize = is_q ? 128 : 64; - int esize = 8 << size; - int elements = dsize/esize; - TCGv_i64 tcg_rn = new_tmp_a64(s); - TCGv_i64 tcg_rd = new_tmp_a64(s); - int i; + + /* Range of size is limited by decode: immh is a non-zero 4 bit field */ + assert(size >= 0 && size <= 3); if (extract32(immh, 3, 1) && !is_q) { unallocated_encoding(s); return; } - if (size > 3 && !is_q) { - unallocated_encoding(s); - return; - } - if (!fp_access_check(s)) { return; } - for (i = 0; i < elements; i++) { - read_vec_element(s, tcg_rn, rn, i, size); - if (insert) { - read_vec_element(s, tcg_rd, rd, i, size); - } - - handle_shli_with_ins(tcg_ctx, tcg_rd, tcg_rn, insert, shift); - - write_vec_element(s, tcg_rd, rd, i, size); - } - - if (!is_q) { - clear_vec_high(s, rd); + if (insert) { + gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]); + } else { + gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size); } } @@ -8291,35 +10517,34 @@ static void handle_vec_simd_shrn(DisasContext *s, bool is_q, uint64_t round_const = 1ULL << (shift - 1); tcg_round = tcg_const_i64(tcg_ctx, round_const); } else { - TCGV_UNUSED_I64(tcg_round); + tcg_round = NULL; } for (i = 0; i < elements; i++) { read_vec_element(s, tcg_rn, rn, i, size+1); - handle_shri_with_rndacc(s, tcg_rd, tcg_rn, tcg_round, + handle_shri_with_rndacc(tcg_ctx, tcg_rd, tcg_rn, tcg_round, false, true, size+1, shift); tcg_gen_deposit_i64(tcg_ctx, tcg_final, tcg_final, tcg_rd, esize * i, esize); } if (!is_q) { - clear_vec_high(s, rd); write_vec_element(s, tcg_final, rd, 0, MO_64); } else { write_vec_element(s, tcg_final, rd, 1, MO_64); } - if (round) { tcg_temp_free_i64(tcg_ctx, tcg_round); } tcg_temp_free_i64(tcg_ctx, tcg_rn); tcg_temp_free_i64(tcg_ctx, tcg_rd); tcg_temp_free_i64(tcg_ctx, tcg_final); - return; + + clear_vec_high(s, is_q, rd); } -/* C3.6.14 AdvSIMD shift by immediate +/* AdvSIMD shift by immediate * 31 30 
29 28 23 22 19 18 16 15 11 10 9 5 4 0 * +---+---+---+-------------+------+------+--------+---+------+------+ * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd | @@ -8335,6 +10560,9 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn) bool is_u = extract32(insn, 29, 1); bool is_q = extract32(insn, 30, 1); + /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */ + assert(immh != 0); + switch (opcode) { case 0x08: /* SRI */ if (!is_u) { @@ -8451,7 +10679,7 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_passres; - TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); + MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); int elt = pass + is_q * 2; @@ -8598,10 +10826,6 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_passres, tcg_ctx->cpu_env, tcg_passres, tcg_passres); break; - case 14: /* PMULL */ - assert(size == 0); - gen_helper_neon_mull_p8(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); - break; default: g_assert_not_reached(); } @@ -8668,16 +10892,10 @@ static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size, } } -static void do_narrow_high_u32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i64 in) -{ - tcg_gen_shri_i64(tcg_ctx, in, in, 32); - tcg_gen_trunc_i64_i32(tcg_ctx, res, in); -} - static void do_narrow_round_high_u32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i64 in) { tcg_gen_addi_i64(tcg_ctx, in, in, 1U << 31); - do_narrow_high_u32(tcg_ctx, res, in); + tcg_gen_extrh_i64_i32(tcg_ctx, res, in); } static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size, @@ -8697,7 +10915,7 @@ static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size, gen_helper_neon_narrow_round_high_u8 }, { gen_helper_neon_narrow_high_u16, gen_helper_neon_narrow_round_high_u16 }, - { do_narrow_high_u32, do_narrow_round_high_u32 }, + { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 }, }; NeonGenNarrowFn *gennarrow = narrowfns[size][is_u]; @@ -8718,37 +10936,10 @@ static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size, write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32); tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); } - if (!is_q) { - clear_vec_high(s, rd); - } + clear_vec_high(s, is_q, rd); } -static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm) -{ - TCGContext *tcg_ctx = s->uc->tcg_ctx; - /* PMULL of 64 x 64 -> 128 is an odd special case because it - * is the only three-reg-diff instruction which produces a - * 128-bit wide result from a single operation. However since - * it's possible to calculate the two halves more or less - * separately we just use two helper calls. 
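/*
 * [Editor's note -- annotation, not part of the patch] PMULL is a
 * carry-less (polynomial) multiply over GF(2): partial products are
 * combined with XOR rather than addition, so an N-bit by N-bit multiply
 * yields 2*N result bits. The gvec helpers the patch switches to below
 * implement this per lane; the 8-bit case in plain C (the model_* name is
 * the editor's):
 */
#include <stdint.h>

static uint16_t model_pmull8(uint8_t a, uint8_t b)
{
    uint16_t res = 0;
    for (int i = 0; i < 8; i++) {
        if (b & (1u << i)) {
            res ^= (uint16_t)a << i;   /* XOR, not ADD: no carries */
        }
    }
    return res;
}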
- */ - TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); - TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); - - read_vec_element(s, tcg_op1, rn, is_q, MO_64); - read_vec_element(s, tcg_op2, rm, is_q, MO_64); - gen_helper_neon_pmull_64_lo(tcg_ctx, tcg_res, tcg_op1, tcg_op2); - write_vec_element(s, tcg_res, rd, 0, MO_64); - gen_helper_neon_pmull_64_hi(tcg_ctx, tcg_res, tcg_op1, tcg_op2); - write_vec_element(s, tcg_res, rd, 1, MO_64); - - tcg_temp_free_i64(tcg_ctx, tcg_op1); - tcg_temp_free_i64(tcg_ctx, tcg_op2); - tcg_temp_free_i64(tcg_ctx, tcg_res); -} - -/* C3.6.15 AdvSIMD three different +/* AdvSIMD three different * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 * +---+---+---+-----------+------+---+------+--------+-----+------+------+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd | @@ -8800,22 +10991,38 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm); break; case 14: /* PMULL, PMULL2 */ - if (is_u || size == 1 || size == 2) { + if (is_u) { unallocated_encoding(s); return; } - if (size == 3) { - if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) { + switch (size) { + case 0: /* PMULL.P8 */ + if (!fp_access_check(s)) { + return; + } + /* The Q field specifies lo/hi half input for this insn. */ + gen_gvec_op3_ool(s, true, rd, rn, rm, is_q, + gen_helper_neon_pmull_h); + break; + + case 3: /* PMULL.P64 */ + if (!dc_isar_feature(aa64_pmull, s)) { unallocated_encoding(s); return; } if (!fp_access_check(s)) { return; } - handle_pmull_64(s, is_q, rd, rn, rm); - return; + /* The Q field specifies lo/hi half input for this insn. */ + gen_gvec_op3_ool(s, true, rd, rn, rm, is_q, + gen_helper_gvec_pmull_q); + break; + + default: + unallocated_encoding(s); + break; } - goto is_widening; + return; case 9: /* SQDMLAL, SQDMLAL2 */ case 11: /* SQDMLSL, SQDMLSL2 */ case 13: /* SQDMULL, SQDMULL2 */ @@ -8836,7 +11043,6 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) unallocated_encoding(s); return; } - is_widening: if (!fp_access_check(s)) { return; } @@ -8853,104 +11059,47 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) /* Logic op (opcode == 3) subgroup of C3.6.16. */ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn) { - TCGContext *tcg_ctx = s->uc->tcg_ctx; int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rm = extract32(insn, 16, 5); int size = extract32(insn, 22, 2); bool is_u = extract32(insn, 29, 1); bool is_q = extract32(insn, 30, 1); - TCGv_i64 tcg_op1, tcg_op2, tcg_res[2]; - int pass; if (!fp_access_check(s)) { return; } - tcg_op1 = tcg_temp_new_i64(tcg_ctx); - tcg_op2 = tcg_temp_new_i64(tcg_ctx); - tcg_res[0] = tcg_temp_new_i64(tcg_ctx); - tcg_res[1] = tcg_temp_new_i64(tcg_ctx); + switch (size + 4 * is_u) { + case 0: /* AND */ + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0); + return; + case 1: /* BIC */ + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0); + return; + case 2: /* ORR */ + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0); + return; + case 3: /* ORN */ + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0); + return; + case 4: /* EOR */ + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0); + return; - for (pass = 0; pass < (is_q ? 
2 : 1); pass++) { - read_vec_element(s, tcg_op1, rn, pass, MO_64); - read_vec_element(s, tcg_op2, rm, pass, MO_64); + case 5: /* BSL bitwise select */ + gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0); + return; + case 6: /* BIT, bitwise insert if true */ + gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0); + return; + case 7: /* BIF, bitwise insert if false */ + gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0); + return; - if (!is_u) { - switch (size) { - case 0: /* AND */ - tcg_gen_and_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); - break; - case 1: /* BIC */ - tcg_gen_andc_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); - break; - case 2: /* ORR */ - tcg_gen_or_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); - break; - case 3: /* ORN */ - tcg_gen_orc_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); - break; - } - } else { - if (size != 0) { - /* B* ops need res loaded to operate on */ - read_vec_element(s, tcg_res[pass], rd, pass, MO_64); - } - - switch (size) { - case 0: /* EOR */ - tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); - break; - case 1: /* BSL bitwise select */ - tcg_gen_xor_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_op2); - tcg_gen_and_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_res[pass]); - tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_op2, tcg_op1); - break; - case 2: /* BIT, bitwise insert if true */ - tcg_gen_xor_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_res[pass]); - tcg_gen_and_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_op2); - tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1); - break; - case 3: /* BIF, bitwise insert if false */ - tcg_gen_xor_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_res[pass]); - tcg_gen_andc_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_op2); - tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1); - break; - } - } + default: + g_assert_not_reached(); } - - write_vec_element(s, tcg_res[0], rd, 0, MO_64); - if (!is_q) { - tcg_gen_movi_i64(tcg_ctx, tcg_res[1], 0); - } - write_vec_element(s, tcg_res[1], rd, 1, MO_64); - - tcg_temp_free_i64(tcg_ctx, tcg_op1); - tcg_temp_free_i64(tcg_ctx, tcg_op2); - tcg_temp_free_i64(tcg_ctx, tcg_res[0]); - tcg_temp_free_i64(tcg_ctx, tcg_res[1]); -} - -/* Helper functions for 32 bit comparisons */ -static void gen_max_s32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2) -{ - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GE, res, op1, op2, op1, op2); -} - -static void gen_max_u32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2) -{ - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GEU, res, op1, op2, op1, op2); -} - -static void gen_min_s32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2) -{ - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LE, res, op1, op2, op1, op2); -} - -static void gen_min_u32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2) -{ - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LEU, res, op1, op2, op1, op2); } /* Pairwise op subgroup of C3.6.16. 
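/*
 * [Editor's note -- annotation, not part of the patch] The rewritten
 * logic-op subgroup above folds BSL, BIT and BIF into one bitwise-select
 * primitive; only the operand order differs. With
 * bitsel(sel, t, f) = (t & sel) | (f & ~sel):
 *
 *   BSL: rd = bitsel(rd, rn, rm)   (rd itself is the selector)
 *   BIT: rd = bitsel(rm, rn, rd)   (insert rn bits where rm is 1)
 *   BIF: rd = bitsel(rm, rd, rn)   (insert rn bits where rm is 0)
 *
 * A one-line plain-C model:
 */
#include <stdint.h>

static uint64_t model_bitsel(uint64_t sel, uint64_t t, uint64_t f)
{
    return (t & sel) | (f & ~sel);
}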
@@ -8967,9 +11116,9 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, /* Floating point operations need fpst */ if (opcode >= 0x58) { - fpst = get_fpstatus_ptr(tcg_ctx); + fpst = get_fpstatus_ptr(tcg_ctx, false); } else { - TCGV_UNUSED_PTR(fpst); + fpst = NULL; } if (!fp_access_check(s)) { @@ -9053,7 +11202,7 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, static NeonGenTwoOpFn * const fns[3][2] = { { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 }, { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 }, - { gen_max_s32, gen_max_u32 }, + { tcg_gen_smax_i32, tcg_gen_umax_i32 }, }; genfn = fns[size][u]; break; @@ -9063,7 +11212,7 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, static NeonGenTwoOpFn * const fns[3][2] = { { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 }, { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 }, - { gen_min_s32, gen_min_u32 }, + { tcg_gen_smin_i32, tcg_gen_umin_i32 }, }; genfn = fns[size][u]; break; @@ -9101,12 +11250,10 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); } - if (!is_q) { - clear_vec_high(s, rd); - } + clear_vec_high(s, is_q, rd); } - if (!TCGV_IS_UNUSED_PTR(fpst)) { + if (fpst) { tcg_temp_free_ptr(tcg_ctx, fpst); } } @@ -9114,6 +11261,7 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, /* Floating point op subgroup of C3.6.16. */ static void disas_simd_3same_float(DisasContext *s, uint32_t insn) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; /* For floating point ops, the U, size[1] and opcode bits * together indicate the operation. size[0] indicates single * or double. @@ -9171,9 +11319,29 @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn) if (!fp_access_check(s)) { return; } - handle_3same_float(s, size, elements, fpopcode, rd, rn, rm); return; + + case 0x1d: /* FMLAL */ + case 0x3d: /* FMLSL */ + case 0x59: /* FMLAL2 */ + case 0x79: /* FMLSL2 */ + if (size & 1 || !dc_isar_feature(aa64_fhm, s)) { + unallocated_encoding(s); + return; + } + if (fp_access_check(s)) { + int is_s = extract32(insn, 23, 1); + int is_2 = extract32(insn, 29, 1); + int data = (is_2 << 1) | is_s; + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), tcg_ctx->cpu_env, + is_q ? 16 : 8, vec_full_reg_size(s), + data, gen_helper_gvec_fmlal_a64); + } + return; + default: unallocated_encoding(s); return; @@ -9192,6 +11360,7 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); int pass; + TCGCond cond; switch (opcode) { case 0x13: /* MUL, PMUL */ @@ -9231,6 +11400,83 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) return; } + switch (opcode) { + case 0x01: /* SQADD, UQADD */ + tcg_gen_gvec_4(tcg_ctx, vec_full_reg_offset(s, rd), + offsetof(CPUARMState, vfp.qc), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + is_q ? 16 : 8, vec_full_reg_size(s), + (u ? uqadd_op : sqadd_op) + size); + return; + case 0x05: /* SQSUB, UQSUB */ + tcg_gen_gvec_4(tcg_ctx, vec_full_reg_offset(s, rd), + offsetof(CPUARMState, vfp.qc), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + is_q ? 16 : 8, vec_full_reg_size(s), + (u ? 
uqsub_op : sqsub_op) + size); + return; + case 0x08: /* SSHL, USHL */ + gen_gvec_op3(s, is_q, rd, rn, rm, + u ? &ushl_op[size] : &sshl_op[size]); + return; + case 0x0c: /* SMAX, UMAX */ + if (u) { + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size); + } else { + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size); + } + return; + case 0x0d: /* SMIN, UMIN */ + if (u) { + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size); + } else { + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size); + } + return; + case 0x10: /* ADD, SUB */ + if (u) { + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size); + } else { + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size); + } + return; + case 0x13: /* MUL, PMUL */ + if (!u) { /* MUL */ + gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size); + } else { /* PMUL */ + gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b); + } + return; + case 0x12: /* MLA, MLS */ + if (u) { + gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]); + } else { + gen_gvec_op3(s, is_q, rd, rn, rm, &mla_op[size]); + } + return; + case 0x11: + if (!u) { /* CMTST */ + gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]); + return; + } + /* else CMEQ */ + cond = TCG_COND_EQ; + goto do_gvec_cmp; + case 0x06: /* CMGT, CMHI */ + cond = u ? TCG_COND_GTU : TCG_COND_GT; + goto do_gvec_cmp; + case 0x07: /* CMGE, CMHS */ + cond = u ? TCG_COND_GEU : TCG_COND_GE; + do_gvec_cmp: + tcg_gen_gvec_cmp(tcg_ctx, cond, size, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + is_q ? 16 : 8, vec_full_reg_size(s)); + return; + } + if (size == 3) { assert(is_q); for (pass = 0; pass < 2; pass++) { @@ -9271,16 +11517,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) genfn = fns[size][u]; break; } - case 0x1: /* SQADD, UQADD */ - { - static NeonGenTwoOpEnvFn * const fns[3][2] = { - { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 }, - { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 }, - { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 }, - }; - genenvfn = fns[size][u]; - break; - } case 0x2: /* SRHADD, URHADD */ { static NeonGenTwoOpFn * const fns[3][2] = { @@ -9301,46 +11537,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) genfn = fns[size][u]; break; } - case 0x5: /* SQSUB, UQSUB */ - { - static NeonGenTwoOpEnvFn * const fns[3][2] = { - { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 }, - { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 }, - { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 }, - }; - genenvfn = fns[size][u]; - break; - } - case 0x6: /* CMGT, CMHI */ - { - static NeonGenTwoOpFn * const fns[3][2] = { - { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_u8 }, - { gen_helper_neon_cgt_s16, gen_helper_neon_cgt_u16 }, - { gen_helper_neon_cgt_s32, gen_helper_neon_cgt_u32 }, - }; - genfn = fns[size][u]; - break; - } - case 0x7: /* CMGE, CMHS */ - { - static NeonGenTwoOpFn * const fns[3][2] = { - { gen_helper_neon_cge_s8, gen_helper_neon_cge_u8 }, - { gen_helper_neon_cge_s16, gen_helper_neon_cge_u16 }, - { gen_helper_neon_cge_s32, gen_helper_neon_cge_u32 }, - }; - genfn = fns[size][u]; - break; - } - case 0x8: /* SSHL, USHL */ - { - static NeonGenTwoOpFn * const fns[3][2] = { - { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 }, - { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 }, - { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 }, - }; - genfn = fns[size][u]; - break; - } case 0x9: /* SQSHL, UQSHL */ { static NeonGenTwoOpEnvFn * const fns[3][2] = 
{ @@ -9371,27 +11567,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) genenvfn = fns[size][u]; break; } - case 0xc: /* SMAX, UMAX */ - { - static NeonGenTwoOpFn * const fns[3][2] = { - { gen_helper_neon_max_s8, gen_helper_neon_max_u8 }, - { gen_helper_neon_max_s16, gen_helper_neon_max_u16 }, - { gen_max_s32, gen_max_u32 }, - }; - genfn = fns[size][u]; - break; - } - - case 0xd: /* SMIN, UMIN */ - { - static NeonGenTwoOpFn * const fns[3][2] = { - { gen_helper_neon_min_s8, gen_helper_neon_min_u8 }, - { gen_helper_neon_min_s16, gen_helper_neon_min_u16 }, - { gen_min_s32, gen_min_u32 }, - }; - genfn = fns[size][u]; - break; - } case 0xe: /* SABD, UABD */ case 0xf: /* SABA, UABA */ { @@ -9403,44 +11578,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) genfn = fns[size][u]; break; } - case 0x10: /* ADD, SUB */ - { - static NeonGenTwoOpFn * const fns[3][2] = { - { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 }, - { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 }, - { tcg_gen_add_i32, tcg_gen_sub_i32 }, - }; - genfn = fns[size][u]; - break; - } - case 0x11: /* CMTST, CMEQ */ - { - static NeonGenTwoOpFn * const fns[3][2] = { - { gen_helper_neon_tst_u8, gen_helper_neon_ceq_u8 }, - { gen_helper_neon_tst_u16, gen_helper_neon_ceq_u16 }, - { gen_helper_neon_tst_u32, gen_helper_neon_ceq_u32 }, - }; - genfn = fns[size][u]; - break; - } - case 0x13: /* MUL, PMUL */ - if (u) { - /* PMUL */ - assert(size == 0); - genfn = gen_helper_neon_mul_p8; - break; - } - /* fall through : MUL */ - case 0x12: /* MLA, MLS */ - { - static NeonGenTwoOpFn * const fns[3] = { - gen_helper_neon_mul_u8, - gen_helper_neon_mul_u16, - tcg_gen_mul_i32, - }; - genfn = fns[size]; - break; - } case 0x16: /* SQDMULH, SQRDMULH */ { static NeonGenTwoOpEnvFn * const fns[2][2] = { @@ -9461,18 +11598,16 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) genfn(tcg_ctx, tcg_res, tcg_op1, tcg_op2); } - if (opcode == 0xf || opcode == 0x12) { - /* SABA, UABA, MLA, MLS: accumulating ops */ - static NeonGenTwoOpFn * const fns[3][2] = { - { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 }, - { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 }, - { tcg_gen_add_i32, tcg_gen_sub_i32 }, + if (opcode == 0xf) { + /* SABA, UABA: accumulating ops */ + static NeonGenTwoOpFn * const fns[3] = { + gen_helper_neon_add_u8, + gen_helper_neon_add_u16, + tcg_gen_add_i32, }; - bool is_sub = (opcode == 0x12 && u); /* MLS */ - genfn = fns[size][is_sub]; read_vec_element_i32(s, tcg_op1, rd, pass, MO_32); - genfn(tcg_ctx, tcg_res, tcg_op1, tcg_res); + fns[size](tcg_ctx, tcg_res, tcg_op1, tcg_res); } write_vec_element_i32(s, tcg_res, rd, pass, MO_32); @@ -9482,13 +11617,10 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) tcg_temp_free_i32(tcg_ctx, tcg_op2); } } - - if (!is_q) { - clear_vec_high(s, rd); - } + clear_vec_high(s, is_q, rd); } -/* C3.6.16 AdvSIMD three same +/* AdvSIMD three same * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0 * +---+---+---+-----------+------+---+------+--------+---+------+------+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd | @@ -9527,10 +11659,32 @@ static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn) handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd); break; } - case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: - case 0x20: case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: case 0x26: case 0x27: - case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2c: case 0x2d: case 
0x2e: case 0x2f: - case 0x30: case 0x31: + case 0x18: + case 0x19: + case 0x1a: + case 0x1b: + case 0x1c: + case 0x1d: + case 0x1e: + case 0x1f: + case 0x20: + case 0x21: + case 0x22: + case 0x23: + case 0x24: + case 0x25: + case 0x26: + case 0x27: + case 0x28: + case 0x29: + case 0x2a: + case 0x2b: + case 0x2c: + case 0x2d: + case 0x2e: + case 0x2f: + case 0x30: + case 0x31: /* floating point ops, sz[1] and U are part of opcode */ disas_simd_3same_float(s, insn); break; @@ -9540,6 +11694,343 @@ static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn) } } +/* + * Advanced SIMD three same (ARMv8.2 FP16 variants) + * + * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0 + * +---+---+---+-----------+---------+------+-----+--------+---+------+------+ + * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd | + * +---+---+---+-----------+---------+------+-----+--------+---+------+------+ + * + * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE + * (register), FACGE, FABD, FCMGT (register) and FACGT. + * + */ +static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opcode, fpopcode; + int is_q, u, a, rm, rn, rd; + int datasize, elements; + int pass; + TCGv_ptr fpst; + bool pairwise = false; + + if (!dc_isar_feature(aa64_fp16, s)) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + /* For these floating point ops, the U, a and opcode bits + * together indicate the operation. + */ + opcode = extract32(insn, 11, 3); + u = extract32(insn, 29, 1); + a = extract32(insn, 23, 1); + is_q = extract32(insn, 30, 1); + rm = extract32(insn, 16, 5); + rn = extract32(insn, 5, 5); + rd = extract32(insn, 0, 5); + + fpopcode = opcode | (a << 3) | (u << 4); + datasize = is_q ? 128 : 64; + elements = datasize / 16; + + switch (fpopcode) { + case 0x10: /* FMAXNMP */ + case 0x12: /* FADDP */ + case 0x16: /* FMAXP */ + case 0x18: /* FMINNMP */ + case 0x1e: /* FMINP */ + pairwise = true; + break; + } + + fpst = get_fpstatus_ptr(tcg_ctx, true); + + if (pairwise) { + int maxpass = is_q ? 8 : 4; + TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_res[8]; + + for (pass = 0; pass < maxpass; pass++) { + int passreg = pass < (maxpass / 2) ? 
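/*
 * Pairwise ops reduce adjacent element pairs of the concatenation
 * Rn:Rm. With is_q set, maxpass is 8, so passes 0..3 take their pair
 * from Rn and passes 4..7 from Rm, while
 * passelt = (pass << 1) & (maxpass - 1) steps through elements
 * 0, 2, 4, 6 of whichever source was selected.
 */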
rn : rm; + int passelt = (pass << 1) & (maxpass - 1); + + read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16); + read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16); + tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); + + switch (fpopcode) { + case 0x10: /* FMAXNMP */ + gen_helper_advsimd_maxnumh(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, + fpst); + break; + case 0x12: /* FADDP */ + gen_helper_advsimd_addh(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + case 0x16: /* FMAXP */ + gen_helper_advsimd_maxh(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + case 0x18: /* FMINNMP */ + gen_helper_advsimd_minnumh(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, + fpst); + break; + case 0x1e: /* FMINP */ + gen_helper_advsimd_minh(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + default: + g_assert_not_reached(); + } + } + + for (pass = 0; pass < maxpass; pass++) { + write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16); + tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); + } + + tcg_temp_free_i32(tcg_ctx, tcg_op1); + tcg_temp_free_i32(tcg_ctx, tcg_op2); + + } else { + for (pass = 0; pass < elements; pass++) { + TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); + + read_vec_element_i32(s, tcg_op1, rn, pass, MO_16); + read_vec_element_i32(s, tcg_op2, rm, pass, MO_16); + + switch (fpopcode) { + case 0x0: /* FMAXNM */ + gen_helper_advsimd_maxnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x1: /* FMLA */ + read_vec_element_i32(s, tcg_res, rd, pass, MO_16); + gen_helper_advsimd_muladdh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_res, + fpst); + break; + case 0x2: /* FADD */ + gen_helper_advsimd_addh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x3: /* FMULX */ + gen_helper_advsimd_mulxh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x4: /* FCMEQ */ + gen_helper_advsimd_ceq_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x6: /* FMAX */ + gen_helper_advsimd_maxh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x7: /* FRECPS */ + gen_helper_recpsf_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x8: /* FMINNM */ + gen_helper_advsimd_minnumh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x9: /* FMLS */ + /* As usual for ARM, separate negation for fused multiply-add */ + tcg_gen_xori_i32(tcg_ctx, tcg_op1, tcg_op1, 0x8000); + read_vec_element_i32(s, tcg_res, rd, pass, MO_16); + gen_helper_advsimd_muladdh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_res, + fpst); + break; + case 0xa: /* FSUB */ + gen_helper_advsimd_subh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0xe: /* FMIN */ + gen_helper_advsimd_minh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0xf: /* FRSQRTS */ + gen_helper_rsqrtsf_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x13: /* FMUL */ + gen_helper_advsimd_mulh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x14: /* FCMGE */ + gen_helper_advsimd_cge_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x15: /* FACGE */ + gen_helper_advsimd_acge_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x17: /* FDIV */ + gen_helper_advsimd_divh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + case 0x1a: /* FABD */ + gen_helper_advsimd_subh(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + tcg_gen_andi_i32(tcg_ctx, tcg_res, tcg_res, 0x7fff); + break; + case 0x1c: /* FCMGT */ + gen_helper_advsimd_cgt_f16(tcg_ctx, tcg_res, 
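/*
 * Note the half-precision bit tricks used above: FABD is a plain
 * subtraction followed by clearing bit 15 (the fp16 sign bit, hence
 * the 0x7fff mask), and FMLS negates one operand up front by XOR-ing
 * 0x8000, per the usual ARM rule of separate negation for fused
 * multiply-add.
 */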
tcg_op1, tcg_op2, fpst); + break; + case 0x1d: /* FACGT */ + gen_helper_advsimd_acgt_f16(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst); + break; + default: + fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n", + __func__, insn, fpopcode, s->pc_curr); + g_assert_not_reached(); + } + + write_vec_element_i32(s, tcg_res, rd, pass, MO_16); + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_op1); + tcg_temp_free_i32(tcg_ctx, tcg_op2); + } + } + + tcg_temp_free_ptr(tcg_ctx, fpst); + + clear_vec_high(s, is_q, rd); +} + +/* AdvSIMD three same extra + * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0 + * +---+---+---+-----------+------+---+------+---+--------+---+----+----+ + * | 0 | Q | U | 0 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd | + * +---+---+---+-----------+------+---+------+---+--------+---+----+----+ + */ +static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) +{ + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int opcode = extract32(insn, 11, 4); + int rm = extract32(insn, 16, 5); + int size = extract32(insn, 22, 2); + bool u = extract32(insn, 29, 1); + bool is_q = extract32(insn, 30, 1); + bool feature; + int rot; + + switch (u * 16 + opcode) { + case 0x10: /* SQRDMLAH (vector) */ + case 0x11: /* SQRDMLSH (vector) */ + if (size != 1 && size != 2) { + unallocated_encoding(s); + return; + } + feature = dc_isar_feature(aa64_rdm, s); + break; + case 0x02: /* SDOT (vector) */ + case 0x12: /* UDOT (vector) */ + if (size != MO_32) { + unallocated_encoding(s); + return; + } + feature = dc_isar_feature(aa64_dp, s); + break; + case 0x18: /* FCMLA, #0 */ + case 0x19: /* FCMLA, #90 */ + case 0x1a: /* FCMLA, #180 */ + case 0x1b: /* FCMLA, #270 */ + case 0x1c: /* FCADD, #90 */ + case 0x1e: /* FCADD, #270 */ + if (size == 0 + || (size == 1 && !dc_isar_feature(aa64_fp16, s)) + || (size == 3 && !is_q)) { + unallocated_encoding(s); + return; + } + feature = dc_isar_feature(aa64_fcma, s); + break; + default: + unallocated_encoding(s); + return; + } + if (!feature) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + + switch (opcode) { + case 0x0: /* SQRDMLAH (vector) */ + switch (size) { + case 1: + gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s16); + break; + case 2: + gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s32); + break; + default: + g_assert_not_reached(); + } + return; + + case 0x1: /* SQRDMLSH (vector) */ + switch (size) { + case 1: + gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s16); + break; + case 2: + gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s32); + break; + default: + g_assert_not_reached(); + } + return; + + case 0x2: /* SDOT / UDOT */ + gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, + u ? 
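/*
 * SDOT/UDOT: the U bit selects the unsigned helper. Semantically each
 * 32-bit lane of Rd accumulates four byte products, roughly:
 *
 *   for (i = 0; i < lanes; i++)
 *       d[i] += n[4*i+0]*m[4*i+0] + n[4*i+1]*m[4*i+1]
 *             + n[4*i+2]*m[4*i+2] + n[4*i+3]*m[4*i+3];
 *
 * (a sketch of the architectural semantics, not the helper's code)
 */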
gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b); + return; + + case 0x8: /* FCMLA, #0 */ + case 0x9: /* FCMLA, #90 */ + case 0xa: /* FCMLA, #180 */ + case 0xb: /* FCMLA, #270 */ + rot = extract32(opcode, 0, 2); + switch (size) { + case 1: + gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot, + gen_helper_gvec_fcmlah); + break; + case 2: + gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot, + gen_helper_gvec_fcmlas); + break; + case 3: + gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot, + gen_helper_gvec_fcmlad); + break; + default: + g_assert_not_reached(); + } + return; + + case 0xc: /* FCADD, #90 */ + case 0xe: /* FCADD, #270 */ + rot = extract32(opcode, 1, 1); + switch (size) { + case 1: + gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot, + gen_helper_gvec_fcaddh); + break; + case 2: + gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot, + gen_helper_gvec_fcadds); + break; + case 3: + gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot, + gen_helper_gvec_fcaddd); + break; + default: + g_assert_not_reached(); + } + return; + + default: + g_assert_not_reached(); + } +} + static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q, int size, int rn, int rd) { @@ -9571,18 +12062,23 @@ static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q, /* 16 -> 32 bit fp conversion */ int srcelt = is_q ? 4 : 0; TCGv_i32 tcg_res[4]; + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, false); + TCGv_i32 ahp = get_ahp_flag(tcg_ctx); for (pass = 0; pass < 4; pass++) { tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16); gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_res[pass], tcg_res[pass], - tcg_ctx->cpu_env); + fpst, ahp); } for (pass = 0; pass < 4; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); } + + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, ahp); } } @@ -9629,9 +12125,7 @@ static void handle_rev(DisasContext *s, int opcode, bool u, write_vec_element(s, tcg_tmp, rd, i, grp_size); tcg_temp_free_i64(tcg_ctx, tcg_tmp); } - if (!is_q) { - clear_vec_high(s, rd); - } + clear_vec_high(s, is_q, rd); } else { int revmask = (1 << grp_size) - 1; int esize = 8 << size; @@ -9676,7 +12170,7 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, if (size == 2) { /* 32 + 32 -> 64 op */ - TCGMemOp memop = size + (u ? 0 : MO_SIGN); + MemOp memop = size + (u ? 0 : MO_SIGN); for (pass = 0; pass < maxpass; pass++) { TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); @@ -9764,7 +12258,7 @@ static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd) } } -/* C3.6.17 AdvSIMD two reg misc +/* AdvSIMD two reg misc * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 * +---+---+---+-----------+------+-----------+--------+-----+------+------+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | @@ -9792,8 +12286,7 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) return; case 0x5: /* CNT, NOT, RBIT */ if (u && size == 0) { - /* NOT: adjust size so we can use the 64-bits-at-a-time loop. 
*/ - size = 3; + /* NOT */ break; } else if (u && size == 1) { /* RBIT */ @@ -9873,8 +12366,19 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) return; } break; - case 0x0c: case 0x0d: case 0x0e: case 0x0f: - case 0x16: case 0x17: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: + case 0xc: + case 0xd: + case 0xe: + case 0xf: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1a: + case 0x1b: + case 0x1c: + case 0x1d: + case 0x1e: case 0x1f: { /* Floating point: U, size[1] and opcode indicate operation; @@ -10018,6 +12522,19 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) } need_fpstatus = true; break; + case 0x1e: /* FRINT32Z */ + case 0x1f: /* FRINT64Z */ + need_rmode = true; + rmode = FPROUNDING_ZERO; + /* fall through */ + case 0x5e: /* FRINT32X */ + case 0x5f: /* FRINT64X */ + need_fpstatus = true; + if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) { + unallocated_encoding(s); + return; + } + break; default: unallocated_encoding(s); return; @@ -10033,23 +12550,43 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) return; } - if (need_fpstatus) { - tcg_fpstatus = get_fpstatus_ptr(tcg_ctx); + if (need_fpstatus || need_rmode) { + tcg_fpstatus = get_fpstatus_ptr(tcg_ctx, false); } else { - TCGV_UNUSED_PTR(tcg_fpstatus); + tcg_fpstatus = NULL; } if (need_rmode) { tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); } else { - TCGV_UNUSED_I32(tcg_rmode); + tcg_rmode = NULL; + } + + switch (opcode) { + case 0x5: + if (u && size == 0) { /* NOT */ + gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0); + return; + } + break; + case 0xb: + if (u) { /* ABS, NEG */ + gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size); + } else { + gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size); + } + return; } if (size == 3) { /* All 64-bit element operations can be shared with scalar 2misc */ int pass; - for (pass = 0; pass < (is_q ? 2 : 1); pass++) { + /* Coverity claims (size == 3 && !is_q) has been eliminated + * from all paths leading to here. 
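 * (That is: the per-opcode checks earlier in this function should have
 * rejected, or re-routed to another decode path, every encoding with
 * size == 3 and !is_q before control reaches this 64-bit loop.)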
+ */ + tcg_debug_assert(is_q); + for (pass = 0; pass < 2; pass++) { TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); @@ -10094,9 +12631,9 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) goto do_cmop; case 0x4: /* CLS */ if (u) { - gen_helper_clz32(tcg_ctx, tcg_res, tcg_op); + tcg_gen_clzi_i32(tcg_ctx, tcg_res, tcg_op, 32); } else { - gen_helper_cls32(tcg_ctx, tcg_res, tcg_op); + tcg_gen_clrsb_i32(tcg_ctx, tcg_res, tcg_op); } break; case 0x7: /* SQABS, SQNEG */ @@ -10106,17 +12643,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) gen_helper_neon_qabs_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op); } break; - case 0xb: /* ABS, NEG */ - if (u) { - tcg_gen_neg_i32(tcg_ctx, tcg_res, tcg_op); - } else { - TCGv_i32 tcg_zero = tcg_const_i32(tcg_ctx, 0); - tcg_gen_neg_i32(tcg_ctx, tcg_res, tcg_op); - tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GT, tcg_res, tcg_op, - tcg_zero, tcg_op, tcg_res); - tcg_temp_free_i32(tcg_ctx, tcg_zero); - } - break; case 0x2f: /* FABS */ gen_helper_vfp_abss(tcg_ctx, tcg_res, tcg_op); break; @@ -10164,6 +12690,14 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) case 0x7c: /* URSQRTE */ gen_helper_rsqrte_u32(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); break; + case 0x1e: /* FRINT32Z */ + case 0x5e: /* FRINT32X */ + gen_helper_frint32_s(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x1f: /* FRINT64Z */ + case 0x5f: /* FRINT64X */ + gen_helper_frint64_s(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; default: g_assert_not_reached(); } @@ -10221,23 +12755,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) tcg_temp_free_i32(tcg_ctx, tcg_zero); break; } - case 0xb: /* ABS, NEG */ - if (u) { - TCGv_i32 tcg_zero = tcg_const_i32(tcg_ctx, 0); - if (size) { - gen_helper_neon_sub_u16(tcg_ctx, tcg_res, tcg_zero, tcg_op); - } else { - gen_helper_neon_sub_u8(tcg_ctx, tcg_res, tcg_zero, tcg_op); - } - tcg_temp_free_i32(tcg_ctx, tcg_zero); - } else { - if (size) { - gen_helper_neon_abs_s16(tcg_ctx, tcg_res, tcg_op); - } else { - gen_helper_neon_abs_s8(tcg_ctx, tcg_res, tcg_op); - } - } - break; case 0x4: /* CLS, CLZ */ if (u) { if (size == 0) { @@ -10264,12 +12781,10 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) tcg_temp_free_i32(tcg_ctx, tcg_op); } } - if (!is_q) { - clear_vec_high(s, rd); - } + clear_vec_high(s, is_q, rd); if (need_rmode) { - gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); tcg_temp_free_i32(tcg_ctx, tcg_rmode); } if (need_fpstatus) { @@ -10277,12 +12792,312 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) } } -/* C3.6.13 AdvSIMD scalar x indexed element +/* AdvSIMD [scalar] two register miscellaneous (FP16) + * + * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0 + * +---+---+---+---+---------+---+-------------+--------+-----+------+------+ + * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd | + * +---+---+---+---+---------+---+-------------+--------+-----+------+------+ + * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00 + * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800 + * + * This actually covers two groups where scalar access is governed by + * bit 28. A bunch of the instructions (float to integral) only exist + * in the vector form and are un-allocated for the scalar decode. Also + * in the scalar decode Q is always 1. 
+ */ +static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int fpop, opcode, a, u; + int rn, rd; + bool is_q; + bool is_scalar; + bool only_in_vector = false; + + int pass; + TCGv_i32 tcg_rmode = NULL; + TCGv_ptr tcg_fpstatus = NULL; + bool need_rmode = false; + bool need_fpst = true; + int rmode; + + if (!dc_isar_feature(aa64_fp16, s)) { + unallocated_encoding(s); + return; + } + + rd = extract32(insn, 0, 5); + rn = extract32(insn, 5, 5); + + a = extract32(insn, 23, 1); + u = extract32(insn, 29, 1); + is_scalar = extract32(insn, 28, 1); + is_q = extract32(insn, 30, 1); + + opcode = extract32(insn, 12, 5); + fpop = deposit32(opcode, 5, 1, a); + fpop = deposit32(fpop, 6, 1, u); + + rd = extract32(insn, 0, 5); + rn = extract32(insn, 5, 5); + + switch (fpop) { + case 0x1d: /* SCVTF */ + case 0x5d: /* UCVTF */ + { + int elements; + + if (is_scalar) { + elements = 1; + } else { + elements = (is_q ? 8 : 4); + } + + if (!fp_access_check(s)) { + return; + } + handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16); + return; + } + break; + case 0x2c: /* FCMGT (zero) */ + case 0x2d: /* FCMEQ (zero) */ + case 0x2e: /* FCMLT (zero) */ + case 0x6c: /* FCMGE (zero) */ + case 0x6d: /* FCMLE (zero) */ + handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd); + return; + case 0x3d: /* FRECPE */ + case 0x3f: /* FRECPX */ + break; + case 0x18: /* FRINTN */ + need_rmode = true; + only_in_vector = true; + rmode = FPROUNDING_TIEEVEN; + break; + case 0x19: /* FRINTM */ + need_rmode = true; + only_in_vector = true; + rmode = FPROUNDING_NEGINF; + break; + case 0x38: /* FRINTP */ + need_rmode = true; + only_in_vector = true; + rmode = FPROUNDING_POSINF; + break; + case 0x39: /* FRINTZ */ + need_rmode = true; + only_in_vector = true; + rmode = FPROUNDING_ZERO; + break; + case 0x58: /* FRINTA */ + need_rmode = true; + only_in_vector = true; + rmode = FPROUNDING_TIEAWAY; + break; + case 0x59: /* FRINTX */ + case 0x79: /* FRINTI */ + only_in_vector = true; + /* current rounding mode */ + break; + case 0x1a: /* FCVTNS */ + need_rmode = true; + rmode = FPROUNDING_TIEEVEN; + break; + case 0x1b: /* FCVTMS */ + need_rmode = true; + rmode = FPROUNDING_NEGINF; + break; + case 0x1c: /* FCVTAS */ + need_rmode = true; + rmode = FPROUNDING_TIEAWAY; + break; + case 0x3a: /* FCVTPS */ + need_rmode = true; + rmode = FPROUNDING_POSINF; + break; + case 0x3b: /* FCVTZS */ + need_rmode = true; + rmode = FPROUNDING_ZERO; + break; + case 0x5a: /* FCVTNU */ + need_rmode = true; + rmode = FPROUNDING_TIEEVEN; + break; + case 0x5b: /* FCVTMU */ + need_rmode = true; + rmode = FPROUNDING_NEGINF; + break; + case 0x5c: /* FCVTAU */ + need_rmode = true; + rmode = FPROUNDING_TIEAWAY; + break; + case 0x7a: /* FCVTPU */ + need_rmode = true; + rmode = FPROUNDING_POSINF; + break; + case 0x7b: /* FCVTZU */ + need_rmode = true; + rmode = FPROUNDING_ZERO; + break; + case 0x2f: /* FABS */ + case 0x6f: /* FNEG */ + need_fpst = false; + break; + case 0x7d: /* FRSQRTE */ + case 0x7f: /* FSQRT (vector) */ + break; + default: + fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop); + g_assert_not_reached(); + } + + + /* Check additional constraints for the scalar encoding */ + if (is_scalar) { + if (!is_q) { + unallocated_encoding(s); + return; + } + /* FRINTxx is only in the vector form */ + if (only_in_vector) { + unallocated_encoding(s); + return; + } + } + + if (!fp_access_check(s)) { + return; + } + + if (need_rmode || need_fpst) { + tcg_fpstatus = 
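/*
 * Passing true here selects the half-precision float_status, so fp16
 * arithmetic picks up its own flush-to-zero control (FPCR.FZ16)
 * independently of the single/double-precision one.
 */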
get_fpstatus_ptr(tcg_ctx, true); + } + + if (need_rmode) { + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); + } + + if (is_scalar) { + TCGv_i32 tcg_op = read_fp_hreg(s, rn); + TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); + + switch (fpop) { + case 0x1a: /* FCVTNS */ + case 0x1b: /* FCVTMS */ + case 0x1c: /* FCVTAS */ + case 0x3a: /* FCVTPS */ + case 0x3b: /* FCVTZS */ + gen_helper_advsimd_f16tosinth(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x3d: /* FRECPE */ + gen_helper_recpe_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x3f: /* FRECPX */ + gen_helper_frecpx_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x5a: /* FCVTNU */ + case 0x5b: /* FCVTMU */ + case 0x5c: /* FCVTAU */ + case 0x7a: /* FCVTPU */ + case 0x7b: /* FCVTZU */ + gen_helper_advsimd_f16touinth(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x6f: /* FNEG */ + tcg_gen_xori_i32(tcg_ctx, tcg_res, tcg_op, 0x8000); + break; + case 0x7d: /* FRSQRTE */ + gen_helper_rsqrte_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + default: + g_assert_not_reached(); + } + + /* limit any sign extension going on */ + tcg_gen_andi_i32(tcg_ctx, tcg_res, tcg_res, 0xffff); + write_fp_sreg(s, rd, tcg_res); + + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_op); + } else { + for (pass = 0; pass < (is_q ? 8 : 4); pass++) { + TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); + + read_vec_element_i32(s, tcg_op, rn, pass, MO_16); + + switch (fpop) { + case 0x1a: /* FCVTNS */ + case 0x1b: /* FCVTMS */ + case 0x1c: /* FCVTAS */ + case 0x3a: /* FCVTPS */ + case 0x3b: /* FCVTZS */ + gen_helper_advsimd_f16tosinth(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x3d: /* FRECPE */ + gen_helper_recpe_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x5a: /* FCVTNU */ + case 0x5b: /* FCVTMU */ + case 0x5c: /* FCVTAU */ + case 0x7a: /* FCVTPU */ + case 0x7b: /* FCVTZU */ + gen_helper_advsimd_f16touinth(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x18: /* FRINTN */ + case 0x19: /* FRINTM */ + case 0x38: /* FRINTP */ + case 0x39: /* FRINTZ */ + case 0x58: /* FRINTA */ + case 0x79: /* FRINTI */ + gen_helper_advsimd_rinth(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x59: /* FRINTX */ + gen_helper_advsimd_rinth_exact(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x2f: /* FABS */ + tcg_gen_andi_i32(tcg_ctx, tcg_res, tcg_op, 0x7fff); + break; + case 0x6f: /* FNEG */ + tcg_gen_xori_i32(tcg_ctx, tcg_res, tcg_op, 0x8000); + break; + case 0x7d: /* FRSQRTE */ + gen_helper_rsqrte_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x7f: /* FSQRT */ + gen_helper_sqrt_f16(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + default: + g_assert_not_reached(); + } + + write_vec_element_i32(s, tcg_res, rd, pass, MO_16); + + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_op); + } + + clear_vec_high(s, is_q, rd); + } + + if (tcg_rmode) { + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_fpstatus); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + } + + if (tcg_fpstatus) { + tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); + } +} + +/* AdvSIMD scalar x indexed element * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+ * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd | * 
+-----+---+-----------+------+---+---+------+-----+---+---+------+------+ - * C3.6.18 AdvSIMD vector x indexed element + * AdvSIMD vector x indexed element * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+ * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd | @@ -10311,90 +13126,155 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); bool is_long = false; - bool is_fp = false; + int is_fp = 0; + bool is_fp16 = false; int index; TCGv_ptr fpst; - switch (opcode) { - case 0x0: /* MLA */ - case 0x4: /* MLS */ - if (!u || is_scalar) { + switch (16 * u + opcode) { + case 0x08: /* MUL */ + case 0x10: /* MLA */ + case 0x14: /* MLS */ + if (is_scalar) { unallocated_encoding(s); return; } break; - case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ - case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ - case 0xa: /* SMULL, SMULL2, UMULL, UMULL2 */ + case 0x02: /* SMLAL, SMLAL2 */ + case 0x12: /* UMLAL, UMLAL2 */ + case 0x06: /* SMLSL, SMLSL2 */ + case 0x16: /* UMLSL, UMLSL2 */ + case 0x0a: /* SMULL, SMULL2 */ + case 0x1a: /* UMULL, UMULL2 */ if (is_scalar) { unallocated_encoding(s); return; } is_long = true; break; - case 0x3: /* SQDMLAL, SQDMLAL2 */ - case 0x7: /* SQDMLSL, SQDMLSL2 */ - case 0xb: /* SQDMULL, SQDMULL2 */ + case 0x03: /* SQDMLAL, SQDMLAL2 */ + case 0x07: /* SQDMLSL, SQDMLSL2 */ + case 0x0b: /* SQDMULL, SQDMULL2 */ is_long = true; - /* fall through */ - case 0xc: /* SQDMULH */ - case 0xd: /* SQRDMULH */ - if (u) { + break; + case 0x0c: /* SQDMULH */ + case 0x0d: /* SQRDMULH */ + break; + case 0x01: /* FMLA */ + case 0x05: /* FMLS */ + case 0x09: /* FMUL */ + case 0x19: /* FMULX */ + is_fp = 1; + break; + case 0x1d: /* SQRDMLAH */ + case 0x1f: /* SQRDMLSH */ + if (!dc_isar_feature(aa64_rdm, s)) { unallocated_encoding(s); return; } break; - case 0x8: /* MUL */ - if (u || is_scalar) { + case 0x0e: /* SDOT */ + case 0x1e: /* UDOT */ + if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) { unallocated_encoding(s); return; } break; - case 0x1: /* FMLA */ - case 0x5: /* FMLS */ - if (u) { + case 0x11: /* FCMLA #0 */ + case 0x13: /* FCMLA #90 */ + case 0x15: /* FCMLA #180 */ + case 0x17: /* FCMLA #270 */ + if (is_scalar || !dc_isar_feature(aa64_fcma, s)) { unallocated_encoding(s); return; } - /* fall through */ - case 0x9: /* FMUL, FMULX */ - if (!extract32(size, 1, 1)) { + is_fp = 2; + break; + case 0x00: /* FMLAL */ + case 0x04: /* FMLSL */ + case 0x18: /* FMLAL2 */ + case 0x1c: /* FMLSL2 */ + if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) { unallocated_encoding(s); return; } - is_fp = true; + size = MO_16; + /* is_fp, but we pass tcg_ctx->cpu_env not fp_status. */ break; default: unallocated_encoding(s); return; } - if (is_fp) { - /* low bit of size indicates single/double */ - size = extract32(size, 0, 1) ? 
3 : 2; - if (size == 2) { - index = h << 1 | l; - } else { - if (l || !is_q) { - unallocated_encoding(s); - return; - } - index = h; - } - rm |= (m << 4); - } else { + switch (is_fp) { + case 1: /* normal fp */ + /* convert insn encoded size to MemOp size */ switch (size) { - case 1: - index = h << 2 | l << 1 | m; + case 0: /* half-precision */ + size = MO_16; + is_fp16 = true; break; - case 2: - index = h << 1 | l; - rm |= (m << 4); + case MO_32: /* single precision */ + case MO_64: /* double precision */ break; default: unallocated_encoding(s); return; } + break; + + case 2: /* complex fp */ + /* Each indexable element is a complex pair. */ + size += 1; + switch (size) { + case MO_32: + if (h && !is_q) { + unallocated_encoding(s); + return; + } + is_fp16 = true; + break; + case MO_64: + break; + default: + unallocated_encoding(s); + return; + } + break; + + default: /* integer */ + switch (size) { + case MO_8: + case MO_64: + unallocated_encoding(s); + return; + } + break; + } + if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) { + unallocated_encoding(s); + return; + } + + /* Given MemOp size, adjust register and indexing. */ + switch (size) { + case MO_16: + index = h << 2 | l << 1 | m; + break; + case MO_32: + index = h << 1 | l; + rm |= m << 4; + break; + case MO_64: + if (l || !is_q) { + unallocated_encoding(s); + return; + } + index = h; + rm |= m << 4; + break; + default: + g_assert_not_reached(); } if (!fp_access_check(s)) { @@ -10402,9 +13282,51 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } if (is_fp) { - fpst = get_fpstatus_ptr(tcg_ctx); + fpst = get_fpstatus_ptr(tcg_ctx, is_fp16); } else { - TCGV_UNUSED_PTR(fpst); + fpst = NULL; + } + + switch (16 * u + opcode) { + case 0x0e: /* SDOT */ + case 0x1e: /* UDOT */ + gen_gvec_op3_ool(s, is_q, rd, rn, rm, index, + u ? gen_helper_gvec_udot_idx_b + : gen_helper_gvec_sdot_idx_b); + return; + case 0x11: /* FCMLA #0 */ + case 0x13: /* FCMLA #90 */ + case 0x15: /* FCMLA #180 */ + case 0x17: /* FCMLA #270 */ + { + int rot = extract32(insn, 13, 2); + int data = (index << 2) | rot; + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), fpst, + is_q ? 16 : 8, vec_full_reg_size(s), data, + size == MO_64 + ? gen_helper_gvec_fcmlas_idx + : gen_helper_gvec_fcmlah_idx); + tcg_temp_free_ptr(tcg_ctx, fpst); + } + return; + + case 0x00: /* FMLAL */ + case 0x04: /* FMLSL */ + case 0x18: /* FMLAL2 */ + case 0x1c: /* FMLSL2 */ + { + int is_s = extract32(opcode, 2, 1); + int is_2 = u; + int data = (index << 2) | (is_2 << 1) | is_s; + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), tcg_ctx->cpu_env, + is_q ? 
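/*
 * The two size arguments of the gvec call: oprsz (16 bytes for a
 * Q-register op, 8 for the 64-bit form) is how much data the op
 * processes, and maxsz (vec_full_reg_size) is the full register;
 * bytes between oprsz and maxsz are zeroed, which is what makes an
 * explicit clear_vec_high() unnecessary on these paths.
 */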
16 : 8, vec_full_reg_size(s), + data, gen_helper_gvec_fmlal_idx_a64); + } + return; } if (size == 3) { @@ -10421,21 +13343,20 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) read_vec_element(s, tcg_op, rn, pass, MO_64); - switch (opcode) { - case 0x5: /* FMLS */ + switch (16 * u + opcode) { + case 0x05: /* FMLS */ /* As usual for ARM, separate negation for fused multiply-add */ gen_helper_vfp_negd(tcg_ctx, tcg_op, tcg_op); /* fall through */ - case 0x1: /* FMLA */ + case 0x01: /* FMLA */ read_vec_element(s, tcg_res, rd, pass, MO_64); gen_helper_vfp_muladdd(tcg_ctx, tcg_res, tcg_op, tcg_idx, tcg_res, fpst); break; - case 0x9: /* FMUL, FMULX */ - if (u) { - gen_helper_vfp_mulxd(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); - } else { - gen_helper_vfp_muld(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); - } + case 0x09: /* FMUL */ + gen_helper_vfp_muld(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); + break; + case 0x19: /* FMULX */ + gen_helper_vfp_mulxd(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); break; default: g_assert_not_reached(); @@ -10446,11 +13367,8 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_ctx, tcg_res); } - if (is_scalar) { - clear_vec_high(s, rd); - } - tcg_temp_free_i64(tcg_ctx, tcg_idx); + clear_vec_high(s, !is_scalar, rd); } else if (!is_long) { /* 32 bit floating point, or 16 or 32 bit integer. * For the 16 bit scalar case we use the usual Neon helpers and @@ -10481,10 +13399,10 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32); - switch (opcode) { - case 0x0: /* MLA */ - case 0x4: /* MLS */ - case 0x8: /* MUL */ + switch (16 * u + opcode) { + case 0x08: /* MUL */ + case 0x10: /* MLA */ + case 0x14: /* MLS */ { static NeonGenTwoOpFn * const fns[2][2] = { { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 }, @@ -10506,22 +13424,75 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) genfn(tcg_ctx, tcg_res, tcg_op, tcg_res); break; } - case 0x5: /* FMLS */ - /* As usual for ARM, separate negation for fused multiply-add */ - gen_helper_vfp_negs(tcg_ctx, tcg_op, tcg_op); - /* fall through */ - case 0x1: /* FMLA */ - read_vec_element_i32(s, tcg_res, rd, pass, MO_32); - gen_helper_vfp_muladds(tcg_ctx, tcg_res, tcg_op, tcg_idx, tcg_res, fpst); - break; - case 0x9: /* FMUL, FMULX */ - if (u) { - gen_helper_vfp_mulxs(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); - } else { - gen_helper_vfp_muls(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); + case 0x05: /* FMLS */ + case 0x01: /* FMLA */ + read_vec_element_i32(s, tcg_res, rd, pass, + is_scalar ? 
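/*
 * FMLA/FMLS read the destination element first because Rd doubles as
 * the accumulator of the fused multiply-add. The scalar form reads a
 * single element of its natural size; the vector form always works on
 * 32-bit containers (MO_32), with two fp16 values packed per container
 * for the "2h" helpers used below.
 */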
size : MO_32); + switch (size) { + case 1: + if (opcode == 0x5) { + /* As usual for ARM, separate negation for fused + * multiply-add */ + tcg_gen_xori_i32(tcg_ctx, tcg_op, tcg_op, 0x80008000); + } + if (is_scalar) { + gen_helper_advsimd_muladdh(tcg_ctx, tcg_res, tcg_op, tcg_idx, + tcg_res, fpst); + } else { + gen_helper_advsimd_muladd2h(tcg_ctx, tcg_res, tcg_op, tcg_idx, + tcg_res, fpst); + } + break; + case 2: + if (opcode == 0x5) { + /* As usual for ARM, separate negation for + * fused multiply-add */ + tcg_gen_xori_i32(tcg_ctx, tcg_op, tcg_op, 0x80000000); + } + gen_helper_vfp_muladds(tcg_ctx, tcg_res, tcg_op, tcg_idx, + tcg_res, fpst); + break; + default: + g_assert_not_reached(); } break; - case 0xc: /* SQDMULH */ + case 0x09: /* FMUL */ + switch (size) { + case 1: + if (is_scalar) { + gen_helper_advsimd_mulh(tcg_ctx, tcg_res, tcg_op, + tcg_idx, fpst); + } else { + gen_helper_advsimd_mul2h(tcg_ctx, tcg_res, tcg_op, + tcg_idx, fpst); + } + break; + case 2: + gen_helper_vfp_muls(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); + break; + default: + g_assert_not_reached(); + } + break; + case 0x19: /* FMULX */ + switch (size) { + case 1: + if (is_scalar) { + gen_helper_advsimd_mulxh(tcg_ctx, tcg_res, tcg_op, + tcg_idx, fpst); + } else { + gen_helper_advsimd_mulx2h(tcg_ctx, tcg_res, tcg_op, + tcg_idx, fpst); + } + break; + case 2: + gen_helper_vfp_mulxs(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); + break; + default: + g_assert_not_reached(); + } + break; + case 0x0c: /* SQDMULH */ if (size == 1) { gen_helper_neon_qdmulh_s16(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op, tcg_idx); @@ -10530,7 +13501,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) tcg_op, tcg_idx); } break; - case 0xd: /* SQRDMULH */ + case 0x0d: /* SQRDMULH */ if (size == 1) { gen_helper_neon_qrdmulh_s16(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op, tcg_idx); @@ -10539,6 +13510,28 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) tcg_op, tcg_idx); } break; + case 0x1d: /* SQRDMLAH */ + read_vec_element_i32(s, tcg_res, rd, pass, + is_scalar ? size : MO_32); + if (size == 1) { + gen_helper_neon_qrdmlah_s16(tcg_ctx, tcg_res, tcg_ctx->cpu_env, + tcg_op, tcg_idx, tcg_res); + } else { + gen_helper_neon_qrdmlah_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, + tcg_op, tcg_idx, tcg_res); + } + break; + case 0x1f: /* SQRDMLSH */ + read_vec_element_i32(s, tcg_res, rd, pass, + is_scalar ? 
size : MO_32); + if (size == 1) { + gen_helper_neon_qrdmlsh_s16(tcg_ctx, tcg_res, tcg_ctx->cpu_env, + tcg_op, tcg_idx, tcg_res); + } else { + gen_helper_neon_qrdmlsh_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, + tcg_op, tcg_idx, tcg_res); + } + break; default: g_assert_not_reached(); } @@ -10554,16 +13547,13 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } tcg_temp_free_i32(tcg_ctx, tcg_idx); - - if (!is_q) { - clear_vec_high(s, rd); - } + clear_vec_high(s, is_q, rd); } else { /* long ops: 16x16->32 or 32x32->64 */ TCGv_i64 tcg_res[2]; int pass; bool satop = extract32(opcode, 0, 1); - TCGMemOp memop = MO_32; + MemOp memop = MO_32; if (satop || !u) { memop |= MO_SIGN; @@ -10634,9 +13624,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } tcg_temp_free_i64(tcg_ctx, tcg_idx); - if (is_scalar) { - clear_vec_high(s, rd); - } + clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_idx = tcg_temp_new_i32(tcg_ctx); @@ -10728,12 +13716,12 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } } - if (!TCGV_IS_UNUSED_PTR(fpst)) { + if (fpst) { tcg_temp_free_ptr(tcg_ctx, fpst); } } -/* C3.6.19 Crypto AES +/* Crypto AES * 31 24 23 22 21 17 16 12 11 10 9 5 4 0 * +-----------------+------+-----------+--------+-----+------+------+ * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd | @@ -10747,11 +13735,11 @@ static void disas_crypto_aes(DisasContext *s, uint32_t insn) int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); int decrypt; - TCGv_i32 tcg_rd_regno, tcg_rn_regno, tcg_decrypt; - CryptoThreeOpEnvFn *genfn; + TCGv_ptr tcg_rd_ptr, tcg_rn_ptr; + TCGv_i32 tcg_decrypt; + CryptoThreeOpIntFn *genfn; - if (!arm_dc_feature(s, ARM_FEATURE_V8_AES) - || size != 0) { + if (!dc_isar_feature(aa64_aes, s) || size != 0) { unallocated_encoding(s); return; } @@ -10778,22 +13766,22 @@ static void disas_crypto_aes(DisasContext *s, uint32_t insn) return; } - /* Note that we convert the Vx register indexes into the - * index within the vfp.regs[] array, so we can share the - * helper with the AArch32 instructions. 
- */ - tcg_rd_regno = tcg_const_i32(tcg_ctx, rd << 1); - tcg_rn_regno = tcg_const_i32(tcg_ctx, rn << 1); + if (!fp_access_check(s)) { + return; + } + + tcg_rd_ptr = vec_full_reg_ptr(s, rd); + tcg_rn_ptr = vec_full_reg_ptr(s, rn); tcg_decrypt = tcg_const_i32(tcg_ctx, decrypt); - genfn(tcg_ctx, tcg_ctx->cpu_env, tcg_rd_regno, tcg_rn_regno, tcg_decrypt); + genfn(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt); - tcg_temp_free_i32(tcg_ctx, tcg_rd_regno); - tcg_temp_free_i32(tcg_ctx, tcg_rn_regno); + tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); + tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); tcg_temp_free_i32(tcg_ctx, tcg_decrypt); } -/* C3.6.20 Crypto three-reg SHA +/* Crypto three-reg SHA * 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0 * +-----------------+------+---+------+---+--------+-----+------+------+ * | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd | @@ -10807,9 +13795,9 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn) int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); - CryptoThreeOpEnvFn *genfn; - TCGv_i32 tcg_rd_regno, tcg_rn_regno, tcg_rm_regno; - int feature = ARM_FEATURE_V8_SHA256; + CryptoThreeOpFn *genfn; + TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr; + bool feature; if (size != 0) { unallocated_encoding(s); @@ -10822,47 +13810,54 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn) case 2: /* SHA1M */ case 3: /* SHA1SU0 */ genfn = NULL; - feature = ARM_FEATURE_V8_SHA1; + feature = dc_isar_feature(aa64_sha1, s); break; case 4: /* SHA256H */ genfn = gen_helper_crypto_sha256h; + feature = dc_isar_feature(aa64_sha256, s); break; case 5: /* SHA256H2 */ genfn = gen_helper_crypto_sha256h2; + feature = dc_isar_feature(aa64_sha256, s); break; case 6: /* SHA256SU1 */ genfn = gen_helper_crypto_sha256su1; + feature = dc_isar_feature(aa64_sha256, s); break; default: unallocated_encoding(s); return; } - if (!arm_dc_feature(s, feature)) { + if (!feature) { unallocated_encoding(s); return; } - tcg_rd_regno = tcg_const_i32(tcg_ctx, rd << 1); - tcg_rn_regno = tcg_const_i32(tcg_ctx, rn << 1); - tcg_rm_regno = tcg_const_i32(tcg_ctx, rm << 1); + if (!fp_access_check(s)) { + return; + } + + tcg_rd_ptr = vec_full_reg_ptr(s, rd); + tcg_rn_ptr = vec_full_reg_ptr(s, rn); + tcg_rm_ptr = vec_full_reg_ptr(s, rm); if (genfn) { - genfn(tcg_ctx, tcg_ctx->cpu_env, tcg_rd_regno, tcg_rn_regno, tcg_rm_regno); + genfn(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr); } else { TCGv_i32 tcg_opcode = tcg_const_i32(tcg_ctx, opcode); - gen_helper_crypto_sha1_3reg(tcg_ctx, tcg_ctx->cpu_env, tcg_rd_regno, - tcg_rn_regno, tcg_rm_regno, tcg_opcode); + gen_helper_crypto_sha1_3reg(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr, + tcg_rm_ptr, tcg_opcode); tcg_temp_free_i32(tcg_ctx, tcg_opcode); } - tcg_temp_free_i32(tcg_ctx, tcg_rd_regno); - tcg_temp_free_i32(tcg_ctx, tcg_rn_regno); - tcg_temp_free_i32(tcg_ctx, tcg_rm_regno); + tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); + tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); + tcg_temp_free_ptr(tcg_ctx, tcg_rm_ptr); } -/* C3.6.21 Crypto two-reg SHA +/* Crypto two-reg SHA * 31 24 23 22 21 17 16 12 11 10 9 5 4 0 * +-----------------+------+-----------+--------+-----+------+------+ * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd | @@ -10875,9 +13870,9 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) int opcode = extract32(insn, 12, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); - CryptoTwoOpEnvFn *genfn; - int feature; - TCGv_i32 tcg_rd_regno, tcg_rn_regno; + 
CryptoTwoOpFn *genfn; + bool feature; + TCGv_ptr tcg_rd_ptr, tcg_rn_ptr; if (size != 0) { unallocated_encoding(s); @@ -10886,15 +13881,15 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) switch (opcode) { case 0: /* SHA1H */ - feature = ARM_FEATURE_V8_SHA1; + feature = dc_isar_feature(aa64_sha1, s); genfn = gen_helper_crypto_sha1h; break; case 1: /* SHA1SU1 */ - feature = ARM_FEATURE_V8_SHA1; + feature = dc_isar_feature(aa64_sha1, s); genfn = gen_helper_crypto_sha1su1; break; case 2: /* SHA256SU0 */ - feature = ARM_FEATURE_V8_SHA256; + feature = dc_isar_feature(aa64_sha256, s); genfn = gen_helper_crypto_sha256su0; break; default: @@ -10902,18 +13897,364 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) return; } - if (!arm_dc_feature(s, feature)) { + if (!feature) { unallocated_encoding(s); return; } - tcg_rd_regno = tcg_const_i32(tcg_ctx, rd << 1); - tcg_rn_regno = tcg_const_i32(tcg_ctx, rn << 1); + if (!fp_access_check(s)) { + return; + } - genfn(tcg_ctx, tcg_ctx->cpu_env, tcg_rd_regno, tcg_rn_regno); + tcg_rd_ptr = vec_full_reg_ptr(s, rd); + tcg_rn_ptr = vec_full_reg_ptr(s, rn); - tcg_temp_free_i32(tcg_ctx, tcg_rd_regno); - tcg_temp_free_i32(tcg_ctx, tcg_rn_regno); + genfn(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr); + + tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); + tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); +} + +/* Crypto three-reg SHA512 + * 31 21 20 16 15 14 13 12 11 10 9 5 4 0 + * +-----------------------+------+---+---+-----+--------+------+------+ + * | 1 1 0 0 1 1 1 0 0 1 1 | Rm | 1 | O | 0 0 | opcode | Rn | Rd | + * +-----------------------+------+---+---+-----+--------+------+------+ + */ +static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opcode = extract32(insn, 10, 2); + int o = extract32(insn, 14, 1); + int rm = extract32(insn, 16, 5); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + bool feature; + CryptoThreeOpFn *genfn; + + if (o == 0) { + switch (opcode) { + case 0: /* SHA512H */ + feature = dc_isar_feature(aa64_sha512, s); + genfn = gen_helper_crypto_sha512h; + break; + case 1: /* SHA512H2 */ + feature = dc_isar_feature(aa64_sha512, s); + genfn = gen_helper_crypto_sha512h2; + break; + case 2: /* SHA512SU1 */ + feature = dc_isar_feature(aa64_sha512, s); + genfn = gen_helper_crypto_sha512su1; + break; + case 3: /* RAX1 */ + feature = dc_isar_feature(aa64_sha3, s); + genfn = NULL; + break; + default: + g_assert_not_reached(); + } + } else { + switch (opcode) { + case 0: /* SM3PARTW1 */ + feature = dc_isar_feature(aa64_sm3, s); + genfn = gen_helper_crypto_sm3partw1; + break; + case 1: /* SM3PARTW2 */ + feature = dc_isar_feature(aa64_sm3, s); + genfn = gen_helper_crypto_sm3partw2; + break; + case 2: /* SM4EKEY */ + feature = dc_isar_feature(aa64_sm4, s); + genfn = gen_helper_crypto_sm4ekey; + break; + default: + unallocated_encoding(s); + return; + } + } + + if (!feature) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + if (genfn) { + TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr; + + tcg_rd_ptr = vec_full_reg_ptr(s, rd); + tcg_rn_ptr = vec_full_reg_ptr(s, rn); + tcg_rm_ptr = vec_full_reg_ptr(s, rm); + + genfn(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr); + + tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); + tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); + tcg_temp_free_ptr(tcg_ctx, tcg_rm_ptr); + } else { + TCGv_i64 tcg_op1, tcg_op2, tcg_res[2]; + int pass; + + tcg_op1 = tcg_temp_new_i64(tcg_ctx); + tcg_op2 = 
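/*
 * RAX1 has no dedicated helper (genfn == NULL above) and is expanded
 * inline below: for each 64-bit lane, Vd = Vn ^ rol64(Vm, 1), i.e.
 * rotate the second operand left by one bit and XOR into the first.
 */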
tcg_temp_new_i64(tcg_ctx); + tcg_res[0] = tcg_temp_new_i64(tcg_ctx); + tcg_res[1] = tcg_temp_new_i64(tcg_ctx); + + for (pass = 0; pass < 2; pass++) { + read_vec_element(s, tcg_op1, rn, pass, MO_64); + read_vec_element(s, tcg_op2, rm, pass, MO_64); + + tcg_gen_rotli_i64(tcg_ctx, tcg_res[pass], tcg_op2, 1); + tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1); + } + write_vec_element(s, tcg_res[0], rd, 0, MO_64); + write_vec_element(s, tcg_res[1], rd, 1, MO_64); + + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + tcg_temp_free_i64(tcg_ctx, tcg_res[0]); + tcg_temp_free_i64(tcg_ctx, tcg_res[1]); + } +} + +/* Crypto two-reg SHA512 + * 31 12 11 10 9 5 4 0 + * +-----------------------------------------+--------+------+------+ + * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode | Rn | Rd | + * +-----------------------------------------+--------+------+------+ + */ +static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opcode = extract32(insn, 10, 2); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + TCGv_ptr tcg_rd_ptr, tcg_rn_ptr; + bool feature; + CryptoTwoOpFn *genfn; + + switch (opcode) { + case 0: /* SHA512SU0 */ + feature = dc_isar_feature(aa64_sha512, s); + genfn = gen_helper_crypto_sha512su0; + break; + case 1: /* SM4E */ + feature = dc_isar_feature(aa64_sm4, s); + genfn = gen_helper_crypto_sm4e; + break; + default: + unallocated_encoding(s); + return; + } + + if (!feature) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + tcg_rd_ptr = vec_full_reg_ptr(s, rd); + tcg_rn_ptr = vec_full_reg_ptr(s, rn); + + genfn(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr); + + tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); + tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); +} + +/* Crypto four-register + * 31 23 22 21 20 16 15 14 10 9 5 4 0 + * +-------------------+-----+------+---+------+------+------+ + * | 1 1 0 0 1 1 1 0 0 | Op0 | Rm | 0 | Ra | Rn | Rd | + * +-------------------+-----+------+---+------+------+------+ + */ +static void disas_crypto_four_reg(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int op0 = extract32(insn, 21, 2); + int rm = extract32(insn, 16, 5); + int ra = extract32(insn, 10, 5); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + bool feature; + + switch (op0) { + case 0: /* EOR3 */ + case 1: /* BCAX */ + feature = dc_isar_feature(aa64_sha3, s); + break; + case 2: /* SM3SS1 */ + feature = dc_isar_feature(aa64_sm3, s); + break; + default: + unallocated_encoding(s); + return; + } + + if (!feature) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + if (op0 < 2) { + TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2]; + int pass; + + tcg_op1 = tcg_temp_new_i64(tcg_ctx); + tcg_op2 = tcg_temp_new_i64(tcg_ctx); + tcg_op3 = tcg_temp_new_i64(tcg_ctx); + tcg_res[0] = tcg_temp_new_i64(tcg_ctx); + tcg_res[1] = tcg_temp_new_i64(tcg_ctx); + + for (pass = 0; pass < 2; pass++) { + read_vec_element(s, tcg_op1, rn, pass, MO_64); + read_vec_element(s, tcg_op2, rm, pass, MO_64); + read_vec_element(s, tcg_op3, ra, pass, MO_64); + + if (op0 == 0) { + /* EOR3 */ + tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_op2, tcg_op3); + } else { + /* BCAX */ + tcg_gen_andc_i64(tcg_ctx, tcg_res[pass], tcg_op2, tcg_op3); + } + tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1); + } + write_vec_element(s, tcg_res[0], rd, 0, MO_64); + write_vec_element(s, tcg_res[1], rd, 1, 
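/*
 * The three-source bitwise ops computed above, per 64-bit lane:
 *   EOR3: Vd = Vn ^ Vm ^ Va
 *   BCAX: Vd = Vn ^ (Vm & ~Va)     ("bit clear and XOR")
 * Both are built as one intermediate op followed by a final XOR
 * with Vn.
 */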
MO_64); + + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + tcg_temp_free_i64(tcg_ctx, tcg_op3); + tcg_temp_free_i64(tcg_ctx, tcg_res[0]); + tcg_temp_free_i64(tcg_ctx, tcg_res[1]); + } else { + TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero; + + tcg_op1 = tcg_temp_new_i32(tcg_ctx); + tcg_op2 = tcg_temp_new_i32(tcg_ctx); + tcg_op3 = tcg_temp_new_i32(tcg_ctx); + tcg_res = tcg_temp_new_i32(tcg_ctx); + tcg_zero = tcg_const_i32(tcg_ctx, 0); + + read_vec_element_i32(s, tcg_op1, rn, 3, MO_32); + read_vec_element_i32(s, tcg_op2, rm, 3, MO_32); + read_vec_element_i32(s, tcg_op3, ra, 3, MO_32); + + tcg_gen_rotri_i32(tcg_ctx, tcg_res, tcg_op1, 20); + tcg_gen_add_i32(tcg_ctx, tcg_res, tcg_res, tcg_op2); + tcg_gen_add_i32(tcg_ctx, tcg_res, tcg_res, tcg_op3); + tcg_gen_rotri_i32(tcg_ctx, tcg_res, tcg_res, 25); + + write_vec_element_i32(s, tcg_zero, rd, 0, MO_32); + write_vec_element_i32(s, tcg_zero, rd, 1, MO_32); + write_vec_element_i32(s, tcg_zero, rd, 2, MO_32); + write_vec_element_i32(s, tcg_res, rd, 3, MO_32); + + tcg_temp_free_i32(tcg_ctx, tcg_op1); + tcg_temp_free_i32(tcg_ctx, tcg_op2); + tcg_temp_free_i32(tcg_ctx, tcg_op3); + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_zero); + } +} + +/* Crypto XAR + * 31 21 20 16 15 10 9 5 4 0 + * +-----------------------+------+--------+------+------+ + * | 1 1 0 0 1 1 1 0 1 0 0 | Rm | imm6 | Rn | Rd | + * +-----------------------+------+--------+------+------+ + */ +static void disas_crypto_xar(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rm = extract32(insn, 16, 5); + int imm6 = extract32(insn, 10, 6); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + TCGv_i64 tcg_op1, tcg_op2, tcg_res[2]; + int pass; + + if (!dc_isar_feature(aa64_sha3, s)) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + tcg_op1 = tcg_temp_new_i64(tcg_ctx); + tcg_op2 = tcg_temp_new_i64(tcg_ctx); + tcg_res[0] = tcg_temp_new_i64(tcg_ctx); + tcg_res[1] = tcg_temp_new_i64(tcg_ctx); + + for (pass = 0; pass < 2; pass++) { + read_vec_element(s, tcg_op1, rn, pass, MO_64); + read_vec_element(s, tcg_op2, rm, pass, MO_64); + + tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); + tcg_gen_rotri_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], imm6); + } + write_vec_element(s, tcg_res[0], rd, 0, MO_64); + write_vec_element(s, tcg_res[1], rd, 1, MO_64); + + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + tcg_temp_free_i64(tcg_ctx, tcg_res[0]); + tcg_temp_free_i64(tcg_ctx, tcg_res[1]); +} + +/* Crypto three-reg imm2 + * 31 21 20 16 15 14 13 12 11 10 9 5 4 0 + * +-----------------------+------+-----+------+--------+------+------+ + * | 1 1 0 0 1 1 1 0 0 1 0 | Rm | 1 0 | imm2 | opcode | Rn | Rd | + * +-----------------------+------+-----+------+--------+------+------+ + */ +static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opcode = extract32(insn, 10, 2); + int imm2 = extract32(insn, 12, 2); + int rm = extract32(insn, 16, 5); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr; + TCGv_i32 tcg_imm2, tcg_opcode; + + if (!dc_isar_feature(aa64_sm3, s)) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + tcg_rd_ptr = vec_full_reg_ptr(s, rd); + tcg_rn_ptr = vec_full_reg_ptr(s, rn); + tcg_rm_ptr = vec_full_reg_ptr(s, rm); + tcg_imm2 = 
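/*
 * Both immediates are simply forwarded to one helper: imm2 selects
 * which 32-bit lane of Vm feeds the SM3 round, and opcode picks the
 * SM3TT1A/SM3TT1B/SM3TT2A/SM3TT2B variant.
 */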
tcg_const_i32(tcg_ctx, imm2); + tcg_opcode = tcg_const_i32(tcg_ctx, opcode); + + gen_helper_crypto_sm3tt(tcg_ctx, tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2, + tcg_opcode); + + tcg_temp_free_ptr(tcg_ctx, tcg_rd_ptr); + tcg_temp_free_ptr(tcg_ctx, tcg_rn_ptr); + tcg_temp_free_ptr(tcg_ctx, tcg_rm_ptr); + tcg_temp_free_i32(tcg_ctx, tcg_imm2); + tcg_temp_free_i32(tcg_ctx, tcg_opcode); } /* C3.6 Data processing - SIMD, inc Crypto @@ -10924,6 +14265,7 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) static const AArch64DecodeTable data_proc_simd[] = { /* pattern , mask , fn */ { 0x0e200400, 0x9f200400, disas_simd_three_reg_same }, + { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra }, { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff }, { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc }, { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes }, @@ -10936,6 +14278,7 @@ static const AArch64DecodeTable data_proc_simd[] = { { 0x0e000800, 0xbf208c00, disas_simd_zip_trn }, { 0x2e000000, 0xbf208400, disas_simd_ext }, { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same }, + { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra }, { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff }, { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc }, { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise }, @@ -10945,6 +14288,14 @@ static const AArch64DecodeTable data_proc_simd[] = { { 0x4e280800, 0xff3e0c00, disas_crypto_aes }, { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha }, { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha }, + { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 }, + { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 }, + { 0xce000000, 0xff808000, disas_crypto_four_reg }, + { 0xce800000, 0xffe00000, disas_crypto_xar }, + { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 }, + { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 }, + { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 }, + { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 }, { 0x00000000, 0x00000000, NULL } }; @@ -10973,36 +14324,158 @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn) } } +/** + * is_guarded_page: + * @env: The cpu environment + * @s: The DisasContext + * + * Return true if the page is guarded. + */ +static bool is_guarded_page(CPUARMState *env, DisasContext *s) +{ + uint64_t addr = s->base.pc_first; + int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx); + unsigned int index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + + /* + * We test this immediately after reading an insn, which means + * that any normal page must be in the TLB. The only exception + * would be for executing from flash or device memory, which + * does not retain the TLB entry. + * + * FIXME: Assume false for those, for now. We could use + * arm_cpu_get_phys_page_attrs_debug to re-read the page + * table entry even for that case. + */ + return (tlb_hit(s->uc, entry->addr_code, addr) && + env_tlb(env)->d[mmu_idx].iotlb[index].attrs.target_tlb_bit0); +} + +/** + * btype_destination_ok: + * @insn: The instruction at the branch destination + * @bt: SCTLR_ELx.BT + * @btype: PSTATE.BTYPE, and is non-zero + * + * On a guarded page, there are a limited number of insns + * that may be present at the branch target: + * - branch target identifiers, + * - paciasp, pacibsp, + * - BRK insn + * - HLT insn + * Anything else causes a Branch Target Exception. 
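 * (For reference, PSTATE.BTYPE was set by the indirect branch that
 * landed here -- roughly: BR X16/X17 sets 1, BLR sets 2, any other
 * BR sets 3 -- and each landing pad accepts only a subset of those
 * values, as encoded case by case below.)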
+ * + * Return true if the branch is compatible, false to raise BTITRAP. + */ +static bool btype_destination_ok(uint32_t insn, bool bt, int btype) +{ + if ((insn & 0xfffff01fu) == 0xd503201fu) { + /* HINT space */ + switch (extract32(insn, 5, 7)) { + case 0x19: // 0b011001: /* PACIASP */ + case 0x1b: // 0b011011: /* PACIBSP */ + /* + * If SCTLR_ELx.BT, then PACI*SP are not compatible + * with btype == 3. Otherwise all btype are ok. + */ + return !bt || btype != 3; + case 0x20: // 0b100000: /* BTI */ + /* Not compatible with any btype. */ + return false; + case 0x22: // 0b100010: /* BTI c */ + /* Not compatible with btype == 3 */ + return btype != 3; + case 0x24: // 0b100100: /* BTI j */ + /* Not compatible with btype == 2 */ + return btype != 2; + case 0x26: // 0b100110: /* BTI jc */ + /* Compatible with any btype. */ + return true; + } + } else { + switch (insn & 0xffe0001fu) { + case 0xd4200000u: /* BRK */ + case 0xd4400000u: /* HLT */ + /* Give priority to the breakpoint exception. */ + return true; + } + } + return false; +} + /* C3.1 A64 instruction index by encoding */ static void disas_a64_insn(CPUARMState *env, DisasContext *s) { uint32_t insn; - TCGContext *tcg_ctx = env->uc->tcg_ctx; - // Unicorn: end address tells us to stop emulation - if (s->pc == s->uc->addr_end) { - // imitate WFI instruction to halt emulation - s->is_jmp = DISAS_WFI; - return; - } - - insn = arm_ldl_code(env, s->pc, s->bswap_code); + s->pc_curr = s->base.pc_next; + insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b); +#ifdef TARGET_WORDS_BIGENDIAN + /* aarch64eb swap again to little endian */ + insn = bswap32(insn); +#endif s->insn = insn; - s->pc += 4; + s->base.pc_next += 4; // Unicorn: trace this instruction on request - if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, s->pc - 4)) { - gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, env->uc, s->pc - 4); + if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, s->pc_curr)) { + TCGContext *tcg_ctx = env->uc->tcg_ctx; + + gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, env->uc, s->pc_curr); // the callback might want to stop emulation immediately check_exit_request(tcg_ctx); } s->fp_access_checked = false; + if (dc_isar_feature(aa64_bti, s)) { + if (s->base.num_insns == 1) { + /* + * At the first insn of the TB, compute s->guarded_page. + * We delayed computing this until successfully reading + * the first insn of the TB, above. This (mostly) ensures + * that the softmmu tlb entry has been populated, and the + * page table GP bit is available. + * + * Note that we need to compute this even if btype == 0, + * because this value is used for BR instructions later + * where ENV is not available. + */ + s->guarded_page = is_guarded_page(env, s); + + /* First insn can have btype set to non-zero. */ + tcg_debug_assert(s->btype >= 0); + + /* + * Note that the Branch Target Exception has fairly high + * priority -- below debugging exceptions but above most + * everything else. This allows us to handle this now + * instead of waiting until the insn is otherwise decoded. + */ + if (s->btype != 0 + && s->guarded_page + && !btype_destination_ok(insn, s->bt, s->btype)) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_btitrap(s->btype), + default_exception_el(s)); + return; + } + } else { + /* Not the first insn: btype must be 0. 
*/ + tcg_debug_assert(s->btype == 0); + } + } + switch (extract32(insn, 25, 4)) { - case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */ + case 0x0: case 0x1: case 0x3: /* UNALLOCATED */ unallocated_encoding(s); break; + case 0x2: + if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) { + unallocated_encoding(s); + } + break; case 0x8: case 0x9: /* Data processing - immediate */ disas_data_proc_imm(s, insn); break; @@ -11030,54 +14503,58 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s) /* if we allocated any temporaries, free them here */ free_tmp_a64(s); + + /* + * After execution of most insns, btype is reset to 0. + * Note that we set btype == -1 when the insn sets btype. + */ + if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) { + reset_btype(s); + } } -void gen_intermediate_code_internal_a64(ARMCPU *cpu, - TranslationBlock *tb, - bool search_pc) +static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, + CPUState *cpu) { - CPUState *cs = CPU(cpu); - CPUARMState *env = &cpu->env; - DisasContext dc1, *dc = &dc1; - CPUBreakpoint *bp; - uint16_t *gen_opc_end; - int j, lj; - target_ulong pc_start; - target_ulong next_page_start; - int num_insns; - int max_insns; - TCGContext *tcg_ctx = env->uc->tcg_ctx; - bool block_full = false; + DisasContext *dc = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = cpu->uc; + CPUARMState *env = cpu->env_ptr; + ARMCPU *arm_cpu = env_archcpu(env); + uint32_t tb_flags = dc->base.tb->flags; + int bound, core_mmu_idx; - pc_start = tb->pc; - - dc->uc = env->uc; - dc->tb = tb; - - gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE; - - dc->is_jmp = DISAS_NEXT; - dc->pc = pc_start; - dc->singlestep_enabled = cs->singlestep_enabled; + // unicorn handle + dc->uc = uc; + dc->isar = &arm_cpu->isar; dc->condjmp = 0; dc->aarch64 = 1; + /* If we are coming from secure EL0 in a system with a 32-bit EL3, then + * there is no secure EL1, so we route exceptions to EL3. + */ + dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) && + !arm_el_is_aa64(env, 3); dc->thumb = 0; -#if defined(TARGET_WORDS_BIGENDIAN) - dc->bswap_code = 1; -#else - dc->bswap_code = 0; -#endif + dc->sctlr_b = 0; + dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE; dc->condexec_mask = 0; dc->condexec_cond = 0; -#if !defined(CONFIG_USER_ONLY) - dc->user = (ARM_TBFLAG_AA64_EL(tb->flags) == 0); -#endif - dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags); + core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX); + dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx); + dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII); + dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID); + dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); + dc->user = (dc->current_el == 0); + dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL); + dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL); + dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16; + dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE); + dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT); + dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE); + dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV); dc->vec_len = 0; dc->vec_stride = 0; - dc->cp_regs = cpu->cp_regs; - dc->current_el = arm_current_el(env); + dc->cp_regs = arm_cpu->cp_regs; dc->features = env->features; /* Single step state. 
The code-generation logic here is: @@ -11095,79 +14572,75 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
 * emit code to generate a software step exception
 * end the TB
 */
- dc->ss_active = ARM_TBFLAG_AA64_SS_ACTIVE(tb->flags);
- dc->pstate_ss = ARM_TBFLAG_AA64_PSTATE_SS(tb->flags);
+ dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
+ dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
 dc->is_ldex = false;
- dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
+ dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
+
+ /* Bound the number of insns to execute to those left on the page. */
+#ifdef _MSC_VER
+ bound = (0 - (dc->base.pc_first | TARGET_PAGE_MASK)) / 4;
+#else
+ bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
+#endif
+
+ /* If architectural single step active, limit to 1. */
+ if (dc->ss_active) {
+ bound = 1;
+ }
+ dc->base.max_insns = MIN(dc->base.max_insns, bound);
 init_tmp_a64_array(dc);
+}
- next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
- lj = -1;
- num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
- if (max_insns == 0) {
- max_insns = CF_COUNT_MASK;
- }
+static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
- tcg_clear_temp_count();
+static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ TCGContext *tcg_ctx = dc->uc->tcg_ctx;
- // Unicorn: early check to see if the address of this block is the until address
- if (tb->pc == env->uc->addr_end) {
- // imitate WFI instruction to halt emulation
- gen_tb_start(tcg_ctx);
- dc->is_jmp = DISAS_WFI;
- goto tb_end;
- }
+ tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, 0, 0);
+ dc->insn_start = tcg_last_op(tcg_ctx);
+}
- // Unicorn: trace this block on request
- // Only hook this block if it is not broken from previous translation due to
- // full translation cache
- if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) {
- // save block address to see if we need to patch block size later
- env->uc->block_addr = pc_start;
- env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1;
- gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start);
+static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
+ const CPUBreakpoint *bp)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ TCGContext *tcg_ctx = dc->uc->tcg_ctx;
+
+ if (bp->flags & BP_CPU) {
+ gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next);
+ gen_helper_check_breakpoints(tcg_ctx, tcg_ctx->cpu_env);
+ /* End the TB early; it likely won't be executed */
+ dc->base.is_jmp = DISAS_TOO_MANY;
 } else {
- env->uc->size_arg = -1;
+ gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
+ /* The address covered by the breakpoint must be
+ included in [tb->pc, tb->pc + tb->size) in order
+ for it to be properly cleared -- thus we
+ increment the PC here so that the logic setting
+ tb->size below does the right thing. */
+ dc->base.pc_next += 4;
+ dc->base.is_jmp = DISAS_NORETURN;
 }
- gen_tb_start(tcg_ctx);
+ return true;
+}
- do {
- if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
- if (bp->pc == dc->pc) {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
- /* Advance PC so that clearing the breakpoint will
- invalidate this TB. 
*/ - dc->pc += 2; - goto done_generating; - } - } - } - - if (search_pc) { - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - if (lj < j) { - lj++; - while (lj < j) { - tcg_ctx->gen_opc_instr_start[lj++] = 0; - } - } - tcg_ctx->gen_opc_pc[lj] = dc->pc; - tcg_ctx->gen_opc_instr_start[lj] = 1; - //tcg_ctx->gen_opc_icount[lj] = num_insns; - } - - //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) { - // gen_io_start(); - //} - - if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { - tcg_gen_debug_insn_start(tcg_ctx, dc->pc); - } +static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + CPUARMState *env = cpu->env_ptr; + // Unicorn: end address tells us to stop emulation + if (dcbase->pc_next == dc->uc->addr_end) { + // imitate WFI instruction to halt emulation + dcbase->is_jmp = DISAS_WFI; + } else { if (dc->ss_active && !dc->pstate_ss) { /* Singlestep state is Active-pending. * If we're in this state at the start of a TB then either @@ -11179,102 +14652,95 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu, * "did not step an insn" case, and so the syndrome ISV and EX * bits should be zero. */ - assert(num_insns == 0); - gen_exception(dc, EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0)); - dc->is_jmp = DISAS_EXC; - break; + assert(dc->base.num_insns == 1); + gen_swstep_exception(dc, 0, 0); + dc->base.is_jmp = DISAS_NORETURN; + } else { + disas_a64_insn(env, dc); } - disas_a64_insn(env, dc); - - if (tcg_check_temp_count()) { - fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n", - dc->pc); - } - - /* Translation stops when a conditional branch is encountered. - * Otherwise the subsequent code could get translated several times. - * Also stop translation when a page boundary is reached. This - * ensures prefetch aborts occur at the right place. - */ - num_insns++; - } while (!dc->is_jmp && tcg_ctx->gen_opc_ptr < gen_opc_end && - !cs->singlestep_enabled && - !dc->ss_active && - dc->pc < next_page_start && - num_insns < max_insns); - - /* if too long translation, save this info */ - if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns) { - block_full = true; + translator_loop_temp_check(&dc->base); } +} - //if (tb->cflags & CF_LAST_IO) { - // gen_io_end(); - //} +static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; -tb_end: - if (unlikely(cs->singlestep_enabled || dc->ss_active) - && dc->is_jmp != DISAS_EXC) { + if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) { /* Note that this means single stepping WFI doesn't halt the CPU. * For conditional branch insns this is harmless unreachable code as * gen_goto_tb() has already handled emitting the debug exception * (and thus a tb-jump is not possible when singlestepping). 
*/ - assert(dc->is_jmp != DISAS_TB_JUMP); - if (dc->is_jmp != DISAS_JUMP) { - gen_a64_set_pc_im(dc, dc->pc); - } - if (cs->singlestep_enabled) { - gen_exception_internal(dc, EXCP_DEBUG); - } else { - gen_step_complete_exception(dc); + switch (dc->base.is_jmp) { + default: + gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); + /* fall through */ + case DISAS_EXIT: + case DISAS_JUMP: + if (dc->base.singlestep_enabled) { + gen_exception_internal(tcg_ctx, EXCP_DEBUG); + } else { + gen_step_complete_exception(dc); + } + break; + case DISAS_NORETURN: + break; } } else { - switch (dc->is_jmp) { + switch (dc->base.is_jmp) { case DISAS_NEXT: - gen_goto_tb(dc, 1, dc->pc); + case DISAS_TOO_MANY: + gen_goto_tb(dc, 1, dc->base.pc_next); break; default: case DISAS_UPDATE: - gen_a64_set_pc_im(dc, dc->pc); + gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); /* fall through */ - case DISAS_JUMP: - /* indicate that the hash table must be used to find the next TB */ - tcg_gen_exit_tb(tcg_ctx, 0); + case DISAS_EXIT: + tcg_gen_exit_tb(tcg_ctx, NULL, 0); break; - case DISAS_TB_JUMP: - case DISAS_EXC: + case DISAS_JUMP: + tcg_gen_lookup_and_goto_ptr(tcg_ctx); + break; + case DISAS_NORETURN: case DISAS_SWI: break; case DISAS_WFE: - gen_a64_set_pc_im(dc, dc->pc); + gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); gen_helper_wfe(tcg_ctx, tcg_ctx->cpu_env); break; + case DISAS_YIELD: + gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); + gen_helper_yield(tcg_ctx, tcg_ctx->cpu_env); + break; case DISAS_WFI: + { /* This is a special case because we don't want to just halt the CPU * if trying to debug across a WFI. */ - gen_a64_set_pc_im(dc, dc->pc); - gen_helper_wfi(tcg_ctx, tcg_ctx->cpu_env); + TCGv_i32 tmp = tcg_const_i32(tcg_ctx, 4); + + gen_a64_set_pc_im(tcg_ctx, dc->base.pc_next); + gen_helper_wfi(tcg_ctx, tcg_ctx->cpu_env, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + /* The helper doesn't necessarily throw an exception, but we + * must go back to the main loop to check for interrupts anyway. + */ + tcg_gen_exit_tb(tcg_ctx, NULL, 0); break; } - } - -done_generating: - gen_tb_end(tcg_ctx, tb, num_insns); - *tcg_ctx->gen_opc_ptr = INDEX_op_end; - - if (search_pc) { - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - lj++; - while (lj <= j) { - tcg_ctx->gen_opc_instr_start[lj++] = 0; } - } else { - tb->size = dc->pc - pc_start; - tb->icount = num_insns; } - - env->uc->block_full = block_full; } + +const TranslatorOps aarch64_translator_ops = { + .init_disas_context = aarch64_tr_init_disas_context, + .tb_start = aarch64_tr_tb_start, + .insn_start = aarch64_tr_insn_start, + .breakpoint_check = aarch64_tr_breakpoint_check, + .translate_insn = aarch64_tr_translate_insn, + .tb_stop = aarch64_tr_tb_stop, +}; diff --git a/qemu/target/arm/translate-a64.h b/qemu/target/arm/translate-a64.h new file mode 100644 index 00000000..6092d1b0 --- /dev/null +++ b/qemu/target/arm/translate-a64.h @@ -0,0 +1,129 @@ +/* + * AArch64 translation, common definitions. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARM_TRANSLATE_A64_H
+#define TARGET_ARM_TRANSLATE_A64_H
+
+void unallocated_encoding(DisasContext *s);
+typedef struct TCGContext TCGContext;
+
+#define unsupported_encoding(s, insn) \
+ do { \
+ qemu_log_mask(LOG_UNIMP, \
+ "%s:%d: unsupported instruction encoding 0x%08x " \
+ "at pc=%016" PRIx64 "\n", \
+ __FILE__, __LINE__, insn, s->pc_curr); \
+ unallocated_encoding(s); \
+ } while (0)
+
+TCGv_i64 new_tmp_a64(DisasContext *s);
+TCGv_i64 new_tmp_a64_zero(DisasContext *s);
+TCGv_i64 cpu_reg(DisasContext *s, int reg);
+TCGv_i64 cpu_reg_sp(DisasContext *s, int reg);
+TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf);
+TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf);
+void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
+TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx, bool);
+bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
+ unsigned int imms, unsigned int immr);
+bool sve_access_check(DisasContext *s);
+
+/* We should have at some point before trying to access an FP register
+ * done the necessary access check, so assert that
+ * (a) we did the check and
+ * (b) we didn't then just plough ahead anyway if it failed.
+ * Print the instruction pattern in the abort message so we can figure
+ * out what we need to fix if a user encounters this problem in the wild.
+ */
+static inline void assert_fp_access_checked(DisasContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+ if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
+ fprintf(stderr, "target-arm: FP access check missing for "
+ "instruction 0x%08x\n", s->insn);
+ abort();
+ }
+#endif
+}
+
+/* Return the offset into CPUARMState of an element of specified
+ * size, 'element' places in from the least significant end of
+ * the FP/vector register Qn.
+ */
+static inline int vec_reg_offset(DisasContext *s, int regno,
+ int element, MemOp size)
+{
+ int element_size = 1 << size;
+ int offs = element * element_size;
+#ifdef HOST_WORDS_BIGENDIAN
+ /* This is complicated slightly because vfp.zregs[n].d[0] is
+ * still the lowest and vfp.zregs[n].d[15] the highest of the
+ * 256 byte vector, even on big endian systems.
+ *
+ * Calculate the offset assuming fully little-endian,
+ * then XOR to account for the order of the 8-byte units.
+ *
+ * For 16 byte elements, the two 8 byte halves will not form a
+ * host int128 if the host is bigendian, since they're in the
+ * wrong order. However the only 16 byte operation we have is
+ * a move, so we can ignore this for the moment. More complicated
+ * operations will have to special case loading and storing from
+ * the zregs array.
+ */
+ if (element_size < 8) {
+ offs ^= 8 - element_size;
+ }
+#endif
+ offs += offsetof(CPUARMState, vfp.zregs[regno]);
+ assert_fp_access_checked(s);
+ return offs;
+}
+
+/* Return the offset into CPUARMState of the "whole" vector register Qn. */
+static inline int vec_full_reg_offset(DisasContext *s, int regno)
+{
+ assert_fp_access_checked(s);
+ return offsetof(CPUARMState, vfp.zregs[regno]);
+}
+
+/* Return a newly allocated pointer to the vector register. */
+static inline TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
+{
+ TCGContext *tcg_ctx = s->uc->tcg_ctx;
+ TCGv_ptr ret = tcg_temp_new_ptr(tcg_ctx);
+ tcg_gen_addi_ptr(tcg_ctx, ret, tcg_ctx->cpu_env, vec_full_reg_offset(s, regno));
+ return ret;
+}
+
+/* Return the byte size of the "whole" vector register, VL / 8. 
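+ * Note sve_len is already kept in bytes: the A64 translator initializes it
+ * from the ZCR_LEN tb-flags field as (ZCR_LEN + 1) * 16.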
*/ +static inline int vec_full_reg_size(DisasContext *s)
+{
+ return s->sve_len;
+}
+
+bool disas_sve(DisasContext *, uint32_t);
+
+/* Note that the gvec expanders operate on offsets + sizes. */
+typedef void GVecGen2Fn(TCGContext *, unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
+typedef void GVecGen2iFn(TCGContext *, unsigned, uint32_t, uint32_t, int64_t,
+ uint32_t, uint32_t);
+typedef void GVecGen3Fn(TCGContext *, unsigned, uint32_t, uint32_t,
+ uint32_t, uint32_t, uint32_t);
+typedef void GVecGen4Fn(TCGContext *, unsigned, uint32_t, uint32_t, uint32_t,
+ uint32_t, uint32_t, uint32_t);
+
+#endif /* TARGET_ARM_TRANSLATE_A64_H */ diff --git a/qemu/target/arm/translate-sve.c b/qemu/target/arm/translate-sve.c new file mode 100644 index 00000000..ff2f8ff3 --- /dev/null +++ b/qemu/target/arm/translate-sve.c @@ -0,0 +1,5588 @@ +/*
+ * AArch64 SVE translation
+ *
+ * Copyright (c) 2018 Linaro, Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "qemu/log.h"
+#include "arm_ldst.h"
+#include "translate.h"
+#include "internals.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "translate-a64.h"
+#include "fpu/softfloat.h"
+
+
+typedef void GVecGen2sFn(TCGContext *, unsigned, uint32_t, uint32_t,
+ TCGv_i64, uint32_t, uint32_t);
+
+typedef void gen_helper_gvec_flags_3(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i32);
+typedef void gen_helper_gvec_flags_4(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_ptr, TCGv_i32);
+
+typedef void gen_helper_gvec_mem(TCGContext *, TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32);
+typedef void gen_helper_gvec_mem_scatter(TCGContext *, TCGv_env, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i64, TCGv_i32);
+
+/*
+ * Helpers for extracting complex instruction fields.
+ */
+
+/* See e.g. ASR (immediate, predicated).
+ * Returns -1 for unallocated encoding; diagnose later.
+ */
+static int tszimm_esz(DisasContext *s, int x)
+{
+ x >>= 3; /* discard imm3 */
+ return 31 - clz32(x);
+}
+
+static int tszimm_shr(DisasContext *s, int x)
+{
+ return (16 << tszimm_esz(s, x)) - x;
+}
+
+/* See e.g. LSL (immediate, predicated). */
+static int tszimm_shl(DisasContext *s, int x)
+{
+ return x - (8 << tszimm_esz(s, x));
+}
+
+static inline int plus1(DisasContext *s, int x)
+{
+ return x + 1;
+}
+
+/* The SH bit is in bit 8. Extract the low 8 and shift. */
+static inline int expand_imm_sh8s(DisasContext *s, int x)
+{
+ return (int8_t)x << (x & 0x100 ? 8 : 0);
+}
+
+static inline int expand_imm_sh8u(DisasContext *s, int x)
+{
+ return (uint8_t)x << (x & 0x100 ? 8 : 0);
+}
+
+/* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype)
+ * with unsigned data. C.f. SVE Memory Contiguous Load Group. 
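+ * The unsigned rows sit on the diagonal of that 4x4 dtype table, so the
+ * lookup below is equivalent to dtype = msz * 5 (e.g. msz == 1, 16-bit
+ * accesses, maps to dtype 5).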
+ */ +static inline int msz_dtype(DisasContext *s, int msz)
+{
+ static const uint8_t dtype[4] = { 0, 5, 10, 15 };
+ return dtype[msz];
+}
+
+/*
+ * Include the generated decoder.
+ */
+
+#include "decode-sve.inc.c"
+
+/*
+ * Implement all of the translator functions referenced by the decoder.
+ */
+
+/* Return the offset into CPUARMState of the predicate vector register Pn.
+ * Note for this purpose, FFR is P16.
+ */
+static inline int pred_full_reg_offset(DisasContext *s, int regno)
+{
+ return offsetof(CPUARMState, vfp.pregs[regno]);
+}
+
+/* Return the byte size of the whole predicate register, VL / 64. */
+static inline int pred_full_reg_size(DisasContext *s)
+{
+ return s->sve_len >> 3;
+}
+
+/* Round up the size of a register to a size allowed by
+ * the tcg vector infrastructure. Any operation which uses this
+ * size may assume that the bits above pred_full_reg_size are zero,
+ * and must leave them the same way.
+ *
+ * Note that this is not needed for the vector registers as they
+ * are always properly sized for tcg vectors.
+ */
+static int size_for_gvec(int size)
+{
+ if (size <= 8) {
+ return 8;
+ } else {
+ return QEMU_ALIGN_UP(size, 16);
+ }
+}
+
+static int pred_gvec_reg_size(DisasContext *s)
+{
+ return size_for_gvec(pred_full_reg_size(s));
+}
+
+/* Invoke a vector expander on two Zregs. */
+static bool do_vector2_z(DisasContext *s, GVecGen2Fn *gvec_fn,
+ int esz, int rd, int rn)
+{
+ TCGContext *tcg_ctx = s->uc->tcg_ctx;
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ gvec_fn(tcg_ctx, esz, vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn), vsz, vsz);
+ }
+ return true;
+}
+
+/* Invoke a vector expander on three Zregs. */
+static bool do_vector3_z(DisasContext *s, GVecGen3Fn *gvec_fn,
+ int esz, int rd, int rn, int rm)
+{
+ TCGContext *tcg_ctx = s->uc->tcg_ctx;
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ gvec_fn(tcg_ctx, esz, vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), vsz, vsz);
+ }
+ return true;
+}
+
+/* Invoke a vector move on two Zregs. */
+static bool do_mov_z(DisasContext *s, int rd, int rn)
+{
+ return do_vector2_z(s, tcg_gen_gvec_mov, 0, rd, rn);
+}
+
+/* Initialize a Zreg with replications of a 64-bit immediate. */
+static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
+{
+ TCGContext *tcg_ctx = s->uc->tcg_ctx;
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_dup64i(tcg_ctx, vec_full_reg_offset(s, rd), vsz, vsz, word);
+}
+
+/* Invoke a vector expander on two Pregs. */
+static bool do_vector2_p(DisasContext *s, GVecGen2Fn *gvec_fn,
+ int esz, int rd, int rn)
+{
+ TCGContext *tcg_ctx = s->uc->tcg_ctx;
+ if (sve_access_check(s)) {
+ unsigned psz = pred_gvec_reg_size(s);
+ gvec_fn(tcg_ctx, esz, pred_full_reg_offset(s, rd),
+ pred_full_reg_offset(s, rn), psz, psz);
+ }
+ return true;
+}
+
+/* Invoke a vector expander on three Pregs. */
+static bool do_vector3_p(DisasContext *s, GVecGen3Fn *gvec_fn,
+ int esz, int rd, int rn, int rm)
+{
+ TCGContext *tcg_ctx = s->uc->tcg_ctx;
+ if (sve_access_check(s)) {
+ unsigned psz = pred_gvec_reg_size(s);
+ gvec_fn(tcg_ctx, esz, pred_full_reg_offset(s, rd),
+ pred_full_reg_offset(s, rn),
+ pred_full_reg_offset(s, rm), psz, psz);
+ }
+ return true;
+}
+
+/* Invoke a vector operation on four Pregs. 
*/ +static bool do_vecop4_p(DisasContext *s, const GVecGen4 *gvec_op,
+ int rd, int rn, int rm, int rg)
+{
+ TCGContext *tcg_ctx = s->uc->tcg_ctx;
+ if (sve_access_check(s)) {
+ unsigned psz = pred_gvec_reg_size(s);
+ tcg_gen_gvec_4(tcg_ctx, pred_full_reg_offset(s, rd),
+ pred_full_reg_offset(s, rn),
+ pred_full_reg_offset(s, rm),
+ pred_full_reg_offset(s, rg),
+ psz, psz, gvec_op);
+ }
+ return true;
+}
+
+/* Invoke a vector move on two Pregs. */
+static bool do_mov_p(DisasContext *s, int rd, int rn)
+{
+ return do_vector2_p(s, tcg_gen_gvec_mov, 0, rd, rn);
+}
+
+/* Set the cpu flags as per a return from an SVE helper. */
+static void do_pred_flags(TCGContext *tcg_ctx, TCGv_i32 t)
+{
+ tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, t);
+ tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_ZF, t, 2);
+ tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_CF, t, 1);
+ tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0);
+}
+
+/* Subroutines computing the ARM PredTest pseudofunction. */
+static void do_predtest1(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 g)
+{
+ TCGv_i32 t = tcg_temp_new_i32(tcg_ctx);
+
+ gen_helper_sve_predtest1(tcg_ctx, t, d, g);
+ do_pred_flags(tcg_ctx, t);
+ tcg_temp_free_i32(tcg_ctx, t);
+}
+
+static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
+{
+ TCGContext *tcg_ctx = s->uc->tcg_ctx;
+ TCGv_ptr dptr = tcg_temp_new_ptr(tcg_ctx);
+ TCGv_ptr gptr = tcg_temp_new_ptr(tcg_ctx);
+ TCGv_i32 t;
+
+ tcg_gen_addi_ptr(tcg_ctx, dptr, tcg_ctx->cpu_env, dofs);
+ tcg_gen_addi_ptr(tcg_ctx, gptr, tcg_ctx->cpu_env, gofs);
+ t = tcg_const_i32(tcg_ctx, words);
+
+ gen_helper_sve_predtest(tcg_ctx, t, dptr, gptr, t);
+ tcg_temp_free_ptr(tcg_ctx, dptr);
+ tcg_temp_free_ptr(tcg_ctx, gptr);
+
+ do_pred_flags(tcg_ctx, t);
+ tcg_temp_free_i32(tcg_ctx, t);
+}
+
+/* For each element size, the bits within a predicate word that are active. 
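+ * A predicate carries one significant bit per byte of vector data, and an
+ * element of (1 << esz) bytes is governed by the lowest of its bits, so
+ * each mask repeats a single set bit every (1 << esz) bits: esz == 1
+ * gives 0x5555..., esz == 3 gives 0x0101... .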
*/ +const uint64_t pred_esz_masks[4] = {
+ 0xffffffffffffffffull, 0x5555555555555555ull,
+ 0x1111111111111111ull, 0x0101010101010101ull
+};
+
+/*
+ *** SVE Logical - Unpredicated Group
+ */
+
+static bool trans_AND_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_vector3_z(s, tcg_gen_gvec_and, 0, a->rd, a->rn, a->rm);
+}
+
+static bool trans_ORR_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_vector3_z(s, tcg_gen_gvec_or, 0, a->rd, a->rn, a->rm);
+}
+
+static bool trans_EOR_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_vector3_z(s, tcg_gen_gvec_xor, 0, a->rd, a->rn, a->rm);
+}
+
+static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_vector3_z(s, tcg_gen_gvec_andc, 0, a->rd, a->rn, a->rm);
+}
+
+/*
+ *** SVE Integer Arithmetic - Unpredicated Group
+ */
+
+static bool trans_ADD_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_vector3_z(s, tcg_gen_gvec_add, a->esz, a->rd, a->rn, a->rm);
+}
+
+static bool trans_SUB_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_vector3_z(s, tcg_gen_gvec_sub, a->esz, a->rd, a->rn, a->rm);
+}
+
+static bool trans_SQADD_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_vector3_z(s, tcg_gen_gvec_ssadd, a->esz, a->rd, a->rn, a->rm);
+}
+
+static bool trans_SQSUB_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_vector3_z(s, tcg_gen_gvec_sssub, a->esz, a->rd, a->rn, a->rm);
+}
+
+static bool trans_UQADD_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_vector3_z(s, tcg_gen_gvec_usadd, a->esz, a->rd, a->rn, a->rm);
+}
+
+static bool trans_UQSUB_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_vector3_z(s, tcg_gen_gvec_ussub, a->esz, a->rd, a->rn, a->rm);
+}
+
+/*
+ *** SVE Integer Arithmetic - Binary Predicated Group
+ */
+
+static bool do_zpzz_ool(DisasContext *s, arg_rprr_esz *a, gen_helper_gvec_4 *fn)
+{
+ TCGContext *tcg_ctx = s->uc->tcg_ctx;
+ unsigned vsz = vec_full_reg_size(s);
+ if (fn == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ tcg_gen_gvec_4_ool(tcg_ctx, vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ pred_full_reg_offset(s, a->pg),
+ vsz, vsz, 0, fn);
+ }
+ return true;
+}
+
+/* Select active elements from Zn and inactive elements from Zm,
+ * storing the result in Zd. 
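+ * Conceptually, per element: Zd[i] = pg[i] ? Zn[i] : Zm[i].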
+ */ +static void do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_4 * const fns[4] = { + gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h, + gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d + }; + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_4_ool(tcg_ctx, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + pred_full_reg_offset(s, pg), + vsz, vsz, 0, fns[esz]); +} + +#define DO_ZPZZ(NAME, name) \ +static bool trans_##NAME##_zpzz(DisasContext *s, arg_rprr_esz *a) \ +{ \ + static gen_helper_gvec_4 * const fns[4] = { \ + gen_helper_sve_##name##_zpzz_b, gen_helper_sve_##name##_zpzz_h, \ + gen_helper_sve_##name##_zpzz_s, gen_helper_sve_##name##_zpzz_d, \ + }; \ + return do_zpzz_ool(s, a, fns[a->esz]); \ +} + +DO_ZPZZ(AND, and) +DO_ZPZZ(EOR, eor) +DO_ZPZZ(ORR, orr) +DO_ZPZZ(BIC, bic) + +DO_ZPZZ(ADD, add) +DO_ZPZZ(SUB, sub) + +DO_ZPZZ(SMAX, smax) +DO_ZPZZ(UMAX, umax) +DO_ZPZZ(SMIN, smin) +DO_ZPZZ(UMIN, umin) +DO_ZPZZ(SABD, sabd) +DO_ZPZZ(UABD, uabd) + +DO_ZPZZ(MUL, mul) +DO_ZPZZ(SMULH, smulh) +DO_ZPZZ(UMULH, umulh) + +DO_ZPZZ(ASR, asr) +DO_ZPZZ(LSR, lsr) +DO_ZPZZ(LSL, lsl) + +static bool trans_SDIV_zpzz(DisasContext *s, arg_rprr_esz *a) +{ + static gen_helper_gvec_4 * const fns[4] = { + NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d + }; + return do_zpzz_ool(s, a, fns[a->esz]); +} + +static bool trans_UDIV_zpzz(DisasContext *s, arg_rprr_esz *a) +{ + static gen_helper_gvec_4 * const fns[4] = { + NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d + }; + return do_zpzz_ool(s, a, fns[a->esz]); +} + +static bool trans_SEL_zpzz(DisasContext *s, arg_rprr_esz *a) +{ + if (sve_access_check(s)) { + do_sel_z(s, a->rd, a->rn, a->rm, a->pg, a->esz); + } + return true; +} + +#undef DO_ZPZZ + +/* + *** SVE Integer Arithmetic - Unary Predicated Group + */ + +static bool do_zpz_ool(DisasContext *s, arg_rpr_esz *a, gen_helper_gvec_3 *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + pred_full_reg_offset(s, a->pg), + vsz, vsz, 0, fn); + } + return true; +} + +#define DO_ZPZ(NAME, name) \ +static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ +{ \ + static gen_helper_gvec_3 * const fns[4] = { \ + gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ + }; \ + return do_zpz_ool(s, a, fns[a->esz]); \ +} + +DO_ZPZ(CLS, cls) +DO_ZPZ(CLZ, clz) +DO_ZPZ(CNT_zpz, cnt_zpz) +DO_ZPZ(CNOT, cnot) +DO_ZPZ(NOT_zpz, not_zpz) +DO_ZPZ(ABS, abs) +DO_ZPZ(NEG, neg) + +static bool trans_FABS(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + NULL, + gen_helper_sve_fabs_h, + gen_helper_sve_fabs_s, + gen_helper_sve_fabs_d + }; + return do_zpz_ool(s, a, fns[a->esz]); +} + +static bool trans_FNEG(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + NULL, + gen_helper_sve_fneg_h, + gen_helper_sve_fneg_s, + gen_helper_sve_fneg_d + }; + return do_zpz_ool(s, a, fns[a->esz]); +} + +static bool trans_SXTB(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + NULL, + gen_helper_sve_sxtb_h, + gen_helper_sve_sxtb_s, + gen_helper_sve_sxtb_d + }; + return do_zpz_ool(s, a, fns[a->esz]); +} + +static bool trans_UXTB(DisasContext 
*s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + NULL, + gen_helper_sve_uxtb_h, + gen_helper_sve_uxtb_s, + gen_helper_sve_uxtb_d + }; + return do_zpz_ool(s, a, fns[a->esz]); +} + +static bool trans_SXTH(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + NULL, NULL, + gen_helper_sve_sxth_s, + gen_helper_sve_sxth_d + }; + return do_zpz_ool(s, a, fns[a->esz]); +} + +static bool trans_UXTH(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + NULL, NULL, + gen_helper_sve_uxth_s, + gen_helper_sve_uxth_d + }; + return do_zpz_ool(s, a, fns[a->esz]); +} + +static bool trans_SXTW(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_sxtw_d : NULL); +} + +static bool trans_UXTW(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_uxtw_d : NULL); +} + +#undef DO_ZPZ + +/* + *** SVE Integer Reduction Group + */ + +typedef void gen_helper_gvec_reduc(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32); +static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a, + gen_helper_gvec_reduc *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr t_zn, t_pg; + TCGv_i32 desc; + TCGv_i64 temp; + + if (fn == NULL) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); + temp = tcg_temp_new_i64(tcg_ctx); + t_zn = tcg_temp_new_ptr(tcg_ctx); + t_pg = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, t_zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rn)); + tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); + fn(tcg_ctx, temp, t_zn, t_pg, desc); + tcg_temp_free_ptr(tcg_ctx, t_zn); + tcg_temp_free_ptr(tcg_ctx, t_pg); + tcg_temp_free_i32(tcg_ctx, desc); + + write_fp_dreg(s, a->rd, temp); + tcg_temp_free_i64(tcg_ctx, temp); + return true; +} + +#define DO_VPZ(NAME, name) \ +static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ +{ \ + static gen_helper_gvec_reduc * const fns[4] = { \ + gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ + }; \ + return do_vpz_ool(s, a, fns[a->esz]); \ +} + +DO_VPZ(ORV, orv) +DO_VPZ(ANDV, andv) +DO_VPZ(EORV, eorv) + +DO_VPZ(UADDV, uaddv) +DO_VPZ(SMAXV, smaxv) +DO_VPZ(UMAXV, umaxv) +DO_VPZ(SMINV, sminv) +DO_VPZ(UMINV, uminv) + +static bool trans_SADDV(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_reduc * const fns[4] = { + gen_helper_sve_saddv_b, gen_helper_sve_saddv_h, + gen_helper_sve_saddv_s, NULL + }; + return do_vpz_ool(s, a, fns[a->esz]); +} + +#undef DO_VPZ + +/* + *** SVE Shift by Immediate - Predicated Group + */ + +/* Store zero into every active element of Zd. We will use this for two + * and three-operand predicated instructions for which logic dictates a + * zero result. + */ +static bool do_clr_zp(DisasContext *s, int rd, int pg, int esz) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_2 * const fns[4] = { + gen_helper_sve_clr_b, gen_helper_sve_clr_h, + gen_helper_sve_clr_s, gen_helper_sve_clr_d, + }; + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_2_ool(tcg_ctx, vec_full_reg_offset(s, rd), + pred_full_reg_offset(s, pg), + vsz, vsz, 0, fns[esz]); + } + return true; +} + +/* Copy Zn into Zd, storing zeros into inactive elements. 
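+ * Conceptually, per element: Zd[i] = pg[i] ? Zn[i] : 0.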
*/ +static void do_movz_zpz(DisasContext *s, int rd, int rn, int pg, int esz) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_3 * const fns[4] = { + gen_helper_sve_movz_b, gen_helper_sve_movz_h, + gen_helper_sve_movz_s, gen_helper_sve_movz_d, + }; + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + pred_full_reg_offset(s, pg), + vsz, vsz, 0, fns[esz]); +} + +static bool do_zpzi_ool(DisasContext *s, arg_rpri_esz *a, + gen_helper_gvec_3 *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + pred_full_reg_offset(s, a->pg), + vsz, vsz, a->imm, fn); + } + return true; +} + +static bool trans_ASR_zpzi(DisasContext *s, arg_rpri_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h, + gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d, + }; + if (a->esz < 0) { + /* Invalid tsz encoding -- see tszimm_esz. */ + return false; + } + /* Shift by element size is architecturally valid. For + arithmetic right-shift, it's the same as by one less. */ + a->imm = MIN(a->imm, (8 << a->esz) - 1); + return do_zpzi_ool(s, a, fns[a->esz]); +} + +static bool trans_LSR_zpzi(DisasContext *s, arg_rpri_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h, + gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d, + }; + if (a->esz < 0) { + return false; + } + /* Shift by element size is architecturally valid. + For logical shifts, it is a zeroing operation. */ + if (a->imm >= (8 << a->esz)) { + return do_clr_zp(s, a->rd, a->pg, a->esz); + } else { + return do_zpzi_ool(s, a, fns[a->esz]); + } +} + +static bool trans_LSL_zpzi(DisasContext *s, arg_rpri_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h, + gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d, + }; + if (a->esz < 0) { + return false; + } + /* Shift by element size is architecturally valid. + For logical shifts, it is a zeroing operation. */ + if (a->imm >= (8 << a->esz)) { + return do_clr_zp(s, a->rd, a->pg, a->esz); + } else { + return do_zpzi_ool(s, a, fns[a->esz]); + } +} + +static bool trans_ASRD(DisasContext *s, arg_rpri_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + gen_helper_sve_asrd_b, gen_helper_sve_asrd_h, + gen_helper_sve_asrd_s, gen_helper_sve_asrd_d, + }; + if (a->esz < 0) { + return false; + } + /* Shift by element size is architecturally valid. For arithmetic + right shift for division, it is a zeroing operation. 
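+ (ASRD rounds towards zero, so shifting an element by its full width
+ always yields 0, matching the do_clr_zp path taken below.)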
*/ + if (a->imm >= (8 << a->esz)) { + return do_clr_zp(s, a->rd, a->pg, a->esz); + } else { + return do_zpzi_ool(s, a, fns[a->esz]); + } +} + +/* + *** SVE Bitwise Shift - Predicated Group + */ + +#define DO_ZPZW(NAME, name) \ +static bool trans_##NAME##_zpzw(DisasContext *s, arg_rprr_esz *a) \ +{ \ + static gen_helper_gvec_4 * const fns[3] = { \ + gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h, \ + gen_helper_sve_##name##_zpzw_s, \ + }; \ + if (a->esz < 0 || a->esz >= 3) { \ + return false; \ + } \ + return do_zpzz_ool(s, a, fns[a->esz]); \ +} + +DO_ZPZW(ASR, asr) +DO_ZPZW(LSR, lsr) +DO_ZPZW(LSL, lsl) + +#undef DO_ZPZW + +/* + *** SVE Bitwise Shift - Unpredicated Group + */ + +static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr, + void (*gvec_fn)(TCGContext *, unsigned, uint32_t, uint32_t, + int64_t, uint32_t, uint32_t)) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz < 0) { + /* Invalid tsz encoding -- see tszimm_esz. */ + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + /* Shift by element size is architecturally valid. For + arithmetic right-shift, it's the same as by one less. + Otherwise it is a zeroing operation. */ + if (a->imm >= 8 << a->esz) { + if (asr) { + a->imm = (8 << a->esz) - 1; + } else { + do_dupi_z(s, a->rd, 0); + return true; + } + } + gvec_fn(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz); + } + return true; +} + +static bool trans_ASR_zzi(DisasContext *s, arg_rri_esz *a) +{ + return do_shift_imm(s, a, true, tcg_gen_gvec_sari); +} + +static bool trans_LSR_zzi(DisasContext *s, arg_rri_esz *a) +{ + return do_shift_imm(s, a, false, tcg_gen_gvec_shri); +} + +static bool trans_LSL_zzi(DisasContext *s, arg_rri_esz *a) +{ + return do_shift_imm(s, a, false, tcg_gen_gvec_shli); +} + +static bool do_zzw_ool(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3 *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + vsz, vsz, 0, fn); + } + return true; +} + +#define DO_ZZW(NAME, name) \ +static bool trans_##NAME##_zzw(DisasContext *s, arg_rrr_esz *a) \ +{ \ + static gen_helper_gvec_3 * const fns[4] = { \ + gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h, \ + gen_helper_sve_##name##_zzw_s, NULL \ + }; \ + return do_zzw_ool(s, a, fns[a->esz]); \ +} + +DO_ZZW(ASR, asr) +DO_ZZW(LSR, lsr) +DO_ZZW(LSL, lsl) + +#undef DO_ZZW + +/* + *** SVE Integer Multiply-Add Group + */ + +static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a, + gen_helper_gvec_5 *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_5_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->ra), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + pred_full_reg_offset(s, a->pg), + vsz, vsz, 0, fn); + } + return true; +} + +#define DO_ZPZZZ(NAME, name) \ +static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \ +{ \ + static gen_helper_gvec_5 * const fns[4] = { \ + gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ + }; \ + return do_zpzzz_ool(s, a, fns[a->esz]); \ +} + +DO_ZPZZZ(MLA, mla) +DO_ZPZZZ(MLS, mls) + +#undef DO_ZPZZZ + +/* + *** SVE Index Generation Group + */ + 
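+/* INDEX writes element i of Zd as start + i * incr. The trans_INDEX_*
+ * variants below differ only in whether start and incr come from
+ * immediates or from general registers; conceptually:
+ *
+ *     for (i = 0; i < vl / esize; ++i) {
+ *         Zd[i] = start + i * incr;
+ *     }
+ */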
+static void do_index(DisasContext *s, int esz, int rd, + TCGv_i64 start, TCGv_i64 incr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); + TCGv_ptr t_zd = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, t_zd, tcg_ctx->cpu_env, vec_full_reg_offset(s, rd)); + if (esz == 3) { + gen_helper_sve_index_d(tcg_ctx, t_zd, start, incr, desc); + } else { + typedef void index_fn(TCGContext *, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32); + static index_fn * const fns[3] = { + gen_helper_sve_index_b, + gen_helper_sve_index_h, + gen_helper_sve_index_s, + }; + TCGv_i32 s32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 i32 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_extrl_i64_i32(tcg_ctx, s32, start); + tcg_gen_extrl_i64_i32(tcg_ctx, i32, incr); + fns[esz](tcg_ctx, t_zd, s32, i32, desc); + + tcg_temp_free_i32(tcg_ctx, s32); + tcg_temp_free_i32(tcg_ctx, i32); + } + tcg_temp_free_ptr(tcg_ctx, t_zd); + tcg_temp_free_i32(tcg_ctx, desc); +} + +static bool trans_INDEX_ii(DisasContext *s, arg_INDEX_ii *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 start = tcg_const_i64(tcg_ctx, a->imm1); + TCGv_i64 incr = tcg_const_i64(tcg_ctx, a->imm2); + do_index(s, a->esz, a->rd, start, incr); + tcg_temp_free_i64(tcg_ctx, start); + tcg_temp_free_i64(tcg_ctx, incr); + } + return true; +} + +static bool trans_INDEX_ir(DisasContext *s, arg_INDEX_ir *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 start = tcg_const_i64(tcg_ctx, a->imm); + TCGv_i64 incr = cpu_reg(s, a->rm); + do_index(s, a->esz, a->rd, start, incr); + tcg_temp_free_i64(tcg_ctx, start); + } + return true; +} + +static bool trans_INDEX_ri(DisasContext *s, arg_INDEX_ri *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 start = cpu_reg(s, a->rn); + TCGv_i64 incr = tcg_const_i64(tcg_ctx, a->imm); + do_index(s, a->esz, a->rd, start, incr); + tcg_temp_free_i64(tcg_ctx, incr); + } + return true; +} + +static bool trans_INDEX_rr(DisasContext *s, arg_INDEX_rr *a) +{ + if (sve_access_check(s)) { + TCGv_i64 start = cpu_reg(s, a->rn); + TCGv_i64 incr = cpu_reg(s, a->rm); + do_index(s, a->esz, a->rd, start, incr); + } + return true; +} + +/* + *** SVE Stack Allocation Group + */ + +static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 rd = cpu_reg_sp(s, a->rd); + TCGv_i64 rn = cpu_reg_sp(s, a->rn); + tcg_gen_addi_i64(tcg_ctx, rd, rn, a->imm * vec_full_reg_size(s)); + } + return true; +} + +static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 rd = cpu_reg_sp(s, a->rd); + TCGv_i64 rn = cpu_reg_sp(s, a->rn); + tcg_gen_addi_i64(tcg_ctx, rd, rn, a->imm * pred_full_reg_size(s)); + } + return true; +} + +static bool trans_RDVL(DisasContext *s, arg_RDVL *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 reg = cpu_reg(s, a->rd); + tcg_gen_movi_i64(tcg_ctx, reg, a->imm * vec_full_reg_size(s)); + } + return true; +} + +/* + *** SVE Compute Vector Address Group + */ + +static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + vsz, vsz, 
a->imm, fn); + } + return true; +} + +static bool trans_ADR_p32(DisasContext *s, arg_rrri *a) +{ + return do_adr(s, a, gen_helper_sve_adr_p32); +} + +static bool trans_ADR_p64(DisasContext *s, arg_rrri *a) +{ + return do_adr(s, a, gen_helper_sve_adr_p64); +} + +static bool trans_ADR_s32(DisasContext *s, arg_rrri *a) +{ + return do_adr(s, a, gen_helper_sve_adr_s32); +} + +static bool trans_ADR_u32(DisasContext *s, arg_rrri *a) +{ + return do_adr(s, a, gen_helper_sve_adr_u32); +} + +/* + *** SVE Integer Misc - Unpredicated Group + */ + +static bool trans_FEXPA(DisasContext *s, arg_rr_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_2 * const fns[4] = { + NULL, + gen_helper_sve_fexpa_h, + gen_helper_sve_fexpa_s, + gen_helper_sve_fexpa_d, + }; + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_2_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vsz, vsz, 0, fns[a->esz]); + } + return true; +} + +static bool trans_FTSSEL(DisasContext *s, arg_rrr_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_3 * const fns[4] = { + NULL, + gen_helper_sve_ftssel_h, + gen_helper_sve_ftssel_s, + gen_helper_sve_ftssel_d, + }; + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + vsz, vsz, 0, fns[a->esz]); + } + return true; +} + +/* + *** SVE Predicate Logical Operations Group + */ + +static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a, + const GVecGen4 *gvec_op) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + unsigned psz = pred_gvec_reg_size(s); + int dofs = pred_full_reg_offset(s, a->rd); + int nofs = pred_full_reg_offset(s, a->rn); + int mofs = pred_full_reg_offset(s, a->rm); + int gofs = pred_full_reg_offset(s, a->pg); + + if (psz == 8) { + /* Do the operation and the flags generation in temps. */ + TCGv_i64 pd = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 pn = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 pm = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 pg = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ld_i64(tcg_ctx, pn, tcg_ctx->cpu_env, nofs); + tcg_gen_ld_i64(tcg_ctx, pm, tcg_ctx->cpu_env, mofs); + tcg_gen_ld_i64(tcg_ctx, pg, tcg_ctx->cpu_env, gofs); + + gvec_op->fni8(tcg_ctx, pd, pn, pm, pg); + tcg_gen_st_i64(tcg_ctx, pd, tcg_ctx->cpu_env, dofs); + + do_predtest1(tcg_ctx, pd, pg); + + tcg_temp_free_i64(tcg_ctx, pd); + tcg_temp_free_i64(tcg_ctx, pn); + tcg_temp_free_i64(tcg_ctx, pm); + tcg_temp_free_i64(tcg_ctx, pg); + } else { + /* The operation and flags generation is large. The computation + * of the flags depends on the original contents of the guarding + * predicate. If the destination overwrites the guarding predicate, + * then the easiest way to get this right is to save a copy. 
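+ * (The vfp.preg_tmp scratch area below provides that copy when rd == pg.)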
+ */ + int tofs = gofs; + if (a->rd == a->pg) { + tofs = offsetof(CPUARMState, vfp.preg_tmp); + tcg_gen_gvec_mov(tcg_ctx, 0, tofs, gofs, psz, psz); + } + + tcg_gen_gvec_4(tcg_ctx, dofs, nofs, mofs, gofs, psz, psz, gvec_op); + do_predtest(s, dofs, tofs, psz / 8); + } + return true; +} + +static void gen_and_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) +{ + tcg_gen_and_i64(tcg_ctx, pd, pn, pm); + tcg_gen_and_i64(tcg_ctx, pd, pd, pg); +} + +static void gen_and_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, + TCGv_vec pm, TCGv_vec pg) +{ + tcg_gen_and_vec(tcg_ctx, vece, pd, pn, pm); + tcg_gen_and_vec(tcg_ctx, vece, pd, pd, pg); +} + +static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a) +{ + static const GVecGen4 op = { + .fni8 = gen_and_pg_i64, + .fniv = gen_and_pg_vec, + .fno = gen_helper_sve_and_pppp, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + if (a->s) { + return do_pppp_flags(s, a, &op); + } else if (a->rn == a->rm) { + if (a->pg == a->rn) { + return do_mov_p(s, a->rd, a->rn); + } else { + return do_vector3_p(s, tcg_gen_gvec_and, 0, a->rd, a->rn, a->pg); + } + } else if (a->pg == a->rn || a->pg == a->rm) { + return do_vector3_p(s, tcg_gen_gvec_and, 0, a->rd, a->rn, a->rm); + } else { + return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); + } +} + +static void gen_bic_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) +{ + tcg_gen_andc_i64(tcg_ctx, pd, pn, pm); + tcg_gen_and_i64(tcg_ctx, pd, pd, pg); +} + +static void gen_bic_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, + TCGv_vec pm, TCGv_vec pg) +{ + tcg_gen_andc_vec(tcg_ctx, vece, pd, pn, pm); + tcg_gen_and_vec(tcg_ctx, vece, pd, pd, pg); +} + +static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a) +{ + static const GVecGen4 op = { + .fni8 = gen_bic_pg_i64, + .fniv = gen_bic_pg_vec, + .fno = gen_helper_sve_bic_pppp, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + if (a->s) { + return do_pppp_flags(s, a, &op); + } else if (a->pg == a->rn) { + return do_vector3_p(s, tcg_gen_gvec_andc, 0, a->rd, a->rn, a->rm); + } else { + return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); + } +} + +static void gen_eor_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) +{ + tcg_gen_xor_i64(tcg_ctx, pd, pn, pm); + tcg_gen_and_i64(tcg_ctx, pd, pd, pg); +} + +static void gen_eor_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, + TCGv_vec pm, TCGv_vec pg) +{ + tcg_gen_xor_vec(tcg_ctx, vece, pd, pn, pm); + tcg_gen_and_vec(tcg_ctx, vece, pd, pd, pg); +} + +static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a) +{ + static const GVecGen4 op = { + .fni8 = gen_eor_pg_i64, + .fniv = gen_eor_pg_vec, + .fno = gen_helper_sve_eor_pppp, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + if (a->s) { + return do_pppp_flags(s, a, &op); + } else { + return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); + } +} + +static void gen_sel_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) +{ + tcg_gen_and_i64(tcg_ctx, pn, pn, pg); + tcg_gen_andc_i64(tcg_ctx, pm, pm, pg); + tcg_gen_or_i64(tcg_ctx, pd, pn, pm); +} + +static void gen_sel_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, + TCGv_vec pm, TCGv_vec pg) +{ + tcg_gen_and_vec(tcg_ctx, vece, pn, pn, pg); + tcg_gen_andc_vec(tcg_ctx, vece, pm, pm, pg); + tcg_gen_or_vec(tcg_ctx, vece, pd, pn, pm); +} + +static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a) +{ + static const GVecGen4 op = { + 
.fni8 = gen_sel_pg_i64, + .fniv = gen_sel_pg_vec, + .fno = gen_helper_sve_sel_pppp, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + if (a->s) { + return false; + } else { + return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); + } +} + +static void gen_orr_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) +{ + tcg_gen_or_i64(tcg_ctx, pd, pn, pm); + tcg_gen_and_i64(tcg_ctx, pd, pd, pg); +} + +static void gen_orr_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, + TCGv_vec pm, TCGv_vec pg) +{ + tcg_gen_or_vec(tcg_ctx, vece, pd, pn, pm); + tcg_gen_and_vec(tcg_ctx, vece, pd, pd, pg); +} + +static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a) +{ + static const GVecGen4 op = { + .fni8 = gen_orr_pg_i64, + .fniv = gen_orr_pg_vec, + .fno = gen_helper_sve_orr_pppp, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + if (a->s) { + return do_pppp_flags(s, a, &op); + } else if (a->pg == a->rn && a->rn == a->rm) { + return do_mov_p(s, a->rd, a->rn); + } else { + return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); + } +} + +static void gen_orn_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) +{ + tcg_gen_orc_i64(tcg_ctx, pd, pn, pm); + tcg_gen_and_i64(tcg_ctx, pd, pd, pg); +} + +static void gen_orn_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, + TCGv_vec pm, TCGv_vec pg) +{ + tcg_gen_orc_vec(tcg_ctx, vece, pd, pn, pm); + tcg_gen_and_vec(tcg_ctx, vece, pd, pd, pg); +} + +static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a) +{ + static const GVecGen4 op = { + .fni8 = gen_orn_pg_i64, + .fniv = gen_orn_pg_vec, + .fno = gen_helper_sve_orn_pppp, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + if (a->s) { + return do_pppp_flags(s, a, &op); + } else { + return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); + } +} + +static void gen_nor_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) +{ + tcg_gen_or_i64(tcg_ctx, pd, pn, pm); + tcg_gen_andc_i64(tcg_ctx, pd, pg, pd); +} + +static void gen_nor_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, + TCGv_vec pm, TCGv_vec pg) +{ + tcg_gen_or_vec(tcg_ctx, vece, pd, pn, pm); + tcg_gen_andc_vec(tcg_ctx, vece, pd, pg, pd); +} + +static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a) +{ + static const GVecGen4 op = { + .fni8 = gen_nor_pg_i64, + .fniv = gen_nor_pg_vec, + .fno = gen_helper_sve_nor_pppp, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + if (a->s) { + return do_pppp_flags(s, a, &op); + } else { + return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); + } +} + +static void gen_nand_pg_i64(TCGContext *tcg_ctx, TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) +{ + tcg_gen_and_i64(tcg_ctx, pd, pn, pm); + tcg_gen_andc_i64(tcg_ctx, pd, pg, pd); +} + +static void gen_nand_pg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec pd, TCGv_vec pn, + TCGv_vec pm, TCGv_vec pg) +{ + tcg_gen_and_vec(tcg_ctx, vece, pd, pn, pm); + tcg_gen_andc_vec(tcg_ctx, vece, pd, pg, pd); +} + +static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a) +{ + static const GVecGen4 op = { + .fni8 = gen_nand_pg_i64, + .fniv = gen_nand_pg_vec, + .fno = gen_helper_sve_nand_pppp, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + if (a->s) { + return do_pppp_flags(s, a, &op); + } else { + return do_vecop4_p(s, &op, a->rd, a->rn, a->rm, a->pg); + } +} + +/* + *** SVE Predicate Misc Group + */ + +static bool trans_PTEST(DisasContext *s, arg_PTEST *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + int 
nofs = pred_full_reg_offset(s, a->rn); + int gofs = pred_full_reg_offset(s, a->pg); + int words = DIV_ROUND_UP(pred_full_reg_size(s), 8); + + if (words == 1) { + TCGv_i64 pn = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 pg = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ld_i64(tcg_ctx, pn, tcg_ctx->cpu_env, nofs); + tcg_gen_ld_i64(tcg_ctx, pg, tcg_ctx->cpu_env, gofs); + do_predtest1(tcg_ctx, pn, pg); + + tcg_temp_free_i64(tcg_ctx, pn); + tcg_temp_free_i64(tcg_ctx, pg); + } else { + do_predtest(s, nofs, gofs, words); + } + } + return true; +} + +/* See the ARM pseudocode DecodePredCount. */ +static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz) +{ + unsigned elements = fullsz >> esz; + unsigned bound; + + switch (pattern) { + case 0x0: /* POW2 */ + return pow2floor(elements); + case 0x1: /* VL1 */ + case 0x2: /* VL2 */ + case 0x3: /* VL3 */ + case 0x4: /* VL4 */ + case 0x5: /* VL5 */ + case 0x6: /* VL6 */ + case 0x7: /* VL7 */ + case 0x8: /* VL8 */ + bound = pattern; + break; + case 0x9: /* VL16 */ + case 0xa: /* VL32 */ + case 0xb: /* VL64 */ + case 0xc: /* VL128 */ + case 0xd: /* VL256 */ + bound = 16 << (pattern - 9); + break; + case 0x1d: /* MUL4 */ + return elements - elements % 4; + case 0x1e: /* MUL3 */ + return elements - elements % 3; + case 0x1f: /* ALL */ + return elements; + default: /* #uimm5 */ + return 0; + } + return elements >= bound ? bound : 0; +} + +/* This handles all of the predicate initialization instructions, + * PTRUE, PFALSE, SETFFR. For PFALSE, we will have set PAT == 32 + * so that decode_pred_count returns 0. For SETFFR, we will have + * set RD == 16 == FFR. + */ +static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + unsigned fullsz = vec_full_reg_size(s); + unsigned ofs = pred_full_reg_offset(s, rd); + unsigned numelem, setsz, i; + uint64_t word, lastword; + TCGv_i64 t; + + numelem = decode_pred_count(fullsz, pat, esz); + + /* Determine what we must store into each bit, and how many. 
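+ * For example (illustrative values, not from the patch): a 256-bit + * vector gives fullsz = 32, so the predicate holds 32 significant bits; + * PTRUE with esz = MO_16 and PAT = ALL yields numelem = 16, setsz = 32 + * bits, and word = pred_esz_masks[1] = 0x5555555555555555, masked down + * to lastword = 0x55555555 for the single word stored below.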
*/ + if (numelem == 0) { + lastword = word = 0; + setsz = fullsz; + } else { + setsz = numelem << esz; + lastword = word = pred_esz_masks[esz]; + if (setsz % 64) { + lastword &= MAKE_64BIT_MASK(0, setsz % 64); + } + } + + t = tcg_temp_new_i64(tcg_ctx); + if (fullsz <= 64) { + tcg_gen_movi_i64(tcg_ctx, t, lastword); + tcg_gen_st_i64(tcg_ctx, t, tcg_ctx->cpu_env, ofs); + goto done; + } + + if (word == lastword) { + unsigned maxsz = size_for_gvec(fullsz / 8); + unsigned oprsz = size_for_gvec(setsz / 8); + + if (oprsz * 8 == setsz) { + tcg_gen_gvec_dup64i(tcg_ctx, ofs, oprsz, maxsz, word); + goto done; + } + } + + setsz /= 8; + fullsz /= 8; + + tcg_gen_movi_i64(tcg_ctx, t, word); + for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) { + tcg_gen_st_i64(tcg_ctx, t, tcg_ctx->cpu_env, ofs + i); + } + if (lastword != word) { + tcg_gen_movi_i64(tcg_ctx, t, lastword); + tcg_gen_st_i64(tcg_ctx, t, tcg_ctx->cpu_env, ofs + i); + i += 8; + } + if (i < fullsz) { + tcg_gen_movi_i64(tcg_ctx, t, 0); + for (; i < fullsz; i += 8) { + tcg_gen_st_i64(tcg_ctx, t, tcg_ctx->cpu_env, ofs + i); + } + } + + done: + tcg_temp_free_i64(tcg_ctx, t); + + /* PTRUES */ + if (setflag) { + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_NF, -(word != 0)); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, word == 0); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + } + return true; +} + +static bool trans_PTRUE(DisasContext *s, arg_PTRUE *a) +{ + return do_predset(s, a->esz, a->rd, a->pat, a->s); +} + +static bool trans_SETFFR(DisasContext *s, arg_SETFFR *a) +{ + /* Note pat == 31 is #all, to set all elements. */ + return do_predset(s, 0, FFR_PRED_NUM, 31, false); +} + +static bool trans_PFALSE(DisasContext *s, arg_PFALSE *a) +{ + /* Note pat == 32 is #unimp, to set no elements. */ + return do_predset(s, 0, a->rd, 32, false); +} + +static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a) +{ + /* The path through do_pppp_flags is complicated enough to want to avoid + * duplication. Frob the arguments into the form of a predicated AND. 
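+ * A predicated RDFFR reads the FFR under the governing predicate, + * which is exactly AND Pd.B, Pg/Z, FFR.B, FFR.B, so rebuilding the + * arguments with rn = rm = FFR_PRED_NUM lets the flag-setting form + * share do_pppp_flags via trans_AND_pppp below.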
+ */ + arg_rprr_s alt_a = { + .rd = a->rd, .pg = a->pg, .s = a->s, + .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM, + }; + return trans_AND_pppp(s, &alt_a); +} + +static bool trans_RDFFR(DisasContext *s, arg_RDFFR *a) +{ + return do_mov_p(s, a->rd, FFR_PRED_NUM); +} + +static bool trans_WRFFR(DisasContext *s, arg_WRFFR *a) +{ + return do_mov_p(s, FFR_PRED_NUM, a->rn); +} + +static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, + void (*gen_fn)(TCGContext *, TCGv_i32, TCGv_ptr, + TCGv_ptr, TCGv_i32)) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + TCGv_ptr t_pd = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr t_pg = tcg_temp_new_ptr(tcg_ctx); + TCGv_i32 t; + unsigned desc; + + desc = DIV_ROUND_UP(pred_full_reg_size(s), 8); + desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz); + + tcg_gen_addi_ptr(tcg_ctx, t_pd, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); + tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rn)); + t = tcg_const_i32(tcg_ctx, desc); + + gen_fn(tcg_ctx, t, t_pd, t_pg, t); + tcg_temp_free_ptr(tcg_ctx, t_pd); + tcg_temp_free_ptr(tcg_ctx, t_pg); + + do_pred_flags(tcg_ctx, t); + tcg_temp_free_i32(tcg_ctx, t); + return true; +} + +static bool trans_PFIRST(DisasContext *s, arg_rr_esz *a) +{ + return do_pfirst_pnext(s, a, gen_helper_sve_pfirst); +} + +static bool trans_PNEXT(DisasContext *s, arg_rr_esz *a) +{ + return do_pfirst_pnext(s, a, gen_helper_sve_pnext); +} + +/* + *** SVE Element Count Group + */ + +/* Perform an inline saturating addition of a 32-bit value within + * a 64-bit register. The second operand is known to be positive, + * which halves the comparisons we must perform to bound the result. + */ +static void do_sat_addsub_32(TCGContext *tcg_ctx, TCGv_i64 reg, TCGv_i64 val, bool u, bool d) +{ + int64_t ibound; + TCGv_i64 bound; + TCGCond cond; + + /* Use normal 64-bit arithmetic to detect 32-bit overflow. */ + if (u) { + tcg_gen_ext32u_i64(tcg_ctx, reg, reg); + } else { + tcg_gen_ext32s_i64(tcg_ctx, reg, reg); + } + if (d) { + tcg_gen_sub_i64(tcg_ctx, reg, reg, val); + ibound = (u ? 0 : INT32_MIN); + cond = TCG_COND_LT; + } else { + tcg_gen_add_i64(tcg_ctx, reg, reg, val); + ibound = (u ? UINT32_MAX : INT32_MAX); + cond = TCG_COND_GT; + } + bound = tcg_const_i64(tcg_ctx, ibound); + tcg_gen_movcond_i64(tcg_ctx, cond, reg, reg, bound, bound, reg); + tcg_temp_free_i64(tcg_ctx, bound); +} + +/* Similarly with 64-bit values. */ +static void do_sat_addsub_64(TCGContext *tcg_ctx, TCGv_i64 reg, TCGv_i64 val, bool u, bool d) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2; + + if (u) { + if (d) { + tcg_gen_sub_i64(tcg_ctx, t0, reg, val); + tcg_gen_movi_i64(tcg_ctx, t1, 0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, reg, reg, val, t1, t0); + } else { + tcg_gen_add_i64(tcg_ctx, t0, reg, val); + tcg_gen_movi_i64(tcg_ctx, t1, -1); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, reg, t0, reg, t1, t0); + } + } else { + if (d) { + /* Detect signed overflow for subtraction. */ + tcg_gen_xor_i64(tcg_ctx, t0, reg, val); + tcg_gen_sub_i64(tcg_ctx, t1, reg, val); + tcg_gen_xor_i64(tcg_ctx, reg, reg, t1); + tcg_gen_and_i64(tcg_ctx, t0, t0, reg); + + /* Bound the result. */ + tcg_gen_movi_i64(tcg_ctx, reg, INT64_MIN); + t2 = tcg_const_i64(tcg_ctx, 0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, reg, t0, t2, reg, t1); + } else { + /* Detect signed overflow for addition.
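+ * This is the usual two's-complement identity: overflow occurred iff + * the operands agree in sign (bit 63 of reg ^ val clear) while the + * result disagrees with them (bit 63 of result ^ val set); the andc + * below leaves exactly that predicate in t0's sign bit, and since + * val > 0 any overflow saturates toward INT64_MAX.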
*/ + tcg_gen_xor_i64(tcg_ctx, t0, reg, val); + tcg_gen_add_i64(tcg_ctx, reg, reg, val); + tcg_gen_xor_i64(tcg_ctx, t1, reg, val); + tcg_gen_andc_i64(tcg_ctx, t0, t1, t0); + + /* Bound the result. */ + tcg_gen_movi_i64(tcg_ctx, t1, INT64_MAX); + t2 = tcg_const_i64(tcg_ctx, 0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, reg, t0, t2, t1, reg); + } + tcg_temp_free_i64(tcg_ctx, t2); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* Similarly with a vector and a scalar operand. */ +static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, + TCGv_i64 val, bool u, bool d) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr dptr, nptr; + TCGv_i32 t32, desc; + TCGv_i64 t64; + + dptr = tcg_temp_new_ptr(tcg_ctx); + nptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, dptr, tcg_ctx->cpu_env, vec_full_reg_offset(s, rd)); + tcg_gen_addi_ptr(tcg_ctx, nptr, tcg_ctx->cpu_env, vec_full_reg_offset(s, rn)); + desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); + + switch (esz) { + case MO_8: + t32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, t32, val); + if (d) { + tcg_gen_neg_i32(tcg_ctx, t32, t32); + } + if (u) { + gen_helper_sve_uqaddi_b(tcg_ctx, dptr, nptr, t32, desc); + } else { + gen_helper_sve_sqaddi_b(tcg_ctx, dptr, nptr, t32, desc); + } + tcg_temp_free_i32(tcg_ctx, t32); + break; + + case MO_16: + t32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, t32, val); + if (d) { + tcg_gen_neg_i32(tcg_ctx, t32, t32); + } + if (u) { + gen_helper_sve_uqaddi_h(tcg_ctx, dptr, nptr, t32, desc); + } else { + gen_helper_sve_sqaddi_h(tcg_ctx, dptr, nptr, t32, desc); + } + tcg_temp_free_i32(tcg_ctx, t32); + break; + + case MO_32: + t64 = tcg_temp_new_i64(tcg_ctx); + if (d) { + tcg_gen_neg_i64(tcg_ctx, t64, val); + } else { + tcg_gen_mov_i64(tcg_ctx, t64, val); + } + if (u) { + gen_helper_sve_uqaddi_s(tcg_ctx, dptr, nptr, t64, desc); + } else { + gen_helper_sve_sqaddi_s(tcg_ctx, dptr, nptr, t64, desc); + } + tcg_temp_free_i64(tcg_ctx, t64); + break; + + case MO_64: + if (u) { + if (d) { + gen_helper_sve_uqsubi_d(tcg_ctx, dptr, nptr, val, desc); + } else { + gen_helper_sve_uqaddi_d(tcg_ctx, dptr, nptr, val, desc); + } + } else if (d) { + t64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_neg_i64(tcg_ctx, t64, val); + gen_helper_sve_sqaddi_d(tcg_ctx, dptr, nptr, t64, desc); + tcg_temp_free_i64(tcg_ctx, t64); + } else { + gen_helper_sve_sqaddi_d(tcg_ctx, dptr, nptr, val, desc); + } + break; + + default: + g_assert_not_reached(); + } + + tcg_temp_free_ptr(tcg_ctx, dptr); + tcg_temp_free_ptr(tcg_ctx, nptr); + tcg_temp_free_i32(tcg_ctx, desc); +} + +static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + unsigned fullsz = vec_full_reg_size(s); + unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); + tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, a->rd), numelem * a->imm); + } + return true; +} + +static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + unsigned fullsz = vec_full_reg_size(s); + unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); + int inc = numelem * a->imm * (a->d ? 
-1 : 1); + TCGv_i64 reg = cpu_reg(s, a->rd); + + tcg_gen_addi_i64(tcg_ctx, reg, reg, inc); + } + return true; +} + +static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + unsigned fullsz = vec_full_reg_size(s); + unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); + int inc = numelem * a->imm; + TCGv_i64 reg = cpu_reg(s, a->rd); + + /* Use normal 64-bit arithmetic to detect 32-bit overflow. */ + if (inc == 0) { + if (a->u) { + tcg_gen_ext32u_i64(tcg_ctx, reg, reg); + } else { + tcg_gen_ext32s_i64(tcg_ctx, reg, reg); + } + } else { + TCGv_i64 t = tcg_const_i64(tcg_ctx, inc); + do_sat_addsub_32(tcg_ctx, reg, t, a->u, a->d); + tcg_temp_free_i64(tcg_ctx, t); + } + return true; +} + +static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + unsigned fullsz = vec_full_reg_size(s); + unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); + int inc = numelem * a->imm; + TCGv_i64 reg = cpu_reg(s, a->rd); + + if (inc != 0) { + TCGv_i64 t = tcg_const_i64(tcg_ctx, inc); + do_sat_addsub_64(tcg_ctx, reg, t, a->u, a->d); + tcg_temp_free_i64(tcg_ctx, t); + } + return true; +} + +static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0) { + return false; + } + + unsigned fullsz = vec_full_reg_size(s); + unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); + int inc = numelem * a->imm; + + if (inc != 0) { + if (sve_access_check(s)) { + TCGv_i64 t = tcg_const_i64(tcg_ctx, a->d ? -inc : inc); + tcg_gen_gvec_adds(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + t, fullsz, fullsz); + tcg_temp_free_i64(tcg_ctx, t); + } + } else { + do_mov_z(s, a->rd, a->rn); + } + return true; +} + +static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0) { + return false; + } + + unsigned fullsz = vec_full_reg_size(s); + unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); + int inc = numelem * a->imm; + + if (inc != 0) { + if (sve_access_check(s)) { + TCGv_i64 t = tcg_const_i64(tcg_ctx, inc); + do_sat_addsub_vec(s, a->esz, a->rd, a->rn, t, a->u, a->d); + tcg_temp_free_i64(tcg_ctx, t); + } + } else { + do_mov_z(s, a->rd, a->rn); + } + return true; +} + +/* + *** SVE Bitwise Immediate Group + */ + +static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint64_t imm; + if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), + extract32(a->dbm, 0, 6), + extract32(a->dbm, 6, 6))) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + gvec_fn(tcg_ctx, MO_64, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), imm, vsz, vsz); + } + return true; +} + +static bool trans_AND_zzi(DisasContext *s, arg_rr_dbm *a) +{ + return do_zz_dbm(s, a, tcg_gen_gvec_andi); +} + +static bool trans_ORR_zzi(DisasContext *s, arg_rr_dbm *a) +{ + return do_zz_dbm(s, a, tcg_gen_gvec_ori); +} + +static bool trans_EOR_zzi(DisasContext *s, arg_rr_dbm *a) +{ + return do_zz_dbm(s, a, tcg_gen_gvec_xori); +} + +static bool trans_DUPM(DisasContext *s, arg_DUPM *a) +{ + uint64_t imm; + if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), + extract32(a->dbm, 0, 6), + extract32(a->dbm, 6, 6))) { + return false; + } + if 
(sve_access_check(s)) { + do_dupi_z(s, a->rd, imm); + } + return true; +} + +/* + *** SVE Integer Wide Immediate - Predicated Group + */ + +/* Implement all merging copies. This is used for CPY (immediate), + * FCPY, CPY (scalar), CPY (SIMD&FP scalar). + */ +static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg, + TCGv_i64 val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + typedef void gen_cpy(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); + static gen_cpy * const fns[4] = { + gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h, + gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d, + }; + unsigned vsz = vec_full_reg_size(s); + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); + TCGv_ptr t_zd = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr t_zn = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr t_pg = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, t_zd, tcg_ctx->cpu_env, vec_full_reg_offset(s, rd)); + tcg_gen_addi_ptr(tcg_ctx, t_zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, rn)); + tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); + + fns[esz](tcg_ctx, t_zd, t_zn, t_pg, val, desc); + + tcg_temp_free_ptr(tcg_ctx, t_zd); + tcg_temp_free_ptr(tcg_ctx, t_zn); + tcg_temp_free_ptr(tcg_ctx, t_pg); + tcg_temp_free_i32(tcg_ctx, desc); +} + +static bool trans_FCPY(DisasContext *s, arg_FCPY *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + /* Decode the VFP immediate. */ + uint64_t imm = vfp_expand_imm(a->esz, a->imm); + TCGv_i64 t_imm = tcg_const_i64(tcg_ctx, imm); + do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, t_imm); + tcg_temp_free_i64(tcg_ctx, t_imm); + } + return true; +} + +static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0 && extract32(s->insn, 13, 1)) { + return false; + } + if (sve_access_check(s)) { + TCGv_i64 t_imm = tcg_const_i64(tcg_ctx, a->imm); + do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, t_imm); + tcg_temp_free_i64(tcg_ctx, t_imm); + } + return true; +} + +static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_2i * const fns[4] = { + gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h, + gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d, + }; + + if (a->esz == 0 && extract32(s->insn, 13, 1)) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_i64 t_imm = tcg_const_i64(tcg_ctx, a->imm); + tcg_gen_gvec_2i_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + pred_full_reg_offset(s, a->pg), + t_imm, vsz, vsz, 0, fns[a->esz]); + tcg_temp_free_i64(tcg_ctx, t_imm); + } + return true; +} + +/* + *** SVE Permute Extract Group + */ + +static bool trans_EXT(DisasContext *s, arg_EXT *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + unsigned vsz = vec_full_reg_size(s); + unsigned n_ofs = a->imm >= vsz ? 0 : a->imm; + unsigned n_siz = vsz - n_ofs; + unsigned d = vec_full_reg_offset(s, a->rd); + unsigned n = vec_full_reg_offset(s, a->rn); + unsigned m = vec_full_reg_offset(s, a->rm); + + /* Use host vector move insns if we have appropriate sizes + * and no unfortunate overlap. 
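+ * For instance (illustrative sizes): vsz = 32 and imm = 8 give + * n_ofs = 8 and n_siz = 24, i.e. bytes 8..31 of Zn followed by bytes + * 0..7 of Zm; when d == n the fast path additionally requires + * n_siz <= n_ofs so the first move cannot overlap the source bytes + * it still needs to read.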
+ */ + if (m != d + && n_ofs == size_for_gvec(n_ofs) + && n_siz == size_for_gvec(n_siz) + && (d != n || n_siz <= n_ofs)) { + tcg_gen_gvec_mov(tcg_ctx, 0, d, n + n_ofs, n_siz, n_siz); + if (n_ofs != 0) { + tcg_gen_gvec_mov(tcg_ctx, 0, d + n_siz, m, n_ofs, n_ofs); + } + } else { + tcg_gen_gvec_3_ool(tcg_ctx, d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext); + } + return true; +} + +/* + *** SVE Permute - Unpredicated Group + */ + +static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_dup_i64(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), + vsz, vsz, cpu_reg_sp(s, a->rn)); + } + return true; +} + +static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if ((a->imm & 0x1f) == 0) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + unsigned dofs = vec_full_reg_offset(s, a->rd); + unsigned esz, index; + + esz = ctz32(a->imm); + index = a->imm >> (esz + 1); + + if ((index << esz) < vsz) { + unsigned nofs = vec_reg_offset(s, a->rn, index, esz); + tcg_gen_gvec_dup_mem(tcg_ctx, esz, dofs, nofs, vsz, vsz); + } else { + tcg_gen_gvec_dup64i(tcg_ctx, dofs, vsz, vsz, 0); + } + } + return true; +} + +static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + typedef void gen_insr(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); + static gen_insr * const fns[4] = { + gen_helper_sve_insr_b, gen_helper_sve_insr_h, + gen_helper_sve_insr_s, gen_helper_sve_insr_d, + }; + unsigned vsz = vec_full_reg_size(s); + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); + TCGv_ptr t_zd = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr t_zn = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, t_zd, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rd)); + tcg_gen_addi_ptr(tcg_ctx, t_zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rn)); + + fns[a->esz](tcg_ctx, t_zd, t_zn, val, desc); + + tcg_temp_free_ptr(tcg_ctx, t_zd); + tcg_temp_free_ptr(tcg_ctx, t_zn); + tcg_temp_free_i32(tcg_ctx, desc); +} + +static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, t, tcg_ctx->cpu_env, vec_reg_offset(s, a->rm, 0, MO_64)); + do_insr_i64(s, a, t); + tcg_temp_free_i64(tcg_ctx, t); + } + return true; +} + +static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a) +{ + if (sve_access_check(s)) { + do_insr_i64(s, a, cpu_reg(s, a->rm)); + } + return true; +} + +static bool trans_REV_v(DisasContext *s, arg_rr_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_2 * const fns[4] = { + gen_helper_sve_rev_b, gen_helper_sve_rev_h, + gen_helper_sve_rev_s, gen_helper_sve_rev_d + }; + + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_2_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vsz, vsz, 0, fns[a->esz]); + } + return true; +} + +static bool trans_TBL(DisasContext *s, arg_rrr_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_3 * const fns[4] = { + gen_helper_sve_tbl_b, gen_helper_sve_tbl_h, + gen_helper_sve_tbl_s, gen_helper_sve_tbl_d + }; + + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, 
a->rm), + vsz, vsz, 0, fns[a->esz]); + } + return true; +} + +static bool trans_UNPK(DisasContext *s, arg_UNPK *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_2 * const fns[4][2] = { + { NULL, NULL }, + { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h }, + { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s }, + { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d }, + }; + + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_2_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn) + + (a->h ? vsz / 2 : 0), + vsz, vsz, 0, fns[a->esz][a->u]); + } + return true; +} + +/* + *** SVE Permute - Predicates Group + */ + +static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd, + gen_helper_gvec_3 *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + unsigned vsz = pred_full_reg_size(s); + + /* Predicate sizes may be smaller and cannot use simd_desc. + We cannot round up, as we do elsewhere, because we need + the exact size for ZIP2 and REV. We retain the style for + the other helpers for consistency. */ + TCGv_ptr t_d = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr t_n = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr t_m = tcg_temp_new_ptr(tcg_ctx); + TCGv_i32 t_desc; + int desc; + + desc = vsz - 2; + desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz); + desc = deposit32(desc, SIMD_DATA_SHIFT + 2, 2, high_odd); + + tcg_gen_addi_ptr(tcg_ctx, t_d, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); + tcg_gen_addi_ptr(tcg_ctx, t_n, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rn)); + tcg_gen_addi_ptr(tcg_ctx, t_m, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rm)); + t_desc = tcg_const_i32(tcg_ctx, desc); + + fn(tcg_ctx, t_d, t_n, t_m, t_desc); + + tcg_temp_free_ptr(tcg_ctx, t_d); + tcg_temp_free_ptr(tcg_ctx, t_n); + tcg_temp_free_ptr(tcg_ctx, t_m); + tcg_temp_free_i32(tcg_ctx, t_desc); + return true; +} + +static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd, + gen_helper_gvec_2 *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + unsigned vsz = pred_full_reg_size(s); + TCGv_ptr t_d = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr t_n = tcg_temp_new_ptr(tcg_ctx); + TCGv_i32 t_desc; + int desc; + + tcg_gen_addi_ptr(tcg_ctx, t_d, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); + tcg_gen_addi_ptr(tcg_ctx, t_n, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rn)); + + /* Predicate sizes may be smaller and cannot use simd_desc. + We cannot round up, as we do elsewhere, because we need + the exact size for ZIP2 and REV. We retain the style for + the other helpers for consistency. 
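+ * Concretely, the low bits of desc carry the predicate size in bytes + * biased by 2 (mirroring simd_desc's bias), and the two SIMD_DATA + * fields deposited below carry esz and the high/odd selector; the + * helpers add the 2 back to recover the true size.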
*/ + + desc = vsz - 2; + desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz); + desc = deposit32(desc, SIMD_DATA_SHIFT + 2, 2, high_odd); + t_desc = tcg_const_i32(tcg_ctx, desc); + + fn(tcg_ctx, t_d, t_n, t_desc); + + tcg_temp_free_i32(tcg_ctx, t_desc); + tcg_temp_free_ptr(tcg_ctx, t_d); + tcg_temp_free_ptr(tcg_ctx, t_n); + return true; +} + +static bool trans_ZIP1_p(DisasContext *s, arg_rrr_esz *a) +{ + return do_perm_pred3(s, a, 0, gen_helper_sve_zip_p); +} + +static bool trans_ZIP2_p(DisasContext *s, arg_rrr_esz *a) +{ + return do_perm_pred3(s, a, 1, gen_helper_sve_zip_p); +} + +static bool trans_UZP1_p(DisasContext *s, arg_rrr_esz *a) +{ + return do_perm_pred3(s, a, 0, gen_helper_sve_uzp_p); +} + +static bool trans_UZP2_p(DisasContext *s, arg_rrr_esz *a) +{ + return do_perm_pred3(s, a, 1, gen_helper_sve_uzp_p); +} + +static bool trans_TRN1_p(DisasContext *s, arg_rrr_esz *a) +{ + return do_perm_pred3(s, a, 0, gen_helper_sve_trn_p); +} + +static bool trans_TRN2_p(DisasContext *s, arg_rrr_esz *a) +{ + return do_perm_pred3(s, a, 1, gen_helper_sve_trn_p); +} + +static bool trans_REV_p(DisasContext *s, arg_rr_esz *a) +{ + return do_perm_pred2(s, a, 0, gen_helper_sve_rev_p); +} + +static bool trans_PUNPKLO(DisasContext *s, arg_PUNPKLO *a) +{ + return do_perm_pred2(s, a, 0, gen_helper_sve_punpk_p); +} + +static bool trans_PUNPKHI(DisasContext *s, arg_PUNPKHI *a) +{ + return do_perm_pred2(s, a, 1, gen_helper_sve_punpk_p); +} + +/* + *** SVE Permute - Interleaving Group + */ + +static bool do_zip(DisasContext *s, arg_rrr_esz *a, bool high) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_3 * const fns[4] = { + gen_helper_sve_zip_b, gen_helper_sve_zip_h, + gen_helper_sve_zip_s, gen_helper_sve_zip_d, + }; + + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + unsigned high_ofs = high ? 
vsz / 2 : 0; + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn) + high_ofs, + vec_full_reg_offset(s, a->rm) + high_ofs, + vsz, vsz, 0, fns[a->esz]); + } + return true; +} + +static bool do_zzz_data_ool(DisasContext *s, arg_rrr_esz *a, int data, + gen_helper_gvec_3 *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + vsz, vsz, data, fn); + } + return true; +} + +static bool trans_ZIP1_z(DisasContext *s, arg_rrr_esz *a) +{ + return do_zip(s, a, false); +} + +static bool trans_ZIP2_z(DisasContext *s, arg_rrr_esz *a) +{ + return do_zip(s, a, true); +} + +static gen_helper_gvec_3 * const uzp_fns[4] = { + gen_helper_sve_uzp_b, gen_helper_sve_uzp_h, + gen_helper_sve_uzp_s, gen_helper_sve_uzp_d, +}; + +static bool trans_UZP1_z(DisasContext *s, arg_rrr_esz *a) +{ + return do_zzz_data_ool(s, a, 0, uzp_fns[a->esz]); +} + +static bool trans_UZP2_z(DisasContext *s, arg_rrr_esz *a) +{ + return do_zzz_data_ool(s, a, 1 << a->esz, uzp_fns[a->esz]); +} + +static gen_helper_gvec_3 * const trn_fns[4] = { + gen_helper_sve_trn_b, gen_helper_sve_trn_h, + gen_helper_sve_trn_s, gen_helper_sve_trn_d, +}; + +static bool trans_TRN1_z(DisasContext *s, arg_rrr_esz *a) +{ + return do_zzz_data_ool(s, a, 0, trn_fns[a->esz]); +} + +static bool trans_TRN2_z(DisasContext *s, arg_rrr_esz *a) +{ + return do_zzz_data_ool(s, a, 1 << a->esz, trn_fns[a->esz]); +} + +/* + *** SVE Permute Vector - Predicated Group + */ + +static bool trans_COMPACT(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d + }; + return do_zpz_ool(s, a, fns[a->esz]); +} + +/* Call the helper that computes the ARM LastActiveElement pseudocode + * function, scaled by the element size. This includes the not found + * indication; e.g. not found for esz=3 is -8. + */ +static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Predicate sizes may be smaller and cannot use simd_desc. We cannot + * round up, as we do elsewhere, because we need the exact size. + */ + TCGv_ptr t_p = tcg_temp_new_ptr(tcg_ctx); + TCGv_i32 t_desc; + unsigned vsz = pred_full_reg_size(s); + unsigned desc; + + desc = vsz - 2; + desc = deposit32(desc, SIMD_DATA_SHIFT, 2, esz); + + tcg_gen_addi_ptr(tcg_ctx, t_p, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); + t_desc = tcg_const_i32(tcg_ctx, desc); + + gen_helper_sve_last_active_element(tcg_ctx, ret, t_p, t_desc); + + tcg_temp_free_i32(tcg_ctx, t_desc); + tcg_temp_free_ptr(tcg_ctx, t_p); +} + +/* Increment LAST to the offset of the next element in the vector, + * wrapping around to 0. + */ +static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + + tcg_gen_addi_i32(tcg_ctx, last, last, 1 << esz); + if (is_power_of_2(vsz)) { + tcg_gen_andi_i32(tcg_ctx, last, last, vsz - 1); + } else { + TCGv_i32 max = tcg_const_i32(tcg_ctx, vsz); + TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GEU, last, last, max, zero, last); + tcg_temp_free_i32(tcg_ctx, max); + tcg_temp_free_i32(tcg_ctx, zero); + } +} + +/* If LAST < 0, set LAST to the offset of the last element in the vector. 
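+ * e.g. (illustrative) with vsz = 48 and esz = MO_32, the not-found + * value -4 becomes 44, the byte offset of the last 32-bit element; + * for power-of-2 vsz the same wrap falls out of the andi path.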
*/ +static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + + if (is_power_of_2(vsz)) { + tcg_gen_andi_i32(tcg_ctx, last, last, vsz - 1); + } else { + TCGv_i32 max = tcg_const_i32(tcg_ctx, vsz - (1 << esz)); + TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, last, last, zero, max, last); + tcg_temp_free_i32(tcg_ctx, max); + tcg_temp_free_i32(tcg_ctx, zero); + } +} + +/* Load an unsigned element of ESZ from BASE+OFS. */ +static TCGv_i64 load_esz(TCGContext *tcg_ctx, TCGv_ptr base, int ofs, int esz) +{ + TCGv_i64 r = tcg_temp_new_i64(tcg_ctx); + + switch (esz) { + case 0: + tcg_gen_ld8u_i64(tcg_ctx, r, base, ofs); + break; + case 1: + tcg_gen_ld16u_i64(tcg_ctx, r, base, ofs); + break; + case 2: + tcg_gen_ld32u_i64(tcg_ctx, r, base, ofs); + break; + case 3: + tcg_gen_ld_i64(tcg_ctx, r, base, ofs); + break; + default: + g_assert_not_reached(); + } + return r; +} + +/* Load an unsigned element of ESZ from RM[LAST]. */ +static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last, + int rm, int esz) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr p = tcg_temp_new_ptr(tcg_ctx); + TCGv_i64 r; + + /* Convert offset into vector into offset into ENV. + * The final adjustment for the vector register base + * is added via constant offset to the load. + */ +#ifdef HOST_WORDS_BIGENDIAN + /* Adjust for element ordering. See vec_reg_offset. */ + if (esz < 3) { + tcg_gen_xori_i32(tcg_ctx, last, last, 8 - (1 << esz)); + } +#endif + tcg_gen_ext_i32_ptr(tcg_ctx, p, last); + tcg_gen_add_ptr(tcg_ctx, p, p, tcg_ctx->cpu_env); + + r = load_esz(tcg_ctx, p, vec_full_reg_offset(s, rm), esz); + tcg_temp_free_ptr(tcg_ctx, p); + + return r; +} + +/* Compute CLAST for a Zreg. */ +static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 last; + TCGLabel *over; + TCGv_i64 ele; + unsigned vsz, esz = a->esz; + + if (!sve_access_check(s)) { + return true; + } + + last = tcg_temp_local_new_i32(tcg_ctx); + over = gen_new_label(tcg_ctx); + + find_last_active(s, last, esz, a->pg); + + /* There is of course no movcond for a 2048-bit vector, + * so we must branch over the actual store. + */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, last, 0, over); + + if (!before) { + incr_last_active(s, last, esz); + } + + ele = load_last_active(s, last, a->rm, esz); + tcg_temp_free_i32(tcg_ctx, last); + + vsz = vec_full_reg_size(s); + tcg_gen_gvec_dup_i64(tcg_ctx, esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele); + tcg_temp_free_i64(tcg_ctx, ele); + + /* If this insn used MOVPRFX, we may need a second move. */ + if (a->rd != a->rn) { + TCGLabel *done = gen_new_label(tcg_ctx); + tcg_gen_br(tcg_ctx, done); + + gen_set_label(tcg_ctx, over); + do_mov_z(s, a->rd, a->rn); + + gen_set_label(tcg_ctx, done); + } else { + gen_set_label(tcg_ctx, over); + } + return true; +} + +static bool trans_CLASTA_z(DisasContext *s, arg_rprr_esz *a) +{ + return do_clast_vector(s, a, false); +} + +static bool trans_CLASTB_z(DisasContext *s, arg_rprr_esz *a) +{ + return do_clast_vector(s, a, true); +} + +/* Compute CLAST for a scalar. */ +static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm, + bool before, TCGv_i64 reg_val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 last = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 ele, cmp, zero; + + find_last_active(s, last, esz, pg); + + /* Extend the original value of last prior to incrementing. 
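+ * The sign of the pre-increment value is what distinguishes found + * from not found, so it is widened to 64 bits here; incr_last_active + * below may wrap last to a small non-negative offset, after which + * that information would be lost.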
*/ + cmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext_i32_i64(tcg_ctx, cmp, last); + + if (!before) { + incr_last_active(s, last, esz); + } + + /* The conceit here is that while last < 0 indicates not found, after + * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address + * from which we can load garbage. We then discard the garbage with + * a conditional move. + */ + ele = load_last_active(s, last, rm, esz); + tcg_temp_free_i32(tcg_ctx, last); + + zero = tcg_const_i64(tcg_ctx, 0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, reg_val, cmp, zero, ele, reg_val); + + tcg_temp_free_i64(tcg_ctx, zero); + tcg_temp_free_i64(tcg_ctx, cmp); + tcg_temp_free_i64(tcg_ctx, ele); +} + +/* Compute CLAST for a Vreg. */ +static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + int esz = a->esz; + int ofs = vec_reg_offset(s, a->rd, 0, esz); + TCGv_i64 reg = load_esz(tcg_ctx, tcg_ctx->cpu_env, ofs, esz); + + do_clast_scalar(s, esz, a->pg, a->rn, before, reg); + write_fp_dreg(s, a->rd, reg); + tcg_temp_free_i64(tcg_ctx, reg); + } + return true; +} + +static bool trans_CLASTA_v(DisasContext *s, arg_rpr_esz *a) +{ + return do_clast_fp(s, a, false); +} + +static bool trans_CLASTB_v(DisasContext *s, arg_rpr_esz *a) +{ + return do_clast_fp(s, a, true); +} + +/* Compute CLAST for a Xreg. */ +static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 reg; + + if (!sve_access_check(s)) { + return true; + } + + reg = cpu_reg(s, a->rd); + switch (a->esz) { + case 0: + tcg_gen_ext8u_i64(tcg_ctx, reg, reg); + break; + case 1: + tcg_gen_ext16u_i64(tcg_ctx, reg, reg); + break; + case 2: + tcg_gen_ext32u_i64(tcg_ctx, reg, reg); + break; + case 3: + break; + default: + g_assert_not_reached(); + } + + do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg); + return true; +} + +static bool trans_CLASTA_r(DisasContext *s, arg_rpr_esz *a) +{ + return do_clast_general(s, a, false); +} + +static bool trans_CLASTB_r(DisasContext *s, arg_rpr_esz *a) +{ + return do_clast_general(s, a, true); +} + +/* Compute LAST for a scalar. */ +static TCGv_i64 do_last_scalar(DisasContext *s, int esz, + int pg, int rm, bool before) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 last = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 ret; + + find_last_active(s, last, esz, pg); + if (before) { + wrap_last_active(s, last, esz); + } else { + incr_last_active(s, last, esz); + } + + ret = load_last_active(s, last, rm, esz); + tcg_temp_free_i32(tcg_ctx, last); + return ret; +} + +/* Compute LAST for a Vreg. */ +static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); + write_fp_dreg(s, a->rd, val); + tcg_temp_free_i64(tcg_ctx, val); + } + return true; +} + +static bool trans_LASTA_v(DisasContext *s, arg_rpr_esz *a) +{ + return do_last_fp(s, a, false); +} + +static bool trans_LASTB_v(DisasContext *s, arg_rpr_esz *a) +{ + return do_last_fp(s, a, true); +} + +/* Compute LAST for a Xreg. 
*/ +static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); + tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, a->rd), val); + tcg_temp_free_i64(tcg_ctx, val); + } + return true; +} + +static bool trans_LASTA_r(DisasContext *s, arg_rpr_esz *a) +{ + return do_last_general(s, a, false); +} + +static bool trans_LASTB_r(DisasContext *s, arg_rpr_esz *a) +{ + return do_last_general(s, a, true); +} + +static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a) +{ + if (sve_access_check(s)) { + do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn)); + } + return true; +} + +static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + int ofs = vec_reg_offset(s, a->rn, 0, a->esz); + TCGv_i64 t = load_esz(tcg_ctx, tcg_ctx->cpu_env, ofs, a->esz); + do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t); + tcg_temp_free_i64(tcg_ctx, t); + } + return true; +} + +static bool trans_REVB(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + NULL, + gen_helper_sve_revb_h, + gen_helper_sve_revb_s, + gen_helper_sve_revb_d, + }; + return do_zpz_ool(s, a, fns[a->esz]); +} + +static bool trans_REVH(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + NULL, + NULL, + gen_helper_sve_revh_s, + gen_helper_sve_revh_d, + }; + return do_zpz_ool(s, a, fns[a->esz]); +} + +static bool trans_REVW(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_revw_d : NULL); +} + +static bool trans_RBIT(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3 * const fns[4] = { + gen_helper_sve_rbit_b, + gen_helper_sve_rbit_h, + gen_helper_sve_rbit_s, + gen_helper_sve_rbit_d, + }; + return do_zpz_ool(s, a, fns[a->esz]); +} + +static bool trans_SPLICE(DisasContext *s, arg_rprr_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_4_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + pred_full_reg_offset(s, a->pg), + vsz, vsz, a->esz, gen_helper_sve_splice); + } + return true; +} + +/* + *** SVE Integer Compare - Vectors Group + */ + +static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a, + gen_helper_gvec_flags_4 *gen_fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr pd, zn, zm, pg; + unsigned vsz; + TCGv_i32 t; + + if (gen_fn == NULL) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + vsz = vec_full_reg_size(s); + t = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); + pd = tcg_temp_new_ptr(tcg_ctx); + zn = tcg_temp_new_ptr(tcg_ctx); + zm = tcg_temp_new_ptr(tcg_ctx); + pg = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, pd, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); + tcg_gen_addi_ptr(tcg_ctx, zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rn)); + tcg_gen_addi_ptr(tcg_ctx, zm, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rm)); + tcg_gen_addi_ptr(tcg_ctx, pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); + + gen_fn(tcg_ctx, t, pd, zn, zm, pg, t); + + tcg_temp_free_ptr(tcg_ctx, pd); + tcg_temp_free_ptr(tcg_ctx, zn); + tcg_temp_free_ptr(tcg_ctx, zm); + tcg_temp_free_ptr(tcg_ctx, pg); + + do_pred_flags(tcg_ctx, t); + + tcg_temp_free_i32(tcg_ctx, t); + return true; +} + +#define DO_PPZZ(NAME, name) \ +static bool 
trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \ +{ \ + static gen_helper_gvec_flags_4 * const fns[4] = { \ + gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \ + gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \ + }; \ + return do_ppzz_flags(s, a, fns[a->esz]); \ +} + +DO_PPZZ(CMPEQ, cmpeq) +DO_PPZZ(CMPNE, cmpne) +DO_PPZZ(CMPGT, cmpgt) +DO_PPZZ(CMPGE, cmpge) +DO_PPZZ(CMPHI, cmphi) +DO_PPZZ(CMPHS, cmphs) + +#undef DO_PPZZ + +#define DO_PPZW(NAME, name) \ +static bool trans_##NAME##_ppzw(DisasContext *s, arg_rprr_esz *a) \ +{ \ + static gen_helper_gvec_flags_4 * const fns[4] = { \ + gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \ + gen_helper_sve_##name##_ppzw_s, NULL \ + }; \ + return do_ppzz_flags(s, a, fns[a->esz]); \ +} + +DO_PPZW(CMPEQ, cmpeq) +DO_PPZW(CMPNE, cmpne) +DO_PPZW(CMPGT, cmpgt) +DO_PPZW(CMPGE, cmpge) +DO_PPZW(CMPHI, cmphi) +DO_PPZW(CMPHS, cmphs) +DO_PPZW(CMPLT, cmplt) +DO_PPZW(CMPLE, cmple) +DO_PPZW(CMPLO, cmplo) +DO_PPZW(CMPLS, cmpls) + +#undef DO_PPZW + +/* + *** SVE Integer Compare - Immediate Groups + */ + +static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a, + gen_helper_gvec_flags_3 *gen_fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr pd, zn, pg; + unsigned vsz; + TCGv_i32 t; + + if (gen_fn == NULL) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + vsz = vec_full_reg_size(s); + t = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, a->imm)); + pd = tcg_temp_new_ptr(tcg_ctx); + zn = tcg_temp_new_ptr(tcg_ctx); + pg = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, pd, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); + tcg_gen_addi_ptr(tcg_ctx, zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rn)); + tcg_gen_addi_ptr(tcg_ctx, pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); + + gen_fn(tcg_ctx, t, pd, zn, pg, t); + + tcg_temp_free_ptr(tcg_ctx, pd); + tcg_temp_free_ptr(tcg_ctx, zn); + tcg_temp_free_ptr(tcg_ctx, pg); + + do_pred_flags(tcg_ctx, t); + + tcg_temp_free_i32(tcg_ctx, t); + return true; +} + +#define DO_PPZI(NAME, name) \ +static bool trans_##NAME##_ppzi(DisasContext *s, arg_rpri_esz *a) \ +{ \ + static gen_helper_gvec_flags_3 * const fns[4] = { \ + gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \ + gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \ + }; \ + return do_ppzi_flags(s, a, fns[a->esz]); \ +} + +DO_PPZI(CMPEQ, cmpeq) +DO_PPZI(CMPNE, cmpne) +DO_PPZI(CMPGT, cmpgt) +DO_PPZI(CMPGE, cmpge) +DO_PPZI(CMPHI, cmphi) +DO_PPZI(CMPHS, cmphs) +DO_PPZI(CMPLT, cmplt) +DO_PPZI(CMPLE, cmple) +DO_PPZI(CMPLO, cmplo) +DO_PPZI(CMPLS, cmpls) + +#undef DO_PPZI + +/* + *** SVE Partition Break Group + */ + +static bool do_brk3(DisasContext *s, arg_rprr_s *a, + gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + unsigned vsz = pred_full_reg_size(s); + + /* Predicate sizes may be smaller and cannot use simd_desc. 
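+ * (simd_desc insists on an operation size that is a multiple of + * 8 bytes, while a predicate may be as small as 2 bytes, hence the + * hand-packed vsz - 2 descriptor in t below.)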
*/ + TCGv_ptr d = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr n = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr m = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr g = tcg_temp_new_ptr(tcg_ctx); + TCGv_i32 t = tcg_const_i32(tcg_ctx, vsz - 2); + + tcg_gen_addi_ptr(tcg_ctx, d, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); + tcg_gen_addi_ptr(tcg_ctx, n, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rn)); + tcg_gen_addi_ptr(tcg_ctx, m, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rm)); + tcg_gen_addi_ptr(tcg_ctx, g, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); + + if (a->s) { + fn_s(tcg_ctx, t, d, n, m, g, t); + do_pred_flags(tcg_ctx, t); + } else { + fn(tcg_ctx, d, n, m, g, t); + } + tcg_temp_free_ptr(tcg_ctx, d); + tcg_temp_free_ptr(tcg_ctx, n); + tcg_temp_free_ptr(tcg_ctx, m); + tcg_temp_free_ptr(tcg_ctx, g); + tcg_temp_free_i32(tcg_ctx, t); + return true; +} + +static bool do_brk2(DisasContext *s, arg_rpr_s *a, + gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + unsigned vsz = pred_full_reg_size(s); + + /* Predicate sizes may be smaller and cannot use simd_desc. */ + TCGv_ptr d = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr n = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr g = tcg_temp_new_ptr(tcg_ctx); + TCGv_i32 t = tcg_const_i32(tcg_ctx, vsz - 2); + + tcg_gen_addi_ptr(tcg_ctx, d, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); + tcg_gen_addi_ptr(tcg_ctx, n, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rn)); + tcg_gen_addi_ptr(tcg_ctx, g, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); + + if (a->s) { + fn_s(tcg_ctx, t, d, n, g, t); + do_pred_flags(tcg_ctx, t); + } else { + fn(tcg_ctx, d, n, g, t); + } + tcg_temp_free_ptr(tcg_ctx, d); + tcg_temp_free_ptr(tcg_ctx, n); + tcg_temp_free_ptr(tcg_ctx, g); + tcg_temp_free_i32(tcg_ctx, t); + return true; +} + +static bool trans_BRKPA(DisasContext *s, arg_rprr_s *a) +{ + return do_brk3(s, a, gen_helper_sve_brkpa, gen_helper_sve_brkpas); +} + +static bool trans_BRKPB(DisasContext *s, arg_rprr_s *a) +{ + return do_brk3(s, a, gen_helper_sve_brkpb, gen_helper_sve_brkpbs); +} + +static bool trans_BRKA_m(DisasContext *s, arg_rpr_s *a) +{ + return do_brk2(s, a, gen_helper_sve_brka_m, gen_helper_sve_brkas_m); +} + +static bool trans_BRKB_m(DisasContext *s, arg_rpr_s *a) +{ + return do_brk2(s, a, gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m); +} + +static bool trans_BRKA_z(DisasContext *s, arg_rpr_s *a) +{ + return do_brk2(s, a, gen_helper_sve_brka_z, gen_helper_sve_brkas_z); +} + +static bool trans_BRKB_z(DisasContext *s, arg_rpr_s *a) +{ + return do_brk2(s, a, gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z); +} + +static bool trans_BRKN(DisasContext *s, arg_rpr_s *a) +{ + return do_brk2(s, a, gen_helper_sve_brkn, gen_helper_sve_brkns); +} + +/* + *** SVE Predicate Count Group + */ + +static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned psz = pred_full_reg_size(s); + + if (psz <= 8) { + uint64_t psz_mask; + + tcg_gen_ld_i64(tcg_ctx, val, tcg_ctx->cpu_env, pred_full_reg_offset(s, pn)); + if (pn != pg) { + TCGv_i64 g = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, g, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); + tcg_gen_and_i64(tcg_ctx, val, val, g); + tcg_temp_free_i64(tcg_ctx, g); + } + + /* Reduce the pred_esz_masks value simply to reduce the + * size of the code generated here. 
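+ * e.g. (illustrative) with psz = 2 and esz = MO_64 the constant + * shrinks from 0x0101010101010101 to 0x0101, which hosts can + * typically encode as a much smaller immediate.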
+ */ + psz_mask = MAKE_64BIT_MASK(0, psz * 8); + tcg_gen_andi_i64(tcg_ctx, val, val, pred_esz_masks[esz] & psz_mask); + + tcg_gen_ctpop_i64(tcg_ctx, val, val); + } else { + TCGv_ptr t_pn = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr t_pg = tcg_temp_new_ptr(tcg_ctx); + unsigned desc; + TCGv_i32 t_desc; + + desc = psz - 2; + desc = deposit32(desc, SIMD_DATA_SHIFT, 2, esz); + + tcg_gen_addi_ptr(tcg_ctx, t_pn, tcg_ctx->cpu_env, pred_full_reg_offset(s, pn)); + tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); + t_desc = tcg_const_i32(tcg_ctx, desc); + + gen_helper_sve_cntp(tcg_ctx, val, t_pn, t_pg, t_desc); + tcg_temp_free_ptr(tcg_ctx, t_pn); + tcg_temp_free_ptr(tcg_ctx, t_pg); + tcg_temp_free_i32(tcg_ctx, t_desc); + } +} + +static bool trans_CNTP(DisasContext *s, arg_CNTP *a) +{ + if (sve_access_check(s)) { + do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg); + } + return true; +} + +static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 reg = cpu_reg(s, a->rd); + TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); + + do_cntp(s, val, a->esz, a->pg, a->pg); + if (a->d) { + tcg_gen_sub_i64(tcg_ctx, reg, reg, val); + } else { + tcg_gen_add_i64(tcg_ctx, reg, reg, val); + } + tcg_temp_free_i64(tcg_ctx, val); + } + return true; +} + +static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); + GVecGen2sFn *gvec_fn = a->d ? tcg_gen_gvec_subs : tcg_gen_gvec_adds; + + do_cntp(s, val, a->esz, a->pg, a->pg); + gvec_fn(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), val, vsz, vsz); + } + return true; +} + +static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 reg = cpu_reg(s, a->rd); + TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); + + do_cntp(s, val, a->esz, a->pg, a->pg); + do_sat_addsub_32(tcg_ctx, reg, val, a->u, a->d); + } + return true; +} + +static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 reg = cpu_reg(s, a->rd); + TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); + + do_cntp(s, val, a->esz, a->pg, a->pg); + do_sat_addsub_64(tcg_ctx, reg, val, a->u, a->d); + } + return true; +} + +static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); + do_cntp(s, val, a->esz, a->pg, a->pg); + do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d); + } + return true; +} + +/* + *** SVE Integer Compare Scalars Group + */ + +static bool trans_CTERM(DisasContext *s, arg_CTERM *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ); + TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf); + TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf); + TCGv_i64 cmp = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_setcond_i64(tcg_ctx, cond, cmp, rn, rm); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_NF, cmp); + tcg_temp_free_i64(tcg_ctx, cmp); + + /* VF = !NF & !CF. 
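+ * Per the CTERM pseudocode, C is left unchanged and V is set only + * when the comparison is false and the carry is clear; NF still + * holds the raw 0/1 setcond result at this point, so the xori/andc + * pair below computes the expression directly.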
*/ + tcg_gen_xori_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, 1); + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tcg_ctx->cpu_CF); + + /* Both NF and VF actually look at bit 31. */ + tcg_gen_neg_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_NF); + tcg_gen_neg_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF); + return true; +} + +static bool trans_WHILE(DisasContext *s, arg_WHILE *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 op0, op1, t0, t1, tmax; + TCGv_i32 t2, t3; + TCGv_ptr ptr; + unsigned desc, vsz = vec_full_reg_size(s); + TCGCond cond; + + if (!sve_access_check(s)) { + return true; + } + + op0 = read_cpu_reg(s, a->rn, 1); + op1 = read_cpu_reg(s, a->rm, 1); + + if (!a->sf) { + if (a->u) { + tcg_gen_ext32u_i64(tcg_ctx, op0, op0); + tcg_gen_ext32u_i64(tcg_ctx, op1, op1); + } else { + tcg_gen_ext32s_i64(tcg_ctx, op0, op0); + tcg_gen_ext32s_i64(tcg_ctx, op1, op1); + } + } + + /* For the helper, compress the different conditions into a computation + * of how many iterations for which the condition is true. + */ + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_sub_i64(tcg_ctx, t0, op1, op0); + + tmax = tcg_const_i64(tcg_ctx, vsz >> a->esz); + if (a->eq) { + /* Equality means one more iteration. */ + tcg_gen_addi_i64(tcg_ctx, t0, t0, 1); + + /* If op1 is max (un)signed integer (and the only time the addition + * above could overflow), then we produce an all-true predicate by + * setting the count to the vector length. This is because the + * pseudocode is described as an increment + compare loop, and the + * max integer would always compare true. + */ + tcg_gen_movi_i64(tcg_ctx, t1, (a->sf + ? (a->u ? UINT64_MAX : INT64_MAX) + : (a->u ? UINT32_MAX : INT32_MAX))); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, op1, t1, tmax, t0); + } + + /* Bound to the maximum. */ + tcg_gen_umin_i64(tcg_ctx, t0, t0, tmax); + tcg_temp_free_i64(tcg_ctx, tmax); + + /* Set the count to zero if the condition is false. */ + cond = (a->u + ? (a->eq ? TCG_COND_LEU : TCG_COND_LTU) + : (a->eq ? TCG_COND_LE : TCG_COND_LT)); + tcg_gen_movi_i64(tcg_ctx, t1, 0); + tcg_gen_movcond_i64(tcg_ctx, cond, t0, op0, op1, t0, t1); + tcg_temp_free_i64(tcg_ctx, t1); + + /* Since we're bounded, pass as a 32-bit type. */ + t2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, t2, t0); + tcg_temp_free_i64(tcg_ctx, t0); + + /* Scale elements to bits. */ + tcg_gen_shli_i32(tcg_ctx, t2, t2, a->esz); + + desc = (vsz / 8) - 2; + desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz); + t3 = tcg_const_i32(tcg_ctx, desc); + + ptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, ptr, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->rd)); + + gen_helper_sve_while(tcg_ctx, t2, ptr, t2, t3); + do_pred_flags(tcg_ctx, t2); + + tcg_temp_free_ptr(tcg_ctx, ptr); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + return true; +} + +/* + *** SVE Integer Wide Immediate - Unpredicated Group + */ + +static bool trans_FDUP(DisasContext *s, arg_FDUP *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + int dofs = vec_full_reg_offset(s, a->rd); + uint64_t imm; + + /* Decode the VFP immediate. 
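+ * e.g. (illustrative) for esz = MO_32 the 8-bit encoding 0x70 + * expands to float32 1.0 (0x3f800000), and dup_const below then + * replicates it into both 32-bit lanes of the 64-bit immediate.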
*/ + imm = vfp_expand_imm(a->esz, a->imm); + imm = dup_const(a->esz, imm); + + tcg_gen_gvec_dup64i(tcg_ctx, dofs, vsz, vsz, imm); + } + return true; +} + +static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0 && extract32(s->insn, 13, 1)) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + int dofs = vec_full_reg_offset(s, a->rd); + + tcg_gen_gvec_dup64i(tcg_ctx, dofs, vsz, vsz, dup_const(a->esz, a->imm)); + } + return true; +} + +static bool trans_ADD_zzi(DisasContext *s, arg_rri_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0 && extract32(s->insn, 13, 1)) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_addi(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz); + } + return true; +} + +static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a) +{ + a->imm = -a->imm; + return trans_ADD_zzi(s, a); +} + +static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 }; + static const GVecGen2s op[4] = { + { .fni8 = tcg_gen_vec_sub8_i64, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_sve_subri_b, + .opt_opc = vecop_list, + .vece = MO_8, + .scalar_first = true }, + { .fni8 = tcg_gen_vec_sub16_i64, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_sve_subri_h, + .opt_opc = vecop_list, + .vece = MO_16, + .scalar_first = true }, + { .fni4 = tcg_gen_sub_i32, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_sve_subri_s, + .opt_opc = vecop_list, + .vece = MO_32, + .scalar_first = true }, + { .fni8 = tcg_gen_sub_i64, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_sve_subri_d, + .opt_opc = vecop_list, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64, + .scalar_first = true } + }; + + if (a->esz == 0 && extract32(s->insn, 13, 1)) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_i64 c = tcg_const_i64(tcg_ctx, a->imm); + tcg_gen_gvec_2s(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vsz, vsz, c, &op[a->esz]); + tcg_temp_free_i64(tcg_ctx, c); + } + return true; +} + +static bool trans_MUL_zzi(DisasContext *s, arg_rri_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_muli(tcg_ctx, a->esz, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz); + } + return true; +} + +static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0 && extract32(s->insn, 13, 1)) { + return false; + } + if (sve_access_check(s)) { + TCGv_i64 val = tcg_const_i64(tcg_ctx, a->imm); + do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, u, d); + tcg_temp_free_i64(tcg_ctx, val); + } + return true; +} + +static bool trans_SQADD_zzi(DisasContext *s, arg_rri_esz *a) +{ + return do_zzi_sat(s, a, false, false); +} + +static bool trans_UQADD_zzi(DisasContext *s, arg_rri_esz *a) +{ + return do_zzi_sat(s, a, true, false); +} + +static bool trans_SQSUB_zzi(DisasContext *s, arg_rri_esz *a) +{ + return do_zzi_sat(s, a, false, true); +} + +static bool trans_UQSUB_zzi(DisasContext *s, arg_rri_esz *a) +{ + return do_zzi_sat(s, a, true, true); +} + +static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + 
if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_i64 c = tcg_const_i64(tcg_ctx, a->imm); + + tcg_gen_gvec_2i_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + c, vsz, vsz, 0, fn); + tcg_temp_free_i64(tcg_ctx, c); + } + return true; +} + +#define DO_ZZI(NAME, name) \ +static bool trans_##NAME##_zzi(DisasContext *s, arg_rri_esz *a) \ +{ \ + static gen_helper_gvec_2i * const fns[4] = { \ + gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \ + gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \ + }; \ + return do_zzi_ool(s, a, fns[a->esz]); \ +} + +DO_ZZI(SMAX, smax) +DO_ZZI(UMAX, umax) +DO_ZZI(SMIN, smin) +DO_ZZI(UMIN, umin) + +#undef DO_ZZI + +static bool trans_DOT_zzz(DisasContext *s, arg_DOT_zzz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_3 * const fns[2][2] = { + { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h }, + { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h } + }; + + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + vsz, vsz, 0, fns[a->u][a->sz]); + } + return true; +} + +static bool trans_DOT_zzx(DisasContext *s, arg_DOT_zzx *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_3 * const fns[2][2] = { + { gen_helper_gvec_sdot_idx_b, gen_helper_gvec_sdot_idx_h }, + { gen_helper_gvec_udot_idx_b, gen_helper_gvec_udot_idx_h } + }; + + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + vsz, vsz, a->index, fns[a->u][a->sz]); + } + return true; +} + + +/* + *** SVE Floating Point Multiply-Add Indexed Group + */ + +static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_4_ptr * const fns[3] = { + gen_helper_gvec_fmla_idx_h, + gen_helper_gvec_fmla_idx_s, + gen_helper_gvec_fmla_idx_d, + }; + + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + tcg_gen_gvec_4_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + vec_full_reg_offset(s, a->ra), + status, vsz, vsz, (a->index << 1) | a->sub, + fns[a->esz - 1]); + tcg_temp_free_ptr(tcg_ctx, status); + } + return true; +} + +/* + *** SVE Floating Point Multiply Indexed Group + */ + +static bool trans_FMUL_zzx(DisasContext *s, arg_FMUL_zzx *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_3_ptr * const fns[3] = { + gen_helper_gvec_fmul_idx_h, + gen_helper_gvec_fmul_idx_s, + gen_helper_gvec_fmul_idx_d, + }; + + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + status, vsz, vsz, a->index, fns[a->esz - 1]); + tcg_temp_free_ptr(tcg_ctx, status); + } + return true; +} + +/* + *** SVE Floating Point Fast Reduction Group + */ + +typedef void gen_helper_fp_reduce(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_ptr, + TCGv_ptr, TCGv_i32); + +static void do_reduce(DisasContext *s, arg_rpr_esz *a, + gen_helper_fp_reduce *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + unsigned p2vsz = pow2ceil(vsz); 
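+ /* The out-of-line reduction folds a power-of-2 region pairwise, so + * advertise pow2ceil(vsz) as maxsz; the helper pads the tail beyond + * vsz with the operation's identity element before folding. + */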
+ TCGv_i32 t_desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, p2vsz, 0)); + TCGv_ptr t_zn, t_pg, status; + TCGv_i64 temp; + + temp = tcg_temp_new_i64(tcg_ctx); + t_zn = tcg_temp_new_ptr(tcg_ctx); + t_pg = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, t_zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rn)); + tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); + status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + + fn(tcg_ctx, temp, t_zn, t_pg, status, t_desc); + tcg_temp_free_ptr(tcg_ctx, t_zn); + tcg_temp_free_ptr(tcg_ctx, t_pg); + tcg_temp_free_ptr(tcg_ctx, status); + tcg_temp_free_i32(tcg_ctx, t_desc); + + write_fp_dreg(s, a->rd, temp); + tcg_temp_free_i64(tcg_ctx, temp); +} + +#define DO_VPZ(NAME, name) \ +static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ +{ \ + static gen_helper_fp_reduce * const fns[3] = { \ + gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, \ + gen_helper_sve_##name##_d, \ + }; \ + if (a->esz == 0) { \ + return false; \ + } \ + if (sve_access_check(s)) { \ + do_reduce(s, a, fns[a->esz - 1]); \ + } \ + return true; \ +} + +DO_VPZ(FADDV, faddv) +DO_VPZ(FMINNMV, fminnmv) +DO_VPZ(FMAXNMV, fmaxnmv) +DO_VPZ(FMINV, fminv) +DO_VPZ(FMAXV, fmaxv) + +/* + *** SVE Floating Point Unary Operations - Unpredicated Group + */ + +static void do_zz_fp(DisasContext *s, arg_rr_esz *a, gen_helper_gvec_2_ptr *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + + tcg_gen_gvec_2_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + status, vsz, vsz, 0, fn); + tcg_temp_free_ptr(tcg_ctx, status); +} + +static bool trans_FRECPE(DisasContext *s, arg_rr_esz *a) +{ + static gen_helper_gvec_2_ptr * const fns[3] = { + gen_helper_gvec_frecpe_h, + gen_helper_gvec_frecpe_s, + gen_helper_gvec_frecpe_d, + }; + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + do_zz_fp(s, a, fns[a->esz - 1]); + } + return true; +} + +static bool trans_FRSQRTE(DisasContext *s, arg_rr_esz *a) +{ + static gen_helper_gvec_2_ptr * const fns[3] = { + gen_helper_gvec_frsqrte_h, + gen_helper_gvec_frsqrte_s, + gen_helper_gvec_frsqrte_d, + }; + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + do_zz_fp(s, a, fns[a->esz - 1]); + } + return true; +} + +/* + *** SVE Floating Point Compare with Zero Group + */ + +static void do_ppz_fp(DisasContext *s, arg_rpr_esz *a, + gen_helper_gvec_3_ptr *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + + tcg_gen_gvec_3_ptr(tcg_ctx, pred_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + pred_full_reg_offset(s, a->pg), + status, vsz, vsz, 0, fn); + tcg_temp_free_ptr(tcg_ctx, status); +} + +#define DO_PPZ(NAME, name) \ +static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \ +{ \ + static gen_helper_gvec_3_ptr * const fns[3] = { \ + gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, \ + gen_helper_sve_##name##_d, \ + }; \ + if (a->esz == 0) { \ + return false; \ + } \ + if (sve_access_check(s)) { \ + do_ppz_fp(s, a, fns[a->esz - 1]); \ + } \ + return true; \ +} + +DO_PPZ(FCMGE_ppz0, fcmge0) +DO_PPZ(FCMGT_ppz0, fcmgt0) +DO_PPZ(FCMLE_ppz0, fcmle0) +DO_PPZ(FCMLT_ppz0, fcmlt0) +DO_PPZ(FCMEQ_ppz0, fcmeq0) +DO_PPZ(FCMNE_ppz0, fcmne0) + +#undef DO_PPZ + +/* + *** SVE floating-point trig multiply-add coefficient + */ + +static bool trans_FTMAD(DisasContext *s, 
arg_FTMAD *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_3_ptr * const fns[3] = { + gen_helper_sve_ftmad_h, + gen_helper_sve_ftmad_s, + gen_helper_sve_ftmad_d, + }; + + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + status, vsz, vsz, a->imm, fns[a->esz - 1]); + tcg_temp_free_ptr(tcg_ctx, status); + } + return true; +} + +/* + *** SVE Floating Point Accumulating Reduction Group + */ + +static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + typedef void fadda_fn(TCGContext *, TCGv_i64, TCGv_i64, TCGv_ptr, + TCGv_ptr, TCGv_ptr, TCGv_i32); + static fadda_fn * const fns[3] = { + gen_helper_sve_fadda_h, + gen_helper_sve_fadda_s, + gen_helper_sve_fadda_d, + }; + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr t_rm, t_pg, t_fpst; + TCGv_i64 t_val; + TCGv_i32 t_desc; + + if (a->esz == 0) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + t_val = load_esz(tcg_ctx, tcg_ctx->cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz); + t_rm = tcg_temp_new_ptr(tcg_ctx); + t_pg = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, t_rm, tcg_ctx->cpu_env, vec_full_reg_offset(s, a->rm)); + tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); + t_fpst = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + t_desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); + + fns[a->esz - 1](tcg_ctx, t_val, t_val, t_rm, t_pg, t_fpst, t_desc); + + tcg_temp_free_i32(tcg_ctx, t_desc); + tcg_temp_free_ptr(tcg_ctx, t_fpst); + tcg_temp_free_ptr(tcg_ctx, t_pg); + tcg_temp_free_ptr(tcg_ctx, t_rm); + + write_fp_dreg(s, a->rd, t_val); + tcg_temp_free_i64(tcg_ctx, t_val); + return true; +} + +/* + *** SVE Floating Point Arithmetic - Unpredicated Group + */ + +static bool do_zzz_fp(DisasContext *s, arg_rrr_esz *a, + gen_helper_gvec_3_ptr *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + status, vsz, vsz, 0, fn); + tcg_temp_free_ptr(tcg_ctx, status); + } + return true; +} + + +#define DO_FP3(NAME, name) \ +static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \ +{ \ + static gen_helper_gvec_3_ptr * const fns[4] = { \ + NULL, gen_helper_gvec_##name##_h, \ + gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ + }; \ + return do_zzz_fp(s, a, fns[a->esz]); \ +} + +DO_FP3(FADD_zzz, fadd) +DO_FP3(FSUB_zzz, fsub) +DO_FP3(FMUL_zzz, fmul) +DO_FP3(FTSMUL, ftsmul) +DO_FP3(FRECPS, recps) +DO_FP3(FRSQRTS, rsqrts) + +#undef DO_FP3 + +/* + *** SVE Floating Point Arithmetic - Predicated Group + */ + +static bool do_zpzz_fp(DisasContext *s, arg_rprr_esz *a, + gen_helper_gvec_4_ptr *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + tcg_gen_gvec_4_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + pred_full_reg_offset(s, a->pg), + status, vsz, vsz, 0, fn);
+ tcg_temp_free_ptr(tcg_ctx, status); + } + return true; +} + +#define DO_FP3(NAME, name) \ +static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \ +{ \ + static gen_helper_gvec_4_ptr * const fns[4] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ + }; \ + return do_zpzz_fp(s, a, fns[a->esz]); \ +} + +DO_FP3(FADD_zpzz, fadd) +DO_FP3(FSUB_zpzz, fsub) +DO_FP3(FMUL_zpzz, fmul) +DO_FP3(FMIN_zpzz, fmin) +DO_FP3(FMAX_zpzz, fmax) +DO_FP3(FMINNM_zpzz, fminnum) +DO_FP3(FMAXNM_zpzz, fmaxnum) +DO_FP3(FABD, fabd) +DO_FP3(FSCALE, fscalbn) +DO_FP3(FDIV, fdiv) +DO_FP3(FMULX, fmulx) + +#undef DO_FP3 + +typedef void gen_helper_sve_fp2scalar(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr, + TCGv_i64, TCGv_ptr, TCGv_i32); + +static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16, + TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr t_zd, t_zn, t_pg, status; + TCGv_i32 desc; + + t_zd = tcg_temp_new_ptr(tcg_ctx); + t_zn = tcg_temp_new_ptr(tcg_ctx); + t_pg = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, t_zd, tcg_ctx->cpu_env, vec_full_reg_offset(s, zd)); + tcg_gen_addi_ptr(tcg_ctx, t_zn, tcg_ctx->cpu_env, vec_full_reg_offset(s, zn)); + tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); + + status = get_fpstatus_ptr(tcg_ctx, is_fp16); + desc = tcg_const_i32(tcg_ctx, simd_desc(vsz, vsz, 0)); + fn(tcg_ctx, t_zd, t_zn, t_pg, scalar, status, desc); + + tcg_temp_free_i32(tcg_ctx, desc); + tcg_temp_free_ptr(tcg_ctx, status); + tcg_temp_free_ptr(tcg_ctx, t_pg); + tcg_temp_free_ptr(tcg_ctx, t_zn); + tcg_temp_free_ptr(tcg_ctx, t_zd); +} + +static void do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, + gen_helper_sve_fp2scalar *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 temp = tcg_const_i64(tcg_ctx, imm); + do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16, temp, fn); + tcg_temp_free_i64(tcg_ctx, temp); +} + +#define DO_FP_IMM(NAME, name, const0, const1) \ +static bool trans_##NAME##_zpzi(DisasContext *s, arg_rpri_esz *a) \ +{ \ + static gen_helper_sve_fp2scalar * const fns[3] = { \ + gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, \ + gen_helper_sve_##name##_d \ + }; \ + static uint64_t const val[3][2] = { \ + { float16_##const0, float16_##const1 }, \ + { float32_##const0, float32_##const1 }, \ + { float64_##const0, float64_##const1 }, \ + }; \ + if (a->esz == 0) { \ + return false; \ + } \ + if (sve_access_check(s)) { \ + do_fp_imm(s, a, val[a->esz - 1][a->imm], fns[a->esz - 1]); \ + } \ + return true; \ +} + +#define float16_two make_float16(0x4000) +#define float32_two make_float32(0x40000000) +#define float64_two make_float64(0x4000000000000000ULL) + +DO_FP_IMM(FADD, fadds, half, one) +DO_FP_IMM(FSUB, fsubs, half, one) +DO_FP_IMM(FMUL, fmuls, half, two) +DO_FP_IMM(FSUBR, fsubrs, half, one) +DO_FP_IMM(FMAXNM, fmaxnms, zero, one) +DO_FP_IMM(FMINNM, fminnms, zero, one) +DO_FP_IMM(FMAX, fmaxs, zero, one) +DO_FP_IMM(FMIN, fmins, zero, one) + +#undef DO_FP_IMM + +static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a, + gen_helper_gvec_4_ptr *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (fn == NULL) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + tcg_gen_gvec_4_ptr(tcg_ctx, pred_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + 
pred_full_reg_offset(s, a->pg), + status, vsz, vsz, 0, fn); + tcg_temp_free_ptr(tcg_ctx, status); + } + return true; +} + +#define DO_FPCMP(NAME, name) \ +static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \ +{ \ + static gen_helper_gvec_4_ptr * const fns[4] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ + }; \ + return do_fp_cmp(s, a, fns[a->esz]); \ +} + +DO_FPCMP(FCMGE, fcmge) +DO_FPCMP(FCMGT, fcmgt) +DO_FPCMP(FCMEQ, fcmeq) +DO_FPCMP(FCMNE, fcmne) +DO_FPCMP(FCMUO, fcmuo) +DO_FPCMP(FACGE, facge) +DO_FPCMP(FACGT, facgt) + +#undef DO_FPCMP + +static bool trans_FCADD(DisasContext *s, arg_FCADD *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_4_ptr * const fns[3] = { + gen_helper_sve_fcadd_h, + gen_helper_sve_fcadd_s, + gen_helper_sve_fcadd_d + }; + + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + tcg_gen_gvec_4_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + pred_full_reg_offset(s, a->pg), + status, vsz, vsz, a->rot, fns[a->esz - 1]); + tcg_temp_free_ptr(tcg_ctx, status); + } + return true; +} + +typedef void gen_helper_sve_fmla(TCGContext *, TCGv_env, TCGv_ptr, TCGv_i32); + +static bool do_fmla(DisasContext *s, arg_rprrr_esz *a, gen_helper_sve_fmla *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (fn == NULL) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + unsigned vsz = vec_full_reg_size(s); + unsigned desc; + TCGv_i32 t_desc; + TCGv_ptr pg = tcg_temp_new_ptr(tcg_ctx); + + /* We would need 7 operands to pass these arguments "properly". + * So we encode all the register numbers into the descriptor. + */ + desc = deposit32(a->rd, 5, 5, a->rn); + desc = deposit32(desc, 10, 5, a->rm); + desc = deposit32(desc, 15, 5, a->ra); + desc = simd_desc(vsz, vsz, desc); + + t_desc = tcg_const_i32(tcg_ctx, desc); + tcg_gen_addi_ptr(tcg_ctx, pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); + fn(tcg_ctx, tcg_ctx->cpu_env, pg, t_desc); + tcg_temp_free_i32(tcg_ctx, t_desc); + tcg_temp_free_ptr(tcg_ctx, pg); + return true; +} + +#define DO_FMLA(NAME, name) \ +static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \ +{ \ + static gen_helper_sve_fmla * const fns[4] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ + }; \ + return do_fmla(s, a, fns[a->esz]); \ +} + +DO_FMLA(FMLA_zpzzz, fmla_zpzzz) +DO_FMLA(FMLS_zpzzz, fmls_zpzzz) +DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz) +DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz) + +#undef DO_FMLA + +static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_sve_fmla * const fns[3] = { + gen_helper_sve_fcmla_zpzzz_h, + gen_helper_sve_fcmla_zpzzz_s, + gen_helper_sve_fcmla_zpzzz_d, + }; + + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + unsigned desc; + TCGv_i32 t_desc; + TCGv_ptr pg = tcg_temp_new_ptr(tcg_ctx); + + /* We would need 7 operands to pass these arguments "properly". + * So we encode all the register numbers into the descriptor. 
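+ * Sketch of the layout built by the deposit32() calls below: + * bits [4:0] = rd, [9:5] = rn, [14:10] = rm, [19:15] = ra, + * [21:20] = rot, sign-extended from 22 bits before simd_desc().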
+ */ + desc = deposit32(a->rd, 5, 5, a->rn); + desc = deposit32(desc, 10, 5, a->rm); + desc = deposit32(desc, 15, 5, a->ra); + desc = deposit32(desc, 20, 2, a->rot); + desc = sextract32(desc, 0, 22); + desc = simd_desc(vsz, vsz, desc); + + t_desc = tcg_const_i32(tcg_ctx, desc); + tcg_gen_addi_ptr(tcg_ctx, pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); + fns[a->esz - 1](tcg_ctx, tcg_ctx->cpu_env, pg, t_desc); + tcg_temp_free_i32(tcg_ctx, t_desc); + tcg_temp_free_ptr(tcg_ctx, pg); + } + return true; +} + +static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_3_ptr * const fns[2] = { + gen_helper_gvec_fcmlah_idx, + gen_helper_gvec_fcmlas_idx, + }; + + tcg_debug_assert(a->esz == 1 || a->esz == 2); + tcg_debug_assert(a->rd == a->ra); + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + status, vsz, vsz, + a->index * 4 + a->rot, + fns[a->esz - 1]); + tcg_temp_free_ptr(tcg_ctx, status); + } + return true; +} + +/* + *** SVE Floating Point Unary Operations Predicated Group + */ + +static bool do_zpz_ptr(DisasContext *s, int rd, int rn, int pg, + bool is_fp16, gen_helper_gvec_3_ptr *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, is_fp16); + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + pred_full_reg_offset(s, pg), + status, vsz, vsz, 0, fn); + tcg_temp_free_ptr(tcg_ctx, status); + } + return true; +} + +static bool trans_FCVT_sh(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sh); +} + +static bool trans_FCVT_hs(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hs); +} + +static bool trans_FCVT_dh(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_dh); +} + +static bool trans_FCVT_hd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hd); +} + +static bool trans_FCVT_ds(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_ds); +} + +static bool trans_FCVT_sd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sd); +} + +static bool trans_FCVTZS_hh(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hh); +} + +static bool trans_FCVTZU_hh(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hh); +} + +static bool trans_FCVTZS_hs(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hs); +} + +static bool trans_FCVTZU_hs(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hs); +} + +static bool trans_FCVTZS_hd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hd); +} + +static bool trans_FCVTZU_hd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hd); +} + +static bool trans_FCVTZS_ss(DisasContext 
*s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ss); +} + +static bool trans_FCVTZU_ss(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ss); +} + +static bool trans_FCVTZS_sd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_sd); +} + +static bool trans_FCVTZU_sd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_sd); +} + +static bool trans_FCVTZS_ds(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ds); +} + +static bool trans_FCVTZU_ds(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ds); +} + +static bool trans_FCVTZS_dd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_dd); +} + +static bool trans_FCVTZU_dd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_dd); +} + +static gen_helper_gvec_3_ptr * const frint_fns[3] = { + gen_helper_sve_frint_h, + gen_helper_sve_frint_s, + gen_helper_sve_frint_d +}; + +static bool trans_FRINTI(DisasContext *s, arg_rpr_esz *a) +{ + if (a->esz == 0) { + return false; + } + return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, + frint_fns[a->esz - 1]); +} + +static bool trans_FRINTX(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3_ptr * const fns[3] = { + gen_helper_sve_frintx_h, + gen_helper_sve_frintx_s, + gen_helper_sve_frintx_d + }; + if (a->esz == 0) { + return false; + } + return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]); +} + +static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, int mode) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->esz == 0) { + return false; + } + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + TCGv_i32 tmode = tcg_const_i32(tcg_ctx, mode); + TCGv_ptr status = get_fpstatus_ptr(tcg_ctx, a->esz == MO_16); + + gen_helper_set_rmode(tcg_ctx, tmode, tmode, status); + + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + pred_full_reg_offset(s, a->pg), + status, vsz, vsz, 0, frint_fns[a->esz - 1]); + + gen_helper_set_rmode(tcg_ctx, tmode, tmode, status); + tcg_temp_free_i32(tcg_ctx, tmode); + tcg_temp_free_ptr(tcg_ctx, status); + } + return true; +} + +static bool trans_FRINTN(DisasContext *s, arg_rpr_esz *a) +{ + return do_frint_mode(s, a, float_round_nearest_even); +} + +static bool trans_FRINTP(DisasContext *s, arg_rpr_esz *a) +{ + return do_frint_mode(s, a, float_round_up); +} + +static bool trans_FRINTM(DisasContext *s, arg_rpr_esz *a) +{ + return do_frint_mode(s, a, float_round_down); +} + +static bool trans_FRINTZ(DisasContext *s, arg_rpr_esz *a) +{ + return do_frint_mode(s, a, float_round_to_zero); +} + +static bool trans_FRINTA(DisasContext *s, arg_rpr_esz *a) +{ + return do_frint_mode(s, a, float_round_ties_away); +} + +static bool trans_FRECPX(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3_ptr * const fns[3] = { + gen_helper_sve_frecpx_h, + gen_helper_sve_frecpx_s, + gen_helper_sve_frecpx_d + }; + if (a->esz == 0) { + return false; + } + return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]); +} + +static bool trans_FSQRT(DisasContext *s, arg_rpr_esz *a) +{ + static gen_helper_gvec_3_ptr * const fns[3] = { + 
gen_helper_sve_fsqrt_h, + gen_helper_sve_fsqrt_s, + gen_helper_sve_fsqrt_d + }; + if (a->esz == 0) { + return false; + } + return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]); +} + +static bool trans_SCVTF_hh(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_hh); +} + +static bool trans_SCVTF_sh(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_sh); +} + +static bool trans_SCVTF_dh(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_dh); +} + +static bool trans_SCVTF_ss(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ss); +} + +static bool trans_SCVTF_ds(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ds); +} + +static bool trans_SCVTF_sd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_sd); +} + +static bool trans_SCVTF_dd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_dd); +} + +static bool trans_UCVTF_hh(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_hh); +} + +static bool trans_UCVTF_sh(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_sh); +} + +static bool trans_UCVTF_dh(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_dh); +} + +static bool trans_UCVTF_ss(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ss); +} + +static bool trans_UCVTF_ds(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ds); +} + +static bool trans_UCVTF_sd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_sd); +} + +static bool trans_UCVTF_dd(DisasContext *s, arg_rpr_esz *a) +{ + return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_dd); +} + +/* + *** SVE Memory - 32-bit Gather and Unsized Contiguous Group + */ + +/* Subroutine loading a vector register at VOFS of LEN bytes. + * The load should begin at the address Rn + IMM. + */ + +static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int len_align = QEMU_ALIGN_DOWN(len, 8); + int len_remain = len % 8; + int nparts = len / 8 + ctpop8(len_remain); + int midx = get_mem_index(s); + TCGv_i64 addr, t0, t1; + + addr = tcg_temp_new_i64(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + + /* Note that unpredicated load/store of vector/predicate registers + * are defined as a stream of bytes, which equates to little-endian + * operations on larger quantities. There is no nice way to force + * a little-endian load for aarch64_be-linux-user out of line. + * + * Attempt to keep code expansion to a minimum by limiting the + * amount of unrolling done. 
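+ * For example, a 32-byte vector load has len == 32, len_remain == 0 + * and nparts == 4, so it is fully unrolled, while a 64-byte load has + * nparts == 8 and takes the loop below instead.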
+ */ + if (nparts <= 4) { + int i; + + for (i = 0; i < len_align; i += 8) { + tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + i); + tcg_gen_qemu_ld_i64(tcg_ctx, t0, addr, midx, MO_LEQ); + tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + i); + } + } else { + TCGLabel *loop = gen_new_label(tcg_ctx); + TCGv_ptr tp, i = tcg_const_local_ptr(tcg_ctx, 0); + + gen_set_label(tcg_ctx, loop); + + /* Minimize the number of local temps that must be re-read from + * the stack each iteration. Instead, re-compute values other + * than the loop counter. + */ + tp = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, tp, i, imm); + tcg_gen_extu_ptr_i64(tcg_ctx, addr, tp); + tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, rn)); + + tcg_gen_qemu_ld_i64(tcg_ctx, t0, addr, midx, MO_LEQ); + + tcg_gen_add_ptr(tcg_ctx, tp, tcg_ctx->cpu_env, i); + tcg_gen_addi_ptr(tcg_ctx, i, i, 8); + tcg_gen_st_i64(tcg_ctx, t0, tp, vofs); + tcg_temp_free_ptr(tcg_ctx, tp); + + tcg_gen_brcondi_ptr(tcg_ctx, TCG_COND_LTU, i, len_align, loop); + tcg_temp_free_ptr(tcg_ctx, i); + } + + /* Predicate register loads can be any multiple of 2. + * Note that we still store the entire 64-bit unit into cpu_env. + */ + if (len_remain) { + tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + len_align); + + switch (len_remain) { + case 2: + case 4: + case 8: + tcg_gen_qemu_ld_i64(tcg_ctx, t0, addr, midx, MO_LE | ctz32(len_remain)); + break; + + case 6: + t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(tcg_ctx, t0, addr, midx, MO_LEUL); + tcg_gen_addi_i64(tcg_ctx, addr, addr, 4); + tcg_gen_qemu_ld_i64(tcg_ctx, t1, addr, midx, MO_LEUW); + tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 32, 32); + tcg_temp_free_i64(tcg_ctx, t1); + break; + + default: + g_assert_not_reached(); + } + tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + len_align); + } + tcg_temp_free_i64(tcg_ctx, addr); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* Similarly for stores. */ +static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int len_align = QEMU_ALIGN_DOWN(len, 8); + int len_remain = len % 8; + int nparts = len / 8 + ctpop8(len_remain); + int midx = get_mem_index(s); + TCGv_i64 addr, t0; + + addr = tcg_temp_new_i64(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + + /* Note that unpredicated load/store of vector/predicate registers + * are defined as a stream of bytes, which equates to little-endian + * operations on larger quantities. There is no nice way to force + * a little-endian store for aarch64_be-linux-user out of line. + * + * Attempt to keep code expansion to a minimum by limiting the + * amount of unrolling done. + */ + if (nparts <= 4) { + int i; + + for (i = 0; i < len_align; i += 8) { + tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + i); + tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + i); + tcg_gen_qemu_st_i64(tcg_ctx, t0, addr, midx, MO_LEQ); + } + } else { + TCGLabel *loop = gen_new_label(tcg_ctx); + TCGv_ptr t2, i = tcg_const_local_ptr(tcg_ctx, 0); + + gen_set_label(tcg_ctx, loop); + + t2 = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_add_ptr(tcg_ctx, t2, tcg_ctx->cpu_env, i); + tcg_gen_ld_i64(tcg_ctx, t0, t2, vofs); + + /* Minimize the number of local temps that must be re-read from + * the stack each iteration. Instead, re-compute values other + * than the loop counter. 
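+ * (Only the loop counter i is a local temp here; t2 and addr are + * plain temps that are rewritten on every iteration.)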
+ */ + tcg_gen_addi_ptr(tcg_ctx, t2, i, imm); + tcg_gen_extu_ptr_i64(tcg_ctx, addr, t2); + tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, rn)); + tcg_temp_free_ptr(tcg_ctx, t2); + + tcg_gen_qemu_st_i64(tcg_ctx, t0, addr, midx, MO_LEQ); + + tcg_gen_addi_ptr(tcg_ctx, i, i, 8); + + tcg_gen_brcondi_ptr(tcg_ctx, TCG_COND_LTU, i, len_align, loop); + tcg_temp_free_ptr(tcg_ctx, i); + } + + /* Predicate register stores can be any multiple of 2. */ + if (len_remain) { + tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, vofs + len_align); + tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, rn), imm + len_align); + + switch (len_remain) { + case 2: + case 4: + case 8: + tcg_gen_qemu_st_i64(tcg_ctx, t0, addr, midx, MO_LE | ctz32(len_remain)); + break; + + case 6: + tcg_gen_qemu_st_i64(tcg_ctx, t0, addr, midx, MO_LEUL); + tcg_gen_addi_i64(tcg_ctx, addr, addr, 4); + tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); + tcg_gen_qemu_st_i64(tcg_ctx, t0, addr, midx, MO_LEUW); + break; + + default: + g_assert_not_reached(); + } + } + tcg_temp_free_i64(tcg_ctx, addr); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static bool trans_LDR_zri(DisasContext *s, arg_rri *a) +{ + if (sve_access_check(s)) { + int size = vec_full_reg_size(s); + int off = vec_full_reg_offset(s, a->rd); + do_ldr(s, off, size, a->rn, a->imm * size); + } + return true; +} + +static bool trans_LDR_pri(DisasContext *s, arg_rri *a) +{ + if (sve_access_check(s)) { + int size = pred_full_reg_size(s); + int off = pred_full_reg_offset(s, a->rd); + do_ldr(s, off, size, a->rn, a->imm * size); + } + return true; +} + +static bool trans_STR_zri(DisasContext *s, arg_rri *a) +{ + if (sve_access_check(s)) { + int size = vec_full_reg_size(s); + int off = vec_full_reg_offset(s, a->rd); + do_str(s, off, size, a->rn, a->imm * size); + } + return true; +} + +static bool trans_STR_pri(DisasContext *s, arg_rri *a) +{ + if (sve_access_check(s)) { + int size = pred_full_reg_size(s); + int off = pred_full_reg_offset(s, a->rd); + do_str(s, off, size, a->rn, a->imm * size); + } + return true; +} + +/* + *** SVE Memory - Contiguous Load Group + */ + +/* The memory mode of the dtype. */ +static const MemOp dtype_mop[16] = { + MO_UB, MO_UB, MO_UB, MO_UB, + MO_SL, MO_UW, MO_UW, MO_UW, + MO_SW, MO_SW, MO_UL, MO_UL, + MO_SB, MO_SB, MO_SB, MO_Q +}; + +#define dtype_msz(x) (dtype_mop[x] & MO_SIZE) + +/* The vector element size of dtype. */ +static const uint8_t dtype_esz[16] = { + 0, 1, 2, 3, + 3, 1, 2, 3, + 3, 2, 2, 3, + 3, 2, 1, 3 +}; + +static TCGMemOpIdx sve_memopidx(DisasContext *s, int dtype) +{ + return make_memop_idx(s->be_data | dtype_mop[dtype], get_mem_index(s)); +} + +static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, + int dtype, gen_helper_gvec_mem *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr t_pg; + TCGv_i32 t_desc; + int desc; + + /* For e.g. LD4, there are not enough arguments to pass all 4 + * registers as pointers, so encode the regno into the data field. + * For consistency, do this even for LD1. 
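+ * Sketch of the encoding below: the low bits of desc come from + * sve_memopidx(), the regno zt is inserted at MEMOPIDX_SHIFT, and + * the result is wrapped together with the vector size by simd_desc().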
+ */ + desc = sve_memopidx(s, dtype); + desc |= zt << MEMOPIDX_SHIFT; + desc = simd_desc(vsz, vsz, desc); + t_desc = tcg_const_i32(tcg_ctx, desc); + t_pg = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); + fn(tcg_ctx, tcg_ctx->cpu_env, t_pg, addr, t_desc); + + tcg_temp_free_ptr(tcg_ctx, t_pg); + tcg_temp_free_i32(tcg_ctx, t_desc); +} + +static void do_ld_zpa(DisasContext *s, int zt, int pg, + TCGv_i64 addr, int dtype, int nreg) +{ + static gen_helper_gvec_mem * const fns[2][16][4] = { + /* Little-endian */ + { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, + gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, + { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, + + { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r, + gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r }, + { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL }, + + { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r, + gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r }, + { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL }, + + { gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r, + gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } }, + + /* Big-endian */ + { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, + gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, + { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, + + { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r, + gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r }, + { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL }, + + { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r, + gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r }, + { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL }, + + { gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r, + gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } + }; + gen_helper_gvec_mem *fn = fns[s->be_data == MO_BE][dtype][nreg]; + + /* While there are holes in the table, they are not + * accessible via the instruction encoding. 
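+ * For example, little-endian LD1B (dtype 0, nreg 0) resolves to + * fns[0][0][0] == gen_helper_sve_ld1bb_r, while a NULL slot such as + * fns[0][1][1] (no 2-register extending load) is never selected.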
+ */ + assert(fn != NULL); + do_mem_zpa(s, zt, pg, addr, dtype, fn); +} + +static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->rm == 31) { + return false; + } + if (sve_access_check(s)) { + TCGv_i64 addr = new_tmp_a64(s); + tcg_gen_shli_i64(tcg_ctx, addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); + tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, a->rn)); + do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); + } + return true; +} + +static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + int vsz = vec_full_reg_size(s); + int elements = vsz >> dtype_esz[a->dtype]; + TCGv_i64 addr = new_tmp_a64(s); + + tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, a->rn), + (a->imm * elements * (a->nreg + 1)) + << dtype_msz(a->dtype)); + do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); + } + return true; +} + +static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_mem * const fns[2][16] = { + /* Little-endian */ + { gen_helper_sve_ldff1bb_r, + gen_helper_sve_ldff1bhu_r, + gen_helper_sve_ldff1bsu_r, + gen_helper_sve_ldff1bdu_r, + + gen_helper_sve_ldff1sds_le_r, + gen_helper_sve_ldff1hh_le_r, + gen_helper_sve_ldff1hsu_le_r, + gen_helper_sve_ldff1hdu_le_r, + + gen_helper_sve_ldff1hds_le_r, + gen_helper_sve_ldff1hss_le_r, + gen_helper_sve_ldff1ss_le_r, + gen_helper_sve_ldff1sdu_le_r, + + gen_helper_sve_ldff1bds_r, + gen_helper_sve_ldff1bss_r, + gen_helper_sve_ldff1bhs_r, + gen_helper_sve_ldff1dd_le_r }, + + /* Big-endian */ + { gen_helper_sve_ldff1bb_r, + gen_helper_sve_ldff1bhu_r, + gen_helper_sve_ldff1bsu_r, + gen_helper_sve_ldff1bdu_r, + + gen_helper_sve_ldff1sds_be_r, + gen_helper_sve_ldff1hh_be_r, + gen_helper_sve_ldff1hsu_be_r, + gen_helper_sve_ldff1hdu_be_r, + + gen_helper_sve_ldff1hds_be_r, + gen_helper_sve_ldff1hss_be_r, + gen_helper_sve_ldff1ss_be_r, + gen_helper_sve_ldff1sdu_be_r, + + gen_helper_sve_ldff1bds_r, + gen_helper_sve_ldff1bss_r, + gen_helper_sve_ldff1bhs_r, + gen_helper_sve_ldff1dd_be_r }, + }; + + if (sve_access_check(s)) { + TCGv_i64 addr = new_tmp_a64(s); + tcg_gen_shli_i64(tcg_ctx, addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); + tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, a->rn)); + do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, + fns[s->be_data == MO_BE][a->dtype]); + } + return true; +} + +static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_mem * const fns[2][16] = { + /* Little-endian */ + { gen_helper_sve_ldnf1bb_r, + gen_helper_sve_ldnf1bhu_r, + gen_helper_sve_ldnf1bsu_r, + gen_helper_sve_ldnf1bdu_r, + + gen_helper_sve_ldnf1sds_le_r, + gen_helper_sve_ldnf1hh_le_r, + gen_helper_sve_ldnf1hsu_le_r, + gen_helper_sve_ldnf1hdu_le_r, + + gen_helper_sve_ldnf1hds_le_r, + gen_helper_sve_ldnf1hss_le_r, + gen_helper_sve_ldnf1ss_le_r, + gen_helper_sve_ldnf1sdu_le_r, + + gen_helper_sve_ldnf1bds_r, + gen_helper_sve_ldnf1bss_r, + gen_helper_sve_ldnf1bhs_r, + gen_helper_sve_ldnf1dd_le_r }, + + /* Big-endian */ + { gen_helper_sve_ldnf1bb_r, + gen_helper_sve_ldnf1bhu_r, + gen_helper_sve_ldnf1bsu_r, + gen_helper_sve_ldnf1bdu_r, + + gen_helper_sve_ldnf1sds_be_r, + gen_helper_sve_ldnf1hh_be_r, + gen_helper_sve_ldnf1hsu_be_r, + gen_helper_sve_ldnf1hdu_be_r, + + gen_helper_sve_ldnf1hds_be_r, + gen_helper_sve_ldnf1hss_be_r, + gen_helper_sve_ldnf1ss_be_r, + gen_helper_sve_ldnf1sdu_be_r, + + 
gen_helper_sve_ldnf1bds_r, + gen_helper_sve_ldnf1bss_r, + gen_helper_sve_ldnf1bhs_r, + gen_helper_sve_ldnf1dd_be_r }, + }; + + if (sve_access_check(s)) { + int vsz = vec_full_reg_size(s); + int elements = vsz >> dtype_esz[a->dtype]; + int off = (a->imm * elements) << dtype_msz(a->dtype); + TCGv_i64 addr = new_tmp_a64(s); + + tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, a->rn), off); + do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, + fns[s->be_data == MO_BE][a->dtype]); + } + return true; +} + +static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static gen_helper_gvec_mem * const fns[2][4] = { + { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_le_r, + gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld1dd_le_r }, + { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_be_r, + gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld1dd_be_r }, + }; + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr t_pg; + TCGv_i32 t_desc; + int desc, poff; + + /* Load the first quadword using the normal predicated load helpers. */ + desc = sve_memopidx(s, msz_dtype(s, msz)); + desc |= zt << MEMOPIDX_SHIFT; + desc = simd_desc(16, 16, desc); + t_desc = tcg_const_i32(tcg_ctx, desc); + + poff = pred_full_reg_offset(s, pg); + if (vsz > 16) { + /* + * Zero-extend the first 16 bits of the predicate into a temporary. + * This avoids triggering an assert making sure we don't have bits + * set within a predicate beyond VQ, but we have lowered VQ to 1 + * for this load operation. + */ + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); +#ifdef HOST_WORDS_BIGENDIAN + poff += 6; +#endif + tcg_gen_ld16u_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, poff); + + poff = offsetof(CPUARMState, vfp.preg_tmp); + tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, poff); + tcg_temp_free_i64(tcg_ctx, tmp); + } + + t_pg = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, poff); + + fns[s->be_data == MO_BE][msz](tcg_ctx, tcg_ctx->cpu_env, t_pg, addr, t_desc); + + tcg_temp_free_ptr(tcg_ctx, t_pg); + tcg_temp_free_i32(tcg_ctx, t_desc); + + /* Replicate that first quadword. */ + if (vsz > 16) { + unsigned dofs = vec_full_reg_offset(s, zt); + tcg_gen_gvec_dup_mem(tcg_ctx, 4, dofs + 16, dofs, vsz - 16, vsz - 16); + } +} + +static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->rm == 31) { + return false; + } + if (sve_access_check(s)) { + int msz = dtype_msz(a->dtype); + TCGv_i64 addr = new_tmp_a64(s); + tcg_gen_shli_i64(tcg_ctx, addr, cpu_reg(s, a->rm), msz); + tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, a->rn)); + do_ldrq(s, a->rd, a->pg, addr, msz); + } + return true; +} + +static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sve_access_check(s)) { + TCGv_i64 addr = new_tmp_a64(s); + tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, a->rn), a->imm * 16); + do_ldrq(s, a->rd, a->pg, addr, dtype_msz(a->dtype)); + } + return true; +} + +/* Load and broadcast element. */ +static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sve_access_check(s)) { + return true; + } + + unsigned vsz = vec_full_reg_size(s); + unsigned psz = pred_full_reg_size(s); + unsigned esz = dtype_esz[a->dtype]; + unsigned msz = dtype_msz(a->dtype); + TCGLabel *over = gen_new_label(tcg_ctx); + TCGv_i64 temp; + + /* If the guarding predicate has no bits set, no load occurs. 
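+ * Two cases follow: for psz <= 8 the whole predicate fits in one + * 64-bit load and is tested against pred_esz_masks[esz] directly; + * otherwise find_last_active() is used and a negative result means + * that no element is active.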
*/ + if (psz <= 8) { + /* Reduce the pred_esz_masks value simply to reduce the + * size of the code generated here. + */ + uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8); + temp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, temp, tcg_ctx->cpu_env, pred_full_reg_offset(s, a->pg)); + tcg_gen_andi_i64(tcg_ctx, temp, temp, pred_esz_masks[esz] & psz_mask); + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_EQ, temp, 0, over); + tcg_temp_free_i64(tcg_ctx, temp); + } else { + TCGv_i32 t32 = tcg_temp_new_i32(tcg_ctx); + find_last_active(s, t32, esz, a->pg); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, t32, 0, over); + tcg_temp_free_i32(tcg_ctx, t32); + } + + /* Load the data. */ + temp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_addi_i64(tcg_ctx, temp, cpu_reg_sp(s, a->rn), a->imm << msz); + tcg_gen_qemu_ld_i64(tcg_ctx, temp, temp, get_mem_index(s), + s->be_data | dtype_mop[a->dtype]); + + /* Broadcast to *all* elements. */ + tcg_gen_gvec_dup_i64(tcg_ctx, esz, vec_full_reg_offset(s, a->rd), + vsz, vsz, temp); + tcg_temp_free_i64(tcg_ctx, temp); + + /* Zero the inactive elements. */ + gen_set_label(tcg_ctx, over); + do_movz_zpz(s, a->rd, a->rd, a->pg, esz); + return true; +} + +static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, + int msz, int esz, int nreg) +{ + static gen_helper_gvec_mem * const fn_single[2][4][4] = { + { { gen_helper_sve_st1bb_r, + gen_helper_sve_st1bh_r, + gen_helper_sve_st1bs_r, + gen_helper_sve_st1bd_r }, + { NULL, + gen_helper_sve_st1hh_le_r, + gen_helper_sve_st1hs_le_r, + gen_helper_sve_st1hd_le_r }, + { NULL, NULL, + gen_helper_sve_st1ss_le_r, + gen_helper_sve_st1sd_le_r }, + { NULL, NULL, NULL, + gen_helper_sve_st1dd_le_r } }, + { { gen_helper_sve_st1bb_r, + gen_helper_sve_st1bh_r, + gen_helper_sve_st1bs_r, + gen_helper_sve_st1bd_r }, + { NULL, + gen_helper_sve_st1hh_be_r, + gen_helper_sve_st1hs_be_r, + gen_helper_sve_st1hd_be_r }, + { NULL, NULL, + gen_helper_sve_st1ss_be_r, + gen_helper_sve_st1sd_be_r }, + { NULL, NULL, NULL, + gen_helper_sve_st1dd_be_r } }, + }; + static gen_helper_gvec_mem * const fn_multiple[2][3][4] = { + { { gen_helper_sve_st2bb_r, + gen_helper_sve_st2hh_le_r, + gen_helper_sve_st2ss_le_r, + gen_helper_sve_st2dd_le_r }, + { gen_helper_sve_st3bb_r, + gen_helper_sve_st3hh_le_r, + gen_helper_sve_st3ss_le_r, + gen_helper_sve_st3dd_le_r }, + { gen_helper_sve_st4bb_r, + gen_helper_sve_st4hh_le_r, + gen_helper_sve_st4ss_le_r, + gen_helper_sve_st4dd_le_r } }, + { { gen_helper_sve_st2bb_r, + gen_helper_sve_st2hh_be_r, + gen_helper_sve_st2ss_be_r, + gen_helper_sve_st2dd_be_r }, + { gen_helper_sve_st3bb_r, + gen_helper_sve_st3hh_be_r, + gen_helper_sve_st3ss_be_r, + gen_helper_sve_st3dd_be_r }, + { gen_helper_sve_st4bb_r, + gen_helper_sve_st4hh_be_r, + gen_helper_sve_st4ss_be_r, + gen_helper_sve_st4dd_be_r } }, + }; + gen_helper_gvec_mem *fn; + int be = s->be_data == MO_BE; + + if (nreg == 0) { + /* ST1 */ + fn = fn_single[be][msz][esz]; + } else { + /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */ + assert(msz == esz); + fn = fn_multiple[be][nreg - 1][msz]; + } + assert(fn != NULL); + do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), fn); +} + +static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->rm == 31 || a->msz > a->esz) { + return false; + } + if (sve_access_check(s)) { + TCGv_i64 addr = new_tmp_a64(s); + tcg_gen_shli_i64(tcg_ctx, addr, cpu_reg(s, a->rm), a->msz); + tcg_gen_add_i64(tcg_ctx, addr, addr, cpu_reg_sp(s, a->rn)); + do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, 
a->nreg); + } + return true; +} + +static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->msz > a->esz) { + return false; + } + if (sve_access_check(s)) { + int vsz = vec_full_reg_size(s); + int elements = vsz >> a->esz; + TCGv_i64 addr = new_tmp_a64(s); + + tcg_gen_addi_i64(tcg_ctx, addr, cpu_reg_sp(s, a->rn), + (a->imm * elements * (a->nreg + 1)) << a->msz); + do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); + } + return true; +} + +/* + *** SVE gather loads / scatter stores + */ + +static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, + int scale, TCGv_i64 scalar, int msz, + gen_helper_gvec_mem_scatter *fn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned vsz = vec_full_reg_size(s); + TCGv_ptr t_zm = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr t_pg = tcg_temp_new_ptr(tcg_ctx); + TCGv_ptr t_zt = tcg_temp_new_ptr(tcg_ctx); + TCGv_i32 t_desc; + int desc; + + desc = sve_memopidx(s, msz_dtype(s, msz)); + desc |= scale << MEMOPIDX_SHIFT; + desc = simd_desc(vsz, vsz, desc); + t_desc = tcg_const_i32(tcg_ctx, desc); + + tcg_gen_addi_ptr(tcg_ctx, t_pg, tcg_ctx->cpu_env, pred_full_reg_offset(s, pg)); + tcg_gen_addi_ptr(tcg_ctx, t_zm, tcg_ctx->cpu_env, vec_full_reg_offset(s, zm)); + tcg_gen_addi_ptr(tcg_ctx, t_zt, tcg_ctx->cpu_env, vec_full_reg_offset(s, zt)); + fn(tcg_ctx, tcg_ctx->cpu_env, t_zt, t_pg, t_zm, scalar, t_desc); + + tcg_temp_free_ptr(tcg_ctx, t_zt); + tcg_temp_free_ptr(tcg_ctx, t_zm); + tcg_temp_free_ptr(tcg_ctx, t_pg); + tcg_temp_free_i32(tcg_ctx, t_desc); +} + +/* Indexed by [be][ff][xs][u][msz]. */ +static gen_helper_gvec_mem_scatter * const gather_load_fn32[2][2][2][2][3] = { + /* Little-endian */ + { { { { gen_helper_sve_ldbss_zsu, + gen_helper_sve_ldhss_le_zsu, + NULL, }, + { gen_helper_sve_ldbsu_zsu, + gen_helper_sve_ldhsu_le_zsu, + gen_helper_sve_ldss_le_zsu, } }, + { { gen_helper_sve_ldbss_zss, + gen_helper_sve_ldhss_le_zss, + NULL, }, + { gen_helper_sve_ldbsu_zss, + gen_helper_sve_ldhsu_le_zss, + gen_helper_sve_ldss_le_zss, } } }, + + /* First-fault */ + { { { gen_helper_sve_ldffbss_zsu, + gen_helper_sve_ldffhss_le_zsu, + NULL, }, + { gen_helper_sve_ldffbsu_zsu, + gen_helper_sve_ldffhsu_le_zsu, + gen_helper_sve_ldffss_le_zsu, } }, + { { gen_helper_sve_ldffbss_zss, + gen_helper_sve_ldffhss_le_zss, + NULL, }, + { gen_helper_sve_ldffbsu_zss, + gen_helper_sve_ldffhsu_le_zss, + gen_helper_sve_ldffss_le_zss, } } } }, + + /* Big-endian */ + { { { { gen_helper_sve_ldbss_zsu, + gen_helper_sve_ldhss_be_zsu, + NULL, }, + { gen_helper_sve_ldbsu_zsu, + gen_helper_sve_ldhsu_be_zsu, + gen_helper_sve_ldss_be_zsu, } }, + { { gen_helper_sve_ldbss_zss, + gen_helper_sve_ldhss_be_zss, + NULL, }, + { gen_helper_sve_ldbsu_zss, + gen_helper_sve_ldhsu_be_zss, + gen_helper_sve_ldss_be_zss, } } }, + + /* First-fault */ + { { { gen_helper_sve_ldffbss_zsu, + gen_helper_sve_ldffhss_be_zsu, + NULL, }, + { gen_helper_sve_ldffbsu_zsu, + gen_helper_sve_ldffhsu_be_zsu, + gen_helper_sve_ldffss_be_zsu, } }, + { { gen_helper_sve_ldffbss_zss, + gen_helper_sve_ldffhss_be_zss, + NULL, }, + { gen_helper_sve_ldffbsu_zss, + gen_helper_sve_ldffhsu_be_zss, + gen_helper_sve_ldffss_be_zss, } } } }, +}; + +/* Note that we overload xs=2 to indicate 64-bit offset. 
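+ * That is, xs == 0 selects the _zsu helpers (unsigned 32-bit offsets), + * xs == 1 the _zss helpers (signed 32-bit offsets), and xs == 2 the + * _zd helpers (64-bit offsets), matching the suffixes in the table.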
*/ +static gen_helper_gvec_mem_scatter * const gather_load_fn64[2][2][3][2][4] = { + /* Little-endian */ + { { { { gen_helper_sve_ldbds_zsu, + gen_helper_sve_ldhds_le_zsu, + gen_helper_sve_ldsds_le_zsu, + NULL, }, + { gen_helper_sve_ldbdu_zsu, + gen_helper_sve_ldhdu_le_zsu, + gen_helper_sve_ldsdu_le_zsu, + gen_helper_sve_lddd_le_zsu, } }, + { { gen_helper_sve_ldbds_zss, + gen_helper_sve_ldhds_le_zss, + gen_helper_sve_ldsds_le_zss, + NULL, }, + { gen_helper_sve_ldbdu_zss, + gen_helper_sve_ldhdu_le_zss, + gen_helper_sve_ldsdu_le_zss, + gen_helper_sve_lddd_le_zss, } }, + { { gen_helper_sve_ldbds_zd, + gen_helper_sve_ldhds_le_zd, + gen_helper_sve_ldsds_le_zd, + NULL, }, + { gen_helper_sve_ldbdu_zd, + gen_helper_sve_ldhdu_le_zd, + gen_helper_sve_ldsdu_le_zd, + gen_helper_sve_lddd_le_zd, } } }, + + /* First-fault */ + { { { gen_helper_sve_ldffbds_zsu, + gen_helper_sve_ldffhds_le_zsu, + gen_helper_sve_ldffsds_le_zsu, + NULL, }, + { gen_helper_sve_ldffbdu_zsu, + gen_helper_sve_ldffhdu_le_zsu, + gen_helper_sve_ldffsdu_le_zsu, + gen_helper_sve_ldffdd_le_zsu, } }, + { { gen_helper_sve_ldffbds_zss, + gen_helper_sve_ldffhds_le_zss, + gen_helper_sve_ldffsds_le_zss, + NULL, }, + { gen_helper_sve_ldffbdu_zss, + gen_helper_sve_ldffhdu_le_zss, + gen_helper_sve_ldffsdu_le_zss, + gen_helper_sve_ldffdd_le_zss, } }, + { { gen_helper_sve_ldffbds_zd, + gen_helper_sve_ldffhds_le_zd, + gen_helper_sve_ldffsds_le_zd, + NULL, }, + { gen_helper_sve_ldffbdu_zd, + gen_helper_sve_ldffhdu_le_zd, + gen_helper_sve_ldffsdu_le_zd, + gen_helper_sve_ldffdd_le_zd, } } } }, + + /* Big-endian */ + { { { { gen_helper_sve_ldbds_zsu, + gen_helper_sve_ldhds_be_zsu, + gen_helper_sve_ldsds_be_zsu, + NULL, }, + { gen_helper_sve_ldbdu_zsu, + gen_helper_sve_ldhdu_be_zsu, + gen_helper_sve_ldsdu_be_zsu, + gen_helper_sve_lddd_be_zsu, } }, + { { gen_helper_sve_ldbds_zss, + gen_helper_sve_ldhds_be_zss, + gen_helper_sve_ldsds_be_zss, + NULL, }, + { gen_helper_sve_ldbdu_zss, + gen_helper_sve_ldhdu_be_zss, + gen_helper_sve_ldsdu_be_zss, + gen_helper_sve_lddd_be_zss, } }, + { { gen_helper_sve_ldbds_zd, + gen_helper_sve_ldhds_be_zd, + gen_helper_sve_ldsds_be_zd, + NULL, }, + { gen_helper_sve_ldbdu_zd, + gen_helper_sve_ldhdu_be_zd, + gen_helper_sve_ldsdu_be_zd, + gen_helper_sve_lddd_be_zd, } } }, + + /* First-fault */ + { { { gen_helper_sve_ldffbds_zsu, + gen_helper_sve_ldffhds_be_zsu, + gen_helper_sve_ldffsds_be_zsu, + NULL, }, + { gen_helper_sve_ldffbdu_zsu, + gen_helper_sve_ldffhdu_be_zsu, + gen_helper_sve_ldffsdu_be_zsu, + gen_helper_sve_ldffdd_be_zsu, } }, + { { gen_helper_sve_ldffbds_zss, + gen_helper_sve_ldffhds_be_zss, + gen_helper_sve_ldffsds_be_zss, + NULL, }, + { gen_helper_sve_ldffbdu_zss, + gen_helper_sve_ldffhdu_be_zss, + gen_helper_sve_ldffsdu_be_zss, + gen_helper_sve_ldffdd_be_zss, } }, + { { gen_helper_sve_ldffbds_zd, + gen_helper_sve_ldffhds_be_zd, + gen_helper_sve_ldffsds_be_zd, + NULL, }, + { gen_helper_sve_ldffbdu_zd, + gen_helper_sve_ldffhdu_be_zd, + gen_helper_sve_ldffsdu_be_zd, + gen_helper_sve_ldffdd_be_zd, } } } }, +}; + +static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) +{ + gen_helper_gvec_mem_scatter *fn = NULL; + int be = s->be_data == MO_BE; + + if (!sve_access_check(s)) { + return true; + } + + switch (a->esz) { + case MO_32: + fn = gather_load_fn32[be][a->ff][a->xs][a->u][a->msz]; + break; + case MO_64: + fn = gather_load_fn64[be][a->ff][a->xs][a->u][a->msz]; + break; + } + assert(fn != NULL); + + do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz, + cpu_reg_sp(s, a->rn), a->msz, fn); + return true; +} + 
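+/* + * Worked example of the dispatch above: a little-endian first-fault + * gather with 32-bit elements, unsigned 32-bit offsets and 16-bit + * memory elements (ff == 1, xs == 0, u == 1, msz == 1) resolves + * gather_load_fn32[0][1][0][1][1] to gen_helper_sve_ldffhsu_le_zsu. + */ +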
+static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_gvec_mem_scatter *fn = NULL; + int be = s->be_data == MO_BE; + TCGv_i64 imm; + + if (a->esz < a->msz || (a->esz == a->msz && !a->u)) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + switch (a->esz) { + case MO_32: + fn = gather_load_fn32[be][a->ff][0][a->u][a->msz]; + break; + case MO_64: + fn = gather_load_fn64[be][a->ff][2][a->u][a->msz]; + break; + } + assert(fn != NULL); + + /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x]) + * by loading the immediate into the scalar parameter. + */ + imm = tcg_const_i64(tcg_ctx, a->imm << a->msz); + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, fn); + tcg_temp_free_i64(tcg_ctx, imm); + return true; +} + +/* Indexed by [be][xs][msz]. */ +static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][3] = { + /* Little-endian */ + { { gen_helper_sve_stbs_zsu, + gen_helper_sve_sths_le_zsu, + gen_helper_sve_stss_le_zsu, }, + { gen_helper_sve_stbs_zss, + gen_helper_sve_sths_le_zss, + gen_helper_sve_stss_le_zss, } }, + /* Big-endian */ + { { gen_helper_sve_stbs_zsu, + gen_helper_sve_sths_be_zsu, + gen_helper_sve_stss_be_zsu, }, + { gen_helper_sve_stbs_zss, + gen_helper_sve_sths_be_zss, + gen_helper_sve_stss_be_zss, } }, +}; + +/* Note that we overload xs=2 to indicate 64-bit offset. */ +static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][3][4] = { + /* Little-endian */ + { { gen_helper_sve_stbd_zsu, + gen_helper_sve_sthd_le_zsu, + gen_helper_sve_stsd_le_zsu, + gen_helper_sve_stdd_le_zsu, }, + { gen_helper_sve_stbd_zss, + gen_helper_sve_sthd_le_zss, + gen_helper_sve_stsd_le_zss, + gen_helper_sve_stdd_le_zss, }, + { gen_helper_sve_stbd_zd, + gen_helper_sve_sthd_le_zd, + gen_helper_sve_stsd_le_zd, + gen_helper_sve_stdd_le_zd, } }, + /* Big-endian */ + { { gen_helper_sve_stbd_zsu, + gen_helper_sve_sthd_be_zsu, + gen_helper_sve_stsd_be_zsu, + gen_helper_sve_stdd_be_zsu, }, + { gen_helper_sve_stbd_zss, + gen_helper_sve_sthd_be_zss, + gen_helper_sve_stsd_be_zss, + gen_helper_sve_stdd_be_zss, }, + { gen_helper_sve_stbd_zd, + gen_helper_sve_sthd_be_zd, + gen_helper_sve_stsd_be_zd, + gen_helper_sve_stdd_be_zd, } }, +}; + +static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) +{ + gen_helper_gvec_mem_scatter *fn = NULL; + int be = s->be_data == MO_BE; + + if (a->esz < a->msz || (a->msz == 0 && a->scale)) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + switch (a->esz) { + case MO_32: + fn = scatter_store_fn32[be][a->xs][a->msz]; + break; + case MO_64: + fn = scatter_store_fn64[be][a->xs][a->msz]; + break; + default: + g_assert_not_reached(); + } + do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz, + cpu_reg_sp(s, a->rn), a->msz, fn); + return true; +} + +static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_gvec_mem_scatter *fn = NULL; + int be = s->be_data == MO_BE; + TCGv_i64 imm; + + if (a->esz < a->msz) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + switch (a->esz) { + case MO_32: + fn = scatter_store_fn32[be][0][a->msz]; + break; + case MO_64: + fn = scatter_store_fn64[be][2][a->msz]; + break; + } + assert(fn != NULL); + + /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x]) + * by loading the immediate into the scalar parameter. 
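+ * (The immediate counts memory elements, hence the a->imm << a->msz + * conversion to a byte offset below.)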
+ */ + imm = tcg_const_i64(tcg_ctx, a->imm << a->msz); + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, fn); + tcg_temp_free_i64(tcg_ctx, imm); + return true; +} + +/* + * Prefetches + */ + +static bool trans_PRF(DisasContext *s, arg_PRF *a) +{ + /* Prefetch is a nop within QEMU. */ + (void)sve_access_check(s); + return true; +} + +static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a) +{ + if (a->rm == 31) { + return false; + } + /* Prefetch is a nop within QEMU. */ + (void)sve_access_check(s); + return true; +} + +/* + * Move Prefix + * + * TODO: The implementation so far could handle predicated merging movprfx. + * The helper functions as written take an extra source register to + * use in the operation, but the result is only written when predication + * succeeds. For unpredicated movprfx, we need to rearrange the helpers + * to allow the final write back to the destination to be unconditional. + * For predicated zeroing movprfx, we need to rearrange the helpers to + * allow the final write back to zero inactives. + * + * In the meantime, just emit the moves. + */ + +static bool trans_MOVPRFX(DisasContext *s, arg_MOVPRFX *a) +{ + return do_mov_z(s, a->rd, a->rn); +} + +static bool trans_MOVPRFX_m(DisasContext *s, arg_rpr_esz *a) +{ + if (sve_access_check(s)) { + do_sel_z(s, a->rd, a->rn, a->rd, a->pg, a->esz); + } + return true; +} + +static bool trans_MOVPRFX_z(DisasContext *s, arg_rpr_esz *a) +{ + if (sve_access_check(s)) { + do_movz_zpz(s, a->rd, a->rn, a->pg, a->esz); + } + return true; +} diff --git a/qemu/target/arm/translate-vfp.inc.c b/qemu/target/arm/translate-vfp.inc.c new file mode 100644 index 00000000..4773efb9 --- /dev/null +++ b/qemu/target/arm/translate-vfp.inc.c @@ -0,0 +1,2935 @@ +/* + * ARM translation: AArch32 VFP instructions + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2005-2007 CodeSourcery + * Copyright (c) 2007 OpenedHand, Ltd. + * Copyright (c) 2019 Linaro, Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * This file is intended to be included from translate.c; it uses + * some macros and definitions provided by that file. + * It might be possible to convert it to a standalone .c file eventually. + */ + +/* Include the generated VFP decoder */ +#include "decode-vfp.inc.c" +#include "decode-vfp-uncond.inc.c" + + +/* + * The imm8 encodes the sign bit, enough bits to represent an exponent in + * the range 01....1xx to 10....0xx, and the most significant 4 bits of + * the mantissa; see VFPExpandImm() in the v8 ARM ARM. + */ +uint64_t vfp_expand_imm(int size, uint8_t imm8) +{ + uint64_t imm = 0; + + switch (size) { + case MO_64: + imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | + (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) | + extract32(imm8, 0, 6); + imm <<= 48; + break; + case MO_32: + imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | + (extract32(imm8, 6, 1) ?
0x3e00 : 0x4000) | + (extract32(imm8, 0, 6) << 3); + imm <<= 16; + break; + case MO_16: + imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) | + (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) | + (extract32(imm8, 0, 6) << 6); + break; + default: + g_assert_not_reached(); + } + return imm; +} + +/* + * Return the offset of a 16-bit half of the specified VFP single-precision + * register. If top is true, returns the top 16 bits; otherwise the bottom + * 16 bits. + */ +static inline long vfp_f16_offset(unsigned reg, bool top) +{ + long offs = vfp_reg_offset(false, reg); +#ifdef HOST_WORDS_BIGENDIAN + if (!top) { + offs += 2; + } +#else + if (top) { + offs += 2; + } +#endif + return offs; +} + +/* + * Check that VFP access is enabled. If it is, do the necessary + * M-profile lazy-FP handling and then return true. + * If not, emit code to generate an appropriate exception and + * return false. + * The ignore_vfp_enabled argument specifies that we should ignore + * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX + * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns. + */ +static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (s->fp_excp_el) { + if (arm_dc_feature(s, ARM_FEATURE_M)) { + gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(), + s->fp_excp_el); + } else { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_fp_access_trap(1, 0xe, false), + s->fp_excp_el); + } + return false; + } + + if (!s->vfp_enabled && !ignore_vfp_enabled) { + assert(!arm_dc_feature(s, ARM_FEATURE_M)); + unallocated_encoding(s); + return false; + } + + if (arm_dc_feature(s, ARM_FEATURE_M)) { + /* Handle M-profile lazy FP state mechanics */ + + /* Trigger lazy-state preservation if necessary */ + if (s->v7m_lspact) { + /* + * Lazy state saving affects external memory and also the NVIC, + * so we must mark it as an IO operation for icount. + */ + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_v7m_preserve_fp_state(tcg_ctx, tcg_ctx->cpu_env); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_end(tcg_ctx); + } + /* + * If the preserve_fp_state helper doesn't throw an exception + * then it will clear LSPACT; we don't need to repeat this for + * any further FP insns in this TB. + */ + s->v7m_lspact = false; + } + + /* Update ownership of FP context: set FPCCR.S to match current state */ + if (s->v8m_fpccr_s_wrong) { + TCGv_i32 tmp; + + tmp = load_cpu_field(tcg_ctx, v7m.fpccr[M_REG_S]); + if (s->v8m_secure) { + tcg_gen_ori_i32(tcg_ctx, tmp, tmp, R_V7M_FPCCR_S_MASK); + } else { + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, ~R_V7M_FPCCR_S_MASK); + } + store_cpu_field(tcg_ctx, tmp, v7m.fpccr[M_REG_S]); + /* Don't need to do this for any further FP insns in this TB */ + s->v8m_fpccr_s_wrong = false; + } + + if (s->v7m_new_fp_ctxt_needed) { + /* + * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA + * and the FPSCR. + */ + TCGv_i32 control, fpscr; + uint32_t bits = R_V7M_CONTROL_FPCA_MASK; + + fpscr = load_cpu_field(tcg_ctx, v7m.fpdscr[s->v8m_secure]); + gen_helper_vfp_set_fpscr(tcg_ctx, tcg_ctx->cpu_env, fpscr); + tcg_temp_free_i32(tcg_ctx, fpscr); + /* + * We don't need to arrange to end the TB, because the only + * parts of FPSCR which we cache in the TB flags are the VECLEN + * and VECSTRIDE, and those don't exist for M-profile. 
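+ * Anything else the helper wrote takes effect from the next insn
+ * without retranslation, so we can simply continue.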
+ */ + + if (s->v8m_secure) { + bits |= R_V7M_CONTROL_SFPA_MASK; + } + control = load_cpu_field(tcg_ctx, v7m.control[M_REG_S]); + tcg_gen_ori_i32(tcg_ctx, control, control, bits); + store_cpu_field(tcg_ctx, control, v7m.control[M_REG_S]); + /* Don't need to do this for any further FP insns in this TB */ + s->v7m_new_fp_ctxt_needed = false; + } + } + + return true; +} + +/* + * The most usual kind of VFP access check, for everything except + * FMXR/FMRX to the always-available special registers. + */ +static bool vfp_access_check(DisasContext *s) +{ + return full_vfp_access_check(s, false); +} + +static bool trans_VSEL(DisasContext *s, arg_VSEL *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t rd, rn, rm; + bool dp = a->dp; + + if (!dc_isar_feature(aa32_vsel, s)) { + return false; + } + + if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_simd_r32, s) && + ((a->vm | a->vn | a->vd) & 0x10)) { + return false; + } + + rd = a->vd; + rn = a->vn; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } + + if (dp) { + TCGv_i64 frn, frm, dest; + TCGv_i64 tmp, zero, zf, nf, vf; + + zero = tcg_const_i64(tcg_ctx, 0); + + frn = tcg_temp_new_i64(tcg_ctx); + frm = tcg_temp_new_i64(tcg_ctx); + dest = tcg_temp_new_i64(tcg_ctx); + + zf = tcg_temp_new_i64(tcg_ctx); + nf = tcg_temp_new_i64(tcg_ctx); + vf = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_extu_i32_i64(tcg_ctx, zf, tcg_ctx->cpu_ZF); + tcg_gen_ext_i32_i64(tcg_ctx, nf, tcg_ctx->cpu_NF); + tcg_gen_ext_i32_i64(tcg_ctx, vf, tcg_ctx->cpu_VF); + + neon_load_reg64(tcg_ctx, frn, rn); + neon_load_reg64(tcg_ctx, frm, rm); + switch (a->cc) { + case 0: /* eq: Z */ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, dest, zf, zero, + frn, frm); + break; + case 1: /* vs: V */ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, dest, vf, zero, + frn, frm); + break; + case 2: /* ge: N == V -> N ^ V == 0 */ + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_xor_i64(tcg_ctx, tmp, vf, nf); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, dest, tmp, zero, + frn, frm); + tcg_temp_free_i64(tcg_ctx, tmp); + break; + case 3: /* gt: !Z && N == V */ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, dest, zf, zero, + frn, frm); + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_xor_i64(tcg_ctx, tmp, vf, nf); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, dest, tmp, zero, + dest, frm); + tcg_temp_free_i64(tcg_ctx, tmp); + break; + } + neon_store_reg64(tcg_ctx, dest, rd); + tcg_temp_free_i64(tcg_ctx, frn); + tcg_temp_free_i64(tcg_ctx, frm); + tcg_temp_free_i64(tcg_ctx, dest); // qq + + tcg_temp_free_i64(tcg_ctx, zf); + tcg_temp_free_i64(tcg_ctx, nf); + tcg_temp_free_i64(tcg_ctx, vf); + + tcg_temp_free_i64(tcg_ctx, zero); + } else { + TCGv_i32 frn, frm, dest; + TCGv_i32 tmp, zero; + + zero = tcg_const_i32(tcg_ctx, 0); + + frn = tcg_temp_new_i32(tcg_ctx); + frm = tcg_temp_new_i32(tcg_ctx); + dest = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, frn, rn); + neon_load_reg32(tcg_ctx, frm, rm); + switch (a->cc) { + case 0: /* eq: Z */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, dest, tcg_ctx->cpu_ZF, zero, + frn, frm); + break; + case 1: /* vs: V */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, dest, tcg_ctx->cpu_VF, zero, + frn, frm); + break; + case 2: /* ge: N == V -> N ^ V == 0 */ + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GE, dest, tmp, zero, + frn, frm); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + 
case 3: /* gt: !Z && N == V */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, dest, tcg_ctx->cpu_ZF, zero, + frn, frm); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GE, dest, tmp, zero, + dest, frm); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + } + neon_store_reg32(tcg_ctx, dest, rd); + tcg_temp_free_i32(tcg_ctx, frn); + tcg_temp_free_i32(tcg_ctx, frm); + tcg_temp_free_i32(tcg_ctx, dest); + + tcg_temp_free_i32(tcg_ctx, zero); + } + + return true; +} + +/* + * Table for converting the most common AArch32 encoding of + * rounding mode to arm_fprounding order (which matches the + * common AArch64 order); see ARM ARM pseudocode FPDecodeRM(). + */ +static const uint8_t fp_decode_rm[] = { + FPROUNDING_TIEAWAY, + FPROUNDING_TIEEVEN, + FPROUNDING_POSINF, + FPROUNDING_NEGINF, +}; + +static bool trans_VRINT(DisasContext *s, arg_VRINT *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t rd, rm; + bool dp = a->dp; + TCGv_ptr fpst; + TCGv_i32 tcg_rmode; + int rounding = fp_decode_rm[a->rm]; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_simd_r32, s) && + ((a->vm | a->vd) & 0x10)) { + return false; + } + + rd = a->vd; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(tcg_ctx, 0); + + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rounding)); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + + if (dp) { + TCGv_i64 tcg_op; + TCGv_i64 tcg_res; + tcg_op = tcg_temp_new_i64(tcg_ctx); + tcg_res = tcg_temp_new_i64(tcg_ctx); + neon_load_reg64(tcg_ctx, tcg_op, rm); + gen_helper_rintd(tcg_ctx, tcg_res, tcg_op, fpst); + neon_store_reg64(tcg_ctx, tcg_res, rd); + tcg_temp_free_i64(tcg_ctx, tcg_op); + tcg_temp_free_i64(tcg_ctx, tcg_res); + } else { + TCGv_i32 tcg_op; + TCGv_i32 tcg_res; + tcg_op = tcg_temp_new_i32(tcg_ctx); + tcg_res = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, tcg_op, rm); + gen_helper_rints(tcg_ctx, tcg_res, tcg_op, fpst); + neon_store_reg32(tcg_ctx, tcg_res, rd); + tcg_temp_free_i32(tcg_ctx, tcg_op); + tcg_temp_free_i32(tcg_ctx, tcg_res); + } + + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + + tcg_temp_free_ptr(tcg_ctx, fpst); + return true; +} + +static bool trans_VCVT(DisasContext *s, arg_VCVT *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t rd, rm; + bool dp = a->dp; + TCGv_ptr fpst; + TCGv_i32 tcg_rmode, tcg_shift; + int rounding = fp_decode_rm[a->rm]; + bool is_signed = a->op; + + if (!dc_isar_feature(aa32_vcvt_dr, s)) { + return false; + } + + if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (dp && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { + return false; + } + + rd = a->vd; + rm = a->vm; + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(tcg_ctx, 0); + + tcg_shift = tcg_const_i32(tcg_ctx, 0); + + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rounding)); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + + if (dp) { + TCGv_i64 tcg_double, tcg_res; + TCGv_i32 tcg_tmp; + tcg_double = tcg_temp_new_i64(tcg_ctx); + tcg_res = tcg_temp_new_i64(tcg_ctx); + tcg_tmp = tcg_temp_new_i32(tcg_ctx); + neon_load_reg64(tcg_ctx, tcg_double, rm); + if (is_signed) { + 
gen_helper_vfp_tosld(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst); + } else { + gen_helper_vfp_tould(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst); + } + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_tmp, tcg_res); + neon_store_reg32(tcg_ctx, tcg_tmp, rd); + tcg_temp_free_i32(tcg_ctx, tcg_tmp); + tcg_temp_free_i64(tcg_ctx, tcg_res); + tcg_temp_free_i64(tcg_ctx, tcg_double); + } else { + TCGv_i32 tcg_single, tcg_res; + tcg_single = tcg_temp_new_i32(tcg_ctx); + tcg_res = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, tcg_single, rm); + if (is_signed) { + gen_helper_vfp_tosls(tcg_ctx, tcg_res, tcg_single, tcg_shift, fpst); + } else { + gen_helper_vfp_touls(tcg_ctx, tcg_res, tcg_single, tcg_shift, fpst); + } + neon_store_reg32(tcg_ctx, tcg_res, rd); + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_single); + } + + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + + tcg_temp_free_i32(tcg_ctx, tcg_shift); + + tcg_temp_free_ptr(tcg_ctx, fpst); + + return true; +} + +static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* VMOV scalar to general purpose register */ + TCGv_i32 tmp; + int pass; + uint32_t offset; + + /* SIZE == 2 is a VFP instruction; otherwise NEON. */ + if (a->size == 2 + ? !dc_isar_feature(aa32_fpsp_v2, s) + : !arm_dc_feature(s, ARM_FEATURE_NEON)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) { + return false; + } + + offset = a->index << a->size; + pass = extract32(offset, 2, 1); + offset = extract32(offset, 0, 2) * 8; + + if (!vfp_access_check(s)) { + return true; + } + + tmp = neon_load_reg(tcg_ctx, a->vn, pass); + switch (a->size) { + case 0: + if (offset) { + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, offset); + } + if (a->u) { + gen_uxtb(tmp); + } else { + gen_sxtb(tmp); + } + break; + case 1: + if (a->u) { + if (offset) { + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); + } else { + gen_uxth(tmp); + } + } else { + if (offset) { + tcg_gen_sari_i32(tcg_ctx, tmp, tmp, 16); + } else { + gen_sxth(tmp); + } + } + break; + case 2: + break; + } + store_reg(s, a->rt, tmp); + + return true; +} + +static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* VMOV general purpose register to scalar */ + TCGv_i32 tmp, tmp2; + int pass; + uint32_t offset; + + /* SIZE == 2 is a VFP instruction; otherwise NEON. */ + if (a->size == 2 + ? 
!dc_isar_feature(aa32_fpsp_v2, s) + : !arm_dc_feature(s, ARM_FEATURE_NEON)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) { + return false; + } + + offset = a->index << a->size; + pass = extract32(offset, 2, 1); + offset = extract32(offset, 0, 2) * 8; + + if (!vfp_access_check(s)) { + return true; + } + + tmp = load_reg(s, a->rt); + switch (a->size) { + case 0: + tmp2 = neon_load_reg(tcg_ctx, a->vn, pass); + tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, offset, 8); + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + case 1: + tmp2 = neon_load_reg(tcg_ctx, a->vn, pass); + tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, offset, 16); + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + case 2: + break; + } + neon_store_reg(tcg_ctx, a->vn, pass, tmp); + + return true; +} + +static bool trans_VDUP(DisasContext *s, arg_VDUP *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* VDUP (general purpose register) */ + TCGv_i32 tmp; + int size, vec_size; + + if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) { + return false; + } + + if (a->b && a->e) { + return false; + } + + if (a->q && (a->vn & 1)) { + return false; + } + + vec_size = a->q ? 16 : 8; + if (a->b) { + size = 0; + } else if (a->e) { + size = 1; + } else { + size = 2; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = load_reg(s, a->rt); + tcg_gen_gvec_dup_i32(tcg_ctx, size, neon_reg_offset(a->vn, 0), + vec_size, vec_size, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + + return true; +} + +static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = 0; + bool ignore_vfp_enabled = false; + + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + if (arm_dc_feature(s, ARM_FEATURE_M)) { + /* + * The only M-profile VFP vmrs/vmsr sysreg is FPSCR. + * Accesses to R15 are UNPREDICTABLE; we choose to undef. + * (FPSCR -> r15 is a special case which writes to the PSR flags.) + */ + if (a->rt == 15 && (!a->l || a->reg != ARM_VFP_FPSCR)) { + return false; + } + } + + switch (a->reg) { + case ARM_VFP_FPSID: + /* + * VFPv2 allows access to FPSID from userspace; VFPv3 restricts + * all ID registers to privileged access only. 
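+ * (aa32_fpsp_v3 serves as the "VFPv3 or later" test below.)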
+ */ + if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) { + return false; + } + ignore_vfp_enabled = true; + break; + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) { + return false; + } + ignore_vfp_enabled = true; + break; + case ARM_VFP_MVFR2: + if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) { + return false; + } + ignore_vfp_enabled = true; + break; + case ARM_VFP_FPSCR: + break; + case ARM_VFP_FPEXC: + if (IS_USER(s)) { + return false; + } + ignore_vfp_enabled = true; + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + /* Not present in VFPv3 */ + if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) { + return false; + } + break; + default: + return false; + } + + if (!full_vfp_access_check(s, ignore_vfp_enabled)) { + return true; + } + + if (a->l) { + /* VMRS, move VFP special register to gp register */ + switch (a->reg) { + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + case ARM_VFP_MVFR2: + case ARM_VFP_FPSID: + if (s->current_el == 1) { + TCGv_i32 tcg_reg, tcg_rt; + + gen_set_condexec(s); + gen_set_pc_im(s, s->pc_curr); + tcg_reg = tcg_const_i32(tcg_ctx, a->reg); + tcg_rt = tcg_const_i32(tcg_ctx, a->rt); + gen_helper_check_hcr_el2_trap(tcg_ctx, tcg_ctx->cpu_env, tcg_rt, tcg_reg); + tcg_temp_free_i32(tcg_ctx, tcg_reg); + tcg_temp_free_i32(tcg_ctx, tcg_rt); + } + /* fall through */ + case ARM_VFP_FPEXC: + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + tmp = load_cpu_field(tcg_ctx, vfp.xregs[a->reg]); + break; + case ARM_VFP_FPSCR: + if (a->rt == 15) { + tmp = load_cpu_field(tcg_ctx, vfp.xregs[ARM_VFP_FPSCR]); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xf0000000); + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + gen_helper_vfp_get_fpscr(tcg_ctx, tmp, tcg_ctx->cpu_env); + } + break; + default: + g_assert_not_reached(); + } + + if (a->rt == 15) { + /* Set the 4 flag bits in the CPSR. */ + gen_set_nzcv(tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + store_reg(s, a->rt, tmp); + } + } else { + /* VMSR, move gp register to VFP special register */ + switch (a->reg) { + case ARM_VFP_FPSID: + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + case ARM_VFP_MVFR2: + /* Writes are ignored. */ + break; + case ARM_VFP_FPSCR: + tmp = load_reg(s, a->rt); + gen_helper_vfp_set_fpscr(tcg_ctx, tcg_ctx->cpu_env, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_lookup_tb(s); + break; + case ARM_VFP_FPEXC: + /* + * TODO: VFP subarchitecture support. + * For now, keep the EN bit only + */ + tmp = load_reg(s, a->rt); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 1 << 30); + store_cpu_field(tcg_ctx, tmp, vfp.xregs[a->reg]); + gen_lookup_tb(s); + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + tmp = load_reg(s, a->rt); + store_cpu_field(tcg_ctx, tmp, vfp.xregs[a->reg]); + break; + default: + g_assert_not_reached(); + } + } + + return true; +} + +static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (a->l) { + /* VFP to general purpose register */ + tmp = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, tmp, a->vn); + if (a->rt == 15) { + /* Set the 4 flag bits in the CPSR. 
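+ * (Rt == 15 is not a PC write here: the value lands in the CPSR
+ * flags, as for the FPSCR -> r15 case of VMRS above.)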
*/ + gen_set_nzcv(tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + store_reg(s, a->rt, tmp); + } + } else { + /* general purpose register to VFP */ + tmp = load_reg(s, a->rt); + neon_store_reg32(tcg_ctx, tmp, a->vn); + tcg_temp_free_i32(tcg_ctx, tmp); + } + + return true; +} + +static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + /* + * VMOV between two general-purpose registers and two single precision + * floating point registers + */ + if (!vfp_access_check(s)) { + return true; + } + + if (a->op) { + /* fpreg to gpreg */ + tmp = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, tmp, a->vm); + store_reg(s, a->rt, tmp); + tmp = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, tmp, a->vm + 1); + store_reg(s, a->rt2, tmp); + } else { + /* gpreg to fpreg */ + tmp = load_reg(s, a->rt); + neon_store_reg32(tcg_ctx, tmp, a->vm); + tcg_temp_free_i32(tcg_ctx, tmp); + tmp = load_reg(s, a->rt2); + neon_store_reg32(tcg_ctx, tmp, a->vm + 1); + tcg_temp_free_i32(tcg_ctx, tmp); + } + + return true; +} + +static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + /* + * VMOV between two general-purpose registers and one double precision + * floating point register. Note that this does not require support + * for double precision arithmetic. + */ + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (a->op) { + /* fpreg to gpreg */ + tmp = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, tmp, a->vm * 2); + store_reg(s, a->rt, tmp); + tmp = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, tmp, a->vm * 2 + 1); + store_reg(s, a->rt2, tmp); + } else { + /* gpreg to fpreg */ + tmp = load_reg(s, a->rt); + neon_store_reg32(tcg_ctx, tmp, a->vm * 2); + tcg_temp_free_i32(tcg_ctx, tmp); + tmp = load_reg(s, a->rt2); + neon_store_reg32(tcg_ctx, tmp, a->vm * 2 + 1); + tcg_temp_free_i32(tcg_ctx, tmp); + } + + return true; +} + +static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t offset; + TCGv_i32 addr, tmp; + + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + offset = a->imm << 2; + if (!a->u) { +#ifdef _MSC_VER + offset = 0 - offset; +#else + offset = -offset; +#endif + } + + /* For thumb, use of PC is UNPREDICTABLE. */ + addr = add_reg_for_lit(s, a->rn, offset); + tmp = tcg_temp_new_i32(tcg_ctx); + if (a->l) { + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + neon_store_reg32(tcg_ctx, tmp, a->vd); + } else { + neon_load_reg32(tcg_ctx, tmp, a->vd); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + } + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + + return true; +} + +static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t offset; + TCGv_i32 addr; + TCGv_i64 tmp; + + /* Note that this does not require support for double arithmetic. 
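+ * Only the FP register file has to exist, which is why the check
+ * below is aa32_fpsp_v2 rather than aa32_fpdp_v2.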
*/ + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + offset = a->imm << 2; + if (!a->u) { +#ifdef _MSC_VER + offset = 0 - offset; +#else + offset = -offset; +#endif + } + + /* For thumb, use of PC is UNPREDICTABLE. */ + addr = add_reg_for_lit(s, a->rn, offset); + tmp = tcg_temp_new_i64(tcg_ctx); + if (a->l) { + gen_aa32_ld64(s, tmp, addr, get_mem_index(s)); + neon_store_reg64(tcg_ctx, tmp, a->vd); + } else { + neon_load_reg64(tcg_ctx, tmp, a->vd); + gen_aa32_st64(s, tmp, addr, get_mem_index(s)); + } + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + + return true; +} + +static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t offset; + TCGv_i32 addr, tmp; + int i, n; + + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + n = a->imm; + + if (n == 0 || (a->vd + n) > 32) { + /* + * UNPREDICTABLE cases for bad immediates: we choose to + * UNDEF to avoid generating huge numbers of TCG ops + */ + return false; + } + if (a->rn == 15 && a->w) { + /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + /* For thumb, use of PC is UNPREDICTABLE. */ + addr = add_reg_for_lit(s, a->rn, 0); + if (a->p) { + /* pre-decrement */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, -(a->imm << 2)); + } + + if (s->v8m_stackcheck && a->rn == 13 && a->w) { + /* + * Here 'addr' is the lowest address we will store to, + * and is either the old SP (if post-increment) or + * the new SP (if pre-decrement). For post-increment + * where the old value is below the limit and the new + * value is above, it is UNKNOWN whether the limit check + * triggers; we choose to trigger. + */ + gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, addr); + } + + offset = 4; + tmp = tcg_temp_new_i32(tcg_ctx); + for (i = 0; i < n; i++) { + if (a->l) { + /* load */ + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + neon_store_reg32(tcg_ctx, tmp, a->vd + i); + } else { + /* store */ + neon_load_reg32(tcg_ctx, tmp, a->vd + i); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + } + tcg_temp_free_i32(tcg_ctx, tmp); + if (a->w) { + /* writeback */ + if (a->p) { +#ifdef _MSC_VER + offset = (0 - offset) * n; +#else + offset = -offset * n; +#endif + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + } + store_reg(s, a->rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + + return true; +} + +static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t offset; + TCGv_i32 addr; + TCGv_i64 tmp; + int i, n; + + /* Note that this does not require support for double arithmetic. 
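+ * An odd imm is the deprecated FLDMX/FSTMX form; it only changes
+ * the writeback amount, handled at the bottom of the function.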
*/ + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + n = a->imm >> 1; + + if (n == 0 || (a->vd + n) > 32 || n > 16) { + /* + * UNPREDICTABLE cases for bad immediates: we choose to + * UNDEF to avoid generating huge numbers of TCG ops + */ + return false; + } + if (a->rn == 15 && a->w) { + /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + /* For thumb, use of PC is UNPREDICTABLE. */ + addr = add_reg_for_lit(s, a->rn, 0); + if (a->p) { + /* pre-decrement */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, -(a->imm << 2)); + } + + if (s->v8m_stackcheck && a->rn == 13 && a->w) { + /* + * Here 'addr' is the lowest address we will store to, + * and is either the old SP (if post-increment) or + * the new SP (if pre-decrement). For post-increment + * where the old value is below the limit and the new + * value is above, it is UNKNOWN whether the limit check + * triggers; we choose to trigger. + */ + gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, addr); + } + + offset = 8; + tmp = tcg_temp_new_i64(tcg_ctx); + for (i = 0; i < n; i++) { + if (a->l) { + /* load */ + gen_aa32_ld64(s, tmp, addr, get_mem_index(s)); + neon_store_reg64(tcg_ctx, tmp, a->vd + i); + } else { + /* store */ + neon_load_reg64(tcg_ctx, tmp, a->vd + i); + gen_aa32_st64(s, tmp, addr, get_mem_index(s)); + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + } + tcg_temp_free_i64(tcg_ctx, tmp); + if (a->w) { + /* writeback */ + if (a->p) { +#ifdef _MSC_VER + offset = (0 - offset) * n; +#else + offset = -offset * n; +#endif + } else if (a->imm & 1) { + offset = 4; + } else { + offset = 0; + } + + if (offset != 0) { + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + } + store_reg(s, a->rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + + return true; +} + +/* + * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp(). + * The callback should emit code to write a value to vd. If + * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd + * will contain the old value of the relevant VFP register; + * otherwise it must be written to only. + */ +typedef void VFPGen3OpSPFn(TCGContext *, TCGv_i32 vd, + TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst); +typedef void VFPGen3OpDPFn(TCGContext *, TCGv_i64 vd, + TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst); + +/* + * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp(). + * The callback should emit code to write a value to vd (which + * should be written to only). 
+ */ +typedef void VFPGen2OpSPFn(TCGContext *, TCGv_i32 vd, TCGv_i32 vm); +typedef void VFPGen2OpDPFn(TCGContext *, TCGv_i64 vd, TCGv_i64 vm); + +/* + * Return true if the specified S reg is in a scalar bank + * (ie if it is s0..s7) + */ +static inline bool vfp_sreg_is_scalar(int reg) +{ + return (reg & 0x18) == 0; +} + +/* + * Return true if the specified D reg is in a scalar bank + * (ie if it is d0..d3 or d16..d19) + */ +static inline bool vfp_dreg_is_scalar(int reg) +{ + return (reg & 0xc) == 0; +} + +/* + * Advance the S reg number forwards by delta within its bank + * (ie increment the low 3 bits but leave the rest the same) + */ +static inline int vfp_advance_sreg(int reg, int delta) +{ + return ((reg + delta) & 0x7) | (reg & ~0x7); +} + +/* + * Advance the D reg number forwards by delta within its bank + * (ie increment the low 2 bits but leave the rest the same) + */ +static inline int vfp_advance_dreg(int reg, int delta) +{ + return ((reg + delta) & 0x3) | (reg & ~0x3); +} + +/* + * Perform a 3-operand VFP data processing instruction. fn is the + * callback to do the actual operation; this function deals with the + * code to handle looping around for VFP vector processing. + */ +static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn, + int vd, int vn, int vm, bool reads_vd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t delta_m = 0; + uint32_t delta_d = 0; + int veclen = s->vec_len; + TCGv_i32 f0, f1, fd; + TCGv_ptr fpst; + + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + /* Figure out what type of vector operation this is. */ + if (vfp_sreg_is_scalar(vd)) { + /* scalar */ + veclen = 0; + } else { + delta_d = s->vec_stride + 1; + + if (vfp_sreg_is_scalar(vm)) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + } + + f0 = tcg_temp_new_i32(tcg_ctx); + f1 = tcg_temp_new_i32(tcg_ctx); + fd = tcg_temp_new_i32(tcg_ctx); + fpst = get_fpstatus_ptr(tcg_ctx, 0); + + neon_load_reg32(tcg_ctx, f0, vn); + neon_load_reg32(tcg_ctx, f1, vm); + + for (;;) { + if (reads_vd) { + neon_load_reg32(tcg_ctx, fd, vd); + } + fn(tcg_ctx, fd, f0, f1, fpst); + neon_store_reg32(tcg_ctx, fd, vd); + + if (veclen == 0) { + break; + } + + /* Set up the operands for the next iteration */ + veclen--; + vd = vfp_advance_sreg(vd, delta_d); + vn = vfp_advance_sreg(vn, delta_d); + neon_load_reg32(tcg_ctx, f0, vn); + if (delta_m) { + vm = vfp_advance_sreg(vm, delta_m); + neon_load_reg32(tcg_ctx, f1, vm); + } + } + + tcg_temp_free_i32(tcg_ctx, f0); + tcg_temp_free_i32(tcg_ctx, f1); + tcg_temp_free_i32(tcg_ctx, fd); + tcg_temp_free_ptr(tcg_ctx, fpst); + + return true; +} + +static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, + int vd, int vn, int vm, bool reads_vd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t delta_m = 0; + uint32_t delta_d = 0; + int veclen = s->vec_len; + TCGv_i64 f0, f1, fd; + TCGv_ptr fpst; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) { + return false; + } + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + /* Figure out what type of vector operation this is. 
*/ + if (vfp_dreg_is_scalar(vd)) { + /* scalar */ + veclen = 0; + } else { + delta_d = (s->vec_stride >> 1) + 1; + + if (vfp_dreg_is_scalar(vm)) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + } + + f0 = tcg_temp_new_i64(tcg_ctx); + f1 = tcg_temp_new_i64(tcg_ctx); + fd = tcg_temp_new_i64(tcg_ctx); + fpst = get_fpstatus_ptr(tcg_ctx, 0); + + neon_load_reg64(tcg_ctx, f0, vn); + neon_load_reg64(tcg_ctx, f1, vm); + + for (;;) { + if (reads_vd) { + neon_load_reg64(tcg_ctx, fd, vd); + } + fn(tcg_ctx, fd, f0, f1, fpst); + neon_store_reg64(tcg_ctx, fd, vd); + + if (veclen == 0) { + break; + } + /* Set up the operands for the next iteration */ + veclen--; + vd = vfp_advance_dreg(vd, delta_d); + vn = vfp_advance_dreg(vn, delta_d); + neon_load_reg64(tcg_ctx, f0, vn); + if (delta_m) { + vm = vfp_advance_dreg(vm, delta_m); + neon_load_reg64(tcg_ctx, f1, vm); + } + } + + tcg_temp_free_i64(tcg_ctx, f0); + tcg_temp_free_i64(tcg_ctx, f1); + tcg_temp_free_i64(tcg_ctx, fd); + tcg_temp_free_ptr(tcg_ctx, fpst); + + return true; +} + +static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t delta_m = 0; + uint32_t delta_d = 0; + int veclen = s->vec_len; + TCGv_i32 f0, fd; + + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + /* Figure out what type of vector operation this is. */ + if (vfp_sreg_is_scalar(vd)) { + /* scalar */ + veclen = 0; + } else { + delta_d = s->vec_stride + 1; + + if (vfp_sreg_is_scalar(vm)) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + } + + f0 = tcg_temp_new_i32(tcg_ctx); + fd = tcg_temp_new_i32(tcg_ctx); + + neon_load_reg32(tcg_ctx, f0, vm); + + for (;;) { + fn(tcg_ctx, fd, f0); + neon_store_reg32(tcg_ctx, fd, vd); + + if (veclen == 0) { + break; + } + + if (delta_m == 0) { + /* single source one-many */ + while (veclen--) { + vd = vfp_advance_sreg(vd, delta_d); + neon_store_reg32(tcg_ctx, fd, vd); + } + break; + } + + /* Set up the operands for the next iteration */ + veclen--; + vd = vfp_advance_sreg(vd, delta_d); + vm = vfp_advance_sreg(vm, delta_m); + neon_load_reg32(tcg_ctx, f0, vm); + } + + tcg_temp_free_i32(tcg_ctx, f0); + tcg_temp_free_i32(tcg_ctx, fd); + + return true; +} + +static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t delta_m = 0; + uint32_t delta_d = 0; + int veclen = s->vec_len; + TCGv_i64 f0, fd; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist */ + if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) { + return false; + } + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + /* Figure out what type of vector operation this is. 
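+ * A scalar vm with a vector vd selects the single-source
+ * broadcast taken via delta_m == 0 in the loop below.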
*/ + if (vfp_dreg_is_scalar(vd)) { + /* scalar */ + veclen = 0; + } else { + delta_d = (s->vec_stride >> 1) + 1; + + if (vfp_dreg_is_scalar(vm)) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + + f0 = tcg_temp_new_i64(tcg_ctx); + fd = tcg_temp_new_i64(tcg_ctx); + + neon_load_reg64(tcg_ctx, f0, vm); + + for (;;) { + fn(tcg_ctx, fd, f0); + neon_store_reg64(tcg_ctx, fd, vd); + + if (veclen == 0) { + break; + } + + if (delta_m == 0) { + /* single source one-many */ + while (veclen--) { + vd = vfp_advance_dreg(vd, delta_d); + neon_store_reg64(tcg_ctx, fd, vd); + } + break; + } + + /* Set up the operands for the next iteration */ + veclen--; + vd = vfp_advance_dreg(vd, delta_d); + vm = vfp_advance_dreg(vm, delta_m); + neon_load_reg64(tcg_ctx, f0, vm); + } + + tcg_temp_free_i64(tcg_ctx, f0); + tcg_temp_free_i64(tcg_ctx, fd); + + return true; +} + +static void gen_VMLA_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) +{ + /* Note that order of inputs to the add matters for NaNs */ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + + gen_helper_vfp_muls(tcg_ctx, tmp, vn, vm, fpst); + gen_helper_vfp_adds(tcg_ctx, vd, vd, tmp, fpst); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a) +{ + return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true); +} + +static void gen_VMLA_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) +{ + /* Note that order of inputs to the add matters for NaNs */ + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + gen_helper_vfp_muld(tcg_ctx, tmp, vn, vm, fpst); + gen_helper_vfp_addd(tcg_ctx, vd, vd, tmp, fpst); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a) +{ + return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true); +} + +static void gen_VMLS_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) +{ + /* + * VMLS: vd = vd + -(vn * vm) + * Note that order of inputs to the add matters for NaNs. + */ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + + gen_helper_vfp_muls(tcg_ctx, tmp, vn, vm, fpst); + gen_helper_vfp_negs(tcg_ctx, tmp, tmp); + gen_helper_vfp_adds(tcg_ctx, vd, vd, tmp, fpst); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a) +{ + return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true); +} + +static void gen_VMLS_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) +{ + /* + * VMLS: vd = vd + -(vn * vm) + * Note that order of inputs to the add matters for NaNs. + */ + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + gen_helper_vfp_muld(tcg_ctx, tmp, vn, vm, fpst); + gen_helper_vfp_negd(tcg_ctx, tmp, tmp); + gen_helper_vfp_addd(tcg_ctx, vd, vd, tmp, fpst); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a) +{ + return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true); +} + +static void gen_VNMLS_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) +{ + /* + * VNMLS: -fd + (fn * fm) + * Note that it isn't valid to replace (-A + B) with (B - A) or similar + * plausible looking simplifications because this will give wrong results + * for NaNs.
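+ * (A negated input that is a NaN must come out with its sign bit
+ * flipped; rewriting as B - A would not preserve that.)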
+ */ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + + gen_helper_vfp_muls(tcg_ctx, tmp, vn, vm, fpst); + gen_helper_vfp_negs(tcg_ctx, vd, vd); + gen_helper_vfp_adds(tcg_ctx, vd, vd, tmp, fpst); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a) +{ + return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true); +} + +static void gen_VNMLS_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) +{ + /* + * VNMLS: -fd + (fn * fm) + * Note that it isn't valid to replace (-A + B) with (B - A) or similar + * plausible looking simplifications because this will give wrong results + * for NaNs. + */ + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + gen_helper_vfp_muld(tcg_ctx, tmp, vn, vm, fpst); + gen_helper_vfp_negd(tcg_ctx, vd, vd); + gen_helper_vfp_addd(tcg_ctx, vd, vd, tmp, fpst); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a) +{ + return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true); +} + +static void gen_VNMLA_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) +{ + /* VNMLA: -fd + -(fn * fm) */ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + + gen_helper_vfp_muls(tcg_ctx, tmp, vn, vm, fpst); + gen_helper_vfp_negs(tcg_ctx, tmp, tmp); + gen_helper_vfp_negs(tcg_ctx, vd, vd); + gen_helper_vfp_adds(tcg_ctx, vd, vd, tmp, fpst); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a) +{ + return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true); +} + +static void gen_VNMLA_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) +{ + /* VNMLA: -fd + -(fn * fm) */ + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + gen_helper_vfp_muld(tcg_ctx, tmp, vn, vm, fpst); + gen_helper_vfp_negd(tcg_ctx, tmp, tmp); + gen_helper_vfp_negd(tcg_ctx, vd, vd); + gen_helper_vfp_addd(tcg_ctx, vd, vd, tmp, fpst); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a) +{ + return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true); +} + +static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a) +{ + return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false); +} + +static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a) +{ + return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false); +} + +static void gen_VNMUL_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) +{ + /* VNMUL: -(fn * fm) */ + gen_helper_vfp_muls(tcg_ctx, vd, vn, vm, fpst); + gen_helper_vfp_negs(tcg_ctx, vd, vd); +} + +static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a) +{ + return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false); +} + +static void gen_VNMUL_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) +{ + /* VNMUL: -(fn * fm) */ + gen_helper_vfp_muld(tcg_ctx, vd, vn, vm, fpst); + gen_helper_vfp_negd(tcg_ctx, vd, vd); +} + +static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a) +{ + return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false); +} + +static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a) +{ + return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false); +} + +static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a) +{ + return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false); +} + +static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a) +{ + return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn,
a->vm, false); +} + +static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a) +{ + return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false); +} + +static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a) +{ + return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false); +} + +static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a) +{ + return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false); +} + +static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a) +{ + if (!dc_isar_feature(aa32_vminmaxnm, s)) { + return false; + } + return do_vfp_3op_sp(s, gen_helper_vfp_minnums, + a->vd, a->vn, a->vm, false); +} + +static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a) +{ + if (!dc_isar_feature(aa32_vminmaxnm, s)) { + return false; + } + return do_vfp_3op_sp(s, gen_helper_vfp_maxnums, + a->vd, a->vn, a->vm, false); +} + +static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a) +{ + if (!dc_isar_feature(aa32_vminmaxnm, s)) { + return false; + } + return do_vfp_3op_dp(s, gen_helper_vfp_minnumd, + a->vd, a->vn, a->vm, false); +} + +static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a) +{ + if (!dc_isar_feature(aa32_vminmaxnm, s)) { + return false; + } + return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd, + a->vd, a->vn, a->vm, false); +} + +static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* + * VFNMA : fd = muladd(-fd, fn, fm) + * VFNMS : fd = muladd(-fd, -fn, fm) + * VFMA : fd = muladd( fd, fn, fm) + * VFMS : fd = muladd( fd, -fn, fm) + * + * These are fused multiply-add, and must be done as one floating + * point operation with no rounding between the multiplication and + * addition steps. NB that doing the negations here as separate + * steps is correct : an input NaN should come out with its sign + * bit flipped if it is a negated-input. + */ + TCGv_ptr fpst; + TCGv_i32 vn, vm, vd; + + /* + * Present in VFPv4 only. + * Note that we can't rely on the SIMDFMAC check alone, because + * in a Neon-no-VFP core that ID register field will be non-zero. + */ + if (!dc_isar_feature(aa32_simdfmac, s) || + !dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + /* + * In v7A, UNPREDICTABLE with non-zero vector length/stride; from + * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A. 
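+ * (The fused MAC insns have no short-vector form in any case, so
+ * nothing is lost by treating v7A and v8A alike.)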
+ */ + if (s->vec_len != 0 || s->vec_stride != 0) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vn = tcg_temp_new_i32(tcg_ctx); + vm = tcg_temp_new_i32(tcg_ctx); + vd = tcg_temp_new_i32(tcg_ctx); + + neon_load_reg32(tcg_ctx, vn, a->vn); + neon_load_reg32(tcg_ctx, vm, a->vm); + if (neg_n) { + /* VFNMS, VFMS */ + gen_helper_vfp_negs(tcg_ctx, vn, vn); + } + neon_load_reg32(tcg_ctx, vd, a->vd); + if (neg_d) { + /* VFNMA, VFNMS */ + gen_helper_vfp_negs(tcg_ctx, vd, vd); + } + fpst = get_fpstatus_ptr(tcg_ctx, 0); + gen_helper_vfp_muladds(tcg_ctx, vd, vn, vm, vd, fpst); + neon_store_reg32(tcg_ctx, vd, a->vd); + + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, vn); + tcg_temp_free_i32(tcg_ctx, vm); + tcg_temp_free_i32(tcg_ctx, vd); + + return true; +} + +static bool trans_VFMA_sp(DisasContext *s, arg_VFMA_sp *a) +{ + return do_vfm_sp(s, a, false, false); +} + +static bool trans_VFMS_sp(DisasContext *s, arg_VFMS_sp *a) +{ + return do_vfm_sp(s, a, true, false); +} + +static bool trans_VFNMA_sp(DisasContext *s, arg_VFNMA_sp *a) +{ + return do_vfm_sp(s, a, false, true); +} + +static bool trans_VFNMS_sp(DisasContext *s, arg_VFNMS_sp *a) +{ + return do_vfm_sp(s, a, true, true); +} + +static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* + * VFNMA : fd = muladd(-fd, fn, fm) + * VFNMS : fd = muladd(-fd, -fn, fm) + * VFMA : fd = muladd( fd, fn, fm) + * VFMS : fd = muladd( fd, -fn, fm) + * + * These are fused multiply-add, and must be done as one floating + * point operation with no rounding between the multiplication and + * addition steps. NB that doing the negations here as separate + * steps is correct : an input NaN should come out with its sign + * bit flipped if it is a negated-input. + */ + TCGv_ptr fpst; + TCGv_i64 vn, vm, vd; + + /* + * Present in VFPv4 only. + * Note that we can't rely on the SIMDFMAC check alone, because + * in a Neon-no-VFP core that ID register field will be non-zero. + */ + if (!dc_isar_feature(aa32_simdfmac, s) || + !dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + /* + * In v7A, UNPREDICTABLE with non-zero vector length/stride; from + * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A. + */ + if (s->vec_len != 0 || s->vec_stride != 0) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist.
*/ + if (!dc_isar_feature(aa32_simd_r32, s) && + ((a->vd | a->vn | a->vm) & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vn = tcg_temp_new_i64(tcg_ctx); + vm = tcg_temp_new_i64(tcg_ctx); + vd = tcg_temp_new_i64(tcg_ctx); + + neon_load_reg64(tcg_ctx, vn, a->vn); + neon_load_reg64(tcg_ctx, vm, a->vm); + if (neg_n) { + /* VFNMS, VFMS */ + gen_helper_vfp_negd(tcg_ctx, vn, vn); + } + neon_load_reg64(tcg_ctx, vd, a->vd); + if (neg_d) { + /* VFNMA, VFNMS */ + gen_helper_vfp_negd(tcg_ctx, vd, vd); + } + fpst = get_fpstatus_ptr(tcg_ctx, 0); + gen_helper_vfp_muladdd(tcg_ctx, vd, vn, vm, vd, fpst); + neon_store_reg64(tcg_ctx, vd, a->vd); + + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i64(tcg_ctx, vn); + tcg_temp_free_i64(tcg_ctx, vm); + tcg_temp_free_i64(tcg_ctx, vd); + + return true; +} + +static bool trans_VFMA_dp(DisasContext *s, arg_VFMA_dp *a) +{ + return do_vfm_dp(s, a, false, false); +} + +static bool trans_VFMS_dp(DisasContext *s, arg_VFMS_dp *a) +{ + return do_vfm_dp(s, a, true, false); +} + +static bool trans_VFNMA_dp(DisasContext *s, arg_VFNMA_dp *a) +{ + return do_vfm_dp(s, a, false, true); +} + +static bool trans_VFNMS_dp(DisasContext *s, arg_VFNMS_dp *a) +{ + return do_vfm_dp(s, a, true, true); +} + +static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t delta_d = 0; + int veclen = s->vec_len; + TCGv_i32 fd; + uint32_t vd; + + vd = a->vd; + + if (!dc_isar_feature(aa32_fpsp_v3, s)) { + return false; + } + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + /* Figure out what type of vector operation this is. */ + if (vfp_sreg_is_scalar(vd)) { + /* scalar */ + veclen = 0; + } else { + delta_d = s->vec_stride + 1; + } + } + + fd = tcg_const_i32(tcg_ctx, vfp_expand_imm(MO_32, a->imm)); + + for (;;) { + neon_store_reg32(tcg_ctx, fd, vd); + + if (veclen == 0) { + break; + } + + /* Set up the operands for the next iteration */ + veclen--; + vd = vfp_advance_sreg(vd, delta_d); + } + + tcg_temp_free_i32(tcg_ctx, fd); + return true; +} + +static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t delta_d = 0; + int veclen = s->vec_len; + TCGv_i64 fd; + uint32_t vd; + + vd = a->vd; + + if (!dc_isar_feature(aa32_fpdp_v3, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) { + return false; + } + + if (!dc_isar_feature(aa32_fpshvec, s) && + (veclen != 0 || s->vec_stride != 0)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + if (veclen > 0) { + /* Figure out what type of vector operation this is. 
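+ * VMOV (immediate) has no vm operand, so only the destination
+ * stride matters here.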
*/ + if (vfp_dreg_is_scalar(vd)) { + /* scalar */ + veclen = 0; + } else { + delta_d = (s->vec_stride >> 1) + 1; + } + } + + fd = tcg_const_i64(tcg_ctx, vfp_expand_imm(MO_64, a->imm)); + + for (;;) { + neon_store_reg64(tcg_ctx, fd, vd); + + if (veclen == 0) { + break; + } + + /* Set up the operands for the next iteration */ + veclen--; + vd = vfp_advance_dreg(vd, delta_d); + } + + tcg_temp_free_i64(tcg_ctx, fd); + return true; +} + +static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a) +{ + return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm); +} + +static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a) +{ + return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm); +} + +static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a) +{ + return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm); +} + +static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a) +{ + return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm); +} + +static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a) +{ + return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm); +} + +static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a) +{ + return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm); +} + +static void gen_VSQRT_sp(TCGContext *tcg_ctx, TCGv_i32 vd, TCGv_i32 vm) +{ + gen_helper_vfp_sqrts(tcg_ctx, vd, vm, tcg_ctx->cpu_env); +} + +static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a) +{ + return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm); +} + +static void gen_VSQRT_dp(TCGContext *tcg_ctx, TCGv_i64 vd, TCGv_i64 vm) +{ + gen_helper_vfp_sqrtd(tcg_ctx, vd, vm, tcg_ctx->cpu_env); +} + +static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a) +{ + return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm); +} + +static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 vd, vm; + + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + /* Vm/M bits must be zero for the Z variant */ + if (a->z && a->vm != 0) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vd = tcg_temp_new_i32(tcg_ctx); + vm = tcg_temp_new_i32(tcg_ctx); + + neon_load_reg32(tcg_ctx, vd, a->vd); + if (a->z) { + tcg_gen_movi_i32(tcg_ctx, vm, 0); + } else { + neon_load_reg32(tcg_ctx, vm, a->vm); + } + + if (a->e) { + gen_helper_vfp_cmpes(tcg_ctx, vd, vm, tcg_ctx->cpu_env); + } else { + gen_helper_vfp_cmps(tcg_ctx, vd, vm, tcg_ctx->cpu_env); + } + + tcg_temp_free_i32(tcg_ctx, vd); + tcg_temp_free_i32(tcg_ctx, vm); + + return true; +} + +static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 vd, vm; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + /* Vm/M bits must be zero for the Z variant */ + if (a->z && a->vm != 0) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. 
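+ * (aa32_simd_r32 tests MVFR0.SIMDReg for the full 32-dreg file.)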
*/ + if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vd = tcg_temp_new_i64(tcg_ctx); + vm = tcg_temp_new_i64(tcg_ctx); + + neon_load_reg64(tcg_ctx, vd, a->vd); + if (a->z) { + tcg_gen_movi_i64(tcg_ctx, vm, 0); + } else { + neon_load_reg64(tcg_ctx, vm, a->vm); + } + + if (a->e) { + gen_helper_vfp_cmped(tcg_ctx, vd, vm, tcg_ctx->cpu_env); + } else { + gen_helper_vfp_cmpd(tcg_ctx, vd, vm, tcg_ctx->cpu_env); + } + + tcg_temp_free_i64(tcg_ctx, vd); + tcg_temp_free_i64(tcg_ctx, vm); + + return true; +} + +static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i32 ahp_mode; + TCGv_i32 tmp; + + if (!dc_isar_feature(aa32_fp16_spconv, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(tcg_ctx, false); + ahp_mode = get_ahp_flag(tcg_ctx); + tmp = tcg_temp_new_i32(tcg_ctx); + /* The T bit tells us if we want the low or high 16 bits of Vm */ + tcg_gen_ld16u_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_f16_offset(a->vm, a->t)); + gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tmp, tmp, fpst, ahp_mode); + neon_store_reg32(tcg_ctx, tmp, a->vd); + tcg_temp_free_i32(tcg_ctx, ahp_mode); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, tmp); + return true; +} + +static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i32 ahp_mode; + TCGv_i32 tmp; + TCGv_i64 vd; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + if (!dc_isar_feature(aa32_fp16_dpconv, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(tcg_ctx, false); + ahp_mode = get_ahp_flag(tcg_ctx); + tmp = tcg_temp_new_i32(tcg_ctx); + /* The T bit tells us if we want the low or high 16 bits of Vm */ + tcg_gen_ld16u_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_f16_offset(a->vm, a->t)); + vd = tcg_temp_new_i64(tcg_ctx); + gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, vd, tmp, fpst, ahp_mode); + neon_store_reg64(tcg_ctx, vd, a->vd); + tcg_temp_free_i32(tcg_ctx, ahp_mode); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i64(tcg_ctx, vd); + return true; +} + +static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i32 ahp_mode; + TCGv_i32 tmp; + + if (!dc_isar_feature(aa32_fp16_spconv, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(tcg_ctx, false); + ahp_mode = get_ahp_flag(tcg_ctx); + tmp = tcg_temp_new_i32(tcg_ctx); + + neon_load_reg32(tcg_ctx, tmp, a->vm); + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp, tmp, fpst, ahp_mode); + tcg_gen_st16_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_f16_offset(a->vd, a->t)); + tcg_temp_free_i32(tcg_ctx, ahp_mode); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, tmp); + return true; +} + +static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i32 ahp_mode; + TCGv_i32 tmp; + TCGv_i64 vm; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + if (!dc_isar_feature(aa32_fp16_dpconv, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if 
they don't exist. */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(tcg_ctx, false); + ahp_mode = get_ahp_flag(tcg_ctx); + tmp = tcg_temp_new_i32(tcg_ctx); + vm = tcg_temp_new_i64(tcg_ctx); + + neon_load_reg64(tcg_ctx, vm, a->vm); + gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tmp, vm, fpst, ahp_mode); + tcg_temp_free_i64(tcg_ctx, vm); + tcg_gen_st16_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, vfp_f16_offset(a->vd, a->t)); + tcg_temp_free_i32(tcg_ctx, ahp_mode); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, tmp); + return true; +} + +static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i32 tmp; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, tmp, a->vm); + fpst = get_fpstatus_ptr(tcg_ctx, false); + gen_helper_rints(tcg_ctx, tmp, tmp, fpst); + neon_store_reg32(tcg_ctx, tmp, a->vd); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, tmp); + return true; +} + +static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i64 tmp; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i64(tcg_ctx); + neon_load_reg64(tcg_ctx, tmp, a->vm); + fpst = get_fpstatus_ptr(tcg_ctx, false); + gen_helper_rintd(tcg_ctx, tmp, tmp, fpst); + neon_store_reg64(tcg_ctx, tmp, a->vd); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i64(tcg_ctx, tmp); + return true; +} + +static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i32 tmp; + TCGv_i32 tcg_rmode; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, tmp, a->vm); + fpst = get_fpstatus_ptr(tcg_ctx, false); + tcg_rmode = tcg_const_i32(tcg_ctx, float_round_to_zero); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + gen_helper_rints(tcg_ctx, tmp, tmp, fpst); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + neon_store_reg32(tcg_ctx, tmp, a->vd); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + tcg_temp_free_i32(tcg_ctx, tmp); + return true; +} + +static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i64 tmp; + TCGv_i32 tcg_rmode; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. 
*/ + if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i64(tcg_ctx); + neon_load_reg64(tcg_ctx, tmp, a->vm); + fpst = get_fpstatus_ptr(tcg_ctx, false); + tcg_rmode = tcg_const_i32(tcg_ctx, float_round_to_zero); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + gen_helper_rintd(tcg_ctx, tmp, tmp, fpst); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, fpst); + neon_store_reg64(tcg_ctx, tmp, a->vd); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + return true; +} + +static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i32 tmp; + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, tmp, a->vm); + fpst = get_fpstatus_ptr(tcg_ctx, false); + gen_helper_rints_exact(tcg_ctx, tmp, tmp, fpst); + neon_store_reg32(tcg_ctx, tmp, a->vd); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, tmp); + return true; +} + +static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i64 tmp; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + if (!dc_isar_feature(aa32_vrint, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i64(tcg_ctx); + neon_load_reg64(tcg_ctx, tmp, a->vm); + fpst = get_fpstatus_ptr(tcg_ctx, false); + gen_helper_rintd_exact(tcg_ctx, tmp, tmp, fpst); + neon_store_reg64(tcg_ctx, tmp, a->vd); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i64(tcg_ctx, tmp); + return true; +} + +static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 vd; + TCGv_i32 vm; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vm = tcg_temp_new_i32(tcg_ctx); + vd = tcg_temp_new_i64(tcg_ctx); + neon_load_reg32(tcg_ctx, vm, a->vm); + gen_helper_vfp_fcvtds(tcg_ctx, vd, vm, tcg_ctx->cpu_env); + neon_store_reg64(tcg_ctx, vd, a->vd); + tcg_temp_free_i32(tcg_ctx, vm); + tcg_temp_free_i64(tcg_ctx, vd); + return true; +} + +static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 vm; + TCGv_i32 vd; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. 
*/ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vd = tcg_temp_new_i32(tcg_ctx); + vm = tcg_temp_new_i64(tcg_ctx); + neon_load_reg64(tcg_ctx, vm, a->vm); + gen_helper_vfp_fcvtsd(tcg_ctx, vd, vm, tcg_ctx->cpu_env); + neon_store_reg32(tcg_ctx, vd, a->vd); + tcg_temp_free_i32(tcg_ctx, vd); + tcg_temp_free_i64(tcg_ctx, vm); + return true; +} + +static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 vm; + TCGv_ptr fpst; + + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vm = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, vm, a->vm); + fpst = get_fpstatus_ptr(tcg_ctx, false); + if (a->s) { + /* i32 -> f32 */ + gen_helper_vfp_sitos(tcg_ctx, vm, vm, fpst); + } else { + /* u32 -> f32 */ + gen_helper_vfp_uitos(tcg_ctx, vm, vm, fpst); + } + neon_store_reg32(tcg_ctx, vm, a->vd); + tcg_temp_free_i32(tcg_ctx, vm); + tcg_temp_free_ptr(tcg_ctx, fpst); + return true; +} + +static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 vm; + TCGv_i64 vd; + TCGv_ptr fpst; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vm = tcg_temp_new_i32(tcg_ctx); + vd = tcg_temp_new_i64(tcg_ctx); + neon_load_reg32(tcg_ctx, vm, a->vm); + fpst = get_fpstatus_ptr(tcg_ctx, false); + if (a->s) { + /* i32 -> f64 */ + gen_helper_vfp_sitod(tcg_ctx, vd, vm, fpst); + } else { + /* u32 -> f64 */ + gen_helper_vfp_uitod(tcg_ctx, vd, vm, fpst); + } + neon_store_reg64(tcg_ctx, vd, a->vd); + tcg_temp_free_i32(tcg_ctx, vm); + tcg_temp_free_i64(tcg_ctx, vd); + tcg_temp_free_ptr(tcg_ctx, fpst); + return true; +} + +static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 vd; + TCGv_i64 vm; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + if (!dc_isar_feature(aa32_jscvt, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + vm = tcg_temp_new_i64(tcg_ctx); + vd = tcg_temp_new_i32(tcg_ctx); + neon_load_reg64(tcg_ctx, vm, a->vm); + gen_helper_vjcvt(tcg_ctx, vd, vm, tcg_ctx->cpu_env); + neon_store_reg32(tcg_ctx, vd, a->vd); + tcg_temp_free_i64(tcg_ctx, vm); + tcg_temp_free_i32(tcg_ctx, vd); + return true; +} + +static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 vd, shift; + TCGv_ptr fpst; + int frac_bits; + + if (!dc_isar_feature(aa32_fpsp_v3, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + frac_bits = (a->opc & 1) ? 
(32 - a->imm) : (16 - a->imm); + + vd = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, vd, a->vd); + + fpst = get_fpstatus_ptr(tcg_ctx, false); + shift = tcg_const_i32(tcg_ctx, frac_bits); + + /* Switch on op:U:sx bits */ + switch (a->opc) { + case 0: + gen_helper_vfp_shtos(tcg_ctx, vd, vd, shift, fpst); + break; + case 1: + gen_helper_vfp_sltos(tcg_ctx, vd, vd, shift, fpst); + break; + case 2: + gen_helper_vfp_uhtos(tcg_ctx, vd, vd, shift, fpst); + break; + case 3: + gen_helper_vfp_ultos(tcg_ctx, vd, vd, shift, fpst); + break; + case 4: + gen_helper_vfp_toshs_round_to_zero(tcg_ctx, vd, vd, shift, fpst); + break; + case 5: + gen_helper_vfp_tosls_round_to_zero(tcg_ctx, vd, vd, shift, fpst); + break; + case 6: + gen_helper_vfp_touhs_round_to_zero(tcg_ctx, vd, vd, shift, fpst); + break; + case 7: + gen_helper_vfp_touls_round_to_zero(tcg_ctx, vd, vd, shift, fpst); + break; + default: + g_assert_not_reached(); + } + + neon_store_reg32(tcg_ctx, vd, a->vd); + tcg_temp_free_i32(tcg_ctx, vd); + tcg_temp_free_i32(tcg_ctx, shift); + tcg_temp_free_ptr(tcg_ctx, fpst); + return true; +} + +static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 vd; + TCGv_i32 shift; + TCGv_ptr fpst; + int frac_bits; + + if (!dc_isar_feature(aa32_fpdp_v3, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm); + + vd = tcg_temp_new_i64(tcg_ctx); + neon_load_reg64(tcg_ctx, vd, a->vd); + + fpst = get_fpstatus_ptr(tcg_ctx, false); + shift = tcg_const_i32(tcg_ctx, frac_bits); + + /* Switch on op:U:sx bits */ + switch (a->opc) { + case 0: + gen_helper_vfp_shtod(tcg_ctx, vd, vd, shift, fpst); + break; + case 1: + gen_helper_vfp_sltod(tcg_ctx, vd, vd, shift, fpst); + break; + case 2: + gen_helper_vfp_uhtod(tcg_ctx, vd, vd, shift, fpst); + break; + case 3: + gen_helper_vfp_ultod(tcg_ctx, vd, vd, shift, fpst); + break; + case 4: + gen_helper_vfp_toshd_round_to_zero(tcg_ctx, vd, vd, shift, fpst); + break; + case 5: + gen_helper_vfp_tosld_round_to_zero(tcg_ctx, vd, vd, shift, fpst); + break; + case 6: + gen_helper_vfp_touhd_round_to_zero(tcg_ctx, vd, vd, shift, fpst); + break; + case 7: + gen_helper_vfp_tould_round_to_zero(tcg_ctx, vd, vd, shift, fpst); + break; + default: + g_assert_not_reached(); + } + + neon_store_reg64(tcg_ctx, vd, a->vd); + tcg_temp_free_i64(tcg_ctx, vd); + tcg_temp_free_i32(tcg_ctx, shift); + tcg_temp_free_ptr(tcg_ctx, fpst); + return true; +} + +static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 vm; + TCGv_ptr fpst; + + if (!dc_isar_feature(aa32_fpsp_v2, s)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(tcg_ctx, false); + vm = tcg_temp_new_i32(tcg_ctx); + neon_load_reg32(tcg_ctx, vm, a->vm); + + if (a->s) { + if (a->rz) { + gen_helper_vfp_tosizs(tcg_ctx, vm, vm, fpst); + } else { + gen_helper_vfp_tosis(tcg_ctx, vm, vm, fpst); + } + } else { + if (a->rz) { + gen_helper_vfp_touizs(tcg_ctx, vm, vm, fpst); + } else { + gen_helper_vfp_touis(tcg_ctx, vm, vm, fpst); + } + } + neon_store_reg32(tcg_ctx, vm, a->vd); + tcg_temp_free_i32(tcg_ctx, vm); + tcg_temp_free_ptr(tcg_ctx, fpst); + return true; +} + +static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a) +{ + TCGContext *tcg_ctx = 
s->uc->tcg_ctx; + TCGv_i32 vd; + TCGv_i64 vm; + TCGv_ptr fpst; + + if (!dc_isar_feature(aa32_fpdp_v2, s)) { + return false; + } + + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + fpst = get_fpstatus_ptr(tcg_ctx, false); + vm = tcg_temp_new_i64(tcg_ctx); + vd = tcg_temp_new_i32(tcg_ctx); + neon_load_reg64(tcg_ctx, vm, a->vm); + + if (a->s) { + if (a->rz) { + gen_helper_vfp_tosizd(tcg_ctx, vd, vm, fpst); + } else { + gen_helper_vfp_tosid(tcg_ctx, vd, vm, fpst); + } + } else { + if (a->rz) { + gen_helper_vfp_touizd(tcg_ctx, vd, vm, fpst); + } else { + gen_helper_vfp_touid(tcg_ctx, vd, vm, fpst); + } + } + neon_store_reg32(tcg_ctx, vd, a->vd); + tcg_temp_free_i32(tcg_ctx, vd); + tcg_temp_free_i64(tcg_ctx, vm); + tcg_temp_free_ptr(tcg_ctx, fpst); + return true; +} + +/* + * Decode VLLDM and VLSTM are nonstandard because: + * * if there is no FPU then these insns must NOP in + * Secure state and UNDEF in Nonsecure state + * * if there is an FPU then these insns do not have + * the usual behaviour that vfp_access_check() provides of + * being controlled by CPACR/NSACR enable bits or the + * lazy-stacking logic. + */ +static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 fptr; + + if (!arm_dc_feature(s, ARM_FEATURE_M) || + !arm_dc_feature(s, ARM_FEATURE_V8)) { + return false; + } + /* If not secure, UNDEF. */ + if (!s->v8m_secure) { + return false; + } + /* If no fpu, NOP. */ + if (!dc_isar_feature(aa32_vfp, s)) { + return true; + } + + fptr = load_reg(s, a->rn); + if (a->l) { + gen_helper_v7m_vlldm(tcg_ctx, tcg_ctx->cpu_env, fptr); + } else { + gen_helper_v7m_vlstm(tcg_ctx, tcg_ctx->cpu_env, fptr); + } + tcg_temp_free_i32(tcg_ctx, fptr); + + /* End the TB, because we have updated FP control bits */ + s->base.is_jmp = DISAS_UPDATE; + return true; +} diff --git a/qemu/target/arm/translate.c b/qemu/target/arm/translate.c new file mode 100644 index 00000000..742a55fd --- /dev/null +++ b/qemu/target/arm/translate.c @@ -0,0 +1,11761 @@ +/* + * ARM translation + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2005-2007 CodeSourcery + * Copyright (c) 2007 OpenedHand, Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#include "qemu/osdep.h" + +#include "cpu.h" +#include "internals.h" +#include "exec/exec-all.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" +#include "qemu/log.h" +#include "qemu/bitops.h" +#include "arm_ldst.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T) +#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5) +/* currently all emulated v5 cores are also v5TE, so don't bother */ +#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5) +#define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s) +#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6) +#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K) +#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2) +#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7) +#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8) + +#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0) + +#include "translate.h" + +#define IS_USER(s) (s->user) + +#include "exec/gen-icount.h" + +static const char * const regnames[] = + { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" }; + +/* Function prototypes for gen_ functions calling Neon helpers. */ +typedef void NeonGenThreeOpEnvFn(TCGContext *, TCGv_i32, TCGv_env, TCGv_i32, + TCGv_i32, TCGv_i32); +/* Function prototypes for gen_ functions for fix point conversions */ +typedef void VFPGenFixPointFn(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); + +/* initialize TCG globals. */ +void arm_translate_init(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + int i; + + for (i = 0; i < 16; i++) { + tcg_ctx->cpu_R[i] = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUARMState, regs[i]), + regnames[i]); + } + tcg_ctx->cpu_CF = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, CF), "CF"); + tcg_ctx->cpu_NF = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, NF), "NF"); + tcg_ctx->cpu_VF = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, VF), "VF"); + tcg_ctx->cpu_ZF = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUARMState, ZF), "ZF"); + + tcg_ctx->cpu_exclusive_addr = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUARMState, exclusive_addr), "exclusive_addr"); + tcg_ctx->cpu_exclusive_val = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUARMState, exclusive_val), "exclusive_val"); + + a64_translate_init(uc); +} + +/* Flags for the disas_set_da_iss info argument: + * lower bits hold the Rt register number, higher bits are flags. + */ +typedef enum ISSInfo { + ISSNone = 0, + ISSRegMask = 0x1f, + ISSInvalid = (1 << 5), + ISSIsAcqRel = (1 << 6), + ISSIsWrite = (1 << 7), + ISSIs16Bit = (1 << 8), +} ISSInfo; + +/* Save the syndrome information for a Data Abort */ +static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo) +{ + uint32_t syn; + int sas = memop & MO_SIZE; + bool sse = memop & MO_SIGN; + bool is_acqrel = issinfo & ISSIsAcqRel; + bool is_write = issinfo & ISSIsWrite; + bool is_16bit = issinfo & ISSIs16Bit; + int srt = issinfo & ISSRegMask; + + if (issinfo & ISSInvalid) { + /* Some callsites want to conditionally provide ISS info, + * eg "only if this was not a writeback" + */ + return; + } + + if (srt == 15) { + /* For AArch32, insns where the src/dest is R15 never generate + * ISS information. Catching that here saves checking at all + * the call sites. 
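+ * (srt was extracted from the low five bits of issinfo via
+ * ISSRegMask, so srt == 15 means the insn named R15 as its Rt.)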
+ */ + return; + } + + syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel, + 0, 0, 0, is_write, 0, is_16bit); + disas_set_insn_syndrome(s, syn); +} + +static inline int get_a32_user_mem_index(DisasContext *s) +{ + /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store" + * insns: + * if PL2, UNPREDICTABLE (we choose to implement as if PL0) + * otherwise, access as if at PL0. + */ + switch (s->mmu_idx) { + case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */ + case ARMMMUIdx_E10_0: + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + return arm_to_core_mmu_idx(ARMMMUIdx_E10_0); + case ARMMMUIdx_SE3: + case ARMMMUIdx_SE10_0: + case ARMMMUIdx_SE10_1: + case ARMMMUIdx_SE10_1_PAN: + return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0); + case ARMMMUIdx_MUser: + case ARMMMUIdx_MPriv: + return arm_to_core_mmu_idx(ARMMMUIdx_MUser); + case ARMMMUIdx_MUserNegPri: + case ARMMMUIdx_MPrivNegPri: + return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri); + case ARMMMUIdx_MSUser: + case ARMMMUIdx_MSPriv: + return arm_to_core_mmu_idx(ARMMMUIdx_MSUser); + case ARMMMUIdx_MSUserNegPri: + case ARMMMUIdx_MSPrivNegPri: + return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri); + default: + g_assert_not_reached(); + // never reach here + return 0; + } +} + +static inline TCGv_i32 load_cpu_offset(TCGContext *tcg_ctx, int offset) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offset); + return tmp; +} + +#define load_cpu_field(tcg_ctx, name) load_cpu_offset(tcg_ctx, offsetof(CPUARMState, name)) + +static inline void store_cpu_offset(TCGContext *tcg_ctx, TCGv_i32 var, int offset) +{ + tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); + tcg_temp_free_i32(tcg_ctx, var); +} + +#define store_cpu_field(tcg_ctx, var, name) \ + store_cpu_offset(tcg_ctx, var, offsetof(CPUARMState, name)) + +/* The architectural value of PC. */ +static uint32_t read_pc(DisasContext *s) +{ + return s->pc_curr + (s->thumb ? 4 : 8); +} + +/* Set a variable to the value of a CPU register. */ +static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (reg == 15) { + tcg_gen_movi_i32(tcg_ctx, var, read_pc(s)); + } else { + tcg_gen_mov_i32(tcg_ctx, var, tcg_ctx->cpu_R[reg]); + } +} + +/* Create a new temporary and set it to the value of a CPU register. */ +static inline TCGv_i32 load_reg(DisasContext *s, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + load_reg_var(s, tmp, reg); + return tmp; +} + +/* + * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4). + * This is used for load/store for which use of PC implies (literal), + * or ADD that implies ADR. + */ +static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + + if (reg == 15) { + tcg_gen_movi_i32(tcg_ctx, tmp, (read_pc(s) & ~3) + ofs); + } else { + tcg_gen_addi_i32(tcg_ctx, tmp, tcg_ctx->cpu_R[reg], ofs); + } + return tmp; +} + +/* Set a CPU register. The source must be a temporary and will be + marked as dead. */ +static void store_reg(DisasContext *s, int reg, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (reg == 15) { + /* In Thumb mode, we must ignore bit 0. + * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0] + * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0]. + * We choose to ignore [1:0] in ARM mode for all architecture versions. 
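+ * For example, with this choice a write of 0x00008003 to the PC
+ * stores 0x00008002 in Thumb mode (bit 0 masked) and 0x00008000 in
+ * ARM mode (bits [1:0] masked).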
+ */ + tcg_gen_andi_i32(tcg_ctx, var, var, s->thumb ? ~1 : ~3); + s->base.is_jmp = DISAS_JUMP; + } + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_R[reg], var); + tcg_temp_free_i32(tcg_ctx, var); +} + +/* + * Variant of store_reg which applies v8M stack-limit checks before updating + * SP. If the check fails this will result in an exception being taken. + * We disable the stack checks for CONFIG_USER_ONLY because we have + * no idea what the stack limits should be in that case. + * If stack checking is not being done this just acts like store_reg(). + */ +static void store_sp_checked(DisasContext *s, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (s->v8m_stackcheck) { + gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, var); + } + store_reg(s, 13, var); +} + +/* Value extensions. */ +#define gen_uxtb(var) tcg_gen_ext8u_i32(tcg_ctx, var, var) +#define gen_uxth(var) tcg_gen_ext16u_i32(tcg_ctx, var, var) +#define gen_sxtb(var) tcg_gen_ext8s_i32(tcg_ctx, var, var) +#define gen_sxth(var) tcg_gen_ext16s_i32(tcg_ctx, var, var) + +#define gen_sxtb16(var) gen_helper_sxtb16(tcg_ctx, var, var) +#define gen_uxtb16(var) gen_helper_uxtb16(tcg_ctx, var, var) + + +static inline void gen_set_cpsr(TCGContext *tcg_ctx, TCGv_i32 var, uint32_t mask) +{ + TCGv_i32 tmp_mask = tcg_const_i32(tcg_ctx, mask); + gen_helper_cpsr_write(tcg_ctx, tcg_ctx->cpu_env, var, tmp_mask); + tcg_temp_free_i32(tcg_ctx, tmp_mask); +} +/* Set NZCV flags from the high 4 bits of var. */ +#define gen_set_nzcv(var) gen_set_cpsr(tcg_ctx, var, CPSR_NZCV) + +static void gen_exception_internal(TCGContext *tcg_ctx, int excp) +{ + TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); + + assert(excp_is_internal(excp)); + gen_helper_exception_internal(tcg_ctx, tcg_ctx->cpu_env, tcg_excp); + tcg_temp_free_i32(tcg_ctx, tcg_excp); +} + +static void gen_step_complete_exception(DisasContext *s) +{ + /* We just completed step of an insn. Move from Active-not-pending + * to Active-pending, and then also take the swstep exception. + * This corresponds to making the (IMPDEF) choice to prioritize + * swstep exceptions over asynchronous exceptions taken to an exception + * level where debug is disabled. This choice has the advantage that + * we do not need to maintain internal state corresponding to the + * ISV/EX syndrome bits between completion of the step and generation + * of the exception, and our syndrome information is always correct. + */ + gen_ss_advance(s); + gen_swstep_exception(s, 1, s->is_ldex); + s->base.is_jmp = DISAS_NORETURN; +} + +static void gen_singlestep_exception(DisasContext *s) +{ + /* Generate the right kind of exception for singlestep, which is + * either the architectural singlestep or EXCP_DEBUG for QEMU's + * gdb singlestepping. + */ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (s->ss_active) { + gen_step_complete_exception(s); + } else { + gen_exception_internal(tcg_ctx, EXCP_DEBUG); + } +} + +static inline bool is_singlestepping(DisasContext *s) +{ + /* Return true if we are singlestepping either because of + * architectural singlestep or QEMU gdbstub singlestep. This does + * not include the command line '-singlestep' mode which is rather + * misnamed as it only means "one instruction per TB" and doesn't + * affect the code we generate. 
+ */ + return s->base.singlestep_enabled || s->ss_active; +} + +static void gen_smul_dual(TCGContext *tcg_ctx, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 tmp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ext16s_i32(tcg_ctx, tmp1, a); + tcg_gen_ext16s_i32(tcg_ctx, tmp2, b); + tcg_gen_mul_i32(tcg_ctx, tmp1, tmp1, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_gen_sari_i32(tcg_ctx, a, a, 16); + tcg_gen_sari_i32(tcg_ctx, b, b, 16); + tcg_gen_mul_i32(tcg_ctx, b, b, a); + tcg_gen_mov_i32(tcg_ctx, a, tmp1); + tcg_temp_free_i32(tcg_ctx, tmp1); +} + +/* Byteswap each halfword. */ +static void gen_rev16(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 var) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 mask = tcg_const_i32(tcg_ctx, 0x00ff00ff); + tcg_gen_shri_i32(tcg_ctx, tmp, var, 8); + tcg_gen_and_i32(tcg_ctx, tmp, tmp, mask); + tcg_gen_and_i32(tcg_ctx, var, var, mask); + tcg_gen_shli_i32(tcg_ctx, var, var, 8); + tcg_gen_or_i32(tcg_ctx, dest, var, tmp); + tcg_temp_free_i32(tcg_ctx, mask); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +/* Byteswap low halfword and sign extend. */ +static void gen_revsh(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 var) +{ + tcg_gen_ext16u_i32(tcg_ctx, var, var); + tcg_gen_bswap16_i32(tcg_ctx, var, var); + tcg_gen_ext16s_i32(tcg_ctx, dest, var); +} + +/* 32x32->64 multiply. Marks inputs as dead. */ +static TCGv_i64 gen_mulu_i64_i32(TCGContext *tcg_ctx, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 lo = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 hi = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 ret; + + tcg_gen_mulu2_i32(tcg_ctx, lo, hi, a, b); + tcg_temp_free_i32(tcg_ctx, a); + tcg_temp_free_i32(tcg_ctx, b); + + ret = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, ret, lo, hi); + tcg_temp_free_i32(tcg_ctx, lo); + tcg_temp_free_i32(tcg_ctx, hi); + + return ret; +} + +static TCGv_i64 gen_muls_i64_i32(TCGContext *tcg_ctx, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 lo = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 hi = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 ret; + + tcg_gen_muls2_i32(tcg_ctx, lo, hi, a, b); + tcg_temp_free_i32(tcg_ctx, a); + tcg_temp_free_i32(tcg_ctx, b); + + ret = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, ret, lo, hi); + tcg_temp_free_i32(tcg_ctx, lo); + tcg_temp_free_i32(tcg_ctx, hi); + + return ret; +} + +/* Swap low and high halfwords. */ +static void gen_swap_half(TCGContext *tcg_ctx, TCGv_i32 var) +{ + tcg_gen_rotri_i32(tcg_ctx, var, var, 16); +} + +/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead. + tmp = (t0 ^ t1) & 0x8000; + t0 &= ~0x8000; + t1 &= ~0x8000; + t0 = (t0 + t1) ^ tmp; + */ + +static void gen_add16(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0x8000); + tcg_gen_andi_i32(tcg_ctx, t0, t0, ~0x8000); + tcg_gen_andi_i32(tcg_ctx, t1, t1, ~0x8000); + tcg_gen_add_i32(tcg_ctx, t0, t0, t1); + tcg_gen_xor_i32(tcg_ctx, dest, t0, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +/* Set N and Z flags from var. */ +static inline void gen_logic_CC(TCGContext *tcg_ctx, TCGv_i32 var) +{ + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, var); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, var); +} + +/* dest = T0 + T1 + CF. */ +static void gen_add_carry(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + tcg_gen_add_i32(tcg_ctx, dest, t0, t1); + tcg_gen_add_i32(tcg_ctx, dest, dest, tcg_ctx->cpu_CF); +} + +/* dest = T0 - T1 + CF - 1. 
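+ Equivalently, this is ARM subtract-with-carry: the carry flag acts as
+ an inverted borrow, so with CF set the result is a plain T0 - T1 and
+ with CF clear it is T0 - T1 - 1.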
*/ +static void gen_sub_carry(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + tcg_gen_sub_i32(tcg_ctx, dest, t0, t1); + tcg_gen_add_i32(tcg_ctx, dest, dest, tcg_ctx->cpu_CF); + tcg_gen_subi_i32(tcg_ctx, dest, dest, 1); +} + +/* dest = T0 + T1. Compute C, N, V and Z flags */ +static void gen_add_CC(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0, tmp, t1, tmp); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); + tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); +} + +/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */ +static void gen_adc_CC(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + if (TCG_TARGET_HAS_add2_i32) { + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0, tmp, tcg_ctx->cpu_CF, tmp); + tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t1, tmp); + } else { + TCGv_i64 q0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 q1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, q0, t0); + tcg_gen_extu_i32_i64(tcg_ctx, q1, t1); + tcg_gen_add_i64(tcg_ctx, q0, q0, q1); + tcg_gen_extu_i32_i64(tcg_ctx, q1, tcg_ctx->cpu_CF); + tcg_gen_add_i64(tcg_ctx, q0, q0, q1); + tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, q0); + tcg_temp_free_i64(tcg_ctx, q0); + tcg_temp_free_i64(tcg_ctx, q1); + } + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); + tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); +} + +/* dest = T0 - T1. Compute C, N, V and Z flags */ +static void gen_sub_CC(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGv_i32 tmp; + tcg_gen_sub_i32(tcg_ctx, tcg_ctx->cpu_NF, t0, t1); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_CF, t0, t1); + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); + tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); +} + +/* dest = T0 + ~T1 + CF. 
Compute C, N, V and Z flags */ +static void gen_sbc_CC(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_not_i32(tcg_ctx, tmp, t1); + gen_adc_CC(tcg_ctx, dest, t0, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +#define GEN_SHIFT(name) \ +static void gen_##name(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \ +{ \ + TCGv_i32 tmp1, tmp2, tmp3; \ + tmp1 = tcg_temp_new_i32(tcg_ctx); \ + tcg_gen_andi_i32(tcg_ctx, tmp1, t1, 0xff); \ + tmp2 = tcg_const_i32(tcg_ctx, 0); \ + tmp3 = tcg_const_i32(tcg_ctx, 0x1f); \ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \ + tcg_temp_free_i32(tcg_ctx, tmp3); \ + tcg_gen_andi_i32(tcg_ctx, tmp1, tmp1, 0x1f); \ + tcg_gen_##name##_i32(tcg_ctx, dest, tmp2, tmp1); \ + tcg_temp_free_i32(tcg_ctx, tmp2); \ + tcg_temp_free_i32(tcg_ctx, tmp1); \ +} +GEN_SHIFT(shl) +GEN_SHIFT(shr) +#undef GEN_SHIFT + +static void gen_sar(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGv_i32 tmp1, tmp2; + tmp1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp1, t1, 0xff); + tmp2 = tcg_const_i32(tcg_ctx, 0x1f); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_gen_sar_i32(tcg_ctx, dest, t0, tmp1); + tcg_temp_free_i32(tcg_ctx, tmp1); +} + +static void shifter_out_im(TCGContext *tcg_ctx, TCGv_i32 var, int shift) +{ + tcg_gen_extract_i32(tcg_ctx, tcg_ctx->cpu_CF, var, shift, 1); +} + +/* Shift by immediate. Includes special handling for shift == 0. */ +static inline void gen_arm_shift_im(TCGContext *tcg_ctx, TCGv_i32 var, int shiftop, + int shift, int flags) +{ + switch (shiftop) { + case 0: /* LSL */ + if (shift != 0) { + if (flags) + shifter_out_im(tcg_ctx, var, 32 - shift); + tcg_gen_shli_i32(tcg_ctx, var, var, shift); + } + break; + case 1: /* LSR */ + if (shift == 0) { + if (flags) { + tcg_gen_shri_i32(tcg_ctx, tcg_ctx->cpu_CF, var, 31); + } + tcg_gen_movi_i32(tcg_ctx, var, 0); + } else { + if (flags) + shifter_out_im(tcg_ctx, var, shift - 1); + tcg_gen_shri_i32(tcg_ctx, var, var, shift); + } + break; + case 2: /* ASR */ + if (shift == 0) + shift = 32; + if (flags) + shifter_out_im(tcg_ctx, var, shift - 1); + if (shift == 32) + shift = 31; + tcg_gen_sari_i32(tcg_ctx, var, var, shift); + break; + case 3: /* ROR/RRX */ + if (shift != 0) { + if (flags) + shifter_out_im(tcg_ctx, var, shift - 1); + tcg_gen_rotri_i32(tcg_ctx, var, var, shift); break; + } else { + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shli_i32(tcg_ctx, tmp, tcg_ctx->cpu_CF, 31); + if (flags) + shifter_out_im(tcg_ctx, var, 0); + tcg_gen_shri_i32(tcg_ctx, var, var, 1); + tcg_gen_or_i32(tcg_ctx, var, var, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } + } +}; + +static inline void gen_arm_shift_reg(TCGContext *tcg_ctx, TCGv_i32 var, int shiftop, + TCGv_i32 shift, int flags) +{ + if (flags) { + switch (shiftop) { + case 0: gen_helper_shl_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; + case 1: gen_helper_shr_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; + case 2: gen_helper_sar_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; + case 3: gen_helper_ror_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; + } + } else { + switch (shiftop) { + case 0: + gen_shl(tcg_ctx, var, var, shift); + break; + case 1: + gen_shr(tcg_ctx, var, var, shift); + break; + case 2: + gen_sar(tcg_ctx, var, var, shift); + break; + case 3: tcg_gen_andi_i32(tcg_ctx, shift, shift, 0x1f); + tcg_gen_rotr_i32(tcg_ctx, 
var, var, shift); break; + } + } + tcg_temp_free_i32(tcg_ctx, shift); +} + +/* + * Generate a conditional based on ARM condition code cc. + * This is common between ARM and Aarch64 targets. + */ +void arm_test_cc(TCGContext *tcg_ctx, DisasCompare *cmp, int cc) +{ + TCGv_i32 value; + TCGCond cond; + bool global = true; + + switch (cc) { + case 0: /* eq: Z */ + case 1: /* ne: !Z */ + cond = TCG_COND_EQ; + value = tcg_ctx->cpu_ZF; + break; + + case 2: /* cs: C */ + case 3: /* cc: !C */ + cond = TCG_COND_NE; + value = tcg_ctx->cpu_CF; + break; + + case 4: /* mi: N */ + case 5: /* pl: !N */ + cond = TCG_COND_LT; + value = tcg_ctx->cpu_NF; + break; + + case 6: /* vs: V */ + case 7: /* vc: !V */ + cond = TCG_COND_LT; + value = tcg_ctx->cpu_VF; + break; + + case 8: /* hi: C && !Z */ + case 9: /* ls: !C || Z -> !(C && !Z) */ + cond = TCG_COND_NE; + value = tcg_temp_new_i32(tcg_ctx); + global = false; + /* CF is 1 for C, so -CF is an all-bits-set mask for C; + ZF is non-zero for !Z; so AND the two subexpressions. */ + tcg_gen_neg_i32(tcg_ctx, value, tcg_ctx->cpu_CF); + tcg_gen_and_i32(tcg_ctx, value, value, tcg_ctx->cpu_ZF); + break; + + case 10: /* ge: N == V -> N ^ V == 0 */ + case 11: /* lt: N != V -> N ^ V != 0 */ + /* Since we're only interested in the sign bit, == 0 is >= 0. */ + cond = TCG_COND_GE; + value = tcg_temp_new_i32(tcg_ctx); + global = false; + tcg_gen_xor_i32(tcg_ctx, value, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + break; + + case 12: /* gt: !Z && N == V */ + case 13: /* le: Z || N != V */ + cond = TCG_COND_NE; + value = tcg_temp_new_i32(tcg_ctx); + global = false; + /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate + * the sign bit then AND with ZF to yield the result. */ + tcg_gen_xor_i32(tcg_ctx, value, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + tcg_gen_sari_i32(tcg_ctx, value, value, 31); + tcg_gen_andc_i32(tcg_ctx, value, tcg_ctx->cpu_ZF, value); + break; + + case 14: /* always */ + case 15: /* always */ + /* Use the ALWAYS condition, which will fold early. + * It doesn't matter what we use for the value. */ + cond = TCG_COND_ALWAYS; + value = tcg_ctx->cpu_ZF; + goto no_invert; + + default: + fprintf(stderr, "Bad condition code 0x%x\n", cc); + abort(); + } + + if (cc & 1) { + cond = tcg_invert_cond(cond); + } + + no_invert: + cmp->cond = cond; + cmp->value = value; + cmp->value_global = global; +} + +void arm_free_cc(TCGContext *tcg_ctx, DisasCompare *cmp) +{ + if (!cmp->value_global) { + tcg_temp_free_i32(tcg_ctx, cmp->value); + } +} + +void arm_jump_cc(TCGContext *tcg_ctx, DisasCompare *cmp, TCGLabel *label) +{ + tcg_gen_brcondi_i32(tcg_ctx, cmp->cond, cmp->value, 0, label); +} + +void arm_gen_test_cc(TCGContext *tcg_ctx, int cc, TCGLabel *label) +{ + DisasCompare cmp; + arm_test_cc(tcg_ctx, &cmp, cc); + arm_jump_cc(tcg_ctx, &cmp, label); + arm_free_cc(tcg_ctx, &cmp); +} + +static inline void gen_set_condexec(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (s->condexec_mask) { + uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, val); + store_cpu_field(tcg_ctx, tmp, condexec_bits); + } +} + +static inline void gen_set_pc_im(DisasContext *s, target_ulong val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], val); +} + +/* Set PC and Thumb state from var. var is marked as dead. 
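+ This is BX-style interworking: bit 0 of var selects the Thumb state
+ and is cleared from the value written to the PC.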
*/ +static inline void gen_bx(DisasContext *s, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + s->base.is_jmp = DISAS_JUMP; + tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_R[15], var, ~1); + tcg_gen_andi_i32(tcg_ctx, var, var, 1); + store_cpu_field(tcg_ctx, var, thumb); +} + +/* + * Set PC and Thumb state from var. var is marked as dead. + * For M-profile CPUs, include logic to detect exception-return + * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC, + * and BX reg, and no others, and happens only for code in Handler mode. + * The Security Extension also requires us to check for the FNC_RETURN + * which signals a function return from non-secure state; this can happen + * in both Handler and Thread mode. + * To avoid having to do multiple comparisons in inline generated code, + * we make the check we do here loose, so it will match for EXC_RETURN + * in Thread mode. For system emulation do_v7m_exception_exit() checks + * for these spurious cases and returns without doing anything (giving + * the same behaviour as for a branch to a non-magic address). + * + * In linux-user mode it is unclear what the right behaviour for an + * attempted FNC_RETURN should be, because in real hardware this will go + * directly to Secure code (ie not the Linux kernel) which will then treat + * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN + * attempt behave the way it would on a CPU without the security extension, + * which is to say "like a normal branch". That means we can simply treat + * all branches as normal with no magic address behaviour. + */ +static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var) +{ + /* Generate the same code here as for a simple bx, but flag via + * s->base.is_jmp that we need to do the rest of the work later. + */ + gen_bx(s, var); + if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) || + (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) { + s->base.is_jmp = DISAS_BX_EXCRET; + } +} + +static inline void gen_bx_excret_final_code(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Generate the code to finish possible exception return and end the TB */ + TCGLabel *excret_label = gen_new_label(tcg_ctx); + uint32_t min_magic; + + if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) { + /* Covers FNC_RETURN and EXC_RETURN magic */ + min_magic = FNC_RETURN_MIN_MAGIC; + } else { + /* EXC_RETURN magic only */ + min_magic = EXC_RETURN_MIN_MAGIC; + } + + /* Is the new PC value in the magic range indicating exception return? */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_R[15], min_magic, excret_label); + /* No: end the TB as we would for a DISAS_JMP */ + if (is_singlestepping(s)) { + gen_singlestep_exception(s); + } else { + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + } + gen_set_label(tcg_ctx, excret_label); + /* Yes: this is an exception return. + * At this point in runtime env->regs[15] and env->thumb will hold + * the exception-return magic number, which do_v7m_exception_exit() + * will read. Nothing else will be able to see those values because + * the cpu-exec main loop guarantees that we will always go straight + * from raising the exception to the exception-handling code. + * + * gen_ss_advance(s) does nothing on M profile currently but + * calling it is conceptually the right thing as we have executed + * this instruction (compare SWI, HVC, SMC handling). 
+ */ + gen_ss_advance(s); + gen_exception_internal(tcg_ctx, EXCP_EXCEPTION_EXIT); +} + +static inline void gen_bxns(DisasContext *s, int rm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 var = load_reg(s, rm); + + /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory + * we need to sync state before calling it, but: + * - we don't need to do gen_set_pc_im() because the bxns helper will + * always set the PC itself + * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE + * unless it's outside an IT block or the last insn in an IT block, + * so we know that condexec == 0 (already set at the top of the TB) + * is correct in the non-UNPREDICTABLE cases, and we can choose + * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise. + */ + gen_helper_v7m_bxns(tcg_ctx, tcg_ctx->cpu_env, var); + tcg_temp_free_i32(tcg_ctx, var); + s->base.is_jmp = DISAS_EXIT; +} + +static inline void gen_blxns(DisasContext *s, int rm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 var = load_reg(s, rm); + + /* We don't need to sync condexec state, for the same reason as bxns. + * We do however need to set the PC, because the blxns helper reads it. + * The blxns helper may throw an exception. + */ + gen_set_pc_im(s, s->base.pc_next); + gen_helper_v7m_blxns(tcg_ctx, tcg_ctx->cpu_env, var); + tcg_temp_free_i32(tcg_ctx, var); + s->base.is_jmp = DISAS_EXIT; +} + +/* Variant of store_reg which uses branch&exchange logic when storing + to r15 in ARM architecture v7 and above. The source must be a temporary + and will be marked as dead. */ +static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var) +{ + if (reg == 15 && ENABLE_ARCH_7) { + gen_bx(s, var); + } else { + store_reg(s, reg, var); + } +} + +/* Variant of store_reg which uses branch&exchange logic when storing + * to r15 in ARM architecture v5T and above. This is used for storing + * the results of a LDR/LDM/POP into r15, and corresponds to the cases + * in the ARM ARM which use the LoadWritePC() pseudocode function. */ +static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) +{ + if (reg == 15 && ENABLE_ARCH_5) { + gen_bx_excret(s, var); + } else { + store_reg(s, reg, var); + } +} + +#define IS_USER_ONLY 0 + +/* Abstractions of "generate code to do a guest load/store for + * AArch32", where a vaddr is always 32 bits (and is zero + * extended if we're a 64 bit core) and data is also + * 32 bits unless specifically doing a 64 bit access. + * These functions work like tcg_gen_qemu_{ld,st}* except + * that the address argument is TCGv_i32 rather than TCGv. + */ + +static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv addr = tcg_temp_new(tcg_ctx); + tcg_gen_extu_i32_tl(tcg_ctx, addr, a32); + + /* Not needed for user-mode BE32, where we use MO_BE instead. 
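+ * For BE32 (SCTLR.B set) the address is XORed with 4 - (1 << size),
+ * i.e. 3 for byte accesses and 2 for halfword accesses, so sub-word
+ * accesses pick the correct bytes out of the byte-reversed word.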
*/ + if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) { + tcg_gen_xori_tl(tcg_ctx, addr, addr, 4 - (1 << (op & MO_SIZE))); + } + return addr; +} + +static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, + int index, MemOp opc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv addr; + + if (arm_dc_feature(s, ARM_FEATURE_M) && + !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) { + opc |= MO_ALIGN; + } + + addr = gen_aa32_addr(s, a32, opc); + tcg_gen_qemu_ld_i32(tcg_ctx, val, addr, index, opc); + tcg_temp_free(tcg_ctx, addr); +} + +static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, + int index, MemOp opc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv addr; + + if (arm_dc_feature(s, ARM_FEATURE_M) && + !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) { + opc |= MO_ALIGN; + } + + addr = gen_aa32_addr(s, a32, opc); + tcg_gen_qemu_st_i32(tcg_ctx, val, addr, index, opc); + tcg_temp_free(tcg_ctx, addr); +} + +#define DO_GEN_LD(SUFF, OPC) \ +static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \ + TCGv_i32 a32, int index) \ +{ \ + gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \ +} + +#define DO_GEN_ST(SUFF, OPC) \ +static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \ + TCGv_i32 a32, int index) \ +{ \ + gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \ +} + +static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Not needed for user-mode BE32, where we use MO_BE instead. */ + if (!IS_USER_ONLY && s->sctlr_b) { + tcg_gen_rotri_i64(tcg_ctx, val, val, 32); + } +} + +static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, + int index, MemOp opc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv addr = gen_aa32_addr(s, a32, opc); + tcg_gen_qemu_ld_i64(tcg_ctx, val, addr, index, opc); + gen_aa32_frob64(s, val); + tcg_temp_free(tcg_ctx, addr); +} + +static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, + TCGv_i32 a32, int index) +{ + gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data); +} + +static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32, + int index, MemOp opc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv addr = gen_aa32_addr(s, a32, opc); + + /* Not needed for user-mode BE32, where we use MO_BE instead. */ + if (!IS_USER_ONLY && s->sctlr_b) { + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_rotri_i64(tcg_ctx, tmp, val, 32); + tcg_gen_qemu_st_i64(tcg_ctx, tmp, addr, index, opc); + tcg_temp_free_i64(tcg_ctx, tmp); + } else { + tcg_gen_qemu_st_i64(tcg_ctx, val, addr, index, opc); + } + tcg_temp_free(tcg_ctx, addr); +} + +static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val, + TCGv_i32 a32, int index) +{ + gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data); +} + +DO_GEN_LD(8u, MO_UB) +DO_GEN_LD(16u, MO_UW) +DO_GEN_LD(32u, MO_UL) +DO_GEN_ST(8, MO_UB) +DO_GEN_ST(16, MO_UW) +DO_GEN_ST(32, MO_UL) + +static inline void gen_hvc(DisasContext *s, int imm16) +{ + /* The pre HVC helper handles cases when HVC gets trapped + * as an undefined insn by runtime configuration (ie before + * the insn really executes). + */ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_set_pc_im(s, s->pc_curr); + gen_helper_pre_hvc(tcg_ctx, tcg_ctx->cpu_env); + /* Otherwise we will treat this as a real exception which + * happens after execution of the insn. (The distinction matters + * for the PC value reported to the exception handler and also + * for single stepping.) 
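+ * Hence the two gen_set_pc_im() calls in this function: s->pc_curr
+ * before the pre_hvc helper, and s->base.pc_next for the exception
+ * taken after the insn has executed.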
+ */ + s->svc_imm = imm16; + gen_set_pc_im(s, s->base.pc_next); + s->base.is_jmp = DISAS_HVC; +} + +static inline void gen_smc(DisasContext *s) +{ + /* As with HVC, we may take an exception either before or after + * the insn executes. + */ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + gen_set_pc_im(s, s->pc_curr); + tmp = tcg_const_i32(tcg_ctx, syn_aa32_smc()); + gen_helper_pre_smc(tcg_ctx, tcg_ctx->cpu_env, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_set_pc_im(s, s->base.pc_next); + s->base.is_jmp = DISAS_SMC; +} + +static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_set_condexec(s); + gen_set_pc_im(s, pc); + gen_exception_internal(tcg_ctx, excp); + s->base.is_jmp = DISAS_NORETURN; +} + +static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp, + int syn, uint32_t target_el) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_set_condexec(s); + gen_set_pc_im(s, pc); + gen_exception(tcg_ctx, excp, syn, target_el); + s->base.is_jmp = DISAS_NORETURN; +} + +static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_syn; + + gen_set_condexec(s); + gen_set_pc_im(s, s->pc_curr); + tcg_syn = tcg_const_i32(tcg_ctx, syn); + gen_helper_exception_bkpt_insn(tcg_ctx, tcg_ctx->cpu_env, tcg_syn); + tcg_temp_free_i32(tcg_ctx, tcg_syn); + s->base.is_jmp = DISAS_NORETURN; +} + +static void unallocated_encoding(DisasContext *s) +{ + /* Unallocated and reserved encodings are uncategorized */ + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), + default_exception_el(s)); +} + +/* Force a TB lookup after an instruction that changes the CPU state. */ +static inline void gen_lookup_tb(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], s->base.pc_next); + s->base.is_jmp = DISAS_EXIT; +} + +static inline void gen_hlt(DisasContext *s, int imm) +{ + /* HLT. This has two purposes. + * Architecturally, it is an external halting debug instruction. + * Since QEMU doesn't implement external debug, we treat this as + * it is required for halting debug disabled: it will UNDEF. + * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction, + * and "HLT 0xF000" is an A32 semihosting syscall. These traps + * must trigger semihosting even for ARMv7 and earlier, where + * HLT was an undefined encoding. + * In system mode, we don't allow userspace access to + * semihosting, to provide some semblance of security + * (and for consistency with our 32-bit semihosting). + */ + unallocated_encoding(s); +} + +static TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx, int neon) +{ + TCGv_ptr statusptr = tcg_temp_new_ptr(tcg_ctx); + int offset; + if (neon) { + offset = offsetof(CPUARMState, vfp.standard_fp_status); + } else { + offset = offsetof(CPUARMState, vfp.fp_status); + } + tcg_gen_addi_ptr(tcg_ctx, statusptr, tcg_ctx->cpu_env, offset); + return statusptr; +} + +static inline long vfp_reg_offset(bool dp, unsigned reg) +{ + if (dp) { + return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]); + } else { + long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]); + if (reg & 1) { + ofs += offsetof(CPU_DoubleU, l.upper); + } else { + ofs += offsetof(CPU_DoubleU, l.lower); + } + return ofs; + } +} + +/* Return the offset of a 32-bit piece of a NEON register. + zero is the least significant end of the register. 
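+ Piece n of D register "reg" is single-precision register 2 * reg + n,
+ so for example neon_reg_offset(1, 0) yields the offset of S2, the low
+ half of D1.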
*/ +static inline long +neon_reg_offset (int reg, int n) +{ + int sreg; + sreg = reg * 2 + n; + return vfp_reg_offset(0, sreg); +} + +/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE, + * where 0 is the least significant end of the register. + */ +static inline long +neon_element_offset(int reg, int element, MemOp size) +{ + int element_size = 1 << size; + int ofs = element * element_size; +#ifdef HOST_WORDS_BIGENDIAN + /* Calculate the offset assuming fully little-endian, + * then XOR to account for the order of the 8-byte units. + */ + if (element_size < 8) { + ofs ^= 8 - element_size; + } +#endif + return neon_reg_offset(reg, 0) + ofs; +} + +static TCGv_i32 neon_load_reg(TCGContext *tcg_ctx, int reg, int pass) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(reg, pass)); + return tmp; +} + +static void neon_load_element(TCGContext *tcg_ctx, TCGv_i32 var, int reg, int ele, MemOp mop) +{ + long offset = neon_element_offset(reg, ele, mop & MO_SIZE); + + switch (mop) { + case MO_UB: + tcg_gen_ld8u_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + case MO_UW: + tcg_gen_ld16u_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + case MO_UL: + tcg_gen_ld_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + default: + g_assert_not_reached(); + break; + } +} + +static void neon_load_element64(TCGContext *tcg_ctx, TCGv_i64 var, int reg, int ele, MemOp mop) +{ + long offset = neon_element_offset(reg, ele, mop & MO_SIZE); + + switch (mop) { + case MO_UB: + tcg_gen_ld8u_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + case MO_UW: + tcg_gen_ld16u_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + case MO_UL: + tcg_gen_ld32u_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + case MO_Q: + tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + default: + g_assert_not_reached(); + break; + } +} + +static void neon_store_reg(TCGContext *tcg_ctx, int reg, int pass, TCGv_i32 var) +{ + tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, neon_reg_offset(reg, pass)); + tcg_temp_free_i32(tcg_ctx, var); +} + +static void neon_store_element(TCGContext *tcg_ctx, int reg, int ele, MemOp size, TCGv_i32 var) +{ + long offset = neon_element_offset(reg, ele, size); + + switch (size) { + case MO_8: + tcg_gen_st8_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + case MO_16: + tcg_gen_st16_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + case MO_32: + tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + default: + g_assert_not_reached(); + break; + } +} + +static void neon_store_element64(TCGContext *tcg_ctx, int reg, int ele, MemOp size, TCGv_i64 var) +{ + long offset = neon_element_offset(reg, ele, size); + + switch (size) { + case MO_8: + tcg_gen_st8_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + case MO_16: + tcg_gen_st16_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + case MO_32: + tcg_gen_st32_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + case MO_64: + tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, offset); + break; + default: + g_assert_not_reached(); + break; + } +} + +static inline void neon_load_reg64(TCGContext *tcg_ctx, TCGv_i64 var, int reg) +{ + tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg)); +} + +static inline void neon_store_reg64(TCGContext *tcg_ctx, TCGv_i64 var, int reg) +{ + tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg)); +} + +static inline void neon_load_reg32(TCGContext 
*tcg_ctx, TCGv_i32 var, int reg) +{ + tcg_gen_ld_i32(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(false, reg)); +} + +static inline void neon_store_reg32(TCGContext *tcg_ctx, TCGv_i32 var, int reg) +{ + tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(false, reg)); +} + +static TCGv_ptr vfp_reg_ptr(TCGContext *tcg_ctx, bool dp, int reg) +{ + TCGv_ptr ret = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, ret, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg)); + return ret; +} + +#define ARM_CP_RW_BIT (1 << 20) + +/* Include the VFP decoder */ +#include "translate-vfp.inc.c" + +static inline void iwmmxt_load_reg(TCGContext *tcg_ctx, TCGv_i64 var, int reg) +{ + tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg])); +} + +static inline void iwmmxt_store_reg(TCGContext *tcg_ctx, TCGv_i64 var, int reg) +{ + tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg])); +} + +static inline TCGv_i32 iwmmxt_load_creg(TCGContext *tcg_ctx, int reg) +{ + TCGv_i32 var = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); + return var; +} + +static inline void iwmmxt_store_creg(TCGContext *tcg_ctx, int reg, TCGv_i32 var) +{ + tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); + tcg_temp_free_i32(tcg_ctx, var); +} + +static inline void gen_op_iwmmxt_movq_wRn_M0(TCGContext *tcg_ctx, int rn) +{ + iwmmxt_store_reg(tcg_ctx, tcg_ctx->cpu_M0, rn); +} + +static inline void gen_op_iwmmxt_movq_M0_wRn(TCGContext *tcg_ctx, int rn) +{ + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_M0, rn); +} + +static inline void gen_op_iwmmxt_orq_M0_wRn(TCGContext *tcg_ctx, int rn) +{ + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); + tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); +} + +static inline void gen_op_iwmmxt_andq_M0_wRn(TCGContext *tcg_ctx, int rn) +{ + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); + tcg_gen_and_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); +} + +static inline void gen_op_iwmmxt_xorq_M0_wRn(TCGContext *tcg_ctx, int rn) +{ + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); + tcg_gen_xor_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); +} + +#define IWMMXT_OP(name) \ +static inline void gen_op_iwmmxt_##name##_M0_wRn(TCGContext *tcg_ctx, int rn) \ +{ \ + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); \ + gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); \ +} + +#define IWMMXT_OP_ENV(name) \ +static inline void gen_op_iwmmxt_##name##_M0_wRn(TCGContext *tcg_ctx, int rn) \ +{ \ + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); \ + gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); \ +} + +#define IWMMXT_OP_ENV_SIZE(name) \ +IWMMXT_OP_ENV(name##b) \ +IWMMXT_OP_ENV(name##w) \ +IWMMXT_OP_ENV(name##l) + +#define IWMMXT_OP_ENV1(name) \ +static inline void gen_op_iwmmxt_##name##_M0(TCGContext *tcg_ctx) \ +{ \ + gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0); \ +} + +IWMMXT_OP(maddsq) +IWMMXT_OP(madduq) +IWMMXT_OP(sadb) +IWMMXT_OP(sadw) +IWMMXT_OP(mulslw) +IWMMXT_OP(mulshw) +IWMMXT_OP(mululw) +IWMMXT_OP(muluhw) +IWMMXT_OP(macsw) +IWMMXT_OP(macuw) + +IWMMXT_OP_ENV_SIZE(unpackl) +IWMMXT_OP_ENV_SIZE(unpackh) + +IWMMXT_OP_ENV1(unpacklub) +IWMMXT_OP_ENV1(unpackluw) +IWMMXT_OP_ENV1(unpacklul) +IWMMXT_OP_ENV1(unpackhub) +IWMMXT_OP_ENV1(unpackhuw) 
+IWMMXT_OP_ENV1(unpackhul) +IWMMXT_OP_ENV1(unpacklsb) +IWMMXT_OP_ENV1(unpacklsw) +IWMMXT_OP_ENV1(unpacklsl) +IWMMXT_OP_ENV1(unpackhsb) +IWMMXT_OP_ENV1(unpackhsw) +IWMMXT_OP_ENV1(unpackhsl) + +IWMMXT_OP_ENV_SIZE(cmpeq) +IWMMXT_OP_ENV_SIZE(cmpgtu) +IWMMXT_OP_ENV_SIZE(cmpgts) + +IWMMXT_OP_ENV_SIZE(mins) +IWMMXT_OP_ENV_SIZE(minu) +IWMMXT_OP_ENV_SIZE(maxs) +IWMMXT_OP_ENV_SIZE(maxu) + +IWMMXT_OP_ENV_SIZE(subn) +IWMMXT_OP_ENV_SIZE(addn) +IWMMXT_OP_ENV_SIZE(subu) +IWMMXT_OP_ENV_SIZE(addu) +IWMMXT_OP_ENV_SIZE(subs) +IWMMXT_OP_ENV_SIZE(adds) + +IWMMXT_OP_ENV(avgb0) +IWMMXT_OP_ENV(avgb1) +IWMMXT_OP_ENV(avgw0) +IWMMXT_OP_ENV(avgw1) + +IWMMXT_OP_ENV(packuw) +IWMMXT_OP_ENV(packul) +IWMMXT_OP_ENV(packuq) +IWMMXT_OP_ENV(packsw) +IWMMXT_OP_ENV(packsl) +IWMMXT_OP_ENV(packsq) + +static void gen_op_iwmmxt_set_mup(TCGContext *tcg_ctx) +{ + TCGv_i32 tmp; + tmp = load_cpu_field(tcg_ctx, iwmmxt.cregs[ARM_IWMMXT_wCon]); + tcg_gen_ori_i32(tcg_ctx, tmp, tmp, 2); + store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]); +} + +static void gen_op_iwmmxt_set_cup(TCGContext *tcg_ctx) +{ + TCGv_i32 tmp; + tmp = load_cpu_field(tcg_ctx, iwmmxt.cregs[ARM_IWMMXT_wCon]); + tcg_gen_ori_i32(tcg_ctx, tmp, tmp, 1); + store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]); +} + +static void gen_op_iwmmxt_setpsr_nz(TCGContext *tcg_ctx) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + gen_helper_iwmmxt_setpsr_nz(tcg_ctx, tmp, tcg_ctx->cpu_M0); + store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]); +} + +static inline void gen_op_iwmmxt_addl_M0_wRn(TCGContext *tcg_ctx, int rn) +{ + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rn); + tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1); + tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); +} + +static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, + TCGv_i32 dest) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd; + uint32_t offset; + TCGv_i32 tmp; + + rd = (insn >> 16) & 0xf; + tmp = load_reg(s, rd); + + offset = (insn & 0xff) << ((insn >> 7) & 2); + if (insn & (1 << 24)) { + /* Pre indexed */ + if (insn & (1 << 23)) + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset); + else +#ifdef _MSC_VER + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, 0 - offset); +#else + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, -offset); +#endif + tcg_gen_mov_i32(tcg_ctx, dest, tmp); + if (insn & (1 << 21)) + store_reg(s, rd, tmp); + else + tcg_temp_free_i32(tcg_ctx, tmp); + } else if (insn & (1 << 21)) { + /* Post indexed */ + tcg_gen_mov_i32(tcg_ctx, dest, tmp); + if (insn & (1 << 23)) + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset); + else +#ifdef _MSC_VER + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, 0 - offset); +#else + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, -offset); +#endif + store_reg(s, rd, tmp); + } else if (!(insn & (1 << 23))) + return 1; + return 0; +} + +static inline int gen_iwmmxt_shift(TCGContext *tcg_ctx, uint32_t insn, uint32_t mask, TCGv_i32 dest) +{ + int rd = (insn >> 0) & 0xf; + TCGv_i32 tmp; + + if (insn & (1 << 8)) { + if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) { + return 1; + } else { + tmp = iwmmxt_load_creg(tcg_ctx, rd); + } + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V0, rd); + tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); + } + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, mask); + tcg_gen_mov_i32(tcg_ctx, dest, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + return 0; +} + +/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred + (ie. an undefined instruction). 
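+ The caller is expected to treat a nonzero return as an illegal
+ opcode and raise the UNDEF exception.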
*/ +static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd, wrd; + int rdhi, rdlo, rd0, rd1, i; + TCGv_i32 addr; + TCGv_i32 tmp, tmp2, tmp3; + + if ((insn & 0x0e000e00) == 0x0c000000) { + if ((insn & 0x0fe00ff0) == 0x0c400000) { + wrd = insn & 0xf; + rdlo = (insn >> 12) & 0xf; + rdhi = (insn >> 16) & 0xf; + if (insn & ARM_CP_RW_BIT) { /* TMRRC */ + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V0, wrd); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_V0); + tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_V0); + } else { /* TMCRR */ + tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_R[rdhi]); + iwmmxt_store_reg(tcg_ctx, tcg_ctx->cpu_V0, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + } + return 0; + } + + wrd = (insn >> 12) & 0xf; + addr = tcg_temp_new_i32(tcg_ctx); + if (gen_iwmmxt_address(s, insn, addr)) { + tcg_temp_free_i32(tcg_ctx, addr); + return 1; + } + if (insn & ARM_CP_RW_BIT) { + if ((insn >> 28) == 0xf) { /* WLDRW wCx */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + iwmmxt_store_creg(tcg_ctx, wrd, tmp); + } else { + i = 1; + if (insn & (1 << 8)) { + if (insn & (1 << 22)) { /* WLDRD */ + gen_aa32_ld64(s, tcg_ctx->cpu_M0, addr, get_mem_index(s)); + i = 0; + } else { /* WLDRW wRd */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + } + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + if (insn & (1 << 22)) { /* WLDRH */ + gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + } else { /* WLDRB */ + gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); + } + } + if (i) { + tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_M0, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + } + } else { + if ((insn >> 28) == 0xf) { /* WSTRW wCx */ + tmp = iwmmxt_load_creg(tcg_ctx, wrd); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + } else { + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, wrd); + tmp = tcg_temp_new_i32(tcg_ctx); + if (insn & (1 << 8)) { + if (insn & (1 << 22)) { /* WSTRD */ + gen_aa32_st64(s, tcg_ctx->cpu_M0, addr, get_mem_index(s)); + } else { /* WSTRW wRd */ + tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + } + } else { + if (insn & (1 << 22)) { /* WSTRH */ + tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); + gen_aa32_st16(s, tmp, addr, get_mem_index(s)); + } else { /* WSTRB */ + tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); + gen_aa32_st8(s, tmp, addr, get_mem_index(s)); + } + } + } + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_temp_free_i32(tcg_ctx, addr); + return 0; + } + + if ((insn & 0x0f000000) != 0x0e000000) + return 1; + + switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) { + case 0x000: /* WOR */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 0) & 0xf; + rd1 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + gen_op_iwmmxt_orq_M0_wRn(tcg_ctx, rd1); + gen_op_iwmmxt_setpsr_nz(tcg_ctx); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x011: /* TMCR */ + if (insn & 0xf) + return 1; + rd = (insn >> 12) & 0xf; + wrd = (insn >> 16) & 0xf; + switch (wrd) { + case ARM_IWMMXT_wCID: + case ARM_IWMMXT_wCASF: + break; + case ARM_IWMMXT_wCon: + gen_op_iwmmxt_set_cup(tcg_ctx); + /* Fall through. 
*/ + case ARM_IWMMXT_wCSSF: + tmp = iwmmxt_load_creg(tcg_ctx, wrd); + tmp2 = load_reg(s, rd); + tcg_gen_andc_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + iwmmxt_store_creg(tcg_ctx, wrd, tmp); + break; + case ARM_IWMMXT_wCGR0: + case ARM_IWMMXT_wCGR1: + case ARM_IWMMXT_wCGR2: + case ARM_IWMMXT_wCGR3: + gen_op_iwmmxt_set_cup(tcg_ctx); + tmp = load_reg(s, rd); + iwmmxt_store_creg(tcg_ctx, wrd, tmp); + break; + default: + return 1; + } + break; + case 0x100: /* WXOR */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 0) & 0xf; + rd1 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + gen_op_iwmmxt_xorq_M0_wRn(tcg_ctx, rd1); + gen_op_iwmmxt_setpsr_nz(tcg_ctx); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x111: /* TMRC */ + if (insn & 0xf) + return 1; + rd = (insn >> 12) & 0xf; + wrd = (insn >> 16) & 0xf; + tmp = iwmmxt_load_creg(tcg_ctx, wrd); + store_reg(s, rd, tmp); + break; + case 0x300: /* WANDN */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 0) & 0xf; + rd1 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + tcg_gen_neg_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); + gen_op_iwmmxt_andq_M0_wRn(tcg_ctx, rd1); + gen_op_iwmmxt_setpsr_nz(tcg_ctx); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x200: /* WAND */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 0) & 0xf; + rd1 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + gen_op_iwmmxt_andq_M0_wRn(tcg_ctx, rd1); + gen_op_iwmmxt_setpsr_nz(tcg_ctx); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x810: case 0xa10: /* WMADD */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 0) & 0xf; + rd1 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + if (insn & (1 << 21)) + gen_op_iwmmxt_maddsq_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_madduq_M0_wRn(tcg_ctx, rd1); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 22) & 3) { + case 0: + gen_op_iwmmxt_unpacklb_M0_wRn(tcg_ctx, rd1); + break; + case 1: + gen_op_iwmmxt_unpacklw_M0_wRn(tcg_ctx, rd1); + break; + case 2: + gen_op_iwmmxt_unpackll_M0_wRn(tcg_ctx, rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 22) & 3) { + case 0: + gen_op_iwmmxt_unpackhb_M0_wRn(tcg_ctx, rd1); + break; + case 1: + gen_op_iwmmxt_unpackhw_M0_wRn(tcg_ctx, rd1); + break; + case 2: + gen_op_iwmmxt_unpackhl_M0_wRn(tcg_ctx, rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + if (insn & (1 << 22)) + gen_op_iwmmxt_sadw_M0_wRn(tcg_ctx, rd1); + else + 
gen_op_iwmmxt_sadb_M0_wRn(tcg_ctx, rd1); + if (!(insn & (1 << 20))) + gen_op_iwmmxt_addl_M0_wRn(tcg_ctx, wrd); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + if (insn & (1 << 21)) { + if (insn & (1 << 20)) + gen_op_iwmmxt_mulshw_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_mulslw_M0_wRn(tcg_ctx, rd1); + } else { + if (insn & (1 << 20)) + gen_op_iwmmxt_muluhw_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_mululw_M0_wRn(tcg_ctx, rd1); + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + if (insn & (1 << 21)) + gen_op_iwmmxt_macsw_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_macuw_M0_wRn(tcg_ctx, rd1); + if (!(insn & (1 << 20))) { + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, wrd); + tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 22) & 3) { + case 0: + gen_op_iwmmxt_cmpeqb_M0_wRn(tcg_ctx, rd1); + break; + case 1: + gen_op_iwmmxt_cmpeqw_M0_wRn(tcg_ctx, rd1); + break; + case 2: + gen_op_iwmmxt_cmpeql_M0_wRn(tcg_ctx, rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + if (insn & (1 << 22)) { + if (insn & (1 << 20)) + gen_op_iwmmxt_avgw1_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_avgw0_M0_wRn(tcg_ctx, rd1); + } else { + if (insn & (1 << 20)) + gen_op_iwmmxt_avgb1_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_avgb0_M0_wRn(tcg_ctx, rd1); + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + tmp = iwmmxt_load_creg(tcg_ctx, ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3)); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 7); + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rd1); + gen_helper_iwmmxt_align(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */ + if (((insn >> 6) & 3) == 3) + return 1; + rd = (insn >> 12) & 0xf; + wrd = (insn >> 16) & 0xf; + tmp = load_reg(s, rd); + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, wrd); + switch ((insn >> 6) & 3) { + case 0: + tmp2 = tcg_const_i32(tcg_ctx, 0xff); + tmp3 = tcg_const_i32(tcg_ctx, (insn & 7) << 3); + break; + case 1: + tmp2 = tcg_const_i32(tcg_ctx, 0xffff); + tmp3 = tcg_const_i32(tcg_ctx, (insn & 3) << 4); + break; + case 2: + tmp2 = tcg_const_i32(tcg_ctx, 0xffffffff); + 
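+                /* 32-bit element: the mask spans the whole lane and
+                 * insn bit 0 selects the low or high word of wRd. */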
tmp3 = tcg_const_i32(tcg_ctx, (insn & 1) << 5); + break; + default: + tmp2 = NULL; + tmp3 = NULL; + } + gen_helper_iwmmxt_insr(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */ + rd = (insn >> 12) & 0xf; + wrd = (insn >> 16) & 0xf; + if (rd == 15 || ((insn >> 22) & 3) == 3) + return 1; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, wrd); + tmp = tcg_temp_new_i32(tcg_ctx); + switch ((insn >> 22) & 3) { + case 0: + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 7) << 3); + tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); + if (insn & 8) { + tcg_gen_ext8s_i32(tcg_ctx, tmp, tmp); + } else { + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xff); + } + break; + case 1: + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 3) << 4); + tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); + if (insn & 8) { + tcg_gen_ext16s_i32(tcg_ctx, tmp, tmp); + } else { + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xffff); + } + break; + case 2: + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 1) << 5); + tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0); + break; + } + store_reg(s, rd, tmp); + break; + case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */ + if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3) + return 1; + tmp = iwmmxt_load_creg(tcg_ctx, ARM_IWMMXT_wCASF); + switch ((insn >> 22) & 3) { + case 0: + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 7) << 2) + 0); + break; + case 1: + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 3) << 3) + 4); + break; + case 2: + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 1) << 4) + 12); + break; + } + tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 28); + gen_set_nzcv(tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */ + if (((insn >> 6) & 3) == 3) + return 1; + rd = (insn >> 12) & 0xf; + wrd = (insn >> 16) & 0xf; + tmp = load_reg(s, rd); + switch ((insn >> 6) & 3) { + case 0: + gen_helper_iwmmxt_bcstb(tcg_ctx, tcg_ctx->cpu_M0, tmp); + break; + case 1: + gen_helper_iwmmxt_bcstw(tcg_ctx, tcg_ctx->cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_bcstl(tcg_ctx, tcg_ctx->cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tcg_ctx, tmp); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */ + if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) + return 1; + tmp = iwmmxt_load_creg(tcg_ctx, ARM_IWMMXT_wCASF); + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, tmp2, tmp); + switch ((insn >> 22) & 3) { + case 0: + for (i = 0; i < 7; i ++) { + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 4); + tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); + } + break; + case 1: + for (i = 0; i < 3; i ++) { + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 8); + tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); + } + break; + case 2: + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); + tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); + break; + } + gen_set_nzcv(tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */ + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 22) & 3) { + case 0: + 
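+            /* byte elements: accumulate all eight byte lanes of wRn */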
gen_helper_iwmmxt_addcb(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); + break; + case 1: + gen_helper_iwmmxt_addcw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); + break; + case 2: + gen_helper_iwmmxt_addcl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */ + if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) + return 1; + tmp = iwmmxt_load_creg(tcg_ctx, ARM_IWMMXT_wCASF); + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, tmp2, tmp); + switch ((insn >> 22) & 3) { + case 0: + for (i = 0; i < 7; i ++) { + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 4); + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + } + break; + case 1: + for (i = 0; i < 3; i ++) { + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 8); + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + } + break; + case 2: + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + break; + } + gen_set_nzcv(tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */ + rd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3) + return 1; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + tmp = tcg_temp_new_i32(tcg_ctx); + switch ((insn >> 22) & 3) { + case 0: + gen_helper_iwmmxt_msbb(tcg_ctx, tmp, tcg_ctx->cpu_M0); + break; + case 1: + gen_helper_iwmmxt_msbw(tcg_ctx, tmp, tcg_ctx->cpu_M0); + break; + case 2: + gen_helper_iwmmxt_msbl(tcg_ctx, tmp, tcg_ctx->cpu_M0); + break; + } + store_reg(s, rd, tmp); + break; + case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */ + case 0x906: case 0xb06: case 0xd06: case 0xf06: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_cmpgtsb_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_cmpgtub_M0_wRn(tcg_ctx, rd1); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_cmpgtsw_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_cmpgtuw_M0_wRn(tcg_ctx, rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_cmpgtsl_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_cmpgtul_M0_wRn(tcg_ctx, rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */ + case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpacklsb_M0(tcg_ctx); + else + gen_op_iwmmxt_unpacklub_M0(tcg_ctx); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpacklsw_M0(tcg_ctx); + else + gen_op_iwmmxt_unpackluw_M0(tcg_ctx); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpacklsl_M0(tcg_ctx); + else + gen_op_iwmmxt_unpacklul_M0(tcg_ctx); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */ + case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch 
((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpackhsb_M0(tcg_ctx); + else + gen_op_iwmmxt_unpackhub_M0(tcg_ctx); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpackhsw_M0(tcg_ctx); + else + gen_op_iwmmxt_unpackhuw_M0(tcg_ctx); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_unpackhsl_M0(tcg_ctx); + else + gen_op_iwmmxt_unpackhul_M0(tcg_ctx); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */ + case 0x214: case 0x614: case 0xa14: case 0xe14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + tmp = tcg_temp_new_i32(tcg_ctx); + if (gen_iwmmxt_shift(tcg_ctx, insn, 0xff, tmp)) { + tcg_temp_free_i32(tcg_ctx, tmp); + return 1; + } + switch ((insn >> 22) & 3) { + case 1: + gen_helper_iwmmxt_srlw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_srll(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + case 3: + gen_helper_iwmmxt_srlq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tcg_ctx, tmp); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */ + case 0x014: case 0x414: case 0x814: case 0xc14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + tmp = tcg_temp_new_i32(tcg_ctx); + if (gen_iwmmxt_shift(tcg_ctx, insn, 0xff, tmp)) { + tcg_temp_free_i32(tcg_ctx, tmp); + return 1; + } + switch ((insn >> 22) & 3) { + case 1: + gen_helper_iwmmxt_sraw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_sral(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + case 3: + gen_helper_iwmmxt_sraq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tcg_ctx, tmp); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */ + case 0x114: case 0x514: case 0x914: case 0xd14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + tmp = tcg_temp_new_i32(tcg_ctx); + if (gen_iwmmxt_shift(tcg_ctx, insn, 0xff, tmp)) { + tcg_temp_free_i32(tcg_ctx, tmp); + return 1; + } + switch ((insn >> 22) & 3) { + case 1: + gen_helper_iwmmxt_sllw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + case 2: + gen_helper_iwmmxt_slll(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + case 3: + gen_helper_iwmmxt_sllq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tcg_ctx, tmp); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */ + case 0x314: case 0x714: case 0xb14: case 0xf14: + if (((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + tmp = 
tcg_temp_new_i32(tcg_ctx); + switch ((insn >> 22) & 3) { + case 1: + if (gen_iwmmxt_shift(tcg_ctx, insn, 0xf, tmp)) { + tcg_temp_free_i32(tcg_ctx, tmp); + return 1; + } + gen_helper_iwmmxt_rorw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + case 2: + if (gen_iwmmxt_shift(tcg_ctx, insn, 0x1f, tmp)) { + tcg_temp_free_i32(tcg_ctx, tmp); + return 1; + } + gen_helper_iwmmxt_rorl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + case 3: + if (gen_iwmmxt_shift(tcg_ctx, insn, 0x3f, tmp)) { + tcg_temp_free_i32(tcg_ctx, tmp); + return 1; + } + gen_helper_iwmmxt_rorq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + break; + } + tcg_temp_free_i32(tcg_ctx, tmp); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */ + case 0x916: case 0xb16: case 0xd16: case 0xf16: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_minsb_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_minub_M0_wRn(tcg_ctx, rd1); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_minsw_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_minuw_M0_wRn(tcg_ctx, rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_minsl_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_minul_M0_wRn(tcg_ctx, rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */ + case 0x816: case 0xa16: case 0xc16: case 0xe16: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 22) & 3) { + case 0: + if (insn & (1 << 21)) + gen_op_iwmmxt_maxsb_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_maxub_M0_wRn(tcg_ctx, rd1); + break; + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_maxsw_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_maxuw_M0_wRn(tcg_ctx, rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_maxsl_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_maxul_M0_wRn(tcg_ctx, rd1); + break; + case 3: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */ + case 0x402: case 0x502: case 0x602: case 0x702: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + tmp = tcg_const_i32(tcg_ctx, (insn >> 20) & 3); + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V1, rd1); + gen_helper_iwmmxt_align(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */ + case 0x41a: case 0x51a: case 0x61a: case 0x71a: + case 0x81a: case 0x91a: case 0xa1a: case 0xb1a: + case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 20) & 0xf) { + case 0x0: + gen_op_iwmmxt_subnb_M0_wRn(tcg_ctx, rd1); + break; + case 0x1: + gen_op_iwmmxt_subub_M0_wRn(tcg_ctx, rd1); + break; + case 0x3: + gen_op_iwmmxt_subsb_M0_wRn(tcg_ctx, rd1); + 
break; + case 0x4: + gen_op_iwmmxt_subnw_M0_wRn(tcg_ctx, rd1); + break; + case 0x5: + gen_op_iwmmxt_subuw_M0_wRn(tcg_ctx, rd1); + break; + case 0x7: + gen_op_iwmmxt_subsw_M0_wRn(tcg_ctx, rd1); + break; + case 0x8: + gen_op_iwmmxt_subnl_M0_wRn(tcg_ctx, rd1); + break; + case 0x9: + gen_op_iwmmxt_subul_M0_wRn(tcg_ctx, rd1); + break; + case 0xb: + gen_op_iwmmxt_subsl_M0_wRn(tcg_ctx, rd1); + break; + default: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */ + case 0x41e: case 0x51e: case 0x61e: case 0x71e: + case 0x81e: case 0x91e: case 0xa1e: case 0xb1e: + case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + tmp = tcg_const_i32(tcg_ctx, ((insn >> 16) & 0xf0) | (insn & 0x0f)); + gen_helper_iwmmxt_shufh(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */ + case 0x418: case 0x518: case 0x618: case 0x718: + case 0x818: case 0x918: case 0xa18: case 0xb18: + case 0xc18: case 0xd18: case 0xe18: case 0xf18: + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 20) & 0xf) { + case 0x0: + gen_op_iwmmxt_addnb_M0_wRn(tcg_ctx, rd1); + break; + case 0x1: + gen_op_iwmmxt_addub_M0_wRn(tcg_ctx, rd1); + break; + case 0x3: + gen_op_iwmmxt_addsb_M0_wRn(tcg_ctx, rd1); + break; + case 0x4: + gen_op_iwmmxt_addnw_M0_wRn(tcg_ctx, rd1); + break; + case 0x5: + gen_op_iwmmxt_adduw_M0_wRn(tcg_ctx, rd1); + break; + case 0x7: + gen_op_iwmmxt_addsw_M0_wRn(tcg_ctx, rd1); + break; + case 0x8: + gen_op_iwmmxt_addnl_M0_wRn(tcg_ctx, rd1); + break; + case 0x9: + gen_op_iwmmxt_addul_M0_wRn(tcg_ctx, rd1); + break; + case 0xb: + gen_op_iwmmxt_addsl_M0_wRn(tcg_ctx, rd1); + break; + default: + return 1; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */ + case 0x408: case 0x508: case 0x608: case 0x708: + case 0x808: case 0x908: case 0xa08: case 0xb08: + case 0xc08: case 0xd08: case 0xe08: case 0xf08: + if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0) + return 1; + wrd = (insn >> 12) & 0xf; + rd0 = (insn >> 16) & 0xf; + rd1 = (insn >> 0) & 0xf; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, rd0); + switch ((insn >> 22) & 3) { + case 1: + if (insn & (1 << 21)) + gen_op_iwmmxt_packsw_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_packuw_M0_wRn(tcg_ctx, rd1); + break; + case 2: + if (insn & (1 << 21)) + gen_op_iwmmxt_packsl_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_packul_M0_wRn(tcg_ctx, rd1); + break; + case 3: + if (insn & (1 << 21)) + gen_op_iwmmxt_packsq_M0_wRn(tcg_ctx, rd1); + else + gen_op_iwmmxt_packuq_M0_wRn(tcg_ctx, rd1); + break; + } + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + gen_op_iwmmxt_set_cup(tcg_ctx); + break; + case 0x201: case 0x203: case 0x205: case 0x207: + case 0x209: case 0x20b: case 0x20d: case 0x20f: + case 0x211: case 0x213: case 0x215: case 0x217: + case 0x219: case 0x21b: case 0x21d: case 0x21f: + wrd = (insn >> 5) & 0xf; + rd0 = (insn >> 12) & 0xf; + rd1 = (insn >> 0) & 0xf; + if (rd0 == 0xf 
|| rd1 == 0xf) + return 1; + gen_op_iwmmxt_movq_M0_wRn(tcg_ctx, wrd); + tmp = load_reg(s, rd0); + tmp2 = load_reg(s, rd1); + switch ((insn >> 16) & 0xf) { + case 0x0: /* TMIA */ + gen_helper_iwmmxt_muladdsl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); + break; + case 0x8: /* TMIAPH */ + gen_helper_iwmmxt_muladdsw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); + break; + case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */ + if (insn & (1 << 16)) + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); + if (insn & (1 << 17)) + tcg_gen_shri_i32(tcg_ctx, tmp2, tmp2, 16); + gen_helper_iwmmxt_muladdswl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); + break; + default: + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + return 1; + } + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, wrd); + gen_op_iwmmxt_set_mup(tcg_ctx); + break; + default: + return 1; + } + + return 0; +} + +/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred + (ie. an undefined instruction). */ +static int disas_dsp_insn(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int acc, rd0, rd1, rdhi, rdlo; + TCGv_i32 tmp, tmp2; + + if ((insn & 0x0ff00f10) == 0x0e200010) { + /* Multiply with Internal Accumulate Format */ + rd0 = (insn >> 12) & 0xf; + rd1 = insn & 0xf; + acc = (insn >> 5) & 7; + + if (acc != 0) + return 1; + + tmp = load_reg(s, rd0); + tmp2 = load_reg(s, rd1); + switch ((insn >> 16) & 0xf) { + case 0x0: /* MIA */ + gen_helper_iwmmxt_muladdsl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); + break; + case 0x8: /* MIAPH */ + gen_helper_iwmmxt_muladdsw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); + break; + case 0xc: /* MIABB */ + case 0xd: /* MIABT */ + case 0xe: /* MIATB */ + case 0xf: /* MIATT */ + if (insn & (1 << 16)) + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); + if (insn & (1 << 17)) + tcg_gen_shri_i32(tcg_ctx, tmp2, tmp2, 16); + gen_helper_iwmmxt_muladdswl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2); + break; + default: + return 1; + } + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + + gen_op_iwmmxt_movq_wRn_M0(tcg_ctx, acc); + return 0; + } + + if ((insn & 0x0fe00ff8) == 0x0c400000) { + /* Internal Accumulator Access Format */ + rdhi = (insn >> 16) & 0xf; + rdlo = (insn >> 12) & 0xf; + acc = insn & 7; + + if (acc != 0) + return 1; + + if (insn & ARM_CP_RW_BIT) { /* MRA */ + iwmmxt_load_reg(tcg_ctx, tcg_ctx->cpu_V0, acc); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_V0); + tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_V0); + tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_R[rdhi], (1 << (40 - 32)) - 1); + } else { /* MAR */ + tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_R[rdhi]); + iwmmxt_store_reg(tcg_ctx, tcg_ctx->cpu_V0, acc); + } + return 0; + } + + return 1; +} + +#ifdef _MSC_VER +#define VFP_REG_SHR_NEG(insn, n) ((insn) << -(n)) +#define VFP_SREG_NEG(insn, bigbit, smallbit) \ + ((VFP_REG_SHR_NEG(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) + +#define VFP_REG_SHR_POS(x, n) ((insn) >> (n)) +#define VFP_SREG_POS(insn, bigbit, smallbit) \ + ((VFP_REG_SHR_POS(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) +#else +#define VFP_REG_SHR(x, n) (((n) > 0) ? 
(x) >> (n) : (x) << -(n)) +#define VFP_SREG(insn, bigbit, smallbit) \ + ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) +#endif +#define VFP_DREG(reg, insn, bigbit, smallbit) do { \ + if (dc_isar_feature(aa32_simd_r32, s)) { \ + reg = (((insn) >> (bigbit)) & 0x0f) \ + | (((insn) >> ((smallbit) - 4)) & 0x10); \ + } else { \ + if (insn & (1 << (smallbit))) \ + return 1; \ + reg = ((insn) >> (bigbit)) & 0x0f; \ + }} while (0) + +#ifdef _MSC_VER +#define VFP_SREG_D(insn) VFP_SREG_POS(insn, 12, 22) +#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22) +#define VFP_SREG_N(insn) VFP_SREG_POS(insn, 16, 7) +#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7) +#define VFP_SREG_M(insn) VFP_SREG_NEG(insn, 0, 5) +#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5) +#else +#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22) +#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22) +#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7) +#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7) +#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5) +#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5) +#endif + +static void gen_neon_dup_low16(TCGContext *tcg_ctx, TCGv_i32 var) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ext16u_i32(tcg_ctx, var, var); + tcg_gen_shli_i32(tcg_ctx, tmp, var, 16); + tcg_gen_or_i32(tcg_ctx, var, var, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static void gen_neon_dup_high16(TCGContext *tcg_ctx, TCGv_i32 var) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, var, var, 0xffff0000); + tcg_gen_shri_i32(tcg_ctx, tmp, var, 16); + tcg_gen_or_i32(tcg_ctx, var, var, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static inline bool use_goto_tb(DisasContext *s, target_ulong dest) +{ + struct uc_struct *uc = s->uc; + return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) || + ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); +} + +static void gen_goto_ptr(TCGContext *tcg_ctx) +{ + tcg_gen_lookup_and_goto_ptr(tcg_ctx); +} + +/* This will end the TB but doesn't guarantee we'll return to + * cpu_loop_exec. Any live exit_requests will be processed as we + * enter the next TB. + */ +static void gen_goto_tb(DisasContext *s, int n, target_ulong dest) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (use_goto_tb(s, dest)) { + tcg_gen_goto_tb(tcg_ctx, n); + gen_set_pc_im(s, dest); + tcg_gen_exit_tb(tcg_ctx, s->base.tb, n); + } else { + gen_set_pc_im(s, dest); + gen_goto_ptr(tcg_ctx); + } + s->base.is_jmp = DISAS_NORETURN; +} + +static inline void gen_jmp (DisasContext *s, uint32_t dest) +{ + if (unlikely(is_singlestepping(s))) { + /* An indirect jump so that we still trigger the debug exception. */ + gen_set_pc_im(s, dest); + s->base.is_jmp = DISAS_JUMP; + } else { + gen_goto_tb(s, 0, dest); + } +} + +static inline void gen_mulxy(TCGContext *tcg_ctx, TCGv_i32 t0, TCGv_i32 t1, int x, int y) +{ + if (x) + tcg_gen_sari_i32(tcg_ctx, t0, t0, 16); + else + gen_sxth(t0); + if (y) + tcg_gen_sari_i32(tcg_ctx, t1, t1, 16); + else + gen_sxth(t1); + tcg_gen_mul_i32(tcg_ctx, t0, t0, t1); +} + +/* Return the mask of PSR bits set by a MSR instruction. */ +static uint32_t msr_mask(DisasContext *s, int flags, int spsr) +{ + uint32_t mask = 0; + + if (flags & (1 << 0)) { + mask |= 0xff; + } + if (flags & (1 << 1)) { + mask |= 0xff00; + } + if (flags & (1 << 2)) { + mask |= 0xff0000; + } + if (flags & (1 << 3)) { + mask |= 0xff000000; + } + + /* Mask out undefined and reserved bits. 
*/ + mask &= aarch32_cpsr_valid_mask(s->features, s->isar); + + /* Mask out execution state. */ + if (!spsr) { + mask &= ~CPSR_EXEC; + } + + /* Mask out privileged bits. */ + if (IS_USER(s)) { + mask &= CPSR_USER; + } + return mask; +} + +/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */ +static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + if (spsr) { + /* ??? This is also undefined in system mode. */ + if (IS_USER(s)) + return 1; + + tmp = load_cpu_field(tcg_ctx, spsr); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, ~mask); + tcg_gen_andi_i32(tcg_ctx, t0, t0, mask); + tcg_gen_or_i32(tcg_ctx, tmp, tmp, t0); + store_cpu_field(tcg_ctx, tmp, spsr); + } else { + gen_set_cpsr(tcg_ctx, t0, mask); + } + tcg_temp_free_i32(tcg_ctx, t0); + gen_lookup_tb(s); + return 0; +} + +/* Returns nonzero if access to the PSR is not permitted. */ +static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, val); + return gen_set_psr(s, mask, spsr, tmp); +} + +static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn, + int *tgtmode, int *regno) +{ + /* Decode the r and sysm fields of MSR/MRS banked accesses into + * the target mode and register number, and identify the various + * unpredictable cases. + * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if: + * + executed in user mode + * + using R15 as the src/dest register + * + accessing an unimplemented register + * + accessing a register that's inaccessible at current PL/security state* + * + accessing a register that you could access with a different insn + * We choose to UNDEF in all these cases. + * Since we don't know which of the various AArch32 modes we are in + * we have to defer some checks to runtime. + * Accesses to Monitor mode registers from Secure EL1 (which implies + * that EL3 is AArch64) must trap to EL3. + * + * If the access checks fail this function will emit code to take + * an exception and return false. Otherwise it will return true, + * and set *tgtmode and *regno appropriately. + */ + int exc_target = default_exception_el(s); + + /* These instructions are present only in ARMv8, or in ARMv7 with the + * Virtualization Extensions. + */ + if (!arm_dc_feature(s, ARM_FEATURE_V8) && + !arm_dc_feature(s, ARM_FEATURE_EL2)) { + goto undef; + } + + if (IS_USER(s) || rn == 15) { + goto undef; + } + + /* The table in the v8 ARM ARM section F5.2.3 describes the encoding + * of registers into (r, sysm). + */ + if (r) { + /* SPSRs for other modes */ + switch (sysm) { + case 0xe: /* SPSR_fiq */ + *tgtmode = ARM_CPU_MODE_FIQ; + break; + case 0x10: /* SPSR_irq */ + *tgtmode = ARM_CPU_MODE_IRQ; + break; + case 0x12: /* SPSR_svc */ + *tgtmode = ARM_CPU_MODE_SVC; + break; + case 0x14: /* SPSR_abt */ + *tgtmode = ARM_CPU_MODE_ABT; + break; + case 0x16: /* SPSR_und */ + *tgtmode = ARM_CPU_MODE_UND; + break; + case 0x1c: /* SPSR_mon */ + *tgtmode = ARM_CPU_MODE_MON; + break; + case 0x1e: /* SPSR_hyp */ + *tgtmode = ARM_CPU_MODE_HYP; + break; + default: /* unallocated */ + goto undef; + } + /* We arbitrarily assign SPSR a register number of 16. */ + *regno = 16; + } else { + /* general purpose registers for other modes */ + switch (sysm) { + case 0x0: /* 0b00xxx : r8_usr ... r14_usr */ + case 0x1: /* 0b00xxx : r8_usr ... r14_usr */ + case 0x2: /* 0b00xxx : r8_usr ... 
r14_usr */ + case 0x3: /* 0b00xxx : r8_usr ... r14_usr */ + case 0x4: /* 0b00xxx : r8_usr ... r14_usr */ + case 0x5: /* 0b00xxx : r8_usr ... r14_usr */ + case 0x6: /* 0b00xxx : r8_usr ... r14_usr */ + *tgtmode = ARM_CPU_MODE_USR; + *regno = sysm + 8; + break; + case 0x8: /* 0b01xxx : r8_fiq ... r14_fiq */ + case 0x9: /* 0b01xxx : r8_fiq ... r14_fiq */ + case 0xa: /* 0b01xxx : r8_fiq ... r14_fiq */ + case 0xb: /* 0b01xxx : r8_fiq ... r14_fiq */ + case 0xc: /* 0b01xxx : r8_fiq ... r14_fiq */ + case 0xd: /* 0b01xxx : r8_fiq ... r14_fiq */ + case 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */ + *tgtmode = ARM_CPU_MODE_FIQ; + *regno = sysm; + break; + case 0x10: /* 0b1000x : r14_irq, r13_irq */ + case 0x11: /* 0b1000x : r14_irq, r13_irq */ + *tgtmode = ARM_CPU_MODE_IRQ; + *regno = sysm & 1 ? 13 : 14; + break; + case 0x12: /* 0b1001x : r14_svc, r13_svc */ + case 0x13: /* 0b1001x : r14_svc, r13_svc */ + *tgtmode = ARM_CPU_MODE_SVC; + *regno = sysm & 1 ? 13 : 14; + break; + case 0x14: /* 0b1010x : r14_abt, r13_abt */ + case 0x15: /* 0b1010x : r14_abt, r13_abt */ + *tgtmode = ARM_CPU_MODE_ABT; + *regno = sysm & 1 ? 13 : 14; + break; + case 0x16: /* 0b1011x : r14_und, r13_und */ + case 0x17: /* 0b1011x : r14_und, r13_und */ + *tgtmode = ARM_CPU_MODE_UND; + *regno = sysm & 1 ? 13 : 14; + break; + case 0x1c: /* 0b1110x : r14_mon, r13_mon */ + case 0x1d: /* 0b1110x : r14_mon, r13_mon */ + *tgtmode = ARM_CPU_MODE_MON; + *regno = sysm & 1 ? 13 : 14; + break; + case 0x1e: /* 0b1111x : elr_hyp, r13_hyp */ + case 0x1f: /* 0b1111x : elr_hyp, r13_hyp */ + *tgtmode = ARM_CPU_MODE_HYP; + /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */ + *regno = sysm & 1 ? 13 : 17; + break; + default: /* unallocated */ + goto undef; + } + } + + /* Catch the 'accessing inaccessible register' cases we can detect + * at translate time. + */ + switch (*tgtmode) { + case ARM_CPU_MODE_MON: + if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) { + goto undef; + } + if (s->current_el == 1) { + /* If we're in Secure EL1 (which implies that EL3 is AArch64) + * then accesses to Mon registers trap to EL3 + */ + exc_target = 3; + goto undef; + } + break; + case ARM_CPU_MODE_HYP: + /* + * SPSR_hyp and r13_hyp can only be accessed from Monitor mode + * (and so we can forbid accesses from EL2 or below). elr_hyp + * can be accessed also from Hyp mode, so forbid accesses from + * EL0 or EL1. 
+ */ + if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 || + (s->current_el < 3 && *regno != 17)) { + goto undef; + } + break; + default: + break; + } + + return true; + +undef: + /* If we get here then some access check did not pass */ + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_uncategorized(), exc_target); + return false; +} + +static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno; + int tgtmode = 0, regno = 0; + + if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, ®no)) { + return; + } + + /* Sync state because msr_banked() can raise exceptions */ + gen_set_condexec(s); + gen_set_pc_im(s, s->pc_curr); + tcg_reg = load_reg(s, rn); + tcg_tgtmode = tcg_const_i32(tcg_ctx, tgtmode); + tcg_regno = tcg_const_i32(tcg_ctx, regno); + gen_helper_msr_banked(tcg_ctx, tcg_ctx->cpu_env, tcg_reg, tcg_tgtmode, tcg_regno); + tcg_temp_free_i32(tcg_ctx, tcg_tgtmode); + tcg_temp_free_i32(tcg_ctx, tcg_regno); + tcg_temp_free_i32(tcg_ctx, tcg_reg); + s->base.is_jmp = DISAS_UPDATE; +} + +static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno; + int tgtmode = 0, regno = 0; + + if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, ®no)) { + return; + } + + /* Sync state because mrs_banked() can raise exceptions */ + gen_set_condexec(s); + gen_set_pc_im(s, s->pc_curr); + tcg_reg = tcg_temp_new_i32(tcg_ctx); + tcg_tgtmode = tcg_const_i32(tcg_ctx, tgtmode); + tcg_regno = tcg_const_i32(tcg_ctx, regno); + gen_helper_mrs_banked(tcg_ctx, tcg_reg, tcg_ctx->cpu_env, tcg_tgtmode, tcg_regno); + tcg_temp_free_i32(tcg_ctx, tcg_tgtmode); + tcg_temp_free_i32(tcg_ctx, tcg_regno); + store_reg(s, rn, tcg_reg); + s->base.is_jmp = DISAS_UPDATE; +} + +/* Store value to PC as for an exception return (ie don't + * mask bits). The subsequent call to gen_helper_cpsr_write_eret() + * will do the masking based on the new value of the Thumb bit. + */ +static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_R[15], pc); + tcg_temp_free_i32(tcg_ctx, pc); +} + +/* Generate a v6 exception return. Marks both values as dead. */ +static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + store_pc_exc_ret(s, pc); + /* The cpsr_write_eret helper will mask the low bits of PC + * appropriately depending on the new Thumb bit, so it must + * be called after storing the new PC. + */ + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(tcg_ctx); + } + gen_helper_cpsr_write_eret(tcg_ctx, tcg_ctx->cpu_env, cpsr); + tcg_temp_free_i32(tcg_ctx, cpsr); + /* Must exit loop to check un-masked IRQs */ + s->base.is_jmp = DISAS_EXIT; +} + +/* Generate an old-style exception return. Marks pc as dead. 
*/ +static void gen_exception_return(DisasContext *s, TCGv_i32 pc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_rfe(s, pc, load_cpu_field(tcg_ctx, spsr)); +} + +#define CPU_V001 tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1 + +static inline void gen_neon_add(TCGContext *tcg_ctx, int size, TCGv_i32 t0, TCGv_i32 t1) +{ + switch (size) { + case 0: gen_helper_neon_add_u8(tcg_ctx, t0, t0, t1); break; + case 1: gen_helper_neon_add_u16(tcg_ctx, t0, t0, t1); break; + case 2: tcg_gen_add_i32(tcg_ctx, t0, t0, t1); break; + default: abort(); + } +} + +static inline void gen_neon_rsb(TCGContext *tcg_ctx, int size, TCGv_i32 t0, TCGv_i32 t1) +{ + switch (size) { + case 0: gen_helper_neon_sub_u8(tcg_ctx, t0, t1, t0); break; + case 1: gen_helper_neon_sub_u16(tcg_ctx, t0, t1, t0); break; + case 2: tcg_gen_sub_i32(tcg_ctx, t0, t1, t0); break; + default: return; + } +} + +/* 32-bit pairwise ops end up the same as the elementwise versions. */ +#define gen_helper_neon_pmax_s32 tcg_gen_smax_i32 +#define gen_helper_neon_pmax_u32 tcg_gen_umax_i32 +#define gen_helper_neon_pmin_s32 tcg_gen_smin_i32 +#define gen_helper_neon_pmin_u32 tcg_gen_umin_i32 + +#define GEN_NEON_INTEGER_OP_ENV(name) do { \ + switch ((size << 1) | u) { \ + case 0: \ + gen_helper_neon_##name##_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ + break; \ + case 1: \ + gen_helper_neon_##name##_u8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ + break; \ + case 2: \ + gen_helper_neon_##name##_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ + break; \ + case 3: \ + gen_helper_neon_##name##_u16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ + break; \ + case 4: \ + gen_helper_neon_##name##_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ + break; \ + case 5: \ + gen_helper_neon_##name##_u32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \ + break; \ + default: return 1; \ + }} while (0) + +#define GEN_NEON_INTEGER_OP(name) do { \ + switch ((size << 1) | u) { \ + case 0: \ + gen_helper_neon_##name##_s8(tcg_ctx, tmp, tmp, tmp2); \ + break; \ + case 1: \ + gen_helper_neon_##name##_u8(tcg_ctx, tmp, tmp, tmp2); \ + break; \ + case 2: \ + gen_helper_neon_##name##_s16(tcg_ctx, tmp, tmp, tmp2); \ + break; \ + case 3: \ + gen_helper_neon_##name##_u16(tcg_ctx, tmp, tmp, tmp2); \ + break; \ + case 4: \ + gen_helper_neon_##name##_s32(tcg_ctx, tmp, tmp, tmp2); \ + break; \ + case 5: \ + gen_helper_neon_##name##_u32(tcg_ctx, tmp, tmp, tmp2); \ + break; \ + default: return 1; \ + }} while (0) + +static TCGv_i32 neon_load_scratch(TCGContext *tcg_ctx, int scratch) +{ + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); + return tmp; +} + +static void neon_store_scratch(TCGContext *tcg_ctx, int scratch, TCGv_i32 var) +{ + tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); + tcg_temp_free_i32(tcg_ctx, var); +} + +static inline TCGv_i32 neon_get_scalar(TCGContext *tcg_ctx, int size, int reg) +{ + TCGv_i32 tmp; + if (size == 1) { + tmp = neon_load_reg(tcg_ctx, reg & 7, reg >> 4); + if (reg & 8) { + gen_neon_dup_high16(tcg_ctx, tmp); + } else { + gen_neon_dup_low16(tcg_ctx, tmp); + } + } else { + tmp = neon_load_reg(tcg_ctx, reg & 15, reg >> 4); + } + return tmp; +} + +static int gen_neon_unzip(TCGContext *tcg_ctx, int rd, int rm, int size, int q) +{ + TCGv_ptr pd, pm; + + if (!q && size == 2) { + return 1; + } + pd = vfp_reg_ptr(tcg_ctx, true, rd); + pm = vfp_reg_ptr(tcg_ctx, true, rm); + if (q) { + switch (size) { + case 0: + 
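+        /* quad (128-bit) unzip of 8-bit elements */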
gen_helper_neon_qunzip8(tcg_ctx, pd, pm); + break; + case 1: + gen_helper_neon_qunzip16(tcg_ctx, pd, pm); + break; + case 2: + gen_helper_neon_qunzip32(tcg_ctx, pd, pm); + break; + default: + abort(); + } + } else { + switch (size) { + case 0: + gen_helper_neon_unzip8(tcg_ctx, pd, pm); + break; + case 1: + gen_helper_neon_unzip16(tcg_ctx, pd, pm); + break; + default: + abort(); + } + } + tcg_temp_free_ptr(tcg_ctx, pd); + tcg_temp_free_ptr(tcg_ctx, pm); + return 0; +} + +static int gen_neon_zip(TCGContext *tcg_ctx, int rd, int rm, int size, int q) +{ + TCGv_ptr pd, pm; + + if (!q && size == 2) { + return 1; + } + pd = vfp_reg_ptr(tcg_ctx, true, rd); + pm = vfp_reg_ptr(tcg_ctx, true, rm); + if (q) { + switch (size) { + case 0: + gen_helper_neon_qzip8(tcg_ctx, pd, pm); + break; + case 1: + gen_helper_neon_qzip16(tcg_ctx, pd, pm); + break; + case 2: + gen_helper_neon_qzip32(tcg_ctx, pd, pm); + break; + default: + abort(); + } + } else { + switch (size) { + case 0: + gen_helper_neon_zip8(tcg_ctx, pd, pm); + break; + case 1: + gen_helper_neon_zip16(tcg_ctx, pd, pm); + break; + default: + abort(); + } + } + tcg_temp_free_ptr(tcg_ctx, pd); + tcg_temp_free_ptr(tcg_ctx, pm); + return 0; +} + +static void gen_neon_trn_u8(TCGContext *tcg_ctx, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGv_i32 rd, tmp; + + rd = tcg_temp_new_i32(tcg_ctx); + tmp = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_shli_i32(tcg_ctx, rd, t0, 8); + tcg_gen_andi_i32(tcg_ctx, rd, rd, 0xff00ff00); + tcg_gen_andi_i32(tcg_ctx, tmp, t1, 0x00ff00ff); + tcg_gen_or_i32(tcg_ctx, rd, rd, tmp); + + tcg_gen_shri_i32(tcg_ctx, t1, t1, 8); + tcg_gen_andi_i32(tcg_ctx, t1, t1, 0x00ff00ff); + tcg_gen_andi_i32(tcg_ctx, tmp, t0, 0xff00ff00); + tcg_gen_or_i32(tcg_ctx, t1, t1, tmp); + tcg_gen_mov_i32(tcg_ctx, t0, rd); + + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, rd); +} + +static void gen_neon_trn_u16(TCGContext *tcg_ctx, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGv_i32 rd, tmp; + + rd = tcg_temp_new_i32(tcg_ctx); + tmp = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_shli_i32(tcg_ctx, rd, t0, 16); + tcg_gen_andi_i32(tcg_ctx, tmp, t1, 0xffff); + tcg_gen_or_i32(tcg_ctx, rd, rd, tmp); + tcg_gen_shri_i32(tcg_ctx, t1, t1, 16); + tcg_gen_andi_i32(tcg_ctx, tmp, t0, 0xffff0000); + tcg_gen_or_i32(tcg_ctx, t1, t1, tmp); + tcg_gen_mov_i32(tcg_ctx, t0, rd); + + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, rd); +} + + +static struct { + int nregs; + int interleave; + int spacing; +} const neon_ls_element_type[11] = { + {1, 4, 1}, + {1, 4, 2}, + {4, 1, 1}, + {2, 2, 2}, + {1, 3, 1}, + {1, 3, 2}, + {3, 1, 1}, + {1, 1, 1}, + {1, 2, 1}, + {1, 2, 2}, + {2, 1, 1} +}; + +/* Translate a NEON load/store element instruction. Return nonzero if the + instruction is invalid. */ +static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd, rn, rm; + int op; + int nregs; + int interleave; + int spacing; + int stride; + int size; + int reg; + int load; + int n; + int vec_size; + int mmu_idx; + MemOp endian; + TCGv_i32 addr; + TCGv_i32 tmp; + TCGv_i32 tmp2; + TCGv_i64 tmp64; + + /* FIXME: this access check should not take precedence over UNDEF + * for invalid encodings; we will generate incorrect syndrome information + * for attempts to execute invalid vfp/neon encodings with FP disabled. 
+ */ + if (s->fp_excp_el) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); + return 0; + } + + if (!s->vfp_enabled) + return 1; + VFP_DREG_D(rd, insn); + rn = (insn >> 16) & 0xf; + rm = insn & 0xf; + load = (insn & (1 << 21)) != 0; + endian = s->be_data; + mmu_idx = get_mem_index(s); + if ((insn & (1 << 23)) == 0) { + /* Load store all elements. */ + op = (insn >> 8) & 0xf; + size = (insn >> 6) & 3; + if (op > 10) + return 1; + /* Catch UNDEF cases for bad values of align field */ + switch (op & 0xc) { + case 4: + if (((insn >> 5) & 1) == 1) { + return 1; + } + break; + case 8: + if (((insn >> 4) & 3) == 3) { + return 1; + } + break; + default: + break; + } + nregs = neon_ls_element_type[op].nregs; + interleave = neon_ls_element_type[op].interleave; + spacing = neon_ls_element_type[op].spacing; + if (size == 3 && (interleave | spacing) != 1) { + return 1; + } + /* For our purposes, bytes are always little-endian. */ + if (size == 0) { + endian = MO_LE; + } + /* Consecutive little-endian elements from a single register + * can be promoted to a larger little-endian operation. + */ + if (interleave == 1 && endian == MO_LE) { + size = 3; + } + tmp64 = tcg_temp_new_i64(tcg_ctx); + addr = tcg_temp_new_i32(tcg_ctx); + tmp2 = tcg_const_i32(tcg_ctx, 1 << size); + load_reg_var(s, addr, rn); + for (reg = 0; reg < nregs; reg++) { + for (n = 0; n < 8 >> size; n++) { + int xs; + for (xs = 0; xs < interleave; xs++) { + int tt = rd + reg + spacing * xs; + + if (load) { + gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size); + neon_store_element64(tcg_ctx, tt, n, size, tmp64); + } else { + neon_load_element64(tcg_ctx, tmp64, tt, n, size); + gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size); + } + tcg_gen_add_i32(tcg_ctx, addr, addr, tmp2); + } + } + } + tcg_temp_free_i32(tcg_ctx, addr); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i64(tcg_ctx, tmp64); + stride = nregs * interleave * 8; + } else { + size = (insn >> 10) & 3; + if (size == 3) { + /* Load single element to all lanes. */ + int a = (insn >> 4) & 1; + if (!load) { + return 1; + } + size = (insn >> 6) & 3; + nregs = ((insn >> 8) & 3) + 1; + + if (size == 3) { + if (nregs != 4 || a == 0) { + return 1; + } + /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */ + size = 2; + } + if (nregs == 1 && a == 1 && size == 0) { + return 1; + } + if (nregs == 3 && a == 1) { + return 1; + } + addr = tcg_temp_new_i32(tcg_ctx); + load_reg_var(s, addr, rn); + + /* VLD1 to all lanes: bit 5 indicates how many Dregs to write. + * VLD2/3/4 to all lanes: bit 5 indicates register stride. + */ + stride = (insn & (1 << 5)) ? 2 : 1; + vec_size = nregs == 1 ? stride * 8 : 8; + + tmp = tcg_temp_new_i32(tcg_ctx); + for (reg = 0; reg < nregs; reg++) { + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), + s->be_data | size); + if ((rd & 1) && vec_size == 16) { + /* We cannot write 16 bytes at once because the + * destination is unaligned. + */ + tcg_gen_gvec_dup_i32(tcg_ctx, size, neon_reg_offset(rd, 0), + 8, 8, tmp); + tcg_gen_gvec_mov(tcg_ctx, 0, neon_reg_offset(rd + 1, 0), + neon_reg_offset(rd, 0), 8, 8); + } else { + tcg_gen_gvec_dup_i32(tcg_ctx, size, neon_reg_offset(rd, 0), + vec_size, vec_size, tmp); + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size); + rd += stride; + } + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + stride = (1 << size) * nregs; + } else { + /* Single element. 
*/ + int idx = (insn >> 4) & 0xf; + int reg_idx; + switch (size) { + case 0: + reg_idx = (insn >> 5) & 7; + stride = 1; + break; + case 1: + reg_idx = (insn >> 6) & 3; + stride = (insn & (1 << 5)) ? 2 : 1; + break; + case 2: + reg_idx = (insn >> 7) & 1; + stride = (insn & (1 << 6)) ? 2 : 1; + break; + default: + abort(); + } + nregs = ((insn >> 8) & 3) + 1; + /* Catch the UNDEF cases. This is unavoidably a bit messy. */ + switch (nregs) { + case 1: + if (((idx & (1 << size)) != 0) || + (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) { + return 1; + } + break; + case 3: + if ((idx & 1) != 0) { + return 1; + } + /* fall through */ + case 2: + if (size == 2 && (idx & 2) != 0) { + return 1; + } + break; + case 4: + if ((size == 2) && ((idx & 3) == 3)) { + return 1; + } + break; + default: + abort(); + } + if ((rd + stride * (nregs - 1)) > 31) { + /* Attempts to write off the end of the register file + * are UNPREDICTABLE; we choose to UNDEF because otherwise + * the neon_load_reg() would write off the end of the array. + */ + return 1; + } + tmp = tcg_temp_new_i32(tcg_ctx); + addr = tcg_temp_new_i32(tcg_ctx); + load_reg_var(s, addr, rn); + for (reg = 0; reg < nregs; reg++) { + if (load) { + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), + s->be_data | size); + neon_store_element(tcg_ctx, rd, reg_idx, size, tmp); + } else { /* Store */ + neon_load_element(tcg_ctx, tmp, rd, reg_idx, size); + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), + s->be_data | size); + } + rd += stride; + tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size); + } + tcg_temp_free_i32(tcg_ctx, addr); + tcg_temp_free_i32(tcg_ctx, tmp); + stride = nregs * (1 << size); + } + } + if (rm != 15) { + TCGv_i32 base; + + base = load_reg(s, rn); + if (rm == 13) { + tcg_gen_addi_i32(tcg_ctx, base, base, stride); + } else { + TCGv_i32 index; + index = load_reg(s, rm); + tcg_gen_add_i32(tcg_ctx, base, base, index); + tcg_temp_free_i32(tcg_ctx, index); + } + store_reg(s, rn, base); + } + return 0; +} + +static inline void gen_neon_narrow(TCGContext *tcg_ctx, int size, TCGv_i32 dest, TCGv_i64 src) +{ + switch (size) { + case 0: gen_helper_neon_narrow_u8(tcg_ctx, dest, src); break; + case 1: gen_helper_neon_narrow_u16(tcg_ctx, dest, src); break; + case 2: tcg_gen_extrl_i64_i32(tcg_ctx, dest, src); break; + default: abort(); + } +} + +static inline void gen_neon_narrow_sats(TCGContext *tcg_ctx, int size, TCGv_i32 dest, TCGv_i64 src) +{ + switch (size) { + case 0: gen_helper_neon_narrow_sat_s8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; + case 1: gen_helper_neon_narrow_sat_s16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; + case 2: gen_helper_neon_narrow_sat_s32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; + default: abort(); + } +} + +static inline void gen_neon_narrow_satu(TCGContext *tcg_ctx, int size, TCGv_i32 dest, TCGv_i64 src) +{ + switch (size) { + case 0: gen_helper_neon_narrow_sat_u8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; + case 1: gen_helper_neon_narrow_sat_u16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; + case 2: gen_helper_neon_narrow_sat_u32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; + default: abort(); + } +} + +static inline void gen_neon_unarrow_sats(TCGContext *tcg_ctx, int size, TCGv_i32 dest, TCGv_i64 src) +{ + switch (size) { + case 0: gen_helper_neon_unarrow_sat8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; + case 1: gen_helper_neon_unarrow_sat16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; + case 2: gen_helper_neon_unarrow_sat32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break; + default: abort(); + } +} 
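+
+/*
+ * The four narrowing helpers above all halve the element width
+ * (size 0: 16->8, size 1: 32->16, size 2: 64->32 bits per lane) and
+ * differ only in overflow handling: gen_neon_narrow truncates, the
+ * _sats/_satu variants saturate to the signed/unsigned range, and
+ * gen_neon_unarrow_sats saturates a signed source to an unsigned
+ * result. As an illustration (values are hypothetical, not taken from
+ * the translator): narrowing the 16-bit lanes {0x0123, 0x7fff, 0x8000}
+ * with size == 0 yields {0x23, 0xff, 0x00} when truncated, but
+ * {0x7f, 0x7f, 0x80} under signed saturation.
+ */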
+ +static inline void gen_neon_shift_narrow(TCGContext *tcg_ctx, int size, TCGv_i32 var, TCGv_i32 shift, + int q, int u) +{ + if (q) { + if (u) { + switch (size) { + case 1: gen_helper_neon_rshl_u16(tcg_ctx, var, var, shift); break; + case 2: gen_helper_neon_rshl_u32(tcg_ctx, var, var, shift); break; + default: abort(); + } + } else { + switch (size) { + case 1: gen_helper_neon_rshl_s16(tcg_ctx, var, var, shift); break; + case 2: gen_helper_neon_rshl_s32(tcg_ctx, var, var, shift); break; + default: abort(); + } + } + } else { + if (u) { + switch (size) { + case 1: gen_helper_neon_shl_u16(tcg_ctx, var, var, shift); break; + case 2: gen_ushl_i32(tcg_ctx, var, var, shift); break; + default: abort(); + } + } else { + switch (size) { + case 1: gen_helper_neon_shl_s16(tcg_ctx, var, var, shift); break; + case 2: gen_sshl_i32(tcg_ctx, var, var, shift); break; + default: abort(); + } + } + } +} + +static inline void gen_neon_widen(TCGContext *tcg_ctx, TCGv_i64 dest, TCGv_i32 src, int size, int u) +{ + if (u) { + switch (size) { + case 0: gen_helper_neon_widen_u8(tcg_ctx, dest, src); break; + case 1: gen_helper_neon_widen_u16(tcg_ctx, dest, src); break; + case 2: tcg_gen_extu_i32_i64(tcg_ctx, dest, src); break; + default: abort(); + } + } else { + switch (size) { + case 0: gen_helper_neon_widen_s8(tcg_ctx, dest, src); break; + case 1: gen_helper_neon_widen_s16(tcg_ctx, dest, src); break; + case 2: tcg_gen_ext_i32_i64(tcg_ctx, dest, src); break; + default: abort(); + } + } + tcg_temp_free_i32(tcg_ctx, src); +} + +static inline void gen_neon_addl(TCGContext *tcg_ctx, int size) +{ + switch (size) { + case 0: gen_helper_neon_addl_u16(tcg_ctx, CPU_V001); break; + case 1: gen_helper_neon_addl_u32(tcg_ctx, CPU_V001); break; + case 2: tcg_gen_add_i64(tcg_ctx, CPU_V001); break; + default: abort(); + } +} + +static inline void gen_neon_subl(TCGContext *tcg_ctx, int size) +{ + switch (size) { + case 0: gen_helper_neon_subl_u16(tcg_ctx, CPU_V001); break; + case 1: gen_helper_neon_subl_u32(tcg_ctx, CPU_V001); break; + case 2: tcg_gen_sub_i64(tcg_ctx, CPU_V001); break; + default: abort(); + } +} + +static inline void gen_neon_negl(TCGContext *tcg_ctx, TCGv_i64 var, int size) +{ + switch (size) { + case 0: gen_helper_neon_negl_u16(tcg_ctx, var, var); break; + case 1: gen_helper_neon_negl_u32(tcg_ctx, var, var); break; + case 2: + tcg_gen_neg_i64(tcg_ctx, var, var); + break; + default: abort(); + } +} + +static inline void gen_neon_addl_saturate(TCGContext *tcg_ctx, TCGv_i64 op0, TCGv_i64 op1, int size) +{ + switch (size) { + case 1: gen_helper_neon_addl_saturate_s32(tcg_ctx, op0, tcg_ctx->cpu_env, op0, op1); break; + case 2: gen_helper_neon_addl_saturate_s64(tcg_ctx, op0, tcg_ctx->cpu_env, op0, op1); break; + default: abort(); + } +} + +static inline void gen_neon_mull(TCGContext *tcg_ctx, TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b, + int size, int u) +{ + TCGv_i64 tmp; + + switch ((size << 1) | u) { + case 0: gen_helper_neon_mull_s8(tcg_ctx, dest, a, b); break; + case 1: gen_helper_neon_mull_u8(tcg_ctx, dest, a, b); break; + case 2: gen_helper_neon_mull_s16(tcg_ctx, dest, a, b); break; + case 3: gen_helper_neon_mull_u16(tcg_ctx, dest, a, b); break; + case 4: + tmp = gen_muls_i64_i32(tcg_ctx, a, b); + tcg_gen_mov_i64(tcg_ctx, dest, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + break; + case 5: + tmp = gen_mulu_i64_i32(tcg_ctx, a, b); + tcg_gen_mov_i64(tcg_ctx, dest, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + break; + default: abort(); + } + + /* gen_helper_neon_mull_[su]{8|16} do not free their parameters. 
+ Don't forget to clean them now. */ + if (size < 2) { + tcg_temp_free_i32(tcg_ctx, a); + tcg_temp_free_i32(tcg_ctx, b); + } +} + +static void gen_neon_narrow_op(TCGContext *tcg_ctx, int op, int u, int size, + TCGv_i32 dest, TCGv_i64 src) +{ + if (op) { + if (u) { + gen_neon_unarrow_sats(tcg_ctx, size, dest, src); + } else { + gen_neon_narrow(tcg_ctx, size, dest, src); + } + } else { + if (u) { + gen_neon_narrow_satu(tcg_ctx, size, dest, src); + } else { + gen_neon_narrow_sats(tcg_ctx, size, dest, src); + } + } +} + +/* Symbolic constants for op fields for Neon 3-register same-length. + * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B + * table A7-9. + */ +#define NEON_3R_VHADD 0 +#define NEON_3R_VQADD 1 +#define NEON_3R_VRHADD 2 +#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */ +#define NEON_3R_VHSUB 4 +#define NEON_3R_VQSUB 5 +#define NEON_3R_VCGT 6 +#define NEON_3R_VCGE 7 +#define NEON_3R_VSHL 8 +#define NEON_3R_VQSHL 9 +#define NEON_3R_VRSHL 10 +#define NEON_3R_VQRSHL 11 +#define NEON_3R_VMAX 12 +#define NEON_3R_VMIN 13 +#define NEON_3R_VABD 14 +#define NEON_3R_VABA 15 +#define NEON_3R_VADD_VSUB 16 +#define NEON_3R_VTST_VCEQ 17 +#define NEON_3R_VML 18 /* VMLA, VMLS */ +#define NEON_3R_VMUL 19 +#define NEON_3R_VPMAX 20 +#define NEON_3R_VPMIN 21 +#define NEON_3R_VQDMULH_VQRDMULH 22 +#define NEON_3R_VPADD_VQRDMLAH 23 +#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */ +#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */ +#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */ +#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */ +#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */ +#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */ +#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */ +#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */ + +static const uint8_t neon_3r_sizes[] = { + [NEON_3R_VHADD] = 0x7, + [NEON_3R_VQADD] = 0xf, + [NEON_3R_VRHADD] = 0x7, + [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */ + [NEON_3R_VHSUB] = 0x7, + [NEON_3R_VQSUB] = 0xf, + [NEON_3R_VCGT] = 0x7, + [NEON_3R_VCGE] = 0x7, + [NEON_3R_VSHL] = 0xf, + [NEON_3R_VQSHL] = 0xf, + [NEON_3R_VRSHL] = 0xf, + [NEON_3R_VQRSHL] = 0xf, + [NEON_3R_VMAX] = 0x7, + [NEON_3R_VMIN] = 0x7, + [NEON_3R_VABD] = 0x7, + [NEON_3R_VABA] = 0x7, + [NEON_3R_VADD_VSUB] = 0xf, + [NEON_3R_VTST_VCEQ] = 0x7, + [NEON_3R_VML] = 0x7, + [NEON_3R_VMUL] = 0x7, + [NEON_3R_VPMAX] = 0x7, + [NEON_3R_VPMIN] = 0x7, + [NEON_3R_VQDMULH_VQRDMULH] = 0x6, + [NEON_3R_VPADD_VQRDMLAH] = 0x7, + [NEON_3R_SHA] = 0xf, /* size field encodes op type */ + [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */ + [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */ + [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */ +}; + +/* Symbolic constants for op fields for Neon 2-register miscellaneous. + * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B + * table A7-13. 
+ */ +#define NEON_2RM_VREV64 0 +#define NEON_2RM_VREV32 1 +#define NEON_2RM_VREV16 2 +#define NEON_2RM_VPADDL 4 +#define NEON_2RM_VPADDL_U 5 +#define NEON_2RM_AESE 6 /* Includes AESD */ +#define NEON_2RM_AESMC 7 /* Includes AESIMC */ +#define NEON_2RM_VCLS 8 +#define NEON_2RM_VCLZ 9 +#define NEON_2RM_VCNT 10 +#define NEON_2RM_VMVN 11 +#define NEON_2RM_VPADAL 12 +#define NEON_2RM_VPADAL_U 13 +#define NEON_2RM_VQABS 14 +#define NEON_2RM_VQNEG 15 +#define NEON_2RM_VCGT0 16 +#define NEON_2RM_VCGE0 17 +#define NEON_2RM_VCEQ0 18 +#define NEON_2RM_VCLE0 19 +#define NEON_2RM_VCLT0 20 +#define NEON_2RM_SHA1H 21 +#define NEON_2RM_VABS 22 +#define NEON_2RM_VNEG 23 +#define NEON_2RM_VCGT0_F 24 +#define NEON_2RM_VCGE0_F 25 +#define NEON_2RM_VCEQ0_F 26 +#define NEON_2RM_VCLE0_F 27 +#define NEON_2RM_VCLT0_F 28 +#define NEON_2RM_VABS_F 30 +#define NEON_2RM_VNEG_F 31 +#define NEON_2RM_VSWP 32 +#define NEON_2RM_VTRN 33 +#define NEON_2RM_VUZP 34 +#define NEON_2RM_VZIP 35 +#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */ +#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */ +#define NEON_2RM_VSHLL 38 +#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */ +#define NEON_2RM_VRINTN 40 +#define NEON_2RM_VRINTX 41 +#define NEON_2RM_VRINTA 42 +#define NEON_2RM_VRINTZ 43 +#define NEON_2RM_VCVT_F16_F32 44 +#define NEON_2RM_VRINTM 45 +#define NEON_2RM_VCVT_F32_F16 46 +#define NEON_2RM_VRINTP 47 +#define NEON_2RM_VCVTAU 48 +#define NEON_2RM_VCVTAS 49 +#define NEON_2RM_VCVTNU 50 +#define NEON_2RM_VCVTNS 51 +#define NEON_2RM_VCVTPU 52 +#define NEON_2RM_VCVTPS 53 +#define NEON_2RM_VCVTMU 54 +#define NEON_2RM_VCVTMS 55 +#define NEON_2RM_VRECPE 56 +#define NEON_2RM_VRSQRTE 57 +#define NEON_2RM_VRECPE_F 58 +#define NEON_2RM_VRSQRTE_F 59 +#define NEON_2RM_VCVT_FS 60 +#define NEON_2RM_VCVT_FU 61 +#define NEON_2RM_VCVT_SF 62 +#define NEON_2RM_VCVT_UF 63 + +static bool neon_2rm_is_v8_op(int op) +{ + /* Return true if this neon 2reg-misc op is ARMv8 and up */ + switch (op) { + case NEON_2RM_VRINTN: + case NEON_2RM_VRINTA: + case NEON_2RM_VRINTM: + case NEON_2RM_VRINTP: + case NEON_2RM_VRINTZ: + case NEON_2RM_VRINTX: + case NEON_2RM_VCVTAU: + case NEON_2RM_VCVTAS: + case NEON_2RM_VCVTNU: + case NEON_2RM_VCVTNS: + case NEON_2RM_VCVTPU: + case NEON_2RM_VCVTPS: + case NEON_2RM_VCVTMU: + case NEON_2RM_VCVTMS: + return true; + default: + return false; + } +} + +/* Each entry in this array has bit n set if the insn allows + * size value n (otherwise it will UNDEF). Since unallocated + * op values will have no bits set they always UNDEF. 
+ */ +static const uint8_t neon_2rm_sizes[] = { + [NEON_2RM_VREV64] = 0x7, + [NEON_2RM_VREV32] = 0x3, + [NEON_2RM_VREV16] = 0x1, + [NEON_2RM_VPADDL] = 0x7, + [NEON_2RM_VPADDL_U] = 0x7, + [NEON_2RM_AESE] = 0x1, + [NEON_2RM_AESMC] = 0x1, + [NEON_2RM_VCLS] = 0x7, + [NEON_2RM_VCLZ] = 0x7, + [NEON_2RM_VCNT] = 0x1, + [NEON_2RM_VMVN] = 0x1, + [NEON_2RM_VPADAL] = 0x7, + [NEON_2RM_VPADAL_U] = 0x7, + [NEON_2RM_VQABS] = 0x7, + [NEON_2RM_VQNEG] = 0x7, + [NEON_2RM_VCGT0] = 0x7, + [NEON_2RM_VCGE0] = 0x7, + [NEON_2RM_VCEQ0] = 0x7, + [NEON_2RM_VCLE0] = 0x7, + [NEON_2RM_VCLT0] = 0x7, + [NEON_2RM_SHA1H] = 0x4, + [NEON_2RM_VABS] = 0x7, + [NEON_2RM_VNEG] = 0x7, + [NEON_2RM_VCGT0_F] = 0x4, + [NEON_2RM_VCGE0_F] = 0x4, + [NEON_2RM_VCEQ0_F] = 0x4, + [NEON_2RM_VCLE0_F] = 0x4, + [NEON_2RM_VCLT0_F] = 0x4, + [NEON_2RM_VABS_F] = 0x4, + [NEON_2RM_VNEG_F] = 0x4, + [NEON_2RM_VSWP] = 0x1, + [NEON_2RM_VTRN] = 0x7, + [NEON_2RM_VUZP] = 0x7, + [NEON_2RM_VZIP] = 0x7, + [NEON_2RM_VMOVN] = 0x7, + [NEON_2RM_VQMOVN] = 0x7, + [NEON_2RM_VSHLL] = 0x7, + [NEON_2RM_SHA1SU1] = 0x4, + [NEON_2RM_VRINTN] = 0x4, + [NEON_2RM_VRINTX] = 0x4, + [NEON_2RM_VRINTA] = 0x4, + [NEON_2RM_VRINTZ] = 0x4, + [NEON_2RM_VCVT_F16_F32] = 0x2, + [NEON_2RM_VRINTM] = 0x4, + [NEON_2RM_VCVT_F32_F16] = 0x2, + [NEON_2RM_VRINTP] = 0x4, + [NEON_2RM_VCVTAU] = 0x4, + [NEON_2RM_VCVTAS] = 0x4, + [NEON_2RM_VCVTNU] = 0x4, + [NEON_2RM_VCVTNS] = 0x4, + [NEON_2RM_VCVTPU] = 0x4, + [NEON_2RM_VCVTPS] = 0x4, + [NEON_2RM_VCVTMU] = 0x4, + [NEON_2RM_VCVTMS] = 0x4, + [NEON_2RM_VRECPE] = 0x4, + [NEON_2RM_VRSQRTE] = 0x4, + [NEON_2RM_VRECPE_F] = 0x4, + [NEON_2RM_VRSQRTE_F] = 0x4, + [NEON_2RM_VCVT_FS] = 0x4, + [NEON_2RM_VCVT_FU] = 0x4, + [NEON_2RM_VCVT_SF] = 0x4, + [NEON_2RM_VCVT_UF] = 0x4, +}; + + +/* Expand v8.1 simd helper. */ +static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn, + int q, int rd, int rn, int rm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (dc_isar_feature(aa32_rdm, s)) { + int opr_sz = (1 + q) * 8; + tcg_gen_gvec_3_ptr(tcg_ctx, vfp_reg_offset(1, rd), + vfp_reg_offset(1, rn), + vfp_reg_offset(1, rm), tcg_ctx->cpu_env, + opr_sz, opr_sz, 0, fn); + return 0; + } + return 1; +} + +static void gen_ssra8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_vec_sar8i_i64(tcg_ctx, a, a, shift); + tcg_gen_vec_add8_i64(tcg_ctx, d, d, a); +} + +static void gen_ssra16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_vec_sar16i_i64(tcg_ctx, a, a, shift); + tcg_gen_vec_add16_i64(tcg_ctx, d, d, a); +} + +static void gen_ssra32_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, int32_t shift) +{ + tcg_gen_sari_i32(tcg_ctx, a, a, shift); + tcg_gen_add_i32(tcg_ctx, d, d, a); +} + +static void gen_ssra64_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_sari_i64(tcg_ctx, a, a, shift); + tcg_gen_add_i64(tcg_ctx, d, d, a); +} + +static void gen_ssra_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) +{ + tcg_gen_sari_vec(tcg_ctx, vece, a, a, sh); + tcg_gen_add_vec(tcg_ctx, vece, d, d, a); +} + +static const TCGOpcode vecop_list_ssra[] = { + INDEX_op_sari_vec, INDEX_op_add_vec, 0 +}; + +const GVecGen2i ssra_op[4] = { + { .fni8 = gen_ssra8_i64, + .fniv = gen_ssra_vec, + .load_dest = true, + .opt_opc = vecop_list_ssra, + .vece = MO_8 }, + { .fni8 = gen_ssra16_i64, + .fniv = gen_ssra_vec, + .load_dest = true, + .opt_opc = vecop_list_ssra, + .vece = MO_16 }, + { .fni4 = gen_ssra32_i32, + .fniv = gen_ssra_vec, + .load_dest = true, + .opt_opc = vecop_list_ssra, + .vece = MO_32 
}, + { .fni8 = gen_ssra64_i64, + .fniv = gen_ssra_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .opt_opc = vecop_list_ssra, + .load_dest = true, + .vece = MO_64 }, +}; + +static void gen_usra8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_vec_shr8i_i64(tcg_ctx, a, a, shift); + tcg_gen_vec_add8_i64(tcg_ctx, d, d, a); +} + +static void gen_usra16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_vec_shr16i_i64(tcg_ctx, a, a, shift); + tcg_gen_vec_add16_i64(tcg_ctx, d, d, a); +} + +static void gen_usra32_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, int32_t shift) +{ + tcg_gen_shri_i32(tcg_ctx, a, a, shift); + tcg_gen_add_i32(tcg_ctx, d, d, a); +} + +static void gen_usra64_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_shri_i64(tcg_ctx, a, a, shift); + tcg_gen_add_i64(tcg_ctx, d, d, a); +} + +static void gen_usra_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) +{ + tcg_gen_shri_vec(tcg_ctx, vece, a, a, sh); + tcg_gen_add_vec(tcg_ctx, vece, d, d, a); +} + +static const TCGOpcode vecop_list_usra[] = { + INDEX_op_shri_vec, INDEX_op_add_vec, 0 +}; + +const GVecGen2i usra_op[4] = { + { .fni8 = gen_usra8_i64, + .fniv = gen_usra_vec, + .load_dest = true, + .opt_opc = vecop_list_usra, + .vece = MO_8, }, + { .fni8 = gen_usra16_i64, + .fniv = gen_usra_vec, + .load_dest = true, + .opt_opc = vecop_list_usra, + .vece = MO_16, }, + { .fni4 = gen_usra32_i32, + .fniv = gen_usra_vec, + .load_dest = true, + .opt_opc = vecop_list_usra, + .vece = MO_32, }, + { .fni8 = gen_usra64_i64, + .fniv = gen_usra_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .opt_opc = vecop_list_usra, + .vece = MO_64, }, +}; + +static void gen_shr8_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + uint64_t mask = dup_const(MO_8, 0xff >> shift); + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_shri_i64(tcg_ctx, t, a, shift); + tcg_gen_andi_i64(tcg_ctx, t, t, mask); + tcg_gen_andi_i64(tcg_ctx, d, d, ~mask); + tcg_gen_or_i64(tcg_ctx, d, d, t); + tcg_temp_free_i64(tcg_ctx, t); +} + +static void gen_shr16_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + uint64_t mask = dup_const(MO_16, 0xffff >> shift); + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_shri_i64(tcg_ctx, t, a, shift); + tcg_gen_andi_i64(tcg_ctx, t, t, mask); + tcg_gen_andi_i64(tcg_ctx, d, d, ~mask); + tcg_gen_or_i64(tcg_ctx, d, d, t); + tcg_temp_free_i64(tcg_ctx, t); +} + +static void gen_shr32_ins_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, int32_t shift) +{ + tcg_gen_shri_i32(tcg_ctx, a, a, shift); + tcg_gen_deposit_i32(tcg_ctx, d, d, a, 0, 32 - shift); +} + +static void gen_shr64_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_shri_i64(tcg_ctx, a, a, shift); + tcg_gen_deposit_i64(tcg_ctx, d, d, a, 0, 64 - shift); +} + +static void gen_shr_ins_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) +{ + if (sh == 0) { + tcg_gen_mov_vec(tcg_ctx, d, a); + } else { + TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, d); + TCGv_vec m = tcg_temp_new_vec_matching(tcg_ctx, d); + + tcg_gen_dupi_vec(tcg_ctx, vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh)); + tcg_gen_shri_vec(tcg_ctx, vece, t, a, sh); + tcg_gen_and_vec(tcg_ctx, vece, d, d, m); + tcg_gen_or_vec(tcg_ctx, vece, d, d, t); + + tcg_temp_free_vec(tcg_ctx, t); + tcg_temp_free_vec(tcg_ctx, m); + } +} + +static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 }; + 
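+/*
+ * Illustration only: SRI ("shift right and insert") keeps the top
+ * `shift` bits of each destination lane and replaces the remaining low
+ * bits with the right-shifted source, as in this per-lane plain-C
+ * sketch for an 8-bit lane (hypothetical name):
+ *
+ *     uint8_t sri_u8(uint8_t d, uint8_t a, int shift)
+ *     {
+ *         uint8_t mask = 0xff >> shift;               // bits written
+ *         return (d & ~mask) | ((a >> shift) & mask); // keep d's top bits
+ *     }
+ *
+ * so sri_u8(0xf0, 0xff, 4) == 0xff: the top 4 bits of d are preserved
+ * and the low 4 bits come from a >> 4, matching gen_shr8_ins_i64 above.
+ */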
+const GVecGen2i sri_op[4] = { + { .fni8 = gen_shr8_ins_i64, + .fniv = gen_shr_ins_vec, + .load_dest = true, + .opt_opc = vecop_list_sri, + .vece = MO_8 }, + { .fni8 = gen_shr16_ins_i64, + .fniv = gen_shr_ins_vec, + .load_dest = true, + .opt_opc = vecop_list_sri, + .vece = MO_16 }, + { .fni4 = gen_shr32_ins_i32, + .fniv = gen_shr_ins_vec, + .load_dest = true, + .opt_opc = vecop_list_sri, + .vece = MO_32 }, + { .fni8 = gen_shr64_ins_i64, + .fniv = gen_shr_ins_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .opt_opc = vecop_list_sri, + .vece = MO_64 }, +}; + +static void gen_shl8_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + uint64_t mask = dup_const(MO_8, 0xff << shift); + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_shli_i64(tcg_ctx, t, a, shift); + tcg_gen_andi_i64(tcg_ctx, t, t, mask); + tcg_gen_andi_i64(tcg_ctx, d, d, ~mask); + tcg_gen_or_i64(tcg_ctx, d, d, t); + tcg_temp_free_i64(tcg_ctx, t); +} + +static void gen_shl16_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + uint64_t mask = dup_const(MO_16, 0xffff << shift); + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_shli_i64(tcg_ctx, t, a, shift); + tcg_gen_andi_i64(tcg_ctx, t, t, mask); + tcg_gen_andi_i64(tcg_ctx, d, d, ~mask); + tcg_gen_or_i64(tcg_ctx, d, d, t); + tcg_temp_free_i64(tcg_ctx, t); +} + +static void gen_shl32_ins_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, int32_t shift) +{ + tcg_gen_deposit_i32(tcg_ctx, d, d, a, shift, 32 - shift); +} + +static void gen_shl64_ins_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_deposit_i64(tcg_ctx, d, d, a, shift, 64 - shift); +} + +static void gen_shl_ins_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) +{ + if (sh == 0) { + tcg_gen_mov_vec(tcg_ctx, d, a); + } else { + TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, d); + TCGv_vec m = tcg_temp_new_vec_matching(tcg_ctx, d); + + tcg_gen_dupi_vec(tcg_ctx, vece, m, MAKE_64BIT_MASK(0, sh)); + tcg_gen_shli_vec(tcg_ctx, vece, t, a, sh); + tcg_gen_and_vec(tcg_ctx, vece, d, d, m); + tcg_gen_or_vec(tcg_ctx, vece, d, d, t); + + tcg_temp_free_vec(tcg_ctx, t); + tcg_temp_free_vec(tcg_ctx, m); + } +} + +static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 }; + +const GVecGen2i sli_op[4] = { + { .fni8 = gen_shl8_ins_i64, + .fniv = gen_shl_ins_vec, + .load_dest = true, + .opt_opc = vecop_list_sli, + .vece = MO_8 }, + { .fni8 = gen_shl16_ins_i64, + .fniv = gen_shl_ins_vec, + .load_dest = true, + .opt_opc = vecop_list_sli, + .vece = MO_16 }, + { .fni4 = gen_shl32_ins_i32, + .fniv = gen_shl_ins_vec, + .load_dest = true, + .opt_opc = vecop_list_sli, + .vece = MO_32 }, + { .fni8 = gen_shl64_ins_i64, + .fniv = gen_shl_ins_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .opt_opc = vecop_list_sli, + .vece = MO_64 }, +}; + +static void gen_mla8_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + gen_helper_neon_mul_u8(tcg_ctx, a, a, b); + gen_helper_neon_add_u8(tcg_ctx, d, d, a); +} + +static void gen_mls8_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + gen_helper_neon_mul_u8(tcg_ctx, a, a, b); + gen_helper_neon_sub_u8(tcg_ctx, d, d, a); +} + +static void gen_mla16_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + gen_helper_neon_mul_u16(tcg_ctx, a, a, b); + gen_helper_neon_add_u16(tcg_ctx, d, d, a); +} + +static void gen_mls16_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + gen_helper_neon_mul_u16(tcg_ctx, a, a, b); 
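+    /* a now holds the per-lane u16 product; the subtract below folds it
+     * into the accumulator d, giving the multiply-subtract (VMLS) step. */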
+ gen_helper_neon_sub_u16(tcg_ctx, d, d, a); +} + +static void gen_mla32_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_mul_i32(tcg_ctx, a, a, b); + tcg_gen_add_i32(tcg_ctx, d, d, a); +} + +static void gen_mls32_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_mul_i32(tcg_ctx, a, a, b); + tcg_gen_sub_i32(tcg_ctx, d, d, a); +} + +static void gen_mla64_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_mul_i64(tcg_ctx, a, a, b); + tcg_gen_add_i64(tcg_ctx, d, d, a); +} + +static void gen_mls64_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_mul_i64(tcg_ctx, a, a, b); + tcg_gen_sub_i64(tcg_ctx, d, d, a); +} + +static void gen_mla_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_mul_vec(tcg_ctx, vece, a, a, b); + tcg_gen_add_vec(tcg_ctx, vece, d, d, a); +} + +static void gen_mls_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_mul_vec(tcg_ctx, vece, a, a, b); + tcg_gen_sub_vec(tcg_ctx, vece, d, d, a); +} + +/* Note that while NEON does not support VMLA and VMLS as 64-bit ops, + * these tables are shared with AArch64 which does support them. + */ + +static const TCGOpcode vecop_list_mla[] = { + INDEX_op_mul_vec, INDEX_op_add_vec, 0 +}; + +static const TCGOpcode vecop_list_mls[] = { + INDEX_op_mul_vec, INDEX_op_sub_vec, 0 +}; + +const GVecGen3 mla_op[4] = { + { .fni4 = gen_mla8_i32, + .fniv = gen_mla_vec, + .load_dest = true, + .opt_opc = vecop_list_mla, + .vece = MO_8 }, + { .fni4 = gen_mla16_i32, + .fniv = gen_mla_vec, + .load_dest = true, + .opt_opc = vecop_list_mla, + .vece = MO_16 }, + { .fni4 = gen_mla32_i32, + .fniv = gen_mla_vec, + .load_dest = true, + .opt_opc = vecop_list_mla, + .vece = MO_32 }, + { .fni8 = gen_mla64_i64, + .fniv = gen_mla_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .opt_opc = vecop_list_mla, + .vece = MO_64 }, +}; + +const GVecGen3 mls_op[4] = { + { .fni4 = gen_mls8_i32, + .fniv = gen_mls_vec, + .load_dest = true, + .opt_opc = vecop_list_mls, + .vece = MO_8 }, + { .fni4 = gen_mls16_i32, + .fniv = gen_mls_vec, + .load_dest = true, + .opt_opc = vecop_list_mls, + .vece = MO_16 }, + { .fni4 = gen_mls32_i32, + .fniv = gen_mls_vec, + .load_dest = true, + .opt_opc = vecop_list_mls, + .vece = MO_32 }, + { .fni8 = gen_mls64_i64, + .fniv = gen_mls_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .opt_opc = vecop_list_mls, + .vece = MO_64 }, +}; + +/* CMTST : test is "if (X & Y != 0)". 
*/ +static void gen_cmtst_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_and_i32(tcg_ctx, d, a, b); + tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_NE, d, d, 0); + tcg_gen_neg_i32(tcg_ctx, d, d); +} + +void gen_cmtst_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_and_i64(tcg_ctx, d, a, b); + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, d, d, 0); + tcg_gen_neg_i64(tcg_ctx, d, d); +} + +static void gen_cmtst_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_and_vec(tcg_ctx, vece, d, a, b); + tcg_gen_dupi_vec(tcg_ctx, vece, a, 0); + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, vece, d, d, a); +} + +static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 }; + +const GVecGen3 cmtst_op[4] = { + { .fni4 = gen_helper_neon_tst_u8, + .fniv = gen_cmtst_vec, + .opt_opc = vecop_list_cmtst, + .vece = MO_8 }, + { .fni4 = gen_helper_neon_tst_u16, + .fniv = gen_cmtst_vec, + .opt_opc = vecop_list_cmtst, + .vece = MO_16 }, + { .fni4 = gen_cmtst_i32, + .fniv = gen_cmtst_vec, + .opt_opc = vecop_list_cmtst, + .vece = MO_32 }, + { .fni8 = gen_cmtst_i64, + .fniv = gen_cmtst_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .opt_opc = vecop_list_cmtst, + .vece = MO_64 }, +}; + +void gen_ushl_i32(TCGContext *tcg_ctx, TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) +{ + TCGv_i32 lval = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 rval = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 lsh = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 rsh = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); + TCGv_i32 max = tcg_const_i32(tcg_ctx, 32); + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. + */ + tcg_gen_ext8s_i32(tcg_ctx, lsh, shift); + tcg_gen_neg_i32(tcg_ctx, rsh, lsh); + tcg_gen_shl_i32(tcg_ctx, lval, src, lsh); + tcg_gen_shr_i32(tcg_ctx, rval, src, rsh); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, dst, lsh, max, lval, zero); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, dst, rsh, max, rval, dst); + + tcg_temp_free_i32(tcg_ctx, lval); + tcg_temp_free_i32(tcg_ctx, rval); + tcg_temp_free_i32(tcg_ctx, lsh); + tcg_temp_free_i32(tcg_ctx, rsh); + tcg_temp_free_i32(tcg_ctx, zero); + tcg_temp_free_i32(tcg_ctx, max); +} + +void gen_ushl_i64(TCGContext *tcg_ctx, TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) +{ + TCGv_i64 lval = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 rval = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 lsh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 rsh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); + TCGv_i64 max = tcg_const_i64(tcg_ctx, 64); + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. 
+ */ + tcg_gen_ext8s_i64(tcg_ctx, lsh, shift); + tcg_gen_neg_i64(tcg_ctx, rsh, lsh); + tcg_gen_shl_i64(tcg_ctx, lval, src, lsh); + tcg_gen_shr_i64(tcg_ctx, rval, src, rsh); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, dst, lsh, max, lval, zero); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, dst, rsh, max, rval, dst); + + tcg_temp_free_i64(tcg_ctx, lval); + tcg_temp_free_i64(tcg_ctx, rval); + tcg_temp_free_i64(tcg_ctx, lsh); + tcg_temp_free_i64(tcg_ctx, rsh); + tcg_temp_free_i64(tcg_ctx, zero); + tcg_temp_free_i64(tcg_ctx, max); +} + +static void gen_ushl_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec dst, + TCGv_vec src, TCGv_vec shift) +{ + TCGv_vec lval = tcg_temp_new_vec_matching(tcg_ctx, dst); + TCGv_vec rval = tcg_temp_new_vec_matching(tcg_ctx, dst); + TCGv_vec lsh = tcg_temp_new_vec_matching(tcg_ctx, dst); + TCGv_vec rsh = tcg_temp_new_vec_matching(tcg_ctx, dst); + TCGv_vec msk, max; + + tcg_gen_neg_vec(tcg_ctx, vece, rsh, shift); + if (vece == MO_8) { + tcg_gen_mov_vec(tcg_ctx, lsh, shift); + } else { + msk = tcg_temp_new_vec_matching(tcg_ctx, dst); + tcg_gen_dupi_vec(tcg_ctx, vece, msk, 0xff); + tcg_gen_and_vec(tcg_ctx, vece, lsh, shift, msk); + tcg_gen_and_vec(tcg_ctx, vece, rsh, rsh, msk); + tcg_temp_free_vec(tcg_ctx, msk); + } + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. + */ + tcg_gen_shlv_vec(tcg_ctx, vece, lval, src, lsh); + tcg_gen_shrv_vec(tcg_ctx, vece, rval, src, rsh); + + max = tcg_temp_new_vec_matching(tcg_ctx, dst); + tcg_gen_dupi_vec(tcg_ctx, vece, max, 8 << vece); + + /* + * The choice of LT (signed) and GEU (unsigned) are biased toward + * the instructions of the x86_64 host. For MO_8, the whole byte + * is significant so we must use an unsigned compare; otherwise we + * have already masked to a byte and so a signed compare works. + * Other tcg hosts have a full set of comparisons and do not care. 
+ */ + if (vece == MO_8) { + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_GEU, vece, lsh, lsh, max); + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_GEU, vece, rsh, rsh, max); + tcg_gen_andc_vec(tcg_ctx, vece, lval, lval, lsh); + tcg_gen_andc_vec(tcg_ctx, vece, rval, rval, rsh); + } else { + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_LT, vece, lsh, lsh, max); + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_LT, vece, rsh, rsh, max); + tcg_gen_and_vec(tcg_ctx, vece, lval, lval, lsh); + tcg_gen_and_vec(tcg_ctx, vece, rval, rval, rsh); + } + tcg_gen_or_vec(tcg_ctx, vece, dst, lval, rval); + + tcg_temp_free_vec(tcg_ctx, max); + tcg_temp_free_vec(tcg_ctx, lval); + tcg_temp_free_vec(tcg_ctx, rval); + tcg_temp_free_vec(tcg_ctx, lsh); + tcg_temp_free_vec(tcg_ctx, rsh); +} + +static const TCGOpcode ushl_list[] = { + INDEX_op_neg_vec, INDEX_op_shlv_vec, + INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0 +}; + +const GVecGen3 ushl_op[4] = { + { .fniv = gen_ushl_vec, + .fno = gen_helper_gvec_ushl_b, + .opt_opc = ushl_list, + .vece = MO_8 }, + { .fniv = gen_ushl_vec, + .fno = gen_helper_gvec_ushl_h, + .opt_opc = ushl_list, + .vece = MO_16 }, + { .fni4 = gen_ushl_i32, + .fniv = gen_ushl_vec, + .opt_opc = ushl_list, + .vece = MO_32 }, + { .fni8 = gen_ushl_i64, + .fniv = gen_ushl_vec, + .opt_opc = ushl_list, + .vece = MO_64 }, +}; + +void gen_sshl_i32(TCGContext *tcg_ctx, TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) +{ + TCGv_i32 lval = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 rval = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 lsh = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 rsh = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); + TCGv_i32 max = tcg_const_i32(tcg_ctx, 31); + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. + */ + tcg_gen_ext8s_i32(tcg_ctx, lsh, shift); + tcg_gen_neg_i32(tcg_ctx, rsh, lsh); + tcg_gen_shl_i32(tcg_ctx, lval, src, lsh); + tcg_gen_umin_i32(tcg_ctx, rsh, rsh, max); + tcg_gen_sar_i32(tcg_ctx, rval, src, rsh); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LEU, lval, lsh, max, lval, zero); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, dst, lsh, zero, rval, lval); + + tcg_temp_free_i32(tcg_ctx, lval); + tcg_temp_free_i32(tcg_ctx, rval); + tcg_temp_free_i32(tcg_ctx, lsh); + tcg_temp_free_i32(tcg_ctx, rsh); + tcg_temp_free_i32(tcg_ctx, zero); + tcg_temp_free_i32(tcg_ctx, max); +} + +void gen_sshl_i64(TCGContext *tcg_ctx, TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) +{ + TCGv_i64 lval = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 rval = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 lsh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 rsh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); + TCGv_i64 max = tcg_const_i64(tcg_ctx, 63); + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. 
+ */ + tcg_gen_ext8s_i64(tcg_ctx, lsh, shift); + tcg_gen_neg_i64(tcg_ctx, rsh, lsh); + tcg_gen_shl_i64(tcg_ctx, lval, src, lsh); + tcg_gen_umin_i64(tcg_ctx, rsh, rsh, max); + tcg_gen_sar_i64(tcg_ctx, rval, src, rsh); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LEU, lval, lsh, max, lval, zero); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, dst, lsh, zero, rval, lval); + + tcg_temp_free_i64(tcg_ctx, lval); + tcg_temp_free_i64(tcg_ctx, rval); + tcg_temp_free_i64(tcg_ctx, lsh); + tcg_temp_free_i64(tcg_ctx, rsh); + tcg_temp_free_i64(tcg_ctx, zero); + tcg_temp_free_i64(tcg_ctx, max); +} + +static void gen_sshl_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec dst, + TCGv_vec src, TCGv_vec shift) +{ + TCGv_vec lval = tcg_temp_new_vec_matching(tcg_ctx, dst); + TCGv_vec rval = tcg_temp_new_vec_matching(tcg_ctx, dst); + TCGv_vec lsh = tcg_temp_new_vec_matching(tcg_ctx, dst); + TCGv_vec rsh = tcg_temp_new_vec_matching(tcg_ctx, dst); + TCGv_vec tmp = tcg_temp_new_vec_matching(tcg_ctx, dst); + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. + */ + tcg_gen_neg_vec(tcg_ctx, vece, rsh, shift); + if (vece == MO_8) { + tcg_gen_mov_vec(tcg_ctx, lsh, shift); + } else { + tcg_gen_dupi_vec(tcg_ctx, vece, tmp, 0xff); + tcg_gen_and_vec(tcg_ctx, vece, lsh, shift, tmp); + tcg_gen_and_vec(tcg_ctx, vece, rsh, rsh, tmp); + } + + /* Bound rsh so out of bound right shift gets -1. */ + tcg_gen_dupi_vec(tcg_ctx, vece, tmp, (8 << vece) - 1); + tcg_gen_umin_vec(tcg_ctx, vece, rsh, rsh, tmp); + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_GT, vece, tmp, lsh, tmp); + + tcg_gen_shlv_vec(tcg_ctx, vece, lval, src, lsh); + tcg_gen_sarv_vec(tcg_ctx, vece, rval, src, rsh); + + /* Select in-bound left shift. */ + tcg_gen_andc_vec(tcg_ctx, vece, lval, lval, tmp); + + /* Select between left and right shift. 
*/ + if (vece == MO_8) { + tcg_gen_dupi_vec(tcg_ctx, vece, tmp, 0); + tcg_gen_cmpsel_vec(tcg_ctx, TCG_COND_LT, vece, dst, lsh, tmp, rval, lval); + } else { + tcg_gen_dupi_vec(tcg_ctx, vece, tmp, 0x80); + tcg_gen_cmpsel_vec(tcg_ctx, TCG_COND_LT, vece, dst, lsh, tmp, lval, rval); + } + + tcg_temp_free_vec(tcg_ctx, lval); + tcg_temp_free_vec(tcg_ctx, rval); + tcg_temp_free_vec(tcg_ctx, lsh); + tcg_temp_free_vec(tcg_ctx, rsh); + tcg_temp_free_vec(tcg_ctx, tmp); +} + +static const TCGOpcode sshl_list[] = { + INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec, + INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0 +}; + +const GVecGen3 sshl_op[4] = { + { .fniv = gen_sshl_vec, + .fno = gen_helper_gvec_sshl_b, + .opt_opc = sshl_list, + .vece = MO_8 }, + { .fniv = gen_sshl_vec, + .fno = gen_helper_gvec_sshl_h, + .opt_opc = sshl_list, + .vece = MO_16 }, + { .fni4 = gen_sshl_i32, + .fniv = gen_sshl_vec, + .opt_opc = sshl_list, + .vece = MO_32 }, + { .fni8 = gen_sshl_i64, + .fniv = gen_sshl_vec, + .opt_opc = sshl_list, + .vece = MO_64 }, +}; + +static void gen_uqadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec t, TCGv_vec sat, + TCGv_vec a, TCGv_vec b) +{ + TCGv_vec x = tcg_temp_new_vec_matching(tcg_ctx, t); + tcg_gen_add_vec(tcg_ctx, vece, x, a, b); + tcg_gen_usadd_vec(tcg_ctx, vece, t, a, b); + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, vece, x, x, t); + tcg_gen_or_vec(tcg_ctx, vece, sat, sat, x); + tcg_temp_free_vec(tcg_ctx, x); +} + +static const TCGOpcode vecop_list_uqadd[] = { + INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0 +}; + +const GVecGen4 uqadd_op[4] = { + { .fniv = gen_uqadd_vec, + .fno = gen_helper_gvec_uqadd_b, + .write_aofs = true, + .opt_opc = vecop_list_uqadd, + .vece = MO_8 }, + { .fniv = gen_uqadd_vec, + .fno = gen_helper_gvec_uqadd_h, + .write_aofs = true, + .opt_opc = vecop_list_uqadd, + .vece = MO_16 }, + { .fniv = gen_uqadd_vec, + .fno = gen_helper_gvec_uqadd_s, + .write_aofs = true, + .opt_opc = vecop_list_uqadd, + .vece = MO_32 }, + { .fniv = gen_uqadd_vec, + .fno = gen_helper_gvec_uqadd_d, + .write_aofs = true, + .opt_opc = vecop_list_uqadd, + .vece = MO_64 }, +}; + +static void gen_sqadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec t, TCGv_vec sat, + TCGv_vec a, TCGv_vec b) +{ + TCGv_vec x = tcg_temp_new_vec_matching(tcg_ctx, t); + tcg_gen_add_vec(tcg_ctx, vece, x, a, b); + tcg_gen_ssadd_vec(tcg_ctx, vece, t, a, b); + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, vece, x, x, t); + tcg_gen_or_vec(tcg_ctx, vece, sat, sat, x); + tcg_temp_free_vec(tcg_ctx, x); +} + +static const TCGOpcode vecop_list_sqadd[] = { + INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0 +}; + +const GVecGen4 sqadd_op[4] = { + { .fniv = gen_sqadd_vec, + .fno = gen_helper_gvec_sqadd_b, + .opt_opc = vecop_list_sqadd, + .write_aofs = true, + .vece = MO_8 }, + { .fniv = gen_sqadd_vec, + .fno = gen_helper_gvec_sqadd_h, + .opt_opc = vecop_list_sqadd, + .write_aofs = true, + .vece = MO_16 }, + { .fniv = gen_sqadd_vec, + .fno = gen_helper_gvec_sqadd_s, + .opt_opc = vecop_list_sqadd, + .write_aofs = true, + .vece = MO_32 }, + { .fniv = gen_sqadd_vec, + .fno = gen_helper_gvec_sqadd_d, + .opt_opc = vecop_list_sqadd, + .write_aofs = true, + .vece = MO_64 }, +}; + +static void gen_uqsub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec t, TCGv_vec sat, + TCGv_vec a, TCGv_vec b) +{ + TCGv_vec x = tcg_temp_new_vec_matching(tcg_ctx, t); + tcg_gen_sub_vec(tcg_ctx, vece, x, a, b); + tcg_gen_ussub_vec(tcg_ctx, vece, t, a, b); + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, vece, x, x, t); + 
tcg_gen_or_vec(tcg_ctx, vece, sat, sat, x); + tcg_temp_free_vec(tcg_ctx, x); +} + +static const TCGOpcode vecop_list_uqsub[] = { + INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0 +}; + +const GVecGen4 uqsub_op[4] = { + { .fniv = gen_uqsub_vec, + .fno = gen_helper_gvec_uqsub_b, + .opt_opc = vecop_list_uqsub, + .write_aofs = true, + .vece = MO_8 }, + { .fniv = gen_uqsub_vec, + .fno = gen_helper_gvec_uqsub_h, + .opt_opc = vecop_list_uqsub, + .write_aofs = true, + .vece = MO_16 }, + { .fniv = gen_uqsub_vec, + .fno = gen_helper_gvec_uqsub_s, + .opt_opc = vecop_list_uqsub, + .write_aofs = true, + .vece = MO_32 }, + { .fniv = gen_uqsub_vec, + .fno = gen_helper_gvec_uqsub_d, + .opt_opc = vecop_list_uqsub, + .write_aofs = true, + .vece = MO_64 }, +}; + +static void gen_sqsub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec t, TCGv_vec sat, + TCGv_vec a, TCGv_vec b) +{ + TCGv_vec x = tcg_temp_new_vec_matching(tcg_ctx, t); + tcg_gen_sub_vec(tcg_ctx, vece, x, a, b); + tcg_gen_sssub_vec(tcg_ctx, vece, t, a, b); + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, vece, x, x, t); + tcg_gen_or_vec(tcg_ctx, vece, sat, sat, x); + tcg_temp_free_vec(tcg_ctx, x); +} + +static const TCGOpcode vecop_list_sqsub[] = { + INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0 +}; + +const GVecGen4 sqsub_op[4] = { + { .fniv = gen_sqsub_vec, + .fno = gen_helper_gvec_sqsub_b, + .opt_opc = vecop_list_sqsub, + .write_aofs = true, + .vece = MO_8 }, + { .fniv = gen_sqsub_vec, + .fno = gen_helper_gvec_sqsub_h, + .opt_opc = vecop_list_sqsub, + .write_aofs = true, + .vece = MO_16 }, + { .fniv = gen_sqsub_vec, + .fno = gen_helper_gvec_sqsub_s, + .opt_opc = vecop_list_sqsub, + .write_aofs = true, + .vece = MO_32 }, + { .fniv = gen_sqsub_vec, + .fno = gen_helper_gvec_sqsub_d, + .opt_opc = vecop_list_sqsub, + .write_aofs = true, + .vece = MO_64 }, +}; + +/* Translate a NEON data processing instruction. Return nonzero if the + instruction is invalid. + We process data in a mixture of 32-bit and 64-bit chunks. + Mostly we use 32-bit chunks so we can use normal scalar instructions. */ + +static int disas_neon_data_insn(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int op; + int q; + int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs; + int size; + int shift; + int pass; + int count; + int pairwise; + int u; + int vec_size; + uint32_t imm; + TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5; + TCGv_ptr ptr1, ptr2, ptr3; + TCGv_i64 tmp64; + + /* FIXME: this access check should not take precedence over UNDEF + * for invalid encodings; we will generate incorrect syndrome information + * for attempts to execute invalid vfp/neon encodings with FP disabled. + */ + if (s->fp_excp_el) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); + return 0; + } + + if (!s->vfp_enabled) + return 1; + q = (insn & (1 << 6)) != 0; + u = (insn >> 24) & 1; + VFP_DREG_D(rd, insn); + VFP_DREG_N(rn, insn); + VFP_DREG_M(rm, insn); + size = (insn >> 20) & 3; + vec_size = q ? 16 : 8; + rd_ofs = neon_reg_offset(rd, 0); + rn_ofs = neon_reg_offset(rn, 0); + rm_ofs = neon_reg_offset(rm, 0); + + if ((insn & (1 << 23)) == 0) { + /* Three register same length. */ + op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1); + /* Catch invalid op and bad size combinations: UNDEF */ + if ((neon_3r_sizes[op] & (1 << size)) == 0) { + return 1; + } + /* All insns of this form UNDEF for either this condition or the + * superset of cases "Q==1"; we catch the latter later. 
+ */ + if (q && ((rd | rn | rm) & 1)) { + return 1; + } + switch (op) { + case NEON_3R_SHA: + /* The SHA-1/SHA-256 3-register instructions require special + * treatment here, as their size field is overloaded as an + * op type selector, and they all consume their input in a + * single pass. + */ + if (!q) { + return 1; + } + if (!u) { /* SHA-1 */ + if (!dc_isar_feature(aa32_sha1, s)) { + return 1; + } + ptr1 = vfp_reg_ptr(tcg_ctx, true, rd); + ptr2 = vfp_reg_ptr(tcg_ctx, true, rn); + ptr3 = vfp_reg_ptr(tcg_ctx, true, rm); + tmp4 = tcg_const_i32(tcg_ctx, size); + gen_helper_crypto_sha1_3reg(tcg_ctx, ptr1, ptr2, ptr3, tmp4); + tcg_temp_free_i32(tcg_ctx, tmp4); + } else { /* SHA-256 */ + if (!dc_isar_feature(aa32_sha2, s) || size == 3) { + return 1; + } + ptr1 = vfp_reg_ptr(tcg_ctx, true, rd); + ptr2 = vfp_reg_ptr(tcg_ctx, true, rn); + ptr3 = vfp_reg_ptr(tcg_ctx, true, rm); + switch (size) { + case 0: + gen_helper_crypto_sha256h(tcg_ctx, ptr1, ptr2, ptr3); + break; + case 1: + gen_helper_crypto_sha256h2(tcg_ctx, ptr1, ptr2, ptr3); + break; + case 2: + gen_helper_crypto_sha256su1(tcg_ctx, ptr1, ptr2, ptr3); + break; + } + } + tcg_temp_free_ptr(tcg_ctx, ptr1); + tcg_temp_free_ptr(tcg_ctx, ptr2); + tcg_temp_free_ptr(tcg_ctx, ptr3); + return 0; + + case NEON_3R_VPADD_VQRDMLAH: + if (!u) { + break; /* VPADD */ + } + /* VQRDMLAH */ + switch (size) { + case 1: + return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16, + q, rd, rn, rm); + case 2: + return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32, + q, rd, rn, rm); + } + return 1; + + case NEON_3R_VFM_VQRDMLSH: + if (!u) { + /* VFM, VFMS */ + if (size == 1) { + return 1; + } + break; + } + /* VQRDMLSH */ + switch (size) { + case 1: + return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16, + q, rd, rn, rm); + case 2: + return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32, + q, rd, rn, rm); + } + return 1; + + case NEON_3R_LOGIC: /* Logic ops. */ + switch ((u << 2) | size) { + case 0: /* VAND */ + tcg_gen_gvec_and(tcg_ctx, 0, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + break; + case 1: /* VBIC */ + tcg_gen_gvec_andc(tcg_ctx, 0, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + break; + case 2: /* VORR */ + tcg_gen_gvec_or(tcg_ctx, 0, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + break; + case 3: /* VORN */ + tcg_gen_gvec_orc(tcg_ctx, 0, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + break; + case 4: /* VEOR */ + tcg_gen_gvec_xor(tcg_ctx, 0, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + break; + case 5: /* VBSL */ + tcg_gen_gvec_bitsel(tcg_ctx, MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + break; + case 6: /* VBIT */ + tcg_gen_gvec_bitsel(tcg_ctx, MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs, + vec_size, vec_size); + break; + case 7: /* VBIF */ + tcg_gen_gvec_bitsel(tcg_ctx, MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs, + vec_size, vec_size); + break; + } + return 0; + + case NEON_3R_VADD_VSUB: + if (u) { + tcg_gen_gvec_sub(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } else { + tcg_gen_gvec_add(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } + return 0; + + case NEON_3R_VQADD: + tcg_gen_gvec_4(tcg_ctx, rd_ofs, offsetof(CPUARMState, vfp.qc), + rn_ofs, rm_ofs, vec_size, vec_size, + (u ? uqadd_op : sqadd_op) + size); + return 0; + + case NEON_3R_VQSUB: + tcg_gen_gvec_4(tcg_ctx, rd_ofs, offsetof(CPUARMState, vfp.qc), + rn_ofs, rm_ofs, vec_size, vec_size, + (u ? uqsub_op : sqsub_op) + size); + return 0; + + case NEON_3R_VMUL: /* VMUL */ + if (u) { + /* Polynomial case allows only P8. 
*/ + if (size != 0) { + return 1; + } + tcg_gen_gvec_3_ool(tcg_ctx, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size, + 0, gen_helper_gvec_pmul_b); + } else { + tcg_gen_gvec_mul(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } + return 0; + + case NEON_3R_VML: /* VMLA, VMLS */ + tcg_gen_gvec_3(tcg_ctx, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size, + u ? &mls_op[size] : &mla_op[size]); + return 0; + + case NEON_3R_VTST_VCEQ: + if (u) { /* VCEQ */ + tcg_gen_gvec_cmp(tcg_ctx, TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } else { /* VTST */ + tcg_gen_gvec_3(tcg_ctx, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size, &cmtst_op[size]); + } + return 0; + + case NEON_3R_VCGT: + tcg_gen_gvec_cmp(tcg_ctx, u ? TCG_COND_GTU : TCG_COND_GT, size, + rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); + return 0; + + case NEON_3R_VCGE: + tcg_gen_gvec_cmp(tcg_ctx, u ? TCG_COND_GEU : TCG_COND_GE, size, + rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); + return 0; + + case NEON_3R_VMAX: + if (u) { + tcg_gen_gvec_umax(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } else { + tcg_gen_gvec_smax(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } + return 0; + case NEON_3R_VMIN: + if (u) { + tcg_gen_gvec_umin(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } else { + tcg_gen_gvec_smin(tcg_ctx, size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } + return 0; + + case NEON_3R_VSHL: + /* Note the operation is vshl vd,vm,vn */ + tcg_gen_gvec_3(tcg_ctx, rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size, + u ? &ushl_op[size] : &sshl_op[size]); + return 0; + } + + if (size == 3) { + /* 64-bit element instructions. */ + for (pass = 0; pass < (q ? 2 : 1); pass++) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + pass); + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + pass); + switch (op) { + case NEON_3R_VQSHL: + if (u) { + gen_helper_neon_qshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } else { + gen_helper_neon_qshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } + break; + case NEON_3R_VRSHL: + if (u) { + gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } else { + gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } + break; + case NEON_3R_VQRSHL: + if (u) { + gen_helper_neon_qrshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } else { + gen_helper_neon_qrshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } + break; + default: + abort(); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + return 0; + } + pairwise = 0; + switch (op) { + case NEON_3R_VQSHL: + case NEON_3R_VRSHL: + case NEON_3R_VQRSHL: + { + int rtmp; + /* Shift instruction operands are reversed. 
*/ + rtmp = rn; + rn = rm; + rm = rtmp; + } + break; + case NEON_3R_VPADD_VQRDMLAH: + case NEON_3R_VPMAX: + case NEON_3R_VPMIN: + pairwise = 1; + break; + case NEON_3R_FLOAT_ARITH: + pairwise = (u && size < 2); /* if VPADD (float) */ + break; + case NEON_3R_FLOAT_MINMAX: + pairwise = u; /* if VPMIN/VPMAX (float) */ + break; + case NEON_3R_FLOAT_CMP: + if (!u && size) { + /* no encoding for U=0 C=1x */ + return 1; + } + break; + case NEON_3R_FLOAT_ACMP: + if (!u) { + return 1; + } + break; + case NEON_3R_FLOAT_MISC: + /* VMAXNM/VMINNM in ARMv8 */ + if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) { + return 1; + } + break; + case NEON_3R_VFM_VQRDMLSH: + if (!dc_isar_feature(aa32_simdfmac, s)) { + return 1; + } + break; + default: + break; + } + + if (pairwise && q) { + /* All the pairwise insns UNDEF if Q is set */ + return 1; + } + + for (pass = 0; pass < (q ? 4 : 2); pass++) { + + if (pairwise) { + /* Pairwise. */ + if (pass < 1) { + tmp = neon_load_reg(tcg_ctx, rn, 0); + tmp2 = neon_load_reg(tcg_ctx, rn, 1); + } else { + tmp = neon_load_reg(tcg_ctx, rm, 0); + tmp2 = neon_load_reg(tcg_ctx, rm, 1); + } + } else { + /* Elementwise. */ + tmp = neon_load_reg(tcg_ctx, rn, pass); + tmp2 = neon_load_reg(tcg_ctx, rm, pass); + } + switch (op) { + case NEON_3R_VHADD: + GEN_NEON_INTEGER_OP(hadd); + break; + case NEON_3R_VRHADD: + GEN_NEON_INTEGER_OP(rhadd); + break; + case NEON_3R_VHSUB: + GEN_NEON_INTEGER_OP(hsub); + break; + case NEON_3R_VQSHL: + GEN_NEON_INTEGER_OP_ENV(qshl); + break; + case NEON_3R_VRSHL: + GEN_NEON_INTEGER_OP(rshl); + break; + case NEON_3R_VQRSHL: + GEN_NEON_INTEGER_OP_ENV(qrshl); + break; + case NEON_3R_VABD: + GEN_NEON_INTEGER_OP(abd); + break; + case NEON_3R_VABA: + GEN_NEON_INTEGER_OP(abd); + tcg_temp_free_i32(tcg_ctx, tmp2); + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + gen_neon_add(tcg_ctx, size, tmp, tmp2); + break; + case NEON_3R_VPMAX: + GEN_NEON_INTEGER_OP(pmax); + break; + case NEON_3R_VPMIN: + GEN_NEON_INTEGER_OP(pmin); + break; + case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */ + if (!u) { /* VQDMULH */ + switch (size) { + case 1: + gen_helper_neon_qdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + break; + default: abort(); + } + } else { /* VQRDMULH */ + switch (size) { + case 1: + gen_helper_neon_qrdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qrdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + break; + default: abort(); + } + } + break; + case NEON_3R_VPADD_VQRDMLAH: + switch (size) { + case 0: gen_helper_neon_padd_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_padd_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + break; + case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. 
*/ + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + switch ((u << 2) | size) { + case 0: /* VADD */ + case 4: /* VPADD */ + gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); + break; + case 2: /* VSUB */ + gen_helper_vfp_subs(tcg_ctx, tmp, tmp, tmp2, fpstatus); + break; + case 6: /* VABD */ + gen_helper_neon_abd_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + break; + default: + abort(); + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_3R_FLOAT_MULTIPLY: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_vfp_muls(tcg_ctx, tmp, tmp, tmp2, fpstatus); + if (!u) { + tcg_temp_free_i32(tcg_ctx, tmp2); + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + if (size == 0) { + gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_vfp_subs(tcg_ctx, tmp, tmp2, tmp, fpstatus); + } + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_3R_FLOAT_CMP: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + if (!u) { + gen_helper_neon_ceq_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + if (size == 0) { + gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_3R_FLOAT_ACMP: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + if (size == 0) { + gen_helper_neon_acge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_neon_acgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_3R_FLOAT_MINMAX: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + if (size == 0) { + gen_helper_vfp_maxs(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_vfp_mins(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_3R_FLOAT_MISC: + if (u) { + /* VMAXNM/VMINNM */ + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + if (size == 0) { + gen_helper_vfp_maxnums(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_vfp_minnums(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + } else { + if (size == 0) { + gen_helper_recps_f32(tcg_ctx, tmp, tmp, tmp2, tcg_ctx->cpu_env); + } else { + gen_helper_rsqrts_f32(tcg_ctx, tmp, tmp, tmp2, tcg_ctx->cpu_env); + } + } + break; + case NEON_3R_VFM_VQRDMLSH: + { + /* VFMA, VFMS: fused multiply-add */ + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + TCGv_i32 tmp3 = neon_load_reg(tcg_ctx, rd, pass); + if (size) { + /* VFMS */ + gen_helper_vfp_negs(tcg_ctx, tmp, tmp); + } + gen_helper_vfp_muladds(tcg_ctx, tmp, tmp, tmp2, tmp3, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp3); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + default: + abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + + /* Save the result. For elementwise operations we can put it + straight into the destination register. For pairwise operations + we have to be careful to avoid clobbering the source operands. */ + if (pairwise && rd == rm) { + neon_store_scratch(tcg_ctx, pass, tmp); + } else { + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + + } /* for pass */ + if (pairwise && rd == rm) { + for (pass = 0; pass < (q ? 4 : 2); pass++) { + tmp = neon_load_scratch(tcg_ctx, pass); + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + } + /* End of 3 register same size operations. */ + } else if (insn & (1 << 4)) { + if ((insn & 0x00380080) != 0) { + /* Two registers and shift. 
*/ + op = (insn >> 8) & 0xf; + if (insn & (1 << 7)) { + /* 64-bit shift. */ + if (op > 7) { + return 1; + } + size = 3; + } else { + size = 2; + while ((insn & (1 << (size + 19))) == 0) + size--; + } + shift = (insn >> 16) & ((1 << (3 + size)) - 1); + if (op < 8) { + /* Shift by immediate: + VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */ + if (q && ((rd | rm) & 1)) { + return 1; + } + if (!u && (op == 4 || op == 6)) { + return 1; + } + /* Right shifts are encoded as N - shift, where N is the + element size in bits. */ + if (op <= 4) { + shift = shift - (1 << (size + 3)); + } + + switch (op) { + case 0: /* VSHR */ + /* Right shift comes here negative. */ + shift = -shift; + /* Shifts larger than the element size are architecturally + * valid. Unsigned results in all zeros; signed results + * in all sign bits. + */ + if (!u) { + tcg_gen_gvec_sari(tcg_ctx, size, rd_ofs, rm_ofs, + MIN(shift, (8 << size) - 1), + vec_size, vec_size); + } else if (shift >= 8 << size) { + tcg_gen_gvec_dup8i(tcg_ctx, rd_ofs, vec_size, vec_size, 0); + } else { + tcg_gen_gvec_shri(tcg_ctx, size, rd_ofs, rm_ofs, shift, + vec_size, vec_size); + } + return 0; + + case 1: /* VSRA */ + /* Right shift comes here negative. */ + shift = -shift; + /* Shifts larger than the element size are architecturally + * valid. Unsigned results in all zeros; signed results + * in all sign bits. + */ + if (!u) { + tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, vec_size, + MIN(shift, (8 << size) - 1), + &ssra_op[size]); + } else if (shift >= 8 << size) { + /* rd += 0 */ + } else { + tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, vec_size, + shift, &usra_op[size]); + } + return 0; + + case 4: /* VSRI */ + if (!u) { + return 1; + } + /* Right shift comes here negative. */ + shift = -shift; + /* Shift out of range leaves destination unchanged. */ + if (shift < 8 << size) { + tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, vec_size, + shift, &sri_op[size]); + } + return 0; + + case 5: /* VSHL, VSLI */ + if (u) { /* VSLI */ + /* Shift out of range leaves destination unchanged. */ + if (shift < 8 << size) { + tcg_gen_gvec_2i(tcg_ctx, rd_ofs, rm_ofs, vec_size, + vec_size, shift, &sli_op[size]); + } + } else { /* VSHL */ + /* Shifts larger than the element size are + * architecturally valid and results in zero. + */ + if (shift >= 8 << size) { + tcg_gen_gvec_dup8i(tcg_ctx, rd_ofs, vec_size, vec_size, 0); + } else { + tcg_gen_gvec_shli(tcg_ctx, size, rd_ofs, rm_ofs, shift, + vec_size, vec_size); + } + } + return 0; + } + + if (size == 3) { + count = q + 1; + } else { + count = q ? 4: 2; + } + + /* To avoid excessive duplication of ops we implement shift + * by immediate using the variable shift operations. 
+ */ + imm = dup_const(size, shift); + + for (pass = 0; pass < count; pass++) { + if (size == 3) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm + pass); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_V1, imm); + switch (op) { + case 2: /* VRSHR */ + case 3: /* VRSRA */ + if (u) + gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + else + gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + break; + case 6: /* VQSHLU */ + gen_helper_neon_qshlu_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + break; + case 7: /* VQSHL */ + if (u) { + gen_helper_neon_qshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } else { + gen_helper_neon_qshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } + break; + default: + g_assert_not_reached(); + break; + } + if (op == 3) { + /* Accumulate. */ + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); + tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } else { /* size < 3 */ + /* Operands in T0 and T1. */ + tmp = neon_load_reg(tcg_ctx, rm, pass); + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp2, imm); + switch (op) { + case 2: /* VRSHR */ + case 3: /* VRSRA */ + GEN_NEON_INTEGER_OP(rshl); + break; + case 6: /* VQSHLU */ + switch (size) { + case 0: + gen_helper_neon_qshlu_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, + tmp, tmp2); + break; + case 1: + gen_helper_neon_qshlu_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, + tmp, tmp2); + break; + case 2: + gen_helper_neon_qshlu_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, + tmp, tmp2); + break; + default: + abort(); + } + break; + case 7: /* VQSHL */ + GEN_NEON_INTEGER_OP_ENV(qshl); + break; + default: + g_assert_not_reached(); + break; + } + tcg_temp_free_i32(tcg_ctx, tmp2); + + if (op == 3) { + /* Accumulate. */ + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + gen_neon_add(tcg_ctx, size, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + } /* for pass */ + } else if (op < 10) { + /* Shift by immediate and narrow: + VSHRN, VRSHRN, VQSHRN, VQRSHRN. */ + int input_unsigned = (op == 8) ? 
!u : u; + if (rm & 1) { + return 1; + } + shift = shift - (1 << (size + 3)); + size++; + if (size == 3) { + tmp64 = tcg_const_i64(tcg_ctx, shift); + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm); + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + 1); + for (pass = 0; pass < 2; pass++) { + TCGv_i64 in; + if (pass == 0) { + in = tcg_ctx->cpu_V0; + } else { + in = tcg_ctx->cpu_V1; + } + if (q) { + if (input_unsigned) { + gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); + } else { + gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); + } + } else { + if (input_unsigned) { + gen_ushl_i64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); + } else { + gen_sshl_i64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); + } + } + tmp = tcg_temp_new_i32(tcg_ctx); + gen_neon_narrow_op(tcg_ctx, op == 8, u, size - 1, tmp, tcg_ctx->cpu_V0); + neon_store_reg(tcg_ctx, rd, pass, tmp); + } /* for pass */ + tcg_temp_free_i64(tcg_ctx, tmp64); + } else { + if (size == 1) { + imm = (uint16_t)shift; + imm |= imm << 16; + } else { + /* size == 2 */ + imm = (uint32_t)shift; + } + tmp2 = tcg_const_i32(tcg_ctx, imm); + tmp4 = neon_load_reg(tcg_ctx, rm + 1, 0); + tmp5 = neon_load_reg(tcg_ctx, rm + 1, 1); + for (pass = 0; pass < 2; pass++) { + if (pass == 0) { + tmp = neon_load_reg(tcg_ctx, rm, 0); + } else { + tmp = tmp4; + } + gen_neon_shift_narrow(tcg_ctx, size, tmp, tmp2, q, + input_unsigned); + if (pass == 0) { + tmp3 = neon_load_reg(tcg_ctx, rm, 1); + } else { + tmp3 = tmp5; + } + gen_neon_shift_narrow(tcg_ctx, size, tmp3, tmp2, q, + input_unsigned); + tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, tmp3); + tmp = tcg_temp_new_i32(tcg_ctx); + gen_neon_narrow_op(tcg_ctx, op == 8, u, size - 1, tmp, tcg_ctx->cpu_V0); + neon_store_reg(tcg_ctx, rd, pass, tmp); + } /* for pass */ + tcg_temp_free_i32(tcg_ctx, tmp2); + } + } else if (op == 10) { + /* VSHLL, VMOVL */ + if (q || (rd & 1)) { + return 1; + } + tmp = neon_load_reg(tcg_ctx, rm, 0); + tmp2 = neon_load_reg(tcg_ctx, rm, 1); + for (pass = 0; pass < 2; pass++) { + if (pass == 1) + tmp = tmp2; + + gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V0, tmp, size, u); + + if (shift != 0) { + /* The shift is less than the width of the source + type, so we can just shift the whole register. */ + tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, shift); + /* Widen the result of shift: we need to clear + * the potential overflow bits resulting from + * left bits of the narrow input appearing as + * right bits of the left neighbour narrow + * input. */ + if (size < 2 || !u) { + uint64_t imm64; + if (size == 0) { + imm = (0xffu >> (8 - shift)); + imm |= imm << 16; + } else if (size == 1) { + imm = 0xffff >> (16 - shift); + } else { + /* size == 2 */ + imm = 0xffffffff >> (32 - shift); + } + if (size < 2) { + imm64 = imm | (((uint64_t)imm) << 32); + } else { + imm64 = imm; + } + tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, ~imm64); + } + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + } else if (op >= 14) { + /* VCVT fixed-point.
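Before moving on, a worked instance of the VSHLL/VMOVL masking above may help (values chosen purely for illustration: size == 0, i.e. 8-bit elements widened to 16 bits, shift == 3, signed input). After gen_neon_widen() each 16-bit lane holds ssssssss vvvvvvvv; shifting the whole 64-bit register left by 3 drags the top three sign bits of the lane below into the low three bits of this lane. The mask then comes out as:

    imm   = 0xff >> (8 - 3);             /* = 0x0007: the three bits that can leak */
    imm  |= imm << 16;                   /* -> 0x00070007                          */
    imm64 = imm | ((uint64_t)imm << 32); /* -> 0x0007000700070007                  */
    /* tcg_gen_andi_i64(..., ~imm64) clears, in every 16-bit lane, exactly
     * the low bits dragged in from the sign-extended element below; the
     * genuine result bits are untouched. */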
*/ + TCGv_ptr fpst; + TCGv_i32 shiftv; + VFPGenFixPointFn *fn; + + if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) { + return 1; + } + + if (!(op & 1)) { + if (u) { + fn = gen_helper_vfp_ultos; + } else { + fn = gen_helper_vfp_sltos; + } + } else { + if (u) { + fn = gen_helper_vfp_touls_round_to_zero; + } else { + fn = gen_helper_vfp_tosls_round_to_zero; + } + } + + /* We have already masked out the must-be-1 top bit of imm6, + * hence this 32-shift where the ARM ARM has 64-imm6. + */ + shift = 32 - shift; + fpst = get_fpstatus_ptr(tcg_ctx, 1); + shiftv = tcg_const_i32(tcg_ctx, shift); + for (pass = 0; pass < (q ? 4 : 2); pass++) { + TCGv_i32 tmpf = neon_load_reg(tcg_ctx, rm, pass); + fn(tcg_ctx, tmpf, tmpf, shiftv, fpst); + neon_store_reg(tcg_ctx, rd, pass, tmpf); + } + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, shiftv); + } else { + return 1; + } + } else { /* (insn & 0x00380080) == 0 */ + int invert, reg_ofs, vec_size; + + if (q && (rd & 1)) { + return 1; + } + + op = (insn >> 8) & 0xf; + /* One register and immediate. */ + imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf); + invert = (insn & (1 << 5)) != 0; + /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE. + * We choose to not special-case this and will behave as if a + * valid constant encoding of 0 had been given. + */ + switch (op) { + case 0: case 1: + /* no-op */ + break; + case 2: case 3: + imm <<= 8; + break; + case 4: case 5: + imm <<= 16; + break; + case 6: case 7: + imm <<= 24; + break; + case 8: case 9: + imm |= imm << 16; + break; + case 10: case 11: + imm = (imm << 8) | (imm << 24); + break; + case 12: + imm = (imm << 8) | 0xff; + break; + case 13: + imm = (imm << 16) | 0xffff; + break; + case 14: + imm |= (imm << 8) | (imm << 16) | (imm << 24); + if (invert) { + imm = ~imm; + } + break; + case 15: + if (invert) { + return 1; + } + imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) + | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); + break; + } + if (invert) { + imm = ~imm; + } + + reg_ofs = neon_reg_offset(rd, 0); + vec_size = q ? 16 : 8; + + if (op & 1 && op < 12) { + if (invert) { + /* The immediate value has already been inverted, + * so BIC becomes AND. + */ + tcg_gen_gvec_andi(tcg_ctx, MO_32, reg_ofs, reg_ofs, imm, + vec_size, vec_size); + } else { + tcg_gen_gvec_ori(tcg_ctx, MO_32, reg_ofs, reg_ofs, imm, + vec_size, vec_size); + } + } else { + /* VMOV, VMVN. */ + if (op == 14 && invert) { + TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); + + for (pass = 0; pass <= q; ++pass) { + uint64_t val = 0; + int n; + + for (n = 0; n < 8; n++) { + if (imm & (1 << (n + pass * 8))) { + val |= 0xffull << (n * 8); + } + } + tcg_gen_movi_i64(tcg_ctx, t64, val); + neon_store_reg64(tcg_ctx, t64, rd + pass); + } + tcg_temp_free_i64(tcg_ctx, t64); + } else { + tcg_gen_gvec_dup32i(tcg_ctx, reg_ofs, vec_size, vec_size, imm); + } + } + } + } else { /* (insn & 0x00800010 == 0x00800000) */ + if (size != 3) { + op = (insn >> 8) & 0xf; + if ((insn & (1 << 6)) == 0) { + /* Three registers of different lengths. 
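Looking back at the one-register-and-immediate expansion above, a few worked expansions of the 8-bit payload make the switch easier to follow (imm = 0xAB chosen for illustration):

    op 0/1  -> 0x000000AB   (payload in byte 0)
    op 2/3  -> 0x0000AB00   (imm <<= 8)
    op 8/9  -> 0x00AB00AB   (imm |= imm << 16, 16-bit lanes)
    op 12   -> 0x0000ABFF   ((imm << 8) | 0xff)
    op 13   -> 0x00ABFFFF   ((imm << 16) | 0xffff)
    op 14   -> 0xABABABAB   (byte replicated into all four lanes)

With the invert bit set, the final constant is ~imm; that is why the BIC case above can be emitted as a plain AND against the already-inverted constant.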
*/ + int src1_wide; + int src2_wide; + int prewiden; + /* undefreq: bit 0 : UNDEF if size == 0 + * bit 1 : UNDEF if size == 1 + * bit 2 : UNDEF if size == 2 + * bit 3 : UNDEF if U == 1 + * Note that [2:0] set implies 'always UNDEF' + */ + int undefreq; + /* prewiden, src1_wide, src2_wide, undefreq */ + static const int neon_3reg_wide[16][4] = { + {1, 0, 0, 0}, /* VADDL */ + {1, 1, 0, 0}, /* VADDW */ + {1, 0, 0, 0}, /* VSUBL */ + {1, 1, 0, 0}, /* VSUBW */ + {0, 1, 1, 0}, /* VADDHN */ + {0, 0, 0, 0}, /* VABAL */ + {0, 1, 1, 0}, /* VSUBHN */ + {0, 0, 0, 0}, /* VABDL */ + {0, 0, 0, 0}, /* VMLAL */ + {0, 0, 0, 9}, /* VQDMLAL */ + {0, 0, 0, 0}, /* VMLSL */ + {0, 0, 0, 9}, /* VQDMLSL */ + {0, 0, 0, 0}, /* Integer VMULL */ + {0, 0, 0, 1}, /* VQDMULL */ + {0, 0, 0, 0xa}, /* Polynomial VMULL */ + {0, 0, 0, 7}, /* Reserved: always UNDEF */ + }; + + prewiden = neon_3reg_wide[op][0]; + src1_wide = neon_3reg_wide[op][1]; + src2_wide = neon_3reg_wide[op][2]; + undefreq = neon_3reg_wide[op][3]; + + if ((undefreq & (1 << size)) || + ((undefreq & 8) && u)) { + return 1; + } + if ((src1_wide && (rn & 1)) || + (src2_wide && (rm & 1)) || + (!src2_wide && (rd & 1))) { + return 1; + } + + /* Handle polynomial VMULL in a single pass. */ + if (op == 14) { + if (size == 0) { + /* VMULL.P8 */ + tcg_gen_gvec_3_ool(tcg_ctx, rd_ofs, rn_ofs, rm_ofs, 16, 16, + 0, gen_helper_neon_pmull_h); + } else { + /* VMULL.P64 */ + if (!dc_isar_feature(aa32_pmull, s)) { + return 1; + } + tcg_gen_gvec_3_ool(tcg_ctx, rd_ofs, rn_ofs, rm_ofs, 16, 16, + 0, gen_helper_gvec_pmull_q); + } + return 0; + } + + /* Avoid overlapping operands. Wide source operands are + always aligned so will never overlap with wide + destinations in problematic ways. */ + if (rd == rm && !src2_wide) { + tmp = neon_load_reg(tcg_ctx, rm, 1); + neon_store_scratch(tcg_ctx, 2, tmp); + } else if (rd == rn && !src1_wide) { + tmp = neon_load_reg(tcg_ctx, rn, 1); + neon_store_scratch(tcg_ctx, 2, tmp); + } + tmp3 = NULL; + for (pass = 0; pass < 2; pass++) { + if (src1_wide) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + pass); + tmp = NULL; + } else { + if (pass == 1 && rd == rn) { + tmp = neon_load_scratch(tcg_ctx, 2); + } else { + tmp = neon_load_reg(tcg_ctx, rn, pass); + } + if (prewiden) { + gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V0, tmp, size, u); + } + } + if (src2_wide) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + pass); + tmp2 = NULL; + } else { + if (pass == 1 && rd == rm) { + tmp2 = neon_load_scratch(tcg_ctx, 2); + } else { + tmp2 = neon_load_reg(tcg_ctx, rm, pass); + } + if (prewiden) { + gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V1, tmp2, size, u); + } + } + switch (op) { + case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */ + gen_neon_addl(tcg_ctx, size); + break; + case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */ + gen_neon_subl(tcg_ctx, size); + break; + case 5: case 7: /* VABAL, VABDL */ + switch ((size << 1) | u) { + case 0: + gen_helper_neon_abdl_s16(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + case 1: + gen_helper_neon_abdl_u16(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + case 2: + gen_helper_neon_abdl_s32(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + case 3: + gen_helper_neon_abdl_u32(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + case 4: + gen_helper_neon_abdl_s64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + case 5: + gen_helper_neon_abdl_u64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + default: abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 8: case 9: case 
10: case 11: case 12: case 13: + /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */ + gen_neon_mull(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2, size, u); + break; + default: /* 15 is RESERVED: caught earlier */ + abort(); + } + if (op == 13) { + /* VQDMULL */ + gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } else if (op == 5 || (op >= 8 && op <= 11)) { + /* Accumulate. */ + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); + switch (op) { + case 10: /* VMLSL */ + gen_neon_negl(tcg_ctx, tcg_ctx->cpu_V0, size); + /* Fall through */ + case 5: case 8: /* VABAL, VMLAL */ + gen_neon_addl(tcg_ctx, size); + break; + case 9: case 11: /* VQDMLAL, VQDMLSL */ + gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); + if (op == 11) { + gen_neon_negl(tcg_ctx, tcg_ctx->cpu_V0, size); + } + gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, size); + break; + default: + abort(); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } else if (op == 4 || op == 6) { + /* Narrowing operation. */ + tmp = tcg_temp_new_i32(tcg_ctx); + if (!u) { + switch (size) { + case 0: + gen_helper_neon_narrow_high_u8(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + case 1: + gen_helper_neon_narrow_high_u16(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + case 2: + tcg_gen_extrh_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + default: abort(); + } + } else { + switch (size) { + case 0: + gen_helper_neon_narrow_round_high_u8(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + case 1: + gen_helper_neon_narrow_round_high_u16(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + case 2: + tcg_gen_addi_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 1u << 31); + tcg_gen_extrh_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + default: abort(); + } + } + if (pass == 0) { + tmp3 = tmp; + } else { + neon_store_reg(tcg_ctx, rd, 0, tmp3); + neon_store_reg(tcg_ctx, rd, 1, tmp); + } + } else { + /* Write back the result. */ + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + } + } else { + /* Two registers and a scalar. NB that for ops of this form + * the ARM ARM labels bit 24 as Q, but it is in our variable + * 'u', not 'q'. + */ + if (size == 0) { + return 1; + } + switch (op) { + case 1: /* Float VMLA scalar */ + case 5: /* Floating point VMLS scalar */ + case 9: /* Floating point VMUL scalar */ + if (size == 1) { + return 1; + } + /* fall through */ + case 0: /* Integer VMLA scalar */ + case 4: /* Integer VMLS scalar */ + case 8: /* Integer VMUL scalar */ + case 12: /* VQDMULH scalar */ + case 13: /* VQRDMULH scalar */ + if (u && ((rd | rn) & 1)) { + return 1; + } + tmp = neon_get_scalar(tcg_ctx, size, rm); + neon_store_scratch(tcg_ctx, 0, tmp); + for (pass = 0; pass < (u ? 
4 : 2); pass++) { + tmp = neon_load_scratch(tcg_ctx, 0); + tmp2 = neon_load_reg(tcg_ctx, rn, pass); + if (op == 12) { + if (size == 1) { + gen_helper_neon_qdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } else { + gen_helper_neon_qdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } + } else if (op == 13) { + if (size == 1) { + gen_helper_neon_qrdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } else { + gen_helper_neon_qrdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } + } else if (op & 1) { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_vfp_muls(tcg_ctx, tmp, tmp, tmp2, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + } else { + switch (size) { + case 0: gen_helper_neon_mul_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_mul_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + } + tcg_temp_free_i32(tcg_ctx, tmp2); + if (op < 8) { + /* Accumulate. */ + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + switch (op) { + case 0: + gen_neon_add(tcg_ctx, size, tmp, tmp2); + break; + case 1: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case 4: + gen_neon_rsb(tcg_ctx, size, tmp, tmp2); + break; + case 5: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_vfp_subs(tcg_ctx, tmp, tmp2, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + default: + abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + } + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + break; + case 3: /* VQDMLAL scalar */ + case 7: /* VQDMLSL scalar */ + case 11: /* VQDMULL scalar */ + if (u == 1) { + return 1; + } + /* fall through */ + case 2: /* VMLAL scalar */ + case 6: /* VMLSL scalar */ + case 10: /* VMULL scalar */ + if (rd & 1) { + return 1; + } + tmp2 = neon_get_scalar(tcg_ctx, size, rm); + /* We need a copy of tmp2 because gen_neon_mull + * deletes it during pass 0.
*/ + tmp4 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, tmp4, tmp2); + tmp3 = neon_load_reg(tcg_ctx, rn, 1); + + for (pass = 0; pass < 2; pass++) { + if (pass == 0) { + tmp = neon_load_reg(tcg_ctx, rn, 0); + } else { + tmp = tmp3; + tmp2 = tmp4; + } + gen_neon_mull(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2, size, u); + if (op != 11) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); + } + switch (op) { + case 6: + gen_neon_negl(tcg_ctx, tcg_ctx->cpu_V0, size); + /* Fall through */ + case 2: + gen_neon_addl(tcg_ctx, size); + break; + case 3: case 7: + gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); + if (op == 7) { + gen_neon_negl(tcg_ctx, tcg_ctx->cpu_V0, size); + } + gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, size); + break; + case 10: + /* no-op */ + break; + case 11: + gen_neon_addl_saturate(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); + break; + default: + abort(); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + break; + case 14: /* VQRDMLAH scalar */ + case 15: /* VQRDMLSH scalar */ + { + NeonGenThreeOpEnvFn *fn; + + if (!dc_isar_feature(aa32_rdm, s)) { + return 1; + } + if (u && ((rd | rn) & 1)) { + return 1; + } + if (op == 14) { + if (size == 1) { + fn = gen_helper_neon_qrdmlah_s16; + } else { + fn = gen_helper_neon_qrdmlah_s32; + } + } else { + if (size == 1) { + fn = gen_helper_neon_qrdmlsh_s16; + } else { + fn = gen_helper_neon_qrdmlsh_s32; + } + } + + tmp2 = neon_get_scalar(tcg_ctx, size, rm); + for (pass = 0; pass < (u ? 4 : 2); pass++) { + tmp = neon_load_reg(tcg_ctx, rn, pass); + tmp3 = neon_load_reg(tcg_ctx, rd, pass); + fn(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp3); + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + } + break; + default: + g_assert_not_reached(); + break; + } + } + } else { /* size == 3 */ + if (!u) { + /* Extract. 
*/ + imm = (insn >> 8) & 0xf; + + if (imm > 7 && !q) + return 1; + + if (q && ((rd | rn | rm) & 1)) { + return 1; + } + + if (imm == 0) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); + if (q) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rn + 1); + } + } else if (imm == 8) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + 1); + if (q) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); + } + } else if (q) { + tmp64 = tcg_temp_new_i64(tcg_ctx); + if (imm < 8) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); + neon_load_reg64(tcg_ctx, tmp64, rn + 1); + } else { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + 1); + neon_load_reg64(tcg_ctx, tmp64, rm); + } + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, (imm & 7) * 8); + tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tmp64, 64 - ((imm & 7) * 8)); + tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + if (imm < 8) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); + } else { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + 1); + imm -= 8; + } + tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, 64 - (imm * 8)); + tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, imm * 8); + tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + } else { + /* BUGFIX */ + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, imm * 8); + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); + tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, 64 - (imm * 8)); + tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd); + if (q) { + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + 1); + } + } else if ((insn & (1 << 11)) == 0) { + /* Two register misc. */ + op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf); + size = (insn >> 18) & 3; + /* UNDEF for unknown op values and bad op-size combinations */ + if ((neon_2rm_sizes[op] & (1 << size)) == 0) { + return 1; + } + if (neon_2rm_is_v8_op(op) && + !arm_dc_feature(s, ARM_FEATURE_V8)) { + return 1; + } + if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) && + q && ((rm | rd) & 1)) { + return 1; + } + switch (op) { + case NEON_2RM_VREV64: + for (pass = 0; pass < (q ? 2 : 1); pass++) { + tmp = neon_load_reg(tcg_ctx, rm, pass * 2); + tmp2 = neon_load_reg(tcg_ctx, rm, pass * 2 + 1); + switch (size) { + case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); break; + case 1: gen_swap_half(tcg_ctx, tmp); break; + case 2: /* no-op */ break; + default: abort(); + } + neon_store_reg(tcg_ctx, rd, pass * 2 + 1, tmp); + if (size == 2) { + neon_store_reg(tcg_ctx, rd, pass * 2, tmp2); + } else { + switch (size) { + case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp2, tmp2); break; + case 1: gen_swap_half(tcg_ctx, tmp2); break; + default: abort(); + } + neon_store_reg(tcg_ctx, rd, pass * 2, tmp2); + } + } + break; + case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U: + case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U: + for (pass = 0; pass < q + 1; pass++) { + tmp = neon_load_reg(tcg_ctx, rm, pass * 2); + gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V0, tmp, size, op & 1); + tmp = neon_load_reg(tcg_ctx, rm, pass * 2 + 1); + gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V1, tmp, size, op & 1); + switch (size) { + case 0: gen_helper_neon_paddl_u16(tcg_ctx, CPU_V001); break; + case 1: gen_helper_neon_paddl_u32(tcg_ctx, CPU_V001); break; + case 2: tcg_gen_add_i64(tcg_ctx, CPU_V001); break; + default: abort(); + } + if (op >= NEON_2RM_VPADAL) { + /* Accumulate. 
*/ + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); + gen_neon_addl(tcg_ctx, size); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + break; + case NEON_2RM_VTRN: + if (size == 2) { + int n; + for (n = 0; n < (q ? 4 : 2); n += 2) { + tmp = neon_load_reg(tcg_ctx, rm, n); + tmp2 = neon_load_reg(tcg_ctx, rd, n + 1); + neon_store_reg(tcg_ctx, rm, n, tmp2); + neon_store_reg(tcg_ctx, rd, n + 1, tmp); + } + } else { + goto elementwise; + } + break; + case NEON_2RM_VUZP: + if (gen_neon_unzip(tcg_ctx, rd, rm, size, q)) { + return 1; + } + break; + case NEON_2RM_VZIP: + if (gen_neon_zip(tcg_ctx, rd, rm, size, q)) { + return 1; + } + break; + case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN: + /* also VQMOVUN; op field and mnemonics don't line up */ + if (rm & 1) { + return 1; + } + tmp2 = NULL; + for (pass = 0; pass < 2; pass++) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm + pass); + tmp = tcg_temp_new_i32(tcg_ctx); + gen_neon_narrow_op(tcg_ctx, op == NEON_2RM_VMOVN, q, size, + tmp, tcg_ctx->cpu_V0); + if (pass == 0) { + tmp2 = tmp; + } else { + neon_store_reg(tcg_ctx, rd, 0, tmp2); + neon_store_reg(tcg_ctx, rd, 1, tmp); + } + } + break; + case NEON_2RM_VSHLL: + if (q || (rd & 1)) { + return 1; + } + tmp = neon_load_reg(tcg_ctx, rm, 0); + tmp2 = neon_load_reg(tcg_ctx, rm, 1); + for (pass = 0; pass < 2; pass++) { + if (pass == 1) + tmp = tmp2; + gen_neon_widen(tcg_ctx, tcg_ctx->cpu_V0, tmp, size, 1); + tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 8 << size); + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + break; + case NEON_2RM_VCVT_F16_F32: + { + TCGv_ptr fpst; + TCGv_i32 ahp; + + if (!dc_isar_feature(aa32_fp16_spconv, s) || + q || (rm & 1)) { + return 1; + } + fpst = get_fpstatus_ptr(tcg_ctx, true); + ahp = get_ahp_flag(tcg_ctx); + tmp = neon_load_reg(tcg_ctx, rm, 0); + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp, tmp, fpst, ahp); + tmp2 = neon_load_reg(tcg_ctx, rm, 1); + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp2, tmp2, fpst, ahp); + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); + tcg_gen_or_i32(tcg_ctx, tmp2, tmp2, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + tmp = neon_load_reg(tcg_ctx, rm, 2); + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp, tmp, fpst, ahp); + tmp3 = neon_load_reg(tcg_ctx, rm, 3); + neon_store_reg(tcg_ctx, rd, 0, tmp2); + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp3, tmp3, fpst, ahp); + tcg_gen_shli_i32(tcg_ctx, tmp3, tmp3, 16); + tcg_gen_or_i32(tcg_ctx, tmp3, tmp3, tmp); + neon_store_reg(tcg_ctx, rd, 1, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, ahp); + tcg_temp_free_ptr(tcg_ctx, fpst); + break; + } + case NEON_2RM_VCVT_F32_F16: + { + TCGv_ptr fpst; + TCGv_i32 ahp; + if (!dc_isar_feature(aa32_fp16_spconv, s) || + q || (rd & 1)) { + return 1; + } + fpst = get_fpstatus_ptr(tcg_ctx, true); + ahp = get_ahp_flag(tcg_ctx); + tmp3 = tcg_temp_new_i32(tcg_ctx); + tmp = neon_load_reg(tcg_ctx, rm, 0); + tmp2 = neon_load_reg(tcg_ctx, rm, 1); + tcg_gen_ext16u_i32(tcg_ctx, tmp3, tmp); + gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tmp3, tmp3, fpst, ahp); + neon_store_reg(tcg_ctx, rd, 0, tmp3); + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); + gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tmp, tmp, fpst, ahp); + neon_store_reg(tcg_ctx, rd, 1, tmp); + tmp3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ext16u_i32(tcg_ctx, tmp3, tmp2); + gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tmp3, tmp3, fpst, ahp); + neon_store_reg(tcg_ctx, rd, 2, tmp3); + tcg_gen_shri_i32(tcg_ctx, tmp2, tmp2, 16); + gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tmp2, tmp2, fpst, 
ahp); + neon_store_reg(tcg_ctx, rd, 3, tmp2); + tcg_temp_free_i32(tcg_ctx, ahp); + tcg_temp_free_ptr(tcg_ctx, fpst); + break; + } + case NEON_2RM_AESE: case NEON_2RM_AESMC: + if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) { + return 1; + } + ptr1 = vfp_reg_ptr(tcg_ctx, true, rd); + ptr2 = vfp_reg_ptr(tcg_ctx, true, rm); + + /* Bit 6 is the lowest opcode bit; it distinguishes between + * encryption (AESE/AESMC) and decryption (AESD/AESIMC) + */ + tmp3 = tcg_const_i32(tcg_ctx, extract32(insn, 6, 1)); + + if (op == NEON_2RM_AESE) { + gen_helper_crypto_aese(tcg_ctx, ptr1, ptr2, tmp3); + } else { + gen_helper_crypto_aesmc(tcg_ctx, ptr1, ptr2, tmp3); + } + tcg_temp_free_ptr(tcg_ctx, ptr1); + tcg_temp_free_ptr(tcg_ctx, ptr2); + tcg_temp_free_i32(tcg_ctx, tmp3); + break; + case NEON_2RM_SHA1H: + if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) { + return 1; + } + ptr1 = vfp_reg_ptr(tcg_ctx, true, rd); + ptr2 = vfp_reg_ptr(tcg_ctx, true, rm); + + gen_helper_crypto_sha1h(tcg_ctx, ptr1, ptr2); + + tcg_temp_free_ptr(tcg_ctx, ptr1); + tcg_temp_free_ptr(tcg_ctx, ptr2); + break; + case NEON_2RM_SHA1SU1: + if ((rm | rd) & 1) { + return 1; + } + /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */ + if (q) { + if (!dc_isar_feature(aa32_sha2, s)) { + return 1; + } + } else if (!dc_isar_feature(aa32_sha1, s)) { + return 1; + } + ptr1 = vfp_reg_ptr(tcg_ctx, true, rd); + ptr2 = vfp_reg_ptr(tcg_ctx, true, rm); + if (q) { + gen_helper_crypto_sha256su0(tcg_ctx, ptr1, ptr2); + } else { + gen_helper_crypto_sha1su1(tcg_ctx, ptr1, ptr2); + } + tcg_temp_free_ptr(tcg_ctx, ptr1); + tcg_temp_free_ptr(tcg_ctx, ptr2); + break; + + case NEON_2RM_VMVN: + tcg_gen_gvec_not(tcg_ctx, 0, rd_ofs, rm_ofs, vec_size, vec_size); + break; + case NEON_2RM_VNEG: + tcg_gen_gvec_neg(tcg_ctx, size, rd_ofs, rm_ofs, vec_size, vec_size); + break; + case NEON_2RM_VABS: + tcg_gen_gvec_abs(tcg_ctx, size, rd_ofs, rm_ofs, vec_size, vec_size); + break; + + default: + elementwise: + for (pass = 0; pass < (q ? 
4 : 2); pass++) { + tmp = neon_load_reg(tcg_ctx, rm, pass); + switch (op) { + case NEON_2RM_VREV32: + switch (size) { + case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); break; + case 1: gen_swap_half(tcg_ctx, tmp); break; + default: abort(); + } + break; + case NEON_2RM_VREV16: + gen_rev16(tcg_ctx, tmp, tmp); + break; + case NEON_2RM_VCLS: + switch (size) { + case 0: gen_helper_neon_cls_s8(tcg_ctx, tmp, tmp); break; + case 1: gen_helper_neon_cls_s16(tcg_ctx, tmp, tmp); break; + case 2: gen_helper_neon_cls_s32(tcg_ctx, tmp, tmp); break; + default: abort(); + } + break; + case NEON_2RM_VCLZ: + switch (size) { + case 0: gen_helper_neon_clz_u8(tcg_ctx, tmp, tmp); break; + case 1: gen_helper_neon_clz_u16(tcg_ctx, tmp, tmp); break; + case 2: tcg_gen_clzi_i32(tcg_ctx, tmp, tmp, 32); break; + default: abort(); + } + break; + case NEON_2RM_VCNT: + gen_helper_neon_cnt_u8(tcg_ctx, tmp, tmp); + break; + case NEON_2RM_VQABS: + switch (size) { + case 0: + gen_helper_neon_qabs_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + case 1: + gen_helper_neon_qabs_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + case 2: + gen_helper_neon_qabs_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + default: abort(); + } + break; + case NEON_2RM_VQNEG: + switch (size) { + case 0: + gen_helper_neon_qneg_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + case 1: + gen_helper_neon_qneg_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + case 2: + gen_helper_neon_qneg_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + default: abort(); + } + break; + case NEON_2RM_VCGT0: case NEON_2RM_VCLE0: + tmp2 = tcg_const_i32(tcg_ctx, 0); + switch(size) { + case 0: gen_helper_neon_cgt_s8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_cgt_s16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: gen_helper_neon_cgt_s32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + if (op == NEON_2RM_VCLE0) { + tcg_gen_not_i32(tcg_ctx, tmp, tmp); + } + break; + case NEON_2RM_VCGE0: case NEON_2RM_VCLT0: + tmp2 = tcg_const_i32(tcg_ctx, 0); + switch(size) { + case 0: gen_helper_neon_cge_s8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_cge_s16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: gen_helper_neon_cge_s32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + if (op == NEON_2RM_VCLT0) { + tcg_gen_not_i32(tcg_ctx, tmp, tmp); + } + break; + case NEON_2RM_VCEQ0: + tmp2 = tcg_const_i32(tcg_ctx, 0); + switch(size) { + case 0: gen_helper_neon_ceq_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_ceq_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: gen_helper_neon_ceq_u32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + case NEON_2RM_VCGT0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + tmp2 = tcg_const_i32(tcg_ctx, 0); + gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCGE0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + tmp2 = tcg_const_i32(tcg_ctx, 0); + gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCEQ0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + tmp2 = tcg_const_i32(tcg_ctx, 0); + gen_helper_neon_ceq_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_ptr(tcg_ctx, 
fpstatus); + break; + } + case NEON_2RM_VCLE0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + tmp2 = tcg_const_i32(tcg_ctx, 0); + gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp2, tmp, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCLT0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + tmp2 = tcg_const_i32(tcg_ctx, 0); + gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp2, tmp, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VABS_F: + gen_helper_vfp_abss(tcg_ctx, tmp, tmp); + break; + case NEON_2RM_VNEG_F: + gen_helper_vfp_negs(tcg_ctx, tmp, tmp); + break; + case NEON_2RM_VSWP: + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + neon_store_reg(tcg_ctx, rm, pass, tmp2); + break; + case NEON_2RM_VTRN: + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + switch (size) { + case 0: gen_neon_trn_u8(tcg_ctx, tmp, tmp2); break; + case 1: gen_neon_trn_u16(tcg_ctx, tmp, tmp2); break; + default: abort(); + } + neon_store_reg(tcg_ctx, rm, pass, tmp2); + break; + case NEON_2RM_VRINTN: + case NEON_2RM_VRINTA: + case NEON_2RM_VRINTM: + case NEON_2RM_VRINTP: + case NEON_2RM_VRINTZ: + { + TCGv_i32 tcg_rmode; + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + int rmode; + + if (op == NEON_2RM_VRINTZ) { + rmode = FPROUNDING_ZERO; + } else { + rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1]; + } + + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); + gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, + tcg_ctx->cpu_env); + gen_helper_rints(tcg_ctx, tmp, tmp, fpstatus); + gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, + tcg_ctx->cpu_env); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + break; + } + case NEON_2RM_VRINTX: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_rints_exact(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCVTAU: + case NEON_2RM_VCVTAS: + case NEON_2RM_VCVTNU: + case NEON_2RM_VCVTNS: + case NEON_2RM_VCVTPU: + case NEON_2RM_VCVTPS: + case NEON_2RM_VCVTMU: + case NEON_2RM_VCVTMS: + { + bool is_signed = !extract32(insn, 7, 1); + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx, 1); + TCGv_i32 tcg_rmode, tcg_shift; + int rmode = fp_decode_rm[extract32(insn, 8, 2)]; + + tcg_shift = tcg_const_i32(tcg_ctx, 0); + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); + gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, + tcg_ctx->cpu_env); + + if (is_signed) { + gen_helper_vfp_tosls(tcg_ctx, tmp, tmp, + tcg_shift, fpst); + } else { + gen_helper_vfp_touls(tcg_ctx, tmp, tmp, + tcg_shift, fpst); + } + + gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, + tcg_ctx->cpu_env); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + tcg_temp_free_i32(tcg_ctx, tcg_shift); + tcg_temp_free_ptr(tcg_ctx, fpst); + break; + } + case NEON_2RM_VRECPE: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_recpe_u32(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VRSQRTE: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_rsqrte_u32(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VRECPE_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_recpe_f32(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VRSQRTE_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 
1); + gen_helper_rsqrte_f32(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */ + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_vfp_sitos(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */ + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_vfp_uitos(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */ + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_vfp_tosizs(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */ + { + TCGv_ptr fpstatus = get_fpstatus_ptr(tcg_ctx, 1); + gen_helper_vfp_touizs(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + default: + /* Reserved op values were caught by the + * neon_2rm_sizes[] check earlier. + */ + abort(); + } + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + break; + } + } else if ((insn & (1 << 10)) == 0) { + /* VTBL, VTBX. */ + int n = ((insn >> 8) & 3) + 1; + if ((rn + n) > 32) { + /* This is UNPREDICTABLE; we choose to UNDEF to avoid the + * helper function running off the end of the register file. + */ + return 1; + } + n <<= 3; + if (insn & (1 << 6)) { + tmp = neon_load_reg(tcg_ctx, rd, 0); + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + } + tmp2 = neon_load_reg(tcg_ctx, rm, 0); + ptr1 = vfp_reg_ptr(tcg_ctx, true, rn); + tmp5 = tcg_const_i32(tcg_ctx, n); + gen_helper_neon_tbl(tcg_ctx, tmp2, tmp2, tmp, ptr1, tmp5); + tcg_temp_free_i32(tcg_ctx, tmp); + if (insn & (1 << 6)) { + tmp = neon_load_reg(tcg_ctx, rd, 1); + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + } + tmp3 = neon_load_reg(tcg_ctx, rm, 1); + gen_helper_neon_tbl(tcg_ctx, tmp3, tmp3, tmp, ptr1, tmp5); + tcg_temp_free_i32(tcg_ctx, tmp5); + tcg_temp_free_ptr(tcg_ctx, ptr1); + neon_store_reg(tcg_ctx, rd, 0, tmp2); + neon_store_reg(tcg_ctx, rd, 1, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp); + } else if ((insn & 0x380) == 0) { + /* VDUP */ + int element; + MemOp size; + + if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) { + return 1; + } + if (insn & (1 << 16)) { + size = MO_8; + element = (insn >> 17) & 7; + } else if (insn & (1 << 17)) { + size = MO_16; + element = (insn >> 18) & 3; + } else { + size = MO_32; + element = (insn >> 19) & 1; + } + tcg_gen_gvec_dup_mem(tcg_ctx, size, neon_reg_offset(rd, 0), + neon_element_offset(rm, element, size), + q ? 16 : 8, q ? 16 : 8); + } else { + return 1; + } + } + } + return 0; +} + +/* Advanced SIMD three registers of the same length extension. + * 31 25 23 22 20 16 12 11 10 9 8 3 0 + * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+ + * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm | + * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+ + */ +static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_gvec_3 *fn_gvec = NULL; + gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL; + int rd, rn, rm, opr_sz; + int data = 0; + int off_rn, off_rm; + bool is_long = false, q = extract32(insn, 6, 1); + bool ptr_is_env = false; + + if ((insn & 0xfe200f10) == 0xfc200800) { + /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... 
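One note on how the encoding diagram above turns into the mask/value tests in this function: every fixed bit of a row goes into the mask, while the don't-care positions ('.', 'R', 'S', and so on) are left out of it. A sketch for the VCADD row, using the same constants as the code (the helper name is hypothetical, added only to show the idea):

    #include <stdint.h>

    /* "1111 110R 1.0S .... .... 1000 ...0 ....": only fixed bits are masked. */
    static inline int insn_is_vcadd(uint32_t insn)
    {
        return (insn & 0xfea00f10) == 0xfc800800;
    }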
*/ + int size = extract32(insn, 20, 1); + data = extract32(insn, 23, 2); /* rot */ + if (!dc_isar_feature(aa32_vcma, s) + || (!size && !dc_isar_feature(aa32_fp16_arith, s))) { + return 1; + } + fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah; + } else if ((insn & 0xfea00f10) == 0xfc800800) { + /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */ + int size = extract32(insn, 20, 1); + data = extract32(insn, 24, 1); /* rot */ + if (!dc_isar_feature(aa32_vcma, s) + || (!size && !dc_isar_feature(aa32_fp16_arith, s))) { + return 1; + } + fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh; + } else if ((insn & 0xfeb00f00) == 0xfc200d00) { + /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */ + bool u = extract32(insn, 4, 1); + if (!dc_isar_feature(aa32_dp, s)) { + return 1; + } + fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b; + } else if ((insn & 0xff300f10) == 0xfc200810) { + /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */ + int is_s = extract32(insn, 23, 1); + if (!dc_isar_feature(aa32_fhm, s)) { + return 1; + } + is_long = true; + data = is_s; /* is_2 == 0 */ + fn_gvec_ptr = gen_helper_gvec_fmlal_a32; + ptr_is_env = true; + } else { + return 1; + } + + VFP_DREG_D(rd, insn); + if (rd & q) { + return 1; + } + if (q || !is_long) { + VFP_DREG_N(rn, insn); + VFP_DREG_M(rm, insn); + if ((rn | rm) & q & !is_long) { + return 1; + } + off_rn = vfp_reg_offset(1, rn); + off_rm = vfp_reg_offset(1, rm); + } else { + rn = VFP_SREG_N(insn); + rm = VFP_SREG_M(insn); + off_rn = vfp_reg_offset(0, rn); + off_rm = vfp_reg_offset(0, rm); + } + + if (s->fp_excp_el) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); + return 0; + } + if (!s->vfp_enabled) { + return 1; + } + + opr_sz = (1 + q) * 8; + if (fn_gvec_ptr) { + TCGv_ptr ptr; + if (ptr_is_env) { + ptr = tcg_ctx->cpu_env; + } else { + ptr = get_fpstatus_ptr(tcg_ctx, 1); + } + tcg_gen_gvec_3_ptr(tcg_ctx, vfp_reg_offset(1, rd), off_rn, off_rm, ptr, + opr_sz, opr_sz, data, fn_gvec_ptr); + if (!ptr_is_env) { + tcg_temp_free_ptr(tcg_ctx, ptr); + } + } else { + tcg_gen_gvec_3_ool(tcg_ctx, vfp_reg_offset(1, rd), off_rn, off_rm, + opr_sz, opr_sz, data, fn_gvec); + } + return 0; +} + +/* Advanced SIMD two registers and a scalar extension. + * 31 24 23 22 20 16 12 11 10 9 8 3 0 + * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+ + * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm | + * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+ + * + */ + +static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_gvec_3 *fn_gvec = NULL; + gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL; + int rd, rn, rm, opr_sz, data; + int off_rn, off_rm; + bool is_long = false, q = extract32(insn, 6, 1); + bool ptr_is_env = false; + + if ((insn & 0xff000f10) == 0xfe000800) { + /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */ + int rot = extract32(insn, 20, 2); + int size = extract32(insn, 23, 1); + int index; + + if (!dc_isar_feature(aa32_vcma, s)) { + return 1; + } + if (size == 0) { + if (!dc_isar_feature(aa32_fp16_arith, s)) { + return 1; + } + /* For fp16, rm is just Vm, and index is M. */ + rm = extract32(insn, 0, 4); + index = extract32(insn, 5, 1); + } else { + /* For fp32, rm is the usual M:Vm, and index is 0. 
*/ + VFP_DREG_M(rm, insn); + index = 0; + } + data = (index << 2) | rot; + fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx + : gen_helper_gvec_fcmlah_idx); + } else if ((insn & 0xffb00f00) == 0xfe200d00) { + /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */ + int u = extract32(insn, 4, 1); + + if (!dc_isar_feature(aa32_dp, s)) { + return 1; + } + fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b; + /* rm is just Vm, and index is M. */ + data = extract32(insn, 5, 1); /* index */ + rm = extract32(insn, 0, 4); + } else if ((insn & 0xffa00f10) == 0xfe000810) { + /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */ + int is_s = extract32(insn, 20, 1); + int vm20 = extract32(insn, 0, 3); + int vm3 = extract32(insn, 3, 1); + int m = extract32(insn, 5, 1); + int index; + + if (!dc_isar_feature(aa32_fhm, s)) { + return 1; + } + if (q) { + rm = vm20; + index = m * 2 + vm3; + } else { + rm = vm20 * 2 + m; + index = vm3; + } + is_long = true; + data = (index << 2) | is_s; /* is_2 == 0 */ + fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32; + ptr_is_env = true; + } else { + return 1; + } + + VFP_DREG_D(rd, insn); + if (rd & q) { + return 1; + } + if (q || !is_long) { + VFP_DREG_N(rn, insn); + if (rn & q & !is_long) { + return 1; + } + off_rn = vfp_reg_offset(1, rn); + off_rm = vfp_reg_offset(1, rm); + } else { + rn = VFP_SREG_N(insn); + off_rn = vfp_reg_offset(0, rn); + off_rm = vfp_reg_offset(0, rm); + } + if (s->fp_excp_el) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); + return 0; + } + if (!s->vfp_enabled) { + return 1; + } + + opr_sz = (1 + q) * 8; + if (fn_gvec_ptr) { + TCGv_ptr ptr; + if (ptr_is_env) { + ptr = tcg_ctx->cpu_env; + } else { + ptr = get_fpstatus_ptr(tcg_ctx, 1); + } + tcg_gen_gvec_3_ptr(tcg_ctx, vfp_reg_offset(1, rd), off_rn, off_rm, ptr, + opr_sz, opr_sz, data, fn_gvec_ptr); + if (!ptr_is_env) { + tcg_temp_free_ptr(tcg_ctx, ptr); + } + } else { + tcg_gen_gvec_3_ool(tcg_ctx, vfp_reg_offset(1, rd), off_rn, off_rm, + opr_sz, opr_sz, data, fn_gvec); + } + return 0; +} + +static int disas_coproc_insn(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2; + const ARMCPRegInfo *ri; + + cpnum = (insn >> 8) & 0xf; + + /* First check for coprocessor space used for XScale/iwMMXt insns */ + if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) { + if (extract32(s->c15_cpar, cpnum, 1) == 0) { + return 1; + } + if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { + return disas_iwmmxt_insn(s, insn); + } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) { + return disas_dsp_insn(s, insn); + } + return 1; + } + + /* Otherwise treat as a generic register access */ + is64 = (insn & (1 << 25)) == 0; + if (!is64 && ((insn & (1 << 4)) == 0)) { + /* cdp */ + return 1; + } + + crm = insn & 0xf; + if (is64) { + crn = 0; + opc1 = (insn >> 4) & 0xf; + opc2 = 0; + rt2 = (insn >> 16) & 0xf; + } else { + crn = (insn >> 16) & 0xf; + opc1 = (insn >> 21) & 7; + opc2 = (insn >> 5) & 7; + rt2 = 0; + } + isread = (insn >> 20) & 1; + rt = (insn >> 12) & 0xf; + + ri = get_arm_cp_reginfo(s->cp_regs, + ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2)); + if (ri) { + bool need_exit_tb; + + /* Check access permissions */ + if (!cp_access_ok(s->current_el, ri, isread)) { + return 1; + } + + if (s->hstr_active || ri->accessfn || + (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) { + /* Emit code to perform further access permissions checks at + * runtime; 
this may result in an exception. + * Note that on XScale all cp0..c13 registers do an access check + * call in order to handle c15_cpar. + */ + TCGv_ptr tmpptr; + TCGv_i32 tcg_syn, tcg_isread; + uint32_t syndrome; + + /* Note that since we are an implementation which takes an + * exception on a trapped conditional instruction only if the + * instruction passes its condition code check, we can take + * advantage of the clause in the ARM ARM that allows us to set + * the COND field in the instruction to 0xE in all cases. + * We could fish the actual condition out of the insn (ARM) + * or the condexec bits (Thumb) but it isn't necessary. + */ + switch (cpnum) { + case 14: + if (is64) { + syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2, + isread, false); + } else { + syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm, + rt, isread, false); + } + break; + case 15: + if (is64) { + syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2, + isread, false); + } else { + syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm, + rt, isread, false); + } + break; + default: + /* ARMv8 defines that only coprocessors 14 and 15 exist, + * so this can only happen if this is an ARMv7 or earlier CPU, + * in which case the syndrome information won't actually be + * guest visible. + */ + assert(!arm_dc_feature(s, ARM_FEATURE_V8)); + syndrome = syn_uncategorized(); + break; + } + + gen_set_condexec(s); + gen_set_pc_im(s, s->pc_curr); + tmpptr = tcg_const_ptr(tcg_ctx, ri); + tcg_syn = tcg_const_i32(tcg_ctx, syndrome); + tcg_isread = tcg_const_i32(tcg_ctx, isread); + gen_helper_access_check_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tcg_syn, + tcg_isread); + tcg_temp_free_ptr(tcg_ctx, tmpptr); + tcg_temp_free_i32(tcg_ctx, tcg_syn); + tcg_temp_free_i32(tcg_ctx, tcg_isread); + } else if (ri->type & ARM_CP_RAISES_EXC) { + /* + * The readfn or writefn might raise an exception; + * synchronize the CPU state in case it does. 
+ */ + gen_set_condexec(s); + gen_set_pc_im(s, s->pc_curr); + } + + /* Handle special cases first */ + switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) { + case ARM_CP_NOP: + return 0; + case ARM_CP_WFI: + if (isread) { + return 1; + } + gen_set_pc_im(s, s->base.pc_next); + s->base.is_jmp = DISAS_WFI; + return 0; + default: + break; + } + + if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { + // gen_io_start(tcg_ctx); + } + + if (isread) { + /* Read */ + if (is64) { + TCGv_i64 tmp64; + TCGv_i32 tmp; + if (ri->type & ARM_CP_CONST) { + tmp64 = tcg_const_i64(tcg_ctx, ri->resetvalue); + } else if (ri->readfn) { + TCGv_ptr tmpptr; + tmp64 = tcg_temp_new_i64(tcg_ctx); + tmpptr = tcg_const_ptr(tcg_ctx, ri); + gen_helper_get_cp_reg64(tcg_ctx, tmp64, tcg_ctx->cpu_env, tmpptr); + tcg_temp_free_ptr(tcg_ctx, tmpptr); + } else { + tmp64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, tmp64, tcg_ctx->cpu_env, ri->fieldoffset); + } + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, tmp, tmp64); + store_reg(s, rt, tmp); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrh_i64_i32(tcg_ctx, tmp, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + store_reg(s, rt2, tmp); + } else { + TCGv_i32 tmp; + if (ri->type & ARM_CP_CONST) { + tmp = tcg_const_i32(tcg_ctx, ri->resetvalue); + } else if (ri->readfn) { + TCGv_ptr tmpptr; + tmp = tcg_temp_new_i32(tcg_ctx); + tmpptr = tcg_const_ptr(tcg_ctx, ri); + gen_helper_get_cp_reg(tcg_ctx, tmp, tcg_ctx->cpu_env, tmpptr); + tcg_temp_free_ptr(tcg_ctx, tmpptr); + } else { + tmp = load_cpu_offset(tcg_ctx, ri->fieldoffset); + } + if (rt == 15) { + /* Destination register of r15 for 32 bit loads sets + * the condition codes from the high 4 bits of the value + */ + gen_set_nzcv(tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + store_reg(s, rt, tmp); + } + } + } else { + /* Write */ + if (ri->type & ARM_CP_CONST) { + /* If not forbidden by access permissions, treat as WI */ + return 0; + } + + if (is64) { + TCGv_i32 tmplo, tmphi; + TCGv_i64 tmp64 = tcg_temp_new_i64(tcg_ctx); + tmplo = load_reg(s, rt); + tmphi = load_reg(s, rt2); + tcg_gen_concat_i32_i64(tcg_ctx, tmp64, tmplo, tmphi); + tcg_temp_free_i32(tcg_ctx, tmplo); + tcg_temp_free_i32(tcg_ctx, tmphi); + if (ri->writefn) { + TCGv_ptr tmpptr = tcg_const_ptr(tcg_ctx, ri); + gen_helper_set_cp_reg64(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tmp64); + tcg_temp_free_ptr(tcg_ctx, tmpptr); + } else { + tcg_gen_st_i64(tcg_ctx, tmp64, tcg_ctx->cpu_env, ri->fieldoffset); + } + tcg_temp_free_i64(tcg_ctx, tmp64); + } else { + if (ri->writefn) { + TCGv_i32 tmp; + TCGv_ptr tmpptr; + tmp = load_reg(s, rt); + tmpptr = tcg_const_ptr(tcg_ctx, ri); + gen_helper_set_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tmp); + tcg_temp_free_ptr(tcg_ctx, tmpptr); + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + TCGv_i32 tmp = load_reg(s, rt); + store_cpu_offset(tcg_ctx, tmp, ri->fieldoffset); + } + } + } + + /* I/O operations must end the TB here (whether read or write) */ + need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && + (ri->type & ARM_CP_IO)); + + if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { + /* + * A write to any coprocessor register that ends a TB + * must rebuild the hflags for the next TB. 
+ */ + TCGv_i32 tcg_el = tcg_const_i32(tcg_ctx, s->current_el); + if (arm_dc_feature(s, ARM_FEATURE_M)) { + gen_helper_rebuild_hflags_m32(tcg_ctx, tcg_ctx->cpu_env, tcg_el); + } else { + if (ri->type & ARM_CP_NEWEL) { + gen_helper_rebuild_hflags_a32_newel(tcg_ctx, tcg_ctx->cpu_env); + } else { + gen_helper_rebuild_hflags_a32(tcg_ctx, tcg_ctx->cpu_env, tcg_el); + } + } + tcg_temp_free_i32(tcg_ctx, tcg_el); + /* + * We default to ending the TB on a coprocessor register write, + * but allow this to be suppressed by the register definition + * (usually only necessary to work around guest bugs). + */ + need_exit_tb = true; + } + if (need_exit_tb) { + gen_lookup_tb(s); + } + + return 0; + } + + /* Unknown register; this might be a guest error or a QEMU + * unimplemented feature. + */ + if (is64) { + qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 " + "64 bit system register cp:%d opc1: %d crm:%d " + "(%s)\n", + isread ? "read" : "write", cpnum, opc1, crm, + s->ns ? "non-secure" : "secure"); + } else { + qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 " + "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d " + "(%s)\n", + isread ? "read" : "write", cpnum, opc1, crn, crm, opc2, + s->ns ? "non-secure" : "secure"); + } + + return 1; +} + + +/* Store a 64-bit value to a register pair. Clobbers val. */ +static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, tmp, val); + store_reg(s, rlow, tmp); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrh_i64_i32(tcg_ctx, tmp, val); + store_reg(s, rhigh, tmp); +} + +/* load and add a 64-bit value from a register pair. */ +static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tmp; + TCGv_i32 tmpl; + TCGv_i32 tmph; + + /* Load 64-bit value rd:rn. */ + tmpl = load_reg(s, rlow); + tmph = load_reg(s, rhigh); + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, tmp, tmpl, tmph); + tcg_temp_free_i32(tcg_ctx, tmpl); + tcg_temp_free_i32(tcg_ctx, tmph); + tcg_gen_add_i64(tcg_ctx, val, val, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +/* Set N and Z flags from hi|lo. */ +static void gen_logicq_cc(TCGContext *tcg_ctx, TCGv_i32 lo, TCGv_i32 hi) +{ + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, hi); + tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_ZF, lo, hi); +} + +/* Load/Store exclusive instructions are implemented by remembering + the value/address loaded, and seeing if these are the same + when the store is performed. This should be sufficient to implement + the architecturally mandated semantics, and avoids having to monitor + regular stores. The compare vs the remembered value is done during + the cmpxchg operation, but we must compare the addresses manually. */ +static void gen_load_exclusive(DisasContext *s, int rt, int rt2, + TCGv_i32 addr, int size) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + MemOp opc = size | MO_ALIGN | s->be_data; + + s->is_ldex = true; + + if (size == 3) { + TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); + + /* For AArch32, architecturally the 32-bit word at the lowest + * address is always Rt and the one at addr+4 is Rt2, even if + * the CPU is big-endian. 
That means we don't want to do a + * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if + * for an architecturally 64-bit access, but instead do a + * 64-bit access using MO_BE if appropriate and then split + * the two halves. + * This only makes a difference for BE32 user-mode, where + * frob64() must not flip the two halves of the 64-bit data + * but this code must treat BE32 user-mode like BE32 system. + */ + TCGv taddr = gen_aa32_addr(s, addr, opc); + + tcg_gen_qemu_ld_i64(tcg_ctx, t64, taddr, get_mem_index(s), opc); + tcg_temp_free(tcg_ctx, taddr); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, t64); + if (s->be_data == MO_BE) { + tcg_gen_extr_i64_i32(tcg_ctx, tmp2, tmp, t64); + } else { + tcg_gen_extr_i64_i32(tcg_ctx, tmp, tmp2, t64); + } + tcg_temp_free_i64(tcg_ctx, t64); + + store_reg(s, rt2, tmp2); + } else { + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc); + tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, tmp); + } + + store_reg(s, rt, tmp); + tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, addr); +} + +static void gen_clrex(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1); +} + +static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, + TCGv_i32 addr, int size) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t0, t1, t2; + TCGv_i64 extaddr; + TCGv taddr; + TCGLabel *done_label; + TCGLabel *fail_label; + MemOp opc = size | MO_ALIGN | s->be_data; + + /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { + [addr] = {Rt}; + {Rd} = 0; + } else { + {Rd} = 1; + } */ + fail_label = gen_new_label(tcg_ctx); + done_label = gen_new_label(tcg_ctx); + extaddr = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, extaddr, addr); + tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, extaddr, tcg_ctx->cpu_exclusive_addr, fail_label); + tcg_temp_free_i64(tcg_ctx, extaddr); + + taddr = gen_aa32_addr(s, addr, opc); + t0 = tcg_temp_new_i32(tcg_ctx); + t1 = load_reg(s, rt); + if (size == 3) { + TCGv_i64 o64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 n64 = tcg_temp_new_i64(tcg_ctx); + + t2 = load_reg(s, rt2); + /* For AArch32, architecturally the 32-bit word at the lowest + * address is always Rt and the one at addr+4 is Rt2, even if + * the CPU is big-endian. Since we're going to treat this as a + * single 64-bit BE store, we need to put the two halves in the + * opposite order for BE to LE, so that they end up in the right + * places. + * We don't want gen_aa32_frob64() because that does the wrong + * thing for BE32 usermode. 
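The gen_load_exclusive()/gen_store_exclusive() pair is the whole exclusive-monitor story: the load records address and value, and the conditional store is a compare-and-swap against the recorded value, with the address compared manually beforehand. A rough user-space analogue of the scheme, using C11 atomics (a sketch only; the real code does this with TCG ops and the per-CPU cpu_exclusive_addr/cpu_exclusive_val state):

    #include <stdatomic.h>
    #include <stdint.h>

    static uintptr_t excl_addr = (uintptr_t)-1;  /* monitor: remembered address */
    static uint32_t  excl_val;                   /* monitor: remembered value   */

    static uint32_t ldrex32(_Atomic uint32_t *p)
    {
        excl_addr = (uintptr_t)p;
        excl_val  = atomic_load(p);              /* remember what we saw */
        return excl_val;
    }

    /* Returns 0 on success, 1 on failure, like STREX's status result. */
    static int strex32(_Atomic uint32_t *p, uint32_t newval)
    {
        uint32_t expected = excl_val;
        if ((uintptr_t)p != excl_addr) {
            return 1;                            /* address compared manually */
        }
        excl_addr = (uintptr_t)-1;               /* clear the monitor */
        /* The value comparison happens inside the compare-and-swap itself. */
        return atomic_compare_exchange_strong(p, &expected, newval) ? 0 : 1;
    }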
+ */ + if (s->be_data == MO_BE) { + tcg_gen_concat_i32_i64(tcg_ctx, n64, t2, t1); + } else { + tcg_gen_concat_i32_i64(tcg_ctx, n64, t1, t2); + } + tcg_temp_free_i32(tcg_ctx, t2); + + tcg_gen_atomic_cmpxchg_i64(tcg_ctx, o64, taddr, tcg_ctx->cpu_exclusive_val, n64, + get_mem_index(s), opc); + tcg_temp_free_i64(tcg_ctx, n64); + + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, o64, o64, tcg_ctx->cpu_exclusive_val); + tcg_gen_extrl_i64_i32(tcg_ctx, t0, o64); + + tcg_temp_free_i64(tcg_ctx, o64); + } else { + t2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, t2, tcg_ctx->cpu_exclusive_val); + tcg_gen_atomic_cmpxchg_i32(tcg_ctx, t0, taddr, t2, t1, get_mem_index(s), opc); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, t0, t0, t2); + tcg_temp_free_i32(tcg_ctx, t2); + } + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, taddr); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_R[rd], t0); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_gen_br(tcg_ctx, done_label); + + gen_set_label(tcg_ctx, fail_label); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[rd], 1); + gen_set_label(tcg_ctx, done_label); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1); +} + +/* gen_srs: + * @env: CPUARMState + * @s: DisasContext + * @mode: mode field from insn (which stack to store to) + * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn + * @writeback: true if writeback bit set + * + * Generate code for the SRS (Store Return State) insn. + */ +static void gen_srs(DisasContext *s, + uint32_t mode, uint32_t amode, bool writeback) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int32_t offset; + TCGv_i32 addr, tmp; + bool undef = false; + + /* SRS is: + * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1 + * and specified mode is monitor mode + * - UNDEFINED in Hyp mode + * - UNPREDICTABLE in User or System mode + * - UNPREDICTABLE if the specified mode is: + * -- not implemented + * -- not a valid mode number + * -- a mode that's at a higher exception level + * -- Monitor, if we are Non-secure + * For the UNPREDICTABLE cases we choose to UNDEF. + */ + if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3); + return; + } + + if (s->current_el == 0 || s->current_el == 2) { + undef = true; + } + + switch (mode) { + case ARM_CPU_MODE_USR: + case ARM_CPU_MODE_FIQ: + case ARM_CPU_MODE_IRQ: + case ARM_CPU_MODE_SVC: + case ARM_CPU_MODE_ABT: + case ARM_CPU_MODE_UND: + case ARM_CPU_MODE_SYS: + break; + case ARM_CPU_MODE_HYP: + if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) { + undef = true; + } + break; + case ARM_CPU_MODE_MON: + /* No need to check specifically for "are we non-secure" because + * we've already made EL0 UNDEF and handled the trap for S-EL1; + * so if this isn't EL3 then we must be non-secure. 
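For the addressing-mode arithmetic in the body of gen_srs() below: the first offset switch positions the LR store, the SPSR always lands 4 bytes above it, and the writeback offset is applied after that +4. Composing the two switches (base = the banked SP, table derived from the code):

    amode  name   LR stored at   SPSR stored at   writeback value
      0     DA     base - 4       base             base - 8
      1     IA     base           base + 4         base + 8
      2     DB     base - 8       base - 4         base - 8
      3     IB     base + 4       base + 8         base + 8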
+ */ + if (s->current_el != 3) { + undef = true; + } + break; + default: + undef = true; + } + + if (undef) { + unallocated_encoding(s); + return; + } + + addr = tcg_temp_new_i32(tcg_ctx); + tmp = tcg_const_i32(tcg_ctx, mode); + /* get_r13_banked() will raise an exception if called from System mode */ + gen_set_condexec(s); + gen_set_pc_im(s, s->pc_curr); + gen_helper_get_r13_banked(tcg_ctx, addr, tcg_ctx->cpu_env, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + switch (amode) { + case 0: /* DA */ + offset = -4; + break; + case 1: /* IA */ + offset = 0; + break; + case 2: /* DB */ + offset = -8; + break; + case 3: /* IB */ + offset = 4; + break; + default: + abort(); + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + tmp = load_reg(s, 14); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + tmp = load_cpu_field(tcg_ctx, spsr); + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + if (writeback) { + switch (amode) { + case 0: + offset = -8; + break; + case 1: + offset = 4; + break; + case 2: + offset = -4; + break; + case 3: + offset = 0; + break; + default: + abort(); + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + tmp = tcg_const_i32(tcg_ctx, mode); + gen_helper_set_r13_banked(tcg_ctx, tcg_ctx->cpu_env, tmp, addr); + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_temp_free_i32(tcg_ctx, addr); + s->base.is_jmp = DISAS_UPDATE; +} + +/* Generate a label used for skipping this instruction */ +static void arm_gen_condlabel(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!s->condjmp) { + s->condlabel = gen_new_label(tcg_ctx); + s->condjmp = 1; + } +} + +/* Skip this instruction if the ARM condition is false */ +static void arm_skip_unless(DisasContext *s, uint32_t cond) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + arm_gen_condlabel(s); + arm_gen_test_cc(tcg_ctx, cond ^ 1, s->condlabel); +} + + +/* + * Constant expanders for the decoders. + */ + +static int negate(DisasContext *s, int x) +{ + return -x; +} + +static int plus_2(DisasContext *s, int x) +{ + return x + 2; +} + +static int times_2(DisasContext *s, int x) +{ + return x * 2; +} + +static int times_4(DisasContext *s, int x) +{ + return x * 4; +} + +/* Return only the rotation part of T32ExpandImm. */ +static int t32_expandimm_rot(DisasContext *s, int x) +{ + return x & 0xc00 ? extract32(x, 7, 5) : 0; +} + +/* Return the unrotated immediate from T32ExpandImm. */ +static int t32_expandimm_imm(DisasContext *s, int x) +{ + int imm = extract32(x, 0, 8); + + switch (extract32(x, 8, 4)) { + case 0: /* XY */ + /* Nothing to do. */ + break; + case 1: /* 00XY00XY */ + imm *= 0x00010001; + break; + case 2: /* XY00XY00 */ + imm *= 0x01000100; + break; + case 3: /* XYXYXYXY */ + imm *= 0x01010101; + break; + default: + /* Rotated constant. */ + imm |= 0x80; + break; + } + return imm; +} + +static int t32_branch24(DisasContext *s, int x) +{ + /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */ + x ^= !(x < 0) * (3 << 21); + /* Append the final zero. */ + return x << 1; +} + +static int t16_setflags(DisasContext *s) +{ + return s->condexec_mask == 0; +} + +static int t16_push_list(DisasContext *s, int x) +{ + return (x & 0xff) | (x & 0x100) << (14 - 8); +} + +static int t16_pop_list(DisasContext *s, int x) +{ + return (x & 0xff) | (x & 0x100) << (15 - 8); +} + +/* + * Include the generated decoders. 
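+ * These .inc.c files are produced at build time by
+ * scripts/decodetree.py from the corresponding a32/a32-uncond/t32/t16
+ * .decode pattern files; each matched pattern dispatches to one of
+ * the trans_* functions defined in this file.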
+ */ + +#include "decode-a32.inc.c" +#include "decode-a32-uncond.inc.c" +#include "decode-t32.inc.c" +#include "decode-t16.inc.c" + +/* Helpers to swap operands for reverse-subtract. */ +static void gen_rsb(TCGContext *tcg_ctx, TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_sub_i32(tcg_ctx, dst, b, a); +} + +static void gen_rsb_CC(TCGContext *tcg_ctx, TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b) +{ + gen_sub_CC(tcg_ctx, dst, b, a); +} + +static void gen_rsc(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b) +{ + gen_sub_carry(tcg_ctx, dest, b, a); +} + +static void gen_rsc_CC(TCGContext *tcg_ctx, TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b) +{ + gen_sbc_CC(tcg_ctx, dest, b, a); +} + +/* + * Helpers for the data processing routines. + * + * After the computation store the results back. + * This may be suppressed altogether (STREG_NONE), require a runtime + * check against the stack limits (STREG_SP_CHECK), or generate an + * exception return. Oh, or store into a register. + * + * Always return true, indicating success for a trans_* function. + */ +typedef enum { + STREG_NONE, + STREG_NORMAL, + STREG_SP_CHECK, + STREG_EXC_RET, +} StoreRegKind; + +static bool store_reg_kind(DisasContext *s, int rd, + TCGv_i32 val, StoreRegKind kind) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + switch (kind) { + case STREG_NONE: + tcg_temp_free_i32(tcg_ctx, val); + return true; + case STREG_NORMAL: + /* See ALUWritePC: Interworking only from a32 mode. */ + if (s->thumb) { + store_reg(s, rd, val); + } else { + store_reg_bx(s, rd, val); + } + return true; + case STREG_SP_CHECK: + store_sp_checked(s, val); + return true; + case STREG_EXC_RET: + gen_exception_return(s, val); + return true; + } + g_assert_not_reached(); + // never reach here + return true; +} + +/* + * Data Processing (register) + * + * Operate, with set flags, one register source, + * one immediate shifted register source, and a destination. + */ +static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a, + void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32), + int logic_cc, StoreRegKind kind) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp1, tmp2; + + tmp2 = load_reg(s, a->rm); + gen_arm_shift_im(tcg_ctx, tmp2, a->shty, a->shim, logic_cc); + tmp1 = load_reg(s, a->rn); + + gen(tcg_ctx, tmp1, tmp1, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + + if (logic_cc) { + gen_logic_CC(tcg_ctx, tmp1); + } + return store_reg_kind(s, a->rd, tmp1, kind); +} + +static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a, + void (*gen)(TCGContext *, TCGv_i32, TCGv_i32), + int logic_cc, StoreRegKind kind) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + tmp = load_reg(s, a->rm); + gen_arm_shift_im(tcg_ctx, tmp, a->shty, a->shim, logic_cc); + + gen(tcg_ctx, tmp, tmp); + if (logic_cc) { + gen_logic_CC(tcg_ctx, tmp); + } + return store_reg_kind(s, a->rd, tmp, kind); +} + +/* + * Data-processing (register-shifted register) + * + * Operate, with set flags, one register source, + * one register shifted register source, and a destination. 
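+ *
+ * For example (an illustrative encoding): "ADDS Rd, Rn, Rm, LSL Rs"
+ * uses Rn as the plain source and Rm, shifted left by the low byte
+ * of Rs, as the register-shifted source.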
+ */ +static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a, + void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32), + int logic_cc, StoreRegKind kind) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp1, tmp2; + + tmp1 = load_reg(s, a->rs); + tmp2 = load_reg(s, a->rm); + gen_arm_shift_reg(tcg_ctx, tmp2, a->shty, tmp1, logic_cc); + tmp1 = load_reg(s, a->rn); + + gen(tcg_ctx, tmp1, tmp1, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + + if (logic_cc) { + gen_logic_CC(tcg_ctx, tmp1); + } + return store_reg_kind(s, a->rd, tmp1, kind); +} + +static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a, + void (*gen)(TCGContext *, TCGv_i32, TCGv_i32), + int logic_cc, StoreRegKind kind) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp1, tmp2; + + tmp1 = load_reg(s, a->rs); + tmp2 = load_reg(s, a->rm); + gen_arm_shift_reg(tcg_ctx, tmp2, a->shty, tmp1, logic_cc); + + gen(tcg_ctx, tmp2, tmp2); + if (logic_cc) { + gen_logic_CC(tcg_ctx, tmp2); + } + return store_reg_kind(s, a->rd, tmp2, kind); +} + +/* + * Data-processing (immediate) + * + * Operate, with set flags, one register source, + * one rotated immediate, and a destination. + * + * Note that logic_cc && a->rot setting CF based on the msb of the + * immediate is the reason why we must pass in the unrotated form + * of the immediate. + */ +static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a, + void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32), + int logic_cc, StoreRegKind kind) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp1, tmp2; + uint32_t imm; + + imm = ror32(a->imm, a->rot); + if (logic_cc && a->rot) { + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, imm >> 31); + } + tmp2 = tcg_const_i32(tcg_ctx, imm); + tmp1 = load_reg(s, a->rn); + + gen(tcg_ctx, tmp1, tmp1, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + + if (logic_cc) { + gen_logic_CC(tcg_ctx, tmp1); + } + return store_reg_kind(s, a->rd, tmp1, kind); +} + +static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a, + void (*gen)(TCGContext *, TCGv_i32, TCGv_i32), + int logic_cc, StoreRegKind kind) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + uint32_t imm; + + imm = ror32(a->imm, a->rot); + if (logic_cc && a->rot) { + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, imm >> 31); + } + tmp = tcg_const_i32(tcg_ctx, imm); + + gen(tcg_ctx, tmp, tmp); + if (logic_cc) { + gen_logic_CC(tcg_ctx, tmp); + } + return store_reg_kind(s, a->rd, tmp, kind); +} + +#define DO_ANY3(NAME, OP, L, K) \ + static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \ + { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \ + static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \ + { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \ + static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \ + { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); } + +#define DO_ANY3_unicorn(NAME, OP, L, K) \ + static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \ + { StoreRegKind k = K(s, a->rd, a->rn, &a->s); return op_s_rrr_shi(s, a, OP, L, k); } \ + static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \ + { StoreRegKind k = K(s, a->rd, a->rn, &a->s); return op_s_rrr_shr(s, a, OP, L, k); } \ + static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \ + { StoreRegKind k = K(s, a->rd, a->rn, &a->s); return op_s_rri_rot(s, a, OP, L, k); } + +#define DO_ANY2(NAME, OP, L, K) \ + static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \ + { StoreRegKind k = (K); return 
op_s_rxr_shi(s, a, OP, L, k); } \
+ static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
+ { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \
+ static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
+ { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
+
+#define DO_ANY2_unicorn(NAME, OP, L, K) \
+ static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
+ { StoreRegKind k = K(s, a->rd, &a->s); return op_s_rxr_shi(s, a, OP, L, k); } \
+ static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
+ { StoreRegKind k = K(s, a->rd, &a->s); return op_s_rxr_shr(s, a, OP, L, k); } \
+ static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
+ { StoreRegKind k = K(s, a->rd, &a->s); return op_s_rxi_rot(s, a, OP, L, k); }
+
+#define DO_CMP2(NAME, OP, L) \
+ static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
+ { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
+ static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
+ { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
+ static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
+ { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
+
+DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
+DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
+DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
+DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
+
+DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
+DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
+DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
+DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
+
+DO_CMP2(TST, tcg_gen_and_i32, true)
+DO_CMP2(TEQ, tcg_gen_xor_i32, true)
+DO_CMP2(CMN, gen_add_CC, false)
+DO_CMP2(CMP, gen_sub_CC, false)
+
+DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
+ a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
+
+/*
+ * Note that the computation of StoreRegKind is done by the
+ * get_storage* helpers consumed by DO_ANY3_unicorn/DO_ANY2_unicorn
+ * below, and that they modify a->s via the pointer parameter
+ * before it is used by OP.
+ */
+
+static StoreRegKind get_storage3(DisasContext *s, int rd, int rn, int *as)
+{
+ StoreRegKind ret = STREG_NORMAL;
+ if (rd == 15 && *as) {
+ /*
+ * See ALUExceptionReturn:
+ * In User mode, UNPREDICTABLE; we choose UNDEF.
+ * In Hyp mode, UNDEFINED.
+ */
+ if (IS_USER(s) || s->current_el == 2) {
+ unallocated_encoding(s);
+ /* insn is UNDEF; suppress the register write-back */
+ return STREG_NONE;
+ }
+ /* There is no writeback of nzcv to PSTATE. */
+ *as = 0;
+ ret = STREG_EXC_RET;
+ } else if (rd == 13 && rn == 13) {
+ ret = STREG_SP_CHECK;
+ }
+
+ return ret;
+}
+
+DO_ANY3_unicorn(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false, get_storage3)
+
+static StoreRegKind get_storage2(DisasContext *s, int rd, int *as)
+{
+ StoreRegKind ret = STREG_NORMAL;
+ if (rd == 15 && *as) {
+ /*
+ * See ALUExceptionReturn:
+ * In User mode, UNPREDICTABLE; we choose UNDEF.
+ * In Hyp mode, UNDEFINED.
+ */
+ if (IS_USER(s) || s->current_el == 2) {
+ unallocated_encoding(s);
+ /* insn is UNDEF; suppress the register write-back */
+ return STREG_NONE;
+ }
+ /* There is no writeback of nzcv to PSTATE. */
+ *as = 0;
+ ret = STREG_EXC_RET;
+ } else if (rd == 13) {
+ ret = STREG_SP_CHECK;
+ }
+
+ return ret;
+}
+
+DO_ANY2_unicorn(MOV, tcg_gen_mov_i32, a->s, get_storage2)
+
+DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
+
+/*
+ * ORN is only available with T32, so there is no register-shifted-register
+ * form of the insn. Using the DO_ANY3 macro would create an unused function.
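+ * As a usage sketch: "ORN Rd, Rn, Rm" computes Rd = Rn | ~Rm, which
+ * is why tcg_gen_orc_i32 (OR with complemented operand) is the OP
+ * passed to the two handlers below.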
+ */ +static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a) +{ + return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL); +} + +static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a) +{ + return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL); +} + +#undef DO_ANY3 +#undef DO_ANY2 +#undef DO_CMP2 + +static bool trans_ADR(DisasContext *s, arg_ri *a) +{ + store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm)); + return true; +} + +static bool trans_MOVW(DisasContext *s, arg_MOVW *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + if (!ENABLE_ARCH_6T2) { + return false; + } + + tmp = tcg_const_i32(tcg_ctx, a->imm); + store_reg(s, a->rd, tmp); + return true; +} + +static bool trans_MOVT(DisasContext *s, arg_MOVW *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + if (!ENABLE_ARCH_6T2) { + return false; + } + + tmp = load_reg(s, a->rd); + tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); + tcg_gen_ori_i32(tcg_ctx, tmp, tmp, a->imm << 16); + store_reg(s, a->rd, tmp); + return true; +} + +/* + * Multiply and multiply accumulate + */ + +static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1, t2; + + t1 = load_reg(s, a->rn); + t2 = load_reg(s, a->rm); + tcg_gen_mul_i32(tcg_ctx, t1, t1, t2); + tcg_temp_free_i32(tcg_ctx, t2); + if (add) { + t2 = load_reg(s, a->ra); + tcg_gen_add_i32(tcg_ctx, t1, t1, t2); + tcg_temp_free_i32(tcg_ctx, t2); + } + if (a->s) { + gen_logic_CC(tcg_ctx, t1); + } + store_reg(s, a->rd, t1); + return true; +} + +static bool trans_MUL(DisasContext *s, arg_MUL *a) +{ + return op_mla(s, a, false); +} + +static bool trans_MLA(DisasContext *s, arg_MLA *a) +{ + return op_mla(s, a, true); +} + +static bool trans_MLS(DisasContext *s, arg_MLS *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1, t2; + + if (!ENABLE_ARCH_6T2) { + return false; + } + t1 = load_reg(s, a->rn); + t2 = load_reg(s, a->rm); + tcg_gen_mul_i32(tcg_ctx, t1, t1, t2); + tcg_temp_free_i32(tcg_ctx, t2); + t2 = load_reg(s, a->ra); + tcg_gen_sub_i32(tcg_ctx, t1, t2, t1); + tcg_temp_free_i32(tcg_ctx, t2); + store_reg(s, a->rd, t1); + return true; +} + +static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t0, t1, t2, t3; + + t0 = load_reg(s, a->rm); + t1 = load_reg(s, a->rn); + if (uns) { + tcg_gen_mulu2_i32(tcg_ctx, t0, t1, t0, t1); + } else { + tcg_gen_muls2_i32(tcg_ctx, t0, t1, t0, t1); + } + if (add) { + t2 = load_reg(s, a->ra); + t3 = load_reg(s, a->rd); + tcg_gen_add2_i32(tcg_ctx, t0, t1, t0, t1, t2, t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + if (a->s) { + gen_logicq_cc(tcg_ctx, t0, t1); + } + store_reg(s, a->ra, t0); + store_reg(s, a->rd, t1); + return true; +} + +static bool trans_UMULL(DisasContext *s, arg_UMULL *a) +{ + return op_mlal(s, a, true, false); +} + +static bool trans_SMULL(DisasContext *s, arg_SMULL *a) +{ + return op_mlal(s, a, false, false); +} + +static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a) +{ + return op_mlal(s, a, true, true); +} + +static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a) +{ + return op_mlal(s, a, false, true); +} + +static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t0, t1, t2, zero; + + if (s->thumb + ? 
!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) + : !ENABLE_ARCH_6) { + return false; + } + + t0 = load_reg(s, a->rm); + t1 = load_reg(s, a->rn); + tcg_gen_mulu2_i32(tcg_ctx, t0, t1, t0, t1); + zero = tcg_const_i32(tcg_ctx, 0); + t2 = load_reg(s, a->ra); + tcg_gen_add2_i32(tcg_ctx, t0, t1, t0, t1, t2, zero); + tcg_temp_free_i32(tcg_ctx, t2); + t2 = load_reg(s, a->rd); + tcg_gen_add2_i32(tcg_ctx, t0, t1, t0, t1, t2, zero); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, zero); + store_reg(s, a->ra, t0); + store_reg(s, a->rd, t1); + return true; +} + +/* + * Saturating addition and subtraction + */ + +static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t0, t1; + + if (s->thumb + ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) + : !ENABLE_ARCH_5TE) { + return false; + } + + t0 = load_reg(s, a->rm); + t1 = load_reg(s, a->rn); + if (doub) { + gen_helper_add_saturate(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t1); + } + if (add) { + gen_helper_add_saturate(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + } else { + gen_helper_sub_saturate(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + } + tcg_temp_free_i32(tcg_ctx, t1); + store_reg(s, a->rd, t0); + return true; +} + +#define DO_QADDSUB(NAME, ADD, DOUB) \ +static bool trans_##NAME(DisasContext *s, arg_rrr *a) \ +{ \ + return op_qaddsub(s, a, ADD, DOUB); \ +} + +DO_QADDSUB(QADD, true, false) +DO_QADDSUB(QSUB, false, false) +DO_QADDSUB(QDADD, true, true) +DO_QADDSUB(QDSUB, false, true) + +#undef DO_QADDSUB + +/* + * Halfword multiply and multiply accumulate + */ + +static bool op_smlaxxx(DisasContext *s, arg_rrrr *a, + int add_long, bool nt, bool mt) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t0, t1, tl, th; + + if (s->thumb + ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) + : !ENABLE_ARCH_5TE) { + return false; + } + + t0 = load_reg(s, a->rn); + t1 = load_reg(s, a->rm); + gen_mulxy(tcg_ctx, t0, t1, nt, mt); + tcg_temp_free_i32(tcg_ctx, t1); + + switch (add_long) { + case 0: + store_reg(s, a->rd, t0); + break; + case 1: + t1 = load_reg(s, a->ra); + gen_helper_add_setq(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + tcg_temp_free_i32(tcg_ctx, t1); + store_reg(s, a->rd, t0); + break; + case 2: + tl = load_reg(s, a->ra); + th = load_reg(s, a->rd); + /* Sign-extend the 32-bit product to 64 bits. */ + t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_sari_i32(tcg_ctx, t1, t0, 31); + tcg_gen_add2_i32(tcg_ctx, tl, th, tl, th, t0, t1); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + store_reg(s, a->ra, tl); + store_reg(s, a->rd, th); + break; + default: + g_assert_not_reached(); + break; + } + return true; +} + +#define DO_SMLAX(NAME, add, nt, mt) \ +static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \ +{ \ + return op_smlaxxx(s, a, add, nt, mt); \ +} + +DO_SMLAX(SMULBB, 0, 0, 0) +DO_SMLAX(SMULBT, 0, 0, 1) +DO_SMLAX(SMULTB, 0, 1, 0) +DO_SMLAX(SMULTT, 0, 1, 1) + +DO_SMLAX(SMLABB, 1, 0, 0) +DO_SMLAX(SMLABT, 1, 0, 1) +DO_SMLAX(SMLATB, 1, 1, 0) +DO_SMLAX(SMLATT, 1, 1, 1) + +DO_SMLAX(SMLALBB, 2, 0, 0) +DO_SMLAX(SMLALBT, 2, 0, 1) +DO_SMLAX(SMLALTB, 2, 1, 0) +DO_SMLAX(SMLALTT, 2, 1, 1) + +#undef DO_SMLAX + +static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t0, t1; + + if (!ENABLE_ARCH_5TE) { + return false; + } + + t0 = load_reg(s, a->rn); + t1 = load_reg(s, a->rm); + /* + * Since the nominal result is product<47:16>, shift the 16-bit + * input up by 16 bits, so that the result is at product<63:32>. 
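+ * As a quick arithmetic check: multiplying by (sxt16(Rm) << 16)
+ * scales the 64-bit product by 2^16, so the wanted bits <47:16>
+ * land at <63:32>, i.e. exactly the high word of the muls2 below.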
+ */ + if (mt) { + tcg_gen_andi_i32(tcg_ctx, t1, t1, 0xffff0000); + } else { + tcg_gen_shli_i32(tcg_ctx, t1, t1, 16); + } + tcg_gen_muls2_i32(tcg_ctx, t0, t1, t0, t1); + tcg_temp_free_i32(tcg_ctx, t0); + if (add) { + t0 = load_reg(s, a->ra); + gen_helper_add_setq(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } + store_reg(s, a->rd, t1); + return true; +} + +#define DO_SMLAWX(NAME, add, mt) \ +static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \ +{ \ + return op_smlawx(s, a, add, mt); \ +} + +DO_SMLAWX(SMULWB, 0, 0) +DO_SMLAWX(SMULWT, 0, 1) +DO_SMLAWX(SMLAWB, 1, 0) +DO_SMLAWX(SMLAWT, 1, 1) + +#undef DO_SMLAWX + +/* + * MSR (immediate) and hints + */ + +static bool trans_YIELD(DisasContext *s, arg_YIELD *a) +{ + /* + * When running single-threaded TCG code, use the helper to ensure that + * the next round-robin scheduled vCPU gets a crack. When running in + * MTTCG we don't generate jumps to the helper as it won't affect the + * scheduling of other vCPUs. + */ + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_set_pc_im(s, s->base.pc_next); + s->base.is_jmp = DISAS_YIELD; + } + return true; +} + +static bool trans_WFE(DisasContext *s, arg_WFE *a) +{ + /* + * When running single-threaded TCG code, use the helper to ensure that + * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we + * just skip this instruction. Currently the SEV/SEVL instructions, + * which are *one* of many ways to wake the CPU from WFE, are not + * implemented so we can't sleep like WFI does. + */ + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_set_pc_im(s, s->base.pc_next); + s->base.is_jmp = DISAS_WFE; + } + return true; +} + +static bool trans_WFI(DisasContext *s, arg_WFI *a) +{ + /* For WFI, halt the vCPU until an IRQ. */ + gen_set_pc_im(s, s->base.pc_next); + s->base.is_jmp = DISAS_WFI; + return true; +} + +static bool trans_NOP(DisasContext *s, arg_NOP *a) +{ + return true; +} + +static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a) +{ + uint32_t val = ror32(a->imm, a->rot * 2); + uint32_t mask = msr_mask(s, a->mask, a->r); + + if (gen_set_psr_im(s, mask, a->r, val)) { + unallocated_encoding(s); + } + return true; +} + +/* + * Cyclic Redundancy Check + */ + +static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1, t2, t3; + + if (!dc_isar_feature(aa32_crc32, s)) { + return false; + } + + t1 = load_reg(s, a->rn); + t2 = load_reg(s, a->rm); + switch (sz) { + case MO_8: + gen_uxtb(t2); + break; + case MO_16: + gen_uxth(t2); + break; + case MO_32: + break; + default: + g_assert_not_reached(); + break; + } + t3 = tcg_const_i32(tcg_ctx, 1 << sz); + if (c) { + gen_helper_crc32c(tcg_ctx, t1, t1, t2, t3); + } else { + gen_helper_crc32(tcg_ctx, t1, t1, t2, t3); + } + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + store_reg(s, a->rd, t1); + return true; +} + +#define DO_CRC32(NAME, c, sz) \ +static bool trans_##NAME(DisasContext *s, arg_rrr *a) \ + { return op_crc32(s, a, c, sz); } + +DO_CRC32(CRC32B, false, MO_8) +DO_CRC32(CRC32H, false, MO_16) +DO_CRC32(CRC32W, false, MO_32) +DO_CRC32(CRC32CB, true, MO_8) +DO_CRC32(CRC32CH, true, MO_16) +DO_CRC32(CRC32CW, true, MO_32) + +#undef DO_CRC32 + +/* + * Miscellaneous instructions + */ + +static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a) +{ + if (arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + gen_mrs_banked(s, a->r, a->sysm, a->rd); + return true; +} + +static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank 
*a) +{ + if (arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + gen_msr_banked(s, a->r, a->sysm, a->rn); + return true; +} + +static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + if (arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + if (a->r) { + if (IS_USER(s)) { + unallocated_encoding(s); + return true; + } + tmp = load_cpu_field(tcg_ctx, spsr); + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + gen_helper_cpsr_read(tcg_ctx, tmp, tcg_ctx->cpu_env); + } + store_reg(s, a->rd, tmp); + return true; +} + +static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a) +{ + TCGv_i32 tmp; + uint32_t mask = msr_mask(s, a->mask, a->r); + + if (arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + tmp = load_reg(s, a->rn); + if (gen_set_psr(s, mask, a->r, tmp)) { + unallocated_encoding(s); + } + return true; +} + +static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + if (!arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + tmp = tcg_const_i32(tcg_ctx, a->sysm); + gen_helper_v7m_mrs(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + store_reg(s, a->rd, tmp); + return true; +} + +static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 addr, reg; + + if (!arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + addr = tcg_const_i32(tcg_ctx, (a->mask << 10) | a->sysm); + reg = load_reg(s, a->rn); + gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, reg); + tcg_temp_free_i32(tcg_ctx, addr); + tcg_temp_free_i32(tcg_ctx, reg); + /* If we wrote to CONTROL, the EL might have changed */ + gen_helper_rebuild_hflags_m32_newel(tcg_ctx, tcg_ctx->cpu_env); + gen_lookup_tb(s); + return true; +} + +static bool trans_BX(DisasContext *s, arg_BX *a) +{ + if (!ENABLE_ARCH_4T) { + return false; + } + gen_bx_excret(s, load_reg(s, a->rm)); + return true; +} + +static bool trans_BXJ(DisasContext *s, arg_BXJ *a) +{ + if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + /* Trivial implementation equivalent to bx. */ + gen_bx(s, load_reg(s, a->rm)); + return true; +} + +static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + if (!ENABLE_ARCH_5) { + return false; + } + tmp = load_reg(s, a->rm); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->base.pc_next | s->thumb); + gen_bx(s, tmp); + return true; +} + +/* + * BXNS/BLXNS: only exist for v8M with the security extensions, + * and always UNDEF if NonSecure. We don't implement these in + * the user-only mode either (in theory you can use them from + * Secure User mode but they are too tied in to system emulation). 
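+ * A short usage sketch: Secure code executes "BXNS lr" with the LSB
+ * of the target address clear to return to Non-secure code;
+ * gen_bxns() defers all of that security-state switching to a
+ * run-time helper.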
+ */ +static bool trans_BXNS(DisasContext *s, arg_BXNS *a) +{ + if (!s->v8m_secure || IS_USER_ONLY) { + unallocated_encoding(s); + } else { + gen_bxns(s, a->rm); + } + return true; +} + +static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a) +{ + if (!s->v8m_secure || IS_USER_ONLY) { + unallocated_encoding(s); + } else { + gen_blxns(s, a->rm); + } + return true; +} + +static bool trans_CLZ(DisasContext *s, arg_CLZ *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + if (!ENABLE_ARCH_5) { + return false; + } + tmp = load_reg(s, a->rm); + tcg_gen_clzi_i32(tcg_ctx, tmp, tmp, 32); + store_reg(s, a->rd, tmp); + return true; +} + +static bool trans_ERET(DisasContext *s, arg_ERET *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) { + return false; + } + if (IS_USER(s)) { + unallocated_encoding(s); + return true; + } + if (s->current_el == 2) { + /* ERET from Hyp uses ELR_Hyp, not LR */ + tmp = load_cpu_field(tcg_ctx, elr_el[2]); + } else { + tmp = load_reg(s, 14); + } + gen_exception_return(s, tmp); + return true; +} + +static bool trans_HLT(DisasContext *s, arg_HLT *a) +{ + gen_hlt(s, a->imm); + return true; +} + +static bool trans_BKPT(DisasContext *s, arg_BKPT *a) +{ + if (!ENABLE_ARCH_5) { + return false; + } + if (arm_dc_feature(s, ARM_FEATURE_M) && false && // semihosting_enabled() && + !IS_USER(s) && + (a->imm == 0xab)) { + gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST); + } else { + gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false)); + } + return true; +} + +static bool trans_HVC(DisasContext *s, arg_HVC *a) +{ + if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + if (IS_USER(s)) { + unallocated_encoding(s); + } else { + gen_hvc(s, a->imm); + } + return true; +} + +static bool trans_SMC(DisasContext *s, arg_SMC *a) +{ + if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + if (IS_USER(s)) { + unallocated_encoding(s); + } else { + gen_smc(s); + } + return true; +} + +static bool trans_SG(DisasContext *s, arg_SG *a) +{ + if (!arm_dc_feature(s, ARM_FEATURE_M) || + !arm_dc_feature(s, ARM_FEATURE_V8)) { + return false; + } + /* + * SG (v8M only) + * The bulk of the behaviour for this instruction is implemented + * in v7m_handle_execute_nsc(), which deals with the insn when + * it is executed by a CPU in non-secure state from memory + * which is Secure & NonSecure-Callable. + * Here we only need to handle the remaining cases: + * * in NS memory (including the "security extension not + * implemented" case) : NOP + * * in S memory but CPU already secure (clear IT bits) + * We know that the attribute for the memory this insn is + * in must match the current CPU state, because otherwise + * get_phys_addr_pmsav8 would have generated an exception. + */ + if (s->v8m_secure) { + /* Like the IT insn, we don't need to generate any code */ + s->condexec_cond = 0; + s->condexec_mask = 0; + } + return true; +} + +static bool trans_TT(DisasContext *s, arg_TT *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 addr, tmp; + + if (!arm_dc_feature(s, ARM_FEATURE_M) || + !arm_dc_feature(s, ARM_FEATURE_V8)) { + return false; + } + if (a->rd == 13 || a->rd == 15 || a->rn == 15) { + /* We UNDEF for these UNPREDICTABLE cases */ + unallocated_encoding(s); + return true; + } + if (a->A && !s->v8m_secure) { + /* This case is UNDEFINED. 
*/ + unallocated_encoding(s); + return true; + } + + addr = load_reg(s, a->rn); + tmp = tcg_const_i32(tcg_ctx, (a->A << 1) | a->T); + gen_helper_v7m_tt(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + store_reg(s, a->rd, tmp); + return true; +} + +/* + * Load/store register index + */ + +static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w) +{ + ISSInfo ret; + + /* ISS not valid if writeback */ + if (p && !w) { + ret = rd; + if (s->base.pc_next - s->pc_curr == 2) { + ret |= ISSIs16Bit; + } + } else { + ret = ISSInvalid; + } + return ret; +} + +static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 addr = load_reg(s, a->rn); + + if (s->v8m_stackcheck && a->rn == 13 && a->w) { + gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, addr); + } + + if (a->p) { + TCGv_i32 ofs = load_reg(s, a->rm); + gen_arm_shift_im(tcg_ctx, ofs, a->shtype, a->shimm, 0); + if (a->u) { + tcg_gen_add_i32(tcg_ctx, addr, addr, ofs); + } else { + tcg_gen_sub_i32(tcg_ctx, addr, addr, ofs); + } + tcg_temp_free_i32(tcg_ctx, ofs); + } + return addr; +} + +static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a, + TCGv_i32 addr, int address_offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!a->p) { + TCGv_i32 ofs = load_reg(s, a->rm); + gen_arm_shift_im(tcg_ctx, ofs, a->shtype, a->shimm, 0); + if (a->u) { + tcg_gen_add_i32(tcg_ctx, addr, addr, ofs); + } else { + tcg_gen_sub_i32(tcg_ctx, addr, addr, ofs); + } + tcg_temp_free_i32(tcg_ctx, ofs); + } else if (!a->w) { + tcg_temp_free_i32(tcg_ctx, addr); + return; + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, address_offset); + store_reg(s, a->rn, addr); +} + +static bool op_load_rr(DisasContext *s, arg_ldst_rr *a, + MemOp mop, int mem_idx) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w); + TCGv_i32 addr, tmp; + + addr = op_addr_rr_pre(s, a); + + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data); + disas_set_da_iss(s, mop, issinfo); + + /* + * Perform base writeback before the loaded value to + * ensure correct behavior with overlapping index registers. + */ + op_addr_rr_post(s, a, addr, 0); + store_reg_from_load(s, a->rt, tmp); + return true; +} + +static bool op_store_rr(DisasContext *s, arg_ldst_rr *a, + MemOp mop, int mem_idx) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite; + TCGv_i32 addr, tmp; + + addr = op_addr_rr_pre(s, a); + + tmp = load_reg(s, a->rt); + gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data); + disas_set_da_iss(s, mop, issinfo); + tcg_temp_free_i32(tcg_ctx, tmp); + + op_addr_rr_post(s, a, addr, 0); + return true; +} + +static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int mem_idx = get_mem_index(s); + TCGv_i32 addr, tmp; + + if (!ENABLE_ARCH_5TE) { + return false; + } + if (a->rt & 1) { + unallocated_encoding(s); + return true; + } + addr = op_addr_rr_pre(s, a); + + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); + store_reg(s, a->rt, tmp); + + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); + store_reg(s, a->rt + 1, tmp); + + /* LDRD w/ base writeback is undefined if the registers overlap. 
*/ + op_addr_rr_post(s, a, addr, -4); + return true; +} + +static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int mem_idx = get_mem_index(s); + TCGv_i32 addr, tmp; + + if (!ENABLE_ARCH_5TE) { + return false; + } + if (a->rt & 1) { + unallocated_encoding(s); + return true; + } + addr = op_addr_rr_pre(s, a); + + tmp = load_reg(s, a->rt); + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); + tcg_temp_free_i32(tcg_ctx, tmp); + + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + + tmp = load_reg(s, a->rt + 1); + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); + tcg_temp_free_i32(tcg_ctx, tmp); + + op_addr_rr_post(s, a, addr, -4); + return true; +} + +/* + * Load/store immediate index + */ + +static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int ofs = a->imm; + + if (!a->u) { + ofs = -ofs; + } + + if (s->v8m_stackcheck && a->rn == 13 && a->w) { + /* + * Stackcheck. Here we know 'addr' is the current SP; + * U is set if we're moving SP up, else down. It is + * UNKNOWN whether the limit check triggers when SP starts + * below the limit and ends up above it; we chose to do so. + */ + if (!a->u) { + TCGv_i32 newsp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_addi_i32(tcg_ctx, newsp, tcg_ctx->cpu_R[13], ofs); + gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, newsp); + tcg_temp_free_i32(tcg_ctx, newsp); + } else { + gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->cpu_R[13]); + } + } + + return add_reg_for_lit(s, a->rn, a->p ? ofs : 0); +} + +static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a, + TCGv_i32 addr, int address_offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!a->p) { + if (a->u) { + address_offset += a->imm; + } else { + address_offset -= a->imm; + } + } else if (!a->w) { + tcg_temp_free_i32(tcg_ctx, addr); + return; + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, address_offset); + store_reg(s, a->rn, addr); +} + +static bool op_load_ri(DisasContext *s, arg_ldst_ri *a, + MemOp mop, int mem_idx) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w); + TCGv_i32 addr, tmp; + + addr = op_addr_ri_pre(s, a); + + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data); + disas_set_da_iss(s, mop, issinfo); + + /* + * Perform base writeback before the loaded value to + * ensure correct behavior with overlapping index registers. 
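+ * For example, for a post-indexed "LDR r0, [r0], #4" the base
+ * update is emitted first and the loaded value is stored last, so
+ * r0 ends up holding the loaded data rather than the bumped base.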
+ */ + op_addr_ri_post(s, a, addr, 0); + store_reg_from_load(s, a->rt, tmp); + return true; +} + +static bool op_store_ri(DisasContext *s, arg_ldst_ri *a, + MemOp mop, int mem_idx) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite; + TCGv_i32 addr, tmp; + + addr = op_addr_ri_pre(s, a); + + tmp = load_reg(s, a->rt); + gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data); + disas_set_da_iss(s, mop, issinfo); + tcg_temp_free_i32(tcg_ctx, tmp); + + op_addr_ri_post(s, a, addr, 0); + return true; +} + +static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int mem_idx = get_mem_index(s); + TCGv_i32 addr, tmp; + + addr = op_addr_ri_pre(s, a); + + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); + store_reg(s, a->rt, tmp); + + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); + store_reg(s, rt2, tmp); + + /* LDRD w/ base writeback is undefined if the registers overlap. */ + op_addr_ri_post(s, a, addr, -4); + return true; +} + +static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a) +{ + if (!ENABLE_ARCH_5TE || (a->rt & 1)) { + return false; + } + return op_ldrd_ri(s, a, a->rt + 1); +} + +static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a) +{ + arg_ldst_ri b = { + .u = a->u, .w = a->w, .p = a->p, + .rn = a->rn, .rt = a->rt, .imm = a->imm + }; + return op_ldrd_ri(s, &b, a->rt2); +} + +static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int mem_idx = get_mem_index(s); + TCGv_i32 addr, tmp; + + addr = op_addr_ri_pre(s, a); + + tmp = load_reg(s, a->rt); + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); + tcg_temp_free_i32(tcg_ctx, tmp); + + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + + tmp = load_reg(s, rt2); + gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data); + tcg_temp_free_i32(tcg_ctx, tmp); + + op_addr_ri_post(s, a, addr, -4); + return true; +} + +static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a) +{ + if (!ENABLE_ARCH_5TE || (a->rt & 1)) { + return false; + } + return op_strd_ri(s, a, a->rt + 1); +} + +static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a) +{ + arg_ldst_ri b = { + .u = a->u, .w = a->w, .p = a->p, + .rn = a->rn, .rt = a->rt, .imm = a->imm + }; + return op_strd_ri(s, &b, a->rt2); +} + +#define DO_LDST(NAME, WHICH, MEMOP) \ +static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \ +{ \ + return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \ +} \ +static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \ +{ \ + return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \ +} \ +static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \ +{ \ + return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \ +} \ +static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \ +{ \ + return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \ +} + +DO_LDST(LDR, load, MO_UL) +DO_LDST(LDRB, load, MO_UB) +DO_LDST(LDRH, load, MO_UW) +DO_LDST(LDRSB, load, MO_SB) +DO_LDST(LDRSH, load, MO_SW) + +DO_LDST(STR, store, MO_UL) +DO_LDST(STRB, store, MO_UB) +DO_LDST(STRH, store, MO_UW) + +#undef DO_LDST + +/* + * Synchronization primitives + */ + +static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 addr, tmp; + TCGv taddr; + + opc |= 
s->be_data; + addr = load_reg(s, a->rn); + taddr = gen_aa32_addr(s, addr, opc); + tcg_temp_free_i32(tcg_ctx, addr); + + tmp = load_reg(s, a->rt2); + tcg_gen_atomic_xchg_i32(tcg_ctx, tmp, taddr, tmp, get_mem_index(s), opc); + tcg_temp_free(tcg_ctx, taddr); + + store_reg(s, a->rt, tmp); + return true; +} + +static bool trans_SWP(DisasContext *s, arg_SWP *a) +{ + return op_swp(s, a, MO_UL | MO_ALIGN); +} + +static bool trans_SWPB(DisasContext *s, arg_SWP *a) +{ + return op_swp(s, a, MO_UB); +} + +/* + * Load/Store Exclusive and Load-Acquire/Store-Release + */ + +static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 addr; + /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */ + bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M); + + /* We UNDEF for these UNPREDICTABLE cases. */ + if (a->rd == 15 || a->rn == 15 || a->rt == 15 + || a->rd == a->rn || a->rd == a->rt + || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13)) + || (mop == MO_64 + && (a->rt2 == 15 + || a->rd == a->rt2 + || (!v8a && s->thumb && a->rt2 == 13)))) { + unallocated_encoding(s); + return true; + } + + if (rel) { + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); + } + + addr = tcg_temp_local_new_i32(tcg_ctx); + load_reg_var(s, addr, a->rn); + tcg_gen_addi_i32(tcg_ctx, addr, addr, a->imm); + + gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop); + tcg_temp_free_i32(tcg_ctx, addr); + return true; +} + +static bool trans_STREX(DisasContext *s, arg_STREX *a) +{ + if (!ENABLE_ARCH_6) { + return false; + } + return op_strex(s, a, MO_32, false); +} + +static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a) +{ + if (!ENABLE_ARCH_6K) { + return false; + } + /* We UNDEF for these UNPREDICTABLE cases. */ + if (a->rt & 1) { + unallocated_encoding(s); + return true; + } + a->rt2 = a->rt + 1; + return op_strex(s, a, MO_64, false); +} + +static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a) +{ + return op_strex(s, a, MO_64, false); +} + +static bool trans_STREXB(DisasContext *s, arg_STREX *a) +{ + if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { + return false; + } + return op_strex(s, a, MO_8, false); +} + +static bool trans_STREXH(DisasContext *s, arg_STREX *a) +{ + if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { + return false; + } + return op_strex(s, a, MO_16, false); +} + +static bool trans_STLEX(DisasContext *s, arg_STREX *a) +{ + if (!ENABLE_ARCH_8) { + return false; + } + return op_strex(s, a, MO_32, true); +} + +static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a) +{ + if (!ENABLE_ARCH_8) { + return false; + } + /* We UNDEF for these UNPREDICTABLE cases. */ + if (a->rt & 1) { + unallocated_encoding(s); + return true; + } + a->rt2 = a->rt + 1; + return op_strex(s, a, MO_64, true); +} + +static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a) +{ + if (!ENABLE_ARCH_8) { + return false; + } + return op_strex(s, a, MO_64, true); +} + +static bool trans_STLEXB(DisasContext *s, arg_STREX *a) +{ + if (!ENABLE_ARCH_8) { + return false; + } + return op_strex(s, a, MO_8, true); +} + +static bool trans_STLEXH(DisasContext *s, arg_STREX *a) +{ + if (!ENABLE_ARCH_8) { + return false; + } + return op_strex(s, a, MO_16, true); +} + +static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 addr, tmp; + + if (!ENABLE_ARCH_8) { + return false; + } + /* We UNDEF for these UNPREDICTABLE cases. 
*/ + if (a->rn == 15 || a->rt == 15) { + unallocated_encoding(s); + return true; + } + + addr = load_reg(s, a->rn); + tmp = load_reg(s, a->rt); + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data); + disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite); + + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + return true; +} + +static bool trans_STL(DisasContext *s, arg_STL *a) +{ + return op_stl(s, a, MO_UL); +} + +static bool trans_STLB(DisasContext *s, arg_STL *a) +{ + return op_stl(s, a, MO_UB); +} + +static bool trans_STLH(DisasContext *s, arg_STL *a) +{ + return op_stl(s, a, MO_UW); +} + +static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 addr; + /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */ + bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M); + + /* We UNDEF for these UNPREDICTABLE cases. */ + if (a->rn == 15 || a->rt == 15 + || (!v8a && s->thumb && a->rt == 13) + || (mop == MO_64 + && (a->rt2 == 15 || a->rt == a->rt2 + || (!v8a && s->thumb && a->rt2 == 13)))) { + unallocated_encoding(s); + return true; + } + + addr = tcg_temp_local_new_i32(tcg_ctx); + load_reg_var(s, addr, a->rn); + tcg_gen_addi_i32(tcg_ctx, addr, addr, a->imm); + + gen_load_exclusive(s, a->rt, a->rt2, addr, mop); + tcg_temp_free_i32(tcg_ctx, addr); + + if (acq) { + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); + } + return true; +} + +static bool trans_LDREX(DisasContext *s, arg_LDREX *a) +{ + if (!ENABLE_ARCH_6) { + return false; + } + return op_ldrex(s, a, MO_32, false); +} + +static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a) +{ + if (!ENABLE_ARCH_6K) { + return false; + } + /* We UNDEF for these UNPREDICTABLE cases. */ + if (a->rt & 1) { + unallocated_encoding(s); + return true; + } + a->rt2 = a->rt + 1; + return op_ldrex(s, a, MO_64, false); +} + +static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a) +{ + return op_ldrex(s, a, MO_64, false); +} + +static bool trans_LDREXB(DisasContext *s, arg_LDREX *a) +{ + if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { + return false; + } + return op_ldrex(s, a, MO_8, false); +} + +static bool trans_LDREXH(DisasContext *s, arg_LDREX *a) +{ + if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { + return false; + } + return op_ldrex(s, a, MO_16, false); +} + +static bool trans_LDAEX(DisasContext *s, arg_LDREX *a) +{ + if (!ENABLE_ARCH_8) { + return false; + } + return op_ldrex(s, a, MO_32, true); +} + +static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a) +{ + if (!ENABLE_ARCH_8) { + return false; + } + /* We UNDEF for these UNPREDICTABLE cases. */ + if (a->rt & 1) { + unallocated_encoding(s); + return true; + } + a->rt2 = a->rt + 1; + return op_ldrex(s, a, MO_64, true); +} + +static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a) +{ + if (!ENABLE_ARCH_8) { + return false; + } + return op_ldrex(s, a, MO_64, true); +} + +static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a) +{ + if (!ENABLE_ARCH_8) { + return false; + } + return op_ldrex(s, a, MO_8, true); +} + +static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a) +{ + if (!ENABLE_ARCH_8) { + return false; + } + return op_ldrex(s, a, MO_16, true); +} + +static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 addr, tmp; + + if (!ENABLE_ARCH_8) { + return false; + } + /* We UNDEF for these UNPREDICTABLE cases. 
*/ + if (a->rn == 15 || a->rt == 15) { + unallocated_encoding(s); + return true; + } + + addr = load_reg(s, a->rn); + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data); + disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel); + tcg_temp_free_i32(tcg_ctx, addr); + + store_reg(s, a->rt, tmp); + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); + return true; +} + +static bool trans_LDA(DisasContext *s, arg_LDA *a) +{ + return op_lda(s, a, MO_UL); +} + +static bool trans_LDAB(DisasContext *s, arg_LDA *a) +{ + return op_lda(s, a, MO_UB); +} + +static bool trans_LDAH(DisasContext *s, arg_LDA *a) +{ + return op_lda(s, a, MO_UW); +} + +/* + * Media instructions + */ + +static bool trans_USADA8(DisasContext *s, arg_USADA8 *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1, t2; + + if (!ENABLE_ARCH_6) { + return false; + } + + t1 = load_reg(s, a->rn); + t2 = load_reg(s, a->rm); + gen_helper_usad8(tcg_ctx, t1, t1, t2); + tcg_temp_free_i32(tcg_ctx, t2); + if (a->ra != 15) { + t2 = load_reg(s, a->ra); + tcg_gen_add_i32(tcg_ctx, t1, t1, t2); + tcg_temp_free_i32(tcg_ctx, t2); + } + store_reg(s, a->rd, t1); + return true; +} + +static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + int width = a->widthm1 + 1; + int shift = a->lsb; + + if (!ENABLE_ARCH_6T2) { + return false; + } + if (shift + width > 32) { + /* UNPREDICTABLE; we choose to UNDEF */ + unallocated_encoding(s); + return true; + } + + tmp = load_reg(s, a->rn); + if (u) { + tcg_gen_extract_i32(tcg_ctx, tmp, tmp, shift, width); + } else { + tcg_gen_sextract_i32(tcg_ctx, tmp, tmp, shift, width); + } + store_reg(s, a->rd, tmp); + return true; +} + +static bool trans_SBFX(DisasContext *s, arg_SBFX *a) +{ + return op_bfx(s, a, false); +} + +static bool trans_UBFX(DisasContext *s, arg_UBFX *a) +{ + return op_bfx(s, a, true); +} + +static bool trans_BFCI(DisasContext *s, arg_BFCI *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + int msb = a->msb, lsb = a->lsb; + int width; + + if (!ENABLE_ARCH_6T2) { + return false; + } + if (msb < lsb) { + /* UNPREDICTABLE; we choose to UNDEF */ + unallocated_encoding(s); + return true; + } + + width = msb + 1 - lsb; + if (a->rn == 15) { + /* BFC */ + tmp = tcg_const_i32(tcg_ctx, 0); + } else { + /* BFI */ + tmp = load_reg(s, a->rn); + } + if (width != 32) { + TCGv_i32 tmp2 = load_reg(s, a->rd); + tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, lsb, width); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + store_reg(s, a->rd, tmp); + return true; +} + +static bool trans_UDF(DisasContext *s, arg_UDF *a) +{ + unallocated_encoding(s); + return true; +} + +/* + * Parallel addition and subtraction + */ + +static bool op_par_addsub(DisasContext *s, arg_rrr *a, + void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t0, t1; + + if (s->thumb + ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) + : !ENABLE_ARCH_6) { + return false; + } + + t0 = load_reg(s, a->rn); + t1 = load_reg(s, a->rm); + + gen(tcg_ctx, t0, t0, t1); + + tcg_temp_free_i32(tcg_ctx, t1); + store_reg(s, a->rd, t0); + return true; +} + +static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a, + void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, + TCGv_i32, TCGv_ptr)) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t0, t1; + TCGv_ptr ge; + + if (s->thumb + ? 
!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) + : !ENABLE_ARCH_6) { + return false; + } + + t0 = load_reg(s, a->rn); + t1 = load_reg(s, a->rm); + + ge = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, ge, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); + gen(tcg_ctx, t0, t0, t1, ge); + + tcg_temp_free_ptr(tcg_ctx, ge); + tcg_temp_free_i32(tcg_ctx, t1); + store_reg(s, a->rd, t0); + return true; +} + +#define DO_PAR_ADDSUB(NAME, helper) \ +static bool trans_##NAME(DisasContext *s, arg_rrr *a) \ +{ \ + return op_par_addsub(s, a, helper); \ +} + +#define DO_PAR_ADDSUB_GE(NAME, helper) \ +static bool trans_##NAME(DisasContext *s, arg_rrr *a) \ +{ \ + return op_par_addsub_ge(s, a, helper); \ +} + +DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16) +DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx) +DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx) +DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16) +DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8) +DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8) + +DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16) +DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx) +DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx) +DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16) +DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8) +DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8) + +DO_PAR_ADDSUB(QADD16, gen_helper_qadd16) +DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx) +DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx) +DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16) +DO_PAR_ADDSUB(QADD8, gen_helper_qadd8) +DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8) + +DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16) +DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx) +DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx) +DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16) +DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8) +DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8) + +DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16) +DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx) +DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx) +DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16) +DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8) +DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8) + +DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16) +DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx) +DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx) +DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16) +DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8) +DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8) + +#undef DO_PAR_ADDSUB +#undef DO_PAR_ADDSUB_GE + +/* + * Packing, unpacking, saturation, and reversal + */ + +static bool trans_PKH(DisasContext *s, arg_PKH *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tn, tm; + int shift = a->imm; + + if (s->thumb + ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) + : !ENABLE_ARCH_6) { + return false; + } + + tn = load_reg(s, a->rn); + tm = load_reg(s, a->rm); + if (a->tb) { + /* PKHTB */ + if (shift == 0) { + shift = 31; + } + tcg_gen_sari_i32(tcg_ctx, tm, tm, shift); + tcg_gen_deposit_i32(tcg_ctx, tn, tn, tm, 0, 16); + } else { + /* PKHBT */ + tcg_gen_shli_i32(tcg_ctx, tm, tm, shift); + tcg_gen_deposit_i32(tcg_ctx, tn, tm, tn, 0, 16); + } + tcg_temp_free_i32(tcg_ctx, tm); + store_reg(s, a->rd, tn); + return true; +} + +static bool op_sat(DisasContext *s, arg_sat *a, + void (*gen)(TCGContext *, TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32)) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp, satimm; + int shift = a->imm; + + if (!ENABLE_ARCH_6) { + return false; + } + + tmp = load_reg(s, a->rn); + if (a->sh) { + tcg_gen_sari_i32(tcg_ctx, tmp, tmp, shift ? 
shift : 31); + } else { + tcg_gen_shli_i32(tcg_ctx, tmp, tmp, shift); + } + + satimm = tcg_const_i32(tcg_ctx, a->satimm); + gen(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, satimm); + tcg_temp_free_i32(tcg_ctx, satimm); + + store_reg(s, a->rd, tmp); + return true; +} + +static bool trans_SSAT(DisasContext *s, arg_sat *a) +{ + return op_sat(s, a, gen_helper_ssat); +} + +static bool trans_USAT(DisasContext *s, arg_sat *a) +{ + return op_sat(s, a, gen_helper_usat); +} + +static bool trans_SSAT16(DisasContext *s, arg_sat *a) +{ + if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { + return false; + } + return op_sat(s, a, gen_helper_ssat16); +} + +static bool trans_USAT16(DisasContext *s, arg_sat *a) +{ + if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { + return false; + } + return op_sat(s, a, gen_helper_usat16); +} + +static bool op_xta(DisasContext *s, arg_rrr_rot *a, + void (*gen_extract)(TCGContext *, TCGv_i32, TCGv_i32), + void (*gen_add)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + if (!ENABLE_ARCH_6) { + return false; + } + + tmp = load_reg(s, a->rm); + /* + * TODO: In many cases we could do a shift instead of a rotate. + * Combined with a simple extend, that becomes an extract. + */ + tcg_gen_rotri_i32(tcg_ctx, tmp, tmp, a->rot * 8); + gen_extract(tcg_ctx, tmp, tmp); + + if (a->rn != 15) { + TCGv_i32 tmp2 = load_reg(s, a->rn); + gen_add(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + store_reg(s, a->rd, tmp); + return true; +} + +static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a) +{ + return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32); +} + +static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a) +{ + return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32); +} + +static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a) +{ + if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { + return false; + } + return op_xta(s, a, gen_helper_sxtb16, gen_add16); +} + +static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a) +{ + return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32); +} + +static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a) +{ + return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32); +} + +static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a) +{ + if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { + return false; + } + return op_xta(s, a, gen_helper_uxtb16, gen_add16); +} + +static bool trans_SEL(DisasContext *s, arg_rrr *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1, t2, t3; + + if (s->thumb + ? 
!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) + : !ENABLE_ARCH_6) { + return false; + } + + t1 = load_reg(s, a->rn); + t2 = load_reg(s, a->rm); + t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, t3, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); + gen_helper_sel_flags(tcg_ctx, t1, t3, t1, t2); + tcg_temp_free_i32(tcg_ctx, t3); + tcg_temp_free_i32(tcg_ctx, t2); + store_reg(s, a->rd, t1); + return true; +} + +static bool op_rr(DisasContext *s, arg_rr *a, + void (*gen)(TCGContext *, TCGv_i32, TCGv_i32)) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + tmp = load_reg(s, a->rm); + gen(tcg_ctx, tmp, tmp); + store_reg(s, a->rd, tmp); + return true; +} + +static bool trans_REV(DisasContext *s, arg_rr *a) +{ + if (!ENABLE_ARCH_6) { + return false; + } + return op_rr(s, a, tcg_gen_bswap32_i32); +} + +static bool trans_REV16(DisasContext *s, arg_rr *a) +{ + if (!ENABLE_ARCH_6) { + return false; + } + return op_rr(s, a, gen_rev16); +} + +static bool trans_REVSH(DisasContext *s, arg_rr *a) +{ + if (!ENABLE_ARCH_6) { + return false; + } + return op_rr(s, a, gen_revsh); +} + +static bool trans_RBIT(DisasContext *s, arg_rr *a) +{ + if (!ENABLE_ARCH_6T2) { + return false; + } + return op_rr(s, a, gen_helper_rbit); +} + +/* + * Signed multiply, signed and unsigned divide + */ + +static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1, t2; + + if (!ENABLE_ARCH_6) { + return false; + } + + t1 = load_reg(s, a->rn); + t2 = load_reg(s, a->rm); + if (m_swap) { + gen_swap_half(tcg_ctx, t2); + } + gen_smul_dual(tcg_ctx, t1, t2); + + if (sub) { + /* This subtraction cannot overflow. */ + tcg_gen_sub_i32(tcg_ctx, t1, t1, t2); + } else { + /* + * This addition cannot overflow 32 bits; however it may + * overflow considered as a signed operation, in which case + * we must set the Q flag. 
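+ * For instance, two positive dual products can sum past 0x7fffffff
+ * (0x8000 * 0x8000 twice gives 2^31): the 32-bit result wraps
+ * negative and add_setq records exactly that case in the Q flag.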
+ */ + gen_helper_add_setq(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t2); + } + tcg_temp_free_i32(tcg_ctx, t2); + + if (a->ra != 15) { + t2 = load_reg(s, a->ra); + gen_helper_add_setq(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t2); + tcg_temp_free_i32(tcg_ctx, t2); + } + store_reg(s, a->rd, t1); + return true; +} + +static bool trans_SMLAD(DisasContext *s, arg_rrrr *a) +{ + return op_smlad(s, a, false, false); +} + +static bool trans_SMLADX(DisasContext *s, arg_rrrr *a) +{ + return op_smlad(s, a, true, false); +} + +static bool trans_SMLSD(DisasContext *s, arg_rrrr *a) +{ + return op_smlad(s, a, false, true); +} + +static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a) +{ + return op_smlad(s, a, true, true); +} + +static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1, t2; + TCGv_i64 l1, l2; + + if (!ENABLE_ARCH_6) { + return false; + } + + t1 = load_reg(s, a->rn); + t2 = load_reg(s, a->rm); + if (m_swap) { + gen_swap_half(tcg_ctx, t2); + } + gen_smul_dual(tcg_ctx, t1, t2); + + l1 = tcg_temp_new_i64(tcg_ctx); + l2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext_i32_i64(tcg_ctx, l1, t1); + tcg_gen_ext_i32_i64(tcg_ctx, l2, t2); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); + + if (sub) { + tcg_gen_sub_i64(tcg_ctx, l1, l1, l2); + } else { + tcg_gen_add_i64(tcg_ctx, l1, l1, l2); + } + tcg_temp_free_i64(tcg_ctx, l2); + + gen_addq(s, l1, a->ra, a->rd); + gen_storeq_reg(s, a->ra, a->rd, l1); + tcg_temp_free_i64(tcg_ctx, l1); + return true; +} + +static bool trans_SMLALD(DisasContext *s, arg_rrrr *a) +{ + return op_smlald(s, a, false, false); +} + +static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a) +{ + return op_smlald(s, a, true, false); +} + +static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a) +{ + return op_smlald(s, a, false, true); +} + +static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a) +{ + return op_smlald(s, a, true, true); +} + +static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1, t2; + + if (s->thumb + ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP) + : !ENABLE_ARCH_6) { + return false; + } + + t1 = load_reg(s, a->rn); + t2 = load_reg(s, a->rm); + tcg_gen_muls2_i32(tcg_ctx, t2, t1, t1, t2); + + if (a->ra != 15) { + TCGv_i32 t3 = load_reg(s, a->ra); + if (sub) { + /* + * For SMMLS, we need a 64-bit subtract. Borrow caused by + * a non-zero multiplicand lowpart, and the correct result + * lowpart for rounding. + */ + TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); + tcg_gen_sub2_i32(tcg_ctx, t2, t1, zero, t3, t2, t1); + tcg_temp_free_i32(tcg_ctx, zero); + } else { + tcg_gen_add_i32(tcg_ctx, t1, t1, t3); + } + tcg_temp_free_i32(tcg_ctx, t3); + } + if (round) { + /* + * Adding 0x80000000 to the 64-bit quantity means that we have + * carry in to the high word when the low word has the msb set. 
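+ * Adding (low >> 31) into the high word therefore yields the + * rounded high result without forming the full 64-bit sum.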
+ */ + tcg_gen_shri_i32(tcg_ctx, t2, t2, 31); + tcg_gen_add_i32(tcg_ctx, t1, t1, t2); + } + tcg_temp_free_i32(tcg_ctx, t2); + store_reg(s, a->rd, t1); + return true; +} + +static bool trans_SMMLA(DisasContext *s, arg_rrrr *a) +{ + return op_smmla(s, a, false, false); +} + +static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a) +{ + return op_smmla(s, a, true, false); +} + +static bool trans_SMMLS(DisasContext *s, arg_rrrr *a) +{ + return op_smmla(s, a, false, true); +} + +static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a) +{ + return op_smmla(s, a, true, true); +} + +static bool op_div(DisasContext *s, arg_rrr *a, bool u) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1, t2; + + if (s->thumb + ? !dc_isar_feature(aa32_thumb_div, s) + : !dc_isar_feature(aa32_arm_div, s)) { + return false; + } + + t1 = load_reg(s, a->rn); + t2 = load_reg(s, a->rm); + if (u) { + gen_helper_udiv(tcg_ctx, t1, t1, t2); + } else { + gen_helper_sdiv(tcg_ctx, t1, t1, t2); + } + tcg_temp_free_i32(tcg_ctx, t2); + store_reg(s, a->rd, t1); + return true; +} + +static bool trans_SDIV(DisasContext *s, arg_rrr *a) +{ + return op_div(s, a, false); +} + +static bool trans_UDIV(DisasContext *s, arg_rrr *a) +{ + return op_div(s, a, true); +} + +/* + * Block data transfer + */ + +static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 addr = load_reg(s, a->rn); + + if (a->b) { + if (a->i) { + /* pre increment */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } else { + /* pre decrement */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4)); + } + } else if (!a->i && n != 1) { + /* post decrement */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4)); + } + + if (s->v8m_stackcheck && a->rn == 13 && a->w) { + /* + * If the writeback is incrementing SP rather than + * decrementing it, and the initial SP is below the + * stack limit but the final written-back SP would + * be above, then we must not perform any memory + * accesses, but it is IMPDEF whether we generate + * an exception. We choose to do so in this case. + * At this point 'addr' is the lowest address, so + * either the original SP (if incrementing) or our + * final SP (if decrementing), so that's what we check. + */ + gen_helper_v8m_stackcheck(tcg_ctx, tcg_ctx->cpu_env, addr); + } + + return addr; +} + +static void op_addr_block_post(DisasContext *s, arg_ldst_block *a, + TCGv_i32 addr, int n) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (a->w) { + /* write back */ + if (!a->b) { + if (a->i) { + /* post increment */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } else { + /* post decrement */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4)); + } + } else if (!a->i && n != 1) { + /* pre decrement */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4)); + } + store_reg(s, a->rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } +} + +static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int i, j, n, list, mem_idx; + bool user = a->u; + TCGv_i32 addr, tmp, tmp2; + + if (user) { + /* STM (user) */ + if (IS_USER(s)) { + /* Only usable in supervisor mode. 
*/ + unallocated_encoding(s); + return true; + } + } + + list = a->list; + n = ctpop16(list); + if (n < min_n || a->rn == 15) { + unallocated_encoding(s); + return true; + } + + addr = op_addr_block_pre(s, a, n); + mem_idx = get_mem_index(s); + + for (i = j = 0; i < 16; i++) { + if (!(list & (1 << i))) { + continue; + } + + if (user && i != 15) { + tmp = tcg_temp_new_i32(tcg_ctx); + tmp2 = tcg_const_i32(tcg_ctx, i); + gen_helper_get_user_reg(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } else { + tmp = load_reg(s, i); + } + gen_aa32_st32(s, tmp, addr, mem_idx); + tcg_temp_free_i32(tcg_ctx, tmp); + + /* No need to add after the last transfer. */ + if (++j != n) { + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } + } + + op_addr_block_post(s, a, addr, n); + return true; +} + +static bool trans_STM(DisasContext *s, arg_ldst_block *a) +{ + /* BitCount(list) < 1 is UNPREDICTABLE */ + return op_stm(s, a, 1); +} + +static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a) +{ + /* Writeback register in register list is UNPREDICTABLE for T32. */ + if (a->w && (a->list & (1 << a->rn))) { + unallocated_encoding(s); + return true; + } + /* BitCount(list) < 2 is UNPREDICTABLE */ + return op_stm(s, a, 2); +} + +static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int i, j, n, list, mem_idx; + bool loaded_base; + bool user = a->u; + bool exc_return = false; + TCGv_i32 addr, tmp, tmp2, loaded_var; + + if (user) { + /* LDM (user), LDM (exception return) */ + if (IS_USER(s)) { + /* Only usable in supervisor mode. */ + unallocated_encoding(s); + return true; + } + if (extract32(a->list, 15, 1)) { + exc_return = true; + user = false; + } else { + /* LDM (user) does not allow writeback. */ + if (a->w) { + unallocated_encoding(s); + return true; + } + } + } + + list = a->list; + n = ctpop16(list); + if (n < min_n || a->rn == 15) { + unallocated_encoding(s); + return true; + } + + addr = op_addr_block_pre(s, a, n); + mem_idx = get_mem_index(s); + loaded_base = false; + loaded_var = NULL; + + for (i = j = 0; i < 16; i++) { + if (!(list & (1 << i))) { + continue; + } + + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, mem_idx); + if (user) { + tmp2 = tcg_const_i32(tcg_ctx, i); + gen_helper_set_user_reg(tcg_ctx, tcg_ctx->cpu_env, tmp2, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + } else if (i == a->rn) { + loaded_var = tmp; + loaded_base = true; + } else if (i == 15 && exc_return) { + store_pc_exc_ret(s, tmp); + } else { + store_reg_from_load(s, i, tmp); + } + + /* No need to add after the last transfer. */ + if (++j != n) { + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } + } + + op_addr_block_post(s, a, addr, n); + + if (loaded_base) { + /* Note that we reject base == pc above. */ + store_reg(s, a->rn, loaded_var); + } + + if (exc_return) { + /* Restore CPSR from SPSR. */ + tmp = load_cpu_field(tcg_ctx, spsr); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(tcg_ctx); + } + gen_helper_cpsr_write_eret(tcg_ctx, tcg_ctx->cpu_env, tmp); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + // gen_io_end(tcg_ctx); + } + tcg_temp_free_i32(tcg_ctx, tmp); + /* Must exit loop to check un-masked IRQs */ + s->base.is_jmp = DISAS_EXIT; + } + return true; +} + +static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a) +{ + /* + * Writeback register in register list is UNPREDICTABLE + * for ArchVersion() >= 7. 
Prior to v7, A32 would write + * an UNKNOWN value to the base register. + */ + if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) { + unallocated_encoding(s); + return true; + } + /* BitCount(list) < 1 is UNPREDICTABLE */ + return do_ldm(s, a, 1); +} + +static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a) +{ + /* Writeback register in register list is UNPREDICTABLE for T32. */ + if (a->w && (a->list & (1 << a->rn))) { + unallocated_encoding(s); + return true; + } + /* BitCount(list) < 2 is UNPREDICTABLE */ + return do_ldm(s, a, 2); +} + +static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a) +{ + /* Writeback is conditional on the base register not being loaded. */ + a->w = !(a->list & (1 << a->rn)); + /* BitCount(list) < 1 is UNPREDICTABLE */ + return do_ldm(s, a, 1); +} + +/* + * Branch, branch with link + */ + +static bool trans_B(DisasContext *s, arg_i *a) +{ + gen_jmp(s, read_pc(s) + a->imm); + return true; +} + +static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a) +{ + /* This has cond from encoding, required to be outside IT block. */ + if (a->cond >= 0xe) { + return false; + } + if (s->condexec_mask) { + unallocated_encoding(s); + return true; + } + arm_skip_unless(s, a->cond); + gen_jmp(s, read_pc(s) + a->imm); + return true; +} + +static bool trans_BL(DisasContext *s, arg_i *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->base.pc_next | s->thumb); + gen_jmp(s, read_pc(s) + a->imm); + return true; +} + +static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + /* For A32, ARCH(5) is checked near the start of the uncond block. */ + if (s->thumb && (a->imm & 2)) { + return false; + } + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->base.pc_next | s->thumb); + tmp = tcg_const_i32(tcg_ctx, !s->thumb); + store_cpu_field(tcg_ctx, tmp, thumb); + gen_jmp(s, (read_pc(s) & ~3) + a->imm); + return true; +} + +static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], read_pc(s) + (a->imm << 12)); + return true; +} + +static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + + assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); + tcg_gen_addi_i32(tcg_ctx, tmp, tcg_ctx->cpu_R[14], (a->imm << 1) | 1); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->base.pc_next | 1); + gen_bx(s, tmp); + return true; +} + +static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + + assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2)); + if (!ENABLE_ARCH_5) { + return false; + } + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_addi_i32(tcg_ctx, tmp, tcg_ctx->cpu_R[14], a->imm << 1); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xfffffffc); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->base.pc_next | 1); + gen_bx(s, tmp); + return true; +} + +static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 addr, tmp; + + tmp = load_reg(s, a->rm); + if (half) { + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp); + } + addr = load_reg(s, a->rn); + tcg_gen_add_i32(tcg_ctx, addr, addr, tmp); + + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), + half ? 
MO_UW | s->be_data : MO_UB); + tcg_temp_free_i32(tcg_ctx, addr); + + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp); + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, read_pc(s)); + store_reg(s, 15, tmp); + return true; +} + +static bool trans_TBB(DisasContext *s, arg_tbranch *a) +{ + return op_tbranch(s, a, false); +} + +static bool trans_TBH(DisasContext *s, arg_tbranch *a) +{ + return op_tbranch(s, a, true); +} + +static bool trans_CBZ(DisasContext *s, arg_CBZ *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = load_reg(s, a->rn); + + arm_gen_condlabel(s); + tcg_gen_brcondi_i32(tcg_ctx, a->nz ? TCG_COND_EQ : TCG_COND_NE, + tmp, 0, s->condlabel); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_jmp(s, read_pc(s) + a->imm); + return true; +} + +/* + * Supervisor call - both T32 & A32 come here so we need to check + * which mode we are in when checking for semihosting. + */ + +static bool trans_SVC(DisasContext *s, arg_SVC *a) +{ + const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456; + + if (!arm_dc_feature(s, ARM_FEATURE_M) && false && // semihosting_enabled() && + !IS_USER(s) && + (a->imm == semihost_imm)) { + gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST); + } else { + gen_set_pc_im(s, s->base.pc_next); + s->svc_imm = a->imm; + s->base.is_jmp = DISAS_SWI; + } + return true; +} + +/* + * Unconditional system instructions + */ + +static bool trans_RFE(DisasContext *s, arg_RFE *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + static const int8_t pre_offset[4] = { + /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4 + }; + static const int8_t post_offset[4] = { + /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0 + }; + TCGv_i32 addr, t1, t2; + + if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + if (IS_USER(s)) { + unallocated_encoding(s); + return true; + } + + addr = load_reg(s, a->rn); + tcg_gen_addi_i32(tcg_ctx, addr, addr, pre_offset[a->pu]); + + /* Load PC into t1 and CPSR into t2. */ + t1 = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, t1, addr, get_mem_index(s)); + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + t2 = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, t2, addr, get_mem_index(s)); + + if (a->w) { + /* Base writeback. */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, post_offset[a->pu]); + store_reg(s, a->rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + gen_rfe(s, t1, t2); + return true; +} + +static bool trans_SRS(DisasContext *s, arg_SRS *a) +{ + if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + gen_srs(s, a->mode, a->pu, a->w); + return true; +} + +static bool trans_CPS(DisasContext *s, arg_CPS *a) +{ + uint32_t mask, val; + + if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + if (IS_USER(s)) { + /* Implemented as NOP in user mode. */ + return true; + } + /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */ + + mask = val = 0; + if (a->imod & 2) { + if (a->A) { + mask |= CPSR_A; + } + if (a->I) { + mask |= CPSR_I; + } + if (a->F) { + mask |= CPSR_F; + } + if (a->imod & 1) { + val |= mask; + } + } + if (a->M) { + mask |= CPSR_M; + val |= a->mode; + } + if (mask) { + gen_set_psr_im(s, mask, 0, val); + } + return true; +} + +static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp, addr, el; + + if (!arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + if (IS_USER(s)) { + /* Implemented as NOP in user mode. 
*/ + return true; + } + + tmp = tcg_const_i32(tcg_ctx, a->im); + /* FAULTMASK */ + if (a->F) { + addr = tcg_const_i32(tcg_ctx, 19); + gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + } + /* PRIMASK */ + if (a->I) { + addr = tcg_const_i32(tcg_ctx, 16); + gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + } + el = tcg_const_i32(tcg_ctx, s->current_el); + gen_helper_rebuild_hflags_m32(tcg_ctx, tcg_ctx->cpu_env, el); + tcg_temp_free_i32(tcg_ctx, el); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_lookup_tb(s); + return true; +} + +/* + * Clear-Exclusive, Barriers + */ + +static bool trans_CLREX(DisasContext *s, arg_CLREX *a) +{ + if (s->thumb + ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M) + : !ENABLE_ARCH_6K) { + return false; + } + gen_clrex(s); + return true; +} + +static bool trans_DSB(DisasContext *s, arg_DSB *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); + return true; +} + +static bool trans_DMB(DisasContext *s, arg_DMB *a) +{ + return trans_DSB(s, NULL); +} + +static bool trans_ISB(DisasContext *s, arg_ISB *a) +{ + if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) { + return false; + } + /* + * We need to break the TB after this insn to execute + * self-modifying code correctly and also to take + * any pending interrupts immediately. + */ + gen_goto_tb(s, 0, s->base.pc_next); + return true; +} + +static bool trans_SB(DisasContext *s, arg_SB *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!dc_isar_feature(aa32_sb, s)) { + return false; + } + /* + * TODO: There is no speculation barrier opcode + * for TCG; MB and end the TB instead. + */ + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); + gen_goto_tb(s, 0, s->base.pc_next); + return true; +} + +static bool trans_SETEND(DisasContext *s, arg_SETEND *a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!ENABLE_ARCH_6) { + return false; + } + if (a->E != (s->be_data == MO_BE)) { + gen_helper_setend(tcg_ctx, tcg_ctx->cpu_env); + s->base.is_jmp = DISAS_UPDATE; + } + return true; +} + +/* + * Preload instructions + * All are nops, contingent on the appropriate arch level. + */ + +static bool trans_PLD(DisasContext *s, arg_PLD *a) +{ + return ENABLE_ARCH_5TE; +} + +static bool trans_PLDW(DisasContext *s, arg_PLD *a) +{ + return arm_dc_feature(s, ARM_FEATURE_V7MP); +} + +static bool trans_PLI(DisasContext *s, arg_PLD *a) +{ + return ENABLE_ARCH_7; +} + +/* + * If-then + */ + +static bool trans_IT(DisasContext *s, arg_IT *a) +{ + int cond_mask = a->cond_mask; + + /* + * No actual code generated for this insn, just setup state. + * + * Combinations of firstcond and mask which set up an 0b1111 + * condition are UNPREDICTABLE; we take the CONSTRAINED + * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110, + * i.e. both meaning "execute always". + */ + s->condexec_cond = (cond_mask >> 4) & 0xe; + s->condexec_mask = cond_mask & 0x1f; + return true; +} + +/* + * Legacy decoder. + */ + +static void disas_arm_insn(DisasContext *s, unsigned int insn) +{ + unsigned int cond = insn >> 28; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + /* M variants do not implement ARM mode; this must raise the INVSTATE + * UsageFault exception. 
+ */ + if (arm_dc_feature(s, ARM_FEATURE_M)) { + gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(), + default_exception_el(s)); + return; + } + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_CODE, s->pc_curr)) { + gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, s->uc, s->pc_curr); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + if (cond == 0xf) { + /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we + * choose to UNDEF. In ARMv5 and above the space is used + * for miscellaneous unconditional instructions. + */ + ARCH(5); + + /* Unconditional instructions. */ + /* TODO: Perhaps merge these into one decodetree output file. */ + if (disas_a32_uncond(s, insn) || + disas_vfp_uncond(s, insn)) { + return; + } + /* fall back to legacy decoder */ + + if (((insn >> 25) & 7) == 1) { + /* NEON Data processing. */ + if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { + goto illegal_op; + } + + if (disas_neon_data_insn(s, insn)) { + goto illegal_op; + } + return; + } + if ((insn & 0x0f100000) == 0x04000000) { + /* NEON load/store. */ + if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { + goto illegal_op; + } + + if (disas_neon_ls_insn(s, insn)) { + goto illegal_op; + } + return; + } + if ((insn & 0x0e000f00) == 0x0c000100) { + if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { + /* iWMMXt register transfer. */ + if (extract32(s->c15_cpar, 1, 1)) { + if (!disas_iwmmxt_insn(s, insn)) { + return; + } + } + } + } else if ((insn & 0x0e000a00) == 0x0c000800 + && arm_dc_feature(s, ARM_FEATURE_V8)) { + if (disas_neon_insn_3same_ext(s, insn)) { + goto illegal_op; + } + return; + } else if ((insn & 0x0f000a00) == 0x0e000800 + && arm_dc_feature(s, ARM_FEATURE_V8)) { + if (disas_neon_insn_2reg_scalar_ext(s, insn)) { + goto illegal_op; + } + return; + } + goto illegal_op; + } + if (cond != 0xe) { + /* if not always execute, we generate a conditional jump to + next instruction */ + arm_skip_unless(s, cond); + } + + /* TODO: Perhaps merge these into one decodetree output file. */ + if (disas_a32(s, insn) || + disas_vfp(s, insn)) { + return; + } + /* fall back to legacy decoder */ + + switch ((insn >> 24) & 0xf) { + case 0xc: + case 0xd: + case 0xe: + if (((insn >> 8) & 0xe) == 10) { + /* VFP, but failed disas_vfp. */ + goto illegal_op; + } + if (disas_coproc_insn(s, insn)) { + /* Coprocessor. */ + goto illegal_op; + } + break; + default: + illegal_op: + unallocated_encoding(s); + break; + } +} + +static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn) +{ + struct uc_struct *uc = s->uc; + /* + * Return true if this is a 16 bit instruction. We must be precise + * about this (matching the decode). + */ + if ((insn >> 11) < 0x1d) { + /* Definitely a 16-bit instruction */ + return true; + } + + /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the + * first half of a 32-bit Thumb insn. Thumb-1 cores might + * end up actually treating this as two 16-bit insns, though, + * if it's half of a bl/blx pair that might span a page boundary. + */ + if (arm_dc_feature(s, ARM_FEATURE_THUMB2) || + arm_dc_feature(s, ARM_FEATURE_M)) { + /* Thumb2 cores (including all M profile ones) always treat + * 32-bit insns as 32-bit. + */ + return false; + } + + if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) { + /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix + * is not on the next page; we merge this into a 32-bit + * insn. 
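+ * (The TARGET_PAGE_SIZE - 3 bound guarantees that the 2-byte + * suffix starting at pc + 2 still lies entirely within the + * current page.)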
+ */ + return false; + } + /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF); + * 0b1111_1xxx_xxxx_xxxx : BL suffix; + * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page + * -- handle as single 16 bit insn + */ + return true; +} + +/* Translate a 32-bit thumb instruction. */ +static void disas_thumb2_insn(DisasContext *s, uint32_t insn) +{ + /* + * ARMv6-M supports a limited subset of Thumb2 instructions. + * Other Thumb1 architectures allow only 32-bit + * combined BL/BLX prefix and suffix. + */ + if (arm_dc_feature(s, ARM_FEATURE_M) && + !arm_dc_feature(s, ARM_FEATURE_V7)) { + int i; + bool found = false; + static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */, + 0xf3b08040 /* dsb */, + 0xf3b08050 /* dmb */, + 0xf3b08060 /* isb */, + 0xf3e08000 /* mrs */, + 0xf000d000 /* bl */}; + static const uint32_t armv6m_mask[] = {0xffe0d000, + 0xfff0d0f0, + 0xfff0d0f0, + 0xfff0d0f0, + 0xffe0d000, + 0xf800d000}; + + for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) { + if ((insn & armv6m_mask[i]) == armv6m_insn[i]) { + found = true; + break; + } + } + if (!found) { + goto illegal_op; + } + } else if ((insn & 0xf800e800) != 0xf000e800) { + ARCH(6T2); + } + + /* + * TODO: Perhaps merge these into one decodetree output file. + * Note disas_vfp is written for a32 with cond field in the + * top nibble. The t32 encoding requires 0xe in the top nibble. + */ + if (disas_t32(s, insn) || + disas_vfp_uncond(s, insn) || + ((insn >> 28) == 0xe && disas_vfp(s, insn))) { + return; + } + /* fall back to legacy decoder */ + + switch ((insn >> 25) & 0xf) { + case 0: case 1: case 2: case 3: + /* 16-bit instructions. Should never happen. */ + abort(); + case 6: case 7: case 14: case 15: + /* Coprocessor. */ + if (arm_dc_feature(s, ARM_FEATURE_M)) { + /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */ + if (extract32(insn, 24, 2) == 3) { + goto illegal_op; /* op0 = 0b11 : unallocated */ + } + + if (((insn >> 8) & 0xe) == 10 && + dc_isar_feature(aa32_fpsp_v2, s)) { + /* FP, and the CPU supports it */ + goto illegal_op; + } else { + /* All other insns: NOCP */ + gen_exception_insn(s, s->pc_curr, EXCP_NOCP, + syn_uncategorized(), + default_exception_el(s)); + } + break; + } + if ((insn & 0xfe000a00) == 0xfc000800 + && arm_dc_feature(s, ARM_FEATURE_V8)) { + /* The Thumb2 and ARM encodings are identical. */ + if (disas_neon_insn_3same_ext(s, insn)) { + goto illegal_op; + } + } else if ((insn & 0xff000a00) == 0xfe000800 + && arm_dc_feature(s, ARM_FEATURE_V8)) { + /* The Thumb2 and ARM encodings are identical. */ + if (disas_neon_insn_2reg_scalar_ext(s, insn)) { + goto illegal_op; + } + } else if (((insn >> 24) & 3) == 3) { + /* Translate into the equivalent ARM encoding. */ + insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28); + if (disas_neon_data_insn(s, insn)) { + goto illegal_op; + } + } else if (((insn >> 8) & 0xe) == 10) { + /* VFP, but failed disas_vfp. */ + goto illegal_op; + } else { + if (insn & (1 << 28)) + goto illegal_op; + if (disas_coproc_insn(s, insn)) { + goto illegal_op; + } + } + break; + case 12: + if ((insn & 0x01100000) == 0x01000000) { + if (disas_neon_ls_insn(s, insn)) { + goto illegal_op; + } + break; + } + goto illegal_op; + default: + illegal_op: + unallocated_encoding(s); + } +} + +static void disas_thumb_insn(DisasContext *s, uint32_t insn) +{ + if (!disas_t16(s, insn)) { + unallocated_encoding(s); + } +} + +static bool insn_crosses_page(CPUARMState *env, DisasContext *s) +{ + /* Return true if the insn at dc->base.pc_next might cross a page boundary. 
+ * (False positives are OK, false negatives are not.) + * We know this is a Thumb insn, and our caller ensures we are + * only called if dc->base.pc_next is less than 4 bytes from the page + * boundary, so we cross the page if the first 16 bits indicate + * that this is a 32 bit insn. + */ + uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b); + + return !thumb_insn_is_16bit(s, s->base.pc_next, insn); +} + +static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = cs->uc; + TCGContext *tcg_ctx = cs->uc->tcg_ctx; + CPUARMState *env = cs->env_ptr; + ARMCPU *cpu = env_archcpu(env); + uint32_t tb_flags = dc->base.tb->flags; + uint32_t condexec, core_mmu_idx; + + // unicorn handle + dc->uc = uc; + dc->isar = &cpu->isar; + dc->condjmp = 0; + + dc->aarch64 = 0; + /* If we are coming from secure EL0 in a system with a 32-bit EL3, then + * there is no secure EL1, so we route exceptions to EL3. + */ + dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) && + !arm_el_is_aa64(env, 3); + dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB); + dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE; + condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC); + dc->condexec_mask = (condexec & 0xf) << 1; + dc->condexec_cond = condexec >> 4; + + core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX); + dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx); + dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); + dc->user = (dc->current_el == 0); + dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL); + + if (arm_feature(env, ARM_FEATURE_M)) { + dc->vfp_enabled = 1; + dc->be_data = MO_TE; + dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER); + dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && + regime_is_secure(env, dc->mmu_idx); + dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK); + dc->v8m_fpccr_s_wrong = + FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG); + dc->v7m_new_fp_ctxt_needed = + FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED); + dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT); + } else { + dc->be_data = + FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE; + dc->debug_target_el = + FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL); + dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B); + dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE); + dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS); + dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN); + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR); + } else { + dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN); + dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE); + } + } + dc->cp_regs = cpu->cp_regs; + dc->features = env->features; + + /* Single step state. The code-generation logic here is: + * SS_ACTIVE == 0: + * generate code with no special handling for single-stepping (except + * that anything that can make us go to SS_ACTIVE == 1 must end the TB; + * this happens anyway because those changes are all system register or + * PSTATE writes). 
+ * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) + * emit code for one insn + * emit code to clear PSTATE.SS + * emit code to generate software step exception for completed step + * end TB (as usual for having generated an exception) + * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) + * emit code to generate a software step exception + * end the TB + */ + dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE); + dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS); + dc->is_ldex = false; + + dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK; + + /* If architectural single step active, limit to 1. */ + if (is_singlestepping(dc)) { + dc->base.max_insns = 1; + } + + /* ARM is a fixed-length ISA. Bound the number of insns to execute + to those left on the page. */ + if (!dc->thumb) { +#ifdef _MSC_VER + int bound = (0 - (dc->base.pc_first | TARGET_PAGE_MASK)) / 4; +#else + int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; +#endif + dc->base.max_insns = MIN(dc->base.max_insns, bound); + } + + tcg_ctx->cpu_V0 = tcg_temp_new_i64(tcg_ctx); + tcg_ctx->cpu_V1 = tcg_temp_new_i64(tcg_ctx); + /* FIXME: cpu_M0 can probably be the same as cpu_V0. */ + tcg_ctx->cpu_M0 = tcg_temp_new_i64(tcg_ctx); +} + +static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + /* A note on handling of the condexec (IT) bits: + * + * We want to avoid the overhead of having to write the updated condexec + * bits back to the CPUARMState for every instruction in an IT block. So: + * (1) if the condexec bits are not already zero then we write + * zero back into the CPUARMState now. This avoids complications trying + * to do it at the end of the block. (For example if we don't do this + * it's hard to identify whether we can safely skip writing condexec + * at the end of the TB, which we definitely want to do for the case + * where a TB doesn't do anything with the IT state at all.) + * (2) if we are going to leave the TB then we call gen_set_condexec() + * which will write the correct value into CPUARMState if zero is wrong. + * This is done both for leaving the TB at the end, and for leaving + * it because of an exception we know will happen, which is done in + * gen_exception_insn(). The latter is necessary because we need to + * leave the TB with the PC/IT state just prior to execution of the + * instruction which caused the exception. + * (3) if we leave the TB unexpectedly (eg a data abort on a load) + * then the CPUARMState will be wrong and we need to reset it. + * This is handled in the same way as restoration of the + * PC in these situations; we save the value of the condexec bits + * for each PC via tcg_gen_insn_start(), and restore_state_to_opc() + * then uses this to restore them after an exception. + * + * Note that there are no instructions which can read the condexec + * bits, and none which can write non-static values to them, so + * we don't need to care about whether CPUARMState is correct in the + * middle of a TB. + */ + + /* Reset the conditional execution bits immediately. This avoids + complications trying to do it at the end of the block. 
*/ + if (dc->condexec_mask || dc->condexec_cond) { + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + store_cpu_field(tcg_ctx, tmp, condexec_bits); + } +} + +static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, + (dc->condexec_cond << 4) | (dc->condexec_mask >> 1), + 0); + dc->insn_start = tcg_last_op(tcg_ctx); +} + +static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, + const CPUBreakpoint *bp) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + if (bp->flags & BP_CPU) { + gen_set_condexec(dc); + gen_set_pc_im(dc, dc->base.pc_next); + gen_helper_check_breakpoints(tcg_ctx, tcg_ctx->cpu_env); + /* End the TB early; it's likely not going to be executed */ + dc->base.is_jmp = DISAS_TOO_MANY; + } else { + gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG); + /* The address covered by the breakpoint must be + included in [tb->pc, tb->pc + tb->size) in order + for it to be properly cleared -- thus we + increment the PC here so that the logic setting + tb->size below does the right thing. */ + /* TODO: Advance PC by correct instruction length to + * avoid disassembler error messages */ + dc->base.pc_next += 2; + dc->base.is_jmp = DISAS_NORETURN; + } + + return true; +} + +static bool arm_pre_translate_insn(DisasContext *dc) +{ + if (dc->ss_active && !dc->pstate_ss) { + /* Singlestep state is Active-pending. + * If we're in this state at the start of a TB then either + * a) we just took an exception to an EL which is being debugged + * and this is the first insn in the exception handler + * b) debug exceptions were masked and we just unmasked them + * without changing EL (eg by clearing PSTATE.D) + * In either case we're going to take a swstep exception in the + * "did not step an insn" case, and so the syndrome ISV and EX + * bits should be zero. + */ + assert(dc->base.num_insns == 1); + gen_swstep_exception(dc, 0, 0); + dc->base.is_jmp = DISAS_NORETURN; + return true; + } + + return false; +} + +static void arm_post_translate_insn(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + if (dc->condjmp && !dc->base.is_jmp) { + gen_set_label(tcg_ctx, dc->condlabel); + dc->condjmp = 0; + } + translator_loop_temp_check(&dc->base); +} + +static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + CPUARMState *env = cpu->env_ptr; + unsigned int insn; + + if (arm_pre_translate_insn(dc)) { + return; + } + + // Unicorn: end address tells us to stop emulation + if (dcbase->pc_next == dc->uc->addr_end) { + // imitate WFI instruction to halt emulation + dcbase->is_jmp = DISAS_WFI; + } else { + dc->pc_curr = dc->base.pc_next; + insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b); + dc->insn = insn; + dc->base.pc_next += 4; + disas_arm_insn(dc, insn); + + arm_post_translate_insn(dc); + + /* ARM is a fixed-length ISA. We performed the cross-page check + in init_disas_context by adjusting max_insns. */ + } +} + +static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn) +{ + /* Return true if this Thumb insn is always unconditional, + * even inside an IT block. This is true of only a very few + * instructions: BKPT, HLT, and SG. 
+ * + * A larger class of instructions are UNPREDICTABLE if used + * inside an IT block; we do not need to detect those here, because + * what we do by default (perform the cc check and update the IT + * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE + * choice for those situations. + * + * insn is either a 16-bit or a 32-bit instruction; the two are + * distinguishable because for the 16-bit case the top 16 bits + * are zeroes, and that isn't a valid 32-bit encoding. + */ + if ((insn & 0xffffff00) == 0xbe00) { + /* BKPT */ + return true; + } + + if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) && + !arm_dc_feature(s, ARM_FEATURE_M)) { + /* HLT: v8A only. This is unconditional even when it is going to + * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3. + * For v7 cores this was a plain old undefined encoding and so + * honours its cc check. (We might be using the encoding as + * a semihosting trap, but we don't change the cc check behaviour + * on that account, because a debugger connected to a real v7A + * core and emulating semihosting traps by catching the UNDEF + * exception would also only see cases where the cc check passed. + * No guest code should be trying to do a HLT semihosting trap + * in an IT block anyway. + */ + return true; + } + + if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) && + arm_dc_feature(s, ARM_FEATURE_M)) { + /* SG: v8M only */ + return true; + } + + return false; +} + +static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = dc->uc; + TCGContext *tcg_ctx = uc->tcg_ctx; + CPUARMState *env = cpu->env_ptr; + uint32_t insn; + bool is_16bit; + uint32_t insn_size; + + if (arm_pre_translate_insn(dc)) { + return; + } + + // Unicorn: end address tells us to stop emulation + if (dcbase->pc_next == uc->addr_end) { + // imitate WFI instruction to halt emulation + dcbase->is_jmp = DISAS_WFI; + return; + } + + dc->pc_curr = dc->base.pc_next; + insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b); + is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn); + dc->base.pc_next += 2; + if (!is_16bit) { + uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b); + + insn = insn << 16 | insn2; + dc->base.pc_next += 2; + } + dc->insn = insn; + + if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) { + uint32_t cond = dc->condexec_cond; + + /* + * Conditionally skip the insn. Note that both 0xe and 0xf mean + * "always"; 0xf is not "never". + */ + if (cond < 0x0e) { + arm_skip_unless(dc, cond); + } + } + + // Unicorn: + // We can't stop in the middle of the IT block. + // In other words, treat the whole IT block as + // a single instruction. + uc->no_exit_request = (dc->condexec_mask != 0); + + // Unicorn: trace this instruction on request + insn_size = is_16bit ? 2 : 4; + if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, dc->base.pc_next - insn_size)) { + if (uc->no_exit_request) { + gen_uc_tracecode(tcg_ctx, insn_size, UC_HOOK_CODE_IDX | UC_HOOK_FLAG_NO_STOP, uc, dc->base.pc_next - insn_size); + } else { + gen_uc_tracecode(tcg_ctx, insn_size, UC_HOOK_CODE_IDX, uc, dc->base.pc_next - insn_size); + } + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + if (is_16bit) { + disas_thumb_insn(dc, insn); + } else { + disas_thumb2_insn(dc, insn); + } + + /* Advance the Thumb condexec condition. 
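+ * The 5-bit mask shifts left one position per executed insn; bit 4 + * of the old mask supplies the low bit of the condition for the + * next insn, and a mask of zero marks the end of the IT block.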
*/ + if (dc->condexec_mask) { + dc->condexec_cond = ((dc->condexec_cond & 0xe) | + ((dc->condexec_mask >> 4) & 1)); + dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; + if (dc->condexec_mask == 0) { + dc->condexec_cond = 0; + } + } + + arm_post_translate_insn(dc); + + /* Thumb is a variable-length ISA. Stop translation when the next insn + * will touch a new page. This ensures that prefetch aborts occur at + * the right place. + * + * We want to stop the TB if the next insn starts in a new page, + * or if it spans between this page and the next. This means that + * if we're looking at the last halfword in the page we need to + * see if it's a 16-bit Thumb insn (which will fit in this TB) + * or a 32-bit Thumb insn (which won't). + * This is to avoid generating a silly TB with a single 16-bit insn + * in it at the end of this page (which would execute correctly + * but isn't very efficient). + */ + if (dc->base.is_jmp == DISAS_NEXT + && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE + || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3 + && insn_crosses_page(env, dc)))) { + dc->base.is_jmp = DISAS_TOO_MANY; + } +} + +static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = dc->uc; + TCGContext *tcg_ctx = uc->tcg_ctx; + + if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) { + /* FIXME: This can theoretically happen with self-modifying code. */ + cpu_abort(cpu, "IO on conditional branch instruction"); + } + + /* At this stage dc->condjmp will only be set when the skipped + instruction was a conditional branch or trap, and the PC has + already been written. */ + gen_set_condexec(dc); + if (dc->base.is_jmp == DISAS_BX_EXCRET) { + /* Exception return branches need some special case code at the + * end of the TB, which is complex enough that it has to + * handle the single-step vs not and the condition-failed + * insn codepath itself. + */ + gen_bx_excret_final_code(dc); + } else if (unlikely(is_singlestepping(dc))) { + /* Unconditional and "condition passed" instruction codepath. */ + switch (dc->base.is_jmp) { + case DISAS_SWI: + gen_ss_advance(dc); + gen_exception(tcg_ctx, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), + default_exception_el(dc)); + break; + case DISAS_HVC: + gen_ss_advance(dc); + gen_exception(tcg_ctx, EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); + break; + case DISAS_SMC: + gen_ss_advance(dc); + gen_exception(tcg_ctx, EXCP_SMC, syn_aa32_smc(), 3); + break; + case DISAS_NEXT: + case DISAS_TOO_MANY: + case DISAS_UPDATE: + gen_set_pc_im(dc, dc->base.pc_next); + /* fall through */ + default: + /* FIXME: Single stepping a WFI insn will not halt the CPU. */ + gen_singlestep_exception(dc); + break; + case DISAS_NORETURN: + break; + } + } else { + /* While branches must always occur at the end of an IT block, + there are a few other things that can cause us to terminate + the TB in the middle of an IT block: + - Exception generating instructions (bkpt, swi, undefined). + - Page boundaries. + - Hardware watchpoints. + Hardware breakpoints have already been handled and skip this code. 
+ */ + switch(dc->base.is_jmp) { + case DISAS_NEXT: + case DISAS_TOO_MANY: + gen_goto_tb(dc, 1, dc->base.pc_next); + break; + case DISAS_JUMP: + gen_goto_ptr(tcg_ctx); + break; + case DISAS_UPDATE: + gen_set_pc_im(dc, dc->base.pc_next); + /* fall through */ + default: + /* indicate that the hash table must be used to find the next TB */ + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + break; + case DISAS_NORETURN: + /* nothing more to generate */ + break; + case DISAS_WFI: + { + gen_set_pc_im(dc, dc->base.pc_next); + + TCGv_i32 tmp = tcg_const_i32(tcg_ctx, (dc->thumb && + !(dc->insn & (1U << 31))) ? 2 : 4); + + gen_helper_wfi(tcg_ctx, tcg_ctx->cpu_env, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + /* The helper doesn't necessarily throw an exception, but we + * must go back to the main loop to check for interrupts anyway. + */ + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + break; + } + case DISAS_WFE: + gen_helper_wfe(tcg_ctx, tcg_ctx->cpu_env); + break; + case DISAS_YIELD: + gen_helper_yield(tcg_ctx, tcg_ctx->cpu_env); + break; + case DISAS_SWI: + gen_exception(tcg_ctx, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), + default_exception_el(dc)); + break; + case DISAS_HVC: + gen_exception(tcg_ctx, EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); + break; + case DISAS_SMC: + gen_exception(tcg_ctx, EXCP_SMC, syn_aa32_smc(), 3); + break; + } + } + + if (dc->condjmp) { + /* "Condition failed" instruction codepath for the branch/trap insn */ + gen_set_label(tcg_ctx, dc->condlabel); + gen_set_condexec(dc); + if (unlikely(is_singlestepping(dc))) { + gen_set_pc_im(dc, dc->base.pc_next); + gen_singlestep_exception(dc); + } else { + gen_goto_tb(dc, 1, dc->base.pc_next); + } + } +} + +static const TranslatorOps arm_translator_ops = { + .init_disas_context = arm_tr_init_disas_context, + .tb_start = arm_tr_tb_start, + .insn_start = arm_tr_insn_start, + .breakpoint_check = arm_tr_breakpoint_check, + .translate_insn = arm_tr_translate_insn, + .tb_stop = arm_tr_tb_stop, +}; + +static const TranslatorOps thumb_translator_ops = { + .init_disas_context = arm_tr_init_disas_context, + .tb_start = arm_tr_tb_start, + .insn_start = arm_tr_insn_start, + .breakpoint_check = arm_tr_breakpoint_check, + .translate_insn = thumb_tr_translate_insn, + .tb_stop = arm_tr_tb_stop, +}; + +/* generate intermediate code for basic block 'tb'. 
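+ * The TB flags choose the ops below: the THUMB flag selects the + * Thumb decoder and, when TARGET_AARCH64 is built, AARCH64_STATE + * selects the A64 decoder.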
*/ +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) +{ + DisasContext dc = { 0 }; + const TranslatorOps *ops = &arm_translator_ops; + + if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) { + ops = &thumb_translator_ops; + } +#ifdef TARGET_AARCH64 + if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) { + ops = &aarch64_translator_ops; + } +#endif + + translator_loop(ops, &dc.base, cpu, tb, max_insns); +} + +void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, + target_ulong *data) +{ + if (is_a64(env)) { + env->pc = data[0]; + env->condexec_bits = 0; + env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; + } else { + env->regs[15] = data[0]; + env->condexec_bits = data[1]; + env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; + } +} diff --git a/qemu/target/arm/translate.h b/qemu/target/arm/translate.h new file mode 100644 index 00000000..62ea7a52 --- /dev/null +++ b/qemu/target/arm/translate.h @@ -0,0 +1,301 @@ +#ifndef TARGET_ARM_TRANSLATE_H +#define TARGET_ARM_TRANSLATE_H + +#include "exec/translator.h" +#include "internals.h" + +struct uc_struct; + +/* internal defines */ +typedef struct DisasContext { + DisasContextBase base; + const ARMISARegisters *isar; + + /* The address of the current instruction being translated. */ + target_ulong pc_curr; + target_ulong page_start; + uint32_t insn; + /* Nonzero if this instruction has been conditionally skipped. */ + int condjmp; + /* The label that will be jumped to when the instruction is skipped. */ + TCGLabel *condlabel; + /* Thumb-2 conditional execution bits. */ + int condexec_mask; + int condexec_cond; + int thumb; + int sctlr_b; + MemOp be_data; + int user; + ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */ + uint8_t tbii; /* TBI1|TBI0 for insns */ + uint8_t tbid; /* TBI1|TBI0 for data */ + bool ns; /* Use non-secure CPREG bank on access */ + int fp_excp_el; /* FP exception EL or 0 if enabled */ + int sve_excp_el; /* SVE exception EL or 0 if enabled */ + int sve_len; /* SVE vector length in bytes */ + /* Flag indicating that exceptions from secure mode are routed to EL3. */ + bool secure_routed_to_el3; + bool vfp_enabled; /* FP enabled via FPSCR.EN */ + int vec_len; + int vec_stride; + bool v7m_handler_mode; + bool v8m_secure; /* true if v8M and we're in Secure mode */ + bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */ + bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */ + bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */ + bool v7m_lspact; /* FPCCR.LSPACT set */ + /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI + * so that top level loop can generate correct syndrome information. + */ + uint32_t svc_imm; + int aarch64; + int current_el; + /* Debug target exception level for single-step exceptions */ + int debug_target_el; + GHashTable *cp_regs; + uint64_t features; /* CPU features bits */ + /* Because unallocated encodings generate different exception syndrome + * information from traps due to FP being disabled, we can't do a single + * "is fp access disabled" check at a high level in the decode tree. + * To help in catching bugs where the access check was forgotten in some + * code path, we set this flag when the access check is done, and assert + * that it is set at the point where we actually touch the FP regs. + */ + bool fp_access_checked; + /* ARMv8 single-step state (this is distinct from the QEMU gdbstub + * single-step support). 
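+ * The code-generation state machine for these flags is described + * in arm_tr_init_disas_context() in translate.c.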
+ */ + bool ss_active; + bool pstate_ss; + /* True if the insn just emitted was a load-exclusive instruction + * (necessary for syndrome information for single step exceptions), + * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*. + */ + bool is_ldex; + /* True if AccType_UNPRIV should be used for LDTR et al */ + bool unpriv; + /* True if v8.3-PAuth is active. */ + bool pauth_active; + /* True with v8.5-BTI and SCTLR_ELx.BT* set. */ + bool bt; + /* True if any CP15 access is trapped by HSTR_EL2 */ + bool hstr_active; + /* + * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. + * < 0, set by the current instruction. + */ + int8_t btype; + /* True if this page is guarded. */ + bool guarded_page; + /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ + int c15_cpar; + /* TCG op of the current insn_start. */ + TCGOp *insn_start; +#define TMP_A64_MAX 16 + int tmp_a64_count; + TCGv_i64 tmp_a64[TMP_A64_MAX]; + + // Unicorn + struct uc_struct *uc; +} DisasContext; + +typedef struct DisasCompare { + TCGCond cond; + TCGv_i32 value; + bool value_global; +} DisasCompare; + +static inline int arm_dc_feature(DisasContext *dc, int feature) +{ + return (dc->features & (1ULL << feature)) != 0; +} + +static inline int get_mem_index(DisasContext *s) +{ + return arm_to_core_mmu_idx(s->mmu_idx); +} + +/* Function used to determine the target exception EL when otherwise not known + * or default. + */ +static inline int default_exception_el(DisasContext *s) +{ + /* If we are coming from secure EL0 in a system with a 32-bit EL3, then + * there is no secure EL1, so we route exceptions to EL3. Otherwise, + * exceptions can only be routed to ELs above 1, so we target the higher of + * 1 or the current EL. + */ + return (s->mmu_idx == ARMMMUIdx_SE10_0 && s->secure_routed_to_el3) + ? 3 : MAX(1, s->current_el); +} + +static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn) +{ + /* We don't need to save all of the syndrome so we mask and shift + * out unneeded bits to help the sleb128 encoder do a better job. + */ + syn &= ARM_INSN_START_WORD2_MASK; + syn >>= ARM_INSN_START_WORD2_SHIFT; + + /* We check and clear insn_start_idx to catch multiple updates. */ + assert(s->insn_start != NULL); + tcg_set_insn_start_param(s->insn_start, 2, syn); + s->insn_start = NULL; +} + +/* is_jmp field values */ +#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ +#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */ +/* These instructions trap after executing, so the A32/T32 decoder must + * defer them until after the conditional execution state has been updated. + * WFI also needs special handling when single-stepping. + */ +#define DISAS_WFI DISAS_TARGET_2 +#define DISAS_SWI DISAS_TARGET_3 +/* WFE */ +#define DISAS_WFE DISAS_TARGET_4 +#define DISAS_HVC DISAS_TARGET_5 +#define DISAS_SMC DISAS_TARGET_6 +#define DISAS_YIELD DISAS_TARGET_7 +/* M profile branch which might be an exception return (and so needs + * custom end-of-TB code) + */ +#define DISAS_BX_EXCRET DISAS_TARGET_8 +/* For instructions which want an immediate exit to the main loop, + * as opposed to attempting to use lookup_and_goto_ptr. Unlike + * DISAS_UPDATE this doesn't write the PC on exiting the translation + * loop so you need to ensure something (gen_a64_set_pc_im or runtime + * helper) has done so before we reach return from cpu_tb_exec. 
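+ * (The LDM exception-return path in do_ldm() is one such user: it + * stores the new PC via store_pc_exc_ret() before setting + * DISAS_EXIT.)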
+ */ +#define DISAS_EXIT DISAS_TARGET_9 + +#ifdef TARGET_AARCH64 +void a64_translate_init(struct uc_struct *uc); +void gen_a64_set_pc_im(TCGContext *tcg_ctx, uint64_t val); +extern const TranslatorOps aarch64_translator_ops; +#else +static inline void a64_translate_init(struct uc_struct *uc) +{ +} + +static inline void gen_a64_set_pc_im(uint64_t val) +{ +} +#endif + +void arm_test_cc(TCGContext *tcg_ctx, DisasCompare *cmp, int cc); +void arm_free_cc(TCGContext *tcg_ctx, DisasCompare *cmp); +void arm_jump_cc(TCGContext *tcg_ctx, DisasCompare *cmp, TCGLabel *label); +void arm_gen_test_cc(TCGContext *tcg_ctx, int cc, TCGLabel *label); + +/* Return state of Alternate Half-precision flag, caller frees result */ +static inline TCGv_i32 get_ahp_flag(TCGContext *tcg_ctx) +{ + TCGv_i32 ret = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_ld_i32(tcg_ctx, ret, tcg_ctx->cpu_env, + offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR])); + tcg_gen_extract_i32(tcg_ctx, ret, ret, 26, 1); + + return ret; +} + +/* Set bits within PSTATE. */ +static inline void set_pstate_bits(TCGContext *tcg_ctx, uint32_t bits) +{ + TCGv_i32 p = tcg_temp_new_i32(tcg_ctx); + + tcg_debug_assert(!(bits & CACHED_PSTATE_BITS)); + + tcg_gen_ld_i32(tcg_ctx, p, tcg_ctx->cpu_env, offsetof(CPUARMState, pstate)); + tcg_gen_ori_i32(tcg_ctx, p, p, bits); + tcg_gen_st_i32(tcg_ctx, p, tcg_ctx->cpu_env, offsetof(CPUARMState, pstate)); + tcg_temp_free_i32(tcg_ctx, p); +} + +/* Clear bits within PSTATE. */ +static inline void clear_pstate_bits(TCGContext *tcg_ctx, uint32_t bits) +{ + TCGv_i32 p = tcg_temp_new_i32(tcg_ctx); + + tcg_debug_assert(!(bits & CACHED_PSTATE_BITS)); + + tcg_gen_ld_i32(tcg_ctx, p, tcg_ctx->cpu_env, offsetof(CPUARMState, pstate)); + tcg_gen_andi_i32(tcg_ctx, p, p, ~bits); + tcg_gen_st_i32(tcg_ctx, p, tcg_ctx->cpu_env, offsetof(CPUARMState, pstate)); + tcg_temp_free_i32(tcg_ctx, p); +} + +/* If the singlestep state is Active-not-pending, advance to Active-pending. */ +static inline void gen_ss_advance(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (s->ss_active) { + s->pstate_ss = 0; + clear_pstate_bits(tcg_ctx, PSTATE_SS); + } +} + +static inline void gen_exception(TCGContext *tcg_ctx, int excp, uint32_t syndrome, + uint32_t target_el) +{ + TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); + TCGv_i32 tcg_syn = tcg_const_i32(tcg_ctx, syndrome); + TCGv_i32 tcg_el = tcg_const_i32(tcg_ctx, target_el); + + gen_helper_exception_with_syndrome(tcg_ctx, tcg_ctx->cpu_env, tcg_excp, + tcg_syn, tcg_el); + + tcg_temp_free_i32(tcg_ctx, tcg_el); + tcg_temp_free_i32(tcg_ctx, tcg_syn); + tcg_temp_free_i32(tcg_ctx, tcg_excp); +} + +/* Generate an architectural singlestep exception */ +static inline void gen_swstep_exception(DisasContext *s, int isv, int ex) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + bool same_el = (s->debug_target_el == s->current_el); + + /* + * If singlestep is targeting a lower EL than the current one, + * then s->ss_active must be false and we can never get here. + */ + assert(s->debug_target_el >= s->current_el); + + gen_exception(tcg_ctx, EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el); +} + +/* + * Given a VFP floating point constant encoded into an 8 bit immediate in an + * instruction, expand it to the actual constant value of the specified + * size, as per the VFPExpandImm() pseudocode in the Arm ARM. + */ +uint64_t vfp_expand_imm(int size, uint8_t imm8); + +/* Vector operations shared between ARM and AArch64. 
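+ * Each table has one entry per vector element size, indexed by the + * MemOp size log2 (MO_8 .. MO_64).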
*/ +extern const GVecGen3 mla_op[4]; +extern const GVecGen3 mls_op[4]; +extern const GVecGen3 cmtst_op[4]; +extern const GVecGen3 sshl_op[4]; +extern const GVecGen3 ushl_op[4]; +extern const GVecGen2i ssra_op[4]; +extern const GVecGen2i usra_op[4]; +extern const GVecGen2i sri_op[4]; +extern const GVecGen2i sli_op[4]; +extern const GVecGen4 uqadd_op[4]; +extern const GVecGen4 sqadd_op[4]; +extern const GVecGen4 uqsub_op[4]; +extern const GVecGen4 sqsub_op[4]; +void gen_cmtst_i64(TCGContext *, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); +void gen_ushl_i32(TCGContext *, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b); +void gen_sshl_i32(TCGContext *, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b); +void gen_ushl_i64(TCGContext *, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); +void gen_sshl_i64(TCGContext *, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); + +/* + * Forward to the isar_feature_* tests given a DisasContext pointer. + */ +#define dc_isar_feature(name, ctx) isar_feature_##name(ctx->isar) + +#endif /* TARGET_ARM_TRANSLATE_H */ diff --git a/qemu/target-arm/unicorn.h b/qemu/target/arm/unicorn.h similarity index 50% rename from qemu/target-arm/unicorn.h rename to qemu/target/arm/unicorn.h index a65b072b..be5e23fc 100644 --- a/qemu/target-arm/unicorn.h +++ b/qemu/target/arm/unicorn.h @@ -10,20 +10,21 @@ int arm_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, i int arm64_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); int arm64_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); +int arm_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int arm_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); +int armeb_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int armeb_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); +int arm64_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int arm64_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); +int arm64eb_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int arm64eb_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); + void arm_reg_reset(struct uc_struct *uc); void arm64_reg_reset(struct uc_struct *uc); -DEFAULT_VISIBILITY void arm_uc_init(struct uc_struct* uc); void armeb_uc_init(struct uc_struct* uc); -DEFAULT_VISIBILITY void arm64_uc_init(struct uc_struct* uc); void arm64eb_uc_init(struct uc_struct* uc); - -extern const int ARM_REGS_STORAGE_SIZE_arm; -extern const int ARM_REGS_STORAGE_SIZE_armeb; -extern const int ARM64_REGS_STORAGE_SIZE_aarch64; -extern const int ARM64_REGS_STORAGE_SIZE_aarch64eb; - #endif diff --git a/qemu/target/arm/unicorn_aarch64.c b/qemu/target/arm/unicorn_aarch64.c new file mode 100644 index 00000000..14361f1c --- /dev/null +++ b/qemu/target/arm/unicorn_aarch64.c @@ -0,0 +1,355 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + +#include "qemu/typedefs.h" +#include "unicorn/unicorn.h" +#include "sysemu/cpus.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" +#include "unicorn.h" + +ARMCPU *cpu_aarch64_init(struct uc_struct *uc); + +static void arm64_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUARMState *)uc->cpu->env_ptr)->pc = address; +} + +static void 
arm64_release(void* ctx) +{ + int i; + TCGContext *tcg_ctx = (TCGContext *)ctx; + ARMCPU *cpu = (ARMCPU *)tcg_ctx->uc->cpu; + CPUTLBDesc *d = cpu->neg.tlb.d; + CPUTLBDescFast *f = cpu->neg.tlb.f; + CPUTLBDesc *desc; + CPUTLBDescFast *fast; + ARMELChangeHook *entry, *next; + CPUARMState *env = &cpu->env; + uint32_t nr; + + release_common(ctx); + for (i = 0; i < NB_MMU_MODES; i++) { + desc = &(d[i]); + fast = &(f[i]); + g_free(desc->iotlb); + g_free(fast->table); + } + + QLIST_FOREACH_SAFE(entry, &cpu->pre_el_change_hooks, node, next) { + QLIST_SAFE_REMOVE(entry, node); + g_free(entry); + } + QLIST_FOREACH_SAFE(entry, &cpu->el_change_hooks, node, next) { + QLIST_SAFE_REMOVE(entry, node); + g_free(entry); + } + + if (arm_feature(env, ARM_FEATURE_PMSA) && + arm_feature(env, ARM_FEATURE_V7)) { + nr = cpu->pmsav7_dregion; + if (nr) { + if (arm_feature(env, ARM_FEATURE_V8)) { + g_free(env->pmsav8.rbar[M_REG_NS]); + g_free(env->pmsav8.rlar[M_REG_NS]); + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + g_free(env->pmsav8.rbar[M_REG_S]); + g_free(env->pmsav8.rlar[M_REG_S]); + } + } else { + g_free(env->pmsav7.drbar); + g_free(env->pmsav7.drsr); + g_free(env->pmsav7.dracr); + } + } + } + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + nr = cpu->sau_sregion; + if (nr) { + g_free(env->sau.rbar); + g_free(env->sau.rlar); + } + } + + g_free(cpu->cpreg_indexes); + g_free(cpu->cpreg_values); + g_free(cpu->cpreg_vmstate_indexes); + g_free(cpu->cpreg_vmstate_values); + g_hash_table_destroy(cpu->cp_regs); +} + +void arm64_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env = uc->cpu->env_ptr; + memset(env->xregs, 0, sizeof(env->xregs)); + + env->pc = 0; +} + +static void reg_read(CPUARMState *env, unsigned int regid, void *value) +{ + if (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31) { + regid += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0; + } + if (regid >= UC_ARM64_REG_X0 && regid <= UC_ARM64_REG_X28) { + *(int64_t *)value = env->xregs[regid - UC_ARM64_REG_X0]; + } else if (regid >= UC_ARM64_REG_W0 && regid <= UC_ARM64_REG_W30) { + *(int32_t *)value = READ_DWORD(env->xregs[regid - UC_ARM64_REG_W0]); + } else if (regid >= UC_ARM64_REG_Q0 && regid <= UC_ARM64_REG_Q31) { // FIXME + float64 *dst = (float64*) value; + uint32_t reg_index = regid - UC_ARM64_REG_Q0; + dst[0] = env->vfp.zregs[reg_index].d[0]; + dst[1] = env->vfp.zregs[reg_index].d[1]; + } else if (regid >= UC_ARM64_REG_D0 && regid <= UC_ARM64_REG_D31) { + *(float64*)value = env->vfp.zregs[regid - UC_ARM64_REG_D0].d[0]; + } else if (regid >= UC_ARM64_REG_S0 && regid <= UC_ARM64_REG_S31) { + *(int32_t*)value = READ_DWORD(env->vfp.zregs[regid - UC_ARM64_REG_S0].d[0]); + } else if (regid >= UC_ARM64_REG_H0 && regid <= UC_ARM64_REG_H31) { + *(int16_t*)value = READ_WORD(env->vfp.zregs[regid - UC_ARM64_REG_H0].d[0]); + } else if (regid >= UC_ARM64_REG_B0 && regid <= UC_ARM64_REG_B31) { + *(int8_t*)value = READ_BYTE_L(env->vfp.zregs[regid - UC_ARM64_REG_B0].d[0]); + } else if (regid >= UC_ARM64_REG_ELR_EL0 && regid <= UC_ARM64_REG_ELR_EL3) { + *(uint64_t*)value = env->elr_el[regid - UC_ARM64_REG_ELR_EL0]; + } else if (regid >= UC_ARM64_REG_SP_EL0 && regid <= UC_ARM64_REG_SP_EL3) { + *(uint64_t*)value = env->sp_el[regid - UC_ARM64_REG_SP_EL0]; + } else if (regid >= UC_ARM64_REG_ESR_EL0 && regid <= UC_ARM64_REG_ESR_EL3) { + *(uint64_t*)value = env->cp15.esr_el[regid - UC_ARM64_REG_ESR_EL0]; + } else if (regid >= UC_ARM64_REG_FAR_EL0 && regid <= UC_ARM64_REG_FAR_EL3) { + *(uint64_t*)value = env->cp15.far_el[regid - UC_ARM64_REG_FAR_EL0]; + } else if 
(regid >= UC_ARM64_REG_VBAR_EL0 && regid <= UC_ARM64_REG_VBAR_EL3) { + *(uint64_t*)value = env->cp15.vbar_el[regid - UC_ARM64_REG_VBAR_EL0]; + } else { + switch(regid) { + default: break; + case UC_ARM64_REG_CPACR_EL1: + // *(uint32_t *)value = env->cp15.c1_coproc; + break; + case UC_ARM64_REG_TPIDR_EL0: + // *(int64_t *)value = env->cp15.tpidr_el0; + break; + case UC_ARM64_REG_TPIDRRO_EL0: + // *(int64_t *)value = env->cp15.tpidrro_el0; + break; + case UC_ARM64_REG_TPIDR_EL1: + // *(int64_t *)value = env->cp15.tpidr_el1; + break; + case UC_ARM64_REG_X29: + *(int64_t *)value = env->xregs[29]; + break; + case UC_ARM64_REG_X30: + *(int64_t *)value = env->xregs[30]; + break; + case UC_ARM64_REG_PC: + *(uint64_t *)value = env->pc; + break; + case UC_ARM64_REG_SP: + *(int64_t *)value = env->xregs[31]; + break; + case UC_ARM64_REG_NZCV: + *(int32_t *)value = cpsr_read(env) & CPSR_NZCV; + break; + case UC_ARM64_REG_PSTATE: + *(uint32_t *)value = pstate_read(env); + break; + case UC_ARM64_REG_TTBR0_EL1: + // *(uint64_t *)value = env->cp15.ttbr0_el1; + break; + case UC_ARM64_REG_TTBR1_EL1: + // *(uint64_t *)value = env->cp15.ttbr1_el1; + break; + case UC_ARM64_REG_PAR_EL1: + // *(uint64_t *)value = env->cp15.par_el1; + break; + case UC_ARM64_REG_MAIR_EL1: + // *(uint64_t *)value = env->cp15.mair_el1; + break; + } + } + + return; +} + +static void reg_write(CPUARMState *env, unsigned int regid, const void *value) +{ + if (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31) { + regid += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0; + } + if (regid >= UC_ARM64_REG_X0 && regid <= UC_ARM64_REG_X28) { + env->xregs[regid - UC_ARM64_REG_X0] = *(uint64_t *)value; + } else if (regid >= UC_ARM64_REG_W0 && regid <= UC_ARM64_REG_W30) { + WRITE_DWORD(env->xregs[regid - UC_ARM64_REG_W0], *(uint32_t *)value); + } else if (regid >= UC_ARM64_REG_Q0 && regid <= UC_ARM64_REG_Q31) { + float64 *src = (float64*) value; + uint32_t reg_index = regid - UC_ARM64_REG_Q0; + env->vfp.zregs[reg_index].d[0] = src[0]; + env->vfp.zregs[reg_index].d[1] = src[1]; + } else if (regid >= UC_ARM64_REG_D0 && regid <= UC_ARM64_REG_D31) { + env->vfp.zregs[regid - UC_ARM64_REG_D0].d[0] = * (float64*) value; + } else if (regid >= UC_ARM64_REG_S0 && regid <= UC_ARM64_REG_S31) { + WRITE_DWORD(env->vfp.zregs[regid - UC_ARM64_REG_S0].d[0], *(int32_t*) value); + } else if (regid >= UC_ARM64_REG_H0 && regid <= UC_ARM64_REG_H31) { + WRITE_WORD(env->vfp.zregs[regid - UC_ARM64_REG_H0].d[0], *(int16_t*) value); + } else if (regid >= UC_ARM64_REG_B0 && regid <= UC_ARM64_REG_B31) { + WRITE_BYTE_L(env->vfp.zregs[regid - UC_ARM64_REG_B0].d[0], *(int8_t*) value); + } else if (regid >= UC_ARM64_REG_ELR_EL0 && regid <= UC_ARM64_REG_ELR_EL3) { + env->elr_el[regid - UC_ARM64_REG_ELR_EL0] = *(uint64_t*)value; + } else if (regid >= UC_ARM64_REG_SP_EL0 && regid <= UC_ARM64_REG_SP_EL3) { + env->sp_el[regid - UC_ARM64_REG_SP_EL0] = *(uint64_t*)value; + } else if (regid >= UC_ARM64_REG_ESR_EL0 && regid <= UC_ARM64_REG_ESR_EL3) { + env->cp15.esr_el[regid - UC_ARM64_REG_ESR_EL0] = *(uint64_t*)value; + } else if (regid >= UC_ARM64_REG_FAR_EL0 && regid <= UC_ARM64_REG_FAR_EL3) { + env->cp15.far_el[regid - UC_ARM64_REG_FAR_EL0] = *(uint64_t*)value; + } else if (regid >= UC_ARM64_REG_VBAR_EL0 && regid <= UC_ARM64_REG_VBAR_EL3) { + env->cp15.vbar_el[regid - UC_ARM64_REG_VBAR_EL0] = *(uint64_t*)value; + } else { + switch(regid) { + default: break; + case UC_ARM64_REG_CPACR_EL1: + //env->cp15.c1_coproc = *(uint32_t *)value; + break; + case UC_ARM64_REG_TPIDR_EL0: + 
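// As with UC_ARM64_REG_CPACR_EL1 above, the TPIDR* thread-ID
+            // registers are currently no-ops in this port; the
+            // commented-out accesses show the intended cp15 fields.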
//env->cp15.tpidr_el0 = *(uint64_t *)value; + break; + case UC_ARM64_REG_TPIDRRO_EL0: + //env->cp15.tpidrro_el0 = *(uint64_t *)value; + break; + case UC_ARM64_REG_TPIDR_EL1: + //env->cp15.tpidr_el1 = *(uint64_t *)value; + break; + case UC_ARM64_REG_X29: + env->xregs[29] = *(uint64_t *)value; + break; + case UC_ARM64_REG_X30: + env->xregs[30] = *(uint64_t *)value; + break; + case UC_ARM64_REG_PC: + env->pc = *(uint64_t *)value; + break; + case UC_ARM64_REG_SP: + env->xregs[31] = *(uint64_t *)value; + break; + case UC_ARM64_REG_NZCV: + //cpsr_write(env, *(uint32_t *)value, CPSR_NZCV); + break; + case UC_ARM64_REG_PSTATE: + pstate_write(env, *(uint32_t *)value); + break; + case UC_ARM64_REG_TTBR0_EL1: + //env->cp15.ttbr0_el1 = *(uint64_t *)value; + break; + case UC_ARM64_REG_TTBR1_EL1: + //env->cp15.ttbr1_el1 = *(uint64_t *)value; + break; + case UC_ARM64_REG_PAR_EL1: + //env->cp15.par_el1 = *(uint64_t *)value; + break; + case UC_ARM64_REG_MAIR_EL1: + //env->cp15.mair_el1 = *(uint64_t *)value; + break; + } + } + + return; +} + +int arm64_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUARMState *env= &(ARM_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +int arm64_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count) +{ + CPUARMState *env= &(ARM_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + if(regid == UC_ARM64_REG_PC){ + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + } + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_WORDS_BIGENDIAN +int arm64eb_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +#else +int arm64_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +#endif +{ + CPUARMState *env= (CPUARMState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_WORDS_BIGENDIAN +int arm64eb_context_reg_write(struct uc_context *ctx, unsigned int *regs, void* const* vals, int count) +#else +int arm64_context_reg_write(struct uc_context *ctx, unsigned int *regs, void* const* vals, int count) +#endif +{ + CPUARMState *env= (CPUARMState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + } + + return 0; +} + +static int arm64_cpus_init(struct uc_struct *uc, const char *cpu_model) +{ + ARMCPU *cpu; + + cpu = cpu_aarch64_init(uc); + if (cpu == NULL) { + return -1; + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_WORDS_BIGENDIAN +void arm64eb_uc_init(struct uc_struct* uc) +#else +void arm64_uc_init(struct uc_struct* uc) +#endif +{ + uc->reg_read = arm64_reg_read; + uc->reg_write = arm64_reg_write; + uc->reg_reset = arm64_reg_reset; + uc->set_pc = arm64_set_pc; + uc->release = arm64_release; + uc->cpus_init = arm64_cpus_init; + uc->cpu_context_size = offsetof(CPUARMState, cpu_watchpoint); + uc_common_init(uc); +} diff --git a/qemu/target/arm/unicorn_arm.c b/qemu/target/arm/unicorn_arm.c new file mode 100644 index 00000000..9a5a698d --- /dev/null +++ b/qemu/target/arm/unicorn_arm.c @@ -0,0 +1,495 @@ +/* Unicorn Emulator Engine */ +/* By 
Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + +#include "qemu/typedefs.h" +#include "unicorn/unicorn.h" +#include "sysemu/cpus.h" +#include "sysemu/tcg.h" +#include "cpu.h" +#include "uc_priv.h" +#include "unicorn_common.h" +#include "unicorn.h" + +ARMCPU *cpu_arm_init(struct uc_struct *uc); + +static void arm_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUARMState *)uc->cpu->env_ptr)->pc = address; + ((CPUARMState *)uc->cpu->env_ptr)->regs[15] = address; +} + +static void arm_release(void *ctx) +{ + int i; + TCGContext *tcg_ctx = (TCGContext *)ctx; + ARMCPU* cpu = (ARMCPU *)tcg_ctx->uc->cpu; + CPUTLBDesc *d = cpu->neg.tlb.d; + CPUTLBDescFast *f = cpu->neg.tlb.f; + CPUTLBDesc *desc; + CPUTLBDescFast *fast; + ARMELChangeHook *entry, *next; + CPUARMState *env = &cpu->env; + uint32_t nr; + + release_common(ctx); + for (i = 0; i < NB_MMU_MODES; i++) { + desc = &(d[i]); + fast = &(f[i]); + g_free(desc->iotlb); + g_free(fast->table); + } + + QLIST_FOREACH_SAFE(entry, &cpu->pre_el_change_hooks, node, next) { + QLIST_SAFE_REMOVE(entry, node); + g_free(entry); + } + QLIST_FOREACH_SAFE(entry, &cpu->el_change_hooks, node, next) { + QLIST_SAFE_REMOVE(entry, node); + g_free(entry); + } + + if (arm_feature(env, ARM_FEATURE_PMSA) && + arm_feature(env, ARM_FEATURE_V7)) { + nr = cpu->pmsav7_dregion; + if (nr) { + if (arm_feature(env, ARM_FEATURE_V8)) { + g_free(env->pmsav8.rbar[M_REG_NS]); + g_free(env->pmsav8.rlar[M_REG_NS]); + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + g_free(env->pmsav8.rbar[M_REG_S]); + g_free(env->pmsav8.rlar[M_REG_S]); + } + } else { + g_free(env->pmsav7.drbar); + g_free(env->pmsav7.drsr); + g_free(env->pmsav7.dracr); + } + } + } + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + nr = cpu->sau_sregion; + if (nr) { + g_free(env->sau.rbar); + g_free(env->sau.rlar); + } + } + + g_free(cpu->cpreg_indexes); + g_free(cpu->cpreg_values); + g_free(cpu->cpreg_vmstate_indexes); + g_free(cpu->cpreg_vmstate_values); + g_hash_table_destroy(cpu->cp_regs); +} + +void arm_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env; + (void)uc; + + env = uc->cpu->env_ptr; + memset(env->regs, 0, sizeof(env->regs)); + + env->pc = 0; +} + +/* these functions are implemented in helper.c. */ +#include "exec/helper-head.h" +uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg); +void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val); + +static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg) +{ + uint32_t mask = 0; + + if (reg & 1) { + mask |= XPSR_EXCP; /* IPSR (unpriv. 
reads as zero) */ + } + + if (!(reg & 4)) { + mask |= XPSR_NZCV | XPSR_Q; /* APSR */ + if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) { + mask |= XPSR_GE; + } + } + + if (reg & 2) { + mask |= (XPSR_IT_0_1 | XPSR_IT_2_7 | XPSR_T); /* EPSR */ + } + + return xpsr_read(env) & mask; +} + +static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask, uint32_t reg, uint32_t val) +{ + uint32_t xpsrmask = 0; + + if (reg & 1) { + xpsrmask |= XPSR_EXCP; + } + + if (!(reg & 4)) { + if (mask & 8) { + xpsrmask |= XPSR_NZCV | XPSR_Q; + } + if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { + xpsrmask |= XPSR_GE; + } + } + + if (reg & 2) { + xpsrmask |= (XPSR_IT_0_1 | XPSR_IT_2_7 | XPSR_T); + } + + xpsr_write(env, val, xpsrmask); +} + +static void reg_read(CPUARMState *env, unsigned int regid, void *value) +{ + if (regid >= UC_ARM_REG_R0 && regid <= UC_ARM_REG_R12) { + *(int32_t *)value = env->regs[regid - UC_ARM_REG_R0]; + } else if (regid >= UC_ARM_REG_D0 && regid <= UC_ARM_REG_D31) { + uint32_t reg_index = regid - UC_ARM_REG_D0; + *(float64 *)value = env->vfp.zregs[reg_index / 2].d[reg_index & 1]; + } else { + switch(regid) { + case UC_ARM_REG_APSR: + if (arm_feature(env, ARM_FEATURE_M)) { + *(int32_t *)value = v7m_mrs_xpsr(env, 0); + } else { + *(int32_t *)value = cpsr_read(env) & (CPSR_NZCV | CPSR_Q | CPSR_GE); + } + break; + case UC_ARM_REG_APSR_NZCV: + *(int32_t *)value = cpsr_read(env) & CPSR_NZCV; + break; + case UC_ARM_REG_CPSR: + *(int32_t *)value = cpsr_read(env); + break; + case UC_ARM_REG_SPSR: + *(int32_t *)value = env->spsr; + break; + //case UC_ARM_REG_SP: + case UC_ARM_REG_R13: + *(int32_t *)value = env->regs[13]; + break; + //case UC_ARM_REG_LR: + case UC_ARM_REG_R14: + *(int32_t *)value = env->regs[14]; + break; + //case UC_ARM_REG_PC: + case UC_ARM_REG_R15: + *(int32_t *)value = env->regs[15]; + break; + case UC_ARM_REG_C1_C0_2: + *(int32_t *)value = env->cp15.cpacr_el1; + break; + case UC_ARM_REG_C13_C0_3: + *(int32_t *)value = env->cp15.tpidrro_el[0]; + break; + case UC_ARM_REG_FPEXC: + *(int32_t *)value = env->vfp.xregs[ARM_VFP_FPEXC]; + break; + case UC_ARM_REG_IPSR: + *(int32_t *)value = v7m_mrs_xpsr(env, 5); + break; + case UC_ARM_REG_MSP: + *(uint32_t *)value = helper_v7m_mrs(env, 8); + break; + case UC_ARM_REG_PSP: + *(uint32_t *)value = helper_v7m_mrs(env, 9); + break; + case UC_ARM_REG_IAPSR: + *(int32_t *)value = v7m_mrs_xpsr(env, 1); + break; + case UC_ARM_REG_EAPSR: + *(int32_t *)value = v7m_mrs_xpsr(env, 2); + break; + case UC_ARM_REG_XPSR: + *(int32_t *)value = v7m_mrs_xpsr(env, 3); + break; + case UC_ARM_REG_EPSR: + *(int32_t *)value = v7m_mrs_xpsr(env, 6); + break; + case UC_ARM_REG_IEPSR: + *(int32_t *)value = v7m_mrs_xpsr(env, 7); + break; + case UC_ARM_REG_PRIMASK: + *(uint32_t *)value = helper_v7m_mrs(env, 16); + break; + case UC_ARM_REG_BASEPRI: + *(uint32_t *)value = helper_v7m_mrs(env, 17); + break; + case UC_ARM_REG_BASEPRI_MAX: + *(uint32_t *)value = helper_v7m_mrs(env, 18); + break; + case UC_ARM_REG_FAULTMASK: + *(uint32_t *)value = helper_v7m_mrs(env, 19); + break; + case UC_ARM_REG_CONTROL: + *(uint32_t *)value = helper_v7m_mrs(env, 20); + break; + } + } + + return; +} + +static void reg_write(CPUARMState *env, unsigned int regid, const void *value) +{ + if (regid >= UC_ARM_REG_R0 && regid <= UC_ARM_REG_R12) { + env->regs[regid - UC_ARM_REG_R0] = *(uint32_t *)value; + } else if (regid >= UC_ARM_REG_D0 && regid <= UC_ARM_REG_D31) { + uint32_t reg_index = regid - UC_ARM_REG_D0; + env->vfp.zregs[reg_index / 2].d[reg_index & 1] = *(float64 *)value; + } 
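+    /*
+     * Special-purpose registers follow. For the v7m_msr_xpsr() calls in
+     * the switch below, the mask nibble mirrors the MSR APSR_* encodings:
+     * 0b1000 writes the NZCVQ flags, 0b0100 the GE bits, 0b1100 both.
+     */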
else {
+        switch(regid) {
+        case UC_ARM_REG_APSR:
+            if (!arm_feature(env, ARM_FEATURE_M)) {
+                cpsr_write(env, *(uint32_t *)value, (CPSR_NZCV | CPSR_Q | CPSR_GE), CPSRWriteRaw);
+            } else {
+                // Same as UC_ARM_REG_APSR_NZCVQ
+                v7m_msr_xpsr(env, 0b1000, 0, *(uint32_t *)value);
+            }
+            break;
+        case UC_ARM_REG_APSR_NZCV:
+            cpsr_write(env, *(uint32_t *)value, CPSR_NZCV, CPSRWriteRaw);
+            break;
+        case UC_ARM_REG_CPSR:
+            cpsr_write(env, *(uint32_t *)value, ~0, CPSRWriteRaw);
+            break;
+        case UC_ARM_REG_SPSR:
+            env->spsr = *(uint32_t *)value;
+            break;
+        //case UC_ARM_REG_SP:
+        case UC_ARM_REG_R13:
+            env->regs[13] = *(uint32_t *)value;
+            break;
+        //case UC_ARM_REG_LR:
+        case UC_ARM_REG_R14:
+            env->regs[14] = *(uint32_t *)value;
+            break;
+        //case UC_ARM_REG_PC:
+        case UC_ARM_REG_R15:
+            env->pc = (*(uint32_t *)value & ~1);
+            env->thumb = (*(uint32_t *)value & 1);
+            env->uc->thumb = (*(uint32_t *)value & 1);
+            env->regs[15] = (*(uint32_t *)value & ~1);
+            break;
+        // case UC_ARM_REG_C1_C0_2:
+        //     env->cp15.c1_coproc = *(int32_t *)value;
+        //     break;
+
+        case UC_ARM_REG_C13_C0_3:
+            env->cp15.tpidrro_el[0] = *(int32_t *)value;
+            break;
+        case UC_ARM_REG_FPEXC:
+            env->vfp.xregs[ARM_VFP_FPEXC] = *(int32_t *)value;
+            break;
+        case UC_ARM_REG_IPSR:
+            v7m_msr_xpsr(env, 0b1000, 5, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_MSP:
+            helper_v7m_msr(env, 8, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_PSP:
+            helper_v7m_msr(env, 9, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_CONTROL:
+            helper_v7m_msr(env, 20, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_EPSR:
+            v7m_msr_xpsr(env, 0b1000, 6, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_IEPSR:
+            v7m_msr_xpsr(env, 0b1000, 7, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_PRIMASK:
+            helper_v7m_msr(env, 16, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_BASEPRI:
+            helper_v7m_msr(env, 17, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_BASEPRI_MAX:
+            helper_v7m_msr(env, 18, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_FAULTMASK:
+            helper_v7m_msr(env, 19, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_APSR_NZCVQ:
+            v7m_msr_xpsr(env, 0b1000, 0, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_APSR_G:
+            v7m_msr_xpsr(env, 0b0100, 0, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_APSR_NZCVQG:
+            v7m_msr_xpsr(env, 0b1100, 0, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_IAPSR:
+        case UC_ARM_REG_IAPSR_NZCVQ:
+            v7m_msr_xpsr(env, 0b1000, 1, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_IAPSR_G:
+            v7m_msr_xpsr(env, 0b0100, 1, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_IAPSR_NZCVQG:
+            v7m_msr_xpsr(env, 0b1100, 1, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_EAPSR:
+        case UC_ARM_REG_EAPSR_NZCVQ:
+            v7m_msr_xpsr(env, 0b1000, 2, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_EAPSR_G:
+            v7m_msr_xpsr(env, 0b0100, 2, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_EAPSR_NZCVQG:
+            v7m_msr_xpsr(env, 0b1100, 2, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_XPSR:
+        case UC_ARM_REG_XPSR_NZCVQ:
+            v7m_msr_xpsr(env, 0b1000, 3, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_XPSR_G:
+            v7m_msr_xpsr(env, 0b0100, 3, *(uint32_t *)value);
+            break;
+        case UC_ARM_REG_XPSR_NZCVQG:
+            v7m_msr_xpsr(env, 0b1100, 3, *(uint32_t *)value);
+            break;
+        }
+    }
+
+    return;
+}
+
+int arm_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count)
+{
+    CPUARMState *env = &(ARM_CPU(uc->cpu)->env);
+    int i;
+
+    for (i = 0; i < count; i++) {
+        unsigned int regid = regs[i];
+        void *value = vals[i];
+        reg_read(env, regid, value);
+    }
+
+    return 0;
+}
+
+int arm_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count)
+{
+    CPUArchState *env = &(ARM_CPU(uc->cpu)->env);
+    int i;
+
+    for (i = 0; i < count; i++) {
+        unsigned int regid = regs[i];
+        const void *value = vals[i];
+        reg_write(env, regid, value);
+        if(regid == UC_ARM_REG_R15){
+            // force execution to quit and flush the TB
+            uc->quit_request = true;
+            uc_emu_stop(uc);
+        }
+    }
+
+    return 0;
+}
+
+DEFAULT_VISIBILITY
+#ifdef TARGET_WORDS_BIGENDIAN
+int armeb_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count)
+#else
+int arm_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count)
+#endif
+{
+    CPUARMState *env = (CPUARMState *)ctx->data;
+    int i;
+
+    for (i = 0; i < count; i++) {
+        unsigned int regid = regs[i];
+        void *value = vals[i];
+        reg_read(env, regid, value);
+    }
+
+    return 0;
+}
+
+DEFAULT_VISIBILITY
+#ifdef TARGET_WORDS_BIGENDIAN
+int armeb_context_reg_write(struct uc_context *ctx, unsigned int *regs, void* const* vals, int count)
+#else
+int arm_context_reg_write(struct uc_context *ctx, unsigned int *regs, void* const* vals, int count)
+#endif
+{
+    CPUARMState *env = (CPUARMState *)ctx->data;
+    int i;
+
+    for (i = 0; i < count; i++) {
+        unsigned int regid = regs[i];
+        const void *value = vals[i];
+        reg_write(env, regid, value);
+    }
+
+    return 0;
+}
+
+static bool arm_stop_interrupt(struct uc_struct *uc, int intno)
+{
+    switch(intno) {
+    default:
+        return false;
+    case EXCP_UDEF:
+    case EXCP_YIELD:
+        return true;
+    case EXCP_INVSTATE:
+        uc->invalid_error = UC_ERR_EXCEPTION;
+        return true;
+    }
+}
+
+static uc_err arm_query(struct uc_struct *uc, uc_query_type type, size_t *result)
+{
+    CPUState *mycpu = uc->cpu;
+    uint32_t mode;
+
+    switch(type) {
+    case UC_QUERY_MODE:
+        // zero out ARM/THUMB mode
+        mode = uc->mode & ~(UC_MODE_ARM | UC_MODE_THUMB);
+        // THUMB mode or ARM mode
+        mode += ((ARM_CPU(mycpu)->env.thumb != 0)? UC_MODE_THUMB : UC_MODE_ARM);
+        *result = mode;
+        return UC_ERR_OK;
+    default:
+        return UC_ERR_ARG;
+    }
+}
+
+static int arm_cpus_init(struct uc_struct *uc, const char *cpu_model)
+{
+    ARMCPU *cpu;
+
+    cpu = cpu_arm_init(uc);
+    if (cpu == NULL) {
+        return -1;
+    }
+
+    return 0;
+}
+
+#ifdef TARGET_WORDS_BIGENDIAN
+void armeb_uc_init(struct uc_struct* uc)
+#else
+void arm_uc_init(struct uc_struct* uc)
+#endif
+{
+    uc->reg_read = arm_reg_read;
+    uc->reg_write = arm_reg_write;
+    uc->reg_reset = arm_reg_reset;
+    uc->set_pc = arm_set_pc;
+    uc->stop_interrupt = arm_stop_interrupt;
+    uc->release = arm_release;
+    uc->query = arm_query;
+    uc->cpus_init = arm_cpus_init;
+    uc->cpu_context_size = offsetof(CPUARMState, cpu_watchpoint);
+    uc_common_init(uc);
+}
diff --git a/qemu/target/arm/vec_helper.c b/qemu/target/arm/vec_helper.c
new file mode 100644
index 00000000..a1839eed
--- /dev/null
+++ b/qemu/target/arm/vec_helper.c
@@ -0,0 +1,1265 @@
+/*
+ * ARM AdvSIMD / SVE Vector Operations
+ *
+ * Copyright (c) 2018 Linaro
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "tcg/tcg-gvec-desc.h" +#include "fpu/softfloat.h" + + +/* Note that vector data is stored in host-endian 64-bit chunks, + so addressing units smaller than that needs a host-endian fixup. */ +#ifdef HOST_WORDS_BIGENDIAN +#define H1(x) ((x) ^ 7) +#define H2(x) ((x) ^ 3) +#define H4(x) ((x) ^ 1) +#else +#define H1(x) (x) +#define H2(x) (x) +#define H4(x) (x) +#endif + +#define SET_QC() env->vfp.qc[0] = 1 + +static void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz) +{ + uint64_t *d = (uint64_t *)((char *)vd + opr_sz); + uintptr_t i; + + for (i = opr_sz; i < max_sz; i += 8) { + *d++ = 0; + } +} + +/* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */ +static uint16_t inl_qrdmlah_s16(CPUARMState *env, int16_t src1, + int16_t src2, int16_t src3) +{ + /* Simplify: + * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16 + * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15 + */ + int32_t ret = (int32_t)src1 * src2; + ret = ((int32_t)src3 << 15) + ret + (1 << 14); + ret >>= 15; + if (ret != (int16_t)ret) { + SET_QC(); + ret = (ret < 0 ? -0x8000 : 0x7fff); + } + return ret; +} + +uint32_t HELPER(neon_qrdmlah_s16)(CPUARMState *env, uint32_t src1, + uint32_t src2, uint32_t src3) +{ + uint16_t e1 = inl_qrdmlah_s16(env, src1, src2, src3); + uint16_t e2 = inl_qrdmlah_s16(env, src1 >> 16, src2 >> 16, src3 >> 16); + return deposit32(e1, 16, 16, e2); +} + +void HELPER(gvec_qrdmlah_s16)(void *vd, void *vn, void *vm, + void *ve, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + int16_t *d = vd; + int16_t *n = vn; + int16_t *m = vm; + CPUARMState *env = ve; + uintptr_t i; + + for (i = 0; i < opr_sz / 2; ++i) { + d[i] = inl_qrdmlah_s16(env, n[i], m[i], d[i]); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +/* Signed saturating rounding doubling multiply-subtract high half, 16-bit */ +static uint16_t inl_qrdmlsh_s16(CPUARMState *env, int16_t src1, + int16_t src2, int16_t src3) +{ + /* Similarly, using subtraction: + * = ((a3 << 16) - ((e1 * e2) << 1) + (1 << 15)) >> 16 + * = ((a3 << 15) - (e1 * e2) + (1 << 14)) >> 15 + */ + int32_t ret = (int32_t)src1 * src2; + ret = ((int32_t)src3 << 15) - ret + (1 << 14); + ret >>= 15; + if (ret != (int16_t)ret) { + SET_QC(); + ret = (ret < 0 ? -0x8000 : 0x7fff); + } + return ret; +} + +uint32_t HELPER(neon_qrdmlsh_s16)(CPUARMState *env, uint32_t src1, + uint32_t src2, uint32_t src3) +{ + uint16_t e1 = inl_qrdmlsh_s16(env, src1, src2, src3); + uint16_t e2 = inl_qrdmlsh_s16(env, src1 >> 16, src2 >> 16, src3 >> 16); + return deposit32(e1, 16, 16, e2); +} + +void HELPER(gvec_qrdmlsh_s16)(void *vd, void *vn, void *vm, + void *ve, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + int16_t *d = vd; + int16_t *n = vn; + int16_t *m = vm; + CPUARMState *env = ve; + uintptr_t i; + + for (i = 0; i < opr_sz / 2; ++i) { + d[i] = inl_qrdmlsh_s16(env, n[i], m[i], d[i]); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +/* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */ +uint32_t HELPER(neon_qrdmlah_s32)(CPUARMState *env, int32_t src1, + int32_t src2, int32_t src3) +{ + /* Simplify similarly to int_qrdmlah_s16 above. */ + int64_t ret = (int64_t)src1 * src2; + ret = ((int64_t)src3 << 31) + ret + (1 << 30); + ret >>= 31; + if (ret != (int32_t)ret) { + SET_QC(); + ret = (ret < 0 ? 
INT32_MIN : INT32_MAX); + } + return ret; +} + +void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm, + void *ve, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + int32_t *d = vd; + int32_t *n = vn; + int32_t *m = vm; + CPUARMState *env = ve; + uintptr_t i; + + for (i = 0; i < opr_sz / 4; ++i) { + d[i] = helper_neon_qrdmlah_s32(env, n[i], m[i], d[i]); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +/* Signed saturating rounding doubling multiply-subtract high half, 32-bit */ +uint32_t HELPER(neon_qrdmlsh_s32)(CPUARMState *env, int32_t src1, + int32_t src2, int32_t src3) +{ + /* Simplify similarly to int_qrdmlsh_s16 above. */ + int64_t ret = (int64_t)src1 * src2; + ret = ((int64_t)src3 << 31) - ret + (1 << 30); + ret >>= 31; + if (ret != (int32_t)ret) { + SET_QC(); + ret = (ret < 0 ? INT32_MIN : INT32_MAX); + } + return ret; +} + +void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm, + void *ve, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + int32_t *d = vd; + int32_t *n = vn; + int32_t *m = vm; + CPUARMState *env = ve; + uintptr_t i; + + for (i = 0; i < opr_sz / 4; ++i) { + d[i] = helper_neon_qrdmlsh_s32(env, n[i], m[i], d[i]); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +/* Integer 8 and 16-bit dot-product. + * + * Note that for the loops herein, host endianness does not matter + * with respect to the ordering of data within the 64-bit lanes. + * All elements are treated equally, no matter where they are. + */ + +void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + uint32_t *d = vd; + int8_t *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 4; ++i) { + d[i] += n[i * 4 + 0] * m[i * 4 + 0] + + n[i * 4 + 1] * m[i * 4 + 1] + + n[i * 4 + 2] * m[i * 4 + 2] + + n[i * 4 + 3] * m[i * 4 + 3]; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + uint32_t *d = vd; + uint8_t *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 4; ++i) { + d[i] += n[i * 4 + 0] * m[i * 4 + 0] + + n[i * 4 + 1] * m[i * 4 + 1] + + n[i * 4 + 2] * m[i * 4 + 2] + + n[i * 4 + 3] * m[i * 4 + 3]; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + uint64_t *d = vd; + int16_t *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 8; ++i) { + d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0] + + (int64_t)n[i * 4 + 1] * m[i * 4 + 1] + + (int64_t)n[i * 4 + 2] * m[i * 4 + 2] + + (int64_t)n[i * 4 + 3] * m[i * 4 + 3]; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + uint64_t *d = vd; + uint16_t *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 8; ++i) { + d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0] + + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1] + + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2] + + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3]; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; + intptr_t index = simd_data(desc); + uint32_t *d = vd; + int8_t *n = vn; + int8_t *m_indexed = (int8_t *)vm + index * 4; + + /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd. + * Otherwise opr_sz is a multiple of 16. 
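+     * The nested do/while below therefore walks one 16-byte segment at a
+     * time: the four m bytes selected by `index` are fetched once per
+     * segment and reused for each of its 32-bit lanes, with segend
+     * advancing four lanes per outer iteration.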
+ */ + segend = MIN(4, opr_sz_4); + i = 0; + do { + int8_t m0 = m_indexed[i * 4 + 0]; + int8_t m1 = m_indexed[i * 4 + 1]; + int8_t m2 = m_indexed[i * 4 + 2]; + int8_t m3 = m_indexed[i * 4 + 3]; + + do { + d[i] += n[i * 4 + 0] * m0 + + n[i * 4 + 1] * m1 + + n[i * 4 + 2] * m2 + + n[i * 4 + 3] * m3; + } while (++i < segend); + segend = i + 4; + } while (i < opr_sz_4); + + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4; + intptr_t index = simd_data(desc); + uint32_t *d = vd; + uint8_t *n = vn; + uint8_t *m_indexed = (uint8_t *)vm + index * 4; + + /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd. + * Otherwise opr_sz is a multiple of 16. + */ + segend = MIN(4, opr_sz_4); + i = 0; + do { + uint8_t m0 = m_indexed[i * 4 + 0]; + uint8_t m1 = m_indexed[i * 4 + 1]; + uint8_t m2 = m_indexed[i * 4 + 2]; + uint8_t m3 = m_indexed[i * 4 + 3]; + + do { + d[i] += n[i * 4 + 0] * m0 + + n[i * 4 + 1] * m1 + + n[i * 4 + 2] * m2 + + n[i * 4 + 3] * m3; + } while (++i < segend); + segend = i + 4; + } while (i < opr_sz_4); + + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; + intptr_t index = simd_data(desc); + uint64_t *d = vd; + int16_t *n = vn; + int16_t *m_indexed = (int16_t *)vm + index * 4; + + /* This is supported by SVE only, so opr_sz is always a multiple of 16. + * Process the entire segment all at once, writing back the results + * only after we've consumed all of the inputs. + */ + for (i = 0; i < opr_sz_8 ; i += 2) { + uint64_t d0, d1; + + d0 = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0]; + d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1]; + d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2]; + d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3]; + d1 = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0]; + d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1]; + d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2]; + d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3]; + + d[i + 0] += d0; + d[i + 1] += d1; + } + + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8; + intptr_t index = simd_data(desc); + uint64_t *d = vd; + uint16_t *n = vn; + uint16_t *m_indexed = (uint16_t *)vm + index * 4; + + /* This is supported by SVE only, so opr_sz is always a multiple of 16. + * Process the entire segment all at once, writing back the results + * only after we've consumed all of the inputs. 
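+     * (vd may alias vn or vm, so d0 and d1 are accumulated in locals
+     * and only written back after all source lanes have been read.)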
+ */ + for (i = 0; i < opr_sz_8 ; i += 2) { + uint64_t d0, d1; + + d0 = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0]; + d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1]; + d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2]; + d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3]; + d1 = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0]; + d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1]; + d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2]; + d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3]; + + d[i + 0] += d0; + d[i + 1] += d1; + } + + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm, + void *vfpst, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + float16 *d = vd; + float16 *n = vn; + float16 *m = vm; + float_status *fpst = vfpst; + uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t neg_imag = neg_real ^ 1; + uintptr_t i; + + /* Shift boolean to the sign bit so we can xor to negate. */ + neg_real <<= 15; + neg_imag <<= 15; + + for (i = 0; i < opr_sz / 2; i += 2) { + float16 e0 = n[H2(i)]; + float16 e1 = m[H2(i + 1)] ^ neg_imag; + float16 e2 = n[H2(i + 1)]; + float16 e3 = m[H2(i)] ^ neg_real; + + d[H2(i)] = float16_add(e0, e1, fpst); + d[H2(i + 1)] = float16_add(e2, e3, fpst); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm, + void *vfpst, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + float32 *d = vd; + float32 *n = vn; + float32 *m = vm; + float_status *fpst = vfpst; + uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t neg_imag = neg_real ^ 1; + uintptr_t i; + + /* Shift boolean to the sign bit so we can xor to negate. */ + neg_real <<= 31; + neg_imag <<= 31; + + for (i = 0; i < opr_sz / 4; i += 2) { + float32 e0 = n[H4(i)]; + float32 e1 = m[H4(i + 1)] ^ neg_imag; + float32 e2 = n[H4(i + 1)]; + float32 e3 = m[H4(i)] ^ neg_real; + + d[H4(i)] = float32_add(e0, e1, fpst); + d[H4(i + 1)] = float32_add(e2, e3, fpst); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm, + void *vfpst, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + float64 *d = vd; + float64 *n = vn; + float64 *m = vm; + float_status *fpst = vfpst; + uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1); + uint64_t neg_imag = neg_real ^ 1; + uintptr_t i; + + /* Shift boolean to the sign bit so we can xor to negate. */ + neg_real <<= 63; + neg_imag <<= 63; + + for (i = 0; i < opr_sz / 8; i += 2) { + float64 e0 = n[i]; + float64 e1 = m[i + 1] ^ neg_imag; + float64 e2 = n[i + 1]; + float64 e3 = m[i] ^ neg_real; + + d[i] = float64_add(e0, e1, fpst); + d[i + 1] = float64_add(e2, e3, fpst); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, + void *vfpst, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + float16 *d = vd; + float16 *n = vn; + float16 *m = vm; + float_status *fpst = vfpst; + intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t neg_real = flip ^ neg_imag; + uintptr_t i; + + /* Shift boolean to the sign bit so we can xor to negate. 
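+     * E.g. neg_imag = 1 becomes 0x8000 after the shift below, and
+     * x ^ 0x8000 flips only the sign bit of a float16 lane.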
*/ + neg_real <<= 15; + neg_imag <<= 15; + + for (i = 0; i < opr_sz / 2; i += 2) { + float16 e2 = n[H2(i + flip)]; + float16 e1 = m[H2(i + flip)] ^ neg_real; + float16 e4 = e2; + float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag; + + d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst); + d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, + void *vfpst, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + float16 *d = vd; + float16 *n = vn; + float16 *m = vm; + float_status *fpst = vfpst; + intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2); + uint32_t neg_real = flip ^ neg_imag; + intptr_t elements = opr_sz / sizeof(float16); + intptr_t eltspersegment = 16 / sizeof(float16); + intptr_t i, j; + + /* Shift boolean to the sign bit so we can xor to negate. */ + neg_real <<= 15; + neg_imag <<= 15; + + for (i = 0; i < elements; i += eltspersegment) { + float16 mr = m[H2(i + 2 * index + 0)]; + float16 mi = m[H2(i + 2 * index + 1)]; + float16 e1 = neg_real ^ (flip ? mi : mr); + float16 e3 = neg_imag ^ (flip ? mr : mi); + + for (j = i; j < i + eltspersegment; j += 2) { + float16 e2 = n[H2(j + flip)]; + float16 e4 = e2; + + d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst); + d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst); + } + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, + void *vfpst, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + float32 *d = vd; + float32 *n = vn; + float32 *m = vm; + float_status *fpst = vfpst; + intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t neg_real = flip ^ neg_imag; + uintptr_t i; + + /* Shift boolean to the sign bit so we can xor to negate. */ + neg_real <<= 31; + neg_imag <<= 31; + + for (i = 0; i < opr_sz / 4; i += 2) { + float32 e2 = n[H4(i + flip)]; + float32 e1 = m[H4(i + flip)] ^ neg_real; + float32 e4 = e2; + float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag; + + d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst); + d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, + void *vfpst, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + float32 *d = vd; + float32 *n = vn; + float32 *m = vm; + float_status *fpst = vfpst; + intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2); + uint32_t neg_real = flip ^ neg_imag; + intptr_t elements = opr_sz / sizeof(float32); + intptr_t eltspersegment = 16 / sizeof(float32); + intptr_t i, j; + + /* Shift boolean to the sign bit so we can xor to negate. */ + neg_real <<= 31; + neg_imag <<= 31; + + for (i = 0; i < elements; i += eltspersegment) { + float32 mr = m[H4(i + 2 * index + 0)]; + float32 mi = m[H4(i + 2 * index + 1)]; + float32 e1 = neg_real ^ (flip ? mi : mr); + float32 e3 = neg_imag ^ (flip ? 
mr : mi); + + for (j = i; j < i + eltspersegment; j += 2) { + float32 e2 = n[H4(j + flip)]; + float32 e4 = e2; + + d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst); + d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst); + } + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, + void *vfpst, uint32_t desc) +{ + uintptr_t opr_sz = simd_oprsz(desc); + float64 *d = vd; + float64 *n = vn; + float64 *m = vm; + float_status *fpst = vfpst; + intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint64_t neg_real = flip ^ neg_imag; + uintptr_t i; + + /* Shift boolean to the sign bit so we can xor to negate. */ + neg_real <<= 63; + neg_imag <<= 63; + + for (i = 0; i < opr_sz / 8; i += 2) { + float64 e2 = n[i + flip]; + float64 e1 = m[i + flip] ^ neg_real; + float64 e4 = e2; + float64 e3 = m[i + 1 - flip] ^ neg_imag; + + d[i] = float64_muladd(e2, e1, d[i], 0, fpst); + d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +#define DO_2OP(NAME, FUNC, TYPE) \ +void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \ +{ \ + intptr_t i, oprsz = simd_oprsz(desc); \ + TYPE *d = vd, *n = vn; \ + for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ + d[i] = FUNC(n[i], stat); \ + } \ + clear_tail(d, oprsz, simd_maxsz(desc)); \ +} + +DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16) +DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32) +DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64) + +DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16) +DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32) +DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64) + +#undef DO_2OP + +/* Floating-point trigonometric starting value. + * See the ARM ARM pseudocode function FPTrigSMul. 
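+ * FTSMUL computes op1 * op1 and sets the sign of the product from
+ * bit 0 of op2; the result seeds the polynomial evaluation performed
+ * by the FTMAD/FTSSEL sin/cos sequences.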
+ */ +static float16 float16_ftsmul(float16 op1, uint16_t op2, float_status *stat) +{ + float16 result = float16_mul(op1, op1, stat); + if (!float16_is_any_nan(result)) { + result = float16_set_sign(result, op2 & 1); + } + return result; +} + +static float32 float32_ftsmul(float32 op1, uint32_t op2, float_status *stat) +{ + float32 result = float32_mul(op1, op1, stat); + if (!float32_is_any_nan(result)) { + result = float32_set_sign(result, op2 & 1); + } + return result; +} + +static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat) +{ + float64 result = float64_mul(op1, op1, stat); + if (!float64_is_any_nan(result)) { + result = float64_set_sign(result, op2 & 1); + } + return result; +} + +#define DO_3OP(NAME, FUNC, TYPE) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ +{ \ + intptr_t i, oprsz = simd_oprsz(desc); \ + TYPE *d = vd, *n = vn, *m = vm; \ + for (i = 0; i < oprsz / sizeof(TYPE); i++) { \ + d[i] = FUNC(n[i], m[i], stat); \ + } \ + clear_tail(d, oprsz, simd_maxsz(desc)); \ +} + +DO_3OP(gvec_fadd_h, float16_add, float16) +DO_3OP(gvec_fadd_s, float32_add, float32) +DO_3OP(gvec_fadd_d, float64_add, float64) + +DO_3OP(gvec_fsub_h, float16_sub, float16) +DO_3OP(gvec_fsub_s, float32_sub, float32) +DO_3OP(gvec_fsub_d, float64_sub, float64) + +DO_3OP(gvec_fmul_h, float16_mul, float16) +DO_3OP(gvec_fmul_s, float32_mul, float32) +DO_3OP(gvec_fmul_d, float64_mul, float64) + +DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16) +DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32) +DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64) + +#ifdef TARGET_AARCH64 + +DO_3OP(gvec_recps_h, helper_recpsf_f16, float16) +DO_3OP(gvec_recps_s, helper_recpsf_f32, float32) +DO_3OP(gvec_recps_d, helper_recpsf_f64, float64) + +DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16) +DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32) +DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64) + +#endif +#undef DO_3OP + +/* For the indexed ops, SVE applies the index per 128-bit vector segment. + * For AdvSIMD, there is of course only one such vector segment. 
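+ * E.g. with 32-bit elements, a 256-bit SVE vector holds two 16-byte
+ * segments; DO_MUL_IDX below reloads m[H(i + idx)] at the top of each
+ * segment, so lanes 0-3 and lanes 4-7 each multiply by the idx'th
+ * element of their own segment.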
+ */ + +#define DO_MUL_IDX(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ +{ \ + intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \ + intptr_t idx = simd_data(desc); \ + TYPE *d = vd, *n = vn, *m = vm; \ + for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ + TYPE mm = m[H(i + idx)]; \ + for (j = 0; j < segment; j++) { \ + d[i + j] = TYPE##_mul(n[i + j], mm, stat); \ + } \ + } \ + clear_tail(d, oprsz, simd_maxsz(desc)); \ +} + +DO_MUL_IDX(gvec_fmul_idx_h, float16, H2) +DO_MUL_IDX(gvec_fmul_idx_s, float32, H4) +DO_MUL_IDX(gvec_fmul_idx_d, float64, ) + +#undef DO_MUL_IDX + +#define DO_FMLA_IDX(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \ + void *stat, uint32_t desc) \ +{ \ + intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \ + TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1); \ + intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1); \ + TYPE *d = vd, *n = vn, *m = vm, *a = va; \ + op1_neg <<= (8 * sizeof(TYPE) - 1); \ + for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ + TYPE mm = m[H(i + idx)]; \ + for (j = 0; j < segment; j++) { \ + d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg, \ + mm, a[i + j], 0, stat); \ + } \ + } \ + clear_tail(d, oprsz, simd_maxsz(desc)); \ +} + +DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2) +DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4) +DO_FMLA_IDX(gvec_fmla_idx_d, float64, ) + +#undef DO_FMLA_IDX + +#define DO_SAT(NAME, WTYPE, TYPEN, TYPEM, OP, MIN, MAX) \ +void HELPER(NAME)(void *vd, void *vq, void *vn, void *vm, uint32_t desc) \ +{ \ + intptr_t i, oprsz = simd_oprsz(desc); \ + TYPEN *d = vd, *n = vn; TYPEM *m = vm; \ + bool q = false; \ + for (i = 0; i < oprsz / sizeof(TYPEN); i++) { \ + WTYPE dd = (WTYPE)n[i] OP m[i]; \ + if (dd < MIN) { \ + dd = MIN; \ + q = true; \ + } else if (dd > MAX) { \ + dd = MAX; \ + q = true; \ + } \ + d[i] = dd; \ + } \ + if (q) { \ + uint32_t *qc = vq; \ + qc[0] = 1; \ + } \ + clear_tail(d, oprsz, simd_maxsz(desc)); \ +} + +DO_SAT(gvec_uqadd_b, int, uint8_t, uint8_t, +, 0, UINT8_MAX) +DO_SAT(gvec_uqadd_h, int, uint16_t, uint16_t, +, 0, UINT16_MAX) +DO_SAT(gvec_uqadd_s, int64_t, uint32_t, uint32_t, +, 0, UINT32_MAX) + +DO_SAT(gvec_sqadd_b, int, int8_t, int8_t, +, INT8_MIN, INT8_MAX) +DO_SAT(gvec_sqadd_h, int, int16_t, int16_t, +, INT16_MIN, INT16_MAX) +DO_SAT(gvec_sqadd_s, int64_t, int32_t, int32_t, +, INT32_MIN, INT32_MAX) + +DO_SAT(gvec_uqsub_b, int, uint8_t, uint8_t, -, 0, UINT8_MAX) +DO_SAT(gvec_uqsub_h, int, uint16_t, uint16_t, -, 0, UINT16_MAX) +DO_SAT(gvec_uqsub_s, int64_t, uint32_t, uint32_t, -, 0, UINT32_MAX) + +DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX) +DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX) +DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX) + +#undef DO_SAT + +void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn, + void *vm, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + uint64_t *d = vd, *n = vn, *m = vm; + bool q = false; + + for (i = 0; i < oprsz / 8; i++) { + uint64_t nn = n[i], mm = m[i], dd = nn + mm; + if (dd < nn) { + dd = UINT64_MAX; + q = true; + } + d[i] = dd; + } + if (q) { + uint32_t *qc = vq; + qc[0] = 1; + } + clear_tail(d, oprsz, simd_maxsz(desc)); +} + +void HELPER(gvec_uqsub_d)(void *vd, void *vq, void *vn, + void *vm, uint32_t desc) +{ + intptr_t i, oprsz = simd_oprsz(desc); + uint64_t *d = vd, *n = vn, *m = vm; + bool q = false; + + for (i = 0; i < oprsz / 8; i++) { + uint64_t nn = n[i], mm = m[i], 
dd = nn - mm;
+        if (nn < mm) {
+            dd = 0;
+            q = true;
+        }
+        d[i] = dd;
+    }
+    if (q) {
+        uint32_t *qc = vq;
+        qc[0] = 1;
+    }
+    clear_tail(d, oprsz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_sqadd_d)(void *vd, void *vq, void *vn,
+                          void *vm, uint32_t desc)
+{
+    intptr_t i, oprsz = simd_oprsz(desc);
+    int64_t *d = vd, *n = vn, *m = vm;
+    bool q = false;
+
+    for (i = 0; i < oprsz / 8; i++) {
+        int64_t nn = n[i], mm = m[i], dd = nn + mm;
+        if (((dd ^ nn) & ~(nn ^ mm)) & INT64_MIN) {
+            dd = (nn >> 63) ^ ~INT64_MIN;
+            q = true;
+        }
+        d[i] = dd;
+    }
+    if (q) {
+        uint32_t *qc = vq;
+        qc[0] = 1;
+    }
+    clear_tail(d, oprsz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
+                          void *vm, uint32_t desc)
+{
+    intptr_t i, oprsz = simd_oprsz(desc);
+    int64_t *d = vd, *n = vn, *m = vm;
+    bool q = false;
+
+    for (i = 0; i < oprsz / 8; i++) {
+        int64_t nn = n[i], mm = m[i], dd = nn - mm;
+        if (((dd ^ nn) & (nn ^ mm)) & INT64_MIN) {
+            dd = (nn >> 63) ^ ~INT64_MIN;
+            q = true;
+        }
+        d[i] = dd;
+    }
+    if (q) {
+        uint32_t *qc = vq;
+        qc[0] = 1;
+    }
+    clear_tail(d, oprsz, simd_maxsz(desc));
+}
+
+/*
+ * Convert float16 to float32, raising no exceptions and
+ * preserving exceptional values, including SNaN.
+ * This is effectively an unpack+repack operation.
+ */
+static float32 float16_to_float32_by_bits(uint32_t f16, bool fz16)
+{
+    const int f16_bias = 15;
+    const int f32_bias = 127;
+    uint32_t sign = extract32(f16, 15, 1);
+    uint32_t exp = extract32(f16, 10, 5);
+    uint32_t frac = extract32(f16, 0, 10);
+
+    if (exp == 0x1f) {
+        /* Inf or NaN */
+        exp = 0xff;
+    } else if (exp == 0) {
+        /* Zero or denormal. */
+        if (frac != 0) {
+            if (fz16) {
+                frac = 0;
+            } else {
+                /*
+                 * Denormal; these are all normal float32.
+                 * Shift the fraction so that the msb is at bit 11,
+                 * then remove bit 11 as the implicit bit of the
+                 * normalized float32. Note that we still go through
+                 * the shift for normal numbers below, to put the
+                 * float32 fraction at the right place.
+                 */
+                int shift = clz32(frac) - 21;
+                frac = (frac << shift) & 0x3ff;
+                exp = f32_bias - f16_bias - shift + 1;
+            }
+        }
+    } else {
+        /* Normal number; adjust the bias. */
+        exp += f32_bias - f16_bias;
+    }
+    sign <<= 31;
+    exp <<= 23;
+    frac <<= 23 - 10;
+
+    return sign | exp | frac;
+}
+
+static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2)
+{
+    /*
+     * Branchless load of u32[0], u64[0], u32[1], or u64[1].
+     * Load the 2nd qword iff is_q & is_2.
+     * Shift to the 2nd dword iff !is_q & is_2.
+     * For !is_q & !is_2, the upper bits of the result are garbage.
+     */
+    return ptr[is_q & is_2] >> ((is_2 & ~is_q) << 5);
+}
+
+/*
+ * Note that FMLAL requires oprsz == 8 or oprsz == 16,
+ * as there are not yet SVE versions that might use blocking.
+ */
+
+static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst,
+                     uint32_t desc, bool fz16)
+{
+    intptr_t i, oprsz = simd_oprsz(desc);
+    int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+    int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+    int is_q = oprsz == 16;
+    uint64_t n_4, m_4;
+
+    /* Pre-load all of the f16 data, avoiding overlap issues. */
+    n_4 = load4_f16(vn, is_q, is_2);
+    m_4 = load4_f16(vm, is_q, is_2);
+
+    /* Negate all inputs for FMLSL at once.
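+     * XORing 0x8000 into each 16-bit lane flips the four half-precision
+     * sign bits, so the fused multiply-adds below become the
+     * multiply-subtracts that FMLSL requires.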
*/ + if (is_s) { + n_4 ^= 0x8000800080008000ull; + } + + for (i = 0; i < oprsz / 4; i++) { + float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16); + float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16); + d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst); + } + clear_tail(d, oprsz, simd_maxsz(desc)); +} + +void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm, + void *venv, uint32_t desc) +{ + CPUARMState *env = venv; + do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc, + get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); +} + +void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, + void *venv, uint32_t desc) +{ + CPUARMState *env = venv; + do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc, + get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); +} + +static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst, + uint32_t desc, bool fz16) +{ + intptr_t i, oprsz = simd_oprsz(desc); + int is_s = extract32(desc, SIMD_DATA_SHIFT, 1); + int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3); + int is_q = oprsz == 16; + uint64_t n_4; + float32 m_1; + + /* Pre-load all of the f16 data, avoiding overlap issues. */ + n_4 = load4_f16(vn, is_q, is_2); + + /* Negate all inputs for FMLSL at once. */ + if (is_s) { + n_4 ^= 0x8000800080008000ull; + } + + m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16); + + for (i = 0; i < oprsz / 4; i++) { + float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16); + d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst); + } + clear_tail(d, oprsz, simd_maxsz(desc)); +} + +void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm, + void *venv, uint32_t desc) +{ + CPUARMState *env = venv; + do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc, + get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); +} + +void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, + void *venv, uint32_t desc) +{ + CPUARMState *env = venv; + do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc, + get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); +} + +void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + int8_t *d = vd, *n = vn, *m = vm; + + for (i = 0; i < opr_sz; ++i) { + int8_t mm = m[i]; + int8_t nn = n[i]; + int8_t res = 0; + if (mm >= 0) { + if (mm < 8) { + res = nn << mm; + } + } else { + res = nn >> (mm > -8 ? -mm : 7); + } + d[i] = res; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + int16_t *d = vd, *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 2; ++i) { + int8_t mm = m[i]; /* only 8 bits of shift are significant */ + int16_t nn = n[i]; + int16_t res = 0; + if (mm >= 0) { + if (mm < 16) { + res = nn << mm; + } + } else { + res = nn >> (mm > -16 ? 
-mm : 15);
+        }
+        d[i] = res;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    uint8_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz; ++i) {
+        int8_t mm = m[i];
+        uint8_t nn = n[i];
+        uint8_t res = 0;
+        if (mm >= 0) {
+            if (mm < 8) {
+                res = nn << mm;
+            }
+        } else {
+            if (mm > -8) {
+                res = nn >> -mm;
+            }
+        }
+        d[i] = res;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc);
+    uint16_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz / 2; ++i) {
+        int8_t mm = m[i];   /* only 8 bits of shift are significant */
+        uint16_t nn = n[i];
+        uint16_t res = 0;
+        if (mm >= 0) {
+            if (mm < 16) {
+                res = nn << mm;
+            }
+        } else {
+            if (mm > -16) {
+                res = nn >> -mm;
+            }
+        }
+        d[i] = res;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+/*
+ * 8x8->8 polynomial multiply.
+ *
+ * Polynomial multiplication is like integer multiplication except the
+ * partial products are XORed, not added.
+ *
+ * TODO: expose this as a generic vector operation, as it is a common
+ * crypto building block.
+ */
+void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    uint64_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz / 8; ++i) {
+        uint64_t nn = n[i];
+        uint64_t mm = m[i];
+        uint64_t rr = 0;
+
+        for (j = 0; j < 8; ++j) {
+            uint64_t mask = (nn & 0x0101010101010101ull) * 0xff;
+            rr ^= mm & mask;
+            mm = (mm << 1) & 0xfefefefefefefefeull;
+            nn >>= 1;
+        }
+        d[i] = rr;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+/*
+ * 64x64->128 polynomial multiply.
+ * Because the lanes are not accessed in strict columns,
+ * this probably cannot be turned into a generic helper.
+ */
+void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    intptr_t hi = simd_data(desc);
+    uint64_t *d = vd, *n = vn, *m = vm;
+
+    for (i = 0; i < opr_sz / 8; i += 2) {
+        uint64_t nn = n[i + hi];
+        uint64_t mm = m[i + hi];
+        uint64_t rhi = 0;
+        uint64_t rlo = 0;
+
+        /* Bit 0 can only influence the low 64-bit result. */
+        if (nn & 1) {
+            rlo = mm;
+        }
+
+        for (j = 1; j < 64; ++j) {
+#ifdef _MSC_VER
+            uint64_t mask = 0 - ((nn >> j) & 1);
+#else
+            uint64_t mask = -((nn >> j) & 1);
+#endif
+            rlo ^= (mm << j) & mask;
+            rhi ^= (mm >> (64 - j)) & mask;
+        }
+        d[i] = rlo;
+        d[i + 1] = rhi;
+    }
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+/*
+ * 8x8->16 polynomial multiply.
+ *
+ * The byte inputs are expanded to (or extracted from) half-words.
+ * Note that neon and sve2 get the inputs from different positions.
+ * This allows 4 bytes to be processed in parallel with uint64_t.
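+ * For example, expand_byte_to_half(0xaabbccdd) yields
+ * 0x00aa00bb00cc00dd, after which pmull_h() performs eight
+ * shift-and-XOR steps, one per bit of the byte-wide operands.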
+ */ + +static uint64_t expand_byte_to_half(uint64_t x) +{ + return (x & 0x000000ff) + | ((x & 0x0000ff00) << 8) + | ((x & 0x00ff0000) << 16) + | ((x & 0xff000000) << 24); +} + +static uint64_t pmull_h(uint64_t op1, uint64_t op2) +{ + uint64_t result = 0; + int i; + + for (i = 0; i < 8; ++i) { + uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff; + result ^= op2 & mask; + op1 >>= 1; + op2 <<= 1; + } + return result; +} + +void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + int hi = simd_data(desc); + uint64_t *d = vd, *n = vn, *m = vm; + uint64_t nn = n[hi], mm = m[hi]; + + d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm)); + nn >>= 32; + mm >>= 32; + d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm)); + + clear_tail(d, 16, simd_maxsz(desc)); +} + +#ifdef TARGET_AARCH64 +void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + int shift = simd_data(desc) * 8; + intptr_t i, opr_sz = simd_oprsz(desc); + uint64_t *d = vd, *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 8; ++i) { + uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull; + uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull; + + d[i] = pmull_h(nn, mm); + } +} +#endif diff --git a/qemu/target/arm/vfp_helper.c b/qemu/target/arm/vfp_helper.c new file mode 100644 index 00000000..55bce595 --- /dev/null +++ b/qemu/target/arm/vfp_helper.c @@ -0,0 +1,1364 @@ +/* + * ARM VFP floating-point operations + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "internals.h" +#include "qemu/log.h" +#include "fpu/softfloat.h" + +/* VFP support. We follow the convention used for VFP instructions: + Single precision routines have a "s" suffix, double precision a + "d" suffix. */ + +/* Convert host exception flags to vfp form. */ +static inline int vfp_exceptbits_from_host(int host_bits) +{ + int target_bits = 0; + + if (host_bits & float_flag_invalid) { + target_bits |= 1; + } + if (host_bits & float_flag_divbyzero) { + target_bits |= 2; + } + if (host_bits & float_flag_overflow) { + target_bits |= 4; + } + if (host_bits & (float_flag_underflow | float_flag_output_denormal)) { + target_bits |= 8; + } + if (host_bits & float_flag_inexact) { + target_bits |= 0x10; + } + if (host_bits & float_flag_input_denormal) { + target_bits |= 0x80; + } + return target_bits; +} + +/* Convert vfp exception flags to target form. 
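+ * (i.e. back from FPSCR bits to host softfloat flags: IOC is bit 0,
+ * DZC bit 1, OFC bit 2, UFC bit 3, IXC bit 4 and IDC bit 7, hence the
+ * masks 1, 2, 4, 8, 0x10 and 0x80 used in both helpers.)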
*/ +static inline int vfp_exceptbits_to_host(int target_bits) +{ + int host_bits = 0; + + if (target_bits & 1) { + host_bits |= float_flag_invalid; + } + if (target_bits & 2) { + host_bits |= float_flag_divbyzero; + } + if (target_bits & 4) { + host_bits |= float_flag_overflow; + } + if (target_bits & 8) { + host_bits |= float_flag_underflow; + } + if (target_bits & 0x10) { + host_bits |= float_flag_inexact; + } + if (target_bits & 0x80) { + host_bits |= float_flag_input_denormal; + } + return host_bits; +} + +static uint32_t vfp_get_fpscr_from_host(CPUARMState *env) +{ + uint32_t i; + + i = get_float_exception_flags(&env->vfp.fp_status); + i |= get_float_exception_flags(&env->vfp.standard_fp_status); + /* FZ16 does not generate an input denormal exception. */ + i |= (get_float_exception_flags(&env->vfp.fp_status_f16) + & ~float_flag_input_denormal); + return vfp_exceptbits_from_host(i); +} + +static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val) +{ + int i; + uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR]; + + changed ^= val; + if (changed & (3 << 22)) { + i = (val >> 22) & 3; + switch (i) { + case FPROUNDING_TIEEVEN: + i = float_round_nearest_even; + break; + case FPROUNDING_POSINF: + i = float_round_up; + break; + case FPROUNDING_NEGINF: + i = float_round_down; + break; + case FPROUNDING_ZERO: + i = float_round_to_zero; + break; + } + set_float_rounding_mode(i, &env->vfp.fp_status); + set_float_rounding_mode(i, &env->vfp.fp_status_f16); + } + if (changed & FPCR_FZ16) { + bool ftz_enabled = val & FPCR_FZ16; + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16); + } + if (changed & FPCR_FZ) { + bool ftz_enabled = val & FPCR_FZ; + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status); + } + if (changed & FPCR_DN) { + bool dnan_enabled = val & FPCR_DN; + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16); + } + + /* + * The exception flags are ORed together when we read fpscr so we + * only need to preserve the current state in one of our + * float_status values. + */ + i = vfp_exceptbits_to_host(val); + set_float_exception_flags(i, &env->vfp.fp_status); + set_float_exception_flags(0, &env->vfp.fp_status_f16); + set_float_exception_flags(0, &env->vfp.standard_fp_status); +} + +uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) +{ + uint32_t i, fpscr; + + fpscr = env->vfp.xregs[ARM_VFP_FPSCR] + | (env->vfp.vec_len << 16) + | (env->vfp.vec_stride << 20); + + fpscr |= vfp_get_fpscr_from_host(env); + + i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3]; + fpscr |= i ? FPCR_QC : 0; + + return fpscr; +} + +uint32_t vfp_get_fpscr(CPUARMState *env) +{ + return HELPER(vfp_get_fpscr)(env); +} + +void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) +{ + /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ + if (!cpu_isar_feature(any_fp16, env_archcpu(env))) { + val &= ~FPCR_FZ16; + } + + if (arm_feature(env, ARM_FEATURE_M)) { + /* + * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits + * and also for the trapped-exception-handling bits IxE. + */ + val &= 0xf7c0009f; + } + + vfp_set_fpscr_to_host(env, val); + + /* + * We don't implement trapped exception handling, so the + * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!) 
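+ * ("RAZ/WI" meaning the bits read back as zero and writes to them
+ * are ignored, rather than the bits being architecturally reserved.)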
+ * + * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC + * (which are stored in fp_status), and the other RES0 bits + * in between, then we clear all of the low 16 bits. + */ + env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000; + env->vfp.vec_len = (val >> 16) & 7; + env->vfp.vec_stride = (val >> 20) & 3; + + /* + * The bit we set within fpscr_q is arbitrary; the register as a + * whole being zero/non-zero is what counts. + */ + env->vfp.qc[0] = val & FPCR_QC; + env->vfp.qc[1] = 0; + env->vfp.qc[2] = 0; + env->vfp.qc[3] = 0; +} + +void vfp_set_fpscr(CPUARMState *env, uint32_t val) +{ + HELPER(vfp_set_fpscr)(env, val); +} + +#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) + +#define VFP_BINOP(name) \ +float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \ +{ \ + float_status *fpst = fpstp; \ + return float32_ ## name(a, b, fpst); \ +} \ +float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \ +{ \ + float_status *fpst = fpstp; \ + return float64_ ## name(a, b, fpst); \ +} +VFP_BINOP(add) +VFP_BINOP(sub) +VFP_BINOP(mul) +VFP_BINOP(div) +VFP_BINOP(min) +VFP_BINOP(max) +VFP_BINOP(minnum) +VFP_BINOP(maxnum) +#undef VFP_BINOP + +float32 VFP_HELPER(neg, s)(float32 a) +{ + return float32_chs(a); +} + +float64 VFP_HELPER(neg, d)(float64 a) +{ + return float64_chs(a); +} + +float32 VFP_HELPER(abs, s)(float32 a) +{ + return float32_abs(a); +} + +float64 VFP_HELPER(abs, d)(float64 a) +{ + return float64_abs(a); +} + +float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) +{ + return float32_sqrt(a, &env->vfp.fp_status); +} + +float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) +{ + return float64_sqrt(a, &env->vfp.fp_status); +} + +static void softfloat_to_vfp_compare(CPUARMState *env, int cmp) +{ + uint32_t flags = 0; + switch (cmp) { + case float_relation_equal: + flags = 0x6; + break; + case float_relation_less: + flags = 0x8; + break; + case float_relation_greater: + flags = 0x2; + break; + case float_relation_unordered: + flags = 0x3; + break; + default: + g_assert_not_reached(); + break; + } + env->vfp.xregs[ARM_VFP_FPSCR] = + deposit32(env->vfp.xregs[ARM_VFP_FPSCR], 28, 4, flags); +} + +/* XXX: check quiet/signaling case */ +#define DO_VFP_cmp(p, type) \ +void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \ +{ \ + softfloat_to_vfp_compare(env, \ + type ## _compare_quiet(a, b, &env->vfp.fp_status)); \ +} \ +void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \ +{ \ + softfloat_to_vfp_compare(env, \ + type ## _compare(a, b, &env->vfp.fp_status)); \ +} +DO_VFP_cmp(s, float32) +DO_VFP_cmp(d, float64) +#undef DO_VFP_cmp + +/* Integer to float and float to integer conversions */ + +#define CONV_ITOF(name, ftype, fsz, sign) \ +ftype HELPER(name)(uint32_t x, void *fpstp) \ +{ \ + float_status *fpst = fpstp; \ + return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ +} + +#define CONV_FTOI(name, ftype, fsz, sign, round) \ +sign##int32_t HELPER(name)(ftype x, void *fpstp) \ +{ \ + float_status *fpst = fpstp; \ + if (float##fsz##_is_any_nan(x)) { \ + float_raise(float_flag_invalid, fpst); \ + return 0; \ + } \ + return float##fsz##_to_##sign##int32##round(x, fpst); \ +} + +#define FLOAT_CONVS(name, p, ftype, fsz, sign) \ + CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \ + CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \ + CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero) + +FLOAT_CONVS(si, h, uint32_t, 16, ) +FLOAT_CONVS(si, s, float32, 32, ) +FLOAT_CONVS(si, d, float64, 64, ) +FLOAT_CONVS(ui, h, uint32_t, 16, u) 
+FLOAT_CONVS(ui, s, float32, 32, u) +FLOAT_CONVS(ui, d, float64, 64, u) + +#undef CONV_ITOF +#undef CONV_FTOI +#undef FLOAT_CONVS + +/* floating point conversion */ +float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) +{ + return float32_to_float64(x, &env->vfp.fp_status); +} + +float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) +{ + return float64_to_float32(x, &env->vfp.fp_status); +} + +/* VFP3 fixed point conversion. */ +#ifdef _MSC_VER +#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ +float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ + void *fpstp) \ +{ return itype##_to_##float##fsz##_scalbn(x, 0 - shift, fpstp); } +#else +#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ +float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ + void *fpstp) \ +{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); } +#endif + +#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff) \ +uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \ + void *fpst) \ +{ \ + if (unlikely(float##fsz##_is_any_nan(x))) { \ + float_raise(float_flag_invalid, fpst); \ + return 0; \ + } \ + return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \ +} + +#define VFP_CONV_FIX(name, p, fsz, isz, itype) \ +VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ +VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ + float_round_to_zero, _round_to_zero) \ +VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ + get_float_rounding_mode(fpst), ) + +#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \ +VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ +VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ + get_float_rounding_mode(fpst), ) + +VFP_CONV_FIX(sh, d, 64, 64, int16) +VFP_CONV_FIX(sl, d, 64, 64, int32) +VFP_CONV_FIX_A64(sq, d, 64, 64, int64) +VFP_CONV_FIX(uh, d, 64, 64, uint16) +VFP_CONV_FIX(ul, d, 64, 64, uint32) +VFP_CONV_FIX_A64(uq, d, 64, 64, uint64) +VFP_CONV_FIX(sh, s, 32, 32, int16) +VFP_CONV_FIX(sl, s, 32, 32, int32) +VFP_CONV_FIX_A64(sq, s, 32, 64, int64) +VFP_CONV_FIX(uh, s, 32, 32, uint16) +VFP_CONV_FIX(ul, s, 32, 32, uint32) +VFP_CONV_FIX_A64(uq, s, 32, 64, uint64) + +#undef VFP_CONV_FIX +#undef VFP_CONV_FIX_FLOAT +#undef VFP_CONV_FLOAT_FIX_ROUND +#undef VFP_CONV_FIX_A64 + +uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst) +{ +#ifdef _MSC_VER + return int32_to_float16_scalbn(x, 0 - shift, fpst); +#else + return int32_to_float16_scalbn(x, -shift, fpst); +#endif +} + +uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst) +{ +#ifdef _MSC_VER + return uint32_to_float16_scalbn(x, 0 - shift, fpst); +#else + return uint32_to_float16_scalbn(x, -shift, fpst); +#endif +} + +uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst) +{ +#ifdef _MSC_VER + return int64_to_float16_scalbn(x, 0 - shift, fpst); +#else + return int64_to_float16_scalbn(x, -shift, fpst); +#endif +} + +uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst) +{ +#ifdef _MSC_VER + return uint64_to_float16_scalbn(x, 0 - shift, fpst); +#else + return uint64_to_float16_scalbn(x, -shift, fpst); +#endif +} + +uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst) +{ + if (unlikely(float16_is_any_nan(x))) { + float_raise(float_flag_invalid, fpst); + return 0; + } + return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst), + shift, fpst); +} + +uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst) +{ + if (unlikely(float16_is_any_nan(x))) { + float_raise(float_flag_invalid, 
fpst); + return 0; + } + return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst), + shift, fpst); +} + +uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst) +{ + if (unlikely(float16_is_any_nan(x))) { + float_raise(float_flag_invalid, fpst); + return 0; + } + return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst), + shift, fpst); +} + +uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst) +{ + if (unlikely(float16_is_any_nan(x))) { + float_raise(float_flag_invalid, fpst); + return 0; + } + return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst), + shift, fpst); +} + +uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst) +{ + if (unlikely(float16_is_any_nan(x))) { + float_raise(float_flag_invalid, fpst); + return 0; + } + return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst), + shift, fpst); +} + +uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst) +{ + if (unlikely(float16_is_any_nan(x))) { + float_raise(float_flag_invalid, fpst); + return 0; + } + return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst), + shift, fpst); +} + +/* Set the current fp rounding mode and return the old one. + * The argument is a softfloat float_round_ value. + */ +uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp) +{ + float_status *fp_status = fpstp; + + uint32_t prev_rmode = get_float_rounding_mode(fp_status); + set_float_rounding_mode(rmode, fp_status); + + return prev_rmode; +} + +/* Set the current fp rounding mode in the standard fp status and return + * the old one. This is for NEON instructions that need to change the + * rounding mode but wish to use the standard FPSCR values for everything + * else. Always set the rounding mode back to the correct value after + * modifying it. + * The argument is a softfloat float_round_ value. + */ +uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env) +{ + float_status *fp_status = &env->vfp.standard_fp_status; + + uint32_t prev_rmode = get_float_rounding_mode(fp_status); + set_float_rounding_mode(rmode, fp_status); + + return prev_rmode; +} + +/* Half precision conversions. */ +float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode) +{ + /* Squash FZ16 to 0 for the duration of conversion. In this case, + * it would affect flushing input denormals. + */ + float_status *fpst = fpstp; + flag save = get_flush_inputs_to_zero(fpst); + set_flush_inputs_to_zero(false, fpst); + float32 r = float16_to_float32(a, !ahp_mode, fpst); + set_flush_inputs_to_zero(save, fpst); + return r; +} + +uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode) +{ + /* Squash FZ16 to 0 for the duration of conversion. In this case, + * it would affect flushing output denormals. + */ + float_status *fpst = fpstp; + flag save = get_flush_to_zero(fpst); + set_flush_to_zero(false, fpst); + float16 r = float32_to_float16(a, !ahp_mode, fpst); + set_flush_to_zero(save, fpst); + return r; +} + +float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode) +{ + /* Squash FZ16 to 0 for the duration of conversion. In this case, + * it would affect flushing input denormals. 
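+ * The architecture exempts these half-precision conversions from
+ * FZ16 flushing, hence the save/restore of the flush flag around
+ * the conversion below.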
+ */ + float_status *fpst = fpstp; + flag save = get_flush_inputs_to_zero(fpst); + set_flush_inputs_to_zero(false, fpst); + float64 r = float16_to_float64(a, !ahp_mode, fpst); + set_flush_inputs_to_zero(save, fpst); + return r; +} + +uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode) +{ + /* Squash FZ16 to 0 for the duration of conversion. In this case, + * it would affect flushing output denormals. + */ + float_status *fpst = fpstp; + flag save = get_flush_to_zero(fpst); + set_flush_to_zero(false, fpst); + float16 r = float64_to_float16(a, !ahp_mode, fpst); + set_flush_to_zero(save, fpst); + return r; +} + +#define float32_two make_float32(0x40000000) +#define float32_three make_float32(0x40400000) +#define float32_one_point_five make_float32(0x3fc00000) + +float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env) +{ + float_status *s = &env->vfp.standard_fp_status; + if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || + (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { + if (!(float32_is_zero(a) || float32_is_zero(b))) { + float_raise(float_flag_input_denormal, s); + } + return float32_two; + } + return float32_sub(float32_two, float32_mul(a, b, s), s); +} + +float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env) +{ + float_status *s = &env->vfp.standard_fp_status; + float32 product; + if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || + (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { + if (!(float32_is_zero(a) || float32_is_zero(b))) { + float_raise(float_flag_input_denormal, s); + } + return float32_one_point_five; + } + product = float32_mul(a, b, s); + return float32_div(float32_sub(float32_three, product, s), float32_two, s); +} + +/* NEON helpers. */ + +/* Constants 256 and 512 are used in some helpers; we avoid relying on + * int->float conversions at run-time. */ +#define float64_256 make_float64(0x4070000000000000LL) +#define float64_512 make_float64(0x4080000000000000LL) +#define float16_maxnorm make_float16(0x7bff) +#define float32_maxnorm make_float32(0x7f7fffff) +#define float64_maxnorm make_float64(0x7fefffffffffffffLL) + +/* Reciprocal functions + * + * The algorithm that must be used to calculate the estimate + * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate + */ + +/* See RecipEstimate() + * + * input is a 9 bit fixed point number + * input range 256 .. 511 for a number from 0.5 <= x < 1.0. + * result range 256 .. 511 for a number from 1.0 to 511/256. + */ + +static int recip_estimate(int input) +{ + int a, b, r; + assert(256 <= input && input < 512); + a = (input * 2) + 1; + b = (1 << 19) / a; + r = (b + 1) >> 1; + assert(256 <= r && r < 512); + return r; +} + +/* + * Common wrapper to call recip_estimate + * + * The parameters are exponent and 64 bit fraction (without implicit + * bit) where the binary point is nominally at bit 52. Returns a + * float64 which can then be rounded to the appropriate size by the + * callee. 
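+ *
+ * As a worked example of the underlying estimate: recip_estimate(256),
+ * an input representing 0.5, computes a = 513 and
+ * b = (1 << 19) / 513 = 1022, returning r = (1022 + 1) >> 1 = 511,
+ * i.e. 511/256, just under the exact reciprocal 2.0.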
+ */ + +static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac) +{ + uint32_t scaled, estimate; + uint64_t result_frac; + int result_exp; + + /* Handle sub-normals */ + if (*exp == 0) { + if (extract64(frac, 51, 1) == 0) { + *exp = -1; + frac <<= 2; + } else { + frac <<= 1; + } + } + + /* scaled = UInt('1':fraction<51:44>) */ + scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); + estimate = recip_estimate(scaled); + + result_exp = exp_off - *exp; + result_frac = deposit64(0, 44, 8, estimate); + if (result_exp == 0) { + result_frac = deposit64(result_frac >> 1, 51, 1, 1); + } else if (result_exp == -1) { + result_frac = deposit64(result_frac >> 2, 50, 2, 1); + result_exp = 0; + } + + *exp = result_exp; + + return result_frac; +} + +static bool round_to_inf(float_status *fpst, bool sign_bit) +{ + switch (fpst->float_rounding_mode) { + case float_round_nearest_even: /* Round to Nearest */ + return true; + case float_round_up: /* Round to +Inf */ + return !sign_bit; + case float_round_down: /* Round to -Inf */ + return sign_bit; + case float_round_to_zero: /* Round to Zero */ + return false; + } + + g_assert_not_reached(); + // never reach here + return false; +} + +uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp) +{ + float_status *fpst = fpstp; + float16 f16 = float16_squash_input_denormal(input, fpst); + uint32_t f16_val = float16_val(f16); + uint32_t f16_sign = float16_is_neg(f16); + int f16_exp = extract32(f16_val, 10, 5); + uint32_t f16_frac = extract32(f16_val, 0, 10); + uint64_t f64_frac; + + if (float16_is_any_nan(f16)) { + float16 nan = f16; + if (float16_is_signaling_nan(f16, fpst)) { + float_raise(float_flag_invalid, fpst); + nan = float16_silence_nan(f16, fpst); + } + if (fpst->default_nan_mode) { + nan = float16_default_nan(fpst); + } + return nan; + } else if (float16_is_infinity(f16)) { + return float16_set_sign(float16_zero, float16_is_neg(f16)); + } else if (float16_is_zero(f16)) { + float_raise(float_flag_divbyzero, fpst); + return float16_set_sign(float16_infinity, float16_is_neg(f16)); + } else if (float16_abs(f16) < (1 << 8)) { + /* Abs(value) < 2.0^-16 */ + float_raise(float_flag_overflow | float_flag_inexact, fpst); + if (round_to_inf(fpst, f16_sign)) { + return float16_set_sign(float16_infinity, f16_sign); + } else { + return float16_set_sign(float16_maxnorm, f16_sign); + } + } else if (f16_exp >= 29 && fpst->flush_to_zero) { + float_raise(float_flag_underflow, fpst); + return float16_set_sign(float16_zero, float16_is_neg(f16)); + } + + f64_frac = call_recip_estimate(&f16_exp, 29, + ((uint64_t) f16_frac) << (52 - 10)); + + /* result = sign : result_exp<4:0> : fraction<51:42> */ + f16_val = deposit32(0, 15, 1, f16_sign); + f16_val = deposit32(f16_val, 10, 5, f16_exp); + f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10)); + return make_float16(f16_val); +} + +float32 HELPER(recpe_f32)(float32 input, void *fpstp) +{ + float_status *fpst = fpstp; + float32 f32 = float32_squash_input_denormal(input, fpst); + uint32_t f32_val = float32_val(f32); + bool f32_sign = float32_is_neg(f32); + int f32_exp = extract32(f32_val, 23, 8); + uint32_t f32_frac = extract32(f32_val, 0, 23); + uint64_t f64_frac; + + if (float32_is_any_nan(f32)) { + float32 nan = f32; + if (float32_is_signaling_nan(f32, fpst)) { + float_raise(float_flag_invalid, fpst); + nan = float32_silence_nan(f32, fpst); + } + if (fpst->default_nan_mode) { + nan = float32_default_nan(fpst); + } + return nan; + } else if (float32_is_infinity(f32)) { + return 
float32_set_sign(float32_zero, float32_is_neg(f32)); + } else if (float32_is_zero(f32)) { + float_raise(float_flag_divbyzero, fpst); + return float32_set_sign(float32_infinity, float32_is_neg(f32)); + } else if (float32_abs(f32) < (1ULL << 21)) { + /* Abs(value) < 2.0^-128 */ + float_raise(float_flag_overflow | float_flag_inexact, fpst); + if (round_to_inf(fpst, f32_sign)) { + return float32_set_sign(float32_infinity, f32_sign); + } else { + return float32_set_sign(float32_maxnorm, f32_sign); + } + } else if (f32_exp >= 253 && fpst->flush_to_zero) { + float_raise(float_flag_underflow, fpst); + return float32_set_sign(float32_zero, float32_is_neg(f32)); + } + + f64_frac = call_recip_estimate(&f32_exp, 253, + ((uint64_t) f32_frac) << (52 - 23)); + + /* result = sign : result_exp<7:0> : fraction<51:29> */ + f32_val = deposit32(0, 31, 1, f32_sign); + f32_val = deposit32(f32_val, 23, 8, f32_exp); + f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23)); + return make_float32(f32_val); +} + +float64 HELPER(recpe_f64)(float64 input, void *fpstp) +{ + float_status *fpst = fpstp; + float64 f64 = float64_squash_input_denormal(input, fpst); + uint64_t f64_val = float64_val(f64); + bool f64_sign = float64_is_neg(f64); + int f64_exp = extract64(f64_val, 52, 11); + uint64_t f64_frac = extract64(f64_val, 0, 52); + + /* Deal with any special cases */ + if (float64_is_any_nan(f64)) { + float64 nan = f64; + if (float64_is_signaling_nan(f64, fpst)) { + float_raise(float_flag_invalid, fpst); + nan = float64_silence_nan(f64, fpst); + } + if (fpst->default_nan_mode) { + nan = float64_default_nan(fpst); + } + return nan; + } else if (float64_is_infinity(f64)) { + return float64_set_sign(float64_zero, float64_is_neg(f64)); + } else if (float64_is_zero(f64)) { + float_raise(float_flag_divbyzero, fpst); + return float64_set_sign(float64_infinity, float64_is_neg(f64)); + } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { + /* Abs(value) < 2.0^-1024 */ + float_raise(float_flag_overflow | float_flag_inexact, fpst); + if (round_to_inf(fpst, f64_sign)) { + return float64_set_sign(float64_infinity, f64_sign); + } else { + return float64_set_sign(float64_maxnorm, f64_sign); + } + } else if (f64_exp >= 2045 && fpst->flush_to_zero) { + float_raise(float_flag_underflow, fpst); + return float64_set_sign(float64_zero, float64_is_neg(f64)); + } + + f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac); + + /* result = sign : result_exp<10:0> : fraction<51:0>; */ + f64_val = deposit64(0, 63, 1, f64_sign); + f64_val = deposit64(f64_val, 52, 11, f64_exp); + f64_val = deposit64(f64_val, 0, 52, f64_frac); + return make_float64(f64_val); +} + +/* The algorithm that must be used to calculate the estimate + * is specified by the ARM ARM. 
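+ * See FPRSqrtEstimate()/RecipSqrtEstimate there; the loop below is
+ * the pseudocode's linear search for the point at which
+ * a * (b + 1) * (b + 1) reaches 2^28.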
+ */ + +static int do_recip_sqrt_estimate(int a) +{ + int b, estimate; + + assert(128 <= a && a < 512); + if (a < 256) { + a = a * 2 + 1; + } else { + a = (a >> 1) << 1; + a = (a + 1) * 2; + } + b = 512; + while (a * (b + 1) * (b + 1) < (1 << 28)) { + b += 1; + } + estimate = (b + 1) / 2; + assert(256 <= estimate && estimate < 512); + + return estimate; +} + + +static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac) +{ + int estimate; + uint32_t scaled; + + if (*exp == 0) { + while (extract64(frac, 51, 1) == 0) { + frac = frac << 1; + *exp -= 1; + } + frac = extract64(frac, 0, 51) << 1; + } + + if (*exp & 1) { + /* scaled = UInt('01':fraction<51:45>) */ + scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7)); + } else { + /* scaled = UInt('1':fraction<51:44>) */ + scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); + } + estimate = do_recip_sqrt_estimate(scaled); + + *exp = (exp_off - *exp) / 2; + return extract64(estimate, 0, 8) << 44; +} + +uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp) +{ + float_status *s = fpstp; + float16 f16 = float16_squash_input_denormal(input, s); + uint16_t val = float16_val(f16); + bool f16_sign = float16_is_neg(f16); + int f16_exp = extract32(val, 10, 5); + uint16_t f16_frac = extract32(val, 0, 10); + uint64_t f64_frac; + + if (float16_is_any_nan(f16)) { + float16 nan = f16; + if (float16_is_signaling_nan(f16, s)) { + float_raise(float_flag_invalid, s); + nan = float16_silence_nan(f16, s); + } + if (s->default_nan_mode) { + nan = float16_default_nan(s); + } + return nan; + } else if (float16_is_zero(f16)) { + float_raise(float_flag_divbyzero, s); + return float16_set_sign(float16_infinity, f16_sign); + } else if (f16_sign) { + float_raise(float_flag_invalid, s); + return float16_default_nan(s); + } else if (float16_is_infinity(f16)) { + return float16_zero; + } + + /* Scale and normalize to a double-precision value between 0.25 and 1.0, + * preserving the parity of the exponent. */ + + f64_frac = ((uint64_t) f16_frac) << (52 - 10); + + f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac); + + /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */ + val = deposit32(0, 15, 1, f16_sign); + val = deposit32(val, 10, 5, f16_exp); + val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8)); + return make_float16(val); +} + +float32 HELPER(rsqrte_f32)(float32 input, void *fpstp) +{ + float_status *s = fpstp; + float32 f32 = float32_squash_input_denormal(input, s); + uint32_t val = float32_val(f32); + uint32_t f32_sign = float32_is_neg(f32); + int f32_exp = extract32(val, 23, 8); + uint32_t f32_frac = extract32(val, 0, 23); + uint64_t f64_frac; + + if (float32_is_any_nan(f32)) { + float32 nan = f32; + if (float32_is_signaling_nan(f32, s)) { + float_raise(float_flag_invalid, s); + nan = float32_silence_nan(f32, s); + } + if (s->default_nan_mode) { + nan = float32_default_nan(s); + } + return nan; + } else if (float32_is_zero(f32)) { + float_raise(float_flag_divbyzero, s); + return float32_set_sign(float32_infinity, float32_is_neg(f32)); + } else if (float32_is_neg(f32)) { + float_raise(float_flag_invalid, s); + return float32_default_nan(s); + } else if (float32_is_infinity(f32)) { + return float32_zero; + } + + /* Scale and normalize to a double-precision value between 0.25 and 1.0, + * preserving the parity of the exponent. 
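+ * Parity matters because the square root halves the exponent:
+ * recip_sqrt_estimate() picks a different 'scaled' window for odd
+ * and even exponents.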
*/ + + f64_frac = ((uint64_t) f32_frac) << 29; + + f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac); + + /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(15) */ + val = deposit32(0, 31, 1, f32_sign); + val = deposit32(val, 23, 8, f32_exp); + val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8)); + return make_float32(val); +} + +float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) +{ + float_status *s = fpstp; + float64 f64 = float64_squash_input_denormal(input, s); + uint64_t val = float64_val(f64); + bool f64_sign = float64_is_neg(f64); + int f64_exp = extract64(val, 52, 11); + uint64_t f64_frac = extract64(val, 0, 52); + + if (float64_is_any_nan(f64)) { + float64 nan = f64; + if (float64_is_signaling_nan(f64, s)) { + float_raise(float_flag_invalid, s); + nan = float64_silence_nan(f64, s); + } + if (s->default_nan_mode) { + nan = float64_default_nan(s); + } + return nan; + } else if (float64_is_zero(f64)) { + float_raise(float_flag_divbyzero, s); + return float64_set_sign(float64_infinity, float64_is_neg(f64)); + } else if (float64_is_neg(f64)) { + float_raise(float_flag_invalid, s); + return float64_default_nan(s); + } else if (float64_is_infinity(f64)) { + return float64_zero; + } + + f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac); + + /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(44) */ + val = deposit64(0, 61, 1, f64_sign); + val = deposit64(val, 52, 11, f64_exp); + val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8)); + return make_float64(val); +} + +uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp) +{ + /* float_status *s = fpstp; */ + int input, estimate; + + if ((a & 0x80000000) == 0) { + return 0xffffffff; + } + + input = extract32(a, 23, 9); + estimate = recip_estimate(input); + + return deposit32(0, (32 - 9), 9, estimate); +} + +uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp) +{ + int estimate; + + if ((a & 0xc0000000) == 0) { + return 0xffffffff; + } + + estimate = do_recip_sqrt_estimate(extract32(a, 23, 9)); + + return deposit32(0, 23, 9, estimate); +} + +/* VFPv4 fused multiply-accumulate */ +float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) +{ + float_status *fpst = fpstp; + return float32_muladd(a, b, c, 0, fpst); +} + +float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) +{ + float_status *fpst = fpstp; + return float64_muladd(a, b, c, 0, fpst); +} + +/* ARMv8 round to integral */ +float32 HELPER(rints_exact)(float32 x, void *fp_status) +{ + return float32_round_to_int(x, fp_status); +} + +float64 HELPER(rintd_exact)(float64 x, void *fp_status) +{ + return float64_round_to_int(x, fp_status); +} + +float32 HELPER(rints)(float32 x, void *fp_status) +{ + int old_flags = get_float_exception_flags(fp_status), new_flags; + float32 ret; + + ret = float32_round_to_int(x, fp_status); + + /* Suppress any inexact exceptions the conversion produced */ + if (!(old_flags & float_flag_inexact)) { + new_flags = get_float_exception_flags(fp_status); + set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); + } + + return ret; +} + +float64 HELPER(rintd)(float64 x, void *fp_status) +{ + int old_flags = get_float_exception_flags(fp_status), new_flags; + float64 ret; + + ret = float64_round_to_int(x, fp_status); + + new_flags = get_float_exception_flags(fp_status); + + /* Suppress any inexact exceptions the conversion produced */ + if (!(old_flags & float_flag_inexact)) { + new_flags = get_float_exception_flags(fp_status); + set_float_exception_flags(new_flags & 
~float_flag_inexact, fp_status); + } + + return ret; +} + +/* Convert ARM rounding mode to softfloat */ +int arm_rmode_to_sf(int rmode) +{ + switch (rmode) { + case FPROUNDING_TIEAWAY: + rmode = float_round_ties_away; + break; + case FPROUNDING_ODD: + /* FIXME: add support for TIEAWAY and ODD */ + qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n", + rmode); + /* fall through for now */ + case FPROUNDING_TIEEVEN: + default: + rmode = float_round_nearest_even; + break; + case FPROUNDING_POSINF: + rmode = float_round_up; + break; + case FPROUNDING_NEGINF: + rmode = float_round_down; + break; + case FPROUNDING_ZERO: + rmode = float_round_to_zero; + break; + } + return rmode; +} + +/* + * Implement float64 to int32_t conversion without saturation; + * the result is supplied modulo 2^32. + */ +uint64_t HELPER(fjcvtzs)(float64 value, void *vstatus) +{ + float_status *status = vstatus; + uint32_t exp, sign; + uint64_t frac; + uint32_t inexact = 1; /* !Z */ + + sign = extract64(value, 63, 1); + exp = extract64(value, 52, 11); + frac = extract64(value, 0, 52); + + if (exp == 0) { + /* While not inexact for IEEE FP, -0.0 is inexact for JavaScript. */ + inexact = sign; + if (frac != 0) { + if (status->flush_inputs_to_zero) { + float_raise(float_flag_input_denormal, status); + } else { + float_raise(float_flag_inexact, status); + inexact = 1; + } + } + frac = 0; + } else if (exp == 0x7ff) { + /* This operation raises Invalid for both NaN and overflow (Inf). */ + float_raise(float_flag_invalid, status); + frac = 0; + } else { + int true_exp = exp - 1023; + int shift = true_exp - 52; + + /* Restore implicit bit. */ + frac |= 1ull << 52; + + /* Shift the fraction into place. */ + if (shift >= 0) { + /* The number is so large we must shift the fraction left. */ + if (shift >= 64) { + /* The fraction is shifted out entirely. */ + frac = 0; + } else { + frac <<= shift; + } + } else if (shift > -64) { + /* Normal case -- shift right and notice if bits shift out. */ + inexact = (frac << (64 + shift)) != 0; + frac >>= -shift; + } else { + /* The fraction is shifted out entirely. */ + frac = 0; + } + + /* Notice overflow or inexact exceptions. */ + if (true_exp > 31 || frac > (sign ? 0x80000000ull : 0x7fffffff)) { + /* Overflow, for which this operation raises invalid. */ + float_raise(float_flag_invalid, status); + inexact = 1; + } else if (inexact) { + float_raise(float_flag_inexact, status); + } + + /* Honor the sign. */ + if (sign) { +#ifdef _MSC_VER + frac = 0 - frac; +#else + frac = -frac; +#endif + } + } + + /* Pack the result and the env->ZF representation of Z together. */ + return deposit64(frac, 32, 32, inexact); +} + +uint32_t HELPER(vjcvt)(float64 value, CPUARMState *env) +{ + uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status); + uint32_t result = pair; + uint32_t z = (pair >> 32) == 0; + + /* Store Z, clear NCV, in FPSCR.NZCV. */ + env->vfp.xregs[ARM_VFP_FPSCR] + = (env->vfp.xregs[ARM_VFP_FPSCR] & ~CPSR_NZCV) | (z * CPSR_Z); + + return result; +} + +/* Round a float32 to an integer that fits in int32_t or int64_t. */ +static float32 frint_s(float32 f, float_status *fpst, int intsize) +{ + int old_flags = get_float_exception_flags(fpst); + uint32_t exp = extract32(f, 23, 8); + + if (unlikely(exp == 0xff)) { + /* NaN or Inf. */ + goto overflow; + } + + /* Round and re-extract the exponent. */ + f = float32_round_to_int(f, fpst); + exp = extract32(f, 23, 8); + + /* Validate the range of the result. 
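+ * With the float32 bias of 127, exp < 126 + intsize means
+ * abs(F) < 2^(intsize - 1), so F fits; the only larger value that
+ * still fits is -2^(intsize - 1), i.e. INT{N}_MIN, checked below.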
*/ + if (exp < 126 + intsize) { + /* abs(F) <= INT{N}_MAX */ + return f; + } + if (exp == 126 + intsize) { + uint32_t sign = extract32(f, 31, 1); + uint32_t frac = extract32(f, 0, 23); + if (sign && frac == 0) { + /* F == INT{N}_MIN */ + return f; + } + } + + overflow: + /* + * Raise Invalid and return INT{N}_MIN as a float. Revert any + * inexact exception float32_round_to_int may have raised. + */ + set_float_exception_flags(old_flags | float_flag_invalid, fpst); + return (0x100u + 126u + intsize) << 23; +} + +float32 HELPER(frint32_s)(float32 f, void *fpst) +{ + return frint_s(f, fpst, 32); +} + +float32 HELPER(frint64_s)(float32 f, void *fpst) +{ + return frint_s(f, fpst, 64); +} + +/* Round a float64 to an integer that fits in int32_t or int64_t. */ +static float64 frint_d(float64 f, float_status *fpst, int intsize) +{ + int old_flags = get_float_exception_flags(fpst); + uint32_t exp = extract64(f, 52, 11); + + if (unlikely(exp == 0x7ff)) { + /* NaN or Inf. */ + goto overflow; + } + + /* Round and re-extract the exponent. */ + f = float64_round_to_int(f, fpst); + exp = extract64(f, 52, 11); + + /* Validate the range of the result. */ + if (exp < 1022 + intsize) { + /* abs(F) <= INT{N}_MAX */ + return f; + } + if (exp == 1022 + intsize) { + uint64_t sign = extract64(f, 63, 1); + uint64_t frac = extract64(f, 0, 52); + if (sign && frac == 0) { + /* F == INT{N}_MIN */ + return f; + } + } + + overflow: + /* + * Raise Invalid and return INT{N}_MIN as a float. Revert any + * inexact exception float64_round_to_int may have raised. + */ + set_float_exception_flags(old_flags | float_flag_invalid, fpst); + return (uint64_t)(0x800 + 1022 + intsize) << 52; +} + +float64 HELPER(frint32_d)(float64 f, void *fpst) +{ + return frint_d(f, fpst, 32); +} + +float64 HELPER(frint64_d)(float64 f, void *fpst) +{ + return frint_d(f, fpst, 64); +} + +void HELPER(check_hcr_el2_trap)(CPUARMState *env, uint32_t rt, uint32_t reg) +{ + uint32_t syndrome; + + switch (reg) { + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + case ARM_VFP_MVFR2: + if (!(arm_hcr_el2_eff(env) & HCR_TID3)) { + return; + } + break; + case ARM_VFP_FPSID: + if (!(arm_hcr_el2_eff(env) & HCR_TID0)) { + return; + } + break; + default: + g_assert_not_reached(); + break; + } + + syndrome = ((EC_FPIDTRAP << ARM_EL_EC_SHIFT) + | ARM_EL_IL + | (1 << 24) | (0xe << 20) | (7 << 14) + | (reg << 10) | (rt << 5) | 1); + + raise_exception(env, EXCP_HYP_TRAP, syndrome, 2); +} diff --git a/qemu/target-i386/TODO b/qemu/target/i386/TODO similarity index 100% rename from qemu/target-i386/TODO rename to qemu/target/i386/TODO diff --git a/qemu/target-i386/arch_memory_mapping.c b/qemu/target/i386/arch_memory_mapping.c similarity index 68% rename from qemu/target-i386/arch_memory_mapping.c rename to qemu/target/i386/arch_memory_mapping.c index e66ebc95..63c44d31 100644 --- a/qemu/target-i386/arch_memory_mapping.c +++ b/qemu/target/i386/arch_memory_mapping.c @@ -11,8 +11,8 @@ * */ +#include "qemu/osdep.h" #include "cpu.h" -#include "exec/cpu-all.h" #include "sysemu/memory_mapping.h" /* PAE Paging or IA-32e Paging */ @@ -27,7 +27,11 @@ static void walk_pte(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pte_addr = (pte_start_addr + i * 8) & a20_mask; - pte = ldq_phys(as, pte_addr); +#ifdef UNICORN_ARCH_POSTFIX + pte = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#else + pte = address_space_ldq(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#endif if (!(pte & PG_PRESENT_MASK)) { /* not 
present */ continue; @@ -57,7 +61,11 @@ static void walk_pte2(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 1024; i++) { pte_addr = (pte_start_addr + i * 4) & a20_mask; - pte = ldl_phys(as, pte_addr); +#ifdef UNICORN_ARCH_POSTFIX + pte = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#else + pte = address_space_ldl(as->uc, as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#endif if (!(pte & PG_PRESENT_MASK)) { /* not present */ continue; @@ -89,7 +97,11 @@ static void walk_pde(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pde_addr = (pde_start_addr + i * 8) & a20_mask; - pde = ldq_phys(as, pde_addr); +#ifdef UNICORN_ARCH_POSTFIX + pde = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#else + pde = address_space_ldq(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#endif if (!(pde & PG_PRESENT_MASK)) { /* not present */ continue; @@ -126,7 +138,11 @@ static void walk_pde2(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 1024; i++) { pde_addr = (pde_start_addr + i * 4) & a20_mask; - pde = ldl_phys(as, pde_addr); +#ifdef UNICORN_ARCH_POSTFIX + pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#else + pde = address_space_ldl(as->uc, as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#endif if (!(pde & PG_PRESENT_MASK)) { /* not present */ continue; @@ -167,7 +183,11 @@ static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 4; i++) { pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask; - pdpe = ldq_phys(as, pdpe_addr); +#ifdef UNICORN_ARCH_POSTFIX + pdpe = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#else + pdpe = address_space_ldq(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#endif if (!(pdpe & PG_PRESENT_MASK)) { /* not present */ continue; @@ -192,7 +212,11 @@ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask; - pdpe = ldq_phys(as, pdpe_addr); +#ifdef UNICORN_ARCH_POSTFIX + pdpe = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#else + pdpe = address_space_ldq(as->uc, as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL); +#endif if (!(pdpe & PG_PRESENT_MASK)) { /* not present */ continue; @@ -219,7 +243,8 @@ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as, /* IA-32e Paging */ static void walk_pml4e(MemoryMappingList *list, AddressSpace *as, - hwaddr pml4e_start_addr, int32_t a20_mask) + hwaddr pml4e_start_addr, int32_t a20_mask, + target_ulong start_line_addr) { hwaddr pml4e_addr, pdpe_start_addr; uint64_t pml4e; @@ -228,52 +253,93 @@ static void walk_pml4e(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask; - pml4e = ldq_phys(as, pml4e_addr); +#ifdef UNICORN_ARCH_POSTFIX + pml4e = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pml4e_addr, MEMTXATTRS_UNSPECIFIED, +#else + pml4e = address_space_ldq(as->uc, as, pml4e_addr, MEMTXATTRS_UNSPECIFIED, +#endif + NULL); if (!(pml4e & PG_PRESENT_MASK)) { /* not present */ continue; } - line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48); + line_addr = start_line_addr | ((i & 0x1ffULL) << 39); pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask; walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr); } } + +static void 
walk_pml5e(MemoryMappingList *list, AddressSpace *as, + hwaddr pml5e_start_addr, int32_t a20_mask) +{ + hwaddr pml5e_addr, pml4e_start_addr; + uint64_t pml5e; + target_ulong line_addr; + int i; + + for (i = 0; i < 512; i++) { + pml5e_addr = (pml5e_start_addr + i * 8) & a20_mask; +#ifdef UNICORN_ARCH_POSTFIX + pml5e = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, pml5e_addr, MEMTXATTRS_UNSPECIFIED, +#else + pml5e = address_space_ldq(as->uc, as, pml5e_addr, MEMTXATTRS_UNSPECIFIED, +#endif + NULL); + if (!(pml5e & PG_PRESENT_MASK)) { + /* not present */ + continue; + } + + line_addr = (0x7fULL << 57) | ((i & 0x1ffULL) << 48); + pml4e_start_addr = (pml5e & PLM4_ADDR_MASK) & a20_mask; + walk_pml4e(list, as, pml4e_start_addr, a20_mask, line_addr); + } +} #endif -void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list, - Error **errp) +void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list) { - X86CPU *cpu = X86_CPU(cs->uc, cs); + X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; + int32_t a20_mask; if (!cpu_paging_enabled(cs)) { /* paging is disabled */ return; } + a20_mask = x86_get_a20_mask(env); if (env->cr[4] & CR4_PAE_MASK) { #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { - hwaddr pml4e_addr; + if (env->cr[4] & CR4_LA57_MASK) { + hwaddr pml5e_addr; - pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask; - walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask); + pml5e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask; + walk_pml5e(list, cs->as, pml5e_addr, a20_mask); + } else { + hwaddr pml4e_addr; + + pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask; + walk_pml4e(list, cs->as, pml4e_addr, a20_mask, + 0xffffULL << 48); + } } else #endif { hwaddr pdpe_addr; - pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask; - walk_pdpe2(list, cs->as, pdpe_addr, env->a20_mask); + pdpe_addr = (env->cr[3] & ~0x1f) & a20_mask; + walk_pdpe2(list, cs->as, pdpe_addr, a20_mask); } } else { hwaddr pde_addr; bool pse; - pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask; + pde_addr = (env->cr[3] & ~0xfff) & a20_mask; pse = !!(env->cr[4] & CR4_PSE_MASK); - walk_pde2(list, cs->as, pde_addr, env->a20_mask, pse); + walk_pde2(list, cs->as, pde_addr, a20_mask, pse); } } diff --git a/qemu/target/i386/bpt_helper.c b/qemu/target/i386/bpt_helper.c new file mode 100644 index 00000000..1cf39ea4 --- /dev/null +++ b/qemu/target/i386/bpt_helper.c @@ -0,0 +1,327 @@ +/* + * i386 breakpoint helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" + + +static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index) +{ + return (dr7 >> (index * 2)) & 1; +} + +static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index) +{ + return (dr7 >> (index * 2)) & 2; + +} +static inline bool hw_breakpoint_enabled(unsigned long dr7, int index) +{ + return hw_global_breakpoint_enabled(dr7, index) || + hw_local_breakpoint_enabled(dr7, index); +} + +static inline int hw_breakpoint_type(unsigned long dr7, int index) +{ + return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3; +} + +static inline int hw_breakpoint_len(unsigned long dr7, int index) +{ + int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3); + return (len == 2) ? 8 : len + 1; +} + +static int hw_breakpoint_insert(CPUX86State *env, int index) +{ + CPUState *cs = env_cpu(env); + target_ulong dr7 = env->dr[7]; + target_ulong drN = env->dr[index]; + int err = 0; + + switch (hw_breakpoint_type(dr7, index)) { + case DR7_TYPE_BP_INST: + if (hw_breakpoint_enabled(dr7, index)) { + err = cpu_breakpoint_insert(cs, drN, BP_CPU, + &env->cpu_breakpoint[index]); + } + break; + + case DR7_TYPE_IO_RW: + /* Notice when we should enable calls to bpt_io. */ + return hw_breakpoint_enabled(env->dr[7], index) + ? HF_IOBPT_MASK : 0; + + case DR7_TYPE_DATA_WR: + if (hw_breakpoint_enabled(dr7, index)) { + err = cpu_watchpoint_insert(cs, drN, + hw_breakpoint_len(dr7, index), + BP_CPU | BP_MEM_WRITE, + &env->cpu_watchpoint[index]); + } + break; + + case DR7_TYPE_DATA_RW: + if (hw_breakpoint_enabled(dr7, index)) { + err = cpu_watchpoint_insert(cs, drN, + hw_breakpoint_len(dr7, index), + BP_CPU | BP_MEM_ACCESS, + &env->cpu_watchpoint[index]); + } + break; + } + if (err) { + env->cpu_breakpoint[index] = NULL; + } + return 0; +} + +static void hw_breakpoint_remove(CPUX86State *env, int index) +{ + CPUState *cs = env_cpu(env); + + switch (hw_breakpoint_type(env->dr[7], index)) { + case DR7_TYPE_BP_INST: + if (env->cpu_breakpoint[index]) { + cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]); + env->cpu_breakpoint[index] = NULL; + } + break; + + case DR7_TYPE_DATA_WR: + case DR7_TYPE_DATA_RW: + if (env->cpu_breakpoint[index]) { + cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]); + env->cpu_breakpoint[index] = NULL; + } + break; + + case DR7_TYPE_IO_RW: + /* HF_IOBPT_MASK cleared elsewhere. */ + break; + } +} + +void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7) +{ + target_ulong old_dr7 = env->dr[7]; + int iobpt = 0; + int i; + + new_dr7 |= DR7_FIXED_1; + + /* If nothing is changing except the global/local enable bits, + then we can make the change more efficient. */ + if (((old_dr7 ^ new_dr7) & ~0xff) == 0) { + /* Fold the global and local enable bits together into the + global fields, then xor to show which registers have + changed collective enable state. 
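+
+           dr7 keeps L<n> at bit 2n and G<n> at bit 2n+1, so x | x * 2
+           ORs each local enable into the matching global bit; bit 2n+1
+           of 'mod' is then set exactly when the collective enable
+           L<n>|G<n> changed, which is what the (2 << i * 2) tests
+           below inspect.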
*/ + int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff; + + for (i = 0; i < DR7_MAX_BP; i++) { + if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) { + hw_breakpoint_remove(env, i); + } + } + env->dr[7] = new_dr7; + for (i = 0; i < DR7_MAX_BP; i++) { + if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) { + iobpt |= hw_breakpoint_insert(env, i); + } else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW + && hw_breakpoint_enabled(new_dr7, i)) { + iobpt |= HF_IOBPT_MASK; + } + } + } else { + for (i = 0; i < DR7_MAX_BP; i++) { + hw_breakpoint_remove(env, i); + } + env->dr[7] = new_dr7; + for (i = 0; i < DR7_MAX_BP; i++) { + iobpt |= hw_breakpoint_insert(env, i); + } + } + + env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt; +} + +static bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update) +{ + target_ulong dr6; + int reg; + bool hit_enabled = false; + + dr6 = env->dr[6] & ~0xf; + for (reg = 0; reg < DR7_MAX_BP; reg++) { + bool bp_match = false; + bool wp_match = false; + + switch (hw_breakpoint_type(env->dr[7], reg)) { + case DR7_TYPE_BP_INST: + if (env->dr[reg] == env->eip) { + bp_match = true; + } + break; + case DR7_TYPE_DATA_WR: + case DR7_TYPE_DATA_RW: + if (env->cpu_watchpoint[reg] && + env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) { + wp_match = true; + } + break; + case DR7_TYPE_IO_RW: + break; + } + if (bp_match || wp_match) { + dr6 |= 1ULL << reg; + if (hw_breakpoint_enabled(env->dr[7], reg)) { + hit_enabled = true; + } + } + } + + if (hit_enabled || force_dr6_update) { + env->dr[6] = dr6; + } + + return hit_enabled; +} + +void breakpoint_handler(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + CPUBreakpoint *bp; + + if (cs->watchpoint_hit) { + if (cs->watchpoint_hit->flags & BP_CPU) { + cs->watchpoint_hit = NULL; + if (check_hw_breakpoints(env, false)) { + raise_exception(env, EXCP01_DB); + } else { + cpu_loop_exit_noexc(cs); + } + } + } else { + QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { + if (bp->pc == env->eip) { + if (bp->flags & BP_CPU) { + check_hw_breakpoints(env, true); + raise_exception(env, EXCP01_DB); + } + break; + } + } + } +} + +void helper_single_step(CPUX86State *env) +{ + check_hw_breakpoints(env, true); + env->dr[6] |= DR6_BS; + raise_exception(env, EXCP01_DB); +} + +void helper_rechecking_single_step(CPUX86State *env) +{ + if ((env->eflags & TF_MASK) != 0) { + helper_single_step(env); + } +} + +void helper_set_dr(CPUX86State *env, int reg, target_ulong t0) +{ + switch (reg) { + case 0: case 1: case 2: case 3: + if (hw_breakpoint_enabled(env->dr[7], reg) + && hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) { + hw_breakpoint_remove(env, reg); + env->dr[reg] = t0; + hw_breakpoint_insert(env, reg); + } else { + env->dr[reg] = t0; + } + return; + case 4: + if (env->cr[4] & CR4_DE_MASK) { + break; + } + /* fallthru */ + case 6: + env->dr[6] = t0 | DR6_FIXED_1; + return; + case 5: + if (env->cr[4] & CR4_DE_MASK) { + break; + } + /* fallthru */ + case 7: + cpu_x86_update_dr7(env, t0); + return; + } + raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); +} + +target_ulong helper_get_dr(CPUX86State *env, int reg) +{ + switch (reg) { + case 0: case 1: case 2: case 3: case 6: case 7: + return env->dr[reg]; + case 4: + if (env->cr[4] & CR4_DE_MASK) { + break; + } else { + return env->dr[6]; + } + case 5: + if (env->cr[4] & CR4_DE_MASK) { + break; + } else { + return env->dr[7]; + } + } + raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); +} + +/* Check if Port 
I/O is trapped by a breakpoint. */ +void helper_bpt_io(CPUX86State *env, uint32_t port, + uint32_t size, target_ulong next_eip) +{ + target_ulong dr7 = env->dr[7]; + int i, hit = 0; + + for (i = 0; i < DR7_MAX_BP; ++i) { + if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW + && hw_breakpoint_enabled(dr7, i)) { + int bpt_len = hw_breakpoint_len(dr7, i); + if (port + size - 1 >= env->dr[i] + && port <= env->dr[i] + bpt_len - 1) { + hit |= 1 << i; + } + } + } + + if (hit) { + env->dr[6] = (env->dr[6] & ~0xf) | hit; + env->eip = next_eip; + raise_exception(env, EXCP01_DB); + } +} diff --git a/qemu/target-i386/cc_helper.c b/qemu/target/i386/cc_helper.c similarity index 97% rename from qemu/target-i386/cc_helper.c rename to qemu/target/i386/cc_helper.c index 29e3c425..159dda46 100644 --- a/qemu/target-i386/cc_helper.c +++ b/qemu/target/i386/cc_helper.c @@ -17,6 +17,7 @@ * License along with this library; if not, see . */ +#include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" @@ -104,6 +105,8 @@ target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1, return src1; case CC_OP_CLR: return CC_Z | CC_P; + case CC_OP_POPCNT: + return src1 ? 0 : CC_Z; case CC_OP_MULB: return compute_all_mulb(dst, src1); @@ -231,6 +234,7 @@ target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1, case CC_OP_LOGICL: case CC_OP_LOGICQ: case CC_OP_CLR: + case CC_OP_POPCNT: return 0; case CC_OP_EFLAGS: @@ -373,17 +377,7 @@ void helper_sti_vm(CPUX86State *env) { env->eflags |= VIF_MASK; if (env->eflags & VIP_MASK) { - raise_exception(env, EXCP0D_GPF); + raise_exception_ra(env, EXCP0D_GPF, GETPC()); } } #endif - -void helper_set_inhibit_irq(CPUX86State *env) -{ - env->hflags |= HF_INHIBIT_IRQ_MASK; -} - -void helper_reset_inhibit_irq(CPUX86State *env) -{ - env->hflags &= ~HF_INHIBIT_IRQ_MASK; -} diff --git a/qemu/target-i386/cc_helper_template.h b/qemu/target/i386/cc_helper_template.h similarity index 100% rename from qemu/target-i386/cc_helper_template.h rename to qemu/target/i386/cc_helper_template.h diff --git a/qemu/target/i386/cpu-param.h b/qemu/target/i386/cpu-param.h new file mode 100644 index 00000000..57abc64c --- /dev/null +++ b/qemu/target/i386/cpu-param.h @@ -0,0 +1,28 @@ +/* + * i386 cpu parameters for qemu. + * + * Copyright (c) 2003 Fabrice Bellard + * SPDX-License-Identifier: LGPL-2.0+ + */ + +#ifndef I386_CPU_PARAM_H +#define I386_CPU_PARAM_H 1 + +#ifdef TARGET_X86_64 +# define TARGET_LONG_BITS 64 +# define TARGET_PHYS_ADDR_SPACE_BITS 52 +/* + * ??? This is really 48 bits, sign-extended, but the only thing + * accessible to userland with bit 48 set is the VSYSCALL, and that + * is handled via other mechanisms. + */ +# define TARGET_VIRT_ADDR_SPACE_BITS 47 +#else +# define TARGET_LONG_BITS 32 +# define TARGET_PHYS_ADDR_SPACE_BITS 36 +# define TARGET_VIRT_ADDR_SPACE_BITS 32 +#endif +#define TARGET_PAGE_BITS 12 +#define NB_MMU_MODES 3 + +#endif diff --git a/qemu/target/i386/cpu-qom.h b/qemu/target/i386/cpu-qom.h new file mode 100644 index 00000000..e8f48a31 --- /dev/null +++ b/qemu/target/i386/cpu-qom.h @@ -0,0 +1,62 @@ +/* + * QEMU x86 CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ +#ifndef QEMU_I386_CPU_QOM_H +#define QEMU_I386_CPU_QOM_H + +#include "hw/core/cpu.h" + +typedef struct X86CPUModel X86CPUModel; + +/** + * X86CPUClass: + * @cpu_def: CPU model definition + * @host_cpuid_required: Whether CPU model requires cpuid from host. + * @ordering: Ordering on the "-cpu help" CPU model list. + * @migration_safe: See CpuDefinitionInfo::migration_safe + * @static_model: See CpuDefinitionInfo::static + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * An x86 CPU model or family. + */ +typedef struct X86CPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + /* CPU definition, automatically loaded by instance_init if not NULL. + * Should be eventually replaced by subclass-specific property defaults. + */ + X86CPUModel *model; + + bool host_cpuid_required; + int ordering; + bool static_model; + + /* Optional description of CPU model. + * If unavailable, cpu_def->model_id is used */ + const char *model_description; + + void (*parent_reset)(CPUState *cpu); +} X86CPUClass; + +typedef struct X86CPU X86CPU; + +#endif diff --git a/qemu/target/i386/cpu.c b/qemu/target/i386/cpu.c new file mode 100644 index 00000000..469b1b0a --- /dev/null +++ b/qemu/target/i386/cpu.c @@ -0,0 +1,4855 @@ +/* + * i386 CPUID helper functions + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "qemu/bitops.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "sysemu/cpus.h" +#include "sysemu/sysemu.h" +#include "sysemu/tcg.h" +#include "hw/i386/topology.h" + +#include "uc_priv.h" + + +/* Helpers for building CPUID[2] descriptors: */ + +struct CPUID2CacheDescriptorInfo { + enum CacheType type; + int level; + int size; + int line_size; + int associativity; +}; + +/* + * Known CPUID 2 cache descriptors. 
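+ * Each one-byte descriptor returned by CPUID leaf 2 denotes one
+ * fixed cache or TLB configuration; this table is indexed by that
+ * byte, covering the plain cache descriptors only.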
+ * From Intel SDM Volume 2A, CPUID instruction + */ +static struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { + [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, + .associativity = 4, .line_size = 32, }, + [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, + .associativity = 4, .line_size = 32, }, + [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, + .associativity = 4, .line_size = 64, }, + [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, + .associativity = 2, .line_size = 32, }, + [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, + .associativity = 4, .line_size = 32, }, + [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, + .associativity = 4, .line_size = 64, }, + [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, + .associativity = 6, .line_size = 64, }, + [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, + .associativity = 2, .line_size = 64, }, + [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, + .associativity = 8, .line_size = 64, }, + /* lines per sector is not supported cpuid2_cache_descriptor(), + * so descriptors 0x22, 0x23 are not included + */ + [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, + .associativity = 16, .line_size = 64, }, + /* lines per sector is not supported cpuid2_cache_descriptor(), + * so descriptors 0x25, 0x20 are not included + */ + [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, + .associativity = 8, .line_size = 64, }, + [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, + .associativity = 8, .line_size = 64, }, + [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, + .associativity = 4, .line_size = 32, }, + [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, + .associativity = 4, .line_size = 32, }, + [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, + .associativity = 4, .line_size = 32, }, + [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, + .associativity = 4, .line_size = 32, }, + [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, + .associativity = 4, .line_size = 32, }, + [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, + .associativity = 4, .line_size = 64, }, + [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, + .associativity = 8, .line_size = 64, }, + [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, + .associativity = 12, .line_size = 64, }, + /* Descriptor 0x49 depends on CPU family/model, so it is not included */ + [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, + .associativity = 12, .line_size = 64, }, + [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, + .associativity = 16, .line_size = 64, }, + [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, + .associativity = 12, .line_size = 64, }, + [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, + .associativity = 16, .line_size = 64, }, + [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, + .associativity = 24, .line_size = 64, }, + [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, + .associativity = 8, .line_size = 64, }, + [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, + .associativity = 4, .line_size = 64, }, + [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, + .associativity = 4, .line_size = 64, }, + [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, + .associativity = 4, .line_size = 64, 
+    },
+    [0x78] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
+               .associativity = 4,  .line_size = 64, },
+    /* lines per sector is not supported by cpuid2_cache_descriptor(),
+     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
+     */
+    [0x7D] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
+               .associativity = 8,  .line_size = 64, },
+    [0x7F] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
+               .associativity = 2,  .line_size = 64, },
+    [0x80] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
+               .associativity = 8,  .line_size = 64, },
+    [0x82] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
+               .associativity = 8,  .line_size = 32, },
+    [0x83] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
+               .associativity = 8,  .line_size = 32, },
+    [0x84] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
+               .associativity = 8,  .line_size = 32, },
+    [0x85] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
+               .associativity = 8,  .line_size = 32, },
+    [0x86] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
+               .associativity = 4,  .line_size = 64, },
+    [0x87] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
+               .associativity = 8,  .line_size = 64, },
+    [0xD0] = { .level = 3, .type = UNIFIED_CACHE,     .size = 512 * KiB,
+               .associativity = 4,  .line_size = 64, },
+    [0xD1] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1 * MiB,
+               .associativity = 4,  .line_size = 64, },
+    [0xD2] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
+               .associativity = 4,  .line_size = 64, },
+    [0xD6] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1 * MiB,
+               .associativity = 8,  .line_size = 64, },
+    [0xD7] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
+               .associativity = 8,  .line_size = 64, },
+    [0xD8] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
+               .associativity = 8,  .line_size = 64, },
+    [0xDC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1.5 * MiB,
+               .associativity = 12, .line_size = 64, },
+    [0xDD] = { .level = 3, .type = UNIFIED_CACHE,     .size = 3 * MiB,
+               .associativity = 12, .line_size = 64, },
+    [0xDE] = { .level = 3, .type = UNIFIED_CACHE,     .size = 6 * MiB,
+               .associativity = 12, .line_size = 64, },
+    [0xE2] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
+               .associativity = 16, .line_size = 64, },
+    [0xE3] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
+               .associativity = 16, .line_size = 64, },
+    [0xE4] = { .level = 3, .type = UNIFIED_CACHE,     .size = 8 * MiB,
+               .associativity = 16, .line_size = 64, },
+    [0xEA] = { .level = 3, .type = UNIFIED_CACHE,     .size = 12 * MiB,
+               .associativity = 24, .line_size = 64, },
+    [0xEB] = { .level = 3, .type = UNIFIED_CACHE,     .size = 18 * MiB,
+               .associativity = 24, .line_size = 64, },
+    [0xEC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 24 * MiB,
+               .associativity = 24, .line_size = 64, },
+};
+
+/*
+ * "CPUID leaf 2 does not report cache descriptor information,
+ * use CPUID leaf 4 to query cache parameters"
+ */
+#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
+
+/*
+ * Return a CPUID 2 cache descriptor for a given cache.
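+ * (Editor's worked example: a unified level-2 cache of 2 MiB with 64-byte
+ * lines and associativity 8 matches entry 0x7D in the table above, so
+ * 0x7D would be returned.)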
+ * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE + */ +static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) +{ + int i; + + assert(cache->size > 0); + assert(cache->level > 0); + assert(cache->line_size > 0); + assert(cache->associativity > 0); + for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { + struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; + if (d->level == cache->level && d->type == cache->type && + d->size == cache->size && d->line_size == cache->line_size && + d->associativity == cache->associativity) { + return i; + } + } + + return CACHE_DESCRIPTOR_UNAVAILABLE; +} + +/* CPUID Leaf 4 constants: */ + +/* EAX: */ +#define CACHE_TYPE_D 1 +#define CACHE_TYPE_I 2 +#define CACHE_TYPE_UNIFIED 3 + +#define CACHE_LEVEL(l) (l << 5) + +#define CACHE_SELF_INIT_LEVEL (1 << 8) + +/* EDX: */ +#define CACHE_NO_INVD_SHARING (1 << 0) +#define CACHE_INCLUSIVE (1 << 1) +#define CACHE_COMPLEX_IDX (1 << 2) + +/* Encode CacheType for CPUID[4].EAX */ +#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ + ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ + ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ + 0 /* Invalid value */) + + +/* Encode cache info for CPUID[4] */ +static void encode_cache_cpuid4(CPUCacheInfo *cache, + int num_apic_ids, int num_cores, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + assert(cache->size == cache->line_size * cache->associativity * + cache->partitions * cache->sets); + + assert(num_apic_ids > 0); + *eax = CACHE_TYPE(cache->type) | + CACHE_LEVEL(cache->level) | + (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | + ((num_cores - 1) << 26) | + ((num_apic_ids - 1) << 14); + + assert(cache->line_size > 0); + assert(cache->partitions > 0); + assert(cache->associativity > 0); + /* We don't implement fully-associative caches */ + assert(cache->associativity < cache->sets); + *ebx = (cache->line_size - 1) | + ((cache->partitions - 1) << 12) | + ((cache->associativity - 1) << 22); + + assert(cache->sets > 0); + *ecx = cache->sets - 1; + + *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | + (cache->inclusive ? CACHE_INCLUSIVE : 0) | + (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); +} + +/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ +static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) +{ + assert(cache->size % 1024 == 0); + assert(cache->lines_per_tag > 0); + assert(cache->associativity > 0); + assert(cache->line_size > 0); + return ((cache->size / 1024) << 24) | (cache->associativity << 16) | + (cache->lines_per_tag << 8) | (cache->line_size); +} + +#define ASSOC_FULL 0xFF + +/* AMD associativity encoding used on CPUID Leaf 0x80000006: */ +#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ + a == 2 ? 0x2 : \ + a == 4 ? 0x4 : \ + a == 8 ? 0x6 : \ + a == 16 ? 0x8 : \ + a == 32 ? 0xA : \ + a == 48 ? 0xB : \ + a == 64 ? 0xC : \ + a == 96 ? 0xD : \ + a == 128 ? 0xE : \ + a == ASSOC_FULL ? 0xF : \ + 0 /* invalid value */) + +/* + * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX + * @l3 can be NULL. 
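+ * Editor's worked example, using the legacy_l2_cache_amd values defined
+ * below (a 512 KiB, 16-way L2 with 64-byte lines and lines_per_tag == 1):
+ *   ECX = (512 << 16) | (AMD_ENC_ASSOC(16) << 12) | (1 << 8) | 64,
+ * where AMD_ENC_ASSOC(16) is 0x8 per the encoding table above.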
+ */
+static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
+                                       CPUCacheInfo *l3,
+                                       uint32_t *ecx, uint32_t *edx)
+{
+    assert(l2->size % 1024 == 0);
+    assert(l2->associativity > 0);
+    assert(l2->lines_per_tag > 0);
+    assert(l2->line_size > 0);
+    *ecx = ((l2->size / 1024) << 16) |
+           (AMD_ENC_ASSOC(l2->associativity) << 12) |
+           (l2->lines_per_tag << 8) | (l2->line_size);
+
+    if (l3) {
+        assert(l3->size % (512 * 1024) == 0);
+        assert(l3->associativity > 0);
+        assert(l3->lines_per_tag > 0);
+        assert(l3->line_size > 0);
+        *edx = ((l3->size / (512 * 1024)) << 18) |
+               (AMD_ENC_ASSOC(l3->associativity) << 12) |
+               (l3->lines_per_tag << 8) | (l3->line_size);
+    } else {
+        *edx = 0;
+    }
+}
+
+/* Encode cache info for CPUID[8000001D] */
+static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
+                                       X86CPUTopoInfo *topo_info,
+                                       uint32_t *eax, uint32_t *ebx,
+                                       uint32_t *ecx, uint32_t *edx)
+{
+    uint32_t l3_cores;
+    unsigned nodes = MAX(topo_info->nodes_per_pkg, 1);
+
+    assert(cache->size == cache->line_size * cache->associativity *
+                          cache->partitions * cache->sets);
+
+    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
+               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
+
+    /* L3 is shared among multiple cores */
+    if (cache->level == 3) {
+        l3_cores = DIV_ROUND_UP((topo_info->dies_per_pkg *
+                                 topo_info->cores_per_die *
+                                 topo_info->threads_per_core),
+                                 nodes);
+        *eax |= (l3_cores - 1) << 14;
+    } else {
+        *eax |= ((topo_info->threads_per_core - 1) << 14);
+    }
+
+    assert(cache->line_size > 0);
+    assert(cache->partitions > 0);
+    assert(cache->associativity > 0);
+    /* We don't implement fully-associative caches */
+    assert(cache->associativity < cache->sets);
+    *ebx = (cache->line_size - 1) |
+           ((cache->partitions - 1) << 12) |
+           ((cache->associativity - 1) << 22);
+
+    assert(cache->sets > 0);
+    *ecx = cache->sets - 1;
+
+    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
+           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
+           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
+}
+
+/* Encode cache info for CPUID[8000001E] */
+static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu,
+                                      uint32_t *eax, uint32_t *ebx,
+                                      uint32_t *ecx, uint32_t *edx)
+{
+    X86CPUTopoIDs topo_ids = {0};
+    unsigned long nodes = MAX(topo_info->nodes_per_pkg, 1);
+    int shift;
+
+    x86_topo_ids_from_apicid_epyc(cpu->apic_id, topo_info, &topo_ids);
+
+    *eax = cpu->apic_id;
+    /*
+     * CPUID_Fn8000001E_EBX
+     * 31:16 Reserved
+     * 15:8  Threads per core (The number of threads per core is
+     *       Threads per core + 1)
+     *  7:0  Core id (see bit decoding below)
+     *       SMT:
+     *           4:3 node id
+     *             2 Core complex id
+     *           1:0 Core id
+     *       Non SMT:
+     *           5:4 node id
+     *             3 Core complex id
+     *           1:0 Core id
+     */
+    *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.node_id << 3) |
+            (topo_ids.core_id);
+    /*
+     * CPUID_Fn8000001E_ECX
+     * 31:11 Reserved
+     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
+     *  7:0  Node id (see bit decoding below)
+     *         2  Socket id
+     *       1:0  Node id
+     */
+    if (nodes <= 4) {
+        *ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id;
+    } else {
+        /*
+         * Node id fix up. Actual hardware supports up to 4 nodes. But with
+         * more than 32 cores, we may end up with more than 4 nodes.
+         * Node id is a combination of socket id and node id. The only
+         * requirement here is that this number should be unique across the
+         * system. Shift the socket id to accommodate more nodes. We don't
+         * expect both the socket id and the node id to be big numbers at
+         * the same time.
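+         * (Editor's worked example: with nodes = 5, nodes - 1 = 4 = 0b100,
+         * so find_last_bit() below returns 2 and the socket id is shifted
+         * left by 3 before being ORed with the node id.)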
+         * This is not an ideal config, but we need to support it. Max nodes
+         * we can have is 32 (255/8) with 8 cores per node and 255 max cores.
+         * We only need 5 bits for nodes. Find the left-most set bit to
+         * represent the total number of nodes. find_last_bit returns the
+         * last set bit (0-based). Left shift (+1) the socket id to
+         * represent all the nodes.
+         */
+        nodes -= 1;
+        shift = find_last_bit(&nodes, 8);
+        *ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) |
+               topo_ids.node_id;
+    }
+    *edx = 0;
+}
+
+/*
+ * Definitions of the hardcoded cache entries we expose:
+ * These are legacy cache values. If there is a need to change any
+ * of these values please use builtin_x86_defs
+ */
+
+/* L1 data cache: */
+static CPUCacheInfo legacy_l1d_cache = {
+    .type = DATA_CACHE,
+    .level = 1,
+    .size = 32 * KiB,
+    .self_init = 1,
+    .line_size = 64,
+    .associativity = 8,
+    .sets = 64,
+    .partitions = 1,
+    .no_invd_sharing = true,
+};
+
+/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
+static CPUCacheInfo legacy_l1d_cache_amd = {
+    .type = DATA_CACHE,
+    .level = 1,
+    .size = 64 * KiB,
+    .self_init = 1,
+    .line_size = 64,
+    .associativity = 2,
+    .sets = 512,
+    .partitions = 1,
+    .lines_per_tag = 1,
+    .no_invd_sharing = true,
+};
+
+/* L1 instruction cache: */
+static CPUCacheInfo legacy_l1i_cache = {
+    .type = INSTRUCTION_CACHE,
+    .level = 1,
+    .size = 32 * KiB,
+    .self_init = 1,
+    .line_size = 64,
+    .associativity = 8,
+    .sets = 64,
+    .partitions = 1,
+    .no_invd_sharing = true,
+};
+
+/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
+static CPUCacheInfo legacy_l1i_cache_amd = {
+    .type = INSTRUCTION_CACHE,
+    .level = 1,
+    .size = 64 * KiB,
+    .self_init = 1,
+    .line_size = 64,
+    .associativity = 2,
+    .sets = 512,
+    .partitions = 1,
+    .lines_per_tag = 1,
+    .no_invd_sharing = true,
+};
+
+/* Level 2 unified cache: */
+static CPUCacheInfo legacy_l2_cache = {
+    .type = UNIFIED_CACHE,
+    .level = 2,
+    .size = 4 * MiB,
+    .self_init = 1,
+    .line_size = 64,
+    .associativity = 16,
+    .sets = 4096,
+    .partitions = 1,
+    .no_invd_sharing = true,
+};
+
+/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
+static CPUCacheInfo legacy_l2_cache_cpuid2 = {
+    .type = UNIFIED_CACHE,
+    .level = 2,
+    .size = 2 * MiB,
+    .line_size = 64,
+    .associativity = 8,
+};
+
+
+/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
+static CPUCacheInfo legacy_l2_cache_amd = {
+    .type = UNIFIED_CACHE,
+    .level = 2,
+    .size = 512 * KiB,
+    .line_size = 64,
+    .lines_per_tag = 1,
+    .associativity = 16,
+    .sets = 512,
+    .partitions = 1,
+};
+
+/* Level 3 unified cache: */
+static CPUCacheInfo legacy_l3_cache = {
+    .type = UNIFIED_CACHE,
+    .level = 3,
+    .size = 16 * MiB,
+    .line_size = 64,
+    .associativity = 16,
+    .sets = 16384,
+    .partitions = 1,
+    .lines_per_tag = 1,
+    .self_init = true,
+    .inclusive = true,
+    .complex_indexing = true,
+};
+
+/* TLB definitions: */
+
+#define L1_DTLB_2M_ASSOC       1
+#define L1_DTLB_2M_ENTRIES   255
+#define L1_DTLB_4K_ASSOC       1
+#define L1_DTLB_4K_ENTRIES   255
+
+#define L1_ITLB_2M_ASSOC       1
+#define L1_ITLB_2M_ENTRIES   255
+#define L1_ITLB_4K_ASSOC       1
+#define L1_ITLB_4K_ENTRIES   255
+
+#define L2_DTLB_2M_ASSOC       0 /* disabled */
+#define L2_DTLB_2M_ENTRIES     0 /* disabled */
+#define L2_DTLB_4K_ASSOC       4
+#define L2_DTLB_4K_ENTRIES   512
+
+#define L2_ITLB_2M_ASSOC       0 /* disabled */
+#define L2_ITLB_2M_ENTRIES     0 /* disabled */
+#define L2_ITLB_4K_ASSOC       4
+#define L2_ITLB_4K_ENTRIES   512
+
+/* CPUID Leaf 0x14 constants: */
+#define INTEL_PT_MAX_SUBLEAF     0x1
+/*
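+ * Editor's note: these are the Intel PT capability bits reported in
+ * CPUID[0x14,0].EBX; INTEL_PT_MINIMAL_EBX (0xf) below requires all
+ * four of them:
+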
* bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH + * MSR can be accessed; + * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; + * bit[02]: Support IP Filtering, TraceStop filtering, and preservation + * of Intel PT MSRs across warm reset; + * bit[03]: Support MTC timing packet and suppression of COFI-based packets; + */ +#define INTEL_PT_MINIMAL_EBX 0xf +/* + * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and + * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be + * accessed; + * bit[01]: ToPA tables can hold any number of output entries, up to the + * maximum allowed by the MaskOrTableOffset field of + * IA32_RTIT_OUTPUT_MASK_PTRS; + * bit[02]: Support Single-Range Output scheme; + */ +#define INTEL_PT_MINIMAL_ECX 0x7 +/* generated packets which contain IP payloads have LIP values */ +#define INTEL_PT_IP_LIP (1 << 31) +#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ +#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 +#define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ +#define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ +#define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ + +#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) +#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ + CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) +#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ + CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ + CPUID_PSE36 | CPUID_FXSR) +#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) +#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ + CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ + CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ + CPUID_PAE | CPUID_SEP | CPUID_APIC) + +#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ + CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ + CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ + CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ + CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) + /* partly implemented: + CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ + /* missing: + CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ +#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ + CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ + CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ + CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ + CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ + CPUID_EXT_RDRAND) + /* missing: + CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, + CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, + CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, + CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, + CPUID_EXT_F16C */ + +#ifdef TARGET_X86_64 +#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) +#else +#define TCG_EXT2_X86_64_FEATURES 0 +#endif + +#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ + CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ + CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ + TCG_EXT2_X86_64_FEATURES) +#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ + CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) +#define TCG_EXT4_FEATURES 0 +#define 
TCG_SVM_FEATURES CPUID_SVM_NPT +#define TCG_KVM_FEATURES 0 +#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ + CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ + CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ + CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ + CPUID_7_0_EBX_ERMS) + /* missing: + CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, + CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, + CPUID_7_0_EBX_RDSEED */ +#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ + /* CPUID_7_0_ECX_OSPKE is dynamic */ \ + CPUID_7_0_ECX_LA57) +#define TCG_7_0_EDX_FEATURES 0 +#define TCG_7_1_EAX_FEATURES 0 +#define TCG_APM_FEATURES 0 +#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT +#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) + /* missing: + CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ + +typedef enum FeatureWordType { + CPUID_FEATURE_WORD, + MSR_FEATURE_WORD, +} FeatureWordType; + +typedef struct FeatureWordInfo { + FeatureWordType type; + /* feature flags names are taken from "Intel Processor Identification and + * the CPUID Instruction" and AMD's "CPUID Specification". + * In cases of disagreement between feature naming conventions, + * aliases may be added. + */ + const char *feat_names[64]; + union { + /* If type==CPUID_FEATURE_WORD */ + struct { + uint32_t eax; /* Input EAX for CPUID */ + bool needs_ecx; /* CPUID instruction uses ECX as input */ + uint32_t ecx; /* Input ECX value for CPUID */ + int reg; /* output register (R_* constant) */ + } cpuid; + /* If type==MSR_FEATURE_WORD */ + struct { + uint32_t index; + } msr; + }; + uint64_t tcg_features; /* Feature flags supported by TCG */ + /* Features that shouldn't be auto-enabled by "-cpu host" */ + uint64_t no_autoenable_flags; +} FeatureWordInfo; + +static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { + [FEAT_1_EDX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "fpu", "vme", "de", "pse", + "tsc", "msr", "pae", "mce", + "cx8", "apic", NULL, "sep", + "mtrr", "pge", "mca", "cmov", + "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, + NULL, "ds" /* Intel dts */, "acpi", "mmx", + "fxsr", "sse", "sse2", "ss", + "ht" /* Intel htt */, "tm", "ia64", "pbe", + }, + .cpuid = {.eax = 1, .reg = R_EDX, }, + .tcg_features = TCG_FEATURES, + }, + [FEAT_1_ECX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", + "ds-cpl", "vmx", "smx", "est", + "tm2", "ssse3", "cid", NULL, + "fma", "cx16", "xtpr", "pdcm", + NULL, "pcid", "dca", "sse4.1", + "sse4.2", "x2apic", "movbe", "popcnt", + "tsc-deadline", "aes", "xsave", NULL /* osxsave */, + "avx", "f16c", "rdrand", "hypervisor", + }, + .cpuid = { .eax = 1, .reg = R_ECX, }, + .tcg_features = TCG_EXT_FEATURES, + }, + /* Feature names that are already defined on feature_name[] but + * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their + * names on feat_names below. They are copied automatically + * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 
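+ * (Editor's example: "mmx" is listed only under FEAT_1_EDX; its
+ * CPUID[8000_0001].EDX twin below is left NULL and is copied over
+ * automatically for AMD CPUs.)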
+ */ + [FEAT_8000_0001_EDX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, + NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, + NULL /* cx8 */, NULL /* apic */, NULL, "syscall", + NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, + NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, + "nx", NULL, "mmxext", NULL /* mmx */, + NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", + NULL, "lm", "3dnowext", "3dnow", + }, + .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, + .tcg_features = TCG_EXT2_FEATURES, + }, + [FEAT_8000_0001_ECX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "lahf-lm", "cmp-legacy", "svm", "extapic", + "cr8legacy", "abm", "sse4a", "misalignsse", + "3dnowprefetch", "osvw", "ibs", "xop", + "skinit", "wdt", NULL, "lwp", + "fma4", "tce", NULL, "nodeid-msr", + NULL, "tbm", "topoext", "perfctr-core", + "perfctr-nb", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, + .tcg_features = TCG_EXT3_FEATURES, + /* + * TOPOEXT is always allowed but can't be enabled blindly by + * "-cpu host", as it requires consistent cache topology info + * to be provided so it doesn't confuse guests. + */ + .no_autoenable_flags = CPUID_EXT3_TOPOEXT, + }, + [FEAT_C000_0001_EDX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, "xstore", "xstore-en", + NULL, NULL, "xcrypt", "xcrypt-en", + "ace2", "ace2-en", "phe", "phe-en", + "pmm", "pmm-en", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, + .tcg_features = TCG_EXT4_FEATURES, + }, + [FEAT_HV_RECOMM_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL /* hv_recommend_pv_as_switch */, + NULL /* hv_recommend_pv_tlbflush_local */, + NULL /* hv_recommend_pv_tlbflush_remote */, + NULL /* hv_recommend_msr_apic_access */, + NULL /* hv_recommend_msr_reset */, + NULL /* hv_recommend_relaxed_timing */, + NULL /* hv_recommend_dma_remapping */, + NULL /* hv_recommend_int_remapping */, + NULL /* hv_recommend_x2apic_msrs */, + NULL /* hv_recommend_autoeoi_deprecation */, + NULL /* hv_recommend_pv_ipi */, + NULL /* hv_recommend_ex_hypercalls */, + NULL /* hv_hypervisor_is_nested */, + NULL /* hv_recommend_int_mbec */, + NULL /* hv_recommend_evmcs */, + NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, + }, + [FEAT_HV_NESTED_EAX] = { + .type = CPUID_FEATURE_WORD, + .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, + }, + [FEAT_SVM] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "npt", "lbrv", "svm-lock", "nrip-save", + "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", + NULL, NULL, "pause-filter", NULL, + "pfthreshold", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, + .tcg_features = TCG_SVM_FEATURES, + }, + [FEAT_7_0_EBX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "fsgsbase", "tsc-adjust", NULL, "bmi1", + "hle", "avx2", NULL, "smep", + "bmi2", "erms", "invpcid", "rtm", + NULL, NULL, "mpx", NULL, + "avx512f", "avx512dq", "rdseed", "adx", + "smap", "avx512ifma", "pcommit", "clflushopt", + "clwb", "intel-pt", "avx512pf", "avx512er", + "avx512cd", "sha-ni", "avx512bw", "avx512vl", + }, + .cpuid = { + .eax = 
7, + .needs_ecx = true, .ecx = 0, + .reg = R_EBX, + }, + .tcg_features = TCG_7_0_EBX_FEATURES, + }, + [FEAT_7_0_ECX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, "avx512vbmi", "umip", "pku", + NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL, + "gfni", "vaes", "vpclmulqdq", "avx512vnni", + "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, + "la57", NULL, NULL, NULL, + NULL, NULL, "rdpid", NULL, + NULL, "cldemote", NULL, "movdiri", + "movdir64b", NULL, NULL, NULL, + }, + .cpuid = { + .eax = 7, + .needs_ecx = true, .ecx = 0, + .reg = R_ECX, + }, + .tcg_features = TCG_7_0_ECX_FEATURES, + }, + [FEAT_7_0_EDX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", + NULL, NULL, NULL, NULL, + NULL, NULL, "md-clear", NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL /* pconfig */, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, "spec-ctrl", "stibp", + NULL, "arch-capabilities", "core-capability", "ssbd", + }, + .cpuid = { + .eax = 7, + .needs_ecx = true, .ecx = 0, + .reg = R_EDX, + }, + .tcg_features = TCG_7_0_EDX_FEATURES, + }, + [FEAT_7_1_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, "avx512-bf16", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 7, + .needs_ecx = true, .ecx = 1, + .reg = R_EAX, + }, + .tcg_features = TCG_7_1_EAX_FEATURES, + }, + [FEAT_8000_0007_EDX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + "invtsc", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, + .tcg_features = TCG_APM_FEATURES, + }, + [FEAT_8000_0008_EBX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "clzero", NULL, "xsaveerptr", NULL, + NULL, NULL, NULL, NULL, + NULL, "wbnoinvd", NULL, NULL, + "ibpb", NULL, NULL, "amd-stibp", + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, + .tcg_features = 0, + }, + [FEAT_XSAVE] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "xsaveopt", "xsavec", "xgetbv1", "xsaves", + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 0xd, + .needs_ecx = true, .ecx = 1, + .reg = R_EAX, + }, + .tcg_features = TCG_XSAVE_FEATURES, + }, + [FEAT_6_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, "arat", NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { .eax = 6, .reg = R_EAX, }, + .tcg_features = TCG_6_EAX_FEATURES, + }, + [FEAT_XSAVE_COMP_LO] = { + .type = CPUID_FEATURE_WORD, + .cpuid = { + .eax = 0xD, + .needs_ecx = true, .ecx = 0, + .reg = R_EAX, + }, + .tcg_features = ~0U, + }, + [FEAT_XSAVE_COMP_HI] = { + .type = CPUID_FEATURE_WORD, + .cpuid = { + .eax = 0xD, + .needs_ecx = true, .ecx = 0, + .reg = R_EDX, + }, + .tcg_features = ~0U, + }, + /*Below are MSR exposed features*/ + [FEAT_ARCH_CAPABILITIES] = { + .type = MSR_FEATURE_WORD, + .feat_names = { + "rdctl-no", "ibrs-all", 
"rsba", "skip-l1dfl-vmentry", + "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", + "taa-no", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .msr = { + .index = MSR_IA32_ARCH_CAPABILITIES, + }, + }, + [FEAT_CORE_CAPABILITY] = { + .type = MSR_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, "split-lock-detect", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .msr = { + .index = MSR_IA32_CORE_CAPABILITY, + }, + }, + + [FEAT_VMX_PROCBASED_CTLS] = { + .type = MSR_FEATURE_WORD, + .feat_names = { + NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset", + NULL, NULL, NULL, "vmx-hlt-exit", + NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit", + "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit", + "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit", + "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit", + "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf", + "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls", + }, + .msr = { + .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS, + } + }, + + [FEAT_VMX_SECONDARY_CTLS] = { + .type = MSR_FEATURE_WORD, + .feat_names = { + "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit", + "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest", + "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit", + "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit", + "vmx-rdseed-exit", "vmx-pml", NULL, NULL, + "vmx-xsaves", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .msr = { + .index = MSR_IA32_VMX_PROCBASED_CTLS2, + } + }, + + [FEAT_VMX_PINBASED_CTLS] = { + .type = MSR_FEATURE_WORD, + .feat_names = { + "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit", + NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr", + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .msr = { + .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS, + } + }, + + [FEAT_VMX_EXIT_CTLS] = { + .type = MSR_FEATURE_WORD, + /* + * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from + * the LM CPUID bit. 
+ */ + .feat_names = { + NULL, NULL, "vmx-exit-nosave-debugctl", NULL, + NULL, NULL, NULL, NULL, + NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL, + "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr", + NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat", + "vmx-exit-save-efer", "vmx-exit-load-efer", + "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", + NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .msr = { + .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, + } + }, + + [FEAT_VMX_ENTRY_CTLS] = { + .type = MSR_FEATURE_WORD, + .feat_names = { + NULL, NULL, "vmx-entry-noload-debugctl", NULL, + NULL, NULL, NULL, NULL, + NULL, "vmx-entry-ia32e-mode", NULL, NULL, + NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", + "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .msr = { + .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS, + } + }, + + [FEAT_VMX_MISC] = { + .type = MSR_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown", + "vmx-activity-wait-sipi", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL, + }, + .msr = { + .index = MSR_IA32_VMX_MISC, + } + }, + + [FEAT_VMX_EPT_VPID_CAPS] = { + .type = MSR_FEATURE_WORD, + .feat_names = { + "vmx-ept-execonly", NULL, NULL, NULL, + NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5", + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL, + "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL, + NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL, + NULL, NULL, NULL, NULL, + "vmx-invvpid", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + "vmx-invvpid-single-addr", "vmx-invept-single-context", + "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals", + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .msr = { + .index = MSR_IA32_VMX_EPT_VPID_CAP, + } + }, + + [FEAT_VMX_BASIC] = { + .type = MSR_FEATURE_WORD, + .feat_names = { + [54] = "vmx-ins-outs", + [55] = "vmx-true-ctls", + }, + .msr = { + .index = MSR_IA32_VMX_BASIC, + }, + /* Just to be safe - we don't support setting the MSEG version field. 
*/ + .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR, + }, + + [FEAT_VMX_VMFUNC] = { + .type = MSR_FEATURE_WORD, + .feat_names = { + [0] = "vmx-eptp-switching", + }, + .msr = { + .index = MSR_IA32_VMX_VMFUNC, + } + }, + +}; + +typedef enum X86CPURegister32 { + X86_CPU_REGISTER32_EAX = 0, + X86_CPU_REGISTER32_EBX = 1, + X86_CPU_REGISTER32_ECX = 2, + X86_CPU_REGISTER32_EDX = 3, + X86_CPU_REGISTER32_ESP = 4, + X86_CPU_REGISTER32_EBP = 5, + X86_CPU_REGISTER32_ESI = 6, + X86_CPU_REGISTER32_EDI = 7, + X86_CPU_REGISTER32_MAX = 8, +} X86CPURegister32; + + +typedef struct X86RegisterInfo32 { + /* Name of register */ + const char *name; + /* QAPI enum value register */ + X86CPURegister32 qapi_enum; +} X86RegisterInfo32; + +#define REGISTER(reg) \ + [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg } +static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { + REGISTER(EAX), + REGISTER(ECX), + REGISTER(EDX), + REGISTER(EBX), + REGISTER(ESP), + REGISTER(EBP), + REGISTER(ESI), + REGISTER(EDI), +}; +#undef REGISTER + +typedef struct ExtSaveArea { + uint32_t feature, bits; + uint32_t offset, size; +} ExtSaveArea; + +static const ExtSaveArea x86_ext_save_areas[] = { + [XSTATE_FP_BIT] = { + /* x87 FP state component is always enabled if XSAVE is supported */ + .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, + /* x87 state is in the legacy region of the XSAVE area */ + .offset = 0, + .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), + }, + [XSTATE_SSE_BIT] = { + /* SSE state component is always enabled if XSAVE is supported */ + .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, + /* SSE state is in the legacy region of the XSAVE area */ + .offset = 0, + .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), + }, + [XSTATE_YMM_BIT] = + { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX, + .offset = offsetof(X86XSaveArea, avx_state), + .size = sizeof(XSaveAVX) }, + [XSTATE_BNDREGS_BIT] = + { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, + .offset = offsetof(X86XSaveArea, bndreg_state), + .size = sizeof(XSaveBNDREG) }, + [XSTATE_BNDCSR_BIT] = + { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, + .offset = offsetof(X86XSaveArea, bndcsr_state), + .size = sizeof(XSaveBNDCSR) }, + [XSTATE_OPMASK_BIT] = + { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, + .offset = offsetof(X86XSaveArea, opmask_state), + .size = sizeof(XSaveOpmask) }, + [XSTATE_ZMM_Hi256_BIT] = + { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, + .offset = offsetof(X86XSaveArea, zmm_hi256_state), + .size = sizeof(XSaveZMM_Hi256) }, + [XSTATE_Hi16_ZMM_BIT] = + { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, + .offset = offsetof(X86XSaveArea, hi16_zmm_state), + .size = sizeof(XSaveHi16_ZMM) }, + [XSTATE_PKRU_BIT] = + { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, + .offset = offsetof(X86XSaveArea, pkru_state), + .size = sizeof(XSavePKRU) }, +}; + +static uint32_t xsave_area_size(uint64_t mask) +{ + int i; + uint64_t ret = 0; + + for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { + const ExtSaveArea *esa = &x86_ext_save_areas[i]; + if ((mask >> i) & 1) { + ret = MAX(ret, esa->offset + esa->size); + } + } + return ret; +} + +static inline bool accel_uses_host_cpuid(void) +{ + return false; +} + +static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) +{ + return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | + cpu->env.features[FEAT_XSAVE_COMP_LO]; +} + +const char *get_register_name_32(unsigned int reg) +{ + if (reg >= CPU_NB_REGS32) { + return NULL; 
+ } + return x86_reg_info_32[reg].name; +} + +void host_cpuid(uint32_t function, uint32_t count, + uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) +{ + uint32_t vec[4]; + +#ifdef _MSC_VER + __cpuidex((int*)vec, function, count); +#else +#ifdef __x86_64__ + asm volatile("cpuid" + : "=a"(vec[0]), "=b"(vec[1]), + "=c"(vec[2]), "=d"(vec[3]) + : "0"(function), "c"(count) : "cc"); +#elif defined(__i386__) + asm volatile("pusha \n\t" + "cpuid \n\t" + "mov %%eax, 0(%2) \n\t" + "mov %%ebx, 4(%2) \n\t" + "mov %%ecx, 8(%2) \n\t" + "mov %%edx, 12(%2) \n\t" + "popa" + : : "a"(function), "c"(count), "S"(vec) + : "memory", "cc"); +#else + abort(); +#endif +#endif // _MSC_VER + + if (eax) + *eax = vec[0]; + if (ebx) + *ebx = vec[1]; + if (ecx) + *ecx = vec[2]; + if (edx) + *edx = vec[3]; +} + +void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) +{ + uint32_t eax, ebx, ecx, edx; + + host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); + + host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); + if (family) { + *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); + } + if (model) { + *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); + } + if (stepping) { + *stepping = eax & 0x0F; + } +} + +typedef struct PropValue { + const char *prop, *value; +} PropValue; + +typedef struct X86CPUVersionDefinition { + X86CPUVersion version; + const char *alias; + const char *note; + PropValue *props; +} X86CPUVersionDefinition; + +/* Base definition for a CPU model */ +typedef struct X86CPUDefinition { + const char *name; + uint32_t level; + uint32_t xlevel; + /* vendor is zero-terminated, 12 character ASCII string */ + char vendor[CPUID_VENDOR_SZ + 1]; + int family; + int model; + int stepping; + FeatureWordArray features; + const char *model_id; + CPUCaches *cache_info; + + /* Use AMD EPYC encoding for apic id */ + bool use_epyc_apic_id_encoding; + + /* + * Definitions for alternative versions of CPU model. + * List is terminated by item with version == 0. + * If NULL, version 1 will be registered automatically. + */ + const X86CPUVersionDefinition *versions; +} X86CPUDefinition; + +/* Reference to a specific CPU model version */ +struct X86CPUModel { + /* Base CPU definition */ + X86CPUDefinition *cpudef; + /* CPU model version */ + X86CPUVersion version; + const char *note; + /* + * If true, this is an alias CPU model. 
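+     * (Editor's example: the versioned "Nehalem-IBRS" model defined in
+     * builtin_x86_defs below is registered as an alias of "Nehalem", so
+     * its X86CPUModel has is_alias = true.)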
+     * This matters only for "-cpu help" and query-cpu-definitions
+     */
+    bool is_alias;
+};
+
+static CPUCaches epyc_cache_info = {
+    .l1d_cache = &(CPUCacheInfo) {
+        .type = DATA_CACHE,
+        .level = 1,
+        .size = 32 * KiB,
+        .line_size = 64,
+        .associativity = 8,
+        .partitions = 1,
+        .sets = 64,
+        .lines_per_tag = 1,
+        .self_init = 1,
+        .no_invd_sharing = true,
+    },
+    .l1i_cache = &(CPUCacheInfo) {
+        .type = INSTRUCTION_CACHE,
+        .level = 1,
+        .size = 64 * KiB,
+        .line_size = 64,
+        .associativity = 4,
+        .partitions = 1,
+        .sets = 256,
+        .lines_per_tag = 1,
+        .self_init = 1,
+        .no_invd_sharing = true,
+    },
+    .l2_cache = &(CPUCacheInfo) {
+        .type = UNIFIED_CACHE,
+        .level = 2,
+        .size = 512 * KiB,
+        .line_size = 64,
+        .associativity = 8,
+        .partitions = 1,
+        .sets = 1024,
+        .lines_per_tag = 1,
+    },
+    .l3_cache = &(CPUCacheInfo) {
+        .type = UNIFIED_CACHE,
+        .level = 3,
+        .size = 8 * MiB,
+        .line_size = 64,
+        .associativity = 16,
+        .partitions = 1,
+        .sets = 8192,
+        .lines_per_tag = 1,
+        .self_init = true,
+        .inclusive = true,
+        .complex_indexing = true,
+    },
+};
+
+static CPUCaches epyc_rome_cache_info = {
+    .l1d_cache = &(CPUCacheInfo) {
+        .type = DATA_CACHE,
+        .level = 1,
+        .size = 32 * KiB,
+        .line_size = 64,
+        .associativity = 8,
+        .partitions = 1,
+        .sets = 64,
+        .lines_per_tag = 1,
+        .self_init = 1,
+        .no_invd_sharing = true,
+    },
+    .l1i_cache = &(CPUCacheInfo) {
+        .type = INSTRUCTION_CACHE,
+        .level = 1,
+        .size = 32 * KiB,
+        .line_size = 64,
+        .associativity = 8,
+        .partitions = 1,
+        .sets = 64,
+        .lines_per_tag = 1,
+        .self_init = 1,
+        .no_invd_sharing = true,
+    },
+    .l2_cache = &(CPUCacheInfo) {
+        .type = UNIFIED_CACHE,
+        .level = 2,
+        .size = 512 * KiB,
+        .line_size = 64,
+        .associativity = 8,
+        .partitions = 1,
+        .sets = 1024,
+        .lines_per_tag = 1,
+    },
+    .l3_cache = &(CPUCacheInfo) {
+        .type = UNIFIED_CACHE,
+        .level = 3,
+        .size = 16 * MiB,
+        .line_size = 64,
+        .associativity = 16,
+        .partitions = 1,
+        .sets = 16384,
+        .lines_per_tag = 1,
+        .self_init = true,
+        .inclusive = true,
+        .complex_indexing = true,
+    },
+};
+
+/* The following VMX features are not supported by KVM and are left out in the
+ * CPU definitions:
+ *
+ *  Dual-monitor support (all processors)
+ *  Entry to SMM
+ *  Deactivate dual-monitor treatment
+ *  Number of CR3-target values
+ *  Shutdown activity state
+ *  Wait-for-SIPI activity state
+ *  PAUSE-loop exiting (Westmere and newer)
+ *  EPT-violation #VE (Broadwell and newer)
+ *  Inject event with insn length=0 (Skylake and newer)
+ *  Conceal non-root operation from PT
+ *  Conceal VM exits from PT
+ *  Conceal VM entries from PT
+ *  Enable ENCLS exiting
+ *  Mode-based execute control (XS/XU)
+ *  TSC scaling (Skylake Server and newer)
+ *  GPA translation for PT (IceLake and newer)
+ *  User wait and pause
+ *  ENCLV exiting
+ *  Load IA32_RTIT_CTL
+ *  Clear IA32_RTIT_CTL
+ *  Advanced VM-exit information for EPT violations
+ *  Sub-page write permissions
+ *  PT in VMX operation
+ */
+
+static X86CPUDefinition builtin_x86_defs[] = {
+    {
+        .name = "qemu64",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 6,
+        .model = 6,
+        .stepping = 3,
+        .features[FEAT_1_EDX] =
+            PPRO_FEATURES |
+            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+            CPUID_PSE36,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
+        .xlevel = 0x8000000A,
+        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
+    },
+    {
+        .name = "phenom",
+ .level = 5, + .vendor = CPUID_VENDOR_AMD, + .family = 16, + .model = 2, + .stepping = 3, + /* Missing: CPUID_HT */ + .features[FEAT_1_EDX] = + PPRO_FEATURES | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | + CPUID_PSE36 | CPUID_VME, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | + CPUID_EXT_POPCNT, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | + CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | + CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, + /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, + CPUID_EXT3_CR8LEG, + CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, + CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | + CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, + /* Missing: CPUID_SVM_LBRV */ + .features[FEAT_SVM] = + CPUID_SVM_NPT, + .xlevel = 0x8000001A, + .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" + }, + { + .name = "core2duo", + .level = 10, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 15, + .stepping = 11, + /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ + .features[FEAT_1_EDX] = + PPRO_FEATURES | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | + CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, + /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, + * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | + CPUID_EXT_CX16, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, + .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, + .xlevel = 0x80000008, + .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", + }, + { + .name = "kvm64", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 15, + .model = 6, + .stepping = 1, + /* Missing: CPUID_HT */ + .features[FEAT_1_EDX] = + PPRO_FEATURES | CPUID_VME | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | + CPUID_PSE36, + /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_CX16, + /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, + CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, + CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, + 
CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ + .features[FEAT_8000_0001_ECX] = + 0, + /* VMX features from Cedar Mill/Prescott */ + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, + .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, + .xlevel = 0x80000008, + .model_id = "Common KVM processor" + }, + { + .name = "qemu32", + .level = 4, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 6, + .stepping = 3, + .features[FEAT_1_EDX] = + PPRO_FEATURES, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3, + .xlevel = 0x80000004, + .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, + }, + { + .name = "kvm32", + .level = 5, + .vendor = CPUID_VENDOR_INTEL, + .family = 15, + .model = 6, + .stepping = 1, + .features[FEAT_1_EDX] = + PPRO_FEATURES | CPUID_VME | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3, + .features[FEAT_8000_0001_ECX] = + 0, + /* VMX features from Yonah */ + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, + .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | + VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | + VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, + .xlevel = 0x80000008, + .model_id = "Common 32-bit KVM processor" + }, + { + .name = "coreduo", + .level = 10, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 14, + .stepping = 8, + /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ + .features[FEAT_1_EDX] = + PPRO_FEATURES | CPUID_VME | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | + CPUID_SS, + /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, + * CPUID_EXT_PDCM, CPUID_EXT_VMX */ + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_NX, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, + .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_MOV_DR_EXITING | 
VMX_CPU_BASED_UNCOND_IO_EXITING | + VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | + VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, + .xlevel = 0x80000008, + .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", + }, + { + .name = "486", + .level = 1, + .vendor = CPUID_VENDOR_INTEL, + .family = 4, + .model = 8, + .stepping = 0, + .features[FEAT_1_EDX] = + I486_FEATURES, + .xlevel = 0, + .model_id = "", + }, + { + .name = "pentium", + .level = 1, + .vendor = CPUID_VENDOR_INTEL, + .family = 5, + .model = 4, + .stepping = 3, + .features[FEAT_1_EDX] = + PENTIUM_FEATURES, + .xlevel = 0, + .model_id = "", + }, + { + .name = "pentium2", + .level = 2, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 5, + .stepping = 2, + .features[FEAT_1_EDX] = + PENTIUM2_FEATURES, + .xlevel = 0, + .model_id = "", + }, + { + .name = "pentium3", + .level = 3, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 7, + .stepping = 3, + .features[FEAT_1_EDX] = + PENTIUM3_FEATURES, + .xlevel = 0, + .model_id = "", + }, + { + .name = "athlon", + .level = 2, + .vendor = CPUID_VENDOR_AMD, + .family = 6, + .model = 2, + .stepping = 3, + .features[FEAT_1_EDX] = + PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | + CPUID_MCA, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, + .xlevel = 0x80000008, + .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, + }, + { + .name = "n270", + .level = 10, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 28, + .stepping = 2, + /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ + .features[FEAT_1_EDX] = + PPRO_FEATURES | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | + CPUID_ACPI | CPUID_SS, + /* Some CPUs got no CPUID_SEP */ + /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, + * CPUID_EXT_XTPR */ + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | + CPUID_EXT_MOVBE, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_NX, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .xlevel = 0x80000008, + .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", + }, + { + .name = "Conroe", + .level = 10, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 15, + .stepping = 3, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, + .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + 
VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, + .xlevel = 0x80000008, + .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", + }, + { + .name = "Penryn", + .level = 10, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 23, + .stepping = 3, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, + .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING, + .xlevel = 0x80000008, + .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", + }, + { + .name = "Nehalem", + .level = 11, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 26, + .stepping = 3, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | 
MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID, + .xlevel = 0x80000008, + .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "Nehalem-IBRS", + .props = (PropValue[]) { + { "spec-ctrl", "on" }, + { "model-id", + "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, + { NULL /* end of list */ } + } + }, + { 0 /* end of list */ } + } + }, + { + .name = "Westmere", + .level = 11, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 44, + .stepping = 1, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + 
MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, + .xlevel = 0x80000008, + .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "Westmere-IBRS", + .props = (PropValue[]) { + { "spec-ctrl", "on" }, + { "model-id", + "Westmere E56xx/L56xx/X56xx (IBRS update)" }, + { NULL /* end of list */ } + } + }, + { 0 /* end of list */ } + } + }, + { + .name = "SandyBridge", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 42, + .stepping = 1, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | + CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | 
MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, + .xlevel = 0x80000008, + .model_id = "Intel Xeon E312xx (Sandy Bridge)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "SandyBridge-IBRS", + .props = (PropValue[]) { + { "spec-ctrl", "on" }, + { "model-id", + "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, + { NULL /* end of list */ } + } + }, + { 0 /* end of list */ } + } + }, + { + .name = "IvyBridge", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 58, + .stepping = 9, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | + CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_ERMS, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE 
| + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING, + .xlevel = 0x80000008, + .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "IvyBridge-IBRS", + .props = (PropValue[]) { + { "spec-ctrl", "on" }, + { "model-id", + "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, + { NULL /* end of list */ } + } + }, + { 0 /* end of list */ } + } + }, + { + .name = "Haswell", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 60, + .stepping = 4, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + 
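/* extended feature bits from CPUID.80000001H:EDX */ +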
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, + .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Core Processor (Haswell)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "Haswell-noTSX", + .props = (PropValue[]) { + { "hle", "off" }, + { "rtm", "off" }, + { "stepping", "1" }, + { "model-id", "Intel Core Processor (Haswell, no TSX)", }, + { NULL /* end of list */ } + }, + }, + { 
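+ /* Version props are cumulative: requesting -v3 first applies the -v2 props above, then the overrides listed below. */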
+ .version = 3, + .alias = "Haswell-IBRS", + .props = (PropValue[]) { + /* Restore TSX features removed by -v2 above */ + { "hle", "on" }, + { "rtm", "on" }, + /* + * Haswell and Haswell-IBRS had stepping=4 in + * QEMU 4.0 and older + */ + { "stepping", "4" }, + { "spec-ctrl", "on" }, + { "model-id", + "Intel Core Processor (Haswell, IBRS)" }, + { NULL /* end of list */ } + } + }, + { + .version = 4, + .alias = "Haswell-noTSX-IBRS", + .props = (PropValue[]) { + { "hle", "off" }, + { "rtm", "off" }, + /* spec-ctrl was already enabled by -v3 above */ + { "stepping", "1" }, + { "model-id", + "Intel Core Processor (Haswell, no TSX, IBRS)" }, + { NULL /* end of list */ } + } + }, + { 0 /* end of list */ } + } + }, + { + .name = "Broadwell", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 61, + .stepping = 2, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | 
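/* sic: "OFFSETING" is the spelling of the QEMU identifier */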
VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, + .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Core Processor (Broadwell)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "Broadwell-noTSX", + .props = (PropValue[]) { + { "hle", "off" }, + { "rtm", "off" }, + { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, + { NULL /* end of list */ } + }, + }, + { + .version = 3, + .alias = "Broadwell-IBRS", + .props = (PropValue[]) { + /* Restore TSX features removed by -v2 above */ + { "hle", "on" }, + { "rtm", "on" }, + { "spec-ctrl", "on" }, + { "model-id", + "Intel Core Processor (Broadwell, IBRS)" }, + { NULL /* end of list */ } + } + }, + { + .version = 4, + .alias = "Broadwell-noTSX-IBRS", + .props = (PropValue[]) { + { "hle", "off" }, + { "rtm", "off" }, + /* spec-ctrl was already enabled by -v3 above */ + { "model-id", + "Intel Core Processor (Broadwell, no TSX, IBRS)" }, + { NULL /* end of list */ } + } + }, + { 0 /* end of list */ } + } + }, + { + .name = "Skylake-Client", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 94, + .stepping = 3, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | 
CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP, + /* Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component, + * and the only one defined in Skylake (processor tracing) + * probably will block migration anyway. + */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, + .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Core Processor (Skylake)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "Skylake-Client-IBRS", + .props = (PropValue[]) { + { "spec-ctrl", "on" }, + { "model-id", + "Intel Core Processor (Skylake, IBRS)" }, + { NULL /* end of list */ } + } + }, + { + .version = 3, + .alias = "Skylake-Client-noTSX-IBRS", + .props = 
(PropValue[]) { + { "hle", "off" }, + { "rtm", "off" }, + { "model-id", + "Intel Core Processor (Skylake, IBRS, no TSX)" }, + { NULL /* end of list */ } + } + }, + { 0 /* end of list */ } + } + }, + { + .name = "Skylake-Server", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 85, + .stepping = 4, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | + CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | + CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_PKU, + /* Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component, + * and the only one defined in Skylake (processor tracing) + * probably will block migration anyway. 
+ */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Processor (Skylake)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "Skylake-Server-IBRS", + .props = (PropValue[]) { + /* clflushopt was not added to Skylake-Server-IBRS */ + /* TODO: add -v3 including clflushopt */ + { "clflushopt", "off" }, + { "spec-ctrl", "on" }, + { "model-id", + "Intel Xeon Processor (Skylake, IBRS)" }, + { NULL /* end of list */ } + } + }, + { + .version = 3, + .alias = "Skylake-Server-noTSX-IBRS", + .props = (PropValue[]) { + { "hle", "off" }, + { "rtm", "off" }, + { "model-id", + 
"Intel Xeon Processor (Skylake, IBRS, no TSX)" }, + { NULL /* end of list */ } + } + }, + { 0 /* end of list */ } + } + }, + { + .name = "Cascadelake-Server", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 85, + .stepping = 6, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | + CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | + CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512VNNI, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, + /* Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component, + * and the only one defined in Skylake (processor tracing) + * probably will block migration anyway. 
+ */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Processor (Cascadelake)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { .version = 2, + .props = (PropValue[]) { + { "arch-capabilities", "on" }, + { "rdctl-no", "on" }, + { "ibrs-all", "on" }, + { "skip-l1dfl-vmentry", "on" }, + { "mds-no", "on" }, + { NULL /* end of list */ } + }, + }, + { .version = 3, + .alias = "Cascadelake-Server-noTSX", + .props = (PropValue[]) { + { "hle", "off" }, + { "rtm", "off" }, + { NULL /* end of list */ } + }, + }, + { 0 /* end of list */ } + } + }, + { + .name = "Cooperlake", + .level = 0xd, + 
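/* maximum basic CPUID leaf; 0xd makes the XSAVE enumeration leaf visible */ +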
.vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 85, + .stepping = 10, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | + CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | + CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512VNNI, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | + CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, + .features[FEAT_ARCH_CAPABILITIES] = + MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | + MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | + MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_AVX512_BF16, + /* + * Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component, + * and the only one defined in Skylake (processor tracing) + * probably will block migration anyway. 
+ */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, + .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Processor (Cooperlake)", + }, + { + .name = "Icelake-Client", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 126, + .stepping = 0, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + 
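/* CPUID.01H:ECX feature bits */ +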
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_WBNOINVD, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | + CPUID_7_0_ECX_AVX512_VPOPCNTDQ, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, + /* Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component, + * and the only one defined in Skylake (processor tracing) + * probably will block migration anyway. + */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + 
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, + .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Core Processor (Icelake)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "Icelake-Client-noTSX", + .props = (PropValue[]) { + { "hle", "off" }, + { "rtm", "off" }, + { NULL /* end of list */ } + }, + }, + { 0 /* end of list */ } + } + }, + { + .name = "Icelake-Server", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 134, + .stepping = 0, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | + CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_WBNOINVD, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | + CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | + CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, + /* Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component, + * and the only one defined in Skylake (processor tracing) + * probably will block migration anyway. 
+ */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Processor (Icelake)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "Icelake-Server-noTSX", + .props = (PropValue[]) { + { "hle", "off" }, + { "rtm", "off" }, + { NULL /* end of list */ } + }, + }, + { + .version = 3, + .props = (PropValue[]) { + { "arch-capabilities", "on" }, + { "rdctl-no", "on" }, + { "ibrs-all", "on" }, + { "skip-l1dfl-vmentry", "on" }, + { "mds-no", "on" }, + { "pschange-mc-no", "on" }, + { "taa-no", "on" }, + { NULL /* end of list */ } + }, + }, + { 0 /* end of list */ } + } + }, + { + .name = "Denverton", + .level = 21, + .vendor = CPUID_VENDOR_INTEL, 
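+ /* family/model/stepping are what the guest reads back from CPUID.01H:EAX */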
+ .family = 6, + .model = 95, + .stepping = 1, + .features[FEAT_1_EDX] = + CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | + CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | + CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | + CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | + CPUID_SSE | CPUID_SSE2, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | + CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | + CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | + CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | + CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | + CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | + CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | + CPUID_7_0_EDX_SPEC_CTRL_SSBD, + /* + * Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component, + * and the only one defined in Skylake (processor tracing) + * probably will block migration anyway. + */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_ARCH_CAPABILITIES] = + MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | 
VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, + .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Atom Processor (Denverton)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .props = (PropValue[]) { + { "monitor", "off" }, + { "mpx", "off" }, + { NULL /* end of list */ }, + }, + }, + { 0 /* end of list */ }, + }, + }, + { + .name = "Snowridge", + .level = 27, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 134, + .stepping = 1, + .features[FEAT_1_EDX] = + /* missing: CPUID_PN CPUID_IA64 */ + /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ + CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | + CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | + CPUID_CX8 | CPUID_APIC | CPUID_SEP | + CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | + CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | + CPUID_MMX | + CPUID_FXSR | CPUID_SSE | CPUID_SSE2, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | + CPUID_EXT_SSSE3 | + CPUID_EXT_CX16 | + CPUID_EXT_SSE41 | + CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | + CPUID_EXT_POPCNT | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | + CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_SYSCALL | + CPUID_EXT2_NX | + CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | + CPUID_EXT2_LM, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM | + CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | + CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_ERMS | + CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ + CPUID_7_0_EBX_RDSEED | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | + CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_SHA_NI, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_UMIP | + /* missing bit 5 */ + CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | + CPUID_7_0_ECX_MOVDIR64B, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_SPEC_CTRL | + CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | + CPUID_7_0_EDX_CORE_CAPABILITY, + .features[FEAT_CORE_CAPABILITY] = + MSR_CORE_CAP_SPLIT_LOCK_DETECT, + /* + * Missing: XSAVES (not supported by some Linux versions, + * including v4.1 to v4.12). + * KVM doesn't yet expose any XSAVES state save component, + * and the only one defined in Skylake (processor tracing) + * probably will block migration anyway. 
+ */ + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, + .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Atom Processor (SnowRidge)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .props = (PropValue[]) { + { "mpx", "off" }, + { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, + { NULL /* end of list */ }, + }, + }, + { 0 /* end of list */ }, + }, + }, + { + .name = "KnightsMill", + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 133, + .stepping = 0, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | + CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | + 
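(The `.versions` arrays attached to these models end with a zeroed sentinel entry, and each version carries a NULL-terminated `PropValue` list, e.g. Snowridge v2 turning "mpx" off. A small sketch of how such a sentinel-terminated list can be walked; `apply_prop` is a hypothetical stand-in for the QOM property-setting the real code performs:)

```c
#include <stdio.h>

typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* Hypothetical consumer; the real code sets object properties instead. */
static void apply_prop(const char *prop, const char *value)
{
    printf("set %s = %s\n", prop, value);
}

int main(void)
{
    /* Same shape as the version-2 props lists above. */
    static const PropValue props[] = {
        { "mpx", "off" },
        { "model-id", "Some Processor (no MPX)" },
        { NULL, NULL }, /* sentinel ends the list */
    };
    for (const PropValue *p = props; p->prop; p++) {
        apply_prop(p->prop, p->value);
    }
    return 0;
}
```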
CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | + CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | + CPUID_PSE | CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | + CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | + CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | + CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | + CPUID_7_0_EBX_AVX512ER, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_AVX512_VPOPCNTDQ, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Phi Processor (Knights Mill)", + }, + { + .name = "Opteron_G1", + .level = 5, + .vendor = CPUID_VENDOR_AMD, + .family = 15, + .model = 6, + .stepping = 1, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .xlevel = 0x80000008, + .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", + }, + { + .name = "Opteron_G2", + .level = 5, + .vendor = CPUID_VENDOR_AMD, + .family = 15, + .model = 6, + .stepping = 1, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_CX16 | CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, + .xlevel = 0x80000008, + .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", + }, + { + .name = "Opteron_G3", + .level = 5, + .vendor = CPUID_VENDOR_AMD, + .family = 16, + .model = 2, + .stepping = 3, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | + CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | + CPUID_EXT2_RDTSCP, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | + CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, + .xlevel = 0x80000008, + 
.model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", + }, + { + .name = "Opteron_G4", + .level = 0xd, + .vendor = CPUID_VENDOR_AMD, + .family = 21, + .model = 1, + .stepping = 2, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | + CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | + CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | + CPUID_EXT3_LAHF_LM, + .features[FEAT_SVM] = + CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, + /* no xsaveopt! */ + .xlevel = 0x8000001A, + .model_id = "AMD Opteron 62xx class CPU", + }, + { + .name = "Opteron_G5", + .level = 0xd, + .vendor = CPUID_VENDOR_AMD, + .family = 21, + .model = 2, + .stepping = 0, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | + CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | + CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | + CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | + CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | + CPUID_EXT3_LAHF_LM, + .features[FEAT_SVM] = + CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, + /* no xsaveopt! 
*/ + .xlevel = 0x8000001A, + .model_id = "AMD Opteron 63xx class CPU", + }, + { + .name = "EPYC", + .level = 0xd, + .vendor = CPUID_VENDOR_AMD, + .family = 23, + .model = 1, + .stepping = 2, + .features[FEAT_1_EDX] = + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | + CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | + CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | + CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | + CPUID_VME | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | + CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | + CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | + CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | + CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | + CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | + CPUID_EXT3_TOPOEXT, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | + CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | + CPUID_7_0_EBX_SHA_NI, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_SVM] = + CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, + .xlevel = 0x8000001E, + .model_id = "AMD EPYC Processor", + .cache_info = &epyc_cache_info, + .use_epyc_apic_id_encoding = 1, + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .alias = "EPYC-IBPB", + .props = (PropValue[]) { + { "ibpb", "on" }, + { "model-id", + "AMD EPYC Processor (with IBPB)" }, + { NULL /* end of list */ } + } + }, + { + .version = 3, + .props = (PropValue[]) { + { "ibpb", "on" }, + { "perfctr-core", "on" }, + { "clzero", "on" }, + { "xsaveerptr", "on" }, + { "xsaves", "on" }, + { "model-id", + "AMD EPYC Processor" }, + { NULL /* end of list */ } + } + }, + { 0 /* end of list */ } + } + }, + { + .name = "Dhyana", + .level = 0xd, + .vendor = CPUID_VENDOR_HYGON, + .family = 24, + .model = 0, + .stepping = 1, + .features[FEAT_1_EDX] = + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | + CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | + CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | + CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | + CPUID_VME | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | + CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | + CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | + CPUID_EXT_MONITOR | CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | + CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | + CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | + CPUID_EXT3_TOPOEXT, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_IBPB, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 
CPUID_7_0_EBX_AVX2 |
+            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
+            CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
+        /*
+         * Missing: XSAVES (not supported by some Linux versions,
+         * including v4.1 to v4.12).
+         * KVM doesn't yet expose any XSAVES state save component.
+         */
+        .features[FEAT_XSAVE] =
+            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+            CPUID_XSAVE_XGETBV1,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .features[FEAT_SVM] =
+            CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
+        .xlevel = 0x8000001E,
+        .model_id = "Hygon Dhyana Processor",
+        .cache_info = &epyc_cache_info,
+    },
+    {
+        .name = "EPYC-Rome",
+        .level = 0xd,
+        .vendor = CPUID_VENDOR_AMD,
+        .family = 23,
+        .model = 49,
+        .stepping = 0,
+        .features[FEAT_1_EDX] =
+            CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
+            CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
+            CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
+            CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
+            CPUID_VME | CPUID_FP87,
+        .features[FEAT_1_ECX] =
+            CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+            CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
+            CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+            CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
+            CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+        .features[FEAT_8000_0001_EDX] =
+            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+            CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
+            CPUID_EXT2_SYSCALL,
+        .features[FEAT_8000_0001_ECX] =
+            CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
+            CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
+            CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
+            CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
+        .features[FEAT_8000_0008_EBX] =
+            CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
+            CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
+            CPUID_8000_0008_EBX_STIBP,
+        .features[FEAT_7_0_EBX] =
+            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
+            CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
+            CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB,
+        .features[FEAT_7_0_ECX] =
+            CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID,
+        .features[FEAT_XSAVE] =
+            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+            CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+        .features[FEAT_6_EAX] =
+            CPUID_6_EAX_ARAT,
+        .features[FEAT_SVM] =
+            CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
+        .xlevel = 0x8000001E,
+        .model_id = "AMD EPYC-Rome Processor",
+        .cache_info = &epyc_rome_cache_info,
+        .use_epyc_apic_id_encoding = 1,
+    },
+};
+
+/*
+ * We resolve CPU model aliases using -v1 when using "-machine
+ * none", but this is just for compatibility while libvirt isn't
+ * adapted to resolve CPU model versions before creating VMs.
+ * See "Runnability guarantee of CPU models" at
+ * qemu-deprecated.texi.
+ */ +X86CPUVersion default_cpu_version = 1; + +void x86_cpu_set_default_version(X86CPUVersion version) +{ + /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ + assert(version != CPU_VERSION_AUTO); + default_cpu_version = version; +} + +#define CPUID_MODEL_ID_SZ 48 + +static bool x86_cpu_have_filtered_features(X86CPU *cpu) +{ + FeatureWord w; + + for (w = 0; w < FEATURE_WORDS; w++) { + if (cpu->filtered_features[w]) { + return true; + } + } + + return false; +} + +static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, + const char *verbose_prefix) +{ + CPUX86State *env = &cpu->env; + + if (!cpu->force_features) { + env->features[w] &= ~mask; + } + cpu->filtered_features[w] |= mask; + + if (!verbose_prefix) { + return; + } +} + +/* Convert all '_' in a feature string option name to '-', to make feature + * name conform to QOM property naming rule, which uses '-' instead of '_'. + */ +static inline void feat2prop(char *s) +{ + while ((s = strchr(s, '_'))) { + *s = '-'; + } +} + +static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); + +static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, + bool migratable_only) +{ + FeatureWordInfo *wi = &feature_word_info[w]; + uint64_t r; + + // TCG enable + r = wi->tcg_features; + + return r; +} + +/* Load data from X86CPUDefinition into a X86CPU object + */ +static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) +{ + X86CPUDefinition *def = model->cpudef; + CPUX86State *env = &cpu->env; + FeatureWord w; + + for (w = 0; w < FEATURE_WORDS; w++) { + env->features[w] = def->features[w]; + } + + /* legacy-cache defaults to 'off' if CPU model provides cache info */ + cpu->legacy_cache = !def->cache_info; + + env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; + + /* sysenter isn't supported in compatibility mode on AMD, + * syscall isn't supported in compatibility mode on Intel. + * Normally we advertise the actual CPU vendor, but you can + * override this using the 'vendor' property if you want to use + * KVM's sysenter/syscall emulation in compatibility mode and + * when doing cross vendor migration + */ + if (accel_uses_host_cpuid()) { + uint32_t ebx = 0, ecx = 0, edx = 0; + host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); + } +} + +void cpu_clear_apic_feature(CPUX86State *env) +{ + env->features[FEAT_1_EDX] &= ~CPUID_APIC; +} + +void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + X86CPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + uint32_t die_offset; + uint32_t limit; + uint32_t signature[3]; + X86CPUTopoInfo topo_info; + + topo_info.nodes_per_pkg = env->nr_nodes; + topo_info.dies_per_pkg = env->nr_dies; + topo_info.cores_per_die = cs->nr_cores; + topo_info.threads_per_core = cs->nr_threads; + + /* Calculate & apply limits for different index ranges */ + if (index >= 0xC0000000) { + limit = env->cpuid_xlevel2; + } else if (index >= 0x80000000) { + limit = env->cpuid_xlevel; + } else if (index >= 0x40000000) { + limit = 0x40000001; + } else { + limit = env->cpuid_level; + } + + if (index > limit) { + /* Intel documentation states that invalid EAX input will + * return the same information as EAX=cpuid_level + * (Intel SDM Vol. 
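(For reference, the filtering done by `x86_cpu_filter_features()` via `mark_unavailable_features()` boils down to masking each requested feature word against the supported word and remembering the difference. A reduced sketch of that pattern on plain arrays, with all values invented:)

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define N_WORDS 4 /* stand-in for FEATURE_WORDS */

int main(void)
{
    uint64_t requested[N_WORDS] = { 0xff, 0x0f, 0x00, 0x01 };
    uint64_t supported[N_WORDS] = { 0xf0, 0x0f, 0xff, 0x00 };
    uint64_t filtered[N_WORDS]  = { 0 };
    bool any_filtered = false;

    for (int w = 0; w < N_WORDS; w++) {
        uint64_t unavailable = requested[w] & ~supported[w];
        filtered[w] |= unavailable;   /* remember what was dropped */
        requested[w] &= ~unavailable; /* and stop advertising it */
        any_filtered |= unavailable != 0;
    }
    printf("some features filtered: %s\n", any_filtered ? "yes" : "no");
    return 0;
}
```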
2A - Instruction Set Reference - CPUID) + */ + index = env->cpuid_level; + } + + switch(index) { + case 0: + *eax = env->cpuid_level; + *ebx = env->cpuid_vendor1; + *edx = env->cpuid_vendor2; + *ecx = env->cpuid_vendor3; + break; + case 1: + *eax = env->cpuid_version; + *ebx = (cpu->apic_id << 24) | + 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ + *ecx = env->features[FEAT_1_ECX]; + if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { + *ecx |= CPUID_EXT_OSXSAVE; + } + *edx = env->features[FEAT_1_EDX]; + if (cs->nr_cores * cs->nr_threads > 1) { + *ebx |= (cs->nr_cores * cs->nr_threads) << 16; + *edx |= CPUID_HT; + } + break; + case 2: + /* cache info: needed for Pentium Pro compatibility */ + if (cpu->cache_info_passthrough) { + host_cpuid(index, 0, eax, ebx, ecx, edx); + break; + } + *eax = 1; /* Number of CPUID[EAX=2] calls required */ + *ebx = 0; + if (!cpu->enable_l3_cache) { + *ecx = 0; + } else { + *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); + } + *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | + (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | + (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); + break; + case 4: + /* cache info: needed for Core compatibility */ + if (cpu->cache_info_passthrough) { + host_cpuid(index, count, eax, ebx, ecx, edx); + /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */ + *eax &= ~0xFC000000; + if ((*eax & 31) && cs->nr_cores > 1) { + *eax |= (cs->nr_cores - 1) << 26; + } + } else { + *eax = 0; + switch (count) { + case 0: /* L1 dcache info */ + encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, + 1, cs->nr_cores, + eax, ebx, ecx, edx); + break; + case 1: /* L1 icache info */ + encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, + 1, cs->nr_cores, + eax, ebx, ecx, edx); + break; + case 2: /* L2 cache info */ + encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, + cs->nr_threads, cs->nr_cores, + eax, ebx, ecx, edx); + break; + case 3: /* L3 cache info */ + die_offset = apicid_die_offset(&topo_info); + if (cpu->enable_l3_cache) { + encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, + (1 << die_offset), cs->nr_cores, + eax, ebx, ecx, edx); + break; + } + /* fall through */ + default: /* end of info */ + *eax = *ebx = *ecx = *edx = 0; + break; + } + } + break; + case 5: + /* MONITOR/MWAIT Leaf */ + *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ + *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ + *ecx = cpu->mwait.ecx; /* flags */ + *edx = cpu->mwait.edx; /* mwait substates */ + break; + case 6: + /* Thermal and Power Leaf */ + *eax = env->features[FEAT_6_EAX]; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + case 7: + /* Structured Extended Feature Flags Enumeration Leaf */ + if (count == 0) { + /* Maximum ECX value for sub-leaves */ + *eax = env->cpuid_level_func7; + *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ + *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ + if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { + *ecx |= CPUID_7_0_ECX_OSPKE; + } + *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ + } else if (count == 1) { + *eax = env->features[FEAT_7_1_EAX]; + *ebx = 0; + *ecx = 0; + *edx = 0; + } else { + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + } + break; + case 9: + /* Direct Cache Access Information Leaf */ + *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + case 0xA: + /* Architectural Performance Monitoring Leaf 
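(A sketch of the register packing done in `case 1` above: EBX carries the initial APIC ID in bits 31..24, the logical-processor count in bits 23..16, and the CLFLUSH line size in 8-byte units in bits 15..8. The values here are arbitrary:)

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Pack CPUID.1:EBX the way the case 1 code does. */
    uint32_t apic_id = 3, nr_logical = 4;
    uint32_t ebx = (apic_id << 24) | (nr_logical << 16) | (8 << 8);

    /* Decode the fields back out; 8 quadwords == 64-byte line. */
    printf("APIC id %u, %u logical CPUs, CLFLUSH %u bytes\n",
           ebx >> 24, (ebx >> 16) & 0xff, ((ebx >> 8) & 0xff) * 8);
    return 0;
}
```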
*/
+        *eax = 0;
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        break;
+    case 0xB:
+        /* Extended Topology Enumeration Leaf */
+        if (!cpu->enable_cpuid_0xb) {
+            *eax = *ebx = *ecx = *edx = 0;
+            break;
+        }
+
+        *ecx = count & 0xff;
+        *edx = cpu->apic_id;
+
+        switch (count) {
+        case 0:
+            *eax = apicid_core_offset(&topo_info);
+            *ebx = cs->nr_threads;
+            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
+            break;
+        case 1:
+            *eax = env->pkg_offset;
+            *ebx = cs->nr_cores * cs->nr_threads;
+            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
+            break;
+        default:
+            *eax = 0;
+            *ebx = 0;
+            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
+        }
+
+        assert(!(*eax & ~0x1f));
+        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
+        break;
+    case 0x1F:
+        /* V2 Extended Topology Enumeration Leaf */
+        if (env->nr_dies < 2) {
+            *eax = *ebx = *ecx = *edx = 0;
+            break;
+        }
+
+        *ecx = count & 0xff;
+        *edx = cpu->apic_id;
+        switch (count) {
+        case 0:
+            *eax = apicid_core_offset(&topo_info);
+            *ebx = cs->nr_threads;
+            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
+            break;
+        case 1:
+            *eax = apicid_die_offset(&topo_info);
+            *ebx = cs->nr_cores * cs->nr_threads;
+            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
+            break;
+        case 2:
+            *eax = env->pkg_offset;
+            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
+            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
+            break;
+        default:
+            *eax = 0;
+            *ebx = 0;
+            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
+        }
+        assert(!(*eax & ~0x1f));
+        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
+        break;
+    case 0xD: {
+        /* Processor Extended State */
+        *eax = 0;
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
+            break;
+        }
+
+        if (count == 0) {
+            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
+            *eax = env->features[FEAT_XSAVE_COMP_LO];
+            *edx = env->features[FEAT_XSAVE_COMP_HI];
+            /*
+             * The initial values of xcr0 and ebx are 0. On kernels without
+             * KVM commit 412a3c41 (e.g., CentOS 6), ebx stays 0 even after
+             * the guest updates xcr0; this crashes some legacy guests
+             * (e.g., CentOS 6), so set ebx == ecx to work around it.
+             */
+            *ebx = xsave_area_size(env->xcr0);
+        } else if (count == 1) {
+            *eax = env->features[FEAT_XSAVE];
+        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
+            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
+                const ExtSaveArea *esa = &x86_ext_save_areas[count];
+                *eax = esa->size;
+                *ebx = esa->offset;
+            }
+        }
+        break;
+    }
+    case 0x14: {
+        /* Intel Processor Trace Enumeration */
+        *eax = 0;
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) {
+            break;
+        }
+
+        if (count == 0) {
+            *eax = INTEL_PT_MAX_SUBLEAF;
+            *ebx = INTEL_PT_MINIMAL_EBX;
+            *ecx = INTEL_PT_MINIMAL_ECX;
+        } else if (count == 1) {
+            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
+            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
+        }
+        break;
+    }
+    case 0x40000000:
+        /*
+         * CPUID code in kvm_arch_init_vcpu() ignores stuff
+         * set here, but we restrict to TCG nonetheless.
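(The topology leaves 0xB/0x1F above report, for each level, how many low APIC-ID bits must be shifted away to reach the next level's ID. A sketch of that bit-width computation, a loop-based ceil(log2), independent of the real `apicid_*_offset()` helpers:)

```c
#include <stdio.h>

/* Smallest number of APIC-ID bits that can hold 'count' values,
 * i.e. ceil(log2(count)) -- the shift width leaf 0xB returns in EAX. */
static unsigned apicid_bits(unsigned count)
{
    unsigned bits = 0;
    while ((1u << bits) < count) {
        bits++;
    }
    return bits;
}

int main(void)
{
    unsigned threads = 2, cores = 6;
    unsigned smt_bits = apicid_bits(threads);
    unsigned core_bits = apicid_bits(cores);

    /* APIC ID layout: [... package | core | thread] */
    printf("core id starts at bit %u, package id at bit %u\n",
           smt_bits, smt_bits + core_bits);
    return 0;
}
```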
+ */ + if (cpu->expose_tcg) { + memcpy(signature, "TCGTCGTCGTCG", 12); + *eax = 0x40000001; + *ebx = signature[0]; + *ecx = signature[1]; + *edx = signature[2]; + } else { + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + } + break; + case 0x40000001: + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + case 0x80000000: + *eax = env->cpuid_xlevel; + *ebx = env->cpuid_vendor1; + *edx = env->cpuid_vendor2; + *ecx = env->cpuid_vendor3; + break; + case 0x80000001: + *eax = env->cpuid_version; + *ebx = 0; + *ecx = env->features[FEAT_8000_0001_ECX]; + *edx = env->features[FEAT_8000_0001_EDX]; + + /* The Linux kernel checks for the CMPLegacy bit and + * discards multiple thread information if it is set. + * So don't set it here for Intel to make Linux guests happy. + */ + if (cs->nr_cores * cs->nr_threads > 1) { + if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || + env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || + env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { + *ecx |= 1 << 1; /* CmpLegacy bit */ + } + } + break; + case 0x80000002: + case 0x80000003: + case 0x80000004: + *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; + *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; + *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; + *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; + break; + case 0x80000005: + /* cache info (L1 cache) */ + if (cpu->cache_info_passthrough) { + host_cpuid(index, 0, eax, ebx, ecx, edx); + break; + } + *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \ + (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); + *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \ + (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); + *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); + *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); + break; + case 0x80000006: + /* cache info (L2 cache) */ + if (cpu->cache_info_passthrough) { + host_cpuid(index, 0, eax, ebx, ecx, edx); + break; + } + *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \ + (L2_DTLB_2M_ENTRIES << 16) | \ + (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \ + (L2_ITLB_2M_ENTRIES); + *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \ + (L2_DTLB_4K_ENTRIES << 16) | \ + (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \ + (L2_ITLB_4K_ENTRIES); + encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, + cpu->enable_l3_cache ? + env->cache_info_amd.l3_cache : NULL, + ecx, edx); + break; + case 0x80000007: + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = env->features[FEAT_8000_0007_EDX]; + break; + case 0x80000008: + /* virtual & phys address size in low 2 bytes. 
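(Leaves 0x80000002..0x80000004 above return the 48-byte model string (`CPUID_MODEL_ID_SZ`, defined earlier in this file) four bytes per register, twelve registers in all. A standalone sketch of the packing, with an invented string:)

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* 48-byte model string spread over CPUID leaves
     * 0x80000002..0x80000004 (3 leaves x 4 registers x 4 bytes). */
    uint32_t model[12] = { 0 };
    const char *id = "Demo x86 CPU model string";

    memcpy(model, id, strlen(id)); /* little-endian byte packing */

    for (int leaf = 0; leaf < 3; leaf++) {
        printf("leaf 0x8000000%d: eax=%08" PRIx32 " ebx=%08" PRIx32
               " ecx=%08" PRIx32 " edx=%08" PRIx32 "\n",
               2 + leaf, model[leaf * 4 + 0], model[leaf * 4 + 1],
               model[leaf * 4 + 2], model[leaf * 4 + 3]);
    }
    return 0;
}
```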
*/ + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { + /* 64 bit processor */ + *eax = cpu->phys_bits; /* configurable physical bits */ + if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { + *eax |= 0x00003900; /* 57 bits virtual */ + } else { + *eax |= 0x00003000; /* 48 bits virtual */ + } + } else { + *eax = cpu->phys_bits; + } + *ebx = env->features[FEAT_8000_0008_EBX]; + *ecx = 0; + *edx = 0; + if (cs->nr_cores * cs->nr_threads > 1) { + *ecx |= (cs->nr_cores * cs->nr_threads) - 1; + } + break; + case 0x8000000A: + if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { + *eax = 0x00000001; /* SVM Revision */ + *ebx = 0x00000010; /* nr of ASIDs */ + *ecx = 0; + *edx = env->features[FEAT_SVM]; /* optional features */ + } else { + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + } + break; + case 0x8000001D: + *eax = 0; + if (cpu->cache_info_passthrough) { + host_cpuid(index, count, eax, ebx, ecx, edx); + break; + } + switch (count) { + case 0: /* L1 dcache info */ + encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, + &topo_info, eax, ebx, ecx, edx); + break; + case 1: /* L1 icache info */ + encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, + &topo_info, eax, ebx, ecx, edx); + break; + case 2: /* L2 cache info */ + encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, + &topo_info, eax, ebx, ecx, edx); + break; + case 3: /* L3 cache info */ + encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, + &topo_info, eax, ebx, ecx, edx); + break; + default: /* end of info */ + *eax = *ebx = *ecx = *edx = 0; + break; + } + break; + case 0x8000001E: + assert(cpu->core_id <= 255); + encode_topo_cpuid8000001e(&topo_info, cpu, eax, ebx, ecx, edx); + break; + case 0xC0000000: + *eax = env->cpuid_xlevel2; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + case 0xC0000001: + /* Support for VIA CPU's CPUID instruction */ + *eax = env->cpuid_version; + *ebx = 0; + *ecx = 0; + *edx = env->features[FEAT_C000_0001_EDX]; + break; + case 0xC0000002: + case 0xC0000003: + case 0xC0000004: + /* Reserved for the future, and now filled with zero */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + default: + /* reserved values: zero */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + } +} + +static void x86_cpu_reset(CPUState *dev) +{ + CPUState *s = CPU(dev); + X86CPU *cpu = X86_CPU(s); + X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); + CPUX86State *env = &cpu->env; + target_ulong cr4; + uint64_t xcr0; + int i; + + xcc->parent_reset(s); + + memset(env, 0, offsetof(CPUX86State, end_reset_fields)); + + env->old_exception = -1; + + /* init to reset state */ + + env->hflags2 |= HF2_GIF_MASK; + + cpu_x86_update_cr0(env, 0x60000010); + env->a20_mask = ~0x0; + env->smbase = 0x30000; + env->msr_smi_count = 0; + + env->idt.limit = 0xffff; + env->gdt.limit = 0xffff; + env->ldt.limit = 0xffff; + env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); + env->tr.limit = 0xffff; + env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); + + cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | + DESC_R_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | 
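(The segment and EIP values installed by `x86_cpu_reset()` here place the first instruction fetch at the conventional reset vector: CS.base 0xffff0000 plus EIP 0xfff0 is 0xfffffff0, just below 4 GiB. A trivial sketch:)

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Values x86_cpu_reset() installs for CS.base and EIP. */
    uint32_t cs_base = 0xffff0000;
    uint32_t eip = 0xfff0;

    /* First fetch after reset comes from base + EIP. */
    printf("reset vector: 0x%08x\n", cs_base + eip); /* 0xfffffff0 */
    return 0;
}
```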
DESC_W_MASK |
+                           DESC_A_MASK);
+    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
+                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+                           DESC_A_MASK);
+
+    env->eip = 0xfff0;
+    env->regs[R_EDX] = env->cpuid_version;
+
+    env->eflags = 0x2;
+
+    /* FPU init */
+    for (i = 0; i < 8; i++) {
+        env->fptags[i] = 1;
+    }
+    cpu_set_fpuc(env, 0x37f);
+
+    env->mxcsr = 0x1f80;
+    /* All units are in INIT state. */
+    env->xstate_bv = 0;
+
+    env->pat = 0x0007040600070406ULL;
+    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
+    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
+        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
+    }
+
+    memset(env->dr, 0, sizeof(env->dr));
+    env->dr[6] = DR6_FIXED_1;
+    env->dr[7] = DR7_FIXED_1;
+    cpu_breakpoint_remove_all(s, BP_CPU);
+    cpu_watchpoint_remove_all(s, BP_CPU);
+
+    cr4 = 0;
+    xcr0 = XSTATE_FP_MASK;
+
+    /* Enable all the features for user-mode. */
+    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
+        xcr0 |= XSTATE_SSE_MASK;
+    }
+    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+        const ExtSaveArea *esa = &x86_ext_save_areas[i];
+        if (env->features[esa->feature] & esa->bits) {
+            xcr0 |= 1ull << i;
+        }
+    }
+
+    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
+        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
+    }
+    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
+        cr4 |= CR4_FSGSBASE_MASK;
+    }
+
+    env->xcr0 = xcr0;
+    cpu_x86_update_cr4(env, cr4);
+
+    /*
+     * SDM 11.11.5 requires:
+     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
+     *  - IA32_MTRR_PHYSMASKn.V = 0
+     * All other bits are undefined. For simplification, zero it all.
+     */
+    env->mtrr_deftype = 0;
+    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
+    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
+}
+
+static void mce_init(X86CPU *cpu)
+{
+    CPUX86State *cenv = &cpu->env;
+    unsigned int bank;
+
+    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
+        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
+            (CPUID_MCE | CPUID_MCA)) {
+        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
+                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
+        cenv->mcg_ctl = ~(uint64_t)0;
+        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
+            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
+        }
+    }
+}
+
+/*
+ * Finishes initialization of CPUID data, filters CPU feature
+ * words based on host availability of each feature. Filtered-out
+ * features are recorded in cpu->filtered_features.
+ */
+static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
+{
+    CPUX86State *env = &cpu->env;
+    FeatureWord w;
+    const char *prefix = NULL;
+
+    for (w = 0; w < FEATURE_WORDS; w++) {
+        uint64_t host_feat =
+            x86_cpu_get_supported_feature_word(w, false);
+        uint64_t requested_features = env->features[w];
+        uint64_t unavailable_features = requested_features & ~host_feat;
+        mark_unavailable_features(cpu, w, unavailable_features, prefix);
+    }
+}
+
+static void x86_cpu_realizefn(struct uc_struct *uc, CPUState *dev)
+{
+    CPUState *cs = CPU(dev);
+    X86CPU *cpu = X86_CPU(cs);
+    X86CPUClass *xcc = X86_CPU_GET_CLASS(cs);
+    CPUX86State *env = &cpu->env;
+
+    if (cpu->ucode_rev == 0) {
+        /* The default is the same as KVM's.
*/ + if (IS_AMD_CPU(env)) { + cpu->ucode_rev = 0x01000065; + } else { + cpu->ucode_rev = 0x100000000ULL; + } + } + + /* mwait extended info: needed for Core compatibility */ + /* We always wake on interrupt even if host does not have the capability */ + cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; + + if (cpu->apic_id == UNASSIGNED_APIC_ID) { + //error_setg(errp, "apic-id property was not initialized properly"); + return; + } + + x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); + + if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { + // error_setg(&local_err, + // accel_uses_host_cpuid() ? + // "Host doesn't support requested features" : + // "TCG doesn't support requested features"); + return; + } + + /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on + * CPUID[1].EDX. + */ + if (IS_AMD_CPU(env)) { + env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; + env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] + & CPUID_EXT2_AMD_ALIASES); + } + + /* For 64bit systems think about the number of physical bits to present. + * ideally this should be the same as the host; anything other than matching + * the host can cause incorrect guest behaviour. + * QEMU used to pick the magic value of 40 bits that corresponds to + * consumer AMD devices but nothing else. + */ + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { + if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { + //error_setg(errp, "TCG only supports phys-bits=%u", + // TCG_PHYS_ADDR_BITS); + return; + } + + /* 0 means it was not explicitly set by the user (or by machine + * compat_props or by the host code above). In this case, the default + * is the value used by TCG (40). + */ + if (cpu->phys_bits == 0) { + cpu->phys_bits = TCG_PHYS_ADDR_BITS; + } + } else { + /* For 32 bit systems don't use the user set value, but keep + * phys_bits consistent with what we tell the guest. 
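(The phys-bits fallback logic above, condensed: 64-bit-capable models default to TCG's 40 bits, while 32-bit models get 36 bits with PSE36 or 32 otherwise. A simplified sketch that deliberately ignores the user-override error paths:)

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirror of the phys_bits fallback above, constants inlined
 * (TCG default 40; PSE36 implies 36; plain 32-bit gets 32). */
static unsigned pick_phys_bits(bool lm, bool pse36, unsigned user_bits)
{
    if (lm) {
        return user_bits ? user_bits : 40;
    }
    return pse36 ? 36 : 32;
}

int main(void)
{
    printf("64-bit default: %u\n", pick_phys_bits(true, false, 0));
    printf("32-bit + PSE36: %u\n", pick_phys_bits(false, true, 0));
    return 0;
}
```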
+ */ + if (cpu->phys_bits != 0) { + //error_setg(errp, "phys-bits is not user-configurable in 32 bit"); + return; + } + + if (env->features[FEAT_1_EDX] & CPUID_PSE36) { + cpu->phys_bits = 36; + } else { + cpu->phys_bits = 32; + } + } + + /* Cache information initialization */ + if (!cpu->legacy_cache) { + if (!xcc->model || !xcc->model->cpudef->cache_info) { + // g_autofree char *name = x86_cpu_class_get_model_name(xcc); + //error_setg(errp, + // "CPU model '%s' doesn't support legacy-cache=off", name); + return; + } + env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = + *xcc->model->cpudef->cache_info; + } else { + /* Build legacy cache information */ + env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; + env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache; + env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2; + env->cache_info_cpuid2.l3_cache = &legacy_l3_cache; + + env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache; + env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache; + env->cache_info_cpuid4.l2_cache = &legacy_l2_cache; + env->cache_info_cpuid4.l3_cache = &legacy_l3_cache; + + env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd; + env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd; + env->cache_info_amd.l2_cache = &legacy_l2_cache_amd; + env->cache_info_amd.l3_cache = &legacy_l3_cache; + } + + cpu_exec_realizefn(cs); + + mce_init(cpu); + + cpu_reset(cs); +} + +static void x86_cpu_initfn(struct uc_struct *uc, CPUState *obj) +{ + X86CPU *cpu = X86_CPU(obj); + X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); + CPUX86State *env = &cpu->env; + + env->nr_dies = 1; + env->nr_nodes = 1; + cpu_set_cpustate_pointers(cpu); + env->uc = uc; + + if (xcc->model) { + x86_cpu_load_model(cpu, xcc->model); + } +} + +static int64_t x86_cpu_get_arch_id(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + + return cpu->apic_id; +} + +static bool x86_cpu_get_paging_enabled(const CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + + return cpu->env.cr[0] & CR0_PG_MASK; +} + +static void x86_cpu_set_pc(CPUState *cs, vaddr value) +{ + X86CPU *cpu = X86_CPU(cs); + + cpu->env.eip = value; +} + +static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) +{ + X86CPU *cpu = X86_CPU(cs); + + cpu->env.eip = tb->pc - tb->cs_base; +} + +int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + if (interrupt_request & CPU_INTERRUPT_POLL) { + return CPU_INTERRUPT_POLL; + } + + if (interrupt_request & CPU_INTERRUPT_SIPI) { + return CPU_INTERRUPT_SIPI; + } + + if (env->hflags2 & HF2_GIF_MASK) { + if ((interrupt_request & CPU_INTERRUPT_SMI) && + !(env->hflags & HF_SMM_MASK)) { + return CPU_INTERRUPT_SMI; + } else if ((interrupt_request & CPU_INTERRUPT_NMI) && + !(env->hflags2 & HF2_NMI_MASK)) { + return CPU_INTERRUPT_NMI; + } else if (interrupt_request & CPU_INTERRUPT_MCE) { + return CPU_INTERRUPT_MCE; + } else if ((interrupt_request & CPU_INTERRUPT_HARD) && + (((env->hflags2 & HF2_VINTR_MASK) && + (env->hflags2 & HF2_HIF_MASK)) || + (!(env->hflags2 & HF2_VINTR_MASK) && + (env->eflags & IF_MASK && + !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { + return CPU_INTERRUPT_HARD; + } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && + (env->eflags & IF_MASK) && + !(env->hflags & HF_INHIBIT_IRQ_MASK)) { + return CPU_INTERRUPT_VIRQ; + } + } + + return 0; +} + +static bool x86_cpu_has_work(CPUState *cs) +{ + return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; +} + +void x86_update_hflags(CPUX86State 
*env) +{ + uint32_t hflags; +#define HFLAG_COPY_MASK \ + ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ + HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ + HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ + HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) + + hflags = env->hflags & HFLAG_COPY_MASK; + hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; + hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); + hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & + (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); + hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); + + if (env->cr[4] & CR4_OSFXSR_MASK) { + hflags |= HF_OSFXSR_MASK; + } + + if (env->efer & MSR_EFER_LMA) { + hflags |= HF_LMA_MASK; + } + + if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { + hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; + } else { + hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> + (DESC_B_SHIFT - HF_CS32_SHIFT); + hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> + (DESC_B_SHIFT - HF_SS32_SHIFT); + if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || + !(hflags & HF_CS32_MASK)) { + hflags |= HF_ADDSEG_MASK; + } else { + hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | + env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; + } + } + env->hflags = hflags; +} + +static void x86_cpu_common_class_init(struct uc_struct *uc, CPUClass *oc, void *data) +{ + X86CPUClass *xcc = X86_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + + /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */ + xcc->parent_reset = cc->reset; + /* overwrite the CPUClass->reset to arch reset: x86_cpu_reset(). */ + cc->reset = x86_cpu_reset; + cc->has_work = x86_cpu_has_work; + cc->do_interrupt = x86_cpu_do_interrupt; + cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; + cc->set_pc = x86_cpu_set_pc; + cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; + cc->get_arch_id = x86_cpu_get_arch_id; + cc->get_paging_enabled = x86_cpu_get_paging_enabled; + cc->asidx_from_attrs = x86_asidx_from_attrs; + cc->get_memory_mapping = x86_cpu_get_memory_mapping; + cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug; + cc->debug_excp_handler = breakpoint_handler; + cc->cpu_exec_enter = x86_cpu_exec_enter; + cc->cpu_exec_exit = x86_cpu_exec_exit; + cc->tcg_initialize = tcg_x86_init; + cc->tlb_fill = x86_cpu_tlb_fill; +} + +X86CPU *cpu_x86_init(struct uc_struct *uc, const char *cpu_model) +{ + int i; + X86CPU *cpu; + CPUState *cs; + CPUClass *cc; + X86CPUClass *xcc; + + if (cpu_model == NULL) { +#ifdef TARGET_X86_64 + cpu_model = "qemu64"; +#else + cpu_model = "qemu32"; +#endif + } + + cpu = calloc(1, sizeof(*cpu)); + if (cpu == NULL) { + return NULL; + } + + cs = (CPUState *)cpu; + cc = (CPUClass *)&cpu->cc; + cs->cc = cc; + cs->uc = uc; + uc->cpu = (CPUState *)cpu; + + /* init CPUClass */ + cpu_class_init(uc, cc); + + /* init X86CPUClass */ + x86_cpu_common_class_init(uc, cc, NULL); + + /* init X86CPUModel */ + /* Ignore X86CPUVersion, X86CPUVersionDefinition. + we do not need so many cpu types and their property. + version: more typename. x86_cpu_versioned_model_name(). + alias: more property. 
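(`x86_update_hflags()` above rebuilds the cached hflags word from architectural state; for instance, the current privilege level is lifted straight out of SS's descriptor-flags DPL field. A sketch of just that extraction, with the shift constant inlined:)

```c
#include <stdint.h>
#include <stdio.h>

#define DPL_SHIFT 13 /* DESC_DPL_SHIFT in cpu.h below */

int main(void)
{
    /* A ring-3 data segment descriptor flags word: DPL = 3. */
    uint32_t ss_flags = 3u << DPL_SHIFT;

    /* x86_update_hflags() derives CPL from SS.DPL like this. */
    unsigned cpl = (ss_flags >> DPL_SHIFT) & 3;
    printf("CPL = %u\n", cpl);
    return 0;
}
```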
*/ + xcc = &cpu->cc; + xcc->model = calloc(1, sizeof(*(xcc->model))); + if (xcc->model == NULL) { + free(cpu); + return NULL; + } + + xcc->model->version = CPU_VERSION_AUTO; + for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { + if (strcmp(cpu_model, builtin_x86_defs[i].name) == 0) { + xcc->model->cpudef = &builtin_x86_defs[i]; + break; + } + } + + if (xcc->model->cpudef == NULL) { + free(xcc->model); + free(cpu); + return NULL; + } + + /* init CPUState */ + cpu_common_initfn(uc, cs); + + /* init X86CPU */ + x86_cpu_initfn(uc, cs); + + /* realize X86CPU */ + x86_cpu_realizefn(uc, cs); + + // init address space + cpu_address_space_init(cs, 0, cs->memory); + + qemu_init_vcpu(cs); + + /* realize CPUState */ + + return cpu; +} diff --git a/qemu/target/i386/cpu.h b/qemu/target/i386/cpu.h new file mode 100644 index 00000000..2d78d756 --- /dev/null +++ b/qemu/target/i386/cpu.h @@ -0,0 +1,2134 @@ +/* + * i386 virtual CPU header + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef I386_CPU_H +#define I386_CPU_H + +#include "sysemu/tcg.h" +#include "cpu-qom.h" +#include "exec/cpu-defs.h" + +/* The x86 has a strong memory model with some store-after-load re-ordering */ +#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) + +/* Maximum instruction code size */ +#define TARGET_MAX_INSN_SIZE 16 + +/* support for self modifying code even if the modified instruction is + close to the modifying instruction */ +#define TARGET_HAS_PRECISE_SMC + +#ifdef TARGET_X86_64 +#define I386_ELF_MACHINE EM_X86_64 +#define ELF_MACHINE_UNAME "x86_64" +#else +#define I386_ELF_MACHINE EM_386 +#define ELF_MACHINE_UNAME "i686" +#endif + +enum { + R_EAX = 0, + R_ECX = 1, + R_EDX = 2, + R_EBX = 3, + R_ESP = 4, + R_EBP = 5, + R_ESI = 6, + R_EDI = 7, + R_R8 = 8, + R_R9 = 9, + R_R10 = 10, + R_R11 = 11, + R_R12 = 12, + R_R13 = 13, + R_R14 = 14, + R_R15 = 15, + + R_AL = 0, + R_CL = 1, + R_DL = 2, + R_BL = 3, + R_AH = 4, + R_CH = 5, + R_DH = 6, + R_BH = 7, +}; + +typedef enum X86Seg { + R_ES = 0, + R_CS = 1, + R_SS = 2, + R_DS = 3, + R_FS = 4, + R_GS = 5, + R_LDTR = 6, + R_TR = 7, +} X86Seg; + +/* segment descriptor fields */ +#define DESC_G_SHIFT 23 +#define DESC_G_MASK (1 << DESC_G_SHIFT) +#define DESC_B_SHIFT 22 +#define DESC_B_MASK (1 << DESC_B_SHIFT) +#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ +#define DESC_L_MASK (1 << DESC_L_SHIFT) +#define DESC_AVL_SHIFT 20 +#define DESC_AVL_MASK (1 << DESC_AVL_SHIFT) +#define DESC_P_SHIFT 15 +#define DESC_P_MASK (1 << DESC_P_SHIFT) +#define DESC_DPL_SHIFT 13 +#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) +#define DESC_S_SHIFT 12 +#define DESC_S_MASK (1 << DESC_S_SHIFT) +#define DESC_TYPE_SHIFT 8 +#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT) +#define DESC_A_MASK (1 << 8) + +#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */ +#define DESC_C_MASK (1 << 10) /* code: conforming */ +#define DESC_R_MASK (1 << 
9) /* code: readable */ + +#define DESC_E_MASK (1 << 10) /* data: expansion direction */ +#define DESC_W_MASK (1 << 9) /* data: writable */ + +#define DESC_TSS_BUSY_MASK (1 << 9) + +/* eflags masks */ +#define CC_C 0x0001 +#define CC_P 0x0004 +#define CC_A 0x0010 +#define CC_Z 0x0040 +#define CC_S 0x0080 +#define CC_O 0x0800 + +#define TF_SHIFT 8 +#define IOPL_SHIFT 12 +#define VM_SHIFT 17 + +#define TF_MASK 0x00000100 +#define IF_MASK 0x00000200 +#define DF_MASK 0x00000400 +#define IOPL_MASK 0x00003000 +#define NT_MASK 0x00004000 +#define RF_MASK 0x00010000 +#define VM_MASK 0x00020000 +#define AC_MASK 0x00040000 +#define VIF_MASK 0x00080000 +#define VIP_MASK 0x00100000 +#define ID_MASK 0x00200000 + +/* hidden flags - used internally by qemu to represent additional cpu + states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We + avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit + positions to ease oring with eflags. */ +/* current cpl */ +#define HF_CPL_SHIFT 0 +/* true if hardware interrupts must be disabled for next instruction */ +#define HF_INHIBIT_IRQ_SHIFT 3 +/* 16 or 32 segments */ +#define HF_CS32_SHIFT 4 +#define HF_SS32_SHIFT 5 +/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ +#define HF_ADDSEG_SHIFT 6 +/* copy of CR0.PE (protected mode) */ +#define HF_PE_SHIFT 7 +#define HF_TF_SHIFT 8 /* must be same as eflags */ +#define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ +#define HF_EM_SHIFT 10 +#define HF_TS_SHIFT 11 +#define HF_IOPL_SHIFT 12 /* must be same as eflags */ +#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ +#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ +#define HF_RF_SHIFT 16 /* must be same as eflags */ +#define HF_VM_SHIFT 17 /* must be same as eflags */ +#define HF_AC_SHIFT 18 /* must be same as eflags */ +#define HF_SMM_SHIFT 19 /* CPU in SMM mode */ +#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ +#define HF_GUEST_SHIFT 21 /* SVM intercepts are active */ +#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ +#define HF_SMAP_SHIFT 23 /* CR4.SMAP */ +#define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */ +#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */ +#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */ + +#define HF_CPL_MASK (3 << HF_CPL_SHIFT) +#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) +#define HF_CS32_MASK (1 << HF_CS32_SHIFT) +#define HF_SS32_MASK (1 << HF_SS32_SHIFT) +#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) +#define HF_PE_MASK (1 << HF_PE_SHIFT) +#define HF_TF_MASK (1 << HF_TF_SHIFT) +#define HF_MP_MASK (1 << HF_MP_SHIFT) +#define HF_EM_MASK (1 << HF_EM_SHIFT) +#define HF_TS_MASK (1 << HF_TS_SHIFT) +#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) +#define HF_LMA_MASK (1 << HF_LMA_SHIFT) +#define HF_CS64_MASK (1 << HF_CS64_SHIFT) +#define HF_RF_MASK (1 << HF_RF_SHIFT) +#define HF_VM_MASK (1 << HF_VM_SHIFT) +#define HF_AC_MASK (1 << HF_AC_SHIFT) +#define HF_SMM_MASK (1 << HF_SMM_SHIFT) +#define HF_SVME_MASK (1 << HF_SVME_SHIFT) +#define HF_GUEST_MASK (1 << HF_GUEST_SHIFT) +#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) +#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) +#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT) +#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT) +#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT) + +/* hflags2 */ + +#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ +#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ +#define HF2_NMI_SHIFT 2 /* CPU serving NMI */ +#define HF2_VINTR_SHIFT 3 /* value of 
V_INTR_MASKING bit */ +#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */ +#define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */ +#define HF2_NPT_SHIFT 6 /* Nested Paging enabled */ +#define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */ + +#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) +#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) +#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) +#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) +#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT) +#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT) +#define HF2_NPT_MASK (1 << HF2_NPT_SHIFT) +#define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT) + +#define CR0_PE_SHIFT 0 +#define CR0_MP_SHIFT 1 + +#define CR0_PE_MASK (1U << 0) +#define CR0_MP_MASK (1U << 1) +#define CR0_EM_MASK (1U << 2) +#define CR0_TS_MASK (1U << 3) +#define CR0_ET_MASK (1U << 4) +#define CR0_NE_MASK (1U << 5) +#define CR0_WP_MASK (1U << 16) +#define CR0_AM_MASK (1U << 18) +#define CR0_PG_MASK (1U << 31) + +#define CR4_VME_MASK (1U << 0) +#define CR4_PVI_MASK (1U << 1) +#define CR4_TSD_MASK (1U << 2) +#define CR4_DE_MASK (1U << 3) +#define CR4_PSE_MASK (1U << 4) +#define CR4_PAE_MASK (1U << 5) +#define CR4_MCE_MASK (1U << 6) +#define CR4_PGE_MASK (1U << 7) +#define CR4_PCE_MASK (1U << 8) +#define CR4_OSFXSR_SHIFT 9 +#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT) +#define CR4_OSXMMEXCPT_MASK (1U << 10) +#define CR4_LA57_MASK (1U << 12) +#define CR4_VMXE_MASK (1U << 13) +#define CR4_SMXE_MASK (1U << 14) +#define CR4_FSGSBASE_MASK (1U << 16) +#define CR4_PCIDE_MASK (1U << 17) +#define CR4_OSXSAVE_MASK (1U << 18) +#define CR4_SMEP_MASK (1U << 20) +#define CR4_SMAP_MASK (1U << 21) +#define CR4_PKE_MASK (1U << 22) + +#define DR6_BD (1 << 13) +#define DR6_BS (1 << 14) +#define DR6_BT (1 << 15) +#define DR6_FIXED_1 0xffff0ff0 + +#define DR7_GD (1 << 13) +#define DR7_TYPE_SHIFT 16 +#define DR7_LEN_SHIFT 18 +#define DR7_FIXED_1 0x00000400 +#define DR7_GLOBAL_BP_MASK 0xaa +#define DR7_LOCAL_BP_MASK 0x55 +#define DR7_MAX_BP 4 +#define DR7_TYPE_BP_INST 0x0 +#define DR7_TYPE_DATA_WR 0x1 +#define DR7_TYPE_IO_RW 0x2 +#define DR7_TYPE_DATA_RW 0x3 + +#define PG_PRESENT_BIT 0 +#define PG_RW_BIT 1 +#define PG_USER_BIT 2 +#define PG_PWT_BIT 3 +#define PG_PCD_BIT 4 +#define PG_ACCESSED_BIT 5 +#define PG_DIRTY_BIT 6 +#define PG_PSE_BIT 7 +#define PG_GLOBAL_BIT 8 +#define PG_PSE_PAT_BIT 12 +#define PG_PKRU_BIT 59 +#define PG_NX_BIT 63 + +#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) +#define PG_RW_MASK (1 << PG_RW_BIT) +#define PG_USER_MASK (1 << PG_USER_BIT) +#define PG_PWT_MASK (1 << PG_PWT_BIT) +#define PG_PCD_MASK (1 << PG_PCD_BIT) +#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) +#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) +#define PG_PSE_MASK (1 << PG_PSE_BIT) +#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) +#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT) +#define PG_ADDRESS_MASK 0x000ffffffffff000LL +#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK) +#define PG_HI_USER_MASK 0x7ff0000000000000LL +#define PG_PKRU_MASK (15ULL << PG_PKRU_BIT) +#define PG_NX_MASK (1ULL << PG_NX_BIT) + +#define PG_ERROR_W_BIT 1 + +#define PG_ERROR_P_MASK 0x01 +#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) +#define PG_ERROR_U_MASK 0x04 +#define PG_ERROR_RSVD_MASK 0x08 +#define PG_ERROR_I_D_MASK 0x10 +#define PG_ERROR_PK_MASK 0x20 + +#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ +#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ +#define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */ + +#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P) 
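(`mce_init()` earlier in this patch builds MCG_CAP by ORing the bank count into the low byte alongside these capability bits. A standalone sketch with the same constants written out:)

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Recreate mce_init()'s MCG_CAP value: bank count in the low
     * byte, capability bits (CTL_P, SER_P) higher up. */
    uint64_t mcg_ctl_p = 1ull << 8;   /* MCG_CTL_P */
    uint64_t mcg_ser_p = 1ull << 24;  /* MCG_SER_P */
    uint64_t banks = 10;              /* MCE_BANKS_DEF */

    uint64_t mcg_cap = mcg_ctl_p | mcg_ser_p | banks;
    printf("MCG_CAP=%#llx, banks=%llu\n",
           (unsigned long long)mcg_cap,
           (unsigned long long)(mcg_cap & 0xff));
    return 0;
}
```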
+#define MCE_BANKS_DEF 10 + +#define MCG_CAP_BANKS_MASK 0xff + +#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ +#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ +#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ +#define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */ + +#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */ + +#define MCI_STATUS_VAL (1ULL<<63) /* valid error */ +#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ +#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ +#define MCI_STATUS_EN (1ULL<<60) /* error enabled */ +#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ +#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ +#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ +#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ +#define MCI_STATUS_AR (1ULL<<55) /* Action required */ + +/* MISC register defines */ +#define MCM_ADDR_SEGOFF 0 /* segment offset */ +#define MCM_ADDR_LINEAR 1 /* linear address */ +#define MCM_ADDR_PHYS 2 /* physical address */ +#define MCM_ADDR_MEM 3 /* memory address */ +#define MCM_ADDR_GENERIC 7 /* generic */ + +#define MSR_IA32_TSC 0x10 +#define MSR_IA32_APICBASE 0x1b +#define MSR_IA32_APICBASE_BSP (1<<8) +#define MSR_IA32_APICBASE_ENABLE (1<<11) +#define MSR_IA32_APICBASE_EXTD (1 << 10) +#define MSR_IA32_APICBASE_BASE (0xfffffU<<12) +#define MSR_IA32_FEATURE_CONTROL 0x0000003a +#define MSR_TSC_ADJUST 0x0000003b +#define MSR_IA32_SPEC_CTRL 0x48 +#define MSR_VIRT_SSBD 0xc001011f +#define MSR_IA32_PRED_CMD 0x49 +#define MSR_IA32_UCODE_REV 0x8b +#define MSR_IA32_CORE_CAPABILITY 0xcf + +#define MSR_IA32_ARCH_CAPABILITIES 0x10a +#define ARCH_CAP_TSX_CTRL_MSR (1<<7) + +#define MSR_IA32_TSX_CTRL 0x122 +#define MSR_IA32_TSCDEADLINE 0x6e0 + +#define FEATURE_CONTROL_LOCKED (1<<0) +#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) +#define FEATURE_CONTROL_LMCE (1<<20) + +#define MSR_P6_PERFCTR0 0xc1 + +#define MSR_IA32_SMBASE 0x9e +#define MSR_SMI_COUNT 0x34 +#define MSR_MTRRcap 0xfe +#define MSR_MTRRcap_VCNT 8 +#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) +#define MSR_MTRRcap_WC_SUPPORTED (1 << 10) + +#define MSR_IA32_SYSENTER_CS 0x174 +#define MSR_IA32_SYSENTER_ESP 0x175 +#define MSR_IA32_SYSENTER_EIP 0x176 + +#define MSR_MCG_CAP 0x179 +#define MSR_MCG_STATUS 0x17a +#define MSR_MCG_CTL 0x17b +#define MSR_MCG_EXT_CTL 0x4d0 + +#define MSR_P6_EVNTSEL0 0x186 + +#define MSR_IA32_PERF_STATUS 0x198 + +#define MSR_IA32_MISC_ENABLE 0x1a0 +/* Indicates good rep/movs microcode on some processors: */ +#define MSR_IA32_MISC_ENABLE_DEFAULT 1 +#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) + +#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) +#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) + +#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2) + +#define MSR_MTRRfix64K_00000 0x250 +#define MSR_MTRRfix16K_80000 0x258 +#define MSR_MTRRfix16K_A0000 0x259 +#define MSR_MTRRfix4K_C0000 0x268 +#define MSR_MTRRfix4K_C8000 0x269 +#define MSR_MTRRfix4K_D0000 0x26a +#define MSR_MTRRfix4K_D8000 0x26b +#define MSR_MTRRfix4K_E0000 0x26c +#define MSR_MTRRfix4K_E8000 0x26d +#define MSR_MTRRfix4K_F0000 0x26e +#define MSR_MTRRfix4K_F8000 0x26f + +#define MSR_PAT 0x277 + +#define MSR_MTRRdefType 0x2ff + +#define MSR_CORE_PERF_FIXED_CTR0 0x309 +#define MSR_CORE_PERF_FIXED_CTR1 0x30a +#define MSR_CORE_PERF_FIXED_CTR2 0x30b +#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d +#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e +#define MSR_CORE_PERF_GLOBAL_CTRL 
0x38f +#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 + +#define MSR_MC0_CTL 0x400 +#define MSR_MC0_STATUS 0x401 +#define MSR_MC0_ADDR 0x402 +#define MSR_MC0_MISC 0x403 + +#define MSR_IA32_RTIT_OUTPUT_BASE 0x560 +#define MSR_IA32_RTIT_OUTPUT_MASK 0x561 +#define MSR_IA32_RTIT_CTL 0x570 +#define MSR_IA32_RTIT_STATUS 0x571 +#define MSR_IA32_RTIT_CR3_MATCH 0x572 +#define MSR_IA32_RTIT_ADDR0_A 0x580 +#define MSR_IA32_RTIT_ADDR0_B 0x581 +#define MSR_IA32_RTIT_ADDR1_A 0x582 +#define MSR_IA32_RTIT_ADDR1_B 0x583 +#define MSR_IA32_RTIT_ADDR2_A 0x584 +#define MSR_IA32_RTIT_ADDR2_B 0x585 +#define MSR_IA32_RTIT_ADDR3_A 0x586 +#define MSR_IA32_RTIT_ADDR3_B 0x587 +#define MAX_RTIT_ADDRS 8 + +#define MSR_EFER 0xc0000080 + +#define MSR_EFER_SCE (1 << 0) +#define MSR_EFER_LME (1 << 8) +#define MSR_EFER_LMA (1 << 10) +#define MSR_EFER_NXE (1 << 11) +#define MSR_EFER_SVME (1 << 12) +#define MSR_EFER_FFXSR (1 << 14) + +#define MSR_STAR 0xc0000081 +#define MSR_LSTAR 0xc0000082 +#define MSR_CSTAR 0xc0000083 +#define MSR_FMASK 0xc0000084 +#define MSR_FSBASE 0xc0000100 +#define MSR_GSBASE 0xc0000101 +#define MSR_KERNELGSBASE 0xc0000102 +#define MSR_TSC_AUX 0xc0000103 + +#define MSR_VM_HSAVE_PA 0xc0010117 + +#define MSR_IA32_BNDCFGS 0x00000d90 +#define MSR_IA32_XSS 0x00000da0 +#define MSR_IA32_UMWAIT_CONTROL 0xe1 + +#define MSR_IA32_VMX_BASIC 0x00000480 +#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 +#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 +#define MSR_IA32_VMX_EXIT_CTLS 0x00000483 +#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 +#define MSR_IA32_VMX_MISC 0x00000485 +#define MSR_IA32_VMX_CR0_FIXED0 0x00000486 +#define MSR_IA32_VMX_CR0_FIXED1 0x00000487 +#define MSR_IA32_VMX_CR4_FIXED0 0x00000488 +#define MSR_IA32_VMX_CR4_FIXED1 0x00000489 +#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a +#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b +#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c +#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d +#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e +#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f +#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 +#define MSR_IA32_VMX_VMFUNC 0x00000491 + +#define XSTATE_FP_BIT 0 +#define XSTATE_SSE_BIT 1 +#define XSTATE_YMM_BIT 2 +#define XSTATE_BNDREGS_BIT 3 +#define XSTATE_BNDCSR_BIT 4 +#define XSTATE_OPMASK_BIT 5 +#define XSTATE_ZMM_Hi256_BIT 6 +#define XSTATE_Hi16_ZMM_BIT 7 +#define XSTATE_PKRU_BIT 9 + +#define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT) +#define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT) +#define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT) +#define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT) +#define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT) +#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT) +#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) +#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) +#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) + +/* CPUID feature words */ +typedef enum FeatureWord { + FEAT_1_EDX, /* CPUID[1].EDX */ + FEAT_1_ECX, /* CPUID[1].ECX */ + FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ + FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */ + FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */ + FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */ + FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ + FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ + FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ + FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ + FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ + FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ + FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */ + FEAT_HYPERV_EAX, /* 
CPUID[4000_0003].EAX */ + FEAT_HYPERV_EBX, /* CPUID[4000_0003].EBX */ + FEAT_HYPERV_EDX, /* CPUID[4000_0003].EDX */ + FEAT_HV_RECOMM_EAX, /* CPUID[4000_0004].EAX */ + FEAT_HV_NESTED_EAX, /* CPUID[4000_000A].EAX */ + FEAT_SVM, /* CPUID[8000_000A].EDX */ + FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */ + FEAT_6_EAX, /* CPUID[6].EAX */ + FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ + FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ + FEAT_ARCH_CAPABILITIES, + FEAT_CORE_CAPABILITY, + FEAT_VMX_PROCBASED_CTLS, + FEAT_VMX_SECONDARY_CTLS, + FEAT_VMX_PINBASED_CTLS, + FEAT_VMX_EXIT_CTLS, + FEAT_VMX_ENTRY_CTLS, + FEAT_VMX_MISC, + FEAT_VMX_EPT_VPID_CAPS, + FEAT_VMX_BASIC, + FEAT_VMX_VMFUNC, + FEATURE_WORDS, +} FeatureWord; + +typedef uint64_t FeatureWordArray[FEATURE_WORDS]; + +/* cpuid_features bits */ +#define CPUID_FP87 (1U << 0) +#define CPUID_VME (1U << 1) +#define CPUID_DE (1U << 2) +#define CPUID_PSE (1U << 3) +#define CPUID_TSC (1U << 4) +#define CPUID_MSR (1U << 5) +#define CPUID_PAE (1U << 6) +#define CPUID_MCE (1U << 7) +#define CPUID_CX8 (1U << 8) +#define CPUID_APIC (1U << 9) +#define CPUID_SEP (1U << 11) /* sysenter/sysexit */ +#define CPUID_MTRR (1U << 12) +#define CPUID_PGE (1U << 13) +#define CPUID_MCA (1U << 14) +#define CPUID_CMOV (1U << 15) +#define CPUID_PAT (1U << 16) +#define CPUID_PSE36 (1U << 17) +#define CPUID_PN (1U << 18) +#define CPUID_CLFLUSH (1U << 19) +#define CPUID_DTS (1U << 21) +#define CPUID_ACPI (1U << 22) +#define CPUID_MMX (1U << 23) +#define CPUID_FXSR (1U << 24) +#define CPUID_SSE (1U << 25) +#define CPUID_SSE2 (1U << 26) +#define CPUID_SS (1U << 27) +#define CPUID_HT (1U << 28) +#define CPUID_TM (1U << 29) +#define CPUID_IA64 (1U << 30) +#define CPUID_PBE (1U << 31) + +#define CPUID_EXT_SSE3 (1U << 0) +#define CPUID_EXT_PCLMULQDQ (1U << 1) +#define CPUID_EXT_DTES64 (1U << 2) +#define CPUID_EXT_MONITOR (1U << 3) +#define CPUID_EXT_DSCPL (1U << 4) +#define CPUID_EXT_VMX (1U << 5) +#define CPUID_EXT_SMX (1U << 6) +#define CPUID_EXT_EST (1U << 7) +#define CPUID_EXT_TM2 (1U << 8) +#define CPUID_EXT_SSSE3 (1U << 9) +#define CPUID_EXT_CID (1U << 10) +#define CPUID_EXT_FMA (1U << 12) +#define CPUID_EXT_CX16 (1U << 13) +#define CPUID_EXT_XTPR (1U << 14) +#define CPUID_EXT_PDCM (1U << 15) +#define CPUID_EXT_PCID (1U << 17) +#define CPUID_EXT_DCA (1U << 18) +#define CPUID_EXT_SSE41 (1U << 19) +#define CPUID_EXT_SSE42 (1U << 20) +#define CPUID_EXT_X2APIC (1U << 21) +#define CPUID_EXT_MOVBE (1U << 22) +#define CPUID_EXT_POPCNT (1U << 23) +#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24) +#define CPUID_EXT_AES (1U << 25) +#define CPUID_EXT_XSAVE (1U << 26) +#define CPUID_EXT_OSXSAVE (1U << 27) +#define CPUID_EXT_AVX (1U << 28) +#define CPUID_EXT_F16C (1U << 29) +#define CPUID_EXT_RDRAND (1U << 30) +#define CPUID_EXT_HYPERVISOR (1U << 31) + +#define CPUID_EXT2_FPU (1U << 0) +#define CPUID_EXT2_VME (1U << 1) +#define CPUID_EXT2_DE (1U << 2) +#define CPUID_EXT2_PSE (1U << 3) +#define CPUID_EXT2_TSC (1U << 4) +#define CPUID_EXT2_MSR (1U << 5) +#define CPUID_EXT2_PAE (1U << 6) +#define CPUID_EXT2_MCE (1U << 7) +#define CPUID_EXT2_CX8 (1U << 8) +#define CPUID_EXT2_APIC (1U << 9) +#define CPUID_EXT2_SYSCALL (1U << 11) +#define CPUID_EXT2_MTRR (1U << 12) +#define CPUID_EXT2_PGE (1U << 13) +#define CPUID_EXT2_MCA (1U << 14) +#define CPUID_EXT2_CMOV (1U << 15) +#define CPUID_EXT2_PAT (1U << 16) +#define CPUID_EXT2_PSE36 (1U << 17) +#define CPUID_EXT2_MP (1U << 19) +#define CPUID_EXT2_NX (1U << 20) +#define CPUID_EXT2_MMXEXT (1U << 22) +#define CPUID_EXT2_MMX (1U << 23) +#define 
CPUID_EXT2_FXSR (1U << 24) +#define CPUID_EXT2_FFXSR (1U << 25) +#define CPUID_EXT2_PDPE1GB (1U << 26) +#define CPUID_EXT2_RDTSCP (1U << 27) +#define CPUID_EXT2_LM (1U << 29) +#define CPUID_EXT2_3DNOWEXT (1U << 30) +#define CPUID_EXT2_3DNOW (1U << 31) + +/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */ +#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \ + CPUID_EXT2_DE | CPUID_EXT2_PSE | \ + CPUID_EXT2_TSC | CPUID_EXT2_MSR | \ + CPUID_EXT2_PAE | CPUID_EXT2_MCE | \ + CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \ + CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \ + CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \ + CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \ + CPUID_EXT2_MMX | CPUID_EXT2_FXSR) + +#define CPUID_EXT3_LAHF_LM (1U << 0) +#define CPUID_EXT3_CMP_LEG (1U << 1) +#define CPUID_EXT3_SVM (1U << 2) +#define CPUID_EXT3_EXTAPIC (1U << 3) +#define CPUID_EXT3_CR8LEG (1U << 4) +#define CPUID_EXT3_ABM (1U << 5) +#define CPUID_EXT3_SSE4A (1U << 6) +#define CPUID_EXT3_MISALIGNSSE (1U << 7) +#define CPUID_EXT3_3DNOWPREFETCH (1U << 8) +#define CPUID_EXT3_OSVW (1U << 9) +#define CPUID_EXT3_IBS (1U << 10) +#define CPUID_EXT3_XOP (1U << 11) +#define CPUID_EXT3_SKINIT (1U << 12) +#define CPUID_EXT3_WDT (1U << 13) +#define CPUID_EXT3_LWP (1U << 15) +#define CPUID_EXT3_FMA4 (1U << 16) +#define CPUID_EXT3_TCE (1U << 17) +#define CPUID_EXT3_NODEID (1U << 19) +#define CPUID_EXT3_TBM (1U << 21) +#define CPUID_EXT3_TOPOEXT (1U << 22) +#define CPUID_EXT3_PERFCORE (1U << 23) +#define CPUID_EXT3_PERFNB (1U << 24) + +#define CPUID_SVM_NPT (1U << 0) +#define CPUID_SVM_LBRV (1U << 1) +#define CPUID_SVM_SVMLOCK (1U << 2) +#define CPUID_SVM_NRIPSAVE (1U << 3) +#define CPUID_SVM_TSCSCALE (1U << 4) +#define CPUID_SVM_VMCBCLEAN (1U << 5) +#define CPUID_SVM_FLUSHASID (1U << 6) +#define CPUID_SVM_DECODEASSIST (1U << 7) +#define CPUID_SVM_PAUSEFILTER (1U << 10) +#define CPUID_SVM_PFTHRESHOLD (1U << 12) + +/* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ +#define CPUID_7_0_EBX_FSGSBASE (1U << 0) +/* 1st Group of Advanced Bit Manipulation Extensions */ +#define CPUID_7_0_EBX_BMI1 (1U << 3) +/* Hardware Lock Elision */ +#define CPUID_7_0_EBX_HLE (1U << 4) +/* Intel Advanced Vector Extensions 2 */ +#define CPUID_7_0_EBX_AVX2 (1U << 5) +/* Supervisor-mode Execution Prevention */ +#define CPUID_7_0_EBX_SMEP (1U << 7) +/* 2nd Group of Advanced Bit Manipulation Extensions */ +#define CPUID_7_0_EBX_BMI2 (1U << 8) +/* Enhanced REP MOVSB/STOSB */ +#define CPUID_7_0_EBX_ERMS (1U << 9) +/* Invalidate Process-Context Identifier */ +#define CPUID_7_0_EBX_INVPCID (1U << 10) +/* Restricted Transactional Memory */ +#define CPUID_7_0_EBX_RTM (1U << 11) +/* Memory Protection Extension */ +#define CPUID_7_0_EBX_MPX (1U << 14) +/* AVX-512 Foundation */ +#define CPUID_7_0_EBX_AVX512F (1U << 16) +/* AVX-512 Doubleword & Quadword Instruction */ +#define CPUID_7_0_EBX_AVX512DQ (1U << 17) +/* Read Random SEED */ +#define CPUID_7_0_EBX_RDSEED (1U << 18) +/* ADCX and ADOX instructions */ +#define CPUID_7_0_EBX_ADX (1U << 19) +/* Supervisor Mode Access Prevention */ +#define CPUID_7_0_EBX_SMAP (1U << 20) +/* AVX-512 Integer Fused Multiply Add */ +#define CPUID_7_0_EBX_AVX512IFMA (1U << 21) +/* Persistent Commit */ +#define CPUID_7_0_EBX_PCOMMIT (1U << 22) +/* Flush a Cache Line Optimized */ +#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) +/* Cache Line Write Back */ +#define CPUID_7_0_EBX_CLWB (1U << 24) +/* Intel Processor Trace */ +#define CPUID_7_0_EBX_INTEL_PT (1U << 25) +/* AVX-512 Prefetch */ +#define CPUID_7_0_EBX_AVX512PF (1U << 26)
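Feature bits like these are not tested against raw CPUID output at run time; they are cached per leaf in a FeatureWordArray (see the FeatureWord enum above) and masked when needed, the same pattern cpu_has_vmx() uses further down this header. A minimal sketch of that pattern (the helper name is hypothetical):

    static inline bool feature_word_has_avx2(const FeatureWordArray features)
    {
        /* CPUID[EAX=7,ECX=0].EBX bit 5, cached in features[FEAT_7_0_EBX] */
        return (features[FEAT_7_0_EBX] & CPUID_7_0_EBX_AVX2) != 0;
    }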
+/* AVX-512 Exponential and Reciprocal */ +#define CPUID_7_0_EBX_AVX512ER (1U << 27) +/* AVX-512 Conflict Detection */ +#define CPUID_7_0_EBX_AVX512CD (1U << 28) +/* SHA1/SHA256 Instruction Extensions */ +#define CPUID_7_0_EBX_SHA_NI (1U << 29) +/* AVX-512 Byte and Word Instructions */ +#define CPUID_7_0_EBX_AVX512BW (1U << 30) +/* AVX-512 Vector Length Extensions */ +#define CPUID_7_0_EBX_AVX512VL (1U << 31) + +/* AVX-512 Vector Byte Manipulation Instruction */ +#define CPUID_7_0_ECX_AVX512_VBMI (1U << 1) +/* User-Mode Instruction Prevention */ +#define CPUID_7_0_ECX_UMIP (1U << 2) +/* Protection Keys for User-mode Pages */ +#define CPUID_7_0_ECX_PKU (1U << 3) +/* OS Enable Protection Keys */ +#define CPUID_7_0_ECX_OSPKE (1U << 4) +/* UMONITOR/UMWAIT/TPAUSE Instructions */ +#define CPUID_7_0_ECX_WAITPKG (1U << 5) +/* Additional AVX-512 Vector Byte Manipulation Instruction */ +#define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6) +/* Galois Field New Instructions */ +#define CPUID_7_0_ECX_GFNI (1U << 8) +/* Vector AES Instructions */ +#define CPUID_7_0_ECX_VAES (1U << 9) +/* Carry-Less Multiplication Quadword */ +#define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10) +/* Vector Neural Network Instructions */ +#define CPUID_7_0_ECX_AVX512VNNI (1U << 11) +/* Support for VPOPCNT[B,W] and VPSHUFBITQMB */ +#define CPUID_7_0_ECX_AVX512BITALG (1U << 12) +/* POPCNT for vectors of DW/QW */ +#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) +/* 5-level Page Tables */ +#define CPUID_7_0_ECX_LA57 (1U << 16) +/* Read Processor ID */ +#define CPUID_7_0_ECX_RDPID (1U << 22) +/* Cache Line Demote Instruction */ +#define CPUID_7_0_ECX_CLDEMOTE (1U << 25) +/* Move Doubleword as Direct Store Instruction */ +#define CPUID_7_0_ECX_MOVDIRI (1U << 27) +/* Move 64 Bytes as Direct Store Instruction */ +#define CPUID_7_0_ECX_MOVDIR64B (1U << 28) + +/* AVX512 Neural Network Instructions */ +#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) +/* AVX512 Multiply Accumulation Single Precision */ +#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) +/* Speculation Control */ +#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) +/* Single Thread Indirect Branch Predictors */ +#define CPUID_7_0_EDX_STIBP (1U << 27) +/* Arch Capabilities */ +#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) +/* Core Capability */ +#define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30) +/* Speculative Store Bypass Disable */ +#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) + +/* AVX512 BFloat16 Instruction */ +#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) + +/* CLZERO instruction */ +#define CPUID_8000_0008_EBX_CLZERO (1U << 0) +/* Always save/restore FP error pointers */ +#define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2) +/* Write back and do not invalidate cache */ +#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) +/* Indirect Branch Prediction Barrier */ +#define CPUID_8000_0008_EBX_IBPB (1U << 12) +/* Single Thread Indirect Branch Predictors */ +#define CPUID_8000_0008_EBX_STIBP (1U << 15) + +#define CPUID_XSAVE_XSAVEOPT (1U << 0) +#define CPUID_XSAVE_XSAVEC (1U << 1) +#define CPUID_XSAVE_XGETBV1 (1U << 2) +#define CPUID_XSAVE_XSAVES (1U << 3) + +#define CPUID_6_EAX_ARAT (1U << 2) + +/* CPUID[0x80000007].EDX flags: */ +#define CPUID_APM_INVTSC (1U << 8) + +#define CPUID_VENDOR_SZ 12 + +#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ +#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */ +#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */ +#define CPUID_VENDOR_INTEL "GenuineIntel" + +#define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */ +#define CPUID_VENDOR_AMD_2 0x69746e65 /* 
"enti" */ +#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ +#define CPUID_VENDOR_AMD "AuthenticAMD" + +#define CPUID_VENDOR_VIA "CentaurHauls" + +#define CPUID_VENDOR_HYGON "HygonGenuine" + +#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ + (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ + (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) +#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ + (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ + (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) + +#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */ +#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */ + +/* CPUID[0xB].ECX level types */ +#define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8) +#define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8) +#define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8) +#define CPUID_TOPOLOGY_LEVEL_DIE (5U << 8) + +/* MSR Feature Bits */ +#define MSR_ARCH_CAP_RDCL_NO (1U << 0) +#define MSR_ARCH_CAP_IBRS_ALL (1U << 1) +#define MSR_ARCH_CAP_RSBA (1U << 2) +#define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3) +#define MSR_ARCH_CAP_SSB_NO (1U << 4) +#define MSR_ARCH_CAP_MDS_NO (1U << 5) +#define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6) +#define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7) +#define MSR_ARCH_CAP_TAA_NO (1U << 8) + +#define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) + +/* VMX MSR features */ +#define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull +#define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32) +#define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32) +#define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49) +#define MSR_VMX_BASIC_INS_OUTS (1ULL << 54) +#define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55) + +#define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full +#define MSR_VMX_MISC_STORE_LMA (1ULL << 5) +#define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6) +#define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7) +#define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8) +#define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull +#define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29) +#define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30) + +#define MSR_VMX_EPT_EXECONLY (1ULL << 0) +#define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6) +#define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7) +#define MSR_VMX_EPT_UC (1ULL << 8) +#define MSR_VMX_EPT_WB (1ULL << 14) +#define MSR_VMX_EPT_2MB (1ULL << 16) +#define MSR_VMX_EPT_1GB (1ULL << 17) +#define MSR_VMX_EPT_INVEPT (1ULL << 20) +#define MSR_VMX_EPT_AD_BITS (1ULL << 21) +#define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22) +#define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25) +#define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26) +#define MSR_VMX_EPT_INVVPID (1ULL << 32) +#define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40) +#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41) +#define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42) +#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43) + +#define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0) + + +/* VMX controls */ +#define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 +#define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008 +#define VMX_CPU_BASED_HLT_EXITING 0x00000080 +#define VMX_CPU_BASED_INVLPG_EXITING 0x00000200 +#define VMX_CPU_BASED_MWAIT_EXITING 0x00000400 +#define VMX_CPU_BASED_RDPMC_EXITING 0x00000800 +#define VMX_CPU_BASED_RDTSC_EXITING 0x00001000 +#define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000 +#define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000 +#define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000 +#define 
VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000 +#define VMX_CPU_BASED_TPR_SHADOW 0x00200000 +#define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 +#define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000 +#define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000 +#define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000 +#define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000 +#define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000 +#define VMX_CPU_BASED_MONITOR_EXITING 0x20000000 +#define VMX_CPU_BASED_PAUSE_EXITING 0x40000000 +#define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 + +#define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 +#define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002 +#define VMX_SECONDARY_EXEC_DESC 0x00000004 +#define VMX_SECONDARY_EXEC_RDTSCP 0x00000008 +#define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 +#define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020 +#define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040 +#define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 +#define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 +#define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 +#define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 +#define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800 +#define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 +#define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 +#define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000 +#define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000 +#define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000 +#define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000 +#define VMX_SECONDARY_EXEC_XSAVES 0x00100000 + +#define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001 +#define VMX_PIN_BASED_NMI_EXITING 0x00000008 +#define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020 +#define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 +#define VMX_PIN_BASED_POSTED_INTR 0x00000080 + +#define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 +#define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 +#define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 +#define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 +#define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000 +#define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000 +#define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000 +#define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000 +#define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 +#define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000 +#define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000 +#define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 + +#define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 +#define VMX_VM_ENTRY_IA32E_MODE 0x00000200 +#define VMX_VM_ENTRY_SMM 0x00000400 +#define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 +#define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 +#define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000 +#define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000 +#define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000 +#define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000 +#define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000 + +/* Supported Hyper-V Enlightenments */ +#define HYPERV_FEAT_RELAXED 0 +#define HYPERV_FEAT_VAPIC 1 +#define HYPERV_FEAT_TIME 2 +#define HYPERV_FEAT_CRASH 3 +#define HYPERV_FEAT_RESET 4 +#define HYPERV_FEAT_VPINDEX 5 +#define HYPERV_FEAT_RUNTIME 6 +#define HYPERV_FEAT_SYNIC 7 +#define HYPERV_FEAT_STIMER 8 +#define HYPERV_FEAT_FREQUENCIES 9 +#define HYPERV_FEAT_REENLIGHTENMENT 10 +#define HYPERV_FEAT_TLBFLUSH 11 +#define HYPERV_FEAT_EVMCS 12 +#define HYPERV_FEAT_IPI 13 +#define HYPERV_FEAT_STIMER_DIRECT 14 + +#ifndef HYPERV_SPINLOCK_NEVER_RETRY +#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF +#endif + +#define EXCP00_DIVZ 0 
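EXCP00_DIVZ and the EXCP* values that follow mirror the architectural exception vector numbers; they are what the raise_exception*() helpers declared near the end of this header (and implemented in excp_helper.c below) take directly. A minimal sketch of raising #DE (the wrapper name is hypothetical):

    static inline void raise_divide_error(CPUX86State *env)
    {
        /* #DE (vector 0) carries no error code */
        raise_exception(env, EXCP00_DIVZ);
    }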
+#define EXCP01_DB 1 +#define EXCP02_NMI 2 +#define EXCP03_INT3 3 +#define EXCP04_INTO 4 +#define EXCP05_BOUND 5 +#define EXCP06_ILLOP 6 +#define EXCP07_PREX 7 +#define EXCP08_DBLE 8 +#define EXCP09_XERR 9 +#define EXCP0A_TSS 10 +#define EXCP0B_NOSEG 11 +#define EXCP0C_STACK 12 +#define EXCP0D_GPF 13 +#define EXCP0E_PAGE 14 +#define EXCP10_COPR 16 +#define EXCP11_ALGN 17 +#define EXCP12_MCHK 18 + +#define EXCP_VMEXIT 0x100 /* only for system emulation */ +#define EXCP_SYSCALL 0x101 /* only for user emulation */ +#define EXCP_VSYSCALL 0x102 /* only for user emulation */ + +/* i386-specific interrupt pending bits. */ +#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1 +#define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 +#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 +#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 +#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 +#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1 +#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2 + +/* Use a clearer name for this. */ +#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET + +/* Instead of computing the condition codes after each x86 instruction, + * QEMU just stores one operand (called CC_SRC), the result + * (called CC_DST) and the type of operation (called CC_OP). When the + * condition codes are needed, they can be calculated + * using this information. Condition codes are not generated if they + * are only needed for conditional branches. + */ +typedef enum { + CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ + CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ + + CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ + CC_OP_MULW, + CC_OP_MULL, + CC_OP_MULQ, + + CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_ADDW, + CC_OP_ADDL, + CC_OP_ADDQ, + + CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_ADCW, + CC_OP_ADCL, + CC_OP_ADCQ, + + CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_SUBW, + CC_OP_SUBL, + CC_OP_SUBQ, + + CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_SBBW, + CC_OP_SBBL, + CC_OP_SBBQ, + + CC_OP_LOGICB, /* modify all flags, CC_DST = res */ + CC_OP_LOGICW, + CC_OP_LOGICL, + CC_OP_LOGICQ, + + CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */ + CC_OP_INCW, + CC_OP_INCL, + CC_OP_INCQ, + + CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */ + CC_OP_DECW, + CC_OP_DECL, + CC_OP_DECQ, + + CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ + CC_OP_SHLW, + CC_OP_SHLL, + CC_OP_SHLQ, + + CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ + CC_OP_SARW, + CC_OP_SARL, + CC_OP_SARQ, + + CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ + CC_OP_BMILGW, + CC_OP_BMILGL, + CC_OP_BMILGQ, + + CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ + CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */ + CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ + + CC_OP_CLR, /* Z set, all other flags clear. */ + CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear.
*/ + + CC_OP_NB, +} CCOp; + +typedef struct SegmentCache { + uint32_t selector; + target_ulong base; + uint32_t limit; + uint32_t flags; +} SegmentCache; + +#define MMREG_UNION(n, bits) \ + union n { \ + uint8_t _b_##n[(bits)/8]; \ + uint16_t _w_##n[(bits)/16]; \ + uint32_t _l_##n[(bits)/32]; \ + uint64_t _q_##n[(bits)/64]; \ + float32 _s_##n[(bits)/32]; \ + float64 _d_##n[(bits)/64]; \ + } + +typedef union { + uint8_t _b[16]; + uint16_t _w[8]; + uint32_t _l[4]; + uint64_t _q[2]; + float32 _s[4]; + float64 _d[2]; +} XMMReg; + +typedef union { + uint8_t _b[32]; + uint16_t _w[16]; + uint32_t _l[8]; + uint64_t _q[4]; + float32 _s[8]; + float64 _d[4]; +} YMMReg; + +#if 0 +typedef union { + uint8_t _b[64]; + uint16_t _w[32]; + uint32_t _l[16]; + uint64_t _q[8]; + float32 _s[16]; + float64 _d[8]; +} ZMMReg; +#endif + +typedef MMREG_UNION(ZMMReg, 512) ZMMReg; +typedef MMREG_UNION(MMXReg, 64) MMXReg; + +typedef struct BNDReg { + uint64_t lb; + uint64_t ub; +} BNDReg; + +typedef struct BNDCSReg { + uint64_t cfgu; + uint64_t sts; +} BNDCSReg; + +#define BNDCFG_ENABLE 1ULL +#define BNDCFG_BNDPRESERVE 2ULL +#define BNDCFG_BDIR_MASK TARGET_PAGE_MASK + +#ifdef HOST_WORDS_BIGENDIAN +#define ZMM_B(n) _b_ZMMReg[63 - (n)] +#define ZMM_W(n) _w_ZMMReg[31 - (n)] +#define ZMM_L(n) _l_ZMMReg[15 - (n)] +#define ZMM_S(n) _s_ZMMReg[15 - (n)] +#define ZMM_Q(n) _q_ZMMReg[7 - (n)] +#define ZMM_D(n) _d_ZMMReg[7 - (n)] + +#define MMX_B(n) _b_MMXReg[7 - (n)] +#define MMX_W(n) _w_MMXReg[3 - (n)] +#define MMX_L(n) _l_MMXReg[1 - (n)] +#define MMX_S(n) _s_MMXReg[1 - (n)] +#else +#define ZMM_B(n) _b_ZMMReg[n] +#define ZMM_W(n) _w_ZMMReg[n] +#define ZMM_L(n) _l_ZMMReg[n] +#define ZMM_S(n) _s_ZMMReg[n] +#define ZMM_Q(n) _q_ZMMReg[n] +#define ZMM_D(n) _d_ZMMReg[n] + +#define MMX_B(n) _b_MMXReg[n] +#define MMX_W(n) _w_MMXReg[n] +#define MMX_L(n) _l_MMXReg[n] +#define MMX_S(n) _s_MMXReg[n] +#endif +#define MMX_Q(n) _q_MMXReg[n] + +typedef union { + floatx80 QEMU_ALIGN(16, d); + MMXReg mmx; +} FPReg; + +typedef struct { + uint64_t base; + uint64_t mask; +} MTRRVar; + +#define CPU_NB_REGS64 16 +#define CPU_NB_REGS32 8 + +#ifdef TARGET_X86_64 +#define CPU_NB_REGS CPU_NB_REGS64 +#else +#define CPU_NB_REGS CPU_NB_REGS32 +#endif + +#define MAX_FIXED_COUNTERS 3 +#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) + +#define TARGET_INSN_START_EXTRA_WORDS 1 + +#define NB_OPMASK_REGS 8 + +/* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish + * that APIC ID hasn't been set yet + */ +#define UNASSIGNED_APIC_ID 0xFFFFFFFF + +typedef union X86LegacyXSaveArea { + struct { + uint16_t fcw; + uint16_t fsw; + uint8_t ftw; + uint8_t reserved; + uint16_t fpop; + uint64_t fpip; + uint64_t fpdp; + uint32_t mxcsr; + uint32_t mxcsr_mask; + FPReg fpregs[8]; + uint8_t xmm_regs[16][16]; + }; + uint8_t data[512]; +} X86LegacyXSaveArea; + +typedef struct X86XSaveHeader { + uint64_t xstate_bv; + uint64_t xcomp_bv; + uint64_t reserve0; + uint8_t reserved[40]; +} X86XSaveHeader; + +/* Ext. save area 2: AVX State */ +typedef struct XSaveAVX { + uint8_t ymmh[16][16]; +} XSaveAVX; + +/* Ext. save area 3: BNDREG */ +typedef struct XSaveBNDREG { + BNDReg bnd_regs[4]; +} XSaveBNDREG; + +/* Ext. save area 4: BNDCSR */ +typedef union XSaveBNDCSR { + BNDCSReg bndcsr; + uint8_t data[64]; +} XSaveBNDCSR; + +/* Ext. save area 5: Opmask */ +typedef struct XSaveOpmask { + uint64_t opmask_regs[NB_OPMASK_REGS]; +} XSaveOpmask; + +/* Ext. 
save area 6: ZMM_Hi256 */ +typedef struct XSaveZMM_Hi256 { + uint8_t zmm_hi256[16][32]; +} XSaveZMM_Hi256; + +/* Ext. save area 7: Hi16_ZMM */ +typedef struct XSaveHi16_ZMM { + uint8_t hi16_zmm[16][64]; +} XSaveHi16_ZMM; + +/* Ext. save area 9: PKRU state */ +typedef struct XSavePKRU { + uint32_t pkru; + uint32_t padding; +} XSavePKRU; + +typedef struct X86XSaveArea { + X86LegacyXSaveArea legacy; + X86XSaveHeader header; + + /* Extended save areas: */ + + /* AVX State: */ + XSaveAVX avx_state; + uint8_t padding[960 - 576 - sizeof(XSaveAVX)]; + /* MPX State: */ + XSaveBNDREG bndreg_state; + XSaveBNDCSR bndcsr_state; + /* AVX-512 State: */ + XSaveOpmask opmask_state; + XSaveZMM_Hi256 zmm_hi256_state; + XSaveHi16_ZMM hi16_zmm_state; + /* PKRU State: */ + XSavePKRU pkru_state; +} X86XSaveArea; + +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240); +QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100); +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0); +QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40); +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400); +QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40); +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440); +QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40); +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480); +QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200); +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680); +QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400); +QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80); +QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); + +typedef enum TPRAccess { + TPR_ACCESS_READ, + TPR_ACCESS_WRITE, +} TPRAccess; + +/* Cache information data structures: */ + +enum CacheType { + DATA_CACHE, + INSTRUCTION_CACHE, + UNIFIED_CACHE +}; + +typedef struct CPUCacheInfo { + enum CacheType type; + uint8_t level; + /* Size in bytes */ + uint32_t size; + /* Line size, in bytes */ + uint16_t line_size; + /* + * Associativity. + * Note: representation of fully-associative caches is not implemented + */ + uint8_t associativity; + /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */ + uint8_t partitions; + /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */ + uint32_t sets; + /* + * Lines per tag. + * AMD-specific: CPUID[0x80000005], CPUID[0x80000006]. + * (Is this synonym to @partitions?) + */ + uint8_t lines_per_tag; + + /* Self-initializing cache */ + bool self_init; + /* + * WBINVD/INVD is not guaranteed to act upon lower level caches of + * non-originating threads sharing this cache. + * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0] + */ + bool no_invd_sharing; + /* + * Cache is inclusive of lower cache levels. + * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1]. + */ + bool inclusive; + /* + * A complex function is used to index the cache, potentially using all + * address bits. CPUID[4].EDX[bit 2]. + */ + bool complex_indexing; +} CPUCacheInfo; + + +typedef struct CPUCaches { + CPUCacheInfo *l1d_cache; + CPUCacheInfo *l1i_cache; + CPUCacheInfo *l2_cache; + CPUCacheInfo *l3_cache; +} CPUCaches; + +typedef struct CPUX86State { + /* standard registers */ + target_ulong regs[CPU_NB_REGS]; + target_ulong eip; + target_ulong eflags; /* eflags register. 
During CPU emulation, CC + flags and DF are set to zero because they are + stored elsewhere */ + + /* emulator internal eflags handling */ + target_ulong cc_dst; + target_ulong cc_src; + target_ulong cc_src2; + uint32_t cc_op; + int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ + uint32_t hflags; /* TB flags, see HF_xxx constants. These flags + are known at translation time. */ + uint32_t hflags2; /* various other flags, see HF2_xxx constants. */ + + /* segments */ + SegmentCache segs[6]; /* selector values */ + SegmentCache ldt; + SegmentCache tr; + SegmentCache gdt; /* only base and limit are used */ + SegmentCache idt; /* only base and limit are used */ + + target_ulong cr[5]; /* NOTE: cr1 is unused */ + int32_t a20_mask; + + BNDReg bnd_regs[4]; + BNDCSReg bndcs_regs; + uint64_t msr_bndcfgs; + uint64_t efer; + + /* Beginning of state preserved by INIT (dummy marker). */ + int start_init_save; + + /* FPU state */ + unsigned int fpstt; /* top of stack index */ + uint16_t fpus; + uint16_t fpuc; + uint8_t fptags[8]; /* 0 = valid, 1 = empty */ + FPReg fpregs[8]; + /* KVM-only so far */ + uint16_t fpop; + uint16_t fpcs; + uint16_t fpds; + uint64_t fpip; + uint64_t fpdp; + + /* emulator internal variables */ + float_status fp_status; + floatx80 ft0; + + float_status mmx_status; /* for 3DNow! float ops */ + float_status sse_status; + uint32_t mxcsr; + ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32]; + ZMMReg xmm_t0; + MMXReg mmx_t0; + + XMMReg ymmh_regs[CPU_NB_REGS]; + + uint64_t opmask_regs[NB_OPMASK_REGS]; + YMMReg zmmh_regs[CPU_NB_REGS]; + ZMMReg hi16_zmm_regs[CPU_NB_REGS]; + + /* sysenter registers */ + uint32_t sysenter_cs; + target_ulong sysenter_esp; + target_ulong sysenter_eip; + uint64_t star; + + uint64_t vm_hsave; + +#ifdef TARGET_X86_64 + target_ulong lstar; + target_ulong cstar; + target_ulong fmask; + target_ulong kernelgsbase; +#endif + + uint64_t tsc; + uint64_t tsc_adjust; + uint64_t tsc_deadline; + uint64_t tsc_aux; + + uint64_t xcr0; + + uint64_t mcg_status; + uint64_t msr_ia32_misc_enable; + uint64_t msr_ia32_feature_control; + + uint64_t msr_fixed_ctr_ctrl; + uint64_t msr_global_ctrl; + uint64_t msr_global_status; + uint64_t msr_global_ovf_ctrl; + uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; + uint64_t msr_gp_counters[MAX_GP_COUNTERS]; + uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; + + uint64_t pat; + uint32_t smbase; + uint64_t msr_smi_count; + + uint32_t pkru; + uint32_t tsx_ctrl; + + uint64_t spec_ctrl; + uint64_t virt_ssbd; + + /* End of state preserved by INIT (dummy marker). 
*/ + int end_init_save; + + uint64_t system_time_msr; + uint64_t wall_clock_msr; + uint64_t steal_time_msr; + uint64_t async_pf_en_msr; + uint64_t pv_eoi_en_msr; + uint64_t poll_control_msr; + + /* exception/interrupt handling */ + int error_code; + int exception_is_int; + target_ulong exception_next_eip; + target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */ + union { + struct CPUBreakpoint *cpu_breakpoint[4]; + struct CPUWatchpoint *cpu_watchpoint[4]; + }; /* break/watchpoints for dr[0..3] */ + int old_exception; /* exception in flight */ + + uint64_t vm_vmcb; + uint64_t tsc_offset; + uint64_t intercept; + uint16_t intercept_cr_read; + uint16_t intercept_cr_write; + uint16_t intercept_dr_read; + uint16_t intercept_dr_write; + uint32_t intercept_exceptions; + uint64_t nested_cr3; + uint32_t nested_pg_mode; + uint8_t v_tpr; + + uintptr_t retaddr; + + /* Fields up to this point are cleared by a CPU reset */ + int end_reset_fields; + + /* Fields after this point are preserved across CPU reset. */ + + /* processor features (e.g. for CPUID insn) */ + /* Minimum cpuid leaf 7 value */ + uint32_t cpuid_level_func7; + /* Actual cpuid leaf 7 value */ + uint32_t cpuid_min_level_func7; + /* Minimum level/xlevel/xlevel2, based on CPU model + features */ + uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2; + /* Maximum level/xlevel/xlevel2 value for auto-assignment: */ + uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2; + /* Actual level/xlevel/xlevel2 value: */ + uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2; + uint32_t cpuid_vendor1; + uint32_t cpuid_vendor2; + uint32_t cpuid_vendor3; + uint32_t cpuid_version; + FeatureWordArray features; + /* Features that were explicitly enabled/disabled */ + FeatureWordArray user_features; + uint32_t cpuid_model[12]; + /* Cache information for CPUID. When legacy-cache=on, the cache data + * on each CPUID leaf will be different, because we keep compatibility + * with old QEMU versions. + */ + CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd; + + /* MTRRs */ + uint64_t mtrr_fixed[11]; + uint64_t mtrr_deftype; + MTRRVar mtrr_var[MSR_MTRRcap_VCNT]; + + /* For KVM */ + uint32_t mp_state; + int32_t exception_nr; + int32_t interrupt_injected; + uint8_t soft_interrupt; + uint8_t exception_pending; + uint8_t exception_injected; + uint8_t has_error_code; + uint8_t exception_has_payload; + uint64_t exception_payload; + uint32_t ins_len; + uint32_t sipi_vector; + bool tsc_valid; + int64_t tsc_khz; + int64_t user_tsc_khz; /* for sanity check only */ + + uint64_t mcg_cap; + uint64_t mcg_ctl; + uint64_t mcg_ext_ctl; + uint64_t mce_banks[MCE_BANKS_DEF*4]; + uint64_t xstate_bv; + + /* vmstate */ + uint16_t fpus_vmstate; + uint16_t fptag_vmstate; + uint16_t fpregs_format_vmstate; + + uint64_t xss; + uint32_t umwait; + + TPRAccess tpr_access_type; + + unsigned nr_dies; + unsigned nr_nodes; + unsigned pkg_offset; + + // Unicorn engine + struct uc_struct *uc; +} CPUX86State; + +/** + * X86CPU: + * @env: #CPUX86State + * @migratable: If set, only migratable flags will be accepted when "enforce" + * mode is used, and only migratable flags will be included in the "host" + * CPU model. + * + * An x86 CPU. + */ +struct X86CPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUNegativeOffsetState neg; + CPUX86State env; + + uint64_t ucode_rev; + + bool check_cpuid; + bool enforce_cpuid; + /* + * Force features to be enabled even if the host doesn't support them. 
+ * This is dangerous and should be done only for testing CPUID + * compatibility. + */ + bool force_features; + bool expose_kvm; + bool expose_tcg; + bool migratable; + bool migrate_smi_count; + bool max_features; /* Enable all supported features automatically */ + uint32_t apic_id; + + /* Enables publishing of TSC increment and Local APIC bus frequencies to + * the guest OS in CPUID page 0x40000010, the same way that VMWare does. */ + bool vmware_cpuid_freq; + + /* if true the CPUID code directly forwards host cache leaves to the guest */ + bool cache_info_passthrough; + + /* if true the CPUID code directly forwards + * host monitor/mwait leaves to the guest */ + struct { + uint32_t eax; + uint32_t ebx; + uint32_t ecx; + uint32_t edx; + } mwait; + + /* Features that were filtered out because of missing host capabilities */ + FeatureWordArray filtered_features; + + /* Enable PMU CPUID bits. This can't be enabled by default yet because + * it doesn't have ABI stability guarantees, as it passes all PMU CPUID + * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel + * capabilities) directly to the guest. + */ + bool enable_pmu; + + /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is + * disabled by default to avoid breaking migration between QEMU with + * different LMCE configurations. + */ + bool enable_lmce; + + /* Compatibility bits for old machine types. + * If true, present a virtual l3 cache for the VM; the vcpus in the same + * virtual socket share a virtual l3 cache. + */ + bool enable_l3_cache; + + /* Compatibility bits for old machine types. + * If true, present the old cache topology information + */ + bool legacy_cache; + + /* Compatibility bits for old machine types: */ + bool enable_cpuid_0xb; + + /* Enable auto level-increase for all CPUID leaves */ + bool full_cpuid_auto_level; + + /* Enable auto level-increase for the Intel Processor Trace leaf */ + bool intel_pt_auto_level; + + /* if true fill the top bits of the MTRR_PHYSMASKn variable range */ + bool fill_mtrr_mask; + + /* if true override the phys_bits value with a value read from the host */ + bool host_phys_bits; + + /* if set, limit maximum value for phys_bits when host_phys_bits is true */ + uint8_t host_phys_bits_limit; + + /* Number of physical address bits supported */ + uint32_t phys_bits; + +#if 0 + /* in order to simplify APIC support, we leave this pointer to the + user */ + struct DeviceState *apic_state; + struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram; +#endif + + int32_t node_id; /* NUMA node this CPU belongs to */ + int32_t socket_id; + int32_t die_id; + int32_t core_id; + int32_t thread_id; + + int32_t hv_max_vps; + + struct X86CPUClass cc; +}; + +#define X86_CPU(obj) ((X86CPU *)obj) +#define X86_CPU_CLASS(klass) ((X86CPUClass *)klass) +#define X86_CPU_GET_CLASS(obj) (&((X86CPU *)obj)->cc) + +/** + * x86_cpu_do_interrupt: + * @cpu: vCPU the interrupt is to be handled by.
+ */ +void x86_cpu_do_interrupt(CPUState *cpu); +bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); +int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); + +void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list); + +hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, + MemTxAttrs *attrs); + +void x86_cpu_exec_enter(CPUState *cpu); +void x86_cpu_exec_exit(CPUState *cpu); + +int cpu_x86_support_mca_broadcast(CPUX86State *env); + +int cpu_get_pic_interrupt(CPUX86State *s); +/* MSDOS compatibility mode FPU exception support */ +void x86_register_ferr_irq(qemu_irq irq); +void cpu_set_ignne(CPUX86State *env); + +/* mpx_helper.c */ +void cpu_sync_bndcs_hflags(CPUX86State *env); + +/* this function must always be used to load data in the segment + cache: it synchronizes the hflags with the segment cache values */ +static inline void cpu_x86_load_seg_cache(CPUX86State *env, + int seg_reg, unsigned int selector, + target_ulong base, + unsigned int limit, + unsigned int flags) +{ + SegmentCache *sc; + unsigned int new_hflags; + + sc = &env->segs[seg_reg]; + sc->selector = selector; + sc->base = base; + sc->limit = limit; + sc->flags = flags; + + /* update the hidden flags */ + { + if (seg_reg == R_CS) { +#ifdef TARGET_X86_64 + if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { + /* long mode */ + env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; + env->hflags &= ~(HF_ADDSEG_MASK); + } else +#endif + { + /* legacy / compatibility case */ + new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) + >> (DESC_B_SHIFT - HF_CS32_SHIFT); + env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | + new_hflags; + } + } + if (seg_reg == R_SS) { + int cpl = (flags >> DESC_DPL_SHIFT) & 3; +#if HF_CPL_MASK != 3 +#error HF_CPL_MASK is hardcoded +#endif + env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl; + /* Possibly switch between BNDCFGS and BNDCFGU */ + cpu_sync_bndcs_hflags(env); + } + new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) + >> (DESC_B_SHIFT - HF_SS32_SHIFT); + if (env->hflags & HF_CS64_MASK) { + /* zero base assumed for DS, ES and SS in long mode */ + } else if (!(env->cr[0] & CR0_PE_MASK) || + (env->eflags & VM_MASK) || + !(env->hflags & HF_CS32_MASK)) { + /* XXX: try to avoid this test. The problem comes from the + fact that in real mode or vm86 mode we only modify the + 'base' and 'selector' fields of the segment cache to go + faster. A solution may be to force addseg to one in + translate-i386.c.
*/ + new_hflags |= HF_ADDSEG_MASK; + } else { + new_hflags |= ((env->segs[R_DS].base | + env->segs[R_ES].base | + env->segs[R_SS].base) != 0) << + HF_ADDSEG_SHIFT; + } + env->hflags = (env->hflags & + ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; + } +} + +static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu, + uint8_t sipi_vector) +{ + CPUState *cs = CPU(cpu); + CPUX86State *env = &cpu->env; + + env->eip = 0; + cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8, + sipi_vector << 12, + env->segs[R_CS].limit, + env->segs[R_CS].flags); + cs->halted = 0; +} + +int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, + target_ulong *base, unsigned int *limit, + unsigned int *flags); + +/* op_helper.c */ +/* used for debug or cpu save/restore */ + +/* cpu-exec.c */ +/* the following helpers are only usable in user mode simulation as + they can trigger unexpected exceptions */ +void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector); +void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); +void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); +void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr); +void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr); + +/* you can call this signal handler from your SIGBUS and SIGSEGV + signal handlers to inform the virtual CPU of exceptions. non zero + is returned if the signal was handled by the virtual CPU. */ +int cpu_x86_signal_handler(int host_signum, void *pinfo, + void *puc); + +/* cpu.c */ +void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx); +void cpu_clear_apic_feature(CPUX86State *env); +void host_cpuid(uint32_t function, uint32_t count, + uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); +void host_vendor_fms(char *vendor, int *family, int *model, int *stepping); + +/* helper.c */ +bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +void x86_cpu_set_a20(X86CPU *cpu, int a20_state); + +static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) +{ + return !!attrs.secure; +} + +static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) +{ + return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); +} + +uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr); +uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr); +uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr); +uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr); +void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val); +void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val); +void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val); +void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val); +void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val); + +void breakpoint_handler(CPUState *cs); + +/* will be suppressed */ +void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); +void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); +void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); +void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7); + +/* hw/pc.c */ +uint64_t cpu_get_tsc(CPUX86State *env); + +/* XXX: This value should match the one returned by CPUID + * and in exec.c */ +# if defined(TARGET_X86_64) +# define TCG_PHYS_ADDR_BITS 40 +# else +# define TCG_PHYS_ADDR_BITS 36 +# endif + +#define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS) + +#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU 
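A quick worked check of the mask arithmetic just above: with TARGET_X86_64, TCG_PHYS_ADDR_BITS is 40, so PHYS_ADDR_MASK covers physical address bits 39:0, and PG_HI_RSVD_MASK (defined earlier as PG_ADDRESS_MASK & ~PHYS_ADDR_MASK) works out to the reserved PTE address bits 51:40. Expressed in the QEMU_BUILD_BUG_ON style this header already uses (illustrative, not part of the patch):

    #ifdef TARGET_X86_64
    QEMU_BUILD_BUG_ON(PHYS_ADDR_MASK != 0x000000ffffffffffULL);   /* bits 39:0 */
    QEMU_BUILD_BUG_ON(PG_HI_RSVD_MASK != 0x000fff0000000000ULL);  /* bits 51:40 */
    #endif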
+#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX) +#define CPU_RESOLVING_TYPE TYPE_X86_CPU + +#ifdef TARGET_X86_64 +#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64") +#else +#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") +#endif + +#define cpu_signal_handler cpu_x86_signal_handler + +/* MMU modes definitions */ +#define MMU_KSMAP_IDX 0 +#define MMU_USER_IDX 1 +#define MMU_KNOSMAP_IDX 2 +static inline int cpu_mmu_index(CPUX86State *env, bool ifetch) +{ + return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : + (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK)) + ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; +} + +static inline int cpu_mmu_index_kernel(CPUX86State *env) +{ + return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX : + ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) + ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; +} + +#define CC_DST (env->cc_dst) +#define CC_SRC (env->cc_src) +#define CC_SRC2 (env->cc_src2) +#define CC_OP (env->cc_op) + +/* n must be a constant to be efficient */ +static inline target_long lshift(target_long x, int n) +{ + if (n >= 0) { + return x << n; + } else { + return x >> (-n); + } +} + +/* float macros */ +#define FT0 (env->ft0) +#define ST0 (env->fpregs[env->fpstt].d) +#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) +#define ST1 ST(1) + +/* translate.c */ +void tcg_x86_init(struct uc_struct *uc); + +typedef CPUX86State CPUArchState; +typedef X86CPU ArchCPU; + +#include "exec/cpu-all.h" +#include "svm.h" + +static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *flags) +{ + *cs_base = env->segs[R_CS].base; + *pc = *cs_base + env->eip; + *flags = env->hflags | + (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); +} + +void do_cpu_init(X86CPU *cpu); +void do_cpu_sipi(X86CPU *cpu); + +/* excp_helper.c */ +void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index); +void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index, + uintptr_t retaddr); +void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, + int error_code); +void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index, + int error_code, uintptr_t retaddr); +void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int, + int error_code, int next_eip_addend); + +/* cc_helper.c */ +extern const uint8_t parity_table[256]; +uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); + +static inline uint32_t cpu_compute_eflags(CPUX86State *env) +{ + return (env->eflags & ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)) | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK); +} + +/* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS + * after generating a call to a helper that uses this. + */ +static inline void cpu_load_eflags(CPUX86State *env, int eflags, + int update_mask) +{ + CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); + CC_OP = CC_OP_EFLAGS; + env->df = 1 - (2 * ((eflags >> 10) & 1)); + env->eflags = (env->eflags & ~update_mask) | + (eflags & update_mask) | 0x2; +} + +/* load efer and update the corresponding hflags. XXX: do consistency + checks with cpuid bits? 
*/ +static inline void cpu_load_efer(CPUX86State *env, uint64_t val) +{ + env->efer = val; + env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); + if (env->efer & MSR_EFER_LMA) { + env->hflags |= HF_LMA_MASK; + } + if (env->efer & MSR_EFER_SVME) { + env->hflags |= HF_SVME_MASK; + } +} + +static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env) +{ + if ((env->hflags & HF_SMM_MASK) != 0) + return ((MemTxAttrs) { .secure = true }); + else + return ((MemTxAttrs) { .secure = false }); +} + +static inline int32_t x86_get_a20_mask(CPUX86State *env) +{ + if (env->hflags & HF_SMM_MASK) { + return -1; + } else { + return env->a20_mask; + } +} + +static inline bool cpu_has_vmx(CPUX86State *env) +{ + return env->features[FEAT_1_ECX] & CPUID_EXT_VMX; +} + +/* + * In order for a vCPU to enter VMX operation it must have CR4.VMXE set. + * Since it was set, CR4.VMXE must remain set as long as the vCPU is in + * VMX operation. This is because CR4.VMXE is one of the bits set + * in MSR_IA32_VMX_CR4_FIXED1. + * + * There is one exception to the above statement when a vCPU enters SMM mode. + * When a vCPU enters SMM mode, it temporarily exits VMX operation and + * may also reset CR4.VMXE during execution in SMM mode. + * When the vCPU exits SMM mode, vCPU state is restored to be in VMX operation + * and CR4.VMXE is restored to its original value of being set. + * + * Therefore, when a vCPU is not in SMM mode, we can infer whether + * VMX is being used by examining CR4.VMXE. Otherwise, we cannot + * know for certain. + */ +static inline bool cpu_vmx_maybe_enabled(CPUX86State *env) +{ + return cpu_has_vmx(env) && + ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK)); +} + +/* fpu_helper.c */ +void update_fp_status(CPUX86State *env); +void update_mxcsr_status(CPUX86State *env); + +static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr) +{ + env->mxcsr = mxcsr; + update_mxcsr_status(env); +} + +static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc) +{ + env->fpuc = fpuc; + update_fp_status(env); +} + +/* mem_helper.c */ +void helper_lock_init(void); + +/* svm_helper.c */ +void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, + uint64_t param, uintptr_t retaddr); +void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, + uint64_t exit_info_1, uintptr_t retaddr); +void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1); + +/* seg_helper.c */ +void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw); + +/* smm_helper.c */ +void do_smm_enter(X86CPU *cpu); + +/* Special values for X86CPUVersion: */ + +/* Resolve to latest CPU version */ +#define CPU_VERSION_LATEST -1 + +/* + * Resolve to version defined by current machine type. + * See x86_cpu_set_default_version() + */ +#define CPU_VERSION_AUTO -2 + +/* Don't resolve to any versioned CPU models, like old QEMU versions */ +#define CPU_VERSION_LEGACY 0 + +typedef int X86CPUVersion; + +/* + * Set default CPU model version for CPU models having + * version == CPU_VERSION_AUTO.
+ */ +void x86_cpu_set_default_version(X86CPUVersion version); + +/* Return name of 32-bit register, from a R_* constant */ +const char *get_register_name_32(unsigned int reg); + +#define APIC_DEFAULT_ADDRESS 0xfee00000 +#define APIC_SPACE_SIZE 0x100000 + +/* cpu.c */ +bool cpu_is_bsp(X86CPU *cpu); + +void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf); +void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf); +void x86_update_hflags(CPUX86State* env); + +int uc_check_cpu_x86_load_seg(CPUX86State *env, int seg_reg, int sel); +X86CPU *cpu_x86_init(struct uc_struct *uc, const char *cpu_model); + +#endif /* I386_CPU_H */ diff --git a/qemu/target/i386/excp_helper.c b/qemu/target/i386/excp_helper.c new file mode 100644 index 00000000..f658c819 --- /dev/null +++ b/qemu/target/i386/excp_helper.c @@ -0,0 +1,695 @@ +/* + * x86 exception helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "qemu/log.h" +#include "exec/helper-proto.h" +#include "sysemu/sysemu.h" + +#include "uc_priv.h" + +void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend) +{ + raise_interrupt(env, intno, 1, 0, next_eip_addend); +} + +void helper_raise_exception(CPUX86State *env, int exception_index) +{ + raise_exception(env, exception_index); +} + +/* + * Check nested exceptions and change to double or triple fault if + * needed. It should only be called if this is not an interrupt. + * Returns the new exception number. + */ +static int check_exception(CPUX86State *env, int intno, int *error_code, + uintptr_t retaddr) +{ + int first_contributory = env->old_exception == 0 || + (env->old_exception >= 10 && + env->old_exception <= 13); + int second_contributory = intno == 0 || + (intno >= 10 && intno <= 13); + + qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n", + env->old_exception, intno); + + if (env->old_exception == EXCP08_DBLE) { + if (env->hflags & HF_GUEST_MASK) { + cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */ + } + + qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); + + qemu_system_reset_request(env->uc); + return EXCP_HLT; + } + + if ((first_contributory && second_contributory) + || (env->old_exception == EXCP0E_PAGE && + (second_contributory || (intno == EXCP0E_PAGE)))) { + intno = EXCP08_DBLE; + *error_code = 0; + } + + if (second_contributory || (intno == EXCP0E_PAGE) || + (intno == EXCP08_DBLE)) { + env->old_exception = intno; + } + + return intno; +} + +/* + * Signal an interruption. It is executed in the main CPU loop. + * is_int is TRUE if coming from the int instruction. next_eip is the + * env->eip value AFTER the interrupt instruction. It is only relevant if + * is_int is TRUE.
+ */ +static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno, + int is_int, int error_code, + int next_eip_addend, + uintptr_t retaddr) +{ + CPUState *cs = env_cpu(env); + + if (!is_int) { + cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno, + error_code, retaddr); + intno = check_exception(env, intno, &error_code, retaddr); + } else { + cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr); + } + + cs->exception_index = intno; + env->error_code = error_code; + env->exception_is_int = is_int; + env->exception_next_eip = env->eip + next_eip_addend; + cpu_loop_exit_restore(cs, retaddr); +} + +/* shortcuts to generate exceptions */ + +void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int, + int error_code, int next_eip_addend) +{ + raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0); +} + +void raise_exception_err(CPUX86State *env, int exception_index, + int error_code) +{ + raise_interrupt2(env, exception_index, 0, error_code, 0, 0); +} + +void raise_exception_err_ra(CPUX86State *env, int exception_index, + int error_code, uintptr_t retaddr) +{ + raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr); +} + +void raise_exception(CPUX86State *env, int exception_index) +{ + raise_interrupt2(env, exception_index, 0, 0, 0, 0); +} + +void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr) +{ + raise_interrupt2(env, exception_index, 0, 0, 0, retaddr); +} + +static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type, + int *prot) +{ + CPUX86State *env = &X86_CPU(cs)->env; + uint64_t rsvd_mask = PG_HI_RSVD_MASK; + uint64_t ptep, pte; + uint64_t exit_info_1 = 0; + target_ulong pde_addr, pte_addr; + uint32_t page_offset; + int page_size; + + if (likely(!(env->hflags2 & HF2_NPT_MASK))) { + return gphys; + } + + if (!(env->nested_pg_mode & SVM_NPT_NXE)) { + rsvd_mask |= PG_NX_MASK; + } + + if (env->nested_pg_mode & SVM_NPT_PAE) { + uint64_t pde, pdpe; + target_ulong pdpe_addr; + +#ifdef TARGET_X86_64 + if (env->nested_pg_mode & SVM_NPT_LMA) { + uint64_t pml5e; + uint64_t pml4e_addr, pml4e; + + pml5e = env->nested_cr3; + ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; + + pml4e_addr = (pml5e & PG_ADDRESS_MASK) + + (((gphys >> 39) & 0x1ff) << 3); + pml4e = x86_ldq_phys(cs, pml4e_addr); + if (!(pml4e & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pml4e & (rsvd_mask | PG_PSE_MASK)) { + goto do_fault_rsvd; + } + if (!(pml4e & PG_ACCESSED_MASK)) { + pml4e |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pml4e_addr, pml4e); + } + ptep &= pml4e ^ PG_NX_MASK; + pdpe_addr = (pml4e & PG_ADDRESS_MASK) + + (((gphys >> 30) & 0x1ff) << 3); + pdpe = x86_ldq_phys(cs, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pdpe & rsvd_mask) { + goto do_fault_rsvd; + } + ptep &= pdpe ^ PG_NX_MASK; + if (!(pdpe & PG_ACCESSED_MASK)) { + pdpe |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pdpe_addr, pdpe); + } + if (pdpe & PG_PSE_MASK) { + /* 1 GB page */ + page_size = 1024 * 1024 * 1024; + pte_addr = pdpe_addr; + pte = pdpe; + goto do_check_protect; + } + } else +#endif + { + pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18); + pdpe = x86_ldq_phys(cs, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + goto do_fault; + } + rsvd_mask |= PG_HI_USER_MASK; + if (pdpe & (rsvd_mask | PG_NX_MASK)) { + goto do_fault_rsvd; + } + ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; + } + + pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3); + pde = 
x86_ldq_phys(cs, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pde & rsvd_mask) { + goto do_fault_rsvd; + } + ptep &= pde ^ PG_NX_MASK; + if (pde & PG_PSE_MASK) { + /* 2 MB page */ + page_size = 2048 * 1024; + pte_addr = pde_addr; + pte = pde; + goto do_check_protect; + } + /* 4 KB page */ + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pde_addr, pde); + } + pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3); + pte = x86_ldq_phys(cs, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pte & rsvd_mask) { + goto do_fault_rsvd; + } + /* combine pde and pte nx, user and rw protections */ + ptep &= pte ^ PG_NX_MASK; + page_size = 4096; + } else { + uint32_t pde; + + /* page directory entry */ + pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc); + pde = x86_ldl_phys(cs, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + goto do_fault; + } + ptep = pde | PG_NX_MASK; + + /* if PSE bit is set, then we use a 4MB page */ + if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { + page_size = 4096 * 1024; + pte_addr = pde_addr; + + /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. + * Leave bits 20-13 in place for setting accessed/dirty bits below. + */ + pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); + rsvd_mask = 0x200000; + goto do_check_protect_pse36; + } + + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pde_addr, pde); + } + + /* page directory entry */ + pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc); + pte = x86_ldl_phys(cs, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + goto do_fault; + } + /* combine pde and pte user and rw protections */ + ptep &= pte | PG_NX_MASK; + page_size = 4096; + rsvd_mask = 0; + } + + do_check_protect: + rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; + do_check_protect_pse36: + if (pte & rsvd_mask) { + goto do_fault_rsvd; + } + ptep ^= PG_NX_MASK; + + if (!(ptep & PG_USER_MASK)) { + goto do_fault_protect; + } + if (ptep & PG_NX_MASK) { + if (access_type == MMU_INST_FETCH) { + goto do_fault_protect; + } + *prot &= ~PAGE_EXEC; + } + if (!(ptep & PG_RW_MASK)) { + if (access_type == MMU_DATA_STORE) { + goto do_fault_protect; + } + *prot &= ~PAGE_WRITE; + } + + pte &= PG_ADDRESS_MASK & ~(page_size - 1); + page_offset = gphys & (page_size - 1); + return pte + page_offset; + + do_fault_rsvd: + exit_info_1 |= SVM_NPTEXIT_RSVD; + do_fault_protect: + exit_info_1 |= SVM_NPTEXIT_P; + do_fault: + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), + gphys); + exit_info_1 |= SVM_NPTEXIT_US; + if (access_type == MMU_DATA_STORE) { + exit_info_1 |= SVM_NPTEXIT_RW; + } else if (access_type == MMU_INST_FETCH) { + exit_info_1 |= SVM_NPTEXIT_ID; + } + if (prot) { + exit_info_1 |= SVM_NPTEXIT_GPA; + } else { /* page table access */ + exit_info_1 |= SVM_NPTEXIT_GPT; + } + cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr); +} + +/* return value: + * -1 = cannot handle fault + * 0 = nothing more to do + * 1 = generate PF fault + */ +static int handle_mmu_fault(CPUState *cs, vaddr addr, int size, + int is_write1, int mmu_idx) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + uint64_t ptep, pte; + int32_t a20_mask; + target_ulong pde_addr, pte_addr; + int error_code = 0; + int is_dirty, prot, page_size, is_write, is_user; + hwaddr paddr; + uint64_t rsvd_mask = PG_HI_RSVD_MASK; + uint32_t page_offset; + target_ulong vaddr; + + is_user = mmu_idx == 
MMU_USER_IDX; +#if defined(DEBUG_MMU) + printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n", + addr, is_write1, is_user, env->eip); +#endif + is_write = is_write1 & 1; + + a20_mask = x86_get_a20_mask(env); + if (!(env->cr[0] & CR0_PG_MASK)) { + pte = addr; +#ifdef TARGET_X86_64 + if (!(env->hflags & HF_LMA_MASK)) { + /* Without long mode we can only address 32bits in real mode */ + pte = (uint32_t)pte; + } +#endif + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + page_size = 4096; + goto do_mapping; + } + + if (!(env->efer & MSR_EFER_NXE)) { + rsvd_mask |= PG_NX_MASK; + } + + if (env->cr[4] & CR4_PAE_MASK) { + uint64_t pde, pdpe; + target_ulong pdpe_addr; + +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + bool la57 = env->cr[4] & CR4_LA57_MASK; + uint64_t pml5e_addr, pml5e; + uint64_t pml4e_addr, pml4e; + int32_t sext; + + /* test virtual address sign extension */ + sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47; + if (sext != 0 && sext != -1) { + env->error_code = 0; + cs->exception_index = EXCP0D_GPF; + return 1; + } + + if (la57) { + pml5e_addr = ((env->cr[3] & ~0xfff) + + (((addr >> 48) & 0x1ff) << 3)) & a20_mask; + pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL); + pml5e = x86_ldq_phys(cs, pml5e_addr); + if (!(pml5e & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pml5e & (rsvd_mask | PG_PSE_MASK)) { + goto do_fault_rsvd; + } + if (!(pml5e & PG_ACCESSED_MASK)) { + pml5e |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pml5e_addr, pml5e); + } + ptep = pml5e ^ PG_NX_MASK; + } else { + pml5e = env->cr[3]; + ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; + } + + pml4e_addr = ((pml5e & PG_ADDRESS_MASK) + + (((addr >> 39) & 0x1ff) << 3)) & a20_mask; + pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, false); + pml4e = x86_ldq_phys(cs, pml4e_addr); + if (!(pml4e & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pml4e & (rsvd_mask | PG_PSE_MASK)) { + goto do_fault_rsvd; + } + if (!(pml4e & PG_ACCESSED_MASK)) { + pml4e |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pml4e_addr, pml4e); + } + ptep &= pml4e ^ PG_NX_MASK; + pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & + a20_mask; + pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL); + pdpe = x86_ldq_phys(cs, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pdpe & rsvd_mask) { + goto do_fault_rsvd; + } + ptep &= pdpe ^ PG_NX_MASK; + if (!(pdpe & PG_ACCESSED_MASK)) { + pdpe |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pdpe_addr, pdpe); + } + if (pdpe & PG_PSE_MASK) { + /* 1 GB page */ + page_size = 1024 * 1024 * 1024; + pte_addr = pdpe_addr; + pte = pdpe; + goto do_check_protect; + } + } else +#endif + { + /* XXX: load them when cr3 is loaded ? 
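+               (real PAE hardware latches the four PDPTEs into internal
+               registers when CR3 is written; this walk re-reads them from
+               memory on every translation instead)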
*/ + pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & + a20_mask; + pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, false); + pdpe = x86_ldq_phys(cs, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + goto do_fault; + } + rsvd_mask |= PG_HI_USER_MASK; + if (pdpe & (rsvd_mask | PG_NX_MASK)) { + goto do_fault_rsvd; + } + ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; + } + + pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & + a20_mask; + pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL); + pde = x86_ldq_phys(cs, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pde & rsvd_mask) { + goto do_fault_rsvd; + } + ptep &= pde ^ PG_NX_MASK; + if (pde & PG_PSE_MASK) { + /* 2 MB page */ + page_size = 2048 * 1024; + pte_addr = pde_addr; + pte = pde; + goto do_check_protect; + } + /* 4 KB page */ + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pde_addr, pde); + } + pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & + a20_mask; + pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL); + pte = x86_ldq_phys(cs, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pte & rsvd_mask) { + goto do_fault_rsvd; + } + /* combine pde and pte nx, user and rw protections */ + ptep &= pte ^ PG_NX_MASK; + page_size = 4096; + } else { + uint32_t pde; + + /* page directory entry */ + pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & + a20_mask; + pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL); + pde = x86_ldl_phys(cs, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + goto do_fault; + } + ptep = pde | PG_NX_MASK; + + /* if PSE bit is set, then we use a 4MB page */ + if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { + page_size = 4096 * 1024; + pte_addr = pde_addr; + + /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved. + * Leave bits 20-13 in place for setting accessed/dirty bits below. + */ + pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); + rsvd_mask = 0x200000; + goto do_check_protect_pse36; + } + + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + x86_stl_phys_notdirty(cs, pde_addr, pde); + } + + /* page directory entry */ + pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & + a20_mask; + pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL); + pte = x86_ldl_phys(cs, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + goto do_fault; + } + /* combine pde and pte user and rw protections */ + ptep &= pte | PG_NX_MASK; + page_size = 4096; + rsvd_mask = 0; + } + +do_check_protect: + rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; +do_check_protect_pse36: + if (pte & rsvd_mask) { + goto do_fault_rsvd; + } + ptep ^= PG_NX_MASK; + + /* can the page can be put in the TLB? 
prot will tell us */
+    if (is_user && !(ptep & PG_USER_MASK)) {
+        goto do_fault_protect;
+    }
+
+    prot = 0;
+    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
+        prot |= PAGE_READ;
+        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
+            prot |= PAGE_WRITE;
+        }
+    }
+    if (!(ptep & PG_NX_MASK) &&
+        (mmu_idx == MMU_USER_IDX ||
+         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
+        prot |= PAGE_EXEC;
+    }
+    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
+        (ptep & PG_USER_MASK) && env->pkru) {
+        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
+        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
+        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
+        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+        if (pkru_ad) {
+            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
+        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
+            pkru_prot &= ~PAGE_WRITE;
+        }
+
+        prot &= pkru_prot;
+        if ((pkru_prot & (1 << is_write1)) == 0) {
+            assert(is_write1 != 2);
+            error_code |= PG_ERROR_PK_MASK;
+            goto do_fault_protect;
+        }
+    }
+
+    if ((prot & (1 << is_write1)) == 0) {
+        goto do_fault_protect;
+    }
+
+    /* yes, it can! */
+    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
+    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
+        pte |= PG_ACCESSED_MASK;
+        if (is_dirty) {
+            pte |= PG_DIRTY_MASK;
+        }
+        x86_stl_phys_notdirty(cs, pte_addr, pte);
+    }
+
+ do_mapping:
+
+    pte = pte & a20_mask;
+
+    /* align to page_size */
+    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
+    page_offset = addr & (page_size - 1);
+    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);
+
+    /* Even if 4MB pages, we map only one 4KB page in the cache to
+       avoid filling it too fast */
+    vaddr = addr & TARGET_PAGE_MASK;
+    paddr &= TARGET_PAGE_MASK;
+    assert(prot & (1 << is_write1));
+
+    // Unicorn: identity map guest virtual address to host virtual address
+    vaddr = addr & TARGET_PAGE_MASK;
+    paddr = vaddr;
+    //printf(">>> map address %"PRIx64" to %"PRIx64"\n", vaddr, paddr);
+
+    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
+                            prot, mmu_idx, page_size);
+    return 0;
+ do_fault_rsvd:
+    error_code |= PG_ERROR_RSVD_MASK;
+ do_fault_protect:
+    error_code |= PG_ERROR_P_MASK;
+ do_fault:
+    error_code |= (is_write << PG_ERROR_W_BIT);
+    if (is_user)
+        error_code |= PG_ERROR_U_MASK;
+    if (is_write1 == 2 &&
+        (((env->efer & MSR_EFER_NXE) &&
+          (env->cr[4] & CR4_PAE_MASK)) ||
+         (env->cr[4] & CR4_SMEP_MASK)))
+        error_code |= PG_ERROR_I_D_MASK;
+    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
+        /* cr2 is not modified in case of exceptions */
+        x86_stq_phys(cs,
+                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+                     addr);
+    } else {
+        env->cr[2] = addr;
+    }
+    env->error_code = error_code;
+    cs->exception_index = EXCP0E_PAGE;
+    return 1;
+}
+
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+                      MMUAccessType access_type, int mmu_idx,
+                      bool probe, uintptr_t retaddr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->retaddr = retaddr;
+    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
+        /* FIXME: On error in get_hphys we have already jumped out.
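+           Supporting a probing access (probe == true) on that path would
+           need it restructured, so for now a probe is simply asserted
+           never to fault.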
*/ + g_assert(!probe); + raise_exception_err_ra(env, cs->exception_index, + env->error_code, retaddr); + } + return true; +} diff --git a/qemu/target-i386/fpu_helper.c b/qemu/target/i386/fpu_helper.c similarity index 61% rename from qemu/target-i386/fpu_helper.c rename to qemu/target/i386/fpu_helper.c index d4791a2a..48a23c3b 100644 --- a/qemu/target-i386/fpu_helper.c +++ b/qemu/target/i386/fpu_helper.c @@ -17,12 +17,14 @@ * License along with this library; if not, see . */ +#include "qemu/osdep.h" #include #include "cpu.h" #include "exec/helper-proto.h" -#include "qemu/aes.h" #include "qemu/host-utils.h" +#include "exec/exec-all.h" #include "exec/cpu_ldst.h" +#include "fpu/softfloat.h" #define FPU_RC_MASK 0xc00 #define FPU_RC_NEAR 0x000 @@ -56,6 +58,25 @@ #define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL) #define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL) +static void cpu_clear_ignne(CPUX86State *env) +{ + env->hflags2 &= ~HF2_IGNNE_MASK; +} + +void cpu_set_ignne(CPUX86State *env) +{ + env->hflags2 |= HF2_IGNNE_MASK; + /* + * We get here in response to a write to port F0h. The chipset should + * deassert FP_IRQ and FERR# instead should stay signaled until FPSW_SE is + * cleared, because FERR# and FP_IRQ are two separate pins on real + * hardware. However, we don't model FERR# as a qemu_irq, so we just + * do directly what the chipset would do, i.e. deassert FP_IRQ. + */ + // qemu_irq_lower(ferr_irq); +} + + static inline void fpush(CPUX86State *env) { env->fpstt = (env->fpstt - 1) & 7; @@ -68,22 +89,24 @@ static inline void fpop(CPUX86State *env) env->fpstt = (env->fpstt + 1) & 7; } -static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr) +static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr, + uintptr_t retaddr) { CPU_LDoubleU temp; - temp.l.lower = cpu_ldq_data(env, ptr); - temp.l.upper = cpu_lduw_data(env, ptr + 8); + temp.l.lower = cpu_ldq_data_ra(env, ptr, retaddr); + temp.l.upper = cpu_lduw_data_ra(env, ptr + 8, retaddr); return temp.d; } -static inline void helper_fstt(CPUX86State *env, floatx80 f, target_ulong ptr) +static inline void helper_fstt(CPUX86State *env, floatx80 f, target_ulong ptr, + uintptr_t retaddr) { CPU_LDoubleU temp; temp.d = f; - cpu_stq_data(env, ptr, temp.l.lower); - cpu_stw_data(env, ptr + 8, temp.l.upper); + cpu_stq_data_ra(env, ptr, temp.l.lower, retaddr); + cpu_stw_data_ra(env, ptr + 8, temp.l.upper, retaddr); } /* x87 FPU helpers */ @@ -126,16 +149,11 @@ static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b) return floatx80_div(a, b, &env->fp_status); } -static void fpu_raise_exception(CPUX86State *env) +static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr) { if (env->cr[0] & CR0_NE_MASK) { - raise_exception(env, EXCP10_COPR); + raise_exception_ra(env, EXCP10_COPR, retaddr); } -#if !defined(CONFIG_USER_ONLY) - else { - cpu_set_ferr(env); - } -#endif } void helper_flds_FT0(CPUX86State *env, uint32_t val) @@ -251,16 +269,34 @@ int32_t helper_fist_ST0(CPUX86State *env) int32_t helper_fistl_ST0(CPUX86State *env) { int32_t val; + signed char old_exp_flags; + + old_exp_flags = get_float_exception_flags(&env->fp_status); + set_float_exception_flags(0, &env->fp_status); val = floatx80_to_int32(ST0, &env->fp_status); + if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) { + val = 0x80000000; + } + set_float_exception_flags(get_float_exception_flags(&env->fp_status) + | old_exp_flags, &env->fp_status); return val; } int64_t helper_fistll_ST0(CPUX86State 
*env) { int64_t val; + signed char old_exp_flags; + + old_exp_flags = get_float_exception_flags(&env->fp_status); + set_float_exception_flags(0, &env->fp_status); val = floatx80_to_int64(ST0, &env->fp_status); + if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) { + val = 0x8000000000000000ULL; + } + set_float_exception_flags(get_float_exception_flags(&env->fp_status) + | old_exp_flags, &env->fp_status); return val; } @@ -277,18 +313,12 @@ int32_t helper_fistt_ST0(CPUX86State *env) int32_t helper_fisttl_ST0(CPUX86State *env) { - int32_t val; - - val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); - return val; + return floatx80_to_int32_round_to_zero(ST0, &env->fp_status); } int64_t helper_fisttll_ST0(CPUX86State *env) { - int64_t val; - - val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status); - return val; + return floatx80_to_int64_round_to_zero(ST0, &env->fp_status); } void helper_fldt_ST0(CPUX86State *env, target_ulong ptr) @@ -296,14 +326,14 @@ void helper_fldt_ST0(CPUX86State *env, target_ulong ptr) int new_fpstt; new_fpstt = (env->fpstt - 1) & 7; - env->fpregs[new_fpstt].d = helper_fldt(env, ptr); + env->fpregs[new_fpstt].d = helper_fldt(env, ptr, GETPC()); env->fpstt = new_fpstt; env->fptags[new_fpstt] = 0; /* validate stack entry */ } void helper_fstt_ST0(CPUX86State *env, target_ulong ptr) { - helper_fstt(env, ST0, ptr); + helper_fstt(env, ST0, ptr, GETPC()); } void helper_fpush(CPUX86State *env) @@ -540,7 +570,7 @@ void helper_fldz_FT0(CPUX86State *env) { //FT0 = floatx80_zero; floatx80 zero = { 0x0000000000000000LL, 0x0000 }; - FT0 = zero; + ST0 = zero; } uint32_t helper_fnstsw(CPUX86State *env) @@ -602,14 +632,18 @@ void helper_fclex(CPUX86State *env) void helper_fwait(CPUX86State *env) { if (env->fpus & FPUS_SE) { - fpu_raise_exception(env); + fpu_raise_exception(env, GETPC()); } } -void helper_fninit(CPUX86State *env) +static void do_fninit(CPUX86State *env) { env->fpus = 0; env->fpstt = 0; + env->fpcs = 0; + env->fpds = 0; + env->fpip = 0; + env->fpdp = 0; cpu_set_fpuc(env, 0x37f); env->fptags[0] = 1; env->fptags[1] = 1; @@ -621,6 +655,11 @@ void helper_fninit(CPUX86State *env) env->fptags[7] = 1; } +void helper_fninit(CPUX86State *env) +{ + do_fninit(env); +} + /* BCD ops */ void helper_fbld_ST0(CPUX86State *env, target_ulong ptr) @@ -632,12 +671,12 @@ void helper_fbld_ST0(CPUX86State *env, target_ulong ptr) val = 0; for (i = 8; i >= 0; i--) { - v = cpu_ldub_data(env, ptr + i); + v = cpu_ldub_data_ra(env, ptr + i, GETPC()); val = (val * 100) + ((v >> 4) * 10) + (v & 0xf); } tmp = int64_to_floatx80(val, &env->fp_status); - if (cpu_ldub_data(env, ptr + 9) & 0x80) { - floatx80_chs(tmp); + if (cpu_ldub_data_ra(env, ptr + 9, GETPC()) & 0x80) { + tmp = floatx80_chs(tmp); } fpush(env); ST0 = tmp; @@ -653,12 +692,12 @@ void helper_fbst_ST0(CPUX86State *env, target_ulong ptr) mem_ref = ptr; mem_end = mem_ref + 9; if (val < 0) { - cpu_stb_data(env, mem_end, 0x80); + cpu_stb_data_ra(env, mem_end, 0x80, GETPC()); if (val != 0x8000000000000000LL) { val = -val; } } else { - cpu_stb_data(env, mem_end, 0x00); + cpu_stb_data_ra(env, mem_end, 0x00, GETPC()); } while (mem_ref < mem_end) { if (val == 0) { @@ -667,10 +706,10 @@ void helper_fbst_ST0(CPUX86State *env, target_ulong ptr) v = val % 100; val = val / 100; v = (int)((unsigned int)(v / 10) << 4) | (v % 10); - cpu_stb_data(env, mem_ref++, v); + cpu_stb_data_ra(env, mem_ref++, v, GETPC()); } while (mem_ref < mem_end) { - cpu_stb_data(env, mem_ref++, 0); + cpu_stb_data_ra(env, mem_ref++, 0, GETPC()); } } 
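/* fbld/fbst above use the x87 packed-BCD format: nine little-endian bytes
 * of two decimal digits each, plus a sign byte at offset 9 (0x80 when
 * negative). Storing -1234 via helper_fbst_ST0, for example, leaves
 * ptr[0] = 0x34, ptr[1] = 0x12, ptr[2..8] = 0x00 and ptr[9] = 0x80. */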
@@ -962,7 +1001,11 @@ void helper_fxam_ST0(CPUX86State *env) env->fpus |= 0x200; /* C1 <-- 1 */ } - /* XXX: test fptags too */ + if (env->fptags[env->fpstt]) { + env->fpus |= 0x4100; /* Empty */ + return; + } + expdif = EXPD(temp); if (expdif == MAXEXPD) { if (MANTD(temp) == 0x8000000000000000ULL) { @@ -981,7 +1024,8 @@ void helper_fxam_ST0(CPUX86State *env) } } -void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32) +static void do_fstenv(CPUX86State *env, target_ulong ptr, int data32, + uintptr_t retaddr) { int fpus, fptag, exp, i; uint64_t mant; @@ -1010,73 +1054,85 @@ void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32) if (data32) { /* 32 bit */ - cpu_stl_data(env, ptr, env->fpuc); - cpu_stl_data(env, ptr + 4, fpus); - cpu_stl_data(env, ptr + 8, fptag); - cpu_stl_data(env, ptr + 12, (uint32_t)env->fpip); /* fpip */ - cpu_stl_data(env, ptr + 16, 0); /* fpcs */ - cpu_stl_data(env, ptr + 20, 0); /* fpoo */ - cpu_stl_data(env, ptr + 24, 0); /* fpos */ + cpu_stl_data_ra(env, ptr, env->fpuc, retaddr); + cpu_stl_data_ra(env, ptr + 4, fpus, retaddr); + cpu_stl_data_ra(env, ptr + 8, fptag, retaddr); + cpu_stl_data_ra(env, ptr + 12, env->fpip, retaddr); /* fpip */ + cpu_stl_data_ra(env, ptr + 16, env->fpcs, retaddr); /* fpcs */ + cpu_stl_data_ra(env, ptr + 20, env->fpdp, retaddr); /* fpoo */ + cpu_stl_data_ra(env, ptr + 24, env->fpds, retaddr); /* fpos */ } else { /* 16 bit */ - cpu_stw_data(env, ptr, env->fpuc); - cpu_stw_data(env, ptr + 2, fpus); - cpu_stw_data(env, ptr + 4, fptag); - cpu_stw_data(env, ptr + 6, (uint32_t)env->fpip); - cpu_stw_data(env, ptr + 8, 0); - cpu_stw_data(env, ptr + 10, 0); - cpu_stw_data(env, ptr + 12, 0); + cpu_stw_data_ra(env, ptr, env->fpuc, retaddr); + cpu_stw_data_ra(env, ptr + 2, fpus, retaddr); + cpu_stw_data_ra(env, ptr + 4, fptag, retaddr); + cpu_stw_data_ra(env, ptr + 6, env->fpip, retaddr); + cpu_stw_data_ra(env, ptr + 8, env->fpcs, retaddr); + cpu_stw_data_ra(env, ptr + 10, env->fpdp, retaddr); + cpu_stw_data_ra(env, ptr + 12, env->fpds, retaddr); } - } -void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32) +void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32) +{ + do_fstenv(env, ptr, data32, GETPC()); +} + +static void cpu_set_fpus(CPUX86State *env, uint16_t fpus) +{ + env->fpstt = (fpus >> 11) & 7; + env->fpus = fpus & ~0x3800 & ~FPUS_B; + env->fpus |= env->fpus & FPUS_SE ? FPUS_B : 0; + if (!(env->fpus & FPUS_SE)) { + /* + * Here the processor deasserts FERR#; in response, the chipset deasserts + * IGNNE#. 
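+         * Neither pin is modelled here, so clearing HF2_IGNNE_MASK via
+         * cpu_clear_ignne() is the entire observable effect.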
+ */ + cpu_clear_ignne(env); + } +} + +static void do_fldenv(CPUX86State *env, target_ulong ptr, int data32, + uintptr_t retaddr) { int i, fpus, fptag; if (data32) { - cpu_set_fpuc(env, cpu_lduw_data(env, ptr)); - fpus = cpu_lduw_data(env, ptr + 4); - fptag = cpu_lduw_data(env, ptr + 8); + cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr)); + fpus = cpu_lduw_data_ra(env, ptr + 4, retaddr); + fptag = cpu_lduw_data_ra(env, ptr + 8, retaddr); } else { - cpu_set_fpuc(env, cpu_lduw_data(env, ptr)); - fpus = cpu_lduw_data(env, ptr + 2); - fptag = cpu_lduw_data(env, ptr + 4); + cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr)); + fpus = cpu_lduw_data_ra(env, ptr + 2, retaddr); + fptag = cpu_lduw_data_ra(env, ptr + 4, retaddr); } - env->fpstt = (fpus >> 11) & 7; - env->fpus = fpus & ~0x3800; + cpu_set_fpus(env, fpus); for (i = 0; i < 8; i++) { env->fptags[i] = ((fptag & 3) == 3); fptag >>= 2; } } +void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32) +{ + do_fldenv(env, ptr, data32, GETPC()); +} + void helper_fsave(CPUX86State *env, target_ulong ptr, int data32) { floatx80 tmp; int i; - helper_fstenv(env, ptr, data32); + do_fstenv(env, ptr, data32, GETPC()); ptr += (14 << data32); for (i = 0; i < 8; i++) { tmp = ST(i); - helper_fstt(env, tmp, ptr); + helper_fstt(env, tmp, ptr, GETPC()); ptr += 10; } - /* fninit */ - env->fpus = 0; - env->fpstt = 0; - cpu_set_fpuc(env, 0x37f); - env->fptags[0] = 1; - env->fptags[1] = 1; - env->fptags[2] = 1; - env->fptags[3] = 1; - env->fptags[4] = 1; - env->fptags[5] = 1; - env->fptags[6] = 1; - env->fptags[7] = 1; + do_fninit(env); } void helper_frstor(CPUX86State *env, target_ulong ptr, int data32) @@ -1084,157 +1140,430 @@ void helper_frstor(CPUX86State *env, target_ulong ptr, int data32) floatx80 tmp; int i; - helper_fldenv(env, ptr, data32); + do_fldenv(env, ptr, data32, GETPC()); ptr += (14 << data32); for (i = 0; i < 8; i++) { - tmp = helper_fldt(env, ptr); + tmp = helper_fldt(env, ptr, GETPC()); ST(i) = tmp; ptr += 10; } } -#if defined(CONFIG_USER_ONLY) -void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32) -{ - helper_fsave(env, ptr, data32); -} +#define XO(X) offsetof(X86XSaveArea, X) -void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32) +static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra) { - helper_frstor(env, ptr, data32); -} -#endif - -void helper_fxsave(CPUX86State *env, target_ulong ptr, int data64) -{ - int fpus, fptag, i, nb_xmm_regs; - floatx80 tmp; + int fpus, fptag, i; target_ulong addr; - /* The operand must be 16 byte aligned */ - if (ptr & 0xf) { - raise_exception(env, EXCP0D_GPF); - } - fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; fptag = 0; for (i = 0; i < 8; i++) { fptag |= (env->fptags[i] << i); } - cpu_stw_data(env, ptr, env->fpuc); - cpu_stw_data(env, ptr + 2, fpus); - cpu_stw_data(env, ptr + 4, fptag ^ 0xff); -#ifdef TARGET_X86_64 - if (data64) { - cpu_stq_data(env, ptr + 0x08, 0); /* rip */ - cpu_stq_data(env, ptr + 0x10, 0); /* rdp */ - } else -#endif - { - cpu_stl_data(env, ptr + 0x08, 0); /* eip */ - cpu_stl_data(env, ptr + 0x0c, 0); /* sel */ - cpu_stl_data(env, ptr + 0x10, 0); /* dp */ - cpu_stl_data(env, ptr + 0x14, 0); /* sel */ - } - addr = ptr + 0x20; + cpu_stw_data_ra(env, ptr + XO(legacy.fcw), env->fpuc, ra); + cpu_stw_data_ra(env, ptr + XO(legacy.fsw), fpus, ra); + cpu_stw_data_ra(env, ptr + XO(legacy.ftw), fptag ^ 0xff, ra); + + /* In 32-bit mode this is eip, sel, dp, sel. + In 64-bit mode this is rip, rdp. 
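+       (a pair of 64-bit slots in the legacy FXSAVE image, which is why a
+       single stq per field suffices below).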
+ But in either case we don't write actual data, just zeros. */ + cpu_stq_data_ra(env, ptr + XO(legacy.fpip), 0, ra); /* eip+sel; rip */ + cpu_stq_data_ra(env, ptr + XO(legacy.fpdp), 0, ra); /* edp+sel; rdp */ + + addr = ptr + XO(legacy.fpregs); for (i = 0; i < 8; i++) { - tmp = ST(i); - helper_fstt(env, tmp, addr); + floatx80 tmp = ST(i); + helper_fstt(env, tmp, addr, ra); addr += 16; } +} + +static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr), env->mxcsr, ra); + cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr_mask), 0x0000ffff, ra); +} + +static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + int i, nb_xmm_regs; + target_ulong addr; + + if (env->hflags & HF_CS64_MASK) { + nb_xmm_regs = 16; + } else { + nb_xmm_regs = 8; + } + + addr = ptr + XO(legacy.xmm_regs); + for (i = 0; i < nb_xmm_regs; i++) { + cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), ra); + cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), ra); + addr += 16; + } +} + +static void do_xsave_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs); + int i; + + for (i = 0; i < 4; i++, addr += 16) { + cpu_stq_data_ra(env, addr, env->bnd_regs[i].lb, ra); + cpu_stq_data_ra(env, addr + 8, env->bnd_regs[i].ub, ra); + } +} + +static void do_xsave_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu), + env->bndcs_regs.cfgu, ra); + cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts), + env->bndcs_regs.sts, ra); +} + +static void do_xsave_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + cpu_stq_data_ra(env, ptr, env->pkru, ra); +} + +void helper_fxsave(CPUX86State *env, target_ulong ptr) +{ + uintptr_t ra = GETPC(); + + /* The operand must be 16 byte aligned */ + if (ptr & 0xf) { + raise_exception_ra(env, EXCP0D_GPF, ra); + } + + do_xsave_fpu(env, ptr, ra); if (env->cr[4] & CR4_OSFXSR_MASK) { - /* XXX: finish it */ - cpu_stl_data(env, ptr + 0x18, env->mxcsr); /* mxcsr */ - cpu_stl_data(env, ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */ - if (env->hflags & HF_CS64_MASK) { - nb_xmm_regs = 16; - } else { - nb_xmm_regs = 8; - } - addr = ptr + 0xa0; + do_xsave_mxcsr(env, ptr, ra); /* Fast FXSAVE leaves out the XMM registers */ if (!(env->efer & MSR_EFER_FFXSR) || (env->hflags & HF_CPL_MASK) || !(env->hflags & HF_LMA_MASK)) { - for (i = 0; i < nb_xmm_regs; i++) { - cpu_stq_data(env, addr, env->xmm_regs[i].XMM_Q(0)); - cpu_stq_data(env, addr + 8, env->xmm_regs[i].XMM_Q(1)); - addr += 16; - } + do_xsave_sse(env, ptr, ra); } } } -void helper_fxrstor(CPUX86State *env, target_ulong ptr, int data64) +static uint64_t get_xinuse(CPUX86State *env) { - int i, fpus, fptag, nb_xmm_regs; - floatx80 tmp; - target_ulong addr; + uint64_t inuse = -1; - /* The operand must be 16 byte aligned */ - if (ptr & 0xf) { - raise_exception(env, EXCP0D_GPF); + /* For the most part, we don't track XINUSE. We could calculate it + here for all components, but it's probably less work to simply + indicate in use. That said, the state of BNDREGS is important + enough to track in HFLAGS, so we might as well use that here. */ + if ((env->hflags & HF_MPX_IU_MASK) == 0) { + inuse &= ~XSTATE_BNDREGS_MASK; + } + return inuse; +} + +static void do_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm, + uint64_t inuse, uint64_t opt, uintptr_t ra) +{ + uint64_t old_bv, new_bv; + + /* The OS must have enabled XSAVE. 
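+       With CR4.OSXSAVE clear the whole XSAVE family decodes as an
+       undefined opcode, hence the #UD (EXCP06_ILLOP) below rather than #GP.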
*/ + if (!(env->cr[4] & CR4_OSXSAVE_MASK)) { + raise_exception_ra(env, EXCP06_ILLOP, ra); } - cpu_set_fpuc(env, cpu_lduw_data(env, ptr)); - fpus = cpu_lduw_data(env, ptr + 2); - fptag = cpu_lduw_data(env, ptr + 4); - env->fpstt = (fpus >> 11) & 7; - env->fpus = fpus & ~0x3800; + /* The operand must be 64 byte aligned. */ + if (ptr & 63) { + raise_exception_ra(env, EXCP0D_GPF, ra); + } + + /* Never save anything not enabled by XCR0. */ + rfbm &= env->xcr0; + opt &= rfbm; + + if (opt & XSTATE_FP_MASK) { + do_xsave_fpu(env, ptr, ra); + } + if (rfbm & XSTATE_SSE_MASK) { + /* Note that saving MXCSR is not suppressed by XSAVEOPT. */ + do_xsave_mxcsr(env, ptr, ra); + } + if (opt & XSTATE_SSE_MASK) { + do_xsave_sse(env, ptr, ra); + } + if (opt & XSTATE_BNDREGS_MASK) { + do_xsave_bndregs(env, ptr + XO(bndreg_state), ra); + } + if (opt & XSTATE_BNDCSR_MASK) { + do_xsave_bndcsr(env, ptr + XO(bndcsr_state), ra); + } + if (opt & XSTATE_PKRU_MASK) { + do_xsave_pkru(env, ptr + XO(pkru_state), ra); + } + + /* Update the XSTATE_BV field. */ + old_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra); + new_bv = (old_bv & ~rfbm) | (inuse & rfbm); + cpu_stq_data_ra(env, ptr + XO(header.xstate_bv), new_bv, ra); +} + +void helper_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm) +{ + do_xsave(env, ptr, rfbm, get_xinuse(env), -1, GETPC()); +} + +void helper_xsaveopt(CPUX86State *env, target_ulong ptr, uint64_t rfbm) +{ + uint64_t inuse = get_xinuse(env); + do_xsave(env, ptr, rfbm, inuse, inuse, GETPC()); +} + +static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + int i, fpuc, fpus, fptag; + target_ulong addr; + + fpuc = cpu_lduw_data_ra(env, ptr + XO(legacy.fcw), ra); + fpus = cpu_lduw_data_ra(env, ptr + XO(legacy.fsw), ra); + fptag = cpu_lduw_data_ra(env, ptr + XO(legacy.ftw), ra); + cpu_set_fpuc(env, fpuc); + cpu_set_fpus(env, fpus); fptag ^= 0xff; for (i = 0; i < 8; i++) { env->fptags[i] = ((fptag >> i) & 1); } - addr = ptr + 0x20; + addr = ptr + XO(legacy.fpregs); for (i = 0; i < 8; i++) { - tmp = helper_fldt(env, addr); + floatx80 tmp = helper_fldt(env, addr, ra); ST(i) = tmp; addr += 16; } +} + +static void do_xrstor_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + XO(legacy.mxcsr), ra)); +} + +static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + int i, nb_xmm_regs; + target_ulong addr; + + if (env->hflags & HF_CS64_MASK) { + nb_xmm_regs = 16; + } else { + nb_xmm_regs = 8; + } + + addr = ptr + XO(legacy.xmm_regs); + for (i = 0; i < nb_xmm_regs; i++) { + env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, ra); + env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, ra); + addr += 16; + } +} + +static void do_xrstor_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs); + int i; + + for (i = 0; i < 4; i++, addr += 16) { + env->bnd_regs[i].lb = cpu_ldq_data_ra(env, addr, ra); + env->bnd_regs[i].ub = cpu_ldq_data_ra(env, addr + 8, ra); + } +} + +static void do_xrstor_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + /* FIXME: Extend highest implemented bit of linear address. 
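+       That is, the base field of BNDCFGU presumably ought to be
+       sign-extended from the highest implemented linear-address bit;
+       it is loaded verbatim for now.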
*/ + env->bndcs_regs.cfgu + = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu), ra); + env->bndcs_regs.sts + = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts), ra); +} + +static void do_xrstor_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra) +{ + env->pkru = cpu_ldq_data_ra(env, ptr, ra); +} + +void helper_fxrstor(CPUX86State *env, target_ulong ptr) +{ + uintptr_t ra = GETPC(); + + /* The operand must be 16 byte aligned */ + if (ptr & 0xf) { + raise_exception_ra(env, EXCP0D_GPF, ra); + } + + do_xrstor_fpu(env, ptr, ra); if (env->cr[4] & CR4_OSFXSR_MASK) { - /* XXX: finish it */ - cpu_set_mxcsr(env, cpu_ldl_data(env, ptr + 0x18)); - /* cpu_ldl_data(env, ptr + 0x1c); */ - if (env->hflags & HF_CS64_MASK) { - nb_xmm_regs = 16; - } else { - nb_xmm_regs = 8; - } - addr = ptr + 0xa0; - /* Fast FXRESTORE leaves out the XMM registers */ + do_xrstor_mxcsr(env, ptr, ra); + /* Fast FXRSTOR leaves out the XMM registers */ if (!(env->efer & MSR_EFER_FFXSR) || (env->hflags & HF_CPL_MASK) || !(env->hflags & HF_LMA_MASK)) { - for (i = 0; i < nb_xmm_regs; i++) { - env->xmm_regs[i].XMM_Q(0) = cpu_ldq_data(env, addr); - env->xmm_regs[i].XMM_Q(1) = cpu_ldq_data(env, addr + 8); - addr += 16; - } + do_xrstor_sse(env, ptr, ra); } } } -void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f) +void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm) { - CPU_LDoubleU temp; + uintptr_t ra = GETPC(); + uint64_t xstate_bv, xcomp_bv, reserve0; - temp.d = f; - *pmant = temp.l.lower; - *pexp = temp.l.upper; + rfbm &= env->xcr0; + + /* The OS must have enabled XSAVE. */ + if (!(env->cr[4] & CR4_OSXSAVE_MASK)) { + raise_exception_ra(env, EXCP06_ILLOP, ra); + } + + /* The operand must be 64 byte aligned. */ + if (ptr & 63) { + raise_exception_ra(env, EXCP0D_GPF, ra); + } + + xstate_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra); + + if ((int64_t)xstate_bv < 0) { + /* FIXME: Compact form. */ + raise_exception_ra(env, EXCP0D_GPF, ra); + } + + /* Standard form. */ + + /* The XSTATE_BV field must not set bits not present in XCR0. */ + if (xstate_bv & ~env->xcr0) { + raise_exception_ra(env, EXCP0D_GPF, ra); + } + + /* The XCOMP_BV field must be zero. Note that, as of the April 2016 + revision, the description of the XSAVE Header (Vol 1, Sec 13.4.2) + describes only XCOMP_BV, but the description of the standard form + of XRSTOR (Vol 1, Sec 13.8.1) checks bytes 23:8 for zero, which + includes the next 64-bit field. */ + xcomp_bv = cpu_ldq_data_ra(env, ptr + XO(header.xcomp_bv), ra); + reserve0 = cpu_ldq_data_ra(env, ptr + XO(header.reserve0), ra); + if (xcomp_bv || reserve0) { + raise_exception_ra(env, EXCP0D_GPF, ra); + } + + if (rfbm & XSTATE_FP_MASK) { + if (xstate_bv & XSTATE_FP_MASK) { + do_xrstor_fpu(env, ptr, ra); + } else { + do_fninit(env); + memset(env->fpregs, 0, sizeof(env->fpregs)); + } + } + if (rfbm & XSTATE_SSE_MASK) { + /* Note that the standard form of XRSTOR loads MXCSR from memory + whether or not the XSTATE_BV bit is set. */ + do_xrstor_mxcsr(env, ptr, ra); + if (xstate_bv & XSTATE_SSE_MASK) { + do_xrstor_sse(env, ptr, ra); + } else { + /* ??? When AVX is implemented, we may have to be more + selective in the clearing. 
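+          (the memset below clears the full vector register file, not just
+          the XMM lanes that actually belong to the SSE state component).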
*/ + memset(env->xmm_regs, 0, sizeof(env->xmm_regs)); + } + } + if (rfbm & XSTATE_BNDREGS_MASK) { + if (xstate_bv & XSTATE_BNDREGS_MASK) { + do_xrstor_bndregs(env, ptr + XO(bndreg_state), ra); + env->hflags |= HF_MPX_IU_MASK; + } else { + memset(env->bnd_regs, 0, sizeof(env->bnd_regs)); + env->hflags &= ~HF_MPX_IU_MASK; + } + } + if (rfbm & XSTATE_BNDCSR_MASK) { + if (xstate_bv & XSTATE_BNDCSR_MASK) { + do_xrstor_bndcsr(env, ptr + XO(bndcsr_state), ra); + } else { + memset(&env->bndcs_regs, 0, sizeof(env->bndcs_regs)); + } + cpu_sync_bndcs_hflags(env); + } + if (rfbm & XSTATE_PKRU_MASK) { + uint64_t old_pkru = env->pkru; + if (xstate_bv & XSTATE_PKRU_MASK) { + do_xrstor_pkru(env, ptr + XO(pkru_state), ra); + } else { + env->pkru = 0; + } + if (env->pkru != old_pkru) { + CPUState *cs = env_cpu(env); + tlb_flush(cs); + } + } } -floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper) -{ - CPU_LDoubleU temp; +#undef XO - temp.l.upper = upper; - temp.l.lower = mant; - return temp.d; +uint64_t helper_xgetbv(CPUX86State *env, uint32_t ecx) +{ + /* The OS must have enabled XSAVE. */ + if (!(env->cr[4] & CR4_OSXSAVE_MASK)) { + raise_exception_ra(env, EXCP06_ILLOP, GETPC()); + } + + switch (ecx) { + case 0: + return env->xcr0; + case 1: + if (env->features[FEAT_XSAVE] & CPUID_XSAVE_XGETBV1) { + return env->xcr0 & get_xinuse(env); + } + break; + } + raise_exception_ra(env, EXCP0D_GPF, GETPC()); +} + +void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask) +{ + uint32_t dummy, ena_lo, ena_hi; + uint64_t ena; + + /* The OS must have enabled XSAVE. */ + if (!(env->cr[4] & CR4_OSXSAVE_MASK)) { + raise_exception_ra(env, EXCP06_ILLOP, GETPC()); + } + + /* Only XCR0 is defined at present; the FPU may not be disabled. */ + if (ecx != 0 || (mask & XSTATE_FP_MASK) == 0) { + goto do_gpf; + } + + /* Disallow enabling unimplemented features. */ + cpu_x86_cpuid(env, 0x0d, 0, &ena_lo, &dummy, &dummy, &ena_hi); + ena = ((uint64_t)ena_hi << 32) | ena_lo; + if (mask & ~ena) { + goto do_gpf; + } + + /* Disallow enabling only half of MPX. */ + if ((mask ^ (mask * (XSTATE_BNDCSR_MASK / XSTATE_BNDREGS_MASK))) + & XSTATE_BNDCSR_MASK) { + goto do_gpf; + } + + env->xcr0 = mask; + cpu_sync_bndcs_hflags(env); + return; + + do_gpf: + raise_exception_ra(env, EXCP0D_GPF, GETPC()); } /* MMX/SSE */ @@ -1248,12 +1577,11 @@ floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper) #define SSE_RC_CHOP 0x6000 #define SSE_FZ 0x8000 -void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr) +void update_mxcsr_status(CPUX86State *env) { + uint32_t mxcsr = env->mxcsr; int rnd_type; - env->mxcsr = mxcsr; - /* set rounding mode */ switch (mxcsr & SSE_RC_MASK) { default: @@ -1279,12 +1607,6 @@ void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr) set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status); } -void cpu_set_fpuc(CPUX86State *env, uint16_t val) -{ - env->fpuc = val; - update_fp_status(env); -} - void helper_ldmxcsr(CPUX86State *env, uint32_t val) { cpu_set_mxcsr(env, val); diff --git a/qemu/target/i386/helper.c b/qemu/target/i386/helper.c new file mode 100644 index 00000000..8a3540e0 --- /dev/null +++ b/qemu/target/i386/helper.c @@ -0,0 +1,521 @@ +/* + * i386 helpers (without register variable usage) + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "sysemu/tcg.h" + +void cpu_sync_bndcs_hflags(CPUX86State *env) +{ + uint32_t hflags = env->hflags; + uint32_t hflags2 = env->hflags2; + uint32_t bndcsr; + + if ((hflags & HF_CPL_MASK) == 3) { + bndcsr = env->bndcs_regs.cfgu; + } else { + bndcsr = env->msr_bndcfgs; + } + + if ((env->cr[4] & CR4_OSXSAVE_MASK) + && (env->xcr0 & XSTATE_BNDCSR_MASK) + && (bndcsr & BNDCFG_ENABLE)) { + hflags |= HF_MPX_EN_MASK; + } else { + hflags &= ~HF_MPX_EN_MASK; + } + + if (bndcsr & BNDCFG_BNDPRESERVE) { + hflags2 |= HF2_MPX_PR_MASK; + } else { + hflags2 &= ~HF2_MPX_PR_MASK; + } + + env->hflags = hflags; + env->hflags2 = hflags2; +} + +static void cpu_x86_version(CPUX86State *env, int *family, int *model) +{ + int cpuver = env->cpuid_version; + + if (family == NULL || model == NULL) { + return; + } + + *family = (cpuver >> 8) & 0x0f; + *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f); +} + +/* Broadcast MCA signal for processor version 06H_EH and above */ +int cpu_x86_support_mca_broadcast(CPUX86State *env) +{ + int family = 0; + int model = 0; + + cpu_x86_version(env, &family, &model); + if ((family == 6 && model >= 14) || family > 6) { + return 1; + } + + return 0; +} + +/***********************************************************/ +/* x86 mmu */ +/* XXX: add PGE support */ + +void x86_cpu_set_a20(X86CPU *cpu, int a20_state) +{ + CPUX86State *env = &cpu->env; + + a20_state = (a20_state != 0); + if (a20_state != ((env->a20_mask >> 20) & 1)) { + CPUState *cs = CPU(cpu); + + qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state); + /* if the cpu is currently executing code, we must unlink it and + all the potentially executing TB */ + cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); + + /* when a20 is changed, all the MMU mappings are invalid, so + we must flush everything */ + tlb_flush(cs); + env->a20_mask = ~(1 << 20) | (a20_state << 20); + } +} + +void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) +{ + X86CPU *cpu = env_archcpu(env); + int pe_state; + + qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0); + if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) != + (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) { + tlb_flush(CPU(cpu)); + } + +#ifdef TARGET_X86_64 + if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) && + (env->efer & MSR_EFER_LME)) { + /* enter in long mode */ + /* XXX: generate an exception */ + if (!(env->cr[4] & CR4_PAE_MASK)) + return; + env->efer |= MSR_EFER_LMA; + env->hflags |= HF_LMA_MASK; + } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) && + (env->efer & MSR_EFER_LMA)) { + /* exit long mode */ + env->efer &= ~MSR_EFER_LMA; + env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK); + env->eip &= 0xffffffff; + } +#endif + env->cr[0] = new_cr0 | CR0_ET_MASK; + + /* update PE flag in hidden flags */ + pe_state = (env->cr[0] & CR0_PE_MASK); + env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT); + /* ensure that ADDSEG is always set in real mode */ + env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT); + /* update FPU flags */ + env->hflags = 
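+    /* CR0 keeps MP, EM and TS in consecutive bits starting at bit 1, so a
+       single shift by (HF_MP_SHIFT - 1) drops all three into their hflags
+       positions before masking: */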
(env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) | + ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); +} + +/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in + the PDPT */ +void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3) +{ + env->cr[3] = new_cr3; + if (env->cr[0] & CR0_PG_MASK) { + qemu_log_mask(CPU_LOG_MMU, + "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3); + tlb_flush(env_cpu(env)); + } +} + +void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) +{ + uint32_t hflags; + +#if defined(DEBUG_MMU) + printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4); +#endif + if ((new_cr4 ^ env->cr[4]) & + (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK | + CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) { + tlb_flush(env_cpu(env)); + } + + /* Clear bits we're going to recompute. */ + hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK); + + /* SSE handling */ + if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) { + new_cr4 &= ~CR4_OSFXSR_MASK; + } + if (new_cr4 & CR4_OSFXSR_MASK) { + hflags |= HF_OSFXSR_MASK; + } + + if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) { + new_cr4 &= ~CR4_SMAP_MASK; + } + if (new_cr4 & CR4_SMAP_MASK) { + hflags |= HF_SMAP_MASK; + } + + if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) { + new_cr4 &= ~CR4_PKE_MASK; + } + + env->cr[4] = new_cr4; + env->hflags = hflags; + + cpu_sync_bndcs_hflags(env); +} + +hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, + MemTxAttrs *attrs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + target_ulong pde_addr, pte_addr; + uint64_t pte; + int32_t a20_mask; + uint32_t page_offset; + int page_size; + + *attrs = cpu_get_mem_attrs(env); + + a20_mask = x86_get_a20_mask(env); + if (!(env->cr[0] & CR0_PG_MASK)) { + pte = addr & a20_mask; + page_size = 4096; + } else if (env->cr[4] & CR4_PAE_MASK) { + target_ulong pdpe_addr; + uint64_t pde, pdpe; + +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + bool la57 = env->cr[4] & CR4_LA57_MASK; + uint64_t pml5e_addr, pml5e; + uint64_t pml4e_addr, pml4e; + int32_t sext; + + /* test virtual address sign extension */ + sext = la57 ? 
(int64_t)addr >> 56 : (int64_t)addr >> 47; + if (sext != 0 && sext != -1) { + return -1; + } + + if (la57) { + pml5e_addr = ((env->cr[3] & ~0xfff) + + (((addr >> 48) & 0x1ff) << 3)) & a20_mask; + pml5e = x86_ldq_phys(cs, pml5e_addr); + if (!(pml5e & PG_PRESENT_MASK)) { + return -1; + } + } else { + pml5e = env->cr[3]; + } + + pml4e_addr = ((pml5e & PG_ADDRESS_MASK) + + (((addr >> 39) & 0x1ff) << 3)) & a20_mask; + pml4e = x86_ldq_phys(cs, pml4e_addr); + if (!(pml4e & PG_PRESENT_MASK)) { + return -1; + } + pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + + (((addr >> 30) & 0x1ff) << 3)) & a20_mask; + pdpe = x86_ldq_phys(cs, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + return -1; + } + if (pdpe & PG_PSE_MASK) { + page_size = 1024 * 1024 * 1024; + pte = pdpe; + goto out; + } + + } else +#endif + { + pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & + a20_mask; + pdpe = x86_ldq_phys(cs, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) + return -1; + } + + pde_addr = ((pdpe & PG_ADDRESS_MASK) + + (((addr >> 21) & 0x1ff) << 3)) & a20_mask; + pde = x86_ldq_phys(cs, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + return -1; + } + if (pde & PG_PSE_MASK) { + /* 2 MB page */ + page_size = 2048 * 1024; + pte = pde; + } else { + /* 4 KB page */ + pte_addr = ((pde & PG_ADDRESS_MASK) + + (((addr >> 12) & 0x1ff) << 3)) & a20_mask; + page_size = 4096; + pte = x86_ldq_phys(cs, pte_addr); + } + if (!(pte & PG_PRESENT_MASK)) { + return -1; + } + } else { + uint32_t pde; + + /* page directory entry */ + pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask; + pde = x86_ldl_phys(cs, pde_addr); + if (!(pde & PG_PRESENT_MASK)) + return -1; + if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { + pte = pde | ((pde & 0x1fe000LL) << (32 - 13)); + page_size = 4096 * 1024; + } else { + /* page directory entry */ + pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask; + pte = x86_ldl_phys(cs, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + return -1; + } + page_size = 4096; + } + pte = pte & a20_mask; + } + +#ifdef TARGET_X86_64 +out: +#endif + pte &= PG_ADDRESS_MASK & ~(page_size - 1); + page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); + return pte | page_offset; +} + +int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, + target_ulong *base, unsigned int *limit, + unsigned int *flags) +{ + CPUState *cs = env_cpu(env); + SegmentCache *dt; + target_ulong ptr; + uint32_t e1, e2; + int index; + + if (selector & 0x4) + dt = &env->ldt; + else + dt = &env->gdt; + index = selector & ~7; + ptr = dt->base + index; + if ((index + 7) > dt->limit + || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0 + || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0) + return 0; + + *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); + *limit = (e1 & 0xffff) | (e2 & 0x000f0000); + if (e2 & DESC_G_MASK) + *limit = (*limit << 12) | 0xfff; + *flags = e2; + + return 1; +} + +void do_cpu_init(X86CPU *cpu) +{ + CPUState *cs = CPU(cpu); + CPUX86State *env = &cpu->env; + CPUX86State *save = g_new(CPUX86State, 1); + int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI; + + *save = *env; + + cpu_reset(cs); + cs->interrupt_request = sipi; + memcpy(&env->start_init_save, &save->start_init_save, + offsetof(CPUX86State, end_init_save) - + offsetof(CPUX86State, start_init_save)); + g_free(save); + + // apic_init_reset(cpu->apic_state); +} + +void do_cpu_sipi(X86CPU *cpu) +{ + // apic_sipi(cpu->apic_state); +} + +/* Frob eflags into and out of the CPU 
temporary format. */ + +void x86_cpu_exec_enter(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); + env->df = 1 - (2 * ((env->eflags >> 10) & 1)); + CC_OP = CC_OP_EFLAGS; + env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); +} + +void x86_cpu_exec_exit(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + env->eflags = cpu_compute_eflags(env); +} + +uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + +#ifdef UNICORN_ARCH_POSTFIX + return glue(address_space_ldub, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL); +#else + return address_space_ldub(as->uc, as, addr, attrs, NULL); +#endif +} + +uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + +#ifdef UNICORN_ARCH_POSTFIX + return glue(address_space_lduw, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL); +#else + return address_space_lduw(as->uc, as, addr, attrs, NULL); +#endif +} + +uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + +#ifdef UNICORN_ARCH_POSTFIX + return glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL); +#else + return address_space_ldl(as->uc, as, addr, attrs, NULL); +#endif +} + +uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + +#ifdef UNICORN_ARCH_POSTFIX + return glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, attrs, NULL); +#else + return address_space_ldq(as->uc, as, addr, attrs, NULL); +#endif +} + +void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + +#ifdef UNICORN_ARCH_POSTFIX + glue(address_space_stb, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL); +#else + address_space_stb(as->uc, as, addr, val, attrs, NULL); +#endif +} + +void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + +#ifdef UNICORN_ARCH_POSTFIX + glue(address_space_stl_notdirty, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL); +#else + address_space_stl_notdirty(as->uc, as, addr, val, attrs, NULL); +#endif +} + +void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + +#ifdef UNICORN_ARCH_POSTFIX + glue(address_space_stw,UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL); +#else + address_space_stw(as->uc, as, addr, val, attrs, NULL); +#endif +} + +void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); 
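+    /* As in the accessors above: when UNICORN_ARCH_POSTFIX is defined, the
+       address_space_* symbol gets a per-target suffix, presumably so that
+       several per-arch builds of these helpers can coexist in one library. */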
+ AddressSpace *as = cpu_addressspace(cs, attrs); + +#ifdef UNICORN_ARCH_POSTFIX + glue(address_space_stl, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL); +#else + address_space_stl(as->uc, as, addr, val, attrs, NULL); +#endif +} + +void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + +#ifdef UNICORN_ARCH_POSTFIX + glue(address_space_stq, UNICORN_ARCH_POSTFIX)(as->uc, as, addr, val, attrs, NULL); +#else + address_space_stq(as->uc, as, addr, val, attrs, NULL); +#endif +} diff --git a/qemu/target-i386/helper.h b/qemu/target/i386/helper.h similarity index 79% rename from qemu/target-i386/helper.h rename to qemu/target/i386/helper.h index d3b52d1f..acdd64e8 100644 --- a/qemu/target-i386/helper.h +++ b/qemu/target/i386/helper.h @@ -3,8 +3,6 @@ DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int) DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int) -DEF_HELPER_1(lock, void, env) -DEF_HELPER_1(unlock, void, env) DEF_HELPER_3(write_eflags, void, env, tl, i32) DEF_HELPER_1(read_eflags, tl, env) DEF_HELPER_2(divb_AL, void, env, tl) @@ -17,6 +15,14 @@ DEF_HELPER_2(idivl_EAX, void, env, tl) DEF_HELPER_2(divq_EAX, void, env, tl) DEF_HELPER_2(idivq_EAX, void, env, tl) #endif +DEF_HELPER_FLAGS_2(cr4_testbit, TCG_CALL_NO_WG, void, env, i32) + +DEF_HELPER_FLAGS_2(bndck, TCG_CALL_NO_WG, void, env, i32) +DEF_HELPER_FLAGS_3(bndldx32, TCG_CALL_NO_WG, i64, env, tl, tl) +DEF_HELPER_FLAGS_3(bndldx64, TCG_CALL_NO_WG, i64, env, tl, tl) +DEF_HELPER_FLAGS_5(bndstx32, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64) +DEF_HELPER_FLAGS_5(bndstx64, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64) +DEF_HELPER_1(bnd_jmp, void, env) DEF_HELPER_2(aam, void, env, int) DEF_HELPER_2(aad, void, env, int) @@ -32,9 +38,9 @@ DEF_HELPER_2(verw, void, env, tl) DEF_HELPER_2(lldt, void, env, int) DEF_HELPER_2(ltr, void, env, int) DEF_HELPER_3(load_seg, void, env, int, int) -DEF_HELPER_4(ljmp_protected, void, env, int, tl, int) +DEF_HELPER_4(ljmp_protected, void, env, int, tl, tl) DEF_HELPER_5(lcall_real, void, env, int, tl, int, int) -DEF_HELPER_5(lcall_protected, void, env, int, tl, int, int) +DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl) DEF_HELPER_2(iret_real, void, env, int) DEF_HELPER_3(iret_protected, void, env, int, int) DEF_HELPER_3(lret_protected, void, env, int, int) @@ -42,13 +48,10 @@ DEF_HELPER_2(read_crN, tl, env, int) DEF_HELPER_3(write_crN, void, env, int, tl) DEF_HELPER_2(lmsw, void, env, tl) DEF_HELPER_1(clts, void, env) -DEF_HELPER_3(movl_drN_T0, void, env, int, tl) +DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl) +DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int) DEF_HELPER_2(invlpg, void, env, tl) -DEF_HELPER_4(enter_level, void, env, int, int, tl) -#ifdef TARGET_X86_64 -DEF_HELPER_4(enter64_level, void, env, int, int, tl) -#endif DEF_HELPER_2(sysenter, void, env, int) DEF_HELPER_2(sysexit, void, env, int) #ifdef TARGET_X86_64 @@ -67,17 +70,18 @@ DEF_HELPER_1(cli, void, env) DEF_HELPER_1(sti, void, env) DEF_HELPER_1(clac, void, env) DEF_HELPER_1(stac, void, env) -DEF_HELPER_1(set_inhibit_irq, void, env) -DEF_HELPER_1(reset_inhibit_irq, void, env) DEF_HELPER_3(boundw, void, env, tl, int) DEF_HELPER_3(boundl, void, env, tl, int) DEF_HELPER_1(rsm, void, env) DEF_HELPER_2(into, void, env, int) 
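+/* DEF_HELPER_n(name, ret, t1, ..., tn) declares helper_name with return
+   type ret and n arguments of the listed TCG types; the _unlocked variants
+   below are the non-atomic forms, used when the access does not have to be
+   performed atomically. */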
+DEF_HELPER_2(cmpxchg8b_unlocked, void, env, tl) DEF_HELPER_2(cmpxchg8b, void, env, tl) #ifdef TARGET_X86_64 +DEF_HELPER_2(cmpxchg16b_unlocked, void, env, tl) DEF_HELPER_2(cmpxchg16b, void, env, tl) #endif DEF_HELPER_1(single_step, void, env) +DEF_HELPER_1(rechecking_single_step, void, env) DEF_HELPER_1(cpuid, void, env) DEF_HELPER_1(rdtsc, void, env) DEF_HELPER_1(rdtscp, void, env) @@ -88,15 +92,15 @@ DEF_HELPER_1(wrmsr, void, env) DEF_HELPER_2(check_iob, void, env, i32) DEF_HELPER_2(check_iow, void, env, i32) DEF_HELPER_2(check_iol, void, env, i32) -DEF_HELPER_3(outb, void, ptr, i32, i32) -DEF_HELPER_2(inb, tl, ptr, i32) -DEF_HELPER_3(outw, void, ptr, i32, i32) -DEF_HELPER_2(inw, tl, ptr, i32) -DEF_HELPER_3(outl, void, ptr, i32, i32) -DEF_HELPER_2(inl, tl, ptr, i32) +DEF_HELPER_3(outb, void, env, i32, i32) +DEF_HELPER_2(inb, tl, env, i32) +DEF_HELPER_3(outw, void, env, i32, i32) +DEF_HELPER_2(inw, tl, env, i32) +DEF_HELPER_3(outl, void, env, i32, i32) +DEF_HELPER_2(inl, tl, env, i32) +DEF_HELPER_FLAGS_4(bpt_io, TCG_CALL_NO_WG, void, env, i32, i32, tl) DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64) -DEF_HELPER_3(vmexit, void, env, i32, i64) DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32) DEF_HELPER_3(vmrun, void, env, int, int) DEF_HELPER_1(vmmcall, void, env) @@ -189,17 +193,16 @@ DEF_HELPER_3(fstenv, void, env, tl, int) DEF_HELPER_3(fldenv, void, env, tl, int) DEF_HELPER_3(fsave, void, env, tl, int) DEF_HELPER_3(frstor, void, env, tl, int) -DEF_HELPER_3(fxsave, void, env, tl, int) -DEF_HELPER_3(fxrstor, void, env, tl, int) +DEF_HELPER_FLAGS_2(fxsave, TCG_CALL_NO_WG, void, env, tl) +DEF_HELPER_FLAGS_2(fxrstor, TCG_CALL_NO_WG, void, env, tl) +DEF_HELPER_FLAGS_3(xsave, TCG_CALL_NO_WG, void, env, tl, i64) +DEF_HELPER_FLAGS_3(xsaveopt, TCG_CALL_NO_WG, void, env, tl, i64) +DEF_HELPER_FLAGS_3(xrstor, TCG_CALL_NO_WG, void, env, tl, i64) +DEF_HELPER_FLAGS_2(xgetbv, TCG_CALL_NO_WG, i64, env, i32) +DEF_HELPER_FLAGS_3(xsetbv, TCG_CALL_NO_WG, void, env, i32, i64) +DEF_HELPER_FLAGS_2(rdpkru, TCG_CALL_NO_WG, i64, env, i32) +DEF_HELPER_FLAGS_3(wrpkru, TCG_CALL_NO_WG, void, env, i32, i64) -DEF_HELPER_FLAGS_1(clz_x86, TCG_CALL_NO_RWG_SE, tl, tl) - -#ifdef TARGET_I386 -#define helper_clz helper_clz_x86 -#define gen_helper_clz gen_helper_clz_x86 -#endif - -DEF_HELPER_FLAGS_1(ctz, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_2(pdep, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl) @@ -225,3 +228,5 @@ DEF_HELPER_3(rcrl, tl, env, tl, tl) DEF_HELPER_3(rclq, tl, env, tl, tl) DEF_HELPER_3(rcrq, tl, env, tl, tl) #endif + +DEF_HELPER_1(rdrand, tl, env) diff --git a/qemu/target-i386/int_helper.c b/qemu/target/i386/int_helper.c similarity index 84% rename from qemu/target-i386/int_helper.c rename to qemu/target/i386/int_helper.c index 0de38c1c..5dea08ab 100644 --- a/qemu/target-i386/int_helper.c +++ b/qemu/target/i386/int_helper.c @@ -17,9 +17,12 @@ * License along with this library; if not, see . 
*/ +#include "qemu/osdep.h" #include "cpu.h" +#include "exec/exec-all.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" +#include "qemu/guest-random.h" //#define DEBUG_MULDIV @@ -48,11 +51,11 @@ void helper_divb_AL(CPUX86State *env, target_ulong t0) num = (env->regs[R_EAX] & 0xffff); den = (t0 & 0xff); if (den == 0) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = (num / den); if (q > 0xff) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q &= 0xff; r = (num % den) & 0xff; @@ -66,11 +69,11 @@ void helper_idivb_AL(CPUX86State *env, target_ulong t0) num = (int16_t)env->regs[R_EAX]; den = (int8_t)t0; if (den == 0) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = (num / den); if (q != (int8_t)q) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q &= 0xff; r = (num % den) & 0xff; @@ -84,11 +87,11 @@ void helper_divw_AX(CPUX86State *env, target_ulong t0) num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16); den = (t0 & 0xffff); if (den == 0) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = (num / den); if (q > 0xffff) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q &= 0xffff; r = (num % den) & 0xffff; @@ -103,11 +106,11 @@ void helper_idivw_AX(CPUX86State *env, target_ulong t0) num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16); den = (int16_t)t0; if (den == 0) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = ((int64_t)num / den); if (q != (int16_t)q) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q &= 0xffff; r = (num % den) & 0xffff; @@ -123,12 +126,12 @@ void helper_divl_EAX(CPUX86State *env, target_ulong t0) num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); den = (unsigned int)t0; if (den == 0) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = (num / den); r = (num % den); if (q > 0xffffffff) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } env->regs[R_EAX] = (uint32_t)q; env->regs[R_EDX] = (uint32_t)r; @@ -142,12 +145,12 @@ void helper_idivl_EAX(CPUX86State *env, target_ulong t0) num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); den = (int)t0; if (den == 0) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } q = (num / den); r = (num % den); if (q != (int32_t)q) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } env->regs[R_EAX] = (uint32_t)q; env->regs[R_EDX] = (uint32_t)r; @@ -379,12 +382,12 @@ void helper_divq_EAX(CPUX86State *env, target_ulong t0) uint64_t r0, r1; if (t0 == 0) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } r0 = env->regs[R_EAX]; r1 = env->regs[R_EDX]; if (div64(&r0, &r1, t0)) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } env->regs[R_EAX] = r0; env->regs[R_EDX] = r1; @@ -395,12 +398,12 @@ void helper_idivq_EAX(CPUX86State *env, target_ulong t0) uint64_t r0, r1; if (t0 == 0) { - raise_exception(env, EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } r0 = env->regs[R_EAX]; r1 = env->regs[R_EDX]; if (idiv64(&r0, &r1, t0)) { - raise_exception(env, 
EXCP00_DIVZ); + raise_exception_ra(env, EXCP00_DIVZ, GETPC()); } env->regs[R_EAX] = r0; env->regs[R_EDX] = r1; @@ -415,17 +418,6 @@ void helper_idivq_EAX(CPUX86State *env, target_ulong t0) # define clztl clz64 #endif -/* bit operations */ -target_ulong helper_ctz(target_ulong t0) -{ - return ctztl(t0); -} - -target_ulong helper_clz_x86(target_ulong t0) -{ - return clztl(t0); -} - target_ulong helper_pdep(target_ulong src, target_ulong mask) { target_ulong dest = 0; @@ -469,3 +461,31 @@ target_ulong helper_pext(target_ulong src, target_ulong mask) #include "shift_helper_template.h" #undef SHIFT #endif + +/* Test that BIT is enabled in CR4. If not, raise an illegal opcode + exception. This reduces the requirements for rare CR4 bits being + mapped into HFLAGS. */ +void helper_cr4_testbit(CPUX86State *env, uint32_t bit) +{ + if (unlikely((env->cr[4] & bit) == 0)) { + raise_exception_ra(env, EXCP06_ILLOP, GETPC()); + } +} + +target_ulong HELPER(rdrand)(CPUX86State *env) +{ + target_ulong ret; + + if (qemu_guest_getrandom(&ret, sizeof(ret)) < 0) { + // qemu_log_mask(LOG_UNIMP, "rdrand: Crypto failure: %s", + // error_get_pretty(err)); + // error_free(err); + /* Failure clears CF and all other flags, and returns 0. */ + env->cc_src = 0; + return 0; + } + + /* Success sets CF and clears all others. */ + env->cc_src = CC_C; + return ret; +} diff --git a/qemu/target/i386/machine.c b/qemu/target/i386/machine.c new file mode 100644 index 00000000..2e4eea4f --- /dev/null +++ b/qemu/target/i386/machine.c @@ -0,0 +1,23 @@ +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" + +#include "sysemu/tcg.h" + +void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f) +{ + CPU_LDoubleU temp; + + temp.d = f; + *pmant = temp.l.lower; + *pexp = temp.l.upper; +} + +floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper) +{ + CPU_LDoubleU temp; + + temp.l.upper = upper; + temp.l.lower = mant; + return temp.d; +} diff --git a/qemu/target/i386/mem_helper.c b/qemu/target/i386/mem_helper.c new file mode 100644 index 00000000..0f3946b9 --- /dev/null +++ b/qemu/target/i386/mem_helper.c @@ -0,0 +1,184 @@ +/* + * x86 memory access helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "qemu/int128.h" +#include "qemu/atomic128.h" +#include "tcg/tcg.h" + + +void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0) +{ + uintptr_t ra = GETPC(); + uint64_t oldv, cmpv, newv; + int eflags; + + eflags = cpu_cc_compute_all(env, CC_OP); + + cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]); + newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]); + + oldv = cpu_ldq_data_ra(env, a0, ra); + newv = (cmpv == oldv ? 
newv : oldv); + /* always do the store */ + cpu_stq_data_ra(env, a0, newv, ra); + + if (oldv == cmpv) { + eflags |= CC_Z; + } else { + env->regs[R_EAX] = (uint32_t)oldv; + env->regs[R_EDX] = (uint32_t)(oldv >> 32); + eflags &= ~CC_Z; + } + CC_SRC = eflags; +} + +void helper_cmpxchg8b(CPUX86State *env, target_ulong a0) +{ +#ifdef CONFIG_ATOMIC64 + uint64_t oldv, cmpv, newv; + int eflags; + + eflags = cpu_cc_compute_all(env, CC_OP); + + cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]); + newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]); + + { + uintptr_t ra = GETPC(); + int mem_idx = cpu_mmu_index(env, false); + TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx); + oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra); + } + + if (oldv == cmpv) { + eflags |= CC_Z; + } else { + env->regs[R_EAX] = (uint32_t)oldv; + env->regs[R_EDX] = (uint32_t)(oldv >> 32); + eflags &= ~CC_Z; + } + CC_SRC = eflags; +#else + cpu_loop_exit_atomic(env_cpu(env), GETPC()); +#endif /* CONFIG_ATOMIC64 */ +} + +#ifdef TARGET_X86_64 +void helper_cmpxchg16b_unlocked(CPUX86State *env, target_ulong a0) +{ + uintptr_t ra = GETPC(); + Int128 oldv, cmpv, newv; + uint64_t o0, o1; + int eflags; + bool success; + + if ((a0 & 0xf) != 0) { + raise_exception_ra(env, EXCP0D_GPF, GETPC()); + } + eflags = cpu_cc_compute_all(env, CC_OP); + + cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]); + newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]); + + o0 = cpu_ldq_data_ra(env, a0 + 0, ra); + o1 = cpu_ldq_data_ra(env, a0 + 8, ra); + + oldv = int128_make128(o0, o1); + success = int128_eq(oldv, cmpv); + if (!success) { + newv = oldv; + } + + cpu_stq_data_ra(env, a0 + 0, int128_getlo(newv), ra); + cpu_stq_data_ra(env, a0 + 8, int128_gethi(newv), ra); + + if (success) { + eflags |= CC_Z; + } else { + env->regs[R_EAX] = int128_getlo(oldv); + env->regs[R_EDX] = int128_gethi(oldv); + eflags &= ~CC_Z; + } + CC_SRC = eflags; +} + +void helper_cmpxchg16b(CPUX86State *env, target_ulong a0) +{ + uintptr_t ra = GETPC(); + + if ((a0 & 0xf) != 0) { + raise_exception_ra(env, EXCP0D_GPF, ra); + } else if (HAVE_CMPXCHG128) { + int eflags = cpu_cc_compute_all(env, CC_OP); + + Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]); + Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]); + + int mem_idx = cpu_mmu_index(env, false); + TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv, + newv, oi, ra); + + if (int128_eq(oldv, cmpv)) { + eflags |= CC_Z; + } else { + env->regs[R_EAX] = int128_getlo(oldv); + env->regs[R_EDX] = int128_gethi(oldv); + eflags &= ~CC_Z; + } + CC_SRC = eflags; + } else { + cpu_loop_exit_atomic(env_cpu(env), ra); + } +} +#endif + +void helper_boundw(CPUX86State *env, target_ulong a0, int v) +{ + int low, high; + + low = cpu_ldsw_data_ra(env, a0, GETPC()); + high = cpu_ldsw_data_ra(env, a0 + 2, GETPC()); + v = (int16_t)v; + if (v < low || v > high) { + if (env->hflags & HF_MPX_EN_MASK) { + env->bndcs_regs.sts = 0; + } + raise_exception_ra(env, EXCP05_BOUND, GETPC()); + } +} + +void helper_boundl(CPUX86State *env, target_ulong a0, int v) +{ + int low, high; + + low = cpu_ldl_data_ra(env, a0, GETPC()); + high = cpu_ldl_data_ra(env, a0 + 4, GETPC()); + if (v < low || v > high) { + if (env->hflags & HF_MPX_EN_MASK) { + env->bndcs_regs.sts = 0; + } + raise_exception_ra(env, EXCP05_BOUND, GETPC()); + } +} diff --git a/qemu/target-i386/misc_helper.c b/qemu/target/i386/misc_helper.c similarity index 71% 
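helper_boundw and helper_boundl above close out mem_helper.c: BOUND loads a signed lower/upper pair stored adjacently in memory, raises #BR (EXCP05_BOUND) when the index falls outside it, and clears the MPX status register first when MPX is enabled. A minimal standalone model of just the 16-bit range test follows; the exception plumbing (raise_exception_ra) and the BNDSTATUS clear are reduced to a return code, so this is a sketch of the check, not the helper itself.

#include <stdint.h>
#include <stdio.h>

/* Bound pair layout matches the helper: low word at a0, high word at a0 + 2. */
static int boundw_model(const int16_t bounds[2], int16_t v)
{
    int low = bounds[0], high = bounds[1];
    return (v < low || v > high) ? -1 /* would raise #BR */ : 0;
}

int main(void)
{
    int16_t pair[2] = { 10, 20 };
    printf("%d %d %d\n",
           boundw_model(pair, 15),  /* in range   ->  0 */
           boundw_model(pair, 9),   /* below low  -> -1 */
           boundw_model(pair, 21)); /* above high -> -1 */
    return 0;
}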
rename from qemu/target-i386/misc_helper.c rename to qemu/target/i386/misc_helper.c index a3950b7c..d46953a4 100644 --- a/qemu/target-i386/misc_helper.c +++ b/qemu/target/i386/misc_helper.c @@ -17,41 +17,79 @@ * License along with this library; if not, see . */ +#include "qemu/osdep.h" #include "cpu.h" -#include "exec/ioport.h" #include "exec/helper-proto.h" +#include "exec/exec-all.h" #include "exec/cpu_ldst.h" +#include "exec/ioport.h" #include "uc_priv.h" -void helper_outb(void *handle, uint32_t port, uint32_t data) +void helper_outb(CPUX86State *env, uint32_t port, uint32_t data) { - cpu_outb(handle, port, data & 0xff); +// #ifdef UNICORN_ARCH_POSTFIX +// glue(address_space_stb, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port, data & 0xff, +// #else +// address_space_stb(env->uc, &env->uc->address_space_io, port, data & 0xff, +// #endif +// cpu_get_mem_attrs(env), NULL); + return cpu_outb(env->uc, port, data); } -target_ulong helper_inb(void *handle, uint32_t port) +target_ulong helper_inb(CPUX86State *env, uint32_t port) { - return cpu_inb(handle, port); +// #ifdef UNICORN_ARCH_POSTFIX +// return glue(address_space_ldub, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port, +// #else +// return address_space_ldub(env->uc, &env->uc->address_space_io, port, +// #endif +// cpu_get_mem_attrs(env), NULL); + return cpu_inb(env->uc, port); } -void helper_outw(void *handle, uint32_t port, uint32_t data) +void helper_outw(CPUX86State *env, uint32_t port, uint32_t data) { - cpu_outw(handle, port, data & 0xffff); +// #ifdef UNICORN_ARCH_POSTFIX +// glue(address_space_stw, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port, data & 0xffff, +// #else +// address_space_stw(env->uc, &env->uc->address_space_io, port, data & 0xffff, +// #endif +// cpu_get_mem_attrs(env), NULL); + return cpu_outw(env->uc, port, data); } -target_ulong helper_inw(void *handle, uint32_t port) +target_ulong helper_inw(CPUX86State *env, uint32_t port) { - return cpu_inw(handle, port); +// #ifdef UNICORN_ARCH_POSTFIX +// return glue(address_space_lduw, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port, +// #else +// return address_space_lduw(env->uc, &env->uc->address_space_io, port, +// #endif +// cpu_get_mem_attrs(env), NULL); + return cpu_inw(env->uc, port); } -void helper_outl(void *handle, uint32_t port, uint32_t data) +void helper_outl(CPUX86State *env, uint32_t port, uint32_t data) { - cpu_outl(handle, port, data); +// #ifdef UNICORN_ARCH_POSTFIX +// glue(address_space_stl, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port, data, +// #else +// address_space_stl(env->uc, &env->uc->address_space_io, port, data, +// #endif +// cpu_get_mem_attrs(env), NULL); + return cpu_outl(env->uc, port, data); } -target_ulong helper_inl(void *handle, uint32_t port) +target_ulong helper_inl(CPUX86State *env, uint32_t port) { - return cpu_inl(handle, port); +// #ifdef UNICORN_ARCH_POSTFIX +// return glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(env->uc, &env->uc->address_space_io, port, +// #else +// return address_space_ldl(env->uc, &env->uc->address_space_io, port, +// #endif +// cpu_get_mem_attrs(env), NULL); + return cpu_inl(env->uc, port); } void helper_into(CPUX86State *env, int next_eip_addend) @@ -64,20 +102,11 @@ void helper_into(CPUX86State *env, int next_eip_addend) } } -void helper_single_step(CPUX86State *env) -{ -#ifndef CONFIG_USER_ONLY - check_hw_breakpoints(env, true); - env->dr[6] |= DR6_BS; -#endif - raise_exception(env, EXCP01_DB); -} - void 
helper_cpuid(CPUX86State *env) { uint32_t eax, ebx, ecx, edx; - cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0, GETPC()); cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX], &eax, &ebx, &ecx, &edx); @@ -87,32 +116,19 @@ void helper_cpuid(CPUX86State *env) env->regs[R_EDX] = edx; } -#if defined(CONFIG_USER_ONLY) -target_ulong helper_read_crN(CPUX86State *env, int reg) -{ - return 0; -} - -void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) -{ -} - -void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0) -{ -} -#else target_ulong helper_read_crN(CPUX86State *env, int reg) { target_ulong val; - cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0, GETPC()); switch (reg) { default: val = env->cr[reg]; break; case 8: if (!(env->hflags2 & HF2_VINTR_MASK)) { - val = cpu_get_apic_tpr(env->uc, x86_env_get_cpu(env)->apic_state); + // val = cpu_get_apic_tpr(env_archcpu(env)->apic_state); + val = 0; } else { val = env->v_tpr; } @@ -123,7 +139,7 @@ target_ulong helper_read_crN(CPUX86State *env, int reg) void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) { - cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0, GETPC()); switch (reg) { case 0: cpu_x86_update_cr0(env, (uint32_t)t0); @@ -135,9 +151,11 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) cpu_x86_update_cr4(env, (uint32_t)t0); break; case 8: +#if 0 if (!(env->hflags2 & HF2_VINTR_MASK)) { - cpu_set_apic_tpr(env->uc, x86_env_get_cpu(env)->apic_state, (uint8_t)t0); + cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0); } +#endif env->v_tpr = t0 & 0x0f; break; default: @@ -146,28 +164,6 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) } } -void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0) -{ - int i; - - if (reg < 4) { - hw_breakpoint_remove(env, reg); - env->dr[reg] = t0; - hw_breakpoint_insert(env, reg); - } else if (reg == 7) { - for (i = 0; i < DR7_MAX_BP; i++) { - hw_breakpoint_remove(env, i); - } - env->dr[7] = t0; - for (i = 0; i < DR7_MAX_BP; i++) { - hw_breakpoint_insert(env, i); - } - } else { - env->dr[reg] = t0; - } -} -#endif - void helper_lmsw(CPUX86State *env, target_ulong t0) { /* only 4 lower bits of CR0 are modified. 
PE cannot be set to zero @@ -178,9 +174,9 @@ void helper_lmsw(CPUX86State *env, target_ulong t0) void helper_invlpg(CPUX86State *env, target_ulong addr) { - X86CPU *cpu = x86_env_get_cpu(env); + X86CPU *cpu = env_archcpu(env); - cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0, GETPC()); tlb_flush_page(CPU(cpu), addr); } @@ -189,9 +185,9 @@ void helper_rdtsc(CPUX86State *env) uint64_t val; if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { - raise_exception(env, EXCP0D_GPF); + raise_exception_ra(env, EXCP0D_GPF, GETPC()); } - cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0, GETPC()); val = cpu_get_tsc(env) + env->tsc_offset; env->regs[R_EAX] = (uint32_t)(val); @@ -207,29 +203,20 @@ void helper_rdtscp(CPUX86State *env) void helper_rdpmc(CPUX86State *env) { if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { - raise_exception(env, EXCP0D_GPF); + raise_exception_ra(env, EXCP0D_GPF, GETPC()); } - cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0, GETPC()); /* currently unimplemented */ qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n"); raise_exception_err(env, EXCP06_ILLOP, 0); } -#if defined(CONFIG_USER_ONLY) -void helper_wrmsr(CPUX86State *env) -{ -} - -void helper_rdmsr(CPUX86State *env) -{ -} -#else void helper_wrmsr(CPUX86State *env) { uint64_t val; - cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1); + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC()); val = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); @@ -245,7 +232,7 @@ void helper_wrmsr(CPUX86State *env) env->sysenter_eip = val; break; case MSR_IA32_APICBASE: - cpu_set_apic_base(env->uc, x86_env_get_cpu(env)->apic_state, val); + // cpu_set_apic_base(env_archcpu(env)->apic_state, val); break; case MSR_EFER: { @@ -363,6 +350,12 @@ void helper_wrmsr(CPUX86State *env) case MSR_IA32_MISC_ENABLE: env->msr_ia32_misc_enable = val; break; + case MSR_IA32_BNDCFGS: + /* FIXME: #GP if reserved bits are set. */ + /* FIXME: Extend highest implemented bit of linear address. 
*/ + env->msr_bndcfgs = val; + cpu_sync_bndcs_hflags(env); + break; default: if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + @@ -381,9 +374,10 @@ void helper_wrmsr(CPUX86State *env) void helper_rdmsr(CPUX86State *env) { + X86CPU *x86_cpu = env_archcpu(env); uint64_t val; - cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC()); switch ((uint32_t)env->regs[R_ECX]) { case MSR_IA32_SYSENTER_CS: @@ -396,7 +390,7 @@ void helper_rdmsr(CPUX86State *env) val = env->sysenter_eip; break; case MSR_IA32_APICBASE: - val = cpu_get_apic_base(env->uc, x86_env_get_cpu(env)->apic_state); + val = 0; // cpu_get_apic_base(env_archcpu(env)->apic_state); break; case MSR_EFER: val = env->efer; @@ -439,6 +433,9 @@ void helper_rdmsr(CPUX86State *env) val = env->tsc_aux; break; #endif + case MSR_SMI_COUNT: + val = env->msr_smi_count; + break; case MSR_MTRRphysBase(0): case MSR_MTRRphysBase(1): case MSR_MTRRphysBase(2): @@ -508,6 +505,12 @@ void helper_rdmsr(CPUX86State *env) case MSR_IA32_MISC_ENABLE: val = env->msr_ia32_misc_enable; break; + case MSR_IA32_BNDCFGS: + val = env->msr_bndcfgs; + break; + case MSR_IA32_UCODE_REV: + val = x86_cpu->ucode_rev; + break; default: if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + @@ -523,7 +526,6 @@ void helper_rdmsr(CPUX86State *env) env->regs[R_EAX] = (uint32_t)(val); env->regs[R_EDX] = (uint32_t)(val >> 32); } -#endif static void do_pause(X86CPU *cpu) { @@ -547,9 +549,9 @@ static void do_hlt(X86CPU *cpu) void helper_hlt(CPUX86State *env, int next_eip_addend) { - X86CPU *cpu = x86_env_get_cpu(env); + X86CPU *cpu = env_archcpu(env); - cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC()); env->eip += next_eip_addend; do_hlt(cpu); @@ -558,32 +560,38 @@ void helper_hlt(CPUX86State *env, int next_eip_addend) void helper_monitor(CPUX86State *env, target_ulong ptr) { if ((uint32_t)env->regs[R_ECX] != 0) { - raise_exception(env, EXCP0D_GPF); + raise_exception_ra(env, EXCP0D_GPF, GETPC()); } /* XXX: store address? 
*/ - cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC()); } void helper_mwait(CPUX86State *env, int next_eip_addend) { - X86CPU *cpu; + CPUState *cs = env_cpu(env); + X86CPU *cpu = env_archcpu(env); if ((uint32_t)env->regs[R_ECX] != 0) { - raise_exception(env, EXCP0D_GPF); + raise_exception_ra(env, EXCP0D_GPF, GETPC()); } - cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0); + + cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC()); env->eip += next_eip_addend; - cpu = x86_env_get_cpu(env); /* XXX: not complete but not completely erroneous */ - do_hlt(cpu); + // if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) { // TODO + if (cs->cpu_index != 0) { + // do_pause(cpu); + } else { + do_hlt(cpu); + } } void helper_pause(CPUX86State *env, int next_eip_addend) { - X86CPU *cpu = x86_env_get_cpu(env); + X86CPU *cpu = env_archcpu(env); - cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0, GETPC()); env->eip += next_eip_addend; do_pause(cpu); @@ -591,8 +599,35 @@ void helper_pause(CPUX86State *env, int next_eip_addend) void helper_debug(CPUX86State *env) { - CPUState *cs = CPU(x86_env_get_cpu(env)); + CPUState *cs = env_cpu(env); cs->exception_index = EXCP_DEBUG; cpu_loop_exit(cs); } + +uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx) +{ + if ((env->cr[4] & CR4_PKE_MASK) == 0) { + raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); + } + if (ecx != 0) { + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); + } + + return env->pkru; +} + +void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val) +{ + CPUState *cs = env_cpu(env); + + if ((env->cr[4] & CR4_PKE_MASK) == 0) { + raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); + } + if (ecx != 0 || (val & 0xFFFFFFFF00000000ull)) { + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); + } + + env->pkru = val; + tlb_flush(cs); +} diff --git a/qemu/target/i386/mpx_helper.c b/qemu/target/i386/mpx_helper.c new file mode 100644 index 00000000..ade5d245 --- /dev/null +++ b/qemu/target/i386/mpx_helper.c @@ -0,0 +1,138 @@ +/* + * x86 MPX helpers + * + * Copyright (c) 2015 Red Hat, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "exec/exec-all.h" + + +void helper_bndck(CPUX86State *env, uint32_t fail) +{ + if (unlikely(fail)) { + env->bndcs_regs.sts = 1; + raise_exception_ra(env, EXCP05_BOUND, GETPC()); + } +} + +static uint64_t lookup_bte64(CPUX86State *env, uint64_t base, uintptr_t ra) +{ + uint64_t bndcsr, bde, bt; + + if ((env->hflags & HF_CPL_MASK) == 3) { + bndcsr = env->bndcs_regs.cfgu; + } else { + bndcsr = env->msr_bndcfgs; + } + + bde = (extract64(base, 20, 28) << 3) + (extract64(bndcsr, 20, 44) << 12); + bt = cpu_ldq_data_ra(env, bde, ra); + if ((bt & 1) == 0) { + env->bndcs_regs.sts = bde | 2; + raise_exception_ra(env, EXCP05_BOUND, ra); + } + + return (extract64(base, 3, 17) << 5) + (bt & ~7); +} + +static uint32_t lookup_bte32(CPUX86State *env, uint32_t base, uintptr_t ra) +{ + uint32_t bndcsr, bde, bt; + + if ((env->hflags & HF_CPL_MASK) == 3) { + bndcsr = env->bndcs_regs.cfgu; + } else { + bndcsr = env->msr_bndcfgs; + } + + bde = (extract32(base, 12, 20) << 2) + (bndcsr & TARGET_PAGE_MASK); + bt = cpu_ldl_data_ra(env, bde, ra); + if ((bt & 1) == 0) { + env->bndcs_regs.sts = bde | 2; + raise_exception_ra(env, EXCP05_BOUND, ra); + } + + return (extract32(base, 2, 10) << 4) + (bt & ~3); +} + +uint64_t helper_bndldx64(CPUX86State *env, target_ulong base, target_ulong ptr) +{ + uintptr_t ra = GETPC(); + uint64_t bte, lb, ub, pt; + + bte = lookup_bte64(env, base, ra); + lb = cpu_ldq_data_ra(env, bte, ra); + ub = cpu_ldq_data_ra(env, bte + 8, ra); + pt = cpu_ldq_data_ra(env, bte + 16, ra); + + if (pt != ptr) { + lb = ub = 0; + } + env->mmx_t0.MMX_Q(0) = ub; + return lb; +} + +uint64_t helper_bndldx32(CPUX86State *env, target_ulong base, target_ulong ptr) +{ + uintptr_t ra = GETPC(); + uint32_t bte, lb, ub, pt; + + bte = lookup_bte32(env, base, ra); + lb = cpu_ldl_data_ra(env, bte, ra); + ub = cpu_ldl_data_ra(env, bte + 4, ra); + pt = cpu_ldl_data_ra(env, bte + 8, ra); + + if (pt != ptr) { + lb = ub = 0; + } + return ((uint64_t)ub << 32) | lb; +} + +void helper_bndstx64(CPUX86State *env, target_ulong base, target_ulong ptr, + uint64_t lb, uint64_t ub) +{ + uintptr_t ra = GETPC(); + uint64_t bte; + + bte = lookup_bte64(env, base, ra); + cpu_stq_data_ra(env, bte, lb, ra); + cpu_stq_data_ra(env, bte + 8, ub, ra); + cpu_stq_data_ra(env, bte + 16, ptr, ra); +} + +void helper_bndstx32(CPUX86State *env, target_ulong base, target_ulong ptr, + uint64_t lb, uint64_t ub) +{ + uintptr_t ra = GETPC(); + uint32_t bte; + + bte = lookup_bte32(env, base, ra); + cpu_stl_data_ra(env, bte, lb, ra); + cpu_stl_data_ra(env, bte + 4, ub, ra); + cpu_stl_data_ra(env, bte + 8, ptr, ra); +} + +void helper_bnd_jmp(CPUX86State *env) +{ + if (!(env->hflags2 & HF2_MPX_PR_MASK)) { + memset(env->bnd_regs, 0, sizeof(env->bnd_regs)); + env->hflags &= ~HF_MPX_IU_MASK; + } +} diff --git a/qemu/target-i386/ops_sse.h b/qemu/target/i386/ops_sse.h similarity index 83% rename from qemu/target-i386/ops_sse.h rename to qemu/target/i386/ops_sse.h index 8a8cda91..ec1ec745 100644 --- a/qemu/target-i386/ops_sse.h +++ b/qemu/target/i386/ops_sse.h @@ -18,7 +18,7 @@ * License along with this library; if not, see . */ -#include "qemu/aes.h" +#include "crypto/aes.h" #if SHIFT == 0 #define Reg MMXReg @@ -26,15 +26,15 @@ #define B(n) MMX_B(n) #define W(n) MMX_W(n) #define L(n) MMX_L(n) -#define Q(n) q +#define Q(n) MMX_Q(n) #define SUFFIX _mmx #else -#define Reg XMMReg +#define Reg ZMMReg #define XMM_ONLY(...) 
__VA_ARGS__ -#define B(n) XMM_B(n) -#define W(n) XMM_W(n) -#define L(n) XMM_L(n) -#define Q(n) XMM_Q(n) +#define B(n) ZMM_B(n) +#define W(n) ZMM_W(n) +#define L(n) ZMM_L(n) +#define Q(n) ZMM_Q(n) #define SUFFIX _xmm #endif @@ -363,9 +363,9 @@ static inline int satsw(int x) #define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0) #define FCMPEQ(a, b) ((a) == (b) ? -1 : 0) -#define FMULLW(a, b) ((int64_t)(a) * (b)) +#define FMULLW(a, b) ((a) * (b)) #define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16) -#define FMULHUW(a, b) ((int64_t)(a) * (b) >> 16) +#define FMULHUW(a, b) ((a) * (b) >> 16) #define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16) #define FAVG(a, b) (((a) + (b) + 1) >> 1) @@ -441,7 +441,7 @@ void glue(helper_pmaddwd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) #if SHIFT == 0 static inline int abs1(int a) { - if (a < 0 && a != 0x80000000) { + if (a < 0) { return -a; } else { return a; @@ -483,7 +483,7 @@ void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, for (i = 0; i < (8 << SHIFT); i++) { if (s->B(i) & 0x80) { - cpu_stb_data(env, a0 + i, d->B(i)); + cpu_stb_data_ra(env, a0 + i, d->B(i), GETPC()); } } } @@ -582,26 +582,26 @@ void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order) #define SSE_HELPER_S(name, F) \ void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \ { \ - d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ - d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \ - d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \ - d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \ + d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ + d->ZMM_S(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \ + d->ZMM_S(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \ + d->ZMM_S(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \ } \ \ void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \ { \ - d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ + d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ } \ \ void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \ { \ - d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ - d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \ + d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ + d->ZMM_D(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \ } \ \ void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \ { \ - d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ + d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ } #define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status) @@ -633,79 +633,79 @@ void helper_cvtps2pd(CPUX86State *env, Reg *d, Reg *s) { float32 s0, s1; - s0 = s->XMM_S(0); - s1 = s->XMM_S(1); - d->XMM_D(0) = float32_to_float64(s0, &env->sse_status); - d->XMM_D(1) = float32_to_float64(s1, &env->sse_status); + s0 = s->ZMM_S(0); + s1 = s->ZMM_S(1); + d->ZMM_D(0) = float32_to_float64(s0, &env->sse_status); + d->ZMM_D(1) = float32_to_float64(s1, &env->sse_status); } void helper_cvtpd2ps(CPUX86State *env, Reg *d, Reg *s) { - d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status); - d->XMM_S(1) = float64_to_float32(s->XMM_D(1), &env->sse_status); + d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status); + d->ZMM_S(1) = float64_to_float32(s->ZMM_D(1), &env->sse_status); d->Q(1) = 0; } void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *s) { - d->XMM_D(0) = float32_to_float64(s->XMM_S(0), &env->sse_status); + d->ZMM_D(0) = float32_to_float64(s->ZMM_S(0), &env->sse_status); } void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *s) { - d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status); + d->ZMM_S(0) = 
float64_to_float32(s->ZMM_D(0), &env->sse_status); } /* integer to float */ void helper_cvtdq2ps(CPUX86State *env, Reg *d, Reg *s) { - d->XMM_S(0) = int32_to_float32(s->XMM_L(0), &env->sse_status); - d->XMM_S(1) = int32_to_float32(s->XMM_L(1), &env->sse_status); - d->XMM_S(2) = int32_to_float32(s->XMM_L(2), &env->sse_status); - d->XMM_S(3) = int32_to_float32(s->XMM_L(3), &env->sse_status); + d->ZMM_S(0) = int32_to_float32(s->ZMM_L(0), &env->sse_status); + d->ZMM_S(1) = int32_to_float32(s->ZMM_L(1), &env->sse_status); + d->ZMM_S(2) = int32_to_float32(s->ZMM_L(2), &env->sse_status); + d->ZMM_S(3) = int32_to_float32(s->ZMM_L(3), &env->sse_status); } void helper_cvtdq2pd(CPUX86State *env, Reg *d, Reg *s) { int32_t l0, l1; - l0 = (int32_t)s->XMM_L(0); - l1 = (int32_t)s->XMM_L(1); - d->XMM_D(0) = int32_to_float64(l0, &env->sse_status); - d->XMM_D(1) = int32_to_float64(l1, &env->sse_status); + l0 = (int32_t)s->ZMM_L(0); + l1 = (int32_t)s->ZMM_L(1); + d->ZMM_D(0) = int32_to_float64(l0, &env->sse_status); + d->ZMM_D(1) = int32_to_float64(l1, &env->sse_status); } -void helper_cvtpi2ps(CPUX86State *env, XMMReg *d, MMXReg *s) +void helper_cvtpi2ps(CPUX86State *env, ZMMReg *d, MMXReg *s) { - d->XMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status); - d->XMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status); + d->ZMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status); + d->ZMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status); } -void helper_cvtpi2pd(CPUX86State *env, XMMReg *d, MMXReg *s) +void helper_cvtpi2pd(CPUX86State *env, ZMMReg *d, MMXReg *s) { - d->XMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status); - d->XMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status); + d->ZMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status); + d->ZMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status); } -void helper_cvtsi2ss(CPUX86State *env, XMMReg *d, uint32_t val) +void helper_cvtsi2ss(CPUX86State *env, ZMMReg *d, uint32_t val) { - d->XMM_S(0) = int32_to_float32(val, &env->sse_status); + d->ZMM_S(0) = int32_to_float32(val, &env->sse_status); } -void helper_cvtsi2sd(CPUX86State *env, XMMReg *d, uint32_t val) +void helper_cvtsi2sd(CPUX86State *env, ZMMReg *d, uint32_t val) { - d->XMM_D(0) = int32_to_float64(val, &env->sse_status); + d->ZMM_D(0) = int32_to_float64(val, &env->sse_status); } #ifdef TARGET_X86_64 -void helper_cvtsq2ss(CPUX86State *env, XMMReg *d, uint64_t val) +void helper_cvtsq2ss(CPUX86State *env, ZMMReg *d, uint64_t val) { - d->XMM_S(0) = int64_to_float32(val, &env->sse_status); + d->ZMM_S(0) = int64_to_float32(val, &env->sse_status); } -void helper_cvtsq2sd(CPUX86State *env, XMMReg *d, uint64_t val) +void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val) { - d->XMM_D(0) = int64_to_float64(val, &env->sse_status); + d->ZMM_D(0) = int64_to_float64(val, &env->sse_status); } #endif @@ -742,139 +742,139 @@ WRAP_FLOATCONV(int64_t, float32_to_int64_round_to_zero, float32, INT64_MIN) WRAP_FLOATCONV(int64_t, float64_to_int64, float64, INT64_MIN) WRAP_FLOATCONV(int64_t, float64_to_int64_round_to_zero, float64, INT64_MIN) -void helper_cvtps2dq(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_cvtps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_L(0) = x86_float32_to_int32(s->XMM_S(0), &env->sse_status); - d->XMM_L(1) = x86_float32_to_int32(s->XMM_S(1), &env->sse_status); - d->XMM_L(2) = x86_float32_to_int32(s->XMM_S(2), &env->sse_status); - d->XMM_L(3) = x86_float32_to_int32(s->XMM_S(3), &env->sse_status); + d->ZMM_L(0) = 
x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); + d->ZMM_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status); + d->ZMM_L(2) = x86_float32_to_int32(s->ZMM_S(2), &env->sse_status); + d->ZMM_L(3) = x86_float32_to_int32(s->ZMM_S(3), &env->sse_status); } -void helper_cvtpd2dq(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_cvtpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_L(0) = x86_float64_to_int32(s->XMM_D(0), &env->sse_status); - d->XMM_L(1) = x86_float64_to_int32(s->XMM_D(1), &env->sse_status); - d->XMM_Q(1) = 0; + d->ZMM_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); + d->ZMM_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status); + d->ZMM_Q(1) = 0; } -void helper_cvtps2pi(CPUX86State *env, MMXReg *d, XMMReg *s) +void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { - d->MMX_L(0) = x86_float32_to_int32(s->XMM_S(0), &env->sse_status); - d->MMX_L(1) = x86_float32_to_int32(s->XMM_S(1), &env->sse_status); + d->MMX_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); + d->MMX_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status); } -void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, XMMReg *s) +void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { - d->MMX_L(0) = x86_float64_to_int32(s->XMM_D(0), &env->sse_status); - d->MMX_L(1) = x86_float64_to_int32(s->XMM_D(1), &env->sse_status); + d->MMX_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); + d->MMX_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status); } -int32_t helper_cvtss2si(CPUX86State *env, XMMReg *s) +int32_t helper_cvtss2si(CPUX86State *env, ZMMReg *s) { - return x86_float32_to_int32(s->XMM_S(0), &env->sse_status); + return x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); } -int32_t helper_cvtsd2si(CPUX86State *env, XMMReg *s) +int32_t helper_cvtsd2si(CPUX86State *env, ZMMReg *s) { - return x86_float64_to_int32(s->XMM_D(0), &env->sse_status); + return x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); } #ifdef TARGET_X86_64 -int64_t helper_cvtss2sq(CPUX86State *env, XMMReg *s) +int64_t helper_cvtss2sq(CPUX86State *env, ZMMReg *s) { - return x86_float32_to_int64(s->XMM_S(0), &env->sse_status); + return x86_float32_to_int64(s->ZMM_S(0), &env->sse_status); } -int64_t helper_cvtsd2sq(CPUX86State *env, XMMReg *s) +int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s) { - return x86_float64_to_int64(s->XMM_D(0), &env->sse_status); + return x86_float64_to_int64(s->ZMM_D(0), &env->sse_status); } #endif /* float to integer truncated */ -void helper_cvttps2dq(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_cvttps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_L(0) = x86_float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); - d->XMM_L(1) = x86_float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status); - d->XMM_L(2) = x86_float32_to_int32_round_to_zero(s->XMM_S(2), &env->sse_status); - d->XMM_L(3) = x86_float32_to_int32_round_to_zero(s->XMM_S(3), &env->sse_status); + d->ZMM_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); + d->ZMM_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status); + d->ZMM_L(2) = x86_float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status); + d->ZMM_L(3) = x86_float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status); } -void helper_cvttpd2dq(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_cvttpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_L(0) = x86_float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); - d->XMM_L(1) = 
x86_float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status); - d->XMM_Q(1) = 0; + d->ZMM_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); + d->ZMM_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status); + d->ZMM_Q(1) = 0; } -void helper_cvttps2pi(CPUX86State *env, MMXReg *d, XMMReg *s) +void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { - d->MMX_L(0) = x86_float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); - d->MMX_L(1) = x86_float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status); + d->MMX_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); + d->MMX_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status); } -void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, XMMReg *s) +void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { - d->MMX_L(0) = x86_float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); - d->MMX_L(1) = x86_float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status); + d->MMX_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); + d->MMX_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status); } -int32_t helper_cvttss2si(CPUX86State *env, XMMReg *s) +int32_t helper_cvttss2si(CPUX86State *env, ZMMReg *s) { - return x86_float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); + return x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); } -int32_t helper_cvttsd2si(CPUX86State *env, XMMReg *s) +int32_t helper_cvttsd2si(CPUX86State *env, ZMMReg *s) { - return x86_float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); + return x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); } #ifdef TARGET_X86_64 -int64_t helper_cvttss2sq(CPUX86State *env, XMMReg *s) +int64_t helper_cvttss2sq(CPUX86State *env, ZMMReg *s) { - return x86_float32_to_int64_round_to_zero(s->XMM_S(0), &env->sse_status); + return x86_float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status); } -int64_t helper_cvttsd2sq(CPUX86State *env, XMMReg *s) +int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s) { - return x86_float64_to_int64_round_to_zero(s->XMM_D(0), &env->sse_status); + return x86_float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status); } #endif -void helper_rsqrtps(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_S(0) = float32_div(float32_one, - float32_sqrt(s->XMM_S(0), &env->sse_status), + d->ZMM_S(0) = float32_div(float32_one, + float32_sqrt(s->ZMM_S(0), &env->sse_status), &env->sse_status); - d->XMM_S(1) = float32_div(float32_one, - float32_sqrt(s->XMM_S(1), &env->sse_status), + d->ZMM_S(1) = float32_div(float32_one, + float32_sqrt(s->ZMM_S(1), &env->sse_status), &env->sse_status); - d->XMM_S(2) = float32_div(float32_one, - float32_sqrt(s->XMM_S(2), &env->sse_status), + d->ZMM_S(2) = float32_div(float32_one, + float32_sqrt(s->ZMM_S(2), &env->sse_status), &env->sse_status); - d->XMM_S(3) = float32_div(float32_one, - float32_sqrt(s->XMM_S(3), &env->sse_status), + d->ZMM_S(3) = float32_div(float32_one, + float32_sqrt(s->ZMM_S(3), &env->sse_status), &env->sse_status); } -void helper_rsqrtss(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_S(0) = float32_div(float32_one, - float32_sqrt(s->XMM_S(0), &env->sse_status), + d->ZMM_S(0) = float32_div(float32_one, + float32_sqrt(s->ZMM_S(0), &env->sse_status), &env->sse_status); } -void helper_rcpps(CPUX86State *env, 
XMMReg *d, XMMReg *s) +void helper_rcpps(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_S(0) = float32_div(float32_one, s->XMM_S(0), &env->sse_status); - d->XMM_S(1) = float32_div(float32_one, s->XMM_S(1), &env->sse_status); - d->XMM_S(2) = float32_div(float32_one, s->XMM_S(2), &env->sse_status); - d->XMM_S(3) = float32_div(float32_one, s->XMM_S(3), &env->sse_status); + d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status); + d->ZMM_S(1) = float32_div(float32_one, s->ZMM_S(1), &env->sse_status); + d->ZMM_S(2) = float32_div(float32_one, s->ZMM_S(2), &env->sse_status); + d->ZMM_S(3) = float32_div(float32_one, s->ZMM_S(3), &env->sse_status); } -void helper_rcpss(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_S(0) = float32_div(float32_one, s->XMM_S(0), &env->sse_status); + d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status); } static inline uint64_t helper_extrq(uint64_t src, int shift, int len) @@ -884,19 +884,19 @@ static inline uint64_t helper_extrq(uint64_t src, int shift, int len) if (len == 0) { mask = ~0LL; } else { - mask = (1ULL << (len & 0x3f)) - 1; + mask = (1ULL << len) - 1; } - return (src >> (shift & 0x3f)) & mask; + return (src >> shift) & mask; } -void helper_extrq_r(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), s->XMM_B(1), s->XMM_B(0)); + d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1), s->ZMM_B(0)); } -void helper_extrq_i(CPUX86State *env, XMMReg *d, int index, int length) +void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length) { - d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), index, length); + d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), index, length); } static inline uint64_t helper_insertq(uint64_t src, int shift, int len) @@ -906,99 +906,99 @@ static inline uint64_t helper_insertq(uint64_t src, int shift, int len) if (len == 0) { mask = ~0ULL; } else { - mask = (1ULL << (len & 0x3f)) - 1; + mask = (1ULL << len) - 1; } - return (src & ~(mask << (shift & 0x3f))) | ((src & mask) << (shift & 0x3f)); + return (src & ~(mask << shift)) | ((src & mask) << shift); } -void helper_insertq_r(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_Q(0) = helper_insertq(s->XMM_Q(0), s->XMM_B(9), s->XMM_B(8)); + d->ZMM_Q(0) = helper_insertq(s->ZMM_Q(0), s->ZMM_B(9), s->ZMM_B(8)); } -void helper_insertq_i(CPUX86State *env, XMMReg *d, int index, int length) +void helper_insertq_i(CPUX86State *env, ZMMReg *d, int index, int length) { - d->XMM_Q(0) = helper_insertq(d->XMM_Q(0), index, length); + d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), index, length); } -void helper_haddps(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_haddps(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - XMMReg r; + ZMMReg r; - r.XMM_S(0) = float32_add(d->XMM_S(0), d->XMM_S(1), &env->sse_status); - r.XMM_S(1) = float32_add(d->XMM_S(2), d->XMM_S(3), &env->sse_status); - r.XMM_S(2) = float32_add(s->XMM_S(0), s->XMM_S(1), &env->sse_status); - r.XMM_S(3) = float32_add(s->XMM_S(2), s->XMM_S(3), &env->sse_status); + r.ZMM_S(0) = float32_add(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status); + r.ZMM_S(1) = float32_add(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status); + r.ZMM_S(2) = float32_add(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status); + r.ZMM_S(3) = float32_add(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status); *d = r; } -void helper_haddpd(CPUX86State *env, XMMReg *d, 
XMMReg *s) +void helper_haddpd(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - XMMReg r; + ZMMReg r; - r.XMM_D(0) = float64_add(d->XMM_D(0), d->XMM_D(1), &env->sse_status); - r.XMM_D(1) = float64_add(s->XMM_D(0), s->XMM_D(1), &env->sse_status); + r.ZMM_D(0) = float64_add(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status); + r.ZMM_D(1) = float64_add(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status); *d = r; } -void helper_hsubps(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_hsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - XMMReg r; + ZMMReg r; - r.XMM_S(0) = float32_sub(d->XMM_S(0), d->XMM_S(1), &env->sse_status); - r.XMM_S(1) = float32_sub(d->XMM_S(2), d->XMM_S(3), &env->sse_status); - r.XMM_S(2) = float32_sub(s->XMM_S(0), s->XMM_S(1), &env->sse_status); - r.XMM_S(3) = float32_sub(s->XMM_S(2), s->XMM_S(3), &env->sse_status); + r.ZMM_S(0) = float32_sub(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status); + r.ZMM_S(1) = float32_sub(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status); + r.ZMM_S(2) = float32_sub(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status); + r.ZMM_S(3) = float32_sub(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status); *d = r; } -void helper_hsubpd(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_hsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - XMMReg r; + ZMMReg r; - r.XMM_D(0) = float64_sub(d->XMM_D(0), d->XMM_D(1), &env->sse_status); - r.XMM_D(1) = float64_sub(s->XMM_D(0), s->XMM_D(1), &env->sse_status); + r.ZMM_D(0) = float64_sub(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status); + r.ZMM_D(1) = float64_sub(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status); *d = r; } -void helper_addsubps(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_addsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_S(0) = float32_sub(d->XMM_S(0), s->XMM_S(0), &env->sse_status); - d->XMM_S(1) = float32_add(d->XMM_S(1), s->XMM_S(1), &env->sse_status); - d->XMM_S(2) = float32_sub(d->XMM_S(2), s->XMM_S(2), &env->sse_status); - d->XMM_S(3) = float32_add(d->XMM_S(3), s->XMM_S(3), &env->sse_status); + d->ZMM_S(0) = float32_sub(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status); + d->ZMM_S(1) = float32_add(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status); + d->ZMM_S(2) = float32_sub(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status); + d->ZMM_S(3) = float32_add(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status); } -void helper_addsubpd(CPUX86State *env, XMMReg *d, XMMReg *s) +void helper_addsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->XMM_D(0) = float64_sub(d->XMM_D(0), s->XMM_D(0), &env->sse_status); - d->XMM_D(1) = float64_add(d->XMM_D(1), s->XMM_D(1), &env->sse_status); + d->ZMM_D(0) = float64_sub(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status); + d->ZMM_D(1) = float64_add(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status); } /* XXX: unordered */ #define SSE_HELPER_CMP(name, F) \ void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \ { \ - d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ - d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \ - d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \ - d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \ + d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ + d->ZMM_L(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \ + d->ZMM_L(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \ + d->ZMM_L(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \ } \ \ void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \ { \ - d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ + d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \ } \ \ void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \ { \ - d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ - d->XMM_Q(1) = F(64, 
d->XMM_D(1), s->XMM_D(1)); \ + d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ + d->ZMM_Q(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \ } \ \ void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \ { \ - d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ + d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \ } #define FPU_CMPEQ(size, a, b) \ @@ -1034,8 +1034,8 @@ void helper_ucomiss(CPUX86State *env, Reg *d, Reg *s) int ret; float32 s0, s1; - s0 = d->XMM_S(0); - s1 = s->XMM_S(0); + s0 = d->ZMM_S(0); + s1 = s->ZMM_S(0); ret = float32_compare_quiet(s0, s1, &env->sse_status); CC_SRC = comis_eflags[ret + 1]; } @@ -1045,8 +1045,8 @@ void helper_comiss(CPUX86State *env, Reg *d, Reg *s) int ret; float32 s0, s1; - s0 = d->XMM_S(0); - s1 = s->XMM_S(0); + s0 = d->ZMM_S(0); + s1 = s->ZMM_S(0); ret = float32_compare(s0, s1, &env->sse_status); CC_SRC = comis_eflags[ret + 1]; } @@ -1056,8 +1056,8 @@ void helper_ucomisd(CPUX86State *env, Reg *d, Reg *s) int ret; float64 d0, d1; - d0 = d->XMM_D(0); - d1 = s->XMM_D(0); + d0 = d->ZMM_D(0); + d1 = s->ZMM_D(0); ret = float64_compare_quiet(d0, d1, &env->sse_status); CC_SRC = comis_eflags[ret + 1]; } @@ -1067,8 +1067,8 @@ void helper_comisd(CPUX86State *env, Reg *d, Reg *s) int ret; float64 d0, d1; - d0 = d->XMM_D(0); - d1 = s->XMM_D(0); + d0 = d->ZMM_D(0); + d1 = s->ZMM_D(0); ret = float64_compare(d0, d1, &env->sse_status); CC_SRC = comis_eflags[ret + 1]; } @@ -1077,10 +1077,10 @@ uint32_t helper_movmskps(CPUX86State *env, Reg *s) { int b0, b1, b2, b3; - b0 = s->XMM_L(0) >> 31; - b1 = s->XMM_L(1) >> 31; - b2 = s->XMM_L(2) >> 31; - b3 = s->XMM_L(3) >> 31; + b0 = s->ZMM_L(0) >> 31; + b1 = s->ZMM_L(1) >> 31; + b2 = s->ZMM_L(2) >> 31; + b3 = s->ZMM_L(3) >> 31; return b0 | (b1 << 1) | (b2 << 2) | (b3 << 3); } @@ -1088,8 +1088,8 @@ uint32_t helper_movmskpd(CPUX86State *env, Reg *s) { int b0, b1; - b0 = s->XMM_L(1) >> 31; - b1 = s->XMM_L(3) >> 31; + b0 = s->ZMM_L(1) >> 31; + b1 = s->ZMM_L(3) >> 31; return b0 | (b1 << 1); } @@ -1447,9 +1447,9 @@ void glue(helper_phaddw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) void glue(helper_phaddd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { - d->L(0) = (int64_t)d->L(0) + (int32_t)d->L(1); + d->L(0) = (int32_t)d->L(0) + (int32_t)d->L(1); XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3)); - d->L((1 << SHIFT) + 0) = (uint32_t)((int32_t)s->L(0) + (uint32_t)s->L(1)); + d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1); XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3)); } @@ -1501,9 +1501,9 @@ void glue(helper_phsubw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) void glue(helper_phsubd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { - d->L(0) = (int32_t)((int64_t)d->L(0) - (int64_t)d->L(1)); - XMM_ONLY(d->L(1) = (int32_t)((int64_t)d->L(2) - (int64_t)d->L(3))); - d->L((1 << SHIFT) + 0) = (uint32_t)((int64_t)s->L(0) - (int64_t)s->L(1)); + d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1); + XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3)); + d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) - (int32_t)s->L(1); XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3)); } @@ -1521,7 +1521,7 @@ void glue(helper_phsubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) #define FABSB(_, x) (x > INT8_MAX ? -(int8_t)x : x) #define FABSW(_, x) (x > INT16_MAX ? -(int16_t)x : x) -#define FABSL(_, x) ((x > INT32_MAX && x != 0x80000000) ? -(int32_t)x : x) +#define FABSL(_, x) (x > INT32_MAX ? 
-(int32_t)x : x) SSE_HELPER_B(helper_pabsb, FABSB) SSE_HELPER_W(helper_pabsw, FABSW) SSE_HELPER_L(helper_pabsd, FABSL) @@ -1531,7 +1531,7 @@ SSE_HELPER_W(helper_pmulhrsw, FMULHRSW) #define FSIGNB(d, s) (s <= INT8_MAX ? s ? d : 0 : -(int8_t)d) #define FSIGNW(d, s) (s <= INT16_MAX ? s ? d : 0 : -(int16_t)d) -#define FSIGNL(d, s) (s <= INT32_MAX ? s ? d && d != 0x80000000 : 0 : -(int32_t)d) +#define FSIGNL(d, s) (s <= INT32_MAX ? s ? d : 0 : -(int32_t)d) SSE_HELPER_B(helper_psignb, FSIGNB) SSE_HELPER_W(helper_psignw, FSIGNW) SSE_HELPER_L(helper_psignd, FSIGNL) @@ -1649,18 +1649,18 @@ void glue(helper_ptest, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) #define SSE_HELPER_F(name, elem, num, F) \ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ { \ - d->elem(0) = F(0); \ - d->elem(1) = F(1); \ if (num > 2) { \ - d->elem(2) = F(2); \ - d->elem(3) = F(3); \ if (num > 4) { \ - d->elem(4) = F(4); \ - d->elem(5) = F(5); \ - d->elem(6) = F(6); \ d->elem(7) = F(7); \ + d->elem(6) = F(6); \ + d->elem(5) = F(5); \ + d->elem(4) = F(4); \ } \ + d->elem(3) = F(3); \ + d->elem(2) = F(2); \ } \ + d->elem(1) = F(1); \ + d->elem(0) = F(0); \ } SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B) @@ -1687,14 +1687,17 @@ SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ) void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) { - d->W(0) = satuw((int32_t) d->L(0)); - d->W(1) = satuw((int32_t) d->L(1)); - d->W(2) = satuw((int32_t) d->L(2)); - d->W(3) = satuw((int32_t) d->L(3)); - d->W(4) = satuw((int32_t) s->L(0)); - d->W(5) = satuw((int32_t) s->L(1)); - d->W(6) = satuw((int32_t) s->L(2)); - d->W(7) = satuw((int32_t) s->L(3)); + Reg r; + + r.W(0) = satuw((int32_t) d->L(0)); + r.W(1) = satuw((int32_t) d->L(1)); + r.W(2) = satuw((int32_t) d->L(2)); + r.W(3) = satuw((int32_t) d->L(3)); + r.W(4) = satuw((int32_t) s->L(0)); + r.W(5) = satuw((int32_t) s->L(1)); + r.W(6) = satuw((int32_t) s->L(2)); + r.W(7) = satuw((int32_t) s->L(3)); + *d = r; } #define FMINSB(d, s) MIN((int8_t)d, (int8_t)s) @@ -1710,7 +1713,7 @@ SSE_HELPER_L(helper_pmaxsd, FMAXSD) SSE_HELPER_W(helper_pmaxuw, MAX) SSE_HELPER_L(helper_pmaxud, MAX) -#define FMULLD(d, s) ((int64_t)d * (int32_t)s) +#define FMULLD(d, s) ((int32_t)d * (int32_t)s) SSE_HELPER_L(helper_pmulld, FMULLD) void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) @@ -1739,10 +1742,10 @@ void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) idx = 7; } - d->Q(1) = 0; - d->L(1) = 0; - d->W(1) = idx; d->W(0) = s->W(idx); + d->W(1) = idx; + d->L(1) = 0; + d->Q(1) = 0; } void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, @@ -1768,10 +1771,10 @@ void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, } } - d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status); - d->XMM_S(1) = float32_round_to_int(s->XMM_S(1), &env->sse_status); - d->XMM_S(2) = float32_round_to_int(s->XMM_S(2), &env->sse_status); - d->XMM_S(3) = float32_round_to_int(s->XMM_S(3), &env->sse_status); + d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status); + d->ZMM_S(1) = float32_round_to_int(s->ZMM_S(1), &env->sse_status); + d->ZMM_S(2) = float32_round_to_int(s->ZMM_S(2), &env->sse_status); + d->ZMM_S(3) = float32_round_to_int(s->ZMM_S(3), &env->sse_status); #if 0 /* TODO */ if (mode & (1 << 3)) { @@ -1806,8 +1809,8 @@ void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, } } - d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status); - d->XMM_D(1) = float64_round_to_int(s->XMM_D(1), &env->sse_status); + d->ZMM_D(0) 
= float64_round_to_int(s->ZMM_D(0), &env->sse_status); + d->ZMM_D(1) = float64_round_to_int(s->ZMM_D(1), &env->sse_status); #if 0 /* TODO */ if (mode & (1 << 3)) { @@ -1842,7 +1845,7 @@ void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, } } - d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status); + d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status); #if 0 /* TODO */ if (mode & (1 << 3)) { @@ -1877,7 +1880,7 @@ void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, } } - d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status); + d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status); #if 0 /* TODO */ if (mode & (1 << 3)) { @@ -1900,32 +1903,32 @@ void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask) if (mask & (1 << 4)) { iresult = float32_add(iresult, - float32_mul(d->XMM_S(0), s->XMM_S(0), + float32_mul(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status), &env->sse_status); } if (mask & (1 << 5)) { iresult = float32_add(iresult, - float32_mul(d->XMM_S(1), s->XMM_S(1), + float32_mul(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status), &env->sse_status); } if (mask & (1 << 6)) { iresult = float32_add(iresult, - float32_mul(d->XMM_S(2), s->XMM_S(2), + float32_mul(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status), &env->sse_status); } if (mask & (1 << 7)) { iresult = float32_add(iresult, - float32_mul(d->XMM_S(3), s->XMM_S(3), + float32_mul(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status), &env->sse_status); } - d->XMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero; - d->XMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero; - d->XMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero; - d->XMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero; + d->ZMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero; + d->ZMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero; + d->ZMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero; + d->ZMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero; } void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask) @@ -1934,18 +1937,18 @@ void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask) if (mask & (1 << 4)) { iresult = float64_add(iresult, - float64_mul(d->XMM_D(0), s->XMM_D(0), + float64_mul(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status), &env->sse_status); } if (mask & (1 << 5)) { iresult = float64_add(iresult, - float64_mul(d->XMM_D(1), s->XMM_D(1), + float64_mul(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status), &env->sse_status); } - d->XMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero; - d->XMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero; + d->ZMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero; + d->ZMM_D(1) = (mask & (1 << 1)) ? 
iresult : float64_zero; } void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, @@ -1973,11 +1976,11 @@ SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ) static inline int pcmp_elen(CPUX86State *env, int reg, uint32_t ctrl) { - unsigned int val; + int val; /* Presence of REX.W is indicated by a bit higher than 7 set */ if (ctrl >> 8) { - val = abs1((int)env->regs[reg]); + val = abs1((int64_t)env->regs[reg]); } else { val = abs1((int32_t)env->regs[reg]); } @@ -1991,9 +1994,6 @@ static inline int pcmp_elen(CPUX86State *env, int reg, uint32_t ctrl) return 16; } } - if (val == 0x80000000) { - val = 0; - } return val; } @@ -2196,32 +2196,6 @@ target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len) return crc; } -#define POPMASK(i) ((target_ulong) -1 / ((1LL << (1 << i)) + 1)) -#define POPCOUNT(n, i) ((n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i))) -target_ulong helper_popcnt(CPUX86State *env, target_ulong n, uint32_t type) -{ - CC_SRC = n ? 0 : CC_Z; - - n = POPCOUNT(n, 0); - n = POPCOUNT(n, 1); - n = POPCOUNT(n, 2); - n = POPCOUNT(n, 3); - if (type == 1) { - return n & 0xff; - } - - n = POPCOUNT(n, 4); -#ifndef TARGET_X86_64 - return n; -#else - if (type == 2) { - return n & 0xff; - } - - return POPCOUNT(n, 5); -#endif -} - void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t ctrl) { @@ -2267,7 +2241,7 @@ void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) Reg rk = *s; for (i = 0; i < 16; i++) { - d->B(i) = rk.B(i) ^ (AES_Td4[st.B(AES_ishifts[i])] & 0xff); + d->B(i) = rk.B(i) ^ (AES_isbox[st.B(AES_ishifts[i])]); } } @@ -2292,7 +2266,7 @@ void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) Reg rk = *s; for (i = 0; i < 16; i++) { - d->B(i) = rk.B(i) ^ (AES_Te4[st.B(AES_shifts[i])] & 0xff); + d->B(i) = rk.B(i) ^ (AES_sbox[st.B(AES_shifts[i])]); } } @@ -2303,10 +2277,10 @@ void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) Reg tmp = *s; for (i = 0 ; i < 4 ; i++) { - d->L(i) = bswap32(AES_Td0[AES_Te4[tmp.B(4*i+0)] & 0xff] ^ - AES_Td1[AES_Te4[tmp.B(4*i+1)] & 0xff] ^ - AES_Td2[AES_Te4[tmp.B(4*i+2)] & 0xff] ^ - AES_Td3[AES_Te4[tmp.B(4*i+3)] & 0xff]); + d->L(i) = bswap32(AES_imc[tmp.B(4*i+0)][0] ^ + AES_imc[tmp.B(4*i+1)][1] ^ + AES_imc[tmp.B(4*i+2)][2] ^ + AES_imc[tmp.B(4*i+3)][3]); } } @@ -2317,8 +2291,8 @@ void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg tmp = *s; for (i = 0 ; i < 4 ; i++) { - d->B(i) = AES_Te4[tmp.B(i + 4)] & 0xff; - d->B(i + 8) = AES_Te4[tmp.B(i + 12)] & 0xff; + d->B(i) = AES_sbox[tmp.B(i + 4)]; + d->B(i + 8) = AES_sbox[tmp.B(i + 12)]; } d->L(1) = (d->L(0) << 24 | d->L(0) >> 8) ^ ctrl; d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl; diff --git a/qemu/target-i386/ops_sse_header.h b/qemu/target/i386/ops_sse_header.h similarity index 85% rename from qemu/target-i386/ops_sse_header.h rename to qemu/target/i386/ops_sse_header.h index a68c7cc0..094aafc5 100644 --- a/qemu/target-i386/ops_sse_header.h +++ b/qemu/target/i386/ops_sse_header.h @@ -20,18 +20,18 @@ #define Reg MMXReg #define SUFFIX _mmx #else -#define Reg XMMReg +#define Reg ZMMReg #define SUFFIX _xmm #endif #define dh_alias_Reg ptr -#define dh_alias_XMMReg ptr +#define dh_alias_ZMMReg ptr #define dh_alias_MMXReg ptr #define dh_ctype_Reg Reg * -#define dh_ctype_XMMReg XMMReg * +#define dh_ctype_ZMMReg ZMMReg * #define dh_ctype_MMXReg MMXReg * #define dh_is_signed_Reg dh_is_signed_ptr -#define dh_is_signed_XMMReg dh_is_signed_ptr +#define dh_is_signed_ZMMReg dh_is_signed_ptr #define 
dh_is_signed_MMXReg dh_is_signed_ptr DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg) @@ -154,52 +154,52 @@ DEF_HELPER_3(cvtss2sd, void, env, Reg, Reg) DEF_HELPER_3(cvtsd2ss, void, env, Reg, Reg) DEF_HELPER_3(cvtdq2ps, void, env, Reg, Reg) DEF_HELPER_3(cvtdq2pd, void, env, Reg, Reg) -DEF_HELPER_3(cvtpi2ps, void, env, XMMReg, MMXReg) -DEF_HELPER_3(cvtpi2pd, void, env, XMMReg, MMXReg) -DEF_HELPER_3(cvtsi2ss, void, env, XMMReg, i32) -DEF_HELPER_3(cvtsi2sd, void, env, XMMReg, i32) +DEF_HELPER_3(cvtpi2ps, void, env, ZMMReg, MMXReg) +DEF_HELPER_3(cvtpi2pd, void, env, ZMMReg, MMXReg) +DEF_HELPER_3(cvtsi2ss, void, env, ZMMReg, i32) +DEF_HELPER_3(cvtsi2sd, void, env, ZMMReg, i32) #ifdef TARGET_X86_64 -DEF_HELPER_3(cvtsq2ss, void, env, XMMReg, i64) -DEF_HELPER_3(cvtsq2sd, void, env, XMMReg, i64) +DEF_HELPER_3(cvtsq2ss, void, env, ZMMReg, i64) +DEF_HELPER_3(cvtsq2sd, void, env, ZMMReg, i64) #endif -DEF_HELPER_3(cvtps2dq, void, env, XMMReg, XMMReg) -DEF_HELPER_3(cvtpd2dq, void, env, XMMReg, XMMReg) -DEF_HELPER_3(cvtps2pi, void, env, MMXReg, XMMReg) -DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, XMMReg) -DEF_HELPER_2(cvtss2si, s32, env, XMMReg) -DEF_HELPER_2(cvtsd2si, s32, env, XMMReg) +DEF_HELPER_3(cvtps2dq, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(cvtpd2dq, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(cvtps2pi, void, env, MMXReg, ZMMReg) +DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, ZMMReg) +DEF_HELPER_2(cvtss2si, s32, env, ZMMReg) +DEF_HELPER_2(cvtsd2si, s32, env, ZMMReg) #ifdef TARGET_X86_64 -DEF_HELPER_2(cvtss2sq, s64, env, XMMReg) -DEF_HELPER_2(cvtsd2sq, s64, env, XMMReg) +DEF_HELPER_2(cvtss2sq, s64, env, ZMMReg) +DEF_HELPER_2(cvtsd2sq, s64, env, ZMMReg) #endif -DEF_HELPER_3(cvttps2dq, void, env, XMMReg, XMMReg) -DEF_HELPER_3(cvttpd2dq, void, env, XMMReg, XMMReg) -DEF_HELPER_3(cvttps2pi, void, env, MMXReg, XMMReg) -DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, XMMReg) -DEF_HELPER_2(cvttss2si, s32, env, XMMReg) -DEF_HELPER_2(cvttsd2si, s32, env, XMMReg) +DEF_HELPER_3(cvttps2dq, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(cvttpd2dq, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(cvttps2pi, void, env, MMXReg, ZMMReg) +DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, ZMMReg) +DEF_HELPER_2(cvttss2si, s32, env, ZMMReg) +DEF_HELPER_2(cvttsd2si, s32, env, ZMMReg) #ifdef TARGET_X86_64 -DEF_HELPER_2(cvttss2sq, s64, env, XMMReg) -DEF_HELPER_2(cvttsd2sq, s64, env, XMMReg) +DEF_HELPER_2(cvttss2sq, s64, env, ZMMReg) +DEF_HELPER_2(cvttsd2sq, s64, env, ZMMReg) #endif -DEF_HELPER_3(rsqrtps, void, env, XMMReg, XMMReg) -DEF_HELPER_3(rsqrtss, void, env, XMMReg, XMMReg) -DEF_HELPER_3(rcpps, void, env, XMMReg, XMMReg) -DEF_HELPER_3(rcpss, void, env, XMMReg, XMMReg) -DEF_HELPER_3(extrq_r, void, env, XMMReg, XMMReg) -DEF_HELPER_4(extrq_i, void, env, XMMReg, int, int) -DEF_HELPER_3(insertq_r, void, env, XMMReg, XMMReg) -DEF_HELPER_4(insertq_i, void, env, XMMReg, int, int) -DEF_HELPER_3(haddps, void, env, XMMReg, XMMReg) -DEF_HELPER_3(haddpd, void, env, XMMReg, XMMReg) -DEF_HELPER_3(hsubps, void, env, XMMReg, XMMReg) -DEF_HELPER_3(hsubpd, void, env, XMMReg, XMMReg) -DEF_HELPER_3(addsubps, void, env, XMMReg, XMMReg) -DEF_HELPER_3(addsubpd, void, env, XMMReg, XMMReg) +DEF_HELPER_3(rsqrtps, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(rsqrtss, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(rcpps, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(rcpss, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(extrq_r, void, env, ZMMReg, ZMMReg) +DEF_HELPER_4(extrq_i, void, env, ZMMReg, int, int) +DEF_HELPER_3(insertq_r, void, env, ZMMReg, ZMMReg) +DEF_HELPER_4(insertq_i, void, env, 
ZMMReg, int, int) +DEF_HELPER_3(haddps, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(haddpd, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(hsubps, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(hsubpd, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(addsubps, void, env, ZMMReg, ZMMReg) +DEF_HELPER_3(addsubpd, void, env, ZMMReg, ZMMReg) #define SSE_HELPER_CMP(name, F) \ DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \ @@ -333,7 +333,6 @@ DEF_HELPER_4(glue(pcmpestrm, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(pcmpistri, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_4(glue(pcmpistrm, SUFFIX), void, env, Reg, Reg, i32) DEF_HELPER_3(crc32, tl, i32, tl, i32) -DEF_HELPER_3(popcnt, tl, env, tl, i32) #endif /* AES-NI op helpers */ diff --git a/qemu/target-i386/seg_helper.c b/qemu/target/i386/seg_helper.c similarity index 66% rename from qemu/target-i386/seg_helper.c rename to qemu/target/i386/seg_helper.c index fd9765a1..27efae9a 100644 --- a/qemu/target-i386/seg_helper.c +++ b/qemu/target/i386/seg_helper.c @@ -18,11 +18,15 @@ * License along with this library; if not, see . */ +#include "qemu/osdep.h" #include "cpu.h" #include "qemu/log.h" #include "exec/helper-proto.h" +#include "exec/exec-all.h" #include "exec/cpu_ldst.h" + #include "uc_priv.h" +#include //#define DEBUG_PCALL @@ -35,27 +39,42 @@ # define LOG_PCALL_STATE(cpu) do { } while (0) #endif -#ifndef CONFIG_USER_ONLY -#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env)) -#define MEMSUFFIX _kernel -#define DATA_SIZE 1 -#include "exec/cpu_ldst_template.h" +/* + * TODO: Convert callers to compute cpu_mmu_index_kernel once + * and use *_mmuidx_ra directly. + */ +#define cpu_ldub_kernel_ra(e, p, r) \ + cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) +#define cpu_lduw_kernel_ra(e, p, r) \ + cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) +#define cpu_ldl_kernel_ra(e, p, r) \ + cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) +#define cpu_ldq_kernel_ra(e, p, r) \ + cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) -#define DATA_SIZE 2 -#include "exec/cpu_ldst_template.h" +#define cpu_stb_kernel_ra(e, p, v, r) \ + cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) +#define cpu_stw_kernel_ra(e, p, v, r) \ + cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) +#define cpu_stl_kernel_ra(e, p, v, r) \ + cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) +#define cpu_stq_kernel_ra(e, p, v, r) \ + cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) -#define DATA_SIZE 4 -#include "exec/cpu_ldst_template.h" +#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0) +#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0) +#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0) +#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0) -#define DATA_SIZE 8 -#include "exec/cpu_ldst_template.h" -#undef CPU_MMU_INDEX -#undef MEMSUFFIX -#endif +#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0) +#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0) +#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0) +#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0) /* return non zero if error */ -static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr, - uint32_t *e2_ptr, int selector) +static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr, + uint32_t *e2_ptr, int selector, + uintptr_t retaddr) { SegmentCache *dt; int index; @@ -71,11 +90,17 @@ static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr, return -1; } ptr = dt->base + index; - *e1_ptr = cpu_ldl_kernel(env, ptr); - *e2_ptr = 
cpu_ldl_kernel(env, ptr + 4); + *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr); + *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); return 0; } +static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr, + uint32_t *e2_ptr, int selector) +{ + return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0); +} + static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) { unsigned int limit; @@ -111,9 +136,10 @@ static inline void load_seg_vm(CPUX86State *env, int seg, int selector) } static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr, - uint32_t *esp_ptr, int dpl) + uint32_t *esp_ptr, int dpl, + uintptr_t retaddr) { - X86CPU *cpu = x86_env_get_cpu(env); + X86CPU *cpu = env_archcpu(env); int type, index, shift; #if 0 @@ -140,60 +166,61 @@ static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr, shift = type >> 3; index = (dpl * 4 + 2) << shift; if (index + (4 << shift) - 1 > env->tr.limit) { - raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr); } if (shift == 0) { - *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index); - *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2); + *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr); + *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr); } else { - *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index); - *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4); + *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr); + *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr); } } -static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl) +static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl, + uintptr_t retaddr) { uint32_t e1, e2; int rpl, dpl; if ((selector & 0xfffc) != 0) { - if (load_segment(env, &e1, &e2, selector) != 0) { - raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) { + raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } if (!(e2 & DESC_S_MASK)) { - raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } rpl = selector & 3; dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (seg_reg == R_CS) { if (!(e2 & DESC_CS_MASK)) { - raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } if (dpl != rpl) { - raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } } else if (seg_reg == R_SS) { /* SS must be writable data */ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { - raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } if (dpl != cpl || dpl != rpl) { - raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } } else { /* not readable code */ if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) { - raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } /* if data or non conforming code, checks the rights */ if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { if (dpl < cpl || dpl < rpl) { - raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, selector & 
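Every _ra-suffixed helper introduced in this hunk follows the same QEMU convention: the outermost TCG helper captures its host return address once with GETPC(), threads it down as a uintptr_t retaddr, and raise_exception_err_ra uses it to unwind the generated code and resynchronize guest state before the exception is delivered; a retaddr of 0 means the call did not come from translated code. A sketch of the shape, with a hypothetical check (my_check):

    /* Innermost worker takes an explicit host return address. */
    static void my_check_ra(CPUX86State *env, int selector, uintptr_t retaddr)
    {
        if ((selector & 0xfffc) == 0) {
            /* retaddr lets the exception path unwind to the faulting insn */
            raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
        }
    }

    /* Legacy entry point: retaddr 0 == not called from generated code. */
    static void my_check(CPUX86State *env, int selector)
    {
        my_check_ra(env, selector, 0);
    }

    /* Top-level TCG helper captures its own call site exactly once. */
    void helper_my_check(CPUX86State *env, int selector)
    {
        my_check_ra(env, selector, GETPC());
    }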
0xfffc, retaddr); } } } if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr); } cpu_x86_load_seg_cache(env, seg_reg, selector, get_seg_base(e1, e2), @@ -201,7 +228,7 @@ static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl) e2); } else { if (seg_reg == R_SS || seg_reg == R_CS) { - raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); } } } @@ -211,9 +238,9 @@ static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl) #define SWITCH_TSS_CALL 2 /* XXX: restore CPU state in registers (PowerPC case) */ -static void switch_tss(CPUX86State *env, int tss_selector, - uint32_t e1, uint32_t e2, int source, - uint32_t next_eip) +static void switch_tss_ra(CPUX86State *env, int tss_selector, + uint32_t e1, uint32_t e2, int source, + uint32_t next_eip, uintptr_t retaddr) { int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; target_ulong tss_base; @@ -231,26 +258,26 @@ static void switch_tss(CPUX86State *env, int tss_selector, /* if task gate, we read the TSS segment and we load it */ if (type == 5) { if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); } tss_selector = e1 >> 16; if (tss_selector & 4) { - raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); } - if (load_segment(env, &e1, &e2, tss_selector) != 0) { - raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc); + if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) { + raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); } if (e2 & DESC_S_MASK) { - raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); } type = (e2 >> DESC_TYPE_SHIFT) & 0xf; if ((type & 7) != 1) { - raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); } } if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); } if (type & 8) { @@ -262,7 +289,7 @@ static void switch_tss(CPUX86State *env, int tss_selector, tss_base = get_seg_base(e1, e2); if ((tss_selector & 4) != 0 || tss_limit < tss_limit_max) { - raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); } old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; if (old_type & 8) { @@ -274,30 +301,33 @@ static void switch_tss(CPUX86State *env, int tss_selector, /* read all the registers from the new TSS */ if (type & 8) { /* 32 bit */ - new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c); - new_eip = cpu_ldl_kernel(env, tss_base + 0x20); - new_eflags = cpu_ldl_kernel(env, tss_base + 0x24); + new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr); + new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr); + new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr); for (i = 0; i < 8; i++) { - new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4)); + new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4), + retaddr); } for (i = 0; i < 6; i++) { - new_segs[i] = cpu_lduw_kernel(env, tss_base 
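The checks in tss_load_seg above encode the task-switch segment rules: CS requires DPL == RPL, SS must be a writable data segment with DPL == CPL == RPL, data and non-conforming code segments require DPL >= CPL and DPL >= RPL, and every failure is reported as a TSS fault carrying the selector. The SS rule restated as a predicate, a sketch with a hypothetical name (ss_ok):

    #include <stdbool.h>
    #include <stdint.h>

    /* SS on a task switch: writable data segment, DPL == CPL == RPL. */
    static bool ss_ok(uint32_t e2, int selector, int cpl)
    {
        int rpl = selector & 3;
        int dpl = (e2 >> DESC_DPL_SHIFT) & 3;

        if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
            return false;                 /* code segment, or not writable */
        }
        return dpl == cpl && dpl == rpl;  /* all three privilege fields agree */
    }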
+ (0x48 + i * 4)); + new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4), + retaddr); } - new_ldt = cpu_lduw_kernel(env, tss_base + 0x60); - new_trap = cpu_ldl_kernel(env, tss_base + 0x64); + new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr); + new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr); } else { /* 16 bit */ new_cr3 = 0; - new_eip = cpu_lduw_kernel(env, tss_base + 0x0e); - new_eflags = cpu_lduw_kernel(env, tss_base + 0x10); + new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr); + new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr); for (i = 0; i < 8; i++) { - new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) | - 0xffff0000; + new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), + retaddr) | 0xffff0000; } for (i = 0; i < 4; i++) { - new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4)); + new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4), + retaddr); } - new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a); + new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr); new_segs[R_FS] = 0; new_segs[R_GS] = 0; new_trap = 0; @@ -312,10 +342,10 @@ static void switch_tss(CPUX86State *env, int tss_selector, /* XXX: it can still fail in some cases, so a bigger hack is necessary to valid the TLB after having done the accesses */ - v1 = cpu_ldub_kernel(env, env->tr.base); - v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max); - cpu_stb_kernel(env, env->tr.base, v1); - cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2); + v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr); + v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr); + cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr); + cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr); /* clear busy bit (it is restartable) */ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { @@ -323,9 +353,9 @@ static void switch_tss(CPUX86State *env, int tss_selector, uint32_t e2; ptr = env->gdt.base + (env->tr.selector & ~7); - e2 = cpu_ldl_kernel(env, ptr + 4); + e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); e2 &= ~DESC_TSS_BUSY_MASK; - cpu_stl_kernel(env, ptr + 4, e2); + cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr); } old_eflags = cpu_compute_eflags(env); if (source == SWITCH_TSS_IRET) { @@ -335,35 +365,35 @@ static void switch_tss(CPUX86State *env, int tss_selector, /* save the current state in the old TSS */ if (type & 8) { /* 32 bit */ - cpu_stl_kernel(env, env->tr.base + 0x20, next_eip); - cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags); - cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]); - cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]); - cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]); - cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]); - cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]); - cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]); - cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]); - cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]); + cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr); + cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr); + cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr); + cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr); + cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr); + 
cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr); + cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr); + cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr); + cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr); + cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr); for (i = 0; i < 6; i++) { - cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4), - env->segs[i].selector); + cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4), + env->segs[i].selector, retaddr); } } else { /* 16 bit */ - cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip); - cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags); - cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]); - cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]); - cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]); - cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]); - cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]); - cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]); - cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]); - cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]); + cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr); + cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr); + cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr); + cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr); + cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr); + cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr); + cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr); + cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr); + cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr); + cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr); for (i = 0; i < 4; i++) { - cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4), - env->segs[i].selector); + cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4), + env->segs[i].selector, retaddr); } } @@ -371,7 +401,7 @@ static void switch_tss(CPUX86State *env, int tss_selector, context */ if (source == SWITCH_TSS_CALL) { - cpu_stw_kernel(env, tss_base, env->tr.selector); + cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr); new_eflags |= NT_MASK; } @@ -381,9 +411,9 @@ static void switch_tss(CPUX86State *env, int tss_selector, uint32_t e2; ptr = env->gdt.base + (tss_selector & ~7); - e2 = cpu_ldl_kernel(env, ptr + 4); + e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); e2 |= DESC_TSS_BUSY_MASK; - cpu_stl_kernel(env, ptr + 4, e2); + cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr); } /* set the new CPU state */ @@ -435,23 +465,23 @@ static void switch_tss(CPUX86State *env, int tss_selector, /* load the LDT */ if (new_ldt & 4) { - raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); } if ((new_ldt & 0xfffc) != 0) { dt = &env->gdt; index = new_ldt & ~7; if ((index + 7) > dt->limit) { - raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); } ptr = dt->base + index; - e1 = cpu_ldl_kernel(env, ptr); - e2 = cpu_ldl_kernel(env, ptr + 4); + e1 = 
cpu_ldl_kernel_ra(env, ptr, retaddr); + e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { - raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); } if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); } load_seg_cache_raw_dt(&env->ldt, e1, e2); } @@ -459,36 +489,40 @@ static void switch_tss(CPUX86State *env, int tss_selector, /* load the segments */ if (!(new_eflags & VM_MASK)) { int cpl = new_segs[R_CS] & 3; - tss_load_seg(env, R_CS, new_segs[R_CS], cpl); - tss_load_seg(env, R_SS, new_segs[R_SS], cpl); - tss_load_seg(env, R_ES, new_segs[R_ES], cpl); - tss_load_seg(env, R_DS, new_segs[R_DS], cpl); - tss_load_seg(env, R_FS, new_segs[R_FS], cpl); - tss_load_seg(env, R_GS, new_segs[R_GS], cpl); + tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr); + tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr); + tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr); + tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr); + tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr); + tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr); } /* check that env->eip is in the CS segment limits */ if (new_eip > env->segs[R_CS].limit) { /* XXX: different exception if CALL? */ - raise_exception_err(env, EXCP0D_GPF, 0); + raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); } -#ifndef CONFIG_USER_ONLY /* reset local breakpoints */ if (env->dr[7] & DR7_LOCAL_BP_MASK) { - for (i = 0; i < DR7_MAX_BP; i++) { - if (hw_local_breakpoint_enabled(env->dr[7], i) && - !hw_global_breakpoint_enabled(env->dr[7], i)) { - hw_breakpoint_remove(env, i); - } - } - env->dr[7] &= ~DR7_LOCAL_BP_MASK; + cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK); } -#endif +} + +static void switch_tss(CPUX86State *env, int tss_selector, + uint32_t e1, uint32_t e2, int source, + uint32_t next_eip) +{ + switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0); } static inline unsigned int get_sp_mask(unsigned int e2) { +#ifdef TARGET_X86_64 + if (e2 & DESC_L_MASK) { + return 0; + } else +#endif if (e2 & DESC_B_MASK) { return 0xffffffff; } else { @@ -536,34 +570,39 @@ static int exception_has_error_code(int intno) #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask)))) /* XXX: add a is_user flag to have proper security support */ -#define PUSHW(ssp, sp, sp_mask, val) \ +#define PUSHW_RA(ssp, sp, sp_mask, val, ra) \ { \ sp -= 2; \ - cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \ + cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \ } -#define PUSHL(ssp, sp, sp_mask, val) \ +#define PUSHL_RA(ssp, sp, sp_mask, val, ra) \ { \ sp -= 4; \ - cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \ + cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \ } -#define POPW(ssp, sp, sp_mask, val) \ +#define POPW_RA(ssp, sp, sp_mask, val, ra) \ { \ - val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask))); \ + val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \ sp += 2; \ } -#define POPL(ssp, sp, sp_mask, val) \ +#define POPL_RA(ssp, sp, sp_mask, val, ra) \ { \ - val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \ + val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \ sp += 4; \ } +#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0) +#define PUSHL(ssp, sp, sp_mask, val) 
PUSHL_RA(ssp, sp, sp_mask, val, 0) +#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0) +#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0) + /* protected mode interrupt */ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, int error_code, unsigned int next_eip, - int is_hw) // qq + int is_hw) { SegmentCache *dt; target_ulong ptr, ssp; @@ -658,9 +697,12 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); } - if (!(e2 & DESC_C_MASK) && dpl < cpl) { + if (e2 & DESC_C_MASK) { + dpl = cpl; + } + if (dpl < cpl) { /* to inner privilege */ - get_ss_esp_from_tss(env, &ss, &esp, dpl); + get_ss_esp_from_tss(env, &ss, &esp, dpl, 0); if ((ss & 0xfffc) == 0) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } @@ -685,7 +727,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, new_stack = 1; sp_mask = get_sp_mask(ss_e2); ssp = get_seg_base(ss_e1, ss_e2); - } else if ((e2 & DESC_C_MASK) || dpl == cpl) { + } else { /* to same privilege */ if (vm86) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); @@ -694,13 +736,6 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, sp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; esp = env->regs[R_ESP]; - dpl = cpl; - } else { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); - new_stack = 0; /* avoid warning */ - sp_mask = 0; /* avoid warning */ - ssp = 0; /* avoid warning */ - esp = 0; /* avoid warning */ } shift = type >> 3; @@ -778,21 +813,24 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, #ifdef TARGET_X86_64 -#define PUSHQ(sp, val) \ +#define PUSHQ_RA(sp, val, ra) \ { \ sp -= 8; \ - cpu_stq_kernel(env, sp, (val)); \ + cpu_stq_kernel_ra(env, sp, (val), ra); \ } -#define POPQ(sp, val) \ +#define POPQ_RA(sp, val, ra) \ { \ - val = cpu_ldq_kernel(env, sp); \ + val = cpu_ldq_kernel_ra(env, sp, ra); \ sp += 8; \ } +#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0) +#define POPQ(sp, val) POPQ_RA(sp, val, 0) + static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) { - X86CPU *cpu = x86_env_get_cpu(env); + X86CPU *cpu = env_archcpu(env); int index; #if 0 @@ -812,7 +850,7 @@ static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) /* 64 bit interrupt */ static void do_interrupt64(CPUX86State *env, int intno, int is_int, - int error_code, target_ulong next_eip, int is_hw) // qq + int error_code, target_ulong next_eip, int is_hw) { SegmentCache *dt; target_ulong ptr; @@ -882,23 +920,21 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } - if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) { + if (e2 & DESC_C_MASK) { + dpl = cpl; + } + if (dpl < cpl || ist != 0) { /* to inner privilege */ new_stack = 1; esp = get_rsp_from_tss(env, ist != 0 ? 
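get_sp_mask and the PUSH*/POP* macros above cooperate: the mask decides how much of the stack pointer takes part in address formation, 0xffff for 16-bit stack segments, 0xffffffff when the B bit is set, and now 0 for 64-bit (L-bit) segments, which the long-mode paths treat as a sentinel for an unmasked RSP and serve with PUSHQ/POPQ instead. A worked example of why the masking matters on a 16-bit stack:

    #include <stdint.h>

    /* PUSHW on a 16-bit stack: sp is decremented, then masked and added
       to the segment base, so SP wraps inside the 64 KiB segment instead
       of escaping below its base. */
    static uint32_t pushw_addr(uint32_t ssp, uint32_t sp, uint32_t sp_mask)
    {
        sp -= 2;                        /* SP 0x0000 wraps to 0xfffffffe */
        return ssp + (sp & sp_mask);    /* masked back into the segment  */
    }

    /* pushw_addr(0x20000, 0x0000, 0xffff) == 0x2fffe */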
ist + 3 : dpl); ss = 0; - } else if ((e2 & DESC_C_MASK) || dpl == cpl) { + } else { /* to same privilege */ if (env->eflags & VM_MASK) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } new_stack = 0; esp = env->regs[R_ESP]; - dpl = cpl; - } else { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); - new_stack = 0; /* avoid warning */ - esp = 0; /* avoid warning */ } esp &= ~0xfLL; /* align stack */ @@ -919,7 +955,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, if (new_stack) { ss = 0 | dpl; - cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); + cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT); } env->regs[R_ESP] = esp; @@ -933,16 +969,6 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, #endif #ifdef TARGET_X86_64 -#if defined(CONFIG_USER_ONLY) -void helper_syscall(CPUX86State *env, int next_eip_addend) -{ - CPUState *cs = CPU(x86_env_get_cpu(env)); - - cs->exception_index = EXCP_SYSCALL; - env->exception_next_eip = env->eip + next_eip_addend; - cpu_loop_exit(cs); -} -#else void helper_syscall(CPUX86State *env, int next_eip_addend) { // Unicorn: call registered syscall hooks @@ -955,63 +981,15 @@ void helper_syscall(CPUX86State *env, int next_eip_addend) continue; if (hook->insn == UC_X86_INS_SYSCALL) ((uc_cb_insn_syscall_t)hook->callback)(env->uc, hook->user_data); + + // the last callback may have already asked to stop emulation + if (env->uc->stop_request) + break; } env->eip += next_eip_addend; - return; -/* - int selector; - - if (!(env->efer & MSR_EFER_SCE)) { - raise_exception_err(env, EXCP06_ILLOP, 0); - } - selector = (env->star >> 32) & 0xffff; - if (env->hflags & HF_LMA_MASK) { - int code64; - - env->regs[R_ECX] = env->eip + next_eip_addend; - env->regs[11] = cpu_compute_eflags(env); - - code64 = env->hflags & HF_CS64_MASK; - - env->eflags &= ~env->fmask; - cpu_load_eflags(env, env->eflags, 0); - cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | - DESC_L_MASK); - cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_W_MASK | DESC_A_MASK); - if (code64) { - env->eip = env->lstar; - } else { - env->eip = env->cstar; - } - } else { - env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend); - - env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK); - cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); - cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_W_MASK | DESC_A_MASK); - env->eip = (uint32_t)env->star; - } -*/ } #endif -#endif #ifdef TARGET_X86_64 void helper_sysret(CPUX86State *env, int dflag) @@ -1019,11 +997,11 @@ void helper_sysret(CPUX86State *env, int dflag) int cpl, selector; if (!(env->efer & MSR_EFER_SCE)) { - raise_exception_err(env, EXCP06_ILLOP, 0); + raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); } cpl = env->hflags & HF_CPL_MASK; if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { - raise_exception_err(env, EXCP0D_GPF, 0); + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } selector = (env->star >> 48) & 0xffff; if (env->hflags & HF_LMA_MASK) { @@ -1046,7 +1024,7 @@ void helper_sysret(CPUX86State *env, int dflag) DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); env->eip =
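The helper_syscall body above is Unicorn's instruction-hook dispatch in miniature: walk the hooks registered for this instruction, skip non-matching ones, invoke each callback, and re-check the engine's stop flag before running the next hook, since any callback may call uc_emu_stop(). A simplified sketch with hypothetical types (the real code iterates Unicorn's hook list, not an array):

    /* Any callback may raise *stop_request; the loop must observe it
       before invoking the next hook. */
    typedef void (*syscall_cb)(void *uc, void *user_data);

    struct hook_ent { int insn; syscall_cb cb; void *user_data; };

    static void dispatch_syscall_hooks(void *uc, struct hook_ent *hooks,
                                       int n, int insn,
                                       const int *stop_request)
    {
        for (int i = 0; i < n; i++) {
            if (hooks[i].insn != insn) {
                continue;                 /* hook is for another instruction */
            }
            hooks[i].cb(uc, hooks[i].user_data);
            if (*stop_request) {          /* a callback asked to stop */
                break;
            }
        }
    }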
(uint32_t)env->regs[R_ECX]; } - cpu_x86_load_seg_cache(env, R_SS, selector + 8, + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | @@ -1059,7 +1037,7 @@ void helper_sysret(CPUX86State *env, int dflag) DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); env->eip = (uint32_t)env->regs[R_ECX]; - cpu_x86_load_seg_cache(env, R_SS, selector + 8, + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | @@ -1070,7 +1048,7 @@ void helper_sysret(CPUX86State *env, int dflag) /* real mode interrupt */ static void do_interrupt_real(CPUX86State *env, int intno, int is_int, - int error_code, unsigned int next_eip) // qq + int error_code, unsigned int next_eip) { SegmentCache *dt; target_ulong ptr, ssp; @@ -1107,47 +1085,11 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int, env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); } -#if defined(CONFIG_USER_ONLY) -/* fake user mode interrupt */ -static void do_interrupt_user(CPUX86State *env, int intno, int is_int, - int error_code, target_ulong next_eip) -{ - SegmentCache *dt; - target_ulong ptr; - int dpl, cpl, shift; - uint32_t e2; - - dt = &env->idt; - if (env->hflags & HF_LMA_MASK) { - shift = 4; - } else { - shift = 3; - } - ptr = dt->base + (intno << shift); - e2 = cpu_ldl_kernel(env, ptr + 4); - - dpl = (e2 >> DESC_DPL_SHIFT) & 3; - cpl = env->hflags & HF_CPL_MASK; - /* check privilege if software int */ - if (is_int && dpl < cpl) { - raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2); - } - - /* Since we emulate only user space, we cannot do more than - exiting the emulation with the suitable exception and error - code. So update EIP for INT 0x80 and EXCP_SYSCALL. */ - if (is_int || intno == EXCP_SYSCALL) { - env->eip = next_eip; - } -} - -#else - static void handle_even_inj(CPUX86State *env, int intno, int is_int, int error_code, int is_hw, int rm) { - CPUState *cs = CPU(x86_env_get_cpu(env)); - uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + CPUState *cs = env_cpu(env); + uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); if (!(event_inj & SVM_EVTINJ_VALID)) { @@ -1161,16 +1103,15 @@ static void handle_even_inj(CPUX86State *env, int intno, int is_int, event_inj = intno | type | SVM_EVTINJ_VALID; if (!rm && exception_has_error_code(intno)) { event_inj |= SVM_EVTINJ_VALID_ERR; - stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code); } - stl_phys(cs->as, + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj); } } -#endif /* * Begin execution of an interruption. 
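The two helper_sysret hunks above change more than the exception plumbing: loading SS with (selector + 8) | 3 forces RPL = 3 into the selector's low bits, matching the CPL 3 that SYSRET establishes; with the old selector + 8, SS.RPL stayed 0 and later privilege checks saw an inconsistent stack segment. Spelled out with an example STAR value (0x0020 here is hypothetical):

    #include <stdint.h>

    /* SS selector that SYSRET should install, given STAR[63:48]. */
    static inline uint16_t sysret_ss(uint16_t star_hi)     /* e.g. 0x0020 */
    {
        /* star_hi + 8       -> 0x0028: RPL 0, inconsistent with CPL 3 */
        /* (star_hi + 8) | 3 -> 0x002b: RPL 3, matches the new CPL     */
        return (uint16_t)((star_hi + 8) | 3);
    }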
is_int is TRUE if coming from @@ -1182,17 +1123,18 @@ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, { CPUX86State *env = &cpu->env; +#if 0 if (qemu_loglevel_mask(CPU_LOG_INT)) { if ((env->cr[0] & CR0_PE_MASK)) { - //static int count; + // static int count; - //qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx - // " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, - // count, intno, error_code, is_int, - // env->hflags & HF_CPL_MASK, - // env->segs[R_CS].selector, env->eip, - // (int)env->segs[R_CS].base + env->eip, - // env->segs[R_SS].selector, env->regs[R_ESP]); + qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx + " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, + count, intno, error_code, is_int, + env->hflags & HF_CPL_MASK, + env->segs[R_CS].selector, env->eip, + (int)env->segs[R_CS].base + env->eip, + env->segs[R_SS].selector, env->regs[R_ESP]); if (intno == 0x0e) { qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); } else { @@ -1213,15 +1155,15 @@ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, qemu_log("\n"); } #endif - //count++; + count++; } } +#endif + if (env->cr[0] & CR0_PE_MASK) { -#if !defined(CONFIG_USER_ONLY) - if (env->hflags & HF_SVMI_MASK) { + if (env->hflags & HF_GUEST_MASK) { handle_even_inj(env, intno, is_int, error_code, is_hw, 0); } -#endif #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw); @@ -1232,198 +1174,110 @@ static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, is_hw); } } else { -#if !defined(CONFIG_USER_ONLY) - if (env->hflags & HF_SVMI_MASK) { + if (env->hflags & HF_GUEST_MASK) { handle_even_inj(env, intno, is_int, error_code, is_hw, 1); } -#endif do_interrupt_real(env, intno, is_int, error_code, next_eip); } -#if !defined(CONFIG_USER_ONLY) - if (env->hflags & HF_SVMI_MASK) { + if (env->hflags & HF_GUEST_MASK) { CPUState *cs = CPU(cpu); - uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + + uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); - stl_phys(cs->as, + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID); } -#endif } void x86_cpu_do_interrupt(CPUState *cs) { - X86CPU *cpu = X86_CPU(cs->uc, cs); + X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; -#if defined(CONFIG_USER_ONLY) - /* if user mode only, we simulate a fake exception - which will be handled outside the cpu execution - loop */ - do_interrupt_user(env, cs->exception_index, - env->exception_is_int, - env->error_code, - env->exception_next_eip); - /* successfully delivered */ - env->old_exception = -1; -#else - /* simulate a real cpu exception. On i386, it can - trigger new exceptions, but we do not handle - double or triple faults yet. 
*/ - do_interrupt_all(cpu, cs->exception_index, - env->exception_is_int, - env->error_code, - env->exception_next_eip, 0); - /* successfully delivered */ - env->old_exception = -1; -#endif + if (cs->exception_index >= EXCP_VMEXIT) { + assert(env->old_exception == -1); + do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code); + } else { + do_interrupt_all(cpu, cs->exception_index, + env->exception_is_int, + env->error_code, + env->exception_next_eip, 0); + /* successfully delivered */ + env->old_exception = -1; + } } void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) { - do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw); + do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); } bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - X86CPU *cpu = X86_CPU(cs->uc, cs); + X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - bool ret = false; + int intno; -#if !defined(CONFIG_USER_ONLY) - if (interrupt_request & CPU_INTERRUPT_POLL) { + interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request); + if (!interrupt_request) { + return false; + } + + /* Don't process multiple interrupt requests in a single call. + * This is required to make icount-driven execution deterministic. + */ + switch (interrupt_request) { + case CPU_INTERRUPT_POLL: cs->interrupt_request &= ~CPU_INTERRUPT_POLL; - apic_poll_irq(cpu->apic_state); - } -#endif - if (interrupt_request & CPU_INTERRUPT_SIPI) { + // apic_poll_irq(cpu->apic_state); + break; + case CPU_INTERRUPT_SIPI: do_cpu_sipi(cpu); - } else if (env->hflags2 & HF2_GIF_MASK) { - if ((interrupt_request & CPU_INTERRUPT_SMI) && - !(env->hflags & HF_SMM_MASK)) { - cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0); - cs->interrupt_request &= ~CPU_INTERRUPT_SMI; - do_smm_enter(cpu); - ret = true; - } else if ((interrupt_request & CPU_INTERRUPT_NMI) && - !(env->hflags2 & HF2_NMI_MASK)) { - cs->interrupt_request &= ~CPU_INTERRUPT_NMI; - env->hflags2 |= HF2_NMI_MASK; - do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); - ret = true; - } else if (interrupt_request & CPU_INTERRUPT_MCE) { - cs->interrupt_request &= ~CPU_INTERRUPT_MCE; - do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); - ret = true; - } else if ((interrupt_request & CPU_INTERRUPT_HARD) && - (((env->hflags2 & HF2_VINTR_MASK) && - (env->hflags2 & HF2_HIF_MASK)) || - (!(env->hflags2 & HF2_VINTR_MASK) && - (env->eflags & IF_MASK && - !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { - int intno; - cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0); - cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | - CPU_INTERRUPT_VIRQ); - intno = cpu_get_pic_interrupt(env); - qemu_log_mask(CPU_LOG_TB_IN_ASM, - "Servicing hardware INT=0x%02x\n", intno); - do_interrupt_x86_hardirq(env, intno, 1); - /* ensure that no TB jump will be modified as - the program flow was changed */ - ret = true; -#if !defined(CONFIG_USER_ONLY) - } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && - (env->eflags & IF_MASK) && - !(env->hflags & HF_INHIBIT_IRQ_MASK)) { - int intno; - /* FIXME: this should respect TPR */ - cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0); - intno = ldl_phys(cs->as, env->vm_vmcb + break; + case CPU_INTERRUPT_SMI: + cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0); + cs->interrupt_request &= ~CPU_INTERRUPT_SMI; + do_smm_enter(cpu); + break; + case CPU_INTERRUPT_NMI: + cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0); + cs->interrupt_request &= ~CPU_INTERRUPT_NMI; + env->hflags2 |= HF2_NMI_MASK; + do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); + 
break; + case CPU_INTERRUPT_MCE: + cs->interrupt_request &= ~CPU_INTERRUPT_MCE; + do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); + break; + case CPU_INTERRUPT_HARD: + cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0); + cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | + CPU_INTERRUPT_VIRQ); + // intno = cpu_get_pic_interrupt(env); + intno = 0; + //qemu_log_mask(CPU_LOG_TB_IN_ASM, + // "Servicing hardware INT=0x%02x\n", intno); + do_interrupt_x86_hardirq(env, intno, 1); + break; + case CPU_INTERRUPT_VIRQ: + /* FIXME: this should respect TPR */ + cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0); + intno = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_vector)); - qemu_log_mask(CPU_LOG_TB_IN_ASM, - "Servicing virtual hardware INT=0x%02x\n", intno); - do_interrupt_x86_hardirq(env, intno, 1); - cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; - ret = true; -#endif - } + //qemu_log_mask(CPU_LOG_TB_IN_ASM, + // "Servicing virtual hardware INT=0x%02x\n", intno); + do_interrupt_x86_hardirq(env, intno, 1); + cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; + break; } - return ret; + /* Ensure that no TB jump will be modified as the program flow was changed. */ + return true; } -void helper_enter_level(CPUX86State *env, int level, int data32, - target_ulong t1) -{ - target_ulong ssp; - uint32_t esp_mask, esp, ebp; - - esp_mask = get_sp_mask(env->segs[R_SS].flags); - ssp = env->segs[R_SS].base; - ebp = env->regs[R_EBP]; - esp = env->regs[R_ESP]; - if (data32) { - /* 32 bit */ - esp -= 4; - while (--level) { - esp -= 4; - ebp -= 4; - cpu_stl_data(env, ssp + (esp & esp_mask), - cpu_ldl_data(env, ssp + (ebp & esp_mask))); - } - esp -= 4; - cpu_stl_data(env, ssp + (esp & esp_mask), t1); - } else { - /* 16 bit */ - esp -= 2; - while (--level) { - esp -= 2; - ebp -= 2; - cpu_stw_data(env, ssp + (esp & esp_mask), - cpu_lduw_data(env, ssp + (ebp & esp_mask))); - } - esp -= 2; - cpu_stw_data(env, ssp + (esp & esp_mask), t1); - } -} - -#ifdef TARGET_X86_64 -void helper_enter64_level(CPUX86State *env, int level, int data64, - target_ulong t1) -{ - target_ulong esp, ebp; - - ebp = env->regs[R_EBP]; - esp = env->regs[R_ESP]; - - if (data64) { - /* 64 bit */ - esp -= 8; - while (--level) { - esp -= 8; - ebp -= 8; - cpu_stq_data(env, esp, cpu_ldq_data(env, ebp)); - } - esp -= 8; - cpu_stq_data(env, esp, t1); - } else { - /* 16 bit */ - esp -= 2; - while (--level) { - esp -= 2; - ebp -= 2; - cpu_stw_data(env, esp, cpu_lduw_data(env, ebp)); - } - esp -= 2; - cpu_stw_data(env, esp, t1); - } -} -#endif - void helper_lldt(CPUX86State *env, int selector) { SegmentCache *dt; @@ -1438,7 +1292,7 @@ void helper_lldt(CPUX86State *env, int selector) env->ldt.limit = 0; } else { if (selector & 0x4) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } dt = &env->gdt; index = selector & ~7; @@ -1451,22 +1305,22 @@ void helper_lldt(CPUX86State *env, int selector) entry_limit = 7; } if ((index + entry_limit) > dt->limit) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } ptr = dt->base + index; - e1 = cpu_ldl_kernel(env, ptr); - e2 = cpu_ldl_kernel(env, ptr + 4); + e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); + e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, 
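The rewritten x86_cpu_exec_interrupt above replaces a chain of else-if tests with x86_cpu_pending_interrupt() plus a switch, so exactly one interrupt request is serviced per call; as the in-code comment notes, draining several requests in one pass would make icount-driven execution nondeterministic. The control-flow skeleton reduced to its shape (the lowest-set-bit pick below is a stand-in for the real priority selection):

    #include <stdbool.h>

    /* One request in, at most one serviced, then report that control
       flow changed so the caller abandons any chained TB jump. */
    static bool service_one(unsigned *pending)
    {
        unsigned one = *pending & (0u - *pending);   /* stand-in priority pick */
        if (!one) {
            return false;                            /* nothing to service */
        }
        *pending &= ~one;                            /* acknowledge this request only */
        /* switch (one) { case ...: deliver(); break; } */
        return true;
    }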
GETPC()); } if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); } #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { uint32_t e3; - e3 = cpu_ldl_kernel(env, ptr + 8); + e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); load_seg_cache_raw_dt(&env->ldt, e1, e2); env->ldt.base |= (target_ulong)e3 << 32; } else @@ -1493,7 +1347,7 @@ void helper_ltr(CPUX86State *env, int selector) env->tr.flags = 0; } else { if (selector & 0x4) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } dt = &env->gdt; index = selector & ~7; @@ -1506,27 +1360,27 @@ void helper_ltr(CPUX86State *env, int selector) entry_limit = 7; } if ((index + entry_limit) > dt->limit) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } ptr = dt->base + index; - e1 = cpu_ldl_kernel(env, ptr); - e2 = cpu_ldl_kernel(env, ptr + 4); + e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); + e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); type = (e2 >> DESC_TYPE_SHIFT) & 0xf; if ((e2 & DESC_S_MASK) || (type != 1 && type != 9)) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); } #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { uint32_t e3, e4; - e3 = cpu_ldl_kernel(env, ptr + 8); - e4 = cpu_ldl_kernel(env, ptr + 12); + e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); + e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } load_seg_cache_raw_dt(&env->tr, e1, e2); env->tr.base |= (target_ulong)e3 << 32; @@ -1536,7 +1390,7 @@ void helper_ltr(CPUX86State *env, int selector) load_seg_cache_raw_dt(&env->tr, e1, e2); } e2 |= DESC_TSS_BUSY_MASK; - cpu_stl_kernel(env, ptr + 4, e2); + cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); } env->tr.selector = selector; } @@ -1637,7 +1491,7 @@ void helper_load_seg(CPUX86State *env, int seg_reg, int selector) && (!(env->hflags & HF_CS64_MASK) || cpl == 3) #endif ) { - raise_exception_err(env, EXCP0D_GPF, 0); + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); } else { @@ -1649,51 +1503,51 @@ void helper_load_seg(CPUX86State *env, int seg_reg, int selector) } index = selector & ~7; if ((index + 7) > dt->limit) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } ptr = dt->base + index; - e1 = cpu_ldl_kernel(env, ptr); - e2 = cpu_ldl_kernel(env, ptr + 4); + e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); + e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); if (!(e2 & DESC_S_MASK)) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } rpl = selector & 3; dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (seg_reg == R_SS) { /* must be writable segment */ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } if (rpl != cpl || dpl != 
cpl) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } } else { /* must be readable segment */ if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { /* if not conforming code, test rights */ if (dpl < cpl || dpl < rpl) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } } } if (!(e2 & DESC_P_MASK)) { if (seg_reg == R_SS) { - raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); } else { - raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); } } /* set the access bit if not already set */ if (!(e2 & DESC_A_MASK)) { e2 |= DESC_A_MASK; - cpu_stl_kernel(env, ptr + 4, e2); + cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); } cpu_x86_load_seg_cache(env, seg_reg, selector, @@ -1709,46 +1563,45 @@ void helper_load_seg(CPUX86State *env, int seg_reg, int selector) /* protected mode jump */ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, - int next_eip_addend) + target_ulong next_eip) { int gate_cs, type; uint32_t e1, e2, cpl, dpl, rpl, limit; - target_ulong next_eip; if ((new_cs & 0xfffc) == 0) { - raise_exception_err(env, EXCP0D_GPF, 0); + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } - if (load_segment(env, &e1, &e2, new_cs) != 0) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } cpl = env->hflags & HF_CPL_MASK; if (e2 & DESC_S_MASK) { if (!(e2 & DESC_CS_MASK)) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (e2 & DESC_C_MASK) { /* conforming code segment */ if (dpl > cpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } } else { /* non conforming code segment */ rpl = new_cs & 3; if (rpl > cpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } if (dpl != cpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } } if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); } limit = get_seg_limit(e1, e2); if (new_eip > limit && - !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, get_seg_base(e1, e2), limit, e2); @@ -1759,55 +1612,90 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, rpl = new_cs & 3; cpl = env->hflags & HF_CPL_MASK; type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + +#ifdef TARGET_X86_64 + if (env->efer & MSR_EFER_LMA) { + if (type != 12) { + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 
GETPC()); + } + } +#endif switch (type) { case 1: /* 286 TSS */ case 9: /* 386 TSS */ case 5: /* task gate */ if (dpl < cpl || dpl < rpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } - next_eip = env->eip + next_eip_addend; - switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip); + switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); break; case 4: /* 286 call gate */ case 12: /* 386 call gate */ if ((dpl < cpl) || (dpl < rpl)) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); } gate_cs = e1 >> 16; new_eip = (e1 & 0xffff); if (type == 12) { new_eip |= (e2 & 0xffff0000); } - if (load_segment(env, &e1, &e2, gate_cs) != 0) { - raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + +#ifdef TARGET_X86_64 + if (env->efer & MSR_EFER_LMA) { + /* load the upper 8 bytes of the 64-bit call gate */ + if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, + GETPC()); + } + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + if (type != 0) { + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, + GETPC()); + } + new_eip |= ((target_ulong)e1) << 32; + } +#endif + + if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { + raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; /* must be code segment */ if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != (DESC_S_MASK | DESC_CS_MASK))) { - raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); } if (((e2 & DESC_C_MASK) && (dpl > cpl)) || (!(e2 & DESC_C_MASK) && (dpl != cpl))) { - raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); } +#ifdef TARGET_X86_64 + if (env->efer & MSR_EFER_LMA) { + if (!(e2 & DESC_L_MASK)) { + raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); + } + if (e2 & DESC_B_MASK) { + raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); + } + } +#endif if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); } limit = get_seg_limit(e1, e2); - if (new_eip > limit) { - raise_exception_err(env, EXCP0D_GPF, 0); + if (new_eip > limit && + (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, get_seg_base(e1, e2), limit, e2); env->eip = new_eip; break; default: - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); break; } } @@ -1826,11 +1714,11 @@ void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1, esp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; if (shift) { - PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector); - PUSHL(ssp, esp, esp_mask, next_eip); + PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC()); + PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC()); } else { - PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector); - PUSHW(ssp, esp, esp_mask, next_eip); + PUSHW_RA(ssp, esp, esp_mask, 
env->segs[R_CS].selector, GETPC()); + PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC()); } SET_ESP(esp, esp_mask); @@ -1841,47 +1729,46 @@ void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1, /* protected mode call */ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, - int shift, int next_eip_addend) + int shift, target_ulong next_eip) { int new_stack, i; - uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; - uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask; + uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; + uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask; uint32_t val, limit, old_sp_mask; - target_ulong ssp, old_ssp, next_eip; + target_ulong ssp, old_ssp, offset, sp; - next_eip = env->eip + next_eip_addend; - LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift); - LOG_PCALL_STATE(CPU(x86_env_get_cpu(env))); + LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); + LOG_PCALL_STATE(env_cpu(env)); if ((new_cs & 0xfffc) == 0) { - raise_exception_err(env, EXCP0D_GPF, 0); + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } - if (load_segment(env, &e1, &e2, new_cs) != 0) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } cpl = env->hflags & HF_CPL_MASK; LOG_PCALL("desc=%08x:%08x\n", e1, e2); if (e2 & DESC_S_MASK) { if (!(e2 & DESC_CS_MASK)) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (e2 & DESC_C_MASK) { /* conforming code segment */ if (dpl > cpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } } else { /* non conforming code segment */ rpl = new_cs & 3; if (rpl > cpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } if (dpl != cpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } } if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); } #ifdef TARGET_X86_64 @@ -1891,8 +1778,8 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, /* 64 bit case */ rsp = env->regs[R_ESP]; - PUSHQ(rsp, env->segs[R_CS].selector); - PUSHQ(rsp, next_eip); + PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC()); + PUSHQ_RA(rsp, next_eip, GETPC()); /* from this point, not restartable */ env->regs[R_ESP] = rsp; cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, @@ -1906,16 +1793,16 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, sp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; if (shift) { - PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); - PUSHL(ssp, sp, sp_mask, next_eip); + PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); + PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC()); } else { - PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); - PUSHW(ssp, sp, sp_mask, next_eip); + PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); + PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC()); } limit = get_seg_limit(e1, e2); if (new_eip > limit) { - raise_exception_err(env, 
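The DESC_C_MASK branch above encodes the SDM's privilege rules for a far call or jump directly to a code segment. Restated as a predicate (a sketch; the helper name is mine):

    static bool far_transfer_allowed(uint32_t e2, int cpl, int rpl)
    {
        int dpl = (e2 >> DESC_DPL_SHIFT) & 3;

        if (e2 & DESC_C_MASK) {
            /* conforming: callable from equal or lesser privilege */
            return dpl <= cpl;
        }
        /* non-conforming: RPL must not exceed CPL, and DPL == CPL */
        return rpl <= cpl && dpl == cpl;
    }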
EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } /* from this point, not restartable */ SET_ESP(sp, sp_mask); @@ -1928,104 +1815,162 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; dpl = (e2 >> DESC_DPL_SHIFT) & 3; rpl = new_cs & 3; + +#ifdef TARGET_X86_64 + if (env->efer & MSR_EFER_LMA) { + if (type != 12) { + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); + } + } +#endif + switch (type) { case 1: /* available 286 TSS */ case 9: /* available 386 TSS */ case 5: /* task gate */ if (dpl < cpl || dpl < rpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } - switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip); + switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); return; case 4: /* 286 call gate */ case 12: /* 386 call gate */ break; default: - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); break; } shift = type >> 3; if (dpl < cpl || dpl < rpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } /* check valid bit */ if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); } selector = e1 >> 16; - offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); param_count = e2 & 0x1f; + offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); +#ifdef TARGET_X86_64 + if (env->efer & MSR_EFER_LMA) { + /* load the upper 8 bytes of the 64-bit call gate */ + if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, + GETPC()); + } + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + if (type != 0) { + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, + GETPC()); + } + offset |= ((target_ulong)e1) << 32; + } +#endif if ((selector & 0xfffc) == 0) { - raise_exception_err(env, EXCP0D_GPF, 0); + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } - if (load_segment(env, &e1, &e2, selector) != 0) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (dpl > cpl) { - raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); } +#ifdef TARGET_X86_64 + if (env->efer & MSR_EFER_LMA) { + if (!(e2 & DESC_L_MASK)) { + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); + } + if (e2 & DESC_B_MASK) { + raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); + } + shift++; + } +#endif if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); } if (!(e2 & DESC_C_MASK) && dpl < cpl) { /* to inner privilege */ - get_ss_esp_from_tss(env, &ss, &sp, dpl); - LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" - TARGET_FMT_lx "\n", ss, sp, param_count, - env->regs[R_ESP]); - if ((ss & 0xfffc) == 0) { - 
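Both the ljmp and lcall paths now handle IA-32e call gates, which are 16 bytes wide: the second descriptor slot (new_cs + 8) supplies bits 63:32 of the target offset, and its type field must read as zero or the gate is rejected with #GP. For reference, the layout being decoded from the raw e1/e2 words (field names are mine; the code never declares such a struct):

    struct x86_64_call_gate {      /* 16 bytes in the GDT/LDT */
        uint16_t offset_15_0;
        uint16_t selector;         /* target code segment */
        uint16_t flags;            /* P, DPL, type = 1100b */
        uint16_t offset_31_16;
        uint32_t offset_63_32;     /* read via new_cs + 8 (e1) */
        uint32_t reserved;         /* type bits here must be 0 (e2) */
    };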
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); - } - if ((ss & 3) != dpl) { - raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); - } - if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { - raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); - } - ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; - if (ss_dpl != dpl) { - raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); - } - if (!(ss_e2 & DESC_S_MASK) || - (ss_e2 & DESC_CS_MASK) || - !(ss_e2 & DESC_W_MASK)) { - raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); - } - if (!(ss_e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); +#ifdef TARGET_X86_64 + if (shift == 2) { + sp = get_rsp_from_tss(env, dpl); + ss = dpl; /* SS = NULL selector with RPL = new CPL */ + new_stack = 1; + sp_mask = 0; + ssp = 0; /* SS base is always zero in IA-32e mode */ + LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" + TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]); + } else +#endif + { + uint32_t sp32; + get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); + LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" + TARGET_FMT_lx "\n", ss, sp32, param_count, + env->regs[R_ESP]); + sp = sp32; + if ((ss & 0xfffc) == 0) { + raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); + } + if ((ss & 3) != dpl) { + raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); + } + if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { + raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); + } + ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; + if (ss_dpl != dpl) { + raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); + } + if (!(ss_e2 & DESC_S_MASK) || + (ss_e2 & DESC_CS_MASK) || + !(ss_e2 & DESC_W_MASK)) { + raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); + } + if (!(ss_e2 & DESC_P_MASK)) { + raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); + } + + sp_mask = get_sp_mask(ss_e2); + ssp = get_seg_base(ss_e1, ss_e2); } /* push_size = ((param_count * 2) + 8) << shift; */ old_sp_mask = get_sp_mask(env->segs[R_SS].flags); old_ssp = env->segs[R_SS].base; - - sp_mask = get_sp_mask(ss_e2); - ssp = get_seg_base(ss_e1, ss_e2); - if (shift) { - PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector); - PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]); +#ifdef TARGET_X86_64 + if (shift == 2) { + /* XXX: verify if new stack address is canonical */ + PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC()); + PUSHQ_RA(sp, env->regs[R_ESP], GETPC()); + /* parameters aren't supported for 64-bit call gates */ + } else +#endif + if (shift == 1) { + PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC()); + PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC()); for (i = param_count - 1; i >= 0; i--) { - val = cpu_ldl_kernel(env, old_ssp + - ((env->regs[R_ESP] + i * 4) & - old_sp_mask)); - PUSHL(ssp, sp, sp_mask, val); + val = cpu_ldl_kernel_ra(env, old_ssp + + ((env->regs[R_ESP] + i * 4) & + old_sp_mask), GETPC()); + PUSHL_RA(ssp, sp, sp_mask, val, GETPC()); } } else { - PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector); - PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]); + PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC()); + PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC()); for (i = param_count - 1; i >= 0; i--) { - val = cpu_lduw_kernel(env, old_ssp + - ((env->regs[R_ESP] + i * 2) & - old_sp_mask)); - PUSHW(ssp, sp, sp_mask, val); + val = cpu_lduw_kernel_ra(env, old_ssp + + ((env->regs[R_ESP] + i * 2) & + old_sp_mask), GETPC()); + PUSHW_RA(ssp, sp, sp_mask, val, GETPC()); } } new_stack = 1; @@ -2038,22 +1983,35 
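On a privilege-raising call the new stack pointer comes from the TSS: the long-mode branch takes a flat RSP and installs SS as a NULL selector with RPL = new CPL, while the legacy branch fetches SS:ESP and fully validates the SS descriptor, as above. A sketch of the 64-bit lookup, assuming the SDM's 64-bit TSS layout where RSPn sits at offset 4 + n * 8 (the real get_rsp_from_tss() is defined earlier in this file):

    static target_ulong get_rsp_from_tss_sketch(CPUX86State *env, int dpl,
                                                uintptr_t retaddr)
    {
        int index = 4 + dpl * 8;      /* RSP0/RSP1/RSP2 in the 64-bit TSS */

        if ((uint32_t)(index + 7) > env->tr.limit) {
            raise_exception_err_ra(env, EXCP0A_TSS,
                                   env->tr.selector & 0xfffc, retaddr);
        }
        return cpu_ldq_kernel_ra(env, env->tr.base + index, retaddr);
    }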
@@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, new_stack = 0; } - if (shift) { - PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); - PUSHL(ssp, sp, sp_mask, next_eip); +#ifdef TARGET_X86_64 + if (shift == 2) { + PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC()); + PUSHQ_RA(sp, next_eip, GETPC()); + } else +#endif + if (shift == 1) { + PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); + PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC()); } else { - PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); - PUSHW(ssp, sp, sp_mask, next_eip); + PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); + PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC()); } /* from this point, not restartable */ if (new_stack) { - ss = (ss & ~3) | dpl; - cpu_x86_load_seg_cache(env, R_SS, ss, - ssp, - get_seg_limit(ss_e1, ss_e2), - ss_e2); +#ifdef TARGET_X86_64 + if (shift == 2) { + cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); + } else +#endif + { + ss = (ss & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_SS, ss, + ssp, + get_seg_limit(ss_e1, ss_e2), + ss_e2); + } } selector = (selector & ~3) | dpl; @@ -2078,15 +2036,15 @@ void helper_iret_real(CPUX86State *env, int shift) ssp = env->segs[R_SS].base; if (shift == 1) { /* 32 bits */ - POPL(ssp, sp, sp_mask, new_eip); - POPL(ssp, sp, sp_mask, new_cs); + POPL_RA(ssp, sp, sp_mask, new_eip, GETPC()); + POPL_RA(ssp, sp, sp_mask, new_cs, GETPC()); new_cs &= 0xffff; - POPL(ssp, sp, sp_mask, new_eflags); + POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC()); } else { /* 16 bits */ - POPW(ssp, sp, sp_mask, new_eip); - POPW(ssp, sp, sp_mask, new_cs); - POPW(ssp, sp, sp_mask, new_eflags); + POPW_RA(ssp, sp, sp_mask, new_eip, GETPC()); + POPW_RA(ssp, sp, sp_mask, new_cs, GETPC()); + POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC()); } env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask); env->segs[R_CS].selector = new_cs; @@ -2131,7 +2089,8 @@ static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl) /* protected mode iret */ static inline void helper_ret_protected(CPUX86State *env, int shift, - int is_iret, int addend) + int is_iret, int addend, + uintptr_t retaddr) { uint32_t new_cs, new_eflags, new_ss; uint32_t new_es, new_ds, new_fs, new_gs; @@ -2152,65 +2111,65 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, new_eflags = 0; /* avoid warning */ #ifdef TARGET_X86_64 if (shift == 2) { - POPQ(sp, new_eip); - POPQ(sp, new_cs); + POPQ_RA(sp, new_eip, retaddr); + POPQ_RA(sp, new_cs, retaddr); new_cs &= 0xffff; if (is_iret) { - POPQ(sp, new_eflags); + POPQ_RA(sp, new_eflags, retaddr); } } else #endif { if (shift == 1) { /* 32 bits */ - POPL(ssp, sp, sp_mask, new_eip); - POPL(ssp, sp, sp_mask, new_cs); + POPL_RA(ssp, sp, sp_mask, new_eip, retaddr); + POPL_RA(ssp, sp, sp_mask, new_cs, retaddr); new_cs &= 0xffff; if (is_iret) { - POPL(ssp, sp, sp_mask, new_eflags); + POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr); if (new_eflags & VM_MASK) { goto return_to_vm86; } } } else { /* 16 bits */ - POPW(ssp, sp, sp_mask, new_eip); - POPW(ssp, sp, sp_mask, new_cs); + POPW_RA(ssp, sp, sp_mask, new_eip, retaddr); + POPW_RA(ssp, sp, sp_mask, new_cs, retaddr); if (is_iret) { - POPW(ssp, sp, sp_mask, new_eflags); + POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr); } } } LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", new_cs, new_eip, shift, addend); - LOG_PCALL_STATE(CPU(x86_env_get_cpu(env))); + LOG_PCALL_STATE(env_cpu(env)); if ((new_cs & 0xfffc) == 0) { - raise_exception_err(env, EXCP0D_GPF, new_cs 
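The PUSHW/PUSHL/PUSHQ and POPW/POPL/POPQ macros used throughout these hunks gain _RA variants that carry the unwind address into the kernel-mode memory accessors. They are defined earlier in seg_helper.c; approximately (a sketch modelled on the upstream QEMU definitions, not visible in this hunk):

    #define PUSHL_RA(ssp, sp, sp_mask, val, ra)                           \
        {                                                                 \
            sp -= 4;                                                      \
            cpu_stl_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra);  \
        }

    #define POPL_RA(ssp, sp, sp_mask, val, ra)                           \
        {                                                                 \
            val = (uint32_t)cpu_ldl_kernel_ra(env,                        \
                                    (ssp) + (sp & (sp_mask)), ra);        \
            sp += 4;                                                      \
        }

    /* the 64-bit PUSHQ_RA/POPQ_RA forms take no base or mask: the
     * stack is flat in long mode */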
& 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } - if (load_segment(env, &e1, &e2, new_cs) != 0) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) { + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } if (!(e2 & DESC_S_MASK) || !(e2 & DESC_CS_MASK)) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } cpl = env->hflags & HF_CPL_MASK; rpl = new_cs & 3; if (rpl < cpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (e2 & DESC_C_MASK) { if (dpl > rpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } } else { if (dpl != rpl) { - raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); } } if (!(e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); } sp += addend; @@ -2225,21 +2184,21 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, /* return to different privilege level */ #ifdef TARGET_X86_64 if (shift == 2) { - POPQ(sp, new_esp); - POPQ(sp, new_ss); + POPQ_RA(sp, new_esp, retaddr); + POPQ_RA(sp, new_ss, retaddr); new_ss &= 0xffff; } else #endif { if (shift == 1) { /* 32 bits */ - POPL(ssp, sp, sp_mask, new_esp); - POPL(ssp, sp, sp_mask, new_ss); + POPL_RA(ssp, sp, sp_mask, new_esp, retaddr); + POPL_RA(ssp, sp, sp_mask, new_ss, retaddr); new_ss &= 0xffff; } else { /* 16 bits */ - POPW(ssp, sp, sp_mask, new_esp); - POPW(ssp, sp, sp_mask, new_ss); + POPW_RA(ssp, sp, sp_mask, new_esp, retaddr); + POPW_RA(ssp, sp, sp_mask, new_ss, retaddr); } } LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", @@ -2258,26 +2217,26 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, } else #endif { - raise_exception_err(env, EXCP0D_GPF, 0); + raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); } } else { if ((new_ss & 3) != rpl) { - raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); } - if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) { - raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) { + raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); } if (!(ss_e2 & DESC_S_MASK) || (ss_e2 & DESC_CS_MASK) || !(ss_e2 & DESC_W_MASK)) { - raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); } dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; if (dpl != rpl) { - raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); } if (!(ss_e2 & DESC_P_MASK)) { - raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc); + raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr); } cpu_x86_load_seg_cache(env, R_SS, new_ss, get_seg_base(ss_e1, ss_e2), @@ -2327,12 +2286,12 @@ static inline void helper_ret_protected(CPUX86State *env, int shift, return; return_to_vm86: - POPL(ssp, sp, sp_mask, new_esp); - POPL(ssp, sp, sp_mask, new_ss); - POPL(ssp, sp, sp_mask, new_es); - POPL(ssp, sp, sp_mask, new_ds); - POPL(ssp, sp, sp_mask, new_fs); - POPL(ssp, sp, 
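The #ifdef TARGET_X86_64 branch above has its condition elided by the hunk context; based on the corresponding upstream QEMU code, a NULL SS is tolerated on a far return only in long mode and only when not returning to ring 3, otherwise the #GP(0) shown fires. Roughly:

    /* sketch of the guard around the NULL-SS case (assumption drawn
     * from the matching upstream QEMU code, not shown in this hunk) */
    if ((new_ss & 0xfffc) == 0) {
        bool null_ss_ok = (env->hflags & HF_LMA_MASK) && rpl != 3;
        if (!null_ss_ok) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
        }
    }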
sp_mask, new_gs); + POPL_RA(ssp, sp, sp_mask, new_esp, retaddr); + POPL_RA(ssp, sp, sp_mask, new_ss, retaddr); + POPL_RA(ssp, sp, sp_mask, new_es, retaddr); + POPL_RA(ssp, sp, sp_mask, new_ds, retaddr); + POPL_RA(ssp, sp, sp_mask, new_fs, retaddr); + POPL_RA(ssp, sp, sp_mask, new_gs, retaddr); /* modify processor state */ cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | @@ -2358,31 +2317,31 @@ void helper_iret_protected(CPUX86State *env, int shift, int next_eip) if (env->eflags & NT_MASK) { #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { - raise_exception_err(env, EXCP0D_GPF, 0); + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } #endif - tss_selector = cpu_lduw_kernel(env, env->tr.base + 0); + tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC()); if (tss_selector & 4) { - raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); } - if (load_segment(env, &e1, &e2, tss_selector) != 0) { - raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) { + raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); } type = (e2 >> DESC_TYPE_SHIFT) & 0x17; /* NOTE: we check both segment and busy TSS */ if (type != 3) { - raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); } - switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip); + switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC()); } else { - helper_ret_protected(env, shift, 1, 0); + helper_ret_protected(env, shift, 1, 0, GETPC()); } env->hflags2 &= ~HF2_NMI_MASK; } void helper_lret_protected(CPUX86State *env, int shift, int addend) { - helper_ret_protected(env, shift, 0, addend); + helper_ret_protected(env, shift, 0, addend, GETPC()); } void helper_sysenter(CPUX86State *env, int next_eip_addend) @@ -2397,40 +2356,13 @@ void helper_sysenter(CPUX86State *env, int next_eip_addend) continue; if (hook->insn == UC_X86_INS_SYSENTER) ((uc_cb_insn_syscall_t)hook->callback)(env->uc, hook->user_data); + + // the last callback may already asked to stop emulation + if (env->uc->stop_request) + break; } env->eip += next_eip_addend; - return; - - if (env->sysenter_cs == 0) { - raise_exception_err(env, EXCP0D_GPF, 0); - } - env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); - -#ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { - cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | - DESC_L_MASK); - } else -#endif - { - cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); - } - cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, - 0, 0xffffffff, - DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | - DESC_S_MASK | - DESC_W_MASK | DESC_A_MASK); - env->regs[R_ESP] = env->sysenter_esp; - env->eip = env->sysenter_eip; } void helper_sysexit(CPUX86State *env, int dflag) @@ -2439,7 +2371,7 @@ void helper_sysexit(CPUX86State *env, int dflag) cpl = env->hflags & HF_CPL_MASK; if (env->sysenter_cs == 0 || cpl != 0) { - raise_exception_err(env, EXCP0D_GPF, 0); + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); } #ifdef TARGET_X86_64 if (dflag == 2) { @@ -2483,7 +2415,7 @@ target_ulong 
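This is one of the few Unicorn-specific changes in the file: helper_sysenter() no longer performs the architectural CS/SS/ESP/EIP transition at all; it only fires registered UC_HOOK_INSN callbacks for UC_X86_INS_SYSENTER, honors a stop request raised by a callback, and falls through to the next instruction. From the embedding side this is consumed as follows (a sketch using the public Unicorn C API):

    #include <stdio.h>
    #include <unicorn/unicorn.h>

    /* matches uc_cb_insn_syscall_t, the cast used in the loop above */
    static void on_sysenter(uc_engine *uc, void *user_data)
    {
        uint32_t eip;
        uc_reg_read(uc, UC_X86_REG_EIP, &eip);
        printf("SYSENTER at 0x%x\n", eip);
        /* uc_emu_stop(uc) here sets stop_request, which the loop
         * above now checks after every callback */
    }

    /* registration; begin=1 > end=0 means "hook at any address":
     *   uc_hook hh;
     *   uc_hook_add(uc, &hh, UC_HOOK_INSN, on_sysenter, NULL, 1, 0,
     *               UC_X86_INS_SYSENTER);
     */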
helper_lsl(CPUX86State *env, target_ulong selector1) if ((selector & 0xfffc) == 0) { goto fail; } - if (load_segment(env, &e1, &e2, selector) != 0) { + if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { goto fail; } rpl = selector & 3; @@ -2530,7 +2462,7 @@ target_ulong helper_lar(CPUX86State *env, target_ulong selector1) if ((selector & 0xfffc) == 0) { goto fail; } - if (load_segment(env, &e1, &e2, selector) != 0) { + if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { goto fail; } rpl = selector & 3; @@ -2579,7 +2511,7 @@ void helper_verr(CPUX86State *env, target_ulong selector1) if ((selector & 0xfffc) == 0) { goto fail; } - if (load_segment(env, &e1, &e2, selector) != 0) { + if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { goto fail; } if (!(e2 & DESC_S_MASK)) { @@ -2617,7 +2549,7 @@ void helper_verw(CPUX86State *env, target_ulong selector1) if ((selector & 0xfffc) == 0) { goto fail; } - if (load_segment(env, &e1, &e2, selector) != 0) { + if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { goto fail; } if (!(e2 & DESC_S_MASK)) { @@ -2656,7 +2588,8 @@ void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector) } /* check if Port I/O is allowed in TSS */ -static inline void check_io(CPUX86State *env, int addr, int size) +static inline void check_io(CPUX86State *env, int addr, int size, + uintptr_t retaddr) { int io_offset, val, mask; @@ -2666,33 +2599,33 @@ static inline void check_io(CPUX86State *env, int addr, int size) env->tr.limit < 103) { goto fail; } - io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66); + io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr); io_offset += (addr >> 3); /* Note: the check needs two bytes */ if ((io_offset + 1) > env->tr.limit) { goto fail; } - val = cpu_lduw_kernel(env, env->tr.base + io_offset); + val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr); val >>= (addr & 7); mask = (1 << size) - 1; /* all bits must be zero to allow the I/O */ if ((val & mask) != 0) { fail: - raise_exception_err(env, EXCP0D_GPF, 0); + raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); } } void helper_check_iob(CPUX86State *env, uint32_t t0) { - check_io(env, t0, 1); + check_io(env, t0, 1, GETPC()); } void helper_check_iow(CPUX86State *env, uint32_t t0) { - check_io(env, t0, 2); + check_io(env, t0, 2, GETPC()); } void helper_check_iol(CPUX86State *env, uint32_t t0) { - check_io(env, t0, 4); + check_io(env, t0, 4, GETPC()); } diff --git a/qemu/target-i386/shift_helper_template.h b/qemu/target/i386/shift_helper_template.h similarity index 100% rename from qemu/target-i386/shift_helper_template.h rename to qemu/target/i386/shift_helper_template.h diff --git a/qemu/target/i386/smm_helper.c b/qemu/target/i386/smm_helper.c new file mode 100644 index 00000000..dcfcc17a --- /dev/null +++ b/qemu/target/i386/smm_helper.c @@ -0,0 +1,315 @@ +/* + * x86 SMM helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
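check_io() above implements the TSS I/O permission bitmap test, now threaded with a retaddr: the bitmap's offset is read from the TSS at +0x66, and a 1-, 2- or 4-bit mask starting at bit (port & 7) must be entirely clear for the access to proceed. A standalone model of the test (sketch; `bitmap` stands in for guest memory at tr.base + io_offset):

    static bool io_permitted(const uint8_t *bitmap, int port, int size)
    {
        /* two bytes are fetched so the mask may straddle a byte boundary,
         * which is why the code above checks io_offset + 1 vs tr.limit */
        uint16_t window = bitmap[port >> 3] | (bitmap[(port >> 3) + 1] << 8);
        uint16_t mask   = (1 << size) - 1;

        return ((window >> (port & 7)) & mask) == 0;  /* all clear = OK */
    }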
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" + +/* SMM support */ + +#ifdef TARGET_X86_64 +#define SMM_REVISION_ID 0x00020064 +#else +#define SMM_REVISION_ID 0x00020000 +#endif + +void do_smm_enter(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + CPUState *cs = CPU(cpu); + target_ulong sm_state; + SegmentCache *dt; + int i, offset; + + // qemu_log_mask(CPU_LOG_INT, "SMM: enter\n"); + // log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); + + env->msr_smi_count++; + env->hflags |= HF_SMM_MASK; + if (env->hflags2 & HF2_NMI_MASK) { + env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK; + } else { + env->hflags2 |= HF2_NMI_MASK; + } + + sm_state = env->smbase + 0x8000; + +#ifdef TARGET_X86_64 + for (i = 0; i < 6; i++) { + dt = &env->segs[i]; + offset = 0x7e00 + i * 16; + x86_stw_phys(cs, sm_state + offset, dt->selector); + x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff); + x86_stl_phys(cs, sm_state + offset + 4, dt->limit); + x86_stq_phys(cs, sm_state + offset + 8, dt->base); + } + + x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base); + x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit); + + x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector); + x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base); + x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit); + x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff); + + x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base); + x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit); + + x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector); + x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base); + x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit); + x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff); + + /* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS + is saved at offset 7ED0. Vol 3, 34.4.1.1, Table 32-2, has + 7EA0-7ED7 as "reserved". What's this, and what's really + supposed to happen? 
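The 64-bit SMRAM save loop above writes one 16-byte record per segment register at sm_state + 0x7e00 + i * 16. The shape of each record, for reference (a sketch; the code uses raw x86_st*_phys stores, not a struct):

    struct smram_seg64 {       /* at sm_state + 0x7e00 + i * 16 */
        uint16_t selector;     /* +0 */
        uint16_t attributes;   /* +2, (flags >> 8) & 0xf0ff */
        uint32_t limit;        /* +4 */
        uint64_t base;         /* +8 */
    };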
*/ + x86_stq_phys(cs, sm_state + 0x7ed0, env->efer); + + x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]); + x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]); + x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]); + x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]); + x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]); + x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]); + x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]); + x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]); + for (i = 8; i < 16; i++) { + x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]); + } + x86_stq_phys(cs, sm_state + 0x7f78, env->eip); + x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env)); + x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]); + x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]); + + x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]); + x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]); + x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]); + + x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID); + x86_stl_phys(cs, sm_state + 0x7f00, env->smbase); +#else + x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]); + x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]); + x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env)); + x86_stl_phys(cs, sm_state + 0x7ff0, env->eip); + x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]); + x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]); + x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]); + x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]); + x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]); + x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]); + x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]); + x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]); + x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]); + x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]); + + x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector); + x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base); + x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit); + x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff); + + x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector); + x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base); + x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit); + x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff); + + x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base); + x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit); + + x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base); + x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit); + + for (i = 0; i < 6; i++) { + dt = &env->segs[i]; + if (i < 3) { + offset = 0x7f84 + i * 12; + } else { + offset = 0x7f2c + (i - 3) * 12; + } + x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector); + x86_stl_phys(cs, sm_state + offset + 8, dt->base); + x86_stl_phys(cs, sm_state + offset + 4, dt->limit); + x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff); + } + x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]); + + x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID); + x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase); +#endif + /* init SMM cpu state */ + +#ifdef TARGET_X86_64 + cpu_load_efer(env, 0); +#endif + cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | + DF_MASK)); + env->eip = 0x00008000; + cpu_x86_update_cr0(env, + env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | + CR0_PG_MASK)); + cpu_x86_update_cr4(env, 0); + env->dr[7] = 0x00000400; + + cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase, + 
0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_G_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_G_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_G_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_G_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_G_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_G_MASK | DESC_A_MASK); +} + +void helper_rsm(CPUX86State *env) +{ + CPUState *cs = env_cpu(env); + target_ulong sm_state; + int i, offset; + uint32_t val; + + sm_state = env->smbase + 0x8000; +#ifdef TARGET_X86_64 + cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0)); + + env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68); + env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64); + + env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70); + env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78); + env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74); + env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8; + + env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88); + env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84); + + env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90); + env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98); + env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94); + env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8; + + env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8); + env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0); + env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8); + env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0); + env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8); + env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0); + env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8); + env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0); + for (i = 8; i < 16; i++) { + env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8); + } + env->eip = x86_ldq_phys(cs, sm_state + 0x7f78); + cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70), + ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); + env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68); + env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60); + + cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48)); + cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50)); + cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58)); + + for (i = 0; i < 6; i++) { + offset = 0x7e00 + i * 16; + cpu_x86_load_seg_cache(env, i, + x86_lduw_phys(cs, sm_state + offset), + x86_ldq_phys(cs, sm_state + offset + 8), + x86_ldl_phys(cs, sm_state + offset + 4), + (x86_lduw_phys(cs, sm_state + offset + 2) & + 0xf0ff) << 8); + } + + val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */ + if (val & 0x20000) { + env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00); + } +#else + cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc)); + cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8)); + cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4), + ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); + env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0); + env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec); + env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 
0x7fe8); + env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4); + env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0); + env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc); + env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8); + env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4); + env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0); + env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc); + env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8); + + env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff; + env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64); + env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60); + env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8; + + env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff; + env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80); + env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c); + env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8; + + env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74); + env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70); + + env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58); + env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54); + + for (i = 0; i < 6; i++) { + if (i < 3) { + offset = 0x7f84 + i * 12; + } else { + offset = 0x7f2c + (i - 3) * 12; + } + cpu_x86_load_seg_cache(env, i, + x86_ldl_phys(cs, + sm_state + 0x7fa8 + i * 4) & 0xffff, + x86_ldl_phys(cs, sm_state + offset + 8), + x86_ldl_phys(cs, sm_state + offset + 4), + (x86_ldl_phys(cs, + sm_state + offset) & 0xf0ff) << 8); + } + cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14)); + + val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */ + if (val & 0x20000) { + env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8); + } +#endif + if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) { + env->hflags2 &= ~HF2_NMI_MASK; + } + env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK; + env->hflags &= ~HF_SMM_MASK; + + // qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n"); + // log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); +} diff --git a/qemu/target-i386/svm.h b/qemu/target/i386/svm.h similarity index 63% rename from qemu/target-i386/svm.h rename to qemu/target/i386/svm.h index 188aa280..30649ee9 100644 --- a/qemu/target-i386/svm.h +++ b/qemu/target/i386/svm.h @@ -1,5 +1,7 @@ -#ifndef __SVM_H -#define __SVM_H +#ifndef SVM_H +#define SVM_H + +#include "qemu/compiler.h" #define TLB_CONTROL_DO_NOTHING 0 #define TLB_CONTROL_FLUSH_ALL_ASID 1 @@ -130,93 +132,107 @@ #define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */ -QEMU_PACK( struct vmcb_control_area { - uint16_t intercept_cr_read; - uint16_t intercept_cr_write; - uint16_t intercept_dr_read; - uint16_t intercept_dr_write; - uint32_t intercept_exceptions; - uint64_t intercept; - uint8_t reserved_1[44]; - uint64_t iopm_base_pa; - uint64_t msrpm_base_pa; - uint64_t tsc_offset; - uint32_t asid; - uint8_t tlb_ctl; - uint8_t reserved_2[3]; - uint32_t int_ctl; - uint32_t int_vector; - uint32_t int_state; - uint8_t reserved_3[4]; - uint64_t exit_code; - uint64_t exit_info_1; - uint64_t exit_info_2; - uint32_t exit_int_info; - uint32_t exit_int_info_err; - uint64_t nested_ctl; - uint8_t reserved_4[16]; - uint32_t event_inj; - uint32_t event_inj_err; - uint64_t nested_cr3; - uint64_t lbr_ctl; - uint8_t reserved_5[832]; +#define SVM_NPT_ENABLED (1 << 0) + +#define SVM_NPT_PAE (1 << 0) +#define SVM_NPT_LMA (1 << 1) +#define SVM_NPT_NXE (1 << 2) + +#define SVM_NPTEXIT_P (1ULL << 0) +#define SVM_NPTEXIT_RW (1ULL << 1) +#define SVM_NPTEXIT_US (1ULL << 2) 
+#define SVM_NPTEXIT_RSVD (1ULL << 3) +#define SVM_NPTEXIT_ID (1ULL << 4) +#define SVM_NPTEXIT_GPA (1ULL << 32) +#define SVM_NPTEXIT_GPT (1ULL << 33) + +QEMU_PACK(struct vmcb_control_area { + uint16_t intercept_cr_read; + uint16_t intercept_cr_write; + uint16_t intercept_dr_read; + uint16_t intercept_dr_write; + uint32_t intercept_exceptions; + uint64_t intercept; + uint8_t reserved_1[44]; + uint64_t iopm_base_pa; + uint64_t msrpm_base_pa; + uint64_t tsc_offset; + uint32_t asid; + uint8_t tlb_ctl; + uint8_t reserved_2[3]; + uint32_t int_ctl; + uint32_t int_vector; + uint32_t int_state; + uint8_t reserved_3[4]; + uint64_t exit_code; + uint64_t exit_info_1; + uint64_t exit_info_2; + uint32_t exit_int_info; + uint32_t exit_int_info_err; + uint64_t nested_ctl; + uint8_t reserved_4[16]; + uint32_t event_inj; + uint32_t event_inj_err; + uint64_t nested_cr3; + uint64_t lbr_ctl; + uint8_t reserved_5[832]; }); -QEMU_PACK( struct vmcb_seg { - uint16_t selector; - uint16_t attrib; - uint32_t limit; - uint64_t base; +QEMU_PACK(struct vmcb_seg { + uint16_t selector; + uint16_t attrib; + uint32_t limit; + uint64_t base; }); -QEMU_PACK( struct vmcb_save_area { - struct vmcb_seg es; - struct vmcb_seg cs; - struct vmcb_seg ss; - struct vmcb_seg ds; - struct vmcb_seg fs; - struct vmcb_seg gs; - struct vmcb_seg gdtr; - struct vmcb_seg ldtr; - struct vmcb_seg idtr; - struct vmcb_seg tr; - uint8_t reserved_1[43]; - uint8_t cpl; - uint8_t reserved_2[4]; - uint64_t efer; - uint8_t reserved_3[112]; - uint64_t cr4; - uint64_t cr3; - uint64_t cr0; - uint64_t dr7; - uint64_t dr6; - uint64_t rflags; - uint64_t rip; - uint8_t reserved_4[88]; - uint64_t rsp; - uint8_t reserved_5[24]; - uint64_t rax; - uint64_t star; - uint64_t lstar; - uint64_t cstar; - uint64_t sfmask; - uint64_t kernel_gs_base; - uint64_t sysenter_cs; - uint64_t sysenter_esp; - uint64_t sysenter_eip; - uint64_t cr2; - uint8_t reserved_6[32]; - uint64_t g_pat; - uint64_t dbgctl; - uint64_t br_from; - uint64_t br_to; - uint64_t last_excp_from; - uint64_t last_excp_to; +QEMU_PACK(struct vmcb_save_area { + struct vmcb_seg es; + struct vmcb_seg cs; + struct vmcb_seg ss; + struct vmcb_seg ds; + struct vmcb_seg fs; + struct vmcb_seg gs; + struct vmcb_seg gdtr; + struct vmcb_seg ldtr; + struct vmcb_seg idtr; + struct vmcb_seg tr; + uint8_t reserved_1[43]; + uint8_t cpl; + uint8_t reserved_2[4]; + uint64_t efer; + uint8_t reserved_3[112]; + uint64_t cr4; + uint64_t cr3; + uint64_t cr0; + uint64_t dr7; + uint64_t dr6; + uint64_t rflags; + uint64_t rip; + uint8_t reserved_4[88]; + uint64_t rsp; + uint8_t reserved_5[24]; + uint64_t rax; + uint64_t star; + uint64_t lstar; + uint64_t cstar; + uint64_t sfmask; + uint64_t kernel_gs_base; + uint64_t sysenter_cs; + uint64_t sysenter_esp; + uint64_t sysenter_eip; + uint64_t cr2; + uint8_t reserved_6[32]; + uint64_t g_pat; + uint64_t dbgctl; + uint64_t br_from; + uint64_t br_to; + uint64_t last_excp_from; + uint64_t last_excp_to; }); -QEMU_PACK( struct vmcb { - struct vmcb_control_area control; - struct vmcb_save_area save; +QEMU_PACK(struct vmcb { + struct vmcb_control_area control; + struct vmcb_save_area save; }); #endif diff --git a/qemu/target-i386/svm_helper.c b/qemu/target/i386/svm_helper.c similarity index 63% rename from qemu/target-i386/svm_helper.c rename to qemu/target/i386/svm_helper.c index 085749d5..ade26593 100644 --- a/qemu/target-i386/svm_helper.c +++ b/qemu/target/i386/svm_helper.c @@ -17,97 +17,39 @@ * License along with this library; if not, see . 
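The re-indented structs must keep their exact hardware layout: QEMU_PACK suppresses padding, the control area has to span exactly 0x400 bytes so the save area lands at VMCB offset 0x400, and vmcb_seg has to be 16 bytes. Cheap compile-time checks one could add (a sketch, assuming C11 _Static_assert and <stddef.h> for offsetof):

    _Static_assert(sizeof(struct vmcb_seg) == 16,
                   "vmcb_seg must stay packed");
    _Static_assert(sizeof(struct vmcb_control_area) == 0x400,
                   "control area must end at VMCB offset 0x400");
    _Static_assert(offsetof(struct vmcb, save) == 0x400,
                   "save area starts at 0x400");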
*/ +#include "qemu/osdep.h" #include "cpu.h" -#include "exec/cpu-all.h" #include "exec/helper-proto.h" +#include "exec/exec-all.h" #include "exec/cpu_ldst.h" /* Secure Virtual Machine helpers */ - -#if defined(CONFIG_USER_ONLY) - -void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) -{ -} - -void helper_vmmcall(CPUX86State *env) -{ -} - -void helper_vmload(CPUX86State *env, int aflag) -{ -} - -void helper_vmsave(CPUX86State *env, int aflag) -{ -} - -void helper_stgi(CPUX86State *env) -{ -} - -void helper_clgi(CPUX86State *env) -{ -} - -void helper_skinit(CPUX86State *env) -{ -} - -void helper_invlpga(CPUX86State *env, int aflag) -{ -} - -void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) -{ -} - -void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1) -{ -} - -void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, - uint64_t param) -{ -} - -void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, - uint64_t param) -{ -} - -void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, - uint32_t next_eip_addend) -{ -} -#else - static inline void svm_save_seg(CPUX86State *env, hwaddr addr, const SegmentCache *sc) { - CPUState *cs = CPU(x86_env_get_cpu(env)); + CPUState *cs = env_cpu(env); - stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector), + x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector), sc->selector); - stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base), + x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base), sc->base); - stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit), + x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit), sc->limit); - stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib), + x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib), ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00)); } static inline void svm_load_seg(CPUX86State *env, hwaddr addr, SegmentCache *sc) { - CPUState *cs = CPU(x86_env_get_cpu(env)); + CPUState *cs = env_cpu(env); unsigned int flags; - sc->selector = lduw_phys(cs->as, + sc->selector = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, selector)); - sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base)); - sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit)); - flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib)); + sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base)); + sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit)); + flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib)); sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12); } @@ -123,12 +65,13 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr, void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) { - CPUState *cs = CPU(x86_env_get_cpu(env)); + CPUState *cs = env_cpu(env); target_ulong addr; + uint64_t nested_ctl; uint32_t event_inj; uint32_t int_ctl; - cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC()); if (aflag == 2) { addr = env->regs[R_EAX]; @@ -136,37 +79,37 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) addr = (uint32_t)env->regs[R_EAX]; } - qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr); + // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! 
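All physical-memory accesses in the SVM code move from ld*/st*_phys(cs->as, ...) to x86_*_phys(cs, ...) wrappers. Based on the corresponding upstream QEMU helpers, each wrapper is a thin shim that supplies the CPU's memory transaction attributes; approximately:

    /* sketch of the wrapper shape (assumption; the real definitions
     * live elsewhere in the target/i386 code, not in this hunk) */
    static inline uint64_t x86_ldq_phys_sketch(CPUState *cs, hwaddr addr)
    {
        X86CPU *cpu = X86_CPU(cs);

        return address_space_ldq(cs->as, addr,
                                 cpu_get_mem_attrs(&cpu->env), NULL);
    }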
" TARGET_FMT_lx "\n", addr); env->vm_vmcb = addr; /* save the current CPU state in the hsave page */ - stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base); - stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), + x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit); - stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base), + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base); - stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), + x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rflags), cpu_compute_eflags(env)); @@ -179,66 +122,86 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds), &env->segs[R_DS]); - stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip), + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip), env->eip + next_eip_addend); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); /* load the interception bitmaps so we do not need to access the vmcb in svm mode */ - env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept)); - env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb + + env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read)); - env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb + + env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write)); - env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb + + env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read)); - env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb + + env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write)); - env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb + + env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions )); - /* enable intercepts */ - env->hflags |= HF_SVMI_MASK; + nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, + control.nested_ctl)); + if (nested_ctl & SVM_NPT_ENABLED) { + env->nested_cr3 = x86_ldq_phys(cs, + env->vm_vmcb + offsetof(struct vmcb, + control.nested_cr3)); + env->hflags2 
|= HF2_NPT_MASK; - env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb + + env->nested_pg_mode = 0; + if (env->cr[4] & CR4_PAE_MASK) { + env->nested_pg_mode |= SVM_NPT_PAE; + } + if (env->hflags & HF_LMA_MASK) { + env->nested_pg_mode |= SVM_NPT_LMA; + } + if (env->efer & MSR_EFER_NXE) { + env->nested_pg_mode |= SVM_NPT_NXE; + } + } + + /* enable intercepts */ + env->hflags |= HF_GUEST_MASK; + + env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset)); - env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base)); - env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit)); - env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base)); - env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit)); /* clear exit_info_2 so we behave like the real hardware */ - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0); - cpu_x86_update_cr0(env, ldq_phys(cs->as, + cpu_x86_update_cr0(env, x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0))); - cpu_x86_update_cr4(env, ldq_phys(cs->as, + cpu_x86_update_cr4(env, x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4))); - cpu_x86_update_cr3(env, ldq_phys(cs->as, + cpu_x86_update_cr3(env, x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3))); - env->cr[2] = ldq_phys(cs->as, + env->cr[2] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr2)); - int_ctl = ldl_phys(cs->as, + int_ctl = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); if (int_ctl & V_INTR_MASKING_MASK) { @@ -250,10 +213,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) } cpu_load_efer(env, - ldq_phys(cs->as, + x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.efer))); env->eflags = 0; - cpu_load_eflags(env, ldq_phys(cs->as, + cpu_load_eflags(env, x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags)), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); @@ -267,49 +230,49 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS); - env->eip = ldq_phys(cs->as, + env->eip = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip)); - env->regs[R_ESP] = ldq_phys(cs->as, + env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rsp)); - env->regs[R_EAX] = ldq_phys(cs->as, + env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rax)); - env->dr[7] = ldq_phys(cs->as, + env->dr[7] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7)); - env->dr[6] = ldq_phys(cs->as, + env->dr[6] = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6)); /* FIXME: guest state consistency checks */ - switch (ldub_phys(cs->as, + switch (x86_ldub_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) { case TLB_CONTROL_DO_NOTHING: break; case TLB_CONTROL_FLUSH_ALL_ASID: /* FIXME: this is not 100% correct but should work for now */ - tlb_flush(cs, 1); + tlb_flush(cs); break; } env->hflags2 |= HF2_GIF_MASK; if 
(int_ctl & V_IRQ_MASK) { - CPUState *cs = CPU(x86_env_get_cpu(env)); + CPUState *cs = env_cpu(env); cs->interrupt_request |= CPU_INTERRUPT_VIRQ; } /* maybe we need to inject an event */ - event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); if (event_inj & SVM_EVTINJ_VALID) { uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK; - uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR; - uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb + + // uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR; + uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)); - qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err); + // qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err); /* FIXME: need to implement valid_err */ switch (event_inj & SVM_EVTINJ_TYPE_MASK) { case SVM_EVTINJ_TYPE_INTR: @@ -317,7 +280,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) env->error_code = event_inj_err; env->exception_is_int = 0; env->exception_next_eip = -1; - qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR"); + // qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR"); /* XXX: is it always correct? */ do_interrupt_x86_hardirq(env, vector, 1); break; @@ -326,7 +289,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) env->error_code = event_inj_err; env->exception_is_int = 0; env->exception_next_eip = env->eip; - qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI"); + // qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI"); cpu_loop_exit(cs); break; case SVM_EVTINJ_TYPE_EXEPT: @@ -334,7 +297,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) env->error_code = event_inj_err; env->exception_is_int = 0; env->exception_next_eip = -1; - qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT"); + // qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT"); cpu_loop_exit(cs); break; case SVM_EVTINJ_TYPE_SOFT: @@ -342,27 +305,27 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) env->error_code = event_inj_err; env->exception_is_int = 1; env->exception_next_eip = env->eip; - qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT"); + // qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT"); cpu_loop_exit(cs); break; } - qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index, - env->error_code); + // qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index, + // env->error_code); } } void helper_vmmcall(CPUX86State *env) { - cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC()); raise_exception(env, EXCP06_ILLOP); } void helper_vmload(CPUX86State *env, int aflag) { - CPUState *cs = CPU(x86_env_get_cpu(env)); + CPUState *cs = env_cpu(env); target_ulong addr; - cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC()); if (aflag == 2) { addr = env->regs[R_EAX]; @@ -370,11 +333,11 @@ void helper_vmload(CPUX86State *env, int aflag) addr = (uint32_t)env->regs[R_EAX]; } - qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx - "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", - addr, ldq_phys(cs->as, addr + offsetof(struct vmcb, - save.fs.base)), - env->segs[R_FS].base); + // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! 
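The event-injection block above consumes the VMCB EVENTINJ field. Its layout, as decoded by the masks in use (summary per the AMD APM; the snippet only restates the decode):

    /* EVENTINJ field consumed above:
     *   bits  7:0  vector            (SVM_EVTINJ_VEC_MASK)
     *   bits 10:8  type: 0=INTR, 2=NMI, 3=exception, 4=soft int
     *   bit  11    error code valid  (SVM_EVTINJ_VALID_ERR)
     *   bit  31    injection valid   (SVM_EVTINJ_VALID) */
    uint8_t  vector = event_inj & SVM_EVTINJ_VEC_MASK;
    uint32_t type   = event_inj & SVM_EVTINJ_TYPE_MASK;
    bool     valid  = (event_inj & SVM_EVTINJ_VALID) != 0;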
" TARGET_FMT_lx + // "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", + // addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb, + // save.fs.base)), + // env->segs[R_FS].base); svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS); svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS); @@ -382,27 +345,27 @@ void helper_vmload(CPUX86State *env, int aflag) svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt); #ifdef TARGET_X86_64 - env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb, + env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base)); - env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar)); - env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar)); - env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask)); + env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar)); + env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar)); + env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask)); #endif - env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star)); - env->sysenter_cs = ldq_phys(cs->as, + env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star)); + env->sysenter_cs = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_cs)); - env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb, + env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp)); - env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb, + env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip)); } void helper_vmsave(CPUX86State *env, int aflag) { - CPUState *cs = CPU(x86_env_get_cpu(env)); + CPUState *cs = env_cpu(env); target_ulong addr; - cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC()); if (aflag == 2) { addr = env->regs[R_EAX]; @@ -410,11 +373,11 @@ void helper_vmsave(CPUX86State *env, int aflag) addr = (uint32_t)env->regs[R_EAX]; } - qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx - "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", - addr, ldq_phys(cs->as, - addr + offsetof(struct vmcb, save.fs.base)), - env->segs[R_FS].base); + // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! 
" TARGET_FMT_lx + // "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", + // addr, x86_ldq_phys(cs, + // addr + offsetof(struct vmcb, save.fs.base)), + // env->segs[R_FS].base); svm_save_seg(env, addr + offsetof(struct vmcb, save.fs), &env->segs[R_FS]); @@ -426,46 +389,46 @@ void helper_vmsave(CPUX86State *env, int aflag) &env->ldt); #ifdef TARGET_X86_64 - stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base), + x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase); - stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar); - stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar); - stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask); + x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar); + x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar); + x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask); #endif - stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star); - stq_phys(cs->as, + x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star); + x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs); - stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp), + x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp); - stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip), + x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip); } void helper_stgi(CPUX86State *env) { - cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC()); env->hflags2 |= HF2_GIF_MASK; } void helper_clgi(CPUX86State *env) { - cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC()); env->hflags2 &= ~HF2_GIF_MASK; } void helper_skinit(CPUX86State *env) { - cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC()); /* XXX: not implemented */ raise_exception(env, EXCP06_ILLOP); } void helper_invlpga(CPUX86State *env, int aflag) { - X86CPU *cpu = x86_env_get_cpu(env); + X86CPU *cpu = env_archcpu(env); target_ulong addr; - cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0); + cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC()); if (aflag == 2) { addr = env->regs[R_EAX]; @@ -478,119 +441,136 @@ void helper_invlpga(CPUX86State *env, int aflag) tlb_flush_page(CPU(cpu), addr); } -void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, - uint64_t param) +void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, + uint64_t param, uintptr_t retaddr) { - CPUState *cs = CPU(x86_env_get_cpu(env)); + CPUState *cs = env_cpu(env); - if (likely(!(env->hflags & HF_SVMI_MASK))) { + if (likely(!(env->hflags & HF_GUEST_MASK))) { return; } - if ( (int32_t)type >= SVM_EXIT_READ_CR0 && type <= SVM_EXIT_READ_CR0 + 8 ) { + + if ((int32_t)type >= SVM_EXIT_READ_CR0 && type <= SVM_EXIT_READ_CR0 + 8) { if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) { - helper_vmexit(env, type, param); + cpu_vmexit(env, type, param, retaddr); } - } else if ( type >= SVM_EXIT_WRITE_CR0 && type <= SVM_EXIT_WRITE_CR0 + 8 ) { + } else if (type >= SVM_EXIT_WRITE_CR0 && type <= SVM_EXIT_WRITE_CR0 + 8) { if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) { - helper_vmexit(env, type, param); + cpu_vmexit(env, type, param, retaddr); } - } else if ( 
type >= SVM_EXIT_READ_DR0 && type <= SVM_EXIT_READ_DR0 + 7 ) { + } else if (type >= SVM_EXIT_READ_DR0 && type <= SVM_EXIT_READ_DR0 + 7) { if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) { - helper_vmexit(env, type, param); + cpu_vmexit(env, type, param, retaddr); } - } else if ( type >= SVM_EXIT_WRITE_DR0 && type <= SVM_EXIT_WRITE_DR0 + 7 ) { + } else if (type >= SVM_EXIT_WRITE_DR0 && type <= SVM_EXIT_WRITE_DR0 + 7) { if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) { - helper_vmexit(env, type, param); + cpu_vmexit(env, type, param, retaddr); } - } else if ( type >= SVM_EXIT_EXCP_BASE && type <= SVM_EXIT_EXCP_BASE + 31 ) { + } else if (type >= SVM_EXIT_EXCP_BASE && type <= SVM_EXIT_EXCP_BASE + 31) { if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) { - helper_vmexit(env, type, param); + cpu_vmexit(env, type, param, retaddr); } - } else if ( type == SVM_EXIT_MSR ) { + } else if (type == SVM_EXIT_MSR) { if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) { /* FIXME: this should be read in at vmrun (faster this way?) */ - uint64_t addr = ldq_phys(cs->as, env->vm_vmcb + - offsetof(struct vmcb, - control.msrpm_base_pa)); - uint32_t t0, t1; + uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb + + offsetof(struct vmcb, + control.msrpm_base_pa)); + uint32_t t0, t1, ecx; - uint32_t ecx = (uint32_t)env->regs[R_ECX]; - if ( (int32_t)ecx >= 0 && ecx <= 0x1fff ) { - t0 = (env->regs[R_ECX] * 2) % 8; - t1 = (env->regs[R_ECX] * 2) / 8; - } else if ( ecx >= 0xc0000000 && ecx <= 0xc0001fff ) { - t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2; + ecx = env->regs[R_ECX]; +#define XRANGE(x, a, b) (x >= a && x <= b) + if (XRANGE(ecx, 0, 0x1fff)) { + t0 = (ecx * 2) % 8; + t1 = (ecx * 2) / 8; + } else if (XRANGE(ecx, 0xc0000000, 0xc0001fff)) { + t0 = (8192 + ecx - 0xc0000000) * 2; t1 = (t0 / 8); t0 %= 8; - } else if ( ecx >= 0xc0010000 && ecx <= 0xc0011fff ) { - t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2; + } else if (XRANGE(ecx, 0xc0010000, 0xc0011fff)) { + t0 = (16384 + ecx - 0xc0010000) * 2; t1 = (t0 / 8); t0 %= 8; } else { - helper_vmexit(env, type, param); + cpu_vmexit(env, type, param, retaddr); t0 = 0; t1 = 0; } - if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) { - helper_vmexit(env, type, param); +#undef XRANGE + + if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) { + cpu_vmexit(env, type, param, retaddr); } } - } else { - if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) { - helper_vmexit(env, type, param); - } + } else if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) { + cpu_vmexit(env, type, param, retaddr); } } -void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, - uint64_t param) +void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, + uint64_t param) { - helper_svm_check_intercept_param(env, type, param); + cpu_svm_check_intercept_param(env, type, param, GETPC()); } void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, uint32_t next_eip_addend) { - CPUState *cs = CPU(x86_env_get_cpu(env)); + CPUState *cs = env_cpu(env); if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) { /* FIXME: this should be read in at vmrun (faster this way?) 
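   For reference, the I/O permission map holds one intercept bit per
   port, and a multi-byte access must test one bit per byte-wide port
   it covers; (param >> 4) & 7 carries the access size in bytes, so
   mask below becomes ((1 << size) - 1) shifted to the port's bit.
   A worked example (illustrative, not taken from the code): a 4-byte
   OUT to port 0x3f8 loads the 16-bit word at byte offset
   0x3f8 / 8 = 127 and tests bits 0..3, since 0x3f8 & 7 == 0.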
*/ - uint64_t addr = ldq_phys(cs->as, env->vm_vmcb + + uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa)); uint16_t mask = (1 << ((param >> 4) & 7)) - 1; - if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) { + if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) { /* next env->eip */ - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), env->eip + next_eip_addend); - helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16)); + cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC()); } } } -/* Note: currently only 32 bits of exit_code are used */ -void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) +void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1, + uintptr_t retaddr) { - CPUState *cs = CPU(x86_env_get_cpu(env)); + CPUState *cs = env_cpu(env); + + cpu_restore_state(cs, retaddr, true); + + // qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" + // PRIx64 ", " TARGET_FMT_lx ")!\n", + // exit_code, exit_info_1, + // x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, + // control.exit_info_2)), + // env->eip); + + cs->exception_index = EXCP_VMEXIT + exit_code; + env->error_code = exit_info_1; + + /* remove any pending exception */ + env->old_exception = -1; + cpu_loop_exit(cs); +} + +void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) +{ + CPUState *cs = env_cpu(env); uint32_t int_ctl; - qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" - PRIx64 ", " TARGET_FMT_lx ")!\n", - exit_code, exit_info_1, - ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, - control.exit_info_2)), - env->eip); - if (env->hflags & HF_INHIBIT_IRQ_MASK) { - stl_phys(cs->as, + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK); env->hflags &= ~HF_INHIBIT_IRQ_MASK; } else { - stl_phys(cs->as, + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0); } + env->hflags2 &= ~HF2_NPT_MASK; /* Save the VM state in the vmcb */ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es), @@ -602,86 +582,86 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), &env->segs[R_DS]); - stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base); - stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit); - stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base); - stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]); - int_ctl = ldl_phys(cs->as, + 
int_ctl = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK); int_ctl |= env->v_tpr & V_TPR_MASK; if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { int_ctl |= V_IRQ_MASK; } - stl_phys(cs->as, + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl); - stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags), + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags), cpu_compute_eflags(env)); - stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip), + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]); - stq_phys(cs->as, + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]); - stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl), + x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK); /* Reload the host state from vm_hsave */ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); - env->hflags &= ~HF_SVMI_MASK; + env->hflags &= ~HF_GUEST_MASK; env->intercept = 0; env->intercept_exceptions = 0; cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; env->tsc_offset = 0; - env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, + env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base)); - env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, + env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit)); - env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, + env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base)); - env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, + env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit)); - cpu_x86_update_cr0(env, ldq_phys(cs->as, + cpu_x86_update_cr0(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK); - cpu_x86_update_cr4(env, ldq_phys(cs->as, + cpu_x86_update_cr4(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr4))); - cpu_x86_update_cr3(env, ldq_phys(cs->as, + cpu_x86_update_cr3(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr3))); /* we need to set the efer after the crs so the hidden flags get set properly */ - cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, + cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.efer))); env->eflags = 0; - cpu_load_eflags(env, ldq_phys(cs->as, + cpu_load_eflags(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rflags)), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK | @@ -696,33 +676,33 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS); - env->eip = ldq_phys(cs->as, + env->eip = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip)); - env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave + + env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rsp)); - env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave + + env->regs[R_EAX] = x86_ldq_phys(cs, 
env->vm_hsave + offsetof(struct vmcb, save.rax)); - env->dr[6] = ldq_phys(cs->as, + env->dr[6] = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.dr6)); - env->dr[7] = ldq_phys(cs->as, + env->dr[7] = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.dr7)); /* other setups */ - stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code), + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code); - stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), + x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1); - stl_phys(cs->as, + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info), - ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj))); - stl_phys(cs->as, + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err), - ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err))); - stl_phys(cs->as, + x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0); env->hflags2 &= ~HF2_GIF_MASK; @@ -743,18 +723,4 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) /* If the host's rIP reloaded by #VMEXIT is outside the limit of the host's code segment or non-canonical (in the case of long mode), a #GP fault is delivered inside the host. */ - - /* remove any pending exception */ - cs->exception_index = -1; - env->error_code = 0; - env->old_exception = -1; - - cpu_loop_exit(cs); } - -void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) -{ - helper_vmexit(env, exit_code, exit_info_1); -} - -#endif diff --git a/qemu/target/i386/translate.c b/qemu/target/i386/translate.c new file mode 100644 index 00000000..a598a5a9 --- /dev/null +++ b/qemu/target/i386/translate.c @@ -0,0 +1,9373 @@ +/* + * i386 translation + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#include "qemu/osdep.h" + +#include "qemu/host-utils.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "tcg/tcg-op.h" +#include "exec/cpu_ldst.h" +#include "exec/translator.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" +#include "qemu/compiler.h" + +#include "unicorn/platform.h" +#include "uc_priv.h" + +#define PREFIX_REPZ 0x01 +#define PREFIX_REPNZ 0x02 +#define PREFIX_LOCK 0x04 +#define PREFIX_DATA 0x08 +#define PREFIX_ADR 0x10 +#define PREFIX_VEX 0x20 + +#ifdef TARGET_X86_64 +#define CODE64(s) ((s)->code64) +#define REX_X(s) ((s)->rex_x) +#define REX_B(s) ((s)->rex_b) +#else +#define CODE64(s) 0 +#define REX_X(s) 0 +#define REX_B(s) 0 +#endif + +#ifdef TARGET_X86_64 +# define ctztl ctz64 +# define clztl clz64 +#else +# define ctztl ctz32 +# define clztl clz32 +#endif + +/* For a switch indexed by MODRM, match all memory operands for a given OP. */ +#define CASE_MODRM_MEM_OP(OP) \ + case (0 << 6) | (OP << 3) | 0: \ + case (0 << 6) | (OP << 3) | 1: \ + case (0 << 6) | (OP << 3) | 2: \ + case (0 << 6) | (OP << 3) | 3: \ + case (0 << 6) | (OP << 3) | 4: \ + case (0 << 6) | (OP << 3) | 5: \ + case (0 << 6) | (OP << 3) | 6: \ + case (0 << 6) | (OP << 3) | 7: \ + case (1 << 6) | (OP << 3) | 0: \ + case (1 << 6) | (OP << 3) | 1: \ + case (1 << 6) | (OP << 3) | 2: \ + case (1 << 6) | (OP << 3) | 3: \ + case (1 << 6) | (OP << 3) | 4: \ + case (1 << 6) | (OP << 3) | 5: \ + case (1 << 6) | (OP << 3) | 6: \ + case (1 << 6) | (OP << 3) | 7: \ + case (2 << 6) | (OP << 3) | 0: \ + case (2 << 6) | (OP << 3) | 1: \ + case (2 << 6) | (OP << 3) | 2: \ + case (2 << 6) | (OP << 3) | 3: \ + case (2 << 6) | (OP << 3) | 4: \ + case (2 << 6) | (OP << 3) | 5: \ + case (2 << 6) | (OP << 3) | 6: \ + case (2 << 6) | (OP << 3) | 7 + +#define CASE_MODRM_OP(OP) \ + case (0 << 6) | (OP << 3) | 0: \ + case (0 << 6) | (OP << 3) | 1: \ + case (0 << 6) | (OP << 3) | 2: \ + case (0 << 6) | (OP << 3) | 3: \ + case (0 << 6) | (OP << 3) | 4: \ + case (0 << 6) | (OP << 3) | 5: \ + case (0 << 6) | (OP << 3) | 6: \ + case (0 << 6) | (OP << 3) | 7: \ + case (1 << 6) | (OP << 3) | 0: \ + case (1 << 6) | (OP << 3) | 1: \ + case (1 << 6) | (OP << 3) | 2: \ + case (1 << 6) | (OP << 3) | 3: \ + case (1 << 6) | (OP << 3) | 4: \ + case (1 << 6) | (OP << 3) | 5: \ + case (1 << 6) | (OP << 3) | 6: \ + case (1 << 6) | (OP << 3) | 7: \ + case (2 << 6) | (OP << 3) | 0: \ + case (2 << 6) | (OP << 3) | 1: \ + case (2 << 6) | (OP << 3) | 2: \ + case (2 << 6) | (OP << 3) | 3: \ + case (2 << 6) | (OP << 3) | 4: \ + case (2 << 6) | (OP << 3) | 5: \ + case (2 << 6) | (OP << 3) | 6: \ + case (2 << 6) | (OP << 3) | 7: \ + case (3 << 6) | (OP << 3) | 0: \ + case (3 << 6) | (OP << 3) | 1: \ + case (3 << 6) | (OP << 3) | 2: \ + case (3 << 6) | (OP << 3) | 3: \ + case (3 << 6) | (OP << 3) | 4: \ + case (3 << 6) | (OP << 3) | 5: \ + case (3 << 6) | (OP << 3) | 6: \ + case (3 << 6) | (OP << 3) | 7 + +#include "exec/gen-icount.h" + +typedef struct DisasContext { + DisasContextBase base; + + /* current insn context */ + int override; /* -1 if no override */ + int prefix; + MemOp aflag; + MemOp dflag; + target_ulong pc_start; + target_ulong pc; /* pc = eip + cs_base */ + /* current block context */ + target_ulong cs_base; /* base of CS segment */ + int pe; /* protected mode */ + int code32; /* 32 bit code segment */ +#ifdef TARGET_X86_64 + int lma; /* long mode active */ + int code64; /* 64 bit code segment */ + int rex_x, rex_b; +#endif + int vex_l; /* vex vector length */ + int vex_v; /* vex vvvv register, without 1's 
complement. */ + int ss32; /* 32 bit stack segment */ + CCOp cc_op; /* current CC operation */ + CCOp last_cc_op; /* Unicorn: last CC operation. Save this to see if cc_op has changed */ + bool cc_op_dirty; +#ifdef TARGET_X86_64 + bool x86_64_hregs; +#endif + int addseg; /* non zero if either DS/ES/SS have a non zero base */ + int f_st; /* currently unused */ + int vm86; /* vm86 mode */ + int cpl; + int iopl; + int tf; /* TF cpu flag */ + int jmp_opt; /* use direct block chaining for direct jumps */ + int repz_opt; /* optimize jumps within repz instructions */ + int mem_index; /* select memory access functions */ + uint64_t flags; /* all execution flags */ + int popl_esp_hack; /* for correct popl with esp base handling */ + int rip_offset; /* only used in x86_64, but left for simplicity */ + int cpuid_features; + int cpuid_ext_features; + int cpuid_ext2_features; + int cpuid_ext3_features; + int cpuid_7_0_ebx_features; + int cpuid_xsave_features; + + /* TCG local temps */ + TCGv cc_srcT; + TCGv A0; + TCGv T0; + TCGv T1; + + /* TCG local register indexes (only used inside old micro ops) */ + TCGv tmp0; + TCGv tmp4; + TCGv_ptr ptr0; + TCGv_ptr ptr1; + TCGv_i32 tmp2_i32; + TCGv_i32 tmp3_i32; + TCGv_i64 tmp1_i64; + + sigjmp_buf jmpbuf; + + // Unicorn + struct uc_struct *uc; + target_ulong prev_pc; /* save address of the previous instruction */ +} DisasContext; + +static void gen_eob(DisasContext *s); +static void gen_jr(DisasContext *s, TCGv dest); +static void gen_jmp(DisasContext *s, target_ulong eip); +static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); +static void gen_op(DisasContext *s, int op, MemOp ot, int d); + +/* i386 arith/logic operations */ +enum { + OP_ADDL, + OP_ORL, + OP_ADCL, + OP_SBBL, + OP_ANDL, + OP_SUBL, + OP_XORL, + OP_CMPL, +}; + +/* i386 shift ops */ +enum { + OP_ROL, + OP_ROR, + OP_RCL, + OP_RCR, + OP_SHL, + OP_SHR, + OP_SHL1, /* undocumented */ + OP_SAR = 7, +}; + +enum { + JCC_O, + JCC_B, + JCC_Z, + JCC_BE, + JCC_S, + JCC_P, + JCC_L, + JCC_LE, +}; + +enum { + /* I386 int registers */ + OR_EAX, /* MUST be even numbered */ + OR_ECX, + OR_EDX, + OR_EBX, + OR_ESP, + OR_EBP, + OR_ESI, + OR_EDI, + + OR_TMP0 = 16, /* temporary operand register */ + OR_TMP1, + OR_A0, /* temporary register used when doing address evaluation */ +}; + +enum { + USES_CC_DST = 1, + USES_CC_SRC = 2, + USES_CC_SRC2 = 4, + USES_CC_SRCT = 8, +}; + +/* Bit set if the global variable is live after setting CC_OP to X. 
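+   set_cc_op() consults this table to discard any TCG global that the
+   new CC_OP no longer reads, letting the optimizer drop the dead
+   computation behind it.  A sketch of the intent (not code from this
+   file): moving from CC_OP_SUBB, where DST, SRC and SRCT are live, to
+   CC_OP_LOGICB, where only DST is live, yields
+       dead = cc_op_live[CC_OP_SUBB] & ~cc_op_live[CC_OP_LOGICB];
+   which equals USES_CC_SRC | USES_CC_SRCT, so cpu_cc_src and cc_srcT
+   are both discarded.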
*/ +static const uint8_t cc_op_live[CC_OP_NB] = { + [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_EFLAGS] = USES_CC_SRC, + + [CC_OP_MULB] = USES_CC_DST | USES_CC_SRC, + [CC_OP_MULW] = USES_CC_DST | USES_CC_SRC, + [CC_OP_MULL] = USES_CC_DST | USES_CC_SRC, + [CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC, + + [CC_OP_ADDB] = USES_CC_DST | USES_CC_SRC, + [CC_OP_ADDW] = USES_CC_DST | USES_CC_SRC, + [CC_OP_ADDL] = USES_CC_DST | USES_CC_SRC, + [CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC, + + [CC_OP_ADCB] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_ADCW] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_ADCL] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + + [CC_OP_SUBB] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, + [CC_OP_SUBW] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, + [CC_OP_SUBL] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, + [CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, + + [CC_OP_SBBB] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_SBBW] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_SBBL] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + + [CC_OP_LOGICB] = USES_CC_DST, + [CC_OP_LOGICW] = USES_CC_DST, + [CC_OP_LOGICL] = USES_CC_DST, + [CC_OP_LOGICQ] = USES_CC_DST, + + [CC_OP_INCB] = USES_CC_DST | USES_CC_SRC, + [CC_OP_INCW] = USES_CC_DST | USES_CC_SRC, + [CC_OP_INCL] = USES_CC_DST | USES_CC_SRC, + [CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC, + + [CC_OP_DECB] = USES_CC_DST | USES_CC_SRC, + [CC_OP_DECW] = USES_CC_DST | USES_CC_SRC, + [CC_OP_DECL] = USES_CC_DST | USES_CC_SRC, + [CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC, + + [CC_OP_SHLB] = USES_CC_DST | USES_CC_SRC, + [CC_OP_SHLW] = USES_CC_DST | USES_CC_SRC, + [CC_OP_SHLL] = USES_CC_DST | USES_CC_SRC, + [CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC, + + [CC_OP_SARB] = USES_CC_DST | USES_CC_SRC, + [CC_OP_SARW] = USES_CC_DST | USES_CC_SRC, + [CC_OP_SARL] = USES_CC_DST | USES_CC_SRC, + [CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC, + + [CC_OP_BMILGB] = USES_CC_DST | USES_CC_SRC, + [CC_OP_BMILGW] = USES_CC_DST | USES_CC_SRC, + [CC_OP_BMILGL] = USES_CC_DST | USES_CC_SRC, + [CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC, + + [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC, + [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2, + [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_CLR] = 0, + [CC_OP_POPCNT] = USES_CC_SRC, +}; + +static inline void gen_jmp_im(DisasContext *s, target_ulong pc); + +static void set_cc_op(DisasContext *s, CCOp op) +{ + int dead; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; + TCGv cpu_cc_dst = tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = tcg_ctx->cpu_cc_src; + TCGv cpu_cc_src2 = tcg_ctx->cpu_cc_src2; + + if (s->cc_op == op) { + return; + } + + /* Discard CC computation that will no longer be used. */ + dead = cc_op_live[s->cc_op] & ~cc_op_live[op]; + if (dead & USES_CC_DST) { + tcg_gen_discard_tl(tcg_ctx, cpu_cc_dst); + } + if (dead & USES_CC_SRC) { + tcg_gen_discard_tl(tcg_ctx, cpu_cc_src); + } + if (dead & USES_CC_SRC2) { + tcg_gen_discard_tl(tcg_ctx, cpu_cc_src2); + } + if (dead & USES_CC_SRCT) { + tcg_gen_discard_tl(tcg_ctx, s->cc_srcT); + } + + if (op == CC_OP_DYNAMIC) { + /* The DYNAMIC setting is translator only, and should never be + stored. Thus we always consider it clean. */ + s->cc_op_dirty = false; + } else { + /* Discard any computed CC_OP value (see shifts). 
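+       Concretely: variable-count shifts (see gen_shift_flags() below)
+       store a runtime-selected value into cpu_cc_op with a movcond and
+       leave s->cc_op as CC_OP_DYNAMIC, so that stale runtime value has
+       to be discarded here before cc_op_dirty is set and
+       gen_update_cc_op() later overwrites the global with a
+       compile-time constant.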
*/ + if (s->cc_op == CC_OP_DYNAMIC) { + tcg_gen_discard_i32(tcg_ctx, cpu_cc_op); + } + s->cc_op_dirty = true; + } + s->cc_op = op; +} + +static void gen_update_cc_op(DisasContext *s) +{ + if (s->cc_op_dirty) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; + + tcg_gen_movi_i32(tcg_ctx, cpu_cc_op, s->cc_op); + s->cc_op_dirty = false; + } +} + +#ifdef TARGET_X86_64 + +#define NB_OP_SIZES 4 + +#else /* !TARGET_X86_64 */ + +#define NB_OP_SIZES 3 + +#endif /* !TARGET_X86_64 */ + +#if defined(HOST_WORDS_BIGENDIAN) +#define REG_B_OFFSET (sizeof(target_ulong) - 1) +#define REG_H_OFFSET (sizeof(target_ulong) - 2) +#define REG_W_OFFSET (sizeof(target_ulong) - 2) +#define REG_L_OFFSET (sizeof(target_ulong) - 4) +#define REG_LH_OFFSET (sizeof(target_ulong) - 8) +#else +#define REG_B_OFFSET 0 +#define REG_H_OFFSET 1 +#define REG_W_OFFSET 0 +#define REG_L_OFFSET 0 +#define REG_LH_OFFSET 4 +#endif + +/* In instruction encodings for byte register accesses the + * register number usually indicates "low 8 bits of register N"; + * however there are some special cases where N 4..7 indicates + * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return + * true for this special case, false otherwise. + */ +static inline bool byte_reg_is_xH(DisasContext *s, int reg) +{ + if (reg < 4) { + return false; + } +#ifdef TARGET_X86_64 + if (reg >= 8 || s->x86_64_hregs) { + return false; + } +#endif + return true; +} + +/* Select the size of a push/pop operation. */ +static inline MemOp mo_pushpop(DisasContext *s, MemOp ot) +{ + if (CODE64(s)) { + return ot == MO_16 ? MO_16 : MO_64; + } else { + return ot; + } +} + +/* Select the size of the stack pointer. */ +static inline MemOp mo_stacksize(DisasContext *s) +{ + return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; +} + +/* Select only size 64 else 32. Used for SSE operand sizes. */ +static inline MemOp mo_64_32(MemOp ot) +{ +#ifdef TARGET_X86_64 + return ot == MO_64 ? MO_64 : MO_32; +#else + return MO_32; +#endif +} + +/* Select size 8 if lsb of B is clear, else OT. Used for decoding + byte vs word opcodes. */ +static inline MemOp mo_b_d(int b, MemOp ot) +{ + return b & 1 ? ot : MO_8; +} + +/* Select size 8 if lsb of B is clear, else OT capped at 32. + Used for decoding operand size of port opcodes. */ +static inline MemOp mo_b_d32(int b, MemOp ot) +{ + return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; +} + +static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + switch(ot) { + case MO_8: + if (!byte_reg_is_xH(s, reg)) { + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], tcg_ctx->cpu_regs[reg], t0, 0, 8); + } else { + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_regs[reg - 4], tcg_ctx->cpu_regs[reg - 4], t0, 8, 8); + } + break; + case MO_16: + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], tcg_ctx->cpu_regs[reg], t0, 0, 16); + break; + case MO_32: + /* For x86_64, this sets the higher half of register to zero. + For i386, this is equivalent to a mov. 
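+           This mirrors the architectural rule that writing a 32-bit
+           destination zero-extends into the full 64-bit register, e.g.
+           (illustrative):
+               mov eax, 1   ; RAX = 1, bits 63..32 cleared
+               mov ax, 1    ; only bits 15..0 of RAX change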
*/ + tcg_gen_ext32u_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], t0); + break; +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], t0); + break; +#endif + default: + tcg_abort(); + } +} + +static inline +void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (ot == MO_8 && byte_reg_is_xH(s, reg)) { + tcg_gen_extract_tl(tcg_ctx, t0, tcg_ctx->cpu_regs[reg - 4], 8, 8); + } else { + tcg_gen_mov_tl(tcg_ctx, t0, tcg_ctx->cpu_regs[reg]); + } +} + +static void gen_add_A0_im(DisasContext *s, int val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, val); + if (!CODE64(s)) { + tcg_gen_ext32u_tl(tcg_ctx, s->A0, s->A0); + } +} + +static inline void gen_op_jmp_v(TCGContext *tcg_ctx, TCGv dest) +{ + tcg_gen_st_tl(tcg_ctx, dest, tcg_ctx->cpu_env, offsetof(CPUX86State, eip)); +} + +static inline +void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_addi_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_regs[reg], val); + gen_op_mov_reg_v(s, size, reg, s->tmp0); +} + +static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_add_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_regs[reg], s->T0); + gen_op_mov_reg_v(s, size, reg, s->tmp0); +} + +static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ)) + gen_jmp_im(s, s->prev_pc); // Unicorn: sync EIP + + tcg_gen_qemu_ld_tl(tcg_ctx, t0, a0, s->mem_index, idx | MO_LE); +} + +static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE)) + gen_jmp_im(s, s->prev_pc); // Unicorn: sync EIP + + tcg_gen_qemu_st_tl(tcg_ctx, t0, a0, s->mem_index, idx | MO_LE); +} + +static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) +{ + if (d == OR_TMP0) { + gen_op_st_v(s, idx, s->T0, s->A0); + } else { + gen_op_mov_reg_v(s, idx, d, s->T0); + } +} + +static inline void gen_jmp_im(DisasContext *s, target_ulong pc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_movi_tl(tcg_ctx, s->tmp0, pc); + gen_op_jmp_v(tcg_ctx, s->tmp0); +} + +/* Compute SEG:REG into A0. SEG is selected from the override segment + (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to + indicate no override. 
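+   An illustrative example: for "mov al, [fs:ebx]" the decoder passes
+   def_seg = R_DS and ovr_seg = R_FS, so in 32-bit code A0 becomes
+   zext32(fs.base + ebx); with no override and addseg clear, the
+   segment base is skipped and A0 is simply zext32(ebx).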
*/ +static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, + int def_seg, int ovr_seg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + switch (aflag) { +#ifdef TARGET_X86_64 + case MO_64: + if (ovr_seg < 0) { + tcg_gen_mov_tl(tcg_ctx, s->A0, a0); + return; + } + break; +#endif + case MO_32: + /* 32 bit address */ + if (ovr_seg < 0 && s->addseg) { + ovr_seg = def_seg; + } + if (ovr_seg < 0) { + tcg_gen_ext32u_tl(tcg_ctx, s->A0, a0); + return; + } + break; + case MO_16: + /* 16 bit address */ + tcg_gen_ext16u_tl(tcg_ctx, s->A0, a0); + a0 = s->A0; + if (ovr_seg < 0) { + if (s->addseg) { + ovr_seg = def_seg; + } else { + return; + } + } + break; + default: + tcg_abort(); + } + + if (ovr_seg >= 0) { + TCGv seg = tcg_ctx->cpu_seg_base[ovr_seg]; + + if (aflag == MO_64) { + tcg_gen_add_tl(tcg_ctx, s->A0, a0, seg); + } else if (CODE64(s)) { + tcg_gen_ext32u_tl(tcg_ctx, s->A0, a0); + tcg_gen_add_tl(tcg_ctx, s->A0, s->A0, seg); + } else { + tcg_gen_add_tl(tcg_ctx, s->A0, a0, seg); + tcg_gen_ext32u_tl(tcg_ctx, s->A0, s->A0); + } + } +} + +static inline void gen_string_movl_A0_ESI(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_lea_v_seg(s, s->aflag, tcg_ctx->cpu_regs[R_ESI], R_DS, s->override); +} + +static inline void gen_string_movl_A0_EDI(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_lea_v_seg(s, s->aflag, tcg_ctx->cpu_regs[R_EDI], R_ES, -1); +} + +static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_ld32s_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, df)); + tcg_gen_shli_tl(tcg_ctx, s->T0, s->T0, ot); +}; + +static TCGv gen_ext_tl(TCGContext *tcg_ctx, TCGv dst, TCGv src, MemOp size, bool sign) +{ + switch (size) { + case MO_8: + if (sign) { + tcg_gen_ext8s_tl(tcg_ctx, dst, src); + } else { + tcg_gen_ext8u_tl(tcg_ctx, dst, src); + } + return dst; + case MO_16: + if (sign) { + tcg_gen_ext16s_tl(tcg_ctx, dst, src); + } else { + tcg_gen_ext16u_tl(tcg_ctx, dst, src); + } + return dst; +#ifdef TARGET_X86_64 + case MO_32: + if (sign) { + tcg_gen_ext32s_tl(tcg_ctx, dst, src); + } else { + tcg_gen_ext32u_tl(tcg_ctx, dst, src); + } + return dst; +#endif + default: + return src; + } +} + +static void gen_extu(TCGContext *tcg_ctx, MemOp ot, TCGv reg) +{ + gen_ext_tl(tcg_ctx, reg, reg, ot, false); +} + +static void gen_exts(TCGContext *tcg_ctx, MemOp ot, TCGv reg) +{ + gen_ext_tl(tcg_ctx, reg, reg, ot, true); +} + +static inline +void gen_op_jnz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_regs[R_ECX]); + gen_extu(tcg_ctx, size, s->tmp0); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, s->tmp0, 0, label1); +} + +static inline +void gen_op_jz_ecx(DisasContext *s, MemOp size, TCGLabel *label1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_regs[R_ECX]); + gen_extu(tcg_ctx, size, s->tmp0); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, s->tmp0, 0, label1); +} + +static void gen_helper_in_func(TCGContext *tcg_ctx, MemOp ot, TCGv v, TCGv_i32 n) +{ + switch (ot) { + case MO_8: + gen_helper_inb(tcg_ctx, v, tcg_ctx->cpu_env, n); + break; + case MO_16: + gen_helper_inw(tcg_ctx, v, tcg_ctx->cpu_env, n); + break; + case MO_32: + gen_helper_inl(tcg_ctx, v, tcg_ctx->cpu_env, n); + break; + default: + tcg_abort(); + } +} + +static void gen_helper_out_func(TCGContext *tcg_ctx, MemOp ot, TCGv_i32 v, TCGv_i32 n) +{ + switch (ot) { + case MO_8: + gen_helper_outb(tcg_ctx, 
tcg_ctx->cpu_env, v, n); + break; + case MO_16: + gen_helper_outw(tcg_ctx, tcg_ctx->cpu_env, v, n); + break; + case MO_32: + gen_helper_outl(tcg_ctx, tcg_ctx->cpu_env, v, n); + break; + default: + tcg_abort(); + } +} + +static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip, + uint32_t svm_flags) +{ + // Unicorn: allow all I/O instructions + return; + + TCGContext *tcg_ctx = s->uc->tcg_ctx; + target_ulong next_eip; + + if (s->pe && (s->cpl > s->iopl || s->vm86)) { + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + switch (ot) { + case MO_8: + gen_helper_check_iob(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + break; + case MO_16: + gen_helper_check_iow(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + break; + case MO_32: + gen_helper_check_iol(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + break; + default: + tcg_abort(); + } + } + if(s->flags & HF_GUEST_MASK) { + gen_update_cc_op(s); + gen_jmp_im(s, cur_eip); + svm_flags |= (1 << (4 + ot)); + next_eip = s->pc - s->cs_base; + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + gen_helper_svm_check_io(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, + tcg_const_i32(tcg_ctx, svm_flags), + tcg_const_i32(tcg_ctx, next_eip - cur_eip)); + } +} + +static inline void gen_movs(DisasContext *s, MemOp ot) +{ + gen_string_movl_A0_ESI(s); + gen_op_ld_v(s, ot, s->T0, s->A0); + gen_string_movl_A0_EDI(s); + gen_op_st_v(s, ot, s->T0, s->A0); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_ESI); + gen_op_add_reg_T0(s, s->aflag, R_EDI); +} + +static void gen_op_update1_cc(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); +} + +static void gen_op_update2_cc(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T1); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); +} + +static void gen_op_update3_cc(DisasContext *s, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, reg); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T1); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); +} + +static inline void gen_op_testl_T0_T1_cc(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0, s->T1); +} + +static void gen_op_update_neg_cc(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); + tcg_gen_neg_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0); + tcg_gen_movi_tl(tcg_ctx, s->cc_srcT, 0); +} + +/* compute all eflags to cc_src */ +static void gen_compute_eflags(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv zero, dst, src1, src2; + int live, dead; + + if (s->cc_op == CC_OP_EFLAGS) { + return; + } + if (s->cc_op == CC_OP_CLR) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, CC_Z | CC_P); + set_cc_op(s, CC_OP_EFLAGS); + return; + } + + zero = NULL; + dst = tcg_ctx->cpu_cc_dst; + src1 = tcg_ctx->cpu_cc_src; + src2 = tcg_ctx->cpu_cc_src2; + + /* Take care to not read values that are not live. 
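+       A global discarded by set_cc_op() holds no defined value, so each
+       dead operand is redirected to a shared zero temporary before the
+       cc_compute_all helper call below; e.g. under CC_OP_LOGICB only
+       cpu_cc_dst is live, and both src1 and src2 alias the zero temp.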
*/ + live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; + dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); + if (dead) { + zero = tcg_const_tl(tcg_ctx, 0); + if (dead & USES_CC_DST) { + dst = zero; + } + if (dead & USES_CC_SRC) { + src1 = zero; + } + if (dead & USES_CC_SRC2) { + src2 = zero; + } + } + + gen_update_cc_op(s); + gen_helper_cc_compute_all(tcg_ctx, tcg_ctx->cpu_cc_src, dst, src1, src2, tcg_ctx->cpu_cc_op); + set_cc_op(s, CC_OP_EFLAGS); + + if (dead) { + tcg_temp_free(tcg_ctx, zero); + } +} + +typedef struct CCPrepare { + TCGCond cond; + TCGv reg; + TCGv reg2; + target_ulong imm; + target_ulong mask; + bool use_reg2; + bool no_setcond; +} CCPrepare; + +/* compute eflags.C to reg */ +static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv t0, t1; + int size, shift; + + switch (s->cc_op) { + case CC_OP_SUBB: + case CC_OP_SUBW: + case CC_OP_SUBL: + case CC_OP_SUBQ: + /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */ + size = s->cc_op - CC_OP_SUBB; + t1 = gen_ext_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_cc_src, size, false); + /* If no temporary was used, be careful not to alias t1 and t0. */ + t0 = t1 == tcg_ctx->cpu_cc_src ? s->tmp0 : reg; + tcg_gen_mov_tl(tcg_ctx, t0, s->cc_srcT); + gen_extu(tcg_ctx, size, t0); + goto add_sub; + + case CC_OP_ADDB: + case CC_OP_ADDW: + case CC_OP_ADDL: + case CC_OP_ADDQ: + /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */ + size = s->cc_op - CC_OP_ADDB; + t1 = gen_ext_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_cc_src, size, false); + t0 = gen_ext_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_dst, size, false); + add_sub: + return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0, + .reg2 = t1, .mask = -1, .use_reg2 = true }; + + case CC_OP_LOGICB: + case CC_OP_LOGICW: + case CC_OP_LOGICL: + case CC_OP_LOGICQ: + case CC_OP_CLR: + case CC_OP_POPCNT: + return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; + + case CC_OP_INCB: + case CC_OP_INCW: + case CC_OP_INCL: + case CC_OP_INCQ: + + case CC_OP_DECB: + case CC_OP_DECW: + case CC_OP_DECL: + case CC_OP_DECQ: + return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, + .mask = -1, .no_setcond = true }; + + case CC_OP_SHLB: + case CC_OP_SHLW: + case CC_OP_SHLL: + case CC_OP_SHLQ: + /* (CC_SRC >> (DATA_BITS - 1)) & 1 */ + size = s->cc_op - CC_OP_SHLB; + shift = (8 << size) - 1; + return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, + .mask = (target_ulong)1 << shift }; + + case CC_OP_MULB: + case CC_OP_MULW: + case CC_OP_MULL: + case CC_OP_MULQ: + return (CCPrepare) { .cond = TCG_COND_NE, + .reg = tcg_ctx->cpu_cc_src, .mask = -1 }; + + case CC_OP_BMILGB: + case CC_OP_BMILGW: + case CC_OP_BMILGL: + case CC_OP_BMILGQ: + size = s->cc_op - CC_OP_BMILGB; + t0 = gen_ext_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_src, size, false); + return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; + + case CC_OP_ADCX: + case CC_OP_ADCOX: + return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_dst, + .mask = -1, .no_setcond = true }; + + case CC_OP_EFLAGS: + case CC_OP_SARB: + case CC_OP_SARW: + case CC_OP_SARL: + case CC_OP_SARQ: + /* CC_SRC & 1 */ + return (CCPrepare) { .cond = TCG_COND_NE, + .reg = tcg_ctx->cpu_cc_src, .mask = CC_C }; + + default: + /* The need to compute only C from CC_OP_DYNAMIC is important + in efficiently implementing e.g. INC at the start of a TB. 
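+       INC and DEC update every arithmetic flag except CF, so
+       translating them needs the incoming carry and nothing else;
+       calling the narrower cc_compute_c helper here avoids
+       materializing a full EFLAGS image just to preserve that one bit.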
*/ + gen_update_cc_op(s); + gen_helper_cc_compute_c(tcg_ctx, reg, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_src, + tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_op); + return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, + .mask = -1, .no_setcond = true }; + } +} + +/* compute eflags.P to reg */ +static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + gen_compute_eflags(s); + return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, + .mask = CC_P }; +} + +/* compute eflags.S to reg */ +static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + switch (s->cc_op) { + case CC_OP_DYNAMIC: + gen_compute_eflags(s); + /* FALLTHRU */ + case CC_OP_EFLAGS: + case CC_OP_ADCX: + case CC_OP_ADOX: + case CC_OP_ADCOX: + return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, + .mask = CC_S }; + case CC_OP_CLR: + case CC_OP_POPCNT: + return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; + default: + { + MemOp size = (s->cc_op - CC_OP_ADDB) & 3; + TCGv t0 = gen_ext_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_dst, size, true); + return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 }; + } + } +} + +/* compute eflags.O to reg */ +static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + switch (s->cc_op) { + case CC_OP_ADOX: + case CC_OP_ADCOX: + return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src2, + .mask = -1, .no_setcond = true }; + case CC_OP_CLR: + case CC_OP_POPCNT: + return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; + default: + gen_compute_eflags(s); + return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, + .mask = CC_O }; + } +} + +/* compute eflags.Z to reg */ +static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + switch (s->cc_op) { + case CC_OP_DYNAMIC: + gen_compute_eflags(s); + /* FALLTHRU */ + case CC_OP_EFLAGS: + case CC_OP_ADCX: + case CC_OP_ADOX: + case CC_OP_ADCOX: + return (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, + .mask = CC_Z }; + case CC_OP_CLR: + return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 }; + case CC_OP_POPCNT: + return (CCPrepare) { .cond = TCG_COND_EQ, .reg = tcg_ctx->cpu_cc_src, + .mask = -1 }; + default: + { + MemOp size = (s->cc_op - CC_OP_ADDB) & 3; + TCGv t0 = gen_ext_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_dst, size, false); + return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; + } + } +} + +/* perform a conditional store into register 'reg' according to jump opcode + value 'b'. In the fast case, T0 is guaranted not to be used. */ +static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int inv, jcc_op, cond; + MemOp size; + CCPrepare cc; + TCGv t0; + + inv = b & 1; + jcc_op = (b >> 1) & 7; + + switch (s->cc_op) { + case CC_OP_SUBB: + case CC_OP_SUBW: + case CC_OP_SUBL: + case CC_OP_SUBQ: + /* We optimize relational operators for the cmp/jcc case. 
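+           After a SUB or CMP, cc_srcT still holds the first operand and
+           cpu_cc_src the second, so the signed and unsigned relational
+           tests can be emitted as one direct TCG comparison of the
+           operands instead of reconstructing EFLAGS.  Roughly (a
+           sketch, assuming both operands already extended to the
+           operand size):
+               cmp eax, ebx ; jb taken  ==>  brcond(TCG_COND_LTU, eax, ebx)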
*/ + size = s->cc_op - CC_OP_SUBB; + switch (jcc_op) { + case JCC_BE: + tcg_gen_mov_tl(tcg_ctx, s->tmp4, s->cc_srcT); + gen_extu(tcg_ctx, size, s->tmp4); + t0 = gen_ext_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_cc_src, size, false); + cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4, + .reg2 = t0, .mask = -1, .use_reg2 = true }; + break; + + case JCC_L: + cond = TCG_COND_LT; + goto fast_jcc_l; + case JCC_LE: + cond = TCG_COND_LE; + fast_jcc_l: + tcg_gen_mov_tl(tcg_ctx, s->tmp4, s->cc_srcT); + gen_exts(tcg_ctx, size, s->tmp4); + t0 = gen_ext_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_cc_src, size, true); + cc = (CCPrepare) { .cond = cond, .reg = s->tmp4, + .reg2 = t0, .mask = -1, .use_reg2 = true }; + break; + + default: + goto slow_jcc; + } + break; + + default: + slow_jcc: + /* This actually generates good code for JC, JZ and JS. */ + switch (jcc_op) { + case JCC_O: + cc = gen_prepare_eflags_o(s, reg); + break; + case JCC_B: + cc = gen_prepare_eflags_c(s, reg); + break; + case JCC_Z: + cc = gen_prepare_eflags_z(s, reg); + break; + case JCC_BE: + gen_compute_eflags(s); + cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = tcg_ctx->cpu_cc_src, + .mask = CC_Z | CC_C }; + break; + case JCC_S: + cc = gen_prepare_eflags_s(s, reg); + break; + case JCC_P: + cc = gen_prepare_eflags_p(s, reg); + break; + case JCC_L: + gen_compute_eflags(s); + if (reg == tcg_ctx->cpu_cc_src) { + reg = s->tmp0; + } + tcg_gen_shri_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_src, 4); /* CC_O -> CC_S */ + tcg_gen_xor_tl(tcg_ctx, reg, reg, tcg_ctx->cpu_cc_src); + cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, + .mask = CC_S }; + break; + default: + case JCC_LE: + gen_compute_eflags(s); + if (reg == tcg_ctx->cpu_cc_src) { + reg = s->tmp0; + } + tcg_gen_shri_tl(tcg_ctx, reg, tcg_ctx->cpu_cc_src, 4); /* CC_O -> CC_S */ + tcg_gen_xor_tl(tcg_ctx, reg, reg, tcg_ctx->cpu_cc_src); + cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, + .mask = CC_S | CC_Z }; + break; + } + break; + } + + if (inv) { + cc.cond = tcg_invert_cond(cc.cond); + } + return cc; +} + +static void gen_setcc1(DisasContext *s, int b, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + CCPrepare cc = gen_prepare_cc(s, b, reg); + + if (cc.no_setcond) { + if (cc.cond == TCG_COND_EQ) { + tcg_gen_xori_tl(tcg_ctx, reg, cc.reg, 1); + } else { + tcg_gen_mov_tl(tcg_ctx, reg, cc.reg); + } + return; + } + + if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 && + cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) { + tcg_gen_shri_tl(tcg_ctx, reg, cc.reg, ctztl(cc.mask)); + tcg_gen_andi_tl(tcg_ctx, reg, reg, 1); + return; + } + if (cc.mask != -1) { + tcg_gen_andi_tl(tcg_ctx, reg, cc.reg, cc.mask); + cc.reg = reg; + } + if (cc.use_reg2) { + tcg_gen_setcond_tl(tcg_ctx, cc.cond, reg, cc.reg, cc.reg2); + } else { + tcg_gen_setcondi_tl(tcg_ctx, cc.cond, reg, cc.reg, cc.imm); + } +} + +static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg) +{ + gen_setcc1(s, JCC_B << 1, reg); +} + +/* generate a conditional jump to label 'l1' according to jump opcode + value 'b'. In the fast case, T0 is guaranted not to be used. 
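+   Unlike gen_jcc1() below, this variant neither flushes cc_op nor
+   forces it to CC_OP_DYNAMIC, so it is only safe while translation
+   continues inside the same block with a consistent flags state on
+   both sides of the branch.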
*/ +static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + CCPrepare cc = gen_prepare_cc(s, b, s->T0); + + if (cc.mask != -1) { + tcg_gen_andi_tl(tcg_ctx, s->T0, cc.reg, cc.mask); + cc.reg = s->T0; + } + if (cc.use_reg2) { + tcg_gen_brcond_tl(tcg_ctx, cc.cond, cc.reg, cc.reg2, l1); + } else { + tcg_gen_brcondi_tl(tcg_ctx, cc.cond, cc.reg, cc.imm, l1); + } +} + +/* Generate a conditional jump to label 'l1' according to jump opcode + value 'b'. In the fast case, T0 is guaranted not to be used. + A translation block must end soon. */ +static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + CCPrepare cc = gen_prepare_cc(s, b, s->T0); + + gen_update_cc_op(s); + if (cc.mask != -1) { + tcg_gen_andi_tl(tcg_ctx, s->T0, cc.reg, cc.mask); + cc.reg = s->T0; + } + set_cc_op(s, CC_OP_DYNAMIC); + if (cc.use_reg2) { + tcg_gen_brcond_tl(tcg_ctx, cc.cond, cc.reg, cc.reg2, l1); + } else { + tcg_gen_brcondi_tl(tcg_ctx, cc.cond, cc.reg, cc.imm, l1); + } +} + +/* XXX: does not work with gdbstub "ice" single step - not a + serious problem */ +static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + gen_op_jnz_ecx(s, s->aflag, l1); + gen_set_label(tcg_ctx, l2); + gen_jmp_tb(s, next_eip, 1); + gen_set_label(tcg_ctx, l1); + return l2; +} + +static inline void gen_stos(DisasContext *s, MemOp ot) +{ + gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); + gen_string_movl_A0_EDI(s); + gen_op_st_v(s, ot, s->T0, s->A0); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_EDI); +} + +static inline void gen_lods(DisasContext *s, MemOp ot) +{ + gen_string_movl_A0_ESI(s); + gen_op_ld_v(s, ot, s->T0, s->A0); + gen_op_mov_reg_v(s, ot, R_EAX, s->T0); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_ESI); +} + +static inline void gen_scas(DisasContext *s, MemOp ot) +{ + gen_string_movl_A0_EDI(s); + gen_op_ld_v(s, ot, s->T1, s->A0); + gen_op(s, OP_CMPL, ot, R_EAX); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_EDI); +} + +static inline void gen_cmps(DisasContext *s, MemOp ot) +{ + gen_string_movl_A0_EDI(s); + gen_op_ld_v(s, ot, s->T1, s->A0); + gen_string_movl_A0_ESI(s); + gen_op(s, OP_CMPL, ot, OR_TMP0); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_ESI); + gen_op_add_reg_T0(s, s->aflag, R_EDI); +} + +static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (s->flags & HF_IOBPT_MASK) { + TCGv_i32 t_size = tcg_const_i32(tcg_ctx, 1 << ot); + TCGv t_next = tcg_const_tl(tcg_ctx, s->pc - s->cs_base); + + gen_helper_bpt_io(tcg_ctx, tcg_ctx->cpu_env, t_port, t_size, t_next); + tcg_temp_free_i32(tcg_ctx, t_size); + tcg_temp_free(tcg_ctx, t_next); + } +} + + +static inline void gen_ins(DisasContext *s, MemOp ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_string_movl_A0_EDI(s); + /* Note: we must do this dummy write first to be restartable in + case of page fault. 
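+   That is, INS reads the I/O port and then stores to ES:[E]DI; if the
+   store faulted after the port access, restarting the instruction
+   would repeat a device read that may have side effects.  Probing the
+   destination with a dummy store of 0 first makes any page fault
+   happen before the port is touched.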
*/ + tcg_gen_movi_tl(tcg_ctx, s->T0, 0); + gen_op_st_v(s, ot, s->T0, s->A0); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_EDX]); + tcg_gen_andi_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, 0xffff); + gen_helper_in_func(tcg_ctx, ot, s->T0, s->tmp2_i32); + gen_op_st_v(s, ot, s->T0, s->A0); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_EDI); + gen_bpt_io(s, s->tmp2_i32, ot); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_end(tcg_ctx); + } +} + +static inline void gen_outs(DisasContext *s, MemOp ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_string_movl_A0_ESI(s); + gen_op_ld_v(s, ot, s->T0, s->A0); + + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_EDX]); + tcg_gen_andi_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, 0xffff); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, s->T0); + gen_helper_out_func(tcg_ctx, ot, s->tmp2_i32, s->tmp3_i32); + gen_op_movl_T0_Dshift(s, ot); + gen_op_add_reg_T0(s, s->aflag, R_ESI); + gen_bpt_io(s, s->tmp2_i32, ot); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_end(tcg_ctx); + } +} + +/* same method as Valgrind : we generate jumps to current or next + instruction */ +#define GEN_REPZ(op) \ +static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ + target_ulong cur_eip, target_ulong next_eip) \ +{ \ + TCGLabel *l2; \ + gen_update_cc_op(s); \ + l2 = gen_jz_ecx_string(s, next_eip); \ + gen_ ## op(s, ot); \ + gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \ + /* a loop would cause two single step exceptions if ECX = 1 \ + before rep string_insn */ \ + if (s->repz_opt) \ + gen_op_jz_ecx(s, s->aflag, l2); \ + gen_jmp(s, cur_eip); \ +} + +#define GEN_REPZ2(op) \ +static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \ + target_ulong cur_eip, \ + target_ulong next_eip, \ + int nz) \ +{ \ + TCGLabel *l2; \ + gen_update_cc_op(s); \ + l2 = gen_jz_ecx_string(s, next_eip); \ + gen_ ## op(s, ot); \ + gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \ + gen_update_cc_op(s); \ + gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \ + if (s->repz_opt) \ + gen_op_jz_ecx(s, s->aflag, l2); \ + gen_jmp(s, cur_eip); \ +} + +GEN_REPZ(movs) +GEN_REPZ(stos) +GEN_REPZ(lods) +GEN_REPZ(ins) +GEN_REPZ(outs) +GEN_REPZ2(scas) +GEN_REPZ2(cmps) + +static void gen_helper_fp_arith_ST0_FT0(TCGContext *tcg_ctx, int op) +{ + switch (op) { + case 0: + gen_helper_fadd_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 1: + gen_helper_fmul_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 2: + gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 3: + gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 4: + gen_helper_fsub_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 5: + gen_helper_fsubr_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 6: + gen_helper_fdiv_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 7: + gen_helper_fdivr_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + } +} + +/* NOTE the exception in "r" op ordering */ +static void gen_helper_fp_arith_STN_ST0(TCGContext *tcg_ctx, int op, int opreg) +{ + TCGv_i32 tmp = tcg_const_i32(tcg_ctx, opreg); + switch (op) { + case 0: + gen_helper_fadd_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); + break; + case 1: + gen_helper_fmul_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); + break; + case 4: + gen_helper_fsubr_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); + break; + case 5: + gen_helper_fsub_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); + break; + case 6: + gen_helper_fdivr_STN_ST0(tcg_ctx, 
tcg_ctx->cpu_env, tmp); + break; + case 7: + gen_helper_fdiv_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tmp); + break; + } +} + +static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + gen_update_cc_op(s); + gen_jmp_im(s, cur_eip); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, trapno)); + s->base.is_jmp = DISAS_NORETURN; +} + +/* Generate #UD for the current instruction. The assumption here is that + the instruction is known, but it isn't allowed in the current cpu mode. */ +static void gen_illegal_opcode(DisasContext *s) +{ + gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base); +} + +/* if d == OR_TMP0, it means memory operand (address in A0) */ +static void gen_op(DisasContext *s1, int op, MemOp ot, int d) +{ + TCGContext *tcg_ctx = s1->uc->tcg_ctx; + + if (d != OR_TMP0) { + if (s1->prefix & PREFIX_LOCK) { + /* Lock prefix when destination is not memory. */ + gen_illegal_opcode(s1); + return; + } + gen_op_mov_v_reg(s1, ot, s1->T0, d); + } else if (!(s1->prefix & PREFIX_LOCK)) { + gen_op_ld_v(s1, ot, s1->T0, s1->A0); + } + switch(op) { + case OP_ADCL: + gen_compute_eflags_c(s1, s1->tmp4); + if (s1->prefix & PREFIX_LOCK) { + tcg_gen_add_tl(tcg_ctx, s1->T0, s1->tmp4, s1->T1); + tcg_gen_atomic_add_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T0, + s1->mem_index, ot | MO_LE); + } else { + tcg_gen_add_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); + tcg_gen_add_tl(tcg_ctx, s1->T0, s1->T0, s1->tmp4); + gen_op_st_rm_T0_A0(s1, ot, d); + } + gen_op_update3_cc(s1, s1->tmp4); + set_cc_op(s1, CC_OP_ADCB + ot); + break; + case OP_SBBL: + gen_compute_eflags_c(s1, s1->tmp4); + if (s1->prefix & PREFIX_LOCK) { + tcg_gen_add_tl(tcg_ctx, s1->T0, s1->T1, s1->tmp4); + tcg_gen_neg_tl(tcg_ctx, s1->T0, s1->T0); + tcg_gen_atomic_add_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T0, + s1->mem_index, ot | MO_LE); + } else { + tcg_gen_sub_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); + tcg_gen_sub_tl(tcg_ctx, s1->T0, s1->T0, s1->tmp4); + gen_op_st_rm_T0_A0(s1, ot, d); + } + gen_op_update3_cc(s1, s1->tmp4); + set_cc_op(s1, CC_OP_SBBB + ot); + break; + case OP_ADDL: + if (s1->prefix & PREFIX_LOCK) { + tcg_gen_atomic_add_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T1, + s1->mem_index, ot | MO_LE); + } else { + tcg_gen_add_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); + gen_op_st_rm_T0_A0(s1, ot, d); + } + gen_op_update2_cc(s1); + set_cc_op(s1, CC_OP_ADDB + ot); + break; + case OP_SUBL: + if (s1->prefix & PREFIX_LOCK) { + tcg_gen_neg_tl(tcg_ctx, s1->T0, s1->T1); + tcg_gen_atomic_fetch_add_tl(tcg_ctx, s1->cc_srcT, s1->A0, s1->T0, + s1->mem_index, ot | MO_LE); + tcg_gen_sub_tl(tcg_ctx, s1->T0, s1->cc_srcT, s1->T1); + } else { + tcg_gen_mov_tl(tcg_ctx, s1->cc_srcT, s1->T0); + tcg_gen_sub_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); + gen_op_st_rm_T0_A0(s1, ot, d); + } + gen_op_update2_cc(s1); + set_cc_op(s1, CC_OP_SUBB + ot); + break; + default: + case OP_ANDL: + if (s1->prefix & PREFIX_LOCK) { + tcg_gen_atomic_and_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T1, + s1->mem_index, ot | MO_LE); + } else { + tcg_gen_and_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); + gen_op_st_rm_T0_A0(s1, ot, d); + } + gen_op_update1_cc(s1); + set_cc_op(s1, CC_OP_LOGICB + ot); + break; + case OP_ORL: + if (s1->prefix & PREFIX_LOCK) { + tcg_gen_atomic_or_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T1, + s1->mem_index, ot | MO_LE); + } else { + tcg_gen_or_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); + gen_op_st_rm_T0_A0(s1, ot, d); + } + gen_op_update1_cc(s1); + set_cc_op(s1, CC_OP_LOGICB + ot); + break; + case OP_XORL: + if 
(s1->prefix & PREFIX_LOCK) { + tcg_gen_atomic_xor_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T1, + s1->mem_index, ot | MO_LE); + } else { + tcg_gen_xor_tl(tcg_ctx, s1->T0, s1->T0, s1->T1); + gen_op_st_rm_T0_A0(s1, ot, d); + } + gen_op_update1_cc(s1); + set_cc_op(s1, CC_OP_LOGICB + ot); + break; + case OP_CMPL: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s1->T1); + tcg_gen_mov_tl(tcg_ctx, s1->cc_srcT, s1->T0); + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s1->T0, s1->T1); + set_cc_op(s1, CC_OP_SUBB + ot); + break; + } +} + +/* if d == OR_TMP0, it means memory operand (address in A0) */ +static void gen_inc(DisasContext *s1, MemOp ot, int d, int c) +{ + TCGContext *tcg_ctx = s1->uc->tcg_ctx; + + if (s1->prefix & PREFIX_LOCK) { + if (d != OR_TMP0) { + /* Lock prefix when destination is not memory */ + gen_illegal_opcode(s1); + return; + } + tcg_gen_movi_tl(tcg_ctx, s1->T0, c > 0 ? 1 : -1); + tcg_gen_atomic_add_fetch_tl(tcg_ctx, s1->T0, s1->A0, s1->T0, + s1->mem_index, ot | MO_LE); + } else { + if (d != OR_TMP0) { + gen_op_mov_v_reg(s1, ot, s1->T0, d); + } else { + gen_op_ld_v(s1, ot, s1->T0, s1->A0); + } + tcg_gen_addi_tl(tcg_ctx, s1->T0, s1->T0, (c > 0 ? 1 : -1)); + gen_op_st_rm_T0_A0(s1, ot, d); + } + + gen_compute_eflags_c(s1, tcg_ctx->cpu_cc_src); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s1->T0); + set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot); +} + +static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, + TCGv shm1, TCGv count, bool is_right) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 z32, s32, oldop; + TCGv z_tl; + + /* Store the results into the CC variables. If we know that the + variable must be dead, store unconditionally. Otherwise we'll + need to not disrupt the current contents. */ + z_tl = tcg_const_tl(tcg_ctx, 0); + if (cc_op_live[s->cc_op] & USES_CC_DST) { + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_cc_dst, count, z_tl, + result, tcg_ctx->cpu_cc_dst); + } else { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, result); + } + if (cc_op_live[s->cc_op] & USES_CC_SRC) { + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_cc_src, count, z_tl, + shm1, tcg_ctx->cpu_cc_src); + } else { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, shm1); + } + tcg_temp_free(tcg_ctx, z_tl); + + /* Get the two potential CC_OP values into temporaries. */ + tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); + if (s->cc_op == CC_OP_DYNAMIC) { + oldop = tcg_ctx->cpu_cc_op; + } else { + tcg_gen_movi_i32(tcg_ctx, s->tmp3_i32, s->cc_op); + oldop = s->tmp3_i32; + } + + /* Conditionally store the CC_OP value. */ + z32 = tcg_const_i32(tcg_ctx, 0); + s32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, s32, count); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_cc_op, s32, z32, s->tmp2_i32, oldop); + tcg_temp_free_i32(tcg_ctx, z32); + tcg_temp_free_i32(tcg_ctx, s32); + + /* The CC_OP value is no longer predictable. */ + set_cc_op(s, CC_OP_DYNAMIC); +} + +static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1, + int is_right, int is_arith) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + target_ulong mask = (ot == MO_64 ? 
0x3f : 0x1f); + + /* load */ + if (op1 == OR_TMP0) { + gen_op_ld_v(s, ot, s->T0, s->A0); + } else { + gen_op_mov_v_reg(s, ot, s->T0, op1); + } + + tcg_gen_andi_tl(tcg_ctx, s->T1, s->T1, mask); + tcg_gen_subi_tl(tcg_ctx, s->tmp0, s->T1, 1); + + if (is_right) { + if (is_arith) { + gen_exts(tcg_ctx, ot, s->T0); + tcg_gen_sar_tl(tcg_ctx, s->tmp0, s->T0, s->tmp0); + tcg_gen_sar_tl(tcg_ctx, s->T0, s->T0, s->T1); + } else { + gen_extu(tcg_ctx, ot, s->T0); + tcg_gen_shr_tl(tcg_ctx, s->tmp0, s->T0, s->tmp0); + tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, s->T1); + } + } else { + tcg_gen_shl_tl(tcg_ctx, s->tmp0, s->T0, s->tmp0); + tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, s->T1); + } + + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); + + gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right); +} + +static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2, + int is_right, int is_arith) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int mask = (ot == MO_64 ? 0x3f : 0x1f); + + /* load */ + if (op1 == OR_TMP0) + gen_op_ld_v(s, ot, s->T0, s->A0); + else + gen_op_mov_v_reg(s, ot, s->T0, op1); + + op2 &= mask; + if (op2 != 0) { + if (is_right) { + if (is_arith) { + gen_exts(tcg_ctx, ot, s->T0); + tcg_gen_sari_tl(tcg_ctx, s->tmp4, s->T0, op2 - 1); + tcg_gen_sari_tl(tcg_ctx, s->T0, s->T0, op2); + } else { + gen_extu(tcg_ctx, ot, s->T0); + tcg_gen_shri_tl(tcg_ctx, s->tmp4, s->T0, op2 - 1); + tcg_gen_shri_tl(tcg_ctx, s->T0, s->T0, op2); + } + } else { + tcg_gen_shli_tl(tcg_ctx, s->tmp4, s->T0, op2 - 1); + tcg_gen_shli_tl(tcg_ctx, s->T0, s->T0, op2); + } + } + + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); + + /* update eflags if non zero shift */ + if (op2 != 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->tmp4); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); + set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); + } +} + +static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); + TCGv_i32 t0, t1; + + /* load */ + if (op1 == OR_TMP0) { + gen_op_ld_v(s, ot, s->T0, s->A0); + } else { + gen_op_mov_v_reg(s, ot, s->T0, op1); + } + + tcg_gen_andi_tl(tcg_ctx, s->T1, s->T1, mask); + + switch (ot) { + case MO_8: + /* Replicate the 8-bit input so that a 32-bit rotate works. */ + tcg_gen_ext8u_tl(tcg_ctx, s->T0, s->T0); + tcg_gen_muli_tl(tcg_ctx, s->T0, s->T0, 0x01010101); + goto do_long; + case MO_16: + /* Replicate the 16-bit input so that a 32-bit rotate works. */ + tcg_gen_deposit_tl(tcg_ctx, s->T0, s->T0, s->T0, 16, 16); + goto do_long; + do_long: +#ifdef TARGET_X86_64 + case MO_32: + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, s->T1); + if (is_right) { + tcg_gen_rotr_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); + } else { + tcg_gen_rotl_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); + } + tcg_gen_extu_i32_tl(tcg_ctx, s->T0, s->tmp2_i32); + break; +#endif + default: + if (is_right) { + tcg_gen_rotr_tl(tcg_ctx, s->T0, s->T0, s->T1); + } else { + tcg_gen_rotl_tl(tcg_ctx, s->T0, s->T0, s->T1); + } + break; + } + + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); + + /* We'll need the flags computed into CC_SRC. */ + gen_compute_eflags(s); + + /* The value that was "rotated out" is now present at the other end + of the word. Compute C into CC_DST and O into CC_SRC2. Note that + since we've computed the flags into CC_SRC, these variables are + currently dead. 
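+
+   A concrete illustration (an aside, with hypothetical values): an
+   8-bit ROL by 1 of 0x81 yields 0x03, so architecturally
+       CF = result & 1       = 1   (the bit rotated out re-enters at bit 0)
+       OF = MSB(result) ^ CF = 0 ^ 1 = 1
+   and that is what the shri/andi/xor sequence below materialises in
+   CC_DST and CC_SRC2 (the 8/16-bit inputs were replicated above, so
+   bit 'mask' of T0 is the architectural MSB).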
*/ + if (is_right) { + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, s->T0, mask - 1); + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0, mask); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_dst, 1); + } else { + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, s->T0, mask); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0, 1); + } + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_src2, 1); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_dst); + + /* Now conditionally store the new CC_OP value. If the shift count + is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live. + Otherwise reuse CC_OP_ADCOX which have the C and O flags split out + exactly as we computed above. */ + t0 = tcg_const_i32(tcg_ctx, 0); + t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, s->T1); + tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, CC_OP_ADCOX); + tcg_gen_movi_i32(tcg_ctx, s->tmp3_i32, CC_OP_EFLAGS); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_cc_op, t1, t0, + s->tmp2_i32, s->tmp3_i32); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + + /* The CC_OP value is no longer predictable. */ + set_cc_op(s, CC_OP_DYNAMIC); +} + +static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2, + int is_right) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int mask = (ot == MO_64 ? 0x3f : 0x1f); + int shift; + + /* load */ + if (op1 == OR_TMP0) { + gen_op_ld_v(s, ot, s->T0, s->A0); + } else { + gen_op_mov_v_reg(s, ot, s->T0, op1); + } + + op2 &= mask; + if (op2 != 0) { + switch (ot) { +#ifdef TARGET_X86_64 + case MO_32: + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + if (is_right) { + tcg_gen_rotri_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, op2); + } else { + tcg_gen_rotli_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, op2); + } + tcg_gen_extu_i32_tl(tcg_ctx, s->T0, s->tmp2_i32); + break; +#endif + default: + if (is_right) { + tcg_gen_rotri_tl(tcg_ctx, s->T0, s->T0, op2); + } else { + tcg_gen_rotli_tl(tcg_ctx, s->T0, s->T0, op2); + } + break; + case MO_8: + mask = 7; + goto do_shifts; + case MO_16: + mask = 15; + do_shifts: + shift = op2 & mask; + if (is_right) { + shift = mask + 1 - shift; + } + gen_extu(tcg_ctx, ot, s->T0); + tcg_gen_shli_tl(tcg_ctx, s->tmp0, s->T0, shift); + tcg_gen_shri_tl(tcg_ctx, s->T0, s->T0, mask + 1 - shift); + tcg_gen_or_tl(tcg_ctx, s->T0, s->T0, s->tmp0); + break; + } + } + + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); + + if (op2 != 0) { + /* Compute the flags into CC_SRC. */ + gen_compute_eflags(s); + + /* The value that was "rotated out" is now present at the other end + of the word. Compute C into CC_DST and O into CC_SRC2. Note that + since we've computed the flags into CC_SRC, these variables are + currently dead. 
*/ + if (is_right) { + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, s->T0, mask - 1); + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0, mask); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_dst, 1); + } else { + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, s->T0, mask); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0, 1); + } + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_src2, 1); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_src2, tcg_ctx->cpu_cc_dst); + set_cc_op(s, CC_OP_ADCOX); + } +} + +/* XXX: add faster immediate = 1 case */ +static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, + int is_right) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + gen_compute_eflags(s); + // assert(s->cc_op == CC_OP_EFLAGS); + + /* load */ + if (op1 == OR_TMP0) + gen_op_ld_v(s, ot, s->T0, s->A0); + else + gen_op_mov_v_reg(s, ot, s->T0, op1); + + if (is_right) { + switch (ot) { + case MO_8: + gen_helper_rcrb(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); + break; + case MO_16: + gen_helper_rcrw(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); + break; + case MO_32: + gen_helper_rcrl(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); + break; +#ifdef TARGET_X86_64 + case MO_64: + gen_helper_rcrq(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); + break; +#endif + default: + tcg_abort(); + } + } else { + switch (ot) { + case MO_8: + gen_helper_rclb(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); + break; + case MO_16: + gen_helper_rclw(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); + break; + case MO_32: + gen_helper_rcll(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); + break; +#ifdef TARGET_X86_64 + case MO_64: + gen_helper_rclq(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->T0, s->T1); + break; +#endif + default: + tcg_abort(); + } + } + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); +} + +/* XXX: add faster immediate case */ +static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1, + bool is_right, TCGv count_in) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + target_ulong mask = (ot == MO_64 ? 63 : 31); + TCGv count; + + /* load */ + if (op1 == OR_TMP0) { + gen_op_ld_v(s, ot, s->T0, s->A0); + } else { + gen_op_mov_v_reg(s, ot, s->T0, op1); + } + + count = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, count, count_in, mask); + + switch (ot) { + case MO_16: + /* Note: we implement the Intel behaviour for shift count > 16. + This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A + portion by constructing it as a 32-bit value. */ + if (is_right) { + tcg_gen_deposit_tl(tcg_ctx, s->tmp0, s->T0, s->T1, 16, 16); + tcg_gen_mov_tl(tcg_ctx, s->T1, s->T0); + tcg_gen_mov_tl(tcg_ctx, s->T0, s->tmp0); + } else { + tcg_gen_deposit_tl(tcg_ctx, s->T1, s->T0, s->T1, 16, 16); + } + /* FALLTHRU */ +#ifdef TARGET_X86_64 + case MO_32: + /* Concatenate the two 32-bit values and use a 64-bit shift. 
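+
+      An illustrative aside with hypothetical register values: for
+      "shld eax, ebx, 8" with T0 = 0xaabbccdd and T1 = 0x11223344 the
+      concatenation is 0xaabbccdd11223344; shifting left by 8 gives
+      0xbbccdd1122334400 and the final shri by 32 leaves 0xbbccdd11,
+      i.e. the top bits of the source stream in from the right, which
+      is the double-precision shift being modelled here.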
*/ + tcg_gen_subi_tl(tcg_ctx, s->tmp0, count, 1); + if (is_right) { + tcg_gen_concat_tl_i64(tcg_ctx, s->T0, s->T0, s->T1); + tcg_gen_shr_i64(tcg_ctx, s->tmp0, s->T0, s->tmp0); + tcg_gen_shr_i64(tcg_ctx, s->T0, s->T0, count); + } else { + tcg_gen_concat_tl_i64(tcg_ctx, s->T0, s->T1, s->T0); + tcg_gen_shl_i64(tcg_ctx, s->tmp0, s->T0, s->tmp0); + tcg_gen_shl_i64(tcg_ctx, s->T0, s->T0, count); + tcg_gen_shri_i64(tcg_ctx, s->tmp0, s->tmp0, 32); + tcg_gen_shri_i64(tcg_ctx, s->T0, s->T0, 32); + } + break; +#endif + default: + tcg_gen_subi_tl(tcg_ctx, s->tmp0, count, 1); + if (is_right) { + tcg_gen_shr_tl(tcg_ctx, s->tmp0, s->T0, s->tmp0); + + tcg_gen_subfi_tl(tcg_ctx, s->tmp4, mask + 1, count); + tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, count); + tcg_gen_shl_tl(tcg_ctx, s->T1, s->T1, s->tmp4); + } else { + tcg_gen_shl_tl(tcg_ctx, s->tmp0, s->T0, s->tmp0); + if (ot == MO_16) { + /* Only needed if count > 16, for Intel behaviour. */ + tcg_gen_subfi_tl(tcg_ctx, s->tmp4, 33, count); + tcg_gen_shr_tl(tcg_ctx, s->tmp4, s->T1, s->tmp4); + tcg_gen_or_tl(tcg_ctx, s->tmp0, s->tmp0, s->tmp4); + } + + tcg_gen_subfi_tl(tcg_ctx, s->tmp4, mask + 1, count); + tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, count); + tcg_gen_shr_tl(tcg_ctx, s->T1, s->T1, s->tmp4); + } + tcg_gen_movi_tl(tcg_ctx, s->tmp4, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, s->T1, count, s->tmp4, + s->tmp4, s->T1); + tcg_gen_or_tl(tcg_ctx, s->T0, s->T0, s->T1); + break; + } + + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); + + gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right); + tcg_temp_free(tcg_ctx, count); +} + +static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s) +{ + if (s != OR_TMP1) + gen_op_mov_v_reg(s1, ot, s1->T1, s); + switch(op) { + case OP_ROL: + gen_rot_rm_T1(s1, ot, d, 0); + break; + case OP_ROR: + gen_rot_rm_T1(s1, ot, d, 1); + break; + case OP_SHL: + case OP_SHL1: + gen_shift_rm_T1(s1, ot, d, 0, 0); + break; + case OP_SHR: + gen_shift_rm_T1(s1, ot, d, 1, 0); + break; + case OP_SAR: + gen_shift_rm_T1(s1, ot, d, 1, 1); + break; + case OP_RCL: + gen_rotc_rm_T1(s1, ot, d, 0); + break; + case OP_RCR: + gen_rotc_rm_T1(s1, ot, d, 1); + break; + } +} + +static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c) +{ + TCGContext *tcg_ctx = s1->uc->tcg_ctx; + + switch(op) { + case OP_ROL: + gen_rot_rm_im(s1, ot, d, c, 0); + break; + case OP_ROR: + gen_rot_rm_im(s1, ot, d, c, 1); + break; + case OP_SHL: + case OP_SHL1: + gen_shift_rm_im(s1, ot, d, c, 0, 0); + break; + case OP_SHR: + gen_shift_rm_im(s1, ot, d, c, 1, 0); + break; + case OP_SAR: + gen_shift_rm_im(s1, ot, d, c, 1, 1); + break; + default: + /* currently not optimized */ + tcg_gen_movi_tl(tcg_ctx, s1->T1, c); + gen_shift(s1, op, ot, d, OR_TMP1); + break; + } +} + +#define X86_MAX_INSN_LENGTH 15 + +static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes) +{ + uint64_t pc = s->pc; + + s->pc += num_bytes; + if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) { + /* If the instruction's 16th byte is on a different page than the 1st, a + * page fault on the second page wins over the general protection fault + * caused by the instruction being too long. + * This can happen even if the operand is only one byte long! 
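+
+   A worked example with hypothetical addresses: if pc_start = 0x1ffb
+   and 4 KiB pages are in use, the 16th fetched byte sits at 0x200a on
+   the following page.  The cpu_ldub_code() probe below touches that
+   second page, so a pending #PF on it is delivered first; only when
+   the page is mapped do we reach the siglongjmp that ultimately
+   raises #GP for the over-long instruction.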
+ */ + if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) { + volatile uint8_t unused = + cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK); + (void) unused; + } + siglongjmp(s->jmpbuf, 1); + } + + return pc; +} + +static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s) +{ + return translator_ldub(env->uc->tcg_ctx, env, advance_pc(env, s, 1)); +} + +static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s) +{ + return translator_ldsw(env->uc->tcg_ctx, env, advance_pc(env, s, 2)); +} + +static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s) +{ + return translator_lduw(env->uc->tcg_ctx, env, advance_pc(env, s, 2)); +} + +static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s) +{ + return translator_ldl(env->uc->tcg_ctx, env, advance_pc(env, s, 4)); +} + +#ifdef TARGET_X86_64 +static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s) +{ + return translator_ldq(env->uc->tcg_ctx, env, advance_pc(env, s, 8)); +} +#endif + +/* Decompose an address. */ + +typedef struct AddressParts { + int def_seg; + int base; + int index; + int scale; + target_long disp; +} AddressParts; + +static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s, + int modrm) +{ + int def_seg, base, index, scale, mod, rm; + target_long disp; + bool havesib; + + def_seg = R_DS; + index = -1; + scale = 0; + disp = 0; + + mod = (modrm >> 6) & 3; + rm = modrm & 7; + base = rm | REX_B(s); + + if (mod == 3) { + /* Normally filtered out earlier, but including this path + simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */ + goto done; + } + + switch (s->aflag) { + case MO_64: + case MO_32: + havesib = 0; + if (rm == 4) { + int code = x86_ldub_code(env, s); + scale = (code >> 6) & 3; + index = ((code >> 3) & 7) | REX_X(s); + if (index == 4) { + index = -1; /* no index */ + } + base = (code & 7) | REX_B(s); + havesib = 1; + } + + switch (mod) { + case 0: + if ((base & 7) == 5) { + base = -1; + disp = (int32_t)x86_ldl_code(env, s); + if (CODE64(s) && !havesib) { + base = -2; + disp += s->pc + s->rip_offset; + } + } + break; + case 1: + disp = (int8_t)x86_ldub_code(env, s); + break; + default: + case 2: + disp = (int32_t)x86_ldl_code(env, s); + break; + } + + /* For correct popl handling with esp. */ + if (base == R_ESP && s->popl_esp_hack) { + disp += s->popl_esp_hack; + } + if (base == R_EBP || base == R_ESP) { + def_seg = R_SS; + } + break; + + case MO_16: + if (mod == 0) { + if (rm == 6) { + base = -1; + disp = x86_lduw_code(env, s); + break; + } + } else if (mod == 1) { + disp = (int8_t)x86_ldub_code(env, s); + } else { + disp = (int16_t)x86_lduw_code(env, s); + } + + switch (rm) { + case 0: + base = R_EBX; + index = R_ESI; + break; + case 1: + base = R_EBX; + index = R_EDI; + break; + case 2: + base = R_EBP; + index = R_ESI; + def_seg = R_SS; + break; + case 3: + base = R_EBP; + index = R_EDI; + def_seg = R_SS; + break; + case 4: + base = R_ESI; + break; + case 5: + base = R_EDI; + break; + case 6: + base = R_EBP; + def_seg = R_SS; + break; + default: + case 7: + base = R_EBX; + break; + } + break; + + default: + tcg_abort(); + } + + done: + return (AddressParts){ def_seg, base, index, scale, disp }; +} + +/* Compute the address, with a minimum number of TCG ops. 
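+
+   As an illustrative decode (hypothetical operand): the 32-bit form
+   [ebx + esi*4 + 0x10] arrives here as AddressParts{ .def_seg = R_DS,
+   .base = R_EBX, .index = R_ESI, .scale = 2, .disp = 0x10 }, for
+   which the code below emits just one shli, one add and one addi.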
*/ +static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv ea = NULL; + + if (a.index >= 0) { + if (a.scale == 0) { + ea = tcg_ctx->cpu_regs[a.index]; + } else { + tcg_gen_shli_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[a.index], a.scale); + ea = s->A0; + } + if (a.base >= 0) { + tcg_gen_add_tl(tcg_ctx, s->A0, ea, tcg_ctx->cpu_regs[a.base]); + ea = s->A0; + } + } else if (a.base >= 0) { + ea = tcg_ctx->cpu_regs[a.base]; + } + if (!ea) { + tcg_gen_movi_tl(tcg_ctx, s->A0, a.disp); + ea = s->A0; + } else if (a.disp != 0) { + tcg_gen_addi_tl(tcg_ctx, s->A0, ea, a.disp); + ea = s->A0; + } + + return ea; +} + +static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm) +{ + AddressParts a = gen_lea_modrm_0(env, s, modrm); + TCGv ea = gen_lea_modrm_1(s, a); + gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override); +} + +static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) +{ + (void)gen_lea_modrm_0(env, s, modrm); +} + +/* Used for BNDCL, BNDCU, BNDCN. */ +static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm, + TCGCond cond, TCGv_i64 bndv) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv ea = gen_lea_modrm_1(s, gen_lea_modrm_0(env, s, modrm)); + + tcg_gen_extu_tl_i64(tcg_ctx, s->tmp1_i64, ea); + if (!CODE64(s)) { + tcg_gen_ext32u_i64(tcg_ctx, s->tmp1_i64, s->tmp1_i64); + } + tcg_gen_setcond_i64(tcg_ctx, cond, s->tmp1_i64, s->tmp1_i64, bndv); + tcg_gen_extrl_i64_i32(tcg_ctx, s->tmp2_i32, s->tmp1_i64); + gen_helper_bndck(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); +} + +/* used for LEA and MOV AX, mem */ +static void gen_add_A0_ds_seg(DisasContext *s) +{ + gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override); +} + +/* generate modrm memory load or store of 'reg'. TMP0 is used if reg == + OR_TMP0 */ +static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, + MemOp ot, int reg, int is_store) +{ + int mod, rm; + + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + if (mod == 3) { + if (is_store) { + if (reg != OR_TMP0) + gen_op_mov_v_reg(s, ot, s->T0, reg); + gen_op_mov_reg_v(s, ot, rm, s->T0); + } else { + gen_op_mov_v_reg(s, ot, s->T0, rm); + if (reg != OR_TMP0) + gen_op_mov_reg_v(s, ot, reg, s->T0); + } + } else { + gen_lea_modrm(env, s, modrm); + if (is_store) { + if (reg != OR_TMP0) + gen_op_mov_v_reg(s, ot, s->T0, reg); + gen_op_st_v(s, ot, s->T0, s->A0); + } else { + gen_op_ld_v(s, ot, s->T0, s->A0); + if (reg != OR_TMP0) + gen_op_mov_reg_v(s, ot, reg, s->T0); + } + } +} + +static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) +{ + uint32_t ret; + + switch (ot) { + case MO_8: + ret = x86_ldub_code(env, s); + break; + case MO_16: + ret = x86_lduw_code(env, s); + break; + case MO_32: +#ifdef TARGET_X86_64 + case MO_64: +#endif + ret = x86_ldl_code(env, s); + break; + default: + tcg_abort(); + } + return ret; +} + +static inline int insn_const_size(MemOp ot) +{ + if (ot <= MO_32) { + return 1 << ot; + } else { + return 4; + } +} + +static inline bool use_goto_tb(DisasContext *s, target_ulong pc) +{ + return (pc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) || + (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK); +} + +static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + target_ulong pc = s->cs_base + eip; + + if (use_goto_tb(s, pc)) { + /* jump to same page: we can use a direct jump */ + tcg_gen_goto_tb(tcg_ctx, tb_num); + gen_jmp_im(s, eip); + tcg_gen_exit_tb(tcg_ctx, 
s->base.tb, tb_num); + s->base.is_jmp = DISAS_NORETURN; + } else { + /* jump to another page */ + gen_jmp_im(s, eip); + gen_jr(s, s->tmp0); + } +} + +static inline void gen_jcc(DisasContext *s, int b, + target_ulong val, target_ulong next_eip) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGLabel *l1, *l2; + + if (s->jmp_opt) { + l1 = gen_new_label(tcg_ctx); + gen_jcc1(s, b, l1); + + gen_goto_tb(s, 0, next_eip); + + gen_set_label(tcg_ctx, l1); + gen_goto_tb(s, 1, val); + } else { + l1 = gen_new_label(tcg_ctx); + l2 = gen_new_label(tcg_ctx); + gen_jcc1(s, b, l1); + + gen_jmp_im(s, next_eip); + tcg_gen_br(tcg_ctx, l2); + + gen_set_label(tcg_ctx, l1); + gen_jmp_im(s, val); + gen_set_label(tcg_ctx, l2); + gen_eob(s); + } +} + +static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, + int modrm, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + CCPrepare cc; + + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + + cc = gen_prepare_cc(s, b, s->T1); + if (cc.mask != -1) { + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, cc.reg, cc.mask); + cc.reg = t0; + } + if (!cc.use_reg2) { + cc.reg2 = tcg_const_tl(tcg_ctx, cc.imm); + } + + tcg_gen_movcond_tl(tcg_ctx, cc.cond, s->T0, cc.reg, cc.reg2, + s->T0, tcg_ctx->cpu_regs[reg]); + gen_op_mov_reg_v(s, ot, reg, s->T0); + + if (cc.mask != -1) { + tcg_temp_free(tcg_ctx, cc.reg); + } + if (!cc.use_reg2) { + tcg_temp_free(tcg_ctx, cc.reg2); + } +} + +static inline void gen_op_movl_T0_seg(DisasContext *s, int seg_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,segs[seg_reg].selector)); +} + +static inline void gen_op_movl_seg_T0_vm(DisasContext *s, int seg_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,segs[seg_reg].selector)); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_seg_base[seg_reg], s->T0, 4); +} + +/* move T0 to seg_reg and compute if the CPU state may change. Never + call this function with seg_reg == R_CS */ +static void gen_movl_seg_T0(DisasContext *s, int seg_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (s->pe && !s->vm86) { + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + gen_helper_load_seg(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, seg_reg), s->tmp2_i32); + /* abort translation because the addseg value may change or + because ss32 may change. For R_SS, translation must always + stop as a special handling must be done to disable hardware + interrupts for the next instruction */ + if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) { + s->base.is_jmp = DISAS_TOO_MANY; + } + } else { + gen_op_movl_seg_T0_vm(s, seg_reg); + if (seg_reg == R_SS) { + s->base.is_jmp = DISAS_TOO_MANY; + } + } +} + +static inline int svm_is_rep(int prefixes) +{ + return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 
8 : 0); +} + +static inline void +gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start, + uint32_t type, uint64_t param) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + /* no SVM activated; fast case */ + if (likely(!(s->flags & HF_GUEST_MASK))) + return; + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_svm_check_intercept_param(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, type), + tcg_const_i64(tcg_ctx, param)); +} + +static inline void +gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type) +{ + gen_svm_check_intercept_param(s, pc_start, type, 0); +} + +static inline void gen_stack_update(DisasContext *s, int addend) +{ + gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend); +} + +/* Generate a push. It depends on ss32, addseg and dflag. */ +static void gen_push_v(DisasContext *s, TCGv val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + MemOp d_ot = mo_pushpop(s, s->dflag); + MemOp a_ot = mo_stacksize(s); + int size = 1 << d_ot; + TCGv new_esp = s->A0; + + tcg_gen_subi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_ESP], size); + + if (!CODE64(s)) { + if (s->addseg) { + new_esp = s->tmp4; + tcg_gen_mov_tl(tcg_ctx, new_esp, s->A0); + } + gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); + } + + gen_op_st_v(s, d_ot, val, s->A0); + gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp); +} + +/* two step pop is necessary for precise exceptions */ +static MemOp gen_pop_T0(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + MemOp d_ot = mo_pushpop(s, s->dflag); + + gen_lea_v_seg(s, mo_stacksize(s), tcg_ctx->cpu_regs[R_ESP], R_SS, -1); + gen_op_ld_v(s, d_ot, s->T0, s->A0); + + return d_ot; +} + +static inline void gen_pop_update(DisasContext *s, MemOp ot) +{ + gen_stack_update(s, 1 << ot); +} + +static inline void gen_stack_A0(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, tcg_ctx->cpu_regs[R_ESP], R_SS, -1); +} + +static void gen_pusha(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + MemOp s_ot = s->ss32 ? MO_32 : MO_16; + MemOp d_ot = s->dflag; + int size = 1 << d_ot; + int i; + + for (i = 0; i < 8; i++) { + tcg_gen_addi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_ESP], (i - 8) * size); + gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); + gen_op_st_v(s, d_ot, tcg_ctx->cpu_regs[7 - i], s->A0); + } + + gen_stack_update(s, -8 * size); +} + +static void gen_popa(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + MemOp s_ot = s->ss32 ? MO_32 : MO_16; + MemOp d_ot = s->dflag; + int size = 1 << d_ot; + int i; + + for (i = 0; i < 8; i++) { + /* ESP is not reloaded */ + if (7 - i == R_ESP) { + continue; + } + tcg_gen_addi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_ESP], i * size); + gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); + gen_op_ld_v(s, d_ot, s->T0, s->A0); + gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0); + } + + gen_stack_update(s, 8 * size); +} + +static void gen_enter(DisasContext *s, int esp_addend, int level) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + MemOp d_ot = mo_pushpop(s, s->dflag); + MemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16; + int size = 1 << d_ot; + + /* Push BP; compute FrameTemp into T1. */ + tcg_gen_subi_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[R_ESP], size); + gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1); + gen_op_st_v(s, d_ot, tcg_ctx->cpu_regs[R_EBP], s->A0); + + level &= 31; + if (level != 0) { + int i; + + /* Copy level-1 pointers from the previous frame. 
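+
+      As an orienting example: for "enter 16, 3" this loop re-pushes
+      the two saved frame pointers found at EBP - size and
+      EBP - 2 * size, and the code after it then pushes FrameTemp
+      itself as the third display level, matching the architectural
+      description of nested ENTER.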
*/ + for (i = 1; i < level; ++i) { + tcg_gen_subi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_EBP], size * i); + gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); + gen_op_ld_v(s, d_ot, s->tmp0, s->A0); + + tcg_gen_subi_tl(tcg_ctx, s->A0, s->T1, size * i); + gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); + gen_op_st_v(s, d_ot, s->tmp0, s->A0); + } + + /* Push the current FrameTemp as the last level. */ + tcg_gen_subi_tl(tcg_ctx, s->A0, s->T1, size * level); + gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); + gen_op_st_v(s, d_ot, s->T1, s->A0); + } + + /* Copy the FrameTemp value to EBP. */ + gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1); + + /* Compute the final value of ESP. */ + tcg_gen_subi_tl(tcg_ctx, s->T1, s->T1, esp_addend + size * level); + gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); +} + +static void gen_leave(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + MemOp d_ot = mo_pushpop(s, s->dflag); + MemOp a_ot = mo_stacksize(s); + + gen_lea_v_seg(s, a_ot, tcg_ctx->cpu_regs[R_EBP], R_SS, -1); + gen_op_ld_v(s, d_ot, s->T0, s->A0); + + tcg_gen_addi_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[R_EBP], 1ULL << d_ot); + + gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0); + gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); +} + +/* Similarly, except that the assumption here is that we don't decode + the instruction at all -- either a missing opcode, an unimplemented + feature, or just a bogus instruction stream. */ +static void gen_unknown_opcode(CPUX86State *env, DisasContext *s) +{ + gen_illegal_opcode(s); +} + +/* an interrupt is different from an exception because of the + privilege checks */ +static void gen_interrupt(DisasContext *s, int intno, + target_ulong cur_eip, target_ulong next_eip) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_update_cc_op(s); + gen_jmp_im(s, cur_eip); + gen_helper_raise_interrupt(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, intno), + tcg_const_i32(tcg_ctx, next_eip - cur_eip)); + s->base.is_jmp = DISAS_NORETURN; +} + +static void gen_debug(DisasContext *s, target_ulong cur_eip) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + gen_update_cc_op(s); + gen_jmp_im(s, cur_eip); + gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); + s->base.is_jmp = DISAS_NORETURN; +} + +static void gen_set_hflag(DisasContext *s, uint32_t mask) +{ + if ((s->flags & mask) == 0) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUX86State, hflags)); + tcg_gen_ori_i32(tcg_ctx, t, t, mask); + tcg_gen_st_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUX86State, hflags)); + tcg_temp_free_i32(tcg_ctx, t); + s->flags |= mask; + } +} + +static void gen_reset_hflag(DisasContext *s, uint32_t mask) +{ + if (s->flags & mask) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUX86State, hflags)); + tcg_gen_andi_i32(tcg_ctx, t, t, ~mask); + tcg_gen_st_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUX86State, hflags)); + tcg_temp_free_i32(tcg_ctx, t); + s->flags &= ~mask; + } +} + +/* Clear BND registers during legacy branches. */ +static void gen_bnd_jmp(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + /* Clear the registers only if BND prefix is missing, MPX is enabled, + and if the BNDREGs are known to be in use (non-zero) already. + The helper itself will check BNDPRESERVE at runtime. 
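+
+      Concretely: an F2-prefixed "bnd jmp target" skips the helper and
+      preserves BND0-BND3, while a plain "jmp target" in the same
+      MPX-enabled, bounds-in-use state reaches gen_helper_bnd_jmp()
+      and has the bounds registers invalidated (subject to the
+      runtime BNDPRESERVE check mentioned above).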
*/ + if ((s->prefix & PREFIX_REPNZ) == 0 + && (s->flags & HF_MPX_EN_MASK) != 0 + && (s->flags & HF_MPX_IU_MASK) != 0) { + gen_helper_bnd_jmp(tcg_ctx, tcg_ctx->cpu_env); + } +} + +/* Generate an end of block. Trace exception is also generated if needed. + If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. + If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of + S->TF. This is used by the syscall/sysret insns. */ +static void +do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_update_cc_op(s); + + /* If several instructions disable interrupts, only the first does it. */ + if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) { + gen_set_hflag(s, HF_INHIBIT_IRQ_MASK); + } else { + gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK); + } + + if (s->base.tb->flags & HF_RF_MASK) { + gen_helper_reset_rf(tcg_ctx, tcg_ctx->cpu_env); + } + if (s->base.singlestep_enabled) { + gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); + } else if (recheck_tf) { + gen_helper_rechecking_single_step(tcg_ctx, tcg_ctx->cpu_env); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + } else if (s->tf) { + gen_helper_single_step(tcg_ctx, tcg_ctx->cpu_env); + } else if (jr) { + tcg_gen_lookup_and_goto_ptr(tcg_ctx); + } else { + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + } + s->base.is_jmp = DISAS_NORETURN; +} + +static inline void +gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf) +{ + do_gen_eob_worker(s, inhibit, recheck_tf, false); +} + +/* End of block. + If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */ +static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit) +{ + gen_eob_worker(s, inhibit, false); +} + +/* End of block, resetting the inhibit irq flag. */ +static void gen_eob(DisasContext *s) +{ + gen_eob_worker(s, false, false); +} + +/* Jump to register */ +static void gen_jr(DisasContext *s, TCGv dest) +{ + do_gen_eob_worker(s, false, false, true); +} + +/* generate a jump to eip. 
No segment change must happen before as a + direct call to the next block may occur */ +static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num) +{ + gen_update_cc_op(s); + set_cc_op(s, CC_OP_DYNAMIC); + if (s->jmp_opt) { + gen_goto_tb(s, tb_num, eip); + } else { + gen_jmp_im(s, eip); + gen_eob(s); + } +} + +static void gen_jmp(DisasContext *s, target_ulong eip) +{ + gen_jmp_tb(s, eip, 0); +} + +static inline void gen_ldq_env_A0(DisasContext *s, int offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); + tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset); +} + +static inline void gen_stq_env_A0(DisasContext *s, int offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset); + tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); +} + +static inline void gen_ldo_env_A0(DisasContext *s, int offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int mem_index = s->mem_index; + tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, mem_index, MO_LEQ); + tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); + tcg_gen_addi_tl(tcg_ctx, s->tmp0, s->A0, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->tmp0, mem_index, MO_LEQ); + tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); +} + +static inline void gen_sto_env_A0(DisasContext *s, int offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int mem_index = s->mem_index; + tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0))); + tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, mem_index, MO_LEQ); + tcg_gen_addi_tl(tcg_ctx, s->tmp0, s->A0, 8); + tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1))); + tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->tmp0, mem_index, MO_LEQ); +} + +static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0))); + tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0))); + tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1))); + tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1))); +} + +static inline void gen_op_movq(DisasContext *s, int d_offset, int s_offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, s_offset); + tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, d_offset); +} + +static inline void gen_op_movl(DisasContext *s, int d_offset, int s_offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_ld_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s_offset); + tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, d_offset); +} + +static inline void gen_op_movq_env_0(DisasContext *s, int d_offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_movi_i64(tcg_ctx, s->tmp1_i64, 0); + tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, d_offset); +} + +typedef void (*SSEFunc_i_ep)(TCGContext *s, TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg); +typedef void (*SSEFunc_l_ep)(TCGContext *s, TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg); +typedef void (*SSEFunc_0_epi)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val); +typedef void (*SSEFunc_0_epl)(TCGContext *s, 
TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val); +typedef void (*SSEFunc_0_epp)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b); +typedef void (*SSEFunc_0_eppi)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv_i32 val); +typedef void (*SSEFunc_0_ppi)(TCGContext *s, TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val); +typedef void (*SSEFunc_0_eppt)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv val); + +#define SSE_SPECIAL ((void *)1) +#define SSE_DUMMY ((void *)2) + +#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm } +#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \ + gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, } + +static const SSEFunc_0_epp sse_op_table1[256][4] = { + /* 3DNow! extensions */ + [0x0e] = { SSE_DUMMY }, /* femms */ + [0x0f] = { SSE_DUMMY }, /* pf... */ + /* pure SSE operations */ + [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ + [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ + [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */ + [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */ + [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm }, + [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm }, + [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */ + [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */ + + [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ + [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ + [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */ + [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */ + [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */ + [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */ + [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd }, + [0x2f] = { gen_helper_comiss, gen_helper_comisd }, + [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */ + [0x51] = SSE_FOP(sqrt), + [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL }, + [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL }, + [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */ + [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */ + [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */ + [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */ + [0x58] = SSE_FOP(add), + [0x59] = SSE_FOP(mul), + [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps, + gen_helper_cvtss2sd, gen_helper_cvtsd2ss }, + [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq }, + [0x5c] = SSE_FOP(sub), + [0x5d] = SSE_FOP(min), + [0x5e] = SSE_FOP(div), + [0x5f] = SSE_FOP(max), + + [0xc2] = SSE_FOP(cmpeq), + [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps, + (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */ + + /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. 
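+
+      A note on the indexing used throughout this table: column b1 is
+      selected by the mandatory prefix (0 = none, 1 = 66h, 2 = F3h,
+      3 = F2h), so row 0x10 reads movups/movupd/movss/movsd in that
+      order, mirroring the b1 computation in gen_sse() below.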
+ */
+    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+
+    /* MMX ops and their SSE extensions */
+    [0x60] = MMX_OP2(punpcklbw),
+    [0x61] = MMX_OP2(punpcklwd),
+    [0x62] = MMX_OP2(punpckldq),
+    [0x63] = MMX_OP2(packsswb),
+    [0x64] = MMX_OP2(pcmpgtb),
+    [0x65] = MMX_OP2(pcmpgtw),
+    [0x66] = MMX_OP2(pcmpgtl),
+    [0x67] = MMX_OP2(packuswb),
+    [0x68] = MMX_OP2(punpckhbw),
+    [0x69] = MMX_OP2(punpckhwd),
+    [0x6a] = MMX_OP2(punpckhdq),
+    [0x6b] = MMX_OP2(packssdw),
+    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
+    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
+    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
+    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movdqu */
+    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
+               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
+               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
+               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
+    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
+    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
+    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
+    [0x74] = MMX_OP2(pcmpeqb),
+    [0x75] = MMX_OP2(pcmpeqw),
+    [0x76] = MMX_OP2(pcmpeql),
+    [0x77] = { SSE_DUMMY }, /* emms */
+    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
+    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
+    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
+    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
+    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
+    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
+    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
+    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
+    [0xd1] = MMX_OP2(psrlw),
+    [0xd2] = MMX_OP2(psrld),
+    [0xd3] = MMX_OP2(psrlq),
+    [0xd4] = MMX_OP2(paddq),
+    [0xd5] = MMX_OP2(pmullw),
+    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
+    [0xd8] = MMX_OP2(psubusb),
+    [0xd9] = MMX_OP2(psubusw),
+    [0xda] = MMX_OP2(pminub),
+    [0xdb] = MMX_OP2(pand),
+    [0xdc] = MMX_OP2(paddusb),
+    [0xdd] = MMX_OP2(paddusw),
+    [0xde] = MMX_OP2(pmaxub),
+    [0xdf] = MMX_OP2(pandn),
+    [0xe0] = MMX_OP2(pavgb),
+    [0xe1] = MMX_OP2(psraw),
+    [0xe2] = MMX_OP2(psrad),
+    [0xe3] = MMX_OP2(pavgw),
+    [0xe4] = MMX_OP2(pmulhuw),
+    [0xe5] = MMX_OP2(pmulhw),
+    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
+    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
+    [0xe8] = MMX_OP2(psubsb),
+    [0xe9] = MMX_OP2(psubsw),
+    [0xea] = MMX_OP2(pminsw),
+    [0xeb] = MMX_OP2(por),
+    [0xec] = MMX_OP2(paddsb),
+    [0xed] = MMX_OP2(paddsw),
+    [0xee] = MMX_OP2(pmaxsw),
+    [0xef] = MMX_OP2(pxor),
+    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
+    [0xf1] = MMX_OP2(psllw),
+    [0xf2] = MMX_OP2(pslld),
+    [0xf3] = MMX_OP2(psllq),
+    [0xf4] = MMX_OP2(pmuludq),
+    [0xf5] = MMX_OP2(pmaddwd),
+    [0xf6] = MMX_OP2(psadbw),
+    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
+               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
+    [0xf8] = MMX_OP2(psubb),
+    [0xf9] = MMX_OP2(psubw),
+    [0xfa] = MMX_OP2(psubl),
+    [0xfb] = MMX_OP2(psubq),
+    [0xfc] = MMX_OP2(paddb),
+    [0xfd] = MMX_OP2(paddw),
+    [0xfe] = MMX_OP2(paddl),
+};
+
+static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
+    [0 + 2] = MMX_OP2(psrlw),
+    [0 + 4] = MMX_OP2(psraw),
+    [0 + 6] = MMX_OP2(psllw),
+ [8 + 2] = MMX_OP2(psrld), + [8 + 4] = MMX_OP2(psrad), + [8 + 6] = MMX_OP2(pslld), + [16 + 2] = MMX_OP2(psrlq), + [16 + 3] = { NULL, gen_helper_psrldq_xmm }, + [16 + 6] = MMX_OP2(psllq), + [16 + 7] = { NULL, gen_helper_pslldq_xmm }, +}; + +static const SSEFunc_0_epi sse_op_table3ai[] = { + gen_helper_cvtsi2ss, + gen_helper_cvtsi2sd +}; + +#ifdef TARGET_X86_64 +static const SSEFunc_0_epl sse_op_table3aq[] = { + gen_helper_cvtsq2ss, + gen_helper_cvtsq2sd +}; +#endif + +static const SSEFunc_i_ep sse_op_table3bi[] = { + gen_helper_cvttss2si, + gen_helper_cvtss2si, + gen_helper_cvttsd2si, + gen_helper_cvtsd2si +}; + +#ifdef TARGET_X86_64 +static const SSEFunc_l_ep sse_op_table3bq[] = { + gen_helper_cvttss2sq, + gen_helper_cvtss2sq, + gen_helper_cvttsd2sq, + gen_helper_cvtsd2sq +}; +#endif + +static const SSEFunc_0_epp sse_op_table4[8][4] = { + SSE_FOP(cmpeq), + SSE_FOP(cmplt), + SSE_FOP(cmple), + SSE_FOP(cmpunord), + SSE_FOP(cmpneq), + SSE_FOP(cmpnlt), + SSE_FOP(cmpnle), + SSE_FOP(cmpord), +}; + +static const SSEFunc_0_epp sse_op_table5[256] = { + [0x0c] = gen_helper_pi2fw, + [0x0d] = gen_helper_pi2fd, + [0x1c] = gen_helper_pf2iw, + [0x1d] = gen_helper_pf2id, + [0x8a] = gen_helper_pfnacc, + [0x8e] = gen_helper_pfpnacc, + [0x90] = gen_helper_pfcmpge, + [0x94] = gen_helper_pfmin, + [0x96] = gen_helper_pfrcp, + [0x97] = gen_helper_pfrsqrt, + [0x9a] = gen_helper_pfsub, + [0x9e] = gen_helper_pfadd, + [0xa0] = gen_helper_pfcmpgt, + [0xa4] = gen_helper_pfmax, + [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */ + [0xa7] = gen_helper_movq, /* pfrsqit1 */ + [0xaa] = gen_helper_pfsubr, + [0xae] = gen_helper_pfacc, + [0xb0] = gen_helper_pfcmpeq, + [0xb4] = gen_helper_pfmul, + [0xb6] = gen_helper_movq, /* pfrcpit2 */ + [0xb7] = gen_helper_pmulhrw_mmx, + [0xbb] = gen_helper_pswapd, + [0xbf] = gen_helper_pavgb_mmx /* pavgusb */ +}; + +struct SSEOpHelper_epp { + SSEFunc_0_epp op[2]; + uint32_t ext_mask; +}; + +struct SSEOpHelper_eppi { + SSEFunc_0_eppi op[2]; + uint32_t ext_mask; +}; + +#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 } +#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 } +#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 } +#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 } +#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \ + CPUID_EXT_PCLMULQDQ } +#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES } + +static const struct SSEOpHelper_epp sse_op_table6[256] = { + [0x00] = SSSE3_OP(pshufb), + [0x01] = SSSE3_OP(phaddw), + [0x02] = SSSE3_OP(phaddd), + [0x03] = SSSE3_OP(phaddsw), + [0x04] = SSSE3_OP(pmaddubsw), + [0x05] = SSSE3_OP(phsubw), + [0x06] = SSSE3_OP(phsubd), + [0x07] = SSSE3_OP(phsubsw), + [0x08] = SSSE3_OP(psignb), + [0x09] = SSSE3_OP(psignw), + [0x0a] = SSSE3_OP(psignd), + [0x0b] = SSSE3_OP(pmulhrsw), + [0x10] = SSE41_OP(pblendvb), + [0x14] = SSE41_OP(blendvps), + [0x15] = SSE41_OP(blendvpd), + [0x17] = SSE41_OP(ptest), + [0x1c] = SSSE3_OP(pabsb), + [0x1d] = SSSE3_OP(pabsw), + [0x1e] = SSSE3_OP(pabsd), + [0x20] = SSE41_OP(pmovsxbw), + [0x21] = SSE41_OP(pmovsxbd), + [0x22] = SSE41_OP(pmovsxbq), + [0x23] = SSE41_OP(pmovsxwd), + [0x24] = SSE41_OP(pmovsxwq), + [0x25] = SSE41_OP(pmovsxdq), + [0x28] = SSE41_OP(pmuldq), + [0x29] = SSE41_OP(pcmpeqq), + [0x2a] = SSE41_SPECIAL, /* movntqda */ + [0x2b] = SSE41_OP(packusdw), + [0x30] = SSE41_OP(pmovzxbw), + [0x31] = SSE41_OP(pmovzxbd), + [0x32] = SSE41_OP(pmovzxbq), + [0x33] = SSE41_OP(pmovzxwd), + [0x34] = 
SSE41_OP(pmovzxwq), + [0x35] = SSE41_OP(pmovzxdq), + [0x37] = SSE42_OP(pcmpgtq), + [0x38] = SSE41_OP(pminsb), + [0x39] = SSE41_OP(pminsd), + [0x3a] = SSE41_OP(pminuw), + [0x3b] = SSE41_OP(pminud), + [0x3c] = SSE41_OP(pmaxsb), + [0x3d] = SSE41_OP(pmaxsd), + [0x3e] = SSE41_OP(pmaxuw), + [0x3f] = SSE41_OP(pmaxud), + [0x40] = SSE41_OP(pmulld), + [0x41] = SSE41_OP(phminposuw), + [0xdb] = AESNI_OP(aesimc), + [0xdc] = AESNI_OP(aesenc), + [0xdd] = AESNI_OP(aesenclast), + [0xde] = AESNI_OP(aesdec), + [0xdf] = AESNI_OP(aesdeclast), +}; + +static const struct SSEOpHelper_eppi sse_op_table7[256] = { + [0x08] = SSE41_OP(roundps), + [0x09] = SSE41_OP(roundpd), + [0x0a] = SSE41_OP(roundss), + [0x0b] = SSE41_OP(roundsd), + [0x0c] = SSE41_OP(blendps), + [0x0d] = SSE41_OP(blendpd), + [0x0e] = SSE41_OP(pblendw), + [0x0f] = SSSE3_OP(palignr), + [0x14] = SSE41_SPECIAL, /* pextrb */ + [0x15] = SSE41_SPECIAL, /* pextrw */ + [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */ + [0x17] = SSE41_SPECIAL, /* extractps */ + [0x20] = SSE41_SPECIAL, /* pinsrb */ + [0x21] = SSE41_SPECIAL, /* insertps */ + [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */ + [0x40] = SSE41_OP(dpps), + [0x41] = SSE41_OP(dppd), + [0x42] = SSE41_OP(mpsadbw), + [0x44] = PCLMULQDQ_OP(pclmulqdq), + [0x60] = SSE42_OP(pcmpestrm), + [0x61] = SSE42_OP(pcmpestri), + [0x62] = SSE42_OP(pcmpistrm), + [0x63] = SSE42_OP(pcmpistri), + [0xdf] = AESNI_OP(aeskeygenassist), +}; + +static void gen_sse(CPUX86State *env, DisasContext *s, int b, + target_ulong pc_start, int rex_r) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + int b1, op1_offset, op2_offset, is_xmm, val; + int modrm, mod, rm, reg; + SSEFunc_0_epp sse_fn_epp; + SSEFunc_0_eppi sse_fn_eppi; + SSEFunc_0_ppi sse_fn_ppi; + SSEFunc_0_eppt sse_fn_eppt; + MemOp ot; + + b &= 0xff; + if (s->prefix & PREFIX_DATA) + b1 = 1; + else if (s->prefix & PREFIX_REPZ) + b1 = 2; + else if (s->prefix & PREFIX_REPNZ) + b1 = 3; + else + b1 = 0; + sse_fn_epp = sse_op_table1[b][b1]; + if (!sse_fn_epp) { + goto unknown_op; + } + if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { + is_xmm = 1; + } else { + if (b1 == 0) { + /* MMX case */ + is_xmm = 0; + } else { + is_xmm = 1; + } + } + /* simple MMX/SSE operation */ + if (s->flags & HF_TS_MASK) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + return; + } + if (s->flags & HF_EM_MASK) { + illegal_op: + gen_illegal_opcode(s); + return; + } + if (is_xmm + && !(s->flags & HF_OSFXSR_MASK) + && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) { + goto unknown_op; + } + if (b == 0x0e) { + if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) { + /* If we were fully decoding this we might use illegal_op. 
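+
+      (For reading the SSE_SPECIAL switch further below: its cases are
+      keyed on b | (b1 << 8), so 0x310 means F2-prefixed 0F 10,
+      i.e. movsd, and 0x16f means 66-prefixed 0F 6F, i.e. movdqa.)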
*/ + goto unknown_op; + } + /* femms */ + gen_helper_emms(tcg_ctx, tcg_ctx->cpu_env); + return; + } + if (b == 0x77) { + /* emms */ + gen_helper_emms(tcg_ctx, tcg_ctx->cpu_env); + return; + } + /* prepare MMX state (XXX: optimize by storing fptt and fptags in + the static cpu state) */ + if (!is_xmm) { + gen_helper_enter_mmx(tcg_ctx, tcg_ctx->cpu_env); + } + + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7); + if (is_xmm) + reg |= rex_r; + mod = (modrm >> 6) & 3; + if (sse_fn_epp == SSE_SPECIAL) { + b |= (b1 << 8); + switch(b) { + case 0x0e7: /* movntq */ + if (mod == 3) { + goto illegal_op; + } + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); + break; + case 0x1e7: /* movntdq */ + case 0x02b: /* movntps */ + case 0x12b: /* movntps */ + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + break; + case 0x3f0: /* lddqu */ + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + break; + case 0x22b: /* movntss */ + case 0x32b: /* movntsd */ + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + if (b1 & 1) { + gen_stq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(0))); + } else { + tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, + xmm_regs[reg].ZMM_L(0))); + gen_op_st_v(s, MO_32, s->T0, s->A0); + } + break; + case 0x6e: /* movd mm, ea */ +#ifdef TARGET_X86_64 + if (s->dflag == MO_64) { + gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); + tcg_gen_st_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, fpregs[reg].mmx)); + } else +#endif + { + gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, + offsetof(CPUX86State,fpregs[reg].mmx)); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + gen_helper_movl_mm_T0_mmx(tcg_ctx, s->ptr0, s->tmp2_i32); + } + break; + case 0x16e: /* movd xmm, ea */ +#ifdef TARGET_X86_64 + if (s->dflag == MO_64) { + gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[reg])); + gen_helper_movq_mm_T0_xmm(tcg_ctx, s->ptr0, s->T0); + } else +#endif + { + gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[reg])); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + gen_helper_movl_mm_T0_xmm(tcg_ctx, s->ptr0, s->tmp2_i32); + } + break; + case 0x6f: /* movq mm, ea */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); + } else { + rm = (modrm & 7); + tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, + offsetof(CPUX86State,fpregs[rm].mmx)); + tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, + offsetof(CPUX86State,fpregs[reg].mmx)); + } + break; + case 0x010: /* movups */ + case 0x110: /* movupd */ + case 0x028: /* movaps */ + case 0x128: /* movapd */ + case 0x16f: /* movdqa xmm, ea */ + case 0x26f: /* movdqu xmm, ea */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movo(s, offsetof(CPUX86State, xmm_regs[reg]), + offsetof(CPUX86State,xmm_regs[rm])); + } + break; + case 0x210: /* movss xmm, ea */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, MO_32, s->T0, s->A0); + tcg_gen_st32_tl(tcg_ctx, s->T0, 
tcg_ctx->cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); + tcg_gen_movi_tl(tcg_ctx, s->T0, 0); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1))); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2))); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), + offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0))); + } + break; + case 0x310: /* movsd xmm, ea */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(0))); + tcg_gen_movi_tl(tcg_ctx, s->T0, 0); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2))); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), + offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); + } + break; + case 0x012: /* movlps */ + case 0x112: /* movlpd */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(0))); + } else { + /* movhlps */ + rm = (modrm & 7) | REX_B(s); + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), + offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1))); + } + break; + case 0x212: /* movsldup */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), + offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0))); + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)), + offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2))); + } + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)), + offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)), + offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2))); + break; + case 0x312: /* movddup */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(0))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), + offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); + } + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)), + offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); + break; + case 0x016: /* movhps */ + case 0x116: /* movhpd */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(1))); + } else { + /* movlhps */ + rm = (modrm & 7) | REX_B(s); + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)), + offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); + } + break; + case 0x216: /* movshdup */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)), + offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1))); + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)), + offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3))); + } + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)), + offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1))); + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)), + 
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3))); + break; + case 0x178: + case 0x378: + { + int bit_index, field_length; + + if (b1 == 1 && reg != 0) + goto illegal_op; + field_length = x86_ldub_code(env, s) & 0x3F; + bit_index = x86_ldub_code(env, s) & 0x3F; + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[reg])); + if (b1 == 1) + gen_helper_extrq_i(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, + tcg_const_i32(tcg_ctx, bit_index), + tcg_const_i32(tcg_ctx, field_length)); + else + gen_helper_insertq_i(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, + tcg_const_i32(tcg_ctx, bit_index), + tcg_const_i32(tcg_ctx, field_length)); + } + break; + case 0x7e: /* movd ea, mm */ +#ifdef TARGET_X86_64 + if (s->dflag == MO_64) { + tcg_gen_ld_i64(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,fpregs[reg].mmx)); + gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); + } else +#endif + { + tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0))); + gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); + } + break; + case 0x17e: /* movd ea, xmm */ +#ifdef TARGET_X86_64 + if (s->dflag == MO_64) { + tcg_gen_ld_i64(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); + gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); + } else +#endif + { + tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); + gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); + } + break; + case 0x27e: /* movq xmm, ea */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(0))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)), + offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0))); + } + gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1))); + break; + case 0x7f: /* movq ea, mm */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); + } else { + rm = (modrm & 7); + gen_op_movq(s, offsetof(CPUX86State, fpregs[rm].mmx), + offsetof(CPUX86State,fpregs[reg].mmx)); + } + break; + case 0x011: /* movups */ + case 0x111: /* movupd */ + case 0x029: /* movaps */ + case 0x129: /* movapd */ + case 0x17f: /* movdqa ea, xmm */ + case 0x27f: /* movdqu ea, xmm */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movo(s, offsetof(CPUX86State, xmm_regs[rm]), + offsetof(CPUX86State,xmm_regs[reg])); + } + break; + case 0x211: /* movss ea, xmm */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0))); + gen_op_st_v(s, MO_32, s->T0, s->A0); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movl(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_L(0)), + offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0))); + } + break; + case 0x311: /* movsd ea, xmm */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(0))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(0)), + offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); + } + break; + case 0x013: /* movlps */ + case 0x113: /* movlpd */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(0))); + } else { + goto illegal_op; + } + break; + case 0x017: 
/* movhps */ + case 0x117: /* movhpd */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(1))); + } else { + goto illegal_op; + } + break; + case 0x71: /* shift mm, im */ + case 0x72: + case 0x73: + case 0x171: /* shift xmm, im */ + case 0x172: + case 0x173: + if (b1 >= 2) { + goto unknown_op; + } + val = x86_ldub_code(env, s); + if (is_xmm) { + tcg_gen_movi_tl(tcg_ctx, s->T0, val); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, xmm_t0.ZMM_L(0))); + tcg_gen_movi_tl(tcg_ctx, s->T0, 0); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, xmm_t0.ZMM_L(1))); + op1_offset = offsetof(CPUX86State,xmm_t0); + } else { + tcg_gen_movi_tl(tcg_ctx, s->T0, val); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, mmx_t0.MMX_L(0))); + tcg_gen_movi_tl(tcg_ctx, s->T0, 0); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, mmx_t0.MMX_L(1))); + op1_offset = offsetof(CPUX86State,mmx_t0); + } + sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 + + (((modrm >> 3)) & 7)][b1]; + if (!sse_fn_epp) { + goto unknown_op; + } + if (is_xmm) { + rm = (modrm & 7) | REX_B(s); + op2_offset = offsetof(CPUX86State,xmm_regs[rm]); + } else { + rm = (modrm & 7); + op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); + } + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op2_offset); + tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op1_offset); + sse_fn_epp(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); + break; + case 0x050: /* movmskps */ + rm = (modrm & 7) | REX_B(s); + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[rm])); + gen_helper_movmskps(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s->ptr0); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->tmp2_i32); + break; + case 0x150: /* movmskpd */ + rm = (modrm & 7) | REX_B(s); + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[rm])); + gen_helper_movmskpd(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s->ptr0); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->tmp2_i32); + break; + case 0x02a: /* cvtpi2ps */ + case 0x12a: /* cvtpi2pd */ + gen_helper_enter_mmx(tcg_ctx, tcg_ctx->cpu_env); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + op2_offset = offsetof(CPUX86State,mmx_t0); + gen_ldq_env_A0(s, op2_offset); + } else { + rm = (modrm & 7); + op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); + } + op1_offset = offsetof(CPUX86State,xmm_regs[reg]); + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); + switch(b >> 8) { + case 0x0: + gen_helper_cvtpi2ps(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); + break; + default: + case 0x1: + gen_helper_cvtpi2pd(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); + break; + } + break; + case 0x22a: /* cvtsi2ss */ + case 0x32a: /* cvtsi2sd */ + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + op1_offset = offsetof(CPUX86State,xmm_regs[reg]); + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); + if (ot == MO_32) { + SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1]; + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + sse_fn_epi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->tmp2_i32); + } else { +#ifdef TARGET_X86_64 + SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1]; + sse_fn_epl(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->T0); +#else + goto illegal_op; +#endif + } + break; + 
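+            /*
+             * Decoding note: this switch runs on the composite key
+             * b | (b1 << 8), where b1 encodes the mandatory prefix
+             * (0 = none, 1 = 0x66, 2 = 0xf3, 3 = 0xf2), as the case
+             * comments throughout this switch indicate. Worked example
+             * for the two cases above:
+             *   0x22a = 0xf3 0x0f 0x2a = cvtsi2ss
+             *   0x32a = 0xf2 0x0f 0x2a = cvtsi2sd
+             * so (b >> 8) & 1 is exactly the ss/sd selector used to
+             * index sse_op_table3ai/sse_op_table3aq.
+             */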
case 0x02c: /* cvttps2pi */ + case 0x12c: /* cvttpd2pi */ + case 0x02d: /* cvtps2pi */ + case 0x12d: /* cvtpd2pi */ + gen_helper_enter_mmx(tcg_ctx, tcg_ctx->cpu_env); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + op2_offset = offsetof(CPUX86State,xmm_t0); + gen_ldo_env_A0(s, op2_offset); + } else { + rm = (modrm & 7) | REX_B(s); + op2_offset = offsetof(CPUX86State,xmm_regs[rm]); + } + op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx); + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); + switch(b) { + case 0x02c: + gen_helper_cvttps2pi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); + break; + case 0x12c: + gen_helper_cvttpd2pi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); + break; + case 0x02d: + gen_helper_cvtps2pi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); + break; + case 0x12d: + gen_helper_cvtpd2pi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); + break; + } + break; + case 0x22c: /* cvttss2si */ + case 0x32c: /* cvttsd2si */ + case 0x22d: /* cvtss2si */ + case 0x32d: /* cvtsd2si */ + ot = mo_64_32(s->dflag); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + if ((b >> 8) & 1) { + gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0))); + } else { + gen_op_ld_v(s, MO_32, s->T0, s->A0); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, xmm_t0.ZMM_L(0))); + } + op2_offset = offsetof(CPUX86State,xmm_t0); + } else { + rm = (modrm & 7) | REX_B(s); + op2_offset = offsetof(CPUX86State,xmm_regs[rm]); + } + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op2_offset); + if (ot == MO_32) { + SSEFunc_i_ep sse_fn_i_ep = + sse_op_table3bi[((b >> 7) & 2) | (b & 1)]; + sse_fn_i_ep(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s->ptr0); + tcg_gen_extu_i32_tl(tcg_ctx, s->T0, s->tmp2_i32); + } else { +#ifdef TARGET_X86_64 + SSEFunc_l_ep sse_fn_l_ep = + sse_op_table3bq[((b >> 7) & 2) | (b & 1)]; + sse_fn_l_ep(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->ptr0); +#else + goto illegal_op; +#endif + } + gen_op_mov_reg_v(s, ot, reg, s->T0); + break; + case 0xc4: /* pinsrw */ + case 0x1c4: + s->rip_offset = 1; + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + val = x86_ldub_code(env, s); + if (b1) { + val &= 7; + tcg_gen_st16_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val))); + } else { + val &= 3; + tcg_gen_st16_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val))); + } + break; + case 0xc5: /* pextrw */ + case 0x1c5: + if (mod != 3) + goto illegal_op; + ot = mo_64_32(s->dflag); + val = x86_ldub_code(env, s); + if (b1) { + val &= 7; + rm = (modrm & 7) | REX_B(s); + tcg_gen_ld16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val))); + } else { + val &= 3; + rm = (modrm & 7); + tcg_gen_ld16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val))); + } + reg = ((modrm >> 3) & 7) | rex_r; + gen_op_mov_reg_v(s, ot, reg, s->T0); + break; + case 0x1d6: /* movq ea, xmm */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(0))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movq(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(0)), + offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0))); + gen_op_movq_env_0(s, + offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(1))); + } + break; + case 0x2d6: /* movq2dq */ + gen_helper_enter_mmx(tcg_ctx, tcg_ctx->cpu_env); + rm = (modrm & 7); + gen_op_movq(s, offsetof(CPUX86State, 
xmm_regs[reg].ZMM_Q(0)),
+                        offsetof(CPUX86State,fpregs[rm].mmx));
+            gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)));
+            break;
+        case 0x3d6: /* movdq2q */
+            gen_helper_enter_mmx(tcg_ctx, tcg_ctx->cpu_env);
+            rm = (modrm & 7) | REX_B(s);
+            gen_op_movq(s, offsetof(CPUX86State, fpregs[reg & 7].mmx),
+                        offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+            break;
+        case 0xd7: /* pmovmskb */
+        case 0x1d7:
+            if (mod != 3)
+                goto illegal_op;
+            if (b1) {
+                rm = (modrm & 7) | REX_B(s);
+                tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env,
+                                 offsetof(CPUX86State, xmm_regs[rm]));
+                gen_helper_pmovmskb_xmm(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s->ptr0);
+            } else {
+                rm = (modrm & 7);
+                tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env,
+                                 offsetof(CPUX86State, fpregs[rm].mmx));
+                gen_helper_pmovmskb_mmx(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, s->ptr0);
+            }
+            reg = ((modrm >> 3) & 7) | rex_r;
+            tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->tmp2_i32);
+            break;
+
+        case 0x138:
+        case 0x038:
+            b = modrm;
+            if ((b & 0xf0) == 0xf0) {
+                goto do_0f_38_fx;
+            }
+            modrm = x86_ldub_code(env, s);
+            rm = modrm & 7;
+            reg = ((modrm >> 3) & 7) | rex_r;
+            mod = (modrm >> 6) & 3;
+            if (b1 >= 2) {
+                goto unknown_op;
+            }
+
+            sse_fn_epp = sse_op_table6[b].op[b1];
+            if (!sse_fn_epp) {
+                goto unknown_op;
+            }
+            if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
+                goto illegal_op;
+
+            if (b1) {
+                op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+                if (mod == 3) {
+                    op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
+                } else {
+                    op2_offset = offsetof(CPUX86State,xmm_t0);
+                    gen_lea_modrm(env, s, modrm);
+                    switch (b) {
+                    case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
+                    case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
+                    case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
+                        gen_ldq_env_A0(s, op2_offset +
+                                       offsetof(ZMMReg, ZMM_Q(0)));
+                        break;
+                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
+                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
+                        tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0,
+                                            s->mem_index, MO_LEUL);
+                        tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, op2_offset +
+                                       offsetof(ZMMReg, ZMM_L(0)));
+                        break;
+                    case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
+                        tcg_gen_qemu_ld_tl(tcg_ctx, s->tmp0, s->A0,
+                                           s->mem_index, MO_LEUW);
+                        tcg_gen_st16_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_env, op2_offset +
+                                        offsetof(ZMMReg, ZMM_W(0)));
+                        break;
+                    case 0x2a: /* movntdqa */
+                        gen_ldo_env_A0(s, op1_offset);
+                        return;
+                    default:
+                        gen_ldo_env_A0(s, op2_offset);
+                    }
+                }
+            } else {
+                op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+                if (mod == 3) {
+                    op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+                } else {
+                    op2_offset = offsetof(CPUX86State,mmx_t0);
+                    gen_lea_modrm(env, s, modrm);
+                    gen_ldq_env_A0(s, op2_offset);
+                }
+            }
+            if (sse_fn_epp == SSE_SPECIAL) {
+                goto unknown_op;
+            }
+
+            tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset);
+            tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset);
+            sse_fn_epp(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1);
+
+            if (b == 0x17) {
+                set_cc_op(s, CC_OP_EFLAGS);
+            }
+            break;
+
+        case 0x238:
+        case 0x338:
+        do_0f_38_fx:
+            /* Various integer extensions at 0f 38 f[0-f].
*/ + b = modrm | (b1 << 8); + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + + switch (b) { + case 0x3f0: /* crc32 Gd,Eb */ + case 0x3f1: /* crc32 Gd,Ey */ + do_crc32: + if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) { + goto illegal_op; + } + if ((b & 0xff) == 0xf0) { + ot = MO_8; + } else if (s->dflag != MO_64) { + ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32); + } else { + ot = MO_64; + } + + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[reg]); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_helper_crc32(tcg_ctx, s->T0, s->tmp2_i32, + s->T0, tcg_const_i32(tcg_ctx, 8 << ot)); + + ot = mo_64_32(s->dflag); + gen_op_mov_reg_v(s, ot, reg, s->T0); + break; + + case 0x1f0: /* crc32 or movbe */ + case 0x1f1: + /* For these insns, the f3 prefix is supposed to have priority + over the 66 prefix, but that's not what we implement above + setting b1. */ + if (s->prefix & PREFIX_REPNZ) { + goto do_crc32; + } + /* FALLTHRU */ + case 0x0f0: /* movbe Gy,My */ + case 0x0f1: /* movbe My,Gy */ + if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) { + goto illegal_op; + } + if (s->dflag != MO_64) { + ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32); + } else { + ot = MO_64; + } + + gen_lea_modrm(env, s, modrm); + if ((b & 1) == 0) { + tcg_gen_qemu_ld_tl(tcg_ctx, s->T0, s->A0, + s->mem_index, ot | MO_BE); + gen_op_mov_reg_v(s, ot, reg, s->T0); + } else { + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->A0, + s->mem_index, ot | MO_BE); + } + break; + + case 0x0f2: /* andn Gy, By, Ey */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + tcg_gen_andc_tl(tcg_ctx, s->T0, s->T0, tcg_ctx->cpu_regs[s->vex_v]); + gen_op_mov_reg_v(s, ot, reg, s->T0); + gen_op_update1_cc(s); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + + case 0x0f7: /* bextr Gy, Ey, By */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + { + TCGv bound, zero; + + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + /* Extract START, and shift the operand. + Shifts larger than operand size get zeros. */ + tcg_gen_ext8u_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[s->vex_v]); + tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, s->A0); + + bound = tcg_const_tl(tcg_ctx, ot == MO_64 ? 63 : 31); + zero = tcg_const_tl(tcg_ctx, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, s->T0, s->A0, bound, + s->T0, zero); + tcg_temp_free(tcg_ctx, zero); + + /* Extract the LEN into a mask. Lengths larger than + operand size get all ones. */ + tcg_gen_extract_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[s->vex_v], 8, 8); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, s->A0, s->A0, bound, + s->A0, bound); + tcg_temp_free(tcg_ctx, bound); + tcg_gen_movi_tl(tcg_ctx, s->T1, 1); + tcg_gen_shl_tl(tcg_ctx, s->T1, s->T1, s->A0); + tcg_gen_subi_tl(tcg_ctx, s->T1, s->T1, 1); + tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, s->T1); + + gen_op_mov_reg_v(s, ot, reg, s->T0); + gen_op_update1_cc(s); + set_cc_op(s, CC_OP_LOGICB + ot); + } + break; + + case 0x0f5: /* bzhi Gy, Ey, By */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + tcg_gen_ext8u_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v]); + { + TCGv bound = tcg_const_tl(tcg_ctx, ot == MO_64 ? 
63 : 31); + /* Note that since we're using BMILG (in order to get O + cleared) we need to store the inverse into C. */ + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_cc_src, + s->T1, bound); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, s->T1, s->T1, + bound, bound, s->T1); + tcg_temp_free(tcg_ctx, bound); + } + tcg_gen_movi_tl(tcg_ctx, s->A0, -1); + tcg_gen_shl_tl(tcg_ctx, s->A0, s->A0, s->T1); + tcg_gen_andc_tl(tcg_ctx, s->T0, s->T0, s->A0); + gen_op_mov_reg_v(s, ot, reg, s->T0); + gen_op_update1_cc(s); + set_cc_op(s, CC_OP_BMILGB + ot); + break; + + case 0x3f6: /* mulx By, Gy, rdx, Ey */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + switch (ot) { + default: + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, tcg_ctx->cpu_regs[R_EDX]); + tcg_gen_mulu2_i32(tcg_ctx, s->tmp2_i32, s->tmp3_i32, + s->tmp2_i32, s->tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[s->vex_v], s->tmp2_i32); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->tmp3_i32); + break; +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_mulu2_i64(tcg_ctx, s->T0, s->T1, + s->T0, tcg_ctx->cpu_regs[R_EDX]); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_regs[s->vex_v], s->T0); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_regs[reg], s->T1); + break; +#endif + } + break; + + case 0x3f5: /* pdep Gy, By, Ey */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + /* Note that by zero-extending the mask operand, we + automatically handle zero-extending the result. */ + if (ot == MO_64) { + tcg_gen_mov_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v]); + } else { + tcg_gen_ext32u_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v]); + } + gen_helper_pdep(tcg_ctx, tcg_ctx->cpu_regs[reg], s->T0, s->T1); + break; + + case 0x2f5: /* pext Gy, By, Ey */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + /* Note that by zero-extending the mask operand, we + automatically handle zero-extending the result. */ + if (ot == MO_64) { + tcg_gen_mov_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v]); + } else { + tcg_gen_ext32u_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v]); + } + gen_helper_pext(tcg_ctx, tcg_ctx->cpu_regs[reg], s->T0, s->T1); + break; + + case 0x1f6: /* adcx Gy, Ey */ + case 0x2f6: /* adox Gy, Ey */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) { + goto illegal_op; + } else { + TCGv carry_in, carry_out, zero; + int end_op; + + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + + /* Re-use the carry-out from a previous round. */ + carry_in = NULL; + carry_out = (b == 0x1f6 ? tcg_ctx->cpu_cc_dst : tcg_ctx->cpu_cc_src2); + switch (s->cc_op) { + case CC_OP_ADCX: + if (b == 0x1f6) { + carry_in = tcg_ctx->cpu_cc_dst; + end_op = CC_OP_ADCX; + } else { + end_op = CC_OP_ADCOX; + } + break; + case CC_OP_ADOX: + if (b == 0x1f6) { + end_op = CC_OP_ADCOX; + } else { + carry_in = tcg_ctx->cpu_cc_src2; + end_op = CC_OP_ADOX; + } + break; + case CC_OP_ADCOX: + end_op = CC_OP_ADCOX; + carry_in = carry_out; + break; + default: + end_op = (b == 0x1f6 ? 
CC_OP_ADCX : CC_OP_ADOX); + break; + } + /* If we can't reuse carry-out, get it out of EFLAGS. */ + if (!carry_in) { + if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) { + gen_compute_eflags(s); + } + carry_in = s->tmp0; + tcg_gen_extract_tl(tcg_ctx, carry_in, tcg_ctx->cpu_cc_src, + ctz32(b == 0x1f6 ? CC_C : CC_O), 1); + } + + switch (ot) { +#ifdef TARGET_X86_64 + case MO_32: + /* If we know TL is 64-bit, and we want a 32-bit + result, just do everything in 64-bit arithmetic. */ + tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_regs[reg], tcg_ctx->cpu_regs[reg]); + tcg_gen_ext32u_i64(tcg_ctx, s->T0, s->T0); + tcg_gen_add_i64(tcg_ctx, s->T0, s->T0, tcg_ctx->cpu_regs[reg]); + tcg_gen_add_i64(tcg_ctx, s->T0, s->T0, carry_in); + tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_regs[reg], s->T0); + tcg_gen_shri_i64(tcg_ctx, carry_out, s->T0, 32); + break; +#endif + default: + /* Otherwise compute the carry-out in two steps. */ + zero = tcg_const_tl(tcg_ctx, 0); + tcg_gen_add2_tl(tcg_ctx, s->T0, carry_out, + s->T0, zero, + carry_in, zero); + tcg_gen_add2_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], carry_out, + tcg_ctx->cpu_regs[reg], carry_out, + s->T0, zero); + tcg_temp_free(tcg_ctx, zero); + break; + } + set_cc_op(s, end_op); + } + break; + + case 0x1f7: /* shlx Gy, Ey, By */ + case 0x2f7: /* sarx Gy, Ey, By */ + case 0x3f7: /* shrx Gy, Ey, By */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + if (ot == MO_64) { + tcg_gen_andi_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v], 63); + } else { + tcg_gen_andi_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[s->vex_v], 31); + } + if (b == 0x1f7) { + tcg_gen_shl_tl(tcg_ctx, s->T0, s->T0, s->T1); + } else if (b == 0x2f7) { + if (ot != MO_64) { + tcg_gen_ext32s_tl(tcg_ctx, s->T0, s->T0); + } + tcg_gen_sar_tl(tcg_ctx, s->T0, s->T0, s->T1); + } else { + if (ot != MO_64) { + tcg_gen_ext32u_tl(tcg_ctx, s->T0, s->T0); + } + tcg_gen_shr_tl(tcg_ctx, s->T0, s->T0, s->T1); + } + gen_op_mov_reg_v(s, ot, reg, s->T0); + break; + + case 0x0f3: + case 0x1f3: + case 0x2f3: + case 0x3f3: /* Group 17 */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0); + switch (reg & 7) { + case 1: /* blsr By,Ey */ + tcg_gen_subi_tl(tcg_ctx, s->T1, s->T0, 1); + tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, s->T1); + break; + case 2: /* blsmsk By,Ey */ + tcg_gen_subi_tl(tcg_ctx, s->T1, s->T0, 1); + tcg_gen_xor_tl(tcg_ctx, s->T0, s->T0, s->T1); + break; + case 3: /* blsi By, Ey */ + tcg_gen_neg_tl(tcg_ctx, s->T1, s->T0); + tcg_gen_and_tl(tcg_ctx, s->T0, s->T0, s->T1); + break; + default: + goto unknown_op; + } + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); + gen_op_mov_reg_v(s, ot, s->vex_v, s->T0); + set_cc_op(s, CC_OP_BMILGB + ot); + break; + + default: + goto unknown_op; + } + break; + + case 0x03a: + case 0x13a: + b = modrm; + modrm = x86_ldub_code(env, s); + rm = modrm & 7; + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (b1 >= 2) { + goto unknown_op; + } + + sse_fn_eppi = sse_op_table7[b].op[b1]; + if (!sse_fn_eppi) { + goto unknown_op; + } + if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) + goto illegal_op; + + s->rip_offset = 1; + + if (sse_fn_eppi == SSE_SPECIAL) { + ot = mo_64_32(s->dflag); + rm = (modrm & 
7) | REX_B(s); + if (mod != 3) + gen_lea_modrm(env, s, modrm); + reg = ((modrm >> 3) & 7) | rex_r; + val = x86_ldub_code(env, s); + switch (b) { + case 0x14: /* pextrb */ + tcg_gen_ld8u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, + xmm_regs[reg].ZMM_B(val & 15))); + if (mod == 3) { + gen_op_mov_reg_v(s, ot, rm, s->T0); + } else { + tcg_gen_qemu_st_tl(tcg_ctx, s->T0, s->A0, + s->mem_index, MO_UB); + } + break; + case 0x15: /* pextrw */ + tcg_gen_ld16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, + xmm_regs[reg].ZMM_W(val & 7))); + if (mod == 3) { + gen_op_mov_reg_v(s, ot, rm, s->T0); + } else { + tcg_gen_qemu_st_tl(tcg_ctx, s->T0, s->A0, + s->mem_index, MO_LEUW); + } + break; + case 0x16: + if (ot == MO_32) { /* pextrd */ + tcg_gen_ld_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, + offsetof(CPUX86State, + xmm_regs[reg].ZMM_L(val & 3))); + if (mod == 3) { + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[rm], s->tmp2_i32); + } else { + tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUL); + } + } else { /* pextrq */ +#ifdef TARGET_X86_64 + tcg_gen_ld_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, + offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(val & 1))); + if (mod == 3) { + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_regs[rm], s->tmp1_i64); + } else { + tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, + s->mem_index, MO_LEQ); + } +#else + goto illegal_op; +#endif + } + break; + case 0x17: /* extractps */ + tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, + xmm_regs[reg].ZMM_L(val & 3))); + if (mod == 3) { + gen_op_mov_reg_v(s, ot, rm, s->T0); + } else { + tcg_gen_qemu_st_tl(tcg_ctx, s->T0, s->A0, + s->mem_index, MO_LEUL); + } + break; + case 0x20: /* pinsrb */ + if (mod == 3) { + gen_op_mov_v_reg(s, MO_32, s->T0, rm); + } else { + tcg_gen_qemu_ld_tl(tcg_ctx, s->T0, s->A0, + s->mem_index, MO_UB); + } + tcg_gen_st8_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, + xmm_regs[reg].ZMM_B(val & 15))); + break; + case 0x21: /* insertps */ + if (mod == 3) { + tcg_gen_ld_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[rm] + .ZMM_L((val >> 6) & 3))); + } else { + tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUL); + } + tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_regs[reg] + .ZMM_L((val >> 4) & 3))); + if ((val >> 0) & 1) + tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), + tcg_ctx->cpu_env, offsetof(CPUX86State, + xmm_regs[reg].ZMM_L(0))); + if ((val >> 1) & 1) + tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), + tcg_ctx->cpu_env, offsetof(CPUX86State, + xmm_regs[reg].ZMM_L(1))); + if ((val >> 2) & 1) + tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), + tcg_ctx->cpu_env, offsetof(CPUX86State, + xmm_regs[reg].ZMM_L(2))); + if ((val >> 3) & 1) + tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), + tcg_ctx->cpu_env, offsetof(CPUX86State, + xmm_regs[reg].ZMM_L(3))); + break; + case 0x22: + if (ot == MO_32) { /* pinsrd */ + if (mod == 3) { + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[rm]); + } else { + tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUL); + } + tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, + offsetof(CPUX86State, + xmm_regs[reg].ZMM_L(val & 3))); + } else { /* pinsrq */ +#ifdef TARGET_X86_64 + if (mod == 3) { + gen_op_mov_v_reg(s, ot, s->tmp1_i64, rm); + } else { + tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, 
s->A0, + s->mem_index, MO_LEQ); + } + tcg_gen_st_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, + offsetof(CPUX86State, + xmm_regs[reg].ZMM_Q(val & 1))); +#else + goto illegal_op; +#endif + } + break; + } + return; + } + + if (b1) { + op1_offset = offsetof(CPUX86State,xmm_regs[reg]); + if (mod == 3) { + op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); + } else { + op2_offset = offsetof(CPUX86State,xmm_t0); + gen_lea_modrm(env, s, modrm); + gen_ldo_env_A0(s, op2_offset); + } + } else { + op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); + if (mod == 3) { + op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); + } else { + op2_offset = offsetof(CPUX86State,mmx_t0); + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, op2_offset); + } + } + val = x86_ldub_code(env, s); + + if ((b & 0xfc) == 0x60) { /* pcmpXstrX */ + set_cc_op(s, CC_OP_EFLAGS); + + if (s->dflag == MO_64) { + /* The helper must use entire 64-bit gp registers */ + val |= 1 << 8; + } + } + + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); + sse_fn_eppi(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1, tcg_const_i32(tcg_ctx, val)); + break; + + case 0x33a: + /* Various integer extensions at 0f 3a f[0-f]. */ + b = modrm | (b1 << 8); + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + + switch (b) { + case 0x3f0: /* rorx Gy,Ey, Ib */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + b = x86_ldub_code(env, s); + if (ot == MO_64) { + tcg_gen_rotri_tl(tcg_ctx, s->T0, s->T0, b & 63); + } else { + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + tcg_gen_rotri_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, b & 31); + tcg_gen_extu_i32_tl(tcg_ctx, s->T0, s->tmp2_i32); + } + gen_op_mov_reg_v(s, ot, reg, s->T0); + break; + + default: + goto unknown_op; + } + break; + + default: + unknown_op: + gen_unknown_opcode(env, s); + return; + } + } else { + /* generic MMX or SSE operation */ + switch(b) { + case 0x70: /* pshufx insn */ + case 0xc6: /* pshufx insn */ + case 0xc2: /* compare insns */ + s->rip_offset = 1; + break; + default: + break; + } + if (is_xmm) { + op1_offset = offsetof(CPUX86State,xmm_regs[reg]); + if (mod != 3) { + int sz = 4; + + gen_lea_modrm(env, s, modrm); + op2_offset = offsetof(CPUX86State,xmm_t0); + + switch (b) { + case 0x50: + case 0x51: + case 0x52: + case 0x53: + case 0x54: + case 0x55: + case 0x56: + case 0x57: + case 0x58: + case 0x59: + case 0x5a: + + case 0x5c: + case 0x5d: + case 0x5e: + case 0x5f: + + case 0xc2: + /* Most sse scalar operations. 
*/ + if (b1 == 2) { + sz = 2; + } else if (b1 == 3) { + sz = 3; + } + break; + + case 0x2e: /* ucomis[sd] */ + case 0x2f: /* comis[sd] */ + if (b1 == 0) { + sz = 2; + } else { + sz = 3; + } + break; + } + + switch (sz) { + case 2: + /* 32 bit access */ + gen_op_ld_v(s, MO_32, s->T0, s->A0); + tcg_gen_st32_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State,xmm_t0.ZMM_L(0))); + break; + case 3: + /* 64 bit access */ + gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0))); + break; + default: + /* 128 bit access */ + gen_ldo_env_A0(s, op2_offset); + break; + } + } else { + rm = (modrm & 7) | REX_B(s); + op2_offset = offsetof(CPUX86State,xmm_regs[rm]); + } + } else { + op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + op2_offset = offsetof(CPUX86State,mmx_t0); + gen_ldq_env_A0(s, op2_offset); + } else { + rm = (modrm & 7); + op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); + } + } + switch(b) { + case 0x0f: /* 3DNow! data insns */ + val = x86_ldub_code(env, s); + sse_fn_epp = sse_op_table5[val]; + if (!sse_fn_epp) { + goto unknown_op; + } + if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) { + goto illegal_op; + } + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); + sse_fn_epp(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); + break; + case 0x70: /* pshufx insn */ + case 0xc6: /* pshufx insn */ + val = x86_ldub_code(env, s); + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); + /* XXX: introduce a new table? */ + sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp; + sse_fn_ppi(tcg_ctx, s->ptr0, s->ptr1, tcg_const_i32(tcg_ctx, val)); + break; + case 0xc2: + /* compare insns */ + val = x86_ldub_code(env, s); + if (val >= 8) + goto unknown_op; + sse_fn_epp = sse_op_table4[val][b1]; + + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); + sse_fn_epp(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); + break; + case 0xf7: + /* maskmov : we must prepare A0 */ + if (mod != 3) + goto illegal_op; + tcg_gen_mov_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_EDI]); + gen_extu(tcg_ctx, s->aflag, s->A0); + gen_add_A0_ds_seg(s); + + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); + /* XXX: introduce a new table? */ + sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp; + sse_fn_eppt(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1, s->A0); + break; + default: + tcg_gen_addi_ptr(tcg_ctx, s->ptr0, tcg_ctx->cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, s->ptr1, tcg_ctx->cpu_env, op2_offset); + sse_fn_epp(tcg_ctx, tcg_ctx->cpu_env, s->ptr0, s->ptr1); + break; + } + if (b == 0x2e || b == 0x2f) { + set_cc_op(s, CC_OP_EFLAGS); + } + } +} + +// Unicorn: sync EFLAGS on demand +static void sync_eflags(DisasContext *s, TCGContext *tcg_ctx) +{ + gen_update_cc_op(s); + gen_helper_read_eflags(tcg_ctx, s->T0, tcg_ctx->cpu_env); + tcg_gen_st_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, eflags)); +} + +/* convert one instruction. s->base.is_jmp is set if the translation must + be stopped. 
Return the next pc value */
+static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    CPUX86State *env = cpu->env_ptr;
+    int b, prefixes;
+    int shift;
+    MemOp ot, aflag, dflag;
+    int modrm, reg, rm, mod, op, opreg, val;
+    target_ulong next_eip, tval;
+    int rex_w, rex_r;
+    target_ulong pc_start = s->base.pc_next;
+    TCGOp *tcg_op, *prev_op = NULL;
+    bool insn_hook = false;
+
+    s->pc_start = s->pc = pc_start;
+    s->prefix = 0;
+
+    s->uc = env->uc;
+
+    // Unicorn: end address tells us to stop emulation
+    if (s->pc == s->uc->addr_end) {
+        // imitate the HLT instruction
+        gen_update_cc_op(s);
+        gen_jmp_im(s, pc_start - s->cs_base);
+        gen_helper_hlt(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start));
+        s->base.is_jmp = DISAS_NORETURN;
+        return s->pc;
+    }
+
+    // Unicorn: the callback might need to access EFLAGS,
+    // or might want to stop emulation immediately
+    if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, pc_start)) {
+        if (s->last_cc_op != s->cc_op) {
+            sync_eflags(s, tcg_ctx);
+            s->last_cc_op = s->cc_op;
+        }
+
+        // save the last emitted op
+        prev_op = tcg_last_op(tcg_ctx);
+        insn_hook = true;
+        gen_uc_tracecode(tcg_ctx, 0xf1f1f1f1, UC_HOOK_CODE_IDX, env->uc, pc_start);
+
+        check_exit_request(tcg_ctx);
+    }
+
+    s->override = -1;
+
+#ifdef TARGET_X86_64
+    s->rex_x = 0;
+    s->rex_b = 0;
+    s->x86_64_hregs = false;
+#endif
+    s->rip_offset = 0; /* for relative ip address */
+    s->vex_l = 0;
+    s->vex_v = 0;
+    if (sigsetjmp(s->jmpbuf, 0) != 0) {
+        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        return s->pc;
+    }
+
+    prefixes = 0;
+    rex_w = -1;
+    rex_r = 0;
+
+ next_byte:
+    b = x86_ldub_code(env, s);
+    /* Collect prefixes. */
+    switch (b) {
+    case 0xf3:
+        prefixes |= PREFIX_REPZ;
+        goto next_byte;
+    case 0xf2:
+        prefixes |= PREFIX_REPNZ;
+        goto next_byte;
+    case 0xf0:
+        prefixes |= PREFIX_LOCK;
+        goto next_byte;
+    case 0x2e:
+        s->override = R_CS;
+        goto next_byte;
+    case 0x36:
+        s->override = R_SS;
+        goto next_byte;
+    case 0x3e:
+        s->override = R_DS;
+        goto next_byte;
+    case 0x26:
+        s->override = R_ES;
+        goto next_byte;
+    case 0x64:
+        s->override = R_FS;
+        goto next_byte;
+    case 0x65:
+        s->override = R_GS;
+        goto next_byte;
+    case 0x66:
+        prefixes |= PREFIX_DATA;
+        goto next_byte;
+    case 0x67:
+        prefixes |= PREFIX_ADR;
+        goto next_byte;
+#ifdef TARGET_X86_64
+    case 0x40:
+    case 0x41:
+    case 0x42:
+    case 0x43:
+    case 0x44:
+    case 0x45:
+    case 0x46:
+    case 0x47:
+    case 0x48:
+    case 0x49:
+    case 0x4a:
+    case 0x4b:
+    case 0x4c:
+    case 0x4d:
+    case 0x4e:
+    case 0x4f:
+        if (CODE64(s)) {
+            /* REX prefix */
+            rex_w = (b >> 3) & 1;
+            rex_r = (b & 0x4) << 1;
+            s->rex_x = (b & 0x2) << 2;
+            REX_B(s) = (b & 0x1) << 3;
+            /* select uniform byte register addressing */
+            s->x86_64_hregs = true;
+            goto next_byte;
+        }
+        break;
+#endif
+    case 0xc5: /* 2-byte VEX */
+    case 0xc4: /* 3-byte VEX */
+        /* VEX prefixes cannot be used except in 32-bit mode.
+           Otherwise the instruction is LES or LDS. */
+        if (s->code32 && !s->vm86) {
+            static const int pp_prefix[4] = {
+                0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
+            };
+            int vex3, vex2 = x86_ldub_code(env, s);
+
+            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
+                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
+                   otherwise the instruction is LES or LDS. */
+                s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
+                break;
+            }
+
+            /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes.
*/ + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ + | PREFIX_LOCK | PREFIX_DATA)) { + goto illegal_op; + } +#ifdef TARGET_X86_64 + if (s->x86_64_hregs) { + goto illegal_op; + } +#endif + rex_r = (~vex2 >> 4) & 8; + if (b == 0xc5) { + /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */ + vex3 = vex2; + b = x86_ldub_code(env, s) | 0x100; + } else { + /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */ +#ifdef TARGET_X86_64 + s->rex_x = (~vex2 >> 3) & 8; + s->rex_b = (~vex2 >> 2) & 8; +#endif + vex3 = x86_ldub_code(env, s); + rex_w = (vex3 >> 7) & 1; + switch (vex2 & 0x1f) { + case 0x01: /* Implied 0f leading opcode bytes. */ + b = x86_ldub_code(env, s) | 0x100; + break; + case 0x02: /* Implied 0f 38 leading opcode bytes. */ + b = 0x138; + break; + case 0x03: /* Implied 0f 3a leading opcode bytes. */ + b = 0x13a; + break; + default: /* Reserved for future use. */ + goto unknown_op; + } + } + s->vex_v = (~vex3 >> 3) & 0xf; + s->vex_l = (vex3 >> 2) & 1; + prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX; + } + break; + } + + /* Post-process prefixes. */ + if (CODE64(s)) { + /* In 64-bit mode, the default data size is 32-bit. Select 64-bit + data with rex_w, and 16-bit data with 0x66; rex_w takes precedence + over 0x66 if both are present. */ + dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); + /* In 64-bit mode, 0x67 selects 32-bit addressing. */ + aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64); + } else { + /* In 16/32-bit mode, 0x66 selects the opposite data size. */ + if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) { + dflag = MO_32; + } else { + dflag = MO_16; + } + /* In 16/32-bit mode, 0x67 selects the opposite addressing. */ + if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) { + aflag = MO_32; + } else { + aflag = MO_16; + } + } + + s->prefix = prefixes; + s->aflag = aflag; + s->dflag = dflag; + + /* now check op code */ + reswitch: + switch(b) { + case 0x0f: + /**************************/ + /* extended op code */ + b = x86_ldub_code(env, s) | 0x100; + goto reswitch; + + /**************************/ + /* arith & logic */ + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + + case 0x08: + case 0x09: + case 0x0a: + case 0x0b: + case 0x0c: + case 0x0d: + + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + + case 0x18: + case 0x19: + case 0x1a: + case 0x1b: + case 0x1c: + case 0x1d: + + case 0x20: + case 0x21: + case 0x22: + case 0x23: + case 0x24: + case 0x25: + + case 0x28: + case 0x29: + case 0x2a: + case 0x2b: + case 0x2c: + case 0x2d: + + case 0x30: + case 0x31: + case 0x32: + case 0x33: + case 0x34: + case 0x35: + + case 0x38: + case 0x39: + case 0x3a: + case 0x3b: + case 0x3c: + case 0x3d: + { + int op, f, val; + op = (b >> 3) & 7; + f = (b >> 1) & 3; + + ot = mo_b_d(b, dflag); + + switch(f) { + case 0: /* OP Ev, Gv */ + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + opreg = OR_TMP0; + } else if (op == OP_XORL && rm == reg) { + xor_zero: + /* xor reg, reg optimisation */ + set_cc_op(s, CC_OP_CLR); + tcg_gen_movi_tl(tcg_ctx, s->T0, 0); + gen_op_mov_reg_v(s, ot, reg, s->T0); + break; + } else { + opreg = rm; + } + gen_op_mov_v_reg(s, ot, s->T1, reg); + gen_op(s, op, ot, opreg); + break; + case 1: /* OP Gv, Ev */ + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + reg = ((modrm >> 3) & 7) | rex_r; + rm = (modrm & 7) | REX_B(s); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); 
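+                    /*
+                     * ModRM worked example: modrm = 0x43 gives
+                     *   mod = (0x43 >> 6) & 3 = 1  (memory, disp8 form),
+                     *   reg = (0x43 >> 3) & 7 = 0  (before REX.R is or'ed in),
+                     *   rm  =  0x43 & 7      = 3.
+                     * mod != 3, so the Ev operand is read from memory here.
+                     */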
+ gen_op_ld_v(s, ot, s->T1, s->A0); + } else if (op == OP_XORL && rm == reg) { + goto xor_zero; + } else { + gen_op_mov_v_reg(s, ot, s->T1, rm); + } + gen_op(s, op, ot, reg); + break; + case 2: /* OP A, Iv */ + val = insn_get(env, s, ot); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); + gen_op(s, op, ot, OR_EAX); + break; + } + } + break; + + case 0x82: + if (CODE64(s)) + goto illegal_op; + /* fall through */ + case 0x80: /* GRP1 */ + case 0x81: + case 0x83: + { + int val; + + ot = mo_b_d(b, dflag); + + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + op = (modrm >> 3) & 7; + + if (mod != 3) { + if (b == 0x83) + s->rip_offset = 1; + else + s->rip_offset = insn_const_size(ot); + gen_lea_modrm(env, s, modrm); + opreg = OR_TMP0; + } else { + opreg = rm; + } + + switch(b) { + default: + case 0x80: + case 0x81: + case 0x82: + val = insn_get(env, s, ot); + break; + case 0x83: + val = (int8_t)insn_get(env, s, MO_8); + break; + } + tcg_gen_movi_tl(tcg_ctx, s->T1, val); + gen_op(s, op, ot, opreg); + } + break; + + /**************************/ + /* inc, dec, and other misc arith */ + case 0x40: /* inc Gv */ + case 0x41: /* inc Gv */ + case 0x42: /* inc Gv */ + case 0x43: /* inc Gv */ + case 0x44: /* inc Gv */ + case 0x45: /* inc Gv */ + case 0x46: /* inc Gv */ + case 0x47: /* inc Gv */ + ot = dflag; + gen_inc(s, ot, OR_EAX + (b & 7), 1); + break; + case 0x48: /* dec Gv */ + case 0x49: /* dec Gv */ + case 0x4a: /* dec Gv */ + case 0x4b: /* dec Gv */ + case 0x4c: /* dec Gv */ + case 0x4d: /* dec Gv */ + case 0x4e: /* dec Gv */ + case 0x4f: /* dec Gv */ + ot = dflag; + gen_inc(s, ot, OR_EAX + (b & 7), -1); + break; + case 0xf6: /* GRP3 */ + case 0xf7: + ot = mo_b_d(b, dflag); + + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + op = (modrm >> 3) & 7; + if (mod != 3) { + if (op == 0) { + s->rip_offset = insn_const_size(ot); + } + gen_lea_modrm(env, s, modrm); + /* For those below that handle locked memory, don't load here. 
*/ + if (!(s->prefix & PREFIX_LOCK) + || op != 2) { + gen_op_ld_v(s, ot, s->T0, s->A0); + } + } else { + gen_op_mov_v_reg(s, ot, s->T0, rm); + } + + switch(op) { + case 0: /* test */ + val = insn_get(env, s, ot); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); + gen_op_testl_T0_T1_cc(s); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + case 2: /* not */ + if (s->prefix & PREFIX_LOCK) { + if (mod == 3) { + goto illegal_op; + } + tcg_gen_movi_tl(tcg_ctx, s->T0, ~0); + tcg_gen_atomic_xor_fetch_tl(tcg_ctx, s->T0, s->A0, s->T0, + s->mem_index, ot | MO_LE); + } else { + tcg_gen_not_tl(tcg_ctx, s->T0, s->T0); + if (mod != 3) { + gen_op_st_v(s, ot, s->T0, s->A0); + } else { + gen_op_mov_reg_v(s, ot, rm, s->T0); + } + } + break; + case 3: /* neg */ + if (s->prefix & PREFIX_LOCK) { + TCGLabel *label1; + TCGv a0, t0, t1, t2; + + if (mod == 3) { + goto illegal_op; + } + a0 = tcg_temp_local_new(tcg_ctx); + t0 = tcg_temp_local_new(tcg_ctx); + label1 = gen_new_label(tcg_ctx); + + tcg_gen_mov_tl(tcg_ctx, a0, s->A0); + tcg_gen_mov_tl(tcg_ctx, t0, s->T0); + + gen_set_label(tcg_ctx, label1); + t1 = tcg_temp_new(tcg_ctx); + t2 = tcg_temp_new(tcg_ctx); + tcg_gen_mov_tl(tcg_ctx, t2, t0); + tcg_gen_neg_tl(tcg_ctx, t1, t0); + tcg_gen_atomic_cmpxchg_tl(tcg_ctx, t0, a0, t0, t1, + s->mem_index, ot | MO_LE); + tcg_temp_free(tcg_ctx, t1); + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, t0, t2, label1); + + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, a0); + tcg_gen_mov_tl(tcg_ctx, s->T0, t0); + tcg_temp_free(tcg_ctx, t0); + } else { + tcg_gen_neg_tl(tcg_ctx, s->T0, s->T0); + if (mod != 3) { + gen_op_st_v(s, ot, s->T0, s->A0); + } else { + gen_op_mov_reg_v(s, ot, rm, s->T0); + } + } + gen_op_update_neg_cc(s); + set_cc_op(s, CC_OP_SUBB + ot); + break; + case 4: /* mul */ + switch(ot) { + case MO_8: + gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); + tcg_gen_ext8u_tl(tcg_ctx, s->T0, s->T0); + tcg_gen_ext8u_tl(tcg_ctx, s->T1, s->T1); + /* XXX: use 32 bit mul which could be faster */ + tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0, 0xff00); + set_cc_op(s, CC_OP_MULB); + break; + case MO_16: + gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); + tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); + tcg_gen_ext16u_tl(tcg_ctx, s->T1, s->T1); + /* XXX: use 32 bit mul which could be faster */ + tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); + tcg_gen_shri_tl(tcg_ctx, s->T0, s->T0, 16); + gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0); + set_cc_op(s, CC_OP_MULW); + break; + default: + case MO_32: + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, tcg_ctx->cpu_regs[R_EAX]); + tcg_gen_mulu2_i32(tcg_ctx, s->tmp2_i32, s->tmp3_i32, + s->tmp2_i32, s->tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], s->tmp2_i32); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EDX], s->tmp3_i32); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[R_EAX]); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_regs[R_EDX]); + set_cc_op(s, CC_OP_MULL); + break; +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_mulu2_i64(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX], + s->T0, tcg_ctx->cpu_regs[R_EAX]); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[R_EAX]); + 
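+                    /*
+                     * Lazy-flags note: with CC_OP_MULQ, CC_DST is expected
+                     * to hold the low half of the product and CC_SRC the
+                     * high half, so CF/OF can later be derived from
+                     * CC_SRC != 0 instead of computing EFLAGS eagerly.
+                     */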
tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_regs[R_EDX]); + set_cc_op(s, CC_OP_MULQ); + break; +#endif + } + break; + case 5: /* imul */ + switch(ot) { + case MO_8: + gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); + tcg_gen_ext8s_tl(tcg_ctx, s->T0, s->T0); + tcg_gen_ext8s_tl(tcg_ctx, s->T1, s->T1); + /* XXX: use 32 bit mul which could be faster */ + tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); + tcg_gen_ext8s_tl(tcg_ctx, s->tmp0, s->T0); + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0, s->tmp0); + set_cc_op(s, CC_OP_MULB); + break; + case MO_16: + gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); + tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); + tcg_gen_ext16s_tl(tcg_ctx, s->T1, s->T1); + /* XXX: use 32 bit mul which could be faster */ + tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); + tcg_gen_ext16s_tl(tcg_ctx, s->tmp0, s->T0); + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0, s->tmp0); + tcg_gen_shri_tl(tcg_ctx, s->T0, s->T0, 16); + gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); + set_cc_op(s, CC_OP_MULW); + break; + default: + case MO_32: + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, tcg_ctx->cpu_regs[R_EAX]); + tcg_gen_muls2_i32(tcg_ctx, s->tmp2_i32, s->tmp3_i32, + s->tmp2_i32, s->tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], s->tmp2_i32); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EDX], s->tmp3_i32); + tcg_gen_sari_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, 31); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[R_EAX]); + tcg_gen_sub_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->tmp2_i32); + set_cc_op(s, CC_OP_MULL); + break; +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_muls2_i64(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX], + s->T0, tcg_ctx->cpu_regs[R_EAX]); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[R_EAX]); + tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_regs[R_EAX], 63); + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_regs[R_EDX]); + set_cc_op(s, CC_OP_MULQ); + break; +#endif + } + break; + case 6: /* div */ + switch(ot) { + case MO_8: + gen_helper_divb_AL(tcg_ctx, tcg_ctx->cpu_env, s->T0); + break; + case MO_16: + gen_helper_divw_AX(tcg_ctx, tcg_ctx->cpu_env, s->T0); + break; + default: + case MO_32: + gen_helper_divl_EAX(tcg_ctx, tcg_ctx->cpu_env, s->T0); + break; +#ifdef TARGET_X86_64 + case MO_64: + gen_helper_divq_EAX(tcg_ctx, tcg_ctx->cpu_env, s->T0); + break; +#endif + } + break; + case 7: /* idiv */ + switch(ot) { + case MO_8: + gen_helper_idivb_AL(tcg_ctx, tcg_ctx->cpu_env, s->T0); + break; + case MO_16: + gen_helper_idivw_AX(tcg_ctx, tcg_ctx->cpu_env, s->T0); + break; + default: + case MO_32: + gen_helper_idivl_EAX(tcg_ctx, tcg_ctx->cpu_env, s->T0); + break; +#ifdef TARGET_X86_64 + case MO_64: + gen_helper_idivq_EAX(tcg_ctx, tcg_ctx->cpu_env, s->T0); + break; +#endif + } + break; + default: + goto unknown_op; + } + break; + + case 0xfe: /* GRP4 */ + case 0xff: /* GRP5 */ + ot = mo_b_d(b, dflag); + + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + op = (modrm >> 3) & 7; + if (op >= 2 && b == 0xfe) { + goto unknown_op; + } + if (CODE64(s)) { + if (op == 2 || op == 4) { + /* operand size for jumps is 64 
bit */ + ot = MO_64; + } else if (op == 3 || op == 5) { + ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16; + } else if (op == 6) { + /* default push size is 64 bit */ + ot = mo_pushpop(s, dflag); + } + } + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + if (op >= 2 && op != 3 && op != 5) + gen_op_ld_v(s, ot, s->T0, s->A0); + } else { + gen_op_mov_v_reg(s, ot, s->T0, rm); + } + + switch(op) { + case 0: /* inc Ev */ + if (mod != 3) + opreg = OR_TMP0; + else + opreg = rm; + gen_inc(s, ot, opreg, 1); + break; + case 1: /* dec Ev */ + if (mod != 3) + opreg = OR_TMP0; + else + opreg = rm; + gen_inc(s, ot, opreg, -1); + break; + case 2: /* call Ev */ + /* XXX: optimize if memory (no 'and' is necessary) */ + if (dflag == MO_16) { + tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); + } + next_eip = s->pc - s->cs_base; + tcg_gen_movi_tl(tcg_ctx, s->T1, next_eip); + gen_push_v(s, s->T1); + gen_op_jmp_v(tcg_ctx, s->T0); + gen_bnd_jmp(s); + gen_jr(s, s->T0); + break; + case 3: /* lcall Ev */ + gen_op_ld_v(s, ot, s->T1, s->A0); + gen_add_A0_im(s, 1 << ot); + gen_op_ld_v(s, MO_16, s->T0, s->A0); + do_lcall: + if (s->pe && !s->vm86) { + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + gen_helper_lcall_protected(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->T1, + tcg_const_i32(tcg_ctx, dflag - 1), + tcg_const_tl(tcg_ctx, s->pc - s->cs_base)); + } else { + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + gen_helper_lcall_real(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->T1, + tcg_const_i32(tcg_ctx, dflag - 1), + tcg_const_i32(tcg_ctx, s->pc - s->cs_base)); + } + tcg_gen_ld_tl(tcg_ctx, s->tmp4, tcg_ctx->cpu_env, offsetof(CPUX86State, eip)); + gen_jr(s, s->tmp4); + break; + case 4: /* jmp Ev */ + if (dflag == MO_16) { + tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); + } + gen_op_jmp_v(tcg_ctx, s->T0); + gen_bnd_jmp(s); + gen_jr(s, s->T0); + break; + case 5: /* ljmp Ev */ + gen_op_ld_v(s, ot, s->T1, s->A0); + gen_add_A0_im(s, 1 << ot); + gen_op_ld_v(s, MO_16, s->T0, s->A0); + do_ljmp: + if (s->pe && !s->vm86) { + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + gen_helper_ljmp_protected(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->T1, + tcg_const_tl(tcg_ctx, s->pc - s->cs_base)); + } else { + gen_op_movl_seg_T0_vm(s, R_CS); + gen_op_jmp_v(tcg_ctx, s->T1); + } + tcg_gen_ld_tl(tcg_ctx, s->tmp4, tcg_ctx->cpu_env, offsetof(CPUX86State, eip)); + gen_jr(s, s->tmp4); + break; + case 6: /* push Ev */ + gen_push_v(s, s->T0); + break; + default: + goto unknown_op; + } + break; + + case 0x84: /* test Ev, Gv */ + case 0x85: + ot = mo_b_d(b, dflag); + + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_op_mov_v_reg(s, ot, s->T1, reg); + gen_op_testl_T0_T1_cc(s); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + + case 0xa8: /* test eAX, Iv */ + case 0xa9: + ot = mo_b_d(b, dflag); + val = insn_get(env, s, ot); + + gen_op_mov_v_reg(s, ot, s->T0, OR_EAX); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); + gen_op_testl_T0_T1_cc(s); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + + case 0x98: /* CWDE/CBW */ + switch (dflag) { +#ifdef TARGET_X86_64 + case MO_64: + gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); + tcg_gen_ext32s_tl(tcg_ctx, s->T0, s->T0); + gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0); + break; +#endif + case MO_32: + gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX); + tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); + gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0); + break; + case MO_16: + gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX); + tcg_gen_ext8s_tl(tcg_ctx, s->T0, s->T0); + 
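+            /* dflag == MO_16 is CBW: sign-extend AL into AX (the MO_32
+               arm above is CWDE, the MO_64 arm is CDQE). */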
gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); + break; + default: + tcg_abort(); + } + break; + case 0x99: /* CDQ/CWD */ + switch (dflag) { +#ifdef TARGET_X86_64 + case MO_64: + gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX); + tcg_gen_sari_tl(tcg_ctx, s->T0, s->T0, 63); + gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0); + break; +#endif + case MO_32: + gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); + tcg_gen_ext32s_tl(tcg_ctx, s->T0, s->T0); + tcg_gen_sari_tl(tcg_ctx, s->T0, s->T0, 31); + gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0); + break; + case MO_16: + gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX); + tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); + tcg_gen_sari_tl(tcg_ctx, s->T0, s->T0, 15); + gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); + break; + default: + tcg_abort(); + } + break; + case 0x1af: /* imul Gv, Ev */ + case 0x69: /* imul Gv, Ev, I */ + case 0x6b: + ot = dflag; + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + if (b == 0x69) + s->rip_offset = insn_const_size(ot); + else if (b == 0x6b) + s->rip_offset = 1; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + if (b == 0x69) { + val = insn_get(env, s, ot); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); + } else if (b == 0x6b) { + val = (int8_t)insn_get(env, s, MO_8); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); + } else { + gen_op_mov_v_reg(s, ot, s->T1, reg); + } + switch (ot) { +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_muls2_i64(tcg_ctx, tcg_ctx->cpu_regs[reg], s->T1, s->T0, s->T1); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[reg]); + tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_dst, 63); + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, s->T1); + break; +#endif + case MO_32: + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, s->T1); + tcg_gen_muls2_i32(tcg_ctx, s->tmp2_i32, s->tmp3_i32, + s->tmp2_i32, s->tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], s->tmp2_i32); + tcg_gen_sari_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, 31); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_regs[reg]); + tcg_gen_sub_i32(tcg_ctx, s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->tmp2_i32); + break; + default: + tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); + tcg_gen_ext16s_tl(tcg_ctx, s->T1, s->T1); + /* XXX: use 32 bit mul which could be faster */ + tcg_gen_mul_tl(tcg_ctx, s->T0, s->T0, s->T1); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); + tcg_gen_ext16s_tl(tcg_ctx, s->tmp0, s->T0); + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0, s->tmp0); + gen_op_mov_reg_v(s, ot, reg, s->T0); + break; + } + set_cc_op(s, CC_OP_MULB + ot); + break; + case 0x1c0: + case 0x1c1: /* xadd Ev, Gv */ + ot = mo_b_d(b, dflag); + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + gen_op_mov_v_reg(s, ot, s->T0, reg); + if (mod == 3) { + rm = (modrm & 7) | REX_B(s); + gen_op_mov_v_reg(s, ot, s->T1, rm); + tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); + gen_op_mov_reg_v(s, ot, reg, s->T1); + gen_op_mov_reg_v(s, ot, rm, s->T0); + } else { + gen_lea_modrm(env, s, modrm); + if (s->prefix & PREFIX_LOCK) { + tcg_gen_atomic_fetch_add_tl(tcg_ctx, s->T1, s->A0, s->T0, + s->mem_index, ot | MO_LE); + tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); + } else { + gen_op_ld_v(s, ot, s->T1, s->A0); + tcg_gen_add_tl(tcg_ctx, s->T0, s->T0, s->T1); + gen_op_st_v(s, ot, s->T0, s->A0); + } + gen_op_mov_reg_v(s, ot, reg, s->T1); + } + gen_op_update2_cc(s); + set_cc_op(s, CC_OP_ADDB + 
ot); + break; + case 0x1b0: + case 0x1b1: /* cmpxchg Ev, Gv */ + { + TCGv oldv, newv, cmpv; + + ot = mo_b_d(b, dflag); + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + oldv = tcg_temp_new(tcg_ctx); + newv = tcg_temp_new(tcg_ctx); + cmpv = tcg_temp_new(tcg_ctx); + gen_op_mov_v_reg(s, ot, newv, reg); + tcg_gen_mov_tl(tcg_ctx, cmpv, tcg_ctx->cpu_regs[R_EAX]); + + if (s->prefix & PREFIX_LOCK) { + if (mod == 3) { + goto illegal_op; + } + gen_lea_modrm(env, s, modrm); + tcg_gen_atomic_cmpxchg_tl(tcg_ctx, oldv, s->A0, cmpv, newv, + s->mem_index, ot | MO_LE); + gen_op_mov_reg_v(s, ot, R_EAX, oldv); + } else { + if (mod == 3) { + rm = (modrm & 7) | REX_B(s); + gen_op_mov_v_reg(s, ot, oldv, rm); + } else { + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, ot, oldv, s->A0); + rm = 0; /* avoid warning */ + } + gen_extu(tcg_ctx, ot, oldv); + gen_extu(tcg_ctx, ot, cmpv); + /* store value = (old == cmp ? new : old); */ + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); + if (mod == 3) { + gen_op_mov_reg_v(s, ot, R_EAX, oldv); + gen_op_mov_reg_v(s, ot, rm, newv); + } else { + /* Perform an unconditional store cycle like physical cpu; + must be before changing accumulator to ensure + idempotency if the store faults and the instruction + is restarted */ + gen_op_st_v(s, ot, newv, s->A0); + gen_op_mov_reg_v(s, ot, R_EAX, oldv); + } + } + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, oldv); + tcg_gen_mov_tl(tcg_ctx, s->cc_srcT, cmpv); + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cmpv, oldv); + set_cc_op(s, CC_OP_SUBB + ot); + tcg_temp_free(tcg_ctx, oldv); + tcg_temp_free(tcg_ctx, newv); + tcg_temp_free(tcg_ctx, cmpv); + } + break; + case 0x1c7: /* cmpxchg8b */ + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + switch ((modrm >> 3) & 7) { + case 1: /* CMPXCHG8, CMPXCHG16 */ + if (mod == 3) { + goto illegal_op; + } +#ifdef TARGET_X86_64 + if (dflag == MO_64) { + if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) { + goto illegal_op; + } + gen_lea_modrm(env, s, modrm); + if ((s->prefix & PREFIX_LOCK) && + (tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_cmpxchg16b(tcg_ctx, tcg_ctx->cpu_env, s->A0); + } else { + gen_helper_cmpxchg16b_unlocked(tcg_ctx, tcg_ctx->cpu_env, s->A0); + } + set_cc_op(s, CC_OP_EFLAGS); + break; + } +#endif + if (!(s->cpuid_features & CPUID_CX8)) { + goto illegal_op; + } + gen_lea_modrm(env, s, modrm); + if ((s->prefix & PREFIX_LOCK) && + (tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_cmpxchg8b(tcg_ctx, tcg_ctx->cpu_env, s->A0); + } else { + gen_helper_cmpxchg8b_unlocked(tcg_ctx, tcg_ctx->cpu_env, s->A0); + } + set_cc_op(s, CC_OP_EFLAGS); + break; + + case 7: /* RDSEED */ + case 6: /* RDRAND */ + if (mod != 3 || + (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) || + !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) { + goto illegal_op; + } + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_rdrand(tcg_ctx, s->T0, tcg_ctx->cpu_env); + rm = (modrm & 7) | REX_B(s); + gen_op_mov_reg_v(s, dflag, rm, s->T0); + set_cc_op(s, CC_OP_EFLAGS); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_jmp(s, s->pc - s->cs_base); + } + break; + + default: + goto illegal_op; + } + break; + + /**************************/ + /* push/pop */ + case 0x50: /* push */ + case 0x51: /* push */ + case 0x52: /* push */ + case 0x53: /* push */ + case 0x54: /* push */ + case 0x55: /* push */ + case 0x56: /* push */ + case 0x57: /* push */ + gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s)); 
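+ /* Note: the MO_32 size above only marks this as a non-byte access; gen_op_mov_v_reg copies the whole register for any non-byte size, and gen_push_v derives the real push width via mo_pushpop(). */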
+ gen_push_v(s, s->T0); + break; + case 0x58: /* pop */ + case 0x59: /* pop */ + case 0x5a: /* pop */ + case 0x5b: /* pop */ + case 0x5c: /* pop */ + case 0x5d: /* pop */ + case 0x5e: /* pop */ + case 0x5f: /* pop */ + ot = gen_pop_T0(s); + /* NOTE: order is important for pop %sp */ + gen_pop_update(s, ot); + gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0); + break; + case 0x60: /* pusha */ + if (CODE64(s)) + goto illegal_op; + gen_pusha(s); + break; + case 0x61: /* popa */ + if (CODE64(s)) + goto illegal_op; + gen_popa(s); + break; + case 0x68: /* push Iv */ + case 0x6a: + ot = mo_pushpop(s, dflag); + if (b == 0x68) + val = insn_get(env, s, ot); + else + val = (int8_t)insn_get(env, s, MO_8); + tcg_gen_movi_tl(tcg_ctx, s->T0, val); + gen_push_v(s, s->T0); + break; + case 0x8f: /* pop Ev */ + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + ot = gen_pop_T0(s); + if (mod == 3) { + /* NOTE: order is important for pop %sp */ + gen_pop_update(s, ot); + rm = (modrm & 7) | REX_B(s); + gen_op_mov_reg_v(s, ot, rm, s->T0); + } else { + /* NOTE: order is important too for MMU exceptions */ + s->popl_esp_hack = 1 << ot; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); + s->popl_esp_hack = 0; + gen_pop_update(s, ot); + } + break; + case 0xc8: /* enter */ + { + int level; + val = x86_lduw_code(env, s); + level = x86_ldub_code(env, s); + gen_enter(s, val, level); + } + break; + case 0xc9: /* leave */ + gen_leave(s); + break; + case 0x06: /* push es */ + case 0x0e: /* push cs */ + case 0x16: /* push ss */ + case 0x1e: /* push ds */ + if (CODE64(s)) + goto illegal_op; + gen_op_movl_T0_seg(s, b >> 3); + gen_push_v(s, s->T0); + break; + case 0x1a0: /* push fs */ + case 0x1a8: /* push gs */ + gen_op_movl_T0_seg(s, (b >> 3) & 7); + gen_push_v(s, s->T0); + break; + case 0x07: /* pop es */ + case 0x17: /* pop ss */ + case 0x1f: /* pop ds */ + if (CODE64(s)) + goto illegal_op; + reg = b >> 3; + ot = gen_pop_T0(s); + gen_movl_seg_T0(s, reg); + gen_pop_update(s, ot); + /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. 
*/ + if (s->base.is_jmp) { + gen_jmp_im(s, s->pc - s->cs_base); + if (reg == R_SS) { + s->tf = 0; + gen_eob_inhibit_irq(s, true); + } else { + gen_eob(s); + } + } + break; + case 0x1a1: /* pop fs */ + case 0x1a9: /* pop gs */ + ot = gen_pop_T0(s); + gen_movl_seg_T0(s, (b >> 3) & 7); + gen_pop_update(s, ot); + if (s->base.is_jmp) { + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + + /**************************/ + /* mov */ + case 0x88: + case 0x89: /* mov Gv, Ev */ + ot = mo_b_d(b, dflag); + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + + /* generate a generic store */ + gen_ldst_modrm(env, s, modrm, ot, reg, 1); + break; + case 0xc6: + case 0xc7: /* mov Ev, Iv */ + ot = mo_b_d(b, dflag); + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + reg = ((modrm >> 3) & 7) | rex_r; + if (mod != 3) { + if (reg != 0) + goto illegal_op; + s->rip_offset = insn_const_size(ot); + gen_lea_modrm(env, s, modrm); + } else { + if (reg != 0 && reg != 7) + goto illegal_op; + } + val = insn_get(env, s, ot); + tcg_gen_movi_tl(tcg_ctx, s->T0, val); + if (mod != 3) { + gen_op_st_v(s, ot, s->T0, s->A0); + } else { + gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0); + } + break; + case 0x8a: + case 0x8b: /* mov Ev, Gv */ + ot = mo_b_d(b, dflag); + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_op_mov_reg_v(s, ot, reg, s->T0); + break; + case 0x8e: /* mov seg, Gv */ + modrm = x86_ldub_code(env, s); + reg = (modrm >> 3) & 7; + if (reg >= 6 || reg == R_CS) + goto illegal_op; + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + gen_movl_seg_T0(s, reg); + /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */ + if (s->base.is_jmp) { + gen_jmp_im(s, s->pc - s->cs_base); + if (reg == R_SS) { + s->tf = 0; + gen_eob_inhibit_irq(s, true); + } else { + gen_eob(s); + } + } + break; + case 0x8c: /* mov Gv, seg */ + modrm = x86_ldub_code(env, s); + reg = (modrm >> 3) & 7; + mod = (modrm >> 6) & 3; + if (reg >= 6) + goto illegal_op; + gen_op_movl_T0_seg(s, reg); + ot = mod == 3 ? dflag : MO_16; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); + break; + + case 0x1b6: /* movzbS Gv, Eb */ + case 0x1b7: /* movzwS Gv, Eb */ + case 0x1be: /* movsbS Gv, Eb */ + case 0x1bf: /* movswS Gv, Eb */ + { + MemOp d_ot; + MemOp s_ot; + + /* d_ot is the size of destination */ + d_ot = dflag; + /* ot is the size of source */ + ot = (b & 1) + MO_8; + /* s_ot is the sign+size of source */ + s_ot = b & 8 ? 
MO_SIGN | ot : ot; + + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + + if (mod == 3) { + if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) { + tcg_gen_sextract_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[rm - 4], 8, 8); + } else { + gen_op_mov_v_reg(s, ot, s->T0, rm); + switch (s_ot) { + case MO_UB: + tcg_gen_ext8u_tl(tcg_ctx, s->T0, s->T0); + break; + case MO_SB: + tcg_gen_ext8s_tl(tcg_ctx, s->T0, s->T0); + break; + case MO_UW: + tcg_gen_ext16u_tl(tcg_ctx, s->T0, s->T0); + break; + default: + case MO_SW: + tcg_gen_ext16s_tl(tcg_ctx, s->T0, s->T0); + break; + } + } + gen_op_mov_reg_v(s, d_ot, reg, s->T0); + } else { + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, s_ot, s->T0, s->A0); + gen_op_mov_reg_v(s, d_ot, reg, s->T0); + } + } + break; + + case 0x8d: /* lea */ + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + reg = ((modrm >> 3) & 7) | rex_r; + { + AddressParts a = gen_lea_modrm_0(env, s, modrm); + TCGv ea = gen_lea_modrm_1(s, a); + gen_lea_v_seg(s, s->aflag, ea, -1, -1); + gen_op_mov_reg_v(s, dflag, reg, s->A0); + } + break; + + case 0xa0: /* mov EAX, Ov */ + case 0xa1: + case 0xa2: /* mov Ov, EAX */ + case 0xa3: + { + target_ulong offset_addr; + + ot = mo_b_d(b, dflag); + switch (s->aflag) { +#ifdef TARGET_X86_64 + case MO_64: + offset_addr = x86_ldq_code(env, s); + break; +#endif + default: + offset_addr = insn_get(env, s, s->aflag); + break; + } + tcg_gen_movi_tl(tcg_ctx, s->A0, offset_addr); + gen_add_A0_ds_seg(s); + if ((b & 2) == 0) { + gen_op_ld_v(s, ot, s->T0, s->A0); + gen_op_mov_reg_v(s, ot, R_EAX, s->T0); + } else { + gen_op_mov_v_reg(s, ot, s->T0, R_EAX); + gen_op_st_v(s, ot, s->T0, s->A0); + } + } + break; + case 0xd7: /* xlat */ + tcg_gen_mov_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_EBX]); + tcg_gen_ext8u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[R_EAX]); + tcg_gen_add_tl(tcg_ctx, s->A0, s->A0, s->T0); + gen_extu(tcg_ctx, s->aflag, s->A0); + gen_add_A0_ds_seg(s); + gen_op_ld_v(s, MO_8, s->T0, s->A0); + gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); + break; + case 0xb0: /* mov R, Ib */ + case 0xb1: /* mov R, Ib */ + case 0xb2: /* mov R, Ib */ + case 0xb3: /* mov R, Ib */ + case 0xb4: /* mov R, Ib */ + case 0xb5: /* mov R, Ib */ + case 0xb6: /* mov R, Ib */ + case 0xb7: /* mov R, Ib */ + val = insn_get(env, s, MO_8); + tcg_gen_movi_tl(tcg_ctx, s->T0, val); + gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0); + break; + case 0xb8: /* mov R, Iv */ + case 0xb9: /* mov R, Iv */ + case 0xba: /* mov R, Iv */ + case 0xbb: /* mov R, Iv */ + case 0xbc: /* mov R, Iv */ + case 0xbd: /* mov R, Iv */ + case 0xbe: /* mov R, Iv */ + case 0xbf: /* mov R, Iv */ +#ifdef TARGET_X86_64 + if (dflag == MO_64) { + uint64_t tmp; + /* 64 bit case */ + tmp = x86_ldq_code(env, s); + reg = (b & 7) | REX_B(s); + tcg_gen_movi_tl(tcg_ctx, s->T0, tmp); + gen_op_mov_reg_v(s, MO_64, reg, s->T0); + } else +#endif + { + ot = dflag; + val = insn_get(env, s, ot); + reg = (b & 7) | REX_B(s); + tcg_gen_movi_tl(tcg_ctx, s->T0, val); + gen_op_mov_reg_v(s, ot, reg, s->T0); + } + break; + + case 0x91: /* xchg R, EAX */ + case 0x92: /* xchg R, EAX */ + case 0x93: /* xchg R, EAX */ + case 0x94: /* xchg R, EAX */ + case 0x95: /* xchg R, EAX */ + case 0x96: /* xchg R, EAX */ + case 0x97: /* xchg R, EAX */ + do_xchg_reg_eax: + ot = dflag; + reg = (b & 7) | REX_B(s); + rm = R_EAX; + goto do_xchg_reg; + case 0x86: + case 0x87: /* xchg Ev, Gv */ + ot = mo_b_d(b, dflag); + modrm = x86_ldub_code(env, s); + reg = ((modrm 
>> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (mod == 3) { + rm = (modrm & 7) | REX_B(s); + do_xchg_reg: + gen_op_mov_v_reg(s, ot, s->T0, reg); + gen_op_mov_v_reg(s, ot, s->T1, rm); + gen_op_mov_reg_v(s, ot, rm, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T1); + } else { + gen_lea_modrm(env, s, modrm); + gen_op_mov_v_reg(s, ot, s->T0, reg); + /* for xchg, lock is implicit */ + tcg_gen_atomic_xchg_tl(tcg_ctx, s->T1, s->A0, s->T0, + s->mem_index, ot | MO_LE); + gen_op_mov_reg_v(s, ot, reg, s->T1); + } + break; + case 0xc4: /* les Gv */ + /* In CODE64 this is VEX3; see above. */ + op = R_ES; + goto do_lxx; + case 0xc5: /* lds Gv */ + /* In CODE64 this is VEX2; see above. */ + op = R_DS; + goto do_lxx; + case 0x1b2: /* lss Gv */ + op = R_SS; + goto do_lxx; + case 0x1b4: /* lfs Gv */ + op = R_FS; + goto do_lxx; + case 0x1b5: /* lgs Gv */ + op = R_GS; + do_lxx: + ot = dflag != MO_16 ? MO_32 : MO_16; + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, ot, s->T1, s->A0); + gen_add_A0_im(s, 1 << ot); + /* load the segment first to handle exceptions properly */ + gen_op_ld_v(s, MO_16, s->T0, s->A0); + gen_movl_seg_T0(s, op); + /* then put the data */ + gen_op_mov_reg_v(s, ot, reg, s->T1); + if (s->base.is_jmp) { + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + + /************************/ + /* shifts */ + case 0xc0: + case 0xc1: + /* shift Ev,Ib */ + shift = 2; + grp2_label: + { + ot = mo_b_d(b, dflag); + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + + if (mod != 3) { + if (shift == 2) { + s->rip_offset = 1; + } + gen_lea_modrm(env, s, modrm); + opreg = OR_TMP0; + } else { + opreg = (modrm & 7) | REX_B(s); + } + + /* simpler op */ + if (shift == 0) { + gen_shift(s, op, ot, opreg, OR_ECX); + } else { + if (shift == 2) { + shift = x86_ldub_code(env, s); + } + gen_shifti(s, op, ot, opreg, shift); + } + } + break; + case 0xd0: + case 0xd1: + /* shift Ev,1 */ + shift = 1; + goto grp2_label; + case 0xd2: + case 0xd3: + /* shift Ev,cl */ + shift = 0; + goto grp2_label; + + case 0x1a4: /* shld imm */ + op = 0; + shift = 1; + goto do_shiftd; + case 0x1a5: /* shld cl */ + op = 0; + shift = 0; + goto do_shiftd; + case 0x1ac: /* shrd imm */ + op = 1; + shift = 1; + goto do_shiftd; + case 0x1ad: /* shrd cl */ + op = 1; + shift = 0; + do_shiftd: + ot = dflag; + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + reg = ((modrm >> 3) & 7) | rex_r; + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + opreg = OR_TMP0; + } else { + opreg = rm; + } + gen_op_mov_v_reg(s, ot, s->T1, reg); + + if (shift) { + TCGv imm = tcg_const_tl(tcg_ctx, x86_ldub_code(env, s)); + gen_shiftd_rm_T1(s, ot, opreg, op, imm); + tcg_temp_free(tcg_ctx, imm); + } else { + gen_shiftd_rm_T1(s, ot, opreg, op, tcg_ctx->cpu_regs[R_ECX]); + } + break; + + /************************/ + /* floats */ + case 0xd8: + case 0xd9: + case 0xda: + case 0xdb: + case 0xdc: + case 0xdd: + case 0xde: + case 0xdf: + { + bool update_fip = true; + + if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { + /* if CR0.EM or CR0.TS are set, generate an FPU exception */ + /* XXX: what to do if illegal op ? 
*/ + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + break; + } + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + rm = modrm & 7; + op = ((b & 7) << 3) | ((modrm >> 3) & 7); + if (mod != 3) { + /* memory op */ + AddressParts a = gen_lea_modrm_0(env, s, modrm); + TCGv ea = gen_lea_modrm_1(s, a); + TCGv last_addr = tcg_temp_new(tcg_ctx); + bool update_fdp = true; + + tcg_gen_mov_tl(tcg_ctx, last_addr, ea); + gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override); + switch(op) { + case 0x00: /* fxxxs */ + case 0x01: /* fxxxs */ + case 0x02: /* fxxxs */ + case 0x03: /* fxxxs */ + case 0x04: /* fxxxs */ + case 0x05: /* fxxxs */ + case 0x06: /* fxxxs */ + case 0x07: /* fxxxs */ + + case 0x10: /* fixxxl */ + case 0x11: /* fixxxl */ + case 0x12: /* fixxxl */ + case 0x13: /* fixxxl */ + case 0x14: /* fixxxl */ + case 0x15: /* fixxxl */ + case 0x16: /* fixxxl */ + case 0x17: /* fixxxl */ + + case 0x20: /* fxxxl */ + case 0x21: /* fxxxl */ + case 0x22: /* fxxxl */ + case 0x23: /* fxxxl */ + case 0x24: /* fxxxl */ + case 0x25: /* fxxxl */ + case 0x26: /* fxxxl */ + case 0x27: /* fxxxl */ + + case 0x30: /* fixxx */ + case 0x31: /* fixxx */ + case 0x32: /* fixxx */ + case 0x33: /* fixxx */ + case 0x34: /* fixxx */ + case 0x35: /* fixxx */ + case 0x36: /* fixxx */ + case 0x37: /* fixxx */ + { + int op1; + op1 = op & 7; + + switch(op >> 4) { + case 0: + tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUL); + gen_helper_flds_FT0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + break; + case 1: + tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUL); + gen_helper_fildl_FT0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + break; + case 2: + tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, + s->mem_index, MO_LEQ); + gen_helper_fldl_FT0(tcg_ctx, tcg_ctx->cpu_env, s->tmp1_i64); + break; + case 3: + default: + tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LESW); + gen_helper_fildl_FT0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + break; + } + + gen_helper_fp_arith_ST0_FT0(tcg_ctx, op1); + if (op1 == 3) { + /* fcomp needs pop */ + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + } + } + break; + case 0x08: /* flds */ + case 0x0a: /* fsts */ + case 0x0b: /* fstps */ + + case 0x18: /* fildl, fisttpl, fistl, fistpl */ + case 0x19: /* fildl, fisttpl, fistl, fistpl */ + case 0x1a: /* fildl, fisttpl, fistl, fistpl */ + case 0x1b: /* fildl, fisttpl, fistl, fistpl */ + + case 0x28: /* fldl, fisttpll, fstl, fstpl */ + case 0x29: /* fldl, fisttpll, fstl, fstpl */ + case 0x2a: /* fldl, fisttpll, fstl, fstpl */ + case 0x2b: /* fldl, fisttpll, fstl, fstpl */ + + case 0x38: /* filds, fisttps, fists, fistps */ + case 0x39: /* filds, fisttps, fists, fistps */ + case 0x3a: /* filds, fisttps, fists, fistps */ + case 0x3b: /* filds, fisttps, fists, fistps */ + switch(op & 7) { + case 0: + switch(op >> 4) { + case 0: + tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUL); + gen_helper_flds_ST0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + break; + case 1: + tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUL); + gen_helper_fildl_ST0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + break; + case 2: + tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, + s->mem_index, MO_LEQ); + gen_helper_fldl_ST0(tcg_ctx, tcg_ctx->cpu_env, s->tmp1_i64); + break; + case 3: + default: + tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LESW); + gen_helper_fildl_ST0(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + break; + } + break; + case 1: + /* 
XXX: the corresponding CPUID bit must be tested ! */ + switch(op >> 4) { + case 1: + gen_helper_fisttl_ST0(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); + tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUL); + break; + case 2: + gen_helper_fisttll_ST0(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env); + tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, + s->mem_index, MO_LEQ); + break; + case 3: + default: + gen_helper_fistt_ST0(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); + tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUW); + break; + } + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + default: + switch(op >> 4) { + case 0: + gen_helper_fsts_ST0(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); + tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUL); + break; + case 1: + gen_helper_fistl_ST0(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); + tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUL); + break; + case 2: + gen_helper_fstl_ST0(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env); + tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, + s->mem_index, MO_LEQ); + break; + case 3: + default: + gen_helper_fist_ST0(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); + tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUW); + break; + } + if ((op & 7) == 3) + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + } + break; + case 0x0c: /* fldenv mem */ + gen_helper_fldenv(tcg_ctx, tcg_ctx->cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1)); + update_fip = update_fdp = false; + break; + case 0x0d: /* fldcw mem */ + tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUW); + gen_helper_fldcw(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + update_fip = update_fdp = false; + break; + case 0x0e: /* fnstenv mem */ + gen_helper_fstenv(tcg_ctx, tcg_ctx->cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1)); + update_fip = update_fdp = false; + break; + case 0x0f: /* fnstcw mem */ + gen_helper_fnstcw(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); + tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUW); + update_fip = update_fdp = false; + break; + case 0x1d: /* fldt mem */ + gen_helper_fldt_ST0(tcg_ctx, tcg_ctx->cpu_env, s->A0); + break; + case 0x1f: /* fstpt mem */ + gen_helper_fstt_ST0(tcg_ctx, tcg_ctx->cpu_env, s->A0); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + case 0x2c: /* frstor mem */ + gen_helper_frstor(tcg_ctx, tcg_ctx->cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1)); + update_fip = update_fdp = false; + break; + case 0x2e: /* fnsave mem */ + gen_helper_fsave(tcg_ctx, tcg_ctx->cpu_env, s->A0, tcg_const_i32(tcg_ctx, dflag - 1)); + update_fip = update_fdp = false; + break; + case 0x2f: /* fnstsw mem */ + gen_helper_fnstsw(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); + tcg_gen_qemu_st_i32(tcg_ctx, s->tmp2_i32, s->A0, + s->mem_index, MO_LEUW); + update_fip = update_fdp = false; + break; + case 0x3c: /* fbld */ + gen_helper_fbld_ST0(tcg_ctx, tcg_ctx->cpu_env, s->A0); + break; + case 0x3e: /* fbstp */ + gen_helper_fbst_ST0(tcg_ctx, tcg_ctx->cpu_env, s->A0); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + case 0x3d: /* fildll */ + tcg_gen_qemu_ld_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); + gen_helper_fildll_ST0(tcg_ctx, tcg_ctx->cpu_env, s->tmp1_i64); + break; + case 0x3f: /* fistpll */ + gen_helper_fistll_ST0(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env); + tcg_gen_qemu_st_i64(tcg_ctx, s->tmp1_i64, s->A0, s->mem_index, MO_LEQ); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + default: 
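+ /* any remaining memory-form x87 encoding is undefined */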
+ goto unknown_op; + } + + if (update_fdp) { + int last_seg = s->override >= 0 ? s->override : a.def_seg; + + tcg_gen_ld_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, + offsetof(CPUX86State, + segs[last_seg].selector)); + tcg_gen_st16_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, + offsetof(CPUX86State, fpds)); + tcg_gen_st_tl(tcg_ctx, last_addr, tcg_ctx->cpu_env, + offsetof(CPUX86State, fpdp)); + } + tcg_temp_free(tcg_ctx, last_addr); + } else { + /* register float ops */ + opreg = rm; + + switch(op) { + case 0x08: /* fld sti */ + gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fmov_ST0_STN(tcg_ctx, tcg_ctx->cpu_env, + tcg_const_i32(tcg_ctx, (opreg + 1) & 7)); + break; + case 0x09: /* fxchg sti */ + case 0x29: /* fxchg4 sti, undocumented op */ + case 0x39: /* fxchg7 sti, undocumented op */ + gen_helper_fxchg_ST0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + break; + case 0x0a: /* grp d9/2 */ + switch(rm) { + case 0: /* fnop */ + /* check exceptions (FreeBSD FPU probe) */ + gen_helper_fwait(tcg_ctx, tcg_ctx->cpu_env); + update_fip = false; + break; + default: + goto unknown_op; + } + break; + case 0x0c: /* grp d9/4 */ + switch(rm) { + case 0: /* fchs */ + gen_helper_fchs_ST0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 1: /* fabs */ + gen_helper_fabs_ST0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 4: /* ftst */ + gen_helper_fldz_FT0(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 5: /* fxam */ + gen_helper_fxam_ST0(tcg_ctx, tcg_ctx->cpu_env); + break; + default: + goto unknown_op; + } + break; + case 0x0d: /* grp d9/5 */ + { + switch(rm) { + case 0: + gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fld1_ST0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 1: + gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fldl2t_ST0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 2: + gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fldl2e_ST0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 3: + gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fldpi_ST0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 4: + gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fldlg2_ST0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 5: + gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fldln2_ST0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 6: + gen_helper_fpush(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fldz_ST0(tcg_ctx, tcg_ctx->cpu_env); + break; + default: + goto unknown_op; + } + } + break; + case 0x0e: /* grp d9/6 */ + switch(rm) { + case 0: /* f2xm1 */ + gen_helper_f2xm1(tcg_ctx, tcg_ctx->cpu_env); + break; + case 1: /* fyl2x */ + gen_helper_fyl2x(tcg_ctx, tcg_ctx->cpu_env); + break; + case 2: /* fptan */ + gen_helper_fptan(tcg_ctx, tcg_ctx->cpu_env); + break; + case 3: /* fpatan */ + gen_helper_fpatan(tcg_ctx, tcg_ctx->cpu_env); + break; + case 4: /* fxtract */ + gen_helper_fxtract(tcg_ctx, tcg_ctx->cpu_env); + break; + case 5: /* fprem1 */ + gen_helper_fprem1(tcg_ctx, tcg_ctx->cpu_env); + break; + case 6: /* fdecstp */ + gen_helper_fdecstp(tcg_ctx, tcg_ctx->cpu_env); + break; + default: + case 7: /* fincstp */ + gen_helper_fincstp(tcg_ctx, tcg_ctx->cpu_env); + break; + } + break; + case 0x0f: /* grp d9/7 */ + switch(rm) { + case 0: /* fprem */ + gen_helper_fprem(tcg_ctx, tcg_ctx->cpu_env); + break; + case 1: /* fyl2xp1 */ + gen_helper_fyl2xp1(tcg_ctx, tcg_ctx->cpu_env); + break; + case 2: /* fsqrt */ + gen_helper_fsqrt(tcg_ctx, tcg_ctx->cpu_env); + break; + case 3: /* fsincos */ + gen_helper_fsincos(tcg_ctx, 
tcg_ctx->cpu_env); + break; + case 5: /* fscale */ + gen_helper_fscale(tcg_ctx, tcg_ctx->cpu_env); + break; + case 4: /* frndint */ + gen_helper_frndint(tcg_ctx, tcg_ctx->cpu_env); + break; + case 6: /* fsin */ + gen_helper_fsin(tcg_ctx, tcg_ctx->cpu_env); + break; + default: + case 7: /* fcos */ + gen_helper_fcos(tcg_ctx, tcg_ctx->cpu_env); + break; + } + break; + case 0x00: case 0x01: + case 0x04: /* fxxx st, sti */ + case 0x05: /* fxxx st, sti */ + case 0x06: /* fxxx st, sti */ + case 0x07: /* fxxx st, sti */ + + case 0x20: case 0x21: + case 0x24: /* fxxx sti, st */ + case 0x25: /* fxxx sti, st */ + case 0x26: /* fxxx sti, st */ + case 0x27: /* fxxx sti, st */ + + case 0x30: case 0x31: + case 0x34: /* fxxxp sti, st */ + case 0x35: /* fxxxp sti, st */ + case 0x36: /* fxxxp sti, st */ + case 0x37: /* fxxxp sti, st */ + { + int op1; + + op1 = op & 7; + if (op >= 0x20) { + gen_helper_fp_arith_STN_ST0(tcg_ctx, op1, opreg); + if (op >= 0x30) + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + } else { + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fp_arith_ST0_FT0(tcg_ctx, op1); + } + } + break; + case 0x02: /* fcom */ + case 0x22: /* fcom2, undocumented op */ + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 0x03: /* fcomp */ + case 0x23: /* fcomp3, undocumented op */ + case 0x32: /* fcomp5, undocumented op */ + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + case 0x15: /* da/5 */ + switch(rm) { + case 1: /* fucompp */ + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 1)); + gen_helper_fucom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + default: + goto unknown_op; + } + break; + case 0x1c: + switch(rm) { + case 0: /* feni (287 only, just do nop here) */ + break; + case 1: /* fdisi (287 only, just do nop here) */ + break; + case 2: /* fclex */ + gen_helper_fclex(tcg_ctx, tcg_ctx->cpu_env); + update_fip = false; + break; + case 3: /* fninit */ + gen_helper_fninit(tcg_ctx, tcg_ctx->cpu_env); + update_fip = false; + break; + case 4: /* fsetpm (287 only, just do nop here) */ + break; + default: + goto unknown_op; + } + break; + case 0x1d: /* fucomi */ + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fucomi_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x1e: /* fcomi */ + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fcomi_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x28: /* ffree sti */ + gen_helper_ffree_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + break; + case 0x2a: /* fst sti */ + gen_helper_fmov_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + break; + case 0x2b: /* fstp sti */ + case 0x0b: /* fstp1 sti, undocumented op */ + case 0x3a: /* fstp8 sti, undocumented op */ + case 0x3b: /* fstp9 sti, undocumented op */ + gen_helper_fmov_STN_ST0(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + 
gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + case 0x2c: /* fucom st(i) */ + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fucom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + break; + case 0x2d: /* fucomp st(i) */ + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fucom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + case 0x33: /* de/3 */ + switch(rm) { + case 1: /* fcompp */ + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 1)); + gen_helper_fcom_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + default: + goto unknown_op; + } + break; + case 0x38: /* ffreep sti, undocumented op */ + gen_helper_ffree_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + break; + case 0x3c: /* df/4 */ + switch(rm) { + case 0: + gen_helper_fnstsw(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env); + tcg_gen_extu_i32_tl(tcg_ctx, s->T0, s->tmp2_i32); + gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); + break; + default: + goto unknown_op; + } + break; + case 0x3d: /* fucomip */ + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fucomi_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x3e: /* fcomip */ + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_helper_fmov_FT0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fcomi_ST0_FT0(tcg_ctx, tcg_ctx->cpu_env); + gen_helper_fpop(tcg_ctx, tcg_ctx->cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x10: /* fcmovxx */ + case 0x11: /* fcmovxx */ + case 0x12: /* fcmovxx */ + case 0x13: /* fcmovxx */ + + case 0x18: + case 0x19: + case 0x1a: + case 0x1b: + { + int op1; + TCGLabel *l1; + static const uint8_t fcmov_cc[8] = { + (JCC_B << 1), + (JCC_Z << 1), + (JCC_BE << 1), + (JCC_P << 1), + }; + + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); + l1 = gen_new_label(tcg_ctx); + gen_jcc1_noeob(s, op1, l1); + gen_helper_fmov_ST0_STN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_set_label(tcg_ctx, l1); + } + break; + default: + goto unknown_op; + } + } + + if (update_fip) { + tcg_gen_ld_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, + offsetof(CPUX86State, segs[R_CS].selector)); + tcg_gen_st16_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, + offsetof(CPUX86State, fpcs)); + tcg_gen_st_tl(tcg_ctx, tcg_const_tl(tcg_ctx, pc_start - s->cs_base), + tcg_ctx->cpu_env, offsetof(CPUX86State, fpip)); + } + } + break; + /************************/ + /* string ops */ + + case 0xa4: /* movsS */ + case 0xa5: + ot = mo_b_d(b, dflag); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_movs(s, ot); + } + break; + + case 0xaa: /* stosS */ + case 0xab: + ot = mo_b_d(b, dflag); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_stos(s, ot); + } + break; + case 0xac: /* lodsS */ + case 0xad: + ot = mo_b_d(b, dflag); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_lods(s, 
ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_lods(s, ot); + } + break; + case 0xae: /* scasS */ + case 0xaf: + ot = mo_b_d(b, dflag); + if (prefixes & PREFIX_REPNZ) { + gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); + } else if (prefixes & PREFIX_REPZ) { + gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); + } else { + gen_scas(s, ot); + } + break; + + case 0xa6: /* cmpsS */ + case 0xa7: + ot = mo_b_d(b, dflag); + if (prefixes & PREFIX_REPNZ) { + gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); + } else if (prefixes & PREFIX_REPZ) { + gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); + } else { + gen_cmps(s, ot); + } + break; + case 0x6c: /* insS */ + case 0x6d: + ot = mo_b_d32(b, dflag); + tcg_gen_ext16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[R_EDX]); + gen_check_io(s, ot, pc_start - s->cs_base, + SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_ins(s, ot); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_jmp(s, s->pc - s->cs_base); + } + } + break; + case 0x6e: /* outsS */ + case 0x6f: + ot = mo_b_d32(b, dflag); + tcg_gen_ext16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[R_EDX]); + gen_check_io(s, ot, pc_start - s->cs_base, + svm_is_rep(prefixes) | 4); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_outs(s, ot); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_jmp(s, s->pc - s->cs_base); + } + } + break; + + /************************/ + /* port I/O */ + + case 0xe4: + case 0xe5: + ot = mo_b_d32(b, dflag); + val = x86_ldub_code(env, s); + tcg_gen_movi_tl(tcg_ctx, s->T0, val); + gen_check_io(s, ot, pc_start - s->cs_base, + SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, val); + gen_helper_in_func(tcg_ctx, ot, s->T1, s->tmp2_i32); + gen_op_mov_reg_v(s, ot, R_EAX, s->T1); + gen_bpt_io(s, s->tmp2_i32, ot); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_jmp(s, s->pc - s->cs_base); + } + break; + case 0xe6: + case 0xe7: + ot = mo_b_d32(b, dflag); + val = x86_ldub_code(env, s); + tcg_gen_movi_tl(tcg_ctx, s->T0, val); + gen_check_io(s, ot, pc_start - s->cs_base, + svm_is_rep(prefixes)); + gen_op_mov_v_reg(s, ot, s->T1, R_EAX); + + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, val); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, s->T1); + gen_helper_out_func(tcg_ctx, ot, s->tmp2_i32, s->tmp3_i32); + gen_bpt_io(s, s->tmp2_i32, ot); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_jmp(s, s->pc - s->cs_base); + } + break; + case 0xec: + case 0xed: + ot = mo_b_d32(b, dflag); + tcg_gen_ext16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[R_EDX]); + gen_check_io(s, ot, pc_start - s->cs_base, + SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + gen_helper_in_func(tcg_ctx, ot, s->T1, s->tmp2_i32); + gen_op_mov_reg_v(s, ot, R_EAX, s->T1); + gen_bpt_io(s, s->tmp2_i32, ot); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_jmp(s, s->pc - s->cs_base); + } + break; + case 0xee: + case 0xef: + ot = mo_b_d32(b, dflag); + tcg_gen_ext16u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[R_EDX]); + gen_check_io(s, ot, 
pc_start - s->cs_base, + svm_is_rep(prefixes)); + gen_op_mov_v_reg(s, ot, s->T1, R_EAX); + + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp3_i32, s->T1); + gen_helper_out_func(tcg_ctx, ot, s->tmp2_i32, s->tmp3_i32); + gen_bpt_io(s, s->tmp2_i32, ot); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_jmp(s, s->pc - s->cs_base); + } + break; + + /************************/ + /* control */ + case 0xc2: /* ret im */ + val = x86_ldsw_code(env, s); + ot = gen_pop_T0(s); + gen_stack_update(s, val + (1 << ot)); + /* Note that gen_pop_T0 uses a zero-extending load. */ + gen_op_jmp_v(tcg_ctx, s->T0); + gen_bnd_jmp(s); + gen_jr(s, s->T0); + break; + case 0xc3: /* ret */ + ot = gen_pop_T0(s); + gen_pop_update(s, ot); + /* Note that gen_pop_T0 uses a zero-extending load. */ + gen_op_jmp_v(tcg_ctx, s->T0); + gen_bnd_jmp(s); + gen_jr(s, s->T0); + break; + case 0xca: /* lret im */ + val = x86_ldsw_code(env, s); + do_lret: + if (s->pe && !s->vm86) { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_lret_protected(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1), + tcg_const_i32(tcg_ctx, val)); + } else { + gen_stack_A0(s); + /* pop offset */ + gen_op_ld_v(s, dflag, s->T0, s->A0); + /* NOTE: keeping EIP updated is not a problem in case of + exception */ + gen_op_jmp_v(tcg_ctx, s->T0); + /* pop selector */ + gen_add_A0_im(s, 1 << dflag); + gen_op_ld_v(s, dflag, s->T0, s->A0); + gen_op_movl_seg_T0_vm(s, R_CS); + /* add stack offset */ + gen_stack_update(s, val + (2 << dflag)); + } + gen_eob(s); + break; + case 0xcb: /* lret */ + val = 0; + goto do_lret; + case 0xcf: /* iret */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); + if (!s->pe) { + /* real mode */ + gen_helper_iret_real(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); + set_cc_op(s, CC_OP_EFLAGS); + } else if (s->vm86) { + if (s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_helper_iret_real(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); + set_cc_op(s, CC_OP_EFLAGS); + } + } else { + gen_helper_iret_protected(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1), + tcg_const_i32(tcg_ctx, s->pc - s->cs_base)); + set_cc_op(s, CC_OP_EFLAGS); + } + gen_eob(s); + break; + case 0xe8: /* call im */ + { + if (dflag != MO_16) { + tval = (int32_t)insn_get(env, s, MO_32); + } else { + tval = (int16_t)insn_get(env, s, MO_16); + } + next_eip = s->pc - s->cs_base; + tval += next_eip; + if (dflag == MO_16) { + tval &= 0xffff; + } else if (!CODE64(s)) { + tval &= 0xffffffff; + } + tcg_gen_movi_tl(tcg_ctx, s->T0, next_eip); + gen_push_v(s, s->T0); + gen_bnd_jmp(s); + gen_jmp(s, tval); + } + break; + case 0x9a: /* lcall im */ + { + unsigned int selector, offset; + + if (CODE64(s)) + goto illegal_op; + ot = dflag; + offset = insn_get(env, s, ot); + selector = insn_get(env, s, MO_16); + + tcg_gen_movi_tl(tcg_ctx, s->T0, selector); + tcg_gen_movi_tl(tcg_ctx, s->T1, offset); + } + goto do_lcall; + case 0xe9: /* jmp im */ + if (dflag != MO_16) { + tval = (int32_t)insn_get(env, s, MO_32); + } else { + tval = (int16_t)insn_get(env, s, MO_16); + } + tval += s->pc - s->cs_base; + if (dflag == MO_16) { + tval &= 0xffff; + } else if (!CODE64(s)) { + tval &= 0xffffffff; + } + gen_bnd_jmp(s); + gen_jmp(s, tval); + break; + case 0xea: /* ljmp im */ + { + unsigned int selector, offset; + + if (CODE64(s)) + goto illegal_op; + ot = dflag; + offset 
= insn_get(env, s, ot); + selector = insn_get(env, s, MO_16); + + tcg_gen_movi_tl(tcg_ctx, s->T0, selector); + tcg_gen_movi_tl(tcg_ctx, s->T1, offset); + } + goto do_ljmp; + case 0xeb: /* jmp Jb */ + tval = (int8_t)insn_get(env, s, MO_8); + tval += s->pc - s->cs_base; + if (dflag == MO_16) { + tval &= 0xffff; + } + gen_jmp(s, tval); + break; + case 0x70: /* jcc Jb */ + case 0x71: /* jcc Jb */ + case 0x72: /* jcc Jb */ + case 0x73: /* jcc Jb */ + case 0x74: /* jcc Jb */ + case 0x75: /* jcc Jb */ + case 0x76: /* jcc Jb */ + case 0x77: /* jcc Jb */ + case 0x78: /* jcc Jb */ + case 0x79: /* jcc Jb */ + case 0x7a: /* jcc Jb */ + case 0x7b: /* jcc Jb */ + case 0x7c: /* jcc Jb */ + case 0x7d: /* jcc Jb */ + case 0x7e: /* jcc Jb */ + case 0x7f: /* jcc Jb */ + tval = (int8_t)insn_get(env, s, MO_8); + goto do_jcc; + case 0x180: /* jcc Jv */ + case 0x181: /* jcc Jv */ + case 0x182: /* jcc Jv */ + case 0x183: /* jcc Jv */ + case 0x184: /* jcc Jv */ + case 0x185: /* jcc Jv */ + case 0x186: /* jcc Jv */ + case 0x187: /* jcc Jv */ + case 0x188: /* jcc Jv */ + case 0x189: /* jcc Jv */ + case 0x18a: /* jcc Jv */ + case 0x18b: /* jcc Jv */ + case 0x18c: /* jcc Jv */ + case 0x18d: /* jcc Jv */ + case 0x18e: /* jcc Jv */ + case 0x18f: /* jcc Jv */ + if (dflag != MO_16) { + tval = (int32_t)insn_get(env, s, MO_32); + } else { + tval = (int16_t)insn_get(env, s, MO_16); + } + do_jcc: + next_eip = s->pc - s->cs_base; + tval += next_eip; + if (dflag == MO_16) { + tval &= 0xffff; + } + gen_bnd_jmp(s); + gen_jcc(s, b, tval, next_eip); + break; + + case 0x190: /* setcc Gv */ + case 0x191: /* setcc Gv */ + case 0x192: /* setcc Gv */ + case 0x193: /* setcc Gv */ + case 0x194: /* setcc Gv */ + case 0x195: /* setcc Gv */ + case 0x196: /* setcc Gv */ + case 0x197: /* setcc Gv */ + case 0x198: /* setcc Gv */ + case 0x199: /* setcc Gv */ + case 0x19a: /* setcc Gv */ + case 0x19b: /* setcc Gv */ + case 0x19c: /* setcc Gv */ + case 0x19d: /* setcc Gv */ + case 0x19e: /* setcc Gv */ + case 0x19f: /* setcc Gv */ + modrm = x86_ldub_code(env, s); + gen_setcc1(s, b, s->T0); + gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1); + break; + case 0x140: /* cmov Gv, Ev */ + case 0x141: /* cmov Gv, Ev */ + case 0x142: /* cmov Gv, Ev */ + case 0x143: /* cmov Gv, Ev */ + case 0x144: /* cmov Gv, Ev */ + case 0x145: /* cmov Gv, Ev */ + case 0x146: /* cmov Gv, Ev */ + case 0x147: /* cmov Gv, Ev */ + case 0x148: /* cmov Gv, Ev */ + case 0x149: /* cmov Gv, Ev */ + case 0x14a: /* cmov Gv, Ev */ + case 0x14b: /* cmov Gv, Ev */ + case 0x14c: /* cmov Gv, Ev */ + case 0x14d: /* cmov Gv, Ev */ + case 0x14e: /* cmov Gv, Ev */ + case 0x14f: /* cmov Gv, Ev */ + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + ot = dflag; + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + gen_cmovcc1(env, s, ot, b, modrm, reg); + break; + + /************************/ + /* flags */ + case 0x9c: /* pushf */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); + if (s->vm86 && s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + gen_helper_read_eflags(tcg_ctx, s->T0, tcg_ctx->cpu_env); + gen_push_v(s, s->T0); + } + break; + case 0x9d: /* popf */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); + if (s->vm86 && s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + ot = gen_pop_T0(s); + if (s->cpl == 0) { + if (dflag != MO_16) { + gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, + tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | + ID_MASK | 
NT_MASK | + IF_MASK | + IOPL_MASK))); + } else { + gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, + tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | + ID_MASK | NT_MASK | + IF_MASK | IOPL_MASK) + & 0xffff)); + } + } else { + if (s->cpl <= s->iopl) { + if (dflag != MO_16) { + gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, + tcg_const_i32(tcg_ctx, (TF_MASK | + AC_MASK | + ID_MASK | + NT_MASK | + IF_MASK))); + } else { + gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, + tcg_const_i32(tcg_ctx, (TF_MASK | + AC_MASK | + ID_MASK | + NT_MASK | + IF_MASK) + & 0xffff)); + } + } else { + if (dflag != MO_16) { + gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, + tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | + ID_MASK | NT_MASK))); + } else { + gen_helper_write_eflags(tcg_ctx, tcg_ctx->cpu_env, s->T0, + tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | + ID_MASK | NT_MASK) + & 0xffff)); + } + } + } + gen_pop_update(s, ot); + set_cc_op(s, CC_OP_EFLAGS); + /* abort translation because TF/AC flag may change */ + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + case 0x9e: /* sahf */ + if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) + goto illegal_op; + gen_op_mov_v_reg(s, MO_8, s->T0, R_AH); + gen_compute_eflags(s); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, CC_O); + tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, s->T0); + break; + case 0x9f: /* lahf */ + if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) + goto illegal_op; + gen_compute_eflags(s); + /* Note: gen_compute_eflags() only gives the condition codes */ + tcg_gen_ori_tl(tcg_ctx, s->T0, tcg_ctx->cpu_cc_src, 0x02); + gen_op_mov_reg_v(s, MO_8, R_AH, s->T0); + break; + case 0xf5: /* cmc */ + gen_compute_eflags(s); + tcg_gen_xori_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, CC_C); + break; + case 0xf8: /* clc */ + gen_compute_eflags(s); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, ~CC_C); + break; + case 0xf9: /* stc */ + gen_compute_eflags(s); + tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, CC_C); + break; + case 0xfc: /* cld */ + tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, 1); + tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State, df)); + break; + case 0xfd: /* std */ + tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, -1); + tcg_gen_st_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_env, offsetof(CPUX86State, df)); + break; + + /************************/ + /* bit operations */ + case 0x1ba: /* bt/bts/btr/btc Gv, im */ + ot = dflag; + modrm = x86_ldub_code(env, s); + op = (modrm >> 3) & 7; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + if (mod != 3) { + s->rip_offset = 1; + gen_lea_modrm(env, s, modrm); + if (!(s->prefix & PREFIX_LOCK)) { + gen_op_ld_v(s, ot, s->T0, s->A0); + } + } else { + gen_op_mov_v_reg(s, ot, s->T0, rm); + } + /* load shift */ + val = x86_ldub_code(env, s); + tcg_gen_movi_tl(tcg_ctx, s->T1, val); + if (op < 4) + goto unknown_op; + op -= 4; + goto bt_op; + case 0x1a3: /* bt Gv, Ev */ + op = 0; + goto do_btx; + case 0x1ab: /* bts */ + op = 1; + goto do_btx; + case 0x1b3: /* btr */ + op = 2; + goto do_btx; + case 0x1bb: /* btc */ + op = 3; + do_btx: + ot = dflag; + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + gen_op_mov_v_reg(s, MO_32, s->T1, reg); + if (mod != 3) { + AddressParts a = 
gen_lea_modrm_0(env, s, modrm); + /* specific case: we need to add a displacement */ + gen_exts(tcg_ctx, ot, s->T1); + tcg_gen_sari_tl(tcg_ctx, s->tmp0, s->T1, 3 + ot); + tcg_gen_shli_tl(tcg_ctx, s->tmp0, s->tmp0, ot); + tcg_gen_add_tl(tcg_ctx, s->A0, gen_lea_modrm_1(s, a), s->tmp0); + gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); + if (!(s->prefix & PREFIX_LOCK)) { + gen_op_ld_v(s, ot, s->T0, s->A0); + } + } else { + gen_op_mov_v_reg(s, ot, s->T0, rm); + } + bt_op: + tcg_gen_andi_tl(tcg_ctx, s->T1, s->T1, (1 << (3 + ot)) - 1); + tcg_gen_movi_tl(tcg_ctx, s->tmp0, 1); + tcg_gen_shl_tl(tcg_ctx, s->tmp0, s->tmp0, s->T1); + if (s->prefix & PREFIX_LOCK) { + switch (op) { + case 0: /* bt */ + /* Needs no atomic ops; we suppressed the normal + memory load for LOCK above so do it now. */ + gen_op_ld_v(s, ot, s->T0, s->A0); + break; + case 1: /* bts */ + tcg_gen_atomic_fetch_or_tl(tcg_ctx, s->T0, s->A0, s->tmp0, + s->mem_index, ot | MO_LE); + break; + case 2: /* btr */ + tcg_gen_not_tl(tcg_ctx, s->tmp0, s->tmp0); + tcg_gen_atomic_fetch_and_tl(tcg_ctx, s->T0, s->A0, s->tmp0, + s->mem_index, ot | MO_LE); + break; + default: + case 3: /* btc */ + tcg_gen_atomic_fetch_xor_tl(tcg_ctx, s->T0, s->A0, s->tmp0, + s->mem_index, ot | MO_LE); + break; + } + tcg_gen_shr_tl(tcg_ctx, s->tmp4, s->T0, s->T1); + } else { + tcg_gen_shr_tl(tcg_ctx, s->tmp4, s->T0, s->T1); + switch (op) { + case 0: /* bt */ + /* Data already loaded; nothing to do. */ + break; + case 1: /* bts */ + tcg_gen_or_tl(tcg_ctx, s->T0, s->T0, s->tmp0); + break; + case 2: /* btr */ + tcg_gen_andc_tl(tcg_ctx, s->T0, s->T0, s->tmp0); + break; + default: + case 3: /* btc */ + tcg_gen_xor_tl(tcg_ctx, s->T0, s->T0, s->tmp0); + break; + } + if (op != 0) { + if (mod != 3) { + gen_op_st_v(s, ot, s->T0, s->A0); + } else { + gen_op_mov_reg_v(s, ot, rm, s->T0); + } + } + } + + /* Delay all CC updates until after the store above. Note that + C is the result of the test, Z is unchanged, and the others + are all undefined. */ + switch (s->cc_op) { + case CC_OP_MULB: + case CC_OP_MULW: + case CC_OP_MULL: + case CC_OP_MULQ: + + case CC_OP_ADDB: + case CC_OP_ADDW: + case CC_OP_ADDL: + case CC_OP_ADDQ: + + case CC_OP_ADCB: + case CC_OP_ADCW: + case CC_OP_ADCL: + case CC_OP_ADCQ: + + case CC_OP_SUBB: + case CC_OP_SUBW: + case CC_OP_SUBL: + case CC_OP_SUBQ: + + case CC_OP_SBBB: + case CC_OP_SBBW: + case CC_OP_SBBL: + case CC_OP_SBBQ: + + case CC_OP_LOGICB: + case CC_OP_LOGICW: + case CC_OP_LOGICL: + case CC_OP_LOGICQ: + + case CC_OP_INCB: + case CC_OP_INCW: + case CC_OP_INCL: + case CC_OP_INCQ: + + case CC_OP_DECB: + case CC_OP_DECW: + case CC_OP_DECL: + case CC_OP_DECQ: + + case CC_OP_SHLB: + case CC_OP_SHLW: + case CC_OP_SHLL: + case CC_OP_SHLQ: + + case CC_OP_SARB: + case CC_OP_SARW: + case CC_OP_SARL: + case CC_OP_SARQ: + + case CC_OP_BMILGB: + case CC_OP_BMILGW: + case CC_OP_BMILGL: + case CC_OP_BMILGQ: + /* Z was going to be computed from the non-zero status of CC_DST. + We can get that same Z value (and the new C value) by leaving + CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the + same width. */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->tmp4); + set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); + break; + default: + /* Otherwise, generate EFLAGS and replace the C bit.
*/ + gen_compute_eflags(s); + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, s->tmp4, + ctz32(CC_C), 1); + break; + } + break; + case 0x1bc: /* bsf / tzcnt */ + case 0x1bd: /* bsr / lzcnt */ + ot = dflag; + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_extu(tcg_ctx, ot, s->T0); + + /* Note that lzcnt and tzcnt are in different extensions. */ + if ((prefixes & PREFIX_REPZ) + && (b & 1 + ? s->cpuid_ext3_features & CPUID_EXT3_ABM + : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) { + int size = 8 << ot; + /* For lzcnt/tzcnt, C bit is defined related to the input. */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0); + if (b & 1) { + /* For lzcnt, reduce the target_ulong result by the + number of zeros that we expect to find at the top. */ + tcg_gen_clzi_tl(tcg_ctx, s->T0, s->T0, TARGET_LONG_BITS); + tcg_gen_subi_tl(tcg_ctx, s->T0, s->T0, TARGET_LONG_BITS - size); + } else { + /* For tzcnt, a zero input must return the operand size. */ + tcg_gen_ctzi_tl(tcg_ctx, s->T0, s->T0, size); + } + /* For lzcnt/tzcnt, Z bit is defined related to the result. */ + gen_op_update1_cc(s); + set_cc_op(s, CC_OP_BMILGB + ot); + } else { + /* For bsr/bsf, only the Z bit is defined and it is related + to the input and not the result. */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s->T0); + set_cc_op(s, CC_OP_LOGICB + ot); + + /* ??? The manual says that the output is undefined when the + input is zero, but real hardware leaves it unchanged, and + real programs appear to depend on that. Accomplish this + by passing the output as the value to return upon zero. */ + if (b & 1) { + /* For bsr, return the bit index of the first 1 bit, + not the count of leading zeros. */ + tcg_gen_xori_tl(tcg_ctx, s->T1, tcg_ctx->cpu_regs[reg], TARGET_LONG_BITS - 1); + tcg_gen_clz_tl(tcg_ctx, s->T0, s->T0, s->T1); + tcg_gen_xori_tl(tcg_ctx, s->T0, s->T0, TARGET_LONG_BITS - 1); + } else { + tcg_gen_ctz_tl(tcg_ctx, s->T0, s->T0, tcg_ctx->cpu_regs[reg]); + } + } + gen_op_mov_reg_v(s, ot, reg, s->T0); + break; + /************************/ + /* bcd */ + case 0x27: /* daa */ + if (CODE64(s)) + goto illegal_op; + gen_update_cc_op(s); + gen_helper_daa(tcg_ctx, tcg_ctx->cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x2f: /* das */ + if (CODE64(s)) + goto illegal_op; + gen_update_cc_op(s); + gen_helper_das(tcg_ctx, tcg_ctx->cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x37: /* aaa */ + if (CODE64(s)) + goto illegal_op; + gen_update_cc_op(s); + gen_helper_aaa(tcg_ctx, tcg_ctx->cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x3f: /* aas */ + if (CODE64(s)) + goto illegal_op; + gen_update_cc_op(s); + gen_helper_aas(tcg_ctx, tcg_ctx->cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0xd4: /* aam */ + if (CODE64(s)) + goto illegal_op; + val = x86_ldub_code(env, s); + if (val == 0) { + gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); + } else { + gen_helper_aam(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, val)); + set_cc_op(s, CC_OP_LOGICB); + } + break; + case 0xd5: /* aad */ + if (CODE64(s)) + goto illegal_op; + val = x86_ldub_code(env, s); + gen_helper_aad(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, val)); + set_cc_op(s, CC_OP_LOGICB); + break; + /************************/ + /* misc */ + case 0x90: /* nop */ + /* XXX: correct lock test for all insn */ + if (prefixes & PREFIX_LOCK) { + goto illegal_op; + } + /* If REX_B is set, then this is xchg eax, r8d, not a nop. 
*/ + if (REX_B(s)) { + goto do_xchg_reg_eax; + } + if (prefixes & PREFIX_REPZ) { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_pause(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + s->base.is_jmp = DISAS_NORETURN; + } + break; + case 0x9b: /* fwait */ + if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == + (HF_MP_MASK | HF_TS_MASK)) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + } else { + gen_helper_fwait(tcg_ctx, tcg_ctx->cpu_env); + } + break; + case 0xcc: /* int3 */ + gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); + break; + case 0xcd: /* int N */ + val = x86_ldub_code(env, s); + if (s->vm86 && s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); + } + break; + case 0xce: /* into */ + if (CODE64(s)) + goto illegal_op; + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_into(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + break; +#ifdef WANT_ICEBP + case 0xf1: /* icebp (undocumented, exits to external debugger) */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); + gen_debug(s, pc_start - s->cs_base); + break; +#endif + case 0xfa: /* cli */ + if (!s->vm86) { + if (s->cpl <= s->iopl) { + gen_helper_cli(tcg_ctx, tcg_ctx->cpu_env); + } else { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } + } else { + if (s->iopl == 3) { + gen_helper_cli(tcg_ctx, tcg_ctx->cpu_env); + } else { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } + } + break; + case 0xfb: /* sti */ + if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) { + gen_helper_sti(tcg_ctx, tcg_ctx->cpu_env); + /* interrupts are recognized only after the first insn following sti */ + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob_inhibit_irq(s, true); + } else { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } + break; + case 0x62: /* bound */ + if (CODE64(s)) + goto illegal_op; + ot = dflag; + modrm = x86_ldub_code(env, s); + reg = (modrm >> 3) & 7; + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + gen_op_mov_v_reg(s, ot, s->T0, reg); + gen_lea_modrm(env, s, modrm); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + if (ot == MO_16) { + gen_helper_boundw(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->tmp2_i32); + } else { + gen_helper_boundl(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->tmp2_i32); + } + break; + case 0x1c8: /* bswap reg */ + case 0x1c9: /* bswap reg */ + case 0x1ca: /* bswap reg */ + case 0x1cb: /* bswap reg */ + case 0x1cc: /* bswap reg */ + case 0x1cd: /* bswap reg */ + case 0x1ce: /* bswap reg */ + case 0x1cf: /* bswap reg */ + reg = (b & 7) | REX_B(s); +#ifdef TARGET_X86_64 + if (dflag == MO_64) { + gen_op_mov_v_reg(s, MO_64, s->T0, reg); + tcg_gen_bswap64_i64(tcg_ctx, s->T0, s->T0); + gen_op_mov_reg_v(s, MO_64, reg, s->T0); + } else +#endif + { + gen_op_mov_v_reg(s, MO_32, s->T0, reg); + tcg_gen_ext32u_tl(tcg_ctx, s->T0, s->T0); + tcg_gen_bswap32_tl(tcg_ctx, s->T0, s->T0); + gen_op_mov_reg_v(s, MO_32, reg, s->T0); + } + break; + case 0xd6: /* salc */ + if (CODE64(s)) + goto illegal_op; + gen_compute_eflags_c(s, s->T0); + tcg_gen_neg_tl(tcg_ctx, s->T0, s->T0); + gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); + break; + case 0xe0: /* loopnz */ + case 0xe1: /* loopz */ + case 0xe2: /* loop */ + case 0xe3: /* jecxz */ + { + TCGLabel *l1, *l2, *l3; + + tval = (int8_t)insn_get(env, s, MO_8); + next_eip = s->pc - s->cs_base; + tval += next_eip; + if (dflag == MO_16) { + tval &=
0xffff; + } + + l1 = gen_new_label(tcg_ctx); + l2 = gen_new_label(tcg_ctx); + l3 = gen_new_label(tcg_ctx); + b &= 3; + switch(b) { + case 0: /* loopnz */ + case 1: /* loopz */ + gen_op_add_reg_im(s, s->aflag, R_ECX, -1); + gen_op_jz_ecx(s, s->aflag, l3); + gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); + break; + case 2: /* loop */ + gen_op_add_reg_im(s, s->aflag, R_ECX, -1); + gen_op_jnz_ecx(s, s->aflag, l1); + break; + default: + case 3: /* jcxz */ + gen_op_jz_ecx(s, s->aflag, l1); + break; + } + + gen_set_label(tcg_ctx, l3); + gen_jmp_im(s, next_eip); + tcg_gen_br(tcg_ctx, l2); + + gen_set_label(tcg_ctx, l1); + gen_jmp_im(s, tval); + gen_set_label(tcg_ctx, l2); + gen_eob(s); + } + break; + case 0x130: /* wrmsr */ + case 0x132: /* rdmsr */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + if (b & 2) { + gen_helper_rdmsr(tcg_ctx, tcg_ctx->cpu_env); + } else { + gen_helper_wrmsr(tcg_ctx, tcg_ctx->cpu_env); + } + } + break; + case 0x131: /* rdtsc */ + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_rdtsc(tcg_ctx, tcg_ctx->cpu_env); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_jmp(s, s->pc - s->cs_base); + } + break; + case 0x133: /* rdpmc */ + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_rdpmc(tcg_ctx, tcg_ctx->cpu_env); + break; + case 0x134: /* sysenter */ + /* For Intel SYSENTER is valid on 64-bit */ + if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) + goto illegal_op; + if (!s->pe) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_helper_sysenter(tcg_ctx, tcg_ctx->cpu_env, 0); + gen_eob(s); + } + break; + case 0x135: /* sysexit */ + /* For Intel SYSEXIT is valid on 64-bit */ + if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) + goto illegal_op; + if (!s->pe) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_helper_sysexit(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); + gen_eob(s); + } + break; +#ifdef TARGET_X86_64 + case 0x105: /* syscall */ + /* XXX: is it usable in real mode ? */ + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_syscall(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + /* TF handling for the syscall insn is different. The TF bit is checked + after the syscall insn completes. This allows #DB to not be + generated after one has entered CPL0 if TF is set in FMASK. */ + gen_eob_worker(s, false, true); + break; + case 0x107: /* sysret */ + if (!s->pe) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_helper_sysret(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); + /* condition codes are modified only in long mode */ + if (s->lma) { + set_cc_op(s, CC_OP_EFLAGS); + } + /* TF handling for the sysret insn is different. The TF bit is + checked after the sysret insn completes. This allows #DB to be + generated "as if" the syscall insn in userspace has just + completed. 
*/ + gen_eob_worker(s, false, true); + } + break; +#endif + case 0x1a2: /* cpuid */ + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_cpuid(tcg_ctx, tcg_ctx->cpu_env); + break; + case 0xf4: /* hlt */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_hlt(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + s->base.is_jmp = DISAS_NORETURN; + } + break; + case 0x100: + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + switch(op) { + case 0: /* sldt */ + if (!s->pe || s->vm86) + goto illegal_op; + gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); + tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, ldt.selector)); + ot = mod == 3 ? dflag : MO_16; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); + break; + case 2: /* lldt */ + if (!s->pe || s->vm86) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + gen_helper_lldt(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + } + break; + case 1: /* str */ + if (!s->pe || s->vm86) + goto illegal_op; + gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); + tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, tr.selector)); + ot = mod == 3 ? dflag : MO_16; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); + break; + case 3: /* ltr */ + if (!s->pe || s->vm86) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, s->T0); + gen_helper_ltr(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + } + break; + case 4: /* verr */ + case 5: /* verw */ + if (!s->pe || s->vm86) + goto illegal_op; + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + gen_update_cc_op(s); + if (op == 4) { + gen_helper_verr(tcg_ctx, tcg_ctx->cpu_env, s->T0); + } else { + gen_helper_verw(tcg_ctx, tcg_ctx->cpu_env, s->T0); + } + set_cc_op(s, CC_OP_EFLAGS); + break; + default: + goto unknown_op; + } + break; + + case 0x101: + modrm = x86_ldub_code(env, s); + switch (modrm) { + CASE_MODRM_MEM_OP(0): /* sgdt */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); + gen_lea_modrm(env, s, modrm); + tcg_gen_ld32u_tl(tcg_ctx, s->T0, + tcg_ctx->cpu_env, offsetof(CPUX86State, gdt.limit)); + gen_op_st_v(s, MO_16, s->T0, s->A0); + gen_add_A0_im(s, 2); + tcg_gen_ld_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, gdt.base)); + if (dflag == MO_16) { + tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, 0xffffff); + } + gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0); + break; + + case 0xc8: /* monitor */ + if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + tcg_gen_mov_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[R_EAX]); + gen_extu(tcg_ctx, s->aflag, s->A0); + gen_add_A0_ds_seg(s); + gen_helper_monitor(tcg_ctx, tcg_ctx->cpu_env, s->A0); + break; + + case 0xc9: /* mwait */ + if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_mwait(tcg_ctx, 
tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + gen_eob(s); + break; + + case 0xca: /* clac */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) + || s->cpl != 0) { + goto illegal_op; + } + gen_helper_clac(tcg_ctx, tcg_ctx->cpu_env); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + break; + + case 0xcb: /* stac */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) + || s->cpl != 0) { + goto illegal_op; + } + gen_helper_stac(tcg_ctx, tcg_ctx->cpu_env); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + break; + + CASE_MODRM_MEM_OP(1): /* sidt */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); + gen_lea_modrm(env, s, modrm); + tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, idt.limit)); + gen_op_st_v(s, MO_16, s->T0, s->A0); + gen_add_A0_im(s, 2); + tcg_gen_ld_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, idt.base)); + if (dflag == MO_16) { + tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, 0xffffff); + } + gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0); + break; + + case 0xd0: /* xgetbv */ + if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 + || (s->prefix & (PREFIX_LOCK | PREFIX_DATA + | PREFIX_REPZ | PREFIX_REPNZ))) { + goto illegal_op; + } + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_ECX]); + gen_helper_xgetbv(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, s->tmp2_i32); + tcg_gen_extr_i64_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX], s->tmp1_i64); + break; + + case 0xd1: /* xsetbv */ + if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 + || (s->prefix & (PREFIX_LOCK | PREFIX_DATA + | PREFIX_REPZ | PREFIX_REPNZ))) { + goto illegal_op; + } + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + tcg_gen_concat_tl_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_regs[R_EAX], + tcg_ctx->cpu_regs[R_EDX]); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_ECX]); + gen_helper_xsetbv(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->tmp1_i64); + /* End TB because translation flags may change. 
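+ (an XCR0 write can toggle feature enables, e.g. MPX, that are baked into the hflags this TB was translated under)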
*/ + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + break; + + case 0xd8: /* VMRUN */ + if (!(s->flags & HF_SVME_MASK) || !s->pe) { + goto illegal_op; + } + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_vmrun(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1), + tcg_const_i32(tcg_ctx, s->pc - pc_start)); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + s->base.is_jmp = DISAS_NORETURN; + break; + + case 0xd9: /* VMMCALL */ + if (!(s->flags & HF_SVME_MASK)) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_vmmcall(tcg_ctx, tcg_ctx->cpu_env); + break; + + case 0xda: /* VMLOAD */ + if (!(s->flags & HF_SVME_MASK) || !s->pe) { + goto illegal_op; + } + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_vmload(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1)); + break; + + case 0xdb: /* VMSAVE */ + if (!(s->flags & HF_SVME_MASK) || !s->pe) { + goto illegal_op; + } + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_vmsave(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1)); + break; + + case 0xdc: /* STGI */ + if ((!(s->flags & HF_SVME_MASK) + && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) + || !s->pe) { + goto illegal_op; + } + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + gen_update_cc_op(s); + gen_helper_stgi(tcg_ctx, tcg_ctx->cpu_env); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + break; + + case 0xdd: /* CLGI */ + if (!(s->flags & HF_SVME_MASK) || !s->pe) { + goto illegal_op; + } + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_clgi(tcg_ctx, tcg_ctx->cpu_env); + break; + + case 0xde: /* SKINIT */ + if ((!(s->flags & HF_SVME_MASK) + && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) + || !s->pe) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_skinit(tcg_ctx, tcg_ctx->cpu_env); + break; + + case 0xdf: /* INVLPGA */ + if (!(s->flags & HF_SVME_MASK) || !s->pe) { + goto illegal_op; + } + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_invlpga(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1)); + break; + + CASE_MODRM_MEM_OP(2): /* lgdt */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE); + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, MO_16, s->T1, s->A0); + gen_add_A0_im(s, 2); + gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); + if (dflag == MO_16) { + tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, 0xffffff); + } + tcg_gen_st_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, gdt.base)); + tcg_gen_st32_tl(tcg_ctx, s->T1, tcg_ctx->cpu_env, offsetof(CPUX86State, gdt.limit)); + break; + + CASE_MODRM_MEM_OP(3): /* lidt */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE); + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, MO_16, s->T1, 
s->A0); + gen_add_A0_im(s, 2); + gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); + if (dflag == MO_16) { + tcg_gen_andi_tl(tcg_ctx, s->T0, s->T0, 0xffffff); + } + tcg_gen_st_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, idt.base)); + tcg_gen_st32_tl(tcg_ctx, s->T1, tcg_ctx->cpu_env, offsetof(CPUX86State, idt.limit)); + break; + + CASE_MODRM_OP(4): /* smsw */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); + tcg_gen_ld_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, cr[0])); + if (CODE64(s)) { + mod = (modrm >> 6) & 3; + ot = (mod != 3 ? MO_16 : s->dflag); + } else { + ot = MO_16; + } + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); + break; + case 0xee: /* rdpkru */ + if (prefixes & PREFIX_LOCK) { + goto illegal_op; + } + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_ECX]); + gen_helper_rdpkru(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_env, s->tmp2_i32); + tcg_gen_extr_i64_tl(tcg_ctx, tcg_ctx->cpu_regs[R_EAX], tcg_ctx->cpu_regs[R_EDX], s->tmp1_i64); + break; + case 0xef: /* wrpkru */ + if (prefixes & PREFIX_LOCK) { + goto illegal_op; + } + tcg_gen_concat_tl_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_regs[R_EAX], + tcg_ctx->cpu_regs[R_EDX]); + tcg_gen_trunc_tl_i32(tcg_ctx, s->tmp2_i32, tcg_ctx->cpu_regs[R_ECX]); + gen_helper_wrpkru(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->tmp1_i64); + break; + CASE_MODRM_OP(6): /* lmsw */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + gen_helper_lmsw(tcg_ctx, tcg_ctx->cpu_env, s->T0); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + break; + + CASE_MODRM_MEM_OP(7): /* invlpg */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_lea_modrm(env, s, modrm); + gen_helper_invlpg(tcg_ctx, tcg_ctx->cpu_env, s->A0); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + break; + + case 0xf8: /* swapgs */ +#ifdef TARGET_X86_64 + if (CODE64(s)) { + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + tcg_gen_mov_tl(tcg_ctx, s->T0, tcg_ctx->cpu_seg_base[R_GS]); + tcg_gen_ld_tl(tcg_ctx, tcg_ctx->cpu_seg_base[R_GS], tcg_ctx->cpu_env, + offsetof(CPUX86State, kernelgsbase)); + tcg_gen_st_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, + offsetof(CPUX86State, kernelgsbase)); + } + break; + } +#endif + goto illegal_op; + + case 0xf9: /* rdtscp */ + if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_rdtscp(tcg_ctx, tcg_ctx->cpu_env); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_jmp(s, s->pc - s->cs_base); + } + break; + + default: + goto unknown_op; + } + break; + + case 0x108: /* invd */ + case 0x109: /* wbinvd */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_svm_check_intercept(s, pc_start, (b & 2) ? 
SVM_EXIT_INVD : SVM_EXIT_WBINVD); + /* nothing to do */ + } + break; + case 0x63: /* arpl or movslS (x86_64) */ +#ifdef TARGET_X86_64 + if (CODE64(s)) { + int d_ot; + /* d_ot is the size of destination */ + d_ot = dflag; + + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + + if (mod == 3) { + gen_op_mov_v_reg(s, MO_32, s->T0, rm); + /* sign extend */ + if (d_ot == MO_64) { + tcg_gen_ext32s_tl(tcg_ctx, s->T0, s->T0); + } + gen_op_mov_reg_v(s, d_ot, reg, s->T0); + } else { + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0); + gen_op_mov_reg_v(s, d_ot, reg, s->T0); + } + } else +#endif + { + TCGLabel *label1; + TCGv t0, t1, t2, a0; + + if (!s->pe || s->vm86) + goto illegal_op; + t0 = tcg_temp_local_new(tcg_ctx); + t1 = tcg_temp_local_new(tcg_ctx); + t2 = tcg_temp_local_new(tcg_ctx); + ot = MO_16; + modrm = x86_ldub_code(env, s); + reg = (modrm >> 3) & 7; + mod = (modrm >> 6) & 3; + rm = modrm & 7; + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, ot, t0, s->A0); + a0 = tcg_temp_local_new(tcg_ctx); + tcg_gen_mov_tl(tcg_ctx, a0, s->A0); + } else { + gen_op_mov_v_reg(s, ot, t0, rm); + a0 = NULL; + } + gen_op_mov_v_reg(s, ot, t1, reg); + tcg_gen_andi_tl(tcg_ctx, s->tmp0, t0, 3); + tcg_gen_andi_tl(tcg_ctx, t1, t1, 3); + tcg_gen_movi_tl(tcg_ctx, t2, 0); + label1 = gen_new_label(tcg_ctx); + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, s->tmp0, t1, label1); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_gen_movi_tl(tcg_ctx, t2, CC_Z); + gen_set_label(tcg_ctx, label1); + if (mod != 3) { + gen_op_st_v(s, ot, t0, a0); + tcg_temp_free(tcg_ctx, a0); + } else { + gen_op_mov_reg_v(s, ot, rm, t0); + } + gen_compute_eflags(s); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, ~CC_Z); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, t2); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + } + break; + case 0x102: /* lar */ + case 0x103: /* lsl */ + { + TCGLabel *label1; + TCGv t0; + if (!s->pe || s->vm86) + goto illegal_op; + ot = dflag != MO_16 ? 
MO_32 : MO_16; + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + t0 = tcg_temp_local_new(tcg_ctx); + gen_update_cc_op(s); + if (b == 0x102) { + gen_helper_lar(tcg_ctx, t0, tcg_ctx->cpu_env, s->T0); + } else { + gen_helper_lsl(tcg_ctx, t0, tcg_ctx->cpu_env, s->T0); + } + tcg_gen_andi_tl(tcg_ctx, s->tmp0, tcg_ctx->cpu_cc_src, CC_Z); + label1 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, s->tmp0, 0, label1); + gen_op_mov_reg_v(s, ot, reg, t0); + gen_set_label(tcg_ctx, label1); + set_cc_op(s, CC_OP_EFLAGS); + tcg_temp_free(tcg_ctx, t0); + } + break; + case 0x118: + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + switch(op) { + case 0: /* prefetchnta */ + case 1: /* prefetcht0 */ + case 2: /* prefetcht1 */ + case 3: /* prefetcht2 */ + if (mod == 3) + goto illegal_op; + gen_nop_modrm(env, s, modrm); + /* nothing more to do */ + break; + default: /* nop (multi byte) */ + gen_nop_modrm(env, s, modrm); + break; + } + break; + case 0x11a: + modrm = x86_ldub_code(env, s); + if (s->flags & HF_MPX_EN_MASK) { + mod = (modrm >> 6) & 3; + reg = ((modrm >> 3) & 7) | rex_r; + if (prefixes & PREFIX_REPZ) { + /* bndcl */ + if (reg >= 4 + || (prefixes & PREFIX_LOCK) + || s->aflag == MO_16) { + goto illegal_op; + } + gen_bndck(env, s, modrm, TCG_COND_LTU, tcg_ctx->cpu_bndl[reg]); + } else if (prefixes & PREFIX_REPNZ) { + /* bndcu */ + if (reg >= 4 + || (prefixes & PREFIX_LOCK) + || s->aflag == MO_16) { + goto illegal_op; + } + TCGv_i64 notu = tcg_temp_new_i64(tcg_ctx); + tcg_gen_not_i64(tcg_ctx, notu, tcg_ctx->cpu_bndu[reg]); + gen_bndck(env, s, modrm, TCG_COND_GTU, notu); + tcg_temp_free_i64(tcg_ctx, notu); + } else if (prefixes & PREFIX_DATA) { + /* bndmov -- from reg/mem */ + if (reg >= 4 || s->aflag == MO_16) { + goto illegal_op; + } + if (mod == 3) { + int reg2 = (modrm & 7) | REX_B(s); + if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { + goto illegal_op; + } + if (s->flags & HF_MPX_IU_MASK) { + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndl[reg2]); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], tcg_ctx->cpu_bndu[reg2]); + } + } else { + gen_lea_modrm(env, s, modrm); + if (CODE64(s)) { + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], s->A0, + s->mem_index, MO_LEQ); + tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0, + s->mem_index, MO_LEQ); + } else { + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], s->A0, + s->mem_index, MO_LEUL); + tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 4); + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0, + s->mem_index, MO_LEUL); + } + /* bnd registers are now in-use */ + gen_set_hflag(s, HF_MPX_IU_MASK); + } + } else if (mod != 3) { + /* bndldx */ + AddressParts a = gen_lea_modrm_0(env, s, modrm); + if (reg >= 4 + || (prefixes & PREFIX_LOCK) + || s->aflag == MO_16 + || a.base < -1) { + goto illegal_op; + } + if (a.base >= 0) { + tcg_gen_addi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[a.base], a.disp); + } else { + tcg_gen_movi_tl(tcg_ctx, s->A0, 0); + } + gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); + if (a.index >= 0) { + tcg_gen_mov_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[a.index]); + } else { + tcg_gen_movi_tl(tcg_ctx, s->T0, 0); + } + if (CODE64(s)) { + gen_helper_bndldx64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_env, s->A0, s->T0); + tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], tcg_ctx->cpu_env, + offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
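+ /* helper_bndldx64 returns the lower bound; the upper bound comes back through the mmx_t0 scratch slot and is loaded just above. */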
+ } else { + gen_helper_bndldx32(tcg_ctx, tcg_ctx->cpu_bndu[reg], tcg_ctx->cpu_env, s->A0, s->T0); + tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndu[reg]); + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], tcg_ctx->cpu_bndu[reg], 32); + } + gen_set_hflag(s, HF_MPX_IU_MASK); + } + } + gen_nop_modrm(env, s, modrm); + break; + case 0x11b: + modrm = x86_ldub_code(env, s); + if (s->flags & HF_MPX_EN_MASK) { + mod = (modrm >> 6) & 3; + reg = ((modrm >> 3) & 7) | rex_r; + if (mod != 3 && (prefixes & PREFIX_REPZ)) { + /* bndmk */ + if (reg >= 4 + || (prefixes & PREFIX_LOCK) + || s->aflag == MO_16) { + goto illegal_op; + } + AddressParts a = gen_lea_modrm_0(env, s, modrm); + if (a.base >= 0) { + tcg_gen_extu_tl_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_regs[a.base]); + if (!CODE64(s)) { + tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndl[reg]); + } + } else if (a.base == -1) { + /* no base register has lower bound of 0 */ + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], 0); + } else { + /* rip-relative generates #ud */ + goto illegal_op; + } + tcg_gen_not_tl(tcg_ctx, s->A0, gen_lea_modrm_1(s, a)); + if (!CODE64(s)) { + tcg_gen_ext32u_tl(tcg_ctx, s->A0, s->A0); + } + tcg_gen_extu_tl_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0); + /* bnd registers are now in-use */ + gen_set_hflag(s, HF_MPX_IU_MASK); + break; + } else if (prefixes & PREFIX_REPNZ) { + /* bndcn */ + if (reg >= 4 + || (prefixes & PREFIX_LOCK) + || s->aflag == MO_16) { + goto illegal_op; + } + gen_bndck(env, s, modrm, TCG_COND_GTU, tcg_ctx->cpu_bndu[reg]); + } else if (prefixes & PREFIX_DATA) { + /* bndmov -- to reg/mem */ + if (reg >= 4 || s->aflag == MO_16) { + goto illegal_op; + } + if (mod == 3) { + int reg2 = (modrm & 7) | REX_B(s); + if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { + goto illegal_op; + } + if (s->flags & HF_MPX_IU_MASK) { + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg2], tcg_ctx->cpu_bndl[reg]); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg2], tcg_ctx->cpu_bndu[reg]); + } + } else { + gen_lea_modrm(env, s, modrm); + if (CODE64(s)) { + tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], s->A0, + s->mem_index, MO_LEQ); + tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 8); + tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0, + s->mem_index, MO_LEQ); + } else { + tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_bndl[reg], s->A0, + s->mem_index, MO_LEUL); + tcg_gen_addi_tl(tcg_ctx, s->A0, s->A0, 4); + tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_bndu[reg], s->A0, + s->mem_index, MO_LEUL); + } + } + } else if (mod != 3) { + /* bndstx */ + AddressParts a = gen_lea_modrm_0(env, s, modrm); + if (reg >= 4 + || (prefixes & PREFIX_LOCK) + || s->aflag == MO_16 + || a.base < -1) { + goto illegal_op; + } + if (a.base >= 0) { + tcg_gen_addi_tl(tcg_ctx, s->A0, tcg_ctx->cpu_regs[a.base], a.disp); + } else { + tcg_gen_movi_tl(tcg_ctx, s->A0, 0); + } + gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); + if (a.index >= 0) { + tcg_gen_mov_tl(tcg_ctx, s->T0, tcg_ctx->cpu_regs[a.index]); + } else { + tcg_gen_movi_tl(tcg_ctx, s->T0, 0); + } + if (CODE64(s)) { + gen_helper_bndstx64(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->T0, + tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndu[reg]); + } else { + gen_helper_bndstx32(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->T0, + tcg_ctx->cpu_bndl[reg], tcg_ctx->cpu_bndu[reg]); + } + } + } + gen_nop_modrm(env, s, modrm); + break; + case 0x119: + case 0x11c: /* nop (multi byte) */ + case 0x11d: /* nop (multi byte) */ + case 0x11e: /* nop (multi byte) */ + case 0x11f: /* 
nop (multi byte) */ + modrm = x86_ldub_code(env, s); + gen_nop_modrm(env, s, modrm); + break; + case 0x120: /* mov reg, crN */ + case 0x122: /* mov crN, reg */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + modrm = x86_ldub_code(env, s); + /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). + * AMD documentation (24594.pdf) and testing of + * intel 386 and 486 processors all show that the mod bits + * are assumed to be 1's, regardless of actual values. + */ + rm = (modrm & 7) | REX_B(s); + reg = ((modrm >> 3) & 7) | rex_r; + if (CODE64(s)) + ot = MO_64; + else + ot = MO_32; + if ((prefixes & PREFIX_LOCK) && (reg == 0) && + (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) { + reg = 8; + } + switch(reg) { + case 0: + case 2: + case 3: + case 4: + case 8: + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + if (b & 2) { + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_op_mov_v_reg(s, ot, s->T0, rm); + gen_helper_write_crN(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, reg), + s->T0); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } else { + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_read_crN(tcg_ctx, s->T0, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, reg)); + gen_op_mov_reg_v(s, ot, rm, s->T0); + if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { + gen_io_end(tcg_ctx); + } + } + break; + default: + goto unknown_op; + } + } + break; + case 0x121: /* mov reg, drN */ + case 0x123: /* mov drN, reg */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + modrm = x86_ldub_code(env, s); + /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). + * AMD documentation (24594.pdf) and testing of + * intel 386 and 486 processors all show that the mod bits + * are assumed to be 1's, regardless of actual values. 
+ */ + rm = (modrm & 7) | REX_B(s); + reg = ((modrm >> 3) & 7) | rex_r; + if (CODE64(s)) + ot = MO_64; + else + ot = MO_32; + if (reg >= 8) { + goto illegal_op; + } + if (b & 2) { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); + gen_op_mov_v_reg(s, ot, s->T0, rm); + tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, reg); + gen_helper_set_dr(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32, s->T0); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } else { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); + tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, reg); + gen_helper_get_dr(tcg_ctx, s->T0, tcg_ctx->cpu_env, s->tmp2_i32); + gen_op_mov_reg_v(s, ot, rm, s->T0); + } + } + break; + case 0x106: /* clts */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); + gen_helper_clts(tcg_ctx, tcg_ctx->cpu_env); + /* abort block because static cpu state changed */ + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ + case 0x1c3: /* MOVNTI reg, mem */ + if (!(s->cpuid_features & CPUID_SSE2)) + goto illegal_op; + ot = mo_64_32(dflag); + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + reg = ((modrm >> 3) & 7) | rex_r; + /* generate a generic store */ + gen_ldst_modrm(env, s, modrm, ot, reg, 1); + break; + case 0x1ae: + modrm = x86_ldub_code(env, s); + switch (modrm) { + CASE_MODRM_MEM_OP(0): /* fxsave */ + if (!(s->cpuid_features & CPUID_FXSR) + || (prefixes & PREFIX_LOCK)) { + goto illegal_op; + } + if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + break; + } + gen_lea_modrm(env, s, modrm); + gen_helper_fxsave(tcg_ctx, tcg_ctx->cpu_env, s->A0); + break; + + CASE_MODRM_MEM_OP(1): /* fxrstor */ + if (!(s->cpuid_features & CPUID_FXSR) + || (prefixes & PREFIX_LOCK)) { + goto illegal_op; + } + if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + break; + } + gen_lea_modrm(env, s, modrm); + gen_helper_fxrstor(tcg_ctx, tcg_ctx->cpu_env, s->A0); + break; + + CASE_MODRM_MEM_OP(2): /* ldmxcsr */ + if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) { + goto illegal_op; + } + if (s->flags & HF_TS_MASK) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + break; + } + gen_lea_modrm(env, s, modrm); + tcg_gen_qemu_ld_i32(tcg_ctx, s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); + gen_helper_ldmxcsr(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + break; + + CASE_MODRM_MEM_OP(3): /* stmxcsr */ + if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) { + goto illegal_op; + } + if (s->flags & HF_TS_MASK) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + break; + } + gen_lea_modrm(env, s, modrm); + tcg_gen_ld32u_tl(tcg_ctx, s->T0, tcg_ctx->cpu_env, offsetof(CPUX86State, mxcsr)); + gen_op_st_v(s, MO_32, s->T0, s->A0); + break; + + CASE_MODRM_MEM_OP(4): /* xsave */ + if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 + || (prefixes & (PREFIX_LOCK | PREFIX_DATA + | PREFIX_REPZ | PREFIX_REPNZ))) { + goto illegal_op; + } + gen_lea_modrm(env, s, modrm); + tcg_gen_concat_tl_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_regs[R_EAX], + tcg_ctx->cpu_regs[R_EDX]); + gen_helper_xsave(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->tmp1_i64); + break; + + CASE_MODRM_MEM_OP(5): /* xrstor */ + if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 + || (prefixes & (PREFIX_LOCK | PREFIX_DATA + | 
PREFIX_REPZ | PREFIX_REPNZ))) { + goto illegal_op; + } + gen_lea_modrm(env, s, modrm); + tcg_gen_concat_tl_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_regs[R_EAX], + tcg_ctx->cpu_regs[R_EDX]); + gen_helper_xrstor(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->tmp1_i64); + /* XRSTOR is how MPX is enabled, which changes how + we translate. Thus we need to end the TB. */ + gen_update_cc_op(s); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + break; + + CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */ + if (prefixes & PREFIX_LOCK) { + goto illegal_op; + } + if (prefixes & PREFIX_DATA) { + /* clwb */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) { + goto illegal_op; + } + gen_nop_modrm(env, s, modrm); + } else { + /* xsaveopt */ + if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 + || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0 + || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) { + goto illegal_op; + } + gen_lea_modrm(env, s, modrm); + tcg_gen_concat_tl_i64(tcg_ctx, s->tmp1_i64, tcg_ctx->cpu_regs[R_EAX], + tcg_ctx->cpu_regs[R_EDX]); + gen_helper_xsaveopt(tcg_ctx, tcg_ctx->cpu_env, s->A0, s->tmp1_i64); + } + break; + + CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */ + if (prefixes & PREFIX_LOCK) { + goto illegal_op; + } + if (prefixes & PREFIX_DATA) { + /* clflushopt */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) { + goto illegal_op; + } + } else { + /* clflush */ + if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) + || !(s->cpuid_features & CPUID_CLFLUSH)) { + goto illegal_op; + } + } + gen_nop_modrm(env, s, modrm); + break; + + case 0xc0: /* rdfsbase (f3 0f ae /0) */ + case 0xc1: /* rdfsbase (f3 0f ae /0) */ + case 0xc2: /* rdfsbase (f3 0f ae /0) */ + case 0xc3: /* rdfsbase (f3 0f ae /0) */ + case 0xc4: /* rdfsbase (f3 0f ae /0) */ + case 0xc5: /* rdfsbase (f3 0f ae /0) */ + case 0xc6: /* rdfsbase (f3 0f ae /0) */ + case 0xc7: /* rdfsbase (f3 0f ae /0) */ + + case 0xc8: /* rdgsbase (f3 0f ae /1) */ + case 0xc9: /* rdgsbase (f3 0f ae /1) */ + case 0xca: /* rdgsbase (f3 0f ae /1) */ + case 0xcb: /* rdgsbase (f3 0f ae /1) */ + case 0xcc: /* rdgsbase (f3 0f ae /1) */ + case 0xcd: /* rdgsbase (f3 0f ae /1) */ + case 0xce: /* rdgsbase (f3 0f ae /1) */ + case 0xcf: /* rdgsbase (f3 0f ae /1) */ + + case 0xd0: /* wrfsbase (f3 0f ae /2) */ + case 0xd1: /* wrfsbase (f3 0f ae /2) */ + case 0xd2: /* wrfsbase (f3 0f ae /2) */ + case 0xd3: /* wrfsbase (f3 0f ae /2) */ + case 0xd4: /* wrfsbase (f3 0f ae /2) */ + case 0xd5: /* wrfsbase (f3 0f ae /2) */ + case 0xd6: /* wrfsbase (f3 0f ae /2) */ + case 0xd7: /* wrfsbase (f3 0f ae /2) */ + + case 0xd8: /* wrgsbase (f3 0f ae /3) */ + case 0xd9: /* wrgsbase (f3 0f ae /3) */ + case 0xda: /* wrgsbase (f3 0f ae /3) */ + case 0xdb: /* wrgsbase (f3 0f ae /3) */ + case 0xdc: /* wrgsbase (f3 0f ae /3) */ + case 0xdd: /* wrgsbase (f3 0f ae /3) */ + case 0xde: /* wrgsbase (f3 0f ae /3) */ + case 0xdf: /* wrgsbase (f3 0f ae /3) */ + if (CODE64(s) + && (prefixes & PREFIX_REPZ) + && !(prefixes & PREFIX_LOCK) + && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) { + TCGv base, treg, src, dst; + + /* Preserve hflags bits by testing CR4 at runtime. */ + tcg_gen_movi_i32(tcg_ctx, s->tmp2_i32, CR4_FSGSBASE_MASK); + gen_helper_cr4_testbit(tcg_ctx, tcg_ctx->cpu_env, s->tmp2_i32); + + base = tcg_ctx->cpu_seg_base[modrm & 8 ? 
R_GS : R_FS]; + treg = tcg_ctx->cpu_regs[(modrm & 7) | REX_B(s)]; + + if (modrm & 0x10) { + /* wr*base */ + dst = base, src = treg; + } else { + /* rd*base */ + dst = treg, src = base; + } + + if (s->dflag == MO_32) { + tcg_gen_ext32u_tl(tcg_ctx, dst, src); + } else { + tcg_gen_mov_tl(tcg_ctx, dst, src); + } + break; + } + goto unknown_op; + + case 0xf8: /* sfence / pcommit */ + if (prefixes & PREFIX_DATA) { + /* pcommit */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT) + || (prefixes & PREFIX_LOCK)) { + goto illegal_op; + } + break; + } + /* fallthru */ + case 0xf9: /* sfence */ + case 0xfa: /* sfence */ + case 0xfb: /* sfence */ + case 0xfc: /* sfence */ + case 0xfd: /* sfence */ + case 0xfe: /* sfence */ + case 0xff: /* sfence */ + if (!(s->cpuid_features & CPUID_SSE) + || (prefixes & PREFIX_LOCK)) { + goto illegal_op; + } + tcg_gen_mb(tcg_ctx, TCG_MO_ST_ST | TCG_BAR_SC); + break; + case 0xe8: /* lfence */ + case 0xe9: /* lfence */ + case 0xea: /* lfence */ + case 0xeb: /* lfence */ + case 0xec: /* lfence */ + case 0xed: /* lfence */ + case 0xee: /* lfence */ + case 0xef: /* lfence */ + if (!(s->cpuid_features & CPUID_SSE) + || (prefixes & PREFIX_LOCK)) { + goto illegal_op; + } + tcg_gen_mb(tcg_ctx, TCG_MO_LD_LD | TCG_BAR_SC); + break; + case 0xf0: /* mfence */ + case 0xf1: /* mfence */ + case 0xf2: /* mfence */ + case 0xf3: /* mfence */ + case 0xf4: /* mfence */ + case 0xf5: /* mfence */ + case 0xf6: /* mfence */ + case 0xf7: /* mfence */ + if (!(s->cpuid_features & CPUID_SSE2) + || (prefixes & PREFIX_LOCK)) { + goto illegal_op; + } + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); + break; + + default: + goto unknown_op; + } + break; + + case 0x10d: /* 3DNow! prefetch(w) */ + modrm = x86_ldub_code(env, s); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + gen_nop_modrm(env, s, modrm); + break; + case 0x1aa: /* rsm */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); + if (!(s->flags & HF_SMM_MASK)) + goto illegal_op; + gen_update_cc_op(s); + gen_jmp_im(s, s->pc - s->cs_base); + gen_helper_rsm(tcg_ctx, tcg_ctx->cpu_env); + gen_eob(s); + break; + case 0x1b8: /* SSE4.2 popcnt */ + if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != + PREFIX_REPZ) + goto illegal_op; + if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) + goto illegal_op; + + modrm = x86_ldub_code(env, s); + reg = ((modrm >> 3) & 7) | rex_r; + + if (s->prefix & PREFIX_DATA) { + ot = MO_16; + } else { + ot = mo_64_32(dflag); + } + + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_extu(tcg_ctx, ot, s->T0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s->T0); + tcg_gen_ctpop_tl(tcg_ctx, s->T0, s->T0); + gen_op_mov_reg_v(s, ot, reg, s->T0); + + set_cc_op(s, CC_OP_POPCNT); + break; + case 0x10e: + case 0x10f: + /* 3DNow! instructions, ignore prefixes */ + s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); + /* fall through */ + case 0x110: + case 0x111: + case 0x112: + case 0x113: + case 0x114: + case 0x115: + case 0x116: + case 0x117: + + case 0x128: + case 0x129: + case 0x12a: + case 0x12b: + case 0x12c: + case 0x12d: + case 0x12e: + case 0x12f: + + case 0x138: + case 0x139: + case 0x13a: + + // case 0x150 ... 0x179: + + case 0x17c: + case 0x17d: + case 0x17e: + case 0x17f: + case 0x1c2: + case 0x1c4: + case 0x1c5: + case 0x1c6: + // case 0x1d0 ... 
0x1fe: + gen_sse(env, s, b, pc_start, rex_r); + break; + default: + if (b >= 0x150 && b <= 0x179) { + gen_sse(env, s, b, pc_start, rex_r); + break; + } + + if (b >= 0x1d0 && b <= 0x1fe) { + gen_sse(env, s, b, pc_start, rex_r); + break; + } + + goto unknown_op; + } + + if (insn_hook) { + // Unicorn: patch the callback to have the proper instruction size. + if (prev_op) { + // As explained further up in the function where prev_op is + // assigned, we move forward in the tail queue, so we're modifying the + // move instruction generated by gen_uc_tracecode() that contains + // the instruction size to assign the proper size (replacing 0xF1F1F1F1). + tcg_op = QTAILQ_NEXT(prev_op, link); + } else { + // this instruction is the first emulated code ever, + // so the operand is the first operand + tcg_op = QTAILQ_FIRST(&tcg_ctx->ops); + } + tcg_op->args[1] = s->pc - pc_start; + } + + return s->pc; + + illegal_op: + gen_illegal_opcode(s); + return s->pc; + + unknown_op: + gen_unknown_opcode(env, s); + return s->pc; +} + +void tcg_x86_init(struct uc_struct *uc) +{ + static const char reg_names[CPU_NB_REGS][4] = { +#ifdef TARGET_X86_64 + [R_EAX] = "rax", + [R_EBX] = "rbx", + [R_ECX] = "rcx", + [R_EDX] = "rdx", + [R_ESI] = "rsi", + [R_EDI] = "rdi", + [R_EBP] = "rbp", + [R_ESP] = "rsp", + [8] = "r8", + [9] = "r9", + [10] = "r10", + [11] = "r11", + [12] = "r12", + [13] = "r13", + [14] = "r14", + [15] = "r15", +#else + [R_EAX] = "eax", + [R_EBX] = "ebx", + [R_ECX] = "ecx", + [R_EDX] = "edx", + [R_ESI] = "esi", + [R_EDI] = "edi", + [R_EBP] = "ebp", + [R_ESP] = "esp", +#endif + }; + static const char seg_base_names[6][8] = { + [R_CS] = "cs_base", + [R_DS] = "ds_base", + [R_ES] = "es_base", + [R_FS] = "fs_base", + [R_GS] = "gs_base", + [R_SS] = "ss_base", + }; + static const char bnd_regl_names[4][8] = { + "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb" + }; + static const char bnd_regu_names[4][8] = { + "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub" + }; + int i; + TCGContext *tcg_ctx = uc->tcg_ctx; + + tcg_ctx->cpu_cc_op = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUX86State, cc_op), "cc_op"); + tcg_ctx->cpu_cc_dst = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, cc_dst), + "cc_dst"); + tcg_ctx->cpu_cc_src = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, cc_src), + "cc_src"); + tcg_ctx->cpu_cc_src2 = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUX86State, cc_src2), + "cc_src2"); + + for (i = 0; i < CPU_NB_REGS; ++i) { + tcg_ctx->cpu_regs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUX86State, regs[i]), + reg_names[i]); + } + + for (i = 0; i < 6; ++i) { + tcg_ctx->cpu_seg_base[i] + = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUX86State, segs[i].base), + seg_base_names[i]); + } + + for (i = 0; i < 4; ++i) { + tcg_ctx->cpu_bndl[i] + = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUX86State, bnd_regs[i].lb), + bnd_regl_names[i]); + tcg_ctx->cpu_bndu[i] + = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUX86State, bnd_regs[i].ub), + bnd_regu_names[i]); + } +} + +static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = cpu->uc->tcg_ctx; + CPUX86State *env = cpu->env_ptr; + uint32_t flags = dc->base.tb->flags; + target_ulong cs_base = dc->base.tb->cs_base; + + // unicorn setup + dc->uc = cpu->uc; + dc->pe = (flags >> HF_PE_SHIFT) & 1; + dc->code32 = (flags >> 
HF_CS32_SHIFT) & 1; + dc->ss32 = (flags >> HF_SS32_SHIFT) & 1; + dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1; + dc->f_st = 0; + dc->vm86 = (flags >> VM_SHIFT) & 1; + dc->cpl = (flags >> HF_CPL_SHIFT) & 3; + dc->iopl = (flags >> IOPL_SHIFT) & 3; + dc->tf = (flags >> TF_SHIFT) & 1; + dc->cc_op = CC_OP_DYNAMIC; + dc->cc_op_dirty = false; + dc->cs_base = cs_base; + dc->popl_esp_hack = 0; + /* select memory access functions */ + dc->mem_index = 0; + dc->mem_index = cpu_mmu_index(env, false); + dc->cpuid_features = env->features[FEAT_1_EDX]; + dc->cpuid_ext_features = env->features[FEAT_1_ECX]; + dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX]; + dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; + dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; + dc->cpuid_xsave_features = env->features[FEAT_XSAVE]; +#ifdef TARGET_X86_64 + dc->lma = (flags >> HF_LMA_SHIFT) & 1; + dc->code64 = (flags >> HF_CS64_SHIFT) & 1; +#endif + dc->flags = flags; + dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled || + (flags & HF_INHIBIT_IRQ_MASK)); + /* Do not optimize repz jumps at all in icount mode, because + rep movsS instructions are executed with different paths + in !repz_opt and repz_opt modes. The first one was always + used except in single step mode. This setting disables + jump optimization, so the control paths become equivalent + in run and single step modes. Now there will be no jump + optimization for repz in record/replay modes and there + will always be an additional step for ecx=0 when icount + is enabled. + */ + dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT); +#if 0 + /* check addseg logic */ + if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32)) + printf("ERROR addseg\n"); +#endif + + dc->T0 = tcg_temp_new(tcg_ctx); + dc->T1 = tcg_temp_new(tcg_ctx); + dc->A0 = tcg_temp_new(tcg_ctx); + + dc->tmp0 = tcg_temp_new(tcg_ctx); + dc->tmp1_i64 = tcg_temp_new_i64(tcg_ctx); + dc->tmp2_i32 = tcg_temp_new_i32(tcg_ctx); + dc->tmp3_i32 = tcg_temp_new_i32(tcg_ctx); + dc->tmp4 = tcg_temp_new(tcg_ctx); + dc->ptr0 = tcg_temp_new_ptr(tcg_ctx); + dc->ptr1 = tcg_temp_new_ptr(tcg_ctx); + dc->cc_srcT = tcg_temp_local_new(tcg_ctx); +} + +static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu) +{ +} + +static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + dc->prev_pc = dc->base.pc_next; + tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, dc->cc_op); +} + +static bool i386_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, + const CPUBreakpoint *bp) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + + /* If RF is set, suppress an internally generated breakpoint. */ + int flags = dc->base.tb->flags & HF_RF_MASK ? BP_GDB : BP_ANY; + if (bp->flags & flags) { + gen_debug(dc, dc->base.pc_next - dc->cs_base); + dc->base.is_jmp = DISAS_NORETURN; + /* The address covered by the breakpoint must be included in + [tb->pc, tb->pc + tb->size) in order for it to be + properly cleared -- thus we increment the PC here so that + the generic logic setting tb->size later does the right thing.
*/ + dc->base.pc_next += 1; + return true; + } else { + return false; + } +} + +static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + target_ulong pc_next; + + pc_next = disas_insn(dc, cpu); + + if (dc->tf || (dc->base.tb->flags & HF_INHIBIT_IRQ_MASK)) { + /* in single step mode, we generate only one instruction and + then generate an exception */ + /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear + the flag and abort the translation to give the irqs a + chance to happen */ + dc->base.is_jmp = DISAS_TOO_MANY; + } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT) + && ((pc_next & TARGET_PAGE_MASK) + != ((pc_next + TARGET_MAX_INSN_SIZE - 1) + & TARGET_PAGE_MASK) + || (pc_next & ~TARGET_PAGE_MASK) == 0)) { + /* Do not cross a page boundary in icount mode, since it can + cause an exception. Stop only when the boundary is crossed + by the first instruction in the block. If the current + instruction already crossed the boundary, that is fine, + because an exception has not stopped this code. + */ + dc->base.is_jmp = DISAS_TOO_MANY; + } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) { + dc->base.is_jmp = DISAS_TOO_MANY; + } + + dc->base.pc_next = pc_next; +} + +static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + + if (dc->base.is_jmp == DISAS_TOO_MANY) { + gen_jmp_im(dc, dc->base.pc_next - dc->cs_base); + gen_eob(dc); + } +} + +static const TranslatorOps i386_tr_ops = { + .init_disas_context = i386_tr_init_disas_context, + .tb_start = i386_tr_tb_start, + .insn_start = i386_tr_insn_start, + .breakpoint_check = i386_tr_breakpoint_check, + .translate_insn = i386_tr_translate_insn, + .tb_stop = i386_tr_tb_stop, +}; + +/* generate intermediate code for basic block 'tb'.
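+ The DisasContext is zero-initialized so the Unicorn-specific fields start from a known state before translator_loop() fills in the rest.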
*/ +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) +{ + DisasContext dc; + + memset(&dc, 0, sizeof(dc)); + translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns); +} + +void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, + target_ulong *data) +{ + int cc_op = data[1]; + env->eip = data[0] - tb->cs_base; + if (cc_op != CC_OP_DYNAMIC) { + env->cc_op = cc_op; + } +} diff --git a/qemu/target/i386/unicorn.c b/qemu/target/i386/unicorn.c new file mode 100644 index 00000000..7b66800f --- /dev/null +++ b/qemu/target/i386/unicorn.c @@ -0,0 +1,1601 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh, 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + +#include "uc_priv.h" +#include "sysemu/cpus.h" +#include "cpu.h" +#include "unicorn_common.h" +#include <unicorn/x86.h> /* needed for uc_x86_mmr */ +#include "unicorn.h" + +#define FPST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) + +#define X86_NON_CS_FLAGS (DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK) +static void load_seg_16_helper(CPUX86State *env, int seg, uint32_t selector) +{ + cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, X86_NON_CS_FLAGS); +} + + +void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f); +floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper); + +extern void helper_wrmsr(CPUX86State *env); +extern void helper_rdmsr(CPUX86State *env); + +static void x86_set_pc(struct uc_struct *uc, uint64_t address) +{ + if (uc->mode == UC_MODE_16) { + int16_t cs = (uint16_t)X86_CPU(uc->cpu)->env.segs[R_CS].selector; + ((CPUX86State *)uc->cpu->env_ptr)->eip = address - cs*16; + } else + ((CPUX86State *)uc->cpu->env_ptr)->eip = address; +} + +static void x86_release(void *ctx) +{ + int i; + TCGContext *tcg_ctx = (TCGContext *)ctx; + X86CPU *cpu = (X86CPU *)tcg_ctx->uc->cpu; + CPUTLBDesc *d = cpu->neg.tlb.d; + CPUTLBDescFast *f = cpu->neg.tlb.f; + CPUTLBDesc *desc; + CPUTLBDescFast *fast; + X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); + + release_common(ctx); + for (i = 0; i < NB_MMU_MODES; i++) { + desc = &(d[i]); + fast = &(f[i]); + g_free(desc->iotlb); + g_free(fast->table); + } + + free(xcc->model); +} + +void x86_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env = uc->cpu->env_ptr; + + env->features[FEAT_1_EDX] = CPUID_CX8 | CPUID_CMOV | CPUID_SSE2 | CPUID_FXSR | CPUID_SSE | CPUID_CLFLUSH; + env->features[FEAT_1_ECX] = CPUID_EXT_SSSE3 | CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_AES | CPUID_EXT_CX16; + env->features[FEAT_8000_0001_EDX] = CPUID_EXT2_3DNOW | CPUID_EXT2_RDTSCP; + env->features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_SKINIT | CPUID_EXT3_CR8LEG; + env->features[FEAT_7_0_EBX] = CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP; + + memset(env->regs, 0, sizeof(env->regs)); + memset(env->segs, 0, sizeof(env->segs)); + memset(env->cr, 0, sizeof(env->cr)); + + memset(&env->ldt, 0, sizeof(env->ldt)); + memset(&env->gdt, 0, sizeof(env->gdt)); + memset(&env->tr, 0, sizeof(env->tr)); + memset(&env->idt, 0, sizeof(env->idt)); + + env->eip = 0; + env->eflags = 0; + env->cc_op = CC_OP_EFLAGS; + + env->fpstt = 0; /* top of stack index */ + env->fpus = 0; + env->fpuc = 0; + memset(env->fptags, 0, sizeof(env->fptags)); /* 0 = valid, 1 = empty */ + + env->mxcsr = 0; + memset(env->xmm_regs, 0, sizeof(env->xmm_regs)); + memset(&env->xmm_t0, 0, sizeof(env->xmm_t0)); + memset(&env->mmx_t0, 0, sizeof(env->mmx_t0)); + + memset(env->ymmh_regs, 0, sizeof(env->ymmh_regs)); + 
memset(env->opmask_regs, 0, sizeof(env->opmask_regs)); + memset(env->zmmh_regs, 0, sizeof(env->zmmh_regs)); + + /* sysenter registers */ + env->sysenter_cs = 0; + env->sysenter_esp = 0; + env->sysenter_eip = 0; + env->efer = 0; + env->star = 0; + + env->vm_hsave = 0; + + env->tsc = 0; + env->tsc_adjust = 0; + env->tsc_deadline = 0; + + env->mcg_status = 0; + env->msr_ia32_misc_enable = 0; + env->msr_ia32_feature_control = 0; + + env->msr_fixed_ctr_ctrl = 0; + env->msr_global_ctrl = 0; + env->msr_global_status = 0; + env->msr_global_ovf_ctrl = 0; + memset(env->msr_fixed_counters, 0, sizeof(env->msr_fixed_counters)); + memset(env->msr_gp_counters, 0, sizeof(env->msr_gp_counters)); + memset(env->msr_gp_evtsel, 0, sizeof(env->msr_gp_evtsel)); + +#ifdef TARGET_X86_64 + memset(env->hi16_zmm_regs, 0, sizeof(env->hi16_zmm_regs)); + env->lstar = 0; + env->cstar = 0; + env->fmask = 0; + env->kernelgsbase = 0; +#endif + + // TODO: reset other registers in CPUX86State qemu/target-i386/cpu.h + + // properly initialize internal setup for each mode + switch(uc->mode) { + default: + break; + case UC_MODE_16: + env->hflags = 0; + env->cr[0] = 0; + //undo the damage done by the memset of env->segs above + //for R_CS, not quite the same as x86_cpu_reset + cpu_x86_load_seg_cache(env, R_CS, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | + DESC_R_MASK | DESC_A_MASK); + //remainder yields same state as x86_cpu_reset + load_seg_16_helper(env, R_DS, 0); + load_seg_16_helper(env, R_ES, 0); + load_seg_16_helper(env, R_SS, 0); + load_seg_16_helper(env, R_FS, 0); + load_seg_16_helper(env, R_GS, 0); + + break; + case UC_MODE_32: + env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_OSFXSR_MASK; + cpu_x86_update_cr0(env, CR0_PE_MASK); // protected mode + break; + case UC_MODE_64: + env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_LMA_MASK | HF_OSFXSR_MASK; + env->hflags &= ~(HF_ADDSEG_MASK); + env->efer |= MSR_EFER_LMA | MSR_EFER_LME; // extended mode activated + cpu_x86_update_cr0(env, CR0_PE_MASK); // protected mode + /* If we are operating in 64bit mode then add the Long Mode flag + * to the CPUID feature flag + */ + env->features[FEAT_8000_0001_EDX] |= CPUID_EXT2_LM; + break; + } +} + +static int x86_msr_read(CPUX86State *env, uc_x86_msr *msr) +{ + uint64_t ecx = env->regs[R_ECX]; + uint64_t eax = env->regs[R_EAX]; + uint64_t edx = env->regs[R_EDX]; + + env->regs[R_ECX] = msr->rid; + helper_rdmsr(env); + + msr->value = ((uint32_t)env->regs[R_EAX]) | + ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); + + env->regs[R_EAX] = eax; + env->regs[R_ECX] = ecx; + env->regs[R_EDX] = edx; + + /* The helper implementation doesn't throw an exception or return an error if one occurs, so + * we will return 0. */ + return 0; +} + +static int x86_msr_write(CPUX86State *env, uc_x86_msr *msr) +{ + uint64_t ecx = env->regs[R_ECX]; + uint64_t eax = env->regs[R_EAX]; + uint64_t edx = env->regs[R_EDX]; + + env->regs[R_ECX] = msr->rid; + env->regs[R_EAX] = (unsigned int)msr->value; + env->regs[R_EDX] = (unsigned int)(msr->value >> 32); + helper_wrmsr(env); + + env->regs[R_ECX] = ecx; + env->regs[R_EAX] = eax; + env->regs[R_EDX] = edx; + + /* The helper implementation doesn't throw an exception or return an error if one occurs, so + * we will return 0.
*/ + return 0; +} + +static void reg_read(CPUX86State *env, unsigned int regid, void *value, uc_mode mode) +{ + switch(regid) { + default: + break; + case UC_X86_REG_FP0: + case UC_X86_REG_FP1: + case UC_X86_REG_FP2: + case UC_X86_REG_FP3: + case UC_X86_REG_FP4: + case UC_X86_REG_FP5: + case UC_X86_REG_FP6: + case UC_X86_REG_FP7: + { + floatx80 reg = env->fpregs[regid - UC_X86_REG_FP0].d; + cpu_get_fp80(value, (uint16_t*)((char*)value+sizeof(uint64_t)), reg); + } + return; + case UC_X86_REG_FPSW: + { + uint16_t fpus = env->fpus; + fpus = fpus & ~0x3800; + fpus |= ( env->fpstt & 0x7 ) << 11; + *(uint16_t*) value = fpus; + } + return; + case UC_X86_REG_FPCW: + *(uint16_t*) value = env->fpuc; + return; + case UC_X86_REG_FPTAG: + { + #define EXPD(fp) (fp.l.upper & 0x7fff) + #define MANTD(fp) (fp.l.lower) + #define MAXEXPD 0x7fff + int fptag, exp, i; + uint64_t mant; + CPU_LDoubleU tmp; + fptag = 0; + for (i = 7; i >= 0; i--) { + fptag <<= 2; + if (env->fptags[i]) { + fptag |= 3; + } else { + tmp.d = env->fpregs[i].d; + exp = EXPD(tmp); + mant = MANTD(tmp); + if (exp == 0 && mant == 0) { + /* zero */ + fptag |= 1; + } else if (exp == 0 || exp == MAXEXPD + || (mant & (1LL << 63)) == 0) { + /* NaNs, infinity, denormal */ + fptag |= 2; + } + } + } + *(uint16_t*) value = fptag; + } + return; + case UC_X86_REG_XMM0: + case UC_X86_REG_XMM1: + case UC_X86_REG_XMM2: + case UC_X86_REG_XMM3: + case UC_X86_REG_XMM4: + case UC_X86_REG_XMM5: + case UC_X86_REG_XMM6: + case UC_X86_REG_XMM7: + { + float64 *dst = (float64*)value; + XMMReg *reg = (XMMReg *)&env->xmm_regs[regid - UC_X86_REG_XMM0]; + dst[0] = reg->_d[0]; + dst[1] = reg->_d[1]; + return; + } + case UC_X86_REG_ST0: + case UC_X86_REG_ST1: + case UC_X86_REG_ST2: + case UC_X86_REG_ST3: + case UC_X86_REG_ST4: + case UC_X86_REG_ST5: + case UC_X86_REG_ST6: + case UC_X86_REG_ST7: + { + // value must be big enough to keep 80 bits (10 bytes) + memcpy(value, &FPST(regid - UC_X86_REG_ST0), 10); + return; + } + case UC_X86_REG_YMM0: + case UC_X86_REG_YMM1: + case UC_X86_REG_YMM2: + case UC_X86_REG_YMM3: + case UC_X86_REG_YMM4: + case UC_X86_REG_YMM5: + case UC_X86_REG_YMM6: + case UC_X86_REG_YMM7: + case UC_X86_REG_YMM8: + case UC_X86_REG_YMM9: + case UC_X86_REG_YMM10: + case UC_X86_REG_YMM11: + case UC_X86_REG_YMM12: + case UC_X86_REG_YMM13: + case UC_X86_REG_YMM14: + case UC_X86_REG_YMM15: + { + float64 *dst = (float64*)value; + XMMReg *lo_reg = (XMMReg *)&env->xmm_regs[regid - UC_X86_REG_YMM0]; + XMMReg *hi_reg = &env->ymmh_regs[regid - UC_X86_REG_YMM0]; + dst[0] = lo_reg->_d[0]; + dst[1] = lo_reg->_d[1]; + dst[2] = hi_reg->_d[0]; + dst[3] = hi_reg->_d[1]; + return; + } + } + + switch(mode) { + default: + break; + case UC_MODE_16: + switch(regid) { + default: break; + case UC_X86_REG_ES: + *(int16_t *)value = env->segs[R_ES].selector; + return; + case UC_X86_REG_SS: + *(int16_t *)value = env->segs[R_SS].selector; + return; + case UC_X86_REG_DS: + *(int16_t *)value = env->segs[R_DS].selector; + return; + case UC_X86_REG_FS: + *(int16_t *)value = env->segs[R_FS].selector; + return; + case UC_X86_REG_GS: + *(int16_t *)value = env->segs[R_GS].selector; + return; + case UC_X86_REG_FS_BASE: + *(uint32_t *)value = (uint32_t)env->segs[R_FS].base; + return; + } + // fall-thru + case UC_MODE_32: + switch(regid) { + default: + break; + case UC_X86_REG_CR0: + case UC_X86_REG_CR1: + case UC_X86_REG_CR2: + case UC_X86_REG_CR3: + case UC_X86_REG_CR4: + *(int32_t *)value = env->cr[regid - UC_X86_REG_CR0]; + break; + case UC_X86_REG_DR0: + case UC_X86_REG_DR1: + case 
UC_X86_REG_DR2: + case UC_X86_REG_DR3: + case UC_X86_REG_DR4: + case UC_X86_REG_DR5: + case UC_X86_REG_DR6: + case UC_X86_REG_DR7: + *(int32_t *)value = env->dr[regid - UC_X86_REG_DR0]; + break; + case UC_X86_REG_FLAGS: + *(int16_t *)value = cpu_compute_eflags(env); + break; + case UC_X86_REG_EFLAGS: + *(int32_t *)value = cpu_compute_eflags(env); + break; + case UC_X86_REG_EAX: + *(int32_t *)value = env->regs[R_EAX]; + break; + case UC_X86_REG_AX: + *(int16_t *)value = READ_WORD(env->regs[R_EAX]); + break; + case UC_X86_REG_AH: + *(int8_t *)value = READ_BYTE_H(env->regs[R_EAX]); + break; + case UC_X86_REG_AL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_EAX]); + break; + case UC_X86_REG_EBX: + *(int32_t *)value = env->regs[R_EBX]; + break; + case UC_X86_REG_BX: + *(int16_t *)value = READ_WORD(env->regs[R_EBX]); + break; + case UC_X86_REG_BH: + *(int8_t *)value = READ_BYTE_H(env->regs[R_EBX]); + break; + case UC_X86_REG_BL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_EBX]); + break; + case UC_X86_REG_ECX: + *(int32_t *)value = env->regs[R_ECX]; + break; + case UC_X86_REG_CX: + *(int16_t *)value = READ_WORD(env->regs[R_ECX]); + break; + case UC_X86_REG_CH: + *(int8_t *)value = READ_BYTE_H(env->regs[R_ECX]); + break; + case UC_X86_REG_CL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_ECX]); + break; + case UC_X86_REG_EDX: + *(int32_t *)value = env->regs[R_EDX]; + break; + case UC_X86_REG_DX: + *(int16_t *)value = READ_WORD(env->regs[R_EDX]); + break; + case UC_X86_REG_DH: + *(int8_t *)value = READ_BYTE_H(env->regs[R_EDX]); + break; + case UC_X86_REG_DL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_EDX]); + break; + case UC_X86_REG_ESP: + *(int32_t *)value = env->regs[R_ESP]; + break; + case UC_X86_REG_SP: + *(int16_t *)value = READ_WORD(env->regs[R_ESP]); + break; + case UC_X86_REG_EBP: + *(int32_t *)value = env->regs[R_EBP]; + break; + case UC_X86_REG_BP: + *(int16_t *)value = READ_WORD(env->regs[R_EBP]); + break; + case UC_X86_REG_ESI: + *(int32_t *)value = env->regs[R_ESI]; + break; + case UC_X86_REG_SI: + *(int16_t *)value = READ_WORD(env->regs[R_ESI]); + break; + case UC_X86_REG_EDI: + *(int32_t *)value = env->regs[R_EDI]; + break; + case UC_X86_REG_DI: + *(int16_t *)value = READ_WORD(env->regs[R_EDI]); + break; + case UC_X86_REG_EIP: + *(int32_t *)value = env->eip; + break; + case UC_X86_REG_IP: + *(int16_t *)value = READ_WORD(env->eip); + break; + case UC_X86_REG_CS: + *(int16_t *)value = (uint16_t)env->segs[R_CS].selector; + break; + case UC_X86_REG_DS: + *(int16_t *)value = (uint16_t)env->segs[R_DS].selector; + break; + case UC_X86_REG_SS: + *(int16_t *)value = (uint16_t)env->segs[R_SS].selector; + break; + case UC_X86_REG_ES: + *(int16_t *)value = (uint16_t)env->segs[R_ES].selector; + break; + case UC_X86_REG_FS: + *(int16_t *)value = (uint16_t)env->segs[R_FS].selector; + break; + case UC_X86_REG_GS: + *(int16_t *)value = (uint16_t)env->segs[R_GS].selector; + break; + case UC_X86_REG_IDTR: + ((uc_x86_mmr *)value)->limit = (uint16_t)env->idt.limit; + ((uc_x86_mmr *)value)->base = (uint32_t)env->idt.base; + break; + case UC_X86_REG_GDTR: + ((uc_x86_mmr *)value)->limit = (uint16_t)env->gdt.limit; + ((uc_x86_mmr *)value)->base = (uint32_t)env->gdt.base; + break; + case UC_X86_REG_LDTR: + ((uc_x86_mmr *)value)->limit = env->ldt.limit; + ((uc_x86_mmr *)value)->base = (uint32_t)env->ldt.base; + ((uc_x86_mmr *)value)->selector = (uint16_t)env->ldt.selector; + ((uc_x86_mmr *)value)->flags = env->ldt.flags; + break; + case UC_X86_REG_TR: + ((uc_x86_mmr *)value)->limit = env->tr.limit; + 
((uc_x86_mmr *)value)->base = (uint32_t)env->tr.base; + ((uc_x86_mmr *)value)->selector = (uint16_t)env->tr.selector; + ((uc_x86_mmr *)value)->flags = env->tr.flags; + break; + case UC_X86_REG_MSR: + x86_msr_read(env, (uc_x86_msr *)value); + break; + case UC_X86_REG_MXCSR: + *(uint32_t *)value = env->mxcsr; + break; + case UC_X86_REG_FS_BASE: + *(uint32_t *)value = (uint32_t)env->segs[R_FS].base; + break; + } + break; + +#ifdef TARGET_X86_64 + case UC_MODE_64: + switch(regid) { + default: + break; + case UC_X86_REG_CR0: + case UC_X86_REG_CR1: + case UC_X86_REG_CR2: + case UC_X86_REG_CR3: + case UC_X86_REG_CR4: + *(int64_t *)value = env->cr[regid - UC_X86_REG_CR0]; + break; + case UC_X86_REG_DR0: + case UC_X86_REG_DR1: + case UC_X86_REG_DR2: + case UC_X86_REG_DR3: + case UC_X86_REG_DR4: + case UC_X86_REG_DR5: + case UC_X86_REG_DR6: + case UC_X86_REG_DR7: + *(int64_t *)value = env->dr[regid - UC_X86_REG_DR0]; + break; + case UC_X86_REG_FLAGS: + *(int16_t *)value = cpu_compute_eflags(env); + break; + case UC_X86_REG_EFLAGS: + *(int32_t *)value = cpu_compute_eflags(env); + break; + case UC_X86_REG_RFLAGS: + *(int64_t *)value = cpu_compute_eflags(env); + break; + case UC_X86_REG_RAX: + *(uint64_t *)value = env->regs[R_EAX]; + break; + case UC_X86_REG_EAX: + *(int32_t *)value = READ_DWORD(env->regs[R_EAX]); + break; + case UC_X86_REG_AX: + *(int16_t *)value = READ_WORD(env->regs[R_EAX]); + break; + case UC_X86_REG_AH: + *(int8_t *)value = READ_BYTE_H(env->regs[R_EAX]); + break; + case UC_X86_REG_AL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_EAX]); + break; + case UC_X86_REG_RBX: + *(uint64_t *)value = env->regs[R_EBX]; + break; + case UC_X86_REG_EBX: + *(int32_t *)value = READ_DWORD(env->regs[R_EBX]); + break; + case UC_X86_REG_BX: + *(int16_t *)value = READ_WORD(env->regs[R_EBX]); + break; + case UC_X86_REG_BH: + *(int8_t *)value = READ_BYTE_H(env->regs[R_EBX]); + break; + case UC_X86_REG_BL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_EBX]); + break; + case UC_X86_REG_RCX: + *(uint64_t *)value = env->regs[R_ECX]; + break; + case UC_X86_REG_ECX: + *(int32_t *)value = READ_DWORD(env->regs[R_ECX]); + break; + case UC_X86_REG_CX: + *(int16_t *)value = READ_WORD(env->regs[R_ECX]); + break; + case UC_X86_REG_CH: + *(int8_t *)value = READ_BYTE_H(env->regs[R_ECX]); + break; + case UC_X86_REG_CL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_ECX]); + break; + case UC_X86_REG_RDX: + *(uint64_t *)value = env->regs[R_EDX]; + break; + case UC_X86_REG_EDX: + *(int32_t *)value = READ_DWORD(env->regs[R_EDX]); + break; + case UC_X86_REG_DX: + *(int16_t *)value = READ_WORD(env->regs[R_EDX]); + break; + case UC_X86_REG_DH: + *(int8_t *)value = READ_BYTE_H(env->regs[R_EDX]); + break; + case UC_X86_REG_DL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_EDX]); + break; + case UC_X86_REG_RSP: + *(uint64_t *)value = env->regs[R_ESP]; + break; + case UC_X86_REG_ESP: + *(int32_t *)value = READ_DWORD(env->regs[R_ESP]); + break; + case UC_X86_REG_SP: + *(int16_t *)value = READ_WORD(env->regs[R_ESP]); + break; + case UC_X86_REG_SPL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_ESP]); + break; + case UC_X86_REG_RBP: + *(uint64_t *)value = env->regs[R_EBP]; + break; + case UC_X86_REG_EBP: + *(int32_t *)value = READ_DWORD(env->regs[R_EBP]); + break; + case UC_X86_REG_BP: + *(int16_t *)value = READ_WORD(env->regs[R_EBP]); + break; + case UC_X86_REG_BPL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_EBP]); + break; + case UC_X86_REG_RSI: + *(uint64_t *)value = env->regs[R_ESI]; + break; + case UC_X86_REG_ESI: + *(int32_t 
*)value = READ_DWORD(env->regs[R_ESI]); + break; + case UC_X86_REG_SI: + *(int16_t *)value = READ_WORD(env->regs[R_ESI]); + break; + case UC_X86_REG_SIL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_ESI]); + break; + case UC_X86_REG_RDI: + *(uint64_t *)value = env->regs[R_EDI]; + break; + case UC_X86_REG_EDI: + *(int32_t *)value = READ_DWORD(env->regs[R_EDI]); + break; + case UC_X86_REG_DI: + *(int16_t *)value = READ_WORD(env->regs[R_EDI]); + break; + case UC_X86_REG_DIL: + *(int8_t *)value = READ_BYTE_L(env->regs[R_EDI]); + break; + case UC_X86_REG_RIP: + *(uint64_t *)value = env->eip; + break; + case UC_X86_REG_EIP: + *(int32_t *)value = READ_DWORD(env->eip); + break; + case UC_X86_REG_IP: + *(int16_t *)value = READ_WORD(env->eip); + break; + case UC_X86_REG_CS: + *(int16_t *)value = (uint16_t)env->segs[R_CS].selector; + break; + case UC_X86_REG_DS: + *(int16_t *)value = (uint16_t)env->segs[R_DS].selector; + break; + case UC_X86_REG_SS: + *(int16_t *)value = (uint16_t)env->segs[R_SS].selector; + break; + case UC_X86_REG_ES: + *(int16_t *)value = (uint16_t)env->segs[R_ES].selector; + break; + case UC_X86_REG_FS: + *(int16_t *)value = (uint16_t)env->segs[R_FS].selector; + break; + case UC_X86_REG_GS: + *(int16_t *)value = (uint16_t)env->segs[R_GS].selector; + break; + case UC_X86_REG_R8: + *(int64_t *)value = READ_QWORD(env->regs[8]); + break; + case UC_X86_REG_R8D: + *(int32_t *)value = READ_DWORD(env->regs[8]); + break; + case UC_X86_REG_R8W: + *(int16_t *)value = READ_WORD(env->regs[8]); + break; + case UC_X86_REG_R8B: + *(int8_t *)value = READ_BYTE_L(env->regs[8]); + break; + case UC_X86_REG_R9: + *(int64_t *)value = READ_QWORD(env->regs[9]); + break; + case UC_X86_REG_R9D: + *(int32_t *)value = READ_DWORD(env->regs[9]); + break; + case UC_X86_REG_R9W: + *(int16_t *)value = READ_WORD(env->regs[9]); + break; + case UC_X86_REG_R9B: + *(int8_t *)value = READ_BYTE_L(env->regs[9]); + break; + case UC_X86_REG_R10: + *(int64_t *)value = READ_QWORD(env->regs[10]); + break; + case UC_X86_REG_R10D: + *(int32_t *)value = READ_DWORD(env->regs[10]); + break; + case UC_X86_REG_R10W: + *(int16_t *)value = READ_WORD(env->regs[10]); + break; + case UC_X86_REG_R10B: + *(int8_t *)value = READ_BYTE_L(env->regs[10]); + break; + case UC_X86_REG_R11: + *(int64_t *)value = READ_QWORD(env->regs[11]); + break; + case UC_X86_REG_R11D: + *(int32_t *)value = READ_DWORD(env->regs[11]); + break; + case UC_X86_REG_R11W: + *(int16_t *)value = READ_WORD(env->regs[11]); + break; + case UC_X86_REG_R11B: + *(int8_t *)value = READ_BYTE_L(env->regs[11]); + break; + case UC_X86_REG_R12: + *(int64_t *)value = READ_QWORD(env->regs[12]); + break; + case UC_X86_REG_R12D: + *(int32_t *)value = READ_DWORD(env->regs[12]); + break; + case UC_X86_REG_R12W: + *(int16_t *)value = READ_WORD(env->regs[12]); + break; + case UC_X86_REG_R12B: + *(int8_t *)value = READ_BYTE_L(env->regs[12]); + break; + case UC_X86_REG_R13: + *(int64_t *)value = READ_QWORD(env->regs[13]); + break; + case UC_X86_REG_R13D: + *(int32_t *)value = READ_DWORD(env->regs[13]); + break; + case UC_X86_REG_R13W: + *(int16_t *)value = READ_WORD(env->regs[13]); + break; + case UC_X86_REG_R13B: + *(int8_t *)value = READ_BYTE_L(env->regs[13]); + break; + case UC_X86_REG_R14: + *(int64_t *)value = READ_QWORD(env->regs[14]); + break; + case UC_X86_REG_R14D: + *(int32_t *)value = READ_DWORD(env->regs[14]); + break; + case UC_X86_REG_R14W: + *(int16_t *)value = READ_WORD(env->regs[14]); + break; + case UC_X86_REG_R14B: + *(int8_t *)value = READ_BYTE_L(env->regs[14]); + break; + 
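+        /*
+         * R15 follows the same pattern as R8-R14 above: READ_QWORD, READ_DWORD,
+         * READ_WORD and READ_BYTE_L select the 64/32/16/8-bit view of the same
+         * env->regs slot. A minimal usage sketch through the public API from
+         * <unicorn/unicorn.h>:
+         *
+         *     uint64_t r15; uint16_t r15w;
+         *     uc_reg_read(uc, UC_X86_REG_R15,  &r15);   // whole register
+         *     uc_reg_read(uc, UC_X86_REG_R15W, &r15w);  // low 16 bits only
+         */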
case UC_X86_REG_R15: + *(int64_t *)value = READ_QWORD(env->regs[15]); + break; + case UC_X86_REG_R15D: + *(int32_t *)value = READ_DWORD(env->regs[15]); + break; + case UC_X86_REG_R15W: + *(int16_t *)value = READ_WORD(env->regs[15]); + break; + case UC_X86_REG_R15B: + *(int8_t *)value = READ_BYTE_L(env->regs[15]); + break; + case UC_X86_REG_IDTR: + ((uc_x86_mmr *)value)->limit = (uint16_t)env->idt.limit; + ((uc_x86_mmr *)value)->base = env->idt.base; + break; + case UC_X86_REG_GDTR: + ((uc_x86_mmr *)value)->limit = (uint16_t)env->gdt.limit; + ((uc_x86_mmr *)value)->base = env->gdt.base; + break; + case UC_X86_REG_LDTR: + ((uc_x86_mmr *)value)->limit = env->ldt.limit; + ((uc_x86_mmr *)value)->base = env->ldt.base; + ((uc_x86_mmr *)value)->selector = (uint16_t)env->ldt.selector; + ((uc_x86_mmr *)value)->flags = env->ldt.flags; + break; + case UC_X86_REG_TR: + ((uc_x86_mmr *)value)->limit = env->tr.limit; + ((uc_x86_mmr *)value)->base = env->tr.base; + ((uc_x86_mmr *)value)->selector = (uint16_t)env->tr.selector; + ((uc_x86_mmr *)value)->flags = env->tr.flags; + break; + case UC_X86_REG_MSR: + x86_msr_read(env, (uc_x86_msr *)value); + break; + case UC_X86_REG_MXCSR: + *(uint32_t *)value = env->mxcsr; + break; + case UC_X86_REG_XMM8: + case UC_X86_REG_XMM9: + case UC_X86_REG_XMM10: + case UC_X86_REG_XMM11: + case UC_X86_REG_XMM12: + case UC_X86_REG_XMM13: + case UC_X86_REG_XMM14: + case UC_X86_REG_XMM15: + { + float64 *dst = (float64*)value; + XMMReg *reg = (XMMReg *)&env->xmm_regs[regid - UC_X86_REG_XMM0]; + dst[0] = reg->_d[0]; + dst[1] = reg->_d[1]; + break; + } + case UC_X86_REG_FS_BASE: + *(uint64_t *)value = (uint64_t)env->segs[R_FS].base; + break; + case UC_X86_REG_GS_BASE: + *(uint64_t *)value = (uint64_t)env->segs[R_GS].base; + break; + } + break; +#endif + } + + return; +} + +static int reg_write(CPUX86State *env, unsigned int regid, const void *value, uc_mode mode) +{ + int ret; + + switch(regid) { + default: + break; + case UC_X86_REG_FP0: + case UC_X86_REG_FP1: + case UC_X86_REG_FP2: + case UC_X86_REG_FP3: + case UC_X86_REG_FP4: + case UC_X86_REG_FP5: + case UC_X86_REG_FP6: + case UC_X86_REG_FP7: + { + uint64_t mant = *(uint64_t*) value; + uint16_t upper = *(uint16_t*) ((char*)value + sizeof(uint64_t)); + env->fpregs[regid - UC_X86_REG_FP0].d = cpu_set_fp80(mant, upper); + } + return 0; + case UC_X86_REG_FPSW: + { + uint16_t fpus = *(uint16_t*) value; + env->fpus = fpus & ~0x3800; + env->fpstt = (fpus >> 11) & 0x7; + } + return 0; + case UC_X86_REG_FPCW: + cpu_set_fpuc(env, *(uint16_t *)value); + return 0; + case UC_X86_REG_FPTAG: + { + int i; + uint16_t fptag = *(uint16_t*) value; + for (i = 0; i < 8; i++) { + env->fptags[i] = ((fptag & 3) == 3); + fptag >>= 2; + } + + return 0; + } + break; + case UC_X86_REG_XMM0: + case UC_X86_REG_XMM1: + case UC_X86_REG_XMM2: + case UC_X86_REG_XMM3: + case UC_X86_REG_XMM4: + case UC_X86_REG_XMM5: + case UC_X86_REG_XMM6: + case UC_X86_REG_XMM7: + { + float64 *src = (float64*)value; + XMMReg *reg = (XMMReg *)&env->xmm_regs[regid - UC_X86_REG_XMM0]; + reg->_d[0] = src[0]; + reg->_d[1] = src[1]; + return 0; + } + case UC_X86_REG_ST0: + case UC_X86_REG_ST1: + case UC_X86_REG_ST2: + case UC_X86_REG_ST3: + case UC_X86_REG_ST4: + case UC_X86_REG_ST5: + case UC_X86_REG_ST6: + case UC_X86_REG_ST7: + { + // value must be big enough to keep 80 bits (10 bytes) + memcpy(&FPST(regid - UC_X86_REG_ST0), value, 10); + return 0; + } + case UC_X86_REG_YMM0: + case UC_X86_REG_YMM1: + case UC_X86_REG_YMM2: + case UC_X86_REG_YMM3: + case UC_X86_REG_YMM4: + case 
UC_X86_REG_YMM5: + case UC_X86_REG_YMM6: + case UC_X86_REG_YMM7: + case UC_X86_REG_YMM8: + case UC_X86_REG_YMM9: + case UC_X86_REG_YMM10: + case UC_X86_REG_YMM11: + case UC_X86_REG_YMM12: + case UC_X86_REG_YMM13: + case UC_X86_REG_YMM14: + case UC_X86_REG_YMM15: + { + float64 *src = (float64*)value; + XMMReg *lo_reg = (XMMReg *)&env->xmm_regs[regid - UC_X86_REG_YMM0]; + XMMReg *hi_reg = &env->ymmh_regs[regid - UC_X86_REG_YMM0]; + lo_reg->_d[0] = src[0]; + lo_reg->_d[1] = src[1]; + hi_reg->_d[0] = src[2]; + hi_reg->_d[1] = src[3]; + return 0; + } + } + + switch(mode) { + default: + break; + + case UC_MODE_16: + switch(regid) { + default: break; + case UC_X86_REG_ES: + load_seg_16_helper(env, R_ES, *(uint16_t *)value); + return 0; + case UC_X86_REG_SS: + load_seg_16_helper(env, R_SS, *(uint16_t *)value); + return 0; + case UC_X86_REG_DS: + load_seg_16_helper(env, R_DS, *(uint16_t *)value); + return 0; + case UC_X86_REG_FS: + load_seg_16_helper(env, R_FS, *(uint16_t *)value); + return 0; + case UC_X86_REG_GS: + load_seg_16_helper(env, R_GS, *(uint16_t *)value); + return 0; + } + // fall-thru + case UC_MODE_32: + switch(regid) { + default: + break; + case UC_X86_REG_CR0: + case UC_X86_REG_CR1: + case UC_X86_REG_CR2: + case UC_X86_REG_CR3: + case UC_X86_REG_CR4: + env->cr[regid - UC_X86_REG_CR0] = *(uint32_t *)value; + break; + case UC_X86_REG_DR0: + case UC_X86_REG_DR1: + case UC_X86_REG_DR2: + case UC_X86_REG_DR3: + case UC_X86_REG_DR4: + case UC_X86_REG_DR5: + case UC_X86_REG_DR6: + case UC_X86_REG_DR7: + env->dr[regid - UC_X86_REG_DR0] = *(uint32_t *)value; + break; + case UC_X86_REG_FLAGS: + cpu_load_eflags(env, *(uint16_t*)value, -1); + break; + case UC_X86_REG_EFLAGS: + cpu_load_eflags(env, *(uint32_t *)value, -1); + break; + case UC_X86_REG_EAX: + env->regs[R_EAX] = *(uint32_t *)value; + break; + case UC_X86_REG_AX: + WRITE_WORD(env->regs[R_EAX], *(uint16_t *)value); + break; + case UC_X86_REG_AH: + WRITE_BYTE_H(env->regs[R_EAX], *(uint8_t *)value); + break; + case UC_X86_REG_AL: + WRITE_BYTE_L(env->regs[R_EAX], *(uint8_t *)value); + break; + case UC_X86_REG_EBX: + env->regs[R_EBX] = *(uint32_t *)value; + break; + case UC_X86_REG_BX: + WRITE_WORD(env->regs[R_EBX], *(uint16_t *)value); + break; + case UC_X86_REG_BH: + WRITE_BYTE_H(env->regs[R_EBX], *(uint8_t *)value); + break; + case UC_X86_REG_BL: + WRITE_BYTE_L(env->regs[R_EBX], *(uint8_t *)value); + break; + case UC_X86_REG_ECX: + env->regs[R_ECX] = *(uint32_t *)value; + break; + case UC_X86_REG_CX: + WRITE_WORD(env->regs[R_ECX], *(uint16_t *)value); + break; + case UC_X86_REG_CH: + WRITE_BYTE_H(env->regs[R_ECX], *(uint8_t *)value); + break; + case UC_X86_REG_CL: + WRITE_BYTE_L(env->regs[R_ECX], *(uint8_t *)value); + break; + case UC_X86_REG_EDX: + env->regs[R_EDX] = *(uint32_t *)value; + break; + case UC_X86_REG_DX: + WRITE_WORD(env->regs[R_EDX], *(uint16_t *)value); + break; + case UC_X86_REG_DH: + WRITE_BYTE_H(env->regs[R_EDX], *(uint8_t *)value); + break; + case UC_X86_REG_DL: + WRITE_BYTE_L(env->regs[R_EDX], *(uint8_t *)value); + break; + case UC_X86_REG_ESP: + env->regs[R_ESP] = *(uint32_t *)value; + break; + case UC_X86_REG_SP: + WRITE_WORD(env->regs[R_ESP], *(uint16_t *)value); + break; + case UC_X86_REG_EBP: + env->regs[R_EBP] = *(uint32_t *)value; + break; + case UC_X86_REG_BP: + WRITE_WORD(env->regs[R_EBP], *(uint16_t *)value); + break; + case UC_X86_REG_ESI: + env->regs[R_ESI] = *(uint32_t *)value; + break; + case UC_X86_REG_SI: + WRITE_WORD(env->regs[R_ESI], *(uint16_t *)value); + break; + case UC_X86_REG_EDI: + 
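+        /* Full-width cases like EDI replace the whole register; the
+         * WRITE_WORD/WRITE_BYTE_* cases only patch the addressed bits. */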
env->regs[R_EDI] = *(uint32_t *)value; + break; + case UC_X86_REG_DI: + WRITE_WORD(env->regs[R_EDI], *(uint16_t *)value); + break; + case UC_X86_REG_EIP: + env->eip = *(uint32_t *)value; + break; + case UC_X86_REG_IP: + env->eip = *(uint16_t *)value; + break; + case UC_X86_REG_CS: + ret = uc_check_cpu_x86_load_seg(env, R_CS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(env, R_CS, *(uint16_t *)value); + break; + case UC_X86_REG_DS: + ret = uc_check_cpu_x86_load_seg(env, R_DS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(env, R_DS, *(uint16_t *)value); + break; + case UC_X86_REG_SS: + ret = uc_check_cpu_x86_load_seg(env, R_SS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(env, R_SS, *(uint16_t *)value); + break; + case UC_X86_REG_ES: + ret = uc_check_cpu_x86_load_seg(env, R_ES, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(env, R_ES, *(uint16_t *)value); + break; + case UC_X86_REG_FS: + ret = uc_check_cpu_x86_load_seg(env, R_FS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(env, R_FS, *(uint16_t *)value); + break; + case UC_X86_REG_GS: + ret = uc_check_cpu_x86_load_seg(env, R_GS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(env, R_GS, *(uint16_t *)value); + break; + case UC_X86_REG_IDTR: + env->idt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; + env->idt.base = (uint32_t)((uc_x86_mmr *)value)->base; + break; + case UC_X86_REG_GDTR: + env->gdt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; + env->gdt.base = (uint32_t)((uc_x86_mmr *)value)->base; + break; + case UC_X86_REG_LDTR: + env->ldt.limit = ((uc_x86_mmr *)value)->limit; + env->ldt.base = (uint32_t)((uc_x86_mmr *)value)->base; + env->ldt.selector = (uint16_t)((uc_x86_mmr *)value)->selector; + env->ldt.flags = ((uc_x86_mmr *)value)->flags; + break; + case UC_X86_REG_TR: + env->tr.limit = ((uc_x86_mmr *)value)->limit; + env->tr.base = (uint32_t)((uc_x86_mmr *)value)->base; + env->tr.selector = (uint16_t)((uc_x86_mmr *)value)->selector; + env->tr.flags = ((uc_x86_mmr *)value)->flags; + break; + case UC_X86_REG_MSR: + x86_msr_write(env, (uc_x86_msr *)value); + break; + case UC_X86_REG_MXCSR: + cpu_set_mxcsr(env, *(uint32_t *)value); + break; + /* + // Don't think base registers are a "thing" on x86 + case UC_X86_REG_FS_BASE: + env->segs[R_FS].base = *(uint32_t *)value; + continue; + case UC_X86_REG_GS_BASE: + env->segs[R_GS].base = *(uint32_t *)value; + continue; + */ + } + break; + +#ifdef TARGET_X86_64 + case UC_MODE_64: + switch(regid) { + default: + break; + case UC_X86_REG_CR0: + case UC_X86_REG_CR1: + case UC_X86_REG_CR2: + case UC_X86_REG_CR3: + case UC_X86_REG_CR4: + env->cr[regid - UC_X86_REG_CR0] = *(uint64_t *)value; + break; + case UC_X86_REG_DR0: + case UC_X86_REG_DR1: + case UC_X86_REG_DR2: + case UC_X86_REG_DR3: + case UC_X86_REG_DR4: + case UC_X86_REG_DR5: + case UC_X86_REG_DR6: + case UC_X86_REG_DR7: + env->dr[regid - UC_X86_REG_DR0] = *(uint64_t *)value; + break; + case UC_X86_REG_FLAGS: + cpu_load_eflags(env, *(uint16_t*)value, -1); + break; + case UC_X86_REG_EFLAGS: + cpu_load_eflags(env, *(uint32_t *)value, -1); + break; + case UC_X86_REG_RFLAGS: + cpu_load_eflags(env, *(uint64_t *)value, -1); + break; + case UC_X86_REG_RAX: + env->regs[R_EAX] = *(uint64_t *)value; + break; + case UC_X86_REG_EAX: + WRITE_DWORD(env->regs[R_EAX], *(uint32_t *)value); + break; + case UC_X86_REG_AX: + WRITE_WORD(env->regs[R_EAX], *(uint16_t *)value); + break; + case UC_X86_REG_AH: + 
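+        /*
+         * Partial writes patch bits in place (AH is bits 15:8 of RAX). Note
+         * that, as these macros appear to behave, a 32-bit write such as EAX
+         * above does not zero-extend into the upper half the way the
+         * corresponding CPU instruction would.
+         */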
WRITE_BYTE_H(env->regs[R_EAX], *(uint8_t *)value); + break; + case UC_X86_REG_AL: + WRITE_BYTE_L(env->regs[R_EAX], *(uint8_t *)value); + break; + case UC_X86_REG_RBX: + env->regs[R_EBX] = *(uint64_t *)value; + break; + case UC_X86_REG_EBX: + WRITE_DWORD(env->regs[R_EBX], *(uint32_t *)value); + break; + case UC_X86_REG_BX: + WRITE_WORD(env->regs[R_EBX], *(uint16_t *)value); + break; + case UC_X86_REG_BH: + WRITE_BYTE_H(env->regs[R_EBX], *(uint8_t *)value); + break; + case UC_X86_REG_BL: + WRITE_BYTE_L(env->regs[R_EBX], *(uint8_t *)value); + break; + case UC_X86_REG_RCX: + env->regs[R_ECX] = *(uint64_t *)value; + break; + case UC_X86_REG_ECX: + WRITE_DWORD(env->regs[R_ECX], *(uint32_t *)value); + break; + case UC_X86_REG_CX: + WRITE_WORD(env->regs[R_ECX], *(uint16_t *)value); + break; + case UC_X86_REG_CH: + WRITE_BYTE_H(env->regs[R_ECX], *(uint8_t *)value); + break; + case UC_X86_REG_CL: + WRITE_BYTE_L(env->regs[R_ECX], *(uint8_t *)value); + break; + case UC_X86_REG_RDX: + env->regs[R_EDX] = *(uint64_t *)value; + break; + case UC_X86_REG_EDX: + WRITE_DWORD(env->regs[R_EDX], *(uint32_t *)value); + break; + case UC_X86_REG_DX: + WRITE_WORD(env->regs[R_EDX], *(uint16_t *)value); + break; + case UC_X86_REG_DH: + WRITE_BYTE_H(env->regs[R_EDX], *(uint8_t *)value); + break; + case UC_X86_REG_DL: + WRITE_BYTE_L(env->regs[R_EDX], *(uint8_t *)value); + break; + case UC_X86_REG_RSP: + env->regs[R_ESP] = *(uint64_t *)value; + break; + case UC_X86_REG_ESP: + WRITE_DWORD(env->regs[R_ESP], *(uint32_t *)value); + break; + case UC_X86_REG_SP: + WRITE_WORD(env->regs[R_ESP], *(uint16_t *)value); + break; + case UC_X86_REG_SPL: + WRITE_BYTE_L(env->regs[R_ESP], *(uint8_t *)value); + break; + case UC_X86_REG_RBP: + env->regs[R_EBP] = *(uint64_t *)value; + break; + case UC_X86_REG_EBP: + WRITE_DWORD(env->regs[R_EBP], *(uint32_t *)value); + break; + case UC_X86_REG_BP: + WRITE_WORD(env->regs[R_EBP], *(uint16_t *)value); + break; + case UC_X86_REG_BPL: + WRITE_BYTE_L(env->regs[R_EBP], *(uint8_t *)value); + break; + case UC_X86_REG_RSI: + env->regs[R_ESI] = *(uint64_t *)value; + break; + case UC_X86_REG_ESI: + WRITE_DWORD(env->regs[R_ESI], *(uint32_t *)value); + break; + case UC_X86_REG_SI: + WRITE_WORD(env->regs[R_ESI], *(uint16_t *)value); + break; + case UC_X86_REG_SIL: + WRITE_BYTE_L(env->regs[R_ESI], *(uint8_t *)value); + break; + case UC_X86_REG_RDI: + env->regs[R_EDI] = *(uint64_t *)value; + break; + case UC_X86_REG_EDI: + WRITE_DWORD(env->regs[R_EDI], *(uint32_t *)value); + break; + case UC_X86_REG_DI: + WRITE_WORD(env->regs[R_EDI], *(uint16_t *)value); + break; + case UC_X86_REG_DIL: + WRITE_BYTE_L(env->regs[R_EDI], *(uint8_t *)value); + break; + case UC_X86_REG_RIP: + env->eip = *(uint64_t *)value; + break; + case UC_X86_REG_EIP: + env->eip = *(uint32_t *)value; + break; + case UC_X86_REG_IP: + WRITE_WORD(env->eip, *(uint16_t *)value); + break; + case UC_X86_REG_CS: + env->segs[R_CS].selector = *(uint16_t *)value; + break; + case UC_X86_REG_DS: + env->segs[R_DS].selector = *(uint16_t *)value; + break; + case UC_X86_REG_SS: + env->segs[R_SS].selector = *(uint16_t *)value; + break; + case UC_X86_REG_ES: + env->segs[R_ES].selector = *(uint16_t *)value; + break; + case UC_X86_REG_FS: + ret = uc_check_cpu_x86_load_seg(env, R_FS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(env, R_FS, *(uint16_t *)value); + break; + case UC_X86_REG_GS: + ret = uc_check_cpu_x86_load_seg(env, R_GS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(env, R_GS, *(uint16_t *)value); + 
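+            /* Unlike CS/DS/SS/ES above, which are raw selector writes in
+             * 64-bit mode, FS and GS go through uc_check_cpu_x86_load_seg()
+             * and a full segment load, so the cached base is refreshed. */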
break; + case UC_X86_REG_R8: + env->regs[8] = *(uint64_t *)value; + break; + case UC_X86_REG_R8D: + WRITE_DWORD(env->regs[8], *(uint32_t *)value); + break; + case UC_X86_REG_R8W: + WRITE_WORD(env->regs[8], *(uint16_t *)value); + break; + case UC_X86_REG_R8B: + WRITE_BYTE_L(env->regs[8], *(uint8_t *)value); + break; + case UC_X86_REG_R9: + env->regs[9] = *(uint64_t *)value; + break; + case UC_X86_REG_R9D: + WRITE_DWORD(env->regs[9], *(uint32_t *)value); + break; + case UC_X86_REG_R9W: + WRITE_WORD(env->regs[9], *(uint16_t *)value); + break; + case UC_X86_REG_R9B: + WRITE_BYTE_L(env->regs[9], *(uint8_t *)value); + break; + case UC_X86_REG_R10: + env->regs[10] = *(uint64_t *)value; + break; + case UC_X86_REG_R10D: + WRITE_DWORD(env->regs[10], *(uint32_t *)value); + break; + case UC_X86_REG_R10W: + WRITE_WORD(env->regs[10], *(uint16_t *)value); + break; + case UC_X86_REG_R10B: + WRITE_BYTE_L(env->regs[10], *(uint8_t *)value); + break; + case UC_X86_REG_R11: + env->regs[11] = *(uint64_t *)value; + break; + case UC_X86_REG_R11D: + WRITE_DWORD(env->regs[11], *(uint32_t *)value); + break; + case UC_X86_REG_R11W: + WRITE_WORD(env->regs[11], *(uint16_t *)value); + break; + case UC_X86_REG_R11B: + WRITE_BYTE_L(env->regs[11], *(uint8_t *)value); + break; + case UC_X86_REG_R12: + env->regs[12] = *(uint64_t *)value; + break; + case UC_X86_REG_R12D: + WRITE_DWORD(env->regs[12], *(uint32_t *)value); + break; + case UC_X86_REG_R12W: + WRITE_WORD(env->regs[12], *(uint16_t *)value); + break; + case UC_X86_REG_R12B: + WRITE_BYTE_L(env->regs[12], *(uint8_t *)value); + break; + case UC_X86_REG_R13: + env->regs[13] = *(uint64_t *)value; + break; + case UC_X86_REG_R13D: + WRITE_DWORD(env->regs[13], *(uint32_t *)value); + break; + case UC_X86_REG_R13W: + WRITE_WORD(env->regs[13], *(uint16_t *)value); + break; + case UC_X86_REG_R13B: + WRITE_BYTE_L(env->regs[13], *(uint8_t *)value); + break; + case UC_X86_REG_R14: + env->regs[14] = *(uint64_t *)value; + break; + case UC_X86_REG_R14D: + WRITE_DWORD(env->regs[14], *(uint32_t *)value); + break; + case UC_X86_REG_R14W: + WRITE_WORD(env->regs[14], *(uint16_t *)value); + break; + case UC_X86_REG_R14B: + WRITE_BYTE_L(env->regs[14], *(uint8_t *)value); + break; + case UC_X86_REG_R15: + env->regs[15] = *(uint64_t *)value; + break; + case UC_X86_REG_R15D: + WRITE_DWORD(env->regs[15], *(uint32_t *)value); + break; + case UC_X86_REG_R15W: + WRITE_WORD(env->regs[15], *(uint16_t *)value); + break; + case UC_X86_REG_R15B: + WRITE_BYTE_L(env->regs[15], *(uint8_t *)value); + break; + case UC_X86_REG_IDTR: + env->idt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; + env->idt.base = ((uc_x86_mmr *)value)->base; + break; + case UC_X86_REG_GDTR: + env->gdt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; + env->gdt.base = ((uc_x86_mmr *)value)->base; + break; + case UC_X86_REG_LDTR: + env->ldt.limit = ((uc_x86_mmr *)value)->limit; + env->ldt.base = ((uc_x86_mmr *)value)->base; + env->ldt.selector = (uint16_t)((uc_x86_mmr *)value)->selector; + env->ldt.flags = ((uc_x86_mmr *)value)->flags; + break; + case UC_X86_REG_TR: + env->tr.limit = ((uc_x86_mmr *)value)->limit; + env->tr.base = ((uc_x86_mmr *)value)->base; + env->tr.selector = (uint16_t)((uc_x86_mmr *)value)->selector; + env->tr.flags = ((uc_x86_mmr *)value)->flags; + break; + case UC_X86_REG_MSR: + x86_msr_write(env, (uc_x86_msr *)value); + break; + case UC_X86_REG_MXCSR: + cpu_set_mxcsr(env, *(uint32_t *)value); + break; + case UC_X86_REG_XMM8: + case UC_X86_REG_XMM9: + case UC_X86_REG_XMM10: + case UC_X86_REG_XMM11: + case 
UC_X86_REG_XMM12: + case UC_X86_REG_XMM13: + case UC_X86_REG_XMM14: + case UC_X86_REG_XMM15: + { + float64 *src = (float64*)value; + XMMReg *reg = (XMMReg *)&env->xmm_regs[regid - UC_X86_REG_XMM0]; + reg->_d[0] = src[0]; + reg->_d[1] = src[1]; + break; + } + case UC_X86_REG_FS_BASE: + env->segs[R_FS].base = *(uint64_t *)value; + return 0; + case UC_X86_REG_GS_BASE: + env->segs[R_GS].base = *(uint64_t *)value; + return 0; + } + break; +#endif + } + + return 0; +} + +int x86_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUX86State *env = &(X86_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value, uc->mode); + } + + return 0; +} + +int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) +{ + CPUX86State* env = &(X86_CPU(uc->cpu)->env); + int i; + int ret; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + ret = reg_write(env, regid, value, uc->mode); + if (ret){ + return ret; + } + switch(uc->mode) { + default: + break; + case UC_MODE_32: + switch(regid) { + default: + break; + case UC_X86_REG_EIP: + case UC_X86_REG_IP: + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + } + +#ifdef TARGET_X86_64 + case UC_MODE_64: + switch(regid) { + default: + break; + case UC_X86_REG_RIP: + case UC_X86_REG_EIP: + case UC_X86_REG_IP: + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + } +#endif + } + } + + return 0; +} + +DEFAULT_VISIBILITY +int x86_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +{ + CPUX86State *env = (CPUX86State *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value, ctx->mode); + } + + return 0; +} + +DEFAULT_VISIBILITY +int x86_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +{ + CPUX86State *env = (CPUX86State *)ctx->data; + int i; + int ret; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + ret = reg_write(env, regid, value, ctx->mode); + if (ret){ + return ret; + } + } + + return 0; +} + +static bool x86_stop_interrupt(struct uc_struct *uc, int intno) +{ + switch(intno) { + default: + return false; + case EXCP06_ILLOP: + return true; + } +} + +static bool x86_insn_hook_validate(uint32_t insn_enum) +{ + //for x86 we can only hook IN, OUT, and SYSCALL + if (insn_enum != UC_X86_INS_IN + && insn_enum != UC_X86_INS_OUT + && insn_enum != UC_X86_INS_SYSCALL + && insn_enum != UC_X86_INS_SYSENTER) { + return false; + } + return true; +} + +static int x86_cpus_init(struct uc_struct *uc, const char *cpu_model) +{ + + X86CPU *cpu; + + cpu = cpu_x86_init(uc, cpu_model); + if (cpu == NULL) { + return -1; + } + + return 0; +} + +DEFAULT_VISIBILITY +void x86_uc_init(struct uc_struct* uc) +{ + uc->reg_read = x86_reg_read; + uc->reg_write = x86_reg_write; + uc->reg_reset = x86_reg_reset; + uc->release = x86_release; + uc->set_pc = x86_set_pc; + uc->stop_interrupt = x86_stop_interrupt; + uc->insn_hook_validate = x86_insn_hook_validate; + uc->cpus_init = x86_cpus_init; + uc->cpu_context_size = offsetof(CPUX86State, retaddr); + uc_common_init(uc); +} + +/* vim: set ts=4 sts=4 sw=4 et: */ diff --git a/qemu/target-i386/unicorn.h b/qemu/target/i386/unicorn.h similarity index 62% rename from 
qemu/target-i386/unicorn.h rename to qemu/target/i386/unicorn.h index cb292001..b07be9d2 100644 --- a/qemu/target-i386/unicorn.h +++ b/qemu/target/i386/unicorn.h @@ -1,5 +1,6 @@ /* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ #ifndef UC_QEMU_TARGET_I386_H #define UC_QEMU_TARGET_I386_H @@ -7,11 +8,10 @@ // functions to read & write registers int x86_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); +int x86_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int x86_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); void x86_reg_reset(struct uc_struct *uc); void x86_uc_init(struct uc_struct* uc); -int x86_uc_machine_init(struct uc_struct *uc); - -extern const int X86_REGS_STORAGE_SIZE; #endif diff --git a/qemu/target/i386/xsave_helper.c b/qemu/target/i386/xsave_helper.c new file mode 100644 index 00000000..818115e7 --- /dev/null +++ b/qemu/target/i386/xsave_helper.c @@ -0,0 +1,112 @@ +/* + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" + +#include "cpu.h" + +void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf) +{ + CPUX86State *env = &cpu->env; + X86XSaveArea *xsave = buf; + + uint16_t cwd, swd, twd; + int i; + memset(xsave, 0, sizeof(X86XSaveArea)); + twd = 0; + swd = env->fpus & ~(7 << 11); + swd |= (env->fpstt & 7) << 11; + cwd = env->fpuc; + for (i = 0; i < 8; ++i) { + twd |= (!env->fptags[i]) << i; + } + xsave->legacy.fcw = cwd; + xsave->legacy.fsw = swd; + xsave->legacy.ftw = twd; + xsave->legacy.fpop = env->fpop; + xsave->legacy.fpip = env->fpip; + xsave->legacy.fpdp = env->fpdp; + memcpy(&xsave->legacy.fpregs, env->fpregs, + sizeof env->fpregs); + xsave->legacy.mxcsr = env->mxcsr; + xsave->header.xstate_bv = env->xstate_bv; + memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs, + sizeof env->bnd_regs); + xsave->bndcsr_state.bndcsr = env->bndcs_regs; + memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs, + sizeof env->opmask_regs); + + for (i = 0; i < CPU_NB_REGS; i++) { + uint8_t *xmm = xsave->legacy.xmm_regs[i]; + uint8_t *ymmh = xsave->avx_state.ymmh[i]; + uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i]; + stq_p(xmm, env->xmm_regs[i].ZMM_Q(0)); + stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1)); + stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2)); + stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3)); + stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4)); + stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5)); + stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6)); + stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7)); + } + +#ifdef TARGET_X86_64 + memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16], + 16 * sizeof env->xmm_regs[16]); + memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru); +#endif + +} + +void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf) +{ + + CPUX86State *env = &cpu->env; + const X86XSaveArea *xsave = buf; + + int i; + uint16_t cwd, swd, twd; + cwd = xsave->legacy.fcw; + swd = xsave->legacy.fsw; + twd = xsave->legacy.ftw; + env->fpop = xsave->legacy.fpop; + env->fpstt = (swd >> 11) & 7; + env->fpus = swd; + env->fpuc = cwd; + for (i = 0; i < 8; ++i) { + env->fptags[i] = !((twd >> i) & 1); + } + env->fpip = xsave->legacy.fpip; + env->fpdp = xsave->legacy.fpdp; + env->mxcsr = xsave->legacy.mxcsr; + memcpy(env->fpregs, 
&xsave->legacy.fpregs, + sizeof env->fpregs); + env->xstate_bv = xsave->header.xstate_bv; + memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs, + sizeof env->bnd_regs); + env->bndcs_regs = xsave->bndcsr_state.bndcsr; + memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs, + sizeof env->opmask_regs); + + for (i = 0; i < CPU_NB_REGS; i++) { + const uint8_t *xmm = xsave->legacy.xmm_regs[i]; + const uint8_t *ymmh = xsave->avx_state.ymmh[i]; + const uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i]; + env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm); + env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8); + env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh); + env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8); + env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh); + env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8); + env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16); + env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24); + } + +#ifdef TARGET_X86_64 + memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm, + 16 * sizeof env->xmm_regs[16]); + memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru); +#endif + +} diff --git a/qemu/target/m68k/cpu-param.h b/qemu/target/m68k/cpu-param.h new file mode 100644 index 00000000..06556dfb --- /dev/null +++ b/qemu/target/m68k/cpu-param.h @@ -0,0 +1,22 @@ +/* + * m68k cpu parameters for qemu. + * + * Copyright (c) 2005-2007 CodeSourcery + * SPDX-License-Identifier: LGPL-2.0+ + */ + +#ifndef M68K_CPU_PARAM_H +#define M68K_CPU_PARAM_H 1 + +#define TARGET_LONG_BITS 32 +/* + * Coldfire Linux uses 8k pages + * and m68k linux uses 4k pages + * use the smallest one + */ +#define TARGET_PAGE_BITS 12 +#define TARGET_PHYS_ADDR_SPACE_BITS 32 +#define TARGET_VIRT_ADDR_SPACE_BITS 32 +#define NB_MMU_MODES 2 + +#endif diff --git a/qemu/target/m68k/cpu-qom.h b/qemu/target/m68k/cpu-qom.h new file mode 100644 index 00000000..36ea6732 --- /dev/null +++ b/qemu/target/m68k/cpu-qom.h @@ -0,0 +1,46 @@ +/* + * QEMU Motorola 68k CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ +#ifndef QEMU_M68K_CPU_QOM_H +#define QEMU_M68K_CPU_QOM_H + +#include "hw/core/cpu.h" + +#define M68K_CPU(obj) ((M68kCPU *)obj) +#define M68K_CPU_CLASS(klass) ((M68kCPUClass *)klass) +#define M68K_CPU_GET_CLASS(obj) (&((M68kCPU *)obj)->cc) + +/* + * M68kCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * A Motorola 68k CPU model. 
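+ * (Only @parent_reset is kept as a class member in this port; realize is
+ * invoked directly from cpu_m68k_init() rather than through the class.)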
+ */ +typedef struct M68kCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + void (*parent_reset)(CPUState *cpu); +} M68kCPUClass; + +typedef struct M68kCPU M68kCPU; + +#endif diff --git a/qemu/target/m68k/cpu.c b/qemu/target/m68k/cpu.c new file mode 100644 index 00000000..535995f3 --- /dev/null +++ b/qemu/target/m68k/cpu.c @@ -0,0 +1,307 @@ +/* + * QEMU Motorola 68k CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "fpu/softfloat.h" +#include "exec/exec-all.h" + +static void m68k_cpu_set_pc(CPUState *cs, vaddr value) +{ + M68kCPU *cpu = M68K_CPU(cs); + + cpu->env.pc = value; +} + +static bool m68k_cpu_has_work(CPUState *cs) +{ + return cs->interrupt_request & CPU_INTERRUPT_HARD; +} + +static void m68k_set_feature(CPUM68KState *env, int feature) +{ + env->features |= (1u << feature); +} + +static void m68k_cpu_reset(CPUState *dev) +{ + CPUState *s = CPU(dev); + M68kCPU *cpu = M68K_CPU(s); + M68kCPUClass *mcc = M68K_CPU_GET_CLASS(cpu); + CPUM68KState *env = &cpu->env; + floatx80 nan = floatx80_default_nan(NULL); + int i; + + mcc->parent_reset(dev); + + memset(env, 0, offsetof(CPUM68KState, end_reset_fields)); + cpu_m68k_set_sr(env, SR_S | SR_I); + for (i = 0; i < 8; i++) { + env->fregs[i].d = nan; + } + cpu_m68k_set_fpcr(env, 0); + env->fpsr = 0; + + /* TODO: We should set PC from the interrupt vector. 
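+     * In Unicorn the begin address passed to uc_emu_start() overwrites pc
+     * before execution anyway, so the reset value is rarely observed.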
*/ + env->pc = 0; +} + +/* CPU models */ + +static void m5206_cpu_initfn(CPUState *obj) +{ + M68kCPU *cpu = M68K_CPU(obj); + CPUM68KState *env = &cpu->env; + + m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); +} + +static void m68000_cpu_initfn(CPUState *obj) +{ + M68kCPU *cpu = M68K_CPU(obj); + CPUM68KState *env = &cpu->env; + + m68k_set_feature(env, M68K_FEATURE_M68000); + m68k_set_feature(env, M68K_FEATURE_USP); + m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); + m68k_set_feature(env, M68K_FEATURE_MOVEP); +} + +/* common features for 68020, 68030 and 68040 */ +static void m680x0_cpu_common(CPUM68KState *env) +{ + m68k_set_feature(env, M68K_FEATURE_M68000); + m68k_set_feature(env, M68K_FEATURE_USP); + m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); + m68k_set_feature(env, M68K_FEATURE_QUAD_MULDIV); + m68k_set_feature(env, M68K_FEATURE_BRAL); + m68k_set_feature(env, M68K_FEATURE_BCCL); + m68k_set_feature(env, M68K_FEATURE_BITFIELD); + m68k_set_feature(env, M68K_FEATURE_EXT_FULL); + m68k_set_feature(env, M68K_FEATURE_SCALED_INDEX); + m68k_set_feature(env, M68K_FEATURE_LONG_MULDIV); + m68k_set_feature(env, M68K_FEATURE_FPU); + m68k_set_feature(env, M68K_FEATURE_CAS); + m68k_set_feature(env, M68K_FEATURE_BKPT); + m68k_set_feature(env, M68K_FEATURE_RTD); + m68k_set_feature(env, M68K_FEATURE_CHK2); + m68k_set_feature(env, M68K_FEATURE_MOVEP); +} + +static void m68020_cpu_initfn(CPUState *obj) +{ + M68kCPU *cpu = M68K_CPU(obj); + CPUM68KState *env = &cpu->env; + + m680x0_cpu_common(env); + m68k_set_feature(env, M68K_FEATURE_M68020); +} + +static void m68030_cpu_initfn(CPUState *obj) +{ + M68kCPU *cpu = M68K_CPU(obj); + CPUM68KState *env = &cpu->env; + + m680x0_cpu_common(env); + m68k_set_feature(env, M68K_FEATURE_M68030); +} + +static void m68040_cpu_initfn(CPUState *obj) +{ + M68kCPU *cpu = M68K_CPU(obj); + CPUM68KState *env = &cpu->env; + + m680x0_cpu_common(env); + m68k_set_feature(env, M68K_FEATURE_M68040); +} + +static void m68060_cpu_initfn(CPUState *obj) +{ + M68kCPU *cpu = M68K_CPU(obj); + CPUM68KState *env = &cpu->env; + + m68k_set_feature(env, M68K_FEATURE_M68000); + m68k_set_feature(env, M68K_FEATURE_USP); + m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); + m68k_set_feature(env, M68K_FEATURE_BRAL); + m68k_set_feature(env, M68K_FEATURE_BCCL); + m68k_set_feature(env, M68K_FEATURE_BITFIELD); + m68k_set_feature(env, M68K_FEATURE_EXT_FULL); + m68k_set_feature(env, M68K_FEATURE_SCALED_INDEX); + m68k_set_feature(env, M68K_FEATURE_LONG_MULDIV); + m68k_set_feature(env, M68K_FEATURE_FPU); + m68k_set_feature(env, M68K_FEATURE_CAS); + m68k_set_feature(env, M68K_FEATURE_BKPT); + m68k_set_feature(env, M68K_FEATURE_RTD); + m68k_set_feature(env, M68K_FEATURE_CHK2); + m68k_set_feature(env, M68K_FEATURE_M68060); +} + +static void m5208_cpu_initfn(CPUState *obj) +{ + M68kCPU *cpu = M68K_CPU(obj); + CPUM68KState *env = &cpu->env; + + m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); + m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC); + m68k_set_feature(env, M68K_FEATURE_BRAL); + m68k_set_feature(env, M68K_FEATURE_CF_EMAC); + m68k_set_feature(env, M68K_FEATURE_USP); +} + +static void cfv4e_cpu_initfn(CPUState *obj) +{ + M68kCPU *cpu = M68K_CPU(obj); + CPUM68KState *env = &cpu->env; + + m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); + m68k_set_feature(env, M68K_FEATURE_CF_ISA_B); + m68k_set_feature(env, M68K_FEATURE_BRAL); + m68k_set_feature(env, M68K_FEATURE_CF_FPU); + m68k_set_feature(env, M68K_FEATURE_CF_EMAC); + m68k_set_feature(env, M68K_FEATURE_USP); +} + +static void any_cpu_initfn(CPUState *obj) +{ + 
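+    /* "any" is the catch-all ColdFire model: ISA A, B and A+/C extensions
+     * plus the ColdFire FPU and EMAC. */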
+    M68kCPU *cpu = M68K_CPU(obj);
+    CPUM68KState *env = &cpu->env;
+
+    m68k_set_feature(env, M68K_FEATURE_CF_ISA_A);
+    m68k_set_feature(env, M68K_FEATURE_CF_ISA_B);
+    m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC);
+    m68k_set_feature(env, M68K_FEATURE_BRAL);
+    m68k_set_feature(env, M68K_FEATURE_CF_FPU);
+    /*
+     * MAC and EMAC are mutually exclusive, so pick EMAC.
+     * It's mostly backwards compatible.
+     */
+    m68k_set_feature(env, M68K_FEATURE_CF_EMAC);
+    m68k_set_feature(env, M68K_FEATURE_CF_EMAC_B);
+    m68k_set_feature(env, M68K_FEATURE_USP);
+    m68k_set_feature(env, M68K_FEATURE_EXT_FULL);
+    m68k_set_feature(env, M68K_FEATURE_WORD_INDEX);
+}
+
+static void m68k_cpu_realizefn(CPUState *dev)
+{
+    CPUState *cs = CPU(dev);
+    M68kCPU *cpu = M68K_CPU(dev);
+
+    register_m68k_insns(&cpu->env);
+    cpu_exec_realizefn(cs);
+}
+
+static void m68k_cpu_initfn(struct uc_struct *uc, CPUState *obj)
+{
+    M68kCPU *cpu = M68K_CPU(obj);
+    CPUM68KState *env = &cpu->env;
+
+    env->uc = uc;
+    cpu_set_cpustate_pointers(cpu);
+}
+
+static void m68k_cpu_class_init(CPUClass *c)
+{
+    M68kCPUClass *mcc = M68K_CPU_CLASS(c);
+    CPUClass *cc = CPU_CLASS(c);
+
+    /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */
+    mcc->parent_reset = cc->reset;
+    /* overwrite the CPUClass->reset to arch reset: m68k_cpu_reset(). */
+    cc->reset = m68k_cpu_reset;
+    cc->has_work = m68k_cpu_has_work;
+    cc->do_interrupt = m68k_cpu_do_interrupt;
+    cc->cpu_exec_interrupt = m68k_cpu_exec_interrupt;
+    cc->set_pc = m68k_cpu_set_pc;
+    cc->tlb_fill = m68k_cpu_tlb_fill;
+    cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
+    cc->tcg_initialize = m68k_tcg_init;
+}
+
+#define DEFINE_M68K_CPU_TYPE(cpu_model, initfn) \
+    {                                           \
+        .name = cpu_model,                      \
+        .initfn = initfn,                       \
+    }
+
+struct M68kCPUInfo {
+    const char *name;
+    void (*initfn)(CPUState *obj);
+};
+
+static struct M68kCPUInfo m68k_cpus_type_infos[] = {
+    { "m68000", m68000_cpu_initfn },
+    { "m68020", m68020_cpu_initfn },
+    { "m68030", m68030_cpu_initfn },
+    { "m68040", m68040_cpu_initfn },
+    { "m68060", m68060_cpu_initfn },
+    { "m5206", m5206_cpu_initfn },
+    { "m5208", m5208_cpu_initfn },
+    { "cfv4e", cfv4e_cpu_initfn },
+    { "any", any_cpu_initfn },
+};
+
+M68kCPU *cpu_m68k_init(struct uc_struct *uc, const char *cpu_model)
+{
+    M68kCPU *cpu;
+    CPUState *cs;
+    CPUClass *cc;
+    int i;
+
+    if (cpu_model == NULL) {
+        cpu_model = "cfv4e";
+    }
+
+    cpu = calloc(1, sizeof(*cpu));
+    if (cpu == NULL) {
+        return NULL;
+    }
+
+    cs = (CPUState *)cpu;
+    cc = (CPUClass *)&cpu->cc;
+    cs->cc = cc;
+    cs->uc = uc;
+    uc->cpu = cs;
+
+    cpu_class_init(uc, cc);
+
+    m68k_cpu_class_init(cc);
+
+    cpu_common_initfn(uc, cs);
+
+    m68k_cpu_initfn(uc, cs);
+
+    for (i = 0; i < ARRAY_SIZE(m68k_cpus_type_infos); i++) {
+        if (strcasecmp(cpu_model, m68k_cpus_type_infos[i].name) == 0) {
+            m68k_cpus_type_infos[i].initfn(cs);
+            break;
+        }
+    }
+
+    m68k_cpu_realizefn(cs);
+
+    // init address space
+    cpu_address_space_init(cs, 0, cs->memory);
+
+    qemu_init_vcpu(cs);
+
+    return cpu;
+}
diff --git a/qemu/target/m68k/cpu.h b/qemu/target/m68k/cpu.h
new file mode 100644
index 00000000..f137e9a5
--- /dev/null
+++ b/qemu/target/m68k/cpu.h
@@ -0,0 +1,563 @@
+/*
+ * m68k virtual CPU header
+ *
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Written by Paul Brook
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef M68K_CPU_H +#define M68K_CPU_H + +#include "exec/cpu-defs.h" +#include "cpu-qom.h" + +#define OS_BYTE 0 +#define OS_WORD 1 +#define OS_LONG 2 +#define OS_SINGLE 3 +#define OS_DOUBLE 4 +#define OS_EXTENDED 5 +#define OS_PACKED 6 +#define OS_UNSIZED 7 + +#define MAX_QREGS 32 + +#define EXCP_ACCESS 2 /* Access (MMU) error. */ +#define EXCP_ADDRESS 3 /* Address error. */ +#define EXCP_ILLEGAL 4 /* Illegal instruction. */ +#define EXCP_DIV0 5 /* Divide by zero */ +#define EXCP_CHK 6 /* CHK, CHK2 Instructions */ +#define EXCP_TRAPCC 7 /* FTRAPcc, TRAPcc, TRAPV Instructions */ +#define EXCP_PRIVILEGE 8 /* Privilege violation. */ +#define EXCP_TRACE 9 +#define EXCP_LINEA 10 /* Unimplemented line-A (MAC) opcode. */ +#define EXCP_LINEF 11 /* Unimplemented line-F (FPU) opcode. */ +#define EXCP_DEBUGNBP 12 /* Non-breakpoint debug interrupt. */ +#define EXCP_DEBEGBP 13 /* Breakpoint debug interrupt. */ +#define EXCP_FORMAT 14 /* RTE format error. */ +#define EXCP_UNINITIALIZED 15 +#define EXCP_SPURIOUS 24 /* Spurious interrupt */ +#define EXCP_INT_LEVEL_1 25 /* Level 1 Interrupt autovector */ +#define EXCP_INT_LEVEL_7 31 /* Level 7 Interrupt autovector */ +#define EXCP_TRAP0 32 /* User trap #0. */ +#define EXCP_TRAP15 47 /* User trap #15. */ +#define EXCP_FP_BSUN 48 /* Branch Set on Unordered */ +#define EXCP_FP_INEX 49 /* Inexact result */ +#define EXCP_FP_DZ 50 /* Divide by Zero */ +#define EXCP_FP_UNFL 51 /* Underflow */ +#define EXCP_FP_OPERR 52 /* Operand Error */ +#define EXCP_FP_OVFL 53 /* Overflow */ +#define EXCP_FP_SNAN 54 /* Signaling Not-A-Number */ +#define EXCP_FP_UNIMP 55 /* Unimplemented Data type */ +#define EXCP_MMU_CONF 56 /* MMU Configuration Error */ +#define EXCP_MMU_ILLEGAL 57 /* MMU Illegal Operation Error */ +#define EXCP_MMU_ACCESS 58 /* MMU Access Level Violation Error */ + +#define EXCP_RTE 0x100 +#define EXCP_HALT_INSN 0x101 + +#define M68K_DTTR0 0 +#define M68K_DTTR1 1 +#define M68K_ITTR0 2 +#define M68K_ITTR1 3 + +#define M68K_MAX_TTR 2 +#define TTR(type, index) ttr[((type & ACCESS_CODE) == ACCESS_CODE) * 2 + index] + +#define TARGET_INSN_START_EXTRA_WORDS 1 + +typedef CPU_LDoubleU FPReg; + +typedef struct CPUM68KState { + uint32_t dregs[8]; + uint32_t aregs[8]; + uint32_t pc; + uint32_t sr; + + /* SSP and USP. The current_sp is stored in aregs[7], the other here. */ + int current_sp; + uint32_t sp[3]; + + /* Condition flags. */ + uint32_t cc_op; + uint32_t cc_x; /* always 0/1 */ + uint32_t cc_n; /* in bit 31 (i.e. negative) */ + uint32_t cc_v; /* in bit 31, unused, or computed from cc_n and cc_v */ + uint32_t cc_c; /* either 0/1, unused, or computed from cc_n and cc_v */ + uint32_t cc_z; /* == 0 or unused */ + + FPReg fregs[8]; + FPReg fp_result; + uint32_t fpcr; + uint32_t fpsr; + float_status fp_status; + + uint64_t mactmp; + /* + * EMAC Hardware deals with 48-bit values composed of one 32-bit and + * two 8-bit parts. We store a single 64-bit value and + * rearrange/extend this when changing modes. + */ + uint64_t macc[4]; + uint32_t macsr; + uint32_t mac_mask; + + /* MMU status. 
*/ + struct { + uint32_t ar; + uint32_t ssw; + /* 68040 */ + uint16_t tcr; + uint32_t urp; + uint32_t srp; + bool fault; + uint32_t ttr[4]; + uint32_t mmusr; + } mmu; + + /* Control registers. */ + uint32_t vbr; + uint32_t mbar; + uint32_t rambar0; + uint32_t cacr; + uint32_t sfc; + uint32_t dfc; + + int pending_vector; + int pending_level; + + uint32_t qregs[MAX_QREGS]; + + /* Fields up to this point are cleared by a CPU reset */ + int end_reset_fields; + + /* Fields from here on are preserved across CPU reset. */ + uint32_t features; + + // Unicorn engine + struct uc_struct *uc; +} CPUM68KState; + +/* + * M68kCPU: + * @env: #CPUM68KState + * + * A Motorola 68k CPU. + */ +struct M68kCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUNegativeOffsetState neg; + CPUM68KState env; + + struct M68kCPUClass cc; +}; + + +void m68k_cpu_do_interrupt(CPUState *cpu); +bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req); +hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); + +void m68k_tcg_init(struct uc_struct *uc); +/* + * you can call this signal handler from your SIGBUS and SIGSEGV + * signal handlers to inform the virtual CPU of exceptions. non zero + * is returned if the signal was handled by the virtual CPU. + */ +int cpu_m68k_signal_handler(int host_signum, void *pinfo, + void *puc); +uint32_t cpu_m68k_get_ccr(CPUM68KState *env); +void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t); +void cpu_m68k_set_sr(CPUM68KState *env, uint32_t); +void cpu_m68k_set_fpcr(CPUM68KState *env, uint32_t val); + + +/* + * Instead of computing the condition codes after each m68k instruction, + * QEMU just stores one operand (called CC_SRC), the result + * (called CC_DEST) and the type of operation (called CC_OP). When the + * condition codes are needed, the condition codes can be calculated + * using this information. Condition codes are not generated if they + * are only needed for conditional branches. + */ +typedef enum { + /* Translator only -- use env->cc_op. */ + CC_OP_DYNAMIC, + + /* Each flag bit computed into cc_[xcnvz]. */ + CC_OP_FLAGS, + + /* X in cc_x, C = X, N in cc_n, Z in cc_n, V via cc_n/cc_v. */ + CC_OP_ADDB, CC_OP_ADDW, CC_OP_ADDL, + CC_OP_SUBB, CC_OP_SUBW, CC_OP_SUBL, + + /* X in cc_x, {N,Z,C,V} via cc_n/cc_v. */ + CC_OP_CMPB, CC_OP_CMPW, CC_OP_CMPL, + + /* X in cc_x, C = 0, V = 0, N in cc_n, Z in cc_n. 
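+     * e.g. after AND/OR/EOR the raw result sits in cc_n; when a conditional
+     * branch finally needs the flags, Z is derived as (cc_n == 0) and N as
+     * bit 31 of cc_n.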
*/ + CC_OP_LOGIC, + + CC_OP_NB +} CCOp; + +#define CCF_C 0x01 +#define CCF_V 0x02 +#define CCF_Z 0x04 +#define CCF_N 0x08 +#define CCF_X 0x10 + +#define SR_I_SHIFT 8 +#define SR_I 0x0700 +#define SR_M 0x1000 +#define SR_S 0x2000 +#define SR_T_SHIFT 14 +#define SR_T 0xc000 + +#define M68K_SSP 0 +#define M68K_USP 1 +#define M68K_ISP 2 + +/* bits for 68040 special status word */ +#define M68K_CP_040 0x8000 +#define M68K_CU_040 0x4000 +#define M68K_CT_040 0x2000 +#define M68K_CM_040 0x1000 +#define M68K_MA_040 0x0800 +#define M68K_ATC_040 0x0400 +#define M68K_LK_040 0x0200 +#define M68K_RW_040 0x0100 +#define M68K_SIZ_040 0x0060 +#define M68K_TT_040 0x0018 +#define M68K_TM_040 0x0007 + +#define M68K_TM_040_DATA 0x0001 +#define M68K_TM_040_CODE 0x0002 +#define M68K_TM_040_SUPER 0x0004 + +/* bits for 68040 write back status word */ +#define M68K_WBV_040 0x80 +#define M68K_WBSIZ_040 0x60 +#define M68K_WBBYT_040 0x20 +#define M68K_WBWRD_040 0x40 +#define M68K_WBLNG_040 0x00 +#define M68K_WBTT_040 0x18 +#define M68K_WBTM_040 0x07 + +/* bus access size codes */ +#define M68K_BA_SIZE_MASK 0x60 +#define M68K_BA_SIZE_BYTE 0x20 +#define M68K_BA_SIZE_WORD 0x40 +#define M68K_BA_SIZE_LONG 0x00 +#define M68K_BA_SIZE_LINE 0x60 + +/* bus access transfer type codes */ +#define M68K_BA_TT_MOVE16 0x08 + +/* bits for 68040 MMU status register (mmusr) */ +#define M68K_MMU_B_040 0x0800 +#define M68K_MMU_G_040 0x0400 +#define M68K_MMU_U1_040 0x0200 +#define M68K_MMU_U0_040 0x0100 +#define M68K_MMU_S_040 0x0080 +#define M68K_MMU_CM_040 0x0060 +#define M68K_MMU_M_040 0x0010 +#define M68K_MMU_WP_040 0x0004 +#define M68K_MMU_T_040 0x0002 +#define M68K_MMU_R_040 0x0001 + +#define M68K_MMU_SR_MASK_040 (M68K_MMU_G_040 | M68K_MMU_U1_040 | \ + M68K_MMU_U0_040 | M68K_MMU_S_040 | \ + M68K_MMU_CM_040 | M68K_MMU_M_040 | \ + M68K_MMU_WP_040) + +/* bits for 68040 MMU Translation Control Register */ +#define M68K_TCR_ENABLED 0x8000 +#define M68K_TCR_PAGE_8K 0x4000 + +/* bits for 68040 MMU Table Descriptor / Page Descriptor / TTR */ +#define M68K_DESC_WRITEPROT 0x00000004 +#define M68K_DESC_USED 0x00000008 +#define M68K_DESC_MODIFIED 0x00000010 +#define M68K_DESC_CACHEMODE 0x00000060 +#define M68K_DESC_CM_WRTHRU 0x00000000 +#define M68K_DESC_CM_COPYBK 0x00000020 +#define M68K_DESC_CM_SERIAL 0x00000040 +#define M68K_DESC_CM_NCACHE 0x00000060 +#define M68K_DESC_SUPERONLY 0x00000080 +#define M68K_DESC_USERATTR 0x00000300 +#define M68K_DESC_USERATTR_SHIFT 8 +#define M68K_DESC_GLOBAL 0x00000400 +#define M68K_DESC_URESERVED 0x00000800 + +#define M68K_ROOT_POINTER_ENTRIES 128 +#define M68K_4K_PAGE_MASK (~0xff) +#define M68K_POINTER_BASE(entry) (entry & ~0x1ff) +#define M68K_ROOT_INDEX(addr) ((address >> 23) & 0x1fc) +#define M68K_POINTER_INDEX(addr) ((address >> 16) & 0x1fc) +#define M68K_4K_PAGE_BASE(entry) (next & M68K_4K_PAGE_MASK) +#define M68K_4K_PAGE_INDEX(addr) ((address >> 10) & 0xfc) +#define M68K_8K_PAGE_MASK (~0x7f) +#define M68K_8K_PAGE_BASE(entry) (next & M68K_8K_PAGE_MASK) +#define M68K_8K_PAGE_INDEX(addr) ((address >> 11) & 0x7c) +#define M68K_UDT_VALID(entry) (entry & 2) +#define M68K_PDT_VALID(entry) (entry & 3) +#define M68K_PDT_INDIRECT(entry) ((entry & 3) == 2) +#define M68K_INDIRECT_POINTER(addr) (addr & ~3) +#define M68K_TTS_POINTER_SHIFT 18 +#define M68K_TTS_ROOT_SHIFT 25 + +/* bits for 68040 MMU Transparent Translation Registers */ +#define M68K_TTR_ADDR_BASE 0xff000000 +#define M68K_TTR_ADDR_MASK 0x00ff0000 +#define M68K_TTR_ADDR_MASK_SHIFT 8 +#define M68K_TTR_ENABLED 0x00008000 +#define M68K_TTR_SFIELD 0x00006000 
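+/* S-field values (mask 0x6000): 00 matches user accesses, 01 supervisor;
+ * the 1x encodings match regardless of privilege. */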
+#define M68K_TTR_SFIELD_USER 0x0000 +#define M68K_TTR_SFIELD_SUPER 0x2000 + +/* m68k Control Registers */ + +/* ColdFire */ +/* Memory Management Control Registers */ +#define M68K_CR_ASID 0x003 +#define M68K_CR_ACR0 0x004 +#define M68K_CR_ACR1 0x005 +#define M68K_CR_ACR2 0x006 +#define M68K_CR_ACR3 0x007 +#define M68K_CR_MMUBAR 0x008 + +/* Processor Miscellaneous Registers */ +#define M68K_CR_PC 0x80F + +/* Local Memory and Module Control Registers */ +#define M68K_CR_ROMBAR0 0xC00 +#define M68K_CR_ROMBAR1 0xC01 +#define M68K_CR_RAMBAR0 0xC04 +#define M68K_CR_RAMBAR1 0xC05 +#define M68K_CR_MPCR 0xC0C +#define M68K_CR_EDRAMBAR 0xC0D +#define M68K_CR_SECMBAR 0xC0E +#define M68K_CR_MBAR 0xC0F + +/* Local Memory Address Permutation Control Registers */ +#define M68K_CR_PCR1U0 0xD02 +#define M68K_CR_PCR1L0 0xD03 +#define M68K_CR_PCR2U0 0xD04 +#define M68K_CR_PCR2L0 0xD05 +#define M68K_CR_PCR3U0 0xD06 +#define M68K_CR_PCR3L0 0xD07 +#define M68K_CR_PCR1U1 0xD0A +#define M68K_CR_PCR1L1 0xD0B +#define M68K_CR_PCR2U1 0xD0C +#define M68K_CR_PCR2L1 0xD0D +#define M68K_CR_PCR3U1 0xD0E +#define M68K_CR_PCR3L1 0xD0F + +/* MC680x0 */ +/* MC680[1234]0/CPU32 */ +#define M68K_CR_SFC 0x000 +#define M68K_CR_DFC 0x001 +#define M68K_CR_USP 0x800 +#define M68K_CR_VBR 0x801 /* + Coldfire */ + +/* MC680[234]0 */ +#define M68K_CR_CACR 0x002 /* + Coldfire */ +#define M68K_CR_CAAR 0x802 /* MC68020 and MC68030 only */ +#define M68K_CR_MSP 0x803 +#define M68K_CR_ISP 0x804 + +/* MC68040/MC68LC040 */ +#define M68K_CR_TC 0x003 +#define M68K_CR_ITT0 0x004 +#define M68K_CR_ITT1 0x005 +#define M68K_CR_DTT0 0x006 +#define M68K_CR_DTT1 0x007 +#define M68K_CR_MMUSR 0x805 +#define M68K_CR_URP 0x806 +#define M68K_CR_SRP 0x807 + +/* MC68EC040 */ +#define M68K_CR_IACR0 0x004 +#define M68K_CR_IACR1 0x005 +#define M68K_CR_DACR0 0x006 +#define M68K_CR_DACR1 0x007 + +#define M68K_FPIAR_SHIFT 0 +#define M68K_FPIAR (1 << M68K_FPIAR_SHIFT) +#define M68K_FPSR_SHIFT 1 +#define M68K_FPSR (1 << M68K_FPSR_SHIFT) +#define M68K_FPCR_SHIFT 2 +#define M68K_FPCR (1 << M68K_FPCR_SHIFT) + +/* Floating-Point Status Register */ + +/* Condition Code */ +#define FPSR_CC_MASK 0x0f000000 +#define FPSR_CC_A 0x01000000 /* Not-A-Number */ +#define FPSR_CC_I 0x02000000 /* Infinity */ +#define FPSR_CC_Z 0x04000000 /* Zero */ +#define FPSR_CC_N 0x08000000 /* Negative */ + +/* Quotient */ + +#define FPSR_QT_MASK 0x00ff0000 +#define FPSR_QT_SHIFT 16 + +/* Floating-Point Control Register */ +/* Rounding mode */ +#define FPCR_RND_MASK 0x0030 +#define FPCR_RND_N 0x0000 +#define FPCR_RND_Z 0x0010 +#define FPCR_RND_M 0x0020 +#define FPCR_RND_P 0x0030 + +/* Rounding precision */ +#define FPCR_PREC_MASK 0x00c0 +#define FPCR_PREC_X 0x0000 +#define FPCR_PREC_S 0x0040 +#define FPCR_PREC_D 0x0080 +#define FPCR_PREC_U 0x00c0 + +#define FPCR_EXCP_MASK 0xff00 + +/* CACR fields are implementation defined, but some bits are common. */ +#define M68K_CACR_EUSP 0x10 + +#define MACSR_PAV0 0x100 +#define MACSR_OMC 0x080 +#define MACSR_SU 0x040 +#define MACSR_FI 0x020 +#define MACSR_RT 0x010 +#define MACSR_N 0x008 +#define MACSR_Z 0x004 +#define MACSR_V 0x002 +#define MACSR_EV 0x001 + +void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector); +void m68k_switch_sp(CPUM68KState *env); + +void do_m68k_semihosting(CPUM68KState *env, int nr); + +/* + * There are 4 ColdFire core ISA revisions: A, A+, B and C. + * Each feature covers the subset of instructions common to the + * ISA revisions mentioned. 
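+ * For example, M68K_FEATURE_CF_ISA_B below is set by ISA B and ISA C cores
+ * alike, as its per-feature comment notes.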
+ */ + +enum m68k_features { + M68K_FEATURE_M68000, + M68K_FEATURE_M68020, + M68K_FEATURE_M68030, + M68K_FEATURE_M68040, + M68K_FEATURE_M68060, + M68K_FEATURE_CF_ISA_A, + M68K_FEATURE_CF_ISA_B, /* (ISA B or C). */ + M68K_FEATURE_CF_ISA_APLUSC, /* BIT/BITREV, FF1, STRLDSR (ISA A+ or C). */ + M68K_FEATURE_BRAL, /* Long unconditional branch. (ISA A+ or B). */ + M68K_FEATURE_CF_FPU, + M68K_FEATURE_CF_MAC, + M68K_FEATURE_CF_EMAC, + M68K_FEATURE_CF_EMAC_B, /* Revision B EMAC (dual accumulate). */ + M68K_FEATURE_USP, /* User Stack Pointer. (ISA A+, B or C). */ + M68K_FEATURE_EXT_FULL, /* 68020+ full extension word. */ + M68K_FEATURE_WORD_INDEX, /* word sized address index registers. */ + M68K_FEATURE_SCALED_INDEX, /* scaled address index registers. */ + M68K_FEATURE_LONG_MULDIV, /* 32 bit multiply/divide. */ + M68K_FEATURE_QUAD_MULDIV, /* 64 bit multiply/divide. */ + M68K_FEATURE_BCCL, /* Long conditional branches. */ + M68K_FEATURE_BITFIELD, /* Bit field insns. */ + M68K_FEATURE_FPU, + M68K_FEATURE_CAS, + M68K_FEATURE_BKPT, + M68K_FEATURE_RTD, + M68K_FEATURE_CHK2, + M68K_FEATURE_MOVEP, +}; + +static inline int m68k_feature(CPUM68KState *env, int feature) +{ + return (env->features & (1u << feature)) != 0; +} + +void m68k_cpu_list(void); + +void register_m68k_insns (CPUM68KState *env); + +enum { + /* 1 bit to define user level / supervisor access */ + ACCESS_SUPER = 0x01, + /* 1 bit to indicate direction */ + ACCESS_STORE = 0x02, + /* 1 bit to indicate debug access */ + ACCESS_DEBUG = 0x04, + /* PTEST instruction */ + ACCESS_PTEST = 0x08, + /* Type of instruction that generated the access */ + ACCESS_CODE = 0x10, /* Code fetch access */ + ACCESS_DATA = 0x20, /* Data load/store access */ +}; + +#define cpu_signal_handler cpu_m68k_signal_handler +#define cpu_list m68k_cpu_list + +/* MMU modes definitions */ +#define MMU_KERNEL_IDX 0 +#define MMU_USER_IDX 1 +static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch) +{ + return (env->sr & SR_S) == 0 ? 
MMU_USER_IDX : MMU_KERNEL_IDX;
+}
+
+bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool probe, uintptr_t retaddr);
+void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+                                 unsigned size, MMUAccessType access_type,
+                                 int mmu_idx, MemTxAttrs attrs,
+                                 MemTxResult response, uintptr_t retaddr);
+
+typedef CPUM68KState CPUArchState;
+typedef M68kCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+/* TB flags */
+#define TB_FLAGS_MACSR          0x0f
+#define TB_FLAGS_MSR_S_BIT      13
+#define TB_FLAGS_MSR_S          (1 << TB_FLAGS_MSR_S_BIT)
+#define TB_FLAGS_SFC_S_BIT      14
+#define TB_FLAGS_SFC_S          (1 << TB_FLAGS_SFC_S_BIT)
+#define TB_FLAGS_DFC_S_BIT      15
+#define TB_FLAGS_DFC_S          (1 << TB_FLAGS_DFC_S_BIT)
+
+static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
+                                        target_ulong *cs_base, uint32_t *flags)
+{
+    *pc = env->pc;
+    *cs_base = 0;
+    *flags = (env->macsr >> 4) & TB_FLAGS_MACSR;
+    if (env->sr & SR_S) {
+        *flags |= TB_FLAGS_MSR_S;
+        *flags |= (env->sfc << (TB_FLAGS_SFC_S_BIT - 2)) & TB_FLAGS_SFC_S;
+        *flags |= (env->dfc << (TB_FLAGS_DFC_S_BIT - 2)) & TB_FLAGS_DFC_S;
+    }
+}
+
+// M68kCPU *cpu_m68k_init(struct uc_struct *uc, const char *cpu_model);
+
+#endif
diff --git a/qemu/target/m68k/fpu_helper.c b/qemu/target/m68k/fpu_helper.c
new file mode 100644
index 00000000..3f544a05
--- /dev/null
+++ b/qemu/target/m68k/fpu_helper.c
@@ -0,0 +1,658 @@
+/*
+ * m68k FPU helpers
+ *
+ * Copyright (c) 2006-2007 CodeSourcery
+ * Written by Paul Brook
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "softfloat.h"
+
+/*
+ * Undefined offsets may be different on various FPUs.
+ * On 68040 they return 0.0 (floatx80_zero) + */ + +static const floatx80 fpu_rom[128] = { + [0x00] = make_floatx80_init(0x4000, 0xc90fdaa22168c235ULL), /* Pi */ + [0x0b] = make_floatx80_init(0x3ffd, 0x9a209a84fbcff798ULL), /* Log10(2) */ + [0x0c] = make_floatx80_init(0x4000, 0xadf85458a2bb4a9aULL), /* e */ + [0x0d] = make_floatx80_init(0x3fff, 0xb8aa3b295c17f0bcULL), /* Log2(e) */ + [0x0e] = make_floatx80_init(0x3ffd, 0xde5bd8a937287195ULL), /* Log10(e) */ + [0x0f] = make_floatx80_init(0x0000, 0x0000000000000000ULL), /* Zero */ + [0x30] = make_floatx80_init(0x3ffe, 0xb17217f7d1cf79acULL), /* ln(2) */ + [0x31] = make_floatx80_init(0x4000, 0x935d8dddaaa8ac17ULL), /* ln(10) */ + [0x32] = make_floatx80_init(0x3fff, 0x8000000000000000ULL), /* 10^0 */ + [0x33] = make_floatx80_init(0x4002, 0xa000000000000000ULL), /* 10^1 */ + [0x34] = make_floatx80_init(0x4005, 0xc800000000000000ULL), /* 10^2 */ + [0x35] = make_floatx80_init(0x400c, 0x9c40000000000000ULL), /* 10^4 */ + [0x36] = make_floatx80_init(0x4019, 0xbebc200000000000ULL), /* 10^8 */ + [0x37] = make_floatx80_init(0x4034, 0x8e1bc9bf04000000ULL), /* 10^16 */ + [0x38] = make_floatx80_init(0x4069, 0x9dc5ada82b70b59eULL), /* 10^32 */ + [0x39] = make_floatx80_init(0x40d3, 0xc2781f49ffcfa6d5ULL), /* 10^64 */ + [0x3a] = make_floatx80_init(0x41a8, 0x93ba47c980e98ce0ULL), /* 10^128 */ + [0x3b] = make_floatx80_init(0x4351, 0xaa7eebfb9df9de8eULL), /* 10^256 */ + [0x3c] = make_floatx80_init(0x46a3, 0xe319a0aea60e91c7ULL), /* 10^512 */ + [0x3d] = make_floatx80_init(0x4d48, 0xc976758681750c17ULL), /* 10^1024 */ + [0x3e] = make_floatx80_init(0x5a92, 0x9e8b3b5dc53d5de5ULL), /* 10^2048 */ + [0x3f] = make_floatx80_init(0x7525, 0xc46052028a20979bULL), /* 10^4096 */ +}; + +int32_t HELPER(reds32)(CPUM68KState *env, FPReg *val) +{ + return floatx80_to_int32(val->d, &env->fp_status); +} + +float32 HELPER(redf32)(CPUM68KState *env, FPReg *val) +{ + return floatx80_to_float32(val->d, &env->fp_status); +} + +void HELPER(exts32)(CPUM68KState *env, FPReg *res, int32_t val) +{ + res->d = int32_to_floatx80(val, &env->fp_status); +} + +void HELPER(extf32)(CPUM68KState *env, FPReg *res, float32 val) +{ + res->d = float32_to_floatx80(val, &env->fp_status); +} + +void HELPER(extf64)(CPUM68KState *env, FPReg *res, float64 val) +{ + res->d = float64_to_floatx80(val, &env->fp_status); +} + +float64 HELPER(redf64)(CPUM68KState *env, FPReg *val) +{ + return floatx80_to_float64(val->d, &env->fp_status); +} + +void HELPER(firound)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + res->d = floatx80_round_to_int(val->d, &env->fp_status); +} + +static void m68k_restore_precision_mode(CPUM68KState *env) +{ + switch (env->fpcr & FPCR_PREC_MASK) { + case FPCR_PREC_X: /* extended */ + set_floatx80_rounding_precision(80, &env->fp_status); + break; + case FPCR_PREC_S: /* single */ + set_floatx80_rounding_precision(32, &env->fp_status); + break; + case FPCR_PREC_D: /* double */ + set_floatx80_rounding_precision(64, &env->fp_status); + break; + case FPCR_PREC_U: /* undefined */ + default: + break; + } +} + +static void cf_restore_precision_mode(CPUM68KState *env) +{ + if (env->fpcr & FPCR_PREC_S) { /* single */ + set_floatx80_rounding_precision(32, &env->fp_status); + } else { /* double */ + set_floatx80_rounding_precision(64, &env->fp_status); + } +} + +static void restore_rounding_mode(CPUM68KState *env) +{ + switch (env->fpcr & FPCR_RND_MASK) { + case FPCR_RND_N: /* round to nearest */ + set_float_rounding_mode(float_round_nearest_even, &env->fp_status); + break; + case FPCR_RND_Z: /* round 
to zero */ + set_float_rounding_mode(float_round_to_zero, &env->fp_status); + break; + case FPCR_RND_M: /* round toward minus infinity */ + set_float_rounding_mode(float_round_down, &env->fp_status); + break; + case FPCR_RND_P: /* round toward positive infinity */ + set_float_rounding_mode(float_round_up, &env->fp_status); + break; + } +} + +void cpu_m68k_set_fpcr(CPUM68KState *env, uint32_t val) +{ + env->fpcr = val & 0xffff; + + if (m68k_feature(env, M68K_FEATURE_CF_FPU)) { + cf_restore_precision_mode(env); + } else { + m68k_restore_precision_mode(env); + } + restore_rounding_mode(env); +} + +void HELPER(fitrunc)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + int rounding_mode = get_float_rounding_mode(&env->fp_status); + set_float_rounding_mode(float_round_to_zero, &env->fp_status); + res->d = floatx80_round_to_int(val->d, &env->fp_status); + set_float_rounding_mode(rounding_mode, &env->fp_status); +} + +void HELPER(set_fpcr)(CPUM68KState *env, uint32_t val) +{ + cpu_m68k_set_fpcr(env, val); +} + +#define PREC_BEGIN(prec) \ + do { \ + int old; \ + old = get_floatx80_rounding_precision(&env->fp_status); \ + set_floatx80_rounding_precision(prec, &env->fp_status) \ + +#define PREC_END() \ + set_floatx80_rounding_precision(old, &env->fp_status); \ + } while (0) + +void HELPER(fsround)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + PREC_BEGIN(32); + res->d = floatx80_round(val->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fdround)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + PREC_BEGIN(64); + res->d = floatx80_round(val->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fsqrt)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + res->d = floatx80_sqrt(val->d, &env->fp_status); +} + +void HELPER(fssqrt)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + PREC_BEGIN(32); + res->d = floatx80_sqrt(val->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fdsqrt)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + PREC_BEGIN(64); + res->d = floatx80_sqrt(val->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fabs)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status); +} + +void HELPER(fsabs)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + PREC_BEGIN(32); + res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status); + PREC_END(); +} + +void HELPER(fdabs)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + PREC_BEGIN(64); + res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status); + PREC_END(); +} + +void HELPER(fneg)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status); +} + +void HELPER(fsneg)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + PREC_BEGIN(32); + res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status); + PREC_END(); +} + +void HELPER(fdneg)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + PREC_BEGIN(64); + res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status); + PREC_END(); +} + +void HELPER(fadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + res->d = floatx80_add(val0->d, val1->d, &env->fp_status); +} + +void HELPER(fsadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + PREC_BEGIN(32); + res->d = floatx80_add(val0->d, val1->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fdadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + PREC_BEGIN(64); + res->d = floatx80_add(val0->d, val1->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fsub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ 
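+    /*
+     * Note the operand order: val0 holds the source operand and val1 the
+     * destination, so this computes val1 - val0 (destination minus
+     * source, as FSUB does); fssub/fdsub and the division helpers below
+     * follow the same convention.
+     */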
+ res->d = floatx80_sub(val1->d, val0->d, &env->fp_status); +} + +void HELPER(fssub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + PREC_BEGIN(32); + res->d = floatx80_sub(val1->d, val0->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fdsub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + PREC_BEGIN(64); + res->d = floatx80_sub(val1->d, val0->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + res->d = floatx80_mul(val0->d, val1->d, &env->fp_status); +} + +void HELPER(fsmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + PREC_BEGIN(32); + res->d = floatx80_mul(val0->d, val1->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fdmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + PREC_BEGIN(64); + res->d = floatx80_mul(val0->d, val1->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fsglmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + int rounding_mode = get_float_rounding_mode(&env->fp_status); + floatx80 a, b; + + PREC_BEGIN(32); + set_float_rounding_mode(float_round_to_zero, &env->fp_status); + a = floatx80_round(val0->d, &env->fp_status); + b = floatx80_round(val1->d, &env->fp_status); + set_float_rounding_mode(rounding_mode, &env->fp_status); + res->d = floatx80_mul(a, b, &env->fp_status); + PREC_END(); +} + +void HELPER(fdiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + res->d = floatx80_div(val1->d, val0->d, &env->fp_status); +} + +void HELPER(fsdiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + PREC_BEGIN(32); + res->d = floatx80_div(val1->d, val0->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fddiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + PREC_BEGIN(64); + res->d = floatx80_div(val1->d, val0->d, &env->fp_status); + PREC_END(); +} + +void HELPER(fsgldiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + int rounding_mode = get_float_rounding_mode(&env->fp_status); + floatx80 a, b; + + PREC_BEGIN(32); + set_float_rounding_mode(float_round_to_zero, &env->fp_status); + a = floatx80_round(val1->d, &env->fp_status); + b = floatx80_round(val0->d, &env->fp_status); + set_float_rounding_mode(rounding_mode, &env->fp_status); + res->d = floatx80_div(a, b, &env->fp_status); + PREC_END(); +} + +static int float_comp_to_cc(int float_compare) +{ + switch (float_compare) { + case float_relation_equal: + return FPSR_CC_Z; + case float_relation_less: + return FPSR_CC_N; + case float_relation_unordered: + return FPSR_CC_A; + case float_relation_greater: + return 0; + default: + // g_assert_not_reached(); + return 0; + } +} + +void HELPER(fcmp)(CPUM68KState *env, FPReg *val0, FPReg *val1) +{ + int float_compare; + + float_compare = floatx80_compare(val1->d, val0->d, &env->fp_status); + env->fpsr = (env->fpsr & ~FPSR_CC_MASK) | float_comp_to_cc(float_compare); +} + +void HELPER(ftst)(CPUM68KState *env, FPReg *val) +{ + uint32_t cc = 0; + + if (floatx80_is_neg(val->d)) { + cc |= FPSR_CC_N; + } + + if (floatx80_is_any_nan(val->d)) { + cc |= FPSR_CC_A; + } else if (floatx80_is_infinity(val->d)) { + cc |= FPSR_CC_I; + } else if (floatx80_is_zero(val->d)) { + cc |= FPSR_CC_Z; + } + env->fpsr = (env->fpsr & ~FPSR_CC_MASK) | cc; +} + +void HELPER(fconst)(CPUM68KState *env, FPReg *val, uint32_t offset) +{ + val->d = fpu_rom[offset]; +} + +typedef int (*float_access)(CPUM68KState *env, uint32_t addr, FPReg *fp, + uintptr_t ra); + +static uint32_t fmovem_predec(CPUM68KState *env, 
uint32_t addr, uint32_t mask, + float_access access_fn) +{ + uintptr_t ra = GETPC(); + int i, size; + + for (i = 7; i >= 0; i--, mask <<= 1) { + if (mask & 0x80) { + size = access_fn(env, addr, &env->fregs[i], ra); + if ((mask & 0xff) != 0x80) { + addr -= size; + } + } + } + + return addr; +} + +static uint32_t fmovem_postinc(CPUM68KState *env, uint32_t addr, uint32_t mask, + float_access access_fn) +{ + uintptr_t ra = GETPC(); + int i, size; + + for (i = 0; i < 8; i++, mask <<= 1) { + if (mask & 0x80) { + size = access_fn(env, addr, &env->fregs[i], ra); + addr += size; + } + } + + return addr; +} + +static int cpu_ld_floatx80_ra(CPUM68KState *env, uint32_t addr, FPReg *fp, + uintptr_t ra) +{ + uint32_t high; + uint64_t low; + + high = cpu_ldl_data_ra(env, addr, ra); + low = cpu_ldq_data_ra(env, addr + 4, ra); + + fp->l.upper = high >> 16; + fp->l.lower = low; + + return 12; +} + +static int cpu_st_floatx80_ra(CPUM68KState *env, uint32_t addr, FPReg *fp, + uintptr_t ra) +{ + cpu_stl_data_ra(env, addr, fp->l.upper << 16, ra); + cpu_stq_data_ra(env, addr + 4, fp->l.lower, ra); + + return 12; +} + +static int cpu_ld_float64_ra(CPUM68KState *env, uint32_t addr, FPReg *fp, + uintptr_t ra) +{ + uint64_t val; + + val = cpu_ldq_data_ra(env, addr, ra); + fp->d = float64_to_floatx80(*(float64 *)&val, &env->fp_status); + + return 8; +} + +static int cpu_st_float64_ra(CPUM68KState *env, uint32_t addr, FPReg *fp, + uintptr_t ra) +{ + float64 val; + + val = floatx80_to_float64(fp->d, &env->fp_status); + cpu_stq_data_ra(env, addr, *(uint64_t *)&val, ra); + + return 8; +} + +uint32_t HELPER(fmovemx_st_predec)(CPUM68KState *env, uint32_t addr, + uint32_t mask) +{ + return fmovem_predec(env, addr, mask, cpu_st_floatx80_ra); +} + +uint32_t HELPER(fmovemx_st_postinc)(CPUM68KState *env, uint32_t addr, + uint32_t mask) +{ + return fmovem_postinc(env, addr, mask, cpu_st_floatx80_ra); +} + +uint32_t HELPER(fmovemx_ld_postinc)(CPUM68KState *env, uint32_t addr, + uint32_t mask) +{ + return fmovem_postinc(env, addr, mask, cpu_ld_floatx80_ra); +} + +uint32_t HELPER(fmovemd_st_predec)(CPUM68KState *env, uint32_t addr, + uint32_t mask) +{ + return fmovem_predec(env, addr, mask, cpu_st_float64_ra); +} + +uint32_t HELPER(fmovemd_st_postinc)(CPUM68KState *env, uint32_t addr, + uint32_t mask) +{ + return fmovem_postinc(env, addr, mask, cpu_st_float64_ra); +} + +uint32_t HELPER(fmovemd_ld_postinc)(CPUM68KState *env, uint32_t addr, + uint32_t mask) +{ + return fmovem_postinc(env, addr, mask, cpu_ld_float64_ra); +} + +static void make_quotient(CPUM68KState *env, floatx80 val) +{ + int32_t quotient; + int sign; + + if (floatx80_is_any_nan(val)) { + return; + } + + quotient = floatx80_to_int32(val, &env->fp_status); + sign = quotient < 0; + if (sign) { + quotient = -quotient; + } + + quotient = (sign << 7) | (quotient & 0x7f); + env->fpsr = (env->fpsr & ~FPSR_QT_MASK) | (quotient << FPSR_QT_SHIFT); +} + +void HELPER(fmod)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + res->d = floatx80_mod(val1->d, val0->d, &env->fp_status); + + make_quotient(env, res->d); +} + +void HELPER(frem)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) +{ + res->d = floatx80_rem(val1->d, val0->d, &env->fp_status); + + make_quotient(env, res->d); +} + +void HELPER(fgetexp)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + res->d = floatx80_getexp(val->d, &env->fp_status); +} + +void HELPER(fgetman)(CPUM68KState *env, FPReg *res, FPReg *val) +{ + res->d = floatx80_getman(val->d, &env->fp_status); +} + +void 
HELPER(fscale)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+    res->d = floatx80_scale(val1->d, val0->d, &env->fp_status);
+}
+
+void HELPER(flognp1)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_lognp1(val->d, &env->fp_status);
+}
+
+void HELPER(flogn)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_logn(val->d, &env->fp_status);
+}
+
+void HELPER(flog10)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_log10(val->d, &env->fp_status);
+}
+
+void HELPER(flog2)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_log2(val->d, &env->fp_status);
+}
+
+void HELPER(fetox)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_etox(val->d, &env->fp_status);
+}
+
+void HELPER(ftwotox)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_twotox(val->d, &env->fp_status);
+}
+
+void HELPER(ftentox)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_tentox(val->d, &env->fp_status);
+}
+
+void HELPER(ftan)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_tan(val->d, &env->fp_status);
+}
+
+void HELPER(fsin)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_sin(val->d, &env->fp_status);
+}
+
+void HELPER(fcos)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_cos(val->d, &env->fp_status);
+}
+
+void HELPER(fsincos)(CPUM68KState *env, FPReg *res0, FPReg *res1, FPReg *val)
+{
+    floatx80 a = val->d;
+    /*
+     * If res0 and res1 specify the same floating-point data register,
+     * the sine result is stored in the register, and the cosine
+     * result is discarded.
+     */
+    res1->d = floatx80_cos(a, &env->fp_status);
+    res0->d = floatx80_sin(a, &env->fp_status);
+}
+
+void HELPER(fatan)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_atan(val->d, &env->fp_status);
+}
+
+void HELPER(fasin)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_asin(val->d, &env->fp_status);
+}
+
+void HELPER(facos)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_acos(val->d, &env->fp_status);
+}
+
+void HELPER(fatanh)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_atanh(val->d, &env->fp_status);
+}
+
+void HELPER(ftanh)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_tanh(val->d, &env->fp_status);
+}
+
+void HELPER(fsinh)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_sinh(val->d, &env->fp_status);
+}
+
+void HELPER(fcosh)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+    res->d = floatx80_cosh(val->d, &env->fp_status);
+}
diff --git a/qemu/target/m68k/helper.c b/qemu/target/m68k/helper.c
new file mode 100644
index 00000000..b0f2e298
--- /dev/null
+++ b/qemu/target/m68k/helper.c
@@ -0,0 +1,1047 @@
+/*
+ * m68k op helpers
+ *
+ * Copyright (c) 2006-2007 CodeSourcery
+ * Written by Paul Brook
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+#define SIGNBIT (1u << 31)
+
+void HELPER(cf_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
+{
+    switch (reg) {
+    case M68K_CR_CACR:
+        env->cacr = val;
+        m68k_switch_sp(env);
+        break;
+    case M68K_CR_ACR0:
+    case M68K_CR_ACR1:
+    case M68K_CR_ACR2:
+    case M68K_CR_ACR3:
+        /* TODO: Implement Access Control Registers. */
+        break;
+    case M68K_CR_VBR:
+        env->vbr = val;
+        break;
+    /* TODO: Implement control registers. */
+    default:
+        cpu_abort(env_cpu(env),
+                  "Unimplemented control register write 0x%x = 0x%x\n",
+                  reg, val);
+    }
+}
+
+void HELPER(m68k_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
+{
+    switch (reg) {
+    /* MC680[1234]0 */
+    case M68K_CR_SFC:
+        env->sfc = val & 7;
+        return;
+    case M68K_CR_DFC:
+        env->dfc = val & 7;
+        return;
+    case M68K_CR_VBR:
+        env->vbr = val;
+        return;
+    /* MC680[2346]0 */
+    case M68K_CR_CACR:
+        if (m68k_feature(env, M68K_FEATURE_M68020)) {
+            env->cacr = val & 0x0000000f;
+        } else if (m68k_feature(env, M68K_FEATURE_M68030)) {
+            env->cacr = val & 0x00003f1f;
+        } else if (m68k_feature(env, M68K_FEATURE_M68040)) {
+            env->cacr = val & 0x80008000;
+        } else if (m68k_feature(env, M68K_FEATURE_M68060)) {
+            env->cacr = val & 0xf8e0e000;
+        }
+        m68k_switch_sp(env);
+        return;
+    /* MC680[34]0 */
+    case M68K_CR_TC:
+        env->mmu.tcr = val;
+        return;
+    case M68K_CR_MMUSR:
+        env->mmu.mmusr = val;
+        return;
+    case M68K_CR_SRP:
+        env->mmu.srp = val;
+        return;
+    case M68K_CR_URP:
+        env->mmu.urp = val;
+        return;
+    case M68K_CR_USP:
+        env->sp[M68K_USP] = val;
+        return;
+    case M68K_CR_MSP:
+        env->sp[M68K_SSP] = val;
+        return;
+    case M68K_CR_ISP:
+        env->sp[M68K_ISP] = val;
+        return;
+    /* MC68040/MC68LC040 */
+    case M68K_CR_ITT0:
+        env->mmu.ttr[M68K_ITTR0] = val;
+        return;
+    case M68K_CR_ITT1:
+        env->mmu.ttr[M68K_ITTR1] = val;
+        return;
+    case M68K_CR_DTT0:
+        env->mmu.ttr[M68K_DTTR0] = val;
+        return;
+    case M68K_CR_DTT1:
+        env->mmu.ttr[M68K_DTTR1] = val;
+        return;
+    }
+    cpu_abort(env_cpu(env),
+              "Unimplemented control register write 0x%x = 0x%x\n",
+              reg, val);
+}
+
+uint32_t HELPER(m68k_movec_from)(CPUM68KState *env, uint32_t reg)
+{
+    switch (reg) {
+    /* MC680[1234]0 */
+    case M68K_CR_SFC:
+        return env->sfc;
+    case M68K_CR_DFC:
+        return env->dfc;
+    case M68K_CR_VBR:
+        return env->vbr;
+    /* MC680[234]0 */
+    case M68K_CR_CACR:
+        return env->cacr;
+    /* MC680[34]0 */
+    case M68K_CR_TC:
+        return env->mmu.tcr;
+    case M68K_CR_MMUSR:
+        return env->mmu.mmusr;
+    case M68K_CR_SRP:
+        return env->mmu.srp;
+    case M68K_CR_USP:
+        return env->sp[M68K_USP];
+    case M68K_CR_MSP:
+        return env->sp[M68K_SSP];
+    case M68K_CR_ISP:
+        return env->sp[M68K_ISP];
+    /* MC68040/MC68LC040 */
+    case M68K_CR_URP:
+        return env->mmu.urp;
+    case M68K_CR_ITT0:
+        return env->mmu.ttr[M68K_ITTR0];
+    case M68K_CR_ITT1:
+        return env->mmu.ttr[M68K_ITTR1];
+    case M68K_CR_DTT0:
+        return env->mmu.ttr[M68K_DTTR0];
+    case M68K_CR_DTT1:
+        return env->mmu.ttr[M68K_DTTR1];
+    }
+    cpu_abort(env_cpu(env), "Unimplemented control register read 0x%x\n",
+              reg);
+}
+
+void HELPER(set_macsr)(CPUM68KState *env, uint32_t val)
+{
+    uint32_t acc;
+    int8_t exthigh;
+    uint8_t extlow;
+    uint64_t regval;
+    int i;
+    if ((env->macsr ^ val) & (MACSR_FI | MACSR_SU)) {
+        for (i = 0; i < 4; i++) {
+            regval = env->macc[i];
+            exthigh = regval >> 40;
+            if (env->macsr & MACSR_FI) {
+                acc = regval >> 8;
+                extlow = regval;
+            } else {
+                acc = regval;
+                extlow = regval >> 32;
+            }
+            if (val & MACSR_FI) {
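+                /*
+                 * Repack the accumulator into the newly selected format:
+                 * fractional mode keeps the 32-bit accumulator in bits
+                 * [39:8], with the low extension byte below it and the
+                 * signed high extension byte in bits [47:40].
+                 */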
+                regval = (((uint64_t)acc) << 8) | extlow;
+                regval |= ((int64_t)exthigh) << 40;
+            } else if (val & MACSR_SU) {
+                regval = acc | (((int64_t)extlow) << 32);
+                regval |= ((int64_t)exthigh) << 40;
+            } else {
+                regval = acc | (((uint64_t)extlow) << 32);
+                regval |= ((uint64_t)(uint8_t)exthigh) << 40;
+            }
+            env->macc[i] = regval;
+        }
+    }
+    env->macsr = val;
+}
+
+void m68k_switch_sp(CPUM68KState *env)
+{
+    int new_sp;
+
+    env->sp[env->current_sp] = env->aregs[7];
+    if (m68k_feature(env, M68K_FEATURE_M68000)) {
+        if (env->sr & SR_S) {
+            if (env->sr & SR_M) {
+                new_sp = M68K_SSP;
+            } else {
+                new_sp = M68K_ISP;
+            }
+        } else {
+            new_sp = M68K_USP;
+        }
+    } else {
+        new_sp = (env->sr & SR_S && env->cacr & M68K_CACR_EUSP)
+                 ? M68K_SSP : M68K_USP;
+    }
+    env->aregs[7] = env->sp[new_sp];
+    env->current_sp = new_sp;
+}
+
+static int check_TTR(uint32_t ttr, int *prot, target_ulong addr,
+                     int access_type)
+{
+    uint32_t base, mask;
+
+    /* check if transparent translation is enabled */
+    if ((ttr & M68K_TTR_ENABLED) == 0) {
+        return 0;
+    }
+
+    /* check mode access */
+    switch (ttr & M68K_TTR_SFIELD) {
+    case M68K_TTR_SFIELD_USER:
+        /* match only if user */
+        if ((access_type & ACCESS_SUPER) != 0) {
+            return 0;
+        }
+        break;
+    case M68K_TTR_SFIELD_SUPER:
+        /* match only if supervisor */
+        if ((access_type & ACCESS_SUPER) == 0) {
+            return 0;
+        }
+        break;
+    default:
+        /* all other values disable mode matching (FC2) */
+        break;
+    }
+
+    /* check address matching */
+
+    base = ttr & M68K_TTR_ADDR_BASE;
+    mask = (ttr & M68K_TTR_ADDR_MASK) ^ M68K_TTR_ADDR_MASK;
+    mask <<= M68K_TTR_ADDR_MASK_SHIFT;
+
+    if ((addr & mask) != (base & mask)) {
+        return 0;
+    }
+
+    *prot = PAGE_READ | PAGE_EXEC;
+    if ((ttr & M68K_DESC_WRITEPROT) == 0) {
+        *prot |= PAGE_WRITE;
+    }
+
+    return 1;
+}
+
+static int get_physical_address(CPUM68KState *env, hwaddr *physical,
+                                int *prot, target_ulong address,
+                                int access_type, target_ulong *page_size)
+{
+    CPUState *cs = env_cpu(env);
+    uint32_t entry;
+    uint32_t next;
+    target_ulong page_mask;
+    bool debug = access_type & ACCESS_DEBUG;
+    int page_bits;
+    int i;
+    MemTxResult txres;
+
+    /* Transparent Translation (physical = logical) */
+    for (i = 0; i < M68K_MAX_TTR; i++) {
+        if (check_TTR(env->mmu.TTR(access_type, i),
+                      prot, address, access_type)) {
+            if (access_type & ACCESS_PTEST) {
+                /* Transparent Translation Register bit */
+                env->mmu.mmusr = M68K_MMU_T_040 | M68K_MMU_R_040;
+            }
+            *physical = address & TARGET_PAGE_MASK;
+            *page_size = TARGET_PAGE_SIZE;
+            return 0;
+        }
+    }
+
+    /* Page Table Root Pointer */
+    *prot = PAGE_READ | PAGE_WRITE;
+    if (access_type & ACCESS_CODE) {
+        *prot |= PAGE_EXEC;
+    }
+    if (access_type & ACCESS_SUPER) {
+        next = env->mmu.srp;
+    } else {
+        next = env->mmu.urp;
+    }
+
+    /* Root Index */
+    entry = M68K_POINTER_BASE(next) | M68K_ROOT_INDEX(address);
+
+    next = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres);
+    if (txres != MEMTX_OK) {
+        goto txfail;
+    }
+    if (!M68K_UDT_VALID(next)) {
+        return -1;
+    }
+    if (!(next & M68K_DESC_USED) && !debug) {
+        glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, next | M68K_DESC_USED,
+                                                      MEMTXATTRS_UNSPECIFIED, &txres);
+        if (txres != MEMTX_OK) {
+            goto txfail;
+        }
+    }
+    if (next & M68K_DESC_WRITEPROT) {
+        if (access_type & ACCESS_PTEST) {
+            env->mmu.mmusr |= M68K_MMU_WP_040;
+        }
+        *prot &= ~PAGE_WRITE;
+        if (access_type & ACCESS_STORE) {
+            return -1;
+        }
+    }
+
+    /* Pointer Index */
+    entry = M68K_POINTER_BASE(next) |
M68K_POINTER_INDEX(address); + + next = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres); + if (txres != MEMTX_OK) { + goto txfail; + } + if (!M68K_UDT_VALID(next)) { + return -1; + } + if (!(next & M68K_DESC_USED) && !debug) { + glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, next | M68K_DESC_USED, + MEMTXATTRS_UNSPECIFIED, &txres); + if (txres != MEMTX_OK) { + goto txfail; + } + } + if (next & M68K_DESC_WRITEPROT) { + if (access_type & ACCESS_PTEST) { + env->mmu.mmusr |= M68K_MMU_WP_040; + } + *prot &= ~PAGE_WRITE; + if (access_type & ACCESS_STORE) { + return -1; + } + } + + /* Page Index */ + if (env->mmu.tcr & M68K_TCR_PAGE_8K) { + entry = M68K_8K_PAGE_BASE(next) | M68K_8K_PAGE_INDEX(address); + } else { + entry = M68K_4K_PAGE_BASE(next) | M68K_4K_PAGE_INDEX(address); + } + + next = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres); + if (txres != MEMTX_OK) { + goto txfail; + } + + if (!M68K_PDT_VALID(next)) { + return -1; + } + if (M68K_PDT_INDIRECT(next)) { + next = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, M68K_INDIRECT_POINTER(next), + MEMTXATTRS_UNSPECIFIED, &txres); + if (txres != MEMTX_OK) { + goto txfail; + } + } + if (access_type & ACCESS_STORE) { + if (next & M68K_DESC_WRITEPROT) { + if (!(next & M68K_DESC_USED) && !debug) { + glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, next | M68K_DESC_USED, + MEMTXATTRS_UNSPECIFIED, &txres); + if (txres != MEMTX_OK) { + goto txfail; + } + } + } else if ((next & (M68K_DESC_MODIFIED | M68K_DESC_USED)) != + (M68K_DESC_MODIFIED | M68K_DESC_USED) && !debug) { + glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, + next | (M68K_DESC_MODIFIED | M68K_DESC_USED), + MEMTXATTRS_UNSPECIFIED, &txres); + if (txres != MEMTX_OK) { + goto txfail; + } + } + } else { + if (!(next & M68K_DESC_USED) && !debug) { + glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, entry, next | M68K_DESC_USED, + MEMTXATTRS_UNSPECIFIED, &txres); + if (txres != MEMTX_OK) { + goto txfail; + } + } + } + + if (env->mmu.tcr & M68K_TCR_PAGE_8K) { + page_bits = 13; + } else { + page_bits = 12; + } + *page_size = 1 << page_bits; + page_mask = ~(*page_size - 1); + *physical = next & page_mask; + + if (access_type & ACCESS_PTEST) { + env->mmu.mmusr |= next & M68K_MMU_SR_MASK_040; + env->mmu.mmusr |= *physical & 0xfffff000; + env->mmu.mmusr |= M68K_MMU_R_040; + } + + if (next & M68K_DESC_WRITEPROT) { + *prot &= ~PAGE_WRITE; + if (access_type & ACCESS_STORE) { + return -1; + } + } + if (next & M68K_DESC_SUPERONLY) { + if ((access_type & ACCESS_SUPER) == 0) { + return -1; + } + } + + return 0; + +txfail: + /* + * A page table load/store failed. TODO: we should really raise a + * suitable guest fault here if this is not a debug access. + * For now just return that the translation failed. 
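+ *
+ * (For reference, the walk above implements the 68040 three-level
+ * table format: a 7-bit root index, a 7-bit pointer index, and a
+ * 6-bit page index for 4K pages or a 5-bit one for 8K pages, as
+ * selected by M68K_TCR_PAGE_8K.)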
+ */ + return -1; +} + +hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + M68kCPU *cpu = M68K_CPU(cs); + CPUM68KState *env = &cpu->env; + hwaddr phys_addr; + int prot; + int access_type; + target_ulong page_size; + + if ((env->mmu.tcr & M68K_TCR_ENABLED) == 0) { + /* MMU disabled */ + return addr; + } + + access_type = ACCESS_DATA | ACCESS_DEBUG; + if (env->sr & SR_S) { + access_type |= ACCESS_SUPER; + } + if (get_physical_address(env, &phys_addr, &prot, + addr, access_type, &page_size) != 0) { + return -1; + } + return phys_addr; +} + +/* + * Notify CPU of a pending interrupt. Prioritization and vectoring should + * be handled by the interrupt controller. Real hardware only requests + * the vector when the interrupt is acknowledged by the CPU. For + * simplicity we calculate it when the interrupt is signalled. + */ +void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector) +{ + CPUState *cs = CPU(cpu); + CPUM68KState *env = &cpu->env; + + env->pending_level = level; + env->pending_vector = vector; + if (level) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + +bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType qemu_access_type, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + M68kCPU *cpu = M68K_CPU(cs); + CPUM68KState *env = &cpu->env; + + hwaddr physical; + int prot; + int access_type; + int ret; + target_ulong page_size; + + if ((env->mmu.tcr & M68K_TCR_ENABLED) == 0) { + /* MMU disabled */ + tlb_set_page(cs, address & TARGET_PAGE_MASK, + address & TARGET_PAGE_MASK, + PAGE_READ | PAGE_WRITE | PAGE_EXEC, + mmu_idx, TARGET_PAGE_SIZE); + return true; + } + + if (qemu_access_type == MMU_INST_FETCH) { + access_type = ACCESS_CODE; + } else { + access_type = ACCESS_DATA; + if (qemu_access_type == MMU_DATA_STORE) { + access_type |= ACCESS_STORE; + } + } + if (mmu_idx != MMU_USER_IDX) { + access_type |= ACCESS_SUPER; + } + + ret = get_physical_address(&cpu->env, &physical, &prot, + address, access_type, &page_size); + if (likely(ret == 0)) { + address &= TARGET_PAGE_MASK; + physical += address & (page_size - 1); + tlb_set_page(cs, address, physical, + prot, mmu_idx, TARGET_PAGE_SIZE); + return true; + } + + if (probe) { + return false; + } + + /* page fault */ + env->mmu.ssw = M68K_ATC_040; + switch (size) { + case 1: + env->mmu.ssw |= M68K_BA_SIZE_BYTE; + break; + case 2: + env->mmu.ssw |= M68K_BA_SIZE_WORD; + break; + case 4: + env->mmu.ssw |= M68K_BA_SIZE_LONG; + break; + } + if (access_type & ACCESS_SUPER) { + env->mmu.ssw |= M68K_TM_040_SUPER; + } + if (access_type & ACCESS_CODE) { + env->mmu.ssw |= M68K_TM_040_CODE; + } else { + env->mmu.ssw |= M68K_TM_040_DATA; + } + if (!(access_type & ACCESS_STORE)) { + env->mmu.ssw |= M68K_RW_040; + } + + cs->exception_index = EXCP_ACCESS; + env->mmu.ar = address; + cpu_loop_exit_restore(cs, retaddr); +} + +uint32_t HELPER(bitrev)(uint32_t x) +{ + x = ((x >> 1) & 0x55555555u) | ((x << 1) & 0xaaaaaaaau); + x = ((x >> 2) & 0x33333333u) | ((x << 2) & 0xccccccccu); + x = ((x >> 4) & 0x0f0f0f0fu) | ((x << 4) & 0xf0f0f0f0u); + return bswap32(x); +} + +uint32_t HELPER(ff1)(uint32_t x) +{ + int n; + for (n = 32; x; n--) + x >>= 1; + return n; +} + +uint32_t HELPER(sats)(uint32_t val, uint32_t v) +{ + /* The result has the opposite sign to the original value. 
*/ + if ((int32_t)v < 0) { + val = (((int32_t)val) >> 31) ^ SIGNBIT; + } + return val; +} + +void cpu_m68k_set_sr(CPUM68KState *env, uint32_t sr) +{ + env->sr = sr & 0xffe0; + cpu_m68k_set_ccr(env, sr); + m68k_switch_sp(env); +} + +void HELPER(set_sr)(CPUM68KState *env, uint32_t val) +{ + cpu_m68k_set_sr(env, val); +} + +/* MAC unit. */ +/* + * FIXME: The MAC unit implementation is a bit of a mess. Some helpers + * take values, others take register numbers and manipulate the contents + * in-place. + */ +void HELPER(mac_move)(CPUM68KState *env, uint32_t dest, uint32_t src) +{ + uint32_t mask; + env->macc[dest] = env->macc[src]; + mask = MACSR_PAV0 << dest; + if (env->macsr & (MACSR_PAV0 << src)) + env->macsr |= mask; + else + env->macsr &= ~mask; +} + +uint64_t HELPER(macmuls)(CPUM68KState *env, uint32_t op1, uint32_t op2) +{ + int64_t product; + int64_t res; + + product = (uint64_t)op1 * op2; + res = (product << 24) >> 24; + if (res != product) { + env->macsr |= MACSR_V; + if (env->macsr & MACSR_OMC) { + /* Make sure the accumulate operation overflows. */ + if (product < 0) + res = ~(1ll << 50); + else + res = 1ll << 50; + } + } + return res; +} + +uint64_t HELPER(macmulu)(CPUM68KState *env, uint32_t op1, uint32_t op2) +{ + uint64_t product; + + product = (uint64_t)op1 * op2; + if (product & (0xffffffull << 40)) { + env->macsr |= MACSR_V; + if (env->macsr & MACSR_OMC) { + /* Make sure the accumulate operation overflows. */ + product = 1ll << 50; + } else { + product &= ((1ull << 40) - 1); + } + } + return product; +} + +uint64_t HELPER(macmulf)(CPUM68KState *env, uint32_t op1, uint32_t op2) +{ + uint64_t product; + uint32_t remainder; + + product = (uint64_t)op1 * op2; + if (env->macsr & MACSR_RT) { + remainder = product & 0xffffff; + product >>= 24; + if (remainder > 0x800000) + product++; + else if (remainder == 0x800000) + product += (product & 1); + } else { + product >>= 24; + } + return product; +} + +void HELPER(macsats)(CPUM68KState *env, uint32_t acc) +{ + int64_t tmp; + int64_t result; + tmp = env->macc[acc]; + result = ((tmp << 16) >> 16); + if (result != tmp) { + env->macsr |= MACSR_V; + } + if (env->macsr & MACSR_V) { + env->macsr |= MACSR_PAV0 << acc; + if (env->macsr & MACSR_OMC) { + /* + * The result is saturated to 32 bits, despite overflow occurring + * at 48 bits. Seems weird, but that's what the hardware docs + * say. 
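+             * (Concretely, the expression below yields 0x7fffffff for a
+             * non-negative sum and sign-extended 0x80000000 for a
+             * negative one.)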
+ */ + result = (result >> 63) ^ 0x7fffffff; + } + } + env->macc[acc] = result; +} + +void HELPER(macsatu)(CPUM68KState *env, uint32_t acc) +{ + uint64_t val; + + val = env->macc[acc]; + if (val & (0xffffull << 48)) { + env->macsr |= MACSR_V; + } + if (env->macsr & MACSR_V) { + env->macsr |= MACSR_PAV0 << acc; + if (env->macsr & MACSR_OMC) { + if (val > (1ull << 53)) + val = 0; + else + val = (1ull << 48) - 1; + } else { + val &= ((1ull << 48) - 1); + } + } + env->macc[acc] = val; +} + +void HELPER(macsatf)(CPUM68KState *env, uint32_t acc) +{ + int64_t sum; + int64_t result; + + sum = env->macc[acc]; + result = (sum << 16) >> 16; + if (result != sum) { + env->macsr |= MACSR_V; + } + if (env->macsr & MACSR_V) { + env->macsr |= MACSR_PAV0 << acc; + if (env->macsr & MACSR_OMC) { + result = (result >> 63) ^ 0x7fffffffffffll; + } + } + env->macc[acc] = result; +} + +void HELPER(mac_set_flags)(CPUM68KState *env, uint32_t acc) +{ + uint64_t val; + val = env->macc[acc]; + if (val == 0) { + env->macsr |= MACSR_Z; + } else if (val & (1ull << 47)) { + env->macsr |= MACSR_N; + } + if (env->macsr & (MACSR_PAV0 << acc)) { + env->macsr |= MACSR_V; + } + if (env->macsr & MACSR_FI) { + val = ((int64_t)val) >> 40; + if (val != 0 && val != -1) + env->macsr |= MACSR_EV; + } else if (env->macsr & MACSR_SU) { + val = ((int64_t)val) >> 32; + if (val != 0 && val != -1) + env->macsr |= MACSR_EV; + } else { + if ((val >> 32) != 0) + env->macsr |= MACSR_EV; + } +} + +#define EXTSIGN(val, index) ( \ + (index == 0) ? (int8_t)(val) : ((index == 1) ? (int16_t)(val) : (val)) \ +) + +#define COMPUTE_CCR(op, x, n, z, v, c) { \ + switch (op) { \ + case CC_OP_FLAGS: \ + /* Everything in place. */ \ + break; \ + case CC_OP_ADDB: \ + case CC_OP_ADDW: \ + case CC_OP_ADDL: \ + res = n; \ + src2 = v; \ + src1 = EXTSIGN(res - src2, op - CC_OP_ADDB); \ + c = x; \ + z = n; \ + v = (res ^ src1) & ~(src1 ^ src2); \ + break; \ + case CC_OP_SUBB: \ + case CC_OP_SUBW: \ + case CC_OP_SUBL: \ + res = n; \ + src2 = v; \ + src1 = EXTSIGN(res + src2, op - CC_OP_SUBB); \ + c = x; \ + z = n; \ + v = (res ^ src1) & (src1 ^ src2); \ + break; \ + case CC_OP_CMPB: \ + case CC_OP_CMPW: \ + case CC_OP_CMPL: \ + src1 = n; \ + src2 = v; \ + res = EXTSIGN(src1 - src2, op - CC_OP_CMPB); \ + n = res; \ + z = res; \ + c = src1 < src2; \ + v = (res ^ src1) & (src1 ^ src2); \ + break; \ + case CC_OP_LOGIC: \ + c = v = 0; \ + z = n; \ + break; \ + default: \ + cpu_abort(env_cpu(env), "Bad CC_OP %d", op); \ + } \ +} while (0) + +uint32_t cpu_m68k_get_ccr(CPUM68KState *env) +{ + uint32_t x, c, n, z, v; + uint32_t res, src1, src2; + + x = env->cc_x; + n = env->cc_n; + z = env->cc_z; + v = env->cc_v; + c = env->cc_c; + + COMPUTE_CCR(env->cc_op, x, n, z, v, c); + + n = n >> 31; + z = (z == 0); + v = v >> 31; + + return x * CCF_X + n * CCF_N + z * CCF_Z + v * CCF_V + c * CCF_C; +} + +uint32_t HELPER(get_ccr)(CPUM68KState *env) +{ + return cpu_m68k_get_ccr(env); +} + +void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t ccr) +{ + env->cc_x = (ccr & CCF_X ? 1 : 0); + env->cc_n = (ccr & CCF_N ? -1 : 0); + env->cc_z = (ccr & CCF_Z ? 0 : 1); + env->cc_v = (ccr & CCF_V ? -1 : 0); + env->cc_c = (ccr & CCF_C ? 
1 : 0); + env->cc_op = CC_OP_FLAGS; +} + +void HELPER(set_ccr)(CPUM68KState *env, uint32_t ccr) +{ + cpu_m68k_set_ccr(env, ccr); +} + +void HELPER(flush_flags)(CPUM68KState *env, uint32_t cc_op) +{ + uint32_t res, src1, src2; + + COMPUTE_CCR(cc_op, env->cc_x, env->cc_n, env->cc_z, env->cc_v, env->cc_c); + env->cc_op = CC_OP_FLAGS; +} + +uint32_t HELPER(get_macf)(CPUM68KState *env, uint64_t val) +{ + int rem; + uint32_t result; + + if (env->macsr & MACSR_SU) { + /* 16-bit rounding. */ + rem = val & 0xffffff; + val = (val >> 24) & 0xffffu; + if (rem > 0x800000) + val++; + else if (rem == 0x800000) + val += (val & 1); + } else if (env->macsr & MACSR_RT) { + /* 32-bit rounding. */ + rem = val & 0xff; + val >>= 8; + if (rem > 0x80) + val++; + else if (rem == 0x80) + val += (val & 1); + } else { + /* No rounding. */ + val >>= 8; + } + if (env->macsr & MACSR_OMC) { + /* Saturate. */ + if (env->macsr & MACSR_SU) { + if (val != (uint16_t) val) { + result = ((val >> 63) ^ 0x7fff) & 0xffff; + } else { + result = val & 0xffff; + } + } else { + if (val != (uint32_t)val) { + result = ((uint32_t)(val >> 63) & 0x7fffffff); + } else { + result = (uint32_t)val; + } + } + } else { + /* No saturation. */ + if (env->macsr & MACSR_SU) { + result = val & 0xffff; + } else { + result = (uint32_t)val; + } + } + return result; +} + +uint32_t HELPER(get_macs)(uint64_t val) +{ + if (val == (int32_t)val) { + return (int32_t)val; + } else { + return (val >> 61) ^ ~SIGNBIT; + } +} + +uint32_t HELPER(get_macu)(uint64_t val) +{ + if ((val >> 32) == 0) { + return (uint32_t)val; + } else { + return 0xffffffffu; + } +} + +uint32_t HELPER(get_mac_extf)(CPUM68KState *env, uint32_t acc) +{ + uint32_t val; + val = env->macc[acc] & 0x00ff; + val |= (env->macc[acc] >> 32) & 0xff00; + val |= (env->macc[acc + 1] << 16) & 0x00ff0000; + val |= (env->macc[acc + 1] >> 16) & 0xff000000; + return val; +} + +uint32_t HELPER(get_mac_exti)(CPUM68KState *env, uint32_t acc) +{ + uint32_t val; + val = (env->macc[acc] >> 32) & 0xffff; + val |= (env->macc[acc + 1] >> 16) & 0xffff0000; + return val; +} + +void HELPER(set_mac_extf)(CPUM68KState *env, uint32_t val, uint32_t acc) +{ + int64_t res; + int32_t tmp; + res = env->macc[acc] & 0xffffffff00ull; + tmp = (int16_t)(val & 0xff00); + res |= ((int64_t)tmp) << 32; + res |= val & 0xff; + env->macc[acc] = res; + res = env->macc[acc + 1] & 0xffffffff00ull; + tmp = (val & 0xff000000); + res |= ((int64_t)tmp) << 16; + res |= (val >> 16) & 0xff; + env->macc[acc + 1] = res; +} + +void HELPER(set_mac_exts)(CPUM68KState *env, uint32_t val, uint32_t acc) +{ + int64_t res; + int32_t tmp; + res = (uint32_t)env->macc[acc]; + tmp = (int16_t)val; + res |= ((int64_t)tmp) << 32; + env->macc[acc] = res; + res = (uint32_t)env->macc[acc + 1]; + tmp = val & 0xffff0000; + res |= (int64_t)tmp << 16; + env->macc[acc + 1] = res; +} + +void HELPER(set_mac_extu)(CPUM68KState *env, uint32_t val, uint32_t acc) +{ + uint64_t res; + res = (uint32_t)env->macc[acc]; + res |= ((uint64_t)(val & 0xffff)) << 32; + env->macc[acc] = res; + res = (uint32_t)env->macc[acc + 1]; + res |= (uint64_t)(val & 0xffff0000) << 16; + env->macc[acc + 1] = res; +} + +void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read) +{ + hwaddr physical; + int access_type; + int prot; + int ret; + target_ulong page_size; + + access_type = ACCESS_PTEST; + if (env->dfc & 4) { + access_type |= ACCESS_SUPER; + } + if ((env->dfc & 3) == 2) { + access_type |= ACCESS_CODE; + } + if (!is_read) { + access_type |= ACCESS_STORE; + } + + env->mmu.mmusr = 0; + 
env->mmu.ssw = 0; + ret = get_physical_address(env, &physical, &prot, addr, + access_type, &page_size); + if (ret == 0) { + addr &= TARGET_PAGE_MASK; + physical += addr & (page_size - 1); + tlb_set_page(env_cpu(env), addr, physical, + prot, access_type & ACCESS_SUPER ? + MMU_KERNEL_IDX : MMU_USER_IDX, page_size); + } +} + +void HELPER(pflush)(CPUM68KState *env, uint32_t addr, uint32_t opmode) +{ + CPUState *cs = env_cpu(env); + + switch (opmode) { + case 0: /* Flush page entry if not global */ + case 1: /* Flush page entry */ + tlb_flush_page(cs, addr); + break; + case 2: /* Flush all except global entries */ + tlb_flush(cs); + break; + case 3: /* Flush all entries */ + tlb_flush(cs); + break; + } +} + +void HELPER(reset)(CPUM68KState *env) +{ + /* FIXME: reset all except CPU */ +} diff --git a/qemu/target/m68k/helper.h b/qemu/target/m68k/helper.h new file mode 100644 index 00000000..04cbbf48 --- /dev/null +++ b/qemu/target/m68k/helper.h @@ -0,0 +1,130 @@ +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) + +DEF_HELPER_1(bitrev, i32, i32) +DEF_HELPER_1(ff1, i32, i32) +DEF_HELPER_FLAGS_2(sats, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_3(divuw, void, env, int, i32) +DEF_HELPER_3(divsw, void, env, int, s32) +DEF_HELPER_4(divul, void, env, int, int, i32) +DEF_HELPER_4(divsl, void, env, int, int, s32) +DEF_HELPER_4(divull, void, env, int, int, i32) +DEF_HELPER_4(divsll, void, env, int, int, s32) +DEF_HELPER_2(set_sr, void, env, i32) +DEF_HELPER_3(cf_movec_to, void, env, i32, i32) +DEF_HELPER_3(m68k_movec_to, void, env, i32, i32) +DEF_HELPER_2(m68k_movec_from, i32, env, i32) +DEF_HELPER_4(cas2w, void, env, i32, i32, i32) +DEF_HELPER_4(cas2l, void, env, i32, i32, i32) +DEF_HELPER_4(cas2l_parallel, void, env, i32, i32, i32) + +#define dh_alias_fp ptr +#define dh_ctype_fp FPReg * +#define dh_is_signed_fp dh_is_signed_ptr + +DEF_HELPER_3(exts32, void, env, fp, s32) +DEF_HELPER_3(extf32, void, env, fp, f32) +DEF_HELPER_3(extf64, void, env, fp, f64) +DEF_HELPER_2(redf32, f32, env, fp) +DEF_HELPER_2(redf64, f64, env, fp) +DEF_HELPER_2(reds32, s32, env, fp) + +DEF_HELPER_3(fsround, void, env, fp, fp) +DEF_HELPER_3(fdround, void, env, fp, fp) +DEF_HELPER_3(firound, void, env, fp, fp) +DEF_HELPER_3(fitrunc, void, env, fp, fp) +DEF_HELPER_3(fsqrt, void, env, fp, fp) +DEF_HELPER_3(fssqrt, void, env, fp, fp) +DEF_HELPER_3(fdsqrt, void, env, fp, fp) +DEF_HELPER_3(fabs, void, env, fp, fp) +DEF_HELPER_3(fsabs, void, env, fp, fp) +DEF_HELPER_3(fdabs, void, env, fp, fp) +DEF_HELPER_3(fneg, void, env, fp, fp) +DEF_HELPER_3(fsneg, void, env, fp, fp) +DEF_HELPER_3(fdneg, void, env, fp, fp) +DEF_HELPER_4(fadd, void, env, fp, fp, fp) +DEF_HELPER_4(fsadd, void, env, fp, fp, fp) +DEF_HELPER_4(fdadd, void, env, fp, fp, fp) +DEF_HELPER_4(fsub, void, env, fp, fp, fp) +DEF_HELPER_4(fssub, void, env, fp, fp, fp) +DEF_HELPER_4(fdsub, void, env, fp, fp, fp) +DEF_HELPER_4(fmul, void, env, fp, fp, fp) +DEF_HELPER_4(fsmul, void, env, fp, fp, fp) +DEF_HELPER_4(fdmul, void, env, fp, fp, fp) +DEF_HELPER_4(fsglmul, void, env, fp, fp, fp) +DEF_HELPER_4(fdiv, void, env, fp, fp, fp) +DEF_HELPER_4(fsdiv, void, env, fp, fp, fp) +DEF_HELPER_4(fddiv, void, env, fp, fp, fp) +DEF_HELPER_4(fsgldiv, void, env, fp, fp, fp) +DEF_HELPER_FLAGS_3(fcmp, TCG_CALL_NO_RWG, void, env, fp, fp) +DEF_HELPER_FLAGS_2(set_fpcr, TCG_CALL_NO_RWG, void, env, i32) +DEF_HELPER_FLAGS_2(ftst, TCG_CALL_NO_RWG, void, env, fp) +DEF_HELPER_3(fconst, void, env, fp, i32) +DEF_HELPER_3(fmovemx_st_predec, i32, env, i32, i32) +DEF_HELPER_3(fmovemx_st_postinc, i32, 
env, i32, i32) +DEF_HELPER_3(fmovemx_ld_postinc, i32, env, i32, i32) +DEF_HELPER_3(fmovemd_st_predec, i32, env, i32, i32) +DEF_HELPER_3(fmovemd_st_postinc, i32, env, i32, i32) +DEF_HELPER_3(fmovemd_ld_postinc, i32, env, i32, i32) +DEF_HELPER_4(fmod, void, env, fp, fp, fp) +DEF_HELPER_4(frem, void, env, fp, fp, fp) +DEF_HELPER_3(fgetexp, void, env, fp, fp) +DEF_HELPER_3(fgetman, void, env, fp, fp) +DEF_HELPER_4(fscale, void, env, fp, fp, fp) +DEF_HELPER_3(flognp1, void, env, fp, fp) +DEF_HELPER_3(flogn, void, env, fp, fp) +DEF_HELPER_3(flog10, void, env, fp, fp) +DEF_HELPER_3(flog2, void, env, fp, fp) +DEF_HELPER_3(fetox, void, env, fp, fp) +DEF_HELPER_3(ftwotox, void, env, fp, fp) +DEF_HELPER_3(ftentox, void, env, fp, fp) +DEF_HELPER_3(ftan, void, env, fp, fp) +DEF_HELPER_3(fsin, void, env, fp, fp) +DEF_HELPER_3(fcos, void, env, fp, fp) +DEF_HELPER_4(fsincos, void, env, fp, fp, fp) +DEF_HELPER_3(fatan, void, env, fp, fp) +DEF_HELPER_3(fasin, void, env, fp, fp) +DEF_HELPER_3(facos, void, env, fp, fp) +DEF_HELPER_3(fatanh, void, env, fp, fp) +DEF_HELPER_3(ftanh, void, env, fp, fp) +DEF_HELPER_3(fsinh, void, env, fp, fp) +DEF_HELPER_3(fcosh, void, env, fp, fp) + +DEF_HELPER_3(mac_move, void, env, i32, i32) +DEF_HELPER_3(macmulf, i64, env, i32, i32) +DEF_HELPER_3(macmuls, i64, env, i32, i32) +DEF_HELPER_3(macmulu, i64, env, i32, i32) +DEF_HELPER_2(macsats, void, env, i32) +DEF_HELPER_2(macsatu, void, env, i32) +DEF_HELPER_2(macsatf, void, env, i32) +DEF_HELPER_2(mac_set_flags, void, env, i32) +DEF_HELPER_2(set_macsr, void, env, i32) +DEF_HELPER_2(get_macf, i32, env, i64) +DEF_HELPER_1(get_macs, i32, i64) +DEF_HELPER_1(get_macu, i32, i64) +DEF_HELPER_2(get_mac_extf, i32, env, i32) +DEF_HELPER_2(get_mac_exti, i32, env, i32) +DEF_HELPER_3(set_mac_extf, void, env, i32, i32) +DEF_HELPER_3(set_mac_exts, void, env, i32, i32) +DEF_HELPER_3(set_mac_extu, void, env, i32, i32) + +DEF_HELPER_2(flush_flags, void, env, i32) +DEF_HELPER_2(set_ccr, void, env, i32) +DEF_HELPER_FLAGS_1(get_ccr, TCG_CALL_NO_WG_SE, i32, env) +DEF_HELPER_2(raise_exception, void, env, i32) + +DEF_HELPER_FLAGS_3(bfffo_reg, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) + +DEF_HELPER_FLAGS_4(bfexts_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32) +DEF_HELPER_FLAGS_4(bfextu_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32) +DEF_HELPER_FLAGS_5(bfins_mem, TCG_CALL_NO_WG, i32, env, i32, i32, s32, i32) +DEF_HELPER_FLAGS_4(bfchg_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32) +DEF_HELPER_FLAGS_4(bfclr_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32) +DEF_HELPER_FLAGS_4(bfset_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32) +DEF_HELPER_FLAGS_4(bfffo_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32) + +DEF_HELPER_3(chk, void, env, s32, s32) +DEF_HELPER_4(chk2, void, env, s32, s32, s32) + +DEF_HELPER_3(ptest, void, env, i32, i32) +DEF_HELPER_3(pflush, void, env, i32, i32) +DEF_HELPER_FLAGS_1(reset, TCG_CALL_NO_RWG, void, env) diff --git a/qemu/target/m68k/op_helper.c b/qemu/target/m68k/op_helper.c new file mode 100644 index 00000000..91c504f0 --- /dev/null +++ b/qemu/target/m68k/op_helper.c @@ -0,0 +1,1000 @@ +/* + * M68K helper routines + * + * Copyright (c) 2007 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+
+static void cf_rte(CPUM68KState *env)
+{
+    uint32_t sp;
+    uint32_t fmt;
+
+    sp = env->aregs[7];
+    fmt = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
+    env->pc = cpu_ldl_mmuidx_ra(env, sp + 4, MMU_KERNEL_IDX, 0);
+    sp |= (fmt >> 28) & 3;
+    env->aregs[7] = sp + 8;
+
+    cpu_m68k_set_sr(env, fmt);
+}
+
+static void m68k_rte(CPUM68KState *env)
+{
+    uint32_t sp;
+    uint16_t fmt;
+    uint16_t sr;
+
+    sp = env->aregs[7];
+throwaway:
+    sr = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
+    sp += 2;
+    env->pc = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
+    sp += 4;
+    if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
+        /* all except 68000 */
+        fmt = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
+        sp += 2;
+        switch (fmt >> 12) {
+        case 0:
+            break;
+        case 1:
+            env->aregs[7] = sp;
+            cpu_m68k_set_sr(env, sr);
+            goto throwaway;
+        case 2:
+        case 3:
+            sp += 4;
+            break;
+        case 4:
+            sp += 8;
+            break;
+        case 7:
+            sp += 52;
+            break;
+        }
+    }
+    env->aregs[7] = sp;
+    cpu_m68k_set_sr(env, sr);
+}
+
+static void cf_interrupt_all(CPUM68KState *env, int is_hw)
+{
+    CPUState *cs = env_cpu(env);
+    uint32_t sp;
+    uint32_t sr;
+    uint32_t fmt;
+    uint32_t retaddr;
+    uint32_t vector;
+
+    fmt = 0;
+    retaddr = env->pc;
+
+    if (!is_hw) {
+        switch (cs->exception_index) {
+        case EXCP_RTE:
+            /* Return from an exception. */
+            cf_rte(env);
+            return;
+        case EXCP_HALT_INSN:
+            cs->halted = 1;
+            cs->exception_index = EXCP_HLT;
+            cpu_loop_exit(cs);
+            return;
+        }
+        if (cs->exception_index >= EXCP_TRAP0
+            && cs->exception_index <= EXCP_TRAP15) {
+            /* Move the PC after the trap instruction. */
+            retaddr += 2;
+        }
+    }
+
+    vector = cs->exception_index << 2;
+
+    sr = env->sr | cpu_m68k_get_ccr(env);
+
+    fmt |= 0x40000000;
+    fmt |= vector << 16;
+    fmt |= sr;
+
+    env->sr |= SR_S;
+    if (is_hw) {
+        env->sr = (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
+        env->sr &= ~SR_M;
+    }
+    m68k_switch_sp(env);
+    sp = env->aregs[7];
+    fmt |= (sp & 3) << 28;
+
+    /* ??? This could cause MMU faults. */
+    sp &= ~3;
+    sp -= 4;
+    cpu_stl_mmuidx_ra(env, sp, retaddr, MMU_KERNEL_IDX, 0);
+    sp -= 4;
+    cpu_stl_mmuidx_ra(env, sp, fmt, MMU_KERNEL_IDX, 0);
+    env->aregs[7] = sp;
+    /* Jump to vector.
*/ + env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0); +} + +static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp, + uint16_t format, uint16_t sr, + uint32_t addr, uint32_t retaddr) +{ + if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) { + /* all except 68000 */ + CPUState *cs = env_cpu(env); + switch (format) { + case 4: + *sp -= 4; + cpu_stl_mmuidx_ra(env, *sp, env->pc, MMU_KERNEL_IDX, 0); + *sp -= 4; + cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0); + break; + case 3: + case 2: + *sp -= 4; + cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0); + break; + } + *sp -= 2; + cpu_stw_mmuidx_ra(env, *sp, (format << 12) + (cs->exception_index << 2), + MMU_KERNEL_IDX, 0); + } + *sp -= 4; + cpu_stl_mmuidx_ra(env, *sp, retaddr, MMU_KERNEL_IDX, 0); + *sp -= 2; + cpu_stw_mmuidx_ra(env, *sp, sr, MMU_KERNEL_IDX, 0); +} + +static void m68k_interrupt_all(CPUM68KState *env, int is_hw) +{ + CPUState *cs = env_cpu(env); + uint32_t sp; + uint32_t retaddr; + uint32_t vector; + uint16_t sr, oldsr; + + retaddr = env->pc; + + if (!is_hw) { + switch (cs->exception_index) { + case EXCP_RTE: + /* Return from an exception. */ + m68k_rte(env); + return; + case EXCP_TRAP0: + case EXCP_TRAP0 + 1: + case EXCP_TRAP0 + 2: + case EXCP_TRAP0 + 3: + case EXCP_TRAP0 + 4: + case EXCP_TRAP0 + 5: + case EXCP_TRAP0 + 6: + case EXCP_TRAP0 + 7: + case EXCP_TRAP0 + 8: + case EXCP_TRAP0 + 9: + case EXCP_TRAP0 + 10: + case EXCP_TRAP0 + 11: + case EXCP_TRAP0 + 12: + case EXCP_TRAP0 + 13: + case EXCP_TRAP0 + 14: + case EXCP_TRAP15: + /* Move the PC after the trap instruction. */ + retaddr += 2; + break; + } + } + + vector = cs->exception_index << 2; + + sr = env->sr | cpu_m68k_get_ccr(env); + + /* + * MC68040UM/AD, chapter 9.3.10 + */ + + /* "the processor first make an internal copy" */ + oldsr = sr; + /* "set the mode to supervisor" */ + sr |= SR_S; + /* "suppress tracing" */ + sr &= ~SR_T; + /* "sets the processor interrupt mask" */ + if (is_hw) { + sr |= (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT); + } + cpu_m68k_set_sr(env, sr); + sp = env->aregs[7]; + + sp &= ~1; + if (cs->exception_index == EXCP_ACCESS) { + if (env->mmu.fault) { + cpu_abort(cs, "DOUBLE MMU FAULT\n"); + } + env->mmu.fault = true; + /* push data 3 */ + sp -= 4; + cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* push data 2 */ + sp -= 4; + cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* push data 1 */ + sp -= 4; + cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* write back 1 / push data 0 */ + sp -= 4; + cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* write back 1 address */ + sp -= 4; + cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* write back 2 data */ + sp -= 4; + cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* write back 2 address */ + sp -= 4; + cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* write back 3 data */ + sp -= 4; + cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* write back 3 address */ + sp -= 4; + cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0); + /* fault address */ + sp -= 4; + cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0); + /* write back 1 status */ + sp -= 2; + cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* write back 2 status */ + sp -= 2; + cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* write back 3 status */ + sp -= 2; + cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0); + /* special status word */ + sp -= 2; + cpu_stw_mmuidx_ra(env, sp, env->mmu.ssw, MMU_KERNEL_IDX, 0); + /* effective address */ 
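+        /*
+         * (As in the write-back 3 address and fault-address slots above,
+         * the MMU fault address in mmu.ar is reused here rather than a
+         * separately tracked effective address.)
+         */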
+ sp -= 4; + cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0); + + do_stack_frame(env, &sp, 7, oldsr, 0, retaddr); + env->mmu.fault = false; + } else if (cs->exception_index == EXCP_ADDRESS) { + do_stack_frame(env, &sp, 2, oldsr, 0, retaddr); + } else if (cs->exception_index == EXCP_ILLEGAL || + cs->exception_index == EXCP_DIV0 || + cs->exception_index == EXCP_CHK || + cs->exception_index == EXCP_TRAPCC || + cs->exception_index == EXCP_TRACE) { + /* FIXME: addr is not only env->pc */ + do_stack_frame(env, &sp, 2, oldsr, env->pc, retaddr); + } else if (is_hw && oldsr & SR_M && + cs->exception_index >= EXCP_SPURIOUS && + cs->exception_index <= EXCP_INT_LEVEL_7) { + do_stack_frame(env, &sp, 0, oldsr, 0, retaddr); + oldsr = sr; + env->aregs[7] = sp; + cpu_m68k_set_sr(env, sr &= ~SR_M); + sp = env->aregs[7] & ~1; + do_stack_frame(env, &sp, 1, oldsr, 0, retaddr); + } else { + do_stack_frame(env, &sp, 0, oldsr, 0, retaddr); + } + + env->aregs[7] = sp; + /* Jump to vector. */ + env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0); +} + +static void do_interrupt_all(CPUM68KState *env, int is_hw) +{ + if (m68k_feature(env, M68K_FEATURE_M68000)) { + m68k_interrupt_all(env, is_hw); + return; + } + cf_interrupt_all(env, is_hw); +} + +void m68k_cpu_do_interrupt(CPUState *cs) +{ + M68kCPU *cpu = M68K_CPU(cs); + CPUM68KState *env = &cpu->env; + + do_interrupt_all(env, 0); +} + +static inline void do_interrupt_m68k_hardirq(CPUM68KState *env) +{ + do_interrupt_all(env, 1); +} + +void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, + unsigned size, MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr) +{ + M68kCPU *cpu = M68K_CPU(cs); + CPUM68KState *env = &cpu->env; + + cpu_restore_state(cs, retaddr, true); + + if (m68k_feature(env, M68K_FEATURE_M68040)) { + env->mmu.mmusr = 0; + env->mmu.ssw |= M68K_ATC_040; + /* FIXME: manage MMU table access error */ + env->mmu.ssw &= ~M68K_TM_040; + if (env->sr & SR_S) { /* SUPERVISOR */ + env->mmu.ssw |= M68K_TM_040_SUPER; + } + if (access_type == MMU_INST_FETCH) { /* instruction or data */ + env->mmu.ssw |= M68K_TM_040_CODE; + } else { + env->mmu.ssw |= M68K_TM_040_DATA; + } + env->mmu.ssw &= ~M68K_BA_SIZE_MASK; + switch (size) { + case 1: + env->mmu.ssw |= M68K_BA_SIZE_BYTE; + break; + case 2: + env->mmu.ssw |= M68K_BA_SIZE_WORD; + break; + case 4: + env->mmu.ssw |= M68K_BA_SIZE_LONG; + break; + } + + if (access_type != MMU_DATA_STORE) { + env->mmu.ssw |= M68K_RW_040; + } + + env->mmu.ar = addr; + + cs->exception_index = EXCP_ACCESS; + cpu_loop_exit(cs); + } +} + +bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + M68kCPU *cpu = M68K_CPU(cs); + CPUM68KState *env = &cpu->env; + + if (interrupt_request & CPU_INTERRUPT_HARD + && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) { + /* + * Real hardware gets the interrupt vector via an IACK cycle + * at this point. Current emulated hardware doesn't rely on + * this, so we provide/save the vector when the interrupt is + * first signalled. 
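+ *
+ * Illustrative sketch (editorial addition, not from the original
+ * patch): a board model driving this scheme records the vector up
+ * front and then asserts the line, roughly:
+ *
+ * env->pending_level = level; -- level 1..7
+ * env->pending_vector = 24 + level; -- m68k autovector numbers
+ * cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ *
+ * which is what the comparison against the SR_I mask above consumes.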
+ */ + cs->exception_index = env->pending_vector; + do_interrupt_m68k_hardirq(env); + return true; + } + return false; +} + +static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr) +{ + CPUState *cs = env_cpu(env); + + cs->exception_index = tt; + cpu_loop_exit_restore(cs, raddr); +} + +static void raise_exception(CPUM68KState *env, int tt) +{ + raise_exception_ra(env, tt, 0); +} + +void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt) +{ + raise_exception(env, tt); +} + +void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den) +{ + uint32_t num = env->dregs[destr]; + uint32_t quot, rem; + + if (den == 0) { + raise_exception_ra(env, EXCP_DIV0, GETPC()); + } + quot = num / den; + rem = num % den; + + env->cc_c = 0; /* always cleared, even if overflow */ + if (quot > 0xffff) { + env->cc_v = -1; + /* + * real 68040 keeps N and unset Z on overflow, + * whereas documentation says "undefined" + */ + env->cc_z = 1; + return; + } + env->dregs[destr] = deposit32(quot, 16, 16, rem); + env->cc_z = (int16_t)quot; + env->cc_n = (int16_t)quot; + env->cc_v = 0; +} + +void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den) +{ + int32_t num = env->dregs[destr]; + uint32_t quot, rem; + + if (den == 0) { + raise_exception_ra(env, EXCP_DIV0, GETPC()); + } + quot = num / den; + rem = num % den; + + env->cc_c = 0; /* always cleared, even if overflow */ + if (quot != (int16_t)quot) { + env->cc_v = -1; + /* nothing else is modified */ + /* + * real 68040 keeps N and unset Z on overflow, + * whereas documentation says "undefined" + */ + env->cc_z = 1; + return; + } + env->dregs[destr] = deposit32(quot, 16, 16, rem); + env->cc_z = (int16_t)quot; + env->cc_n = (int16_t)quot; + env->cc_v = 0; +} + +void HELPER(divul)(CPUM68KState *env, int numr, int regr, uint32_t den) +{ + uint32_t num = env->dregs[numr]; + uint32_t quot, rem; + + if (den == 0) { + raise_exception_ra(env, EXCP_DIV0, GETPC()); + } + quot = num / den; + rem = num % den; + + env->cc_c = 0; + env->cc_z = quot; + env->cc_n = quot; + env->cc_v = 0; + + if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) { + if (numr == regr) { + env->dregs[numr] = quot; + } else { + env->dregs[regr] = rem; + } + } else { + env->dregs[regr] = rem; + env->dregs[numr] = quot; + } +} + +void HELPER(divsl)(CPUM68KState *env, int numr, int regr, int32_t den) +{ + int32_t num = env->dregs[numr]; + int32_t quot, rem; + + if (den == 0) { + raise_exception_ra(env, EXCP_DIV0, GETPC()); + } + quot = num / den; + rem = num % den; + + env->cc_c = 0; + env->cc_z = quot; + env->cc_n = quot; + env->cc_v = 0; + + if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) { + if (numr == regr) { + env->dregs[numr] = quot; + } else { + env->dregs[regr] = rem; + } + } else { + env->dregs[regr] = rem; + env->dregs[numr] = quot; + } +} + +void HELPER(divull)(CPUM68KState *env, int numr, int regr, uint32_t den) +{ + uint64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]); + uint64_t quot; + uint32_t rem; + + if (den == 0) { + raise_exception_ra(env, EXCP_DIV0, GETPC()); + } + quot = num / den; + rem = num % den; + + env->cc_c = 0; /* always cleared, even if overflow */ + if (quot > 0xffffffffULL) { + env->cc_v = -1; + /* + * real 68040 keeps N and unset Z on overflow, + * whereas documentation says "undefined" + */ + env->cc_z = 1; + return; + } + env->cc_z = quot; + env->cc_n = quot; + env->cc_v = 0; + + /* + * If Dq and Dr are the same, the quotient is returned. + * therefore we set Dq last. 
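+ *
+ * Example (editorial): for DIVU.L <ea>,Dr:Dq with Dr == Dq, both
+ * assignments below target the same register; storing the remainder
+ * first and the quotient last lets the quotient win, as the 680x0
+ * family documents for the aliased form.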
+ */ + + env->dregs[regr] = rem; + env->dregs[numr] = quot; +} + +void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den) +{ + int64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]); + int64_t quot; + int32_t rem; + + if (den == 0) { + raise_exception_ra(env, EXCP_DIV0, GETPC()); + } + quot = num / den; + rem = num % den; + + env->cc_c = 0; /* always cleared, even if overflow */ + if (quot != (int32_t)quot) { + env->cc_v = -1; + /* + * real 68040 keeps N and unset Z on overflow, + * whereas documentation says "undefined" + */ + env->cc_z = 1; + return; + } + env->cc_z = quot; + env->cc_n = quot; + env->cc_v = 0; + + /* + * If Dq and Dr are the same, the quotient is returned. + * therefore we set Dq last. + */ + + env->dregs[regr] = rem; + env->dregs[numr] = quot; +} + +/* We're executing in a serial context -- no need to be atomic. */ +void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2) +{ + uint32_t Dc1 = extract32(regs, 9, 3); + uint32_t Dc2 = extract32(regs, 6, 3); + uint32_t Du1 = extract32(regs, 3, 3); + uint32_t Du2 = extract32(regs, 0, 3); + int16_t c1 = env->dregs[Dc1]; + int16_t c2 = env->dregs[Dc2]; + int16_t u1 = env->dregs[Du1]; + int16_t u2 = env->dregs[Du2]; + int16_t l1, l2; + uintptr_t ra = GETPC(); + + l1 = cpu_lduw_data_ra(env, a1, ra); + l2 = cpu_lduw_data_ra(env, a2, ra); + if (l1 == c1 && l2 == c2) { + cpu_stw_data_ra(env, a1, u1, ra); + cpu_stw_data_ra(env, a2, u2, ra); + } + + if (c1 != l1) { + env->cc_n = l1; + env->cc_v = c1; + } else { + env->cc_n = l2; + env->cc_v = c2; + } + env->cc_op = CC_OP_CMPW; + env->dregs[Dc1] = deposit32(env->dregs[Dc1], 0, 16, l1); + env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2); +} + +static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2, + bool parallel) +{ + uint32_t Dc1 = extract32(regs, 9, 3); + uint32_t Dc2 = extract32(regs, 6, 3); + uint32_t Du1 = extract32(regs, 3, 3); + uint32_t Du2 = extract32(regs, 0, 3); + uint32_t c1 = env->dregs[Dc1]; + uint32_t c2 = env->dregs[Dc2]; + uint32_t u1 = env->dregs[Du1]; + uint32_t u2 = env->dregs[Du2]; + uint32_t l1, l2; + uintptr_t ra = GETPC(); +#if defined(CONFIG_ATOMIC64) + int mmu_idx = cpu_mmu_index(env, 0); + TCGMemOpIdx oi; +#endif + + if (parallel) { + /* We're executing in a parallel context -- must be atomic. */ +#ifdef CONFIG_ATOMIC64 + uint64_t c, u, l; + if ((a1 & 7) == 0 && a2 == a1 + 4) { + c = deposit64(c2, 32, 32, c1); + u = deposit64(u2, 32, 32, u1); + oi = make_memop_idx(MO_BEQ, mmu_idx); + l = helper_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra); + l1 = l >> 32; + l2 = l; + } else if ((a2 & 7) == 0 && a1 == a2 + 4) { + c = deposit64(c1, 32, 32, c2); + u = deposit64(u1, 32, 32, u2); + oi = make_memop_idx(MO_BEQ, mmu_idx); + l = helper_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra); + l2 = l >> 32; + l1 = l; + } else +#endif + { + /* Tell the main loop we need to serialize this insn. */ + cpu_loop_exit_atomic(env_cpu(env), ra); + } + } else { + /* We're executing in a serial context -- no need to be atomic. 
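+ * Contrast with the parallel path above: there the pair of 32-bit
+ * compare-and-swaps must appear atomic as a unit, so when the two
+ * addresses form one aligned 64-bit cell (a2 == a1 + 4 or
+ * a1 == a2 + 4) they are fused into a single 64-bit cmpxchg, and
+ * every other layout bails out to this serialized slow path via
+ * cpu_loop_exit_atomic().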
*/ + l1 = cpu_ldl_data_ra(env, a1, ra); + l2 = cpu_ldl_data_ra(env, a2, ra); + if (l1 == c1 && l2 == c2) { + cpu_stl_data_ra(env, a1, u1, ra); + cpu_stl_data_ra(env, a2, u2, ra); + } + } + + if (c1 != l1) { + env->cc_n = l1; + env->cc_v = c1; + } else { + env->cc_n = l2; + env->cc_v = c2; + } + env->cc_op = CC_OP_CMPL; + env->dregs[Dc1] = l1; + env->dregs[Dc2] = l2; +} + +void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2) +{ + do_cas2l(env, regs, a1, a2, false); +} + +void HELPER(cas2l_parallel)(CPUM68KState *env, uint32_t regs, uint32_t a1, + uint32_t a2) +{ + do_cas2l(env, regs, a1, a2, true); +} + +struct bf_data { + uint32_t addr; + uint32_t bofs; + uint32_t blen; + uint32_t len; +}; + +static struct bf_data bf_prep(uint32_t addr, int32_t ofs, uint32_t len) +{ + int bofs, blen; + + /* Bound length; map 0 to 32. */ + len = ((len - 1) & 31) + 1; + + /* Note that ofs is signed. */ + addr += ofs / 8; + bofs = ofs % 8; + if (bofs < 0) { + bofs += 8; + addr -= 1; + } + + /* + * Compute the number of bytes required (minus one) to + * satisfy the bitfield. + */ + blen = (bofs + len - 1) / 8; + + /* + * Canonicalize the bit offset for data loaded into a 64-bit big-endian + * word. For the cases where BLEN is not a power of 2, adjust ADDR so + * that we can use the next power of two sized load without crossing a + * page boundary, unless the field itself crosses the boundary. + */ + switch (blen) { + case 0: + bofs += 56; + break; + case 1: + bofs += 48; + break; + case 2: + if (addr & 1) { + bofs += 8; + addr -= 1; + } + /* fallthru */ + case 3: + bofs += 32; + break; + case 4: + if (addr & 3) { + bofs += 8 * (addr & 3); + addr &= -4; + } + break; + default: + g_assert_not_reached(); + } + + return (struct bf_data){ + .addr = addr, + .bofs = bofs, + .blen = blen, + .len = len, + }; +} + +static uint64_t bf_load(CPUM68KState *env, uint32_t addr, int blen, + uintptr_t ra) +{ + switch (blen) { + case 0: + return cpu_ldub_data_ra(env, addr, ra); + case 1: + return cpu_lduw_data_ra(env, addr, ra); + case 2: + case 3: + return cpu_ldl_data_ra(env, addr, ra); + case 4: + return cpu_ldq_data_ra(env, addr, ra); + default: + // g_assert_not_reached(); + return 0; + } +} + +static void bf_store(CPUM68KState *env, uint32_t addr, int blen, + uint64_t data, uintptr_t ra) +{ + switch (blen) { + case 0: + cpu_stb_data_ra(env, addr, data, ra); + break; + case 1: + cpu_stw_data_ra(env, addr, data, ra); + break; + case 2: + case 3: + cpu_stl_data_ra(env, addr, data, ra); + break; + case 4: + cpu_stq_data_ra(env, addr, data, ra); + break; + default: + g_assert_not_reached(); + } +} + +uint32_t HELPER(bfexts_mem)(CPUM68KState *env, uint32_t addr, + int32_t ofs, uint32_t len) +{ + uintptr_t ra = GETPC(); + struct bf_data d = bf_prep(addr, ofs, len); + uint64_t data = bf_load(env, d.addr, d.blen, ra); + + return (int64_t)(data << d.bofs) >> (64 - d.len); +} + +uint64_t HELPER(bfextu_mem)(CPUM68KState *env, uint32_t addr, + int32_t ofs, uint32_t len) +{ + uintptr_t ra = GETPC(); + struct bf_data d = bf_prep(addr, ofs, len); + uint64_t data = bf_load(env, d.addr, d.blen, ra); + + /* + * Put CC_N at the top of the high word; put the zero-extended value + * at the bottom of the low word. 
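+ *
+ * Worked example (editorial), for an 8-bit field whose value is
+ * 0xAB: after "data <<= d.bofs" the field occupies bits 63..56, the
+ * shift right by 64 - d.len = 56 leaves 0x00000000000000AB, and the
+ * final OR re-deposits the field on top, giving 0xAB000000000000AB;
+ * bit 63 is then the field's sign bit, ready to serve as CC_N.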
+ */ + data <<= d.bofs; + data >>= 64 - d.len; + data |= data << (64 - d.len); + + return data; +} + +uint32_t HELPER(bfins_mem)(CPUM68KState *env, uint32_t addr, uint32_t val, + int32_t ofs, uint32_t len) +{ + uintptr_t ra = GETPC(); + struct bf_data d = bf_prep(addr, ofs, len); + uint64_t data = bf_load(env, d.addr, d.blen, ra); +#ifdef _MSC_VER + uint64_t mask = 0xffffffffffffffffULL << (64 - d.len) >> d.bofs; +#else + uint64_t mask = -1ull << (64 - d.len) >> d.bofs; +#endif + + data = (data & ~mask) | (((uint64_t)val << (64 - d.len)) >> d.bofs); + + bf_store(env, d.addr, d.blen, data, ra); + + /* The field at the top of the word is also CC_N for CC_OP_LOGIC. */ + return val << (32 - d.len); +} + +uint32_t HELPER(bfchg_mem)(CPUM68KState *env, uint32_t addr, + int32_t ofs, uint32_t len) +{ + uintptr_t ra = GETPC(); + struct bf_data d = bf_prep(addr, ofs, len); + uint64_t data = bf_load(env, d.addr, d.blen, ra); +#ifdef _MSC_VER + uint64_t mask = 0xffffffffffffffffULL << (64 - d.len) >> d.bofs; +#else + uint64_t mask = -1ull << (64 - d.len) >> d.bofs; +#endif + + bf_store(env, d.addr, d.blen, data ^ mask, ra); + + return ((data & mask) << d.bofs) >> 32; +} + +uint32_t HELPER(bfclr_mem)(CPUM68KState *env, uint32_t addr, + int32_t ofs, uint32_t len) +{ + uintptr_t ra = GETPC(); + struct bf_data d = bf_prep(addr, ofs, len); + uint64_t data = bf_load(env, d.addr, d.blen, ra); +#ifdef _MSC_VER + uint64_t mask = 0xffffffffffffffffULL << (64 - d.len) >> d.bofs; +#else + uint64_t mask = -1ull << (64 - d.len) >> d.bofs; +#endif + + bf_store(env, d.addr, d.blen, data & ~mask, ra); + + return ((data & mask) << d.bofs) >> 32; +} + +uint32_t HELPER(bfset_mem)(CPUM68KState *env, uint32_t addr, + int32_t ofs, uint32_t len) +{ + uintptr_t ra = GETPC(); + struct bf_data d = bf_prep(addr, ofs, len); + uint64_t data = bf_load(env, d.addr, d.blen, ra); +#ifdef _MSC_VER + uint64_t mask = 0xffffffffffffffffULL << (64 - d.len) >> d.bofs; +#else + uint64_t mask = -1ull << (64 - d.len) >> d.bofs; +#endif + + bf_store(env, d.addr, d.blen, data | mask, ra); + + return ((data & mask) << d.bofs) >> 32; +} + +uint32_t HELPER(bfffo_reg)(uint32_t n, uint32_t ofs, uint32_t len) +{ + return (n ? clz32(n) : len) + ofs; +} + +uint64_t HELPER(bfffo_mem)(CPUM68KState *env, uint32_t addr, + int32_t ofs, uint32_t len) +{ + uintptr_t ra = GETPC(); + struct bf_data d = bf_prep(addr, ofs, len); + uint64_t data = bf_load(env, d.addr, d.blen, ra); +#ifdef _MSC_VER + uint64_t mask = 0xffffffffffffffffULL << (64 - d.len) >> d.bofs; +#else + uint64_t mask = -1ull << (64 - d.len) >> d.bofs; +#endif + uint64_t n = (data & mask) << d.bofs; + uint32_t ffo = helper_bfffo_reg(n >> 32, ofs, d.len); + + /* + * Return FFO in the low word and N in the high word. + * Note that because of MASK and the shift, the low word + * is already zero. + */ + return n | ffo; +} + +void HELPER(chk)(CPUM68KState *env, int32_t val, int32_t ub) +{ + /* + * From the specs: + * X: Not affected, C,V,Z: Undefined, + * N: Set if val < 0; cleared if val > ub, undefined otherwise + * We implement here values found from a real MC68040: + * X,V,Z: Not affected + * N: Set if val < 0; cleared if val >= 0 + * C: if 0 <= ub: set if val < 0 or val > ub, cleared otherwise + * if 0 > ub: set if val > ub and val < 0, cleared otherwise + */ + env->cc_n = val; + env->cc_c = 0 <= ub ? val < 0 || val > ub : val > ub && val < 0; + + if (val < 0 || val > ub) { + CPUState *cs = env_cpu(env); + + /* Recover PC and CC_OP for the beginning of the insn. 
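+ * The CHK trap is later delivered by m68k_interrupt_all() with a
+ * format 2 stack frame, and the PC recorded there must point past
+ * the trapping instruction; hence the rollback to the start of the
+ * insn here, followed by the explicit env->pc += 2 below.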
*/ + cpu_restore_state(cs, GETPC(), true); + + /* flags have been modified by gen_flush_flags() */ + env->cc_op = CC_OP_FLAGS; + /* Adjust PC to end of the insn. */ + env->pc += 2; + + cs->exception_index = EXCP_CHK; + cpu_loop_exit(cs); + } +} + +void HELPER(chk2)(CPUM68KState *env, int32_t val, int32_t lb, int32_t ub) +{ + /* + * From the specs: + * X: Not affected, N,V: Undefined, + * Z: Set if val is equal to lb or ub + * C: Set if val < lb or val > ub, cleared otherwise + * We implement here values found from a real MC68040: + * X,N,V: Not affected + * Z: Set if val is equal to lb or ub + * C: if lb <= ub: set if val < lb or val > ub, cleared otherwise + * if lb > ub: set if val > ub and val < lb, cleared otherwise + */ + env->cc_z = val != lb && val != ub; + env->cc_c = lb <= ub ? val < lb || val > ub : val > ub && val < lb; + + if (env->cc_c) { + CPUState *cs = env_cpu(env); + + /* Recover PC and CC_OP for the beginning of the insn. */ + cpu_restore_state(cs, GETPC(), true); + + /* flags have been modified by gen_flush_flags() */ + env->cc_op = CC_OP_FLAGS; + /* Adjust PC to end of the insn. */ + env->pc += 4; + + cs->exception_index = EXCP_CHK; + cpu_loop_exit(cs); + } +} diff --git a/qemu/target-m68k/qregs.def b/qemu/target/m68k/qregs.def similarity index 50% rename from qemu/target-m68k/qregs.def rename to qemu/target/m68k/qregs.def index 204663e1..1aadc622 100644 --- a/qemu/target-m68k/qregs.def +++ b/qemu/target/m68k/qregs.def @@ -1,11 +1,10 @@ -DEFF64(FP_RESULT, fp_result) DEFO32(PC, pc) DEFO32(SR, sr) DEFO32(CC_OP, cc_op) -DEFO32(CC_DEST, cc_dest) -DEFO32(CC_SRC, cc_src) DEFO32(CC_X, cc_x) -DEFO32(DIV1, div1) -DEFO32(DIV2, div2) +DEFO32(CC_C, cc_c) +DEFO32(CC_N, cc_n) +DEFO32(CC_V, cc_v) +DEFO32(CC_Z, cc_z) DEFO32(MACSR, macsr) DEFO32(MAC_MASK, mac_mask) diff --git a/qemu/target/m68k/softfloat.c b/qemu/target/m68k/softfloat.c new file mode 100644 index 00000000..24c313ed --- /dev/null +++ b/qemu/target/m68k/softfloat.c @@ -0,0 +1,2900 @@ +/* + * Ported from a work by Andreas Grabher for Previous, NeXT Computer Emulator, + * derived from NetBSD M68040 FPSP functions, + * derived from release 2a of the SoftFloat IEC/IEEE Floating-point Arithmetic + * Package. Those parts of the code (and some later contributions) are + * provided under that license, as detailed below. + * It has subsequently been modified by contributors to the QEMU Project, + * so some portions are provided under: + * the SoftFloat-2a license + * the BSD license + * GPL-v2-or-later + * + * Any future contributions to this file will be taken to be licensed under + * the Softfloat-2a license unless specifically indicated otherwise. + */ + +/* + * Portions of this work are licensed under the terms of the GNU GPL, + * version 2 or later. See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "softfloat.h" +#include "fpu/softfloat-macros.h" +#include "softfloat_fpsp_tables.h" + +#define pi_exp 0x4000 +#define piby2_exp 0x3FFF +#define pi_sig UINT64_C(0xc90fdaa22168c235) + +static floatx80 propagateFloatx80NaNOneArg(floatx80 a, float_status *status) +{ + if (floatx80_is_signaling_nan(a, status)) { + float_raise(float_flag_invalid, status); + a = floatx80_silence_nan(a, status); + } + + if (status->default_nan_mode) { + return floatx80_default_nan(status); + } + + return a; +} + +/* + * Returns the modulo remainder of the extended double-precision floating-point + * value `a' with respect to the corresponding value `b'. 
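+ *
+ * Editorial note: this is the truncating (fmod-style) remainder
+ * behind the 680x0 FMOD instruction -- the quotient is chopped
+ * toward zero -- unlike floatx80_rem(), which rounds the quotient
+ * to nearest for the IEEE-style FREM. For a = 5.5 and b = 2.0 this
+ * returns 1.5, where FREM would yield -0.5.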
+ */ + +floatx80 floatx80_mod(floatx80 a, floatx80 b, float_status *status) +{ + flag aSign, zSign; + int32_t aExp, bExp, expDiff; + uint64_t aSig0, aSig1, bSig; + uint64_t qTemp, term0, term1; + + aSig0 = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + bSig = extractFloatx80Frac(b); + bExp = extractFloatx80Exp(b); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig0 << 1) + || ((bExp == 0x7FFF) && (uint64_t) (bSig << 1))) { + return propagateFloatx80NaN(a, b, status); + } + goto invalid; + } + if (bExp == 0x7FFF) { + if ((uint64_t) (bSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } + return a; + } + if (bExp == 0) { + if (bSig == 0) { + invalid: + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + normalizeFloatx80Subnormal(bSig, &bExp, &bSig); + } + if (aExp == 0) { + if ((uint64_t) (aSig0 << 1) == 0) { + return a; + } + normalizeFloatx80Subnormal(aSig0, &aExp, &aSig0); + } + bSig |= UINT64_C(0x8000000000000000); + zSign = aSign; + expDiff = aExp - bExp; + aSig1 = 0; + if (expDiff < 0) { + return a; + } + qTemp = (bSig <= aSig0); + if (qTemp) { + aSig0 -= bSig; + } + expDiff -= 64; + while (0 < expDiff) { + qTemp = estimateDiv128To64(aSig0, aSig1, bSig); + qTemp = (2 < qTemp) ? qTemp - 2 : 0; + mul64To128(bSig, qTemp, &term0, &term1); + sub128(aSig0, aSig1, term0, term1, &aSig0, &aSig1); + shortShift128Left(aSig0, aSig1, 62, &aSig0, &aSig1); + expDiff -= 62; + } + expDiff += 64; + if (0 < expDiff) { + qTemp = estimateDiv128To64(aSig0, aSig1, bSig); + qTemp = (2 < qTemp) ? qTemp - 2 : 0; + qTemp >>= 64 - expDiff; + mul64To128(bSig, qTemp << (64 - expDiff), &term0, &term1); + sub128(aSig0, aSig1, term0, term1, &aSig0, &aSig1); + shortShift128Left(0, bSig, 64 - expDiff, &term0, &term1); + while (le128(term0, term1, aSig0, aSig1)) { + ++qTemp; + sub128(aSig0, aSig1, term0, term1, &aSig0, &aSig1); + } + } + return + normalizeRoundAndPackFloatx80( + 80, zSign, bExp + expDiff, aSig0, aSig1, status); +} + +/* + * Returns the mantissa of the extended double-precision floating-point + * value `a'. + */ + +floatx80 floatx80_getman(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a , status); + } + float_raise(float_flag_invalid , status); + return floatx80_default_nan(status); + } + + if (aExp == 0) { + if (aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + normalizeFloatx80Subnormal(aSig, &aExp, &aSig); + } + + return roundAndPackFloatx80(status->floatx80_rounding_precision, aSign, + 0x3FFF, aSig, 0, status); +} + +/* + * Returns the exponent of the extended double-precision floating-point + * value `a' as an extended double-precision value. 
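+ *
+ * Example (editorial): for a = 6.0 = 1.5 * 2^2 this returns 2.0,
+ * while floatx80_getman() above returns 1.5, mirroring the
+ * FGETEXP/FGETMAN pair of the 68881/68882.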
+ */ + +floatx80 floatx80_getexp(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a , status); + } + float_raise(float_flag_invalid , status); + return floatx80_default_nan(status); + } + + if (aExp == 0) { + if (aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + normalizeFloatx80Subnormal(aSig, &aExp, &aSig); + } + + return int32_to_floatx80(aExp - 0x3FFF, status); +} + +/* + * Scales extended double-precision floating-point value in operand `a' by + * value `b'. The function truncates the value in the second operand 'b' to + * an integral value and adds that value to the exponent of the operand 'a'. + * The operation performed according to the IEC/IEEE Standard for Binary + * Floating-Point Arithmetic. + */ + +floatx80 floatx80_scale(floatx80 a, floatx80 b, float_status *status) +{ + flag aSign, bSign; + int32_t aExp, bExp, shiftCount; + uint64_t aSig, bSig; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + bSig = extractFloatx80Frac(b); + bExp = extractFloatx80Exp(b); + bSign = extractFloatx80Sign(b); + + if (bExp == 0x7FFF) { + if ((uint64_t) (bSig << 1) || + ((aExp == 0x7FFF) && (uint64_t) (aSig << 1))) { + return propagateFloatx80NaN(a, b, status); + } + float_raise(float_flag_invalid , status); + return floatx80_default_nan(status); + } + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaN(a, b, status); + } + return packFloatx80(aSign, floatx80_infinity.high, + floatx80_infinity.low); + } + if (aExp == 0) { + if (aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + if (bExp < 0x3FFF) { + return a; + } + normalizeFloatx80Subnormal(aSig, &aExp, &aSig); + } + + if (bExp < 0x3FFF) { + return a; + } + + if (0x400F < bExp) { + aExp = bSign ? -0x6001 : 0xE000; + return roundAndPackFloatx80(status->floatx80_rounding_precision, + aSign, aExp, aSig, 0, status); + } + + shiftCount = 0x403E - bExp; + bSig >>= shiftCount; + aExp = bSign ? (aExp - bSig) : (aExp + bSig); + + return roundAndPackFloatx80(status->floatx80_rounding_precision, + aSign, aExp, aSig, 0, status); +} + +floatx80 floatx80_move(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t)(aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + return a; + } + if (aExp == 0) { + if (aSig == 0) { + return a; + } + normalizeRoundAndPackFloatx80(status->floatx80_rounding_precision, + aSign, aExp, aSig, 0, status); + } + return roundAndPackFloatx80(status->floatx80_rounding_precision, aSign, + aExp, aSig, 0, status); +} + +/* + * Algorithms for transcendental functions supported by MC68881 and MC68882 + * mathematical coprocessors. The functions are derived from FPSP library. + */ + +#define one_exp 0x3FFF +#define one_sig UINT64_C(0x8000000000000000) + +/* + * Function for compactifying extended double-precision floating point values. 
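+ *
+ * The result packs the biased exponent into the upper 16 bits and
+ * the top 16 significand bits into the lower half, so magnitude
+ * checks reduce to integer compares: 1.0 (exp 0x3FFF, significand
+ * 0x8000000000000000) compacts to 0x3FFF8000, the threshold that
+ * recurs in the range tests below.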
+ */ + +static int32_t floatx80_make_compact(int32_t aExp, uint64_t aSig) +{ + return (aExp << 16) | (aSig >> 48); +} + +/* + * Log base e of x plus 1 + */ + +floatx80 floatx80_lognp1(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig, fSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact, j, k; + floatx80 fp0, fp1, fp2, fp3, f, logof2, klog2, saveu; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + propagateFloatx80NaNOneArg(a, status); + } + if (aSign) { + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + + if (aSign && aExp >= one_exp) { + if (aExp == one_exp && aSig == one_sig) { + float_raise(float_flag_divbyzero, status); + return packFloatx80(aSign, floatx80_infinity.high, + floatx80_infinity.low); + } + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + + if (aExp < 0x3f99 || (aExp == 0x3f99 && aSig == one_sig)) { + /* <= min threshold */ + float_raise(float_flag_inexact, status); + return floatx80_move(a, status); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + compact = floatx80_make_compact(aExp, aSig); + + fp0 = a; /* Z */ + fp1 = a; + + fp0 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000), + status), status); /* X = (1+Z) */ + + aExp = extractFloatx80Exp(fp0); + aSig = extractFloatx80Frac(fp0); + + compact = floatx80_make_compact(aExp, aSig); + + if (compact < 0x3FFE8000 || compact > 0x3FFFC000) { + /* |X| < 1/2 or |X| > 3/2 */ + k = aExp - 0x3FFF; + fp1 = int32_to_floatx80(k, status); + + fSig = (aSig & UINT64_C(0xFE00000000000000)) | UINT64_C(0x0100000000000000); + j = (fSig >> 56) & 0x7E; /* DISPLACEMENT FOR 1/F */ + + f = packFloatx80(0, 0x3FFF, fSig); /* F */ + fp0 = packFloatx80(0, 0x3FFF, aSig); /* Y */ + + fp0 = floatx80_sub(fp0, f, status); /* Y-F */ + + lp1cont1: + /* LP1CONT1 */ + fp0 = floatx80_mul(fp0, log_tbl[j], status); /* FP0 IS U = (Y-F)/F */ + logof2 = packFloatx80(0, 0x3FFE, UINT64_C(0xB17217F7D1CF79AC)); + klog2 = floatx80_mul(fp1, logof2, status); /* FP1 IS K*LOG2 */ + fp2 = floatx80_mul(fp0, fp0, status); /* FP2 IS V=U*U */ + + fp3 = fp2; + fp1 = fp2; + + fp1 = floatx80_mul(fp1, float64_to_floatx80( + make_float64(0x3FC2499AB5E4040B), status), + status); /* V*A6 */ + fp2 = floatx80_mul(fp2, float64_to_floatx80( + make_float64(0xBFC555B5848CB7DB), status), + status); /* V*A5 */ + fp1 = floatx80_add(fp1, float64_to_floatx80( + make_float64(0x3FC99999987D8730), status), + status); /* A4+V*A6 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0xBFCFFFFFFF6F7E97), status), + status); /* A3+V*A5 */ + fp1 = floatx80_mul(fp1, fp3, status); /* V*(A4+V*A6) */ + fp2 = floatx80_mul(fp2, fp3, status); /* V*(A3+V*A5) */ + fp1 = floatx80_add(fp1, float64_to_floatx80( + make_float64(0x3FD55555555555A4), status), + status); /* A2+V*(A4+V*A6) */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0xBFE0000000000008), status), + status); /* A1+V*(A3+V*A5) */ + fp1 = floatx80_mul(fp1, fp3, status); /* V*(A2+V*(A4+V*A6)) */ + fp2 = floatx80_mul(fp2, fp3, status); /* V*(A1+V*(A3+V*A5)) */ + fp1 = 
floatx80_mul(fp1, fp0, status); /* U*V*(A2+V*(A4+V*A6)) */ + fp0 = floatx80_add(fp0, fp2, status); /* U+V*(A1+V*(A3+V*A5)) */ + + fp1 = floatx80_add(fp1, log_tbl[j + 1], + status); /* LOG(F)+U*V*(A2+V*(A4+V*A6)) */ + fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS LOG(F) + LOG(1+U) */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, klog2, status); + + float_raise(float_flag_inexact, status); + + return a; + } else if (compact < 0x3FFEF07D || compact > 0x3FFF8841) { + /* |X| < 1/16 or |X| > -1/16 */ + /* LP1CARE */ + fSig = (aSig & UINT64_C(0xFE00000000000000)) | UINT64_C(0x0100000000000000); + f = packFloatx80(0, 0x3FFF, fSig); /* F */ + j = (fSig >> 56) & 0x7E; /* DISPLACEMENT FOR 1/F */ + + if (compact >= 0x3FFF8000) { /* 1+Z >= 1 */ + /* KISZERO */ + fp0 = floatx80_sub(float32_to_floatx80(make_float32(0x3F800000), + status), f, status); /* 1-F */ + fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS Y-F = (1-F)+Z */ + fp1 = packFloatx80(0, 0, 0); /* K = 0 */ + } else { + /* KISNEG */ + fp0 = floatx80_sub(float32_to_floatx80(make_float32(0x40000000), + status), f, status); /* 2-F */ + fp1 = floatx80_add(fp1, fp1, status); /* 2Z */ + fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS Y-F = (2-F)+2Z */ + fp1 = packFloatx80(1, one_exp, one_sig); /* K = -1 */ + } + goto lp1cont1; + } else { + /* LP1ONE16 */ + fp1 = floatx80_add(fp1, fp1, status); /* FP1 IS 2Z */ + fp0 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000), + status), status); /* FP0 IS 1+X */ + + /* LP1CONT2 */ + fp1 = floatx80_div(fp1, fp0, status); /* U */ + saveu = fp1; + fp0 = floatx80_mul(fp1, fp1, status); /* FP0 IS V = U*U */ + fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS W = V*V */ + + fp3 = float64_to_floatx80(make_float64(0x3F175496ADD7DAD6), + status); /* B5 */ + fp2 = float64_to_floatx80(make_float64(0x3F3C71C2FE80C7E0), + status); /* B4 */ + fp3 = floatx80_mul(fp3, fp1, status); /* W*B5 */ + fp2 = floatx80_mul(fp2, fp1, status); /* W*B4 */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0x3F624924928BCCFF), status), + status); /* B3+W*B5 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3F899999999995EC), status), + status); /* B2+W*B4 */ + fp1 = floatx80_mul(fp1, fp3, status); /* W*(B3+W*B5) */ + fp2 = floatx80_mul(fp2, fp0, status); /* V*(B2+W*B4) */ + fp1 = floatx80_add(fp1, float64_to_floatx80( + make_float64(0x3FB5555555555555), status), + status); /* B1+W*(B3+W*B5) */ + + fp0 = floatx80_mul(fp0, saveu, status); /* FP0 IS U*V */ + fp1 = floatx80_add(fp1, fp2, + status); /* B1+W*(B3+W*B5) + V*(B2+W*B4) */ + fp0 = floatx80_mul(fp0, fp1, + status); /* U*V*([B1+W*(B3+W*B5)] + [V*(B2+W*B4)]) */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, saveu, status); + + /*if (!floatx80_is_zero(a)) { */ + float_raise(float_flag_inexact, status); + /*} */ + + return a; + } +} + +/* + * Log base e + */ + +floatx80 floatx80_logn(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig, fSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact, j, k, adjk; + floatx80 fp0, fp1, fp2, fp3, f, logof2, klog2, saveu; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + propagateFloatx80NaNOneArg(a, status); + } + if (aSign == 0) { + return packFloatx80(0, floatx80_infinity.high, + 
floatx80_infinity.low); + } + } + + adjk = 0; + + if (aExp == 0) { + if (aSig == 0) { /* zero */ + float_raise(float_flag_divbyzero, status); + return packFloatx80(1, floatx80_infinity.high, + floatx80_infinity.low); + } + if ((aSig & one_sig) == 0) { /* denormal */ + normalizeFloatx80Subnormal(aSig, &aExp, &aSig); + adjk = -100; + aExp += 100; + a = packFloatx80(aSign, aExp, aSig); + } + } + + if (aSign) { + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + compact = floatx80_make_compact(aExp, aSig); + + if (compact < 0x3FFEF07D || compact > 0x3FFF8841) { + /* |X| < 15/16 or |X| > 17/16 */ + k = aExp - 0x3FFF; + k += adjk; + fp1 = int32_to_floatx80(k, status); + + fSig = (aSig & UINT64_C(0xFE00000000000000)) | UINT64_C(0x0100000000000000); + j = (fSig >> 56) & 0x7E; /* DISPLACEMENT FOR 1/F */ + + f = packFloatx80(0, 0x3FFF, fSig); /* F */ + fp0 = packFloatx80(0, 0x3FFF, aSig); /* Y */ + + fp0 = floatx80_sub(fp0, f, status); /* Y-F */ + + /* LP1CONT1 */ + fp0 = floatx80_mul(fp0, log_tbl[j], status); /* FP0 IS U = (Y-F)/F */ + logof2 = packFloatx80(0, 0x3FFE, UINT64_C(0xB17217F7D1CF79AC)); + klog2 = floatx80_mul(fp1, logof2, status); /* FP1 IS K*LOG2 */ + fp2 = floatx80_mul(fp0, fp0, status); /* FP2 IS V=U*U */ + + fp3 = fp2; + fp1 = fp2; + + fp1 = floatx80_mul(fp1, float64_to_floatx80( + make_float64(0x3FC2499AB5E4040B), status), + status); /* V*A6 */ + fp2 = floatx80_mul(fp2, float64_to_floatx80( + make_float64(0xBFC555B5848CB7DB), status), + status); /* V*A5 */ + fp1 = floatx80_add(fp1, float64_to_floatx80( + make_float64(0x3FC99999987D8730), status), + status); /* A4+V*A6 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0xBFCFFFFFFF6F7E97), status), + status); /* A3+V*A5 */ + fp1 = floatx80_mul(fp1, fp3, status); /* V*(A4+V*A6) */ + fp2 = floatx80_mul(fp2, fp3, status); /* V*(A3+V*A5) */ + fp1 = floatx80_add(fp1, float64_to_floatx80( + make_float64(0x3FD55555555555A4), status), + status); /* A2+V*(A4+V*A6) */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0xBFE0000000000008), status), + status); /* A1+V*(A3+V*A5) */ + fp1 = floatx80_mul(fp1, fp3, status); /* V*(A2+V*(A4+V*A6)) */ + fp2 = floatx80_mul(fp2, fp3, status); /* V*(A1+V*(A3+V*A5)) */ + fp1 = floatx80_mul(fp1, fp0, status); /* U*V*(A2+V*(A4+V*A6)) */ + fp0 = floatx80_add(fp0, fp2, status); /* U+V*(A1+V*(A3+V*A5)) */ + + fp1 = floatx80_add(fp1, log_tbl[j + 1], + status); /* LOG(F)+U*V*(A2+V*(A4+V*A6)) */ + fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS LOG(F) + LOG(1+U) */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, klog2, status); + + float_raise(float_flag_inexact, status); + + return a; + } else { /* |X-1| >= 1/16 */ + fp0 = a; + fp1 = a; + fp1 = floatx80_sub(fp1, float32_to_floatx80(make_float32(0x3F800000), + status), status); /* FP1 IS X-1 */ + fp0 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000), + status), status); /* FP0 IS X+1 */ + fp1 = floatx80_add(fp1, fp1, status); /* FP1 IS 2(X-1) */ + + /* LP1CONT2 */ + fp1 = floatx80_div(fp1, fp0, status); /* U */ + saveu = fp1; + fp0 = floatx80_mul(fp1, fp1, status); /* FP0 IS V = U*U */ + fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS W = V*V */ + + fp3 = 
float64_to_floatx80(make_float64(0x3F175496ADD7DAD6), + status); /* B5 */ + fp2 = float64_to_floatx80(make_float64(0x3F3C71C2FE80C7E0), + status); /* B4 */ + fp3 = floatx80_mul(fp3, fp1, status); /* W*B5 */ + fp2 = floatx80_mul(fp2, fp1, status); /* W*B4 */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0x3F624924928BCCFF), status), + status); /* B3+W*B5 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3F899999999995EC), status), + status); /* B2+W*B4 */ + fp1 = floatx80_mul(fp1, fp3, status); /* W*(B3+W*B5) */ + fp2 = floatx80_mul(fp2, fp0, status); /* V*(B2+W*B4) */ + fp1 = floatx80_add(fp1, float64_to_floatx80( + make_float64(0x3FB5555555555555), status), + status); /* B1+W*(B3+W*B5) */ + + fp0 = floatx80_mul(fp0, saveu, status); /* FP0 IS U*V */ + fp1 = floatx80_add(fp1, fp2, status); /* B1+W*(B3+W*B5) + V*(B2+W*B4) */ + fp0 = floatx80_mul(fp0, fp1, + status); /* U*V*([B1+W*(B3+W*B5)] + [V*(B2+W*B4)]) */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, saveu, status); + + /*if (!floatx80_is_zero(a)) { */ + float_raise(float_flag_inexact, status); + /*} */ + + return a; + } +} + +/* + * Log base 10 + */ + +floatx80 floatx80_log10(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + int8_t user_rnd_mode, user_rnd_prec; + + floatx80 fp0, fp1; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + propagateFloatx80NaNOneArg(a, status); + } + if (aSign == 0) { + return packFloatx80(0, floatx80_infinity.high, + floatx80_infinity.low); + } + } + + if (aExp == 0 && aSig == 0) { + float_raise(float_flag_divbyzero, status); + return packFloatx80(1, floatx80_infinity.high, + floatx80_infinity.low); + } + + if (aSign) { + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + fp0 = floatx80_logn(a, status); + fp1 = packFloatx80(0, 0x3FFD, UINT64_C(0xDE5BD8A937287195)); /* INV_L10 */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, fp1, status); /* LOGN(X)*INV_L10 */ + + float_raise(float_flag_inexact, status); + + return a; +} + +/* + * Log base 2 + */ + +floatx80 floatx80_log2(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + int8_t user_rnd_mode, user_rnd_prec; + + floatx80 fp0, fp1; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + propagateFloatx80NaNOneArg(a, status); + } + if (aSign == 0) { + return packFloatx80(0, floatx80_infinity.high, + floatx80_infinity.low); + } + } + + if (aExp == 0) { + if (aSig == 0) { + float_raise(float_flag_divbyzero, status); + return packFloatx80(1, floatx80_infinity.high, + floatx80_infinity.low); + } + normalizeFloatx80Subnormal(aSig, &aExp, &aSig); + } + + if (aSign) { + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + 
status->floatx80_rounding_precision = 80; + + if (aSig == one_sig) { /* X is 2^k */ + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = int32_to_floatx80(aExp - 0x3FFF, status); + } else { + fp0 = floatx80_logn(a, status); + fp1 = packFloatx80(0, 0x3FFF, UINT64_C(0xB8AA3B295C17F0BC)); /* INV_L2 */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, fp1, status); /* LOGN(X)*INV_L2 */ + } + + float_raise(float_flag_inexact, status); + + return a; +} + +/* + * e to x + */ + +floatx80 floatx80_etox(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact, n, j, k, m, m1; + floatx80 fp0, fp1, fp2, fp3, l2, scale, adjscale; + flag adjflag; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + if (aSign) { + return packFloatx80(0, 0, 0); + } + return packFloatx80(0, floatx80_infinity.high, + floatx80_infinity.low); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(0, one_exp, one_sig); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + adjflag = 0; + + if (aExp >= 0x3FBE) { /* |X| >= 2^(-65) */ + compact = floatx80_make_compact(aExp, aSig); + + if (compact < 0x400CB167) { /* |X| < 16380 log2 */ + fp0 = a; + fp1 = a; + fp0 = floatx80_mul(fp0, float32_to_floatx80( + make_float32(0x42B8AA3B), status), + status); /* 64/log2 * X */ + adjflag = 0; + n = floatx80_to_int32(fp0, status); /* int(64/log2*X) */ + fp0 = int32_to_floatx80(n, status); + + j = n & 0x3F; /* J = N mod 64 */ + m = n / 64; /* NOTE: this is really arithmetic right shift by 6 */ + if (n < 0 && j) { + /* + * arithmetic right shift is division and + * round towards minus infinity + */ + m--; + } + m += 0x3FFF; /* biased exponent of 2^(M) */ + + expcont1: + fp2 = fp0; /* N */ + fp0 = floatx80_mul(fp0, float32_to_floatx80( + make_float32(0xBC317218), status), + status); /* N * L1, L1 = lead(-log2/64) */ + l2 = packFloatx80(0, 0x3FDC, UINT64_C(0x82E308654361C4C6)); + fp2 = floatx80_mul(fp2, l2, status); /* N * L2, L1+L2 = -log2/64 */ + fp0 = floatx80_add(fp0, fp1, status); /* X + N*L1 */ + fp0 = floatx80_add(fp0, fp2, status); /* R */ + + fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */ + fp2 = float32_to_floatx80(make_float32(0x3AB60B70), + status); /* A5 */ + fp2 = floatx80_mul(fp2, fp1, status); /* fp2 is S*A5 */ + fp3 = floatx80_mul(float32_to_floatx80(make_float32(0x3C088895), + status), fp1, + status); /* fp3 is S*A4 */ + fp2 = floatx80_add(fp2, float64_to_floatx80(make_float64( + 0x3FA5555555554431), status), + status); /* fp2 is A3+S*A5 */ + fp3 = floatx80_add(fp3, float64_to_floatx80(make_float64( + 0x3FC5555555554018), status), + status); /* fp3 is A2+S*A4 */ + fp2 = floatx80_mul(fp2, fp1, status); /* fp2 is S*(A3+S*A5) */ + fp3 = floatx80_mul(fp3, fp1, status); /* fp3 is S*(A2+S*A4) */ + fp2 = floatx80_add(fp2, float32_to_floatx80( + make_float32(0x3F000000), status), + status); /* fp2 is A1+S*(A3+S*A5) */ + fp3 = floatx80_mul(fp3, fp0, status); /* fp3 IS R*S*(A2+S*A4) */ + fp2 = floatx80_mul(fp2, fp1, + status); /* fp2 IS S*(A1+S*(A3+S*A5)) */ + fp0 = floatx80_add(fp0, fp3, 
status); /* fp0 IS R+R*S*(A2+S*A4) */ + fp0 = floatx80_add(fp0, fp2, status); /* fp0 IS EXP(R) - 1 */ + + fp1 = exp_tbl[j]; + fp0 = floatx80_mul(fp0, fp1, status); /* 2^(J/64)*(Exp(R)-1) */ + fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j], status), + status); /* accurate 2^(J/64) */ + fp0 = floatx80_add(fp0, fp1, + status); /* 2^(J/64) + 2^(J/64)*(Exp(R)-1) */ + + scale = packFloatx80(0, m, one_sig); + if (adjflag) { + adjscale = packFloatx80(0, m1, one_sig); + fp0 = floatx80_mul(fp0, adjscale, status); + } + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, scale, status); + + float_raise(float_flag_inexact, status); + + return a; + } else { /* |X| >= 16380 log2 */ + if (compact > 0x400CB27C) { /* |X| >= 16480 log2 */ + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + if (aSign) { + a = roundAndPackFloatx80( + status->floatx80_rounding_precision, + 0, -0x1000, aSig, 0, status); + } else { + a = roundAndPackFloatx80( + status->floatx80_rounding_precision, + 0, 0x8000, aSig, 0, status); + } + float_raise(float_flag_inexact, status); + + return a; + } else { + fp0 = a; + fp1 = a; + fp0 = floatx80_mul(fp0, float32_to_floatx80( + make_float32(0x42B8AA3B), status), + status); /* 64/log2 * X */ + adjflag = 1; + n = floatx80_to_int32(fp0, status); /* int(64/log2*X) */ + fp0 = int32_to_floatx80(n, status); + + j = n & 0x3F; /* J = N mod 64 */ + /* NOTE: this is really arithmetic right shift by 6 */ + k = n / 64; + if (n < 0 && j) { + /* arithmetic right shift is division and + * round towards minus infinity + */ + k--; + } + /* NOTE: this is really arithmetic right shift by 1 */ + m1 = k / 2; + if (k < 0 && (k & 1)) { + /* arithmetic right shift is division and + * round towards minus infinity + */ + m1--; + } + m = k - m1; + m1 += 0x3FFF; /* biased exponent of 2^(M1) */ + m += 0x3FFF; /* biased exponent of 2^(M) */ + + goto expcont1; + } + } + } else { /* |X| < 2^(-65) */ + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(a, float32_to_floatx80(make_float32(0x3F800000), + status), status); /* 1 + X */ + + float_raise(float_flag_inexact, status); + + return a; + } +} + +/* + * 2 to x + */ + +floatx80 floatx80_twotox(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact, n, j, l, m, m1; + floatx80 fp0, fp1, fp2, fp3, adjfact, fact1, fact2; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + if (aSign) { + return packFloatx80(0, 0, 0); + } + return packFloatx80(0, floatx80_infinity.high, + floatx80_infinity.low); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(0, one_exp, one_sig); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + fp0 = a; + + compact = floatx80_make_compact(aExp, aSig); + + if (compact < 0x3FB98000 || compact > 0x400D80C0) { + /* |X| > 16480 or |X| < 2^(-70) */ + if (compact > 0x3FFF8000) { /* |X| > 16480 */ + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + if (aSign) { + return 
roundAndPackFloatx80(status->floatx80_rounding_precision, + 0, -0x1000, aSig, 0, status); + } else { + return roundAndPackFloatx80(status->floatx80_rounding_precision, + 0, 0x8000, aSig, 0, status); + } + } else { /* |X| < 2^(-70) */ + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, float32_to_floatx80( + make_float32(0x3F800000), status), + status); /* 1 + X */ + + float_raise(float_flag_inexact, status); + + return a; + } + } else { /* 2^(-70) <= |X| <= 16480 */ + fp1 = fp0; /* X */ + fp1 = floatx80_mul(fp1, float32_to_floatx80( + make_float32(0x42800000), status), + status); /* X * 64 */ + n = floatx80_to_int32(fp1, status); + fp1 = int32_to_floatx80(n, status); + j = n & 0x3F; + l = n / 64; /* NOTE: this is really arithmetic right shift by 6 */ + if (n < 0 && j) { + /* + * arithmetic right shift is division and + * round towards minus infinity + */ + l--; + } + m = l / 2; /* NOTE: this is really arithmetic right shift by 1 */ + if (l < 0 && (l & 1)) { + /* + * arithmetic right shift is division and + * round towards minus infinity + */ + m--; + } + m1 = l - m; + m1 += 0x3FFF; /* ADJFACT IS 2^(M') */ + + adjfact = packFloatx80(0, m1, one_sig); + fact1 = exp2_tbl[j]; + fact1.high += m; + fact2.high = exp2_tbl2[j] >> 16; + fact2.high += m; + fact2.low = (uint64_t)(exp2_tbl2[j] & 0xFFFF); + fact2.low <<= 48; + + fp1 = floatx80_mul(fp1, float32_to_floatx80( + make_float32(0x3C800000), status), + status); /* (1/64)*N */ + fp0 = floatx80_sub(fp0, fp1, status); /* X - (1/64)*INT(64 X) */ + fp2 = packFloatx80(0, 0x3FFE, UINT64_C(0xB17217F7D1CF79AC)); /* LOG2 */ + fp0 = floatx80_mul(fp0, fp2, status); /* R */ + + /* EXPR */ + fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */ + fp2 = float64_to_floatx80(make_float64(0x3F56C16D6F7BD0B2), + status); /* A5 */ + fp3 = float64_to_floatx80(make_float64(0x3F811112302C712C), + status); /* A4 */ + fp2 = floatx80_mul(fp2, fp1, status); /* S*A5 */ + fp3 = floatx80_mul(fp3, fp1, status); /* S*A4 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3FA5555555554CC1), status), + status); /* A3+S*A5 */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0x3FC5555555554A54), status), + status); /* A2+S*A4 */ + fp2 = floatx80_mul(fp2, fp1, status); /* S*(A3+S*A5) */ + fp3 = floatx80_mul(fp3, fp1, status); /* S*(A2+S*A4) */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3FE0000000000000), status), + status); /* A1+S*(A3+S*A5) */ + fp3 = floatx80_mul(fp3, fp0, status); /* R*S*(A2+S*A4) */ + + fp2 = floatx80_mul(fp2, fp1, status); /* S*(A1+S*(A3+S*A5)) */ + fp0 = floatx80_add(fp0, fp3, status); /* R+R*S*(A2+S*A4) */ + fp0 = floatx80_add(fp0, fp2, status); /* EXP(R) - 1 */ + + fp0 = floatx80_mul(fp0, fact1, status); + fp0 = floatx80_add(fp0, fact2, status); + fp0 = floatx80_add(fp0, fact1, status); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, adjfact, status); + + float_raise(float_flag_inexact, status); + + return a; + } +} + +/* + * 10 to x + */ + +floatx80 floatx80_tentox(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact, n, j, l, m, m1; + floatx80 fp0, fp1, fp2, fp3, adjfact, fact1, fact2; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return 
propagateFloatx80NaNOneArg(a, status); + } + if (aSign) { + return packFloatx80(0, 0, 0); + } + return packFloatx80(0, floatx80_infinity.high, + floatx80_infinity.low); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(0, one_exp, one_sig); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + fp0 = a; + + compact = floatx80_make_compact(aExp, aSig); + + if (compact < 0x3FB98000 || compact > 0x400B9B07) { + /* |X| > 16480 LOG2/LOG10 or |X| < 2^(-70) */ + if (compact > 0x3FFF8000) { /* |X| > 16480 */ + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + if (aSign) { + return roundAndPackFloatx80(status->floatx80_rounding_precision, + 0, -0x1000, aSig, 0, status); + } else { + return roundAndPackFloatx80(status->floatx80_rounding_precision, + 0, 0x8000, aSig, 0, status); + } + } else { /* |X| < 2^(-70) */ + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, float32_to_floatx80( + make_float32(0x3F800000), status), + status); /* 1 + X */ + + float_raise(float_flag_inexact, status); + + return a; + } + } else { /* 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10 */ + fp1 = fp0; /* X */ + fp1 = floatx80_mul(fp1, float64_to_floatx80( + make_float64(0x406A934F0979A371), + status), status); /* X*64*LOG10/LOG2 */ + n = floatx80_to_int32(fp1, status); /* N=INT(X*64*LOG10/LOG2) */ + fp1 = int32_to_floatx80(n, status); + + j = n & 0x3F; + l = n / 64; /* NOTE: this is really arithmetic right shift by 6 */ + if (n < 0 && j) { + /* + * arithmetic right shift is division and + * round towards minus infinity + */ + l--; + } + m = l / 2; /* NOTE: this is really arithmetic right shift by 1 */ + if (l < 0 && (l & 1)) { + /* + * arithmetic right shift is division and + * round towards minus infinity + */ + m--; + } + m1 = l - m; + m1 += 0x3FFF; /* ADJFACT IS 2^(M') */ + + adjfact = packFloatx80(0, m1, one_sig); + fact1 = exp2_tbl[j]; + fact1.high += m; + fact2.high = exp2_tbl2[j] >> 16; + fact2.high += m; + fact2.low = (uint64_t)(exp2_tbl2[j] & 0xFFFF); + fact2.low <<= 48; + + fp2 = fp1; /* N */ + fp1 = floatx80_mul(fp1, float64_to_floatx80( + make_float64(0x3F734413509F8000), status), + status); /* N*(LOG2/64LOG10)_LEAD */ + fp3 = packFloatx80(1, 0x3FCD, UINT64_C(0xC0219DC1DA994FD2)); + fp2 = floatx80_mul(fp2, fp3, status); /* N*(LOG2/64LOG10)_TRAIL */ + fp0 = floatx80_sub(fp0, fp1, status); /* X - N L_LEAD */ + fp0 = floatx80_sub(fp0, fp2, status); /* X - N L_TRAIL */ + fp2 = packFloatx80(0, 0x4000, UINT64_C(0x935D8DDDAAA8AC17)); /* LOG10 */ + fp0 = floatx80_mul(fp0, fp2, status); /* R */ + + /* EXPR */ + fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */ + fp2 = float64_to_floatx80(make_float64(0x3F56C16D6F7BD0B2), + status); /* A5 */ + fp3 = float64_to_floatx80(make_float64(0x3F811112302C712C), + status); /* A4 */ + fp2 = floatx80_mul(fp2, fp1, status); /* S*A5 */ + fp3 = floatx80_mul(fp3, fp1, status); /* S*A4 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3FA5555555554CC1), status), + status); /* A3+S*A5 */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0x3FC5555555554A54), status), + status); /* A2+S*A4 */ + fp2 = floatx80_mul(fp2, fp1, status); /* S*(A3+S*A5) */ + fp3 = floatx80_mul(fp3, fp1, status); /* S*(A2+S*A4) */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + 
make_float64(0x3FE0000000000000), status), + status); /* A1+S*(A3+S*A5) */ + fp3 = floatx80_mul(fp3, fp0, status); /* R*S*(A2+S*A4) */ + + fp2 = floatx80_mul(fp2, fp1, status); /* S*(A1+S*(A3+S*A5)) */ + fp0 = floatx80_add(fp0, fp3, status); /* R+R*S*(A2+S*A4) */ + fp0 = floatx80_add(fp0, fp2, status); /* EXP(R) - 1 */ + + fp0 = floatx80_mul(fp0, fact1, status); + fp0 = floatx80_add(fp0, fact2, status); + fp0 = floatx80_add(fp0, fact1, status); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, adjfact, status); + + float_raise(float_flag_inexact, status); + + return a; + } +} + +/* + * Tangent + */ + +floatx80 floatx80_tan(floatx80 a, float_status *status) +{ + flag aSign, xSign; + int32_t aExp, xExp; + uint64_t aSig, xSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact, l, n, j; + floatx80 fp0, fp1, fp2, fp3, fp4, fp5, invtwopi, twopi1, twopi2; + float32 twoto63; + flag endflag; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + compact = floatx80_make_compact(aExp, aSig); + + fp0 = a; + + if (compact < 0x3FD78000 || compact > 0x4004BC7E) { + /* 2^(-40) > |X| > 15 PI */ + if (compact > 0x3FFF8000) { /* |X| >= 15 PI */ + /* REDUCEX */ + fp1 = packFloatx80(0, 0, 0); + if (compact == 0x7FFEFFFF) { + twopi1 = packFloatx80(aSign ^ 1, 0x7FFE, + UINT64_C(0xC90FDAA200000000)); + twopi2 = packFloatx80(aSign ^ 1, 0x7FDC, + UINT64_C(0x85A308D300000000)); + fp0 = floatx80_add(fp0, twopi1, status); + fp1 = fp0; + fp0 = floatx80_add(fp0, twopi2, status); + fp1 = floatx80_sub(fp1, fp0, status); + fp1 = floatx80_add(fp1, twopi2, status); + } + loop: + xSign = extractFloatx80Sign(fp0); + xExp = extractFloatx80Exp(fp0); + xExp -= 0x3FFF; + if (xExp <= 28) { + l = 0; + endflag = 1; + } else { + l = xExp - 27; + endflag = 0; + } + invtwopi = packFloatx80(0, 0x3FFE - l, + UINT64_C(0xA2F9836E4E44152A)); /* INVTWOPI */ + twopi1 = packFloatx80(0, 0x3FFF + l, UINT64_C(0xC90FDAA200000000)); + twopi2 = packFloatx80(0, 0x3FDD + l, UINT64_C(0x85A308D300000000)); + + /* SIGN(INARG)*2^63 IN SGL */ + twoto63 = packFloat32(xSign, 0xBE, 0); + + fp2 = floatx80_mul(fp0, invtwopi, status); + fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status), + status); /* THE FRACT PART OF FP2 IS ROUNDED */ + fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status), + status); /* FP2 is N */ + fp4 = floatx80_mul(twopi1, fp2, status); /* W = N*P1 */ + fp5 = floatx80_mul(twopi2, fp2, status); /* w = N*P2 */ + fp3 = floatx80_add(fp4, fp5, status); /* FP3 is P */ + fp4 = floatx80_sub(fp4, fp3, status); /* W-P */ + fp0 = floatx80_sub(fp0, fp3, status); /* FP0 is A := R - P */ + fp4 = floatx80_add(fp4, fp5, status); /* FP4 is p = (W-P)+w */ + fp3 = fp0; /* FP3 is A */ + fp1 = floatx80_sub(fp1, fp4, status); /* FP1 is a := r - p */ + fp0 = floatx80_add(fp0, fp1, status); /* FP0 is R := A+a */ + + if (endflag > 0) { + n = floatx80_to_int32(fp2, status); + goto tancont; + } + fp3 = floatx80_sub(fp3, fp0, status); /* A-R */ 
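+ /*
+ * Editorial note: the subtraction above and the addition below form
+ * a compensated, two-sum style step. FP0 already holds the head
+ * R = A + a; r := (A - R) + a recovers the rounding error of that
+ * addition, so precision shed by the 64-bit significand in one
+ * reduction pass is fed back into the next trip around the loop.
+ */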
+ fp1 = floatx80_add(fp1, fp3, status); /* FP1 is r := (A-R)+a */ + goto loop; + } else { + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_move(a, status); + + float_raise(float_flag_inexact, status); + + return a; + } + } else { + fp1 = floatx80_mul(fp0, float64_to_floatx80( + make_float64(0x3FE45F306DC9C883), status), + status); /* X*2/PI */ + + n = floatx80_to_int32(fp1, status); + j = 32 + n; + + fp0 = floatx80_sub(fp0, pi_tbl[j], status); /* X-Y1 */ + fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status), + status); /* FP0 IS R = (X-Y1)-Y2 */ + + tancont: + if (n & 1) { + /* NODD */ + fp1 = fp0; /* R */ + fp0 = floatx80_mul(fp0, fp0, status); /* S = R*R */ + fp3 = float64_to_floatx80(make_float64(0x3EA0B759F50F8688), + status); /* Q4 */ + fp2 = float64_to_floatx80(make_float64(0xBEF2BAA5A8924F04), + status); /* P3 */ + fp3 = floatx80_mul(fp3, fp0, status); /* SQ4 */ + fp2 = floatx80_mul(fp2, fp0, status); /* SP3 */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0xBF346F59B39BA65F), status), + status); /* Q3+SQ4 */ + fp4 = packFloatx80(0, 0x3FF6, UINT64_C(0xE073D3FC199C4A00)); + fp2 = floatx80_add(fp2, fp4, status); /* P2+SP3 */ + fp3 = floatx80_mul(fp3, fp0, status); /* S(Q3+SQ4) */ + fp2 = floatx80_mul(fp2, fp0, status); /* S(P2+SP3) */ + fp4 = packFloatx80(0, 0x3FF9, UINT64_C(0xD23CD68415D95FA1)); + fp3 = floatx80_add(fp3, fp4, status); /* Q2+S(Q3+SQ4) */ + fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0x8895A6C5FB423BCA)); + fp2 = floatx80_add(fp2, fp4, status); /* P1+S(P2+SP3) */ + fp3 = floatx80_mul(fp3, fp0, status); /* S(Q2+S(Q3+SQ4)) */ + fp2 = floatx80_mul(fp2, fp0, status); /* S(P1+S(P2+SP3)) */ + fp4 = packFloatx80(1, 0x3FFD, UINT64_C(0xEEF57E0DA84BC8CE)); + fp3 = floatx80_add(fp3, fp4, status); /* Q1+S(Q2+S(Q3+SQ4)) */ + fp2 = floatx80_mul(fp2, fp1, status); /* RS(P1+S(P2+SP3)) */ + fp0 = floatx80_mul(fp0, fp3, status); /* S(Q1+S(Q2+S(Q3+SQ4))) */ + fp1 = floatx80_add(fp1, fp2, status); /* R+RS(P1+S(P2+SP3)) */ + fp0 = floatx80_add(fp0, float32_to_floatx80( + make_float32(0x3F800000), status), + status); /* 1+S(Q1+S(Q2+S(Q3+SQ4))) */ + + xSign = extractFloatx80Sign(fp1); + xExp = extractFloatx80Exp(fp1); + xSig = extractFloatx80Frac(fp1); + xSign ^= 1; + fp1 = packFloatx80(xSign, xExp, xSig); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_div(fp0, fp1, status); + + float_raise(float_flag_inexact, status); + + return a; + } else { + fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */ + fp3 = float64_to_floatx80(make_float64(0x3EA0B759F50F8688), + status); /* Q4 */ + fp2 = float64_to_floatx80(make_float64(0xBEF2BAA5A8924F04), + status); /* P3 */ + fp3 = floatx80_mul(fp3, fp1, status); /* SQ4 */ + fp2 = floatx80_mul(fp2, fp1, status); /* SP3 */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0xBF346F59B39BA65F), status), + status); /* Q3+SQ4 */ + fp4 = packFloatx80(0, 0x3FF6, UINT64_C(0xE073D3FC199C4A00)); + fp2 = floatx80_add(fp2, fp4, status); /* P2+SP3 */ + fp3 = floatx80_mul(fp3, fp1, status); /* S(Q3+SQ4) */ + fp2 = floatx80_mul(fp2, fp1, status); /* S(P2+SP3) */ + fp4 = packFloatx80(0, 0x3FF9, UINT64_C(0xD23CD68415D95FA1)); + fp3 = floatx80_add(fp3, fp4, status); /* Q2+S(Q3+SQ4) */ + fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0x8895A6C5FB423BCA)); + fp2 = floatx80_add(fp2, fp4, status); /* P1+S(P2+SP3) */ + fp3 = floatx80_mul(fp3, fp1, status); /* S(Q2+S(Q3+SQ4)) */ + fp2 = floatx80_mul(fp2, fp1, 
status); /* S(P1+S(P2+SP3)) */ + fp4 = packFloatx80(1, 0x3FFD, UINT64_C(0xEEF57E0DA84BC8CE)); + fp3 = floatx80_add(fp3, fp4, status); /* Q1+S(Q2+S(Q3+SQ4)) */ + fp2 = floatx80_mul(fp2, fp0, status); /* RS(P1+S(P2+SP3)) */ + fp1 = floatx80_mul(fp1, fp3, status); /* S(Q1+S(Q2+S(Q3+SQ4))) */ + fp0 = floatx80_add(fp0, fp2, status); /* R+RS(P1+S(P2+SP3)) */ + fp1 = floatx80_add(fp1, float32_to_floatx80( + make_float32(0x3F800000), status), + status); /* 1+S(Q1+S(Q2+S(Q3+SQ4))) */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_div(fp0, fp1, status); + + float_raise(float_flag_inexact, status); + + return a; + } + } +} + +/* + * Sine + */ + +floatx80 floatx80_sin(floatx80 a, float_status *status) +{ + flag aSign, xSign; + int32_t aExp, xExp; + uint64_t aSig, xSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact, l, n, j; + floatx80 fp0, fp1, fp2, fp3, fp4, fp5, x, invtwopi, twopi1, twopi2; + float32 posneg1, twoto63; + flag endflag; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + compact = floatx80_make_compact(aExp, aSig); + + fp0 = a; + + if (compact < 0x3FD78000 || compact > 0x4004BC7E) { + /* 2^(-40) > |X| > 15 PI */ + if (compact > 0x3FFF8000) { /* |X| >= 15 PI */ + /* REDUCEX */ + fp1 = packFloatx80(0, 0, 0); + if (compact == 0x7FFEFFFF) { + twopi1 = packFloatx80(aSign ^ 1, 0x7FFE, + UINT64_C(0xC90FDAA200000000)); + twopi2 = packFloatx80(aSign ^ 1, 0x7FDC, + UINT64_C(0x85A308D300000000)); + fp0 = floatx80_add(fp0, twopi1, status); + fp1 = fp0; + fp0 = floatx80_add(fp0, twopi2, status); + fp1 = floatx80_sub(fp1, fp0, status); + fp1 = floatx80_add(fp1, twopi2, status); + } + loop: + xSign = extractFloatx80Sign(fp0); + xExp = extractFloatx80Exp(fp0); + xExp -= 0x3FFF; + if (xExp <= 28) { + l = 0; + endflag = 1; + } else { + l = xExp - 27; + endflag = 0; + } + invtwopi = packFloatx80(0, 0x3FFE - l, + UINT64_C(0xA2F9836E4E44152A)); /* INVTWOPI */ + twopi1 = packFloatx80(0, 0x3FFF + l, UINT64_C(0xC90FDAA200000000)); + twopi2 = packFloatx80(0, 0x3FDD + l, UINT64_C(0x85A308D300000000)); + + /* SIGN(INARG)*2^63 IN SGL */ + twoto63 = packFloat32(xSign, 0xBE, 0); + + fp2 = floatx80_mul(fp0, invtwopi, status); + fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status), + status); /* THE FRACT PART OF FP2 IS ROUNDED */ + fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status), + status); /* FP2 is N */ + fp4 = floatx80_mul(twopi1, fp2, status); /* W = N*P1 */ + fp5 = floatx80_mul(twopi2, fp2, status); /* w = N*P2 */ + fp3 = floatx80_add(fp4, fp5, status); /* FP3 is P */ + fp4 = floatx80_sub(fp4, fp3, status); /* W-P */ + fp0 = floatx80_sub(fp0, fp3, status); /* FP0 is A := R - P */ + fp4 = floatx80_add(fp4, fp5, status); /* FP4 is p = (W-P)+w */ + fp3 = fp0; /* FP3 is A */ + fp1 = floatx80_sub(fp1, fp4, status); /* FP1 is a := r - p */ + fp0 = floatx80_add(fp0, fp1, status); /* FP0 is R := A+a */ + + if (endflag > 0) { + n = floatx80_to_int32(fp2, status); + goto sincont; + } + fp3 = 
floatx80_sub(fp3, fp0, status); /* A-R */ + fp1 = floatx80_add(fp1, fp3, status); /* FP1 is r := (A-R)+a */ + goto loop; + } else { + /* SINSM */ + fp0 = float32_to_floatx80(make_float32(0x3F800000), + status); /* 1 */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + /* SINTINY */ + a = floatx80_move(a, status); + float_raise(float_flag_inexact, status); + + return a; + } + } else { + fp1 = floatx80_mul(fp0, float64_to_floatx80( + make_float64(0x3FE45F306DC9C883), status), + status); /* X*2/PI */ + + n = floatx80_to_int32(fp1, status); + j = 32 + n; + + fp0 = floatx80_sub(fp0, pi_tbl[j], status); /* X-Y1 */ + fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status), + status); /* FP0 IS R = (X-Y1)-Y2 */ + + sincont: + if (n & 1) { + /* COSPOLY */ + fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */ + fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */ + fp2 = float64_to_floatx80(make_float64(0x3D2AC4D0D6011EE3), + status); /* B8 */ + fp3 = float64_to_floatx80(make_float64(0xBDA9396F9F45AC19), + status); /* B7 */ + + xSign = extractFloatx80Sign(fp0); /* X IS S */ + xExp = extractFloatx80Exp(fp0); + xSig = extractFloatx80Frac(fp0); + + if ((n >> 1) & 1) { + xSign ^= 1; + posneg1 = make_float32(0xBF800000); /* -1 */ + } else { + xSign ^= 0; + posneg1 = make_float32(0x3F800000); /* 1 */ + } /* X IS NOW R'= SGN*R */ + + fp2 = floatx80_mul(fp2, fp1, status); /* TB8 */ + fp3 = floatx80_mul(fp3, fp1, status); /* TB7 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3E21EED90612C972), status), + status); /* B6+TB8 */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0xBE927E4FB79D9FCF), status), + status); /* B5+TB7 */ + fp2 = floatx80_mul(fp2, fp1, status); /* T(B6+TB8) */ + fp3 = floatx80_mul(fp3, fp1, status); /* T(B5+TB7) */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3EFA01A01A01D423), status), + status); /* B4+T(B6+TB8) */ + fp4 = packFloatx80(1, 0x3FF5, UINT64_C(0xB60B60B60B61D438)); + fp3 = floatx80_add(fp3, fp4, status); /* B3+T(B5+TB7) */ + fp2 = floatx80_mul(fp2, fp1, status); /* T(B4+T(B6+TB8)) */ + fp1 = floatx80_mul(fp1, fp3, status); /* T(B3+T(B5+TB7)) */ + fp4 = packFloatx80(0, 0x3FFA, UINT64_C(0xAAAAAAAAAAAAAB5E)); + fp2 = floatx80_add(fp2, fp4, status); /* B2+T(B4+T(B6+TB8)) */ + fp1 = floatx80_add(fp1, float32_to_floatx80( + make_float32(0xBF000000), status), + status); /* B1+T(B3+T(B5+TB7)) */ + fp0 = floatx80_mul(fp0, fp2, status); /* S(B2+T(B4+T(B6+TB8))) */ + fp0 = floatx80_add(fp0, fp1, status); /* [B1+T(B3+T(B5+TB7))]+ + * [S(B2+T(B4+T(B6+TB8)))] + */ + + x = packFloatx80(xSign, xExp, xSig); + fp0 = floatx80_mul(fp0, x, status); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, float32_to_floatx80(posneg1, status), status); + + float_raise(float_flag_inexact, status); + + return a; + } else { + /* SINPOLY */ + xSign = extractFloatx80Sign(fp0); /* X IS R */ + xExp = extractFloatx80Exp(fp0); + xSig = extractFloatx80Frac(fp0); + + xSign ^= (n >> 1) & 1; /* X IS NOW R'= SGN*R */ + + fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */ + fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */ + fp3 = float64_to_floatx80(make_float64(0xBD6AAA77CCC994F5), + status); /* A7 */ + fp2 = float64_to_floatx80(make_float64(0x3DE612097AAE8DA1), + status); /* A6 */ + fp3 = floatx80_mul(fp3, fp1, status); /* T*A7 */ + fp2 = floatx80_mul(fp2, fp1, status); /* T*A6 */ + fp3 = floatx80_add(fp3, 
float64_to_floatx80( + make_float64(0xBE5AE6452A118AE4), status), + status); /* A5+T*A7 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3EC71DE3A5341531), status), + status); /* A4+T*A6 */ + fp3 = floatx80_mul(fp3, fp1, status); /* T(A5+TA7) */ + fp2 = floatx80_mul(fp2, fp1, status); /* T(A4+TA6) */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0xBF2A01A01A018B59), status), + status); /* A3+T(A5+TA7) */ + fp4 = packFloatx80(0, 0x3FF8, UINT64_C(0x88888888888859AF)); + fp2 = floatx80_add(fp2, fp4, status); /* A2+T(A4+TA6) */ + fp1 = floatx80_mul(fp1, fp3, status); /* T(A3+T(A5+TA7)) */ + fp2 = floatx80_mul(fp2, fp0, status); /* S(A2+T(A4+TA6)) */ + fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0xAAAAAAAAAAAAAA99)); + fp1 = floatx80_add(fp1, fp4, status); /* A1+T(A3+T(A5+TA7)) */ + fp1 = floatx80_add(fp1, fp2, + status); /* [A1+T(A3+T(A5+TA7))]+ + * [S(A2+T(A4+TA6))] + */ + + x = packFloatx80(xSign, xExp, xSig); + fp0 = floatx80_mul(fp0, x, status); /* R'*S */ + fp0 = floatx80_mul(fp0, fp1, status); /* SIN(R')-R' */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, x, status); + + float_raise(float_flag_inexact, status); + + return a; + } + } +} + +/* + * Cosine + */ + +floatx80 floatx80_cos(floatx80 a, float_status *status) +{ + flag aSign, xSign; + int32_t aExp, xExp; + uint64_t aSig, xSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact, l, n, j; + floatx80 fp0, fp1, fp2, fp3, fp4, fp5, x, invtwopi, twopi1, twopi2; + float32 posneg1, twoto63; + flag endflag; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(0, one_exp, one_sig); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + compact = floatx80_make_compact(aExp, aSig); + + fp0 = a; + + if (compact < 0x3FD78000 || compact > 0x4004BC7E) { + /* 2^(-40) > |X| > 15 PI */ + if (compact > 0x3FFF8000) { /* |X| >= 15 PI */ + /* REDUCEX */ + fp1 = packFloatx80(0, 0, 0); + if (compact == 0x7FFEFFFF) { + twopi1 = packFloatx80(aSign ^ 1, 0x7FFE, + UINT64_C(0xC90FDAA200000000)); + twopi2 = packFloatx80(aSign ^ 1, 0x7FDC, + UINT64_C(0x85A308D300000000)); + fp0 = floatx80_add(fp0, twopi1, status); + fp1 = fp0; + fp0 = floatx80_add(fp0, twopi2, status); + fp1 = floatx80_sub(fp1, fp0, status); + fp1 = floatx80_add(fp1, twopi2, status); + } + loop: + xSign = extractFloatx80Sign(fp0); + xExp = extractFloatx80Exp(fp0); + xExp -= 0x3FFF; + if (xExp <= 28) { + l = 0; + endflag = 1; + } else { + l = xExp - 27; + endflag = 0; + } + invtwopi = packFloatx80(0, 0x3FFE - l, + UINT64_C(0xA2F9836E4E44152A)); /* INVTWOPI */ + twopi1 = packFloatx80(0, 0x3FFF + l, UINT64_C(0xC90FDAA200000000)); + twopi2 = packFloatx80(0, 0x3FDD + l, UINT64_C(0x85A308D300000000)); + + /* SIGN(INARG)*2^63 IN SGL */ + twoto63 = packFloat32(xSign, 0xBE, 0); + + fp2 = floatx80_mul(fp0, invtwopi, status); + fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status), + status); /* THE FRACT PART OF FP2 IS ROUNDED */ + fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status), + status); /* FP2 is N */ + fp4 = 
floatx80_mul(twopi1, fp2, status); /* W = N*P1 */ + fp5 = floatx80_mul(twopi2, fp2, status); /* w = N*P2 */ + fp3 = floatx80_add(fp4, fp5, status); /* FP3 is P */ + fp4 = floatx80_sub(fp4, fp3, status); /* W-P */ + fp0 = floatx80_sub(fp0, fp3, status); /* FP0 is A := R - P */ + fp4 = floatx80_add(fp4, fp5, status); /* FP4 is p = (W-P)+w */ + fp3 = fp0; /* FP3 is A */ + fp1 = floatx80_sub(fp1, fp4, status); /* FP1 is a := r - p */ + fp0 = floatx80_add(fp0, fp1, status); /* FP0 is R := A+a */ + + if (endflag > 0) { + n = floatx80_to_int32(fp2, status); + goto sincont; + } + fp3 = floatx80_sub(fp3, fp0, status); /* A-R */ + fp1 = floatx80_add(fp1, fp3, status); /* FP1 is r := (A-R)+a */ + goto loop; + } else { + /* SINSM */ + fp0 = float32_to_floatx80(make_float32(0x3F800000), status); /* 1 */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + /* COSTINY */ + a = floatx80_sub(fp0, float32_to_floatx80( + make_float32(0x00800000), status), + status); + float_raise(float_flag_inexact, status); + + return a; + } + } else { + fp1 = floatx80_mul(fp0, float64_to_floatx80( + make_float64(0x3FE45F306DC9C883), status), + status); /* X*2/PI */ + + n = floatx80_to_int32(fp1, status); + j = 32 + n; + + fp0 = floatx80_sub(fp0, pi_tbl[j], status); /* X-Y1 */ + fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status), + status); /* FP0 IS R = (X-Y1)-Y2 */ + + sincont: + if ((n + 1) & 1) { + /* COSPOLY */ + fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */ + fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */ + fp2 = float64_to_floatx80(make_float64(0x3D2AC4D0D6011EE3), + status); /* B8 */ + fp3 = float64_to_floatx80(make_float64(0xBDA9396F9F45AC19), + status); /* B7 */ + + xSign = extractFloatx80Sign(fp0); /* X IS S */ + xExp = extractFloatx80Exp(fp0); + xSig = extractFloatx80Frac(fp0); + + if (((n + 1) >> 1) & 1) { + xSign ^= 1; + posneg1 = make_float32(0xBF800000); /* -1 */ + } else { + xSign ^= 0; + posneg1 = make_float32(0x3F800000); /* 1 */ + } /* X IS NOW R'= SGN*R */ + + fp2 = floatx80_mul(fp2, fp1, status); /* TB8 */ + fp3 = floatx80_mul(fp3, fp1, status); /* TB7 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3E21EED90612C972), status), + status); /* B6+TB8 */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0xBE927E4FB79D9FCF), status), + status); /* B5+TB7 */ + fp2 = floatx80_mul(fp2, fp1, status); /* T(B6+TB8) */ + fp3 = floatx80_mul(fp3, fp1, status); /* T(B5+TB7) */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3EFA01A01A01D423), status), + status); /* B4+T(B6+TB8) */ + fp4 = packFloatx80(1, 0x3FF5, UINT64_C(0xB60B60B60B61D438)); + fp3 = floatx80_add(fp3, fp4, status); /* B3+T(B5+TB7) */ + fp2 = floatx80_mul(fp2, fp1, status); /* T(B4+T(B6+TB8)) */ + fp1 = floatx80_mul(fp1, fp3, status); /* T(B3+T(B5+TB7)) */ + fp4 = packFloatx80(0, 0x3FFA, UINT64_C(0xAAAAAAAAAAAAAB5E)); + fp2 = floatx80_add(fp2, fp4, status); /* B2+T(B4+T(B6+TB8)) */ + fp1 = floatx80_add(fp1, float32_to_floatx80( + make_float32(0xBF000000), status), + status); /* B1+T(B3+T(B5+TB7)) */ + fp0 = floatx80_mul(fp0, fp2, status); /* S(B2+T(B4+T(B6+TB8))) */ + fp0 = floatx80_add(fp0, fp1, status); + /* [B1+T(B3+T(B5+TB7))]+[S(B2+T(B4+T(B6+TB8)))] */ + + x = packFloatx80(xSign, xExp, xSig); + fp0 = floatx80_mul(fp0, x, status); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, float32_to_floatx80(posneg1, status), status); + + 
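+
+            /*
+             * NOTE (editorial, not part of the imported code): with
+             * s = r^2 and t = s^2, the COSPOLY branch above (same shape
+             * in floatx80_sin and floatx80_cos) evaluates
+             *
+             *   cos(r) ~= 1 + s*(B1 + t*(B3 + t*(B5 + t*B7)))
+             *               + s^2*(B2 + t*(B4 + t*(B6 + t*B8)))
+             *
+             * as two interleaved Horner chains over the odd- and
+             * even-indexed coefficients; posneg1 and the xSign flip
+             * supply the quadrant sign.  Hypothetical double-precision
+             * shape (b1..b8 stand in for the hex constants):
+             */
+#if 0
+            double s = r * r, t = s * s;
+            double odd = b1 + t * (b3 + t * (b5 + t * b7));
+            double even = b2 + t * (b4 + t * (b6 + t * b8));
+            double cos_r = 1.0 + s * odd + s * s * even;
+#endif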
float_raise(float_flag_inexact, status); + + return a; + } else { + /* SINPOLY */ + xSign = extractFloatx80Sign(fp0); /* X IS R */ + xExp = extractFloatx80Exp(fp0); + xSig = extractFloatx80Frac(fp0); + + xSign ^= ((n + 1) >> 1) & 1; /* X IS NOW R'= SGN*R */ + + fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */ + fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */ + fp3 = float64_to_floatx80(make_float64(0xBD6AAA77CCC994F5), + status); /* A7 */ + fp2 = float64_to_floatx80(make_float64(0x3DE612097AAE8DA1), + status); /* A6 */ + fp3 = floatx80_mul(fp3, fp1, status); /* T*A7 */ + fp2 = floatx80_mul(fp2, fp1, status); /* T*A6 */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0xBE5AE6452A118AE4), status), + status); /* A5+T*A7 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3EC71DE3A5341531), status), + status); /* A4+T*A6 */ + fp3 = floatx80_mul(fp3, fp1, status); /* T(A5+TA7) */ + fp2 = floatx80_mul(fp2, fp1, status); /* T(A4+TA6) */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0xBF2A01A01A018B59), status), + status); /* A3+T(A5+TA7) */ + fp4 = packFloatx80(0, 0x3FF8, UINT64_C(0x88888888888859AF)); + fp2 = floatx80_add(fp2, fp4, status); /* A2+T(A4+TA6) */ + fp1 = floatx80_mul(fp1, fp3, status); /* T(A3+T(A5+TA7)) */ + fp2 = floatx80_mul(fp2, fp0, status); /* S(A2+T(A4+TA6)) */ + fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0xAAAAAAAAAAAAAA99)); + fp1 = floatx80_add(fp1, fp4, status); /* A1+T(A3+T(A5+TA7)) */ + fp1 = floatx80_add(fp1, fp2, status); + /* [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))] */ + + x = packFloatx80(xSign, xExp, xSig); + fp0 = floatx80_mul(fp0, x, status); /* R'*S */ + fp0 = floatx80_mul(fp0, fp1, status); /* SIN(R')-R' */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, x, status); + + float_raise(float_flag_inexact, status); + + return a; + } + } +} + +/* + * Arc tangent + */ + +floatx80 floatx80_atan(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact, tbl_index; + floatx80 fp0, fp1, fp2, fp3, xsave; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + a = packFloatx80(aSign, piby2_exp, pi_sig); + float_raise(float_flag_inexact, status); + return floatx80_move(a, status); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + + compact = floatx80_make_compact(aExp, aSig); + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + if (compact < 0x3FFB8000 || compact > 0x4002FFFF) { + /* |X| >= 16 or |X| < 1/16 */ + if (compact > 0x3FFF8000) { /* |X| >= 16 */ + if (compact > 0x40638000) { /* |X| > 2^(100) */ + fp0 = packFloatx80(aSign, piby2_exp, pi_sig); + fp1 = packFloatx80(aSign, 0x0001, one_sig); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_sub(fp0, fp1, status); + + float_raise(float_flag_inexact, status); + + return a; + } else { + fp0 = a; + fp1 = packFloatx80(1, one_exp, one_sig); /* -1 */ + fp1 = floatx80_div(fp1, fp0, status); /* X' = -1/X */ + xsave = fp1; + fp0 = floatx80_mul(fp1, fp1, status); /* Y = X'*X' */ + fp1 = floatx80_mul(fp0, fp0, 
status); /* Z = Y*Y */
+                fp3 = float64_to_floatx80(make_float64(0xBFB70BF398539E6A),
+                                          status); /* C5 */
+                fp2 = float64_to_floatx80(make_float64(0x3FBC7187962D1D7D),
+                                          status); /* C4 */
+                fp3 = floatx80_mul(fp3, fp1, status); /* Z*C5 */
+                fp2 = floatx80_mul(fp2, fp1, status); /* Z*C4 */
+                fp3 = floatx80_add(fp3, float64_to_floatx80(
+                                   make_float64(0xBFC24924827107B8), status),
+                                   status); /* C3+Z*C5 */
+                fp2 = floatx80_add(fp2, float64_to_floatx80(
+                                   make_float64(0x3FC999999996263E), status),
+                                   status); /* C2+Z*C4 */
+                fp1 = floatx80_mul(fp1, fp3, status); /* Z*(C3+Z*C5) */
+                fp2 = floatx80_mul(fp2, fp0, status); /* Y*(C2+Z*C4) */
+                fp1 = floatx80_add(fp1, float64_to_floatx80(
+                                   make_float64(0xBFD5555555555536), status),
+                                   status); /* C1+Z*(C3+Z*C5) */
+                fp0 = floatx80_mul(fp0, xsave, status); /* X'*Y */
+                /* [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)] */
+                fp1 = floatx80_add(fp1, fp2, status);
+                /* X'*Y*([C1+Z*(C3+Z*C5)]+[Y*(C2+Z*C4)]) */
+                fp0 = floatx80_mul(fp0, fp1, status);
+                fp0 = floatx80_add(fp0, xsave, status);
+                fp1 = packFloatx80(aSign, piby2_exp, pi_sig);
+
+                status->float_rounding_mode = user_rnd_mode;
+                status->floatx80_rounding_precision = user_rnd_prec;
+
+                a = floatx80_add(fp0, fp1, status);
+
+                float_raise(float_flag_inexact, status);
+
+                return a;
+            }
+        } else { /* |X| < 1/16 */
+            if (compact < 0x3FD78000) { /* |X| < 2^(-40) */
+                status->float_rounding_mode = user_rnd_mode;
+                status->floatx80_rounding_precision = user_rnd_prec;
+
+                a = floatx80_move(a, status);
+
+                float_raise(float_flag_inexact, status);
+
+                return a;
+            } else {
+                fp0 = a;
+                xsave = a;
+                fp0 = floatx80_mul(fp0, fp0, status); /* Y = X*X */
+                fp1 = floatx80_mul(fp0, fp0, status); /* Z = Y*Y */
+                fp2 = float64_to_floatx80(make_float64(0x3FB344447F876989),
+                                          status); /* B6 */
+                fp3 = float64_to_floatx80(make_float64(0xBFB744EE7FAF45DB),
+                                          status); /* B5 */
+                fp2 = floatx80_mul(fp2, fp1, status); /* Z*B6 */
+                fp3 = floatx80_mul(fp3, fp1, status); /* Z*B5 */
+                fp2 = floatx80_add(fp2, float64_to_floatx80(
+                                   make_float64(0x3FBC71C646940220), status),
+                                   status); /* B4+Z*B6 */
+                fp3 = floatx80_add(fp3, float64_to_floatx80(
+                                   make_float64(0xBFC24924921872F9),
+                                   status), status); /* B3+Z*B5 */
+                fp2 = floatx80_mul(fp2, fp1, status); /* Z*(B4+Z*B6) */
+                fp1 = floatx80_mul(fp1, fp3, status); /* Z*(B3+Z*B5) */
+                fp2 = floatx80_add(fp2, float64_to_floatx80(
+                                   make_float64(0x3FC9999999998FA9), status),
+                                   status); /* B2+Z*(B4+Z*B6) */
+                fp1 = floatx80_add(fp1, float64_to_floatx80(
+                                   make_float64(0xBFD5555555555555), status),
+                                   status); /* B1+Z*(B3+Z*B5) */
+                fp2 = floatx80_mul(fp2, fp0, status); /* Y*(B2+Z*(B4+Z*B6)) */
+                fp0 = floatx80_mul(fp0, xsave, status); /* X*Y */
+                /* [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))] */
+                fp1 = floatx80_add(fp1, fp2, status);
+                /* X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]) */
+                fp0 = floatx80_mul(fp0, fp1, status);
+
+                status->float_rounding_mode = user_rnd_mode;
+                status->floatx80_rounding_precision = user_rnd_prec;
+
+                a = floatx80_add(fp0, xsave, status);
+
+                float_raise(float_flag_inexact, status);
+
+                return a;
+            }
+        }
+    } else {
+        aSig &= UINT64_C(0xF800000000000000);
+        aSig |= UINT64_C(0x0400000000000000);
+        xsave = packFloatx80(aSign, aExp, aSig); /* F */
+        fp0 = a;
+        fp1 = a; /* X */
+        fp2 = packFloatx80(0, one_exp, one_sig); /* 1 */
+        fp1 = floatx80_mul(fp1, xsave, status); /* X*F */
+        fp0 = floatx80_sub(fp0, xsave, status); /* X-F */
+        fp1 = floatx80_add(fp1, fp2, status); /* 1 + X*F */
+        fp0 = floatx80_div(fp0, fp1, status); /* U = (X-F)/(1+X*F) */
+
+        tbl_index = compact;
+
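+        /*
+         * NOTE (editorial, not part of the imported code): compact is
+         * (biased exponent << 16) | top 16 fraction bits, so the shift
+         * sequence below reduces to
+         *
+         *   tbl_index = 16*(aExp - 0x3FFB) + (top 4 fraction bits)
+         *
+         * i.e. 8 binades covering 1/16 <= |X| < 16, 16 subintervals
+         * each, selecting atan(F) from the 128-entry atan_tbl.  F is X
+         * truncated to those bits plus half a step (the 0x04... OR
+         * above), and the identity used is
+         * atan(X) = atan(F) + atan(U), U = (X-F)/(1+X*F).
+         */
+#if 0
+        /* equivalent restatement of the index computation */
+        tbl_index = ((((compact >> 16) & 0x7FFF) - 0x3FFB) << 4)
+                    | ((compact >> 11) & 0xF);
+#endif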
+        tbl_index &= 0x7FFF0000;
+        tbl_index -= 0x3FFB0000;
+        tbl_index >>= 1;
+        tbl_index += compact & 0x00007800;
+        tbl_index >>= 11;
+
+        fp3 = atan_tbl[tbl_index];
+
+        fp3.high |= aSign ? 0x8000 : 0; /* ATAN(F) */
+
+        fp1 = floatx80_mul(fp0, fp0, status); /* V = U*U */
+        fp2 = float64_to_floatx80(make_float64(0xBFF6687E314987D8),
+                                  status); /* A3 */
+        fp2 = floatx80_add(fp2, fp1, status); /* A3+V */
+        fp2 = floatx80_mul(fp2, fp1, status); /* V*(A3+V) */
+        fp1 = floatx80_mul(fp1, fp0, status); /* U*V */
+        fp2 = floatx80_add(fp2, float64_to_floatx80(
+                           make_float64(0x4002AC6934A26DB3), status),
+                           status); /* A2+V*(A3+V) */
+        fp1 = floatx80_mul(fp1, float64_to_floatx80(
+                           make_float64(0xBFC2476F4E1DA28E), status),
+                           status); /* A1*U*V */
+        fp1 = floatx80_mul(fp1, fp2, status); /* A1*U*V*(A2+V*(A3+V)) */
+        fp0 = floatx80_add(fp0, fp1, status); /* ATAN(U) */
+
+        status->float_rounding_mode = user_rnd_mode;
+        status->floatx80_rounding_precision = user_rnd_prec;
+
+        a = floatx80_add(fp0, fp3, status); /* ATAN(X) */
+
+        float_raise(float_flag_inexact, status);
+
+        return a;
+    }
+}
+
+/*
+ * Arc sine
+ */
+
+floatx80 floatx80_asin(floatx80 a, float_status *status)
+{
+    flag aSign;
+    int32_t aExp;
+    uint64_t aSig;
+
+    int8_t user_rnd_mode, user_rnd_prec;
+
+    int32_t compact;
+    floatx80 fp0, fp1, fp2, one;
+
+    aSig = extractFloatx80Frac(a);
+    aExp = extractFloatx80Exp(a);
+    aSign = extractFloatx80Sign(a);
+
+    if (aExp == 0x7FFF && (uint64_t) (aSig << 1)) {
+        return propagateFloatx80NaNOneArg(a, status);
+    }
+
+    if (aExp == 0 && aSig == 0) {
+        return packFloatx80(aSign, 0, 0);
+    }
+
+    compact = floatx80_make_compact(aExp, aSig);
+
+    if (compact >= 0x3FFF8000) { /* |X| >= 1 */
+        if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */
+            float_raise(float_flag_inexact, status);
+            a = packFloatx80(aSign, piby2_exp, pi_sig);
+            return floatx80_move(a, status);
+        } else { /* |X| > 1 */
+            float_raise(float_flag_invalid, status);
+            return floatx80_default_nan(status);
+        }
+
+    } /* |X| < 1 */
+
+    user_rnd_mode = status->float_rounding_mode;
+    user_rnd_prec = status->floatx80_rounding_precision;
+    status->float_rounding_mode = float_round_nearest_even;
+    status->floatx80_rounding_precision = 80;
+
+    one = packFloatx80(0, one_exp, one_sig);
+    fp0 = a;
+
+    fp1 = floatx80_sub(one, fp0, status); /* 1 - X */
+    fp2 = floatx80_add(one, fp0, status); /* 1 + X */
+    fp1 = floatx80_mul(fp2, fp1, status); /* (1+X)*(1-X) */
+    fp1 = floatx80_sqrt(fp1, status); /* SQRT((1+X)*(1-X)) */
+    fp0 = floatx80_div(fp0, fp1, status); /* X/SQRT((1+X)*(1-X)) */
+
+    status->float_rounding_mode = user_rnd_mode;
+    status->floatx80_rounding_precision = user_rnd_prec;
+
+    a = floatx80_atan(fp0, status); /* ATAN(X/SQRT((1+X)*(1-X))) */
+
+    float_raise(float_flag_inexact, status);
+
+    return a;
+}
+
+/*
+ * Arc cosine
+ */
+
+floatx80 floatx80_acos(floatx80 a, float_status *status)
+{
+    flag aSign;
+    int32_t aExp;
+    uint64_t aSig;
+
+    int8_t user_rnd_mode, user_rnd_prec;
+
+    int32_t compact;
+    floatx80 fp0, fp1, one;
+
+    aSig = extractFloatx80Frac(a);
+    aExp = extractFloatx80Exp(a);
+    aSign = extractFloatx80Sign(a);
+
+    if (aExp == 0x7FFF && (uint64_t) (aSig << 1)) {
+        return propagateFloatx80NaNOneArg(a, status);
+    }
+    if (aExp == 0 && aSig == 0) {
+        float_raise(float_flag_inexact, status);
+        return roundAndPackFloatx80(status->floatx80_rounding_precision, 0,
+                                    piby2_exp, pi_sig, 0, status);
+    }
+
+    compact = floatx80_make_compact(aExp, aSig);
+
+    if (compact >= 0x3FFF8000) { /* |X| >= 1 */
+        if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */
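+            /*
+             * NOTE (editorial): acos(-1) = pi and acos(+1) = +0 exactly;
+             * for |X| < 1 the code further below uses
+             * acos(x) = 2*atan(sqrt((1-x)/(1+x))).
+             */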
+ if (aSign) { /* X == -1 */ + a = packFloatx80(0, pi_exp, pi_sig); + float_raise(float_flag_inexact, status); + return floatx80_move(a, status); + } else { /* X == +1 */ + return packFloatx80(0, 0, 0); + } + } else { /* |X| > 1 */ + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + } /* |X| < 1 */ + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + one = packFloatx80(0, one_exp, one_sig); + fp0 = a; + + fp1 = floatx80_add(one, fp0, status); /* 1 + X */ + fp0 = floatx80_sub(one, fp0, status); /* 1 - X */ + fp0 = floatx80_div(fp0, fp1, status); /* (1-X)/(1+X) */ + fp0 = floatx80_sqrt(fp0, status); /* SQRT((1-X)/(1+X)) */ + fp0 = floatx80_atan(fp0, status); /* ATAN(SQRT((1-X)/(1+X))) */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, fp0, status); /* 2 * ATAN(SQRT((1-X)/(1+X))) */ + + float_raise(float_flag_inexact, status); + + return a; +} + +/* + * Hyperbolic arc tangent + */ + +floatx80 floatx80_atanh(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact; + floatx80 fp0, fp1, fp2, one; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF && (uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + + compact = floatx80_make_compact(aExp, aSig); + + if (compact >= 0x3FFF8000) { /* |X| >= 1 */ + if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */ + float_raise(float_flag_divbyzero, status); + return packFloatx80(aSign, floatx80_infinity.high, + floatx80_infinity.low); + } else { /* |X| > 1 */ + float_raise(float_flag_invalid, status); + return floatx80_default_nan(status); + } + } /* |X| < 1 */ + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + one = packFloatx80(0, one_exp, one_sig); + fp2 = packFloatx80(aSign, 0x3FFE, one_sig); /* SIGN(X) * (1/2) */ + fp0 = packFloatx80(0, aExp, aSig); /* Y = |X| */ + fp1 = packFloatx80(1, aExp, aSig); /* -Y */ + fp0 = floatx80_add(fp0, fp0, status); /* 2Y */ + fp1 = floatx80_add(fp1, one, status); /* 1-Y */ + fp0 = floatx80_div(fp0, fp1, status); /* Z = 2Y/(1-Y) */ + fp0 = floatx80_lognp1(fp0, status); /* LOG1P(Z) */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, fp2, + status); /* ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z) */ + + float_raise(float_flag_inexact, status); + + return a; +} + +/* + * e to x minus 1 + */ + +floatx80 floatx80_etoxm1(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact, n, j, m, m1; + floatx80 fp0, fp1, fp2, fp3, l2, sc, onebysc; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + if (aSign) { + return packFloatx80(aSign, one_exp, one_sig); + } + return packFloatx80(0, floatx80_infinity.high, + floatx80_infinity.low); 
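+        /*
+         * NOTE (editorial): the two returns above encode
+         * expm1(-inf) = -1 (aSign set with one_exp/one_sig) and
+         * expm1(+inf) = +inf.
+         */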
+ } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + if (aExp >= 0x3FFD) { /* |X| >= 1/4 */ + compact = floatx80_make_compact(aExp, aSig); + + if (compact <= 0x4004C215) { /* |X| <= 70 log2 */ + fp0 = a; + fp1 = a; + fp0 = floatx80_mul(fp0, float32_to_floatx80( + make_float32(0x42B8AA3B), status), + status); /* 64/log2 * X */ + n = floatx80_to_int32(fp0, status); /* int(64/log2*X) */ + fp0 = int32_to_floatx80(n, status); + + j = n & 0x3F; /* J = N mod 64 */ + m = n / 64; /* NOTE: this is really arithmetic right shift by 6 */ + if (n < 0 && j) { + /* + * arithmetic right shift is division and + * round towards minus infinity + */ + m--; + } + m1 = -m; + /*m += 0x3FFF; // biased exponent of 2^(M) */ + /*m1 += 0x3FFF; // biased exponent of -2^(-M) */ + + fp2 = fp0; /* N */ + fp0 = floatx80_mul(fp0, float32_to_floatx80( + make_float32(0xBC317218), status), + status); /* N * L1, L1 = lead(-log2/64) */ + l2 = packFloatx80(0, 0x3FDC, UINT64_C(0x82E308654361C4C6)); + fp2 = floatx80_mul(fp2, l2, status); /* N * L2, L1+L2 = -log2/64 */ + fp0 = floatx80_add(fp0, fp1, status); /* X + N*L1 */ + fp0 = floatx80_add(fp0, fp2, status); /* R */ + + fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */ + fp2 = float32_to_floatx80(make_float32(0x3950097B), + status); /* A6 */ + fp2 = floatx80_mul(fp2, fp1, status); /* fp2 is S*A6 */ + fp3 = floatx80_mul(float32_to_floatx80(make_float32(0x3AB60B6A), + status), fp1, status); /* fp3 is S*A5 */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3F81111111174385), status), + status); /* fp2 IS A4+S*A6 */ + fp3 = floatx80_add(fp3, float64_to_floatx80( + make_float64(0x3FA5555555554F5A), status), + status); /* fp3 is A3+S*A5 */ + fp2 = floatx80_mul(fp2, fp1, status); /* fp2 IS S*(A4+S*A6) */ + fp3 = floatx80_mul(fp3, fp1, status); /* fp3 IS S*(A3+S*A5) */ + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3FC5555555555555), status), + status); /* fp2 IS A2+S*(A4+S*A6) */ + fp3 = floatx80_add(fp3, float32_to_floatx80( + make_float32(0x3F000000), status), + status); /* fp3 IS A1+S*(A3+S*A5) */ + fp2 = floatx80_mul(fp2, fp1, + status); /* fp2 IS S*(A2+S*(A4+S*A6)) */ + fp1 = floatx80_mul(fp1, fp3, + status); /* fp1 IS S*(A1+S*(A3+S*A5)) */ + fp2 = floatx80_mul(fp2, fp0, + status); /* fp2 IS R*S*(A2+S*(A4+S*A6)) */ + fp0 = floatx80_add(fp0, fp1, + status); /* fp0 IS R+S*(A1+S*(A3+S*A5)) */ + fp0 = floatx80_add(fp0, fp2, status); /* fp0 IS EXP(R) - 1 */ + + fp0 = floatx80_mul(fp0, exp_tbl[j], + status); /* 2^(J/64)*(Exp(R)-1) */ + + if (m >= 64) { + fp1 = float32_to_floatx80(exp_tbl2[j], status); + onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); /* -2^(-M) */ + fp1 = floatx80_add(fp1, onebysc, status); + fp0 = floatx80_add(fp0, fp1, status); + fp0 = floatx80_add(fp0, exp_tbl[j], status); + } else if (m < -3) { + fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j], + status), status); + fp0 = floatx80_add(fp0, exp_tbl[j], status); + onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); /* -2^(-M) */ + fp0 = floatx80_add(fp0, onebysc, status); + } else { /* -3 <= m <= 63 */ + fp1 = exp_tbl[j]; + fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j], + status), status); + onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); /* -2^(-M) */ + fp1 = floatx80_add(fp1, onebysc, status); + fp0 = floatx80_add(fp0, fp1, status); + 
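+
+            /*
+             * NOTE (editorial, not part of the imported code): all three
+             * branches above accumulate the same quantity,
+             *
+             *   expm1(X) = 2^M * (2^(J/64)*exp(R) - 2^(-M)),
+             *
+             * with 2^(J/64) split into a floatx80 head exp_tbl[J] and a
+             * float32 tail exp_tbl2[J]; only the order of the additions
+             * changes with M, to control rounding and cancellation.
+             */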
} + + sc = packFloatx80(0, m + 0x3FFF, one_sig); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, sc, status); + + float_raise(float_flag_inexact, status); + + return a; + } else { /* |X| > 70 log2 */ + if (aSign) { + fp0 = float32_to_floatx80(make_float32(0xBF800000), + status); /* -1 */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, float32_to_floatx80( + make_float32(0x00800000), status), + status); /* -1 + 2^(-126) */ + + float_raise(float_flag_inexact, status); + + return a; + } else { + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + return floatx80_etox(a, status); + } + } + } else { /* |X| < 1/4 */ + if (aExp >= 0x3FBE) { + fp0 = a; + fp0 = floatx80_mul(fp0, fp0, status); /* S = X*X */ + fp1 = float32_to_floatx80(make_float32(0x2F30CAA8), + status); /* B12 */ + fp1 = floatx80_mul(fp1, fp0, status); /* S * B12 */ + fp2 = float32_to_floatx80(make_float32(0x310F8290), + status); /* B11 */ + fp1 = floatx80_add(fp1, float32_to_floatx80( + make_float32(0x32D73220), status), + status); /* B10 */ + fp2 = floatx80_mul(fp2, fp0, status); + fp1 = floatx80_mul(fp1, fp0, status); + fp2 = floatx80_add(fp2, float32_to_floatx80( + make_float32(0x3493F281), status), + status); /* B9 */ + fp1 = floatx80_add(fp1, float64_to_floatx80( + make_float64(0x3EC71DE3A5774682), status), + status); /* B8 */ + fp2 = floatx80_mul(fp2, fp0, status); + fp1 = floatx80_mul(fp1, fp0, status); + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3EFA01A019D7CB68), status), + status); /* B7 */ + fp1 = floatx80_add(fp1, float64_to_floatx80( + make_float64(0x3F2A01A01A019DF3), status), + status); /* B6 */ + fp2 = floatx80_mul(fp2, fp0, status); + fp1 = floatx80_mul(fp1, fp0, status); + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3F56C16C16C170E2), status), + status); /* B5 */ + fp1 = floatx80_add(fp1, float64_to_floatx80( + make_float64(0x3F81111111111111), status), + status); /* B4 */ + fp2 = floatx80_mul(fp2, fp0, status); + fp1 = floatx80_mul(fp1, fp0, status); + fp2 = floatx80_add(fp2, float64_to_floatx80( + make_float64(0x3FA5555555555555), status), + status); /* B3 */ + fp3 = packFloatx80(0, 0x3FFC, UINT64_C(0xAAAAAAAAAAAAAAAB)); + fp1 = floatx80_add(fp1, fp3, status); /* B2 */ + fp2 = floatx80_mul(fp2, fp0, status); + fp1 = floatx80_mul(fp1, fp0, status); + + fp2 = floatx80_mul(fp2, fp0, status); + fp1 = floatx80_mul(fp1, a, status); + + fp0 = floatx80_mul(fp0, float32_to_floatx80( + make_float32(0x3F000000), status), + status); /* S*B1 */ + fp1 = floatx80_add(fp1, fp2, status); /* Q */ + fp0 = floatx80_add(fp0, fp1, status); /* S*B1+Q */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, a, status); + + float_raise(float_flag_inexact, status); + + return a; + } else { /* |X| < 2^(-65) */ + sc = packFloatx80(1, 1, one_sig); + fp0 = a; + + if (aExp < 0x0033) { /* |X| < 2^(-16382) */ + fp0 = floatx80_mul(fp0, float64_to_floatx80( + make_float64(0x48B0000000000000), status), + status); + fp0 = floatx80_add(fp0, sc, status); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, float64_to_floatx80( + make_float64(0x3730000000000000), status), + status); + } else { + status->float_rounding_mode = user_rnd_mode; + 
status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, sc, status); + } + + float_raise(float_flag_inexact, status); + + return a; + } + } +} + +/* + * Hyperbolic tangent + */ + +floatx80 floatx80_tanh(floatx80 a, float_status *status) +{ + flag aSign, vSign; + int32_t aExp, vExp; + uint64_t aSig, vSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact; + floatx80 fp0, fp1; + uint32_t sign; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + return packFloatx80(aSign, one_exp, one_sig); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + compact = floatx80_make_compact(aExp, aSig); + + if (compact < 0x3FD78000 || compact > 0x3FFFDDCE) { + /* TANHBORS */ + if (compact < 0x3FFF8000) { + /* TANHSM */ + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_move(a, status); + + float_raise(float_flag_inexact, status); + + return a; + } else { + if (compact > 0x40048AA1) { + /* TANHHUGE */ + sign = 0x3F800000; + sign |= aSign ? 0x80000000 : 0x00000000; + fp0 = float32_to_floatx80(make_float32(sign), status); + sign &= 0x80000000; + sign ^= 0x80800000; /* -SIGN(X)*EPS */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, float32_to_floatx80(make_float32(sign), + status), status); + + float_raise(float_flag_inexact, status); + + return a; + } else { + fp0 = packFloatx80(0, aExp + 1, aSig); /* Y = 2|X| */ + fp0 = floatx80_etox(fp0, status); /* FP0 IS EXP(Y) */ + fp0 = floatx80_add(fp0, float32_to_floatx80( + make_float32(0x3F800000), + status), status); /* EXP(Y)+1 */ + sign = aSign ? 
0x80000000 : 0x00000000; + fp1 = floatx80_div(float32_to_floatx80(make_float32( + sign ^ 0xC0000000), status), fp0, + status); /* -SIGN(X)*2 / [EXP(Y)+1] */ + fp0 = float32_to_floatx80(make_float32(sign | 0x3F800000), + status); /* SIGN */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp1, fp0, status); + + float_raise(float_flag_inexact, status); + + return a; + } + } + } else { /* 2**(-40) < |X| < (5/2)LOG2 */ + fp0 = packFloatx80(0, aExp + 1, aSig); /* Y = 2|X| */ + fp0 = floatx80_etoxm1(fp0, status); /* FP0 IS Z = EXPM1(Y) */ + fp1 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x40000000), + status), + status); /* Z+2 */ + + vSign = extractFloatx80Sign(fp1); + vExp = extractFloatx80Exp(fp1); + vSig = extractFloatx80Frac(fp1); + + fp1 = packFloatx80(vSign ^ aSign, vExp, vSig); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_div(fp0, fp1, status); + + float_raise(float_flag_inexact, status); + + return a; + } +} + +/* + * Hyperbolic sine + */ + +floatx80 floatx80_sinh(floatx80 a, float_status *status) +{ + flag aSign; + int32_t aExp; + uint64_t aSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact; + floatx80 fp0, fp1, fp2; + float32 fact; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + aSign = extractFloatx80Sign(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + return packFloatx80(aSign, floatx80_infinity.high, + floatx80_infinity.low); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(aSign, 0, 0); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + compact = floatx80_make_compact(aExp, aSig); + + if (compact > 0x400CB167) { + /* SINHBIG */ + if (compact > 0x400CB2B3) { + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + return roundAndPackFloatx80(status->floatx80_rounding_precision, + aSign, 0x8000, aSig, 0, status); + } else { + fp0 = floatx80_abs(a); /* Y = |X| */ + fp0 = floatx80_sub(fp0, float64_to_floatx80( + make_float64(0x40C62D38D3D64634), status), + status); /* (|X|-16381LOG2_LEAD) */ + fp0 = floatx80_sub(fp0, float64_to_floatx80( + make_float64(0x3D6F90AEB1E75CC7), status), + status); /* |X| - 16381 LOG2, ACCURATE */ + fp0 = floatx80_etox(fp0, status); + fp2 = packFloatx80(aSign, 0x7FFB, one_sig); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, fp2, status); + + float_raise(float_flag_inexact, status); + + return a; + } + } else { /* |X| < 16380 LOG2 */ + fp0 = floatx80_abs(a); /* Y = |X| */ + fp0 = floatx80_etoxm1(fp0, status); /* FP0 IS Z = EXPM1(Y) */ + fp1 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000), + status), status); /* 1+Z */ + fp2 = fp0; + fp0 = floatx80_div(fp0, fp1, status); /* Z/(1+Z) */ + fp0 = floatx80_add(fp0, fp2, status); + + fact = packFloat32(aSign, 0x7E, 0); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, float32_to_floatx80(fact, status), status); + + float_raise(float_flag_inexact, status); + + return a; + } +} + +/* + * Hyperbolic cosine + */ + +floatx80 floatx80_cosh(floatx80 a, float_status 
*status) +{ + int32_t aExp; + uint64_t aSig; + + int8_t user_rnd_mode, user_rnd_prec; + + int32_t compact; + floatx80 fp0, fp1; + + aSig = extractFloatx80Frac(a); + aExp = extractFloatx80Exp(a); + + if (aExp == 0x7FFF) { + if ((uint64_t) (aSig << 1)) { + return propagateFloatx80NaNOneArg(a, status); + } + return packFloatx80(0, floatx80_infinity.high, + floatx80_infinity.low); + } + + if (aExp == 0 && aSig == 0) { + return packFloatx80(0, one_exp, one_sig); + } + + user_rnd_mode = status->float_rounding_mode; + user_rnd_prec = status->floatx80_rounding_precision; + status->float_rounding_mode = float_round_nearest_even; + status->floatx80_rounding_precision = 80; + + compact = floatx80_make_compact(aExp, aSig); + + if (compact > 0x400CB167) { + if (compact > 0x400CB2B3) { + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + return roundAndPackFloatx80(status->floatx80_rounding_precision, 0, + 0x8000, one_sig, 0, status); + } else { + fp0 = packFloatx80(0, aExp, aSig); + fp0 = floatx80_sub(fp0, float64_to_floatx80( + make_float64(0x40C62D38D3D64634), status), + status); + fp0 = floatx80_sub(fp0, float64_to_floatx80( + make_float64(0x3D6F90AEB1E75CC7), status), + status); + fp0 = floatx80_etox(fp0, status); + fp1 = packFloatx80(0, 0x7FFB, one_sig); + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_mul(fp0, fp1, status); + + float_raise(float_flag_inexact, status); + + return a; + } + } + + fp0 = packFloatx80(0, aExp, aSig); /* |X| */ + fp0 = floatx80_etox(fp0, status); /* EXP(|X|) */ + fp0 = floatx80_mul(fp0, float32_to_floatx80(make_float32(0x3F000000), + status), status); /* (1/2)*EXP(|X|) */ + fp1 = float32_to_floatx80(make_float32(0x3E800000), status); /* 1/4 */ + fp1 = floatx80_div(fp1, fp0, status); /* 1/(2*EXP(|X|)) */ + + status->float_rounding_mode = user_rnd_mode; + status->floatx80_rounding_precision = user_rnd_prec; + + a = floatx80_add(fp0, fp1, status); + + float_raise(float_flag_inexact, status); + + return a; +} diff --git a/qemu/target/m68k/softfloat.h b/qemu/target/m68k/softfloat.h new file mode 100644 index 00000000..365ef6ac --- /dev/null +++ b/qemu/target/m68k/softfloat.h @@ -0,0 +1,49 @@ +/* + * Ported from a work by Andreas Grabher for Previous, NeXT Computer Emulator, + * derived from NetBSD M68040 FPSP functions, + * derived from release 2a of the SoftFloat IEC/IEEE Floating-point Arithmetic + * Package. Those parts of the code (and some later contributions) are + * provided under that license, as detailed below. + * It has subsequently been modified by contributors to the QEMU Project, + * so some portions are provided under: + * the SoftFloat-2a license + * the BSD license + * GPL-v2-or-later + * + * Any future contributions to this file will be taken to be licensed under + * the Softfloat-2a license unless specifically indicated otherwise. + */ + +/* + * Portions of this work are licensed under the terms of the GNU GPL, + * version 2 or later. See the COPYING file in the top-level directory. 
+ */ + +#ifndef TARGET_M68K_SOFTFLOAT_H +#define TARGET_M68K_SOFTFLOAT_H +#include "fpu/softfloat.h" + +floatx80 floatx80_mod(floatx80 a, floatx80 b, float_status *status); +floatx80 floatx80_getman(floatx80 a, float_status *status); +floatx80 floatx80_getexp(floatx80 a, float_status *status); +floatx80 floatx80_scale(floatx80 a, floatx80 b, float_status *status); +floatx80 floatx80_move(floatx80 a, float_status *status); +floatx80 floatx80_lognp1(floatx80 a, float_status *status); +floatx80 floatx80_logn(floatx80 a, float_status *status); +floatx80 floatx80_log10(floatx80 a, float_status *status); +floatx80 floatx80_log2(floatx80 a, float_status *status); +floatx80 floatx80_etox(floatx80 a, float_status *status); +floatx80 floatx80_twotox(floatx80 a, float_status *status); +floatx80 floatx80_tentox(floatx80 a, float_status *status); +floatx80 floatx80_tan(floatx80 a, float_status *status); +floatx80 floatx80_sin(floatx80 a, float_status *status); +floatx80 floatx80_cos(floatx80 a, float_status *status); +floatx80 floatx80_atan(floatx80 a, float_status *status); +floatx80 floatx80_asin(floatx80 a, float_status *status); +floatx80 floatx80_acos(floatx80 a, float_status *status); +floatx80 floatx80_atanh(floatx80 a, float_status *status); +floatx80 floatx80_etoxm1(floatx80 a, float_status *status); +floatx80 floatx80_tanh(floatx80 a, float_status *status); +floatx80 floatx80_sinh(floatx80 a, float_status *status); +floatx80 floatx80_cosh(floatx80 a, float_status *status); +#endif diff --git a/qemu/target/m68k/softfloat_fpsp_tables.h b/qemu/target/m68k/softfloat_fpsp_tables.h new file mode 100644 index 00000000..2ccd9e8b --- /dev/null +++ b/qemu/target/m68k/softfloat_fpsp_tables.h @@ -0,0 +1,642 @@ +/* + * Ported from a work by Andreas Grabher for Previous, NeXT Computer Emulator, + * derived from NetBSD M68040 FPSP functions, + * derived from release 2a of the SoftFloat IEC/IEEE Floating-point Arithmetic + * Package. Those parts of the code (and some later contributions) are + * provided under that license, as detailed below. + * It has subsequently been modified by contributors to the QEMU Project, + * so some portions are provided under: + * the SoftFloat-2a license + * the BSD license + * GPL-v2-or-later + * + * Any future contributions to this file will be taken to be licensed under + * the Softfloat-2a license unless specifically indicated otherwise. + */ + +/* + * Portions of this work are licensed under the terms of the GNU GPL, + * version 2 or later. See the COPYING file in the top-level directory. 
+ */ + +#ifndef TARGET_M68K_SOFTFLOAT_FPSP_TABLES_H +#define TARGET_M68K_SOFTFLOAT_FPSP_TABLES_H + +static const floatx80 log_tbl[128] = { + make_floatx80_init(0x3FFE, 0xFE03F80FE03F80FE), + make_floatx80_init(0x3FF7, 0xFF015358833C47E2), + make_floatx80_init(0x3FFE, 0xFA232CF252138AC0), + make_floatx80_init(0x3FF9, 0xBDC8D83EAD88D549), + make_floatx80_init(0x3FFE, 0xF6603D980F6603DA), + make_floatx80_init(0x3FFA, 0x9CF43DCFF5EAFD48), + make_floatx80_init(0x3FFE, 0xF2B9D6480F2B9D65), + make_floatx80_init(0x3FFA, 0xDA16EB88CB8DF614), + make_floatx80_init(0x3FFE, 0xEF2EB71FC4345238), + make_floatx80_init(0x3FFB, 0x8B29B7751BD70743), + make_floatx80_init(0x3FFE, 0xEBBDB2A5C1619C8C), + make_floatx80_init(0x3FFB, 0xA8D839F830C1FB49), + make_floatx80_init(0x3FFE, 0xE865AC7B7603A197), + make_floatx80_init(0x3FFB, 0xC61A2EB18CD907AD), + make_floatx80_init(0x3FFE, 0xE525982AF70C880E), + make_floatx80_init(0x3FFB, 0xE2F2A47ADE3A18AF), + make_floatx80_init(0x3FFE, 0xE1FC780E1FC780E2), + make_floatx80_init(0x3FFB, 0xFF64898EDF55D551), + make_floatx80_init(0x3FFE, 0xDEE95C4CA037BA57), + make_floatx80_init(0x3FFC, 0x8DB956A97B3D0148), + make_floatx80_init(0x3FFE, 0xDBEB61EED19C5958), + make_floatx80_init(0x3FFC, 0x9B8FE100F47BA1DE), + make_floatx80_init(0x3FFE, 0xD901B2036406C80E), + make_floatx80_init(0x3FFC, 0xA9372F1D0DA1BD17), + make_floatx80_init(0x3FFE, 0xD62B80D62B80D62C), + make_floatx80_init(0x3FFC, 0xB6B07F38CE90E46B), + make_floatx80_init(0x3FFE, 0xD3680D3680D3680D), + make_floatx80_init(0x3FFC, 0xC3FD032906488481), + make_floatx80_init(0x3FFE, 0xD0B69FCBD2580D0B), + make_floatx80_init(0x3FFC, 0xD11DE0FF15AB18CA), + make_floatx80_init(0x3FFE, 0xCE168A7725080CE1), + make_floatx80_init(0x3FFC, 0xDE1433A16C66B150), + make_floatx80_init(0x3FFE, 0xCB8727C065C393E0), + make_floatx80_init(0x3FFC, 0xEAE10B5A7DDC8ADD), + make_floatx80_init(0x3FFE, 0xC907DA4E871146AD), + make_floatx80_init(0x3FFC, 0xF7856E5EE2C9B291), + make_floatx80_init(0x3FFE, 0xC6980C6980C6980C), + make_floatx80_init(0x3FFD, 0x82012CA5A68206D7), + make_floatx80_init(0x3FFE, 0xC4372F855D824CA6), + make_floatx80_init(0x3FFD, 0x882C5FCD7256A8C5), + make_floatx80_init(0x3FFE, 0xC1E4BBD595F6E947), + make_floatx80_init(0x3FFD, 0x8E44C60B4CCFD7DE), + make_floatx80_init(0x3FFE, 0xBFA02FE80BFA02FF), + make_floatx80_init(0x3FFD, 0x944AD09EF4351AF6), + make_floatx80_init(0x3FFE, 0xBD69104707661AA3), + make_floatx80_init(0x3FFD, 0x9A3EECD4C3EAA6B2), + make_floatx80_init(0x3FFE, 0xBB3EE721A54D880C), + make_floatx80_init(0x3FFD, 0xA0218434353F1DE8), + make_floatx80_init(0x3FFE, 0xB92143FA36F5E02E), + make_floatx80_init(0x3FFD, 0xA5F2FCABBBC506DA), + make_floatx80_init(0x3FFE, 0xB70FBB5A19BE3659), + make_floatx80_init(0x3FFD, 0xABB3B8BA2AD362A5), + make_floatx80_init(0x3FFE, 0xB509E68A9B94821F), + make_floatx80_init(0x3FFD, 0xB1641795CE3CA97B), + make_floatx80_init(0x3FFE, 0xB30F63528917C80B), + make_floatx80_init(0x3FFD, 0xB70475515D0F1C61), + make_floatx80_init(0x3FFE, 0xB11FD3B80B11FD3C), + make_floatx80_init(0x3FFD, 0xBC952AFEEA3D13E1), + make_floatx80_init(0x3FFE, 0xAF3ADDC680AF3ADE), + make_floatx80_init(0x3FFD, 0xC2168ED0F458BA4A), + make_floatx80_init(0x3FFE, 0xAD602B580AD602B6), + make_floatx80_init(0x3FFD, 0xC788F439B3163BF1), + make_floatx80_init(0x3FFE, 0xAB8F69E28359CD11), + make_floatx80_init(0x3FFD, 0xCCECAC08BF04565D), + make_floatx80_init(0x3FFE, 0xA9C84A47A07F5638), + make_floatx80_init(0x3FFD, 0xD24204872DD85160), + make_floatx80_init(0x3FFE, 0xA80A80A80A80A80B), + make_floatx80_init(0x3FFD, 0xD78949923BC3588A), + 
make_floatx80_init(0x3FFE, 0xA655C4392D7B73A8), + make_floatx80_init(0x3FFD, 0xDCC2C4B49887DACC), + make_floatx80_init(0x3FFE, 0xA4A9CF1D96833751), + make_floatx80_init(0x3FFD, 0xE1EEBD3E6D6A6B9E), + make_floatx80_init(0x3FFE, 0xA3065E3FAE7CD0E0), + make_floatx80_init(0x3FFD, 0xE70D785C2F9F5BDC), + make_floatx80_init(0x3FFE, 0xA16B312EA8FC377D), + make_floatx80_init(0x3FFD, 0xEC1F392C5179F283), + make_floatx80_init(0x3FFE, 0x9FD809FD809FD80A), + make_floatx80_init(0x3FFD, 0xF12440D3E36130E6), + make_floatx80_init(0x3FFE, 0x9E4CAD23DD5F3A20), + make_floatx80_init(0x3FFD, 0xF61CCE92346600BB), + make_floatx80_init(0x3FFE, 0x9CC8E160C3FB19B9), + make_floatx80_init(0x3FFD, 0xFB091FD38145630A), + make_floatx80_init(0x3FFE, 0x9B4C6F9EF03A3CAA), + make_floatx80_init(0x3FFD, 0xFFE97042BFA4C2AD), + make_floatx80_init(0x3FFE, 0x99D722DABDE58F06), + make_floatx80_init(0x3FFE, 0x825EFCED49369330), + make_floatx80_init(0x3FFE, 0x9868C809868C8098), + make_floatx80_init(0x3FFE, 0x84C37A7AB9A905C9), + make_floatx80_init(0x3FFE, 0x97012E025C04B809), + make_floatx80_init(0x3FFE, 0x87224C2E8E645FB7), + make_floatx80_init(0x3FFE, 0x95A02568095A0257), + make_floatx80_init(0x3FFE, 0x897B8CAC9F7DE298), + make_floatx80_init(0x3FFE, 0x9445809445809446), + make_floatx80_init(0x3FFE, 0x8BCF55DEC4CD05FE), + make_floatx80_init(0x3FFE, 0x92F113840497889C), + make_floatx80_init(0x3FFE, 0x8E1DC0FB89E125E5), + make_floatx80_init(0x3FFE, 0x91A2B3C4D5E6F809), + make_floatx80_init(0x3FFE, 0x9066E68C955B6C9B), + make_floatx80_init(0x3FFE, 0x905A38633E06C43B), + make_floatx80_init(0x3FFE, 0x92AADE74C7BE59E0), + make_floatx80_init(0x3FFE, 0x8F1779D9FDC3A219), + make_floatx80_init(0x3FFE, 0x94E9BFF615845643), + make_floatx80_init(0x3FFE, 0x8DDA520237694809), + make_floatx80_init(0x3FFE, 0x9723A1B720134203), + make_floatx80_init(0x3FFE, 0x8CA29C046514E023), + make_floatx80_init(0x3FFE, 0x995899C890EB8990), + make_floatx80_init(0x3FFE, 0x8B70344A139BC75A), + make_floatx80_init(0x3FFE, 0x9B88BDAA3A3DAE2F), + make_floatx80_init(0x3FFE, 0x8A42F8705669DB46), + make_floatx80_init(0x3FFE, 0x9DB4224FFFE1157C), + make_floatx80_init(0x3FFE, 0x891AC73AE9819B50), + make_floatx80_init(0x3FFE, 0x9FDADC268B7A12DA), + make_floatx80_init(0x3FFE, 0x87F78087F78087F8), + make_floatx80_init(0x3FFE, 0xA1FCFF17CE733BD4), + make_floatx80_init(0x3FFE, 0x86D905447A34ACC6), + make_floatx80_init(0x3FFE, 0xA41A9E8F5446FB9F), + make_floatx80_init(0x3FFE, 0x85BF37612CEE3C9B), + make_floatx80_init(0x3FFE, 0xA633CD7E6771CD8B), + make_floatx80_init(0x3FFE, 0x84A9F9C8084A9F9D), + make_floatx80_init(0x3FFE, 0xA8489E600B435A5E), + make_floatx80_init(0x3FFE, 0x839930523FBE3368), + make_floatx80_init(0x3FFE, 0xAA59233CCCA4BD49), + make_floatx80_init(0x3FFE, 0x828CBFBEB9A020A3), + make_floatx80_init(0x3FFE, 0xAC656DAE6BCC4985), + make_floatx80_init(0x3FFE, 0x81848DA8FAF0D277), + make_floatx80_init(0x3FFE, 0xAE6D8EE360BB2468), + make_floatx80_init(0x3FFE, 0x8080808080808081), + make_floatx80_init(0x3FFE, 0xB07197A23C46C654) +}; + +static const floatx80 exp_tbl[64] = { + make_floatx80_init(0x3FFF, 0x8000000000000000), + make_floatx80_init(0x3FFF, 0x8164D1F3BC030774), + make_floatx80_init(0x3FFF, 0x82CD8698AC2BA1D8), + make_floatx80_init(0x3FFF, 0x843A28C3ACDE4048), + make_floatx80_init(0x3FFF, 0x85AAC367CC487B14), + make_floatx80_init(0x3FFF, 0x871F61969E8D1010), + make_floatx80_init(0x3FFF, 0x88980E8092DA8528), + make_floatx80_init(0x3FFF, 0x8A14D575496EFD9C), + make_floatx80_init(0x3FFF, 0x8B95C1E3EA8BD6E8), + make_floatx80_init(0x3FFF, 0x8D1ADF5B7E5BA9E4), + 
make_floatx80_init(0x3FFF, 0x8EA4398B45CD53C0), + make_floatx80_init(0x3FFF, 0x9031DC431466B1DC), + make_floatx80_init(0x3FFF, 0x91C3D373AB11C338), + make_floatx80_init(0x3FFF, 0x935A2B2F13E6E92C), + make_floatx80_init(0x3FFF, 0x94F4EFA8FEF70960), + make_floatx80_init(0x3FFF, 0x96942D3720185A00), + make_floatx80_init(0x3FFF, 0x9837F0518DB8A970), + make_floatx80_init(0x3FFF, 0x99E0459320B7FA64), + make_floatx80_init(0x3FFF, 0x9B8D39B9D54E5538), + make_floatx80_init(0x3FFF, 0x9D3ED9A72CFFB750), + make_floatx80_init(0x3FFF, 0x9EF5326091A111AC), + make_floatx80_init(0x3FFF, 0xA0B0510FB9714FC4), + make_floatx80_init(0x3FFF, 0xA27043030C496818), + make_floatx80_init(0x3FFF, 0xA43515AE09E680A0), + make_floatx80_init(0x3FFF, 0xA5FED6A9B15138EC), + make_floatx80_init(0x3FFF, 0xA7CD93B4E9653568), + make_floatx80_init(0x3FFF, 0xA9A15AB4EA7C0EF8), + make_floatx80_init(0x3FFF, 0xAB7A39B5A93ED338), + make_floatx80_init(0x3FFF, 0xAD583EEA42A14AC8), + make_floatx80_init(0x3FFF, 0xAF3B78AD690A4374), + make_floatx80_init(0x3FFF, 0xB123F581D2AC2590), + make_floatx80_init(0x3FFF, 0xB311C412A9112488), + make_floatx80_init(0x3FFF, 0xB504F333F9DE6484), + make_floatx80_init(0x3FFF, 0xB6FD91E328D17790), + make_floatx80_init(0x3FFF, 0xB8FBAF4762FB9EE8), + make_floatx80_init(0x3FFF, 0xBAFF5AB2133E45FC), + make_floatx80_init(0x3FFF, 0xBD08A39F580C36C0), + make_floatx80_init(0x3FFF, 0xBF1799B67A731084), + make_floatx80_init(0x3FFF, 0xC12C4CCA66709458), + make_floatx80_init(0x3FFF, 0xC346CCDA24976408), + make_floatx80_init(0x3FFF, 0xC5672A115506DADC), + make_floatx80_init(0x3FFF, 0xC78D74C8ABB9B15C), + make_floatx80_init(0x3FFF, 0xC9B9BD866E2F27A4), + make_floatx80_init(0x3FFF, 0xCBEC14FEF2727C5C), + make_floatx80_init(0x3FFF, 0xCE248C151F8480E4), + make_floatx80_init(0x3FFF, 0xD06333DAEF2B2594), + make_floatx80_init(0x3FFF, 0xD2A81D91F12AE45C), + make_floatx80_init(0x3FFF, 0xD4F35AABCFEDFA20), + make_floatx80_init(0x3FFF, 0xD744FCCAD69D6AF4), + make_floatx80_init(0x3FFF, 0xD99D15C278AFD7B4), + make_floatx80_init(0x3FFF, 0xDBFBB797DAF23754), + make_floatx80_init(0x3FFF, 0xDE60F4825E0E9124), + make_floatx80_init(0x3FFF, 0xE0CCDEEC2A94E110), + make_floatx80_init(0x3FFF, 0xE33F8972BE8A5A50), + make_floatx80_init(0x3FFF, 0xE5B906E77C8348A8), + make_floatx80_init(0x3FFF, 0xE8396A503C4BDC68), + make_floatx80_init(0x3FFF, 0xEAC0C6E7DD243930), + make_floatx80_init(0x3FFF, 0xED4F301ED9942B84), + make_floatx80_init(0x3FFF, 0xEFE4B99BDCDAF5CC), + make_floatx80_init(0x3FFF, 0xF281773C59FFB138), + make_floatx80_init(0x3FFF, 0xF5257D152486CC2C), + make_floatx80_init(0x3FFF, 0xF7D0DF730AD13BB8), + make_floatx80_init(0x3FFF, 0xFA83B2DB722A033C), + make_floatx80_init(0x3FFF, 0xFD3E0C0CF486C174) +}; + +static const float32 exp_tbl2[64] = { + const_float32(0x00000000), + const_float32(0x9F841A9B), + const_float32(0x9FC1D5B9), + const_float32(0xA0728369), + const_float32(0x1FC5C95C), + const_float32(0x1EE85C9F), + const_float32(0x9FA20729), + const_float32(0xA07BF9AF), + const_float32(0xA0020DCF), + const_float32(0x205A63DA), + const_float32(0x1EB70051), + const_float32(0x1F6EB029), + const_float32(0xA0781494), + const_float32(0x9EB319B0), + const_float32(0x2017457D), + const_float32(0x1F11D537), + const_float32(0x9FB952DD), + const_float32(0x1FE43087), + const_float32(0x1FA2A818), + const_float32(0x1FDE494D), + const_float32(0x20504890), + const_float32(0xA073691C), + const_float32(0x1F9B7A05), + const_float32(0xA0797126), + const_float32(0xA071A140), + const_float32(0x204F62DA), + const_float32(0x1F283C4A), + const_float32(0x9F9A7FDC), 
+ const_float32(0xA05B3FAC), + const_float32(0x1FDF2610), + const_float32(0x9F705F90), + const_float32(0x201F678A), + const_float32(0x1F32FB13), + const_float32(0x20038B30), + const_float32(0x200DC3CC), + const_float32(0x9F8B2AE6), + const_float32(0xA02BBF70), + const_float32(0xA00BF518), + const_float32(0xA041DD41), + const_float32(0x9FDF137B), + const_float32(0x201F1568), + const_float32(0x1FC13A2E), + const_float32(0xA03F8F03), + const_float32(0x1FF4907D), + const_float32(0x9E6E53E4), + const_float32(0x1FD6D45C), + const_float32(0xA076EDB9), + const_float32(0x9FA6DE21), + const_float32(0x1EE69A2F), + const_float32(0x207F439F), + const_float32(0x201EC207), + const_float32(0x9E8BE175), + const_float32(0x20032C4B), + const_float32(0x2004DFF5), + const_float32(0x1E72F47A), + const_float32(0x1F722F22), + const_float32(0xA017E945), + const_float32(0x1F401A5B), + const_float32(0x9FB9A9E3), + const_float32(0x20744C05), + const_float32(0x1F773A19), + const_float32(0x1FFE90D5), + const_float32(0xA041ED22), + const_float32(0x1F853F3A), +}; + +static const floatx80 exp2_tbl[64] = { + make_floatx80_init(0x3FFF, 0x8000000000000000), + make_floatx80_init(0x3FFF, 0x8164D1F3BC030773), + make_floatx80_init(0x3FFF, 0x82CD8698AC2BA1D7), + make_floatx80_init(0x3FFF, 0x843A28C3ACDE4046), + make_floatx80_init(0x3FFF, 0x85AAC367CC487B15), + make_floatx80_init(0x3FFF, 0x871F61969E8D1010), + make_floatx80_init(0x3FFF, 0x88980E8092DA8527), + make_floatx80_init(0x3FFF, 0x8A14D575496EFD9A), + make_floatx80_init(0x3FFF, 0x8B95C1E3EA8BD6E7), + make_floatx80_init(0x3FFF, 0x8D1ADF5B7E5BA9E6), + make_floatx80_init(0x3FFF, 0x8EA4398B45CD53C0), + make_floatx80_init(0x3FFF, 0x9031DC431466B1DC), + make_floatx80_init(0x3FFF, 0x91C3D373AB11C336), + make_floatx80_init(0x3FFF, 0x935A2B2F13E6E92C), + make_floatx80_init(0x3FFF, 0x94F4EFA8FEF70961), + make_floatx80_init(0x3FFF, 0x96942D3720185A00), + make_floatx80_init(0x3FFF, 0x9837F0518DB8A96F), + make_floatx80_init(0x3FFF, 0x99E0459320B7FA65), + make_floatx80_init(0x3FFF, 0x9B8D39B9D54E5539), + make_floatx80_init(0x3FFF, 0x9D3ED9A72CFFB751), + make_floatx80_init(0x3FFF, 0x9EF5326091A111AE), + make_floatx80_init(0x3FFF, 0xA0B0510FB9714FC2), + make_floatx80_init(0x3FFF, 0xA27043030C496819), + make_floatx80_init(0x3FFF, 0xA43515AE09E6809E), + make_floatx80_init(0x3FFF, 0xA5FED6A9B15138EA), + make_floatx80_init(0x3FFF, 0xA7CD93B4E965356A), + make_floatx80_init(0x3FFF, 0xA9A15AB4EA7C0EF8), + make_floatx80_init(0x3FFF, 0xAB7A39B5A93ED337), + make_floatx80_init(0x3FFF, 0xAD583EEA42A14AC6), + make_floatx80_init(0x3FFF, 0xAF3B78AD690A4375), + make_floatx80_init(0x3FFF, 0xB123F581D2AC2590), + make_floatx80_init(0x3FFF, 0xB311C412A9112489), + make_floatx80_init(0x3FFF, 0xB504F333F9DE6484), + make_floatx80_init(0x3FFF, 0xB6FD91E328D17791), + make_floatx80_init(0x3FFF, 0xB8FBAF4762FB9EE9), + make_floatx80_init(0x3FFF, 0xBAFF5AB2133E45FB), + make_floatx80_init(0x3FFF, 0xBD08A39F580C36BF), + make_floatx80_init(0x3FFF, 0xBF1799B67A731083), + make_floatx80_init(0x3FFF, 0xC12C4CCA66709456), + make_floatx80_init(0x3FFF, 0xC346CCDA24976407), + make_floatx80_init(0x3FFF, 0xC5672A115506DADD), + make_floatx80_init(0x3FFF, 0xC78D74C8ABB9B15D), + make_floatx80_init(0x3FFF, 0xC9B9BD866E2F27A3), + make_floatx80_init(0x3FFF, 0xCBEC14FEF2727C5D), + make_floatx80_init(0x3FFF, 0xCE248C151F8480E4), + make_floatx80_init(0x3FFF, 0xD06333DAEF2B2595), + make_floatx80_init(0x3FFF, 0xD2A81D91F12AE45A), + make_floatx80_init(0x3FFF, 0xD4F35AABCFEDFA1F), + make_floatx80_init(0x3FFF, 0xD744FCCAD69D6AF4), + 
make_floatx80_init(0x3FFF, 0xD99D15C278AFD7B6), + make_floatx80_init(0x3FFF, 0xDBFBB797DAF23755), + make_floatx80_init(0x3FFF, 0xDE60F4825E0E9124), + make_floatx80_init(0x3FFF, 0xE0CCDEEC2A94E111), + make_floatx80_init(0x3FFF, 0xE33F8972BE8A5A51), + make_floatx80_init(0x3FFF, 0xE5B906E77C8348A8), + make_floatx80_init(0x3FFF, 0xE8396A503C4BDC68), + make_floatx80_init(0x3FFF, 0xEAC0C6E7DD24392F), + make_floatx80_init(0x3FFF, 0xED4F301ED9942B84), + make_floatx80_init(0x3FFF, 0xEFE4B99BDCDAF5CB), + make_floatx80_init(0x3FFF, 0xF281773C59FFB13A), + make_floatx80_init(0x3FFF, 0xF5257D152486CC2C), + make_floatx80_init(0x3FFF, 0xF7D0DF730AD13BB9), + make_floatx80_init(0x3FFF, 0xFA83B2DB722A033A), + make_floatx80_init(0x3FFF, 0xFD3E0C0CF486C175) +}; + +static const uint32_t exp2_tbl2[64] = { + 0x3F738000, 0x3FBEF7CA, 0x3FBDF8A9, 0x3FBCD7C9, + 0xBFBDE8DA, 0x3FBDE85C, 0x3FBEBBF1, 0x3FBB80CA, + 0xBFBA8373, 0xBFBE9670, 0x3FBDB700, 0x3FBEEEB0, + 0x3FBBFD6D, 0xBFBDB319, 0x3FBDBA2B, 0x3FBE91D5, + 0x3FBE8D5A, 0xBFBCDE7B, 0xBFBEBAAF, 0xBFBD86DA, + 0xBFBEBEDD, 0x3FBCC96E, 0xBFBEC90B, 0x3FBBD1DB, + 0x3FBCE5EB, 0xBFBEC274, 0x3FBEA83C, 0x3FBECB00, + 0x3FBE9301, 0xBFBD8367, 0xBFBEF05F, 0x3FBDFB3C, + 0x3FBEB2FB, 0x3FBAE2CB, 0x3FBCDC3C, 0x3FBEE9AA, + 0xBFBEAEFD, 0xBFBCBF51, 0x3FBEF88A, 0x3FBD83B2, + 0x3FBDF8AB, 0xBFBDFB17, 0xBFBEFE3C, 0xBFBBB6F8, + 0xBFBCEE53, 0xBFBDA4AE, 0x3FBC9124, 0x3FBEB243, + 0x3FBDE69A, 0xBFB8BC61, 0x3FBDF610, 0xBFBD8BE1, + 0x3FBACB12, 0x3FBB9BFE, 0x3FBCF2F4, 0x3FBEF22F, + 0xBFBDBF4A, 0x3FBEC01A, 0x3FBE8CAC, 0xBFBCBB3F, + 0x3FBEF73A, 0xBFB8B795, 0x3FBEF84B, 0xBFBEF581 +}; + +static const floatx80 pi_tbl[65] = { + make_floatx80_init(0xC004, 0xC90FDAA22168C235), + make_floatx80_init(0xC004, 0xC2C75BCD105D7C23), + make_floatx80_init(0xC004, 0xBC7EDCF7FF523611), + make_floatx80_init(0xC004, 0xB6365E22EE46F000), + make_floatx80_init(0xC004, 0xAFEDDF4DDD3BA9EE), + make_floatx80_init(0xC004, 0xA9A56078CC3063DD), + make_floatx80_init(0xC004, 0xA35CE1A3BB251DCB), + make_floatx80_init(0xC004, 0x9D1462CEAA19D7B9), + make_floatx80_init(0xC004, 0x96CBE3F9990E91A8), + make_floatx80_init(0xC004, 0x9083652488034B96), + make_floatx80_init(0xC004, 0x8A3AE64F76F80584), + make_floatx80_init(0xC004, 0x83F2677A65ECBF73), + make_floatx80_init(0xC003, 0xFB53D14AA9C2F2C2), + make_floatx80_init(0xC003, 0xEEC2D3A087AC669F), + make_floatx80_init(0xC003, 0xE231D5F66595DA7B), + make_floatx80_init(0xC003, 0xD5A0D84C437F4E58), + make_floatx80_init(0xC003, 0xC90FDAA22168C235), + make_floatx80_init(0xC003, 0xBC7EDCF7FF523611), + make_floatx80_init(0xC003, 0xAFEDDF4DDD3BA9EE), + make_floatx80_init(0xC003, 0xA35CE1A3BB251DCB), + make_floatx80_init(0xC003, 0x96CBE3F9990E91A8), + make_floatx80_init(0xC003, 0x8A3AE64F76F80584), + make_floatx80_init(0xC002, 0xFB53D14AA9C2F2C2), + make_floatx80_init(0xC002, 0xE231D5F66595DA7B), + make_floatx80_init(0xC002, 0xC90FDAA22168C235), + make_floatx80_init(0xC002, 0xAFEDDF4DDD3BA9EE), + make_floatx80_init(0xC002, 0x96CBE3F9990E91A8), + make_floatx80_init(0xC001, 0xFB53D14AA9C2F2C2), + make_floatx80_init(0xC001, 0xC90FDAA22168C235), + make_floatx80_init(0xC001, 0x96CBE3F9990E91A8), + make_floatx80_init(0xC000, 0xC90FDAA22168C235), + make_floatx80_init(0xBFFF, 0xC90FDAA22168C235), + make_floatx80_init(0x0000, 0x0000000000000000), + make_floatx80_init(0x3FFF, 0xC90FDAA22168C235), + make_floatx80_init(0x4000, 0xC90FDAA22168C235), + make_floatx80_init(0x4001, 0x96CBE3F9990E91A8), + make_floatx80_init(0x4001, 0xC90FDAA22168C235), + make_floatx80_init(0x4001, 0xFB53D14AA9C2F2C2), + 
make_floatx80_init(0x4002, 0x96CBE3F9990E91A8), + make_floatx80_init(0x4002, 0xAFEDDF4DDD3BA9EE), + make_floatx80_init(0x4002, 0xC90FDAA22168C235), + make_floatx80_init(0x4002, 0xE231D5F66595DA7B), + make_floatx80_init(0x4002, 0xFB53D14AA9C2F2C2), + make_floatx80_init(0x4003, 0x8A3AE64F76F80584), + make_floatx80_init(0x4003, 0x96CBE3F9990E91A8), + make_floatx80_init(0x4003, 0xA35CE1A3BB251DCB), + make_floatx80_init(0x4003, 0xAFEDDF4DDD3BA9EE), + make_floatx80_init(0x4003, 0xBC7EDCF7FF523611), + make_floatx80_init(0x4003, 0xC90FDAA22168C235), + make_floatx80_init(0x4003, 0xD5A0D84C437F4E58), + make_floatx80_init(0x4003, 0xE231D5F66595DA7B), + make_floatx80_init(0x4003, 0xEEC2D3A087AC669F), + make_floatx80_init(0x4003, 0xFB53D14AA9C2F2C2), + make_floatx80_init(0x4004, 0x83F2677A65ECBF73), + make_floatx80_init(0x4004, 0x8A3AE64F76F80584), + make_floatx80_init(0x4004, 0x9083652488034B96), + make_floatx80_init(0x4004, 0x96CBE3F9990E91A8), + make_floatx80_init(0x4004, 0x9D1462CEAA19D7B9), + make_floatx80_init(0x4004, 0xA35CE1A3BB251DCB), + make_floatx80_init(0x4004, 0xA9A56078CC3063DD), + make_floatx80_init(0x4004, 0xAFEDDF4DDD3BA9EE), + make_floatx80_init(0x4004, 0xB6365E22EE46F000), + make_floatx80_init(0x4004, 0xBC7EDCF7FF523611), + make_floatx80_init(0x4004, 0xC2C75BCD105D7C23), + make_floatx80_init(0x4004, 0xC90FDAA22168C235) +}; + +static const float32 pi_tbl2[65] = { + const_float32(0x21800000), + const_float32(0xA0D00000), + const_float32(0xA1E80000), + const_float32(0x21480000), + const_float32(0xA1200000), + const_float32(0x21FC0000), + const_float32(0x21100000), + const_float32(0xA1580000), + const_float32(0x21E00000), + const_float32(0x20B00000), + const_float32(0xA1880000), + const_float32(0x21C40000), + const_float32(0x20000000), + const_float32(0x21380000), + const_float32(0xA1300000), + const_float32(0x9FC00000), + const_float32(0x21000000), + const_float32(0xA1680000), + const_float32(0xA0A00000), + const_float32(0x20900000), + const_float32(0x21600000), + const_float32(0xA1080000), + const_float32(0x1F800000), + const_float32(0xA0B00000), + const_float32(0x20800000), + const_float32(0xA0200000), + const_float32(0x20E00000), + const_float32(0x1F000000), + const_float32(0x20000000), + const_float32(0x20600000), + const_float32(0x1F800000), + const_float32(0x1F000000), + const_float32(0x00000000), + const_float32(0x9F000000), + const_float32(0x9F800000), + const_float32(0xA0600000), + const_float32(0xA0000000), + const_float32(0x9F000000), + const_float32(0xA0E00000), + const_float32(0x20200000), + const_float32(0xA0800000), + const_float32(0x20B00000), + const_float32(0x9F800000), + const_float32(0x21080000), + const_float32(0xA1600000), + const_float32(0xA0900000), + const_float32(0x20A00000), + const_float32(0x21680000), + const_float32(0xA1000000), + const_float32(0x1FC00000), + const_float32(0x21300000), + const_float32(0xA1380000), + const_float32(0xA0000000), + const_float32(0xA1C40000), + const_float32(0x21880000), + const_float32(0xA0B00000), + const_float32(0xA1E00000), + const_float32(0x21580000), + const_float32(0xA1100000), + const_float32(0xA1FC0000), + const_float32(0x21200000), + const_float32(0xA1480000), + const_float32(0x21E80000), + const_float32(0x20D00000), + const_float32(0xA1800000), +}; + +static const floatx80 atan_tbl[128] = { + make_floatx80_init(0x3FFB, 0x83D152C5060B7A51), + make_floatx80_init(0x3FFB, 0x8BC8544565498B8B), + make_floatx80_init(0x3FFB, 0x93BE406017626B0D), + make_floatx80_init(0x3FFB, 0x9BB3078D35AEC202), + make_floatx80_init(0x3FFB, 
0xA3A69A525DDCE7DE), + make_floatx80_init(0x3FFB, 0xAB98E94362765619), + make_floatx80_init(0x3FFB, 0xB389E502F9C59862), + make_floatx80_init(0x3FFB, 0xBB797E436B09E6FB), + make_floatx80_init(0x3FFB, 0xC367A5C739E5F446), + make_floatx80_init(0x3FFB, 0xCB544C61CFF7D5C6), + make_floatx80_init(0x3FFB, 0xD33F62F82488533E), + make_floatx80_init(0x3FFB, 0xDB28DA8162404C77), + make_floatx80_init(0x3FFB, 0xE310A4078AD34F18), + make_floatx80_init(0x3FFB, 0xEAF6B0A8188EE1EB), + make_floatx80_init(0x3FFB, 0xF2DAF1949DBE79D5), + make_floatx80_init(0x3FFB, 0xFABD581361D47E3E), + make_floatx80_init(0x3FFC, 0x8346AC210959ECC4), + make_floatx80_init(0x3FFC, 0x8B232A08304282D8), + make_floatx80_init(0x3FFC, 0x92FB70B8D29AE2F9), + make_floatx80_init(0x3FFC, 0x9ACF476F5CCD1CB4), + make_floatx80_init(0x3FFC, 0xA29E76304954F23F), + make_floatx80_init(0x3FFC, 0xAA68C5D08AB85230), + make_floatx80_init(0x3FFC, 0xB22DFFFD9D539F83), + make_floatx80_init(0x3FFC, 0xB9EDEF453E900EA5), + make_floatx80_init(0x3FFC, 0xC1A85F1CC75E3EA5), + make_floatx80_init(0x3FFC, 0xC95D1BE828138DE6), + make_floatx80_init(0x3FFC, 0xD10BF300840D2DE4), + make_floatx80_init(0x3FFC, 0xD8B4B2BA6BC05E7A), + make_floatx80_init(0x3FFC, 0xE0572A6BB42335F6), + make_floatx80_init(0x3FFC, 0xE7F32A70EA9CAA8F), + make_floatx80_init(0x3FFC, 0xEF88843264ECEFAA), + make_floatx80_init(0x3FFC, 0xF7170A28ECC06666), + make_floatx80_init(0x3FFD, 0x812FD288332DAD32), + make_floatx80_init(0x3FFD, 0x88A8D1B1218E4D64), + make_floatx80_init(0x3FFD, 0x9012AB3F23E4AEE8), + make_floatx80_init(0x3FFD, 0x976CC3D411E7F1B9), + make_floatx80_init(0x3FFD, 0x9EB689493889A227), + make_floatx80_init(0x3FFD, 0xA5EF72C34487361B), + make_floatx80_init(0x3FFD, 0xAD1700BAF07A7227), + make_floatx80_init(0x3FFD, 0xB42CBCFAFD37EFB7), + make_floatx80_init(0x3FFD, 0xBB303A940BA80F89), + make_floatx80_init(0x3FFD, 0xC22115C6FCAEBBAF), + make_floatx80_init(0x3FFD, 0xC8FEF3E686331221), + make_floatx80_init(0x3FFD, 0xCFC98330B4000C70), + make_floatx80_init(0x3FFD, 0xD6807AA1102C5BF9), + make_floatx80_init(0x3FFD, 0xDD2399BC31252AA3), + make_floatx80_init(0x3FFD, 0xE3B2A8556B8FC517), + make_floatx80_init(0x3FFD, 0xEA2D764F64315989), + make_floatx80_init(0x3FFD, 0xF3BF5BF8BAD1A21D), + make_floatx80_init(0x3FFE, 0x801CE39E0D205C9A), + make_floatx80_init(0x3FFE, 0x8630A2DADA1ED066), + make_floatx80_init(0x3FFE, 0x8C1AD445F3E09B8C), + make_floatx80_init(0x3FFE, 0x91DB8F1664F350E2), + make_floatx80_init(0x3FFE, 0x97731420365E538C), + make_floatx80_init(0x3FFE, 0x9CE1C8E6A0B8CDBA), + make_floatx80_init(0x3FFE, 0xA22832DBCADAAE09), + make_floatx80_init(0x3FFE, 0xA746F2DDB7602294), + make_floatx80_init(0x3FFE, 0xAC3EC0FB997DD6A2), + make_floatx80_init(0x3FFE, 0xB110688AEBDC6F6A), + make_floatx80_init(0x3FFE, 0xB5BCC49059ECC4B0), + make_floatx80_init(0x3FFE, 0xBA44BC7DD470782F), + make_floatx80_init(0x3FFE, 0xBEA94144FD049AAC), + make_floatx80_init(0x3FFE, 0xC2EB4ABB661628B6), + make_floatx80_init(0x3FFE, 0xC70BD54CE602EE14), + make_floatx80_init(0x3FFE, 0xCD000549ADEC7159), + make_floatx80_init(0x3FFE, 0xD48457D2D8EA4EA3), + make_floatx80_init(0x3FFE, 0xDB948DA712DECE3B), + make_floatx80_init(0x3FFE, 0xE23855F969E8096A), + make_floatx80_init(0x3FFE, 0xE8771129C4353259), + make_floatx80_init(0x3FFE, 0xEE57C16E0D379C0D), + make_floatx80_init(0x3FFE, 0xF3E10211A87C3779), + make_floatx80_init(0x3FFE, 0xF919039D758B8D41), + make_floatx80_init(0x3FFE, 0xFE058B8F64935FB3), + make_floatx80_init(0x3FFF, 0x8155FB497B685D04), + make_floatx80_init(0x3FFF, 0x83889E3549D108E1), + make_floatx80_init(0x3FFF, 
0x859CFA76511D724B), + make_floatx80_init(0x3FFF, 0x87952ECFFF8131E7), + make_floatx80_init(0x3FFF, 0x89732FD19557641B), + make_floatx80_init(0x3FFF, 0x8B38CAD101932A35), + make_floatx80_init(0x3FFF, 0x8CE7A8D8301EE6B5), + make_floatx80_init(0x3FFF, 0x8F46A39E2EAE5281), + make_floatx80_init(0x3FFF, 0x922DA7D791888487), + make_floatx80_init(0x3FFF, 0x94D19FCBDEDF5241), + make_floatx80_init(0x3FFF, 0x973AB94419D2A08B), + make_floatx80_init(0x3FFF, 0x996FF00E08E10B96), + make_floatx80_init(0x3FFF, 0x9B773F9512321DA7), + make_floatx80_init(0x3FFF, 0x9D55CC320F935624), + make_floatx80_init(0x3FFF, 0x9F100575006CC571), + make_floatx80_init(0x3FFF, 0xA0A9C290D97CC06C), + make_floatx80_init(0x3FFF, 0xA22659EBEBC0630A), + make_floatx80_init(0x3FFF, 0xA388B4AFF6EF0EC9), + make_floatx80_init(0x3FFF, 0xA4D35F1061D292C4), + make_floatx80_init(0x3FFF, 0xA60895DCFBE3187E), + make_floatx80_init(0x3FFF, 0xA72A51DC7367BEAC), + make_floatx80_init(0x3FFF, 0xA83A51530956168F), + make_floatx80_init(0x3FFF, 0xA93A20077539546E), + make_floatx80_init(0x3FFF, 0xAA9E7245023B2605), + make_floatx80_init(0x3FFF, 0xAC4C84BA6FE4D58F), + make_floatx80_init(0x3FFF, 0xADCE4A4A606B9712), + make_floatx80_init(0x3FFF, 0xAF2A2DCD8D263C9C), + make_floatx80_init(0x3FFF, 0xB0656F81F22265C7), + make_floatx80_init(0x3FFF, 0xB18465150F71496A), + make_floatx80_init(0x3FFF, 0xB28AAA156F9ADA35), + make_floatx80_init(0x3FFF, 0xB37B44FF3766B895), + make_floatx80_init(0x3FFF, 0xB458C3DCE9630433), + make_floatx80_init(0x3FFF, 0xB525529D562246BD), + make_floatx80_init(0x3FFF, 0xB5E2CCA95F9D88CC), + make_floatx80_init(0x3FFF, 0xB692CADA7ACA1ADA), + make_floatx80_init(0x3FFF, 0xB736AEA7A6925838), + make_floatx80_init(0x3FFF, 0xB7CFAB287E9F7B36), + make_floatx80_init(0x3FFF, 0xB85ECC66CB219835), + make_floatx80_init(0x3FFF, 0xB8E4FD5A20A593DA), + make_floatx80_init(0x3FFF, 0xB99F41F64AFF9BB5), + make_floatx80_init(0x3FFF, 0xBA7F1E17842BBE7B), + make_floatx80_init(0x3FFF, 0xBB4712857637E17D), + make_floatx80_init(0x3FFF, 0xBBFABE8A4788DF6F), + make_floatx80_init(0x3FFF, 0xBC9D0FAD2B689D79), + make_floatx80_init(0x3FFF, 0xBD306A39471ECD86), + make_floatx80_init(0x3FFF, 0xBDB6C731856AF18A), + make_floatx80_init(0x3FFF, 0xBE31CAC502E80D70), + make_floatx80_init(0x3FFF, 0xBEA2D55CE33194E2), + make_floatx80_init(0x3FFF, 0xBF0B10B7C03128F0), + make_floatx80_init(0x3FFF, 0xBF6B7A18DACB778D), + make_floatx80_init(0x3FFF, 0xBFC4EA4663FA18F6), + make_floatx80_init(0x3FFF, 0xC0181BDE8B89A454), + make_floatx80_init(0x3FFF, 0xC065B066CFBF6439), + make_floatx80_init(0x3FFF, 0xC0AE345F56340AE6), + make_floatx80_init(0x3FFF, 0xC0F222919CB9E6A7) +}; +#endif diff --git a/qemu/target/m68k/translate.c b/qemu/target/m68k/translate.c new file mode 100644 index 00000000..323eff44 --- /dev/null +++ b/qemu/target/m68k/translate.c @@ -0,0 +1,6452 @@ +/* + * m68k translation + * + * Copyright (c) 2005-2007 CodeSourcery + * Written by Paul Brook + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "exec/translator.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "fpu/softfloat.h"
+
+
+//#define DEBUG_DISPATCH 1
+
+#define DEFO32(name, offset) static TCGv QREG_##name;
+#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
+#include "qregs.def"
+#undef DEFO32
+#undef DEFO64
+
+#define REG(insn, pos) (((insn) >> (pos)) & 7)
+#define DREG(insn, pos) tcg_ctx->cpu_dregs[REG(insn, pos)]
+#define AREG(insn, pos) get_areg(s, REG(insn, pos))
+#define MACREG(acc) tcg_ctx->cpu_macc[acc]
+#define QREG_SP get_areg(s, 7)
+
+#define IS_NULL_QREG(t) (t == tcg_ctx->NULL_QREG)
+
+#include "exec/gen-icount.h"
+
+void m68k_tcg_init(struct uc_struct *uc)
+{
+    TCGContext *tcg_ctx = uc->tcg_ctx;
+    char *p;
+    int i;
+
+#define DEFO32(name, offset) \
+    QREG_##name = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, \
+        offsetof(CPUM68KState, offset), #name);
+#define DEFO64(name, offset) \
+    QREG_##name = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, \
+        offsetof(CPUM68KState, offset), #name);
+#include "qregs.def"
+#undef DEFO32
+#undef DEFO64
+
+    tcg_ctx->cpu_halted = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env,
+#ifdef _MSC_VER
+                                        0 - offsetof(M68kCPU, env) +
+#else
+                                        -offsetof(M68kCPU, env) +
+#endif
+                                        offsetof(CPUState, halted), "HALTED");
+    tcg_ctx->cpu_exception_index = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env,
+#ifdef _MSC_VER
+                                                 0 - offsetof(M68kCPU, env) +
+#else
+                                                 -offsetof(M68kCPU, env) +
+#endif
+                                                 offsetof(CPUState, exception_index),
+                                                 "EXCEPTION");
+
+    p = tcg_ctx->cpu_reg_names;
+    for (i = 0; i < 8; i++) {
+        sprintf(p, "D%d", i);
+        tcg_ctx->cpu_dregs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env,
+                                          offsetof(CPUM68KState, dregs[i]), p);
+        p += 3;
+        sprintf(p, "A%d", i);
+        tcg_ctx->cpu_aregs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env,
+                                          offsetof(CPUM68KState, aregs[i]), p);
+        p += 3;
+    }
+    for (i = 0; i < 4; i++) {
+        sprintf(p, "ACC%d", i);
+        tcg_ctx->cpu_macc[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env,
+                                         offsetof(CPUM68KState, macc[i]), p);
+        p += 5;
+    }
+
+    tcg_ctx->NULL_QREG = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, -4, "NULL");
+    tcg_ctx->store_dummy = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, -8, "NULL");
+}
+
+/* internal defines */
+typedef struct DisasContext {
+    DisasContextBase base;
+    CPUM68KState *env;
+    target_ulong pc;
+    CCOp cc_op; /* Current CC operation */
+    int cc_op_synced;
+    TCGv_i64 mactmp;
+    int done_mac;
+    int writeback_mask;
+    TCGv writeback[8];
+#define MAX_TO_RELEASE 8
+    int release_count;
+    TCGv release[MAX_TO_RELEASE];
+
+    // Unicorn
+    struct uc_struct *uc;
+} DisasContext;
+
+static void init_release_array(DisasContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+    memset(s->release, 0, sizeof(s->release));
+#endif
+    s->release_count = 0;
+}
+
+static void do_release(DisasContext *s)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int i;
+    for (i = 0; i < s->release_count; i++) {
+        tcg_temp_free(tcg_ctx, s->release[i]);
+    }
+    init_release_array(s);
+}
+
+static TCGv mark_to_release(DisasContext *s, TCGv tmp)
+{
+    g_assert(s->release_count < MAX_TO_RELEASE);
+    return s->release[s->release_count++] = tmp;
+}
+
+static TCGv get_areg(DisasContext *s, unsigned regno)
+{
+    if (s->writeback_mask & (1 << regno)) {
+        return s->writeback[regno];
+    }
else { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + return tcg_ctx->cpu_aregs[regno]; + } +} + +static void delay_set_areg(DisasContext *s, unsigned regno, + TCGv val, bool give_temp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (s->writeback_mask & (1 << regno)) { + if (give_temp) { + tcg_temp_free(tcg_ctx, s->writeback[regno]); + s->writeback[regno] = val; + } else { + tcg_gen_mov_i32(tcg_ctx, s->writeback[regno], val); + } + } else { + s->writeback_mask |= 1 << regno; + if (give_temp) { + s->writeback[regno] = val; + } else { + TCGv tmp = tcg_temp_new(tcg_ctx); + s->writeback[regno] = tmp; + tcg_gen_mov_i32(tcg_ctx, tmp, val); + } + } +} + +static void do_writebacks(DisasContext *s) +{ + unsigned mask = s->writeback_mask; + if (mask) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + s->writeback_mask = 0; + do { + unsigned regno = ctz32(mask); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_aregs[regno], s->writeback[regno]); + tcg_temp_free(tcg_ctx, s->writeback[regno]); + mask &= mask - 1; + } while (mask); + } +} + +/* is_jmp field values */ +#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ +#define DISAS_EXIT DISAS_TARGET_1 /* cpu state was modified dynamically */ + +#define IS_USER(s) (!(s->base.tb->flags & TB_FLAGS_MSR_S)) +#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \ + MMU_KERNEL_IDX : MMU_USER_IDX) +#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \ + MMU_KERNEL_IDX : MMU_USER_IDX) + +typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn); + +#ifdef DEBUG_DISPATCH +#define DISAS_INSN(name) \ + static void real_disas_##name(CPUM68KState *env, DisasContext *s, \ + uint16_t insn); \ + static void disas_##name(CPUM68KState *env, DisasContext *s, \ + uint16_t insn) \ + { \ + qemu_log("Dispatch " #name "\n"); \ + real_disas_##name(env, s, insn); \ + } \ + static void real_disas_##name(CPUM68KState *env, DisasContext *s, \ + uint16_t insn) +#else +#define DISAS_INSN(name) \ + static void disas_##name(CPUM68KState *env, DisasContext *s, \ + uint16_t insn) +#endif + +static const uint8_t cc_op_live[CC_OP_NB] = { + [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X, + [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X, + + [CC_OP_ADDB] = CCF_X | CCF_N | CCF_V, + [CC_OP_ADDW] = CCF_X | CCF_N | CCF_V, + [CC_OP_ADDL] = CCF_X | CCF_N | CCF_V, + + [CC_OP_SUBB] = CCF_X | CCF_N | CCF_V, + [CC_OP_SUBW] = CCF_X | CCF_N | CCF_V, + [CC_OP_SUBL] = CCF_X | CCF_N | CCF_V, + + [CC_OP_CMPB] = CCF_X | CCF_N | CCF_V, + [CC_OP_CMPW] = CCF_X | CCF_N | CCF_V, + [CC_OP_CMPL] = CCF_X | CCF_N | CCF_V, + + [CC_OP_LOGIC] = CCF_X | CCF_N +}; + +static void set_cc_op(DisasContext *s, CCOp op) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + CCOp old_op = s->cc_op; + int dead; + + if (old_op == op) { + return; + } + s->cc_op = op; + s->cc_op_synced = 0; + + /* + * Discard CC computation that will no longer be used. + * Note that X and N are never dead. + */ + dead = cc_op_live[old_op] & ~cc_op_live[op]; + if (dead & CCF_C) { + tcg_gen_discard_i32(tcg_ctx, QREG_CC_C); + } + if (dead & CCF_Z) { + tcg_gen_discard_i32(tcg_ctx, QREG_CC_Z); + } + if (dead & CCF_V) { + tcg_gen_discard_i32(tcg_ctx, QREG_CC_V); + } +} + +/* Update the CPU env CC_OP state. */ +static void update_cc_op(DisasContext *s) +{ + if (!s->cc_op_synced) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + s->cc_op_synced = 1; + tcg_gen_movi_i32(tcg_ctx, QREG_CC_OP, s->cc_op); + } +} + +/* Generate a jump to an immediate address. 
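+ * The target is written to QREG_PC and the TB is closed with DISAS_JUMP,
+ * so the execution loop resolves the successor block from the new PC.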
+ */
+static void gen_jmp_im(DisasContext *s, uint32_t dest)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    update_cc_op(s);
+    tcg_gen_movi_i32(tcg_ctx, QREG_PC, dest);
+    s->base.is_jmp = DISAS_JUMP;
+}
+
+/* Generate a jump to the address in qreg DEST. */
+static void gen_jmp(DisasContext *s, TCGv dest)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    update_cc_op(s);
+    tcg_gen_mov_i32(tcg_ctx, QREG_PC, dest);
+    s->base.is_jmp = DISAS_JUMP;
+}
+
+static void gen_raise_exception(TCGContext *tcg_ctx, int nr)
+{
+    TCGv_i32 tmp;
+
+    tmp = tcg_const_i32(tcg_ctx, nr);
+    gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, tmp);
+    tcg_temp_free_i32(tcg_ctx, tmp);
+}
+
+static void gen_exception(DisasContext *s, uint32_t dest, int nr)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    update_cc_op(s);
+    tcg_gen_movi_i32(tcg_ctx, QREG_PC, dest);
+
+    gen_raise_exception(tcg_ctx, nr);
+
+    s->base.is_jmp = DISAS_NORETURN;
+}
+
+static inline void gen_addr_fault(DisasContext *s)
+{
+    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
+}
+
+/*
+ * Generate a load from the specified address. Narrow values are
+ * sign extended to full register width.
+ */
+static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
+                            int sign, int index)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv tmp;
+    tmp = tcg_temp_new_i32(tcg_ctx);
+    switch(opsize) {
+    case OS_BYTE:
+        if (sign)
+            tcg_gen_qemu_ld8s(tcg_ctx, tmp, addr, index);
+        else
+            tcg_gen_qemu_ld8u(tcg_ctx, tmp, addr, index);
+        break;
+    case OS_WORD:
+        if (sign)
+            tcg_gen_qemu_ld16s(tcg_ctx, tmp, addr, index);
+        else
+            tcg_gen_qemu_ld16u(tcg_ctx, tmp, addr, index);
+        break;
+    case OS_LONG:
+        tcg_gen_qemu_ld32u(tcg_ctx, tmp, addr, index);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    return tmp;
+}
+
+/* Generate a store. */
+static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
+                             int index)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    switch(opsize) {
+    case OS_BYTE:
+        tcg_gen_qemu_st8(tcg_ctx, val, addr, index);
+        break;
+    case OS_WORD:
+        tcg_gen_qemu_st16(tcg_ctx, val, addr, index);
+        break;
+    case OS_LONG:
+        tcg_gen_qemu_st32(tcg_ctx, val, addr, index);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+typedef enum {
+    EA_STORE,
+    EA_LOADU,
+    EA_LOADS
+} ea_what;
+
+/*
+ * Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT is
+ * EA_LOADS, otherwise (EA_STORE) generate a store.
+ */
+static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
+                     ea_what what, int index)
+{
+    if (what == EA_STORE) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        gen_store(s, opsize, addr, val, index);
+        return tcg_ctx->store_dummy;
+    } else {
+        return mark_to_release(s, gen_load(s, opsize, addr,
+                                           what == EA_LOADS, index));
+    }
+}
+
+/* Read a 16-bit immediate constant */
+static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    uint16_t im;
+    im = translator_lduw(tcg_ctx, env, s->pc);
+    s->pc += 2;
+    return im;
+}
+
+/* Read an 8-bit immediate constant */
+static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
+{
+    return read_im16(env, s);
+}
+
+/* Read a 32-bit immediate constant. */
+static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
+{
+    uint32_t im;
+    im = read_im16(env, s) << 16;
+    im |= 0xffff & read_im16(env, s);
+    return im;
+}
+
+/* Read a 64-bit immediate constant.
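+ * Assembled from two read_im32() fetches, most significant longword first,
+ * matching the big-endian m68k instruction stream.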
+ */
+static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
+{
+    uint64_t im;
+    im = (uint64_t)read_im32(env, s) << 32;
+    im |= (uint64_t)read_im32(env, s);
+    return im;
+}
+
+/* Calculate an address index. */
+static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv add;
+    int scale;
+
+    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
+    if ((ext & 0x800) == 0) {
+        tcg_gen_ext16s_i32(tcg_ctx, tmp, add);
+        add = tmp;
+    }
+    scale = (ext >> 9) & 3;
+    if (scale != 0) {
+        tcg_gen_shli_i32(tcg_ctx, tmp, add, scale);
+        add = tmp;
+    }
+    return add;
+}
+
+/*
+ * Handle a base + index + displacement effective address.
+ * A NULL_QREG base means pc-relative.
+ */
+static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    uint32_t offset;
+    uint16_t ext;
+    TCGv add;
+    TCGv tmp;
+    uint32_t bd, od;
+
+    offset = s->pc;
+    ext = read_im16(env, s);
+
+    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
+        return tcg_ctx->NULL_QREG;
+
+    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
+        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
+        ext &= ~(3 << 9);
+    }
+
+    if (ext & 0x100) {
+        /* full extension word format */
+        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
+            return tcg_ctx->NULL_QREG;
+
+        if ((ext & 0x30) > 0x10) {
+            /* base displacement */
+            if ((ext & 0x30) == 0x20) {
+                bd = (int16_t)read_im16(env, s);
+            } else {
+                bd = read_im32(env, s);
+            }
+        } else {
+            bd = 0;
+        }
+        tmp = mark_to_release(s, tcg_temp_new(tcg_ctx));
+        if ((ext & 0x44) == 0) {
+            /* pre-index */
+            add = gen_addr_index(s, ext, tmp);
+        } else {
+            add = tcg_ctx->NULL_QREG;
+        }
+        if ((ext & 0x80) == 0) {
+            /* base not suppressed */
+            if (IS_NULL_QREG(base)) {
+                base = mark_to_release(s, tcg_const_i32(tcg_ctx, offset + bd));
+                bd = 0;
+            }
+            if (!IS_NULL_QREG(add)) {
+                tcg_gen_add_i32(tcg_ctx, tmp, add, base);
+                add = tmp;
+            } else {
+                add = base;
+            }
+        }
+        if (!IS_NULL_QREG(add)) {
+            if (bd != 0) {
+                tcg_gen_addi_i32(tcg_ctx, tmp, add, bd);
+                add = tmp;
+            }
+        } else {
+            add = mark_to_release(s, tcg_const_i32(tcg_ctx, bd));
+        }
+        if ((ext & 3) != 0) {
+            /* memory indirect */
+            base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s)));
+            if ((ext & 0x44) == 4) {
+                add = gen_addr_index(s, ext, tmp);
+                tcg_gen_add_i32(tcg_ctx, tmp, add, base);
+                add = tmp;
+            } else {
+                add = base;
+            }
+            if ((ext & 3) > 1) {
+                /* outer displacement */
+                if ((ext & 3) == 2) {
+                    od = (int16_t)read_im16(env, s);
+                } else {
+                    od = read_im32(env, s);
+                }
+            } else {
+                od = 0;
+            }
+            if (od != 0) {
+                tcg_gen_addi_i32(tcg_ctx, tmp, add, od);
+                add = tmp;
+            }
+        }
+    } else {
+        /* brief extension word format */
+        tmp = mark_to_release(s, tcg_temp_new(tcg_ctx));
+        add = gen_addr_index(s, ext, tmp);
+        if (!IS_NULL_QREG(base)) {
+            tcg_gen_add_i32(tcg_ctx, tmp, add, base);
+            if ((int8_t)ext)
+                tcg_gen_addi_i32(tcg_ctx, tmp, tmp, (int8_t)ext);
+        } else {
+            tcg_gen_addi_i32(tcg_ctx, tmp, add, offset + (int8_t)ext);
+        }
+        add = tmp;
+    }
+    return add;
+}
+
+/* Sign or zero extend a value.
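+ * For example, OS_BYTE with SIGN set maps 0x80 to 0xffffff80, while with
+ * SIGN clear it yields 0x00000080; OS_LONG is a plain 32-bit move.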
*/ + +static inline void gen_ext(TCGContext *tcg_ctx, TCGv res, TCGv val, int opsize, int sign) +{ + switch (opsize) { + case OS_BYTE: + if (sign) { + tcg_gen_ext8s_i32(tcg_ctx, res, val); + } else { + tcg_gen_ext8u_i32(tcg_ctx, res, val); + } + break; + case OS_WORD: + if (sign) { + tcg_gen_ext16s_i32(tcg_ctx, res, val); + } else { + tcg_gen_ext16u_i32(tcg_ctx, res, val); + } + break; + case OS_LONG: + tcg_gen_mov_i32(tcg_ctx, res, val); + break; + default: + g_assert_not_reached(); + } +} + +/* Evaluate all the CC flags. */ + +static void gen_flush_flags(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv t0, t1; + + switch (s->cc_op) { + case CC_OP_FLAGS: + return; + + case CC_OP_ADDB: + case CC_OP_ADDW: + case CC_OP_ADDL: + tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, QREG_CC_X); + tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); + /* Compute signed overflow for addition. */ + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + tcg_gen_sub_i32(tcg_ctx, t0, QREG_CC_N, QREG_CC_V); + gen_ext(tcg_ctx, t0, t0, s->cc_op - CC_OP_ADDB, 1); + tcg_gen_xor_i32(tcg_ctx, t1, QREG_CC_N, QREG_CC_V); + tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, t0); + tcg_temp_free(tcg_ctx, t0); + tcg_gen_andc_i32(tcg_ctx, QREG_CC_V, t1, QREG_CC_V); + tcg_temp_free(tcg_ctx, t1); + break; + + case CC_OP_SUBB: + case CC_OP_SUBW: + case CC_OP_SUBL: + tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, QREG_CC_X); + tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); + /* Compute signed overflow for subtraction. */ + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + tcg_gen_add_i32(tcg_ctx, t0, QREG_CC_N, QREG_CC_V); + gen_ext(tcg_ctx, t0, t0, s->cc_op - CC_OP_SUBB, 1); + tcg_gen_xor_i32(tcg_ctx, t1, QREG_CC_N, t0); + tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, t0); + tcg_temp_free(tcg_ctx, t0); + tcg_gen_and_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, t1); + tcg_temp_free(tcg_ctx, t1); + break; + + case CC_OP_CMPB: + case CC_OP_CMPW: + case CC_OP_CMPL: + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V); + tcg_gen_sub_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N, QREG_CC_V); + gen_ext(tcg_ctx, QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1); + /* Compute signed overflow for subtraction. */ + t0 = tcg_temp_new(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, t0, QREG_CC_Z, QREG_CC_N); + tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, QREG_CC_N); + tcg_gen_and_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, t0); + tcg_temp_free(tcg_ctx, t0); + tcg_gen_mov_i32(tcg_ctx, QREG_CC_N, QREG_CC_Z); + break; + + case CC_OP_LOGIC: + tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); + tcg_gen_movi_i32(tcg_ctx, QREG_CC_C, 0); + tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); + break; + + case CC_OP_DYNAMIC: + gen_helper_flush_flags(tcg_ctx, tcg_ctx->cpu_env, QREG_CC_OP); + s->cc_op_synced = 1; + break; + + default: + t0 = tcg_const_i32(tcg_ctx, s->cc_op); + gen_helper_flush_flags(tcg_ctx, tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); + s->cc_op_synced = 1; + break; + } + + /* Note that flush_flags also assigned to env->cc_op. 
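+ * Each case above leaves concrete C/V/Z/N/X values behind, which is
+ * exactly the state that CC_OP_FLAGS describes.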
+ */
+    s->cc_op = CC_OP_FLAGS;
+}
+
+static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv tmp;
+
+    if (opsize == OS_LONG) {
+        tmp = val;
+    } else {
+        tmp = mark_to_release(s, tcg_temp_new(tcg_ctx));
+        gen_ext(tcg_ctx, tmp, val, opsize, sign);
+    }
+
+    return tmp;
+}
+
+static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    gen_ext(tcg_ctx, QREG_CC_N, val, opsize, 1);
+    set_cc_op(s, CC_OP_LOGIC);
+}
+
+static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    tcg_gen_mov_i32(tcg_ctx, QREG_CC_N, dest);
+    tcg_gen_mov_i32(tcg_ctx, QREG_CC_V, src);
+    set_cc_op(s, CC_OP_CMPB + opsize);
+}
+
+static void gen_update_cc_add(TCGContext *tcg_ctx, TCGv dest, TCGv src, int opsize)
+{
+    gen_ext(tcg_ctx, QREG_CC_N, dest, opsize, 1);
+    tcg_gen_mov_i32(tcg_ctx, QREG_CC_V, src);
+}
+
+static inline int opsize_bytes(int opsize)
+{
+    switch (opsize) {
+    case OS_BYTE: return 1;
+    case OS_WORD: return 2;
+    case OS_LONG: return 4;
+    case OS_SINGLE: return 4;
+    case OS_DOUBLE: return 8;
+    case OS_EXTENDED: return 12;
+    case OS_PACKED: return 12;
+    default:
+        // g_assert_not_reached();
+        return 0;
+    }
+}
+
+static inline int insn_opsize(int insn)
+{
+    switch ((insn >> 6) & 3) {
+    case 0: return OS_BYTE;
+    case 1: return OS_WORD;
+    case 2: return OS_LONG;
+    default:
+        // g_assert_not_reached();
+        return 0;
+    }
+}
+
+static inline int ext_opsize(int ext, int pos)
+{
+    switch ((ext >> pos) & 7) {
+    case 0: return OS_LONG;
+    case 1: return OS_SINGLE;
+    case 2: return OS_EXTENDED;
+    case 3: return OS_PACKED;
+    case 4: return OS_WORD;
+    case 5: return OS_DOUBLE;
+    case 6: return OS_BYTE;
+    default:
+        // g_assert_not_reached();
+        return 0;
+    }
+}
+
+/*
+ * Assign value to a register. If the width is less than the register width
+ * only the low part of the register is set.
+ */
+static void gen_partset_reg(TCGContext *tcg_ctx, int opsize, TCGv reg, TCGv val)
+{
+    TCGv tmp;
+    switch (opsize) {
+    case OS_BYTE:
+        tcg_gen_andi_i32(tcg_ctx, reg, reg, 0xffffff00);
+        tmp = tcg_temp_new(tcg_ctx);
+        tcg_gen_ext8u_i32(tcg_ctx, tmp, val);
+        tcg_gen_or_i32(tcg_ctx, reg, reg, tmp);
+        tcg_temp_free(tcg_ctx, tmp);
+        break;
+    case OS_WORD:
+        tcg_gen_andi_i32(tcg_ctx, reg, reg, 0xffff0000);
+        tmp = tcg_temp_new(tcg_ctx);
+        tcg_gen_ext16u_i32(tcg_ctx, tmp, val);
+        tcg_gen_or_i32(tcg_ctx, reg, reg, tmp);
+        tcg_temp_free(tcg_ctx, tmp);
+        break;
+    case OS_LONG:
+    case OS_SINGLE:
+        tcg_gen_mov_i32(tcg_ctx, reg, val);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+/*
+ * Generate code for an "effective address". Does not adjust the base
+ * register for autoincrement addressing modes.
+ */
+static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
+                         int mode, int reg0, int opsize)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv reg;
+    TCGv tmp;
+    uint16_t ext;
+    uint32_t offset;
+
+    switch (mode) {
+    case 0: /* Data register direct. */
+    case 1: /* Address register direct. */
+        return tcg_ctx->NULL_QREG;
+    case 3: /* Indirect postincrement. */
+        if (opsize == OS_UNSIZED) {
+            return tcg_ctx->NULL_QREG;
+        }
+        /* fallthru */
+    case 2: /* Indirect register */
+        return get_areg(s, reg0);
+    case 4: /* Indirect predecrement. */
+        if (opsize == OS_UNSIZED) {
+            return tcg_ctx->NULL_QREG;
+        }
+        reg = get_areg(s, reg0);
+        tmp = mark_to_release(s, tcg_temp_new(tcg_ctx));
+        if (reg0 == 7 && opsize == OS_BYTE &&
+            m68k_feature(s->env, M68K_FEATURE_M68000)) {
+            tcg_gen_subi_i32(tcg_ctx, tmp, reg, 2);
+        } else {
+            tcg_gen_subi_i32(tcg_ctx, tmp, reg, opsize_bytes(opsize));
+        }
+        return tmp;
+    case 5: /* Indirect displacement. */
+        reg = get_areg(s, reg0);
+        tmp = mark_to_release(s, tcg_temp_new(tcg_ctx));
+        ext = read_im16(env, s);
+        tcg_gen_addi_i32(tcg_ctx, tmp, reg, (int16_t)ext);
+        return tmp;
+    case 6: /* Indirect index + displacement. */
+        reg = get_areg(s, reg0);
+        return gen_lea_indexed(env, s, reg);
+    case 7: /* Other */
+        switch (reg0) {
+        case 0: /* Absolute short. */
+            offset = (int16_t)read_im16(env, s);
+            return mark_to_release(s, tcg_const_i32(tcg_ctx, offset));
+        case 1: /* Absolute long. */
+            offset = read_im32(env, s);
+            return mark_to_release(s, tcg_const_i32(tcg_ctx, offset));
+        case 2: /* pc displacement */
+            offset = s->pc;
+            offset += (int16_t)read_im16(env, s);
+            return mark_to_release(s, tcg_const_i32(tcg_ctx, offset));
+        case 3: /* pc index+displacement. */
+            return gen_lea_indexed(env, s, tcg_ctx->NULL_QREG);
+        case 4: /* Immediate. */
+        default:
+            return tcg_ctx->NULL_QREG;
+        }
+    }
+    /* Should never happen. */
+    return tcg_ctx->NULL_QREG;
+}
+
+static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
+                    int opsize)
+{
+    int mode = extract32(insn, 3, 3);
+    int reg0 = REG(insn, 0);
+    return gen_lea_mode(env, s, mode, reg0, opsize);
+}
+
+/*
+ * Generate code to load/store a value from/into an EA. If WHAT is EA_STORE
+ * this is a write, otherwise it is a read (EA_LOADS == sign extend,
+ * EA_LOADU == zero extend). ADDRP is non-null for readwrite operands.
+ */
+static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
+                        int opsize, TCGv val, TCGv *addrp, ea_what what,
+                        int index)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv reg, tmp, result;
+    int32_t offset;
+
+    switch (mode) {
+    case 0: /* Data register direct. */
+        reg = tcg_ctx->cpu_dregs[reg0];
+        if (what == EA_STORE) {
+            gen_partset_reg(tcg_ctx, opsize, reg, val);
+            return tcg_ctx->store_dummy;
+        } else {
+            return gen_extend(s, reg, opsize, what == EA_LOADS);
+        }
+    case 1: /* Address register direct. */
+        reg = get_areg(s, reg0);
+        if (what == EA_STORE) {
+            tcg_gen_mov_i32(tcg_ctx, reg, val);
+            return tcg_ctx->store_dummy;
+        } else {
+            return gen_extend(s, reg, opsize, what == EA_LOADS);
+        }
+    case 2: /* Indirect register */
+        reg = get_areg(s, reg0);
+        return gen_ldst(s, opsize, reg, val, what, index);
+    case 3: /* Indirect postincrement. */
+        reg = get_areg(s, reg0);
+        result = gen_ldst(s, opsize, reg, val, what, index);
+        if (what == EA_STORE || !addrp) {
+            TCGv tmp = tcg_temp_new(tcg_ctx);
+            if (reg0 == 7 && opsize == OS_BYTE &&
+                m68k_feature(s->env, M68K_FEATURE_M68000)) {
+                tcg_gen_addi_i32(tcg_ctx, tmp, reg, 2);
+            } else {
+                tcg_gen_addi_i32(tcg_ctx, tmp, reg, opsize_bytes(opsize));
+            }
+            delay_set_areg(s, reg0, tmp, true);
+        }
+        return result;
+    case 4: /* Indirect predecrement. */
+        if (addrp && what == EA_STORE) {
+            tmp = *addrp;
+        } else {
+            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
+            if (IS_NULL_QREG(tmp)) {
+                return tmp;
+            }
+            if (addrp) {
+                *addrp = tmp;
+            }
+        }
+        result = gen_ldst(s, opsize, tmp, val, what, index);
+        if (what == EA_STORE || !addrp) {
+            delay_set_areg(s, reg0, tmp, false);
+        }
+        return result;
+    case 5: /* Indirect displacement. */
+    case 6: /* Indirect index + displacement.
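+ * Both modes compute their address through gen_lea_mode and then share
+ * the do_indirect load/store path below.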
*/ + do_indirect: + if (addrp && what == EA_STORE) { + tmp = *addrp; + } else { + tmp = gen_lea_mode(env, s, mode, reg0, opsize); + if (IS_NULL_QREG(tmp)) { + return tmp; + } + if (addrp) { + *addrp = tmp; + } + } + return gen_ldst(s, opsize, tmp, val, what, index); + case 7: /* Other */ + switch (reg0) { + case 0: /* Absolute short. */ + case 1: /* Absolute long. */ + case 2: /* pc displacement */ + case 3: /* pc index+displacement. */ + goto do_indirect; + case 4: /* Immediate. */ + /* Sign extend values for consistency. */ + switch (opsize) { + case OS_BYTE: + if (what == EA_LOADS) { + offset = (int8_t)read_im8(env, s); + } else { + offset = read_im8(env, s); + } + break; + case OS_WORD: + if (what == EA_LOADS) { + offset = (int16_t)read_im16(env, s); + } else { + offset = read_im16(env, s); + } + break; + case OS_LONG: + offset = read_im32(env, s); + break; + default: + g_assert_not_reached(); + } + return mark_to_release(s, tcg_const_i32(tcg_ctx, offset)); + default: + return tcg_ctx->NULL_QREG; + } + } + /* Should never happen. */ + return tcg_ctx->NULL_QREG; +} + +static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn, + int opsize, TCGv val, TCGv *addrp, ea_what what, int index) +{ + int mode = extract32(insn, 3, 3); + int reg0 = REG(insn, 0); + return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index); +} + +static TCGv_ptr gen_fp_ptr(TCGContext *tcg_ctx, int freg) +{ + TCGv_ptr fp = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, fp, tcg_ctx->cpu_env, offsetof(CPUM68KState, fregs[freg])); + return fp; +} + +static TCGv_ptr gen_fp_result_ptr(TCGContext *tcg_ctx) +{ + TCGv_ptr fp = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, fp, tcg_ctx->cpu_env, offsetof(CPUM68KState, fp_result)); + return fp; +} + +static void gen_fp_move(TCGContext *tcg_ctx, TCGv_ptr dest, TCGv_ptr src) +{ + TCGv t32; + TCGv_i64 t64; + + t32 = tcg_temp_new(tcg_ctx); + tcg_gen_ld16u_i32(tcg_ctx, t32, src, offsetof(FPReg, l.upper)); + tcg_gen_st16_i32(tcg_ctx, t32, dest, offsetof(FPReg, l.upper)); + tcg_temp_free(tcg_ctx, t32); + + t64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, t64, src, offsetof(FPReg, l.lower)); + tcg_gen_st_i64(tcg_ctx, t64, dest, offsetof(FPReg, l.lower)); + tcg_temp_free_i64(tcg_ctx, t64); +} + +static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, + int index) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + TCGv_i64 t64; + + t64 = tcg_temp_new_i64(tcg_ctx); + tmp = tcg_temp_new(tcg_ctx); + switch (opsize) { + case OS_BYTE: + tcg_gen_qemu_ld8s(tcg_ctx, tmp, addr, index); + gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); + break; + case OS_WORD: + tcg_gen_qemu_ld16s(tcg_ctx, tmp, addr, index); + gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); + break; + case OS_LONG: + tcg_gen_qemu_ld32u(tcg_ctx, tmp, addr, index); + gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); + break; + case OS_SINGLE: + tcg_gen_qemu_ld32u(tcg_ctx, tmp, addr, index); + gen_helper_extf32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); + break; + case OS_DOUBLE: + tcg_gen_qemu_ld64(tcg_ctx, t64, addr, index); + gen_helper_extf64(tcg_ctx, tcg_ctx->cpu_env, fp, t64); + break; + case OS_EXTENDED: + if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) { + gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); + break; + } + tcg_gen_qemu_ld32u(tcg_ctx, tmp, addr, index); + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); + tcg_gen_st16_i32(tcg_ctx, tmp, fp, offsetof(FPReg, l.upper)); + tcg_gen_addi_i32(tcg_ctx, tmp, addr, 4); + 
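/* the 64-bit mantissa follows the 16-bit exponent longword at addr + 4 */
+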
tcg_gen_qemu_ld64(tcg_ctx, t64, tmp, index); + tcg_gen_st_i64(tcg_ctx, t64, fp, offsetof(FPReg, l.lower)); + break; + case OS_PACKED: + /* + * unimplemented data type on 68040/ColdFire + * FIXME if needed for another FPU + */ + gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); + break; + default: + g_assert_not_reached(); + } + tcg_temp_free(tcg_ctx, tmp); + tcg_temp_free_i64(tcg_ctx, t64); +} + +static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, + int index) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + TCGv_i64 t64; + + t64 = tcg_temp_new_i64(tcg_ctx); + tmp = tcg_temp_new(tcg_ctx); + switch (opsize) { + case OS_BYTE: + gen_helper_reds32(tcg_ctx, tmp, tcg_ctx->cpu_env, fp); + tcg_gen_qemu_st8(tcg_ctx, tmp, addr, index); + break; + case OS_WORD: + gen_helper_reds32(tcg_ctx, tmp, tcg_ctx->cpu_env, fp); + tcg_gen_qemu_st16(tcg_ctx, tmp, addr, index); + break; + case OS_LONG: + gen_helper_reds32(tcg_ctx, tmp, tcg_ctx->cpu_env, fp); + tcg_gen_qemu_st32(tcg_ctx, tmp, addr, index); + break; + case OS_SINGLE: + gen_helper_redf32(tcg_ctx, tmp, tcg_ctx->cpu_env, fp); + tcg_gen_qemu_st32(tcg_ctx, tmp, addr, index); + break; + case OS_DOUBLE: + gen_helper_redf64(tcg_ctx, t64, tcg_ctx->cpu_env, fp); + tcg_gen_qemu_st64(tcg_ctx, t64, addr, index); + break; + case OS_EXTENDED: + if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) { + gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); + break; + } + tcg_gen_ld16u_i32(tcg_ctx, tmp, fp, offsetof(FPReg, l.upper)); + tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 16); + tcg_gen_qemu_st32(tcg_ctx, tmp, addr, index); + tcg_gen_addi_i32(tcg_ctx, tmp, addr, 4); + tcg_gen_ld_i64(tcg_ctx, t64, fp, offsetof(FPReg, l.lower)); + tcg_gen_qemu_st64(tcg_ctx, t64, tmp, index); + break; + case OS_PACKED: + /* + * unimplemented data type on 68040/ColdFire + * FIXME if needed for another FPU + */ + gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP); + break; + default: + g_assert_not_reached(); + } + tcg_temp_free(tcg_ctx, tmp); + tcg_temp_free_i64(tcg_ctx, t64); +} + +static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr, + TCGv_ptr fp, ea_what what, int index) +{ + if (what == EA_STORE) { + gen_store_fp(s, opsize, addr, fp, index); + } else { + gen_load_fp(s, opsize, addr, fp, index); + } +} + +static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode, + int reg0, int opsize, TCGv_ptr fp, ea_what what, + int index) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg, addr, tmp; + TCGv_i64 t64; + + switch (mode) { + case 0: /* Data register direct. */ + reg = tcg_ctx->cpu_dregs[reg0]; + if (what == EA_STORE) { + switch (opsize) { + case OS_BYTE: + case OS_WORD: + case OS_LONG: + gen_helper_reds32(tcg_ctx, reg, tcg_ctx->cpu_env, fp); + break; + case OS_SINGLE: + gen_helper_redf32(tcg_ctx, reg, tcg_ctx->cpu_env, fp); + break; + default: + g_assert_not_reached(); + } + } else { + tmp = tcg_temp_new(tcg_ctx); + switch (opsize) { + case OS_BYTE: + tcg_gen_ext8s_i32(tcg_ctx, tmp, reg); + gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); + break; + case OS_WORD: + tcg_gen_ext16s_i32(tcg_ctx, tmp, reg); + gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp); + break; + case OS_LONG: + gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, reg); + break; + case OS_SINGLE: + gen_helper_extf32(tcg_ctx, tcg_ctx->cpu_env, fp, reg); + break; + default: + g_assert_not_reached(); + } + tcg_temp_free(tcg_ctx, tmp); + } + return 0; + case 1: /* Address register direct. 
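+ * (an address register can never hold an FPU operand, so signal failure)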
+ */
+        return -1;
+    case 2: /* Indirect register */
+        addr = get_areg(s, reg0);
+        gen_ldst_fp(s, opsize, addr, fp, what, index);
+        return 0;
+    case 3: /* Indirect postincrement. */
+        addr = tcg_ctx->cpu_aregs[reg0];
+        gen_ldst_fp(s, opsize, addr, fp, what, index);
+        tcg_gen_addi_i32(tcg_ctx, addr, addr, opsize_bytes(opsize));
+        return 0;
+    case 4: /* Indirect predecrement. */
+        addr = gen_lea_mode(env, s, mode, reg0, opsize);
+        if (IS_NULL_QREG(addr)) {
+            return -1;
+        }
+        gen_ldst_fp(s, opsize, addr, fp, what, index);
+        tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_aregs[reg0], addr);
+        return 0;
+    case 5: /* Indirect displacement. */
+    case 6: /* Indirect index + displacement. */
+    do_indirect:
+        addr = gen_lea_mode(env, s, mode, reg0, opsize);
+        if (IS_NULL_QREG(addr)) {
+            return -1;
+        }
+        gen_ldst_fp(s, opsize, addr, fp, what, index);
+        return 0;
+    case 7: /* Other */
+        switch (reg0) {
+        case 0: /* Absolute short. */
+        case 1: /* Absolute long. */
+        case 2: /* pc displacement */
+        case 3: /* pc index+displacement. */
+            goto do_indirect;
+        case 4: /* Immediate. */
+            if (what == EA_STORE) {
+                return -1;
+            }
+            switch (opsize) {
+            case OS_BYTE:
+                tmp = tcg_const_i32(tcg_ctx, (int8_t)read_im8(env, s));
+                gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp);
+                tcg_temp_free(tcg_ctx, tmp);
+                break;
+            case OS_WORD:
+                tmp = tcg_const_i32(tcg_ctx, (int16_t)read_im16(env, s));
+                gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp);
+                tcg_temp_free(tcg_ctx, tmp);
+                break;
+            case OS_LONG:
+                tmp = tcg_const_i32(tcg_ctx, read_im32(env, s));
+                gen_helper_exts32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp);
+                tcg_temp_free(tcg_ctx, tmp);
+                break;
+            case OS_SINGLE:
+                tmp = tcg_const_i32(tcg_ctx, read_im32(env, s));
+                gen_helper_extf32(tcg_ctx, tcg_ctx->cpu_env, fp, tmp);
+                tcg_temp_free(tcg_ctx, tmp);
+                break;
+            case OS_DOUBLE:
+                t64 = tcg_const_i64(tcg_ctx, read_im64(env, s));
+                gen_helper_extf64(tcg_ctx, tcg_ctx->cpu_env, fp, t64);
+                tcg_temp_free_i64(tcg_ctx, t64);
+                break;
+            case OS_EXTENDED:
+                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
+                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
+                    break;
+                }
+                tmp = tcg_const_i32(tcg_ctx, read_im32(env, s) >> 16);
+                tcg_gen_st16_i32(tcg_ctx, tmp, fp, offsetof(FPReg, l.upper));
+                tcg_temp_free(tcg_ctx, tmp);
+                t64 = tcg_const_i64(tcg_ctx, read_im64(env, s));
+                tcg_gen_st_i64(tcg_ctx, t64, fp, offsetof(FPReg, l.lower));
+                tcg_temp_free_i64(tcg_ctx, t64);
+                break;
+            case OS_PACKED:
+                /*
+                 * unimplemented data type on 68040/ColdFire
+                 * FIXME if needed for another FPU
+                 */
+                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+            return 0;
+        default:
+            return -1;
+        }
+    }
+    return -1;
+}
+
+static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
+                     int opsize, TCGv_ptr fp, ea_what what, int index)
+{
+    int mode = extract32(insn, 3, 3);
+    int reg0 = REG(insn, 0);
+    return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
+}
+
+typedef struct {
+    TCGCond tcond;
+    bool g1;
+    bool g2;
+    TCGv v1;
+    TCGv v2;
+} DisasCompare;
+
+static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv tmp, tmp2;
+    TCGCond tcond;
+    CCOp op = s->cc_op;
+
+    /* The CC_OP_CMP form can handle most normal comparisons directly.
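+ * After a compare, QREG_CC_N holds the destination operand and QREG_CC_V
+ * the source, so most conditions reduce to a single setcond on that pair.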
*/ + if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) { + c->g1 = c->g2 = 1; + c->v1 = QREG_CC_N; + c->v2 = QREG_CC_V; + switch (cond) { + case 2: /* HI */ + case 3: /* LS */ + tcond = TCG_COND_LEU; + goto done; + case 4: /* CC */ + case 5: /* CS */ + tcond = TCG_COND_LTU; + goto done; + case 6: /* NE */ + case 7: /* EQ */ + tcond = TCG_COND_EQ; + goto done; + case 10: /* PL */ + case 11: /* MI */ + c->g1 = c->g2 = 0; + c->v2 = tcg_const_i32(tcg_ctx, 0); + c->v1 = tmp = tcg_temp_new(tcg_ctx); + tcg_gen_sub_i32(tcg_ctx, tmp, QREG_CC_N, QREG_CC_V); + gen_ext(tcg_ctx, tmp, tmp, op - CC_OP_CMPB, 1); + /* fallthru */ + case 12: /* GE */ + case 13: /* LT */ + tcond = TCG_COND_LT; + goto done; + case 14: /* GT */ + case 15: /* LE */ + tcond = TCG_COND_LE; + goto done; + } + } + + c->g1 = 1; + c->g2 = 0; + c->v2 = tcg_const_i32(tcg_ctx, 0); + + switch (cond) { + case 0: /* T */ + case 1: /* F */ + c->v1 = c->v2; + tcond = TCG_COND_NEVER; + goto done; + case 14: /* GT (!(Z || (N ^ V))) */ + case 15: /* LE (Z || (N ^ V)) */ + /* + * Logic operations clear V, which simplifies LE to (Z || N), + * and since Z and N are co-located, this becomes a normal + * comparison vs N. + */ + if (op == CC_OP_LOGIC) { + c->v1 = QREG_CC_N; + tcond = TCG_COND_LE; + goto done; + } + break; + case 12: /* GE (!(N ^ V)) */ + case 13: /* LT (N ^ V) */ + /* Logic operations clear V, which simplifies this to N. */ + if (op != CC_OP_LOGIC) { + break; + } + /* fallthru */ + case 10: /* PL (!N) */ + case 11: /* MI (N) */ + /* Several cases represent N normally. */ + if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL || + op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL || + op == CC_OP_LOGIC) { + c->v1 = QREG_CC_N; + tcond = TCG_COND_LT; + goto done; + } + break; + case 6: /* NE (!Z) */ + case 7: /* EQ (Z) */ + /* Some cases fold Z into N. */ + if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL || + op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL || + op == CC_OP_LOGIC) { + tcond = TCG_COND_EQ; + c->v1 = QREG_CC_N; + goto done; + } + break; + case 4: /* CC (!C) */ + case 5: /* CS (C) */ + /* Some cases fold C into X. */ + if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL || + op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) { + tcond = TCG_COND_NE; + c->v1 = QREG_CC_X; + goto done; + } + /* fallthru */ + case 8: /* VC (!V) */ + case 9: /* VS (V) */ + /* Logic operations clear V and C. */ + if (op == CC_OP_LOGIC) { + tcond = TCG_COND_NEVER; + c->v1 = c->v2; + goto done; + } + break; + } + + /* Otherwise, flush flag state to CC_OP_FLAGS. */ + gen_flush_flags(s); + + switch (cond) { + case 0: /* T */ + case 1: /* F */ + default: + /* Invalid, or handled above. 
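+ * (conditions 0 and 1 were fully handled earlier, so reaching this abort
+ * indicates a corrupted condition field)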
*/ + abort(); + case 2: /* HI (!C && !Z) -> !(C || Z)*/ + case 3: /* LS (C || Z) */ + c->v1 = tmp = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_EQ, tmp, QREG_CC_Z, c->v2); + tcg_gen_or_i32(tcg_ctx, tmp, tmp, QREG_CC_C); + tcond = TCG_COND_NE; + break; + case 4: /* CC (!C) */ + case 5: /* CS (C) */ + c->v1 = QREG_CC_C; + tcond = TCG_COND_NE; + break; + case 6: /* NE (!Z) */ + case 7: /* EQ (Z) */ + c->v1 = QREG_CC_Z; + tcond = TCG_COND_EQ; + break; + case 8: /* VC (!V) */ + case 9: /* VS (V) */ + c->v1 = QREG_CC_V; + tcond = TCG_COND_LT; + break; + case 10: /* PL (!N) */ + case 11: /* MI (N) */ + c->v1 = QREG_CC_N; + tcond = TCG_COND_LT; + break; + case 12: /* GE (!(N ^ V)) */ + case 13: /* LT (N ^ V) */ + c->v1 = tmp = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_xor_i32(tcg_ctx, tmp, QREG_CC_N, QREG_CC_V); + tcond = TCG_COND_LT; + break; + case 14: /* GT (!(Z || (N ^ V))) */ + case 15: /* LE (Z || (N ^ V)) */ + c->v1 = tmp = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_EQ, tmp, QREG_CC_Z, c->v2); + tcg_gen_neg_i32(tcg_ctx, tmp, tmp); + tmp2 = tcg_temp_new(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp2, QREG_CC_N, QREG_CC_V); + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free(tcg_ctx, tmp2); + tcond = TCG_COND_LT; + break; + } + + done: + if ((cond & 1) == 0) { + tcond = tcg_invert_cond(tcond); + } + c->tcond = tcond; +} + +static void free_cond(TCGContext *tcg_ctx, DisasCompare *c) +{ + if (!c->g1) { + tcg_temp_free(tcg_ctx, c->v1); + } + if (!c->g2) { + tcg_temp_free(tcg_ctx, c->v2); + } +} + +static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + DisasCompare c; + + gen_cc_cond(&c, s, cond); + update_cc_op(s); + tcg_gen_brcond_i32(tcg_ctx, c.tcond, c.v1, c.v2, l1); + free_cond(tcg_ctx, &c); +} + +/* Force a TB lookup after an instruction that changes the CPU state. */ +static void gen_exit_tb(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + update_cc_op(s); + tcg_gen_movi_i32(tcg_ctx, QREG_PC, s->pc); + s->base.is_jmp = DISAS_EXIT; +} + +#define SRC_EA(env, result, opsize, op_sign, addrp) do { \ + result = gen_ea(env, s, insn, opsize, tcg_ctx->NULL_QREG, addrp, \ + op_sign ? EA_LOADS : EA_LOADU, IS_USER(s)); \ + if (IS_NULL_QREG(result)) { \ + gen_addr_fault(s); \ + return; \ + } \ + } while (0) + +#define DEST_EA(env, insn, opsize, val, addrp) do { \ + TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, \ + EA_STORE, IS_USER(s)); \ + if (IS_NULL_QREG(ea_result)) { \ + gen_addr_fault(s); \ + return; \ + } \ + } while (0) + +static inline bool use_goto_tb(DisasContext *s, uint32_t dest) +{ + return (s->base.pc_first & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) + || (s->base.pc_next & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); +} + +/* Generate a jump to an immediate address. 
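+ * If the destination lies on the same guest page as this TB (see
+ * use_goto_tb above), the branch is emitted as a chained goto_tb exit;
+ * otherwise the new PC is written back and we return to the main loop
+ * for a full TB lookup.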
*/ +static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (unlikely(s->base.singlestep_enabled)) { + gen_exception(s, dest, EXCP_DEBUG); + } else if (use_goto_tb(s, dest)) { + tcg_gen_goto_tb(tcg_ctx, n); + tcg_gen_movi_i32(tcg_ctx, QREG_PC, dest); + tcg_gen_exit_tb(tcg_ctx, s->base.tb, n); + } else { + gen_jmp_im(s, dest); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + } + s->base.is_jmp = DISAS_NORETURN; +} + +DISAS_INSN(scc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + DisasCompare c; + int cond; + TCGv tmp; + + cond = (insn >> 8) & 0xf; + gen_cc_cond(&c, s, cond); + + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_setcond_i32(tcg_ctx, c.tcond, tmp, c.v1, c.v2); + free_cond(tcg_ctx, &c); + + tcg_gen_neg_i32(tcg_ctx, tmp, tmp); + DEST_EA(env, insn, OS_BYTE, tmp, NULL); + tcg_temp_free(tcg_ctx, tmp); +} + +DISAS_INSN(dbcc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGLabel *l1; + TCGv reg; + TCGv tmp; + int16_t offset; + uint32_t base; + + reg = DREG(insn, 0); + base = s->pc; + offset = (int16_t)read_im16(env, s); + l1 = gen_new_label(tcg_ctx); + gen_jmpcc(s, (insn >> 8) & 0xf, l1); + + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_ext16s_i32(tcg_ctx, tmp, reg); + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, -1); + gen_partset_reg(tcg_ctx, OS_WORD, reg, tmp); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, -1, l1); + gen_jmp_tb(s, 1, base + offset); + gen_set_label(tcg_ctx, l1); + gen_jmp_tb(s, 0, s->pc); +} + +DISAS_INSN(undef_mac) +{ + gen_exception(s, s->base.pc_next, EXCP_LINEA); +} + +DISAS_INSN(undef_fpu) +{ + gen_exception(s, s->base.pc_next, EXCP_LINEF); +} + +DISAS_INSN(undef) +{ + /* + * ??? This is both instructions that are as yet unimplemented + * for the 680x0 series, as well as those that are implemented + * but actually illegal for CPU32 or pre-68020. 
+     */
+    //qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
+    //              insn, s->base.pc_next);
+    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+}
+
+DISAS_INSN(mulw)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv reg;
+    TCGv tmp;
+    TCGv src;
+    int sign;
+
+    sign = (insn & 0x100) != 0;
+    reg = DREG(insn, 9);
+    tmp = tcg_temp_new(tcg_ctx);
+    if (sign)
+        tcg_gen_ext16s_i32(tcg_ctx, tmp, reg);
+    else
+        tcg_gen_ext16u_i32(tcg_ctx, tmp, reg);
+    SRC_EA(env, src, OS_WORD, sign, NULL);
+    tcg_gen_mul_i32(tcg_ctx, tmp, tmp, src);
+    tcg_gen_mov_i32(tcg_ctx, reg, tmp);
+    gen_logic_cc(s, tmp, OS_LONG);
+    tcg_temp_free(tcg_ctx, tmp);
+}
+
+DISAS_INSN(divw)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int sign;
+    TCGv src;
+    TCGv destr;
+
+    /* divX.w <EA>,Dn 32/16 -> 16r:16q */
+
+    sign = (insn & 0x100) != 0;
+
+    /* dest.l / src.w */
+
+    SRC_EA(env, src, OS_WORD, sign, NULL);
+    destr = tcg_const_i32(tcg_ctx, REG(insn, 9));
+    if (sign) {
+        gen_helper_divsw(tcg_ctx, tcg_ctx->cpu_env, destr, src);
+    } else {
+        gen_helper_divuw(tcg_ctx, tcg_ctx->cpu_env, destr, src);
+    }
+    tcg_temp_free(tcg_ctx, destr);
+
+    set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(divl)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv num, reg, den;
+    int sign;
+    uint16_t ext;
+
+    ext = read_im16(env, s);
+
+    sign = (ext & 0x0800) != 0;
+
+    if (ext & 0x400) {
+        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
+            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+            return;
+        }
+
+        /* divX.l <EA>, Dr:Dq 64/32 -> 32r:32q */
+
+        SRC_EA(env, den, OS_LONG, 0, NULL);
+        num = tcg_const_i32(tcg_ctx, REG(ext, 12));
+        reg = tcg_const_i32(tcg_ctx, REG(ext, 0));
+        if (sign) {
+            gen_helper_divsll(tcg_ctx, tcg_ctx->cpu_env, num, reg, den);
+        } else {
+            gen_helper_divull(tcg_ctx, tcg_ctx->cpu_env, num, reg, den);
+        }
+        tcg_temp_free(tcg_ctx, reg);
+        tcg_temp_free(tcg_ctx, num);
+        set_cc_op(s, CC_OP_FLAGS);
+        return;
+    }
+
+    /* divX.l <EA>, Dq 32/32 -> 32q */
+    /* divXl.l <EA>, Dr:Dq 32/32 -> 32r:32q */
+
+    SRC_EA(env, den, OS_LONG, 0, NULL);
+    num = tcg_const_i32(tcg_ctx, REG(ext, 12));
+    reg = tcg_const_i32(tcg_ctx, REG(ext, 0));
+    if (sign) {
+        gen_helper_divsl(tcg_ctx, tcg_ctx->cpu_env, num, reg, den);
+    } else {
+        gen_helper_divul(tcg_ctx, tcg_ctx->cpu_env, num, reg, den);
+    }
+    tcg_temp_free(tcg_ctx, reg);
+    tcg_temp_free(tcg_ctx, num);
+
+    set_cc_op(s, CC_OP_FLAGS);
+}
+
+static void bcd_add(TCGContext *tcg_ctx, TCGv dest, TCGv src)
+{
+    TCGv t0, t1;
+
+    /*
+     * dest10 = dest10 + src10 + X
+     *
+     *        t1 = src
+     *        t2 = t1 + 0x066
+     *        t3 = t2 + dest + X
+     *        t4 = t2 ^ dest
+     *        t5 = t3 ^ t4
+     *        t6 = ~t5 & 0x110
+     *        t7 = (t6 >> 2) | (t6 >> 3)
+     *        return t3 - t7
+     */
+
+    /*
+     * t1 = (src + 0x066) + dest + X
+     *    = result with a possible excess 0x6 in each digit
+     */
+
+    t0 = tcg_const_i32(tcg_ctx, 0x066);
+    tcg_gen_add_i32(tcg_ctx, t0, t0, src);
+
+    t1 = tcg_temp_new(tcg_ctx);
+    tcg_gen_add_i32(tcg_ctx, t1, t0, dest);
+    tcg_gen_add_i32(tcg_ctx, t1, t1, QREG_CC_X);
+
+    /* we will remove the excess 0x6 where there is no carry */
+
+    /*
+     * t0 = (src + 0x0066) ^ dest
+     *    = t1 without carries
+     */
+
+    tcg_gen_xor_i32(tcg_ctx, t0, t0, dest);
+
+    /*
+     * extract the carries
+     * t0 = t0 ^ t1
+     *    = only the carries
+     */
+
+    tcg_gen_xor_i32(tcg_ctx, t0, t0, t1);
+
+    /*
+     * generate 0x1 where there is no carry
+     * and for each 0x10, generate a 0x6
+     */
+
+    tcg_gen_shri_i32(tcg_ctx, t0, t0, 3);
+    tcg_gen_not_i32(tcg_ctx, t0, t0);
+    tcg_gen_andi_i32(tcg_ctx, t0, t0, 0x22);
+    tcg_gen_add_i32(tcg_ctx, dest, t0, t0);
+    tcg_gen_add_i32(tcg_ctx, dest, dest, t0);
+    tcg_temp_free(tcg_ctx, t0);
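+
+    /*
+     * Worked example with illustrative values dest = 0x19, src = 0x28,
+     * X = 0:
+     *   t0 = 0x28 + 0x066             = 0x8e
+     *   t1 = 0x8e + 0x19 + 0          = 0xa7
+     *   carries: (0x8e ^ 0x19) ^ 0xa7 = 0x30 (bit 4 set, bit 8 clear)
+     *   ~(0x30 >> 3) & 0x22           = 0x20 (high digit had no carry)
+     *   dest = 0x20 + 0x20 + 0x20     = 0x60
+     * and the final subtraction below yields 0xa7 - 0x60 = 0x47,
+     * the BCD sum of 19 and 28.
+     */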
+    /*
+     * remove the excess 0x6
+     * for digits that have not generated a carry
+     */
+
+    tcg_gen_sub_i32(tcg_ctx, dest, t1, dest);
+    tcg_temp_free(tcg_ctx, t1);
+}
+
+static void bcd_sub(TCGContext *tcg_ctx, TCGv dest, TCGv src)
+{
+    TCGv t0, t1, t2;
+
+    /*
+     * dest10 = dest10 - src10 - X
+     *        = bcd_add(dest + 1 - X, 0x199 - src)
+     */
+
+    /* t0 = 0x066 + (0x199 - src) */
+
+    t0 = tcg_temp_new(tcg_ctx);
+    tcg_gen_subfi_i32(tcg_ctx, t0, 0x1ff, src);
+
+    /* t1 = t0 + dest + 1 - X */
+
+    t1 = tcg_temp_new(tcg_ctx);
+    tcg_gen_add_i32(tcg_ctx, t1, t0, dest);
+    tcg_gen_addi_i32(tcg_ctx, t1, t1, 1);
+    tcg_gen_sub_i32(tcg_ctx, t1, t1, QREG_CC_X);
+
+    /* t2 = t0 ^ dest */
+
+    t2 = tcg_temp_new(tcg_ctx);
+    tcg_gen_xor_i32(tcg_ctx, t2, t0, dest);
+
+    /* t0 = t1 ^ t2 */
+
+    tcg_gen_xor_i32(tcg_ctx, t0, t1, t2);
+
+    /*
+     * t2 = ~t0 & 0x110
+     * t0 = (t2 >> 2) | (t2 >> 3)
+     *
+     * to fit on 8-bit operands, this is changed into:
+     *
+     * t2 = ~(t0 >> 3) & 0x22
+     * t0 = t2 + t2
+     * t0 = t0 + t2
+     */
+
+    tcg_gen_shri_i32(tcg_ctx, t2, t0, 3);
+    tcg_gen_not_i32(tcg_ctx, t2, t2);
+    tcg_gen_andi_i32(tcg_ctx, t2, t2, 0x22);
+    tcg_gen_add_i32(tcg_ctx, t0, t2, t2);
+    tcg_gen_add_i32(tcg_ctx, t0, t0, t2);
+    tcg_temp_free(tcg_ctx, t2);
+
+    /* return t1 - t0 */
+
+    tcg_gen_sub_i32(tcg_ctx, dest, t1, t0);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free(tcg_ctx, t1);
+}
+
+static void bcd_flags(TCGContext *tcg_ctx, TCGv val)
+{
+    tcg_gen_andi_i32(tcg_ctx, QREG_CC_C, val, 0x0ff);
+    tcg_gen_or_i32(tcg_ctx, QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
+
+    tcg_gen_extract_i32(tcg_ctx, QREG_CC_C, val, 8, 1);
+
+    tcg_gen_mov_i32(tcg_ctx, QREG_CC_X, QREG_CC_C);
+}
+
+DISAS_INSN(abcd_reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src;
+    TCGv dest;
+
+    gen_flush_flags(s); /* !Z is sticky */
+
+    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
+    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
+    bcd_add(tcg_ctx, dest, src);
+    gen_partset_reg(tcg_ctx, OS_BYTE, DREG(insn, 9), dest);
+
+    bcd_flags(tcg_ctx, dest);
+}
+
+DISAS_INSN(abcd_mem)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src, dest, addr;
+
+    gen_flush_flags(s); /* !Z is sticky */
+
+    /* Indirect pre-decrement load (mode 4) */
+
+    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
+                      tcg_ctx->NULL_QREG, NULL, EA_LOADU, IS_USER(s));
+    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
+                       tcg_ctx->NULL_QREG, &addr, EA_LOADU, IS_USER(s));
+
+    bcd_add(tcg_ctx, dest, src);
+
+    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
+                EA_STORE, IS_USER(s));
+
+    bcd_flags(tcg_ctx, dest);
+}
+
+DISAS_INSN(sbcd_reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src, dest;
+
+    gen_flush_flags(s); /* !Z is sticky */
+
+    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
+    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
+
+    bcd_sub(tcg_ctx, dest, src);
+
+    gen_partset_reg(tcg_ctx, OS_BYTE, DREG(insn, 9), dest);
+
+    bcd_flags(tcg_ctx, dest);
+}
+
+DISAS_INSN(sbcd_mem)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src, dest, addr;
+
+    gen_flush_flags(s); /* !Z is sticky */
+
+    /* Indirect pre-decrement load (mode 4) */
+
+    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
+                      tcg_ctx->NULL_QREG, NULL, EA_LOADU, IS_USER(s));
+    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
+                       tcg_ctx->NULL_QREG, &addr, EA_LOADU, IS_USER(s));
+
+    bcd_sub(tcg_ctx, dest, src);
+
+    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
+                EA_STORE, IS_USER(s));
+
+    bcd_flags(tcg_ctx, dest);
+}
+
+DISAS_INSN(nbcd)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src, dest;
+    TCGv addr;
+
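+    /*
+     * nbcd negates in BCD: dest = 0 - src - X, implemented as a
+     * bcd_sub from a constant zero.  E.g. src = 0x25 with X clear
+     * yields 0x75 (the ten's complement) and sets the borrow in X/C.
+     */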
+ gen_flush_flags(s); /* !Z is sticky */ + + SRC_EA(env, src, OS_BYTE, 0, &addr); + + dest = tcg_const_i32(tcg_ctx, 0); + bcd_sub(tcg_ctx, dest, src); + + DEST_EA(env, insn, OS_BYTE, dest, &addr); + + bcd_flags(tcg_ctx, dest); + + tcg_temp_free(tcg_ctx, dest); +} + +DISAS_INSN(addsub) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv dest; + TCGv src; + TCGv tmp; + TCGv addr; + int add; + int opsize; + + add = (insn & 0x4000) != 0; + opsize = insn_opsize(insn); + reg = gen_extend(s, DREG(insn, 9), opsize, 1); + dest = tcg_temp_new(tcg_ctx); + if (insn & 0x100) { + SRC_EA(env, tmp, opsize, 1, &addr); + src = reg; + } else { + tmp = reg; + SRC_EA(env, src, opsize, 1, NULL); + } + if (add) { + tcg_gen_add_i32(tcg_ctx, dest, tmp, src); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, dest, src); + set_cc_op(s, CC_OP_ADDB + opsize); + } else { + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, tmp, src); + tcg_gen_sub_i32(tcg_ctx, dest, tmp, src); + set_cc_op(s, CC_OP_SUBB + opsize); + } + gen_update_cc_add(tcg_ctx, dest, src, opsize); + if (insn & 0x100) { + DEST_EA(env, insn, opsize, dest, &addr); + } else { + gen_partset_reg(tcg_ctx, opsize, DREG(insn, 9), dest); + } + tcg_temp_free(tcg_ctx, dest); +} + +/* Reverse the order of the bits in REG. */ +DISAS_INSN(bitrev) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + reg = DREG(insn, 0); + gen_helper_bitrev(tcg_ctx, reg, reg); +} + +DISAS_INSN(bitop_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + int op; + TCGv src1; + TCGv src2; + TCGv tmp; + TCGv addr; + TCGv dest; + + if ((insn & 0x38) != 0) + opsize = OS_BYTE; + else + opsize = OS_LONG; + op = (insn >> 6) & 3; + SRC_EA(env, src1, opsize, 0, op ? &addr: NULL); + + gen_flush_flags(s); + src2 = tcg_temp_new(tcg_ctx); + if (opsize == OS_BYTE) + tcg_gen_andi_i32(tcg_ctx, src2, DREG(insn, 9), 7); + else + tcg_gen_andi_i32(tcg_ctx, src2, DREG(insn, 9), 31); + + tmp = tcg_const_i32(tcg_ctx, 1); + tcg_gen_shl_i32(tcg_ctx, tmp, tmp, src2); + tcg_temp_free(tcg_ctx, src2); + + tcg_gen_and_i32(tcg_ctx, QREG_CC_Z, src1, tmp); + + dest = tcg_temp_new(tcg_ctx); + switch (op) { + case 1: /* bchg */ + tcg_gen_xor_i32(tcg_ctx, dest, src1, tmp); + break; + case 2: /* bclr */ + tcg_gen_andc_i32(tcg_ctx, dest, src1, tmp); + break; + case 3: /* bset */ + tcg_gen_or_i32(tcg_ctx, dest, src1, tmp); + break; + default: /* btst */ + break; + } + tcg_temp_free(tcg_ctx, tmp); + if (op) { + DEST_EA(env, insn, opsize, dest, &addr); + } + tcg_temp_free(tcg_ctx, dest); +} + +DISAS_INSN(sats) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + reg = DREG(insn, 0); + gen_flush_flags(s); + gen_helper_sats(tcg_ctx, reg, reg, QREG_CC_V); + gen_logic_cc(s, reg, OS_LONG); +} + +static void gen_push(DisasContext *s, TCGv val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_subi_i32(tcg_ctx, tmp, QREG_SP, 4); + gen_store(s, OS_LONG, tmp, val, IS_USER(s)); + tcg_gen_mov_i32(tcg_ctx, QREG_SP, tmp); + tcg_temp_free(tcg_ctx, tmp); +} + +static TCGv mreg(TCGContext *tcg_ctx, int reg) +{ + if (reg < 8) { + /* Dx */ + return tcg_ctx->cpu_dregs[reg]; + } + /* Ax */ + return tcg_ctx->cpu_aregs[reg & 7]; +} + +DISAS_INSN(movem) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv addr, incr, tmp, r[16]; + int is_load = (insn & 0x0400) != 0; + int opsize = (insn & 0x40) != 0 ? 
OS_LONG : OS_WORD; + uint16_t mask = read_im16(env, s); + int mode = extract32(insn, 3, 3); + int reg0 = REG(insn, 0); + int i; + + tmp = tcg_ctx->cpu_aregs[reg0]; + + switch (mode) { + case 0: /* data register direct */ + case 1: /* addr register direct */ + do_addr_fault: + gen_addr_fault(s); + return; + + case 2: /* indirect */ + break; + + case 3: /* indirect post-increment */ + if (!is_load) { + /* post-increment is not allowed */ + goto do_addr_fault; + } + break; + + case 4: /* indirect pre-decrement */ + if (is_load) { + /* pre-decrement is not allowed */ + goto do_addr_fault; + } + /* + * We want a bare copy of the address reg, without any pre-decrement + * adjustment, as gen_lea would provide. + */ + break; + + default: + tmp = gen_lea_mode(env, s, mode, reg0, opsize); + if (IS_NULL_QREG(tmp)) { + goto do_addr_fault; + } + break; + } + + addr = tcg_temp_new(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, addr, tmp); + incr = tcg_const_i32(tcg_ctx, opsize_bytes(opsize)); + + if (is_load) { + /* memory to register */ + for (i = 0; i < 16; i++) { + if (mask & (1 << i)) { + r[i] = gen_load(s, opsize, addr, 1, IS_USER(s)); + tcg_gen_add_i32(tcg_ctx, addr, addr, incr); + } + } + for (i = 0; i < 16; i++) { + if (mask & (1 << i)) { + tcg_gen_mov_i32(tcg_ctx, mreg(tcg_ctx, i), r[i]); + tcg_temp_free(tcg_ctx, r[i]); + } + } + if (mode == 3) { + /* post-increment: movem (An)+,X */ + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_aregs[reg0], addr); + } + } else { + /* register to memory */ + if (mode == 4) { + /* pre-decrement: movem X,-(An) */ + for (i = 15; i >= 0; i--) { + if ((mask << i) & 0x8000) { + tcg_gen_sub_i32(tcg_ctx, addr, addr, incr); + if (reg0 + 8 == i && + m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) { + /* + * M68020+: if the addressing register is the + * register moved to memory, the value written + * is the initial value decremented by the size of + * the operation, regardless of how many actual + * stores have been performed until this point. + * M68000/M68010: the value is the initial value. 
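+                         * E.g. for "movem.l d0/a0,-(a0)" a 68020
+                         * stores, in the a0 slot, the original a0
+                         * minus 4 (one operand size); a 68000 stores
+                         * the original a0 unchanged.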
+ */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_sub_i32(tcg_ctx, tmp, tcg_ctx->cpu_aregs[reg0], incr); + gen_store(s, opsize, addr, tmp, IS_USER(s)); + tcg_temp_free(tcg_ctx, tmp); + } else { + gen_store(s, opsize, addr, mreg(tcg_ctx, i), IS_USER(s)); + } + } + } + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_aregs[reg0], addr); + } else { + for (i = 0; i < 16; i++) { + if (mask & (1 << i)) { + gen_store(s, opsize, addr, mreg(tcg_ctx, i), IS_USER(s)); + tcg_gen_add_i32(tcg_ctx, addr, addr, incr); + } + } + } + } + + tcg_temp_free(tcg_ctx, incr); + tcg_temp_free(tcg_ctx, addr); +} + +DISAS_INSN(movep) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint8_t i; + int16_t displ; + TCGv reg; + TCGv addr; + TCGv abuf; + TCGv dbuf; + + displ = read_im16(env, s); + + addr = AREG(insn, 0); + reg = DREG(insn, 9); + + abuf = tcg_temp_new(tcg_ctx); + tcg_gen_addi_i32(tcg_ctx, abuf, addr, displ); + dbuf = tcg_temp_new(tcg_ctx); + + if (insn & 0x40) { + i = 4; + } else { + i = 2; + } + + if (insn & 0x80) { + for ( ; i > 0 ; i--) { + tcg_gen_shri_i32(tcg_ctx, dbuf, reg, (i - 1) * 8); + tcg_gen_qemu_st8(tcg_ctx, dbuf, abuf, IS_USER(s)); + if (i > 1) { + tcg_gen_addi_i32(tcg_ctx, abuf, abuf, 2); + } + } + } else { + for ( ; i > 0 ; i--) { + tcg_gen_qemu_ld8u(tcg_ctx, dbuf, abuf, IS_USER(s)); + tcg_gen_deposit_i32(tcg_ctx, reg, reg, dbuf, (i - 1) * 8, 8); + if (i > 1) { + tcg_gen_addi_i32(tcg_ctx, abuf, abuf, 2); + } + } + } + tcg_temp_free(tcg_ctx, abuf); + tcg_temp_free(tcg_ctx, dbuf); +} + +DISAS_INSN(bitop_im) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + int op; + TCGv src1; + uint32_t mask; + int bitnum; + TCGv tmp; + TCGv addr; + + if ((insn & 0x38) != 0) + opsize = OS_BYTE; + else + opsize = OS_LONG; + op = (insn >> 6) & 3; + + bitnum = read_im16(env, s); + if (m68k_feature(s->env, M68K_FEATURE_M68000)) { + if (bitnum & 0xfe00) { + disas_undef(env, s, insn); + return; + } + } else { + if (bitnum & 0xff00) { + disas_undef(env, s, insn); + return; + } + } + + SRC_EA(env, src1, opsize, 0, op ? &addr: NULL); + + gen_flush_flags(s); + if (opsize == OS_BYTE) + bitnum &= 7; + else + bitnum &= 31; + mask = 1 << bitnum; + + tcg_gen_andi_i32(tcg_ctx, QREG_CC_Z, src1, mask); + + if (op) { + tmp = tcg_temp_new(tcg_ctx); + switch (op) { + case 1: /* bchg */ + tcg_gen_xori_i32(tcg_ctx, tmp, src1, mask); + break; + case 2: /* bclr */ + tcg_gen_andi_i32(tcg_ctx, tmp, src1, ~mask); + break; + case 3: /* bset */ + tcg_gen_ori_i32(tcg_ctx, tmp, src1, mask); + break; + default: /* btst */ + break; + } + DEST_EA(env, insn, opsize, tmp, &addr); + tcg_temp_free(tcg_ctx, tmp); + } +} + +static TCGv gen_get_ccr(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv dest; + + update_cc_op(s); + dest = tcg_temp_new(tcg_ctx); + gen_helper_get_ccr(tcg_ctx, dest, tcg_ctx->cpu_env); + return dest; +} + +static TCGv gen_get_sr(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv ccr; + TCGv sr; + + ccr = gen_get_ccr(s); + sr = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, sr, QREG_SR, 0xffe0); + tcg_gen_or_i32(tcg_ctx, sr, sr, ccr); + tcg_temp_free(tcg_ctx, ccr); + return sr; +} + +static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (ccr_only) { + tcg_gen_movi_i32(tcg_ctx, QREG_CC_C, val & CCF_C ? 1 : 0); + tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, val & CCF_V ? -1 : 0); + tcg_gen_movi_i32(tcg_ctx, QREG_CC_Z, val & CCF_Z ? 0 : 1); + tcg_gen_movi_i32(tcg_ctx, QREG_CC_N, val & CCF_N ? 
-1 : 0); + tcg_gen_movi_i32(tcg_ctx, QREG_CC_X, val & CCF_X ? 1 : 0); + } else { + TCGv sr = tcg_const_i32(tcg_ctx, val); + gen_helper_set_sr(tcg_ctx, tcg_ctx->cpu_env, sr); + tcg_temp_free(tcg_ctx, sr); + } + set_cc_op(s, CC_OP_FLAGS); +} + +static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (ccr_only) { + gen_helper_set_ccr(tcg_ctx, tcg_ctx->cpu_env, val); + } else { + gen_helper_set_sr(tcg_ctx, tcg_ctx->cpu_env, val); + } + set_cc_op(s, CC_OP_FLAGS); +} + +static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn, + bool ccr_only) +{ + if ((insn & 0x3f) == 0x3c) { + uint16_t val; + val = read_im16(env, s); + gen_set_sr_im(s, val, ccr_only); + } else { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + SRC_EA(env, src, OS_WORD, 0, NULL); + gen_set_sr(s, src, ccr_only); + } +} + +DISAS_INSN(arith_im) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int op; + TCGv im; + TCGv src1; + TCGv dest; + TCGv addr; + int opsize; + bool with_SR = ((insn & 0x3f) == 0x3c); + + op = (insn >> 9) & 7; + opsize = insn_opsize(insn); + switch (opsize) { + case OS_BYTE: + im = tcg_const_i32(tcg_ctx, (int8_t)read_im8(env, s)); + break; + case OS_WORD: + im = tcg_const_i32(tcg_ctx, (int16_t)read_im16(env, s)); + break; + case OS_LONG: + im = tcg_const_i32(tcg_ctx, read_im32(env, s)); + break; + default: + g_assert_not_reached(); + } + + if (with_SR) { + /* SR/CCR can only be used with andi/eori/ori */ + if (op == 2 || op == 3 || op == 6) { + disas_undef(env, s, insn); + return; + } + switch (opsize) { + case OS_BYTE: + src1 = gen_get_ccr(s); + break; + case OS_WORD: + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + src1 = gen_get_sr(s); + break; + default: + /* OS_LONG; others already g_assert_not_reached. */ + disas_undef(env, s, insn); + return; + } + } else { + SRC_EA(env, src1, opsize, 1, (op == 6) ? 
NULL : &addr);
+    }
+    dest = tcg_temp_new(tcg_ctx);
+    switch (op) {
+    case 0: /* ori */
+        tcg_gen_or_i32(tcg_ctx, dest, src1, im);
+        if (with_SR) {
+            gen_set_sr(s, dest, opsize == OS_BYTE);
+        } else {
+            DEST_EA(env, insn, opsize, dest, &addr);
+            gen_logic_cc(s, dest, opsize);
+        }
+        break;
+    case 1: /* andi */
+        tcg_gen_and_i32(tcg_ctx, dest, src1, im);
+        if (with_SR) {
+            gen_set_sr(s, dest, opsize == OS_BYTE);
+        } else {
+            DEST_EA(env, insn, opsize, dest, &addr);
+            gen_logic_cc(s, dest, opsize);
+        }
+        break;
+    case 2: /* subi */
+        tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, src1, im);
+        tcg_gen_sub_i32(tcg_ctx, dest, src1, im);
+        gen_update_cc_add(tcg_ctx, dest, im, opsize);
+        set_cc_op(s, CC_OP_SUBB + opsize);
+        DEST_EA(env, insn, opsize, dest, &addr);
+        break;
+    case 3: /* addi */
+        tcg_gen_add_i32(tcg_ctx, dest, src1, im);
+        gen_update_cc_add(tcg_ctx, dest, im, opsize);
+        tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, dest, im);
+        set_cc_op(s, CC_OP_ADDB + opsize);
+        DEST_EA(env, insn, opsize, dest, &addr);
+        break;
+    case 5: /* eori */
+        tcg_gen_xor_i32(tcg_ctx, dest, src1, im);
+        if (with_SR) {
+            gen_set_sr(s, dest, opsize == OS_BYTE);
+        } else {
+            DEST_EA(env, insn, opsize, dest, &addr);
+            gen_logic_cc(s, dest, opsize);
+        }
+        break;
+    case 6: /* cmpi */
+        gen_update_cc_cmp(s, src1, im, opsize);
+        break;
+    default:
+        abort();
+    }
+    tcg_temp_free(tcg_ctx, im);
+    tcg_temp_free(tcg_ctx, dest);
+}
+
+DISAS_INSN(cas)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int opsize;
+    TCGv addr;
+    uint16_t ext;
+    TCGv load;
+    TCGv cmp;
+    MemOp opc;
+
+    switch ((insn >> 9) & 3) {
+    case 1:
+        opsize = OS_BYTE;
+        opc = MO_SB;
+        break;
+    case 2:
+        opsize = OS_WORD;
+        opc = MO_TESW;
+        break;
+    case 3:
+        opsize = OS_LONG;
+        opc = MO_TESL;
+        break;
+    default:
+        /* unreachable */
+        abort();
+    }
+
+    ext = read_im16(env, s);
+
+    /* cas Dc,Du,<EA> */
+
+    addr = gen_lea(env, s, insn, opsize);
+    if (IS_NULL_QREG(addr)) {
+        gen_addr_fault(s);
+        return;
+    }
+
+    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);
+
+    /*
+     * if <EA> == Dc then
+     *     <EA> = Du
+     *     Dc = <EA> (because <EA> == Dc)
+     * else
+     *     Dc = <EA>
+     */
+
+    load = tcg_temp_new(tcg_ctx);
+    tcg_gen_atomic_cmpxchg_i32(tcg_ctx, load, addr, cmp, DREG(ext, 6),
+                               IS_USER(s), opc);
+    /* update flags before setting cmp to load */
+    gen_update_cc_cmp(s, load, cmp, opsize);
+    gen_partset_reg(tcg_ctx, opsize, DREG(ext, 0), load);
+
+    tcg_temp_free(tcg_ctx, load);
+
+    switch (extract32(insn, 3, 3)) {
+    case 3: /* Indirect postincrement. */
+        tcg_gen_addi_i32(tcg_ctx, AREG(insn, 0), addr, opsize_bytes(opsize));
+        break;
+    case 4: /* Indirect predecrement. */
+        tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr);
+        break;
+    }
+}
+
+DISAS_INSN(cas2w)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    uint16_t ext1, ext2;
+    TCGv addr1, addr2;
+    TCGv regs;
+
+    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
+
+    ext1 = read_im16(env, s);
+
+    if (ext1 & 0x8000) {
+        /* Address Register */
+        addr1 = AREG(ext1, 12);
+    } else {
+        /* Data Register */
+        addr1 = DREG(ext1, 12);
+    }
+
+    ext2 = read_im16(env, s);
+    if (ext2 & 0x8000) {
+        /* Address Register */
+        addr2 = AREG(ext2, 12);
+    } else {
+        /* Data Register */
+        addr2 = DREG(ext2, 12);
+    }
+
+    /*
+     * if (R1) == Dc1 && (R2) == Dc2 then
+     *     (R1) = Du1
+     *     (R2) = Du2
+     * else
+     *     Dc1 = (R1)
+     *     Dc2 = (R2)
+     */
+
+    regs = tcg_const_i32(tcg_ctx, REG(ext2, 6) |
+                         (REG(ext1, 6) << 3) |
+                         (REG(ext2, 0) << 6) |
+                         (REG(ext1, 0) << 9));
+    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+        gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
+    } else {
+        gen_helper_cas2w(tcg_ctx, tcg_ctx->cpu_env, regs, addr1, addr2);
+    }
+    tcg_temp_free(tcg_ctx, regs);
+
+    /* Note that the cas2w helper also assigns to env->cc_op. */
+    s->cc_op = CC_OP_CMPW;
+    s->cc_op_synced = 1;
+}
+
+DISAS_INSN(cas2l)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    uint16_t ext1, ext2;
+    TCGv addr1, addr2, regs;
+
+    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
+
+    ext1 = read_im16(env, s);
+
+    if (ext1 & 0x8000) {
+        /* Address Register */
+        addr1 = AREG(ext1, 12);
+    } else {
+        /* Data Register */
+        addr1 = DREG(ext1, 12);
+    }
+
+    ext2 = read_im16(env, s);
+    if (ext2 & 0x8000) {
+        /* Address Register */
+        addr2 = AREG(ext2, 12);
+    } else {
+        /* Data Register */
+        addr2 = DREG(ext2, 12);
+    }
+
+    /*
+     * if (R1) == Dc1 && (R2) == Dc2 then
+     *     (R1) = Du1
+     *     (R2) = Du2
+     * else
+     *     Dc1 = (R1)
+     *     Dc2 = (R2)
+     */
+
+    regs = tcg_const_i32(tcg_ctx, REG(ext2, 6) |
+                         (REG(ext1, 6) << 3) |
+                         (REG(ext2, 0) << 6) |
+                         (REG(ext1, 0) << 9));
+    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+        gen_helper_cas2l_parallel(tcg_ctx, tcg_ctx->cpu_env, regs, addr1, addr2);
+    } else {
+        gen_helper_cas2l(tcg_ctx, tcg_ctx->cpu_env, regs, addr1, addr2);
+    }
+    tcg_temp_free(tcg_ctx, regs);
+
+    /* Note that the cas2l helper also assigns to env->cc_op. */
+    s->cc_op = CC_OP_CMPL;
+    s->cc_op_synced = 1;
+}
+
+DISAS_INSN(byterev)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv reg;
+
+    reg = DREG(insn, 0);
+    tcg_gen_bswap32_i32(tcg_ctx, reg, reg);
+}
+
+DISAS_INSN(move)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src;
+    TCGv dest;
+    int op;
+    int opsize;
+
+    switch (insn >> 12) {
+    case 1: /* move.b */
+        opsize = OS_BYTE;
+        break;
+    case 2: /* move.l */
+        opsize = OS_LONG;
+        break;
+    case 3: /* move.w */
+        opsize = OS_WORD;
+        break;
+    default:
+        abort();
+    }
+    SRC_EA(env, src, opsize, 1, NULL);
+    op = (insn >> 6) & 7;
+    if (op == 1) {
+        /* movea */
+        /* The value will already have been sign extended. */
+        dest = AREG(insn, 9);
+        tcg_gen_mov_i32(tcg_ctx, dest, src);
+    } else {
+        /* normal move */
+        uint16_t dest_ea;
+        dest_ea = ((insn >> 9) & 7) | (op << 3);
+        DEST_EA(env, dest_ea, opsize, src, NULL);
+        /* This will be correct because loads sign extend. */
+        gen_logic_cc(s, src, opsize);
+    }
+}
+
+DISAS_INSN(negx)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv z;
+    TCGv src;
+    TCGv addr;
+    int opsize;
+
+    opsize = insn_opsize(insn);
+    SRC_EA(env, src, opsize, 1, &addr);
+
+    gen_flush_flags(s); /* compute old Z */
+
+    /*
+     * Perform subtraction with borrow.
+     * (X, N) = -(src + X);
+     */
+
+    z = tcg_const_i32(tcg_ctx, 0);
+    tcg_gen_add2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
+    tcg_gen_sub2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
+    tcg_temp_free(tcg_ctx, z);
+    gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, opsize, 1);
+
+    tcg_gen_andi_i32(tcg_ctx, QREG_CC_X, QREG_CC_X, 1);
+
+    /*
+     * Compute signed-overflow for negation. The normal formula for
+     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
+     * this simplifies to res & src.
+     */
+
+    tcg_gen_and_i32(tcg_ctx, QREG_CC_V, QREG_CC_N, src);
+
+    /* Copy the rest of the results into place. */
+    tcg_gen_or_i32(tcg_ctx, QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
+    tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, QREG_CC_X);
+
+    set_cc_op(s, CC_OP_FLAGS);
+
+    /* result is in QREG_CC_N */
+
+    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
+}
+
+DISAS_INSN(lea)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv reg;
+    TCGv tmp;
+
+    reg = AREG(insn, 9);
+    tmp = gen_lea(env, s, insn, OS_LONG);
+    if (IS_NULL_QREG(tmp)) {
+        gen_addr_fault(s);
+        return;
+    }
+    tcg_gen_mov_i32(tcg_ctx, reg, tmp);
+}
+
+DISAS_INSN(clr)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int opsize;
+    TCGv zero;
+
+    zero = tcg_const_i32(tcg_ctx, 0);
+
+    opsize = insn_opsize(insn);
+    DEST_EA(env, insn, opsize, zero, NULL);
+    gen_logic_cc(s, zero, opsize);
+    tcg_temp_free(tcg_ctx, zero);
+}
+
+DISAS_INSN(move_from_ccr)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv ccr;
+
+    ccr = gen_get_ccr(s);
+    DEST_EA(env, insn, OS_WORD, ccr, NULL);
+}
+
+DISAS_INSN(neg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src1;
+    TCGv dest;
+    TCGv addr;
+    int opsize;
+
+    opsize = insn_opsize(insn);
+    SRC_EA(env, src1, opsize, 1, &addr);
+    dest = tcg_temp_new(tcg_ctx);
+    tcg_gen_neg_i32(tcg_ctx, dest, src1);
+    set_cc_op(s, CC_OP_SUBB + opsize);
+    gen_update_cc_add(tcg_ctx, dest, src1, opsize);
+    tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_NE, QREG_CC_X, dest, 0);
+    DEST_EA(env, insn, opsize, dest, &addr);
+    tcg_temp_free(tcg_ctx, dest);
+}
+
+DISAS_INSN(move_to_ccr)
+{
+    gen_move_to_sr(env, s, insn, true);
+}
+
+DISAS_INSN(not)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src1;
+    TCGv dest;
+    TCGv addr;
+    int opsize;
+
+    opsize = insn_opsize(insn);
+    SRC_EA(env, src1, opsize, 1, &addr);
+    dest = tcg_temp_new(tcg_ctx);
+    tcg_gen_not_i32(tcg_ctx, dest, src1);
+    DEST_EA(env, insn, opsize, dest, &addr);
+    gen_logic_cc(s, dest, opsize);
+}
+
+DISAS_INSN(swap)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src1;
+    TCGv src2;
+    TCGv reg;
+
+    src1 = tcg_temp_new(tcg_ctx);
+    src2 = tcg_temp_new(tcg_ctx);
+    reg = DREG(insn, 0);
+    tcg_gen_shli_i32(tcg_ctx, src1, reg, 16);
+    tcg_gen_shri_i32(tcg_ctx, src2, reg, 16);
+    tcg_gen_or_i32(tcg_ctx, reg, src1, src2);
+    tcg_temp_free(tcg_ctx, src2);
+    tcg_temp_free(tcg_ctx, src1);
+    gen_logic_cc(s, reg, OS_LONG);
+}
+
+DISAS_INSN(bkpt)
+{
+    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
+}
+
+DISAS_INSN(pea)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv tmp;
+
+    tmp = gen_lea(env, s, insn, OS_LONG);
+    if (IS_NULL_QREG(tmp)) {
+        gen_addr_fault(s);
+        return;
+    }
+    gen_push(s, tmp);
+}
+
+DISAS_INSN(ext)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int op;
+    TCGv reg;
+    TCGv tmp;
+
+    reg = DREG(insn, 0);
+    op = (insn >> 6) & 7;
+    tmp = tcg_temp_new(tcg_ctx);
+    if (op == 3)
+        tcg_gen_ext16s_i32(tcg_ctx, tmp, reg);
+    else
+        tcg_gen_ext8s_i32(tcg_ctx, tmp, reg);
+    if (op == 2)
+        gen_partset_reg(tcg_ctx, OS_WORD, reg, tmp);
+    else
+        tcg_gen_mov_i32(tcg_ctx, reg, tmp);
+
gen_logic_cc(s, tmp, OS_LONG); + tcg_temp_free(tcg_ctx, tmp); +} + +DISAS_INSN(tst) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + TCGv tmp; + + opsize = insn_opsize(insn); + SRC_EA(env, tmp, opsize, 1, NULL); + gen_logic_cc(s, tmp, opsize); +} + +DISAS_INSN(pulse) +{ + /* Implemented as a NOP. */ +} + +DISAS_INSN(illegal) +{ + gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); +} + +/* ??? This should be atomic. */ +DISAS_INSN(tas) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv dest; + TCGv src1; + TCGv addr; + + dest = tcg_temp_new(tcg_ctx); + SRC_EA(env, src1, OS_BYTE, 1, &addr); + gen_logic_cc(s, src1, OS_BYTE); + tcg_gen_ori_i32(tcg_ctx, dest, src1, 0x80); + DEST_EA(env, insn, OS_BYTE, dest, &addr); + tcg_temp_free(tcg_ctx, dest); +} + +DISAS_INSN(mull) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint16_t ext; + TCGv src1; + int sign; + + ext = read_im16(env, s); + + sign = ext & 0x800; + + if (ext & 0x400) { + if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) { + gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); + return; + } + + SRC_EA(env, src1, OS_LONG, 0, NULL); + + if (sign) { + tcg_gen_muls2_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12)); + } else { + tcg_gen_mulu2_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12)); + } + /* if Dl == Dh, 68040 returns low word */ + tcg_gen_mov_i32(tcg_ctx, DREG(ext, 0), QREG_CC_N); + tcg_gen_mov_i32(tcg_ctx, DREG(ext, 12), QREG_CC_Z); + tcg_gen_or_i32(tcg_ctx, QREG_CC_Z, QREG_CC_Z, QREG_CC_N); + + tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); + tcg_gen_movi_i32(tcg_ctx, QREG_CC_C, 0); + + set_cc_op(s, CC_OP_FLAGS); + return; + } + SRC_EA(env, src1, OS_LONG, 0, NULL); + if (m68k_feature(s->env, M68K_FEATURE_M68000)) { + tcg_gen_movi_i32(tcg_ctx, QREG_CC_C, 0); + if (sign) { + tcg_gen_muls2_i32(tcg_ctx, QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12)); + /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */ + tcg_gen_sari_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N, 31); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z); + } else { + tcg_gen_mulu2_i32(tcg_ctx, QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12)); + /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */ + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C); + } + tcg_gen_neg_i32(tcg_ctx, QREG_CC_V, QREG_CC_V); + tcg_gen_mov_i32(tcg_ctx, DREG(ext, 12), QREG_CC_N); + + tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); + + set_cc_op(s, CC_OP_FLAGS); + } else { + /* + * The upper 32 bits of the product are discarded, so + * muls.l and mulu.l are functionally equivalent. 
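+         * E.g. "mulu.l d1,d2" leaves d2 = (d1 * d2) & 0xffffffff,
+         * exactly what "muls.l d1,d2" would produce for the same
+         * operands.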
+ */ + tcg_gen_mul_i32(tcg_ctx, DREG(ext, 12), src1, DREG(ext, 12)); + gen_logic_cc(s, DREG(ext, 12), OS_LONG); + } +} + +static void gen_link(DisasContext *s, uint16_t insn, int32_t offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv tmp; + + reg = AREG(insn, 0); + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_subi_i32(tcg_ctx, tmp, QREG_SP, 4); + gen_store(s, OS_LONG, tmp, reg, IS_USER(s)); + if ((insn & 7) != 7) { + tcg_gen_mov_i32(tcg_ctx, reg, tmp); + } + tcg_gen_addi_i32(tcg_ctx, QREG_SP, tmp, offset); + tcg_temp_free(tcg_ctx, tmp); +} + +DISAS_INSN(link) +{ + int16_t offset; + + offset = read_im16(env, s); + gen_link(s, insn, offset); +} + +DISAS_INSN(linkl) +{ + int32_t offset; + + offset = read_im32(env, s); + gen_link(s, insn, offset); +} + +DISAS_INSN(unlk) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv reg; + TCGv tmp; + + src = tcg_temp_new(tcg_ctx); + reg = AREG(insn, 0); + tcg_gen_mov_i32(tcg_ctx, src, reg); + tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s)); + tcg_gen_mov_i32(tcg_ctx, reg, tmp); + tcg_gen_addi_i32(tcg_ctx, QREG_SP, src, 4); + tcg_temp_free(tcg_ctx, src); + tcg_temp_free(tcg_ctx, tmp); +} + +DISAS_INSN(reset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + + gen_helper_reset(tcg_ctx, tcg_ctx->cpu_env); +} + +DISAS_INSN(nop) +{ +} + +DISAS_INSN(rtd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + int16_t offset = read_im16(env, s); + + tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s)); + tcg_gen_addi_i32(tcg_ctx, QREG_SP, QREG_SP, offset + 4); + gen_jmp(s, tmp); +} + +DISAS_INSN(rts) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s)); + tcg_gen_addi_i32(tcg_ctx, QREG_SP, QREG_SP, 4); + gen_jmp(s, tmp); +} + +DISAS_INSN(jump) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + /* + * Load the target address first to ensure correct exception + * behavior. + */ + tmp = gen_lea(env, s, insn, OS_LONG); + if (IS_NULL_QREG(tmp)) { + gen_addr_fault(s); + return; + } + if ((insn & 0x40) == 0) { + /* jsr */ + gen_push(s, tcg_const_i32(tcg_ctx, s->pc)); + } + gen_jmp(s, tmp); +} + +DISAS_INSN(addsubq) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv dest; + TCGv val; + int imm; + TCGv addr; + int opsize; + + if ((insn & 070) == 010) { + /* Operation on address register is always long. */ + opsize = OS_LONG; + } else { + opsize = insn_opsize(insn); + } + SRC_EA(env, src, opsize, 1, &addr); + imm = (insn >> 9) & 7; + if (imm == 0) { + imm = 8; + } + val = tcg_const_i32(tcg_ctx, imm); + dest = tcg_temp_new(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, dest, src); + if ((insn & 0x38) == 0x08) { + /* + * Don't update condition codes if the destination is an + * address register. + */ + if (insn & 0x0100) { + tcg_gen_sub_i32(tcg_ctx, dest, dest, val); + } else { + tcg_gen_add_i32(tcg_ctx, dest, dest, val); + } + } else { + if (insn & 0x0100) { + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, dest, val); + tcg_gen_sub_i32(tcg_ctx, dest, dest, val); + set_cc_op(s, CC_OP_SUBB + opsize); + } else { + tcg_gen_add_i32(tcg_ctx, dest, dest, val); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, QREG_CC_X, dest, val); + set_cc_op(s, CC_OP_ADDB + opsize); + } + gen_update_cc_add(tcg_ctx, dest, val, opsize); + } + tcg_temp_free(tcg_ctx, val); + DEST_EA(env, insn, opsize, dest, &addr); + tcg_temp_free(tcg_ctx, dest); +} + +DISAS_INSN(tpf) +{ + switch (insn & 7) { + case 2: /* One extension word. 
*/
+        s->pc += 2;
+        break;
+    case 3: /* Two extension words. */
+        s->pc += 4;
+        break;
+    case 4: /* No extension words. */
+        break;
+    default:
+        disas_undef(env, s, insn);
+    }
+}
+
+DISAS_INSN(branch)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int32_t offset;
+    uint32_t base;
+    int op;
+
+    base = s->pc;
+    op = (insn >> 8) & 0xf;
+    offset = (int8_t)insn;
+    if (offset == 0) {
+        offset = (int16_t)read_im16(env, s);
+    } else if (offset == -1) {
+        offset = read_im32(env, s);
+    }
+    if (op == 1) {
+        /* bsr */
+        gen_push(s, tcg_const_i32(tcg_ctx, s->pc));
+    }
+    if (op > 1) {
+        /* Bcc */
+        TCGLabel *l1 = gen_new_label(tcg_ctx);
+        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
+        gen_jmp_tb(s, 1, base + offset);
+        gen_set_label(tcg_ctx, l1);
+        gen_jmp_tb(s, 0, s->pc);
+    } else {
+        /* Unconditional branch. */
+        update_cc_op(s);
+        gen_jmp_tb(s, 0, base + offset);
+    }
+}
+
+DISAS_INSN(moveq)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    tcg_gen_movi_i32(tcg_ctx, DREG(insn, 9), (int8_t)insn);
+    gen_logic_cc(s, DREG(insn, 9), OS_LONG);
+}
+
+DISAS_INSN(mvzs)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int opsize;
+    TCGv src;
+    TCGv reg;
+
+    if (insn & 0x40)
+        opsize = OS_WORD;
+    else
+        opsize = OS_BYTE;
+    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
+    reg = DREG(insn, 9);
+    tcg_gen_mov_i32(tcg_ctx, reg, src);
+    gen_logic_cc(s, src, opsize);
+}
+
+DISAS_INSN(or)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv reg;
+    TCGv dest;
+    TCGv src;
+    TCGv addr;
+    int opsize;
+
+    opsize = insn_opsize(insn);
+    reg = gen_extend(s, DREG(insn, 9), opsize, 0);
+    dest = tcg_temp_new(tcg_ctx);
+    if (insn & 0x100) {
+        SRC_EA(env, src, opsize, 0, &addr);
+        tcg_gen_or_i32(tcg_ctx, dest, src, reg);
+        DEST_EA(env, insn, opsize, dest, &addr);
+    } else {
+        SRC_EA(env, src, opsize, 0, NULL);
+        tcg_gen_or_i32(tcg_ctx, dest, src, reg);
+        gen_partset_reg(tcg_ctx, opsize, DREG(insn, 9), dest);
+    }
+    gen_logic_cc(s, dest, opsize);
+    tcg_temp_free(tcg_ctx, dest);
+}
+
+DISAS_INSN(suba)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src;
+    TCGv reg;
+
+    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
+    reg = AREG(insn, 9);
+    tcg_gen_sub_i32(tcg_ctx, reg, reg, src);
+}
+
+static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv tmp;
+
+    gen_flush_flags(s); /* compute old Z */
+
+    /*
+     * Perform subtraction with borrow.
+     * (X, N) = dest - (src + X);
+     */
+
+    tmp = tcg_const_i32(tcg_ctx, 0);
+    tcg_gen_add2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
+    tcg_gen_sub2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
+    gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, opsize, 1);
+    tcg_gen_andi_i32(tcg_ctx, QREG_CC_X, QREG_CC_X, 1);
+
+    /* Compute signed-overflow for subtraction. */
+
+    tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_N, dest);
+    tcg_gen_xor_i32(tcg_ctx, tmp, dest, src);
+    tcg_gen_and_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, tmp);
+    tcg_temp_free(tcg_ctx, tmp);
+
+    /* Copy the rest of the results into place.
*/ + tcg_gen_or_i32(tcg_ctx, QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */ + tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, QREG_CC_X); + + set_cc_op(s, CC_OP_FLAGS); + + /* result is in QREG_CC_N */ +} + +DISAS_INSN(subx_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv dest; + TCGv src; + int opsize; + + opsize = insn_opsize(insn); + + src = gen_extend(s, DREG(insn, 0), opsize, 1); + dest = gen_extend(s, DREG(insn, 9), opsize, 1); + + gen_subx(s, src, dest, opsize); + + gen_partset_reg(tcg_ctx, opsize, DREG(insn, 9), QREG_CC_N); +} + +DISAS_INSN(subx_mem) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv addr_src; + TCGv dest; + TCGv addr_dest; + int opsize; + + opsize = insn_opsize(insn); + + addr_src = AREG(insn, 0); + tcg_gen_subi_i32(tcg_ctx, addr_src, addr_src, opsize_bytes(opsize)); + src = gen_load(s, opsize, addr_src, 1, IS_USER(s)); + + addr_dest = AREG(insn, 9); + tcg_gen_subi_i32(tcg_ctx, addr_dest, addr_dest, opsize_bytes(opsize)); + dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s)); + + gen_subx(s, src, dest, opsize); + + gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s)); + + tcg_temp_free(tcg_ctx, dest); + tcg_temp_free(tcg_ctx, src); +} + +DISAS_INSN(mov3q) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + int val; + + val = (insn >> 9) & 7; + if (val == 0) + val = -1; + src = tcg_const_i32(tcg_ctx, val); + gen_logic_cc(s, src, OS_LONG); + DEST_EA(env, insn, OS_LONG, src, NULL); + tcg_temp_free(tcg_ctx, src); +} + +DISAS_INSN(cmp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv reg; + int opsize; + + opsize = insn_opsize(insn); + SRC_EA(env, src, opsize, 1, NULL); + reg = gen_extend(s, DREG(insn, 9), opsize, 1); + gen_update_cc_cmp(s, reg, src, opsize); +} + +DISAS_INSN(cmpa) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + TCGv src; + TCGv reg; + + if (insn & 0x100) { + opsize = OS_LONG; + } else { + opsize = OS_WORD; + } + SRC_EA(env, src, opsize, 1, NULL); + reg = AREG(insn, 9); + gen_update_cc_cmp(s, reg, src, OS_LONG); +} + +DISAS_INSN(cmpm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize = insn_opsize(insn); + TCGv src, dst; + + /* Post-increment load (mode 3) from Ay. */ + src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize, + tcg_ctx->NULL_QREG, NULL, EA_LOADS, IS_USER(s)); + /* Post-increment load (mode 3) from Ax. 
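+     * (so e.g. "cmpm.b (a0)+,(a1)+" loads both bytes, advances both
+     * address registers, and only updates the flags)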
*/ + dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize, + tcg_ctx->NULL_QREG, NULL, EA_LOADS, IS_USER(s)); + + gen_update_cc_cmp(s, dst, src, opsize); +} + +DISAS_INSN(eor) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv dest; + TCGv addr; + int opsize; + + opsize = insn_opsize(insn); + + SRC_EA(env, src, opsize, 0, &addr); + dest = tcg_temp_new(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, dest, src, DREG(insn, 9)); + gen_logic_cc(s, dest, opsize); + DEST_EA(env, insn, opsize, dest, &addr); + tcg_temp_free(tcg_ctx, dest); +} + +static void do_exg(TCGContext *tcg_ctx, TCGv reg1, TCGv reg2) +{ + TCGv temp = tcg_temp_new(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, temp, reg1); + tcg_gen_mov_i32(tcg_ctx, reg1, reg2); + tcg_gen_mov_i32(tcg_ctx, reg2, temp); + tcg_temp_free(tcg_ctx, temp); +} + +DISAS_INSN(exg_dd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* exchange Dx and Dy */ + do_exg(tcg_ctx, DREG(insn, 9), DREG(insn, 0)); +} + +DISAS_INSN(exg_aa) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* exchange Ax and Ay */ + do_exg(tcg_ctx, AREG(insn, 9), AREG(insn, 0)); +} + +DISAS_INSN(exg_da) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* exchange Dx and Ay */ + do_exg(tcg_ctx, DREG(insn, 9), AREG(insn, 0)); +} + +DISAS_INSN(and) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv reg; + TCGv dest; + TCGv addr; + int opsize; + + dest = tcg_temp_new(tcg_ctx); + + opsize = insn_opsize(insn); + reg = DREG(insn, 9); + if (insn & 0x100) { + SRC_EA(env, src, opsize, 0, &addr); + tcg_gen_and_i32(tcg_ctx, dest, src, reg); + DEST_EA(env, insn, opsize, dest, &addr); + } else { + SRC_EA(env, src, opsize, 0, NULL); + tcg_gen_and_i32(tcg_ctx, dest, src, reg); + gen_partset_reg(tcg_ctx, opsize, reg, dest); + } + gen_logic_cc(s, dest, opsize); + tcg_temp_free(tcg_ctx, dest); +} + +DISAS_INSN(adda) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv reg; + + SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL); + reg = AREG(insn, 9); + tcg_gen_add_i32(tcg_ctx, reg, reg, src); +} + +static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + gen_flush_flags(s); /* compute old Z */ + + /* + * Perform addition with carry. + * (X, N) = src + dest + X; + */ + + tmp = tcg_const_i32(tcg_ctx, 0); + tcg_gen_add2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp); + tcg_gen_add2_i32(tcg_ctx, QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp); + gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, opsize, 1); + + /* Compute signed-overflow for addition. */ + + tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_N, src); + tcg_gen_xor_i32(tcg_ctx, tmp, dest, src); + tcg_gen_andc_i32(tcg_ctx, QREG_CC_V, QREG_CC_V, tmp); + tcg_temp_free(tcg_ctx, tmp); + + /* Copy the rest of the results into place. 
*/
+    tcg_gen_or_i32(tcg_ctx, QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
+    tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, QREG_CC_X);
+
+    set_cc_op(s, CC_OP_FLAGS);
+
+    /* result is in QREG_CC_N */
+}
+
+DISAS_INSN(addx_reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv dest;
+    TCGv src;
+    int opsize;
+
+    opsize = insn_opsize(insn);
+
+    dest = gen_extend(s, DREG(insn, 9), opsize, 1);
+    src = gen_extend(s, DREG(insn, 0), opsize, 1);
+
+    gen_addx(s, src, dest, opsize);
+
+    gen_partset_reg(tcg_ctx, opsize, DREG(insn, 9), QREG_CC_N);
+}
+
+DISAS_INSN(addx_mem)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv src;
+    TCGv addr_src;
+    TCGv dest;
+    TCGv addr_dest;
+    int opsize;
+
+    opsize = insn_opsize(insn);
+
+    addr_src = AREG(insn, 0);
+    tcg_gen_subi_i32(tcg_ctx, addr_src, addr_src, opsize_bytes(opsize));
+    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
+
+    addr_dest = AREG(insn, 9);
+    tcg_gen_subi_i32(tcg_ctx, addr_dest, addr_dest, opsize_bytes(opsize));
+    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
+
+    gen_addx(s, src, dest, opsize);
+
+    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
+
+    tcg_temp_free(tcg_ctx, dest);
+    tcg_temp_free(tcg_ctx, src);
+}
+
+static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int count = (insn >> 9) & 7;
+    int logical = insn & 8;
+    int left = insn & 0x100;
+    int bits = opsize_bytes(opsize) * 8;
+    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
+
+    if (count == 0) {
+        count = 8;
+    }
+
+    tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0);
+    if (left) {
+        tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, reg, bits - count);
+        tcg_gen_shli_i32(tcg_ctx, QREG_CC_N, reg, count);
+
+        /*
+         * Note that ColdFire always clears V (done above),
+         * while M68000 sets V if the most significant bit is changed at
+         * any time during the shift operation.
+         */
+        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+            /* if shift count >= bits, V is (reg != 0) */
+            if (count >= bits) {
+                tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
+            } else {
+                TCGv t0 = tcg_temp_new(tcg_ctx);
+                tcg_gen_sari_i32(tcg_ctx, QREG_CC_V, reg, bits - 1);
+                tcg_gen_sari_i32(tcg_ctx, t0, reg, bits - count - 1);
+                tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
+                tcg_temp_free(tcg_ctx, t0);
+            }
+            tcg_gen_neg_i32(tcg_ctx, QREG_CC_V, QREG_CC_V);
+        }
+    } else {
+        tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, reg, count - 1);
+        if (logical) {
+            tcg_gen_shri_i32(tcg_ctx, QREG_CC_N, reg, count);
+        } else {
+            tcg_gen_sari_i32(tcg_ctx, QREG_CC_N, reg, count);
+        }
+    }
+
+    gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, opsize, 1);
+    tcg_gen_andi_i32(tcg_ctx, QREG_CC_C, QREG_CC_C, 1);
+    tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N);
+    tcg_gen_mov_i32(tcg_ctx, QREG_CC_X, QREG_CC_C);
+
+    gen_partset_reg(tcg_ctx, opsize, DREG(insn, 0), QREG_CC_N);
+    set_cc_op(s, CC_OP_FLAGS);
+}
+
+static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int logical = insn & 8;
+    int left = insn & 0x100;
+    int bits = opsize_bytes(opsize) * 8;
+    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
+    TCGv s32;
+    TCGv_i64 t64, s64;
+
+    t64 = tcg_temp_new_i64(tcg_ctx);
+    s64 = tcg_temp_new_i64(tcg_ctx);
+    s32 = tcg_temp_new(tcg_ctx);
+
+    /*
+     * Note that m68k truncates the shift count modulo 64, not 32.
+     * In addition, a 64-bit shift makes it easy to find "the last
+     * bit shifted out", for the carry flag.
+ */ + tcg_gen_andi_i32(tcg_ctx, s32, DREG(insn, 9), 63); + tcg_gen_extu_i32_i64(tcg_ctx, s64, s32); + tcg_gen_extu_i32_i64(tcg_ctx, t64, reg); + + /* Optimistically set V=0. Also used as a zero source below. */ + tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); + if (left) { + tcg_gen_shl_i64(tcg_ctx, t64, t64, s64); + + if (opsize == OS_LONG) { + tcg_gen_extr_i64_i32(tcg_ctx, QREG_CC_N, QREG_CC_C, t64); + /* Note that C=0 if shift count is 0, and we get that for free. */ + } else { + TCGv zero = tcg_const_i32(tcg_ctx, 0); + tcg_gen_extrl_i64_i32(tcg_ctx, QREG_CC_N, t64); + tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, QREG_CC_N, bits); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, QREG_CC_C, + s32, zero, zero, QREG_CC_C); + tcg_temp_free(tcg_ctx, zero); + } + tcg_gen_andi_i32(tcg_ctx, QREG_CC_C, QREG_CC_C, 1); + + /* X = C, but only if the shift count was non-zero. */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V, + QREG_CC_C, QREG_CC_X); + + /* + * M68000 sets V if the most significant bit is changed at + * any time during the shift operation. Do this via creating + * an extension of the sign bit, comparing, and discarding + * the bits below the sign bit. I.e. + * int64_t s = (intN_t)reg; + * int64_t t = (int64_t)(intN_t)reg << count; + * V = ((s ^ t) & (-1 << (bits - 1))) != 0 + */ + if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) { + TCGv_i64 tt = tcg_const_i64(tcg_ctx, 32); + /* if shift is greater than 32, use 32 */ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GT, s64, s64, tt, tt, s64); + tcg_temp_free_i64(tcg_ctx, tt); + /* Sign extend the input to 64 bits; re-do the shift. */ + tcg_gen_ext_i32_i64(tcg_ctx, t64, reg); + tcg_gen_shl_i64(tcg_ctx, s64, t64, s64); + /* Clear all bits that are unchanged. */ + tcg_gen_xor_i64(tcg_ctx, t64, t64, s64); + /* Ignore the bits below the sign bit. */ +#ifdef _MSC_VER + tcg_gen_andi_i64(tcg_ctx, t64, t64, 0xffffffffffffffffULL << (bits - 1)); +#else + tcg_gen_andi_i64(tcg_ctx, t64, t64, -1ULL << (bits - 1)); +#endif + /* If any bits remain set, we have overflow. */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, t64, t64, 0); + tcg_gen_extrl_i64_i32(tcg_ctx, QREG_CC_V, t64); + tcg_gen_neg_i32(tcg_ctx, QREG_CC_V, QREG_CC_V); + } + } else { + tcg_gen_shli_i64(tcg_ctx, t64, t64, 32); + if (logical) { + tcg_gen_shr_i64(tcg_ctx, t64, t64, s64); + } else { + tcg_gen_sar_i64(tcg_ctx, t64, t64, s64); + } + tcg_gen_extr_i64_i32(tcg_ctx, QREG_CC_C, QREG_CC_N, t64); + + /* Note that C=0 if shift count is 0, and we get that for free. */ + tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, QREG_CC_C, 31); + + /* X = C, but only if the shift count was non-zero. */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V, + QREG_CC_C, QREG_CC_X); + } + gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, opsize, 1); + tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N); + + tcg_temp_free(tcg_ctx, s32); + tcg_temp_free_i64(tcg_ctx, s64); + tcg_temp_free_i64(tcg_ctx, t64); + + /* Write back the result. 
*/
+    gen_partset_reg(tcg_ctx, opsize, DREG(insn, 0), QREG_CC_N);
+    set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(shift8_im)
+{
+    shift_im(s, insn, OS_BYTE);
+}
+
+DISAS_INSN(shift16_im)
+{
+    shift_im(s, insn, OS_WORD);
+}
+
+DISAS_INSN(shift_im)
+{
+    shift_im(s, insn, OS_LONG);
+}
+
+DISAS_INSN(shift8_reg)
+{
+    shift_reg(s, insn, OS_BYTE);
+}
+
+DISAS_INSN(shift16_reg)
+{
+    shift_reg(s, insn, OS_WORD);
+}
+
+DISAS_INSN(shift_reg)
+{
+    shift_reg(s, insn, OS_LONG);
+}
+
+DISAS_INSN(shift_mem)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    int logical = insn & 8;
+    int left = insn & 0x100;
+    TCGv src;
+    TCGv addr;
+
+    SRC_EA(env, src, OS_WORD, !logical, &addr);
+    tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0);
+    if (left) {
+        tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, src, 15);
+        tcg_gen_shli_i32(tcg_ctx, QREG_CC_N, src, 1);
+
+        /*
+         * Note that ColdFire always clears V,
+         * while M68000 sets V if the most significant bit is changed at
+         * any time during the shift operation.
+         */
+        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+            src = gen_extend(s, src, OS_WORD, 1);
+            tcg_gen_xor_i32(tcg_ctx, QREG_CC_V, QREG_CC_N, src);
+        }
+    } else {
+        tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, src);
+        if (logical) {
+            tcg_gen_shri_i32(tcg_ctx, QREG_CC_N, src, 1);
+        } else {
+            tcg_gen_sari_i32(tcg_ctx, QREG_CC_N, src, 1);
+        }
+    }
+
+    gen_ext(tcg_ctx, QREG_CC_N, QREG_CC_N, OS_WORD, 1);
+    tcg_gen_andi_i32(tcg_ctx, QREG_CC_C, QREG_CC_C, 1);
+    tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, QREG_CC_N);
+    tcg_gen_mov_i32(tcg_ctx, QREG_CC_X, QREG_CC_C);
+
+    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
+    set_cc_op(s, CC_OP_FLAGS);
+}
+
+static void rotate(TCGContext *tcg_ctx, TCGv reg, TCGv shift, int left, int size)
+{
+    switch (size) {
+    case 8:
+        /* Replicate the 8-bit input so that a 32-bit rotate works. */
+        tcg_gen_ext8u_i32(tcg_ctx, reg, reg);
+        tcg_gen_muli_i32(tcg_ctx, reg, reg, 0x01010101);
+        goto do_long;
+    case 16:
+        /* Replicate the 16-bit input so that a 32-bit rotate works.
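+         * E.g. 0xabcd becomes 0xabcdabcd; since the doubled word is
+         * periodic in 16 bits, any 32-bit rotation leaves the correct
+         * 16-bit result in the low half.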
*/ + tcg_gen_deposit_i32(tcg_ctx, reg, reg, reg, 16, 16); + goto do_long; + do_long: + default: + if (left) { + tcg_gen_rotl_i32(tcg_ctx, reg, reg, shift); + } else { + tcg_gen_rotr_i32(tcg_ctx, reg, reg, shift); + } + } + + /* compute flags */ + + switch (size) { + case 8: + tcg_gen_ext8s_i32(tcg_ctx, reg, reg); + break; + case 16: + tcg_gen_ext16s_i32(tcg_ctx, reg, reg); + break; + default: + break; + } + + /* QREG_CC_X is not affected */ + + tcg_gen_mov_i32(tcg_ctx, QREG_CC_N, reg); + tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, reg); + + if (left) { + tcg_gen_andi_i32(tcg_ctx, QREG_CC_C, reg, 1); + } else { + tcg_gen_shri_i32(tcg_ctx, QREG_CC_C, reg, 31); + } + + tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); /* always cleared */ +} + +static void rotate_x_flags(TCGContext *tcg_ctx, TCGv reg, TCGv X, int size) +{ + switch (size) { + case 8: + tcg_gen_ext8s_i32(tcg_ctx, reg, reg); + break; + case 16: + tcg_gen_ext16s_i32(tcg_ctx, reg, reg); + break; + default: + break; + } + tcg_gen_mov_i32(tcg_ctx, QREG_CC_N, reg); + tcg_gen_mov_i32(tcg_ctx, QREG_CC_Z, reg); + tcg_gen_mov_i32(tcg_ctx, QREG_CC_X, X); + tcg_gen_mov_i32(tcg_ctx, QREG_CC_C, X); + tcg_gen_movi_i32(tcg_ctx, QREG_CC_V, 0); +} + +/* Result of rotate_x() is valid if 0 <= shift <= size */ +static TCGv rotate_x(TCGContext *tcg_ctx, TCGv reg, TCGv shift, int left, int size) +{ + TCGv X, shl, shr, shx, sz, zero; + + sz = tcg_const_i32(tcg_ctx, size); + + shr = tcg_temp_new(tcg_ctx); + shl = tcg_temp_new(tcg_ctx); + shx = tcg_temp_new(tcg_ctx); + if (left) { + tcg_gen_mov_i32(tcg_ctx, shl, shift); /* shl = shift */ + tcg_gen_movi_i32(tcg_ctx, shr, size + 1); + tcg_gen_sub_i32(tcg_ctx, shr, shr, shift); /* shr = size + 1 - shift */ + tcg_gen_subi_i32(tcg_ctx, shx, shift, 1); /* shx = shift - 1 */ + /* shx = shx < 0 ? size : shx; */ + zero = tcg_const_i32(tcg_ctx, 0); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, shx, shx, zero, sz, shx); + tcg_temp_free(tcg_ctx, zero); + } else { + tcg_gen_mov_i32(tcg_ctx, shr, shift); /* shr = shift */ + tcg_gen_movi_i32(tcg_ctx, shl, size + 1); + tcg_gen_sub_i32(tcg_ctx, shl, shl, shift); /* shl = size + 1 - shift */ + tcg_gen_sub_i32(tcg_ctx, shx, sz, shift); /* shx = size - shift */ + } + tcg_temp_free_i32(tcg_ctx, sz); + + /* reg = (reg << shl) | (reg >> shr) | (x << shx); */ + + tcg_gen_shl_i32(tcg_ctx, shl, reg, shl); + tcg_gen_shr_i32(tcg_ctx, shr, reg, shr); + tcg_gen_or_i32(tcg_ctx, reg, shl, shr); + tcg_temp_free(tcg_ctx, shl); + tcg_temp_free(tcg_ctx, shr); + tcg_gen_shl_i32(tcg_ctx, shx, QREG_CC_X, shx); + tcg_gen_or_i32(tcg_ctx, reg, reg, shx); + tcg_temp_free(tcg_ctx, shx); + + /* X = (reg >> size) & 1 */ + + X = tcg_temp_new(tcg_ctx); + tcg_gen_extract_i32(tcg_ctx, X, reg, size, 1); + + return X; +} + +/* Result of rotate32_x() is valid if 0 <= shift < 33 */ +static TCGv rotate32_x(TCGContext *tcg_ctx, TCGv reg, TCGv shift, int left) +{ + TCGv_i64 t0, shift64; + TCGv X, lo, hi, zero; + + shift64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, shift64, shift); + + t0 = tcg_temp_new_i64(tcg_ctx); + + X = tcg_temp_new(tcg_ctx); + lo = tcg_temp_new(tcg_ctx); + hi = tcg_temp_new(tcg_ctx); + + if (left) { + /* create [reg:X:..] 
*/ + + tcg_gen_shli_i32(tcg_ctx, lo, QREG_CC_X, 31); + tcg_gen_concat_i32_i64(tcg_ctx, t0, lo, reg); + + /* rotate */ + + tcg_gen_rotl_i64(tcg_ctx, t0, t0, shift64); + tcg_temp_free_i64(tcg_ctx, shift64); + + /* result is [reg:..:reg:X] */ + + tcg_gen_extr_i64_i32(tcg_ctx, lo, hi, t0); + tcg_gen_andi_i32(tcg_ctx, X, lo, 1); + + tcg_gen_shri_i32(tcg_ctx, lo, lo, 1); + } else { + /* create [..:X:reg] */ + + tcg_gen_concat_i32_i64(tcg_ctx, t0, reg, QREG_CC_X); + + tcg_gen_rotr_i64(tcg_ctx, t0, t0, shift64); + tcg_temp_free_i64(tcg_ctx, shift64); + + /* result is value: [X:reg:..:reg] */ + + tcg_gen_extr_i64_i32(tcg_ctx, lo, hi, t0); + + /* extract X */ + + tcg_gen_shri_i32(tcg_ctx, X, hi, 31); + + /* extract result */ + + tcg_gen_shli_i32(tcg_ctx, hi, hi, 1); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_gen_or_i32(tcg_ctx, lo, lo, hi); + tcg_temp_free(tcg_ctx, hi); + + /* if shift == 0, register and X are not affected */ + + zero = tcg_const_i32(tcg_ctx, 0); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, X, shift, zero, QREG_CC_X, X); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, reg, shift, zero, reg, lo); + tcg_temp_free(tcg_ctx, zero); + tcg_temp_free(tcg_ctx, lo); + + return X; +} + +DISAS_INSN(rotate_im) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv shift; + int tmp; + int left = (insn & 0x100); + + tmp = (insn >> 9) & 7; + if (tmp == 0) { + tmp = 8; + } + + shift = tcg_const_i32(tcg_ctx, tmp); + if (insn & 8) { + rotate(tcg_ctx, DREG(insn, 0), shift, left, 32); + } else { + TCGv X = rotate32_x(tcg_ctx, DREG(insn, 0), shift, left); + rotate_x_flags(tcg_ctx, DREG(insn, 0), X, 32); + tcg_temp_free(tcg_ctx, X); + } + tcg_temp_free(tcg_ctx, shift); + + set_cc_op(s, CC_OP_FLAGS); +} + +DISAS_INSN(rotate8_im) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int left = (insn & 0x100); + TCGv reg; + TCGv shift; + int tmp; + + reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0); + + tmp = (insn >> 9) & 7; + if (tmp == 0) { + tmp = 8; + } + + shift = tcg_const_i32(tcg_ctx, tmp); + if (insn & 8) { + rotate(tcg_ctx, reg, shift, left, 8); + } else { + TCGv X = rotate_x(tcg_ctx, reg, shift, left, 8); + rotate_x_flags(tcg_ctx, reg, X, 8); + tcg_temp_free(tcg_ctx, X); + } + tcg_temp_free(tcg_ctx, shift); + gen_partset_reg(tcg_ctx, OS_BYTE, DREG(insn, 0), reg); + set_cc_op(s, CC_OP_FLAGS); +} + +DISAS_INSN(rotate16_im) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int left = (insn & 0x100); + TCGv reg; + TCGv shift; + int tmp; + + reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0); + tmp = (insn >> 9) & 7; + if (tmp == 0) { + tmp = 8; + } + + shift = tcg_const_i32(tcg_ctx, tmp); + if (insn & 8) { + rotate(tcg_ctx, reg, shift, left, 16); + } else { + TCGv X = rotate_x(tcg_ctx, reg, shift, left, 16); + rotate_x_flags(tcg_ctx, reg, X, 16); + tcg_temp_free(tcg_ctx, X); + } + tcg_temp_free(tcg_ctx, shift); + gen_partset_reg(tcg_ctx, OS_WORD, DREG(insn, 0), reg); + set_cc_op(s, CC_OP_FLAGS); +} + +DISAS_INSN(rotate_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv src; + TCGv t0, t1; + int left = (insn & 0x100); + + reg = DREG(insn, 0); + src = DREG(insn, 9); + /* shift in [0..63] */ + t0 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, t0, src, 63); + t1 = tcg_temp_new_i32(tcg_ctx); + if (insn & 8) { + tcg_gen_andi_i32(tcg_ctx, t1, src, 31); + rotate(tcg_ctx, reg, t1, left, 32); + /* if shift == 0, clear C */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, QREG_CC_C, + t0, QREG_CC_V /* 0 */, + QREG_CC_V /* 0 */, QREG_CC_C); + } else { + TCGv X; + /* modulo 33 */ + tcg_gen_movi_i32(tcg_ctx, t1, 33); + 
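/* A 32-bit rotate through X moves 33 bits in total (32 data bits plus X), so shift counts repeat with period 33; the remainder below maps the 6-bit count into [0..32] for rotate32_x(). */ +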
tcg_gen_remu_i32(tcg_ctx, t1, t0, t1); + X = rotate32_x(tcg_ctx, DREG(insn, 0), t1, left); + rotate_x_flags(tcg_ctx, DREG(insn, 0), X, 32); + tcg_temp_free(tcg_ctx, X); + } + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + set_cc_op(s, CC_OP_FLAGS); +} + +DISAS_INSN(rotate8_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv src; + TCGv t0, t1; + int left = (insn & 0x100); + + reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0); + src = DREG(insn, 9); + /* shift in [0..63] */ + t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, t0, src, 63); + t1 = tcg_temp_new_i32(tcg_ctx); + if (insn & 8) { + tcg_gen_andi_i32(tcg_ctx, t1, src, 7); + rotate(tcg_ctx, reg, t1, left, 8); + /* if shift == 0, clear C */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, QREG_CC_C, + t0, QREG_CC_V /* 0 */, + QREG_CC_V /* 0 */, QREG_CC_C); + } else { + TCGv X; + /* modulo 9 */ + tcg_gen_movi_i32(tcg_ctx, t1, 9); + tcg_gen_remu_i32(tcg_ctx, t1, t0, t1); + X = rotate_x(tcg_ctx, reg, t1, left, 8); + rotate_x_flags(tcg_ctx, reg, X, 8); + tcg_temp_free(tcg_ctx, X); + } + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + gen_partset_reg(tcg_ctx, OS_BYTE, DREG(insn, 0), reg); + set_cc_op(s, CC_OP_FLAGS); +} + +DISAS_INSN(rotate16_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv src; + TCGv t0, t1; + int left = (insn & 0x100); + + reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0); + src = DREG(insn, 9); + /* shift in [0..63] */ + t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, t0, src, 63); + t1 = tcg_temp_new_i32(tcg_ctx); + if (insn & 8) { + tcg_gen_andi_i32(tcg_ctx, t1, src, 15); + rotate(tcg_ctx, reg, t1, left, 16); + /* if shift == 0, clear C */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, QREG_CC_C, + t0, QREG_CC_V /* 0 */, + QREG_CC_V /* 0 */, QREG_CC_C); + } else { + TCGv X; + /* modulo 17 */ + tcg_gen_movi_i32(tcg_ctx, t1, 17); + tcg_gen_remu_i32(tcg_ctx, t1, t0, t1); + X = rotate_x(tcg_ctx, reg, t1, left, 16); + rotate_x_flags(tcg_ctx, reg, X, 16); + tcg_temp_free(tcg_ctx, X); + } + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + gen_partset_reg(tcg_ctx, OS_WORD, DREG(insn, 0), reg); + set_cc_op(s, CC_OP_FLAGS); +} + +DISAS_INSN(rotate_mem) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv addr; + TCGv shift; + int left = (insn & 0x100); + + SRC_EA(env, src, OS_WORD, 0, &addr); + + shift = tcg_const_i32(tcg_ctx, 1); + if (insn & 0x0200) { + rotate(tcg_ctx, src, shift, left, 16); + } else { + TCGv X = rotate_x(tcg_ctx, src, shift, left, 16); + rotate_x_flags(tcg_ctx, src, X, 16); + tcg_temp_free(tcg_ctx, X); + } + tcg_temp_free(tcg_ctx, shift); + DEST_EA(env, insn, OS_WORD, src, &addr); + set_cc_op(s, CC_OP_FLAGS); +} + +DISAS_INSN(bfext_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int ext = read_im16(env, s); + int is_sign = insn & 0x200; + TCGv src = DREG(insn, 0); + TCGv dst = DREG(ext, 12); + int len = ((extract32(ext, 0, 5) - 1) & 31) + 1; + int ofs = extract32(ext, 6, 5); /* big bit-endian */ + int pos = 32 - ofs - len; /* little bit-endian */ + TCGv tmp = tcg_temp_new(tcg_ctx); + TCGv shift; + + /* + * In general, we're going to rotate the field so that it's at the + * top of the word and then right-shift by the complement of the + * width to extend the field. + */ + if (ext & 0x20) { + /* Variable width. */ + if (ext & 0x800) { + /* Variable offset. 
*/ + tcg_gen_andi_i32(tcg_ctx, tmp, DREG(ext, 6), 31); + tcg_gen_rotl_i32(tcg_ctx, tmp, src, tmp); + } else { + tcg_gen_rotli_i32(tcg_ctx, tmp, src, ofs); + } + + shift = tcg_temp_new(tcg_ctx); + tcg_gen_neg_i32(tcg_ctx, shift, DREG(ext, 0)); + tcg_gen_andi_i32(tcg_ctx, shift, shift, 31); + tcg_gen_sar_i32(tcg_ctx, QREG_CC_N, tmp, shift); + if (is_sign) { + tcg_gen_mov_i32(tcg_ctx, dst, QREG_CC_N); + } else { + tcg_gen_shr_i32(tcg_ctx, dst, tmp, shift); + } + tcg_temp_free(tcg_ctx, shift); + } else { + /* Immediate width. */ + if (ext & 0x800) { + /* Variable offset */ + tcg_gen_andi_i32(tcg_ctx, tmp, DREG(ext, 6), 31); + tcg_gen_rotl_i32(tcg_ctx, tmp, src, tmp); + src = tmp; + pos = 32 - len; + } else { + /* + * Immediate offset. If the field doesn't wrap around the + * end of the word, rely on (s)extract completely. + */ + if (pos < 0) { + tcg_gen_rotli_i32(tcg_ctx, tmp, src, ofs); + src = tmp; + pos = 32 - len; + } + } + + tcg_gen_sextract_i32(tcg_ctx, QREG_CC_N, src, pos, len); + if (is_sign) { + tcg_gen_mov_i32(tcg_ctx, dst, QREG_CC_N); + } else { + tcg_gen_extract_i32(tcg_ctx, dst, src, pos, len); + } + } + + tcg_temp_free(tcg_ctx, tmp); + set_cc_op(s, CC_OP_LOGIC); +} + +DISAS_INSN(bfext_mem) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int ext = read_im16(env, s); + int is_sign = insn & 0x200; + TCGv dest = DREG(ext, 12); + TCGv addr, len, ofs; + + addr = gen_lea(env, s, insn, OS_UNSIZED); + if (IS_NULL_QREG(addr)) { + gen_addr_fault(s); + return; + } + + if (ext & 0x20) { + len = DREG(ext, 0); + } else { + len = tcg_const_i32(tcg_ctx, extract32(ext, 0, 5)); + } + if (ext & 0x800) { + ofs = DREG(ext, 6); + } else { + ofs = tcg_const_i32(tcg_ctx, extract32(ext, 6, 5)); + } + + if (is_sign) { + gen_helper_bfexts_mem(tcg_ctx, dest, tcg_ctx->cpu_env, addr, ofs, len); + tcg_gen_mov_i32(tcg_ctx, QREG_CC_N, dest); + } else { + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + gen_helper_bfextu_mem(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, ofs, len); + tcg_gen_extr_i64_i32(tcg_ctx, dest, QREG_CC_N, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + } + set_cc_op(s, CC_OP_LOGIC); + + if (!(ext & 0x20)) { + tcg_temp_free(tcg_ctx, len); + } + if (!(ext & 0x800)) { + tcg_temp_free(tcg_ctx, ofs); + } +} + +DISAS_INSN(bfop_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int ext = read_im16(env, s); + TCGv src = DREG(insn, 0); + int len = ((extract32(ext, 0, 5) - 1) & 31) + 1; + int ofs = extract32(ext, 6, 5); /* big bit-endian */ + TCGv mask, tofs, tlen; + + tofs = NULL; + tlen = NULL; + if ((insn & 0x0f00) == 0x0d00) { /* bfffo */ + tofs = tcg_temp_new(tcg_ctx); + tlen = tcg_temp_new(tcg_ctx); + } + + if ((ext & 0x820) == 0) { + /* Immediate width and offset. 
*/ + uint32_t maski = 0x7fffffffu >> (len - 1); + if (ofs + len <= 32) { + tcg_gen_shli_i32(tcg_ctx, QREG_CC_N, src, ofs); + } else { + tcg_gen_rotli_i32(tcg_ctx, QREG_CC_N, src, ofs); + } + tcg_gen_andi_i32(tcg_ctx, QREG_CC_N, QREG_CC_N, ~maski); + mask = tcg_const_i32(tcg_ctx, ror32(maski, ofs)); + if (tofs) { + tcg_gen_movi_i32(tcg_ctx, tofs, ofs); + tcg_gen_movi_i32(tcg_ctx, tlen, len); + } + } else { + TCGv tmp = tcg_temp_new(tcg_ctx); + if (ext & 0x20) { + /* Variable width */ + tcg_gen_subi_i32(tcg_ctx, tmp, DREG(ext, 0), 1); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 31); + mask = tcg_const_i32(tcg_ctx, 0x7fffffffu); + tcg_gen_shr_i32(tcg_ctx, mask, mask, tmp); + if (tlen) { + tcg_gen_addi_i32(tcg_ctx, tlen, tmp, 1); + } + } else { + /* Immediate width */ + mask = tcg_const_i32(tcg_ctx, 0x7fffffffu >> (len - 1)); + if (tlen) { + tcg_gen_movi_i32(tcg_ctx, tlen, len); + } + } + if (ext & 0x800) { + /* Variable offset */ + tcg_gen_andi_i32(tcg_ctx, tmp, DREG(ext, 6), 31); + tcg_gen_rotl_i32(tcg_ctx, QREG_CC_N, src, tmp); + tcg_gen_andc_i32(tcg_ctx, QREG_CC_N, QREG_CC_N, mask); + tcg_gen_rotr_i32(tcg_ctx, mask, mask, tmp); + if (tofs) { + tcg_gen_mov_i32(tcg_ctx, tofs, tmp); + } + } else { + /* Immediate offset (and variable width) */ + tcg_gen_rotli_i32(tcg_ctx, QREG_CC_N, src, ofs); + tcg_gen_andc_i32(tcg_ctx, QREG_CC_N, QREG_CC_N, mask); + tcg_gen_rotri_i32(tcg_ctx, mask, mask, ofs); + if (tofs) { + tcg_gen_movi_i32(tcg_ctx, tofs, ofs); + } + } + tcg_temp_free(tcg_ctx, tmp); + } + set_cc_op(s, CC_OP_LOGIC); + + switch (insn & 0x0f00) { + case 0x0a00: /* bfchg */ + tcg_gen_eqv_i32(tcg_ctx, src, src, mask); + break; + case 0x0c00: /* bfclr */ + tcg_gen_and_i32(tcg_ctx, src, src, mask); + break; + case 0x0d00: /* bfffo */ + gen_helper_bfffo_reg(tcg_ctx, DREG(ext, 12), QREG_CC_N, tofs, tlen); + tcg_temp_free(tcg_ctx, tlen); + tcg_temp_free(tcg_ctx, tofs); + break; + case 0x0e00: /* bfset */ + tcg_gen_orc_i32(tcg_ctx, src, src, mask); + break; + case 0x0800: /* bftst */ + /* flags already set; no other work to do. 
*/ + break; + default: + g_assert_not_reached(); + } + tcg_temp_free(tcg_ctx, mask); +} + +DISAS_INSN(bfop_mem) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int ext = read_im16(env, s); + TCGv addr, len, ofs; + TCGv_i64 t64; + + addr = gen_lea(env, s, insn, OS_UNSIZED); + if (IS_NULL_QREG(addr)) { + gen_addr_fault(s); + return; + } + + if (ext & 0x20) { + len = DREG(ext, 0); + } else { + len = tcg_const_i32(tcg_ctx, extract32(ext, 0, 5)); + } + if (ext & 0x800) { + ofs = DREG(ext, 6); + } else { + ofs = tcg_const_i32(tcg_ctx, extract32(ext, 6, 5)); + } + + switch (insn & 0x0f00) { + case 0x0a00: /* bfchg */ + gen_helper_bfchg_mem(tcg_ctx, QREG_CC_N, tcg_ctx->cpu_env, addr, ofs, len); + break; + case 0x0c00: /* bfclr */ + gen_helper_bfclr_mem(tcg_ctx, QREG_CC_N, tcg_ctx->cpu_env, addr, ofs, len); + break; + case 0x0d00: /* bfffo */ + t64 = tcg_temp_new_i64(tcg_ctx); + gen_helper_bfffo_mem(tcg_ctx, t64, tcg_ctx->cpu_env, addr, ofs, len); + tcg_gen_extr_i64_i32(tcg_ctx, DREG(ext, 12), QREG_CC_N, t64); + tcg_temp_free_i64(tcg_ctx, t64); + break; + case 0x0e00: /* bfset */ + gen_helper_bfset_mem(tcg_ctx, QREG_CC_N, tcg_ctx->cpu_env, addr, ofs, len); + break; + case 0x0800: /* bftst */ + gen_helper_bfexts_mem(tcg_ctx, QREG_CC_N, tcg_ctx->cpu_env, addr, ofs, len); + break; + default: + g_assert_not_reached(); + } + set_cc_op(s, CC_OP_LOGIC); + + if (!(ext & 0x20)) { + tcg_temp_free(tcg_ctx, len); + } + if (!(ext & 0x800)) { + tcg_temp_free(tcg_ctx, ofs); + } +} + +DISAS_INSN(bfins_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int ext = read_im16(env, s); + TCGv dst = DREG(insn, 0); + TCGv src = DREG(ext, 12); + int len = ((extract32(ext, 0, 5) - 1) & 31) + 1; + int ofs = extract32(ext, 6, 5); /* big bit-endian */ + int pos = 32 - ofs - len; /* little bit-endian */ + TCGv tmp; + + tmp = tcg_temp_new(tcg_ctx); + + if (ext & 0x20) { + /* Variable width */ + tcg_gen_neg_i32(tcg_ctx, tmp, DREG(ext, 0)); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 31); + tcg_gen_shl_i32(tcg_ctx, QREG_CC_N, src, tmp); + } else { + /* Immediate width */ + tcg_gen_shli_i32(tcg_ctx, QREG_CC_N, src, 32 - len); + } + set_cc_op(s, CC_OP_LOGIC); + + /* Immediate width and offset */ + if ((ext & 0x820) == 0) { + /* Check for suitability for deposit. 
*/ + if (pos >= 0) { + tcg_gen_deposit_i32(tcg_ctx, dst, dst, src, pos, len); + } else { +#ifdef _MSC_VER + uint32_t maski = 0xfffffffeU << (len - 1); +#else + uint32_t maski = -2U << (len - 1); +#endif + uint32_t roti = (ofs + len) & 31; + tcg_gen_andi_i32(tcg_ctx, tmp, src, ~maski); + tcg_gen_rotri_i32(tcg_ctx, tmp, tmp, roti); + tcg_gen_andi_i32(tcg_ctx, dst, dst, ror32(maski, roti)); + tcg_gen_or_i32(tcg_ctx, dst, dst, tmp); + } + } else { + TCGv mask = tcg_temp_new(tcg_ctx); + TCGv rot = tcg_temp_new(tcg_ctx); + + if (ext & 0x20) { + /* Variable width */ + tcg_gen_subi_i32(tcg_ctx, rot, DREG(ext, 0), 1); + tcg_gen_andi_i32(tcg_ctx, rot, rot, 31); + tcg_gen_movi_i32(tcg_ctx, mask, -2); + tcg_gen_shl_i32(tcg_ctx, mask, mask, rot); + tcg_gen_mov_i32(tcg_ctx, rot, DREG(ext, 0)); + tcg_gen_andc_i32(tcg_ctx, tmp, src, mask); + } else { + /* Immediate width (variable offset) */ +#ifdef _MSC_VER + uint32_t maski = 0xfffffffeU << (len - 1); +#else + uint32_t maski = -2U << (len - 1); +#endif + tcg_gen_andi_i32(tcg_ctx, tmp, src, ~maski); + tcg_gen_movi_i32(tcg_ctx, mask, maski); + tcg_gen_movi_i32(tcg_ctx, rot, len & 31); + } + if (ext & 0x800) { + /* Variable offset */ + tcg_gen_add_i32(tcg_ctx, rot, rot, DREG(ext, 6)); + } else { + /* Immediate offset (variable width) */ + tcg_gen_addi_i32(tcg_ctx, rot, rot, ofs); + } + tcg_gen_andi_i32(tcg_ctx, rot, rot, 31); + tcg_gen_rotr_i32(tcg_ctx, mask, mask, rot); + tcg_gen_rotr_i32(tcg_ctx, tmp, tmp, rot); + tcg_gen_and_i32(tcg_ctx, dst, dst, mask); + tcg_gen_or_i32(tcg_ctx, dst, dst, tmp); + + tcg_temp_free(tcg_ctx, rot); + tcg_temp_free(tcg_ctx, mask); + } + tcg_temp_free(tcg_ctx, tmp); +} + +DISAS_INSN(bfins_mem) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int ext = read_im16(env, s); + TCGv src = DREG(ext, 12); + TCGv addr, len, ofs; + + addr = gen_lea(env, s, insn, OS_UNSIZED); + if (IS_NULL_QREG(addr)) { + gen_addr_fault(s); + return; + } + + if (ext & 0x20) { + len = DREG(ext, 0); + } else { + len = tcg_const_i32(tcg_ctx, extract32(ext, 0, 5)); + } + if (ext & 0x800) { + ofs = DREG(ext, 6); + } else { + ofs = tcg_const_i32(tcg_ctx, extract32(ext, 6, 5)); + } + + gen_helper_bfins_mem(tcg_ctx, QREG_CC_N, tcg_ctx->cpu_env, addr, src, ofs, len); + set_cc_op(s, CC_OP_LOGIC); + + if (!(ext & 0x20)) { + tcg_temp_free(tcg_ctx, len); + } + if (!(ext & 0x800)) { + tcg_temp_free(tcg_ctx, ofs); + } +} + +DISAS_INSN(ff1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + reg = DREG(insn, 0); + gen_logic_cc(s, reg, OS_LONG); + gen_helper_ff1(tcg_ctx, reg, reg); +} + +DISAS_INSN(chk) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src, reg; + int opsize; + + switch ((insn >> 7) & 3) { + case 3: + opsize = OS_WORD; + break; + case 2: + if (m68k_feature(env, M68K_FEATURE_CHK2)) { + opsize = OS_LONG; + break; + } + /* fallthru */ + default: + gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); + return; + } + SRC_EA(env, src, opsize, 1, NULL); + reg = gen_extend(s, DREG(insn, 9), opsize, 1); + + gen_flush_flags(s); + gen_helper_chk(tcg_ctx, tcg_ctx->cpu_env, reg, src); +} + +DISAS_INSN(chk2) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint16_t ext; + TCGv addr1, addr2, bound1, bound2, reg; + int opsize; + + switch ((insn >> 9) & 3) { + case 0: + opsize = OS_BYTE; + break; + case 1: + opsize = OS_WORD; + break; + case 2: + opsize = OS_LONG; + break; + default: + gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); + return; + } + + ext = read_im16(env, s); + if ((ext & 0x0800) == 0) { + gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); + return; + } + + addr1 
= gen_lea(env, s, insn, OS_UNSIZED); + addr2 = tcg_temp_new(tcg_ctx); + tcg_gen_addi_i32(tcg_ctx, addr2, addr1, opsize_bytes(opsize)); + + bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s)); + tcg_temp_free(tcg_ctx, addr1); + bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s)); + tcg_temp_free(tcg_ctx, addr2); + + reg = tcg_temp_new(tcg_ctx); + if (ext & 0x8000) { + tcg_gen_mov_i32(tcg_ctx, reg, AREG(ext, 12)); + } else { + gen_ext(tcg_ctx, reg, DREG(ext, 12), opsize, 1); + } + + gen_flush_flags(s); + gen_helper_chk2(tcg_ctx, tcg_ctx->cpu_env, reg, bound1, bound2); + tcg_temp_free(tcg_ctx, reg); + tcg_temp_free(tcg_ctx, bound1); + tcg_temp_free(tcg_ctx, bound2); +} + +static void m68k_copy_line(TCGContext *tcg_ctx, TCGv dst, TCGv src, int index) +{ + TCGv addr; + TCGv_i64 t0, t1; + + addr = tcg_temp_new(tcg_ctx); + + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_andi_i32(tcg_ctx, addr, src, ~15); + tcg_gen_qemu_ld64(tcg_ctx, t0, addr, index); + tcg_gen_addi_i32(tcg_ctx, addr, addr, 8); + tcg_gen_qemu_ld64(tcg_ctx, t1, addr, index); + + tcg_gen_andi_i32(tcg_ctx, addr, dst, ~15); + tcg_gen_qemu_st64(tcg_ctx, t0, addr, index); + tcg_gen_addi_i32(tcg_ctx, addr, addr, 8); + tcg_gen_qemu_st64(tcg_ctx, t1, addr, index); + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, addr); +} + +DISAS_INSN(move16_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int index = IS_USER(s); + TCGv tmp; + uint16_t ext; + + ext = read_im16(env, s); + if ((ext & (1 << 15)) == 0) { + gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); + } + + m68k_copy_line(tcg_ctx, AREG(ext, 12), AREG(insn, 0), index); + + /* Ax can be Ay, so save Ay before incrementing Ax */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, tmp, AREG(ext, 12)); + tcg_gen_addi_i32(tcg_ctx, AREG(insn, 0), AREG(insn, 0), 16); + tcg_gen_addi_i32(tcg_ctx, AREG(ext, 12), tmp, 16); + tcg_temp_free(tcg_ctx, tmp); +} + +DISAS_INSN(move16_mem) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int index = IS_USER(s); + TCGv reg, addr; + + reg = AREG(insn, 0); + addr = tcg_const_i32(tcg_ctx, read_im32(env, s)); + + if ((insn >> 3) & 1) { + /* MOVE16 (xxx).L, (Ay) */ + m68k_copy_line(tcg_ctx, reg, addr, index); + } else { + /* MOVE16 (Ay), (xxx).L */ + m68k_copy_line(tcg_ctx, addr, reg, index); + } + + tcg_temp_free(tcg_ctx, addr); + + if (((insn >> 3) & 2) == 0) { + /* (Ay)+ */ + tcg_gen_addi_i32(tcg_ctx, reg, reg, 16); + } +} + +DISAS_INSN(strldsr) +{ + uint16_t ext; + uint32_t addr; + + addr = s->pc - 2; + ext = read_im16(env, s); + if (ext != 0x46FC) { + gen_exception(s, addr, EXCP_ILLEGAL); + return; + } + ext = read_im16(env, s); + if (IS_USER(s) || (ext & SR_S) == 0) { + gen_exception(s, addr, EXCP_PRIVILEGE); + return; + } + gen_push(s, gen_get_sr(s)); + gen_set_sr_im(s, ext, 0); +} + +DISAS_INSN(move_from_sr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv sr; + + if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + sr = gen_get_sr(s); + DEST_EA(env, insn, OS_WORD, sr, NULL); +} + +DISAS_INSN(moves) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + uint16_t ext; + TCGv reg; + TCGv addr; + int extend; + + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + + ext = read_im16(env, s); + + opsize = insn_opsize(insn); + + if (ext & 0x8000) { + /* address register */ + reg = AREG(ext, 12); + extend = 1; + } else { + /* data register */ + reg = DREG(ext, 12); + 
extend = 0; + } + + addr = gen_lea(env, s, insn, opsize); + if (IS_NULL_QREG(addr)) { + gen_addr_fault(s); + return; + } + + if (ext & 0x0800) { + /* from reg to ea */ + gen_store(s, opsize, addr, reg, DFC_INDEX(s)); + } else { + /* from ea to reg */ + TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s)); + if (extend) { + gen_ext(tcg_ctx, reg, tmp, opsize, 1); + } else { + gen_partset_reg(tcg_ctx, opsize, reg, tmp); + } + tcg_temp_free(tcg_ctx, tmp); + } + switch (extract32(insn, 3, 3)) { + case 3: /* Indirect postincrement. */ + tcg_gen_addi_i32(tcg_ctx, AREG(insn, 0), addr, + REG(insn, 0) == 7 && opsize == OS_BYTE + ? 2 + : opsize_bytes(opsize)); + break; + case 4: /* Indirect predecrement. */ + tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); + break; + } +} + +DISAS_INSN(move_to_sr) +{ + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + gen_move_to_sr(env, s, insn, false); + gen_exit_tb(s); +} + +DISAS_INSN(move_from_usp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + tcg_gen_ld_i32(tcg_ctx, AREG(insn, 0), tcg_ctx->cpu_env, + offsetof(CPUM68KState, sp[M68K_USP])); +} + +DISAS_INSN(move_to_usp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + tcg_gen_st_i32(tcg_ctx, AREG(insn, 0), tcg_ctx->cpu_env, + offsetof(CPUM68KState, sp[M68K_USP])); +} + +DISAS_INSN(halt) +{ + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + + gen_exception(s, s->pc, EXCP_HALT_INSN); +} + +DISAS_INSN(stop) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint16_t ext; + + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + + ext = read_im16(env, s); + + gen_set_sr_im(s, ext, 0); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_halted, 1); + gen_exception(s, s->pc, EXCP_HLT); +} + +DISAS_INSN(rte) +{ + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + gen_exception(s, s->base.pc_next, EXCP_RTE); +} + +DISAS_INSN(cf_movec) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint16_t ext; + TCGv reg; + + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + + ext = read_im16(env, s); + + if (ext & 0x8000) { + reg = AREG(ext, 12); + } else { + reg = DREG(ext, 12); + } + gen_helper_cf_movec_to(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, ext & 0xfff), reg); + gen_exit_tb(s); +} + +DISAS_INSN(m68k_movec) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint16_t ext; + TCGv reg; + + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + + ext = read_im16(env, s); + + if (ext & 0x8000) { + reg = AREG(ext, 12); + } else { + reg = DREG(ext, 12); + } + if (insn & 1) { + gen_helper_m68k_movec_to(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, ext & 0xfff), reg); + } else { + gen_helper_m68k_movec_from(tcg_ctx, reg, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, ext & 0xfff)); + } + gen_exit_tb(s); +} + +DISAS_INSN(intouch) +{ + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + /* ICache fetch. Implement as no-op. */ +} + +DISAS_INSN(cpushl) +{ + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + /* Cache push/invalidate. Implement as no-op. */ +} + +DISAS_INSN(cpush) +{ + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + /* Cache push/invalidate. Implement as no-op.
*/ +} + +DISAS_INSN(cinv) +{ + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + /* Invalidate cache line. Implement as no-op. */ +} + +DISAS_INSN(pflush) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv opmode; + + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + + opmode = tcg_const_i32(tcg_ctx, (insn >> 3) & 3); + gen_helper_pflush(tcg_ctx, tcg_ctx->cpu_env, AREG(insn, 0), opmode); + tcg_temp_free(tcg_ctx, opmode); +} + +DISAS_INSN(ptest) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv is_read; + + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + is_read = tcg_const_i32(tcg_ctx, (insn >> 5) & 1); + gen_helper_ptest(tcg_ctx, tcg_ctx->cpu_env, AREG(insn, 0), is_read); + tcg_temp_free(tcg_ctx, is_read); +} + +DISAS_INSN(wddata) +{ + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); +} + +DISAS_INSN(wdebug) +{ + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + /* TODO: Implement wdebug. */ + cpu_abort(env_cpu(env), "WDEBUG not implemented"); +} + +DISAS_INSN(trap) +{ + gen_exception(s, s->base.pc_next, EXCP_TRAP0 + (insn & 0xf)); +} + +static void gen_load_fcr(DisasContext *s, TCGv res, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + switch (reg) { + case M68K_FPIAR: + tcg_gen_movi_i32(tcg_ctx, res, 0); + break; + case M68K_FPSR: + tcg_gen_ld_i32(tcg_ctx, res, tcg_ctx->cpu_env, offsetof(CPUM68KState, fpsr)); + break; + case M68K_FPCR: + tcg_gen_ld_i32(tcg_ctx, res, tcg_ctx->cpu_env, offsetof(CPUM68KState, fpcr)); + break; + } +} + +static void gen_store_fcr(DisasContext *s, TCGv val, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + switch (reg) { + case M68K_FPIAR: + break; + case M68K_FPSR: + tcg_gen_st_i32(tcg_ctx, val, tcg_ctx->cpu_env, offsetof(CPUM68KState, fpsr)); + break; + case M68K_FPCR: + gen_helper_set_fpcr(tcg_ctx, tcg_ctx->cpu_env, val); + break; + } +} + +static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int index = IS_USER(s); + TCGv tmp; + + tmp = tcg_temp_new(tcg_ctx); + gen_load_fcr(s, tmp, reg); + tcg_gen_qemu_st32(tcg_ctx, tmp, addr, index); + tcg_temp_free(tcg_ctx, tmp); +} + +static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int index = IS_USER(s); + TCGv tmp; + + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_qemu_ld32u(tcg_ctx, tmp, addr, index); + gen_store_fcr(s, tmp, reg); + tcg_temp_free(tcg_ctx, tmp); +} + + +static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s, + uint32_t insn, uint32_t ext) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int mask = (ext >> 10) & 7; + int is_write = (ext >> 13) & 1; + int mode = extract32(insn, 3, 3); + int i; + TCGv addr, tmp; + + switch (mode) { + case 0: /* Dn */ + if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) { + gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); + return; + } + if (is_write) { + gen_load_fcr(s, DREG(insn, 0), mask); + } else { + gen_store_fcr(s, DREG(insn, 0), mask); + } + return; + case 1: /* An, only with FPIAR */ + if (mask != M68K_FPIAR) { + gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); + return; + } + if (is_write) { + gen_load_fcr(s, AREG(insn, 0), mask); + } else { + gen_store_fcr(s, AREG(insn, 0), mask); + } + return; + default: + break; + } + + tmp = gen_lea(env, s, insn, OS_LONG); + if (IS_NULL_QREG(tmp)) { + gen_addr_fault(s); + return; + } + + addr = tcg_temp_new(tcg_ctx); + 
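/* Copy the EA from gen_lea() into a scratch temp: the loops below step addr through the selected control registers and must not disturb the original value. */ +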
tcg_gen_mov_i32(tcg_ctx, addr, tmp); + + /* + * mask: + * + * 0b100 Floating-Point Control Register + * 0b010 Floating-Point Status Register + * 0b001 Floating-Point Instruction Address Register + * + */ + + if (is_write && mode == 4) { + for (i = 2; i >= 0; i--, mask >>= 1) { + if (mask & 1) { + gen_qemu_store_fcr(s, addr, 1 << i); + if (mask != 1) { + tcg_gen_subi_i32(tcg_ctx, addr, addr, opsize_bytes(OS_LONG)); + } + } + } + tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); + } else { + for (i = 0; i < 3; i++, mask >>= 1) { + if (mask & 1) { + if (is_write) { + gen_qemu_store_fcr(s, addr, 1 << i); + } else { + gen_qemu_load_fcr(s, addr, 1 << i); + } + if (mask != 1 || mode == 3) { + tcg_gen_addi_i32(tcg_ctx, addr, addr, opsize_bytes(OS_LONG)); + } + } + } + if (mode == 3) { + tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); + } + } + tcg_temp_free_i32(tcg_ctx, addr); +} + +static void gen_op_fmovem(CPUM68KState *env, DisasContext *s, + uint32_t insn, uint32_t ext) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + TCGv addr, tmp; + int mode = (ext >> 11) & 0x3; + int is_load = ((ext & 0x2000) == 0); + + if (m68k_feature(s->env, M68K_FEATURE_FPU)) { + opsize = OS_EXTENDED; + } else { + opsize = OS_DOUBLE; /* FIXME */ + } + + addr = gen_lea(env, s, insn, opsize); + if (IS_NULL_QREG(addr)) { + gen_addr_fault(s); + return; + } + + tmp = tcg_temp_new(tcg_ctx); + if (mode & 0x1) { + /* Dynamic register list */ + tcg_gen_ext8u_i32(tcg_ctx, tmp, DREG(ext, 4)); + } else { + /* Static register list */ + tcg_gen_movi_i32(tcg_ctx, tmp, ext & 0xff); + } + + if (!is_load && (mode & 2) == 0) { + /* + * predecrement addressing mode + * only available to store register to memory + */ + if (opsize == OS_EXTENDED) { + gen_helper_fmovemx_st_predec(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); + } else { + gen_helper_fmovemd_st_predec(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); + } + } else { + /* postincrement addressing mode */ + if (opsize == OS_EXTENDED) { + if (is_load) { + gen_helper_fmovemx_ld_postinc(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); + } else { + gen_helper_fmovemx_st_postinc(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); + } + } else { + if (is_load) { + gen_helper_fmovemd_ld_postinc(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); + } else { + gen_helper_fmovemd_st_postinc(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, tmp); + } + } + } + if ((insn & 070) == 030 || (insn & 070) == 040) { + tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), tmp); + } + tcg_temp_free(tcg_ctx, tmp); +} + +/* + * ??? FP exceptions are not implemented. Most exceptions are deferred until + * immediately before the next FP instruction is executed. 
+ */ +DISAS_INSN(fpu) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint16_t ext; + int opmode; + int opsize; + TCGv_ptr cpu_src, cpu_dest; + + ext = read_im16(env, s); + opmode = ext & 0x7f; + switch ((ext >> 13) & 7) { + case 0: + break; + case 1: + goto undef; + case 2: + if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) { + /* fmovecr */ + TCGv rom_offset = tcg_const_i32(tcg_ctx, opmode); + cpu_dest = gen_fp_ptr(tcg_ctx, REG(ext, 7)); + gen_helper_fconst(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, rom_offset); + tcg_temp_free_ptr(tcg_ctx, cpu_dest); + tcg_temp_free(tcg_ctx, rom_offset); + return; + } + break; + case 3: /* fmove out */ + cpu_src = gen_fp_ptr(tcg_ctx, REG(ext, 7)); + opsize = ext_opsize(ext, 10); + if (gen_ea_fp(env, s, insn, opsize, cpu_src, + EA_STORE, IS_USER(s)) == -1) { + gen_addr_fault(s); + } + gen_helper_ftst(tcg_ctx, tcg_ctx->cpu_env, cpu_src); + tcg_temp_free_ptr(tcg_ctx, cpu_src); + return; + case 4: /* fmove to control register. */ + case 5: /* fmove from control register. */ + gen_op_fmove_fcr(env, s, insn, ext); + return; + case 6: /* fmovem */ + case 7: + if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) { + goto undef; + } + gen_op_fmovem(env, s, insn, ext); + return; + } + if (ext & (1 << 14)) { + /* Source effective address. */ + opsize = ext_opsize(ext, 10); + cpu_src = gen_fp_result_ptr(tcg_ctx); + if (gen_ea_fp(env, s, insn, opsize, cpu_src, + EA_LOADS, IS_USER(s)) == -1) { + gen_addr_fault(s); + return; + } + } else { + /* Source register. */ + opsize = OS_EXTENDED; + cpu_src = gen_fp_ptr(tcg_ctx, REG(ext, 10)); + } + cpu_dest = gen_fp_ptr(tcg_ctx, REG(ext, 7)); + switch (opmode) { + case 0: /* fmove */ + gen_fp_move(tcg_ctx, cpu_dest, cpu_src); + break; + case 0x40: /* fsmove */ + gen_helper_fsround(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x44: /* fdmove */ + gen_helper_fdround(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 1: /* fint */ + gen_helper_firound(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 2: /* fsinh */ + gen_helper_fsinh(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 3: /* fintrz */ + gen_helper_fitrunc(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 4: /* fsqrt */ + gen_helper_fsqrt(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x41: /* fssqrt */ + gen_helper_fssqrt(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x45: /* fdsqrt */ + gen_helper_fdsqrt(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x06: /* flognp1 */ + gen_helper_flognp1(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x09: /* ftanh */ + gen_helper_ftanh(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x0a: /* fatan */ + gen_helper_fatan(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x0c: /* fasin */ + gen_helper_fasin(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x0d: /* fatanh */ + gen_helper_fatanh(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x0e: /* fsin */ + gen_helper_fsin(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x0f: /* ftan */ + gen_helper_ftan(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x10: /* fetox */ + gen_helper_fetox(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x11: /* ftwotox */ + gen_helper_ftwotox(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x12: /* ftentox */ + gen_helper_ftentox(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 
0x14: /* flogn */ + gen_helper_flogn(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x15: /* flog10 */ + gen_helper_flog10(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x16: /* flog2 */ + gen_helper_flog2(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x18: /* fabs */ + gen_helper_fabs(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x58: /* fsabs */ + gen_helper_fsabs(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x5c: /* fdabs */ + gen_helper_fdabs(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x19: /* fcosh */ + gen_helper_fcosh(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x1a: /* fneg */ + gen_helper_fneg(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x5a: /* fsneg */ + gen_helper_fsneg(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x5e: /* fdneg */ + gen_helper_fdneg(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x1c: /* facos */ + gen_helper_facos(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x1d: /* fcos */ + gen_helper_fcos(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x1e: /* fgetexp */ + gen_helper_fgetexp(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x1f: /* fgetman */ + gen_helper_fgetman(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src); + break; + case 0x20: /* fdiv */ + gen_helper_fdiv(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x60: /* fsdiv */ + gen_helper_fsdiv(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x64: /* fddiv */ + gen_helper_fddiv(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x21: /* fmod */ + gen_helper_fmod(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x22: /* fadd */ + gen_helper_fadd(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x62: /* fsadd */ + gen_helper_fsadd(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x66: /* fdadd */ + gen_helper_fdadd(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x23: /* fmul */ + gen_helper_fmul(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x63: /* fsmul */ + gen_helper_fsmul(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x67: /* fdmul */ + gen_helper_fdmul(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x24: /* fsgldiv */ + gen_helper_fsgldiv(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x25: /* frem */ + gen_helper_frem(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x26: /* fscale */ + gen_helper_fscale(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x27: /* fsglmul */ + gen_helper_fsglmul(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x28: /* fsub */ + gen_helper_fsub(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x68: /* fssub */ + gen_helper_fssub(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x6c: /* fdsub */ + gen_helper_fdsub(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_src, cpu_dest); + break; + case 0x30: case 0x31: case 0x32: + case 0x33: case 0x34: case 0x35: + case 0x36: case 0x37: { + TCGv_ptr cpu_dest2 = gen_fp_ptr(tcg_ctx, REG(ext, 0)); + gen_helper_fsincos(tcg_ctx, tcg_ctx->cpu_env, cpu_dest, cpu_dest2, cpu_src); + tcg_temp_free_ptr(tcg_ctx, cpu_dest2); + } + break; + 
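/* The fcmp and ftst cases below update only the FPSR condition codes and return at once, skipping the common epilogue after the switch (free cpu_src, ftst of cpu_dest). */ +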
case 0x38: /* fcmp */ + gen_helper_fcmp(tcg_ctx, tcg_ctx->cpu_env, cpu_src, cpu_dest); + return; + case 0x3a: /* ftst */ + gen_helper_ftst(tcg_ctx, tcg_ctx->cpu_env, cpu_src); + return; + default: + goto undef; + } + tcg_temp_free_ptr(tcg_ctx, cpu_src); + gen_helper_ftst(tcg_ctx, tcg_ctx->cpu_env, cpu_dest); + tcg_temp_free_ptr(tcg_ctx, cpu_dest); + return; +undef: + /* FIXME: Is this right for offset addressing modes? */ + s->pc -= 2; + disas_undef_fpu(env, s, insn); +} + +static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv fpsr; + + c->g1 = 1; + c->v2 = tcg_const_i32(tcg_ctx, 0); + c->g2 = 0; + /* TODO: Raise BSUN exception. */ + fpsr = tcg_temp_new(tcg_ctx); + gen_load_fcr(s, fpsr, M68K_FPSR); + switch (cond) { + case 0: /* False */ + case 16: /* Signaling False */ + c->v1 = c->v2; + c->tcond = TCG_COND_NEVER; + break; + case 1: /* EQual Z */ + case 17: /* Signaling EQual Z */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_Z); + c->tcond = TCG_COND_NE; + break; + case 2: /* Ordered Greater Than !(A || Z || N) */ + case 18: /* Greater Than !(A || Z || N) */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, + FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); + c->tcond = TCG_COND_EQ; + break; + case 3: /* Ordered Greater than or Equal Z || !(A || N) */ + case 19: /* Greater than or Equal Z || !(A || N) */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A); + tcg_gen_shli_i32(tcg_ctx, c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); + tcg_gen_andi_i32(tcg_ctx, fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N); + tcg_gen_or_i32(tcg_ctx, c->v1, c->v1, fpsr); + tcg_gen_xori_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_N); + c->tcond = TCG_COND_NE; + break; + case 4: /* Ordered Less Than !(!N || A || Z); */ + case 20: /* Less Than !(!N || A || Z); */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_xori_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_N); + tcg_gen_andi_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z); + c->tcond = TCG_COND_EQ; + break; + case 5: /* Ordered Less than or Equal Z || (N && !A) */ + case 21: /* Less than or Equal Z || (N && !A) */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A); + tcg_gen_shli_i32(tcg_ctx, c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); + tcg_gen_andc_i32(tcg_ctx, c->v1, fpsr, c->v1); + tcg_gen_andi_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N); + c->tcond = TCG_COND_NE; + break; + case 6: /* Ordered Greater or Less than !(A || Z) */ + case 22: /* Greater or Less than !(A || Z) */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); + c->tcond = TCG_COND_EQ; + break; + case 7: /* Ordered !A */ + case 23: /* Greater, Less or Equal !A */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A); + c->tcond = TCG_COND_EQ; + break; + case 8: /* Unordered A */ + case 24: /* Not Greater, Less or Equal A */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A); + c->tcond = TCG_COND_NE; + break; + case 9: /* Unordered or Equal A || Z */ + case 25: /* Not Greater or Less than A || Z */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); + c->tcond = TCG_COND_NE; + break; + case 10: /* Unordered or Greater Than A || !(N || Z)) */ + case
26: /* Not Less or Equal A || !(N || Z)) */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_Z); + tcg_gen_shli_i32(tcg_ctx, c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); + tcg_gen_andi_i32(tcg_ctx, fpsr, fpsr, FPSR_CC_A | FPSR_CC_N); + tcg_gen_or_i32(tcg_ctx, c->v1, c->v1, fpsr); + tcg_gen_xori_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_N); + c->tcond = TCG_COND_NE; + break; + case 11: /* Unordered or Greater or Equal A || Z || !N */ + case 27: /* Not Less Than A || Z || !N */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); + tcg_gen_xori_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_N); + c->tcond = TCG_COND_NE; + break; + case 12: /* Unordered or Less Than A || (N && !Z) */ + case 28: /* Not Greater than or Equal A || (N && !Z) */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_Z); + tcg_gen_shli_i32(tcg_ctx, c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); + tcg_gen_andc_i32(tcg_ctx, c->v1, fpsr, c->v1); + tcg_gen_andi_i32(tcg_ctx, c->v1, c->v1, FPSR_CC_A | FPSR_CC_N); + c->tcond = TCG_COND_NE; + break; + case 13: /* Unordered or Less or Equal A || Z || N */ + case 29: /* Not Greater Than A || Z || N */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); + c->tcond = TCG_COND_NE; + break; + case 14: /* Not Equal !Z */ + case 30: /* Signaling Not Equal !Z */ + c->v1 = tcg_temp_new(tcg_ctx); + c->g1 = 0; + tcg_gen_andi_i32(tcg_ctx, c->v1, fpsr, FPSR_CC_Z); + c->tcond = TCG_COND_EQ; + break; + case 15: /* True */ + case 31: /* Signaling True */ + c->v1 = c->v2; + c->tcond = TCG_COND_ALWAYS; + break; + } + tcg_temp_free(tcg_ctx, fpsr); +} + +static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + DisasCompare c; + + gen_fcc_cond(&c, s, cond); + update_cc_op(s); + tcg_gen_brcond_i32(tcg_ctx, c.tcond, c.v1, c.v2, l1); + free_cond(tcg_ctx, &c); +} + +DISAS_INSN(fbcc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t offset; + uint32_t base; + TCGLabel *l1; + + base = s->pc; + offset = (int16_t)read_im16(env, s); + if (insn & (1 << 6)) { + offset = (offset << 16) | read_im16(env, s); + } + + l1 = gen_new_label(tcg_ctx); + update_cc_op(s); + gen_fjmpcc(s, insn & 0x3f, l1); + gen_jmp_tb(s, 0, s->pc); + gen_set_label(tcg_ctx, l1); + gen_jmp_tb(s, 1, base + offset); +} + +DISAS_INSN(fscc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + DisasCompare c; + int cond; + TCGv tmp; + uint16_t ext; + + ext = read_im16(env, s); + cond = ext & 0x3f; + gen_fcc_cond(&c, s, cond); + + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_setcond_i32(tcg_ctx, c.tcond, tmp, c.v1, c.v2); + free_cond(tcg_ctx, &c); + + tcg_gen_neg_i32(tcg_ctx, tmp, tmp); + DEST_EA(env, insn, OS_BYTE, tmp, NULL); + tcg_temp_free(tcg_ctx, tmp); +} + +DISAS_INSN(frestore) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv addr; + + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + if (m68k_feature(s->env, M68K_FEATURE_M68040)) { + SRC_EA(env, addr, OS_LONG, 0, NULL); + /* FIXME: check the state frame */ + } else { + disas_undef(env, s, insn); + } +} + +DISAS_INSN(fsave) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (IS_USER(s)) { + gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); + return; + } + + if (m68k_feature(s->env, M68K_FEATURE_M68040)) { + /* always write IDLE */ + TCGv idle = tcg_const_i32(tcg_ctx, 0x41000000); + 
DEST_EA(env, insn, OS_LONG, idle, NULL); + tcg_temp_free(tcg_ctx, idle); + } else { + disas_undef(env, s, insn); + } +} + +static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp = tcg_temp_new(tcg_ctx); + if (s->env->macsr & MACSR_FI) { + if (upper) + tcg_gen_andi_i32(tcg_ctx, tmp, val, 0xffff0000); + else + tcg_gen_shli_i32(tcg_ctx, tmp, val, 16); + } else if (s->env->macsr & MACSR_SU) { + if (upper) + tcg_gen_sari_i32(tcg_ctx, tmp, val, 16); + else + tcg_gen_ext16s_i32(tcg_ctx, tmp, val); + } else { + if (upper) + tcg_gen_shri_i32(tcg_ctx, tmp, val, 16); + else + tcg_gen_ext16u_i32(tcg_ctx, tmp, val); + } + return tmp; +} + +static void gen_mac_clear_flags(TCGContext *tcg_ctx) +{ + tcg_gen_andi_i32(tcg_ctx, QREG_MACSR, QREG_MACSR, + ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV)); +} + +DISAS_INSN(mac) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv rx; + TCGv ry; + uint16_t ext; + int acc; + TCGv tmp; + TCGv addr; + TCGv loadval; + int dual; + TCGv saved_flags; + + if (!s->done_mac) { + s->mactmp = tcg_temp_new_i64(tcg_ctx); + s->done_mac = 1; + } + + ext = read_im16(env, s); + + acc = ((insn >> 7) & 1) | ((ext >> 3) & 2); + dual = ((insn & 0x30) != 0 && (ext & 3) != 0); + if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) { + disas_undef(env, s, insn); + return; + } + if (insn & 0x30) { + /* MAC with load. */ + tmp = gen_lea(env, s, insn, OS_LONG); + addr = tcg_temp_new(tcg_ctx); + tcg_gen_and_i32(tcg_ctx, addr, tmp, QREG_MAC_MASK); + /* + * Load the value now to ensure correct exception behavior. + * Perform writeback after reading the MAC inputs. + */ + loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s)); + + acc ^= 1; + rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12); + ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0); + } else { + loadval = addr = tcg_ctx->NULL_QREG; + rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9); + ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); + } + + gen_mac_clear_flags(tcg_ctx); +#if 0 + l1 = -1; + /* Disabled because conditional branches clobber temporary vars. */ + if ((s->env->macsr & MACSR_OMC) != 0 && !dual) { + /* Skip the multiply if we know we will ignore it. */ + l1 = gen_new_label(tcg_ctx); + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, QREG_MACSR, 1 << (acc + 8)); + gen_op_jmp_nz32(tmp, l1); + } +#endif + + if ((ext & 0x0800) == 0) { + /* Word. */ + rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0); + ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0); + } + if (s->env->macsr & MACSR_FI) { + gen_helper_macmulf(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); + } else { + if (s->env->macsr & MACSR_SU) + gen_helper_macmuls(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); + else + gen_helper_macmulu(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); + switch ((ext >> 9) & 3) { + case 1: + tcg_gen_shli_i64(tcg_ctx, s->mactmp, s->mactmp, 1); + break; + case 3: + tcg_gen_shri_i64(tcg_ctx, s->mactmp, s->mactmp, 1); + break; + } + } + + if (dual) { + /* Save the overflow flag from the multiply. */ + saved_flags = tcg_temp_new(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, saved_flags, QREG_MACSR); + } else { + saved_flags = tcg_ctx->NULL_QREG; + } + +#if 0 + /* Disabled because conditional branches clobber temporary vars. */ + if ((s->env->macsr & MACSR_OMC) != 0 && dual) { + /* Skip the accumulate if the value is already saturated. 
*/ + l1 = gen_new_label(tcg_ctx); + tmp = tcg_temp_new(tcg_ctx); + gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(tcg_ctx, MACSR_PAV0 << acc)); + gen_op_jmp_nz32(tmp, l1); + } +#endif + + if (insn & 0x100) + tcg_gen_sub_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); + else + tcg_gen_add_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); + + if (s->env->macsr & MACSR_FI) + gen_helper_macsatf(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + else if (s->env->macsr & MACSR_SU) + gen_helper_macsats(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + else + gen_helper_macsatu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + +#if 0 + /* Disabled because conditional branches clobber temporary vars. */ + if (l1 != -1) + gen_set_label(tcg_ctx, l1); +#endif + + if (dual) { + /* Dual accumulate variant. */ + acc = (ext >> 2) & 3; + /* Restore the overflow flag from the multiplier. */ + tcg_gen_mov_i32(tcg_ctx, QREG_MACSR, saved_flags); +#if 0 + /* Disabled because conditional branches clobber temporary vars. */ + if ((s->env->macsr & MACSR_OMC) != 0) { + /* Skip the accumulate if the value is already saturated. */ + l1 = gen_new_label(tcg_ctx); + tmp = tcg_temp_new(tcg_ctx); + gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(tcg_ctx, MACSR_PAV0 << acc)); + gen_op_jmp_nz32(tmp, l1); + } +#endif + if (ext & 2) + tcg_gen_sub_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); + else + tcg_gen_add_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); + if (s->env->macsr & MACSR_FI) + gen_helper_macsatf(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + else if (s->env->macsr & MACSR_SU) + gen_helper_macsats(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + else + gen_helper_macsatu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); +#if 0 + /* Disabled because conditional branches clobber temporary vars. */ + if (l1 != -1) + gen_set_label(tcg_ctx, l1); +#endif + } + gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + + if (insn & 0x30) { + TCGv rw; + rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9); + tcg_gen_mov_i32(tcg_ctx, rw, loadval); + /* + * FIXME: Should address writeback happen with the masked or + * unmasked value? + */ + switch ((insn >> 3) & 7) { + case 3: /* Post-increment. */ + tcg_gen_addi_i32(tcg_ctx, AREG(insn, 0), addr, 4); + break; + case 4: /* Pre-decrement. */ + tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); + } + tcg_temp_free(tcg_ctx, loadval); + } +} + +DISAS_INSN(from_mac) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv rx; + TCGv_i64 acc; + int accnum; + + rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); + accnum = (insn >> 9) & 3; + acc = MACREG(accnum); + if (s->env->macsr & MACSR_FI) { + gen_helper_get_macf(tcg_ctx, rx, tcg_ctx->cpu_env, acc); + } else if ((s->env->macsr & MACSR_OMC) == 0) { + tcg_gen_extrl_i64_i32(tcg_ctx, rx, acc); + } else if (s->env->macsr & MACSR_SU) { + gen_helper_get_macs(tcg_ctx, rx, acc); + } else { + gen_helper_get_macu(tcg_ctx, rx, acc); + } + if (insn & 0x40) { + tcg_gen_movi_i64(tcg_ctx, acc, 0); + tcg_gen_andi_i32(tcg_ctx, QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum)); + } +} + +DISAS_INSN(move_mac) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* FIXME: This can be done without a helper. 
*/ + int src; + TCGv dest; + src = insn & 3; + dest = tcg_const_i32(tcg_ctx, (insn >> 9) & 3); + gen_helper_mac_move(tcg_ctx, tcg_ctx->cpu_env, dest, tcg_const_i32(tcg_ctx, src)); + gen_mac_clear_flags(tcg_ctx); + gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, dest); +} + +DISAS_INSN(from_macsr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + + reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); + tcg_gen_mov_i32(tcg_ctx, reg, QREG_MACSR); +} + +DISAS_INSN(from_mask) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); + tcg_gen_mov_i32(tcg_ctx, reg, QREG_MAC_MASK); +} + +DISAS_INSN(from_mext) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv acc; + reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); + acc = tcg_const_i32(tcg_ctx, (insn & 0x400) ? 2 : 0); + if (s->env->macsr & MACSR_FI) + gen_helper_get_mac_extf(tcg_ctx, reg, tcg_ctx->cpu_env, acc); + else + gen_helper_get_mac_exti(tcg_ctx, reg, tcg_ctx->cpu_env, acc); +} + +DISAS_INSN(macsr_to_ccr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, QREG_MACSR, 0xf); + gen_helper_set_sr(tcg_ctx, tcg_ctx->cpu_env, tmp); + tcg_temp_free(tcg_ctx, tmp); + set_cc_op(s, CC_OP_FLAGS); +} + +DISAS_INSN(to_mac) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 acc; + TCGv val; + int accnum; + accnum = (insn >> 9) & 3; + acc = MACREG(accnum); + SRC_EA(env, val, OS_LONG, 0, NULL); + if (s->env->macsr & MACSR_FI) { + tcg_gen_ext_i32_i64(tcg_ctx, acc, val); + tcg_gen_shli_i64(tcg_ctx, acc, acc, 8); + } else if (s->env->macsr & MACSR_SU) { + tcg_gen_ext_i32_i64(tcg_ctx, acc, val); + } else { + tcg_gen_extu_i32_i64(tcg_ctx, acc, val); + } + tcg_gen_andi_i32(tcg_ctx, QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum)); + gen_mac_clear_flags(tcg_ctx); + gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, accnum)); +} + +DISAS_INSN(to_macsr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv val; + SRC_EA(env, val, OS_LONG, 0, NULL); + gen_helper_set_macsr(tcg_ctx, tcg_ctx->cpu_env, val); + gen_exit_tb(s); +} + +DISAS_INSN(to_mask) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv val; + SRC_EA(env, val, OS_LONG, 0, NULL); + tcg_gen_ori_i32(tcg_ctx, QREG_MAC_MASK, val, 0xffff0000); +} + +DISAS_INSN(to_mext) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv val; + TCGv acc; + SRC_EA(env, val, OS_LONG, 0, NULL); + acc = tcg_const_i32(tcg_ctx, (insn & 0x400) ? 2 : 0); + if (s->env->macsr & MACSR_FI) + gen_helper_set_mac_extf(tcg_ctx, tcg_ctx->cpu_env, val, acc); + else if (s->env->macsr & MACSR_SU) + gen_helper_set_mac_exts(tcg_ctx, tcg_ctx->cpu_env, val, acc); + else + gen_helper_set_mac_extu(tcg_ctx, tcg_ctx->cpu_env, val, acc); +} + +static disas_proc opcode_table[65536]; + +static void +register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask) +{ + int i; + int from; + int to; + + /* Sanity check. All set bits must be included in the mask. */ + if (opcode & ~mask) { + fprintf(stderr, + "qemu internal error: bogus opcode definition %04x/%04x\n", + opcode, mask); + abort(); + } + /* + * This could probably be cleverer. For now just optimize the case where + * the top bits are known. + */ + /* Find the first zero bit in the mask. */ + i = 0x8000; + while ((i & mask) != 0) + i >>= 1; + /* Iterate over all combinations of this and lower bits. 
*/ + if (i == 0) + i = 1; + else + i <<= 1; + from = opcode & ~(i - 1); + to = from + i; + for (i = from; i < to; i++) { + if ((i & mask) == opcode) + opcode_table[i] = proc; + } +} + +/* + * Register m68k opcode handlers. Order is important. + * Later insns override earlier ones. + */ +void register_m68k_insns (CPUM68KState *env) +{ + /* + * Build the opcode table only once to avoid + * multithreading issues. + */ + if (opcode_table[0] != NULL) { + return; + } + + /* + * Use BASE() for instructions available + * on both CF_ISA_A and M68000. + */ +#define BASE(name, opcode, mask) \ + register_opcode(disas_##name, 0x##opcode, 0x##mask) +#define INSN(name, opcode, mask, feature) do { \ + if (m68k_feature(env, M68K_FEATURE_##feature)) \ + BASE(name, opcode, mask); \ + } while(0) + BASE(undef, 0000, 0000); + INSN(arith_im, 0080, fff8, CF_ISA_A); + INSN(arith_im, 0000, ff00, M68000); + INSN(chk2, 00c0, f9c0, CHK2); + INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC); + BASE(bitop_reg, 0100, f1c0); + BASE(bitop_reg, 0140, f1c0); + BASE(bitop_reg, 0180, f1c0); + BASE(bitop_reg, 01c0, f1c0); + INSN(movep, 0108, f138, MOVEP); + INSN(arith_im, 0280, fff8, CF_ISA_A); + INSN(arith_im, 0200, ff00, M68000); + INSN(undef, 02c0, ffc0, M68000); + INSN(byterev, 02c0, fff8, CF_ISA_APLUSC); + INSN(arith_im, 0480, fff8, CF_ISA_A); + INSN(arith_im, 0400, ff00, M68000); + INSN(undef, 04c0, ffc0, M68000); + INSN(arith_im, 0600, ff00, M68000); + INSN(undef, 06c0, ffc0, M68000); + INSN(ff1, 04c0, fff8, CF_ISA_APLUSC); + INSN(arith_im, 0680, fff8, CF_ISA_A); + INSN(arith_im, 0c00, ff38, CF_ISA_A); + INSN(arith_im, 0c00, ff00, M68000); + BASE(bitop_im, 0800, ffc0); + BASE(bitop_im, 0840, ffc0); + BASE(bitop_im, 0880, ffc0); + BASE(bitop_im, 08c0, ffc0); + INSN(arith_im, 0a80, fff8, CF_ISA_A); + INSN(arith_im, 0a00, ff00, M68000); + INSN(moves, 0e00, ff00, M68000); + INSN(cas, 0ac0, ffc0, CAS); + INSN(cas, 0cc0, ffc0, CAS); + INSN(cas, 0ec0, ffc0, CAS); + INSN(cas2w, 0cfc, ffff, CAS); + INSN(cas2l, 0efc, ffff, CAS); + BASE(move, 1000, f000); + BASE(move, 2000, f000); + BASE(move, 3000, f000); + INSN(chk, 4000, f040, M68000); + INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC); + INSN(negx, 4080, fff8, CF_ISA_A); + INSN(negx, 4000, ff00, M68000); + INSN(undef, 40c0, ffc0, M68000); + INSN(move_from_sr, 40c0, fff8, CF_ISA_A); + INSN(move_from_sr, 40c0, ffc0, M68000); + BASE(lea, 41c0, f1c0); + BASE(clr, 4200, ff00); + BASE(undef, 42c0, ffc0); + INSN(move_from_ccr, 42c0, fff8, CF_ISA_A); + INSN(move_from_ccr, 42c0, ffc0, M68000); + INSN(neg, 4480, fff8, CF_ISA_A); + INSN(neg, 4400, ff00, M68000); + INSN(undef, 44c0, ffc0, M68000); + BASE(move_to_ccr, 44c0, ffc0); + INSN(not, 4680, fff8, CF_ISA_A); + INSN(not, 4600, ff00, M68000); + BASE(move_to_sr, 46c0, ffc0); + INSN(nbcd, 4800, ffc0, M68000); + INSN(linkl, 4808, fff8, M68000); + BASE(pea, 4840, ffc0); + BASE(swap, 4840, fff8); + INSN(bkpt, 4848, fff8, BKPT); + INSN(movem, 48d0, fbf8, CF_ISA_A); + INSN(movem, 48e8, fbf8, CF_ISA_A); + INSN(movem, 4880, fb80, M68000); + BASE(ext, 4880, fff8); + BASE(ext, 48c0, fff8); + BASE(ext, 49c0, fff8); + BASE(tst, 4a00, ff00); + INSN(tas, 4ac0, ffc0, CF_ISA_B); + INSN(tas, 4ac0, ffc0, M68000); + INSN(halt, 4ac8, ffff, CF_ISA_A); + INSN(pulse, 4acc, ffff, CF_ISA_A); + BASE(illegal, 4afc, ffff); + INSN(mull, 4c00, ffc0, CF_ISA_A); + INSN(mull, 4c00, ffc0, LONG_MULDIV); + INSN(divl, 4c40, ffc0, CF_ISA_A); + INSN(divl, 4c40, ffc0, LONG_MULDIV); + INSN(sats, 4c80, fff8, CF_ISA_B); + BASE(trap, 4e40, fff0); + BASE(link, 4e50, fff8); + BASE(unlk, 4e58, fff8); +
INSN(move_to_usp, 4e60, fff8, USP); + INSN(move_from_usp, 4e68, fff8, USP); + INSN(reset, 4e70, ffff, M68000); + BASE(stop, 4e72, ffff); + BASE(rte, 4e73, ffff); + INSN(cf_movec, 4e7b, ffff, CF_ISA_A); + INSN(m68k_movec, 4e7a, fffe, M68000); + BASE(nop, 4e71, ffff); + INSN(rtd, 4e74, ffff, RTD); + BASE(rts, 4e75, ffff); + BASE(jump, 4e80, ffc0); + BASE(jump, 4ec0, ffc0); + INSN(addsubq, 5000, f080, M68000); + BASE(addsubq, 5080, f0c0); + INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */ + INSN(scc, 50c0, f0c0, M68000); /* Scc.B */ + INSN(dbcc, 50c8, f0f8, M68000); + INSN(tpf, 51f8, fff8, CF_ISA_A); + + /* Branch instructions. */ + BASE(branch, 6000, f000); + /* Disable long branch instructions, then add back the ones we want. */ + BASE(undef, 60ff, f0ff); /* All long branches. */ + INSN(branch, 60ff, f0ff, CF_ISA_B); + INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */ + INSN(branch, 60ff, ffff, BRAL); + INSN(branch, 60ff, f0ff, BCCL); + + BASE(moveq, 7000, f100); + INSN(mvzs, 7100, f100, CF_ISA_B); + BASE(or, 8000, f000); + BASE(divw, 80c0, f0c0); + INSN(sbcd_reg, 8100, f1f8, M68000); + INSN(sbcd_mem, 8108, f1f8, M68000); + BASE(addsub, 9000, f000); + INSN(undef, 90c0, f0c0, CF_ISA_A); + INSN(subx_reg, 9180, f1f8, CF_ISA_A); + INSN(subx_reg, 9100, f138, M68000); + INSN(subx_mem, 9108, f138, M68000); + INSN(suba, 91c0, f1c0, CF_ISA_A); + INSN(suba, 90c0, f0c0, M68000); + + BASE(undef_mac, a000, f000); + INSN(mac, a000, f100, CF_EMAC); + INSN(from_mac, a180, f9b0, CF_EMAC); + INSN(move_mac, a110, f9fc, CF_EMAC); + INSN(from_macsr,a980, f9f0, CF_EMAC); + INSN(from_mask, ad80, fff0, CF_EMAC); + INSN(from_mext, ab80, fbf0, CF_EMAC); + INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC); + INSN(to_mac, a100, f9c0, CF_EMAC); + INSN(to_macsr, a900, ffc0, CF_EMAC); + INSN(to_mext, ab00, fbc0, CF_EMAC); + INSN(to_mask, ad00, ffc0, CF_EMAC); + + INSN(mov3q, a140, f1c0, CF_ISA_B); + INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */ + INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */ + INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */ + INSN(cmp, b080, f1c0, CF_ISA_A); + INSN(cmpa, b1c0, f1c0, CF_ISA_A); + INSN(cmp, b000, f100, M68000); + INSN(eor, b100, f100, M68000); + INSN(cmpm, b108, f138, M68000); + INSN(cmpa, b0c0, f0c0, M68000); + INSN(eor, b180, f1c0, CF_ISA_A); + BASE(and, c000, f000); + INSN(exg_dd, c140, f1f8, M68000); + INSN(exg_aa, c148, f1f8, M68000); + INSN(exg_da, c188, f1f8, M68000); + BASE(mulw, c0c0, f0c0); + INSN(abcd_reg, c100, f1f8, M68000); + INSN(abcd_mem, c108, f1f8, M68000); + BASE(addsub, d000, f000); + INSN(undef, d0c0, f0c0, CF_ISA_A); + INSN(addx_reg, d180, f1f8, CF_ISA_A); + INSN(addx_reg, d100, f138, M68000); + INSN(addx_mem, d108, f138, M68000); + INSN(adda, d1c0, f1c0, CF_ISA_A); + INSN(adda, d0c0, f0c0, M68000); + INSN(shift_im, e080, f0f0, CF_ISA_A); + INSN(shift_reg, e0a0, f0f0, CF_ISA_A); + INSN(shift8_im, e000, f0f0, M68000); + INSN(shift16_im, e040, f0f0, M68000); + INSN(shift_im, e080, f0f0, M68000); + INSN(shift8_reg, e020, f0f0, M68000); + INSN(shift16_reg, e060, f0f0, M68000); + INSN(shift_reg, e0a0, f0f0, M68000); + INSN(shift_mem, e0c0, fcc0, M68000); + INSN(rotate_im, e090, f0f0, M68000); + INSN(rotate8_im, e010, f0f0, M68000); + INSN(rotate16_im, e050, f0f0, M68000); + INSN(rotate_reg, e0b0, f0f0, M68000); + INSN(rotate8_reg, e030, f0f0, M68000); + INSN(rotate16_reg, e070, f0f0, M68000); + INSN(rotate_mem, e4c0, fcc0, M68000); + INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */ + INSN(bfext_reg, e9c0, fdf8, BITFIELD); + INSN(bfins_mem, efc0, ffc0, BITFIELD); + 
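/* bfins */ +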
INSN(bfins_reg, efc0, fff8, BITFIELD); + INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */ + INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */ + INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */ + INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */ + INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */ + INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */ + INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */ + INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */ + INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */ + INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */ + BASE(undef_fpu, f000, f000); + INSN(fpu, f200, ffc0, CF_FPU); + INSN(fbcc, f280, ffc0, CF_FPU); + INSN(fpu, f200, ffc0, FPU); + INSN(fscc, f240, ffc0, FPU); + INSN(fbcc, f280, ff80, FPU); + INSN(frestore, f340, ffc0, CF_FPU); + INSN(fsave, f300, ffc0, CF_FPU); + INSN(frestore, f340, ffc0, FPU); + INSN(fsave, f300, ffc0, FPU); + INSN(intouch, f340, ffc0, CF_ISA_A); + INSN(cpushl, f428, ff38, CF_ISA_A); + INSN(cpush, f420, ff20, M68040); + INSN(cinv, f400, ff20, M68040); + INSN(pflush, f500, ffe0, M68040); + INSN(ptest, f548, ffd8, M68040); + INSN(wddata, fb00, ff00, CF_ISA_A); + INSN(wdebug, fbc0, ffc0, CF_ISA_A); + INSN(move16_mem, f600, ffe0, M68040); + INSN(move16_reg, f620, fff8, M68040); +#undef INSN +} + +static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + CPUM68KState *env = cpu->env_ptr; + + // unicorn setup + dc->uc = cpu->uc; + + dc->env = env; + dc->pc = dc->base.pc_first; + dc->cc_op = CC_OP_DYNAMIC; + dc->cc_op_synced = 1; + dc->done_mac = 0; + dc->writeback_mask = 0; + init_release_array(dc); +} + +static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) +{ +} + +static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, dc->cc_op); +} + +static bool m68k_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, + const CPUBreakpoint *bp) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + + gen_exception(dc, dc->base.pc_next, EXCP_DEBUG); + /* + * The address covered by the breakpoint must be included in + * [tb->pc, tb->pc + tb->size) in order for it to be + * properly cleared -- thus we increment the PC here so that + * the logic setting tb->size below does the right thing. + */ + dc->base.pc_next += 2; + + return true; +} + +static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = dc->uc; + TCGContext *tcg_ctx = uc->tcg_ctx; + CPUM68KState *env = cpu->env_ptr; + uint16_t insn; + + // Unicorn: end address tells us to stop emulation + if (dc->pc == uc->addr_end) { + gen_exception(dc, dc->pc, EXCP_HLT); + return; + } + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, dc->pc)) { + gen_uc_tracecode(tcg_ctx, 2, UC_HOOK_CODE_IDX, uc, dc->pc); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + insn = read_im16(env, dc); + + opcode_table[insn](env, dc, insn); + do_writebacks(dc); + do_release(dc); + + dc->base.pc_next = dc->pc; + + if (dc->base.is_jmp == DISAS_NEXT) { + /* + * Stop translation when the next insn might touch a new page. + * This ensures that prefetch aborts at the right place.
+ * + * We cannot determine the size of the next insn without + * completely decoding it. However, the maximum insn size + * is 32 bytes, so end if we do not have that much remaining. + * This may produce several small TBs at the end of each page, + * but they will all be linked with goto_tb. + * + * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also + * smaller than MC68020's. + */ + target_ulong start_page_offset + = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK); + + if (start_page_offset >= TARGET_PAGE_SIZE - 32) { + dc->base.is_jmp = DISAS_TOO_MANY; + } + } +} + +static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + switch (dc->base.is_jmp) { + case DISAS_NORETURN: + break; + case DISAS_TOO_MANY: + update_cc_op(dc); + if (dc->base.singlestep_enabled) { + tcg_gen_movi_i32(tcg_ctx, QREG_PC, dc->pc); + gen_raise_exception(tcg_ctx, EXCP_DEBUG); + } else { + gen_jmp_tb(dc, 0, dc->pc); + } + break; + case DISAS_JUMP: + /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */ + if (dc->base.singlestep_enabled) { + gen_raise_exception(tcg_ctx, EXCP_DEBUG); + } else { + tcg_gen_lookup_and_goto_ptr(tcg_ctx); + } + break; + case DISAS_EXIT: + /* + * We updated CC_OP and PC in gen_exit_tb, but also modified + * other state that may require returning to the main loop. + */ + if (dc->base.singlestep_enabled) { + gen_raise_exception(tcg_ctx, EXCP_DEBUG); + } else { + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + } + break; + default: + g_assert_not_reached(); + } +} + +static const TranslatorOps m68k_tr_ops = { + .init_disas_context = m68k_tr_init_disas_context, + .tb_start = m68k_tr_tb_start, + .insn_start = m68k_tr_insn_start, + .breakpoint_check = m68k_tr_breakpoint_check, + .translate_insn = m68k_tr_translate_insn, + .tb_stop = m68k_tr_tb_stop, +}; + +void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) +{ + DisasContext dc; + + memset(&dc, 0, sizeof(dc)); + translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns); +} + +#if 0 +static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low) +{ + floatx80 a = { .high = high, .low = low }; + union { + float64 f64; + double d; + } u; + + u.f64 = floatx80_to_float64(a, &env->fp_status); + return u.d; +} +#endif + +void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, + target_ulong *data) +{ + int cc_op = data[1]; + env->pc = data[0]; + if (cc_op != CC_OP_DYNAMIC) { + env->cc_op = cc_op; + } +} diff --git a/qemu/target/m68k/unicorn.c b/qemu/target/m68k/unicorn.c new file mode 100644 index 00000000..1b9c5b14 --- /dev/null +++ b/qemu/target/m68k/unicorn.c @@ -0,0 +1,166 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + +#include "sysemu/cpus.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" +#include "unicorn.h" + +M68kCPU *cpu_m68k_init(struct uc_struct *uc, const char *cpu_model); + +static void m68k_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUM68KState *)uc->cpu->env_ptr)->pc = address; +} + +static void m68k_release(void* ctx) +{ + int i; + TCGContext *tcg_ctx = (TCGContext *)ctx; + M68kCPU *cpu = (M68kCPU *)tcg_ctx->uc->cpu; + CPUTLBDesc *d = cpu->neg.tlb.d; + CPUTLBDescFast *f = cpu->neg.tlb.f; + CPUTLBDesc *desc; + CPUTLBDescFast *fast; + + release_common(ctx); + for (i = 0; i < NB_MMU_MODES; i++) { + desc = &(d[i]); + fast = &(f[i]); + g_free(desc->iotlb); 
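+ /* the fast table for this MMU mode is a separate allocation and must be freed too */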
+ g_free(fast->table); + } +} + +void m68k_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env = uc->cpu->env_ptr; + + memset(env->aregs, 0, sizeof(env->aregs)); + memset(env->dregs, 0, sizeof(env->dregs)); + + env->pc = 0; +} + +static void reg_read(CPUM68KState *env, unsigned int regid, void *value) +{ + if (regid >= UC_M68K_REG_A0 && regid <= UC_M68K_REG_A7) + *(int32_t *)value = env->aregs[regid - UC_M68K_REG_A0]; + else if (regid >= UC_M68K_REG_D0 && regid <= UC_M68K_REG_D7) + *(int32_t *)value = env->dregs[regid - UC_M68K_REG_D0]; + else { + switch(regid) { + default: break; + case UC_M68K_REG_PC: + *(int32_t *)value = env->pc; + break; + } + } + + return; +} + +static void reg_write(CPUM68KState *env, unsigned int regid, const void *value) +{ + if (regid >= UC_M68K_REG_A0 && regid <= UC_M68K_REG_A7) + env->aregs[regid - UC_M68K_REG_A0] = *(uint32_t *)value; + else if (regid >= UC_M68K_REG_D0 && regid <= UC_M68K_REG_D7) + env->dregs[regid - UC_M68K_REG_D0] = *(uint32_t *)value; + else { + switch(regid) { + default: break; + case UC_M68K_REG_PC: + env->pc = *(uint32_t *)value; + break; + } + } +} + +int m68k_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUM68KState* env = &(M68K_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +int m68k_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) +{ + CPUM68KState* env = &(M68K_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + if (regid == UC_M68K_REG_PC){ + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + } + } + + return 0; +} + +DEFAULT_VISIBILITY +int m68k_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +{ + CPUM68KState* env = (CPUM68KState* )ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +DEFAULT_VISIBILITY +int m68k_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +{ + CPUM68KState* env = (CPUM68KState* )ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + } + + return 0; +} + +static int m68k_cpus_init(struct uc_struct *uc, const char *cpu_model) +{ + M68kCPU *cpu; + + cpu = cpu_m68k_init(uc, cpu_model); + if (cpu == NULL) { + return -1; + } + return 0; +} + +DEFAULT_VISIBILITY +void m68k_uc_init(struct uc_struct* uc) +{ + uc->release = m68k_release; + uc->reg_read = m68k_reg_read; + uc->reg_write = m68k_reg_write; + uc->reg_reset = m68k_reg_reset; + uc->set_pc = m68k_set_pc; + uc->cpus_init = m68k_cpus_init; + uc->cpu_context_size = offsetof(CPUM68KState, end_reset_fields); + uc_common_init(uc); +} diff --git a/qemu/target-m68k/unicorn.h b/qemu/target/m68k/unicorn.h similarity index 69% rename from qemu/target-m68k/unicorn.h rename to qemu/target/m68k/unicorn.h index 59471865..2eeeaae8 100644 --- a/qemu/target-m68k/unicorn.h +++ b/qemu/target/m68k/unicorn.h @@ -7,10 +7,10 @@ // functions to read & write registers int m68k_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); int m68k_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); +int 
m68k_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int m68k_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); void m68k_reg_reset(struct uc_struct *uc); void m68k_uc_init(struct uc_struct* uc); - -extern const int M68K_REGS_STORAGE_SIZE; #endif diff --git a/qemu/target-mips/TODO b/qemu/target/mips/TODO similarity index 100% rename from qemu/target-mips/TODO rename to qemu/target/mips/TODO diff --git a/qemu/target/mips/cp0_helper.c b/qemu/target/mips/cp0_helper.c new file mode 100644 index 00000000..e3600c26 --- /dev/null +++ b/qemu/target/mips/cp0_helper.c @@ -0,0 +1,1692 @@ +/* + * Helpers for emulation of CP0-related MIPS instructions. + * + * Copyright (C) 2004-2005 Jocelyn Mayer + * Copyright (C) 2020 Wave Computing, Inc. + * Copyright (C) 2020 Aleksandar Markovic + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "exec/memop.h" +//#include "sysemu/kvm.h" + + +/* SMP helpers. */ +static bool mips_vpe_is_wfi(MIPSCPU *c) +{ + CPUState *cpu = CPU(c); + CPUMIPSState *env = &c->env; + + /* + * If the VPE is halted but otherwise active, it means it's waiting for + * an interrupt. + */ + return cpu->halted && mips_vpe_active(env); +} + +#if 0 +static bool mips_vp_is_wfi(MIPSCPU *c) +{ + CPUState *cpu = CPU(c); + CPUMIPSState *env = &c->env; + + return cpu->halted && mips_vp_active(env, cpu); +} +#endif + +static inline void mips_vpe_wake(MIPSCPU *c) +{ + /* + * Don't set ->halted = 0 directly, let it be done via cpu_has_work + * because there might be other conditions that state that c should + * be sleeping. + */ + cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE); +} + +static inline void mips_vpe_sleep(MIPSCPU *cpu) +{ + CPUState *cs = CPU(cpu); + + /* + * The VPE was shut off, really go to bed. + * Reset any old _WAKE requests. + */ + cs->halted = 1; + cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); +} + +static inline void mips_tc_wake(MIPSCPU *cpu, int tc) +{ + CPUMIPSState *c = &cpu->env; + + /* FIXME: TC reschedule. */ + if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) { + mips_vpe_wake(cpu); + } +} + +static inline void mips_tc_sleep(MIPSCPU *cpu, int tc) +{ + CPUMIPSState *c = &cpu->env; + + /* FIXME: TC reschedule. */ + if (!mips_vpe_active(c)) { + mips_vpe_sleep(cpu); + } +} + +/** + * mips_cpu_map_tc: + * @env: CPU from which mapping is performed. + * @tc: Should point to an int with the value of the global TC index. + * + * This function will transform @tc into a local index within the + * returned #CPUMIPSState. + */ + +/* + * FIXME: This code assumes that all VPEs have the same number of TCs, + * which depends on runtime setup. Can probably be fixed by + * walking the list of CPUMIPSStates.
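+ *
+ * Note: in this port the cross-CPU lookup is compiled out (see the
+ * #if 0 block below), so the function always resolves to the calling
+ * env, with *tc reduced modulo the thread count.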
+ */ +static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc) +{ + CPUState *cs; + // int vpe_idx; + int tc_idx = *tc; + + if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) { + /* Not allowed to address other CPUs. */ + *tc = env->current_tc; + return env; + } + + cs = env_cpu(env); + // vpe_idx = tc_idx / cs->nr_threads; + *tc = tc_idx % cs->nr_threads; + return env; + +#if 0 + MIPSCPU *cpu; + CPUState *other_cs; + other_cs = qemu_get_cpu(vpe_idx); + if (other_cs == NULL) { + return env; + } + cpu = MIPS_CPU(other_cs); + return &cpu->env; +#endif +} + +/* + * The per VPE CP0_Status register shares some fields with the per TC + * CP0_TCStatus registers. These fields are wired to the same registers, + * so changes to either of them should be reflected on both registers. + * + * Also, EntryHi shares the bottom 8 ASID bits with TCStatus. + * + * These helper calls synchronize the regs for a given cpu. + */ + +/* + * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c. + * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, + * int tc); + */ + +/* Called for updates to CP0_TCStatus. */ +static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc, + target_ulong v) +{ + uint32_t status; + uint32_t tcu, tmx, tasid, tksu; + uint32_t mask = ((1U << CP0St_CU3) + | (1 << CP0St_CU2) + | (1 << CP0St_CU1) + | (1 << CP0St_CU0) + | (1 << CP0St_MX) + | (3 << CP0St_KSU)); + + tcu = (v >> CP0TCSt_TCU0) & 0xf; + tmx = (v >> CP0TCSt_TMX) & 0x1; + tasid = v & cpu->CP0_EntryHi_ASID_mask; + tksu = (v >> CP0TCSt_TKSU) & 0x3; + + status = tcu << CP0St_CU0; + status |= tmx << CP0St_MX; + status |= tksu << CP0St_KSU; + + cpu->CP0_Status &= ~mask; + cpu->CP0_Status |= status; + + /* Sync the TASID with EntryHi. */ + cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask; + cpu->CP0_EntryHi |= tasid; + + compute_hflags(cpu); +} + +/* Called for updates to CP0_EntryHi.
*/ +static void sync_c0_entryhi(CPUMIPSState *cpu, int tc) +{ + int32_t *tcst; + uint32_t asid, v = cpu->CP0_EntryHi; + + asid = v & cpu->CP0_EntryHi_ASID_mask; + + if (tc == cpu->current_tc) { + tcst = &cpu->active_tc.CP0_TCStatus; + } else { + tcst = &cpu->tcs[tc].CP0_TCStatus; + } + + *tcst &= ~cpu->CP0_EntryHi_ASID_mask; + *tcst |= asid; +} + +/* CP0 helpers */ +target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env) +{ + return env->mvp->CP0_MVPControl; +} + +target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env) +{ + return env->mvp->CP0_MVPConf0; +} + +target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env) +{ + return env->mvp->CP0_MVPConf1; +} + +target_ulong helper_mfc0_random(CPUMIPSState *env) +{ + return (int32_t)cpu_mips_get_random(env); +} + +target_ulong helper_mfc0_tcstatus(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCStatus; +} + +target_ulong helper_mftc0_tcstatus(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.CP0_TCStatus; + } else { + return other->tcs[other_tc].CP0_TCStatus; + } +} + +target_ulong helper_mfc0_tcbind(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCBind; +} + +target_ulong helper_mftc0_tcbind(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.CP0_TCBind; + } else { + return other->tcs[other_tc].CP0_TCBind; + } +} + +target_ulong helper_mfc0_tcrestart(CPUMIPSState *env) +{ + return env->active_tc.PC; +} + +target_ulong helper_mftc0_tcrestart(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.PC; + } else { + return other->tcs[other_tc].PC; + } +} + +target_ulong helper_mfc0_tchalt(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCHalt; +} + +target_ulong helper_mftc0_tchalt(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.CP0_TCHalt; + } else { + return other->tcs[other_tc].CP0_TCHalt; + } +} + +target_ulong helper_mfc0_tccontext(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCContext; +} + +target_ulong helper_mftc0_tccontext(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.CP0_TCContext; + } else { + return other->tcs[other_tc].CP0_TCContext; + } +} + +target_ulong helper_mfc0_tcschedule(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCSchedule; +} + +target_ulong helper_mftc0_tcschedule(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.CP0_TCSchedule; + } else { + return other->tcs[other_tc].CP0_TCSchedule; + } +} + +target_ulong helper_mfc0_tcschefback(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCScheFBack; +} + +target_ulong helper_mftc0_tcschefback(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = 
mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.CP0_TCScheFBack; + } else { + return other->tcs[other_tc].CP0_TCScheFBack; + } +} + +target_ulong helper_mfc0_count(CPUMIPSState *env) +{ + // return (int32_t)cpu_mips_get_count(env); + return 0; +} + +target_ulong helper_mfc0_saar(CPUMIPSState *env) +{ + if ((env->CP0_SAARI & 0x3f) < 2) { + return (int32_t) env->CP0_SAAR[env->CP0_SAARI & 0x3f]; + } + return 0; +} + +target_ulong helper_mfhc0_saar(CPUMIPSState *env) +{ + if ((env->CP0_SAARI & 0x3f) < 2) { + return env->CP0_SAAR[env->CP0_SAARI & 0x3f] >> 32; + } + return 0; +} + +target_ulong helper_mftc0_entryhi(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + return other->CP0_EntryHi; +} + +target_ulong helper_mftc0_cause(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + int32_t tccause; + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + tccause = other->CP0_Cause; + } else { + tccause = other->CP0_Cause; + } + + return tccause; +} + +target_ulong helper_mftc0_status(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + return other->CP0_Status; +} + +target_ulong helper_mfc0_lladdr(CPUMIPSState *env) +{ + return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift); +} + +target_ulong helper_mfc0_maar(CPUMIPSState *env) +{ + return (int32_t) env->CP0_MAAR[env->CP0_MAARI]; +} + +target_ulong helper_mfhc0_maar(CPUMIPSState *env) +{ + return env->CP0_MAAR[env->CP0_MAARI] >> 32; +} + +target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel) +{ + return (int32_t)env->CP0_WatchLo[sel]; +} + +target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel) +{ + return (int32_t) env->CP0_WatchHi[sel]; +} + +target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel) +{ + return env->CP0_WatchHi[sel] >> 32; +} + +target_ulong helper_mfc0_debug(CPUMIPSState *env) +{ + target_ulong t0 = env->CP0_Debug; + if (env->hflags & MIPS_HFLAG_DM) { + t0 |= 1 << CP0DB_DM; + } + + return t0; +} + +target_ulong helper_mftc0_debug(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + int32_t tcstatus; + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + tcstatus = other->active_tc.CP0_Debug_tcstatus; + } else { + tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus; + } + + /* XXX: Might be wrong, check with EJTAG spec. 
*/ + return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | + (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); +} + +#if defined(TARGET_MIPS64) +target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env) +{ + return env->active_tc.PC; +} + +target_ulong helper_dmfc0_tchalt(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCHalt; +} + +target_ulong helper_dmfc0_tccontext(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCContext; +} + +target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCSchedule; +} + +target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCScheFBack; +} + +target_ulong helper_dmfc0_lladdr(CPUMIPSState *env) +{ + return env->CP0_LLAddr >> env->CP0_LLAddr_shift; +} + +target_ulong helper_dmfc0_maar(CPUMIPSState *env) +{ + return env->CP0_MAAR[env->CP0_MAARI]; +} + +target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel) +{ + return env->CP0_WatchLo[sel]; +} + +target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel) +{ + return env->CP0_WatchHi[sel]; +} + +target_ulong helper_dmfc0_saar(CPUMIPSState *env) +{ + if ((env->CP0_SAARI & 0x3f) < 2) { + return env->CP0_SAAR[env->CP0_SAARI & 0x3f]; + } + return 0; +} +#endif /* TARGET_MIPS64 */ + +void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t index_p = env->CP0_Index & 0x80000000; + uint32_t tlb_index = arg1 & 0x7fffffff; + if (tlb_index < env->tlb->nb_tlb) { + if (env->insn_flags & ISA_MIPS32R6) { + index_p |= arg1 & 0x80000000; + } + env->CP0_Index = index_p | tlb_index; + } +} + +void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = 0; + uint32_t newval; + + if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { + mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) | + (1 << CP0MVPCo_EVP); + } + if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) { + mask |= (1 << CP0MVPCo_STLB); + } + newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask); + + /* TODO: Enable/disable shared TLB, enable/disable VPEs. */ + + env->mvp->CP0_MVPControl = newval; +} + +void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask; + uint32_t newval; + + mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | + (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); + newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask); + + /* + * Yield scheduler intercept not implemented. + * Gating storage scheduler intercept not implemented. + */ + + /* TODO: Enable/disable TCs. */ + + env->CP0_VPEControl = newval; +} + +void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + uint32_t mask; + uint32_t newval; + + mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | + (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); + newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask); + + /* TODO: Enable/disable TCs. */ + + other->CP0_VPEControl = newval; +} + +target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + /* FIXME: Mask away return zero on read bits. 
*/ + return other->CP0_VPEControl; +} + +target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + return other->CP0_VPEConf0; +} + +void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = 0; + uint32_t newval; + + if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { + if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) { + mask |= (0xff << CP0VPEC0_XTC); + } + mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); + } + newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask); + + /* TODO: TC exclusive handling due to ERL/EXL. */ + + env->CP0_VPEConf0 = newval; +} + +void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + uint32_t mask = 0; + uint32_t newval; + + mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); + newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask); + + /* TODO: TC exclusive handling due to ERL/EXL. */ + other->CP0_VPEConf0 = newval; +} + +void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = 0; + uint32_t newval; + + if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) + mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) | + (0xff << CP0VPEC1_NCP1); + newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask); + + /* UDI not implemented. */ + /* CP2 not implemented. */ + + /* TODO: Handle FPU (CP1) binding. */ + + env->CP0_VPEConf1 = newval; +} + +void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1) +{ + /* Yield qualifier inputs not implemented. */ + env->CP0_YQMask = 0x00000000; +} + +void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_VPEOpt = arg1 & 0x0000ffff; +} + +#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF) + +void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1) +{ + /* 1k pages not implemented */ + target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE)); + env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env)) + | (rxi << (CP0EnLo_XI - 30)); +} + +#if defined(TARGET_MIPS64) +#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6) + +void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1) +{ + uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32); + env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi; +} +#endif + +void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = env->CP0_TCStatus_rw_bitmask; + uint32_t newval; + + newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask); + + env->active_tc.CP0_TCStatus = newval; + sync_c0_tcstatus(env, env->current_tc, newval); +} + +void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + other->active_tc.CP0_TCStatus = arg1; + } else { + other->tcs[other_tc].CP0_TCStatus = arg1; + } + sync_c0_tcstatus(other, other_tc, arg1); +} + +void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = (1 << CP0TCBd_TBE); + uint32_t newval; + + if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) { + mask |= (1 << CP0TCBd_CurVPE); + } + newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask); + env->active_tc.CP0_TCBind = newval; +} + +void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1) +{ + 
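/* only TCBind.TBE is writable; CurVPE too while MVPControl.VPC is set */ +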
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + uint32_t mask = (1 << CP0TCBd_TBE); + uint32_t newval; + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) { + mask |= (1 << CP0TCBd_CurVPE); + } + if (other_tc == other->current_tc) { + newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask); + other->active_tc.CP0_TCBind = newval; + } else { + newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask); + other->tcs[other_tc].CP0_TCBind = newval; + } +} + +void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1) +{ + env->active_tc.PC = arg1; + env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS); + env->CP0_LLAddr = 0; + env->lladdr = 0; + /* MIPS16 not implemented. */ +} + +void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + other->active_tc.PC = arg1; + other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS); + other->CP0_LLAddr = 0; + other->lladdr = 0; + /* MIPS16 not implemented. */ + } else { + other->tcs[other_tc].PC = arg1; + other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS); + other->CP0_LLAddr = 0; + other->lladdr = 0; + /* MIPS16 not implemented. */ + } +} + +void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1) +{ + MIPSCPU *cpu = env_archcpu(env); + + env->active_tc.CP0_TCHalt = arg1 & 0x1; + + /* TODO: Halt TC / Restart (if allocated+active) TC. */ + if (env->active_tc.CP0_TCHalt & 1) { + mips_tc_sleep(cpu, env->current_tc); + } else { + mips_tc_wake(cpu, env->current_tc); + } +} + +void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + MIPSCPU *other_cpu = env_archcpu(other); + + /* TODO: Halt TC / Restart (if allocated+active) TC. 
*/ + + if (other_tc == other->current_tc) { + other->active_tc.CP0_TCHalt = arg1; + } else { + other->tcs[other_tc].CP0_TCHalt = arg1; + } + + if (arg1 & 1) { + mips_tc_sleep(other_cpu, other_tc); + } else { + mips_tc_wake(other_cpu, other_tc); + } +} + +void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1) +{ + env->active_tc.CP0_TCContext = arg1; +} + +void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + other->active_tc.CP0_TCContext = arg1; + } else { + other->tcs[other_tc].CP0_TCContext = arg1; + } +} + +void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1) +{ + env->active_tc.CP0_TCSchedule = arg1; +} + +void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + other->active_tc.CP0_TCSchedule = arg1; + } else { + other->tcs[other_tc].CP0_TCSchedule = arg1; + } +} + +void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1) +{ + env->active_tc.CP0_TCScheFBack = arg1; +} + +void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + other->active_tc.CP0_TCScheFBack = arg1; + } else { + other->tcs[other_tc].CP0_TCScheFBack = arg1; + } +} + +void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1) +{ + /* 1k pages not implemented */ + target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE)); + env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env)) + | (rxi << (CP0EnLo_XI - 30)); +} + +#if defined(TARGET_MIPS64) +void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1) +{ + uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32); + env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi; +} +#endif + +void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF); +} + +void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1) +{ + int32_t old; + old = env->CP0_MemoryMapID; + env->CP0_MemoryMapID = (int32_t) arg1; + /* If the MemoryMapID changes, flush qemu's TLB. 
*/ + if (old != env->CP0_MemoryMapID) { + cpu_mips_tlb_flush(env); + } +} + +void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask) +{ + uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1); + if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) || + (mask == 0x0000 || mask == 0x0003 || mask == 0x000F || + mask == 0x003F || mask == 0x00FF || mask == 0x03FF || + mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) { + env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1)); + } +} + +void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1) +{ + update_pagemask(env, arg1, &env->CP0_PageMask); +} + +void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1) +{ + /* SmartMIPS not implemented */ + /* 1k pages not implemented */ + env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) | + (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask); + compute_hflags(env); + restore_pamask(env); +} + +void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1) +{ + CPUState *cs = env_cpu(env); + + env->CP0_SegCtl0 = arg1 & CP0SC0_MASK; + tlb_flush(cs); +} + +void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1) +{ + CPUState *cs = env_cpu(env); + + env->CP0_SegCtl1 = arg1 & CP0SC1_MASK; + tlb_flush(cs); +} + +void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1) +{ + CPUState *cs = env_cpu(env); + + env->CP0_SegCtl2 = arg1 & CP0SC2_MASK; + tlb_flush(cs); +} + +void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1) +{ +#if defined(TARGET_MIPS64) + uint64_t mask = 0x3F3FFFFFFFULL; + uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL; + uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL; + + if ((env->insn_flags & ISA_MIPS32R6)) { + if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) { + mask &= ~(0x3FULL << CP0PF_BDI); + } + if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) { + mask &= ~(0x3FULL << CP0PF_GDI); + } + if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) { + mask &= ~(0x3FULL << CP0PF_UDI); + } + if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) { + mask &= ~(0x3FULL << CP0PF_MDI); + } + if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) { + mask &= ~(0x3FULL << CP0PF_PTI); + } + } + env->CP0_PWField = arg1 & mask; + + if ((new_ptei >= 32) || + ((env->insn_flags & ISA_MIPS32R6) && + (new_ptei == 0 || new_ptei == 1))) { + env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) | + (old_ptei << CP0PF_PTEI); + } +#else + uint32_t mask = 0x3FFFFFFF; + uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; + uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F; + + if ((env->insn_flags & ISA_MIPS32R6)) { + if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) { + mask &= ~(0x3F << CP0PF_GDW); + } + if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) { + mask &= ~(0x3F << CP0PF_UDW); + } + if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) { + mask &= ~(0x3F << CP0PF_MDW); + } + if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) { + mask &= ~(0x3F << CP0PF_PTW); + } + } + env->CP0_PWField = arg1 & mask; + + if ((new_ptew >= 32) || + ((env->insn_flags & ISA_MIPS32R6) && + (new_ptew == 0 || new_ptew == 1))) { + env->CP0_PWField = (env->CP0_PWField & ~0x3F) | + (old_ptew << CP0PF_PTEW); + } +#endif +} + +void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1) +{ +#if defined(TARGET_MIPS64) + env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL; +#else + env->CP0_PWSize = arg1 & 0x3FFFFFFF; +#endif +} + +void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1) +{ + if (env->insn_flags & ISA_MIPS32R6) { + if (arg1 < env->tlb->nb_tlb) { + env->CP0_Wired = arg1; + } + } else { + env->CP0_Wired = arg1 % env->tlb->nb_tlb; + 
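/* pre-R6 cores accept any value, wrapped modulo the TLB size */ +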
} +} + +void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1) +{ +#if defined(TARGET_MIPS64) + /* PWEn = 0. Hardware page table walking is not implemented. */ + env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F); +#else + env->CP0_PWCtl = (arg1 & 0x800000FF); +#endif +} + +void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask; +} + +void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask; +} + +void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask; +} + +void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask; +} + +void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask; +} + +void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = 0x0000000F; + + if ((env->CP0_Config1 & (1 << CP0C1_PC)) && + (env->insn_flags & ISA_MIPS32R6)) { + mask |= (1 << 4); + } + if (env->insn_flags & ISA_MIPS32R6) { + mask |= (1 << 5); + } + if (env->CP0_Config3 & (1 << CP0C3_ULRI)) { + mask |= (1 << 29); + + if (arg1 & (1 << 29)) { + env->hflags |= MIPS_HFLAG_HWRENA_ULR; + } else { + env->hflags &= ~MIPS_HFLAG_HWRENA_ULR; + } + } + + env->CP0_HWREna = arg1 & mask; +} + +void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1) +{ + //cpu_mips_store_count(env, arg1); +} + +void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t target = arg1 & 0x3f; + if (target <= 1) { + env->CP0_SAARI = target; + } +} + +void helper_mtc0_saar(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t target = env->CP0_SAARI & 0x3f; + if (target < 2) { + env->CP0_SAAR[target] = arg1 & 0x00000ffffffff03fULL; + switch (target) { + case 0: + if (env->itu) { + // itc_reconfigure(env->itu); + } + break; + } + } +} + +void helper_mthc0_saar(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t target = env->CP0_SAARI & 0x3f; + if (target < 2) { + env->CP0_SAAR[target] = + (((uint64_t) arg1 << 32) & 0x00000fff00000000ULL) | + (env->CP0_SAAR[target] & 0x00000000ffffffffULL); + switch (target) { + case 0: + if (env->itu) { + // itc_reconfigure(env->itu); + } + break; + } + } +} + +void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1) +{ + target_ulong old, val, mask; + mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask; + if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) { + mask |= 1 << CP0EnHi_EHINV; + } + + /* 1k pages not implemented */ +#if defined(TARGET_MIPS64) + if (env->insn_flags & ISA_MIPS32R6) { + int entryhi_r = extract64(arg1, 62, 2); + int config0_at = extract32(env->CP0_Config0, 13, 2); + bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0; + if ((entryhi_r == 2) || + (entryhi_r == 1 && (no_supervisor || config0_at == 1))) { + /* skip EntryHi.R field if new value is reserved */ + mask &= ~(0x3ull << 62); + } + } + mask &= env->SEGMask; +#endif + old = env->CP0_EntryHi; + val = (arg1 & mask) | (old & ~mask); + env->CP0_EntryHi = val; + if (env->CP0_Config3 & (1 << CP0C3_MT)) { + sync_c0_entryhi(env, env->current_tc); + } + /* If the ASID changes, flush qemu's TLB. 
*/ + if ((old & env->CP0_EntryHi_ASID_mask) != + (val & env->CP0_EntryHi_ASID_mask)) { + tlb_flush(env_cpu(env)); + } +} + +void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + other->CP0_EntryHi = arg1; + sync_c0_entryhi(other, other_tc); +} + +void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1) +{ + // cpu_mips_store_compare(env, arg1); +} + +void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1) +{ + cpu_mips_store_status(env, arg1); + +#if 0 + uint32_t val, old; + val = env->CP0_Status; + old = env->CP0_Status; + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { + qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x", + old, old & env->CP0_Cause & CP0Ca_IP_mask, + val, val & env->CP0_Cause & CP0Ca_IP_mask, + env->CP0_Cause); + switch (cpu_mmu_index(env, false)) { + case 3: + qemu_log(", ERL\n"); + break; + case MIPS_HFLAG_UM: + qemu_log(", UM\n"); + break; + case MIPS_HFLAG_SM: + qemu_log(", SM\n"); + break; + case MIPS_HFLAG_KM: + qemu_log("\n"); + break; + default: + cpu_abort(env_cpu(env), "Invalid MMU mode!\n"); + break; + } + } +#endif +} + +void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018; + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask); + sync_c0_status(env, other, other_tc); +} + +void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0); +} + +void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS); + env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask); +} + +void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1) +{ + cpu_mips_store_cause(env, arg1); +} + +void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + cpu_mips_store_cause(other, arg1); +} + +target_ulong helper_mftc0_epc(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + return other->CP0_EPC; +} + +target_ulong helper_mftc0_ebase(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + return other->CP0_EBase; +} + +void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1) +{ + target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask; + if (arg1 & env->CP0_EBaseWG_rw_bitmask) { + mask |= ~0x3FFFFFFF; + } + env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask); +} + +void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask; + if (arg1 & env->CP0_EBaseWG_rw_bitmask) { + mask |= ~0x3FFFFFFF; + } + other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask); +} + +target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + switch 
(idx) { + case 0: return other->CP0_Config0; + case 1: return other->CP0_Config1; + case 2: return other->CP0_Config2; + case 3: return other->CP0_Config3; + /* 4 and 5 are reserved. */ + case 6: return other->CP0_Config6; + case 7: return other->CP0_Config7; + default: + break; + } + return 0; +} + +void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007); +} + +void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1) +{ + /* tertiary/secondary caches not implemented */ + env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF); +} + +void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1) +{ + if (env->insn_flags & ASE_MICROMIPS) { + env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) | + (arg1 & (1 << CP0C3_ISA_ON_EXC)); + } +} + +void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) | + (arg1 & env->CP0_Config4_rw_bitmask); +} + +void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) | + (arg1 & env->CP0_Config5_rw_bitmask); + env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ? + 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff; + compute_hflags(env); +} + +void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1) +{ + target_long mask = env->CP0_LLAddr_rw_bitmask; + arg1 = arg1 << env->CP0_LLAddr_shift; + env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask); +} + +#define MTC0_MAAR_MASK(env) \ + ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3) + +void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env); +} + +void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_MAAR[env->CP0_MAARI] = + (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) | + (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL); +} + +void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1) +{ + int index = arg1 & 0x3f; + if (index == 0x3f) { + /* + * Software may write all ones to INDEX to determine the + * maximum value supported. + */ + env->CP0_MAARI = MIPS_MAAR_MAX - 1; + } else if (index < MIPS_MAAR_MAX) { + env->CP0_MAARI = index; + } + /* + * Other than the all ones, if the value written is not supported, + * then INDEX is unchanged from its previous value. + */ +} + +void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + /* + * Watch exceptions for instructions, data loads, data stores + * not implemented. 
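+ * Only the watch address is retained; the bottom three enable bits
+ * (W, R and I) are cleared by the ~0x7 mask below.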
+ */ + env->CP0_WatchLo[sel] = (arg1 & ~0x7); +} + +void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID); + if ((env->CP0_Config5 >> CP0C5_MI) & 1) { + mask |= 0xFFFFFFFF00000000ULL; /* MMID */ + } + env->CP0_WatchHi[sel] = arg1 & mask; + env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7); +} + +void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) | + (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL); +} + +void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1) +{ + target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1; + env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask); +} + +void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Framemask = arg1; /* XXX */ +} + +void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120); + if (arg1 & (1 << CP0DB_DM)) { + env->hflags |= MIPS_HFLAG_DM; + } else { + env->hflags &= ~MIPS_HFLAG_DM; + } +} + +void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + /* XXX: Might be wrong, check with EJTAG spec. */ + if (other_tc == other->current_tc) { + other->active_tc.CP0_Debug_tcstatus = val; + } else { + other->tcs[other_tc].CP0_Debug_tcstatus = val; + } + other->CP0_Debug = (other->CP0_Debug & + ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | + (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); +} + +void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Performance0 = arg1 & 0x000007ff; +} + +void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1) +{ + int32_t wst = arg1 & (1 << CP0EC_WST); + int32_t spr = arg1 & (1 << CP0EC_SPR); + int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0; + + env->CP0_ErrCtl = wst | spr | itc; + + if (itc && !wst && !spr) { + env->hflags |= MIPS_HFLAG_ITC_CACHE; + } else { + env->hflags &= ~MIPS_HFLAG_ITC_CACHE; + } +} + +void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1) +{ + if (env->hflags & MIPS_HFLAG_ITC_CACHE) { + /* + * If CACHE instruction is configured for ITC tags then make all + * CP0.TagLo bits writable. The actual write to ITC Configuration + * Tag will take care of the read-only bits. 
+ */ + env->CP0_TagLo = arg1; + } else { + env->CP0_TagLo = arg1 & 0xFFFFFCF6; + } +} + +void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_DataLo = arg1; /* XXX */ +} + +void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_TagHi = arg1; /* XXX */ +} + +void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_DataHi = arg1; /* XXX */ +} + +/* MIPS MT functions */ +target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.gpr[sel]; + } else { + return other->tcs[other_tc].gpr[sel]; + } +} + +target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.LO[sel]; + } else { + return other->tcs[other_tc].LO[sel]; + } +} + +target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.HI[sel]; + } else { + return other->tcs[other_tc].HI[sel]; + } +} + +target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.ACX[sel]; + } else { + return other->tcs[other_tc].ACX[sel]; + } +} + +target_ulong helper_mftdsp(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + return other->active_tc.DSPControl; + } else { + return other->tcs[other_tc].DSPControl; + } +} + +void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + other->active_tc.gpr[sel] = arg1; + } else { + other->tcs[other_tc].gpr[sel] = arg1; + } +} + +void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + other->active_tc.LO[sel] = arg1; + } else { + other->tcs[other_tc].LO[sel] = arg1; + } +} + +void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + other->active_tc.HI[sel] = arg1; + } else { + other->tcs[other_tc].HI[sel] = arg1; + } +} + +void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + other->active_tc.ACX[sel] = arg1; + } else { + other->tcs[other_tc].ACX[sel] = arg1; + } +} + +void helper_mttdsp(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 
+ + if (other_tc == other->current_tc) { + other->active_tc.DSPControl = arg1; + } else { + other->tcs[other_tc].DSPControl = arg1; + } +} + +/* MIPS MT functions */ +target_ulong helper_dmt(void) +{ + /* TODO */ + return 0; +} + +target_ulong helper_emt(void) +{ + /* TODO */ + return 0; +} + +target_ulong helper_dvpe(CPUMIPSState *env) +{ +#if 0 // FIXME + CPUState *other_cs = first_cpu; + target_ulong prev = env->mvp->CP0_MVPControl; + + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + /* Turn off all VPEs except the one executing the dvpe. */ + if (&other_cpu->env != env) { + other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); + mips_vpe_sleep(other_cpu); + } + } + return prev; +#endif + return 0; +} + +target_ulong helper_evpe(CPUMIPSState *env) +{ +#if 0 + CPUState *other_cs = first_cpu; + target_ulong prev = env->mvp->CP0_MVPControl; + + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + + if (&other_cpu->env != env + /* If the VPE is WFI, don't disturb its sleep. */ + && !mips_vpe_is_wfi(other_cpu)) { + /* Enable the VPE. */ + other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); + mips_vpe_wake(other_cpu); /* And wake it up. */ + } + } + return prev; +#endif + return 0; +} + +/* R6 Multi-threading */ +target_ulong helper_dvp(CPUMIPSState *env) +{ +#if 0 + CPUState *other_cs = first_cpu; + target_ulong prev = env->CP0_VPControl; + + if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) { + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + /* Turn off all VPs except the one executing the dvp. */ + if (&other_cpu->env != env) { + mips_vpe_sleep(other_cpu); + } + } + env->CP0_VPControl |= (1 << CP0VPCtl_DIS); + } + return prev; +#endif + + return 0; +} + +target_ulong helper_evp(CPUMIPSState *env) +{ +#if 0 + CPUState *other_cs = first_cpu; + target_ulong prev = env->CP0_VPControl; + + if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) { + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) { + /* + * If the VP is WFI, don't disturb its sleep. + * Otherwise, wake it up. + */ + mips_vpe_wake(other_cpu); + } + } + env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS); + } + return prev; +#endif + return 0; +} diff --git a/qemu/hw/mips/cputimer.c b/qemu/target/mips/cp0_timer.c similarity index 51% rename from qemu/hw/mips/cputimer.c rename to qemu/target/mips/cp0_timer.c index 71d28815..223e070b 100644 --- a/qemu/hw/mips/cputimer.c +++ b/qemu/target/mips/cp0_timer.c @@ -20,43 +20,53 @@ * THE SOFTWARE. */ -#include "hw/hw.h" -#include "hw/mips/cpudevs.h" +#include "qemu/osdep.h" +//#include "hw/irq.h" +//#include "hw/mips/cpudevs.h" #include "qemu/timer.h" +//#include "sysemu/kvm.h" +#include "internal.h" -#define TIMER_FREQ 100 * 1000 * 1000 +#define TIMER_PERIOD 10 /* 10 ns period for 100 Mhz frequency */ /* XXX: do not use a global */ -uint32_t cpu_mips_get_random (CPUMIPSState *env) +uint32_t cpu_mips_get_random(CPUMIPSState *env) { - static uint32_t lfsr = 1; + static uint32_t seed = 1; static uint32_t prev_idx = 0; uint32_t idx; + uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired; + + if (nb_rand_tlb == 1) { + return env->tlb->nb_tlb - 1; + } + /* Don't return same value twice, so get another value */ do { - lfsr = (lfsr >> 1) ^ ((0-(lfsr & 1u)) & 0xd0000001u); - idx = lfsr % (env->tlb->nb_tlb - env->CP0_Wired) + env->CP0_Wired; + /* + * Use a simple algorithm of Linear Congruential Generator + * from ISO/IEC 9899 standard. 
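+ * The multiplier 1103515245 and increment 12345 match the sample
+ * rand() given in that standard; the high 16 bits of the state are
+ * used below because an LCG's low-order bits are the least random.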
+ */ + seed = 1103515245 * seed + 12345; + idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired; } while (idx == prev_idx); prev_idx = idx; return idx; } +#if 0 /* MIPS R4K timer */ static void cpu_mips_timer_update(CPUMIPSState *env) { -#if 0 uint64_t now, next; uint32_t wait; now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - wait = env->CP0_Compare - env->CP0_Count - - (uint32_t)muldiv64(now, TIMER_FREQ, get_ticks_per_sec()); - next = now + muldiv64(wait, get_ticks_per_sec(), TIMER_FREQ); + wait = env->CP0_Compare - env->CP0_Count - (uint32_t)(now / TIMER_PERIOD); + next = now + (uint64_t)wait * TIMER_PERIOD; timer_mod(env->timer, next); -#endif } -#if 0 /* Expire the timer. */ static void cpu_mips_timer_expire(CPUMIPSState *env) { @@ -64,11 +74,10 @@ static void cpu_mips_timer_expire(CPUMIPSState *env) if (env->insn_flags & ISA_MIPS32R2) { env->CP0_Cause |= 1 << CP0Ca_TI; } - //qemu_irq_raise(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]); + qemu_irq_raise(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]); } -#endif -uint32_t cpu_mips_get_count (CPUMIPSState *env) +uint32_t cpu_mips_get_count(CPUMIPSState *env) { if (env->CP0_Cause & (1 << CP0Ca_DC)) { return env->CP0_Count; @@ -76,46 +85,44 @@ uint32_t cpu_mips_get_count (CPUMIPSState *env) uint64_t now; now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - //if (timer_pending(env->timer) - // && timer_expired(env->timer, now)) { - // /* The timer has already expired. */ - // cpu_mips_timer_expire(env); - //} + if (timer_pending(env->timer) + && timer_expired(env->timer, now)) { + /* The timer has already expired. */ + cpu_mips_timer_expire(env); + } - return env->CP0_Count + - (uint32_t)muldiv64(now, TIMER_FREQ, get_ticks_per_sec()); + return env->CP0_Count + (uint32_t)(now / TIMER_PERIOD); } } -void cpu_mips_store_count (CPUMIPSState *env, uint32_t count) +void cpu_mips_store_count(CPUMIPSState *env, uint32_t count) { -#if 0 /* * This gets called from cpu_state_reset(), potentially before timer init. * So env->timer may be NULL, which is also the case with KVM enabled so * treat timer as disabled in that case. 
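+     * Editorial note: CP0_Count is stored as an offset. A write records
+     * count - now / TIMER_PERIOD, so a later read, which returns
+     * CP0_Count + now / TIMER_PERIOD, yields the written value advanced
+     * by the elapsed virtual time.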
*/ - if (env->CP0_Cause & (1 << CP0Ca_DC) || !env->timer) + if (env->CP0_Cause & (1 << CP0Ca_DC) || !env->timer) { env->CP0_Count = count; - else { + } else { /* Store new count register */ - env->CP0_Count = - count - (uint32_t)muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), - TIMER_FREQ, get_ticks_per_sec()); + env->CP0_Count = count - + (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / TIMER_PERIOD); /* Update timer timer */ cpu_mips_timer_update(env); } -#endif } -void cpu_mips_store_compare (CPUMIPSState *env, uint32_t value) +void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value) { env->CP0_Compare = value; - if (!(env->CP0_Cause & (1 << CP0Ca_DC))) + if (!(env->CP0_Cause & (1 << CP0Ca_DC))) { cpu_mips_timer_update(env); - if (env->insn_flags & ISA_MIPS32R2) + } + if (env->insn_flags & ISA_MIPS32R2) { env->CP0_Cause &= ~(1 << CP0Ca_TI); - //qemu_irq_lower(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]); + } + qemu_irq_lower(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]); } void cpu_mips_start_count(CPUMIPSState *env) @@ -126,6 +133,40 @@ void cpu_mips_start_count(CPUMIPSState *env) void cpu_mips_stop_count(CPUMIPSState *env) { /* Store the current value */ - env->CP0_Count += (uint32_t)muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), - TIMER_FREQ, get_ticks_per_sec()); + env->CP0_Count += (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / + TIMER_PERIOD); } + +static void mips_timer_cb(void *opaque) +{ + CPUMIPSState *env; + + env = opaque; + + if (env->CP0_Cause & (1 << CP0Ca_DC)) { + return; + } + + /* + * ??? This callback should occur when the counter is exactly equal to + * the comparator value. Offset the count by one to avoid immediately + * retriggering the callback before any virtual time has passed. + */ + env->CP0_Count++; + cpu_mips_timer_expire(env); + env->CP0_Count--; +} + +void cpu_mips_clock_init(MIPSCPU *cpu) +{ + CPUMIPSState *env = &cpu->env; + + /* + * If we're in KVM mode, don't create the periodic timer, that is handled in + * kernel. + */ + if (!kvm_enabled()) { + env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &mips_timer_cb, env); + } +} +#endif diff --git a/qemu/target/mips/cpu-param.h b/qemu/target/mips/cpu-param.h new file mode 100644 index 00000000..f073f379 --- /dev/null +++ b/qemu/target/mips/cpu-param.h @@ -0,0 +1,25 @@ +/* + * MIPS cpu parameters for qemu. 
+ * + * SPDX-License-Identifier: LGPL-2.0+ + */ + +#ifndef MIPS_CPU_PARAM_H +#define MIPS_CPU_PARAM_H 1 + +#ifdef TARGET_MIPS64 +# define TARGET_LONG_BITS 64 +#else +# define TARGET_LONG_BITS 32 +#endif +#ifdef TARGET_MIPS64 +#define TARGET_PHYS_ADDR_SPACE_BITS 48 +#define TARGET_VIRT_ADDR_SPACE_BITS 48 +#else +#define TARGET_PHYS_ADDR_SPACE_BITS 40 +#define TARGET_VIRT_ADDR_SPACE_BITS 32 +#endif +#define TARGET_PAGE_BITS 12 +#define NB_MMU_MODES 4 + +#endif diff --git a/qemu/target-mips/cpu-qom.h b/qemu/target/mips/cpu-qom.h similarity index 52% rename from qemu/target-mips/cpu-qom.h rename to qemu/target/mips/cpu-qom.h index 89581d45..02594d31 100644 --- a/qemu/target-mips/cpu-qom.h +++ b/qemu/target/mips/cpu-qom.h @@ -20,7 +20,7 @@ #ifndef QEMU_MIPS_CPU_QOM_H #define QEMU_MIPS_CPU_QOM_H -#include "qom/cpu.h" +#include "hw/core/cpu.h" #ifdef TARGET_MIPS64 #define TYPE_MIPS_CPU "mips64-cpu" @@ -28,11 +28,9 @@ #define TYPE_MIPS_CPU "mips-cpu" #endif -#define MIPS_CPU_CLASS(uc, klass) \ - OBJECT_CLASS_CHECK(uc, MIPSCPUClass, (klass), TYPE_MIPS_CPU) -#define MIPS_CPU(uc, obj) ((MIPSCPU *)obj) -#define MIPS_CPU_GET_CLASS(uc, obj) \ - OBJECT_GET_CLASS(uc, MIPSCPUClass, (obj), TYPE_MIPS_CPU) +#define MIPS_CPU(obj) ((MIPSCPU *)obj) +#define MIPS_CPU_CLASS(klass) ((MIPSCPUClass *)klass) +#define MIPS_CPU_GET_CLASS(obj) (&((MIPSCPU *)obj)->cc) /** * MIPSCPUClass: @@ -46,39 +44,9 @@ typedef struct MIPSCPUClass { CPUClass parent_class; /*< public >*/ - DeviceRealize parent_realize; void (*parent_reset)(CPUState *cpu); } MIPSCPUClass; -/** - * MIPSCPU: - * @env: #CPUMIPSState - * - * A MIPS CPU. - */ -typedef struct MIPSCPU { - /*< private >*/ - CPUState parent_obj; - /*< public >*/ - - CPUMIPSState env; -} MIPSCPU; - -static inline MIPSCPU *mips_env_get_cpu(CPUMIPSState *env) -{ - return container_of(env, MIPSCPU, env); -} - -#define ENV_GET_CPU(e) CPU(mips_env_get_cpu(e)) - -#define ENV_OFFSET offsetof(MIPSCPU, env) - -void mips_cpu_do_interrupt(CPUState *cpu); -bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req); -hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); -int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - int is_write, int is_user, uintptr_t retaddr); +typedef struct MIPSCPU MIPSCPU; #endif diff --git a/qemu/target/mips/cpu.c b/qemu/target/mips/cpu.c new file mode 100644 index 00000000..3f041afc --- /dev/null +++ b/qemu/target/mips/cpu.c @@ -0,0 +1,208 @@ +/* + * QEMU MIPS CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/exec-all.h"
+
+#include
+
+static void mips_cpu_set_pc(CPUState *cs, vaddr value)
+{
+    MIPSCPU *cpu = MIPS_CPU(cs);
+    CPUMIPSState *env = &cpu->env;
+
+    env->active_tc.PC = value & ~(target_ulong)1;
+    if (value & 1) {
+        env->hflags |= MIPS_HFLAG_M16;
+    } else {
+        env->hflags &= ~(MIPS_HFLAG_M16);
+    }
+}
+
+static void mips_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+    MIPSCPU *cpu = MIPS_CPU(cs);
+    CPUMIPSState *env = &cpu->env;
+
+    env->active_tc.PC = tb->pc;
+    env->hflags &= ~MIPS_HFLAG_BMASK;
+    env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
+}
+
+static bool mips_cpu_has_work(CPUState *cs)
+{
+    MIPSCPU *cpu = MIPS_CPU(cs);
+    CPUMIPSState *env = &cpu->env;
+    bool has_work = false;
+
+    /*
+     * Prior to MIPS Release 6 it is implementation dependent if non-enabled
+     * interrupts wake up the CPU, however most of the implementations only
+     * check for interrupts that can be taken.
+     */
+    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+        cpu_mips_hw_interrupts_pending(env)) {
+        if (cpu_mips_hw_interrupts_enabled(env) ||
+            (env->insn_flags & ISA_MIPS32R6)) {
+            has_work = true;
+        }
+    }
+
+    /* MIPS-MT has the ability to halt the CPU. */
+    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
+        /*
+         * The QEMU model will issue a _WAKE request whenever the CPUs
+         * should be woken up.
+         */
+        if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
+            has_work = true;
+        }
+
+        if (!mips_vpe_active(env)) {
+            has_work = false;
+        }
+    }
+
+    /* MIPS Release 6 has the ability to halt the CPU. */
+    if (env->CP0_Config5 & (1 << CP0C5_VP)) {
+        if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
+            has_work = true;
+        }
+        if (!mips_vp_active(env, cs)) {
+            has_work = false;
+        }
+    }
+
+    return has_work;
+}
+
+static void mips_cpu_reset(CPUState *dev)
+{
+    CPUState *s = CPU(dev);
+    MIPSCPU *cpu = MIPS_CPU(s);
+    MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cpu);
+    CPUMIPSState *env = &cpu->env;
+
+    mcc->parent_reset(dev);
+
+    memset(env, 0, offsetof(CPUMIPSState, end_reset_fields));
+
+    cpu_state_reset(env);
+}
+
+static void mips_cpu_realizefn(CPUState *dev)
+{
+    CPUState *cs = CPU(dev);
+    MIPSCPU *cpu = MIPS_CPU(dev);
+
+    cpu_exec_realizefn(cs);
+
+    cpu_mips_realize_env(&cpu->env);
+
+    cpu_reset(cs);
+}
+
+static void mips_cpu_initfn(struct uc_struct *uc, CPUState *obj)
+{
+    MIPSCPU *cpu = MIPS_CPU(obj);
+    CPUMIPSState *env = &cpu->env;
+
+    env->uc = uc;
+    cpu_set_cpustate_pointers(cpu);
+}
+
+static void mips_cpu_class_init(CPUClass *c)
+{
+    MIPSCPUClass *mcc = MIPS_CPU_CLASS(c);
+    CPUClass *cc = CPU_CLASS(c);
+
+    /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */
+    mcc->parent_reset = cc->reset;
+    /* overwrite the CPUClass->reset to the arch reset: mips_cpu_reset().
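+     * mips_cpu_reset() chains to the generic reset saved in parent_reset
+     * above before clearing and re-initialising the MIPS state.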
*/ + cc->reset = mips_cpu_reset; + cc->has_work = mips_cpu_has_work; + cc->do_interrupt = mips_cpu_do_interrupt; + cc->cpu_exec_interrupt = mips_cpu_exec_interrupt; + cc->set_pc = mips_cpu_set_pc; + cc->synchronize_from_tb = mips_cpu_synchronize_from_tb; + cc->do_unaligned_access = mips_cpu_do_unaligned_access; + cc->get_phys_page_debug = mips_cpu_get_phys_page_debug; + cc->tcg_initialize = mips_tcg_init; + cc->tlb_fill = mips_cpu_tlb_fill; +} + +MIPSCPU *cpu_mips_init(struct uc_struct *uc, const char *cpu_model) +{ + MIPSCPU *cpu; + CPUState *cs; + CPUClass *cc; + CPUMIPSState *env; + int i; + + if (cpu_model == NULL) { +#ifdef TARGET_MIPS64 + cpu_model = "R4000"; +#else + cpu_model = "24Kf"; +#endif + } + + cpu = calloc(1, sizeof(*cpu)); + if (cpu == NULL) { + return NULL; + } + + cs = (CPUState *)cpu; + cc = (CPUClass *)&cpu->cc; + cs->cc = cc; + cs->uc = uc; + uc->cpu = cs; + + cpu_class_init(uc, cc); + + mips_cpu_class_init(cc); + + cpu_common_initfn(uc, cs); + + mips_cpu_initfn(uc, cs); + + env = &cpu->env; + for (i = 0; i < mips_defs_number; i++) { + if (strcasecmp(cpu_model, mips_defs[i].name) == 0) { + env->cpu_model = &(mips_defs[i]); + break; + } + } + if (env->cpu_model == NULL) { + free(cpu); + return NULL; + } + + mips_cpu_realizefn(cs); + + // init address space + cpu_address_space_init(cs, 0, cs->memory); + + qemu_init_vcpu(cs); + + return cpu; +} diff --git a/qemu/target/mips/cpu.h b/qemu/target/mips/cpu.h new file mode 100644 index 00000000..95f6bf50 --- /dev/null +++ b/qemu/target/mips/cpu.h @@ -0,0 +1,1272 @@ +#ifndef MIPS_CPU_H +#define MIPS_CPU_H + +#include "cpu-qom.h" +#include "exec/cpu-defs.h" +#include "fpu/softfloat-types.h" +#include "mips-defs.h" + +#define TCG_GUEST_DEFAULT_MO (0) + +typedef struct CPUMIPSTLBContext CPUMIPSTLBContext; + +/* MSA Context */ +#define MSA_WRLEN (128) + +typedef union wr_t wr_t; +union wr_t { + int8_t b[MSA_WRLEN / 8]; + int16_t h[MSA_WRLEN / 16]; + int32_t w[MSA_WRLEN / 32]; + int64_t d[MSA_WRLEN / 64]; +}; + +typedef union fpr_t fpr_t; +union fpr_t { + float64 fd; /* ieee double precision */ + float32 fs[2];/* ieee single precision */ + uint64_t d; /* binary double fixed-point */ + uint32_t w[2]; /* binary single fixed-point */ +/* FPU/MSA register mapping is not tested on big-endian hosts. */ + wr_t wr; /* vector data */ +}; +/* + *define FP_ENDIAN_IDX to access the same location + * in the fpr_t union regardless of the host endianness + */ +#if defined(HOST_WORDS_BIGENDIAN) +# define FP_ENDIAN_IDX 1 +#else +# define FP_ENDIAN_IDX 0 +#endif + +typedef struct CPUMIPSFPUContext CPUMIPSFPUContext; +struct CPUMIPSFPUContext { + /* Floating point registers */ + fpr_t fpr[32]; + float_status fp_status; + /* fpu implementation/revision register (fir) */ + uint32_t fcr0; +#define FCR0_FREP 29 +#define FCR0_UFRP 28 +#define FCR0_HAS2008 23 +#define FCR0_F64 22 +#define FCR0_L 21 +#define FCR0_W 20 +#define FCR0_3D 19 +#define FCR0_PS 18 +#define FCR0_D 17 +#define FCR0_S 16 +#define FCR0_PRID 8 +#define FCR0_REV 0 + /* fcsr */ + uint32_t fcr31_rw_bitmask; + uint32_t fcr31; +#define FCR31_FS 24 +#define FCR31_ABS2008 19 +#define FCR31_NAN2008 18 +#define SET_FP_COND(num, env) do { ((env).fcr31) |= \ + ((num) ? (1 << ((num) + 24)) : \ + (1 << 23)); \ + } while (0) +#define CLEAR_FP_COND(num, env) do { ((env).fcr31) &= \ + ~((num) ? 
(1 << ((num) + 24)) : \ + (1 << 23)); \ + } while (0) +#define GET_FP_COND(env) ((((env).fcr31 >> 24) & 0xfe) | \ + (((env).fcr31 >> 23) & 0x1)) +#define GET_FP_CAUSE(reg) (((reg) >> 12) & 0x3f) +#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f) +#define GET_FP_FLAGS(reg) (((reg) >> 2) & 0x1f) +#define SET_FP_CAUSE(reg, v) do { (reg) = ((reg) & ~(0x3f << 12)) | \ + ((v & 0x3f) << 12); \ + } while (0) +#define SET_FP_ENABLE(reg, v) do { (reg) = ((reg) & ~(0x1f << 7)) | \ + ((v & 0x1f) << 7); \ + } while (0) +#define SET_FP_FLAGS(reg, v) do { (reg) = ((reg) & ~(0x1f << 2)) | \ + ((v & 0x1f) << 2); \ + } while (0) +#define UPDATE_FP_FLAGS(reg, v) do { (reg) |= ((v & 0x1f) << 2); } while (0) +#define FP_INEXACT 1 +#define FP_UNDERFLOW 2 +#define FP_OVERFLOW 4 +#define FP_DIV0 8 +#define FP_INVALID 16 +#define FP_UNIMPLEMENTED 32 +}; + +#define TARGET_INSN_START_EXTRA_WORDS 2 + +typedef struct CPUMIPSMVPContext CPUMIPSMVPContext; +struct CPUMIPSMVPContext { + int32_t CP0_MVPControl; +#define CP0MVPCo_CPA 3 +#define CP0MVPCo_STLB 2 +#define CP0MVPCo_VPC 1 +#define CP0MVPCo_EVP 0 + int32_t CP0_MVPConf0; +#define CP0MVPC0_M 31 +#define CP0MVPC0_TLBS 29 +#define CP0MVPC0_GS 28 +#define CP0MVPC0_PCP 27 +#define CP0MVPC0_PTLBE 16 +#define CP0MVPC0_TCA 15 +#define CP0MVPC0_PVPE 10 +#define CP0MVPC0_PTC 0 + int32_t CP0_MVPConf1; +#define CP0MVPC1_CIM 31 +#define CP0MVPC1_CIF 30 +#define CP0MVPC1_PCX 20 +#define CP0MVPC1_PCP2 10 +#define CP0MVPC1_PCP1 0 +}; + +typedef struct mips_def_t mips_def_t; + +#define MIPS_SHADOW_SET_MAX 16 +#define MIPS_TC_MAX 5 +#define MIPS_FPU_MAX 1 +#define MIPS_DSP_ACC 4 +#define MIPS_KSCRATCH_NUM 6 +#define MIPS_MAAR_MAX 16 /* Must be an even number. */ + + +/* + * Summary of CP0 registers + * ======================== + * + * + * Register 0 Register 1 Register 2 Register 3 + * ---------- ---------- ---------- ---------- + * + * 0 Index Random EntryLo0 EntryLo1 + * 1 MVPControl VPEControl TCStatus GlobalNumber + * 2 MVPConf0 VPEConf0 TCBind + * 3 MVPConf1 VPEConf1 TCRestart + * 4 VPControl YQMask TCHalt + * 5 VPESchedule TCContext + * 6 VPEScheFBack TCSchedule + * 7 VPEOpt TCScheFBack TCOpt + * + * + * Register 4 Register 5 Register 6 Register 7 + * ---------- ---------- ---------- ---------- + * + * 0 Context PageMask Wired HWREna + * 1 ContextConfig PageGrain SRSConf0 + * 2 UserLocal SegCtl0 SRSConf1 + * 3 XContextConfig SegCtl1 SRSConf2 + * 4 DebugContextID SegCtl2 SRSConf3 + * 5 MemoryMapID PWBase SRSConf4 + * 6 PWField PWCtl + * 7 PWSize + * + * + * Register 8 Register 9 Register 10 Register 11 + * ---------- ---------- ----------- ----------- + * + * 0 BadVAddr Count EntryHi Compare + * 1 BadInstr + * 2 BadInstrP + * 3 BadInstrX + * 4 GuestCtl1 GuestCtl0Ext + * 5 GuestCtl2 + * 6 SAARI GuestCtl3 + * 7 SAAR + * + * + * Register 12 Register 13 Register 14 Register 15 + * ----------- ----------- ----------- ----------- + * + * 0 Status Cause EPC PRId + * 1 IntCtl EBase + * 2 SRSCtl NestedEPC CDMMBase + * 3 SRSMap CMGCRBase + * 4 View_IPL View_RIPL BEVVA + * 5 SRSMap2 NestedExc + * 6 GuestCtl0 + * 7 GTOffset + * + * + * Register 16 Register 17 Register 18 Register 19 + * ----------- ----------- ----------- ----------- + * + * 0 Config LLAddr WatchLo0 WatchHi + * 1 Config1 MAAR WatchLo1 WatchHi + * 2 Config2 MAARI WatchLo2 WatchHi + * 3 Config3 WatchLo3 WatchHi + * 4 Config4 WatchLo4 WatchHi + * 5 Config5 WatchLo5 WatchHi + * 6 WatchLo6 WatchHi + * 7 WatchLo7 WatchHi + * + * + * Register 20 Register 21 Register 22 Register 23 + * ----------- ----------- ----------- 
----------- + * + * 0 XContext Debug + * 1 TraceControl + * 2 TraceControl2 + * 3 UserTraceData1 + * 4 TraceIBPC + * 5 TraceDBPC + * 6 Debug2 + * 7 + * + * + * Register 24 Register 25 Register 26 Register 27 + * ----------- ----------- ----------- ----------- + * + * 0 DEPC PerfCnt ErrCtl CacheErr + * 1 PerfCnt + * 2 TraceControl3 PerfCnt + * 3 UserTraceData2 PerfCnt + * 4 PerfCnt + * 5 PerfCnt + * 6 PerfCnt + * 7 PerfCnt + * + * + * Register 28 Register 29 Register 30 Register 31 + * ----------- ----------- ----------- ----------- + * + * 0 DataLo DataHi ErrorEPC DESAVE + * 1 TagLo TagHi + * 2 DataLo1 DataHi1 KScratch + * 3 TagLo1 TagHi1 KScratch + * 4 DataLo2 DataHi2 KScratch + * 5 TagLo2 TagHi2 KScratch + * 6 DataLo3 DataHi3 KScratch + * 7 TagLo3 TagHi3 KScratch + * + */ +#define CP0_REGISTER_00 0 +#define CP0_REGISTER_01 1 +#define CP0_REGISTER_02 2 +#define CP0_REGISTER_03 3 +#define CP0_REGISTER_04 4 +#define CP0_REGISTER_05 5 +#define CP0_REGISTER_06 6 +#define CP0_REGISTER_07 7 +#define CP0_REGISTER_08 8 +#define CP0_REGISTER_09 9 +#define CP0_REGISTER_10 10 +#define CP0_REGISTER_11 11 +#define CP0_REGISTER_12 12 +#define CP0_REGISTER_13 13 +#define CP0_REGISTER_14 14 +#define CP0_REGISTER_15 15 +#define CP0_REGISTER_16 16 +#define CP0_REGISTER_17 17 +#define CP0_REGISTER_18 18 +#define CP0_REGISTER_19 19 +#define CP0_REGISTER_20 20 +#define CP0_REGISTER_21 21 +#define CP0_REGISTER_22 22 +#define CP0_REGISTER_23 23 +#define CP0_REGISTER_24 24 +#define CP0_REGISTER_25 25 +#define CP0_REGISTER_26 26 +#define CP0_REGISTER_27 27 +#define CP0_REGISTER_28 28 +#define CP0_REGISTER_29 29 +#define CP0_REGISTER_30 30 +#define CP0_REGISTER_31 31 + + +/* CP0 Register 00 */ +#define CP0_REG00__INDEX 0 +#define CP0_REG00__MVPCONTROL 1 +#define CP0_REG00__MVPCONF0 2 +#define CP0_REG00__MVPCONF1 3 +#define CP0_REG00__VPCONTROL 4 +/* CP0 Register 01 */ +#define CP0_REG01__RANDOM 0 +#define CP0_REG01__VPECONTROL 1 +#define CP0_REG01__VPECONF0 2 +#define CP0_REG01__VPECONF1 3 +#define CP0_REG01__YQMASK 4 +#define CP0_REG01__VPESCHEDULE 5 +#define CP0_REG01__VPESCHEFBACK 6 +#define CP0_REG01__VPEOPT 7 +/* CP0 Register 02 */ +#define CP0_REG02__ENTRYLO0 0 +#define CP0_REG02__TCSTATUS 1 +#define CP0_REG02__TCBIND 2 +#define CP0_REG02__TCRESTART 3 +#define CP0_REG02__TCHALT 4 +#define CP0_REG02__TCCONTEXT 5 +#define CP0_REG02__TCSCHEDULE 6 +#define CP0_REG02__TCSCHEFBACK 7 +/* CP0 Register 03 */ +#define CP0_REG03__ENTRYLO1 0 +#define CP0_REG03__GLOBALNUM 1 +#define CP0_REG03__TCOPT 7 +/* CP0 Register 04 */ +#define CP0_REG04__CONTEXT 0 +#define CP0_REG04__CONTEXTCONFIG 1 +#define CP0_REG04__USERLOCAL 2 +#define CP0_REG04__XCONTEXTCONFIG 3 +#define CP0_REG04__DBGCONTEXTID 4 +#define CP0_REG04__MMID 5 +/* CP0 Register 05 */ +#define CP0_REG05__PAGEMASK 0 +#define CP0_REG05__PAGEGRAIN 1 +#define CP0_REG05__SEGCTL0 2 +#define CP0_REG05__SEGCTL1 3 +#define CP0_REG05__SEGCTL2 4 +#define CP0_REG05__PWBASE 5 +#define CP0_REG05__PWFIELD 6 +#define CP0_REG05__PWSIZE 7 +/* CP0 Register 06 */ +#define CP0_REG06__WIRED 0 +#define CP0_REG06__SRSCONF0 1 +#define CP0_REG06__SRSCONF1 2 +#define CP0_REG06__SRSCONF2 3 +#define CP0_REG06__SRSCONF3 4 +#define CP0_REG06__SRSCONF4 5 +#define CP0_REG06__PWCTL 6 +/* CP0 Register 07 */ +#define CP0_REG07__HWRENA 0 +/* CP0 Register 08 */ +#define CP0_REG08__BADVADDR 0 +#define CP0_REG08__BADINSTR 1 +#define CP0_REG08__BADINSTRP 2 +#define CP0_REG08__BADINSTRX 3 +/* CP0 Register 09 */ +#define CP0_REG09__COUNT 0 +#define CP0_REG09__SAARI 6 +#define CP0_REG09__SAAR 7 +/* CP0 
Register 10 */ +#define CP0_REG10__ENTRYHI 0 +#define CP0_REG10__GUESTCTL1 4 +#define CP0_REG10__GUESTCTL2 5 +#define CP0_REG10__GUESTCTL3 6 +/* CP0 Register 11 */ +#define CP0_REG11__COMPARE 0 +#define CP0_REG11__GUESTCTL0EXT 4 +/* CP0 Register 12 */ +#define CP0_REG12__STATUS 0 +#define CP0_REG12__INTCTL 1 +#define CP0_REG12__SRSCTL 2 +#define CP0_REG12__SRSMAP 3 +#define CP0_REG12__VIEW_IPL 4 +#define CP0_REG12__SRSMAP2 5 +#define CP0_REG12__GUESTCTL0 6 +#define CP0_REG12__GTOFFSET 7 +/* CP0 Register 13 */ +#define CP0_REG13__CAUSE 0 +#define CP0_REG13__VIEW_RIPL 4 +#define CP0_REG13__NESTEDEXC 5 +/* CP0 Register 14 */ +#define CP0_REG14__EPC 0 +#define CP0_REG14__NESTEDEPC 2 +/* CP0 Register 15 */ +#define CP0_REG15__PRID 0 +#define CP0_REG15__EBASE 1 +#define CP0_REG15__CDMMBASE 2 +#define CP0_REG15__CMGCRBASE 3 +#define CP0_REG15__BEVVA 4 +/* CP0 Register 16 */ +#define CP0_REG16__CONFIG 0 +#define CP0_REG16__CONFIG1 1 +#define CP0_REG16__CONFIG2 2 +#define CP0_REG16__CONFIG3 3 +#define CP0_REG16__CONFIG4 4 +#define CP0_REG16__CONFIG5 5 +#define CP0_REG16__CONFIG6 6 +#define CP0_REG16__CONFIG7 7 +/* CP0 Register 17 */ +#define CP0_REG17__LLADDR 0 +#define CP0_REG17__MAAR 1 +#define CP0_REG17__MAARI 2 +/* CP0 Register 18 */ +#define CP0_REG18__WATCHLO0 0 +#define CP0_REG18__WATCHLO1 1 +#define CP0_REG18__WATCHLO2 2 +#define CP0_REG18__WATCHLO3 3 +#define CP0_REG18__WATCHLO4 4 +#define CP0_REG18__WATCHLO5 5 +#define CP0_REG18__WATCHLO6 6 +#define CP0_REG18__WATCHLO7 7 +/* CP0 Register 19 */ +#define CP0_REG19__WATCHHI0 0 +#define CP0_REG19__WATCHHI1 1 +#define CP0_REG19__WATCHHI2 2 +#define CP0_REG19__WATCHHI3 3 +#define CP0_REG19__WATCHHI4 4 +#define CP0_REG19__WATCHHI5 5 +#define CP0_REG19__WATCHHI6 6 +#define CP0_REG19__WATCHHI7 7 +/* CP0 Register 20 */ +#define CP0_REG20__XCONTEXT 0 +/* CP0 Register 21 */ +/* CP0 Register 22 */ +/* CP0 Register 23 */ +#define CP0_REG23__DEBUG 0 +#define CP0_REG23__TRACECONTROL 1 +#define CP0_REG23__TRACECONTROL2 2 +#define CP0_REG23__USERTRACEDATA1 3 +#define CP0_REG23__TRACEIBPC 4 +#define CP0_REG23__TRACEDBPC 5 +#define CP0_REG23__DEBUG2 6 +/* CP0 Register 24 */ +#define CP0_REG24__DEPC 0 +/* CP0 Register 25 */ +#define CP0_REG25__PERFCTL0 0 +#define CP0_REG25__PERFCNT0 1 +#define CP0_REG25__PERFCTL1 2 +#define CP0_REG25__PERFCNT1 3 +#define CP0_REG25__PERFCTL2 4 +#define CP0_REG25__PERFCNT2 5 +#define CP0_REG25__PERFCTL3 6 +#define CP0_REG25__PERFCNT3 7 +/* CP0 Register 26 */ +#define CP0_REG26__ERRCTL 0 +/* CP0 Register 27 */ +#define CP0_REG27__CACHERR 0 +/* CP0 Register 28 */ +#define CP0_REG28__TAGLO 0 +#define CP0_REG28__DATALO 1 +#define CP0_REG28__TAGLO1 2 +#define CP0_REG28__DATALO1 3 +#define CP0_REG28__TAGLO2 4 +#define CP0_REG28__DATALO2 5 +#define CP0_REG28__TAGLO3 6 +#define CP0_REG28__DATALO3 7 +/* CP0 Register 29 */ +#define CP0_REG29__TAGHI 0 +#define CP0_REG29__DATAHI 1 +#define CP0_REG29__TAGHI1 2 +#define CP0_REG29__DATAHI1 3 +#define CP0_REG29__TAGHI2 4 +#define CP0_REG29__DATAHI2 5 +#define CP0_REG29__TAGHI3 6 +#define CP0_REG29__DATAHI3 7 +/* CP0 Register 30 */ +#define CP0_REG30__ERROREPC 0 +/* CP0 Register 31 */ +#define CP0_REG31__DESAVE 0 +#define CP0_REG31__KSCRATCH1 2 +#define CP0_REG31__KSCRATCH2 3 +#define CP0_REG31__KSCRATCH3 4 +#define CP0_REG31__KSCRATCH4 5 +#define CP0_REG31__KSCRATCH5 6 +#define CP0_REG31__KSCRATCH6 7 + + +typedef struct TCState TCState; +struct TCState { + target_ulong gpr[32]; + target_ulong PC; + target_ulong HI[MIPS_DSP_ACC]; + target_ulong LO[MIPS_DSP_ACC]; + target_ulong 
ACX[MIPS_DSP_ACC]; + target_ulong DSPControl; + int32_t CP0_TCStatus; +#define CP0TCSt_TCU3 31 +#define CP0TCSt_TCU2 30 +#define CP0TCSt_TCU1 29 +#define CP0TCSt_TCU0 28 +#define CP0TCSt_TMX 27 +#define CP0TCSt_RNST 23 +#define CP0TCSt_TDS 21 +#define CP0TCSt_DT 20 +#define CP0TCSt_DA 15 +#define CP0TCSt_A 13 +#define CP0TCSt_TKSU 11 +#define CP0TCSt_IXMT 10 +#define CP0TCSt_TASID 0 + int32_t CP0_TCBind; +#define CP0TCBd_CurTC 21 +#define CP0TCBd_TBE 17 +#define CP0TCBd_CurVPE 0 + target_ulong CP0_TCHalt; + target_ulong CP0_TCContext; + target_ulong CP0_TCSchedule; + target_ulong CP0_TCScheFBack; + int32_t CP0_Debug_tcstatus; + target_ulong CP0_UserLocal; + + int32_t msacsr; + +#define MSACSR_FS 24 +#define MSACSR_FS_MASK (1 << MSACSR_FS) +#define MSACSR_NX 18 +#define MSACSR_NX_MASK (1 << MSACSR_NX) +#define MSACSR_CEF 2 +#define MSACSR_CEF_MASK (0xffff << MSACSR_CEF) +#define MSACSR_RM 0 +#define MSACSR_RM_MASK (0x3 << MSACSR_RM) +#define MSACSR_MASK (MSACSR_RM_MASK | MSACSR_CEF_MASK | MSACSR_NX_MASK | \ + MSACSR_FS_MASK) + + float_status msa_fp_status; + + /* Upper 64-bit MMRs (multimedia registers); the lower 64-bit are GPRs */ + uint64_t mmr[32]; + +#define NUMBER_OF_MXU_REGISTERS 16 + target_ulong mxu_gpr[NUMBER_OF_MXU_REGISTERS - 1]; + target_ulong mxu_cr; +#define MXU_CR_LC 31 +#define MXU_CR_RC 30 +#define MXU_CR_BIAS 2 +#define MXU_CR_RD_EN 1 +#define MXU_CR_MXU_EN 0 + +}; + +struct MIPSITUState; +typedef struct CPUMIPSState CPUMIPSState; +struct CPUMIPSState { + TCState active_tc; + CPUMIPSFPUContext active_fpu; + + uint32_t current_tc; + uint32_t current_fpu; + + uint32_t SEGBITS; + uint32_t PABITS; +#if defined(TARGET_MIPS64) +# define PABITS_BASE 36 +#else +# define PABITS_BASE 32 +#endif + target_ulong SEGMask; + uint64_t PAMask; +#define PAMASK_BASE ((1ULL << PABITS_BASE) - 1) + + int32_t msair; +#define MSAIR_ProcID 8 +#define MSAIR_Rev 0 + +/* + * CP0 Register 0 + */ + int32_t CP0_Index; + /* CP0_MVP* are per MVP registers. 
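+     * That is, they are shared by all VPEs of a core: the values live in
+     * CPUMIPSMVPContext and are reached through env->mvp rather than
+     * being stored per CPU.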
*/ + int32_t CP0_VPControl; +#define CP0VPCtl_DIS 0 +/* + * CP0 Register 1 + */ + int32_t CP0_Random; + int32_t CP0_VPEControl; +#define CP0VPECo_YSI 21 +#define CP0VPECo_GSI 20 +#define CP0VPECo_EXCPT 16 +#define CP0VPECo_TE 15 +#define CP0VPECo_TargTC 0 + int32_t CP0_VPEConf0; +#define CP0VPEC0_M 31 +#define CP0VPEC0_XTC 21 +#define CP0VPEC0_TCS 19 +#define CP0VPEC0_SCS 18 +#define CP0VPEC0_DSC 17 +#define CP0VPEC0_ICS 16 +#define CP0VPEC0_MVP 1 +#define CP0VPEC0_VPA 0 + int32_t CP0_VPEConf1; +#define CP0VPEC1_NCX 20 +#define CP0VPEC1_NCP2 10 +#define CP0VPEC1_NCP1 0 + target_ulong CP0_YQMask; + target_ulong CP0_VPESchedule; + target_ulong CP0_VPEScheFBack; + int32_t CP0_VPEOpt; +#define CP0VPEOpt_IWX7 15 +#define CP0VPEOpt_IWX6 14 +#define CP0VPEOpt_IWX5 13 +#define CP0VPEOpt_IWX4 12 +#define CP0VPEOpt_IWX3 11 +#define CP0VPEOpt_IWX2 10 +#define CP0VPEOpt_IWX1 9 +#define CP0VPEOpt_IWX0 8 +#define CP0VPEOpt_DWX7 7 +#define CP0VPEOpt_DWX6 6 +#define CP0VPEOpt_DWX5 5 +#define CP0VPEOpt_DWX4 4 +#define CP0VPEOpt_DWX3 3 +#define CP0VPEOpt_DWX2 2 +#define CP0VPEOpt_DWX1 1 +#define CP0VPEOpt_DWX0 0 +/* + * CP0 Register 2 + */ + uint64_t CP0_EntryLo0; +/* + * CP0 Register 3 + */ + uint64_t CP0_EntryLo1; +#if defined(TARGET_MIPS64) +# define CP0EnLo_RI 63 +# define CP0EnLo_XI 62 +#else +# define CP0EnLo_RI 31 +# define CP0EnLo_XI 30 +#endif + int32_t CP0_GlobalNumber; +#define CP0GN_VPId 0 +/* + * CP0 Register 4 + */ + target_ulong CP0_Context; + int32_t CP0_MemoryMapID; +/* + * CP0 Register 5 + */ + int32_t CP0_PageMask; + int32_t CP0_PageGrain_rw_bitmask; + int32_t CP0_PageGrain; +#define CP0PG_RIE 31 +#define CP0PG_XIE 30 +#define CP0PG_ELPA 29 +#define CP0PG_IEC 27 + target_ulong CP0_SegCtl0; + target_ulong CP0_SegCtl1; + target_ulong CP0_SegCtl2; +#define CP0SC_PA 9 +#define CP0SC_PA_MASK (0x7FULL << CP0SC_PA) +#define CP0SC_PA_1GMASK (0x7EULL << CP0SC_PA) +#define CP0SC_AM 4 +#define CP0SC_AM_MASK (0x7ULL << CP0SC_AM) +#define CP0SC_AM_UK 0ULL +#define CP0SC_AM_MK 1ULL +#define CP0SC_AM_MSK 2ULL +#define CP0SC_AM_MUSK 3ULL +#define CP0SC_AM_MUSUK 4ULL +#define CP0SC_AM_USK 5ULL +#define CP0SC_AM_UUSK 7ULL +#define CP0SC_EU 3 +#define CP0SC_EU_MASK (1ULL << CP0SC_EU) +#define CP0SC_C 0 +#define CP0SC_C_MASK (0x7ULL << CP0SC_C) +#define CP0SC_MASK (CP0SC_C_MASK | CP0SC_EU_MASK | CP0SC_AM_MASK | \ + CP0SC_PA_MASK) +#define CP0SC_1GMASK (CP0SC_C_MASK | CP0SC_EU_MASK | CP0SC_AM_MASK | \ + CP0SC_PA_1GMASK) +#define CP0SC0_MASK (CP0SC_MASK | (CP0SC_MASK << 16)) +#define CP0SC1_XAM 59 +#define CP0SC1_XAM_MASK (0x7ULL << CP0SC1_XAM) +#define CP0SC1_MASK (CP0SC_MASK | (CP0SC_MASK << 16) | CP0SC1_XAM_MASK) +#define CP0SC2_XR 56 +#define CP0SC2_XR_MASK (0xFFULL << CP0SC2_XR) +#define CP0SC2_MASK (CP0SC_1GMASK | (CP0SC_1GMASK << 16) | CP0SC2_XR_MASK) + target_ulong CP0_PWBase; + target_ulong CP0_PWField; +#if defined(TARGET_MIPS64) +#define CP0PF_BDI 32 /* 37..32 */ +#define CP0PF_GDI 24 /* 29..24 */ +#define CP0PF_UDI 18 /* 23..18 */ +#define CP0PF_MDI 12 /* 17..12 */ +#define CP0PF_PTI 6 /* 11..6 */ +#define CP0PF_PTEI 0 /* 5..0 */ +#else +#define CP0PF_GDW 24 /* 29..24 */ +#define CP0PF_UDW 18 /* 23..18 */ +#define CP0PF_MDW 12 /* 17..12 */ +#define CP0PF_PTW 6 /* 11..6 */ +#define CP0PF_PTEW 0 /* 5..0 */ +#endif + target_ulong CP0_PWSize; +#if defined(TARGET_MIPS64) +#define CP0PS_BDW 32 /* 37..32 */ +#endif +#define CP0PS_PS 30 +#define CP0PS_GDW 24 /* 29..24 */ +#define CP0PS_UDW 18 /* 23..18 */ +#define CP0PS_MDW 12 /* 17..12 */ +#define CP0PS_PTW 6 /* 11..6 */ +#define CP0PS_PTEW 0 /* 5..0 */ 
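+/*
+ * Editorial note, not part of the original import: the CP0 field macros
+ * here mostly encode bit positions (the *_MASK variants are the shifted
+ * masks), so code tests or extracts a field with explicit shifts, e.g.:
+ *
+ *     if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) { ... }
+ *     unsigned am = (env->CP0_SegCtl0 & CP0SC_AM_MASK) >> CP0SC_AM;
+ */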
+/* + * CP0 Register 6 + */ + int32_t CP0_Wired; + int32_t CP0_PWCtl; +#define CP0PC_PWEN 31 +#if defined(TARGET_MIPS64) +#define CP0PC_PWDIREXT 30 +#define CP0PC_XK 28 +#define CP0PC_XS 27 +#define CP0PC_XU 26 +#endif +#define CP0PC_DPH 7 +#define CP0PC_HUGEPG 6 +#define CP0PC_PSN 0 /* 5..0 */ + int32_t CP0_SRSConf0_rw_bitmask; + int32_t CP0_SRSConf0; +#define CP0SRSC0_M 31 +#define CP0SRSC0_SRS3 20 +#define CP0SRSC0_SRS2 10 +#define CP0SRSC0_SRS1 0 + int32_t CP0_SRSConf1_rw_bitmask; + int32_t CP0_SRSConf1; +#define CP0SRSC1_M 31 +#define CP0SRSC1_SRS6 20 +#define CP0SRSC1_SRS5 10 +#define CP0SRSC1_SRS4 0 + int32_t CP0_SRSConf2_rw_bitmask; + int32_t CP0_SRSConf2; +#define CP0SRSC2_M 31 +#define CP0SRSC2_SRS9 20 +#define CP0SRSC2_SRS8 10 +#define CP0SRSC2_SRS7 0 + int32_t CP0_SRSConf3_rw_bitmask; + int32_t CP0_SRSConf3; +#define CP0SRSC3_M 31 +#define CP0SRSC3_SRS12 20 +#define CP0SRSC3_SRS11 10 +#define CP0SRSC3_SRS10 0 + int32_t CP0_SRSConf4_rw_bitmask; + int32_t CP0_SRSConf4; +#define CP0SRSC4_SRS15 20 +#define CP0SRSC4_SRS14 10 +#define CP0SRSC4_SRS13 0 +/* + * CP0 Register 7 + */ + int32_t CP0_HWREna; +/* + * CP0 Register 8 + */ + target_ulong CP0_BadVAddr; + uint32_t CP0_BadInstr; + uint32_t CP0_BadInstrP; + uint32_t CP0_BadInstrX; +/* + * CP0 Register 9 + */ + int32_t CP0_Count; + uint32_t CP0_SAARI; +#define CP0SAARI_TARGET 0 /* 5..0 */ + uint64_t CP0_SAAR[2]; +#define CP0SAAR_BASE 12 /* 43..12 */ +#define CP0SAAR_SIZE 1 /* 5..1 */ +#define CP0SAAR_EN 0 +/* + * CP0 Register 10 + */ + target_ulong CP0_EntryHi; +#define CP0EnHi_EHINV 10 + target_ulong CP0_EntryHi_ASID_mask; +/* + * CP0 Register 11 + */ + int32_t CP0_Compare; +/* + * CP0 Register 12 + */ + int32_t CP0_Status; +#define CP0St_CU3 31 +#define CP0St_CU2 30 +#define CP0St_CU1 29 +#define CP0St_CU0 28 +#define CP0St_RP 27 +#define CP0St_FR 26 +#define CP0St_RE 25 +#define CP0St_MX 24 +#define CP0St_PX 23 +#define CP0St_BEV 22 +#define CP0St_TS 21 +#define CP0St_SR 20 +#define CP0St_NMI 19 +#define CP0St_IM 8 +#define CP0St_KX 7 +#define CP0St_SX 6 +#define CP0St_UX 5 +#define CP0St_KSU 3 +#define CP0St_ERL 2 +#define CP0St_EXL 1 +#define CP0St_IE 0 + int32_t CP0_IntCtl; +#define CP0IntCtl_IPTI 29 +#define CP0IntCtl_IPPCI 26 +#define CP0IntCtl_VS 5 + int32_t CP0_SRSCtl; +#define CP0SRSCtl_HSS 26 +#define CP0SRSCtl_EICSS 18 +#define CP0SRSCtl_ESS 12 +#define CP0SRSCtl_PSS 6 +#define CP0SRSCtl_CSS 0 + int32_t CP0_SRSMap; +#define CP0SRSMap_SSV7 28 +#define CP0SRSMap_SSV6 24 +#define CP0SRSMap_SSV5 20 +#define CP0SRSMap_SSV4 16 +#define CP0SRSMap_SSV3 12 +#define CP0SRSMap_SSV2 8 +#define CP0SRSMap_SSV1 4 +#define CP0SRSMap_SSV0 0 +/* + * CP0 Register 13 + */ + int32_t CP0_Cause; +#define CP0Ca_BD 31 +#define CP0Ca_TI 30 +#define CP0Ca_CE 28 +#define CP0Ca_DC 27 +#define CP0Ca_PCI 26 +#define CP0Ca_IV 23 +#define CP0Ca_WP 22 +#define CP0Ca_IP 8 +#define CP0Ca_IP_mask 0x0000FF00 +#define CP0Ca_EC 2 +/* + * CP0 Register 14 + */ + target_ulong CP0_EPC; +/* + * CP0 Register 15 + */ + int32_t CP0_PRid; + target_ulong CP0_EBase; + target_ulong CP0_EBaseWG_rw_bitmask; +#define CP0EBase_WG 11 + target_ulong CP0_CMGCRBase; +/* + * CP0 Register 16 + */ + int32_t CP0_Config0; +#define CP0C0_M 31 +#define CP0C0_K23 28 /* 30..28 */ +#define CP0C0_KU 25 /* 27..25 */ +#define CP0C0_MDU 20 +#define CP0C0_MM 18 +#define CP0C0_BM 16 +#define CP0C0_Impl 16 /* 24..16 */ +#define CP0C0_BE 15 +#define CP0C0_AT 13 /* 14..13 */ +#define CP0C0_AR 10 /* 12..10 */ +#define CP0C0_MT 7 /* 9..7 */ +#define CP0C0_VI 3 +#define CP0C0_K0 0 /* 2..0 */ + int32_t 
CP0_Config1; +#define CP0C1_M 31 +#define CP0C1_MMU 25 /* 30..25 */ +#define CP0C1_IS 22 /* 24..22 */ +#define CP0C1_IL 19 /* 21..19 */ +#define CP0C1_IA 16 /* 18..16 */ +#define CP0C1_DS 13 /* 15..13 */ +#define CP0C1_DL 10 /* 12..10 */ +#define CP0C1_DA 7 /* 9..7 */ +#define CP0C1_C2 6 +#define CP0C1_MD 5 +#define CP0C1_PC 4 +#define CP0C1_WR 3 +#define CP0C1_CA 2 +#define CP0C1_EP 1 +#define CP0C1_FP 0 + int32_t CP0_Config2; +#define CP0C2_M 31 +#define CP0C2_TU 28 /* 30..28 */ +#define CP0C2_TS 24 /* 27..24 */ +#define CP0C2_TL 20 /* 23..20 */ +#define CP0C2_TA 16 /* 19..16 */ +#define CP0C2_SU 12 /* 15..12 */ +#define CP0C2_SS 8 /* 11..8 */ +#define CP0C2_SL 4 /* 7..4 */ +#define CP0C2_SA 0 /* 3..0 */ + int32_t CP0_Config3; +#define CP0C3_M 31 +#define CP0C3_BPG 30 +#define CP0C3_CMGCR 29 +#define CP0C3_MSAP 28 +#define CP0C3_BP 27 +#define CP0C3_BI 26 +#define CP0C3_SC 25 +#define CP0C3_PW 24 +#define CP0C3_VZ 23 +#define CP0C3_IPLV 21 /* 22..21 */ +#define CP0C3_MMAR 18 /* 20..18 */ +#define CP0C3_MCU 17 +#define CP0C3_ISA_ON_EXC 16 +#define CP0C3_ISA 14 /* 15..14 */ +#define CP0C3_ULRI 13 +#define CP0C3_RXI 12 +#define CP0C3_DSP2P 11 +#define CP0C3_DSPP 10 +#define CP0C3_CTXTC 9 +#define CP0C3_ITL 8 +#define CP0C3_LPA 7 +#define CP0C3_VEIC 6 +#define CP0C3_VInt 5 +#define CP0C3_SP 4 +#define CP0C3_CDMM 3 +#define CP0C3_MT 2 +#define CP0C3_SM 1 +#define CP0C3_TL 0 + int32_t CP0_Config4; + int32_t CP0_Config4_rw_bitmask; +#define CP0C4_M 31 +#define CP0C4_IE 29 /* 30..29 */ +#define CP0C4_AE 28 +#define CP0C4_VTLBSizeExt 24 /* 27..24 */ +#define CP0C4_KScrExist 16 +#define CP0C4_MMUExtDef 14 +#define CP0C4_FTLBPageSize 8 /* 12..8 */ +/* bit layout if MMUExtDef=1 */ +#define CP0C4_MMUSizeExt 0 /* 7..0 */ +/* bit layout if MMUExtDef=2 */ +#define CP0C4_FTLBWays 4 /* 7..4 */ +#define CP0C4_FTLBSets 0 /* 3..0 */ + int32_t CP0_Config5; + int32_t CP0_Config5_rw_bitmask; +#define CP0C5_M 31 +#define CP0C5_K 30 +#define CP0C5_CV 29 +#define CP0C5_EVA 28 +#define CP0C5_MSAEn 27 +#define CP0C5_PMJ 23 /* 25..23 */ +#define CP0C5_WR2 22 +#define CP0C5_NMS 21 +#define CP0C5_ULS 20 +#define CP0C5_XPA 19 +#define CP0C5_CRCP 18 +#define CP0C5_MI 17 +#define CP0C5_GI 15 /* 16..15 */ +#define CP0C5_CA2 14 +#define CP0C5_XNP 13 +#define CP0C5_DEC 11 +#define CP0C5_L2C 10 +#define CP0C5_UFE 9 +#define CP0C5_FRE 8 +#define CP0C5_VP 7 +#define CP0C5_SBRI 6 +#define CP0C5_MVH 5 +#define CP0C5_LLB 4 +#define CP0C5_MRP 3 +#define CP0C5_UFR 2 +#define CP0C5_NFExists 0 + int32_t CP0_Config6; + int32_t CP0_Config7; + uint64_t CP0_LLAddr; + uint64_t CP0_MAAR[MIPS_MAAR_MAX]; + int32_t CP0_MAARI; + /* XXX: Maybe make LLAddr per-TC? 
*/ +/* + * CP0 Register 17 + */ + target_ulong lladdr; /* LL virtual address compared against SC */ + target_ulong llval; + uint64_t llval_wp; + uint32_t llnewval_wp; + uint64_t CP0_LLAddr_rw_bitmask; + int CP0_LLAddr_shift; +/* + * CP0 Register 18 + */ + target_ulong CP0_WatchLo[8]; +/* + * CP0 Register 19 + */ + uint64_t CP0_WatchHi[8]; +#define CP0WH_ASID 16 +/* + * CP0 Register 20 + */ + target_ulong CP0_XContext; + int32_t CP0_Framemask; +/* + * CP0 Register 23 + */ + int32_t CP0_Debug; +#define CP0DB_DBD 31 +#define CP0DB_DM 30 +#define CP0DB_LSNM 28 +#define CP0DB_Doze 27 +#define CP0DB_Halt 26 +#define CP0DB_CNT 25 +#define CP0DB_IBEP 24 +#define CP0DB_DBEP 21 +#define CP0DB_IEXI 20 +#define CP0DB_VER 15 +#define CP0DB_DEC 10 +#define CP0DB_SSt 8 +#define CP0DB_DINT 5 +#define CP0DB_DIB 4 +#define CP0DB_DDBS 3 +#define CP0DB_DDBL 2 +#define CP0DB_DBp 1 +#define CP0DB_DSS 0 +/* + * CP0 Register 24 + */ + target_ulong CP0_DEPC; +/* + * CP0 Register 25 + */ + int32_t CP0_Performance0; +/* + * CP0 Register 26 + */ + int32_t CP0_ErrCtl; +#define CP0EC_WST 29 +#define CP0EC_SPR 28 +#define CP0EC_ITC 26 +/* + * CP0 Register 28 + */ + uint64_t CP0_TagLo; + int32_t CP0_DataLo; +/* + * CP0 Register 29 + */ + int32_t CP0_TagHi; + int32_t CP0_DataHi; +/* + * CP0 Register 30 + */ + target_ulong CP0_ErrorEPC; +/* + * CP0 Register 31 + */ + int32_t CP0_DESAVE; + target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM]; + + /* We waste some space so we can handle shadow registers like TCs. */ + TCState tcs[MIPS_SHADOW_SET_MAX]; + CPUMIPSFPUContext fpus[MIPS_FPU_MAX]; + /* QEMU */ + int error_code; +#define EXCP_TLB_NOMATCH 0x1 +#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */ + uint32_t hflags; /* CPU State */ + /* TMASK defines different execution modes */ +#define MIPS_HFLAG_TMASK 0x1F5807FF +#define MIPS_HFLAG_MODE 0x00007 /* execution modes */ + /* + * The KSU flags must be the lowest bits in hflags. The flag order + * must be the same as defined for CP0 Status. This allows to use + * the bits as the value of mmu_idx. + */ +#define MIPS_HFLAG_KSU 0x00003 /* kernel/supervisor/user mode mask */ +#define MIPS_HFLAG_UM 0x00002 /* user mode flag */ +#define MIPS_HFLAG_SM 0x00001 /* supervisor mode flag */ +#define MIPS_HFLAG_KM 0x00000 /* kernel mode flag */ +#define MIPS_HFLAG_DM 0x00004 /* Debug mode */ +#define MIPS_HFLAG_64 0x00008 /* 64-bit instructions enabled */ +#define MIPS_HFLAG_CP0 0x00010 /* CP0 enabled */ +#define MIPS_HFLAG_FPU 0x00020 /* FPU enabled */ +#define MIPS_HFLAG_F64 0x00040 /* 64-bit FPU enabled */ + /* + * True if the MIPS IV COP1X instructions can be used. This also + * controls the non-COP1X instructions RECIP.S, RECIP.D, RSQRT.S + * and RSQRT.D. + */ +#define MIPS_HFLAG_COP1X 0x00080 /* COP1X instructions enabled */ +#define MIPS_HFLAG_RE 0x00100 /* Reversed endianness */ +#define MIPS_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */ +#define MIPS_HFLAG_M16 0x00400 /* MIPS16 mode flag */ +#define MIPS_HFLAG_M16_SHIFT 10 + /* + * If translation is interrupted between the branch instruction and + * the delay slot, record what type of branch it is so that we can + * resume translation properly. It might be possible to reduce + * this from three bits to two. 
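+     * Editorial note: the branch-type encodings (B, BC, BL, BR) occupy
+     * the MIPS_HFLAG_BMASK_BASE bits below, while MIPS_HFLAG_BMASK_EXT
+     * records the branch/delay-slot size and ISA-mode exchange.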
+ */ +#define MIPS_HFLAG_BMASK_BASE 0x803800 +#define MIPS_HFLAG_B 0x00800 /* Unconditional branch */ +#define MIPS_HFLAG_BC 0x01000 /* Conditional branch */ +#define MIPS_HFLAG_BL 0x01800 /* Likely branch */ +#define MIPS_HFLAG_BR 0x02000 /* branch to register (can't link TB) */ + /* Extra flags about the current pending branch. */ +#define MIPS_HFLAG_BMASK_EXT 0x7C000 +#define MIPS_HFLAG_B16 0x04000 /* branch instruction was 16 bits */ +#define MIPS_HFLAG_BDS16 0x08000 /* branch requires 16-bit delay slot */ +#define MIPS_HFLAG_BDS32 0x10000 /* branch requires 32-bit delay slot */ +#define MIPS_HFLAG_BDS_STRICT 0x20000 /* Strict delay slot size */ +#define MIPS_HFLAG_BX 0x40000 /* branch exchanges execution mode */ +#define MIPS_HFLAG_BMASK (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT) + /* MIPS DSP resources access. */ +#define MIPS_HFLAG_DSP 0x080000 /* Enable access to DSP resources. */ +#define MIPS_HFLAG_DSP_R2 0x100000 /* Enable access to DSP R2 resources. */ +#define MIPS_HFLAG_DSP_R3 0x20000000 /* Enable access to DSP R3 resources. */ + /* Extra flag about HWREna register. */ +#define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */ +#define MIPS_HFLAG_SBRI 0x400000 /* R6 SDBBP causes RI excpt. in user mode */ +#define MIPS_HFLAG_FBNSLOT 0x800000 /* Forbidden slot */ +#define MIPS_HFLAG_MSA 0x1000000 +#define MIPS_HFLAG_FRE 0x2000000 /* FRE enabled */ +#define MIPS_HFLAG_ELPA 0x4000000 +#define MIPS_HFLAG_ITC_CACHE 0x8000000 /* CACHE instr. operates on ITC tag */ +#define MIPS_HFLAG_ERL 0x10000000 /* error level flag */ + target_ulong btarget; /* Jump / branch target */ + target_ulong bcond; /* Branch condition (if needed) */ + + int SYNCI_Step; /* Address step size for SYNCI */ + int CCRes; /* Cycle count resolution/divisor */ + uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */ + uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */ + uint64_t insn_flags; /* Supported instruction set */ + int saarp; + + /* Fields up to this point are cleared by a CPU reset */ +#ifdef _MSC_VER + int end_reset_fields; +#else + struct {} end_reset_fields; +#endif + + /* Fields from here on are preserved across CPU reset. */ + CPUMIPSMVPContext *mvp; + CPUMIPSTLBContext *tlb; + + const mips_def_t *cpu_model; + void *irq[8]; + QEMUTimer *timer; /* Internal timer */ + struct MIPSITUState *itu; + MemoryRegion *itc_tag; /* ITC Configuration Tags */ + target_ulong exception_base; /* ExceptionBase input to the core */ + + // Unicorn engine + struct uc_struct *uc; +}; + +/** + * MIPSCPU: + * @env: #CPUMIPSState + * + * A MIPS CPU. + */ +struct MIPSCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUNegativeOffsetState neg; + CPUMIPSState env; + + struct MIPSCPUClass cc; +}; + + +#define cpu_signal_handler cpu_mips_signal_handler + +extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env); +extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env); + +/* + * MMU modes definitions. We carefully match the indices with our + * hflags layout. 
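+ * In practice: kernel mode = 0, supervisor = 1, user = 2 (the CP0
+ * Status KSU encoding), and index 3 is used when ERL is set, exactly
+ * as hflags_mmu_index() below computes.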
+ */ +#define MMU_USER_IDX 2 + +static inline int hflags_mmu_index(uint32_t hflags) +{ + if (hflags & MIPS_HFLAG_ERL) { + return 3; /* ERL */ + } else { + return hflags & MIPS_HFLAG_KSU; + } +} + +static inline int cpu_mmu_index(CPUMIPSState *env, bool ifetch) +{ + return hflags_mmu_index(env->hflags); +} + +typedef CPUMIPSState CPUArchState; +typedef MIPSCPU ArchCPU; + +#include "exec/cpu-all.h" + +/* + * Memory access type : + * may be needed for precise access rights control and precise exceptions. + */ +enum { + /* 1 bit to define user level / supervisor access */ + ACCESS_USER = 0x00, + ACCESS_SUPER = 0x01, + /* 1 bit to indicate direction */ + ACCESS_STORE = 0x02, + /* Type of instruction that generated the access */ + ACCESS_CODE = 0x10, /* Code fetch access */ + ACCESS_INT = 0x20, /* Integer load/store access */ + ACCESS_FLOAT = 0x30, /* floating point load/store access */ +}; + +/* Exceptions */ +enum { + EXCP_NONE = -1, + EXCP_RESET = 0, + EXCP_SRESET, + EXCP_DSS, + EXCP_DINT, + EXCP_DDBL, + EXCP_DDBS, + EXCP_NMI, + EXCP_MCHECK, + EXCP_EXT_INTERRUPT, /* 8 */ + EXCP_DFWATCH, + EXCP_DIB, + EXCP_IWATCH, + EXCP_AdEL, + EXCP_AdES, + EXCP_TLBF, + EXCP_IBE, + EXCP_DBp, /* 16 */ + EXCP_SYSCALL, + EXCP_BREAK, + EXCP_CpU, + EXCP_RI, + EXCP_OVERFLOW, + EXCP_TRAP, + EXCP_FPE, + EXCP_DWATCH, /* 24 */ + EXCP_LTLBL, + EXCP_TLBL, + EXCP_TLBS, + EXCP_DBE, + EXCP_THREAD, + EXCP_MDMX, + EXCP_C2E, + EXCP_CACHE, /* 32 */ + EXCP_DSPDIS, + EXCP_MSADIS, + EXCP_MSAFPE, + EXCP_TLBXI, + EXCP_TLBRI, + + EXCP_LAST = EXCP_TLBRI, +}; + +/* + * This is an internally generated WAKE request line. + * It is driven by the CPU itself. Raised when the MT + * block wants to wake a VPE from an inactive state and + * cleared when VPE goes from active to inactive. + */ +#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0 + +int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc); + +#define MIPS_CPU_TYPE_SUFFIX "-" TYPE_MIPS_CPU +#define MIPS_CPU_TYPE_NAME(model) model MIPS_CPU_TYPE_SUFFIX +#define CPU_RESOLVING_TYPE TYPE_MIPS_CPU + +bool cpu_supports_cps_smp(const char *cpu_type); +bool cpu_supports_isa(const char *cpu_type, uint64_t isa); +void cpu_set_exception_base(int vp_index, target_ulong address); + +/* mips_int.c */ +void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level); + +/* mips_itu.c */ +void itc_reconfigure(struct MIPSITUState *tag); + +/* helper.c */ +target_ulong exception_resume_pc(CPUMIPSState *env); + +static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *flags) +{ + *pc = env->active_tc.PC; + *cs_base = 0; + *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK | + MIPS_HFLAG_HWRENA_ULR); +} + +#endif /* MIPS_CPU_H */ diff --git a/qemu/target-mips/dsp_helper.c b/qemu/target/mips/dsp_helper.c similarity index 98% rename from qemu/target-mips/dsp_helper.c rename to qemu/target/mips/dsp_helper.c index 46528de3..8c58eeb0 100644 --- a/qemu/target-mips/dsp_helper.c +++ b/qemu/target/mips/dsp_helper.c @@ -17,12 +17,15 @@ * License along with this library; if not, see . */ +#include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" #include "qemu/bitops.h" -/* As the byte ordering doesn't matter, i.e. all columns are treated - identically, these unions can be used directly. */ +/* + * As the byte ordering doesn't matter, i.e. all columns are treated + * identically, these unions can be used directly. 
+ */ typedef union { uint8_t ub[4]; int8_t sb[4]; @@ -44,9 +47,9 @@ typedef union { } DSP64Value; /*** MIPS DSP internal functions begin ***/ -#define MIPSDSP_ABS(x) (((x) >= 0) ? x : -x) -#define MIPSDSP_OVERFLOW_ADD(a, b, c, d) (~(a ^ b) & (a ^ c) & d) -#define MIPSDSP_OVERFLOW_SUB(a, b, c, d) ((a ^ b) & (a ^ c) & d) +#define MIPSDSP_ABS(x) (((x) >= 0) ? (x) : -(x)) +#define MIPSDSP_OVERFLOW_ADD(a, b, c, d) (~((a) ^ (b)) & ((a) ^ (c)) & (d)) +#define MIPSDSP_OVERFLOW_SUB(a, b, c, d) (((a) ^ (b)) & ((a) ^ (c)) & (d)) static inline void set_DSPControl_overflow_flag(uint32_t flag, int position, CPUMIPSState *env) @@ -1046,47 +1049,47 @@ static inline int32_t mipsdsp_cmpu_lt(uint32_t a, uint32_t b) #define MIPSDSP_SPLIT32_8(num, a, b, c, d) \ do { \ - a = (num >> 24) & MIPSDSP_Q0; \ - b = (num >> 16) & MIPSDSP_Q0; \ - c = (num >> 8) & MIPSDSP_Q0; \ - d = num & MIPSDSP_Q0; \ + a = ((num) >> 24) & MIPSDSP_Q0; \ + b = ((num) >> 16) & MIPSDSP_Q0; \ + c = ((num) >> 8) & MIPSDSP_Q0; \ + d = (num) & MIPSDSP_Q0; \ } while (0) #define MIPSDSP_SPLIT32_16(num, a, b) \ do { \ - a = (num >> 16) & MIPSDSP_LO; \ - b = num & MIPSDSP_LO; \ + a = ((num) >> 16) & MIPSDSP_LO; \ + b = (num) & MIPSDSP_LO; \ } while (0) -#define MIPSDSP_RETURN32_8(a, b, c, d) ((target_long)(int32_t) \ - (((uint32_t)a << 24) | \ - (((uint32_t)b << 16) | \ - (((uint32_t)c << 8) | \ - ((uint32_t)d & 0xFF))))) -#define MIPSDSP_RETURN32_16(a, b) ((target_long)(int32_t) \ - (((uint32_t)a << 16) | \ - ((uint32_t)b & 0xFFFF))) +#define MIPSDSP_RETURN32_8(a, b, c, d) ((target_long)(int32_t) \ + (((uint32_t)(a) << 24) | \ + ((uint32_t)(b) << 16) | \ + ((uint32_t)(c) << 8) | \ + ((uint32_t)(d) & 0xFF))) +#define MIPSDSP_RETURN32_16(a, b) ((target_long)(int32_t) \ + (((uint32_t)(a) << 16) | \ + ((uint32_t)(b) & 0xFFFF))) #ifdef TARGET_MIPS64 #define MIPSDSP_SPLIT64_16(num, a, b, c, d) \ do { \ - a = (num >> 48) & MIPSDSP_LO; \ - b = (num >> 32) & MIPSDSP_LO; \ - c = (num >> 16) & MIPSDSP_LO; \ - d = num & MIPSDSP_LO; \ + a = ((num) >> 48) & MIPSDSP_LO; \ + b = ((num) >> 32) & MIPSDSP_LO; \ + c = ((num) >> 16) & MIPSDSP_LO; \ + d = (num) & MIPSDSP_LO; \ } while (0) #define MIPSDSP_SPLIT64_32(num, a, b) \ do { \ - a = (num >> 32) & MIPSDSP_LLO; \ - b = num & MIPSDSP_LLO; \ + a = ((num) >> 32) & MIPSDSP_LLO; \ + b = (num) & MIPSDSP_LLO; \ } while (0) -#define MIPSDSP_RETURN64_16(a, b, c, d) (((uint64_t)a << 48) | \ - ((uint64_t)b << 32) | \ - ((uint64_t)c << 16) | \ - (uint64_t)d) -#define MIPSDSP_RETURN64_32(a, b) (((uint64_t)a << 32) | (uint64_t)b) +#define MIPSDSP_RETURN64_16(a, b, c, d) (((uint64_t)(a) << 48) | \ + ((uint64_t)(b) << 32) | \ + ((uint64_t)(c) << 16) | \ + (uint64_t)(d)) +#define MIPSDSP_RETURN64_32(a, b) (((uint64_t)(a) << 32) | (uint64_t)(b)) #endif /** DSP Arithmetic Sub-class insns **/ @@ -1444,9 +1447,15 @@ target_ulong helper_precr_ob_qh(target_ulong rs, target_ulong rt) return temp; } -#define PRECR_QH_PW(name, var) \ -target_ulong helper_precr_##name##_qh_pw(target_ulong rs, target_ulong rt, \ - uint32_t sa) \ + +/* + * In case sa == 0, use rt2, rt0, rs2, rs0. + * In case sa != 0, use rt3, rt1, rs3, rs1. 
+ */ +#define PRECR_QH_PW(name, var) \ +target_ulong helper_precr_##name##_qh_pw(target_ulong rs, \ + target_ulong rt, \ + uint32_t sa) \ { \ uint16_t rs3, rs2, rs1, rs0; \ uint16_t rt3, rt2, rt1, rt0; \ @@ -1455,8 +1464,6 @@ target_ulong helper_precr_##name##_qh_pw(target_ulong rs, target_ulong rt, \ MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); \ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \ \ - /* When sa = 0, we use rt2, rt0, rs2, rs0; \ - * when sa != 0, we use rt3, rt1, rs3, rs1. */ \ if (sa == 0) { \ tempD = rt2 << var; \ tempC = rt0 << var; \ @@ -1964,7 +1971,8 @@ SHIFT_PH(shra_r, rnd16_rashift); #undef SHIFT_PH /** DSP Multiply Sub-class insns **/ -/* Return value made up by two 16bits value. +/* + * Return value made up by two 16bits value. * FIXME give the macro a better name. */ #define MUL_RETURN32_16_PH(name, func, \ @@ -3273,14 +3281,15 @@ target_ulong helper_dextr_l(target_ulong ac, target_ulong shift, CPUMIPSState *env) { uint64_t temp[3]; - target_ulong result; + target_ulong ret; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); - result = (temp[1] << 63) | (temp[0] >> 1); - return result; + ret = (temp[1] << 63) | (temp[0] >> 1); + + return ret; } target_ulong helper_dextr_r_l(target_ulong ac, target_ulong shift, @@ -3288,7 +3297,7 @@ target_ulong helper_dextr_r_l(target_ulong ac, target_ulong shift, { uint64_t temp[3]; uint32_t temp128; - target_ulong result; + target_ulong ret; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); @@ -3308,9 +3317,9 @@ target_ulong helper_dextr_r_l(target_ulong ac, target_ulong shift, set_DSPControl_overflow_flag(1, 23, env); } - result = (temp[1] << 63) | (temp[0] >> 1); + ret = (temp[1] << 63) | (temp[0] >> 1); - return result; + return ret; } target_ulong helper_dextr_rs_l(target_ulong ac, target_ulong shift, @@ -3318,7 +3327,7 @@ target_ulong helper_dextr_rs_l(target_ulong ac, target_ulong shift, { uint64_t temp[3]; uint32_t temp128; - target_ulong result; + target_ulong ret; shift = shift & 0x3F; mipsdsp_rndrashift_acc(temp, ac, shift, env); @@ -3344,9 +3353,10 @@ target_ulong helper_dextr_rs_l(target_ulong ac, target_ulong shift, } set_DSPControl_overflow_flag(1, 23, env); } - result = (temp[1] << 63) | (temp[0] >> 1); - return result; + ret = (temp[1] << 63) | (temp[0] >> 1); + + return ret; } #endif @@ -3476,7 +3486,7 @@ target_ulong helper_dextp(target_ulong ac, target_ulong size, CPUMIPSState *env) if (sub >= -1) { temp = (tempB << (64 - len)) | (tempA >> len); - temp = temp & ((0x01 << (size + 1)) - 1); + temp = temp & ((1ULL << (size + 1)) - 1); set_DSPControl_efi(0, env); } else { set_DSPControl_efi(1, env); @@ -3505,7 +3515,7 @@ target_ulong helper_dextpdp(target_ulong ac, target_ulong size, if (sub >= -1) { temp = (tempB << (64 - len)) | (tempA >> len); - temp = temp & ((0x01 << (size + 1)) - 1); + temp = temp & ((1ULL << (size + 1)) - 1); set_DSPControl_pos(sub, env); set_DSPControl_efi(0, env); } else { diff --git a/qemu/target/mips/fpu_helper.c b/qemu/target/mips/fpu_helper.c new file mode 100644 index 00000000..34431468 --- /dev/null +++ b/qemu/target/mips/fpu_helper.c @@ -0,0 +1,1910 @@ +/* + * Helpers for emulation of FPU-related MIPS instructions. + * + * Copyright (C) 2004-2005 Jocelyn Mayer + * Copyright (C) 2020 Wave Computing, Inc. 
+ * Copyright (C) 2020 Aleksandar Markovic + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "exec/memop.h" +//#include "sysemu/kvm.h" +#include "fpu/softfloat.h" + + +/* Complex FPU operations which may need stack space. */ + +#define FLOAT_TWO32 make_float32(1 << 30) +#define FLOAT_TWO64 make_float64(1ULL << 62) + +#define FP_TO_INT32_OVERFLOW 0x7fffffff +#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL + +/* convert MIPS rounding mode in FCR31 to IEEE library */ +unsigned int ieee_rm[] = { + float_round_nearest_even, + float_round_to_zero, + float_round_up, + float_round_down +}; + +target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg) +{ + target_ulong arg1 = 0; + + switch (reg) { + case 0: + arg1 = (int32_t)env->active_fpu.fcr0; + break; + case 1: + /* UFR Support - Read Status FR */ + if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) { + if (env->CP0_Config5 & (1 << CP0C5_UFR)) { + arg1 = (int32_t) + ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR); + } else { + do_raise_exception(env, EXCP_RI, GETPC()); + } + } + break; + case 5: + /* FRE Support - read Config5.FRE bit */ + if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) { + if (env->CP0_Config5 & (1 << CP0C5_UFE)) { + arg1 = (env->CP0_Config5 >> CP0C5_FRE) & 1; + } else { + helper_raise_exception(env, EXCP_RI); + } + } + break; + case 25: + arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | + ((env->active_fpu.fcr31 >> 23) & 0x1); + break; + case 26: + arg1 = env->active_fpu.fcr31 & 0x0003f07c; + break; + case 28: + arg1 = (env->active_fpu.fcr31 & 0x00000f83) | + ((env->active_fpu.fcr31 >> 22) & 0x4); + break; + default: + arg1 = (int32_t)env->active_fpu.fcr31; + break; + } + + return arg1; +} + +void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt) +{ + switch (fs) { + case 1: + /* UFR Alias - Reset Status FR */ + if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) { + return; + } + if (env->CP0_Config5 & (1 << CP0C5_UFR)) { + env->CP0_Status &= ~(1 << CP0St_FR); + compute_hflags(env); + } else { + do_raise_exception(env, EXCP_RI, GETPC()); + } + break; + case 4: + /* UNFR Alias - Set Status FR */ + if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) { + return; + } + if (env->CP0_Config5 & (1 << CP0C5_UFR)) { + env->CP0_Status |= (1 << CP0St_FR); + compute_hflags(env); + } else { + do_raise_exception(env, EXCP_RI, GETPC()); + } + break; + case 5: + /* FRE Support - clear Config5.FRE bit */ + if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) { + return; + } + if (env->CP0_Config5 & (1 << CP0C5_UFE)) { + env->CP0_Config5 &= ~(1 << CP0C5_FRE); + compute_hflags(env); + } else { + helper_raise_exception(env, EXCP_RI); + } + break; + case 6: + /* FRE Support - set Config5.FRE bit */ + if 
(!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
+        return;
+    }
+    if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
+        env->CP0_Config5 |= (1 << CP0C5_FRE);
+        compute_hflags(env);
+    } else {
+        helper_raise_exception(env, EXCP_RI);
+    }
+    break;
+    case 25:
+        if ((env->insn_flags & ISA_MIPS32R6) || (arg1 & 0xffffff00)) {
+            return;
+        }
+        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) |
+                                ((arg1 & 0xfe) << 24) |
+                                ((arg1 & 0x1) << 23);
+        break;
+    case 26:
+        if (arg1 & 0x007c0000) {
+            return;
+        }
+        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) |
+                                (arg1 & 0x0003f07c);
+        break;
+    case 28:
+        if (arg1 & 0x007c0000) {
+            return;
+        }
+        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) |
+                                (arg1 & 0x00000f83) |
+                                ((arg1 & 0x4) << 22);
+        break;
+    case 31:
+        env->active_fpu.fcr31 = (arg1 & env->active_fpu.fcr31_rw_bitmask) |
+              (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask));
+        break;
+    default:
+        if (env->insn_flags & ISA_MIPS32R6) {
+            do_raise_exception(env, EXCP_RI, GETPC());
+        }
+        return;
+    }
+    restore_fp_status(env);
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
+    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) &
+        GET_FP_CAUSE(env->active_fpu.fcr31)) {
+        do_raise_exception(env, EXCP_FPE, GETPC());
+    }
+}
+
+int ieee_ex_to_mips(int xcpt)
+{
+    int ret = 0;
+    if (xcpt) {
+        if (xcpt & float_flag_invalid) {
+            ret |= FP_INVALID;
+        }
+        if (xcpt & float_flag_overflow) {
+            ret |= FP_OVERFLOW;
+        }
+        if (xcpt & float_flag_underflow) {
+            ret |= FP_UNDERFLOW;
+        }
+        if (xcpt & float_flag_divbyzero) {
+            ret |= FP_DIV0;
+        }
+        if (xcpt & float_flag_inexact) {
+            ret |= FP_INEXACT;
+        }
+    }
+    return ret;
+}
+
+static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc)
+{
+    int tmp = ieee_ex_to_mips(get_float_exception_flags(
+                                  &env->active_fpu.fp_status));
+
+    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
+
+    if (tmp) {
+        set_float_exception_flags(0, &env->active_fpu.fp_status);
+
+        if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) {
+            do_raise_exception(env, EXCP_FPE, pc);
+        } else {
+            UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
+        }
+    }
+}
+
+/*
+ * Float support.
+ * Single precision routines have an "s" suffix, double precision a
+ * "d" suffix, 32-bit integer "w", 64-bit integer "l", paired single "ps",
+ * paired single lower "pl", paired single upper "pu".
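+ * For example, helper_float_cvtd_s() below implements CVT.D.S (single
+ * to double) and helper_float_cvt_w_d() implements CVT.W.D (double to
+ * 32-bit word).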
+ */ + +/* unary operations, modifying fp status */ +uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0) +{ + fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt0; +} + +uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0) +{ + fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst0; +} + +uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t fdt2; + + fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0) +{ + uint64_t fdt2; + + fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0) +{ + uint64_t fdt2; + + fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint64_t helper_float_cvt_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_cvt_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0) +{ + uint32_t fst2; + uint32_t fsth2; + + fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); + fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + uint32_t wth2; + int excp, excph; + + wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); + excp = get_float_exception_flags(&env->active_fpu.fp_status); + if (excp & (float_flag_overflow | float_flag_invalid)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + + set_float_exception_flags(0, &env->active_fpu.fp_status); + wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status); + excph = get_float_exception_flags(&env->active_fpu.fp_status); + if (excph & (float_flag_overflow | float_flag_invalid)) { + wth2 = FP_TO_INT32_OVERFLOW; + } + + set_float_exception_flags(excp | excph, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + + return ((uint64_t)wth2 << 32) | wt2; +} + +uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t fst2; + + fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0) +{ + uint32_t fst2; + + fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0) +{ + uint32_t fst2; + + fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0) +{ + uint32_t wt2; + + wt2 = wt0; + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t 
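The cvt_l_* helpers above implement the legacy MIPS convention: if the conversion raised Invalid or Overflow, the result is replaced by FP_TO_INT64_OVERFLOW (2^63 - 1) regardless of sign. A plain-C sketch of the same policy (cvt_l_d_legacy is a hypothetical name; softfloat's exception-flag plumbing is omitted):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative analogue of helper_float_cvt_l_d's saturation:
 * NaN and out-of-range inputs all collapse to INT64_MAX. */
static int64_t cvt_l_d_legacy(double x)
{
    if (isnan(x) || x >= 0x1p63 || x < -0x1p63) {
        return INT64_MAX; /* FP_TO_INT64_OVERFLOW */
    }
    return (int64_t)x;
}

int main(void)
{
    printf("%lld\n", (long long)cvt_l_d_legacy(NAN));  /* 9223372036854775807 */
    printf("%lld\n", (long long)cvt_l_d_legacy(-1.5)); /* -1 (rounding mode aside) */
    return 0;
}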
helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0) +{ + uint32_t wt2; + + wt2 = wth0; + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_cvt_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_cvt_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_round_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_round_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_round_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_round_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_trunc_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_to_int64_round_to_zero(fdt0, + &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_trunc_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_trunc_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status); + if 
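All of the round_/trunc_/ceil_/floor_ helpers in this stretch share one pattern: force a rounding mode, convert, then restore the mode programmed in FCR31 (restore_rounding_mode). The same shape, expressed with C99 fenv on the host as a sketch:

#include <fenv.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as helper_float_round_w_d: set mode, convert, restore.
 * lrint() honours the current rounding mode like float64_to_int32. */
static int32_t round_w_d_demo(double x)
{
    int old = fegetround();
    fesetround(FE_TONEAREST);      /* float_round_nearest_even */
    int32_t r = (int32_t)lrint(x);
    fesetround(old);               /* restore_rounding_mode(env) */
    return r;
}

int main(void)
{
    printf("%d\n", round_w_d_demo(2.5)); /* 2: ties go to even */
    printf("%d\n", round_w_d_demo(3.5)); /* 4 */
    return 0;
}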
(get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_trunc_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_ceil_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_ceil_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_ceil_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_ceil_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_floor_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_floor_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_floor_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) 
{ + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_floor_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_cvt_2008_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_cvt_2008_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_cvt_2008_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_cvt_2008_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_round_2008_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_round_2008_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_round_2008_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_round_2008_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if 
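The *_2008 variants that begin here differ from the legacy helpers in exactly one case: an invalid conversion of a NaN yields 0, per IEEE 754-2008 (the R6/nan2008 behaviour), while ordinary overflow is left to the conversion routine's own saturation. A hypothetical side-by-side:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: the hypothetical legacy variant saturates everything. */
static int32_t trunc_w_s_legacy(float x)
{
    if (isnan(x) || x >= 0x1p31f || x < -0x1p31f) {
        return INT32_MAX;          /* FP_TO_INT32_OVERFLOW */
    }
    return (int32_t)x;
}

static int32_t trunc_w_s_2008(float x)
{
    if (isnan(x)) {
        return 0;                  /* nan2008: NaN converts to zero */
    }
    return trunc_w_s_legacy(x);
}

int main(void)
{
    printf("legacy: %d, 2008: %d\n", trunc_w_s_legacy(NAN), trunc_w_s_2008(NAN));
    return 0;
}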
(get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_trunc_2008_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_trunc_2008_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_trunc_2008_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_trunc_2008_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_ceil_2008_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_ceil_2008_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_ceil_2008_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_ceil_2008_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_floor_2008_l_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + dt2 = 
float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_floor_2008_l_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + dt2 = 0; + } + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_floor_2008_w_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float64_is_any_nan(fdt0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_floor_2008_w_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & float_flag_invalid) { + if (float32_is_any_nan(fst0)) { + wt2 = 0; + } + } + update_fcr31(env, GETPC()); + return wt2; +} + +/* unary operations, not modifying fp status */ +#define FLOAT_UNOP(name) \ +uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \ +{ \ + return float64_ ## name(fdt0); \ +} \ +uint32_t helper_float_ ## name ## _s(uint32_t fst0) \ +{ \ + return float32_ ## name(fst0); \ +} \ +uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \ +{ \ + uint32_t wt0; \ + uint32_t wth0; \ + \ + wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \ + wth0 = float32_ ## name(fdt0 >> 32); \ + return ((uint64_t)wth0 << 32) | wt0; \ +} +FLOAT_UNOP(abs) +FLOAT_UNOP(chs) +#undef FLOAT_UNOP + +/* MIPS specific unary operations */ +uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t fdt2; + + fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t fst2; + + fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t fdt2; + + fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); + fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t fst2; + + fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); + fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t fdt2; + + fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t fst2; + + fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; 
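FLOAT_UNOP generates abs/chs for the d, s and ps formats without touching fp_status; that is sound because both are raw sign-bit operations that cannot raise IEEE flags, and a paired single is simply two float32 encodings packed into a uint64 with the low single in bits 31:0. A sketch:

#include <stdint.h>
#include <stdio.h>

/* Paired-single packing as used throughout this file:
 * low single in bits 31:0, high single in bits 63:32. */
static uint64_t ps_pack(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)hi << 32) | lo;
}

/* float32_abs/float32_chs are pure sign-bit ops on the raw encoding,
 * so no fp_status update is needed (or possible). */
static uint32_t f32_abs_bits(uint32_t f) { return f & 0x7fffffff; }
static uint32_t f32_chs_bits(uint32_t f) { return f ^ 0x80000000; }

int main(void)
{
    uint32_t neg_one = 0xbf800000; /* -1.0f */
    uint64_t ps = ps_pack(neg_one, neg_one);
    uint64_t abs_ps = ps_pack(f32_abs_bits(ps & 0xffffffff),
                              f32_abs_bits(ps >> 32));
    printf("%016llx\n", (unsigned long long)abs_ps); /* 3f8000003f800000 */
    return 0;
}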
+} + +uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t fst2; + uint32_t fsth2; + + fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF, + &env->active_fpu.fp_status); + fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t fdt2; + + fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); + fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t fst2; + + fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); + fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t fst2; + uint32_t fsth2; + + fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); + fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status); + fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); + fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +#define FLOAT_RINT(name, bits) \ +uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ + uint ## bits ## _t fs) \ +{ \ + uint ## bits ## _t fdret; \ + \ + fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \ + update_fcr31(env, GETPC()); \ + return fdret; \ +} + +FLOAT_RINT(rint_s, 32) +FLOAT_RINT(rint_d, 64) +#undef FLOAT_RINT + +#define FLOAT_CLASS_SIGNALING_NAN 0x001 +#define FLOAT_CLASS_QUIET_NAN 0x002 +#define FLOAT_CLASS_NEGATIVE_INFINITY 0x004 +#define FLOAT_CLASS_NEGATIVE_NORMAL 0x008 +#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010 +#define FLOAT_CLASS_NEGATIVE_ZERO 0x020 +#define FLOAT_CLASS_POSITIVE_INFINITY 0x040 +#define FLOAT_CLASS_POSITIVE_NORMAL 0x080 +#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100 +#define FLOAT_CLASS_POSITIVE_ZERO 0x200 + +#define FLOAT_CLASS(name, bits) \ +uint ## bits ## _t float_ ## name(uint ## bits ## _t arg, \ + float_status *status) \ +{ \ + if (float ## bits ## _is_signaling_nan(arg, status)) { \ + return FLOAT_CLASS_SIGNALING_NAN; \ + } else if (float ## bits ## _is_quiet_nan(arg, status)) { \ + return FLOAT_CLASS_QUIET_NAN; \ + } else if (float ## bits ## _is_neg(arg)) { \ + if (float ## bits ## _is_infinity(arg)) { \ + return FLOAT_CLASS_NEGATIVE_INFINITY; \ + } else if (float ## bits ## _is_zero(arg)) { \ + return FLOAT_CLASS_NEGATIVE_ZERO; \ + } else if (float ## bits ## _is_zero_or_denormal(arg)) { \ + return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \ + } else { \ + return FLOAT_CLASS_NEGATIVE_NORMAL; \ + } \ + } else { \ + if (float ## bits ## _is_infinity(arg)) { \ + return FLOAT_CLASS_POSITIVE_INFINITY; \ + } else if (float ## bits ## _is_zero(arg)) { \ + return FLOAT_CLASS_POSITIVE_ZERO; \ + } else if (float ## bits ## _is_zero_or_denormal(arg)) { \ + return FLOAT_CLASS_POSITIVE_SUBNORMAL; \ + } else { \ + return FLOAT_CLASS_POSITIVE_NORMAL; \ + } \ + } \ +} \ + \ +uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ + uint ## bits ## _t arg) \ +{ \ + return float_ ## name(arg, &env->active_fpu.fp_status); \ +} + +FLOAT_CLASS(class_s, 32) +FLOAT_CLASS(class_d, 64) +#undef FLOAT_CLASS + +/* binary operations */ +#define FLOAT_BINOP(name) \ +uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \ + 
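The FLOAT_CLASS macro above returns a one-hot mask for CLASS.fmt. The same mask can be reproduced on the host with C99 fpclassify(), as a sketch; ISO C cannot portably observe the quiet/signalling NaN distinction, so both collapse to the quiet-NaN value here:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Same one-hot encoding as the FLOAT_CLASS_* constants above. */
#define CLASS_QNAN     0x002
#define CLASS_NEG_INF  0x004
#define CLASS_NEG_NORM 0x008
#define CLASS_NEG_SUB  0x010
#define CLASS_NEG_ZERO 0x020
#define CLASS_POS_INF  0x040
#define CLASS_POS_NORM 0x080
#define CLASS_POS_SUB  0x100
#define CLASS_POS_ZERO 0x200

static uint32_t class_d(double x)
{
    int neg = signbit(x) != 0;
    switch (fpclassify(x)) {
    case FP_NAN:       return CLASS_QNAN;
    case FP_INFINITE:  return neg ? CLASS_NEG_INF : CLASS_POS_INF;
    case FP_ZERO:      return neg ? CLASS_NEG_ZERO : CLASS_POS_ZERO;
    case FP_SUBNORMAL: return neg ? CLASS_NEG_SUB : CLASS_POS_SUB;
    default:           return neg ? CLASS_NEG_NORM : CLASS_POS_NORM;
    }
}

int main(void)
{
    printf("0x%03x\n", class_d(-0.0)); /* 0x020 */
    return 0;
}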
uint64_t fdt0, uint64_t fdt1) \ +{ \ + uint64_t dt2; \ + \ + dt2 = float64_ ## name(fdt0, fdt1, &env->active_fpu.fp_status);\ + update_fcr31(env, GETPC()); \ + return dt2; \ +} \ + \ +uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \ + uint32_t fst0, uint32_t fst1) \ +{ \ + uint32_t wt2; \ + \ + wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status);\ + update_fcr31(env, GETPC()); \ + return wt2; \ +} \ + \ +uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \ + uint64_t fdt0, \ + uint64_t fdt1) \ +{ \ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; \ + uint32_t fsth0 = fdt0 >> 32; \ + uint32_t fst1 = fdt1 & 0XFFFFFFFF; \ + uint32_t fsth1 = fdt1 >> 32; \ + uint32_t wt2; \ + uint32_t wth2; \ + \ + wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status); \ + wth2 = float32_ ## name(fsth0, fsth1, &env->active_fpu.fp_status); \ + update_fcr31(env, GETPC()); \ + return ((uint64_t)wth2 << 32) | wt2; \ +} + +FLOAT_BINOP(add) +FLOAT_BINOP(sub) +FLOAT_BINOP(mul) +FLOAT_BINOP(div) +#undef FLOAT_BINOP + +/* MIPS specific binary operations */ +uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) +{ + fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status); + fdt2 = float64_chs(float64_sub(fdt2, float64_one, + &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2) +{ + fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); + fst2 = float32_chs(float32_sub(fst2, float32_one, + &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) +{ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; + uint32_t fsth0 = fdt0 >> 32; + uint32_t fst2 = fdt2 & 0XFFFFFFFF; + uint32_t fsth2 = fdt2 >> 32; + + fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); + fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status); + fst2 = float32_chs(float32_sub(fst2, float32_one, + &env->active_fpu.fp_status)); + fsth2 = float32_chs(float32_sub(fsth2, float32_one, + &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) +{ + fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status); + fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status); + fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, + &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2) +{ + fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); + fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status); + fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, + &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) +{ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; + uint32_t fsth0 = fdt0 >> 32; + uint32_t fst2 = fdt2 & 0XFFFFFFFF; + uint32_t fsth2 = fdt2 >> 32; + + fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); + fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status); + fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status); + fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status); + fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, + &env->active_fpu.fp_status)); + fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, + 
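recip1/recip2 (and rsqrt1/rsqrt2) above are the two halves of a Newton-Raphson refinement: recip1 supplies a seed r for 1/x, recip2 computes the correction term -(x*r - 1), and guest code combines them with a multiply-add, giving r' = r + r*(-(x*r - 1)) = r*(2 - x*r). A numeric sketch of one step:

#include <stdio.h>

/* recip2 as defined above: -(x*r - 1). */
static double recip2(double x, double r)
{
    return -(x * r - 1.0);
}

int main(void)
{
    double x = 3.0;
    double r = 0.3;               /* crude seed for 1/3; recip1 would supply this */
    r = r + r * recip2(x, r);     /* one Newton-Raphson step (guest MADD) */
    printf("%.12f\n", r);         /* 0.330000000000: closer to 1/3 */
    return 0;
}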
&env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1) +{ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; + uint32_t fsth0 = fdt0 >> 32; + uint32_t fst1 = fdt1 & 0XFFFFFFFF; + uint32_t fsth1 = fdt1 >> 32; + uint32_t fst2; + uint32_t fsth2; + + fst2 = float32_add(fst0, fsth0, &env->active_fpu.fp_status); + fsth2 = float32_add(fst1, fsth1, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1) +{ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; + uint32_t fsth0 = fdt0 >> 32; + uint32_t fst1 = fdt1 & 0XFFFFFFFF; + uint32_t fsth1 = fdt1 >> 32; + uint32_t fst2; + uint32_t fsth2; + + fst2 = float32_mul(fst0, fsth0, &env->active_fpu.fp_status); + fsth2 = float32_mul(fst1, fsth1, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +#define FLOAT_MINMAX(name, bits, minmaxfunc) \ +uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ + uint ## bits ## _t fs, \ + uint ## bits ## _t ft) \ +{ \ + uint ## bits ## _t fdret; \ + \ + fdret = float ## bits ## _ ## minmaxfunc(fs, ft, \ + &env->active_fpu.fp_status); \ + update_fcr31(env, GETPC()); \ + return fdret; \ +} + +FLOAT_MINMAX(max_s, 32, maxnum) +FLOAT_MINMAX(max_d, 64, maxnum) +FLOAT_MINMAX(maxa_s, 32, maxnummag) +FLOAT_MINMAX(maxa_d, 64, maxnummag) + +FLOAT_MINMAX(min_s, 32, minnum) +FLOAT_MINMAX(min_d, 64, minnum) +FLOAT_MINMAX(mina_s, 32, minnummag) +FLOAT_MINMAX(mina_d, 64, minnummag) +#undef FLOAT_MINMAX + +/* ternary operations */ +#define UNFUSED_FMA(prefix, a, b, c, flags) \ +{ \ + a = prefix##_mul(a, b, &env->active_fpu.fp_status); \ + if ((flags) & float_muladd_negate_c) { \ + a = prefix##_sub(a, c, &env->active_fpu.fp_status); \ + } else { \ + a = prefix##_add(a, c, &env->active_fpu.fp_status); \ + } \ + if ((flags) & float_muladd_negate_result) { \ + a = prefix##_chs(a); \ + } \ +} + +/* FMA based operations */ +#define FLOAT_FMA(name, type) \ +uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \ + uint64_t fdt0, uint64_t fdt1, \ + uint64_t fdt2) \ +{ \ + UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type); \ + update_fcr31(env, GETPC()); \ + return fdt0; \ +} \ + \ +uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \ + uint32_t fst0, uint32_t fst1, \ + uint32_t fst2) \ +{ \ + UNFUSED_FMA(float32, fst0, fst1, fst2, type); \ + update_fcr31(env, GETPC()); \ + return fst0; \ +} \ + \ +uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \ + uint64_t fdt0, uint64_t fdt1, \ + uint64_t fdt2) \ +{ \ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; \ + uint32_t fsth0 = fdt0 >> 32; \ + uint32_t fst1 = fdt1 & 0XFFFFFFFF; \ + uint32_t fsth1 = fdt1 >> 32; \ + uint32_t fst2 = fdt2 & 0XFFFFFFFF; \ + uint32_t fsth2 = fdt2 >> 32; \ + \ + UNFUSED_FMA(float32, fst0, fst1, fst2, type); \ + UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type); \ + update_fcr31(env, GETPC()); \ + return ((uint64_t)fsth0 << 32) | fst0; \ +} +FLOAT_FMA(madd, 0) +FLOAT_FMA(msub, float_muladd_negate_c) +FLOAT_FMA(nmadd, float_muladd_negate_result) +FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c) +#undef FLOAT_FMA + +#define FLOAT_FMADDSUB(name, bits, muladd_arg) \ +uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ + uint ## bits ## _t fs, \ + uint ## bits ## _t ft, \ + uint ## bits ## _t fd) \ +{ \ + uint ## bits ## _t fdret; \ + \ + fdret = float ## bits ## 
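UNFUSED_FMA rounds twice on purpose: pre-R6 MADD.fmt is an unfused multiply-add, while the R6 MADDF/MSUBF helpers right after it call float##bits##_muladd, a true fused operation with a single rounding. The difference is observable; a host-side contrast using libm's fma():

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* Chosen so the product needs more precision than a double holds:
     * the fused and unfused results then differ. */
    double a = 1.0 + 0x1p-27, b = 1.0 + 0x1p-27, c = -(1.0 + 0x1p-26);

    double unfused = a * b + c;  /* rounds after the mul, then after the add: 0 */
    double fused = fma(a, b, c); /* single rounding of a*b + c: 2^-54 */

    printf("unfused: %g\nfused:   %g\n", unfused, fused);
    return 0;
}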
_muladd(fs, ft, fd, muladd_arg, \ + &env->active_fpu.fp_status); \ + update_fcr31(env, GETPC()); \ + return fdret; \ +} + +FLOAT_FMADDSUB(maddf_s, 32, 0) +FLOAT_FMADDSUB(maddf_d, 64, 0) +FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_product) +FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_product) +#undef FLOAT_FMADDSUB + +/* compare operations */ +#define FOP_COND_D(op, cond) \ +void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ + uint64_t fdt1, int cc) \ +{ \ + int c; \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ +} \ +void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ + uint64_t fdt1, int cc) \ +{ \ + int c; \ + fdt0 = float64_abs(fdt0); \ + fdt1 = float64_abs(fdt1); \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ +} + +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float64_unordered_quiet() is still called. + */ +FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status), 0)) +FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status)) +FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, + &env->active_fpu.fp_status)) +FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_eq_quiet(fdt0, fdt1, + &env->active_fpu.fp_status)) +FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, + &env->active_fpu.fp_status)) +FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_lt_quiet(fdt0, fdt1, + &env->active_fpu.fp_status)) +FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, + &env->active_fpu.fp_status)) +FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_le_quiet(fdt0, fdt1, + &env->active_fpu.fp_status)) +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float64_unordered() is still called. + */ +FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status), 0)) +FOP_COND_D(ngle, float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status)) +FOP_COND_D(seq, float64_eq(fdt0, fdt1, + &env->active_fpu.fp_status)) +FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_eq(fdt0, fdt1, + &env->active_fpu.fp_status)) +FOP_COND_D(lt, float64_lt(fdt0, fdt1, + &env->active_fpu.fp_status)) +FOP_COND_D(nge, float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_lt(fdt0, fdt1, + &env->active_fpu.fp_status)) +FOP_COND_D(le, float64_le(fdt0, fdt1, + &env->active_fpu.fp_status)) +FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_le(fdt0, fdt1, + &env->active_fpu.fp_status)) + +#define FOP_COND_S(op, cond) \ +void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ + uint32_t fst1, int cc) \ +{ \ + int c; \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ +} \ +void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ + uint32_t fst1, int cc) \ +{ \ + int c; \ + fst0 = float32_abs(fst0); \ + fst1 = float32_abs(fst1); \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ +} + +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered_quiet() is still called. 
+ */ +FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status), 0)) +FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status)) +FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, + &env->active_fpu.fp_status)) +FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_eq_quiet(fst0, fst1, + &env->active_fpu.fp_status)) +FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, + &env->active_fpu.fp_status)) +FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt_quiet(fst0, fst1, + &env->active_fpu.fp_status)) +FOP_COND_S(ole, float32_le_quiet(fst0, fst1, + &env->active_fpu.fp_status)) +FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_le_quiet(fst0, fst1, + &env->active_fpu.fp_status)) +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered() is still called. + */ +FOP_COND_S(sf, (float32_unordered(fst1, fst0, + &env->active_fpu.fp_status), 0)) +FOP_COND_S(ngle, float32_unordered(fst1, fst0, + &env->active_fpu.fp_status)) +FOP_COND_S(seq, float32_eq(fst0, fst1, + &env->active_fpu.fp_status)) +FOP_COND_S(ngl, float32_unordered(fst1, fst0, + &env->active_fpu.fp_status) + || float32_eq(fst0, fst1, + &env->active_fpu.fp_status)) +FOP_COND_S(lt, float32_lt(fst0, fst1, + &env->active_fpu.fp_status)) +FOP_COND_S(nge, float32_unordered(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt(fst0, fst1, + &env->active_fpu.fp_status)) +FOP_COND_S(le, float32_le(fst0, fst1, + &env->active_fpu.fp_status)) +FOP_COND_S(ngt, float32_unordered(fst1, fst0, + &env->active_fpu.fp_status) + || float32_le(fst0, fst1, + &env->active_fpu.fp_status)) + +#define FOP_COND_PS(op, condl, condh) \ +void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \ + uint64_t fdt1, int cc) \ +{ \ + uint32_t fst0, fsth0, fst1, fsth1; \ + int ch, cl; \ + fst0 = fdt0 & 0XFFFFFFFF; \ + fsth0 = fdt0 >> 32; \ + fst1 = fdt1 & 0XFFFFFFFF; \ + fsth1 = fdt1 >> 32; \ + cl = condl; \ + ch = condh; \ + update_fcr31(env, GETPC()); \ + if (cl) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ + if (ch) \ + SET_FP_COND(cc + 1, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc + 1, env->active_fpu); \ +} \ +void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \ + uint64_t fdt1, int cc) \ +{ \ + uint32_t fst0, fsth0, fst1, fsth1; \ + int ch, cl; \ + fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \ + fsth0 = float32_abs(fdt0 >> 32); \ + fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \ + fsth1 = float32_abs(fdt1 >> 32); \ + cl = condl; \ + ch = condh; \ + update_fcr31(env, GETPC()); \ + if (cl) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ + if (ch) \ + SET_FP_COND(cc + 1, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc + 1, env->active_fpu); \ +} + +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered_quiet() is still called. 
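Two details of the FOP_COND_* machinery are easy to miss: the c.f/c.sf conditions use a comma expression so the compare still runs (and can raise Invalid on signalling NaNs) even though the predicate is the constant 0, and the result lands in one of eight FCC bits, FCC0 at FCR31 bit 23 and FCC1..FCC7 at bits 25..31, matching the case-25 packing in cfc1/ctc1 earlier. A sketch with those bit positions as the stated assumption:

#include <stdint.h>
#include <stdio.h>

/* Assumed FCC layout (consistent with the cfc1/ctc1 case-25 shifts):
 * FCC0 is FCR31 bit 23, FCC1..FCC7 are bits 25..31. */
static uint32_t set_fp_cond(uint32_t fcr31, int cc, int val)
{
    int bit = (cc == 0) ? 23 : 24 + cc;
    return val ? (fcr31 | (1u << bit)) : (fcr31 & ~(1u << bit));
}

int main(void)
{
    /* The comma operator: the left operand runs for its side effects,
     * the value of the whole expression is the constant 0. */
    int c = (printf("compare runs\n"), 0);

    uint32_t fcr31 = set_fp_cond(0, 1, !c);
    printf("fcr31 = 0x%08x\n", fcr31); /* bit 25 set: 0x02000000 */
    return 0;
}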
+ */ +FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status), 0), + (float32_unordered_quiet(fsth1, fsth0, + &env->active_fpu.fp_status), 0)) +FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status), + float32_unordered_quiet(fsth1, fsth0, + &env->active_fpu.fp_status)) +FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, + &env->active_fpu.fp_status), + float32_eq_quiet(fsth0, fsth1, + &env->active_fpu.fp_status)) +FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_eq_quiet(fst0, fst1, + &env->active_fpu.fp_status), + float32_unordered_quiet(fsth1, fsth0, + &env->active_fpu.fp_status) + || float32_eq_quiet(fsth0, fsth1, + &env->active_fpu.fp_status)) +FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, + &env->active_fpu.fp_status), + float32_lt_quiet(fsth0, fsth1, + &env->active_fpu.fp_status)) +FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt_quiet(fst0, fst1, + &env->active_fpu.fp_status), + float32_unordered_quiet(fsth1, fsth0, + &env->active_fpu.fp_status) + || float32_lt_quiet(fsth0, fsth1, + &env->active_fpu.fp_status)) +FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, + &env->active_fpu.fp_status), + float32_le_quiet(fsth0, fsth1, + &env->active_fpu.fp_status)) +FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_le_quiet(fst0, fst1, + &env->active_fpu.fp_status), + float32_unordered_quiet(fsth1, fsth0, + &env->active_fpu.fp_status) + || float32_le_quiet(fsth0, fsth1, + &env->active_fpu.fp_status)) +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered() is still called. + */ +FOP_COND_PS(sf, (float32_unordered(fst1, fst0, + &env->active_fpu.fp_status), 0), + (float32_unordered(fsth1, fsth0, + &env->active_fpu.fp_status), 0)) +FOP_COND_PS(ngle, float32_unordered(fst1, fst0, + &env->active_fpu.fp_status), + float32_unordered(fsth1, fsth0, + &env->active_fpu.fp_status)) +FOP_COND_PS(seq, float32_eq(fst0, fst1, + &env->active_fpu.fp_status), + float32_eq(fsth0, fsth1, + &env->active_fpu.fp_status)) +FOP_COND_PS(ngl, float32_unordered(fst1, fst0, + &env->active_fpu.fp_status) + || float32_eq(fst0, fst1, + &env->active_fpu.fp_status), + float32_unordered(fsth1, fsth0, + &env->active_fpu.fp_status) + || float32_eq(fsth0, fsth1, + &env->active_fpu.fp_status)) +FOP_COND_PS(lt, float32_lt(fst0, fst1, + &env->active_fpu.fp_status), + float32_lt(fsth0, fsth1, + &env->active_fpu.fp_status)) +FOP_COND_PS(nge, float32_unordered(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt(fst0, fst1, + &env->active_fpu.fp_status), + float32_unordered(fsth1, fsth0, + &env->active_fpu.fp_status) + || float32_lt(fsth0, fsth1, + &env->active_fpu.fp_status)) +FOP_COND_PS(le, float32_le(fst0, fst1, + &env->active_fpu.fp_status), + float32_le(fsth0, fsth1, + &env->active_fpu.fp_status)) +FOP_COND_PS(ngt, float32_unordered(fst1, fst0, + &env->active_fpu.fp_status) + || float32_le(fst0, fst1, + &env->active_fpu.fp_status), + float32_unordered(fsth1, fsth0, + &env->active_fpu.fp_status) + || float32_le(fsth0, fsth1, + &env->active_fpu.fp_status)) + +/* R6 compare operations */ +#define FOP_CONDN_D(op, cond) \ +uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ + uint64_t fdt1) \ +{ \ + uint64_t c; \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) { \ + return -1; \ + } else { \ + return 0; \ + } \ +} + +/* + * NOTE: the comma operator will make "cond" to eval to false, + * 
but float64_unordered_quiet() is still called. + */ +FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status), 0)) +FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status))) +FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_eq_quiet(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_lt_quiet(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_le_quiet(fdt0, fdt1, + &env->active_fpu.fp_status))) +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float64_unordered() is still called.\ + */ +FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status), 0)) +FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status))) +FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_eq(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_lt(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(sle, (float64_le(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_le(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_le_quiet(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_lt_quiet(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_lt_quiet(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_lt_quiet(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(sor, (float64_le(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_le(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_lt(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_lt(fdt0, fdt1, + &env->active_fpu.fp_status))) +FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0, + &env->active_fpu.fp_status) + || float64_lt(fdt0, fdt1, + &env->active_fpu.fp_status))) + +#define FOP_CONDN_S(op, cond) \ +uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ + uint32_t fst1) \ +{ \ + uint64_t c; \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) { \ + return -1; \ + } else { \ + return 0; \ + } \ +} + +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered_quiet() is still called. 
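The R6 CMP.cond.fmt helpers drop the condition-code model entirely: they return an all-ones or all-zeros mask in the destination FPR, which guest code then consumes branch-free with SEL.fmt. A sketch of that idiom:

#include <stdint.h>
#include <stdio.h>

/* R6 style: the compare yields a full-width mask... */
static uint64_t r6_cmp_lt(double a, double b)
{
    return (a < b) ? ~0ULL : 0;
}

/* ...which SEL.fmt-like code combines without a branch. */
static uint64_t sel(uint64_t mask, uint64_t if_true, uint64_t if_false)
{
    return (if_true & mask) | (if_false & ~mask);
}

int main(void)
{
    uint64_t m = r6_cmp_lt(1.0, 2.0);
    printf("%llu\n", (unsigned long long)sel(m, 111, 222)); /* 111 */
    return 0;
}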
+ */ +FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status), 0)) +FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status))) +FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_eq_quiet(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt_quiet(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_le_quiet(fst0, fst1, + &env->active_fpu.fp_status))) +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered() is still called. + */ +FOP_CONDN_S(saf, (float32_unordered(fst1, fst0, + &env->active_fpu.fp_status), 0)) +FOP_CONDN_S(sun, (float32_unordered(fst1, fst0, + &env->active_fpu.fp_status))) +FOP_CONDN_S(seq, (float32_eq(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0, + &env->active_fpu.fp_status) + || float32_eq(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(slt, (float32_lt(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(sult, (float32_unordered(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(sle, (float32_le(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(sule, (float32_unordered(fst1, fst0, + &env->active_fpu.fp_status) + || float32_le(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_le_quiet(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt_quiet(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt_quiet(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(sor, (float32_le(fst1, fst0, + &env->active_fpu.fp_status) + || float32_le(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(sune, (float32_unordered(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt(fst0, fst1, + &env->active_fpu.fp_status))) +FOP_CONDN_S(sne, (float32_lt(fst1, fst0, + &env->active_fpu.fp_status) + || float32_lt(fst0, fst1, + &env->active_fpu.fp_status))) diff --git a/qemu/target/mips/helper.c b/qemu/target/mips/helper.c new file mode 100644 index 00000000..35a512a8 --- /dev/null +++ b/qemu/target/mips/helper.c @@ -0,0 +1,1498 @@ +/* + * MIPS emulation helpers for qemu. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "internal.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "hw/mips/cpudevs.h"
+
+enum {
+    TLBRET_XI = -6,
+    TLBRET_RI = -5,
+    TLBRET_DIRTY = -4,
+    TLBRET_INVALID = -3,
+    TLBRET_NOMATCH = -2,
+    TLBRET_BADADDR = -1,
+    TLBRET_MATCH = 0
+};
+
+
+/* no MMU emulation */
+int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
+                       target_ulong address, int rw, int access_type)
+{
+    *physical = address;
+    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+    return TLBRET_MATCH;
+}
+
+/* fixed mapping MMU emulation */
+int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
+                          target_ulong address, int rw, int access_type)
+{
+    if (address <= (int32_t)0x7FFFFFFFUL) {
+        if (!(env->CP0_Status & (1 << CP0St_ERL))) {
+            *physical = address + 0x40000000UL;
+        } else {
+            *physical = address;
+        }
+    } else if (address <= (int32_t)0xBFFFFFFFUL) {
+        *physical = address & 0x1FFFFFFF;
+    } else {
+        *physical = address;
+    }
+
+    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+    return TLBRET_MATCH;
+}
+
+/* MIPS32/MIPS64 R4000-style MMU emulation */
+int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
+                    target_ulong address, int rw, int access_type)
+{
+    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+    uint32_t MMID = env->CP0_MemoryMapID;
+    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+    uint32_t tlb_mmid;
+    int i;
+
+    MMID = mi ? MMID : (uint32_t) ASID;
+
+    for (i = 0; i < env->tlb->tlb_in_use; i++) {
+        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
+        /* 1k pages are not supported. */
+        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+        target_ulong tag = address & ~mask;
+        target_ulong VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+        tag &= env->SEGMask;
+#endif
+
+        /* Check ASID/MMID, virtual page number & size */
+        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
+            /* TLB match */
+            int n = !!(address & mask & ~(mask >> 1));
+            /* Check access rights */
+            if (!(n ? tlb->V1 : tlb->V0)) {
+                return TLBRET_INVALID;
+            }
+            if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
+                return TLBRET_XI;
+            }
+            if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
+                return TLBRET_RI;
+            }
+            if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
+                *physical = tlb->PFN[n] | (address & (mask >> 1));
+                *prot = PAGE_READ;
+                if (n ? tlb->D1 : tlb->D0) {
+                    *prot |= PAGE_WRITE;
+                }
+                if (!(n ? tlb->XI1 : tlb->XI0)) {
+                    *prot |= PAGE_EXEC;
+                }
+                return TLBRET_MATCH;
+            }
+            return TLBRET_DIRTY;
+        }
+    }
+    return TLBRET_NOMATCH;
+}
+
+static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
+{
+    /*
+     * Interpret access control mode and mmu_idx.
+     *           AdE?     TLB?
+     *      AM  K S U E  K S U E
+     * UK    0  0 1 1 0  0 - - 0
+     * MK    1  0 1 1 0  1 - - !eu
+     * MSK   2  0 0 1 0  1 1 - !eu
+     * MUSK  3  0 0 0 0  1 1 1 !eu
+     * MUSUK 4  0 0 0 0  0 1 1 0
+     * USK   5  0 0 1 0  0 0 - 0
+     * -     6  - - - -  - - - -
+     * UUSK  7  0 0 0 0  0 0 0 0
+     */
+    int32_t adetlb_mask;
+
+    switch (mmu_idx) {
+    case 3: /* ERL */
+        /* If EU is set, always unmapped */
+        if (eu) {
+            return 0;
+        }
+        /* fall through */
+    case MIPS_HFLAG_KM:
+        /* Never AdE, TLB mapped if AM={1,2,3} */
+        adetlb_mask = 0x70000000;
+        goto check_tlb;
+
+    case MIPS_HFLAG_SM:
+        /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
+        adetlb_mask = 0xc0380000;
+        goto check_ade;
+
+    case MIPS_HFLAG_UM:
+        /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
+        adetlb_mask = 0xe4180000;
+        /* fall through */
+    check_ade:
+        /* does this AM cause AdE in current execution mode */
+        if ((adetlb_mask << am) < 0) {
+            return TLBRET_BADADDR;
+        }
+        adetlb_mask <<= 8;
+        /* fall through */
+    check_tlb:
+        /* is this AM mapped in current execution mode */
+        return ((adetlb_mask << am) < 0);
+    default:
+        assert(0);
+        return TLBRET_BADADDR;
+    };
+}
+
+static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
+                                    int *prot, target_ulong real_address,
+                                    int rw, int access_type, int mmu_idx,
+                                    unsigned int am, bool eu,
+                                    target_ulong segmask,
+                                    hwaddr physical_base)
+{
+    int mapped = is_seg_am_mapped(am, eu, mmu_idx);
+
+    if (mapped < 0) {
+        /* is_seg_am_mapped can report TLBRET_BADADDR */
+        return mapped;
+    } else if (mapped) {
+        /* The segment is TLB mapped */
+        return env->tlb->map_address(env, physical, prot, real_address, rw,
+                                     access_type);
+    } else {
+        /* The segment is unmapped */
+        *physical = physical_base | (real_address & segmask);
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return TLBRET_MATCH;
+    }
+}
+
+static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
+                                       int *prot, target_ulong real_address,
+                                       int rw, int access_type, int mmu_idx,
+                                       uint16_t segctl, target_ulong segmask)
+{
+    unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
+    bool eu = (segctl >> CP0SC_EU) & 1;
+    hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;
+
+    return get_seg_physical_address(env, physical, prot, real_address, rw,
+                                    access_type, mmu_idx, am, eu, segmask,
+                                    pa & ~(hwaddr)segmask);
+}
+
+static int get_physical_address(CPUMIPSState *env, hwaddr *physical,
+                                int *prot, target_ulong real_address,
+                                int rw, int access_type, int mmu_idx)
+{
+    /* User mode can only access useg/xuseg */
+#if defined(TARGET_MIPS64)
+    int user_mode = mmu_idx == MIPS_HFLAG_UM;
+    int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
+    int kernel_mode = !user_mode && !supervisor_mode;
+    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
+    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+#endif
+    int ret = TLBRET_MATCH;
+    /* effective address (modified for KVM T&E kernel segments) */
+    target_ulong address = real_address;
+
+#define USEG_LIMIT      ((target_ulong)(int32_t)0x7FFFFFFFUL)
+#define KSEG0_BASE      ((target_ulong)(int32_t)0x80000000UL)
+#define KSEG1_BASE      ((target_ulong)(int32_t)0xA0000000UL)
+#define KSEG2_BASE      ((target_ulong)(int32_t)0xC0000000UL)
+#define KSEG3_BASE      ((target_ulong)(int32_t)0xE0000000UL)
+
+#define KVM_KSEG0_BASE  ((target_ulong)(int32_t)0x40000000UL)
+#define KVM_KSEG2_BASE  ((target_ulong)(int32_t)0x60000000UL)
+
+#if 0
+    if (mips_um_ksegs_enabled()) {
+        /* KVM T&E adds guest kernel segments in useg */
+        if (real_address >= KVM_KSEG0_BASE) {
+            if (real_address < KVM_KSEG2_BASE) {
+                /* kseg0 */
+
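The core of r4k_map_address is the mask arithmetic: PageMask widens the page, the tag comparison ignores the masked bits, and address & mask & ~(mask >> 1) selects the even or odd page of the entry pair. A standalone sketch for 4 KiB pages, assuming TARGET_PAGE_MASK is 0xfffff000:

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_MASK 0xfffff000u /* assumed 4 KiB target pages */

int main(void)
{
    uint32_t page_mask = 0; /* CP0 PageMask == 0: 4 KiB pages */
    uint32_t mask = page_mask | ~(TARGET_PAGE_MASK << 1); /* 0x1fff */

    uint32_t address = 0x00401abc;
    uint32_t tag = address & ~mask;            /* compare against VPN & ~mask */
    int n = !!(address & mask & ~(mask >> 1)); /* bit 12 picks the odd page */

    printf("tag=0x%08x n=%d\n", tag, n); /* tag=0x00400000 n=1 */
    return 0;
}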
address += KSEG0_BASE - KVM_KSEG0_BASE; + } else if (real_address <= USEG_LIMIT) { + /* kseg2/3 */ + address += KSEG2_BASE - KVM_KSEG2_BASE; + } + } + } +#endif + + if (address <= USEG_LIMIT) { + /* useg */ + uint16_t segctl; + + if (address >= 0x40000000UL) { + segctl = env->CP0_SegCtl2; + } else { + segctl = env->CP0_SegCtl2 >> 16; + } + ret = get_segctl_physical_address(env, physical, prot, + real_address, rw, access_type, + mmu_idx, segctl, 0x3FFFFFFF); +#if defined(TARGET_MIPS64) + } else if (address < 0x4000000000000000ULL) { + /* xuseg */ + if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) { + ret = env->tlb->map_address(env, physical, prot, + real_address, rw, access_type); + } else { + ret = TLBRET_BADADDR; + } + } else if (address < 0x8000000000000000ULL) { + /* xsseg */ + if ((supervisor_mode || kernel_mode) && + SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) { + ret = env->tlb->map_address(env, physical, prot, + real_address, rw, access_type); + } else { + ret = TLBRET_BADADDR; + } + } else if (address < 0xC000000000000000ULL) { + /* xkphys */ + if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) { + /* KX/SX/UX bit to check for each xkphys EVA access mode */ + static const uint8_t am_ksux[8] = { + [CP0SC_AM_UK] = (1u << CP0St_KX), + [CP0SC_AM_MK] = (1u << CP0St_KX), + [CP0SC_AM_MSK] = (1u << CP0St_SX), + [CP0SC_AM_MUSK] = (1u << CP0St_UX), + [CP0SC_AM_MUSUK] = (1u << CP0St_UX), + [CP0SC_AM_USK] = (1u << CP0St_SX), + [6] = (1u << CP0St_KX), + [CP0SC_AM_UUSK] = (1u << CP0St_UX), + }; + unsigned int am = CP0SC_AM_UK; + unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR; + + if (xr & (1 << ((address >> 59) & 0x7))) { + am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM; + } + /* Does CP0_Status.KX/SX/UX permit the access mode (am) */ + if (env->CP0_Status & am_ksux[am]) { + ret = get_seg_physical_address(env, physical, prot, + real_address, rw, access_type, + mmu_idx, am, false, env->PAMask, + 0); + } else { + ret = TLBRET_BADADDR; + } + } else { + ret = TLBRET_BADADDR; + } + } else if (address < 0xFFFFFFFF80000000ULL) { + /* xkseg */ + if (kernel_mode && KX && + address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) { + ret = env->tlb->map_address(env, physical, prot, + real_address, rw, access_type); + } else { + ret = TLBRET_BADADDR; + } +#endif + } else if (address < KSEG1_BASE) { + /* kseg0 */ + ret = get_segctl_physical_address(env, physical, prot, real_address, rw, + access_type, mmu_idx, + env->CP0_SegCtl1 >> 16, 0x1FFFFFFF); + } else if (address < KSEG2_BASE) { + /* kseg1 */ + ret = get_segctl_physical_address(env, physical, prot, real_address, rw, + access_type, mmu_idx, + env->CP0_SegCtl1, 0x1FFFFFFF); + } else if (address < KSEG3_BASE) { + /* sseg (kseg2) */ + ret = get_segctl_physical_address(env, physical, prot, real_address, rw, + access_type, mmu_idx, + env->CP0_SegCtl0 >> 16, 0x1FFFFFFF); + } else { + /* + * kseg3 + * XXX: debug segment is not emulated + */ + ret = get_segctl_physical_address(env, physical, prot, real_address, rw, + access_type, mmu_idx, + env->CP0_SegCtl0, 0x1FFFFFFF); + } + return ret; +} + +void cpu_mips_tlb_flush(CPUMIPSState *env) +{ + /* Flush qemu's TLB and discard all shadowed entries. */ + tlb_flush(env_cpu(env)); + env->tlb->tlb_in_use = env->tlb->nb_tlb; +} + +/* Called for updates to CP0_Status. 
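With the default SegCtl values, the segctl path above reduces to the textbook MIPS32 rule: kseg0 and kseg1 are unmapped and both strip down to the low 512 MiB of physical address space (segmask 0x1FFFFFFF). A sketch:

#include <stdint.h>
#include <stdio.h>

/* Textbook kseg0/kseg1 rule that the SegCtl defaults reproduce:
 * both segments map onto physical 0x00000000..0x1FFFFFFF. */
static uint32_t kseg_to_phys(uint32_t va)
{
    return va & 0x1FFFFFFF;
}

int main(void)
{
    printf("0x%08x\n", kseg_to_phys(0x80001000)); /* kseg0 -> 0x00001000 (cached) */
    printf("0x%08x\n", kseg_to_phys(0xA0001000)); /* kseg1 -> 0x00001000 (uncached) */
    return 0;
}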
*/ +void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc) +{ + int32_t tcstatus, *tcst; + uint32_t v = cpu->CP0_Status; + uint32_t cu, mx, asid, ksu; + uint32_t mask = ((1 << CP0TCSt_TCU3) + | (1 << CP0TCSt_TCU2) + | (1 << CP0TCSt_TCU1) + | (1 << CP0TCSt_TCU0) + | (1 << CP0TCSt_TMX) + | (3 << CP0TCSt_TKSU) + | (0xff << CP0TCSt_TASID)); + + cu = (v >> CP0St_CU0) & 0xf; + mx = (v >> CP0St_MX) & 0x1; + ksu = (v >> CP0St_KSU) & 0x3; + asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + + tcstatus = cu << CP0TCSt_TCU0; + tcstatus |= mx << CP0TCSt_TMX; + tcstatus |= ksu << CP0TCSt_TKSU; + tcstatus |= asid; + + if (tc == cpu->current_tc) { + tcst = &cpu->active_tc.CP0_TCStatus; + } else { + tcst = &cpu->tcs[tc].CP0_TCStatus; + } + + *tcst &= ~mask; + *tcst |= tcstatus; + compute_hflags(cpu); +} + +void cpu_mips_store_status(CPUMIPSState *env, target_ulong val) +{ + uint32_t mask = env->CP0_Status_rw_bitmask; + target_ulong old = env->CP0_Status; + + if (env->insn_flags & ISA_MIPS32R6) { + bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3; +#if defined(TARGET_MIPS64) + uint32_t ksux = (1 << CP0St_KX) & val; + ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */ + ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */ + val = (val & ~(7 << CP0St_UX)) | ksux; +#endif + if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) { + mask &= ~(3 << CP0St_KSU); + } + mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val); + } + + env->CP0_Status = (old & ~mask) | (val & mask); +#if defined(TARGET_MIPS64) + if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) { + /* Access to at least one of the 64-bit segments has been disabled */ + tlb_flush(env_cpu(env)); + } +#endif + if (env->CP0_Config3 & (1 << CP0C3_MT)) { + sync_c0_status(env, env, env->current_tc); + } else { + compute_hflags(env); + } +} + +void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val) +{ + uint32_t mask = 0x00C00300; + + if (env->insn_flags & ISA_MIPS32R2) { + mask |= 1 << CP0Ca_DC; + } + if (env->insn_flags & ISA_MIPS32R6) { + mask &= ~((1 << CP0Ca_WP) & val); + } + + env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask); + +#if 0 + uint32_t old = env->CP0_Cause; + if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) { + if (env->CP0_Cause & (1 << CP0Ca_DC)) { + cpu_mips_stop_count(env); + } else { + cpu_mips_start_count(env); + } + } + + int i; + /* Set/reset software interrupts */ + for (i = 0 ; i < 2 ; i++) { + if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) { + cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i))); + } + } +#endif +} + +static void raise_mmu_exception(CPUMIPSState *env, target_ulong address, + int rw, int tlb_error) +{ + CPUState *cs = env_cpu(env); + int exception = 0, error_code = 0; + + if (rw == MMU_INST_FETCH) { + error_code |= EXCP_INST_NOTAVAIL; + } + + switch (tlb_error) { + default: + case TLBRET_BADADDR: + /* Reference to kernel address from user mode or supervisor mode */ + /* Reference to supervisor address from user mode */ + if (rw == MMU_DATA_STORE) { + exception = EXCP_AdES; + } else { + exception = EXCP_AdEL; + } + break; + case TLBRET_NOMATCH: + /* No TLB match for a mapped address */ + if (rw == MMU_DATA_STORE) { + exception = EXCP_TLBS; + } else { + exception = EXCP_TLBL; + } + error_code |= EXCP_TLB_NOMATCH; + break; + case TLBRET_INVALID: + /* TLB match with no valid bit */ + if (rw == MMU_DATA_STORE) { + exception = EXCP_TLBS; + } else { + exception = EXCP_TLBL; + } + break; + case TLBRET_DIRTY: + /* TLB match but 'D' bit is cleared */ + 
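        /* A store hit a clean page, so raise the TLB Modified exception. */
+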
exception = EXCP_LTLBL; + break; + case TLBRET_XI: + /* Execute-Inhibit Exception */ + if (env->CP0_PageGrain & (1 << CP0PG_IEC)) { + exception = EXCP_TLBXI; + } else { + exception = EXCP_TLBL; + } + break; + case TLBRET_RI: + /* Read-Inhibit Exception */ + if (env->CP0_PageGrain & (1 << CP0PG_IEC)) { + exception = EXCP_TLBRI; + } else { + exception = EXCP_TLBL; + } + break; + } + /* Raise exception */ + if (!(env->hflags & MIPS_HFLAG_DM)) { + env->CP0_BadVAddr = address; + } + env->CP0_Context = (env->CP0_Context & ~0x007fffff) | + ((address >> 9) & 0x007ffff0); + env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) | + (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) | + (address & (TARGET_PAGE_MASK << 1)); +#if defined(TARGET_MIPS64) + env->CP0_EntryHi &= env->SEGMask; + env->CP0_XContext = + (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */ + (extract64(address, 62, 2) << (env->SEGBITS - 9)) | /* R */ + (extract64(address, 13, env->SEGBITS - 13) << 4); /* BadVPN2 */ +#endif + cs->exception_index = exception; + env->error_code = error_code; +} + +hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + hwaddr phys_addr; + int prot; + + if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT, + cpu_mmu_index(env, false)) != 0) { + return -1; + } + return phys_addr; +} + +#if !defined(TARGET_MIPS64) + +/* + * Perform hardware page table walk + * + * Memory accesses are performed using the KERNEL privilege level. + * Synchronous exceptions detected on memory accesses cause a silent exit + * from page table walking, resulting in a TLB or XTLB Refill exception. + * + * Implementations are not required to support page table walk memory + * accesses from mapped memory regions. When an unsupported access is + * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill + * exception. + * + * Note that if an exception is caused by AddressTranslation or LoadMemory + * functions, the exception is not taken, a silent exit is taken, + * resulting in a TLB or XTLB Refill exception. + */ + +static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size, + uint64_t *pte) +{ + if ((vaddr & ((entry_size >> 3) - 1)) != 0) { + return false; + } + if (entry_size == 64) { + *pte = cpu_ldq_code(env, vaddr); + } else { + *pte = cpu_ldl_code(env, vaddr); + } + return true; +} + +static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry, + int entry_size, int ptei) +{ + uint64_t result = entry; + uint64_t rixi; + if (ptei > entry_size) { + ptei -= 32; + } + result >>= (ptei - 2); + rixi = result & 3; + result >>= 2; + result |= rixi << CP0EnLo_XI; + return result; +} + +static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, + int directory_index, bool *huge_page, bool *hgpg_directory_hit, + uint64_t *pw_entrylo0, uint64_t *pw_entrylo1) +{ + int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1; + int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F; + int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1; + int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; + int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F; + int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3; + int directory_shift = (ptew > 1) ? -1 : + (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift; + int leaf_shift = (ptew > 1) ? -1 : + (ptew == 1) ? 
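/* PTEW == 1 doubles the leaf entry size; larger values abort the walk */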
native_shift + 1 : native_shift;
+    uint32_t direntry_size = 1 << (directory_shift + 3);
+    uint32_t leafentry_size = 1 << (leaf_shift + 3);
+    uint64_t entry;
+    uint64_t paddr;
+    int prot;
+    uint64_t lsb = 0;
+    uint64_t w = 0;
+
+    if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
+                             ACCESS_INT, cpu_mmu_index(env, false)) !=
+                             TLBRET_MATCH) {
+        /* wrong base address */
+        return 0;
+    }
+    if (!get_pte(env, *vaddr, direntry_size, &entry)) {
+        return 0;
+    }
+
+    if ((entry & (1ULL << psn)) && hugepg) {
+        *huge_page = true;
+        *hgpg_directory_hit = true;
+        entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
+        w = directory_index - 1;
+        if (directory_index & 0x1) {
+            /* Generate adjacent page from same PTE for odd TLB page */
+            lsb = (1 << w) >> 6;
+            *pw_entrylo0 = entry & ~lsb; /* even page */
+            *pw_entrylo1 = entry | lsb; /* odd page */
+        } else if (dph) {
+            int oddpagebit = 1 << leaf_shift;
+            uint64_t vaddr2 = *vaddr ^ oddpagebit;
+            if (*vaddr & oddpagebit) {
+                *pw_entrylo1 = entry;
+            } else {
+                *pw_entrylo0 = entry;
+            }
+            if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
+                                     ACCESS_INT, cpu_mmu_index(env, false)) !=
+                                     TLBRET_MATCH) {
+                return 0;
+            }
+            if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
+                return 0;
+            }
+            entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
+            if (*vaddr & oddpagebit) {
+                *pw_entrylo0 = entry;
+            } else {
+                *pw_entrylo1 = entry;
+            }
+        } else {
+            return 0;
+        }
+        return 1;
+    } else {
+        *vaddr = entry;
+        return 2;
+    }
+}
+
+static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, int rw,
+                                   int mmu_idx)
+{
+    int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
+    int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
+    int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
+    int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
+    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
+
+    /* Initial values */
+    bool huge_page = false;
+    bool hgpg_bdhit = false;
+    bool hgpg_gdhit = false;
+    bool hgpg_udhit = false;
+    bool hgpg_mdhit = false;
+
+    int32_t pw_pagemask = 0;
+    target_ulong pw_entryhi = 0;
+    uint64_t pw_entrylo0 = 0;
+    uint64_t pw_entrylo1 = 0;
+
+    /* Native pointer size */
+    /* For the 32-bit architectures, this bit is fixed to 0. */
+    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
+
+    /* Indices from PWField */
+    int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
+    int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
+    int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
+    int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
+    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
+
+    /* Indices computed from faulting address */
+    int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
+    int uindex = (address >> pf_udw) & ((1 << udw) - 1);
+    int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
+    int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);
+
+    /* Other HTW configs */
+    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
+
+    /* HTW Shift values (depend on entry size) */
+    int directory_shift = (ptew > 1) ? -1 :
+            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
+    int leaf_shift = (ptew > 1) ? -1 :
+            (ptew == 1) ?
native_shift + 1 : native_shift; + + /* Offsets into tables */ + int goffset = gindex << directory_shift; + int uoffset = uindex << directory_shift; + int moffset = mindex << directory_shift; + int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1); + int ptoffset1 = ptoffset0 | (1 << (leaf_shift)); + + uint32_t leafentry_size = 1 << (leaf_shift + 3); + + /* Starting address - Page Table Base */ + uint64_t vaddr = env->CP0_PWBase; + + uint64_t dir_entry; + uint64_t paddr; + int prot; + int m; + + if (!(env->CP0_Config3 & (1 << CP0C3_PW))) { + /* walker is unimplemented */ + return false; + } + if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) { + /* walker is disabled */ + return false; + } + if (!(gdw > 0 || udw > 0 || mdw > 0)) { + /* no structure to walk */ + return false; + } + if ((directory_shift == -1) || (leaf_shift == -1)) { + return false; + } + + /* Global Directory */ + if (gdw > 0) { + vaddr |= goffset; + switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit, + &pw_entrylo0, &pw_entrylo1)) + { + case 0: + return false; + case 1: + goto refill; + case 2: + default: + break; + } + } + + /* Upper directory */ + if (udw > 0) { + vaddr |= uoffset; + switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit, + &pw_entrylo0, &pw_entrylo1)) + { + case 0: + return false; + case 1: + goto refill; + case 2: + default: + break; + } + } + + /* Middle directory */ + if (mdw > 0) { + vaddr |= moffset; + switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit, + &pw_entrylo0, &pw_entrylo1)) + { + case 0: + return false; + case 1: + goto refill; + case 2: + default: + break; + } + } + + /* Leaf Level Page Table - First half of PTE pair */ + vaddr |= ptoffset0; + if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD, + ACCESS_INT, cpu_mmu_index(env, false)) != + TLBRET_MATCH) { + return false; + } + if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) { + return false; + } + dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew); + pw_entrylo0 = dir_entry; + + /* Leaf Level Page Table - Second half of PTE pair */ + vaddr |= ptoffset1; + if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD, + ACCESS_INT, cpu_mmu_index(env, false)) != + TLBRET_MATCH) { + return false; + } + if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) { + return false; + } + dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew); + pw_entrylo1 = dir_entry; + +refill: + + m = (1 << pf_ptw) - 1; + + if (huge_page) { + switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 | + hgpg_mdhit) + { + case 4: + m = (1 << pf_gdw) - 1; + if (pf_gdw & 1) { + m >>= 1; + } + break; + case 2: + m = (1 << pf_udw) - 1; + if (pf_udw & 1) { + m >>= 1; + } + break; + case 1: + m = (1 << pf_mdw) - 1; + if (pf_mdw & 1) { + m >>= 1; + } + break; + } + } + pw_pagemask = m >> 12; + update_pagemask(env, pw_pagemask << 13, &pw_pagemask); + pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF); + { + target_ulong tmp_entryhi = env->CP0_EntryHi; + int32_t tmp_pagemask = env->CP0_PageMask; + uint64_t tmp_entrylo0 = env->CP0_EntryLo0; + uint64_t tmp_entrylo1 = env->CP0_EntryLo1; + + env->CP0_EntryHi = pw_entryhi; + env->CP0_PageMask = pw_pagemask; + env->CP0_EntryLo0 = pw_entrylo0; + env->CP0_EntryLo1 = pw_entrylo1; + + /* + * The hardware page walker inserts a page into the TLB in a manner + * identical to a TLBWR instruction as executed by the software refill + * handler. 
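+ * CP0 EntryHi/PageMask/EntryLo are swapped in around the call and
+ * restored right after, so the guest never observes the swap.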
+ */ + r4k_helper_tlbwr(env); + + env->CP0_EntryHi = tmp_entryhi; + env->CP0_PageMask = tmp_pagemask; + env->CP0_EntryLo0 = tmp_entrylo0; + env->CP0_EntryLo1 = tmp_entrylo1; + } + return true; +} +#endif + +bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + hwaddr physical; + int prot; + int mips_access_type; + int ret = TLBRET_BADADDR; + + /* data access */ + /* XXX: put correct access by using cpu_restore_state() correctly */ + mips_access_type = ACCESS_INT; + ret = get_physical_address(env, &physical, &prot, address, + access_type, mips_access_type, mmu_idx); + switch (ret) { + case TLBRET_MATCH: + qemu_log_mask(CPU_LOG_MMU, + "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx + " prot %d\n", __func__, address, physical, prot); + break; + default: + qemu_log_mask(CPU_LOG_MMU, + "%s address=%" VADDR_PRIx " ret %d\n", __func__, address, + ret); + break; + } + if (ret == TLBRET_MATCH) { + tlb_set_page(cs, address & TARGET_PAGE_MASK, + physical & TARGET_PAGE_MASK, prot, + mmu_idx, TARGET_PAGE_SIZE); + return true; + } +#if !defined(TARGET_MIPS64) + if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) { + /* + * Memory reads during hardware page table walking are performed + * as if they were kernel-mode load instructions. + */ + int mode = (env->hflags & MIPS_HFLAG_KSU); + bool ret_walker; + env->hflags &= ~MIPS_HFLAG_KSU; + ret_walker = page_table_walk_refill(env, address, access_type, mmu_idx); + env->hflags |= mode; + if (ret_walker) { + ret = get_physical_address(env, &physical, &prot, address, + access_type, mips_access_type, mmu_idx); + if (ret == TLBRET_MATCH) { + tlb_set_page(cs, address & TARGET_PAGE_MASK, + physical & TARGET_PAGE_MASK, prot, + mmu_idx, TARGET_PAGE_SIZE); + return true; + } + } + } +#endif + if (probe) { + return false; + } + + raise_mmu_exception(env, address, access_type, ret); + do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr); +} + +hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, + int rw) +{ + hwaddr physical; + int prot; + int access_type; + int ret = 0; + + /* data access */ + access_type = ACCESS_INT; + ret = get_physical_address(env, &physical, &prot, address, rw, access_type, + cpu_mmu_index(env, false)); + if (ret != TLBRET_MATCH) { + raise_mmu_exception(env, address, rw, ret); + return -1LL; + } else { + return physical; + } +} + +#if 0 +static const char * const excp_names[EXCP_LAST + 1] = { + [EXCP_RESET] = "reset", + [EXCP_SRESET] = "soft reset", + [EXCP_DSS] = "debug single step", + [EXCP_DINT] = "debug interrupt", + [EXCP_NMI] = "non-maskable interrupt", + [EXCP_MCHECK] = "machine check", + [EXCP_EXT_INTERRUPT] = "interrupt", + [EXCP_DFWATCH] = "deferred watchpoint", + [EXCP_DIB] = "debug instruction breakpoint", + [EXCP_IWATCH] = "instruction fetch watchpoint", + [EXCP_AdEL] = "address error load", + [EXCP_AdES] = "address error store", + [EXCP_TLBF] = "TLB refill", + [EXCP_IBE] = "instruction bus error", + [EXCP_DBp] = "debug breakpoint", + [EXCP_SYSCALL] = "syscall", + [EXCP_BREAK] = "break", + [EXCP_CpU] = "coprocessor unusable", + [EXCP_RI] = "reserved instruction", + [EXCP_OVERFLOW] = "arithmetic overflow", + [EXCP_TRAP] = "trap", + [EXCP_FPE] = "floating point", + [EXCP_DDBS] = "debug data break store", + [EXCP_DWATCH] = "data watchpoint", + [EXCP_LTLBL] = "TLB modify", + [EXCP_TLBL] = "TLB load", + [EXCP_TLBS] = "TLB store", + 
[EXCP_DBE] = "data bus error", + [EXCP_DDBL] = "debug data break load", + [EXCP_THREAD] = "thread", + [EXCP_MDMX] = "MDMX", + [EXCP_C2E] = "precise coprocessor 2", + [EXCP_CACHE] = "cache error", + [EXCP_TLBXI] = "TLB execute-inhibit", + [EXCP_TLBRI] = "TLB read-inhibit", + [EXCP_MSADIS] = "MSA disabled", + [EXCP_MSAFPE] = "MSA floating point", +}; +#endif + +target_ulong exception_resume_pc(CPUMIPSState *env) +{ + target_ulong bad_pc; + target_ulong isa_mode; + + isa_mode = !!(env->hflags & MIPS_HFLAG_M16); + bad_pc = env->active_tc.PC | isa_mode; + if (env->hflags & MIPS_HFLAG_BMASK) { + /* + * If the exception was raised from a delay slot, come back to + * the jump. + */ + bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); + } + + return bad_pc; +} + +static void set_hflags_for_handler(CPUMIPSState *env) +{ + /* Exception handlers are entered in 32-bit mode. */ + env->hflags &= ~(MIPS_HFLAG_M16); + /* ...except that microMIPS lets you choose. */ + if (env->insn_flags & ASE_MICROMIPS) { + env->hflags |= (!!(env->CP0_Config3 & + (1 << CP0C3_ISA_ON_EXC)) + << MIPS_HFLAG_M16_SHIFT); + } +} + +static inline void set_badinstr_registers(CPUMIPSState *env) +{ + if (env->insn_flags & ISA_NANOMIPS32) { + if (env->CP0_Config3 & (1 << CP0C3_BI)) { + uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16; + if ((instr & 0x10000000) == 0) { + instr |= cpu_lduw_code(env, env->active_tc.PC + 2); + } + env->CP0_BadInstr = instr; + + if ((instr & 0xFC000000) == 0x60000000) { + instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16; + env->CP0_BadInstrX = instr; + } + } + return; + } + + if (env->hflags & MIPS_HFLAG_M16) { + /* TODO: add BadInstr support for microMIPS */ + return; + } + if (env->CP0_Config3 & (1 << CP0C3_BI)) { + env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC); + } + if ((env->CP0_Config3 & (1 << CP0C3_BP)) && + (env->hflags & MIPS_HFLAG_BMASK)) { + env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4); + } +} + +void mips_cpu_do_interrupt(CPUState *cs) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + bool update_badinstr = 0; + target_ulong offset; + int cause = -1; + +#if 0 + const char *name; + if (qemu_loglevel_mask(CPU_LOG_INT) + && cs->exception_index != EXCP_EXT_INTERRUPT) { + if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) { + name = "unknown"; + } else { + name = excp_names[cs->exception_index]; + } + + qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx + " %s exception\n", + __func__, env->active_tc.PC, env->CP0_EPC, name); + } +#endif + + if (cs->exception_index == EXCP_EXT_INTERRUPT && + (env->hflags & MIPS_HFLAG_DM)) { + cs->exception_index = EXCP_DINT; + } + offset = 0x180; + switch (cs->exception_index) { + case EXCP_DSS: + env->CP0_Debug |= 1 << CP0DB_DSS; + /* + * Debug single step cannot be raised inside a delay slot and + * resume will always occur on the next instruction + * (but we assume the pc has always been updated during + * code translation). 
+ */ + env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16); + goto enter_debug_mode; + case EXCP_DINT: + env->CP0_Debug |= 1 << CP0DB_DINT; + goto set_DEPC; + case EXCP_DIB: + env->CP0_Debug |= 1 << CP0DB_DIB; + goto set_DEPC; + case EXCP_DBp: + env->CP0_Debug |= 1 << CP0DB_DBp; + /* Setup DExcCode - SDBBP instruction */ + env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) | + (9 << CP0DB_DEC); + goto set_DEPC; + case EXCP_DDBS: + env->CP0_Debug |= 1 << CP0DB_DDBS; + goto set_DEPC; + case EXCP_DDBL: + env->CP0_Debug |= 1 << CP0DB_DDBL; + set_DEPC: + env->CP0_DEPC = exception_resume_pc(env); + env->hflags &= ~MIPS_HFLAG_BMASK; + enter_debug_mode: + if (env->insn_flags & ISA_MIPS3) { + env->hflags |= MIPS_HFLAG_64; + if (!(env->insn_flags & ISA_MIPS64R6) || + env->CP0_Status & (1 << CP0St_KX)) { + env->hflags &= ~MIPS_HFLAG_AWRAP; + } + } + env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0; + env->hflags &= ~(MIPS_HFLAG_KSU); + /* EJTAG probe trap enable is not implemented... */ + if (!(env->CP0_Status & (1 << CP0St_EXL))) { + env->CP0_Cause &= ~(1U << CP0Ca_BD); + } + env->active_tc.PC = env->exception_base + 0x480; + set_hflags_for_handler(env); + break; + case EXCP_RESET: + cpu_reset(CPU(cpu)); + break; + case EXCP_SRESET: + env->CP0_Status |= (1 << CP0St_SR); + memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo)); + goto set_error_EPC; + case EXCP_NMI: + env->CP0_Status |= (1 << CP0St_NMI); + set_error_EPC: + env->CP0_ErrorEPC = exception_resume_pc(env); + env->hflags &= ~MIPS_HFLAG_BMASK; + env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV); + if (env->insn_flags & ISA_MIPS3) { + env->hflags |= MIPS_HFLAG_64; + if (!(env->insn_flags & ISA_MIPS64R6) || + env->CP0_Status & (1 << CP0St_KX)) { + env->hflags &= ~MIPS_HFLAG_AWRAP; + } + } + env->hflags |= MIPS_HFLAG_CP0; + env->hflags &= ~(MIPS_HFLAG_KSU); + if (!(env->CP0_Status & (1 << CP0St_EXL))) { + env->CP0_Cause &= ~(1U << CP0Ca_BD); + } + env->active_tc.PC = env->exception_base; + set_hflags_for_handler(env); + break; + case EXCP_EXT_INTERRUPT: + cause = 0; + if (env->CP0_Cause & (1 << CP0Ca_IV)) { + uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f; + + if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) { + offset = 0x200; + } else { + uint32_t vector = 0; + uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP; + + if (env->CP0_Config3 & (1 << CP0C3_VEIC)) { + /* + * For VEIC mode, the external interrupt controller feeds + * the vector through the CP0Cause IP lines. + */ + vector = pending; + } else { + /* + * Vectored Interrupts + * Mask with Status.IM7-IM0 to get enabled interrupts. + */ + pending &= (env->CP0_Status >> CP0St_IM) & 0xff; + /* Find the highest-priority interrupt. 
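The highest-numbered pending bit wins, so count shifts up to the MSB.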
*/ + while (pending >>= 1) { + vector++; + } + } + offset = 0x200 + (vector * (spacing << 5)); + } + } + goto set_EPC; + case EXCP_LTLBL: + cause = 1; + update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); + goto set_EPC; + case EXCP_TLBL: + cause = 2; + update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); + if ((env->error_code & EXCP_TLB_NOMATCH) && + !(env->CP0_Status & (1 << CP0St_EXL))) { +#if defined(TARGET_MIPS64) + int R = env->CP0_BadVAddr >> 62; + int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; + int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; + + if ((R != 0 || UX) && (R != 3 || KX) && + (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) { + offset = 0x080; + } else { +#endif + offset = 0x000; +#if defined(TARGET_MIPS64) + } +#endif + } + goto set_EPC; + case EXCP_TLBS: + cause = 3; + update_badinstr = 1; + if ((env->error_code & EXCP_TLB_NOMATCH) && + !(env->CP0_Status & (1 << CP0St_EXL))) { +#if defined(TARGET_MIPS64) + int R = env->CP0_BadVAddr >> 62; + int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; + int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; + + if ((R != 0 || UX) && (R != 3 || KX) && + (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) { + offset = 0x080; + } else { +#endif + offset = 0x000; +#if defined(TARGET_MIPS64) + } +#endif + } + goto set_EPC; + case EXCP_AdEL: + cause = 4; + update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); + goto set_EPC; + case EXCP_AdES: + cause = 5; + update_badinstr = 1; + goto set_EPC; + case EXCP_IBE: + cause = 6; + goto set_EPC; + case EXCP_DBE: + cause = 7; + goto set_EPC; + case EXCP_SYSCALL: + cause = 8; + update_badinstr = 1; + goto set_EPC; + case EXCP_BREAK: + cause = 9; + update_badinstr = 1; + goto set_EPC; + case EXCP_RI: + cause = 10; + update_badinstr = 1; + goto set_EPC; + case EXCP_CpU: + cause = 11; + update_badinstr = 1; + env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) | + (env->error_code << CP0Ca_CE); + goto set_EPC; + case EXCP_OVERFLOW: + cause = 12; + update_badinstr = 1; + goto set_EPC; + case EXCP_TRAP: + cause = 13; + update_badinstr = 1; + goto set_EPC; + case EXCP_MSAFPE: + cause = 14; + update_badinstr = 1; + goto set_EPC; + case EXCP_FPE: + cause = 15; + update_badinstr = 1; + goto set_EPC; + case EXCP_C2E: + cause = 18; + goto set_EPC; + case EXCP_TLBRI: + cause = 19; + update_badinstr = 1; + goto set_EPC; + case EXCP_TLBXI: + cause = 20; + goto set_EPC; + case EXCP_MSADIS: + cause = 21; + update_badinstr = 1; + goto set_EPC; + case EXCP_MDMX: + cause = 22; + goto set_EPC; + case EXCP_DWATCH: + cause = 23; + /* XXX: TODO: manage deferred watch exceptions */ + goto set_EPC; + case EXCP_MCHECK: + cause = 24; + goto set_EPC; + case EXCP_THREAD: + cause = 25; + goto set_EPC; + case EXCP_DSPDIS: + cause = 26; + goto set_EPC; + case EXCP_CACHE: + cause = 30; + offset = 0x100; + set_EPC: + if (!(env->CP0_Status & (1 << CP0St_EXL))) { + env->CP0_EPC = exception_resume_pc(env); + if (update_badinstr) { + set_badinstr_registers(env); + } + if (env->hflags & MIPS_HFLAG_BMASK) { + env->CP0_Cause |= (1U << CP0Ca_BD); + } else { + env->CP0_Cause &= ~(1U << CP0Ca_BD); + } + env->CP0_Status |= (1 << CP0St_EXL); + if (env->insn_flags & ISA_MIPS3) { + env->hflags |= MIPS_HFLAG_64; + if (!(env->insn_flags & ISA_MIPS64R6) || + env->CP0_Status & (1 << CP0St_KX)) { + env->hflags &= ~MIPS_HFLAG_AWRAP; + } + } + env->hflags |= MIPS_HFLAG_CP0; + env->hflags &= ~(MIPS_HFLAG_KSU); + } + env->hflags &= ~MIPS_HFLAG_BMASK; + if (env->CP0_Status & (1 << CP0St_BEV)) { + 
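        /* Status.BEV sends all vectors to the bootstrap area at exception_base + 0x200 */
+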
env->active_tc.PC = env->exception_base + 0x200; + } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) && + env->CP0_Config5 & (1 << CP0C5_CV))) { + /* Force KSeg1 for cache errors */ + env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000); + } else { + env->active_tc.PC = env->CP0_EBase & ~0xfff; + } + + env->active_tc.PC += offset; + set_hflags_for_handler(env); + env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | + (cause << CP0Ca_EC); + break; + default: + abort(); + } +#if 0 + if (qemu_loglevel_mask(CPU_LOG_INT) + && cs->exception_index != EXCP_EXT_INTERRUPT) { + qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n" + " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n", + __func__, env->active_tc.PC, env->CP0_EPC, cause, + env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr, + env->CP0_DEPC); + } +#endif + + cs->exception_index = EXCP_NONE; +} + +bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + if (interrupt_request & CPU_INTERRUPT_HARD) { + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + + if (cpu_mips_hw_interrupts_enabled(env) && + cpu_mips_hw_interrupts_pending(env)) { + /* Raise it */ + cs->exception_index = EXCP_EXT_INTERRUPT; + env->error_code = 0; + mips_cpu_do_interrupt(cs); + return true; + } + } + return false; +} + +void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra) +{ + CPUState *cs = env_cpu(env); + r4k_tlb_t *tlb; + target_ulong addr; + target_ulong end; + uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + uint32_t MMID = env->CP0_MemoryMapID; + bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); + uint32_t tlb_mmid; + target_ulong mask; + + MMID = mi ? MMID : (uint32_t) ASID; + + tlb = &env->tlb->mmu.r4k.tlb[idx]; + /* + * The qemu TLB is flushed when the ASID/MMID changes, so no need to + * flush these entries again. + */ + tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; + if (tlb->G == 0 && tlb_mmid != MMID) { + return; + } + + if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) { + /* + * For tlbwr, we can shadow the discarded entry into + * a new (fake) TLB entry, as long as the guest can not + * tell that it's there. + */ + env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb; + env->tlb->tlb_in_use++; + return; + } + + /* 1k pages are not supported. 
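The mask below is widened so each entry covers at least one even/odd TARGET_PAGE pair.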
*/ + mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); + if (tlb->V0) { + addr = tlb->VPN & ~mask; +#if defined(TARGET_MIPS64) + if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { + addr |= 0x3FFFFF0000000000ULL; + } +#endif + end = addr | (mask >> 1); + while (addr < end) { + tlb_flush_page(cs, addr); + addr += TARGET_PAGE_SIZE; + } + } + if (tlb->V1) { + addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1); +#if defined(TARGET_MIPS64) + if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { + addr |= 0x3FFFFF0000000000ULL; + } +#endif + end = addr | mask; + while (addr - 1 < end) { + tlb_flush_page(cs, addr); + addr += TARGET_PAGE_SIZE; + } + } +} + +void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, + uint32_t exception, + int error_code, + uintptr_t pc) +{ + CPUState *cs = env_cpu(env); + +#if 0 + qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n", + __func__, exception, error_code); +#endif + cs->exception_index = exception; + env->error_code = error_code; + + // Unicorn: Imported from https://github.com/unicorn-engine/unicorn/pull/1098 + if (exception == 0x11){ + env->uc->next_pc = env->active_tc.PC + 4; + } + + cpu_loop_exit_restore(cs, pc); +} diff --git a/qemu/target-mips/helper.h b/qemu/target/mips/helper.h similarity index 73% rename from qemu/target-mips/helper.h rename to qemu/target/mips/helper.h index 1924bf6f..1ff0c475 100644 --- a/qemu/target-mips/helper.h +++ b/qemu/target/mips/helper.h @@ -2,6 +2,9 @@ DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int) DEF_HELPER_2(raise_exception, noreturn, env, i32) +DEF_HELPER_1(raise_exception_debug, noreturn, env) + +// DEF_HELPER_1(do_semihosting, void, env) #ifdef TARGET_MIPS64 DEF_HELPER_4(sdl, void, env, tl, tl, int) @@ -10,20 +13,9 @@ DEF_HELPER_4(sdr, void, env, tl, tl, int) DEF_HELPER_4(swl, void, env, tl, tl, int) DEF_HELPER_4(swr, void, env, tl, tl, int) -#ifndef CONFIG_USER_ONLY DEF_HELPER_3(ll, tl, env, tl, int) -DEF_HELPER_4(sc, tl, env, tl, tl, int) #ifdef TARGET_MIPS64 DEF_HELPER_3(lld, tl, env, tl, int) -DEF_HELPER_4(scd, tl, env, tl, tl, int) -#endif -#endif - -DEF_HELPER_FLAGS_1(clo, TCG_CALL_NO_RWG_SE, tl, tl) -DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, tl, tl) -#ifdef TARGET_MIPS64 -DEF_HELPER_FLAGS_1(dclo, TCG_CALL_NO_RWG_SE, tl, tl) -DEF_HELPER_FLAGS_1(dclz, TCG_CALL_NO_RWG_SE, tl, tl) #endif DEF_HELPER_3(muls, tl, env, tl, tl) @@ -46,7 +38,8 @@ DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl) #endif -#ifndef CONFIG_USER_ONLY +DEF_HELPER_FLAGS_4(rotx, TCG_CALL_NO_RWG_SE, tl, tl, i32, i32, i32) + /* CP0 helpers */ DEF_HELPER_1(mfc0_mvpcontrol, tl, env) DEF_HELPER_1(mfc0_mvpconf0, tl, env) @@ -69,6 +62,8 @@ DEF_HELPER_1(mftc0_tcschedule, tl, env) DEF_HELPER_1(mfc0_tcschefback, tl, env) DEF_HELPER_1(mftc0_tcschefback, tl, env) DEF_HELPER_1(mfc0_count, tl, env) +DEF_HELPER_1(mfc0_saar, tl, env) +DEF_HELPER_1(mfhc0_saar, tl, env) DEF_HELPER_1(mftc0_entryhi, tl, env) DEF_HELPER_1(mftc0_status, tl, env) DEF_HELPER_1(mftc0_cause, tl, env) @@ -76,8 +71,11 @@ DEF_HELPER_1(mftc0_epc, tl, env) DEF_HELPER_1(mftc0_ebase, tl, env) DEF_HELPER_2(mftc0_configx, tl, env, tl) DEF_HELPER_1(mfc0_lladdr, tl, env) +DEF_HELPER_1(mfc0_maar, tl, env) +DEF_HELPER_1(mfhc0_maar, tl, env) DEF_HELPER_2(mfc0_watchlo, tl, env, i32) DEF_HELPER_2(mfc0_watchhi, tl, env, i32) +DEF_HELPER_2(mfhc0_watchhi, tl, env, i32) DEF_HELPER_1(mfc0_debug, tl, env) DEF_HELPER_1(mftc0_debug, tl, env) #ifdef TARGET_MIPS64 @@ -87,7 +85,10 @@ 
DEF_HELPER_1(dmfc0_tccontext, tl, env) DEF_HELPER_1(dmfc0_tcschedule, tl, env) DEF_HELPER_1(dmfc0_tcschefback, tl, env) DEF_HELPER_1(dmfc0_lladdr, tl, env) +DEF_HELPER_1(dmfc0_maar, tl, env) DEF_HELPER_2(dmfc0_watchlo, tl, env, i32) +DEF_HELPER_2(dmfc0_watchhi, tl, env, i32) +DEF_HELPER_1(dmfc0_saar, tl, env) #endif /* TARGET_MIPS64 */ DEF_HELPER_2(mtc0_index, void, env, tl) @@ -116,8 +117,14 @@ DEF_HELPER_2(mtc0_tcschefback, void, env, tl) DEF_HELPER_2(mttc0_tcschefback, void, env, tl) DEF_HELPER_2(mtc0_entrylo1, void, env, tl) DEF_HELPER_2(mtc0_context, void, env, tl) +DEF_HELPER_2(mtc0_memorymapid, void, env, tl) DEF_HELPER_2(mtc0_pagemask, void, env, tl) DEF_HELPER_2(mtc0_pagegrain, void, env, tl) +DEF_HELPER_2(mtc0_segctl0, void, env, tl) +DEF_HELPER_2(mtc0_segctl1, void, env, tl) +DEF_HELPER_2(mtc0_segctl2, void, env, tl) +DEF_HELPER_2(mtc0_pwfield, void, env, tl) +DEF_HELPER_2(mtc0_pwsize, void, env, tl) DEF_HELPER_2(mtc0_wired, void, env, tl) DEF_HELPER_2(mtc0_srsconf0, void, env, tl) DEF_HELPER_2(mtc0_srsconf1, void, env, tl) @@ -125,7 +132,11 @@ DEF_HELPER_2(mtc0_srsconf2, void, env, tl) DEF_HELPER_2(mtc0_srsconf3, void, env, tl) DEF_HELPER_2(mtc0_srsconf4, void, env, tl) DEF_HELPER_2(mtc0_hwrena, void, env, tl) +DEF_HELPER_2(mtc0_pwctl, void, env, tl) DEF_HELPER_2(mtc0_count, void, env, tl) +DEF_HELPER_2(mtc0_saari, void, env, tl) +DEF_HELPER_2(mtc0_saar, void, env, tl) +DEF_HELPER_2(mthc0_saar, void, env, tl) DEF_HELPER_2(mtc0_entryhi, void, env, tl) DEF_HELPER_2(mttc0_entryhi, void, env, tl) DEF_HELPER_2(mtc0_compare, void, env, tl) @@ -139,16 +150,22 @@ DEF_HELPER_2(mtc0_ebase, void, env, tl) DEF_HELPER_2(mttc0_ebase, void, env, tl) DEF_HELPER_2(mtc0_config0, void, env, tl) DEF_HELPER_2(mtc0_config2, void, env, tl) +DEF_HELPER_2(mtc0_config3, void, env, tl) DEF_HELPER_2(mtc0_config4, void, env, tl) DEF_HELPER_2(mtc0_config5, void, env, tl) DEF_HELPER_2(mtc0_lladdr, void, env, tl) +DEF_HELPER_2(mtc0_maar, void, env, tl) +DEF_HELPER_2(mthc0_maar, void, env, tl) +DEF_HELPER_2(mtc0_maari, void, env, tl) DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32) DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32) +DEF_HELPER_3(mthc0_watchhi, void, env, tl, i32) DEF_HELPER_2(mtc0_xcontext, void, env, tl) DEF_HELPER_2(mtc0_framemask, void, env, tl) DEF_HELPER_2(mtc0_debug, void, env, tl) DEF_HELPER_2(mttc0_debug, void, env, tl) DEF_HELPER_2(mtc0_performance0, void, env, tl) +DEF_HELPER_2(mtc0_errctl, void, env, tl) DEF_HELPER_2(mtc0_taglo, void, env, tl) DEF_HELPER_2(mtc0_datalo, void, env, tl) DEF_HELPER_2(mtc0_taghi, void, env, tl) @@ -174,7 +191,10 @@ DEF_HELPER_0(dmt, tl) DEF_HELPER_0(emt, tl) DEF_HELPER_1(dvpe, tl, env) DEF_HELPER_1(evpe, tl, env) -#endif /* !CONFIG_USER_ONLY */ + +/* R6 Multi-threading */ +DEF_HELPER_1(dvp, tl, env) +DEF_HELPER_1(evp, tl, env) /* microMIPS functions */ DEF_HELPER_4(lwm, void, env, tl, tl, i32) @@ -194,8 +214,6 @@ DEF_HELPER_4(ctc1, void, env, tl, i32, i32) DEF_HELPER_2(float_cvtd_s, i64, env, i32) DEF_HELPER_2(float_cvtd_w, i64, env, i32) DEF_HELPER_2(float_cvtd_l, i64, env, i64) -DEF_HELPER_2(float_cvtl_d, i64, env, i64) -DEF_HELPER_2(float_cvtl_s, i64, env, i32) DEF_HELPER_2(float_cvtps_pw, i64, env, i64) DEF_HELPER_2(float_cvtpw_ps, i64, env, i64) DEF_HELPER_2(float_cvts_d, i32, env, i64) @@ -203,14 +221,12 @@ DEF_HELPER_2(float_cvts_w, i32, env, i32) DEF_HELPER_2(float_cvts_l, i32, env, i64) DEF_HELPER_2(float_cvts_pl, i32, env, i32) DEF_HELPER_2(float_cvts_pu, i32, env, i32) -DEF_HELPER_2(float_cvtw_s, i32, env, i32) -DEF_HELPER_2(float_cvtw_d, i32, env, 
i64) DEF_HELPER_3(float_addr_ps, i64, env, i64, i64) DEF_HELPER_3(float_mulr_ps, i64, env, i64, i64) -DEF_HELPER_FLAGS_1(float_class_s, TCG_CALL_NO_RWG_SE, i32, i32) -DEF_HELPER_FLAGS_1(float_class_d, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_2(float_class_s, TCG_CALL_NO_RWG_SE, i32, env, i32) +DEF_HELPER_FLAGS_2(float_class_d, TCG_CALL_NO_RWG_SE, i64, env, i64) #define FOP_PROTO(op) \ DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \ @@ -229,14 +245,20 @@ FOP_PROTO(mina) #undef FOP_PROTO #define FOP_PROTO(op) \ -DEF_HELPER_2(float_ ## op ## l_s, i64, env, i32) \ -DEF_HELPER_2(float_ ## op ## l_d, i64, env, i64) \ -DEF_HELPER_2(float_ ## op ## w_s, i32, env, i32) \ -DEF_HELPER_2(float_ ## op ## w_d, i32, env, i64) +DEF_HELPER_2(float_ ## op ## _l_s, i64, env, i32) \ +DEF_HELPER_2(float_ ## op ## _l_d, i64, env, i64) \ +DEF_HELPER_2(float_ ## op ## _w_s, i32, env, i32) \ +DEF_HELPER_2(float_ ## op ## _w_d, i32, env, i64) +FOP_PROTO(cvt) FOP_PROTO(round) FOP_PROTO(trunc) FOP_PROTO(ceil) FOP_PROTO(floor) +FOP_PROTO(cvt_2008) +FOP_PROTO(round_2008) +FOP_PROTO(trunc_2008) +FOP_PROTO(ceil_2008) +FOP_PROTO(floor_2008) #undef FOP_PROTO #define FOP_PROTO(op) \ @@ -339,7 +361,6 @@ FOP_PROTO(sne) #undef FOP_PROTO /* Special functions */ -#ifndef CONFIG_USER_ONLY DEF_HELPER_1(tlbwi, void, env) DEF_HELPER_1(tlbwr, void, env) DEF_HELPER_1(tlbp, void, env) @@ -349,12 +370,15 @@ DEF_HELPER_1(tlbinvf, void, env) DEF_HELPER_1(di, tl, env) DEF_HELPER_1(ei, tl, env) DEF_HELPER_1(eret, void, env) +DEF_HELPER_1(eretnc, void, env) DEF_HELPER_1(deret, void, env) -#endif /* !CONFIG_USER_ONLY */ +DEF_HELPER_3(ginvt, void, env, tl, i32) DEF_HELPER_1(rdhwr_cpunum, tl, env) DEF_HELPER_1(rdhwr_synci_step, tl, env) DEF_HELPER_1(rdhwr_cc, tl, env) DEF_HELPER_1(rdhwr_ccres, tl, env) +DEF_HELPER_1(rdhwr_performance, tl, env) +DEF_HELPER_1(rdhwr_xnp, tl, env) DEF_HELPER_2(pmon, void, env, int) DEF_HELPER_1(wait, void, env) @@ -752,6 +776,250 @@ DEF_HELPER_FLAGS_3(wrdsp, 0, void, tl, tl, env) DEF_HELPER_FLAGS_2(rddsp, 0, tl, tl, env) /* MIPS SIMD Architecture */ + +DEF_HELPER_3(msa_nloc_b, void, env, i32, i32) +DEF_HELPER_3(msa_nloc_h, void, env, i32, i32) +DEF_HELPER_3(msa_nloc_w, void, env, i32, i32) +DEF_HELPER_3(msa_nloc_d, void, env, i32, i32) + +DEF_HELPER_3(msa_nlzc_b, void, env, i32, i32) +DEF_HELPER_3(msa_nlzc_h, void, env, i32, i32) +DEF_HELPER_3(msa_nlzc_w, void, env, i32, i32) +DEF_HELPER_3(msa_nlzc_d, void, env, i32, i32) + +DEF_HELPER_3(msa_pcnt_b, void, env, i32, i32) +DEF_HELPER_3(msa_pcnt_h, void, env, i32, i32) +DEF_HELPER_3(msa_pcnt_w, void, env, i32, i32) +DEF_HELPER_3(msa_pcnt_d, void, env, i32, i32) + +DEF_HELPER_4(msa_binsl_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_binsl_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_binsl_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_binsl_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_binsr_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_binsr_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_binsr_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_binsr_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_bmnz_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bmz_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bsel_v, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_bclr_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bclr_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bclr_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bclr_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_bneg_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bneg_h, void, env, i32, i32, i32) 
+DEF_HELPER_4(msa_bneg_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bneg_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_bset_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bset_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bset_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bset_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_add_a_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_add_a_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_add_a_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_add_a_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_adds_a_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_adds_a_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_adds_a_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_adds_a_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_adds_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_adds_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_adds_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_adds_s_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_adds_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_adds_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_adds_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_adds_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_addv_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_addv_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_addv_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_addv_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_hadd_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_hadd_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_hadd_s_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_hadd_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_hadd_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_hadd_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_ave_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ave_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ave_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ave_s_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_ave_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ave_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ave_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ave_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_aver_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_aver_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_aver_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_aver_s_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_aver_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_aver_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_aver_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_aver_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_ceq_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ceq_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ceq_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ceq_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_cle_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_cle_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_cle_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_cle_s_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_cle_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_cle_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_cle_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_cle_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_clt_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_clt_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_clt_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_clt_s_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_clt_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_clt_u_h, void, env, i32, i32, i32) 
+DEF_HELPER_4(msa_clt_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_clt_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_div_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_div_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_div_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_div_s_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_div_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_div_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_div_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_div_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_max_a_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_a_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_a_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_a_d, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_s_d, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_max_u_d, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_a_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_a_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_a_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_a_d, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_s_d, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_min_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_mod_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_mod_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_mod_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_mod_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_mod_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_mod_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_mod_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_mod_s_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_asub_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_asub_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_asub_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_asub_s_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_asub_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_asub_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_asub_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_asub_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_hsub_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_hsub_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_hsub_s_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_hsub_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_hsub_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_hsub_u_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_ilvev_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvev_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvev_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvev_d, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvod_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvod_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvod_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvod_d, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvl_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvl_h, void, env, i32, i32, i32) 
+DEF_HELPER_4(msa_ilvl_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvl_d, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvr_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvr_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvr_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ilvr_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_and_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_nor_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_or_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_xor_v, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_pckev_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_pckev_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_pckev_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_pckev_d, void, env, i32, i32, i32) +DEF_HELPER_4(msa_pckod_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_pckod_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_pckod_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_pckod_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_sll_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_sll_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_sll_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_sll_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_sra_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_sra_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_sra_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_sra_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_srar_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_srar_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_srar_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_srar_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_srl_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_srl_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_srl_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_srl_d, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_srlr_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_srlr_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_srlr_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_srlr_d, void, env, i32, i32, i32) + +DEF_HELPER_3(msa_move_v, void, env, i32, i32) + DEF_HELPER_4(msa_andi_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_ori_b, void, env, i32, i32, i32) DEF_HELPER_4(msa_nori_b, void, env, i32, i32, i32) @@ -787,48 +1055,16 @@ DEF_HELPER_5(msa_sat_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_srari_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_srlri_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_sll_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_sra_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_srl_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_bclr_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_bset_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_bneg_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_binsl_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_binsr_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_addv_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_subv_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_max_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_max_u_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_min_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_min_u_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_max_a_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_min_a_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_ceq_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_clt_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_clt_u_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_cle_s_df, void, env, i32, i32, i32, i32) 
-DEF_HELPER_5(msa_cle_u_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_add_a_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_adds_a_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_adds_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_adds_u_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_ave_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_ave_u_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_aver_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_aver_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_subs_s_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_subs_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_subsus_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_subsuu_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_asub_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_asub_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_mulv_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_maddv_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_msubv_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_div_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_div_u_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_mod_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_mod_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_dotp_s_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_dotp_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_dpadd_s_df, void, env, i32, i32, i32, i32) @@ -837,29 +1073,14 @@ DEF_HELPER_5(msa_dpsub_s_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_dpsub_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_sld_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_splat_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_pckev_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_pckod_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_ilvl_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_ilvr_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_ilvev_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_ilvod_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_vshf_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_srar_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_srlr_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_hadd_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_hadd_u_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_hsub_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_hsub_u_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_sldi_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_splati_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_copy_s_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_copy_u_df, void, env, i32, i32, i32, i32) -DEF_HELPER_5(msa_insert_df, void, env, i32, i32, i32, i32) + DEF_HELPER_5(msa_insve_df, void, env, i32, i32, i32, i32) DEF_HELPER_3(msa_ctcmsa, void, env, tl, i32) DEF_HELPER_2(msa_cfcmsa, tl, env, i32) -DEF_HELPER_3(msa_move_v, void, env, i32, i32) DEF_HELPER_5(msa_fcaf_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_fcun_df, void, env, i32, i32, i32, i32) @@ -903,17 +1124,19 @@ DEF_HELPER_5(msa_mulr_q_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_maddr_q_df, void, env, i32, i32, i32, i32) DEF_HELPER_5(msa_msubr_q_df, void, env, i32, i32, i32, i32) -DEF_HELPER_4(msa_and_v, void, env, i32, i32, i32) -DEF_HELPER_4(msa_or_v, void, env, i32, i32, i32) -DEF_HELPER_4(msa_nor_v, void, env, i32, i32, i32) -DEF_HELPER_4(msa_xor_v, void, env, i32, i32, i32) -DEF_HELPER_4(msa_bmnz_v, void, env, i32, 
i32, i32) -DEF_HELPER_4(msa_bmz_v, void, env, i32, i32, i32) -DEF_HELPER_4(msa_bsel_v, void, env, i32, i32, i32) DEF_HELPER_4(msa_fill_df, void, env, i32, i32, i32) -DEF_HELPER_4(msa_pcnt_df, void, env, i32, i32, i32) -DEF_HELPER_4(msa_nloc_df, void, env, i32, i32, i32) -DEF_HELPER_4(msa_nlzc_df, void, env, i32, i32, i32) + +DEF_HELPER_4(msa_copy_s_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_copy_s_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_copy_s_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_copy_s_d, void, env, i32, i32, i32) +DEF_HELPER_4(msa_copy_u_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_copy_u_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_copy_u_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_insert_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_insert_h, void, env, i32, i32, i32) +DEF_HELPER_4(msa_insert_w, void, env, i32, i32, i32) +DEF_HELPER_4(msa_insert_d, void, env, i32, i32, i32) DEF_HELPER_4(msa_fclass_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ftrunc_s_df, void, env, i32, i32, i32) @@ -932,5 +1155,13 @@ DEF_HELPER_4(msa_ftint_u_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ffint_s_df, void, env, i32, i32, i32) DEF_HELPER_4(msa_ffint_u_df, void, env, i32, i32, i32) -DEF_HELPER_5(msa_ld_df, void, env, i32, i32, i32, s32) -DEF_HELPER_5(msa_st_df, void, env, i32, i32, i32, s32) +#define MSALDST_PROTO(type) \ +DEF_HELPER_3(msa_ld_ ## type, void, env, i32, tl) \ +DEF_HELPER_3(msa_st_ ## type, void, env, i32, tl) +MSALDST_PROTO(b) +MSALDST_PROTO(h) +MSALDST_PROTO(w) +MSALDST_PROTO(d) +#undef MSALDST_PROTO + +DEF_HELPER_3(cache, void, env, tl, i32) diff --git a/qemu/target/mips/internal.h b/qemu/target/mips/internal.h new file mode 100644 index 00000000..6978801d --- /dev/null +++ b/qemu/target/mips/internal.h @@ -0,0 +1,451 @@ +/* + * MIPS internal definitions and helpers + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef MIPS_INTERNAL_H +#define MIPS_INTERNAL_H + +#include "fpu/softfloat-helpers.h" +#include "cpu.h" + +struct uc_struct; + +/* + * MMU types, the first four entries have the same layout as the + * CP0C0_MT field. 
+ */ +enum mips_mmu_types { + MMU_TYPE_NONE, + MMU_TYPE_R4000, + MMU_TYPE_RESERVED, + MMU_TYPE_FMT, + MMU_TYPE_R3000, + MMU_TYPE_R6000, + MMU_TYPE_R8000 +}; + +struct mips_def_t { + const char *name; + int32_t CP0_PRid; + int32_t CP0_Config0; + int32_t CP0_Config1; + int32_t CP0_Config2; + int32_t CP0_Config3; + int32_t CP0_Config4; + int32_t CP0_Config4_rw_bitmask; + int32_t CP0_Config5; + int32_t CP0_Config5_rw_bitmask; + int32_t CP0_Config6; + int32_t CP0_Config7; + target_ulong CP0_LLAddr_rw_bitmask; + int CP0_LLAddr_shift; + int32_t SYNCI_Step; + int32_t CCRes; + int32_t CP0_Status_rw_bitmask; + int32_t CP0_TCStatus_rw_bitmask; + int32_t CP0_SRSCtl; + int32_t CP1_fcr0; + int32_t CP1_fcr31_rw_bitmask; + int32_t CP1_fcr31; + int32_t MSAIR; + int32_t SEGBITS; + int32_t PABITS; + int32_t CP0_SRSConf0_rw_bitmask; + int32_t CP0_SRSConf0; + int32_t CP0_SRSConf1_rw_bitmask; + int32_t CP0_SRSConf1; + int32_t CP0_SRSConf2_rw_bitmask; + int32_t CP0_SRSConf2; + int32_t CP0_SRSConf3_rw_bitmask; + int32_t CP0_SRSConf3; + int32_t CP0_SRSConf4_rw_bitmask; + int32_t CP0_SRSConf4; + int32_t CP0_PageGrain_rw_bitmask; + int32_t CP0_PageGrain; + target_ulong CP0_EBaseWG_rw_bitmask; + uint64_t insn_flags; + enum mips_mmu_types mmu_type; + int32_t SAARP; +}; + +extern const struct mips_def_t mips_defs[]; +extern const int mips_defs_number; + +enum CPUMIPSMSADataFormat { + DF_BYTE = 0, + DF_HALF, + DF_WORD, + DF_DOUBLE +}; + +void mips_cpu_do_interrupt(CPUState *cpu); +bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req); +hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); + +typedef struct r4k_tlb_t r4k_tlb_t; +struct r4k_tlb_t { + target_ulong VPN; + uint32_t PageMask; + uint16_t ASID; + uint32_t MMID; + unsigned int G:1; + unsigned int C0:3; + unsigned int C1:3; + unsigned int V0:1; + unsigned int V1:1; + unsigned int D0:1; + unsigned int D1:1; + unsigned int XI0:1; + unsigned int XI1:1; + unsigned int RI0:1; + unsigned int RI1:1; + unsigned int EHINV:1; + uint64_t PFN[2]; +}; + +struct CPUMIPSTLBContext { + uint32_t nb_tlb; + uint32_t tlb_in_use; + int (*map_address)(struct CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); + void (*helper_tlbwi)(struct CPUMIPSState *env); + void (*helper_tlbwr)(struct CPUMIPSState *env); + void (*helper_tlbp)(struct CPUMIPSState *env); + void (*helper_tlbr)(struct CPUMIPSState *env); + void (*helper_tlbinv)(struct CPUMIPSState *env); + void (*helper_tlbinvf)(struct CPUMIPSState *env); + union { + struct { + r4k_tlb_t tlb[MIPS_TLB_MAX]; + } r4k; + } mmu; +}; + +int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); +int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); +int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); +void r4k_helper_tlbwi(CPUMIPSState *env); +void r4k_helper_tlbwr(CPUMIPSState *env); +void r4k_helper_tlbp(CPUMIPSState *env); +void r4k_helper_tlbr(CPUMIPSState *env); +void r4k_helper_tlbinv(CPUMIPSState *env); +void r4k_helper_tlbinvf(CPUMIPSState *env); +void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra); + +void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, 
MemTxAttrs attrs,
+                                    MemTxResult response, uintptr_t retaddr);
+hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
+                                  int rw);
+
+#define cpu_signal_handler cpu_mips_signal_handler
+
+static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
+{
+    return (env->CP0_Status & (1 << CP0St_IE)) &&
+        !(env->CP0_Status & (1 << CP0St_EXL)) &&
+        !(env->CP0_Status & (1 << CP0St_ERL)) &&
+        !(env->hflags & MIPS_HFLAG_DM) &&
+        /*
+         * Note that the TCStatus IXMT field is initialized to zero,
+         * and only MT capable cores can set it to one. So we don't
+         * need to check for MT capabilities here.
+         */
+        !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
+}
+
+/* Check if there is pending and not masked out interrupt */
+static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
+{
+    int32_t pending;
+    int32_t status;
+    bool r;
+
+    pending = env->CP0_Cause & CP0Ca_IP_mask;
+    status = env->CP0_Status & CP0Ca_IP_mask;
+
+    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
+        /*
+         * A MIPS configured with a vectorizing external interrupt controller
+         * will feed a vector into the Cause pending lines. The core treats
+         * the status lines as a vector level, not as individual masks.
+         */
+        r = pending > status;
+    } else {
+        /*
+         * A MIPS configured with compatibility or VInt (Vectored Interrupts)
+         * treats the pending lines as individual interrupt lines, the status
+         * lines are individual masks.
+         */
+        r = (pending & status) != 0;
+    }
+    return r;
+}
+
+void mips_tcg_init(struct uc_struct *uc);
+
+/* TODO QOM'ify CPU reset and remove */
+void cpu_state_reset(CPUMIPSState *s);
+void cpu_mips_realize_env(CPUMIPSState *env);
+
+/* cp0_timer.c */
+uint32_t cpu_mips_get_random(CPUMIPSState *env);
+uint32_t cpu_mips_get_count(CPUMIPSState *env);
+void cpu_mips_store_count(CPUMIPSState *env, uint32_t value);
+void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value);
+void cpu_mips_start_count(CPUMIPSState *env);
+void cpu_mips_stop_count(CPUMIPSState *env);
+
+/* helper.c */
+bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool probe, uintptr_t retaddr);
+
+/* op_helper.c */
+uint32_t float_class_s(uint32_t arg, float_status *fst);
+uint64_t float_class_d(uint64_t arg, float_status *fst);
+
+extern unsigned int ieee_rm[];
+int ieee_ex_to_mips(int xcpt);
+void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);
+
+static inline void restore_rounding_mode(CPUMIPSState *env)
+{
+    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
+                            &env->active_fpu.fp_status);
+}
+
+static inline void restore_flush_mode(CPUMIPSState *env)
+{
+    set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0,
+                      &env->active_fpu.fp_status);
+}
+
+static inline void restore_snan_bit_mode(CPUMIPSState *env)
+{
+    set_snan_bit_is_one((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) == 0,
+                        &env->active_fpu.fp_status);
+}
+
+static inline void restore_fp_status(CPUMIPSState *env)
+{
+    restore_rounding_mode(env);
+    restore_flush_mode(env);
+    restore_snan_bit_mode(env);
+}
+
+static inline void restore_msa_fp_status(CPUMIPSState *env)
+{
+    float_status *status = &env->active_tc.msa_fp_status;
+    int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM;
+    bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0;
+
+    set_float_rounding_mode(ieee_rm[rounding_mode], status);
+    set_flush_to_zero(flush_to_zero, status);
+    set_flush_inputs_to_zero(flush_to_zero, status);
+}
+
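+/*
+ * A small worked example, assuming a core with PABITS = 36: with
+ * MIPS_HFLAG_ELPA set, restore_pamask() yields
+ * PAMask = (1ULL << 36) - 1 = 0xFFFFFFFFFULL, i.e. a full 36-bit
+ * physical address space; without ELPA it falls back to PAMASK_BASE.
+ */
+static inline void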
restore_pamask(CPUMIPSState *env) +{ + if (env->hflags & MIPS_HFLAG_ELPA) { + env->PAMask = (1ULL << env->PABITS) - 1; + } else { + env->PAMask = PAMASK_BASE; + } +} + +static inline int mips_vpe_active(CPUMIPSState *env) +{ + int active = 1; + + /* Check that the VPE is enabled. */ + if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) { + active = 0; + } + /* Check that the VPE is activated. */ + if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) { + active = 0; + } + + /* + * Now verify that there are active thread contexts in the VPE. + * + * This assumes the CPU model will internally reschedule threads + * if the active one goes to sleep. If there are no threads available + * the active one will be in a sleeping state, and we can turn off + * the entire VPE. + */ + if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) { + /* TC is not activated. */ + active = 0; + } + if (env->active_tc.CP0_TCHalt & 1) { + /* TC is in halt state. */ + active = 0; + } + + return active; +} + +static inline int mips_vp_active(CPUMIPSState *env, CPUState *cpu) +{ + /* Check if the VP disabled other VPs (which means the VP is enabled) */ + if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) { + return 1; + } + + /* Check if the virtual processor is disabled due to a DVP */ + MIPSCPU *cs = MIPS_CPU(cpu); + if ((&cs->env != env) && + ((cs->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) { + return 0; + } + + return 1; +} + +static inline void compute_hflags(CPUMIPSState *env) +{ + env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 | + MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU | + MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 | + MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA | + MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL); + if (env->CP0_Status & (1 << CP0St_ERL)) { + env->hflags |= MIPS_HFLAG_ERL; + } + if (!(env->CP0_Status & (1 << CP0St_EXL)) && + !(env->CP0_Status & (1 << CP0St_ERL)) && + !(env->hflags & MIPS_HFLAG_DM)) { + env->hflags |= (env->CP0_Status >> CP0St_KSU) & + MIPS_HFLAG_KSU; + } +#if defined(TARGET_MIPS64) + if ((env->insn_flags & ISA_MIPS3) && + (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) || + (env->CP0_Status & (1 << CP0St_PX)) || + (env->CP0_Status & (1 << CP0St_UX)))) { + env->hflags |= MIPS_HFLAG_64; + } + + if (!(env->insn_flags & ISA_MIPS3)) { + env->hflags |= MIPS_HFLAG_AWRAP; + } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) && + !(env->CP0_Status & (1 << CP0St_UX))) { + env->hflags |= MIPS_HFLAG_AWRAP; + } else if (env->insn_flags & ISA_MIPS64R6) { + /* Address wrapping for Supervisor and Kernel is specified in R6 */ + if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) && + !(env->CP0_Status & (1 << CP0St_SX))) || + (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) && + !(env->CP0_Status & (1 << CP0St_KX)))) { + env->hflags |= MIPS_HFLAG_AWRAP; + } + } +#endif + if (((env->CP0_Status & (1 << CP0St_CU0)) && + !(env->insn_flags & ISA_MIPS32R6)) || + !(env->hflags & MIPS_HFLAG_KSU)) { + env->hflags |= MIPS_HFLAG_CP0; + } + if (env->CP0_Status & (1 << CP0St_CU1)) { + env->hflags |= MIPS_HFLAG_FPU; + } + if (env->CP0_Status & (1 << CP0St_FR)) { + env->hflags |= MIPS_HFLAG_F64; + } + if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) && + (env->CP0_Config5 & (1 << CP0C5_SBRI))) { + env->hflags |= MIPS_HFLAG_SBRI; + } + if (env->insn_flags & ASE_DSP_R3) { + /* + * Our cpu supports DSP R3 ASE, so enable + * access to DSP R3 resources. 
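+ * Access is additionally gated by the Status.MX bit, tested just below.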
+ */
+        if (env->CP0_Status & (1 << CP0St_MX)) {
+            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
+                           MIPS_HFLAG_DSP_R3;
+        }
+    } else if (env->insn_flags & ASE_DSP_R2) {
+        /*
+         * Our cpu supports DSP R2 ASE, so enable
+         * access to DSP R2 resources.
+         */
+        if (env->CP0_Status & (1 << CP0St_MX)) {
+            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2;
+        }
+
+    } else if (env->insn_flags & ASE_DSP) {
+        /*
+         * Our cpu supports DSP ASE, so enable
+         * access to DSP resources.
+         */
+        if (env->CP0_Status & (1 << CP0St_MX)) {
+            env->hflags |= MIPS_HFLAG_DSP;
+        }
+
+    }
+    if (env->insn_flags & ISA_MIPS32R2) {
+        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
+            env->hflags |= MIPS_HFLAG_COP1X;
+        }
+    } else if (env->insn_flags & ISA_MIPS32) {
+        if (env->hflags & MIPS_HFLAG_64) {
+            env->hflags |= MIPS_HFLAG_COP1X;
+        }
+    } else if (env->insn_flags & ISA_MIPS4) {
+        /*
+         * All supported MIPS IV CPUs use the XX (CU3) to enable
+         * and disable the MIPS IV extensions to the MIPS III ISA.
+         * Some other MIPS IV CPUs ignore the bit, so the check here
+         * would be too restrictive for them.
+         */
+        if (env->CP0_Status & (1U << CP0St_CU3)) {
+            env->hflags |= MIPS_HFLAG_COP1X;
+        }
+    }
+    if (env->insn_flags & ASE_MSA) {
+        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
+            env->hflags |= MIPS_HFLAG_MSA;
+        }
+    }
+    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
+        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
+            env->hflags |= MIPS_HFLAG_FRE;
+        }
+    }
+    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
+        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
+            env->hflags |= MIPS_HFLAG_ELPA;
+        }
+    }
+}
+
+void cpu_mips_tlb_flush(CPUMIPSState *env);
+void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
+void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
+void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);
+
+void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
+                                          int error_code, uintptr_t pc);
+
+static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
+                                                    uint32_t exception,
+                                                    uintptr_t pc)
+{
+    do_raise_exception_err(env, exception, 0, pc);
+}
+
+#endif
diff --git a/qemu/target-mips/lmi_helper.c b/qemu/target/mips/lmi_helper.c
similarity index 98%
rename from qemu/target-mips/lmi_helper.c
rename to qemu/target/mips/lmi_helper.c
index bbfcd59c..6c645cf6 100644
--- a/qemu/target-mips/lmi_helper.c
+++ b/qemu/target/mips/lmi_helper.c
@@ -17,12 +17,15 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
 
+#include "qemu/osdep.h"
 #include "cpu.h"
 #include "exec/helper-proto.h"
 
-/* If the byte ordering doesn't matter, i.e. all columns are treated
-   identically, then this union can be used directly.  If byte ordering
-   does matter, we generally ignore dumping to memory.  */
+/*
+ * If the byte ordering doesn't matter, i.e. all columns are treated
+ * identically, then this union can be used directly. If byte ordering
+ * does matter, we generally ignore dumping to memory.
+ */
 typedef union {
     uint8_t  ub[8];
     int8_t   sb[8];
diff --git a/qemu/target/mips/mips-defs.h b/qemu/target/mips/mips-defs.h
new file mode 100644
index 00000000..a831bb43
--- /dev/null
+++ b/qemu/target/mips/mips-defs.h
@@ -0,0 +1,105 @@
+#ifndef QEMU_MIPS_DEFS_H
+#define QEMU_MIPS_DEFS_H
+
+/*
+ * If we want to use host float regs...
+ *
+ * #define USE_HOST_FLOAT_REGS
+ */
+
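+/*
+ * Note that the CPU_* masks defined below compose transitively: e.g.
+ * CPU_MIPS32R2 = CPU_MIPS32 | ISA_MIPS32R2, and CPU_MIPS32 in turn
+ * includes CPU_MIPS2, so a test such as (env->insn_flags & ISA_MIPS2)
+ * matches every ISA level from MIPS II upward.
+ */
+
+/* Real pages are variable size...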
*/ +#define MIPS_TLB_MAX 128 + +/* + * bit definitions for insn_flags (ISAs/ASEs flags) + * ------------------------------------------------ + */ +/* + * bits 0-31: MIPS base instruction sets + */ +#define ISA_MIPS1 0x0000000000000001ULL +#define ISA_MIPS2 0x0000000000000002ULL +#define ISA_MIPS3 0x0000000000000004ULL +#define ISA_MIPS4 0x0000000000000008ULL +#define ISA_MIPS5 0x0000000000000010ULL +#define ISA_MIPS32 0x0000000000000020ULL +#define ISA_MIPS32R2 0x0000000000000040ULL +#define ISA_MIPS64 0x0000000000000080ULL +#define ISA_MIPS64R2 0x0000000000000100ULL +#define ISA_MIPS32R3 0x0000000000000200ULL +#define ISA_MIPS64R3 0x0000000000000400ULL +#define ISA_MIPS32R5 0x0000000000000800ULL +#define ISA_MIPS64R5 0x0000000000001000ULL +#define ISA_MIPS32R6 0x0000000000002000ULL +#define ISA_MIPS64R6 0x0000000000004000ULL +#define ISA_NANOMIPS32 0x0000000000008000ULL +/* + * bits 32-47: MIPS ASEs + */ +#define ASE_MIPS16 0x0000000100000000ULL +#define ASE_MIPS3D 0x0000000200000000ULL +#define ASE_MDMX 0x0000000400000000ULL +#define ASE_DSP 0x0000000800000000ULL +#define ASE_DSP_R2 0x0000001000000000ULL +#define ASE_DSP_R3 0x0000002000000000ULL +#define ASE_MT 0x0000004000000000ULL +#define ASE_SMARTMIPS 0x0000008000000000ULL +#define ASE_MICROMIPS 0x0000010000000000ULL +#define ASE_MSA 0x0000020000000000ULL +/* + * bits 48-55: vendor-specific base instruction sets + */ +#define INSN_LOONGSON2E 0x0001000000000000ULL +#define INSN_LOONGSON2F 0x0002000000000000ULL +#define INSN_VR54XX 0x0004000000000000ULL +#define INSN_R5900 0x0008000000000000ULL +/* + * bits 56-63: vendor-specific ASEs + */ +#define ASE_MMI 0x0100000000000000ULL +#define ASE_MXU 0x0200000000000000ULL + +/* MIPS CPU defines. */ +#define CPU_MIPS1 (ISA_MIPS1) +#define CPU_MIPS2 (CPU_MIPS1 | ISA_MIPS2) +#define CPU_MIPS3 (CPU_MIPS2 | ISA_MIPS3) +#define CPU_MIPS4 (CPU_MIPS3 | ISA_MIPS4) +#define CPU_VR54XX (CPU_MIPS4 | INSN_VR54XX) +#define CPU_R5900 (CPU_MIPS3 | INSN_R5900) +#define CPU_LOONGSON2E (CPU_MIPS3 | INSN_LOONGSON2E) +#define CPU_LOONGSON2F (CPU_MIPS3 | INSN_LOONGSON2F) + +#define CPU_MIPS5 (CPU_MIPS4 | ISA_MIPS5) + +/* MIPS Technologies "Release 1" */ +#define CPU_MIPS32 (CPU_MIPS2 | ISA_MIPS32) +#define CPU_MIPS64 (CPU_MIPS5 | CPU_MIPS32 | ISA_MIPS64) + +/* MIPS Technologies "Release 2" */ +#define CPU_MIPS32R2 (CPU_MIPS32 | ISA_MIPS32R2) +#define CPU_MIPS64R2 (CPU_MIPS64 | CPU_MIPS32R2 | ISA_MIPS64R2) + +/* MIPS Technologies "Release 3" */ +#define CPU_MIPS32R3 (CPU_MIPS32R2 | ISA_MIPS32R3) +#define CPU_MIPS64R3 (CPU_MIPS64R2 | CPU_MIPS32R3 | ISA_MIPS64R3) + +/* MIPS Technologies "Release 5" */ +#define CPU_MIPS32R5 (CPU_MIPS32R3 | ISA_MIPS32R5) +#define CPU_MIPS64R5 (CPU_MIPS64R3 | CPU_MIPS32R5 | ISA_MIPS64R5) + +/* MIPS Technologies "Release 6" */ +#define CPU_MIPS32R6 (CPU_MIPS32R5 | ISA_MIPS32R6) +#define CPU_MIPS64R6 (CPU_MIPS64R5 | CPU_MIPS32R6 | ISA_MIPS64R6) + +/* Wave Computing: "nanoMIPS" */ +#define CPU_NANOMIPS32 (CPU_MIPS32R6 | ISA_NANOMIPS32) + +/* + * Strictly follow the architecture standard: + * - Disallow "special" instruction handling for PMON/SPIM. + * Note that we still maintain Count/Compare to match the host clock. + * + * #define MIPS_STRICT_STANDARD 1 + */ + +#endif /* QEMU_MIPS_DEFS_H */ diff --git a/qemu/target/mips/msa_helper.c b/qemu/target/mips/msa_helper.c new file mode 100644 index 00000000..c3501927 --- /dev/null +++ b/qemu/target/mips/msa_helper.c @@ -0,0 +1,7419 @@ +/* + * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU. 
+ *
+ * Copyright (c) 2014 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+/* Data format min and max values */
+#define DF_BITS(df) (1ULL << ((df) + 3))
+
+#define DF_MAX_INT(df)  (int64_t)((1LL << (DF_BITS(df) - 1)) - 1)
+#define M_MAX_INT(m)    (int64_t)((1LL << ((m) - 1)) - 1)
+
+#ifdef _MSC_VER
+#define DF_MIN_INT(df)  (int64_t)(0 - (1LL << (DF_BITS(df) - 1)))
+#define M_MIN_INT(m)    (int64_t)(0 - (1LL << ((m) - 1)))
+
+#define DF_MAX_UINT(df) (uint64_t)(0xffffffffffffffffULL >> (64 - DF_BITS(df)))
+#define M_MAX_UINT(m)   (uint64_t)(0xffffffffffffffffULL >> (64 - (m)))
+#else
+#define DF_MIN_INT(df)  (int64_t)(-(1LL << (DF_BITS(df) - 1)))
+#define M_MIN_INT(m)    (int64_t)(-(1LL << ((m) - 1)))
+
+#define DF_MAX_UINT(df) (uint64_t)(-1ULL >> (64 - DF_BITS(df)))
+#define M_MAX_UINT(m)   (uint64_t)(-1ULL >> (64 - (m)))
+#endif
+
+#define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df))
+#define SIGNED(x, df)                                                   \
+    ((((int64_t)x) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)))
+
+/* Element-by-element access macros */
+#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
+
+
+
+/*
+ * Bit Count
+ * ---------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | NLOC.B        | Vector Leading Ones Count (byte)                         |
+ * | NLOC.H        | Vector Leading Ones Count (halfword)                     |
+ * | NLOC.W        | Vector Leading Ones Count (word)                         |
+ * | NLOC.D        | Vector Leading Ones Count (doubleword)                   |
+ * | NLZC.B        | Vector Leading Zeros Count (byte)                        |
+ * | NLZC.H        | Vector Leading Zeros Count (halfword)                    |
+ * | NLZC.W        | Vector Leading Zeros Count (word)                        |
+ * | NLZC.D        | Vector Leading Zeros Count (doubleword)                  |
+ * | PCNT.B        | Vector Population Count (byte)                           |
+ * | PCNT.H        | Vector Population Count (halfword)                       |
+ * | PCNT.W        | Vector Population Count (word)                           |
+ * | PCNT.D        | Vector Population Count (doubleword)                     |
+ * +---------------+----------------------------------------------------------+
+ */
+
+static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg)
+{
+    uint64_t x, y;
+    int n, c;
+
+    x = UNSIGNED(arg, df);
+    n = DF_BITS(df);
+    c = DF_BITS(df) / 2;
+
+    do {
+        y = x >> c;
+        if (y != 0) {
+            n = n - c;
+            x = y;
+        }
+        c = c >> 1;
+    } while (c != 0);
+
+    return n - x;
+}
+
+static inline int64_t msa_nloc_df(uint32_t df, int64_t arg)
+{
+    return msa_nlzc_df(df, UNSIGNED((~arg), df));
+}
+
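+/*
+ * A short worked trace of the binary search above: for the byte value
+ * 0x0f, msa_nlzc_df(DF_BYTE, 0x0f) starts with n = 8, c = 4; the c = 2
+ * and c = 1 steps leave n = 5, x = 1, so the function returns
+ * n - x = 4 leading zeros. msa_nloc_df simply complements its argument
+ * first, so msa_nloc_df(DF_BYTE, 0xf0) also yields 4 (leading ones).
+ * (msa_pcnt_df below uses the classic parallel bit-count reduction.)
+ */
+
+void helper_msa_nloc_b(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+    pwd->b[0]  = msa_nloc_df(DF_BYTE, pws->b[0]);
+    pwd->b[1]  = msa_nloc_df(DF_BYTE, pws->b[1]);
+    pwd->b[2]  = msa_nloc_df(DF_BYTE, pws->b[2]);
+    pwd->b[3]  = msa_nloc_df(DF_BYTE, pws->b[3]);
+    pwd->b[4]  = msa_nloc_df(DF_BYTE, pws->b[4]);
+    pwd->b[5]  = msa_nloc_df(DF_BYTE, pws->b[5]);
+    pwd->b[6] =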
msa_nloc_df(DF_BYTE, pws->b[6]); + pwd->b[7] = msa_nloc_df(DF_BYTE, pws->b[7]); + pwd->b[8] = msa_nloc_df(DF_BYTE, pws->b[8]); + pwd->b[9] = msa_nloc_df(DF_BYTE, pws->b[9]); + pwd->b[10] = msa_nloc_df(DF_BYTE, pws->b[10]); + pwd->b[11] = msa_nloc_df(DF_BYTE, pws->b[11]); + pwd->b[12] = msa_nloc_df(DF_BYTE, pws->b[12]); + pwd->b[13] = msa_nloc_df(DF_BYTE, pws->b[13]); + pwd->b[14] = msa_nloc_df(DF_BYTE, pws->b[14]); + pwd->b[15] = msa_nloc_df(DF_BYTE, pws->b[15]); +} + +void helper_msa_nloc_h(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->h[0] = msa_nloc_df(DF_HALF, pws->h[0]); + pwd->h[1] = msa_nloc_df(DF_HALF, pws->h[1]); + pwd->h[2] = msa_nloc_df(DF_HALF, pws->h[2]); + pwd->h[3] = msa_nloc_df(DF_HALF, pws->h[3]); + pwd->h[4] = msa_nloc_df(DF_HALF, pws->h[4]); + pwd->h[5] = msa_nloc_df(DF_HALF, pws->h[5]); + pwd->h[6] = msa_nloc_df(DF_HALF, pws->h[6]); + pwd->h[7] = msa_nloc_df(DF_HALF, pws->h[7]); +} + +void helper_msa_nloc_w(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->w[0] = msa_nloc_df(DF_WORD, pws->w[0]); + pwd->w[1] = msa_nloc_df(DF_WORD, pws->w[1]); + pwd->w[2] = msa_nloc_df(DF_WORD, pws->w[2]); + pwd->w[3] = msa_nloc_df(DF_WORD, pws->w[3]); +} + +void helper_msa_nloc_d(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->d[0] = msa_nloc_df(DF_DOUBLE, pws->d[0]); + pwd->d[1] = msa_nloc_df(DF_DOUBLE, pws->d[1]); +} + +void helper_msa_nlzc_b(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->b[0] = msa_nlzc_df(DF_BYTE, pws->b[0]); + pwd->b[1] = msa_nlzc_df(DF_BYTE, pws->b[1]); + pwd->b[2] = msa_nlzc_df(DF_BYTE, pws->b[2]); + pwd->b[3] = msa_nlzc_df(DF_BYTE, pws->b[3]); + pwd->b[4] = msa_nlzc_df(DF_BYTE, pws->b[4]); + pwd->b[5] = msa_nlzc_df(DF_BYTE, pws->b[5]); + pwd->b[6] = msa_nlzc_df(DF_BYTE, pws->b[6]); + pwd->b[7] = msa_nlzc_df(DF_BYTE, pws->b[7]); + pwd->b[8] = msa_nlzc_df(DF_BYTE, pws->b[8]); + pwd->b[9] = msa_nlzc_df(DF_BYTE, pws->b[9]); + pwd->b[10] = msa_nlzc_df(DF_BYTE, pws->b[10]); + pwd->b[11] = msa_nlzc_df(DF_BYTE, pws->b[11]); + pwd->b[12] = msa_nlzc_df(DF_BYTE, pws->b[12]); + pwd->b[13] = msa_nlzc_df(DF_BYTE, pws->b[13]); + pwd->b[14] = msa_nlzc_df(DF_BYTE, pws->b[14]); + pwd->b[15] = msa_nlzc_df(DF_BYTE, pws->b[15]); +} + +void helper_msa_nlzc_h(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->h[0] = msa_nlzc_df(DF_HALF, pws->h[0]); + pwd->h[1] = msa_nlzc_df(DF_HALF, pws->h[1]); + pwd->h[2] = msa_nlzc_df(DF_HALF, pws->h[2]); + pwd->h[3] = msa_nlzc_df(DF_HALF, pws->h[3]); + pwd->h[4] = msa_nlzc_df(DF_HALF, pws->h[4]); + pwd->h[5] = msa_nlzc_df(DF_HALF, pws->h[5]); + pwd->h[6] = msa_nlzc_df(DF_HALF, pws->h[6]); + pwd->h[7] = msa_nlzc_df(DF_HALF, pws->h[7]); +} + +void helper_msa_nlzc_w(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->w[0] = msa_nlzc_df(DF_WORD, pws->w[0]); + pwd->w[1] = msa_nlzc_df(DF_WORD, pws->w[1]); + pwd->w[2] = msa_nlzc_df(DF_WORD, pws->w[2]); + pwd->w[3] = msa_nlzc_df(DF_WORD, pws->w[3]); +} + +void helper_msa_nlzc_d(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + 
wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->d[0] = msa_nlzc_df(DF_DOUBLE, pws->d[0]); + pwd->d[1] = msa_nlzc_df(DF_DOUBLE, pws->d[1]); +} + +static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg) +{ + uint64_t x; + + x = UNSIGNED(arg, df); + + x = (x & 0x5555555555555555ULL) + ((x >> 1) & 0x5555555555555555ULL); + x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL); + x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL); + x = (x & 0x00FF00FF00FF00FFULL) + ((x >> 8) & 0x00FF00FF00FF00FFULL); + x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL); + x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32)); + + return x; +} + +void helper_msa_pcnt_b(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->b[0] = msa_pcnt_df(DF_BYTE, pws->b[0]); + pwd->b[1] = msa_pcnt_df(DF_BYTE, pws->b[1]); + pwd->b[2] = msa_pcnt_df(DF_BYTE, pws->b[2]); + pwd->b[3] = msa_pcnt_df(DF_BYTE, pws->b[3]); + pwd->b[4] = msa_pcnt_df(DF_BYTE, pws->b[4]); + pwd->b[5] = msa_pcnt_df(DF_BYTE, pws->b[5]); + pwd->b[6] = msa_pcnt_df(DF_BYTE, pws->b[6]); + pwd->b[7] = msa_pcnt_df(DF_BYTE, pws->b[7]); + pwd->b[8] = msa_pcnt_df(DF_BYTE, pws->b[8]); + pwd->b[9] = msa_pcnt_df(DF_BYTE, pws->b[9]); + pwd->b[10] = msa_pcnt_df(DF_BYTE, pws->b[10]); + pwd->b[11] = msa_pcnt_df(DF_BYTE, pws->b[11]); + pwd->b[12] = msa_pcnt_df(DF_BYTE, pws->b[12]); + pwd->b[13] = msa_pcnt_df(DF_BYTE, pws->b[13]); + pwd->b[14] = msa_pcnt_df(DF_BYTE, pws->b[14]); + pwd->b[15] = msa_pcnt_df(DF_BYTE, pws->b[15]); +} + +void helper_msa_pcnt_h(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->h[0] = msa_pcnt_df(DF_HALF, pws->h[0]); + pwd->h[1] = msa_pcnt_df(DF_HALF, pws->h[1]); + pwd->h[2] = msa_pcnt_df(DF_HALF, pws->h[2]); + pwd->h[3] = msa_pcnt_df(DF_HALF, pws->h[3]); + pwd->h[4] = msa_pcnt_df(DF_HALF, pws->h[4]); + pwd->h[5] = msa_pcnt_df(DF_HALF, pws->h[5]); + pwd->h[6] = msa_pcnt_df(DF_HALF, pws->h[6]); + pwd->h[7] = msa_pcnt_df(DF_HALF, pws->h[7]); +} + +void helper_msa_pcnt_w(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->w[0] = msa_pcnt_df(DF_WORD, pws->w[0]); + pwd->w[1] = msa_pcnt_df(DF_WORD, pws->w[1]); + pwd->w[2] = msa_pcnt_df(DF_WORD, pws->w[2]); + pwd->w[3] = msa_pcnt_df(DF_WORD, pws->w[3]); +} + +void helper_msa_pcnt_d(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + pwd->d[0] = msa_pcnt_df(DF_DOUBLE, pws->d[0]); + pwd->d[1] = msa_pcnt_df(DF_DOUBLE, pws->d[1]); +} + + +/* + * Bit Move + * -------- + * + * +---------------+----------------------------------------------------------+ + * | BINSL.B | Vector Bit Insert Left (byte) | + * | BINSL.H | Vector Bit Insert Left (halfword) | + * | BINSL.W | Vector Bit Insert Left (word) | + * | BINSL.D | Vector Bit Insert Left (doubleword) | + * | BINSR.B | Vector Bit Insert Right (byte) | + * | BINSR.H | Vector Bit Insert Right (halfword) | + * | BINSR.W | Vector Bit Insert Right (word) | + * | BINSR.D | Vector Bit Insert Right (doubleword) | + * | BMNZ.V | Vector Bit Move If Not Zero | + * | BMZ.V | Vector Bit Move If Zero | + * | BSEL.V | Vector Bit Select | + * 
+---------------+----------------------------------------------------------+ + */ + +/* Data format bit position and unsigned values */ +#define BIT_POSITION(x, df) ((uint64_t)(x) % DF_BITS(df)) + +static inline int64_t msa_binsl_df(uint32_t df, + int64_t dest, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_dest = UNSIGNED(dest, df); + int32_t sh_d = BIT_POSITION(arg2, df) + 1; + int32_t sh_a = DF_BITS(df) - sh_d; + if (sh_d == DF_BITS(df)) { + return u_arg1; + } else { + return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) | + UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df); + } +} + +void helper_msa_binsl_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_binsl_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]); + pwd->b[1] = msa_binsl_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]); + pwd->b[2] = msa_binsl_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]); + pwd->b[3] = msa_binsl_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]); + pwd->b[4] = msa_binsl_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]); + pwd->b[5] = msa_binsl_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]); + pwd->b[6] = msa_binsl_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]); + pwd->b[7] = msa_binsl_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]); + pwd->b[8] = msa_binsl_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]); + pwd->b[9] = msa_binsl_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]); + pwd->b[10] = msa_binsl_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]); + pwd->b[11] = msa_binsl_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]); + pwd->b[12] = msa_binsl_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]); + pwd->b[13] = msa_binsl_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]); + pwd->b[14] = msa_binsl_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]); + pwd->b[15] = msa_binsl_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]); +} + +void helper_msa_binsl_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_binsl_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]); + pwd->h[1] = msa_binsl_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]); + pwd->h[2] = msa_binsl_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]); + pwd->h[3] = msa_binsl_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]); + pwd->h[4] = msa_binsl_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]); + pwd->h[5] = msa_binsl_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]); + pwd->h[6] = msa_binsl_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]); + pwd->h[7] = msa_binsl_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]); +} + +void helper_msa_binsl_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_binsl_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]); + pwd->w[1] = msa_binsl_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]); + pwd->w[2] = msa_binsl_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]); + pwd->w[3] = msa_binsl_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]); +} + +void helper_msa_binsl_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = 
msa_binsl_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]); + pwd->d[1] = msa_binsl_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]); +} + +static inline int64_t msa_binsr_df(uint32_t df, + int64_t dest, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_dest = UNSIGNED(dest, df); + int32_t sh_d = BIT_POSITION(arg2, df) + 1; + int32_t sh_a = DF_BITS(df) - sh_d; + if (sh_d == DF_BITS(df)) { + return u_arg1; + } else { + return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) | + UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df); + } +} + +void helper_msa_binsr_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_binsr_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]); + pwd->b[1] = msa_binsr_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]); + pwd->b[2] = msa_binsr_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]); + pwd->b[3] = msa_binsr_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]); + pwd->b[4] = msa_binsr_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]); + pwd->b[5] = msa_binsr_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]); + pwd->b[6] = msa_binsr_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]); + pwd->b[7] = msa_binsr_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]); + pwd->b[8] = msa_binsr_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]); + pwd->b[9] = msa_binsr_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]); + pwd->b[10] = msa_binsr_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]); + pwd->b[11] = msa_binsr_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]); + pwd->b[12] = msa_binsr_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]); + pwd->b[13] = msa_binsr_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]); + pwd->b[14] = msa_binsr_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]); + pwd->b[15] = msa_binsr_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]); +} + +void helper_msa_binsr_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_binsr_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]); + pwd->h[1] = msa_binsr_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]); + pwd->h[2] = msa_binsr_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]); + pwd->h[3] = msa_binsr_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]); + pwd->h[4] = msa_binsr_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]); + pwd->h[5] = msa_binsr_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]); + pwd->h[6] = msa_binsr_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]); + pwd->h[7] = msa_binsr_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]); +} + +void helper_msa_binsr_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_binsr_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]); + pwd->w[1] = msa_binsr_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]); + pwd->w[2] = msa_binsr_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]); + pwd->w[3] = msa_binsr_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]); +} + +void helper_msa_binsr_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_binsr_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]); + 
pwd->d[1] = msa_binsr_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]); +} + +void helper_msa_bmnz_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = UNSIGNED( \ + ((pwd->d[0] & (~pwt->d[0])) | (pws->d[0] & pwt->d[0])), DF_DOUBLE); + pwd->d[1] = UNSIGNED( \ + ((pwd->d[1] & (~pwt->d[1])) | (pws->d[1] & pwt->d[1])), DF_DOUBLE); +} + +void helper_msa_bmz_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = UNSIGNED( \ + ((pwd->d[0] & pwt->d[0]) | (pws->d[0] & (~pwt->d[0]))), DF_DOUBLE); + pwd->d[1] = UNSIGNED( \ + ((pwd->d[1] & pwt->d[1]) | (pws->d[1] & (~pwt->d[1]))), DF_DOUBLE); +} + +void helper_msa_bsel_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = UNSIGNED( \ + (pws->d[0] & (~pwd->d[0])) | (pwt->d[0] & pwd->d[0]), DF_DOUBLE); + pwd->d[1] = UNSIGNED( \ + (pws->d[1] & (~pwd->d[1])) | (pwt->d[1] & pwd->d[1]), DF_DOUBLE); +} + + +/* + * Bit Set + * ------- + * + * +---------------+----------------------------------------------------------+ + * | BCLR.B | Vector Bit Clear (byte) | + * | BCLR.H | Vector Bit Clear (halfword) | + * | BCLR.W | Vector Bit Clear (word) | + * | BCLR.D | Vector Bit Clear (doubleword) | + * | BNEG.B | Vector Bit Negate (byte) | + * | BNEG.H | Vector Bit Negate (halfword) | + * | BNEG.W | Vector Bit Negate (word) | + * | BNEG.D | Vector Bit Negate (doubleword) | + * | BSET.B | Vector Bit Set (byte) | + * | BSET.H | Vector Bit Set (halfword) | + * | BSET.W | Vector Bit Set (word) | + * | BSET.D | Vector Bit Set (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + +static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + return UNSIGNED(arg1 & (~(1LL << b_arg2)), df); +} + +void helper_msa_bclr_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_bclr_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_bclr_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_bclr_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_bclr_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_bclr_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_bclr_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_bclr_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_bclr_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_bclr_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_bclr_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_bclr_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_bclr_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_bclr_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_bclr_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_bclr_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_bclr_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_bclr_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = 
&(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_bclr_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_bclr_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_bclr_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_bclr_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_bclr_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_bclr_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_bclr_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_bclr_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_bclr_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_bclr_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_bclr_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_bclr_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_bclr_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_bclr_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_bclr_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_bclr_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + +static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + return UNSIGNED(arg1 ^ (1LL << b_arg2), df); +} + +void helper_msa_bneg_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_bneg_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_bneg_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_bneg_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_bneg_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_bneg_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_bneg_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_bneg_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_bneg_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_bneg_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_bneg_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_bneg_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_bneg_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_bneg_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_bneg_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_bneg_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_bneg_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_bneg_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_bneg_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_bneg_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_bneg_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_bneg_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_bneg_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_bneg_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_bneg_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_bneg_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_bneg_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = 
&(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_bneg_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_bneg_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_bneg_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_bneg_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_bneg_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_bneg_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_bneg_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + +static inline int64_t msa_bset_df(uint32_t df, int64_t arg1, + int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + return UNSIGNED(arg1 | (1LL << b_arg2), df); +} + +void helper_msa_bset_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_bset_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_bset_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_bset_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_bset_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_bset_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_bset_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_bset_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_bset_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_bset_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_bset_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_bset_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_bset_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_bset_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_bset_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_bset_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_bset_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_bset_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_bset_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_bset_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_bset_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_bset_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_bset_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_bset_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_bset_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_bset_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_bset_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_bset_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_bset_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_bset_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_bset_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_bset_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_bset_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_bset_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +/* + * Fixed Multiply + * -------------- + 
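*
+ * (A note on this group: the *_Q helpers operate on Q15/Q31 fixed-point
+ * elements; MUL_Q.H, for instance, forms the 32-bit product of two Q15
+ * operands and keeps bits 30..15, saturating the single overflowing
+ * case, 0x8000 * 0x8000.)
+ *
+ 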
* + * +---------------+----------------------------------------------------------+ + * | MADD_Q.H | Vector Fixed-Point Multiply and Add (halfword) | + * | MADD_Q.W | Vector Fixed-Point Multiply and Add (word) | + * | MADDR_Q.H | Vector Fixed-Point Multiply and Add Rounded (halfword) | + * | MADDR_Q.W | Vector Fixed-Point Multiply and Add Rounded (word) | + * | MSUB_Q.H | Vector Fixed-Point Multiply and Subtr. (halfword) | + * | MSUB_Q.W | Vector Fixed-Point Multiply and Subtr. (word) | + * | MSUBR_Q.H | Vector Fixed-Point Multiply and Subtr. Rounded (halfword)| + * | MSUBR_Q.W | Vector Fixed-Point Multiply and Subtr. Rounded (word) | + * | MUL_Q.H | Vector Fixed-Point Multiply (halfword) | + * | MUL_Q.W | Vector Fixed-Point Multiply (word) | + * | MULR_Q.H | Vector Fixed-Point Multiply Rounded (halfword) | + * | MULR_Q.W | Vector Fixed-Point Multiply Rounded (word) | + * +---------------+----------------------------------------------------------+ + */ + +/* TODO: insert Fixed Multiply group helpers here */ + + +/* + * Float Max Min + * ------------- + * + * +---------------+----------------------------------------------------------+ + * | FMAX_A.W | Vector Floating-Point Maximum (Absolute) (word) | + * | FMAX_A.D | Vector Floating-Point Maximum (Absolute) (doubleword) | + * | FMAX.W | Vector Floating-Point Maximum (word) | + * | FMAX.D | Vector Floating-Point Maximum (doubleword) | + * | FMIN_A.W | Vector Floating-Point Minimum (Absolute) (word) | + * | FMIN_A.D | Vector Floating-Point Minimum (Absolute) (doubleword) | + * | FMIN.W | Vector Floating-Point Minimum (word) | + * | FMIN.D | Vector Floating-Point Minimum (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + +/* TODO: insert Float Max Min group helpers here */ + + +/* + * Int Add + * ------- + * + * +---------------+----------------------------------------------------------+ + * | ADD_A.B | Vector Add Absolute Values (byte) | + * | ADD_A.H | Vector Add Absolute Values (halfword) | + * | ADD_A.W | Vector Add Absolute Values (word) | + * | ADD_A.D | Vector Add Absolute Values (doubleword) | + * | ADDS_A.B | Vector Signed Saturated Add (of Absolute) (byte) | + * | ADDS_A.H | Vector Signed Saturated Add (of Absolute) (halfword) | + * | ADDS_A.W | Vector Signed Saturated Add (of Absolute) (word) | + * | ADDS_A.D | Vector Signed Saturated Add (of Absolute) (doubleword) | + * | ADDS_S.B | Vector Signed Saturated Add (of Signed) (byte) | + * | ADDS_S.H | Vector Signed Saturated Add (of Signed) (halfword) | + * | ADDS_S.W | Vector Signed Saturated Add (of Signed) (word) | + * | ADDS_S.D | Vector Signed Saturated Add (of Signed) (doubleword) | + * | ADDS_U.B | Vector Unsigned Saturated Add (of Unsigned) (byte) | + * | ADDS_U.H | Vector Unsigned Saturated Add (of Unsigned) (halfword) | + * | ADDS_U.W | Vector Unsigned Saturated Add (of Unsigned) (word) | + * | ADDS_U.D | Vector Unsigned Saturated Add (of Unsigned) (doubleword) | + * | ADDV.B | Vector Add (byte) | + * | ADDV.H | Vector Add (halfword) | + * | ADDV.W | Vector Add (word) | + * | ADDV.D | Vector Add (doubleword) | + * | HADD_S.H | Vector Signed Horizontal Add (halfword) | + * | HADD_S.W | Vector Signed Horizontal Add (word) | + * | HADD_S.D | Vector Signed Horizontal Add (doubleword) | + * | HADD_U.H | Vector Unigned Horizontal Add (halfword) | + * | HADD_U.W | Vector Unigned Horizontal Add (word) | + * | HADD_U.D | Vector Unigned Horizontal Add (doubleword) | + * 
+---------------+----------------------------------------------------------+ + */ + + +static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; + uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; + return abs_arg1 + abs_arg2; +} + +void helper_msa_add_a_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_add_a_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_add_a_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_add_a_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_add_a_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_add_a_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_add_a_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_add_a_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_add_a_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_add_a_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_add_a_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_add_a_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_add_a_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_add_a_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_add_a_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_add_a_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_add_a_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_add_a_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_add_a_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_add_a_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_add_a_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_add_a_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_add_a_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_add_a_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_add_a_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_add_a_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_add_a_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_add_a_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_add_a_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_add_a_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_add_a_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_add_a_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_add_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_add_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t max_int = (uint64_t)DF_MAX_INT(df); + uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; + uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; + if (abs_arg1 > max_int || abs_arg2 > max_int) { + return (int64_t)max_int; + } else { + return (abs_arg1 < max_int - abs_arg2) ? 
abs_arg1 + abs_arg2 : max_int; + } +} + +void helper_msa_adds_a_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_adds_a_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_adds_a_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_adds_a_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_adds_a_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_adds_a_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_adds_a_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_adds_a_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_adds_a_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_adds_a_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_adds_a_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_adds_a_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_adds_a_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_adds_a_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_adds_a_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_adds_a_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_adds_a_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_adds_a_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_adds_a_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_adds_a_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_adds_a_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_adds_a_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_adds_a_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_adds_a_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_adds_a_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_adds_a_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_adds_a_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_adds_a_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_adds_a_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_adds_a_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_adds_a_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_adds_a_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_adds_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_adds_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t max_int = DF_MAX_INT(df); + int64_t min_int = DF_MIN_INT(df); + if (arg1 < 0) { + return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int; + } else { + return (arg2 < max_int - arg1) ? 
arg1 + arg2 : max_int; + } +} + +void helper_msa_adds_s_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_adds_s_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_adds_s_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_adds_s_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_adds_s_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_adds_s_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_adds_s_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_adds_s_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_adds_s_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_adds_s_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_adds_s_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_adds_s_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_adds_s_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_adds_s_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_adds_s_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_adds_s_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_adds_s_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_adds_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_adds_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_adds_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_adds_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_adds_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_adds_s_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_adds_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_adds_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_adds_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_adds_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_adds_s_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_adds_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_adds_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_adds_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_adds_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_adds_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_adds_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) +{ + uint64_t max_uint = DF_MAX_UINT(df); + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return (u_arg1 < max_uint - u_arg2) ? 
u_arg1 + u_arg2 : max_uint; +} + +void helper_msa_adds_u_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_adds_u_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_adds_u_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_adds_u_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_adds_u_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_adds_u_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_adds_u_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_adds_u_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_adds_u_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_adds_u_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_adds_u_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_adds_u_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_adds_u_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_adds_u_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_adds_u_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_adds_u_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_adds_u_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_adds_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_adds_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_adds_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_adds_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_adds_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_adds_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_adds_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_adds_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_adds_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_adds_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_adds_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_adds_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_adds_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_adds_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_adds_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_adds_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_adds_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 + arg2; +} + +void helper_msa_addv_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_addv_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_addv_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_addv_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_addv_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_addv_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_addv_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_addv_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_addv_df(DF_BYTE, 
pws->b[7], pwt->b[7]); + pwd->b[8] = msa_addv_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_addv_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_addv_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_addv_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_addv_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_addv_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_addv_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_addv_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_addv_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_addv_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_addv_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_addv_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_addv_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_addv_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_addv_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_addv_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_addv_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_addv_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_addv_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_addv_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_addv_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_addv_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_addv_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_addv_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_addv_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +#define SIGNED_EVEN(a, df) \ + ((((int64_t)(a)) << (64 - DF_BITS(df) / 2)) >> (64 - DF_BITS(df) / 2)) + +#define UNSIGNED_EVEN(a, df) \ + ((((uint64_t)(a)) << (64 - DF_BITS(df) / 2)) >> (64 - DF_BITS(df) / 2)) + +#define SIGNED_ODD(a, df) \ + ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df) / 2)) + +#define UNSIGNED_ODD(a, df) \ + ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df) / 2)) + + +static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df); +} + +void helper_msa_hadd_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_hadd_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_hadd_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_hadd_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_hadd_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_hadd_s_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_hadd_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_hadd_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_hadd_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_hadd_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_hadd_s_df(DF_WORD, 
pws->w[0], pwt->w[0]); + pwd->w[1] = msa_hadd_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_hadd_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_hadd_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_hadd_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_hadd_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_hadd_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df); +} + +void helper_msa_hadd_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_hadd_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_hadd_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_hadd_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_hadd_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_hadd_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_hadd_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_hadd_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_hadd_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_hadd_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_hadd_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_hadd_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_hadd_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_hadd_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_hadd_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_hadd_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_hadd_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +/* + * Int Average + * ----------- + * + * +---------------+----------------------------------------------------------+ + * | AVE_S.B | Vector Signed Average (byte) | + * | AVE_S.H | Vector Signed Average (halfword) | + * | AVE_S.W | Vector Signed Average (word) | + * | AVE_S.D | Vector Signed Average (doubleword) | + * | AVE_U.B | Vector Unsigned Average (byte) | + * | AVE_U.H | Vector Unsigned Average (halfword) | + * | AVE_U.W | Vector Unsigned Average (word) | + * | AVE_U.D | Vector Unsigned Average (doubleword) | + * | AVER_S.B | Vector Signed Average Rounded (byte) | + * | AVER_S.H | Vector Signed Average Rounded (halfword) | + * | AVER_S.W | Vector Signed Average Rounded (word) | + * | AVER_S.D | Vector Signed Average Rounded (doubleword) | + * | AVER_U.B | Vector Unsigned Average Rounded (byte) | + * | AVER_U.H | Vector Unsigned Average Rounded (halfword) | + * | AVER_U.W | Vector Unsigned Average Rounded (word) | + * | AVER_U.D | Vector Unsigned Average Rounded (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + +static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + /* signed shift */ + return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1); +} + +void helper_msa_ave_s_b(CPUMIPSState *env, + uint32_t 
wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_ave_s_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_ave_s_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_ave_s_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_ave_s_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_ave_s_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_ave_s_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_ave_s_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_ave_s_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_ave_s_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_ave_s_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_ave_s_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_ave_s_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_ave_s_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_ave_s_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_ave_s_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_ave_s_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_ave_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_ave_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_ave_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_ave_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_ave_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_ave_s_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_ave_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_ave_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_ave_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_ave_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_ave_s_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_ave_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_ave_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_ave_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_ave_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_ave_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_ave_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + +static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + /* unsigned shift */ + return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1); +} + +void helper_msa_ave_u_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_ave_u_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_ave_u_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_ave_u_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_ave_u_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_ave_u_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_ave_u_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_ave_u_df(DF_BYTE, pws->b[6], pwt->b[6]); + 
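+ /* msa_ave_u_df above halves each operand before adding, so the sum never wraps; the (u_arg1 & u_arg2 & 1) term restores the carry both halvings dropped */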
pwd->b[7] = msa_ave_u_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_ave_u_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_ave_u_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_ave_u_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_ave_u_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_ave_u_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_ave_u_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_ave_u_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_ave_u_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_ave_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_ave_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_ave_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_ave_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_ave_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_ave_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_ave_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_ave_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_ave_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_ave_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_ave_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_ave_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_ave_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_ave_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_ave_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_ave_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_ave_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + +static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + /* signed shift */ + return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1); +} + +void helper_msa_aver_s_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_aver_s_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_aver_s_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_aver_s_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_aver_s_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_aver_s_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_aver_s_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_aver_s_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_aver_s_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_aver_s_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_aver_s_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_aver_s_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_aver_s_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_aver_s_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_aver_s_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_aver_s_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_aver_s_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_aver_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = 
&(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_aver_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_aver_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_aver_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_aver_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_aver_s_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_aver_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_aver_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_aver_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_aver_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_aver_s_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_aver_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_aver_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_aver_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_aver_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_aver_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_aver_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + +static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + /* unsigned shift */ + return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1); +} + +void helper_msa_aver_u_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_aver_u_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_aver_u_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_aver_u_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_aver_u_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_aver_u_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_aver_u_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_aver_u_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_aver_u_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_aver_u_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_aver_u_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_aver_u_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_aver_u_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_aver_u_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_aver_u_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_aver_u_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_aver_u_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_aver_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_aver_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_aver_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_aver_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_aver_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_aver_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_aver_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_aver_u_df(DF_HALF, pws->h[6], pwt->h[6]); + 
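+ /* AVER is the round-to-nearest-up variant: ((u_arg1 | u_arg2) & 1) adds one whenever either operand lost a half-bit to the shifts */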
pwd->h[7] = msa_aver_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_aver_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_aver_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_aver_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_aver_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_aver_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_aver_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_aver_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_aver_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +/* + * Int Compare + * ----------- + * + * +---------------+----------------------------------------------------------+ + * | CEQ.B | Vector Compare Equal (byte) | + * | CEQ.H | Vector Compare Equal (halfword) | + * | CEQ.W | Vector Compare Equal (word) | + * | CEQ.D | Vector Compare Equal (doubleword) | + * | CLE_S.B | Vector Compare Signed Less Than or Equal (byte) | + * | CLE_S.H | Vector Compare Signed Less Than or Equal (halfword) | + * | CLE_S.W | Vector Compare Signed Less Than or Equal (word) | + * | CLE_S.D | Vector Compare Signed Less Than or Equal (doubleword) | + * | CLE_U.B | Vector Compare Unsigned Less Than or Equal (byte) | + * | CLE_U.H | Vector Compare Unsigned Less Than or Equal (halfword) | + * | CLE_U.W | Vector Compare Unsigned Less Than or Equal (word) | + * | CLE_U.D | Vector Compare Unsigned Less Than or Equal (doubleword) | + * | CLT_S.B | Vector Compare Signed Less Than (byte) | + * | CLT_S.H | Vector Compare Signed Less Than (halfword) | + * | CLT_S.W | Vector Compare Signed Less Than (word) | + * | CLT_S.D | Vector Compare Signed Less Than (doubleword) | + * | CLT_U.B | Vector Compare Unsigned Less Than (byte) | + * | CLT_U.H | Vector Compare Unsigned Less Than (halfword) | + * | CLT_U.W | Vector Compare Unsigned Less Than (word) | + * | CLT_U.D | Vector Compare Unsigned Less Than (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + +static inline int64_t msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 == arg2 ? -1 : 0; +} + +static inline int8_t msa_ceq_b(int8_t arg1, int8_t arg2) +{ + return arg1 == arg2 ? 
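+ /* MSA compares produce a lane-wide mask: -1 (all bits set) when true, 0 when false */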
-1 : 0; +} + +void helper_msa_ceq_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_ceq_b(pws->b[0], pwt->b[0]); + pwd->b[1] = msa_ceq_b(pws->b[1], pwt->b[1]); + pwd->b[2] = msa_ceq_b(pws->b[2], pwt->b[2]); + pwd->b[3] = msa_ceq_b(pws->b[3], pwt->b[3]); + pwd->b[4] = msa_ceq_b(pws->b[4], pwt->b[4]); + pwd->b[5] = msa_ceq_b(pws->b[5], pwt->b[5]); + pwd->b[6] = msa_ceq_b(pws->b[6], pwt->b[6]); + pwd->b[7] = msa_ceq_b(pws->b[7], pwt->b[7]); + pwd->b[8] = msa_ceq_b(pws->b[8], pwt->b[8]); + pwd->b[9] = msa_ceq_b(pws->b[9], pwt->b[9]); + pwd->b[10] = msa_ceq_b(pws->b[10], pwt->b[10]); + pwd->b[11] = msa_ceq_b(pws->b[11], pwt->b[11]); + pwd->b[12] = msa_ceq_b(pws->b[12], pwt->b[12]); + pwd->b[13] = msa_ceq_b(pws->b[13], pwt->b[13]); + pwd->b[14] = msa_ceq_b(pws->b[14], pwt->b[14]); + pwd->b[15] = msa_ceq_b(pws->b[15], pwt->b[15]); +} + +static inline int16_t msa_ceq_h(int16_t arg1, int16_t arg2) +{ + return arg1 == arg2 ? -1 : 0; +} + +void helper_msa_ceq_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_ceq_h(pws->h[0], pwt->h[0]); + pwd->h[1] = msa_ceq_h(pws->h[1], pwt->h[1]); + pwd->h[2] = msa_ceq_h(pws->h[2], pwt->h[2]); + pwd->h[3] = msa_ceq_h(pws->h[3], pwt->h[3]); + pwd->h[4] = msa_ceq_h(pws->h[4], pwt->h[4]); + pwd->h[5] = msa_ceq_h(pws->h[5], pwt->h[5]); + pwd->h[6] = msa_ceq_h(pws->h[6], pwt->h[6]); + pwd->h[7] = msa_ceq_h(pws->h[7], pwt->h[7]); +} + +static inline int32_t msa_ceq_w(int32_t arg1, int32_t arg2) +{ + return arg1 == arg2 ? -1 : 0; +} + +void helper_msa_ceq_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_ceq_w(pws->w[0], pwt->w[0]); + pwd->w[1] = msa_ceq_w(pws->w[1], pwt->w[1]); + pwd->w[2] = msa_ceq_w(pws->w[2], pwt->w[2]); + pwd->w[3] = msa_ceq_w(pws->w[3], pwt->w[3]); +} + +static inline int64_t msa_ceq_d(int64_t arg1, int64_t arg2) +{ + return arg1 == arg2 ? -1 : 0; +} + +void helper_msa_ceq_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_ceq_d(pws->d[0], pwt->d[0]); + pwd->d[1] = msa_ceq_d(pws->d[1], pwt->d[1]); +} + +static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 <= arg2 ? 
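+ /* lane values arrive sign-extended to int64_t, so one signed compare serves every df */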
-1 : 0; +} + +void helper_msa_cle_s_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_cle_s_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_cle_s_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_cle_s_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_cle_s_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_cle_s_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_cle_s_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_cle_s_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_cle_s_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_cle_s_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_cle_s_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_cle_s_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_cle_s_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_cle_s_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_cle_s_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_cle_s_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_cle_s_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_cle_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_cle_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_cle_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_cle_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_cle_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_cle_s_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_cle_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_cle_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_cle_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_cle_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_cle_s_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_cle_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_cle_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_cle_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_cle_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_cle_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_cle_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + +static inline int64_t msa_cle_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg1 <= u_arg2 ? 
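+ /* UNSIGNED() masks both operands down to the df width before the unsigned compare */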
-1 : 0; +} + +void helper_msa_cle_u_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_cle_u_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_cle_u_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_cle_u_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_cle_u_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_cle_u_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_cle_u_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_cle_u_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_cle_u_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_cle_u_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_cle_u_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_cle_u_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_cle_u_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_cle_u_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_cle_u_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_cle_u_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_cle_u_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_cle_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_cle_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_cle_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_cle_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_cle_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_cle_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_cle_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_cle_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_cle_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_cle_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_cle_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_cle_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_cle_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_cle_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_cle_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_cle_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_cle_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + +static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 < arg2 ? -1 : 0; +} + +static inline int8_t msa_clt_s_b(int8_t arg1, int8_t arg2) +{ + return arg1 < arg2 ? 
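+ /* width-specific twin of msa_clt_s_df above; the int8_t lane type already fixes the width */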
-1 : 0; +} + +void helper_msa_clt_s_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_clt_s_b(pws->b[0], pwt->b[0]); + pwd->b[1] = msa_clt_s_b(pws->b[1], pwt->b[1]); + pwd->b[2] = msa_clt_s_b(pws->b[2], pwt->b[2]); + pwd->b[3] = msa_clt_s_b(pws->b[3], pwt->b[3]); + pwd->b[4] = msa_clt_s_b(pws->b[4], pwt->b[4]); + pwd->b[5] = msa_clt_s_b(pws->b[5], pwt->b[5]); + pwd->b[6] = msa_clt_s_b(pws->b[6], pwt->b[6]); + pwd->b[7] = msa_clt_s_b(pws->b[7], pwt->b[7]); + pwd->b[8] = msa_clt_s_b(pws->b[8], pwt->b[8]); + pwd->b[9] = msa_clt_s_b(pws->b[9], pwt->b[9]); + pwd->b[10] = msa_clt_s_b(pws->b[10], pwt->b[10]); + pwd->b[11] = msa_clt_s_b(pws->b[11], pwt->b[11]); + pwd->b[12] = msa_clt_s_b(pws->b[12], pwt->b[12]); + pwd->b[13] = msa_clt_s_b(pws->b[13], pwt->b[13]); + pwd->b[14] = msa_clt_s_b(pws->b[14], pwt->b[14]); + pwd->b[15] = msa_clt_s_b(pws->b[15], pwt->b[15]); +} + +static inline int16_t msa_clt_s_h(int16_t arg1, int16_t arg2) +{ + return arg1 < arg2 ? -1 : 0; +} + +void helper_msa_clt_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_clt_s_h(pws->h[0], pwt->h[0]); + pwd->h[1] = msa_clt_s_h(pws->h[1], pwt->h[1]); + pwd->h[2] = msa_clt_s_h(pws->h[2], pwt->h[2]); + pwd->h[3] = msa_clt_s_h(pws->h[3], pwt->h[3]); + pwd->h[4] = msa_clt_s_h(pws->h[4], pwt->h[4]); + pwd->h[5] = msa_clt_s_h(pws->h[5], pwt->h[5]); + pwd->h[6] = msa_clt_s_h(pws->h[6], pwt->h[6]); + pwd->h[7] = msa_clt_s_h(pws->h[7], pwt->h[7]); +} + +static inline int32_t msa_clt_s_w(int32_t arg1, int32_t arg2) +{ + return arg1 < arg2 ? -1 : 0; +} + +void helper_msa_clt_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_clt_s_w(pws->w[0], pwt->w[0]); + pwd->w[1] = msa_clt_s_w(pws->w[1], pwt->w[1]); + pwd->w[2] = msa_clt_s_w(pws->w[2], pwt->w[2]); + pwd->w[3] = msa_clt_s_w(pws->w[3], pwt->w[3]); +} + +static inline int64_t msa_clt_s_d(int64_t arg1, int64_t arg2) +{ + return arg1 < arg2 ? -1 : 0; +} + +void helper_msa_clt_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_clt_s_d(pws->d[0], pwt->d[0]); + pwd->d[1] = msa_clt_s_d(pws->d[1], pwt->d[1]); +} + +static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg1 < u_arg2 ? 
-1 : 0; +} + +void helper_msa_clt_u_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_clt_u_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_clt_u_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_clt_u_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_clt_u_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_clt_u_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_clt_u_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_clt_u_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_clt_u_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_clt_u_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_clt_u_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_clt_u_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_clt_u_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_clt_u_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_clt_u_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_clt_u_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_clt_u_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_clt_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_clt_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_clt_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_clt_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_clt_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_clt_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_clt_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_clt_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_clt_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_clt_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_clt_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_clt_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_clt_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_clt_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_clt_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_clt_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_clt_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +/* + * Int Divide + * ---------- + * + * +---------------+----------------------------------------------------------+ + * | DIV_S.B | Vector Signed Divide (byte) | + * | DIV_S.H | Vector Signed Divide (halfword) | + * | DIV_S.W | Vector Signed Divide (word) | + * | DIV_S.D | Vector Signed Divide (doubleword) | + * | DIV_U.B | Vector Unsigned Divide (byte) | + * | DIV_U.H | Vector Unsigned Divide (halfword) | + * | DIV_U.W | Vector Unsigned Divide (word) | + * | DIV_U.D | Vector Unsigned Divide (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + + +static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + if (arg1 == DF_MIN_INT(df) && arg2 == -1) { + return DF_MIN_INT(df); + } + return arg2 ? arg1 / arg2 + : arg1 >= 0 ? 
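+ /* zero divisor: the architecture leaves the result unpredictable, so return a fixed value chosen by the dividend's sign */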
-1 : 1; +} + +void helper_msa_div_s_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_div_s_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_div_s_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_div_s_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_div_s_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_div_s_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_div_s_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_div_s_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_div_s_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_div_s_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_div_s_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_div_s_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_div_s_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_div_s_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_div_s_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_div_s_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_div_s_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_div_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_div_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_div_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_div_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_div_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_div_s_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_div_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_div_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_div_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_div_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_div_s_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_div_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_div_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_div_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_div_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_div_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_div_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + +static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return arg2 ? 
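+ /* guard the division so a zero divisor yields all ones instead of a host fault */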
u_arg1 / u_arg2 : -1; +} + +void helper_msa_div_u_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_div_u_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_div_u_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_div_u_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_div_u_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_div_u_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_div_u_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_div_u_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_div_u_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_div_u_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_div_u_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_div_u_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_div_u_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_div_u_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_div_u_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_div_u_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_div_u_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_div_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_div_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_div_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_div_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_div_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_div_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_div_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_div_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_div_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_div_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_div_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_div_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_div_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_div_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_div_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_div_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_div_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +/* + * Int Dot Product + * --------------- + * + * +---------------+----------------------------------------------------------+ + * | DOTP_S.H | Vector Signed Dot Product (halfword) | + * | DOTP_S.W | Vector Signed Dot Product (word) | + * | DOTP_S.D | Vector Signed Dot Product (doubleword) | + * | DOTP_U.H | Vector Unsigned Dot Product (halfword) | + * | DOTP_U.W | Vector Unsigned Dot Product (word) | + * | DOTP_U.D | Vector Unsigned Dot Product (doubleword) | + * | DPADD_S.H | Vector Signed Dot Product and Add (halfword) | + * | DPADD_S.W | Vector Signed Dot Product and Add (word) | + * | DPADD_S.D | Vector Signed Dot Product and Add (doubleword) | + * | DPADD_U.H | Vector Unsigned Dot Product and Add (halfword) | + * | DPADD_U.W | Vector Unsigned Dot Product and Add (word) | + * | DPADD_U.D | Vector Unsigned Dot Product and Add (doubleword) | +
* | DPSUB_S.H | Vector Signed Dot Product and Subtract (halfword) | + * | DPSUB_S.W | Vector Signed Dot Product and Subtract (word) | + * | DPSUB_S.D | Vector Signed Dot Product and Subtract (doubleword) | + * | DPSUB_U.H | Vector Unsigned Dot Product and Subtract (halfword) | + * | DPSUB_U.W | Vector Unsigned Dot Product and Subtract (word) | + * | DPSUB_U.D | Vector Unsigned Dot Product and Subtract (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + +/* TODO: insert Int Dot Product group helpers here */ + + +/* + * Int Max Min + * ----------- + * + * +---------------+----------------------------------------------------------+ + * | MAX_A.B | Vector Maximum Based on Absolute Value (byte) | + * | MAX_A.H | Vector Maximum Based on Absolute Value (halfword) | + * | MAX_A.W | Vector Maximum Based on Absolute Value (word) | + * | MAX_A.D | Vector Maximum Based on Absolute Value (doubleword) | + * | MAX_S.B | Vector Signed Maximum (byte) | + * | MAX_S.H | Vector Signed Maximum (halfword) | + * | MAX_S.W | Vector Signed Maximum (word) | + * | MAX_S.D | Vector Signed Maximum (doubleword) | + * | MAX_U.B | Vector Unsigned Maximum (byte) | + * | MAX_U.H | Vector Unsigned Maximum (halfword) | + * | MAX_U.W | Vector Unsigned Maximum (word) | + * | MAX_U.D | Vector Unsigned Maximum (doubleword) | + * | MIN_A.B | Vector Minimum Based on Absolute Value (byte) | + * | MIN_A.H | Vector Minimum Based on Absolute Value (halfword) | + * | MIN_A.W | Vector Minimum Based on Absolute Value (word) | + * | MIN_A.D | Vector Minimum Based on Absolute Value (doubleword) | + * | MIN_S.B | Vector Signed Minimum (byte) | + * | MIN_S.H | Vector Signed Minimum (halfword) | + * | MIN_S.W | Vector Signed Minimum (word) | + * | MIN_S.D | Vector Signed Minimum (doubleword) | + * | MIN_U.B | Vector Unsigned Minimum (byte) | + * | MIN_U.H | Vector Unsigned Minimum (halfword) | + * | MIN_U.W | Vector Unsigned Minimum (word) | + * | MIN_U.D | Vector Unsigned Minimum (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + +static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; + uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; + return abs_arg1 > abs_arg2 ?
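+ /* magnitudes decide, but the original signed operand is returned; a tie picks arg2 */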
arg1 : arg2; +} + +void helper_msa_max_a_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_max_a_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_max_a_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_max_a_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_max_a_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_max_a_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_max_a_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_max_a_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_max_a_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_max_a_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_max_a_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_max_a_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_max_a_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_max_a_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_max_a_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_max_a_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_max_a_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_max_a_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_max_a_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_max_a_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_max_a_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_max_a_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_max_a_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_max_a_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_max_a_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_max_a_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_max_a_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_max_a_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_max_a_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_max_a_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_max_a_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_max_a_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_max_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_max_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 > arg2 ? 
arg1 : arg2; +} + +void helper_msa_max_s_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_max_s_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_max_s_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_max_s_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_max_s_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_max_s_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_max_s_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_max_s_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_max_s_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_max_s_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_max_s_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_max_s_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_max_s_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_max_s_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_max_s_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_max_s_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_max_s_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_max_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_max_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_max_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_max_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_max_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_max_s_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_max_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_max_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_max_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_max_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_max_s_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_max_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_max_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_max_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_max_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_max_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_max_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg1 > u_arg2 ? 
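+ /* compare the zero-extended copies, return the untouched lane value */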
arg1 : arg2; +} + +void helper_msa_max_u_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_max_u_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_max_u_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_max_u_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_max_u_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_max_u_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_max_u_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_max_u_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_max_u_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_max_u_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_max_u_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_max_u_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_max_u_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_max_u_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_max_u_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_max_u_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_max_u_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_max_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_max_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_max_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_max_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_max_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_max_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_max_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_max_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_max_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_max_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_max_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_max_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_max_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_max_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_max_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_max_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_max_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; + uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; + return abs_arg1 < abs_arg2 ? 
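+ /* mirror of MAX_A: the smaller magnitude wins and keeps its sign */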
arg1 : arg2; +} + +void helper_msa_min_a_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_min_a_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_min_a_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_min_a_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_min_a_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_min_a_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_min_a_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_min_a_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_min_a_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_min_a_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_min_a_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_min_a_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_min_a_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_min_a_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_min_a_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_min_a_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_min_a_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_min_a_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_min_a_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_min_a_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_min_a_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_min_a_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_min_a_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_min_a_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_min_a_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_min_a_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_min_a_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_min_a_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_min_a_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_min_a_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_min_a_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_min_a_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_min_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_min_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 < arg2 ? 
arg1 : arg2; +} + +void helper_msa_min_s_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_min_s_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_min_s_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_min_s_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_min_s_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_min_s_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_min_s_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_min_s_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_min_s_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_min_s_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_min_s_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_min_s_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_min_s_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_min_s_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_min_s_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_min_s_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_min_s_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_min_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_min_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_min_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_min_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_min_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_min_s_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_min_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_min_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_min_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_min_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_min_s_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_min_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_min_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_min_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_min_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_min_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_min_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg1 < u_arg2 ? 
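+ /* unsigned ordering decides, the original lane value is written back */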
arg1 : arg2; +} + +void helper_msa_min_u_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_min_u_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_min_u_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_min_u_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_min_u_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_min_u_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_min_u_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_min_u_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_min_u_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_min_u_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_min_u_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_min_u_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_min_u_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_min_u_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_min_u_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_min_u_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_min_u_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_min_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_min_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_min_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_min_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_min_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_min_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_min_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_min_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_min_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_min_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_min_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_min_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_min_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_min_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_min_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_min_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_min_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +/* + * Int Modulo + * ---------- + * + * +---------------+----------------------------------------------------------+ + * | MOD_S.B | Vector Signed Modulo (byte) | + * | MOD_S.H | Vector Signed Modulo (halfword) | + * | MOD_S.W | Vector Signed Modulo (word) | + * | MOD_S.D | Vector Signed Modulo (doubleword) | + * | MOD_U.B | Vector Unsigned Modulo (byte) | + * | MOD_U.H | Vector Unsigned Modulo (halfword) | + * | MOD_U.W | Vector Unsigned Modulo (word) | + * | MOD_U.D | Vector Unsigned Modulo (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + +static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + if (arg1 == DF_MIN_INT(df) && arg2 == -1) { + return 0; + } + return arg2 ? 
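+ /* the guard above exists because DF_MIN_INT % -1, though mathematically 0, overflows in C and can trap on the host (e.g. x86 IDIV); a zero divisor falls through and returns arg1 unchanged */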
arg1 % arg2 : arg1; +} + +void helper_msa_mod_s_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_mod_s_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_mod_s_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_mod_s_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_mod_s_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_mod_s_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_mod_s_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_mod_s_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_mod_s_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_mod_s_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_mod_s_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_mod_s_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_mod_s_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_mod_s_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_mod_s_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_mod_s_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_mod_s_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_mod_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_mod_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_mod_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_mod_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_mod_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_mod_s_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_mod_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_mod_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_mod_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_mod_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_mod_s_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_mod_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_mod_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_mod_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_mod_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_mod_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_mod_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + +static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg2 ? 
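+ /* no overflow guard needed in the unsigned case; as above, a zero divisor returns the dividend */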
u_arg1 % u_arg2 : u_arg1; +} + +void helper_msa_mod_u_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_mod_u_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_mod_u_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_mod_u_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_mod_u_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_mod_u_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_mod_u_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_mod_u_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_mod_u_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_mod_u_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_mod_u_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_mod_u_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_mod_u_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_mod_u_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_mod_u_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_mod_u_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_mod_u_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_mod_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_mod_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_mod_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_mod_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_mod_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_mod_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_mod_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_mod_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_mod_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_mod_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_mod_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_mod_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_mod_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_mod_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_mod_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_mod_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_mod_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +/* + * Int Multiply + * ------------ + * + * +---------------+----------------------------------------------------------+ + * | MADDV.B | Vector Multiply and Add (byte) | + * | MADDV.H | Vector Multiply and Add (halfword) | + * | MADDV.W | Vector Multiply and Add (word) | + * | MADDV.D | Vector Multiply and Add (doubleword) | + * | MSUBV.B | Vector Multiply and Subtract (byte) | + * | MSUBV.H | Vector Multiply and Subtract (halfword) | + * | MSUBV.W | Vector Multiply and Subtract (word) | + * | MSUBV.D | Vector Multiply and Subtract (doubleword) | + * | MULV.B | Vector Multiply (byte) | + * | MULV.H | Vector Multiply (halfword) | + * | MULV.W | Vector Multiply (word) | + * | MULV.D | Vector Multiply (doubleword) | + * +---------------+----------------------------------------------------------+ + 
*/ + +/* TODO: insert Int Multiply group helpers here */ + + +/* + * Int Subtract + * ------------ + * + * +---------------+----------------------------------------------------------+ + * | ASUB_S.B | Vector Absolute Values of Signed Subtract (byte) | + * | ASUB_S.H | Vector Absolute Values of Signed Subtract (halfword) | + * | ASUB_S.W | Vector Absolute Values of Signed Subtract (word) | + * | ASUB_S.D | Vector Absolute Values of Signed Subtract (doubleword) | + * | ASUB_U.B | Vector Absolute Values of Unsigned Subtract (byte) | + * | ASUB_U.H | Vector Absolute Values of Unsigned Subtract (halfword) | + * | ASUB_U.W | Vector Absolute Values of Unsigned Subtract (word) | + * | ASUB_U.D | Vector Absolute Values of Unsigned Subtract (doubleword) | + * | HSUB_S.H | Vector Signed Horizontal Subtract (halfword) | + * | HSUB_S.W | Vector Signed Horizontal Subtract (word) | + * | HSUB_S.D | Vector Signed Horizontal Subtract (doubleword) | + * | HSUB_U.H | Vector Unsigned Horizontal Subtract (halfword) | + * | HSUB_U.W | Vector Unsigned Horizontal Subtract (word) | + * | HSUB_U.D | Vector Unsigned Horizontal Subtract (doubleword) | + * | SUBS_S.B | Vector Signed Saturated Subtract (of Signed) (byte) | + * | SUBS_S.H | Vector Signed Saturated Subtract (of Signed) (halfword) | + * | SUBS_S.W | Vector Signed Saturated Subtract (of Signed) (word) | + * | SUBS_S.D | Vector Signed Saturated Subtract (of Signed) (doubleword)| + * | SUBS_U.B | Vector Unsigned Saturated Subtract (of Uns.) (byte) | + * | SUBS_U.H | Vector Unsigned Saturated Subtract (of Uns.) (halfword) | + * | SUBS_U.W | Vector Unsigned Saturated Subtract (of Uns.) (word) | + * | SUBS_U.D | Vector Unsigned Saturated Subtract (of Uns.) (doubleword)| + * | SUBSUS_U.B | Vector Uns. Sat. Subtract (of S. from Uns.) (byte) | + * | SUBSUS_U.H | Vector Uns. Sat. Subtract (of S. from Uns.) (halfword) | + * | SUBSUS_U.W | Vector Uns. Sat. Subtract (of S. from Uns.) (word) | + * | SUBSUS_U.D | Vector Uns. Sat. Subtract (of S. from Uns.) (doubleword) | + * | SUBSUU_S.B | Vector Signed Saturated Subtract (of Uns.) (byte) | + * | SUBSUU_S.H | Vector Signed Saturated Subtract (of Uns.) (halfword) | + * | SUBSUU_S.W | Vector Signed Saturated Subtract (of Uns.) (word) | + * | SUBSUU_S.D | Vector Signed Saturated Subtract (of Uns.) (doubleword) | + * | SUBV.B | Vector Subtract (byte) | + * | SUBV.H | Vector Subtract (halfword) | + * | SUBV.W | Vector Subtract (word) | + * | SUBV.D | Vector Subtract (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + + +static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + /* signed compare */ + return (arg1 < arg2) ?
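+ /* absolute difference: the compare picks the larger operand as minuend, e.g. |3 - 7| is computed as 7 - 3 = 4 */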
+ (uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2); +} + +void helper_msa_asub_s_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_asub_s_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_asub_s_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_asub_s_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_asub_s_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_asub_s_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_asub_s_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_asub_s_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_asub_s_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_asub_s_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_asub_s_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_asub_s_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_asub_s_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_asub_s_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_asub_s_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_asub_s_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_asub_s_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_asub_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_asub_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_asub_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_asub_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_asub_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_asub_s_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_asub_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_asub_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_asub_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_asub_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_asub_s_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_asub_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_asub_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_asub_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_asub_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_asub_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_asub_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + /* unsigned compare */ + return (u_arg1 < u_arg2) ? 
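+ /* same pattern with zero-extended operands; the minuend is always the larger value, so the subtraction cannot wrap */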
+ (uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2); +} + +void helper_msa_asub_u_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_asub_u_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_asub_u_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_asub_u_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_asub_u_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_asub_u_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_asub_u_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_asub_u_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_asub_u_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_asub_u_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_asub_u_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_asub_u_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_asub_u_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_asub_u_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_asub_u_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_asub_u_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_asub_u_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_asub_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_asub_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_asub_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_asub_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_asub_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_asub_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_asub_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_asub_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_asub_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_asub_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_asub_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_asub_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_asub_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_asub_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_asub_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_asub_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_asub_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +/* TODO: insert the rest of Int Subtract group helpers here */ + + +static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df); +} + +void helper_msa_hsub_s_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_hsub_s_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_hsub_s_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_hsub_s_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_hsub_s_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_hsub_s_df(DF_HALF, pws->h[4], pwt->h[4]); + 
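+ /* each lane: the odd (numerically upper) half of the ws element minus the even (lower) half of the wt element, both sign-extended — see msa_hsub_s_df() above */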
pwd->h[5] = msa_hsub_s_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_hsub_s_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_hsub_s_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_hsub_s_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_hsub_s_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_hsub_s_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_hsub_s_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_hsub_s_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_hsub_s_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_hsub_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_hsub_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df); +} + +void helper_msa_hsub_u_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_hsub_u_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_hsub_u_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_hsub_u_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_hsub_u_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_hsub_u_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_hsub_u_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_hsub_u_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_hsub_u_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_hsub_u_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_hsub_u_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_hsub_u_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_hsub_u_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_hsub_u_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_hsub_u_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_hsub_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_hsub_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +/* + * Interleave + * ---------- + * + * +---------------+----------------------------------------------------------+ + * | ILVEV.B | Vector Interleave Even (byte) | + * | ILVEV.H | Vector Interleave Even (halfword) | + * | ILVEV.W | Vector Interleave Even (word) | + * | ILVEV.D | Vector Interleave Even (doubleword) | + * | ILVOD.B | Vector Interleave Odd (byte) | + * | ILVOD.H | Vector Interleave Odd (halfword) | + * | ILVOD.W | Vector Interleave Odd (word) | + * | ILVOD.D | Vector Interleave Odd (doubleword) | + * | ILVL.B | Vector Interleave Left (byte) | + * | ILVL.H | Vector Interleave Left (halfword) | + * | ILVL.W | Vector Interleave Left (word) | + * | ILVL.D | Vector Interleave Left (doubleword) | + * | ILVR.B | Vector Interleave Right (byte) | + * | ILVR.H | Vector Interleave Right (halfword) | + * | ILVR.W | Vector Interleave Right 
(word) | + * | ILVR.D | Vector Interleave Right (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + + +void helper_msa_ilvev_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->b[8] = pws->b[9]; + pwd->b[9] = pwt->b[9]; + pwd->b[10] = pws->b[11]; + pwd->b[11] = pwt->b[11]; + pwd->b[12] = pws->b[13]; + pwd->b[13] = pwt->b[13]; + pwd->b[14] = pws->b[15]; + pwd->b[15] = pwt->b[15]; + pwd->b[0] = pws->b[1]; + pwd->b[1] = pwt->b[1]; + pwd->b[2] = pws->b[3]; + pwd->b[3] = pwt->b[3]; + pwd->b[4] = pws->b[5]; + pwd->b[5] = pwt->b[5]; + pwd->b[6] = pws->b[7]; + pwd->b[7] = pwt->b[7]; +#else + pwd->b[15] = pws->b[14]; + pwd->b[14] = pwt->b[14]; + pwd->b[13] = pws->b[12]; + pwd->b[12] = pwt->b[12]; + pwd->b[11] = pws->b[10]; + pwd->b[10] = pwt->b[10]; + pwd->b[9] = pws->b[8]; + pwd->b[8] = pwt->b[8]; + pwd->b[7] = pws->b[6]; + pwd->b[6] = pwt->b[6]; + pwd->b[5] = pws->b[4]; + pwd->b[4] = pwt->b[4]; + pwd->b[3] = pws->b[2]; + pwd->b[2] = pwt->b[2]; + pwd->b[1] = pws->b[0]; + pwd->b[0] = pwt->b[0]; +#endif +} + +void helper_msa_ilvev_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->h[4] = pws->h[5]; + pwd->h[5] = pwt->h[5]; + pwd->h[6] = pws->h[7]; + pwd->h[7] = pwt->h[7]; + pwd->h[0] = pws->h[1]; + pwd->h[1] = pwt->h[1]; + pwd->h[2] = pws->h[3]; + pwd->h[3] = pwt->h[3]; +#else + pwd->h[7] = pws->h[6]; + pwd->h[6] = pwt->h[6]; + pwd->h[5] = pws->h[4]; + pwd->h[4] = pwt->h[4]; + pwd->h[3] = pws->h[2]; + pwd->h[2] = pwt->h[2]; + pwd->h[1] = pws->h[0]; + pwd->h[0] = pwt->h[0]; +#endif +} + +void helper_msa_ilvev_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->w[2] = pws->w[3]; + pwd->w[3] = pwt->w[3]; + pwd->w[0] = pws->w[1]; + pwd->w[1] = pwt->w[1]; +#else + pwd->w[3] = pws->w[2]; + pwd->w[2] = pwt->w[2]; + pwd->w[1] = pws->w[0]; + pwd->w[0] = pwt->w[0]; +#endif +} + +void helper_msa_ilvev_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[1] = pws->d[0]; + pwd->d[0] = pwt->d[0]; +} + + +void helper_msa_ilvod_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->b[7] = pwt->b[6]; + pwd->b[6] = pws->b[6]; + pwd->b[5] = pwt->b[4]; + pwd->b[4] = pws->b[4]; + pwd->b[3] = pwt->b[2]; + pwd->b[2] = pws->b[2]; + pwd->b[1] = pwt->b[0]; + pwd->b[0] = pws->b[0]; + pwd->b[15] = pwt->b[14]; + pwd->b[14] = pws->b[14]; + pwd->b[13] = pwt->b[12]; + pwd->b[12] = pws->b[12]; + pwd->b[11] = pwt->b[10]; + pwd->b[10] = pws->b[10]; + pwd->b[9] = pwt->b[8]; + pwd->b[8] = pws->b[8]; +#else + pwd->b[0] = pwt->b[1]; + pwd->b[1] = pws->b[1]; + pwd->b[2] = pwt->b[3]; + pwd->b[3] = pws->b[3]; + pwd->b[4] = pwt->b[5]; + pwd->b[5] = pws->b[5]; + pwd->b[6] = pwt->b[7]; + 
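+ /* the BE and LE branches mirror the lane indices because the wr_t element arrays are stored in host byte order; both branches compute the same architected odd-element interleave */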
pwd->b[7] = pws->b[7]; + pwd->b[8] = pwt->b[9]; + pwd->b[9] = pws->b[9]; + pwd->b[10] = pwt->b[11]; + pwd->b[11] = pws->b[11]; + pwd->b[12] = pwt->b[13]; + pwd->b[13] = pws->b[13]; + pwd->b[14] = pwt->b[15]; + pwd->b[15] = pws->b[15]; +#endif +} + +void helper_msa_ilvod_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->h[3] = pwt->h[2]; + pwd->h[2] = pws->h[2]; + pwd->h[1] = pwt->h[0]; + pwd->h[0] = pws->h[0]; + pwd->h[7] = pwt->h[6]; + pwd->h[6] = pws->h[6]; + pwd->h[5] = pwt->h[4]; + pwd->h[4] = pws->h[4]; +#else + pwd->h[0] = pwt->h[1]; + pwd->h[1] = pws->h[1]; + pwd->h[2] = pwt->h[3]; + pwd->h[3] = pws->h[3]; + pwd->h[4] = pwt->h[5]; + pwd->h[5] = pws->h[5]; + pwd->h[6] = pwt->h[7]; + pwd->h[7] = pws->h[7]; +#endif +} + +void helper_msa_ilvod_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->w[1] = pwt->w[0]; + pwd->w[0] = pws->w[0]; + pwd->w[3] = pwt->w[2]; + pwd->w[2] = pws->w[2]; +#else + pwd->w[0] = pwt->w[1]; + pwd->w[1] = pws->w[1]; + pwd->w[2] = pwt->w[3]; + pwd->w[3] = pws->w[3]; +#endif +} + +void helper_msa_ilvod_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = pwt->d[1]; + pwd->d[1] = pws->d[1]; +} + + +void helper_msa_ilvl_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->b[7] = pwt->b[15]; + pwd->b[6] = pws->b[15]; + pwd->b[5] = pwt->b[14]; + pwd->b[4] = pws->b[14]; + pwd->b[3] = pwt->b[13]; + pwd->b[2] = pws->b[13]; + pwd->b[1] = pwt->b[12]; + pwd->b[0] = pws->b[12]; + pwd->b[15] = pwt->b[11]; + pwd->b[14] = pws->b[11]; + pwd->b[13] = pwt->b[10]; + pwd->b[12] = pws->b[10]; + pwd->b[11] = pwt->b[9]; + pwd->b[10] = pws->b[9]; + pwd->b[9] = pwt->b[8]; + pwd->b[8] = pws->b[8]; +#else + pwd->b[0] = pwt->b[8]; + pwd->b[1] = pws->b[8]; + pwd->b[2] = pwt->b[9]; + pwd->b[3] = pws->b[9]; + pwd->b[4] = pwt->b[10]; + pwd->b[5] = pws->b[10]; + pwd->b[6] = pwt->b[11]; + pwd->b[7] = pws->b[11]; + pwd->b[8] = pwt->b[12]; + pwd->b[9] = pws->b[12]; + pwd->b[10] = pwt->b[13]; + pwd->b[11] = pws->b[13]; + pwd->b[12] = pwt->b[14]; + pwd->b[13] = pws->b[14]; + pwd->b[14] = pwt->b[15]; + pwd->b[15] = pws->b[15]; +#endif +} + +void helper_msa_ilvl_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->h[3] = pwt->h[7]; + pwd->h[2] = pws->h[7]; + pwd->h[1] = pwt->h[6]; + pwd->h[0] = pws->h[6]; + pwd->h[7] = pwt->h[5]; + pwd->h[6] = pws->h[5]; + pwd->h[5] = pwt->h[4]; + pwd->h[4] = pws->h[4]; +#else + pwd->h[0] = pwt->h[4]; + pwd->h[1] = pws->h[4]; + pwd->h[2] = pwt->h[5]; + pwd->h[3] = pws->h[5]; + pwd->h[4] = pwt->h[6]; + pwd->h[5] = pws->h[6]; + pwd->h[6] = pwt->h[7]; + pwd->h[7] = pws->h[7]; +#endif +} + +void helper_msa_ilvl_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, 
uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->w[1] = pwt->w[3]; + pwd->w[0] = pws->w[3]; + pwd->w[3] = pwt->w[2]; + pwd->w[2] = pws->w[2]; +#else + pwd->w[0] = pwt->w[2]; + pwd->w[1] = pws->w[2]; + pwd->w[2] = pwt->w[3]; + pwd->w[3] = pws->w[3]; +#endif +} + +void helper_msa_ilvl_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = pwt->d[1]; + pwd->d[1] = pws->d[1]; +} + + +void helper_msa_ilvr_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->b[8] = pws->b[0]; + pwd->b[9] = pwt->b[0]; + pwd->b[10] = pws->b[1]; + pwd->b[11] = pwt->b[1]; + pwd->b[12] = pws->b[2]; + pwd->b[13] = pwt->b[2]; + pwd->b[14] = pws->b[3]; + pwd->b[15] = pwt->b[3]; + pwd->b[0] = pws->b[4]; + pwd->b[1] = pwt->b[4]; + pwd->b[2] = pws->b[5]; + pwd->b[3] = pwt->b[5]; + pwd->b[4] = pws->b[6]; + pwd->b[5] = pwt->b[6]; + pwd->b[6] = pws->b[7]; + pwd->b[7] = pwt->b[7]; +#else + pwd->b[15] = pws->b[7]; + pwd->b[14] = pwt->b[7]; + pwd->b[13] = pws->b[6]; + pwd->b[12] = pwt->b[6]; + pwd->b[11] = pws->b[5]; + pwd->b[10] = pwt->b[5]; + pwd->b[9] = pws->b[4]; + pwd->b[8] = pwt->b[4]; + pwd->b[7] = pws->b[3]; + pwd->b[6] = pwt->b[3]; + pwd->b[5] = pws->b[2]; + pwd->b[4] = pwt->b[2]; + pwd->b[3] = pws->b[1]; + pwd->b[2] = pwt->b[1]; + pwd->b[1] = pws->b[0]; + pwd->b[0] = pwt->b[0]; +#endif +} + +void helper_msa_ilvr_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->h[4] = pws->h[0]; + pwd->h[5] = pwt->h[0]; + pwd->h[6] = pws->h[1]; + pwd->h[7] = pwt->h[1]; + pwd->h[0] = pws->h[2]; + pwd->h[1] = pwt->h[2]; + pwd->h[2] = pws->h[3]; + pwd->h[3] = pwt->h[3]; +#else + pwd->h[7] = pws->h[3]; + pwd->h[6] = pwt->h[3]; + pwd->h[5] = pws->h[2]; + pwd->h[4] = pwt->h[2]; + pwd->h[3] = pws->h[1]; + pwd->h[2] = pwt->h[1]; + pwd->h[1] = pws->h[0]; + pwd->h[0] = pwt->h[0]; +#endif +} + +void helper_msa_ilvr_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->w[2] = pws->w[0]; + pwd->w[3] = pwt->w[0]; + pwd->w[0] = pws->w[1]; + pwd->w[1] = pwt->w[1]; +#else + pwd->w[3] = pws->w[1]; + pwd->w[2] = pwt->w[1]; + pwd->w[1] = pws->w[0]; + pwd->w[0] = pwt->w[0]; +#endif +} + +void helper_msa_ilvr_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[1] = pws->d[0]; + pwd->d[0] = pwt->d[0]; +} + + +/* + * Logic + * ----- + * + * +---------------+----------------------------------------------------------+ + * | AND.V | Vector Logical And | + * | NOR.V | Vector Logical Negated Or | + * | OR.V | Vector Logical Or | + * | XOR.V | Vector Logical Exclusive Or | + * 
+---------------+----------------------------------------------------------+ + */ + + +void helper_msa_and_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = pws->d[0] & pwt->d[0]; + pwd->d[1] = pws->d[1] & pwt->d[1]; +} + +void helper_msa_nor_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = ~(pws->d[0] | pwt->d[0]); + pwd->d[1] = ~(pws->d[1] | pwt->d[1]); +} + +void helper_msa_or_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = pws->d[0] | pwt->d[0]; + pwd->d[1] = pws->d[1] | pwt->d[1]; +} + +void helper_msa_xor_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = pws->d[0] ^ pwt->d[0]; + pwd->d[1] = pws->d[1] ^ pwt->d[1]; +} + + +/* + * Move + * ---- + * + * +---------------+----------------------------------------------------------+ + * | MOVE.V | Vector Move | + * +---------------+----------------------------------------------------------+ + */ + +static inline void msa_move_v(wr_t *pwd, wr_t *pws) +{ + pwd->d[0] = pws->d[0]; + pwd->d[1] = pws->d[1]; +} + +void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + msa_move_v(pwd, pws); +} + + +/* + * Pack + * ---- + * + * +---------------+----------------------------------------------------------+ + * | PCKEV.B | Vector Pack Even (byte) | + * | PCKEV.H | Vector Pack Even (halfword) | + * | PCKEV.W | Vector Pack Even (word) | + * | PCKEV.D | Vector Pack Even (doubleword) | + * | PCKOD.B | Vector Pack Odd (byte) | + * | PCKOD.H | Vector Pack Odd (halfword) | + * | PCKOD.W | Vector Pack Odd (word) | + * | PCKOD.D | Vector Pack Odd (doubleword) | + * | VSHF.B | Vector Data Preserving Shuffle (byte) | + * | VSHF.H | Vector Data Preserving Shuffle (halfword) | + * | VSHF.W | Vector Data Preserving Shuffle (word) | + * | VSHF.D | Vector Data Preserving Shuffle (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + + +void helper_msa_pckev_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->b[8] = pws->b[9]; + pwd->b[10] = pws->b[13]; + pwd->b[12] = pws->b[1]; + pwd->b[14] = pws->b[5]; + pwd->b[0] = pwt->b[9]; + pwd->b[2] = pwt->b[13]; + pwd->b[4] = pwt->b[1]; + pwd->b[6] = pwt->b[5]; + pwd->b[9] = pws->b[11]; + pwd->b[13] = pws->b[3]; + pwd->b[1] = pwt->b[11]; + pwd->b[5] = pwt->b[3]; + pwd->b[11] = pws->b[15]; + pwd->b[3] = pwt->b[15]; + pwd->b[15] = pws->b[7]; + pwd->b[7] = pwt->b[7]; +#else + pwd->b[15] = pws->b[14]; + pwd->b[13] = pws->b[10]; + pwd->b[11] = pws->b[6]; + pwd->b[9] = pws->b[2]; + pwd->b[7] = pwt->b[14]; + pwd->b[5] = pwt->b[10]; + pwd->b[3] = pwt->b[6]; + pwd->b[1] = pwt->b[2]; + pwd->b[14] = pws->b[12]; + pwd->b[10] = pws->b[4]; + pwd->b[6] = 
pwt->b[12]; + pwd->b[2] = pwt->b[4]; + pwd->b[12] = pws->b[8]; + pwd->b[4] = pwt->b[8]; + pwd->b[8] = pws->b[0]; + pwd->b[0] = pwt->b[0]; +#endif +} + +void helper_msa_pckev_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->h[4] = pws->h[5]; + pwd->h[6] = pws->h[1]; + pwd->h[0] = pwt->h[5]; + pwd->h[2] = pwt->h[1]; + pwd->h[5] = pws->h[7]; + pwd->h[1] = pwt->h[7]; + pwd->h[7] = pws->h[3]; + pwd->h[3] = pwt->h[3]; +#else + pwd->h[7] = pws->h[6]; + pwd->h[5] = pws->h[2]; + pwd->h[3] = pwt->h[6]; + pwd->h[1] = pwt->h[2]; + pwd->h[6] = pws->h[4]; + pwd->h[2] = pwt->h[4]; + pwd->h[4] = pws->h[0]; + pwd->h[0] = pwt->h[0]; +#endif +} + +void helper_msa_pckev_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->w[2] = pws->w[3]; + pwd->w[0] = pwt->w[3]; + pwd->w[3] = pws->w[1]; + pwd->w[1] = pwt->w[1]; +#else + pwd->w[3] = pws->w[2]; + pwd->w[1] = pwt->w[2]; + pwd->w[2] = pws->w[0]; + pwd->w[0] = pwt->w[0]; +#endif +} + +void helper_msa_pckev_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[1] = pws->d[0]; + pwd->d[0] = pwt->d[0]; +} + + +void helper_msa_pckod_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->b[7] = pwt->b[6]; + pwd->b[5] = pwt->b[2]; + pwd->b[3] = pwt->b[14]; + pwd->b[1] = pwt->b[10]; + pwd->b[15] = pws->b[6]; + pwd->b[13] = pws->b[2]; + pwd->b[11] = pws->b[14]; + pwd->b[9] = pws->b[10]; + pwd->b[6] = pwt->b[4]; + pwd->b[2] = pwt->b[12]; + pwd->b[14] = pws->b[4]; + pwd->b[10] = pws->b[12]; + pwd->b[4] = pwt->b[0]; + pwd->b[12] = pws->b[0]; + pwd->b[0] = pwt->b[8]; + pwd->b[8] = pws->b[8]; +#else + pwd->b[0] = pwt->b[1]; + pwd->b[2] = pwt->b[5]; + pwd->b[4] = pwt->b[9]; + pwd->b[6] = pwt->b[13]; + pwd->b[8] = pws->b[1]; + pwd->b[10] = pws->b[5]; + pwd->b[12] = pws->b[9]; + pwd->b[14] = pws->b[13]; + pwd->b[1] = pwt->b[3]; + pwd->b[5] = pwt->b[11]; + pwd->b[9] = pws->b[3]; + pwd->b[13] = pws->b[11]; + pwd->b[3] = pwt->b[7]; + pwd->b[11] = pws->b[7]; + pwd->b[7] = pwt->b[15]; + pwd->b[15] = pws->b[15]; +#endif + +} + +void helper_msa_pckod_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->h[3] = pwt->h[2]; + pwd->h[1] = pwt->h[6]; + pwd->h[7] = pws->h[2]; + pwd->h[5] = pws->h[6]; + pwd->h[2] = pwt->h[0]; + pwd->h[6] = pws->h[0]; + pwd->h[0] = pwt->h[4]; + pwd->h[4] = pws->h[4]; +#else + pwd->h[0] = pwt->h[1]; + pwd->h[2] = pwt->h[5]; + pwd->h[4] = pws->h[1]; + pwd->h[6] = pws->h[5]; + pwd->h[1] = pwt->h[3]; + pwd->h[5] = pws->h[3]; + pwd->h[3] = pwt->h[7]; + pwd->h[7] = pws->h[7]; +#endif +} + +void helper_msa_pckod_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = 
&(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + +#if defined(HOST_WORDS_BIGENDIAN) + pwd->w[1] = pwt->w[0]; + pwd->w[3] = pws->w[0]; + pwd->w[0] = pwt->w[2]; + pwd->w[2] = pws->w[2]; +#else + pwd->w[0] = pwt->w[1]; + pwd->w[2] = pws->w[1]; + pwd->w[1] = pwt->w[3]; + pwd->w[3] = pws->w[3]; +#endif +} + +void helper_msa_pckod_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = pwt->d[1]; + pwd->d[1] = pws->d[1]; +} + + +/* + * Shift + * ----- + * + * +---------------+----------------------------------------------------------+ + * | SLL.B | Vector Shift Left (byte) | + * | SLL.H | Vector Shift Left (halfword) | + * | SLL.W | Vector Shift Left (word) | + * | SLL.D | Vector Shift Left (doubleword) | + * | SRA.B | Vector Shift Right Arithmetic (byte) | + * | SRA.H | Vector Shift Right Arithmetic (halfword) | + * | SRA.W | Vector Shift Right Arithmetic (word) | + * | SRA.D | Vector Shift Right Arithmetic (doubleword) | + * | SRAR.B | Vector Shift Right Arithmetic Rounded (byte) | + * | SRAR.H | Vector Shift Right Arithmetic Rounded (halfword) | + * | SRAR.W | Vector Shift Right Arithmetic Rounded (word) | + * | SRAR.D | Vector Shift Right Arithmetic Rounded (doubleword) | + * | SRL.B | Vector Shift Right Logical (byte) | + * | SRL.H | Vector Shift Right Logical (halfword) | + * | SRL.W | Vector Shift Right Logical (word) | + * | SRL.D | Vector Shift Right Logical (doubleword) | + * | SRLR.B | Vector Shift Right Logical Rounded (byte) | + * | SRLR.H | Vector Shift Right Logical Rounded (halfword) | + * | SRLR.W | Vector Shift Right Logical Rounded (word) | + * | SRLR.D | Vector Shift Right Logical Rounded (doubleword) | + * +---------------+----------------------------------------------------------+ + */ + + +static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + return arg1 << b_arg2; +} + +void helper_msa_sll_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_sll_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_sll_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_sll_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_sll_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_sll_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_sll_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_sll_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_sll_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_sll_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_sll_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_sll_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_sll_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_sll_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_sll_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_sll_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_sll_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_sll_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_sll_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_sll_df(DF_HALF, 
pws->h[1], pwt->h[1]); + pwd->h[2] = msa_sll_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_sll_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_sll_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_sll_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_sll_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_sll_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_sll_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_sll_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_sll_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_sll_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_sll_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_sll_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_sll_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_sll_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + return arg1 >> b_arg2; +} + +void helper_msa_sra_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_sra_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_sra_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_sra_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_sra_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_sra_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_sra_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_sra_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_sra_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_sra_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_sra_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_sra_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_sra_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_sra_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_sra_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_sra_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_sra_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_sra_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_sra_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_sra_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_sra_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_sra_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_sra_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_sra_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_sra_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_sra_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_sra_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_sra_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_sra_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_sra_df(DF_WORD, pws->w[2], 
pwt->w[2]); + pwd->w[3] = msa_sra_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_sra_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_sra_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_sra_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + if (b_arg2 == 0) { + return arg1; + } else { + int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1; + return (arg1 >> b_arg2) + r_bit; + } +} + +void helper_msa_srar_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_srar_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_srar_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_srar_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_srar_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_srar_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_srar_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_srar_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_srar_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_srar_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_srar_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_srar_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_srar_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_srar_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_srar_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_srar_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_srar_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_srar_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_srar_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_srar_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_srar_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_srar_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_srar_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_srar_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_srar_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_srar_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_srar_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_srar_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_srar_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_srar_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_srar_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_srar_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_srar_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_srar_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + int32_t b_arg2 = BIT_POSITION(arg2, df); + return u_arg1 >> b_arg2; +} + 
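+/* + * Shift counts are reduced modulo the element width by BIT_POSITION(), so + * shifting an 8-bit lane by 9 shifts by 1. SRL zero-extends via UNSIGNED() + * before shifting, while SRA above shifts the sign-extended value: for the + * byte 0x80, SRL by 1 gives 0x40 but SRA by 1 gives 0xc0. The rounded forms + * (SRAR/SRLR) add back the last bit shifted out, e.g. SRAR of 7 by 2 yields + * (7 >> 2) + 1 = 2. + */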
+void helper_msa_srl_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_srl_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_srl_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_srl_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_srl_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_srl_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_srl_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = msa_srl_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_srl_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_srl_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_srl_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_srl_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_srl_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_srl_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_srl_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_srl_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_srl_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_srl_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_srl_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_srl_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_srl_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_srl_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_srl_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_srl_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_srl_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_srl_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_srl_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_srl_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_srl_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_srl_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_srl_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_srl_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_srl_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_srl_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + int32_t b_arg2 = BIT_POSITION(arg2, df); + if (b_arg2 == 0) { + return u_arg1; + } else { + uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1; + return (u_arg1 >> b_arg2) + r_bit; + } +} + +void helper_msa_srlr_b(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->b[0] = msa_srlr_df(DF_BYTE, pws->b[0], pwt->b[0]); + pwd->b[1] = msa_srlr_df(DF_BYTE, pws->b[1], pwt->b[1]); + pwd->b[2] = msa_srlr_df(DF_BYTE, pws->b[2], pwt->b[2]); + pwd->b[3] = msa_srlr_df(DF_BYTE, pws->b[3], pwt->b[3]); + pwd->b[4] = msa_srlr_df(DF_BYTE, pws->b[4], pwt->b[4]); + pwd->b[5] = msa_srlr_df(DF_BYTE, pws->b[5], pwt->b[5]); + pwd->b[6] = 
msa_srlr_df(DF_BYTE, pws->b[6], pwt->b[6]); + pwd->b[7] = msa_srlr_df(DF_BYTE, pws->b[7], pwt->b[7]); + pwd->b[8] = msa_srlr_df(DF_BYTE, pws->b[8], pwt->b[8]); + pwd->b[9] = msa_srlr_df(DF_BYTE, pws->b[9], pwt->b[9]); + pwd->b[10] = msa_srlr_df(DF_BYTE, pws->b[10], pwt->b[10]); + pwd->b[11] = msa_srlr_df(DF_BYTE, pws->b[11], pwt->b[11]); + pwd->b[12] = msa_srlr_df(DF_BYTE, pws->b[12], pwt->b[12]); + pwd->b[13] = msa_srlr_df(DF_BYTE, pws->b[13], pwt->b[13]); + pwd->b[14] = msa_srlr_df(DF_BYTE, pws->b[14], pwt->b[14]); + pwd->b[15] = msa_srlr_df(DF_BYTE, pws->b[15], pwt->b[15]); +} + +void helper_msa_srlr_h(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->h[0] = msa_srlr_df(DF_HALF, pws->h[0], pwt->h[0]); + pwd->h[1] = msa_srlr_df(DF_HALF, pws->h[1], pwt->h[1]); + pwd->h[2] = msa_srlr_df(DF_HALF, pws->h[2], pwt->h[2]); + pwd->h[3] = msa_srlr_df(DF_HALF, pws->h[3], pwt->h[3]); + pwd->h[4] = msa_srlr_df(DF_HALF, pws->h[4], pwt->h[4]); + pwd->h[5] = msa_srlr_df(DF_HALF, pws->h[5], pwt->h[5]); + pwd->h[6] = msa_srlr_df(DF_HALF, pws->h[6], pwt->h[6]); + pwd->h[7] = msa_srlr_df(DF_HALF, pws->h[7], pwt->h[7]); +} + +void helper_msa_srlr_w(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->w[0] = msa_srlr_df(DF_WORD, pws->w[0], pwt->w[0]); + pwd->w[1] = msa_srlr_df(DF_WORD, pws->w[1], pwt->w[1]); + pwd->w[2] = msa_srlr_df(DF_WORD, pws->w[2], pwt->w[2]); + pwd->w[3] = msa_srlr_df(DF_WORD, pws->w[3], pwt->w[3]); +} + +void helper_msa_srlr_d(CPUMIPSState *env, + uint32_t wd, uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + pwd->d[0] = msa_srlr_df(DF_DOUBLE, pws->d[0], pwt->d[0]); + pwd->d[1] = msa_srlr_df(DF_DOUBLE, pws->d[1], pwt->d[1]); +} + + +#define MSA_FN_IMM8(FUNC, DEST, OPERATION) \ +void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \ + uint32_t i8) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + uint32_t i; \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + DEST = OPERATION; \ + } \ +} + +MSA_FN_IMM8(andi_b, pwd->b[i], pws->b[i] & i8) +MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8) +MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8)) +MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8) + +#define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \ + UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df) +MSA_FN_IMM8(bmnzi_b, pwd->b[i], + BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE)) + +#define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \ + UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df) +MSA_FN_IMM8(bmzi_b, pwd->b[i], + BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE)) + +#define BIT_SELECT(dest, arg1, arg2, df) \ + UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df) +MSA_FN_IMM8(bseli_b, pwd->b[i], + BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE)) + +#undef BIT_SELECT +#undef BIT_MOVE_IF_ZERO +#undef BIT_MOVE_IF_NOT_ZERO +#undef MSA_FN_IMM8 + +#define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03)) + +void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t imm) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = 
&(env->active_fpu.fpr[ws].wr); + wr_t wx, *pwx = &wx; + uint32_t i; + + switch (df) { + case DF_BYTE: + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { + pwx->b[i] = pws->b[SHF_POS(i, imm)]; + } + break; + case DF_HALF: + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { + pwx->h[i] = pws->h[SHF_POS(i, imm)]; + } + break; + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + pwx->w[i] = pws->w[SHF_POS(i, imm)]; + } + break; + default: + assert(0); + } + msa_move_v(pwd, pwx); +} + +static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 - arg2; +} + +#define MSA_BINOP_IMM_DF(helper, func) \ +void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \ + uint32_t wd, uint32_t ws, int32_t u5) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + uint32_t i; \ + \ + switch (df) { \ + case DF_BYTE: \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \ + } \ + break; \ + case DF_HALF: \ + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ + pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \ + } \ + break; \ + case DF_WORD: \ + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ + pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \ + } \ + break; \ + case DF_DOUBLE: \ + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ + pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \ + } \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_BINOP_IMM_DF(addvi, addv) +MSA_BINOP_IMM_DF(subvi, subv) +MSA_BINOP_IMM_DF(ceqi, ceq) +MSA_BINOP_IMM_DF(clei_s, cle_s) +MSA_BINOP_IMM_DF(clei_u, cle_u) +MSA_BINOP_IMM_DF(clti_s, clt_s) +MSA_BINOP_IMM_DF(clti_u, clt_u) +MSA_BINOP_IMM_DF(maxi_s, max_s) +MSA_BINOP_IMM_DF(maxi_u, max_u) +MSA_BINOP_IMM_DF(mini_s, min_s) +MSA_BINOP_IMM_DF(mini_u, min_u) +#undef MSA_BINOP_IMM_DF + +void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + int32_t s10) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + uint32_t i; + + switch (df) { + case DF_BYTE: + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { + pwd->b[i] = (int8_t)s10; + } + break; + case DF_HALF: + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { + pwd->h[i] = (int16_t)s10; + } + break; + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + pwd->w[i] = (int32_t)s10; + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + pwd->d[i] = (int64_t)s10; + } + break; + default: + assert(0); + } +} + +static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, uint32_t m) +{ + return arg < M_MIN_INT(m + 1) ? M_MIN_INT(m + 1) : + arg > M_MAX_INT(m + 1) ? M_MAX_INT(m + 1) : + arg; +} + +static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m) +{ + uint64_t u_arg = UNSIGNED(arg, df); + return u_arg < M_MAX_UINT(m + 1) ? 
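+        /*
+         * sat_s/sat_u clamp a value into an (m + 1)-bit range: with
+         * m = 7, sat_s saturates to [-128, 127] and sat_u to [0, 255].
+         * The SAT_S.df/SAT_U.df immediate forms below reach these via
+         * MSA_BINOP_IMMU_DF.
+         */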
u_arg : + M_MAX_UINT(m + 1); +} + +#define MSA_BINOP_IMMU_DF(helper, func) \ +void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \ + uint32_t ws, uint32_t u5) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + uint32_t i; \ + \ + switch (df) { \ + case DF_BYTE: \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \ + } \ + break; \ + case DF_HALF: \ + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ + pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \ + } \ + break; \ + case DF_WORD: \ + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ + pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \ + } \ + break; \ + case DF_DOUBLE: \ + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ + pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \ + } \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_BINOP_IMMU_DF(slli, sll) +MSA_BINOP_IMMU_DF(srai, sra) +MSA_BINOP_IMMU_DF(srli, srl) +MSA_BINOP_IMMU_DF(bclri, bclr) +MSA_BINOP_IMMU_DF(bseti, bset) +MSA_BINOP_IMMU_DF(bnegi, bneg) +MSA_BINOP_IMMU_DF(sat_s, sat_s) +MSA_BINOP_IMMU_DF(sat_u, sat_u) +MSA_BINOP_IMMU_DF(srari, srar) +MSA_BINOP_IMMU_DF(srlri, srlr) +#undef MSA_BINOP_IMMU_DF + +#define MSA_TEROP_IMMU_DF(helper, func) \ +void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \ + uint32_t wd, uint32_t ws, uint32_t u5) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + uint32_t i; \ + \ + switch (df) { \ + case DF_BYTE: \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \ + u5); \ + } \ + break; \ + case DF_HALF: \ + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ + pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \ + u5); \ + } \ + break; \ + case DF_WORD: \ + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ + pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \ + u5); \ + } \ + break; \ + case DF_DOUBLE: \ + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ + pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \ + u5); \ + } \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_TEROP_IMMU_DF(binsli, binsl) +MSA_TEROP_IMMU_DF(binsri, binsr) +#undef MSA_TEROP_IMMU_DF + +static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t max_int = DF_MAX_INT(df); + int64_t min_int = DF_MIN_INT(df); + if (arg2 > 0) { + return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int; + } else { + return (arg1 < max_int + arg2) ? arg1 - arg2 : max_int; + } +} + +static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0; +} + +static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t max_uint = DF_MAX_UINT(df); + if (arg2 >= 0) { + uint64_t u_arg2 = (uint64_t)arg2; + return (u_arg1 > u_arg2) ? + (int64_t)(u_arg1 - u_arg2) : + 0; + } else { + uint64_t u_arg2 = (uint64_t)(-arg2); + return (u_arg1 < max_uint - u_arg2) ? 
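+        /*
+         * subsus_u subtracts a signed value from an unsigned one with
+         * unsigned saturation: a negative arg2 becomes an addition
+         * clamped at DF_MAX_UINT, while a positive arg2 clamps the
+         * difference at 0.  For DF_BYTE, 250 - (-10) gives 255 and
+         * 5 - 10 gives 0.
+         */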
+ (int64_t)(u_arg1 + u_arg2) : + (int64_t)max_uint; + } +} + +static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + int64_t max_int = DF_MAX_INT(df); + int64_t min_int = DF_MIN_INT(df); + if (u_arg1 > u_arg2) { + return u_arg1 - u_arg2 < (uint64_t)max_int ? + (int64_t)(u_arg1 - u_arg2) : + max_int; + } else { + return u_arg2 - u_arg1 < (uint64_t)(-min_int) ? + (int64_t)(u_arg1 - u_arg2) : + min_int; + } +} + +static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 * arg2; +} + +#define SIGNED_EXTRACT(e, o, a, df) \ + do { \ + e = SIGNED_EVEN(a, df); \ + o = SIGNED_ODD(a, df); \ + } while (0) + +#define UNSIGNED_EXTRACT(e, o, a, df) \ + do { \ + e = UNSIGNED_EVEN(a, df); \ + o = UNSIGNED_ODD(a, df); \ + } while (0) + +static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); +} + +static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); +} + +#define CONCATENATE_AND_SLIDE(s, k) \ + do { \ + for (i = 0; i < s; i++) { \ + v[i] = pws->b[s * k + i]; \ + v[i + s] = pwd->b[s * k + i]; \ + } \ + for (i = 0; i < s; i++) { \ + pwd->b[s * k + i] = v[i + n]; \ + } \ + } while (0) + +static inline void msa_sld_df(uint32_t df, wr_t *pwd, + wr_t *pws, target_ulong rt) +{ + uint32_t n = rt % DF_ELEMENTS(df); + uint8_t v[64]; + uint32_t i, k; + + switch (df) { + case DF_BYTE: + CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0); + break; + case DF_HALF: + for (k = 0; k < 2; k++) { + CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k); + } + break; + case DF_WORD: + for (k = 0; k < 4; k++) { + CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k); + } + break; + case DF_DOUBLE: + for (k = 0; k < 8; k++) { + CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k); + } + break; + default: + assert(0); + } +} + +static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t q_min = DF_MIN_INT(df); + int64_t q_max = DF_MAX_INT(df); + + if (arg1 == q_min && arg2 == q_min) { + return q_max; + } + return (arg1 * arg2) >> (DF_BITS(df) - 1); +} + +static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t q_min = DF_MIN_INT(df); + int64_t q_max = DF_MAX_INT(df); + int64_t r_bit = 1ULL << (DF_BITS(df) - 2); + + if (arg1 == q_min && arg2 == q_min) { + return q_max; + } + return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1); +} + +#define MSA_BINOP_DF(func) \ +void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \ + uint32_t wd, uint32_t ws, uint32_t wt) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ + \ + switch (df) { \ + case DF_BYTE: \ + pwd->b[0] = msa_ ## func ## _df(df, pws->b[0], pwt->b[0]); \ + pwd->b[1] = msa_ ## func ## _df(df, pws->b[1], pwt->b[1]); \ + pwd->b[2] = msa_ ## func ## _df(df, pws->b[2], pwt->b[2]); \ + pwd->b[3] = msa_ ## func ## _df(df, pws->b[3], pwt->b[3]); \ + pwd->b[4] = msa_ ## func ## 
_df(df, pws->b[4], pwt->b[4]); \ + pwd->b[5] = msa_ ## func ## _df(df, pws->b[5], pwt->b[5]); \ + pwd->b[6] = msa_ ## func ## _df(df, pws->b[6], pwt->b[6]); \ + pwd->b[7] = msa_ ## func ## _df(df, pws->b[7], pwt->b[7]); \ + pwd->b[8] = msa_ ## func ## _df(df, pws->b[8], pwt->b[8]); \ + pwd->b[9] = msa_ ## func ## _df(df, pws->b[9], pwt->b[9]); \ + pwd->b[10] = msa_ ## func ## _df(df, pws->b[10], pwt->b[10]); \ + pwd->b[11] = msa_ ## func ## _df(df, pws->b[11], pwt->b[11]); \ + pwd->b[12] = msa_ ## func ## _df(df, pws->b[12], pwt->b[12]); \ + pwd->b[13] = msa_ ## func ## _df(df, pws->b[13], pwt->b[13]); \ + pwd->b[14] = msa_ ## func ## _df(df, pws->b[14], pwt->b[14]); \ + pwd->b[15] = msa_ ## func ## _df(df, pws->b[15], pwt->b[15]); \ + break; \ + case DF_HALF: \ + pwd->h[0] = msa_ ## func ## _df(df, pws->h[0], pwt->h[0]); \ + pwd->h[1] = msa_ ## func ## _df(df, pws->h[1], pwt->h[1]); \ + pwd->h[2] = msa_ ## func ## _df(df, pws->h[2], pwt->h[2]); \ + pwd->h[3] = msa_ ## func ## _df(df, pws->h[3], pwt->h[3]); \ + pwd->h[4] = msa_ ## func ## _df(df, pws->h[4], pwt->h[4]); \ + pwd->h[5] = msa_ ## func ## _df(df, pws->h[5], pwt->h[5]); \ + pwd->h[6] = msa_ ## func ## _df(df, pws->h[6], pwt->h[6]); \ + pwd->h[7] = msa_ ## func ## _df(df, pws->h[7], pwt->h[7]); \ + break; \ + case DF_WORD: \ + pwd->w[0] = msa_ ## func ## _df(df, pws->w[0], pwt->w[0]); \ + pwd->w[1] = msa_ ## func ## _df(df, pws->w[1], pwt->w[1]); \ + pwd->w[2] = msa_ ## func ## _df(df, pws->w[2], pwt->w[2]); \ + pwd->w[3] = msa_ ## func ## _df(df, pws->w[3], pwt->w[3]); \ + break; \ + case DF_DOUBLE: \ + pwd->d[0] = msa_ ## func ## _df(df, pws->d[0], pwt->d[0]); \ + pwd->d[1] = msa_ ## func ## _df(df, pws->d[1], pwt->d[1]); \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_BINOP_DF(subv) +MSA_BINOP_DF(subs_s) +MSA_BINOP_DF(subs_u) +MSA_BINOP_DF(subsus_u) +MSA_BINOP_DF(subsuu_s) +MSA_BINOP_DF(mulv) +MSA_BINOP_DF(dotp_s) +MSA_BINOP_DF(dotp_u) + +MSA_BINOP_DF(mul_q) +MSA_BINOP_DF(mulr_q) +#undef MSA_BINOP_DF + +void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t rt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]); +} + +static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + return dest + arg1 * arg2; +} + +static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + return dest - arg1 * arg2; +} + +static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); +} + +static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); +} + +static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return dest - ((even_arg1 * even_arg2) + (odd_arg1 * 
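+        /*
+         * The dot-product family (dotp_s/u, dpadd_s/u, dpsub_s/u)
+         * treats each element as a pair of half-width values:
+         * SIGNED_EXTRACT/UNSIGNED_EXTRACT split out the even and odd
+         * halves, and the per-element result is
+         * even1 * even2 + odd1 * odd2, accumulated into or subtracted
+         * from the destination for the dpadd/dpsub forms.
+         */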
odd_arg2)); +} + +static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2)); +} + +static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t q_prod, q_ret; + + int64_t q_max = DF_MAX_INT(df); + int64_t q_min = DF_MIN_INT(df); + + q_prod = arg1 * arg2; + q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1); + + return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; +} + +static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t q_prod, q_ret; + + int64_t q_max = DF_MAX_INT(df); + int64_t q_min = DF_MIN_INT(df); + + q_prod = arg1 * arg2; + q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1); + + return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; +} + +static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t q_prod, q_ret; + + int64_t q_max = DF_MAX_INT(df); + int64_t q_min = DF_MIN_INT(df); + int64_t r_bit = 1ULL << (DF_BITS(df) - 2); + + q_prod = arg1 * arg2; + q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1); + + return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; +} + +static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t q_prod, q_ret; + + int64_t q_max = DF_MAX_INT(df); + int64_t q_min = DF_MIN_INT(df); + int64_t r_bit = 1ULL << (DF_BITS(df) - 2); + + q_prod = arg1 * arg2; + q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1); + + return (q_ret < q_min) ? q_min : (q_max < q_ret) ? 
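+        /*
+         * The Q-format ops treat elements as Q(DF_BITS - 1) fractions,
+         * so products are shifted right arithmetically by
+         * DF_BITS(df) - 1.  The rounded *r_q variants add
+         * r_bit = 1 << (DF_BITS(df) - 2), i.e. one half ULP, before
+         * that shift, and results saturate into [q_min, q_max].
+         */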
q_max : q_ret; +} + +#define MSA_TEROP_DF(func) \ +void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \ + uint32_t ws, uint32_t wt) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ + \ + switch (df) { \ + case DF_BYTE: \ + pwd->b[0] = msa_ ## func ## _df(df, pwd->b[0], pws->b[0], \ + pwt->b[0]); \ + pwd->b[1] = msa_ ## func ## _df(df, pwd->b[1], pws->b[1], \ + pwt->b[1]); \ + pwd->b[2] = msa_ ## func ## _df(df, pwd->b[2], pws->b[2], \ + pwt->b[2]); \ + pwd->b[3] = msa_ ## func ## _df(df, pwd->b[3], pws->b[3], \ + pwt->b[3]); \ + pwd->b[4] = msa_ ## func ## _df(df, pwd->b[4], pws->b[4], \ + pwt->b[4]); \ + pwd->b[5] = msa_ ## func ## _df(df, pwd->b[5], pws->b[5], \ + pwt->b[5]); \ + pwd->b[6] = msa_ ## func ## _df(df, pwd->b[6], pws->b[6], \ + pwt->b[6]); \ + pwd->b[7] = msa_ ## func ## _df(df, pwd->b[7], pws->b[7], \ + pwt->b[7]); \ + pwd->b[8] = msa_ ## func ## _df(df, pwd->b[8], pws->b[8], \ + pwt->b[8]); \ + pwd->b[9] = msa_ ## func ## _df(df, pwd->b[9], pws->b[9], \ + pwt->b[9]); \ + pwd->b[10] = msa_ ## func ## _df(df, pwd->b[10], pws->b[10], \ + pwt->b[10]); \ + pwd->b[11] = msa_ ## func ## _df(df, pwd->b[11], pws->b[11], \ + pwt->b[11]); \ + pwd->b[12] = msa_ ## func ## _df(df, pwd->b[12], pws->b[12], \ + pwt->b[12]); \ + pwd->b[13] = msa_ ## func ## _df(df, pwd->b[13], pws->b[13], \ + pwt->b[13]); \ + pwd->b[14] = msa_ ## func ## _df(df, pwd->b[14], pws->b[14], \ + pwt->b[14]); \ + pwd->b[15] = msa_ ## func ## _df(df, pwd->b[15], pws->b[15], \ + pwt->b[15]); \ + break; \ + case DF_HALF: \ + pwd->h[0] = msa_ ## func ## _df(df, pwd->h[0], pws->h[0], pwt->h[0]); \ + pwd->h[1] = msa_ ## func ## _df(df, pwd->h[1], pws->h[1], pwt->h[1]); \ + pwd->h[2] = msa_ ## func ## _df(df, pwd->h[2], pws->h[2], pwt->h[2]); \ + pwd->h[3] = msa_ ## func ## _df(df, pwd->h[3], pws->h[3], pwt->h[3]); \ + pwd->h[4] = msa_ ## func ## _df(df, pwd->h[4], pws->h[4], pwt->h[4]); \ + pwd->h[5] = msa_ ## func ## _df(df, pwd->h[5], pws->h[5], pwt->h[5]); \ + pwd->h[6] = msa_ ## func ## _df(df, pwd->h[6], pws->h[6], pwt->h[6]); \ + pwd->h[7] = msa_ ## func ## _df(df, pwd->h[7], pws->h[7], pwt->h[7]); \ + break; \ + case DF_WORD: \ + pwd->w[0] = msa_ ## func ## _df(df, pwd->w[0], pws->w[0], pwt->w[0]); \ + pwd->w[1] = msa_ ## func ## _df(df, pwd->w[1], pws->w[1], pwt->w[1]); \ + pwd->w[2] = msa_ ## func ## _df(df, pwd->w[2], pws->w[2], pwt->w[2]); \ + pwd->w[3] = msa_ ## func ## _df(df, pwd->w[3], pws->w[3], pwt->w[3]); \ + break; \ + case DF_DOUBLE: \ + pwd->d[0] = msa_ ## func ## _df(df, pwd->d[0], pws->d[0], pwt->d[0]); \ + pwd->d[1] = msa_ ## func ## _df(df, pwd->d[1], pws->d[1], pwt->d[1]); \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_TEROP_DF(maddv) +MSA_TEROP_DF(msubv) +MSA_TEROP_DF(dpadd_s) +MSA_TEROP_DF(dpadd_u) +MSA_TEROP_DF(dpsub_s) +MSA_TEROP_DF(dpsub_u) +MSA_TEROP_DF(binsl) +MSA_TEROP_DF(binsr) +MSA_TEROP_DF(madd_q) +MSA_TEROP_DF(msub_q) +MSA_TEROP_DF(maddr_q) +MSA_TEROP_DF(msubr_q) +#undef MSA_TEROP_DF + +static inline void msa_splat_df(uint32_t df, wr_t *pwd, + wr_t *pws, target_ulong rt) +{ + uint32_t n = rt % DF_ELEMENTS(df); + uint32_t i; + + switch (df) { + case DF_BYTE: + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { + pwd->b[i] = pws->b[n]; + } + break; + case DF_HALF: + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { + pwd->h[i] = pws->h[n]; + } + break; + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + pwd->w[i] = pws->w[n]; + } + break; + case 
DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + pwd->d[i] = pws->d[n]; + } + break; + default: + assert(0); + } +} + +void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t rt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]); +} + +#define MSA_DO_B MSA_DO(b) +#define MSA_DO_H MSA_DO(h) +#define MSA_DO_W MSA_DO(w) +#define MSA_DO_D MSA_DO(d) + +#define MSA_LOOP_B MSA_LOOP(B) +#define MSA_LOOP_H MSA_LOOP(H) +#define MSA_LOOP_W MSA_LOOP(W) +#define MSA_LOOP_D MSA_LOOP(D) + +#define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE) +#define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF) +#define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD) +#define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE) + +#define MSA_LOOP(DF) \ + do { \ + for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \ + MSA_DO_ ## DF; \ + } \ + } while (0) + +#define MSA_FN_DF(FUNC) \ +void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \ + uint32_t ws, uint32_t wt) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ + wr_t wx, *pwx = &wx; \ + uint32_t i; \ + switch (df) { \ + case DF_BYTE: \ + MSA_LOOP_B; \ + break; \ + case DF_HALF: \ + MSA_LOOP_H; \ + break; \ + case DF_WORD: \ + MSA_LOOP_W; \ + break; \ + case DF_DOUBLE: \ + MSA_LOOP_D; \ + break; \ + default: \ + assert(0); \ + } \ + msa_move_v(pwd, pwx); \ +} + +#define MSA_LOOP_COND(DF) \ + (DF_ELEMENTS(DF) / 2) + +#define Rb(pwr, i) (pwr->b[i]) +#define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE) / 2]) +#define Rh(pwr, i) (pwr->h[i]) +#define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF) / 2]) +#define Rw(pwr, i) (pwr->w[i]) +#define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD) / 2]) +#define Rd(pwr, i) (pwr->d[i]) +#define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE) / 2]) + +#undef MSA_LOOP_COND + +#define MSA_LOOP_COND(DF) \ + (DF_ELEMENTS(DF)) + +#define MSA_DO(DF) \ + do { \ + uint32_t n = DF_ELEMENTS(df); \ + uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \ + pwx->DF[i] = \ + (pwd->DF[i] & 0xc0) ? 0 : k < n ? 
pwt->DF[k] : pws->DF[k - n]; \ + } while (0) +MSA_FN_DF(vshf_df) +#undef MSA_DO +#undef MSA_LOOP_COND +#undef MSA_FN_DF + + +void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t n) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + msa_sld_df(df, pwd, pws, n); +} + +void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t n) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + msa_splat_df(df, pwd, pws, n); +} + +void helper_msa_copy_s_b(CPUMIPSState *env, uint32_t rd, + uint32_t ws, uint32_t n) +{ + n %= 16; +#if defined(HOST_WORDS_BIGENDIAN) + if (n < 8) { + n = 8 - n - 1; + } else { + n = 24 - n - 1; + } +#endif + env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n]; +} + +void helper_msa_copy_s_h(CPUMIPSState *env, uint32_t rd, + uint32_t ws, uint32_t n) +{ + n %= 8; +#if defined(HOST_WORDS_BIGENDIAN) + if (n < 4) { + n = 4 - n - 1; + } else { + n = 12 - n - 1; + } +#endif + env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n]; +} + +void helper_msa_copy_s_w(CPUMIPSState *env, uint32_t rd, + uint32_t ws, uint32_t n) +{ + n %= 4; +#if defined(HOST_WORDS_BIGENDIAN) + if (n < 2) { + n = 2 - n - 1; + } else { + n = 6 - n - 1; + } +#endif + env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n]; +} + +void helper_msa_copy_s_d(CPUMIPSState *env, uint32_t rd, + uint32_t ws, uint32_t n) +{ + n %= 2; + env->active_tc.gpr[rd] = (int64_t)env->active_fpu.fpr[ws].wr.d[n]; +} + +void helper_msa_copy_u_b(CPUMIPSState *env, uint32_t rd, + uint32_t ws, uint32_t n) +{ + n %= 16; +#if defined(HOST_WORDS_BIGENDIAN) + if (n < 8) { + n = 8 - n - 1; + } else { + n = 24 - n - 1; + } +#endif + env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n]; +} + +void helper_msa_copy_u_h(CPUMIPSState *env, uint32_t rd, + uint32_t ws, uint32_t n) +{ + n %= 8; +#if defined(HOST_WORDS_BIGENDIAN) + if (n < 4) { + n = 4 - n - 1; + } else { + n = 12 - n - 1; + } +#endif + env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n]; +} + +void helper_msa_copy_u_w(CPUMIPSState *env, uint32_t rd, + uint32_t ws, uint32_t n) +{ + n %= 4; +#if defined(HOST_WORDS_BIGENDIAN) + if (n < 2) { + n = 2 - n - 1; + } else { + n = 6 - n - 1; + } +#endif + env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n]; +} + +void helper_msa_insert_b(CPUMIPSState *env, uint32_t wd, + uint32_t rs_num, uint32_t n) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + target_ulong rs = env->active_tc.gpr[rs_num]; + n %= 16; +#if defined(HOST_WORDS_BIGENDIAN) + if (n < 8) { + n = 8 - n - 1; + } else { + n = 24 - n - 1; + } +#endif + pwd->b[n] = (int8_t)rs; +} + +void helper_msa_insert_h(CPUMIPSState *env, uint32_t wd, + uint32_t rs_num, uint32_t n) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + target_ulong rs = env->active_tc.gpr[rs_num]; + n %= 8; +#if defined(HOST_WORDS_BIGENDIAN) + if (n < 4) { + n = 4 - n - 1; + } else { + n = 12 - n - 1; + } +#endif + pwd->h[n] = (int16_t)rs; +} + +void helper_msa_insert_w(CPUMIPSState *env, uint32_t wd, + uint32_t rs_num, uint32_t n) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + target_ulong rs = env->active_tc.gpr[rs_num]; + n %= 4; +#if defined(HOST_WORDS_BIGENDIAN) + if (n < 2) { + n = 2 - n - 1; + } else { + n = 6 - n - 1; + } +#endif + pwd->w[n] = (int32_t)rs; +} + +void helper_msa_insert_d(CPUMIPSState *env, uint32_t wd, + uint32_t rs_num, uint32_t n) +{ + wr_t *pwd = 
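+    /*
+     * The HOST_WORDS_BIGENDIAN remapping in the copy/insert helpers
+     * above compensates for wr_t being stored as two host-endian 64-bit
+     * halves: on a big-endian host the element order within each half
+     * is reversed, e.g. byte n < 8 lives at index 8 - n - 1.  The _d
+     * forms need no remapping because each half is a whole element.
+     */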
&(env->active_fpu.fpr[wd].wr); + target_ulong rs = env->active_tc.gpr[rs_num]; + n %= 2; + pwd->d[n] = (int64_t)rs; +} + +void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t n) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + switch (df) { + case DF_BYTE: + pwd->b[n] = (int8_t)pws->b[0]; + break; + case DF_HALF: + pwd->h[n] = (int16_t)pws->h[0]; + break; + case DF_WORD: + pwd->w[n] = (int32_t)pws->w[0]; + break; + case DF_DOUBLE: + pwd->d[n] = (int64_t)pws->d[0]; + break; + default: + assert(0); + } +} + +void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd) +{ + switch (cd) { + case 0: + break; + case 1: + env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK; + restore_msa_fp_status(env); + /* check exception */ + if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED) + & GET_FP_CAUSE(env->active_tc.msacsr)) { + do_raise_exception(env, EXCP_MSAFPE, GETPC()); + } + break; + } +} + +target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs) +{ + switch (cs) { + case 0: + return env->msair; + case 1: + return env->active_tc.msacsr & MSACSR_MASK; + } + return 0; +} + +void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t rs) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + uint32_t i; + + switch (df) { + case DF_BYTE: + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { + pwd->b[i] = (int8_t)env->active_tc.gpr[rs]; + } + break; + case DF_HALF: + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { + pwd->h[i] = (int16_t)env->active_tc.gpr[rs]; + } + break; + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + pwd->w[i] = (int32_t)env->active_tc.gpr[rs]; + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + pwd->d[i] = (int64_t)env->active_tc.gpr[rs]; + } + break; + default: + assert(0); + } +} + + +#define FLOAT_ONE32 make_float32(0x3f8 << 20) +#define FLOAT_ONE64 make_float64(0x3ffULL << 52) + +#define FLOAT_SNAN16(s) (float16_default_nan(s) ^ 0x0220) + /* 0x7c20 */ +#define FLOAT_SNAN32(s) (float32_default_nan(s) ^ 0x00400020) + /* 0x7f800020 */ +#define FLOAT_SNAN64(s) (float64_default_nan(s) ^ 0x0008000000000020ULL) + /* 0x7ff0000000000020 */ + +static inline void clear_msacsr_cause(CPUMIPSState *env) +{ + SET_FP_CAUSE(env->active_tc.msacsr, 0); +} + +static inline void check_msacsr_cause(CPUMIPSState *env, uintptr_t retaddr) +{ + if ((GET_FP_CAUSE(env->active_tc.msacsr) & + (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 0) { + UPDATE_FP_FLAGS(env->active_tc.msacsr, + GET_FP_CAUSE(env->active_tc.msacsr)); + } else { + do_raise_exception(env, EXCP_MSAFPE, retaddr); + } +} + +/* Flush-to-zero use cases for update_msacsr() */ +#define CLEAR_FS_UNDERFLOW 1 +#define CLEAR_IS_INEXACT 2 +#define RECIPROCAL_INEXACT 4 + +static inline int update_msacsr(CPUMIPSState *env, int action, int denormal) +{ + int ieee_ex; + + int c; + int cause; + int enable; + + ieee_ex = get_float_exception_flags(&env->active_tc.msa_fp_status); + + /* QEMU softfloat does not signal all underflow cases */ + if (denormal) { + ieee_ex |= float_flag_underflow; + } + + c = ieee_ex_to_mips(ieee_ex); + enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED; + + /* Set Inexact (I) when flushing inputs to zero */ + if ((ieee_ex & float_flag_input_denormal) && + (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) { + if (action & CLEAR_IS_INEXACT) { + c &= ~FP_INEXACT; + } else { + c |= FP_INEXACT; + } + } + + /* Set Inexact (I) and Underflow 
(U) when flushing outputs to zero */ + if ((ieee_ex & float_flag_output_denormal) && + (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) { + c |= FP_INEXACT; + if (action & CLEAR_FS_UNDERFLOW) { + c &= ~FP_UNDERFLOW; + } else { + c |= FP_UNDERFLOW; + } + } + + /* Set Inexact (I) when Overflow (O) is not enabled */ + if ((c & FP_OVERFLOW) != 0 && (enable & FP_OVERFLOW) == 0) { + c |= FP_INEXACT; + } + + /* Clear Exact Underflow when Underflow (U) is not enabled */ + if ((c & FP_UNDERFLOW) != 0 && (enable & FP_UNDERFLOW) == 0 && + (c & FP_INEXACT) == 0) { + c &= ~FP_UNDERFLOW; + } + + /* + * Reciprocal operations set only Inexact when valid and not + * divide by zero + */ + if ((action & RECIPROCAL_INEXACT) && + (c & (FP_INVALID | FP_DIV0)) == 0) { + c = FP_INEXACT; + } + + cause = c & enable; /* all current enabled exceptions */ + + if (cause == 0) { + /* + * No enabled exception, update the MSACSR Cause + * with all current exceptions + */ + SET_FP_CAUSE(env->active_tc.msacsr, + (GET_FP_CAUSE(env->active_tc.msacsr) | c)); + } else { + /* Current exceptions are enabled */ + if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) { + /* + * Exception(s) will trap, update MSACSR Cause + * with all enabled exceptions + */ + SET_FP_CAUSE(env->active_tc.msacsr, + (GET_FP_CAUSE(env->active_tc.msacsr) | c)); + } + } + + return c; +} + +static inline int get_enabled_exceptions(const CPUMIPSState *env, int c) +{ + int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED; + return c & enable; +} + +static inline float16 float16_from_float32(int32_t a, flag ieee, + float_status *status) +{ + float16 f_val; + + f_val = float32_to_float16((float32)a, ieee, status); + + return a < 0 ? (f_val | (1 << 15)) : f_val; +} + +static inline float32 float32_from_float64(int64_t a, float_status *status) +{ + float32 f_val; + + f_val = float64_to_float32((float64)a, status); + + return a < 0 ? (f_val | (1 << 31)) : f_val; +} + +static inline float32 float32_from_float16(int16_t a, flag ieee, + float_status *status) +{ + float32 f_val; + + f_val = float16_to_float32((float16)a, ieee, status); + + return a < 0 ? (f_val | (1 << 31)) : f_val; +} + +static inline float64 float64_from_float32(int32_t a, float_status *status) +{ + float64 f_val; + + f_val = float32_to_float64((float64)a, status); + + return a < 0 ? (f_val | (1ULL << 63)) : f_val; +} + +static inline float32 float32_from_q16(int16_t a, float_status *status) +{ + float32 f_val; + + /* conversion as integer and scaling */ + f_val = int32_to_float32(a, status); + f_val = float32_scalbn(f_val, -15, status); + + return f_val; +} + +static inline float64 float64_from_q32(int32_t a, float_status *status) +{ + float64 f_val; + + /* conversion as integer and scaling */ + f_val = int32_to_float64(a, status); + f_val = float64_scalbn(f_val, -31, status); + + return f_val; +} + +static inline int16_t float32_to_q16(float32 a, float_status *status) +{ + int32_t q_val; + int32_t q_min = 0xffff8000; + int32_t q_max = 0x00007fff; + + int ieee_ex; + + if (float32_is_any_nan(a)) { + float_raise(float_flag_invalid, status); + return 0; + } + + /* scaling */ + a = float32_scalbn(a, 15, status); + + ieee_ex = get_float_exception_flags(status); + set_float_exception_flags(ieee_ex & (~float_flag_underflow) + , status); + + if (ieee_ex & float_flag_overflow) { + float_raise(float_flag_inexact, status); + return (int32_t)a < 0 ? 
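+        /*
+         * float32_to_q16/float64_to_q32 convert to fixed point by
+         * scaling with scalbn (multiply by 2^15 resp. 2^31) and then
+         * converting to an integer.  NaN inputs raise Invalid and
+         * return 0; overflow saturates towards q_min or q_max
+         * according to the sign bit of the scaled input.
+         */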
q_min : q_max; + } + + /* conversion to int */ + q_val = float32_to_int32(a, status); + + ieee_ex = get_float_exception_flags(status); + set_float_exception_flags(ieee_ex & (~float_flag_underflow) + , status); + + if (ieee_ex & float_flag_invalid) { + set_float_exception_flags(ieee_ex & (~float_flag_invalid) + , status); + float_raise(float_flag_overflow | float_flag_inexact, status); + return (int32_t)a < 0 ? q_min : q_max; + } + + if (q_val < q_min) { + float_raise(float_flag_overflow | float_flag_inexact, status); + return (int16_t)q_min; + } + + if (q_max < q_val) { + float_raise(float_flag_overflow | float_flag_inexact, status); + return (int16_t)q_max; + } + + return (int16_t)q_val; +} + +static inline int32_t float64_to_q32(float64 a, float_status *status) +{ + int64_t q_val; + int64_t q_min = 0xffffffff80000000LL; + int64_t q_max = 0x000000007fffffffLL; + + int ieee_ex; + + if (float64_is_any_nan(a)) { + float_raise(float_flag_invalid, status); + return 0; + } + + /* scaling */ + a = float64_scalbn(a, 31, status); + + ieee_ex = get_float_exception_flags(status); + set_float_exception_flags(ieee_ex & (~float_flag_underflow) + , status); + + if (ieee_ex & float_flag_overflow) { + float_raise(float_flag_inexact, status); + return (int64_t)a < 0 ? q_min : q_max; + } + + /* conversion to integer */ + q_val = float64_to_int64(a, status); + + ieee_ex = get_float_exception_flags(status); + set_float_exception_flags(ieee_ex & (~float_flag_underflow) + , status); + + if (ieee_ex & float_flag_invalid) { + set_float_exception_flags(ieee_ex & (~float_flag_invalid) + , status); + float_raise(float_flag_overflow | float_flag_inexact, status); + return (int64_t)a < 0 ? q_min : q_max; + } + + if (q_val < q_min) { + float_raise(float_flag_overflow | float_flag_inexact, status); + return (int32_t)q_min; + } + + if (q_max < q_val) { + float_raise(float_flag_overflow | float_flag_inexact, status); + return (int32_t)q_max; + } + + return (int32_t)q_val; +} + +#define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \ + do { \ + float_status *status = &env->active_tc.msa_fp_status; \ + int c; \ + int64_t cond; \ + set_float_exception_flags(0, status); \ + if (!QUIET) { \ + cond = float ## BITS ## _ ## OP(ARG1, ARG2, status); \ + } else { \ + cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, status); \ + } \ + DEST = cond ? 
M_MAX_UINT(BITS) : 0; \ + c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ + } \ + } while (0) + +#define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \ + if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \ + DEST = 0; \ + } \ + } while (0) + +#define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \ + } \ + } while (0) + +#define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \ + } \ + } while (0) + +#define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \ + } \ + } \ + } while (0) + +#define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \ + } \ + } while (0) + +#define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ + } \ + } while (0) + +#define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \ + } \ + } while (0) + +static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32, + quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64, + quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + 
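+        /*
+         * The composed predicates above are built from the softfloat
+         * primitives: UEQ is unordered-or-equal, ULT/ULE are
+         * unordered-or-lt/le, NE is lt-or-gt (ordered not-equal), UNE
+         * additionally accepts unordered, and OR holds when both
+         * operands are ordered (le one way or the other).  Only the W
+         * and D formats exist for MSA floating point, hence the
+         * assert(0) defaults.
+         */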
assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_COND(pwx->w[i], lt, pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_COND(pwx->d[i], lt, pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_ULT(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_ULT(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_COND(pwx->w[i], le, pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_COND(pwx->d[i], le, pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_ULE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_ULE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + 
break; + default: + assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet, + uintptr_t retaddr) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, retaddr); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_af(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_un(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_eq(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ueq(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_lt(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ult(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_le(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = 
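+    /*
+     * The fc* helpers request quiet compares (quiet = 1), where only
+     * signaling NaNs raise Invalid; the fs* helpers below pass
+     * quiet = 0 so that any NaN operand signals.  GETPC() is forwarded
+     * so a trapping compare can unwind to the guest instruction.
+     */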
&(env->active_fpu.fpr[wt].wr); + compare_ule(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_af(env, pwd, pws, pwt, df, 0, GETPC()); +} + +void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_un(env, pwd, pws, pwt, df, 0, GETPC()); +} + +void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_eq(env, pwd, pws, pwt, df, 0, GETPC()); +} + +void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ueq(env, pwd, pws, pwt, df, 0, GETPC()); +} + +void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_lt(env, pwd, pws, pwt, df, 0, GETPC()); +} + +void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ult(env, pwd, pws, pwt, df, 0, GETPC()); +} + +void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_le(env, pwd, pws, pwt, df, 0, GETPC()); +} + +void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ule(env, pwd, pws, pwt, df, 0, GETPC()); +} + +void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_or(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_une(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ne(env, pwd, pws, pwt, df, 1, GETPC()); +} + +void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_or(env, pwd, 
pws, pwt, df, 0, GETPC()); +} + +void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_une(env, pwd, pws, pwt, df, 0, GETPC()); +} + +void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ne(env, pwd, pws, pwt, df, 0, GETPC()); +} + +#define float16_is_zero(ARG) 0 +#define float16_is_zero_or_denormal(ARG) 0 + +#define IS_DENORMAL(ARG, BITS) \ + (!float ## BITS ## _is_zero(ARG) \ + && float ## BITS ## _is_zero_or_denormal(ARG)) + +#define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \ + do { \ + float_status *status = &env->active_tc.msa_fp_status; \ + int c; \ + \ + set_float_exception_flags(0, status); \ + DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \ + c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_BINOP(pwx->w[i], add, pws->w[i], pwt->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_BINOP(pwx->d[i], add, pws->d[i], pwt->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + msa_move_v(pwd, pwx); +} + +void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_BINOP(pwx->w[i], sub, pws->w[i], pwt->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_BINOP(pwx->d[i], sub, pws->d[i], pwt->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + msa_move_v(pwd, pwx); +} + +void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = 
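+    /*
+     * On an enabled exception, MSA_FLOAT_BINOP replaces the result with
+     * the format's default signaling NaN whose low six bits are
+     * overwritten by the MIPS cause bits c, so the exception source can
+     * be recovered from the destination element.
+     */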
&(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +#define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS) \ + do { \ + float_status *status = &env->active_tc.msa_fp_status; \ + int c; \ + \ + set_float_exception_flags(0, status); \ + DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, status); \ + c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i], + pws->w[i], pwt->w[i], 0, 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i], + pws->d[i], pwt->d[i], 0, 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i], + pws->w[i], pwt->w[i], + float_muladd_negate_product, 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i], + pws->d[i], pwt->d[i], + float_muladd_negate_product, 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i], + pwt->w[i] > 0x200 ? 0x200 : + pwt->w[i] < -0x200 ? -0x200 : pwt->w[i], + 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i], + pwt->d[i] > 0x1000 ? 0x1000 : + pwt->d[i] < -0x1000 ? 
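+            /*
+             * FEXP2 scales by a power of two via scalbn; the exponent
+             * operand is first clamped to +/-0x200 (W) or +/-0x1000 (D),
+             * which is already beyond the exponent range of the format,
+             * so the clamp never changes the result.
+             */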
-0x1000 : pwt->d[i], + 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +#define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS) \ + do { \ + float_status *status = &env->active_tc.msa_fp_status; \ + int c; \ + \ + set_float_exception_flags(0, status); \ + DEST = float ## BITS ## _ ## OP(ARG, status); \ + c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + /* + * Half precision floats come in two formats: standard + * IEEE and "ARM" format. The latter gains extra exponent + * range by omitting the NaN/Inf encodings. + */ + flag ieee = 1; + + MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16); + MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32); + MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + msa_move_v(pwd, pwx); +} + +#define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS) \ + do { \ + float_status *status = &env->active_tc.msa_fp_status; \ + int c; \ + \ + set_float_exception_flags(0, status); \ + DEST = float ## BITS ## _ ## OP(ARG, status); \ + c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## XBITS(status) >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16); + MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32); + MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS, STATUS) \ + !float ## BITS ## _is_any_nan(ARG1) \ + && float ## BITS ## _is_quiet_nan(ARG2, STATUS) + +#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \ + do { \ + float_status *status = &env->active_tc.msa_fp_status; \ + int c; \ + \ + set_float_exception_flags(0, status); \ + DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \ + c = update_msacsr(env, 0, 0); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ + } \ + } while (0) + +#define FMAXMIN_A(F, G, X, _S, _T, BITS, STATUS) \ + do { \ + uint## BITS ##_t S = _S, T = _T; \ + uint## BITS ##_t as, at, xs, xt, xd; \ + if (NUMBER_QNAN_PAIR(S, T, BITS, STATUS)) { \ + T = S; \ + } \ + else if (NUMBER_QNAN_PAIR(T, S, 
BITS, STATUS)) { \ + S = T; \ + } \ + as = float## BITS ##_abs(S); \ + at = float## BITS ##_abs(T); \ + MSA_FLOAT_MAXOP(xs, F, S, T, BITS); \ + MSA_FLOAT_MAXOP(xt, G, S, T, BITS); \ + MSA_FLOAT_MAXOP(xd, F, as, at, BITS); \ + X = (as == at || xd == float## BITS ##_abs(xs)) ? xs : xt; \ + } while (0) + +void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + float_status *status = &env->active_tc.msa_fp_status; + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + clear_msacsr_cause(env); + + if (df == DF_WORD) { + + if (NUMBER_QNAN_PAIR(pws->w[0], pwt->w[0], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[0], min, pws->w[0], pws->w[0], 32); + } else if (NUMBER_QNAN_PAIR(pwt->w[0], pws->w[0], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[0], min, pwt->w[0], pwt->w[0], 32); + } else { + MSA_FLOAT_MAXOP(pwx->w[0], min, pws->w[0], pwt->w[0], 32); + } + + if (NUMBER_QNAN_PAIR(pws->w[1], pwt->w[1], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[1], min, pws->w[1], pws->w[1], 32); + } else if (NUMBER_QNAN_PAIR(pwt->w[1], pws->w[1], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[1], min, pwt->w[1], pwt->w[1], 32); + } else { + MSA_FLOAT_MAXOP(pwx->w[1], min, pws->w[1], pwt->w[1], 32); + } + + if (NUMBER_QNAN_PAIR(pws->w[2], pwt->w[2], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[2], min, pws->w[2], pws->w[2], 32); + } else if (NUMBER_QNAN_PAIR(pwt->w[2], pws->w[2], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[2], min, pwt->w[2], pwt->w[2], 32); + } else { + MSA_FLOAT_MAXOP(pwx->w[2], min, pws->w[2], pwt->w[2], 32); + } + + if (NUMBER_QNAN_PAIR(pws->w[3], pwt->w[3], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[3], min, pws->w[3], pws->w[3], 32); + } else if (NUMBER_QNAN_PAIR(pwt->w[3], pws->w[3], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[3], min, pwt->w[3], pwt->w[3], 32); + } else { + MSA_FLOAT_MAXOP(pwx->w[3], min, pws->w[3], pwt->w[3], 32); + } + + } else if (df == DF_DOUBLE) { + + if (NUMBER_QNAN_PAIR(pws->d[0], pwt->d[0], 64, status)) { + MSA_FLOAT_MAXOP(pwx->d[0], min, pws->d[0], pws->d[0], 64); + } else if (NUMBER_QNAN_PAIR(pwt->d[0], pws->d[0], 64, status)) { + MSA_FLOAT_MAXOP(pwx->d[0], min, pwt->d[0], pwt->d[0], 64); + } else { + MSA_FLOAT_MAXOP(pwx->d[0], min, pws->d[0], pwt->d[0], 64); + } + + if (NUMBER_QNAN_PAIR(pws->d[1], pwt->d[1], 64, status)) { + MSA_FLOAT_MAXOP(pwx->d[1], min, pws->d[1], pws->d[1], 64); + } else if (NUMBER_QNAN_PAIR(pwt->d[1], pws->d[1], 64, status)) { + MSA_FLOAT_MAXOP(pwx->d[1], min, pwt->d[1], pwt->d[1], 64); + } else { + MSA_FLOAT_MAXOP(pwx->d[1], min, pws->d[1], pwt->d[1], 64); + } + + } else { + + assert(0); + + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + float_status *status = &env->active_tc.msa_fp_status; + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + clear_msacsr_cause(env); + + if (df == DF_WORD) { + FMAXMIN_A(min, max, pwx->w[0], pws->w[0], pwt->w[0], 32, status); + FMAXMIN_A(min, max, pwx->w[1], pws->w[1], pwt->w[1], 32, status); + FMAXMIN_A(min, max, pwx->w[2], pws->w[2], pwt->w[2], 32, status); + FMAXMIN_A(min, max, pwx->w[3], pws->w[3], pwt->w[3], 32, status); + } else if (df == DF_DOUBLE) { + FMAXMIN_A(min, max, pwx->d[0], pws->d[0], pwt->d[0], 64, status); + FMAXMIN_A(min, max, pwx->d[1], 
pws->d[1], pwt->d[1], 64, status); + } else { + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + float_status *status = &env->active_tc.msa_fp_status; + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + clear_msacsr_cause(env); + + if (df == DF_WORD) { + + if (NUMBER_QNAN_PAIR(pws->w[0], pwt->w[0], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[0], max, pws->w[0], pws->w[0], 32); + } else if (NUMBER_QNAN_PAIR(pwt->w[0], pws->w[0], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[0], max, pwt->w[0], pwt->w[0], 32); + } else { + MSA_FLOAT_MAXOP(pwx->w[0], max, pws->w[0], pwt->w[0], 32); + } + + if (NUMBER_QNAN_PAIR(pws->w[1], pwt->w[1], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[1], max, pws->w[1], pws->w[1], 32); + } else if (NUMBER_QNAN_PAIR(pwt->w[1], pws->w[1], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[1], max, pwt->w[1], pwt->w[1], 32); + } else { + MSA_FLOAT_MAXOP(pwx->w[1], max, pws->w[1], pwt->w[1], 32); + } + + if (NUMBER_QNAN_PAIR(pws->w[2], pwt->w[2], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[2], max, pws->w[2], pws->w[2], 32); + } else if (NUMBER_QNAN_PAIR(pwt->w[2], pws->w[2], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[2], max, pwt->w[2], pwt->w[2], 32); + } else { + MSA_FLOAT_MAXOP(pwx->w[2], max, pws->w[2], pwt->w[2], 32); + } + + if (NUMBER_QNAN_PAIR(pws->w[3], pwt->w[3], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[3], max, pws->w[3], pws->w[3], 32); + } else if (NUMBER_QNAN_PAIR(pwt->w[3], pws->w[3], 32, status)) { + MSA_FLOAT_MAXOP(pwx->w[3], max, pwt->w[3], pwt->w[3], 32); + } else { + MSA_FLOAT_MAXOP(pwx->w[3], max, pws->w[3], pwt->w[3], 32); + } + + } else if (df == DF_DOUBLE) { + + if (NUMBER_QNAN_PAIR(pws->d[0], pwt->d[0], 64, status)) { + MSA_FLOAT_MAXOP(pwx->d[0], max, pws->d[0], pws->d[0], 64); + } else if (NUMBER_QNAN_PAIR(pwt->d[0], pws->d[0], 64, status)) { + MSA_FLOAT_MAXOP(pwx->d[0], max, pwt->d[0], pwt->d[0], 64); + } else { + MSA_FLOAT_MAXOP(pwx->d[0], max, pws->d[0], pwt->d[0], 64); + } + + if (NUMBER_QNAN_PAIR(pws->d[1], pwt->d[1], 64, status)) { + MSA_FLOAT_MAXOP(pwx->d[1], max, pws->d[1], pws->d[1], 64); + } else if (NUMBER_QNAN_PAIR(pwt->d[1], pws->d[1], 64, status)) { + MSA_FLOAT_MAXOP(pwx->d[1], max, pwt->d[1], pwt->d[1], 64); + } else { + MSA_FLOAT_MAXOP(pwx->d[1], max, pws->d[1], pwt->d[1], 64); + } + + } else { + + assert(0); + + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + float_status *status = &env->active_tc.msa_fp_status; + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + + clear_msacsr_cause(env); + + if (df == DF_WORD) { + FMAXMIN_A(max, min, pwx->w[0], pws->w[0], pwt->w[0], 32, status); + FMAXMIN_A(max, min, pwx->w[1], pws->w[1], pwt->w[1], 32, status); + FMAXMIN_A(max, min, pwx->w[2], pws->w[2], pwt->w[2], 32, status); + FMAXMIN_A(max, min, pwx->w[3], pws->w[3], pwt->w[3], 32, status); + } else if (df == DF_DOUBLE) { + FMAXMIN_A(max, min, pwx->d[0], pws->d[0], pwt->d[0], 64, status); + FMAXMIN_A(max, min, pwx->d[1], pws->d[1], pwt->d[1], 64, status); + } else { + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fclass_df(CPUMIPSState *env, 
uint32_t df, + uint32_t wd, uint32_t ws) +{ + float_status *status = &env->active_tc.msa_fp_status; + + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + if (df == DF_WORD) { + pwd->w[0] = float_class_s(pws->w[0], status); + pwd->w[1] = float_class_s(pws->w[1], status); + pwd->w[2] = float_class_s(pws->w[2], status); + pwd->w[3] = float_class_s(pws->w[3], status); + } else if (df == DF_DOUBLE) { + pwd->d[0] = float_class_d(pws->d[0], status); + pwd->d[1] = float_class_d(pws->d[1], status); + } else { + assert(0); + } +} + +#define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS) \ + do { \ + float_status *status = &env->active_tc.msa_fp_status; \ + int c; \ + \ + set_float_exception_flags(0, status); \ + DEST = float ## BITS ## _ ## OP(ARG, status); \ + c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ + } else if (float ## BITS ## _is_any_nan(ARG)) { \ + DEST = 0; \ + } \ + } while (0) + +void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +#define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS) \ + do { \ + float_status *status = &env->active_tc.msa_fp_status; \ + int c; \ + \ + set_float_exception_flags(0, status); \ + DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, status); \ + c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) || \ + float ## BITS ## _is_quiet_nan(DEST, status) ? 
\ + 0 : RECIPROCAL_INEXACT, \ + IS_DENORMAL(DEST, BITS)); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i], + &env->active_tc.msa_fp_status), 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i], + &env->active_tc.msa_fp_status), 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], round_to_int, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +#define MSA_FLOAT_LOGB(DEST, ARG, BITS) \ + do { \ + float_status *status = &env->active_tc.msa_fp_status; \ + int c; \ + \ + set_float_exception_flags(0, status); \ + set_float_rounding_mode(float_round_down, status); \ + DEST = float ## BITS ## _ ## log2(ARG, status); \ + DEST = float ## BITS ## _ ## round_to_int(DEST, status); \ + set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr & \ + MSACSR_RM_MASK) >> MSACSR_RM], \ + status); \ + \ + set_float_exception_flags(get_float_exception_flags(status) & \ + (~float_flag_inexact), \ + status); \ + \ + c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t 
wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + /* + * Half precision floats come in two formats: standard + * IEEE and "ARM" format. The latter gains extra exponent + * range by omitting the NaN/Inf encodings. + */ + flag ieee = 1; + + MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + msa_move_v(pwd, pwx); +} + +void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + /* + * Half precision floats come in two formats: standard + * IEEE and "ARM" format. The latter gains extra exponent + * range by omitting the NaN/Inf encodings. + */ + flag ieee = 1; + + MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + msa_move_v(pwd, pwx); +} + +void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64); + } + break; + default: + assert(0); + } + + msa_move_v(pwd, pwx); +} + +void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64); + } + break; + default: + assert(0); + } + + msa_move_v(pwd, pwx); +} + +void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + 
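+    /* Like ftrunc_u above, but to_uint32/to_uint64 honour the rounding mode currently programmed in MSACSR instead of always rounding toward zero; MSA_FLOAT_UNOP0 still maps NaN inputs to 0. */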
+ clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +/* Aliases so that MSA_FLOAT_UNOP's token pasting ("from_int32", "from_uint64", ...) resolves to softfloat's integer-to-float conversions. */ +#define float32_from_int32 int32_to_float32 +#define float32_from_uint32 uint32_to_float32 + +#define float64_from_int64 int64_to_float64 +#define float64_from_uint64 uint64_to_float64 + +void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} + +void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env, GETPC()); + + msa_move_v(pwd, pwx); +} diff --git a/qemu/target/mips/op_helper.c b/qemu/target/mips/op_helper.c new file mode 100644 index 00000000..fea51a69 --- /dev/null +++ b/qemu/target/mips/op_helper.c @@ -0,0 +1,1364 @@ +/* + * MIPS emulation helpers for qemu. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "exec/memop.h" + + +/*****************************************************************************/ +/* Exceptions processing helpers */ + +void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception, + int error_code) +{ + do_raise_exception_err(env, exception, error_code, 0); +} + +void helper_raise_exception(CPUMIPSState *env, uint32_t exception) +{ + do_raise_exception(env, exception, GETPC()); +} + +void helper_raise_exception_debug(CPUMIPSState *env) +{ + do_raise_exception(env, EXCP_DEBUG, 0); +} + +static void raise_exception(CPUMIPSState *env, uint32_t exception) +{ + do_raise_exception(env, exception, 0); +} + +/* 64 bits arithmetic for 32 bits hosts */ +static inline uint64_t get_HILO(CPUMIPSState *env) +{ + return ((uint64_t)(env->active_tc.HI[0]) << 32) | + (uint32_t)env->active_tc.LO[0]; +} + +static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO) +{ + env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); + return env->active_tc.HI[0] = (int32_t)(HILO >> 32); +} + +static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO) +{ + target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); + env->active_tc.HI[0] = (int32_t)(HILO >> 32); + return tmp; +} + +/* Multiplication variants of the vr54xx. */ +target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2)); +} + +target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (uint64_t)get_HILO(env) + + (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (uint64_t)get_HILO(env) + + (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HI_LOT0(env, (uint64_t)get_HILO(env) - + (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (uint64_t)get_HILO(env) - + (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return 
set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); +} + +target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +static inline target_ulong bitswap(target_ulong v) +{ + v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) | + ((v & (target_ulong)0x5555555555555555ULL) << 1); + v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) | + ((v & (target_ulong)0x3333333333333333ULL) << 2); + v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) | + ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4); + return v; +} + +#ifdef TARGET_MIPS64 +target_ulong helper_dbitswap(target_ulong rt) +{ + return bitswap(rt); +} +#endif + +target_ulong helper_bitswap(target_ulong rt) +{ + return (int32_t)bitswap(rt); +} + +target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx, + uint32_t stripe) +{ + int i; + uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff); + uint64_t tmp1 = tmp0; + for (i = 0; i <= 46; i++) { + int s; + if (i & 0x8) { + s = shift; + } else { + s = shiftx; + } + + if (stripe != 0 && !(i & 0x4)) { + s = ~s; + } + if (s & 0x10) { + if (tmp0 & (1LL << (i + 16))) { + tmp1 |= 1LL << i; + } else { + tmp1 &= ~(1LL << i); + } + } + } + + uint64_t tmp2 = tmp1; + for (i = 0; i <= 38; i++) { + int s; + if (i & 0x4) { + s = shift; + } else { + s = shiftx; + } + + if (s & 0x8) { + if (tmp1 & (1LL << (i + 8))) { + tmp2 |= 1LL << i; + } else { + tmp2 &= ~(1LL << i); + } + } + } + + uint64_t tmp3 = tmp2; + for (i = 0; i <= 34; i++) { + int s; + if (i & 0x2) { + s = shift; + } else { + s = shiftx; + } + if (s & 0x4) { + if (tmp2 & (1LL << (i + 4))) { + tmp3 |= 1LL << i; + } else { + tmp3 &= ~(1LL << i); + } + } + } + + uint64_t tmp4 = tmp3; + for (i = 0; i <= 32; i++) { + int s; + if (i & 0x1) { + s = shift; + } else { + s = shiftx; + } + if (s & 0x2) { + if (tmp3 & (1LL << (i + 2))) { + tmp4 |= 1LL << i; + } else { + tmp4 &= ~(1LL << i); + } + } + } + + uint64_t tmp5 = tmp4; + for (i = 0; i <= 31; i++) { + int s; + s = shift; + if (s & 0x1) { + if (tmp4 & (1LL << (i + 1))) { + tmp5 |= 1LL << i; + } else { + tmp5 &= ~(1LL << i); + } + } + } + + return (int64_t)(int32_t)(uint32_t)tmp5; +} + +static inline hwaddr do_translate_address(CPUMIPSState *env, + target_ulong address, + int rw, uintptr_t retaddr) +{ + hwaddr paddr; + CPUState *cs = env_cpu(env); + + paddr = cpu_mips_translate_address(env, address, rw); + + if (paddr == -1LL) { + cpu_loop_exit_restore(cs, retaddr); + } else { + return paddr; + } +} + +#define HELPER_LD_ATOMIC(name, insn, almask, do_cast) \ +target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \ +{ \ + if (arg & almask) { \ + if (!(env->hflags & MIPS_HFLAG_DM)) { \ + env->CP0_BadVAddr = arg; \ + } \ + do_raise_exception(env, EXCP_AdEL, GETPC()); \ + } \ + env->CP0_LLAddr = do_translate_address(env, arg, 0, GETPC()); \ + env->lladdr = arg; \ + env->llval = do_cast cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC()); \ + return env->llval; \ +} +HELPER_LD_ATOMIC(ll, ldl, 0x3, (target_long)(int32_t)) +#ifdef TARGET_MIPS64 +HELPER_LD_ATOMIC(lld, ldq, 
0x7, (target_ulong)) +#endif +#undef HELPER_LD_ATOMIC + +#ifdef TARGET_WORDS_BIGENDIAN +#define GET_LMASK(v) ((v) & 3) +#define GET_OFFSET(addr, offset) (addr + (offset)) +#else +#define GET_LMASK(v) (((v) & 3) ^ 3) +#define GET_OFFSET(addr, offset) (addr - (offset)) +#endif + +void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC()); + + if (GET_LMASK(arg2) <= 2) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), + mem_idx, GETPC()); + } + + if (GET_LMASK(arg2) <= 1) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), + mem_idx, GETPC()); + } + + if (GET_LMASK(arg2) == 0) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, + mem_idx, GETPC()); + } +} + +void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); + + if (GET_LMASK(arg2) >= 1) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), + mem_idx, GETPC()); + } + + if (GET_LMASK(arg2) >= 2) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), + mem_idx, GETPC()); + } + + if (GET_LMASK(arg2) == 3) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), + mem_idx, GETPC()); + } +} + +#if defined(TARGET_MIPS64) +/* + * "half" load and stores. We must do the memory access inline, + * or fault handling won't work. + */ +#ifdef TARGET_WORDS_BIGENDIAN +#define GET_LMASK64(v) ((v) & 7) +#else +#define GET_LMASK64(v) (((v) & 7) ^ 7) +#endif + +void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC()); + + if (GET_LMASK64(arg2) <= 6) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 5) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 4) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 3) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 2) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 1) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) <= 0) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, + mem_idx, GETPC()); + } +} + +void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); + + if (GET_LMASK64(arg2) >= 1) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) >= 2) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) >= 3) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) >= 4) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) >= 5) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) >= 6) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 
-6), (uint8_t)(arg1 >> 48), + mem_idx, GETPC()); + } + + if (GET_LMASK64(arg2) == 7) { + cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), + mem_idx, GETPC()); + } +} +#endif /* TARGET_MIPS64 */ + +static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 }; + +void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, + uint32_t mem_idx) +{ + target_ulong base_reglist = reglist & 0xf; + target_ulong do_r31 = reglist & 0x10; + + if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { + target_ulong i; + + for (i = 0; i < base_reglist; i++) { + env->active_tc.gpr[multiple_regs[i]] = + (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC()); + addr += 4; + } + } + + if (do_r31) { + env->active_tc.gpr[31] = + (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC()); + } +} + +void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, + uint32_t mem_idx) +{ + target_ulong base_reglist = reglist & 0xf; + target_ulong do_r31 = reglist & 0x10; + + if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { + target_ulong i; + + for (i = 0; i < base_reglist; i++) { + cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], + mem_idx, GETPC()); + addr += 4; + } + } + + if (do_r31) { + cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); + } +} + +#if defined(TARGET_MIPS64) +void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, + uint32_t mem_idx) +{ + target_ulong base_reglist = reglist & 0xf; + target_ulong do_r31 = reglist & 0x10; + + if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { + target_ulong i; + + for (i = 0; i < base_reglist; i++) { + env->active_tc.gpr[multiple_regs[i]] = + cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC()); + addr += 8; + } + } + + if (do_r31) { + env->active_tc.gpr[31] = + cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC()); + } +} + +void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, + uint32_t mem_idx) +{ + target_ulong base_reglist = reglist & 0xf; + target_ulong do_r31 = reglist & 0x10; + + if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { + target_ulong i; + + for (i = 0; i < base_reglist; i++) { + cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], + mem_idx, GETPC()); + addr += 8; + } + } + + if (do_r31) { + cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); + } +} +#endif + + +void helper_fork(target_ulong arg1, target_ulong arg2) +{ + /* + * arg1 = rt, arg2 = rs + * TODO: store to TC register + */ +} + +target_ulong helper_yield(CPUMIPSState *env, target_ulong arg) +{ + target_long arg1 = arg; + + if (arg1 < 0) { + /* No scheduling policy implemented. */ + if (arg1 != -2) { + if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) && + env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) { + env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); + env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT; + do_raise_exception(env, EXCP_THREAD, GETPC()); + } + } + } else if (arg1 == 0) { + if (0) { + /* TODO: TC underflow */ + env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); + do_raise_exception(env, EXCP_THREAD, GETPC()); + } else { + /* TODO: Deallocate TC */ + } + } else if (arg1 > 0) { + /* Yield qualifier inputs not implemented. 
*/ + env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); + env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT; + do_raise_exception(env, EXCP_THREAD, GETPC()); + } + return env->CP0_YQMask; +} + +/* TLB management */ +static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first) +{ + /* Discard entries from env->tlb[first] onwards. */ + while (env->tlb->tlb_in_use > first) { + r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0); + } +} + +static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo) +{ +#if defined(TARGET_MIPS64) + return extract64(entrylo, 6, 54); +#else + return extract64(entrylo, 6, 24) | /* PFN */ + (extract64(entrylo, 32, 32) << 24); /* PFNX */ +#endif +} + +static void r4k_fill_tlb(CPUMIPSState *env, int idx) +{ + r4k_tlb_t *tlb; + uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1); + + /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */ + tlb = &env->tlb->mmu.r4k.tlb[idx]; + if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) { + tlb->EHINV = 1; + return; + } + tlb->EHINV = 0; + tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1); +#if defined(TARGET_MIPS64) + tlb->VPN &= env->SEGMask; +#endif + tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + tlb->MMID = env->CP0_MemoryMapID; + tlb->PageMask = env->CP0_PageMask; + tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; + tlb->V0 = (env->CP0_EntryLo0 & 2) != 0; + tlb->D0 = (env->CP0_EntryLo0 & 4) != 0; + tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7; + tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1; + tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1; + tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12; + tlb->V1 = (env->CP0_EntryLo1 & 2) != 0; + tlb->D1 = (env->CP0_EntryLo1 & 4) != 0; + tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7; + tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1; + tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1; + tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12; +} + +void r4k_helper_tlbinv(CPUMIPSState *env) +{ + bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); + uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + uint32_t MMID = env->CP0_MemoryMapID; + uint32_t tlb_mmid; + r4k_tlb_t *tlb; + int idx; + + MMID = mi ? MMID : (uint32_t) ASID; + for (idx = 0; idx < env->tlb->nb_tlb; idx++) { + tlb = &env->tlb->mmu.r4k.tlb[idx]; + tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; + if (!tlb->G && tlb_mmid == MMID) { + tlb->EHINV = 1; + } + } + cpu_mips_tlb_flush(env); +} + +void r4k_helper_tlbinvf(CPUMIPSState *env) +{ + int idx; + + for (idx = 0; idx < env->tlb->nb_tlb; idx++) { + env->tlb->mmu.r4k.tlb[idx].EHINV = 1; + } + cpu_mips_tlb_flush(env); +} + +void r4k_helper_tlbwi(CPUMIPSState *env) +{ + bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); + target_ulong VPN; + uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + uint32_t MMID = env->CP0_MemoryMapID; + uint32_t tlb_mmid; + bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1; + r4k_tlb_t *tlb; + int idx; + + MMID = mi ? 
MMID : (uint32_t) ASID; + + idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; + tlb = &env->tlb->mmu.r4k.tlb[idx]; + VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1); +#if defined(TARGET_MIPS64) + VPN &= env->SEGMask; +#endif + EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0; + G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; + V0 = (env->CP0_EntryLo0 & 2) != 0; + D0 = (env->CP0_EntryLo0 & 4) != 0; + XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1; + RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1; + V1 = (env->CP0_EntryLo1 & 2) != 0; + D1 = (env->CP0_EntryLo1 & 4) != 0; + XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1; + RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1; + + tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; + /* + * Discard cached TLB entries, unless tlbwi is just upgrading access + * permissions on the current entry. + */ + if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G || + (!tlb->EHINV && EHINV) || + (tlb->V0 && !V0) || (tlb->D0 && !D0) || + (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) || + (tlb->V1 && !V1) || (tlb->D1 && !D1) || + (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) { + r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); + } + + r4k_invalidate_tlb(env, idx, 0); + r4k_fill_tlb(env, idx); +} + +void r4k_helper_tlbwr(CPUMIPSState *env) +{ + int r = cpu_mips_get_random(env); + + r4k_invalidate_tlb(env, r, 1); + r4k_fill_tlb(env, r); +} + +void r4k_helper_tlbp(CPUMIPSState *env) +{ + bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); + r4k_tlb_t *tlb; + target_ulong mask; + target_ulong tag; + target_ulong VPN; + uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + uint32_t MMID = env->CP0_MemoryMapID; + uint32_t tlb_mmid; + int i; + + MMID = mi ? MMID : (uint32_t) ASID; + for (i = 0; i < env->tlb->nb_tlb; i++) { + tlb = &env->tlb->mmu.r4k.tlb[i]; + /* 1k pages are not supported. */ + mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); + tag = env->CP0_EntryHi & ~mask; + VPN = tlb->VPN & ~mask; +#if defined(TARGET_MIPS64) + tag &= env->SEGMask; +#endif + tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; + /* Check ASID/MMID, virtual page number & size */ + if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) { + /* TLB match */ + env->CP0_Index = i; + break; + } + } + if (i == env->tlb->nb_tlb) { + /* No match. Discard any shadow entries, if any of them match. */ + for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) { + tlb = &env->tlb->mmu.r4k.tlb[i]; + /* 1k pages are not supported. */ + mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); + tag = env->CP0_EntryHi & ~mask; + VPN = tlb->VPN & ~mask; +#if defined(TARGET_MIPS64) + tag &= env->SEGMask; +#endif + tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; + /* Check ASID/MMID, virtual page number & size */ + if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) { + r4k_mips_tlb_flush_extra(env, i); + break; + } + } + + env->CP0_Index |= 0x80000000; + } +} + +static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn) +{ +#if defined(TARGET_MIPS64) + return tlb_pfn << 6; +#else + return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */ + (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */ +#endif +} + +void r4k_helper_tlbr(CPUMIPSState *env) +{ + bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); + uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; + uint32_t MMID = env->CP0_MemoryMapID; + uint32_t tlb_mmid; + r4k_tlb_t *tlb; + int idx; + + MMID = mi ? MMID : (uint32_t) ASID; + idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; + tlb = &env->tlb->mmu.r4k.tlb[idx]; + + tlb_mmid = mi ? 
tlb->MMID : (uint32_t) tlb->ASID; + /* If this will change the current ASID/MMID, flush qemu's TLB. */ + if (MMID != tlb_mmid) { + cpu_mips_tlb_flush(env); + } + + r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); + + if (tlb->EHINV) { + env->CP0_EntryHi = 1 << CP0EnHi_EHINV; + env->CP0_PageMask = 0; + env->CP0_EntryLo0 = 0; + env->CP0_EntryLo1 = 0; + } else { + env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID; + env->CP0_MemoryMapID = tlb->MMID; + env->CP0_PageMask = tlb->PageMask; + env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) | + ((uint64_t)tlb->RI0 << CP0EnLo_RI) | + ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) | + get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12); + env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) | + ((uint64_t)tlb->RI1 << CP0EnLo_RI) | + ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) | + get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12); + } +} + +void helper_tlbwi(CPUMIPSState *env) +{ + env->tlb->helper_tlbwi(env); +} + +void helper_tlbwr(CPUMIPSState *env) +{ + env->tlb->helper_tlbwr(env); +} + +void helper_tlbp(CPUMIPSState *env) +{ + env->tlb->helper_tlbp(env); +} + +void helper_tlbr(CPUMIPSState *env) +{ + env->tlb->helper_tlbr(env); +} + +void helper_tlbinv(CPUMIPSState *env) +{ + env->tlb->helper_tlbinv(env); +} + +void helper_tlbinvf(CPUMIPSState *env) +{ + env->tlb->helper_tlbinvf(env); +} + +#if 0 +static void global_invalidate_tlb(CPUMIPSState *env, + uint32_t invMsgVPN2, + uint8_t invMsgR, + uint32_t invMsgMMid, + bool invAll, + bool invVAMMid, + bool invMMid, + bool invVA) +{ + + int idx; + r4k_tlb_t *tlb; + bool VAMatch; + bool MMidMatch; + + for (idx = 0; idx < env->tlb->nb_tlb; idx++) { + tlb = &env->tlb->mmu.r4k.tlb[idx]; + VAMatch = + (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask)) +#ifdef TARGET_MIPS64 + && + (extract64(env->CP0_EntryHi, 62, 2) == invMsgR) +#endif + ); + MMidMatch = tlb->MMID == invMsgMMid; + if ((invAll && (idx > env->CP0_Wired)) || + (VAMatch && invVAMMid && (tlb->G || MMidMatch)) || + (VAMatch && invVA) || + (MMidMatch && !(tlb->G) && invMMid)) { + tlb->EHINV = 1; + } + } + cpu_mips_tlb_flush(env); +} +#endif + +void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type) +{ +#if 0 + FIXME + bool invAll = type == 0; + bool invVA = type == 1; + bool invMMid = type == 2; + bool invVAMMid = type == 3; + uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1); + uint8_t invMsgR = 0; + uint32_t invMsgMMid = env->CP0_MemoryMapID; + CPUState *other_cs = first_cpu; + +#ifdef TARGET_MIPS64 + invMsgR = extract64(arg, 62, 2); +#endif + + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid, + invAll, invVAMMid, invMMid, invVA); + } +#endif +} + +/* Specials */ +target_ulong helper_di(CPUMIPSState *env) +{ + target_ulong t0 = env->CP0_Status; + + env->CP0_Status = t0 & ~(1 << CP0St_IE); + return t0; +} + +target_ulong helper_ei(CPUMIPSState *env) +{ + target_ulong t0 = env->CP0_Status; + + env->CP0_Status = t0 | (1 << CP0St_IE); + return t0; +} + +static void set_pc(CPUMIPSState *env, target_ulong error_pc) +{ + env->active_tc.PC = error_pc & ~(target_ulong)1; + if (error_pc & 1) { + env->hflags |= MIPS_HFLAG_M16; + } else { + env->hflags &= ~(MIPS_HFLAG_M16); + } +} + +static inline void exception_return(CPUMIPSState *env) +{ + if (env->CP0_Status & (1 << CP0St_ERL)) { + set_pc(env, env->CP0_ErrorEPC); + env->CP0_Status &= ~(1 << CP0St_ERL); + } else { + set_pc(env, env->CP0_EPC); + env->CP0_Status 
&= ~(1 << CP0St_EXL); + } + compute_hflags(env); +} + +void helper_eret(CPUMIPSState *env) +{ + exception_return(env); + env->CP0_LLAddr = 1; + env->lladdr = 1; +} + +void helper_eretnc(CPUMIPSState *env) +{ + exception_return(env); +} + +void helper_deret(CPUMIPSState *env) +{ + env->hflags &= ~MIPS_HFLAG_DM; + compute_hflags(env); + + set_pc(env, env->CP0_DEPC); +} + +static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc) +{ + if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) { + return; + } + do_raise_exception(env, EXCP_RI, pc); +} + +target_ulong helper_rdhwr_cpunum(CPUMIPSState *env) +{ + check_hwrena(env, 0, GETPC()); + return env->CP0_EBase & 0x3ff; +} + +target_ulong helper_rdhwr_synci_step(CPUMIPSState *env) +{ + check_hwrena(env, 1, GETPC()); + return env->SYNCI_Step; +} + +target_ulong helper_rdhwr_cc(CPUMIPSState *env) +{ + check_hwrena(env, 2, GETPC()); + // return (int32_t)cpu_mips_get_count(env); + return 0; +} + +target_ulong helper_rdhwr_ccres(CPUMIPSState *env) +{ + check_hwrena(env, 3, GETPC()); + return env->CCRes; +} + +target_ulong helper_rdhwr_performance(CPUMIPSState *env) +{ + check_hwrena(env, 4, GETPC()); + return env->CP0_Performance0; +} + +target_ulong helper_rdhwr_xnp(CPUMIPSState *env) +{ + check_hwrena(env, 5, GETPC()); + return (env->CP0_Config5 >> CP0C5_XNP) & 1; +} + +void helper_pmon(CPUMIPSState *env, int function) +{ + function /= 2; + switch (function) { + case 2: /* TODO: char inbyte(int waitflag); */ + if (env->active_tc.gpr[4] == 0) { + env->active_tc.gpr[2] = -1; + } + /* Fall through */ + case 11: /* TODO: char inbyte (void); */ + env->active_tc.gpr[2] = -1; + break; + case 3: + case 12: + printf("%c", (char)(env->active_tc.gpr[4] & 0xFF)); + break; + case 17: + break; + case 158: + { + unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4]; + printf("%s", fmt); + } + break; + } +} + +void helper_wait(CPUMIPSState *env) +{ + CPUState *cs = env_cpu(env); + + cs->halted = 1; + cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); + /* + * Last instruction in the block, PC was updated before + * - no need to recover PC and icount. 
+ */ + raise_exception(env, EXCP_HLT); +} + +void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + int error_code = 0; + int excp; + + if (!(env->hflags & MIPS_HFLAG_DM)) { + env->CP0_BadVAddr = addr; + } + + if (access_type == MMU_DATA_STORE) { + excp = EXCP_AdES; + } else { + excp = EXCP_AdEL; + if (access_type == MMU_INST_FETCH) { + error_code |= EXCP_INST_NOTAVAIL; + } + } + + do_raise_exception_err(env, excp, error_code, retaddr); +} + +void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + CPUMIPSState *env = &cpu->env; + + if (access_type == MMU_INST_FETCH) { + do_raise_exception(env, EXCP_IBE, retaddr); + } else { + do_raise_exception(env, EXCP_DBE, retaddr); + } +} + + +/* MSA */ +/* Data format min and max values */ +#define DF_BITS(df) (1 << ((df) + 3)) + +/* Element-by-element access macros */ +#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) + +#define MEMOP_IDX(DF) \ + TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \ + cpu_mmu_index(env, false)); + +void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd, + target_ulong addr) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + MEMOP_IDX(DF_BYTE) +#if !defined(HOST_WORDS_BIGENDIAN) + pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC()); + pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC()); + pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC()); + pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC()); + pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC()); + pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC()); + pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC()); + pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC()); + pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC()); + pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC()); + pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC()); + pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC()); + pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC()); + pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC()); + pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC()); + pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC()); +#else + pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC()); + pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC()); + pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC()); + pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC()); + pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC()); + pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC()); + pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC()); + pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC()); + pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC()); + pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC()); + pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, 
GETPC()); + pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC()); + pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC()); + pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC()); + pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC()); + pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC()); +#endif +} + +void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd, + target_ulong addr) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + MEMOP_IDX(DF_HALF) +#if !defined(HOST_WORDS_BIGENDIAN) + pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC()); + pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC()); + pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC()); + pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC()); + pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC()); + pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC()); + pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC()); + pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC()); +#else + pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC()); + pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC()); + pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC()); + pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC()); + pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC()); + pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC()); + pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC()); + pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC()); +#endif +} + +void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd, + target_ulong addr) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + MEMOP_IDX(DF_WORD) +#if !defined(HOST_WORDS_BIGENDIAN) + pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC()); + pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC()); + pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC()); + pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC()); +#else + pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC()); + pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC()); + pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC()); + pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC()); +#endif +} + +void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd, + target_ulong addr) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + MEMOP_IDX(DF_DOUBLE) + pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC()); + pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC()); +} + +#define MSA_PAGESPAN(x) \ + ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN / 8 - 1) >= TARGET_PAGE_SIZE) + +static inline void ensure_writable_pages(CPUMIPSState *env, + target_ulong addr, + int mmu_idx, + uintptr_t retaddr) +{ + /* FIXME: Probe the actual accesses (pass and use a size) */ + if (unlikely(MSA_PAGESPAN(addr))) { + /* first page */ + probe_write(env, addr, 0, mmu_idx, retaddr); + /* second page */ + addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + probe_write(env, addr, 0, mmu_idx, retaddr); + } +} + +void helper_msa_st_b(CPUMIPSState *env, uint32_t wd, + target_ulong addr) +{ + wr_t *pwd = 
&(env->active_fpu.fpr[wd].wr); + int mmu_idx = cpu_mmu_index(env, false); + + MEMOP_IDX(DF_BYTE) + ensure_writable_pages(env, addr, mmu_idx, GETPC()); +#if !defined(HOST_WORDS_BIGENDIAN) + helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[8], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC()); +#else + helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC()); + helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[15], oi, GETPC()); +#endif +} + +void helper_msa_st_h(CPUMIPSState *env, uint32_t wd, + target_ulong addr) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + int mmu_idx = cpu_mmu_index(env, false); + + MEMOP_IDX(DF_HALF) + ensure_writable_pages(env, addr, mmu_idx, GETPC()); +#if !defined(HOST_WORDS_BIGENDIAN) + helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC()); +#else + helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC()); + 
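+    /* On big-endian hosts the h[] lanes of wr_t are laid out in reverse within each 64-bit half, so the offsets in this branch are mirrored to produce the same memory image as the little-endian path above. */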
helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC()); + helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC()); +#endif +} + +void helper_msa_st_w(CPUMIPSState *env, uint32_t wd, + target_ulong addr) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + int mmu_idx = cpu_mmu_index(env, false); + + MEMOP_IDX(DF_WORD) + ensure_writable_pages(env, addr, mmu_idx, GETPC()); +#if !defined(HOST_WORDS_BIGENDIAN) + helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC()); + helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC()); + helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC()); + helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC()); +#else + helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC()); + helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC()); + helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC()); + helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC()); +#endif +} + +void helper_msa_st_d(CPUMIPSState *env, uint32_t wd, + target_ulong addr) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + int mmu_idx = cpu_mmu_index(env, false); + + MEMOP_IDX(DF_DOUBLE) + ensure_writable_pages(env, addr, mmu_idx, GETPC()); + helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC()); + helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC()); +} + +void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op) +{ + target_ulong index = addr & 0x1fffffff; + if (op == 9) { + /* Index Store Tag */ + memory_region_dispatch_write(env->uc, env->itc_tag, index, env->CP0_TagLo, + MO_64, MEMTXATTRS_UNSPECIFIED); + } else if (op == 5) { + /* Index Load Tag */ + memory_region_dispatch_read(env->uc, env->itc_tag, index, &env->CP0_TagLo, + MO_64, MEMTXATTRS_UNSPECIFIED); + } +} diff --git a/qemu/target/mips/translate.c b/qemu/target/mips/translate.c new file mode 100644 index 00000000..97e680a3 --- /dev/null +++ b/qemu/target/mips/translate.c @@ -0,0 +1,31409 @@ +/* + * MIPS emulation for QEMU - main translation routines + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * Copyright (c) 2006 Marius Groeger (FPU operations) + * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support) + * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support) + * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "exec/exec-all.h" +#include "tcg/tcg-op.h" +#include "exec/cpu_ldst.h" +#include "hw/mips/cpudevs.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "exec/translator.h" + +#define MIPS_DEBUG_DISAS 0 + +/* MIPS major opcodes */ +#define MASK_OP_MAJOR(op) (op & (0x3F << 26)) + +enum { + /* indirect opcode tables */ + OPC_SPECIAL = (0x00 << 26), + OPC_REGIMM = (0x01 << 26), + OPC_CP0 = (0x10 << 26), + OPC_CP1 = (0x11 << 26), + OPC_CP2 = (0x12 << 26), + OPC_CP3 = (0x13 << 26), + OPC_SPECIAL2 = (0x1C << 26), + OPC_SPECIAL3 = (0x1F << 26), + /* arithmetic with immediate */ + OPC_ADDI = (0x08 << 26), + OPC_ADDIU = (0x09 << 26), + OPC_SLTI = (0x0A << 26), + OPC_SLTIU = (0x0B << 26), + /* logic with immediate */ + OPC_ANDI = (0x0C << 26), + OPC_ORI = (0x0D << 26), + OPC_XORI = (0x0E << 26), + OPC_LUI = (0x0F << 26), + /* arithmetic with immediate */ + OPC_DADDI = (0x18 << 26), + OPC_DADDIU = (0x19 << 26), + /* Jump and branches */ + OPC_J = (0x02 << 26), + OPC_JAL = (0x03 << 26), + OPC_BEQ = (0x04 << 26), /* Unconditional if rs = rt = 0 (B) */ + OPC_BEQL = (0x14 << 26), + OPC_BNE = (0x05 << 26), + OPC_BNEL = (0x15 << 26), + OPC_BLEZ = (0x06 << 26), + OPC_BLEZL = (0x16 << 26), + OPC_BGTZ = (0x07 << 26), + OPC_BGTZL = (0x17 << 26), + OPC_JALX = (0x1D << 26), + OPC_DAUI = (0x1D << 26), + /* Load and stores */ + OPC_LDL = (0x1A << 26), + OPC_LDR = (0x1B << 26), + OPC_LB = (0x20 << 26), + OPC_LH = (0x21 << 26), + OPC_LWL = (0x22 << 26), + OPC_LW = (0x23 << 26), + OPC_LWPC = OPC_LW | 0x5, + OPC_LBU = (0x24 << 26), + OPC_LHU = (0x25 << 26), + OPC_LWR = (0x26 << 26), + OPC_LWU = (0x27 << 26), + OPC_SB = (0x28 << 26), + OPC_SH = (0x29 << 26), + OPC_SWL = (0x2A << 26), + OPC_SW = (0x2B << 26), + OPC_SDL = (0x2C << 26), + OPC_SDR = (0x2D << 26), + OPC_SWR = (0x2E << 26), + OPC_LL = (0x30 << 26), + OPC_LLD = (0x34 << 26), + OPC_LD = (0x37 << 26), + OPC_LDPC = OPC_LD | 0x5, + OPC_SC = (0x38 << 26), + OPC_SCD = (0x3C << 26), + OPC_SD = (0x3F << 26), + /* Floating point load/store */ + OPC_LWC1 = (0x31 << 26), + OPC_LWC2 = (0x32 << 26), + OPC_LDC1 = (0x35 << 26), + OPC_LDC2 = (0x36 << 26), + OPC_SWC1 = (0x39 << 26), + OPC_SWC2 = (0x3A << 26), + OPC_SDC1 = (0x3D << 26), + OPC_SDC2 = (0x3E << 26), + /* Compact Branches */ + OPC_BLEZALC = (0x06 << 26), + OPC_BGEZALC = (0x06 << 26), + OPC_BGEUC = (0x06 << 26), + OPC_BGTZALC = (0x07 << 26), + OPC_BLTZALC = (0x07 << 26), + OPC_BLTUC = (0x07 << 26), + OPC_BOVC = (0x08 << 26), + OPC_BEQZALC = (0x08 << 26), + OPC_BEQC = (0x08 << 26), + OPC_BLEZC = (0x16 << 26), + OPC_BGEZC = (0x16 << 26), + OPC_BGEC = (0x16 << 26), + OPC_BGTZC = (0x17 << 26), + OPC_BLTZC = (0x17 << 26), + OPC_BLTC = (0x17 << 26), + OPC_BNVC = (0x18 << 26), + OPC_BNEZALC = (0x18 << 26), + OPC_BNEC = (0x18 << 26), + OPC_BC = (0x32 << 26), + OPC_BEQZC = (0x36 << 26), + OPC_JIC = (0x36 << 26), + OPC_BALC = (0x3A << 26), + OPC_BNEZC = (0x3E << 26), + OPC_JIALC = (0x3E << 26), + /* MDMX ASE specific */ + OPC_MDMX = (0x1E << 26), + /* MSA ASE, same as MDMX */ + OPC_MSA = OPC_MDMX, + /* Cache and prefetch */ + OPC_CACHE = (0x2F << 26), + OPC_PREF = (0x33 << 26), + /* PC-relative address computation / loads */ + OPC_PCREL = (0x3B << 26), +}; + +/* PC-relative address computation / loads */ +#define MASK_OPC_PCREL_TOP2BITS(op) (MASK_OP_MAJOR(op) | (op & (3 << 19))) +#define MASK_OPC_PCREL_TOP5BITS(op) (MASK_OP_MAJOR(op) | (op & (0x1f << 16))) +enum { + /* Instructions determined by bits 19 and 20 */ + 
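/* (distinguished by MASK_OPC_PCREL_TOP2BITS(), which keeps bits 20..19 of the instruction word) */ +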
OPC_ADDIUPC = OPC_PCREL | (0 << 19), + R6_OPC_LWPC = OPC_PCREL | (1 << 19), + OPC_LWUPC = OPC_PCREL | (2 << 19), + + /* Instructions determined by bits 16 ... 20 */ + OPC_AUIPC = OPC_PCREL | (0x1e << 16), + OPC_ALUIPC = OPC_PCREL | (0x1f << 16), + + /* Other */ + R6_OPC_LDPC = OPC_PCREL | (6 << 18), +}; + +/* MIPS special opcodes */ +#define MASK_SPECIAL(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) + +enum { + /* Shifts */ + OPC_SLL = 0x00 | OPC_SPECIAL, + /* NOP is SLL r0, r0, 0 */ + /* SSNOP is SLL r0, r0, 1 */ + /* EHB is SLL r0, r0, 3 */ + OPC_SRL = 0x02 | OPC_SPECIAL, /* also ROTR */ + OPC_ROTR = OPC_SRL | (1 << 21), + OPC_SRA = 0x03 | OPC_SPECIAL, + OPC_SLLV = 0x04 | OPC_SPECIAL, + OPC_SRLV = 0x06 | OPC_SPECIAL, /* also ROTRV */ + OPC_ROTRV = OPC_SRLV | (1 << 6), + OPC_SRAV = 0x07 | OPC_SPECIAL, + OPC_DSLLV = 0x14 | OPC_SPECIAL, + OPC_DSRLV = 0x16 | OPC_SPECIAL, /* also DROTRV */ + OPC_DROTRV = OPC_DSRLV | (1 << 6), + OPC_DSRAV = 0x17 | OPC_SPECIAL, + OPC_DSLL = 0x38 | OPC_SPECIAL, + OPC_DSRL = 0x3A | OPC_SPECIAL, /* also DROTR */ + OPC_DROTR = OPC_DSRL | (1 << 21), + OPC_DSRA = 0x3B | OPC_SPECIAL, + OPC_DSLL32 = 0x3C | OPC_SPECIAL, + OPC_DSRL32 = 0x3E | OPC_SPECIAL, /* also DROTR32 */ + OPC_DROTR32 = OPC_DSRL32 | (1 << 21), + OPC_DSRA32 = 0x3F | OPC_SPECIAL, + /* Multiplication / division */ + OPC_MULT = 0x18 | OPC_SPECIAL, + OPC_MULTU = 0x19 | OPC_SPECIAL, + OPC_DIV = 0x1A | OPC_SPECIAL, + OPC_DIVU = 0x1B | OPC_SPECIAL, + OPC_DMULT = 0x1C | OPC_SPECIAL, + OPC_DMULTU = 0x1D | OPC_SPECIAL, + OPC_DDIV = 0x1E | OPC_SPECIAL, + OPC_DDIVU = 0x1F | OPC_SPECIAL, + + /* 2 registers arithmetic / logic */ + OPC_ADD = 0x20 | OPC_SPECIAL, + OPC_ADDU = 0x21 | OPC_SPECIAL, + OPC_SUB = 0x22 | OPC_SPECIAL, + OPC_SUBU = 0x23 | OPC_SPECIAL, + OPC_AND = 0x24 | OPC_SPECIAL, + OPC_OR = 0x25 | OPC_SPECIAL, + OPC_XOR = 0x26 | OPC_SPECIAL, + OPC_NOR = 0x27 | OPC_SPECIAL, + OPC_SLT = 0x2A | OPC_SPECIAL, + OPC_SLTU = 0x2B | OPC_SPECIAL, + OPC_DADD = 0x2C | OPC_SPECIAL, + OPC_DADDU = 0x2D | OPC_SPECIAL, + OPC_DSUB = 0x2E | OPC_SPECIAL, + OPC_DSUBU = 0x2F | OPC_SPECIAL, + /* Jumps */ + OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */ + OPC_JALR = 0x09 | OPC_SPECIAL, /* Also JALR.HB */ + /* Traps */ + OPC_TGE = 0x30 | OPC_SPECIAL, + OPC_TGEU = 0x31 | OPC_SPECIAL, + OPC_TLT = 0x32 | OPC_SPECIAL, + OPC_TLTU = 0x33 | OPC_SPECIAL, + OPC_TEQ = 0x34 | OPC_SPECIAL, + OPC_TNE = 0x36 | OPC_SPECIAL, + /* HI / LO registers load & stores */ + OPC_MFHI = 0x10 | OPC_SPECIAL, + OPC_MTHI = 0x11 | OPC_SPECIAL, + OPC_MFLO = 0x12 | OPC_SPECIAL, + OPC_MTLO = 0x13 | OPC_SPECIAL, + /* Conditional moves */ + OPC_MOVZ = 0x0A | OPC_SPECIAL, + OPC_MOVN = 0x0B | OPC_SPECIAL, + + OPC_SELEQZ = 0x35 | OPC_SPECIAL, + OPC_SELNEZ = 0x37 | OPC_SPECIAL, + + OPC_MOVCI = 0x01 | OPC_SPECIAL, + + /* Special */ + OPC_PMON = 0x05 | OPC_SPECIAL, /* unofficial */ + OPC_SYSCALL = 0x0C | OPC_SPECIAL, + OPC_BREAK = 0x0D | OPC_SPECIAL, + OPC_SPIM = 0x0E | OPC_SPECIAL, /* unofficial */ + OPC_SYNC = 0x0F | OPC_SPECIAL, + + OPC_SPECIAL28_RESERVED = 0x28 | OPC_SPECIAL, + OPC_SPECIAL29_RESERVED = 0x29 | OPC_SPECIAL, + OPC_SPECIAL39_RESERVED = 0x39 | OPC_SPECIAL, + OPC_SPECIAL3D_RESERVED = 0x3D | OPC_SPECIAL, +}; + +/* + * R6 Multiply and Divide instructions have the same opcode + * and function field as legacy OPC_MULT[U]/OPC_DIV[U] + */ +#define MASK_R6_MULDIV(op) (MASK_SPECIAL(op) | (op & (0x7ff))) + +enum { + R6_OPC_MUL = OPC_MULT | (2 << 6), + R6_OPC_MUH = OPC_MULT | (3 << 6), + R6_OPC_MULU = OPC_MULTU | (2 << 6), + R6_OPC_MUHU = OPC_MULTU | (3 << 6), + R6_OPC_DIV = 
OPC_DIV | (2 << 6), + R6_OPC_MOD = OPC_DIV | (3 << 6), + R6_OPC_DIVU = OPC_DIVU | (2 << 6), + R6_OPC_MODU = OPC_DIVU | (3 << 6), + + R6_OPC_DMUL = OPC_DMULT | (2 << 6), + R6_OPC_DMUH = OPC_DMULT | (3 << 6), + R6_OPC_DMULU = OPC_DMULTU | (2 << 6), + R6_OPC_DMUHU = OPC_DMULTU | (3 << 6), + R6_OPC_DDIV = OPC_DDIV | (2 << 6), + R6_OPC_DMOD = OPC_DDIV | (3 << 6), + R6_OPC_DDIVU = OPC_DDIVU | (2 << 6), + R6_OPC_DMODU = OPC_DDIVU | (3 << 6), + + R6_OPC_CLZ = 0x10 | OPC_SPECIAL, + R6_OPC_CLO = 0x11 | OPC_SPECIAL, + R6_OPC_DCLZ = 0x12 | OPC_SPECIAL, + R6_OPC_DCLO = 0x13 | OPC_SPECIAL, + R6_OPC_SDBBP = 0x0e | OPC_SPECIAL, + + OPC_LSA = 0x05 | OPC_SPECIAL, + OPC_DLSA = 0x15 | OPC_SPECIAL, +}; + +/* Multiplication variants of the vr54xx. */ +#define MASK_MUL_VR54XX(op) (MASK_SPECIAL(op) | (op & (0x1F << 6))) + +enum { + OPC_VR54XX_MULS = (0x03 << 6) | OPC_MULT, + OPC_VR54XX_MULSU = (0x03 << 6) | OPC_MULTU, + OPC_VR54XX_MACC = (0x05 << 6) | OPC_MULT, + OPC_VR54XX_MACCU = (0x05 << 6) | OPC_MULTU, + OPC_VR54XX_MSAC = (0x07 << 6) | OPC_MULT, + OPC_VR54XX_MSACU = (0x07 << 6) | OPC_MULTU, + OPC_VR54XX_MULHI = (0x09 << 6) | OPC_MULT, + OPC_VR54XX_MULHIU = (0x09 << 6) | OPC_MULTU, + OPC_VR54XX_MULSHI = (0x0B << 6) | OPC_MULT, + OPC_VR54XX_MULSHIU = (0x0B << 6) | OPC_MULTU, + OPC_VR54XX_MACCHI = (0x0D << 6) | OPC_MULT, + OPC_VR54XX_MACCHIU = (0x0D << 6) | OPC_MULTU, + OPC_VR54XX_MSACHI = (0x0F << 6) | OPC_MULT, + OPC_VR54XX_MSACHIU = (0x0F << 6) | OPC_MULTU, +}; + +/* REGIMM (rt field) opcodes */ +#define MASK_REGIMM(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 16))) + +enum { + OPC_BLTZ = (0x00 << 16) | OPC_REGIMM, + OPC_BLTZL = (0x02 << 16) | OPC_REGIMM, + OPC_BGEZ = (0x01 << 16) | OPC_REGIMM, + OPC_BGEZL = (0x03 << 16) | OPC_REGIMM, + OPC_BLTZAL = (0x10 << 16) | OPC_REGIMM, + OPC_BLTZALL = (0x12 << 16) | OPC_REGIMM, + OPC_BGEZAL = (0x11 << 16) | OPC_REGIMM, + OPC_BGEZALL = (0x13 << 16) | OPC_REGIMM, + OPC_TGEI = (0x08 << 16) | OPC_REGIMM, + OPC_TGEIU = (0x09 << 16) | OPC_REGIMM, + OPC_TLTI = (0x0A << 16) | OPC_REGIMM, + OPC_TLTIU = (0x0B << 16) | OPC_REGIMM, + OPC_TEQI = (0x0C << 16) | OPC_REGIMM, + OPC_TNEI = (0x0E << 16) | OPC_REGIMM, + OPC_SIGRIE = (0x17 << 16) | OPC_REGIMM, + OPC_SYNCI = (0x1F << 16) | OPC_REGIMM, + + OPC_DAHI = (0x06 << 16) | OPC_REGIMM, + OPC_DATI = (0x1e << 16) | OPC_REGIMM, +}; + +/* Special2 opcodes */ +#define MASK_SPECIAL2(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) + +enum { + /* Multiply & xxx operations */ + OPC_MADD = 0x00 | OPC_SPECIAL2, + OPC_MADDU = 0x01 | OPC_SPECIAL2, + OPC_MUL = 0x02 | OPC_SPECIAL2, + OPC_MSUB = 0x04 | OPC_SPECIAL2, + OPC_MSUBU = 0x05 | OPC_SPECIAL2, + /* Loongson 2F */ + OPC_MULT_G_2F = 0x10 | OPC_SPECIAL2, + OPC_DMULT_G_2F = 0x11 | OPC_SPECIAL2, + OPC_MULTU_G_2F = 0x12 | OPC_SPECIAL2, + OPC_DMULTU_G_2F = 0x13 | OPC_SPECIAL2, + OPC_DIV_G_2F = 0x14 | OPC_SPECIAL2, + OPC_DDIV_G_2F = 0x15 | OPC_SPECIAL2, + OPC_DIVU_G_2F = 0x16 | OPC_SPECIAL2, + OPC_DDIVU_G_2F = 0x17 | OPC_SPECIAL2, + OPC_MOD_G_2F = 0x1c | OPC_SPECIAL2, + OPC_DMOD_G_2F = 0x1d | OPC_SPECIAL2, + OPC_MODU_G_2F = 0x1e | OPC_SPECIAL2, + OPC_DMODU_G_2F = 0x1f | OPC_SPECIAL2, + /* Misc */ + OPC_CLZ = 0x20 | OPC_SPECIAL2, + OPC_CLO = 0x21 | OPC_SPECIAL2, + OPC_DCLZ = 0x24 | OPC_SPECIAL2, + OPC_DCLO = 0x25 | OPC_SPECIAL2, + /* Special */ + OPC_SDBBP = 0x3F | OPC_SPECIAL2, +}; + +/* Special3 opcodes */ +#define MASK_SPECIAL3(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) + +enum { + OPC_EXT = 0x00 | OPC_SPECIAL3, + OPC_DEXTM = 0x01 | OPC_SPECIAL3, + OPC_DEXTU = 0x02 | OPC_SPECIAL3, + OPC_DEXT = 0x03 | OPC_SPECIAL3, + 
OPC_INS = 0x04 | OPC_SPECIAL3, + OPC_DINSM = 0x05 | OPC_SPECIAL3, + OPC_DINSU = 0x06 | OPC_SPECIAL3, + OPC_DINS = 0x07 | OPC_SPECIAL3, + OPC_FORK = 0x08 | OPC_SPECIAL3, + OPC_YIELD = 0x09 | OPC_SPECIAL3, + OPC_BSHFL = 0x20 | OPC_SPECIAL3, + OPC_DBSHFL = 0x24 | OPC_SPECIAL3, + OPC_RDHWR = 0x3B | OPC_SPECIAL3, + OPC_GINV = 0x3D | OPC_SPECIAL3, + + /* Loongson 2E */ + OPC_MULT_G_2E = 0x18 | OPC_SPECIAL3, + OPC_MULTU_G_2E = 0x19 | OPC_SPECIAL3, + OPC_DIV_G_2E = 0x1A | OPC_SPECIAL3, + OPC_DIVU_G_2E = 0x1B | OPC_SPECIAL3, + OPC_DMULT_G_2E = 0x1C | OPC_SPECIAL3, + OPC_DMULTU_G_2E = 0x1D | OPC_SPECIAL3, + OPC_DDIV_G_2E = 0x1E | OPC_SPECIAL3, + OPC_DDIVU_G_2E = 0x1F | OPC_SPECIAL3, + OPC_MOD_G_2E = 0x22 | OPC_SPECIAL3, + OPC_MODU_G_2E = 0x23 | OPC_SPECIAL3, + OPC_DMOD_G_2E = 0x26 | OPC_SPECIAL3, + OPC_DMODU_G_2E = 0x27 | OPC_SPECIAL3, + + /* MIPS DSP Load */ + OPC_LX_DSP = 0x0A | OPC_SPECIAL3, + /* MIPS DSP Arithmetic */ + OPC_ADDU_QB_DSP = 0x10 | OPC_SPECIAL3, + OPC_ADDU_OB_DSP = 0x14 | OPC_SPECIAL3, + OPC_ABSQ_S_PH_DSP = 0x12 | OPC_SPECIAL3, + OPC_ABSQ_S_QH_DSP = 0x16 | OPC_SPECIAL3, + /* OPC_ADDUH_QB_DSP is same as OPC_MULT_G_2E. */ + /* OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, */ + OPC_CMPU_EQ_QB_DSP = 0x11 | OPC_SPECIAL3, + OPC_CMPU_EQ_OB_DSP = 0x15 | OPC_SPECIAL3, + /* MIPS DSP GPR-Based Shift Sub-class */ + OPC_SHLL_QB_DSP = 0x13 | OPC_SPECIAL3, + OPC_SHLL_OB_DSP = 0x17 | OPC_SPECIAL3, + /* MIPS DSP Multiply Sub-class insns */ + /* OPC_MUL_PH_DSP is same as OPC_ADDUH_QB_DSP. */ + /* OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, */ + OPC_DPA_W_PH_DSP = 0x30 | OPC_SPECIAL3, + OPC_DPAQ_W_QH_DSP = 0x34 | OPC_SPECIAL3, + /* DSP Bit/Manipulation Sub-class */ + OPC_INSV_DSP = 0x0C | OPC_SPECIAL3, + OPC_DINSV_DSP = 0x0D | OPC_SPECIAL3, + /* MIPS DSP Append Sub-class */ + OPC_APPEND_DSP = 0x31 | OPC_SPECIAL3, + OPC_DAPPEND_DSP = 0x35 | OPC_SPECIAL3, + /* MIPS DSP Accumulator and DSPControl Access Sub-class */ + OPC_EXTR_W_DSP = 0x38 | OPC_SPECIAL3, + OPC_DEXTR_W_DSP = 0x3C | OPC_SPECIAL3, + + /* EVA */ + OPC_LWLE = 0x19 | OPC_SPECIAL3, + OPC_LWRE = 0x1A | OPC_SPECIAL3, + OPC_CACHEE = 0x1B | OPC_SPECIAL3, + OPC_SBE = 0x1C | OPC_SPECIAL3, + OPC_SHE = 0x1D | OPC_SPECIAL3, + OPC_SCE = 0x1E | OPC_SPECIAL3, + OPC_SWE = 0x1F | OPC_SPECIAL3, + OPC_SWLE = 0x21 | OPC_SPECIAL3, + OPC_SWRE = 0x22 | OPC_SPECIAL3, + OPC_PREFE = 0x23 | OPC_SPECIAL3, + OPC_LBUE = 0x28 | OPC_SPECIAL3, + OPC_LHUE = 0x29 | OPC_SPECIAL3, + OPC_LBE = 0x2C | OPC_SPECIAL3, + OPC_LHE = 0x2D | OPC_SPECIAL3, + OPC_LLE = 0x2E | OPC_SPECIAL3, + OPC_LWE = 0x2F | OPC_SPECIAL3, + + /* R6 */ + R6_OPC_PREF = 0x35 | OPC_SPECIAL3, + R6_OPC_CACHE = 0x25 | OPC_SPECIAL3, + R6_OPC_LL = 0x36 | OPC_SPECIAL3, + R6_OPC_SC = 0x26 | OPC_SPECIAL3, + R6_OPC_LLD = 0x37 | OPC_SPECIAL3, + R6_OPC_SCD = 0x27 | OPC_SPECIAL3, +}; + +/* BSHFL opcodes */ +#define MASK_BSHFL(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) + +enum { + OPC_WSBH = (0x02 << 6) | OPC_BSHFL, + OPC_SEB = (0x10 << 6) | OPC_BSHFL, + OPC_SEH = (0x18 << 6) | OPC_BSHFL, + OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp (010.00 to 010.11) */ + OPC_ALIGN_1 = (0x09 << 6) | OPC_BSHFL, + OPC_ALIGN_2 = (0x0A << 6) | OPC_BSHFL, + OPC_ALIGN_3 = (0x0B << 6) | OPC_BSHFL, + OPC_BITSWAP = (0x00 << 6) | OPC_BSHFL /* 00000 */ +}; + +/* DBSHFL opcodes */ +#define MASK_DBSHFL(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) + +enum { + OPC_DSBH = (0x02 << 6) | OPC_DBSHFL, + OPC_DSHD = (0x05 << 6) | OPC_DBSHFL, + OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp (01.000 to 01.111) */ + OPC_DALIGN_1 = (0x09 << 6) | OPC_DBSHFL, + 
OPC_DALIGN_2 = (0x0A << 6) | OPC_DBSHFL, + OPC_DALIGN_3 = (0x0B << 6) | OPC_DBSHFL, + OPC_DALIGN_4 = (0x0C << 6) | OPC_DBSHFL, + OPC_DALIGN_5 = (0x0D << 6) | OPC_DBSHFL, + OPC_DALIGN_6 = (0x0E << 6) | OPC_DBSHFL, + OPC_DALIGN_7 = (0x0F << 6) | OPC_DBSHFL, + OPC_DBITSWAP = (0x00 << 6) | OPC_DBSHFL, /* 00000 */ +}; + +/* MIPS DSP REGIMM opcodes */ +enum { + OPC_BPOSGE32 = (0x1C << 16) | OPC_REGIMM, + OPC_BPOSGE64 = (0x1D << 16) | OPC_REGIMM, +}; + +#define MASK_LX(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +/* MIPS DSP Load */ +enum { + OPC_LBUX = (0x06 << 6) | OPC_LX_DSP, + OPC_LHX = (0x04 << 6) | OPC_LX_DSP, + OPC_LWX = (0x00 << 6) | OPC_LX_DSP, + OPC_LDX = (0x08 << 6) | OPC_LX_DSP, +}; + +#define MASK_ADDU_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Arithmetic Sub-class */ + OPC_ADDQ_PH = (0x0A << 6) | OPC_ADDU_QB_DSP, + OPC_ADDQ_S_PH = (0x0E << 6) | OPC_ADDU_QB_DSP, + OPC_ADDQ_S_W = (0x16 << 6) | OPC_ADDU_QB_DSP, + OPC_ADDU_QB = (0x00 << 6) | OPC_ADDU_QB_DSP, + OPC_ADDU_S_QB = (0x04 << 6) | OPC_ADDU_QB_DSP, + OPC_ADDU_PH = (0x08 << 6) | OPC_ADDU_QB_DSP, + OPC_ADDU_S_PH = (0x0C << 6) | OPC_ADDU_QB_DSP, + OPC_SUBQ_PH = (0x0B << 6) | OPC_ADDU_QB_DSP, + OPC_SUBQ_S_PH = (0x0F << 6) | OPC_ADDU_QB_DSP, + OPC_SUBQ_S_W = (0x17 << 6) | OPC_ADDU_QB_DSP, + OPC_SUBU_QB = (0x01 << 6) | OPC_ADDU_QB_DSP, + OPC_SUBU_S_QB = (0x05 << 6) | OPC_ADDU_QB_DSP, + OPC_SUBU_PH = (0x09 << 6) | OPC_ADDU_QB_DSP, + OPC_SUBU_S_PH = (0x0D << 6) | OPC_ADDU_QB_DSP, + OPC_ADDSC = (0x10 << 6) | OPC_ADDU_QB_DSP, + OPC_ADDWC = (0x11 << 6) | OPC_ADDU_QB_DSP, + OPC_MODSUB = (0x12 << 6) | OPC_ADDU_QB_DSP, + OPC_RADDU_W_QB = (0x14 << 6) | OPC_ADDU_QB_DSP, + /* MIPS DSP Multiply Sub-class insns */ + OPC_MULEU_S_PH_QBL = (0x06 << 6) | OPC_ADDU_QB_DSP, + OPC_MULEU_S_PH_QBR = (0x07 << 6) | OPC_ADDU_QB_DSP, + OPC_MULQ_RS_PH = (0x1F << 6) | OPC_ADDU_QB_DSP, + OPC_MULEQ_S_W_PHL = (0x1C << 6) | OPC_ADDU_QB_DSP, + OPC_MULEQ_S_W_PHR = (0x1D << 6) | OPC_ADDU_QB_DSP, + OPC_MULQ_S_PH = (0x1E << 6) | OPC_ADDU_QB_DSP, +}; + +#define OPC_ADDUH_QB_DSP OPC_MULT_G_2E +#define MASK_ADDUH_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Arithmetic Sub-class */ + OPC_ADDUH_QB = (0x00 << 6) | OPC_ADDUH_QB_DSP, + OPC_ADDUH_R_QB = (0x02 << 6) | OPC_ADDUH_QB_DSP, + OPC_ADDQH_PH = (0x08 << 6) | OPC_ADDUH_QB_DSP, + OPC_ADDQH_R_PH = (0x0A << 6) | OPC_ADDUH_QB_DSP, + OPC_ADDQH_W = (0x10 << 6) | OPC_ADDUH_QB_DSP, + OPC_ADDQH_R_W = (0x12 << 6) | OPC_ADDUH_QB_DSP, + OPC_SUBUH_QB = (0x01 << 6) | OPC_ADDUH_QB_DSP, + OPC_SUBUH_R_QB = (0x03 << 6) | OPC_ADDUH_QB_DSP, + OPC_SUBQH_PH = (0x09 << 6) | OPC_ADDUH_QB_DSP, + OPC_SUBQH_R_PH = (0x0B << 6) | OPC_ADDUH_QB_DSP, + OPC_SUBQH_W = (0x11 << 6) | OPC_ADDUH_QB_DSP, + OPC_SUBQH_R_W = (0x13 << 6) | OPC_ADDUH_QB_DSP, + /* MIPS DSP Multiply Sub-class insns */ + OPC_MUL_PH = (0x0C << 6) | OPC_ADDUH_QB_DSP, + OPC_MUL_S_PH = (0x0E << 6) | OPC_ADDUH_QB_DSP, + OPC_MULQ_S_W = (0x16 << 6) | OPC_ADDUH_QB_DSP, + OPC_MULQ_RS_W = (0x17 << 6) | OPC_ADDUH_QB_DSP, +}; + +#define MASK_ABSQ_S_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Arithmetic Sub-class */ + OPC_ABSQ_S_QB = (0x01 << 6) | OPC_ABSQ_S_PH_DSP, + OPC_ABSQ_S_PH = (0x09 << 6) | OPC_ABSQ_S_PH_DSP, + OPC_ABSQ_S_W = (0x11 << 6) | OPC_ABSQ_S_PH_DSP, + OPC_PRECEQ_W_PHL = (0x0C << 6) | OPC_ABSQ_S_PH_DSP, + OPC_PRECEQ_W_PHR = (0x0D << 6) | OPC_ABSQ_S_PH_DSP, + OPC_PRECEQU_PH_QBL = (0x04 << 6) | OPC_ABSQ_S_PH_DSP, + OPC_PRECEQU_PH_QBR = (0x05 << 6) | OPC_ABSQ_S_PH_DSP, + OPC_PRECEQU_PH_QBLA = (0x06 << 6) | 
OPC_ABSQ_S_PH_DSP, + OPC_PRECEQU_PH_QBRA = (0x07 << 6) | OPC_ABSQ_S_PH_DSP, + OPC_PRECEU_PH_QBL = (0x1C << 6) | OPC_ABSQ_S_PH_DSP, + OPC_PRECEU_PH_QBR = (0x1D << 6) | OPC_ABSQ_S_PH_DSP, + OPC_PRECEU_PH_QBLA = (0x1E << 6) | OPC_ABSQ_S_PH_DSP, + OPC_PRECEU_PH_QBRA = (0x1F << 6) | OPC_ABSQ_S_PH_DSP, + /* DSP Bit/Manipulation Sub-class */ + OPC_BITREV = (0x1B << 6) | OPC_ABSQ_S_PH_DSP, + OPC_REPL_QB = (0x02 << 6) | OPC_ABSQ_S_PH_DSP, + OPC_REPLV_QB = (0x03 << 6) | OPC_ABSQ_S_PH_DSP, + OPC_REPL_PH = (0x0A << 6) | OPC_ABSQ_S_PH_DSP, + OPC_REPLV_PH = (0x0B << 6) | OPC_ABSQ_S_PH_DSP, +}; + +#define MASK_CMPU_EQ_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Arithmetic Sub-class */ + OPC_PRECR_QB_PH = (0x0D << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_PRECRQ_QB_PH = (0x0C << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_PRECR_SRA_PH_W = (0x1E << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_PRECR_SRA_R_PH_W = (0x1F << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_PRECRQ_PH_W = (0x14 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_PRECRQ_RS_PH_W = (0x15 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_PRECRQU_S_QB_PH = (0x0F << 6) | OPC_CMPU_EQ_QB_DSP, + /* DSP Compare-Pick Sub-class */ + OPC_CMPU_EQ_QB = (0x00 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMPU_LT_QB = (0x01 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMPU_LE_QB = (0x02 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMPGU_EQ_QB = (0x04 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMPGU_LT_QB = (0x05 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMPGU_LE_QB = (0x06 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMPGDU_EQ_QB = (0x18 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMPGDU_LT_QB = (0x19 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMPGDU_LE_QB = (0x1A << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMP_EQ_PH = (0x08 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMP_LT_PH = (0x09 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_CMP_LE_PH = (0x0A << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_PICK_QB = (0x03 << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_PICK_PH = (0x0B << 6) | OPC_CMPU_EQ_QB_DSP, + OPC_PACKRL_PH = (0x0E << 6) | OPC_CMPU_EQ_QB_DSP, +}; + +#define MASK_SHLL_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP GPR-Based Shift Sub-class */ + OPC_SHLL_QB = (0x00 << 6) | OPC_SHLL_QB_DSP, + OPC_SHLLV_QB = (0x02 << 6) | OPC_SHLL_QB_DSP, + OPC_SHLL_PH = (0x08 << 6) | OPC_SHLL_QB_DSP, + OPC_SHLLV_PH = (0x0A << 6) | OPC_SHLL_QB_DSP, + OPC_SHLL_S_PH = (0x0C << 6) | OPC_SHLL_QB_DSP, + OPC_SHLLV_S_PH = (0x0E << 6) | OPC_SHLL_QB_DSP, + OPC_SHLL_S_W = (0x14 << 6) | OPC_SHLL_QB_DSP, + OPC_SHLLV_S_W = (0x16 << 6) | OPC_SHLL_QB_DSP, + OPC_SHRL_QB = (0x01 << 6) | OPC_SHLL_QB_DSP, + OPC_SHRLV_QB = (0x03 << 6) | OPC_SHLL_QB_DSP, + OPC_SHRL_PH = (0x19 << 6) | OPC_SHLL_QB_DSP, + OPC_SHRLV_PH = (0x1B << 6) | OPC_SHLL_QB_DSP, + OPC_SHRA_QB = (0x04 << 6) | OPC_SHLL_QB_DSP, + OPC_SHRA_R_QB = (0x05 << 6) | OPC_SHLL_QB_DSP, + OPC_SHRAV_QB = (0x06 << 6) | OPC_SHLL_QB_DSP, + OPC_SHRAV_R_QB = (0x07 << 6) | OPC_SHLL_QB_DSP, + OPC_SHRA_PH = (0x09 << 6) | OPC_SHLL_QB_DSP, + OPC_SHRAV_PH = (0x0B << 6) | OPC_SHLL_QB_DSP, + OPC_SHRA_R_PH = (0x0D << 6) | OPC_SHLL_QB_DSP, + OPC_SHRAV_R_PH = (0x0F << 6) | OPC_SHLL_QB_DSP, + OPC_SHRA_R_W = (0x15 << 6) | OPC_SHLL_QB_DSP, + OPC_SHRAV_R_W = (0x17 << 6) | OPC_SHLL_QB_DSP, +}; + +#define MASK_DPA_W_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Multiply Sub-class insns */ + OPC_DPAU_H_QBL = (0x03 << 6) | OPC_DPA_W_PH_DSP, + OPC_DPAU_H_QBR = (0x07 << 6) | OPC_DPA_W_PH_DSP, + OPC_DPSU_H_QBL = (0x0B << 6) | OPC_DPA_W_PH_DSP, + OPC_DPSU_H_QBR = (0x0F << 6) | OPC_DPA_W_PH_DSP, + OPC_DPA_W_PH = (0x00 << 6) | OPC_DPA_W_PH_DSP, + OPC_DPAX_W_PH = (0x08 << 6) | OPC_DPA_W_PH_DSP, + 
OPC_DPAQ_S_W_PH = (0x04 << 6) | OPC_DPA_W_PH_DSP, + OPC_DPAQX_S_W_PH = (0x18 << 6) | OPC_DPA_W_PH_DSP, + OPC_DPAQX_SA_W_PH = (0x1A << 6) | OPC_DPA_W_PH_DSP, + OPC_DPS_W_PH = (0x01 << 6) | OPC_DPA_W_PH_DSP, + OPC_DPSX_W_PH = (0x09 << 6) | OPC_DPA_W_PH_DSP, + OPC_DPSQ_S_W_PH = (0x05 << 6) | OPC_DPA_W_PH_DSP, + OPC_DPSQX_S_W_PH = (0x19 << 6) | OPC_DPA_W_PH_DSP, + OPC_DPSQX_SA_W_PH = (0x1B << 6) | OPC_DPA_W_PH_DSP, + OPC_MULSAQ_S_W_PH = (0x06 << 6) | OPC_DPA_W_PH_DSP, + OPC_DPAQ_SA_L_W = (0x0C << 6) | OPC_DPA_W_PH_DSP, + OPC_DPSQ_SA_L_W = (0x0D << 6) | OPC_DPA_W_PH_DSP, + OPC_MAQ_S_W_PHL = (0x14 << 6) | OPC_DPA_W_PH_DSP, + OPC_MAQ_S_W_PHR = (0x16 << 6) | OPC_DPA_W_PH_DSP, + OPC_MAQ_SA_W_PHL = (0x10 << 6) | OPC_DPA_W_PH_DSP, + OPC_MAQ_SA_W_PHR = (0x12 << 6) | OPC_DPA_W_PH_DSP, + OPC_MULSA_W_PH = (0x02 << 6) | OPC_DPA_W_PH_DSP, +}; + +#define MASK_INSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* DSP Bit/Manipulation Sub-class */ + OPC_INSV = (0x00 << 6) | OPC_INSV_DSP, +}; + +#define MASK_APPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Append Sub-class */ + OPC_APPEND = (0x00 << 6) | OPC_APPEND_DSP, + OPC_PREPEND = (0x01 << 6) | OPC_APPEND_DSP, + OPC_BALIGN = (0x10 << 6) | OPC_APPEND_DSP, +}; + +#define MASK_EXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Accumulator and DSPControl Access Sub-class */ + OPC_EXTR_W = (0x00 << 6) | OPC_EXTR_W_DSP, + OPC_EXTR_R_W = (0x04 << 6) | OPC_EXTR_W_DSP, + OPC_EXTR_RS_W = (0x06 << 6) | OPC_EXTR_W_DSP, + OPC_EXTR_S_H = (0x0E << 6) | OPC_EXTR_W_DSP, + OPC_EXTRV_S_H = (0x0F << 6) | OPC_EXTR_W_DSP, + OPC_EXTRV_W = (0x01 << 6) | OPC_EXTR_W_DSP, + OPC_EXTRV_R_W = (0x05 << 6) | OPC_EXTR_W_DSP, + OPC_EXTRV_RS_W = (0x07 << 6) | OPC_EXTR_W_DSP, + OPC_EXTP = (0x02 << 6) | OPC_EXTR_W_DSP, + OPC_EXTPV = (0x03 << 6) | OPC_EXTR_W_DSP, + OPC_EXTPDP = (0x0A << 6) | OPC_EXTR_W_DSP, + OPC_EXTPDPV = (0x0B << 6) | OPC_EXTR_W_DSP, + OPC_SHILO = (0x1A << 6) | OPC_EXTR_W_DSP, + OPC_SHILOV = (0x1B << 6) | OPC_EXTR_W_DSP, + OPC_MTHLIP = (0x1F << 6) | OPC_EXTR_W_DSP, + OPC_WRDSP = (0x13 << 6) | OPC_EXTR_W_DSP, + OPC_RDDSP = (0x12 << 6) | OPC_EXTR_W_DSP, +}; + +#define MASK_ABSQ_S_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Arithmetic Sub-class */ + OPC_PRECEQ_L_PWL = (0x14 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEQ_L_PWR = (0x15 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEQ_PW_QHL = (0x0C << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEQ_PW_QHR = (0x0D << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEQ_PW_QHLA = (0x0E << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEQ_PW_QHRA = (0x0F << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEQU_QH_OBL = (0x04 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEQU_QH_OBR = (0x05 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEQU_QH_OBLA = (0x06 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEQU_QH_OBRA = (0x07 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEU_QH_OBL = (0x1C << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEU_QH_OBR = (0x1D << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEU_QH_OBLA = (0x1E << 6) | OPC_ABSQ_S_QH_DSP, + OPC_PRECEU_QH_OBRA = (0x1F << 6) | OPC_ABSQ_S_QH_DSP, + OPC_ABSQ_S_OB = (0x01 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_ABSQ_S_PW = (0x11 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_ABSQ_S_QH = (0x09 << 6) | OPC_ABSQ_S_QH_DSP, + /* DSP Bit/Manipulation Sub-class */ + OPC_REPL_OB = (0x02 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_REPL_PW = (0x12 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_REPL_QH = (0x0A << 6) | OPC_ABSQ_S_QH_DSP, + OPC_REPLV_OB = (0x03 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_REPLV_PW = (0x13 << 6) | OPC_ABSQ_S_QH_DSP, + OPC_REPLV_QH = (0x0B << 6) | OPC_ABSQ_S_QH_DSP, +}; + 
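+/* + * Worked decode example (illustration only, assuming the usual MIPS field + * layout with rt in bits 20..16 and rd in bits 15..11): ABSQ_S.QH $1, $2 + * encodes as 0x7C020A56. MASK_SPECIAL3() keeps bits 31..26 and 5..0, + * which yields OPC_ABSQ_S_QH_DSP; MASK_ABSQ_S_QH() additionally keeps + * bits 10..6, which yields OPC_ABSQ_S_QH. + */ +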
+#define MASK_ADDU_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Multiply Sub-class insns */ + OPC_MULEQ_S_PW_QHL = (0x1C << 6) | OPC_ADDU_OB_DSP, + OPC_MULEQ_S_PW_QHR = (0x1D << 6) | OPC_ADDU_OB_DSP, + OPC_MULEU_S_QH_OBL = (0x06 << 6) | OPC_ADDU_OB_DSP, + OPC_MULEU_S_QH_OBR = (0x07 << 6) | OPC_ADDU_OB_DSP, + OPC_MULQ_RS_QH = (0x1F << 6) | OPC_ADDU_OB_DSP, + /* MIPS DSP Arithmetic Sub-class */ + OPC_RADDU_L_OB = (0x14 << 6) | OPC_ADDU_OB_DSP, + OPC_SUBQ_PW = (0x13 << 6) | OPC_ADDU_OB_DSP, + OPC_SUBQ_S_PW = (0x17 << 6) | OPC_ADDU_OB_DSP, + OPC_SUBQ_QH = (0x0B << 6) | OPC_ADDU_OB_DSP, + OPC_SUBQ_S_QH = (0x0F << 6) | OPC_ADDU_OB_DSP, + OPC_SUBU_OB = (0x01 << 6) | OPC_ADDU_OB_DSP, + OPC_SUBU_S_OB = (0x05 << 6) | OPC_ADDU_OB_DSP, + OPC_SUBU_QH = (0x09 << 6) | OPC_ADDU_OB_DSP, + OPC_SUBU_S_QH = (0x0D << 6) | OPC_ADDU_OB_DSP, + OPC_SUBUH_OB = (0x19 << 6) | OPC_ADDU_OB_DSP, + OPC_SUBUH_R_OB = (0x1B << 6) | OPC_ADDU_OB_DSP, + OPC_ADDQ_PW = (0x12 << 6) | OPC_ADDU_OB_DSP, + OPC_ADDQ_S_PW = (0x16 << 6) | OPC_ADDU_OB_DSP, + OPC_ADDQ_QH = (0x0A << 6) | OPC_ADDU_OB_DSP, + OPC_ADDQ_S_QH = (0x0E << 6) | OPC_ADDU_OB_DSP, + OPC_ADDU_OB = (0x00 << 6) | OPC_ADDU_OB_DSP, + OPC_ADDU_S_OB = (0x04 << 6) | OPC_ADDU_OB_DSP, + OPC_ADDU_QH = (0x08 << 6) | OPC_ADDU_OB_DSP, + OPC_ADDU_S_QH = (0x0C << 6) | OPC_ADDU_OB_DSP, + OPC_ADDUH_OB = (0x18 << 6) | OPC_ADDU_OB_DSP, + OPC_ADDUH_R_OB = (0x1A << 6) | OPC_ADDU_OB_DSP, +}; + +#define MASK_CMPU_EQ_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* DSP Compare-Pick Sub-class */ + OPC_CMP_EQ_PW = (0x10 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMP_LT_PW = (0x11 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMP_LE_PW = (0x12 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMP_EQ_QH = (0x08 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMP_LT_QH = (0x09 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMP_LE_QH = (0x0A << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMPGDU_EQ_OB = (0x18 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMPGDU_LT_OB = (0x19 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMPGDU_LE_OB = (0x1A << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMPGU_EQ_OB = (0x04 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMPGU_LT_OB = (0x05 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMPGU_LE_OB = (0x06 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMPU_EQ_OB = (0x00 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMPU_LT_OB = (0x01 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_CMPU_LE_OB = (0x02 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PACKRL_PW = (0x0E << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PICK_OB = (0x03 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PICK_PW = (0x13 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PICK_QH = (0x0B << 6) | OPC_CMPU_EQ_OB_DSP, + /* MIPS DSP Arithmetic Sub-class */ + OPC_PRECR_OB_QH = (0x0D << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PRECR_SRA_QH_PW = (0x1E << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PRECR_SRA_R_QH_PW = (0x1F << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PRECRQ_OB_QH = (0x0C << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PRECRQ_PW_L = (0x1C << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PRECRQ_QH_PW = (0x14 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PRECRQ_RS_QH_PW = (0x15 << 6) | OPC_CMPU_EQ_OB_DSP, + OPC_PRECRQU_S_OB_QH = (0x0F << 6) | OPC_CMPU_EQ_OB_DSP, +}; + +#define MASK_DAPPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* DSP Append Sub-class */ + OPC_DAPPEND = (0x00 << 6) | OPC_DAPPEND_DSP, + OPC_PREPENDD = (0x03 << 6) | OPC_DAPPEND_DSP, + OPC_PREPENDW = (0x01 << 6) | OPC_DAPPEND_DSP, + OPC_DBALIGN = (0x10 << 6) | OPC_DAPPEND_DSP, +}; + +#define MASK_DEXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Accumulator and DSPControl Access Sub-class */ + OPC_DMTHLIP = (0x1F << 6) | OPC_DEXTR_W_DSP, + OPC_DSHILO = (0x1A 
<< 6) | OPC_DEXTR_W_DSP, + OPC_DEXTP = (0x02 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTPDP = (0x0A << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTPDPV = (0x0B << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTPV = (0x03 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTR_L = (0x10 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTR_R_L = (0x14 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTR_RS_L = (0x16 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTR_W = (0x00 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTR_R_W = (0x04 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTR_RS_W = (0x06 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTR_S_H = (0x0E << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTRV_L = (0x11 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTRV_R_L = (0x15 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTRV_RS_L = (0x17 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTRV_S_H = (0x0F << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTRV_W = (0x01 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTRV_R_W = (0x05 << 6) | OPC_DEXTR_W_DSP, + OPC_DEXTRV_RS_W = (0x07 << 6) | OPC_DEXTR_W_DSP, + OPC_DSHILOV = (0x1B << 6) | OPC_DEXTR_W_DSP, +}; + +#define MASK_DINSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* DSP Bit/Manipulation Sub-class */ + OPC_DINSV = (0x00 << 6) | OPC_DINSV_DSP, +}; + +#define MASK_DPAQ_W_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Multiply Sub-class insns */ + OPC_DMADD = (0x19 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DMADDU = (0x1D << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DMSUB = (0x1B << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DMSUBU = (0x1F << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DPA_W_QH = (0x00 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DPAQ_S_W_QH = (0x04 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DPAQ_SA_L_PW = (0x0C << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DPAU_H_OBL = (0x03 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DPAU_H_OBR = (0x07 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DPS_W_QH = (0x01 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DPSQ_S_W_QH = (0x05 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DPSQ_SA_L_PW = (0x0D << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DPSU_H_OBL = (0x0B << 6) | OPC_DPAQ_W_QH_DSP, + OPC_DPSU_H_OBR = (0x0F << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MAQ_S_L_PWL = (0x1C << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MAQ_S_L_PWR = (0x1E << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MAQ_S_W_QHLL = (0x14 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MAQ_SA_W_QHLL = (0x10 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MAQ_S_W_QHLR = (0x15 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MAQ_SA_W_QHLR = (0x11 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MAQ_S_W_QHRL = (0x16 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MAQ_SA_W_QHRL = (0x12 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MAQ_S_W_QHRR = (0x17 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MAQ_SA_W_QHRR = (0x13 << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MULSAQ_S_L_PW = (0x0E << 6) | OPC_DPAQ_W_QH_DSP, + OPC_MULSAQ_S_W_QH = (0x06 << 6) | OPC_DPAQ_W_QH_DSP, +}; + +#define MASK_SHLL_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP GPR-Based Shift Sub-class */ + OPC_SHLL_PW = (0x10 << 6) | OPC_SHLL_OB_DSP, + OPC_SHLL_S_PW = (0x14 << 6) | OPC_SHLL_OB_DSP, + OPC_SHLLV_OB = (0x02 << 6) | OPC_SHLL_OB_DSP, + OPC_SHLLV_PW = (0x12 << 6) | OPC_SHLL_OB_DSP, + OPC_SHLLV_S_PW = (0x16 << 6) | OPC_SHLL_OB_DSP, + OPC_SHLLV_QH = (0x0A << 6) | OPC_SHLL_OB_DSP, + OPC_SHLLV_S_QH = (0x0E << 6) | OPC_SHLL_OB_DSP, + OPC_SHRA_PW = (0x11 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRA_R_PW = (0x15 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRAV_OB = (0x06 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRAV_R_OB = (0x07 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRAV_PW = (0x13 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRAV_R_PW = (0x17 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRAV_QH = (0x0B << 6) | OPC_SHLL_OB_DSP, + OPC_SHRAV_R_QH = (0x0F << 6) | OPC_SHLL_OB_DSP, + OPC_SHRLV_OB = (0x03 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRLV_QH = (0x1B << 6) | OPC_SHLL_OB_DSP, + 
OPC_SHLL_OB = (0x00 << 6) | OPC_SHLL_OB_DSP, + OPC_SHLL_QH = (0x08 << 6) | OPC_SHLL_OB_DSP, + OPC_SHLL_S_QH = (0x0C << 6) | OPC_SHLL_OB_DSP, + OPC_SHRA_OB = (0x04 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRA_R_OB = (0x05 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRA_QH = (0x09 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRA_R_QH = (0x0D << 6) | OPC_SHLL_OB_DSP, + OPC_SHRL_OB = (0x01 << 6) | OPC_SHLL_OB_DSP, + OPC_SHRL_QH = (0x19 << 6) | OPC_SHLL_OB_DSP, +}; + +/* Coprocessor 0 (rs field) */ +#define MASK_CP0(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21))) + +enum { + OPC_MFC0 = (0x00 << 21) | OPC_CP0, + OPC_DMFC0 = (0x01 << 21) | OPC_CP0, + OPC_MFHC0 = (0x02 << 21) | OPC_CP0, + OPC_MTC0 = (0x04 << 21) | OPC_CP0, + OPC_DMTC0 = (0x05 << 21) | OPC_CP0, + OPC_MTHC0 = (0x06 << 21) | OPC_CP0, + OPC_MFTR = (0x08 << 21) | OPC_CP0, + OPC_RDPGPR = (0x0A << 21) | OPC_CP0, + OPC_MFMC0 = (0x0B << 21) | OPC_CP0, + OPC_MTTR = (0x0C << 21) | OPC_CP0, + OPC_WRPGPR = (0x0E << 21) | OPC_CP0, + OPC_C0 = (0x10 << 21) | OPC_CP0, + OPC_C0_1 = (0x11 << 21) | OPC_CP0, + OPC_C0_2 = (0x12 << 21) | OPC_CP0, + OPC_C0_3 = (0x13 << 21) | OPC_CP0, + OPC_C0_4 = (0x14 << 21) | OPC_CP0, + OPC_C0_5 = (0x15 << 21) | OPC_CP0, + OPC_C0_6 = (0x16 << 21) | OPC_CP0, + OPC_C0_7 = (0x17 << 21) | OPC_CP0, + OPC_C0_8 = (0x18 << 21) | OPC_CP0, + OPC_C0_9 = (0x19 << 21) | OPC_CP0, + OPC_C0_A = (0x1A << 21) | OPC_CP0, + OPC_C0_B = (0x1B << 21) | OPC_CP0, + OPC_C0_C = (0x1C << 21) | OPC_CP0, + OPC_C0_D = (0x1D << 21) | OPC_CP0, + OPC_C0_E = (0x1E << 21) | OPC_CP0, + OPC_C0_F = (0x1F << 21) | OPC_CP0, +}; + +/* MFMC0 opcodes */ +#define MASK_MFMC0(op) (MASK_CP0(op) | (op & 0xFFFF)) + +enum { + OPC_DMT = 0x01 | (0 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0, + OPC_EMT = 0x01 | (1 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0, + OPC_DVPE = 0x01 | (0 << 5) | OPC_MFMC0, + OPC_EVPE = 0x01 | (1 << 5) | OPC_MFMC0, + OPC_DI = (0 << 5) | (0x0C << 11) | OPC_MFMC0, + OPC_EI = (1 << 5) | (0x0C << 11) | OPC_MFMC0, + OPC_DVP = 0x04 | (0 << 3) | (1 << 5) | (0 << 11) | OPC_MFMC0, + OPC_EVP = 0x04 | (0 << 3) | (0 << 5) | (0 << 11) | OPC_MFMC0, +}; + +/* Coprocessor 0 (with rs == C0) */ +#define MASK_C0(op) (MASK_CP0(op) | (op & 0x3F)) + +enum { + OPC_TLBR = 0x01 | OPC_C0, + OPC_TLBWI = 0x02 | OPC_C0, + OPC_TLBINV = 0x03 | OPC_C0, + OPC_TLBINVF = 0x04 | OPC_C0, + OPC_TLBWR = 0x06 | OPC_C0, + OPC_TLBP = 0x08 | OPC_C0, + OPC_RFE = 0x10 | OPC_C0, + OPC_ERET = 0x18 | OPC_C0, + OPC_DERET = 0x1F | OPC_C0, + OPC_WAIT = 0x20 | OPC_C0, +}; + +/* Coprocessor 1 (rs field) */ +#define MASK_CP1(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21))) + +/* Values for the fmt field in FP instructions */ +enum { + /* 0 - 15 are reserved */ + FMT_S = 16, /* single fp */ + FMT_D = 17, /* double fp */ + FMT_E = 18, /* extended fp */ + FMT_Q = 19, /* quad fp */ + FMT_W = 20, /* 32-bit fixed */ + FMT_L = 21, /* 64-bit fixed */ + FMT_PS = 22, /* paired single fp */ + /* 23 - 31 are reserved */ +}; + +enum { + OPC_MFC1 = (0x00 << 21) | OPC_CP1, + OPC_DMFC1 = (0x01 << 21) | OPC_CP1, + OPC_CFC1 = (0x02 << 21) | OPC_CP1, + OPC_MFHC1 = (0x03 << 21) | OPC_CP1, + OPC_MTC1 = (0x04 << 21) | OPC_CP1, + OPC_DMTC1 = (0x05 << 21) | OPC_CP1, + OPC_CTC1 = (0x06 << 21) | OPC_CP1, + OPC_MTHC1 = (0x07 << 21) | OPC_CP1, + OPC_BC1 = (0x08 << 21) | OPC_CP1, /* bc */ + OPC_BC1ANY2 = (0x09 << 21) | OPC_CP1, + OPC_BC1ANY4 = (0x0A << 21) | OPC_CP1, + OPC_BZ_V = (0x0B << 21) | OPC_CP1, + OPC_BNZ_V = (0x0F << 21) | OPC_CP1, + OPC_S_FMT = (FMT_S << 21) | OPC_CP1, + OPC_D_FMT = (FMT_D << 21) | OPC_CP1, + OPC_E_FMT = (FMT_E << 21) | OPC_CP1, + 
OPC_Q_FMT = (FMT_Q << 21) | OPC_CP1, + OPC_W_FMT = (FMT_W << 21) | OPC_CP1, + OPC_L_FMT = (FMT_L << 21) | OPC_CP1, + OPC_PS_FMT = (FMT_PS << 21) | OPC_CP1, + OPC_BC1EQZ = (0x09 << 21) | OPC_CP1, + OPC_BC1NEZ = (0x0D << 21) | OPC_CP1, + OPC_BZ_B = (0x18 << 21) | OPC_CP1, + OPC_BZ_H = (0x19 << 21) | OPC_CP1, + OPC_BZ_W = (0x1A << 21) | OPC_CP1, + OPC_BZ_D = (0x1B << 21) | OPC_CP1, + OPC_BNZ_B = (0x1C << 21) | OPC_CP1, + OPC_BNZ_H = (0x1D << 21) | OPC_CP1, + OPC_BNZ_W = (0x1E << 21) | OPC_CP1, + OPC_BNZ_D = (0x1F << 21) | OPC_CP1, +}; + +#define MASK_CP1_FUNC(op) (MASK_CP1(op) | (op & 0x3F)) +#define MASK_BC1(op) (MASK_CP1(op) | (op & (0x3 << 16))) + +enum { + OPC_BC1F = (0x00 << 16) | OPC_BC1, + OPC_BC1T = (0x01 << 16) | OPC_BC1, + OPC_BC1FL = (0x02 << 16) | OPC_BC1, + OPC_BC1TL = (0x03 << 16) | OPC_BC1, +}; + +enum { + OPC_BC1FANY2 = (0x00 << 16) | OPC_BC1ANY2, + OPC_BC1TANY2 = (0x01 << 16) | OPC_BC1ANY2, +}; + +enum { + OPC_BC1FANY4 = (0x00 << 16) | OPC_BC1ANY4, + OPC_BC1TANY4 = (0x01 << 16) | OPC_BC1ANY4, +}; + +#define MASK_CP2(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21))) + +enum { + OPC_MFC2 = (0x00 << 21) | OPC_CP2, + OPC_DMFC2 = (0x01 << 21) | OPC_CP2, + OPC_CFC2 = (0x02 << 21) | OPC_CP2, + OPC_MFHC2 = (0x03 << 21) | OPC_CP2, + OPC_MTC2 = (0x04 << 21) | OPC_CP2, + OPC_DMTC2 = (0x05 << 21) | OPC_CP2, + OPC_CTC2 = (0x06 << 21) | OPC_CP2, + OPC_MTHC2 = (0x07 << 21) | OPC_CP2, + OPC_BC2 = (0x08 << 21) | OPC_CP2, + OPC_BC2EQZ = (0x09 << 21) | OPC_CP2, + OPC_BC2NEZ = (0x0D << 21) | OPC_CP2, +}; + +#define MASK_LMI(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)) | (op & 0x1F)) + +enum { + OPC_PADDSH = (24 << 21) | (0x00) | OPC_CP2, + OPC_PADDUSH = (25 << 21) | (0x00) | OPC_CP2, + OPC_PADDH = (26 << 21) | (0x00) | OPC_CP2, + OPC_PADDW = (27 << 21) | (0x00) | OPC_CP2, + OPC_PADDSB = (28 << 21) | (0x00) | OPC_CP2, + OPC_PADDUSB = (29 << 21) | (0x00) | OPC_CP2, + OPC_PADDB = (30 << 21) | (0x00) | OPC_CP2, + OPC_PADDD = (31 << 21) | (0x00) | OPC_CP2, + + OPC_PSUBSH = (24 << 21) | (0x01) | OPC_CP2, + OPC_PSUBUSH = (25 << 21) | (0x01) | OPC_CP2, + OPC_PSUBH = (26 << 21) | (0x01) | OPC_CP2, + OPC_PSUBW = (27 << 21) | (0x01) | OPC_CP2, + OPC_PSUBSB = (28 << 21) | (0x01) | OPC_CP2, + OPC_PSUBUSB = (29 << 21) | (0x01) | OPC_CP2, + OPC_PSUBB = (30 << 21) | (0x01) | OPC_CP2, + OPC_PSUBD = (31 << 21) | (0x01) | OPC_CP2, + + OPC_PSHUFH = (24 << 21) | (0x02) | OPC_CP2, + OPC_PACKSSWH = (25 << 21) | (0x02) | OPC_CP2, + OPC_PACKSSHB = (26 << 21) | (0x02) | OPC_CP2, + OPC_PACKUSHB = (27 << 21) | (0x02) | OPC_CP2, + OPC_XOR_CP2 = (28 << 21) | (0x02) | OPC_CP2, + OPC_NOR_CP2 = (29 << 21) | (0x02) | OPC_CP2, + OPC_AND_CP2 = (30 << 21) | (0x02) | OPC_CP2, + OPC_PANDN = (31 << 21) | (0x02) | OPC_CP2, + + OPC_PUNPCKLHW = (24 << 21) | (0x03) | OPC_CP2, + OPC_PUNPCKHHW = (25 << 21) | (0x03) | OPC_CP2, + OPC_PUNPCKLBH = (26 << 21) | (0x03) | OPC_CP2, + OPC_PUNPCKHBH = (27 << 21) | (0x03) | OPC_CP2, + OPC_PINSRH_0 = (28 << 21) | (0x03) | OPC_CP2, + OPC_PINSRH_1 = (29 << 21) | (0x03) | OPC_CP2, + OPC_PINSRH_2 = (30 << 21) | (0x03) | OPC_CP2, + OPC_PINSRH_3 = (31 << 21) | (0x03) | OPC_CP2, + + OPC_PAVGH = (24 << 21) | (0x08) | OPC_CP2, + OPC_PAVGB = (25 << 21) | (0x08) | OPC_CP2, + OPC_PMAXSH = (26 << 21) | (0x08) | OPC_CP2, + OPC_PMINSH = (27 << 21) | (0x08) | OPC_CP2, + OPC_PMAXUB = (28 << 21) | (0x08) | OPC_CP2, + OPC_PMINUB = (29 << 21) | (0x08) | OPC_CP2, + + OPC_PCMPEQW = (24 << 21) | (0x09) | OPC_CP2, + OPC_PCMPGTW = (25 << 21) | (0x09) | OPC_CP2, + OPC_PCMPEQH = (26 << 21) | (0x09) | OPC_CP2, + OPC_PCMPGTH = (27 << 
21) | (0x09) | OPC_CP2, + OPC_PCMPEQB = (28 << 21) | (0x09) | OPC_CP2, + OPC_PCMPGTB = (29 << 21) | (0x09) | OPC_CP2, + + OPC_PSLLW = (24 << 21) | (0x0A) | OPC_CP2, + OPC_PSLLH = (25 << 21) | (0x0A) | OPC_CP2, + OPC_PMULLH = (26 << 21) | (0x0A) | OPC_CP2, + OPC_PMULHH = (27 << 21) | (0x0A) | OPC_CP2, + OPC_PMULUW = (28 << 21) | (0x0A) | OPC_CP2, + OPC_PMULHUH = (29 << 21) | (0x0A) | OPC_CP2, + + OPC_PSRLW = (24 << 21) | (0x0B) | OPC_CP2, + OPC_PSRLH = (25 << 21) | (0x0B) | OPC_CP2, + OPC_PSRAW = (26 << 21) | (0x0B) | OPC_CP2, + OPC_PSRAH = (27 << 21) | (0x0B) | OPC_CP2, + OPC_PUNPCKLWD = (28 << 21) | (0x0B) | OPC_CP2, + OPC_PUNPCKHWD = (29 << 21) | (0x0B) | OPC_CP2, + + OPC_ADDU_CP2 = (24 << 21) | (0x0C) | OPC_CP2, + OPC_OR_CP2 = (25 << 21) | (0x0C) | OPC_CP2, + OPC_ADD_CP2 = (26 << 21) | (0x0C) | OPC_CP2, + OPC_DADD_CP2 = (27 << 21) | (0x0C) | OPC_CP2, + OPC_SEQU_CP2 = (28 << 21) | (0x0C) | OPC_CP2, + OPC_SEQ_CP2 = (29 << 21) | (0x0C) | OPC_CP2, + + OPC_SUBU_CP2 = (24 << 21) | (0x0D) | OPC_CP2, + OPC_PASUBUB = (25 << 21) | (0x0D) | OPC_CP2, + OPC_SUB_CP2 = (26 << 21) | (0x0D) | OPC_CP2, + OPC_DSUB_CP2 = (27 << 21) | (0x0D) | OPC_CP2, + OPC_SLTU_CP2 = (28 << 21) | (0x0D) | OPC_CP2, + OPC_SLT_CP2 = (29 << 21) | (0x0D) | OPC_CP2, + + OPC_SLL_CP2 = (24 << 21) | (0x0E) | OPC_CP2, + OPC_DSLL_CP2 = (25 << 21) | (0x0E) | OPC_CP2, + OPC_PEXTRH = (26 << 21) | (0x0E) | OPC_CP2, + OPC_PMADDHW = (27 << 21) | (0x0E) | OPC_CP2, + OPC_SLEU_CP2 = (28 << 21) | (0x0E) | OPC_CP2, + OPC_SLE_CP2 = (29 << 21) | (0x0E) | OPC_CP2, + + OPC_SRL_CP2 = (24 << 21) | (0x0F) | OPC_CP2, + OPC_DSRL_CP2 = (25 << 21) | (0x0F) | OPC_CP2, + OPC_SRA_CP2 = (26 << 21) | (0x0F) | OPC_CP2, + OPC_DSRA_CP2 = (27 << 21) | (0x0F) | OPC_CP2, + OPC_BIADD = (28 << 21) | (0x0F) | OPC_CP2, + OPC_PMOVMSKB = (29 << 21) | (0x0F) | OPC_CP2, +}; + + +#define MASK_CP3(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) + +enum { + OPC_LWXC1 = 0x00 | OPC_CP3, + OPC_LDXC1 = 0x01 | OPC_CP3, + OPC_LUXC1 = 0x05 | OPC_CP3, + OPC_SWXC1 = 0x08 | OPC_CP3, + OPC_SDXC1 = 0x09 | OPC_CP3, + OPC_SUXC1 = 0x0D | OPC_CP3, + OPC_PREFX = 0x0F | OPC_CP3, + OPC_ALNV_PS = 0x1E | OPC_CP3, + OPC_MADD_S = 0x20 | OPC_CP3, + OPC_MADD_D = 0x21 | OPC_CP3, + OPC_MADD_PS = 0x26 | OPC_CP3, + OPC_MSUB_S = 0x28 | OPC_CP3, + OPC_MSUB_D = 0x29 | OPC_CP3, + OPC_MSUB_PS = 0x2E | OPC_CP3, + OPC_NMADD_S = 0x30 | OPC_CP3, + OPC_NMADD_D = 0x31 | OPC_CP3, + OPC_NMADD_PS = 0x36 | OPC_CP3, + OPC_NMSUB_S = 0x38 | OPC_CP3, + OPC_NMSUB_D = 0x39 | OPC_CP3, + OPC_NMSUB_PS = 0x3E | OPC_CP3, +}; + +/* MSA Opcodes */ +#define MASK_MSA_MINOR(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) +enum { + OPC_MSA_I8_00 = 0x00 | OPC_MSA, + OPC_MSA_I8_01 = 0x01 | OPC_MSA, + OPC_MSA_I8_02 = 0x02 | OPC_MSA, + OPC_MSA_I5_06 = 0x06 | OPC_MSA, + OPC_MSA_I5_07 = 0x07 | OPC_MSA, + OPC_MSA_BIT_09 = 0x09 | OPC_MSA, + OPC_MSA_BIT_0A = 0x0A | OPC_MSA, + OPC_MSA_3R_0D = 0x0D | OPC_MSA, + OPC_MSA_3R_0E = 0x0E | OPC_MSA, + OPC_MSA_3R_0F = 0x0F | OPC_MSA, + OPC_MSA_3R_10 = 0x10 | OPC_MSA, + OPC_MSA_3R_11 = 0x11 | OPC_MSA, + OPC_MSA_3R_12 = 0x12 | OPC_MSA, + OPC_MSA_3R_13 = 0x13 | OPC_MSA, + OPC_MSA_3R_14 = 0x14 | OPC_MSA, + OPC_MSA_3R_15 = 0x15 | OPC_MSA, + OPC_MSA_ELM = 0x19 | OPC_MSA, + OPC_MSA_3RF_1A = 0x1A | OPC_MSA, + OPC_MSA_3RF_1B = 0x1B | OPC_MSA, + OPC_MSA_3RF_1C = 0x1C | OPC_MSA, + OPC_MSA_VEC = 0x1E | OPC_MSA, + + /* MI10 instruction */ + OPC_LD_B = (0x20) | OPC_MSA, + OPC_LD_H = (0x21) | OPC_MSA, + OPC_LD_W = (0x22) | OPC_MSA, + OPC_LD_D = (0x23) | OPC_MSA, + OPC_ST_B = (0x24) | OPC_MSA, + OPC_ST_H = (0x25) | OPC_MSA, + OPC_ST_W = 
(0x26) | OPC_MSA, + OPC_ST_D = (0x27) | OPC_MSA, +}; + +enum { + /* I5 instruction df(bits 22..21) = _b, _h, _w, _d */ + OPC_ADDVI_df = (0x0 << 23) | OPC_MSA_I5_06, + OPC_CEQI_df = (0x0 << 23) | OPC_MSA_I5_07, + OPC_SUBVI_df = (0x1 << 23) | OPC_MSA_I5_06, + OPC_MAXI_S_df = (0x2 << 23) | OPC_MSA_I5_06, + OPC_CLTI_S_df = (0x2 << 23) | OPC_MSA_I5_07, + OPC_MAXI_U_df = (0x3 << 23) | OPC_MSA_I5_06, + OPC_CLTI_U_df = (0x3 << 23) | OPC_MSA_I5_07, + OPC_MINI_S_df = (0x4 << 23) | OPC_MSA_I5_06, + OPC_CLEI_S_df = (0x4 << 23) | OPC_MSA_I5_07, + OPC_MINI_U_df = (0x5 << 23) | OPC_MSA_I5_06, + OPC_CLEI_U_df = (0x5 << 23) | OPC_MSA_I5_07, + OPC_LDI_df = (0x6 << 23) | OPC_MSA_I5_07, + + /* I8 instruction */ + OPC_ANDI_B = (0x0 << 24) | OPC_MSA_I8_00, + OPC_BMNZI_B = (0x0 << 24) | OPC_MSA_I8_01, + OPC_SHF_B = (0x0 << 24) | OPC_MSA_I8_02, + OPC_ORI_B = (0x1 << 24) | OPC_MSA_I8_00, + OPC_BMZI_B = (0x1 << 24) | OPC_MSA_I8_01, + OPC_SHF_H = (0x1 << 24) | OPC_MSA_I8_02, + OPC_NORI_B = (0x2 << 24) | OPC_MSA_I8_00, + OPC_BSELI_B = (0x2 << 24) | OPC_MSA_I8_01, + OPC_SHF_W = (0x2 << 24) | OPC_MSA_I8_02, + OPC_XORI_B = (0x3 << 24) | OPC_MSA_I8_00, + + /* VEC/2R/2RF instruction */ + OPC_AND_V = (0x00 << 21) | OPC_MSA_VEC, + OPC_OR_V = (0x01 << 21) | OPC_MSA_VEC, + OPC_NOR_V = (0x02 << 21) | OPC_MSA_VEC, + OPC_XOR_V = (0x03 << 21) | OPC_MSA_VEC, + OPC_BMNZ_V = (0x04 << 21) | OPC_MSA_VEC, + OPC_BMZ_V = (0x05 << 21) | OPC_MSA_VEC, + OPC_BSEL_V = (0x06 << 21) | OPC_MSA_VEC, + + OPC_MSA_2R = (0x18 << 21) | OPC_MSA_VEC, + OPC_MSA_2RF = (0x19 << 21) | OPC_MSA_VEC, + + /* 2R instruction df(bits 17..16) = _b, _h, _w, _d */ + OPC_FILL_df = (0x00 << 18) | OPC_MSA_2R, + OPC_PCNT_df = (0x01 << 18) | OPC_MSA_2R, + OPC_NLOC_df = (0x02 << 18) | OPC_MSA_2R, + OPC_NLZC_df = (0x03 << 18) | OPC_MSA_2R, + + /* 2RF instruction df(bit 16) = _w, _d */ + OPC_FCLASS_df = (0x00 << 17) | OPC_MSA_2RF, + OPC_FTRUNC_S_df = (0x01 << 17) | OPC_MSA_2RF, + OPC_FTRUNC_U_df = (0x02 << 17) | OPC_MSA_2RF, + OPC_FSQRT_df = (0x03 << 17) | OPC_MSA_2RF, + OPC_FRSQRT_df = (0x04 << 17) | OPC_MSA_2RF, + OPC_FRCP_df = (0x05 << 17) | OPC_MSA_2RF, + OPC_FRINT_df = (0x06 << 17) | OPC_MSA_2RF, + OPC_FLOG2_df = (0x07 << 17) | OPC_MSA_2RF, + OPC_FEXUPL_df = (0x08 << 17) | OPC_MSA_2RF, + OPC_FEXUPR_df = (0x09 << 17) | OPC_MSA_2RF, + OPC_FFQL_df = (0x0A << 17) | OPC_MSA_2RF, + OPC_FFQR_df = (0x0B << 17) | OPC_MSA_2RF, + OPC_FTINT_S_df = (0x0C << 17) | OPC_MSA_2RF, + OPC_FTINT_U_df = (0x0D << 17) | OPC_MSA_2RF, + OPC_FFINT_S_df = (0x0E << 17) | OPC_MSA_2RF, + OPC_FFINT_U_df = (0x0F << 17) | OPC_MSA_2RF, + + /* 3R instruction df(bits 22..21) = _b, _h, _w, d */ + OPC_SLL_df = (0x0 << 23) | OPC_MSA_3R_0D, + OPC_ADDV_df = (0x0 << 23) | OPC_MSA_3R_0E, + OPC_CEQ_df = (0x0 << 23) | OPC_MSA_3R_0F, + OPC_ADD_A_df = (0x0 << 23) | OPC_MSA_3R_10, + OPC_SUBS_S_df = (0x0 << 23) | OPC_MSA_3R_11, + OPC_MULV_df = (0x0 << 23) | OPC_MSA_3R_12, + OPC_DOTP_S_df = (0x0 << 23) | OPC_MSA_3R_13, + OPC_SLD_df = (0x0 << 23) | OPC_MSA_3R_14, + OPC_VSHF_df = (0x0 << 23) | OPC_MSA_3R_15, + OPC_SRA_df = (0x1 << 23) | OPC_MSA_3R_0D, + OPC_SUBV_df = (0x1 << 23) | OPC_MSA_3R_0E, + OPC_ADDS_A_df = (0x1 << 23) | OPC_MSA_3R_10, + OPC_SUBS_U_df = (0x1 << 23) | OPC_MSA_3R_11, + OPC_MADDV_df = (0x1 << 23) | OPC_MSA_3R_12, + OPC_DOTP_U_df = (0x1 << 23) | OPC_MSA_3R_13, + OPC_SPLAT_df = (0x1 << 23) | OPC_MSA_3R_14, + OPC_SRAR_df = (0x1 << 23) | OPC_MSA_3R_15, + OPC_SRL_df = (0x2 << 23) | OPC_MSA_3R_0D, + OPC_MAX_S_df = (0x2 << 23) | OPC_MSA_3R_0E, + OPC_CLT_S_df = (0x2 << 23) | OPC_MSA_3R_0F, + OPC_ADDS_S_df = 
(0x2 << 23) | OPC_MSA_3R_10, + OPC_SUBSUS_U_df = (0x2 << 23) | OPC_MSA_3R_11, + OPC_MSUBV_df = (0x2 << 23) | OPC_MSA_3R_12, + OPC_DPADD_S_df = (0x2 << 23) | OPC_MSA_3R_13, + OPC_PCKEV_df = (0x2 << 23) | OPC_MSA_3R_14, + OPC_SRLR_df = (0x2 << 23) | OPC_MSA_3R_15, + OPC_BCLR_df = (0x3 << 23) | OPC_MSA_3R_0D, + OPC_MAX_U_df = (0x3 << 23) | OPC_MSA_3R_0E, + OPC_CLT_U_df = (0x3 << 23) | OPC_MSA_3R_0F, + OPC_ADDS_U_df = (0x3 << 23) | OPC_MSA_3R_10, + OPC_SUBSUU_S_df = (0x3 << 23) | OPC_MSA_3R_11, + OPC_DPADD_U_df = (0x3 << 23) | OPC_MSA_3R_13, + OPC_PCKOD_df = (0x3 << 23) | OPC_MSA_3R_14, + OPC_BSET_df = (0x4 << 23) | OPC_MSA_3R_0D, + OPC_MIN_S_df = (0x4 << 23) | OPC_MSA_3R_0E, + OPC_CLE_S_df = (0x4 << 23) | OPC_MSA_3R_0F, + OPC_AVE_S_df = (0x4 << 23) | OPC_MSA_3R_10, + OPC_ASUB_S_df = (0x4 << 23) | OPC_MSA_3R_11, + OPC_DIV_S_df = (0x4 << 23) | OPC_MSA_3R_12, + OPC_DPSUB_S_df = (0x4 << 23) | OPC_MSA_3R_13, + OPC_ILVL_df = (0x4 << 23) | OPC_MSA_3R_14, + OPC_HADD_S_df = (0x4 << 23) | OPC_MSA_3R_15, + OPC_BNEG_df = (0x5 << 23) | OPC_MSA_3R_0D, + OPC_MIN_U_df = (0x5 << 23) | OPC_MSA_3R_0E, + OPC_CLE_U_df = (0x5 << 23) | OPC_MSA_3R_0F, + OPC_AVE_U_df = (0x5 << 23) | OPC_MSA_3R_10, + OPC_ASUB_U_df = (0x5 << 23) | OPC_MSA_3R_11, + OPC_DIV_U_df = (0x5 << 23) | OPC_MSA_3R_12, + OPC_DPSUB_U_df = (0x5 << 23) | OPC_MSA_3R_13, + OPC_ILVR_df = (0x5 << 23) | OPC_MSA_3R_14, + OPC_HADD_U_df = (0x5 << 23) | OPC_MSA_3R_15, + OPC_BINSL_df = (0x6 << 23) | OPC_MSA_3R_0D, + OPC_MAX_A_df = (0x6 << 23) | OPC_MSA_3R_0E, + OPC_AVER_S_df = (0x6 << 23) | OPC_MSA_3R_10, + OPC_MOD_S_df = (0x6 << 23) | OPC_MSA_3R_12, + OPC_ILVEV_df = (0x6 << 23) | OPC_MSA_3R_14, + OPC_HSUB_S_df = (0x6 << 23) | OPC_MSA_3R_15, + OPC_BINSR_df = (0x7 << 23) | OPC_MSA_3R_0D, + OPC_MIN_A_df = (0x7 << 23) | OPC_MSA_3R_0E, + OPC_AVER_U_df = (0x7 << 23) | OPC_MSA_3R_10, + OPC_MOD_U_df = (0x7 << 23) | OPC_MSA_3R_12, + OPC_ILVOD_df = (0x7 << 23) | OPC_MSA_3R_14, + OPC_HSUB_U_df = (0x7 << 23) | OPC_MSA_3R_15, + + /* ELM instructions df(bits 21..16) = _b, _h, _w, _d */ + OPC_SLDI_df = (0x0 << 22) | (0x00 << 16) | OPC_MSA_ELM, + OPC_CTCMSA = (0x0 << 22) | (0x3E << 16) | OPC_MSA_ELM, + OPC_SPLATI_df = (0x1 << 22) | (0x00 << 16) | OPC_MSA_ELM, + OPC_CFCMSA = (0x1 << 22) | (0x3E << 16) | OPC_MSA_ELM, + OPC_COPY_S_df = (0x2 << 22) | (0x00 << 16) | OPC_MSA_ELM, + OPC_MOVE_V = (0x2 << 22) | (0x3E << 16) | OPC_MSA_ELM, + OPC_COPY_U_df = (0x3 << 22) | (0x00 << 16) | OPC_MSA_ELM, + OPC_INSERT_df = (0x4 << 22) | (0x00 << 16) | OPC_MSA_ELM, + OPC_INSVE_df = (0x5 << 22) | (0x00 << 16) | OPC_MSA_ELM, + + /* 3RF instruction _df(bit 21) = _w, _d */ + OPC_FCAF_df = (0x0 << 22) | OPC_MSA_3RF_1A, + OPC_FADD_df = (0x0 << 22) | OPC_MSA_3RF_1B, + OPC_FCUN_df = (0x1 << 22) | OPC_MSA_3RF_1A, + OPC_FSUB_df = (0x1 << 22) | OPC_MSA_3RF_1B, + OPC_FCOR_df = (0x1 << 22) | OPC_MSA_3RF_1C, + OPC_FCEQ_df = (0x2 << 22) | OPC_MSA_3RF_1A, + OPC_FMUL_df = (0x2 << 22) | OPC_MSA_3RF_1B, + OPC_FCUNE_df = (0x2 << 22) | OPC_MSA_3RF_1C, + OPC_FCUEQ_df = (0x3 << 22) | OPC_MSA_3RF_1A, + OPC_FDIV_df = (0x3 << 22) | OPC_MSA_3RF_1B, + OPC_FCNE_df = (0x3 << 22) | OPC_MSA_3RF_1C, + OPC_FCLT_df = (0x4 << 22) | OPC_MSA_3RF_1A, + OPC_FMADD_df = (0x4 << 22) | OPC_MSA_3RF_1B, + OPC_MUL_Q_df = (0x4 << 22) | OPC_MSA_3RF_1C, + OPC_FCULT_df = (0x5 << 22) | OPC_MSA_3RF_1A, + OPC_FMSUB_df = (0x5 << 22) | OPC_MSA_3RF_1B, + OPC_MADD_Q_df = (0x5 << 22) | OPC_MSA_3RF_1C, + OPC_FCLE_df = (0x6 << 22) | OPC_MSA_3RF_1A, + OPC_MSUB_Q_df = (0x6 << 22) | OPC_MSA_3RF_1C, + OPC_FCULE_df = (0x7 << 22) | OPC_MSA_3RF_1A, + 
OPC_FEXP2_df = (0x7 << 22) | OPC_MSA_3RF_1B, + OPC_FSAF_df = (0x8 << 22) | OPC_MSA_3RF_1A, + OPC_FEXDO_df = (0x8 << 22) | OPC_MSA_3RF_1B, + OPC_FSUN_df = (0x9 << 22) | OPC_MSA_3RF_1A, + OPC_FSOR_df = (0x9 << 22) | OPC_MSA_3RF_1C, + OPC_FSEQ_df = (0xA << 22) | OPC_MSA_3RF_1A, + OPC_FTQ_df = (0xA << 22) | OPC_MSA_3RF_1B, + OPC_FSUNE_df = (0xA << 22) | OPC_MSA_3RF_1C, + OPC_FSUEQ_df = (0xB << 22) | OPC_MSA_3RF_1A, + OPC_FSNE_df = (0xB << 22) | OPC_MSA_3RF_1C, + OPC_FSLT_df = (0xC << 22) | OPC_MSA_3RF_1A, + OPC_FMIN_df = (0xC << 22) | OPC_MSA_3RF_1B, + OPC_MULR_Q_df = (0xC << 22) | OPC_MSA_3RF_1C, + OPC_FSULT_df = (0xD << 22) | OPC_MSA_3RF_1A, + OPC_FMIN_A_df = (0xD << 22) | OPC_MSA_3RF_1B, + OPC_MADDR_Q_df = (0xD << 22) | OPC_MSA_3RF_1C, + OPC_FSLE_df = (0xE << 22) | OPC_MSA_3RF_1A, + OPC_FMAX_df = (0xE << 22) | OPC_MSA_3RF_1B, + OPC_MSUBR_Q_df = (0xE << 22) | OPC_MSA_3RF_1C, + OPC_FSULE_df = (0xF << 22) | OPC_MSA_3RF_1A, + OPC_FMAX_A_df = (0xF << 22) | OPC_MSA_3RF_1B, + + /* BIT instruction df(bits 22..16) = _B _H _W _D */ + OPC_SLLI_df = (0x0 << 23) | OPC_MSA_BIT_09, + OPC_SAT_S_df = (0x0 << 23) | OPC_MSA_BIT_0A, + OPC_SRAI_df = (0x1 << 23) | OPC_MSA_BIT_09, + OPC_SAT_U_df = (0x1 << 23) | OPC_MSA_BIT_0A, + OPC_SRLI_df = (0x2 << 23) | OPC_MSA_BIT_09, + OPC_SRARI_df = (0x2 << 23) | OPC_MSA_BIT_0A, + OPC_BCLRI_df = (0x3 << 23) | OPC_MSA_BIT_09, + OPC_SRLRI_df = (0x3 << 23) | OPC_MSA_BIT_0A, + OPC_BSETI_df = (0x4 << 23) | OPC_MSA_BIT_09, + OPC_BNEGI_df = (0x5 << 23) | OPC_MSA_BIT_09, + OPC_BINSLI_df = (0x6 << 23) | OPC_MSA_BIT_09, + OPC_BINSRI_df = (0x7 << 23) | OPC_MSA_BIT_09, +}; + + +/* + * + * AN OVERVIEW OF MXU EXTENSION INSTRUCTION SET + * ============================================ + * + * + * MXU (full name: MIPS eXtension/enhanced Unit) is a SIMD extension of the + * MIPS32 instruction set. It is designed to fit the needs of signal, graphical + * and video processing applications. The MXU instruction set is used in the + * Xburst family of microprocessors by Ingenic. + * + * The MXU unit contains 17 registers called X0-X16. X0 is always zero, and + * X16 is the control register.
+ * + * + * The notation used in MXU assembler mnemonics + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Register operands: + * + * XRa, XRb, XRc, XRd - MXU registers + * Rb, Rc, Rd, Rs, Rt - general purpose MIPS registers + * + * Non-register operands: + * + * aptn1 - 1-bit accumulate add/subtract pattern + * aptn2 - 2-bit accumulate add/subtract pattern + * eptn2 - 2-bit execute add/subtract pattern + * optn2 - 2-bit operand pattern + * optn3 - 3-bit operand pattern + * sft4 - 4-bit shift amount + * strd2 - 2-bit stride amount + * + * Prefixes: + * + * Level of parallelism: Operand size: + * S - single operation at a time 32 - word + * D - two operations in parallel 16 - half word + * Q - four operations in parallel 8 - byte + * + * Operations: + * + * ADD - Add or subtract + * ADDC - Add with carry-in + * ACC - Accumulate + * ASUM - Sum together then accumulate (add or subtract) + * ASUMC - Sum together then accumulate (add or subtract) with carry-in + * AVG - Average between 2 operands + * ABD - Absolute difference + * ALN - Align data + * AND - Logical bitwise 'and' operation + * CPS - Copy sign + * EXTR - Extract bits + * I2M - Move from GPR register to MXU register + * LDD - Load data from memory to XRF + * LDI - Load data from memory to XRF (and increase the address base) + * LUI - Load unsigned immediate + * MUL - Multiply + * MULU - Unsigned multiply + * MADD - 64-bit operand add 32x32 product + * MSUB - 64-bit operand subtract 32x32 product + * MAC - Multiply and accumulate (add or subtract) + * MAD - Multiply and add or subtract + * MAX - Maximum between 2 operands + * MIN - Minimum between 2 operands + * M2I - Move from MXU register to GPR register + * MOVZ - Move if zero + * MOVN - Move if non-zero + * NOR - Logical bitwise 'nor' operation + * OR - Logical bitwise 'or' operation + * STD - Store data from XRF to memory + * SDI - Store data from XRF to memory (and increase the address base) + * SLT - Set of less than comparison + * SAD - Sum of absolute differences + * SLL - Logical shift left + * SLR - Logical shift right + * SAR - Arithmetic shift right + * SAT - Saturation + * SFL - Shuffle + * SCOP - Calculate x's scope (-1, means x<0; 0, means x==0; 1, means x>0) + * XOR - Logical bitwise 'exclusive or' operation + * + * Suffixes: + * + * E - Expand results + * F - Fixed point multiplication + * L - Low part result + * R - Doing rounding + * V - Variable instead of immediate + * W - Combine above L and V + * + * + * The list of MXU instructions grouped by functionality + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Load/Store instructions Multiplication instructions + * ----------------------- --------------------------- + * + * S32LDD XRa, Rb, s12 S32MADD XRa, XRd, Rs, Rt + * S32STD XRa, Rb, s12 S32MADDU XRa, XRd, Rs, Rt + * S32LDDV XRa, Rb, rc, strd2 S32MSUB XRa, XRd, Rs, Rt + * S32STDV XRa, Rb, rc, strd2 S32MSUBU XRa, XRd, Rs, Rt + * S32LDI XRa, Rb, s12 S32MUL XRa, XRd, Rs, Rt + * S32SDI XRa, Rb, s12 S32MULU XRa, XRd, Rs, Rt + * S32LDIV XRa, Rb, rc, strd2 D16MUL XRa, XRb, XRc, XRd, optn2 + * S32SDIV XRa, Rb, rc, strd2 D16MULE XRa, XRb, XRc, optn2 + * S32LDDR XRa, Rb, s12 D16MULF XRa, XRb, XRc, optn2 + * S32STDR XRa, Rb, s12 D16MAC XRa, XRb, XRc, XRd, aptn2, optn2 + * S32LDDVR XRa, Rb, rc, strd2 D16MACE XRa, XRb, XRc, XRd, aptn2, optn2 + * S32STDVR XRa, Rb, rc, strd2 D16MACF XRa, XRb, XRc, XRd, aptn2, optn2 + * S32LDIR XRa, Rb, s12 D16MADL XRa, XRb, XRc, XRd, aptn2, optn2 + * S32SDIR XRa, Rb, s12 S16MAD XRa, XRb, XRc, XRd, aptn1, optn2 + * 
S32LDIVR XRa, Rb, rc, strd2        Q8MUL XRa, XRb, XRc, XRd
+ * S32SDIVR XRa, Rb, rc, strd2       Q8MULSU XRa, XRb, XRc, XRd
+ * S16LDD XRa, Rb, s10, eptn2        Q8MAC XRa, XRb, XRc, XRd, aptn2
+ * S16STD XRa, Rb, s10, eptn2        Q8MACSU XRa, XRb, XRc, XRd, aptn2
+ * S16LDI XRa, Rb, s10, eptn2        Q8MADL XRa, XRb, XRc, XRd, aptn2
+ * S16SDI XRa, Rb, s10, eptn2
+ * S8LDD XRa, Rb, s8, eptn3
+ * S8STD XRa, Rb, s8, eptn3          Addition and subtraction instructions
+ * S8LDI XRa, Rb, s8, eptn3          -------------------------------------
+ * S8SDI XRa, Rb, s8, eptn3
+ * LXW Rd, Rs, Rt, strd2             D32ADD XRa, XRb, XRc, XRd, eptn2
+ * LXH Rd, Rs, Rt, strd2             D32ADDC XRa, XRb, XRc, XRd
+ * LXHU Rd, Rs, Rt, strd2            D32ACC XRa, XRb, XRc, XRd, eptn2
+ * LXB Rd, Rs, Rt, strd2             D32ACCM XRa, XRb, XRc, XRd, eptn2
+ * LXBU Rd, Rs, Rt, strd2            D32ASUM XRa, XRb, XRc, XRd, eptn2
+ *                                   S32CPS XRa, XRb, XRc
+ *                                   Q16ADD XRa, XRb, XRc, XRd, eptn2, optn2
+ * Comparison instructions           Q16ACC XRa, XRb, XRc, XRd, eptn2
+ * -----------------------           Q16ACCM XRa, XRb, XRc, XRd, eptn2
+ *                                   D16ASUM XRa, XRb, XRc, XRd, eptn2
+ * S32MAX XRa, XRb, XRc              D16CPS XRa, XRb
+ * S32MIN XRa, XRb, XRc              D16AVG XRa, XRb, XRc
+ * S32SLT XRa, XRb, XRc              D16AVGR XRa, XRb, XRc
+ * S32MOVZ XRa, XRb, XRc             Q8ADD XRa, XRb, XRc, eptn2
+ * S32MOVN XRa, XRb, XRc             Q8ADDE XRa, XRb, XRc, XRd, eptn2
+ * D16MAX XRa, XRb, XRc              Q8ACCE XRa, XRb, XRc, XRd, eptn2
+ * D16MIN XRa, XRb, XRc              Q8ABD XRa, XRb, XRc
+ * D16SLT XRa, XRb, XRc              Q8SAD XRa, XRb, XRc, XRd
+ * D16MOVZ XRa, XRb, XRc             Q8AVG XRa, XRb, XRc
+ * D16MOVN XRa, XRb, XRc             Q8AVGR XRa, XRb, XRc
+ * Q8MAX XRa, XRb, XRc               D8SUM XRa, XRb, XRc, XRd
+ * Q8MIN XRa, XRb, XRc               D8SUMC XRa, XRb, XRc, XRd
+ * Q8SLT XRa, XRb, XRc
+ * Q8SLTU XRa, XRb, XRc
+ * Q8MOVZ XRa, XRb, XRc              Shift instructions
+ * Q8MOVN XRa, XRb, XRc              ------------------
+ *
+ *                                   D32SLL XRa, XRb, XRc, XRd, sft4
+ * Bitwise instructions              D32SLR XRa, XRb, XRc, XRd, sft4
+ * --------------------              D32SAR XRa, XRb, XRc, XRd, sft4
+ *                                   D32SARL XRa, XRb, XRc, sft4
+ * S32NOR XRa, XRb, XRc              D32SLLV XRa, XRb, Rb
+ * S32AND XRa, XRb, XRc              D32SLRV XRa, XRb, Rb
+ * S32XOR XRa, XRb, XRc              D32SARV XRa, XRb, Rb
+ * S32OR XRa, XRb, XRc               D32SARW XRa, XRb, XRc, Rb
+ *                                   Q16SLL XRa, XRb, XRc, XRd, sft4
+ *                                   Q16SLR XRa, XRb, XRc, XRd, sft4
+ * Miscellaneous instructions        Q16SAR XRa, XRb, XRc, XRd, sft4
+ * --------------------------        Q16SLLV XRa, XRb, Rb
+ *                                   Q16SLRV XRa, XRb, Rb
+ * S32SFL XRa, XRb, XRc, XRd, optn2  Q16SARV XRa, XRb, Rb
+ * S32ALN XRa, XRb, XRc, Rb
+ * S32ALNI XRa, XRb, XRc, s3
+ * S32LUI XRa, s8, optn3             Move instructions
+ * S32EXTR XRa, XRb, Rb, bits5       -----------------
+ * S32EXTRV XRa, XRb, Rs, Rt
+ * Q16SCOP XRa, XRb, XRc, XRd        S32M2I XRa, Rb
+ * Q16SAT XRa, XRb, XRc              S32I2M XRa, Rb
+ *
+ *
+ * The opcode organization of MXU instructions
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The bits 31..26 of all MXU instructions are equal to 0x1C (also referred
+ * to as opcode SPECIAL2 in the base MIPS ISA). 
The organization and meaning of + * other bits up to the instruction level is as follows: + * + * bits + * 05..00 + * + * |- 000000 - OPC_MXU_S32MADD + * |- 000001 - OPC_MXU_S32MADDU + * |- 000010 - (non-MXU OPC_MUL) + * | + * | 20..18 + * |- 000011 - OPC_MXU__POOL00 --- 000 - OPC_MXU_S32MAX + * | |- 001 - OPC_MXU_S32MIN + * | |- 010 - OPC_MXU_D16MAX + * | |- 011 - OPC_MXU_D16MIN + * | |- 100 - OPC_MXU_Q8MAX + * | |- 101 - OPC_MXU_Q8MIN + * | |- 110 - OPC_MXU_Q8SLT + * | |- 111 - OPC_MXU_Q8SLTU + * |- 000100 - OPC_MXU_S32MSUB + * |- 000101 - OPC_MXU_S32MSUBU 20..18 + * |- 000110 - OPC_MXU__POOL01 --- 000 - OPC_MXU_S32SLT + * | |- 001 - OPC_MXU_D16SLT + * | |- 010 - OPC_MXU_D16AVG + * | |- 011 - OPC_MXU_D16AVGR + * | |- 100 - OPC_MXU_Q8AVG + * | |- 101 - OPC_MXU_Q8AVGR + * | |- 111 - OPC_MXU_Q8ADD + * | + * | 20..18 + * |- 000111 - OPC_MXU__POOL02 --- 000 - OPC_MXU_S32CPS + * | |- 010 - OPC_MXU_D16CPS + * | |- 100 - OPC_MXU_Q8ABD + * | |- 110 - OPC_MXU_Q16SAT + * |- 001000 - OPC_MXU_D16MUL + * | 25..24 + * |- 001001 - OPC_MXU__POOL03 --- 00 - OPC_MXU_D16MULF + * | |- 01 - OPC_MXU_D16MULE + * |- 001010 - OPC_MXU_D16MAC + * |- 001011 - OPC_MXU_D16MACF + * |- 001100 - OPC_MXU_D16MADL + * |- 001101 - OPC_MXU_S16MAD + * |- 001110 - OPC_MXU_Q16ADD + * |- 001111 - OPC_MXU_D16MACE 23 + * | |- 0 - OPC_MXU_S32LDD + * |- 010000 - OPC_MXU__POOL04 --- 1 - OPC_MXU_S32LDDR + * | + * | 23 + * |- 010001 - OPC_MXU__POOL05 --- 0 - OPC_MXU_S32STD + * | |- 1 - OPC_MXU_S32STDR + * | + * | 13..10 + * |- 010010 - OPC_MXU__POOL06 --- 0000 - OPC_MXU_S32LDDV + * | |- 0001 - OPC_MXU_S32LDDVR + * | + * | 13..10 + * |- 010011 - OPC_MXU__POOL07 --- 0000 - OPC_MXU_S32STDV + * | |- 0001 - OPC_MXU_S32STDVR + * | + * | 23 + * |- 010100 - OPC_MXU__POOL08 --- 0 - OPC_MXU_S32LDI + * | |- 1 - OPC_MXU_S32LDIR + * | + * | 23 + * |- 010101 - OPC_MXU__POOL09 --- 0 - OPC_MXU_S32SDI + * | |- 1 - OPC_MXU_S32SDIR + * | + * | 13..10 + * |- 010110 - OPC_MXU__POOL10 --- 0000 - OPC_MXU_S32LDIV + * | |- 0001 - OPC_MXU_S32LDIVR + * | + * | 13..10 + * |- 010111 - OPC_MXU__POOL11 --- 0000 - OPC_MXU_S32SDIV + * | |- 0001 - OPC_MXU_S32SDIVR + * |- 011000 - OPC_MXU_D32ADD + * | 23..22 + * MXU |- 011001 - OPC_MXU__POOL12 --- 00 - OPC_MXU_D32ACC + * opcodes -| |- 01 - OPC_MXU_D32ACCM + * | |- 10 - OPC_MXU_D32ASUM + * |- 011010 - + * | 23..22 + * |- 011011 - OPC_MXU__POOL13 --- 00 - OPC_MXU_Q16ACC + * | |- 01 - OPC_MXU_Q16ACCM + * | |- 10 - OPC_MXU_Q16ASUM + * | + * | 23..22 + * |- 011100 - OPC_MXU__POOL14 --- 00 - OPC_MXU_Q8ADDE + * | |- 01 - OPC_MXU_D8SUM + * |- 011101 - OPC_MXU_Q8ACCE |- 10 - OPC_MXU_D8SUMC + * |- 011110 - + * |- 011111 - + * |- 100000 - (overlaps with CLZ) + * |- 100001 - (overlaps with CLO) + * |- 100010 - OPC_MXU_S8LDD + * |- 100011 - OPC_MXU_S8STD 15..14 + * |- 100100 - OPC_MXU_S8LDI |- 00 - OPC_MXU_S32MUL + * |- 100101 - OPC_MXU_S8SDI |- 00 - OPC_MXU_S32MULU + * | |- 00 - OPC_MXU_S32EXTR + * |- 100110 - OPC_MXU__POOL15 --- 00 - OPC_MXU_S32EXTRV + * | + * | 20..18 + * |- 100111 - OPC_MXU__POOL16 --- 000 - OPC_MXU_D32SARW + * | |- 001 - OPC_MXU_S32ALN + * | |- 010 - OPC_MXU_S32ALNI + * | |- 011 - OPC_MXU_S32LUI + * | |- 100 - OPC_MXU_S32NOR + * | |- 101 - OPC_MXU_S32AND + * | |- 110 - OPC_MXU_S32OR + * | |- 111 - OPC_MXU_S32XOR + * | + * | 7..5 + * |- 101000 - OPC_MXU__POOL17 --- 000 - OPC_MXU_LXB + * | |- 001 - OPC_MXU_LXH + * |- 101001 - |- 011 - OPC_MXU_LXW + * |- 101010 - OPC_MXU_S16LDD |- 100 - OPC_MXU_LXBU + * |- 101011 - OPC_MXU_S16STD |- 101 - OPC_MXU_LXHU + * |- 101100 - OPC_MXU_S16LDI + * |- 101101 - OPC_MXU_S16SDI + 
* |- 101110 - OPC_MXU_S32M2I + * |- 101111 - OPC_MXU_S32I2M + * |- 110000 - OPC_MXU_D32SLL + * |- 110001 - OPC_MXU_D32SLR 20..18 + * |- 110010 - OPC_MXU_D32SARL |- 000 - OPC_MXU_D32SLLV + * |- 110011 - OPC_MXU_D32SAR |- 001 - OPC_MXU_D32SLRV + * |- 110100 - OPC_MXU_Q16SLL |- 010 - OPC_MXU_D32SARV + * |- 110101 - OPC_MXU_Q16SLR |- 011 - OPC_MXU_Q16SLLV + * | |- 100 - OPC_MXU_Q16SLRV + * |- 110110 - OPC_MXU__POOL18 --- 101 - OPC_MXU_Q16SARV + * | + * |- 110111 - OPC_MXU_Q16SAR + * | 23..22 + * |- 111000 - OPC_MXU__POOL19 --- 00 - OPC_MXU_Q8MUL + * | |- 01 - OPC_MXU_Q8MULSU + * | + * | 20..18 + * |- 111001 - OPC_MXU__POOL20 --- 000 - OPC_MXU_Q8MOVZ + * | |- 001 - OPC_MXU_Q8MOVN + * | |- 010 - OPC_MXU_D16MOVZ + * | |- 011 - OPC_MXU_D16MOVN + * | |- 100 - OPC_MXU_S32MOVZ + * | |- 101 - OPC_MXU_S32MOVN + * | + * | 23..22 + * |- 111010 - OPC_MXU__POOL21 --- 00 - OPC_MXU_Q8MAC + * | |- 10 - OPC_MXU_Q8MACSU + * |- 111011 - OPC_MXU_Q16SCOP + * |- 111100 - OPC_MXU_Q8MADL + * |- 111101 - OPC_MXU_S32SFL + * |- 111110 - OPC_MXU_Q8SAD + * |- 111111 - (overlaps with SDBBP) + * + * + * Compiled after: + * + * "XBurst(c) Instruction Set Architecture MIPS eXtension/enhanced Unit + * Programming Manual", Ingenic Semiconductor Co, Ltd., revision June 2, 2017 + */ + +enum { + OPC_MXU_S32MADD = 0x00, + OPC_MXU_S32MADDU = 0x01, + OPC__MXU_MUL = 0x02, + OPC_MXU__POOL00 = 0x03, + OPC_MXU_S32MSUB = 0x04, + OPC_MXU_S32MSUBU = 0x05, + OPC_MXU__POOL01 = 0x06, + OPC_MXU__POOL02 = 0x07, + OPC_MXU_D16MUL = 0x08, + OPC_MXU__POOL03 = 0x09, + OPC_MXU_D16MAC = 0x0A, + OPC_MXU_D16MACF = 0x0B, + OPC_MXU_D16MADL = 0x0C, + OPC_MXU_S16MAD = 0x0D, + OPC_MXU_Q16ADD = 0x0E, + OPC_MXU_D16MACE = 0x0F, + OPC_MXU__POOL04 = 0x10, + OPC_MXU__POOL05 = 0x11, + OPC_MXU__POOL06 = 0x12, + OPC_MXU__POOL07 = 0x13, + OPC_MXU__POOL08 = 0x14, + OPC_MXU__POOL09 = 0x15, + OPC_MXU__POOL10 = 0x16, + OPC_MXU__POOL11 = 0x17, + OPC_MXU_D32ADD = 0x18, + OPC_MXU__POOL12 = 0x19, + /* not assigned 0x1A */ + OPC_MXU__POOL13 = 0x1B, + OPC_MXU__POOL14 = 0x1C, + OPC_MXU_Q8ACCE = 0x1D, + /* not assigned 0x1E */ + /* not assigned 0x1F */ + /* not assigned 0x20 */ + /* not assigned 0x21 */ + OPC_MXU_S8LDD = 0x22, + OPC_MXU_S8STD = 0x23, + OPC_MXU_S8LDI = 0x24, + OPC_MXU_S8SDI = 0x25, + OPC_MXU__POOL15 = 0x26, + OPC_MXU__POOL16 = 0x27, + OPC_MXU__POOL17 = 0x28, + /* not assigned 0x29 */ + OPC_MXU_S16LDD = 0x2A, + OPC_MXU_S16STD = 0x2B, + OPC_MXU_S16LDI = 0x2C, + OPC_MXU_S16SDI = 0x2D, + OPC_MXU_S32M2I = 0x2E, + OPC_MXU_S32I2M = 0x2F, + OPC_MXU_D32SLL = 0x30, + OPC_MXU_D32SLR = 0x31, + OPC_MXU_D32SARL = 0x32, + OPC_MXU_D32SAR = 0x33, + OPC_MXU_Q16SLL = 0x34, + OPC_MXU_Q16SLR = 0x35, + OPC_MXU__POOL18 = 0x36, + OPC_MXU_Q16SAR = 0x37, + OPC_MXU__POOL19 = 0x38, + OPC_MXU__POOL20 = 0x39, + OPC_MXU__POOL21 = 0x3A, + OPC_MXU_Q16SCOP = 0x3B, + OPC_MXU_Q8MADL = 0x3C, + OPC_MXU_S32SFL = 0x3D, + OPC_MXU_Q8SAD = 0x3E, + /* not assigned 0x3F */ +}; + + +/* + * MXU pool 00 + */ +enum { + OPC_MXU_S32MAX = 0x00, + OPC_MXU_S32MIN = 0x01, + OPC_MXU_D16MAX = 0x02, + OPC_MXU_D16MIN = 0x03, + OPC_MXU_Q8MAX = 0x04, + OPC_MXU_Q8MIN = 0x05, + OPC_MXU_Q8SLT = 0x06, + OPC_MXU_Q8SLTU = 0x07, +}; + +/* + * MXU pool 01 + */ +enum { + OPC_MXU_S32SLT = 0x00, + OPC_MXU_D16SLT = 0x01, + OPC_MXU_D16AVG = 0x02, + OPC_MXU_D16AVGR = 0x03, + OPC_MXU_Q8AVG = 0x04, + OPC_MXU_Q8AVGR = 0x05, + OPC_MXU_Q8ADD = 0x07, +}; + +/* + * MXU pool 02 + */ +enum { + OPC_MXU_S32CPS = 0x00, + OPC_MXU_D16CPS = 0x02, + OPC_MXU_Q8ABD = 0x04, + OPC_MXU_Q16SAT = 0x06, +}; + +/* + * MXU pool 03 + */ +enum { + OPC_MXU_D16MULF = 
0x00, + OPC_MXU_D16MULE = 0x01, +}; + +/* + * MXU pool 04 + */ +enum { + OPC_MXU_S32LDD = 0x00, + OPC_MXU_S32LDDR = 0x01, +}; + +/* + * MXU pool 05 + */ +enum { + OPC_MXU_S32STD = 0x00, + OPC_MXU_S32STDR = 0x01, +}; + +/* + * MXU pool 06 + */ +enum { + OPC_MXU_S32LDDV = 0x00, + OPC_MXU_S32LDDVR = 0x01, +}; + +/* + * MXU pool 07 + */ +enum { + OPC_MXU_S32STDV = 0x00, + OPC_MXU_S32STDVR = 0x01, +}; + +/* + * MXU pool 08 + */ +enum { + OPC_MXU_S32LDI = 0x00, + OPC_MXU_S32LDIR = 0x01, +}; + +/* + * MXU pool 09 + */ +enum { + OPC_MXU_S32SDI = 0x00, + OPC_MXU_S32SDIR = 0x01, +}; + +/* + * MXU pool 10 + */ +enum { + OPC_MXU_S32LDIV = 0x00, + OPC_MXU_S32LDIVR = 0x01, +}; + +/* + * MXU pool 11 + */ +enum { + OPC_MXU_S32SDIV = 0x00, + OPC_MXU_S32SDIVR = 0x01, +}; + +/* + * MXU pool 12 + */ +enum { + OPC_MXU_D32ACC = 0x00, + OPC_MXU_D32ACCM = 0x01, + OPC_MXU_D32ASUM = 0x02, +}; + +/* + * MXU pool 13 + */ +enum { + OPC_MXU_Q16ACC = 0x00, + OPC_MXU_Q16ACCM = 0x01, + OPC_MXU_Q16ASUM = 0x02, +}; + +/* + * MXU pool 14 + */ +enum { + OPC_MXU_Q8ADDE = 0x00, + OPC_MXU_D8SUM = 0x01, + OPC_MXU_D8SUMC = 0x02, +}; + +/* + * MXU pool 15 + */ +enum { + OPC_MXU_S32MUL = 0x00, + OPC_MXU_S32MULU = 0x01, + OPC_MXU_S32EXTR = 0x02, + OPC_MXU_S32EXTRV = 0x03, +}; + +/* + * MXU pool 16 + */ +enum { + OPC_MXU_D32SARW = 0x00, + OPC_MXU_S32ALN = 0x01, + OPC_MXU_S32ALNI = 0x02, + OPC_MXU_S32LUI = 0x03, + OPC_MXU_S32NOR = 0x04, + OPC_MXU_S32AND = 0x05, + OPC_MXU_S32OR = 0x06, + OPC_MXU_S32XOR = 0x07, +}; + +/* + * MXU pool 17 + */ +enum { + OPC_MXU_LXB = 0x00, + OPC_MXU_LXH = 0x01, + OPC_MXU_LXW = 0x03, + OPC_MXU_LXBU = 0x04, + OPC_MXU_LXHU = 0x05, +}; + +/* + * MXU pool 18 + */ +enum { + OPC_MXU_D32SLLV = 0x00, + OPC_MXU_D32SLRV = 0x01, + OPC_MXU_D32SARV = 0x03, + OPC_MXU_Q16SLLV = 0x04, + OPC_MXU_Q16SLRV = 0x05, + OPC_MXU_Q16SARV = 0x07, +}; + +/* + * MXU pool 19 + */ +enum { + OPC_MXU_Q8MUL = 0x00, + OPC_MXU_Q8MULSU = 0x01, +}; + +/* + * MXU pool 20 + */ +enum { + OPC_MXU_Q8MOVZ = 0x00, + OPC_MXU_Q8MOVN = 0x01, + OPC_MXU_D16MOVZ = 0x02, + OPC_MXU_D16MOVN = 0x03, + OPC_MXU_S32MOVZ = 0x04, + OPC_MXU_S32MOVN = 0x05, +}; + +/* + * MXU pool 21 + */ +enum { + OPC_MXU_Q8MAC = 0x00, + OPC_MXU_Q8MACSU = 0x01, +}; + +/* + * Overview of the TX79-specific instruction set + * ============================================= + * + * The R5900 and the C790 have 128-bit wide GPRs, where the upper 64 bits + * are only used by the specific quadword (128-bit) LQ/SQ load/store + * instructions and certain multimedia instructions (MMIs). These MMIs + * configure the 128-bit data path as two 64-bit, four 32-bit, eight 16-bit + * or sixteen 8-bit paths. 
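+ *
+ * For example, the MMI PADDH instruction (listed below) treats each
+ * 128-bit register as eight independent 16-bit lanes and adds them
+ * pairwise; a host-side sketch of that lane arithmetic (an illustration
+ * only, not how the translator emits it):
+ *
+ *   static void mmi_paddh(uint16_t rd[8],
+ *                         const uint16_t rs[8], const uint16_t rt[8])
+ *   {
+ *       for (int i = 0; i < 8; i++) {
+ *           rd[i] = (uint16_t)(rs[i] + rt[i]);   // per-lane, wrapping
+ *       }
+ *   }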
+ * + * Reference: + * + * The Toshiba TX System RISC TX79 Core Architecture manual, + * https://wiki.qemu.org/File:C790.pdf + * + * Three-Operand Multiply and Multiply-Add (4 instructions) + * -------------------------------------------------------- + * MADD [rd,] rs, rt Multiply/Add + * MADDU [rd,] rs, rt Multiply/Add Unsigned + * MULT [rd,] rs, rt Multiply (3-operand) + * MULTU [rd,] rs, rt Multiply Unsigned (3-operand) + * + * Multiply Instructions for Pipeline 1 (10 instructions) + * ------------------------------------------------------ + * MULT1 [rd,] rs, rt Multiply Pipeline 1 + * MULTU1 [rd,] rs, rt Multiply Unsigned Pipeline 1 + * DIV1 rs, rt Divide Pipeline 1 + * DIVU1 rs, rt Divide Unsigned Pipeline 1 + * MADD1 [rd,] rs, rt Multiply-Add Pipeline 1 + * MADDU1 [rd,] rs, rt Multiply-Add Unsigned Pipeline 1 + * MFHI1 rd Move From HI1 Register + * MFLO1 rd Move From LO1 Register + * MTHI1 rs Move To HI1 Register + * MTLO1 rs Move To LO1 Register + * + * Arithmetic (19 instructions) + * ---------------------------- + * PADDB rd, rs, rt Parallel Add Byte + * PSUBB rd, rs, rt Parallel Subtract Byte + * PADDH rd, rs, rt Parallel Add Halfword + * PSUBH rd, rs, rt Parallel Subtract Halfword + * PADDW rd, rs, rt Parallel Add Word + * PSUBW rd, rs, rt Parallel Subtract Word + * PADSBH rd, rs, rt Parallel Add/Subtract Halfword + * PADDSB rd, rs, rt Parallel Add with Signed Saturation Byte + * PSUBSB rd, rs, rt Parallel Subtract with Signed Saturation Byte + * PADDSH rd, rs, rt Parallel Add with Signed Saturation Halfword + * PSUBSH rd, rs, rt Parallel Subtract with Signed Saturation Halfword + * PADDSW rd, rs, rt Parallel Add with Signed Saturation Word + * PSUBSW rd, rs, rt Parallel Subtract with Signed Saturation Word + * PADDUB rd, rs, rt Parallel Add with Unsigned saturation Byte + * PSUBUB rd, rs, rt Parallel Subtract with Unsigned saturation Byte + * PADDUH rd, rs, rt Parallel Add with Unsigned saturation Halfword + * PSUBUH rd, rs, rt Parallel Subtract with Unsigned saturation Halfword + * PADDUW rd, rs, rt Parallel Add with Unsigned saturation Word + * PSUBUW rd, rs, rt Parallel Subtract with Unsigned saturation Word + * + * Min/Max (4 instructions) + * ------------------------ + * PMAXH rd, rs, rt Parallel Maximum Halfword + * PMINH rd, rs, rt Parallel Minimum Halfword + * PMAXW rd, rs, rt Parallel Maximum Word + * PMINW rd, rs, rt Parallel Minimum Word + * + * Absolute (2 instructions) + * ------------------------- + * PABSH rd, rt Parallel Absolute Halfword + * PABSW rd, rt Parallel Absolute Word + * + * Logical (4 instructions) + * ------------------------ + * PAND rd, rs, rt Parallel AND + * POR rd, rs, rt Parallel OR + * PXOR rd, rs, rt Parallel XOR + * PNOR rd, rs, rt Parallel NOR + * + * Shift (9 instructions) + * ---------------------- + * PSLLH rd, rt, sa Parallel Shift Left Logical Halfword + * PSRLH rd, rt, sa Parallel Shift Right Logical Halfword + * PSRAH rd, rt, sa Parallel Shift Right Arithmetic Halfword + * PSLLW rd, rt, sa Parallel Shift Left Logical Word + * PSRLW rd, rt, sa Parallel Shift Right Logical Word + * PSRAW rd, rt, sa Parallel Shift Right Arithmetic Word + * PSLLVW rd, rt, rs Parallel Shift Left Logical Variable Word + * PSRLVW rd, rt, rs Parallel Shift Right Logical Variable Word + * PSRAVW rd, rt, rs Parallel Shift Right Arithmetic Variable Word + * + * Compare (6 instructions) + * ------------------------ + * PCGTB rd, rs, rt Parallel Compare for Greater Than Byte + * PCEQB rd, rs, rt Parallel Compare for Equal Byte + * PCGTH rd, rs, rt Parallel 
Compare for Greater Than Halfword + * PCEQH rd, rs, rt Parallel Compare for Equal Halfword + * PCGTW rd, rs, rt Parallel Compare for Greater Than Word + * PCEQW rd, rs, rt Parallel Compare for Equal Word + * + * LZC (1 instruction) + * ------------------- + * PLZCW rd, rs Parallel Leading Zero or One Count Word + * + * Quadword Load and Store (2 instructions) + * ---------------------------------------- + * LQ rt, offset(base) Load Quadword + * SQ rt, offset(base) Store Quadword + * + * Multiply and Divide (19 instructions) + * ------------------------------------- + * PMULTW rd, rs, rt Parallel Multiply Word + * PMULTUW rd, rs, rt Parallel Multiply Unsigned Word + * PDIVW rs, rt Parallel Divide Word + * PDIVUW rs, rt Parallel Divide Unsigned Word + * PMADDW rd, rs, rt Parallel Multiply-Add Word + * PMADDUW rd, rs, rt Parallel Multiply-Add Unsigned Word + * PMSUBW rd, rs, rt Parallel Multiply-Subtract Word + * PMULTH rd, rs, rt Parallel Multiply Halfword + * PMADDH rd, rs, rt Parallel Multiply-Add Halfword + * PMSUBH rd, rs, rt Parallel Multiply-Subtract Halfword + * PHMADH rd, rs, rt Parallel Horizontal Multiply-Add Halfword + * PHMSBH rd, rs, rt Parallel Horizontal Multiply-Subtract Halfword + * PDIVBW rs, rt Parallel Divide Broadcast Word + * PMFHI rd Parallel Move From HI Register + * PMFLO rd Parallel Move From LO Register + * PMTHI rs Parallel Move To HI Register + * PMTLO rs Parallel Move To LO Register + * PMFHL rd Parallel Move From HI/LO Register + * PMTHL rs Parallel Move To HI/LO Register + * + * Pack/Extend (11 instructions) + * ----------------------------- + * PPAC5 rd, rt Parallel Pack to 5 bits + * PPACB rd, rs, rt Parallel Pack to Byte + * PPACH rd, rs, rt Parallel Pack to Halfword + * PPACW rd, rs, rt Parallel Pack to Word + * PEXT5 rd, rt Parallel Extend Upper from 5 bits + * PEXTUB rd, rs, rt Parallel Extend Upper from Byte + * PEXTLB rd, rs, rt Parallel Extend Lower from Byte + * PEXTUH rd, rs, rt Parallel Extend Upper from Halfword + * PEXTLH rd, rs, rt Parallel Extend Lower from Halfword + * PEXTUW rd, rs, rt Parallel Extend Upper from Word + * PEXTLW rd, rs, rt Parallel Extend Lower from Word + * + * Others (16 instructions) + * ------------------------ + * PCPYH rd, rt Parallel Copy Halfword + * PCPYLD rd, rs, rt Parallel Copy Lower Doubleword + * PCPYUD rd, rs, rt Parallel Copy Upper Doubleword + * PREVH rd, rt Parallel Reverse Halfword + * PINTH rd, rs, rt Parallel Interleave Halfword + * PINTEH rd, rs, rt Parallel Interleave Even Halfword + * PEXEH rd, rt Parallel Exchange Even Halfword + * PEXCH rd, rt Parallel Exchange Center Halfword + * PEXEW rd, rt Parallel Exchange Even Word + * PEXCW rd, rt Parallel Exchange Center Word + * QFSRV rd, rs, rt Quadword Funnel Shift Right Variable + * MFSA rd Move from Shift Amount Register + * MTSA rs Move to Shift Amount Register + * MTSAB rs, immediate Move Byte Count to Shift Amount Register + * MTSAH rs, immediate Move Halfword Count to Shift Amount Register + * PROT3W rd, rt Parallel Rotate 3 Words + * + * MMI (MultiMedia Instruction) encodings + * ====================================== + * + * MMI instructions encoding table keys: + * + * * This code is reserved for future use. An attempt to execute it + * causes a Reserved Instruction exception. + * % This code indicates an instruction class. The instruction word + * must be further decoded by examining additional tables that show + * the values for other instruction fields. 
+ * # This code is reserved for the unsupported instructions DMULT, + * DMULTU, DDIV, DDIVU, LL, LLD, SC, SCD, LWC2 and SWC2. An attempt + * to execute it causes a Reserved Instruction exception. + * + * MMI instructions encoded by opcode field (MMI, LQ, SQ): + * + * 31 26 0 + * +--------+----------------------------------------+ + * | opcode | | + * +--------+----------------------------------------+ + * + * opcode bits 28..26 + * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 + * 31..29 | 000 | 001 | 010 | 011 | 100 | 101 | 110 | 111 + * -------+-------+-------+-------+-------+-------+-------+-------+------- + * 0 000 |SPECIAL| REGIMM| J | JAL | BEQ | BNE | BLEZ | BGTZ + * 1 001 | ADDI | ADDIU | SLTI | SLTIU | ANDI | ORI | XORI | LUI + * 2 010 | COP0 | COP1 | * | * | BEQL | BNEL | BLEZL | BGTZL + * 3 011 | DADDI | DADDIU| LDL | LDR | MMI% | * | LQ | SQ + * 4 100 | LB | LH | LWL | LW | LBU | LHU | LWR | LWU + * 5 101 | SB | SH | SWL | SW | SDL | SDR | SWR | CACHE + * 6 110 | # | LWC1 | # | PREF | # | LDC1 | # | LD + * 7 111 | # | SWC1 | # | * | # | SDC1 | # | SD + */ + +enum { + MMI_OPC_CLASS_MMI = 0x1C << 26, /* Same as OPC_SPECIAL2 */ + MMI_OPC_LQ = 0x1E << 26, /* Same as OPC_MSA */ + MMI_OPC_SQ = 0x1F << 26, /* Same as OPC_SPECIAL3 */ +}; + +/* + * MMI instructions with opcode field = MMI: + * + * 31 26 5 0 + * +--------+-------------------------------+--------+ + * | MMI | |function| + * +--------+-------------------------------+--------+ + * + * function bits 2..0 + * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 + * 5..3 | 000 | 001 | 010 | 011 | 100 | 101 | 110 | 111 + * -------+-------+-------+-------+-------+-------+-------+-------+------- + * 0 000 | MADD | MADDU | * | * | PLZCW | * | * | * + * 1 001 | MMI0% | MMI2% | * | * | * | * | * | * + * 2 010 | MFHI1 | MTHI1 | MFLO1 | MTLO1 | * | * | * | * + * 3 011 | MULT1 | MULTU1| DIV1 | DIVU1 | * | * | * | * + * 4 100 | MADD1 | MADDU1| * | * | * | * | * | * + * 5 101 | MMI1% | MMI3% | * | * | * | * | * | * + * 6 110 | PMFHL | PMTHL | * | * | PSLLH | * | PSRLH | PSRAH + * 7 111 | * | * | * | * | PSLLW | * | PSRLW | PSRAW + */ + +#define MASK_MMI(op) (MASK_OP_MAJOR(op) | ((op) & 0x3F)) +enum { + MMI_OPC_MADD = 0x00 | MMI_OPC_CLASS_MMI, /* Same as OPC_MADD */ + MMI_OPC_MADDU = 0x01 | MMI_OPC_CLASS_MMI, /* Same as OPC_MADDU */ + MMI_OPC_PLZCW = 0x04 | MMI_OPC_CLASS_MMI, + MMI_OPC_CLASS_MMI0 = 0x08 | MMI_OPC_CLASS_MMI, + MMI_OPC_CLASS_MMI2 = 0x09 | MMI_OPC_CLASS_MMI, + MMI_OPC_MFHI1 = 0x10 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MFHI */ + MMI_OPC_MTHI1 = 0x11 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MTHI */ + MMI_OPC_MFLO1 = 0x12 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MFLO */ + MMI_OPC_MTLO1 = 0x13 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MTLO */ + MMI_OPC_MULT1 = 0x18 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MULT */ + MMI_OPC_MULTU1 = 0x19 | MMI_OPC_CLASS_MMI, /* Same min. 
as OPC_MULTU */ + MMI_OPC_DIV1 = 0x1A | MMI_OPC_CLASS_MMI, /* Same minor as OPC_DIV */ + MMI_OPC_DIVU1 = 0x1B | MMI_OPC_CLASS_MMI, /* Same minor as OPC_DIVU */ + MMI_OPC_MADD1 = 0x20 | MMI_OPC_CLASS_MMI, + MMI_OPC_MADDU1 = 0x21 | MMI_OPC_CLASS_MMI, + MMI_OPC_CLASS_MMI1 = 0x28 | MMI_OPC_CLASS_MMI, + MMI_OPC_CLASS_MMI3 = 0x29 | MMI_OPC_CLASS_MMI, + MMI_OPC_PMFHL = 0x30 | MMI_OPC_CLASS_MMI, + MMI_OPC_PMTHL = 0x31 | MMI_OPC_CLASS_MMI, + MMI_OPC_PSLLH = 0x34 | MMI_OPC_CLASS_MMI, + MMI_OPC_PSRLH = 0x36 | MMI_OPC_CLASS_MMI, + MMI_OPC_PSRAH = 0x37 | MMI_OPC_CLASS_MMI, + MMI_OPC_PSLLW = 0x3C | MMI_OPC_CLASS_MMI, + MMI_OPC_PSRLW = 0x3E | MMI_OPC_CLASS_MMI, + MMI_OPC_PSRAW = 0x3F | MMI_OPC_CLASS_MMI, +}; + +/* + * MMI instructions with opcode field = MMI and bits 5..0 = MMI0: + * + * 31 26 10 6 5 0 + * +--------+----------------------+--------+--------+ + * | MMI | |function| MMI0 | + * +--------+----------------------+--------+--------+ + * + * function bits 7..6 + * bits | 0 | 1 | 2 | 3 + * 10..8 | 00 | 01 | 10 | 11 + * -------+-------+-------+-------+------- + * 0 000 | PADDW | PSUBW | PCGTW | PMAXW + * 1 001 | PADDH | PSUBH | PCGTH | PMAXH + * 2 010 | PADDB | PSUBB | PCGTB | * + * 3 011 | * | * | * | * + * 4 100 | PADDSW| PSUBSW| PEXTLW| PPACW + * 5 101 | PADDSH| PSUBSH| PEXTLH| PPACH + * 6 110 | PADDSB| PSUBSB| PEXTLB| PPACB + * 7 111 | * | * | PEXT5 | PPAC5 + */ + +#define MASK_MMI0(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) +enum { + MMI_OPC_0_PADDW = (0x00 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PSUBW = (0x01 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PCGTW = (0x02 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PMAXW = (0x03 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PADDH = (0x04 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PSUBH = (0x05 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PCGTH = (0x06 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PMAXH = (0x07 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PADDB = (0x08 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PSUBB = (0x09 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PCGTB = (0x0A << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PADDSW = (0x10 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PSUBSW = (0x11 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PEXTLW = (0x12 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PPACW = (0x13 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PADDSH = (0x14 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PSUBSH = (0x15 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PEXTLH = (0x16 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PPACH = (0x17 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PADDSB = (0x18 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PSUBSB = (0x19 << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PEXTLB = (0x1A << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PPACB = (0x1B << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PEXT5 = (0x1E << 6) | MMI_OPC_CLASS_MMI0, + MMI_OPC_0_PPAC5 = (0x1F << 6) | MMI_OPC_CLASS_MMI0, +}; + +/* + * MMI instructions with opcode field = MMI and bits 5..0 = MMI1: + * + * 31 26 10 6 5 0 + * +--------+----------------------+--------+--------+ + * | MMI | |function| MMI1 | + * +--------+----------------------+--------+--------+ + * + * function bits 7..6 + * bits | 0 | 1 | 2 | 3 + * 10..8 | 00 | 01 | 10 | 11 + * -------+-------+-------+-------+------- + * 0 000 | * | PABSW | PCEQW | PMINW + * 1 001 | PADSBH| PABSH | PCEQH | PMINH + * 2 010 | * | * | PCEQB | * + * 3 011 | * | * | * | * + * 4 100 | PADDUW| PSUBUW| PEXTUW| * + * 5 101 | PADDUH| PSUBUH| PEXTUH| * + * 6 110 | PADDUB| PSUBUB| PEXTUB| QFSRV + * 7 111 | * | * | * | * + */ + +#define MASK_MMI1(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) +enum { + MMI_OPC_1_PABSW = 
(0x01 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PCEQW = (0x02 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PMINW = (0x03 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PADSBH = (0x04 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PABSH = (0x05 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PCEQH = (0x06 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PMINH = (0x07 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PCEQB = (0x0A << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PADDUW = (0x10 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PSUBUW = (0x11 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PEXTUW = (0x12 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PADDUH = (0x14 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PSUBUH = (0x15 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PEXTUH = (0x16 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PADDUB = (0x18 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PSUBUB = (0x19 << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_PEXTUB = (0x1A << 6) | MMI_OPC_CLASS_MMI1, + MMI_OPC_1_QFSRV = (0x1B << 6) | MMI_OPC_CLASS_MMI1, +}; + +/* + * MMI instructions with opcode field = MMI and bits 5..0 = MMI2: + * + * 31 26 10 6 5 0 + * +--------+----------------------+--------+--------+ + * | MMI | |function| MMI2 | + * +--------+----------------------+--------+--------+ + * + * function bits 7..6 + * bits | 0 | 1 | 2 | 3 + * 10..8 | 00 | 01 | 10 | 11 + * -------+-------+-------+-------+------- + * 0 000 | PMADDW| * | PSLLVW| PSRLVW + * 1 001 | PMSUBW| * | * | * + * 2 010 | PMFHI | PMFLO | PINTH | * + * 3 011 | PMULTW| PDIVW | PCPYLD| * + * 4 100 | PMADDH| PHMADH| PAND | PXOR + * 5 101 | PMSUBH| PHMSBH| * | * + * 6 110 | * | * | PEXEH | PREVH + * 7 111 | PMULTH| PDIVBW| PEXEW | PROT3W + */ + +#define MASK_MMI2(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) +enum { + MMI_OPC_2_PMADDW = (0x00 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PSLLVW = (0x02 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PSRLVW = (0x03 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PMSUBW = (0x04 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PMFHI = (0x08 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PMFLO = (0x09 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PINTH = (0x0A << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PMULTW = (0x0C << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PDIVW = (0x0D << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PCPYLD = (0x0E << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PMADDH = (0x10 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PHMADH = (0x11 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PAND = (0x12 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PXOR = (0x13 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PMSUBH = (0x14 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PHMSBH = (0x15 << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PEXEH = (0x1A << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PREVH = (0x1B << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PMULTH = (0x1C << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PDIVBW = (0x1D << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PEXEW = (0x1E << 6) | MMI_OPC_CLASS_MMI2, + MMI_OPC_2_PROT3W = (0x1F << 6) | MMI_OPC_CLASS_MMI2, +}; + +/* + * MMI instructions with opcode field = MMI and bits 5..0 = MMI3: + * + * 31 26 10 6 5 0 + * +--------+----------------------+--------+--------+ + * | MMI | |function| MMI3 | + * +--------+----------------------+--------+--------+ + * + * function bits 7..6 + * bits | 0 | 1 | 2 | 3 + * 10..8 | 00 | 01 | 10 | 11 + * -------+-------+-------+-------+------- + * 0 000 |PMADDUW| * | * | PSRAVW + * 1 001 | * | * | * | * + * 2 010 | PMTHI | PMTLO | PINTEH| * + * 3 011 |PMULTUW| PDIVUW| PCPYUD| * + * 4 100 | * | * | POR | PNOR + * 5 101 | * | * | * | * + * 6 110 | * | * | PEXCH | PCPYH + * 7 111 | * | * | PEXCW | * + */ + +#define 
MASK_MMI3(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) +enum { + MMI_OPC_3_PMADDUW = (0x00 << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PSRAVW = (0x03 << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PMTHI = (0x08 << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PMTLO = (0x09 << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PINTEH = (0x0A << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PMULTUW = (0x0C << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PDIVUW = (0x0D << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PCPYUD = (0x0E << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_POR = (0x12 << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PNOR = (0x13 << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PEXCH = (0x1A << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PCPYH = (0x1B << 6) | MMI_OPC_CLASS_MMI3, + MMI_OPC_3_PEXCW = (0x1E << 6) | MMI_OPC_CLASS_MMI3, +}; + +#include "exec/gen-icount.h" + +#define gen_helper_0e0i(name, arg) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while (0) + +#define gen_helper_0e1i(name, arg1, arg2) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg2); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while (0) + +#define gen_helper_1e0i(name, ret, arg1) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg1); \ + gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while (0) + +#define gen_helper_1e1i(name, ret, arg1, arg2) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg2); \ + gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while (0) + +#define gen_helper_0e2i(name, arg1, arg2, arg3) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg3); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, arg2, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while (0) + +#define gen_helper_1e2i(name, ret, arg1, arg2, arg3) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg3); \ + gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, arg2, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while (0) + +#define gen_helper_0e3i(name, arg1, arg2, arg3, arg4) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg4); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, arg2, arg3, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while (0) + +typedef struct DisasContext { + DisasContextBase base; + target_ulong saved_pc; + target_ulong page_start; + uint32_t opcode; + uint64_t insn_flags; + int32_t CP0_Config1; + int32_t CP0_Config2; + int32_t CP0_Config3; + int32_t CP0_Config5; + /* Routine used to access memory */ + int mem_idx; + MemOp default_tcg_memop_mask; + uint32_t hflags, saved_hflags; + target_ulong btarget; + bool ulri; + int kscrexist; + bool rxi; + int ie; + bool bi; + bool bp; + uint64_t PAMask; + bool mvh; + bool eva; + bool sc; + int CP0_LLAddr_shift; + bool ps; + bool vp; + bool cmgcr; + bool mrp; + bool nan2008; + bool abs2008; + bool saar; + bool mi; + int gi; + + // Unicorn + struct uc_struct *uc; +} DisasContext; + +#define DISAS_STOP DISAS_TARGET_0 +#define DISAS_EXIT DISAS_TARGET_1 + +static const char * const regnames[] = { + "r0", "at", "v0", "v1", "a0", "a1", "a2", "a3", + "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", + "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra", +}; + +static const char * const 
regnames_HI[] = { + "HI0", "HI1", "HI2", "HI3", +}; + +static const char * const regnames_LO[] = { + "LO0", "LO1", "LO2", "LO3", +}; + +static const char * const msaregnames[] = { + "w0.d0", "w0.d1", "w1.d0", "w1.d1", + "w2.d0", "w2.d1", "w3.d0", "w3.d1", + "w4.d0", "w4.d1", "w5.d0", "w5.d1", + "w6.d0", "w6.d1", "w7.d0", "w7.d1", + "w8.d0", "w8.d1", "w9.d0", "w9.d1", + "w10.d0", "w10.d1", "w11.d0", "w11.d1", + "w12.d0", "w12.d1", "w13.d0", "w13.d1", + "w14.d0", "w14.d1", "w15.d0", "w15.d1", + "w16.d0", "w16.d1", "w17.d0", "w17.d1", + "w18.d0", "w18.d1", "w19.d0", "w19.d1", + "w20.d0", "w20.d1", "w21.d0", "w21.d1", + "w22.d0", "w22.d1", "w23.d0", "w23.d1", + "w24.d0", "w24.d1", "w25.d0", "w25.d1", + "w26.d0", "w26.d1", "w27.d0", "w27.d1", + "w28.d0", "w28.d1", "w29.d0", "w29.d1", + "w30.d0", "w30.d1", "w31.d0", "w31.d1", +}; + +#if !defined(TARGET_MIPS64) +static const char * const mxuregnames[] = { + "XR1", "XR2", "XR3", "XR4", "XR5", "XR6", "XR7", "XR8", + "XR9", "XR10", "XR11", "XR12", "XR13", "XR14", "XR15", "MXU_CR", +}; +#endif + +#define LOG_DISAS(...) \ + do { \ + if (MIPS_DEBUG_DISAS) { \ + qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \ + } \ + } while (0) + +#define MIPS_INVAL(op) \ + do { \ + if (MIPS_DEBUG_DISAS) { \ + qemu_log_mask(CPU_LOG_TB_IN_ASM, \ + TARGET_FMT_lx ": %08x Invalid %s %03x %03x %03x\n", \ + ctx->base.pc_next, ctx->opcode, op, \ + ctx->opcode >> 26, ctx->opcode & 0x3F, \ + ((ctx->opcode >> 16) & 0x1F)); \ + } \ + } while (0) + +/* General purpose registers moves. */ +static inline void gen_load_gpr(TCGContext *tcg_ctx, TCGv t, int reg) +{ + if (reg == 0) { + tcg_gen_movi_tl(tcg_ctx, t, 0); + } else { + tcg_gen_mov_tl(tcg_ctx, t, tcg_ctx->cpu_gpr[reg]); + } +} + +static inline void gen_store_gpr(TCGContext *tcg_ctx, TCGv t, int reg) +{ + if (reg != 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], t); + } +} + +/* Moves to/from shadow registers. 
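+ * The helpers below select the set with the CP0 SRSCtl.PSS (previous
+ * shadow set) field. Each shadow set is 32 GPRs stored back to back,
+ * so the slot being read or written lives at byte offset
+ *
+ *   ((SRSCtl >> CP0SRSCtl_PSS) & 0xf) * 32 * sizeof(target_ulong)
+ *       + reg * sizeof(target_ulong)
+ *
+ * relative to cpu_env; the tcg_gen_shri/andi/muli sequence computes
+ * exactly this address arithmetic at translation time.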
*/ +static inline void gen_load_srsgpr(TCGContext *tcg_ctx, int from, int to) +{ + TCGv t0 = tcg_temp_new(tcg_ctx); + + if (from == 0) { + tcg_gen_movi_tl(tcg_ctx, t0, 0); + } else { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_ptr addr = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl)); + tcg_gen_shri_i32(tcg_ctx, t2, t2, CP0SRSCtl_PSS); + tcg_gen_andi_i32(tcg_ctx, t2, t2, 0xf); + tcg_gen_muli_i32(tcg_ctx, t2, t2, sizeof(target_ulong) * 32); + tcg_gen_ext_i32_ptr(tcg_ctx, addr, t2); + tcg_gen_add_ptr(tcg_ctx, addr, tcg_ctx->cpu_env, addr); + + tcg_gen_ld_tl(tcg_ctx, t0, addr, sizeof(target_ulong) * from); + tcg_temp_free_ptr(tcg_ctx, addr); + tcg_temp_free_i32(tcg_ctx, t2); + } + gen_store_gpr(tcg_ctx, t0, to); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_store_srsgpr(TCGContext *tcg_ctx, int from, int to) +{ + if (to != 0) { + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_ptr addr = tcg_temp_new_ptr(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, from); + tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl)); + tcg_gen_shri_i32(tcg_ctx, t2, t2, CP0SRSCtl_PSS); + tcg_gen_andi_i32(tcg_ctx, t2, t2, 0xf); + tcg_gen_muli_i32(tcg_ctx, t2, t2, sizeof(target_ulong) * 32); + tcg_gen_ext_i32_ptr(tcg_ctx, addr, t2); + tcg_gen_add_ptr(tcg_ctx, addr, tcg_ctx->cpu_env, addr); + + tcg_gen_st_tl(tcg_ctx, t0, addr, sizeof(target_ulong) * to); + tcg_temp_free_ptr(tcg_ctx, addr); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t0); + } +} + +#if !defined(TARGET_MIPS64) +/* MXU General purpose registers moves. */ +static inline void gen_load_mxu_gpr(TCGContext *tcg_ctx, TCGv t, unsigned int reg) +{ + if (reg == 0) { + tcg_gen_movi_tl(tcg_ctx, t, 0); + } else if (reg <= 15) { + tcg_gen_mov_tl(tcg_ctx, t, tcg_ctx->mxu_gpr[reg - 1]); + } +} + +static inline void gen_store_mxu_gpr(TCGContext *tcg_ctx, TCGv t, unsigned int reg) +{ + if (reg > 0 && reg <= 15) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->mxu_gpr[reg - 1], t); + } +} + +/* MXU control register moves. */ +static inline void gen_load_mxu_cr(TCGContext *tcg_ctx, TCGv t) +{ + tcg_gen_mov_tl(tcg_ctx, t, tcg_ctx->mxu_CR); +} + +static inline void gen_store_mxu_cr(TCGContext *tcg_ctx, TCGv t) +{ + /* TODO: Add handling of RW rules for MXU_CR. 
*/ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->mxu_CR, t); +} +#endif + + +/* Tests */ +static inline void gen_save_pc(TCGContext *tcg_ctx, target_ulong pc) +{ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, pc); +} + +static inline void save_cpu_state(DisasContext *ctx, int do_save_pc) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + LOG_DISAS("hflags %08x saved %08x\n", ctx->hflags, ctx->saved_hflags); + if (do_save_pc && ctx->base.pc_next != ctx->saved_pc) { + gen_save_pc(tcg_ctx, ctx->base.pc_next); + ctx->saved_pc = ctx->base.pc_next; + } + if (ctx->hflags != ctx->saved_hflags) { + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->hflags, ctx->hflags); + ctx->saved_hflags = ctx->hflags; + switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) { + case MIPS_HFLAG_BR: + break; + case MIPS_HFLAG_BC: + case MIPS_HFLAG_BL: + case MIPS_HFLAG_B: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->btarget, ctx->btarget); + break; + } + } +} + +static inline void restore_cpu_state(CPUMIPSState *env, DisasContext *ctx) +{ + ctx->saved_hflags = ctx->hflags; + switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) { + case MIPS_HFLAG_BR: + break; + case MIPS_HFLAG_BC: + case MIPS_HFLAG_BL: + case MIPS_HFLAG_B: + ctx->btarget = env->btarget; + break; + } +} + +static inline void generate_exception_err(DisasContext *ctx, int excp, int err) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 texcp = tcg_const_i32(tcg_ctx, excp); + TCGv_i32 terr = tcg_const_i32(tcg_ctx, err); + save_cpu_state(ctx, 1); + gen_helper_raise_exception_err(tcg_ctx, tcg_ctx->cpu_env, texcp, terr); + tcg_temp_free_i32(tcg_ctx, terr); + tcg_temp_free_i32(tcg_ctx, texcp); + ctx->base.is_jmp = DISAS_NORETURN; +} + +static inline void generate_exception(DisasContext *ctx, int excp) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_0e0i(raise_exception, excp); +} + +static inline void generate_exception_end(DisasContext *ctx, int excp) +{ + generate_exception_err(ctx, excp, 0); +} + +/* Floating point register moves. 
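+ * The FPU register file is modelled as 32 64-bit elements (fpu_f64[]).
+ * With Status.FR = 1 (MIPS_HFLAG_F64) every FPR is one full element;
+ * with FR = 0 a 64-bit value occupies an even/odd pair, the even
+ * register supplying bits 31..0 and the odd one bits 63..32. As a
+ * sketch of what gen_load_fpr64() emits in the FR = 0 case:
+ *
+ *   t = (fpu_f64[reg & ~1] & 0xffffffffULL)           // even: low half
+ *       | ((fpu_f64[reg | 1] & 0xffffffffULL) << 32); // odd: high half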
*/ +static void gen_load_fpr32(DisasContext *ctx, TCGv_i32 t, int reg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->hflags & MIPS_HFLAG_FRE) { + generate_exception(ctx, EXCP_RI); + } + tcg_gen_extrl_i64_i32(tcg_ctx, t, tcg_ctx->fpu_f64[reg]); +} + +static void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t64; + if (ctx->hflags & MIPS_HFLAG_FRE) { + generate_exception(ctx, EXCP_RI); + } + t64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, t64, t); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], tcg_ctx->fpu_f64[reg], t64, 0, 32); + tcg_temp_free_i64(tcg_ctx, t64); +} + +static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->hflags & MIPS_HFLAG_F64) { + tcg_gen_extrh_i64_i32(tcg_ctx, t, tcg_ctx->fpu_f64[reg]); + } else { + gen_load_fpr32(ctx, t, reg | 1); + } +} + +static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->hflags & MIPS_HFLAG_F64) { + TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, t64, t); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], tcg_ctx->fpu_f64[reg], t64, 32, 32); + tcg_temp_free_i64(tcg_ctx, t64); + } else { + gen_store_fpr32(ctx, t, reg | 1); + } +} + +static void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->hflags & MIPS_HFLAG_F64) { + tcg_gen_mov_i64(tcg_ctx, t, tcg_ctx->fpu_f64[reg]); + } else { + tcg_gen_concat32_i64(tcg_ctx, t, tcg_ctx->fpu_f64[reg & ~1], tcg_ctx->fpu_f64[reg | 1]); + } +} + +static void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->hflags & MIPS_HFLAG_F64) { + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], t); + } else { + TCGv_i64 t0; + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg & ~1], tcg_ctx->fpu_f64[reg & ~1], t, 0, 32); + t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, t0, t, 32); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg | 1], tcg_ctx->fpu_f64[reg | 1], t0, 0, 32); + tcg_temp_free_i64(tcg_ctx, t0); + } +} + +static inline int get_fp_bit(int cc) +{ + if (cc) { + return 24 + cc; + } else { + return 23; + } +} + +/* Addresses computation */ +static inline void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, + TCGv arg1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_add_tl(tcg_ctx, ret, arg0, arg1); + +#if defined(TARGET_MIPS64) + if (ctx->hflags & MIPS_HFLAG_AWRAP) { + tcg_gen_ext32s_i64(tcg_ctx, ret, ret); + } +#endif +} + +static inline void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, + target_long ofs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_addi_tl(tcg_ctx, ret, base, ofs); + +#if defined(TARGET_MIPS64) + if (ctx->hflags & MIPS_HFLAG_AWRAP) { + tcg_gen_ext32s_i64(tcg_ctx, ret, ret); + } +#endif +} + +/* Addresses computation (translation time) */ +static target_long addr_add(DisasContext *ctx, target_long base, + target_long offset) +{ + target_long sum = base + offset; + +#if defined(TARGET_MIPS64) + if (ctx->hflags & MIPS_HFLAG_AWRAP) { + sum = (int32_t)sum; + } +#endif + return sum; +} + +/* Sign-extract the low 32-bits to a target_long. 
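+ * For example, with arg = 0x11223344_8899AABB on a 64-bit target,
+ * gen_move_low32() yields 0xFFFFFFFF_8899AABB (low half, sign bit set)
+ * and gen_move_high32() yields 0x00000000_11223344.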
 */
+static inline void gen_move_low32(TCGContext *tcg_ctx, TCGv ret, TCGv_i64 arg)
+{
+#if defined(TARGET_MIPS64)
+    tcg_gen_ext32s_i64(tcg_ctx, ret, arg);
+#else
+    tcg_gen_extrl_i64_i32(tcg_ctx, ret, arg);
+#endif
+}
+
+/* Sign-extract the high 32-bits to a target_long. */
+static inline void gen_move_high32(TCGContext *tcg_ctx, TCGv ret, TCGv_i64 arg)
+{
+#if defined(TARGET_MIPS64)
+    tcg_gen_sari_i64(tcg_ctx, ret, arg, 32);
+#else
+    tcg_gen_extrh_i64_i32(tcg_ctx, ret, arg);
+#endif
+}
+
+static inline void check_cp0_enabled(DisasContext *ctx)
+{
+    if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0))) {
+        generate_exception_err(ctx, EXCP_CpU, 0);
+    }
+}
+
+static inline void check_cp1_enabled(DisasContext *ctx)
+{
+    if (unlikely(!(ctx->hflags & MIPS_HFLAG_FPU))) {
+        generate_exception_err(ctx, EXCP_CpU, 1);
+    }
+}
+
+/*
+ * Verify that the processor is running with COP1X instructions enabled.
+ * This is associated with the nabla symbol in the MIPS32 and MIPS64
+ * opcode tables.
+ */
+static inline void check_cop1x(DisasContext *ctx)
+{
+    if (unlikely(!(ctx->hflags & MIPS_HFLAG_COP1X))) {
+        generate_exception_end(ctx, EXCP_RI);
+    }
+}
+
+/*
+ * Verify that the processor is running with 64-bit floating-point
+ * operations enabled.
+ */
+static inline void check_cp1_64bitmode(DisasContext *ctx)
+{
+    if (unlikely(~ctx->hflags & (MIPS_HFLAG_F64 | MIPS_HFLAG_COP1X))) {
+        generate_exception_end(ctx, EXCP_RI);
+    }
+}
+
+/*
+ * Verify that a floating point register is valid; an operation is not
+ * defined if bit 0 of any register specification is set and the FR bit in
+ * the Status register equals zero, since the register numbers specify an
+ * even-odd pair of adjacent coprocessor general registers. When the FR bit
+ * in the Status register equals one, both even and odd register numbers
+ * are valid. This limitation exists only for 64-bit wide (d,l,ps) registers.
+ *
+ * Multiple 64-bit wide registers can be checked by calling
+ * check_cp1_registers(ctx, freg1 | freg2 | ... | fregN);
+ */
+static inline void check_cp1_registers(DisasContext *ctx, int regs)
+{
+    if (unlikely(!(ctx->hflags & MIPS_HFLAG_F64) && (regs & 1))) {
+        generate_exception_end(ctx, EXCP_RI);
+    }
+}
+
+/*
+ * Verify that the processor is running with DSP instructions enabled.
+ * This is enabled by CP0 Status register MX(24) bit.
+ */
+static inline void check_dsp(DisasContext *ctx)
+{
+    if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP))) {
+        if (ctx->insn_flags & ASE_DSP) {
+            generate_exception_end(ctx, EXCP_DSPDIS);
+        } else {
+            generate_exception_end(ctx, EXCP_RI);
+        }
+    }
+}
+
+static inline void check_dsp_r2(DisasContext *ctx)
+{
+    if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP_R2))) {
+        if (ctx->insn_flags & ASE_DSP) {
+            generate_exception_end(ctx, EXCP_DSPDIS);
+        } else {
+            generate_exception_end(ctx, EXCP_RI);
+        }
+    }
+}
+
+static inline void check_dsp_r3(DisasContext *ctx)
+{
+    if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP_R3))) {
+        if (ctx->insn_flags & ASE_DSP) {
+            generate_exception_end(ctx, EXCP_DSPDIS);
+        } else {
+            generate_exception_end(ctx, EXCP_RI);
+        }
+    }
+}
+
+/*
+ * This code generates a "reserved instruction" exception if the
+ * CPU does not support the instruction set corresponding to flags.
+ */
+static inline void check_insn(DisasContext *ctx, uint64_t flags)
+{
+    if (unlikely(!(ctx->insn_flags & flags))) {
+        generate_exception_end(ctx, EXCP_RI);
+    }
+}
+
+/*
+ * This code generates a "reserved instruction" exception if the
+ * CPU has the corresponding flag set, which indicates that the
+ * instruction has been removed. 
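+ * For example, a decode path can reject an encoding that a newer ISA
+ * revision recycled, e.g. check_insn_opc_removed(ctx, ISA_MIPS32R6)
+ * raises RI for pre-R6-only instructions when translating for a
+ * Release 6 CPU (ISA_MIPS32R6 being one of the insn_flags bits used
+ * throughout this file).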
+ */ +static inline void check_insn_opc_removed(DisasContext *ctx, uint64_t flags) +{ + if (unlikely(ctx->insn_flags & flags)) { + generate_exception_end(ctx, EXCP_RI); + } +} + +/* + * The Linux kernel traps certain reserved instruction exceptions to + * emulate the corresponding instructions. QEMU is the kernel in user + * mode, so those traps are emulated by accepting the instructions. + * + * A reserved instruction exception is generated for flagged CPUs if + * QEMU runs in system mode. + */ +static inline void check_insn_opc_user_only(DisasContext *ctx, uint64_t flags) +{ + check_insn_opc_removed(ctx, flags); +} + +/* + * This code generates a "reserved instruction" exception if the + * CPU does not support 64-bit paired-single (PS) floating point data type. + */ +static inline void check_ps(DisasContext *ctx) +{ + if (unlikely(!ctx->ps)) { + generate_exception(ctx, EXCP_RI); + } + check_cp1_64bitmode(ctx); +} + +#ifdef TARGET_MIPS64 +/* + * This code generates a "reserved instruction" exception if 64-bit + * instructions are not enabled. + */ +static inline void check_mips_64(DisasContext *ctx) +{ + if (unlikely(!(ctx->hflags & MIPS_HFLAG_64))) { + generate_exception_end(ctx, EXCP_RI); + } +} +#endif + +static inline void check_mvh(DisasContext *ctx) +{ + if (unlikely(!ctx->mvh)) { + generate_exception(ctx, EXCP_RI); + } +} + +/* + * This code generates a "reserved instruction" exception if the + * Config5 XNP bit is set. + */ +static inline void check_xnp(DisasContext *ctx) +{ + if (unlikely(ctx->CP0_Config5 & (1 << CP0C5_XNP))) { + generate_exception_end(ctx, EXCP_RI); + } +} + +/* + * This code generates a "reserved instruction" exception if the + * Config3 PW bit is NOT set. + */ +static inline void check_pw(DisasContext *ctx) +{ + if (unlikely(!(ctx->CP0_Config3 & (1 << CP0C3_PW)))) { + generate_exception_end(ctx, EXCP_RI); + } +} + +/* + * This code generates a "reserved instruction" exception if the + * Config3 MT bit is NOT set. + */ +static inline void check_mt(DisasContext *ctx) +{ + if (unlikely(!(ctx->CP0_Config3 & (1 << CP0C3_MT)))) { + generate_exception_end(ctx, EXCP_RI); + } +} + +/* + * This code generates a "coprocessor unusable" exception if CP0 is not + * available, and, if that is not the case, generates a "reserved instruction" + * exception if the Config5 MT bit is NOT set. This is needed for availability + * control of some of MT ASE instructions. + */ +static inline void check_cp0_mt(DisasContext *ctx) +{ + if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0))) { + generate_exception_err(ctx, EXCP_CpU, 0); + } else { + if (unlikely(!(ctx->CP0_Config3 & (1 << CP0C3_MT)))) { + generate_exception_err(ctx, EXCP_RI, 0); + } + } +} + +/* + * This code generates a "reserved instruction" exception if the + * Config5 NMS bit is set. + */ +static inline void check_nms(DisasContext *ctx) +{ + if (unlikely(ctx->CP0_Config5 & (1 << CP0C5_NMS))) { + generate_exception_end(ctx, EXCP_RI); + } +} + +/* + * This code generates a "reserved instruction" exception if the + * Config5 NMS bit is set, and Config1 DL, Config1 IL, Config2 SL, + * Config2 TL, and Config5 L2C are unset. 
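+ * In other words, the check below raises RI exactly when
+ *
+ *   Config5.NMS && !Config1.DL && !Config1.IL &&
+ *   !Config2.SL && !Config2.TL && !Config5.L2C
+ *
+ * which (assuming the usual meaning of these config fields) describes
+ * a nanoMIPS-subset core that reports no caches at all.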
+ */ +static inline void check_nms_dl_il_sl_tl_l2c(DisasContext *ctx) +{ + if (unlikely((ctx->CP0_Config5 & (1 << CP0C5_NMS)) && + !(ctx->CP0_Config1 & (1 << CP0C1_DL)) && + !(ctx->CP0_Config1 & (1 << CP0C1_IL)) && + !(ctx->CP0_Config2 & (1 << CP0C2_SL)) && + !(ctx->CP0_Config2 & (1 << CP0C2_TL)) && + !(ctx->CP0_Config5 & (1 << CP0C5_L2C)))) { + generate_exception_end(ctx, EXCP_RI); + } +} + +/* + * This code generates a "reserved instruction" exception if the + * Config5 EVA bit is NOT set. + */ +static inline void check_eva(DisasContext *ctx) +{ + if (unlikely(!(ctx->CP0_Config5 & (1 << CP0C5_EVA)))) { + generate_exception_end(ctx, EXCP_RI); + } +} + + +/* + * Define small wrappers for gen_load_fpr* so that we have a uniform + * calling interface for 32 and 64-bit FPRs. No sense in changing + * all callers for gen_load_fpr32 when we need the CTX parameter for + * this one use. + */ +#define gen_ldcmp_fpr32(ctx, x, y) gen_load_fpr32(ctx, x, y) +#define gen_ldcmp_fpr64(ctx, x, y) gen_load_fpr64(ctx, x, y) +#define FOP_CONDS(type, abs, fmt, ifmt, bits) \ +static inline void gen_cmp ## type ## _ ## fmt(DisasContext *ctx, int n, \ + int ft, int fs, int cc) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i##bits fp0 = tcg_temp_new_i##bits(tcg_ctx); \ + TCGv_i##bits fp1 = tcg_temp_new_i##bits(tcg_ctx); \ + switch (ifmt) { \ + case FMT_PS: \ + check_ps(ctx); \ + break; \ + case FMT_D: \ + if (abs) { \ + check_cop1x(ctx); \ + } \ + check_cp1_registers(ctx, fs | ft); \ + break; \ + case FMT_S: \ + if (abs) { \ + check_cop1x(ctx); \ + } \ + break; \ + } \ + gen_ldcmp_fpr##bits(ctx, fp0, fs); \ + gen_ldcmp_fpr##bits(ctx, fp1, ft); \ + switch (n) { \ + case 0: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _f, fp0, fp1, cc); \ + break; \ + case 1: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _un, fp0, fp1, cc); \ + break; \ + case 2: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _eq, fp0, fp1, cc); \ + break; \ + case 3: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ueq, fp0, fp1, cc); \ + break; \ + case 4: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _olt, fp0, fp1, cc); \ + break; \ + case 5: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ult, fp0, fp1, cc); \ + break; \ + case 6: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ole, fp0, fp1, cc); \ + break; \ + case 7: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ule, fp0, fp1, cc); \ + break; \ + case 8: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _sf, fp0, fp1, cc); \ + break; \ + case 9: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngle, fp0, fp1, cc); \ + break; \ + case 10: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _seq, fp0, fp1, cc); \ + break; \ + case 11: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngl, fp0, fp1, cc); \ + break; \ + case 12: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _lt, fp0, fp1, cc); \ + break; \ + case 13: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _nge, fp0, fp1, cc); \ + break; \ + case 14: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _le, fp0, fp1, cc); \ + break; \ + case 15: \ + gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngt, fp0, fp1, cc); \ + break; \ + default: \ + abort(); \ + } \ + tcg_temp_free_i##bits(tcg_ctx, fp0); \ + tcg_temp_free_i##bits(tcg_ctx, fp1); \ +} + +FOP_CONDS(, 0, d, FMT_D, 64) +FOP_CONDS(abs, 1, d, FMT_D, 64) +FOP_CONDS(, 0, s, FMT_S, 32) +FOP_CONDS(abs, 1, s, FMT_S, 32) +FOP_CONDS(, 0, ps, FMT_PS, 64) +FOP_CONDS(abs, 1, ps, FMT_PS, 64) +#undef FOP_CONDS + +#define FOP_CONDNS(fmt, ifmt, bits, STORE) \ +static inline void gen_r6_cmp_ ## 
fmt(DisasContext *ctx, int n, \ + int ft, int fs, int fd) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i ## bits fp0 = tcg_temp_new_i ## bits(tcg_ctx); \ + TCGv_i ## bits fp1 = tcg_temp_new_i ## bits(tcg_ctx); \ + if (ifmt == FMT_D) { \ + check_cp1_registers(ctx, fs | ft | fd); \ + } \ + gen_ldcmp_fpr ## bits(ctx, fp0, fs); \ + gen_ldcmp_fpr ## bits(ctx, fp1, ft); \ + switch (n) { \ + case 0: \ + gen_helper_r6_cmp_ ## fmt ## _af(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 1: \ + gen_helper_r6_cmp_ ## fmt ## _un(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 2: \ + gen_helper_r6_cmp_ ## fmt ## _eq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 3: \ + gen_helper_r6_cmp_ ## fmt ## _ueq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 4: \ + gen_helper_r6_cmp_ ## fmt ## _lt(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 5: \ + gen_helper_r6_cmp_ ## fmt ## _ult(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 6: \ + gen_helper_r6_cmp_ ## fmt ## _le(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 7: \ + gen_helper_r6_cmp_ ## fmt ## _ule(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 8: \ + gen_helper_r6_cmp_ ## fmt ## _saf(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 9: \ + gen_helper_r6_cmp_ ## fmt ## _sun(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 10: \ + gen_helper_r6_cmp_ ## fmt ## _seq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 11: \ + gen_helper_r6_cmp_ ## fmt ## _sueq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 12: \ + gen_helper_r6_cmp_ ## fmt ## _slt(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 13: \ + gen_helper_r6_cmp_ ## fmt ## _sult(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 14: \ + gen_helper_r6_cmp_ ## fmt ## _sle(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 15: \ + gen_helper_r6_cmp_ ## fmt ## _sule(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 17: \ + gen_helper_r6_cmp_ ## fmt ## _or(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 18: \ + gen_helper_r6_cmp_ ## fmt ## _une(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 19: \ + gen_helper_r6_cmp_ ## fmt ## _ne(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 25: \ + gen_helper_r6_cmp_ ## fmt ## _sor(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 26: \ + gen_helper_r6_cmp_ ## fmt ## _sune(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + case 27: \ + gen_helper_r6_cmp_ ## fmt ## _sne(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \ + break; \ + default: \ + abort(); \ + } \ + STORE; \ + tcg_temp_free_i ## bits(tcg_ctx, fp0); \ + tcg_temp_free_i ## bits(tcg_ctx, fp1); \ +} + +FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd)) +FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd)) +#undef FOP_CONDNS +#undef gen_ldcmp_fpr32 +#undef gen_ldcmp_fpr64 + +/* load/store instructions. 
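+ * OP_LD_ATOMIC() below stamps out one wrapper per load-linked flavour;
+ * e.g. OP_LD_ATOMIC(ll, ld32s) expands, via the gen_helper_1e1i()
+ * macro defined above, to roughly:
+ *
+ *   static inline void op_ld_ll(TCGContext *tcg_ctx, TCGv ret, TCGv arg1,
+ *                               int mem_idx, DisasContext *ctx)
+ *   {
+ *       TCGv_i32 tmp = tcg_const_i32(tcg_ctx, mem_idx);
+ *       gen_helper_ll(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, tmp);
+ *       tcg_temp_free_i32(tcg_ctx, tmp);
+ *   }
+ *
+ * (Note the fname macro argument is not referenced by the expansion.)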
*/ +#define OP_LD_ATOMIC(insn, fname) \ +static inline void op_ld_##insn(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, int mem_idx, \ + DisasContext *ctx) \ +{ \ + gen_helper_1e1i(insn, ret, arg1, mem_idx); \ +} + +OP_LD_ATOMIC(ll, ld32s); +#if defined(TARGET_MIPS64) +OP_LD_ATOMIC(lld, ld64); +#endif +#undef OP_LD_ATOMIC + +static void gen_base_offset_addr(DisasContext *ctx, TCGv addr, + int base, int offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (base == 0) { + tcg_gen_movi_tl(tcg_ctx, addr, offset); + } else if (offset == 0) { + gen_load_gpr(tcg_ctx, addr, base); + } else { + tcg_gen_movi_tl(tcg_ctx, addr, offset); + gen_op_addr_add(ctx, addr, tcg_ctx->cpu_gpr[base], addr); + } +} + +static target_ulong pc_relative_pc(DisasContext *ctx) +{ + target_ulong pc = ctx->base.pc_next; + + if (ctx->hflags & MIPS_HFLAG_BMASK) { + int branch_bytes = ctx->hflags & MIPS_HFLAG_BDS16 ? 2 : 4; + + pc -= branch_bytes; + } + + pc &= ~(target_ulong)3; + return pc; +} + +/* Load */ +static void gen_ld(DisasContext *ctx, uint32_t opc, + int rt, int base, int offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1, t2; + int mem_idx = ctx->mem_idx; + + if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)) { + /* + * Loongson CPU uses a load to zero register for prefetch. + * We emulate it as a NOP. On other CPU we must perform the + * actual memory access. + */ + return; + } + + t0 = tcg_temp_new(tcg_ctx); + gen_base_offset_addr(ctx, t0, base, offset); + + switch (opc) { +#if defined(TARGET_MIPS64) + case OPC_LWU: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEUL | + ctx->default_tcg_memop_mask); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LD: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEQ | + ctx->default_tcg_memop_mask); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LLD: + case R6_OPC_LLD: + op_ld_lld(tcg_ctx, t0, t0, mem_idx, ctx); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LDL: + t1 = tcg_temp_new(tcg_ctx); + /* + * Do a byte access to possibly trigger a page + * fault with the unaligned address. + */ + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, mem_idx, MO_UB); + tcg_gen_andi_tl(tcg_ctx, t1, t0, 7); +#ifndef TARGET_WORDS_BIGENDIAN + tcg_gen_xori_tl(tcg_ctx, t1, t1, 7); +#endif + tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~7); + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEQ); + tcg_gen_shl_tl(tcg_ctx, t0, t0, t1); + t2 = tcg_const_tl(tcg_ctx, -1); + tcg_gen_shl_tl(tcg_ctx, t2, t2, t1); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_andc_tl(tcg_ctx, t1, t1, t2); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_temp_free(tcg_ctx, t1); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LDR: + t1 = tcg_temp_new(tcg_ctx); + /* + * Do a byte access to possibly trigger a page + * fault with the unaligned address. 
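+ * The unaligned doubleword itself is then assembled from a single
+ * aligned MO_TEQ load: shift the aligned data so the addressed bytes
+ * line up, mask off the bytes of rt that must survive, and OR the two
+ * together; the MO_UB probe above exists purely to raise any page
+ * fault at the exact unaligned address first.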
+ */ + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, mem_idx, MO_UB); + tcg_gen_andi_tl(tcg_ctx, t1, t0, 7); +#ifdef TARGET_WORDS_BIGENDIAN + tcg_gen_xori_tl(tcg_ctx, t1, t1, 7); +#endif + tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~7); + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEQ); + tcg_gen_shr_tl(tcg_ctx, t0, t0, t1); + tcg_gen_xori_tl(tcg_ctx, t1, t1, 63); + t2 = tcg_const_tl(tcg_ctx, 0xfffffffffffffffeull); + tcg_gen_shl_tl(tcg_ctx, t2, t2, t1); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_and_tl(tcg_ctx, t1, t1, t2); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_temp_free(tcg_ctx, t1); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LDPC: + t1 = tcg_const_tl(tcg_ctx, pc_relative_pc(ctx)); + gen_op_addr_add(ctx, t0, t0, t1); + tcg_temp_free(tcg_ctx, t1); + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEQ); + gen_store_gpr(tcg_ctx, t0, rt); + break; +#endif + case OPC_LWPC: + t1 = tcg_const_tl(tcg_ctx, pc_relative_pc(ctx)); + gen_op_addr_add(ctx, t0, t0, t1); + tcg_temp_free(tcg_ctx, t1); + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TESL); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LWE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_LW: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TESL | + ctx->default_tcg_memop_mask); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LHE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_LH: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TESW | + ctx->default_tcg_memop_mask); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LHUE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_LHU: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEUW | + ctx->default_tcg_memop_mask); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LBE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_LB: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_SB); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LBUE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_LBU: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_UB); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LWLE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_LWL: + t1 = tcg_temp_new(tcg_ctx); + /* + * Do a byte access to possibly trigger a page + * fault with the unaligned address. + */ + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, mem_idx, MO_UB); + tcg_gen_andi_tl(tcg_ctx, t1, t0, 3); +#ifndef TARGET_WORDS_BIGENDIAN + tcg_gen_xori_tl(tcg_ctx, t1, t1, 3); +#endif + tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3); + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEUL); + tcg_gen_shl_tl(tcg_ctx, t0, t0, t1); + t2 = tcg_const_tl(tcg_ctx, -1); + tcg_gen_shl_tl(tcg_ctx, t2, t2, t1); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_andc_tl(tcg_ctx, t1, t1, t2); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_temp_free(tcg_ctx, t1); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LWRE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_LWR: + t1 = tcg_temp_new(tcg_ctx); + /* + * Do a byte access to possibly trigger a page + * fault with the unaligned address. 
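+ * (LWR mirrors LWL for the low-order bytes: the aligned word is
+ * shifted right by the byte offset, and the 0xfffffffe mask, shifted
+ * by the complemented offset, selects the rt bits to preserve.)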
+ */ + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, mem_idx, MO_UB); + tcg_gen_andi_tl(tcg_ctx, t1, t0, 3); +#ifdef TARGET_WORDS_BIGENDIAN + tcg_gen_xori_tl(tcg_ctx, t1, t1, 3); +#endif + tcg_gen_shli_tl(tcg_ctx, t1, t1, 3); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3); + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, mem_idx, MO_TEUL); + tcg_gen_shr_tl(tcg_ctx, t0, t0, t1); + tcg_gen_xori_tl(tcg_ctx, t1, t1, 31); + t2 = tcg_const_tl(tcg_ctx, 0xfffffffeull); + tcg_gen_shl_tl(tcg_ctx, t2, t2, t1); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_and_tl(tcg_ctx, t1, t1, t2); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_temp_free(tcg_ctx, t1); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_LLE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_LL: + case R6_OPC_LL: + op_ld_ll(tcg_ctx, t0, t0, mem_idx, ctx); + gen_store_gpr(tcg_ctx, t0, rt); + break; + } + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset, + uint32_t reg1, uint32_t reg2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv taddr = tcg_temp_new(tcg_ctx); + TCGv_i64 tval = tcg_temp_new_i64(tcg_ctx); + TCGv tmp1 = tcg_temp_new(tcg_ctx); + TCGv tmp2 = tcg_temp_new(tcg_ctx); + + gen_base_offset_addr(ctx, taddr, base, offset); + tcg_gen_qemu_ld64(tcg_ctx, tval, taddr, ctx->mem_idx); +#ifdef TARGET_WORDS_BIGENDIAN + tcg_gen_extr_i64_tl(tcg_ctx, tmp2, tmp1, tval); +#else + tcg_gen_extr_i64_tl(tcg_ctx, tmp1, tmp2, tval); +#endif + gen_store_gpr(tcg_ctx, tmp1, reg1); + tcg_temp_free(tcg_ctx, tmp1); + gen_store_gpr(tcg_ctx, tmp2, reg2); + tcg_temp_free(tcg_ctx, tmp2); + tcg_gen_st_i64(tcg_ctx, tval, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llval_wp)); + tcg_temp_free_i64(tcg_ctx, tval); + tcg_gen_st_tl(tcg_ctx, taddr, tcg_ctx->cpu_env, offsetof(CPUMIPSState, lladdr)); + tcg_temp_free(tcg_ctx, taddr); +} + +/* Store */ +static void gen_st(DisasContext *ctx, uint32_t opc, int rt, + int base, int offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + int mem_idx = ctx->mem_idx; + + gen_base_offset_addr(ctx, t0, base, offset); + gen_load_gpr(tcg_ctx, t1, rt); + switch (opc) { +#if defined(TARGET_MIPS64) + case OPC_SD: + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, mem_idx, MO_TEQ | + ctx->default_tcg_memop_mask); + break; + case OPC_SDL: + gen_helper_0e2i(sdl, t1, t0, mem_idx); + break; + case OPC_SDR: + gen_helper_0e2i(sdr, t1, t0, mem_idx); + break; +#endif + case OPC_SWE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_SW: + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, mem_idx, MO_TEUL | + ctx->default_tcg_memop_mask); + break; + case OPC_SHE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_SH: + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, mem_idx, MO_TEUW | + ctx->default_tcg_memop_mask); + break; + case OPC_SBE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_SB: + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, mem_idx, MO_8); + break; + case OPC_SWLE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_SWL: + gen_helper_0e2i(swl, t1, t0, mem_idx); + break; + case OPC_SWRE: + mem_idx = MIPS_HFLAG_UM; + /* fall through */ + case OPC_SWR: + gen_helper_0e2i(swr, t1, t0, mem_idx); + break; + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + + +/* Store conditional */ +static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, + MemOp tcg_mo, bool eva) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv addr, t0, val; + TCGLabel *l1 = 
gen_new_label(tcg_ctx); + TCGLabel *done = gen_new_label(tcg_ctx); + + t0 = tcg_temp_new(tcg_ctx); + addr = tcg_temp_new(tcg_ctx); + /* compare the address against that of the preceding LL */ + gen_base_offset_addr(ctx, addr, base, offset); + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_EQ, addr, tcg_ctx->cpu_lladdr, l1); + tcg_temp_free(tcg_ctx, addr); + tcg_gen_movi_tl(tcg_ctx, t0, 0); + gen_store_gpr(tcg_ctx, t0, rt); + tcg_gen_br(tcg_ctx, done); + + gen_set_label(tcg_ctx, l1); + /* generate cmpxchg */ + val = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, val, rt); + tcg_gen_atomic_cmpxchg_tl(tcg_ctx, t0, tcg_ctx->cpu_lladdr, tcg_ctx->cpu_llval, val, + eva ? MIPS_HFLAG_UM : ctx->mem_idx, tcg_mo); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, t0, t0, tcg_ctx->cpu_llval); + gen_store_gpr(tcg_ctx, t0, rt); + tcg_temp_free(tcg_ctx, val); + + gen_set_label(tcg_ctx, done); + tcg_temp_free(tcg_ctx, t0); +} + + +static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset, + uint32_t reg1, uint32_t reg2, bool eva) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv taddr = tcg_temp_local_new(tcg_ctx); + TCGv lladdr = tcg_temp_local_new(tcg_ctx); + TCGv_i64 tval = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 llval = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 val = tcg_temp_new_i64(tcg_ctx); + TCGv tmp1 = tcg_temp_new(tcg_ctx); + TCGv tmp2 = tcg_temp_new(tcg_ctx); + TCGLabel *lab_fail = gen_new_label(tcg_ctx); + TCGLabel *lab_done = gen_new_label(tcg_ctx); + + gen_base_offset_addr(ctx, taddr, base, offset); + + tcg_gen_ld_tl(tcg_ctx, lladdr, tcg_ctx->cpu_env, offsetof(CPUMIPSState, lladdr)); + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, taddr, lladdr, lab_fail); + + gen_load_gpr(tcg_ctx, tmp1, reg1); + gen_load_gpr(tcg_ctx, tmp2, reg2); + +#ifdef TARGET_WORDS_BIGENDIAN + tcg_gen_concat_tl_i64(tcg_ctx, tval, tmp2, tmp1); +#else + tcg_gen_concat_tl_i64(tcg_ctx, tval, tmp1, tmp2); +#endif + + tcg_gen_ld_i64(tcg_ctx, llval, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llval_wp)); + tcg_gen_atomic_cmpxchg_i64(tcg_ctx, val, taddr, llval, tval, + eva ? MIPS_HFLAG_UM : ctx->mem_idx, MO_64); + if (reg1 != 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg1], 1); + } + tcg_gen_brcond_i64(tcg_ctx, TCG_COND_EQ, val, llval, lab_done); + + gen_set_label(tcg_ctx, lab_fail); + + if (reg1 != 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg1], 0); + } + gen_set_label(tcg_ctx, lab_done); + tcg_gen_movi_tl(tcg_ctx, lladdr, -1); + tcg_gen_st_tl(tcg_ctx, lladdr, tcg_ctx->cpu_env, offsetof(CPUMIPSState, lladdr)); +} + +/* Load and store */ +static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, + TCGv t0) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + /* + * Don't do NOP if destination is zero: we must perform the actual + * memory access.
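+ * (Unlike the integer register file, the FPU has no hardwired zero:
+ * $f0 is a real register and the access may still fault.)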
+ */ + switch (opc) { + case OPC_LWC1: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_qemu_ld_i32(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TESL | + ctx->default_tcg_memop_mask); + gen_store_fpr32(ctx, fp0, ft); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_SWC1: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, ft); + tcg_gen_qemu_st_i32(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEUL | + ctx->default_tcg_memop_mask); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_LDC1: + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ | + ctx->default_tcg_memop_mask); + gen_store_fpr64(ctx, fp0, ft); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_SDC1: + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, ft); + tcg_gen_qemu_st_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ | + ctx->default_tcg_memop_mask); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + default: + MIPS_INVAL("flt_ldst"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void gen_cop1_ldst(DisasContext *ctx, uint32_t op, int rt, + int rs, int16_t imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + + if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { + check_cp1_enabled(ctx); + switch (op) { + case OPC_LDC1: + case OPC_SDC1: + check_insn(ctx, ISA_MIPS2); + /* Fallthrough */ + default: + gen_base_offset_addr(ctx, t0, rs, imm); + gen_flt_ldst(ctx, op, rt, t0); + } + } else { + generate_exception_err(ctx, EXCP_CpU, 1); + } + tcg_temp_free(tcg_ctx, t0); +} + +/* Arithmetic with immediate operand */ +static void gen_arith_imm(DisasContext *ctx, uint32_t opc, + int rt, int rs, int imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */ + + if (rt == 0 && opc != OPC_ADDI && opc != OPC_DADDI) { + /* + * If no destination, treat it as a NOP. + * For addi, we must generate the overflow exception when needed. 
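+ * Note: the ADDI/DADDI paths below test signed overflow without a
+ * branch: overflow occurred iff both operands share a sign that the
+ * sum does not, i.e. the sign bit of (rs ^ ~imm) & (sum ^ imm) is
+ * set; e.g. 0x7fffffff + 1 flips the sign and must trap.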
+ */ + return; + } + switch (opc) { + case OPC_ADDI: + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGLabel *l1 = gen_new_label(tcg_ctx); + + gen_load_gpr(tcg_ctx, t1, rs); + tcg_gen_addi_tl(tcg_ctx, t0, t1, uimm); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + + tcg_gen_xori_tl(tcg_ctx, t1, t1, ~uimm); + tcg_gen_xori_tl(tcg_ctx, t2, t0, uimm); + tcg_gen_and_tl(tcg_ctx, t1, t1, t2); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_temp_free(tcg_ctx, t1); + /* operands of same sign, result different sign */ + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, l1); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + gen_store_gpr(tcg_ctx, t0, rt); + tcg_temp_free(tcg_ctx, t0); + } + break; + case OPC_ADDIU: + if (rs != 0) { + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], uimm); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], uimm); + } + break; +#if defined(TARGET_MIPS64) + case OPC_DADDI: + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGLabel *l1 = gen_new_label(tcg_ctx); + + gen_load_gpr(tcg_ctx, t1, rs); + tcg_gen_addi_tl(tcg_ctx, t0, t1, uimm); + + tcg_gen_xori_tl(tcg_ctx, t1, t1, ~uimm); + tcg_gen_xori_tl(tcg_ctx, t2, t0, uimm); + tcg_gen_and_tl(tcg_ctx, t1, t1, t2); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_temp_free(tcg_ctx, t1); + /* operands of same sign, result different sign */ + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, l1); + gen_store_gpr(tcg_ctx, t0, rt); + tcg_temp_free(tcg_ctx, t0); + } + break; + case OPC_DADDIU: + if (rs != 0) { + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], uimm); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], uimm); + } + break; +#endif + } +} + +/* Logic with immediate operand */ +static void gen_logic_imm(DisasContext *ctx, uint32_t opc, + int rt, int rs, int16_t imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong uimm; + + if (rt == 0) { + /* If no destination, treat it as a NOP. 
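+ * ANDI/ORI/XORI operate on the zero-extended 16-bit immediate (hence
+ * the (uint16_t) cast below), while LUI/AUI shift the raw immediate
+ * into the upper halfword.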
*/ + return; + } + uimm = (uint16_t)imm; + switch (opc) { + case OPC_ANDI: + if (likely(rs != 0)) { + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], uimm); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], 0); + } + break; + case OPC_ORI: + if (rs != 0) { + tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], uimm); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], uimm); + } + break; + case OPC_XORI: + if (likely(rs != 0)) { + tcg_gen_xori_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], uimm); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], uimm); + } + break; + case OPC_LUI: + if (rs != 0 && (ctx->insn_flags & ISA_MIPS32R6)) { + /* OPC_AUI */ + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rs], imm << 16); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], imm << 16); + } + break; + + default: + break; + } +} + +/* Set on less than with immediate operand */ +static void gen_slt_imm(DisasContext *ctx, uint32_t opc, + int rt, int rs, int16_t imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */ + TCGv t0; + + if (rt == 0) { + /* If no destination, treat it as a NOP. */ + return; + } + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rs); + switch (opc) { + case OPC_SLTI: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr[rt], t0, uimm); + break; + case OPC_SLTIU: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr[rt], t0, uimm); + break; + } + tcg_temp_free(tcg_ctx, t0); +} + +/* Shifts with immediate operand */ +static void gen_shift_imm(DisasContext *ctx, uint32_t opc, + int rt, int rs, int16_t imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong uimm = ((uint16_t)imm) & 0x1f; + TCGv t0; + + if (rt == 0) { + /* If no destination, treat it as a NOP. 
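+ * The shift amount was already masked to 5 bits above; the *32
+ * doubleword variants below add 32 to reach counts of 32..63.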
*/ + return; + } + + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rs); + switch (opc) { + case OPC_SLL: + tcg_gen_shli_tl(tcg_ctx, t0, t0, uimm); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0); + break; + case OPC_SRA: + tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); + break; + case OPC_SRL: + if (uimm != 0) { + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); + } else { + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0); + } + break; + case OPC_ROTR: + if (uimm != 0) { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t1, t0); + tcg_gen_rotri_i32(tcg_ctx, t1, t1, uimm); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t1); + tcg_temp_free_i32(tcg_ctx, t1); + } else { + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0); + } + break; +#if defined(TARGET_MIPS64) + case OPC_DSLL: + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); + break; + case OPC_DSRA: + tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); + break; + case OPC_DSRL: + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); + break; + case OPC_DROTR: + if (uimm != 0) { + tcg_gen_rotri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm); + } else { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0); + } + break; + case OPC_DSLL32: + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm + 32); + break; + case OPC_DSRA32: + tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm + 32); + break; + case OPC_DSRL32: + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm + 32); + break; + case OPC_DROTR32: + tcg_gen_rotri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, uimm + 32); + break; +#endif + } + tcg_temp_free(tcg_ctx, t0); +} + +/* Arithmetic */ +static void gen_arith(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB + && opc != OPC_DADD && opc != OPC_DSUB) { + /* + * If no destination, treat it as a NOP. + * For add & sub, we must generate the overflow exception when needed. 
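+ * Note: the checks below are branch-free sign tests. For ADD,
+ * overflow iff ~(rs ^ rt) & (sum ^ rt) has its sign bit set
+ * (same-sign operands, differently-signed sum); for SUB, iff
+ * (rs ^ rt) & (diff ^ rs) does.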
+ */ + return; + } + + switch (opc) { + case OPC_ADD: + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGLabel *l1 = gen_new_label(tcg_ctx); + + gen_load_gpr(tcg_ctx, t1, rs); + gen_load_gpr(tcg_ctx, t2, rt); + tcg_gen_add_tl(tcg_ctx, t0, t1, t2); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_xor_tl(tcg_ctx, t1, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t2, t0, t2); + tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_temp_free(tcg_ctx, t1); + /* operands of same sign, result different sign */ + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, l1); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + } + break; + case OPC_ADDU: + if (rs != 0 && rt != 0) { + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } + break; + case OPC_SUB: + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGLabel *l1 = gen_new_label(tcg_ctx); + + gen_load_gpr(tcg_ctx, t1, rs); + gen_load_gpr(tcg_ctx, t2, rt); + tcg_gen_sub_tl(tcg_ctx, t0, t1, t2); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_xor_tl(tcg_ctx, t2, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t1, t0, t1); + tcg_gen_and_tl(tcg_ctx, t1, t1, t2); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_temp_free(tcg_ctx, t1); + /* + * operands of different sign, first operand and the result + * of different sign + */ + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, l1); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + } + break; + case OPC_SUBU: + if (rs != 0 && rt != 0) { + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + } else if (rs == 0 && rt != 0) { + tcg_gen_neg_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } + break; +#if defined(TARGET_MIPS64) + case OPC_DADD: + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGLabel *l1 = gen_new_label(tcg_ctx); + + gen_load_gpr(tcg_ctx, t1, rs); + gen_load_gpr(tcg_ctx, t2, rt); + tcg_gen_add_tl(tcg_ctx, t0, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t1, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t2, t0, t2); + tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_temp_free(tcg_ctx, t1); + /* operands of same sign, result different sign */ + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, l1); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + } + break; + case OPC_DADDU: + if (rs != 0 && rt != 0) { + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); + } else if (rs 
== 0 && rt != 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } + break; + case OPC_DSUB: + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGLabel *l1 = gen_new_label(tcg_ctx); + + gen_load_gpr(tcg_ctx, t1, rs); + gen_load_gpr(tcg_ctx, t2, rt); + tcg_gen_sub_tl(tcg_ctx, t0, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t2, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t1, t0, t1); + tcg_gen_and_tl(tcg_ctx, t1, t1, t2); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_temp_free(tcg_ctx, t1); + /* + * Operands of different sign, first operand and result different + * sign. + */ + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, l1); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + } + break; + case OPC_DSUBU: + if (rs != 0 && rt != 0) { + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_neg_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } + break; +#endif + case OPC_MUL: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_mul_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } + break; + } +} + +/* Conditional move */ +static void gen_cond_move(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1, t2; + + if (rd == 0) { + /* If no destination, treat it as a NOP. */ + return; + } + + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rt); + t1 = tcg_const_tl(tcg_ctx, 0); + t2 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t2, rs); + switch (opc) { + case OPC_MOVN: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr[rd], t0, t1, t2, tcg_ctx->cpu_gpr[rd]); + break; + case OPC_MOVZ: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr[rd], t0, t1, t2, tcg_ctx->cpu_gpr[rd]); + break; + case OPC_SELNEZ: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr[rd], t0, t1, t2, t1); + break; + case OPC_SELEQZ: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr[rd], t0, t1, t2, t1); + break; + } + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); +} + +/* Logic */ +static void gen_logic(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (rd == 0) { + /* If no destination, treat it as a NOP. 
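+ * Operands known to be $zero are folded below: NOR degenerates into
+ * NOT (or into loading ~0), and OR/XOR into a plain move.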
*/ + return; + } + + switch (opc) { + case OPC_AND: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } + break; + case OPC_NOR: + if (rs != 0 && rt != 0) { + tcg_gen_nor_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_not_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_not_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], ~((target_ulong)0)); + } + break; + case OPC_OR: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } + break; + case OPC_XOR: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } + break; + } +} + +/* Set on less than */ +static void gen_slt(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + if (rd == 0) { + /* If no destination, treat it as a NOP. */ + return; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + switch (opc) { + case OPC_SLT: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr[rd], t0, t1); + break; + case OPC_SLTU: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr[rd], t0, t1); + break; + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +/* Shifts */ +static void gen_shift(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + if (rd == 0) { + /* + * If no destination, treat it as a NOP. Unlike add & sub, shifts + * cannot overflow, so there is no exception to generate here.
+ */ + return; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + switch (opc) { + case OPC_SLLV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); + tcg_gen_shl_tl(tcg_ctx, t0, t1, t0); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); + break; + case OPC_SRAV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); + tcg_gen_sar_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); + break; + case OPC_SRLV: + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); + tcg_gen_shr_tl(tcg_ctx, t0, t1, t0); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); + break; + case OPC_ROTRV: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_andi_i32(tcg_ctx, t2, t2, 0x1f); + tcg_gen_rotr_i32(tcg_ctx, t2, t3, t2); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; +#if defined(TARGET_MIPS64) + case OPC_DSLLV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); + tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); + break; + case OPC_DSRAV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); + tcg_gen_sar_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); + break; + case OPC_DSRLV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); + tcg_gen_shr_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); + break; + case OPC_DROTRV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); + tcg_gen_rotr_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); + break; +#endif + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +#if defined(TARGET_MIPS64) +/* Copy GPR to and from TX79 HI1/LO1 register. */ +static void gen_HILO1_tx79(DisasContext *ctx, uint32_t opc, int reg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (reg == 0 && (opc == MMI_OPC_MFHI1 || opc == MMI_OPC_MFLO1)) { + /* Treat as NOP. */ + return; + } + + switch (opc) { + case MMI_OPC_MFHI1: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_HI[1]); + break; + case MMI_OPC_MFLO1: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_LO[1]); + break; + case MMI_OPC_MTHI1: + if (reg != 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_HI[1], tcg_ctx->cpu_gpr[reg]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_HI[1], 0); + } + break; + case MMI_OPC_MTLO1: + if (reg != 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_LO[1], tcg_ctx->cpu_gpr[reg]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_LO[1], 0); + } + break; + default: + MIPS_INVAL("mfthilo1 TX79"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} +#endif + +/* Arithmetic on HI/LO registers */ +static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (reg == 0 && (opc == OPC_MFHI || opc == OPC_MFLO)) { + /* Treat as NOP. 
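+ * acc selects a DSP accumulator pair; any acc other than 0 requires
+ * the DSP ASE, which check_dsp() enforces below.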
*/ + return; + } + + if (acc != 0) { + check_dsp(ctx); + } + + switch (opc) { + case OPC_MFHI: +#if defined(TARGET_MIPS64) + if (acc != 0) { + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_HI[acc]); + } else +#endif + { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_HI[acc]); + } + break; + case OPC_MFLO: +#if defined(TARGET_MIPS64) + if (acc != 0) { + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_LO[acc]); + } else +#endif + { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], tcg_ctx->cpu_LO[acc]); + } + break; + case OPC_MTHI: + if (reg != 0) { +#if defined(TARGET_MIPS64) + if (acc != 0) { + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], tcg_ctx->cpu_gpr[reg]); + } else +#endif + { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], tcg_ctx->cpu_gpr[reg]); + } + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], 0); + } + break; + case OPC_MTLO: + if (reg != 0) { +#if defined(TARGET_MIPS64) + if (acc != 0) { + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_gpr[reg]); + } else +#endif + { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_gpr[reg]); + } + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], 0); + } + break; + } +} + +static inline void gen_r6_ld(TCGContext *tcg_ctx, target_long addr, int reg, int memidx, + MemOp memop) +{ + TCGv t0 = tcg_const_tl(tcg_ctx, addr); + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, memidx, memop); + gen_store_gpr(tcg_ctx, t0, reg); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc, + int rs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_long offset; + target_long addr; + + switch (MASK_OPC_PCREL_TOP2BITS(opc)) { + case OPC_ADDIUPC: + if (rs != 0) { + offset = sextract32(ctx->opcode << 2, 0, 21); + addr = addr_add(ctx, pc, offset); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rs], addr); + } + break; + case R6_OPC_LWPC: + offset = sextract32(ctx->opcode << 2, 0, 21); + addr = addr_add(ctx, pc, offset); + gen_r6_ld(tcg_ctx, addr, rs, ctx->mem_idx, MO_TESL); + break; +#if defined(TARGET_MIPS64) + case OPC_LWUPC: + check_mips_64(ctx); + offset = sextract32(ctx->opcode << 2, 0, 21); + addr = addr_add(ctx, pc, offset); + gen_r6_ld(tcg_ctx, addr, rs, ctx->mem_idx, MO_TEUL); + break; +#endif + default: + switch (MASK_OPC_PCREL_TOP5BITS(opc)) { + case OPC_AUIPC: + if (rs != 0) { + offset = sextract32(ctx->opcode, 0, 16) << 16; + addr = addr_add(ctx, pc, offset); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rs], addr); + } + break; + case OPC_ALUIPC: + if (rs != 0) { + offset = sextract32(ctx->opcode, 0, 16) << 16; + addr = ~0xFFFF & addr_add(ctx, pc, offset); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rs], addr); + } + break; +#if defined(TARGET_MIPS64) + case R6_OPC_LDPC: /* bits 16 and 17 are part of immediate */ + case R6_OPC_LDPC + (1 << 16): + case R6_OPC_LDPC + (2 << 16): + case R6_OPC_LDPC + (3 << 16): + check_mips_64(ctx); + offset = sextract32(ctx->opcode << 3, 0, 21); + addr = addr_add(ctx, (pc & ~0x7), offset); + gen_r6_ld(tcg_ctx, addr, rs, ctx->mem_idx, MO_TEQ); + break; +#endif + default: + MIPS_INVAL("OPC_PCREL"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + } +} + +static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + if (rd == 0) { + /* Treat as NOP. 
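+ * The divide/modulo cases below avoid host-side traps without
+ * branching: a movcond swaps a divisor of 0 (and the INT_MIN / -1
+ * pair) for a harmless dummy value, the architectural result being
+ * UNPREDICTABLE in those cases anyway.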
*/ + return; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + + switch (opc) { + case R6_OPC_DIV: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_ext32s_tl(tcg_ctx, t1, t1); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case R6_OPC_MOD: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_ext32s_tl(tcg_ctx, t1, t1); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case R6_OPC_DIVU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case R6_OPC_MODU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_remu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case R6_OPC_MUL: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_mul_i32(tcg_ctx, t2, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; + case R6_OPC_MUH: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; + case R6_OPC_MULU: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_mul_i32(tcg_ctx, t2, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); + 
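/* Note: the low 32 bits of a 32x32 product do not depend on signedness, which is why MULU reuses the signed mul_i32 above. */ +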
tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; + case R6_OPC_MUHU: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; +#if defined(TARGET_MIPS64) + case R6_OPC_DDIV: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1LL << 63); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case R6_OPC_DMOD: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1LL << 63); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case R6_OPC_DDIVU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case R6_OPC_DMODU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_remu_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case R6_OPC_DMUL: + tcg_gen_mul_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + break; + case R6_OPC_DMUH: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + tcg_gen_muls2_i64(tcg_ctx, t2, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t2); + } + break; + case R6_OPC_DMULU: + tcg_gen_mul_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + break; + case R6_OPC_DMUHU: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + tcg_gen_mulu2_i64(tcg_ctx, t2, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t2); + } + break; +#endif + default: + MIPS_INVAL("r6 mul/div"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +#if defined(TARGET_MIPS64) +static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + + switch (opc) { + case MMI_OPC_DIV1: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_ext32s_tl(tcg_ctx, t1, t1); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_tl(tcg_ctx, t2, 
t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_LO[1], t0, t1); + tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_HI[1], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_LO[1], tcg_ctx->cpu_LO[1]); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_HI[1], tcg_ctx->cpu_HI[1]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case MMI_OPC_DIVU1: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_tl(tcg_ctx, tcg_ctx->cpu_LO[1], t0, t1); + tcg_gen_remu_tl(tcg_ctx, tcg_ctx->cpu_HI[1], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_LO[1], tcg_ctx->cpu_LO[1]); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_HI[1], tcg_ctx->cpu_HI[1]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + default: + MIPS_INVAL("div1 TX79"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} +#endif + +static void gen_muldiv(DisasContext *ctx, uint32_t opc, + int acc, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + + if (acc != 0) { + check_dsp(ctx); + } + + switch (opc) { + case OPC_DIV: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_ext32s_tl(tcg_ctx, t1, t1); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t0, t1); + tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_LO[acc]); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], tcg_ctx->cpu_HI[acc]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case OPC_DIVU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t0, t1); + tcg_gen_remu_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_LO[acc]); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], tcg_ctx->cpu_HI[acc]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case OPC_MULT: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; + case OPC_MULTU: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = 
tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; +#if defined(TARGET_MIPS64) + case OPC_DDIV: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1LL << 63); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t0, t1); + tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case OPC_DDIVU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_i64(tcg_ctx, tcg_ctx->cpu_LO[acc], t0, t1); + tcg_gen_remu_i64(tcg_ctx, tcg_ctx->cpu_HI[acc], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + break; + case OPC_DMULT: + tcg_gen_muls2_i64(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc], t0, t1); + break; + case OPC_DMULTU: + tcg_gen_mulu2_i64(tcg_ctx, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc], t0, t1); + break; +#endif + case OPC_MADD: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); + tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); + tcg_gen_add_i64(tcg_ctx, t2, t2, t3); + tcg_temp_free_i64(tcg_ctx, t3); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); + tcg_temp_free_i64(tcg_ctx, t2); + } + break; + case OPC_MADDU: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); + tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); + tcg_gen_add_i64(tcg_ctx, t2, t2, t3); + tcg_temp_free_i64(tcg_ctx, t3); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); + tcg_temp_free_i64(tcg_ctx, t2); + } + break; + case OPC_MSUB: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); + tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); + tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); + tcg_temp_free_i64(tcg_ctx, t3); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); + tcg_temp_free_i64(tcg_ctx, t2); + } + break; + case OPC_MSUBU: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); + tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); + 
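/* Widen unsigned, multiply, then subtract the product from the HI:LO pair rebuilt by concat_tl_i64; the difference is split back into LO and HI below. */ +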
tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); + tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); + tcg_temp_free_i64(tcg_ctx, t3); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); + tcg_temp_free_i64(tcg_ctx, t2); + } + break; + default: + MIPS_INVAL("mul/div"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +/* + * These MULT[U] and MADD[U] instructions, implemented in, for example, + * the Toshiba/Sony R5900 and the Toshiba TX19, TX39 and TX79 core + * architectures, are special three-operand variants with the syntax + * + * MULT[U][1] rd, rs, rt + * + * such that + * + * (rd, LO, HI) <- rs * rt + * + * and + * + * MADD[U][1] rd, rs, rt + * + * such that + * + * (rd, LO, HI) <- (LO, HI) + rs * rt + * + * where the low-order 32 bits of the result are placed into both the + * GPR rd and the special register LO. The high-order 32 bits of the + * result are placed into the special register HI. + * + * If the GPR rd is omitted in assembly language, it is taken to be 0, + * which is the zero register that always reads as 0. + */ +static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + int acc = 0; + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + + switch (opc) { + case MMI_OPC_MULT1: + acc = 1; + /* Fall through */ + case OPC_MULT: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); + if (rd) { + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); + } + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; + case MMI_OPC_MULTU1: + acc = 1; + /* Fall through */ + case OPC_MULTU: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); + if (rd) { + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); + } + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; + case MMI_OPC_MADD1: + acc = 1; + /* Fall through */ + case MMI_OPC_MADD: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); + tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); + tcg_gen_add_i64(tcg_ctx, t2, t2, t3); + tcg_temp_free_i64(tcg_ctx, t3); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); + if (rd) { + gen_move_low32(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); + } + tcg_temp_free_i64(tcg_ctx, t2); + } + break; + case MMI_OPC_MADDU1: + acc = 1; + /* Fall through */ + case MMI_OPC_MADDU: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx,
t1, t1); + tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); + tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); + tcg_gen_add_i64(tcg_ctx, t2, t2, t3); + tcg_temp_free_i64(tcg_ctx, t3); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); + if (rd) { + gen_move_low32(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); + } + tcg_temp_free_i64(tcg_ctx, t2); + } + break; + default: + MIPS_INVAL("mul/madd TXx9"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_mul_vr54xx(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + + switch (opc) { + case OPC_VR54XX_MULS: + gen_helper_muls(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MULSU: + gen_helper_mulsu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MACC: + gen_helper_macc(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MACCU: + gen_helper_maccu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MSAC: + gen_helper_msac(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MSACU: + gen_helper_msacu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MULHI: + gen_helper_mulhi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MULHIU: + gen_helper_mulhiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MULSHI: + gen_helper_mulshi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MULSHIU: + gen_helper_mulshiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MACCHI: + gen_helper_macchi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MACCHIU: + gen_helper_macchiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MSACHI: + gen_helper_msachi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + case OPC_VR54XX_MSACHIU: + gen_helper_msachiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + break; + default: + MIPS_INVAL("mul vr54xx"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + gen_store_gpr(tcg_ctx, t0, rd); + + out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_cl(DisasContext *ctx, uint32_t opc, + int rd, int rs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + if (rd == 0) { + /* Treat as NOP. */ + return; + } + t0 = tcg_ctx->cpu_gpr[rd]; + gen_load_gpr(tcg_ctx, t0, rs); + + switch (opc) { + case OPC_CLO: + case R6_OPC_CLO: +#if defined(TARGET_MIPS64) + case OPC_DCLO: + case R6_OPC_DCLO: +#endif + tcg_gen_not_tl(tcg_ctx, t0, t0); + break; + } + + switch (opc) { + case OPC_CLO: + case R6_OPC_CLO: + case OPC_CLZ: + case R6_OPC_CLZ: + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_clzi_tl(tcg_ctx, t0, t0, TARGET_LONG_BITS); + tcg_gen_subi_tl(tcg_ctx, t0, t0, TARGET_LONG_BITS - 32); + break; +#if defined(TARGET_MIPS64) + case OPC_DCLO: + case R6_OPC_DCLO: + case OPC_DCLZ: + case R6_OPC_DCLZ: + tcg_gen_clzi_i64(tcg_ctx, t0, t0, 64); + break; +#endif + } +} + +/* Godson integer instructions */ +static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + if (rd == 0) { + /* Treat as NOP. 
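+ * The divide/modulo cases below branch between basic blocks, so they
+ * allocate tcg_temp_local_new() temporaries; plain temps would not
+ * survive the brcond.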
*/ + return; + } + + switch (opc) { + case OPC_MULT_G_2E: + case OPC_MULT_G_2F: + case OPC_MULTU_G_2E: + case OPC_MULTU_G_2F: +#if defined(TARGET_MIPS64) + case OPC_DMULT_G_2E: + case OPC_DMULT_G_2F: + case OPC_DMULTU_G_2E: + case OPC_DMULTU_G_2F: +#endif + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + break; + default: + t0 = tcg_temp_local_new(tcg_ctx); + t1 = tcg_temp_local_new(tcg_ctx); + break; + } + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + + switch (opc) { + case OPC_MULT_G_2E: + case OPC_MULT_G_2F: + tcg_gen_mul_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + break; + case OPC_MULTU_G_2E: + case OPC_MULTU_G_2F: + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_mul_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + break; + case OPC_DIV_G_2E: + case OPC_DIV_G_2F: + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGLabel *l3 = gen_new_label(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_ext32s_tl(tcg_ctx, t1, t1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, INT_MIN, l2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1, l2); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l2); + tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + gen_set_label(tcg_ctx, l3); + } + break; + case OPC_DIVU_G_2E: + case OPC_DIVU_G_2F: + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_divu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + gen_set_label(tcg_ctx, l2); + } + break; + case OPC_MOD_G_2E: + case OPC_MOD_G_2F: + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGLabel *l3 = gen_new_label(tcg_ctx); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, INT_MIN, l2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l2); + tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + gen_set_label(tcg_ctx, l3); + } + break; + case OPC_MODU_G_2E: + case OPC_MODU_G_2F: + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_remu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + 
gen_set_label(tcg_ctx, l2); + } + break; +#if defined(TARGET_MIPS64) + case OPC_DMULT_G_2E: + case OPC_DMULT_G_2F: + tcg_gen_mul_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + break; + case OPC_DMULTU_G_2E: + case OPC_DMULTU_G_2F: + tcg_gen_mul_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + break; + case OPC_DDIV_G_2E: + case OPC_DDIV_G_2F: + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGLabel *l3 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, -1LL << 63, l2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1LL, l2); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l2); + tcg_gen_div_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + gen_set_label(tcg_ctx, l3); + } + break; + case OPC_DDIVU_G_2E: + case OPC_DDIVU_G_2F: + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_divu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + gen_set_label(tcg_ctx, l2); + } + break; + case OPC_DMOD_G_2E: + case OPC_DMOD_G_2F: + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGLabel *l3 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, -1LL << 63, l2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1LL, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l2); + tcg_gen_rem_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + gen_set_label(tcg_ctx, l3); + } + break; + case OPC_DMODU_G_2E: + case OPC_DMODU_G_2F: + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_remu_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + gen_set_label(tcg_ctx, l2); + } + break; +#endif + } + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +/* Loongson multimedia instructions */ +static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t opc, shift_max; + TCGv_i64 t0, t1; + TCGCond cond; + + opc = MASK_LMI(ctx->opcode); + switch (opc) { + case OPC_ADD_CP2: + case OPC_SUB_CP2: + case OPC_DADD_CP2: + case OPC_DSUB_CP2: + t0 = tcg_temp_local_new_i64(tcg_ctx); + t1 = tcg_temp_local_new_i64(tcg_ctx); + break; + default: + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + break; + } + + check_cp1_enabled(ctx); + gen_load_fpr64(ctx, t0, rs); + gen_load_fpr64(ctx, t1, rt); + + switch (opc) { + case OPC_PADDSH: + gen_helper_paddsh(tcg_ctx, t0, t0, t1); + break; + case OPC_PADDUSH: + gen_helper_paddush(tcg_ctx, t0, t0, t1); + break; + case OPC_PADDH: + gen_helper_paddh(tcg_ctx, t0, t0, t1); + break; + case OPC_PADDW: + gen_helper_paddw(tcg_ctx, t0, t0, t1); + break; + case OPC_PADDSB: + gen_helper_paddsb(tcg_ctx, t0, t0, t1); + break; + case OPC_PADDUSB: + gen_helper_paddusb(tcg_ctx, t0, t0, t1); + break; + case OPC_PADDB: + gen_helper_paddb(tcg_ctx, t0, t0, t1); 
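+        /*
+         * Note: the gen_helper_p* calls in this switch operate lane-wise
+         * on the 64-bit value: the b/h/w suffix selects 8/16/32-bit lanes,
+         * and the s/us variants saturate on signed/unsigned overflow,
+         * MMX-style.
+         */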
+ break; + + case OPC_PSUBSH: + gen_helper_psubsh(tcg_ctx, t0, t0, t1); + break; + case OPC_PSUBUSH: + gen_helper_psubush(tcg_ctx, t0, t0, t1); + break; + case OPC_PSUBH: + gen_helper_psubh(tcg_ctx, t0, t0, t1); + break; + case OPC_PSUBW: + gen_helper_psubw(tcg_ctx, t0, t0, t1); + break; + case OPC_PSUBSB: + gen_helper_psubsb(tcg_ctx, t0, t0, t1); + break; + case OPC_PSUBUSB: + gen_helper_psubusb(tcg_ctx, t0, t0, t1); + break; + case OPC_PSUBB: + gen_helper_psubb(tcg_ctx, t0, t0, t1); + break; + + case OPC_PSHUFH: + gen_helper_pshufh(tcg_ctx, t0, t0, t1); + break; + case OPC_PACKSSWH: + gen_helper_packsswh(tcg_ctx, t0, t0, t1); + break; + case OPC_PACKSSHB: + gen_helper_packsshb(tcg_ctx, t0, t0, t1); + break; + case OPC_PACKUSHB: + gen_helper_packushb(tcg_ctx, t0, t0, t1); + break; + + case OPC_PUNPCKLHW: + gen_helper_punpcklhw(tcg_ctx, t0, t0, t1); + break; + case OPC_PUNPCKHHW: + gen_helper_punpckhhw(tcg_ctx, t0, t0, t1); + break; + case OPC_PUNPCKLBH: + gen_helper_punpcklbh(tcg_ctx, t0, t0, t1); + break; + case OPC_PUNPCKHBH: + gen_helper_punpckhbh(tcg_ctx, t0, t0, t1); + break; + case OPC_PUNPCKLWD: + gen_helper_punpcklwd(tcg_ctx, t0, t0, t1); + break; + case OPC_PUNPCKHWD: + gen_helper_punpckhwd(tcg_ctx, t0, t0, t1); + break; + + case OPC_PAVGH: + gen_helper_pavgh(tcg_ctx, t0, t0, t1); + break; + case OPC_PAVGB: + gen_helper_pavgb(tcg_ctx, t0, t0, t1); + break; + case OPC_PMAXSH: + gen_helper_pmaxsh(tcg_ctx, t0, t0, t1); + break; + case OPC_PMINSH: + gen_helper_pminsh(tcg_ctx, t0, t0, t1); + break; + case OPC_PMAXUB: + gen_helper_pmaxub(tcg_ctx, t0, t0, t1); + break; + case OPC_PMINUB: + gen_helper_pminub(tcg_ctx, t0, t0, t1); + break; + + case OPC_PCMPEQW: + gen_helper_pcmpeqw(tcg_ctx, t0, t0, t1); + break; + case OPC_PCMPGTW: + gen_helper_pcmpgtw(tcg_ctx, t0, t0, t1); + break; + case OPC_PCMPEQH: + gen_helper_pcmpeqh(tcg_ctx, t0, t0, t1); + break; + case OPC_PCMPGTH: + gen_helper_pcmpgth(tcg_ctx, t0, t0, t1); + break; + case OPC_PCMPEQB: + gen_helper_pcmpeqb(tcg_ctx, t0, t0, t1); + break; + case OPC_PCMPGTB: + gen_helper_pcmpgtb(tcg_ctx, t0, t0, t1); + break; + + case OPC_PSLLW: + gen_helper_psllw(tcg_ctx, t0, t0, t1); + break; + case OPC_PSLLH: + gen_helper_psllh(tcg_ctx, t0, t0, t1); + break; + case OPC_PSRLW: + gen_helper_psrlw(tcg_ctx, t0, t0, t1); + break; + case OPC_PSRLH: + gen_helper_psrlh(tcg_ctx, t0, t0, t1); + break; + case OPC_PSRAW: + gen_helper_psraw(tcg_ctx, t0, t0, t1); + break; + case OPC_PSRAH: + gen_helper_psrah(tcg_ctx, t0, t0, t1); + break; + + case OPC_PMULLH: + gen_helper_pmullh(tcg_ctx, t0, t0, t1); + break; + case OPC_PMULHH: + gen_helper_pmulhh(tcg_ctx, t0, t0, t1); + break; + case OPC_PMULHUH: + gen_helper_pmulhuh(tcg_ctx, t0, t0, t1); + break; + case OPC_PMADDHW: + gen_helper_pmaddhw(tcg_ctx, t0, t0, t1); + break; + + case OPC_PASUBUB: + gen_helper_pasubub(tcg_ctx, t0, t0, t1); + break; + case OPC_BIADD: + gen_helper_biadd(tcg_ctx, t0, t0); + break; + case OPC_PMOVMSKB: + gen_helper_pmovmskb(tcg_ctx, t0, t0); + break; + + case OPC_PADDD: + tcg_gen_add_i64(tcg_ctx, t0, t0, t1); + break; + case OPC_PSUBD: + tcg_gen_sub_i64(tcg_ctx, t0, t0, t1); + break; + case OPC_XOR_CP2: + tcg_gen_xor_i64(tcg_ctx, t0, t0, t1); + break; + case OPC_NOR_CP2: + tcg_gen_nor_i64(tcg_ctx, t0, t0, t1); + break; + case OPC_AND_CP2: + tcg_gen_and_i64(tcg_ctx, t0, t0, t1); + break; + case OPC_OR_CP2: + tcg_gen_or_i64(tcg_ctx, t0, t0, t1); + break; + + case OPC_PANDN: + tcg_gen_andc_i64(tcg_ctx, t0, t1, t0); + break; + + case OPC_PINSRH_0: + tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 
0, 16); + break; + case OPC_PINSRH_1: + tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 16, 16); + break; + case OPC_PINSRH_2: + tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 32, 16); + break; + case OPC_PINSRH_3: + tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 48, 16); + break; + + case OPC_PEXTRH: + tcg_gen_andi_i64(tcg_ctx, t1, t1, 3); + tcg_gen_shli_i64(tcg_ctx, t1, t1, 4); + tcg_gen_shr_i64(tcg_ctx, t0, t0, t1); + tcg_gen_ext16u_i64(tcg_ctx, t0, t0); + break; + + case OPC_ADDU_CP2: + tcg_gen_add_i64(tcg_ctx, t0, t0, t1); + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + break; + case OPC_SUBU_CP2: + tcg_gen_sub_i64(tcg_ctx, t0, t0, t1); + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + break; + + case OPC_SLL_CP2: + shift_max = 32; + goto do_shift; + case OPC_SRL_CP2: + shift_max = 32; + goto do_shift; + case OPC_SRA_CP2: + shift_max = 32; + goto do_shift; + case OPC_DSLL_CP2: + shift_max = 64; + goto do_shift; + case OPC_DSRL_CP2: + shift_max = 64; + goto do_shift; + case OPC_DSRA_CP2: + shift_max = 64; + goto do_shift; + do_shift: + /* Make sure shift count isn't TCG undefined behaviour. */ + tcg_gen_andi_i64(tcg_ctx, t1, t1, shift_max - 1); + + switch (opc) { + case OPC_SLL_CP2: + case OPC_DSLL_CP2: + tcg_gen_shl_i64(tcg_ctx, t0, t0, t1); + break; + case OPC_SRA_CP2: + case OPC_DSRA_CP2: + /* + * Since SRA is UndefinedResult without sign-extended inputs, + * we can treat SRA and DSRA the same. + */ + tcg_gen_sar_i64(tcg_ctx, t0, t0, t1); + break; + case OPC_SRL_CP2: + /* We want to shift in zeros for SRL; zero-extend first. */ + tcg_gen_ext32u_i64(tcg_ctx, t0, t0); + /* FALLTHRU */ + case OPC_DSRL_CP2: + tcg_gen_shr_i64(tcg_ctx, t0, t0, t1); + break; + } + + if (shift_max == 32) { + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + } + + /* Shifts larger than MAX produce zero. */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LTU, t1, t1, shift_max); + tcg_gen_neg_i64(tcg_ctx, t1, t1); + tcg_gen_and_i64(tcg_ctx, t0, t0, t1); + break; + + case OPC_ADD_CP2: + case OPC_DADD_CP2: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGLabel *lab = gen_new_label(tcg_ctx); + + tcg_gen_mov_i64(tcg_ctx, t2, t0); + tcg_gen_add_i64(tcg_ctx, t0, t1, t2); + if (opc == OPC_ADD_CP2) { + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + } + tcg_gen_xor_i64(tcg_ctx, t1, t1, t2); + tcg_gen_xor_i64(tcg_ctx, t2, t2, t0); + tcg_gen_andc_i64(tcg_ctx, t1, t2, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_GE, t1, 0, lab); + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, lab); + break; + } + + case OPC_SUB_CP2: + case OPC_DSUB_CP2: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGLabel *lab = gen_new_label(tcg_ctx); + + tcg_gen_mov_i64(tcg_ctx, t2, t0); + tcg_gen_sub_i64(tcg_ctx, t0, t1, t2); + if (opc == OPC_SUB_CP2) { + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + } + tcg_gen_xor_i64(tcg_ctx, t1, t1, t2); + tcg_gen_xor_i64(tcg_ctx, t2, t2, t0); + tcg_gen_and_i64(tcg_ctx, t1, t1, t2); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_GE, t1, 0, lab); + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, lab); + break; + } + + case OPC_PMULUW: + tcg_gen_ext32u_i64(tcg_ctx, t0, t0); + tcg_gen_ext32u_i64(tcg_ctx, t1, t1); + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); + break; + + case OPC_SEQU_CP2: + case OPC_SEQ_CP2: + cond = TCG_COND_EQ; + goto do_cc_cond; + break; + case OPC_SLTU_CP2: + cond = TCG_COND_LTU; + goto do_cc_cond; + break; + case OPC_SLT_CP2: + cond = TCG_COND_LT; + goto do_cc_cond; + break; + case OPC_SLEU_CP2: + cond = TCG_COND_LEU; + goto do_cc_cond; + break; + case 
OPC_SLE_CP2: + cond = TCG_COND_LE; + do_cc_cond: + { + int cc = (ctx->opcode >> 8) & 0x7; + TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i32 t32 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_setcond_i64(tcg_ctx, cond, t64, t0, t1); + tcg_gen_extrl_i64_i32(tcg_ctx, t32, t64); + tcg_gen_deposit_i32(tcg_ctx, tcg_ctx->fpu_fcr31, tcg_ctx->fpu_fcr31, t32, + get_fp_bit(cc), 1); + + tcg_temp_free_i32(tcg_ctx, t32); + tcg_temp_free_i64(tcg_ctx, t64); + } + goto no_rd; + break; + default: + MIPS_INVAL("loongson_cp2"); + generate_exception_end(ctx, EXCP_RI); + return; + } + + gen_store_fpr64(ctx, t0, rd); + +no_rd: + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* Traps */ +static void gen_trap(DisasContext *ctx, uint32_t opc, + int rs, int rt, int16_t imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int cond; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + cond = 0; + /* Load needed operands */ + switch (opc) { + case OPC_TEQ: + case OPC_TGE: + case OPC_TGEU: + case OPC_TLT: + case OPC_TLTU: + case OPC_TNE: + /* Compare two registers */ + if (rs != rt) { + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + cond = 1; + } + break; + case OPC_TEQI: + case OPC_TGEI: + case OPC_TGEIU: + case OPC_TLTI: + case OPC_TLTIU: + case OPC_TNEI: + /* Compare register to immediate */ + if (rs != 0 || imm != 0) { + gen_load_gpr(tcg_ctx, t0, rs); + tcg_gen_movi_tl(tcg_ctx, t1, (int32_t)imm); + cond = 1; + } + break; + } + if (cond == 0) { + switch (opc) { + case OPC_TEQ: /* rs == rs */ + case OPC_TEQI: /* r0 == 0 */ + case OPC_TGE: /* rs >= rs */ + case OPC_TGEI: /* r0 >= 0 */ + case OPC_TGEU: /* rs >= rs unsigned */ + case OPC_TGEIU: /* r0 >= 0 unsigned */ + /* Always trap */ + generate_exception_end(ctx, EXCP_TRAP); + break; + case OPC_TLT: /* rs < rs */ + case OPC_TLTI: /* r0 < 0 */ + case OPC_TLTU: /* rs < rs unsigned */ + case OPC_TLTIU: /* r0 < 0 unsigned */ + case OPC_TNE: /* rs != rs */ + case OPC_TNEI: /* r0 != 0 */ + /* Never trap: treat as NOP. 
*/ + break; + } + } else { + TCGLabel *l1 = gen_new_label(tcg_ctx); + + switch (opc) { + case OPC_TEQ: + case OPC_TEQI: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, t0, t1, l1); + break; + case OPC_TGE: + case OPC_TGEI: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_LT, t0, t1, l1); + break; + case OPC_TGEU: + case OPC_TGEIU: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_LTU, t0, t1, l1); + break; + case OPC_TLT: + case OPC_TLTI: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, t0, t1, l1); + break; + case OPC_TLTU: + case OPC_TLTIU: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GEU, t0, t1, l1); + break; + case OPC_TNE: + case OPC_TNEI: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_EQ, t0, t1, l1); + break; + } + generate_exception(ctx, EXCP_TRAP); + gen_set_label(tcg_ctx, l1); + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) +{ + if (unlikely(ctx->base.singlestep_enabled)) { + return false; + } + + return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); +} + +static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (use_goto_tb(ctx, dest)) { + tcg_gen_goto_tb(tcg_ctx, n); + gen_save_pc(tcg_ctx, dest); + tcg_gen_exit_tb(tcg_ctx, ctx->base.tb, n); + } else { + gen_save_pc(tcg_ctx, dest); + if (ctx->base.singlestep_enabled) { + save_cpu_state(ctx, 0); + gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env); + } + tcg_gen_lookup_and_goto_ptr(tcg_ctx); + } +} + +/* Branches (before delay slot) */ +static void gen_compute_branch(DisasContext *ctx, uint32_t opc, + int insn_bytes, + int rs, int rt, int32_t offset, + int delayslot_size) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong btgt = -1; + int blink = 0; + int bcond_compute = 0; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + if (ctx->hflags & MIPS_HFLAG_BMASK) { +#ifdef MIPS_DEBUG_DISAS + LOG_DISAS("Branch in delay / forbidden slot at PC 0x" + TARGET_FMT_lx "\n", ctx->base.pc_next); +#endif + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + /* Load needed operands */ + switch (opc) { + case OPC_BEQ: + case OPC_BEQL: + case OPC_BNE: + case OPC_BNEL: + /* Compare two registers */ + if (rs != rt) { + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + bcond_compute = 1; + } + btgt = ctx->base.pc_next + insn_bytes + offset; + break; + case OPC_BGEZ: + case OPC_BGEZAL: + case OPC_BGEZALL: + case OPC_BGEZL: + case OPC_BGTZ: + case OPC_BGTZL: + case OPC_BLEZ: + case OPC_BLEZL: + case OPC_BLTZ: + case OPC_BLTZAL: + case OPC_BLTZALL: + case OPC_BLTZL: + /* Compare to zero */ + if (rs != 0) { + gen_load_gpr(tcg_ctx, t0, rs); + bcond_compute = 1; + } + btgt = ctx->base.pc_next + insn_bytes + offset; + break; + case OPC_BPOSGE32: +#if defined(TARGET_MIPS64) + case OPC_BPOSGE64: + tcg_gen_andi_tl(tcg_ctx, t0, tcg_ctx->cpu_dspctrl, 0x7F); +#else + tcg_gen_andi_tl(tcg_ctx, t0, tcg_ctx->cpu_dspctrl, 0x3F); +#endif + bcond_compute = 1; + btgt = ctx->base.pc_next + insn_bytes + offset; + break; + case OPC_J: + case OPC_JAL: + case OPC_JALX: + /* Jump to immediate */ + btgt = ((ctx->base.pc_next + insn_bytes) & (int32_t)0xF0000000) | + (uint32_t)offset; + break; + case OPC_JR: + case OPC_JALR: + /* Jump to register */ + if (offset != 0 && offset != 16) { + /* + * Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the + * others are reserved. 
+ */ + MIPS_INVAL("jump hint"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + gen_load_gpr(tcg_ctx, tcg_ctx->btarget, rs); + break; + default: + MIPS_INVAL("branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + if (bcond_compute == 0) { + /* No condition to be computed */ + switch (opc) { + case OPC_BEQ: /* rx == rx */ + case OPC_BEQL: /* rx == rx likely */ + case OPC_BGEZ: /* 0 >= 0 */ + case OPC_BGEZL: /* 0 >= 0 likely */ + case OPC_BLEZ: /* 0 <= 0 */ + case OPC_BLEZL: /* 0 <= 0 likely */ + /* Always take */ + ctx->hflags |= MIPS_HFLAG_B; + break; + case OPC_BGEZAL: /* 0 >= 0 */ + case OPC_BGEZALL: /* 0 >= 0 likely */ + /* Always take and link */ + blink = 31; + ctx->hflags |= MIPS_HFLAG_B; + break; + case OPC_BNE: /* rx != rx */ + case OPC_BGTZ: /* 0 > 0 */ + case OPC_BLTZ: /* 0 < 0 */ + /* Treat as NOP. */ + goto out; + case OPC_BLTZAL: /* 0 < 0 */ + /* + * Handle as an unconditional branch to get correct delay + * slot checking. + */ + blink = 31; + btgt = ctx->base.pc_next + insn_bytes + delayslot_size; + ctx->hflags |= MIPS_HFLAG_B; + break; + case OPC_BLTZALL: /* 0 < 0 likely */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 8); + /* Skip the instruction in the delay slot */ + ctx->base.pc_next += 4; + goto out; + case OPC_BNEL: /* rx != rx likely */ + case OPC_BGTZL: /* 0 > 0 likely */ + case OPC_BLTZL: /* 0 < 0 likely */ + /* Skip the instruction in the delay slot */ + ctx->base.pc_next += 4; + goto out; + case OPC_J: + ctx->hflags |= MIPS_HFLAG_B; + break; + case OPC_JALX: + ctx->hflags |= MIPS_HFLAG_BX; + /* Fallthrough */ + case OPC_JAL: + blink = 31; + ctx->hflags |= MIPS_HFLAG_B; + break; + case OPC_JR: + ctx->hflags |= MIPS_HFLAG_BR; + break; + case OPC_JALR: + blink = rt; + ctx->hflags |= MIPS_HFLAG_BR; + break; + default: + MIPS_INVAL("branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + } else { + switch (opc) { + case OPC_BEQ: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->bcond, t0, t1); + goto not_likely; + case OPC_BEQL: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->bcond, t0, t1); + goto likely; + case OPC_BNE: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->bcond, t0, t1); + goto not_likely; + case OPC_BNEL: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->bcond, t0, t1); + goto likely; + case OPC_BGEZ: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 0); + goto not_likely; + case OPC_BGEZL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 0); + goto likely; + case OPC_BGEZAL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 0); + blink = 31; + goto not_likely; + case OPC_BGEZALL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 0); + blink = 31; + goto likely; + case OPC_BGTZ: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GT, tcg_ctx->bcond, t0, 0); + goto not_likely; + case OPC_BGTZL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GT, tcg_ctx->bcond, t0, 0); + goto likely; + case OPC_BLEZ: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LE, tcg_ctx->bcond, t0, 0); + goto not_likely; + case OPC_BLEZL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LE, tcg_ctx->bcond, t0, 0); + goto likely; + case OPC_BLTZ: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->bcond, t0, 0); + goto not_likely; + case OPC_BLTZL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->bcond, t0, 0); + goto likely; + case OPC_BPOSGE32: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 32); + goto not_likely; +#if defined(TARGET_MIPS64) + case 
OPC_BPOSGE64: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 64); + goto not_likely; +#endif + case OPC_BLTZAL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->bcond, t0, 0); + blink = 31; + not_likely: + ctx->hflags |= MIPS_HFLAG_BC; + break; + case OPC_BLTZALL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->bcond, t0, 0); + blink = 31; + likely: + ctx->hflags |= MIPS_HFLAG_BL; + break; + default: + MIPS_INVAL("conditional branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + } + + ctx->btarget = btgt; + + switch (delayslot_size) { + case 2: + ctx->hflags |= MIPS_HFLAG_BDS16; + break; + case 4: + ctx->hflags |= MIPS_HFLAG_BDS32; + break; + } + + if (blink > 0) { + int post_delay = insn_bytes + delayslot_size; + int lowbit = !!(ctx->hflags & MIPS_HFLAG_M16); + + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[blink], + ctx->base.pc_next + post_delay + lowbit); + } + + out: + if (insn_bytes == 2) { + ctx->hflags |= MIPS_HFLAG_B16; + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + + +/* nanoMIPS Branches */ +static void gen_compute_branch_nm(DisasContext *ctx, uint32_t opc, + int insn_bytes, + int rs, int rt, int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong btgt = -1; + int bcond_compute = 0; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + /* Load needed operands */ + switch (opc) { + case OPC_BEQ: + case OPC_BNE: + /* Compare two registers */ + if (rs != rt) { + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + bcond_compute = 1; + } + btgt = ctx->base.pc_next + insn_bytes + offset; + break; + case OPC_BGEZAL: + /* Compare to zero */ + if (rs != 0) { + gen_load_gpr(tcg_ctx, t0, rs); + bcond_compute = 1; + } + btgt = ctx->base.pc_next + insn_bytes + offset; + break; + case OPC_BPOSGE32: + tcg_gen_andi_tl(tcg_ctx, t0, tcg_ctx->cpu_dspctrl, 0x3F); + bcond_compute = 1; + btgt = ctx->base.pc_next + insn_bytes + offset; + break; + case OPC_JR: + case OPC_JALR: + /* Jump to register */ + if (offset != 0 && offset != 16) { + /* + * Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the + * others are reserved. 
+ */ + MIPS_INVAL("jump hint"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + gen_load_gpr(tcg_ctx, tcg_ctx->btarget, rs); + break; + default: + MIPS_INVAL("branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + if (bcond_compute == 0) { + /* No condition to be computed */ + switch (opc) { + case OPC_BEQ: /* rx == rx */ + /* Always take */ + ctx->hflags |= MIPS_HFLAG_B; + break; + case OPC_BGEZAL: /* 0 >= 0 */ + /* Always take and link */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], + ctx->base.pc_next + insn_bytes); + ctx->hflags |= MIPS_HFLAG_B; + break; + case OPC_BNE: /* rx != rx */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 8); + /* Skip the instruction in the delay slot */ + ctx->base.pc_next += 4; + goto out; + case OPC_JR: + ctx->hflags |= MIPS_HFLAG_BR; + break; + case OPC_JALR: + if (rt > 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], + ctx->base.pc_next + insn_bytes); + } + ctx->hflags |= MIPS_HFLAG_BR; + break; + default: + MIPS_INVAL("branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + } else { + switch (opc) { + case OPC_BEQ: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->bcond, t0, t1); + goto not_likely; + case OPC_BNE: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->bcond, t0, t1); + goto not_likely; + case OPC_BGEZAL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 0); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], + ctx->base.pc_next + insn_bytes); + goto not_likely; + case OPC_BPOSGE32: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->bcond, t0, 32); + not_likely: + ctx->hflags |= MIPS_HFLAG_BC; + break; + default: + MIPS_INVAL("conditional branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + } + + ctx->btarget = btgt; + + out: + if (insn_bytes == 2) { + ctx->hflags |= MIPS_HFLAG_B16; + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + + +/* special3 bitfield operations */ +static void gen_bitops(DisasContext *ctx, uint32_t opc, int rt, + int rs, int lsb, int msb) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t1, rs); + switch (opc) { + case OPC_EXT: + if (lsb + msb > 31) { + goto fail; + } + if (msb != 31) { + tcg_gen_extract_tl(tcg_ctx, t0, t1, lsb, msb + 1); + } else { + /* + * The two checks together imply that lsb == 0, + * so this is a simple sign-extension. 
+             */
+            tcg_gen_ext32s_tl(tcg_ctx, t0, t1);
+        }
+        break;
+#if defined(TARGET_MIPS64)
+    case OPC_DEXTU:
+        lsb += 32;
+        goto do_dext;
+    case OPC_DEXTM:
+        msb += 32;
+        goto do_dext;
+    case OPC_DEXT:
+    do_dext:
+        if (lsb + msb > 63) {
+            goto fail;
+        }
+        tcg_gen_extract_tl(tcg_ctx, t0, t1, lsb, msb + 1);
+        break;
+#endif
+    case OPC_INS:
+        if (lsb > msb) {
+            goto fail;
+        }
+        gen_load_gpr(tcg_ctx, t0, rt);
+        tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb, msb - lsb + 1);
+        tcg_gen_ext32s_tl(tcg_ctx, t0, t0);
+        break;
+#if defined(TARGET_MIPS64)
+    case OPC_DINSU:
+        lsb += 32;
+        /* FALLTHRU */
+    case OPC_DINSM:
+        msb += 32;
+        /* FALLTHRU */
+    case OPC_DINS:
+        if (lsb > msb) {
+            goto fail;
+        }
+        gen_load_gpr(tcg_ctx, t0, rt);
+        tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb, msb - lsb + 1);
+        break;
+#endif
+    default:
+fail:
+        MIPS_INVAL("bitops");
+        generate_exception_end(ctx, EXCP_RI);
+        tcg_temp_free(tcg_ctx, t0);
+        tcg_temp_free(tcg_ctx, t1);
+        return;
+    }
+    gen_store_gpr(tcg_ctx, t0, rt);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free(tcg_ctx, t1);
+}
+
+static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+
+    if (rd == 0) {
+        /* If no destination, treat it as a NOP. */
+        return;
+    }
+
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_load_gpr(tcg_ctx, t0, rt);
+    switch (op2) {
+    case OPC_WSBH:
+        /* WSBH: swap the bytes within each halfword of the low word. */
+        {
+            TCGv t1 = tcg_temp_new(tcg_ctx);
+            TCGv t2 = tcg_const_tl(tcg_ctx, 0x00FF00FF);
+
+            tcg_gen_shri_tl(tcg_ctx, t1, t0, 8);
+            tcg_gen_and_tl(tcg_ctx, t1, t1, t2);
+            tcg_gen_and_tl(tcg_ctx, t0, t0, t2);
+            tcg_gen_shli_tl(tcg_ctx, t0, t0, 8);
+            tcg_gen_or_tl(tcg_ctx, t0, t0, t1);
+            tcg_temp_free(tcg_ctx, t2);
+            tcg_temp_free(tcg_ctx, t1);
+            tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0);
+        }
+        break;
+    case OPC_SEB:
+        tcg_gen_ext8s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0);
+        break;
+    case OPC_SEH:
+        tcg_gen_ext16s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0);
+        break;
+#if defined(TARGET_MIPS64)
+    case OPC_DSBH:
+        /* DSBH: swap the bytes within each halfword of the doubleword. */
+        {
+            TCGv t1 = tcg_temp_new(tcg_ctx);
+            TCGv t2 = tcg_const_tl(tcg_ctx, 0x00FF00FF00FF00FFULL);
+
+            tcg_gen_shri_tl(tcg_ctx, t1, t0, 8);
+            tcg_gen_and_tl(tcg_ctx, t1, t1, t2);
+            tcg_gen_and_tl(tcg_ctx, t0, t0, t2);
+            tcg_gen_shli_tl(tcg_ctx, t0, t0, 8);
+            tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1);
+            tcg_temp_free(tcg_ctx, t2);
+            tcg_temp_free(tcg_ctx, t1);
+        }
+        break;
+    case OPC_DSHD:
+        /* DSHD: reverse the order of the four halfwords. */
+        {
+            TCGv t1 = tcg_temp_new(tcg_ctx);
+            TCGv t2 = tcg_const_tl(tcg_ctx, 0x0000FFFF0000FFFFULL);
+
+            tcg_gen_shri_tl(tcg_ctx, t1, t0, 16);
+            tcg_gen_and_tl(tcg_ctx, t1, t1, t2);
+            tcg_gen_and_tl(tcg_ctx, t0, t0, t2);
+            tcg_gen_shli_tl(tcg_ctx, t0, t0, 16);
+            tcg_gen_or_tl(tcg_ctx, t0, t0, t1);
+            tcg_gen_shri_tl(tcg_ctx, t1, t0, 32);
+            tcg_gen_shli_tl(tcg_ctx, t0, t0, 32);
+            tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1);
+            tcg_temp_free(tcg_ctx, t2);
+            tcg_temp_free(tcg_ctx, t1);
+        }
+        break;
+#endif
+    default:
+        MIPS_INVAL("bshfl");
+        generate_exception_end(ctx, EXCP_RI);
+        tcg_temp_free(tcg_ctx, t0);
+        return;
+    }
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+static void gen_lsa(DisasContext *ctx, int opc, int rd, int rs, int rt,
+                    int imm2)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+    TCGv t1;
+    if (rd == 0) {
+        /* Treat as NOP.
*/ + return; + } + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_shli_tl(tcg_ctx, t0, t0, imm2 + 1); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0, t1); + if (opc == OPC_LSA) { + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rd]); + } + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + + return; +} + +static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs, + int rt, int bits) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + if (rd == 0) { + /* Treat as NOP. */ + return; + } + t0 = tcg_temp_new(tcg_ctx); + if (bits == 0 || bits == wordsz) { + if (bits == 0) { + gen_load_gpr(tcg_ctx, t0, rt); + } else { + gen_load_gpr(tcg_ctx, t0, rs); + } + switch (wordsz) { + case 32: + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); + break; +#if defined(TARGET_MIPS64) + case 64: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); + break; +#endif + } + } else { + TCGv t1 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rt); + gen_load_gpr(tcg_ctx, t1, rs); + switch (wordsz) { + case 32: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_tl_i64(tcg_ctx, t2, t1, t0); + tcg_gen_shri_i64(tcg_ctx, t2, t2, 32 - bits); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_gpr[rd], t2); + tcg_temp_free_i64(tcg_ctx, t2); + } + break; +#if defined(TARGET_MIPS64) + case 64: + tcg_gen_shli_tl(tcg_ctx, t0, t0, bits); + tcg_gen_shri_tl(tcg_ctx, t1, t1, 64 - bits); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1, t0); + break; +#endif + } + tcg_temp_free(tcg_ctx, t1); + } + + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt, + int bp) +{ + gen_align_bits(ctx, wordsz, rd, rs, rt, bp * 8); +} + +static void gen_ext(DisasContext *ctx, int wordsz, int rd, int rs, int rt, + int shift) +{ + gen_align_bits(ctx, wordsz, rd, rs, rt, wordsz - shift); +} + +static void gen_bitswap(DisasContext *ctx, int opc, int rd, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + if (rd == 0) { + /* Treat as NOP. 
*/ + return; + } + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rt); + switch (opc) { + case OPC_BITSWAP: + gen_helper_bitswap(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); + break; +#if defined(TARGET_MIPS64) + case OPC_DBITSWAP: + gen_helper_dbitswap(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); + break; +#endif + } + tcg_temp_free(tcg_ctx, t0); +} + +/* CP0 (MMU and control) */ +static inline void gen_mthc0_entrylo(TCGContext *tcg_ctx, TCGv arg, target_ulong off) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_tl_i64(tcg_ctx, t0, arg); + tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, off); +#if defined(TARGET_MIPS64) + tcg_gen_deposit_i64(tcg_ctx, t1, t1, t0, 30, 32); +#else + tcg_gen_concat32_i64(tcg_ctx, t1, t1, t0); +#endif + tcg_gen_st_i64(tcg_ctx, t1, tcg_ctx->cpu_env, off); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static inline void gen_mthc0_store64(TCGContext *tcg_ctx, TCGv arg, target_ulong off) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_tl_i64(tcg_ctx, t0, arg); + tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, off); + tcg_gen_concat32_i64(tcg_ctx, t1, t1, t0); + tcg_gen_st_i64(tcg_ctx, t1, tcg_ctx->cpu_env, off); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static inline void gen_mfhc0_entrylo(TCGContext *tcg_ctx, TCGv arg, target_ulong off) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, off); +#if defined(TARGET_MIPS64) + tcg_gen_shri_i64(tcg_ctx, t0, t0, 30); +#else + tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); +#endif + gen_move_low32(tcg_ctx, arg, t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static inline void gen_mfhc0_load64(TCGContext *tcg_ctx, TCGv arg, target_ulong off, int shift) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, off); + tcg_gen_shri_i64(tcg_ctx, t0, t0, 32 + shift); + gen_move_low32(tcg_ctx, arg, t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static inline void gen_mfc0_load32(TCGContext *tcg_ctx, TCGv arg, target_ulong off) +{ + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, off); + tcg_gen_ext_i32_tl(tcg_ctx, arg, t0); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static inline void gen_mfc0_load64(TCGContext *tcg_ctx, TCGv arg, target_ulong off) +{ + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, off); + tcg_gen_ext32s_tl(tcg_ctx, arg, arg); +} + +static inline void gen_mtc0_store32(TCGContext *tcg_ctx, TCGv arg, target_ulong off) +{ + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t0, arg); + tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, off); + tcg_temp_free_i32(tcg_ctx, t0); +} + +#define CP0_CHECK(c) \ + do { \ + if (!(c)) { \ + goto cp0_unimplemented; \ + } \ + } while (0) + +static void gen_mfhc0(DisasContext *ctx, TCGv arg, int reg, int sel) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + //const char *register_name = "invalid"; + + switch (reg) { + case CP0_REGISTER_02: + switch (sel) { + case 0: + CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA); + gen_mfhc0_entrylo(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_EntryLo0)); + //register_name = "EntryLo0"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_03: + switch (sel) { + case CP0_REG03__ENTRYLO1: + CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA); + gen_mfhc0_entrylo(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_EntryLo1)); + //register_name = "EntryLo1"; + break; 
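+        /*
+         * With ELPA, EntryLo0/1 are wider than 32 bits; MFHC0 returns the
+         * 32 bits starting at bit 30 (MIPS64) or bit 32 of the underlying
+         * field (see gen_mfhc0_entrylo above).
+         */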
+ default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_09: + switch (sel) { + case CP0_REG09__SAAR: + CP0_CHECK(ctx->saar); + gen_helper_mfhc0_saar(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "SAAR"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_17: + switch (sel) { + case CP0_REG17__LLADDR: + gen_mfhc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_LLAddr), + ctx->CP0_LLAddr_shift); + //register_name = "LLAddr"; + break; + case CP0_REG17__MAAR: + CP0_CHECK(ctx->mrp); + gen_helper_mfhc0_maar(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "MAAR"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_19: + switch (sel) { + case CP0_REG19__WATCHHI0: + case CP0_REG19__WATCHHI1: + case CP0_REG19__WATCHHI2: + case CP0_REG19__WATCHHI3: + case CP0_REG19__WATCHHI4: + case CP0_REG19__WATCHHI5: + case CP0_REG19__WATCHHI6: + case CP0_REG19__WATCHHI7: + /* upper 32 bits are only available when Config5MI != 0 */ + CP0_CHECK(ctx->mi); + gen_mfhc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_WatchHi[sel]), 0); + //register_name = "WatchHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_28: + switch (sel) { + case 0: + case 2: + case 4: + case 6: + gen_mfhc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_TagLo), 0); + //register_name = "TagLo"; + break; + default: + goto cp0_unimplemented; + } + break; + default: + goto cp0_unimplemented; + } + return; + +cp0_unimplemented: + // qemu_log_mask(LOG_UNIMP, "mfhc0 %s (reg %d sel %d)\n", + // register_name, reg, sel); + tcg_gen_movi_tl(tcg_ctx, arg, 0); +} + +static void gen_mthc0(DisasContext *ctx, TCGv arg, int reg, int sel) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + //const char *register_name = "invalid"; + uint64_t mask = ctx->PAMask >> 36; + + switch (reg) { + case CP0_REGISTER_02: + switch (sel) { + case 0: + CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA); + tcg_gen_andi_tl(tcg_ctx, arg, arg, mask); + gen_mthc0_entrylo(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_EntryLo0)); + //register_name = "EntryLo0"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_03: + switch (sel) { + case CP0_REG03__ENTRYLO1: + CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA); + tcg_gen_andi_tl(tcg_ctx, arg, arg, mask); + gen_mthc0_entrylo(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_EntryLo1)); + //register_name = "EntryLo1"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_09: + switch (sel) { + case CP0_REG09__SAAR: + CP0_CHECK(ctx->saar); + gen_helper_mthc0_saar(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SAAR"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_17: + switch (sel) { + case CP0_REG17__LLADDR: + /* + * LLAddr is read-only (the only exception is bit 0 if LLB is + * supported); the CP0_LLAddr_rw_bitmask does not seem to be + * relevant for modern MIPS cores supporting MTHC0, therefore + * treating MTHC0 to LLAddr as NOP. 
+ */ + //register_name = "LLAddr"; + break; + case CP0_REG17__MAAR: + CP0_CHECK(ctx->mrp); + gen_helper_mthc0_maar(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "MAAR"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_19: + switch (sel) { + case CP0_REG19__WATCHHI0: + case CP0_REG19__WATCHHI1: + case CP0_REG19__WATCHHI2: + case CP0_REG19__WATCHHI3: + case CP0_REG19__WATCHHI4: + case CP0_REG19__WATCHHI5: + case CP0_REG19__WATCHHI6: + case CP0_REG19__WATCHHI7: + /* upper 32 bits are only available when Config5MI != 0 */ + CP0_CHECK(ctx->mi); + gen_helper_0e1i(mthc0_watchhi, arg, sel); + //register_name = "WatchHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_28: + switch (sel) { + case 0: + case 2: + case 4: + case 6: + tcg_gen_andi_tl(tcg_ctx, arg, arg, mask); + gen_mthc0_store64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_TagLo)); + //register_name = "TagLo"; + break; + default: + goto cp0_unimplemented; + } + break; + default: + goto cp0_unimplemented; + } + +cp0_unimplemented: + //qemu_log_mask(LOG_UNIMP, "mthc0 %s (reg %d sel %d)\n", + // register_name, reg, sel); + return; +} + +static inline void gen_mfc0_unimplemented(DisasContext *ctx, TCGv arg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->insn_flags & ISA_MIPS32R6) { + tcg_gen_movi_tl(tcg_ctx, arg, 0); + } else { + tcg_gen_movi_tl(tcg_ctx, arg, ~0); + } +} + +static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + //const char *register_name = "invalid"; + + if (sel != 0) { + check_insn(ctx, ISA_MIPS32); + } + + switch (reg) { + case CP0_REGISTER_00: + switch (sel) { + case CP0_REG00__INDEX: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Index)); + //register_name = "Index"; + break; + case CP0_REG00__MVPCONTROL: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_mvpcontrol(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "MVPControl"; + break; + case CP0_REG00__MVPCONF0: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_mvpconf0(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "MVPConf0"; + break; + case CP0_REG00__MVPCONF1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_mvpconf1(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "MVPConf1"; + break; + case CP0_REG00__VPCONTROL: + CP0_CHECK(ctx->vp); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPControl)); + //register_name = "VPControl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_01: + switch (sel) { + case CP0_REG01__RANDOM: + CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); + gen_helper_mfc0_random(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "Random"; + break; + case CP0_REG01__VPECONTROL: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEControl)); + //register_name = "VPEControl"; + break; + case CP0_REG01__VPECONF0: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf0)); + //register_name = "VPEConf0"; + break; + case CP0_REG01__VPECONF1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf1)); + //register_name = "VPEConf1"; + break; + case CP0_REG01__YQMASK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_YQMask)); + //register_name = "YQMask"; + break; + case CP0_REG01__VPESCHEDULE: + CP0_CHECK(ctx->insn_flags & ASE_MT); + 
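+            /*
+             * VPESchedule is kept as a full target_ulong; gen_mfc0_load64
+             * (defined above) loads it and sign-extends the low 32 bits
+             * for the MFC0 view.
+             */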
gen_mfc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPESchedule)); + //register_name = "VPESchedule"; + break; + case CP0_REG01__VPESCHEFBACK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load64(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEScheFBack)); + //register_name = "VPEScheFBack"; + break; + case CP0_REG01__VPEOPT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEOpt)); + //register_name = "VPEOpt"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_02: + switch (sel) { + case CP0_REG02__ENTRYLO0: + { + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_EntryLo0)); +#if defined(TARGET_MIPS64) + if (ctx->rxi) { + /* Move RI/XI fields to bits 31:30 */ + tcg_gen_shri_tl(tcg_ctx, arg, tmp, CP0EnLo_XI); + tcg_gen_deposit_tl(tcg_ctx, tmp, tmp, arg, 30, 2); + } +#endif + gen_move_low32(tcg_ctx, arg, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + } + //register_name = "EntryLo0"; + break; + case CP0_REG02__TCSTATUS: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tcstatus(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCStatus"; + break; + case CP0_REG02__TCBIND: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tcbind(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCBind"; + break; + case CP0_REG02__TCRESTART: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tcrestart(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCRestart"; + break; + case CP0_REG02__TCHALT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tchalt(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCHalt"; + break; + case CP0_REG02__TCCONTEXT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tccontext(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCContext"; + break; + case CP0_REG02__TCSCHEDULE: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tcschedule(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCSchedule"; + break; + case CP0_REG02__TCSCHEFBACK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tcschefback(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCScheFBack"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_03: + switch (sel) { + case CP0_REG03__ENTRYLO1: + { + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_EntryLo1)); +#if defined(TARGET_MIPS64) + if (ctx->rxi) { + /* Move RI/XI fields to bits 31:30 */ + tcg_gen_shri_tl(tcg_ctx, arg, tmp, CP0EnLo_XI); + tcg_gen_deposit_tl(tcg_ctx, tmp, tmp, arg, 30, 2); + } +#endif + gen_move_low32(tcg_ctx, arg, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + } + //register_name = "EntryLo1"; + break; + case CP0_REG03__GLOBALNUM: + CP0_CHECK(ctx->vp); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_GlobalNumber)); + //register_name = "GlobalNumber"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_04: + switch (sel) { + case CP0_REG04__CONTEXT: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_Context)); + tcg_gen_ext32s_tl(tcg_ctx, arg, arg); + //register_name = "Context"; + break; + case CP0_REG04__CONTEXTCONFIG: + /* SmartMIPS ASE */ + /* gen_helper_mfc0_contextconfig(tcg_ctx, arg); */ + //register_name = "ContextConfig"; + goto cp0_unimplemented; + case CP0_REG04__USERLOCAL: + CP0_CHECK(ctx->ulri); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + 
                          offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
+            //register_name = "UserLocal";
+            break;
+        case CP0_REG04__MMID:
+            CP0_CHECK(ctx->mi);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_MemoryMapID));
+            //register_name = "MMID";
+            break;
+        default:
+            goto cp0_unimplemented;
+        }
+        break;
+    case CP0_REGISTER_05:
+        switch (sel) {
+        case CP0_REG05__PAGEMASK:
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PageMask));
+            //register_name = "PageMask";
+            break;
+        case CP0_REG05__PAGEGRAIN:
+            check_insn(ctx, ISA_MIPS32R2);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PageGrain));
+            //register_name = "PageGrain";
+            break;
+        case CP0_REG05__SEGCTL0:
+            CP0_CHECK(ctx->sc);
+            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0));
+            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
+            //register_name = "SegCtl0";
+            break;
+        case CP0_REG05__SEGCTL1:
+            CP0_CHECK(ctx->sc);
+            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1));
+            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
+            //register_name = "SegCtl1";
+            break;
+        case CP0_REG05__SEGCTL2:
+            CP0_CHECK(ctx->sc);
+            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2));
+            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
+            //register_name = "SegCtl2";
+            break;
+        case CP0_REG05__PWBASE:
+            check_pw(ctx);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWBase));
+            //register_name = "PWBase";
+            break;
+        case CP0_REG05__PWFIELD:
+            check_pw(ctx);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWField));
+            //register_name = "PWField";
+            break;
+        case CP0_REG05__PWSIZE:
+            check_pw(ctx);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWSize));
+            //register_name = "PWSize";
+            break;
+        default:
+            goto cp0_unimplemented;
+        }
+        break;
+    case CP0_REGISTER_06:
+        switch (sel) {
+        case CP0_REG06__WIRED:
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Wired));
+            //register_name = "Wired";
+            break;
+        case CP0_REG06__SRSCONF0:
+            check_insn(ctx, ISA_MIPS32R2);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf0));
+            //register_name = "SRSConf0";
+            break;
+        case CP0_REG06__SRSCONF1:
+            check_insn(ctx, ISA_MIPS32R2);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf1));
+            //register_name = "SRSConf1";
+            break;
+        case CP0_REG06__SRSCONF2:
+            check_insn(ctx, ISA_MIPS32R2);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf2));
+            //register_name = "SRSConf2";
+            break;
+        case CP0_REG06__SRSCONF3:
+            check_insn(ctx, ISA_MIPS32R2);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf3));
+            //register_name = "SRSConf3";
+            break;
+        case CP0_REG06__SRSCONF4:
+            check_insn(ctx, ISA_MIPS32R2);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf4));
+            //register_name = "SRSConf4";
+            break;
+        case CP0_REG06__PWCTL:
+            check_pw(ctx);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWCtl));
+            //register_name = "PWCtl";
+            break;
+        default:
+            goto cp0_unimplemented;
+        }
+        break;
+    case CP0_REGISTER_07:
+        switch (sel) {
+        case CP0_REG07__HWRENA:
+            check_insn(ctx, ISA_MIPS32R2);
+            gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_HWREna));
+            //register_name = "HWREna";
+            break;
+        default:
+            goto cp0_unimplemented;
+        }
+        break;
+    case CP0_REGISTER_08:
+        switch (sel) {
+        case CP0_REG08__BADVADDR:
+            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
+            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
+            //register_name = 
"BadVAddr"; + break; + case CP0_REG08__BADINSTR: + CP0_CHECK(ctx->bi); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstr)); + //register_name = "BadInstr"; + break; + case CP0_REG08__BADINSTRP: + CP0_CHECK(ctx->bp); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrP)); + //register_name = "BadInstrP"; + break; + case CP0_REG08__BADINSTRX: + CP0_CHECK(ctx->bi); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrX)); + tcg_gen_andi_tl(tcg_ctx, arg, arg, ~0xffff); + //register_name = "BadInstrX"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_09: + switch (sel) { + case CP0_REG09__COUNT: + /* Mark as an IO operation because we read the time. */ + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_mfc0_count(tcg_ctx, arg, tcg_ctx->cpu_env); + /* + * Break the TB to be able to take timer interrupts immediately + * after reading count. DISAS_STOP isn't sufficient, we need to + * ensure we break completely out of translated code. + */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + //register_name = "Count"; + break; + case CP0_REG09__SAARI: + CP0_CHECK(ctx->saar); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SAARI)); + //register_name = "SAARI"; + break; + case CP0_REG09__SAAR: + CP0_CHECK(ctx->saar); + gen_helper_mfc0_saar(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "SAAR"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_10: + switch (sel) { + case CP0_REG10__ENTRYHI: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryHi)); + tcg_gen_ext32s_tl(tcg_ctx, arg, arg); + //register_name = "EntryHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_11: + switch (sel) { + case CP0_REG11__COMPARE: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Compare)); + //register_name = "Compare"; + break; + /* 6,7 are implementation dependent */ + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_12: + switch (sel) { + case CP0_REG12__STATUS: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Status)); + //register_name = "Status"; + break; + case CP0_REG12__INTCTL: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_IntCtl)); + //register_name = "IntCtl"; + break; + case CP0_REG12__SRSCTL: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSCtl)); + //register_name = "SRSCtl"; + break; + case CP0_REG12__SRSMAP: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); + //register_name = "SRSMap"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_13: + switch (sel) { + case CP0_REG13__CAUSE: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Cause)); + //register_name = "Cause"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_14: + switch (sel) { + case CP0_REG14__EPC: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); + tcg_gen_ext32s_tl(tcg_ctx, arg, arg); + //register_name = "EPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_15: + switch (sel) { + case CP0_REG15__PRID: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PRid)); + //register_name = "PRid"; + break; + case CP0_REG15__EBASE: + check_insn(ctx, ISA_MIPS32R2); + tcg_gen_ld_tl(tcg_ctx, 
arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EBase)); + tcg_gen_ext32s_tl(tcg_ctx, arg, arg); + //register_name = "EBase"; + break; + case CP0_REG15__CMGCRBASE: + check_insn(ctx, ISA_MIPS32R2); + CP0_CHECK(ctx->cmgcr); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase)); + tcg_gen_ext32s_tl(tcg_ctx, arg, arg); + //register_name = "CMGCRBase"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_16: + switch (sel) { + case CP0_REG16__CONFIG: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config0)); + //register_name = "Config"; + break; + case CP0_REG16__CONFIG1: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config1)); + //register_name = "Config1"; + break; + case CP0_REG16__CONFIG2: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config2)); + //register_name = "Config2"; + break; + case CP0_REG16__CONFIG3: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config3)); + //register_name = "Config3"; + break; + case CP0_REG16__CONFIG4: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config4)); + //register_name = "Config4"; + break; + case CP0_REG16__CONFIG5: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config5)); + //register_name = "Config5"; + break; + /* 6,7 are implementation dependent */ + case CP0_REG16__CONFIG6: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config6)); + //register_name = "Config6"; + break; + case CP0_REG16__CONFIG7: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config7)); + //register_name = "Config7"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_17: + switch (sel) { + case CP0_REG17__LLADDR: + gen_helper_mfc0_lladdr(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "LLAddr"; + break; + case CP0_REG17__MAAR: + CP0_CHECK(ctx->mrp); + gen_helper_mfc0_maar(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "MAAR"; + break; + case CP0_REG17__MAARI: + CP0_CHECK(ctx->mrp); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_MAARI)); + //register_name = "MAARI"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_18: + switch (sel) { + case CP0_REG18__WATCHLO0: + case CP0_REG18__WATCHLO1: + case CP0_REG18__WATCHLO2: + case CP0_REG18__WATCHLO3: + case CP0_REG18__WATCHLO4: + case CP0_REG18__WATCHLO5: + case CP0_REG18__WATCHLO6: + case CP0_REG18__WATCHLO7: + CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); + gen_helper_1e0i(mfc0_watchlo, arg, sel); + //register_name = "WatchLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_19: + switch (sel) { + case CP0_REG19__WATCHHI0: + case CP0_REG19__WATCHHI1: + case CP0_REG19__WATCHHI2: + case CP0_REG19__WATCHHI3: + case CP0_REG19__WATCHHI4: + case CP0_REG19__WATCHHI5: + case CP0_REG19__WATCHHI6: + case CP0_REG19__WATCHHI7: + CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); + gen_helper_1e0i(mfc0_watchhi, arg, sel); + //register_name = "WatchHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_20: + switch (sel) { + case CP0_REG20__XCONTEXT: +#if defined(TARGET_MIPS64) + check_insn(ctx, ISA_MIPS3); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_XContext)); + tcg_gen_ext32s_tl(tcg_ctx, arg, arg); + //register_name = "XContext"; + break; +#endif + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_21: + /* Officially reserved, but sel 0 is used for R1x000 framemask */ + CP0_CHECK(!(ctx->insn_flags & 
ISA_MIPS32R6)); + switch (sel) { + case 0: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Framemask)); + //register_name = "Framemask"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_22: + tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ + //register_name = "'Diagnostic"; /* implementation dependent */ + break; + case CP0_REGISTER_23: + switch (sel) { + case CP0_REG23__DEBUG: + gen_helper_mfc0_debug(tcg_ctx, arg, tcg_ctx->cpu_env); /* EJTAG support */ + //register_name = "Debug"; + break; + case CP0_REG23__TRACECONTROL: + /* PDtrace support */ + /* gen_helper_mfc0_tracecontrol(tcg_ctx, arg); */ + //register_name = "TraceControl"; + goto cp0_unimplemented; + case CP0_REG23__TRACECONTROL2: + /* PDtrace support */ + /* gen_helper_mfc0_tracecontrol2(tcg_ctx, arg); */ + //register_name = "TraceControl2"; + goto cp0_unimplemented; + case CP0_REG23__USERTRACEDATA1: + /* PDtrace support */ + /* gen_helper_mfc0_usertracedata1(tcg_ctx, arg);*/ + //register_name = "UserTraceData1"; + goto cp0_unimplemented; + case CP0_REG23__TRACEIBPC: + /* PDtrace support */ + /* gen_helper_mfc0_traceibpc(tcg_ctx, arg); */ + //register_name = "TraceIBPC"; + goto cp0_unimplemented; + case CP0_REG23__TRACEDBPC: + /* PDtrace support */ + /* gen_helper_mfc0_tracedbpc(tcg_ctx, arg); */ + //register_name = "TraceDBPC"; + goto cp0_unimplemented; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_24: + switch (sel) { + case CP0_REG24__DEPC: + /* EJTAG support */ + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); + tcg_gen_ext32s_tl(tcg_ctx, arg, arg); + //register_name = "DEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_25: + switch (sel) { + case CP0_REG25__PERFCTL0: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Performance0)); + //register_name = "Performance0"; + break; + case CP0_REG25__PERFCNT0: + /* gen_helper_mfc0_performance1(tcg_ctx, arg); */ + //register_name = "Performance1"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL1: + /* gen_helper_mfc0_performance2(tcg_ctx, arg); */ + //register_name = "Performance2"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT1: + /* gen_helper_mfc0_performance3(tcg_ctx, arg); */ + //register_name = "Performance3"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL2: + /* gen_helper_mfc0_performance4(tcg_ctx, arg); */ + //register_name = "Performance4"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT2: + /* gen_helper_mfc0_performance5(tcg_ctx, arg); */ + //register_name = "Performance5"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL3: + /* gen_helper_mfc0_performance6(tcg_ctx, arg); */ + //register_name = "Performance6"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT3: + /* gen_helper_mfc0_performance7(tcg_ctx, arg); */ + //register_name = "Performance7"; + goto cp0_unimplemented; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_26: + switch (sel) { + case CP0_REG26__ERRCTL: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_ErrCtl)); + //register_name = "ErrCtl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_27: + switch (sel) { + case CP0_REG27__CACHERR: + tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ + //register_name = "CacheErr"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_28: + switch (sel) { + case CP0_REG28__TAGLO: + case CP0_REG28__TAGLO1: + case CP0_REG28__TAGLO2: + case CP0_REG28__TAGLO3: + { + 
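+            /*
+             * TagLo is stored as a 64-bit field even on 32-bit targets;
+             * MFC0 returns only its low word here, while the upper half is
+             * reachable through MFHC0 (see gen_mfhc0 above).
+             */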
TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_TagLo)); + gen_move_low32(tcg_ctx, arg, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + } + //register_name = "TagLo"; + break; + case CP0_REG28__DATALO: + case CP0_REG28__DATALO1: + case CP0_REG28__DATALO2: + case CP0_REG28__DATALO3: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DataLo)); + //register_name = "DataLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_29: + switch (sel) { + case CP0_REG29__TAGHI: + case CP0_REG29__TAGHI1: + case CP0_REG29__TAGHI2: + case CP0_REG29__TAGHI3: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_TagHi)); + //register_name = "TagHi"; + break; + case CP0_REG29__DATAHI: + case CP0_REG29__DATAHI1: + case CP0_REG29__DATAHI2: + case CP0_REG29__DATAHI3: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DataHi)); + //register_name = "DataHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_30: + switch (sel) { + case CP0_REG30__ERROREPC: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); + tcg_gen_ext32s_tl(tcg_ctx, arg, arg); + //register_name = "ErrorEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_31: + switch (sel) { + case CP0_REG31__DESAVE: + /* EJTAG support */ + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); + //register_name = "DESAVE"; + break; + case CP0_REG31__KSCRATCH1: + case CP0_REG31__KSCRATCH2: + case CP0_REG31__KSCRATCH3: + case CP0_REG31__KSCRATCH4: + case CP0_REG31__KSCRATCH5: + case CP0_REG31__KSCRATCH6: + CP0_CHECK(ctx->kscrexist & (1 << sel)); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_KScratch[sel - 2])); + tcg_gen_ext32s_tl(tcg_ctx, arg, arg); + //register_name = "KScratch"; + break; + default: + goto cp0_unimplemented; + } + break; + default: + goto cp0_unimplemented; + } + return; + +cp0_unimplemented: + //qemu_log_mask(LOG_UNIMP, "mfc0 %s (reg %d sel %d)\n", + //register_name, reg, sel); + gen_mfc0_unimplemented(ctx, arg); +} + +static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + //const char *register_name = "invalid"; + + if (sel != 0) { + check_insn(ctx, ISA_MIPS32); + } + + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + + switch (reg) { + case CP0_REGISTER_00: + switch (sel) { + case CP0_REG00__INDEX: + gen_helper_mtc0_index(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Index"; + break; + case CP0_REG00__MVPCONTROL: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_mvpcontrol(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "MVPControl"; + break; + case CP0_REG00__MVPCONF0: + CP0_CHECK(ctx->insn_flags & ASE_MT); + /* ignored */ + //register_name = "MVPConf0"; + break; + case CP0_REG00__MVPCONF1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + /* ignored */ + //register_name = "MVPConf1"; + break; + case CP0_REG00__VPCONTROL: + CP0_CHECK(ctx->vp); + /* ignored */ + //register_name = "VPControl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_01: + switch (sel) { + case CP0_REG01__RANDOM: + /* ignored */ + //register_name = "Random"; + break; + case CP0_REG01__VPECONTROL: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "VPEControl"; + break; + case CP0_REG01__VPECONF0: + 
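+ /* As with the other VPE and TC selectors in this register, this is gated on the MT ASE; when it is absent, CP0_CHECK falls through to cp0_unimplemented. */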
CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "VPEConf0"; + break; + case CP0_REG01__VPECONF1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeconf1(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "VPEConf1"; + break; + case CP0_REG01__YQMASK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_yqmask(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "YQMask"; + break; + case CP0_REG01__VPESCHEDULE: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_VPESchedule)); + //register_name = "VPESchedule"; + break; + case CP0_REG01__VPESCHEFBACK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_VPEScheFBack)); + //register_name = "VPEScheFBack"; + break; + case CP0_REG01__VPEOPT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeopt(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "VPEOpt"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_02: + switch (sel) { + case CP0_REG02__ENTRYLO0: + gen_helper_mtc0_entrylo0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "EntryLo0"; + break; + case CP0_REG02__TCSTATUS: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCStatus"; + break; + case CP0_REG02__TCBIND: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCBind"; + break; + case CP0_REG02__TCRESTART: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCRestart"; + break; + case CP0_REG02__TCHALT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCHalt"; + break; + case CP0_REG02__TCCONTEXT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCContext"; + break; + case CP0_REG02__TCSCHEDULE: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCSchedule"; + break; + case CP0_REG02__TCSCHEFBACK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCScheFBack"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_03: + switch (sel) { + case CP0_REG03__ENTRYLO1: + gen_helper_mtc0_entrylo1(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "EntryLo1"; + break; + case CP0_REG03__GLOBALNUM: + CP0_CHECK(ctx->vp); + /* ignored */ + //register_name = "GlobalNumber"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_04: + switch (sel) { + case CP0_REG04__CONTEXT: + gen_helper_mtc0_context(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Context"; + break; + case CP0_REG04__CONTEXTCONFIG: + /* SmartMIPS ASE */ + /* gen_helper_mtc0_contextconfig(tcg_ctx, arg); */ + //register_name = "ContextConfig"; + goto cp0_unimplemented; + case CP0_REG04__USERLOCAL: + CP0_CHECK(ctx->ulri); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); + //register_name = "UserLocal"; + break; + case CP0_REG04__MMID: + CP0_CHECK(ctx->mi); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_MemoryMapID)); + //register_name = "MMID"; + break; + default: + goto 
cp0_unimplemented; + } + break; + case CP0_REGISTER_05: + switch (sel) { + case CP0_REG05__PAGEMASK: + gen_helper_mtc0_pagemask(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "PageMask"; + break; + case CP0_REG05__PAGEGRAIN: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_pagegrain(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "PageGrain"; + ctx->base.is_jmp = DISAS_STOP; + break; + case CP0_REG05__SEGCTL0: + CP0_CHECK(ctx->sc); + gen_helper_mtc0_segctl0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SegCtl0"; + break; + case CP0_REG05__SEGCTL1: + CP0_CHECK(ctx->sc); + gen_helper_mtc0_segctl1(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SegCtl1"; + break; + case CP0_REG05__SEGCTL2: + CP0_CHECK(ctx->sc); + gen_helper_mtc0_segctl2(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SegCtl2"; + break; + case CP0_REG05__PWBASE: + check_pw(ctx); + gen_mtc0_store32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWBase)); + //register_name = "PWBase"; + break; + case CP0_REG05__PWFIELD: + check_pw(ctx); + gen_helper_mtc0_pwfield(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "PWField"; + break; + case CP0_REG05__PWSIZE: + check_pw(ctx); + gen_helper_mtc0_pwsize(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "PWSize"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_06: + switch (sel) { + case CP0_REG06__WIRED: + gen_helper_mtc0_wired(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Wired"; + break; + case CP0_REG06__SRSCONF0: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SRSConf0"; + break; + case CP0_REG06__SRSCONF1: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf1(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SRSConf1"; + break; + case CP0_REG06__SRSCONF2: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf2(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SRSConf2"; + break; + case CP0_REG06__SRSCONF3: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf3(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SRSConf3"; + break; + case CP0_REG06__SRSCONF4: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf4(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SRSConf4"; + break; + case CP0_REG06__PWCTL: + check_pw(ctx); + gen_helper_mtc0_pwctl(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "PWCtl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_07: + switch (sel) { + case CP0_REG07__HWRENA: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_hwrena(tcg_ctx, tcg_ctx->cpu_env, arg); + ctx->base.is_jmp = DISAS_STOP; + //register_name = "HWREna"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_08: + switch (sel) { + case CP0_REG08__BADVADDR: + /* ignored */ + //register_name = "BadVAddr"; + break; + case CP0_REG08__BADINSTR: + /* ignored */ + //register_name = "BadInstr"; + break; + case CP0_REG08__BADINSTRP: + /* ignored */ + //register_name = "BadInstrP"; + break; + case CP0_REG08__BADINSTRX: + /* ignored */ + //register_name = "BadInstrX"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_09: + switch (sel) { + case CP0_REG09__COUNT: + gen_helper_mtc0_count(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Count"; + break; + case CP0_REG09__SAARI: + CP0_CHECK(ctx->saar); + gen_helper_mtc0_saari(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SAARI"; + break; + case CP0_REG09__SAAR: + CP0_CHECK(ctx->saar); 
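+ /* Like SAARI above, SAAR is only present when the core advertises it via ctx->saar. */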
+ gen_helper_mtc0_saar(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SAAR"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_10: + switch (sel) { + case CP0_REG10__ENTRYHI: + gen_helper_mtc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "EntryHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_11: + switch (sel) { + case CP0_REG11__COMPARE: + gen_helper_mtc0_compare(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Compare"; + break; + /* 6,7 are implementation dependent */ + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_12: + switch (sel) { + case CP0_REG12__STATUS: + save_cpu_state(ctx, 1); + gen_helper_mtc0_status(tcg_ctx, tcg_ctx->cpu_env, arg); + /* DISAS_STOP isn't good enough here, hflags may have changed. */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + //register_name = "Status"; + break; + case CP0_REG12__INTCTL: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_intctl(tcg_ctx, tcg_ctx->cpu_env, arg); + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "IntCtl"; + break; + case CP0_REG12__SRSCTL: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsctl(tcg_ctx, tcg_ctx->cpu_env, arg); + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "SRSCtl"; + break; + case CP0_REG12__SRSMAP: + check_insn(ctx, ISA_MIPS32R2); + gen_mtc0_store32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "SRSMap"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_13: + switch (sel) { + case CP0_REG13__CAUSE: + save_cpu_state(ctx, 1); + gen_helper_mtc0_cause(tcg_ctx, tcg_ctx->cpu_env, arg); + /* + * Stop translation as we may have triggered an interrupt. + * DISAS_STOP isn't sufficient, we need to ensure we break out of + * translated code to check for pending interrupts. 
+ */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + //register_name = "Cause"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_14: + switch (sel) { + case CP0_REG14__EPC: + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); + //register_name = "EPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_15: + switch (sel) { + case CP0_REG15__PRID: + /* ignored */ + //register_name = "PRid"; + break; + case CP0_REG15__EBASE: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_ebase(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "EBase"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_16: + switch (sel) { + case CP0_REG16__CONFIG: + gen_helper_mtc0_config0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Config"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + break; + case CP0_REG16__CONFIG1: + /* ignored, read only */ + //register_name = "Config1"; + break; + case CP0_REG16__CONFIG2: + gen_helper_mtc0_config2(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Config2"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + break; + case CP0_REG16__CONFIG3: + gen_helper_mtc0_config3(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Config3"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + break; + case CP0_REG16__CONFIG4: + gen_helper_mtc0_config4(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Config4"; + ctx->base.is_jmp = DISAS_STOP; + break; + case CP0_REG16__CONFIG5: + gen_helper_mtc0_config5(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Config5"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + break; + /* 6,7 are implementation dependent */ + case CP0_REG16__CONFIG6: + /* ignored */ + //register_name = "Config6"; + break; + case CP0_REG16__CONFIG7: + /* ignored */ + //register_name = "Config7"; + break; + default: + //register_name = "Invalid config selector"; + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_17: + switch (sel) { + case CP0_REG17__LLADDR: + gen_helper_mtc0_lladdr(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "LLAddr"; + break; + case CP0_REG17__MAAR: + CP0_CHECK(ctx->mrp); + gen_helper_mtc0_maar(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "MAAR"; + break; + case CP0_REG17__MAARI: + CP0_CHECK(ctx->mrp); + gen_helper_mtc0_maari(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "MAARI"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_18: + switch (sel) { + case CP0_REG18__WATCHLO0: + case CP0_REG18__WATCHLO1: + case CP0_REG18__WATCHLO2: + case CP0_REG18__WATCHLO3: + case CP0_REG18__WATCHLO4: + case CP0_REG18__WATCHLO5: + case CP0_REG18__WATCHLO6: + case CP0_REG18__WATCHLO7: + CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); + gen_helper_0e1i(mtc0_watchlo, arg, sel); + //register_name = "WatchLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_19: + switch (sel) { + case CP0_REG19__WATCHHI0: + case CP0_REG19__WATCHHI1: + case CP0_REG19__WATCHHI2: + case CP0_REG19__WATCHHI3: + case CP0_REG19__WATCHHI4: + case CP0_REG19__WATCHHI5: + case CP0_REG19__WATCHHI6: + case CP0_REG19__WATCHHI7: + CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); + gen_helper_0e1i(mtc0_watchhi, arg, sel); + //register_name = 
"WatchHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_20: + switch (sel) { + case CP0_REG20__XCONTEXT: +#if defined(TARGET_MIPS64) + check_insn(ctx, ISA_MIPS3); + gen_helper_mtc0_xcontext(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "XContext"; + break; +#endif + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_21: + /* Officially reserved, but sel 0 is used for R1x000 framemask */ + CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); + switch (sel) { + case 0: + gen_helper_mtc0_framemask(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Framemask"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_22: + /* ignored */ + //register_name = "Diagnostic"; /* implementation dependent */ + break; + case CP0_REGISTER_23: + switch (sel) { + case CP0_REG23__DEBUG: + gen_helper_mtc0_debug(tcg_ctx, tcg_ctx->cpu_env, arg); /* EJTAG support */ + /* DISAS_STOP isn't good enough here, hflags may have changed. */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + //register_name = "Debug"; + break; + case CP0_REG23__TRACECONTROL: + /* PDtrace support */ + /* gen_helper_mtc0_tracecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); */ + //register_name = "TraceControl"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + goto cp0_unimplemented; + case CP0_REG23__TRACECONTROL2: + /* PDtrace support */ + /* gen_helper_mtc0_tracecontrol2(tcg_ctx, tcg_ctx->cpu_env, arg); */ + //register_name = "TraceControl2"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + goto cp0_unimplemented; + case CP0_REG23__USERTRACEDATA1: + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + /* PDtrace support */ + /* gen_helper_mtc0_usertracedata1(tcg_ctx, tcg_ctx->cpu_env, arg);*/ + //register_name = "UserTraceData"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + goto cp0_unimplemented; + case CP0_REG23__TRACEIBPC: + /* PDtrace support */ + /* gen_helper_mtc0_traceibpc(tcg_ctxtcg_ctx, ->cpu_env, arg); */ + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "TraceIBPC"; + goto cp0_unimplemented; + case CP0_REG23__TRACEDBPC: + /* PDtrace support */ + /* gen_helper_mtc0_tracedbpc(tcg_ctx, tcg_ctx->cpu_env, arg); */ + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "TraceDBPC"; + goto cp0_unimplemented; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_24: + switch (sel) { + case CP0_REG24__DEPC: + /* EJTAG support */ + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); + //register_name = "DEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_25: + switch (sel) { + case CP0_REG25__PERFCTL0: + gen_helper_mtc0_performance0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Performance0"; + break; + case CP0_REG25__PERFCNT0: + /* gen_helper_mtc0_performance1(tcg_ctx, arg); */ + //register_name = "Performance1"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL1: + /* gen_helper_mtc0_performance2(tcg_ctx, arg); */ + //register_name = "Performance2"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT1: + /* gen_helper_mtc0_performance3(tcg_ctx, arg); */ + //register_name = "Performance3"; + goto 
cp0_unimplemented; + case CP0_REG25__PERFCTL2: + /* gen_helper_mtc0_performance4(tcg_ctx, arg); */ + //register_name = "Performance4"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT2: + /* gen_helper_mtc0_performance5(tcg_ctx, arg); */ + //register_name = "Performance5"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL3: + /* gen_helper_mtc0_performance6(tcg_ctx, arg); */ + //register_name = "Performance6"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT3: + /* gen_helper_mtc0_performance7(tcg_ctx, arg); */ + //register_name = "Performance7"; + goto cp0_unimplemented; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_26: + switch (sel) { + case CP0_REG26__ERRCTL: + gen_helper_mtc0_errctl(tcg_ctx, tcg_ctx->cpu_env, arg); + ctx->base.is_jmp = DISAS_STOP; + //register_name = "ErrCtl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_27: + switch (sel) { + case CP0_REG27__CACHERR: + /* ignored */ + //register_name = "CacheErr"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_28: + switch (sel) { + case CP0_REG28__TAGLO: + case CP0_REG28__TAGLO1: + case CP0_REG28__TAGLO2: + case CP0_REG28__TAGLO3: + gen_helper_mtc0_taglo(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TagLo"; + break; + case CP0_REG28__DATALO: + case CP0_REG28__DATALO1: + case CP0_REG28__DATALO2: + case CP0_REG28__DATALO3: + gen_helper_mtc0_datalo(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "DataLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_29: + switch (sel) { + case CP0_REG29__TAGHI: + case CP0_REG29__TAGHI1: + case CP0_REG29__TAGHI2: + case CP0_REG29__TAGHI3: + gen_helper_mtc0_taghi(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TagHi"; + break; + case CP0_REG29__DATAHI: + case CP0_REG29__DATAHI1: + case CP0_REG29__DATAHI2: + case CP0_REG29__DATAHI3: + gen_helper_mtc0_datahi(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "DataHi"; + break; + default: + //register_name = "invalid sel"; + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_30: + switch (sel) { + case CP0_REG30__ERROREPC: + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); + //register_name = "ErrorEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_31: + switch (sel) { + case CP0_REG31__DESAVE: + /* EJTAG support */ + gen_mtc0_store32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); + //register_name = "DESAVE"; + break; + case CP0_REG31__KSCRATCH1: + case CP0_REG31__KSCRATCH2: + case CP0_REG31__KSCRATCH3: + case CP0_REG31__KSCRATCH4: + case CP0_REG31__KSCRATCH5: + case CP0_REG31__KSCRATCH6: + CP0_CHECK(ctx->kscrexist & (1 << sel)); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_KScratch[sel - 2])); + //register_name = "KScratch"; + break; + default: + goto cp0_unimplemented; + } + break; + default: + goto cp0_unimplemented; + } + + /* For simplicity assume that all writes can cause interrupts. */ + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + /* + * DISAS_STOP isn't sufficient, we need to ensure we break out of + * translated code to check for pending interrupts. 
+ */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + } + return; + +cp0_unimplemented: + //qemu_log_mask(LOG_UNIMP, "mtc0 %s (reg %d sel %d)\n", + //register_name, reg, sel); + return; +} + +#if defined(TARGET_MIPS64) +static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + //const char *register_name = "invalid"; + + if (sel != 0) { + check_insn(ctx, ISA_MIPS64); + } + + switch (reg) { + case CP0_REGISTER_00: + switch (sel) { + case CP0_REG00__INDEX: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Index)); + //register_name = "Index"; + break; + case CP0_REG00__MVPCONTROL: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_mvpcontrol(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "MVPControl"; + break; + case CP0_REG00__MVPCONF0: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_mvpconf0(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "MVPConf0"; + break; + case CP0_REG00__MVPCONF1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_mvpconf1(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "MVPConf1"; + break; + case CP0_REG00__VPCONTROL: + CP0_CHECK(ctx->vp); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPControl)); + //register_name = "VPControl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_01: + switch (sel) { + case CP0_REG01__RANDOM: + CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); + gen_helper_mfc0_random(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "Random"; + break; + case CP0_REG01__VPECONTROL: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEControl)); + //register_name = "VPEControl"; + break; + case CP0_REG01__VPECONF0: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf0)); + //register_name = "VPEConf0"; + break; + case CP0_REG01__VPECONF1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf1)); + //register_name = "VPEConf1"; + break; + case CP0_REG01__YQMASK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_YQMask)); + //register_name = "YQMask"; + break; + case CP0_REG01__VPESCHEDULE: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_VPESchedule)); + //register_name = "VPESchedule"; + break; + case CP0_REG01__VPESCHEFBACK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_VPEScheFBack)); + //register_name = "VPEScheFBack"; + break; + case CP0_REG01__VPEOPT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_VPEOpt)); + //register_name = "VPEOpt"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_02: + switch (sel) { + case CP0_REG02__ENTRYLO0: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_EntryLo0)); + //register_name = "EntryLo0"; + break; + case CP0_REG02__TCSTATUS: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tcstatus(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCStatus"; + break; + case CP0_REG02__TCBIND: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tcbind(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCBind"; + break; + case CP0_REG02__TCRESTART: + CP0_CHECK(ctx->insn_flags & 
ASE_MT); + gen_helper_dmfc0_tcrestart(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCRestart"; + break; + case CP0_REG02__TCHALT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_dmfc0_tchalt(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCHalt"; + break; + case CP0_REG02__TCCONTEXT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_dmfc0_tccontext(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCContext"; + break; + case CP0_REG02__TCSCHEDULE: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_dmfc0_tcschedule(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCSchedule"; + break; + case CP0_REG02__TCSCHEFBACK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_dmfc0_tcschefback(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "TCScheFBack"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_03: + switch (sel) { + case CP0_REG03__ENTRYLO1: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1)); + //register_name = "EntryLo1"; + break; + case CP0_REG03__GLOBALNUM: + CP0_CHECK(ctx->vp); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_GlobalNumber)); + //register_name = "GlobalNumber"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_04: + switch (sel) { + case CP0_REG04__CONTEXT: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_Context)); + //register_name = "Context"; + break; + case CP0_REG04__CONTEXTCONFIG: + /* SmartMIPS ASE */ + /* gen_helper_dmfc0_contextconfig(tcg_ctx, arg); */ + //register_name = "ContextConfig"; + goto cp0_unimplemented; + case CP0_REG04__USERLOCAL: + CP0_CHECK(ctx->ulri); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); + //register_name = "UserLocal"; + break; + case CP0_REG04__MMID: + CP0_CHECK(ctx->mi); + gen_helper_mtc0_memorymapid(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "MMID"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_05: + switch (sel) { + case CP0_REG05__PAGEMASK: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PageMask)); + //register_name = "PageMask"; + break; + case CP0_REG05__PAGEGRAIN: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PageGrain)); + //register_name = "PageGrain"; + break; + case CP0_REG05__SEGCTL0: + CP0_CHECK(ctx->sc); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0)); + //register_name = "SegCtl0"; + break; + case CP0_REG05__SEGCTL1: + CP0_CHECK(ctx->sc); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1)); + //register_name = "SegCtl1"; + break; + case CP0_REG05__SEGCTL2: + CP0_CHECK(ctx->sc); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2)); + //register_name = "SegCtl2"; + break; + case CP0_REG05__PWBASE: + check_pw(ctx); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_PWBase)); + //register_name = "PWBase"; + break; + case CP0_REG05__PWFIELD: + check_pw(ctx); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_PWField)); + //register_name = "PWField"; + break; + case CP0_REG05__PWSIZE: + check_pw(ctx); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_PWSize)); + //register_name = "PWSize"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_06: + switch (sel) { + case CP0_REG06__WIRED: + gen_mfc0_load32(tcg_ctx, 
arg, offsetof(CPUMIPSState, CP0_Wired)); + //register_name = "Wired"; + break; + case CP0_REG06__SRSCONF0: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf0)); + //register_name = "SRSConf0"; + break; + case CP0_REG06__SRSCONF1: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf1)); + //register_name = "SRSConf1"; + break; + case CP0_REG06__SRSCONF2: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf2)); + //register_name = "SRSConf2"; + break; + case CP0_REG06__SRSCONF3: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf3)); + //register_name = "SRSConf3"; + break; + case CP0_REG06__SRSCONF4: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf4)); + //register_name = "SRSConf4"; + break; + case CP0_REG06__PWCTL: + check_pw(ctx); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PWCtl)); + //register_name = "PWCtl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_07: + switch (sel) { + case CP0_REG07__HWRENA: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_HWREna)); + //register_name = "HWREna"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_08: + switch (sel) { + case CP0_REG08__BADVADDR: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); + //register_name = "BadVAddr"; + break; + case CP0_REG08__BADINSTR: + CP0_CHECK(ctx->bi); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstr)); + //register_name = "BadInstr"; + break; + case CP0_REG08__BADINSTRP: + CP0_CHECK(ctx->bp); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrP)); + //register_name = "BadInstrP"; + break; + case CP0_REG08__BADINSTRX: + CP0_CHECK(ctx->bi); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrX)); + tcg_gen_andi_tl(tcg_ctx, arg, arg, ~0xffff); + //register_name = "BadInstrX"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_09: + switch (sel) { + case CP0_REG09__COUNT: + /* Mark as an IO operation because we read the time. */ + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_mfc0_count(tcg_ctx, arg, tcg_ctx->cpu_env); + /* + * Break the TB to be able to take timer interrupts immediately + * after reading count. DISAS_STOP isn't sufficient, we need to + * ensure we break completely out of translated code. 
+ */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + //register_name = "Count"; + break; + case CP0_REG09__SAARI: + CP0_CHECK(ctx->saar); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SAARI)); + //register_name = "SAARI"; + break; + case CP0_REG09__SAAR: + CP0_CHECK(ctx->saar); + gen_helper_dmfc0_saar(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "SAAR"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_10: + switch (sel) { + case CP0_REG10__ENTRYHI: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryHi)); + //register_name = "EntryHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_11: + switch (sel) { + case CP0_REG11__COMPARE: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Compare)); + //register_name = "Compare"; + break; + /* 6,7 are implementation dependent */ + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_12: + switch (sel) { + case CP0_REG12__STATUS: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Status)); + //register_name = "Status"; + break; + case CP0_REG12__INTCTL: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_IntCtl)); + //register_name = "IntCtl"; + break; + case CP0_REG12__SRSCTL: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSCtl)); + //register_name = "SRSCtl"; + break; + case CP0_REG12__SRSMAP: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); + //register_name = "SRSMap"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_13: + switch (sel) { + case CP0_REG13__CAUSE: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Cause)); + //register_name = "Cause"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_14: + switch (sel) { + case CP0_REG14__EPC: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); + //register_name = "EPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_15: + switch (sel) { + case CP0_REG15__PRID: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_PRid)); + //register_name = "PRid"; + break; + case CP0_REG15__EBASE: + check_insn(ctx, ISA_MIPS32R2); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EBase)); + //register_name = "EBase"; + break; + case CP0_REG15__CMGCRBASE: + check_insn(ctx, ISA_MIPS32R2); + CP0_CHECK(ctx->cmgcr); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase)); + //register_name = "CMGCRBase"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_16: + switch (sel) { + case CP0_REG16__CONFIG: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config0)); + //register_name = "Config"; + break; + case CP0_REG16__CONFIG1: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config1)); + //register_name = "Config1"; + break; + case CP0_REG16__CONFIG2: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config2)); + //register_name = "Config2"; + break; + case CP0_REG16__CONFIG3: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config3)); + //register_name = "Config3"; + break; + case CP0_REG16__CONFIG4: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config4)); + //register_name = "Config4"; + break; + case CP0_REG16__CONFIG5: + 
gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config5)); + //register_name = "Config5"; + break; + /* 6,7 are implementation dependent */ + case CP0_REG16__CONFIG6: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config6)); + //register_name = "Config6"; + break; + case CP0_REG16__CONFIG7: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Config7)); + //register_name = "Config7"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_17: + switch (sel) { + case CP0_REG17__LLADDR: + gen_helper_dmfc0_lladdr(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "LLAddr"; + break; + case CP0_REG17__MAAR: + CP0_CHECK(ctx->mrp); + gen_helper_dmfc0_maar(tcg_ctx, arg, tcg_ctx->cpu_env); + //register_name = "MAAR"; + break; + case CP0_REG17__MAARI: + CP0_CHECK(ctx->mrp); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_MAARI)); + //register_name = "MAARI"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_18: + switch (sel) { + case CP0_REG18__WATCHLO0: + case CP0_REG18__WATCHLO1: + case CP0_REG18__WATCHLO2: + case CP0_REG18__WATCHLO3: + case CP0_REG18__WATCHLO4: + case CP0_REG18__WATCHLO5: + case CP0_REG18__WATCHLO6: + case CP0_REG18__WATCHLO7: + CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); + gen_helper_1e0i(dmfc0_watchlo, arg, sel); + //register_name = "WatchLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_19: + switch (sel) { + case CP0_REG19__WATCHHI0: + case CP0_REG19__WATCHHI1: + case CP0_REG19__WATCHHI2: + case CP0_REG19__WATCHHI3: + case CP0_REG19__WATCHHI4: + case CP0_REG19__WATCHHI5: + case CP0_REG19__WATCHHI6: + case CP0_REG19__WATCHHI7: + CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); + gen_helper_1e0i(dmfc0_watchhi, arg, sel); + //register_name = "WatchHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_20: + switch (sel) { + case CP0_REG20__XCONTEXT: + check_insn(ctx, ISA_MIPS3); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_XContext)); + //register_name = "XContext"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_21: + /* Officially reserved, but sel 0 is used for R1x000 framemask */ + CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); + switch (sel) { + case 0: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Framemask)); + //register_name = "Framemask"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_22: + tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ + //register_name = "'Diagnostic"; /* implementation dependent */ + break; + case CP0_REGISTER_23: + switch (sel) { + case CP0_REG23__DEBUG: + gen_helper_mfc0_debug(tcg_ctx, arg, tcg_ctx->cpu_env); /* EJTAG support */ + //register_name = "Debug"; + break; + case CP0_REG23__TRACECONTROL: + /* PDtrace support */ + /* gen_helper_dmfc0_tracecontrol(tcg_ctx, arg, tcg_ctx->cpu_env); */ + //register_name = "TraceControl"; + goto cp0_unimplemented; + case CP0_REG23__TRACECONTROL2: + /* PDtrace support */ + /* gen_helper_dmfc0_tracecontrol2(tcg_ctx, arg, tcg_ctx->cpu_env); */ + //register_name = "TraceControl2"; + goto cp0_unimplemented; + case CP0_REG23__USERTRACEDATA1: + /* PDtrace support */ + /* gen_helper_dmfc0_usertracedata1(tcg_ctx, arg, tcg_ctx->cpu_env);*/ + //register_name = "UserTraceData1"; + goto cp0_unimplemented; + case CP0_REG23__TRACEIBPC: + /* PDtrace support */ + /* gen_helper_dmfc0_traceibpc(tcg_ctx, arg, tcg_ctx->cpu_env); */ + //register_name = "TraceIBPC"; + 
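+ /* PDtrace is not implemented, so every trace selector funnels into cp0_unimplemented. */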
goto cp0_unimplemented; + case CP0_REG23__TRACEDBPC: + /* PDtrace support */ + /* gen_helper_dmfc0_tracedbpc(tcg_ctx, arg, tcg_ctx->cpu_env); */ + //register_name = "TraceDBPC"; + goto cp0_unimplemented; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_24: + switch (sel) { + case CP0_REG24__DEPC: + /* EJTAG support */ + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); + //register_name = "DEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_25: + switch (sel) { + case CP0_REG25__PERFCTL0: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_Performance0)); + //register_name = "Performance0"; + break; + case CP0_REG25__PERFCNT0: + /* gen_helper_dmfc0_performance1(tcg_ctx, arg); */ + //register_name = "Performance1"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL1: + /* gen_helper_dmfc0_performance2(tcg_ctx, arg); */ + //register_name = "Performance2"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT1: + /* gen_helper_dmfc0_performance3(tcg_ctx, arg); */ + //register_name = "Performance3"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL2: + /* gen_helper_dmfc0_performance4(tcg_ctx, arg); */ + //register_name = "Performance4"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT2: + /* gen_helper_dmfc0_performance5(tcg_ctx, arg); */ + //register_name = "Performance5"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL3: + /* gen_helper_dmfc0_performance6(tcg_ctx, arg); */ + //register_name = "Performance6"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT3: + /* gen_helper_dmfc0_performance7(tcg_ctx, arg); */ + //register_name = "Performance7"; + goto cp0_unimplemented; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_26: + switch (sel) { + case CP0_REG26__ERRCTL: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_ErrCtl)); + //register_name = "ErrCtl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_27: + switch (sel) { + /* ignored */ + case CP0_REG27__CACHERR: + tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ + //register_name = "CacheErr"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_28: + switch (sel) { + case CP0_REG28__TAGLO: + case CP0_REG28__TAGLO1: + case CP0_REG28__TAGLO2: + case CP0_REG28__TAGLO3: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_TagLo)); + //register_name = "TagLo"; + break; + case CP0_REG28__DATALO: + case CP0_REG28__DATALO1: + case CP0_REG28__DATALO2: + case CP0_REG28__DATALO3: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DataLo)); + //register_name = "DataLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_29: + switch (sel) { + case CP0_REG29__TAGHI: + case CP0_REG29__TAGHI1: + case CP0_REG29__TAGHI2: + case CP0_REG29__TAGHI3: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_TagHi)); + //register_name = "TagHi"; + break; + case CP0_REG29__DATAHI: + case CP0_REG29__DATAHI1: + case CP0_REG29__DATAHI2: + case CP0_REG29__DATAHI3: + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DataHi)); + //register_name = "DataHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_30: + switch (sel) { + case CP0_REG30__ERROREPC: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); + //register_name = "ErrorEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_31: + switch (sel) { + case CP0_REG31__DESAVE: + 
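+ /* Register 31 shares sel 0 (DESAVE) with the KScratch registers: sels 2..7 read CP0_KScratch[sel - 2] when the matching kscrexist bit is set. */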
/* EJTAG support */ + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); + //register_name = "DESAVE"; + break; + case CP0_REG31__KSCRATCH1: + case CP0_REG31__KSCRATCH2: + case CP0_REG31__KSCRATCH3: + case CP0_REG31__KSCRATCH4: + case CP0_REG31__KSCRATCH5: + case CP0_REG31__KSCRATCH6: + CP0_CHECK(ctx->kscrexist & (1 << sel)); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_KScratch[sel - 2])); + //register_name = "KScratch"; + break; + default: + goto cp0_unimplemented; + } + break; + default: + goto cp0_unimplemented; + } + return; + +cp0_unimplemented: + //qemu_log_mask(LOG_UNIMP, "dmfc0 %s (reg %d sel %d)\n", + //register_name, reg, sel); + gen_mfc0_unimplemented(ctx, arg); +} + +static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + //const char *register_name = "invalid"; + + if (sel != 0) { + check_insn(ctx, ISA_MIPS64); + } + + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + + switch (reg) { + case CP0_REGISTER_00: + switch (sel) { + case CP0_REG00__INDEX: + gen_helper_mtc0_index(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Index"; + break; + case CP0_REG00__MVPCONTROL: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_mvpcontrol(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "MVPControl"; + break; + case CP0_REG00__MVPCONF0: + CP0_CHECK(ctx->insn_flags & ASE_MT); + /* ignored */ + //register_name = "MVPConf0"; + break; + case CP0_REG00__MVPCONF1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + /* ignored */ + //register_name = "MVPConf1"; + break; + case CP0_REG00__VPCONTROL: + CP0_CHECK(ctx->vp); + /* ignored */ + //register_name = "VPControl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_01: + switch (sel) { + case CP0_REG01__RANDOM: + /* ignored */ + //register_name = "Random"; + break; + case CP0_REG01__VPECONTROL: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "VPEControl"; + break; + case CP0_REG01__VPECONF0: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "VPEConf0"; + break; + case CP0_REG01__VPECONF1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeconf1(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "VPEConf1"; + break; + case CP0_REG01__YQMASK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_yqmask(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "YQMask"; + break; + case CP0_REG01__VPESCHEDULE: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_VPESchedule)); + //register_name = "VPESchedule"; + break; + case CP0_REG01__VPESCHEFBACK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_VPEScheFBack)); + //register_name = "VPEScheFBack"; + break; + case CP0_REG01__VPEOPT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeopt(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "VPEOpt"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_02: + switch (sel) { + case CP0_REG02__ENTRYLO0: + gen_helper_dmtc0_entrylo0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "EntryLo0"; + break; + case CP0_REG02__TCSTATUS: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCStatus"; + break; + 
case CP0_REG02__TCBIND: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCBind"; + break; + case CP0_REG02__TCRESTART: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCRestart"; + break; + case CP0_REG02__TCHALT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCHalt"; + break; + case CP0_REG02__TCCONTEXT: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCContext"; + break; + case CP0_REG02__TCSCHEDULE: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCSchedule"; + break; + case CP0_REG02__TCSCHEFBACK: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TCScheFBack"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_03: + switch (sel) { + case CP0_REG03__ENTRYLO1: + gen_helper_dmtc0_entrylo1(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "EntryLo1"; + break; + case CP0_REG03__GLOBALNUM: + CP0_CHECK(ctx->vp); + /* ignored */ + //register_name = "GlobalNumber"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_04: + switch (sel) { + case CP0_REG04__CONTEXT: + gen_helper_mtc0_context(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Context"; + break; + case CP0_REG04__CONTEXTCONFIG: + /* SmartMIPS ASE */ + /* gen_helper_dmtc0_contextconfig(tcg_ctx, arg); */ + //register_name = "ContextConfig"; + goto cp0_unimplemented; + case CP0_REG04__USERLOCAL: + CP0_CHECK(ctx->ulri); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); + //register_name = "UserLocal"; + break; + case CP0_REG04__MMID: + CP0_CHECK(ctx->mi); + gen_mfc0_load32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_MemoryMapID)); + //register_name = "MMID"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_05: + switch (sel) { + case CP0_REG05__PAGEMASK: + gen_helper_mtc0_pagemask(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "PageMask"; + break; + case CP0_REG05__PAGEGRAIN: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_pagegrain(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "PageGrain"; + break; + case CP0_REG05__SEGCTL0: + CP0_CHECK(ctx->sc); + gen_helper_mtc0_segctl0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SegCtl0"; + break; + case CP0_REG05__SEGCTL1: + CP0_CHECK(ctx->sc); + gen_helper_mtc0_segctl1(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SegCtl1"; + break; + case CP0_REG05__SEGCTL2: + CP0_CHECK(ctx->sc); + gen_helper_mtc0_segctl2(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SegCtl2"; + break; + case CP0_REG05__PWBASE: + check_pw(ctx); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_PWBase)); + //register_name = "PWBase"; + break; + case CP0_REG05__PWFIELD: + check_pw(ctx); + gen_helper_mtc0_pwfield(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "PWField"; + break; + case CP0_REG05__PWSIZE: + check_pw(ctx); + gen_helper_mtc0_pwsize(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "PWSize"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_06: + switch (sel) { + case CP0_REG06__WIRED: + gen_helper_mtc0_wired(tcg_ctx, tcg_ctx->cpu_env, arg); + 
//register_name = "Wired"; + break; + case CP0_REG06__SRSCONF0: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SRSConf0"; + break; + case CP0_REG06__SRSCONF1: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf1(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SRSConf1"; + break; + case CP0_REG06__SRSCONF2: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf2(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SRSConf2"; + break; + case CP0_REG06__SRSCONF3: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf3(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SRSConf3"; + break; + case CP0_REG06__SRSCONF4: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf4(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SRSConf4"; + break; + case CP0_REG06__PWCTL: + check_pw(ctx); + gen_helper_mtc0_pwctl(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "PWCtl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_07: + switch (sel) { + case CP0_REG07__HWRENA: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_hwrena(tcg_ctx, tcg_ctx->cpu_env, arg); + ctx->base.is_jmp = DISAS_STOP; + //register_name = "HWREna"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_08: + switch (sel) { + case CP0_REG08__BADVADDR: + /* ignored */ + //register_name = "BadVAddr"; + break; + case CP0_REG08__BADINSTR: + /* ignored */ + //register_name = "BadInstr"; + break; + case CP0_REG08__BADINSTRP: + /* ignored */ + //register_name = "BadInstrP"; + break; + case CP0_REG08__BADINSTRX: + /* ignored */ + //register_name = "BadInstrX"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_09: + switch (sel) { + case CP0_REG09__COUNT: + gen_helper_mtc0_count(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Count"; + break; + case CP0_REG09__SAARI: + CP0_CHECK(ctx->saar); + gen_helper_mtc0_saari(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SAARI"; + break; + case CP0_REG09__SAAR: + CP0_CHECK(ctx->saar); + gen_helper_mtc0_saar(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "SAAR"; + break; + default: + goto cp0_unimplemented; + } + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + break; + case CP0_REGISTER_10: + switch (sel) { + case CP0_REG10__ENTRYHI: + gen_helper_mtc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "EntryHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_11: + switch (sel) { + case CP0_REG11__COMPARE: + gen_helper_mtc0_compare(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Compare"; + break; + /* 6,7 are implementation dependent */ + default: + goto cp0_unimplemented; + } + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + break; + case CP0_REGISTER_12: + switch (sel) { + case CP0_REG12__STATUS: + save_cpu_state(ctx, 1); + gen_helper_mtc0_status(tcg_ctx, tcg_ctx->cpu_env, arg); + /* DISAS_STOP isn't good enough here, hflags may have changed. 
*/ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + //register_name = "Status"; + break; + case CP0_REG12__INTCTL: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_intctl(tcg_ctx, tcg_ctx->cpu_env, arg); + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "IntCtl"; + break; + case CP0_REG12__SRSCTL: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsctl(tcg_ctx, tcg_ctx->cpu_env, arg); + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "SRSCtl"; + break; + case CP0_REG12__SRSMAP: + check_insn(ctx, ISA_MIPS32R2); + gen_mtc0_store32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "SRSMap"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_13: + switch (sel) { + case CP0_REG13__CAUSE: + save_cpu_state(ctx, 1); + gen_helper_mtc0_cause(tcg_ctx, tcg_ctx->cpu_env, arg); + /* + * Stop translation as we may have triggered an interrupt. + * DISAS_STOP isn't sufficient, we need to ensure we break out of + * translated code to check for pending interrupts. + */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + //register_name = "Cause"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_14: + switch (sel) { + case CP0_REG14__EPC: + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); + //register_name = "EPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_15: + switch (sel) { + case CP0_REG15__PRID: + /* ignored */ + //register_name = "PRid"; + break; + case CP0_REG15__EBASE: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_ebase(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "EBase"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_16: + switch (sel) { + case CP0_REG16__CONFIG: + gen_helper_mtc0_config0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Config"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + break; + case CP0_REG16__CONFIG1: + /* ignored, read only */ + //register_name = "Config1"; + break; + case CP0_REG16__CONFIG2: + gen_helper_mtc0_config2(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Config2"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + break; + case CP0_REG16__CONFIG3: + gen_helper_mtc0_config3(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Config3"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + break; + case CP0_REG16__CONFIG4: + /* currently ignored */ + //register_name = "Config4"; + break; + case CP0_REG16__CONFIG5: + gen_helper_mtc0_config5(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Config5"; + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + break; + /* 6,7 are implementation dependent */ + default: + //register_name = "Invalid config selector"; + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_17: + switch (sel) { + case CP0_REG17__LLADDR: + gen_helper_mtc0_lladdr(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "LLAddr"; + break; + case CP0_REG17__MAAR: + CP0_CHECK(ctx->mrp); + gen_helper_mtc0_maar(tcg_ctx, tcg_ctx->cpu_env, arg); 
+ //register_name = "MAAR"; + break; + case CP0_REG17__MAARI: + CP0_CHECK(ctx->mrp); + gen_helper_mtc0_maari(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "MAARI"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_18: + switch (sel) { + case CP0_REG18__WATCHLO0: + case CP0_REG18__WATCHLO1: + case CP0_REG18__WATCHLO2: + case CP0_REG18__WATCHLO3: + case CP0_REG18__WATCHLO4: + case CP0_REG18__WATCHLO5: + case CP0_REG18__WATCHLO6: + case CP0_REG18__WATCHLO7: + CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); + gen_helper_0e1i(mtc0_watchlo, arg, sel); + //register_name = "WatchLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_19: + switch (sel) { + case CP0_REG19__WATCHHI0: + case CP0_REG19__WATCHHI1: + case CP0_REG19__WATCHHI2: + case CP0_REG19__WATCHHI3: + case CP0_REG19__WATCHHI4: + case CP0_REG19__WATCHHI5: + case CP0_REG19__WATCHHI6: + case CP0_REG19__WATCHHI7: + CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR)); + gen_helper_0e1i(mtc0_watchhi, arg, sel); + //register_name = "WatchHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_20: + switch (sel) { + case CP0_REG20__XCONTEXT: + check_insn(ctx, ISA_MIPS3); + gen_helper_mtc0_xcontext(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "XContext"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_21: + /* Officially reserved, but sel 0 is used for R1x000 framemask */ + CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); + switch (sel) { + case 0: + gen_helper_mtc0_framemask(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Framemask"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_22: + /* ignored */ + //register_name = "Diagnostic"; /* implementation dependent */ + break; + case CP0_REGISTER_23: + switch (sel) { + case CP0_REG23__DEBUG: + gen_helper_mtc0_debug(tcg_ctx, tcg_ctx->cpu_env, arg); /* EJTAG support */ + /* DISAS_STOP isn't good enough here, hflags may have changed. 
*/ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + //register_name = "Debug"; + break; + case CP0_REG23__TRACECONTROL: + /* PDtrace support */ + /* gen_helper_mtc0_tracecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); */ + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "TraceControl"; + goto cp0_unimplemented; + case CP0_REG23__TRACECONTROL2: + /* PDtrace support */ + /* gen_helper_mtc0_tracecontrol2(tcg_ctx, tcg_ctx->cpu_env, arg); */ + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "TraceControl2"; + goto cp0_unimplemented; + case CP0_REG23__USERTRACEDATA1: + /* PDtrace support */ + /* gen_helper_mtc0_usertracedata1(tcg_ctx, tcg_ctx->cpu_env, arg); */ + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "UserTraceData1"; + goto cp0_unimplemented; + case CP0_REG23__TRACEIBPC: + /* PDtrace support */ + /* gen_helper_mtc0_traceibpc(tcg_ctx, tcg_ctx->cpu_env, arg); */ + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "TraceIBPC"; + goto cp0_unimplemented; + case CP0_REG23__TRACEDBPC: + /* PDtrace support */ + /* gen_helper_mtc0_tracedbpc(tcg_ctx, tcg_ctx->cpu_env, arg); */ + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + //register_name = "TraceDBPC"; + goto cp0_unimplemented; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_24: + switch (sel) { + case CP0_REG24__DEPC: + /* EJTAG support */ + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); + //register_name = "DEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_25: + switch (sel) { + case CP0_REG25__PERFCTL0: + gen_helper_mtc0_performance0(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "Performance0"; + break; + case CP0_REG25__PERFCNT0: + /* gen_helper_mtc0_performance1(tcg_ctx, tcg_ctx->cpu_env, arg); */ + //register_name = "Performance1"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL1: + /* gen_helper_mtc0_performance2(tcg_ctx, tcg_ctx->cpu_env, arg); */ + //register_name = "Performance2"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT1: + /* gen_helper_mtc0_performance3(tcg_ctx, tcg_ctx->cpu_env, arg); */ + //register_name = "Performance3"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL2: + /* gen_helper_mtc0_performance4(tcg_ctx, tcg_ctx->cpu_env, arg); */ + //register_name = "Performance4"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT2: + /* gen_helper_mtc0_performance5(tcg_ctx, tcg_ctx->cpu_env, arg); */ + //register_name = "Performance5"; + goto cp0_unimplemented; + case CP0_REG25__PERFCTL3: + /* gen_helper_mtc0_performance6(tcg_ctx, tcg_ctx->cpu_env, arg); */ + //register_name = "Performance6"; + goto cp0_unimplemented; + case CP0_REG25__PERFCNT3: + /* gen_helper_mtc0_performance7(tcg_ctx, tcg_ctx->cpu_env, arg); */ + //register_name = "Performance7"; + goto cp0_unimplemented; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_26: + switch (sel) { + case CP0_REG26__ERRCTL: + gen_helper_mtc0_errctl(tcg_ctx, tcg_ctx->cpu_env, arg); + ctx->base.is_jmp = DISAS_STOP; + //register_name = "ErrCtl"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_27: + switch (sel) { + case CP0_REG27__CACHERR: + /* ignored */ + 
//register_name = "CacheErr"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_28: + switch (sel) { + case CP0_REG28__TAGLO: + case CP0_REG28__TAGLO1: + case CP0_REG28__TAGLO2: + case CP0_REG28__TAGLO3: + gen_helper_mtc0_taglo(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TagLo"; + break; + case CP0_REG28__DATALO: + case CP0_REG28__DATALO1: + case CP0_REG28__DATALO2: + case CP0_REG28__DATALO3: + gen_helper_mtc0_datalo(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "DataLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_29: + switch (sel) { + case CP0_REG29__TAGHI: + case CP0_REG29__TAGHI1: + case CP0_REG29__TAGHI2: + case CP0_REG29__TAGHI3: + gen_helper_mtc0_taghi(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "TagHi"; + break; + case CP0_REG29__DATAHI: + case CP0_REG29__DATAHI1: + case CP0_REG29__DATAHI2: + case CP0_REG29__DATAHI3: + gen_helper_mtc0_datahi(tcg_ctx, tcg_ctx->cpu_env, arg); + //register_name = "DataHi"; + break; + default: + //register_name = "invalid sel"; + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_30: + switch (sel) { + case CP0_REG30__ERROREPC: + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); + //register_name = "ErrorEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case CP0_REGISTER_31: + switch (sel) { + case CP0_REG31__DESAVE: + /* EJTAG support */ + gen_mtc0_store32(tcg_ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); + //register_name = "DESAVE"; + break; + case CP0_REG31__KSCRATCH1: + case CP0_REG31__KSCRATCH2: + case CP0_REG31__KSCRATCH3: + case CP0_REG31__KSCRATCH4: + case CP0_REG31__KSCRATCH5: + case CP0_REG31__KSCRATCH6: + CP0_CHECK(ctx->kscrexist & (1 << sel)); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_KScratch[sel - 2])); + //register_name = "KScratch"; + break; + default: + goto cp0_unimplemented; + } + break; + default: + goto cp0_unimplemented; + } + + /* For simplicity assume that all writes can cause interrupts. */ + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + /* + * DISAS_STOP isn't sufficient, we need to ensure we break out of + * translated code to check for pending interrupts. 
+ */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + } + return; + +cp0_unimplemented: + //qemu_log_mask(LOG_UNIMP, "dmtc0 %s (reg %d sel %d)\n", + //register_name, reg, sel); + return; +} +#endif /* TARGET_MIPS64 */ + +static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, + int u, int sel, int h) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + TCGv t0 = tcg_temp_local_new(tcg_ctx); + + if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 && + ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) != + (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE)))) { + tcg_gen_movi_tl(tcg_ctx, t0, -1); + } else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) > + (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC))) { + tcg_gen_movi_tl(tcg_ctx, t0, -1); + } else if (u == 0) { + switch (rt) { + case 1: + switch (sel) { + case 1: + gen_helper_mftc0_vpecontrol(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 2: + gen_helper_mftc0_vpeconf0(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + goto die; + break; + } + break; + case 2: + switch (sel) { + case 1: + gen_helper_mftc0_tcstatus(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 2: + gen_helper_mftc0_tcbind(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 3: + gen_helper_mftc0_tcrestart(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 4: + gen_helper_mftc0_tchalt(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 5: + gen_helper_mftc0_tccontext(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 6: + gen_helper_mftc0_tcschedule(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 7: + gen_helper_mftc0_tcschefback(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + gen_mfc0(ctx, t0, rt, sel); + break; + } + break; + case 10: + switch (sel) { + case 0: + gen_helper_mftc0_entryhi(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + gen_mfc0(ctx, t0, rt, sel); + break; + } + break; + case 12: + switch (sel) { + case 0: + gen_helper_mftc0_status(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + gen_mfc0(ctx, t0, rt, sel); + break; + } + break; + case 13: + switch (sel) { + case 0: + gen_helper_mftc0_cause(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + goto die; + break; + } + break; + case 14: + switch (sel) { + case 0: + gen_helper_mftc0_epc(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + goto die; + break; + } + break; + case 15: + switch (sel) { + case 1: + gen_helper_mftc0_ebase(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + goto die; + break; + } + break; + case 16: + switch (sel) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + gen_helper_mftc0_configx(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_const_tl(tcg_ctx, sel)); + break; + default: + goto die; + break; + } + break; + case 23: + switch (sel) { + case 0: + gen_helper_mftc0_debug(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + gen_mfc0(ctx, t0, rt, sel); + break; + } + break; + default: + gen_mfc0(ctx, t0, rt, sel); + } + } else { + switch (sel) { + /* GPR registers. 
*/ + case 0: + gen_helper_1e0i(mftgpr, t0, rt); + break; + /* Auxiliary CPU registers */ + case 1: + switch (rt) { + case 0: + gen_helper_1e0i(mftlo, t0, 0); + break; + case 1: + gen_helper_1e0i(mfthi, t0, 0); + break; + case 2: + gen_helper_1e0i(mftacx, t0, 0); + break; + case 4: + gen_helper_1e0i(mftlo, t0, 1); + break; + case 5: + gen_helper_1e0i(mfthi, t0, 1); + break; + case 6: + gen_helper_1e0i(mftacx, t0, 1); + break; + case 8: + gen_helper_1e0i(mftlo, t0, 2); + break; + case 9: + gen_helper_1e0i(mfthi, t0, 2); + break; + case 10: + gen_helper_1e0i(mftacx, t0, 2); + break; + case 12: + gen_helper_1e0i(mftlo, t0, 3); + break; + case 13: + gen_helper_1e0i(mfthi, t0, 3); + break; + case 14: + gen_helper_1e0i(mftacx, t0, 3); + break; + case 16: + gen_helper_mftdsp(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + goto die; + } + break; + /* Floating point (COP1). */ + case 2: + /* XXX: For now we support only a single FPU context. */ + if (h == 0) { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, rt); + tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); + tcg_temp_free_i32(tcg_ctx, fp0); + } else { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32h(ctx, fp0, rt); + tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case 3: + /* XXX: For now we support only a single FPU context. */ + gen_helper_1e0i(cfc1, t0, rt); + break; + /* COP2: Not implemented. */ + case 4: + case 5: + /* fall through */ + default: + goto die; + } + } + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + return; + +die: + tcg_temp_free(tcg_ctx, t0); + LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h); + generate_exception_end(ctx, EXCP_RI); +} + +static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt, + int u, int sel, int h) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + TCGv t0 = tcg_temp_local_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 && + ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) != + (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE)))) { + /* NOP */ + ; + } else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) > + (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC))) { + /* NOP */ + ; + } else if (u == 0) { + switch (rd) { + case 1: + switch (sel) { + case 1: + gen_helper_mttc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 2: + gen_helper_mttc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + goto die; + break; + } + break; + case 2: + switch (sel) { + case 1: + gen_helper_mttc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 2: + gen_helper_mttc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 3: + gen_helper_mttc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 4: + gen_helper_mttc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 5: + gen_helper_mttc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 6: + gen_helper_mttc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 7: + gen_helper_mttc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + gen_mtc0(ctx, t0, rd, sel); + break; + } + break; + case 10: + switch (sel) { + case 0: + gen_helper_mttc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + gen_mtc0(ctx, t0, rd, sel); + break; + } + break; + case 12: + switch (sel) { + case 0: + gen_helper_mttc0_status(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + 
default: + gen_mtc0(ctx, t0, rd, sel); + break; + } + break; + case 13: + switch (sel) { + case 0: + gen_helper_mttc0_cause(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + goto die; + break; + } + break; + case 15: + switch (sel) { + case 1: + gen_helper_mttc0_ebase(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + goto die; + break; + } + break; + case 23: + switch (sel) { + case 0: + gen_helper_mttc0_debug(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + gen_mtc0(ctx, t0, rd, sel); + break; + } + break; + default: + gen_mtc0(ctx, t0, rd, sel); + } + } else { + switch (sel) { + /* GPR registers. */ + case 0: + gen_helper_0e1i(mttgpr, t0, rd); + break; + /* Auxiliary CPU registers */ + case 1: + switch (rd) { + case 0: + gen_helper_0e1i(mttlo, t0, 0); + break; + case 1: + gen_helper_0e1i(mtthi, t0, 0); + break; + case 2: + gen_helper_0e1i(mttacx, t0, 0); + break; + case 4: + gen_helper_0e1i(mttlo, t0, 1); + break; + case 5: + gen_helper_0e1i(mtthi, t0, 1); + break; + case 6: + gen_helper_0e1i(mttacx, t0, 1); + break; + case 8: + gen_helper_0e1i(mttlo, t0, 2); + break; + case 9: + gen_helper_0e1i(mtthi, t0, 2); + break; + case 10: + gen_helper_0e1i(mttacx, t0, 2); + break; + case 12: + gen_helper_0e1i(mttlo, t0, 3); + break; + case 13: + gen_helper_0e1i(mtthi, t0, 3); + break; + case 14: + gen_helper_0e1i(mttacx, t0, 3); + break; + case 16: + gen_helper_mttdsp(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + goto die; + } + break; + /* Floating point (COP1). */ + case 2: + /* XXX: For now we support only a single FPU context. */ + if (h == 0) { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); + gen_store_fpr32(ctx, fp0, rd); + tcg_temp_free_i32(tcg_ctx, fp0); + } else { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); + gen_store_fpr32h(ctx, fp0, rd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case 3: + /* XXX: For now we support only a single FPU context. */ + { + TCGv_i32 fs_tmp = tcg_const_i32(tcg_ctx, rd); + + gen_helper_0e2i(ctc1, t0, fs_tmp, rt); + tcg_temp_free_i32(tcg_ctx, fs_tmp); + } + /* Stop translation as we may have changed hflags */ + ctx->base.is_jmp = DISAS_STOP; + break; + /* COP2: Not implemented. */ + case 4: + case 5: + /* fall through */ + default: + goto die; + } + } + tcg_temp_free(tcg_ctx, t0); + return; + +die: + tcg_temp_free(tcg_ctx, t0); + LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h); + generate_exception_end(ctx, EXCP_RI); +} + +static void gen_cp0(CPUMIPSState *env, DisasContext *ctx, uint32_t opc, + int rt, int rd) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + const char *opn = "ldst"; + + check_cp0_enabled(ctx); + switch (opc) { + case OPC_MFC0: + if (rt == 0) { + /* Treat as NOP. */ + return; + } + gen_mfc0(ctx, tcg_ctx->cpu_gpr[rt], rd, ctx->opcode & 0x7); + opn = "mfc0"; + break; + case OPC_MTC0: + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + gen_mtc0(ctx, t0, rd, ctx->opcode & 0x7); + tcg_temp_free(tcg_ctx, t0); + } + opn = "mtc0"; + break; +#if defined(TARGET_MIPS64) + case OPC_DMFC0: + check_insn(ctx, ISA_MIPS3); + if (rt == 0) { + /* Treat as NOP. 
*/ + return; + } + gen_dmfc0(ctx, tcg_ctx->cpu_gpr[rt], rd, ctx->opcode & 0x7); + opn = "dmfc0"; + break; + case OPC_DMTC0: + check_insn(ctx, ISA_MIPS3); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + gen_dmtc0(ctx, t0, rd, ctx->opcode & 0x7); + tcg_temp_free(tcg_ctx, t0); + } + opn = "dmtc0"; + break; +#endif + case OPC_MFHC0: + check_mvh(ctx); + if (rt == 0) { + /* Treat as NOP. */ + return; + } + gen_mfhc0(ctx, tcg_ctx->cpu_gpr[rt], rd, ctx->opcode & 0x7); + opn = "mfhc0"; + break; + case OPC_MTHC0: + check_mvh(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rt); + gen_mthc0(ctx, t0, rd, ctx->opcode & 0x7); + tcg_temp_free(tcg_ctx, t0); + } + opn = "mthc0"; + break; + case OPC_MFTR: + check_cp0_enabled(ctx); + if (rd == 0) { + /* Treat as NOP. */ + return; + } + gen_mftr(env, ctx, rt, rd, (ctx->opcode >> 5) & 1, + ctx->opcode & 0x7, (ctx->opcode >> 4) & 1); + opn = "mftr"; + break; + case OPC_MTTR: + check_cp0_enabled(ctx); + gen_mttr(env, ctx, rd, rt, (ctx->opcode >> 5) & 1, + ctx->opcode & 0x7, (ctx->opcode >> 4) & 1); + opn = "mttr"; + break; + case OPC_TLBWI: + opn = "tlbwi"; + if (!env->tlb->helper_tlbwi) { + goto die; + } + gen_helper_tlbwi(tcg_ctx, tcg_ctx->cpu_env); + break; + case OPC_TLBINV: + opn = "tlbinv"; + if (ctx->ie >= 2) { + if (!env->tlb->helper_tlbinv) { + goto die; + } + gen_helper_tlbinv(tcg_ctx, tcg_ctx->cpu_env); + } /* treat as nop if TLBINV not supported */ + break; + case OPC_TLBINVF: + opn = "tlbinvf"; + if (ctx->ie >= 2) { + if (!env->tlb->helper_tlbinvf) { + goto die; + } + gen_helper_tlbinvf(tcg_ctx, tcg_ctx->cpu_env); + } /* treat as nop if TLBINV not supported */ + break; + case OPC_TLBWR: + opn = "tlbwr"; + if (!env->tlb->helper_tlbwr) { + goto die; + } + gen_helper_tlbwr(tcg_ctx, tcg_ctx->cpu_env); + break; + case OPC_TLBP: + opn = "tlbp"; + if (!env->tlb->helper_tlbp) { + goto die; + } + gen_helper_tlbp(tcg_ctx, tcg_ctx->cpu_env); + break; + case OPC_TLBR: + opn = "tlbr"; + if (!env->tlb->helper_tlbr) { + goto die; + } + gen_helper_tlbr(tcg_ctx, tcg_ctx->cpu_env); + break; + case OPC_ERET: /* OPC_ERETNC */ + if ((ctx->insn_flags & ISA_MIPS32R6) && + (ctx->hflags & MIPS_HFLAG_BMASK)) { + goto die; + } else { + int bit_shift = (ctx->hflags & MIPS_HFLAG_M16) ? 
16 : 6; + if (ctx->opcode & (1 << bit_shift)) { + /* OPC_ERETNC */ + opn = "eretnc"; + check_insn(ctx, ISA_MIPS32R5); + gen_helper_eretnc(tcg_ctx, tcg_ctx->cpu_env); + } else { + /* OPC_ERET */ + opn = "eret"; + check_insn(ctx, ISA_MIPS2); + gen_helper_eret(tcg_ctx, tcg_ctx->cpu_env); + } + ctx->base.is_jmp = DISAS_EXIT; + } + break; + case OPC_DERET: + opn = "deret"; + check_insn(ctx, ISA_MIPS32); + if ((ctx->insn_flags & ISA_MIPS32R6) && + (ctx->hflags & MIPS_HFLAG_BMASK)) { + goto die; + } + if (!(ctx->hflags & MIPS_HFLAG_DM)) { + MIPS_INVAL(opn); + generate_exception_end(ctx, EXCP_RI); + } else { + gen_helper_deret(tcg_ctx, tcg_ctx->cpu_env); + ctx->base.is_jmp = DISAS_EXIT; + } + break; + case OPC_WAIT: + opn = "wait"; + check_insn(ctx, ISA_MIPS3 | ISA_MIPS32); + if ((ctx->insn_flags & ISA_MIPS32R6) && + (ctx->hflags & MIPS_HFLAG_BMASK)) { + goto die; + } + /* If we get an exception, we want to restart at next instruction */ + ctx->base.pc_next += 4; + save_cpu_state(ctx, 1); + ctx->base.pc_next -= 4; + gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + break; + default: + die: + MIPS_INVAL(opn); + generate_exception_end(ctx, EXCP_RI); + return; + } + (void)opn; /* avoid a compiler warning */ +} + +/* CP1 Branches (before delay slot) */ +static void gen_compute_branch1(DisasContext *ctx, uint32_t op, + int32_t cc, int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong btarget; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + + if ((ctx->insn_flags & ISA_MIPS32R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) { + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + if (cc != 0) { + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); + } + + btarget = ctx->base.pc_next + 4 + offset; + + switch (op) { + case OPC_BC1F: + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_not_i32(tcg_ctx, t0, t0); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); + goto not_likely; + case OPC_BC1FL: + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_not_i32(tcg_ctx, t0, t0); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); + goto likely; + case OPC_BC1T: + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); + goto not_likely; + case OPC_BC1TL: + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); + likely: + ctx->hflags |= MIPS_HFLAG_BL; + break; + case OPC_BC1FANY2: + { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 1)); + tcg_gen_nand_i32(tcg_ctx, t0, t0, t1); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); + } + goto not_likely; + case OPC_BC1TANY2: + { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 1)); + tcg_gen_or_i32(tcg_ctx, t0, t0, t1); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); + } + goto not_likely; + case OPC_BC1FANY4: + { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, t0, 
tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 1)); + tcg_gen_and_i32(tcg_ctx, t0, t0, t1); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 2)); + tcg_gen_and_i32(tcg_ctx, t0, t0, t1); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 3)); + tcg_gen_nand_i32(tcg_ctx, t0, t0, t1); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); + } + goto not_likely; + case OPC_BC1TANY4: + { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 1)); + tcg_gen_or_i32(tcg_ctx, t0, t0, t1); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 2)); + tcg_gen_or_i32(tcg_ctx, t0, t0, t1); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc + 3)); + tcg_gen_or_i32(tcg_ctx, t0, t0, t1); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, tcg_ctx->bcond, t0); + } + not_likely: + ctx->hflags |= MIPS_HFLAG_BC; + break; + default: + MIPS_INVAL("cp1 cond branch"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + ctx->btarget = btarget; + ctx->hflags |= MIPS_HFLAG_BDS32; + out: + tcg_temp_free_i32(tcg_ctx, t0); +} + +/* R6 CP1 Branches */ +static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, + int32_t ft, int32_t offset, + int delayslot_size) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong btarget; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + + if (ctx->hflags & MIPS_HFLAG_BMASK) { +#ifdef MIPS_DEBUG_DISAS + LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx + "\n", ctx->base.pc_next); +#endif + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + gen_load_fpr64(ctx, t0, ft); + tcg_gen_andi_i64(tcg_ctx, t0, t0, 1); + + btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); + + switch (op) { + case OPC_BC1EQZ: + tcg_gen_xori_i64(tcg_ctx, t0, t0, 1); + ctx->hflags |= MIPS_HFLAG_BC; + break; + case OPC_BC1NEZ: + /* t0 already set */ + ctx->hflags |= MIPS_HFLAG_BC; + break; + default: + MIPS_INVAL("cp1 cond branch"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + tcg_gen_trunc_i64_tl(tcg_ctx, tcg_ctx->bcond, t0); + + ctx->btarget = btarget; + + switch (delayslot_size) { + case 2: + ctx->hflags |= MIPS_HFLAG_BDS16; + break; + case 4: + ctx->hflags |= MIPS_HFLAG_BDS32; + break; + } + +out: + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* Coprocessor 1 (FPU) */ + +#define FOP(func, fmt) (((fmt) << 21) | (func)) + +enum fopcode { + OPC_ADD_S = FOP(0, FMT_S), + OPC_SUB_S = FOP(1, FMT_S), + OPC_MUL_S = FOP(2, FMT_S), + OPC_DIV_S = FOP(3, FMT_S), + OPC_SQRT_S = FOP(4, FMT_S), + OPC_ABS_S = FOP(5, FMT_S), + OPC_MOV_S = FOP(6, FMT_S), + OPC_NEG_S = FOP(7, FMT_S), + OPC_ROUND_L_S = FOP(8, FMT_S), + OPC_TRUNC_L_S = FOP(9, FMT_S), + OPC_CEIL_L_S = FOP(10, FMT_S), + OPC_FLOOR_L_S = FOP(11, FMT_S), + OPC_ROUND_W_S = FOP(12, FMT_S), + OPC_TRUNC_W_S = FOP(13, FMT_S), + OPC_CEIL_W_S = FOP(14, FMT_S), + OPC_FLOOR_W_S = FOP(15, FMT_S), + OPC_SEL_S = FOP(16, FMT_S), + OPC_MOVCF_S = FOP(17, FMT_S), + OPC_MOVZ_S = FOP(18, FMT_S), + OPC_MOVN_S = FOP(19, FMT_S), + OPC_SELEQZ_S = FOP(20, FMT_S), + OPC_RECIP_S = FOP(21, FMT_S), + OPC_RSQRT_S = FOP(22, FMT_S), + OPC_SELNEZ_S = FOP(23, FMT_S), + OPC_MADDF_S = FOP(24, FMT_S), + OPC_MSUBF_S = FOP(25, FMT_S), + OPC_RINT_S = FOP(26, FMT_S), + OPC_CLASS_S = FOP(27, 
FMT_S), + OPC_MIN_S = FOP(28, FMT_S), + OPC_RECIP2_S = FOP(28, FMT_S), + OPC_MINA_S = FOP(29, FMT_S), + OPC_RECIP1_S = FOP(29, FMT_S), + OPC_MAX_S = FOP(30, FMT_S), + OPC_RSQRT1_S = FOP(30, FMT_S), + OPC_MAXA_S = FOP(31, FMT_S), + OPC_RSQRT2_S = FOP(31, FMT_S), + OPC_CVT_D_S = FOP(33, FMT_S), + OPC_CVT_W_S = FOP(36, FMT_S), + OPC_CVT_L_S = FOP(37, FMT_S), + OPC_CVT_PS_S = FOP(38, FMT_S), + OPC_CMP_F_S = FOP(48, FMT_S), + OPC_CMP_UN_S = FOP(49, FMT_S), + OPC_CMP_EQ_S = FOP(50, FMT_S), + OPC_CMP_UEQ_S = FOP(51, FMT_S), + OPC_CMP_OLT_S = FOP(52, FMT_S), + OPC_CMP_ULT_S = FOP(53, FMT_S), + OPC_CMP_OLE_S = FOP(54, FMT_S), + OPC_CMP_ULE_S = FOP(55, FMT_S), + OPC_CMP_SF_S = FOP(56, FMT_S), + OPC_CMP_NGLE_S = FOP(57, FMT_S), + OPC_CMP_SEQ_S = FOP(58, FMT_S), + OPC_CMP_NGL_S = FOP(59, FMT_S), + OPC_CMP_LT_S = FOP(60, FMT_S), + OPC_CMP_NGE_S = FOP(61, FMT_S), + OPC_CMP_LE_S = FOP(62, FMT_S), + OPC_CMP_NGT_S = FOP(63, FMT_S), + + OPC_ADD_D = FOP(0, FMT_D), + OPC_SUB_D = FOP(1, FMT_D), + OPC_MUL_D = FOP(2, FMT_D), + OPC_DIV_D = FOP(3, FMT_D), + OPC_SQRT_D = FOP(4, FMT_D), + OPC_ABS_D = FOP(5, FMT_D), + OPC_MOV_D = FOP(6, FMT_D), + OPC_NEG_D = FOP(7, FMT_D), + OPC_ROUND_L_D = FOP(8, FMT_D), + OPC_TRUNC_L_D = FOP(9, FMT_D), + OPC_CEIL_L_D = FOP(10, FMT_D), + OPC_FLOOR_L_D = FOP(11, FMT_D), + OPC_ROUND_W_D = FOP(12, FMT_D), + OPC_TRUNC_W_D = FOP(13, FMT_D), + OPC_CEIL_W_D = FOP(14, FMT_D), + OPC_FLOOR_W_D = FOP(15, FMT_D), + OPC_SEL_D = FOP(16, FMT_D), + OPC_MOVCF_D = FOP(17, FMT_D), + OPC_MOVZ_D = FOP(18, FMT_D), + OPC_MOVN_D = FOP(19, FMT_D), + OPC_SELEQZ_D = FOP(20, FMT_D), + OPC_RECIP_D = FOP(21, FMT_D), + OPC_RSQRT_D = FOP(22, FMT_D), + OPC_SELNEZ_D = FOP(23, FMT_D), + OPC_MADDF_D = FOP(24, FMT_D), + OPC_MSUBF_D = FOP(25, FMT_D), + OPC_RINT_D = FOP(26, FMT_D), + OPC_CLASS_D = FOP(27, FMT_D), + OPC_MIN_D = FOP(28, FMT_D), + OPC_RECIP2_D = FOP(28, FMT_D), + OPC_MINA_D = FOP(29, FMT_D), + OPC_RECIP1_D = FOP(29, FMT_D), + OPC_MAX_D = FOP(30, FMT_D), + OPC_RSQRT1_D = FOP(30, FMT_D), + OPC_MAXA_D = FOP(31, FMT_D), + OPC_RSQRT2_D = FOP(31, FMT_D), + OPC_CVT_S_D = FOP(32, FMT_D), + OPC_CVT_W_D = FOP(36, FMT_D), + OPC_CVT_L_D = FOP(37, FMT_D), + OPC_CMP_F_D = FOP(48, FMT_D), + OPC_CMP_UN_D = FOP(49, FMT_D), + OPC_CMP_EQ_D = FOP(50, FMT_D), + OPC_CMP_UEQ_D = FOP(51, FMT_D), + OPC_CMP_OLT_D = FOP(52, FMT_D), + OPC_CMP_ULT_D = FOP(53, FMT_D), + OPC_CMP_OLE_D = FOP(54, FMT_D), + OPC_CMP_ULE_D = FOP(55, FMT_D), + OPC_CMP_SF_D = FOP(56, FMT_D), + OPC_CMP_NGLE_D = FOP(57, FMT_D), + OPC_CMP_SEQ_D = FOP(58, FMT_D), + OPC_CMP_NGL_D = FOP(59, FMT_D), + OPC_CMP_LT_D = FOP(60, FMT_D), + OPC_CMP_NGE_D = FOP(61, FMT_D), + OPC_CMP_LE_D = FOP(62, FMT_D), + OPC_CMP_NGT_D = FOP(63, FMT_D), + + OPC_CVT_S_W = FOP(32, FMT_W), + OPC_CVT_D_W = FOP(33, FMT_W), + OPC_CVT_S_L = FOP(32, FMT_L), + OPC_CVT_D_L = FOP(33, FMT_L), + OPC_CVT_PS_PW = FOP(38, FMT_W), + + OPC_ADD_PS = FOP(0, FMT_PS), + OPC_SUB_PS = FOP(1, FMT_PS), + OPC_MUL_PS = FOP(2, FMT_PS), + OPC_DIV_PS = FOP(3, FMT_PS), + OPC_ABS_PS = FOP(5, FMT_PS), + OPC_MOV_PS = FOP(6, FMT_PS), + OPC_NEG_PS = FOP(7, FMT_PS), + OPC_MOVCF_PS = FOP(17, FMT_PS), + OPC_MOVZ_PS = FOP(18, FMT_PS), + OPC_MOVN_PS = FOP(19, FMT_PS), + OPC_ADDR_PS = FOP(24, FMT_PS), + OPC_MULR_PS = FOP(26, FMT_PS), + OPC_RECIP2_PS = FOP(28, FMT_PS), + OPC_RECIP1_PS = FOP(29, FMT_PS), + OPC_RSQRT1_PS = FOP(30, FMT_PS), + OPC_RSQRT2_PS = FOP(31, FMT_PS), + + OPC_CVT_S_PU = FOP(32, FMT_PS), + OPC_CVT_PW_PS = FOP(36, FMT_PS), + OPC_CVT_S_PL = FOP(40, FMT_PS), + OPC_PLL_PS = FOP(44, FMT_PS), + OPC_PLU_PS = FOP(45, 
FMT_PS), + OPC_PUL_PS = FOP(46, FMT_PS), + OPC_PUU_PS = FOP(47, FMT_PS), + OPC_CMP_F_PS = FOP(48, FMT_PS), + OPC_CMP_UN_PS = FOP(49, FMT_PS), + OPC_CMP_EQ_PS = FOP(50, FMT_PS), + OPC_CMP_UEQ_PS = FOP(51, FMT_PS), + OPC_CMP_OLT_PS = FOP(52, FMT_PS), + OPC_CMP_ULT_PS = FOP(53, FMT_PS), + OPC_CMP_OLE_PS = FOP(54, FMT_PS), + OPC_CMP_ULE_PS = FOP(55, FMT_PS), + OPC_CMP_SF_PS = FOP(56, FMT_PS), + OPC_CMP_NGLE_PS = FOP(57, FMT_PS), + OPC_CMP_SEQ_PS = FOP(58, FMT_PS), + OPC_CMP_NGL_PS = FOP(59, FMT_PS), + OPC_CMP_LT_PS = FOP(60, FMT_PS), + OPC_CMP_NGE_PS = FOP(61, FMT_PS), + OPC_CMP_LE_PS = FOP(62, FMT_PS), + OPC_CMP_NGT_PS = FOP(63, FMT_PS), +}; + +enum r6_f_cmp_op { + R6_OPC_CMP_AF_S = FOP(0, FMT_W), + R6_OPC_CMP_UN_S = FOP(1, FMT_W), + R6_OPC_CMP_EQ_S = FOP(2, FMT_W), + R6_OPC_CMP_UEQ_S = FOP(3, FMT_W), + R6_OPC_CMP_LT_S = FOP(4, FMT_W), + R6_OPC_CMP_ULT_S = FOP(5, FMT_W), + R6_OPC_CMP_LE_S = FOP(6, FMT_W), + R6_OPC_CMP_ULE_S = FOP(7, FMT_W), + R6_OPC_CMP_SAF_S = FOP(8, FMT_W), + R6_OPC_CMP_SUN_S = FOP(9, FMT_W), + R6_OPC_CMP_SEQ_S = FOP(10, FMT_W), + R6_OPC_CMP_SEUQ_S = FOP(11, FMT_W), + R6_OPC_CMP_SLT_S = FOP(12, FMT_W), + R6_OPC_CMP_SULT_S = FOP(13, FMT_W), + R6_OPC_CMP_SLE_S = FOP(14, FMT_W), + R6_OPC_CMP_SULE_S = FOP(15, FMT_W), + R6_OPC_CMP_OR_S = FOP(17, FMT_W), + R6_OPC_CMP_UNE_S = FOP(18, FMT_W), + R6_OPC_CMP_NE_S = FOP(19, FMT_W), + R6_OPC_CMP_SOR_S = FOP(25, FMT_W), + R6_OPC_CMP_SUNE_S = FOP(26, FMT_W), + R6_OPC_CMP_SNE_S = FOP(27, FMT_W), + + R6_OPC_CMP_AF_D = FOP(0, FMT_L), + R6_OPC_CMP_UN_D = FOP(1, FMT_L), + R6_OPC_CMP_EQ_D = FOP(2, FMT_L), + R6_OPC_CMP_UEQ_D = FOP(3, FMT_L), + R6_OPC_CMP_LT_D = FOP(4, FMT_L), + R6_OPC_CMP_ULT_D = FOP(5, FMT_L), + R6_OPC_CMP_LE_D = FOP(6, FMT_L), + R6_OPC_CMP_ULE_D = FOP(7, FMT_L), + R6_OPC_CMP_SAF_D = FOP(8, FMT_L), + R6_OPC_CMP_SUN_D = FOP(9, FMT_L), + R6_OPC_CMP_SEQ_D = FOP(10, FMT_L), + R6_OPC_CMP_SEUQ_D = FOP(11, FMT_L), + R6_OPC_CMP_SLT_D = FOP(12, FMT_L), + R6_OPC_CMP_SULT_D = FOP(13, FMT_L), + R6_OPC_CMP_SLE_D = FOP(14, FMT_L), + R6_OPC_CMP_SULE_D = FOP(15, FMT_L), + R6_OPC_CMP_OR_D = FOP(17, FMT_L), + R6_OPC_CMP_UNE_D = FOP(18, FMT_L), + R6_OPC_CMP_NE_D = FOP(19, FMT_L), + R6_OPC_CMP_SOR_D = FOP(25, FMT_L), + R6_OPC_CMP_SUNE_D = FOP(26, FMT_L), + R6_OPC_CMP_SNE_D = FOP(27, FMT_L), +}; + +static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + + switch (opc) { + case OPC_MFC1: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); + tcg_temp_free_i32(tcg_ctx, fp0); + } + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_MTC1: + gen_load_gpr(tcg_ctx, t0, rt); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); + gen_store_fpr32(ctx, fp0, fs); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_CFC1: + gen_helper_1e0i(cfc1, t0, fs); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_CTC1: + gen_load_gpr(tcg_ctx, t0, rt); + save_cpu_state(ctx, 0); + { + TCGv_i32 fs_tmp = tcg_const_i32(tcg_ctx, fs); + + gen_helper_0e2i(ctc1, t0, fs_tmp, rt); + tcg_temp_free_i32(tcg_ctx, fs_tmp); + } + /* Stop translation as we may have changed hflags */ + ctx->base.is_jmp = DISAS_STOP; + break; +#if defined(TARGET_MIPS64) + case OPC_DMFC1: + gen_load_fpr64(ctx, t0, fs); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_DMTC1: + gen_load_gpr(tcg_ctx, t0, rt); + gen_store_fpr64(ctx, t0, fs); + break; +#endif + case OPC_MFHC1: + { + TCGv_i32 fp0 = 
tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32h(ctx, fp0, fs); + tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); + tcg_temp_free_i32(tcg_ctx, fp0); + } + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_MTHC1: + gen_load_gpr(tcg_ctx, t0, rt); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); + gen_store_fpr32h(ctx, fp0, fs); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + default: + MIPS_INVAL("cp1 move"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + out: + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_movci(DisasContext *ctx, int rd, int rs, int cc, int tf) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l1; + TCGCond cond; + TCGv_i32 t0; + + if (rd == 0) { + /* Treat as NOP. */ + return; + } + + if (tf) { + cond = TCG_COND_EQ; + } else { + cond = TCG_COND_NE; + } + + l1 = gen_new_label(tcg_ctx); + t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1 << get_fp_bit(cc)); + tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); + tcg_temp_free_i32(tcg_ctx, t0); + if (rs == 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } else { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); + } + gen_set_label(tcg_ctx, l1); +} + +static inline void gen_movcf_s(DisasContext *ctx, int fs, int fd, int cc, + int tf) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int cond; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGLabel *l1 = gen_new_label(tcg_ctx); + + if (tf) { + cond = TCG_COND_EQ; + } else { + cond = TCG_COND_NE; + } + + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1 << get_fp_bit(cc)); + tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); + gen_load_fpr32(ctx, t0, fs); + gen_store_fpr32(ctx, t0, fd); + gen_set_label(tcg_ctx, l1); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static inline void gen_movcf_d(DisasContext *ctx, int fs, int fd, int cc, + int tf) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int cond; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp0; + TCGLabel *l1 = gen_new_label(tcg_ctx); + + if (tf) { + cond = TCG_COND_EQ; + } else { + cond = TCG_COND_NE; + } + + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1 << get_fp_bit(cc)); + tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); + tcg_temp_free_i32(tcg_ctx, t0); + fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); +} + +static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd, + int cc, int tf) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int cond; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + + if (tf) { + cond = TCG_COND_EQ; + } else { + cond = TCG_COND_NE; + } + + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1 << get_fp_bit(cc)); + tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); + gen_load_fpr32(ctx, t0, fs); + gen_store_fpr32(ctx, t0, fd); + gen_set_label(tcg_ctx, l1); + + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1 << get_fp_bit(cc + 1)); + tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l2); + gen_load_fpr32h(ctx, t0, fs); + gen_store_fpr32h(ctx, t0, fd); + tcg_temp_free_i32(tcg_ctx, t0); + gen_set_label(tcg_ctx, l2); +} + +static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft, + int fs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0); + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 
= tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fd); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fs); + + switch (op1) { + case OPC_SEL_S: + tcg_gen_andi_i32(tcg_ctx, fp0, fp0, 1); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, fp0, fp0, t1, fp1, fp2); + break; + case OPC_SELEQZ_S: + tcg_gen_andi_i32(tcg_ctx, fp1, fp1, 1); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, fp0, fp1, t1, fp2, t1); + break; + case OPC_SELNEZ_S: + tcg_gen_andi_i32(tcg_ctx, fp1, fp1, 1); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, fp0, fp1, t1, fp2, t1); + break; + default: + MIPS_INVAL("gen_sel_s"); + generate_exception_end(ctx, EXCP_RI); + break; + } + + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, t1); +} + +static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft, + int fs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t1 = tcg_const_i64(tcg_ctx, 0); + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fd); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fs); + + switch (op1) { + case OPC_SEL_D: + tcg_gen_andi_i64(tcg_ctx, fp0, fp0, 1); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, fp0, fp0, t1, fp1, fp2); + break; + case OPC_SELEQZ_D: + tcg_gen_andi_i64(tcg_ctx, fp1, fp1, 1); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, fp0, fp1, t1, fp2, t1); + break; + case OPC_SELNEZ_D: + tcg_gen_andi_i64(tcg_ctx, fp1, fp1, 1); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, fp0, fp1, t1, fp2, t1); + break; + default: + MIPS_INVAL("gen_sel_d"); + generate_exception_end(ctx, EXCP_RI); + break; + } + + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static void gen_farith(DisasContext *ctx, enum fopcode op1, + int ft, int fs, int fd, int cc) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t func = ctx->opcode & 0x3f; + switch (op1) { + case OPC_ADD_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_add_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_SUB_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_sub_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_MUL_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_mul_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_DIV_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_div_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + 
tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_SQRT_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_sqrt_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_ABS_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + if (ctx->abs2008) { + tcg_gen_andi_i32(tcg_ctx, fp0, fp0, 0x7fffffffUL); + } else { + gen_helper_float_abs_s(tcg_ctx, fp0, fp0); + } + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_MOV_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_NEG_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + if (ctx->abs2008) { + tcg_gen_xori_i32(tcg_ctx, fp0, fp0, 1UL << 31); + } else { + gen_helper_float_chs_s(tcg_ctx, fp0, fp0); + } + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_ROUND_L_S: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + if (ctx->nan2008) { + gen_helper_float_round_2008_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + } else { + gen_helper_float_round_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + } + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + break; + case OPC_TRUNC_L_S: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + if (ctx->nan2008) { + gen_helper_float_trunc_2008_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + } else { + gen_helper_float_trunc_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + } + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + break; + case OPC_CEIL_L_S: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + if (ctx->nan2008) { + gen_helper_float_ceil_2008_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + } else { + gen_helper_float_ceil_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + } + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + break; + case OPC_FLOOR_L_S: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + if (ctx->nan2008) { + gen_helper_float_floor_2008_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + } else { + gen_helper_float_floor_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + } + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + break; + case OPC_ROUND_W_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + if (ctx->nan2008) { + gen_helper_float_round_2008_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } else { + gen_helper_float_round_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_TRUNC_W_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + if (ctx->nan2008) { + gen_helper_float_trunc_2008_w_s(tcg_ctx, fp0, 
tcg_ctx->cpu_env, fp0); + } else { + gen_helper_float_trunc_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_CEIL_W_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + if (ctx->nan2008) { + gen_helper_float_ceil_2008_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } else { + gen_helper_float_ceil_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_FLOOR_W_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + if (ctx->nan2008) { + gen_helper_float_floor_2008_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } else { + gen_helper_float_floor_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_SEL_S: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_s(ctx, op1, fd, ft, fs); + break; + case OPC_SELEQZ_S: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_s(ctx, op1, fd, ft, fs); + break; + case OPC_SELNEZ_S: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_s(ctx, op1, fd, ft, fs); + break; + case OPC_MOVCF_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_movcf_s(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); + break; + case OPC_MOVZ_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGv_i32 fp0; + + if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr[ft], 0, l1); + } + fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + break; + case OPC_MOVN_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGv_i32 fp0; + + if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr[ft], 0, l1); + fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + } + break; + case OPC_RECIP_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_recip_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_RSQRT_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_rsqrt_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_MADDF_S: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fd); + gen_helper_float_maddf_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_MSUBF_S: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fd); + gen_helper_float_msubf_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, 
fp2); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_RINT_S: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_rint_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_CLASS_S: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_class_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_MIN_S: /* OPC_RECIP2_S */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MIN_S */ + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_min_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + } else { + /* OPC_RECIP2_S */ + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_recip2_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + } + break; + case OPC_MINA_S: /* OPC_RECIP1_S */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MINA_S */ + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_mina_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + } else { + /* OPC_RECIP1_S */ + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_recip1_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + } + break; + case OPC_MAX_S: /* OPC_RSQRT1_S */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MAX_S */ + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_max_s(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp1, fd); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + } else { + /* OPC_RSQRT1_S */ + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_rsqrt1_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + } + break; + case OPC_MAXA_S: /* OPC_RSQRT2_S */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MAXA_S */ + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_maxa_s(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp1, fd); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + } else { + /* OPC_RSQRT2_S */ + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 
= tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_rsqrt2_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + } + break; + case OPC_CVT_D_S: + check_cp1_registers(ctx, fd); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_cvtd_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + break; + case OPC_CVT_W_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + if (ctx->nan2008) { + gen_helper_float_cvt_2008_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } else { + gen_helper_float_cvt_w_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_CVT_L_S: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + if (ctx->nan2008) { + gen_helper_float_cvt_2008_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + } else { + gen_helper_float_cvt_l_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + } + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + break; + case OPC_CVT_PS_S: + check_ps(ctx); + { + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i32 fp32_0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp32_1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp32_0, fs); + gen_load_fpr32(ctx, fp32_1, ft); + tcg_gen_concat_i32_i64(tcg_ctx, fp64, fp32_1, fp32_0); + tcg_temp_free_i32(tcg_ctx, fp32_1); + tcg_temp_free_i32(tcg_ctx, fp32_0); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + break; + case OPC_CMP_F_S: + case OPC_CMP_UN_S: + case OPC_CMP_EQ_S: + case OPC_CMP_UEQ_S: + case OPC_CMP_OLT_S: + case OPC_CMP_ULT_S: + case OPC_CMP_OLE_S: + case OPC_CMP_ULE_S: + case OPC_CMP_SF_S: + case OPC_CMP_NGLE_S: + case OPC_CMP_SEQ_S: + case OPC_CMP_NGL_S: + case OPC_CMP_LT_S: + case OPC_CMP_NGE_S: + case OPC_CMP_LE_S: + case OPC_CMP_NGT_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + if (ctx->opcode & (1 << 6)) { + gen_cmpabs_s(ctx, func - 48, ft, fs, cc); + } else { + gen_cmp_s(ctx, func - 48, ft, fs, cc); + } + break; + case OPC_ADD_D: + check_cp1_registers(ctx, fs | ft | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_add_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_SUB_D: + check_cp1_registers(ctx, fs | ft | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_sub_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_MUL_D: + check_cp1_registers(ctx, fs | ft | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + 
gen_helper_float_mul_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_DIV_D: + check_cp1_registers(ctx, fs | ft | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_div_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_SQRT_D: + check_cp1_registers(ctx, fs | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_sqrt_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_ABS_D: + check_cp1_registers(ctx, fs | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + if (ctx->abs2008) { + tcg_gen_andi_i64(tcg_ctx, fp0, fp0, 0x7fffffffffffffffULL); + } else { + gen_helper_float_abs_d(tcg_ctx, fp0, fp0); + } + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_MOV_D: + check_cp1_registers(ctx, fs | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_NEG_D: + check_cp1_registers(ctx, fs | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + if (ctx->abs2008) { + tcg_gen_xori_i64(tcg_ctx, fp0, fp0, 1ULL << 63); + } else { + gen_helper_float_chs_d(tcg_ctx, fp0, fp0); + } + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_ROUND_L_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + if (ctx->nan2008) { + gen_helper_float_round_2008_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } else { + gen_helper_float_round_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_TRUNC_L_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + if (ctx->nan2008) { + gen_helper_float_trunc_2008_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } else { + gen_helper_float_trunc_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_CEIL_L_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + if (ctx->nan2008) { + gen_helper_float_ceil_2008_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } else { + gen_helper_float_ceil_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_FLOOR_L_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + if (ctx->nan2008) { + gen_helper_float_floor_2008_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } else { + gen_helper_float_floor_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_ROUND_W_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + if (ctx->nan2008) 
{ + gen_helper_float_round_2008_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + } else { + gen_helper_float_round_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + } + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + break; + case OPC_TRUNC_W_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + if (ctx->nan2008) { + gen_helper_float_trunc_2008_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + } else { + gen_helper_float_trunc_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + } + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + break; + case OPC_CEIL_W_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + if (ctx->nan2008) { + gen_helper_float_ceil_2008_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + } else { + gen_helper_float_ceil_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + } + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + break; + case OPC_FLOOR_W_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + if (ctx->nan2008) { + gen_helper_float_floor_2008_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + } else { + gen_helper_float_floor_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + } + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + break; + case OPC_SEL_D: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_d(ctx, op1, fd, ft, fs); + break; + case OPC_SELEQZ_D: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_d(ctx, op1, fd, ft, fs); + break; + case OPC_SELNEZ_D: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_d(ctx, op1, fd, ft, fs); + break; + case OPC_MOVCF_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_movcf_d(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); + break; + case OPC_MOVZ_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGv_i64 fp0; + + if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr[ft], 0, l1); + } + fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + break; + case OPC_MOVN_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGv_i64 fp0; + + if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr[ft], 0, l1); + fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + } + break; + case OPC_RECIP_D: + check_cp1_registers(ctx, fs | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_recip_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_RSQRT_D: + check_cp1_registers(ctx, fs | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_rsqrt_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_MADDF_D: + check_insn(ctx, 
ISA_MIPS32R6); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fd); + gen_helper_float_maddf_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_MSUBF_D: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fd); + gen_helper_float_msubf_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_RINT_D: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_rint_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_CLASS_D: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_class_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_MIN_D: /* OPC_RECIP2_D */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MIN_D */ + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_min_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + } else { + /* OPC_RECIP2_D */ + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_recip2_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + } + break; + case OPC_MINA_D: /* OPC_RECIP1_D */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MINA_D */ + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_mina_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + } else { + /* OPC_RECIP1_D */ + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_recip1_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + } + break; + case OPC_MAX_D: /* OPC_RSQRT1_D */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MAX_D */ + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_max_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + } else { + /* OPC_RSQRT1_D */ + 
check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_rsqrt1_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + } + break; + case OPC_MAXA_D: /* OPC_RSQRT2_D */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MAXA_D */ + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_maxa_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + } else { + /* OPC_RSQRT2_D */ + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_rsqrt2_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + } + break; + case OPC_CMP_F_D: + case OPC_CMP_UN_D: + case OPC_CMP_EQ_D: + case OPC_CMP_UEQ_D: + case OPC_CMP_OLT_D: + case OPC_CMP_ULT_D: + case OPC_CMP_OLE_D: + case OPC_CMP_ULE_D: + case OPC_CMP_SF_D: + case OPC_CMP_NGLE_D: + case OPC_CMP_SEQ_D: + case OPC_CMP_NGL_D: + case OPC_CMP_LT_D: + case OPC_CMP_NGE_D: + case OPC_CMP_LE_D: + case OPC_CMP_NGT_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + if (ctx->opcode & (1 << 6)) { + gen_cmpabs_d(ctx, func - 48, ft, fs, cc); + } else { + gen_cmp_d(ctx, func - 48, ft, fs, cc); + } + break; + case OPC_CVT_S_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_cvts_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + break; + case OPC_CVT_W_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + if (ctx->nan2008) { + gen_helper_float_cvt_2008_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + } else { + gen_helper_float_cvt_w_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + } + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + break; + case OPC_CVT_L_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + if (ctx->nan2008) { + gen_helper_float_cvt_2008_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } else { + gen_helper_float_cvt_l_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + } + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_CVT_S_W: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_cvts_w(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_CVT_D_W: + check_cp1_registers(ctx, fd); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_cvtd_w(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + break; + case OPC_CVT_S_L: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = 
tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_cvts_l(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + break; + case OPC_CVT_D_L: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_cvtd_l(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_CVT_PS_PW: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_cvtps_pw(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_ADD_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_add_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_SUB_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_sub_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_MUL_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_mul_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_ABS_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_abs_ps(tcg_ctx, fp0, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_MOV_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_NEG_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_chs_ps(tcg_ctx, fp0, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_MOVCF_PS: + check_ps(ctx); + gen_movcf_ps(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); + break; + case OPC_MOVZ_PS: + check_ps(ctx); + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGv_i64 fp0; + + if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr[ft], 0, l1); + } + fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + break; + case OPC_MOVN_PS: + check_ps(ctx); + { + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGv_i64 fp0; + + if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr[ft], 0, l1); + fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + } + break; + case OPC_ADDR_PS: + check_ps(ctx); + { + 
TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, ft); + gen_load_fpr64(ctx, fp1, fs); + gen_helper_float_addr_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_MULR_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, ft); + gen_load_fpr64(ctx, fp1, fs); + gen_helper_float_mulr_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_RECIP2_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_recip2_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_RECIP1_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_recip1_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_RSQRT1_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_rsqrt1_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_RSQRT2_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_rsqrt2_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_CVT_S_PU: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32h(ctx, fp0, fs); + gen_helper_float_cvts_pu(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_CVT_PW_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_cvtpw_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_CVT_S_PL: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_cvts_pl(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_PLL_PS: + check_ps(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_store_fpr32h(ctx, fp0, fd); + gen_store_fpr32(ctx, fp1, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + } + break; + case OPC_PLU_PS: + check_ps(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32h(ctx, fp1, ft); + gen_store_fpr32(ctx, fp1, fd); + gen_store_fpr32h(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + } + break; + case 
OPC_PUL_PS: + check_ps(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32h(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_store_fpr32(ctx, fp1, fd); + gen_store_fpr32h(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + } + break; + case OPC_PUU_PS: + check_ps(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32h(ctx, fp0, fs); + gen_load_fpr32h(ctx, fp1, ft); + gen_store_fpr32(ctx, fp1, fd); + gen_store_fpr32h(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + } + break; + case OPC_CMP_F_PS: + case OPC_CMP_UN_PS: + case OPC_CMP_EQ_PS: + case OPC_CMP_UEQ_PS: + case OPC_CMP_OLT_PS: + case OPC_CMP_ULT_PS: + case OPC_CMP_OLE_PS: + case OPC_CMP_ULE_PS: + case OPC_CMP_SF_PS: + case OPC_CMP_NGLE_PS: + case OPC_CMP_SEQ_PS: + case OPC_CMP_NGL_PS: + case OPC_CMP_LT_PS: + case OPC_CMP_NGE_PS: + case OPC_CMP_LE_PS: + case OPC_CMP_NGT_PS: + if (ctx->opcode & (1 << 6)) { + gen_cmpabs_ps(ctx, func - 48, ft, fs, cc); + } else { + gen_cmp_ps(ctx, func - 48, ft, fs, cc); + } + break; + default: + MIPS_INVAL("farith"); + generate_exception_end(ctx, EXCP_RI); + return; + } +} + +/* Coprocessor 3 (FPU) */ +static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, + int fd, int fs, int base, int index) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + + if (base == 0) { + gen_load_gpr(tcg_ctx, t0, index); + } else if (index == 0) { + gen_load_gpr(tcg_ctx, t0, base); + } else { + gen_op_addr_add(ctx, t0, tcg_ctx->cpu_gpr[base], tcg_ctx->cpu_gpr[index]); + } + /* + * Don't do NOP if destination is zero: we must perform the actual + * memory access. 
+ */ + switch (opc) { + case OPC_LWXC1: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TESL); + tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_LDXC1: + check_cop1x(ctx); + check_cp1_registers(ctx, fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_LUXC1: + check_cp1_64bitmode(ctx); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x7); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_qemu_ld_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_SWXC1: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + tcg_gen_qemu_st_i32(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEUL); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case OPC_SDXC1: + check_cop1x(ctx); + check_cp1_registers(ctx, fs); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + tcg_gen_qemu_st_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + case OPC_SUXC1: + check_cp1_64bitmode(ctx); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x7); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + tcg_gen_qemu_st_i64(tcg_ctx, fp0, t0, ctx->mem_idx, MO_TEQ); + tcg_temp_free_i64(tcg_ctx, fp0); + } + break; + } + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, + int fd, int fr, int fs, int ft) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + switch (opc) { + case OPC_ALNV_PS: + check_ps(ctx); + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv_i32 fp = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fph = tcg_temp_new_i32(tcg_ctx); + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, fr); + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x7); + + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, 0, l1); + gen_load_fpr32(ctx, fp, fs); + gen_load_fpr32h(ctx, fph, fs); + gen_store_fpr32(ctx, fp, fd); + gen_store_fpr32h(ctx, fph, fd); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, 4, l2); + tcg_temp_free(tcg_ctx, t0); +#ifdef TARGET_WORDS_BIGENDIAN + gen_load_fpr32(ctx, fp, fs); + gen_load_fpr32h(ctx, fph, ft); + gen_store_fpr32h(ctx, fp, fd); + gen_store_fpr32(ctx, fph, fd); +#else + gen_load_fpr32h(ctx, fph, fs); + gen_load_fpr32(ctx, fp, ft); + gen_store_fpr32(ctx, fph, fd); + gen_store_fpr32h(ctx, fp, fd); +#endif + gen_set_label(tcg_ctx, l2); + tcg_temp_free_i32(tcg_ctx, fp); + tcg_temp_free_i32(tcg_ctx, fph); + } + break; + case OPC_MADD_S: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fr); + gen_helper_float_madd_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + } + break; + case OPC_MADD_D: + check_cop1x(ctx); + check_cp1_registers(ctx, fd | fs | ft | fr); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); 
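+            /*
+             * Note on the three-operand COP1X ops below: the third temp
+             * carries the fr operand into the helper and also receives the
+             * result, so MADD.D effectively computes fd = fs * ft + fr
+             * (the helper defines the exact rounding behaviour).
+             */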
+ TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_madd_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + break; + case OPC_MADD_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_madd_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + break; + case OPC_MSUB_S: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fr); + gen_helper_float_msub_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + } + break; + case OPC_MSUB_D: + check_cop1x(ctx); + check_cp1_registers(ctx, fd | fs | ft | fr); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_msub_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + break; + case OPC_MSUB_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_msub_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + break; + case OPC_NMADD_S: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fr); + gen_helper_float_nmadd_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + } + break; + case OPC_NMADD_D: + check_cop1x(ctx); + check_cp1_registers(ctx, fd | fs | ft | fr); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_nmadd_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + break; + case OPC_NMADD_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = 
tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_nmadd_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + break; + case OPC_NMSUB_S: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fr); + gen_helper_float_nmsub_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + } + break; + case OPC_NMSUB_D: + check_cop1x(ctx); + check_cp1_registers(ctx, fd | fs | ft | fr); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_nmsub_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + break; + case OPC_NMSUB_PS: + check_ps(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_nmsub_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + break; + default: + MIPS_INVAL("flt3_arith"); + generate_exception_end(ctx, EXCP_RI); + return; + } +} + +static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + /* + * The Linux kernel will emulate rdhwr if it's not supported natively. + * Therefore only check the ISA in system mode. + */ + check_insn(ctx, ISA_MIPS32R2); + t0 = tcg_temp_new(tcg_ctx); + + switch (rd) { + case 0: + gen_helper_rdhwr_cpunum(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case 1: + gen_helper_rdhwr_synci_step(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case 2: + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_rdhwr_cc(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + /* + * Break the TB to be able to take timer interrupts immediately + * after reading count. DISAS_STOP isn't sufficient, we need to ensure + * we break completely out of translated code. + */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + break; + case 3: + gen_helper_rdhwr_ccres(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case 4: + check_insn(ctx, ISA_MIPS32R6); + if (sel != 0) { + /* + * Performance counter registers are not implemented other than + * control register 0. 
+             */
+            generate_exception(ctx, EXCP_RI);
+        }
+        gen_helper_rdhwr_performance(tcg_ctx, t0, tcg_ctx->cpu_env);
+        gen_store_gpr(tcg_ctx, t0, rt);
+        break;
+    case 5:
+        check_insn(ctx, ISA_MIPS32R6);
+        gen_helper_rdhwr_xnp(tcg_ctx, t0, tcg_ctx->cpu_env);
+        gen_store_gpr(tcg_ctx, t0, rt);
+        break;
+    case 29:
+        if ((ctx->hflags & MIPS_HFLAG_CP0) ||
+            (ctx->hflags & MIPS_HFLAG_HWRENA_ULR)) {
+            tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env,
+                          offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+            gen_store_gpr(tcg_ctx, t0, rt);
+        } else {
+            generate_exception_end(ctx, EXCP_RI);
+        }
+        break;
+    default: /* Invalid */
+        MIPS_INVAL("rdhwr");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    }
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+static inline void clear_branch_hflags(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    ctx->hflags &= ~MIPS_HFLAG_BMASK;
+    if (ctx->base.is_jmp == DISAS_NEXT) {
+        save_cpu_state(ctx, 0);
+    } else {
+        /*
+         * It is not safe to save ctx->hflags, as hflags may be changed
+         * at execution time by the instruction in the delay / forbidden slot.
+         */
+        tcg_gen_andi_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, ~MIPS_HFLAG_BMASK);
+    }
+}
+
+static void gen_branch(DisasContext *ctx, int insn_bytes)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    if (ctx->hflags & MIPS_HFLAG_BMASK) {
+        int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK;
+        /* Branch completion */
+        clear_branch_hflags(ctx);
+        ctx->base.is_jmp = DISAS_NORETURN;
+        /* FIXME: Need to clear can_do_io. */
+        switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) {
+        case MIPS_HFLAG_FBNSLOT:
+            gen_goto_tb(ctx, 0, ctx->base.pc_next + insn_bytes);
+            break;
+        case MIPS_HFLAG_B:
+            /* unconditional branch */
+            if (proc_hflags & MIPS_HFLAG_BX) {
+                tcg_gen_xori_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, MIPS_HFLAG_M16);
+            }
+            gen_goto_tb(ctx, 0, ctx->btarget);
+            break;
+        case MIPS_HFLAG_BL:
+            /* branch-likely ("blikely") taken case */
+            gen_goto_tb(ctx, 0, ctx->btarget);
+            break;
+        case MIPS_HFLAG_BC:
+            /* Conditional branch */
+            {
+                TCGLabel *l1 = gen_new_label(tcg_ctx);
+
+                tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->bcond, 0, l1);
+                gen_goto_tb(ctx, 1, ctx->base.pc_next + insn_bytes);
+                gen_set_label(tcg_ctx, l1);
+                gen_goto_tb(ctx, 0, ctx->btarget);
+            }
+            break;
+        case MIPS_HFLAG_BR:
+            /* unconditional branch to register */
+            if (ctx->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
+                TCGv t0 = tcg_temp_new(tcg_ctx);
+                TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx);
+
+                tcg_gen_andi_tl(tcg_ctx, t0, tcg_ctx->btarget, 0x1);
+                tcg_gen_trunc_tl_i32(tcg_ctx, t1, t0);
+                tcg_temp_free(tcg_ctx, t0);
+                tcg_gen_andi_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, ~(uint32_t)MIPS_HFLAG_M16);
+                tcg_gen_shli_i32(tcg_ctx, t1, t1, MIPS_HFLAG_M16_SHIFT);
+                tcg_gen_or_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, t1);
+                tcg_temp_free_i32(tcg_ctx, t1);
+
+                tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->btarget, ~(target_ulong)0x1);
+            } else {
+                tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->btarget);
+            }
+            if (ctx->base.singlestep_enabled) {
+                save_cpu_state(ctx, 0);
+                gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env);
+            }
+            tcg_gen_lookup_and_goto_ptr(tcg_ctx);
+            break;
+        default:
+            fprintf(stderr, "unknown branch 0x%x\n", proc_hflags);
+            abort();
+        }
+    }
+}
+
+/* Compact Branches */
+static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc,
+                                       int rs, int rt, int32_t offset)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    int bcond_compute = 0;
+    TCGv t0 = tcg_temp_new(tcg_ctx);
+    TCGv t1 = tcg_temp_new(tcg_ctx);
+    int m16_lowbit = (ctx->hflags & MIPS_HFLAG_M16) != 0;
+
+    if (ctx->hflags & MIPS_HFLAG_BMASK) {
+#ifdef MIPS_DEBUG_DISAS
+        LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx
+                  "\n", ctx->base.pc_next);
+#endif
+        generate_exception_end(ctx, EXCP_RI);
+        goto out;
+    }
+
+    /* Load needed operands and calculate btarget */
+    switch (opc) {
+    /* compact branch */
+    case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */
+    case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+        gen_load_gpr(tcg_ctx, t0, rs);
+        gen_load_gpr(tcg_ctx, t1, rt);
+        bcond_compute = 1;
+        ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+        if (rs <= rt && rs == 0) {
+            /* OPC_BEQZALC, OPC_BNEZALC */
+            tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit);
+        }
+        break;
+    case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */
+    case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */
+        gen_load_gpr(tcg_ctx, t0, rs);
+        gen_load_gpr(tcg_ctx, t1, rt);
+        bcond_compute = 1;
+        ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+        break;
+    case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */
+    case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */
+        if (rs == 0 || rs == rt) {
+            /* OPC_BLEZALC, OPC_BGEZALC */
+            /* OPC_BGTZALC, OPC_BLTZALC */
+            tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit);
+        }
+        gen_load_gpr(tcg_ctx, t0, rs);
+        gen_load_gpr(tcg_ctx, t1, rt);
+        bcond_compute = 1;
+        ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+        break;
+    case OPC_BC:
+    case OPC_BALC:
+        ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+        break;
+    case OPC_BEQZC:
+    case OPC_BNEZC:
+        if (rs != 0) {
+            /* OPC_BEQZC, OPC_BNEZC */
+            gen_load_gpr(tcg_ctx, t0, rs);
+            bcond_compute = 1;
+            ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+        } else {
+            /* OPC_JIC, OPC_JIALC */
+            TCGv tbase = tcg_temp_new(tcg_ctx);
+            TCGv toffset = tcg_temp_new(tcg_ctx);
+
+            gen_load_gpr(tcg_ctx, tbase, rt);
+            tcg_gen_movi_tl(tcg_ctx, toffset, offset);
+            gen_op_addr_add(ctx, tcg_ctx->btarget, tbase, toffset);
+            tcg_temp_free(tcg_ctx, tbase);
+            tcg_temp_free(tcg_ctx, toffset);
+        }
+        break;
+    default:
+        MIPS_INVAL("Compact branch/jump");
+        generate_exception_end(ctx, EXCP_RI);
+        goto out;
+    }
+
+    if (bcond_compute == 0) {
+        /* Unconditional compact branch */
+        switch (opc) {
+        case OPC_JIALC:
+            tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit);
+            /* Fallthrough */
+        case OPC_JIC:
+            ctx->hflags |= MIPS_HFLAG_BR;
+            break;
+        case OPC_BALC:
+            tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit);
+            /* Fallthrough */
+        case OPC_BC:
+            ctx->hflags |= MIPS_HFLAG_B;
+            break;
+        default:
+            MIPS_INVAL("Compact branch/jump");
+            generate_exception_end(ctx, EXCP_RI);
+            goto out;
+        }
+
+        /* Generate the branch here, as compact branches don't have a delay slot */
+        gen_branch(ctx, 4);
+    } else {
+        /* Conditional compact branch */
+        TCGLabel *fs = gen_new_label(tcg_ctx);
+        save_cpu_state(ctx, 0);
+
+        switch (opc) {
+        case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */
+            if (rs == 0 && rt != 0) {
+                /* OPC_BLEZALC */
+                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
+            } else if (rs != 0 && rt != 0 && rs == rt) {
+                /* OPC_BGEZALC */
+                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
+            } else {
+                /* OPC_BGEUC */
+                tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GEU), t0, t1, fs);
+            }
+            break;
+        case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */
+            if (rs == 0 && rt != 0) {
+                /* OPC_BGTZALC */
+                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
+            } else if (rs != 0 && rt != 0 && rs == rt) {
+                /* OPC_BLTZALC */
+                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
+            } else {
+                /* OPC_BLTUC */
+                tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LTU), t0, t1, fs);
+            }
+            break;
+        case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */
+            if (rs == 0 && rt != 0) {
+                /* OPC_BLEZC */
+                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
+            } else if (rs != 0 && rt != 0 && rs == rt) {
+                /* OPC_BGEZC */
+                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
+            } else {
+                /* OPC_BGEC */
+                tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t0, t1, fs);
+            }
+            break;
+        case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */
+            if (rs == 0 && rt != 0) {
+                /* OPC_BGTZC */
+                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
+            } else if (rs != 0 && rt != 0 && rs == rt) {
+                /* OPC_BLTZC */
+                tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
+            } else {
+                /* OPC_BLTC */
+                tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t0, t1, fs);
+            }
+            break;
+        case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */
+        case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+            if (rs >= rt) {
+                /* OPC_BOVC, OPC_BNVC */
+                TCGv t2 = tcg_temp_new(tcg_ctx);
+                TCGv t3 = tcg_temp_new(tcg_ctx);
+                TCGv t4 = tcg_temp_new(tcg_ctx);
+                TCGv input_overflow = tcg_temp_new(tcg_ctx);
+
+                gen_load_gpr(tcg_ctx, t0, rs);
+                gen_load_gpr(tcg_ctx, t1, rt);
+                tcg_gen_ext32s_tl(tcg_ctx, t2, t0);
+                tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, input_overflow, t2, t0);
+                tcg_gen_ext32s_tl(tcg_ctx, t3, t1);
+                tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, t4, t3, t1);
+                tcg_gen_or_tl(tcg_ctx, input_overflow, input_overflow, t4);
+
+                tcg_gen_add_tl(tcg_ctx, t4, t2, t3);
+                tcg_gen_ext32s_tl(tcg_ctx, t4, t4);
+                tcg_gen_xor_tl(tcg_ctx, t2, t2, t3);
+                tcg_gen_xor_tl(tcg_ctx, t3, t4, t3);
+                tcg_gen_andc_tl(tcg_ctx, t2, t3, t2);
+                tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, t4, t2, 0);
+                tcg_gen_or_tl(tcg_ctx, t4, t4, input_overflow);
+                if (opc == OPC_BOVC) {
+                    /* OPC_BOVC */
+                    tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t4, 0, fs);
+                } else {
+                    /* OPC_BNVC */
+                    tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t4, 0, fs);
+                }
+                tcg_temp_free(tcg_ctx, input_overflow);
+                tcg_temp_free(tcg_ctx, t4);
+                tcg_temp_free(tcg_ctx, t3);
+                tcg_temp_free(tcg_ctx, t2);
+            } else if (rs < rt && rs == 0) {
+                /* OPC_BEQZALC, OPC_BNEZALC */
+                if (opc == OPC_BEQZALC) {
+                    /* OPC_BEQZALC */
+                    tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t1, 0, fs);
+                } else {
+                    /* OPC_BNEZALC */
+                    tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t1, 0, fs);
+                }
+            } else {
+                /* OPC_BEQC, OPC_BNEC */
+                if (opc == OPC_BEQC) {
+                    /* OPC_BEQC */
+                    tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t0, t1, fs);
+                } else {
+                    /* OPC_BNEC */
+                    tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t0, t1, fs);
+                }
+            }
+            break;
+        case OPC_BEQZC:
+            tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t0, 0, fs);
+            break;
+        case OPC_BNEZC:
+            tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t0, 0, fs);
+            break;
+        default:
+            MIPS_INVAL("Compact conditional branch/jump");
+            generate_exception_end(ctx, EXCP_RI);
+            goto out;
+        }
+
+        /* Generate the branch here, as compact branches don't have a delay slot */
+        gen_goto_tb(ctx, 1, ctx->btarget);
+        gen_set_label(tcg_ctx, fs);
+
+        ctx->hflags |= MIPS_HFLAG_FBNSLOT;
+    }
+
+out:
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free(tcg_ctx, t1);
+}
+
+/* ISA extensions (ASEs) */
+/* MIPS16 extension to MIPS32 */
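+
+/*
+ * A rough sketch of the 16-bit layout assumed by the decoders below
+ * (see decode_mips16_opc): the major opcode sits in bits [15:11], the
+ * rx and ry register fields in bits [10:8] and [7:5], and the low bits
+ * hold a funct code or immediate.  The 3-bit register fields select
+ * from {s0, s1, v0, v1, a0, a1, a2, a3}, which xlat() maps to the full
+ * register numbers {16, 17, 2, 3, 4, 5, 6, 7}.  M16_OPC_EXTEND prefixes
+ * a second halfword to widen immediates; that form is handled by
+ * decode_extended_mips16_opc().
+ */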
+ +/* MIPS16 major opcodes */ +enum { + M16_OPC_ADDIUSP = 0x00, + M16_OPC_ADDIUPC = 0x01, + M16_OPC_B = 0x02, + M16_OPC_JAL = 0x03, + M16_OPC_BEQZ = 0x04, + M16_OPC_BNEQZ = 0x05, + M16_OPC_SHIFT = 0x06, + M16_OPC_LD = 0x07, + M16_OPC_RRIA = 0x08, + M16_OPC_ADDIU8 = 0x09, + M16_OPC_SLTI = 0x0a, + M16_OPC_SLTIU = 0x0b, + M16_OPC_I8 = 0x0c, + M16_OPC_LI = 0x0d, + M16_OPC_CMPI = 0x0e, + M16_OPC_SD = 0x0f, + M16_OPC_LB = 0x10, + M16_OPC_LH = 0x11, + M16_OPC_LWSP = 0x12, + M16_OPC_LW = 0x13, + M16_OPC_LBU = 0x14, + M16_OPC_LHU = 0x15, + M16_OPC_LWPC = 0x16, + M16_OPC_LWU = 0x17, + M16_OPC_SB = 0x18, + M16_OPC_SH = 0x19, + M16_OPC_SWSP = 0x1a, + M16_OPC_SW = 0x1b, + M16_OPC_RRR = 0x1c, + M16_OPC_RR = 0x1d, + M16_OPC_EXTEND = 0x1e, + M16_OPC_I64 = 0x1f +}; + +/* I8 funct field */ +enum { + I8_BTEQZ = 0x0, + I8_BTNEZ = 0x1, + I8_SWRASP = 0x2, + I8_ADJSP = 0x3, + I8_SVRS = 0x4, + I8_MOV32R = 0x5, + I8_MOVR32 = 0x7 +}; + +/* RRR f field */ +enum { + RRR_DADDU = 0x0, + RRR_ADDU = 0x1, + RRR_DSUBU = 0x2, + RRR_SUBU = 0x3 +}; + +/* RR funct field */ +enum { + RR_JR = 0x00, + RR_SDBBP = 0x01, + RR_SLT = 0x02, + RR_SLTU = 0x03, + RR_SLLV = 0x04, + RR_BREAK = 0x05, + RR_SRLV = 0x06, + RR_SRAV = 0x07, + RR_DSRL = 0x08, + RR_CMP = 0x0a, + RR_NEG = 0x0b, + RR_AND = 0x0c, + RR_OR = 0x0d, + RR_XOR = 0x0e, + RR_NOT = 0x0f, + RR_MFHI = 0x10, + RR_CNVT = 0x11, + RR_MFLO = 0x12, + RR_DSRA = 0x13, + RR_DSLLV = 0x14, + RR_DSRLV = 0x16, + RR_DSRAV = 0x17, + RR_MULT = 0x18, + RR_MULTU = 0x19, + RR_DIV = 0x1a, + RR_DIVU = 0x1b, + RR_DMULT = 0x1c, + RR_DMULTU = 0x1d, + RR_DDIV = 0x1e, + RR_DDIVU = 0x1f +}; + +/* I64 funct field */ +enum { + I64_LDSP = 0x0, + I64_SDSP = 0x1, + I64_SDRASP = 0x2, + I64_DADJSP = 0x3, + I64_LDPC = 0x4, + I64_DADDIU5 = 0x5, + I64_DADDIUPC = 0x6, + I64_DADDIUSP = 0x7 +}; + +/* RR ry field for CNVT */ +enum { + RR_RY_CNVT_ZEB = 0x0, + RR_RY_CNVT_ZEH = 0x1, + RR_RY_CNVT_ZEW = 0x2, + RR_RY_CNVT_SEB = 0x4, + RR_RY_CNVT_SEH = 0x5, + RR_RY_CNVT_SEW = 0x6, +}; + +static int xlat(int r) +{ + static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; + + return map[r]; +} + +static void gen_mips16_save(DisasContext *ctx, + int xsregs, int aregs, + int do_ra, int do_s0, int do_s1, + int framesize) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + int args, astatic; + + switch (aregs) { + case 0: + case 1: + case 2: + case 3: + case 11: + args = 0; + break; + case 4: + case 5: + case 6: + case 7: + args = 1; + break; + case 8: + case 9: + case 10: + args = 2; + break; + case 12: + case 13: + args = 3; + break; + case 14: + args = 4; + break; + default: + generate_exception_end(ctx, EXCP_RI); + return; + } + + switch (args) { + case 4: + gen_base_offset_addr(ctx, t0, 29, 12); + gen_load_gpr(tcg_ctx, t1, 7); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); + /* Fall through */ + case 3: + gen_base_offset_addr(ctx, t0, 29, 8); + gen_load_gpr(tcg_ctx, t1, 6); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); + /* Fall through */ + case 2: + gen_base_offset_addr(ctx, t0, 29, 4); + gen_load_gpr(tcg_ctx, t1, 5); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); + /* Fall through */ + case 1: + gen_base_offset_addr(ctx, t0, 29, 0); + gen_load_gpr(tcg_ctx, t1, 4); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); + } + + gen_load_gpr(tcg_ctx, t0, 29); + +#define DECR_AND_STORE(reg) do { \ + tcg_gen_movi_tl(tcg_ctx, t2, -4); \ + gen_op_addr_add(ctx, t0, t0, t2); \ + 
gen_load_gpr(tcg_ctx, t1, reg); \ + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); \ + } while (0) + + if (do_ra) { + DECR_AND_STORE(31); + } + + switch (xsregs) { + case 7: + DECR_AND_STORE(30); + /* Fall through */ + case 6: + DECR_AND_STORE(23); + /* Fall through */ + case 5: + DECR_AND_STORE(22); + /* Fall through */ + case 4: + DECR_AND_STORE(21); + /* Fall through */ + case 3: + DECR_AND_STORE(20); + /* Fall through */ + case 2: + DECR_AND_STORE(19); + /* Fall through */ + case 1: + DECR_AND_STORE(18); + } + + if (do_s1) { + DECR_AND_STORE(17); + } + if (do_s0) { + DECR_AND_STORE(16); + } + + switch (aregs) { + case 0: + case 4: + case 8: + case 12: + case 14: + astatic = 0; + break; + case 1: + case 5: + case 9: + case 13: + astatic = 1; + break; + case 2: + case 6: + case 10: + astatic = 2; + break; + case 3: + case 7: + astatic = 3; + break; + case 11: + astatic = 4; + break; + default: + generate_exception_end(ctx, EXCP_RI); + return; + } + + if (astatic > 0) { + DECR_AND_STORE(7); + if (astatic > 1) { + DECR_AND_STORE(6); + if (astatic > 2) { + DECR_AND_STORE(5); + if (astatic > 3) { + DECR_AND_STORE(4); + } + } + } + } +#undef DECR_AND_STORE + + tcg_gen_movi_tl(tcg_ctx, t2, -framesize); + gen_op_addr_add(ctx, tcg_ctx->cpu_gpr[29], tcg_ctx->cpu_gpr[29], t2); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); +} + +static void gen_mips16_restore(DisasContext *ctx, + int xsregs, int aregs, + int do_ra, int do_s0, int do_s1, + int framesize) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int astatic; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, t2, framesize); + gen_op_addr_add(ctx, t0, tcg_ctx->cpu_gpr[29], t2); + +#define DECR_AND_LOAD(reg) do { \ + tcg_gen_movi_tl(tcg_ctx, t2, -4); \ + gen_op_addr_add(ctx, t0, t0, t2); \ + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TESL); \ + gen_store_gpr(tcg_ctx, t1, reg); \ + } while (0) + + if (do_ra) { + DECR_AND_LOAD(31); + } + + switch (xsregs) { + case 7: + DECR_AND_LOAD(30); + /* Fall through */ + case 6: + DECR_AND_LOAD(23); + /* Fall through */ + case 5: + DECR_AND_LOAD(22); + /* Fall through */ + case 4: + DECR_AND_LOAD(21); + /* Fall through */ + case 3: + DECR_AND_LOAD(20); + /* Fall through */ + case 2: + DECR_AND_LOAD(19); + /* Fall through */ + case 1: + DECR_AND_LOAD(18); + } + + if (do_s1) { + DECR_AND_LOAD(17); + } + if (do_s0) { + DECR_AND_LOAD(16); + } + + switch (aregs) { + case 0: + case 4: + case 8: + case 12: + case 14: + astatic = 0; + break; + case 1: + case 5: + case 9: + case 13: + astatic = 1; + break; + case 2: + case 6: + case 10: + astatic = 2; + break; + case 3: + case 7: + astatic = 3; + break; + case 11: + astatic = 4; + break; + default: + generate_exception_end(ctx, EXCP_RI); + return; + } + + if (astatic > 0) { + DECR_AND_LOAD(7); + if (astatic > 1) { + DECR_AND_LOAD(6); + if (astatic > 2) { + DECR_AND_LOAD(5); + if (astatic > 3) { + DECR_AND_LOAD(4); + } + } + } + } +#undef DECR_AND_LOAD + + tcg_gen_movi_tl(tcg_ctx, t2, framesize); + gen_op_addr_add(ctx, tcg_ctx->cpu_gpr[29], tcg_ctx->cpu_gpr[29], t2); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); +} + +static void gen_addiupc(DisasContext *ctx, int rx, int imm, + int is_64_bit, int extended) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) { + generate_exception_end(ctx, EXCP_RI); + return; + } + + t0 = 
tcg_temp_new(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, t0, pc_relative_pc(ctx)); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], t0, imm); + if (!is_64_bit) { + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); + } + + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base, + int16_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, op); + TCGv t1 = tcg_temp_new(tcg_ctx); + gen_base_offset_addr(ctx, t1, base, offset); + gen_helper_cache(tcg_ctx, tcg_ctx->cpu_env, t1, t0); +} + +#if defined(TARGET_MIPS64) +static void decode_i64_mips16(DisasContext *ctx, + int ry, int funct, int16_t offset, + int extended) +{ + switch (funct) { + case I64_LDSP: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + offset = extended ? offset : offset << 3; + gen_ld(ctx, OPC_LD, ry, 29, offset); + break; + case I64_SDSP: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + offset = extended ? offset : offset << 3; + gen_st(ctx, OPC_SD, ry, 29, offset); + break; + case I64_SDRASP: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + offset = extended ? offset : (ctx->opcode & 0xff) << 3; + gen_st(ctx, OPC_SD, 31, 29, offset); + break; + case I64_DADJSP: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + offset = extended ? offset : ((int8_t)ctx->opcode) << 3; + gen_arith_imm(ctx, OPC_DADDIU, 29, 29, offset); + break; + case I64_LDPC: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) { + generate_exception_end(ctx, EXCP_RI); + } else { + offset = extended ? offset : offset << 3; + gen_ld(ctx, OPC_LDPC, ry, 0, offset); + } + break; + case I64_DADDIU5: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + offset = extended ? offset : ((int8_t)(offset << 3)) >> 3; + gen_arith_imm(ctx, OPC_DADDIU, ry, ry, offset); + break; + case I64_DADDIUPC: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + offset = extended ? offset : offset << 2; + gen_addiupc(ctx, ry, offset, 1, extended); + break; + case I64_DADDIUSP: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + offset = extended ? offset : offset << 2; + gen_arith_imm(ctx, OPC_DADDIU, ry, 29, offset); + break; + } +} +#endif + +static int decode_extended_mips16_opc(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int extend = cpu_lduw_code(env, ctx->base.pc_next + 2); + int op, rx, ry, funct, sa; + int16_t imm, offset; + + ctx->opcode = (ctx->opcode << 16) | extend; + op = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 22) & 0x1f; + funct = (ctx->opcode >> 8) & 0x7; + rx = xlat((ctx->opcode >> 8) & 0x7); + ry = xlat((ctx->opcode >> 5) & 0x7); + offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11 + | ((ctx->opcode >> 21) & 0x3f) << 5 + | (ctx->opcode & 0x1f)); + + /* + * The extended opcodes cleverly reuse the opcodes from their 16-bit + * counterparts. 
+ */ + switch (op) { + case M16_OPC_ADDIUSP: + gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm); + break; + case M16_OPC_ADDIUPC: + gen_addiupc(ctx, rx, imm, 0, 1); + break; + case M16_OPC_B: + gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_BEQZ: + gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_BNEQZ: + gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_SHIFT: + switch (ctx->opcode & 0x3) { + case 0x0: + gen_shift_imm(ctx, OPC_SLL, rx, ry, sa); + break; + case 0x1: +#if defined(TARGET_MIPS64) + check_mips_64(ctx); + gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa); +#else + generate_exception_end(ctx, EXCP_RI); +#endif + break; + case 0x2: + gen_shift_imm(ctx, OPC_SRL, rx, ry, sa); + break; + case 0x3: + gen_shift_imm(ctx, OPC_SRA, rx, ry, sa); + break; + } + break; +#if defined(TARGET_MIPS64) + case M16_OPC_LD: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_ld(ctx, OPC_LD, ry, rx, offset); + break; +#endif + case M16_OPC_RRIA: + imm = ctx->opcode & 0xf; + imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4; + imm = imm | ((ctx->opcode >> 16) & 0xf) << 11; + imm = (int16_t) (imm << 1) >> 1; + if ((ctx->opcode >> 4) & 0x1) { +#if defined(TARGET_MIPS64) + check_mips_64(ctx); + gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm); +#else + generate_exception_end(ctx, EXCP_RI); +#endif + } else { + gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm); + } + break; + case M16_OPC_ADDIU8: + gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm); + break; + case M16_OPC_SLTI: + gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm); + break; + case M16_OPC_SLTIU: + gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm); + break; + case M16_OPC_I8: + switch (funct) { + case I8_BTEQZ: + gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1, 0); + break; + case I8_BTNEZ: + gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1, 0); + break; + case I8_SWRASP: + gen_st(ctx, OPC_SW, 31, 29, imm); + break; + case I8_ADJSP: + gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm); + break; + case I8_SVRS: + check_insn(ctx, ISA_MIPS32); + { + int xsregs = (ctx->opcode >> 24) & 0x7; + int aregs = (ctx->opcode >> 16) & 0xf; + int do_ra = (ctx->opcode >> 6) & 0x1; + int do_s0 = (ctx->opcode >> 5) & 0x1; + int do_s1 = (ctx->opcode >> 4) & 0x1; + int framesize = (((ctx->opcode >> 20) & 0xf) << 4 + | (ctx->opcode & 0xf)) << 3; + + if (ctx->opcode & (1 << 7)) { + gen_mips16_save(ctx, xsregs, aregs, + do_ra, do_s0, do_s1, + framesize); + } else { + gen_mips16_restore(ctx, xsregs, aregs, + do_ra, do_s0, do_s1, + framesize); + } + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case M16_OPC_LI: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], (uint16_t) imm); + break; + case M16_OPC_CMPI: + tcg_gen_xori_tl(tcg_ctx, tcg_ctx->cpu_gpr[24], tcg_ctx->cpu_gpr[rx], (uint16_t) imm); + break; +#if defined(TARGET_MIPS64) + case M16_OPC_SD: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_st(ctx, OPC_SD, ry, rx, offset); + break; +#endif + case M16_OPC_LB: + gen_ld(ctx, OPC_LB, ry, rx, offset); + break; + case M16_OPC_LH: + gen_ld(ctx, OPC_LH, ry, rx, offset); + break; + case M16_OPC_LWSP: + gen_ld(ctx, OPC_LW, rx, 29, offset); + break; + case M16_OPC_LW: + gen_ld(ctx, OPC_LW, ry, rx, offset); + break; + case M16_OPC_LBU: + gen_ld(ctx, OPC_LBU, ry, rx, offset); + 
break; + case M16_OPC_LHU: + gen_ld(ctx, OPC_LHU, ry, rx, offset); + break; + case M16_OPC_LWPC: + gen_ld(ctx, OPC_LWPC, rx, 0, offset); + break; +#if defined(TARGET_MIPS64) + case M16_OPC_LWU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_ld(ctx, OPC_LWU, ry, rx, offset); + break; +#endif + case M16_OPC_SB: + gen_st(ctx, OPC_SB, ry, rx, offset); + break; + case M16_OPC_SH: + gen_st(ctx, OPC_SH, ry, rx, offset); + break; + case M16_OPC_SWSP: + gen_st(ctx, OPC_SW, rx, 29, offset); + break; + case M16_OPC_SW: + gen_st(ctx, OPC_SW, ry, rx, offset); + break; +#if defined(TARGET_MIPS64) + case M16_OPC_I64: + decode_i64_mips16(ctx, ry, funct, offset, 1); + break; +#endif + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + + return 4; +} + +static inline bool is_uhi(int sdbbp_code) +{ + // return semihosting_enabled() && sdbbp_code == 1; FIXME + return false; +} + +static int decode_mips16_opc(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rx, ry; + int sa; + int op, cnvt_op, op1, offset; + int funct; + int n_bytes; + + op = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 2) & 0x7; + sa = sa == 0 ? 8 : sa; + rx = xlat((ctx->opcode >> 8) & 0x7); + cnvt_op = (ctx->opcode >> 5) & 0x7; + ry = xlat((ctx->opcode >> 5) & 0x7); + op1 = offset = ctx->opcode & 0x1f; + + n_bytes = 2; + + switch (op) { + case M16_OPC_ADDIUSP: + { + int16_t imm = ((uint8_t) ctx->opcode) << 2; + + gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm); + } + break; + case M16_OPC_ADDIUPC: + gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0); + break; + case M16_OPC_B: + offset = (ctx->opcode & 0x7ff) << 1; + offset = (int16_t)(offset << 4) >> 4; + gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_JAL: + offset = cpu_lduw_code(env, ctx->base.pc_next + 2); + offset = (((ctx->opcode & 0x1f) << 21) + | ((ctx->opcode >> 5) & 0x1f) << 16 + | offset) << 2; + op = ((ctx->opcode >> 10) & 0x1) ? 
OPC_JALX : OPC_JAL; + gen_compute_branch(ctx, op, 4, rx, ry, offset, 2); + n_bytes = 4; + break; + case M16_OPC_BEQZ: + gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0, + ((int8_t)ctx->opcode) << 1, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_BNEQZ: + gen_compute_branch(ctx, OPC_BNE, 2, rx, 0, + ((int8_t)ctx->opcode) << 1, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_SHIFT: + switch (ctx->opcode & 0x3) { + case 0x0: + gen_shift_imm(ctx, OPC_SLL, rx, ry, sa); + break; + case 0x1: +#if defined(TARGET_MIPS64) + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa); +#else + generate_exception_end(ctx, EXCP_RI); +#endif + break; + case 0x2: + gen_shift_imm(ctx, OPC_SRL, rx, ry, sa); + break; + case 0x3: + gen_shift_imm(ctx, OPC_SRA, rx, ry, sa); + break; + } + break; +#if defined(TARGET_MIPS64) + case M16_OPC_LD: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_ld(ctx, OPC_LD, ry, rx, offset << 3); + break; +#endif + case M16_OPC_RRIA: + { + int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4; + + if ((ctx->opcode >> 4) & 1) { +#if defined(TARGET_MIPS64) + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm); +#else + generate_exception_end(ctx, EXCP_RI); +#endif + } else { + gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm); + } + } + break; + case M16_OPC_ADDIU8: + { + int16_t imm = (int8_t) ctx->opcode; + + gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm); + } + break; + case M16_OPC_SLTI: + { + int16_t imm = (uint8_t) ctx->opcode; + gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm); + } + break; + case M16_OPC_SLTIU: + { + int16_t imm = (uint8_t) ctx->opcode; + gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm); + } + break; + case M16_OPC_I8: + { + int reg32; + + funct = (ctx->opcode >> 8) & 0x7; + switch (funct) { + case I8_BTEQZ: + gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0, + ((int8_t)ctx->opcode) << 1, 0); + break; + case I8_BTNEZ: + gen_compute_branch(ctx, OPC_BNE, 2, 24, 0, + ((int8_t)ctx->opcode) << 1, 0); + break; + case I8_SWRASP: + gen_st(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2); + break; + case I8_ADJSP: + gen_arith_imm(ctx, OPC_ADDIU, 29, 29, + ((int8_t)ctx->opcode) << 3); + break; + case I8_SVRS: + check_insn(ctx, ISA_MIPS32); + { + int do_ra = ctx->opcode & (1 << 6); + int do_s0 = ctx->opcode & (1 << 5); + int do_s1 = ctx->opcode & (1 << 4); + int framesize = ctx->opcode & 0xf; + + if (framesize == 0) { + framesize = 128; + } else { + framesize = framesize << 3; + } + + if (ctx->opcode & (1 << 7)) { + gen_mips16_save(ctx, 0, 0, + do_ra, do_s0, do_s1, framesize); + } else { + gen_mips16_restore(ctx, 0, 0, + do_ra, do_s0, do_s1, framesize); + } + } + break; + case I8_MOV32R: + { + int rz = xlat(ctx->opcode & 0x7); + + reg32 = (((ctx->opcode >> 3) & 0x3) << 3) | + ((ctx->opcode >> 5) & 0x7); + gen_arith(ctx, OPC_ADDU, reg32, rz, 0); + } + break; + case I8_MOVR32: + reg32 = ctx->opcode & 0x1f; + gen_arith(ctx, OPC_ADDU, ry, reg32, 0); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + } + break; + case M16_OPC_LI: + { + int16_t imm = (uint8_t) ctx->opcode; + + gen_arith_imm(ctx, OPC_ADDIU, rx, 0, imm); + } + break; + case M16_OPC_CMPI: + { + int16_t imm = (uint8_t) ctx->opcode; + gen_logic_imm(ctx, OPC_XORI, 24, rx, imm); + } + break; +#if defined(TARGET_MIPS64) + case M16_OPC_SD: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_st(ctx, OPC_SD, ry, rx, offset << 3); + break; +#endif + case 
M16_OPC_LB: + gen_ld(ctx, OPC_LB, ry, rx, offset); + break; + case M16_OPC_LH: + gen_ld(ctx, OPC_LH, ry, rx, offset << 1); + break; + case M16_OPC_LWSP: + gen_ld(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2); + break; + case M16_OPC_LW: + gen_ld(ctx, OPC_LW, ry, rx, offset << 2); + break; + case M16_OPC_LBU: + gen_ld(ctx, OPC_LBU, ry, rx, offset); + break; + case M16_OPC_LHU: + gen_ld(ctx, OPC_LHU, ry, rx, offset << 1); + break; + case M16_OPC_LWPC: + gen_ld(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2); + break; +#if defined(TARGET_MIPS64) + case M16_OPC_LWU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_ld(ctx, OPC_LWU, ry, rx, offset << 2); + break; +#endif + case M16_OPC_SB: + gen_st(ctx, OPC_SB, ry, rx, offset); + break; + case M16_OPC_SH: + gen_st(ctx, OPC_SH, ry, rx, offset << 1); + break; + case M16_OPC_SWSP: + gen_st(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2); + break; + case M16_OPC_SW: + gen_st(ctx, OPC_SW, ry, rx, offset << 2); + break; + case M16_OPC_RRR: + { + int rz = xlat((ctx->opcode >> 2) & 0x7); + int mips32_op; + + switch (ctx->opcode & 0x3) { + case RRR_ADDU: + mips32_op = OPC_ADDU; + break; + case RRR_SUBU: + mips32_op = OPC_SUBU; + break; +#if defined(TARGET_MIPS64) + case RRR_DADDU: + mips32_op = OPC_DADDU; + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + break; + case RRR_DSUBU: + mips32_op = OPC_DSUBU; + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + break; +#endif + default: + generate_exception_end(ctx, EXCP_RI); + goto done; + } + + gen_arith(ctx, mips32_op, rz, rx, ry); + done: + ; + } + break; + case M16_OPC_RR: + switch (op1) { + case RR_JR: + { + int nd = (ctx->opcode >> 7) & 0x1; + int link = (ctx->opcode >> 6) & 0x1; + int ra = (ctx->opcode >> 5) & 0x1; + + if (nd) { + check_insn(ctx, ISA_MIPS32); + } + + if (link) { + op = OPC_JALR; + } else { + op = OPC_JR; + } + + gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0, + (nd ? 0 : 2)); + } + break; + case RR_SDBBP: + if (is_uhi(extract32(ctx->opcode, 5, 6))) { + // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); + } else { + /* + * XXX: not clear which exception should be raised + * when in debug mode... 
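+ * For now the code below simply raises EXCP_DBp after the
+ * ISA_MIPS32 check, the same as outside of debug mode.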
+ */ + check_insn(ctx, ISA_MIPS32); + generate_exception_end(ctx, EXCP_DBp); + } + break; + case RR_SLT: + gen_slt(ctx, OPC_SLT, 24, rx, ry); + break; + case RR_SLTU: + gen_slt(ctx, OPC_SLTU, 24, rx, ry); + break; + case RR_BREAK: + generate_exception_end(ctx, EXCP_BREAK); + break; + case RR_SLLV: + gen_shift(ctx, OPC_SLLV, ry, rx, ry); + break; + case RR_SRLV: + gen_shift(ctx, OPC_SRLV, ry, rx, ry); + break; + case RR_SRAV: + gen_shift(ctx, OPC_SRAV, ry, rx, ry); + break; +#if defined(TARGET_MIPS64) + case RR_DSRL: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift_imm(ctx, OPC_DSRL, ry, ry, sa); + break; +#endif + case RR_CMP: + gen_logic(ctx, OPC_XOR, 24, rx, ry); + break; + case RR_NEG: + gen_arith(ctx, OPC_SUBU, rx, 0, ry); + break; + case RR_AND: + gen_logic(ctx, OPC_AND, rx, rx, ry); + break; + case RR_OR: + gen_logic(ctx, OPC_OR, rx, rx, ry); + break; + case RR_XOR: + gen_logic(ctx, OPC_XOR, rx, rx, ry); + break; + case RR_NOT: + gen_logic(ctx, OPC_NOR, rx, ry, 0); + break; + case RR_MFHI: + gen_HILO(ctx, OPC_MFHI, 0, rx); + break; + case RR_CNVT: + check_insn(ctx, ISA_MIPS32); + switch (cnvt_op) { + case RR_RY_CNVT_ZEB: + tcg_gen_ext8u_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); + break; + case RR_RY_CNVT_ZEH: + tcg_gen_ext16u_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); + break; + case RR_RY_CNVT_SEB: + tcg_gen_ext8s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); + break; + case RR_RY_CNVT_SEH: + tcg_gen_ext16s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); + break; +#if defined(TARGET_MIPS64) + case RR_RY_CNVT_ZEW: + check_insn(ctx, ISA_MIPS64); + check_mips_64(ctx); + tcg_gen_ext32u_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); + break; + case RR_RY_CNVT_SEW: + check_insn(ctx, ISA_MIPS64); + check_mips_64(ctx); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rx], tcg_ctx->cpu_gpr[rx]); + break; +#endif + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case RR_MFLO: + gen_HILO(ctx, OPC_MFLO, 0, rx); + break; +#if defined(TARGET_MIPS64) + case RR_DSRA: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift_imm(ctx, OPC_DSRA, ry, ry, sa); + break; + case RR_DSLLV: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift(ctx, OPC_DSLLV, ry, rx, ry); + break; + case RR_DSRLV: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift(ctx, OPC_DSRLV, ry, rx, ry); + break; + case RR_DSRAV: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift(ctx, OPC_DSRAV, ry, rx, ry); + break; +#endif + case RR_MULT: + gen_muldiv(ctx, OPC_MULT, 0, rx, ry); + break; + case RR_MULTU: + gen_muldiv(ctx, OPC_MULTU, 0, rx, ry); + break; + case RR_DIV: + gen_muldiv(ctx, OPC_DIV, 0, rx, ry); + break; + case RR_DIVU: + gen_muldiv(ctx, OPC_DIVU, 0, rx, ry); + break; +#if defined(TARGET_MIPS64) + case RR_DMULT: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_muldiv(ctx, OPC_DMULT, 0, rx, ry); + break; + case RR_DMULTU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_muldiv(ctx, OPC_DMULTU, 0, rx, ry); + break; + case RR_DDIV: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_muldiv(ctx, OPC_DDIV, 0, rx, ry); + break; + case RR_DDIVU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_muldiv(ctx, OPC_DDIVU, 0, rx, ry); + break; +#endif + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case M16_OPC_EXTEND: + decode_extended_mips16_opc(env, ctx); + n_bytes = 4; + break; +#if defined(TARGET_MIPS64) + case M16_OPC_I64: + funct = 
(ctx->opcode >> 8) & 0x7; + decode_i64_mips16(ctx, ry, funct, offset, 0); + break; +#endif + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + + return n_bytes; +} + +/* microMIPS extension to MIPS32/MIPS64 */ + +/* + * microMIPS32/microMIPS64 major opcodes + * + * 1. MIPS Architecture for Programmers Volume II-B: + * The microMIPS32 Instruction Set (Revision 3.05) + * + * Table 6.2 microMIPS32 Encoding of Major Opcode Field + * + * 2. MIPS Architecture For Programmers Volume II-A: + * The MIPS64 Instruction Set (Revision 3.51) + */ + +enum { + POOL32A = 0x00, + POOL16A = 0x01, + LBU16 = 0x02, + MOVE16 = 0x03, + ADDI32 = 0x04, + R6_LUI = 0x04, + AUI = 0x04, + LBU32 = 0x05, + SB32 = 0x06, + LB32 = 0x07, + + POOL32B = 0x08, + POOL16B = 0x09, + LHU16 = 0x0a, + ANDI16 = 0x0b, + ADDIU32 = 0x0c, + LHU32 = 0x0d, + SH32 = 0x0e, + LH32 = 0x0f, + + POOL32I = 0x10, + POOL16C = 0x11, + LWSP16 = 0x12, + POOL16D = 0x13, + ORI32 = 0x14, + POOL32F = 0x15, + POOL32S = 0x16, /* MIPS64 */ + DADDIU32 = 0x17, /* MIPS64 */ + + POOL32C = 0x18, + LWGP16 = 0x19, + LW16 = 0x1a, + POOL16E = 0x1b, + XORI32 = 0x1c, + JALS32 = 0x1d, + BOVC = 0x1d, + BEQC = 0x1d, + BEQZALC = 0x1d, + ADDIUPC = 0x1e, + PCREL = 0x1e, + BNVC = 0x1f, + BNEC = 0x1f, + BNEZALC = 0x1f, + + R6_BEQZC = 0x20, + JIC = 0x20, + POOL16F = 0x21, + SB16 = 0x22, + BEQZ16 = 0x23, + BEQZC16 = 0x23, + SLTI32 = 0x24, + BEQ32 = 0x25, + BC = 0x25, + SWC132 = 0x26, + LWC132 = 0x27, + + /* 0x29 is reserved */ + RES_29 = 0x29, + R6_BNEZC = 0x28, + JIALC = 0x28, + SH16 = 0x2a, + BNEZ16 = 0x2b, + BNEZC16 = 0x2b, + SLTIU32 = 0x2c, + BNE32 = 0x2d, + BALC = 0x2d, + SDC132 = 0x2e, + LDC132 = 0x2f, + + /* 0x31 is reserved */ + RES_31 = 0x31, + BLEZALC = 0x30, + BGEZALC = 0x30, + BGEUC = 0x30, + SWSP16 = 0x32, + B16 = 0x33, + BC16 = 0x33, + ANDI32 = 0x34, + J32 = 0x35, + BGTZC = 0x35, + BLTZC = 0x35, + BLTC = 0x35, + SD32 = 0x36, /* MIPS64 */ + LD32 = 0x37, /* MIPS64 */ + + /* 0x39 is reserved */ + RES_39 = 0x39, + BGTZALC = 0x38, + BLTZALC = 0x38, + BLTUC = 0x38, + SW16 = 0x3a, + LI16 = 0x3b, + JALX32 = 0x3c, + JAL32 = 0x3d, + BLEZC = 0x3d, + BGEZC = 0x3d, + BGEC = 0x3d, + SW32 = 0x3e, + LW32 = 0x3f +}; + +/* PCREL Instructions perform PC-Relative address calculation. bits 20..16 */ +enum { + ADDIUPC_00 = 0x00, + ADDIUPC_01 = 0x01, + ADDIUPC_02 = 0x02, + ADDIUPC_03 = 0x03, + ADDIUPC_04 = 0x04, + ADDIUPC_05 = 0x05, + ADDIUPC_06 = 0x06, + ADDIUPC_07 = 0x07, + AUIPC = 0x1e, + ALUIPC = 0x1f, + LWPC_08 = 0x08, + LWPC_09 = 0x09, + LWPC_0A = 0x0A, + LWPC_0B = 0x0B, + LWPC_0C = 0x0C, + LWPC_0D = 0x0D, + LWPC_0E = 0x0E, + LWPC_0F = 0x0F, +}; + +/* POOL32A encoding of minor opcode field */ + +enum { + /* + * These opcodes are distinguished only by bits 9..6; those bits are + * what are recorded below. + */ + SLL32 = 0x0, + SRL32 = 0x1, + SRA = 0x2, + ROTR = 0x3, + SELEQZ = 0x5, + SELNEZ = 0x6, + R6_RDHWR = 0x7, + + SLLV = 0x0, + SRLV = 0x1, + SRAV = 0x2, + ROTRV = 0x3, + ADD = 0x4, + ADDU32 = 0x5, + SUB = 0x6, + SUBU32 = 0x7, + MUL = 0x8, + AND = 0x9, + OR32 = 0xa, + NOR = 0xb, + XOR32 = 0xc, + SLT = 0xd, + SLTU = 0xe, + + MOVN = 0x0, + R6_MUL = 0x0, + MOVZ = 0x1, + MUH = 0x1, + MULU = 0x2, + MUHU = 0x3, + LWXS = 0x4, + R6_DIV = 0x4, + MOD = 0x5, + R6_DIVU = 0x6, + MODU = 0x7, + + /* The following can be distinguished by their lower 6 bits. */ + BREAK32 = 0x07, + INS = 0x0c, + LSA = 0x0f, + ALIGN = 0x1f, + EXT = 0x2c, + POOL32AXF = 0x3c, + SIGRIE = 0x3f +}; + +/* POOL32AXF encoding of minor opcode field extension */ + +/* + * 1. 
MIPS Architecture for Programmers Volume II-B: + * The microMIPS32 Instruction Set (Revision 3.05) + * + * Table 6.5 POOL32Axf Encoding of Minor Opcode Extension Field + * + * 2. MIPS Architecture for Programmers Volume IV-e: + * The MIPS DSP Application-Specific Extension + * to the microMIPS32 Architecture (Revision 2.34) + * + * Table 5.5 POOL32Axf Encoding of Minor Opcode Extension Field + */ + +enum { + /* bits 11..6 */ + TEQ = 0x00, + TGE = 0x08, + TGEU = 0x10, + TLT = 0x20, + TLTU = 0x28, + TNE = 0x30, + + MFC0 = 0x03, + MTC0 = 0x0b, + + /* begin of microMIPS32 DSP */ + + /* bits 13..12 for 0x01 */ + MFHI_ACC = 0x0, + MFLO_ACC = 0x1, + MTHI_ACC = 0x2, + MTLO_ACC = 0x3, + + /* bits 13..12 for 0x2a */ + MADD_ACC = 0x0, + MADDU_ACC = 0x1, + MSUB_ACC = 0x2, + MSUBU_ACC = 0x3, + + /* bits 13..12 for 0x32 */ + MULT_ACC = 0x0, + MULTU_ACC = 0x1, + + /* end of microMIPS32 DSP */ + + /* bits 15..12 for 0x2c */ + BITSWAP = 0x0, + SEB = 0x2, + SEH = 0x3, + CLO = 0x4, + CLZ = 0x5, + RDHWR = 0x6, + WSBH = 0x7, + MULT = 0x8, + MULTU = 0x9, + DIV = 0xa, + DIVU = 0xb, + MADD = 0xc, + MADDU = 0xd, + MSUB = 0xe, + MSUBU = 0xf, + + /* bits 15..12 for 0x34 */ + MFC2 = 0x4, + MTC2 = 0x5, + MFHC2 = 0x8, + MTHC2 = 0x9, + CFC2 = 0xc, + CTC2 = 0xd, + + /* bits 15..12 for 0x3c */ + JALR = 0x0, + JR = 0x0, /* alias */ + JALRC = 0x0, + JRC = 0x0, + JALR_HB = 0x1, + JALRC_HB = 0x1, + JALRS = 0x4, + JALRS_HB = 0x5, + + /* bits 15..12 for 0x05 */ + RDPGPR = 0xe, + WRPGPR = 0xf, + + /* bits 15..12 for 0x0d */ + TLBP = 0x0, + TLBR = 0x1, + TLBWI = 0x2, + TLBWR = 0x3, + TLBINV = 0x4, + TLBINVF = 0x5, + WAIT = 0x9, + IRET = 0xd, + DERET = 0xe, + ERET = 0xf, + + /* bits 15..12 for 0x15 */ + DMT = 0x0, + DVPE = 0x1, + EMT = 0x2, + EVPE = 0x3, + + /* bits 15..12 for 0x1d */ + DI = 0x4, + EI = 0x5, + + /* bits 15..12 for 0x2d */ + SYNC = 0x6, + SYSCALL = 0x8, + SDBBP = 0xd, + + /* bits 15..12 for 0x35 */ + MFHI32 = 0x0, + MFLO32 = 0x1, + MTHI32 = 0x2, + MTLO32 = 0x3, +}; + +/* POOL32B encoding of minor opcode field (bits 15..12) */ + +enum { + LWC2 = 0x0, + LWP = 0x1, + LDP = 0x4, + LWM32 = 0x5, + CACHE = 0x6, + LDM = 0x7, + SWC2 = 0x8, + SWP = 0x9, + SDP = 0xc, + SWM32 = 0xd, + SDM = 0xf +}; + +/* POOL32C encoding of minor opcode field (bits 15..12) */ + +enum { + LWL = 0x0, + SWL = 0x8, + LWR = 0x1, + SWR = 0x9, + PREF = 0x2, + ST_EVA = 0xa, + LL = 0x3, + SC = 0xb, + LDL = 0x4, + SDL = 0xc, + LDR = 0x5, + SDR = 0xd, + LD_EVA = 0x6, + LWU = 0xe, + LLD = 0x7, + SCD = 0xf +}; + +/* POOL32C LD-EVA encoding of minor opcode field (bits 11..9) */ + +enum { + LBUE = 0x0, + LHUE = 0x1, + LWLE = 0x2, + LWRE = 0x3, + LBE = 0x4, + LHE = 0x5, + LLE = 0x6, + LWE = 0x7, +}; + +/* POOL32C ST-EVA encoding of minor opcode field (bits 11..9) */ + +enum { + SWLE = 0x0, + SWRE = 0x1, + PREFE = 0x2, + CACHEE = 0x3, + SBE = 0x4, + SHE = 0x5, + SCE = 0x6, + SWE = 0x7, +}; + +/* POOL32F encoding of minor opcode field (bits 5..0) */ + +enum { + /* These are the bit 7..6 values */ + ADD_FMT = 0x0, + + SUB_FMT = 0x1, + + MUL_FMT = 0x2, + + DIV_FMT = 0x3, + + /* These are the bit 8..6 values */ + MOVN_FMT = 0x0, + RSQRT2_FMT = 0x0, + MOVF_FMT = 0x0, + RINT_FMT = 0x0, + SELNEZ_FMT = 0x0, + + MOVZ_FMT = 0x1, + LWXC1 = 0x1, + MOVT_FMT = 0x1, + CLASS_FMT = 0x1, + SELEQZ_FMT = 0x1, + + PLL_PS = 0x2, + SWXC1 = 0x2, + SEL_FMT = 0x2, + + PLU_PS = 0x3, + LDXC1 = 0x3, + + MOVN_FMT_04 = 0x4, + PUL_PS = 0x4, + SDXC1 = 0x4, + RECIP2_FMT = 0x4, + + MOVZ_FMT_05 = 0x05, + PUU_PS = 0x5, + LUXC1 = 0x5, + + CVT_PS_S = 0x6, + SUXC1 = 0x6, + ADDR_PS = 0x6, + PREFX =
0x6, + MADDF_FMT = 0x6, + + MULR_PS = 0x7, + MSUBF_FMT = 0x7, + + MADD_S = 0x01, + MADD_D = 0x09, + MADD_PS = 0x11, + ALNV_PS = 0x19, + MSUB_S = 0x21, + MSUB_D = 0x29, + MSUB_PS = 0x31, + + NMADD_S = 0x02, + NMADD_D = 0x0a, + NMADD_PS = 0x12, + NMSUB_S = 0x22, + NMSUB_D = 0x2a, + NMSUB_PS = 0x32, + + MIN_FMT = 0x3, + MAX_FMT = 0xb, + MINA_FMT = 0x23, + MAXA_FMT = 0x2b, + POOL32FXF = 0x3b, + + CABS_COND_FMT = 0x1c, /* MIPS3D */ + C_COND_FMT = 0x3c, + + CMP_CONDN_S = 0x5, + CMP_CONDN_D = 0x15 +}; + +/* POOL32Fxf encoding of minor opcode extension field */ + +enum { + CVT_L = 0x04, + RSQRT_FMT = 0x08, + FLOOR_L = 0x0c, + CVT_PW_PS = 0x1c, + CVT_W = 0x24, + SQRT_FMT = 0x28, + FLOOR_W = 0x2c, + CVT_PS_PW = 0x3c, + CFC1 = 0x40, + RECIP_FMT = 0x48, + CEIL_L = 0x4c, + CTC1 = 0x60, + CEIL_W = 0x6c, + MFC1 = 0x80, + CVT_S_PL = 0x84, + TRUNC_L = 0x8c, + MTC1 = 0xa0, + CVT_S_PU = 0xa4, + TRUNC_W = 0xac, + MFHC1 = 0xc0, + ROUND_L = 0xcc, + MTHC1 = 0xe0, + ROUND_W = 0xec, + + MOV_FMT = 0x01, + MOVF = 0x05, + ABS_FMT = 0x0d, + RSQRT1_FMT = 0x1d, + MOVT = 0x25, + NEG_FMT = 0x2d, + CVT_D = 0x4d, + RECIP1_FMT = 0x5d, + CVT_S = 0x6d +}; + +/* POOL32I encoding of minor opcode field (bits 25..21) */ + +enum { + BLTZ = 0x00, + BLTZAL = 0x01, + BGEZ = 0x02, + BGEZAL = 0x03, + BLEZ = 0x04, + BNEZC = 0x05, + BGTZ = 0x06, + BEQZC = 0x07, + TLTI = 0x08, + BC1EQZC = 0x08, + TGEI = 0x09, + BC1NEZC = 0x09, + TLTIU = 0x0a, + BC2EQZC = 0x0a, + TGEIU = 0x0b, + BC2NEZC = 0x0b, + TNEI = 0x0c, + R6_SYNCI = 0x0c, + LUI = 0x0d, + TEQI = 0x0e, + SYNCI = 0x10, + BLTZALS = 0x11, + BGEZALS = 0x13, + BC2F = 0x14, + BC2T = 0x15, + BPOSGE64 = 0x1a, + BPOSGE32 = 0x1b, + /* These overlap and are distinguished by bit 16 of the instruction */ + BC1F = 0x1c, + BC1T = 0x1d, + BC1ANY2F = 0x1c, + BC1ANY2T = 0x1d, + BC1ANY4F = 0x1e, + BC1ANY4T = 0x1f +}; + +/* POOL16A encoding of minor opcode field */ + +enum { + ADDU16 = 0x0, + SUBU16 = 0x1 +}; + +/* POOL16B encoding of minor opcode field */ + +enum { + SLL16 = 0x0, + SRL16 = 0x1 +}; + +/* POOL16C encoding of minor opcode field */ + +enum { + NOT16 = 0x00, + XOR16 = 0x04, + AND16 = 0x08, + OR16 = 0x0c, + LWM16 = 0x10, + SWM16 = 0x14, + JR16 = 0x18, + JRC16 = 0x1a, + JALR16 = 0x1c, + JALR16S = 0x1e, + MFHI16 = 0x20, + MFLO16 = 0x24, + BREAK16 = 0x28, + SDBBP16 = 0x2c, + JRADDIUSP = 0x30 +}; + +/* R6 POOL16C encoding of minor opcode field (bits 0..5) */ + +enum { + R6_NOT16 = 0x00, + R6_AND16 = 0x01, + R6_LWM16 = 0x02, + R6_JRC16 = 0x03, + MOVEP = 0x04, + MOVEP_05 = 0x05, + MOVEP_06 = 0x06, + MOVEP_07 = 0x07, + R6_XOR16 = 0x08, + R6_OR16 = 0x09, + R6_SWM16 = 0x0a, + JALRC16 = 0x0b, + MOVEP_0C = 0x0c, + MOVEP_0D = 0x0d, + MOVEP_0E = 0x0e, + MOVEP_0F = 0x0f, + JRCADDIUSP = 0x13, + R6_BREAK16 = 0x1b, + R6_SDBBP16 = 0x3b +}; + +/* POOL16D encoding of minor opcode field */ + +enum { + ADDIUS5 = 0x0, + ADDIUSP = 0x1 +}; + +/* POOL16E encoding of minor opcode field */ + +enum { + ADDIUR2 = 0x0, + ADDIUR1SP = 0x1 +}; + +static int mmreg(int r) +{ + static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; + + return map[r]; +} + +/* Used for 16-bit store instructions.
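+ * Unlike mmreg() above, encoding 0 selects $0 rather than $16, so a
+ * 16-bit store can name the zero register as its source.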
*/ +static int mmreg2(int r) +{ + static const int map[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; + + return map[r]; +} + +#define uMIPS_RD(op) ((op >> 7) & 0x7) +#define uMIPS_RS(op) ((op >> 4) & 0x7) +#define uMIPS_RS2(op) uMIPS_RS(op) +#define uMIPS_RS1(op) ((op >> 1) & 0x7) +#define uMIPS_RD5(op) ((op >> 5) & 0x1f) +#define uMIPS_RS5(op) (op & 0x1f) + +/* Signed immediate */ +#define SIMM(op, start, width) \ + ((int32_t)(((op >> start) & ((~0U) >> (32 - width))) \ + << (32 - width)) \ + >> (32 - width)) +/* Zero-extended immediate */ +#define ZIMM(op, start, width) ((op >> start) & ((~0U) >> (32 - width))) + +static void gen_addiur1sp(DisasContext *ctx) +{ + int rd = mmreg(uMIPS_RD(ctx->opcode)); + + gen_arith_imm(ctx, OPC_ADDIU, rd, 29, ((ctx->opcode >> 1) & 0x3f) << 2); +} + +static void gen_addiur2(DisasContext *ctx) +{ + static const int decoded_imm[] = { 1, 4, 8, 12, 16, 20, 24, -1 }; + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rs = mmreg(uMIPS_RS(ctx->opcode)); + + gen_arith_imm(ctx, OPC_ADDIU, rd, rs, decoded_imm[ZIMM(ctx->opcode, 1, 3)]); +} + +static void gen_addiusp(DisasContext *ctx) +{ + int encoded = ZIMM(ctx->opcode, 1, 9); + int decoded; + + if (encoded <= 1) { + decoded = 256 + encoded; + } else if (encoded <= 255) { + decoded = encoded; + } else if (encoded <= 509) { + decoded = encoded - 512; + } else { + decoded = encoded - 768; + } + + gen_arith_imm(ctx, OPC_ADDIU, 29, 29, decoded << 2); +} + +static void gen_addius5(DisasContext *ctx) +{ + int imm = SIMM(ctx->opcode, 1, 4); + int rd = (ctx->opcode >> 5) & 0x1f; + + gen_arith_imm(ctx, OPC_ADDIU, rd, rd, imm); +} + +static void gen_andi16(DisasContext *ctx) +{ + static const int decoded_imm[] = { 128, 1, 2, 3, 4, 7, 8, 15, 16, + 31, 32, 63, 64, 255, 32768, 65535 }; + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rs = mmreg(uMIPS_RS(ctx->opcode)); + int encoded = ZIMM(ctx->opcode, 0, 4); + + gen_logic_imm(ctx, OPC_ANDI, rd, rs, decoded_imm[encoded]); +} + +static void gen_ldst_multiple(DisasContext *ctx, uint32_t opc, int reglist, + int base, int16_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + TCGv_i32 t2; + + if (ctx->hflags & MIPS_HFLAG_BMASK) { + generate_exception_end(ctx, EXCP_RI); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + + gen_base_offset_addr(ctx, t0, base, offset); + + t1 = tcg_const_tl(tcg_ctx, reglist); + t2 = tcg_const_i32(tcg_ctx, ctx->mem_idx); + + save_cpu_state(ctx, 1); + switch (opc) { + case LWM32: + gen_helper_lwm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); + break; + case SWM32: + gen_helper_swm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); + break; +#ifdef TARGET_MIPS64 + case LDM: + gen_helper_ldm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); + break; + case SDM: + gen_helper_sdm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); + break; +#endif + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); +} + + +static void gen_pool16c_insn(DisasContext *ctx) +{ + int rd = mmreg((ctx->opcode >> 3) & 0x7); + int rs = mmreg(ctx->opcode & 0x7); + + switch (((ctx->opcode) >> 4) & 0x3f) { + case NOT16 + 0: + case NOT16 + 1: + case NOT16 + 2: + case NOT16 + 3: + gen_logic(ctx, OPC_NOR, rd, rs, 0); + break; + case XOR16 + 0: + case XOR16 + 1: + case XOR16 + 2: + case XOR16 + 3: + gen_logic(ctx, OPC_XOR, rd, rd, rs); + break; + case AND16 + 0: + case AND16 + 1: + case AND16 + 2: + case AND16 + 3: + gen_logic(ctx, OPC_AND, rd, rd, rs); + break; + case OR16 + 0: + case OR16 + 1: + case OR16 + 2: + case OR16 + 3: + gen_logic(ctx, OPC_OR, rd, rd, rs); + break; + case LWM16 
+ 0: + case LWM16 + 1: + case LWM16 + 2: + case LWM16 + 3: + { + static const int lwm_convert[] = { 0x11, 0x12, 0x13, 0x14 }; + int offset = ZIMM(ctx->opcode, 0, 4); + + gen_ldst_multiple(ctx, LWM32, lwm_convert[(ctx->opcode >> 4) & 0x3], + 29, offset << 2); + } + break; + case SWM16 + 0: + case SWM16 + 1: + case SWM16 + 2: + case SWM16 + 3: + { + static const int swm_convert[] = { 0x11, 0x12, 0x13, 0x14 }; + int offset = ZIMM(ctx->opcode, 0, 4); + + gen_ldst_multiple(ctx, SWM32, swm_convert[(ctx->opcode >> 4) & 0x3], + 29, offset << 2); + } + break; + case JR16 + 0: + case JR16 + 1: + { + int reg = ctx->opcode & 0x1f; + + gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 4); + } + break; + case JRC16 + 0: + case JRC16 + 1: + { + int reg = ctx->opcode & 0x1f; + gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 0); + /* + * Let normal delay slot handling in our caller take us + * to the branch target. + */ + } + break; + case JALR16 + 0: + case JALR16 + 1: + gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case JALR16S + 0: + case JALR16S + 1: + gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 2); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case MFHI16 + 0: + case MFHI16 + 1: + gen_HILO(ctx, OPC_MFHI, 0, uMIPS_RS5(ctx->opcode)); + break; + case MFLO16 + 0: + case MFLO16 + 1: + gen_HILO(ctx, OPC_MFLO, 0, uMIPS_RS5(ctx->opcode)); + break; + case BREAK16: + generate_exception_end(ctx, EXCP_BREAK); + break; + case SDBBP16: + if (is_uhi(extract32(ctx->opcode, 0, 4))) { + // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); + } else { + /* + * XXX: not clear which exception should be raised + * when in debug mode... + */ + check_insn(ctx, ISA_MIPS32); + generate_exception_end(ctx, EXCP_DBp); + } + break; + case JRADDIUSP + 0: + case JRADDIUSP + 1: + { + int imm = ZIMM(ctx->opcode, 0, 5); + gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0); + gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2); + /* + * Let normal delay slot handling in our caller take us + * to the branch target. 
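+ * The ADDIU to $sp above is emitted after the branch, but it still
+ * executes before control transfers to the address in $ra.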
+ */ + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static inline void gen_movep(DisasContext *ctx, int enc_dest, int enc_rt, + int enc_rs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rd, rs, re, rt; + static const int rd_enc[] = { 5, 5, 6, 4, 4, 4, 4, 4 }; + static const int re_enc[] = { 6, 7, 7, 21, 22, 5, 6, 7 }; + static const int rs_rt_enc[] = { 0, 17, 2, 3, 16, 18, 19, 20 }; + rd = rd_enc[enc_dest]; + re = re_enc[enc_dest]; + rs = rs_rt_enc[enc_rs]; + rt = rs_rt_enc[enc_rt]; + if (rs) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } + if (rt) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[re], tcg_ctx->cpu_gpr[rt]); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[re], 0); + } +} + +static void gen_pool16c_r6_insn(DisasContext *ctx) +{ + int rt = mmreg((ctx->opcode >> 7) & 0x7); + int rs = mmreg((ctx->opcode >> 4) & 0x7); + + switch (ctx->opcode & 0xf) { + case R6_NOT16: + gen_logic(ctx, OPC_NOR, rt, rs, 0); + break; + case R6_AND16: + gen_logic(ctx, OPC_AND, rt, rt, rs); + break; + case R6_LWM16: + { + int lwm_converted = 0x11 + extract32(ctx->opcode, 8, 2); + int offset = extract32(ctx->opcode, 4, 4); + gen_ldst_multiple(ctx, LWM32, lwm_converted, 29, offset << 2); + } + break; + case R6_JRC16: /* JRCADDIUSP */ + if ((ctx->opcode >> 4) & 1) { + /* JRCADDIUSP */ + int imm = extract32(ctx->opcode, 5, 5); + gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0); + gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2); + } else { + /* JRC16 */ + rs = extract32(ctx->opcode, 5, 5); + gen_compute_branch(ctx, OPC_JR, 2, rs, 0, 0, 0); + } + break; + case MOVEP: + case MOVEP_05: + case MOVEP_06: + case MOVEP_07: + case MOVEP_0C: + case MOVEP_0D: + case MOVEP_0E: + case MOVEP_0F: + { + int enc_dest = uMIPS_RD(ctx->opcode); + int enc_rt = uMIPS_RS2(ctx->opcode); + int enc_rs = (ctx->opcode & 3) | ((ctx->opcode >> 1) & 4); + gen_movep(ctx, enc_dest, enc_rt, enc_rs); + } + break; + case R6_XOR16: + gen_logic(ctx, OPC_XOR, rt, rt, rs); + break; + case R6_OR16: + gen_logic(ctx, OPC_OR, rt, rt, rs); + break; + case R6_SWM16: + { + int swm_converted = 0x11 + extract32(ctx->opcode, 8, 2); + int offset = extract32(ctx->opcode, 4, 4); + gen_ldst_multiple(ctx, SWM32, swm_converted, 29, offset << 2); + } + break; + case JALRC16: /* BREAK16, SDBBP16 */ + switch (ctx->opcode & 0x3f) { + case JALRC16: + case JALRC16 + 0x20: + /* JALRC16 */ + gen_compute_branch(ctx, OPC_JALR, 2, (ctx->opcode >> 5) & 0x1f, + 31, 0, 0); + break; + case R6_BREAK16: + /* BREAK16 */ + generate_exception(ctx, EXCP_BREAK); + break; + case R6_SDBBP16: + /* SDBBP16 */ + if (is_uhi(extract32(ctx->opcode, 6, 4))) { + // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); + } else { + if (ctx->hflags & MIPS_HFLAG_SBRI) { + generate_exception(ctx, EXCP_RI); + } else { + generate_exception(ctx, EXCP_DBp); + } + } + break; + } + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } +} + +static void gen_ldxs(DisasContext *ctx, int base, int index, int rd) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, base); + + if (index != 0) { + gen_load_gpr(tcg_ctx, t1, index); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 2); + gen_op_addr_add(ctx, t0, t1, t0); + } + + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TESL); + gen_store_gpr(tcg_ctx, t1, rd); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + 
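+/*
+ * Load or store a pair of consecutive GPRs (microMIPS LWP/SWP and, on
+ * MIPS64, LDP/SDP): rd and rd + 1 are transferred to or from two
+ * consecutive words (or doublewords) at base + offset. rd == 31 is
+ * reserved, and for the loads rd must not equal the base register,
+ * as the first load would clobber the address.
+ */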
+static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd, + int base, int16_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + if (ctx->hflags & MIPS_HFLAG_BMASK || rd == 31) { + generate_exception_end(ctx, EXCP_RI); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_base_offset_addr(ctx, t0, base, offset); + + switch (opc) { + case LWP: + if (rd == base) { + generate_exception_end(ctx, EXCP_RI); + return; + } + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TESL); + gen_store_gpr(tcg_ctx, t1, rd); + tcg_gen_movi_tl(tcg_ctx, t1, 4); + gen_op_addr_add(ctx, t0, t0, t1); + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TESL); + gen_store_gpr(tcg_ctx, t1, rd + 1); + break; + case SWP: + gen_load_gpr(tcg_ctx, t1, rd); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); + tcg_gen_movi_tl(tcg_ctx, t1, 4); + gen_op_addr_add(ctx, t0, t0, t1); + gen_load_gpr(tcg_ctx, t1, rd + 1); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); + break; +#ifdef TARGET_MIPS64 + case LDP: + if (rd == base) { + generate_exception_end(ctx, EXCP_RI); + return; + } + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEQ); + gen_store_gpr(tcg_ctx, t1, rd); + tcg_gen_movi_tl(tcg_ctx, t1, 8); + gen_op_addr_add(ctx, t0, t0, t1); + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEQ); + gen_store_gpr(tcg_ctx, t1, rd + 1); + break; + case SDP: + gen_load_gpr(tcg_ctx, t1, rd); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_movi_tl(tcg_ctx, t1, 8); + gen_op_addr_add(ctx, t0, t0, t1); + gen_load_gpr(tcg_ctx, t1, rd + 1); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEQ); + break; +#endif + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_sync(TCGContext *tcg_ctx, int stype) +{ + TCGBar tcg_mo = TCG_BAR_SC; + + switch (stype) { + case 0x4: /* SYNC_WMB */ + tcg_mo |= TCG_MO_ST_ST; + break; + case 0x10: /* SYNC_MB */ + tcg_mo |= TCG_MO_ALL; + break; + case 0x11: /* SYNC_ACQUIRE */ + tcg_mo |= TCG_MO_LD_LD | TCG_MO_LD_ST; + break; + case 0x12: /* SYNC_RELEASE */ + tcg_mo |= TCG_MO_ST_ST | TCG_MO_LD_ST; + break; + case 0x13: /* SYNC_RMB */ + tcg_mo |= TCG_MO_LD_LD; + break; + default: + tcg_mo |= TCG_MO_ALL; + break; + } + + tcg_gen_mb(tcg_ctx, tcg_mo); +} + +static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int extension = (ctx->opcode >> 6) & 0x3f; + int minor = (ctx->opcode >> 12) & 0xf; + uint32_t mips32_op; + + switch (extension) { + case TEQ: + mips32_op = OPC_TEQ; + goto do_trap; + case TGE: + mips32_op = OPC_TGE; + goto do_trap; + case TGEU: + mips32_op = OPC_TGEU; + goto do_trap; + case TLT: + mips32_op = OPC_TLT; + goto do_trap; + case TLTU: + mips32_op = OPC_TLTU; + goto do_trap; + case TNE: + mips32_op = OPC_TNE; + do_trap: + gen_trap(ctx, mips32_op, rs, rt, -1); + break; + case MFC0: + case MFC0 + 32: + check_cp0_enabled(ctx); + if (rt == 0) { + /* Treat as NOP. 
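+ * (the destination is $zero, so the read result would be
+ * discarded anyway)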
*/ + break; + } + gen_mfc0(ctx, tcg_ctx->cpu_gpr[rt], rs, (ctx->opcode >> 11) & 0x7); + break; + case MTC0: + case MTC0 + 32: + check_cp0_enabled(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + gen_mtc0(ctx, t0, rs, (ctx->opcode >> 11) & 0x7); + tcg_temp_free(tcg_ctx, t0); + } + break; + case 0x2a: + switch (minor & 3) { + case MADD_ACC: + gen_muldiv(ctx, OPC_MADD, (ctx->opcode >> 14) & 3, rs, rt); + break; + case MADDU_ACC: + gen_muldiv(ctx, OPC_MADDU, (ctx->opcode >> 14) & 3, rs, rt); + break; + case MSUB_ACC: + gen_muldiv(ctx, OPC_MSUB, (ctx->opcode >> 14) & 3, rs, rt); + break; + case MSUBU_ACC: + gen_muldiv(ctx, OPC_MSUBU, (ctx->opcode >> 14) & 3, rs, rt); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x32: + switch (minor & 3) { + case MULT_ACC: + gen_muldiv(ctx, OPC_MULT, (ctx->opcode >> 14) & 3, rs, rt); + break; + case MULTU_ACC: + gen_muldiv(ctx, OPC_MULTU, (ctx->opcode >> 14) & 3, rs, rt); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x2c: + switch (minor) { + case BITSWAP: + check_insn(ctx, ISA_MIPS32R6); + gen_bitswap(ctx, OPC_BITSWAP, rs, rt); + break; + case SEB: + gen_bshfl(ctx, OPC_SEB, rs, rt); + break; + case SEH: + gen_bshfl(ctx, OPC_SEH, rs, rt); + break; + case CLO: + mips32_op = OPC_CLO; + goto do_cl; + case CLZ: + mips32_op = OPC_CLZ; + do_cl: + check_insn(ctx, ISA_MIPS32); + gen_cl(ctx, mips32_op, rt, rs); + break; + case RDHWR: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_rdhwr(ctx, rt, rs, 0); + break; + case WSBH: + gen_bshfl(ctx, OPC_WSBH, rs, rt); + break; + case MULT: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MULT; + goto do_mul; + case MULTU: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MULTU; + goto do_mul; + case DIV: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_DIV; + goto do_div; + case DIVU: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_DIVU; + goto do_div; + do_div: + check_insn(ctx, ISA_MIPS32); + gen_muldiv(ctx, mips32_op, 0, rs, rt); + break; + case MADD: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MADD; + goto do_mul; + case MADDU: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MADDU; + goto do_mul; + case MSUB: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MSUB; + goto do_mul; + case MSUBU: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MSUBU; + do_mul: + check_insn(ctx, ISA_MIPS32); + gen_muldiv(ctx, mips32_op, 0, rs, rt); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x34: + switch (minor) { + case MFC2: + case MTC2: + case MFHC2: + case MTHC2: + case CFC2: + case CTC2: + generate_exception_err(ctx, EXCP_CpU, 2); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x3c: + switch (minor) { + case JALR: /* JALRC */ + case JALR_HB: /* JALRC_HB */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* JALRC, JALRC_HB */ + gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 0); + } else { + /* JALR, JALR_HB */ + gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + } + break; + case JALRS: + case JALRS_HB: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 2); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + default: + goto pool32axf_invalid; + } + break; + case 0x05: + switch (minor) { + case RDPGPR: + check_cp0_enabled(ctx); + check_insn(ctx, ISA_MIPS32R2); + gen_load_srsgpr(tcg_ctx, rs, rt); + break; + case WRPGPR: + 
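+ /* WRPGPR writes to a GPR in the previous shadow register set. */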
check_cp0_enabled(ctx); + check_insn(ctx, ISA_MIPS32R2); + gen_store_srsgpr(tcg_ctx, rs, rt); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x0d: + switch (minor) { + case TLBP: + mips32_op = OPC_TLBP; + goto do_cp0; + case TLBR: + mips32_op = OPC_TLBR; + goto do_cp0; + case TLBWI: + mips32_op = OPC_TLBWI; + goto do_cp0; + case TLBWR: + mips32_op = OPC_TLBWR; + goto do_cp0; + case TLBINV: + mips32_op = OPC_TLBINV; + goto do_cp0; + case TLBINVF: + mips32_op = OPC_TLBINVF; + goto do_cp0; + case WAIT: + mips32_op = OPC_WAIT; + goto do_cp0; + case DERET: + mips32_op = OPC_DERET; + goto do_cp0; + case ERET: + mips32_op = OPC_ERET; + do_cp0: + gen_cp0(env, ctx, mips32_op, rt, rs); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x1d: + switch (minor) { + case DI: + check_cp0_enabled(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + save_cpu_state(ctx, 1); + gen_helper_di(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rs); + /* + * Stop translation as we may have switched the execution + * mode. + */ + ctx->base.is_jmp = DISAS_STOP; + tcg_temp_free(tcg_ctx, t0); + } + break; + case EI: + check_cp0_enabled(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + save_cpu_state(ctx, 1); + gen_helper_ei(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rs); + /* + * DISAS_STOP isn't sufficient, we need to ensure we break out + * of translated code to check for pending interrupts. + */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + tcg_temp_free(tcg_ctx, t0); + } + break; + default: + goto pool32axf_invalid; + } + break; + case 0x2d: + switch (minor) { + case SYNC: + gen_sync(tcg_ctx, extract32(ctx->opcode, 16, 5)); + break; + case SYSCALL: + generate_exception_end(ctx, EXCP_SYSCALL); + break; + case SDBBP: + if (is_uhi(extract32(ctx->opcode, 16, 10))) { + // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); + } else { + check_insn(ctx, ISA_MIPS32); + if (ctx->hflags & MIPS_HFLAG_SBRI) { + generate_exception_end(ctx, EXCP_RI); + } else { + generate_exception_end(ctx, EXCP_DBp); + } + } + break; + default: + goto pool32axf_invalid; + } + break; + case 0x01: + switch (minor & 3) { + case MFHI_ACC: + gen_HILO(ctx, OPC_MFHI, minor >> 2, rs); + break; + case MFLO_ACC: + gen_HILO(ctx, OPC_MFLO, minor >> 2, rs); + break; + case MTHI_ACC: + gen_HILO(ctx, OPC_MTHI, minor >> 2, rs); + break; + case MTLO_ACC: + gen_HILO(ctx, OPC_MTLO, minor >> 2, rs); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x35: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + switch (minor) { + case MFHI32: + gen_HILO(ctx, OPC_MFHI, 0, rs); + break; + case MFLO32: + gen_HILO(ctx, OPC_MFLO, 0, rs); + break; + case MTHI32: + gen_HILO(ctx, OPC_MTHI, 0, rs); + break; + case MTLO32: + gen_HILO(ctx, OPC_MTLO, 0, rs); + break; + default: + goto pool32axf_invalid; + } + break; + default: + pool32axf_invalid: + MIPS_INVAL("pool32axf"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * Values for microMIPS fmt field. Variable-width, depending on which + * formats the instruction supports. 
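+ * S/D-only operations use one bit (FMT_SD_*), S/D/PS operations use
+ * two bits (FMT_SDPS_*), and the CVT groups use two bits naming the
+ * source format (FMT_SWL_*, FMT_DWL_*).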
+ */ +enum { + FMT_SD_S = 0, + FMT_SD_D = 1, + + FMT_SDPS_S = 0, + FMT_SDPS_D = 1, + FMT_SDPS_PS = 2, + + FMT_SWL_S = 0, + FMT_SWL_W = 1, + FMT_SWL_L = 2, + + FMT_DWL_D = 0, + FMT_DWL_W = 1, + FMT_DWL_L = 2 +}; + +static void gen_pool32fxf(DisasContext *ctx, int rt, int rs) +{ + int extension = (ctx->opcode >> 6) & 0x3ff; + uint32_t mips32_op; + +#define FLOAT_1BIT_FMT(opc, fmt) ((fmt << 8) | opc) +#define FLOAT_2BIT_FMT(opc, fmt) ((fmt << 7) | opc) +#define COND_FLOAT_MOV(opc, cond) ((cond << 7) | opc) + + switch (extension) { + case FLOAT_1BIT_FMT(CFC1, 0): + mips32_op = OPC_CFC1; + goto do_cp1; + case FLOAT_1BIT_FMT(CTC1, 0): + mips32_op = OPC_CTC1; + goto do_cp1; + case FLOAT_1BIT_FMT(MFC1, 0): + mips32_op = OPC_MFC1; + goto do_cp1; + case FLOAT_1BIT_FMT(MTC1, 0): + mips32_op = OPC_MTC1; + goto do_cp1; + case FLOAT_1BIT_FMT(MFHC1, 0): + mips32_op = OPC_MFHC1; + goto do_cp1; + case FLOAT_1BIT_FMT(MTHC1, 0): + mips32_op = OPC_MTHC1; + do_cp1: + gen_cp1(ctx, mips32_op, rt, rs); + break; + + /* Reciprocal square root */ + case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_S): + mips32_op = OPC_RSQRT_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_D): + mips32_op = OPC_RSQRT_D; + goto do_unaryfp; + + /* Square root */ + case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_S): + mips32_op = OPC_SQRT_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_D): + mips32_op = OPC_SQRT_D; + goto do_unaryfp; + + /* Reciprocal */ + case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_S): + mips32_op = OPC_RECIP_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_D): + mips32_op = OPC_RECIP_D; + goto do_unaryfp; + + /* Floor */ + case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_S): + mips32_op = OPC_FLOOR_L_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_D): + mips32_op = OPC_FLOOR_L_D; + goto do_unaryfp; + case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_S): + mips32_op = OPC_FLOOR_W_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_D): + mips32_op = OPC_FLOOR_W_D; + goto do_unaryfp; + + /* Ceiling */ + case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_S): + mips32_op = OPC_CEIL_L_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_D): + mips32_op = OPC_CEIL_L_D; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_S): + mips32_op = OPC_CEIL_W_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_D): + mips32_op = OPC_CEIL_W_D; + goto do_unaryfp; + + /* Truncation */ + case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_S): + mips32_op = OPC_TRUNC_L_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_D): + mips32_op = OPC_TRUNC_L_D; + goto do_unaryfp; + case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_S): + mips32_op = OPC_TRUNC_W_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_D): + mips32_op = OPC_TRUNC_W_D; + goto do_unaryfp; + + /* Round */ + case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_S): + mips32_op = OPC_ROUND_L_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_D): + mips32_op = OPC_ROUND_L_D; + goto do_unaryfp; + case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_S): + mips32_op = OPC_ROUND_W_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_D): + mips32_op = OPC_ROUND_W_D; + goto do_unaryfp; + + /* Integer to floating-point conversion */ + case FLOAT_1BIT_FMT(CVT_L, FMT_SD_S): + mips32_op = OPC_CVT_L_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_L, FMT_SD_D): + mips32_op = OPC_CVT_L_D; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_W, FMT_SD_S): + mips32_op = OPC_CVT_W_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_W, FMT_SD_D): + mips32_op = OPC_CVT_W_D; + goto do_unaryfp; + + /* Paired-foo conversions */ + case 
FLOAT_1BIT_FMT(CVT_S_PL, 0): + mips32_op = OPC_CVT_S_PL; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_S_PU, 0): + mips32_op = OPC_CVT_S_PU; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_PW_PS, 0): + mips32_op = OPC_CVT_PW_PS; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_PS_PW, 0): + mips32_op = OPC_CVT_PS_PW; + goto do_unaryfp; + + /* Floating-point moves */ + case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_S): + mips32_op = OPC_MOV_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_D): + mips32_op = OPC_MOV_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_PS): + mips32_op = OPC_MOV_PS; + goto do_unaryfp; + + /* Absolute value */ + case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_S): + mips32_op = OPC_ABS_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_D): + mips32_op = OPC_ABS_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_PS): + mips32_op = OPC_ABS_PS; + goto do_unaryfp; + + /* Negation */ + case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_S): + mips32_op = OPC_NEG_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_D): + mips32_op = OPC_NEG_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_PS): + mips32_op = OPC_NEG_PS; + goto do_unaryfp; + + /* Reciprocal square root step */ + case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_S): + mips32_op = OPC_RSQRT1_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_D): + mips32_op = OPC_RSQRT1_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_PS): + mips32_op = OPC_RSQRT1_PS; + goto do_unaryfp; + + /* Reciprocal step */ + case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_S): + mips32_op = OPC_RECIP1_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_D): + mips32_op = OPC_RECIP1_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_PS): + mips32_op = OPC_RECIP1_PS; + goto do_unaryfp; + + /* Conversions to double */ + case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_S): + mips32_op = OPC_CVT_D_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_W): + mips32_op = OPC_CVT_D_W; + goto do_unaryfp; + case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_L): + mips32_op = OPC_CVT_D_L; + goto do_unaryfp; + + /* Conversions to single */ + case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_D): + mips32_op = OPC_CVT_S_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_W): + mips32_op = OPC_CVT_S_W; + goto do_unaryfp; + case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_L): + mips32_op = OPC_CVT_S_L; + do_unaryfp: + gen_farith(ctx, mips32_op, -1, rs, rt, 0); + break; + + /* Conditional moves on floating-point codes */ + case COND_FLOAT_MOV(MOVT, 0): + case COND_FLOAT_MOV(MOVT, 1): + case COND_FLOAT_MOV(MOVT, 2): + case COND_FLOAT_MOV(MOVT, 3): + case COND_FLOAT_MOV(MOVT, 4): + case COND_FLOAT_MOV(MOVT, 5): + case COND_FLOAT_MOV(MOVT, 6): + case COND_FLOAT_MOV(MOVT, 7): + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 1); + break; + case COND_FLOAT_MOV(MOVF, 0): + case COND_FLOAT_MOV(MOVF, 1): + case COND_FLOAT_MOV(MOVF, 2): + case COND_FLOAT_MOV(MOVF, 3): + case COND_FLOAT_MOV(MOVF, 4): + case COND_FLOAT_MOV(MOVF, 5): + case COND_FLOAT_MOV(MOVF, 6): + case COND_FLOAT_MOV(MOVF, 7): + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 0); + break; + default: + MIPS_INVAL("pool32fxf"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) +{ + int32_t offset; + uint16_t insn; + int rt, rs, rd, rr; + int16_t imm; + uint32_t op, minor, minor2, mips32_op; + uint32_t
cond, fmt, cc; + + insn = cpu_lduw_code(env, ctx->base.pc_next + 2); + ctx->opcode = (ctx->opcode << 16) | insn; + + rt = (ctx->opcode >> 21) & 0x1f; + rs = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + rr = (ctx->opcode >> 6) & 0x1f; + imm = (int16_t) ctx->opcode; + + op = (ctx->opcode >> 26) & 0x3f; + switch (op) { + case POOL32A: + minor = ctx->opcode & 0x3f; + switch (minor) { + case 0x00: + minor = (ctx->opcode >> 6) & 0xf; + switch (minor) { + case SLL32: + mips32_op = OPC_SLL; + goto do_shifti; + case SRA: + mips32_op = OPC_SRA; + goto do_shifti; + case SRL32: + mips32_op = OPC_SRL; + goto do_shifti; + case ROTR: + mips32_op = OPC_ROTR; + do_shifti: + gen_shift_imm(ctx, mips32_op, rt, rs, rd); + break; + case SELEQZ: + check_insn(ctx, ISA_MIPS32R6); + gen_cond_move(ctx, OPC_SELEQZ, rd, rs, rt); + break; + case SELNEZ: + check_insn(ctx, ISA_MIPS32R6); + gen_cond_move(ctx, OPC_SELNEZ, rd, rs, rt); + break; + case R6_RDHWR: + check_insn(ctx, ISA_MIPS32R6); + gen_rdhwr(ctx, rt, rs, extract32(ctx->opcode, 11, 3)); + break; + default: + goto pool32a_invalid; + } + break; + case 0x10: + minor = (ctx->opcode >> 6) & 0xf; + switch (minor) { + /* Arithmetic */ + case ADD: + mips32_op = OPC_ADD; + goto do_arith; + case ADDU32: + mips32_op = OPC_ADDU; + goto do_arith; + case SUB: + mips32_op = OPC_SUB; + goto do_arith; + case SUBU32: + mips32_op = OPC_SUBU; + goto do_arith; + case MUL: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MUL; + do_arith: + gen_arith(ctx, mips32_op, rd, rs, rt); + break; + /* Shifts */ + case SLLV: + mips32_op = OPC_SLLV; + goto do_shift; + case SRLV: + mips32_op = OPC_SRLV; + goto do_shift; + case SRAV: + mips32_op = OPC_SRAV; + goto do_shift; + case ROTRV: + mips32_op = OPC_ROTRV; + do_shift: + gen_shift(ctx, mips32_op, rd, rs, rt); + break; + /* Logical operations */ + case AND: + mips32_op = OPC_AND; + goto do_logic; + case OR32: + mips32_op = OPC_OR; + goto do_logic; + case NOR: + mips32_op = OPC_NOR; + goto do_logic; + case XOR32: + mips32_op = OPC_XOR; + do_logic: + gen_logic(ctx, mips32_op, rd, rs, rt); + break; + /* Set less than */ + case SLT: + mips32_op = OPC_SLT; + goto do_slt; + case SLTU: + mips32_op = OPC_SLTU; + do_slt: + gen_slt(ctx, mips32_op, rd, rs, rt); + break; + default: + goto pool32a_invalid; + } + break; + case 0x18: + minor = (ctx->opcode >> 6) & 0xf; + switch (minor) { + /* Conditional moves */ + case MOVN: /* MUL */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* MUL */ + gen_r6_muldiv(ctx, R6_OPC_MUL, rd, rs, rt); + } else { + /* MOVN */ + gen_cond_move(ctx, OPC_MOVN, rd, rs, rt); + } + break; + case MOVZ: /* MUH */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* MUH */ + gen_r6_muldiv(ctx, R6_OPC_MUH, rd, rs, rt); + } else { + /* MOVZ */ + gen_cond_move(ctx, OPC_MOVZ, rd, rs, rt); + } + break; + case MULU: + check_insn(ctx, ISA_MIPS32R6); + gen_r6_muldiv(ctx, R6_OPC_MULU, rd, rs, rt); + break; + case MUHU: + check_insn(ctx, ISA_MIPS32R6); + gen_r6_muldiv(ctx, R6_OPC_MUHU, rd, rs, rt); + break; + case LWXS: /* DIV */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* DIV */ + gen_r6_muldiv(ctx, R6_OPC_DIV, rd, rs, rt); + } else { + /* LWXS */ + gen_ldxs(ctx, rs, rt, rd); + } + break; + case MOD: + check_insn(ctx, ISA_MIPS32R6); + gen_r6_muldiv(ctx, R6_OPC_MOD, rd, rs, rt); + break; + case R6_DIVU: + check_insn(ctx, ISA_MIPS32R6); + gen_r6_muldiv(ctx, R6_OPC_DIVU, rd, rs, rt); + break; + case MODU: + check_insn(ctx, ISA_MIPS32R6); + gen_r6_muldiv(ctx, R6_OPC_MODU, rd, rs, rt); + break; + default: + goto 
pool32a_invalid; + } + break; + case INS: + gen_bitops(ctx, OPC_INS, rt, rs, rr, rd); + return; + case LSA: + check_insn(ctx, ISA_MIPS32R6); + gen_lsa(ctx, OPC_LSA, rd, rs, rt, + extract32(ctx->opcode, 9, 2)); + break; + case ALIGN: + check_insn(ctx, ISA_MIPS32R6); + gen_align(ctx, 32, rd, rs, rt, extract32(ctx->opcode, 9, 2)); + break; + case EXT: + gen_bitops(ctx, OPC_EXT, rt, rs, rr, rd); + return; + case POOL32AXF: + gen_pool32axf(env, ctx, rt, rs); + break; + case BREAK32: + generate_exception_end(ctx, EXCP_BREAK); + break; + case SIGRIE: + check_insn(ctx, ISA_MIPS32R6); + generate_exception_end(ctx, EXCP_RI); + break; + default: + pool32a_invalid: + MIPS_INVAL("pool32a"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case POOL32B: + minor = (ctx->opcode >> 12) & 0xf; + switch (minor) { + case CACHE: + check_cp0_enabled(ctx); + if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { + gen_cache_operation(ctx, rt, rs, imm); + } + break; + case LWC2: + case SWC2: + /* COP2: Not implemented. */ + generate_exception_err(ctx, EXCP_CpU, 2); + break; +#ifdef TARGET_MIPS64 + case LDP: + case SDP: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); +#endif + /* fall through */ + case LWP: + case SWP: + gen_ldst_pair(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12)); + break; +#ifdef TARGET_MIPS64 + case LDM: + case SDM: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); +#endif + /* fall through */ + case LWM32: + case SWM32: + gen_ldst_multiple(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12)); + break; + default: + MIPS_INVAL("pool32b"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case POOL32F: + if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { + minor = ctx->opcode & 0x3f; + check_cp1_enabled(ctx); + switch (minor) { + case ALNV_PS: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_ALNV_PS; + goto do_madd; + case MADD_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MADD_S; + goto do_madd; + case MADD_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MADD_D; + goto do_madd; + case MADD_PS: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MADD_PS; + goto do_madd; + case MSUB_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MSUB_S; + goto do_madd; + case MSUB_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MSUB_D; + goto do_madd; + case MSUB_PS: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_MSUB_PS; + goto do_madd; + case NMADD_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_NMADD_S; + goto do_madd; + case NMADD_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_NMADD_D; + goto do_madd; + case NMADD_PS: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_NMADD_PS; + goto do_madd; + case NMSUB_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_NMSUB_S; + goto do_madd; + case NMSUB_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_NMSUB_D; + goto do_madd; + case NMSUB_PS: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_NMSUB_PS; + do_madd: + gen_flt3_arith(ctx, mips32_op, rd, rr, rs, rt); + break; + case CABS_COND_FMT: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + cond = (ctx->opcode >> 6) & 0xf; + cc = (ctx->opcode >> 13) & 0x7; + fmt = (ctx->opcode >> 10) & 0x3; + switch (fmt) { + case 0x0: + gen_cmpabs_s(ctx, cond, rt, rs, cc); + break; + case 0x1: + gen_cmpabs_d(ctx, cond, rt, rs, cc); + break; + case 0x2: + gen_cmpabs_ps(ctx, cond, rt, rs, cc); + break; + default: + goto 
pool32f_invalid; + } + break; + case C_COND_FMT: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + cond = (ctx->opcode >> 6) & 0xf; + cc = (ctx->opcode >> 13) & 0x7; + fmt = (ctx->opcode >> 10) & 0x3; + switch (fmt) { + case 0x0: + gen_cmp_s(ctx, cond, rt, rs, cc); + break; + case 0x1: + gen_cmp_d(ctx, cond, rt, rs, cc); + break; + case 0x2: + gen_cmp_ps(ctx, cond, rt, rs, cc); + break; + default: + goto pool32f_invalid; + } + break; + case CMP_CONDN_S: + check_insn(ctx, ISA_MIPS32R6); + gen_r6_cmp_s(ctx, (ctx->opcode >> 6) & 0x1f, rt, rs, rd); + break; + case CMP_CONDN_D: + check_insn(ctx, ISA_MIPS32R6); + gen_r6_cmp_d(ctx, (ctx->opcode >> 6) & 0x1f, rt, rs, rd); + break; + case POOL32FXF: + gen_pool32fxf(ctx, rt, rs); + break; + case 0x00: + /* PLL foo */ + switch ((ctx->opcode >> 6) & 0x7) { + case PLL_PS: + mips32_op = OPC_PLL_PS; + goto do_ps; + case PLU_PS: + mips32_op = OPC_PLU_PS; + goto do_ps; + case PUL_PS: + mips32_op = OPC_PUL_PS; + goto do_ps; + case PUU_PS: + mips32_op = OPC_PUU_PS; + goto do_ps; + case CVT_PS_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_CVT_PS_S; + do_ps: + gen_farith(ctx, mips32_op, rt, rs, rd, 0); + break; + default: + goto pool32f_invalid; + } + break; + case MIN_FMT: + check_insn(ctx, ISA_MIPS32R6); + switch ((ctx->opcode >> 9) & 0x3) { + case FMT_SDPS_S: + gen_farith(ctx, OPC_MIN_S, rt, rs, rd, 0); + break; + case FMT_SDPS_D: + gen_farith(ctx, OPC_MIN_D, rt, rs, rd, 0); + break; + default: + goto pool32f_invalid; + } + break; + case 0x08: + /* [LS][WDU]XC1 */ + switch ((ctx->opcode >> 6) & 0x7) { + case LWXC1: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_LWXC1; + goto do_ldst_cp1; + case SWXC1: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_SWXC1; + goto do_ldst_cp1; + case LDXC1: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_LDXC1; + goto do_ldst_cp1; + case SDXC1: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_SDXC1; + goto do_ldst_cp1; + case LUXC1: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_LUXC1; + goto do_ldst_cp1; + case SUXC1: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_SUXC1; + do_ldst_cp1: + gen_flt3_ldst(ctx, mips32_op, rd, rd, rt, rs); + break; + default: + goto pool32f_invalid; + } + break; + case MAX_FMT: + check_insn(ctx, ISA_MIPS32R6); + switch ((ctx->opcode >> 9) & 0x3) { + case FMT_SDPS_S: + gen_farith(ctx, OPC_MAX_S, rt, rs, rd, 0); + break; + case FMT_SDPS_D: + gen_farith(ctx, OPC_MAX_D, rt, rs, rd, 0); + break; + default: + goto pool32f_invalid; + } + break; + case 0x18: + /* 3D insns */ + check_insn_opc_removed(ctx, ISA_MIPS32R6); + fmt = (ctx->opcode >> 9) & 0x3; + switch ((ctx->opcode >> 6) & 0x7) { + case RSQRT2_FMT: + switch (fmt) { + case FMT_SDPS_S: + mips32_op = OPC_RSQRT2_S; + goto do_3d; + case FMT_SDPS_D: + mips32_op = OPC_RSQRT2_D; + goto do_3d; + case FMT_SDPS_PS: + mips32_op = OPC_RSQRT2_PS; + goto do_3d; + default: + goto pool32f_invalid; + } + break; + case RECIP2_FMT: + switch (fmt) { + case FMT_SDPS_S: + mips32_op = OPC_RECIP2_S; + goto do_3d; + case FMT_SDPS_D: + mips32_op = OPC_RECIP2_D; + goto do_3d; + case FMT_SDPS_PS: + mips32_op = OPC_RECIP2_PS; + goto do_3d; + default: + goto pool32f_invalid; + } + break; + case ADDR_PS: + mips32_op = OPC_ADDR_PS; + goto do_3d; + case MULR_PS: + mips32_op = OPC_MULR_PS; + do_3d: + gen_farith(ctx, mips32_op, rt, rs, rd, 0); + break; + default: + goto pool32f_invalid; + } + break; + case 0x20: + /* MOV[FT].fmt, PREFX, RINT.fmt, CLASS.fmt*/ + cc = 
(ctx->opcode >> 13) & 0x7; + fmt = (ctx->opcode >> 9) & 0x3; + switch ((ctx->opcode >> 6) & 0x7) { + case MOVF_FMT: /* RINT_FMT */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* RINT_FMT */ + switch (fmt) { + case FMT_SDPS_S: + gen_farith(ctx, OPC_RINT_S, 0, rt, rs, 0); + break; + case FMT_SDPS_D: + gen_farith(ctx, OPC_RINT_D, 0, rt, rs, 0); + break; + default: + goto pool32f_invalid; + } + } else { + /* MOVF_FMT */ + switch (fmt) { + case FMT_SDPS_S: + gen_movcf_s(ctx, rs, rt, cc, 0); + break; + case FMT_SDPS_D: + gen_movcf_d(ctx, rs, rt, cc, 0); + break; + case FMT_SDPS_PS: + check_ps(ctx); + gen_movcf_ps(ctx, rs, rt, cc, 0); + break; + default: + goto pool32f_invalid; + } + } + break; + case MOVT_FMT: /* CLASS_FMT */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* CLASS_FMT */ + switch (fmt) { + case FMT_SDPS_S: + gen_farith(ctx, OPC_CLASS_S, 0, rt, rs, 0); + break; + case FMT_SDPS_D: + gen_farith(ctx, OPC_CLASS_D, 0, rt, rs, 0); + break; + default: + goto pool32f_invalid; + } + } else { + /* MOVT_FMT */ + switch (fmt) { + case FMT_SDPS_S: + gen_movcf_s(ctx, rs, rt, cc, 1); + break; + case FMT_SDPS_D: + gen_movcf_d(ctx, rs, rt, cc, 1); + break; + case FMT_SDPS_PS: + check_ps(ctx); + gen_movcf_ps(ctx, rs, rt, cc, 1); + break; + default: + goto pool32f_invalid; + } + } + break; + case PREFX: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + break; + default: + goto pool32f_invalid; + } + break; +#define FINSN_3ARG_SDPS(prfx) \ + switch ((ctx->opcode >> 8) & 0x3) { \ + case FMT_SDPS_S: \ + mips32_op = OPC_##prfx##_S; \ + goto do_fpop; \ + case FMT_SDPS_D: \ + mips32_op = OPC_##prfx##_D; \ + goto do_fpop; \ + case FMT_SDPS_PS: \ + check_ps(ctx); \ + mips32_op = OPC_##prfx##_PS; \ + goto do_fpop; \ + default: \ + goto pool32f_invalid; \ + } + case MINA_FMT: + check_insn(ctx, ISA_MIPS32R6); + switch ((ctx->opcode >> 9) & 0x3) { + case FMT_SDPS_S: + gen_farith(ctx, OPC_MINA_S, rt, rs, rd, 0); + break; + case FMT_SDPS_D: + gen_farith(ctx, OPC_MINA_D, rt, rs, rd, 0); + break; + default: + goto pool32f_invalid; + } + break; + case MAXA_FMT: + check_insn(ctx, ISA_MIPS32R6); + switch ((ctx->opcode >> 9) & 0x3) { + case FMT_SDPS_S: + gen_farith(ctx, OPC_MAXA_S, rt, rs, rd, 0); + break; + case FMT_SDPS_D: + gen_farith(ctx, OPC_MAXA_D, rt, rs, rd, 0); + break; + default: + goto pool32f_invalid; + } + break; + case 0x30: + /* regular FP ops */ + switch ((ctx->opcode >> 6) & 0x3) { + case ADD_FMT: + FINSN_3ARG_SDPS(ADD); + break; + case SUB_FMT: + FINSN_3ARG_SDPS(SUB); + break; + case MUL_FMT: + FINSN_3ARG_SDPS(MUL); + break; + case DIV_FMT: + fmt = (ctx->opcode >> 8) & 0x3; + if (fmt == 1) { + mips32_op = OPC_DIV_D; + } else if (fmt == 0) { + mips32_op = OPC_DIV_S; + } else { + goto pool32f_invalid; + } + goto do_fpop; + default: + goto pool32f_invalid; + } + break; + case 0x38: + /* cmovs */ + switch ((ctx->opcode >> 6) & 0x7) { + case MOVN_FMT: /* SELEQZ_FMT */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* SELEQZ_FMT */ + switch ((ctx->opcode >> 9) & 0x3) { + case FMT_SDPS_S: + gen_sel_s(ctx, OPC_SELEQZ_S, rd, rt, rs); + break; + case FMT_SDPS_D: + gen_sel_d(ctx, OPC_SELEQZ_D, rd, rt, rs); + break; + default: + goto pool32f_invalid; + } + } else { + /* MOVN_FMT */ + FINSN_3ARG_SDPS(MOVN); + } + break; + case MOVN_FMT_04: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + FINSN_3ARG_SDPS(MOVN); + break; + case MOVZ_FMT: /* SELNEZ_FMT */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* SELNEZ_FMT */ + switch ((ctx->opcode >> 9) & 0x3) { + case FMT_SDPS_S: + gen_sel_s(ctx, OPC_SELNEZ_S, rd, rt, rs); + break; + case 
FMT_SDPS_D: + gen_sel_d(ctx, OPC_SELNEZ_D, rd, rt, rs); + break; + default: + goto pool32f_invalid; + } + } else { + /* MOVZ_FMT */ + FINSN_3ARG_SDPS(MOVZ); + } + break; + case MOVZ_FMT_05: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + FINSN_3ARG_SDPS(MOVZ); + break; + case SEL_FMT: + check_insn(ctx, ISA_MIPS32R6); + switch ((ctx->opcode >> 9) & 0x3) { + case FMT_SDPS_S: + gen_sel_s(ctx, OPC_SEL_S, rd, rt, rs); + break; + case FMT_SDPS_D: + gen_sel_d(ctx, OPC_SEL_D, rd, rt, rs); + break; + default: + goto pool32f_invalid; + } + break; + case MADDF_FMT: + check_insn(ctx, ISA_MIPS32R6); + switch ((ctx->opcode >> 9) & 0x3) { + case FMT_SDPS_S: + mips32_op = OPC_MADDF_S; + goto do_fpop; + case FMT_SDPS_D: + mips32_op = OPC_MADDF_D; + goto do_fpop; + default: + goto pool32f_invalid; + } + break; + case MSUBF_FMT: + check_insn(ctx, ISA_MIPS32R6); + switch ((ctx->opcode >> 9) & 0x3) { + case FMT_SDPS_S: + mips32_op = OPC_MSUBF_S; + goto do_fpop; + case FMT_SDPS_D: + mips32_op = OPC_MSUBF_D; + goto do_fpop; + default: + goto pool32f_invalid; + } + break; + default: + goto pool32f_invalid; + } + break; + do_fpop: + gen_farith(ctx, mips32_op, rt, rs, rd, 0); + break; + default: + pool32f_invalid: + MIPS_INVAL("pool32f"); + generate_exception_end(ctx, EXCP_RI); + break; + } + } else { + generate_exception_err(ctx, EXCP_CpU, 1); + } + break; + case POOL32I: + minor = (ctx->opcode >> 21) & 0x1f; + switch (minor) { + case BLTZ: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch(ctx, OPC_BLTZ, 4, rs, -1, imm << 1, 4); + break; + case BLTZAL: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case BLTZALS: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 2); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case BGEZ: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch(ctx, OPC_BGEZ, 4, rs, -1, imm << 1, 4); + break; + case BGEZAL: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case BGEZALS: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 2); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case BLEZ: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch(ctx, OPC_BLEZ, 4, rs, -1, imm << 1, 4); + break; + case BGTZ: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch(ctx, OPC_BGTZ, 4, rs, -1, imm << 1, 4); + break; + + /* Traps */ + case TLTI: /* BC1EQZC */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* BC1EQZC */ + check_cp1_enabled(ctx); + gen_compute_branch1_r6(ctx, OPC_BC1EQZ, rs, imm << 1, 0); + } else { + /* TLTI */ + mips32_op = OPC_TLTI; + goto do_trapi; + } + break; + case TGEI: /* BC1NEZC */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* BC1NEZC */ + check_cp1_enabled(ctx); + gen_compute_branch1_r6(ctx, OPC_BC1NEZ, rs, imm << 1, 0); + } else { + /* TGEI */ + mips32_op = OPC_TGEI; + goto do_trapi; + } + break; + case TLTIU: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_TLTIU; + goto do_trapi; + case TGEIU: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_TGEIU; + goto do_trapi; + case TNEI: /* SYNCI */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* SYNCI */ + /* + * Break the TB to be able to sync copied instructions + * immediately. 
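+ * (ending the translation block forces any code the guest has
+ * just written to be retranslated before it can execute)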
+ */ + ctx->base.is_jmp = DISAS_STOP; + } else { + /* TNEI */ + mips32_op = OPC_TNEI; + goto do_trapi; + } + break; + case TEQI: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_TEQI; + do_trapi: + gen_trap(ctx, mips32_op, rs, -1, imm); + break; + + case BNEZC: + case BEQZC: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch(ctx, minor == BNEZC ? OPC_BNE : OPC_BEQ, + 4, rs, 0, imm << 1, 0); + /* + * Compact branches don't have a delay slot, so just let + * the normal delay slot handling take us to the branch + * target. + */ + break; + case LUI: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_logic_imm(ctx, OPC_LUI, rs, 0, imm); + break; + case SYNCI: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + /* + * Break the TB to be able to sync copied instructions + * immediately. + */ + ctx->base.is_jmp = DISAS_STOP; + break; + case BC2F: + case BC2T: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + /* COP2: Not implemented. */ + generate_exception_err(ctx, EXCP_CpU, 2); + break; + case BC1F: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1FANY2 : OPC_BC1F; + goto do_cp1branch; + case BC1T: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1TANY2 : OPC_BC1T; + goto do_cp1branch; + case BC1ANY4F: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_BC1FANY4; + goto do_cp1mips3d; + case BC1ANY4T: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_BC1TANY4; + do_cp1mips3d: + check_cop1x(ctx); + check_insn(ctx, ASE_MIPS3D); + /* Fall through */ + do_cp1branch: + if (env->CP0_Config1 & (1 << CP0C1_FP)) { + check_cp1_enabled(ctx); + gen_compute_branch1(ctx, mips32_op, + (ctx->opcode >> 18) & 0x7, imm << 1); + } else { + generate_exception_err(ctx, EXCP_CpU, 1); + } + break; + case BPOSGE64: + case BPOSGE32: + /* MIPS DSP: not implemented */ + /* Fall through */ + default: + MIPS_INVAL("pool32i"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case POOL32C: + minor = (ctx->opcode >> 12) & 0xf; + offset = sextract32(ctx->opcode, 0, + (ctx->insn_flags & ISA_MIPS32R6) ? 
9 : 12); + switch (minor) { + case LWL: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_LWL; + goto do_ld_lr; + case SWL: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_SWL; + goto do_st_lr; + case LWR: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_LWR; + goto do_ld_lr; + case SWR: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_SWR; + goto do_st_lr; +#if defined(TARGET_MIPS64) + case LDL: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_LDL; + goto do_ld_lr; + case SDL: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_SDL; + goto do_st_lr; + case LDR: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_LDR; + goto do_ld_lr; + case SDR: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_SDR; + goto do_st_lr; + case LWU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + mips32_op = OPC_LWU; + goto do_ld_lr; + case LLD: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + mips32_op = OPC_LLD; + goto do_ld_lr; +#endif + case LL: + mips32_op = OPC_LL; + goto do_ld_lr; + do_ld_lr: + gen_ld(ctx, mips32_op, rt, rs, offset); + break; + do_st_lr: + gen_st(ctx, mips32_op, rt, rs, offset); + break; + case SC: + gen_st_cond(ctx, rt, rs, offset, MO_TESL, false); + break; +#if defined(TARGET_MIPS64) + case SCD: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_st_cond(ctx, rt, rs, offset, MO_TEQ, false); + break; +#endif + case LD_EVA: + if (!ctx->eva) { + MIPS_INVAL("pool32c ld-eva"); + generate_exception_end(ctx, EXCP_RI); + break; + } + check_cp0_enabled(ctx); + + minor2 = (ctx->opcode >> 9) & 0x7; + offset = sextract32(ctx->opcode, 0, 9); + switch (minor2) { + case LBUE: + mips32_op = OPC_LBUE; + goto do_ld_lr; + case LHUE: + mips32_op = OPC_LHUE; + goto do_ld_lr; + case LWLE: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_LWLE; + goto do_ld_lr; + case LWRE: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_LWRE; + goto do_ld_lr; + case LBE: + mips32_op = OPC_LBE; + goto do_ld_lr; + case LHE: + mips32_op = OPC_LHE; + goto do_ld_lr; + case LLE: + mips32_op = OPC_LLE; + goto do_ld_lr; + case LWE: + mips32_op = OPC_LWE; + goto do_ld_lr; + }; + break; + case ST_EVA: + if (!ctx->eva) { + MIPS_INVAL("pool32c st-eva"); + generate_exception_end(ctx, EXCP_RI); + break; + } + check_cp0_enabled(ctx); + + minor2 = (ctx->opcode >> 9) & 0x7; + offset = sextract32(ctx->opcode, 0, 9); + switch (minor2) { + case SWLE: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_SWLE; + goto do_st_lr; + case SWRE: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + mips32_op = OPC_SWRE; + goto do_st_lr; + case PREFE: + /* Treat as no-op */ + if ((ctx->insn_flags & ISA_MIPS32R6) && (rt >= 24)) { + /* hint codes 24-31 are reserved and signal RI */ + generate_exception(ctx, EXCP_RI); + } + break; + case CACHEE: + /* Treat as no-op */ + if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { + gen_cache_operation(ctx, rt, rs, offset); + } + break; + case SBE: + mips32_op = OPC_SBE; + goto do_st_lr; + case SHE: + mips32_op = OPC_SHE; + goto do_st_lr; + case SCE: + gen_st_cond(ctx, rt, rs, offset, MO_TESL, true); + break; + case SWE: + mips32_op = OPC_SWE; + goto do_st_lr; + }; + break; + case PREF: + /* Treat as no-op */ + if ((ctx->insn_flags & ISA_MIPS32R6) && (rt >= 24)) { + /* 
hint codes 24-31 are reserved and signal RI */ + generate_exception(ctx, EXCP_RI); + } + break; + default: + MIPS_INVAL("pool32c"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case ADDI32: /* AUI, LUI */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* AUI, LUI */ + gen_logic_imm(ctx, OPC_LUI, rt, rs, imm); + } else { + /* ADDI32 */ + mips32_op = OPC_ADDI; + goto do_addi; + } + break; + case ADDIU32: + mips32_op = OPC_ADDIU; + do_addi: + gen_arith_imm(ctx, mips32_op, rt, rs, imm); + break; + + /* Logical operations */ + case ORI32: + mips32_op = OPC_ORI; + goto do_logici; + case XORI32: + mips32_op = OPC_XORI; + goto do_logici; + case ANDI32: + mips32_op = OPC_ANDI; + do_logici: + gen_logic_imm(ctx, mips32_op, rt, rs, imm); + break; + + /* Set less than immediate */ + case SLTI32: + mips32_op = OPC_SLTI; + goto do_slti; + case SLTIU32: + mips32_op = OPC_SLTIU; + do_slti: + gen_slt_imm(ctx, mips32_op, rt, rs, imm); + break; + case JALX32: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; + gen_compute_branch(ctx, OPC_JALX, 4, rt, rs, offset, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case JALS32: /* BOVC, BEQC, BEQZALC */ + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rs >= rt) { + /* BOVC */ + mips32_op = OPC_BOVC; + } else if (rs < rt && rs == 0) { + /* BEQZALC */ + mips32_op = OPC_BEQZALC; + } else { + /* BEQC */ + mips32_op = OPC_BEQC; + } + gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); + } else { + /* JALS32 */ + offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 1; + gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, offset, 2); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + } + break; + case BEQ32: /* BC */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* BC */ + gen_compute_compact_branch(ctx, OPC_BC, 0, 0, + sextract32(ctx->opcode << 1, 0, 27)); + } else { + /* BEQ32 */ + gen_compute_branch(ctx, OPC_BEQ, 4, rt, rs, imm << 1, 4); + } + break; + case BNE32: /* BALC */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* BALC */ + gen_compute_compact_branch(ctx, OPC_BALC, 0, 0, + sextract32(ctx->opcode << 1, 0, 27)); + } else { + /* BNE32 */ + gen_compute_branch(ctx, OPC_BNE, 4, rt, rs, imm << 1, 4); + } + break; + case J32: /* BGTZC, BLTZC, BLTC */ + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rs == 0 && rt != 0) { + /* BGTZC */ + mips32_op = OPC_BGTZC; + } else if (rs != 0 && rt != 0 && rs == rt) { + /* BLTZC */ + mips32_op = OPC_BLTZC; + } else { + /* BLTC */ + mips32_op = OPC_BLTC; + } + gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); + } else { + /* J32 */ + gen_compute_branch(ctx, OPC_J, 4, rt, rs, + (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4); + } + break; + case JAL32: /* BLEZC, BGEZC, BGEC */ + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rs == 0 && rt != 0) { + /* BLEZC */ + mips32_op = OPC_BLEZC; + } else if (rs != 0 && rt != 0 && rs == rt) { + /* BGEZC */ + mips32_op = OPC_BGEZC; + } else { + /* BGEC */ + mips32_op = OPC_BGEC; + } + gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); + } else { + /* JAL32 */ + gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, + (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + } + break; + /* Floating point (COP1) */ + case LWC132: + mips32_op = OPC_LWC1; + goto do_cop1; + case LDC132: + mips32_op = OPC_LDC1; + goto do_cop1; + case SWC132: + mips32_op = OPC_SWC1; + goto do_cop1; + case SDC132: + mips32_op = OPC_SDC1; + do_cop1: + gen_cop1_ldst(ctx, mips32_op, rt, rs, imm); + break; + case ADDIUPC: /* PCREL: ADDIUPC, AUIPC, 
ALUIPC, LWPC */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* PCREL: ADDIUPC, AUIPC, ALUIPC, LWPC */ + switch ((ctx->opcode >> 16) & 0x1f) { + case ADDIUPC_00: + case ADDIUPC_01: + case ADDIUPC_02: + case ADDIUPC_03: + case ADDIUPC_04: + case ADDIUPC_05: + case ADDIUPC_06: + case ADDIUPC_07: + gen_pcrel(ctx, OPC_ADDIUPC, ctx->base.pc_next & ~0x3, rt); + break; + case AUIPC: + gen_pcrel(ctx, OPC_AUIPC, ctx->base.pc_next, rt); + break; + case ALUIPC: + gen_pcrel(ctx, OPC_ALUIPC, ctx->base.pc_next, rt); + break; + case LWPC_08: + case LWPC_09: + case LWPC_0A: + case LWPC_0B: + case LWPC_0C: + case LWPC_0D: + case LWPC_0E: + case LWPC_0F: + gen_pcrel(ctx, R6_OPC_LWPC, ctx->base.pc_next & ~0x3, rt); + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } + } else { + /* ADDIUPC */ + int reg = mmreg(ZIMM(ctx->opcode, 23, 3)); + offset = SIMM(ctx->opcode, 0, 23) << 2; + + gen_addiupc(ctx, reg, offset, 0, 0); + } + break; + case BNVC: /* BNEC, BNEZALC */ + check_insn(ctx, ISA_MIPS32R6); + if (rs >= rt) { + /* BNVC */ + mips32_op = OPC_BNVC; + } else if (rs < rt && rs == 0) { + /* BNEZALC */ + mips32_op = OPC_BNEZALC; + } else { + /* BNEC */ + mips32_op = OPC_BNEC; + } + gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); + break; + case R6_BNEZC: /* JIALC */ + check_insn(ctx, ISA_MIPS32R6); + if (rt != 0) { + /* BNEZC */ + gen_compute_compact_branch(ctx, OPC_BNEZC, rt, 0, + sextract32(ctx->opcode << 1, 0, 22)); + } else { + /* JIALC */ + gen_compute_compact_branch(ctx, OPC_JIALC, 0, rs, imm); + } + break; + case R6_BEQZC: /* JIC */ + check_insn(ctx, ISA_MIPS32R6); + if (rt != 0) { + /* BEQZC */ + gen_compute_compact_branch(ctx, OPC_BEQZC, rt, 0, + sextract32(ctx->opcode << 1, 0, 22)); + } else { + /* JIC */ + gen_compute_compact_branch(ctx, OPC_JIC, 0, rs, imm); + } + break; + case BLEZALC: /* BGEZALC, BGEUC */ + check_insn(ctx, ISA_MIPS32R6); + if (rs == 0 && rt != 0) { + /* BLEZALC */ + mips32_op = OPC_BLEZALC; + } else if (rs != 0 && rt != 0 && rs == rt) { + /* BGEZALC */ + mips32_op = OPC_BGEZALC; + } else { + /* BGEUC */ + mips32_op = OPC_BGEUC; + } + gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); + break; + case BGTZALC: /* BLTZALC, BLTUC */ + check_insn(ctx, ISA_MIPS32R6); + if (rs == 0 && rt != 0) { + /* BGTZALC */ + mips32_op = OPC_BGTZALC; + } else if (rs != 0 && rt != 0 && rs == rt) { + /* BLTZALC */ + mips32_op = OPC_BLTZALC; + } else { + /* BLTUC */ + mips32_op = OPC_BLTUC; + } + gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1); + break; + /* Loads and stores */ + case LB32: + mips32_op = OPC_LB; + goto do_ld; + case LBU32: + mips32_op = OPC_LBU; + goto do_ld; + case LH32: + mips32_op = OPC_LH; + goto do_ld; + case LHU32: + mips32_op = OPC_LHU; + goto do_ld; + case LW32: + mips32_op = OPC_LW; + goto do_ld; +#ifdef TARGET_MIPS64 + case LD32: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + mips32_op = OPC_LD; + goto do_ld; + case SD32: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + mips32_op = OPC_SD; + goto do_st; +#endif + case SB32: + mips32_op = OPC_SB; + goto do_st; + case SH32: + mips32_op = OPC_SH; + goto do_st; + case SW32: + mips32_op = OPC_SW; + goto do_st; + do_ld: + gen_ld(ctx, mips32_op, rt, rs, imm); + break; + do_st: + gen_st(ctx, mips32_op, rt, rs, imm); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static int decode_micromips_opc(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t op; + + /* make sure instructions are on a halfword 
boundary */ + if (ctx->base.pc_next & 0x1) { + env->CP0_BadVAddr = ctx->base.pc_next; + generate_exception_end(ctx, EXCP_AdEL); + return 2; + } + + op = (ctx->opcode >> 10) & 0x3f; + /* Enforce properly-sized instructions in a delay slot */ + if (ctx->hflags & MIPS_HFLAG_BDS_STRICT) { + switch (op & 0x7) { /* MSB-3..MSB-5 */ + case 0: + /* POOL32A, POOL32B, POOL32I, POOL32C */ + case 4: + /* ADDI32, ADDIU32, ORI32, XORI32, SLTI32, SLTIU32, ANDI32, JALX32 */ + case 5: + /* LBU32, LHU32, POOL32F, JALS32, BEQ32, BNE32, J32, JAL32 */ + case 6: + /* SB32, SH32, ADDIUPC, SWC132, SDC132, SW32 */ + case 7: + /* LB32, LH32, LWC132, LDC132, LW32 */ + if (ctx->hflags & MIPS_HFLAG_BDS16) { + generate_exception_end(ctx, EXCP_RI); + return 2; + } + break; + case 1: + /* POOL16A, POOL16B, POOL16C, LWGP16, POOL16F */ + case 2: + /* LBU16, LHU16, LWSP16, LW16, SB16, SH16, SWSP16, SW16 */ + case 3: + /* MOVE16, ANDI16, POOL16D, POOL16E, BEQZ16, BNEZ16, B16, LI16 */ + if (ctx->hflags & MIPS_HFLAG_BDS32) { + generate_exception_end(ctx, EXCP_RI); + return 2; + } + break; + } + } + + switch (op) { + case POOL16A: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rs1 = mmreg(uMIPS_RS1(ctx->opcode)); + int rs2 = mmreg(uMIPS_RS2(ctx->opcode)); + uint32_t opc = 0; + + switch (ctx->opcode & 0x1) { + case ADDU16: + opc = OPC_ADDU; + break; + case SUBU16: + opc = OPC_SUBU; + break; + } + if (ctx->insn_flags & ISA_MIPS32R6) { + /* + * In the Release 6, the register number location in + * the instruction encoding has changed. + */ + gen_arith(ctx, opc, rs1, rd, rs2); + } else { + gen_arith(ctx, opc, rd, rs1, rs2); + } + } + break; + case POOL16B: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rs = mmreg(uMIPS_RS(ctx->opcode)); + int amount = (ctx->opcode >> 1) & 0x7; + uint32_t opc = 0; + amount = amount == 0 ? 8 : amount; + + switch (ctx->opcode & 0x1) { + case SLL16: + opc = OPC_SLL; + break; + case SRL16: + opc = OPC_SRL; + break; + } + + gen_shift_imm(ctx, opc, rd, rs, amount); + } + break; + case POOL16C: + if (ctx->insn_flags & ISA_MIPS32R6) { + gen_pool16c_r6_insn(ctx); + } else { + gen_pool16c_insn(ctx); + } + break; + case LWGP16: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rb = 28; /* GP */ + int16_t offset = SIMM(ctx->opcode, 0, 7) << 2; + + gen_ld(ctx, OPC_LW, rd, rb, offset); + } + break; + case POOL16F: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + if (ctx->opcode & 1) { + generate_exception_end(ctx, EXCP_RI); + } else { + /* MOVEP */ + int enc_dest = uMIPS_RD(ctx->opcode); + int enc_rt = uMIPS_RS2(ctx->opcode); + int enc_rs = uMIPS_RS1(ctx->opcode); + gen_movep(ctx, enc_dest, enc_rt, enc_rs); + } + break; + case LBU16: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4); + offset = (offset == 0xf ? 
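/* the all-ones encoding (0xf) denotes an offset of -1 */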
-1 : offset); + + gen_ld(ctx, OPC_LBU, rd, rb, offset); + } + break; + case LHU16: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1; + + gen_ld(ctx, OPC_LHU, rd, rb, offset); + } + break; + case LWSP16: + { + int rd = (ctx->opcode >> 5) & 0x1f; + int rb = 29; /* SP */ + int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2; + + gen_ld(ctx, OPC_LW, rd, rb, offset); + } + break; + case LW16: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2; + + gen_ld(ctx, OPC_LW, rd, rb, offset); + } + break; + case SB16: + { + int rd = mmreg2(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4); + + gen_st(ctx, OPC_SB, rd, rb, offset); + } + break; + case SH16: + { + int rd = mmreg2(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1; + + gen_st(ctx, OPC_SH, rd, rb, offset); + } + break; + case SWSP16: + { + int rd = (ctx->opcode >> 5) & 0x1f; + int rb = 29; /* SP */ + int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2; + + gen_st(ctx, OPC_SW, rd, rb, offset); + } + break; + case SW16: + { + int rd = mmreg2(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2; + + gen_st(ctx, OPC_SW, rd, rb, offset); + } + break; + case MOVE16: + { + int rd = uMIPS_RD5(ctx->opcode); + int rs = uMIPS_RS5(ctx->opcode); + + gen_arith(ctx, OPC_ADDU, rd, rs, 0); + } + break; + case ANDI16: + gen_andi16(ctx); + break; + case POOL16D: + switch (ctx->opcode & 0x1) { + case ADDIUS5: + gen_addius5(ctx); + break; + case ADDIUSP: + gen_addiusp(ctx); + break; + } + break; + case POOL16E: + switch (ctx->opcode & 0x1) { + case ADDIUR2: + gen_addiur2(ctx); + break; + case ADDIUR1SP: + gen_addiur1sp(ctx); + break; + } + break; + case B16: /* BC16 */ + gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, + sextract32(ctx->opcode, 0, 10) << 1, + (ctx->insn_flags & ISA_MIPS32R6) ? 0 : 4); + break; + case BNEZ16: /* BNEZC16 */ + case BEQZ16: /* BEQZC16 */ + gen_compute_branch(ctx, op == BNEZ16 ? OPC_BNE : OPC_BEQ, 2, + mmreg(uMIPS_RD(ctx->opcode)), + 0, sextract32(ctx->opcode, 0, 7) << 1, + (ctx->insn_flags & ISA_MIPS32R6) ? 0 : 4); + + break; + case LI16: + { + int reg = mmreg(uMIPS_RD(ctx->opcode)); + int imm = ZIMM(ctx->opcode, 0, 7); + + imm = (imm == 0x7f ? 
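/* the all-ones encoding (0x7f) denotes -1 */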
-1 : imm); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg], imm); + } + break; + case RES_29: + case RES_31: + case RES_39: + generate_exception_end(ctx, EXCP_RI); + break; + default: + decode_micromips32_opc(env, ctx); + return 4; + } + + return 2; +} + +/* + * + * nanoMIPS opcodes + * + */ + +/* MAJOR, P16, and P32 pools opcodes */ +enum { + NM_P_ADDIU = 0x00, + NM_ADDIUPC = 0x01, + NM_MOVE_BALC = 0x02, + NM_P16_MV = 0x04, + NM_LW16 = 0x05, + NM_BC16 = 0x06, + NM_P16_SR = 0x07, + + NM_POOL32A = 0x08, + NM_P_BAL = 0x0a, + NM_P16_SHIFT = 0x0c, + NM_LWSP16 = 0x0d, + NM_BALC16 = 0x0e, + NM_P16_4X4 = 0x0f, + + NM_P_GP_W = 0x10, + NM_P_GP_BH = 0x11, + NM_P_J = 0x12, + NM_P16C = 0x14, + NM_LWGP16 = 0x15, + NM_P16_LB = 0x17, + + NM_P48I = 0x18, + NM_P16_A1 = 0x1c, + NM_LW4X4 = 0x1d, + NM_P16_LH = 0x1f, + + NM_P_U12 = 0x20, + NM_P_LS_U12 = 0x21, + NM_P_BR1 = 0x22, + NM_P16_A2 = 0x24, + NM_SW16 = 0x25, + NM_BEQZC16 = 0x26, + + NM_POOL32F = 0x28, + NM_P_LS_S9 = 0x29, + NM_P_BR2 = 0x2a, + + NM_P16_ADDU = 0x2c, + NM_SWSP16 = 0x2d, + NM_BNEZC16 = 0x2e, + NM_MOVEP = 0x2f, + + NM_POOL32S = 0x30, + NM_P_BRI = 0x32, + NM_LI16 = 0x34, + NM_SWGP16 = 0x35, + NM_P16_BR = 0x36, + + NM_P_LUI = 0x38, + NM_ANDI16 = 0x3c, + NM_SW4X4 = 0x3d, + NM_MOVEPREV = 0x3f, +}; + +/* POOL32A instruction pool */ +enum { + NM_POOL32A0 = 0x00, + NM_SPECIAL2 = 0x01, + NM_COP2_1 = 0x02, + NM_UDI = 0x03, + NM_POOL32A5 = 0x05, + NM_POOL32A7 = 0x07, +}; + +/* P.GP.W instruction pool */ +enum { + NM_ADDIUGP_W = 0x00, + NM_LWGP = 0x02, + NM_SWGP = 0x03, +}; + +/* P48I instruction pool */ +enum { + NM_LI48 = 0x00, + NM_ADDIU48 = 0x01, + NM_ADDIUGP48 = 0x02, + NM_ADDIUPC48 = 0x03, + NM_LWPC48 = 0x0b, + NM_SWPC48 = 0x0f, +}; + +/* P.U12 instruction pool */ +enum { + NM_ORI = 0x00, + NM_XORI = 0x01, + NM_ANDI = 0x02, + NM_P_SR = 0x03, + NM_SLTI = 0x04, + NM_SLTIU = 0x05, + NM_SEQI = 0x06, + NM_ADDIUNEG = 0x08, + NM_P_SHIFT = 0x0c, + NM_P_ROTX = 0x0d, + NM_P_INS = 0x0e, + NM_P_EXT = 0x0f, +}; + +/* POOL32F instruction pool */ +enum { + NM_POOL32F_0 = 0x00, + NM_POOL32F_3 = 0x03, + NM_POOL32F_5 = 0x05, +}; + +/* POOL32S instruction pool */ +enum { + NM_POOL32S_0 = 0x00, + NM_POOL32S_4 = 0x04, +}; + +/* P.LUI instruction pool */ +enum { + NM_LUI = 0x00, + NM_ALUIPC = 0x01, +}; + +/* P.GP.BH instruction pool */ +enum { + NM_LBGP = 0x00, + NM_SBGP = 0x01, + NM_LBUGP = 0x02, + NM_ADDIUGP_B = 0x03, + NM_P_GP_LH = 0x04, + NM_P_GP_SH = 0x05, + NM_P_GP_CP1 = 0x06, +}; + +/* P.LS.U12 instruction pool */ +enum { + NM_LB = 0x00, + NM_SB = 0x01, + NM_LBU = 0x02, + NM_P_PREFU12 = 0x03, + NM_LH = 0x04, + NM_SH = 0x05, + NM_LHU = 0x06, + NM_LWU = 0x07, + NM_LW = 0x08, + NM_SW = 0x09, + NM_LWC1 = 0x0a, + NM_SWC1 = 0x0b, + NM_LDC1 = 0x0e, + NM_SDC1 = 0x0f, +}; + +/* P.LS.S9 instruction pool */ +enum { + NM_P_LS_S0 = 0x00, + NM_P_LS_S1 = 0x01, + NM_P_LS_E0 = 0x02, + NM_P_LS_WM = 0x04, + NM_P_LS_UAWM = 0x05, +}; + +/* P.BAL instruction pool */ +enum { + NM_BC = 0x00, + NM_BALC = 0x01, +}; + +/* P.J instruction pool */ +enum { + NM_JALRC = 0x00, + NM_JALRC_HB = 0x01, + NM_P_BALRSC = 0x08, +}; + +/* P.BR1 instruction pool */ +enum { + NM_BEQC = 0x00, + NM_P_BR3A = 0x01, + NM_BGEC = 0x02, + NM_BGEUC = 0x03, +}; + +/* P.BR2 instruction pool */ +enum { + NM_BNEC = 0x00, + NM_BLTC = 0x02, + NM_BLTUC = 0x03, +}; + +/* P.BRI instruction pool */ +enum { + NM_BEQIC = 0x00, + NM_BBEQZC = 0x01, + NM_BGEIC = 0x02, + NM_BGEIUC = 0x03, + NM_BNEIC = 0x04, + NM_BBNEZC = 0x05, + NM_BLTIC = 0x06, + NM_BLTIUC = 0x07, +}; + +/* P16.SHIFT instruction pool */ +enum { + NM_SLL16 = 
0x00, + NM_SRL16 = 0x01, +}; + +/* POOL16C instruction pool */ +enum { + NM_POOL16C_0 = 0x00, + NM_LWXS16 = 0x01, +}; + +/* P16.A1 instruction pool */ +enum { + NM_ADDIUR1SP = 0x01, +}; + +/* P16.A2 instruction pool */ +enum { + NM_ADDIUR2 = 0x00, + NM_P_ADDIURS5 = 0x01, +}; + +/* P16.ADDU instruction pool */ +enum { + NM_ADDU16 = 0x00, + NM_SUBU16 = 0x01, +}; + +/* P16.SR instruction pool */ +enum { + NM_SAVE16 = 0x00, + NM_RESTORE_JRC16 = 0x01, +}; + +/* P16.4X4 instruction pool */ +enum { + NM_ADDU4X4 = 0x00, + NM_MUL4X4 = 0x01, +}; + +/* P16.LB instruction pool */ +enum { + NM_LB16 = 0x00, + NM_SB16 = 0x01, + NM_LBU16 = 0x02, +}; + +/* P16.LH instruction pool */ +enum { + NM_LH16 = 0x00, + NM_SH16 = 0x01, + NM_LHU16 = 0x02, +}; + +/* P.RI instruction pool */ +enum { + NM_SIGRIE = 0x00, + NM_P_SYSCALL = 0x01, + NM_BREAK = 0x02, + NM_SDBBP = 0x03, +}; + +/* POOL32A0 instruction pool */ +enum { + NM_P_TRAP = 0x00, + NM_SEB = 0x01, + NM_SLLV = 0x02, + NM_MUL = 0x03, + NM_MFC0 = 0x06, + NM_MFHC0 = 0x07, + NM_SEH = 0x09, + NM_SRLV = 0x0a, + NM_MUH = 0x0b, + NM_MTC0 = 0x0e, + NM_MTHC0 = 0x0f, + NM_SRAV = 0x12, + NM_MULU = 0x13, + NM_ROTRV = 0x1a, + NM_MUHU = 0x1b, + NM_ADD = 0x22, + NM_DIV = 0x23, + NM_ADDU = 0x2a, + NM_MOD = 0x2b, + NM_SUB = 0x32, + NM_DIVU = 0x33, + NM_RDHWR = 0x38, + NM_SUBU = 0x3a, + NM_MODU = 0x3b, + NM_P_CMOVE = 0x42, + NM_FORK = 0x45, + NM_MFTR = 0x46, + NM_MFHTR = 0x47, + NM_AND = 0x4a, + NM_YIELD = 0x4d, + NM_MTTR = 0x4e, + NM_MTHTR = 0x4f, + NM_OR = 0x52, + NM_D_E_MT_VPE = 0x56, + NM_NOR = 0x5a, + NM_XOR = 0x62, + NM_SLT = 0x6a, + NM_P_SLTU = 0x72, + NM_SOV = 0x7a, +}; + +/* CRC32 instruction pool */ +enum { + NM_CRC32B = 0x00, + NM_CRC32H = 0x01, + NM_CRC32W = 0x02, + NM_CRC32CB = 0x04, + NM_CRC32CH = 0x05, + NM_CRC32CW = 0x06, +}; + +/* POOL32A5 instruction pool */ +enum { + NM_CMP_EQ_PH = 0x00, + NM_CMP_LT_PH = 0x08, + NM_CMP_LE_PH = 0x10, + NM_CMPGU_EQ_QB = 0x18, + NM_CMPGU_LT_QB = 0x20, + NM_CMPGU_LE_QB = 0x28, + NM_CMPGDU_EQ_QB = 0x30, + NM_CMPGDU_LT_QB = 0x38, + NM_CMPGDU_LE_QB = 0x40, + NM_CMPU_EQ_QB = 0x48, + NM_CMPU_LT_QB = 0x50, + NM_CMPU_LE_QB = 0x58, + NM_ADDQ_S_W = 0x60, + NM_SUBQ_S_W = 0x68, + NM_ADDSC = 0x70, + NM_ADDWC = 0x78, + + NM_ADDQ_S_PH = 0x01, + NM_ADDQH_R_PH = 0x09, + NM_ADDQH_R_W = 0x11, + NM_ADDU_S_QB = 0x19, + NM_ADDU_S_PH = 0x21, + NM_ADDUH_R_QB = 0x29, + NM_SHRAV_R_PH = 0x31, + NM_SHRAV_R_QB = 0x39, + NM_SUBQ_S_PH = 0x41, + NM_SUBQH_R_PH = 0x49, + NM_SUBQH_R_W = 0x51, + NM_SUBU_S_QB = 0x59, + NM_SUBU_S_PH = 0x61, + NM_SUBUH_R_QB = 0x69, + NM_SHLLV_S_PH = 0x71, + NM_PRECR_SRA_R_PH_W = 0x79, + + NM_MULEU_S_PH_QBL = 0x12, + NM_MULEU_S_PH_QBR = 0x1a, + NM_MULQ_RS_PH = 0x22, + NM_MULQ_S_PH = 0x2a, + NM_MULQ_RS_W = 0x32, + NM_MULQ_S_W = 0x3a, + NM_APPEND = 0x42, + NM_MODSUB = 0x52, + NM_SHRAV_R_W = 0x5a, + NM_SHRLV_PH = 0x62, + NM_SHRLV_QB = 0x6a, + NM_SHLLV_QB = 0x72, + NM_SHLLV_S_W = 0x7a, + + NM_SHILO = 0x03, + + NM_MULEQ_S_W_PHL = 0x04, + NM_MULEQ_S_W_PHR = 0x0c, + + NM_MUL_S_PH = 0x05, + NM_PRECR_QB_PH = 0x0d, + NM_PRECRQ_QB_PH = 0x15, + NM_PRECRQ_PH_W = 0x1d, + NM_PRECRQ_RS_PH_W = 0x25, + NM_PRECRQU_S_QB_PH = 0x2d, + NM_PACKRL_PH = 0x35, + NM_PICK_QB = 0x3d, + NM_PICK_PH = 0x45, + + NM_SHRA_R_W = 0x5e, + NM_SHRA_R_PH = 0x66, + NM_SHLL_S_PH = 0x76, + NM_SHLL_S_W = 0x7e, + + NM_REPL_PH = 0x07 +}; + +/* POOL32A7 instruction pool */ +enum { + NM_P_LSX = 0x00, + NM_LSA = 0x01, + NM_EXTW = 0x03, + NM_POOL32AXF = 0x07, +}; + +/* P.SR instruction pool */ +enum { + NM_PP_SR = 0x00, + NM_P_SR_F = 0x01, +}; + +/* P.SHIFT instruction pool */ +enum 
{ + NM_P_SLL = 0x00, + NM_SRL = 0x02, + NM_SRA = 0x04, + NM_ROTR = 0x06, +}; + +/* P.ROTX instruction pool */ +enum { + NM_ROTX = 0x00, +}; + +/* P.INS instruction pool */ +enum { + NM_INS = 0x00, +}; + +/* P.EXT instruction pool */ +enum { + NM_EXT = 0x00, +}; + +/* POOL32F_0 (fmt) instruction pool */ +enum { + NM_RINT_S = 0x04, + NM_RINT_D = 0x44, + NM_ADD_S = 0x06, + NM_SELEQZ_S = 0x07, + NM_SELEQZ_D = 0x47, + NM_CLASS_S = 0x0c, + NM_CLASS_D = 0x4c, + NM_SUB_S = 0x0e, + NM_SELNEZ_S = 0x0f, + NM_SELNEZ_D = 0x4f, + NM_MUL_S = 0x16, + NM_SEL_S = 0x17, + NM_SEL_D = 0x57, + NM_DIV_S = 0x1e, + NM_ADD_D = 0x26, + NM_SUB_D = 0x2e, + NM_MUL_D = 0x36, + NM_MADDF_S = 0x37, + NM_MADDF_D = 0x77, + NM_DIV_D = 0x3e, + NM_MSUBF_S = 0x3f, + NM_MSUBF_D = 0x7f, +}; + +/* POOL32F_3 instruction pool */ +enum { + NM_MIN_FMT = 0x00, + NM_MAX_FMT = 0x01, + NM_MINA_FMT = 0x04, + NM_MAXA_FMT = 0x05, + NM_POOL32FXF = 0x07, +}; + +/* POOL32F_5 instruction pool */ +enum { + NM_CMP_CONDN_S = 0x00, + NM_CMP_CONDN_D = 0x02, +}; + +/* P.GP.LH instruction pool */ +enum { + NM_LHGP = 0x00, + NM_LHUGP = 0x01, +}; + +/* P.GP.SH instruction pool */ +enum { + NM_SHGP = 0x00, +}; + +/* P.GP.CP1 instruction pool */ +enum { + NM_LWC1GP = 0x00, + NM_SWC1GP = 0x01, + NM_LDC1GP = 0x02, + NM_SDC1GP = 0x03, +}; + +/* P.LS.S0 instruction pool */ +enum { + NM_LBS9 = 0x00, + NM_LHS9 = 0x04, + NM_LWS9 = 0x08, + NM_LDS9 = 0x0c, + + NM_SBS9 = 0x01, + NM_SHS9 = 0x05, + NM_SWS9 = 0x09, + NM_SDS9 = 0x0d, + + NM_LBUS9 = 0x02, + NM_LHUS9 = 0x06, + NM_LWC1S9 = 0x0a, + NM_LDC1S9 = 0x0e, + + NM_P_PREFS9 = 0x03, + NM_LWUS9 = 0x07, + NM_SWC1S9 = 0x0b, + NM_SDC1S9 = 0x0f, +}; + +/* P.LS.S1 instruction pool */ +enum { + NM_ASET_ACLR = 0x02, + NM_UALH = 0x04, + NM_UASH = 0x05, + NM_CACHE = 0x07, + NM_P_LL = 0x0a, + NM_P_SC = 0x0b, +}; + +/* P.LS.E0 instruction pool */ +enum { + NM_LBE = 0x00, + NM_SBE = 0x01, + NM_LBUE = 0x02, + NM_P_PREFE = 0x03, + NM_LHE = 0x04, + NM_SHE = 0x05, + NM_LHUE = 0x06, + NM_CACHEE = 0x07, + NM_LWE = 0x08, + NM_SWE = 0x09, + NM_P_LLE = 0x0a, + NM_P_SCE = 0x0b, +}; + +/* P.PREFE instruction pool */ +enum { + NM_SYNCIE = 0x00, + NM_PREFE = 0x01, +}; + +/* P.LLE instruction pool */ +enum { + NM_LLE = 0x00, + NM_LLWPE = 0x01, +}; + +/* P.SCE instruction pool */ +enum { + NM_SCE = 0x00, + NM_SCWPE = 0x01, +}; + +/* P.LS.WM instruction pool */ +enum { + NM_LWM = 0x00, + NM_SWM = 0x01, +}; + +/* P.LS.UAWM instruction pool */ +enum { + NM_UALWM = 0x00, + NM_UASWM = 0x01, +}; + +/* P.BR3A instruction pool */ +enum { + NM_BC1EQZC = 0x00, + NM_BC1NEZC = 0x01, + NM_BC2EQZC = 0x02, + NM_BC2NEZC = 0x03, + NM_BPOSGE32C = 0x04, +}; + +/* P16.RI instruction pool */ +enum { + NM_P16_SYSCALL = 0x01, + NM_BREAK16 = 0x02, + NM_SDBBP16 = 0x03, +}; + +/* POOL16C_0 instruction pool */ +enum { + NM_POOL16C_00 = 0x00, +}; + +/* P16.JRC instruction pool */ +enum { + NM_JRC = 0x00, + NM_JALRC16 = 0x01, +}; + +/* P.SYSCALL instruction pool */ +enum { + NM_SYSCALL = 0x00, + NM_HYPCALL = 0x01, +}; + +/* P.TRAP instruction pool */ +enum { + NM_TEQ = 0x00, + NM_TNE = 0x01, +}; + +/* P.CMOVE instruction pool */ +enum { + NM_MOVZ = 0x00, + NM_MOVN = 0x01, +}; + +/* POOL32Axf instruction pool */ +enum { + NM_POOL32AXF_1 = 0x01, + NM_POOL32AXF_2 = 0x02, + NM_POOL32AXF_4 = 0x04, + NM_POOL32AXF_5 = 0x05, + NM_POOL32AXF_7 = 0x07, +}; + +/* POOL32Axf_1 instruction pool */ +enum { + NM_POOL32AXF_1_0 = 0x00, + NM_POOL32AXF_1_1 = 0x01, + NM_POOL32AXF_1_3 = 0x03, + NM_POOL32AXF_1_4 = 0x04, + NM_POOL32AXF_1_5 = 0x05, + NM_POOL32AXF_1_7 = 0x07, +}; + +/* POOL32Axf_2 
instruction pool */ +enum { + NM_POOL32AXF_2_0_7 = 0x00, + NM_POOL32AXF_2_8_15 = 0x01, + NM_POOL32AXF_2_16_23 = 0x02, + NM_POOL32AXF_2_24_31 = 0x03, +}; + +/* POOL32Axf_7 instruction pool */ +enum { + NM_SHRA_R_QB = 0x0, + NM_SHRL_PH = 0x1, + NM_REPL_QB = 0x2, +}; + +/* POOL32Axf_1_0 instruction pool */ +enum { + NM_MFHI = 0x0, + NM_MFLO = 0x1, + NM_MTHI = 0x2, + NM_MTLO = 0x3, +}; + +/* POOL32Axf_1_1 instruction pool */ +enum { + NM_MTHLIP = 0x0, + NM_SHILOV = 0x1, +}; + +/* POOL32Axf_1_3 instruction pool */ +enum { + NM_RDDSP = 0x0, + NM_WRDSP = 0x1, + NM_EXTP = 0x2, + NM_EXTPDP = 0x3, +}; + +/* POOL32Axf_1_4 instruction pool */ +enum { + NM_SHLL_QB = 0x0, + NM_SHRL_QB = 0x1, +}; + +/* POOL32Axf_1_5 instruction pool */ +enum { + NM_MAQ_S_W_PHR = 0x0, + NM_MAQ_S_W_PHL = 0x1, + NM_MAQ_SA_W_PHR = 0x2, + NM_MAQ_SA_W_PHL = 0x3, +}; + +/* POOL32Axf_1_7 instruction pool */ +enum { + NM_EXTR_W = 0x0, + NM_EXTR_R_W = 0x1, + NM_EXTR_RS_W = 0x2, + NM_EXTR_S_H = 0x3, +}; + +/* POOL32Axf_2_0_7 instruction pool */ +enum { + NM_DPA_W_PH = 0x0, + NM_DPAQ_S_W_PH = 0x1, + NM_DPS_W_PH = 0x2, + NM_DPSQ_S_W_PH = 0x3, + NM_BALIGN = 0x4, + NM_MADD = 0x5, + NM_MULT = 0x6, + NM_EXTRV_W = 0x7, +}; + +/* POOL32Axf_2_8_15 instruction pool */ +enum { + NM_DPAX_W_PH = 0x0, + NM_DPAQ_SA_L_W = 0x1, + NM_DPSX_W_PH = 0x2, + NM_DPSQ_SA_L_W = 0x3, + NM_MADDU = 0x5, + NM_MULTU = 0x6, + NM_EXTRV_R_W = 0x7, +}; + +/* POOL32Axf_2_16_23 instruction pool */ +enum { + NM_DPAU_H_QBL = 0x0, + NM_DPAQX_S_W_PH = 0x1, + NM_DPSU_H_QBL = 0x2, + NM_DPSQX_S_W_PH = 0x3, + NM_EXTPV = 0x4, + NM_MSUB = 0x5, + NM_MULSA_W_PH = 0x6, + NM_EXTRV_RS_W = 0x7, +}; + +/* POOL32Axf_2_24_31 instruction pool */ +enum { + NM_DPAU_H_QBR = 0x0, + NM_DPAQX_SA_W_PH = 0x1, + NM_DPSU_H_QBR = 0x2, + NM_DPSQX_SA_W_PH = 0x3, + NM_EXTPDPV = 0x4, + NM_MSUBU = 0x5, + NM_MULSAQ_S_W_PH = 0x6, + NM_EXTRV_S_H = 0x7, +}; + +/* POOL32Axf_{4, 5} instruction pool */ +enum { + NM_CLO = 0x25, + NM_CLZ = 0x2d, + + NM_TLBP = 0x01, + NM_TLBR = 0x09, + NM_TLBWI = 0x11, + NM_TLBWR = 0x19, + NM_TLBINV = 0x03, + NM_TLBINVF = 0x0b, + NM_DI = 0x23, + NM_EI = 0x2b, + NM_RDPGPR = 0x70, + NM_WRPGPR = 0x78, + NM_WAIT = 0x61, + NM_DERET = 0x71, + NM_ERETX = 0x79, + + /* nanoMIPS DSP instructions */ + NM_ABSQ_S_QB = 0x00, + NM_ABSQ_S_PH = 0x08, + NM_ABSQ_S_W = 0x10, + NM_PRECEQ_W_PHL = 0x28, + NM_PRECEQ_W_PHR = 0x30, + NM_PRECEQU_PH_QBL = 0x38, + NM_PRECEQU_PH_QBR = 0x48, + NM_PRECEU_PH_QBL = 0x58, + NM_PRECEU_PH_QBR = 0x68, + NM_PRECEQU_PH_QBLA = 0x39, + NM_PRECEQU_PH_QBRA = 0x49, + NM_PRECEU_PH_QBLA = 0x59, + NM_PRECEU_PH_QBRA = 0x69, + NM_REPLV_PH = 0x01, + NM_REPLV_QB = 0x09, + NM_BITREV = 0x18, + NM_INSV = 0x20, + NM_RADDU_W_QB = 0x78, + + NM_BITSWAP = 0x05, + NM_WSBH = 0x3d, +}; + +/* PP.SR instruction pool */ +enum { + NM_SAVE = 0x00, + NM_RESTORE = 0x02, + NM_RESTORE_JRC = 0x03, +}; + +/* P.SR.F instruction pool */ +enum { + NM_SAVEF = 0x00, + NM_RESTOREF = 0x01, +}; + +/* P16.SYSCALL instruction pool */ +enum { + NM_SYSCALL16 = 0x00, + NM_HYPCALL16 = 0x01, +}; + +/* POOL16C_00 instruction pool */ +enum { + NM_NOT16 = 0x00, + NM_XOR16 = 0x01, + NM_AND16 = 0x02, + NM_OR16 = 0x03, +}; + +/* PP.LSX and PP.LSXS instruction pool */ +enum { + NM_LBX = 0x00, + NM_LHX = 0x04, + NM_LWX = 0x08, + NM_LDX = 0x0c, + + NM_SBX = 0x01, + NM_SHX = 0x05, + NM_SWX = 0x09, + NM_SDX = 0x0d, + + NM_LBUX = 0x02, + NM_LHUX = 0x06, + NM_LWC1X = 0x0a, + NM_LDC1X = 0x0e, + + NM_LWUX = 0x07, + NM_SWC1X = 0x0b, + NM_SDC1X = 0x0f, + + NM_LHXS = 0x04, + NM_LWXS = 0x08, + NM_LDXS = 0x0c, + + NM_SHXS = 0x05, + 
NM_SWXS = 0x09, + NM_SDXS = 0x0d, + + NM_LHUXS = 0x06, + NM_LWC1XS = 0x0a, + NM_LDC1XS = 0x0e, + + NM_LWUXS = 0x07, + NM_SWC1XS = 0x0b, + NM_SDC1XS = 0x0f, +}; + +/* ERETx instruction pool */ +enum { + NM_ERET = 0x00, + NM_ERETNC = 0x01, +}; + +/* POOL32FxF_{0, 1} instruction pool */ +enum { + NM_CFC1 = 0x40, + NM_CTC1 = 0x60, + NM_MFC1 = 0x80, + NM_MTC1 = 0xa0, + NM_MFHC1 = 0xc0, + NM_MTHC1 = 0xe0, + + NM_CVT_S_PL = 0x84, + NM_CVT_S_PU = 0xa4, + + NM_CVT_L_S = 0x004, + NM_CVT_L_D = 0x104, + NM_CVT_W_S = 0x024, + NM_CVT_W_D = 0x124, + + NM_RSQRT_S = 0x008, + NM_RSQRT_D = 0x108, + + NM_SQRT_S = 0x028, + NM_SQRT_D = 0x128, + + NM_RECIP_S = 0x048, + NM_RECIP_D = 0x148, + + NM_FLOOR_L_S = 0x00c, + NM_FLOOR_L_D = 0x10c, + + NM_FLOOR_W_S = 0x02c, + NM_FLOOR_W_D = 0x12c, + + NM_CEIL_L_S = 0x04c, + NM_CEIL_L_D = 0x14c, + NM_CEIL_W_S = 0x06c, + NM_CEIL_W_D = 0x16c, + NM_TRUNC_L_S = 0x08c, + NM_TRUNC_L_D = 0x18c, + NM_TRUNC_W_S = 0x0ac, + NM_TRUNC_W_D = 0x1ac, + NM_ROUND_L_S = 0x0cc, + NM_ROUND_L_D = 0x1cc, + NM_ROUND_W_S = 0x0ec, + NM_ROUND_W_D = 0x1ec, + + NM_MOV_S = 0x01, + NM_MOV_D = 0x81, + NM_ABS_S = 0x0d, + NM_ABS_D = 0x8d, + NM_NEG_S = 0x2d, + NM_NEG_D = 0xad, + NM_CVT_D_S = 0x04d, + NM_CVT_D_W = 0x0cd, + NM_CVT_D_L = 0x14d, + NM_CVT_S_D = 0x06d, + NM_CVT_S_W = 0x0ed, + NM_CVT_S_L = 0x16d, +}; + +/* P.LL instruction pool */ +enum { + NM_LL = 0x00, + NM_LLWP = 0x01, +}; + +/* P.SC instruction pool */ +enum { + NM_SC = 0x00, + NM_SCWP = 0x01, +}; + +/* P.DVP instruction pool */ +enum { + NM_DVP = 0x00, + NM_EVP = 0x01, +}; + + +/* + * + * nanoMIPS decoding engine + * + */ + + +/* extraction utilities */ + +#define NANOMIPS_EXTRACT_RT3(op) ((op >> 7) & 0x7) +#define NANOMIPS_EXTRACT_RS3(op) ((op >> 4) & 0x7) +#define NANOMIPS_EXTRACT_RD3(op) ((op >> 1) & 0x7) +#define NANOMIPS_EXTRACT_RD5(op) ((op >> 5) & 0x1f) +#define NANOMIPS_EXTRACT_RS5(op) (op & 0x1f) + +/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr3'). */ +static inline int decode_gpr_gpr3(int r) +{ + static const int map[] = { 16, 17, 18, 19, 4, 5, 6, 7 }; + + return map[r & 0x7]; +} + +/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr3.src.store'). */ +static inline int decode_gpr_gpr3_src_store(int r) +{ + static const int map[] = { 0, 17, 18, 19, 4, 5, 6, 7 }; + + return map[r & 0x7]; +} + +/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr4'). */ +static inline int decode_gpr_gpr4(int r) +{ + static const int map[] = { 8, 9, 10, 11, 4, 5, 6, 7, + 16, 17, 18, 19, 20, 21, 22, 23 }; + + return map[r & 0xf]; +} + +/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr4.zero'). */ +static inline int decode_gpr_gpr4_zero(int r) +{ + static const int map[] = { 8, 9, 10, 0, 4, 5, 6, 7, + 16, 17, 18, 19, 20, 21, 22, 23 }; + + return map[r & 0xf]; +} + + +static void gen_adjust_sp(DisasContext *ctx, int u) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_op_addr_addi(ctx, tcg_ctx->cpu_gpr[29], tcg_ctx->cpu_gpr[29], u); +} + +static void gen_save(DisasContext *ctx, uint8_t rt, uint8_t count, + uint8_t gp, uint16_t u) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int counter = 0; + TCGv va = tcg_temp_new(tcg_ctx); + TCGv t0 = tcg_temp_new(tcg_ctx); + + while (counter != count) { + bool use_gp = gp && (counter == count - 1); + int this_rt = use_gp ?
28 : (rt & 0x10) | ((rt + counter) & 0x1f); + int this_offset = -((counter + 1) << 2); + gen_base_offset_addr(ctx, va, 29, this_offset); + gen_load_gpr(tcg_ctx, t0, this_rt); + tcg_gen_qemu_st_tl(tcg_ctx, t0, va, ctx->mem_idx, + (MO_TEUL | ctx->default_tcg_memop_mask)); + counter++; + } + + /* adjust stack pointer */ + gen_adjust_sp(ctx, -u); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, va); +} + +static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count, + uint8_t gp, uint16_t u) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int counter = 0; + TCGv va = tcg_temp_new(tcg_ctx); + TCGv t0 = tcg_temp_new(tcg_ctx); + + while (counter != count) { + bool use_gp = gp && (counter == count - 1); + int this_rt = use_gp ? 28 : (rt & 0x10) | ((rt + counter) & 0x1f); + int this_offset = u - ((counter + 1) << 2); + gen_base_offset_addr(ctx, va, 29, this_offset); + tcg_gen_qemu_ld_tl(tcg_ctx, t0, va, ctx->mem_idx, MO_TESL | + ctx->default_tcg_memop_mask); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + gen_store_gpr(tcg_ctx, t0, this_rt); + counter++; + } + + /* adjust stack pointer */ + gen_adjust_sp(ctx, u); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, va); +} + +static void gen_pool16c_nanomips_insn(DisasContext *ctx) +{ + int rt = decode_gpr_gpr3(NANOMIPS_EXTRACT_RT3(ctx->opcode)); + int rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode)); + + switch (extract32(ctx->opcode, 2, 2)) { + case NM_NOT16: + gen_logic(ctx, OPC_NOR, rt, rs, 0); + break; + case NM_AND16: + gen_logic(ctx, OPC_AND, rt, rt, rs); + break; + case NM_XOR16: + gen_logic(ctx, OPC_XOR, rt, rt, rs); + break; + case NM_OR16: + gen_logic(ctx, OPC_OR, rt, rt, rs); + break; + } +} + +static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rt = extract32(ctx->opcode, 21, 5); + int rs = extract32(ctx->opcode, 16, 5); + int rd = extract32(ctx->opcode, 11, 5); + + switch (extract32(ctx->opcode, 3, 7)) { + case NM_P_TRAP: + switch (extract32(ctx->opcode, 10, 1)) { + case NM_TEQ: + check_nms(ctx); + gen_trap(ctx, OPC_TEQ, rs, rt, -1); + break; + case NM_TNE: + check_nms(ctx); + gen_trap(ctx, OPC_TNE, rs, rt, -1); + break; + } + break; + case NM_RDHWR: + check_nms(ctx); + gen_rdhwr(ctx, rt, rs, extract32(ctx->opcode, 11, 3)); + break; + case NM_SEB: + check_nms(ctx); + gen_bshfl(ctx, OPC_SEB, rs, rt); + break; + case NM_SEH: + gen_bshfl(ctx, OPC_SEH, rs, rt); + break; + case NM_SLLV: + gen_shift(ctx, OPC_SLLV, rd, rt, rs); + break; + case NM_SRLV: + gen_shift(ctx, OPC_SRLV, rd, rt, rs); + break; + case NM_SRAV: + gen_shift(ctx, OPC_SRAV, rd, rt, rs); + break; + case NM_ROTRV: + gen_shift(ctx, OPC_ROTRV, rd, rt, rs); + break; + case NM_ADD: + gen_arith(ctx, OPC_ADD, rd, rs, rt); + break; + case NM_ADDU: + gen_arith(ctx, OPC_ADDU, rd, rs, rt); + break; + case NM_SUB: + check_nms(ctx); + gen_arith(ctx, OPC_SUB, rd, rs, rt); + break; + case NM_SUBU: + gen_arith(ctx, OPC_SUBU, rd, rs, rt); + break; + case NM_P_CMOVE: + switch (extract32(ctx->opcode, 10, 1)) { + case NM_MOVZ: + gen_cond_move(ctx, OPC_MOVZ, rd, rs, rt); + break; + case NM_MOVN: + gen_cond_move(ctx, OPC_MOVN, rd, rs, rt); + break; + } + break; + case NM_AND: + gen_logic(ctx, OPC_AND, rd, rs, rt); + break; + case NM_OR: + gen_logic(ctx, OPC_OR, rd, rs, rt); + break; + case NM_NOR: + gen_logic(ctx, OPC_NOR, rd, rs, rt); + break; + case NM_XOR: + gen_logic(ctx, OPC_XOR, rd, rs, rt); + break; + case NM_SLT: + gen_slt(ctx, OPC_SLT, rd, rs, rt); + break; + case NM_P_SLTU: + if (rd == 0) { + 
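/* rd == 0 re-uses the SLTU encoding as the P.DVP pool: DVP/EVP disable/enable the other virtual processors and write their result to rt; without the VP extension they act as NOPs. */ +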
/* P_DVP */ + TCGv t0 = tcg_temp_new(tcg_ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case NM_DVP: + if (ctx->vp) { + check_cp0_enabled(ctx); + gen_helper_dvp(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + } + break; + case NM_EVP: + if (ctx->vp) { + check_cp0_enabled(ctx); + gen_helper_evp(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + } + break; + } + tcg_temp_free(tcg_ctx, t0); + } else { + gen_slt(ctx, OPC_SLTU, rd, rs, rt); + } + break; + case NM_SOV: + { + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t1, rs); + gen_load_gpr(tcg_ctx, t2, rt); + tcg_gen_add_tl(tcg_ctx, t0, t1, t2); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_xor_tl(tcg_ctx, t1, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t2, t0, t2); + tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); + + /* operands of same sign, result different sign */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, t0, t1, 0); + gen_store_gpr(tcg_ctx, t0, rd); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + } + break; + case NM_MUL: + gen_r6_muldiv(ctx, R6_OPC_MUL, rd, rs, rt); + break; + case NM_MUH: + gen_r6_muldiv(ctx, R6_OPC_MUH, rd, rs, rt); + break; + case NM_MULU: + gen_r6_muldiv(ctx, R6_OPC_MULU, rd, rs, rt); + break; + case NM_MUHU: + gen_r6_muldiv(ctx, R6_OPC_MUHU, rd, rs, rt); + break; + case NM_DIV: + gen_r6_muldiv(ctx, R6_OPC_DIV, rd, rs, rt); + break; + case NM_MOD: + gen_r6_muldiv(ctx, R6_OPC_MOD, rd, rs, rt); + break; + case NM_DIVU: + gen_r6_muldiv(ctx, R6_OPC_DIVU, rd, rs, rt); + break; + case NM_MODU: + gen_r6_muldiv(ctx, R6_OPC_MODU, rd, rs, rt); + break; + case NM_MFC0: + check_cp0_enabled(ctx); + if (rt == 0) { + /* Treat as NOP. */ + break; + } + gen_mfc0(ctx, tcg_ctx->cpu_gpr[rt], rs, extract32(ctx->opcode, 11, 3)); + break; + case NM_MTC0: + check_cp0_enabled(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + gen_mtc0(ctx, t0, rs, extract32(ctx->opcode, 11, 3)); + tcg_temp_free(tcg_ctx, t0); + } + break; + case NM_D_E_MT_VPE: + { + uint8_t sc = extract32(ctx->opcode, 10, 1); + TCGv t0 = tcg_temp_new(tcg_ctx); + + switch (sc) { + case 0: + if (rs == 1) { + /* DMT */ + check_cp0_mt(ctx); + gen_helper_dmt(tcg_ctx, t0); + gen_store_gpr(tcg_ctx, t0, rt); + } else if (rs == 0) { + /* DVPE */ + check_cp0_mt(ctx); + gen_helper_dvpe(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + } else { + generate_exception_end(ctx, EXCP_RI); + } + break; + case 1: + if (rs == 1) { + /* EMT */ + check_cp0_mt(ctx); + gen_helper_emt(tcg_ctx, t0); + gen_store_gpr(tcg_ctx, t0, rt); + } else if (rs == 0) { + /* EVPE */ + check_cp0_mt(ctx); + gen_helper_evpe(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + } else { + generate_exception_end(ctx, EXCP_RI); + } + break; + } + + tcg_temp_free(tcg_ctx, t0); + } + break; + case NM_FORK: + check_mt(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + gen_load_gpr(tcg_ctx, t1, rs); + gen_helper_fork(tcg_ctx, t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + } + break; + case NM_MFTR: + case NM_MFHTR: + check_cp0_enabled(ctx); + if (rd == 0) { + /* Treat as NOP. 
*/ + return; + } + gen_mftr(env, ctx, rs, rt, extract32(ctx->opcode, 10, 1), + extract32(ctx->opcode, 11, 5), extract32(ctx->opcode, 3, 1)); + break; + case NM_MTTR: + case NM_MTHTR: + check_cp0_enabled(ctx); + gen_mttr(env, ctx, rs, rt, extract32(ctx->opcode, 10, 1), + extract32(ctx->opcode, 11, 5), extract32(ctx->opcode, 3, 1)); + break; + case NM_YIELD: + check_mt(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_helper_yield(tcg_ctx, t0, tcg_ctx->cpu_env, t0); + gen_store_gpr(tcg_ctx, t0, rt); + tcg_temp_free(tcg_ctx, t0); + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* dsp */ +static void gen_pool32axf_1_5_nanomips_insn(DisasContext *ctx, uint32_t opc, + int ret, int v1, int v2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0; + TCGv v0_t; + TCGv v1_t; + + t0 = tcg_temp_new_i32(tcg_ctx); + + v0_t = tcg_temp_new(tcg_ctx); + v1_t = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_i32(tcg_ctx, t0, v2 >> 3); + + gen_load_gpr(tcg_ctx, v0_t, ret); + gen_load_gpr(tcg_ctx, v1_t, v1); + + switch (opc) { + case NM_MAQ_S_W_PHR: + check_dsp(ctx); + gen_helper_maq_s_w_phr(tcg_ctx, t0, v1_t, v0_t, tcg_ctx->cpu_env); + break; + case NM_MAQ_S_W_PHL: + check_dsp(ctx); + gen_helper_maq_s_w_phl(tcg_ctx, t0, v1_t, v0_t, tcg_ctx->cpu_env); + break; + case NM_MAQ_SA_W_PHR: + check_dsp(ctx); + gen_helper_maq_sa_w_phr(tcg_ctx, t0, v1_t, v0_t, tcg_ctx->cpu_env); + break; + case NM_MAQ_SA_W_PHL: + check_dsp(ctx); + gen_helper_maq_sa_w_phl(tcg_ctx, t0, v1_t, v0_t, tcg_ctx->cpu_env); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, t0); + + tcg_temp_free(tcg_ctx, v0_t); + tcg_temp_free(tcg_ctx, v1_t); +} + + +static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, + int ret, int v1, int v2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int16_t imm; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv v0_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, v0_t, v1); + + switch (opc) { + case NM_POOL32AXF_1_0: + check_dsp(ctx); + switch (extract32(ctx->opcode, 12, 2)) { + case NM_MFHI: + gen_HILO(ctx, OPC_MFHI, v2 >> 3, ret); + break; + case NM_MFLO: + gen_HILO(ctx, OPC_MFLO, v2 >> 3, ret); + break; + case NM_MTHI: + gen_HILO(ctx, OPC_MTHI, v2 >> 3, v1); + break; + case NM_MTLO: + gen_HILO(ctx, OPC_MTLO, v2 >> 3, v1); + break; + } + break; + case NM_POOL32AXF_1_1: + check_dsp(ctx); + switch (extract32(ctx->opcode, 12, 2)) { + case NM_MTHLIP: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_mthlip(tcg_ctx, t0, v0_t, tcg_ctx->cpu_env); + break; + case NM_SHILOV: + tcg_gen_movi_tl(tcg_ctx, t0, v2 >> 3); + gen_helper_shilo(tcg_ctx, t0, v0_t, tcg_ctx->cpu_env); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_POOL32AXF_1_3: + check_dsp(ctx); + imm = extract32(ctx->opcode, 14, 7); + switch (extract32(ctx->opcode, 12, 2)) { + case NM_RDDSP: + tcg_gen_movi_tl(tcg_ctx, t0, imm); + gen_helper_rddsp(tcg_ctx, t0, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + case NM_WRDSP: + gen_load_gpr(tcg_ctx, t0, ret); + tcg_gen_movi_tl(tcg_ctx, t1, imm); + gen_helper_wrdsp(tcg_ctx, t0, t1, tcg_ctx->cpu_env); + break; + case NM_EXTP: + tcg_gen_movi_tl(tcg_ctx, t0, v2 >> 3); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extp(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + case NM_EXTPDP: + tcg_gen_movi_tl(tcg_ctx, t0, v2 >> 3); + tcg_gen_movi_tl(tcg_ctx, t1, 
v1); + gen_helper_extpdp(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + } + break; + case NM_POOL32AXF_1_4: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, v2 >> 2); + switch (extract32(ctx->opcode, 12, 1)) { + case NM_SHLL_QB: + gen_helper_shll_qb(tcg_ctx, t0, t0, v0_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + case NM_SHRL_QB: + gen_helper_shrl_qb(tcg_ctx, t0, t0, v0_t); + gen_store_gpr(tcg_ctx, t0, ret); + break; + } + break; + case NM_POOL32AXF_1_5: + opc = extract32(ctx->opcode, 12, 2); + gen_pool32axf_1_5_nanomips_insn(ctx, opc, ret, v1, v2); + break; + case NM_POOL32AXF_1_7: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, v2 >> 3); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + switch (extract32(ctx->opcode, 12, 2)) { + case NM_EXTR_W: + gen_helper_extr_w(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + case NM_EXTR_R_W: + gen_helper_extr_r_w(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + case NM_EXTR_RS_W: + gen_helper_extr_rs_w(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + case NM_EXTR_S_H: + gen_helper_extr_s_h(tcg_ctx, t0, t0, t1, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, v0_t); +} + +static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, + TCGv v0, TCGv v1, int rd) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0; + + t0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_movi_i32(tcg_ctx, t0, rd >> 3); + + switch (opc) { + case NM_POOL32AXF_2_0_7: + switch (extract32(ctx->opcode, 9, 3)) { + case NM_DPA_W_PH: + check_dsp_r2(ctx); + gen_helper_dpa_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); + break; + case NM_DPAQ_S_W_PH: + check_dsp(ctx); + gen_helper_dpaq_s_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); + break; + case NM_DPS_W_PH: + check_dsp_r2(ctx); + gen_helper_dps_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); + break; + case NM_DPSQ_S_W_PH: + check_dsp(ctx); + gen_helper_dpsq_s_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_POOL32AXF_2_8_15: + switch (extract32(ctx->opcode, 9, 3)) { + case NM_DPAX_W_PH: + check_dsp_r2(ctx); + gen_helper_dpax_w_ph(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); + break; + case NM_DPAQ_SA_L_W: + check_dsp(ctx); + gen_helper_dpaq_sa_l_w(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); + break; + case NM_DPSX_W_PH: + check_dsp_r2(ctx); + gen_helper_dpsx_w_ph(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); + break; + case NM_DPSQ_SA_L_W: + check_dsp(ctx); + gen_helper_dpsq_sa_l_w(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_POOL32AXF_2_16_23: + switch (extract32(ctx->opcode, 9, 3)) { + case NM_DPAU_H_QBL: + check_dsp(ctx); + gen_helper_dpau_h_qbl(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); + break; + case NM_DPAQX_S_W_PH: + check_dsp_r2(ctx); + gen_helper_dpaqx_s_w_ph(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); + break; + case NM_DPSU_H_QBL: + check_dsp(ctx); + gen_helper_dpsu_h_qbl(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); + break; + case NM_DPSQX_S_W_PH: + check_dsp_r2(ctx); + gen_helper_dpsqx_s_w_ph(tcg_ctx, t0, v0, v1, tcg_ctx->cpu_env); + break; + case NM_MULSA_W_PH: + check_dsp_r2(ctx); + gen_helper_mulsa_w_ph(tcg_ctx, t0, v0, v1, 
tcg_ctx->cpu_env); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_POOL32AXF_2_24_31: + switch (extract32(ctx->opcode, 9, 3)) { + case NM_DPAU_H_QBR: + check_dsp(ctx); + gen_helper_dpau_h_qbr(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); + break; + case NM_DPAQX_SA_W_PH: + check_dsp_r2(ctx); + gen_helper_dpaqx_sa_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); + break; + case NM_DPSU_H_QBR: + check_dsp(ctx); + gen_helper_dpsu_h_qbr(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); + break; + case NM_DPSQX_SA_W_PH: + check_dsp_r2(ctx); + gen_helper_dpsqx_sa_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); + break; + case NM_MULSAQ_S_W_PH: + check_dsp(ctx); + gen_helper_mulsaq_s_w_ph(tcg_ctx, t0, v1, v0, tcg_ctx->cpu_env); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, + int rt, int rs, int rd) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int ret = rt; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv v0_t = tcg_temp_new(tcg_ctx); + TCGv v1_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, v0_t, rt); + gen_load_gpr(tcg_ctx, v1_t, rs); + + switch (opc) { + case NM_POOL32AXF_2_0_7: + switch (extract32(ctx->opcode, 9, 3)) { + case NM_DPA_W_PH: + case NM_DPAQ_S_W_PH: + case NM_DPS_W_PH: + case NM_DPSQ_S_W_PH: + gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd); + break; + case NM_BALIGN: + check_dsp_r2(ctx); + if (rt != 0) { + gen_load_gpr(tcg_ctx, t0, rs); + rd &= 3; + if (rd != 0 && rd != 2) { + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], 8 * rd); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_shri_tl(tcg_ctx, t0, t0, 8 * (4 - rd)); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); + } + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret]); + } + break; + case NM_MADD: + check_dsp(ctx); + { + int acc = extract32(ctx->opcode, 14, 2); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + gen_load_gpr(tcg_ctx, t1, rs); + tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); + tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); + tcg_gen_add_i64(tcg_ctx, t2, t2, t3); + tcg_temp_free_i64(tcg_ctx, t3); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); + tcg_temp_free_i64(tcg_ctx, t2); + } + break; + case NM_MULT: + check_dsp(ctx); + { + int acc = extract32(ctx->opcode, 14, 2); + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; + case NM_EXTRV_W: + check_dsp(ctx); + gen_load_gpr(tcg_ctx, v1_t, rs); + tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); + gen_helper_extr_w(tcg_ctx, t0, t0, v1_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + } + break; + case NM_POOL32AXF_2_8_15: + switch (extract32(ctx->opcode, 9, 3)) { + case 
NM_DPAX_W_PH: + case NM_DPAQ_SA_L_W: + case NM_DPSX_W_PH: + case NM_DPSQ_SA_L_W: + gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd); + break; + case NM_MADDU: + check_dsp(ctx); + { + int acc = extract32(ctx->opcode, 14, 2); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); + tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); + tcg_gen_add_i64(tcg_ctx, t2, t2, t3); + tcg_temp_free_i64(tcg_ctx, t3); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); + tcg_temp_free_i64(tcg_ctx, t2); + } + break; + case NM_MULTU: + check_dsp(ctx); + { + int acc = extract32(ctx->opcode, 14, 2); + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + tcg_gen_ext_i32_tl(tcg_ctx, tcg_ctx->cpu_HI[acc], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + break; + case NM_EXTRV_R_W: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); + gen_helper_extr_r_w(tcg_ctx, t0, t0, v1_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_POOL32AXF_2_16_23: + switch (extract32(ctx->opcode, 9, 3)) { + case NM_DPAU_H_QBL: + case NM_DPAQX_S_W_PH: + case NM_DPSU_H_QBL: + case NM_DPSQX_S_W_PH: + case NM_MULSA_W_PH: + gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd); + break; + case NM_EXTPV: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); + gen_helper_extp(tcg_ctx, t0, t0, v1_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + case NM_MSUB: + check_dsp(ctx); + { + int acc = extract32(ctx->opcode, 14, 2); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); + tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); + tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); + tcg_temp_free_i64(tcg_ctx, t3); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); + tcg_temp_free_i64(tcg_ctx, t2); + } + break; + case NM_EXTRV_RS_W: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); + gen_helper_extr_rs_w(tcg_ctx, t0, t0, v1_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + } + break; + case NM_POOL32AXF_2_24_31: + switch (extract32(ctx->opcode, 9, 3)) { + case NM_DPAU_H_QBR: + case NM_DPAQX_SA_W_PH: + case NM_DPSU_H_QBR: + case NM_DPSQX_SA_W_PH: + case NM_MULSAQ_S_W_PH: + gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd); + break; + case NM_EXTPDPV: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); + gen_helper_extpdp(tcg_ctx, t0, t0, v1_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + case NM_MSUBU: + check_dsp(ctx); + { + int acc = extract32(ctx->opcode, 14, 2); + TCGv_i64 t2 = 
tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); + tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, tcg_ctx->cpu_LO[acc], tcg_ctx->cpu_HI[acc]); + tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); + tcg_temp_free_i64(tcg_ctx, t3); + gen_move_low32(tcg_ctx, tcg_ctx->cpu_LO[acc], t2); + gen_move_high32(tcg_ctx, tcg_ctx->cpu_HI[acc], t2); + tcg_temp_free_i64(tcg_ctx, t2); + } + break; + case NM_EXTRV_S_H: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd >> 3); + gen_helper_extr_s_h(tcg_ctx, t0, t0, v0_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, ret); + break; + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + + tcg_temp_free(tcg_ctx, v0_t); + tcg_temp_free(tcg_ctx, v1_t); +} + +static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc, + int rt, int rs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int ret = rt; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv v0_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, v0_t, rs); + + switch (opc) { + case NM_ABSQ_S_QB: + check_dsp_r2(ctx); + gen_helper_absq_s_qb(tcg_ctx, v0_t, v0_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_ABSQ_S_PH: + check_dsp(ctx); + gen_helper_absq_s_ph(tcg_ctx, v0_t, v0_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_ABSQ_S_W: + check_dsp(ctx); + gen_helper_absq_s_w(tcg_ctx, v0_t, v0_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_PRECEQ_W_PHL: + check_dsp(ctx); + tcg_gen_andi_tl(tcg_ctx, v0_t, v0_t, 0xFFFF0000); + tcg_gen_ext32s_tl(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_PRECEQ_W_PHR: + check_dsp(ctx); + tcg_gen_andi_tl(tcg_ctx, v0_t, v0_t, 0x0000FFFF); + tcg_gen_shli_tl(tcg_ctx, v0_t, v0_t, 16); + tcg_gen_ext32s_tl(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_PRECEQU_PH_QBL: + check_dsp(ctx); + gen_helper_precequ_ph_qbl(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_PRECEQU_PH_QBR: + check_dsp(ctx); + gen_helper_precequ_ph_qbr(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_PRECEQU_PH_QBLA: + check_dsp(ctx); + gen_helper_precequ_ph_qbla(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_PRECEQU_PH_QBRA: + check_dsp(ctx); + gen_helper_precequ_ph_qbra(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_PRECEU_PH_QBL: + check_dsp(ctx); + gen_helper_preceu_ph_qbl(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_PRECEU_PH_QBR: + check_dsp(ctx); + gen_helper_preceu_ph_qbr(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_PRECEU_PH_QBLA: + check_dsp(ctx); + gen_helper_preceu_ph_qbla(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_PRECEU_PH_QBRA: + check_dsp(ctx); + gen_helper_preceu_ph_qbra(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_REPLV_PH: + check_dsp(ctx); + tcg_gen_ext16u_tl(tcg_ctx, v0_t, v0_t); + tcg_gen_shli_tl(tcg_ctx, t0, v0_t, 16); + tcg_gen_or_tl(tcg_ctx, v0_t, v0_t, t0); + tcg_gen_ext32s_tl(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + 
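/* REPLV_QB: replicate the low byte of rs across all four byte lanes of the result. */ +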
case NM_REPLV_QB: + check_dsp(ctx); + tcg_gen_ext8u_tl(tcg_ctx, v0_t, v0_t); + tcg_gen_shli_tl(tcg_ctx, t0, v0_t, 8); + tcg_gen_or_tl(tcg_ctx, v0_t, v0_t, t0); + tcg_gen_shli_tl(tcg_ctx, t0, v0_t, 16); + tcg_gen_or_tl(tcg_ctx, v0_t, v0_t, t0); + tcg_gen_ext32s_tl(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_BITREV: + check_dsp(ctx); + gen_helper_bitrev(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_INSV: + check_dsp(ctx); + { + TCGv tv0 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, tv0, rt); + gen_helper_insv(tcg_ctx, v0_t, tcg_ctx->cpu_env, v0_t, tv0); + gen_store_gpr(tcg_ctx, v0_t, ret); + tcg_temp_free(tcg_ctx, tv0); + } + break; + case NM_RADDU_W_QB: + check_dsp(ctx); + gen_helper_raddu_w_qb(tcg_ctx, v0_t, v0_t); + gen_store_gpr(tcg_ctx, v0_t, ret); + break; + case NM_BITSWAP: + gen_bitswap(ctx, OPC_BITSWAP, ret, rs); + break; + case NM_CLO: + check_nms(ctx); + gen_cl(ctx, OPC_CLO, ret, rs); + break; + case NM_CLZ: + check_nms(ctx); + gen_cl(ctx, OPC_CLZ, ret, rs); + break; + case NM_WSBH: + gen_bshfl(ctx, OPC_WSBH, ret, rs); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free(tcg_ctx, v0_t); + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc, + int rt, int rs, int rd) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv rs_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, rs_t, rs); + + switch (opc) { + case NM_SHRA_R_QB: + check_dsp_r2(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd >> 2); + switch (extract32(ctx->opcode, 12, 1)) { + case 0: + /* NM_SHRA_QB */ + gen_helper_shra_qb(tcg_ctx, t0, t0, rs_t); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case 1: + /* NM_SHRA_R_QB */ + gen_helper_shra_r_qb(tcg_ctx, t0, t0, rs_t); + gen_store_gpr(tcg_ctx, t0, rt); + break; + } + break; + case NM_SHRL_PH: + check_dsp_r2(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd >> 1); + gen_helper_shrl_ph(tcg_ctx, t0, t0, rs_t); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case NM_REPL_QB: + check_dsp(ctx); + { + int16_t imm; + target_long result; + imm = extract32(ctx->opcode, 13, 8); + result = (uint32_t)imm << 24 | + (uint32_t)imm << 16 | + (uint32_t)imm << 8 | + (uint32_t)imm; + result = (int32_t)result; + tcg_gen_movi_tl(tcg_ctx, t0, result); + gen_store_gpr(tcg_ctx, t0, rt); + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, rs_t); +} + + +static void gen_pool32axf_nanomips_insn(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rt = extract32(ctx->opcode, 21, 5); + int rs = extract32(ctx->opcode, 16, 5); + int rd = extract32(ctx->opcode, 11, 5); + + switch (extract32(ctx->opcode, 6, 3)) { + case NM_POOL32AXF_1: + { + int32_t op1 = extract32(ctx->opcode, 9, 3); + gen_pool32axf_1_nanomips_insn(ctx, op1, rt, rs, rd); + } + break; + case NM_POOL32AXF_2: + { + int32_t op1 = extract32(ctx->opcode, 12, 2); + gen_pool32axf_2_nanomips_insn(ctx, op1, rt, rs, rd); + } + break; + case NM_POOL32AXF_4: + { + int32_t op1 = extract32(ctx->opcode, 9, 7); + gen_pool32axf_4_nanomips_insn(ctx, op1, rt, rs); + } + break; + case NM_POOL32AXF_5: + switch (extract32(ctx->opcode, 9, 7)) { + case NM_TLBP: + gen_cp0(env, ctx, OPC_TLBP, 0, 0); + break; + case NM_TLBR: + gen_cp0(env, ctx, OPC_TLBR, 0, 0); + break; + case NM_TLBWI: + gen_cp0(env, ctx, OPC_TLBWI, 0, 0); + break; + case NM_TLBWR: + gen_cp0(env, ctx, OPC_TLBWR, 
0, 0); + break; + case NM_TLBINV: + gen_cp0(env, ctx, OPC_TLBINV, 0, 0); + break; + case NM_TLBINVF: + gen_cp0(env, ctx, OPC_TLBINVF, 0, 0); + break; + case NM_DI: + check_cp0_enabled(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + save_cpu_state(ctx, 1); + gen_helper_di(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + tcg_temp_free(tcg_ctx, t0); + } + break; + case NM_EI: + check_cp0_enabled(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + save_cpu_state(ctx, 1); + gen_helper_ei(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + /* Stop translation as we may have switched the execution mode */ + ctx->base.is_jmp = DISAS_STOP; + tcg_temp_free(tcg_ctx, t0); + } + break; + case NM_RDPGPR: + gen_load_srsgpr(tcg_ctx, rs, rt); + break; + case NM_WRPGPR: + gen_store_srsgpr(tcg_ctx, rs, rt); + break; + case NM_WAIT: + gen_cp0(env, ctx, OPC_WAIT, 0, 0); + break; + case NM_DERET: + gen_cp0(env, ctx, OPC_DERET, 0, 0); + break; + case NM_ERETX: + gen_cp0(env, ctx, OPC_ERET, 0, 0); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_POOL32AXF_7: + { + int32_t op1 = extract32(ctx->opcode, 9, 3); + gen_pool32axf_7_nanomips_insn(ctx, op1, rt, rs, rd); + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* Immediate Value Compact Branches */ +static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, + int rt, int32_t imm, int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGCond cond; + int bcond_compute = 0; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + tcg_gen_movi_tl(tcg_ctx, t1, imm); + ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); + + /* Load needed operands and calculate btarget */ + switch (opc) { + case NM_BEQIC: + if (rt == 0 && imm == 0) { + /* Unconditional branch */ + } else if (rt == 0 && imm != 0) { + /* Treat as NOP */ + goto out; + } else { + bcond_compute = 1; + cond = TCG_COND_EQ; + } + break; + case NM_BBEQZC: + case NM_BBNEZC: + check_nms(ctx); + if (imm >= 32 && !(ctx->hflags & MIPS_HFLAG_64)) { + generate_exception_end(ctx, EXCP_RI); + goto out; + } else if (rt == 0 && opc == NM_BBEQZC) { + /* Unconditional branch */ + } else if (rt == 0 && opc == NM_BBNEZC) { + /* Treat as NOP */ + goto out; + } else { + tcg_gen_shri_tl(tcg_ctx, t0, t0, imm); + tcg_gen_andi_tl(tcg_ctx, t0, t0, 1); + tcg_gen_movi_tl(tcg_ctx, t1, 0); + bcond_compute = 1; + if (opc == NM_BBEQZC) { + cond = TCG_COND_EQ; + } else { + cond = TCG_COND_NE; + } + } + break; + case NM_BNEIC: + if (rt == 0 && imm == 0) { + /* Treat as NOP */ + goto out; + } else if (rt == 0 && imm != 0) { + /* Unconditional branch */ + } else { + bcond_compute = 1; + cond = TCG_COND_NE; + } + break; + case NM_BGEIC: + if (rt == 0 && imm == 0) { + /* Unconditional branch */ + } else { + bcond_compute = 1; + cond = TCG_COND_GE; + } + break; + case NM_BLTIC: + bcond_compute = 1; + cond = TCG_COND_LT; + break; + case NM_BGEIUC: + if (rt == 0 && imm == 0) { + /* Unconditional branch */ + } else { + bcond_compute = 1; + cond = TCG_COND_GEU; + } + break; + case NM_BLTIUC: + bcond_compute = 1; + cond = TCG_COND_LTU; + break; + default: + MIPS_INVAL("Immediate Value Compact branch"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + /* branch completion */ + clear_branch_hflags(ctx); + ctx->base.is_jmp = DISAS_NORETURN; + + if 
(bcond_compute == 0) { + /* Unconditional compact branch */ + gen_goto_tb(ctx, 0, ctx->btarget); + } else { + /* Conditional compact branch */ + TCGLabel *fs = gen_new_label(tcg_ctx); + + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(cond), t0, t1, fs); + + gen_goto_tb(ctx, 1, ctx->btarget); + gen_set_label(tcg_ctx, fs); + + gen_goto_tb(ctx, 0, ctx->base.pc_next + 4); + } + +out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +/* P.BALRSC type nanoMIPS R6 branches: BALRSC and BRSC */ +static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs, + int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + /* load rs */ + gen_load_gpr(tcg_ctx, t0, rs); + + /* link */ + if (rt != 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], ctx->base.pc_next + 4); + } + + /* calculate btarget */ + tcg_gen_shli_tl(tcg_ctx, t0, t0, 1); + tcg_gen_movi_tl(tcg_ctx, t1, ctx->base.pc_next + 4); + gen_op_addr_add(ctx, tcg_ctx->btarget, t1, t0); + + /* branch completion */ + clear_branch_hflags(ctx); + ctx->base.is_jmp = DISAS_NORETURN; + + /* unconditional branch to register */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->btarget); + tcg_gen_lookup_and_goto_ptr(tcg_ctx); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +/* nanoMIPS Branches */ +static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc, + int rs, int rt, int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int bcond_compute = 0; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + /* Load needed operands and calculate btarget */ + switch (opc) { + /* compact branch */ + case OPC_BGEC: + case OPC_BLTC: + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + bcond_compute = 1; + ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); + break; + case OPC_BGEUC: + case OPC_BLTUC: + if (rs == 0 || rs == rt) { + /* OPC_BLEZALC, OPC_BGEZALC */ + /* OPC_BGTZALC, OPC_BLTZALC */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[31], ctx->base.pc_next + 4); + } + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + bcond_compute = 1; + ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); + break; + case OPC_BC: + ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); + break; + case OPC_BEQZC: + if (rs != 0) { + /* OPC_BEQZC, OPC_BNEZC */ + gen_load_gpr(tcg_ctx, t0, rs); + bcond_compute = 1; + ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); + } else { + /* OPC_JIC, OPC_JIALC */ + TCGv tbase = tcg_temp_new(tcg_ctx); + TCGv toffset = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, tbase, rt); + tcg_gen_movi_tl(tcg_ctx, toffset, offset); + gen_op_addr_add(ctx, tcg_ctx->btarget, tbase, toffset); + tcg_temp_free(tcg_ctx, tbase); + tcg_temp_free(tcg_ctx, toffset); + } + break; + default: + MIPS_INVAL("Compact branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + if (bcond_compute == 0) { + /* Unconditional compact branch */ + switch (opc) { + case OPC_BC: + gen_goto_tb(ctx, 0, ctx->btarget); + break; + default: + MIPS_INVAL("Compact branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + } else { + /* Conditional compact branch */ + TCGLabel *fs = gen_new_label(tcg_ctx); + + switch (opc) { + case OPC_BGEUC: + if (rs == 0 && rt != 0) { + /* OPC_BLEZALC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE), t1, 0, fs); + } else if (rs != 0 && rt != 0 && rs == rt) { + /* OPC_BGEZALC */ +
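+ /*
+  * Every arm of this switch uses the same two-block shape: branch on
+  * the *inverted* condition to the fall-through label 'fs', emit the
+  * taken path, then place the label and emit the not-taken path.
+  * In outline:
+  *   brcond(!cond) -> fs;  goto_tb(btarget);  fs: goto_tb(pc + 4);
+  * The rs == 0 / rs == rt special cases compare a single register
+  * against zero (brcondi); the general forms compare rs with rt
+  * (brcond).
+  */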
tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t1, 0, fs); + } else { + /* OPC_BGEUC */ + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GEU), t0, t1, fs); + } + break; + case OPC_BLTUC: + if (rs == 0 && rt != 0) { + /* OPC_BGTZALC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT), t1, 0, fs); + } else if (rs != 0 && rt != 0 && rs == rt) { + /* OPC_BLTZALC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t1, 0, fs); + } else { + /* OPC_BLTUC */ + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LTU), t0, t1, fs); + } + break; + case OPC_BGEC: + if (rs == 0 && rt != 0) { + /* OPC_BLEZC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE), t1, 0, fs); + } else if (rs != 0 && rt != 0 && rs == rt) { + /* OPC_BGEZC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t1, 0, fs); + } else { + /* OPC_BGEC */ + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t0, t1, fs); + } + break; + case OPC_BLTC: + if (rs == 0 && rt != 0) { + /* OPC_BGTZC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT), t1, 0, fs); + } else if (rs != 0 && rt != 0 && rs == rt) { + /* OPC_BLTZC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t1, 0, fs); + } else { + /* OPC_BLTC */ + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t0, t1, fs); + } + break; + case OPC_BEQZC: + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t0, 0, fs); + break; + default: + MIPS_INVAL("Compact conditional branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + /* branch completion */ + clear_branch_hflags(ctx); + ctx->base.is_jmp = DISAS_NORETURN; + + /* Generating branch here as compact branches don't have delay slot */ + gen_goto_tb(ctx, 1, ctx->btarget); + gen_set_label(tcg_ctx, fs); + + gen_goto_tb(ctx, 0, ctx->base.pc_next + 4); + } + +out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + + +/* nanoMIPS CP1 Branches */ +static void gen_compute_branch_cp1_nm(DisasContext *ctx, uint32_t op, + int32_t ft, int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong btarget; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, t0, ft); + tcg_gen_andi_i64(tcg_ctx, t0, t0, 1); + + btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); + + switch (op) { + case NM_BC1EQZC: + tcg_gen_xori_i64(tcg_ctx, t0, t0, 1); + ctx->hflags |= MIPS_HFLAG_BC; + break; + case NM_BC1NEZC: + /* t0 already set */ + ctx->hflags |= MIPS_HFLAG_BC; + break; + default: + MIPS_INVAL("cp1 cond branch"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + tcg_gen_trunc_i64_tl(tcg_ctx, tcg_ctx->bcond, t0); + + ctx->btarget = btarget; + +out: + tcg_temp_free_i64(tcg_ctx, t0); +} + + +static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + + if ((extract32(ctx->opcode, 6, 1)) == 1) { + /* PP.LSXS instructions require shifting */ + switch (extract32(ctx->opcode, 7, 4)) { + case NM_SHXS: + check_nms(ctx); + /* fall through */ + case NM_LHXS: + case NM_LHUXS: + tcg_gen_shli_tl(tcg_ctx, t0, t0, 1); + break; + case NM_SWXS: + check_nms(ctx); + /* fall through */ + case NM_LWXS: + case NM_LWC1XS: + case NM_SWC1XS: + tcg_gen_shli_tl(tcg_ctx, t0, t0, 2); + break; + case NM_LDC1XS: + case NM_SDC1XS: + tcg_gen_shli_tl(tcg_ctx, t0, t0, 3); + break; + } + } + gen_op_addr_add(ctx, t0, t0, t1); + + switch 
(extract32(ctx->opcode, 7, 4)) { + case NM_LBX: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, + MO_SB); + gen_store_gpr(tcg_ctx, t0, rd); + break; + case NM_LHX: + /*case NM_LHXS:*/ + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, + MO_TESW); + gen_store_gpr(tcg_ctx, t0, rd); + break; + case NM_LWX: + /*case NM_LWXS:*/ + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, + MO_TESL); + gen_store_gpr(tcg_ctx, t0, rd); + break; + case NM_LBUX: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, + MO_UB); + gen_store_gpr(tcg_ctx, t0, rd); + break; + case NM_LHUX: + /*case NM_LHUXS:*/ + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, + MO_TEUW); + gen_store_gpr(tcg_ctx, t0, rd); + break; + case NM_SBX: + check_nms(ctx); + gen_load_gpr(tcg_ctx, t1, rd); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, + MO_8); + break; + case NM_SHX: + /*case NM_SHXS:*/ + check_nms(ctx); + gen_load_gpr(tcg_ctx, t1, rd); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, + MO_TEUW); + break; + case NM_SWX: + /*case NM_SWXS:*/ + check_nms(ctx); + gen_load_gpr(tcg_ctx, t1, rd); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, + MO_TEUL); + break; + case NM_LWC1X: + /*case NM_LWC1XS:*/ + case NM_LDC1X: + /*case NM_LDC1XS:*/ + case NM_SWC1X: + /*case NM_SWC1XS:*/ + case NM_SDC1X: + /*case NM_SDC1XS:*/ + if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { + check_cp1_enabled(ctx); + switch (extract32(ctx->opcode, 7, 4)) { + case NM_LWC1X: + /*case NM_LWC1XS:*/ + gen_flt_ldst(ctx, OPC_LWC1, rd, t0); + break; + case NM_LDC1X: + /*case NM_LDC1XS:*/ + gen_flt_ldst(ctx, OPC_LDC1, rd, t0); + break; + case NM_SWC1X: + /*case NM_SWC1XS:*/ + gen_flt_ldst(ctx, OPC_SWC1, rd, t0); + break; + case NM_SDC1X: + /*case NM_SDC1XS:*/ + gen_flt_ldst(ctx, OPC_SDC1, rd, t0); + break; + } + } else { + generate_exception_err(ctx, EXCP_CpU, 1); + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_pool32f_nanomips_insn(DisasContext *ctx) +{ + int rt, rs, rd; + + rt = extract32(ctx->opcode, 21, 5); + rs = extract32(ctx->opcode, 16, 5); + rd = extract32(ctx->opcode, 11, 5); + + if (!(ctx->CP0_Config1 & (1 << CP0C1_FP))) { + generate_exception_end(ctx, EXCP_RI); + return; + } + check_cp1_enabled(ctx); + switch (extract32(ctx->opcode, 0, 3)) { + case NM_POOL32F_0: + switch (extract32(ctx->opcode, 3, 7)) { + case NM_RINT_S: + gen_farith(ctx, OPC_RINT_S, 0, rt, rs, 0); + break; + case NM_RINT_D: + gen_farith(ctx, OPC_RINT_D, 0, rt, rs, 0); + break; + case NM_CLASS_S: + gen_farith(ctx, OPC_CLASS_S, 0, rt, rs, 0); + break; + case NM_CLASS_D: + gen_farith(ctx, OPC_CLASS_D, 0, rt, rs, 0); + break; + case NM_ADD_S: + gen_farith(ctx, OPC_ADD_S, rt, rs, rd, 0); + break; + case NM_ADD_D: + gen_farith(ctx, OPC_ADD_D, rt, rs, rd, 0); + break; + case NM_SUB_S: + gen_farith(ctx, OPC_SUB_S, rt, rs, rd, 0); + break; + case NM_SUB_D: + gen_farith(ctx, OPC_SUB_D, rt, rs, rd, 0); + break; + case NM_MUL_S: + gen_farith(ctx, OPC_MUL_S, rt, rs, rd, 0); + break; + case NM_MUL_D: + gen_farith(ctx, OPC_MUL_D, rt, rs, rd, 0); + break; + case NM_DIV_S: + gen_farith(ctx, OPC_DIV_S, rt, rs, rd, 0); + break; + case NM_DIV_D: + gen_farith(ctx, OPC_DIV_D, rt, rs, rd, 0); + break; + case NM_SELEQZ_S: + gen_sel_s(ctx, OPC_SELEQZ_S, rd, rt, rs); + break; + case NM_SELEQZ_D: + gen_sel_d(ctx, OPC_SELEQZ_D, rd, rt, rs); + break; + case NM_SELNEZ_S: + gen_sel_s(ctx, OPC_SELNEZ_S, rd, rt, rs); + break; + case NM_SELNEZ_D: + gen_sel_d(ctx, OPC_SELNEZ_D, rd, rt, rs); + 
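+ /*
+  * For reference, the R6 FP selects route whole operands based on
+  * bit 0 of the selector operand:
+  *   SELEQZ.fmt: fd = (ft & 1) ? 0  : fs;
+  *   SELNEZ.fmt: fd = (ft & 1) ? fs : 0;
+  * while SEL.fmt below uses the old fd value as the selector:
+  *   SEL.fmt:    fd = (fd & 1) ? ft : fs;
+  */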
break; + case NM_SEL_S: + gen_sel_s(ctx, OPC_SEL_S, rd, rt, rs); + break; + case NM_SEL_D: + gen_sel_d(ctx, OPC_SEL_D, rd, rt, rs); + break; + case NM_MADDF_S: + gen_farith(ctx, OPC_MADDF_S, rt, rs, rd, 0); + break; + case NM_MADDF_D: + gen_farith(ctx, OPC_MADDF_D, rt, rs, rd, 0); + break; + case NM_MSUBF_S: + gen_farith(ctx, OPC_MSUBF_S, rt, rs, rd, 0); + break; + case NM_MSUBF_D: + gen_farith(ctx, OPC_MSUBF_D, rt, rs, rd, 0); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_POOL32F_3: + switch (extract32(ctx->opcode, 3, 3)) { + case NM_MIN_FMT: + switch (extract32(ctx->opcode, 9, 1)) { + case FMT_SDPS_S: + gen_farith(ctx, OPC_MIN_S, rt, rs, rd, 0); + break; + case FMT_SDPS_D: + gen_farith(ctx, OPC_MIN_D, rt, rs, rd, 0); + break; + } + break; + case NM_MAX_FMT: + switch (extract32(ctx->opcode, 9, 1)) { + case FMT_SDPS_S: + gen_farith(ctx, OPC_MAX_S, rt, rs, rd, 0); + break; + case FMT_SDPS_D: + gen_farith(ctx, OPC_MAX_D, rt, rs, rd, 0); + break; + } + break; + case NM_MINA_FMT: + switch (extract32(ctx->opcode, 9, 1)) { + case FMT_SDPS_S: + gen_farith(ctx, OPC_MINA_S, rt, rs, rd, 0); + break; + case FMT_SDPS_D: + gen_farith(ctx, OPC_MINA_D, rt, rs, rd, 0); + break; + } + break; + case NM_MAXA_FMT: + switch (extract32(ctx->opcode, 9, 1)) { + case FMT_SDPS_S: + gen_farith(ctx, OPC_MAXA_S, rt, rs, rd, 0); + break; + case FMT_SDPS_D: + gen_farith(ctx, OPC_MAXA_D, rt, rs, rd, 0); + break; + } + break; + case NM_POOL32FXF: + switch (extract32(ctx->opcode, 6, 8)) { + case NM_CFC1: + gen_cp1(ctx, OPC_CFC1, rt, rs); + break; + case NM_CTC1: + gen_cp1(ctx, OPC_CTC1, rt, rs); + break; + case NM_MFC1: + gen_cp1(ctx, OPC_MFC1, rt, rs); + break; + case NM_MTC1: + gen_cp1(ctx, OPC_MTC1, rt, rs); + break; + case NM_MFHC1: + gen_cp1(ctx, OPC_MFHC1, rt, rs); + break; + case NM_MTHC1: + gen_cp1(ctx, OPC_MTHC1, rt, rs); + break; + case NM_CVT_S_PL: + gen_farith(ctx, OPC_CVT_S_PL, -1, rs, rt, 0); + break; + case NM_CVT_S_PU: + gen_farith(ctx, OPC_CVT_S_PU, -1, rs, rt, 0); + break; + default: + switch (extract32(ctx->opcode, 6, 9)) { + case NM_CVT_L_S: + gen_farith(ctx, OPC_CVT_L_S, -1, rs, rt, 0); + break; + case NM_CVT_L_D: + gen_farith(ctx, OPC_CVT_L_D, -1, rs, rt, 0); + break; + case NM_CVT_W_S: + gen_farith(ctx, OPC_CVT_W_S, -1, rs, rt, 0); + break; + case NM_CVT_W_D: + gen_farith(ctx, OPC_CVT_W_D, -1, rs, rt, 0); + break; + case NM_RSQRT_S: + gen_farith(ctx, OPC_RSQRT_S, -1, rs, rt, 0); + break; + case NM_RSQRT_D: + gen_farith(ctx, OPC_RSQRT_D, -1, rs, rt, 0); + break; + case NM_SQRT_S: + gen_farith(ctx, OPC_SQRT_S, -1, rs, rt, 0); + break; + case NM_SQRT_D: + gen_farith(ctx, OPC_SQRT_D, -1, rs, rt, 0); + break; + case NM_RECIP_S: + gen_farith(ctx, OPC_RECIP_S, -1, rs, rt, 0); + break; + case NM_RECIP_D: + gen_farith(ctx, OPC_RECIP_D, -1, rs, rt, 0); + break; + case NM_FLOOR_L_S: + gen_farith(ctx, OPC_FLOOR_L_S, -1, rs, rt, 0); + break; + case NM_FLOOR_L_D: + gen_farith(ctx, OPC_FLOOR_L_D, -1, rs, rt, 0); + break; + case NM_FLOOR_W_S: + gen_farith(ctx, OPC_FLOOR_W_S, -1, rs, rt, 0); + break; + case NM_FLOOR_W_D: + gen_farith(ctx, OPC_FLOOR_W_D, -1, rs, rt, 0); + break; + case NM_CEIL_L_S: + gen_farith(ctx, OPC_CEIL_L_S, -1, rs, rt, 0); + break; + case NM_CEIL_L_D: + gen_farith(ctx, OPC_CEIL_L_D, -1, rs, rt, 0); + break; + case NM_CEIL_W_S: + gen_farith(ctx, OPC_CEIL_W_S, -1, rs, rt, 0); + break; + case NM_CEIL_W_D: + gen_farith(ctx, OPC_CEIL_W_D, -1, rs, rt, 0); + break; + case NM_TRUNC_L_S: + gen_farith(ctx, OPC_TRUNC_L_S, -1, rs, rt, 0); + break; + case 
NM_TRUNC_L_D: + gen_farith(ctx, OPC_TRUNC_L_D, -1, rs, rt, 0); + break; + case NM_TRUNC_W_S: + gen_farith(ctx, OPC_TRUNC_W_S, -1, rs, rt, 0); + break; + case NM_TRUNC_W_D: + gen_farith(ctx, OPC_TRUNC_W_D, -1, rs, rt, 0); + break; + case NM_ROUND_L_S: + gen_farith(ctx, OPC_ROUND_L_S, -1, rs, rt, 0); + break; + case NM_ROUND_L_D: + gen_farith(ctx, OPC_ROUND_L_D, -1, rs, rt, 0); + break; + case NM_ROUND_W_S: + gen_farith(ctx, OPC_ROUND_W_S, -1, rs, rt, 0); + break; + case NM_ROUND_W_D: + gen_farith(ctx, OPC_ROUND_W_D, -1, rs, rt, 0); + break; + case NM_MOV_S: + gen_farith(ctx, OPC_MOV_S, -1, rs, rt, 0); + break; + case NM_MOV_D: + gen_farith(ctx, OPC_MOV_D, -1, rs, rt, 0); + break; + case NM_ABS_S: + gen_farith(ctx, OPC_ABS_S, -1, rs, rt, 0); + break; + case NM_ABS_D: + gen_farith(ctx, OPC_ABS_D, -1, rs, rt, 0); + break; + case NM_NEG_S: + gen_farith(ctx, OPC_NEG_S, -1, rs, rt, 0); + break; + case NM_NEG_D: + gen_farith(ctx, OPC_NEG_D, -1, rs, rt, 0); + break; + case NM_CVT_D_S: + gen_farith(ctx, OPC_CVT_D_S, -1, rs, rt, 0); + break; + case NM_CVT_D_W: + gen_farith(ctx, OPC_CVT_D_W, -1, rs, rt, 0); + break; + case NM_CVT_D_L: + gen_farith(ctx, OPC_CVT_D_L, -1, rs, rt, 0); + break; + case NM_CVT_S_D: + gen_farith(ctx, OPC_CVT_S_D, -1, rs, rt, 0); + break; + case NM_CVT_S_W: + gen_farith(ctx, OPC_CVT_S_W, -1, rs, rt, 0); + break; + case NM_CVT_S_L: + gen_farith(ctx, OPC_CVT_S_L, -1, rs, rt, 0); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + } + break; + } + break; + case NM_POOL32F_5: + switch (extract32(ctx->opcode, 3, 3)) { + case NM_CMP_CONDN_S: + gen_r6_cmp_s(ctx, extract32(ctx->opcode, 6, 5), rt, rs, rd); + break; + case NM_CMP_CONDN_D: + gen_r6_cmp_d(ctx, extract32(ctx->opcode, 6, 5), rt, rs, rd); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int ret = rd; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv v1_t = tcg_temp_new(tcg_ctx); + TCGv v2_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, v1_t, rs); + gen_load_gpr(tcg_ctx, v2_t, rt); + + switch (opc) { + case NM_CMP_EQ_PH: + check_dsp(ctx); + gen_helper_cmp_eq_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case NM_CMP_LT_PH: + check_dsp(ctx); + gen_helper_cmp_lt_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case NM_CMP_LE_PH: + check_dsp(ctx); + gen_helper_cmp_le_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case NM_CMPU_EQ_QB: + check_dsp(ctx); + gen_helper_cmpu_eq_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case NM_CMPU_LT_QB: + check_dsp(ctx); + gen_helper_cmpu_lt_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case NM_CMPU_LE_QB: + check_dsp(ctx); + gen_helper_cmpu_le_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case NM_CMPGU_EQ_QB: + check_dsp(ctx); + gen_helper_cmpgu_eq_qb(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_CMPGU_LT_QB: + check_dsp(ctx); + gen_helper_cmpgu_lt_qb(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_CMPGU_LE_QB: + check_dsp(ctx); + gen_helper_cmpgu_le_qb(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_CMPGDU_EQ_QB: + check_dsp_r2(ctx); + gen_helper_cmpgu_eq_qb(tcg_ctx, v1_t, v1_t, v2_t); + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, v1_t, 24, 4); + 
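+ /*
+  * CMPGDU.EQ.QB is CMPGU.EQ.QB plus a copy of the 4-bit per-byte
+  * comparison bitmap into the DSPControl ccond field; the deposit
+  * above is, in plain C:
+  *   dspctrl = (dspctrl & ~(0xfu << 24)) | ((res & 0xfu) << 24);
+  * The LT and LE variants below follow the same pattern.
+  */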
gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_CMPGDU_LT_QB: + check_dsp_r2(ctx); + gen_helper_cmpgu_lt_qb(tcg_ctx, v1_t, v1_t, v2_t); + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, v1_t, 24, 4); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_CMPGDU_LE_QB: + check_dsp_r2(ctx); + gen_helper_cmpgu_le_qb(tcg_ctx, v1_t, v1_t, v2_t); + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, v1_t, 24, 4); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_PACKRL_PH: + check_dsp(ctx); + gen_helper_packrl_ph(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_PICK_QB: + check_dsp(ctx); + gen_helper_pick_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_PICK_PH: + check_dsp(ctx); + gen_helper_pick_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_ADDQ_S_W: + check_dsp(ctx); + gen_helper_addq_s_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_SUBQ_S_W: + check_dsp(ctx); + gen_helper_subq_s_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_ADDSC: + check_dsp(ctx); + gen_helper_addsc(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_ADDWC: + check_dsp(ctx); + gen_helper_addwc(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_ADDQ_S_PH: + check_dsp(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* ADDQ_PH */ + gen_helper_addq_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* ADDQ_S_PH */ + gen_helper_addq_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_ADDQH_R_PH: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* ADDQH_PH */ + gen_helper_addqh_ph(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* ADDQH_R_PH */ + gen_helper_addqh_r_ph(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_ADDQH_R_W: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* ADDQH_W */ + gen_helper_addqh_w(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* ADDQH_R_W */ + gen_helper_addqh_r_w(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_ADDU_S_QB: + check_dsp(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* ADDU_QB */ + gen_helper_addu_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* ADDU_S_QB */ + gen_helper_addu_s_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_ADDU_S_PH: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* ADDU_PH */ + gen_helper_addu_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* ADDU_S_PH */ + gen_helper_addu_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_ADDUH_R_QB: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* ADDUH_QB */ + gen_helper_adduh_qb(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, 
v1_t, ret); + break; + case 1: + /* ADDUH_R_QB */ + gen_helper_adduh_r_qb(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_SHRAV_R_PH: + check_dsp(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* SHRAV_PH */ + gen_helper_shra_ph(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* SHRAV_R_PH */ + gen_helper_shra_r_ph(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_SHRAV_R_QB: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* SHRAV_QB */ + gen_helper_shra_qb(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* SHRAV_R_QB */ + gen_helper_shra_r_qb(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_SUBQ_S_PH: + check_dsp(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* SUBQ_PH */ + gen_helper_subq_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* SUBQ_S_PH */ + gen_helper_subq_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_SUBQH_R_PH: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* SUBQH_PH */ + gen_helper_subqh_ph(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* SUBQH_R_PH */ + gen_helper_subqh_r_ph(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_SUBQH_R_W: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* SUBQH_W */ + gen_helper_subqh_w(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* SUBQH_R_W */ + gen_helper_subqh_r_w(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_SUBU_S_QB: + check_dsp(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* SUBU_QB */ + gen_helper_subu_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* SUBU_S_QB */ + gen_helper_subu_s_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_SUBU_S_PH: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* SUBU_PH */ + gen_helper_subu_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* SUBU_S_PH */ + gen_helper_subu_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_SUBUH_R_QB: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* SUBUH_QB */ + gen_helper_subuh_qb(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* SUBUH_R_QB */ + gen_helper_subuh_r_qb(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_SHLLV_S_PH: + check_dsp(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* SHLLV_PH */ + gen_helper_shll_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* SHLLV_S_PH */ + gen_helper_shll_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_PRECR_SRA_R_PH_W: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* PRECR_SRA_PH_W */ + { + TCGv_i32 sa_t = 
tcg_const_i32(tcg_ctx, rd); + gen_helper_precr_sra_ph_w(tcg_ctx, v1_t, sa_t, v1_t, + tcg_ctx->cpu_gpr[rt]); + gen_store_gpr(tcg_ctx, v1_t, rt); + tcg_temp_free_i32(tcg_ctx, sa_t); + } + break; + case 1: + /* PRECR_SRA_R_PH_W */ + { + TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, rd); + gen_helper_precr_sra_r_ph_w(tcg_ctx, v1_t, sa_t, v1_t, + tcg_ctx->cpu_gpr[rt]); + gen_store_gpr(tcg_ctx, v1_t, rt); + tcg_temp_free_i32(tcg_ctx, sa_t); + } + break; + } + break; + case NM_MULEU_S_PH_QBL: + check_dsp(ctx); + gen_helper_muleu_s_ph_qbl(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_MULEU_S_PH_QBR: + check_dsp(ctx); + gen_helper_muleu_s_ph_qbr(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_MULQ_RS_PH: + check_dsp(ctx); + gen_helper_mulq_rs_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_MULQ_S_PH: + check_dsp_r2(ctx); + gen_helper_mulq_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_MULQ_RS_W: + check_dsp_r2(ctx); + gen_helper_mulq_rs_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_MULQ_S_W: + check_dsp_r2(ctx); + gen_helper_mulq_s_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_APPEND: + check_dsp_r2(ctx); + gen_load_gpr(tcg_ctx, t0, rs); + if (rd != 0) { + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, tcg_ctx->cpu_gpr[rt], rd, 32 - rd); + } + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); + break; + case NM_MODSUB: + check_dsp(ctx); + gen_helper_modsub(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_SHRAV_R_W: + check_dsp(ctx); + gen_helper_shra_r_w(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_SHRLV_PH: + check_dsp_r2(ctx); + gen_helper_shrl_ph(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_SHRLV_QB: + check_dsp(ctx); + gen_helper_shrl_qb(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_SHLLV_QB: + check_dsp(ctx); + gen_helper_shll_qb(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_SHLLV_S_W: + check_dsp(ctx); + gen_helper_shll_s_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_SHILO: + check_dsp(ctx); + { + TCGv tv0 = tcg_temp_new(tcg_ctx); + TCGv tv1 = tcg_temp_new(tcg_ctx); + int16_t imm = extract32(ctx->opcode, 16, 7); + + tcg_gen_movi_tl(tcg_ctx, tv0, rd >> 3); + tcg_gen_movi_tl(tcg_ctx, tv1, imm); + gen_helper_shilo(tcg_ctx, tv0, tv1, tcg_ctx->cpu_env); + } + break; + case NM_MULEQ_S_W_PHL: + check_dsp(ctx); + gen_helper_muleq_s_w_phl(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_MULEQ_S_W_PHR: + check_dsp(ctx); + gen_helper_muleq_s_w_phr(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_MUL_S_PH: + check_dsp_r2(ctx); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* MUL_PH */ + gen_helper_mul_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case 1: + /* MUL_S_PH */ + gen_helper_mul_s_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + } + break; + case NM_PRECR_QB_PH: + 
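+ /*
+  * Precision-reduce: pack the low byte of each of the four halfwords
+  * in the (rs, rt) pair into one quad-byte word, roughly
+  *   rd = { rs.ph1.lo8, rs.ph0.lo8, rt.ph1.lo8, rt.ph0.lo8 }
+  * from most to least significant byte.
+  */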
check_dsp_r2(ctx); + gen_helper_precr_qb_ph(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_PRECRQ_QB_PH: + check_dsp(ctx); + gen_helper_precrq_qb_ph(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_PRECRQ_PH_W: + check_dsp(ctx); + gen_helper_precrq_ph_w(tcg_ctx, v1_t, v1_t, v2_t); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_PRECRQ_RS_PH_W: + check_dsp(ctx); + gen_helper_precrq_rs_ph_w(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_PRECRQU_S_QB_PH: + check_dsp(ctx); + gen_helper_precrqu_s_qb_ph(tcg_ctx, v1_t, v1_t, v2_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, ret); + break; + case NM_SHRA_R_W: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd); + gen_helper_shra_r_w(tcg_ctx, v1_t, t0, v1_t); + gen_store_gpr(tcg_ctx, v1_t, rt); + break; + case NM_SHRA_R_PH: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd >> 1); + switch (extract32(ctx->opcode, 10, 1)) { + case 0: + /* SHRA_PH */ + gen_helper_shra_ph(tcg_ctx, v1_t, t0, v1_t); + gen_store_gpr(tcg_ctx, v1_t, rt); + break; + case 1: + /* SHRA_R_PH */ + gen_helper_shra_r_ph(tcg_ctx, v1_t, t0, v1_t); + gen_store_gpr(tcg_ctx, v1_t, rt); + break; + } + break; + case NM_SHLL_S_PH: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd >> 1); + switch (extract32(ctx->opcode, 10, 2)) { + case 0: + /* SHLL_PH */ + gen_helper_shll_ph(tcg_ctx, v1_t, t0, v1_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, rt); + break; + case 2: + /* SHLL_S_PH */ + gen_helper_shll_s_ph(tcg_ctx, v1_t, t0, v1_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, rt); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_SHLL_S_W: + check_dsp(ctx); + tcg_gen_movi_tl(tcg_ctx, t0, rd); + gen_helper_shll_s_w(tcg_ctx, v1_t, t0, v1_t, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, v1_t, rt); + break; + case NM_REPL_PH: + check_dsp(ctx); + { + int16_t imm; + imm = sextract32(ctx->opcode, 11, 11); + imm = (int16_t)(imm << 6) >> 6; + if (rt != 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], dup_const(MO_16, imm)); + } + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint16_t insn; + uint32_t op; + int rt, rs, rd; + int offset; + int imm; + + insn = cpu_lduw_code(env, ctx->base.pc_next + 2); + ctx->opcode = (ctx->opcode << 16) | insn; + + rt = extract32(ctx->opcode, 21, 5); + rs = extract32(ctx->opcode, 16, 5); + rd = extract32(ctx->opcode, 11, 5); + + op = extract32(ctx->opcode, 26, 6); + switch (op) { + case NM_P_ADDIU: + if (rt == 0) { + /* P.RI */ + switch (extract32(ctx->opcode, 19, 2)) { + case NM_SIGRIE: + default: + generate_exception_end(ctx, EXCP_RI); + break; + case NM_P_SYSCALL: + if ((extract32(ctx->opcode, 18, 1)) == NM_SYSCALL) { + generate_exception_end(ctx, EXCP_SYSCALL); + } else { + generate_exception_end(ctx, EXCP_RI); + } + break; + case NM_BREAK: + generate_exception_end(ctx, EXCP_BREAK); + break; + case NM_SDBBP: + if (is_uhi(extract32(ctx->opcode, 0, 19))) { + // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); + } else { + if (ctx->hflags & MIPS_HFLAG_SBRI) { + generate_exception_end(ctx, EXCP_RI); + } else { + generate_exception_end(ctx, EXCP_DBp); + } + } + break; + } + } else { + /* NM_ADDIU */ + imm = extract32(ctx->opcode, 0, 16); + if (rs != 0) { + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], 
tcg_ctx->cpu_gpr[rs], imm); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], imm); + } + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); + } + break; + case NM_ADDIUPC: + if (rt != 0) { + offset = sextract32(ctx->opcode, 0, 1) << 21 | + extract32(ctx->opcode, 1, 20) << 1; + target_long addr = addr_add(ctx, ctx->base.pc_next + 4, offset); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], addr); + } + break; + case NM_POOL32A: + switch (ctx->opcode & 0x07) { + case NM_POOL32A0: + gen_pool32a0_nanomips_insn(env, ctx); + break; + case NM_POOL32A5: + { + int32_t op1 = extract32(ctx->opcode, 3, 7); + gen_pool32a5_nanomips_insn(ctx, op1, rd, rs, rt); + } + break; + case NM_POOL32A7: + switch (extract32(ctx->opcode, 3, 3)) { + case NM_P_LSX: + gen_p_lsx(ctx, rd, rs, rt); + break; + case NM_LSA: + /* + * In nanoMIPS, the shift field directly encodes the shift + * amount, meaning that the supported shift values are in + * the range 0 to 3 (instead of 1 to 4 in MIPSR6). + */ + gen_lsa(ctx, OPC_LSA, rd, rs, rt, + extract32(ctx->opcode, 9, 2) - 1); + break; + case NM_EXTW: + gen_ext(ctx, 32, rd, rs, rt, extract32(ctx->opcode, 6, 5)); + break; + case NM_POOL32AXF: + gen_pool32axf_nanomips_insn(env, ctx); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_P_GP_W: + switch (ctx->opcode & 0x03) { + case NM_ADDIUGP_W: + if (rt != 0) { + offset = extract32(ctx->opcode, 0, 21); + gen_op_addr_addi(ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[28], offset); + } + break; + case NM_LWGP: + gen_ld(ctx, OPC_LW, rt, 28, extract32(ctx->opcode, 2, 19) << 2); + break; + case NM_SWGP: + gen_st(ctx, OPC_SW, rt, 28, extract32(ctx->opcode, 2, 19) << 2); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_P48I: + { + insn = cpu_lduw_code(env, ctx->base.pc_next + 4); + target_long addr_off = extract32(ctx->opcode, 0, 16) | insn << 16; + switch (extract32(ctx->opcode, 16, 5)) { + case NM_LI48: + check_nms(ctx); + if (rt != 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], addr_off); + } + break; + case NM_ADDIU48: + check_nms(ctx); + if (rt != 0) { + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], addr_off); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); + } + break; + case NM_ADDIUGP48: + check_nms(ctx); + if (rt != 0) { + gen_op_addr_addi(ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[28], addr_off); + } + break; + case NM_ADDIUPC48: + check_nms(ctx); + if (rt != 0) { + target_long addr = addr_add(ctx, ctx->base.pc_next + 6, + addr_off); + + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], addr); + } + break; + case NM_LWPC48: + check_nms(ctx); + if (rt != 0) { + TCGv t0; + t0 = tcg_temp_new(tcg_ctx); + + target_long addr = addr_add(ctx, ctx->base.pc_next + 6, + addr_off); + + tcg_gen_movi_tl(tcg_ctx, t0, addr); + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, ctx->mem_idx, MO_TESL); + tcg_temp_free(tcg_ctx, t0); + } + break; + case NM_SWPC48: + check_nms(ctx); + { + TCGv t0, t1; + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + target_long addr = addr_add(ctx, ctx->base.pc_next + 6, + addr_off); + + tcg_gen_movi_tl(tcg_ctx, t0, addr); + gen_load_gpr(tcg_ctx, t1, rt); + + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUL); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + 
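+ /*
+  * P48I instructions carry an extra 16-bit immediate word, so this
+  * pool reports a 6-byte instruction length; every other 32-bit pool
+  * falls through to the common 'return 4' at the end of this
+  * function, and the caller advances the PC by the returned size.
+  */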
return 6; + } + case NM_P_U12: + switch (extract32(ctx->opcode, 12, 4)) { + case NM_ORI: + gen_logic_imm(ctx, OPC_ORI, rt, rs, extract32(ctx->opcode, 0, 12)); + break; + case NM_XORI: + gen_logic_imm(ctx, OPC_XORI, rt, rs, extract32(ctx->opcode, 0, 12)); + break; + case NM_ANDI: + gen_logic_imm(ctx, OPC_ANDI, rt, rs, extract32(ctx->opcode, 0, 12)); + break; + case NM_P_SR: + switch (extract32(ctx->opcode, 20, 1)) { + case NM_PP_SR: + switch (ctx->opcode & 3) { + case NM_SAVE: + gen_save(ctx, rt, extract32(ctx->opcode, 16, 4), + extract32(ctx->opcode, 2, 1), + extract32(ctx->opcode, 3, 9) << 3); + break; + case NM_RESTORE: + case NM_RESTORE_JRC: + gen_restore(ctx, rt, extract32(ctx->opcode, 16, 4), + extract32(ctx->opcode, 2, 1), + extract32(ctx->opcode, 3, 9) << 3); + if ((ctx->opcode & 3) == NM_RESTORE_JRC) { + gen_compute_branch_nm(ctx, OPC_JR, 2, 31, 0, 0); + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_P_SR_F: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_SLTI: + gen_slt_imm(ctx, OPC_SLTI, rt, rs, extract32(ctx->opcode, 0, 12)); + break; + case NM_SLTIU: + gen_slt_imm(ctx, OPC_SLTIU, rt, rs, extract32(ctx->opcode, 0, 12)); + break; + case NM_SEQI: + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + imm = extract32(ctx->opcode, 0, 12); + gen_load_gpr(tcg_ctx, t0, rs); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t0, t0, imm); + gen_store_gpr(tcg_ctx, t0, rt); + + tcg_temp_free(tcg_ctx, t0); + } + break; + case NM_ADDIUNEG: + imm = (int16_t) extract32(ctx->opcode, 0, 12); + gen_arith_imm(ctx, OPC_ADDIU, rt, rs, -imm); + break; + case NM_P_SHIFT: + { + int shift = extract32(ctx->opcode, 0, 5); + switch (extract32(ctx->opcode, 5, 4)) { + case NM_P_SLL: + if (rt == 0 && shift == 0) { + /* NOP */ + } else if (rt == 0 && shift == 3) { + /* EHB - treat as NOP */ + } else if (rt == 0 && shift == 5) { + /* PAUSE - treat as NOP */ + } else if (rt == 0 && shift == 6) { + /* SYNC */ + gen_sync(tcg_ctx, extract32(ctx->opcode, 16, 5)); + } else { + /* SLL */ + gen_shift_imm(ctx, OPC_SLL, rt, rs, + extract32(ctx->opcode, 0, 5)); + } + break; + case NM_SRL: + gen_shift_imm(ctx, OPC_SRL, rt, rs, + extract32(ctx->opcode, 0, 5)); + break; + case NM_SRA: + gen_shift_imm(ctx, OPC_SRA, rt, rs, + extract32(ctx->opcode, 0, 5)); + break; + case NM_ROTR: + gen_shift_imm(ctx, OPC_ROTR, rt, rs, + extract32(ctx->opcode, 0, 5)); + break; + } + } + break; + case NM_P_ROTX: + check_nms(ctx); + if (rt != 0) { + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv_i32 shift = tcg_const_i32(tcg_ctx, extract32(ctx->opcode, 0, 5)); + TCGv_i32 shiftx = tcg_const_i32(tcg_ctx, extract32(ctx->opcode, 7, 4) + << 1); + TCGv_i32 stripe = tcg_const_i32(tcg_ctx, extract32(ctx->opcode, 6, 1)); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_helper_rotx(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, shift, shiftx, stripe); + tcg_temp_free(tcg_ctx, t0); + + tcg_temp_free_i32(tcg_ctx, shift); + tcg_temp_free_i32(tcg_ctx, shiftx); + tcg_temp_free_i32(tcg_ctx, stripe); + } + break; + case NM_P_INS: + switch (((ctx->opcode >> 10) & 2) | + (extract32(ctx->opcode, 5, 1))) { + case NM_INS: + check_nms(ctx); + gen_bitops(ctx, OPC_INS, rt, rs, extract32(ctx->opcode, 0, 5), + extract32(ctx->opcode, 6, 5)); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_P_EXT: + switch (((ctx->opcode >> 10) & 2) | + (extract32(ctx->opcode, 5, 1))) { + case NM_EXT: + check_nms(ctx); + gen_bitops(ctx, OPC_EXT, rt, rs, extract32(ctx->opcode, 0, 5), + extract32(ctx->opcode, 6, 5)); + 
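+ /*
+  * EXT is the plain bitfield extract; with the position and size
+  * fields decoded from the opcode it computes, in C terms:
+  *   gpr[rt] = (gpr[rs] >> pos) & ((1u << size) - 1);
+  * NM_INS above is the matching insert into the same field of rt.
+  */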
break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_POOL32F: + gen_pool32f_nanomips_insn(ctx); + break; + case NM_POOL32S: + break; + case NM_P_LUI: + switch (extract32(ctx->opcode, 1, 1)) { + case NM_LUI: + if (rt != 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], + sextract32(ctx->opcode, 0, 1) << 31 | + extract32(ctx->opcode, 2, 10) << 21 | + extract32(ctx->opcode, 12, 9) << 12); + } + break; + case NM_ALUIPC: + if (rt != 0) { + offset = sextract32(ctx->opcode, 0, 1) << 31 | + extract32(ctx->opcode, 2, 10) << 21 | + extract32(ctx->opcode, 12, 9) << 12; + target_long addr; + addr = ~0xFFF & addr_add(ctx, ctx->base.pc_next + 4, offset); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], addr); + } + break; + } + break; + case NM_P_GP_BH: + { + uint32_t u = extract32(ctx->opcode, 0, 18); + + switch (extract32(ctx->opcode, 18, 3)) { + case NM_LBGP: + gen_ld(ctx, OPC_LB, rt, 28, u); + break; + case NM_SBGP: + gen_st(ctx, OPC_SB, rt, 28, u); + break; + case NM_LBUGP: + gen_ld(ctx, OPC_LBU, rt, 28, u); + break; + case NM_ADDIUGP_B: + if (rt != 0) { + gen_op_addr_addi(ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[28], u); + } + break; + case NM_P_GP_LH: + u &= ~1; + switch (ctx->opcode & 1) { + case NM_LHGP: + gen_ld(ctx, OPC_LH, rt, 28, u); + break; + case NM_LHUGP: + gen_ld(ctx, OPC_LHU, rt, 28, u); + break; + } + break; + case NM_P_GP_SH: + u &= ~1; + switch (ctx->opcode & 1) { + case NM_SHGP: + gen_st(ctx, OPC_SH, rt, 28, u); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_P_GP_CP1: + u &= ~0x3; + switch (ctx->opcode & 0x3) { + case NM_LWC1GP: + gen_cop1_ldst(ctx, OPC_LWC1, rt, 28, u); + break; + case NM_LDC1GP: + gen_cop1_ldst(ctx, OPC_LDC1, rt, 28, u); + break; + case NM_SWC1GP: + gen_cop1_ldst(ctx, OPC_SWC1, rt, 28, u); + break; + case NM_SDC1GP: + gen_cop1_ldst(ctx, OPC_SDC1, rt, 28, u); + break; + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + } + break; + case NM_P_LS_U12: + { + uint32_t u = extract32(ctx->opcode, 0, 12); + + switch (extract32(ctx->opcode, 12, 4)) { + case NM_P_PREFU12: + if (rt == 31) { + /* SYNCI */ + /* + * Break the TB to be able to sync copied instructions + * immediately. + */ + ctx->base.is_jmp = DISAS_STOP; + } else { + /* PREF */ + /* Treat as NOP. 
*/ + } + break; + case NM_LB: + gen_ld(ctx, OPC_LB, rt, rs, u); + break; + case NM_LH: + gen_ld(ctx, OPC_LH, rt, rs, u); + break; + case NM_LW: + gen_ld(ctx, OPC_LW, rt, rs, u); + break; + case NM_LBU: + gen_ld(ctx, OPC_LBU, rt, rs, u); + break; + case NM_LHU: + gen_ld(ctx, OPC_LHU, rt, rs, u); + break; + case NM_SB: + gen_st(ctx, OPC_SB, rt, rs, u); + break; + case NM_SH: + gen_st(ctx, OPC_SH, rt, rs, u); + break; + case NM_SW: + gen_st(ctx, OPC_SW, rt, rs, u); + break; + case NM_LWC1: + gen_cop1_ldst(ctx, OPC_LWC1, rt, rs, u); + break; + case NM_LDC1: + gen_cop1_ldst(ctx, OPC_LDC1, rt, rs, u); + break; + case NM_SWC1: + gen_cop1_ldst(ctx, OPC_SWC1, rt, rs, u); + break; + case NM_SDC1: + gen_cop1_ldst(ctx, OPC_SDC1, rt, rs, u); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + } + break; + case NM_P_LS_S9: + { + int32_t s = (sextract32(ctx->opcode, 15, 1) << 8) | + extract32(ctx->opcode, 0, 8); + + switch (extract32(ctx->opcode, 8, 3)) { + case NM_P_LS_S0: + switch (extract32(ctx->opcode, 11, 4)) { + case NM_LBS9: + gen_ld(ctx, OPC_LB, rt, rs, s); + break; + case NM_LHS9: + gen_ld(ctx, OPC_LH, rt, rs, s); + break; + case NM_LWS9: + gen_ld(ctx, OPC_LW, rt, rs, s); + break; + case NM_LBUS9: + gen_ld(ctx, OPC_LBU, rt, rs, s); + break; + case NM_LHUS9: + gen_ld(ctx, OPC_LHU, rt, rs, s); + break; + case NM_SBS9: + gen_st(ctx, OPC_SB, rt, rs, s); + break; + case NM_SHS9: + gen_st(ctx, OPC_SH, rt, rs, s); + break; + case NM_SWS9: + gen_st(ctx, OPC_SW, rt, rs, s); + break; + case NM_LWC1S9: + gen_cop1_ldst(ctx, OPC_LWC1, rt, rs, s); + break; + case NM_LDC1S9: + gen_cop1_ldst(ctx, OPC_LDC1, rt, rs, s); + break; + case NM_SWC1S9: + gen_cop1_ldst(ctx, OPC_SWC1, rt, rs, s); + break; + case NM_SDC1S9: + gen_cop1_ldst(ctx, OPC_SDC1, rt, rs, s); + break; + case NM_P_PREFS9: + if (rt == 31) { + /* SYNCI */ + /* + * Break the TB to be able to sync copied instructions + * immediately. + */ + ctx->base.is_jmp = DISAS_STOP; + } else { + /* PREF */ + /* Treat as NOP. 
*/ + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_P_LS_S1: + switch (extract32(ctx->opcode, 11, 4)) { + case NM_UALH: + case NM_UASH: + check_nms(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_base_offset_addr(ctx, t0, rs, s); + + switch (extract32(ctx->opcode, 11, 4)) { + case NM_UALH: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TESW | + MO_UNALN); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case NM_UASH: + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_qemu_st_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_TEUW | + MO_UNALN); + break; + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + } + break; + case NM_P_LL: + switch (ctx->opcode & 0x03) { + case NM_LL: + gen_ld(ctx, OPC_LL, rt, rs, s); + break; + case NM_LLWP: + check_xnp(ctx); + gen_llwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5)); + break; + } + break; + case NM_P_SC: + switch (ctx->opcode & 0x03) { + case NM_SC: + gen_st_cond(ctx, rt, rs, s, MO_TESL, false); + break; + case NM_SCWP: + check_xnp(ctx); + gen_scwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5), + false); + break; + } + break; + case NM_CACHE: + check_cp0_enabled(ctx); + if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { + gen_cache_operation(ctx, rt, rs, s); + } + break; + } + break; + case NM_P_LS_E0: + switch (extract32(ctx->opcode, 11, 4)) { + case NM_LBE: + check_eva(ctx); + check_cp0_enabled(ctx); + gen_ld(ctx, OPC_LBE, rt, rs, s); + break; + case NM_SBE: + check_eva(ctx); + check_cp0_enabled(ctx); + gen_st(ctx, OPC_SBE, rt, rs, s); + break; + case NM_LBUE: + check_eva(ctx); + check_cp0_enabled(ctx); + gen_ld(ctx, OPC_LBUE, rt, rs, s); + break; + case NM_P_PREFE: + if (rt == 31) { + /* case NM_SYNCIE */ + check_eva(ctx); + check_cp0_enabled(ctx); + /* + * Break the TB to be able to sync copied instructions + * immediately. + */ + ctx->base.is_jmp = DISAS_STOP; + } else { + /* case NM_PREFE */ + check_eva(ctx); + check_cp0_enabled(ctx); + /* Treat as NOP. 
*/ + } + break; + case NM_LHE: + check_eva(ctx); + check_cp0_enabled(ctx); + gen_ld(ctx, OPC_LHE, rt, rs, s); + break; + case NM_SHE: + check_eva(ctx); + check_cp0_enabled(ctx); + gen_st(ctx, OPC_SHE, rt, rs, s); + break; + case NM_LHUE: + check_eva(ctx); + check_cp0_enabled(ctx); + gen_ld(ctx, OPC_LHUE, rt, rs, s); + break; + case NM_CACHEE: + check_nms_dl_il_sl_tl_l2c(ctx); + gen_cache_operation(ctx, rt, rs, s); + break; + case NM_LWE: + check_eva(ctx); + check_cp0_enabled(ctx); + gen_ld(ctx, OPC_LWE, rt, rs, s); + break; + case NM_SWE: + check_eva(ctx); + check_cp0_enabled(ctx); + gen_st(ctx, OPC_SWE, rt, rs, s); + break; + case NM_P_LLE: + switch (extract32(ctx->opcode, 2, 2)) { + case NM_LLE: + check_xnp(ctx); + check_eva(ctx); + check_cp0_enabled(ctx); + gen_ld(ctx, OPC_LLE, rt, rs, s); + break; + case NM_LLWPE: + check_xnp(ctx); + check_eva(ctx); + check_cp0_enabled(ctx); + gen_llwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5)); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_P_SCE: + switch (extract32(ctx->opcode, 2, 2)) { + case NM_SCE: + check_xnp(ctx); + check_eva(ctx); + check_cp0_enabled(ctx); + gen_st_cond(ctx, rt, rs, s, MO_TESL, true); + break; + case NM_SCWPE: + check_xnp(ctx); + check_eva(ctx); + check_cp0_enabled(ctx); + gen_scwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5), + true); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + } + break; + case NM_P_LS_WM: + case NM_P_LS_UAWM: + check_nms(ctx); + { + int count = extract32(ctx->opcode, 12, 3); + int counter = 0; + + offset = sextract32(ctx->opcode, 15, 1) << 8 | + extract32(ctx->opcode, 0, 8); + TCGv va = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + MemOp memop = (extract32(ctx->opcode, 8, 3)) == + NM_P_LS_UAWM ? MO_UNALN : 0; + + count = (count == 0) ? 8 : count; + while (counter != count) { + int this_rt = ((rt + counter) & 0x1f) | (rt & 0x10); + int this_offset = offset + (counter << 2); + + gen_base_offset_addr(ctx, va, rs, this_offset); + + switch (extract32(ctx->opcode, 11, 1)) { + case NM_LWM: + tcg_gen_qemu_ld_tl(tcg_ctx, t1, va, ctx->mem_idx, + memop | MO_TESL); + gen_store_gpr(tcg_ctx, t1, this_rt); + if ((this_rt == rs) && + (counter != (count - 1))) { + /* UNPREDICTABLE */ + } + break; + case NM_SWM: + this_rt = (rt == 0) ? 0 : this_rt; + gen_load_gpr(tcg_ctx, t1, this_rt); + tcg_gen_qemu_st_tl(tcg_ctx, t1, va, ctx->mem_idx, + memop | MO_TEUL); + break; + } + counter++; + } + tcg_temp_free(tcg_ctx, va); + tcg_temp_free(tcg_ctx, t1); + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + } + break; + case NM_MOVE_BALC: + check_nms(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + int32_t s = sextract32(ctx->opcode, 0, 1) << 21 | + extract32(ctx->opcode, 1, 20) << 1; + rd = (extract32(ctx->opcode, 24, 1)) == 0 ? 
4 : 5; + rt = decode_gpr_gpr4_zero(extract32(ctx->opcode, 25, 1) << 3 | + extract32(ctx->opcode, 21, 3)); + gen_load_gpr(tcg_ctx, t0, rt); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); + gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s); + tcg_temp_free(tcg_ctx, t0); + } + break; + case NM_P_BAL: + { + int32_t s = sextract32(ctx->opcode, 0, 1) << 25 | + extract32(ctx->opcode, 1, 24) << 1; + + if ((extract32(ctx->opcode, 25, 1)) == 0) { + /* BC */ + gen_compute_branch_nm(ctx, OPC_BEQ, 4, 0, 0, s); + } else { + /* BALC */ + gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s); + } + } + break; + case NM_P_J: + switch (extract32(ctx->opcode, 12, 4)) { + case NM_JALRC: + case NM_JALRC_HB: + gen_compute_branch_nm(ctx, OPC_JALR, 4, rs, rt, 0); + break; + case NM_P_BALRSC: + gen_compute_nanomips_pbalrsc_branch(ctx, rs, rt); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_P_BR1: + { + int32_t s = sextract32(ctx->opcode, 0, 1) << 14 | + extract32(ctx->opcode, 1, 13) << 1; + switch (extract32(ctx->opcode, 14, 2)) { + case NM_BEQC: + check_nms(ctx); + gen_compute_branch_nm(ctx, OPC_BEQ, 4, rs, rt, s); + break; + case NM_P_BR3A: + s = sextract32(ctx->opcode, 0, 1) << 14 | + extract32(ctx->opcode, 1, 13) << 1; + check_cp1_enabled(ctx); + switch (extract32(ctx->opcode, 16, 5)) { + case NM_BC1EQZC: + gen_compute_branch_cp1_nm(ctx, OPC_BC1EQZ, rt, s); + break; + case NM_BC1NEZC: + gen_compute_branch_cp1_nm(ctx, OPC_BC1NEZ, rt, s); + break; + case NM_BPOSGE32C: + check_dsp_r3(ctx); + { + int32_t imm = extract32(ctx->opcode, 1, 13) | + extract32(ctx->opcode, 0, 1) << 13; + + gen_compute_branch_nm(ctx, OPC_BPOSGE32, 4, -1, -2, + imm); + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_BGEC: + if (rs == rt) { + gen_compute_compact_branch_nm(ctx, OPC_BC, rs, rt, s); + } else { + gen_compute_compact_branch_nm(ctx, OPC_BGEC, rs, rt, s); + } + break; + case NM_BGEUC: + if (rs == rt || rt == 0) { + gen_compute_compact_branch_nm(ctx, OPC_BC, 0, 0, s); + } else if (rs == 0) { + gen_compute_compact_branch_nm(ctx, OPC_BEQZC, rt, 0, s); + } else { + gen_compute_compact_branch_nm(ctx, OPC_BGEUC, rs, rt, s); + } + break; + } + } + break; + case NM_P_BR2: + { + int32_t s = sextract32(ctx->opcode, 0, 1) << 14 | + extract32(ctx->opcode, 1, 13) << 1; + switch (extract32(ctx->opcode, 14, 2)) { + case NM_BNEC: + check_nms(ctx); + gen_compute_branch_nm(ctx, OPC_BNE, 4, rs, rt, s); + break; + case NM_BLTC: + if (rs != 0 && rt != 0 && rs == rt) { + /* NOP */ + ctx->hflags |= MIPS_HFLAG_FBNSLOT; + } else { + gen_compute_compact_branch_nm(ctx, OPC_BLTC, rs, rt, s); + } + break; + case NM_BLTUC: + if (rs == 0 || rs == rt) { + /* NOP */ + ctx->hflags |= MIPS_HFLAG_FBNSLOT; + } else { + gen_compute_compact_branch_nm(ctx, OPC_BLTUC, rs, rt, s); + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + } + break; + case NM_P_BRI: + { + int32_t s = sextract32(ctx->opcode, 0, 1) << 11 | + extract32(ctx->opcode, 1, 10) << 1; + uint32_t u = extract32(ctx->opcode, 11, 7); + + gen_compute_imm_branch(ctx, extract32(ctx->opcode, 18, 3), + rt, u, s); + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + return 4; +} + +static int decode_nanomips_opc(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t op; + int rt = decode_gpr_gpr3(NANOMIPS_EXTRACT_RT3(ctx->opcode)); + int rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode)); + int rd = 
decode_gpr_gpr3(NANOMIPS_EXTRACT_RD3(ctx->opcode)); + int offset; + int imm; + + /* make sure instructions are on a halfword boundary */ + if (ctx->base.pc_next & 0x1) { + TCGv tmp = tcg_const_tl(tcg_ctx, ctx->base.pc_next); + tcg_gen_st_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); + tcg_temp_free(tcg_ctx, tmp); + generate_exception_end(ctx, EXCP_AdEL); + return 2; + } + + op = extract32(ctx->opcode, 10, 6); + switch (op) { + case NM_P16_MV: + rt = NANOMIPS_EXTRACT_RD5(ctx->opcode); + if (rt != 0) { + /* MOVE */ + rs = NANOMIPS_EXTRACT_RS5(ctx->opcode); + gen_arith(ctx, OPC_ADDU, rt, rs, 0); + } else { + /* P16.RI */ + switch (extract32(ctx->opcode, 3, 2)) { + case NM_P16_SYSCALL: + if (extract32(ctx->opcode, 2, 1) == 0) { + generate_exception_end(ctx, EXCP_SYSCALL); + } else { + generate_exception_end(ctx, EXCP_RI); + } + break; + case NM_BREAK16: + generate_exception_end(ctx, EXCP_BREAK); + break; + case NM_SDBBP16: + if (is_uhi(extract32(ctx->opcode, 0, 3))) { + // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); + } else { + if (ctx->hflags & MIPS_HFLAG_SBRI) { + generate_exception_end(ctx, EXCP_RI); + } else { + generate_exception_end(ctx, EXCP_DBp); + } + } + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + } + break; + case NM_P16_SHIFT: + { + int shift = extract32(ctx->opcode, 0, 3); + uint32_t opc = 0; + shift = (shift == 0) ? 8 : shift; + + switch (extract32(ctx->opcode, 3, 1)) { + case NM_SLL16: + opc = OPC_SLL; + break; + case NM_SRL16: + opc = OPC_SRL; + break; + } + gen_shift_imm(ctx, opc, rt, rs, shift); + } + break; + case NM_P16C: + switch (ctx->opcode & 1) { + case NM_POOL16C_0: + gen_pool16c_nanomips_insn(ctx); + break; + case NM_LWXS16: + gen_ldxs(ctx, rt, rs, rd); + break; + } + break; + case NM_P16_A1: + switch (extract32(ctx->opcode, 6, 1)) { + case NM_ADDIUR1SP: + imm = extract32(ctx->opcode, 0, 6) << 2; + gen_arith_imm(ctx, OPC_ADDIU, rt, 29, imm); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_P16_A2: + switch (extract32(ctx->opcode, 3, 1)) { + case NM_ADDIUR2: + imm = extract32(ctx->opcode, 0, 3) << 2; + gen_arith_imm(ctx, OPC_ADDIU, rt, rs, imm); + break; + case NM_P_ADDIURS5: + rt = extract32(ctx->opcode, 5, 5); + if (rt != 0) { + /* imm = sign_extend(s[3] . s[2:0] , from_nbits = 4) */ + imm = (sextract32(ctx->opcode, 4, 1) << 3) | + (extract32(ctx->opcode, 0, 3)); + gen_arith_imm(ctx, OPC_ADDIU, rt, rt, imm); + } + break; + } + break; + case NM_P16_ADDU: + switch (ctx->opcode & 0x1) { + case NM_ADDU16: + gen_arith(ctx, OPC_ADDU, rd, rs, rt); + break; + case NM_SUBU16: + gen_arith(ctx, OPC_SUBU, rd, rs, rt); + break; + } + break; + case NM_P16_4X4: + rt = (extract32(ctx->opcode, 9, 1) << 3) | + extract32(ctx->opcode, 5, 3); + rs = (extract32(ctx->opcode, 4, 1) << 3) | + extract32(ctx->opcode, 0, 3); + rt = decode_gpr_gpr4(rt); + rs = decode_gpr_gpr4(rs); + switch ((extract32(ctx->opcode, 7, 2) & 0x2) | + (extract32(ctx->opcode, 3, 1))) { + case NM_ADDU4X4: + check_nms(ctx); + gen_arith(ctx, OPC_ADDU, rt, rs, rt); + break; + case NM_MUL4X4: + check_nms(ctx); + gen_r6_muldiv(ctx, R6_OPC_MUL, rt, rs, rt); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_LI16: + { + int imm = extract32(ctx->opcode, 0, 7); + imm = (imm == 0x7f ? -1 : imm); + if (rt != 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], imm); + } + } + break; + case NM_ANDI16: + { + uint32_t u = extract32(ctx->opcode, 0, 4); + u = (u == 12) ? 
0xff : + (u == 13) ? 0xffff : u; + gen_logic_imm(ctx, OPC_ANDI, rt, rs, u); + } + break; + case NM_P16_LB: + offset = extract32(ctx->opcode, 0, 2); + switch (extract32(ctx->opcode, 2, 2)) { + case NM_LB16: + gen_ld(ctx, OPC_LB, rt, rs, offset); + break; + case NM_SB16: + rt = decode_gpr_gpr3_src_store( + NANOMIPS_EXTRACT_RT3(ctx->opcode)); + gen_st(ctx, OPC_SB, rt, rs, offset); + break; + case NM_LBU16: + gen_ld(ctx, OPC_LBU, rt, rs, offset); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_P16_LH: + offset = extract32(ctx->opcode, 1, 2) << 1; + switch ((extract32(ctx->opcode, 3, 1) << 1) | (ctx->opcode & 1)) { + case NM_LH16: + gen_ld(ctx, OPC_LH, rt, rs, offset); + break; + case NM_SH16: + rt = decode_gpr_gpr3_src_store( + NANOMIPS_EXTRACT_RT3(ctx->opcode)); + gen_st(ctx, OPC_SH, rt, rs, offset); + break; + case NM_LHU16: + gen_ld(ctx, OPC_LHU, rt, rs, offset); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case NM_LW16: + offset = extract32(ctx->opcode, 0, 4) << 2; + gen_ld(ctx, OPC_LW, rt, rs, offset); + break; + case NM_LWSP16: + rt = NANOMIPS_EXTRACT_RD5(ctx->opcode); + offset = extract32(ctx->opcode, 0, 5) << 2; + gen_ld(ctx, OPC_LW, rt, 29, offset); + break; + case NM_LW4X4: + check_nms(ctx); + rt = (extract32(ctx->opcode, 9, 1) << 3) | + extract32(ctx->opcode, 5, 3); + rs = (extract32(ctx->opcode, 4, 1) << 3) | + extract32(ctx->opcode, 0, 3); + offset = (extract32(ctx->opcode, 3, 1) << 3) | + (extract32(ctx->opcode, 8, 1) << 2); + rt = decode_gpr_gpr4(rt); + rs = decode_gpr_gpr4(rs); + gen_ld(ctx, OPC_LW, rt, rs, offset); + break; + case NM_SW4X4: + check_nms(ctx); + rt = (extract32(ctx->opcode, 9, 1) << 3) | + extract32(ctx->opcode, 5, 3); + rs = (extract32(ctx->opcode, 4, 1) << 3) | + extract32(ctx->opcode, 0, 3); + offset = (extract32(ctx->opcode, 3, 1) << 3) | + (extract32(ctx->opcode, 8, 1) << 2); + rt = decode_gpr_gpr4_zero(rt); + rs = decode_gpr_gpr4(rs); + gen_st(ctx, OPC_SW, rt, rs, offset); + break; + case NM_LWGP16: + offset = extract32(ctx->opcode, 0, 7) << 2; + gen_ld(ctx, OPC_LW, rt, 28, offset); + break; + case NM_SWSP16: + rt = NANOMIPS_EXTRACT_RD5(ctx->opcode); + offset = extract32(ctx->opcode, 0, 5) << 2; + gen_st(ctx, OPC_SW, rt, 29, offset); + break; + case NM_SW16: + rt = decode_gpr_gpr3_src_store( + NANOMIPS_EXTRACT_RT3(ctx->opcode)); + rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode)); + offset = extract32(ctx->opcode, 0, 4) << 2; + gen_st(ctx, OPC_SW, rt, rs, offset); + break; + case NM_SWGP16: + rt = decode_gpr_gpr3_src_store( + NANOMIPS_EXTRACT_RT3(ctx->opcode)); + offset = extract32(ctx->opcode, 0, 7) << 2; + gen_st(ctx, OPC_SW, rt, 28, offset); + break; + case NM_BC16: + gen_compute_branch_nm(ctx, OPC_BEQ, 2, 0, 0, + (sextract32(ctx->opcode, 0, 1) << 10) | + (extract32(ctx->opcode, 1, 9) << 1)); + break; + case NM_BALC16: + gen_compute_branch_nm(ctx, OPC_BGEZAL, 2, 0, 0, + (sextract32(ctx->opcode, 0, 1) << 10) | + (extract32(ctx->opcode, 1, 9) << 1)); + break; + case NM_BEQZC16: + gen_compute_branch_nm(ctx, OPC_BEQ, 2, rt, 0, + (sextract32(ctx->opcode, 0, 1) << 7) | + (extract32(ctx->opcode, 1, 6) << 1)); + break; + case NM_BNEZC16: + gen_compute_branch_nm(ctx, OPC_BNE, 2, rt, 0, + (sextract32(ctx->opcode, 0, 1) << 7) | + (extract32(ctx->opcode, 1, 6) << 1)); + break; + case NM_P16_BR: + switch (ctx->opcode & 0xf) { + case 0: + /* P16.JRC */ + switch (extract32(ctx->opcode, 4, 1)) { + case NM_JRC: + gen_compute_branch_nm(ctx, OPC_JR, 2, + extract32(ctx->opcode, 5, 5), 
0, 0); + break; + case NM_JALRC16: + gen_compute_branch_nm(ctx, OPC_JALR, 2, + extract32(ctx->opcode, 5, 5), 31, 0); + break; + } + break; + default: + { + /* P16.BRI */ + uint32_t opc = extract32(ctx->opcode, 4, 3) < + extract32(ctx->opcode, 7, 3) ? OPC_BEQ : OPC_BNE; + gen_compute_branch_nm(ctx, opc, 2, rs, rt, + extract32(ctx->opcode, 0, 4) << 1); + } + break; + } + break; + case NM_P16_SR: + { + int count = extract32(ctx->opcode, 0, 4); + int u = extract32(ctx->opcode, 4, 4) << 4; + + rt = 30 + extract32(ctx->opcode, 9, 1); + switch (extract32(ctx->opcode, 8, 1)) { + case NM_SAVE16: + gen_save(ctx, rt, count, 0, u); + break; + case NM_RESTORE_JRC16: + gen_restore(ctx, rt, count, 0, u); + gen_compute_branch_nm(ctx, OPC_JR, 2, 31, 0, 0); + break; + } + } + break; + case NM_MOVEP: + case NM_MOVEPREV: + check_nms(ctx); + { + static const int gpr2reg1[] = {4, 5, 6, 7}; + static const int gpr2reg2[] = {5, 6, 7, 8}; + int re; + int rd2 = extract32(ctx->opcode, 3, 1) << 1 | + extract32(ctx->opcode, 8, 1); + int r1 = gpr2reg1[rd2]; + int r2 = gpr2reg2[rd2]; + int r3 = extract32(ctx->opcode, 4, 1) << 3 | + extract32(ctx->opcode, 0, 3); + int r4 = extract32(ctx->opcode, 9, 1) << 3 | + extract32(ctx->opcode, 5, 3); + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + if (op == NM_MOVEP) { + rd = r1; + re = r2; + rs = decode_gpr_gpr4_zero(r3); + rt = decode_gpr_gpr4_zero(r4); + } else { + rd = decode_gpr_gpr4(r3); + re = decode_gpr_gpr4(r4); + rs = r1; + rt = r2; + } + gen_load_gpr(tcg_ctx, t0, rs); + gen_load_gpr(tcg_ctx, t1, rt); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], t0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[re], t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + } + break; + default: + return decode_nanomips_32_48_opc(env, ctx); + } + + return 2; +} + + +/* SmartMIPS extension to MIPS32 */ + +#if defined(TARGET_MIPS64) + +/* MDMX extension to MIPS64 */ + +#endif + +/* MIPSDSP functions. */ +static void gen_mipsdsp_ld(DisasContext *ctx, uint32_t opc, + int rd, int base, int offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + check_dsp(ctx); + t0 = tcg_temp_new(tcg_ctx); + + if (base == 0) { + gen_load_gpr(tcg_ctx, t0, offset); + } else if (offset == 0) { + gen_load_gpr(tcg_ctx, t0, base); + } else { + gen_op_addr_add(ctx, t0, tcg_ctx->cpu_gpr[base], tcg_ctx->cpu_gpr[offset]); + } + + switch (opc) { + case OPC_LBUX: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_UB); + gen_store_gpr(tcg_ctx, t0, rd); + break; + case OPC_LHX: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TESW); + gen_store_gpr(tcg_ctx, t0, rd); + break; + case OPC_LWX: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TESL); + gen_store_gpr(tcg_ctx, t0, rd); + break; +#if defined(TARGET_MIPS64) + case OPC_LDX: + tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, ctx->mem_idx, MO_TEQ); + gen_store_gpr(tcg_ctx, t0, rd); + break; +#endif + } + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, + int ret, int v1, int v2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv v1_t; + TCGv v2_t; + + if (ret == 0) { + /* Treat as NOP. 
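+         * GPR 0 is hardwired to zero on MIPS, so with ret == 0 the operation has no architectural effect and no code is emitted.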
*/ + return; + } + + v1_t = tcg_temp_new(tcg_ctx); + v2_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, v1_t, v1); + gen_load_gpr(tcg_ctx, v2_t, v2); + + switch (op1) { + /* OPC_MULT_G_2E is equal OPC_ADDUH_QB_DSP */ + case OPC_MULT_G_2E: + check_dsp_r2(ctx); + switch (op2) { + case OPC_ADDUH_QB: + gen_helper_adduh_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDUH_R_QB: + gen_helper_adduh_r_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDQH_PH: + gen_helper_addqh_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDQH_R_PH: + gen_helper_addqh_r_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDQH_W: + gen_helper_addqh_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDQH_R_W: + gen_helper_addqh_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBUH_QB: + gen_helper_subuh_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBUH_R_QB: + gen_helper_subuh_r_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBQH_PH: + gen_helper_subqh_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBQH_R_PH: + gen_helper_subqh_r_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBQH_W: + gen_helper_subqh_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBQH_R_W: + gen_helper_subqh_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + } + break; + case OPC_ABSQ_S_PH_DSP: + switch (op2) { + case OPC_ABSQ_S_QB: + check_dsp_r2(ctx); + gen_helper_absq_s_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + case OPC_ABSQ_S_PH: + check_dsp(ctx); + gen_helper_absq_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + case OPC_ABSQ_S_W: + check_dsp(ctx); + gen_helper_absq_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + case OPC_PRECEQ_W_PHL: + check_dsp(ctx); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, 0xFFFF0000); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret]); + break; + case OPC_PRECEQ_W_PHR: + check_dsp(ctx); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, 0x0000FFFF); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], 16); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret]); + break; + case OPC_PRECEQU_PH_QBL: + check_dsp(ctx); + gen_helper_precequ_ph_qbl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_PH_QBR: + check_dsp(ctx); + gen_helper_precequ_ph_qbr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_PH_QBLA: + check_dsp(ctx); + gen_helper_precequ_ph_qbla(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_PH_QBRA: + check_dsp(ctx); + gen_helper_precequ_ph_qbra(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_PH_QBL: + check_dsp(ctx); + gen_helper_preceu_ph_qbl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_PH_QBR: + check_dsp(ctx); + gen_helper_preceu_ph_qbr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_PH_QBLA: + check_dsp(ctx); + gen_helper_preceu_ph_qbla(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_PH_QBRA: + check_dsp(ctx); + gen_helper_preceu_ph_qbra(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + } + break; + case OPC_ADDU_QB_DSP: + switch (op2) { + case OPC_ADDQ_PH: + check_dsp(ctx); + gen_helper_addq_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDQ_S_PH: + 
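+            /* The helper receives cpu_env so it can record saturation/overflow in DSPControl. */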
check_dsp(ctx); + gen_helper_addq_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDQ_S_W: + check_dsp(ctx); + gen_helper_addq_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_QB: + check_dsp(ctx); + gen_helper_addu_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_S_QB: + check_dsp(ctx); + gen_helper_addu_s_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_PH: + check_dsp_r2(ctx); + gen_helper_addu_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_S_PH: + check_dsp_r2(ctx); + gen_helper_addu_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_PH: + check_dsp(ctx); + gen_helper_subq_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_S_PH: + check_dsp(ctx); + gen_helper_subq_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_S_W: + check_dsp(ctx); + gen_helper_subq_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_QB: + check_dsp(ctx); + gen_helper_subu_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_S_QB: + check_dsp(ctx); + gen_helper_subu_s_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_PH: + check_dsp_r2(ctx); + gen_helper_subu_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_S_PH: + check_dsp_r2(ctx); + gen_helper_subu_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDSC: + check_dsp(ctx); + gen_helper_addsc(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDWC: + check_dsp(ctx); + gen_helper_addwc(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MODSUB: + check_dsp(ctx); + gen_helper_modsub(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_RADDU_W_QB: + check_dsp(ctx); + gen_helper_raddu_w_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t); + break; + } + break; + case OPC_CMPU_EQ_QB_DSP: + switch (op2) { + case OPC_PRECR_QB_PH: + check_dsp_r2(ctx); + gen_helper_precr_qb_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECRQ_QB_PH: + check_dsp(ctx); + gen_helper_precrq_qb_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECR_SRA_PH_W: + check_dsp_r2(ctx); + { + TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, v2); + gen_helper_precr_sra_ph_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], sa_t, v1_t, + tcg_ctx->cpu_gpr[ret]); + tcg_temp_free_i32(tcg_ctx, sa_t); + break; + } + case OPC_PRECR_SRA_R_PH_W: + check_dsp_r2(ctx); + { + TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, v2); + gen_helper_precr_sra_r_ph_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], sa_t, v1_t, + tcg_ctx->cpu_gpr[ret]); + tcg_temp_free_i32(tcg_ctx, sa_t); + break; + } + case OPC_PRECRQ_PH_W: + check_dsp(ctx); + gen_helper_precrq_ph_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECRQ_RS_PH_W: + check_dsp(ctx); + gen_helper_precrq_rs_ph_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PRECRQU_S_QB_PH: + check_dsp(ctx); + gen_helper_precrqu_s_qb_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_ABSQ_S_QH_DSP: + switch (op2) { + case OPC_PRECEQ_L_PWL: + check_dsp(ctx); + 
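+            /* PRECEQ.L.PWL: keep the left (upper) word of the paired-word source; the mask clears the low 32 bits. */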
tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, 0xFFFFFFFF00000000ull); + break; + case OPC_PRECEQ_L_PWR: + check_dsp(ctx); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, 32); + break; + case OPC_PRECEQ_PW_QHL: + check_dsp(ctx); + gen_helper_preceq_pw_qhl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQ_PW_QHR: + check_dsp(ctx); + gen_helper_preceq_pw_qhr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQ_PW_QHLA: + check_dsp(ctx); + gen_helper_preceq_pw_qhla(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQ_PW_QHRA: + check_dsp(ctx); + gen_helper_preceq_pw_qhra(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_QH_OBL: + check_dsp(ctx); + gen_helper_precequ_qh_obl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_QH_OBR: + check_dsp(ctx); + gen_helper_precequ_qh_obr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_QH_OBLA: + check_dsp(ctx); + gen_helper_precequ_qh_obla(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_QH_OBRA: + check_dsp(ctx); + gen_helper_precequ_qh_obra(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_QH_OBL: + check_dsp(ctx); + gen_helper_preceu_qh_obl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_QH_OBR: + check_dsp(ctx); + gen_helper_preceu_qh_obr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_QH_OBLA: + check_dsp(ctx); + gen_helper_preceu_qh_obla(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_QH_OBRA: + check_dsp(ctx); + gen_helper_preceu_qh_obra(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t); + break; + case OPC_ABSQ_S_OB: + check_dsp_r2(ctx); + gen_helper_absq_s_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + case OPC_ABSQ_S_PW: + check_dsp(ctx); + gen_helper_absq_s_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + case OPC_ABSQ_S_QH: + check_dsp(ctx); + gen_helper_absq_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + } + break; + case OPC_ADDU_OB_DSP: + switch (op2) { + case OPC_RADDU_L_OB: + check_dsp(ctx); + gen_helper_raddu_l_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t); + break; + case OPC_SUBQ_PW: + check_dsp(ctx); + gen_helper_subq_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_S_PW: + check_dsp(ctx); + gen_helper_subq_s_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_QH: + check_dsp(ctx); + gen_helper_subq_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_S_QH: + check_dsp(ctx); + gen_helper_subq_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_OB: + check_dsp(ctx); + gen_helper_subu_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_S_OB: + check_dsp(ctx); + gen_helper_subu_s_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_QH: + check_dsp_r2(ctx); + gen_helper_subu_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_S_QH: + check_dsp_r2(ctx); + gen_helper_subu_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBUH_OB: + check_dsp_r2(ctx); + gen_helper_subuh_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBUH_R_OB: + check_dsp_r2(ctx); + gen_helper_subuh_r_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDQ_PW: + check_dsp(ctx); + 
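+            /* Paired-word (PW) and quad-halfword (QH) cases are the 64-bit counterparts of the PH/QB ops above. */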
gen_helper_addq_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDQ_S_PW: + check_dsp(ctx); + gen_helper_addq_s_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDQ_QH: + check_dsp(ctx); + gen_helper_addq_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDQ_S_QH: + check_dsp(ctx); + gen_helper_addq_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_OB: + check_dsp(ctx); + gen_helper_addu_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_S_OB: + check_dsp(ctx); + gen_helper_addu_s_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_QH: + check_dsp_r2(ctx); + gen_helper_addu_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_S_QH: + check_dsp_r2(ctx); + gen_helper_addu_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDUH_OB: + check_dsp_r2(ctx); + gen_helper_adduh_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDUH_R_OB: + check_dsp_r2(ctx); + gen_helper_adduh_r_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + } + break; + case OPC_CMPU_EQ_OB_DSP: + switch (op2) { + case OPC_PRECR_OB_QH: + check_dsp_r2(ctx); + gen_helper_precr_ob_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECR_SRA_QH_PW: + check_dsp_r2(ctx); + { + TCGv_i32 ret_t = tcg_const_i32(tcg_ctx, ret); + gen_helper_precr_sra_qh_pw(tcg_ctx, v2_t, v1_t, v2_t, ret_t); + tcg_temp_free_i32(tcg_ctx, ret_t); + break; + } + case OPC_PRECR_SRA_R_QH_PW: + check_dsp_r2(ctx); + { + TCGv_i32 sa_v = tcg_const_i32(tcg_ctx, ret); + gen_helper_precr_sra_r_qh_pw(tcg_ctx, v2_t, v1_t, v2_t, sa_v); + tcg_temp_free_i32(tcg_ctx, sa_v); + break; + } + case OPC_PRECRQ_OB_QH: + check_dsp(ctx); + gen_helper_precrq_ob_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECRQ_PW_L: + check_dsp(ctx); + gen_helper_precrq_pw_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECRQ_QH_PW: + check_dsp(ctx); + gen_helper_precrq_qh_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECRQ_RS_QH_PW: + check_dsp(ctx); + gen_helper_precrq_rs_qh_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PRECRQU_S_OB_QH: + check_dsp(ctx); + gen_helper_precrqu_s_ob_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#endif + } + + tcg_temp_free(tcg_ctx, v1_t); + tcg_temp_free(tcg_ctx, v2_t); +} + +static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, + int ret, int v1, int v2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t op2; + TCGv t0; + TCGv v1_t; + TCGv v2_t; + + if (ret == 0) { + /* Treat as NOP. 
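+         * As in gen_mipsdsp_arith, a shift whose destination is GPR 0 is a no-op.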
*/ + return; + } + + t0 = tcg_temp_new(tcg_ctx); + v1_t = tcg_temp_new(tcg_ctx); + v2_t = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, t0, v1); + gen_load_gpr(tcg_ctx, v1_t, v1); + gen_load_gpr(tcg_ctx, v2_t, v2); + + switch (opc) { + case OPC_SHLL_QB_DSP: + { + op2 = MASK_SHLL_QB(ctx->opcode); + switch (op2) { + case OPC_SHLL_QB: + check_dsp(ctx); + gen_helper_shll_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_QB: + check_dsp(ctx); + gen_helper_shll_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_PH: + check_dsp(ctx); + gen_helper_shll_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_PH: + check_dsp(ctx); + gen_helper_shll_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_S_PH: + check_dsp(ctx); + gen_helper_shll_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_S_PH: + check_dsp(ctx); + gen_helper_shll_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_S_W: + check_dsp(ctx); + gen_helper_shll_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_S_W: + check_dsp(ctx); + gen_helper_shll_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHRL_QB: + check_dsp(ctx); + gen_helper_shrl_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRLV_QB: + check_dsp(ctx); + gen_helper_shrl_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRL_PH: + check_dsp_r2(ctx); + gen_helper_shrl_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRLV_PH: + check_dsp_r2(ctx); + gen_helper_shrl_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRA_QB: + check_dsp_r2(ctx); + gen_helper_shra_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRA_R_QB: + check_dsp_r2(ctx); + gen_helper_shra_r_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRAV_QB: + check_dsp_r2(ctx); + gen_helper_shra_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRAV_R_QB: + check_dsp_r2(ctx); + gen_helper_shra_r_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRA_PH: + check_dsp(ctx); + gen_helper_shra_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRA_R_PH: + check_dsp(ctx); + gen_helper_shra_r_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRAV_PH: + check_dsp(ctx); + gen_helper_shra_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRAV_R_PH: + check_dsp(ctx); + gen_helper_shra_r_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRA_R_W: + check_dsp(ctx); + gen_helper_shra_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRAV_R_W: + check_dsp(ctx); + gen_helper_shra_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + default: /* Invalid */ + MIPS_INVAL("MASK SHLL.QB"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + } +#ifdef TARGET_MIPS64 + case OPC_SHLL_OB_DSP: + op2 = MASK_SHLL_OB(ctx->opcode); + switch (op2) { + case OPC_SHLL_PW: + check_dsp(ctx); + gen_helper_shll_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_PW: + check_dsp(ctx); + gen_helper_shll_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_S_PW: + check_dsp(ctx); + gen_helper_shll_s_pw(tcg_ctx, 
tcg_ctx->cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_S_PW: + check_dsp(ctx); + gen_helper_shll_s_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_OB: + check_dsp(ctx); + gen_helper_shll_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_OB: + check_dsp(ctx); + gen_helper_shll_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_QH: + check_dsp(ctx); + gen_helper_shll_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_QH: + check_dsp(ctx); + gen_helper_shll_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_S_QH: + check_dsp(ctx); + gen_helper_shll_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_S_QH: + check_dsp(ctx); + gen_helper_shll_s_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHRA_OB: + check_dsp_r2(ctx); + gen_helper_shra_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_OB: + check_dsp_r2(ctx); + gen_helper_shra_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRA_R_OB: + check_dsp_r2(ctx); + gen_helper_shra_r_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_R_OB: + check_dsp_r2(ctx); + gen_helper_shra_r_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRA_PW: + check_dsp(ctx); + gen_helper_shra_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_PW: + check_dsp(ctx); + gen_helper_shra_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRA_R_PW: + check_dsp(ctx); + gen_helper_shra_r_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_R_PW: + check_dsp(ctx); + gen_helper_shra_r_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRA_QH: + check_dsp(ctx); + gen_helper_shra_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_QH: + check_dsp(ctx); + gen_helper_shra_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRA_R_QH: + check_dsp(ctx); + gen_helper_shra_r_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_R_QH: + check_dsp(ctx); + gen_helper_shra_r_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRL_OB: + check_dsp(ctx); + gen_helper_shrl_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRLV_OB: + check_dsp(ctx); + gen_helper_shrl_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRL_QH: + check_dsp_r2(ctx); + gen_helper_shrl_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRLV_QH: + check_dsp_r2(ctx); + gen_helper_shrl_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v2_t, v1_t); + break; + default: /* Invalid */ + MIPS_INVAL("MASK SHLL.OB"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; +#endif + } + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, v1_t); + tcg_temp_free(tcg_ctx, v2_t); +} + +static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, + int ret, int v1, int v2, int check_ret) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0; + TCGv v1_t; + TCGv v2_t; + + if ((ret == 0) && (check_ret == 1)) { + /* Treat as NOP. 
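+         * check_ret is 1 only when ret names a GPR; the accumulator-targeting ops below pass 0, since for them ret is an accumulator index and 0 is a valid destination.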
*/ + return; + } + + t0 = tcg_temp_new_i32(tcg_ctx); + v1_t = tcg_temp_new(tcg_ctx); + v2_t = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_i32(tcg_ctx, t0, ret); + gen_load_gpr(tcg_ctx, v1_t, v1); + gen_load_gpr(tcg_ctx, v2_t, v2); + + switch (op1) { + /* + * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have + * the same mask and op1. + */ + case OPC_MULT_G_2E: + check_dsp_r2(ctx); + switch (op2) { + case OPC_MUL_PH: + gen_helper_mul_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MUL_S_PH: + gen_helper_mul_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULQ_S_W: + gen_helper_mulq_s_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULQ_RS_W: + gen_helper_mulq_rs_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; + case OPC_DPA_W_PH_DSP: + switch (op2) { + case OPC_DPAU_H_QBL: + check_dsp(ctx); + gen_helper_dpau_h_qbl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAU_H_QBR: + check_dsp(ctx); + gen_helper_dpau_h_qbr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSU_H_QBL: + check_dsp(ctx); + gen_helper_dpsu_h_qbl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSU_H_QBR: + check_dsp(ctx); + gen_helper_dpsu_h_qbr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPA_W_PH: + check_dsp_r2(ctx); + gen_helper_dpa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAX_W_PH: + check_dsp_r2(ctx); + gen_helper_dpax_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAQ_S_W_PH: + check_dsp(ctx); + gen_helper_dpaq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAQX_S_W_PH: + check_dsp_r2(ctx); + gen_helper_dpaqx_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAQX_SA_W_PH: + check_dsp_r2(ctx); + gen_helper_dpaqx_sa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPS_W_PH: + check_dsp_r2(ctx); + gen_helper_dps_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSX_W_PH: + check_dsp_r2(ctx); + gen_helper_dpsx_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSQ_S_W_PH: + check_dsp(ctx); + gen_helper_dpsq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSQX_S_W_PH: + check_dsp_r2(ctx); + gen_helper_dpsqx_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSQX_SA_W_PH: + check_dsp_r2(ctx); + gen_helper_dpsqx_sa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULSAQ_S_W_PH: + check_dsp(ctx); + gen_helper_mulsaq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAQ_SA_L_W: + check_dsp(ctx); + gen_helper_dpaq_sa_l_w(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSQ_SA_L_W: + check_dsp(ctx); + gen_helper_dpsq_sa_l_w(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_PHL: + check_dsp(ctx); + gen_helper_maq_s_w_phl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_PHR: + check_dsp(ctx); + gen_helper_maq_s_w_phr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_PHL: + check_dsp(ctx); + gen_helper_maq_sa_w_phl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_PHR: + check_dsp(ctx); + gen_helper_maq_sa_w_phr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULSA_W_PH: + check_dsp_r2(ctx); + gen_helper_mulsa_w_ph(tcg_ctx, t0, v1_t, v2_t, 
tcg_ctx->cpu_env); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_DPAQ_W_QH_DSP: + { + int ac = ret & 0x03; + tcg_gen_movi_i32(tcg_ctx, t0, ac); + + switch (op2) { + case OPC_DMADD: + check_dsp(ctx); + gen_helper_dmadd(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DMADDU: + check_dsp(ctx); + gen_helper_dmaddu(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DMSUB: + check_dsp(ctx); + gen_helper_dmsub(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DMSUBU: + check_dsp(ctx); + gen_helper_dmsubu(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPA_W_QH: + check_dsp_r2(ctx); + gen_helper_dpa_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPAQ_S_W_QH: + check_dsp(ctx); + gen_helper_dpaq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPAQ_SA_L_PW: + check_dsp(ctx); + gen_helper_dpaq_sa_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPAU_H_OBL: + check_dsp(ctx); + gen_helper_dpau_h_obl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPAU_H_OBR: + check_dsp(ctx); + gen_helper_dpau_h_obr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPS_W_QH: + check_dsp_r2(ctx); + gen_helper_dps_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPSQ_S_W_QH: + check_dsp(ctx); + gen_helper_dpsq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPSQ_SA_L_PW: + check_dsp(ctx); + gen_helper_dpsq_sa_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPSU_H_OBL: + check_dsp(ctx); + gen_helper_dpsu_h_obl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPSU_H_OBR: + check_dsp(ctx); + gen_helper_dpsu_h_obr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_L_PWL: + check_dsp(ctx); + gen_helper_maq_s_l_pwl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_L_PWR: + check_dsp(ctx); + gen_helper_maq_s_l_pwr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_QHLL: + check_dsp(ctx); + gen_helper_maq_s_w_qhll(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_QHLL: + check_dsp(ctx); + gen_helper_maq_sa_w_qhll(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_QHLR: + check_dsp(ctx); + gen_helper_maq_s_w_qhlr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_QHLR: + check_dsp(ctx); + gen_helper_maq_sa_w_qhlr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_QHRL: + check_dsp(ctx); + gen_helper_maq_s_w_qhrl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_QHRL: + check_dsp(ctx); + gen_helper_maq_sa_w_qhrl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_QHRR: + check_dsp(ctx); + gen_helper_maq_s_w_qhrr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_QHRR: + check_dsp(ctx); + gen_helper_maq_sa_w_qhrr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MULSAQ_S_L_PW: + check_dsp(ctx); + gen_helper_mulsaq_s_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MULSAQ_S_W_QH: + check_dsp(ctx); + gen_helper_mulsaq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + } + } + break; +#endif + case OPC_ADDU_QB_DSP: + switch (op2) { + case OPC_MULEU_S_PH_QBL: + check_dsp(ctx); + gen_helper_muleu_s_ph_qbl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEU_S_PH_QBR: + check_dsp(ctx); + 
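+            /* Saturation and DSPControl flag updates happen inside the helper, hence the cpu_env argument. */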
gen_helper_muleu_s_ph_qbr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULQ_RS_PH: + check_dsp(ctx); + gen_helper_mulq_rs_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEQ_S_W_PHL: + check_dsp(ctx); + gen_helper_muleq_s_w_phl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEQ_S_W_PHR: + check_dsp(ctx); + gen_helper_muleq_s_w_phr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULQ_S_PH: + check_dsp_r2(ctx); + gen_helper_mulq_s_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_ADDU_OB_DSP: + switch (op2) { + case OPC_MULEQ_S_PW_QHL: + check_dsp(ctx); + gen_helper_muleq_s_pw_qhl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEQ_S_PW_QHR: + check_dsp(ctx); + gen_helper_muleq_s_pw_qhr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEU_S_QH_OBL: + check_dsp(ctx); + gen_helper_muleu_s_qh_obl(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEU_S_QH_OBR: + check_dsp(ctx); + gen_helper_muleu_s_qh_obr(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULQ_RS_QH: + check_dsp(ctx); + gen_helper_mulq_rs_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#endif + } + + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, v1_t); + tcg_temp_free(tcg_ctx, v2_t); +} + +static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, + int ret, int val) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int16_t imm; + TCGv t0; + TCGv val_t; + + if (ret == 0) { + /* Treat as NOP. 
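+         * Bit-manipulation results always target a GPR here, so ret == 0 can be skipped unconditionally.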
*/ + return; + } + + t0 = tcg_temp_new(tcg_ctx); + val_t = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, val_t, val); + + switch (op1) { + case OPC_ABSQ_S_PH_DSP: + switch (op2) { + case OPC_BITREV: + check_dsp(ctx); + gen_helper_bitrev(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); + break; + case OPC_REPL_QB: + check_dsp(ctx); + { + target_long result; + imm = (ctx->opcode >> 16) & 0xFF; + result = (uint32_t)imm << 24 | + (uint32_t)imm << 16 | + (uint32_t)imm << 8 | + (uint32_t)imm; + result = (int32_t)result; + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], result); + } + break; + case OPC_REPLV_QB: + check_dsp(ctx); + tcg_gen_ext8u_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); + tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 8); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); + tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 16); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret]); + break; + case OPC_REPL_PH: + check_dsp(ctx); + { + imm = (ctx->opcode >> 16) & 0x03FF; + imm = (int16_t)(imm << 6) >> 6; + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], \ + (target_long)((int32_t)imm << 16 | \ + (uint16_t)imm)); + } + break; + case OPC_REPLV_PH: + check_dsp(ctx); + tcg_gen_ext16u_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); + tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 16); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret]); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_ABSQ_S_QH_DSP: + switch (op2) { + case OPC_REPL_OB: + check_dsp(ctx); + { + target_long temp; + + imm = (ctx->opcode >> 16) & 0xFF; + temp = ((uint64_t)imm << 8) | (uint64_t)imm; + temp = (temp << 16) | temp; + temp = (temp << 32) | temp; + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], temp); + break; + } + case OPC_REPL_PW: + check_dsp(ctx); + { + target_long temp; + + imm = (ctx->opcode >> 16) & 0x03FF; + imm = (int16_t)(imm << 6) >> 6; + temp = ((target_long)imm << 32) \ + | ((target_long)imm & 0xFFFFFFFF); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], temp); + break; + } + case OPC_REPL_QH: + check_dsp(ctx); + { + target_long temp; + + imm = (ctx->opcode >> 16) & 0x03FF; + imm = (int16_t)(imm << 6) >> 6; + + temp = ((uint64_t)(uint16_t)imm << 48) | + ((uint64_t)(uint16_t)imm << 32) | + ((uint64_t)(uint16_t)imm << 16) | + (uint64_t)(uint16_t)imm; + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], temp); + break; + } + case OPC_REPLV_OB: + check_dsp(ctx); + tcg_gen_ext8u_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); + tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 8); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); + tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 16); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); + tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 32); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); + break; + case OPC_REPLV_PW: + check_dsp(ctx); + tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); + tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 32); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); + break; + case OPC_REPLV_QH: + check_dsp(ctx); + tcg_gen_ext16u_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], val_t); + tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 16); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], 
tcg_ctx->cpu_gpr[ret], t0); + tcg_gen_shli_tl(tcg_ctx, t0, tcg_ctx->cpu_gpr[ret], 32); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], tcg_ctx->cpu_gpr[ret], t0); + break; + } + break; +#endif + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, val_t); +} + +static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, + uint32_t op1, uint32_t op2, + int ret, int v1, int v2, int check_ret) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t1; + TCGv v1_t; + TCGv v2_t; + + if ((ret == 0) && (check_ret == 1)) { + /* Treat as NOP. */ + return; + } + + t1 = tcg_temp_new(tcg_ctx); + v1_t = tcg_temp_new(tcg_ctx); + v2_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, v1_t, v1); + gen_load_gpr(tcg_ctx, v2_t, v2); + + switch (op1) { + case OPC_CMPU_EQ_QB_DSP: + switch (op2) { + case OPC_CMPU_EQ_QB: + check_dsp(ctx); + gen_helper_cmpu_eq_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPU_LT_QB: + check_dsp(ctx); + gen_helper_cmpu_lt_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPU_LE_QB: + check_dsp(ctx); + gen_helper_cmpu_le_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPGU_EQ_QB: + check_dsp(ctx); + gen_helper_cmpgu_eq_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPGU_LT_QB: + check_dsp(ctx); + gen_helper_cmpgu_lt_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPGU_LE_QB: + check_dsp(ctx); + gen_helper_cmpgu_le_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPGDU_EQ_QB: + check_dsp_r2(ctx); + gen_helper_cmpgu_eq_qb(tcg_ctx, t1, v1_t, v2_t); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], t1); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, t1); + break; + case OPC_CMPGDU_LT_QB: + check_dsp_r2(ctx); + gen_helper_cmpgu_lt_qb(tcg_ctx, t1, v1_t, v2_t); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], t1); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, t1); + break; + case OPC_CMPGDU_LE_QB: + check_dsp_r2(ctx); + gen_helper_cmpgu_le_qb(tcg_ctx, t1, v1_t, v2_t); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[ret], t1); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_dspctrl, tcg_ctx->cpu_dspctrl, t1); + break; + case OPC_CMP_EQ_PH: + check_dsp(ctx); + gen_helper_cmp_eq_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LT_PH: + check_dsp(ctx); + gen_helper_cmp_lt_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LE_PH: + check_dsp(ctx); + gen_helper_cmp_le_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PICK_QB: + check_dsp(ctx); + gen_helper_pick_qb(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PICK_PH: + check_dsp(ctx); + gen_helper_pick_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PACKRL_PH: + check_dsp(ctx); + gen_helper_packrl_ph(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_CMPU_EQ_OB_DSP: + switch (op2) { + case OPC_CMP_EQ_PW: + check_dsp(ctx); + gen_helper_cmp_eq_pw(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LT_PW: + check_dsp(ctx); + gen_helper_cmp_lt_pw(tcg_ctx, v1_t, v2_t, 
tcg_ctx->cpu_env); + break; + case OPC_CMP_LE_PW: + check_dsp(ctx); + gen_helper_cmp_le_pw(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_EQ_QH: + check_dsp(ctx); + gen_helper_cmp_eq_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LT_QH: + check_dsp(ctx); + gen_helper_cmp_lt_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LE_QH: + check_dsp(ctx); + gen_helper_cmp_le_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPGDU_EQ_OB: + check_dsp_r2(ctx); + gen_helper_cmpgdu_eq_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPGDU_LT_OB: + check_dsp_r2(ctx); + gen_helper_cmpgdu_lt_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPGDU_LE_OB: + check_dsp_r2(ctx); + gen_helper_cmpgdu_le_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPGU_EQ_OB: + check_dsp(ctx); + gen_helper_cmpgu_eq_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPGU_LT_OB: + check_dsp(ctx); + gen_helper_cmpgu_lt_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPGU_LE_OB: + check_dsp(ctx); + gen_helper_cmpgu_le_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPU_EQ_OB: + check_dsp(ctx); + gen_helper_cmpu_eq_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPU_LT_OB: + check_dsp(ctx); + gen_helper_cmpu_lt_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPU_LE_OB: + check_dsp(ctx); + gen_helper_cmpu_le_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PACKRL_PW: + check_dsp(ctx); + gen_helper_packrl_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PICK_OB: + check_dsp(ctx); + gen_helper_pick_ob(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PICK_PW: + check_dsp(ctx); + gen_helper_pick_pw(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PICK_QH: + check_dsp(ctx); + gen_helper_pick_qh(tcg_ctx, tcg_ctx->cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#endif + } + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, v1_t); + tcg_temp_free(tcg_ctx, v2_t); +} + +static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx, + uint32_t op1, int rt, int rs, int sa) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + check_dsp_r2(ctx); + + if (rt == 0) { + /* Treat as NOP. 
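+         * APPEND, PREPEND and BALIGN both read and write rt, so rt == 0 reduces to a no-op.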
*/ + return; + } + + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rs); + + switch (op1) { + case OPC_APPEND_DSP: + switch (MASK_APPEND(ctx->opcode)) { + case OPC_APPEND: + if (sa != 0) { + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, tcg_ctx->cpu_gpr[rt], sa, 32 - sa); + } + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); + break; + case OPC_PREPEND: + if (sa != 0) { + tcg_gen_ext32u_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], sa); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 32 - sa); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], t0); + } + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); + break; + case OPC_BALIGN: + sa &= 3; + if (sa != 0 && sa != 2) { + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], 8 * sa); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_shri_tl(tcg_ctx, t0, t0, 8 * (4 - sa)); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], t0); + } + tcg_gen_ext32s_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt]); + break; + default: /* Invalid */ + MIPS_INVAL("MASK APPEND"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_DAPPEND_DSP: + switch (MASK_DAPPEND(ctx->opcode)) { + case OPC_DAPPEND: + if (sa != 0) { + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, tcg_ctx->cpu_gpr[rt], sa, 64 - sa); + } + break; + case OPC_PREPENDD: + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], 0x20 | sa); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 64 - (0x20 | sa)); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, t0); + break; + case OPC_PREPENDW: + if (sa != 0) { + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], sa); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 64 - sa); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], t0); + } + break; + case OPC_DBALIGN: + sa &= 7; + if (sa != 0 && sa != 2 && sa != 4) { + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], 8 * sa); + tcg_gen_shri_tl(tcg_ctx, t0, t0, 8 * (8 - sa)); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_gpr[rt], t0); + } + break; + default: /* Invalid */ + MIPS_INVAL("MASK DAPPEND"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; +#endif + } + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, + int ret, int v1, int v2, int check_ret) + +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + TCGv t1; + TCGv v1_t; + TCGv v2_t; + int16_t imm; + + if ((ret == 0) && (check_ret == 1)) { + /* Treat as NOP. 
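+         * Same convention as gen_mipsdsp_multiply: check_ret == 0 means ret is an accumulator index rather than a GPR.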
*/ + return; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + v1_t = tcg_temp_new(tcg_ctx); + v2_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, v1_t, v1); + gen_load_gpr(tcg_ctx, v2_t, v2); + + switch (op1) { + case OPC_EXTR_W_DSP: + check_dsp(ctx); + switch (op2) { + case OPC_EXTR_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extr_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTR_R_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extr_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTR_RS_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extr_rs_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTR_S_H: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extr_s_h(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTRV_S_H: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extr_s_h(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_EXTRV_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extr_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_EXTRV_R_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extr_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_EXTRV_RS_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extr_rs_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_EXTP: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTPV: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_EXTPDP: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extpdp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTPDPV: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extpdp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHILO: + imm = (ctx->opcode >> 20) & 0x3F; + tcg_gen_movi_tl(tcg_ctx, t0, ret); + tcg_gen_movi_tl(tcg_ctx, t1, imm); + gen_helper_shilo(tcg_ctx, t0, t1, tcg_ctx->cpu_env); + break; + case OPC_SHILOV: + tcg_gen_movi_tl(tcg_ctx, t0, ret); + gen_helper_shilo(tcg_ctx, t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_MTHLIP: + tcg_gen_movi_tl(tcg_ctx, t0, ret); + gen_helper_mthlip(tcg_ctx, t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_WRDSP: + imm = (ctx->opcode >> 11) & 0x3FF; + tcg_gen_movi_tl(tcg_ctx, t0, imm); + gen_helper_wrdsp(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); + break; + case OPC_RDDSP: + imm = (ctx->opcode >> 16) & 0x03FF; + tcg_gen_movi_tl(tcg_ctx, t0, imm); + gen_helper_rddsp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, tcg_ctx->cpu_env); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_DEXTR_W_DSP: + check_dsp(ctx); + switch (op2) { + case OPC_DMTHLIP: + tcg_gen_movi_tl(tcg_ctx, t0, ret); + gen_helper_dmthlip(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DSHILO: + { + int shift = (ctx->opcode >> 19) & 0x7F; + int ac = (ctx->opcode >> 11) & 0x03; + tcg_gen_movi_tl(tcg_ctx, t0, shift); + tcg_gen_movi_tl(tcg_ctx, t1, ac); + gen_helper_dshilo(tcg_ctx, t0, t1, tcg_ctx->cpu_env); + break; + } + case OPC_DSHILOV: + { + int ac = (ctx->opcode >> 11) 
& 0x03; + tcg_gen_movi_tl(tcg_ctx, t0, ac); + gen_helper_dshilo(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); + break; + } + case OPC_DEXTP: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + + gen_helper_dextp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTPV: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTPDP: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextpdp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTPDPV: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextpdp(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_R_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_r_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_RS_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_rs_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_R_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_RS_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_rs_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_S_H: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_s_h(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_S_H: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_s_h(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_R_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_r_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_RS_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_rs_l(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_R_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_r_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_RS_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_rs_w(tcg_ctx, tcg_ctx->cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + } + break; +#endif + } + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, v1_t); + tcg_temp_free(tcg_ctx, v2_t); +} + +/* End MIPSDSP functions. 
*/ + +static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx) +{ + int rs, rt, rd, sa; + uint32_t op1, op2; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + + op1 = MASK_SPECIAL(ctx->opcode); + switch (op1) { + case OPC_LSA: + gen_lsa(ctx, op1, rd, rs, rt, extract32(ctx->opcode, 6, 2)); + break; + case OPC_MULT: + case OPC_MULTU: + case OPC_DIV: + case OPC_DIVU: + op2 = MASK_R6_MULDIV(ctx->opcode); + switch (op2) { + case R6_OPC_MUL: + case R6_OPC_MUH: + case R6_OPC_MULU: + case R6_OPC_MUHU: + case R6_OPC_DIV: + case R6_OPC_MOD: + case R6_OPC_DIVU: + case R6_OPC_MODU: + gen_r6_muldiv(ctx, op2, rd, rs, rt); + break; + default: + MIPS_INVAL("special_r6 muldiv"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_SELEQZ: + case OPC_SELNEZ: + gen_cond_move(ctx, op1, rd, rs, rt); + break; + case R6_OPC_CLO: + case R6_OPC_CLZ: + if (rt == 0 && sa == 1) { + /* + * Major opcode and function field is shared with preR6 MFHI/MTHI. + * We need additionally to check other fields. + */ + gen_cl(ctx, op1, rd, rs); + } else { + generate_exception_end(ctx, EXCP_RI); + } + break; + case R6_OPC_SDBBP: + if (is_uhi(extract32(ctx->opcode, 6, 20))) { + // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); + } else { + if (ctx->hflags & MIPS_HFLAG_SBRI) { + generate_exception_end(ctx, EXCP_RI); + } else { + generate_exception_end(ctx, EXCP_DBp); + } + } + break; +#if defined(TARGET_MIPS64) + case OPC_DLSA: + check_mips_64(ctx); + gen_lsa(ctx, op1, rd, rs, rt, extract32(ctx->opcode, 6, 2)); + break; + case R6_OPC_DCLO: + case R6_OPC_DCLZ: + if (rt == 0 && sa == 1) { + /* + * Major opcode and function field is shared with preR6 MFHI/MTHI. + * We need additionally to check other fields. 
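+ * In this decoder, rt == 0 together with sa == 1 (tested just above) is what selects DCLO/DCLZ.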
+ */ + check_mips_64(ctx); + gen_cl(ctx, op1, rd, rs); + } else { + generate_exception_end(ctx, EXCP_RI); + } + break; + case OPC_DMULT: + case OPC_DMULTU: + case OPC_DDIV: + case OPC_DDIVU: + + op2 = MASK_R6_MULDIV(ctx->opcode); + switch (op2) { + case R6_OPC_DMUL: + case R6_OPC_DMUH: + case R6_OPC_DMULU: + case R6_OPC_DMUHU: + case R6_OPC_DDIV: + case R6_OPC_DMOD: + case R6_OPC_DDIVU: + case R6_OPC_DMODU: + check_mips_64(ctx); + gen_r6_muldiv(ctx, op2, rd, rs, rt); + break; + default: + MIPS_INVAL("special_r6 muldiv"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; +#endif + default: /* Invalid */ + MIPS_INVAL("special_r6"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_opc_special_tx79(CPUMIPSState *env, DisasContext *ctx) +{ + int rs = extract32(ctx->opcode, 21, 5); + int rt = extract32(ctx->opcode, 16, 5); + int rd = extract32(ctx->opcode, 11, 5); + uint32_t op1 = MASK_SPECIAL(ctx->opcode); + + switch (op1) { + case OPC_MOVN: /* Conditional move */ + case OPC_MOVZ: + gen_cond_move(ctx, op1, rd, rs, rt); + break; + case OPC_MFHI: /* Move from HI/LO */ + case OPC_MFLO: + gen_HILO(ctx, op1, 0, rd); + break; + case OPC_MTHI: + case OPC_MTLO: /* Move to HI/LO */ + gen_HILO(ctx, op1, 0, rs); + break; + case OPC_MULT: + case OPC_MULTU: + gen_mul_txx9(ctx, op1, rd, rs, rt); + break; + case OPC_DIV: + case OPC_DIVU: + gen_muldiv(ctx, op1, 0, rs, rt); + break; +#if defined(TARGET_MIPS64) + case OPC_DMULT: + case OPC_DMULTU: + case OPC_DDIV: + case OPC_DDIVU: + check_insn_opc_user_only(ctx, INSN_R5900); + gen_muldiv(ctx, op1, 0, rs, rt); + break; +#endif + case OPC_JR: + gen_compute_branch(ctx, op1, 4, rs, 0, 0, 4); + break; + default: /* Invalid */ + MIPS_INVAL("special_tx79"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx) +{ + int rs, rt, rd, sa; + uint32_t op1; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + + op1 = MASK_SPECIAL(ctx->opcode); + switch (op1) { + case OPC_MOVN: /* Conditional move */ + case OPC_MOVZ: + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32 | + INSN_LOONGSON2E | INSN_LOONGSON2F); + gen_cond_move(ctx, op1, rd, rs, rt); + break; + case OPC_MFHI: /* Move from HI/LO */ + case OPC_MFLO: + gen_HILO(ctx, op1, rs & 3, rd); + break; + case OPC_MTHI: + case OPC_MTLO: /* Move to HI/LO */ + gen_HILO(ctx, op1, rd & 3, rs); + break; + case OPC_MOVCI: + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); + if (env->CP0_Config1 & (1 << CP0C1_FP)) { + check_cp1_enabled(ctx); + gen_movci(ctx, rd, rs, (ctx->opcode >> 18) & 0x7, + (ctx->opcode >> 16) & 1); + } else { + generate_exception_err(ctx, EXCP_CpU, 1); + } + break; + case OPC_MULT: + case OPC_MULTU: + if (sa) { + check_insn(ctx, INSN_VR54XX); + op1 = MASK_MUL_VR54XX(ctx->opcode); + gen_mul_vr54xx(ctx, op1, rd, rs, rt); + } else { + gen_muldiv(ctx, op1, rd & 3, rs, rt); + } + break; + case OPC_DIV: + case OPC_DIVU: + gen_muldiv(ctx, op1, 0, rs, rt); + break; +#if defined(TARGET_MIPS64) + case OPC_DMULT: + case OPC_DMULTU: + case OPC_DDIV: + case OPC_DDIVU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_muldiv(ctx, op1, 0, rs, rt); + break; +#endif + case OPC_JR: + gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4); + break; + case OPC_SPIM: +#ifdef MIPS_STRICT_STANDARD + MIPS_INVAL("SPIM"); + generate_exception_end(ctx, EXCP_RI); +#else + /* Implemented as RI exception for now. 
*/ + MIPS_INVAL("spim (unofficial)"); + generate_exception_end(ctx, EXCP_RI); +#endif + break; + default: /* Invalid */ + MIPS_INVAL("special_legacy"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rs, rt, rd, sa; + uint32_t op1; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + + op1 = MASK_SPECIAL(ctx->opcode); + switch (op1) { + case OPC_SLL: /* Shift with immediate */ + if (sa == 5 && rd == 0 && + rs == 0 && rt == 0) { /* PAUSE */ + if ((ctx->insn_flags & ISA_MIPS32R6) && + (ctx->hflags & MIPS_HFLAG_BMASK)) { + generate_exception_end(ctx, EXCP_RI); + break; + } + } + /* Fallthrough */ + case OPC_SRA: + gen_shift_imm(ctx, op1, rd, rt, sa); + break; + case OPC_SRL: + switch ((ctx->opcode >> 21) & 0x1f) { + case 1: + /* rotr is decoded as srl on non-R2 CPUs */ + if (ctx->insn_flags & ISA_MIPS32R2) { + op1 = OPC_ROTR; + } + /* Fallthrough */ + case 0: + gen_shift_imm(ctx, op1, rd, rt, sa); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_ADD: + case OPC_ADDU: + case OPC_SUB: + case OPC_SUBU: + gen_arith(ctx, op1, rd, rs, rt); + break; + case OPC_SLLV: /* Shifts */ + case OPC_SRAV: + gen_shift(ctx, op1, rd, rs, rt); + break; + case OPC_SRLV: + switch ((ctx->opcode >> 6) & 0x1f) { + case 1: + /* rotrv is decoded as srlv on non-R2 CPUs */ + if (ctx->insn_flags & ISA_MIPS32R2) { + op1 = OPC_ROTRV; + } + /* Fallthrough */ + case 0: + gen_shift(ctx, op1, rd, rs, rt); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_SLT: /* Set on less than */ + case OPC_SLTU: + gen_slt(ctx, op1, rd, rs, rt); + break; + case OPC_AND: /* Logic*/ + case OPC_OR: + case OPC_NOR: + case OPC_XOR: + gen_logic(ctx, op1, rd, rs, rt); + break; + case OPC_JALR: + gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4); + break; + case OPC_TGE: /* Traps */ + case OPC_TGEU: + case OPC_TLT: + case OPC_TLTU: + case OPC_TEQ: + case OPC_TNE: + check_insn(ctx, ISA_MIPS2); + gen_trap(ctx, op1, rs, rt, -1); + break; + case OPC_LSA: /* OPC_PMON */ + if ((ctx->insn_flags & ISA_MIPS32R6) || + (env->CP0_Config3 & (1 << CP0C3_MSAP))) { + decode_opc_special_r6(env, ctx); + } else { + /* Pmon entry point, also R4010 selsl */ +#ifdef MIPS_STRICT_STANDARD + MIPS_INVAL("PMON / selsl"); + generate_exception_end(ctx, EXCP_RI); +#else + gen_helper_0e0i(pmon, sa); +#endif + } + break; + case OPC_SYSCALL: + generate_exception_end(ctx, EXCP_SYSCALL); + break; + case OPC_BREAK: + generate_exception_end(ctx, EXCP_BREAK); + break; + case OPC_SYNC: + check_insn(ctx, ISA_MIPS2); + gen_sync(tcg_ctx, extract32(ctx->opcode, 6, 5)); + break; + +#if defined(TARGET_MIPS64) + /* MIPS64 specific opcodes */ + case OPC_DSLL: + case OPC_DSRA: + case OPC_DSLL32: + case OPC_DSRA32: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift_imm(ctx, op1, rd, rt, sa); + break; + case OPC_DSRL: + switch ((ctx->opcode >> 21) & 0x1f) { + case 1: + /* drotr is decoded as dsrl on non-R2 CPUs */ + if (ctx->insn_flags & ISA_MIPS32R2) { + op1 = OPC_DROTR; + } + /* Fallthrough */ + case 0: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift_imm(ctx, op1, rd, rt, sa); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_DSRL32: + switch ((ctx->opcode >> 21) & 0x1f) { + case 1: + /* drotr32 is decoded as dsrl32 on non-R2 
CPUs */ + if (ctx->insn_flags & ISA_MIPS32R2) { + op1 = OPC_DROTR32; + } + /* Fallthrough */ + case 0: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift_imm(ctx, op1, rd, rt, sa); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_DADD: + case OPC_DADDU: + case OPC_DSUB: + case OPC_DSUBU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_arith(ctx, op1, rd, rs, rt); + break; + case OPC_DSLLV: + case OPC_DSRAV: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift(ctx, op1, rd, rs, rt); + break; + case OPC_DSRLV: + switch ((ctx->opcode >> 6) & 0x1f) { + case 1: + /* drotrv is decoded as dsrlv on non-R2 CPUs */ + if (ctx->insn_flags & ISA_MIPS32R2) { + op1 = OPC_DROTRV; + } + /* Fallthrough */ + case 0: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift(ctx, op1, rd, rs, rt); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_DLSA: + if ((ctx->insn_flags & ISA_MIPS32R6) || + (env->CP0_Config3 & (1 << CP0C3_MSAP))) { + decode_opc_special_r6(env, ctx); + } + break; +#endif + default: + if (ctx->insn_flags & ISA_MIPS32R6) { + decode_opc_special_r6(env, ctx); + } else if (ctx->insn_flags & INSN_R5900) { + decode_opc_special_tx79(env, ctx); + } else { + decode_opc_special_legacy(env, ctx); + } + } +} + + +#if defined(TARGET_MIPS64) + +/* + * + * MMI (MultiMedia Interface) ASE instructions + * =========================================== + */ + +/* + * MMI instructions category: data communication + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * PCPYH PEXCH PEXTLB PINTH PPACB PEXT5 PREVH + * PCPYLD PEXCW PEXTLH PINTEH PPACH PPAC5 PROT3W + * PCPYUD PEXEH PEXTLW PPACW + * PEXEW PEXTUB + * PEXTUH + * PEXTUW + */ + +/* + * PCPYH rd, rt + * + * Parallel Copy Halfword + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+---------+---------+---------+-----------+ + * | MMI |0 0 0 0 0| rt | rd | PCPYH | MMI3 | + * +-----------+---------+---------+---------+---------+-----------+ + */ +static void gen_mmi_pcpyh(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t pd, rt, rd; + uint32_t opcode; + + opcode = ctx->opcode; + + pd = extract32(opcode, 21, 5); + rt = extract32(opcode, 16, 5); + rd = extract32(opcode, 11, 5); + + if (unlikely(pd != 0)) { + generate_exception_end(ctx, EXCP_RI); + } else if (rd == 0) { + /* nop */ + } else if (rt == 0) { + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], 0); + } else { + TCGv_i64 t0 = tcg_temp_new(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new(tcg_ctx); + uint64_t mask = (1ULL << 16) - 1; + + tcg_gen_andi_i64(tcg_ctx, t0, tcg_ctx->cpu_gpr[rt], mask); + tcg_gen_movi_i64(tcg_ctx, t1, 0); + tcg_gen_or_i64(tcg_ctx, t1, t0, t1); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); + tcg_gen_or_i64(tcg_ctx, t1, t0, t1); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); + tcg_gen_or_i64(tcg_ctx, t1, t0, t1); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); + tcg_gen_or_i64(tcg_ctx, t1, t0, t1); + + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], t1); + + tcg_gen_andi_i64(tcg_ctx, t0, tcg_ctx->cpu_mmr[rt], mask); + tcg_gen_movi_i64(tcg_ctx, t1, 0); + tcg_gen_or_i64(tcg_ctx, t1, t0, t1); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); + tcg_gen_or_i64(tcg_ctx, t1, t0, t1); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); + tcg_gen_or_i64(tcg_ctx, t1, t0, t1); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 16); + tcg_gen_or_i64(tcg_ctx, t1, t0, t1); + + tcg_gen_mov_i64(tcg_ctx, 
tcg_ctx->cpu_mmr[rd], t1); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + } +} + +/* + * PCPYLD rd, rs, rt + * + * Parallel Copy Lower Doubleword + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+---------+---------+---------+-----------+ + * | MMI | rs | rt | rd | PCPYLD | MMI2 | + * +-----------+---------+---------+---------+---------+-----------+ + */ +static void gen_mmi_pcpyld(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t rs, rt, rd; + uint32_t opcode; + + opcode = ctx->opcode; + + rs = extract32(opcode, 21, 5); + rt = extract32(opcode, 16, 5); + rd = extract32(opcode, 11, 5); + + if (rd == 0) { + /* nop */ + } else { + if (rs == 0) { + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], 0); + } else { + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], tcg_ctx->cpu_gpr[rs]); + } + if (rt == 0) { + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } else { + if (rd != rt) { + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_gpr[rt]); + } + } + } +} + +/* + * PCPYUD rd, rs, rt + * + * Parallel Copy Upper Doubleword + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+---------+---------+---------+-----------+ + * | MMI | rs | rt | rd | PCPYUD | MMI3 | + * +-----------+---------+---------+---------+---------+-----------+ + */ +static void gen_mmi_pcpyud(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t rs, rt, rd; + uint32_t opcode; + + opcode = ctx->opcode; + + rs = extract32(opcode, 21, 5); + rt = extract32(opcode, 16, 5); + rd = extract32(opcode, 11, 5); + + if (rd == 0) { + /* nop */ + } else { + if (rs == 0) { + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], 0); + } else { + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_gpr[rd], tcg_ctx->cpu_mmr[rs]); + } + if (rt == 0) { + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], 0); + } else { + if (rd != rt) { + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_mmr[rd], tcg_ctx->cpu_mmr[rt]); + } + } + } +} + +#endif + + +#if !defined(TARGET_MIPS64) + +/* MXU accumulate add/subtract 1-bit pattern 'aptn1' */ +#define MXU_APTN1_A 0 +#define MXU_APTN1_S 1 + +/* MXU accumulate add/subtract 2-bit pattern 'aptn2' */ +#define MXU_APTN2_AA 0 +#define MXU_APTN2_AS 1 +#define MXU_APTN2_SA 2 +#define MXU_APTN2_SS 3 + +/* MXU execute add/subtract 2-bit pattern 'eptn2' */ +#define MXU_EPTN2_AA 0 +#define MXU_EPTN2_AS 1 +#define MXU_EPTN2_SA 2 +#define MXU_EPTN2_SS 3 + +/* MXU operand getting pattern 'optn2' */ +#define MXU_OPTN2_PTN0 0 +#define MXU_OPTN2_PTN1 1 +#define MXU_OPTN2_PTN2 2 +#define MXU_OPTN2_PTN3 3 +/* alternative naming scheme for 'optn2' */ +#define MXU_OPTN2_WW 0 +#define MXU_OPTN2_LW 1 +#define MXU_OPTN2_HW 2 +#define MXU_OPTN2_XW 3 + +/* MXU operand getting pattern 'optn3' */ +#define MXU_OPTN3_PTN0 0 +#define MXU_OPTN3_PTN1 1 +#define MXU_OPTN3_PTN2 2 +#define MXU_OPTN3_PTN3 3 +#define MXU_OPTN3_PTN4 4 +#define MXU_OPTN3_PTN5 5 +#define MXU_OPTN3_PTN6 6 +#define MXU_OPTN3_PTN7 7 + + +/* + * S32I2M XRa, rb - Register move from GRF to XRF + */ +static void gen_mxu_s32i2m(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + uint32_t XRa, Rb; + + t0 = tcg_temp_new(tcg_ctx); + + XRa = extract32(ctx->opcode, 6, 5); + Rb = extract32(ctx->opcode, 16, 5); + + gen_load_gpr(tcg_ctx, t0, Rb); + if (XRa <= 15) { + gen_store_mxu_gpr(tcg_ctx, t0, XRa); + } else if (XRa == 16) { + gen_store_mxu_cr(tcg_ctx, t0); + } + + tcg_temp_free(tcg_ctx, t0); +} + +/* + * S32M2I XRa, 
rb - Register move from XRF to GRF + */ +static void gen_mxu_s32m2i(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + uint32_t XRa, Rb; + + t0 = tcg_temp_new(tcg_ctx); + + XRa = extract32(ctx->opcode, 6, 5); + Rb = extract32(ctx->opcode, 16, 5); + + if (XRa <= 15) { + gen_load_mxu_gpr(tcg_ctx, t0, XRa); + } else if (XRa == 16) { + gen_load_mxu_cr(tcg_ctx, t0); + } + + gen_store_gpr(tcg_ctx, t0, Rb); + + tcg_temp_free(tcg_ctx, t0); +} + +/* + * S8LDD XRa, Rb, s8, optn3 - Load a byte from memory to XRF + */ +static void gen_mxu_s8ldd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + uint32_t XRa, Rb, s8, optn3; + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + XRa = extract32(ctx->opcode, 6, 4); + s8 = extract32(ctx->opcode, 10, 8); + optn3 = extract32(ctx->opcode, 18, 3); + Rb = extract32(ctx->opcode, 21, 5); + + gen_load_gpr(tcg_ctx, t0, Rb); + tcg_gen_addi_tl(tcg_ctx, t0, t0, (int8_t)s8); + + switch (optn3) { + /* XRa[7:0] = tmp8 */ + case MXU_OPTN3_PTN0: + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); + gen_load_mxu_gpr(tcg_ctx, t0, XRa); + tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, 0, 8); + break; + /* XRa[15:8] = tmp8 */ + case MXU_OPTN3_PTN1: + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); + gen_load_mxu_gpr(tcg_ctx, t0, XRa); + tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, 8, 8); + break; + /* XRa[23:16] = tmp8 */ + case MXU_OPTN3_PTN2: + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); + gen_load_mxu_gpr(tcg_ctx, t0, XRa); + tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, 16, 8); + break; + /* XRa[31:24] = tmp8 */ + case MXU_OPTN3_PTN3: + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); + gen_load_mxu_gpr(tcg_ctx, t0, XRa); + tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, 24, 8); + break; + /* XRa = {8'b0, tmp8, 8'b0, tmp8} */ + case MXU_OPTN3_PTN4: + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); + tcg_gen_deposit_tl(tcg_ctx, t0, t1, t1, 16, 16); + break; + /* XRa = {tmp8, 8'b0, tmp8, 8'b0} */ + case MXU_OPTN3_PTN5: + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 8); + tcg_gen_deposit_tl(tcg_ctx, t0, t1, t1, 16, 16); + break; + /* XRa = {{8{sign of tmp8}}, tmp8, {8{sign of tmp8}}, tmp8} */ + case MXU_OPTN3_PTN6: + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_SB); + tcg_gen_mov_tl(tcg_ctx, t0, t1); + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0xFF00FFFF); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 16); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + break; + /* XRa = {tmp8, tmp8, tmp8, tmp8} */ + case MXU_OPTN3_PTN7: + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, MO_UB); + tcg_gen_deposit_tl(tcg_ctx, t1, t1, t1, 8, 8); + tcg_gen_deposit_tl(tcg_ctx, t0, t1, t1, 16, 16); + break; + } + + gen_store_mxu_gpr(tcg_ctx, t0, XRa); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +/* + * D16MUL XRa, XRb, XRc, XRd, optn2 - Signed 16 bit pattern multiplication + */ +static void gen_mxu_d16mul(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1, t2, t3; + uint32_t XRa, XRb, XRc, XRd, optn2; + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + t2 = tcg_temp_new(tcg_ctx); + t3 = tcg_temp_new(tcg_ctx); + + XRa = extract32(ctx->opcode, 6, 4); + XRb = extract32(ctx->opcode, 10, 4); + XRc = extract32(ctx->opcode, 14, 4); + XRd = extract32(ctx->opcode, 18, 4); + optn2 = extract32(ctx->opcode, 22, 2); + + gen_load_mxu_gpr(tcg_ctx, t1, XRb); + tcg_gen_sextract_tl(tcg_ctx, t0, t1, 0, 16); + 
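/* t0 = XRb.L, sign-extended; the next extract leaves XRb.H in t1 */ +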
tcg_gen_sextract_tl(tcg_ctx, t1, t1, 16, 16); + gen_load_mxu_gpr(tcg_ctx, t3, XRc); + tcg_gen_sextract_tl(tcg_ctx, t2, t3, 0, 16); + tcg_gen_sextract_tl(tcg_ctx, t3, t3, 16, 16); + + switch (optn2) { + case MXU_OPTN2_WW: /* XRB.H*XRC.H == lop, XRB.L*XRC.L == rop */ + tcg_gen_mul_tl(tcg_ctx, t3, t1, t3); + tcg_gen_mul_tl(tcg_ctx, t2, t0, t2); + break; + case MXU_OPTN2_LW: /* XRB.L*XRC.H == lop, XRB.L*XRC.L == rop */ + tcg_gen_mul_tl(tcg_ctx, t3, t0, t3); + tcg_gen_mul_tl(tcg_ctx, t2, t0, t2); + break; + case MXU_OPTN2_HW: /* XRB.H*XRC.H == lop, XRB.H*XRC.L == rop */ + tcg_gen_mul_tl(tcg_ctx, t3, t1, t3); + tcg_gen_mul_tl(tcg_ctx, t2, t1, t2); + break; + case MXU_OPTN2_XW: /* XRB.L*XRC.H == lop, XRB.H*XRC.L == rop */ + tcg_gen_mul_tl(tcg_ctx, t3, t0, t3); + tcg_gen_mul_tl(tcg_ctx, t2, t1, t2); + break; + } + gen_store_mxu_gpr(tcg_ctx, t3, XRa); + gen_store_mxu_gpr(tcg_ctx, t2, XRd); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t3); +} + +/* + * D16MAC XRa, XRb, XRc, XRd, aptn2, optn2 - Signed 16 bit pattern multiply + * and accumulate + */ +static void gen_mxu_d16mac(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1, t2, t3; + uint32_t XRa, XRb, XRc, XRd, optn2, aptn2; + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + t2 = tcg_temp_new(tcg_ctx); + t3 = tcg_temp_new(tcg_ctx); + + XRa = extract32(ctx->opcode, 6, 4); + XRb = extract32(ctx->opcode, 10, 4); + XRc = extract32(ctx->opcode, 14, 4); + XRd = extract32(ctx->opcode, 18, 4); + optn2 = extract32(ctx->opcode, 22, 2); + aptn2 = extract32(ctx->opcode, 24, 2); + + gen_load_mxu_gpr(tcg_ctx, t1, XRb); + tcg_gen_sextract_tl(tcg_ctx, t0, t1, 0, 16); + tcg_gen_sextract_tl(tcg_ctx, t1, t1, 16, 16); + + gen_load_mxu_gpr(tcg_ctx, t3, XRc); + tcg_gen_sextract_tl(tcg_ctx, t2, t3, 0, 16); + tcg_gen_sextract_tl(tcg_ctx, t3, t3, 16, 16); + + switch (optn2) { + case MXU_OPTN2_WW: /* XRB.H*XRC.H == lop, XRB.L*XRC.L == rop */ + tcg_gen_mul_tl(tcg_ctx, t3, t1, t3); + tcg_gen_mul_tl(tcg_ctx, t2, t0, t2); + break; + case MXU_OPTN2_LW: /* XRB.L*XRC.H == lop, XRB.L*XRC.L == rop */ + tcg_gen_mul_tl(tcg_ctx, t3, t0, t3); + tcg_gen_mul_tl(tcg_ctx, t2, t0, t2); + break; + case MXU_OPTN2_HW: /* XRB.H*XRC.H == lop, XRB.H*XRC.L == rop */ + tcg_gen_mul_tl(tcg_ctx, t3, t1, t3); + tcg_gen_mul_tl(tcg_ctx, t2, t1, t2); + break; + case MXU_OPTN2_XW: /* XRB.L*XRC.H == lop, XRB.H*XRC.L == rop */ + tcg_gen_mul_tl(tcg_ctx, t3, t0, t3); + tcg_gen_mul_tl(tcg_ctx, t2, t1, t2); + break; + } + gen_load_mxu_gpr(tcg_ctx, t0, XRa); + gen_load_mxu_gpr(tcg_ctx, t1, XRd); + + switch (aptn2) { + case MXU_APTN2_AA: + tcg_gen_add_tl(tcg_ctx, t3, t0, t3); + tcg_gen_add_tl(tcg_ctx, t2, t1, t2); + break; + case MXU_APTN2_AS: + tcg_gen_add_tl(tcg_ctx, t3, t0, t3); + tcg_gen_sub_tl(tcg_ctx, t2, t1, t2); + break; + case MXU_APTN2_SA: + tcg_gen_sub_tl(tcg_ctx, t3, t0, t3); + tcg_gen_add_tl(tcg_ctx, t2, t1, t2); + break; + case MXU_APTN2_SS: + tcg_gen_sub_tl(tcg_ctx, t3, t0, t3); + tcg_gen_sub_tl(tcg_ctx, t2, t1, t2); + break; + } + gen_store_mxu_gpr(tcg_ctx, t3, XRa); + gen_store_mxu_gpr(tcg_ctx, t2, XRd); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t3); +} + +/* + * Q8MUL XRa, XRb, XRc, XRd - Parallel unsigned 8 bit pattern multiply + * Q8MULSU XRa, XRb, XRc, XRd - Parallel signed 8 bit pattern multiply + */ +static void gen_mxu_q8mul_q8mulsu(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, 
t1, t2, t3, t4, t5, t6, t7; + uint32_t XRa, XRb, XRc, XRd, sel; + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + t2 = tcg_temp_new(tcg_ctx); + t3 = tcg_temp_new(tcg_ctx); + t4 = tcg_temp_new(tcg_ctx); + t5 = tcg_temp_new(tcg_ctx); + t6 = tcg_temp_new(tcg_ctx); + t7 = tcg_temp_new(tcg_ctx); + + XRa = extract32(ctx->opcode, 6, 4); + XRb = extract32(ctx->opcode, 10, 4); + XRc = extract32(ctx->opcode, 14, 4); + XRd = extract32(ctx->opcode, 18, 4); + sel = extract32(ctx->opcode, 22, 2); + + gen_load_mxu_gpr(tcg_ctx, t3, XRb); + gen_load_mxu_gpr(tcg_ctx, t7, XRc); + + if (sel == 0x2) { + /* Q8MULSU */ + tcg_gen_ext8s_tl(tcg_ctx, t0, t3); + tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); + tcg_gen_ext8s_tl(tcg_ctx, t1, t3); + tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); + tcg_gen_ext8s_tl(tcg_ctx, t2, t3); + tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); + tcg_gen_ext8s_tl(tcg_ctx, t3, t3); + } else { + /* Q8MUL */ + tcg_gen_ext8u_tl(tcg_ctx, t0, t3); + tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); + tcg_gen_ext8u_tl(tcg_ctx, t1, t3); + tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); + tcg_gen_ext8u_tl(tcg_ctx, t2, t3); + tcg_gen_shri_tl(tcg_ctx, t3, t3, 8); + tcg_gen_ext8u_tl(tcg_ctx, t3, t3); + } + + tcg_gen_ext8u_tl(tcg_ctx, t4, t7); + tcg_gen_shri_tl(tcg_ctx, t7, t7, 8); + tcg_gen_ext8u_tl(tcg_ctx, t5, t7); + tcg_gen_shri_tl(tcg_ctx, t7, t7, 8); + tcg_gen_ext8u_tl(tcg_ctx, t6, t7); + tcg_gen_shri_tl(tcg_ctx, t7, t7, 8); + tcg_gen_ext8u_tl(tcg_ctx, t7, t7); + + tcg_gen_mul_tl(tcg_ctx, t0, t0, t4); + tcg_gen_mul_tl(tcg_ctx, t1, t1, t5); + tcg_gen_mul_tl(tcg_ctx, t2, t2, t6); + tcg_gen_mul_tl(tcg_ctx, t3, t3, t7); + + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0xFFFF); + tcg_gen_andi_tl(tcg_ctx, t1, t1, 0xFFFF); + tcg_gen_andi_tl(tcg_ctx, t2, t2, 0xFFFF); + tcg_gen_andi_tl(tcg_ctx, t3, t3, 0xFFFF); + + tcg_gen_shli_tl(tcg_ctx, t1, t1, 16); + tcg_gen_shli_tl(tcg_ctx, t3, t3, 16); + + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_gen_or_tl(tcg_ctx, t1, t2, t3); + + gen_store_mxu_gpr(tcg_ctx, t0, XRd); + gen_store_mxu_gpr(tcg_ctx, t1, XRa); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t4); + tcg_temp_free(tcg_ctx, t5); + tcg_temp_free(tcg_ctx, t6); + tcg_temp_free(tcg_ctx, t7); +} + +/* + * S32LDD XRa, Rb, S12 - Load a word from memory to XRF + * S32LDDR XRa, Rb, S12 - Load a word from memory to XRF, reversed byte seq. + */ +static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + uint32_t XRa, Rb, s12, sel; + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + XRa = extract32(ctx->opcode, 6, 4); + s12 = extract32(ctx->opcode, 10, 10); + sel = extract32(ctx->opcode, 20, 1); + Rb = extract32(ctx->opcode, 21, 5); + + gen_load_gpr(tcg_ctx, t0, Rb); + + tcg_gen_movi_tl(tcg_ctx, t1, s12); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 2); + if (s12 & 0x200) { + tcg_gen_ori_tl(tcg_ctx, t1, t1, 0xFFFFF000); + } + tcg_gen_add_tl(tcg_ctx, t1, t0, t1); + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t1, ctx->mem_idx, MO_SL); + + if (sel == 1) { + /* S32LDDR */ + tcg_gen_bswap32_tl(tcg_ctx, t1, t1); + } + gen_store_mxu_gpr(tcg_ctx, t1, XRa); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + + +/* + * MXU instruction category: logic + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * S32NOR S32AND S32OR S32XOR + */ + +/* + * S32NOR XRa, XRb, XRc + * Update XRa with the result of logical bitwise 'nor' operation + * applied to the content of XRb and XRc. 
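+ *
+ * For instance (illustrative values, not taken from the MXU documentation):
+ *   XRb = 0x12345678, XRc = 0x0000FFFF
+ *   XRa = ~(0x12345678 | 0x0000FFFF) = 0xEDCB0000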
+ *
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-----------+---------+-----+-------+-------+-------+-----------+
+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL16|
+ * +-----------+---------+-----+-------+-------+-------+-----------+
+ */
+static void gen_mxu_S32NOR(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    uint32_t pad, XRc, XRb, XRa;
+
+    pad = extract32(ctx->opcode, 21, 5);
+    XRc = extract32(ctx->opcode, 14, 4);
+    XRb = extract32(ctx->opcode, 10, 4);
+    XRa = extract32(ctx->opcode, 6, 4);
+
+    if (unlikely(pad != 0)) {
+        /* opcode padding incorrect -> do nothing */
+    } else if (unlikely(XRa == 0)) {
+        /* destination is zero register -> do nothing */
+    } else if (unlikely((XRb == 0) && (XRc == 0))) {
+        /* both operands zero registers -> just set destination to all 1s */
+        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0xFFFFFFFF);
+    } else if (unlikely(XRb == 0)) {
+        /* XRb zero register -> just set destination to the negation of XRc */
+        tcg_gen_not_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1]);
+    } else if (unlikely(XRc == 0)) {
+        /* XRc zero register -> just set destination to the negation of XRb */
+        tcg_gen_not_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]);
+    } else if (unlikely(XRb == XRc)) {
+        /* both operands same -> just set destination to the negation of XRb */
+        tcg_gen_not_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]);
+    } else {
+        /* the most general case */
+        tcg_gen_nor_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], tcg_ctx->mxu_gpr[XRc - 1]);
+    }
+}
+
+/*
+ * S32AND XRa, XRb, XRc
+ * Update XRa with the result of logical bitwise 'and' operation
+ * applied to the content of XRb and XRc.
+ *
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-----------+---------+-----+-------+-------+-------+-----------+
+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL16|
+ * +-----------+---------+-----+-------+-------+-------+-----------+
+ */
+static void gen_mxu_S32AND(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    uint32_t pad, XRc, XRb, XRa;
+
+    pad = extract32(ctx->opcode, 21, 5);
+    XRc = extract32(ctx->opcode, 14, 4);
+    XRb = extract32(ctx->opcode, 10, 4);
+    XRa = extract32(ctx->opcode, 6, 4);
+
+    if (unlikely(pad != 0)) {
+        /* opcode padding incorrect -> do nothing */
+    } else if (unlikely(XRa == 0)) {
+        /* destination is zero register -> do nothing */
+    } else if (unlikely((XRb == 0) || (XRc == 0))) {
+        /* one of operands zero register -> just set destination to all 0s */
+        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0);
+    } else if (unlikely(XRb == XRc)) {
+        /* both operands same -> just set destination to one of them */
+        tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]);
+    } else {
+        /* the most general case */
+        tcg_gen_and_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], tcg_ctx->mxu_gpr[XRc - 1]);
+    }
+}
+
+/*
+ * S32OR XRa, XRb, XRc
+ * Update XRa with the result of logical bitwise 'or' operation
+ * applied to the content of XRb and XRc.
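+ *
+ * For instance (illustrative values, not taken from the MXU documentation):
+ *   XRb = 0x12340000, XRc = 0x00005678
+ *   XRa = 0x12340000 | 0x00005678 = 0x12345678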
+ * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL16| + * +-----------+---------+-----+-------+-------+-------+-----------+ + */ +static void gen_mxu_S32OR(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t pad, XRc, XRb, XRa; + + pad = extract32(ctx->opcode, 21, 5); + XRc = extract32(ctx->opcode, 14, 4); + XRb = extract32(ctx->opcode, 10, 4); + XRa = extract32(ctx->opcode, 6, 4); + + if (unlikely(pad != 0)) { + /* opcode padding incorrect -> do nothing */ + } else if (unlikely(XRa == 0)) { + /* destination is zero register -> do nothing */ + } else if (unlikely((XRb == 0) && (XRc == 0))) { + /* both operands zero registers -> just set destination to all 0s */ + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); + } else if (unlikely(XRb == 0)) { + /* XRb zero register -> just set destination to the content of XRc */ + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1]); + } else if (unlikely(XRc == 0)) { + /* XRc zero register -> just set destination to the content of XRb */ + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); + } else if (unlikely(XRb == XRc)) { + /* both operands same -> just set destination to one of them */ + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); + } else { + /* the most general case */ + tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], tcg_ctx->mxu_gpr[XRc - 1]); + } +} + +/* + * S32XOR XRa, XRb, XRc + * Update XRa with the result of logical bitwise 'xor' operation + * applied to the content of XRb and XRc. + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL16| + * +-----------+---------+-----+-------+-------+-------+-----------+ + */ +static void gen_mxu_S32XOR(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t pad, XRc, XRb, XRa; + + pad = extract32(ctx->opcode, 21, 5); + XRc = extract32(ctx->opcode, 14, 4); + XRb = extract32(ctx->opcode, 10, 4); + XRa = extract32(ctx->opcode, 6, 4); + + if (unlikely(pad != 0)) { + /* opcode padding incorrect -> do nothing */ + } else if (unlikely(XRa == 0)) { + /* destination is zero register -> do nothing */ + } else if (unlikely((XRb == 0) && (XRc == 0))) { + /* both operands zero registers -> just set destination to all 0s */ + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); + } else if (unlikely(XRb == 0)) { + /* XRb zero register -> just set destination to the content of XRc */ + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1]); + } else if (unlikely(XRc == 0)) { + /* XRc zero register -> just set destination to the content of XRb */ + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); + } else if (unlikely(XRb == XRc)) { + /* both operands same -> just set destination to all 0s */ + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); + } else { + /* the most general case */ + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], tcg_ctx->mxu_gpr[XRc - 1]); + } +} + + +/* + * MXU instruction category max/min + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * S32MAX D16MAX Q8MAX + * S32MIN D16MIN Q8MIN + */ + +/* + * S32MAX XRa, XRb, XRc + * Update XRa with the 
maximum of signed 32-bit integers contained
+ * in XRb and XRc.
+ *
+ * S32MIN XRa, XRb, XRc
+ * Update XRa with the minimum of signed 32-bit integers contained
+ * in XRb and XRc.
+ *
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-----------+---------+-----+-------+-------+-------+-----------+
+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL00|
+ * +-----------+---------+-----+-------+-------+-------+-----------+
+ */
+static void gen_mxu_S32MAX_S32MIN(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    uint32_t pad, opc, XRc, XRb, XRa;
+
+    pad = extract32(ctx->opcode, 21, 5);
+    opc = extract32(ctx->opcode, 18, 3);
+    XRc = extract32(ctx->opcode, 14, 4);
+    XRb = extract32(ctx->opcode, 10, 4);
+    XRa = extract32(ctx->opcode, 6, 4);
+
+    if (unlikely(pad != 0)) {
+        /* opcode padding incorrect -> do nothing */
+    } else if (unlikely(XRa == 0)) {
+        /* destination is zero register -> do nothing */
+    } else if (unlikely((XRb == 0) && (XRc == 0))) {
+        /* both operands zero registers -> just set destination to zero */
+        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0);
+    } else if (unlikely((XRb == 0) || (XRc == 0))) {
+        /* exactly one operand is zero register - find which one is not...*/
+        uint32_t XRx = XRb ? XRb : XRc;
+        /* ...and do max/min operation with one operand 0 */
+        TCGv_i32 t0 = tcg_const_i32(tcg_ctx, 0);
+
+        if (opc == OPC_MXU_S32MAX) {
+            tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRx - 1], t0);
+        } else {
+            tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRx - 1], t0);
+        }
+        tcg_temp_free(tcg_ctx, t0);
+    } else if (unlikely(XRb == XRc)) {
+        /* both operands same -> just set destination to one of them */
+        tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]);
+    } else {
+        /* the most general case */
+        if (opc == OPC_MXU_S32MAX) {
+            tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1],
+                             tcg_ctx->mxu_gpr[XRc - 1]);
+        } else {
+            tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1],
+                             tcg_ctx->mxu_gpr[XRc - 1]);
+        }
+    }
+}
+
+/*
+ * D16MAX
+ * Update XRa with the 16-bit-wise maximums of signed integers
+ * contained in XRb and XRc.
+ *
+ * D16MIN
+ * Update XRa with the 16-bit-wise minimums of signed integers
+ * contained in XRb and XRc.
+ *
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-----------+---------+-----+-------+-------+-------+-----------+
+ * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL00|
+ * +-----------+---------+-----+-------+-------+-------+-----------+
+ */
+static void gen_mxu_D16MAX_D16MIN(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    uint32_t pad, opc, XRc, XRb, XRa;
+
+    pad = extract32(ctx->opcode, 21, 5);
+    opc = extract32(ctx->opcode, 18, 3);
+    XRc = extract32(ctx->opcode, 14, 4);
+    XRb = extract32(ctx->opcode, 10, 4);
+    XRa = extract32(ctx->opcode, 6, 4);
+
+    if (unlikely(pad != 0)) {
+        /* opcode padding incorrect -> do nothing */
+    } else if (unlikely(XRa == 0)) {
+        /* destination is zero register -> do nothing */
+    } else if (unlikely((XRb == 0) && (XRc == 0))) {
+        /* both operands zero registers -> just set destination to zero */
+        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0);
+    } else if (unlikely((XRb == 0) || (XRc == 0))) {
+        /* exactly one operand is zero register - find which one is not...*/
+        uint32_t XRx = XRb ? XRb : XRc;
+        /* ...and do half-word-wise max/min with one operand 0 */
+        TCGv_i32 t0 = tcg_temp_new(tcg_ctx);
+        TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0);
+
+        /* the left half-word first */
+        tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRx - 1], 0xFFFF0000);
+        if (opc == OPC_MXU_D16MAX) {
+            tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1);
+        } else {
+            tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1);
+        }
+
+        /* the right half-word */
+        tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRx - 1], 0x0000FFFF);
+        /* move half-words to the leftmost position */
+        tcg_gen_shli_i32(tcg_ctx, t0, t0, 16);
+        /* t0 will be max/min of t0 and t1 */
+        if (opc == OPC_MXU_D16MAX) {
+            tcg_gen_smax_i32(tcg_ctx, t0, t0, t1);
+        } else {
+            tcg_gen_smin_i32(tcg_ctx, t0, t0, t1);
+        }
+        /* return the resulting half-word to its original position */
+        tcg_gen_shri_i32(tcg_ctx, t0, t0, 16);
+        /* finally update the destination */
+        tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRa - 1], t0);
+
+        tcg_temp_free(tcg_ctx, t1);
+        tcg_temp_free(tcg_ctx, t0);
+    } else if (unlikely(XRb == XRc)) {
+        /* both operands same -> just set destination to one of them */
+        tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]);
+    } else {
+        /* the most general case */
+        TCGv_i32 t0 = tcg_temp_new(tcg_ctx);
+        TCGv_i32 t1 = tcg_temp_new(tcg_ctx);
+
+        /* the left half-word first */
+        tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0xFFFF0000);
+        tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFFFF0000);
+        if (opc == OPC_MXU_D16MAX) {
+            tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1);
+        } else {
+            tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1);
+        }
+
+        /* the right half-word */
+        tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0x0000FFFF);
+        tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0x0000FFFF);
+        /* move half-words to the leftmost position */
+        tcg_gen_shli_i32(tcg_ctx, t0, t0, 16);
+        tcg_gen_shli_i32(tcg_ctx, t1, t1, 16);
+        /* t0 will be max/min of t0 and t1 */
+        if (opc == OPC_MXU_D16MAX) {
+            tcg_gen_smax_i32(tcg_ctx, t0, t0, t1);
+        } else {
+            tcg_gen_smin_i32(tcg_ctx, t0, t0, t1);
+        }
+        /* return the resulting half-word to its original position */
+        tcg_gen_shri_i32(tcg_ctx, t0, t0, 16);
+        /* finally update the destination */
+        tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRa - 1], t0);
+
+        tcg_temp_free(tcg_ctx, t1);
+        tcg_temp_free(tcg_ctx, t0);
+    }
+}
+
+/*
+ * Q8MAX
+ * Update XRa with the 8-bit-wise maximums of signed integers
+ * contained in XRb and XRc.
+ *
+ * Q8MIN
+ * Update XRa with the 8-bit-wise minimums of signed integers
+ * contained in XRb and XRc.
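+ *
+ * For example (illustrative values, not taken from the MXU documentation),
+ * with byte-wise signed comparison:
+ *   XRb = 0x7F80FF01, XRc = 0x01FF807F
+ *   Q8MAX gives XRa = 0x7FFFFF7F, Q8MIN gives XRa = 0x01808001
+ *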
+ * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 |0 0 0 0 0| opc | XRc | XRb | XRa |MXU__POOL00| + * +-----------+---------+-----+-------+-------+-------+-----------+ + */ +static void gen_mxu_Q8MAX_Q8MIN(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t pad, opc, XRc, XRb, XRa; + + pad = extract32(ctx->opcode, 21, 5); + opc = extract32(ctx->opcode, 18, 3); + XRc = extract32(ctx->opcode, 14, 4); + XRb = extract32(ctx->opcode, 10, 4); + XRa = extract32(ctx->opcode, 6, 4); + + if (unlikely(pad != 0)) { + /* opcode padding incorrect -> do nothing */ + } else if (unlikely(XRa == 0)) { + /* destination is zero register -> do nothing */ + } else if (unlikely((XRb == 0) && (XRc == 0))) { + /* both operands zero registers -> just set destination to zero */ + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); + } else if (unlikely((XRb == 0) || (XRc == 0))) { + /* exactly one operand is zero register - make it be the first...*/ + uint32_t XRx = XRb ? XRb : XRc; + /* ...and do byte-wise max/min with one operand 0 */ + TCGv_i32 t0 = tcg_temp_new(tcg_ctx); + TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0); + int32_t i; + + /* the leftmost byte (byte 3) first */ + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRx - 1], 0xFF000000); + if (opc == OPC_MXU_Q8MAX) { + tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); + } else { + tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); + } + + /* bytes 2, 1, 0 */ + for (i = 2; i >= 0; i--) { + /* extract the byte */ + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRx - 1], 0xFF << (8 * i)); + /* move the byte to the leftmost position */ + tcg_gen_shli_i32(tcg_ctx, t0, t0, 8 * (3 - i)); + /* t0 will be max/min of t0 and t1 */ + if (opc == OPC_MXU_Q8MAX) { + tcg_gen_smax_i32(tcg_ctx, t0, t0, t1); + } else { + tcg_gen_smin_i32(tcg_ctx, t0, t0, t1); + } + /* return resulting byte to its original position */ + tcg_gen_shri_i32(tcg_ctx, t0, t0, 8 * (3 - i)); + /* finaly update the destination */ + tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRa - 1], t0); + } + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + } else if (unlikely(XRb == XRc)) { + /* both operands same -> just set destination to one of them */ + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); + } else { + /* the most general case */ + TCGv_i32 t0 = tcg_temp_new(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new(tcg_ctx); + int32_t i; + + /* the leftmost bytes (bytes 3) first */ + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0xFF000000); + tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFF000000); + if (opc == OPC_MXU_Q8MAX) { + tcg_gen_smax_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); + } else { + tcg_gen_smin_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); + } + + /* bytes 2, 1, 0 */ + for (i = 2; i >= 0; i--) { + /* extract corresponding bytes */ + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0xFF << (8 * i)); + tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFF << (8 * i)); + /* move the bytes to the leftmost position */ + tcg_gen_shli_i32(tcg_ctx, t0, t0, 8 * (3 - i)); + tcg_gen_shli_i32(tcg_ctx, t1, t1, 8 * (3 - i)); + /* t0 will be max/min of t0 and t1 */ + if (opc == OPC_MXU_Q8MAX) { + tcg_gen_smax_i32(tcg_ctx, t0, t0, t1); + } else { + tcg_gen_smin_i32(tcg_ctx, t0, t0, t1); + } + /* return resulting byte to its original position */ + 
tcg_gen_shri_i32(tcg_ctx, t0, t0, 8 * (3 - i)); + /* finaly update the destination */ + tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRa - 1], t0); + } + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + } +} + + +/* + * MXU instruction category: align + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * S32ALN S32ALNI + */ + +/* + * S32ALNI XRc, XRb, XRa, optn3 + * Arrange bytes from XRb and XRc according to one of five sets of + * rules determined by optn3, and place the result in XRa. + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+-----+---+-----+-------+-------+-------+-----------+ + * | SPECIAL2 |optn3|0 0|x x x| XRc | XRb | XRa |MXU__POOL16| + * +-----------+-----+---+-----+-------+-------+-------+-----------+ + * + */ +static void gen_mxu_S32ALNI(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t optn3, pad, XRc, XRb, XRa; + + optn3 = extract32(ctx->opcode, 23, 3); + pad = extract32(ctx->opcode, 21, 2); + XRc = extract32(ctx->opcode, 14, 4); + XRb = extract32(ctx->opcode, 10, 4); + XRa = extract32(ctx->opcode, 6, 4); + + if (unlikely(pad != 0)) { + /* opcode padding incorrect -> do nothing */ + } else if (unlikely(XRa == 0)) { + /* destination is zero register -> do nothing */ + } else if (unlikely((XRb == 0) && (XRc == 0))) { + /* both operands zero registers -> just set destination to all 0s */ + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); + } else if (unlikely(XRb == 0)) { + /* XRb zero register -> just appropriatelly shift XRc into XRa */ + switch (optn3) { + case MXU_OPTN3_PTN0: + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); + break; + case MXU_OPTN3_PTN1: + case MXU_OPTN3_PTN2: + case MXU_OPTN3_PTN3: + tcg_gen_shri_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1], + 8 * (4 - optn3)); + break; + case MXU_OPTN3_PTN4: + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1]); + break; + } + } else if (unlikely(XRc == 0)) { + /* XRc zero register -> just appropriatelly shift XRb into XRa */ + switch (optn3) { + case MXU_OPTN3_PTN0: + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); + break; + case MXU_OPTN3_PTN1: + case MXU_OPTN3_PTN2: + case MXU_OPTN3_PTN3: + tcg_gen_shri_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], 8 * optn3); + break; + case MXU_OPTN3_PTN4: + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], 0); + break; + } + } else if (unlikely(XRb == XRc)) { + /* both operands same -> just rotation or moving from any of them */ + switch (optn3) { + case MXU_OPTN3_PTN0: + case MXU_OPTN3_PTN4: + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); + break; + case MXU_OPTN3_PTN1: + case MXU_OPTN3_PTN2: + case MXU_OPTN3_PTN3: + tcg_gen_rotli_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1], 8 * optn3); + break; + } + } else { + /* the most general case */ + switch (optn3) { + case MXU_OPTN3_PTN0: + { + /* */ + /* XRb XRc */ + /* +---------------+ */ + /* | A B C D | E F G H */ + /* +-------+-------+ */ + /* | */ + /* XRa */ + /* */ + + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRb - 1]); + } + break; + case MXU_OPTN3_PTN1: + { + /* */ + /* XRb XRc */ + /* +-------------------+ */ + /* A | B C D E | F G H */ + /* +---------+---------+ */ + /* | */ + /* XRa */ + /* */ + + TCGv_i32 t0 = tcg_temp_new(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 
1], 0x00FFFFFF); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 8); + + tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFF000000); + tcg_gen_shri_i32(tcg_ctx, t1, t1, 24); + + tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + } + break; + case MXU_OPTN3_PTN2: + { + /* */ + /* XRb XRc */ + /* +-------------------+ */ + /* A B | C D E F | G H */ + /* +---------+---------+ */ + /* | */ + /* XRa */ + /* */ + + TCGv_i32 t0 = tcg_temp_new(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0x0000FFFF); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 16); + + tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFFFF0000); + tcg_gen_shri_i32(tcg_ctx, t1, t1, 16); + + tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + } + break; + case MXU_OPTN3_PTN3: + { + /* */ + /* XRb XRc */ + /* +-------------------+ */ + /* A B C | D E F G | H */ + /* +---------+---------+ */ + /* | */ + /* XRa */ + /* */ + + TCGv_i32 t0 = tcg_temp_new(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->mxu_gpr[XRb - 1], 0x000000FF); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 24); + + tcg_gen_andi_i32(tcg_ctx, t1, tcg_ctx->mxu_gpr[XRc - 1], 0xFFFFFF00); + tcg_gen_shri_i32(tcg_ctx, t1, t1, 8); + + tcg_gen_or_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], t0, t1); + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + } + break; + case MXU_OPTN3_PTN4: + { + /* */ + /* XRb XRc */ + /* +---------------+ */ + /* A B C D | E F G H | */ + /* +-------+-------+ */ + /* | */ + /* XRa */ + /* */ + + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->mxu_gpr[XRa - 1], tcg_ctx->mxu_gpr[XRc - 1]); + } + break; + } + } +} + + +/* + * Decoding engine for MXU + * ======================= + */ + +/* + * + * Decode MXU pool00 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 |0 0 0 0 0|x x x| XRc | XRb | XRa |MXU__POOL00| + * +-----------+---------+-----+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool00(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 18, 3); + + switch (opcode) { + case OPC_MXU_S32MAX: + case OPC_MXU_S32MIN: + gen_mxu_S32MAX_S32MIN(ctx); + break; + case OPC_MXU_D16MAX: + case OPC_MXU_D16MIN: + gen_mxu_D16MAX_D16MIN(ctx); + break; + case OPC_MXU_Q8MAX: + case OPC_MXU_Q8MIN: + gen_mxu_Q8MAX_Q8MIN(ctx); + break; + case OPC_MXU_Q8SLT: + /* TODO: Implement emulation of Q8SLT instruction. */ + MIPS_INVAL("OPC_MXU_Q8SLT"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q8SLTU: + /* TODO: Implement emulation of Q8SLTU instruction. 
*/ + MIPS_INVAL("OPC_MXU_Q8SLTU"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool01 + * + * S32SLT, D16SLT, D16AVG, D16AVGR, Q8AVG, Q8AVGR: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 |0 0 0 0 0|x x x| XRc | XRb | XRa |MXU__POOL01| + * +-----------+---------+-----+-------+-------+-------+-----------+ + * + * Q8ADD: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---+-----+-----+-------+-------+-------+-----------+ + * | SPECIAL2 |en2|0 0 0|x x x| XRc | XRb | XRa |MXU__POOL01| + * +-----------+---+-----+-----+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool01(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 18, 3); + + switch (opcode) { + case OPC_MXU_S32SLT: + /* TODO: Implement emulation of S32SLT instruction. */ + MIPS_INVAL("OPC_MXU_S32SLT"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D16SLT: + /* TODO: Implement emulation of D16SLT instruction. */ + MIPS_INVAL("OPC_MXU_D16SLT"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D16AVG: + /* TODO: Implement emulation of D16AVG instruction. */ + MIPS_INVAL("OPC_MXU_D16AVG"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D16AVGR: + /* TODO: Implement emulation of D16AVGR instruction. */ + MIPS_INVAL("OPC_MXU_D16AVGR"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q8AVG: + /* TODO: Implement emulation of Q8AVG instruction. */ + MIPS_INVAL("OPC_MXU_Q8AVG"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q8AVGR: + /* TODO: Implement emulation of Q8AVGR instruction. */ + MIPS_INVAL("OPC_MXU_Q8AVGR"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q8ADD: + /* TODO: Implement emulation of Q8ADD instruction. */ + MIPS_INVAL("OPC_MXU_Q8ADD"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool02 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 |0 0 0 0 0|x x x| XRc | XRb | XRa |MXU__POOL02| + * +-----------+---------+-----+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool02(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 18, 3); + + switch (opcode) { + case OPC_MXU_S32CPS: + /* TODO: Implement emulation of S32CPS instruction. */ + MIPS_INVAL("OPC_MXU_S32CPS"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D16CPS: + /* TODO: Implement emulation of D16CPS instruction. */ + MIPS_INVAL("OPC_MXU_D16CPS"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q8ABD: + /* TODO: Implement emulation of Q8ABD instruction. */ + MIPS_INVAL("OPC_MXU_Q8ABD"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q16SAT: + /* TODO: Implement emulation of Q16SAT instruction. 
*/ + MIPS_INVAL("OPC_MXU_Q16SAT"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool03 + * + * D16MULF: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * | SPECIAL2 |x x|on2|0 0 0 0| XRc | XRb | XRa |MXU__POOL03| + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * + * D16MULE: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * | SPECIAL2 |x x|on2| Xd | XRc | XRb | XRa |MXU__POOL03| + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool03(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 24, 2); + + switch (opcode) { + case OPC_MXU_D16MULF: + /* TODO: Implement emulation of D16MULF instruction. */ + MIPS_INVAL("OPC_MXU_D16MULF"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D16MULE: + /* TODO: Implement emulation of D16MULE instruction. */ + MIPS_INVAL("OPC_MXU_D16MULE"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool04 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-+-------------------+-------+-----------+ + * | SPECIAL2 | rb |x| s12 | XRa |MXU__POOL04| + * +-----------+---------+-+-------------------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool04(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 20, 1); + + switch (opcode) { + case OPC_MXU_S32LDD: + case OPC_MXU_S32LDDR: + gen_mxu_s32ldd_s32lddr(ctx); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool05 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-+-------------------+-------+-----------+ + * | SPECIAL2 | rb |x| s12 | XRa |MXU__POOL05| + * +-----------+---------+-+-------------------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool05(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 20, 1); + + switch (opcode) { + case OPC_MXU_S32STD: + /* TODO: Implement emulation of S32STD instruction. */ + MIPS_INVAL("OPC_MXU_S32STD"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32STDR: + /* TODO: Implement emulation of S32STDR instruction. */ + MIPS_INVAL("OPC_MXU_S32STDR"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool06 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+---------+---+-------+-------+-----------+ + * | SPECIAL2 | rb | rc |st2|x x x x| XRa |MXU__POOL06| + * +-----------+---------+---------+---+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool06(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 10, 4); + + switch (opcode) { + case OPC_MXU_S32LDDV: + /* TODO: Implement emulation of S32LDDV instruction. 
*/
+        MIPS_INVAL("OPC_MXU_S32LDDV");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    case OPC_MXU_S32LDDVR:
+        /* TODO: Implement emulation of S32LDDVR instruction. */
+        MIPS_INVAL("OPC_MXU_S32LDDVR");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    default:
+        MIPS_INVAL("decode_opc_mxu");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    }
+}
+
+/*
+ *
+ * Decode MXU pool07
+ *
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-----------+---------+---------+---+-------+-------+-----------+
+ * | SPECIAL2 | rb | rc |st2|x x x x| XRa |MXU__POOL07|
+ * +-----------+---------+---------+---+-------+-------+-----------+
+ *
+ */
+static void decode_opc_mxu__pool07(CPUMIPSState *env, DisasContext *ctx)
+{
+    uint32_t opcode = extract32(ctx->opcode, 10, 4);
+
+    switch (opcode) {
+    case OPC_MXU_S32STDV:
+        /* TODO: Implement emulation of S32STDV instruction. */
+        MIPS_INVAL("OPC_MXU_S32STDV");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    case OPC_MXU_S32STDVR:
+        /* TODO: Implement emulation of S32STDVR instruction. */
+        MIPS_INVAL("OPC_MXU_S32STDVR");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    default:
+        MIPS_INVAL("decode_opc_mxu");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    }
+}
+
+/*
+ *
+ * Decode MXU pool08
+ *
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-----------+---------+-+-------------------+-------+-----------+
+ * | SPECIAL2 | rb |x| s12 | XRa |MXU__POOL08|
+ * +-----------+---------+-+-------------------+-------+-----------+
+ *
+ */
+static void decode_opc_mxu__pool08(CPUMIPSState *env, DisasContext *ctx)
+{
+    uint32_t opcode = extract32(ctx->opcode, 20, 1);
+
+    switch (opcode) {
+    case OPC_MXU_S32LDI:
+        /* TODO: Implement emulation of S32LDI instruction. */
+        MIPS_INVAL("OPC_MXU_S32LDI");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    case OPC_MXU_S32LDIR:
+        /* TODO: Implement emulation of S32LDIR instruction. */
+        MIPS_INVAL("OPC_MXU_S32LDIR");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    default:
+        MIPS_INVAL("decode_opc_mxu");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    }
+}
+
+/*
+ *
+ * Decode MXU pool09
+ *
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-----------+---------+-+-------------------+-------+-----------+
+ * | SPECIAL2 | rb |x| s12 | XRa |MXU__POOL09|
+ * +-----------+---------+-+-------------------+-------+-----------+
+ *
+ */
+static void decode_opc_mxu__pool09(CPUMIPSState *env, DisasContext *ctx)
+{
+    uint32_t opcode = extract32(ctx->opcode, 20, 1);
+
+    switch (opcode) {
+    case OPC_MXU_S32SDI:
+        /* TODO: Implement emulation of S32SDI instruction. */
+        MIPS_INVAL("OPC_MXU_S32SDI");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    case OPC_MXU_S32SDIR:
+        /* TODO: Implement emulation of S32SDIR instruction. */
+        MIPS_INVAL("OPC_MXU_S32SDIR");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    default:
+        MIPS_INVAL("decode_opc_mxu");
+        generate_exception_end(ctx, EXCP_RI);
+        break;
+    }
+}
+
+/*
+ *
+ * Decode MXU pool10
+ *
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-----------+---------+---------+---+-------+-------+-----------+
+ * | SPECIAL2 | rb | rc |st2|x x x x| XRa |MXU__POOL10|
+ * +-----------+---------+---------+---+-------+-------+-----------+
+ *
+ */
+static void decode_opc_mxu__pool10(CPUMIPSState *env, DisasContext *ctx)
+{
+    uint32_t opcode = extract32(ctx->opcode, 10, 4);
+
+    switch (opcode) {
+    case OPC_MXU_S32LDIV:
+        /* TODO: Implement emulation of S32LDIV instruction.
*/ + MIPS_INVAL("OPC_MXU_S32LDIV"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32LDIVR: + /* TODO: Implement emulation of S32LDIVR instruction. */ + MIPS_INVAL("OPC_MXU_S32LDIVR"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool11 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+---------+---+-------+-------+-----------+ + * | SPECIAL2 | rb | rc |st2|x x x x| XRa |MXU__POOL11| + * +-----------+---------+---------+---+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool11(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 10, 4); + + switch (opcode) { + case OPC_MXU_S32SDIV: + /* TODO: Implement emulation of S32SDIV instruction. */ + MIPS_INVAL("OPC_MXU_S32SDIV"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32SDIVR: + /* TODO: Implement emulation of S32SDIVR instruction. */ + MIPS_INVAL("OPC_MXU_S32SDIVR"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool12 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * | SPECIAL2 |an2|x x| Xd | XRc | XRb | XRa |MXU__POOL12| + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool12(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 22, 2); + + switch (opcode) { + case OPC_MXU_D32ACC: + /* TODO: Implement emulation of D32ACC instruction. */ + MIPS_INVAL("OPC_MXU_D32ACC"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D32ACCM: + /* TODO: Implement emulation of D32ACCM instruction. */ + MIPS_INVAL("OPC_MXU_D32ACCM"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D32ASUM: + /* TODO: Implement emulation of D32ASUM instruction. */ + MIPS_INVAL("OPC_MXU_D32ASUM"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool13 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * | SPECIAL2 |en2|x x|0 0 0 0| XRc | XRb | XRa |MXU__POOL13| + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool13(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 22, 2); + + switch (opcode) { + case OPC_MXU_Q16ACC: + /* TODO: Implement emulation of Q16ACC instruction. */ + MIPS_INVAL("OPC_MXU_Q16ACC"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q16ACCM: + /* TODO: Implement emulation of Q16ACCM instruction. */ + MIPS_INVAL("OPC_MXU_Q16ACCM"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q16ASUM: + /* TODO: Implement emulation of Q16ASUM instruction. 
*/ + MIPS_INVAL("OPC_MXU_Q16ASUM"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool14 + * + * Q8ADDE, Q8ACCE: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * | SPECIAL2 |0 0|x x| XRd | XRc | XRb | XRa |MXU__POOL14| + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * + * D8SUM, D8SUMC: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * | SPECIAL2 |en2|x x|0 0 0 0| XRc | XRb | XRa |MXU__POOL14| + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool14(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 22, 2); + + switch (opcode) { + case OPC_MXU_Q8ADDE: + /* TODO: Implement emulation of Q8ADDE instruction. */ + MIPS_INVAL("OPC_MXU_Q8ADDE"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D8SUM: + /* TODO: Implement emulation of D8SUM instruction. */ + MIPS_INVAL("OPC_MXU_D8SUM"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D8SUMC: + /* TODO: Implement emulation of D8SUMC instruction. */ + MIPS_INVAL("OPC_MXU_D8SUMC"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool15 + * + * S32MUL, S32MULU, S32EXTRV: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+---------+---+-------+-------+-----------+ + * | SPECIAL2 | rs | rt |x x| XRd | XRa |MXU__POOL15| + * +-----------+---------+---------+---+-------+-------+-----------+ + * + * S32EXTR: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+---------+---+-------+-------+-----------+ + * | SPECIAL2 | rb | sft5 |x x| XRd | XRa |MXU__POOL15| + * +-----------+---------+---------+---+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool15(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 14, 2); + + switch (opcode) { + case OPC_MXU_S32MUL: + /* TODO: Implement emulation of S32MUL instruction. */ + MIPS_INVAL("OPC_MXU_S32MUL"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32MULU: + /* TODO: Implement emulation of S32MULU instruction. */ + MIPS_INVAL("OPC_MXU_S32MULU"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32EXTR: + /* TODO: Implement emulation of S32EXTR instruction. */ + MIPS_INVAL("OPC_MXU_S32EXTR"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32EXTRV: + /* TODO: Implement emulation of S32EXTRV instruction. 
*/ + MIPS_INVAL("OPC_MXU_S32EXTRV"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool16 + * + * D32SARW: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 | rb |x x x| XRc | XRb | XRa |MXU__POOL16| + * +-----------+---------+-----+-------+-------+-------+-----------+ + * + * S32ALN: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 | rs |x x x| XRc | XRb | XRa |MXU__POOL16| + * +-----------+---------+-----+-------+-------+-------+-----------+ + * + * S32ALNI: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+-----+---+-----+-------+-------+-------+-----------+ + * | SPECIAL2 | s3 |0 0|x x x| XRc | XRb | XRa |MXU__POOL16| + * +-----------+-----+---+-----+-------+-------+-------+-----------+ + * + * S32LUI: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+-----+---+-----+-------+---------------+-----------+ + * | SPECIAL2 |optn3|0 0|x x x| XRc | s8 |MXU__POOL16| + * +-----------+-----+---+-----+-------+---------------+-----------+ + * + * S32NOR, S32AND, S32OR, S32XOR: + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 |0 0 0 0 0|x x x| XRc | XRb | XRa |MXU__POOL16| + * +-----------+---------+-----+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool16(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 18, 3); + + switch (opcode) { + case OPC_MXU_D32SARW: + /* TODO: Implement emulation of D32SARW instruction. */ + MIPS_INVAL("OPC_MXU_D32SARW"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32ALN: + /* TODO: Implement emulation of S32ALN instruction. */ + MIPS_INVAL("OPC_MXU_S32ALN"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32ALNI: + gen_mxu_S32ALNI(ctx); + break; + case OPC_MXU_S32LUI: + /* TODO: Implement emulation of S32LUI instruction. */ + MIPS_INVAL("OPC_MXU_S32LUI"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32NOR: + gen_mxu_S32NOR(ctx); + break; + case OPC_MXU_S32AND: + gen_mxu_S32AND(ctx); + break; + case OPC_MXU_S32OR: + gen_mxu_S32OR(ctx); + break; + case OPC_MXU_S32XOR: + gen_mxu_S32XOR(ctx); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool17 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+---------+---+---------+-----+-----------+ + * | SPECIAL2 | rs | rt |0 0| rd |x x x|MXU__POOL15| + * +-----------+---------+---------+---+---------+-----+-----------+ + * + */ +static void decode_opc_mxu__pool17(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 6, 2); + + switch (opcode) { + case OPC_MXU_LXW: + /* TODO: Implement emulation of LXW instruction. */ + MIPS_INVAL("OPC_MXU_LXW"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_LXH: + /* TODO: Implement emulation of LXH instruction. */ + MIPS_INVAL("OPC_MXU_LXH"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_LXHU: + /* TODO: Implement emulation of LXHU instruction. 
*/ + MIPS_INVAL("OPC_MXU_LXHU"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_LXB: + /* TODO: Implement emulation of LXB instruction. */ + MIPS_INVAL("OPC_MXU_LXB"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_LXBU: + /* TODO: Implement emulation of LXBU instruction. */ + MIPS_INVAL("OPC_MXU_LXBU"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} +/* + * + * Decode MXU pool18 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 | rb |x x x| XRd | XRa |0 0 0 0|MXU__POOL18| + * +-----------+---------+-----+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool18(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 18, 3); + + switch (opcode) { + case OPC_MXU_D32SLLV: + /* TODO: Implement emulation of D32SLLV instruction. */ + MIPS_INVAL("OPC_MXU_D32SLLV"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D32SLRV: + /* TODO: Implement emulation of D32SLRV instruction. */ + MIPS_INVAL("OPC_MXU_D32SLRV"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D32SARV: + /* TODO: Implement emulation of D32SARV instruction. */ + MIPS_INVAL("OPC_MXU_D32SARV"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q16SLLV: + /* TODO: Implement emulation of Q16SLLV instruction. */ + MIPS_INVAL("OPC_MXU_Q16SLLV"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q16SLRV: + /* TODO: Implement emulation of Q16SLRV instruction. */ + MIPS_INVAL("OPC_MXU_Q16SLRV"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q16SARV: + /* TODO: Implement emulation of Q16SARV instruction. */ + MIPS_INVAL("OPC_MXU_Q16SARV"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool19 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * | SPECIAL2 |0 0|x x| XRd | XRc | XRb | XRa |MXU__POOL19| + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool19(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 22, 2); + + switch (opcode) { + case OPC_MXU_Q8MUL: + case OPC_MXU_Q8MULSU: + gen_mxu_q8mul_q8mulsu(ctx); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool20 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------+-----+-------+-------+-------+-----------+ + * | SPECIAL2 |0 0 0 0 0|x x x| XRc | XRb | XRa |MXU__POOL20| + * +-----------+---------+-----+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool20(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 18, 3); + + switch (opcode) { + case OPC_MXU_Q8MOVZ: + /* TODO: Implement emulation of Q8MOVZ instruction. */ + MIPS_INVAL("OPC_MXU_Q8MOVZ"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q8MOVN: + /* TODO: Implement emulation of Q8MOVN instruction. */ + MIPS_INVAL("OPC_MXU_Q8MOVN"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D16MOVZ: + /* TODO: Implement emulation of D16MOVZ instruction. 
*/ + MIPS_INVAL("OPC_MXU_D16MOVZ"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D16MOVN: + /* TODO: Implement emulation of D16MOVN instruction. */ + MIPS_INVAL("OPC_MXU_D16MOVN"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32MOVZ: + /* TODO: Implement emulation of S32MOVZ instruction. */ + MIPS_INVAL("OPC_MXU_S32MOVZ"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32MOVN: + /* TODO: Implement emulation of S32MOVN instruction. */ + MIPS_INVAL("OPC_MXU_S32MOVN"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +/* + * + * Decode MXU pool21 + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * | SPECIAL2 |an2|x x| XRd | XRc | XRb | XRa |MXU__POOL21| + * +-----------+---+---+-------+-------+-------+-------+-----------+ + * + */ +static void decode_opc_mxu__pool21(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opcode = extract32(ctx->opcode, 22, 2); + + switch (opcode) { + case OPC_MXU_Q8MAC: + /* TODO: Implement emulation of Q8MAC instruction. */ + MIPS_INVAL("OPC_MXU_Q8MAC"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q8MACSU: + /* TODO: Implement emulation of Q8MACSU instruction. */ + MIPS_INVAL("OPC_MXU_Q8MACSU"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + + +/* + * Main MXU decoding function + * + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-----------+---------------------------------------+-----------+ + * | SPECIAL2 | |x x x x x x| + * +-----------+---------------------------------------+-----------+ + * + */ +static void decode_opc_mxu(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + /* + * TODO: Investigate necessity of including handling of + * CLZ, CLO, SDBB in this function, as they belong to + * SPECIAL2 opcode space for regular pre-R6 MIPS ISAs. + */ + uint32_t opcode = extract32(ctx->opcode, 0, 6); + + if (opcode == OPC__MXU_MUL) { + uint32_t rs, rt, rd, op1; + + rs = extract32(ctx->opcode, 21, 5); + rt = extract32(ctx->opcode, 16, 5); + rd = extract32(ctx->opcode, 11, 5); + op1 = MASK_SPECIAL2(ctx->opcode); + + gen_arith(ctx, op1, rd, rs, rt); + + return; + } + + if (opcode == OPC_MXU_S32M2I) { + gen_mxu_s32m2i(ctx); + return; + } + + if (opcode == OPC_MXU_S32I2M) { + gen_mxu_s32i2m(ctx); + return; + } + + { + TCGv t_mxu_cr = tcg_temp_new(tcg_ctx); + TCGLabel *l_exit = gen_new_label(tcg_ctx); + + gen_load_mxu_cr(tcg_ctx, t_mxu_cr); + tcg_gen_andi_tl(tcg_ctx, t_mxu_cr, t_mxu_cr, MXU_CR_MXU_EN); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t_mxu_cr, MXU_CR_MXU_EN, l_exit); + + switch (opcode) { + case OPC_MXU_S32MADD: + /* TODO: Implement emulation of S32MADD instruction. */ + MIPS_INVAL("OPC_MXU_S32MADD"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32MADDU: + /* TODO: Implement emulation of S32MADDU instruction. */ + MIPS_INVAL("OPC_MXU_S32MADDU"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU__POOL00: + decode_opc_mxu__pool00(env, ctx); + break; + case OPC_MXU_S32MSUB: + /* TODO: Implement emulation of S32MSUB instruction. */ + MIPS_INVAL("OPC_MXU_S32MSUB"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32MSUBU: + /* TODO: Implement emulation of S32MSUBU instruction. 
*/ + MIPS_INVAL("OPC_MXU_S32MSUBU"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU__POOL01: + decode_opc_mxu__pool01(env, ctx); + break; + case OPC_MXU__POOL02: + decode_opc_mxu__pool02(env, ctx); + break; + case OPC_MXU_D16MUL: + gen_mxu_d16mul(ctx); + break; + case OPC_MXU__POOL03: + decode_opc_mxu__pool03(env, ctx); + break; + case OPC_MXU_D16MAC: + gen_mxu_d16mac(ctx); + break; + case OPC_MXU_D16MACF: + /* TODO: Implement emulation of D16MACF instruction. */ + MIPS_INVAL("OPC_MXU_D16MACF"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D16MADL: + /* TODO: Implement emulation of D16MADL instruction. */ + MIPS_INVAL("OPC_MXU_D16MADL"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S16MAD: + /* TODO: Implement emulation of S16MAD instruction. */ + MIPS_INVAL("OPC_MXU_S16MAD"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q16ADD: + /* TODO: Implement emulation of Q16ADD instruction. */ + MIPS_INVAL("OPC_MXU_Q16ADD"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D16MACE: + /* TODO: Implement emulation of D16MACE instruction. */ + MIPS_INVAL("OPC_MXU_D16MACE"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU__POOL04: + decode_opc_mxu__pool04(env, ctx); + break; + case OPC_MXU__POOL05: + decode_opc_mxu__pool05(env, ctx); + break; + case OPC_MXU__POOL06: + decode_opc_mxu__pool06(env, ctx); + break; + case OPC_MXU__POOL07: + decode_opc_mxu__pool07(env, ctx); + break; + case OPC_MXU__POOL08: + decode_opc_mxu__pool08(env, ctx); + break; + case OPC_MXU__POOL09: + decode_opc_mxu__pool09(env, ctx); + break; + case OPC_MXU__POOL10: + decode_opc_mxu__pool10(env, ctx); + break; + case OPC_MXU__POOL11: + decode_opc_mxu__pool11(env, ctx); + break; + case OPC_MXU_D32ADD: + /* TODO: Implement emulation of D32ADD instruction. */ + MIPS_INVAL("OPC_MXU_D32ADD"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU__POOL12: + decode_opc_mxu__pool12(env, ctx); + break; + case OPC_MXU__POOL13: + decode_opc_mxu__pool13(env, ctx); + break; + case OPC_MXU__POOL14: + decode_opc_mxu__pool14(env, ctx); + break; + case OPC_MXU_Q8ACCE: + /* TODO: Implement emulation of Q8ACCE instruction. */ + MIPS_INVAL("OPC_MXU_Q8ACCE"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S8LDD: + gen_mxu_s8ldd(ctx); + break; + case OPC_MXU_S8STD: + /* TODO: Implement emulation of S8STD instruction. */ + MIPS_INVAL("OPC_MXU_S8STD"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S8LDI: + /* TODO: Implement emulation of S8LDI instruction. */ + MIPS_INVAL("OPC_MXU_S8LDI"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S8SDI: + /* TODO: Implement emulation of S8SDI instruction. */ + MIPS_INVAL("OPC_MXU_S8SDI"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU__POOL15: + decode_opc_mxu__pool15(env, ctx); + break; + case OPC_MXU__POOL16: + decode_opc_mxu__pool16(env, ctx); + break; + case OPC_MXU__POOL17: + decode_opc_mxu__pool17(env, ctx); + break; + case OPC_MXU_S16LDD: + /* TODO: Implement emulation of S16LDD instruction. */ + MIPS_INVAL("OPC_MXU_S16LDD"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S16STD: + /* TODO: Implement emulation of S16STD instruction. */ + MIPS_INVAL("OPC_MXU_S16STD"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S16LDI: + /* TODO: Implement emulation of S16LDI instruction. 
*/ + MIPS_INVAL("OPC_MXU_S16LDI"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S16SDI: + /* TODO: Implement emulation of S16SDI instruction. */ + MIPS_INVAL("OPC_MXU_S16SDI"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D32SLL: + /* TODO: Implement emulation of D32SLL instruction. */ + MIPS_INVAL("OPC_MXU_D32SLL"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D32SLR: + /* TODO: Implement emulation of D32SLR instruction. */ + MIPS_INVAL("OPC_MXU_D32SLR"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D32SARL: + /* TODO: Implement emulation of D32SARL instruction. */ + MIPS_INVAL("OPC_MXU_D32SARL"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_D32SAR: + /* TODO: Implement emulation of D32SAR instruction. */ + MIPS_INVAL("OPC_MXU_D32SAR"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q16SLL: + /* TODO: Implement emulation of Q16SLL instruction. */ + MIPS_INVAL("OPC_MXU_Q16SLL"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q16SLR: + /* TODO: Implement emulation of Q16SLR instruction. */ + MIPS_INVAL("OPC_MXU_Q16SLR"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU__POOL18: + decode_opc_mxu__pool18(env, ctx); + break; + case OPC_MXU_Q16SAR: + /* TODO: Implement emulation of Q16SAR instruction. */ + MIPS_INVAL("OPC_MXU_Q16SAR"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU__POOL19: + decode_opc_mxu__pool19(env, ctx); + break; + case OPC_MXU__POOL20: + decode_opc_mxu__pool20(env, ctx); + break; + case OPC_MXU__POOL21: + decode_opc_mxu__pool21(env, ctx); + break; + case OPC_MXU_Q16SCOP: + /* TODO: Implement emulation of Q16SCOP instruction. */ + MIPS_INVAL("OPC_MXU_Q16SCOP"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q8MADL: + /* TODO: Implement emulation of Q8MADL instruction. */ + MIPS_INVAL("OPC_MXU_Q8MADL"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_S32SFL: + /* TODO: Implement emulation of S32SFL instruction. */ + MIPS_INVAL("OPC_MXU_S32SFL"); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_MXU_Q8SAD: + /* TODO: Implement emulation of Q8SAD instruction. 
*/ + MIPS_INVAL("OPC_MXU_Q8SAD"); + generate_exception_end(ctx, EXCP_RI); + break; + default: + MIPS_INVAL("decode_opc_mxu"); + generate_exception_end(ctx, EXCP_RI); + } + + gen_set_label(tcg_ctx, l_exit); + tcg_temp_free(tcg_ctx, t_mxu_cr); + } +} + +#endif /* !defined(TARGET_MIPS64) */ + + +static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx) +{ + int rs, rt, rd; + uint32_t op1; + + check_insn_opc_removed(ctx, ISA_MIPS32R6); + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + + op1 = MASK_SPECIAL2(ctx->opcode); + switch (op1) { + case OPC_MADD: /* Multiply and add/sub */ + case OPC_MADDU: + case OPC_MSUB: + case OPC_MSUBU: + check_insn(ctx, ISA_MIPS32); + gen_muldiv(ctx, op1, rd & 3, rs, rt); + break; + case OPC_MUL: + gen_arith(ctx, op1, rd, rs, rt); + break; + case OPC_DIV_G_2F: + case OPC_DIVU_G_2F: + case OPC_MULT_G_2F: + case OPC_MULTU_G_2F: + case OPC_MOD_G_2F: + case OPC_MODU_G_2F: + check_insn(ctx, INSN_LOONGSON2F); + gen_loongson_integer(ctx, op1, rd, rs, rt); + break; + case OPC_CLO: + case OPC_CLZ: + check_insn(ctx, ISA_MIPS32); + gen_cl(ctx, op1, rd, rs); + break; + case OPC_SDBBP: + if (is_uhi(extract32(ctx->opcode, 6, 20))) { + // gen_helper_do_semihosting(tcg_ctx, tcg_ctx->cpu_env); + } else { + /* + * XXX: not clear which exception should be raised + * when in debug mode... + */ + check_insn(ctx, ISA_MIPS32); + generate_exception_end(ctx, EXCP_DBp); + } + break; +#if defined(TARGET_MIPS64) + case OPC_DCLO: + case OPC_DCLZ: + check_insn(ctx, ISA_MIPS64); + check_mips_64(ctx); + gen_cl(ctx, op1, rd, rs); + break; + case OPC_DMULT_G_2F: + case OPC_DMULTU_G_2F: + case OPC_DDIV_G_2F: + case OPC_DDIVU_G_2F: + case OPC_DMOD_G_2F: + case OPC_DMODU_G_2F: + check_insn(ctx, INSN_LOONGSON2F); + gen_loongson_integer(ctx, op1, rd, rs, rt); + break; +#endif + default: /* Invalid */ + MIPS_INVAL("special2_legacy"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rs, rt, rd, sa; + uint32_t op1, op2; + int16_t imm; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + imm = (int16_t)ctx->opcode >> 7; + + op1 = MASK_SPECIAL3(ctx->opcode); + switch (op1) { + case R6_OPC_PREF: + if (rt >= 24) { + /* hint codes 24-31 are reserved and signal RI */ + generate_exception_end(ctx, EXCP_RI); + } + /* Treat as NOP. */ + break; + case R6_OPC_CACHE: + check_cp0_enabled(ctx); + if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { + gen_cache_operation(ctx, rt, rs, imm); + } + break; + case R6_OPC_SC: + gen_st_cond(ctx, rt, rs, imm, MO_TESL, false); + break; + case R6_OPC_LL: + gen_ld(ctx, op1, rt, rs, imm); + break; + case OPC_BSHFL: + { + if (rd == 0) { + /* Treat as NOP. */ + break; + } + op2 = MASK_BSHFL(ctx->opcode); + switch (op2) { + case OPC_ALIGN: + case OPC_ALIGN_1: + case OPC_ALIGN_2: + case OPC_ALIGN_3: + gen_align(ctx, 32, rd, rs, rt, sa & 3); + break; + case OPC_BITSWAP: + gen_bitswap(ctx, op2, rd, rt); + break; + } + } + break; + case OPC_GINV: + if (unlikely(ctx->gi <= 1)) { + generate_exception_end(ctx, EXCP_RI); + } + check_cp0_enabled(ctx); + switch ((ctx->opcode >> 6) & 3) { + case 0: /* GINVI */ + /* Treat as NOP. 
*/ + break; + case 2: /* GINVT */ + gen_helper_0e1i(ginvt, tcg_ctx->cpu_gpr[rs], extract32(ctx->opcode, 8, 2)); + break; + default: + generate_exception_end(ctx, EXCP_RI); + break; + } + break; +#if defined(TARGET_MIPS64) + case R6_OPC_SCD: + gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false); + break; + case R6_OPC_LLD: + gen_ld(ctx, op1, rt, rs, imm); + break; + case OPC_DBSHFL: + check_mips_64(ctx); + { + if (rd == 0) { + /* Treat as NOP. */ + break; + } + op2 = MASK_DBSHFL(ctx->opcode); + switch (op2) { + case OPC_DALIGN: + case OPC_DALIGN_1: + case OPC_DALIGN_2: + case OPC_DALIGN_3: + case OPC_DALIGN_4: + case OPC_DALIGN_5: + case OPC_DALIGN_6: + case OPC_DALIGN_7: + gen_align(ctx, 64, rd, rs, rt, sa & 7); + break; + case OPC_DBITSWAP: + gen_bitswap(ctx, op2, rd, rt); + break; + } + + } + break; +#endif + default: /* Invalid */ + MIPS_INVAL("special3_r6"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rs, rt, rd; + uint32_t op1, op2; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + + op1 = MASK_SPECIAL3(ctx->opcode); + switch (op1) { + case OPC_DIV_G_2E: + case OPC_DIVU_G_2E: + case OPC_MOD_G_2E: + case OPC_MODU_G_2E: + case OPC_MULT_G_2E: + case OPC_MULTU_G_2E: + /* + * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have + * the same mask and op1. + */ + if ((ctx->insn_flags & ASE_DSP_R2) && (op1 == OPC_MULT_G_2E)) { + op2 = MASK_ADDUH_QB(ctx->opcode); + switch (op2) { + case OPC_ADDUH_QB: + case OPC_ADDUH_R_QB: + case OPC_ADDQH_PH: + case OPC_ADDQH_R_PH: + case OPC_ADDQH_W: + case OPC_ADDQH_R_W: + case OPC_SUBUH_QB: + case OPC_SUBUH_R_QB: + case OPC_SUBQH_PH: + case OPC_SUBQH_R_PH: + case OPC_SUBQH_W: + case OPC_SUBQH_R_W: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_MUL_PH: + case OPC_MUL_S_PH: + case OPC_MULQ_S_W: + case OPC_MULQ_RS_W: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); + break; + default: + MIPS_INVAL("MASK ADDUH.QB"); + generate_exception_end(ctx, EXCP_RI); + break; + } + } else if (ctx->insn_flags & INSN_LOONGSON2E) { + gen_loongson_integer(ctx, op1, rd, rs, rt); + } else { + generate_exception_end(ctx, EXCP_RI); + } + break; + case OPC_LX_DSP: + op2 = MASK_LX(ctx->opcode); + switch (op2) { +#if defined(TARGET_MIPS64) + case OPC_LDX: +#endif + case OPC_LBUX: + case OPC_LHX: + case OPC_LWX: + gen_mipsdsp_ld(ctx, op2, rd, rs, rt); + break; + default: /* Invalid */ + MIPS_INVAL("MASK LX"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_ABSQ_S_PH_DSP: + op2 = MASK_ABSQ_S_PH(ctx->opcode); + switch (op2) { + case OPC_ABSQ_S_QB: + case OPC_ABSQ_S_PH: + case OPC_ABSQ_S_W: + case OPC_PRECEQ_W_PHL: + case OPC_PRECEQ_W_PHR: + case OPC_PRECEQU_PH_QBL: + case OPC_PRECEQU_PH_QBR: + case OPC_PRECEQU_PH_QBLA: + case OPC_PRECEQU_PH_QBRA: + case OPC_PRECEU_PH_QBL: + case OPC_PRECEU_PH_QBR: + case OPC_PRECEU_PH_QBLA: + case OPC_PRECEU_PH_QBRA: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_BITREV: + case OPC_REPL_QB: + case OPC_REPLV_QB: + case OPC_REPL_PH: + case OPC_REPLV_PH: + gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt); + break; + default: + MIPS_INVAL("MASK ABSQ_S.PH"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_ADDU_QB_DSP: + op2 = MASK_ADDU_QB(ctx->opcode); + switch (op2) { + case OPC_ADDQ_PH: + case OPC_ADDQ_S_PH: + case OPC_ADDQ_S_W: + case OPC_ADDU_QB: + case OPC_ADDU_S_QB: + 
case OPC_ADDU_PH: + case OPC_ADDU_S_PH: + case OPC_SUBQ_PH: + case OPC_SUBQ_S_PH: + case OPC_SUBQ_S_W: + case OPC_SUBU_QB: + case OPC_SUBU_S_QB: + case OPC_SUBU_PH: + case OPC_SUBU_S_PH: + case OPC_ADDSC: + case OPC_ADDWC: + case OPC_MODSUB: + case OPC_RADDU_W_QB: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_MULEU_S_PH_QBL: + case OPC_MULEU_S_PH_QBR: + case OPC_MULQ_RS_PH: + case OPC_MULEQ_S_W_PHL: + case OPC_MULEQ_S_W_PHR: + case OPC_MULQ_S_PH: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); + break; + default: /* Invalid */ + MIPS_INVAL("MASK ADDU.QB"); + generate_exception_end(ctx, EXCP_RI); + break; + + } + break; + case OPC_CMPU_EQ_QB_DSP: + op2 = MASK_CMPU_EQ_QB(ctx->opcode); + switch (op2) { + case OPC_PRECR_SRA_PH_W: + case OPC_PRECR_SRA_R_PH_W: + gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd); + break; + case OPC_PRECR_QB_PH: + case OPC_PRECRQ_QB_PH: + case OPC_PRECRQ_PH_W: + case OPC_PRECRQ_RS_PH_W: + case OPC_PRECRQU_S_QB_PH: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_CMPU_EQ_QB: + case OPC_CMPU_LT_QB: + case OPC_CMPU_LE_QB: + case OPC_CMP_EQ_PH: + case OPC_CMP_LT_PH: + case OPC_CMP_LE_PH: + gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0); + break; + case OPC_CMPGU_EQ_QB: + case OPC_CMPGU_LT_QB: + case OPC_CMPGU_LE_QB: + case OPC_CMPGDU_EQ_QB: + case OPC_CMPGDU_LT_QB: + case OPC_CMPGDU_LE_QB: + case OPC_PICK_QB: + case OPC_PICK_PH: + case OPC_PACKRL_PH: + gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1); + break; + default: /* Invalid */ + MIPS_INVAL("MASK CMPU.EQ.QB"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_SHLL_QB_DSP: + gen_mipsdsp_shift(ctx, op1, rd, rs, rt); + break; + case OPC_DPA_W_PH_DSP: + op2 = MASK_DPA_W_PH(ctx->opcode); + switch (op2) { + case OPC_DPAU_H_QBL: + case OPC_DPAU_H_QBR: + case OPC_DPSU_H_QBL: + case OPC_DPSU_H_QBR: + case OPC_DPA_W_PH: + case OPC_DPAX_W_PH: + case OPC_DPAQ_S_W_PH: + case OPC_DPAQX_S_W_PH: + case OPC_DPAQX_SA_W_PH: + case OPC_DPS_W_PH: + case OPC_DPSX_W_PH: + case OPC_DPSQ_S_W_PH: + case OPC_DPSQX_S_W_PH: + case OPC_DPSQX_SA_W_PH: + case OPC_MULSAQ_S_W_PH: + case OPC_DPAQ_SA_L_W: + case OPC_DPSQ_SA_L_W: + case OPC_MAQ_S_W_PHL: + case OPC_MAQ_S_W_PHR: + case OPC_MAQ_SA_W_PHL: + case OPC_MAQ_SA_W_PHR: + case OPC_MULSA_W_PH: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); + break; + default: /* Invalid */ + MIPS_INVAL("MASK DPAW.PH"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_INSV_DSP: + op2 = MASK_INSV(ctx->opcode); + switch (op2) { + case OPC_INSV: + check_dsp(ctx); + { + TCGv t0, t1; + + if (rt == 0) { + break; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + gen_load_gpr(tcg_ctx, t1, rs); + + gen_helper_insv(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_env, t1, t0); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + break; + } + default: /* Invalid */ + MIPS_INVAL("MASK INSV"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_APPEND_DSP: + gen_mipsdsp_append(env, ctx, op1, rt, rs, rd); + break; + case OPC_EXTR_W_DSP: + op2 = MASK_EXTR_W(ctx->opcode); + switch (op2) { + case OPC_EXTR_W: + case OPC_EXTR_R_W: + case OPC_EXTR_RS_W: + case OPC_EXTR_S_H: + case OPC_EXTRV_S_H: + case OPC_EXTRV_W: + case OPC_EXTRV_R_W: + case OPC_EXTRV_RS_W: + case OPC_EXTP: + case OPC_EXTPV: + case OPC_EXTPDP: + case OPC_EXTPDPV: + gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1); + break; + case OPC_RDDSP: + gen_mipsdsp_accinsn(ctx, op1, 
op2, rd, rs, rt, 1); + break; + case OPC_SHILO: + case OPC_SHILOV: + case OPC_MTHLIP: + case OPC_WRDSP: + gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0); + break; + default: /* Invalid */ + MIPS_INVAL("MASK EXTR.W"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; +#if defined(TARGET_MIPS64) + case OPC_DDIV_G_2E: + case OPC_DDIVU_G_2E: + case OPC_DMULT_G_2E: + case OPC_DMULTU_G_2E: + case OPC_DMOD_G_2E: + case OPC_DMODU_G_2E: + check_insn(ctx, INSN_LOONGSON2E); + gen_loongson_integer(ctx, op1, rd, rs, rt); + break; + case OPC_ABSQ_S_QH_DSP: + op2 = MASK_ABSQ_S_QH(ctx->opcode); + switch (op2) { + case OPC_PRECEQ_L_PWL: + case OPC_PRECEQ_L_PWR: + case OPC_PRECEQ_PW_QHL: + case OPC_PRECEQ_PW_QHR: + case OPC_PRECEQ_PW_QHLA: + case OPC_PRECEQ_PW_QHRA: + case OPC_PRECEQU_QH_OBL: + case OPC_PRECEQU_QH_OBR: + case OPC_PRECEQU_QH_OBLA: + case OPC_PRECEQU_QH_OBRA: + case OPC_PRECEU_QH_OBL: + case OPC_PRECEU_QH_OBR: + case OPC_PRECEU_QH_OBLA: + case OPC_PRECEU_QH_OBRA: + case OPC_ABSQ_S_OB: + case OPC_ABSQ_S_PW: + case OPC_ABSQ_S_QH: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_REPL_OB: + case OPC_REPL_PW: + case OPC_REPL_QH: + case OPC_REPLV_OB: + case OPC_REPLV_PW: + case OPC_REPLV_QH: + gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt); + break; + default: /* Invalid */ + MIPS_INVAL("MASK ABSQ_S.QH"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_ADDU_OB_DSP: + op2 = MASK_ADDU_OB(ctx->opcode); + switch (op2) { + case OPC_RADDU_L_OB: + case OPC_SUBQ_PW: + case OPC_SUBQ_S_PW: + case OPC_SUBQ_QH: + case OPC_SUBQ_S_QH: + case OPC_SUBU_OB: + case OPC_SUBU_S_OB: + case OPC_SUBU_QH: + case OPC_SUBU_S_QH: + case OPC_SUBUH_OB: + case OPC_SUBUH_R_OB: + case OPC_ADDQ_PW: + case OPC_ADDQ_S_PW: + case OPC_ADDQ_QH: + case OPC_ADDQ_S_QH: + case OPC_ADDU_OB: + case OPC_ADDU_S_OB: + case OPC_ADDU_QH: + case OPC_ADDU_S_QH: + case OPC_ADDUH_OB: + case OPC_ADDUH_R_OB: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_MULEQ_S_PW_QHL: + case OPC_MULEQ_S_PW_QHR: + case OPC_MULEU_S_QH_OBL: + case OPC_MULEU_S_QH_OBR: + case OPC_MULQ_RS_QH: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); + break; + default: /* Invalid */ + MIPS_INVAL("MASK ADDU.OB"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_CMPU_EQ_OB_DSP: + op2 = MASK_CMPU_EQ_OB(ctx->opcode); + switch (op2) { + case OPC_PRECR_SRA_QH_PW: + case OPC_PRECR_SRA_R_QH_PW: + /* Return value is rt. 
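+             * Unlike the other cases in this switch, these two
+             * write their result to rt, so rt and rd swap places
+             * in the gen_mipsdsp_arith() call.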
*/ + gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd); + break; + case OPC_PRECR_OB_QH: + case OPC_PRECRQ_OB_QH: + case OPC_PRECRQ_PW_L: + case OPC_PRECRQ_QH_PW: + case OPC_PRECRQ_RS_QH_PW: + case OPC_PRECRQU_S_OB_QH: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_CMPU_EQ_OB: + case OPC_CMPU_LT_OB: + case OPC_CMPU_LE_OB: + case OPC_CMP_EQ_QH: + case OPC_CMP_LT_QH: + case OPC_CMP_LE_QH: + case OPC_CMP_EQ_PW: + case OPC_CMP_LT_PW: + case OPC_CMP_LE_PW: + gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0); + break; + case OPC_CMPGDU_EQ_OB: + case OPC_CMPGDU_LT_OB: + case OPC_CMPGDU_LE_OB: + case OPC_CMPGU_EQ_OB: + case OPC_CMPGU_LT_OB: + case OPC_CMPGU_LE_OB: + case OPC_PACKRL_PW: + case OPC_PICK_OB: + case OPC_PICK_PW: + case OPC_PICK_QH: + gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1); + break; + default: /* Invalid */ + MIPS_INVAL("MASK CMPU_EQ.OB"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_DAPPEND_DSP: + gen_mipsdsp_append(env, ctx, op1, rt, rs, rd); + break; + case OPC_DEXTR_W_DSP: + op2 = MASK_DEXTR_W(ctx->opcode); + switch (op2) { + case OPC_DEXTP: + case OPC_DEXTPDP: + case OPC_DEXTPDPV: + case OPC_DEXTPV: + case OPC_DEXTR_L: + case OPC_DEXTR_R_L: + case OPC_DEXTR_RS_L: + case OPC_DEXTR_W: + case OPC_DEXTR_R_W: + case OPC_DEXTR_RS_W: + case OPC_DEXTR_S_H: + case OPC_DEXTRV_L: + case OPC_DEXTRV_R_L: + case OPC_DEXTRV_RS_L: + case OPC_DEXTRV_S_H: + case OPC_DEXTRV_W: + case OPC_DEXTRV_R_W: + case OPC_DEXTRV_RS_W: + gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1); + break; + case OPC_DMTHLIP: + case OPC_DSHILO: + case OPC_DSHILOV: + gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0); + break; + default: /* Invalid */ + MIPS_INVAL("MASK EXTR.W"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_DPAQ_W_QH_DSP: + op2 = MASK_DPAQ_W_QH(ctx->opcode); + switch (op2) { + case OPC_DPAU_H_OBL: + case OPC_DPAU_H_OBR: + case OPC_DPSU_H_OBL: + case OPC_DPSU_H_OBR: + case OPC_DPA_W_QH: + case OPC_DPAQ_S_W_QH: + case OPC_DPS_W_QH: + case OPC_DPSQ_S_W_QH: + case OPC_MULSAQ_S_W_QH: + case OPC_DPAQ_SA_L_PW: + case OPC_DPSQ_SA_L_PW: + case OPC_MULSAQ_S_L_PW: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); + break; + case OPC_MAQ_S_W_QHLL: + case OPC_MAQ_S_W_QHLR: + case OPC_MAQ_S_W_QHRL: + case OPC_MAQ_S_W_QHRR: + case OPC_MAQ_SA_W_QHLL: + case OPC_MAQ_SA_W_QHLR: + case OPC_MAQ_SA_W_QHRL: + case OPC_MAQ_SA_W_QHRR: + case OPC_MAQ_S_L_PWL: + case OPC_MAQ_S_L_PWR: + case OPC_DMADD: + case OPC_DMADDU: + case OPC_DMSUB: + case OPC_DMSUBU: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); + break; + default: /* Invalid */ + MIPS_INVAL("MASK DPAQ.W.QH"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_DINSV_DSP: + op2 = MASK_INSV(ctx->opcode); + switch (op2) { + case OPC_DINSV: + { + TCGv t0, t1; + + if (rt == 0) { + break; + } + check_dsp(ctx); + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + gen_load_gpr(tcg_ctx, t1, rs); + + gen_helper_dinsv(tcg_ctx, tcg_ctx->cpu_gpr[rt], tcg_ctx->cpu_env, t1, t0); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + break; + } + default: /* Invalid */ + MIPS_INVAL("MASK DINSV"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_SHLL_OB_DSP: + gen_mipsdsp_shift(ctx, op1, rd, rs, rt); + break; +#endif + default: /* Invalid */ + MIPS_INVAL("special3_legacy"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + + +#if defined(TARGET_MIPS64) + +static void 
decode_mmi0(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opc = MASK_MMI0(ctx->opcode); + + switch (opc) { + case MMI_OPC_0_PADDW: /* TODO: MMI_OPC_0_PADDW */ + case MMI_OPC_0_PSUBW: /* TODO: MMI_OPC_0_PSUBW */ + case MMI_OPC_0_PCGTW: /* TODO: MMI_OPC_0_PCGTW */ + case MMI_OPC_0_PMAXW: /* TODO: MMI_OPC_0_PMAXW */ + case MMI_OPC_0_PADDH: /* TODO: MMI_OPC_0_PADDH */ + case MMI_OPC_0_PSUBH: /* TODO: MMI_OPC_0_PSUBH */ + case MMI_OPC_0_PCGTH: /* TODO: MMI_OPC_0_PCGTH */ + case MMI_OPC_0_PMAXH: /* TODO: MMI_OPC_0_PMAXH */ + case MMI_OPC_0_PADDB: /* TODO: MMI_OPC_0_PADDB */ + case MMI_OPC_0_PSUBB: /* TODO: MMI_OPC_0_PSUBB */ + case MMI_OPC_0_PCGTB: /* TODO: MMI_OPC_0_PCGTB */ + case MMI_OPC_0_PADDSW: /* TODO: MMI_OPC_0_PADDSW */ + case MMI_OPC_0_PSUBSW: /* TODO: MMI_OPC_0_PSUBSW */ + case MMI_OPC_0_PEXTLW: /* TODO: MMI_OPC_0_PEXTLW */ + case MMI_OPC_0_PPACW: /* TODO: MMI_OPC_0_PPACW */ + case MMI_OPC_0_PADDSH: /* TODO: MMI_OPC_0_PADDSH */ + case MMI_OPC_0_PSUBSH: /* TODO: MMI_OPC_0_PSUBSH */ + case MMI_OPC_0_PEXTLH: /* TODO: MMI_OPC_0_PEXTLH */ + case MMI_OPC_0_PPACH: /* TODO: MMI_OPC_0_PPACH */ + case MMI_OPC_0_PADDSB: /* TODO: MMI_OPC_0_PADDSB */ + case MMI_OPC_0_PSUBSB: /* TODO: MMI_OPC_0_PSUBSB */ + case MMI_OPC_0_PEXTLB: /* TODO: MMI_OPC_0_PEXTLB */ + case MMI_OPC_0_PPACB: /* TODO: MMI_OPC_0_PPACB */ + case MMI_OPC_0_PEXT5: /* TODO: MMI_OPC_0_PEXT5 */ + case MMI_OPC_0_PPAC5: /* TODO: MMI_OPC_0_PPAC5 */ + generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_CLASS_MMI0 */ + break; + default: + MIPS_INVAL("TX79 MMI class MMI0"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_mmi1(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opc = MASK_MMI1(ctx->opcode); + + switch (opc) { + case MMI_OPC_1_PABSW: /* TODO: MMI_OPC_1_PABSW */ + case MMI_OPC_1_PCEQW: /* TODO: MMI_OPC_1_PCEQW */ + case MMI_OPC_1_PMINW: /* TODO: MMI_OPC_1_PMINW */ + case MMI_OPC_1_PADSBH: /* TODO: MMI_OPC_1_PADSBH */ + case MMI_OPC_1_PABSH: /* TODO: MMI_OPC_1_PABSH */ + case MMI_OPC_1_PCEQH: /* TODO: MMI_OPC_1_PCEQH */ + case MMI_OPC_1_PMINH: /* TODO: MMI_OPC_1_PMINH */ + case MMI_OPC_1_PCEQB: /* TODO: MMI_OPC_1_PCEQB */ + case MMI_OPC_1_PADDUW: /* TODO: MMI_OPC_1_PADDUW */ + case MMI_OPC_1_PSUBUW: /* TODO: MMI_OPC_1_PSUBUW */ + case MMI_OPC_1_PEXTUW: /* TODO: MMI_OPC_1_PEXTUW */ + case MMI_OPC_1_PADDUH: /* TODO: MMI_OPC_1_PADDUH */ + case MMI_OPC_1_PSUBUH: /* TODO: MMI_OPC_1_PSUBUH */ + case MMI_OPC_1_PEXTUH: /* TODO: MMI_OPC_1_PEXTUH */ + case MMI_OPC_1_PADDUB: /* TODO: MMI_OPC_1_PADDUB */ + case MMI_OPC_1_PSUBUB: /* TODO: MMI_OPC_1_PSUBUB */ + case MMI_OPC_1_PEXTUB: /* TODO: MMI_OPC_1_PEXTUB */ + case MMI_OPC_1_QFSRV: /* TODO: MMI_OPC_1_QFSRV */ + generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_CLASS_MMI1 */ + break; + default: + MIPS_INVAL("TX79 MMI class MMI1"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_mmi2(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opc = MASK_MMI2(ctx->opcode); + + switch (opc) { + case MMI_OPC_2_PMADDW: /* TODO: MMI_OPC_2_PMADDW */ + case MMI_OPC_2_PSLLVW: /* TODO: MMI_OPC_2_PSLLVW */ + case MMI_OPC_2_PSRLVW: /* TODO: MMI_OPC_2_PSRLVW */ + case MMI_OPC_2_PMSUBW: /* TODO: MMI_OPC_2_PMSUBW */ + case MMI_OPC_2_PMFHI: /* TODO: MMI_OPC_2_PMFHI */ + case MMI_OPC_2_PMFLO: /* TODO: MMI_OPC_2_PMFLO */ + case MMI_OPC_2_PINTH: /* TODO: MMI_OPC_2_PINTH */ + case MMI_OPC_2_PMULTW: /* TODO: MMI_OPC_2_PMULTW */ + case MMI_OPC_2_PDIVW: /* TODO: MMI_OPC_2_PDIVW */ + case MMI_OPC_2_PMADDH: /* TODO: MMI_OPC_2_PMADDH */ + case 
MMI_OPC_2_PHMADH: /* TODO: MMI_OPC_2_PHMADH */ + case MMI_OPC_2_PAND: /* TODO: MMI_OPC_2_PAND */ + case MMI_OPC_2_PXOR: /* TODO: MMI_OPC_2_PXOR */ + case MMI_OPC_2_PMSUBH: /* TODO: MMI_OPC_2_PMSUBH */ + case MMI_OPC_2_PHMSBH: /* TODO: MMI_OPC_2_PHMSBH */ + case MMI_OPC_2_PEXEH: /* TODO: MMI_OPC_2_PEXEH */ + case MMI_OPC_2_PREVH: /* TODO: MMI_OPC_2_PREVH */ + case MMI_OPC_2_PMULTH: /* TODO: MMI_OPC_2_PMULTH */ + case MMI_OPC_2_PDIVBW: /* TODO: MMI_OPC_2_PDIVBW */ + case MMI_OPC_2_PEXEW: /* TODO: MMI_OPC_2_PEXEW */ + case MMI_OPC_2_PROT3W: /* TODO: MMI_OPC_2_PROT3W */ + generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_CLASS_MMI2 */ + break; + case MMI_OPC_2_PCPYLD: + gen_mmi_pcpyld(ctx); + break; + default: + MIPS_INVAL("TX79 MMI class MMI2"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_mmi3(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opc = MASK_MMI3(ctx->opcode); + + switch (opc) { + case MMI_OPC_3_PMADDUW: /* TODO: MMI_OPC_3_PMADDUW */ + case MMI_OPC_3_PSRAVW: /* TODO: MMI_OPC_3_PSRAVW */ + case MMI_OPC_3_PMTHI: /* TODO: MMI_OPC_3_PMTHI */ + case MMI_OPC_3_PMTLO: /* TODO: MMI_OPC_3_PMTLO */ + case MMI_OPC_3_PINTEH: /* TODO: MMI_OPC_3_PINTEH */ + case MMI_OPC_3_PMULTUW: /* TODO: MMI_OPC_3_PMULTUW */ + case MMI_OPC_3_PDIVUW: /* TODO: MMI_OPC_3_PDIVUW */ + case MMI_OPC_3_POR: /* TODO: MMI_OPC_3_POR */ + case MMI_OPC_3_PNOR: /* TODO: MMI_OPC_3_PNOR */ + case MMI_OPC_3_PEXCH: /* TODO: MMI_OPC_3_PEXCH */ + case MMI_OPC_3_PEXCW: /* TODO: MMI_OPC_3_PEXCW */ + generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_CLASS_MMI3 */ + break; + case MMI_OPC_3_PCPYH: + gen_mmi_pcpyh(ctx); + break; + case MMI_OPC_3_PCPYUD: + gen_mmi_pcpyud(ctx); + break; + default: + MIPS_INVAL("TX79 MMI class MMI3"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_mmi(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opc = MASK_MMI(ctx->opcode); + int rs = extract32(ctx->opcode, 21, 5); + int rt = extract32(ctx->opcode, 16, 5); + int rd = extract32(ctx->opcode, 11, 5); + + switch (opc) { + case MMI_OPC_CLASS_MMI0: + decode_mmi0(env, ctx); + break; + case MMI_OPC_CLASS_MMI1: + decode_mmi1(env, ctx); + break; + case MMI_OPC_CLASS_MMI2: + decode_mmi2(env, ctx); + break; + case MMI_OPC_CLASS_MMI3: + decode_mmi3(env, ctx); + break; + case MMI_OPC_MULT1: + case MMI_OPC_MULTU1: + case MMI_OPC_MADD: + case MMI_OPC_MADDU: + case MMI_OPC_MADD1: + case MMI_OPC_MADDU1: + gen_mul_txx9(ctx, opc, rd, rs, rt); + break; + case MMI_OPC_DIV1: + case MMI_OPC_DIVU1: + gen_div1_tx79(ctx, opc, rs, rt); + break; + case MMI_OPC_MTLO1: + case MMI_OPC_MTHI1: + gen_HILO1_tx79(ctx, opc, rs); + break; + case MMI_OPC_MFLO1: + case MMI_OPC_MFHI1: + gen_HILO1_tx79(ctx, opc, rd); + break; + case MMI_OPC_PLZCW: /* TODO: MMI_OPC_PLZCW */ + case MMI_OPC_PMFHL: /* TODO: MMI_OPC_PMFHL */ + case MMI_OPC_PMTHL: /* TODO: MMI_OPC_PMTHL */ + case MMI_OPC_PSLLH: /* TODO: MMI_OPC_PSLLH */ + case MMI_OPC_PSRLH: /* TODO: MMI_OPC_PSRLH */ + case MMI_OPC_PSRAH: /* TODO: MMI_OPC_PSRAH */ + case MMI_OPC_PSLLW: /* TODO: MMI_OPC_PSLLW */ + case MMI_OPC_PSRLW: /* TODO: MMI_OPC_PSRLW */ + case MMI_OPC_PSRAW: /* TODO: MMI_OPC_PSRAW */ + generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_CLASS_MMI */ + break; + default: + MIPS_INVAL("TX79 MMI class"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void gen_mmi_lq(CPUMIPSState *env, DisasContext *ctx) +{ + generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_LQ */ +} + +static void gen_mmi_sq(DisasContext *ctx, int base, int 
rt, int offset) +{ + generate_exception_end(ctx, EXCP_RI); /* TODO: MMI_OPC_SQ */ +} + +/* + * The TX79-specific instruction Store Quadword + * + * +--------+-------+-------+------------------------+ + * | 011111 | base | rt | offset | SQ + * +--------+-------+-------+------------------------+ + * 6 5 5 16 + * + * has the same opcode as the Read Hardware Register instruction + * + * +--------+-------+-------+-------+-------+--------+ + * | 011111 | 00000 | rt | rd | 00000 | 111011 | RDHWR + * +--------+-------+-------+-------+-------+--------+ + * 6 5 5 5 5 6 + * + * that is required, trapped and emulated by the Linux kernel. However, all + * RDHWR encodings yield address error exceptions on the TX79 since the SQ + * offset is odd. Therefore all valid SQ instructions can execute normally. + * In user mode, QEMU must verify the upper and lower 11 bits to distinguish + * between SQ and RDHWR, as the Linux kernel does. + */ +static void decode_mmi_sq(CPUMIPSState *env, DisasContext *ctx) +{ + int base = extract32(ctx->opcode, 21, 5); + int rt = extract32(ctx->opcode, 16, 5); + int offset = extract32(ctx->opcode, 0, 16); + + gen_mmi_sq(ctx, base, rt, offset); +} + +#endif + +static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rs, rt, rd, sa; + uint32_t op1, op2; + int16_t imm; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + imm = sextract32(ctx->opcode, 7, 9); + + op1 = MASK_SPECIAL3(ctx->opcode); + + /* + * EVA loads and stores overlap Loongson 2E instructions decoded by + * decode_opc_special3_legacy(), so be careful to allow their decoding when + * EVA is absent. + */ + if (ctx->eva) { + switch (op1) { + case OPC_LWLE: + case OPC_LWRE: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + /* fall through */ + case OPC_LBUE: + case OPC_LHUE: + case OPC_LBE: + case OPC_LHE: + case OPC_LLE: + case OPC_LWE: + check_cp0_enabled(ctx); + gen_ld(ctx, op1, rt, rs, imm); + return; + case OPC_SWLE: + case OPC_SWRE: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + /* fall through */ + case OPC_SBE: + case OPC_SHE: + case OPC_SWE: + check_cp0_enabled(ctx); + gen_st(ctx, op1, rt, rs, imm); + return; + case OPC_SCE: + check_cp0_enabled(ctx); + gen_st_cond(ctx, rt, rs, imm, MO_TESL, true); + return; + case OPC_CACHEE: + check_cp0_enabled(ctx); + if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { + gen_cache_operation(ctx, rt, rs, imm); + } + /* Treat as NOP. */ + return; + case OPC_PREFE: + check_cp0_enabled(ctx); + /* Treat as NOP. 
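+             * Prefetch hints have no architecturally visible
+             * effect under emulation, so only the CP0 access
+             * check above is required.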
*/ + return; + } + } + + switch (op1) { + case OPC_EXT: + case OPC_INS: + check_insn(ctx, ISA_MIPS32R2); + gen_bitops(ctx, op1, rt, rs, sa, rd); + break; + case OPC_BSHFL: + op2 = MASK_BSHFL(ctx->opcode); + switch (op2) { + case OPC_ALIGN: + case OPC_ALIGN_1: + case OPC_ALIGN_2: + case OPC_ALIGN_3: + case OPC_BITSWAP: + check_insn(ctx, ISA_MIPS32R6); + decode_opc_special3_r6(env, ctx); + break; + default: + check_insn(ctx, ISA_MIPS32R2); + gen_bshfl(ctx, op2, rt, rd); + break; + } + break; +#if defined(TARGET_MIPS64) + case OPC_DEXTM: + case OPC_DEXTU: + case OPC_DEXT: + case OPC_DINSM: + case OPC_DINSU: + case OPC_DINS: + check_insn(ctx, ISA_MIPS64R2); + check_mips_64(ctx); + gen_bitops(ctx, op1, rt, rs, sa, rd); + break; + case OPC_DBSHFL: + op2 = MASK_DBSHFL(ctx->opcode); + switch (op2) { + case OPC_DALIGN: + case OPC_DALIGN_1: + case OPC_DALIGN_2: + case OPC_DALIGN_3: + case OPC_DALIGN_4: + case OPC_DALIGN_5: + case OPC_DALIGN_6: + case OPC_DALIGN_7: + case OPC_DBITSWAP: + check_insn(ctx, ISA_MIPS32R6); + decode_opc_special3_r6(env, ctx); + break; + default: + check_insn(ctx, ISA_MIPS64R2); + check_mips_64(ctx); + op2 = MASK_DBSHFL(ctx->opcode); + gen_bshfl(ctx, op2, rt, rd); + break; + } + break; +#endif + case OPC_RDHWR: + gen_rdhwr(ctx, rt, rd, extract32(ctx->opcode, 6, 3)); + break; + case OPC_FORK: + check_mt(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rt); + gen_load_gpr(tcg_ctx, t1, rs); + gen_helper_fork(tcg_ctx, t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + } + break; + case OPC_YIELD: + check_mt(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(tcg_ctx, t0, rs); + gen_helper_yield(tcg_ctx, t0, tcg_ctx->cpu_env, t0); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + } + break; + default: + if (ctx->insn_flags & ISA_MIPS32R6) { + decode_opc_special3_r6(env, ctx); + } else { + decode_opc_special3_legacy(env, ctx); + } + } +} + +/* MIPS SIMD Architecture (MSA) */ +static inline int check_msa_access(DisasContext *ctx) +{ + if (unlikely((ctx->hflags & MIPS_HFLAG_FPU) && + !(ctx->hflags & MIPS_HFLAG_F64))) { + generate_exception_end(ctx, EXCP_RI); + return 0; + } + + if (unlikely(!(ctx->hflags & MIPS_HFLAG_MSA))) { + if (ctx->insn_flags & ASE_MSA) { + generate_exception_end(ctx, EXCP_MSADIS); + return 0; + } else { + generate_exception_end(ctx, EXCP_RI); + return 0; + } + } + return 1; +} + +static void gen_check_zero_element(TCGContext *tcg_ctx, TCGv tresult, uint8_t df, uint8_t wt) +{ + /* generates tcg ops to check if any element is 0 */ + /* Note this function only works with MSA_WRLEN = 128 */ + uint64_t eval_zero_or_big = 0; + uint64_t eval_big = 0; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + switch (df) { + case DF_BYTE: + eval_zero_or_big = 0x0101010101010101ULL; + eval_big = 0x8080808080808080ULL; + break; + case DF_HALF: + eval_zero_or_big = 0x0001000100010001ULL; + eval_big = 0x8000800080008000ULL; + break; + case DF_WORD: + eval_zero_or_big = 0x0000000100000001ULL; + eval_big = 0x8000000080000000ULL; + break; + case DF_DOUBLE: + eval_zero_or_big = 0x0000000000000001ULL; + eval_big = 0x8000000000000000ULL; + break; + } + tcg_gen_subi_i64(tcg_ctx, t0, tcg_ctx->msa_wr_d[wt << 1], eval_zero_or_big); + tcg_gen_andc_i64(tcg_ctx, t0, t0, tcg_ctx->msa_wr_d[wt << 1]); + tcg_gen_andi_i64(tcg_ctx, t0, t0, eval_big); + tcg_gen_subi_i64(tcg_ctx, t1, tcg_ctx->msa_wr_d[(wt << 1) + 1], eval_zero_or_big); + 
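+    /*
+     * The same subtract/and-not/mask zero-lane test is now applied
+     * to the upper 64 bits of the 128-bit MSA register; the two
+     * halves are OR-ed together below.
+     */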
tcg_gen_andc_i64(tcg_ctx, t1, t1, tcg_ctx->msa_wr_d[(wt << 1) + 1]); + tcg_gen_andi_i64(tcg_ctx, t1, t1, eval_big); + tcg_gen_or_i64(tcg_ctx, t0, t0, t1); + /* if all bits are zero then all elements are not zero */ + /* if some bit is non-zero then some element is zero */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, t0, t0, 0); + tcg_gen_trunc_i64_tl(tcg_ctx, tresult, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static void gen_msa_branch(CPUMIPSState *env, DisasContext *ctx, uint32_t op1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t df = (ctx->opcode >> 21) & 0x3; + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + int64_t s16 = (int16_t)ctx->opcode; + + check_msa_access(ctx); + + if (ctx->hflags & MIPS_HFLAG_BMASK) { + generate_exception_end(ctx, EXCP_RI); + return; + } + switch (op1) { + case OPC_BZ_V: + case OPC_BNZ_V: + { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_or_i64(tcg_ctx, t0, tcg_ctx->msa_wr_d[wt << 1], tcg_ctx->msa_wr_d[(wt << 1) + 1]); + tcg_gen_setcondi_i64(tcg_ctx, (op1 == OPC_BZ_V) ? + TCG_COND_EQ : TCG_COND_NE, t0, t0, 0); + tcg_gen_trunc_i64_tl(tcg_ctx, tcg_ctx->bcond, t0); + tcg_temp_free_i64(tcg_ctx, t0); + } + break; + case OPC_BZ_B: + case OPC_BZ_H: + case OPC_BZ_W: + case OPC_BZ_D: + gen_check_zero_element(tcg_ctx, tcg_ctx->bcond, df, wt); + break; + case OPC_BNZ_B: + case OPC_BNZ_H: + case OPC_BNZ_W: + case OPC_BNZ_D: + gen_check_zero_element(tcg_ctx, tcg_ctx->bcond, df, wt); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->bcond, tcg_ctx->bcond, 0); + break; + } + + ctx->btarget = ctx->base.pc_next + (s16 << 2) + 4; + + ctx->hflags |= MIPS_HFLAG_BC; + ctx->hflags |= MIPS_HFLAG_BDS32; +} + +static void gen_msa_i8(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#define MASK_MSA_I8(op) (MASK_MSA_MINOR(op) | (op & (0x03 << 24))) + uint8_t i8 = (ctx->opcode >> 16) & 0xff; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 ti8 = tcg_const_i32(tcg_ctx, i8); + + switch (MASK_MSA_I8(ctx->opcode)) { + case OPC_ANDI_B: + gen_helper_msa_andi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_ORI_B: + gen_helper_msa_ori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_NORI_B: + gen_helper_msa_nori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_XORI_B: + gen_helper_msa_xori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_BMNZI_B: + gen_helper_msa_bmnzi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_BMZI_B: + gen_helper_msa_bmzi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_BSELI_B: + gen_helper_msa_bseli_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_SHF_B: + case OPC_SHF_H: + case OPC_SHF_W: + { + uint8_t df = (ctx->opcode >> 24) & 0x3; + if (df == DF_DOUBLE) { + generate_exception_end(ctx, EXCP_RI); + } else { + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + gen_helper_msa_shf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, ti8); + tcg_temp_free_i32(tcg_ctx, tdf); + } + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, ti8); +} + +static void gen_msa_i5(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#define MASK_MSA_I5(op) (MASK_MSA_MINOR(op) | (op & 
(0x7 << 23))) + uint8_t df = (ctx->opcode >> 21) & 0x3; + int8_t s5 = (int8_t) sextract32(ctx->opcode, 16, 5); + uint8_t u5 = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 timm = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, timm, u5); + + switch (MASK_MSA_I5(ctx->opcode)) { + case OPC_ADDVI_df: + gen_helper_msa_addvi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_SUBVI_df: + gen_helper_msa_subvi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_MAXI_S_df: + tcg_gen_movi_i32(tcg_ctx, timm, s5); + gen_helper_msa_maxi_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_MAXI_U_df: + gen_helper_msa_maxi_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_MINI_S_df: + tcg_gen_movi_i32(tcg_ctx, timm, s5); + gen_helper_msa_mini_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_MINI_U_df: + gen_helper_msa_mini_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_CEQI_df: + tcg_gen_movi_i32(tcg_ctx, timm, s5); + gen_helper_msa_ceqi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_CLTI_S_df: + tcg_gen_movi_i32(tcg_ctx, timm, s5); + gen_helper_msa_clti_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_CLTI_U_df: + gen_helper_msa_clti_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_CLEI_S_df: + tcg_gen_movi_i32(tcg_ctx, timm, s5); + gen_helper_msa_clei_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_CLEI_U_df: + gen_helper_msa_clei_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_LDI_df: + { + int32_t s10 = sextract32(ctx->opcode, 11, 10); + tcg_gen_movi_i32(tcg_ctx, timm, s10); + gen_helper_msa_ldi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, timm); + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, tdf); + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, timm); +} + +static void gen_msa_bit(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#define MASK_MSA_BIT(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) + uint8_t dfm = (ctx->opcode >> 16) & 0x7f; + uint32_t df = 0, m = 0; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 tdf; + TCGv_i32 tm; + TCGv_i32 twd; + TCGv_i32 tws; + + if ((dfm & 0x40) == 0x00) { + m = dfm & 0x3f; + df = DF_DOUBLE; + } else if ((dfm & 0x60) == 0x40) { + m = dfm & 0x1f; + df = DF_WORD; + } else if ((dfm & 0x70) == 0x60) { + m = dfm & 0x0f; + df = DF_HALF; + } else if ((dfm & 0x78) == 0x70) { + m = dfm & 0x7; + df = DF_BYTE; + } else { + generate_exception_end(ctx, EXCP_RI); + return; + } + + tdf = tcg_const_i32(tcg_ctx, df); + tm = tcg_const_i32(tcg_ctx, m); + twd = tcg_const_i32(tcg_ctx, wd); + tws = tcg_const_i32(tcg_ctx, ws); + + switch (MASK_MSA_BIT(ctx->opcode)) { + case OPC_SLLI_df: + gen_helper_msa_slli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SRAI_df: + gen_helper_msa_srai_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SRLI_df: + gen_helper_msa_srli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_BCLRI_df: + 
gen_helper_msa_bclri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_BSETI_df: + gen_helper_msa_bseti_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_BNEGI_df: + gen_helper_msa_bnegi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_BINSLI_df: + gen_helper_msa_binsli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_BINSRI_df: + gen_helper_msa_binsri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SAT_S_df: + gen_helper_msa_sat_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SAT_U_df: + gen_helper_msa_sat_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SRARI_df: + gen_helper_msa_srari_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SRLRI_df: + gen_helper_msa_srlri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, tdf); + tcg_temp_free_i32(tcg_ctx, tm); + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); +} + +static void gen_msa_3r(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#define MASK_MSA_3R(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) + uint8_t df = (ctx->opcode >> 21) & 0x3; + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); + + switch (MASK_MSA_3R(ctx->opcode)) { + case OPC_BINSL_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_binsl_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_binsl_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_binsl_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_binsl_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_BINSR_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_binsr_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_binsr_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_binsr_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_binsr_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_BCLR_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_bclr_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_bclr_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_bclr_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_bclr_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_BNEG_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_bneg_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_bneg_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_bneg_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_bneg_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_BSET_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_bset_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + 
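/* BSET.df sets bit (wt[i] mod element width) in each element of ws. */ +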
gen_helper_msa_bset_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_bset_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_bset_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ADD_A_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_add_a_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_add_a_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_add_a_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_add_a_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ADDS_A_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_adds_a_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_adds_a_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_adds_a_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_adds_a_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ADDS_S_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_adds_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_adds_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_adds_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_adds_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ADDS_U_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_adds_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_adds_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_adds_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_adds_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ADDV_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_addv_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_addv_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_addv_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_addv_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_AVE_S_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_ave_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_ave_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_ave_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_ave_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_AVE_U_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_ave_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_ave_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_ave_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_ave_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_AVER_S_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_aver_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_aver_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_aver_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, 
tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_aver_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_AVER_U_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_aver_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_aver_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_aver_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_aver_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_CEQ_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_ceq_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_ceq_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_ceq_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_ceq_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_CLE_S_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_cle_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_cle_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_cle_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_cle_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_CLE_U_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_cle_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_cle_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_cle_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_cle_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_CLT_S_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_clt_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_clt_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_clt_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_clt_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_CLT_U_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_clt_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_clt_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_clt_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_clt_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_DIV_S_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_div_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_div_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_div_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_div_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_DIV_U_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_div_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_div_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_div_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_div_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_MAX_A_df: + switch (df) { + 
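/* MAX_A compares |ws[i]| and |wt[i]| per element but stores the winning operand unmodified, sign included. */ +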
case DF_BYTE: + gen_helper_msa_max_a_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_max_a_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_max_a_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_max_a_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_MAX_S_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_max_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_max_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_max_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_max_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_MAX_U_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_max_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_max_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_max_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_max_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_MIN_A_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_min_a_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_min_a_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_min_a_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_min_a_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_MIN_S_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_min_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_min_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_min_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_min_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_MIN_U_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_min_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_min_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_min_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_min_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_MOD_S_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_mod_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_mod_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_mod_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_mod_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_MOD_U_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_mod_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_mod_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_mod_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_mod_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ASUB_S_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_asub_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_asub_s_h(tcg_ctx, tcg_ctx->cpu_env, 
twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_asub_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_asub_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ASUB_U_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_asub_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_asub_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_asub_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_asub_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ILVEV_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_ilvev_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_ilvev_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_ilvev_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_ilvev_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ILVOD_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_ilvod_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_ilvod_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_ilvod_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_ilvod_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ILVL_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_ilvl_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_ilvl_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_ilvl_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_ilvl_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_ILVR_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_ilvr_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_ilvr_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_ilvr_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_ilvr_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_PCKEV_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_pckev_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_pckev_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_pckev_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_pckev_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_PCKOD_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_pckod_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_pckod_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_pckod_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_pckod_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_SLL_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_sll_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_sll_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_sll_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_sll_d(tcg_ctx, 
tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_SRA_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_sra_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_sra_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_sra_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_sra_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_SRAR_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_srar_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_srar_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_srar_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_srar_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_SRL_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_srl_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_srl_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_srl_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_srl_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_SRLR_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_srlr_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_HALF: + gen_helper_msa_srlr_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_srlr_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_srlr_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_SUBS_S_df: + gen_helper_msa_subs_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MULV_df: + gen_helper_msa_mulv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SLD_df: + gen_helper_msa_sld_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_VSHF_df: + gen_helper_msa_vshf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SUBV_df: + gen_helper_msa_subv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SUBS_U_df: + gen_helper_msa_subs_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MADDV_df: + gen_helper_msa_maddv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SPLAT_df: + gen_helper_msa_splat_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SUBSUS_U_df: + gen_helper_msa_subsus_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MSUBV_df: + gen_helper_msa_msubv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SUBSUU_S_df: + gen_helper_msa_subsuu_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + + case OPC_DOTP_S_df: + case OPC_DOTP_U_df: + case OPC_DPADD_S_df: + case OPC_DPADD_U_df: + case OPC_DPSUB_S_df: + case OPC_HADD_S_df: + case OPC_DPSUB_U_df: + case OPC_HADD_U_df: + case OPC_HSUB_S_df: + case OPC_HSUB_U_df: + if (df == DF_BYTE) { + generate_exception_end(ctx, EXCP_RI); + break; + } + switch (MASK_MSA_3R(ctx->opcode)) { + case OPC_HADD_S_df: + switch (df) { + case DF_HALF: + gen_helper_msa_hadd_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_hadd_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_hadd_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case 
OPC_HADD_U_df: + switch (df) { + case DF_HALF: + gen_helper_msa_hadd_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_hadd_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_hadd_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_HSUB_S_df: + switch (df) { + case DF_HALF: + gen_helper_msa_hsub_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_hsub_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_hsub_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_HSUB_U_df: + switch (df) { + case DF_HALF: + gen_helper_msa_hsub_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_WORD: + gen_helper_msa_hsub_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case DF_DOUBLE: + gen_helper_msa_hsub_u_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + } + break; + case OPC_DOTP_S_df: + gen_helper_msa_dotp_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DOTP_U_df: + gen_helper_msa_dotp_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DPADD_S_df: + gen_helper_msa_dpadd_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DPADD_U_df: + gen_helper_msa_dpadd_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DPSUB_S_df: + gen_helper_msa_dpsub_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DPSUB_U_df: + gen_helper_msa_dpsub_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + break; + } + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, twt); + tcg_temp_free_i32(tcg_ctx, tdf); +} + +static void gen_msa_elm_3e(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#define MASK_MSA_ELM_DF3E(op) (MASK_MSA_MINOR(op) | (op & (0x3FF << 16))) + uint8_t source = (ctx->opcode >> 11) & 0x1f; + uint8_t dest = (ctx->opcode >> 6) & 0x1f; + TCGv telm = tcg_temp_new(tcg_ctx); + TCGv_i32 tsr = tcg_const_i32(tcg_ctx, source); + TCGv_i32 tdt = tcg_const_i32(tcg_ctx, dest); + + switch (MASK_MSA_ELM_DF3E(ctx->opcode)) { + case OPC_CTCMSA: + gen_load_gpr(tcg_ctx, telm, source); + gen_helper_msa_ctcmsa(tcg_ctx, tcg_ctx->cpu_env, telm, tdt); + break; + case OPC_CFCMSA: + gen_helper_msa_cfcmsa(tcg_ctx, telm, tcg_ctx->cpu_env, tsr); + gen_store_gpr(tcg_ctx, telm, dest); + break; + case OPC_MOVE_V: + gen_helper_msa_move_v(tcg_ctx, tcg_ctx->cpu_env, tdt, tsr); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free(tcg_ctx, telm); + tcg_temp_free_i32(tcg_ctx, tdt); + tcg_temp_free_i32(tcg_ctx, tsr); +} + +static void gen_msa_elm_df(CPUMIPSState *env, DisasContext *ctx, uint32_t df, + uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#define MASK_MSA_ELM(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tn = tcg_const_i32(tcg_ctx, n); + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + + switch (MASK_MSA_ELM(ctx->opcode)) { + case OPC_SLDI_df: + gen_helper_msa_sldi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); + break; + case OPC_SPLATI_df: + 
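/* SPLATI.df replicates element n of ws across all elements of wd; n was taken from the dfn field by the caller. */ +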
gen_helper_msa_splati_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); + break; + case OPC_INSVE_df: + gen_helper_msa_insve_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); + break; + case OPC_COPY_S_df: + case OPC_COPY_U_df: + case OPC_INSERT_df: +#if !defined(TARGET_MIPS64) + /* Double format valid only for MIPS64 */ + if (df == DF_DOUBLE) { + generate_exception_end(ctx, EXCP_RI); + break; + } + if ((MASK_MSA_ELM(ctx->opcode) == OPC_COPY_U_df) && + (df == DF_WORD)) { + generate_exception_end(ctx, EXCP_RI); + break; + } +#endif + switch (MASK_MSA_ELM(ctx->opcode)) { + case OPC_COPY_S_df: + if (likely(wd != 0)) { + switch (df) { + case DF_BYTE: + gen_helper_msa_copy_s_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; + case DF_HALF: + gen_helper_msa_copy_s_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; + case DF_WORD: + gen_helper_msa_copy_s_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; +#if defined(TARGET_MIPS64) + case DF_DOUBLE: + gen_helper_msa_copy_s_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; +#endif + default: + assert(0); + } + } + break; + case OPC_COPY_U_df: + if (likely(wd != 0)) { + switch (df) { + case DF_BYTE: + gen_helper_msa_copy_u_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; + case DF_HALF: + gen_helper_msa_copy_u_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; +#if defined(TARGET_MIPS64) + case DF_WORD: + gen_helper_msa_copy_u_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; +#endif + default: + assert(0); + } + } + break; + case OPC_INSERT_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_insert_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; + case DF_HALF: + gen_helper_msa_insert_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; + case DF_WORD: + gen_helper_msa_insert_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; +#if defined(TARGET_MIPS64) + case DF_DOUBLE: + gen_helper_msa_insert_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws, tn); + break; +#endif + default: + assert(0); + } + break; + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + } + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, tn); + tcg_temp_free_i32(tcg_ctx, tdf); +} + +static void gen_msa_elm(CPUMIPSState *env, DisasContext *ctx) +{ + uint8_t dfn = (ctx->opcode >> 16) & 0x3f; + uint32_t df = 0, n = 0; + + if ((dfn & 0x30) == 0x00) { + n = dfn & 0x0f; + df = DF_BYTE; + } else if ((dfn & 0x38) == 0x20) { + n = dfn & 0x07; + df = DF_HALF; + } else if ((dfn & 0x3c) == 0x30) { + n = dfn & 0x03; + df = DF_WORD; + } else if ((dfn & 0x3e) == 0x38) { + n = dfn & 0x01; + df = DF_DOUBLE; + } else if (dfn == 0x3E) { + /* CTCMSA, CFCMSA, MOVE.V */ + gen_msa_elm_3e(env, ctx); + return; + } else { + generate_exception_end(ctx, EXCP_RI); + return; + } + + gen_msa_elm_df(env, ctx, df, n); +} + +static void gen_msa_3rf(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#define MASK_MSA_3RF(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) + uint8_t df = (ctx->opcode >> 21) & 0x1; + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); + TCGv_i32 tdf = tcg_temp_new_i32(tcg_ctx); + + /* adjust df value for floating-point instruction */ + tcg_gen_movi_i32(tcg_ctx, tdf, df + 2); + + switch (MASK_MSA_3RF(ctx->opcode)) { + case OPC_FCAF_df: + 
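/* The single df bit of 3RF ops selects word or double lanes, hence the + * df + 2 remap above (DF_WORD = 2, DF_DOUBLE = 3). FCAF is the quiet + * always-false compare: wd is cleared and only signaling NaNs trap. */ +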
gen_helper_msa_fcaf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FADD_df: + gen_helper_msa_fadd_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCUN_df: + gen_helper_msa_fcun_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSUB_df: + gen_helper_msa_fsub_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCOR_df: + gen_helper_msa_fcor_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCEQ_df: + gen_helper_msa_fceq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMUL_df: + gen_helper_msa_fmul_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCUNE_df: + gen_helper_msa_fcune_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCUEQ_df: + gen_helper_msa_fcueq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FDIV_df: + gen_helper_msa_fdiv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCNE_df: + gen_helper_msa_fcne_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCLT_df: + gen_helper_msa_fclt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMADD_df: + gen_helper_msa_fmadd_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MUL_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_mul_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCULT_df: + gen_helper_msa_fcult_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMSUB_df: + gen_helper_msa_fmsub_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MADD_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_madd_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCLE_df: + gen_helper_msa_fcle_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MSUB_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_msub_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCULE_df: + gen_helper_msa_fcule_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FEXP2_df: + gen_helper_msa_fexp2_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSAF_df: + gen_helper_msa_fsaf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FEXDO_df: + gen_helper_msa_fexdo_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSUN_df: + gen_helper_msa_fsun_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSOR_df: + gen_helper_msa_fsor_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSEQ_df: + gen_helper_msa_fseq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FTQ_df: + gen_helper_msa_ftq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSUNE_df: + gen_helper_msa_fsune_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSUEQ_df: + gen_helper_msa_fsueq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSNE_df: + gen_helper_msa_fsne_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSLT_df: + gen_helper_msa_fslt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMIN_df: + gen_helper_msa_fmin_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MULR_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_mulr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case 
OPC_FSULT_df: + gen_helper_msa_fsult_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMIN_A_df: + gen_helper_msa_fmin_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MADDR_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_maddr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSLE_df: + gen_helper_msa_fsle_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMAX_df: + gen_helper_msa_fmax_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MSUBR_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_msubr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSULE_df: + gen_helper_msa_fsule_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMAX_A_df: + gen_helper_msa_fmax_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, twt); + tcg_temp_free_i32(tcg_ctx, tdf); +} + +static void gen_msa_2r(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#define MASK_MSA_2R(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ + (op & (0x7 << 18))) + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + uint8_t df = (ctx->opcode >> 16) & 0x3; + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + + switch (MASK_MSA_2R(ctx->opcode)) { + case OPC_FILL_df: +#if !defined(TARGET_MIPS64) + /* Double format valid only for MIPS64 */ + if (df == DF_DOUBLE) { + generate_exception_end(ctx, EXCP_RI); + break; + } +#endif + gen_helper_msa_fill_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); /* trs */ + break; + case OPC_NLOC_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_nloc_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + case DF_HALF: + gen_helper_msa_nloc_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + case DF_WORD: + gen_helper_msa_nloc_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + case DF_DOUBLE: + gen_helper_msa_nloc_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + } + break; + case OPC_NLZC_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_nlzc_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + case DF_HALF: + gen_helper_msa_nlzc_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + case DF_WORD: + gen_helper_msa_nlzc_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + case DF_DOUBLE: + gen_helper_msa_nlzc_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + } + break; + case OPC_PCNT_df: + switch (df) { + case DF_BYTE: + gen_helper_msa_pcnt_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + case DF_HALF: + gen_helper_msa_pcnt_h(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + case DF_WORD: + gen_helper_msa_pcnt_w(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + case DF_DOUBLE: + gen_helper_msa_pcnt_d(tcg_ctx, tcg_ctx->cpu_env, twd, tws); + break; + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, twt); + tcg_temp_free_i32(tcg_ctx, tdf); +} + +static void gen_msa_2rf(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; +#define MASK_MSA_2RF(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ + (op & (0xf << 17))) + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + uint8_t df = (ctx->opcode >> 16) & 0x1; + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); + /* adjust df value for floating-point instruction */ + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df + 2); + + switch (MASK_MSA_2RF(ctx->opcode)) { + case OPC_FCLASS_df: + gen_helper_msa_fclass_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FTRUNC_S_df: + gen_helper_msa_ftrunc_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FTRUNC_U_df: + gen_helper_msa_ftrunc_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FSQRT_df: + gen_helper_msa_fsqrt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FRSQRT_df: + gen_helper_msa_frsqrt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FRCP_df: + gen_helper_msa_frcp_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FRINT_df: + gen_helper_msa_frint_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FLOG2_df: + gen_helper_msa_flog2_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FEXUPL_df: + gen_helper_msa_fexupl_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FEXUPR_df: + gen_helper_msa_fexupr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FFQL_df: + gen_helper_msa_ffql_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FFQR_df: + gen_helper_msa_ffqr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FTINT_S_df: + gen_helper_msa_ftint_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FTINT_U_df: + gen_helper_msa_ftint_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FFINT_S_df: + gen_helper_msa_ffint_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FFINT_U_df: + gen_helper_msa_ffint_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, twt); + tcg_temp_free_i32(tcg_ctx, tdf); +} + +static void gen_msa_vec_v(CPUMIPSState *env, DisasContext *ctx) +{ +#define MASK_MSA_VEC(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); + + switch (MASK_MSA_VEC(ctx->opcode)) { + case OPC_AND_V: + gen_helper_msa_and_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_OR_V: + gen_helper_msa_or_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_NOR_V: + gen_helper_msa_nor_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_XOR_V: + gen_helper_msa_xor_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_BMNZ_V: + gen_helper_msa_bmnz_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_BMZ_V: + gen_helper_msa_bmz_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_BSEL_V: + gen_helper_msa_bsel_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + 
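/* gen_msa_vec() below dispatches only the seven bitwise ops here; the + * 2R/2RF encodings sharing this minor opcode go to gen_msa_2r() and + * gen_msa_2rf(), so this default is purely defensive. */ +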
break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, twt); +} + +static void gen_msa_vec(CPUMIPSState *env, DisasContext *ctx) +{ + switch (MASK_MSA_VEC(ctx->opcode)) { + case OPC_AND_V: + case OPC_OR_V: + case OPC_NOR_V: + case OPC_XOR_V: + case OPC_BMNZ_V: + case OPC_BMZ_V: + case OPC_BSEL_V: + gen_msa_vec_v(env, ctx); + break; + case OPC_MSA_2R: + gen_msa_2r(env, ctx); + break; + case OPC_MSA_2RF: + gen_msa_2rf(env, ctx); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void gen_msa(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t opcode = ctx->opcode; + check_insn(ctx, ASE_MSA); + check_msa_access(ctx); + + switch (MASK_MSA_MINOR(opcode)) { + case OPC_MSA_I8_00: + case OPC_MSA_I8_01: + case OPC_MSA_I8_02: + gen_msa_i8(env, ctx); + break; + case OPC_MSA_I5_06: + case OPC_MSA_I5_07: + gen_msa_i5(env, ctx); + break; + case OPC_MSA_BIT_09: + case OPC_MSA_BIT_0A: + gen_msa_bit(env, ctx); + break; + case OPC_MSA_3R_0D: + case OPC_MSA_3R_0E: + case OPC_MSA_3R_0F: + case OPC_MSA_3R_10: + case OPC_MSA_3R_11: + case OPC_MSA_3R_12: + case OPC_MSA_3R_13: + case OPC_MSA_3R_14: + case OPC_MSA_3R_15: + gen_msa_3r(env, ctx); + break; + case OPC_MSA_ELM: + gen_msa_elm(env, ctx); + break; + case OPC_MSA_3RF_1A: + case OPC_MSA_3RF_1B: + case OPC_MSA_3RF_1C: + gen_msa_3rf(env, ctx); + break; + case OPC_MSA_VEC: + gen_msa_vec(env, ctx); + break; + case OPC_LD_B: + case OPC_LD_H: + case OPC_LD_W: + case OPC_LD_D: + case OPC_ST_B: + case OPC_ST_H: + case OPC_ST_W: + case OPC_ST_D: + { + int32_t s10 = sextract32(ctx->opcode, 16, 10); + uint8_t rs = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + uint8_t df = (ctx->opcode >> 0) & 0x3; + + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv taddr = tcg_temp_new(tcg_ctx); + gen_base_offset_addr(ctx, taddr, rs, s10 << df); + + switch (MASK_MSA_MINOR(opcode)) { + case OPC_LD_B: + gen_helper_msa_ld_b(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); + break; + case OPC_LD_H: + gen_helper_msa_ld_h(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); + break; + case OPC_LD_W: + gen_helper_msa_ld_w(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); + break; + case OPC_LD_D: + gen_helper_msa_ld_d(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); + break; + case OPC_ST_B: + gen_helper_msa_st_b(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); + break; + case OPC_ST_H: + gen_helper_msa_st_h(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); + break; + case OPC_ST_W: + gen_helper_msa_st_w(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); + break; + case OPC_ST_D: + gen_helper_msa_st_d(tcg_ctx, tcg_ctx->cpu_env, twd, taddr); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free(tcg_ctx, taddr); + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception_end(ctx, EXCP_RI); + break; + } + +} + +static void decode_opc(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int32_t offset; + int rs, rt, rd, sa; + uint32_t op, op1; + int16_t imm; + + /* make sure instructions are on a word boundary */ + if (ctx->base.pc_next & 0x3) { + env->CP0_BadVAddr = ctx->base.pc_next; + generate_exception_err(ctx, EXCP_AdEL, EXCP_INST_NOTAVAIL); + return; + } + + /* Handle blikely not taken case */ + if ((ctx->hflags & MIPS_HFLAG_BMASK_BASE) == MIPS_HFLAG_BL) { + TCGLabel *l1 = gen_new_label(tcg_ctx); + + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->bcond, 0, l1); + tcg_gen_movi_i32(tcg_ctx, 
tcg_ctx->hflags, ctx->hflags & ~MIPS_HFLAG_BMASK); + gen_goto_tb(ctx, 1, ctx->base.pc_next + 4); + gen_set_label(tcg_ctx, l1); + } + + op = MASK_OP_MAJOR(ctx->opcode); + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + imm = (int16_t)ctx->opcode; + switch (op) { + case OPC_SPECIAL: + decode_opc_special(env, ctx); + break; + case OPC_SPECIAL2: +#if defined(TARGET_MIPS64) + if ((ctx->insn_flags & INSN_R5900) && (ctx->insn_flags & ASE_MMI)) { + decode_mmi(env, ctx); +#else + if (ctx->insn_flags & ASE_MXU) { + decode_opc_mxu(env, ctx); +#endif + } else { + decode_opc_special2_legacy(env, ctx); + } + break; + case OPC_SPECIAL3: +#if defined(TARGET_MIPS64) + if (ctx->insn_flags & INSN_R5900) { + decode_mmi_sq(env, ctx); /* MMI_OPC_SQ */ + } else { + decode_opc_special3(env, ctx); + } +#else + decode_opc_special3(env, ctx); +#endif + break; + case OPC_REGIMM: + op1 = MASK_REGIMM(ctx->opcode); + switch (op1) { + case OPC_BLTZL: /* REGIMM branches */ + case OPC_BGEZL: + case OPC_BLTZALL: + case OPC_BGEZALL: + check_insn(ctx, ISA_MIPS2); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + /* Fallthrough */ + case OPC_BLTZ: + case OPC_BGEZ: + gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4); + break; + case OPC_BLTZAL: + case OPC_BGEZAL: + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rs == 0) { + /* OPC_NAL, OPC_BAL */ + gen_compute_branch(ctx, op1, 4, 0, -1, imm << 2, 4); + } else { + generate_exception_end(ctx, EXCP_RI); + } + } else { + gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4); + } + break; + case OPC_TGEI: /* REGIMM traps */ + case OPC_TGEIU: + case OPC_TLTI: + case OPC_TLTIU: + case OPC_TEQI: + + case OPC_TNEI: + check_insn(ctx, ISA_MIPS2); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_trap(ctx, op1, rs, -1, imm); + break; + case OPC_SIGRIE: + check_insn(ctx, ISA_MIPS32R6); + generate_exception_end(ctx, EXCP_RI); + break; + case OPC_SYNCI: + check_insn(ctx, ISA_MIPS32R2); + /* + * Break the TB to be able to sync copied instructions + * immediately. 
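+ * DISAS_STOP ends the translation block after this instruction, so any + * code written by the preceding stores is retranslated on the next fetch.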
+ */ + ctx->base.is_jmp = DISAS_STOP; + break; + case OPC_BPOSGE32: /* MIPS DSP branch */ +#if defined(TARGET_MIPS64) + case OPC_BPOSGE64: +#endif + check_dsp(ctx); + gen_compute_branch(ctx, op1, 4, -1, -2, (int32_t)imm << 2, 4); + break; +#if defined(TARGET_MIPS64) + case OPC_DAHI: + check_insn(ctx, ISA_MIPS32R6); + check_mips_64(ctx); + if (rs != 0) { + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rs], (int64_t)imm << 32); + } + break; + case OPC_DATI: + check_insn(ctx, ISA_MIPS32R6); + check_mips_64(ctx); + if (rs != 0) { + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rs], tcg_ctx->cpu_gpr[rs], (int64_t)imm << 48); + } + break; +#endif + default: /* Invalid */ + MIPS_INVAL("regimm"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_CP0: + check_cp0_enabled(ctx); + op1 = MASK_CP0(ctx->opcode); + switch (op1) { + case OPC_MFC0: + case OPC_MTC0: + case OPC_MFTR: + case OPC_MTTR: + case OPC_MFHC0: + case OPC_MTHC0: +#if defined(TARGET_MIPS64) + case OPC_DMFC0: + case OPC_DMTC0: +#endif + gen_cp0(env, ctx, op1, rt, rd); + break; + case OPC_C0: + case OPC_C0_1: + case OPC_C0_2: + case OPC_C0_3: + case OPC_C0_4: + case OPC_C0_5: + case OPC_C0_6: + case OPC_C0_7: + case OPC_C0_8: + case OPC_C0_9: + case OPC_C0_A: + case OPC_C0_B: + case OPC_C0_C: + case OPC_C0_D: + case OPC_C0_E: + case OPC_C0_F: + gen_cp0(env, ctx, MASK_C0(ctx->opcode), rt, rd); + break; + case OPC_MFMC0: + { + uint32_t op2; + TCGv t0 = tcg_temp_new(tcg_ctx); + + op2 = MASK_MFMC0(ctx->opcode); + switch (op2) { + case OPC_DMT: + check_cp0_mt(ctx); + gen_helper_dmt(tcg_ctx, t0); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_EMT: + check_cp0_mt(ctx); + gen_helper_emt(tcg_ctx, t0); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_DVPE: + check_cp0_mt(ctx); + gen_helper_dvpe(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_EVPE: + check_cp0_mt(ctx); + gen_helper_evpe(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_DVP: + check_insn(ctx, ISA_MIPS32R6); + if (ctx->vp) { + gen_helper_dvp(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + } + break; + case OPC_EVP: + check_insn(ctx, ISA_MIPS32R6); + if (ctx->vp) { + gen_helper_evp(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + } + break; + case OPC_DI: + check_insn(ctx, ISA_MIPS32R2); + save_cpu_state(ctx, 1); + gen_helper_di(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + /* + * Stop translation as we may have switched + * the execution mode. + */ + ctx->base.is_jmp = DISAS_STOP; + break; + case OPC_EI: + check_insn(ctx, ISA_MIPS32R2); + save_cpu_state(ctx, 1); + gen_helper_ei(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + /* + * DISAS_STOP isn't sufficient, we need to ensure we break + * out of translated code to check for pending interrupts. 
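+ * DISAS_EXIT returns control to the cpu_exec() main loop instead of + * chaining to the next TB; pending interrupts are taken there.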
+ */ + gen_save_pc(tcg_ctx, ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + break; + default: /* Invalid */ + MIPS_INVAL("mfmc0"); + generate_exception_end(ctx, EXCP_RI); + break; + } + tcg_temp_free(tcg_ctx, t0); + } + break; + case OPC_RDPGPR: + check_insn(ctx, ISA_MIPS32R2); + gen_load_srsgpr(tcg_ctx, rt, rd); + break; + case OPC_WRPGPR: + check_insn(ctx, ISA_MIPS32R2); + gen_store_srsgpr(tcg_ctx, rt, rd); + break; + default: + MIPS_INVAL("cp0"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC, OPC_ADDI */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_BOVC, OPC_BEQZALC, OPC_BEQC */ + gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); + } else { + /* OPC_ADDI */ + /* Arithmetic with immediate opcode */ + gen_arith_imm(ctx, op, rt, rs, imm); + } + break; + case OPC_ADDIU: + gen_arith_imm(ctx, op, rt, rs, imm); + break; + case OPC_SLTI: /* Set on less than with immediate opcode */ + case OPC_SLTIU: + gen_slt_imm(ctx, op, rt, rs, imm); + break; + case OPC_ANDI: /* Arithmetic with immediate opcode */ + case OPC_LUI: /* OPC_AUI */ + case OPC_ORI: + case OPC_XORI: + gen_logic_imm(ctx, op, rt, rs, imm); + break; + case OPC_J: /* Jump */ + case OPC_JAL: + offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; + gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); + break; + /* Branch */ + case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC, OPC_BLEZL */ + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rt == 0) { + generate_exception_end(ctx, EXCP_RI); + break; + } + /* OPC_BLEZC, OPC_BGEZC, OPC_BGEC */ + gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); + } else { + /* OPC_BLEZL */ + gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); + } + break; + case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC, OPC_BGTZL */ + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rt == 0) { + generate_exception_end(ctx, EXCP_RI); + break; + } + /* OPC_BGTZC, OPC_BLTZC, OPC_BLTC */ + gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); + } else { + /* OPC_BGTZL */ + gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); + } + break; + case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC, OPC_BLEZ */ + if (rt == 0) { + /* OPC_BLEZ */ + gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); + } else { + check_insn(ctx, ISA_MIPS32R6); + /* OPC_BLEZALC, OPC_BGEZALC, OPC_BGEUC */ + gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); + } + break; + case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC, OPC_BGTZ */ + if (rt == 0) { + /* OPC_BGTZ */ + gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); + } else { + check_insn(ctx, ISA_MIPS32R6); + /* OPC_BGTZALC, OPC_BLTZALC, OPC_BLTUC */ + gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); + } + break; + case OPC_BEQL: + case OPC_BNEL: + check_insn(ctx, ISA_MIPS2); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + /* Fallthrough */ + case OPC_BEQ: + case OPC_BNE: + gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4); + break; + case OPC_LL: /* Load and stores */ + check_insn(ctx, ISA_MIPS2); + if (ctx->insn_flags & INSN_R5900) { + check_insn_opc_user_only(ctx, INSN_R5900); + } + /* Fallthrough */ + case OPC_LWL: + case OPC_LWR: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + /* Fallthrough */ + case OPC_LB: + case OPC_LH: + case OPC_LW: + case OPC_LWPC: + case OPC_LBU: + case OPC_LHU: + gen_ld(ctx, op, rt, rs, imm); + break; + case OPC_SWL: + case OPC_SWR: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + /* fall through */ + case OPC_SB: + case OPC_SH: + case OPC_SW: + gen_st(ctx, op, rt, rs, imm); + break; + case OPC_SC: + check_insn(ctx, 
ISA_MIPS2); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + if (ctx->insn_flags & INSN_R5900) { + check_insn_opc_user_only(ctx, INSN_R5900); + } + gen_st_cond(ctx, rt, rs, imm, MO_TESL, false); + break; + case OPC_CACHE: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + check_cp0_enabled(ctx); + check_insn(ctx, ISA_MIPS3 | ISA_MIPS32); + if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) { + gen_cache_operation(ctx, rt, rs, imm); + } + /* Treat as NOP. */ + break; + case OPC_PREF: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + if (ctx->insn_flags & INSN_R5900) { + /* Treat as NOP. */ + } else { + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); + /* Treat as NOP. */ + } + break; + + /* Floating point (COP1). */ + case OPC_LWC1: + case OPC_LDC1: + case OPC_SWC1: + case OPC_SDC1: + gen_cop1_ldst(ctx, op, rt, rs, imm); + break; + + case OPC_CP1: + op1 = MASK_CP1(ctx->opcode); + + switch (op1) { + case OPC_MFHC1: + case OPC_MTHC1: + check_cp1_enabled(ctx); + check_insn(ctx, ISA_MIPS32R2); + /* fall through */ + case OPC_MFC1: + case OPC_CFC1: + case OPC_MTC1: + case OPC_CTC1: + check_cp1_enabled(ctx); + gen_cp1(ctx, op1, rt, rd); + break; +#if defined(TARGET_MIPS64) + case OPC_DMFC1: + case OPC_DMTC1: + check_cp1_enabled(ctx); + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_cp1(ctx, op1, rt, rd); + break; +#endif + case OPC_BC1EQZ: /* OPC_BC1ANY2 */ + check_cp1_enabled(ctx); + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_BC1EQZ */ + gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode), + rt, imm << 2, 4); + } else { + /* OPC_BC1ANY2 */ + check_cop1x(ctx); + check_insn(ctx, ASE_MIPS3D); + gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), + (rt >> 2) & 0x7, imm << 2); + } + break; + case OPC_BC1NEZ: + check_cp1_enabled(ctx); + check_insn(ctx, ISA_MIPS32R6); + gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode), + rt, imm << 2, 4); + break; + case OPC_BC1ANY4: + check_cp1_enabled(ctx); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + check_cop1x(ctx); + check_insn(ctx, ASE_MIPS3D); + /* fall through */ + case OPC_BC1: + check_cp1_enabled(ctx); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), + (rt >> 2) & 0x7, imm << 2); + break; + case OPC_PS_FMT: + check_ps(ctx); + /* fall through */ + case OPC_S_FMT: + case OPC_D_FMT: + check_cp1_enabled(ctx); + gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, + (imm >> 8) & 0x7); + break; + case OPC_W_FMT: + case OPC_L_FMT: + { + int r6_op = ctx->opcode & FOP(0x3f, 0x1f); + check_cp1_enabled(ctx); + if (ctx->insn_flags & ISA_MIPS32R6) { + switch (r6_op) { + case R6_OPC_CMP_AF_S: + case R6_OPC_CMP_UN_S: + case R6_OPC_CMP_EQ_S: + case R6_OPC_CMP_UEQ_S: + case R6_OPC_CMP_LT_S: + case R6_OPC_CMP_ULT_S: + case R6_OPC_CMP_LE_S: + case R6_OPC_CMP_ULE_S: + case R6_OPC_CMP_SAF_S: + case R6_OPC_CMP_SUN_S: + case R6_OPC_CMP_SEQ_S: + case R6_OPC_CMP_SEUQ_S: + case R6_OPC_CMP_SLT_S: + case R6_OPC_CMP_SULT_S: + case R6_OPC_CMP_SLE_S: + case R6_OPC_CMP_SULE_S: + case R6_OPC_CMP_OR_S: + case R6_OPC_CMP_UNE_S: + case R6_OPC_CMP_NE_S: + case R6_OPC_CMP_SOR_S: + case R6_OPC_CMP_SUNE_S: + case R6_OPC_CMP_SNE_S: + gen_r6_cmp_s(ctx, ctx->opcode & 0x1f, rt, rd, sa); + break; + case R6_OPC_CMP_AF_D: + case R6_OPC_CMP_UN_D: + case R6_OPC_CMP_EQ_D: + case R6_OPC_CMP_UEQ_D: + case R6_OPC_CMP_LT_D: + case R6_OPC_CMP_ULT_D: + case R6_OPC_CMP_LE_D: + case R6_OPC_CMP_ULE_D: + case R6_OPC_CMP_SAF_D: + case R6_OPC_CMP_SUN_D: + case R6_OPC_CMP_SEQ_D: + case R6_OPC_CMP_SEUQ_D: + case R6_OPC_CMP_SLT_D: + case R6_OPC_CMP_SULT_D: + case R6_OPC_CMP_SLE_D: + case 
R6_OPC_CMP_SULE_D: + case R6_OPC_CMP_OR_D: + case R6_OPC_CMP_UNE_D: + case R6_OPC_CMP_NE_D: + case R6_OPC_CMP_SOR_D: + case R6_OPC_CMP_SUNE_D: + case R6_OPC_CMP_SNE_D: + gen_r6_cmp_d(ctx, ctx->opcode & 0x1f, rt, rd, sa); + break; + default: + gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), + rt, rd, sa, (imm >> 8) & 0x7); + + break; + } + } else { + gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, + (imm >> 8) & 0x7); + } + break; + } + case OPC_BZ_V: + case OPC_BNZ_V: + case OPC_BZ_B: + case OPC_BZ_H: + case OPC_BZ_W: + case OPC_BZ_D: + case OPC_BNZ_B: + case OPC_BNZ_H: + case OPC_BNZ_W: + case OPC_BNZ_D: + check_insn(ctx, ASE_MSA); + gen_msa_branch(env, ctx, op1); + break; + default: + MIPS_INVAL("cp1"); + generate_exception_end(ctx, EXCP_RI); + break; + } + break; + + /* Compact branches [R6] and COP2 [non-R6] */ + case OPC_BC: /* OPC_LWC2 */ + case OPC_BALC: /* OPC_SWC2 */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_BC, OPC_BALC */ + gen_compute_compact_branch(ctx, op, 0, 0, + sextract32(ctx->opcode << 2, 0, 28)); + } else { + /* OPC_LWC2, OPC_SWC2 */ + /* COP2: Not implemented. */ + generate_exception_err(ctx, EXCP_CpU, 2); + } + break; + case OPC_BEQZC: /* OPC_JIC, OPC_LDC2 */ + case OPC_BNEZC: /* OPC_JIALC, OPC_SDC2 */ + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rs != 0) { + /* OPC_BEQZC, OPC_BNEZC */ + gen_compute_compact_branch(ctx, op, rs, 0, + sextract32(ctx->opcode << 2, 0, 23)); + } else { + /* OPC_JIC, OPC_JIALC */ + gen_compute_compact_branch(ctx, op, 0, rt, imm); + } + } else { + /* OPC_LWC2, OPC_SWC2 */ + /* COP2: Not implemented. */ + generate_exception_err(ctx, EXCP_CpU, 2); + } + break; + case OPC_CP2: + check_insn(ctx, INSN_LOONGSON2F); + /* Note that these instructions use different fields. */ + gen_loongson_multimedia(ctx, sa, rd, rt); + break; + + case OPC_CP3: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { + check_cp1_enabled(ctx); + op1 = MASK_CP3(ctx->opcode); + switch (op1) { + case OPC_LUXC1: + case OPC_SUXC1: + check_insn(ctx, ISA_MIPS5 | ISA_MIPS32R2); + /* Fallthrough */ + case OPC_LWXC1: + case OPC_LDXC1: + case OPC_SWXC1: + case OPC_SDXC1: + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32R2); + gen_flt3_ldst(ctx, op1, sa, rd, rs, rt); + break; + case OPC_PREFX: + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32R2); + /* Treat as NOP. 
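+ * PREFX is only a performance hint, so dropping it is architecturally + * safe under emulation.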
*/ + break; + case OPC_ALNV_PS: + check_insn(ctx, ISA_MIPS5 | ISA_MIPS32R2); + /* Fallthrough */ + case OPC_MADD_S: + case OPC_MADD_D: + case OPC_MADD_PS: + case OPC_MSUB_S: + case OPC_MSUB_D: + case OPC_MSUB_PS: + case OPC_NMADD_S: + case OPC_NMADD_D: + case OPC_NMADD_PS: + case OPC_NMSUB_S: + case OPC_NMSUB_D: + case OPC_NMSUB_PS: + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32R2); + gen_flt3_arith(ctx, op1, sa, rs, rd, rt); + break; + default: + MIPS_INVAL("cp3"); + generate_exception_end(ctx, EXCP_RI); + break; + } + } else { + generate_exception_err(ctx, EXCP_CpU, 1); + } + break; + +#if defined(TARGET_MIPS64) + /* MIPS64 opcodes */ + case OPC_LLD: + if (ctx->insn_flags & INSN_R5900) { + check_insn_opc_user_only(ctx, INSN_R5900); + } + /* fall through */ + case OPC_LDL: + case OPC_LDR: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + /* fall through */ + case OPC_LWU: + case OPC_LD: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_ld(ctx, op, rt, rs, imm); + break; + case OPC_SDL: + case OPC_SDR: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + /* fall through */ + case OPC_SD: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_st(ctx, op, rt, rs, imm); + break; + case OPC_SCD: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + check_insn(ctx, ISA_MIPS3); + if (ctx->insn_flags & INSN_R5900) { + check_insn_opc_user_only(ctx, INSN_R5900); + } + check_mips_64(ctx); + gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false); + break; + case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_BNVC, OPC_BNEZALC, OPC_BNEC */ + gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); + } else { + /* OPC_DADDI */ + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_arith_imm(ctx, op, rt, rs, imm); + } + break; + case OPC_DADDIU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_arith_imm(ctx, op, rt, rs, imm); + break; +#else + case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ + if (ctx->insn_flags & ISA_MIPS32R6) { + gen_compute_compact_branch(ctx, op, rs, rt, imm << 2); + } else { + MIPS_INVAL("major opcode"); + generate_exception_end(ctx, EXCP_RI); + } + break; +#endif + case OPC_DAUI: /* OPC_JALX */ + if (ctx->insn_flags & ISA_MIPS32R6) { +#if defined(TARGET_MIPS64) + /* OPC_DAUI */ + check_mips_64(ctx); + if (rs == 0) { + generate_exception(ctx, EXCP_RI); + } else if (rt != 0) { + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(tcg_ctx, t0, rs); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rt], t0, imm << 16); + tcg_temp_free(tcg_ctx, t0); + } +#else + generate_exception_end(ctx, EXCP_RI); + MIPS_INVAL("major opcode"); +#endif + } else { + /* OPC_JALX */ + check_insn(ctx, ASE_MIPS16 | ASE_MICROMIPS); + offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; + gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); + } + break; + case OPC_MSA: /* OPC_MDMX */ + if (ctx->insn_flags & INSN_R5900) { +#if defined(TARGET_MIPS64) + gen_mmi_lq(env, ctx); /* MMI_OPC_LQ */ +#endif + } else { + /* MDMX: Not implemented. 
*/ + gen_msa(env, ctx); + } + break; + case OPC_PCREL: + check_insn(ctx, ISA_MIPS32R6); + gen_pcrel(ctx, ctx->opcode, ctx->base.pc_next, rs); + break; + default: /* Invalid */ + MIPS_INVAL("major opcode"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + CPUMIPSState *env = cs->env_ptr; + + // unicorn setup + ctx->uc = cs->uc; + + ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK; + ctx->saved_pc = -1; + ctx->insn_flags = env->insn_flags; + ctx->CP0_Config1 = env->CP0_Config1; + ctx->CP0_Config2 = env->CP0_Config2; + ctx->CP0_Config3 = env->CP0_Config3; + ctx->CP0_Config5 = env->CP0_Config5; + ctx->btarget = 0; + ctx->kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff; + ctx->rxi = (env->CP0_Config3 >> CP0C3_RXI) & 1; + ctx->ie = (env->CP0_Config4 >> CP0C4_IE) & 3; + ctx->bi = (env->CP0_Config3 >> CP0C3_BI) & 1; + ctx->bp = (env->CP0_Config3 >> CP0C3_BP) & 1; + ctx->PAMask = env->PAMask; + ctx->mvh = (env->CP0_Config5 >> CP0C5_MVH) & 1; + ctx->eva = (env->CP0_Config5 >> CP0C5_EVA) & 1; + ctx->sc = (env->CP0_Config3 >> CP0C3_SC) & 1; + ctx->CP0_LLAddr_shift = env->CP0_LLAddr_shift; + ctx->cmgcr = (env->CP0_Config3 >> CP0C3_CMGCR) & 1; + /* Restore delay slot state from the tb context. */ + ctx->hflags = (uint32_t)ctx->base.tb->flags; /* FIXME: maybe use 64 bits? */ + ctx->ulri = (env->CP0_Config3 >> CP0C3_ULRI) & 1; + ctx->ps = ((env->active_fpu.fcr0 >> FCR0_PS) & 1) || + (env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)); + ctx->vp = (env->CP0_Config5 >> CP0C5_VP) & 1; + ctx->mrp = (env->CP0_Config5 >> CP0C5_MRP) & 1; + ctx->nan2008 = (env->active_fpu.fcr31 >> FCR31_NAN2008) & 1; + ctx->abs2008 = (env->active_fpu.fcr31 >> FCR31_ABS2008) & 1; + ctx->mi = (env->CP0_Config5 >> CP0C5_MI) & 1; + ctx->gi = (env->CP0_Config5 >> CP0C5_GI) & 3; + restore_cpu_state(env, ctx); + ctx->mem_idx = hflags_mmu_index(ctx->hflags); + ctx->default_tcg_memop_mask = (ctx->insn_flags & ISA_MIPS32R6) ? + MO_UNALN : MO_ALIGN; + + LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx, + ctx->hflags); +} + +static void mips_tr_tb_start(DisasContextBase *dcbase, CPUState *cs) +{ +} + +static void mips_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_insn_start(tcg_ctx, ctx->base.pc_next, ctx->hflags & MIPS_HFLAG_BMASK, + ctx->btarget); +} + +static bool mips_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, + const CPUBreakpoint *bp) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + save_cpu_state(ctx, 1); + ctx->base.is_jmp = DISAS_NORETURN; + gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env); + /* + * The address covered by the breakpoint must be included in + * [tb->pc, tb->pc + tb->size) in order for it to be + * properly cleared -- thus we increment the PC here so that + * the logic setting tb->size below does the right thing.
+ */ + ctx->base.pc_next += 4; + return true; +} + +static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) +{ + CPUMIPSState *env = cs->env_ptr; + DisasContext *ctx = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = cs->uc; + TCGContext *tcg_ctx = uc->tcg_ctx; + TCGOp *tcg_op, *prev_op = NULL; + int insn_bytes; + int is_slot; + bool hook_insn = false; + + is_slot = ctx->hflags & MIPS_HFLAG_BMASK; + + // Unicorn: end address tells us to stop emulation + if (ctx->base.pc_next == uc->addr_end) { + // raise a special interrupt to quit + gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + return; + } + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, ctx->base.pc_next)) { + // save the last TCG op emitted so far + prev_op = tcg_last_op(tcg_ctx); + hook_insn = true; + gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, uc, ctx->base.pc_next); + // Don't let unicorn stop at the branch delay slot. + if (!is_slot) { + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + } + + if (ctx->insn_flags & ISA_NANOMIPS32) { + ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next); + insn_bytes = decode_nanomips_opc(env, ctx); + } else if (!(ctx->hflags & MIPS_HFLAG_M16)) { + ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next); + insn_bytes = 4; + decode_opc(env, ctx); + } else if (ctx->insn_flags & ASE_MICROMIPS) { + ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next); + insn_bytes = decode_micromips_opc(env, ctx); + } else if (ctx->insn_flags & ASE_MIPS16) { + ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next); + insn_bytes = decode_mips16_opc(env, ctx); + } else { + generate_exception_end(ctx, EXCP_RI); + g_assert(ctx->base.is_jmp == DISAS_NORETURN); + return; + } + + if (hook_insn) { + // Unicorn: patch the callback to have the proper instruction size. + if (prev_op) { + // prev_op was saved above, just before gen_uc_tracecode() emitted + // its ops, so stepping one entry forward in the tail queue lands on + // the mov generated by gen_uc_tracecode() that carries the + // instruction size; we replace its 0xF1F1F1F1 placeholder with the + // real size here. + tcg_op = QTAILQ_NEXT(prev_op, link); + } else { + // this instruction is the very first code to be translated, + // so the op to patch is the first one in the list + tcg_op = QTAILQ_FIRST(&tcg_ctx->ops); + } + + tcg_op->args[1] = insn_bytes; + } + + if (ctx->hflags & MIPS_HFLAG_BMASK) { + if (!(ctx->hflags & (MIPS_HFLAG_BDS16 | MIPS_HFLAG_BDS32 | + MIPS_HFLAG_FBNSLOT))) { + /* + * Force to generate branch as there is neither delay nor + * forbidden slot. + */ + is_slot = 1; + } + if ((ctx->hflags & MIPS_HFLAG_M16) && + (ctx->hflags & MIPS_HFLAG_FBNSLOT)) { + /* + * Force to generate branch as microMIPS R6 doesn't restrict + * branches in the forbidden slot. + */ + is_slot = 1; + } + } + if (is_slot) { + gen_branch(ctx, insn_bytes); + } + ctx->base.pc_next += insn_bytes; + + if (ctx->base.is_jmp != DISAS_NEXT) { + return; + } + /* + * Execute a branch and its delay slot as a single instruction. + * This is what GDB expects and is consistent with what the + * hardware does (e.g. if a delay slot instruction faults, the + * reported PC is the PC of the branch).
+ */ + if (ctx->base.singlestep_enabled && + (ctx->hflags & MIPS_HFLAG_BMASK) == 0) { + ctx->base.is_jmp = DISAS_TOO_MANY; + } + if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE) { + ctx->base.is_jmp = DISAS_TOO_MANY; + } +} + +static void mips_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) { + save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT); + gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env); + } else { + switch (ctx->base.is_jmp) { + case DISAS_STOP: + gen_save_pc(tcg_ctx, ctx->base.pc_next); + tcg_gen_lookup_and_goto_ptr(tcg_ctx); + break; + case DISAS_NEXT: + case DISAS_TOO_MANY: + save_cpu_state(ctx, 0); + gen_goto_tb(ctx, 0, ctx->base.pc_next); + break; + case DISAS_EXIT: + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + break; + case DISAS_NORETURN: + break; + default: + g_assert_not_reached(); + } + } +} + +static const TranslatorOps mips_tr_ops = { + .init_disas_context = mips_tr_init_disas_context, + .tb_start = mips_tr_tb_start, + .insn_start = mips_tr_insn_start, + .breakpoint_check = mips_tr_breakpoint_check, + .translate_insn = mips_tr_translate_insn, + .tb_stop = mips_tr_tb_stop, +}; + +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +{ + DisasContext ctx; + + memset(&ctx, 0, sizeof(ctx)); + translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns); +} + +void mips_tcg_init(struct uc_struct *uc) +{ + int i; + TCGContext *tcg_ctx = uc->tcg_ctx; + + tcg_ctx->cpu_gpr[0] = NULL; + for (i = 1; i < 32; i++) + tcg_ctx->cpu_gpr[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, + active_tc.gpr[i]), + regnames[i]); + + for (i = 0; i < 32; i++) { + int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]); + tcg_ctx->msa_wr_d[i * 2] = + tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, off, msaregnames[i * 2]); + /* + * The scalar floating-point unit (FPU) registers are mapped on + * the MSA vector registers. 
+ */ + tcg_ctx->fpu_f64[i] = tcg_ctx->msa_wr_d[i * 2]; + off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]); + tcg_ctx->msa_wr_d[i * 2 + 1] = + tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, off, msaregnames[i * 2 + 1]); + } + + tcg_ctx->cpu_pc = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.PC), "PC"); + for (i = 0; i < MIPS_DSP_ACC; i++) { + tcg_ctx->cpu_HI[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.HI[i]), + regnames_HI[i]); + tcg_ctx->cpu_LO[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.LO[i]), + regnames_LO[i]); + } + tcg_ctx->cpu_dspctrl = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, + active_tc.DSPControl), + "DSPControl"); + tcg_ctx->bcond = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, bcond), "bcond"); + tcg_ctx->btarget = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, btarget), "btarget"); + tcg_ctx->hflags = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, hflags), "hflags"); + + tcg_ctx->fpu_fcr0 = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_fpu.fcr0), + "fcr0"); + tcg_ctx->fpu_fcr31 = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_fpu.fcr31), + "fcr31"); + tcg_ctx->cpu_lladdr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, lladdr), + "lladdr"); + tcg_ctx->cpu_llval = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llval), + "llval"); + +#if defined(TARGET_MIPS64) + tcg_ctx->cpu_mmr[0] = NULL; + for (i = 1; i < 32; i++) { + tcg_ctx->cpu_mmr[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, + active_tc.mmr[i]), + regnames[i]); + } +#endif + +#if !defined(TARGET_MIPS64) + for (i = 0; i < NUMBER_OF_MXU_REGISTERS - 1; i++) { + tcg_ctx->mxu_gpr[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, + active_tc.mxu_gpr[i]), + mxuregnames[i]); + } + + tcg_ctx->mxu_CR = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.mxu_cr), + mxuregnames[NUMBER_OF_MXU_REGISTERS - 1]); +#endif +} + +#include "translate_init.inc.c" + +void cpu_mips_realize_env(CPUMIPSState *env) +{ + env->exception_base = (int32_t)0xBFC00000; + + mmu_init(env, env->cpu_model); + fpu_init(env, env->cpu_model); + mvp_init(env, env->cpu_model); +} + +#if 0 +bool cpu_supports_cps_smp(const char *cpu_type) +{ + const MIPSCPUClass *mcc = MIPS_CPU_CLASS(object_class_by_name(cpu_type)); + return (mcc->cpu_def->CP0_Config3 & (1 << CP0C3_CMGCR)) != 0; +} + +bool cpu_supports_isa(const char *cpu_type, uint64_t isa) +{ + const MIPSCPUClass *mcc = MIPS_CPU_CLASS(object_class_by_name(cpu_type)); + return (mcc->cpu_def->insn_flags & isa) != 0; +} + +void cpu_set_exception_base(int vp_index, target_ulong address) +{ + MIPSCPU *vp = MIPS_CPU(qemu_get_cpu(vp_index)); + vp->env.exception_base = address; +} +#endif + +void cpu_state_reset(CPUMIPSState *env) +{ + CPUState *cs = env_cpu(env); + + /* Reset registers to their default values */ + env->CP0_PRid = env->cpu_model->CP0_PRid; + env->CP0_Config0 = env->cpu_model->CP0_Config0; +#ifdef TARGET_WORDS_BIGENDIAN + env->CP0_Config0 |= (1 << CP0C0_BE); +#endif + env->CP0_Config1 = env->cpu_model->CP0_Config1; + env->CP0_Config2 = env->cpu_model->CP0_Config2; + env->CP0_Config3 = env->cpu_model->CP0_Config3; + env->CP0_Config4 = env->cpu_model->CP0_Config4; + 
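+ /* + * Note: the *_rw_bitmask values copied below record which bits of the + * corresponding CP0 register the guest may change via MTC0; the mtc0 + * helpers are expected to mask guest writes roughly as in this sketch + * (illustrative only, not a verbatim copy of the helpers): + * + * uint32_t mask = env->CP0_Config4_rw_bitmask; + * env->CP0_Config4 = (env->CP0_Config4 & ~mask) | (new_val & mask); + */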
env->CP0_Config4_rw_bitmask = env->cpu_model->CP0_Config4_rw_bitmask; + env->CP0_Config5 = env->cpu_model->CP0_Config5; + env->CP0_Config5_rw_bitmask = env->cpu_model->CP0_Config5_rw_bitmask; + env->CP0_Config6 = env->cpu_model->CP0_Config6; + env->CP0_Config7 = env->cpu_model->CP0_Config7; + env->CP0_LLAddr_rw_bitmask = env->cpu_model->CP0_LLAddr_rw_bitmask + << env->cpu_model->CP0_LLAddr_shift; + env->CP0_LLAddr_shift = env->cpu_model->CP0_LLAddr_shift; + env->SYNCI_Step = env->cpu_model->SYNCI_Step; + env->CCRes = env->cpu_model->CCRes; + env->CP0_Status_rw_bitmask = env->cpu_model->CP0_Status_rw_bitmask; + env->CP0_TCStatus_rw_bitmask = env->cpu_model->CP0_TCStatus_rw_bitmask; + env->CP0_SRSCtl = env->cpu_model->CP0_SRSCtl; + env->current_tc = 0; + env->SEGBITS = env->cpu_model->SEGBITS; + env->SEGMask = (target_ulong)((1ULL << env->cpu_model->SEGBITS) - 1); +#if defined(TARGET_MIPS64) + if (env->cpu_model->insn_flags & ISA_MIPS3) { + env->SEGMask |= 3ULL << 62; + } +#endif + env->PABITS = env->cpu_model->PABITS; + env->CP0_SRSConf0_rw_bitmask = env->cpu_model->CP0_SRSConf0_rw_bitmask; + env->CP0_SRSConf0 = env->cpu_model->CP0_SRSConf0; + env->CP0_SRSConf1_rw_bitmask = env->cpu_model->CP0_SRSConf1_rw_bitmask; + env->CP0_SRSConf1 = env->cpu_model->CP0_SRSConf1; + env->CP0_SRSConf2_rw_bitmask = env->cpu_model->CP0_SRSConf2_rw_bitmask; + env->CP0_SRSConf2 = env->cpu_model->CP0_SRSConf2; + env->CP0_SRSConf3_rw_bitmask = env->cpu_model->CP0_SRSConf3_rw_bitmask; + env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3; + env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask; + env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4; + env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask; + env->CP0_PageGrain = env->cpu_model->CP0_PageGrain; + env->CP0_EBaseWG_rw_bitmask = env->cpu_model->CP0_EBaseWG_rw_bitmask; + env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0; + env->active_fpu.fcr31_rw_bitmask = env->cpu_model->CP1_fcr31_rw_bitmask; + env->active_fpu.fcr31 = env->cpu_model->CP1_fcr31; + env->msair = env->cpu_model->MSAIR; + env->insn_flags = env->cpu_model->insn_flags; + + if (env->hflags & MIPS_HFLAG_BMASK) { + /* + * If the exception was raised from a delay slot, + * come back to the jump. + */ + env->CP0_ErrorEPC = (env->active_tc.PC + - (env->hflags & MIPS_HFLAG_B16 ? 2 : 4)); + } else { + env->CP0_ErrorEPC = env->active_tc.PC; + } + env->active_tc.PC = env->exception_base; + env->CP0_Random = env->tlb->nb_tlb - 1; + env->tlb->tlb_in_use = env->tlb->nb_tlb; + env->CP0_Wired = 0; + env->CP0_GlobalNumber = (cs->cpu_index & 0xFF) << CP0GN_VPId; + env->CP0_EBase = (cs->cpu_index & 0x3FF); + // if (mips_um_ksegs_enabled()) { + if (false) { + env->CP0_EBase |= 0x40000000; + } else { + env->CP0_EBase |= (int32_t)0x80000000; + } + if (env->CP0_Config3 & (1 << CP0C3_CMGCR)) { + env->CP0_CMGCRBase = 0x1fbf8000 >> 4; + } + env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ? + 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff; + env->CP0_Status = (1 << CP0St_BEV) | (1 << CP0St_ERL); + /* + * Vectored interrupts not implemented, timer on int 7, + * no performance counters. 
+ */ + env->CP0_IntCtl = 0xe0000000; + { + int i; + + for (i = 0; i < 7; i++) { + env->CP0_WatchLo[i] = 0; + env->CP0_WatchHi[i] = 0x80000000; + } + env->CP0_WatchLo[7] = 0; + env->CP0_WatchHi[7] = 0; + } + /* Count register increments in debug mode, EJTAG version 1 */ + env->CP0_Debug = (1 << CP0DB_CNT) | (0x1 << CP0DB_VER); + + // cpu_mips_store_count(env, 1); + + if (env->CP0_Config3 & (1 << CP0C3_MT)) { + int i; + + /* Only TC0 on VPE 0 starts as active. */ + for (i = 0; i < ARRAY_SIZE(env->tcs); i++) { + env->tcs[i].CP0_TCBind = cs->cpu_index << CP0TCBd_CurVPE; + env->tcs[i].CP0_TCHalt = 1; + } + env->active_tc.CP0_TCHalt = 1; + cs->halted = 1; + + if (cs->cpu_index == 0) { + /* VPE0 starts up enabled. */ + env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); + env->CP0_VPEConf0 |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); + + /* TC0 starts up unhalted. */ + cs->halted = 0; + env->active_tc.CP0_TCHalt = 0; + env->tcs[0].CP0_TCHalt = 0; + /* With thread 0 active. */ + env->active_tc.CP0_TCStatus = (1 << CP0TCSt_A); + env->tcs[0].CP0_TCStatus = (1 << CP0TCSt_A); + } + } + + if (env->CP0_Config1 & (1 << CP0C1_FP)) { + env->CP0_Status |= (1 << CP0St_CU1); + } + + /* + * Configure default legacy segmentation control. We use this regardless of + * whether segmentation control is presented to the guest. + */ + /* KSeg3 (seg0 0xE0000000..0xFFFFFFFF) */ + env->CP0_SegCtl0 = (CP0SC_AM_MK << CP0SC_AM); + /* KSeg2 (seg1 0xC0000000..0xDFFFFFFF) */ + env->CP0_SegCtl0 |= ((CP0SC_AM_MSK << CP0SC_AM)) << 16; + /* KSeg1 (seg2 0xA0000000..0xBFFFFFFF) */ + env->CP0_SegCtl1 = (0 << CP0SC_PA) | (CP0SC_AM_UK << CP0SC_AM) | + (2 << CP0SC_C); + /* KSeg0 (seg3 0x80000000..0x9FFFFFFF) */ + env->CP0_SegCtl1 |= ((0 << CP0SC_PA) | (CP0SC_AM_UK << CP0SC_AM) | + (3 << CP0SC_C)) << 16; + /* USeg (seg4 0x40000000..0x7FFFFFFF) */ + env->CP0_SegCtl2 = (2 << CP0SC_PA) | (CP0SC_AM_MUSK << CP0SC_AM) | + (1 << CP0SC_EU) | (2 << CP0SC_C); + /* USeg (seg5 0x00000000..0x3FFFFFFF) */ + env->CP0_SegCtl2 |= ((0 << CP0SC_PA) | (CP0SC_AM_MUSK << CP0SC_AM) | + (1 << CP0SC_EU) | (2 << CP0SC_C)) << 16; + /* XKPhys (note, SegCtl2.XR = 0, so XAM won't be used) */ + env->CP0_SegCtl1 |= (CP0SC_AM_UK << CP0SC1_XAM); + if ((env->insn_flags & ISA_MIPS32R6) && + (env->active_fpu.fcr0 & (1 << FCR0_F64))) { + /* Status.FR = 0 mode in 64-bit FPU not allowed in R6 */ + env->CP0_Status |= (1 << CP0St_FR); + } + + if (env->insn_flags & ISA_MIPS32R6) { + /* PTW = 1 */ + env->CP0_PWSize = 0x40; + /* GDI = 12 */ + /* UDI = 12 */ + /* MDI = 12 */ + /* PRI = 12 */ + /* PTEI = 2 */ + env->CP0_PWField = 0x0C30C302; + } else { + /* GDI = 0 */ + /* UDI = 0 */ + /* MDI = 0 */ + /* PRI = 0 */ + /* PTEI = 2 */ + env->CP0_PWField = 0x02; + } + + if (env->CP0_Config3 & (1 << CP0C3_ISA) & (1 << (CP0C3_ISA + 1))) { + /* microMIPS on reset when Config3.ISA is 3 */ + env->hflags |= MIPS_HFLAG_M16; + } + + /* MSA */ + if (env->CP0_Config3 & (1 << CP0C3_MSAP)) { + msa_reset(env); + } + + compute_hflags(env); + restore_fp_status(env); + restore_pamask(env); + cs->exception_index = EXCP_NONE; + +#if 0 + if (semihosting_get_argc()) { + /* UHI interface can be used to obtain argc and argv */ + env->active_tc.gpr[4] = -1; + } +#endif +} + +void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, + target_ulong *data) +{ + env->active_tc.PC = data[0]; + env->hflags &= ~MIPS_HFLAG_BMASK; + env->hflags |= data[1]; + switch (env->hflags & MIPS_HFLAG_BMASK_BASE) { + case MIPS_HFLAG_BR: + break; + case MIPS_HFLAG_BC: + case MIPS_HFLAG_BL: + case MIPS_HFLAG_B: +
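+ /* + * For these direct branch flavours the target is a translation-time + * constant recorded by tcg_gen_insn_start(), so it can be restored from + * data[2] here. For MIPS_HFLAG_BR the target lives in a register and the + * generated code keeps env->btarget up to date itself, which is + * presumably why that case restores nothing. + */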
env->btarget = data[2]; + break; + } +} diff --git a/qemu/target/mips/translate_init.inc.c b/qemu/target/mips/translate_init.inc.c new file mode 100644 index 00000000..3e395c7e --- /dev/null +++ b/qemu/target/mips/translate_init.inc.c @@ -0,0 +1,941 @@ +/* + * MIPS emulation for qemu: CPU initialisation routines. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * Copyright (c) 2007 Herve Poussineau + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* CPU / CPU family specific config register values. */ + +/* Have config1, uncached coherency */ +#define MIPS_CONFIG0 \ + ((1U << CP0C0_M) | (0x2 << CP0C0_K0)) + +/* Have config2, no coprocessor2 attached, no MDMX support attached, + no performance counters, watch registers present, + no code compression, EJTAG present, no FPU */ +#define MIPS_CONFIG1 \ +((1U << CP0C1_M) | \ + (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \ + (1 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \ + (0 << CP0C1_FP)) + +/* Have config3, no tertiary/secondary caches implemented */ +#define MIPS_CONFIG2 \ +((1U << CP0C2_M)) + +/* No config4, no DSP ASE, no large physaddr (PABITS), + no external interrupt controller, no vectored interrupts, + no 1kb pages, no SmartMIPS ASE, no trace logic */ +#define MIPS_CONFIG3 \ +((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \ + (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \ + (0 << CP0C3_SM) | (0 << CP0C3_TL)) + +#define MIPS_CONFIG4 \ +((0 << CP0C4_M)) + +#define MIPS_CONFIG5 \ +((0 << CP0C5_M)) + +/*****************************************************************************/ +/* MIPS CPU definitions */ +const mips_def_t mips_defs[] = +{ + { + .name = "4Kc", + .CP0_PRid = 0x00018000, + .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (0 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3, + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x1278FF17, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "4Km", + .CP0_PRid = 0x00018300, + /* Config1 implemented, fixed mapping MMU, + no virtual icache, uncached coherency.
*/ + .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3, + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x1258FF17, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32 | ASE_MIPS16, + .mmu_type = MMU_TYPE_FMT, + }, + { + .name = "4KEcR1", + .CP0_PRid = 0x00018400, + .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (0 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3, + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x1278FF17, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "4KEmR1", + .CP0_PRid = 0x00018500, + .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3, + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x1258FF17, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32 | ASE_MIPS16, + .mmu_type = MMU_TYPE_FMT, + }, + { + .name = "4KEc", + .CP0_PRid = 0x00019000, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (0 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x1278FF17, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32R2, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "4KEm", + .CP0_PRid = 0x00019100, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_FMT << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3, + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x1258FF17, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32R2 | ASE_MIPS16, + .mmu_type = MMU_TYPE_FMT, + }, + { + .name = "24Kc", + .CP0_PRid = 0x00019300, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + /* No DSP implemented. 
*/ + .CP0_Status_rw_bitmask = 0x1278FF1F, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32R2 | ASE_MIPS16, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "24KEc", + .CP0_PRid = 0x00019600, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_DSPP) | (0 << CP0C3_VInt), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + /* we have a DSP, but no FPU */ + .CP0_Status_rw_bitmask = 0x1378FF1F, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "24Kf", + .CP0_PRid = 0x00019300, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + /* No DSP implemented. */ + .CP0_Status_rw_bitmask = 0x3678FF1F, + .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | + (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32R2 | ASE_MIPS16, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "34Kf", + .CP0_PRid = 0x00019500, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_VInt) | (1 << CP0C3_MT) | + (1 << CP0C3_DSPP), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 0, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x3778FF1F, + .CP0_TCStatus_rw_bitmask = (0 << CP0TCSt_TCU3) | (0 << CP0TCSt_TCU2) | + (1 << CP0TCSt_TCU1) | (1 << CP0TCSt_TCU0) | + (0 << CP0TCSt_TMX) | (1 << CP0TCSt_DT) | + (1 << CP0TCSt_DA) | (1 << CP0TCSt_A) | + (0x3 << CP0TCSt_TKSU) | (1 << CP0TCSt_IXMT) | + (0xff << CP0TCSt_TASID), + .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | + (1 << FCR0_D) | (1 << FCR0_S) | (0x95 << FCR0_PRID), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .CP0_SRSCtl = (0xf << CP0SRSCtl_HSS), + .CP0_SRSConf0_rw_bitmask = 0x3fffffff, + .CP0_SRSConf0 = (1U << CP0SRSC0_M) | (0x3fe << CP0SRSC0_SRS3) | + (0x3fe << CP0SRSC0_SRS2) | (0x3fe << CP0SRSC0_SRS1), + .CP0_SRSConf1_rw_bitmask = 0x3fffffff, + .CP0_SRSConf1 = (1U << CP0SRSC1_M) | (0x3fe << CP0SRSC1_SRS6) | + (0x3fe << CP0SRSC1_SRS5) | (0x3fe << CP0SRSC1_SRS4), + .CP0_SRSConf2_rw_bitmask = 0x3fffffff, + .CP0_SRSConf2 = (1U << CP0SRSC2_M) | (0x3fe << CP0SRSC2_SRS9) | + (0x3fe << CP0SRSC2_SRS8) | (0x3fe << CP0SRSC2_SRS7), + .CP0_SRSConf3_rw_bitmask = 0x3fffffff, + .CP0_SRSConf3 = (1U << CP0SRSC3_M) | (0x3fe << CP0SRSC3_SRS12) | + (0x3fe << CP0SRSC3_SRS11) | (0x3fe << CP0SRSC3_SRS10), + .CP0_SRSConf4_rw_bitmask = 0x3fffffff, + .CP0_SRSConf4 = (0x3fe << CP0SRSC4_SRS15) | + (0x3fe << CP0SRSC4_SRS14) | 
(0x3fe << CP0SRSC4_SRS13), + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_MT, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "74Kf", + .CP0_PRid = 0x00019700, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) | + (1 << CP0C3_VInt), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x3778FF1F, + .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | + (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSP_R2, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "M14K", + .CP0_PRid = 0x00019b00, + /* Config1 implemented, fixed mapping MMU, + no virtual icache, uncached coherency. */ + .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_KU) | (0x2 << CP0C0_K23) | + (0x1 << CP0C0_AR) | (MMU_TYPE_FMT << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1, + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (1 << CP0C3_VInt), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x1258FF17, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32R2 | ASE_MICROMIPS, + .mmu_type = MMU_TYPE_FMT, + }, + { + .name = "M14Kc", + /* This is the TLB-based MMU core. */ + .CP0_PRid = 0x00019c00, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (0 << CP0C3_VInt), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x1278FF17, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32R2 | ASE_MICROMIPS, + .mmu_type = MMU_TYPE_R4000, + }, + { + /* FIXME: + * Config3: CMGCR, PW, VZ, CTXTC, CDMM, TL + * Config4: MMUExtDef + * Config5: MRP + * FIR(FCR0): Has2008 + * */ + .name = "P5600", + .CP0_PRid = 0x0001A800, + .CP0_Config0 = MIPS_CONFIG0 | (1 << CP0C0_MM) | (1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (0x3F << CP0C1_MMU) | + (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_FP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_MSAP) | + (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_SC) | + (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | + (1 << CP0C3_VInt), + .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (2 << CP0C4_IE) | + (0x1c << CP0C4_KScrExist), + .CP0_Config4_rw_bitmask = 0, + .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_EVA) | (1 << CP0C5_MVH) | + (1 << CP0C5_LLB) | (1 << CP0C5_MRP), + .CP0_Config5_rw_bitmask = (1 << CP0C5_K) | (1 << CP0C5_CV) | + (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) | + (1 << CP0C5_FRE) | (1 << CP0C5_UFR), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 0, + .SYNCI_Step = 32, + .CCRes = 2, + 
.CP0_Status_rw_bitmask = 0x3C68FF1F, + .CP0_PageGrain_rw_bitmask = (1U << CP0PG_RIE) | (1 << CP0PG_XIE) | + (1 << CP0PG_ELPA) | (1 << CP0PG_IEC), + .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG), + .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_UFRP) | (1 << FCR0_HAS2008) | + (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | + (1 << FCR0_D) | (1 << FCR0_S) | (0x03 << FCR0_PRID), + .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .SEGBITS = 32, + .PABITS = 40, + .insn_flags = CPU_MIPS32R5 | ASE_MSA, + .mmu_type = MMU_TYPE_R4000, + }, + { + /* A generic CPU supporting MIPS32 Release 6 ISA. + FIXME: Support IEEE 754-2008 FP. + Eventually this should be replaced by a real CPU model. */ + .name = "mips32r6-generic", + .CP0_PRid = 0x00010000, + .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) | + (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | + (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_BP) | (1 << CP0C3_BI) | + (2 << CP0C3_ISA) | (1 << CP0C3_ULRI) | + (1 << CP0C3_RXI) | (1U << CP0C3_M), + .CP0_Config4 = MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) | + (3 << CP0C4_IE) | (1U << CP0C4_M), + .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_LLB), + .CP0_Config5_rw_bitmask = (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | + (1 << CP0C5_UFE), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 0, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x3058FF1F, + .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) | + (1U << CP0PG_RIE), + .CP0_PageGrain_rw_bitmask = 0, + .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) | + (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | + (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), + .CP1_fcr31_rw_bitmask = 0x0103FFFF, + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_MIPS32R6 | ASE_MICROMIPS, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "I7200", + .CP0_PRid = 0x00010000, + .CP0_Config0 = MIPS_CONFIG0 | (1 << CP0C0_MM) | (0x2 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = (1U << CP0C1_M) | (15 << CP0C1_MMU) | (2 << CP0C1_IS) | + (4 << CP0C1_IL) | (3 << CP0C1_IA) | (2 << CP0C1_DS) | + (4 << CP0C1_DL) | (3 << CP0C1_DA) | (1 << CP0C1_PC) | + (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_CMGCR) | + (1 << CP0C3_BI) | (1 << CP0C3_SC) | (3 << CP0C3_MMAR) | + (1 << CP0C3_ISA_ON_EXC) | (1 << CP0C3_ISA) | + (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) | + (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) | + (1 << CP0C3_CTXTC) | (1 << CP0C3_VInt) | + (1 << CP0C3_CDMM) | (1 << CP0C3_MT) | (1 << CP0C3_TL), + .CP0_Config4 = MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) | + (2 << CP0C4_IE) | (1U << CP0C4_M), + .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_MVH) | (1 << CP0C5_LLB), + .CP0_Config5_rw_bitmask = (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | + (1 << CP0C5_UFE), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 0, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x3158FF1F, + .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) | + (1U << CP0PG_RIE), + .CP0_PageGrain_rw_bitmask = 0, + .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) | + (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | + (1 << FCR0_S) | 
(0x02 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_NANOMIPS32 | ASE_DSP | ASE_DSP_R2 | ASE_DSP_R3 | + ASE_MT, + .mmu_type = MMU_TYPE_R4000, + }, +#if defined(TARGET_MIPS64) + { + .name = "R4000", + .CP0_PRid = 0x00000400, + /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */ + .CP0_Config0 = (1 << 17) | (0x1 << 9) | (0x1 << 6) | (0x2 << CP0C0_K0), + /* Note: Config1 is only used internally, the R4000 has only Config0. */ + .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU), + .CP0_LLAddr_rw_bitmask = 0xFFFFFFFF, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 16, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x3678FFFF, + /* The R4000 has a full 64bit FPU but doesn't use the fcr0 bits. */ + .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0x0183FFFF, + .SEGBITS = 40, + .PABITS = 36, + .insn_flags = CPU_MIPS3, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "VR5432", + .CP0_PRid = 0x00005400, + /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */ + .CP0_Config0 = (1 << 17) | (0x1 << 9) | (0x1 << 6) | (0x2 << CP0C0_K0), + .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU), + .CP0_LLAddr_rw_bitmask = 0xFFFFFFFFL, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 16, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x3678FFFF, + /* The VR5432 has a full 64bit FPU but doesn't use the fcr0 bits. */ + .CP1_fcr0 = (0x54 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .SEGBITS = 40, + .PABITS = 32, + .insn_flags = CPU_VR54XX, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "5Kc", + .CP0_PRid = 0x00018100, + .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (31 << CP0C1_MMU) | + (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | + (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3, + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x12F8FFFF, + .SEGBITS = 42, + .PABITS = 36, + .insn_flags = CPU_MIPS64, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "5Kf", + .CP0_PRid = 0x00018100, + .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) | + (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | + (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3, + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x36F8FFFF, + /* The 5Kf has F64 / L / W but doesn't use the fcr0 bits. */ + .CP1_fcr0 = (1 << FCR0_D) | (1 << FCR0_S) | + (0x81 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .SEGBITS = 42, + .PABITS = 36, + .insn_flags = CPU_MIPS64, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "20Kc", + /* We emulate a later version of the 20Kc, earlier ones had a broken + WAIT instruction. 
*/ + .CP0_PRid = 0x000182a0, + .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT) | (1 << CP0C0_VI), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (47 << CP0C1_MMU) | + (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3, + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 0, + .SYNCI_Step = 32, + .CCRes = 1, + .CP0_Status_rw_bitmask = 0x36FBFFFF, + /* The 20Kc has F64 / L / W but doesn't use the fcr0 bits. */ + .CP1_fcr0 = (1 << FCR0_3D) | (1 << FCR0_PS) | + (1 << FCR0_D) | (1 << FCR0_S) | + (0x82 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .SEGBITS = 40, + .PABITS = 36, + .insn_flags = CPU_MIPS64 | ASE_MIPS3D, + .mmu_type = MMU_TYPE_R4000, + }, + { + /* A generic CPU providing MIPS64 Release 2 features. + FIXME: Eventually this should be replaced by a real CPU model. */ + .name = "MIPS64R2-generic", + .CP0_PRid = 0x00010000, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) | + (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_LPA), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 0, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x36FBFFFF, + .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG), + .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) | + (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | + (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .SEGBITS = 42, + .PABITS = 36, + .insn_flags = CPU_MIPS64R2 | ASE_MIPS3D, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "5KEc", + .CP0_PRid = 0x00018900, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (31 << CP0C1_MMU) | + (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | + (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3, + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x12F8FFFF, + .SEGBITS = 42, + .PABITS = 36, + .insn_flags = CPU_MIPS64R2, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "5KEf", + .CP0_PRid = 0x00018900, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) | + (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | + (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3, + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 4, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x36F8FFFF, + .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | + (1 << FCR0_D) | (1 << FCR0_S) | + (0x89 << FCR0_PRID) | (0x0 << FCR0_REV), + .SEGBITS = 42, + .PABITS = 36, + .insn_flags = CPU_MIPS64R2, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "I6400", + .CP0_PRid = 
0x1A900, + .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | + (2 << CP0C1_IS) | (5 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (5 << CP0C1_DL) | (3 << CP0C1_DA) | + (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | + (1 << CP0C3_CMGCR) | (1 << CP0C3_MSAP) | + (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) | + (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt), + .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) | + (1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist), + .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) | + (1 << CP0C5_LLB) | (1 << CP0C5_MRP), + .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) | + (1 << CP0C5_FRE) | (1 << CP0C5_UFE), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 0, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x30D8FFFF, + .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) | + (1U << CP0PG_RIE), + .CP0_PageGrain_rw_bitmask = (1 << CP0PG_ELPA), + .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG), + .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) | + (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | + (1 << FCR0_S) | (0x03 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), + .CP1_fcr31_rw_bitmask = 0x0103FFFF, + .MSAIR = 0x03 << MSAIR_ProcID, + .SEGBITS = 48, + .PABITS = 48, + .insn_flags = CPU_MIPS64R6 | ASE_MSA, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "I6500", + .CP0_PRid = 0x1B000, + .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | + (2 << CP0C1_IS) | (5 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (5 << CP0C1_DL) | (3 << CP0C1_DA) | + (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | + (1 << CP0C3_CMGCR) | (1 << CP0C3_MSAP) | + (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) | + (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt), + .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) | + (1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist), + .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) | + (1 << CP0C5_LLB) | (1 << CP0C5_MRP), + .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) | + (1 << CP0C5_FRE) | (1 << CP0C5_UFE), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 0, + .SYNCI_Step = 64, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x30D8FFFF, + .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) | + (1U << CP0PG_RIE), + .CP0_PageGrain_rw_bitmask = (1 << CP0PG_ELPA), + .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG), + .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) | + (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | + (1 << FCR0_S) | (0x03 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), + .CP1_fcr31_rw_bitmask = 0x0103FFFF, + .MSAIR = 0x03 << MSAIR_ProcID, + .SEGBITS = 48, + .PABITS = 48, + .insn_flags = CPU_MIPS64R6 | ASE_MSA, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "Loongson-2E", + .CP0_PRid = 0x6302, + /* 64KB I-cache and d-cache. 4 way with 32 bit cache line size. 
*/ + .CP0_Config0 = (0x1<<17) | (0x1<<16) | (0x1<<11) | (0x1<<8) | + (0x1<<5) | (0x1<<4) | (0x1<<1), + /* Note: Config1 is only used internally, + Loongson-2E has only Config0. */ + .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU), + .SYNCI_Step = 16, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x35D0FFFF, + .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .SEGBITS = 40, + .PABITS = 40, + .insn_flags = CPU_LOONGSON2E, + .mmu_type = MMU_TYPE_R4000, + }, + { + .name = "Loongson-2F", + .CP0_PRid = 0x6303, + /* 64KB I-cache and d-cache. 4 way with 32 bit cache line size. */ + .CP0_Config0 = (0x1<<17) | (0x1<<16) | (0x1<<11) | (0x1<<8) | + (0x1<<5) | (0x1<<4) | (0x1<<1), + /* Note: Config1 is only used internally, + Loongson-2F has only Config0. */ + .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU), + .SYNCI_Step = 16, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0xF5D0FF1F, /* Bits 7:5 not writable. */ + .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .SEGBITS = 40, + .PABITS = 40, + .insn_flags = CPU_LOONGSON2F, + .mmu_type = MMU_TYPE_R4000, + }, + { + /* A generic CPU providing MIPS64 DSP R2 ASE features. + FIXME: Eventually this should be replaced by a real CPU model. */ + .name = "mips64dspr2", + .CP0_PRid = 0x00010000, + .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) | + (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + .CP0_Config2 = MIPS_CONFIG2, + .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_DSP2P) | + (1 << CP0C3_DSPP) | (1 << CP0C3_LPA), + .CP0_LLAddr_rw_bitmask = 0, + .CP0_LLAddr_shift = 0, + .SYNCI_Step = 32, + .CCRes = 2, + .CP0_Status_rw_bitmask = 0x37FBFFFF, + .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) | + (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | + (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .SEGBITS = 42, + .PABITS = 36, + .insn_flags = CPU_MIPS64R2 | ASE_DSP | ASE_DSP_R2, + .mmu_type = MMU_TYPE_R4000, + }, + +#endif +}; +const int mips_defs_number = ARRAY_SIZE(mips_defs); + +static void no_mmu_init (CPUMIPSState *env, const mips_def_t *def) +{ + env->tlb->nb_tlb = 1; + env->tlb->map_address = &no_mmu_map_address; +} + +static void fixed_mmu_init (CPUMIPSState *env, const mips_def_t *def) +{ + env->tlb->nb_tlb = 1; + env->tlb->map_address = &fixed_mmu_map_address; +} + +static void r4k_mmu_init (CPUMIPSState *env, const mips_def_t *def) +{ + env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63); + env->tlb->map_address = &r4k_map_address; + env->tlb->helper_tlbwi = r4k_helper_tlbwi; + env->tlb->helper_tlbwr = r4k_helper_tlbwr; + env->tlb->helper_tlbp = r4k_helper_tlbp; + env->tlb->helper_tlbr = r4k_helper_tlbr; + env->tlb->helper_tlbinv = r4k_helper_tlbinv; + env->tlb->helper_tlbinvf = r4k_helper_tlbinvf; +} + +static void mmu_init (CPUMIPSState *env, const mips_def_t *def) +{ + env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext)); + + switch (def->mmu_type) { + case MMU_TYPE_NONE: + no_mmu_init(env, def); + break; + case MMU_TYPE_R4000: + r4k_mmu_init(env, def); + break; + case MMU_TYPE_FMT: + fixed_mmu_init(env, def); + break; + case MMU_TYPE_R3000: + case MMU_TYPE_R6000: + case MMU_TYPE_R8000: + default: + 
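+ /* + * Only the no-MMU, fixed-mapping and R4000-style TLB cases are wired + * up here; the remaining MMU types from the CPU table are rejected + * below. As a worked example of the R4000 path: the 4Kc definition + * above sets (15 << CP0C1_MMU) in Config1, so r4k_mmu_init() computes + * nb_tlb = 1 + 15 = 16 TLB entries. + */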
cpu_abort(env_cpu(env), "MMU type not supported\n"); + } +} + +static void fpu_init (CPUMIPSState *env, const mips_def_t *def) +{ + int i; + + for (i = 0; i < MIPS_FPU_MAX; i++) + env->fpus[i].fcr0 = def->CP1_fcr0; + + memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu)); +} + +static void mvp_init (CPUMIPSState *env, const mips_def_t *def) +{ + env->mvp = g_malloc0(sizeof(CPUMIPSMVPContext)); + + /* MVPConf1 implemented, TLB sharable, no gating storage support, + programmable cache partitioning implemented, number of allocatable + and sharable TLB entries, MVP has allocatable TCs, 2 VPEs + implemented, 5 TCs implemented. */ + env->mvp->CP0_MVPConf0 = (1U << CP0MVPC0_M) | (1 << CP0MVPC0_TLBS) | + (0 << CP0MVPC0_GS) | (1 << CP0MVPC0_PCP) | +// TODO: actually do 2 VPEs. +// (1 << CP0MVPC0_TCA) | (0x1 << CP0MVPC0_PVPE) | +// (0x04 << CP0MVPC0_PTC); + (1 << CP0MVPC0_TCA) | (0x0 << CP0MVPC0_PVPE) | + (0x00 << CP0MVPC0_PTC); + /* Usermode has no TLB support */ + env->mvp->CP0_MVPConf0 |= (env->tlb->nb_tlb << CP0MVPC0_PTLBE); + + /* Allocatable CP1 have media extensions, allocatable CP1 have FP support, + no UDI implemented, no CP2 implemented, 1 CP1 implemented. */ + env->mvp->CP0_MVPConf1 = (1U << CP0MVPC1_CIM) | (1 << CP0MVPC1_CIF) | + (0x0 << CP0MVPC1_PCX) | (0x0 << CP0MVPC1_PCP2) | + (0x1 << CP0MVPC1_PCP1); +} + +static void msa_reset(CPUMIPSState *env) +{ + /* MSA CSR: + - non-signaling floating point exception mode off (NX bit is 0) + - Cause, Enables, and Flags are all 0 + - round to nearest / ties to even (RM bits are 0) */ + env->active_tc.msacsr = 0; + + restore_msa_fp_status(env); + + /* tininess detected after rounding. */ + set_float_detect_tininess(float_tininess_after_rounding, + &env->active_tc.msa_fp_status); + + /* clear float_status exception flags */ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); + + /* clear float_status nan mode */ + set_default_nan_mode(0, &env->active_tc.msa_fp_status); + + /* set proper signaling bit meaning ("1" means "quiet") */ + set_snan_bit_is_one(0, &env->active_tc.msa_fp_status); +} diff --git a/qemu/target/mips/unicorn.c b/qemu/target/mips/unicorn.c new file mode 100644 index 00000000..044f384b --- /dev/null +++ b/qemu/target/mips/unicorn.c @@ -0,0 +1,239 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh, 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + +#include "sysemu/cpus.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" +#include "unicorn.h" + +#ifdef TARGET_MIPS64 +typedef uint64_t mipsreg_t; +#else +typedef uint32_t mipsreg_t; +#endif + +MIPSCPU *cpu_mips_init(struct uc_struct *uc, const char *cpu_model); + +static uint64_t mips_mem_redirect(uint64_t address) +{ + // kseg0 range masks off high address bit + if (address >= 0x80000000 && address <= 0x9fffffff) + return address & 0x7fffffff; + + // kseg1 range masks off top 3 address bits + if (address >= 0xa0000000 && address <= 0xbfffffff) { + return address & 0x1fffffff; + } + + // no redirect + return address; } + +static void mips_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUMIPSState *)uc->cpu->env_ptr)->active_tc.PC = address; +} + + +static void mips_release(void *ctx) +{ + int i; + TCGContext *tcg_ctx = (TCGContext *)ctx; + MIPSCPU *cpu = (MIPSCPU *)tcg_ctx->uc->cpu; + CPUTLBDesc *d = cpu->neg.tlb.d; + CPUTLBDescFast *f = cpu->neg.tlb.f; + CPUTLBDesc *desc; + CPUTLBDescFast *fast; + + release_common(ctx); + for (i = 0; i < NB_MMU_MODES; i++) { + desc = &(d[i]); + fast = &(f[i]); +
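+ /* + * desc->iotlb and fast->table are the dynamically sized TLB arrays + * allocated per MMU mode when the CPU's TLBs are initialised; they are + * freed explicitly here because Unicorn tears the CPU down without + * going through the usual QEMU object teardown. + */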
g_free(desc->iotlb); + g_free(fast->table); + } + + g_free(cpu->env.mvp); + g_free(cpu->env.tlb); +} + +void mips_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env; + (void)uc; + env = uc->cpu->env_ptr; + memset(env->active_tc.gpr, 0, sizeof(env->active_tc.gpr)); + + env->active_tc.PC = 0; +} + +static void reg_read(CPUMIPSState *env, unsigned int regid, void *value) +{ + if (regid >= UC_MIPS_REG_0 && regid <= UC_MIPS_REG_31) + *(mipsreg_t *)value = env->active_tc.gpr[regid - UC_MIPS_REG_0]; + else { + switch(regid) { + default: break; + case UC_MIPS_REG_PC: + *(mipsreg_t *)value = env->active_tc.PC; + break; + case UC_MIPS_REG_CP0_CONFIG3: + *(mipsreg_t *)value = env->CP0_Config3; + break; + case UC_MIPS_REG_CP0_USERLOCAL: + *(mipsreg_t *)value = env->active_tc.CP0_UserLocal; + break; + } + } + + return; +} + +static void reg_write(CPUMIPSState *env, unsigned int regid, const void *value) +{ + if (regid >= UC_MIPS_REG_0 && regid <= UC_MIPS_REG_31) + env->active_tc.gpr[regid - UC_MIPS_REG_0] = *(mipsreg_t *)value; + else { + switch(regid) { + default: break; + case UC_MIPS_REG_PC: + env->active_tc.PC = *(mipsreg_t *)value; + break; + case UC_MIPS_REG_CP0_CONFIG3: + env->CP0_Config3 = *(mipsreg_t *)value; + break; + case UC_MIPS_REG_CP0_USERLOCAL: + env->active_tc.CP0_UserLocal = *(mipsreg_t *)value; + break; + } + } + + return; +} + +int mips_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUMIPSState *env = &(MIPS_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +int mips_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) +{ + CPUMIPSState *env = &(MIPS_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + if(regid == UC_MIPS_REG_PC){ + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + } + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_MIPS64 +#ifdef TARGET_WORDS_BIGENDIAN + int mips64_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +#else + int mips64el_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +#endif +#else // if TARGET_MIPS +#ifdef TARGET_WORDS_BIGENDIAN + int mips_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +#else + int mipsel_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +#endif +#endif +{ + CPUMIPSState *env = (CPUMIPSState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_MIPS64 +#ifdef TARGET_WORDS_BIGENDIAN + int mips64_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +#else + int mips64el_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +#endif +#else // if TARGET_MIPS +#ifdef TARGET_WORDS_BIGENDIAN + int mips_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +#else + int mipsel_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +#endif +#endif +{ + CPUMIPSState *env = (CPUMIPSState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = 
regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + } + + return 0; +} + +static int mips_cpus_init(struct uc_struct *uc, const char *cpu_model) +{ + MIPSCPU *cpu; + + cpu = cpu_mips_init(uc, NULL); + if (cpu == NULL) { + return -1; + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_MIPS64 +#ifdef TARGET_WORDS_BIGENDIAN + void mips64_uc_init(struct uc_struct* uc) +#else + void mips64el_uc_init(struct uc_struct* uc) +#endif +#else // if TARGET_MIPS +#ifdef TARGET_WORDS_BIGENDIAN + void mips_uc_init(struct uc_struct* uc) +#else + void mipsel_uc_init(struct uc_struct* uc) +#endif +#endif +{ + uc->reg_read = mips_reg_read; + uc->reg_write = mips_reg_write; + uc->reg_reset = mips_reg_reset; + uc->release = mips_release; + uc->set_pc = mips_set_pc; + uc->mem_redirect = mips_mem_redirect; + uc->cpus_init = mips_cpus_init; + uc->cpu_context_size = offsetof(CPUMIPSState, end_reset_fields); + uc_common_init(uc); +} diff --git a/qemu/target/mips/unicorn.h b/qemu/target/mips/unicorn.h new file mode 100644 index 00000000..6179a3c3 --- /dev/null +++ b/qemu/target/mips/unicorn.h @@ -0,0 +1,26 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ + +#ifndef UC_QEMU_TARGET_MIPS_H +#define UC_QEMU_TARGET_MIPS_H + +// functions to read & write registers +int mips_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); +int mips_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); + +int mips_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int mips_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); +int mipsel_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int mipsel_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); +int mips64_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int mips64_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); +int mips64el_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int mips64el_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); + +void mips_reg_reset(struct uc_struct *uc); + +void mips_uc_init(struct uc_struct* uc); +void mipsel_uc_init(struct uc_struct* uc); +void mips64_uc_init(struct uc_struct* uc); +void mips64el_uc_init(struct uc_struct* uc); +#endif diff --git a/qemu/target/ppc/compat.c b/qemu/target/ppc/compat.c new file mode 100644 index 00000000..56eddb04 --- /dev/null +++ b/qemu/target/ppc/compat.c @@ -0,0 +1,330 @@ +/* + * PowerPC CPU initialization for qemu. + * + * Copyright 2016, David Gibson, Red Hat Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
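
The *_context_reg_read()/*_context_reg_write() variants declared above operate on a saved snapshot rather than on the live CPU. The snapshot size is bounded by uc->cpu_context_size, which the uc_init hook sets to offsetof(CPUMIPSState, end_reset_fields) so that only the reset-relevant prefix of CPUMIPSState is copied. A usage sketch, assuming the uc_context_* entry points this import wires these functions into:

uc_context *ctx;
mipsreg_t pc;   /* uint32_t on 32-bit targets, see the typedef above */

uc_context_alloc(uc, &ctx);                     /* buffer of cpu_context_size bytes */
uc_context_save(uc, ctx);                       /* snapshot the live CPUMIPSState */
/* ... emulate further; registers change ... */
uc_context_reg_read(ctx, UC_MIPS_REG_PC, &pc);  /* read from the snapshot, not the CPU */
uc_context_restore(uc, ctx);                    /* roll the live CPU state back */
uc_context_free(ctx);
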
+ */ + +#include "qemu/osdep.h" +#include "sysemu/cpus.h" +#include "cpu-models.h" +#include "cpu.h" + +typedef struct { + const char *name; + uint32_t pvr; + uint64_t pcr; + uint64_t pcr_level; + + /* + * Maximum allowed virtual threads per virtual core + * + * This is to stop older guests getting confused by seeing more + * threads than they think the cpu can support. Usually it's + * equal to the number of threads supported on bare metal + * hardware, but not always (see POWER9). + */ + int max_vthreads; +} CompatInfo; + +static const CompatInfo compat_table[] = { + /* + * Ordered from oldest to newest - the code relies on this + */ + { /* POWER6, ISA2.05 */ + .name = "power6", + .pvr = CPU_POWERPC_LOGICAL_2_05, + .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | + PCR_COMPAT_2_06 | PCR_COMPAT_2_05 | PCR_TM_DIS | PCR_VSX_DIS, + .pcr_level = PCR_COMPAT_2_05, + .max_vthreads = 2, + }, + { /* POWER7, ISA2.06 */ + .name = "power7", + .pvr = CPU_POWERPC_LOGICAL_2_06, + .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | + PCR_COMPAT_2_06 | PCR_TM_DIS, + .pcr_level = PCR_COMPAT_2_06, + .max_vthreads = 4, + }, + { + .name = "power7+", + .pvr = CPU_POWERPC_LOGICAL_2_06_PLUS, + .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | + PCR_COMPAT_2_06 | PCR_TM_DIS, + .pcr_level = PCR_COMPAT_2_06, + .max_vthreads = 4, + }, + { /* POWER8, ISA2.07 */ + .name = "power8", + .pvr = CPU_POWERPC_LOGICAL_2_07, + .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07, + .pcr_level = PCR_COMPAT_2_07, + .max_vthreads = 8, + }, + { /* POWER9, ISA3.00 */ + .name = "power9", + .pvr = CPU_POWERPC_LOGICAL_3_00, + .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00, + .pcr_level = PCR_COMPAT_3_00, + /* + * POWER9 hardware only supports 4 threads / core, but this + * limit is for guests. We need to support 8 vthreads/vcore + * on POWER9 for POWER8 compatibility guests, and it's very + * confusing if half of the threads disappear from the guest + * if it announces it's POWER9 aware at CAS time. 
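
Each entry's .pcr value ORs together its own compatibility bit and every newer level's bit, so selecting an older mode switches off all newer-ISA behaviour at once. For example, per the table above, running in POWER7 (ISA 2.06) compatibility composes the following (illustrative only; when applied, the value is additionally clamped by the host class, env->spr[SPR_PCR] = pcr & pcc->pcr_mask):

/* the "power7" row: disable ISA 2.07/3.00/3.10 features plus TM,
 * which v2.06 does not have */
uint64_t pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 |
               PCR_COMPAT_2_06 | PCR_TM_DIS;
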
+ */ + .max_vthreads = 8, + }, + { /* POWER10, ISA3.10 */ + .name = "power10", + .pvr = CPU_POWERPC_LOGICAL_3_10, + .pcr = PCR_COMPAT_3_10, + .pcr_level = PCR_COMPAT_3_10, + .max_vthreads = 8, + }, +}; + +static const CompatInfo *compat_by_pvr(uint32_t pvr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(compat_table); i++) { + if (compat_table[i].pvr == pvr) { + return &compat_table[i]; + } + } + return NULL; +} + +static bool pcc_compat(PowerPCCPUClass *pcc, uint32_t compat_pvr, + uint32_t min_compat_pvr, uint32_t max_compat_pvr) +{ + const CompatInfo *compat = compat_by_pvr(compat_pvr); + const CompatInfo *min = compat_by_pvr(min_compat_pvr); + const CompatInfo *max = compat_by_pvr(max_compat_pvr); + + g_assert(!min_compat_pvr || min); + g_assert(!max_compat_pvr || max); + + if (!compat) { + /* Not a recognized logical PVR */ + return false; + } + if ((min && (compat < min)) || (max && (compat > max))) { + /* Outside specified range */ + return false; + } + if (!(pcc->pcr_supported & compat->pcr_level)) { + /* Not supported by this CPU */ + return false; + } + return true; +} + +bool ppc_check_compat(PowerPCCPU *cpu, uint32_t compat_pvr, + uint32_t min_compat_pvr, uint32_t max_compat_pvr) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + // g_assert(cpu->vhyp); + + return pcc_compat(pcc, compat_pvr, min_compat_pvr, max_compat_pvr); +} + +#if 0 +bool ppc_type_check_compat(const char *cputype, uint32_t compat_pvr, + uint32_t min_compat_pvr, uint32_t max_compat_pvr) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(object_class_by_name(cputype)); + return pcc_compat(pcc, compat_pvr, min_compat_pvr, max_compat_pvr); +} + +void ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr) +{ + const CompatInfo *compat = compat_by_pvr(compat_pvr); + CPUPPCState *env = &cpu->env; + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + uint64_t pcr; + + if (!compat_pvr) { + pcr = 0; + } else if (!compat) { + error_setg(errp, "Unknown compatibility PVR 0x%08"PRIx32, compat_pvr); + return; + } else if (!ppc_check_compat(cpu, compat_pvr, 0, 0)) { + error_setg(errp, "Compatibility PVR 0x%08"PRIx32" not valid for CPU", + compat_pvr); + return; + } else { + pcr = compat->pcr; + } + + cpu_synchronize_state(CPU(cpu)); + + if (kvm_enabled() && cpu->compat_pvr != compat_pvr) { + int ret = kvmppc_set_compat(cpu, compat_pvr); + if (ret < 0) { + error_setg_errno(errp, -ret, + "Unable to set CPU compatibility mode in KVM"); + return; + } + } + + cpu->compat_pvr = compat_pvr; + env->spr[SPR_PCR] = pcr & pcc->pcr_mask; +} + +typedef struct { + uint32_t compat_pvr; +} SetCompatState; + +static void do_set_compat(CPUState *cs, run_on_cpu_data arg) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + SetCompatState *s = arg.host_ptr; + + ppc_set_compat(cpu, s->compat_pvr, &s->err); +} + +void ppc_set_compat_all(uint32_t compat_pvr) +{ + CPUState *cs; + + CPU_FOREACH(cs) { + SetCompatState s = { + .compat_pvr = compat_pvr, + .err = NULL, + }; + + run_on_cpu(cs, do_set_compat, RUN_ON_CPU_HOST_PTR(&s)); + + if (s.err) { +#if 0 + error_propagate(errp, s.err); +#endif + return; + } + } +} + +int ppc_compat_max_vthreads(PowerPCCPU *cpu) +{ + const CompatInfo *compat = compat_by_pvr(cpu->compat_pvr); + int n_threads = CPU(cpu)->nr_threads; + + if (cpu->compat_pvr) { + g_assert(compat); + n_threads = MIN(n_threads, compat->max_vthreads); + } + + return n_threads; +} + +static void ppc_compat_prop_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint32_t compat_pvr = *((uint32_t *)opaque); + const char 
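
The range check in pcc_compat() compares raw pointers: compat, min and max all come from compat_by_pvr(), so all three point into the single compat_table[] array, which the comment above requires to be ordered oldest to newest. That makes &table[i] < &table[j] equivalent to "entry i is older than entry j". A sketch of the invariant (hypothetical assertion, valid only because of that ordering):

#include <assert.h>

const CompatInfo *p7 = compat_by_pvr(CPU_POWERPC_LOGICAL_2_06); /* POWER7 */
const CompatInfo *p9 = compat_by_pvr(CPU_POWERPC_LOGICAL_3_00); /* POWER9 */
assert(p7 && p9 && p7 < p9);  /* older entries sit at lower addresses */
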
*value; + + if (!compat_pvr) { + value = ""; + } else { + const CompatInfo *compat = compat_by_pvr(compat_pvr); + + g_assert(compat); + + value = compat->name; + } + + visit_type_str(v, name, (char **)&value, errp); +} + +static void ppc_compat_prop_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Error *local_err = NULL; + char *value; + uint32_t compat_pvr; + + visit_type_str(v, name, &value, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (strcmp(value, "") == 0) { + compat_pvr = 0; + } else { + int i; + const CompatInfo *compat = NULL; + + for (i = 0; i < ARRAY_SIZE(compat_table); i++) { + if (strcmp(value, compat_table[i].name) == 0) { + compat = &compat_table[i]; + break; + + } + } + + if (!compat) { + error_setg(errp, "Invalid compatibility mode \"%s\"", value); + goto out; + } + compat_pvr = compat->pvr; + } + + *((uint32_t *)opaque) = compat_pvr; + +out: + g_free(value); +} + +void ppc_compat_add_property(Object *obj, const char *name, + uint32_t *compat_pvr, const char *basedesc, + Error **errp) +{ + Error *local_err = NULL; + gchar *namesv[ARRAY_SIZE(compat_table) + 1]; + gchar *names, *desc; + int i; + + object_property_add(obj, name, "string", + ppc_compat_prop_get, ppc_compat_prop_set, NULL, + compat_pvr, &local_err); + if (local_err) { + goto out; + } + + for (i = 0; i < ARRAY_SIZE(compat_table); i++) { + /* + * Have to discard const here, because g_strjoinv() takes + * (gchar **), not (const gchar **) :( + */ + namesv[i] = (gchar *)compat_table[i].name; + } + namesv[ARRAY_SIZE(compat_table)] = NULL; + + names = g_strjoinv(", ", namesv); + desc = g_strdup_printf("%s. Valid values are %s.", basedesc, names); + object_property_set_description(obj, name, desc, &local_err); + + g_free(names); + g_free(desc); + +out: + error_propagate(errp, local_err); +} +#endif diff --git a/qemu/target/ppc/cpu-models.c b/qemu/target/ppc/cpu-models.c new file mode 100644 index 00000000..4444b225 --- /dev/null +++ b/qemu/target/ppc/cpu-models.c @@ -0,0 +1,945 @@ +/* + * PowerPC CPU initialization for qemu. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * Copyright 2011 Freescale Semiconductor, Inc. + * Copyright 2013 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
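
Everything from ppc_type_check_compat() down is compiled out with #if 0 in this import: Unicorn carries no QOM property machinery and no KVM, so only ppc_check_compat() stays live. The disabled setter's essential job, mapping a mode name to its logical PVR, reduces to the following (hypothetical helper, shown for illustration):

static uint32_t compat_pvr_by_name(const char *name)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(compat_table); i++) {
        if (strcmp(name, compat_table[i].name) == 0) {
            return compat_table[i].pvr;
        }
    }
    return 0; /* "" or unknown: no compatibility mode */
}

/* compat_pvr_by_name("power8") == CPU_POWERPC_LOGICAL_2_07 */
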
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "cpu-models.h" + +#if 0 + +/***************************************************************************/ +/* PowerPC CPU definitions */ +#define POWERPC_DEF_PREFIX(pvr, svr, type) \ + glue(glue(glue(glue(pvr, _), svr), _), type) +#define POWERPC_DEF_SVR(_name, _desc, _pvr, _svr, _type) \ + static void \ + glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_class_init) \ + (CPUClass *oc, void *data) \ + { \ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); \ + \ + pcc->pvr = _pvr; \ + pcc->svr = _svr; \ + } \ + + +#define POWERPC_DEF(_name, _pvr, _type, _desc) \ + POWERPC_DEF_SVR(_name, _desc, _pvr, POWERPC_SVR_NONE, _type) + + /* Embedded PowerPC */ + /* PowerPC 401 family */ + POWERPC_DEF("401", CPU_POWERPC_401, 401, + "Generic PowerPC 401") + /* PowerPC 401 cores */ + POWERPC_DEF("401a1", CPU_POWERPC_401A1, 401, + "PowerPC 401A1") + POWERPC_DEF("401b2", CPU_POWERPC_401B2, 401x2, + "PowerPC 401B2") + POWERPC_DEF("401c2", CPU_POWERPC_401C2, 401x2, + "PowerPC 401C2") + POWERPC_DEF("401d2", CPU_POWERPC_401D2, 401x2, + "PowerPC 401D2") + POWERPC_DEF("401e2", CPU_POWERPC_401E2, 401x2, + "PowerPC 401E2") + POWERPC_DEF("401f2", CPU_POWERPC_401F2, 401x2, + "PowerPC 401F2") + /* XXX: to be checked */ + POWERPC_DEF("401g2", CPU_POWERPC_401G2, 401x2, + "PowerPC 401G2") + /* PowerPC 401 microcontrollers */ + POWERPC_DEF("iop480", CPU_POWERPC_IOP480, IOP480, + "IOP480 (401 microcontroller)") + POWERPC_DEF("cobra", CPU_POWERPC_COBRA, 401, + "IBM Processor for Network Resources") + /* PowerPC 403 family */ + /* PowerPC 403 microcontrollers */ + POWERPC_DEF("403ga", CPU_POWERPC_403GA, 403, + "PowerPC 403 GA") + POWERPC_DEF("403gb", CPU_POWERPC_403GB, 403, + "PowerPC 403 GB") + POWERPC_DEF("403gc", CPU_POWERPC_403GC, 403, + "PowerPC 403 GC") + POWERPC_DEF("403gcx", CPU_POWERPC_403GCX, 403GCX, + "PowerPC 403 GCX") + /* PowerPC 405 family */ + /* PowerPC 405 cores */ + POWERPC_DEF("405d2", CPU_POWERPC_405D2, 405, + "PowerPC 405 D2") + POWERPC_DEF("405d4", CPU_POWERPC_405D4, 405, + "PowerPC 405 D4") + /* PowerPC 405 microcontrollers */ + POWERPC_DEF("405cra", CPU_POWERPC_405CRa, 405, + "PowerPC 405 CRa") + POWERPC_DEF("405crb", CPU_POWERPC_405CRb, 405, + "PowerPC 405 CRb") + POWERPC_DEF("405crc", CPU_POWERPC_405CRc, 405, + "PowerPC 405 CRc") + POWERPC_DEF("405ep", CPU_POWERPC_405EP, 405, + "PowerPC 405 EP") + POWERPC_DEF("405ez", CPU_POWERPC_405EZ, 405, + "PowerPC 405 EZ") + POWERPC_DEF("405gpa", CPU_POWERPC_405GPa, 405, + "PowerPC 405 GPa") + POWERPC_DEF("405gpb", CPU_POWERPC_405GPb, 405, + "PowerPC 405 GPb") + POWERPC_DEF("405gpc", CPU_POWERPC_405GPc, 405, + "PowerPC 405 GPc") + POWERPC_DEF("405gpd", CPU_POWERPC_405GPd, 405, + "PowerPC 405 GPd") + POWERPC_DEF("405gpr", CPU_POWERPC_405GPR, 405, + "PowerPC 405 GPR") + POWERPC_DEF("405lp", CPU_POWERPC_405LP, 405, + "PowerPC 405 LP") + POWERPC_DEF("npe405h", CPU_POWERPC_NPE405H, 405, + "Npe405 H") + POWERPC_DEF("npe405h2", CPU_POWERPC_NPE405H2, 405, + "Npe405 H2") + POWERPC_DEF("npe405l", CPU_POWERPC_NPE405L, 405, + "Npe405 L") + POWERPC_DEF("npe4gs3", CPU_POWERPC_NPE4GS3, 405, + "Npe4GS3") + /* PowerPC 401/403/405 based set-top-box microcontrollers */ + POWERPC_DEF("stb03", CPU_POWERPC_STB03, 405, + "STB03xx") + POWERPC_DEF("stb04", CPU_POWERPC_STB04, 405, + "STB04xx") + POWERPC_DEF("stb25", CPU_POWERPC_STB25, 405, + "STB25xx") + /* Xilinx PowerPC 405 cores */ + POWERPC_DEF("x2vp4", CPU_POWERPC_X2VP4, 405, + NULL) + POWERPC_DEF("x2vp20", CPU_POWERPC_X2VP20, 405, + NULL) + /* PowerPC 440 family */ +#if 
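
POWERPC_DEF_SVR() stamps out one class-init function per table row; after the glue() token pasting, a single entry contributes roughly the following (approximate expansion, shown with the argument tokens unexpanded for readability):

/* POWERPC_DEF("e300c1", CPU_POWERPC_e300c1, e300, "PowerPC e300c1 core")
 * expands to approximately: */
static void
CPU_POWERPC_e300c1_POWERPC_SVR_NONE_e300_cpu_class_init(CPUClass *oc,
                                                        void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);

    pcc->pvr = CPU_POWERPC_e300c1;  /* processor version register */
    pcc->svr = POWERPC_SVR_NONE;    /* system version register */
}
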
defined(TODO_USER_ONLY) + POWERPC_DEF("440", CPU_POWERPC_440, 440GP, + "Generic PowerPC 440") +#endif + /* PowerPC 440 cores */ + POWERPC_DEF("440-xilinx", CPU_POWERPC_440_XILINX, 440x5, + "PowerPC 440 Xilinx 5") + + POWERPC_DEF("440-xilinx-w-dfpu", CPU_POWERPC_440_XILINX, 440x5wDFPU, + "PowerPC 440 Xilinx 5 With a Double Prec. FPU") + /* PowerPC 440 microcontrollers */ + POWERPC_DEF("440epa", CPU_POWERPC_440EPa, 440EP, + "PowerPC 440 EPa") + POWERPC_DEF("440epb", CPU_POWERPC_440EPb, 440EP, + "PowerPC 440 EPb") + POWERPC_DEF("440epx", CPU_POWERPC_440EPX, 440EP, + "PowerPC 440 EPX") + POWERPC_DEF("460exb", CPU_POWERPC_460EXb, 460EX, + "PowerPC 460 EXb") +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gpb", CPU_POWERPC_440GPb, 440GP, + "PowerPC 440 GPb") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gpc", CPU_POWERPC_440GPc, 440GP, + "PowerPC 440 GPc") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gra", CPU_POWERPC_440GRa, 440x5, + "PowerPC 440 GRa") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440grx", CPU_POWERPC_440GRX, 440x5, + "PowerPC 440 GRX") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gxa", CPU_POWERPC_440GXa, 440EP, + "PowerPC 440 GXa") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gxb", CPU_POWERPC_440GXb, 440EP, + "PowerPC 440 GXb") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gxc", CPU_POWERPC_440GXc, 440EP, + "PowerPC 440 GXc") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gxf", CPU_POWERPC_440GXf, 440EP, + "PowerPC 440 GXf") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440sp", CPU_POWERPC_440SP, 440EP, + "PowerPC 440 SP") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440sp2", CPU_POWERPC_440SP2, 440EP, + "PowerPC 440 SP2") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440spe", CPU_POWERPC_440SPE, 440EP, + "PowerPC 440 SPE") +#endif + /* Freescale embedded PowerPC cores */ + /* MPC5xx family (aka RCPU) */ +#if defined(TODO_USER_ONLY) + POWERPC_DEF("mpc5xx", CPU_POWERPC_MPC5xx, MPC5xx, + "Generic MPC5xx core") +#endif + /* MPC8xx family (aka PowerQUICC) */ +#if defined(TODO_USER_ONLY) + POWERPC_DEF("mpc8xx", CPU_POWERPC_MPC8xx, MPC8xx, + "Generic MPC8xx core") +#endif + /* MPC82xx family (aka PowerQUICC-II) */ + POWERPC_DEF("g2", CPU_POWERPC_G2, G2, + "PowerPC G2 core") + POWERPC_DEF("g2h4", CPU_POWERPC_G2H4, G2, + "PowerPC G2 H4 core") + POWERPC_DEF("g2gp", CPU_POWERPC_G2gp, G2, + "PowerPC G2 GP core") + POWERPC_DEF("g2ls", CPU_POWERPC_G2ls, G2, + "PowerPC G2 LS core") + POWERPC_DEF("g2hip3", CPU_POWERPC_G2_HIP3, G2, + "PowerPC G2 HiP3 core") + POWERPC_DEF("g2hip4", CPU_POWERPC_G2_HIP4, G2, + "PowerPC G2 HiP4 core") + POWERPC_DEF("mpc603", CPU_POWERPC_MPC603, 603E, + "PowerPC MPC603 core") + POWERPC_DEF("g2le", CPU_POWERPC_G2LE, G2LE, + "PowerPC G2le core (same as G2 plus little-endian mode support)") + POWERPC_DEF("g2legp", CPU_POWERPC_G2LEgp, G2LE, + "PowerPC G2LE GP core") + POWERPC_DEF("g2lels", CPU_POWERPC_G2LEls, G2LE, + "PowerPC G2LE LS core") + POWERPC_DEF("g2legp1", CPU_POWERPC_G2LEgp1, G2LE, + "PowerPC G2LE GP1 core") + POWERPC_DEF("g2legp3", CPU_POWERPC_G2LEgp3, G2LE, + "PowerPC G2LE GP3 core") + /* PowerPC G2 microcontrollers */ + POWERPC_DEF_SVR("mpc5200_v10", "MPC5200 v1.0", + CPU_POWERPC_MPC5200_v10, POWERPC_SVR_5200_v10, G2LE) + POWERPC_DEF_SVR("mpc5200_v11", "MPC5200 v1.1", + CPU_POWERPC_MPC5200_v11, POWERPC_SVR_5200_v11, G2LE) + POWERPC_DEF_SVR("mpc5200_v12", "MPC5200 v1.2", + CPU_POWERPC_MPC5200_v12, POWERPC_SVR_5200_v12, G2LE) + POWERPC_DEF_SVR("mpc5200b_v20", "MPC5200B v2.0", 
+ CPU_POWERPC_MPC5200B_v20, POWERPC_SVR_5200B_v20, G2LE) + POWERPC_DEF_SVR("mpc5200b_v21", "MPC5200B v2.1", + CPU_POWERPC_MPC5200B_v21, POWERPC_SVR_5200B_v21, G2LE) + /* e200 family */ + POWERPC_DEF("e200z5", CPU_POWERPC_e200z5, e200, + "PowerPC e200z5 core") + POWERPC_DEF("e200z6", CPU_POWERPC_e200z6, e200, + "PowerPC e200z6 core") + /* e300 family */ + POWERPC_DEF("e300c1", CPU_POWERPC_e300c1, e300, + "PowerPC e300c1 core") + POWERPC_DEF("e300c2", CPU_POWERPC_e300c2, e300, + "PowerPC e300c2 core") + POWERPC_DEF("e300c3", CPU_POWERPC_e300c3, e300, + "PowerPC e300c3 core") + POWERPC_DEF("e300c4", CPU_POWERPC_e300c4, e300, + "PowerPC e300c4 core") + /* PowerPC e300 microcontrollers */ + POWERPC_DEF_SVR("mpc8343", "MPC8343", + CPU_POWERPC_MPC834x, POWERPC_SVR_8343, e300) + POWERPC_DEF_SVR("mpc8343a", "MPC8343A", + CPU_POWERPC_MPC834x, POWERPC_SVR_8343A, e300) + POWERPC_DEF_SVR("mpc8343e", "MPC8343E", + CPU_POWERPC_MPC834x, POWERPC_SVR_8343E, e300) + POWERPC_DEF_SVR("mpc8343ea", "MPC8343EA", + CPU_POWERPC_MPC834x, POWERPC_SVR_8343EA, e300) + POWERPC_DEF_SVR("mpc8347t", "MPC8347T", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347T, e300) + POWERPC_DEF_SVR("mpc8347p", "MPC8347P", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347P, e300) + POWERPC_DEF_SVR("mpc8347at", "MPC8347AT", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347AT, e300) + POWERPC_DEF_SVR("mpc8347ap", "MPC8347AP", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347AP, e300) + POWERPC_DEF_SVR("mpc8347et", "MPC8347ET", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347ET, e300) + POWERPC_DEF_SVR("mpc8347ep", "MPC8343EP", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347EP, e300) + POWERPC_DEF_SVR("mpc8347eat", "MPC8347EAT", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAT, e300) + POWERPC_DEF_SVR("mpc8347eap", "MPC8343EAP", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAP, e300) + POWERPC_DEF_SVR("mpc8349", "MPC8349", + CPU_POWERPC_MPC834x, POWERPC_SVR_8349, e300) + POWERPC_DEF_SVR("mpc8349a", "MPC8349A", + CPU_POWERPC_MPC834x, POWERPC_SVR_8349A, e300) + POWERPC_DEF_SVR("mpc8349e", "MPC8349E", + CPU_POWERPC_MPC834x, POWERPC_SVR_8349E, e300) + POWERPC_DEF_SVR("mpc8349ea", "MPC8349EA", + CPU_POWERPC_MPC834x, POWERPC_SVR_8349EA, e300) + POWERPC_DEF_SVR("mpc8377", "MPC8377", + CPU_POWERPC_MPC837x, POWERPC_SVR_8377, e300) + POWERPC_DEF_SVR("mpc8377e", "MPC8377E", + CPU_POWERPC_MPC837x, POWERPC_SVR_8377E, e300) + POWERPC_DEF_SVR("mpc8378", "MPC8378", + CPU_POWERPC_MPC837x, POWERPC_SVR_8378, e300) + POWERPC_DEF_SVR("mpc8378e", "MPC8378E", + CPU_POWERPC_MPC837x, POWERPC_SVR_8378E, e300) + POWERPC_DEF_SVR("mpc8379", "MPC8379", + CPU_POWERPC_MPC837x, POWERPC_SVR_8379, e300) + POWERPC_DEF_SVR("mpc8379e", "MPC8379E", + CPU_POWERPC_MPC837x, POWERPC_SVR_8379E, e300) + /* e500 family */ + POWERPC_DEF_SVR("e500_v10", "PowerPC e500 v1.0 core", + CPU_POWERPC_e500v1_v10, POWERPC_SVR_E500, e500v1); + POWERPC_DEF_SVR("e500_v20", "PowerPC e500 v2.0 core", + CPU_POWERPC_e500v1_v20, POWERPC_SVR_E500, e500v1); + POWERPC_DEF_SVR("e500v2_v10", "PowerPC e500v2 v1.0 core", + CPU_POWERPC_e500v2_v10, POWERPC_SVR_E500, e500v2); + POWERPC_DEF_SVR("e500v2_v20", "PowerPC e500v2 v2.0 core", + CPU_POWERPC_e500v2_v20, POWERPC_SVR_E500, e500v2); + POWERPC_DEF_SVR("e500v2_v21", "PowerPC e500v2 v2.1 core", + CPU_POWERPC_e500v2_v21, POWERPC_SVR_E500, e500v2); + POWERPC_DEF_SVR("e500v2_v22", "PowerPC e500v2 v2.2 core", + CPU_POWERPC_e500v2_v22, POWERPC_SVR_E500, e500v2); + POWERPC_DEF_SVR("e500v2_v30", "PowerPC e500v2 v3.0 core", + CPU_POWERPC_e500v2_v30, POWERPC_SVR_E500, e500v2); + POWERPC_DEF_SVR("e500mc", "e500mc", + CPU_POWERPC_e500mc, 
POWERPC_SVR_E500, e500mc) +#ifdef TARGET_PPC64 + POWERPC_DEF_SVR("e5500", "e5500", + CPU_POWERPC_e5500, POWERPC_SVR_E500, e5500) + POWERPC_DEF_SVR("e6500", "e6500", + CPU_POWERPC_e6500, POWERPC_SVR_E500, e6500) +#endif + /* PowerPC e500 microcontrollers */ + POWERPC_DEF_SVR("mpc8533_v10", "MPC8533 v1.0", + CPU_POWERPC_MPC8533_v10, POWERPC_SVR_8533_v10, e500v2) + POWERPC_DEF_SVR("mpc8533_v11", "MPC8533 v1.1", + CPU_POWERPC_MPC8533_v11, POWERPC_SVR_8533_v11, e500v2) + POWERPC_DEF_SVR("mpc8533e_v10", "MPC8533E v1.0", + CPU_POWERPC_MPC8533E_v10, POWERPC_SVR_8533E_v10, e500v2) + POWERPC_DEF_SVR("mpc8533e_v11", "MPC8533E v1.1", + CPU_POWERPC_MPC8533E_v11, POWERPC_SVR_8533E_v11, e500v2) + POWERPC_DEF_SVR("mpc8540_v10", "MPC8540 v1.0", + CPU_POWERPC_MPC8540_v10, POWERPC_SVR_8540_v10, e500v1) + POWERPC_DEF_SVR("mpc8540_v20", "MPC8540 v2.0", + CPU_POWERPC_MPC8540_v20, POWERPC_SVR_8540_v20, e500v1) + POWERPC_DEF_SVR("mpc8540_v21", "MPC8540 v2.1", + CPU_POWERPC_MPC8540_v21, POWERPC_SVR_8540_v21, e500v1) + POWERPC_DEF_SVR("mpc8541_v10", "MPC8541 v1.0", + CPU_POWERPC_MPC8541_v10, POWERPC_SVR_8541_v10, e500v1) + POWERPC_DEF_SVR("mpc8541_v11", "MPC8541 v1.1", + CPU_POWERPC_MPC8541_v11, POWERPC_SVR_8541_v11, e500v1) + POWERPC_DEF_SVR("mpc8541e_v10", "MPC8541E v1.0", + CPU_POWERPC_MPC8541E_v10, POWERPC_SVR_8541E_v10, e500v1) + POWERPC_DEF_SVR("mpc8541e_v11", "MPC8541E v1.1", + CPU_POWERPC_MPC8541E_v11, POWERPC_SVR_8541E_v11, e500v1) + POWERPC_DEF_SVR("mpc8543_v10", "MPC8543 v1.0", + CPU_POWERPC_MPC8543_v10, POWERPC_SVR_8543_v10, e500v2) + POWERPC_DEF_SVR("mpc8543_v11", "MPC8543 v1.1", + CPU_POWERPC_MPC8543_v11, POWERPC_SVR_8543_v11, e500v2) + POWERPC_DEF_SVR("mpc8543_v20", "MPC8543 v2.0", + CPU_POWERPC_MPC8543_v20, POWERPC_SVR_8543_v20, e500v2) + POWERPC_DEF_SVR("mpc8543_v21", "MPC8543 v2.1", + CPU_POWERPC_MPC8543_v21, POWERPC_SVR_8543_v21, e500v2) + POWERPC_DEF_SVR("mpc8543e_v10", "MPC8543E v1.0", + CPU_POWERPC_MPC8543E_v10, POWERPC_SVR_8543E_v10, e500v2) + POWERPC_DEF_SVR("mpc8543e_v11", "MPC8543E v1.1", + CPU_POWERPC_MPC8543E_v11, POWERPC_SVR_8543E_v11, e500v2) + POWERPC_DEF_SVR("mpc8543e_v20", "MPC8543E v2.0", + CPU_POWERPC_MPC8543E_v20, POWERPC_SVR_8543E_v20, e500v2) + POWERPC_DEF_SVR("mpc8543e_v21", "MPC8543E v2.1", + CPU_POWERPC_MPC8543E_v21, POWERPC_SVR_8543E_v21, e500v2) + POWERPC_DEF_SVR("mpc8544_v10", "MPC8544 v1.0", + CPU_POWERPC_MPC8544_v10, POWERPC_SVR_8544_v10, e500v2) + POWERPC_DEF_SVR("mpc8544_v11", "MPC8544 v1.1", + CPU_POWERPC_MPC8544_v11, POWERPC_SVR_8544_v11, e500v2) + POWERPC_DEF_SVR("mpc8544e_v10", "MPC8544E v1.0", + CPU_POWERPC_MPC8544E_v10, POWERPC_SVR_8544E_v10, e500v2) + POWERPC_DEF_SVR("mpc8544e_v11", "MPC8544E v1.1", + CPU_POWERPC_MPC8544E_v11, POWERPC_SVR_8544E_v11, e500v2) + POWERPC_DEF_SVR("mpc8545_v20", "MPC8545 v2.0", + CPU_POWERPC_MPC8545_v20, POWERPC_SVR_8545_v20, e500v2) + POWERPC_DEF_SVR("mpc8545_v21", "MPC8545 v2.1", + CPU_POWERPC_MPC8545_v21, POWERPC_SVR_8545_v21, e500v2) + POWERPC_DEF_SVR("mpc8545e_v20", "MPC8545E v2.0", + CPU_POWERPC_MPC8545E_v20, POWERPC_SVR_8545E_v20, e500v2) + POWERPC_DEF_SVR("mpc8545e_v21", "MPC8545E v2.1", + CPU_POWERPC_MPC8545E_v21, POWERPC_SVR_8545E_v21, e500v2) + POWERPC_DEF_SVR("mpc8547e_v20", "MPC8547E v2.0", + CPU_POWERPC_MPC8547E_v20, POWERPC_SVR_8547E_v20, e500v2) + POWERPC_DEF_SVR("mpc8547e_v21", "MPC8547E v2.1", + CPU_POWERPC_MPC8547E_v21, POWERPC_SVR_8547E_v21, e500v2) + POWERPC_DEF_SVR("mpc8548_v10", "MPC8548 v1.0", + CPU_POWERPC_MPC8548_v10, POWERPC_SVR_8548_v10, e500v2) + POWERPC_DEF_SVR("mpc8548_v11", "MPC8548 v1.1", + 
CPU_POWERPC_MPC8548_v11, POWERPC_SVR_8548_v11, e500v2) + POWERPC_DEF_SVR("mpc8548_v20", "MPC8548 v2.0", + CPU_POWERPC_MPC8548_v20, POWERPC_SVR_8548_v20, e500v2) + POWERPC_DEF_SVR("mpc8548_v21", "MPC8548 v2.1", + CPU_POWERPC_MPC8548_v21, POWERPC_SVR_8548_v21, e500v2) + POWERPC_DEF_SVR("mpc8548e_v10", "MPC8548E v1.0", + CPU_POWERPC_MPC8548E_v10, POWERPC_SVR_8548E_v10, e500v2) + POWERPC_DEF_SVR("mpc8548e_v11", "MPC8548E v1.1", + CPU_POWERPC_MPC8548E_v11, POWERPC_SVR_8548E_v11, e500v2) + POWERPC_DEF_SVR("mpc8548e_v20", "MPC8548E v2.0", + CPU_POWERPC_MPC8548E_v20, POWERPC_SVR_8548E_v20, e500v2) + POWERPC_DEF_SVR("mpc8548e_v21", "MPC8548E v2.1", + CPU_POWERPC_MPC8548E_v21, POWERPC_SVR_8548E_v21, e500v2) + POWERPC_DEF_SVR("mpc8555_v10", "MPC8555 v1.0", + CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v2) + POWERPC_DEF_SVR("mpc8555_v11", "MPC8555 v1.1", + CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v2) + POWERPC_DEF_SVR("mpc8555e_v10", "MPC8555E v1.0", + CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v2) + POWERPC_DEF_SVR("mpc8555e_v11", "MPC8555E v1.1", + CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v2) + POWERPC_DEF_SVR("mpc8560_v10", "MPC8560 v1.0", + CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v2) + POWERPC_DEF_SVR("mpc8560_v20", "MPC8560 v2.0", + CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v2) + POWERPC_DEF_SVR("mpc8560_v21", "MPC8560 v2.1", + CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v2) + POWERPC_DEF_SVR("mpc8567", "MPC8567", + CPU_POWERPC_MPC8567, POWERPC_SVR_8567, e500v2) + POWERPC_DEF_SVR("mpc8567e", "MPC8567E", + CPU_POWERPC_MPC8567E, POWERPC_SVR_8567E, e500v2) + POWERPC_DEF_SVR("mpc8568", "MPC8568", + CPU_POWERPC_MPC8568, POWERPC_SVR_8568, e500v2) + POWERPC_DEF_SVR("mpc8568e", "MPC8568E", + CPU_POWERPC_MPC8568E, POWERPC_SVR_8568E, e500v2) + POWERPC_DEF_SVR("mpc8572", "MPC8572", + CPU_POWERPC_MPC8572, POWERPC_SVR_8572, e500v2) + POWERPC_DEF_SVR("mpc8572e", "MPC8572E", + CPU_POWERPC_MPC8572E, POWERPC_SVR_8572E, e500v2) + /* e600 family */ + POWERPC_DEF("e600", CPU_POWERPC_e600, e600, + "PowerPC e600 core") + /* PowerPC e600 microcontrollers */ + POWERPC_DEF_SVR("mpc8610", "MPC8610", + CPU_POWERPC_MPC8610, POWERPC_SVR_8610, e600) + POWERPC_DEF_SVR("mpc8641", "MPC8641", + CPU_POWERPC_MPC8641, POWERPC_SVR_8641, e600) + POWERPC_DEF_SVR("mpc8641d", "MPC8641D", + CPU_POWERPC_MPC8641D, POWERPC_SVR_8641D, e600) + /* 32 bits "classic" PowerPC */ + /* PowerPC 6xx family */ + POWERPC_DEF("601_v0", CPU_POWERPC_601_v0, 601, + "PowerPC 601v0") + POWERPC_DEF("601_v1", CPU_POWERPC_601_v1, 601, + "PowerPC 601v1") + POWERPC_DEF("601_v2", CPU_POWERPC_601_v2, 601v, + "PowerPC 601v2") + POWERPC_DEF("602", CPU_POWERPC_602, 602, + "PowerPC 602") + POWERPC_DEF("603", CPU_POWERPC_603, 603, + "PowerPC 603") + POWERPC_DEF("603e_v1.1", CPU_POWERPC_603E_v11, 603E, + "PowerPC 603e v1.1") + POWERPC_DEF("603e_v1.2", CPU_POWERPC_603E_v12, 603E, + "PowerPC 603e v1.2") + POWERPC_DEF("603e_v1.3", CPU_POWERPC_603E_v13, 603E, + "PowerPC 603e v1.3") + POWERPC_DEF("603e_v1.4", CPU_POWERPC_603E_v14, 603E, + "PowerPC 603e v1.4") + POWERPC_DEF("603e_v2.2", CPU_POWERPC_603E_v22, 603E, + "PowerPC 603e v2.2") + POWERPC_DEF("603e_v3", CPU_POWERPC_603E_v3, 603E, + "PowerPC 603e v3") + POWERPC_DEF("603e_v4", CPU_POWERPC_603E_v4, 603E, + "PowerPC 603e v4") + POWERPC_DEF("603e_v4.1", CPU_POWERPC_603E_v41, 603E, + "PowerPC 603e v4.1") + POWERPC_DEF("603e7", CPU_POWERPC_603E7, 603E, + "PowerPC 603e (aka PID7)") + POWERPC_DEF("603e7t", CPU_POWERPC_603E7t, 603E, + "PowerPC 603e7t") + 
POWERPC_DEF("603e7v", CPU_POWERPC_603E7v, 603E, + "PowerPC 603e7v") + POWERPC_DEF("603e7v1", CPU_POWERPC_603E7v1, 603E, + "PowerPC 603e7v1") + POWERPC_DEF("603e7v2", CPU_POWERPC_603E7v2, 603E, + "PowerPC 603e7v2") + POWERPC_DEF("603p", CPU_POWERPC_603P, 603E, + "PowerPC 603p (aka PID7v)") + POWERPC_DEF("604", CPU_POWERPC_604, 604, + "PowerPC 604") + POWERPC_DEF("604e_v1.0", CPU_POWERPC_604E_v10, 604E, + "PowerPC 604e v1.0") + POWERPC_DEF("604e_v2.2", CPU_POWERPC_604E_v22, 604E, + "PowerPC 604e v2.2") + POWERPC_DEF("604e_v2.4", CPU_POWERPC_604E_v24, 604E, + "PowerPC 604e v2.4") + POWERPC_DEF("604r", CPU_POWERPC_604R, 604E, + "PowerPC 604r (aka PIDA)") + /* PowerPC 7xx family */ + POWERPC_DEF("740_v1.0", CPU_POWERPC_7x0_v10, 740, + "PowerPC 740 v1.0 (G3)") + POWERPC_DEF("750_v1.0", CPU_POWERPC_7x0_v10, 750, + "PowerPC 750 v1.0 (G3)") + POWERPC_DEF("740_v2.0", CPU_POWERPC_7x0_v20, 740, + "PowerPC 740 v2.0 (G3)") + POWERPC_DEF("750_v2.0", CPU_POWERPC_7x0_v20, 750, + "PowerPC 750 v2.0 (G3)") + POWERPC_DEF("740_v2.1", CPU_POWERPC_7x0_v21, 740, + "PowerPC 740 v2.1 (G3)") + POWERPC_DEF("750_v2.1", CPU_POWERPC_7x0_v21, 750, + "PowerPC 750 v2.1 (G3)") + POWERPC_DEF("740_v2.2", CPU_POWERPC_7x0_v22, 740, + "PowerPC 740 v2.2 (G3)") + POWERPC_DEF("750_v2.2", CPU_POWERPC_7x0_v22, 750, + "PowerPC 750 v2.2 (G3)") + POWERPC_DEF("740_v3.0", CPU_POWERPC_7x0_v30, 740, + "PowerPC 740 v3.0 (G3)") + POWERPC_DEF("750_v3.0", CPU_POWERPC_7x0_v30, 750, + "PowerPC 750 v3.0 (G3)") + POWERPC_DEF("740_v3.1", CPU_POWERPC_7x0_v31, 740, + "PowerPC 740 v3.1 (G3)") + POWERPC_DEF("750_v3.1", CPU_POWERPC_7x0_v31, 750, + "PowerPC 750 v3.1 (G3)") + POWERPC_DEF("740e", CPU_POWERPC_740E, 740, + "PowerPC 740E (G3)") + POWERPC_DEF("750e", CPU_POWERPC_750E, 750, + "PowerPC 750E (G3)") + POWERPC_DEF("740p", CPU_POWERPC_7x0P, 740, + "PowerPC 740P (G3)") + POWERPC_DEF("750p", CPU_POWERPC_7x0P, 750, + "PowerPC 750P (G3)") + POWERPC_DEF("750cl_v1.0", CPU_POWERPC_750CL_v10, 750cl, + "PowerPC 750CL v1.0") + POWERPC_DEF("750cl_v2.0", CPU_POWERPC_750CL_v20, 750cl, + "PowerPC 750CL v2.0") + POWERPC_DEF("750cx_v1.0", CPU_POWERPC_750CX_v10, 750cx, + "PowerPC 750CX v1.0 (G3 embedded)") + POWERPC_DEF("750cx_v2.0", CPU_POWERPC_750CX_v20, 750cx, + "PowerPC 750CX v2.1 (G3 embedded)") + POWERPC_DEF("750cx_v2.1", CPU_POWERPC_750CX_v21, 750cx, + "PowerPC 750CX v2.1 (G3 embedded)") + POWERPC_DEF("750cx_v2.2", CPU_POWERPC_750CX_v22, 750cx, + "PowerPC 750CX v2.2 (G3 embedded)") + POWERPC_DEF("750cxe_v2.1", CPU_POWERPC_750CXE_v21, 750cx, + "PowerPC 750CXe v2.1 (G3 embedded)") + POWERPC_DEF("750cxe_v2.2", CPU_POWERPC_750CXE_v22, 750cx, + "PowerPC 750CXe v2.2 (G3 embedded)") + POWERPC_DEF("750cxe_v2.3", CPU_POWERPC_750CXE_v23, 750cx, + "PowerPC 750CXe v2.3 (G3 embedded)") + POWERPC_DEF("750cxe_v2.4", CPU_POWERPC_750CXE_v24, 750cx, + "PowerPC 750CXe v2.4 (G3 embedded)") + POWERPC_DEF("750cxe_v2.4b", CPU_POWERPC_750CXE_v24b, 750cx, + "PowerPC 750CXe v2.4b (G3 embedded)") + POWERPC_DEF("750cxe_v3.0", CPU_POWERPC_750CXE_v30, 750cx, + "PowerPC 750CXe v3.0 (G3 embedded)") + POWERPC_DEF("750cxe_v3.1", CPU_POWERPC_750CXE_v31, 750cx, + "PowerPC 750CXe v3.1 (G3 embedded)") + POWERPC_DEF("750cxe_v3.1b", CPU_POWERPC_750CXE_v31b, 750cx, + "PowerPC 750CXe v3.1b (G3 embedded)") + POWERPC_DEF("750cxr", CPU_POWERPC_750CXR, 750cx, + "PowerPC 750CXr (G3 embedded)") + POWERPC_DEF("750fl", CPU_POWERPC_750FL, 750fx, + "PowerPC 750FL (G3 embedded)") + POWERPC_DEF("750fx_v1.0", CPU_POWERPC_750FX_v10, 750fx, + "PowerPC 750FX v1.0 (G3 embedded)") + POWERPC_DEF("750fx_v2.0", 
CPU_POWERPC_750FX_v20, 750fx, + "PowerPC 750FX v2.0 (G3 embedded)") + POWERPC_DEF("750fx_v2.1", CPU_POWERPC_750FX_v21, 750fx, + "PowerPC 750FX v2.1 (G3 embedded)") + POWERPC_DEF("750fx_v2.2", CPU_POWERPC_750FX_v22, 750fx, + "PowerPC 750FX v2.2 (G3 embedded)") + POWERPC_DEF("750fx_v2.3", CPU_POWERPC_750FX_v23, 750fx, + "PowerPC 750FX v2.3 (G3 embedded)") + POWERPC_DEF("750gl", CPU_POWERPC_750GL, 750gx, + "PowerPC 750GL (G3 embedded)") + POWERPC_DEF("750gx_v1.0", CPU_POWERPC_750GX_v10, 750gx, + "PowerPC 750GX v1.0 (G3 embedded)") + POWERPC_DEF("750gx_v1.1", CPU_POWERPC_750GX_v11, 750gx, + "PowerPC 750GX v1.1 (G3 embedded)") + POWERPC_DEF("750gx_v1.2", CPU_POWERPC_750GX_v12, 750gx, + "PowerPC 750GX v1.2 (G3 embedded)") + POWERPC_DEF("750l_v2.0", CPU_POWERPC_750L_v20, 750, + "PowerPC 750L v2.0 (G3 embedded)") + POWERPC_DEF("750l_v2.1", CPU_POWERPC_750L_v21, 750, + "PowerPC 750L v2.1 (G3 embedded)") + POWERPC_DEF("750l_v2.2", CPU_POWERPC_750L_v22, 750, + "PowerPC 750L v2.2 (G3 embedded)") + POWERPC_DEF("750l_v3.0", CPU_POWERPC_750L_v30, 750, + "PowerPC 750L v3.0 (G3 embedded)") + POWERPC_DEF("750l_v3.2", CPU_POWERPC_750L_v32, 750, + "PowerPC 750L v3.2 (G3 embedded)") + POWERPC_DEF("745_v1.0", CPU_POWERPC_7x5_v10, 745, + "PowerPC 745 v1.0") + POWERPC_DEF("755_v1.0", CPU_POWERPC_7x5_v10, 755, + "PowerPC 755 v1.0") + POWERPC_DEF("745_v1.1", CPU_POWERPC_7x5_v11, 745, + "PowerPC 745 v1.1") + POWERPC_DEF("755_v1.1", CPU_POWERPC_7x5_v11, 755, + "PowerPC 755 v1.1") + POWERPC_DEF("745_v2.0", CPU_POWERPC_7x5_v20, 745, + "PowerPC 745 v2.0") + POWERPC_DEF("755_v2.0", CPU_POWERPC_7x5_v20, 755, + "PowerPC 755 v2.0") + POWERPC_DEF("745_v2.1", CPU_POWERPC_7x5_v21, 745, + "PowerPC 745 v2.1") + POWERPC_DEF("755_v2.1", CPU_POWERPC_7x5_v21, 755, + "PowerPC 755 v2.1") + POWERPC_DEF("745_v2.2", CPU_POWERPC_7x5_v22, 745, + "PowerPC 745 v2.2") + POWERPC_DEF("755_v2.2", CPU_POWERPC_7x5_v22, 755, + "PowerPC 755 v2.2") + POWERPC_DEF("745_v2.3", CPU_POWERPC_7x5_v23, 745, + "PowerPC 745 v2.3") + POWERPC_DEF("755_v2.3", CPU_POWERPC_7x5_v23, 755, + "PowerPC 755 v2.3") + POWERPC_DEF("745_v2.4", CPU_POWERPC_7x5_v24, 745, + "PowerPC 745 v2.4") + POWERPC_DEF("755_v2.4", CPU_POWERPC_7x5_v24, 755, + "PowerPC 755 v2.4") + POWERPC_DEF("745_v2.5", CPU_POWERPC_7x5_v25, 745, + "PowerPC 745 v2.5") + POWERPC_DEF("755_v2.5", CPU_POWERPC_7x5_v25, 755, + "PowerPC 755 v2.5") + POWERPC_DEF("745_v2.6", CPU_POWERPC_7x5_v26, 745, + "PowerPC 745 v2.6") + POWERPC_DEF("755_v2.6", CPU_POWERPC_7x5_v26, 755, + "PowerPC 755 v2.6") + POWERPC_DEF("745_v2.7", CPU_POWERPC_7x5_v27, 745, + "PowerPC 745 v2.7") + POWERPC_DEF("755_v2.7", CPU_POWERPC_7x5_v27, 755, + "PowerPC 755 v2.7") + POWERPC_DEF("745_v2.8", CPU_POWERPC_7x5_v28, 745, + "PowerPC 745 v2.8") + POWERPC_DEF("755_v2.8", CPU_POWERPC_7x5_v28, 755, + "PowerPC 755 v2.8") + /* PowerPC 74xx family */ + POWERPC_DEF("7400_v1.0", CPU_POWERPC_7400_v10, 7400, + "PowerPC 7400 v1.0 (G4)") + POWERPC_DEF("7400_v1.1", CPU_POWERPC_7400_v11, 7400, + "PowerPC 7400 v1.1 (G4)") + POWERPC_DEF("7400_v2.0", CPU_POWERPC_7400_v20, 7400, + "PowerPC 7400 v2.0 (G4)") + POWERPC_DEF("7400_v2.1", CPU_POWERPC_7400_v21, 7400, + "PowerPC 7400 v2.1 (G4)") + POWERPC_DEF("7400_v2.2", CPU_POWERPC_7400_v22, 7400, + "PowerPC 7400 v2.2 (G4)") + POWERPC_DEF("7400_v2.6", CPU_POWERPC_7400_v26, 7400, + "PowerPC 7400 v2.6 (G4)") + POWERPC_DEF("7400_v2.7", CPU_POWERPC_7400_v27, 7400, + "PowerPC 7400 v2.7 (G4)") + POWERPC_DEF("7400_v2.8", CPU_POWERPC_7400_v28, 7400, + "PowerPC 7400 v2.8 (G4)") + POWERPC_DEF("7400_v2.9", CPU_POWERPC_7400_v29, 
7400, + "PowerPC 7400 v2.9 (G4)") + POWERPC_DEF("7410_v1.0", CPU_POWERPC_7410_v10, 7410, + "PowerPC 7410 v1.0 (G4)") + POWERPC_DEF("7410_v1.1", CPU_POWERPC_7410_v11, 7410, + "PowerPC 7410 v1.1 (G4)") + POWERPC_DEF("7410_v1.2", CPU_POWERPC_7410_v12, 7410, + "PowerPC 7410 v1.2 (G4)") + POWERPC_DEF("7410_v1.3", CPU_POWERPC_7410_v13, 7410, + "PowerPC 7410 v1.3 (G4)") + POWERPC_DEF("7410_v1.4", CPU_POWERPC_7410_v14, 7410, + "PowerPC 7410 v1.4 (G4)") + POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7400, + "PowerPC 7448 v1.0 (G4)") + POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7400, + "PowerPC 7448 v1.1 (G4)") + POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7400, + "PowerPC 7448 v2.0 (G4)") + POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7400, + "PowerPC 7448 v2.1 (G4)") + POWERPC_DEF("7450_v1.0", CPU_POWERPC_7450_v10, 7450, + "PowerPC 7450 v1.0 (G4)") + POWERPC_DEF("7450_v1.1", CPU_POWERPC_7450_v11, 7450, + "PowerPC 7450 v1.1 (G4)") + POWERPC_DEF("7450_v1.2", CPU_POWERPC_7450_v12, 7450, + "PowerPC 7450 v1.2 (G4)") + POWERPC_DEF("7450_v2.0", CPU_POWERPC_7450_v20, 7450, + "PowerPC 7450 v2.0 (G4)") + POWERPC_DEF("7450_v2.1", CPU_POWERPC_7450_v21, 7450, + "PowerPC 7450 v2.1 (G4)") + POWERPC_DEF("7441_v2.1", CPU_POWERPC_7450_v21, 7440, + "PowerPC 7441 v2.1 (G4)") + POWERPC_DEF("7441_v2.3", CPU_POWERPC_74x1_v23, 7440, + "PowerPC 7441 v2.3 (G4)") + POWERPC_DEF("7451_v2.3", CPU_POWERPC_74x1_v23, 7450, + "PowerPC 7451 v2.3 (G4)") + POWERPC_DEF("7441_v2.10", CPU_POWERPC_74x1_v210, 7440, + "PowerPC 7441 v2.10 (G4)") + POWERPC_DEF("7451_v2.10", CPU_POWERPC_74x1_v210, 7450, + "PowerPC 7451 v2.10 (G4)") + POWERPC_DEF("7445_v1.0", CPU_POWERPC_74x5_v10, 7445, + "PowerPC 7445 v1.0 (G4)") + POWERPC_DEF("7455_v1.0", CPU_POWERPC_74x5_v10, 7455, + "PowerPC 7455 v1.0 (G4)") + POWERPC_DEF("7445_v2.1", CPU_POWERPC_74x5_v21, 7445, + "PowerPC 7445 v2.1 (G4)") + POWERPC_DEF("7455_v2.1", CPU_POWERPC_74x5_v21, 7455, + "PowerPC 7455 v2.1 (G4)") + POWERPC_DEF("7445_v3.2", CPU_POWERPC_74x5_v32, 7445, + "PowerPC 7445 v3.2 (G4)") + POWERPC_DEF("7455_v3.2", CPU_POWERPC_74x5_v32, 7455, + "PowerPC 7455 v3.2 (G4)") + POWERPC_DEF("7445_v3.3", CPU_POWERPC_74x5_v33, 7445, + "PowerPC 7445 v3.3 (G4)") + POWERPC_DEF("7455_v3.3", CPU_POWERPC_74x5_v33, 7455, + "PowerPC 7455 v3.3 (G4)") + POWERPC_DEF("7445_v3.4", CPU_POWERPC_74x5_v34, 7445, + "PowerPC 7445 v3.4 (G4)") + POWERPC_DEF("7455_v3.4", CPU_POWERPC_74x5_v34, 7455, + "PowerPC 7455 v3.4 (G4)") + POWERPC_DEF("7447_v1.0", CPU_POWERPC_74x7_v10, 7445, + "PowerPC 7447 v1.0 (G4)") + POWERPC_DEF("7457_v1.0", CPU_POWERPC_74x7_v10, 7455, + "PowerPC 7457 v1.0 (G4)") + POWERPC_DEF("7447_v1.1", CPU_POWERPC_74x7_v11, 7445, + "PowerPC 7447 v1.1 (G4)") + POWERPC_DEF("7457_v1.1", CPU_POWERPC_74x7_v11, 7455, + "PowerPC 7457 v1.1 (G4)") + POWERPC_DEF("7457_v1.2", CPU_POWERPC_74x7_v12, 7455, + "PowerPC 7457 v1.2 (G4)") + POWERPC_DEF("7447a_v1.0", CPU_POWERPC_74x7A_v10, 7445, + "PowerPC 7447A v1.0 (G4)") + POWERPC_DEF("7457a_v1.0", CPU_POWERPC_74x7A_v10, 7455, + "PowerPC 7457A v1.0 (G4)") + POWERPC_DEF("7447a_v1.1", CPU_POWERPC_74x7A_v11, 7445, + "PowerPC 7447A v1.1 (G4)") + POWERPC_DEF("7457a_v1.1", CPU_POWERPC_74x7A_v11, 7455, + "PowerPC 7457A v1.1 (G4)") + POWERPC_DEF("7447a_v1.2", CPU_POWERPC_74x7A_v12, 7445, + "PowerPC 7447A v1.2 (G4)") + POWERPC_DEF("7457a_v1.2", CPU_POWERPC_74x7A_v12, 7455, + "PowerPC 7457A v1.2 (G4)") + /* 64 bits PowerPC */ +#if defined(TARGET_PPC64) + POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970, + "PowerPC 970 v2.2") + POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 
970, + "PowerPC 970FX v1.0 (G5)") + POWERPC_DEF("970fx_v2.0", CPU_POWERPC_970FX_v20, 970, + "PowerPC 970FX v2.0 (G5)") + POWERPC_DEF("970fx_v2.1", CPU_POWERPC_970FX_v21, 970, + "PowerPC 970FX v2.1 (G5)") + POWERPC_DEF("970fx_v3.0", CPU_POWERPC_970FX_v30, 970, + "PowerPC 970FX v3.0 (G5)") + POWERPC_DEF("970fx_v3.1", CPU_POWERPC_970FX_v31, 970, + "PowerPC 970FX v3.1 (G5)") + POWERPC_DEF("970mp_v1.0", CPU_POWERPC_970MP_v10, 970, + "PowerPC 970MP v1.0") + POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970, + "PowerPC 970MP v1.1") + POWERPC_DEF("power5+_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P, + "POWER5+ v2.1") + POWERPC_DEF("power7_v2.3", CPU_POWERPC_POWER7_v23, POWER7, + "POWER7 v2.3") + POWERPC_DEF("power7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7, + "POWER7+ v2.1") + POWERPC_DEF("power8e_v2.1", CPU_POWERPC_POWER8E_v21, POWER8, + "POWER8E v2.1") + POWERPC_DEF("power8_v2.0", CPU_POWERPC_POWER8_v20, POWER8, + "POWER8 v2.0") + POWERPC_DEF("power8nvl_v1.0", CPU_POWERPC_POWER8NVL_v10, POWER8, + "POWER8NVL v1.0") + POWERPC_DEF("power9_v1.0", CPU_POWERPC_POWER9_DD1, POWER9, + "POWER9 v1.0") + POWERPC_DEF("power9_v2.0", CPU_POWERPC_POWER9_DD20, POWER9, + "POWER9 v2.0") + POWERPC_DEF("power10_v1.0", CPU_POWERPC_POWER10_DD1, POWER10, + "POWER10 v1.0") +#endif /* defined (TARGET_PPC64) */ + +/***************************************************************************/ +/* PowerPC CPU aliases */ + +PowerPCCPUAlias ppc_cpu_aliases[] = { + { "403", "403gc" }, + { "405", "405d4" }, + { "405cr", "405crc" }, + { "405gp", "405gpd" }, + { "405gpe", "405crc" }, + { "x2vp7", "x2vp4" }, + { "x2vp50", "x2vp20" }, + + { "440ep", "440epb" }, + { "460ex", "460exb" }, +#if defined(TODO_USER_ONLY) + { "440gp", "440gpc" }, + { "440gr", "440gra" }, + { "440gx", "440gxf" }, + + { "rcpu", "mpc5xx" }, + /* MPC5xx microcontrollers */ + { "mgt560", "mpc5xx" }, + { "mpc509", "mpc5xx" }, + { "mpc533", "mpc5xx" }, + { "mpc534", "mpc5xx" }, + { "mpc555", "mpc5xx" }, + { "mpc556", "mpc5xx" }, + { "mpc560", "mpc5xx" }, + { "mpc561", "mpc5xx" }, + { "mpc562", "mpc5xx" }, + { "mpc563", "mpc5xx" }, + { "mpc564", "mpc5xx" }, + { "mpc565", "mpc5xx" }, + { "mpc566", "mpc5xx" }, + + { "powerquicc", "mpc8xx" }, + /* MPC8xx microcontrollers */ + { "mgt823", "mpc8xx" }, + { "mpc821", "mpc8xx" }, + { "mpc823", "mpc8xx" }, + { "mpc850", "mpc8xx" }, + { "mpc852t", "mpc8xx" }, + { "mpc855t", "mpc8xx" }, + { "mpc857", "mpc8xx" }, + { "mpc859", "mpc8xx" }, + { "mpc860", "mpc8xx" }, + { "mpc862", "mpc8xx" }, + { "mpc866", "mpc8xx" }, + { "mpc870", "mpc8xx" }, + { "mpc875", "mpc8xx" }, + { "mpc880", "mpc8xx" }, + { "mpc885", "mpc8xx" }, +#endif + + /* PowerPC MPC603 microcontrollers */ + { "mpc8240", "603" }, + + { "mpc52xx", "mpc5200_v12" }, + { "mpc5200", "mpc5200_v12" }, + { "mpc5200b", "mpc5200b_v21" }, + + { "mpc82xx", "g2legp3" }, + { "powerquicc-ii", "g2legp3" }, + { "mpc8241", "g2hip4" }, + { "mpc8245", "g2hip4" }, + { "mpc8247", "g2legp3" }, + { "mpc8248", "g2legp3" }, + { "mpc8250", "g2hip4" }, + { "mpc8250_hip3", "g2hip3" }, + { "mpc8250_hip4", "g2hip4" }, + { "mpc8255", "g2hip4" }, + { "mpc8255_hip3", "g2hip3" }, + { "mpc8255_hip4", "g2hip4" }, + { "mpc8260", "g2hip4" }, + { "mpc8260_hip3", "g2hip3" }, + { "mpc8260_hip4", "g2hip4" }, + { "mpc8264", "g2hip4" }, + { "mpc8264_hip3", "g2hip3" }, + { "mpc8264_hip4", "g2hip4" }, + { "mpc8265", "g2hip4" }, + { "mpc8265_hip3", "g2hip3" }, + { "mpc8265_hip4", "g2hip4" }, + { "mpc8266", "g2hip4" }, + { "mpc8266_hip3", "g2hip3" }, + { "mpc8266_hip4", "g2hip4" }, + { "mpc8270", "g2legp3" }, + { 
"mpc8271", "g2legp3" }, + { "mpc8272", "g2legp3" }, + { "mpc8275", "g2legp3" }, + { "mpc8280", "g2legp3" }, + { "e200", "e200z6" }, + { "e300", "e300c3" }, + { "mpc8347", "mpc8347t" }, + { "mpc8347a", "mpc8347at" }, + { "mpc8347e", "mpc8347et" }, + { "mpc8347ea", "mpc8347eat" }, + { "e500", "e500v2_v22" }, + { "e500v1", "e500_v20" }, + { "e500v2", "e500v2_v22" }, + { "mpc8533", "mpc8533_v11" }, + { "mpc8533e", "mpc8533e_v11" }, + { "mpc8540", "mpc8540_v21" }, + { "mpc8541", "mpc8541_v11" }, + { "mpc8541e", "mpc8541e_v11" }, + { "mpc8543", "mpc8543_v21" }, + { "mpc8543e", "mpc8543e_v21" }, + { "mpc8544", "mpc8544_v11" }, + { "mpc8544e", "mpc8544e_v11" }, + { "mpc8545", "mpc8545_v21" }, + { "mpc8545e", "mpc8545e_v21" }, + { "mpc8547e", "mpc8547e_v21" }, + { "mpc8548", "mpc8548_v21" }, + { "mpc8548e", "mpc8548e_v21" }, + { "mpc8555", "mpc8555_v11" }, + { "mpc8555e", "mpc8555e_v11" }, + { "mpc8560", "mpc8560_v21" }, + { "601", "601_v2" }, + { "601v", "601_v2" }, + { "vanilla", "603" }, + { "603e", "603e_v4.1" }, + { "stretch", "603e_v4.1" }, + { "vaillant", "603e7v" }, + { "603r", "603e7t" }, + { "goldeneye", "603e7t" }, + { "604e", "604e_v2.4" }, + { "sirocco", "604e_v2.4" }, + { "mach5", "604r" }, + { "740", "740_v3.1" }, + { "arthur", "740_v3.1" }, + { "750", "750_v3.1" }, + { "typhoon", "750_v3.1" }, + { "g3", "750_v3.1" }, + { "conan/doyle", "750p" }, + { "750cl", "750cl_v2.0" }, + { "750cx", "750cx_v2.2" }, + { "750cxe", "750cxe_v3.1b" }, + { "750fx", "750fx_v2.3" }, + { "750gx", "750gx_v1.2" }, + { "750l", "750l_v3.2" }, + { "lonestar", "750l_v3.2" }, + { "745", "745_v2.8" }, + { "755", "755_v2.8" }, + { "goldfinger", "755_v2.8" }, + { "7400", "7400_v2.9" }, + { "max", "7400_v2.9" }, + { "g4", "7400_v2.9" }, + { "7410", "7410_v1.4" }, + { "nitro", "7410_v1.4" }, + { "7448", "7448_v2.1" }, + { "7450", "7450_v2.1" }, + { "vger", "7450_v2.1" }, + { "7441", "7441_v2.3" }, + { "7451", "7451_v2.3" }, + { "7445", "7445_v3.2" }, + { "7455", "7455_v3.2" }, + { "apollo6", "7455_v3.2" }, + { "7447", "7447_v1.1" }, + { "7457", "7457_v1.2" }, + { "apollo7", "7457_v1.2" }, + { "7447a", "7447a_v1.2" }, + { "7457a", "7457a_v1.2" }, + { "apollo7pm", "7457a_v1.0" }, +#if defined(TARGET_PPC64) + { "970", "970_v2.2" }, + { "970fx", "970fx_v3.1" }, + { "970mp", "970mp_v1.1" }, + { "power5+", "power5+_v2.1" }, + { "power5gs", "power5+_v2.1" }, + { "power7", "power7_v2.3" }, + { "power7+", "power7+_v2.1" }, + { "power8e", "power8e_v2.1" }, + { "power8", "power8_v2.0" }, + { "power8nvl", "power8nvl_v1.0" }, + { "power9", "power9_v2.0" }, + { "power10", "power10_v1.0" }, +#endif + + /* Generic PowerPCs */ +#if defined(TARGET_PPC64) + { "ppc64", "970fx_v3.1" }, +#endif + { "ppc32", "604" }, + { "ppc", "604" }, + { "default", "604" }, + { NULL, NULL } +}; +#endif diff --git a/qemu/target/ppc/cpu-models.h b/qemu/target/ppc/cpu-models.h new file mode 100644 index 00000000..ce750b2d --- /dev/null +++ b/qemu/target/ppc/cpu-models.h @@ -0,0 +1,504 @@ +/* + * PowerPC CPU initialization for qemu. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * Copyright 2011 Freescale Semiconductor, Inc. + * Copyright 2013 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#ifndef TARGET_PPC_CPU_MODELS_H +#define TARGET_PPC_CPU_MODELS_H + +/** + * PowerPCCPUAlias: + * @alias: The alias name. + * @model: The CPU model @alias refers to, that directly resolves into CPU type + * + * A mapping entry from CPU @alias to CPU @model. + */ +typedef struct PowerPCCPUAlias { + const char *alias; + const char *model; +} PowerPCCPUAlias; + +extern PowerPCCPUAlias ppc_cpu_aliases[]; + +/*****************************************************************************/ +/* PVR definitions for most known PowerPC */ +enum { + /* PowerPC 401 family */ + /* Generic PowerPC 401 */ +#define CPU_POWERPC_401 CPU_POWERPC_401G2 + /* PowerPC 401 cores */ + CPU_POWERPC_401A1 = 0x00210000, + CPU_POWERPC_401B2 = 0x00220000, + CPU_POWERPC_401C2 = 0x00230000, + CPU_POWERPC_401D2 = 0x00240000, + CPU_POWERPC_401E2 = 0x00250000, + CPU_POWERPC_401F2 = 0x00260000, + CPU_POWERPC_401G2 = 0x00270000, + /* PowerPC 401 microcontrollers */ +#define CPU_POWERPC_IOP480 CPU_POWERPC_401B2 + /* IBM Processor for Network Resources */ + CPU_POWERPC_COBRA = 0x10100000, /* XXX: 405 ? */ + /* PowerPC 403 family */ + /* PowerPC 403 microcontrollers */ + CPU_POWERPC_403GA = 0x00200011, + CPU_POWERPC_403GB = 0x00200100, + CPU_POWERPC_403GC = 0x00200200, + CPU_POWERPC_403GCX = 0x00201400, + /* PowerPC 405 family */ + /* PowerPC 405 cores */ + CPU_POWERPC_405D2 = 0x20010000, + CPU_POWERPC_405D4 = 0x41810000, + /* PowerPC 405 microcontrollers */ + /* XXX: missing 0x200108a0 */ + CPU_POWERPC_405CRa = 0x40110041, + CPU_POWERPC_405CRb = 0x401100C5, + CPU_POWERPC_405CRc = 0x40110145, + CPU_POWERPC_405EP = 0x51210950, + CPU_POWERPC_405EZ = 0x41511460, /* 0x51210950 ? */ + CPU_POWERPC_405GPa = 0x40110000, + CPU_POWERPC_405GPb = 0x40110040, + CPU_POWERPC_405GPc = 0x40110082, + CPU_POWERPC_405GPd = 0x401100C4, + CPU_POWERPC_405GPR = 0x50910951, + CPU_POWERPC_405LP = 0x41F10000, + /* IBM network processors */ + CPU_POWERPC_NPE405H = 0x414100C0, + CPU_POWERPC_NPE405H2 = 0x41410140, + CPU_POWERPC_NPE405L = 0x416100C0, + CPU_POWERPC_NPE4GS3 = 0x40B10000, + /* IBM STBxxx (PowerPC 401/403/405 core based microcontrollers) */ + CPU_POWERPC_STB03 = 0x40310000, /* 0x40130000 ? */ + CPU_POWERPC_STB04 = 0x41810000, + CPU_POWERPC_STB25 = 0x51510950, + /* Xilinx cores */ + CPU_POWERPC_X2VP4 = 0x20010820, + CPU_POWERPC_X2VP20 = 0x20010860, + /* PowerPC 440 family */ + /* Generic PowerPC 440 */ +#define CPU_POWERPC_440 CPU_POWERPC_440GXf + /* PowerPC 440 cores */ + CPU_POWERPC_440_XILINX = 0x7ff21910, + /* PowerPC 440 microcontrollers */ + CPU_POWERPC_440EPa = 0x42221850, + CPU_POWERPC_440EPb = 0x422218D3, + CPU_POWERPC_440GPb = 0x40120440, + CPU_POWERPC_440GPc = 0x40120481, +#define CPU_POWERPC_440GRa CPU_POWERPC_440EPb + CPU_POWERPC_440GRX = 0x200008D0, +#define CPU_POWERPC_440EPX CPU_POWERPC_440GRX + CPU_POWERPC_440GXa = 0x51B21850, + CPU_POWERPC_440GXb = 0x51B21851, + CPU_POWERPC_440GXc = 0x51B21892, + CPU_POWERPC_440GXf = 0x51B21894, + CPU_POWERPC_440SP = 0x53221850, + CPU_POWERPC_440SP2 = 0x53221891, + CPU_POWERPC_440SPE = 0x53421890, + CPU_POWERPC_460EXb = 0x130218A4, /* called 460 but 440 core */ + /* Freescale embedded PowerPC cores */ + /* PowerPC MPC 5xx cores (aka RCPU) */ + CPU_POWERPC_MPC5xx = 0x00020020, + /* PowerPC MPC 8xx cores (aka PowerQUICC) */ + CPU_POWERPC_MPC8xx = 0x00500000, + /* G2 cores (aka PowerQUICC-II) */ + CPU_POWERPC_G2 = 0x00810011, + CPU_POWERPC_G2H4 = 0x80811010, + CPU_POWERPC_G2gp = 0x80821010, + CPU_POWERPC_G2ls = 0x90810010, + CPU_POWERPC_MPC603 = 0x00810100, + CPU_POWERPC_G2_HIP3 = 0x00810101, + CPU_POWERPC_G2_HIP4 = 0x80811014, + /* G2_LE core (aka PowerQUICC-II) */ + CPU_POWERPC_G2LE = 0x80820010, + CPU_POWERPC_G2LEgp = 0x80822010, + CPU_POWERPC_G2LEls = 0xA0822010, + CPU_POWERPC_G2LEgp1 = 0x80822011, + CPU_POWERPC_G2LEgp3 = 0x80822013, + /* MPC52xx microcontrollers */ + /* XXX: MPC 5121 ? */ +#define CPU_POWERPC_MPC5200_v10 CPU_POWERPC_G2LEgp1 +#define CPU_POWERPC_MPC5200_v11 CPU_POWERPC_G2LEgp1 +#define CPU_POWERPC_MPC5200_v12 CPU_POWERPC_G2LEgp1 +#define CPU_POWERPC_MPC5200B_v20 CPU_POWERPC_G2LEgp1 +#define CPU_POWERPC_MPC5200B_v21 CPU_POWERPC_G2LEgp1 + /* e200 family */ + /* e200 cores */ + CPU_POWERPC_e200z5 = 0x81000000, + CPU_POWERPC_e200z6 = 0x81120000, + /* e300 family */ + /* e300 cores */ + CPU_POWERPC_e300c1 = 0x00830010, + CPU_POWERPC_e300c2 = 0x00840010, + CPU_POWERPC_e300c3 = 0x00850010, + CPU_POWERPC_e300c4 = 0x00860010, + /* MPC83xx microcontrollers */ +#define CPU_POWERPC_MPC834x CPU_POWERPC_e300c1 +#define CPU_POWERPC_MPC837x CPU_POWERPC_e300c4 + /* e500 family */ + /* e500 cores */ +#define CPU_POWERPC_e500 CPU_POWERPC_e500v2_v22 + CPU_POWERPC_e500v1_v10 = 0x80200010, + CPU_POWERPC_e500v1_v20 = 0x80200020, + CPU_POWERPC_e500v2_v10 = 0x80210010, + CPU_POWERPC_e500v2_v11 = 0x80210011, + CPU_POWERPC_e500v2_v20 = 0x80210020, + CPU_POWERPC_e500v2_v21 = 0x80210021, + CPU_POWERPC_e500v2_v22 = 0x80210022, + CPU_POWERPC_e500v2_v30 = 0x80210030, + CPU_POWERPC_e500mc = 0x80230020, + CPU_POWERPC_e5500 = 0x80240020, + CPU_POWERPC_e6500 = 0x80400020, + /* MPC85xx microcontrollers */ +#define CPU_POWERPC_MPC8533_v10 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8533_v11 CPU_POWERPC_e500v2_v22 +#define CPU_POWERPC_MPC8533E_v10 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8533E_v11 CPU_POWERPC_e500v2_v22 +#define CPU_POWERPC_MPC8540_v10 CPU_POWERPC_e500v1_v10 +#define CPU_POWERPC_MPC8540_v20 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8540_v21 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8541_v10 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8541_v11 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8541E_v10 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8541E_v11 CPU_POWERPC_e500v1_v20 +#define
CPU_POWERPC_MPC8543_v10 CPU_POWERPC_e500v2_v10 +#define CPU_POWERPC_MPC8543_v11 CPU_POWERPC_e500v2_v11 +#define CPU_POWERPC_MPC8543_v20 CPU_POWERPC_e500v2_v20 +#define CPU_POWERPC_MPC8543_v21 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8543E_v10 CPU_POWERPC_e500v2_v10 +#define CPU_POWERPC_MPC8543E_v11 CPU_POWERPC_e500v2_v11 +#define CPU_POWERPC_MPC8543E_v20 CPU_POWERPC_e500v2_v20 +#define CPU_POWERPC_MPC8543E_v21 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8544_v10 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8544_v11 CPU_POWERPC_e500v2_v22 +#define CPU_POWERPC_MPC8544E_v11 CPU_POWERPC_e500v2_v22 +#define CPU_POWERPC_MPC8544E_v10 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8545_v10 CPU_POWERPC_e500v2_v10 +#define CPU_POWERPC_MPC8545_v20 CPU_POWERPC_e500v2_v20 +#define CPU_POWERPC_MPC8545_v21 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8545E_v10 CPU_POWERPC_e500v2_v10 +#define CPU_POWERPC_MPC8545E_v20 CPU_POWERPC_e500v2_v20 +#define CPU_POWERPC_MPC8545E_v21 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8547E_v10 CPU_POWERPC_e500v2_v10 +#define CPU_POWERPC_MPC8547E_v20 CPU_POWERPC_e500v2_v20 +#define CPU_POWERPC_MPC8547E_v21 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8548_v10 CPU_POWERPC_e500v2_v10 +#define CPU_POWERPC_MPC8548_v11 CPU_POWERPC_e500v2_v11 +#define CPU_POWERPC_MPC8548_v20 CPU_POWERPC_e500v2_v20 +#define CPU_POWERPC_MPC8548_v21 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8548E_v10 CPU_POWERPC_e500v2_v10 +#define CPU_POWERPC_MPC8548E_v11 CPU_POWERPC_e500v2_v11 +#define CPU_POWERPC_MPC8548E_v20 CPU_POWERPC_e500v2_v20 +#define CPU_POWERPC_MPC8548E_v21 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v2_v10 +#define CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v2_v11 +#define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v2_v10 +#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v2_v11 +#define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v2_v10 +#define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v2_v20 +#define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8567 CPU_POWERPC_e500v2_v22 +#define CPU_POWERPC_MPC8567E CPU_POWERPC_e500v2_v22 +#define CPU_POWERPC_MPC8568 CPU_POWERPC_e500v2_v22 +#define CPU_POWERPC_MPC8568E CPU_POWERPC_e500v2_v22 +#define CPU_POWERPC_MPC8572 CPU_POWERPC_e500v2_v30 +#define CPU_POWERPC_MPC8572E CPU_POWERPC_e500v2_v30 + /* e600 family */ + /* e600 cores */ + CPU_POWERPC_e600 = 0x80040010, + /* MPC86xx microcontrollers */ +#define CPU_POWERPC_MPC8610 CPU_POWERPC_e600 +#define CPU_POWERPC_MPC8641 CPU_POWERPC_e600 +#define CPU_POWERPC_MPC8641D CPU_POWERPC_e600 + /* PowerPC 6xx cores */ + CPU_POWERPC_601_v0 = 0x00010001, + CPU_POWERPC_601_v1 = 0x00010001, + CPU_POWERPC_601_v2 = 0x00010002, + CPU_POWERPC_602 = 0x00050100, + CPU_POWERPC_603 = 0x00030100, + CPU_POWERPC_603E_v11 = 0x00060101, + CPU_POWERPC_603E_v12 = 0x00060102, + CPU_POWERPC_603E_v13 = 0x00060103, + CPU_POWERPC_603E_v14 = 0x00060104, + CPU_POWERPC_603E_v22 = 0x00060202, + CPU_POWERPC_603E_v3 = 0x00060300, + CPU_POWERPC_603E_v4 = 0x00060400, + CPU_POWERPC_603E_v41 = 0x00060401, + CPU_POWERPC_603E7t = 0x00071201, + CPU_POWERPC_603E7v = 0x00070100, + CPU_POWERPC_603E7v1 = 0x00070101, + CPU_POWERPC_603E7v2 = 0x00070201, + CPU_POWERPC_603E7 = 0x00070200, + CPU_POWERPC_603P = 0x00070000, + /* XXX: missing 0x00040303 (604) */ + CPU_POWERPC_604 = 0x00040103, + /* XXX: missing 0x00091203 */ + /* XXX: missing 0x00092110 */ + /* XXX: missing 0x00092120 */ + CPU_POWERPC_604E_v10 = 0x00090100, + CPU_POWERPC_604E_v22 = 0x00090202, + CPU_POWERPC_604E_v24 = 
0x00090204, + /* XXX: missing 0x000a0100 */ + /* XXX: missing 0x00093102 */ + CPU_POWERPC_604R = 0x000a0101, + /* PowerPC 740/750 cores (aka G3) */ + /* XXX: missing 0x00084202 */ + CPU_POWERPC_7x0_v10 = 0x00080100, + CPU_POWERPC_7x0_v20 = 0x00080200, + CPU_POWERPC_7x0_v21 = 0x00080201, + CPU_POWERPC_7x0_v22 = 0x00080202, + CPU_POWERPC_7x0_v30 = 0x00080300, + CPU_POWERPC_7x0_v31 = 0x00080301, + CPU_POWERPC_740E = 0x00080100, + CPU_POWERPC_750E = 0x00080200, + CPU_POWERPC_7x0P = 0x10080000, + /* XXX: missing 0x00087010 (CL ?) */ + CPU_POWERPC_750CL_v10 = 0x00087200, + CPU_POWERPC_750CL_v20 = 0x00087210, /* aka rev E */ + CPU_POWERPC_750CX_v10 = 0x00082100, + CPU_POWERPC_750CX_v20 = 0x00082200, + CPU_POWERPC_750CX_v21 = 0x00082201, + CPU_POWERPC_750CX_v22 = 0x00082202, + CPU_POWERPC_750CXE_v21 = 0x00082211, + CPU_POWERPC_750CXE_v22 = 0x00082212, + CPU_POWERPC_750CXE_v23 = 0x00082213, + CPU_POWERPC_750CXE_v24 = 0x00082214, + CPU_POWERPC_750CXE_v24b = 0x00083214, + CPU_POWERPC_750CXE_v30 = 0x00082310, + CPU_POWERPC_750CXE_v31 = 0x00082311, + CPU_POWERPC_750CXE_v31b = 0x00083311, + CPU_POWERPC_750CXR = 0x00083410, + CPU_POWERPC_750FL = 0x70000203, + CPU_POWERPC_750FX_v10 = 0x70000100, + CPU_POWERPC_750FX_v20 = 0x70000200, + CPU_POWERPC_750FX_v21 = 0x70000201, + CPU_POWERPC_750FX_v22 = 0x70000202, + CPU_POWERPC_750FX_v23 = 0x70000203, + CPU_POWERPC_750GL = 0x70020102, + CPU_POWERPC_750GX_v10 = 0x70020100, + CPU_POWERPC_750GX_v11 = 0x70020101, + CPU_POWERPC_750GX_v12 = 0x70020102, + CPU_POWERPC_750L_v20 = 0x00088200, + CPU_POWERPC_750L_v21 = 0x00088201, + CPU_POWERPC_750L_v22 = 0x00088202, + CPU_POWERPC_750L_v30 = 0x00088300, + CPU_POWERPC_750L_v32 = 0x00088302, + /* PowerPC 745/755 cores */ + CPU_POWERPC_7x5_v10 = 0x00083100, + CPU_POWERPC_7x5_v11 = 0x00083101, + CPU_POWERPC_7x5_v20 = 0x00083200, + CPU_POWERPC_7x5_v21 = 0x00083201, + CPU_POWERPC_7x5_v22 = 0x00083202, /* aka D */ + CPU_POWERPC_7x5_v23 = 0x00083203, /* aka E */ + CPU_POWERPC_7x5_v24 = 0x00083204, + CPU_POWERPC_7x5_v25 = 0x00083205, + CPU_POWERPC_7x5_v26 = 0x00083206, + CPU_POWERPC_7x5_v27 = 0x00083207, + CPU_POWERPC_7x5_v28 = 0x00083208, + /* PowerPC 74xx cores (aka G4) */ + /* XXX: missing 0x000C1101 */ + CPU_POWERPC_7400_v10 = 0x000C0100, + CPU_POWERPC_7400_v11 = 0x000C0101, + CPU_POWERPC_7400_v20 = 0x000C0200, + CPU_POWERPC_7400_v21 = 0x000C0201, + CPU_POWERPC_7400_v22 = 0x000C0202, + CPU_POWERPC_7400_v26 = 0x000C0206, + CPU_POWERPC_7400_v27 = 0x000C0207, + CPU_POWERPC_7400_v28 = 0x000C0208, + CPU_POWERPC_7400_v29 = 0x000C0209, + CPU_POWERPC_7410_v10 = 0x800C1100, + CPU_POWERPC_7410_v11 = 0x800C1101, + CPU_POWERPC_7410_v12 = 0x800C1102, /* aka C */ + CPU_POWERPC_7410_v13 = 0x800C1103, /* aka D */ + CPU_POWERPC_7410_v14 = 0x800C1104, /* aka E */ + CPU_POWERPC_7448_v10 = 0x80040100, + CPU_POWERPC_7448_v11 = 0x80040101, + CPU_POWERPC_7448_v20 = 0x80040200, + CPU_POWERPC_7448_v21 = 0x80040201, + CPU_POWERPC_7450_v10 = 0x80000100, + CPU_POWERPC_7450_v11 = 0x80000101, + CPU_POWERPC_7450_v12 = 0x80000102, + CPU_POWERPC_7450_v20 = 0x80000200, /* aka A, B, C, D: 2.04 */ + CPU_POWERPC_7450_v21 = 0x80000201, /* aka E */ + CPU_POWERPC_74x1_v23 = 0x80000203, /* aka G: 2.3 */ + /* XXX: this entry might be a bug in some documentation */ + CPU_POWERPC_74x1_v210 = 0x80000210, /* aka G: 2.3 ? 
*/ + CPU_POWERPC_74x5_v10 = 0x80010100, + /* XXX: missing 0x80010200 */ + CPU_POWERPC_74x5_v21 = 0x80010201, /* aka C: 2.1 */ + CPU_POWERPC_74x5_v32 = 0x80010302, + CPU_POWERPC_74x5_v33 = 0x80010303, /* aka F: 3.3 */ + CPU_POWERPC_74x5_v34 = 0x80010304, /* aka G: 3.4 */ + CPU_POWERPC_74x7_v10 = 0x80020100, /* aka A: 1.0 */ + CPU_POWERPC_74x7_v11 = 0x80020101, /* aka B: 1.1 */ + CPU_POWERPC_74x7_v12 = 0x80020102, /* aka C: 1.2 */ + CPU_POWERPC_74x7A_v10 = 0x80030100, /* aka A: 1.0 */ + CPU_POWERPC_74x7A_v11 = 0x80030101, /* aka B: 1.1 */ + CPU_POWERPC_74x7A_v12 = 0x80030102, /* aka C: 1.2 */ + /* 64 bits PowerPC */ +#if defined(TARGET_PPC64) + CPU_POWERPC_620 = 0x00140000, + CPU_POWERPC_630 = 0x00400000, + CPU_POWERPC_631 = 0x00410104, + CPU_POWERPC_POWER4 = 0x00350000, + CPU_POWERPC_POWER4P = 0x00380000, + /* XXX: missing 0x003A0201 */ + CPU_POWERPC_POWER5 = 0x003A0203, + CPU_POWERPC_POWER5P_v21 = 0x003B0201, + CPU_POWERPC_POWER6 = 0x003E0000, + CPU_POWERPC_POWER_SERVER_MASK = 0xFFFF0000, + CPU_POWERPC_POWER7_BASE = 0x003F0000, + CPU_POWERPC_POWER7_v23 = 0x003F0203, + CPU_POWERPC_POWER7P_BASE = 0x004A0000, + CPU_POWERPC_POWER7P_v21 = 0x004A0201, + CPU_POWERPC_POWER8E_BASE = 0x004B0000, + CPU_POWERPC_POWER8E_v21 = 0x004B0201, + CPU_POWERPC_POWER8_BASE = 0x004D0000, + CPU_POWERPC_POWER8_v20 = 0x004D0200, + CPU_POWERPC_POWER8NVL_BASE = 0x004C0000, + CPU_POWERPC_POWER8NVL_v10 = 0x004C0100, + CPU_POWERPC_POWER9_BASE = 0x004E0000, + CPU_POWERPC_POWER9_DD1 = 0x004E0100, + CPU_POWERPC_POWER9_DD20 = 0x004E1200, + CPU_POWERPC_POWER10_BASE = 0x00800000, + CPU_POWERPC_POWER10_DD1 = 0x00800100, + CPU_POWERPC_970_v22 = 0x00390202, + CPU_POWERPC_970FX_v10 = 0x00391100, + CPU_POWERPC_970FX_v20 = 0x003C0200, + CPU_POWERPC_970FX_v21 = 0x003C0201, + CPU_POWERPC_970FX_v30 = 0x003C0300, + CPU_POWERPC_970FX_v31 = 0x003C0301, + CPU_POWERPC_970MP_v10 = 0x00440100, + CPU_POWERPC_970MP_v11 = 0x00440101, +#define CPU_POWERPC_CELL CPU_POWERPC_CELL_v32 + CPU_POWERPC_CELL_v10 = 0x00700100, + CPU_POWERPC_CELL_v20 = 0x00700400, + CPU_POWERPC_CELL_v30 = 0x00700500, + CPU_POWERPC_CELL_v31 = 0x00700501, +#define CPU_POWERPC_CELL_v32 CPU_POWERPC_CELL_v31 + CPU_POWERPC_RS64 = 0x00330000, + CPU_POWERPC_RS64II = 0x00340000, + CPU_POWERPC_RS64III = 0x00360000, + CPU_POWERPC_RS64IV = 0x00370000, +#endif /* defined(TARGET_PPC64) */ + /* Original POWER */ + /* + * XXX: should be POWER (RIOS), RSC3308, RSC4608, + * POWER2 (RIOS2) & RSC2 (P2SC) here + */ + /* PA Semi core */ + CPU_POWERPC_PA6T = 0x00900000, +}; + +/* Logical PVR definitions for sPAPR */ +enum { + CPU_POWERPC_LOGICAL_2_04 = 0x0F000001, + CPU_POWERPC_LOGICAL_2_05 = 0x0F000002, + CPU_POWERPC_LOGICAL_2_06 = 0x0F000003, + CPU_POWERPC_LOGICAL_2_06_PLUS = 0x0F100003, + CPU_POWERPC_LOGICAL_2_07 = 0x0F000004, + CPU_POWERPC_LOGICAL_3_00 = 0x0F000005, + CPU_POWERPC_LOGICAL_3_10 = 0x0F000006, +}; + +/* System version register (used on MPC 8xxx) */ +enum { + POWERPC_SVR_NONE = 0x00000000, + POWERPC_SVR_5200_v10 = 0x80110010, + POWERPC_SVR_5200_v11 = 0x80110011, + POWERPC_SVR_5200_v12 = 0x80110012, + POWERPC_SVR_5200B_v20 = 0x80110020, + POWERPC_SVR_5200B_v21 = 0x80110021, +#define POWERPC_SVR_55xx POWERPC_SVR_5567 + POWERPC_SVR_8343 = 0x80570010, + POWERPC_SVR_8343A = 0x80570030, + POWERPC_SVR_8343E = 0x80560010, + POWERPC_SVR_8343EA = 0x80560030, + POWERPC_SVR_8347P = 0x80550010, /* PBGA package */ + POWERPC_SVR_8347T = 0x80530010, /* TBGA package */ + POWERPC_SVR_8347AP = 0x80550030, /* PBGA package */ + POWERPC_SVR_8347AT = 0x80530030, /* TBGA package */ + POWERPC_SVR_8347EP = 
0x80540010, /* PBGA package */ + POWERPC_SVR_8347ET = 0x80520010, /* TBGA package */ + POWERPC_SVR_8347EAP = 0x80540030, /* PBGA package */ + POWERPC_SVR_8347EAT = 0x80520030, /* TBGA package */ + POWERPC_SVR_8349 = 0x80510010, + POWERPC_SVR_8349A = 0x80510030, + POWERPC_SVR_8349E = 0x80500010, + POWERPC_SVR_8349EA = 0x80500030, +#define POWERPC_SVR_E500 0x40000000 + POWERPC_SVR_8377 = 0x80C70010 | POWERPC_SVR_E500, + POWERPC_SVR_8377E = 0x80C60010 | POWERPC_SVR_E500, + POWERPC_SVR_8378 = 0x80C50010 | POWERPC_SVR_E500, + POWERPC_SVR_8378E = 0x80C40010 | POWERPC_SVR_E500, + POWERPC_SVR_8379 = 0x80C30010 | POWERPC_SVR_E500, + POWERPC_SVR_8379E = 0x80C00010 | POWERPC_SVR_E500, + POWERPC_SVR_8533_v10 = 0x80340010 | POWERPC_SVR_E500, + POWERPC_SVR_8533_v11 = 0x80340011 | POWERPC_SVR_E500, + POWERPC_SVR_8533E_v10 = 0x803C0010 | POWERPC_SVR_E500, + POWERPC_SVR_8533E_v11 = 0x803C0011 | POWERPC_SVR_E500, + POWERPC_SVR_8540_v10 = 0x80300010 | POWERPC_SVR_E500, + POWERPC_SVR_8540_v20 = 0x80300020 | POWERPC_SVR_E500, + POWERPC_SVR_8540_v21 = 0x80300021 | POWERPC_SVR_E500, + POWERPC_SVR_8541_v10 = 0x80720010 | POWERPC_SVR_E500, + POWERPC_SVR_8541_v11 = 0x80720011 | POWERPC_SVR_E500, + POWERPC_SVR_8541E_v10 = 0x807A0010 | POWERPC_SVR_E500, + POWERPC_SVR_8541E_v11 = 0x807A0011 | POWERPC_SVR_E500, + POWERPC_SVR_8543_v10 = 0x80320010 | POWERPC_SVR_E500, + POWERPC_SVR_8543_v11 = 0x80320011 | POWERPC_SVR_E500, + POWERPC_SVR_8543_v20 = 0x80320020 | POWERPC_SVR_E500, + POWERPC_SVR_8543_v21 = 0x80320021 | POWERPC_SVR_E500, + POWERPC_SVR_8543E_v10 = 0x803A0010 | POWERPC_SVR_E500, + POWERPC_SVR_8543E_v11 = 0x803A0011 | POWERPC_SVR_E500, + POWERPC_SVR_8543E_v20 = 0x803A0020 | POWERPC_SVR_E500, + POWERPC_SVR_8543E_v21 = 0x803A0021 | POWERPC_SVR_E500, + POWERPC_SVR_8544_v10 = 0x80340110 | POWERPC_SVR_E500, + POWERPC_SVR_8544_v11 = 0x80340111 | POWERPC_SVR_E500, + POWERPC_SVR_8544E_v10 = 0x803C0110 | POWERPC_SVR_E500, + POWERPC_SVR_8544E_v11 = 0x803C0111 | POWERPC_SVR_E500, + POWERPC_SVR_8545_v20 = 0x80310220 | POWERPC_SVR_E500, + POWERPC_SVR_8545_v21 = 0x80310221 | POWERPC_SVR_E500, + POWERPC_SVR_8545E_v20 = 0x80390220 | POWERPC_SVR_E500, + POWERPC_SVR_8545E_v21 = 0x80390221 | POWERPC_SVR_E500, + POWERPC_SVR_8547E_v20 = 0x80390120 | POWERPC_SVR_E500, + POWERPC_SVR_8547E_v21 = 0x80390121 | POWERPC_SVR_E500, + POWERPC_SVR_8548_v10 = 0x80310010 | POWERPC_SVR_E500, + POWERPC_SVR_8548_v11 = 0x80310011 | POWERPC_SVR_E500, + POWERPC_SVR_8548_v20 = 0x80310020 | POWERPC_SVR_E500, + POWERPC_SVR_8548_v21 = 0x80310021 | POWERPC_SVR_E500, + POWERPC_SVR_8548E_v10 = 0x80390010 | POWERPC_SVR_E500, + POWERPC_SVR_8548E_v11 = 0x80390011 | POWERPC_SVR_E500, + POWERPC_SVR_8548E_v20 = 0x80390020 | POWERPC_SVR_E500, + POWERPC_SVR_8548E_v21 = 0x80390021 | POWERPC_SVR_E500, + POWERPC_SVR_8555_v10 = 0x80710010 | POWERPC_SVR_E500, + POWERPC_SVR_8555_v11 = 0x80710011 | POWERPC_SVR_E500, + POWERPC_SVR_8555E_v10 = 0x80790010 | POWERPC_SVR_E500, + POWERPC_SVR_8555E_v11 = 0x80790011 | POWERPC_SVR_E500, + POWERPC_SVR_8560_v10 = 0x80700010 | POWERPC_SVR_E500, + POWERPC_SVR_8560_v20 = 0x80700020 | POWERPC_SVR_E500, + POWERPC_SVR_8560_v21 = 0x80700021 | POWERPC_SVR_E500, + POWERPC_SVR_8567 = 0x80750111 | POWERPC_SVR_E500, + POWERPC_SVR_8567E = 0x807D0111 | POWERPC_SVR_E500, + POWERPC_SVR_8568 = 0x80750011 | POWERPC_SVR_E500, + POWERPC_SVR_8568E = 0x807D0011 | POWERPC_SVR_E500, + POWERPC_SVR_8572 = 0x80E00010 | POWERPC_SVR_E500, + POWERPC_SVR_8572E = 0x80E80010 | POWERPC_SVR_E500, + POWERPC_SVR_8610 = 0x80A00011, + POWERPC_SVR_8641 = 0x80900021, + 
POWERPC_SVR_8641D = 0x80900121,
+};
+
+#endif
diff --git a/qemu/target/ppc/cpu-param.h b/qemu/target/ppc/cpu-param.h
new file mode 100644
index 00000000..37b458d3
--- /dev/null
+++ b/qemu/target/ppc/cpu-param.h
@@ -0,0 +1,37 @@
+/*
+ * PowerPC cpu parameters for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef PPC_CPU_PARAM_H
+#define PPC_CPU_PARAM_H 1
+
+#ifdef TARGET_PPC64
+# define TARGET_LONG_BITS 64
+/*
+ * Note that the architected physical address space is 62-M bits, where M
+ * is implementation dependent. I've not looked up M for the set of
+ * cpus we emulate at the system level.
+ */
+#define TARGET_PHYS_ADDR_SPACE_BITS 62
+/*
+ * Note that the PPC environment architecture talks about 80-bit virtual
+ * addresses, with segmentation. Obviously that's not all visible to a
+ * single process, which is all we're concerned with here.
+ */
+# ifdef TARGET_ABI32
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+# else
+# define TARGET_VIRT_ADDR_SPACE_BITS 64
+# endif
+#else
+# define TARGET_LONG_BITS 32
+# define TARGET_PHYS_ADDR_SPACE_BITS 36
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+#define TARGET_PAGE_BITS 12
+#define NB_MMU_MODES 10
+
+#endif
diff --git a/qemu/target/ppc/cpu-qom.h b/qemu/target/ppc/cpu-qom.h
new file mode 100644
index 00000000..57da526e
--- /dev/null
+++ b/qemu/target/ppc/cpu-qom.h
@@ -0,0 +1,213 @@
+/*
+ * QEMU PowerPC CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+#ifndef QEMU_PPC_CPU_QOM_H
+#define QEMU_PPC_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+
+#ifdef TARGET_PPC64
+#define TYPE_POWERPC_CPU "powerpc64-cpu"
+#else
+#define TYPE_POWERPC_CPU "powerpc-cpu"
+#endif
+
+#define POWERPC_CPU(obj) ((PowerPCCPU *)obj)
+#define POWERPC_CPU_CLASS(klass) ((PowerPCCPUClass *)klass)
+#define POWERPC_CPU_GET_CLASS(obj) (&((PowerPCCPU *)obj)->cc)
+
+typedef struct PowerPCCPU PowerPCCPU;
+typedef struct CPUPPCState CPUPPCState;
+typedef struct ppc_tb_t ppc_tb_t;
+typedef struct ppc_dcr_t ppc_dcr_t;
+
+/*****************************************************************************/
+/* MMU model */
+typedef enum powerpc_mmu_t powerpc_mmu_t;
+enum powerpc_mmu_t {
+ POWERPC_MMU_UNKNOWN = 0x00000000,
+ /* Standard 32 bits PowerPC MMU */
+ POWERPC_MMU_32B = 0x00000001,
+ /* PowerPC 6xx MMU with software TLB */
+ POWERPC_MMU_SOFT_6xx = 0x00000002,
+ /* PowerPC 74xx MMU with software TLB */
+ POWERPC_MMU_SOFT_74xx = 0x00000003,
+ /* PowerPC 4xx MMU with software TLB */
+ POWERPC_MMU_SOFT_4xx = 0x00000004,
+ /* PowerPC 4xx MMU with software TLB and zones protections */
+ POWERPC_MMU_SOFT_4xx_Z = 0x00000005,
+ /* PowerPC MMU in real mode only */
+ POWERPC_MMU_REAL = 0x00000006,
+ /* Freescale MPC8xx MMU model */
+ POWERPC_MMU_MPC8xx = 0x00000007,
+ /* BookE MMU model */
+ POWERPC_MMU_BOOKE = 0x00000008,
+ /* BookE 2.06 MMU model */
+ POWERPC_MMU_BOOKE206 = 0x00000009,
+ /* PowerPC 601 MMU model (specific BATs format) */
+ POWERPC_MMU_601 = 0x0000000A,
+#define POWERPC_MMU_64 0x00010000
+ /* 64 bits PowerPC MMU */
+ POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001,
+ /* Architecture 2.03 and later (has LPCR) */
+ POWERPC_MMU_2_03 = POWERPC_MMU_64 | 0x00000002,
+ /* Architecture 2.06 variant */
+ POWERPC_MMU_2_06 = POWERPC_MMU_64 | 0x00000003,
+ /* Architecture 2.07 variant */
+ POWERPC_MMU_2_07 = POWERPC_MMU_64 | 0x00000004,
+ /* Architecture 3.00 variant */
+ POWERPC_MMU_3_00 = POWERPC_MMU_64 | 0x00000005,
+};
+
+/*****************************************************************************/
+/* Exception model */
+typedef enum powerpc_excp_t powerpc_excp_t;
+enum powerpc_excp_t {
+ POWERPC_EXCP_UNKNOWN = 0,
+ /* Standard PowerPC exception model */
+ POWERPC_EXCP_STD,
+ /* PowerPC 40x exception model */
+ POWERPC_EXCP_40x,
+ /* PowerPC 601 exception model */
+ POWERPC_EXCP_601,
+ /* PowerPC 602 exception model */
+ POWERPC_EXCP_602,
+ /* PowerPC 603 exception model */
+ POWERPC_EXCP_603,
+ /* PowerPC 603e exception model */
+ POWERPC_EXCP_603E,
+ /* PowerPC G2 exception model */
+ POWERPC_EXCP_G2,
+ /* PowerPC 604 exception model */
+ POWERPC_EXCP_604,
+ /* PowerPC 7x0 exception model */
+ POWERPC_EXCP_7x0,
+ /* PowerPC 7x5 exception model */
+ POWERPC_EXCP_7x5,
+ /* PowerPC 74xx exception model */
+ POWERPC_EXCP_74xx,
+ /* BookE exception model */
+ POWERPC_EXCP_BOOKE,
+ /* PowerPC 970 exception model */
+ POWERPC_EXCP_970,
+ /* POWER7 exception model */
+ POWERPC_EXCP_POWER7,
+ /* POWER8 exception model */
+ POWERPC_EXCP_POWER8,
+ /* POWER9 exception model */
+ POWERPC_EXCP_POWER9,
+};
+
+/*****************************************************************************/
+/* PM instructions */
+typedef enum {
+ PPC_PM_DOZE,
+ PPC_PM_NAP,
+ PPC_PM_SLEEP,
+ PPC_PM_RVWINKLE,
+ PPC_PM_STOP,
+} powerpc_pm_insn_t;
+
+/*****************************************************************************/
+/* Input pins model */
+typedef enum powerpc_input_t powerpc_input_t;
+enum powerpc_input_t {
+ PPC_FLAGS_INPUT_UNKNOWN = 0,
+ /* PowerPC 6xx bus */
+ PPC_FLAGS_INPUT_6xx,
+ /* BookE bus */
+ PPC_FLAGS_INPUT_BookE,
+ /* PowerPC 405 bus */
+ PPC_FLAGS_INPUT_405,
+ /* PowerPC 970 bus */
+ PPC_FLAGS_INPUT_970,
+ /* PowerPC POWER7 bus */
+ PPC_FLAGS_INPUT_POWER7,
+ /* PowerPC POWER9 bus */
+ PPC_FLAGS_INPUT_POWER9,
+ /* PowerPC 401 bus */
+ PPC_FLAGS_INPUT_401,
+ /* Freescale RCPU bus */
+ PPC_FLAGS_INPUT_RCPU,
+};
+
+typedef struct PPCHash64Options PPCHash64Options;
+
+/**
+ * PowerPCCPUClass:
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A PowerPC CPU model.
+ */
+typedef struct PowerPCCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ void (*parent_reset)(CPUState *cpu);
+
+ uint32_t pvr;
+ bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr);
+ uint64_t pcr_mask; /* Available bits in PCR register */
+ uint64_t pcr_supported; /* Bits for supported PowerISA versions */
+ uint32_t svr;
+ uint64_t insns_flags;
+ uint64_t insns_flags2;
+ uint64_t msr_mask;
+ uint64_t lpcr_mask; /* Available bits in the LPCR */
+ uint64_t lpcr_pm; /* Power-saving mode Exit Cause Enable bits */
+ powerpc_mmu_t mmu_model;
+ powerpc_excp_t excp_model;
+ powerpc_input_t bus_model;
+ uint32_t flags;
+ int bfd_mach;
+ uint32_t l1_dcache_size, l1_icache_size;
+ const PPCHash64Options *hash64_opts;
+ struct ppc_radix_page_info *radix_page_info;
+ uint32_t lrg_decr_bits;
+ int n_host_threads;
+ void (*init_proc)(CPUPPCState *env);
+ int (*check_pow)(CPUPPCState *env);
+ int (*handle_mmu_fault)(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx);
+ bool (*interrupts_big_endian)(PowerPCCPU *cpu);
+} PowerPCCPUClass;
+
+typedef struct PPCTimebase {
+ uint64_t guest_timebase;
+ int64_t time_of_the_day_ns;
+ bool runstate_paused;
+} PPCTimebase;
+
+#if 0
+extern const VMStateDescription vmstate_ppc_timebase;
+
+#define VMSTATE_PPC_TIMEBASE_V(_field, _state, _version) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .size = sizeof(PPCTimebase), \
+ .vmsd = &vmstate_ppc_timebase, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, PPCTimebase), \
+}
+
+void cpu_ppc_clock_vm_state_change(void *opaque, int running,
+ RunState state);
+#endif
+#endif
diff --git a/qemu/target/ppc/cpu.c b/qemu/target/ppc/cpu.c
new file mode 100644
index 00000000..28011668
--- /dev/null
+++ b/qemu/target/ppc/cpu.c
@@ -0,0 +1,47 @@
+/*
+ * PowerPC CPU routines for qemu.
+ *
+ * Copyright (c) 2017 Nikunj A Dadhania, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "cpu-models.h"
+
+target_ulong cpu_read_xer(CPUPPCState *env)
+{
+ if (is_isa300(env)) {
+ return env->xer | (env->so << XER_SO) |
+ (env->ov << XER_OV) | (env->ca << XER_CA) |
+ (env->ov32 << XER_OV32) | (env->ca32 << XER_CA32);
+ }
+
+ return env->xer | (env->so << XER_SO) | (env->ov << XER_OV) |
+ (env->ca << XER_CA);
+}
+
+void cpu_write_xer(CPUPPCState *env, target_ulong xer)
+{
+ env->so = (xer >> XER_SO) & 1;
+ env->ov = (xer >> XER_OV) & 1;
+ env->ca = (xer >> XER_CA) & 1;
+ /* write all the flags; the isa300 check is applied when reading back */
+ env->ov32 = (xer >> XER_OV32) & 1;
+ env->ca32 = (xer >> XER_CA32) & 1;
+ env->xer = xer & ~((1ul << XER_SO) |
+ (1ul << XER_OV) | (1ul << XER_CA) |
+ (1ul << XER_OV32) | (1ul << XER_CA32));
+}
diff --git a/qemu/target/ppc/cpu.h b/qemu/target/ppc/cpu.h
new file mode 100644
index 00000000..0357a4ca
--- /dev/null
+++ b/qemu/target/ppc/cpu.h
@@ -0,0 +1,2625 @@
+/*
+ * PowerPC emulation cpu definitions for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_CPU_H
+#define PPC_CPU_H
+
+#include "qemu/int128.h"
+#include "exec/cpu-defs.h"
+#include "cpu-qom.h"
+
+typedef struct TCGContext TCGContext;
+
+#define TCG_GUEST_DEFAULT_MO 0
+
+#define TARGET_PAGE_BITS_64K 16
+#define TARGET_PAGE_BITS_16M 24
+
+#if defined(TARGET_PPC64)
+#define PPC_ELF_MACHINE EM_PPC64
+#else
+#define PPC_ELF_MACHINE EM_PPC
+#endif
+
+#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
+#define PPC_BIT32(bit) (0x80000000 >> (bit))
+#define PPC_BIT8(bit) (0x80 >> (bit))
+#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | \
+ PPC_BIT32(bs))
+#define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be)) | PPC_BIT8(bs))
+
+/*****************************************************************************/
+/* Exception vectors definitions */
+enum {
+ POWERPC_EXCP_NONE = -1,
+ /* The first 64 entries are used by the PowerPC embedded specification */
+ POWERPC_EXCP_CRITICAL = 0, /* Critical input */
+ POWERPC_EXCP_MCHECK = 1, /* Machine check exception */
+ POWERPC_EXCP_DSI = 2, /* Data storage exception */
+ POWERPC_EXCP_ISI = 3, /* Instruction storage exception */
+ POWERPC_EXCP_EXTERNAL = 4, /* External input */
+ POWERPC_EXCP_ALIGN = 5, /* Alignment exception */
+ POWERPC_EXCP_PROGRAM = 6, /* Program exception */
+ POWERPC_EXCP_FPU = 7, /* Floating-point unavailable exception */
+ POWERPC_EXCP_SYSCALL = 8, /* System call exception */
+ POWERPC_EXCP_APU = 9, /* Auxiliary processor unavailable */
+ POWERPC_EXCP_DECR = 10, /* Decrementer exception */
+ POWERPC_EXCP_FIT = 11, /* Fixed-interval timer interrupt */
+ POWERPC_EXCP_WDT = 12, /* Watchdog timer interrupt */
+ POWERPC_EXCP_DTLB = 13, /* Data TLB miss */
+ POWERPC_EXCP_ITLB = 14, /* Instruction TLB miss */
+ POWERPC_EXCP_DEBUG
= 15, /* Debug interrupt */ + /* Vectors 16 to 31 are reserved */ + POWERPC_EXCP_SPEU = 32, /* SPE/embedded floating-point unavailable */ + POWERPC_EXCP_EFPDI = 33, /* Embedded floating-point data interrupt */ + POWERPC_EXCP_EFPRI = 34, /* Embedded floating-point round interrupt */ + POWERPC_EXCP_EPERFM = 35, /* Embedded performance monitor interrupt */ + POWERPC_EXCP_DOORI = 36, /* Embedded doorbell interrupt */ + POWERPC_EXCP_DOORCI = 37, /* Embedded doorbell critical interrupt */ + POWERPC_EXCP_GDOORI = 38, /* Embedded guest doorbell interrupt */ + POWERPC_EXCP_GDOORCI = 39, /* Embedded guest doorbell critical interrupt*/ + POWERPC_EXCP_HYPPRIV = 41, /* Embedded hypervisor priv instruction */ + /* Vectors 42 to 63 are reserved */ + /* Exceptions defined in the PowerPC server specification */ + POWERPC_EXCP_RESET = 64, /* System reset exception */ + POWERPC_EXCP_DSEG = 65, /* Data segment exception */ + POWERPC_EXCP_ISEG = 66, /* Instruction segment exception */ + POWERPC_EXCP_HDECR = 67, /* Hypervisor decrementer exception */ + POWERPC_EXCP_TRACE = 68, /* Trace exception */ + POWERPC_EXCP_HDSI = 69, /* Hypervisor data storage exception */ + POWERPC_EXCP_HISI = 70, /* Hypervisor instruction storage exception */ + POWERPC_EXCP_HDSEG = 71, /* Hypervisor data segment exception */ + POWERPC_EXCP_HISEG = 72, /* Hypervisor instruction segment exception */ + POWERPC_EXCP_VPU = 73, /* Vector unavailable exception */ + /* 40x specific exceptions */ + POWERPC_EXCP_PIT = 74, /* Programmable interval timer interrupt */ + /* 601 specific exceptions */ + POWERPC_EXCP_IO = 75, /* IO error exception */ + POWERPC_EXCP_RUNM = 76, /* Run mode exception */ + /* 602 specific exceptions */ + POWERPC_EXCP_EMUL = 77, /* Emulation trap exception */ + /* 602/603 specific exceptions */ + POWERPC_EXCP_IFTLB = 78, /* Instruction fetch TLB miss */ + POWERPC_EXCP_DLTLB = 79, /* Data load TLB miss */ + POWERPC_EXCP_DSTLB = 80, /* Data store TLB miss */ + /* Exceptions available on most PowerPC */ + POWERPC_EXCP_FPA = 81, /* Floating-point assist exception */ + POWERPC_EXCP_DABR = 82, /* Data address breakpoint */ + POWERPC_EXCP_IABR = 83, /* Instruction address breakpoint */ + POWERPC_EXCP_SMI = 84, /* System management interrupt */ + POWERPC_EXCP_PERFM = 85, /* Embedded performance monitor interrupt */ + /* 7xx/74xx specific exceptions */ + POWERPC_EXCP_THERM = 86, /* Thermal interrupt */ + /* 74xx specific exceptions */ + POWERPC_EXCP_VPUA = 87, /* Vector assist exception */ + /* 970FX specific exceptions */ + POWERPC_EXCP_SOFTP = 88, /* Soft patch exception */ + POWERPC_EXCP_MAINT = 89, /* Maintenance exception */ + /* Freescale embedded cores specific exceptions */ + POWERPC_EXCP_MEXTBR = 90, /* Maskable external breakpoint */ + POWERPC_EXCP_NMEXTBR = 91, /* Non maskable external breakpoint */ + POWERPC_EXCP_ITLBE = 92, /* Instruction TLB error */ + POWERPC_EXCP_DTLBE = 93, /* Data TLB error */ + /* VSX Unavailable (Power ISA 2.06 and later) */ + POWERPC_EXCP_VSXU = 94, /* VSX Unavailable */ + POWERPC_EXCP_FU = 95, /* Facility Unavailable */ + /* Additional ISA 2.06 and later server exceptions */ + POWERPC_EXCP_HV_EMU = 96, /* HV emulation assistance */ + POWERPC_EXCP_HV_MAINT = 97, /* HMI */ + POWERPC_EXCP_HV_FU = 98, /* Hypervisor Facility unavailable */ + /* Server doorbell variants */ + POWERPC_EXCP_SDOOR = 99, + POWERPC_EXCP_SDOOR_HV = 100, + /* ISA 3.00 additions */ + POWERPC_EXCP_HVIRT = 101, + /* EOL */ + POWERPC_EXCP_NB = 102, + /* QEMU exceptions: used internally during code translation */ + 
POWERPC_EXCP_STOP = 0x200, /* stop translation */ + POWERPC_EXCP_BRANCH = 0x201, /* branch instruction */ + /* QEMU exceptions: special cases we want to stop translation */ + POWERPC_EXCP_SYNC = 0x202, /* context synchronizing instruction */ + POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */ +}; + +/* Exceptions error codes */ +enum { + /* Exception subtypes for POWERPC_EXCP_ALIGN */ + POWERPC_EXCP_ALIGN_FP = 0x01, /* FP alignment exception */ + POWERPC_EXCP_ALIGN_LST = 0x02, /* Unaligned mult/extern load/store */ + POWERPC_EXCP_ALIGN_LE = 0x03, /* Multiple little-endian access */ + POWERPC_EXCP_ALIGN_PROT = 0x04, /* Access cross protection boundary */ + POWERPC_EXCP_ALIGN_BAT = 0x05, /* Access cross a BAT/seg boundary */ + POWERPC_EXCP_ALIGN_CACHE = 0x06, /* Impossible dcbz access */ + /* Exception subtypes for POWERPC_EXCP_PROGRAM */ + /* FP exceptions */ + POWERPC_EXCP_FP = 0x10, + POWERPC_EXCP_FP_OX = 0x01, /* FP overflow */ + POWERPC_EXCP_FP_UX = 0x02, /* FP underflow */ + POWERPC_EXCP_FP_ZX = 0x03, /* FP divide by zero */ + POWERPC_EXCP_FP_XX = 0x04, /* FP inexact */ + POWERPC_EXCP_FP_VXSNAN = 0x05, /* FP invalid SNaN op */ + POWERPC_EXCP_FP_VXISI = 0x06, /* FP invalid infinite subtraction */ + POWERPC_EXCP_FP_VXIDI = 0x07, /* FP invalid infinite divide */ + POWERPC_EXCP_FP_VXZDZ = 0x08, /* FP invalid zero divide */ + POWERPC_EXCP_FP_VXIMZ = 0x09, /* FP invalid infinite * zero */ + POWERPC_EXCP_FP_VXVC = 0x0A, /* FP invalid compare */ + POWERPC_EXCP_FP_VXSOFT = 0x0B, /* FP invalid operation */ + POWERPC_EXCP_FP_VXSQRT = 0x0C, /* FP invalid square root */ + POWERPC_EXCP_FP_VXCVI = 0x0D, /* FP invalid integer conversion */ + /* Invalid instruction */ + POWERPC_EXCP_INVAL = 0x20, + POWERPC_EXCP_INVAL_INVAL = 0x01, /* Invalid instruction */ + POWERPC_EXCP_INVAL_LSWX = 0x02, /* Invalid lswx instruction */ + POWERPC_EXCP_INVAL_SPR = 0x03, /* Invalid SPR access */ + POWERPC_EXCP_INVAL_FP = 0x04, /* Unimplemented mandatory fp instr */ + /* Privileged instruction */ + POWERPC_EXCP_PRIV = 0x30, + POWERPC_EXCP_PRIV_OPC = 0x01, /* Privileged operation exception */ + POWERPC_EXCP_PRIV_REG = 0x02, /* Privileged register exception */ + /* Trap */ + POWERPC_EXCP_TRAP = 0x40, +}; + +#define PPC_INPUT(env) ((env)->bus_model) + +/*****************************************************************************/ +typedef struct opc_handler_t opc_handler_t; + +/*****************************************************************************/ +/* Types used to describe some PowerPC registers etc. 
*/
+typedef struct DisasContext DisasContext;
+typedef struct ppc_spr_t ppc_spr_t;
+typedef union ppc_tlb_t ppc_tlb_t;
+typedef struct ppc_hash_pte64 ppc_hash_pte64_t;
+
+/* SPR access micro-op generation callbacks */
+struct ppc_spr_t {
+ void (*uea_read)(DisasContext *ctx, int gpr_num, int spr_num);
+ void (*uea_write)(DisasContext *ctx, int spr_num, int gpr_num);
+ void (*oea_read)(DisasContext *ctx, int gpr_num, int spr_num);
+ void (*oea_write)(DisasContext *ctx, int spr_num, int gpr_num);
+ void (*hea_read)(DisasContext *ctx, int gpr_num, int spr_num);
+ void (*hea_write)(DisasContext *ctx, int spr_num, int gpr_num);
+ const char *name;
+ target_ulong default_value;
+#ifdef CONFIG_KVM
+ /*
+ * We (ab)use the fact that all SPR ids for the ONE_REG interface
+ * have KVM_REG_PPC set, so an id of 0 can mean "don't sync this SPR".
+ */
+ uint64_t one_reg_id;
+#endif
+};
+
+/* VSX/Altivec registers (128 bits) */
+typedef union _ppc_vsr_t {
+ uint8_t u8[16];
+ uint16_t u16[8];
+ uint32_t u32[4];
+ uint64_t u64[2];
+ int8_t s8[16];
+ int16_t s16[8];
+ int32_t s32[4];
+ int64_t s64[2];
+ float32 f32[4];
+ float64 f64[2];
+ float128 f128;
+#ifdef CONFIG_INT128
+ __uint128_t u128;
+#endif
+ Int128 s128;
+} ppc_vsr_t;
+
+typedef ppc_vsr_t ppc_avr_t;
+typedef ppc_vsr_t ppc_fprp_t;
+
+/* Software TLB cache */
+typedef struct ppc6xx_tlb_t ppc6xx_tlb_t;
+struct ppc6xx_tlb_t {
+ target_ulong pte0;
+ target_ulong pte1;
+ target_ulong EPN;
+};
+
+typedef struct ppcemb_tlb_t ppcemb_tlb_t;
+struct ppcemb_tlb_t {
+ uint64_t RPN;
+ target_ulong EPN;
+ target_ulong PID;
+ target_ulong size;
+ uint32_t prot;
+ uint32_t attr; /* Storage attributes */
+};
+
+typedef struct ppcmas_tlb_t {
+ uint32_t mas8;
+ uint32_t mas1;
+ uint64_t mas2;
+ uint64_t mas7_3;
+} ppcmas_tlb_t;
+
+union ppc_tlb_t {
+ ppc6xx_tlb_t *tlb6;
+ ppcemb_tlb_t *tlbe;
+ ppcmas_tlb_t *tlbm;
+};
+
+/* possible TLB variants */
+#define TLB_NONE 0
+#define TLB_6XX 1
+#define TLB_EMB 2
+#define TLB_MAS 3
+
+typedef struct PPCHash64SegmentPageSizes PPCHash64SegmentPageSizes;
+
+typedef struct ppc_slb_t ppc_slb_t;
+struct ppc_slb_t {
+ uint64_t esid;
+ uint64_t vsid;
+ const PPCHash64SegmentPageSizes *sps;
+};
+
+#define MAX_SLB_ENTRIES 64
+#define SEGMENT_SHIFT_256M 28
+#define SEGMENT_MASK_256M (~((1ULL << SEGMENT_SHIFT_256M) - 1))
+
+#define SEGMENT_SHIFT_1T 40
+#define SEGMENT_MASK_1T (~((1ULL << SEGMENT_SHIFT_1T) - 1))
+
+typedef struct ppc_v3_pate_t {
+ uint64_t dw0;
+ uint64_t dw1;
+} ppc_v3_pate_t;
+
+/*****************************************************************************/
+/* Machine state register bits definition */
+#define MSR_SF 63 /* Sixty-four-bit mode hflags */
+#define MSR_TAG 62 /* Tag-active mode (POWERx ?)
*/ +#define MSR_ISF 61 /* Sixty-four-bit interrupt mode on 630 */ +#define MSR_HV 60 /* hypervisor state hflags */ +#define MSR_TS0 34 /* Transactional state, 2 bits (Book3s) */ +#define MSR_TS1 33 +#define MSR_TM 32 /* Transactional Memory Available (Book3s) */ +#define MSR_CM 31 /* Computation mode for BookE hflags */ +#define MSR_ICM 30 /* Interrupt computation mode for BookE */ +#define MSR_GS 28 /* guest state for BookE */ +#define MSR_UCLE 26 /* User-mode cache lock enable for BookE */ +#define MSR_VR 25 /* altivec available x hflags */ +#define MSR_SPE 25 /* SPE enable for BookE x hflags */ +#define MSR_AP 23 /* Access privilege state on 602 hflags */ +#define MSR_VSX 23 /* Vector Scalar Extension (ISA 2.06 and later) x hflags */ +#define MSR_SA 22 /* Supervisor access mode on 602 hflags */ +#define MSR_KEY 19 /* key bit on 603e */ +#define MSR_POW 18 /* Power management */ +#define MSR_TGPR 17 /* TGPR usage on 602/603 x */ +#define MSR_CE 17 /* Critical interrupt enable on embedded PowerPC x */ +#define MSR_ILE 16 /* Interrupt little-endian mode */ +#define MSR_EE 15 /* External interrupt enable */ +#define MSR_PR 14 /* Problem state hflags */ +#define MSR_FP 13 /* Floating point available hflags */ +#define MSR_ME 12 /* Machine check interrupt enable */ +#define MSR_FE0 11 /* Floating point exception mode 0 hflags */ +#define MSR_SE 10 /* Single-step trace enable x hflags */ +#define MSR_DWE 10 /* Debug wait enable on 405 x */ +#define MSR_UBLE 10 /* User BTB lock enable on e500 x */ +#define MSR_BE 9 /* Branch trace enable x hflags */ +#define MSR_DE 9 /* Debug interrupts enable on embedded PowerPC x */ +#define MSR_FE1 8 /* Floating point exception mode 1 hflags */ +#define MSR_AL 7 /* AL bit on POWER */ +#define MSR_EP 6 /* Exception prefix on 601 */ +#define MSR_IR 5 /* Instruction relocate */ +#define MSR_DR 4 /* Data relocate */ +#define MSR_IS 5 /* Instruction address space (BookE) */ +#define MSR_DS 4 /* Data address space (BookE) */ +#define MSR_PE 3 /* Protection enable on 403 */ +#define MSR_PX 2 /* Protection exclusive on 403 x */ +#define MSR_PMM 2 /* Performance monitor mark on POWER x */ +#define MSR_RI 1 /* Recoverable interrupt 1 */ +#define MSR_LE 0 /* Little-endian mode 1 hflags */ + +/* LPCR bits */ +#define LPCR_VPM0 PPC_BIT(0) +#define LPCR_VPM1 PPC_BIT(1) +#define LPCR_ISL PPC_BIT(2) +#define LPCR_KBV PPC_BIT(3) +#define LPCR_DPFD_SHIFT (63 - 11) +#define LPCR_DPFD (0x7ull << LPCR_DPFD_SHIFT) +#define LPCR_VRMASD_SHIFT (63 - 16) +#define LPCR_VRMASD (0x1full << LPCR_VRMASD_SHIFT) +/* P9: Power-saving mode Exit Cause Enable (Upper Section) Mask */ +#define LPCR_PECE_U_SHIFT (63 - 19) +#define LPCR_PECE_U_MASK (0x7ull << LPCR_PECE_U_SHIFT) +#define LPCR_HVEE PPC_BIT(17) /* Hypervisor Virt Exit Enable */ +#define LPCR_RMLS_SHIFT (63 - 37) +#define LPCR_RMLS (0xfull << LPCR_RMLS_SHIFT) +#define LPCR_ILE PPC_BIT(38) +#define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */ +#define LPCR_AIL (3ull << LPCR_AIL_SHIFT) +#define LPCR_UPRT PPC_BIT(41) /* Use Process Table */ +#define LPCR_EVIRT PPC_BIT(42) /* Enhanced Virtualisation */ +#define LPCR_HR PPC_BIT(43) /* Host Radix */ +#define LPCR_ONL PPC_BIT(45) +#define LPCR_LD PPC_BIT(46) /* Large Decrementer */ +#define LPCR_P7_PECE0 PPC_BIT(49) +#define LPCR_P7_PECE1 PPC_BIT(50) +#define LPCR_P7_PECE2 PPC_BIT(51) +#define LPCR_P8_PECE0 PPC_BIT(47) +#define LPCR_P8_PECE1 PPC_BIT(48) +#define LPCR_P8_PECE2 PPC_BIT(49) +#define LPCR_P8_PECE3 PPC_BIT(50) +#define LPCR_P8_PECE4 PPC_BIT(51) +/* P9: Power-saving 
mode Exit Cause Enable (Lower Section) Mask */ +#define LPCR_PECE_L_SHIFT (63 - 51) +#define LPCR_PECE_L_MASK (0x1full << LPCR_PECE_L_SHIFT) +#define LPCR_PDEE PPC_BIT(47) /* Privileged Doorbell Exit EN */ +#define LPCR_HDEE PPC_BIT(48) /* Hyperv Doorbell Exit Enable */ +#define LPCR_EEE PPC_BIT(49) /* External Exit Enable */ +#define LPCR_DEE PPC_BIT(50) /* Decrementer Exit Enable */ +#define LPCR_OEE PPC_BIT(51) /* Other Exit Enable */ +#define LPCR_MER PPC_BIT(52) +#define LPCR_GTSE PPC_BIT(53) /* Guest Translation Shootdown */ +#define LPCR_TC PPC_BIT(54) +#define LPCR_HEIC PPC_BIT(59) /* HV Extern Interrupt Control */ +#define LPCR_LPES0 PPC_BIT(60) +#define LPCR_LPES1 PPC_BIT(61) +#define LPCR_RMI PPC_BIT(62) +#define LPCR_HVICE PPC_BIT(62) /* HV Virtualisation Int Enable */ +#define LPCR_HDICE PPC_BIT(63) + +/* PSSCR bits */ +#define PSSCR_ESL PPC_BIT(42) /* Enable State Loss */ +#define PSSCR_EC PPC_BIT(43) /* Exit Criterion */ + +/* HFSCR bits */ +#define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */ +#define HFSCR_IC_MSGP 0xA + +#define msr_sf ((env->msr >> MSR_SF) & 1) +#define msr_isf ((env->msr >> MSR_ISF) & 1) +#if defined(TARGET_PPC64) +#define msr_hv ((env->msr >> MSR_HV) & 1) +#else +#define msr_hv (0) +#endif +#define msr_cm ((env->msr >> MSR_CM) & 1) +#define msr_icm ((env->msr >> MSR_ICM) & 1) +#define msr_gs ((env->msr >> MSR_GS) & 1) +#define msr_ucle ((env->msr >> MSR_UCLE) & 1) +#define msr_vr ((env->msr >> MSR_VR) & 1) +#define msr_spe ((env->msr >> MSR_SPE) & 1) +#define msr_ap ((env->msr >> MSR_AP) & 1) +#define msr_vsx ((env->msr >> MSR_VSX) & 1) +#define msr_sa ((env->msr >> MSR_SA) & 1) +#define msr_key ((env->msr >> MSR_KEY) & 1) +#define msr_pow ((env->msr >> MSR_POW) & 1) +#define msr_tgpr ((env->msr >> MSR_TGPR) & 1) +#define msr_ce ((env->msr >> MSR_CE) & 1) +#define msr_ile ((env->msr >> MSR_ILE) & 1) +#define msr_ee ((env->msr >> MSR_EE) & 1) +#define msr_pr ((env->msr >> MSR_PR) & 1) +#define msr_fp ((env->msr >> MSR_FP) & 1) +#define msr_me ((env->msr >> MSR_ME) & 1) +#define msr_fe0 ((env->msr >> MSR_FE0) & 1) +#define msr_se ((env->msr >> MSR_SE) & 1) +#define msr_dwe ((env->msr >> MSR_DWE) & 1) +#define msr_uble ((env->msr >> MSR_UBLE) & 1) +#define msr_be ((env->msr >> MSR_BE) & 1) +#define msr_de ((env->msr >> MSR_DE) & 1) +#define msr_fe1 ((env->msr >> MSR_FE1) & 1) +#define msr_al ((env->msr >> MSR_AL) & 1) +#define msr_ep ((env->msr >> MSR_EP) & 1) +#define msr_ir ((env->msr >> MSR_IR) & 1) +#define msr_dr ((env->msr >> MSR_DR) & 1) +#define msr_is ((env->msr >> MSR_IS) & 1) +#define msr_ds ((env->msr >> MSR_DS) & 1) +#define msr_pe ((env->msr >> MSR_PE) & 1) +#define msr_px ((env->msr >> MSR_PX) & 1) +#define msr_pmm ((env->msr >> MSR_PMM) & 1) +#define msr_ri ((env->msr >> MSR_RI) & 1) +#define msr_le ((env->msr >> MSR_LE) & 1) +#define msr_ts ((env->msr >> MSR_TS1) & 3) +#define msr_tm ((env->msr >> MSR_TM) & 1) + +#define DBCR0_ICMP (1 << 27) +#define DBCR0_BRT (1 << 26) +#define DBSR_ICMP (1 << 27) +#define DBSR_BRT (1 << 26) + +/* Hypervisor bit is more specific */ +#if defined(TARGET_PPC64) +#define MSR_HVB (1ULL << MSR_HV) +#else +#define MSR_HVB (0ULL) +#endif + +/* DSISR */ +#define DSISR_NOPTE 0x40000000 +/* Not permitted by access authority of encoded access authority */ +#define DSISR_PROTFAULT 0x08000000 +#define DSISR_ISSTORE 0x02000000 +/* Not permitted by virtual page class key protection */ +#define DSISR_AMR 0x00200000 +/* Unsupported Radix Tree Configuration */ +#define DSISR_R_BADCONFIG 0x00080000 + +/* 
SRR1 error code fields */ + +#define SRR1_NOPTE DSISR_NOPTE +/* Not permitted due to no-execute or guard bit set */ +#define SRR1_NOEXEC_GUARD 0x10000000 +#define SRR1_PROTFAULT DSISR_PROTFAULT +#define SRR1_IAMR DSISR_AMR + +/* Facility Status and Control (FSCR) bits */ +#define FSCR_EBB (63 - 56) /* Event-Based Branch Facility */ +#define FSCR_TAR (63 - 55) /* Target Address Register */ +/* Interrupt cause mask and position in FSCR. HFSCR has the same format */ +#define FSCR_IC_MASK (0xFFULL) +#define FSCR_IC_POS (63 - 7) +#define FSCR_IC_DSCR_SPR3 2 +#define FSCR_IC_PMU 3 +#define FSCR_IC_BHRB 4 +#define FSCR_IC_TM 5 +#define FSCR_IC_EBB 7 +#define FSCR_IC_TAR 8 + +/* Exception state register bits definition */ +#define ESR_PIL PPC_BIT(36) /* Illegal Instruction */ +#define ESR_PPR PPC_BIT(37) /* Privileged Instruction */ +#define ESR_PTR PPC_BIT(38) /* Trap */ +#define ESR_FP PPC_BIT(39) /* Floating-Point Operation */ +#define ESR_ST PPC_BIT(40) /* Store Operation */ +#define ESR_AP PPC_BIT(44) /* Auxiliary Processor Operation */ +#define ESR_PUO PPC_BIT(45) /* Unimplemented Operation */ +#define ESR_BO PPC_BIT(46) /* Byte Ordering */ +#define ESR_PIE PPC_BIT(47) /* Imprecise exception */ +#define ESR_DATA PPC_BIT(53) /* Data Access (Embedded page table) */ +#define ESR_TLBI PPC_BIT(54) /* TLB Ineligible (Embedded page table) */ +#define ESR_PT PPC_BIT(55) /* Page Table (Embedded page table) */ +#define ESR_SPV PPC_BIT(56) /* SPE/VMX operation */ +#define ESR_EPID PPC_BIT(57) /* External Process ID operation */ +#define ESR_VLEMI PPC_BIT(58) /* VLE operation */ +#define ESR_MIF PPC_BIT(62) /* Misaligned instruction (VLE) */ + +/* Transaction EXception And Summary Register bits */ +#define TEXASR_FAILURE_PERSISTENT (63 - 7) +#define TEXASR_DISALLOWED (63 - 8) +#define TEXASR_NESTING_OVERFLOW (63 - 9) +#define TEXASR_FOOTPRINT_OVERFLOW (63 - 10) +#define TEXASR_SELF_INDUCED_CONFLICT (63 - 11) +#define TEXASR_NON_TRANSACTIONAL_CONFLICT (63 - 12) +#define TEXASR_TRANSACTION_CONFLICT (63 - 13) +#define TEXASR_TRANSLATION_INVALIDATION_CONFLICT (63 - 14) +#define TEXASR_IMPLEMENTATION_SPECIFIC (63 - 15) +#define TEXASR_INSTRUCTION_FETCH_CONFLICT (63 - 16) +#define TEXASR_ABORT (63 - 31) +#define TEXASR_SUSPENDED (63 - 32) +#define TEXASR_PRIVILEGE_HV (63 - 34) +#define TEXASR_PRIVILEGE_PR (63 - 35) +#define TEXASR_FAILURE_SUMMARY (63 - 36) +#define TEXASR_TFIAR_EXACT (63 - 37) +#define TEXASR_ROT (63 - 38) +#define TEXASR_TRANSACTION_LEVEL (63 - 52) /* 12 bits */ + +enum { + POWERPC_FLAG_NONE = 0x00000000, + /* Flag for MSR bit 25 signification (VRE/SPE) */ + POWERPC_FLAG_SPE = 0x00000001, + POWERPC_FLAG_VRE = 0x00000002, + /* Flag for MSR bit 17 signification (TGPR/CE) */ + POWERPC_FLAG_TGPR = 0x00000004, + POWERPC_FLAG_CE = 0x00000008, + /* Flag for MSR bit 10 signification (SE/DWE/UBLE) */ + POWERPC_FLAG_SE = 0x00000010, + POWERPC_FLAG_DWE = 0x00000020, + POWERPC_FLAG_UBLE = 0x00000040, + /* Flag for MSR bit 9 signification (BE/DE) */ + POWERPC_FLAG_BE = 0x00000080, + POWERPC_FLAG_DE = 0x00000100, + /* Flag for MSR bit 2 signification (PX/PMM) */ + POWERPC_FLAG_PX = 0x00000200, + POWERPC_FLAG_PMM = 0x00000400, + /* Flag for special features */ + /* Decrementer clock: RTC clock (POWER, 601) or bus clock */ + POWERPC_FLAG_RTC_CLK = 0x00010000, + POWERPC_FLAG_BUS_CLK = 0x00020000, + /* Has CFAR */ + POWERPC_FLAG_CFAR = 0x00040000, + /* Has VSX */ + POWERPC_FLAG_VSX = 0x00080000, + /* Has Transaction Memory (ISA 2.07) */ + POWERPC_FLAG_TM = 0x00100000, +}; + 
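Several of the MSR bit positions defined above are overloaded: bit 25 is VR (Altivec available) on 74xx-class cores but the SPE enable on e500, bit 17 is TGPR or CE, bit 9 is BE or DE, and the POWERPC_FLAG_* values record which reading applies to the emulated core. A minimal sketch of the intended disambiguation, assuming only the msr_vr accessor above and the flags field that CPUPPCState declares later in this header (the helper name itself is hypothetical):

static inline int ppc_altivec_available(CPUPPCState *env)
{
    /* The flag chosen at CPU-model definition time fixes what bit 25 means */
    if (env->flags & POWERPC_FLAG_VRE) {
        return msr_vr;  /* bit 25 really is MSR[VR] on this core */
    }
    /* On POWERPC_FLAG_SPE cores the same bit is the SPE enable instead */
    return 0;
}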
+/*****************************************************************************/
+/* Floating point status and control register */
+#define FPSCR_DRN2 34 /* Decimal Floating-Point rounding control */
+#define FPSCR_DRN1 33 /* Decimal Floating-Point rounding control */
+#define FPSCR_DRN0 32 /* Decimal Floating-Point rounding control */
+#define FPSCR_FX 31 /* Floating-point exception summary */
+#define FPSCR_FEX 30 /* Floating-point enabled exception summary */
+#define FPSCR_VX 29 /* Floating-point invalid operation exception summ. */
+#define FPSCR_OX 28 /* Floating-point overflow exception */
+#define FPSCR_UX 27 /* Floating-point underflow exception */
+#define FPSCR_ZX 26 /* Floating-point zero divide exception */
+#define FPSCR_XX 25 /* Floating-point inexact exception */
+#define FPSCR_VXSNAN 24 /* Floating-point invalid operation exception (sNan) */
+#define FPSCR_VXISI 23 /* Floating-point invalid operation exception (inf) */
+#define FPSCR_VXIDI 22 /* Floating-point invalid operation exception (inf) */
+#define FPSCR_VXZDZ 21 /* Floating-point invalid operation exception (zero) */
+#define FPSCR_VXIMZ 20 /* Floating-point invalid operation exception (inf) */
+#define FPSCR_VXVC 19 /* Floating-point invalid operation exception (comp) */
+#define FPSCR_FR 18 /* Floating-point fraction rounded */
+#define FPSCR_FI 17 /* Floating-point fraction inexact */
+#define FPSCR_C 16 /* Floating-point result class descriptor */
+#define FPSCR_FL 15 /* Floating-point less than or negative */
+#define FPSCR_FG 14 /* Floating-point greater than or negative */
+#define FPSCR_FE 13 /* Floating-point equal or zero */
+#define FPSCR_FU 12 /* Floating-point unordered or NaN */
+#define FPSCR_FPCC 12 /* Floating-point condition code */
+#define FPSCR_FPRF 12 /* Floating-point result flags */
+#define FPSCR_VXSOFT 10 /* Floating-point invalid operation exception (soft) */
+#define FPSCR_VXSQRT 9 /* Floating-point invalid operation exception (sqrt) */
+#define FPSCR_VXCVI 8 /* Floating-point invalid operation exception (int) */
+#define FPSCR_VE 7 /* Floating-point invalid operation exception enable */
+#define FPSCR_OE 6 /* Floating-point overflow exception enable */
+#define FPSCR_UE 5 /* Floating-point underflow exception enable */
+#define FPSCR_ZE 4 /* Floating-point zero divide exception enable */
+#define FPSCR_XE 3 /* Floating-point inexact exception enable */
+#define FPSCR_NI 2 /* Floating-point non-IEEE mode */
+#define FPSCR_RN1 1
+#define FPSCR_RN0 0 /* Floating-point rounding control */
+#define fpscr_drn (((env->fpscr) & FP_DRN) >> FPSCR_DRN0)
+#define fpscr_fex (((env->fpscr) >> FPSCR_FEX) & 0x1)
+#define fpscr_vx (((env->fpscr) >> FPSCR_VX) & 0x1)
+#define fpscr_ox (((env->fpscr) >> FPSCR_OX) & 0x1)
+#define fpscr_ux (((env->fpscr) >> FPSCR_UX) & 0x1)
+#define fpscr_zx (((env->fpscr) >> FPSCR_ZX) & 0x1)
+#define fpscr_xx (((env->fpscr) >> FPSCR_XX) & 0x1)
+#define fpscr_vxsnan (((env->fpscr) >> FPSCR_VXSNAN) & 0x1)
+#define fpscr_vxisi (((env->fpscr) >> FPSCR_VXISI) & 0x1)
+#define fpscr_vxidi (((env->fpscr) >> FPSCR_VXIDI) & 0x1)
+#define fpscr_vxzdz (((env->fpscr) >> FPSCR_VXZDZ) & 0x1)
+#define fpscr_vximz (((env->fpscr) >> FPSCR_VXIMZ) & 0x1)
+#define fpscr_vxvc (((env->fpscr) >> FPSCR_VXVC) & 0x1)
+#define fpscr_fpcc (((env->fpscr) >> FPSCR_FPCC) & 0xF)
+#define fpscr_vxsoft (((env->fpscr) >> FPSCR_VXSOFT) & 0x1)
+#define fpscr_vxsqrt (((env->fpscr) >> FPSCR_VXSQRT) & 0x1)
+#define fpscr_vxcvi (((env->fpscr) >> FPSCR_VXCVI) & 0x1)
+#define fpscr_ve (((env->fpscr) >> FPSCR_VE) &
0x1) +#define fpscr_oe (((env->fpscr) >> FPSCR_OE) & 0x1) +#define fpscr_ue (((env->fpscr) >> FPSCR_UE) & 0x1) +#define fpscr_ze (((env->fpscr) >> FPSCR_ZE) & 0x1) +#define fpscr_xe (((env->fpscr) >> FPSCR_XE) & 0x1) +#define fpscr_ni (((env->fpscr) >> FPSCR_NI) & 0x1) +#define fpscr_rn (((env->fpscr) >> FPSCR_RN0) & 0x3) +/* Invalid operation exception summary */ +#define fpscr_ix ((env->fpscr) & ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \ + (1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \ + (1 << FPSCR_VXIMZ) | (1 << FPSCR_VXVC) | \ + (1 << FPSCR_VXSOFT) | (1 << FPSCR_VXSQRT) | \ + (1 << FPSCR_VXCVI))) +/* exception summary */ +#define fpscr_ex (((env->fpscr) >> FPSCR_XX) & 0x1F) +/* enabled exception summary */ +#define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \ + 0x1F) + +#define FP_DRN2 (1ull << FPSCR_DRN2) +#define FP_DRN1 (1ull << FPSCR_DRN1) +#define FP_DRN0 (1ull << FPSCR_DRN0) +#define FP_DRN (FP_DRN2 | FP_DRN1 | FP_DRN0) +#define FP_FX (1ull << FPSCR_FX) +#define FP_FEX (1ull << FPSCR_FEX) +#define FP_VX (1ull << FPSCR_VX) +#define FP_OX (1ull << FPSCR_OX) +#define FP_UX (1ull << FPSCR_UX) +#define FP_ZX (1ull << FPSCR_ZX) +#define FP_XX (1ull << FPSCR_XX) +#define FP_VXSNAN (1ull << FPSCR_VXSNAN) +#define FP_VXISI (1ull << FPSCR_VXISI) +#define FP_VXIDI (1ull << FPSCR_VXIDI) +#define FP_VXZDZ (1ull << FPSCR_VXZDZ) +#define FP_VXIMZ (1ull << FPSCR_VXIMZ) +#define FP_VXVC (1ull << FPSCR_VXVC) +#define FP_FR (1ull << FPSCR_FR) +#define FP_FI (1ull << FPSCR_FI) +#define FP_C (1ull << FPSCR_C) +#define FP_FL (1ull << FPSCR_FL) +#define FP_FG (1ull << FPSCR_FG) +#define FP_FE (1ull << FPSCR_FE) +#define FP_FU (1ull << FPSCR_FU) +#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU) +#define FP_FPRF (FP_C | FP_FPCC) +#define FP_VXSOFT (1ull << FPSCR_VXSOFT) +#define FP_VXSQRT (1ull << FPSCR_VXSQRT) +#define FP_VXCVI (1ull << FPSCR_VXCVI) +#define FP_VE (1ull << FPSCR_VE) +#define FP_OE (1ull << FPSCR_OE) +#define FP_UE (1ull << FPSCR_UE) +#define FP_ZE (1ull << FPSCR_ZE) +#define FP_XE (1ull << FPSCR_XE) +#define FP_NI (1ull << FPSCR_NI) +#define FP_RN1 (1ull << FPSCR_RN1) +#define FP_RN0 (1ull << FPSCR_RN0) +#define FP_RN (FP_RN1 | FP_RN0) + +#define FP_ENABLES (FP_VE | FP_OE | FP_UE | FP_ZE | FP_XE) +#define FP_STATUS (FP_FR | FP_FI | FP_FPRF) + +/* the exception bits which can be cleared by mcrfs - includes FX */ +#define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \ + FP_XX | FP_VXSNAN | FP_VXISI | FP_VXIDI | \ + FP_VXZDZ | FP_VXIMZ | FP_VXVC | FP_VXSOFT | \ + FP_VXSQRT | FP_VXCVI) + +/*****************************************************************************/ +/* Vector status and control register */ +#define VSCR_NJ 16 /* Vector non-java */ +#define VSCR_SAT 0 /* Vector saturation */ + +/*****************************************************************************/ +/* BookE e500 MMU registers */ + +#define MAS0_NV_SHIFT 0 +#define MAS0_NV_MASK (0xfff << MAS0_NV_SHIFT) + +#define MAS0_WQ_SHIFT 12 +#define MAS0_WQ_MASK (3 << MAS0_WQ_SHIFT) +/* Write TLB entry regardless of reservation */ +#define MAS0_WQ_ALWAYS (0 << MAS0_WQ_SHIFT) +/* Write TLB entry only already in use */ +#define MAS0_WQ_COND (1 << MAS0_WQ_SHIFT) +/* Clear TLB entry */ +#define MAS0_WQ_CLR_RSRV (2 << MAS0_WQ_SHIFT) + +#define MAS0_HES_SHIFT 14 +#define MAS0_HES (1 << MAS0_HES_SHIFT) + +#define MAS0_ESEL_SHIFT 16 +#define MAS0_ESEL_MASK (0xfff << MAS0_ESEL_SHIFT) + +#define MAS0_TLBSEL_SHIFT 28 +#define MAS0_TLBSEL_MASK (3 << MAS0_TLBSEL_SHIFT) +#define MAS0_TLBSEL_TLB0 (0 << 
MAS0_TLBSEL_SHIFT) +#define MAS0_TLBSEL_TLB1 (1 << MAS0_TLBSEL_SHIFT) +#define MAS0_TLBSEL_TLB2 (2 << MAS0_TLBSEL_SHIFT) +#define MAS0_TLBSEL_TLB3 (3 << MAS0_TLBSEL_SHIFT) + +#define MAS0_ATSEL_SHIFT 31 +#define MAS0_ATSEL (1 << MAS0_ATSEL_SHIFT) +#define MAS0_ATSEL_TLB 0 +#define MAS0_ATSEL_LRAT MAS0_ATSEL + +#define MAS1_TSIZE_SHIFT 7 +#define MAS1_TSIZE_MASK (0x1f << MAS1_TSIZE_SHIFT) + +#define MAS1_TS_SHIFT 12 +#define MAS1_TS (1 << MAS1_TS_SHIFT) + +#define MAS1_IND_SHIFT 13 +#define MAS1_IND (1 << MAS1_IND_SHIFT) + +#define MAS1_TID_SHIFT 16 +#define MAS1_TID_MASK (0x3fff << MAS1_TID_SHIFT) + +#define MAS1_IPROT_SHIFT 30 +#define MAS1_IPROT (1 << MAS1_IPROT_SHIFT) + +#define MAS1_VALID_SHIFT 31 +#define MAS1_VALID 0x80000000 + +#define MAS2_EPN_SHIFT 12 +#define MAS2_EPN_MASK (~0ULL << MAS2_EPN_SHIFT) + +#define MAS2_ACM_SHIFT 6 +#define MAS2_ACM (1 << MAS2_ACM_SHIFT) + +#define MAS2_VLE_SHIFT 5 +#define MAS2_VLE (1 << MAS2_VLE_SHIFT) + +#define MAS2_W_SHIFT 4 +#define MAS2_W (1 << MAS2_W_SHIFT) + +#define MAS2_I_SHIFT 3 +#define MAS2_I (1 << MAS2_I_SHIFT) + +#define MAS2_M_SHIFT 2 +#define MAS2_M (1 << MAS2_M_SHIFT) + +#define MAS2_G_SHIFT 1 +#define MAS2_G (1 << MAS2_G_SHIFT) + +#define MAS2_E_SHIFT 0 +#define MAS2_E (1 << MAS2_E_SHIFT) + +#define MAS3_RPN_SHIFT 12 +#define MAS3_RPN_MASK (0xfffff << MAS3_RPN_SHIFT) + +#define MAS3_U0 0x00000200 +#define MAS3_U1 0x00000100 +#define MAS3_U2 0x00000080 +#define MAS3_U3 0x00000040 +#define MAS3_UX 0x00000020 +#define MAS3_SX 0x00000010 +#define MAS3_UW 0x00000008 +#define MAS3_SW 0x00000004 +#define MAS3_UR 0x00000002 +#define MAS3_SR 0x00000001 +#define MAS3_SPSIZE_SHIFT 1 +#define MAS3_SPSIZE_MASK (0x3e << MAS3_SPSIZE_SHIFT) + +#define MAS4_TLBSELD_SHIFT MAS0_TLBSEL_SHIFT +#define MAS4_TLBSELD_MASK MAS0_TLBSEL_MASK +#define MAS4_TIDSELD_MASK 0x00030000 +#define MAS4_TIDSELD_PID0 0x00000000 +#define MAS4_TIDSELD_PID1 0x00010000 +#define MAS4_TIDSELD_PID2 0x00020000 +#define MAS4_TIDSELD_PIDZ 0x00030000 +#define MAS4_INDD 0x00008000 /* Default IND */ +#define MAS4_TSIZED_SHIFT MAS1_TSIZE_SHIFT +#define MAS4_TSIZED_MASK MAS1_TSIZE_MASK +#define MAS4_ACMD 0x00000040 +#define MAS4_VLED 0x00000020 +#define MAS4_WD 0x00000010 +#define MAS4_ID 0x00000008 +#define MAS4_MD 0x00000004 +#define MAS4_GD 0x00000002 +#define MAS4_ED 0x00000001 +#define MAS4_WIMGED_MASK 0x0000001f /* Default WIMGE */ +#define MAS4_WIMGED_SHIFT 0 + +#define MAS5_SGS 0x80000000 +#define MAS5_SLPID_MASK 0x00000fff + +#define MAS6_SPID0 0x3fff0000 +#define MAS6_SPID1 0x00007ffe +#define MAS6_ISIZE(x) MAS1_TSIZE(x) +#define MAS6_SAS 0x00000001 +#define MAS6_SPID MAS6_SPID0 +#define MAS6_SIND 0x00000002 /* Indirect page */ +#define MAS6_SIND_SHIFT 1 +#define MAS6_SPID_MASK 0x3fff0000 +#define MAS6_SPID_SHIFT 16 +#define MAS6_ISIZE_MASK 0x00000f80 +#define MAS6_ISIZE_SHIFT 7 + +#define MAS7_RPN 0xffffffff + +#define MAS8_TGS 0x80000000 +#define MAS8_VF 0x40000000 +#define MAS8_TLBPID 0x00000fff + +/* Bit definitions for MMUCFG */ +#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */ +#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */ +#define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */ +#define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */ +#define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */ +#define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */ +#define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */ +#define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */ +#define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */ + +/* Bit definitions for MMUCSR0 */ +#define 
MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */ +#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */ +#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */ +#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */ +#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \ + MMUCSR0_TLB2FI | MMUCSR0_TLB3FI) +#define MMUCSR0_TLB0PS 0x00000780 /* TLB0 Page Size */ +#define MMUCSR0_TLB1PS 0x00007800 /* TLB1 Page Size */ +#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */ +#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */ + +/* TLBnCFG encoding */ +#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */ +#define TLBnCFG_HES 0x00002000 /* HW select supported */ +#define TLBnCFG_AVAIL 0x00004000 /* variable page size */ +#define TLBnCFG_IPROT 0x00008000 /* IPROT supported */ +#define TLBnCFG_GTWE 0x00010000 /* Guest can write */ +#define TLBnCFG_IND 0x00020000 /* IND entries supported */ +#define TLBnCFG_PT 0x00040000 /* Can load from page table */ +#define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */ +#define TLBnCFG_MINSIZE_SHIFT 20 +#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */ +#define TLBnCFG_MAXSIZE_SHIFT 16 +#define TLBnCFG_ASSOC 0xff000000 /* Associativity */ +#define TLBnCFG_ASSOC_SHIFT 24 + +/* TLBnPS encoding */ +#define TLBnPS_4K 0x00000004 +#define TLBnPS_8K 0x00000008 +#define TLBnPS_16K 0x00000010 +#define TLBnPS_32K 0x00000020 +#define TLBnPS_64K 0x00000040 +#define TLBnPS_128K 0x00000080 +#define TLBnPS_256K 0x00000100 +#define TLBnPS_512K 0x00000200 +#define TLBnPS_1M 0x00000400 +#define TLBnPS_2M 0x00000800 +#define TLBnPS_4M 0x00001000 +#define TLBnPS_8M 0x00002000 +#define TLBnPS_16M 0x00004000 +#define TLBnPS_32M 0x00008000 +#define TLBnPS_64M 0x00010000 +#define TLBnPS_128M 0x00020000 +#define TLBnPS_256M 0x00040000 +#define TLBnPS_512M 0x00080000 +#define TLBnPS_1G 0x00100000 +#define TLBnPS_2G 0x00200000 +#define TLBnPS_4G 0x00400000 +#define TLBnPS_8G 0x00800000 +#define TLBnPS_16G 0x01000000 +#define TLBnPS_32G 0x02000000 +#define TLBnPS_64G 0x04000000 +#define TLBnPS_128G 0x08000000 +#define TLBnPS_256G 0x10000000 + +/* tlbilx action encoding */ +#define TLBILX_T_ALL 0 +#define TLBILX_T_TID 1 +#define TLBILX_T_FULLMATCH 3 +#define TLBILX_T_CLASS0 4 +#define TLBILX_T_CLASS1 5 +#define TLBILX_T_CLASS2 6 +#define TLBILX_T_CLASS3 7 + +/* BookE 2.06 helper defines */ + +#define BOOKE206_FLUSH_TLB0 (1 << 0) +#define BOOKE206_FLUSH_TLB1 (1 << 1) +#define BOOKE206_FLUSH_TLB2 (1 << 2) +#define BOOKE206_FLUSH_TLB3 (1 << 3) + +/* number of possible TLBs */ +#define BOOKE206_MAX_TLBN 4 + +#define EPID_EPID_SHIFT 0x0 +#define EPID_EPID 0xFF +#define EPID_ELPID_SHIFT 0x10 +#define EPID_ELPID 0x3F0000 +#define EPID_EGS 0x20000000 +#define EPID_EGS_SHIFT 29 +#define EPID_EAS 0x40000000 +#define EPID_EAS_SHIFT 30 +#define EPID_EPR 0x80000000 +#define EPID_EPR_SHIFT 31 +/* We don't support EGS and ELPID */ +#define EPID_MASK (EPID_EPID | EPID_EAS | EPID_EPR) + +/*****************************************************************************/ +/* Server and Embedded Processor Control */ + +#define DBELL_TYPE_SHIFT 27 +#define DBELL_TYPE_MASK (0x1f << DBELL_TYPE_SHIFT) +#define DBELL_TYPE_DBELL (0x00 << DBELL_TYPE_SHIFT) +#define DBELL_TYPE_DBELL_CRIT (0x01 << DBELL_TYPE_SHIFT) +#define DBELL_TYPE_G_DBELL (0x02 << DBELL_TYPE_SHIFT) +#define DBELL_TYPE_G_DBELL_CRIT (0x03 << DBELL_TYPE_SHIFT) +#define DBELL_TYPE_G_DBELL_MC (0x04 << DBELL_TYPE_SHIFT) + +#define DBELL_TYPE_DBELL_SERVER (0x05 << DBELL_TYPE_SHIFT) + 
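The DBELL_TYPE_* values above place a 5-bit doorbell message type in bits 32:36 (IBM numbering) of the msgsnd/msgrcv operand. A minimal sketch of the intended decoding, using only the masks defined above (the helper itself is hypothetical):

static inline int dbell_type_is_server(uint64_t rb)
{
    /* Isolate the 5-bit type field and compare against a pre-shifted value */
    return ((uint32_t)rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

Note that PPC_BIT(), used just below for DBELL_BRDCAST, counts from the most significant end: PPC_BIT(37) selects IBM bit 37 of a 64-bit register, not 1 << 37.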
+#define DBELL_BRDCAST PPC_BIT(37)
+#define DBELL_LPIDTAG_SHIFT 14
+#define DBELL_LPIDTAG_MASK (0xfff << DBELL_LPIDTAG_SHIFT)
+#define DBELL_PIRTAG_MASK 0x3fff
+
+#define DBELL_PROCIDTAG_MASK PPC_BITMASK(44, 63)
+
+#define PPC_PAGE_SIZES_MAX_SZ 8
+
+struct ppc_radix_page_info {
+    uint32_t count;
+    uint32_t entries[PPC_PAGE_SIZES_MAX_SZ];
+};
+
+/*****************************************************************************/
+/* The whole PowerPC CPU context */
+
+/*
+ * PowerPC needs eight modes for different hypervisor/supervisor/guest
+ * + real/paged mode combinations. The other two modes are for
+ * external PID load/store.
+ */
+#define PPC_TLB_EPID_LOAD 8
+#define PPC_TLB_EPID_STORE 9
+
+#define PPC_CPU_OPCODES_LEN 0x40
+#define PPC_CPU_INDIRECT_OPCODES_LEN 0x20
+
+struct CPUPPCState {
+    /* Most commonly used resources during translated code execution first */
+    target_ulong gpr[32];  /* general purpose registers */
+    target_ulong gprh[32]; /* storage for GPR MSB, used by the SPE extension */
+    target_ulong lr;
+    target_ulong ctr;
+    uint32_t crf[8];       /* condition register */
+#if defined(TARGET_PPC64)
+    target_ulong cfar;
+#endif
+    target_ulong xer;      /* XER (with SO, OV, CA split out) */
+    target_ulong so;
+    target_ulong ov;
+    target_ulong ca;
+    target_ulong ov32;
+    target_ulong ca32;
+
+    target_ulong reserve_addr; /* Reservation address */
+    target_ulong reserve_val;  /* Reservation value */
+    target_ulong reserve_val2;
+
+    /* These are used in supervisor mode only */
+    target_ulong msr;     /* machine state register */
+    target_ulong tgpr[4]; /* temporary general purpose registers, */
+                          /* used to speed up TLB assist handlers */
+
+    target_ulong nip; /* next instruction pointer */
+    uint64_t retxh;   /* high part of 128-bit helper return */
+
+    /* when a memory exception occurs, the access type is stored here */
+    int access_type;
+
+    /* MMU context, only relevant for full system emulation */
+#if defined(TARGET_PPC64)
+    ppc_slb_t slb[MAX_SLB_ENTRIES]; /* PowerPC 64 SLB area */
+#endif
+    target_ulong sr[32]; /* segment registers */
+    uint32_t nb_BATs;    /* number of BATs */
+    target_ulong DBAT[2][8];
+    target_ulong IBAT[2][8];
+    /* PowerPC TLB registers (for 4xx, e500 and 60x software driven TLBs) */
+    int32_t nb_tlb;  /* Total number of TLBs */
+    int tlb_per_way; /* Speed-up helper: used to avoid divisions at run time */
+    int nb_ways;     /* Number of ways in the TLB set */
+    int last_way;    /* Last way used to allocate a TLB in an LRU way */
+    int id_tlbs;     /* If 1, MMU has separate TLBs for instructions & data */
+    int nb_pids;     /* Number of available PID registers */
+    int tlb_type;    /* Type of TLB we're dealing with */
+    ppc_tlb_t tlb;   /* TLB is optional. Allocate them only if needed */
+    target_ulong pb[4]; /* 403 dedicated access protection registers */
+    bool tlb_dirty;     /* Set to non-zero when modifying TLB */
+    bool kvm_sw_tlb;    /* non-zero if KVM SW TLB API is active */
+    uint32_t tlb_need_flush; /* Delayed flush needed */
+#define TLB_NEED_LOCAL_FLUSH 0x1
+#define TLB_NEED_GLOBAL_FLUSH 0x2
+
+    /* Other registers */
+    target_ulong spr[1024]; /* special purpose registers */
+    ppc_spr_t spr_cb[1024];
+    /* Vector status and control register, minus VSCR_SAT */
+    uint32_t vscr;
+    /* VSX registers (including FP and AVR) */
+    ppc_vsr_t vsr[64] QEMU_ALIGNED(16);
+    /* Non-zero if and only if VSCR_SAT should be set */
+    ppc_vsr_t vscr_sat QEMU_ALIGNED(16);
+    /* SPE registers */
+    uint64_t spe_acc;
+    uint32_t spe_fscr;
+    /* SPE and Altivec share status as they'll never be used simultaneously */
+    float_status vec_status;
+    float_status fp_status; /* Floating point execution context */
+    target_ulong fpscr;     /* Floating point status and control register */
+
+    /* Internal device resources */
+    ppc_tb_t *tb_env;   /* Time base and decrementer */
+    ppc_dcr_t *dcr_env; /* Device control registers */
+
+    int dcache_line_size;
+    int icache_line_size;
+
+    /* These resources are used during exception processing */
+    /* CPU model definition */
+    target_ulong msr_mask;
+    powerpc_mmu_t mmu_model;
+    powerpc_excp_t excp_model;
+    powerpc_input_t bus_model;
+    int bfd_mach;
+    uint32_t flags;
+    uint64_t insns_flags;
+    uint64_t insns_flags2;
+
+    int error_code;
+    uint32_t pending_interrupts;
+    /*
+     * This is the IRQ controller, which is implementation dependent and only
+     * relevant when emulating a complete machine. Note that this isn't used
+     * by recent Book3s compatible CPUs (POWER7 and newer).
+     */
+    uint32_t irq_input_state;
+    void **irq_inputs;
+
+    target_ulong excp_vectors[POWERPC_EXCP_NB]; /* Exception vectors */
+    target_ulong excp_prefix;
+    target_ulong ivor_mask;
+    target_ulong ivpr_mask;
+    target_ulong hreset_vector;
+    hwaddr mpic_iack;
+    bool mpic_proxy;  /* true if the external proxy facility mode is enabled */
+    bool has_hv_mode; /* set when the processor has an HV mode, thus HV priv */
+                      /* instructions and SPRs are disallowed if MSR:HV is 0 */
+    /*
+     * On P7/P8/P9, set when in PM state so we need to handle resume in a
+     * special way (such as routing some resume causes to 0x100, i.e. sreset).
+ */ + bool resume_as_sreset; + + /* These resources are used only in QEMU core */ + target_ulong hflags; /* hflags is MSR & HFLAGS_MASK */ + target_ulong hflags_nmsr; /* specific hflags, not coming from MSR */ + int immu_idx; /* precomputed MMU index to speed up insn accesses */ + int dmmu_idx; /* precomputed MMU index to speed up data accesses */ + + /* Power management */ + int (*check_pow)(CPUPPCState *env); + + void *load_info; /* holds boot loading state */ + + /* booke timers */ + + /* + * Specifies bit locations of the Time Base used to signal a fixed timer + * exception on a transition from 0 to 1 (watchdog or fixed-interval timer) + * + * 0 selects the least significant bit, 63 selects the most significant bit + */ + uint8_t fit_period[4]; + uint8_t wdt_period[4]; + + /* Transactional memory state */ + target_ulong tm_gpr[32]; + ppc_avr_t tm_vsr[64]; + uint64_t tm_cr; + uint64_t tm_lr; + uint64_t tm_ctr; + uint64_t tm_fpscr; + uint64_t tm_amr; + uint64_t tm_ppr; + uint64_t tm_vrsave; + uint32_t tm_vscr; + uint64_t tm_dscr; + uint64_t tm_tar; + + /* Unicorn engine */ + struct uc_struct *uc; +}; + +#define SET_FIT_PERIOD(a_, b_, c_, d_) \ +do { \ + env->fit_period[0] = (a_); \ + env->fit_period[1] = (b_); \ + env->fit_period[2] = (c_); \ + env->fit_period[3] = (d_); \ + } while (0) + +#define SET_WDT_PERIOD(a_, b_, c_, d_) \ +do { \ + env->wdt_period[0] = (a_); \ + env->wdt_period[1] = (b_); \ + env->wdt_period[2] = (c_); \ + env->wdt_period[3] = (d_); \ + } while (0) + +#if 0 +typedef struct PPCVirtualHypervisor PPCVirtualHypervisor; +typedef struct PPCVirtualHypervisorClass PPCVirtualHypervisorClass; +#endif + +/** + * PowerPCCPU: + * @env: #CPUPPCState + * @vcpu_id: vCPU identifier given to KVM + * @compat_pvr: Current logical PVR, zero if in "raw" mode + * + * A PowerPC CPU. 
+ */ +struct PowerPCCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUNegativeOffsetState neg; + CPUPPCState env; + + int vcpu_id; + uint32_t compat_pvr; +#if 0 + PPCVirtualHypervisor *vhyp; +#endif + void *machine_data; + int32_t node_id; /* NUMA node this CPU belongs to */ + PPCHash64Options *hash64_opts; + + /* Those resources are used only during code translation */ + /* opcode handlers */ + opc_handler_t *opcodes[PPC_CPU_OPCODES_LEN]; + + /* Fields related to migration compatibility hacks */ + bool pre_2_8_migration; + target_ulong mig_msr_mask; + uint64_t mig_insns_flags; + uint64_t mig_insns_flags2; + uint32_t mig_nb_BATs; + bool pre_2_10_migration; + bool pre_3_0_migration; + int32_t mig_slb_nr; + + struct PowerPCCPUClass cc; +}; + + +PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr); +PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr); +PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc); + +#if 0 +struct PPCVirtualHypervisorClass { + InterfaceClass parent; + void (*hypercall)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu); + hwaddr (*hpt_mask)(PPCVirtualHypervisor *vhyp); + const ppc_hash_pte64_t *(*map_hptes)(PPCVirtualHypervisor *vhyp, + hwaddr ptex, int n); + void (*unmap_hptes)(PPCVirtualHypervisor *vhyp, + const ppc_hash_pte64_t *hptes, + hwaddr ptex, int n); + void (*hpte_set_c)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1); + void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1); + void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry); + target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp); + void (*cpu_exec_enter)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu); + void (*cpu_exec_exit)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu); +}; + +#define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor" +#define PPC_VIRTUAL_HYPERVISOR(obj) \ + OBJECT_CHECK(PPCVirtualHypervisor, (obj), TYPE_PPC_VIRTUAL_HYPERVISOR) +#define PPC_VIRTUAL_HYPERVISOR_CLASS(klass) \ + OBJECT_CLASS_CHECK(PPCVirtualHypervisorClass, (klass), \ + TYPE_PPC_VIRTUAL_HYPERVISOR) +#define PPC_VIRTUAL_HYPERVISOR_GET_CLASS(obj) \ + OBJECT_GET_CLASS(PPCVirtualHypervisorClass, (obj), \ + TYPE_PPC_VIRTUAL_HYPERVISOR) +#endif + +void ppc_cpu_do_interrupt(CPUState *cpu); +bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req); +hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); + +void ppc_cpu_do_system_reset(CPUState *cs, target_ulong vector); +void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector); +#if 0 +extern const VMStateDescription vmstate_ppc_cpu; +#endif + +/*****************************************************************************/ +void ppc_translate_init(struct uc_struct *uc); + +/* + * you can call this signal handler from your SIGBUS and SIGSEGV + * signal handlers to inform the virtual CPU of exceptions. non zero + * is returned if the signal was handled by the virtual CPU. 
+ */ +int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc); +bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + +void ppc_store_sdr1(CPUPPCState *env, target_ulong value); +void ppc_store_ptcr(CPUPPCState *env, target_ulong value); + +void ppc_store_msr(CPUPPCState *env, target_ulong value); + +void ppc_cpu_list(void); + +/* Time-base and decrementer management */ +#ifndef NO_CPU_IO_DEFS +uint64_t cpu_ppc_load_tbl(CPUPPCState *env); +uint32_t cpu_ppc_load_tbu(CPUPPCState *env); +void cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value); +void cpu_ppc_store_tbl(CPUPPCState *env, uint32_t value); +uint64_t cpu_ppc_load_atbl(CPUPPCState *env); +uint32_t cpu_ppc_load_atbu(CPUPPCState *env); +void cpu_ppc_store_atbl(CPUPPCState *env, uint32_t value); +void cpu_ppc_store_atbu(CPUPPCState *env, uint32_t value); +uint64_t cpu_ppc_load_vtb(CPUPPCState *env); +void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value); +bool ppc_decr_clear_on_delivery(CPUPPCState *env); +target_ulong cpu_ppc_load_decr(CPUPPCState *env); +void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value); +target_ulong cpu_ppc_load_hdecr(CPUPPCState *env); +void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value); +void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value); +uint64_t cpu_ppc_load_purr(CPUPPCState *env); +void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value); +uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env); +uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env); +void cpu_ppc601_store_rtcl(CPUPPCState *env, uint32_t value); +void cpu_ppc601_store_rtcu(CPUPPCState *env, uint32_t value); +target_ulong load_40x_pit(CPUPPCState *env); +void store_40x_pit(CPUPPCState *env, target_ulong val); +void store_40x_dbcr0(CPUPPCState *env, uint32_t val); +void store_40x_sler(CPUPPCState *env, uint32_t val); +void store_booke_tcr(CPUPPCState *env, target_ulong val); +void store_booke_tsr(CPUPPCState *env, target_ulong val); +void ppc_tlb_invalidate_all(CPUPPCState *env); +void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr); +#if 0 +void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp); +#endif +#endif + +void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask); +void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit, + const char *caller, uint32_t cause); + +static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn) +{ + uint64_t gprv; + + gprv = env->gpr[gprn]; + if (env->flags & POWERPC_FLAG_SPE) { + /* + * If the CPU implements the SPE extension, we have to get the + * high bits of the GPR from the gprh storage area + */ + gprv &= 0xFFFFFFFFULL; + gprv |= (uint64_t)env->gprh[gprn] << 32; + } + + return gprv; +} + +/* Device control registers */ +int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp); +int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val); + +#define POWERPC_CPU_TYPE_SUFFIX "-" TYPE_POWERPC_CPU +#define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX +#define CPU_RESOLVING_TYPE TYPE_POWERPC_CPU + +#define cpu_signal_handler cpu_ppc_signal_handler +#define cpu_list ppc_cpu_list + +/* MMU modes definitions */ +#define MMU_USER_IDX 0 +static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch) +{ + return ifetch ? 
env->immu_idx : env->dmmu_idx; +} + +/* Compatibility modes */ +#if defined(TARGET_PPC64) +bool ppc_check_compat(PowerPCCPU *cpu, uint32_t compat_pvr, + uint32_t min_compat_pvr, uint32_t max_compat_pvr); +bool ppc_type_check_compat(const char *cputype, uint32_t compat_pvr, + uint32_t min_compat_pvr, uint32_t max_compat_pvr); + +void ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr); + +void ppc_set_compat_all(uint32_t compat_pvr); +int ppc_compat_max_vthreads(PowerPCCPU *cpu); +#if 0 +void ppc_compat_add_property(Object *obj, const char *name, + uint32_t *compat_pvr, const char *basedesc); +#endif +#endif /* defined(TARGET_PPC64) */ + +typedef CPUPPCState CPUArchState; +typedef PowerPCCPU ArchCPU; + +#include "exec/cpu-all.h" + +/*****************************************************************************/ +/* CRF definitions */ +#define CRF_LT_BIT 3 +#define CRF_GT_BIT 2 +#define CRF_EQ_BIT 1 +#define CRF_SO_BIT 0 +#define CRF_LT (1 << CRF_LT_BIT) +#define CRF_GT (1 << CRF_GT_BIT) +#define CRF_EQ (1 << CRF_EQ_BIT) +#define CRF_SO (1 << CRF_SO_BIT) +/* For SPE extensions */ +#define CRF_CH (1 << CRF_LT_BIT) +#define CRF_CL (1 << CRF_GT_BIT) +#define CRF_CH_OR_CL (1 << CRF_EQ_BIT) +#define CRF_CH_AND_CL (1 << CRF_SO_BIT) + +/* XER definitions */ +#define XER_SO 31 +#define XER_OV 30 +#define XER_CA 29 +#define XER_OV32 19 +#define XER_CA32 18 +#define XER_CMP 8 +#define XER_BC 0 +#define xer_so (env->so) +#define xer_ov (env->ov) +#define xer_ca (env->ca) +#define xer_ov32 (env->ov) +#define xer_ca32 (env->ca) +#define xer_cmp ((env->xer >> XER_CMP) & 0xFF) +#define xer_bc ((env->xer >> XER_BC) & 0x7F) + +/* SPR definitions */ +#define SPR_MQ (0x000) +#define SPR_XER (0x001) +#define SPR_601_VRTCU (0x004) +#define SPR_601_VRTCL (0x005) +#define SPR_601_UDECR (0x006) +#define SPR_LR (0x008) +#define SPR_CTR (0x009) +#define SPR_UAMR (0x00D) +#define SPR_DSCR (0x011) +#define SPR_DSISR (0x012) +#define SPR_DAR (0x013) /* DAE for PowerPC 601 */ +#define SPR_601_RTCU (0x014) +#define SPR_601_RTCL (0x015) +#define SPR_DECR (0x016) +#define SPR_SDR1 (0x019) +#define SPR_SRR0 (0x01A) +#define SPR_SRR1 (0x01B) +#define SPR_CFAR (0x01C) +#define SPR_AMR (0x01D) +#define SPR_ACOP (0x01F) +#define SPR_BOOKE_PID (0x030) +#define SPR_BOOKS_PID (0x030) +#define SPR_BOOKE_DECAR (0x036) +#define SPR_BOOKE_CSRR0 (0x03A) +#define SPR_BOOKE_CSRR1 (0x03B) +#define SPR_BOOKE_DEAR (0x03D) +#define SPR_IAMR (0x03D) +#define SPR_BOOKE_ESR (0x03E) +#define SPR_BOOKE_IVPR (0x03F) +#define SPR_MPC_EIE (0x050) +#define SPR_MPC_EID (0x051) +#define SPR_MPC_NRI (0x052) +#define SPR_TFHAR (0x080) +#define SPR_TFIAR (0x081) +#define SPR_TEXASR (0x082) +#define SPR_TEXASRU (0x083) +#define SPR_UCTRL (0x088) +#define SPR_TIDR (0x090) +#define SPR_MPC_CMPA (0x090) +#define SPR_MPC_CMPB (0x091) +#define SPR_MPC_CMPC (0x092) +#define SPR_MPC_CMPD (0x093) +#define SPR_MPC_ECR (0x094) +#define SPR_MPC_DER (0x095) +#define SPR_MPC_COUNTA (0x096) +#define SPR_MPC_COUNTB (0x097) +#define SPR_CTRL (0x098) +#define SPR_MPC_CMPE (0x098) +#define SPR_MPC_CMPF (0x099) +#define SPR_FSCR (0x099) +#define SPR_MPC_CMPG (0x09A) +#define SPR_MPC_CMPH (0x09B) +#define SPR_MPC_LCTRL1 (0x09C) +#define SPR_MPC_LCTRL2 (0x09D) +#define SPR_UAMOR (0x09D) +#define SPR_MPC_ICTRL (0x09E) +#define SPR_MPC_BAR (0x09F) +#define SPR_PSPB (0x09F) +#define SPR_DPDES (0x0B0) +#define SPR_DAWR (0x0B4) +#define SPR_RPR (0x0BA) +#define SPR_CIABR (0x0BB) +#define SPR_DAWRX (0x0BC) +#define SPR_HFSCR (0x0BE) +#define SPR_VRSAVE (0x100) +#define SPR_USPRG0 
(0x100) +#define SPR_USPRG1 (0x101) +#define SPR_USPRG2 (0x102) +#define SPR_USPRG3 (0x103) +#define SPR_USPRG4 (0x104) +#define SPR_USPRG5 (0x105) +#define SPR_USPRG6 (0x106) +#define SPR_USPRG7 (0x107) +#define SPR_VTBL (0x10C) +#define SPR_VTBU (0x10D) +#define SPR_SPRG0 (0x110) +#define SPR_SPRG1 (0x111) +#define SPR_SPRG2 (0x112) +#define SPR_SPRG3 (0x113) +#define SPR_SPRG4 (0x114) +#define SPR_SCOMC (0x114) +#define SPR_SPRG5 (0x115) +#define SPR_SCOMD (0x115) +#define SPR_SPRG6 (0x116) +#define SPR_SPRG7 (0x117) +#define SPR_ASR (0x118) +#define SPR_EAR (0x11A) +#define SPR_TBL (0x11C) +#define SPR_TBU (0x11D) +#define SPR_TBU40 (0x11E) +#define SPR_SVR (0x11E) +#define SPR_BOOKE_PIR (0x11E) +#define SPR_PVR (0x11F) +#define SPR_HSPRG0 (0x130) +#define SPR_BOOKE_DBSR (0x130) +#define SPR_HSPRG1 (0x131) +#define SPR_HDSISR (0x132) +#define SPR_HDAR (0x133) +#define SPR_BOOKE_EPCR (0x133) +#define SPR_SPURR (0x134) +#define SPR_BOOKE_DBCR0 (0x134) +#define SPR_IBCR (0x135) +#define SPR_PURR (0x135) +#define SPR_BOOKE_DBCR1 (0x135) +#define SPR_DBCR (0x136) +#define SPR_HDEC (0x136) +#define SPR_BOOKE_DBCR2 (0x136) +#define SPR_HIOR (0x137) +#define SPR_MBAR (0x137) +#define SPR_RMOR (0x138) +#define SPR_BOOKE_IAC1 (0x138) +#define SPR_HRMOR (0x139) +#define SPR_BOOKE_IAC2 (0x139) +#define SPR_HSRR0 (0x13A) +#define SPR_BOOKE_IAC3 (0x13A) +#define SPR_HSRR1 (0x13B) +#define SPR_BOOKE_IAC4 (0x13B) +#define SPR_BOOKE_DAC1 (0x13C) +#define SPR_MMCRH (0x13C) +#define SPR_DABR2 (0x13D) +#define SPR_BOOKE_DAC2 (0x13D) +#define SPR_TFMR (0x13D) +#define SPR_BOOKE_DVC1 (0x13E) +#define SPR_LPCR (0x13E) +#define SPR_BOOKE_DVC2 (0x13F) +#define SPR_LPIDR (0x13F) +#define SPR_BOOKE_TSR (0x150) +#define SPR_HMER (0x150) +#define SPR_HMEER (0x151) +#define SPR_PCR (0x152) +#define SPR_BOOKE_LPIDR (0x152) +#define SPR_BOOKE_TCR (0x154) +#define SPR_BOOKE_TLB0PS (0x158) +#define SPR_BOOKE_TLB1PS (0x159) +#define SPR_BOOKE_TLB2PS (0x15A) +#define SPR_BOOKE_TLB3PS (0x15B) +#define SPR_AMOR (0x15D) +#define SPR_BOOKE_MAS7_MAS3 (0x174) +#define SPR_BOOKE_IVOR0 (0x190) +#define SPR_BOOKE_IVOR1 (0x191) +#define SPR_BOOKE_IVOR2 (0x192) +#define SPR_BOOKE_IVOR3 (0x193) +#define SPR_BOOKE_IVOR4 (0x194) +#define SPR_BOOKE_IVOR5 (0x195) +#define SPR_BOOKE_IVOR6 (0x196) +#define SPR_BOOKE_IVOR7 (0x197) +#define SPR_BOOKE_IVOR8 (0x198) +#define SPR_BOOKE_IVOR9 (0x199) +#define SPR_BOOKE_IVOR10 (0x19A) +#define SPR_BOOKE_IVOR11 (0x19B) +#define SPR_BOOKE_IVOR12 (0x19C) +#define SPR_BOOKE_IVOR13 (0x19D) +#define SPR_BOOKE_IVOR14 (0x19E) +#define SPR_BOOKE_IVOR15 (0x19F) +#define SPR_BOOKE_IVOR38 (0x1B0) +#define SPR_BOOKE_IVOR39 (0x1B1) +#define SPR_BOOKE_IVOR40 (0x1B2) +#define SPR_BOOKE_IVOR41 (0x1B3) +#define SPR_BOOKE_IVOR42 (0x1B4) +#define SPR_BOOKE_GIVOR2 (0x1B8) +#define SPR_BOOKE_GIVOR3 (0x1B9) +#define SPR_BOOKE_GIVOR4 (0x1BA) +#define SPR_BOOKE_GIVOR8 (0x1BB) +#define SPR_BOOKE_GIVOR13 (0x1BC) +#define SPR_BOOKE_GIVOR14 (0x1BD) +#define SPR_TIR (0x1BE) +#define SPR_PTCR (0x1D0) +#define SPR_BOOKE_SPEFSCR (0x200) +#define SPR_Exxx_BBEAR (0x201) +#define SPR_Exxx_BBTAR (0x202) +#define SPR_Exxx_L1CFG0 (0x203) +#define SPR_Exxx_L1CFG1 (0x204) +#define SPR_Exxx_NPIDR (0x205) +#define SPR_ATBL (0x20E) +#define SPR_ATBU (0x20F) +#define SPR_IBAT0U (0x210) +#define SPR_BOOKE_IVOR32 (0x210) +#define SPR_RCPU_MI_GRA (0x210) +#define SPR_IBAT0L (0x211) +#define SPR_BOOKE_IVOR33 (0x211) +#define SPR_IBAT1U (0x212) +#define SPR_BOOKE_IVOR34 (0x212) +#define SPR_IBAT1L (0x213) +#define SPR_BOOKE_IVOR35 (0x213) 
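+/*
+ * Editorial note (not part of the imported QEMU code): SPR numbers in this
+ * table are heavily overloaded across CPU families -- 0x210-0x213 above are
+ * shared by the classic IBAT0..IBAT1 registers and the BookE IVOR32..IVOR35 --
+ * so how a given env->spr[] slot behaves depends on the spr_cb[] callbacks
+ * registered for the emulated model. A minimal raw read, assuming a valid
+ * CPUPPCState *env, is simply:
+ *
+ *     target_ulong ivpr = env->spr[SPR_BOOKE_IVPR];
+ */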
+#define SPR_IBAT2U (0x214) +#define SPR_BOOKE_IVOR36 (0x214) +#define SPR_IBAT2L (0x215) +#define SPR_BOOKE_IVOR37 (0x215) +#define SPR_IBAT3U (0x216) +#define SPR_IBAT3L (0x217) +#define SPR_DBAT0U (0x218) +#define SPR_RCPU_L2U_GRA (0x218) +#define SPR_DBAT0L (0x219) +#define SPR_DBAT1U (0x21A) +#define SPR_DBAT1L (0x21B) +#define SPR_DBAT2U (0x21C) +#define SPR_DBAT2L (0x21D) +#define SPR_DBAT3U (0x21E) +#define SPR_DBAT3L (0x21F) +#define SPR_IBAT4U (0x230) +#define SPR_RPCU_BBCMCR (0x230) +#define SPR_MPC_IC_CST (0x230) +#define SPR_Exxx_CTXCR (0x230) +#define SPR_IBAT4L (0x231) +#define SPR_MPC_IC_ADR (0x231) +#define SPR_Exxx_DBCR3 (0x231) +#define SPR_IBAT5U (0x232) +#define SPR_MPC_IC_DAT (0x232) +#define SPR_Exxx_DBCNT (0x232) +#define SPR_IBAT5L (0x233) +#define SPR_IBAT6U (0x234) +#define SPR_IBAT6L (0x235) +#define SPR_IBAT7U (0x236) +#define SPR_IBAT7L (0x237) +#define SPR_DBAT4U (0x238) +#define SPR_RCPU_L2U_MCR (0x238) +#define SPR_MPC_DC_CST (0x238) +#define SPR_Exxx_ALTCTXCR (0x238) +#define SPR_DBAT4L (0x239) +#define SPR_MPC_DC_ADR (0x239) +#define SPR_DBAT5U (0x23A) +#define SPR_BOOKE_MCSRR0 (0x23A) +#define SPR_MPC_DC_DAT (0x23A) +#define SPR_DBAT5L (0x23B) +#define SPR_BOOKE_MCSRR1 (0x23B) +#define SPR_DBAT6U (0x23C) +#define SPR_BOOKE_MCSR (0x23C) +#define SPR_DBAT6L (0x23D) +#define SPR_Exxx_MCAR (0x23D) +#define SPR_DBAT7U (0x23E) +#define SPR_BOOKE_DSRR0 (0x23E) +#define SPR_DBAT7L (0x23F) +#define SPR_BOOKE_DSRR1 (0x23F) +#define SPR_BOOKE_SPRG8 (0x25C) +#define SPR_BOOKE_SPRG9 (0x25D) +#define SPR_BOOKE_MAS0 (0x270) +#define SPR_BOOKE_MAS1 (0x271) +#define SPR_BOOKE_MAS2 (0x272) +#define SPR_BOOKE_MAS3 (0x273) +#define SPR_BOOKE_MAS4 (0x274) +#define SPR_BOOKE_MAS5 (0x275) +#define SPR_BOOKE_MAS6 (0x276) +#define SPR_BOOKE_PID1 (0x279) +#define SPR_BOOKE_PID2 (0x27A) +#define SPR_MPC_DPDR (0x280) +#define SPR_MPC_IMMR (0x288) +#define SPR_BOOKE_TLB0CFG (0x2B0) +#define SPR_BOOKE_TLB1CFG (0x2B1) +#define SPR_BOOKE_TLB2CFG (0x2B2) +#define SPR_BOOKE_TLB3CFG (0x2B3) +#define SPR_BOOKE_EPR (0x2BE) +#define SPR_PERF0 (0x300) +#define SPR_RCPU_MI_RBA0 (0x300) +#define SPR_MPC_MI_CTR (0x300) +#define SPR_POWER_USIER (0x300) +#define SPR_PERF1 (0x301) +#define SPR_RCPU_MI_RBA1 (0x301) +#define SPR_POWER_UMMCR2 (0x301) +#define SPR_PERF2 (0x302) +#define SPR_RCPU_MI_RBA2 (0x302) +#define SPR_MPC_MI_AP (0x302) +#define SPR_POWER_UMMCRA (0x302) +#define SPR_PERF3 (0x303) +#define SPR_RCPU_MI_RBA3 (0x303) +#define SPR_MPC_MI_EPN (0x303) +#define SPR_POWER_UPMC1 (0x303) +#define SPR_PERF4 (0x304) +#define SPR_POWER_UPMC2 (0x304) +#define SPR_PERF5 (0x305) +#define SPR_MPC_MI_TWC (0x305) +#define SPR_POWER_UPMC3 (0x305) +#define SPR_PERF6 (0x306) +#define SPR_MPC_MI_RPN (0x306) +#define SPR_POWER_UPMC4 (0x306) +#define SPR_PERF7 (0x307) +#define SPR_POWER_UPMC5 (0x307) +#define SPR_PERF8 (0x308) +#define SPR_RCPU_L2U_RBA0 (0x308) +#define SPR_MPC_MD_CTR (0x308) +#define SPR_POWER_UPMC6 (0x308) +#define SPR_PERF9 (0x309) +#define SPR_RCPU_L2U_RBA1 (0x309) +#define SPR_MPC_MD_CASID (0x309) +#define SPR_970_UPMC7 (0X309) +#define SPR_PERFA (0x30A) +#define SPR_RCPU_L2U_RBA2 (0x30A) +#define SPR_MPC_MD_AP (0x30A) +#define SPR_970_UPMC8 (0X30A) +#define SPR_PERFB (0x30B) +#define SPR_RCPU_L2U_RBA3 (0x30B) +#define SPR_MPC_MD_EPN (0x30B) +#define SPR_POWER_UMMCR0 (0X30B) +#define SPR_PERFC (0x30C) +#define SPR_MPC_MD_TWB (0x30C) +#define SPR_POWER_USIAR (0X30C) +#define SPR_PERFD (0x30D) +#define SPR_MPC_MD_TWC (0x30D) +#define SPR_POWER_USDAR (0X30D) +#define SPR_PERFE (0x30E) 
+#define SPR_MPC_MD_RPN (0x30E) +#define SPR_POWER_UMMCR1 (0X30E) +#define SPR_PERFF (0x30F) +#define SPR_MPC_MD_TW (0x30F) +#define SPR_UPERF0 (0x310) +#define SPR_POWER_SIER (0x310) +#define SPR_UPERF1 (0x311) +#define SPR_POWER_MMCR2 (0x311) +#define SPR_UPERF2 (0x312) +#define SPR_POWER_MMCRA (0X312) +#define SPR_UPERF3 (0x313) +#define SPR_POWER_PMC1 (0X313) +#define SPR_UPERF4 (0x314) +#define SPR_POWER_PMC2 (0X314) +#define SPR_UPERF5 (0x315) +#define SPR_POWER_PMC3 (0X315) +#define SPR_UPERF6 (0x316) +#define SPR_POWER_PMC4 (0X316) +#define SPR_UPERF7 (0x317) +#define SPR_POWER_PMC5 (0X317) +#define SPR_UPERF8 (0x318) +#define SPR_POWER_PMC6 (0X318) +#define SPR_UPERF9 (0x319) +#define SPR_970_PMC7 (0X319) +#define SPR_UPERFA (0x31A) +#define SPR_970_PMC8 (0X31A) +#define SPR_UPERFB (0x31B) +#define SPR_POWER_MMCR0 (0X31B) +#define SPR_UPERFC (0x31C) +#define SPR_POWER_SIAR (0X31C) +#define SPR_UPERFD (0x31D) +#define SPR_POWER_SDAR (0X31D) +#define SPR_UPERFE (0x31E) +#define SPR_POWER_MMCR1 (0X31E) +#define SPR_UPERFF (0x31F) +#define SPR_RCPU_MI_RA0 (0x320) +#define SPR_MPC_MI_DBCAM (0x320) +#define SPR_BESCRS (0x320) +#define SPR_RCPU_MI_RA1 (0x321) +#define SPR_MPC_MI_DBRAM0 (0x321) +#define SPR_BESCRSU (0x321) +#define SPR_RCPU_MI_RA2 (0x322) +#define SPR_MPC_MI_DBRAM1 (0x322) +#define SPR_BESCRR (0x322) +#define SPR_RCPU_MI_RA3 (0x323) +#define SPR_BESCRRU (0x323) +#define SPR_EBBHR (0x324) +#define SPR_EBBRR (0x325) +#define SPR_BESCR (0x326) +#define SPR_RCPU_L2U_RA0 (0x328) +#define SPR_MPC_MD_DBCAM (0x328) +#define SPR_RCPU_L2U_RA1 (0x329) +#define SPR_MPC_MD_DBRAM0 (0x329) +#define SPR_RCPU_L2U_RA2 (0x32A) +#define SPR_MPC_MD_DBRAM1 (0x32A) +#define SPR_RCPU_L2U_RA3 (0x32B) +#define SPR_TAR (0x32F) +#define SPR_ASDR (0x330) +#define SPR_IC (0x350) +#define SPR_VTB (0x351) +#define SPR_MMCRC (0x353) +#define SPR_PSSCR (0x357) +#define SPR_440_INV0 (0x370) +#define SPR_440_INV1 (0x371) +#define SPR_440_INV2 (0x372) +#define SPR_440_INV3 (0x373) +#define SPR_440_ITV0 (0x374) +#define SPR_440_ITV1 (0x375) +#define SPR_440_ITV2 (0x376) +#define SPR_440_ITV3 (0x377) +#define SPR_440_CCR1 (0x378) +#define SPR_TACR (0x378) +#define SPR_TCSCR (0x379) +#define SPR_CSIGR (0x37a) +#define SPR_DCRIPR (0x37B) +#define SPR_POWER_SPMC1 (0x37C) +#define SPR_POWER_SPMC2 (0x37D) +#define SPR_POWER_MMCRS (0x37E) +#define SPR_WORT (0x37F) +#define SPR_PPR (0x380) +#define SPR_750_GQR0 (0x390) +#define SPR_440_DNV0 (0x390) +#define SPR_750_GQR1 (0x391) +#define SPR_440_DNV1 (0x391) +#define SPR_750_GQR2 (0x392) +#define SPR_440_DNV2 (0x392) +#define SPR_750_GQR3 (0x393) +#define SPR_440_DNV3 (0x393) +#define SPR_750_GQR4 (0x394) +#define SPR_440_DTV0 (0x394) +#define SPR_750_GQR5 (0x395) +#define SPR_440_DTV1 (0x395) +#define SPR_750_GQR6 (0x396) +#define SPR_440_DTV2 (0x396) +#define SPR_750_GQR7 (0x397) +#define SPR_440_DTV3 (0x397) +#define SPR_750_THRM4 (0x398) +#define SPR_750CL_HID2 (0x398) +#define SPR_440_DVLIM (0x398) +#define SPR_750_WPAR (0x399) +#define SPR_440_IVLIM (0x399) +#define SPR_TSCR (0x399) +#define SPR_750_DMAU (0x39A) +#define SPR_750_DMAL (0x39B) +#define SPR_440_RSTCFG (0x39B) +#define SPR_BOOKE_DCDBTRL (0x39C) +#define SPR_BOOKE_DCDBTRH (0x39D) +#define SPR_BOOKE_ICDBTRL (0x39E) +#define SPR_BOOKE_ICDBTRH (0x39F) +#define SPR_74XX_UMMCR2 (0x3A0) +#define SPR_7XX_UPMC5 (0x3A1) +#define SPR_7XX_UPMC6 (0x3A2) +#define SPR_UBAMR (0x3A7) +#define SPR_7XX_UMMCR0 (0x3A8) +#define SPR_7XX_UPMC1 (0x3A9) +#define SPR_7XX_UPMC2 (0x3AA) +#define SPR_7XX_USIAR (0x3AB) +#define 
SPR_7XX_UMMCR1 (0x3AC) +#define SPR_7XX_UPMC3 (0x3AD) +#define SPR_7XX_UPMC4 (0x3AE) +#define SPR_USDA (0x3AF) +#define SPR_40x_ZPR (0x3B0) +#define SPR_BOOKE_MAS7 (0x3B0) +#define SPR_74XX_MMCR2 (0x3B0) +#define SPR_7XX_PMC5 (0x3B1) +#define SPR_40x_PID (0x3B1) +#define SPR_7XX_PMC6 (0x3B2) +#define SPR_440_MMUCR (0x3B2) +#define SPR_4xx_CCR0 (0x3B3) +#define SPR_BOOKE_EPLC (0x3B3) +#define SPR_405_IAC3 (0x3B4) +#define SPR_BOOKE_EPSC (0x3B4) +#define SPR_405_IAC4 (0x3B5) +#define SPR_405_DVC1 (0x3B6) +#define SPR_405_DVC2 (0x3B7) +#define SPR_BAMR (0x3B7) +#define SPR_7XX_MMCR0 (0x3B8) +#define SPR_7XX_PMC1 (0x3B9) +#define SPR_40x_SGR (0x3B9) +#define SPR_7XX_PMC2 (0x3BA) +#define SPR_40x_DCWR (0x3BA) +#define SPR_7XX_SIAR (0x3BB) +#define SPR_405_SLER (0x3BB) +#define SPR_7XX_MMCR1 (0x3BC) +#define SPR_405_SU0R (0x3BC) +#define SPR_401_SKR (0x3BC) +#define SPR_7XX_PMC3 (0x3BD) +#define SPR_405_DBCR1 (0x3BD) +#define SPR_7XX_PMC4 (0x3BE) +#define SPR_SDA (0x3BF) +#define SPR_403_VTBL (0x3CC) +#define SPR_403_VTBU (0x3CD) +#define SPR_DMISS (0x3D0) +#define SPR_DCMP (0x3D1) +#define SPR_HASH1 (0x3D2) +#define SPR_HASH2 (0x3D3) +#define SPR_BOOKE_ICDBDR (0x3D3) +#define SPR_TLBMISS (0x3D4) +#define SPR_IMISS (0x3D4) +#define SPR_40x_ESR (0x3D4) +#define SPR_PTEHI (0x3D5) +#define SPR_ICMP (0x3D5) +#define SPR_40x_DEAR (0x3D5) +#define SPR_PTELO (0x3D6) +#define SPR_RPA (0x3D6) +#define SPR_40x_EVPR (0x3D6) +#define SPR_L3PM (0x3D7) +#define SPR_403_CDBCR (0x3D7) +#define SPR_L3ITCR0 (0x3D8) +#define SPR_TCR (0x3D8) +#define SPR_40x_TSR (0x3D8) +#define SPR_IBR (0x3DA) +#define SPR_40x_TCR (0x3DA) +#define SPR_ESASRR (0x3DB) +#define SPR_40x_PIT (0x3DB) +#define SPR_403_TBL (0x3DC) +#define SPR_403_TBU (0x3DD) +#define SPR_SEBR (0x3DE) +#define SPR_40x_SRR2 (0x3DE) +#define SPR_SER (0x3DF) +#define SPR_40x_SRR3 (0x3DF) +#define SPR_L3OHCR (0x3E8) +#define SPR_L3ITCR1 (0x3E9) +#define SPR_L3ITCR2 (0x3EA) +#define SPR_L3ITCR3 (0x3EB) +#define SPR_HID0 (0x3F0) +#define SPR_40x_DBSR (0x3F0) +#define SPR_HID1 (0x3F1) +#define SPR_IABR (0x3F2) +#define SPR_40x_DBCR0 (0x3F2) +#define SPR_601_HID2 (0x3F2) +#define SPR_Exxx_L1CSR0 (0x3F2) +#define SPR_ICTRL (0x3F3) +#define SPR_HID2 (0x3F3) +#define SPR_750CL_HID4 (0x3F3) +#define SPR_Exxx_L1CSR1 (0x3F3) +#define SPR_440_DBDR (0x3F3) +#define SPR_LDSTDB (0x3F4) +#define SPR_750_TDCL (0x3F4) +#define SPR_40x_IAC1 (0x3F4) +#define SPR_MMUCSR0 (0x3F4) +#define SPR_970_HID4 (0x3F4) +#define SPR_DABR (0x3F5) +#define DABR_MASK (~(target_ulong)0x7) +#define SPR_Exxx_BUCSR (0x3F5) +#define SPR_40x_IAC2 (0x3F5) +#define SPR_601_HID5 (0x3F5) +#define SPR_40x_DAC1 (0x3F6) +#define SPR_MSSCR0 (0x3F6) +#define SPR_970_HID5 (0x3F6) +#define SPR_MSSSR0 (0x3F7) +#define SPR_MSSCR1 (0x3F7) +#define SPR_DABRX (0x3F7) +#define SPR_40x_DAC2 (0x3F7) +#define SPR_MMUCFG (0x3F7) +#define SPR_LDSTCR (0x3F8) +#define SPR_L2PMCR (0x3F8) +#define SPR_750FX_HID2 (0x3F8) +#define SPR_Exxx_L1FINV0 (0x3F8) +#define SPR_L2CR (0x3F9) +#define SPR_L3CR (0x3FA) +#define SPR_750_TDCH (0x3FA) +#define SPR_IABR2 (0x3FA) +#define SPR_40x_DCCR (0x3FA) +#define SPR_ICTC (0x3FB) +#define SPR_40x_ICCR (0x3FB) +#define SPR_THRM1 (0x3FC) +#define SPR_403_PBL1 (0x3FC) +#define SPR_SP (0x3FD) +#define SPR_THRM2 (0x3FD) +#define SPR_403_PBU1 (0x3FD) +#define SPR_604_HID13 (0x3FD) +#define SPR_LT (0x3FE) +#define SPR_THRM3 (0x3FE) +#define SPR_RCPU_FPECR (0x3FE) +#define SPR_403_PBL2 (0x3FE) +#define SPR_PIR (0x3FF) +#define SPR_403_PBU2 (0x3FF) +#define SPR_601_HID15 (0x3FF) +#define 
SPR_604_HID15 (0x3FF) +#define SPR_E500_SVR (0x3FF) + +/* Disable MAS Interrupt Updates for Hypervisor */ +#define EPCR_DMIUH (1 << 22) +/* Disable Guest TLB Management Instructions */ +#define EPCR_DGTMI (1 << 23) +/* Guest Interrupt Computation Mode */ +#define EPCR_GICM (1 << 24) +/* Interrupt Computation Mode */ +#define EPCR_ICM (1 << 25) +/* Disable Embedded Hypervisor Debug */ +#define EPCR_DUVD (1 << 26) +/* Instruction Storage Interrupt Directed to Guest State */ +#define EPCR_ISIGS (1 << 27) +/* Data Storage Interrupt Directed to Guest State */ +#define EPCR_DSIGS (1 << 28) +/* Instruction TLB Error Interrupt Directed to Guest State */ +#define EPCR_ITLBGS (1 << 29) +/* Data TLB Error Interrupt Directed to Guest State */ +#define EPCR_DTLBGS (1 << 30) +/* External Input Interrupt Directed to Guest State */ +#define EPCR_EXTGS (1 << 31) + +#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */ +#define L1CSR0_CUL 0x00000400 /* (D-)Cache Unable to Lock */ +#define L1CSR0_DCLFR 0x00000100 /* D-Cache Lock Flash Reset */ +#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ +#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */ + +#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */ +#define L1CSR1_ICUL 0x00000400 /* I-Cache Unable to Lock */ +#define L1CSR1_ICLFR 0x00000100 /* I-Cache Lock Flash Reset */ +#define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */ +#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */ + +/* HID0 bits */ +#define HID0_DEEPNAP (1 << 24) /* pre-2.06 */ +#define HID0_DOZE (1 << 23) /* pre-2.06 */ +#define HID0_NAP (1 << 22) /* pre-2.06 */ +#define HID0_HILE PPC_BIT(19) /* POWER8 */ +#define HID0_POWER9_HILE PPC_BIT(4) + +/*****************************************************************************/ +/* PowerPC Instructions types definitions */ +enum { + PPC_NONE = 0x0000000000000000ULL, + /* PowerPC base instructions set */ + PPC_INSNS_BASE = 0x0000000000000001ULL, + /* integer operations instructions */ +#define PPC_INTEGER PPC_INSNS_BASE + /* flow control instructions */ +#define PPC_FLOW PPC_INSNS_BASE + /* virtual memory instructions */ +#define PPC_MEM PPC_INSNS_BASE + /* ld/st with reservation instructions */ +#define PPC_RES PPC_INSNS_BASE + /* spr/msr access instructions */ +#define PPC_MISC PPC_INSNS_BASE + /* Deprecated instruction sets */ + /* Original POWER instruction set */ + PPC_POWER = 0x0000000000000002ULL, + /* POWER2 instruction set extension */ + PPC_POWER2 = 0x0000000000000004ULL, + /* Power RTC support */ + PPC_POWER_RTC = 0x0000000000000008ULL, + /* Power-to-PowerPC bridge (601) */ + PPC_POWER_BR = 0x0000000000000010ULL, + /* 64 bits PowerPC instruction set */ + PPC_64B = 0x0000000000000020ULL, + /* New 64 bits extensions (PowerPC 2.0x) */ + PPC_64BX = 0x0000000000000040ULL, + /* 64 bits hypervisor extensions */ + PPC_64H = 0x0000000000000080ULL, + /* New wait instruction (PowerPC 2.0x) */ + PPC_WAIT = 0x0000000000000100ULL, + /* Time base mftb instruction */ + PPC_MFTB = 0x0000000000000200ULL, + + /* Fixed-point unit extensions */ + /* PowerPC 602 specific */ + PPC_602_SPEC = 0x0000000000000400ULL, + /* isel instruction */ + PPC_ISEL = 0x0000000000000800ULL, + /* popcntb instruction */ + PPC_POPCNTB = 0x0000000000001000ULL, + /* string load / store */ + PPC_STRING = 0x0000000000002000ULL, + /* real mode cache inhibited load / store */ + PPC_CILDST = 0x0000000000004000ULL, + + /* Floating-point unit extensions */ + /* Optional floating point instructions */ + PPC_FLOAT = 
0x0000000000010000ULL,
+    /* New floating-point extensions (PowerPC 2.0x) */
+    PPC_FLOAT_EXT = 0x0000000000020000ULL,
+    PPC_FLOAT_FSQRT = 0x0000000000040000ULL,
+    PPC_FLOAT_FRES = 0x0000000000080000ULL,
+    PPC_FLOAT_FRSQRTE = 0x0000000000100000ULL,
+    PPC_FLOAT_FRSQRTES = 0x0000000000200000ULL,
+    PPC_FLOAT_FSEL = 0x0000000000400000ULL,
+    PPC_FLOAT_STFIWX = 0x0000000000800000ULL,
+
+    /* Vector/SIMD extensions */
+    /* Altivec support */
+    PPC_ALTIVEC = 0x0000000001000000ULL,
+    /* PowerPC 2.03 SPE extension */
+    PPC_SPE = 0x0000000002000000ULL,
+    /* PowerPC 2.03 SPE single-precision floating-point extension */
+    PPC_SPE_SINGLE = 0x0000000004000000ULL,
+    /* PowerPC 2.03 SPE double-precision floating-point extension */
+    PPC_SPE_DOUBLE = 0x0000000008000000ULL,
+
+    /* Optional memory control instructions */
+    PPC_MEM_TLBIA = 0x0000000010000000ULL,
+    PPC_MEM_TLBIE = 0x0000000020000000ULL,
+    PPC_MEM_TLBSYNC = 0x0000000040000000ULL,
+    /* sync instruction */
+    PPC_MEM_SYNC = 0x0000000080000000ULL,
+#ifndef _MSC_VER
+    /* eieio instruction */
+    PPC_MEM_EIEIO = 0x0000000100000000ULL,
+
+    /* Cache control instructions */
+    PPC_CACHE = 0x0000000200000000ULL,
+    /* icbi instruction */
+    PPC_CACHE_ICBI = 0x0000000400000000ULL,
+    /* dcbz instruction */
+    PPC_CACHE_DCBZ = 0x0000000800000000ULL,
+    /* dcba instruction */
+    PPC_CACHE_DCBA = 0x0000002000000000ULL,
+    /* Freescale cache locking instructions */
+    PPC_CACHE_LOCK = 0x0000004000000000ULL,
+
+    /* MMU related extensions */
+    /* external control instructions */
+    PPC_EXTERN = 0x0000010000000000ULL,
+    /* segment register access instructions */
+    PPC_SEGMENT = 0x0000020000000000ULL,
+    /* PowerPC 6xx TLB management instructions */
+    PPC_6xx_TLB = 0x0000040000000000ULL,
+    /* PowerPC 74xx TLB management instructions */
+    PPC_74xx_TLB = 0x0000080000000000ULL,
+    /* PowerPC 40x TLB management instructions */
+    PPC_40x_TLB = 0x0000100000000000ULL,
+    /* segment register access instructions for PowerPC 64 "bridge" */
+    PPC_SEGMENT_64B = 0x0000200000000000ULL,
+    /* SLB management */
+    PPC_SLBI = 0x0000400000000000ULL,
+
+    /* Embedded PowerPC dedicated instructions */
+    PPC_WRTEE = 0x0001000000000000ULL,
+    /* PowerPC 40x exception model */
+    PPC_40x_EXCP = 0x0002000000000000ULL,
+    /* PowerPC 405 Mac instructions */
+    PPC_405_MAC = 0x0004000000000000ULL,
+    /* PowerPC 440 specific instructions */
+    PPC_440_SPEC = 0x0008000000000000ULL,
+    /* BookE (embedded) PowerPC specification */
+    PPC_BOOKE = 0x0010000000000000ULL,
+    /* mfapidi instruction */
+    PPC_MFAPIDI = 0x0020000000000000ULL,
+    /* tlbiva instruction */
+    PPC_TLBIVA = 0x0040000000000000ULL,
+    /* tlbivax instruction */
+    PPC_TLBIVAX = 0x0080000000000000ULL,
+    /* PowerPC 4xx dedicated instructions */
+    PPC_4xx_COMMON = 0x0100000000000000ULL,
+    /* PowerPC 40x icbt instructions */
+    PPC_40x_ICBT = 0x0200000000000000ULL,
+    /* rfmci is not implemented in all BookE PowerPC */
+    PPC_RFMCI = 0x0400000000000000ULL,
+    /* rfdi instruction */
+    PPC_RFDI = 0x0800000000000000ULL,
+    /* DCR accesses */
+    PPC_DCR = 0x1000000000000000ULL,
+    /* DCR extended accesses */
+    PPC_DCRX = 0x2000000000000000ULL,
+    /* user-mode DCR access, implemented in PowerPC 460 */
+    PPC_DCRUX = 0x4000000000000000ULL,
+    /* popcntw and popcntd instructions */
+    PPC_POPCNTWD = 0x8000000000000000ULL,
+#else
+#define PPC_MEM_EIEIO 0x0000000100000000ULL
+#define PPC_CACHE 0x0000000200000000ULL
+#define PPC_CACHE_ICBI 0x0000000400000000ULL
+#define PPC_CACHE_DCBZ 0x0000000800000000ULL
+#define PPC_CACHE_DCBA 0x0000002000000000ULL
+#define PPC_CACHE_LOCK
0x0000004000000000ULL +#define PPC_EXTERN 0x0000010000000000ULL +#define PPC_SEGMENT 0x0000020000000000ULL +#define PPC_6xx_TLB 0x0000040000000000ULL +#define PPC_74xx_TLB 0x0000080000000000ULL +#define PPC_40x_TLB 0x0000100000000000ULL +#define PPC_SEGMENT_64B 0x0000200000000000ULL +#define PPC_SLBI 0x0000400000000000ULL +#define PPC_WRTEE 0x0001000000000000ULL +#define PPC_40x_EXCP 0x0002000000000000ULL +#define PPC_405_MAC 0x0004000000000000ULL +#define PPC_440_SPEC 0x0008000000000000ULL +#define PPC_BOOKE 0x0010000000000000ULL +#define PPC_MFAPIDI 0x0020000000000000ULL +#define PPC_TLBIVA 0x0040000000000000ULL +#define PPC_TLBIVAX 0x0080000000000000ULL +#define PPC_4xx_COMMON 0x0100000000000000ULL +#define PPC_40x_ICBT 0x0200000000000000ULL +#define PPC_RFMCI 0x0400000000000000ULL +#define PPC_RFDI 0x0800000000000000ULL +#define PPC_DCR 0x1000000000000000ULL +#define PPC_DCRX 0x2000000000000000ULL +#define PPC_DCRUX 0x4000000000000000ULL +#define PPC_POPCNTWD 0x8000000000000000ULL +#endif + +#define PPC_TCG_INSNS (PPC_INSNS_BASE | PPC_POWER | PPC_POWER2 \ + | PPC_POWER_RTC | PPC_POWER_BR | PPC_64B \ + | PPC_64BX | PPC_64H | PPC_WAIT | PPC_MFTB \ + | PPC_602_SPEC | PPC_ISEL | PPC_POPCNTB \ + | PPC_STRING | PPC_FLOAT | PPC_FLOAT_EXT \ + | PPC_FLOAT_FSQRT | PPC_FLOAT_FRES \ + | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES \ + | PPC_FLOAT_FSEL | PPC_FLOAT_STFIWX \ + | PPC_ALTIVEC | PPC_SPE | PPC_SPE_SINGLE \ + | PPC_SPE_DOUBLE | PPC_MEM_TLBIA \ + | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC \ + | PPC_MEM_SYNC | PPC_MEM_EIEIO \ + | PPC_CACHE | PPC_CACHE_ICBI \ + | PPC_CACHE_DCBZ \ + | PPC_CACHE_DCBA | PPC_CACHE_LOCK \ + | PPC_EXTERN | PPC_SEGMENT | PPC_6xx_TLB \ + | PPC_74xx_TLB | PPC_40x_TLB | PPC_SEGMENT_64B \ + | PPC_SLBI | PPC_WRTEE | PPC_40x_EXCP \ + | PPC_405_MAC | PPC_440_SPEC | PPC_BOOKE \ + | PPC_MFAPIDI | PPC_TLBIVA | PPC_TLBIVAX \ + | PPC_4xx_COMMON | PPC_40x_ICBT | PPC_RFMCI \ + | PPC_RFDI | PPC_DCR | PPC_DCRX | PPC_DCRUX \ + | PPC_POPCNTWD | PPC_CILDST) + + /* extended type values */ + + /* BookE 2.06 PowerPC specification */ + PPC2_BOOKE206 = 0x0000000000000001ULL, + /* VSX (extensions to Altivec / VMX) */ + PPC2_VSX = 0x0000000000000002ULL, + /* Decimal Floating Point (DFP) */ + PPC2_DFP = 0x0000000000000004ULL, + /* Embedded.Processor Control */ + PPC2_PRCNTL = 0x0000000000000008ULL, + /* Byte-reversed, indexed, double-word load and store */ + PPC2_DBRX = 0x0000000000000010ULL, + /* Book I 2.05 PowerPC specification */ + PPC2_ISA205 = 0x0000000000000020ULL, + /* VSX additions in ISA 2.07 */ + PPC2_VSX207 = 0x0000000000000040ULL, + /* ISA 2.06B bpermd */ + PPC2_PERM_ISA206 = 0x0000000000000080ULL, + /* ISA 2.06B divide extended variants */ + PPC2_DIVE_ISA206 = 0x0000000000000100ULL, + /* ISA 2.06B larx/stcx. 
instructions */
+    PPC2_ATOMIC_ISA206 = 0x0000000000000200ULL,
+    /* ISA 2.06B floating point integer conversion */
+    PPC2_FP_CVT_ISA206 = 0x0000000000000400ULL,
+    /* ISA 2.06B floating point test instructions */
+    PPC2_FP_TST_ISA206 = 0x0000000000000800ULL,
+    /* ISA 2.07 bctar instruction */
+    PPC2_BCTAR_ISA207 = 0x0000000000001000ULL,
+    /* ISA 2.07 load/store quadword */
+    PPC2_LSQ_ISA207 = 0x0000000000002000ULL,
+    /* ISA 2.07 Altivec */
+    PPC2_ALTIVEC_207 = 0x0000000000004000ULL,
+    /* PowerISA 2.07 Book3s specification */
+    PPC2_ISA207S = 0x0000000000008000ULL,
+    /* Double precision floating point conversion for signed integer 64 */
+    PPC2_FP_CVT_S64 = 0x0000000000010000ULL,
+    /* Transactional Memory (ISA 2.07, Book II) */
+    PPC2_TM = 0x0000000000020000ULL,
+    /* Server PM instructions (ISA 2.06, Book III) */
+    PPC2_PM_ISA206 = 0x0000000000040000ULL,
+    /* POWER ISA 3.0 */
+    PPC2_ISA300 = 0x0000000000080000ULL,
+
+#define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \
+                        PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \
+                        PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | \
+                        PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | \
+                        PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | \
+                        PPC2_ALTIVEC_207 | PPC2_ISA207S | PPC2_DFP | \
+                        PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | \
+                        PPC2_ISA300)
+};
+
+/*****************************************************************************/
+/*
+ * Memory access type:
+ * may be needed for precise access rights control and precise exceptions.
+ */
+enum {
+    /* 1 bit to define user level / supervisor access */
+    ACCESS_USER = 0x00,
+    ACCESS_SUPER = 0x01,
+    /* Type of instruction that generated the access */
+    ACCESS_CODE = 0x10,  /* Code fetch access */
+    ACCESS_INT = 0x20,   /* Integer load/store access */
+    ACCESS_FLOAT = 0x30, /* floating point load/store access */
+    ACCESS_RES = 0x40,   /* load/store with reservation */
+    ACCESS_EXT = 0x50,   /* external access */
+    ACCESS_CACHE = 0x60, /* Cache manipulation */
+};
+
+/*
+ * Hardware interrupt sources:
+ * all those exceptions can be raised simultaneously
+ */
+/* Input pins definitions */
+enum {
+    /* 6xx bus input pins */
+    PPC6xx_INPUT_HRESET = 0,
+    PPC6xx_INPUT_SRESET = 1,
+    PPC6xx_INPUT_CKSTP_IN = 2,
+    PPC6xx_INPUT_MCP = 3,
+    PPC6xx_INPUT_SMI = 4,
+    PPC6xx_INPUT_INT = 5,
+    PPC6xx_INPUT_TBEN = 6,
+    PPC6xx_INPUT_WAKEUP = 7,
+    PPC6xx_INPUT_NB,
+};
+
+enum {
+    /* Embedded PowerPC input pins */
+    PPCBookE_INPUT_HRESET = 0,
+    PPCBookE_INPUT_SRESET = 1,
+    PPCBookE_INPUT_CKSTP_IN = 2,
+    PPCBookE_INPUT_MCP = 3,
+    PPCBookE_INPUT_SMI = 4,
+    PPCBookE_INPUT_INT = 5,
+    PPCBookE_INPUT_CINT = 6,
+    PPCBookE_INPUT_NB,
+};
+
+enum {
+    /* PowerPC E500 input pins */
+    PPCE500_INPUT_RESET_CORE = 0,
+    PPCE500_INPUT_MCK = 1,
+    PPCE500_INPUT_CINT = 3,
+    PPCE500_INPUT_INT = 4,
+    PPCE500_INPUT_DEBUG = 6,
+    PPCE500_INPUT_NB,
+};
+
+enum {
+    /* PowerPC 40x input pins */
+    PPC40x_INPUT_RESET_CORE = 0,
+    PPC40x_INPUT_RESET_CHIP = 1,
+    PPC40x_INPUT_RESET_SYS = 2,
+    PPC40x_INPUT_CINT = 3,
+    PPC40x_INPUT_INT = 4,
+    PPC40x_INPUT_HALT = 5,
+    PPC40x_INPUT_DEBUG = 6,
+    PPC40x_INPUT_NB,
+};
+
+enum {
+    /* RCPU input pins */
+    PPCRCPU_INPUT_PORESET = 0,
+    PPCRCPU_INPUT_HRESET = 1,
+    PPCRCPU_INPUT_SRESET = 2,
+    PPCRCPU_INPUT_IRQ0 = 3,
+    PPCRCPU_INPUT_IRQ1 = 4,
+    PPCRCPU_INPUT_IRQ2 = 5,
+    PPCRCPU_INPUT_IRQ3 = 6,
+    PPCRCPU_INPUT_IRQ4 = 7,
+    PPCRCPU_INPUT_IRQ5 = 8,
+    PPCRCPU_INPUT_IRQ6 = 9,
+    PPCRCPU_INPUT_IRQ7 = 10,
+    PPCRCPU_INPUT_NB,
+};
+
+#if defined(TARGET_PPC64)
+enum {
+    /* PowerPC 970 input pins */
+    PPC970_INPUT_HRESET = 0,
+    PPC970_INPUT_SRESET = 1,
+    PPC970_INPUT_CKSTP = 2,
+    PPC970_INPUT_TBEN = 3,
+    PPC970_INPUT_MCP = 4,
+    PPC970_INPUT_INT = 5,
+    PPC970_INPUT_THINT = 6,
+    PPC970_INPUT_NB,
+};
+
+enum {
+    /* POWER7 input pins */
+    POWER7_INPUT_INT = 0,
+    /*
+     * POWER7 probably has other inputs, but we don't care about them
+     * for any existing machine. We can wire these up when we need
+     * them
+     */
+    POWER7_INPUT_NB,
+};
+
+enum {
+    /* POWER9 input pins */
+    POWER9_INPUT_INT = 0,
+    POWER9_INPUT_HINT = 1,
+    POWER9_INPUT_NB,
+};
+#endif
+
+/* Hardware exceptions definitions */
+enum {
+    /* External hardware exception sources */
+    PPC_INTERRUPT_RESET = 0, /* Reset exception */
+    PPC_INTERRUPT_WAKEUP,    /* Wakeup exception */
+    PPC_INTERRUPT_MCK,       /* Machine check exception */
+    PPC_INTERRUPT_EXT,       /* External interrupt */
+    PPC_INTERRUPT_SMI,       /* System management interrupt */
+    PPC_INTERRUPT_CEXT,      /* Critical external interrupt */
+    PPC_INTERRUPT_DEBUG,     /* External debug exception */
+    PPC_INTERRUPT_THERM,     /* Thermal exception */
+    /* Internal hardware exception sources */
+    PPC_INTERRUPT_DECR,      /* Decrementer exception */
+    PPC_INTERRUPT_HDECR,     /* Hypervisor decrementer exception */
+    PPC_INTERRUPT_PIT,       /* Programmable interval timer interrupt */
+    PPC_INTERRUPT_FIT,       /* Fixed interval timer interrupt */
+    PPC_INTERRUPT_WDT,       /* Watchdog timer interrupt */
+    PPC_INTERRUPT_CDOORBELL, /* Critical doorbell interrupt */
+    PPC_INTERRUPT_DOORBELL,  /* Doorbell interrupt */
+    PPC_INTERRUPT_PERFM,     /* Performance monitor interrupt */
+    PPC_INTERRUPT_HMI,       /* Hypervisor Maintenance interrupt */
+    PPC_INTERRUPT_HDOORBELL, /* Hypervisor Doorbell interrupt */
+    PPC_INTERRUPT_HVIRT,     /* Hypervisor virtualization interrupt */
+};
+
+/* Processor Compatibility mask (PCR) */
+enum {
+    PCR_COMPAT_2_05 = PPC_BIT(62),
+    PCR_COMPAT_2_06 = PPC_BIT(61),
+    PCR_COMPAT_2_07 = PPC_BIT(60),
+    PCR_COMPAT_3_00 = PPC_BIT(59),
+    PCR_COMPAT_3_10 = PPC_BIT(58),
+#ifndef _MSC_VER
+    PCR_VEC_DIS = PPC_BIT(0), /* Vec. disable (bit NA since POWER8) */
+    PCR_VSX_DIS = PPC_BIT(1), /* VSX disable (bit NA since POWER8) */
+    PCR_TM_DIS = PPC_BIT(2),  /* Trans.
memory disable (POWER8) */ +#else +#define PCR_VEC_DIS PPC_BIT(0) +#define PCR_VSX_DIS PPC_BIT(1) +#define PCR_TM_DIS PPC_BIT(2) +#endif +}; + +/* HMER/HMEER */ +#ifndef _MSC_VER +enum { + HMER_MALFUNCTION_ALERT = PPC_BIT(0), + HMER_PROC_RECV_DONE = PPC_BIT(2), + HMER_PROC_RECV_ERROR_MASKED = PPC_BIT(3), + HMER_TFAC_ERROR = PPC_BIT(4), + HMER_TFMR_PARITY_ERROR = PPC_BIT(5), + HMER_XSCOM_FAIL = PPC_BIT(8), + HMER_XSCOM_DONE = PPC_BIT(9), + HMER_PROC_RECV_AGAIN = PPC_BIT(11), + HMER_WARN_RISE = PPC_BIT(14), + HMER_WARN_FALL = PPC_BIT(15), + HMER_SCOM_FIR_HMI = PPC_BIT(16), + HMER_TRIG_FIR_HMI = PPC_BIT(17), + HMER_HYP_RESOURCE_ERR = PPC_BIT(20), + HMER_XSCOM_STATUS_MASK = PPC_BITMASK(21, 23), +}; +#else +#define HMER_MALFUNCTION_ALERT PPC_BIT(0) +#define HMER_PROC_RECV_DONE PPC_BIT(2) +#define HMER_PROC_RECV_ERROR_MASKED PPC_BIT(3) +#define HMER_TFAC_ERROR PPC_BIT(4) +#define HMER_TFMR_PARITY_ERROR PPC_BIT(5) +#define HMER_XSCOM_FAIL PPC_BIT(8) +#define HMER_XSCOM_DONE PPC_BIT(9) +#define HMER_PROC_RECV_AGAIN PPC_BIT(11) +#define HMER_WARN_RISE PPC_BIT(14) +#define HMER_WARN_FALL PPC_BIT(15) +#define HMER_SCOM_FIR_HMI PPC_BIT(16) +#define HMER_TRIG_FIR_HMI PPC_BIT(17) +#define HMER_HYP_RESOURCE_ERR PPC_BIT(20) +#define HMER_XSCOM_STATUS_MASK PPC_BITMASK(21, 23) +#endif + +/* Alternate Interrupt Location (AIL) */ +enum { + AIL_NONE = 0, + AIL_RESERVED = 1, + AIL_0001_8000 = 2, + AIL_C000_0000_0000_4000 = 3, +}; + +/*****************************************************************************/ + +#define is_isa300(ctx) (!!(ctx->insns_flags2 & PPC2_ISA300)) +target_ulong cpu_read_xer(CPUPPCState *env); +void cpu_write_xer(CPUPPCState *env, target_ulong xer); + +/* + * All 64-bit server processors compliant with arch 2.x, ie. 970 and newer, + * have PPC_SEGMENT_64B. 
+ */ +#define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B)) + +static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *flags) +{ + *pc = env->nip; + *cs_base = 0; + *flags = env->hflags; +} + +void QEMU_NORETURN raise_exception(CPUPPCState *env, uint32_t exception); +void QEMU_NORETURN raise_exception_ra(CPUPPCState *env, uint32_t exception, + uintptr_t raddr); +void QEMU_NORETURN raise_exception_err(CPUPPCState *env, uint32_t exception, + uint32_t error_code); +void QEMU_NORETURN raise_exception_err_ra(CPUPPCState *env, uint32_t exception, + uint32_t error_code, uintptr_t raddr); + +static inline int booke206_tlbm_id(CPUPPCState *env, ppcmas_tlb_t *tlbm) +{ + uintptr_t tlbml = (uintptr_t)tlbm; + uintptr_t tlbl = (uintptr_t)env->tlb.tlbm; + + return (tlbml - tlbl) / sizeof(env->tlb.tlbm[0]); +} + +static inline int booke206_tlb_size(CPUPPCState *env, int tlbn) +{ + uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; + int r = tlbncfg & TLBnCFG_N_ENTRY; + return r; +} + +static inline int booke206_tlb_ways(CPUPPCState *env, int tlbn) +{ + uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; + int r = tlbncfg >> TLBnCFG_ASSOC_SHIFT; + return r; +} + +static inline int booke206_tlbm_to_tlbn(CPUPPCState *env, ppcmas_tlb_t *tlbm) +{ + int id = booke206_tlbm_id(env, tlbm); + int end = 0; + int i; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + end += booke206_tlb_size(env, i); + if (id < end) { + return i; + } + } + + cpu_abort(env_cpu(env), "Unknown TLBe: %d\n", id); + return 0; +} + +static inline int booke206_tlbm_to_way(CPUPPCState *env, ppcmas_tlb_t *tlb) +{ + int tlbn = booke206_tlbm_to_tlbn(env, tlb); + int tlbid = booke206_tlbm_id(env, tlb); + return tlbid & (booke206_tlb_ways(env, tlbn) - 1); +} + +static inline ppcmas_tlb_t *booke206_get_tlbm(CPUPPCState *env, const int tlbn, + target_ulong ea, int way) +{ + int r; + uint32_t ways = booke206_tlb_ways(env, tlbn); + int ways_bits = ctz32(ways); + int tlb_bits = ctz32(booke206_tlb_size(env, tlbn)); + int i; + + way &= ways - 1; + ea >>= MAS2_EPN_SHIFT; + ea &= (1 << (tlb_bits - ways_bits)) - 1; + r = (ea << ways_bits) | way; + + if (r >= booke206_tlb_size(env, tlbn)) { + return NULL; + } + + /* bump up to tlbn index */ + for (i = 0; i < tlbn; i++) { + r += booke206_tlb_size(env, i); + } + + return &env->tlb.tlbm[r]; +} + +/* returns bitmap of supported page sizes for a given TLB */ +static inline uint32_t booke206_tlbnps(CPUPPCState *env, const int tlbn) +{ + uint32_t ret = 0; + + if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { + /* MAV2 */ + ret = env->spr[SPR_BOOKE_TLB0PS + tlbn]; + } else { + uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; + uint32_t min = (tlbncfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; + uint32_t max = (tlbncfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT; + int i; + for (i = min; i <= max; i++) { + ret |= (1 << (i << 1)); + } + } + + return ret; +} + +static inline void booke206_fixed_size_tlbn(CPUPPCState *env, const int tlbn, + ppcmas_tlb_t *tlb) +{ + uint8_t i; + int32_t tsize = -1; + + for (i = 0; i < 32; i++) { + if ((env->spr[SPR_BOOKE_TLB0PS + tlbn]) & (1ULL << i)) { + if (tsize == -1) { + tsize = i; + } else { + return; + } + } + } + + /* TLBnPS unimplemented? Odd.. 
*/
+    assert(tsize != -1);
+    tlb->mas1 &= ~MAS1_TSIZE_MASK;
+    tlb->mas1 |= ((uint32_t)tsize) << MAS1_TSIZE_SHIFT;
+}
+
+static inline bool msr_is_64bit(CPUPPCState *env, target_ulong msr)
+{
+    if (env->mmu_model == POWERPC_MMU_BOOKE206) {
+        return msr & (1ULL << MSR_CM);
+    }
+
+    return msr & (1ULL << MSR_SF);
+}
+
+/**
+ * Check whether register rx is in the range between start and
+ * start + nregs (as needed by the LSWX and LSWI instructions)
+ */
+static inline bool lsw_reg_in_range(int start, int nregs, int rx)
+{
+    return (start + nregs <= 32 && rx >= start && rx < start + nregs) ||
+           (start + nregs > 32 && (rx >= start || rx < start + nregs - 32));
+}
+
+/* Accessors for FP, VMX and VSX registers */
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VsrB(i) u8[i]
+#define VsrSB(i) s8[i]
+#define VsrH(i) u16[i]
+#define VsrSH(i) s16[i]
+#define VsrW(i) u32[i]
+#define VsrSW(i) s32[i]
+#define VsrD(i) u64[i]
+#define VsrSD(i) s64[i]
+#else
+#define VsrB(i) u8[15 - (i)]
+#define VsrSB(i) s8[15 - (i)]
+#define VsrH(i) u16[7 - (i)]
+#define VsrSH(i) s16[7 - (i)]
+#define VsrW(i) u32[3 - (i)]
+#define VsrSW(i) s32[3 - (i)]
+#define VsrD(i) u64[1 - (i)]
+#define VsrSD(i) s64[1 - (i)]
+#endif
+
+static inline int vsr64_offset(int i, bool high)
+{
+    return offsetof(CPUPPCState, vsr[i].VsrD(high ? 0 : 1));
+}
+
+static inline int vsr_full_offset(int i)
+{
+    return offsetof(CPUPPCState, vsr[i].u64[0]);
+}
+
+static inline int fpr_offset(int i)
+{
+    return vsr64_offset(i, true);
+}
+
+static inline uint64_t *cpu_fpr_ptr(CPUPPCState *env, int i)
+{
+    return (uint64_t *)((uintptr_t)env + fpr_offset(i));
+}
+
+static inline uint64_t *cpu_vsrl_ptr(CPUPPCState *env, int i)
+{
+    return (uint64_t *)((uintptr_t)env + vsr64_offset(i, false));
+}
+
+static inline long avr64_offset(int i, bool high)
+{
+    return vsr64_offset(i + 32, high);
+}
+
+static inline int avr_full_offset(int i)
+{
+    return vsr_full_offset(i + 32);
+}
+
+static inline ppc_avr_t *cpu_avr_ptr(CPUPPCState *env, int i)
+{
+    return (ppc_avr_t *)((uintptr_t)env + avr_full_offset(i));
+}
+
+void dump_mmu(CPUPPCState *env);
+
+void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
+#endif /* PPC_CPU_H */
diff --git a/qemu/target/ppc/dfp_helper.c b/qemu/target/ppc/dfp_helper.c
new file mode 100644
index 00000000..a025ed36
--- /dev/null
+++ b/qemu/target/ppc/dfp_helper.c
@@ -0,0 +1,1331 @@
+/*
+ * PowerPC Decimal Floating Point (DFP) emulation helpers for QEMU.
+ *
+ * Copyright (c) 2014 IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" + +#define DECNUMDIGITS 34 +#include "libdecnumber/decContext.h" +#include "libdecnumber/decNumber.h" +#include "libdecnumber/dpd/decimal32.h" +#include "libdecnumber/dpd/decimal64.h" +#include "libdecnumber/dpd/decimal128.h" + + +static void get_dfp64(ppc_vsr_t *dst, ppc_fprp_t *dfp) +{ + dst->VsrD(1) = dfp->VsrD(0); +} + +static void get_dfp128(ppc_vsr_t *dst, ppc_fprp_t *dfp) +{ + dst->VsrD(0) = dfp[0].VsrD(0); + dst->VsrD(1) = dfp[1].VsrD(0); +} + +static void set_dfp64(ppc_fprp_t *dfp, ppc_vsr_t *src) +{ + dfp->VsrD(0) = src->VsrD(1); +} + +static void set_dfp128(ppc_fprp_t *dfp, ppc_vsr_t *src) +{ + dfp[0].VsrD(0) = src->VsrD(0); + dfp[1].VsrD(0) = src->VsrD(1); +} + +struct PPC_DFP { + CPUPPCState *env; + ppc_vsr_t vt, va, vb; + decNumber t, a, b; + decContext context; + uint8_t crbf; +}; + +static void dfp_prepare_rounding_mode(decContext *context, uint64_t fpscr) +{ + enum rounding rnd; + + switch ((fpscr & FP_DRN) >> FPSCR_DRN0) { + case 0: + rnd = DEC_ROUND_HALF_EVEN; + break; + case 1: + rnd = DEC_ROUND_DOWN; + break; + case 2: + rnd = DEC_ROUND_CEILING; + break; + case 3: + rnd = DEC_ROUND_FLOOR; + break; + case 4: + rnd = DEC_ROUND_HALF_UP; + break; + case 5: + rnd = DEC_ROUND_HALF_DOWN; + break; + case 6: + rnd = DEC_ROUND_UP; + break; + case 7: + rnd = DEC_ROUND_05UP; + break; + default: + g_assert_not_reached(); + } + + decContextSetRounding(context, rnd); +} + +static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc, + struct PPC_DFP *dfp) +{ + enum rounding rnd; + if (r == 0) { + switch (rmc & 3) { + case 0: + rnd = DEC_ROUND_HALF_EVEN; + break; + case 1: + rnd = DEC_ROUND_DOWN; + break; + case 2: + rnd = DEC_ROUND_HALF_UP; + break; + case 3: /* use FPSCR rounding mode */ + return; + default: + assert(0); /* cannot get here */ + } + } else { /* r == 1 */ + switch (rmc & 3) { + case 0: + rnd = DEC_ROUND_CEILING; + break; + case 1: + rnd = DEC_ROUND_FLOOR; + break; + case 2: + rnd = DEC_ROUND_UP; + break; + case 3: + rnd = DEC_ROUND_HALF_DOWN; + break; + default: + assert(0); /* cannot get here */ + } + } + decContextSetRounding(&dfp->context, rnd); +} + +static void dfp_prepare_decimal64(struct PPC_DFP *dfp, ppc_fprp_t *a, + ppc_fprp_t *b, CPUPPCState *env) +{ + decContextDefault(&dfp->context, DEC_INIT_DECIMAL64); + dfp_prepare_rounding_mode(&dfp->context, env->fpscr); + dfp->env = env; + + if (a) { + get_dfp64(&dfp->va, a); + decimal64ToNumber((decimal64 *)&dfp->va.VsrD(1), &dfp->a); + } else { + dfp->va.VsrD(1) = 0; + decNumberZero(&dfp->a); + } + + if (b) { + get_dfp64(&dfp->vb, b); + decimal64ToNumber((decimal64 *)&dfp->vb.VsrD(1), &dfp->b); + } else { + dfp->vb.VsrD(1) = 0; + decNumberZero(&dfp->b); + } +} + +static void dfp_prepare_decimal128(struct PPC_DFP *dfp, ppc_fprp_t *a, + ppc_fprp_t *b, CPUPPCState *env) +{ + decContextDefault(&dfp->context, DEC_INIT_DECIMAL128); + dfp_prepare_rounding_mode(&dfp->context, env->fpscr); + dfp->env = env; + + if (a) { + get_dfp128(&dfp->va, a); + decimal128ToNumber((decimal128 *)&dfp->va, &dfp->a); + } else { + dfp->va.VsrD(0) = dfp->va.VsrD(1) = 0; + decNumberZero(&dfp->a); + } + + if (b) { + get_dfp128(&dfp->vb, b); + decimal128ToNumber((decimal128 *)&dfp->vb, &dfp->b); + } else { + dfp->vb.VsrD(0) = dfp->vb.VsrD(1) = 0; + decNumberZero(&dfp->b); + } +} + +static void dfp_finalize_decimal64(struct PPC_DFP *dfp) +{ + decimal64FromNumber((decimal64 *)&dfp->vt.VsrD(1), &dfp->t, &dfp->context); +} + +static void 
dfp_finalize_decimal128(struct PPC_DFP *dfp) +{ + decimal128FromNumber((decimal128 *)&dfp->vt, &dfp->t, &dfp->context); +} + +static void dfp_set_FPSCR_flag(struct PPC_DFP *dfp, uint64_t flag, + uint64_t enabled) +{ + dfp->env->fpscr |= (flag | FP_FX); + if (dfp->env->fpscr & enabled) { + dfp->env->fpscr |= FP_FEX; + } +} + +static void dfp_set_FPRF_from_FRT_with_context(struct PPC_DFP *dfp, + decContext *context) +{ + uint64_t fprf = 0; + + /* construct FPRF */ + switch (decNumberClass(&dfp->t, context)) { + case DEC_CLASS_SNAN: + fprf = 0x01; + break; + case DEC_CLASS_QNAN: + fprf = 0x11; + break; + case DEC_CLASS_NEG_INF: + fprf = 0x09; + break; + case DEC_CLASS_NEG_NORMAL: + fprf = 0x08; + break; + case DEC_CLASS_NEG_SUBNORMAL: + fprf = 0x18; + break; + case DEC_CLASS_NEG_ZERO: + fprf = 0x12; + break; + case DEC_CLASS_POS_ZERO: + fprf = 0x02; + break; + case DEC_CLASS_POS_SUBNORMAL: + fprf = 0x14; + break; + case DEC_CLASS_POS_NORMAL: + fprf = 0x04; + break; + case DEC_CLASS_POS_INF: + fprf = 0x05; + break; + default: + assert(0); /* should never get here */ + } + dfp->env->fpscr &= ~FP_FPRF; + dfp->env->fpscr |= (fprf << FPSCR_FPRF); +} + +static void dfp_set_FPRF_from_FRT(struct PPC_DFP *dfp) +{ + dfp_set_FPRF_from_FRT_with_context(dfp, &dfp->context); +} + +static void dfp_set_FPRF_from_FRT_short(struct PPC_DFP *dfp) +{ + decContext shortContext; + decContextDefault(&shortContext, DEC_INIT_DECIMAL32); + dfp_set_FPRF_from_FRT_with_context(dfp, &shortContext); +} + +static void dfp_set_FPRF_from_FRT_long(struct PPC_DFP *dfp) +{ + decContext longContext; + decContextDefault(&longContext, DEC_INIT_DECIMAL64); + dfp_set_FPRF_from_FRT_with_context(dfp, &longContext); +} + +static void dfp_check_for_OX(struct PPC_DFP *dfp) +{ + if (dfp->context.status & DEC_Overflow) { + dfp_set_FPSCR_flag(dfp, FP_OX, FP_OE); + } +} + +static void dfp_check_for_UX(struct PPC_DFP *dfp) +{ + if (dfp->context.status & DEC_Underflow) { + dfp_set_FPSCR_flag(dfp, FP_UX, FP_UE); + } +} + +static void dfp_check_for_XX(struct PPC_DFP *dfp) +{ + if (dfp->context.status & DEC_Inexact) { + dfp_set_FPSCR_flag(dfp, FP_XX | FP_FI, FP_XE); + } +} + +static void dfp_check_for_ZX(struct PPC_DFP *dfp) +{ + if (dfp->context.status & DEC_Division_by_zero) { + dfp_set_FPSCR_flag(dfp, FP_ZX, FP_ZE); + } +} + +static void dfp_check_for_VXSNAN(struct PPC_DFP *dfp) +{ + if (dfp->context.status & DEC_Invalid_operation) { + if (decNumberIsSNaN(&dfp->a) || decNumberIsSNaN(&dfp->b)) { + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE); + } + } +} + +static void dfp_check_for_VXSNAN_and_convert_to_QNaN(struct PPC_DFP *dfp) +{ + if (decNumberIsSNaN(&dfp->t)) { + dfp->t.bits &= ~DECSNAN; + dfp->t.bits |= DECNAN; + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE); + } +} + +static void dfp_check_for_VXISI(struct PPC_DFP *dfp, int testForSameSign) +{ + if (dfp->context.status & DEC_Invalid_operation) { + if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) { + int same = decNumberClass(&dfp->a, &dfp->context) == + decNumberClass(&dfp->b, &dfp->context); + if ((same && testForSameSign) || (!same && !testForSameSign)) { + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXISI, FP_VE); + } + } + } +} + +static void dfp_check_for_VXISI_add(struct PPC_DFP *dfp) +{ + dfp_check_for_VXISI(dfp, 0); +} + +static void dfp_check_for_VXISI_subtract(struct PPC_DFP *dfp) +{ + dfp_check_for_VXISI(dfp, 1); +} + +static void dfp_check_for_VXIMZ(struct PPC_DFP *dfp) +{ + if (dfp->context.status & DEC_Invalid_operation) { + if ((decNumberIsInfinite(&dfp->a) 
&& decNumberIsZero(&dfp->b)) || + (decNumberIsInfinite(&dfp->b) && decNumberIsZero(&dfp->a))) { + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIMZ, FP_VE); + } + } +} + +static void dfp_check_for_VXZDZ(struct PPC_DFP *dfp) +{ + if (dfp->context.status & DEC_Division_undefined) { + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXZDZ, FP_VE); + } +} + +static void dfp_check_for_VXIDI(struct PPC_DFP *dfp) +{ + if (dfp->context.status & DEC_Invalid_operation) { + if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) { + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIDI, FP_VE); + } + } +} + +static void dfp_check_for_VXVC(struct PPC_DFP *dfp) +{ + if (decNumberIsNaN(&dfp->a) || decNumberIsNaN(&dfp->b)) { + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXVC, FP_VE); + } +} + +static void dfp_check_for_VXCVI(struct PPC_DFP *dfp) +{ + if ((dfp->context.status & DEC_Invalid_operation) && + (!decNumberIsSNaN(&dfp->a)) && + (!decNumberIsSNaN(&dfp->b))) { + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE); + } +} + +static void dfp_set_CRBF_from_T(struct PPC_DFP *dfp) +{ + if (decNumberIsNaN(&dfp->t)) { + dfp->crbf = 1; + } else if (decNumberIsZero(&dfp->t)) { + dfp->crbf = 2; + } else if (decNumberIsNegative(&dfp->t)) { + dfp->crbf = 8; + } else { + dfp->crbf = 4; + } +} + +static void dfp_set_FPCC_from_CRBF(struct PPC_DFP *dfp) +{ + dfp->env->fpscr &= ~FP_FPCC; + dfp->env->fpscr |= (dfp->crbf << FPSCR_FPCC); +} + +static inline void dfp_makeQNaN(decNumber *dn) +{ + dn->bits &= ~DECSPECIAL; + dn->bits |= DECNAN; +} + +static inline int dfp_get_digit(decNumber *dn, int n) +{ + assert(DECDPUN == 3); + int unit = n / DECDPUN; + int dig = n % DECDPUN; + switch (dig) { + case 0: + return dn->lsu[unit] % 10; + case 1: + return (dn->lsu[unit] / 10) % 10; + case 2: + return dn->lsu[unit] / 100; + } + g_assert_not_reached(); +} + +#define DFP_HELPER_TAB(op, dnop, postprocs, size) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ + ppc_fprp_t *b) \ +{ \ + struct PPC_DFP dfp; \ + dfp_prepare_decimal##size(&dfp, a, b, env); \ + dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context); \ + dfp_finalize_decimal##size(&dfp); \ + postprocs(&dfp); \ + set_dfp##size(t, &dfp.vt); \ +} + +static void ADD_PPs(struct PPC_DFP *dfp) +{ + dfp_set_FPRF_from_FRT(dfp); + dfp_check_for_OX(dfp); + dfp_check_for_UX(dfp); + dfp_check_for_XX(dfp); + dfp_check_for_VXSNAN(dfp); + dfp_check_for_VXISI_add(dfp); +} + +DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64) +DFP_HELPER_TAB(daddq, decNumberAdd, ADD_PPs, 128) + +static void SUB_PPs(struct PPC_DFP *dfp) +{ + dfp_set_FPRF_from_FRT(dfp); + dfp_check_for_OX(dfp); + dfp_check_for_UX(dfp); + dfp_check_for_XX(dfp); + dfp_check_for_VXSNAN(dfp); + dfp_check_for_VXISI_subtract(dfp); +} + +DFP_HELPER_TAB(dsub, decNumberSubtract, SUB_PPs, 64) +DFP_HELPER_TAB(dsubq, decNumberSubtract, SUB_PPs, 128) + +static void MUL_PPs(struct PPC_DFP *dfp) +{ + dfp_set_FPRF_from_FRT(dfp); + dfp_check_for_OX(dfp); + dfp_check_for_UX(dfp); + dfp_check_for_XX(dfp); + dfp_check_for_VXSNAN(dfp); + dfp_check_for_VXIMZ(dfp); +} + +DFP_HELPER_TAB(dmul, decNumberMultiply, MUL_PPs, 64) +DFP_HELPER_TAB(dmulq, decNumberMultiply, MUL_PPs, 128) + +static void DIV_PPs(struct PPC_DFP *dfp) +{ + dfp_set_FPRF_from_FRT(dfp); + dfp_check_for_OX(dfp); + dfp_check_for_UX(dfp); + dfp_check_for_ZX(dfp); + dfp_check_for_XX(dfp); + dfp_check_for_VXSNAN(dfp); + dfp_check_for_VXZDZ(dfp); + dfp_check_for_VXIDI(dfp); +} + +DFP_HELPER_TAB(ddiv, decNumberDivide, DIV_PPs, 64) +DFP_HELPER_TAB(ddivq, decNumberDivide, DIV_PPs, 128) + +#define DFP_HELPER_BF_AB(op, 
dnop, postprocs, size) \ +uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ +{ \ + struct PPC_DFP dfp; \ + dfp_prepare_decimal##size(&dfp, a, b, env); \ + dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context); \ + dfp_finalize_decimal##size(&dfp); \ + postprocs(&dfp); \ + return dfp.crbf; \ +} + +static void CMPU_PPs(struct PPC_DFP *dfp) +{ + dfp_set_CRBF_from_T(dfp); + dfp_set_FPCC_from_CRBF(dfp); + dfp_check_for_VXSNAN(dfp); +} + +DFP_HELPER_BF_AB(dcmpu, decNumberCompare, CMPU_PPs, 64) +DFP_HELPER_BF_AB(dcmpuq, decNumberCompare, CMPU_PPs, 128) + +static void CMPO_PPs(struct PPC_DFP *dfp) +{ + dfp_set_CRBF_from_T(dfp); + dfp_set_FPCC_from_CRBF(dfp); + dfp_check_for_VXSNAN(dfp); + dfp_check_for_VXVC(dfp); +} + +DFP_HELPER_BF_AB(dcmpo, decNumberCompare, CMPO_PPs, 64) +DFP_HELPER_BF_AB(dcmpoq, decNumberCompare, CMPO_PPs, 128) + +#define DFP_HELPER_TSTDC(op, size) \ +uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ +{ \ + struct PPC_DFP dfp; \ + int match = 0; \ + \ + dfp_prepare_decimal##size(&dfp, a, 0, env); \ + \ + match |= (dcm & 0x20) && decNumberIsZero(&dfp.a); \ + match |= (dcm & 0x10) && decNumberIsSubnormal(&dfp.a, &dfp.context); \ + match |= (dcm & 0x08) && decNumberIsNormal(&dfp.a, &dfp.context); \ + match |= (dcm & 0x04) && decNumberIsInfinite(&dfp.a); \ + match |= (dcm & 0x02) && decNumberIsQNaN(&dfp.a); \ + match |= (dcm & 0x01) && decNumberIsSNaN(&dfp.a); \ + \ + if (decNumberIsNegative(&dfp.a)) { \ + dfp.crbf = match ? 0xA : 0x8; \ + } else { \ + dfp.crbf = match ? 0x2 : 0x0; \ + } \ + \ + dfp_set_FPCC_from_CRBF(&dfp); \ + return dfp.crbf; \ +} + +DFP_HELPER_TSTDC(dtstdc, 64) +DFP_HELPER_TSTDC(dtstdcq, 128) + +#define DFP_HELPER_TSTDG(op, size) \ +uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ +{ \ + struct PPC_DFP dfp; \ + int minexp, maxexp, nzero_digits, nzero_idx, is_negative, is_zero, \ + is_extreme_exp, is_subnormal, is_normal, leftmost_is_nonzero, \ + match; \ + \ + dfp_prepare_decimal##size(&dfp, a, 0, env); \ + \ + if ((size) == 64) { \ + minexp = -398; \ + maxexp = 369; \ + nzero_digits = 16; \ + nzero_idx = 5; \ + } else if ((size) == 128) { \ + minexp = -6176; \ + maxexp = 6111; \ + nzero_digits = 34; \ + nzero_idx = 11; \ + } \ + \ + is_negative = decNumberIsNegative(&dfp.a); \ + is_zero = decNumberIsZero(&dfp.a); \ + is_extreme_exp = (dfp.a.exponent == maxexp) || \ + (dfp.a.exponent == minexp); \ + is_subnormal = decNumberIsSubnormal(&dfp.a, &dfp.context); \ + is_normal = decNumberIsNormal(&dfp.a, &dfp.context); \ + leftmost_is_nonzero = (dfp.a.digits == nzero_digits) && \ + (dfp.a.lsu[nzero_idx] != 0); \ + match = 0; \ + \ + match |= (dcm & 0x20) && is_zero && !is_extreme_exp; \ + match |= (dcm & 0x10) && is_zero && is_extreme_exp; \ + match |= (dcm & 0x08) && \ + (is_subnormal || (is_normal && is_extreme_exp)); \ + match |= (dcm & 0x04) && is_normal && !is_extreme_exp && \ + !leftmost_is_nonzero; \ + match |= (dcm & 0x02) && is_normal && !is_extreme_exp && \ + leftmost_is_nonzero; \ + match |= (dcm & 0x01) && decNumberIsSpecial(&dfp.a); \ + \ + if (is_negative) { \ + dfp.crbf = match ? 0xA : 0x8; \ + } else { \ + dfp.crbf = match ? 
0x2 : 0x0; \ + } \ + \ + dfp_set_FPCC_from_CRBF(&dfp); \ + return dfp.crbf; \ +} + +DFP_HELPER_TSTDG(dtstdg, 64) +DFP_HELPER_TSTDG(dtstdgq, 128) + +#define DFP_HELPER_TSTEX(op, size) \ +uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ +{ \ + struct PPC_DFP dfp; \ + int expa, expb, a_is_special, b_is_special; \ + \ + dfp_prepare_decimal##size(&dfp, a, b, env); \ + \ + expa = dfp.a.exponent; \ + expb = dfp.b.exponent; \ + a_is_special = decNumberIsSpecial(&dfp.a); \ + b_is_special = decNumberIsSpecial(&dfp.b); \ + \ + if (a_is_special || b_is_special) { \ + int atype = a_is_special ? (decNumberIsNaN(&dfp.a) ? 4 : 2) : 1; \ + int btype = b_is_special ? (decNumberIsNaN(&dfp.b) ? 4 : 2) : 1; \ + dfp.crbf = (atype ^ btype) ? 0x1 : 0x2; \ + } else if (expa < expb) { \ + dfp.crbf = 0x8; \ + } else if (expa > expb) { \ + dfp.crbf = 0x4; \ + } else { \ + dfp.crbf = 0x2; \ + } \ + \ + dfp_set_FPCC_from_CRBF(&dfp); \ + return dfp.crbf; \ +} + +DFP_HELPER_TSTEX(dtstex, 64) +DFP_HELPER_TSTEX(dtstexq, 128) + +#define DFP_HELPER_TSTSF(op, size) \ +uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ +{ \ + struct PPC_DFP dfp; \ + unsigned k; \ + ppc_vsr_t va; \ + \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + \ + get_dfp64(&va, a); \ + k = va.VsrD(1) & 0x3F; \ + \ + if (unlikely(decNumberIsSpecial(&dfp.b))) { \ + dfp.crbf = 1; \ + } else if (k == 0) { \ + dfp.crbf = 4; \ + } else if (unlikely(decNumberIsZero(&dfp.b))) { \ + /* Zero has no sig digits */ \ + dfp.crbf = 4; \ + } else { \ + unsigned nsd = dfp.b.digits; \ + if (k < nsd) { \ + dfp.crbf = 8; \ + } else if (k > nsd) { \ + dfp.crbf = 4; \ + } else { \ + dfp.crbf = 2; \ + } \ + } \ + \ + dfp_set_FPCC_from_CRBF(&dfp); \ + return dfp.crbf; \ +} + +DFP_HELPER_TSTSF(dtstsf, 64) +DFP_HELPER_TSTSF(dtstsfq, 128) + +#define DFP_HELPER_TSTSFI(op, size) \ +uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \ +{ \ + struct PPC_DFP dfp; \ + unsigned uim; \ + \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + \ + uim = a & 0x3F; \ + \ + if (unlikely(decNumberIsSpecial(&dfp.b))) { \ + dfp.crbf = 1; \ + } else if (uim == 0) { \ + dfp.crbf = 4; \ + } else if (unlikely(decNumberIsZero(&dfp.b))) { \ + /* Zero has no sig digits */ \ + dfp.crbf = 4; \ + } else { \ + unsigned nsd = dfp.b.digits; \ + if (uim < nsd) { \ + dfp.crbf = 8; \ + } else if (uim > nsd) { \ + dfp.crbf = 4; \ + } else { \ + dfp.crbf = 2; \ + } \ + } \ + \ + dfp_set_FPCC_from_CRBF(&dfp); \ + return dfp.crbf; \ +} + +DFP_HELPER_TSTSFI(dtstsfi, 64) +DFP_HELPER_TSTSFI(dtstsfiq, 128) + +static void QUA_PPs(struct PPC_DFP *dfp) +{ + dfp_set_FPRF_from_FRT(dfp); + dfp_check_for_XX(dfp); + dfp_check_for_VXSNAN(dfp); + dfp_check_for_VXCVI(dfp); +} + +static void dfp_quantize(uint8_t rmc, struct PPC_DFP *dfp) +{ + dfp_set_round_mode_from_immediate(0, rmc, dfp); + decNumberQuantize(&dfp->t, &dfp->b, &dfp->a, &dfp->context); + if (decNumberIsSNaN(&dfp->a)) { + dfp->t = dfp->a; + dfp_makeQNaN(&dfp->t); + } else if (decNumberIsSNaN(&dfp->b)) { + dfp->t = dfp->b; + dfp_makeQNaN(&dfp->t); + } else if (decNumberIsQNaN(&dfp->a)) { + dfp->t = dfp->a; + } else if (decNumberIsQNaN(&dfp->b)) { + dfp->t = dfp->b; + } +} + +#define DFP_HELPER_QUAI(op, size) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ + uint32_t te, uint32_t rmc) \ +{ \ + struct PPC_DFP dfp; \ + \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + \ + decNumberFromUInt32(&dfp.a, 1); \ + dfp.a.exponent = (int32_t)((int8_t)(te << 3) >> 3); \ + \ + dfp_quantize(rmc, &dfp); \ 
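+    /* Re-encode the quantized result (dfp.t) into DPD form in dfp.vt. */ \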
+ dfp_finalize_decimal##size(&dfp); \ + QUA_PPs(&dfp); \ + \ + set_dfp##size(t, &dfp.vt); \ +} + +DFP_HELPER_QUAI(dquai, 64) +DFP_HELPER_QUAI(dquaiq, 128) + +#define DFP_HELPER_QUA(op, size) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ + ppc_fprp_t *b, uint32_t rmc) \ +{ \ + struct PPC_DFP dfp; \ + \ + dfp_prepare_decimal##size(&dfp, a, b, env); \ + \ + dfp_quantize(rmc, &dfp); \ + dfp_finalize_decimal##size(&dfp); \ + QUA_PPs(&dfp); \ + \ + set_dfp##size(t, &dfp.vt); \ +} + +DFP_HELPER_QUA(dqua, 64) +DFP_HELPER_QUA(dquaq, 128) + +static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax, + struct PPC_DFP *dfp) +{ + int msd_orig, msd_rslt; + + if (unlikely((ref_sig == 0) || (dfp->b.digits <= ref_sig))) { + dfp->t = dfp->b; + if (decNumberIsSNaN(&dfp->b)) { + dfp_makeQNaN(&dfp->t); + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FPSCR_VE); + } + return; + } + + /* Reround is equivalent to quantizing b with 1**E(n) where */ + /* n = exp(b) + numDigits(b) - reference_significance. */ + + decNumberFromUInt32(&dfp->a, 1); + dfp->a.exponent = dfp->b.exponent + dfp->b.digits - ref_sig; + + if (unlikely(dfp->a.exponent > xmax)) { + dfp->t.digits = 0; + dfp->t.bits &= ~DECNEG; + dfp_makeQNaN(&dfp->t); + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE); + return; + } + + dfp_quantize(rmc, dfp); + + msd_orig = dfp_get_digit(&dfp->b, dfp->b.digits-1); + msd_rslt = dfp_get_digit(&dfp->t, dfp->t.digits-1); + + /* If the quantization resulted in rounding up to the next magnitude, */ + /* then we need to shift the significand and adjust the exponent. */ + + if (unlikely((msd_orig == 9) && (msd_rslt == 1))) { + + decNumber negone; + + decNumberFromInt32(&negone, -1); + decNumberShift(&dfp->t, &dfp->t, &negone, &dfp->context); + dfp->t.exponent++; + + if (unlikely(dfp->t.exponent > xmax)) { + dfp_makeQNaN(&dfp->t); + dfp->t.digits = 0; + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE); + /* Inhibit XX in this case */ + decContextClearStatus(&dfp->context, DEC_Inexact); + } + } +} + +#define DFP_HELPER_RRND(op, size) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ + ppc_fprp_t *b, uint32_t rmc) \ +{ \ + struct PPC_DFP dfp; \ + ppc_vsr_t va; \ + int32_t ref_sig; \ + int32_t xmax = ((size) == 64) ? 
369 : 6111; \ + \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + \ + get_dfp64(&va, a); \ + ref_sig = va.VsrD(1) & 0x3f; \ + \ + _dfp_reround(rmc, ref_sig, xmax, &dfp); \ + dfp_finalize_decimal##size(&dfp); \ + QUA_PPs(&dfp); \ + \ + set_dfp##size(t, &dfp.vt); \ +} + +DFP_HELPER_RRND(drrnd, 64) +DFP_HELPER_RRND(drrndq, 128) + +#define DFP_HELPER_RINT(op, postprocs, size) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ + uint32_t r, uint32_t rmc) \ +{ \ + struct PPC_DFP dfp; \ + \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + \ + dfp_set_round_mode_from_immediate(r, rmc, &dfp); \ + decNumberToIntegralExact(&dfp.t, &dfp.b, &dfp.context); \ + dfp_finalize_decimal##size(&dfp); \ + postprocs(&dfp); \ + \ + set_dfp##size(t, &dfp.vt); \ +} + +static void RINTX_PPs(struct PPC_DFP *dfp) +{ + dfp_set_FPRF_from_FRT(dfp); + dfp_check_for_XX(dfp); + dfp_check_for_VXSNAN(dfp); +} + +DFP_HELPER_RINT(drintx, RINTX_PPs, 64) +DFP_HELPER_RINT(drintxq, RINTX_PPs, 128) + +static void RINTN_PPs(struct PPC_DFP *dfp) +{ + dfp_set_FPRF_from_FRT(dfp); + dfp_check_for_VXSNAN(dfp); +} + +DFP_HELPER_RINT(drintn, RINTN_PPs, 64) +DFP_HELPER_RINT(drintnq, RINTN_PPs, 128) + +void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +{ + struct PPC_DFP dfp; + ppc_vsr_t vb; + uint32_t b_short; + + get_dfp64(&vb, b); + b_short = (uint32_t)vb.VsrD(1); + + dfp_prepare_decimal64(&dfp, 0, 0, env); + decimal32ToNumber((decimal32 *)&b_short, &dfp.t); + dfp_finalize_decimal64(&dfp); + set_dfp64(t, &dfp.vt); + dfp_set_FPRF_from_FRT(&dfp); +} + +void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +{ + struct PPC_DFP dfp; + ppc_vsr_t vb; + dfp_prepare_decimal128(&dfp, 0, 0, env); + get_dfp64(&vb, b); + decimal64ToNumber((decimal64 *)&vb.VsrD(1), &dfp.t); + + dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp); + dfp_set_FPRF_from_FRT(&dfp); + + dfp_finalize_decimal128(&dfp); + set_dfp128(t, &dfp.vt); +} + +void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +{ + struct PPC_DFP dfp; + uint32_t t_short = 0; + ppc_vsr_t vt; + dfp_prepare_decimal64(&dfp, 0, b, env); + decimal32FromNumber((decimal32 *)&t_short, &dfp.b, &dfp.context); + decimal32ToNumber((decimal32 *)&t_short, &dfp.t); + + dfp_set_FPRF_from_FRT_short(&dfp); + dfp_check_for_OX(&dfp); + dfp_check_for_UX(&dfp); + dfp_check_for_XX(&dfp); + + vt.VsrD(1) = (uint64_t)t_short; + set_dfp64(t, &vt); +} + +void helper_drdpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +{ + struct PPC_DFP dfp; + dfp_prepare_decimal128(&dfp, 0, b, env); + decimal64FromNumber((decimal64 *)&dfp.vt.VsrD(1), &dfp.b, &dfp.context); + decimal64ToNumber((decimal64 *)&dfp.vt.VsrD(1), &dfp.t); + + dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp); + dfp_set_FPRF_from_FRT_long(&dfp); + dfp_check_for_OX(&dfp); + dfp_check_for_UX(&dfp); + dfp_check_for_XX(&dfp); + + dfp.vt.VsrD(0) = dfp.vt.VsrD(1) = 0; + dfp_finalize_decimal64(&dfp); + set_dfp128(t, &dfp.vt); +} + +#define DFP_HELPER_CFFIX(op, size) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ +{ \ + struct PPC_DFP dfp; \ + ppc_vsr_t vb; \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + get_dfp64(&vb, b); \ + decNumberFromInt64(&dfp.t, (int64_t)vb.VsrD(1)); \ + dfp_finalize_decimal##size(&dfp); \ + CFFIX_PPs(&dfp); \ + \ + set_dfp##size(t, &dfp.vt); \ +} + +static void CFFIX_PPs(struct PPC_DFP *dfp) +{ + dfp_set_FPRF_from_FRT(dfp); + dfp_check_for_XX(dfp); +} + +DFP_HELPER_CFFIX(dcffix, 64) +DFP_HELPER_CFFIX(dcffixq, 128) + +#define DFP_HELPER_CTFIX(op, size) \ +void 
helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ +{ \ + struct PPC_DFP dfp; \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + \ + if (unlikely(decNumberIsSpecial(&dfp.b))) { \ + uint64_t invalid_flags = FP_VX | FP_VXCVI; \ + if (decNumberIsInfinite(&dfp.b)) { \ + dfp.vt.VsrD(1) = decNumberIsNegative(&dfp.b) ? INT64_MIN : \ + INT64_MAX; \ + } else { /* NaN */ \ + dfp.vt.VsrD(1) = INT64_MIN; \ + if (decNumberIsSNaN(&dfp.b)) { \ + invalid_flags |= FP_VXSNAN; \ + } \ + } \ + dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE); \ + } else if (unlikely(decNumberIsZero(&dfp.b))) { \ + dfp.vt.VsrD(1) = 0; \ + } else { \ + decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context); \ + dfp.vt.VsrD(1) = decNumberIntegralToInt64(&dfp.b, &dfp.context); \ + if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) { \ + dfp.vt.VsrD(1) = decNumberIsNegative(&dfp.b) ? INT64_MIN : \ + INT64_MAX; \ + dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE); \ + } else { \ + dfp_check_for_XX(&dfp); \ + } \ + } \ + \ + set_dfp64(t, &dfp.vt); \ +} + +DFP_HELPER_CTFIX(dctfix, 64) +DFP_HELPER_CTFIX(dctfixq, 128) + +static inline void dfp_set_bcd_digit_64(ppc_vsr_t *t, uint8_t digit, + unsigned n) +{ + t->VsrD(1) |= ((uint64_t)(digit & 0xF) << (n << 2)); +} + +static inline void dfp_set_bcd_digit_128(ppc_vsr_t *t, uint8_t digit, + unsigned n) +{ + t->VsrD((n & 0x10) ? 0 : 1) |= + ((uint64_t)(digit & 0xF) << ((n & 15) << 2)); +} + +static inline void dfp_set_sign_64(ppc_vsr_t *t, uint8_t sgn) +{ + t->VsrD(1) <<= 4; + t->VsrD(1) |= (sgn & 0xF); +} + +static inline void dfp_set_sign_128(ppc_vsr_t *t, uint8_t sgn) +{ + t->VsrD(0) <<= 4; + t->VsrD(0) |= (t->VsrD(1) >> 60); + t->VsrD(1) <<= 4; + t->VsrD(1) |= (sgn & 0xF); +} + +#define DFP_HELPER_DEDPD(op, size) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ + uint32_t sp) \ +{ \ + struct PPC_DFP dfp; \ + uint8_t digits[34]; \ + int i, N; \ + \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + \ + decNumberGetBCD(&dfp.b, digits); \ + dfp.vt.VsrD(0) = dfp.vt.VsrD(1) = 0; \ + N = dfp.b.digits; \ + \ + for (i = 0; (i < N) && (i < (size)/4); i++) { \ + dfp_set_bcd_digit_##size(&dfp.vt, digits[N - i - 1], i); \ + } \ + \ + if (sp & 2) { \ + uint8_t sgn; \ + \ + if (decNumberIsNegative(&dfp.b)) { \ + sgn = 0xD; \ + } else { \ + sgn = ((sp & 1) ? 0xF : 0xC); \ + } \ + dfp_set_sign_##size(&dfp.vt, sgn); \ + } \ + \ + set_dfp##size(t, &dfp.vt); \ +} + +DFP_HELPER_DEDPD(ddedpd, 64) +DFP_HELPER_DEDPD(ddedpdq, 128) + +static inline uint8_t dfp_get_bcd_digit_64(ppc_vsr_t *t, unsigned n) +{ + return t->VsrD(1) >> ((n << 2) & 63) & 15; +} + +static inline uint8_t dfp_get_bcd_digit_128(ppc_vsr_t *t, unsigned n) +{ + return t->VsrD((n & 0x10) ? 
0 : 1) >> ((n << 2) & 63) & 15; +} + +#define DFP_HELPER_ENBCD(op, size) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ + uint32_t s) \ +{ \ + struct PPC_DFP dfp; \ + uint8_t digits[32]; \ + int n = 0, offset = 0, sgn = 0, nonzero = 0; \ + \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + \ + decNumberZero(&dfp.t); \ + \ + if (s) { \ + uint8_t sgnNibble = dfp_get_bcd_digit_##size(&dfp.vb, offset++); \ + switch (sgnNibble) { \ + case 0xD: \ + case 0xB: \ + sgn = 1; \ + break; \ + case 0xC: \ + case 0xF: \ + case 0xA: \ + case 0xE: \ + sgn = 0; \ + break; \ + default: \ + dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \ + return; \ + } \ + } \ + \ + while (offset < (size) / 4) { \ + n++; \ + digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(&dfp.vb, \ + offset++); \ + if (digits[(size) / 4 - n] > 10) { \ + dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \ + return; \ + } else { \ + nonzero |= (digits[(size) / 4 - n] > 0); \ + } \ + } \ + \ + if (nonzero) { \ + decNumberSetBCD(&dfp.t, digits + ((size) / 4) - n, n); \ + } \ + \ + if (s && sgn) { \ + dfp.t.bits |= DECNEG; \ + } \ + dfp_finalize_decimal##size(&dfp); \ + dfp_set_FPRF_from_FRT(&dfp); \ + set_dfp##size(t, &dfp.vt); \ +} + +DFP_HELPER_ENBCD(denbcd, 64) +DFP_HELPER_ENBCD(denbcdq, 128) + +#define DFP_HELPER_XEX(op, size) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ +{ \ + struct PPC_DFP dfp; \ + ppc_vsr_t vt; \ + \ + memset(&dfp, 0, sizeof(dfp)); \ + memset(&vt, 0, sizeof(vt)); \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + \ + if (unlikely(decNumberIsSpecial(&dfp.b))) { \ + if (decNumberIsInfinite(&dfp.b)) { \ + vt.VsrD(1) = -1; \ + } else if (decNumberIsSNaN(&dfp.b)) { \ + vt.VsrD(1) = -3; \ + } else if (decNumberIsQNaN(&dfp.b)) { \ + vt.VsrD(1) = -2; \ + } else { \ + assert(0); \ + } \ + set_dfp64(t, &vt); \ + } else { \ + if ((size) == 64) { \ + vt.VsrD(1) = dfp.b.exponent + 398; \ + } else if ((size) == 128) { \ + vt.VsrD(1) = dfp.b.exponent + 6176; \ + } else { \ + assert(0); \ + } \ + set_dfp64(t, &vt); \ + } \ +} + +DFP_HELPER_XEX(dxex, 64) +DFP_HELPER_XEX(dxexq, 128) + +static void dfp_set_raw_exp_64(ppc_vsr_t *t, uint64_t raw) +{ + t->VsrD(1) &= 0x8003ffffffffffffULL; + t->VsrD(1) |= (raw << (63 - 13)); +} + +static void dfp_set_raw_exp_128(ppc_vsr_t *t, uint64_t raw) +{ + t->VsrD(0) &= 0x80003fffffffffffULL; + t->VsrD(0) |= (raw << (63 - 17)); +} + +#define DFP_HELPER_IEX(op, size) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ + ppc_fprp_t *b) \ +{ \ + struct PPC_DFP dfp; \ + uint64_t raw_qnan, raw_snan, raw_inf, max_exp; \ + ppc_vsr_t va; \ + int bias; \ + int64_t exp; \ + \ + get_dfp64(&va, a); \ + exp = (int64_t)va.VsrD(1); \ + dfp_prepare_decimal##size(&dfp, 0, b, env); \ + \ + if ((size) == 64) { \ + max_exp = 767; \ + raw_qnan = 0x1F00; \ + raw_snan = 0x1F80; \ + raw_inf = 0x1E00; \ + bias = 398; \ + } else if ((size) == 128) { \ + max_exp = 12287; \ + raw_qnan = 0x1f000; \ + raw_snan = 0x1f800; \ + raw_inf = 0x1e000; \ + bias = 6176; \ + } else { \ + assert(0); \ + } \ + \ + if (unlikely((exp < 0) || (exp > max_exp))) { \ + dfp.vt.VsrD(0) = dfp.vb.VsrD(0); \ + dfp.vt.VsrD(1) = dfp.vb.VsrD(1); \ + if (exp == -1) { \ + dfp_set_raw_exp_##size(&dfp.vt, raw_inf); \ + } else if (exp == -3) { \ + dfp_set_raw_exp_##size(&dfp.vt, raw_snan); \ + } else { \ + dfp_set_raw_exp_##size(&dfp.vt, raw_qnan); \ + } \ + } else { \ + dfp.t = dfp.b; \ + if (unlikely(decNumberIsSpecial(&dfp.t))) { \ + dfp.t.bits &= ~DECSPECIAL; \ + } \ + 
dfp.t.exponent = exp - bias; \ + dfp_finalize_decimal##size(&dfp); \ + } \ + set_dfp##size(t, &dfp.vt); \ +} + +DFP_HELPER_IEX(diex, 64) +DFP_HELPER_IEX(diexq, 128) + +static void dfp_clear_lmd_from_g5msb(uint64_t *t) +{ + + /* The most significant 5 bits of the PowerPC DFP format combine bits */ + /* from the left-most decimal digit (LMD) and the biased exponent. */ + /* This routine clears the LMD bits while preserving the exponent */ + /* bits. See "Figure 80: Encoding of bits 0:4 of the G field for */ + /* Finite Numbers" in the Power ISA for additional details. */ + + uint64_t g5msb = (*t >> 58) & 0x1F; + + if ((g5msb >> 3) < 3) { /* LMD in [0-7] ? */ + *t &= ~(7ULL << 58); + } else { + switch (g5msb & 7) { + case 0: + case 1: + g5msb = 0; + break; + case 2: + case 3: + g5msb = 0x8; + break; + case 4: + case 5: + g5msb = 0x10; + break; + case 6: + g5msb = 0x1E; + break; + case 7: + g5msb = 0x1F; + break; + } + + *t &= ~(0x1fULL << 58); + *t |= (g5msb << 58); + } +} + +#define DFP_HELPER_SHIFT(op, size, shift_left) \ +void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ + uint32_t sh) \ +{ \ + struct PPC_DFP dfp; \ + unsigned max_digits = ((size) == 64) ? 16 : 34; \ + \ + dfp_prepare_decimal##size(&dfp, a, 0, env); \ + \ + if (sh <= max_digits) { \ + \ + decNumber shd; \ + unsigned special = dfp.a.bits & DECSPECIAL; \ + \ + if (shift_left) { \ + decNumberFromUInt32(&shd, sh); \ + } else { \ + decNumberFromInt32(&shd, -((int32_t)sh)); \ + } \ + \ + dfp.a.bits &= ~DECSPECIAL; \ + decNumberShift(&dfp.t, &dfp.a, &shd, &dfp.context); \ + \ + dfp.t.bits |= special; \ + if (special && (dfp.t.digits >= max_digits)) { \ + dfp.t.digits = max_digits - 1; \ + } \ + \ + dfp_finalize_decimal##size(&dfp); \ + } else { \ + if ((size) == 64) { \ + dfp.vt.VsrD(1) = dfp.va.VsrD(1) & \ + 0xFFFC000000000000ULL; \ + dfp_clear_lmd_from_g5msb(&dfp.vt.VsrD(1)); \ + } else { \ + dfp.vt.VsrD(0) = dfp.va.VsrD(0) & \ + 0xFFFFC00000000000ULL; \ + dfp_clear_lmd_from_g5msb(&dfp.vt.VsrD(0)); \ + dfp.vt.VsrD(1) = 0; \ + } \ + } \ + \ + set_dfp##size(t, &dfp.vt); \ +} + +DFP_HELPER_SHIFT(dscli, 64, 1) +DFP_HELPER_SHIFT(dscliq, 128, 1) +DFP_HELPER_SHIFT(dscri, 64, 0) +DFP_HELPER_SHIFT(dscriq, 128, 0) diff --git a/qemu/target/ppc/excp_helper.c b/qemu/target/ppc/excp_helper.c new file mode 100644 index 00000000..beaef6cd --- /dev/null +++ b/qemu/target/ppc/excp_helper.c @@ -0,0 +1,1344 @@ +/* + * PowerPC exception emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "internal.h" +#include "helper_regs.h" + +/* #define DEBUG_OP */ +/* #define DEBUG_SOFTWARE_TLB */ +/* #define DEBUG_EXCEPTIONS */ + +#ifdef DEBUG_EXCEPTIONS +# define LOG_EXCP(...) qemu_log(__VA_ARGS__) +#else +# define LOG_EXCP(...) 
do { } while (0) +#endif + +/*****************************************************************************/ +/* Exception processing */ +static inline void dump_syscall(CPUPPCState *env) +{ + qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64 + " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 + " nip=" TARGET_FMT_lx "\n", + ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), + ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), + ppc_dump_gpr(env, 6), env->nip); +} + +static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp, + target_ulong *msr) +{ + /* We no longer are in a PM state */ + env->resume_as_sreset = false; + + /* Pretend to be returning from doze always as we don't lose state */ + *msr |= (0x1ull << (63 - 47)); + + /* Machine checks are sent normally */ + if (excp == POWERPC_EXCP_MCHECK) { + return excp; + } + switch (excp) { + case POWERPC_EXCP_RESET: + *msr |= 0x4ull << (63 - 45); + break; + case POWERPC_EXCP_EXTERNAL: + *msr |= 0x8ull << (63 - 45); + break; + case POWERPC_EXCP_DECR: + *msr |= 0x6ull << (63 - 45); + break; + case POWERPC_EXCP_SDOOR: + *msr |= 0x5ull << (63 - 45); + break; + case POWERPC_EXCP_SDOOR_HV: + *msr |= 0x3ull << (63 - 45); + break; + case POWERPC_EXCP_HV_MAINT: + *msr |= 0xaull << (63 - 45); + break; + case POWERPC_EXCP_HVIRT: + *msr |= 0x9ull << (63 - 45); + break; + default: + cpu_abort(cs, "Unsupported exception %d in Power Save mode\n", + excp); + } + return POWERPC_EXCP_RESET; +} + +static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail) +{ + uint64_t offset = 0; + + switch (ail) { + case AIL_NONE: + break; + case AIL_0001_8000: + offset = 0x18000; + break; + case AIL_C000_0000_0000_4000: + offset = 0xc000000000004000ull; + break; + default: + cpu_abort(cs, "Invalid AIL combination %d\n", ail); + break; + } + + return offset; +} + +static inline void powerpc_set_excp_state(PowerPCCPU *cpu, + target_ulong vector, target_ulong msr) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + + /* + * We don't use hreg_store_msr here as already have treated any + * special case that could occur. Just store MSR and update hflags + * + * Note: We *MUST* not use hreg_store_msr() as-is anyway because it + * will prevent setting of the HV bit which some exceptions might need + * to do. 
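+     * (powerpc_excp computes the complete new MSR value, including the
+     * HV bit where an exception requires it, before calling us.)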
+ */
+    env->msr = msr & env->msr_mask;
+    hreg_compute_hflags(env);
+    env->nip = vector;
+    /* Reset exception state */
+    cs->exception_index = POWERPC_EXCP_NONE;
+    env->error_code = 0;
+
+    /* Reset the reservation */
+    env->reserve_addr = -1;
+
+    /*
+     * Any interrupt is context synchronizing, check if TCG TLB needs
+     * a delayed flush on ppc64
+     */
+    check_tlb_flush(env, false);
+}
+
+/*
+ * Note that this function should be greatly optimized when called
+ * with a constant excp, from ppc_hw_interrupt
+ */
+static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
+{
+    CPUState *cs = CPU(cpu);
+    CPUPPCState *env = &cpu->env;
+    target_ulong msr, new_msr, vector;
+    int srr0, srr1, asrr0, asrr1, lev, ail;
+    bool lpes0;
+
+    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
+                  " => %08x (%02x)\n", env->nip, excp, env->error_code);
+
+    /* new srr1 value excluding must-be-zero bits */
+    if (excp_model == POWERPC_EXCP_BOOKE) {
+        msr = env->msr;
+    } else {
+        msr = env->msr & ~0x783f0000ULL;
+    }
+
+    /*
+     * new interrupt handler msr preserves existing HV and ME unless
+     * explicitly overridden
+     */
+    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
+
+    /* target registers */
+    srr0 = SPR_SRR0;
+    srr1 = SPR_SRR1;
+    asrr0 = -1;
+    asrr1 = -1;
+
+    /*
+     * check for special resume at 0x100 from doze/nap/sleep/winkle on
+     * P7/P8/P9
+     */
+    if (env->resume_as_sreset) {
+        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
+    }
+
+    /*
+     * Exception targeting modifiers
+     *
+     * LPES0 is supported on POWER7/8/9
+     * LPES1 is not supported (old iSeries mode)
+     *
+     * On anything else, we behave as if LPES0 is 1
+     * (externals don't alter MSR:HV)
+     *
+     * AIL is initialized here but can be cleared by
+     * selected exceptions
+     */
+#if defined(TARGET_PPC64)
+    if (excp_model == POWERPC_EXCP_POWER7 ||
+        excp_model == POWERPC_EXCP_POWER8 ||
+        excp_model == POWERPC_EXCP_POWER9) {
+        lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+        if (excp_model != POWERPC_EXCP_POWER7) {
+            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
+        } else {
+            ail = 0;
+        }
+    } else
+#endif /* defined(TARGET_PPC64) */
+    {
+        lpes0 = true;
+        ail = 0;
+    }
+
+    /*
+     * Hypervisor emulation assistance interrupt only exists on server
+     * arch 2.05 or later. We also don't want to generate it if
+     * we don't have HVB in msr_mask (PAPR mode).
+     */
+    if (excp == POWERPC_EXCP_HV_EMU
+#if defined(TARGET_PPC64)
+        && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
+#endif /* defined(TARGET_PPC64) */
+
+        ) {
+        excp = POWERPC_EXCP_PROGRAM;
+    }
+
+    switch (excp) {
+    case POWERPC_EXCP_NONE:
+        /* Should never happen */
+        return;
+    case POWERPC_EXCP_CRITICAL:    /* Critical input */
+        switch (excp_model) {
+        case POWERPC_EXCP_40x:
+            srr0 = SPR_40x_SRR2;
+            srr1 = SPR_40x_SRR3;
+            break;
+        case POWERPC_EXCP_BOOKE:
+            srr0 = SPR_BOOKE_CSRR0;
+            srr1 = SPR_BOOKE_CSRR1;
+            break;
+        case POWERPC_EXCP_G2:
+            break;
+        default:
+            goto excp_invalid;
+        }
+        break;
+    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
+        if (msr_me == 0) {
+            /*
+             * Machine check exception is not enabled. Enter
+             * checkstop state.
+             */
+            fprintf(stderr, "Machine check while not allowed. "
+                    "Entering checkstop state\n");
+#if 0
+            if (qemu_log_separate()) {
+                qemu_log("Machine check while not allowed. "
+                         "Entering checkstop state\n");
+            }
+#endif
+            cs->halted = 1;
+            cpu_interrupt_exittb(cs);
+        }
+        if (env->msr_mask & MSR_HVB) {
+            /*
+             * ISA specifies HV, but can be delivered to guest with HV
+             * clear (e.g., see FWNMI in PAPR).
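+             * The sanity check after the exception switch below still
+             * aborts if HV delivery is attempted on a CPU model without
+             * HV support.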
+ */ + new_msr |= (target_ulong)MSR_HVB; + } + ail = 0; + + /* machine check exceptions don't have ME set */ + new_msr &= ~((target_ulong)1 << MSR_ME); + + /* XXX: should also have something loaded in DAR / DSISR */ + switch (excp_model) { + case POWERPC_EXCP_40x: + srr0 = SPR_40x_SRR2; + srr1 = SPR_40x_SRR3; + break; + case POWERPC_EXCP_BOOKE: + /* FIXME: choose one or the other based on CPU type */ + srr0 = SPR_BOOKE_MCSRR0; + srr1 = SPR_BOOKE_MCSRR1; + asrr0 = SPR_BOOKE_CSRR0; + asrr1 = SPR_BOOKE_CSRR1; + break; + default: + break; + } + break; + case POWERPC_EXCP_DSI: /* Data storage exception */ + LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx + "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]); + break; + case POWERPC_EXCP_ISI: /* Instruction storage exception */ + LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx + "\n", msr, env->nip); + msr |= env->error_code; + break; + case POWERPC_EXCP_EXTERNAL: /* External input */ + cs = CPU(cpu); + + if (!lpes0) { + new_msr |= (target_ulong)MSR_HVB; + new_msr |= env->msr & ((target_ulong)1 << MSR_RI); + srr0 = SPR_HSRR0; + srr1 = SPR_HSRR1; + } + if (env->mpic_proxy) { + /* IACK the IRQ on delivery */ +#ifdef UNICORN_ARCH_POSTFIX + env->spr[SPR_BOOKE_EPR] = glue(ldl_phys, UNICORN_ARCH_POSTFIX)(cs->uc, cs->as, env->mpic_iack); +#else + env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->uc, cs->as, env->mpic_iack); +#endif + } + break; + case POWERPC_EXCP_ALIGN: /* Alignment exception */ + /* Get rS/rD and rA from faulting opcode */ + /* + * Note: the opcode fields will not be set properly for a + * direct store load/store, but nobody cares as nobody + * actually uses direct store segments. + */ + env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; + break; + case POWERPC_EXCP_PROGRAM: /* Program exception */ + switch (env->error_code & ~0xF) { + case POWERPC_EXCP_FP: + if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) { + LOG_EXCP("Ignore floating point exception\n"); + cs->exception_index = POWERPC_EXCP_NONE; + env->error_code = 0; + return; + } + + /* + * FP exceptions always have NIP pointing to the faulting + * instruction, so always use store_next and claim we are + * precise in the MSR. + */ + msr |= 0x00100000; + env->spr[SPR_BOOKE_ESR] = ESR_FP; + break; + case POWERPC_EXCP_INVAL: + LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip); + msr |= 0x00080000; + env->spr[SPR_BOOKE_ESR] = ESR_PIL; + break; + case POWERPC_EXCP_PRIV: + msr |= 0x00040000; + env->spr[SPR_BOOKE_ESR] = ESR_PPR; + break; + case POWERPC_EXCP_TRAP: + msr |= 0x00020000; + env->spr[SPR_BOOKE_ESR] = ESR_PTR; + break; + default: + /* Should never occur */ + cpu_abort(cs, "Invalid program exception %d. 
Aborting\n", + env->error_code); + break; + } + break; + case POWERPC_EXCP_SYSCALL: /* System call exception */ + dump_syscall(env); + lev = env->error_code; + + /* + * We need to correct the NIP which in this case is supposed + * to point to the next instruction + */ + env->nip += 4; + +#if 0 + /* "PAPR mode" built-in hypercall emulation */ + if ((lev == 1) && cpu->vhyp) { + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + vhc->hypercall(cpu->vhyp, cpu); + return; + } +#endif + if (lev == 1) { + new_msr |= (target_ulong)MSR_HVB; + } + break; + case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ + case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ + case POWERPC_EXCP_DECR: /* Decrementer exception */ + break; + case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ + /* FIT on 4xx */ + LOG_EXCP("FIT exception\n"); + break; + case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ + LOG_EXCP("WDT exception\n"); + switch (excp_model) { + case POWERPC_EXCP_BOOKE: + srr0 = SPR_BOOKE_CSRR0; + srr1 = SPR_BOOKE_CSRR1; + break; + default: + break; + } + break; + case POWERPC_EXCP_DTLB: /* Data TLB error */ + case POWERPC_EXCP_ITLB: /* Instruction TLB error */ + break; + case POWERPC_EXCP_DEBUG: /* Debug interrupt */ + if (env->flags & POWERPC_FLAG_DE) { + /* FIXME: choose one or the other based on CPU type */ + srr0 = SPR_BOOKE_DSRR0; + srr1 = SPR_BOOKE_DSRR1; + asrr0 = SPR_BOOKE_CSRR0; + asrr1 = SPR_BOOKE_CSRR1; + /* DBSR already modified by caller */ + } else { + cpu_abort(cs, "Debug exception triggered on unsupported model\n"); + } + break; + case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */ + env->spr[SPR_BOOKE_ESR] = ESR_SPV; + break; + case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ + /* XXX: TODO */ + cpu_abort(cs, "Embedded floating point data exception " + "is not implemented yet !\n"); + env->spr[SPR_BOOKE_ESR] = ESR_SPV; + break; + case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ + /* XXX: TODO */ + cpu_abort(cs, "Embedded floating point round exception " + "is not implemented yet !\n"); + env->spr[SPR_BOOKE_ESR] = ESR_SPV; + break; + case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */ + /* XXX: TODO */ + cpu_abort(cs, + "Performance counter exception is not implemented yet !\n"); + break; + case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ + break; + case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ + srr0 = SPR_BOOKE_CSRR0; + srr1 = SPR_BOOKE_CSRR1; + break; + case POWERPC_EXCP_RESET: /* System reset exception */ + /* A power-saving exception sets ME, otherwise it is unchanged */ + if (msr_pow) { + /* indicate that we resumed from power save mode */ + msr |= 0x10000; + new_msr |= ((target_ulong)1 << MSR_ME); + } + if (env->msr_mask & MSR_HVB) { + /* + * ISA specifies HV, but can be delivered to guest with HV + * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU). 
+ */ + new_msr |= (target_ulong)MSR_HVB; + } else { + if (msr_pow) { + cpu_abort(cs, "Trying to deliver power-saving system reset " + "exception %d with no HV support\n", excp); + } + } + ail = 0; + break; + case POWERPC_EXCP_DSEG: /* Data segment exception */ + case POWERPC_EXCP_ISEG: /* Instruction segment exception */ + case POWERPC_EXCP_TRACE: /* Trace exception */ + break; + case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ + case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ + case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ + case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ + case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */ + case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */ + case POWERPC_EXCP_HV_EMU: + case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */ + srr0 = SPR_HSRR0; + srr1 = SPR_HSRR1; + new_msr |= (target_ulong)MSR_HVB; + new_msr |= env->msr & ((target_ulong)1 << MSR_RI); + break; + case POWERPC_EXCP_VPU: /* Vector unavailable exception */ + case POWERPC_EXCP_VSXU: /* VSX unavailable exception */ + case POWERPC_EXCP_FU: /* Facility unavailable exception */ +#ifdef TARGET_PPC64 + env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56); +#endif + break; + case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */ +#ifdef TARGET_PPC64 + env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS); + srr0 = SPR_HSRR0; + srr1 = SPR_HSRR1; + new_msr |= (target_ulong)MSR_HVB; + new_msr |= env->msr & ((target_ulong)1 << MSR_RI); +#endif + break; + case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ + LOG_EXCP("PIT exception\n"); + break; + case POWERPC_EXCP_IO: /* IO error exception */ + /* XXX: TODO */ + cpu_abort(cs, "601 IO error exception is not implemented yet !\n"); + break; + case POWERPC_EXCP_RUNM: /* Run mode exception */ + /* XXX: TODO */ + cpu_abort(cs, "601 run mode exception is not implemented yet !\n"); + break; + case POWERPC_EXCP_EMUL: /* Emulation trap exception */ + /* XXX: TODO */ + cpu_abort(cs, "602 emulation trap exception " + "is not implemented yet !\n"); + break; + case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ + switch (excp_model) { + case POWERPC_EXCP_602: + case POWERPC_EXCP_603: + case POWERPC_EXCP_603E: + case POWERPC_EXCP_G2: + goto tlb_miss_tgpr; + case POWERPC_EXCP_7x5: + goto tlb_miss; + case POWERPC_EXCP_74xx: + goto tlb_miss_74xx; + default: + cpu_abort(cs, "Invalid instruction TLB miss exception\n"); + break; + } + break; + case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ + switch (excp_model) { + case POWERPC_EXCP_602: + case POWERPC_EXCP_603: + case POWERPC_EXCP_603E: + case POWERPC_EXCP_G2: + goto tlb_miss_tgpr; + case POWERPC_EXCP_7x5: + goto tlb_miss; + case POWERPC_EXCP_74xx: + goto tlb_miss_74xx; + default: + cpu_abort(cs, "Invalid data load TLB miss exception\n"); + break; + } + break; + case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ + switch (excp_model) { + case POWERPC_EXCP_602: + case POWERPC_EXCP_603: + case POWERPC_EXCP_603E: + case POWERPC_EXCP_G2: + tlb_miss_tgpr: + /* Swap temporary saved registers with GPRs */ + if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { + new_msr |= (target_ulong)1 << MSR_TGPR; + hreg_swap_gpr_tgpr(env); + } + goto tlb_miss; + case POWERPC_EXCP_7x5: + tlb_miss: +#if defined(DEBUG_SOFTWARE_TLB) + if (qemu_log_enabled()) { + const char *es; + target_ulong *miss, *cmp; + int en; + + if (excp == POWERPC_EXCP_IFTLB) { + es = "I"; + en = 'I'; + 
miss = &env->spr[SPR_IMISS]; + cmp = &env->spr[SPR_ICMP]; + } else { + if (excp == POWERPC_EXCP_DLTLB) { + es = "DL"; + } else { + es = "DS"; + } + en = 'D'; + miss = &env->spr[SPR_DMISS]; + cmp = &env->spr[SPR_DCMP]; + } + qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " + TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 " + TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, + env->spr[SPR_HASH1], env->spr[SPR_HASH2], + env->error_code); + } +#endif + msr |= env->crf[0] << 28; + msr |= env->error_code; /* key, D/I, S/L bits */ + /* Set way using a LRU mechanism */ + msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; + break; + case POWERPC_EXCP_74xx: + tlb_miss_74xx: +#if defined(DEBUG_SOFTWARE_TLB) + if (qemu_log_enabled()) { + const char *es; + target_ulong *miss, *cmp; + int en; + + if (excp == POWERPC_EXCP_IFTLB) { + es = "I"; + en = 'I'; + miss = &env->spr[SPR_TLBMISS]; + cmp = &env->spr[SPR_PTEHI]; + } else { + if (excp == POWERPC_EXCP_DLTLB) { + es = "DL"; + } else { + es = "DS"; + } + en = 'D'; + miss = &env->spr[SPR_TLBMISS]; + cmp = &env->spr[SPR_PTEHI]; + } + qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC " + TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp, + env->error_code); + } +#endif + msr |= env->error_code; /* key bit */ + break; + default: + cpu_abort(cs, "Invalid data store TLB miss exception\n"); + break; + } + break; + case POWERPC_EXCP_FPA: /* Floating-point assist exception */ + /* XXX: TODO */ + cpu_abort(cs, "Floating point assist exception " + "is not implemented yet !\n"); + break; + case POWERPC_EXCP_DABR: /* Data address breakpoint */ + /* XXX: TODO */ + cpu_abort(cs, "DABR exception is not implemented yet !\n"); + break; + case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ + /* XXX: TODO */ + cpu_abort(cs, "IABR exception is not implemented yet !\n"); + break; + case POWERPC_EXCP_SMI: /* System management interrupt */ + /* XXX: TODO */ + cpu_abort(cs, "SMI exception is not implemented yet !\n"); + break; + case POWERPC_EXCP_THERM: /* Thermal interrupt */ + /* XXX: TODO */ + cpu_abort(cs, "Thermal management exception " + "is not implemented yet !\n"); + break; + case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ + /* XXX: TODO */ + cpu_abort(cs, + "Performance counter exception is not implemented yet !\n"); + break; + case POWERPC_EXCP_VPUA: /* Vector assist exception */ + /* XXX: TODO */ + cpu_abort(cs, "VPU assist exception is not implemented yet !\n"); + break; + case POWERPC_EXCP_SOFTP: /* Soft patch exception */ + /* XXX: TODO */ + cpu_abort(cs, + "970 soft-patch exception is not implemented yet !\n"); + break; + case POWERPC_EXCP_MAINT: /* Maintenance exception */ + /* XXX: TODO */ + cpu_abort(cs, + "970 maintenance exception is not implemented yet !\n"); + break; + case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ + /* XXX: TODO */ + cpu_abort(cs, "Maskable external exception " + "is not implemented yet !\n"); + break; + case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ + /* XXX: TODO */ + cpu_abort(cs, "Non maskable external exception " + "is not implemented yet !\n"); + break; + default: + excp_invalid: + cpu_abort(cs, "Invalid PowerPC exception %d. 
Aborting\n", excp); + break; + } + + /* Save PC */ + env->spr[srr0] = env->nip; + + /* Save MSR */ + env->spr[srr1] = msr; + + /* Sanity check */ + if (!(env->msr_mask & MSR_HVB)) { + if (new_msr & MSR_HVB) { + cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with " + "no HV support\n", excp); + } + if (srr0 == SPR_HSRR0) { + cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with " + "no HV support\n", excp); + } + } + + /* If any alternate SRR register are defined, duplicate saved values */ + if (asrr0 != -1) { + env->spr[asrr0] = env->spr[srr0]; + } + if (asrr1 != -1) { + env->spr[asrr1] = env->spr[srr1]; + } + + /* + * Sort out endianness of interrupt, this differs depending on the + * CPU, the HV mode, etc... + */ +#ifdef TARGET_PPC64 + if (excp_model == POWERPC_EXCP_POWER7) { + if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) { + new_msr |= (target_ulong)1 << MSR_LE; + } + } else if (excp_model == POWERPC_EXCP_POWER8) { + if (new_msr & MSR_HVB) { + if (env->spr[SPR_HID0] & HID0_HILE) { + new_msr |= (target_ulong)1 << MSR_LE; + } + } else if (env->spr[SPR_LPCR] & LPCR_ILE) { + new_msr |= (target_ulong)1 << MSR_LE; + } + } else if (excp_model == POWERPC_EXCP_POWER9) { + if (new_msr & MSR_HVB) { + if (env->spr[SPR_HID0] & HID0_POWER9_HILE) { + new_msr |= (target_ulong)1 << MSR_LE; + } + } else if (env->spr[SPR_LPCR] & LPCR_ILE) { + new_msr |= (target_ulong)1 << MSR_LE; + } + } else if (msr_ile) { + new_msr |= (target_ulong)1 << MSR_LE; + } +#else + if (msr_ile) { + new_msr |= (target_ulong)1 << MSR_LE; + } +#endif + + /* Jump to handler */ + vector = env->excp_vectors[excp]; +#ifdef _MSC_VER + if (vector == (target_ulong)(0ULL - 1ULL)) { +#else + if (vector == (target_ulong)-1ULL) { +#endif + cpu_abort(cs, "Raised an exception without defined vector %d\n", + excp); + } + vector |= env->excp_prefix; + + /* + * AIL only works if there is no HV transition and we are running + * with translations enabled + */ + if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) || + ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) { + ail = 0; + } + /* Handle AIL */ + if (ail) { + new_msr |= (1 << MSR_IR) | (1 << MSR_DR); + vector |= ppc_excp_vector_offset(cs, ail); + } + +#if defined(TARGET_PPC64) + if (excp_model == POWERPC_EXCP_BOOKE) { + if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) { + /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */ + new_msr |= (target_ulong)1 << MSR_CM; + } else { + vector = (uint32_t)vector; + } + } else { + if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) { + vector = (uint32_t)vector; + } else { + new_msr |= (target_ulong)1 << MSR_SF; + } + } +#endif + + powerpc_set_excp_state(cpu, vector, new_msr); +} + +void ppc_cpu_do_interrupt(CPUState *cs) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + + powerpc_excp(cpu, env->excp_model, cs->exception_index); +} + +static void ppc_hw_interrupt(CPUPPCState *env) +{ + PowerPCCPU *cpu = env_archcpu(env); + bool async_deliver; + + /* External reset */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET); + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET); + return; + } + /* Machine check exception */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK); + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK); + return; + } +#if 0 /* TODO */ + /* External debug exception */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) { + env->pending_interrupts &= ~(1 << 
PPC_INTERRUPT_DEBUG); + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG); + return; + } +#endif + + /* + * For interrupts that gate on MSR:EE, we need to do something a + * bit more subtle, as we need to let them through even when EE is + * clear when coming out of some power management states (in order + * for them to become a 0x100). + */ + async_deliver = (msr_ee != 0) || env->resume_as_sreset; + + /* Hypervisor decrementer exception */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) { + /* LPCR will be clear when not supported so this will work */ + bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); + if ((async_deliver || msr_hv == 0) && hdice) { + /* HDEC clears on delivery */ + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR); + return; + } + } + + /* Hypervisor virtualization interrupt */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) { + /* LPCR will be clear when not supported so this will work */ + bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); + if ((async_deliver || msr_hv == 0) && hvice) { + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT); + return; + } + } + + /* External interrupt can ignore MSR:EE under some circumstances */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) { + bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); + bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + /* HEIC blocks delivery to the hypervisor */ + if ((async_deliver && !(heic && msr_hv && !msr_pr)) || + (env->has_hv_mode && msr_hv == 0 && !lpes0)) { + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL); + return; + } + } + if (msr_ce != 0) { + /* External critical interrupt */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) { + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL); + return; + } + } + if (async_deliver != 0) { + /* Watchdog timer on embedded PowerPC */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT); + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT); + return; + } + if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL); + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI); + return; + } + /* Fixed interval timer on embedded PowerPC */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT); + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT); + return; + } + /* Programmable interval timer on embedded PowerPC */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT); + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT); + return; + } + /* Decrementer exception */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) { + if (ppc_decr_clear_on_delivery(env)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR); + } + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR); + return; + } + if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); + if (is_book3s_arch2x(env)) { + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR); + } else { + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI); + } + return; + } + if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL); + powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV); + return; 
+        }
+        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
+            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
+            return;
+        }
+        /* Thermal interrupt */
+        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
+            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
+            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
+            return;
+        }
+    }
+
+    if (env->resume_as_sreset) {
+        /*
+         * This is a bug! It means that has_work took us out of halt without
+         * anything to deliver while in a PM state that requires getting
+         * out via a 0x100
+         *
+         * This means we will incorrectly execute past the power management
+         * instruction instead of triggering a reset.
+         *
+         * It generally means a discrepancy between the wakeup conditions in
+         * the processor has_work implementation and the logic in this
+         * function.
+         */
+        cpu_abort(env_cpu(env),
+                  "Wakeup from PM state but interrupt Undelivered");
+    }
+}
+
+void ppc_cpu_do_system_reset(CPUState *cs, target_ulong vector)
+{
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    CPUPPCState *env = &cpu->env;
+
+    powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
+    if (vector != -1) {
+        env->nip = vector;
+    }
+}
+
+void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
+{
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    CPUPPCState *env = &cpu->env;
+    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+    target_ulong msr = 0;
+
+    /*
+     * Set MSR and NIP for the handler; SRR0/1, DAR and DSISR have already
+     * been set by KVM.
+     */
+    msr = (1ULL << MSR_ME);
+    msr |= env->msr & (1ULL << MSR_SF);
+    if (!(*pcc->interrupts_big_endian)(cpu)) {
+        msr |= (1ULL << MSR_LE);
+    }
+
+    powerpc_set_excp_state(cpu, vector, msr);
+}
+
+bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    CPUPPCState *env = &cpu->env;
+
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        ppc_hw_interrupt(env);
+        if (env->pending_interrupts == 0) {
+            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+        }
+        return true;
+    }
+    return false;
+}
+
+#if defined(DEBUG_OP)
+static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
+{
+    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
+             TARGET_FMT_lx "\n", RA, msr);
+}
+#endif
+
+/*****************************************************************************/
+/* Exceptions processing helpers */
+
+void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
+                            uint32_t error_code, uintptr_t raddr)
+{
+    CPUState *cs = env_cpu(env);
+
+    cs->exception_index = exception;
+    env->error_code = error_code;
+    cpu_loop_exit_restore(cs, raddr);
+}
+
+void raise_exception_err(CPUPPCState *env, uint32_t exception,
+                         uint32_t error_code)
+{
+    raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+void raise_exception(CPUPPCState *env, uint32_t exception)
+{
+    raise_exception_err_ra(env, exception, 0, 0);
+}
+
+void raise_exception_ra(CPUPPCState *env, uint32_t exception,
+                        uintptr_t raddr)
+{
+    raise_exception_err_ra(env, exception, 0, raddr);
+}
+
+void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
+                                uint32_t error_code)
+{
+    raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+void helper_raise_exception(CPUPPCState *env, uint32_t exception)
+{
+    raise_exception_err_ra(env, exception, 0, 0);
+}
+
+void helper_store_msr(CPUPPCState *env, target_ulong val)
+{
+    uint32_t excp = hreg_store_msr(env, val, 0);
+
+    if (excp != 0) {
+        CPUState *cs = env_cpu(env);
+        cpu_interrupt_exittb(cs);
+        raise_exception(env, excp);
+    }
+}
+
+#if defined(TARGET_PPC64)
+void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
+{
+    CPUState *cs;
+
+    cs = env_cpu(env);
+    cs->halted = 1;
+
+    /*
+     * The architecture specifies that HDEC interrupts are discarded
+     * in PM states
+     */
+    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
+
+    /* Condition for waking up at 0x100 */
+    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
+        (env->spr[SPR_PSSCR] & PSSCR_EC);
+}
+#endif /* defined(TARGET_PPC64) */
+
+static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
+{
+    CPUState *cs = env_cpu(env);
+
+    /* MSR:POW cannot be set by any form of rfi */
+    msr &= ~(1ULL << MSR_POW);
+
+#if defined(TARGET_PPC64)
+    /* Switching to 32-bit? Crop the nip */
+    if (!msr_is_64bit(env, msr)) {
+        nip = (uint32_t)nip;
+    }
+#else
+    nip = (uint32_t)nip;
+#endif
+    /* XXX: beware: this is false if VLE is supported */
+    env->nip = nip & ~((target_ulong)0x00000003);
+    hreg_store_msr(env, msr, 1);
+#if defined(DEBUG_OP)
+    cpu_dump_rfi(env->nip, env->msr);
+#endif
+    /*
+     * No need to raise an exception here, as rfi is always the last
+     * insn of a TB
+     */
+    cpu_interrupt_exittb(cs);
+    /* Reset the reservation */
+    env->reserve_addr = -1;
+
+    /* Context synchronizing: check if TCG TLB needs flush */
+    check_tlb_flush(env, false);
+}
+
+void helper_rfi(CPUPPCState *env)
+{
+    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
+}
+
+#define MSR_BOOK3S_MASK
+#if defined(TARGET_PPC64)
+void helper_rfid(CPUPPCState *env)
+{
+    /*
+     * The architecture defines a number of rules for which bits can
+     * change but in practice, we handle this in hreg_store_msr()
+     * which will be called by do_rfi(), so there is no need to filter
+     * here
+     */
+    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
+}
+
+void helper_hrfid(CPUPPCState *env)
+{
+    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
+}
+#endif
+
+/*****************************************************************************/
+/* Embedded PowerPC specific helpers */
+void helper_40x_rfci(CPUPPCState *env)
+{
+    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
+}
+
+void helper_rfci(CPUPPCState *env)
+{
+    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
+}
+
+void helper_rfdi(CPUPPCState *env)
+{
+    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
+    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
+}
+
+void helper_rfmci(CPUPPCState *env)
+{
+    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
+    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
+}
+
+void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
+               uint32_t flags)
+{
+    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
+                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
+                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
+                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
+                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
+        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+                               POWERPC_EXCP_TRAP, GETPC());
+    }
+}
+
+#if defined(TARGET_PPC64)
+void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
+               uint32_t flags)
+{
+    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
+                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
+                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
+                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
+                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
+        raise_exception_err_ra(env, 
POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_TRAP, GETPC()); + } +} +#endif + +/*****************************************************************************/ +/* PowerPC 601 specific instructions (POWER bridge) */ + +void helper_rfsvc(CPUPPCState *env) +{ + do_rfi(env, env->lr, env->ctr & 0x0000FFFF); +} + +/* Embedded.Processor Control */ +static int dbell2irq(target_ulong rb) +{ + int msg = rb & DBELL_TYPE_MASK; + int irq = -1; + + switch (msg) { + case DBELL_TYPE_DBELL: + irq = PPC_INTERRUPT_DOORBELL; + break; + case DBELL_TYPE_DBELL_CRIT: + irq = PPC_INTERRUPT_CDOORBELL; + break; + case DBELL_TYPE_G_DBELL: + case DBELL_TYPE_G_DBELL_CRIT: + case DBELL_TYPE_G_DBELL_MC: + /* XXX implement */ + default: + break; + } + + return irq; +} + +void helper_msgclr(CPUPPCState *env, target_ulong rb) +{ + int irq = dbell2irq(rb); + + if (irq < 0) { + return; + } + + env->pending_interrupts &= ~(1 << irq); +} + +void helper_msgsnd(CPUPPCState *env, target_ulong rb) +{ + int irq = dbell2irq(rb); + int pir = rb & DBELL_PIRTAG_MASK; + CPUState *cs = (CPUState *)env; + PowerPCCPU *cpu = POWERPC_CPU(env->uc->cpu); + CPUPPCState *cenv = &cpu->env; + + if (irq < 0) { + return; + } + + if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { + cenv->pending_interrupts |= 1 << irq; + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + +/* Server Processor Control */ + +static bool dbell_type_server(target_ulong rb) +{ + /* + * A Directed Hypervisor Doorbell message is sent only if the + * message type is 5. All other types are reserved and the + * instruction is a no-op + */ + return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER; +} + +void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) +{ + if (!dbell_type_server(rb)) { + return; + } + + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL); +} + +static void book3s_msgsnd_common(CPUPPCState *env, int pir, int irq) +{ + CPUState *cs = (CPUState *)env; + PowerPCCPU *cpu = POWERPC_CPU(env->uc->cpu); + CPUPPCState *cenv = &cpu->env; + + /* TODO: broadcast message to all threads of the same processor */ + if (cenv->spr_cb[SPR_PIR].default_value == pir) { + cenv->pending_interrupts |= 1 << irq; + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + +void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb) +{ + int pir = rb & DBELL_PROCIDTAG_MASK; + + if (!dbell_type_server(rb)) { + return; + } + + book3s_msgsnd_common(env, pir, PPC_INTERRUPT_HDOORBELL); +} + +#if defined(TARGET_PPC64) +void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) +{ + helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP); + + if (!dbell_type_server(rb)) { + return; + } + + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); +} + +/* + * sends a message to other threads that are on the same + * multi-threaded processor + */ +void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) +{ + int pir = env->spr_cb[SPR_PIR].default_value; + + helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP); + + if (!dbell_type_server(rb)) { + return; + } + + /* TODO: TCG supports only one thread */ + + book3s_msgsnd_common(env, pir, PPC_INTERRUPT_DOORBELL); +} +#endif + +void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + CPUPPCState *env = cs->env_ptr; + uint32_t insn; + + /* Restore state and reload the insn we executed, for filling in DSISR. 
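+     * cpu_restore_state() rewinds env->nip back to the faulting instruction
+     * so that cpu_ldl_code() below can reload it.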
+     */
+    cpu_restore_state(cs, retaddr, true);
+    insn = cpu_ldl_code(env, env->nip);
+
+    cs->exception_index = POWERPC_EXCP_ALIGN;
+    env->error_code = insn & 0x03FF0000;
+    cpu_loop_exit(cs);
+}
diff --git a/qemu/target/ppc/fpu_helper.c b/qemu/target/ppc/fpu_helper.c
new file mode 100644
index 00000000..fb08eaee
--- /dev/null
+++ b/qemu/target/ppc/fpu_helper.c
@@ -0,0 +1,3461 @@
+/*
+ * PowerPC floating point and SPE emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "internal.h"
+#include "fpu/softfloat.h"
+
+static inline float128 float128_snan_to_qnan(float128 x)
+{
+    float128 r;
+
+    r.high = x.high | 0x0000800000000000;
+    r.low = x.low;
+    return r;
+}
+
+#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
+#define float32_snan_to_qnan(x) ((x) | 0x00400000)
+#define float16_snan_to_qnan(x) ((x) | 0x0200)
+
+static inline bool fp_exceptions_enabled(CPUPPCState *env)
+{
+    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
+}
+
+/*****************************************************************************/
+/* Floating point operations helpers */
+
+/*
+ * This is the non-arithmetic conversion that happens e.g. on loads.
+ * In the Power ISA pseudocode, this is called DOUBLE.
+ */
+uint64_t helper_todouble(uint32_t arg)
+{
+    uint32_t abs_arg = arg & 0x7fffffff;
+    uint64_t ret;
+
+    if (likely(abs_arg >= 0x00800000)) {
+        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
+            /* Inf or NAN. */
+            ret = (uint64_t)extract32(arg, 31, 1) << 63;
+            ret |= (uint64_t)0x7ff << 52;
+            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
+        } else {
+            /* Normalized operand. */
+            ret = (uint64_t)extract32(arg, 30, 2) << 62;
+            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
+            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
+        }
+    } else {
+        /* Zero or Denormalized operand. */
+        ret = (uint64_t)extract32(arg, 31, 1) << 63;
+        if (unlikely(abs_arg != 0)) {
+            /*
+             * Denormalized operand.
+             * Shift fraction so that the msb is in the implicit bit position.
+             * Thus, shift is in the range [1:23].
+             */
+            int shift = clz32(abs_arg) - 8;
+            /*
+             * The first 3 terms compute the float64 exponent. We then bias
+             * this result by -1 so that we can swallow the implicit bit below.
+             */
+            int exp = -126 - shift + 1023 - 1;
+
+            ret |= (uint64_t)exp << 52;
+            ret += (uint64_t)abs_arg << (52 - 23 + shift);
+        }
+    }
+    return ret;
+}
+
+/*
+ * This is the non-arithmetic conversion that happens e.g. on stores.
+ * In the Power ISA pseudocode, this is called SINGLE.
+ */
+uint32_t helper_tosingle(uint64_t arg)
+{
+    int exp = extract64(arg, 52, 11);
+    uint32_t ret;
+
+    if (likely(exp > 896)) {
+        /* No denormalization required (includes Inf, NaN).
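+         * A biased float64 exponent above 896 maps to a biased float32
+         * exponent above zero (896 - 1023 + 127 = 0), so the top bits can
+         * be copied across unchanged.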
*/ + ret = extract64(arg, 62, 2) << 30; + ret |= extract64(arg, 29, 30); + } else { + /* + * Zero or Denormal result. If the exponent is in bounds for + * a single-precision denormal result, extract the proper + * bits. If the input is not zero, and the exponent is out of + * bounds, then the result is undefined; this underflows to + * zero. + */ + ret = extract64(arg, 63, 1) << 31; + if (unlikely(exp >= 874)) { + /* Denormal result. */ + ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp); + } + } + return ret; +} + +static inline int ppc_float32_get_unbiased_exp(float32 f) +{ + return ((f >> 23) & 0xFF) - 127; +} + +static inline int ppc_float64_get_unbiased_exp(float64 f) +{ + return ((f >> 52) & 0x7FF) - 1023; +} + +/* Classify a floating-point number. */ +enum { + is_normal = 1, + is_zero = 2, + is_denormal = 4, + is_inf = 8, + is_qnan = 16, + is_snan = 32, + is_neg = 64, +}; + +#define COMPUTE_CLASS(tp) \ +static int tp##_classify(tp arg) \ +{ \ + int ret = tp##_is_neg(arg) * is_neg; \ + if (unlikely(tp##_is_any_nan(arg))) { \ + float_status dummy = { 0 }; /* snan_bit_is_one = 0 */ \ + ret |= (tp##_is_signaling_nan(arg, &dummy) \ + ? is_snan : is_qnan); \ + } else if (unlikely(tp##_is_infinity(arg))) { \ + ret |= is_inf; \ + } else if (tp##_is_zero(arg)) { \ + ret |= is_zero; \ + } else if (tp##_is_zero_or_denormal(arg)) { \ + ret |= is_denormal; \ + } else { \ + ret |= is_normal; \ + } \ + return ret; \ +} + +COMPUTE_CLASS(float16) +COMPUTE_CLASS(float32) +COMPUTE_CLASS(float64) +COMPUTE_CLASS(float128) + +static void set_fprf_from_class(CPUPPCState *env, int class) +{ + static const uint8_t fprf[6][2] = { + { 0x04, 0x08 }, /* normalized */ + { 0x02, 0x12 }, /* zero */ + { 0x14, 0x18 }, /* denormalized */ + { 0x05, 0x09 }, /* infinity */ + { 0x11, 0x11 }, /* qnan */ + { 0x00, 0x00 }, /* snan -- flags are undefined */ + }; + bool isneg = class & is_neg; + + env->fpscr &= ~FP_FPRF; + env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF; +} + +#define COMPUTE_FPRF(tp) \ +void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \ +{ \ + set_fprf_from_class(env, tp##_classify(arg)); \ +} + +COMPUTE_FPRF(float16) +COMPUTE_FPRF(float32) +COMPUTE_FPRF(float64) +COMPUTE_FPRF(float128) + +/* Floating-point invalid operations exception */ +static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr) +{ + /* Update the floating-point invalid operation summary */ + env->fpscr |= FP_VX; + /* Update the floating-point exception summary */ + env->fpscr |= FP_FX; + if (fpscr_ve != 0) { + /* Update the floating-point enabled exception summary */ + env->fpscr |= FP_FEX; + if (fp_exceptions_enabled(env)) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_FP | op, retaddr); + } + } +} + +static void finish_invalid_op_arith(CPUPPCState *env, int op, + bool set_fpcc, uintptr_t retaddr) +{ + env->fpscr &= ~(FP_FR | FP_FI); + if (fpscr_ve == 0) { + if (set_fpcc) { + env->fpscr &= ~FP_FPCC; + env->fpscr |= (FP_C | FP_FU); + } + } + finish_invalid_op_excp(env, op, retaddr); +} + +/* Signalling NaN */ +static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr) +{ + env->fpscr |= FP_VXSNAN; + finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr); +} + +/* Magnitude subtraction of infinities */ +static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc, + uintptr_t retaddr) +{ + env->fpscr |= FP_VXISI; + finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr); +} + +/* Division of infinity by infinity */ +static void 
float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc, + uintptr_t retaddr) +{ + env->fpscr |= FP_VXIDI; + finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr); +} + +/* Division of zero by zero */ +static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc, + uintptr_t retaddr) +{ + env->fpscr |= FP_VXZDZ; + finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr); +} + +/* Multiplication of zero by infinity */ +static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc, + uintptr_t retaddr) +{ + env->fpscr |= FP_VXIMZ; + finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr); +} + +/* Square root of a negative number */ +static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc, + uintptr_t retaddr) +{ + env->fpscr |= FP_VXSQRT; + finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr); +} + +/* Ordered comparison of NaN */ +static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc, + uintptr_t retaddr) +{ + env->fpscr |= FP_VXVC; + if (set_fpcc) { + env->fpscr &= ~FP_FPCC; + env->fpscr |= (FP_C | FP_FU); + } + /* Update the floating-point invalid operation summary */ + env->fpscr |= FP_VX; + /* Update the floating-point exception summary */ + env->fpscr |= FP_FX; + /* We must update the target FPR before raising the exception */ + if (fpscr_ve != 0) { + CPUState *cs = env_cpu(env); + + cs->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC; + /* Update the floating-point enabled exception summary */ + env->fpscr |= FP_FEX; + /* Exception is deferred */ + } +} + +/* Invalid conversion */ +static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc, + uintptr_t retaddr) +{ + env->fpscr |= FP_VXCVI; + env->fpscr &= ~(FP_FR | FP_FI); + if (fpscr_ve == 0) { + if (set_fpcc) { + env->fpscr &= ~FP_FPCC; + env->fpscr |= (FP_C | FP_FU); + } + } + finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr); +} + +static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr) +{ + env->fpscr |= FP_ZX; + env->fpscr &= ~(FP_FR | FP_FI); + /* Update the floating-point exception summary */ + env->fpscr |= FP_FX; + if (fpscr_ze != 0) { + /* Update the floating-point enabled exception summary */ + env->fpscr |= FP_FEX; + if (fp_exceptions_enabled(env)) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX, + raddr); + } + } +} + +static inline void float_overflow_excp(CPUPPCState *env) +{ + CPUState *cs = env_cpu(env); + + env->fpscr |= FP_OX; + /* Update the floating-point exception summary */ + env->fpscr |= FP_FX; + if (fpscr_oe != 0) { + /* XXX: should adjust the result */ + /* Update the floating-point enabled exception summary */ + env->fpscr |= FP_FEX; + /* We must update the target FPR before raising the exception */ + cs->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX; + } else { + env->fpscr |= FP_XX; + env->fpscr |= FP_FI; + } +} + +static inline void float_underflow_excp(CPUPPCState *env) +{ + CPUState *cs = env_cpu(env); + + env->fpscr |= FP_UX; + /* Update the floating-point exception summary */ + env->fpscr |= FP_FX; + if (fpscr_ue != 0) { + /* XXX: should adjust the result */ + /* Update the floating-point enabled exception summary */ + env->fpscr |= FP_FEX; + /* We must update the target FPR before raising the exception */ + cs->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX; + } +} + 
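+/*
+ * float_overflow_excp()/float_underflow_excp() above and
+ * float_inexact_excp() below share the same deferred pattern: set the
+ * FPSCR status bit plus the FX summary bit and, if the matching enable
+ * bit (OE/UE/XE) is set, record POWERPC_EXCP_PROGRAM in
+ * cs->exception_index. The trap itself is only taken later, from
+ * do_float_check_status(), once the target FPR has been updated.
+ */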
+static inline void float_inexact_excp(CPUPPCState *env) +{ + CPUState *cs = env_cpu(env); + + env->fpscr |= FP_FI; + env->fpscr |= FP_XX; + /* Update the floating-point exception summary */ + env->fpscr |= FP_FX; + if (fpscr_xe != 0) { + /* Update the floating-point enabled exception summary */ + env->fpscr |= FP_FEX; + /* We must update the target FPR before raising the exception */ + cs->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX; + } +} + +static inline void fpscr_set_rounding_mode(CPUPPCState *env) +{ + int rnd_type; + + /* Set rounding mode */ + switch (fpscr_rn) { + case 0: + /* Best approximation (round to nearest) */ + rnd_type = float_round_nearest_even; + break; + case 1: + /* Smaller magnitude (round toward zero) */ + rnd_type = float_round_to_zero; + break; + case 2: + /* Round toward +infinite */ + rnd_type = float_round_up; + break; + default: + case 3: + /* Round toward -infinite */ + rnd_type = float_round_down; + break; + } + set_float_rounding_mode(rnd_type, &env->fp_status); +} + +void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit) +{ + int prev; + + prev = (env->fpscr >> bit) & 1; + env->fpscr &= ~(1 << bit); + if (prev == 1) { + switch (bit) { + case FPSCR_RN1: + case FPSCR_RN0: + fpscr_set_rounding_mode(env); + break; + case FPSCR_VXSNAN: + case FPSCR_VXISI: + case FPSCR_VXIDI: + case FPSCR_VXZDZ: + case FPSCR_VXIMZ: + case FPSCR_VXVC: + case FPSCR_VXSOFT: + case FPSCR_VXSQRT: + case FPSCR_VXCVI: + if (!fpscr_ix) { + /* Set VX bit to zero */ + env->fpscr &= ~FP_VX; + } + break; + case FPSCR_OX: + case FPSCR_UX: + case FPSCR_ZX: + case FPSCR_XX: + case FPSCR_VE: + case FPSCR_OE: + case FPSCR_UE: + case FPSCR_ZE: + case FPSCR_XE: + if (!fpscr_eex) { + /* Set the FEX bit */ + env->fpscr &= ~FP_FEX; + } + break; + default: + break; + } + } +} + +void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit) +{ + CPUState *cs = env_cpu(env); + int prev; + + prev = (env->fpscr >> bit) & 1; + env->fpscr |= 1ULL << bit; + if (prev == 0) { + switch (bit) { + case FPSCR_VX: + env->fpscr |= FP_FX; + if (fpscr_ve) { + goto raise_ve; + } + break; + case FPSCR_OX: + env->fpscr |= FP_FX; + if (fpscr_oe) { + goto raise_oe; + } + break; + case FPSCR_UX: + env->fpscr |= FP_FX; + if (fpscr_ue) { + goto raise_ue; + } + break; + case FPSCR_ZX: + env->fpscr |= FP_FX; + if (fpscr_ze) { + goto raise_ze; + } + break; + case FPSCR_XX: + env->fpscr |= FP_FX; + if (fpscr_xe) { + goto raise_xe; + } + break; + case FPSCR_VXSNAN: + case FPSCR_VXISI: + case FPSCR_VXIDI: + case FPSCR_VXZDZ: + case FPSCR_VXIMZ: + case FPSCR_VXVC: + case FPSCR_VXSOFT: + case FPSCR_VXSQRT: + case FPSCR_VXCVI: + env->fpscr |= FP_VX; + env->fpscr |= FP_FX; + if (fpscr_ve != 0) { + goto raise_ve; + } + break; + case FPSCR_VE: + if (fpscr_vx != 0) { + raise_ve: + env->error_code = POWERPC_EXCP_FP; + if (fpscr_vxsnan) { + env->error_code |= POWERPC_EXCP_FP_VXSNAN; + } + if (fpscr_vxisi) { + env->error_code |= POWERPC_EXCP_FP_VXISI; + } + if (fpscr_vxidi) { + env->error_code |= POWERPC_EXCP_FP_VXIDI; + } + if (fpscr_vxzdz) { + env->error_code |= POWERPC_EXCP_FP_VXZDZ; + } + if (fpscr_vximz) { + env->error_code |= POWERPC_EXCP_FP_VXIMZ; + } + if (fpscr_vxvc) { + env->error_code |= POWERPC_EXCP_FP_VXVC; + } + if (fpscr_vxsoft) { + env->error_code |= POWERPC_EXCP_FP_VXSOFT; + } + if (fpscr_vxsqrt) { + env->error_code |= POWERPC_EXCP_FP_VXSQRT; + } + if (fpscr_vxcvi) { + env->error_code |= POWERPC_EXCP_FP_VXCVI; + } + goto raise_excp; + } + break; + case FPSCR_OE: + if 
(fpscr_ox != 0) { + raise_oe: + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX; + goto raise_excp; + } + break; + case FPSCR_UE: + if (fpscr_ux != 0) { + raise_ue: + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX; + goto raise_excp; + } + break; + case FPSCR_ZE: + if (fpscr_zx != 0) { + raise_ze: + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX; + goto raise_excp; + } + break; + case FPSCR_XE: + if (fpscr_xx != 0) { + raise_xe: + env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX; + goto raise_excp; + } + break; + case FPSCR_RN1: + case FPSCR_RN0: + fpscr_set_rounding_mode(env); + break; + default: + break; + raise_excp: + /* Update the floating-point enabled exception summary */ + env->fpscr |= FP_FEX; + /* We have to update Rc1 before raising the exception */ + cs->exception_index = POWERPC_EXCP_PROGRAM; + break; + } + } +} + +void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask) +{ + CPUState *cs = env_cpu(env); + target_ulong prev, new; + int i; + + prev = env->fpscr; + new = (target_ulong)arg; + new &= ~(FP_FEX | FP_VX); + new |= prev & (FP_FEX | FP_VX); + for (i = 0; i < sizeof(target_ulong) * 2; i++) { + if (mask & (1 << i)) { + env->fpscr &= ~(0xFLL << (4 * i)); + env->fpscr |= new & (0xFLL << (4 * i)); + } + } + /* Update VX and FEX */ + if (fpscr_ix != 0) { + env->fpscr |= FP_VX; + } else { + env->fpscr &= ~FP_VX; + } + if ((fpscr_ex & fpscr_eex) != 0) { + env->fpscr |= FP_FEX; + cs->exception_index = POWERPC_EXCP_PROGRAM; + /* XXX: we should compute it properly */ + env->error_code = POWERPC_EXCP_FP; + } else { + env->fpscr &= ~FP_FEX; + } + fpscr_set_rounding_mode(env); +} + +void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask) +{ + helper_store_fpscr(env, arg, mask); +} + +static void do_float_check_status(CPUPPCState *env, uintptr_t raddr) +{ + CPUState *cs = env_cpu(env); + int status = get_float_exception_flags(&env->fp_status); + + if (status & float_flag_overflow) { + float_overflow_excp(env); + } else if (status & float_flag_underflow) { + float_underflow_excp(env); + } + if (status & float_flag_inexact) { + float_inexact_excp(env); + } else { + env->fpscr &= ~FP_FI; /* clear the FPSCR[FI] bit */ + } + + if (cs->exception_index == POWERPC_EXCP_PROGRAM && + (env->error_code & POWERPC_EXCP_FP)) { + /* Deferred floating-point exception after target FPR update */ + if (fp_exceptions_enabled(env)) { + raise_exception_err_ra(env, cs->exception_index, + env->error_code, raddr); + } + } +} + +void helper_float_check_status(CPUPPCState *env) +{ + do_float_check_status(env, GETPC()); +} + +void helper_reset_fpstatus(CPUPPCState *env) +{ + set_float_exception_flags(0, &env->fp_status); +} + +static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc, + uintptr_t retaddr, int classes) +{ + if ((classes & ~is_neg) == is_inf) { + /* Magnitude subtraction of infinities */ + float_invalid_op_vxisi(env, set_fpcc, retaddr); + } else if (classes & is_snan) { + float_invalid_op_vxsnan(env, retaddr); + } +} + +/* fadd - fadd. */ +float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2) +{ + float64 ret = float64_add(arg1, arg2, &env->fp_status); + int status = get_float_exception_flags(&env->fp_status); + + if (unlikely(status & float_flag_invalid)) { + float_invalid_op_addsub(env, 1, GETPC(), + float64_classify(arg1) | + float64_classify(arg2)); + } + + return ret; +} + +/* fsub - fsub. 
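+ * (the trailing dot is the Rc=1 record form; CR1 is updated by the
+ * translator, so both forms share this helper)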
*/ +float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2) +{ + float64 ret = float64_sub(arg1, arg2, &env->fp_status); + int status = get_float_exception_flags(&env->fp_status); + + if (unlikely(status & float_flag_invalid)) { + float_invalid_op_addsub(env, 1, GETPC(), + float64_classify(arg1) | + float64_classify(arg2)); + } + + return ret; +} + +static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc, + uintptr_t retaddr, int classes) +{ + if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) { + /* Multiplication of zero by infinity */ + float_invalid_op_vximz(env, set_fprc, retaddr); + } else if (classes & is_snan) { + float_invalid_op_vxsnan(env, retaddr); + } +} + +/* fmul - fmul. */ +float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2) +{ + float64 ret = float64_mul(arg1, arg2, &env->fp_status); + int status = get_float_exception_flags(&env->fp_status); + + if (unlikely(status & float_flag_invalid)) { + float_invalid_op_mul(env, 1, GETPC(), + float64_classify(arg1) | + float64_classify(arg2)); + } + + return ret; +} + +static void float_invalid_op_div(CPUPPCState *env, bool set_fprc, + uintptr_t retaddr, int classes) +{ + classes &= ~is_neg; + if (classes == is_inf) { + /* Division of infinity by infinity */ + float_invalid_op_vxidi(env, set_fprc, retaddr); + } else if (classes == is_zero) { + /* Division of zero by zero */ + float_invalid_op_vxzdz(env, set_fprc, retaddr); + } else if (classes & is_snan) { + float_invalid_op_vxsnan(env, retaddr); + } +} + +/* fdiv - fdiv. */ +float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2) +{ + float64 ret = float64_div(arg1, arg2, &env->fp_status); + int status = get_float_exception_flags(&env->fp_status); + + if (unlikely(status)) { + if (status & float_flag_invalid) { + float_invalid_op_div(env, 1, GETPC(), + float64_classify(arg1) | + float64_classify(arg2)); + } + if (status & float_flag_divbyzero) { + float_zero_divide_excp(env, GETPC()); + } + } + + return ret; +} + +static void float_invalid_cvt(CPUPPCState *env, bool set_fprc, + uintptr_t retaddr, int class1) +{ + float_invalid_op_vxcvi(env, set_fprc, retaddr); + if (class1 & is_snan) { + float_invalid_op_vxsnan(env, retaddr); + } +} + +#define FPU_FCTI(op, cvt, nanval) \ +uint64_t helper_##op(CPUPPCState *env, float64 arg) \ +{ \ + uint64_t ret = float64_to_##cvt(arg, &env->fp_status); \ + int status = get_float_exception_flags(&env->fp_status); \ + \ + if (unlikely(status)) { \ + if (status & float_flag_invalid) { \ + float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \ + ret = nanval; \ + } \ + do_float_check_status(env, GETPC()); \ + } \ + return ret; \ +} + +FPU_FCTI(fctiw, int32, 0x80000000U) +FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U) +FPU_FCTI(fctiwu, uint32, 0x00000000U) +FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U) +FPU_FCTI(fctid, int64, 0x8000000000000000ULL) +FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL) +FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL) +FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL) + +#define FPU_FCFI(op, cvtr, is_single) \ +uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \ +{ \ + CPU_DoubleU farg; \ + \ + if (is_single) { \ + float32 tmp = cvtr(arg, &env->fp_status); \ + farg.d = float32_to_float64(tmp, &env->fp_status); \ + } else { \ + farg.d = cvtr(arg, &env->fp_status); \ + } \ + do_float_check_status(env, GETPC()); \ + return farg.ll; \ +} + +FPU_FCFI(fcfid, int64_to_float64, 0) +FPU_FCFI(fcfids, int64_to_float32, 1) +FPU_FCFI(fcfidu, 
uint64_to_float64, 0) +FPU_FCFI(fcfidus, uint64_to_float32, 1) + +static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg, + int rounding_mode) +{ + CPU_DoubleU farg; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { + /* sNaN round */ + float_invalid_op_vxsnan(env, GETPC()); + farg.ll = arg | 0x0008000000000000ULL; + } else { + int inexact = get_float_exception_flags(&env->fp_status) & + float_flag_inexact; + set_float_rounding_mode(rounding_mode, &env->fp_status); + farg.ll = float64_round_to_int(farg.d, &env->fp_status); + /* Restore rounding mode from FPSCR */ + fpscr_set_rounding_mode(env); + + /* fri* does not set FPSCR[XX] */ + if (!inexact) { + env->fp_status.float_exception_flags &= ~float_flag_inexact; + } + } + do_float_check_status(env, GETPC()); + return farg.ll; +} + +uint64_t helper_frin(CPUPPCState *env, uint64_t arg) +{ + return do_fri(env, arg, float_round_ties_away); +} + +uint64_t helper_friz(CPUPPCState *env, uint64_t arg) +{ + return do_fri(env, arg, float_round_to_zero); +} + +uint64_t helper_frip(CPUPPCState *env, uint64_t arg) +{ + return do_fri(env, arg, float_round_up); +} + +uint64_t helper_frim(CPUPPCState *env, uint64_t arg) +{ + return do_fri(env, arg, float_round_down); +} + +#define FPU_MADDSUB_UPDATE(NAME, TP) \ +static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3, \ + unsigned int madd_flags, uintptr_t retaddr) \ +{ \ + if (TP##_is_signaling_nan(arg1, &env->fp_status) || \ + TP##_is_signaling_nan(arg2, &env->fp_status) || \ + TP##_is_signaling_nan(arg3, &env->fp_status)) { \ + /* sNaN operation */ \ + float_invalid_op_vxsnan(env, retaddr); \ + } \ + if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) || \ + (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) { \ + /* Multiplication of zero by infinity */ \ + float_invalid_op_vximz(env, 1, retaddr); \ + } \ + if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) && \ + TP##_is_infinity(arg3)) { \ + uint8_t aSign, bSign, cSign; \ + \ + aSign = TP##_is_neg(arg1); \ + bSign = TP##_is_neg(arg2); \ + cSign = TP##_is_neg(arg3); \ + if (madd_flags & float_muladd_negate_c) { \ + cSign ^= 1; \ + } \ + if (aSign ^ bSign ^ cSign) { \ + float_invalid_op_vxisi(env, 1, retaddr); \ + } \ + } \ +} +FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32) +FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64) + +#define FPU_FMADD(op, madd_flags) \ +uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \ + uint64_t arg2, uint64_t arg3) \ +{ \ + uint32_t flags; \ + float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags, \ + &env->fp_status); \ + flags = get_float_exception_flags(&env->fp_status); \ + if (flags) { \ + if (flags & float_flag_invalid) { \ + float64_maddsub_update_excp(env, arg1, arg2, arg3, \ + madd_flags, GETPC()); \ + } \ + do_float_check_status(env, GETPC()); \ + } \ + return ret; \ +} + +#define MADD_FLGS 0 +#define MSUB_FLGS float_muladd_negate_c +#define NMADD_FLGS float_muladd_negate_result +#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result) + +FPU_FMADD(fmadd, MADD_FLGS) +FPU_FMADD(fnmadd, NMADD_FLGS) +FPU_FMADD(fmsub, MSUB_FLGS) +FPU_FMADD(fnmsub, NMSUB_FLGS) + +/* frsp - frsp. 
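+ * (round the float64 operand to float32 precision, returning the result
+ * still in float64 format)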
*/ +uint64_t helper_frsp(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + float32 f32; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { + float_invalid_op_vxsnan(env, GETPC()); + } + f32 = float64_to_float32(farg.d, &env->fp_status); + farg.d = float32_to_float64(f32, &env->fp_status); + + return farg.ll; +} + +/* fsqrt - fsqrt. */ +float64 helper_fsqrt(CPUPPCState *env, float64 arg) +{ + float64 ret = float64_sqrt(arg, &env->fp_status); + int status = get_float_exception_flags(&env->fp_status); + + if (unlikely(status & float_flag_invalid)) { + if (unlikely(float64_is_any_nan(arg))) { + if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) { + /* sNaN square root */ + float_invalid_op_vxsnan(env, GETPC()); + } + } else { + /* Square root of a negative nonzero number */ + float_invalid_op_vxsqrt(env, 1, GETPC()); + } + } + + return ret; +} + +/* fre - fre. */ +float64 helper_fre(CPUPPCState *env, float64 arg) +{ + /* "Estimate" the reciprocal with actual division. */ + float64 ret = float64_div(float64_one, arg, &env->fp_status); + int status = get_float_exception_flags(&env->fp_status); + + if (unlikely(status)) { + if (status & float_flag_invalid) { + if (float64_is_signaling_nan(arg, &env->fp_status)) { + /* sNaN reciprocal */ + float_invalid_op_vxsnan(env, GETPC()); + } + } + if (status & float_flag_divbyzero) { + float_zero_divide_excp(env, GETPC()); + /* For FPSCR.ZE == 0, the result is 1/2. */ + ret = float64_set_sign(float64_half, float64_is_neg(arg)); + } + } + + return ret; +} + +/* fres - fres. */ +uint64_t helper_fres(CPUPPCState *env, uint64_t arg) +{ + CPU_DoubleU farg; + float32 f32; + + farg.ll = arg; + + if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) { + /* sNaN reciprocal */ + float_invalid_op_vxsnan(env, GETPC()); + } + farg.d = float64_div(float64_one, farg.d, &env->fp_status); + f32 = float64_to_float32(farg.d, &env->fp_status); + farg.d = float32_to_float64(f32, &env->fp_status); + + return farg.ll; +} + +/* frsqrte - frsqrte. */ +float64 helper_frsqrte(CPUPPCState *env, float64 arg) +{ + /* "Estimate" the reciprocal with actual division. */ + float64 rets = float64_sqrt(arg, &env->fp_status); + float64 retd = float64_div(float64_one, rets, &env->fp_status); + int status = get_float_exception_flags(&env->fp_status); + + if (unlikely(status)) { + if (status & float_flag_invalid) { + if (float64_is_signaling_nan(arg, &env->fp_status)) { + /* sNaN reciprocal */ + float_invalid_op_vxsnan(env, GETPC()); + } else { + /* Square root of a negative nonzero number */ + float_invalid_op_vxsqrt(env, 1, GETPC()); + } + } + if (status & float_flag_divbyzero) { + /* Reciprocal of (square root of) zero. */ + float_zero_divide_excp(env, GETPC()); + } + } + + return retd; +} + +/* fsel - fsel. 
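+ * (select: return arg2 if arg1 is greater than or equal to zero and not a
+ * NaN, otherwise arg3)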
*/ +uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2, + uint64_t arg3) +{ + CPU_DoubleU farg1; + + farg1.ll = arg1; + + if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && + !float64_is_any_nan(farg1.d)) { + return arg2; + } else { + return arg3; + } +} + +uint32_t helper_ftdiv(uint64_t fra, uint64_t frb) +{ + int fe_flag = 0; + int fg_flag = 0; + + if (unlikely(float64_is_infinity(fra) || + float64_is_infinity(frb) || + float64_is_zero(frb))) { + fe_flag = 1; + fg_flag = 1; + } else { + int e_a = ppc_float64_get_unbiased_exp(fra); + int e_b = ppc_float64_get_unbiased_exp(frb); + + if (unlikely(float64_is_any_nan(fra) || + float64_is_any_nan(frb))) { + fe_flag = 1; + } else if ((e_b <= -1022) || (e_b >= 1021)) { + fe_flag = 1; + } else if (!float64_is_zero(fra) && + (((e_a - e_b) >= 1023) || + ((e_a - e_b) <= -1021) || + (e_a <= -970))) { + fe_flag = 1; + } + + if (unlikely(float64_is_zero_or_denormal(frb))) { + /* XB is not zero because of the above check and */ + /* so must be denormalized. */ + fg_flag = 1; + } + } + + return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); +} + +uint32_t helper_ftsqrt(uint64_t frb) +{ + int fe_flag = 0; + int fg_flag = 0; + + if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) { + fe_flag = 1; + fg_flag = 1; + } else { + int e_b = ppc_float64_get_unbiased_exp(frb); + + if (unlikely(float64_is_any_nan(frb))) { + fe_flag = 1; + } else if (unlikely(float64_is_zero(frb))) { + fe_flag = 1; + } else if (unlikely(float64_is_neg(frb))) { + fe_flag = 1; + } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) { + fe_flag = 1; + } + + if (unlikely(float64_is_zero_or_denormal(frb))) { + /* XB is not zero because of the above check and */ + /* therefore must be denormalized. */ + fg_flag = 1; + } + } + + return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 
2 : 0); +} + +void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2, + uint32_t crfD) +{ + CPU_DoubleU farg1, farg2; + uint32_t ret = 0; + + farg1.ll = arg1; + farg2.ll = arg2; + + if (unlikely(float64_is_any_nan(farg1.d) || + float64_is_any_nan(farg2.d))) { + ret = 0x01UL; + } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) { + ret = 0x08UL; + } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) { + ret = 0x04UL; + } else { + ret = 0x02UL; + } + + env->fpscr &= ~FP_FPCC; + env->fpscr |= ret << FPSCR_FPCC; + env->crf[crfD] = ret; + if (unlikely(ret == 0x01UL + && (float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status)))) { + /* sNaN comparison */ + float_invalid_op_vxsnan(env, GETPC()); + } +} + +void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2, + uint32_t crfD) +{ + CPU_DoubleU farg1, farg2; + uint32_t ret = 0; + + farg1.ll = arg1; + farg2.ll = arg2; + + if (unlikely(float64_is_any_nan(farg1.d) || + float64_is_any_nan(farg2.d))) { + ret = 0x01UL; + } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) { + ret = 0x08UL; + } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) { + ret = 0x04UL; + } else { + ret = 0x02UL; + } + + env->fpscr &= ~FP_FPCC; + env->fpscr |= ret << FPSCR_FPCC; + env->crf[crfD] = (uint32_t) ret; + if (unlikely(ret == 0x01UL)) { + float_invalid_op_vxvc(env, 1, GETPC()); + if (float64_is_signaling_nan(farg1.d, &env->fp_status) || + float64_is_signaling_nan(farg2.d, &env->fp_status)) { + /* sNaN comparison */ + float_invalid_op_vxsnan(env, GETPC()); + } + } +} + +/* Single-precision floating-point conversions */ +static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.f = int32_to_float32(val, &env->vec_status); + + return u.l; +} + +static inline uint32_t efscfui(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.f = uint32_to_float32(val, &env->vec_status); + + return u.l; +} + +static inline int32_t efsctsi(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { + return 0; + } + + return float32_to_int32(u.f, &env->vec_status); +} + +static inline uint32_t efsctui(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { + return 0; + } + + return float32_to_uint32(u.f, &env->vec_status); +} + +static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { + return 0; + } + + return float32_to_int32_round_to_zero(u.f, &env->vec_status); +} + +static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { + return 0; + } + + return float32_to_uint32_round_to_zero(u.f, &env->vec_status); +} + +static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + float32 tmp; + + u.f = int32_to_float32(val, &env->vec_status); + tmp = int64_to_float32(1ULL << 32, &env->vec_status); + u.f = float32_div(u.f, tmp, &env->vec_status); + + return u.l; +} + +static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + float32 tmp; + + u.f = 
uint32_to_float32(val, &env->vec_status); + tmp = uint64_to_float32(1ULL << 32, &env->vec_status); + u.f = float32_div(u.f, tmp, &env->vec_status); + + return u.l; +} + +static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + float32 tmp; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { + return 0; + } + tmp = uint64_to_float32(1ULL << 32, &env->vec_status); + u.f = float32_mul(u.f, tmp, &env->vec_status); + + return float32_to_int32(u.f, &env->vec_status); +} + +static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val) +{ + CPU_FloatU u; + float32 tmp; + + u.l = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) { + return 0; + } + tmp = uint64_to_float32(1ULL << 32, &env->vec_status); + u.f = float32_mul(u.f, tmp, &env->vec_status); + + return float32_to_uint32(u.f, &env->vec_status); +} + +#define HELPER_SPE_SINGLE_CONV(name) \ + uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \ + { \ + return e##name(env, val); \ + } +/* efscfsi */ +HELPER_SPE_SINGLE_CONV(fscfsi); +/* efscfui */ +HELPER_SPE_SINGLE_CONV(fscfui); +/* efscfuf */ +HELPER_SPE_SINGLE_CONV(fscfuf); +/* efscfsf */ +HELPER_SPE_SINGLE_CONV(fscfsf); +/* efsctsi */ +HELPER_SPE_SINGLE_CONV(fsctsi); +/* efsctui */ +HELPER_SPE_SINGLE_CONV(fsctui); +/* efsctsiz */ +HELPER_SPE_SINGLE_CONV(fsctsiz); +/* efsctuiz */ +HELPER_SPE_SINGLE_CONV(fsctuiz); +/* efsctsf */ +HELPER_SPE_SINGLE_CONV(fsctsf); +/* efsctuf */ +HELPER_SPE_SINGLE_CONV(fsctuf); + +#define HELPER_SPE_VECTOR_CONV(name) \ + uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \ + { \ + return ((uint64_t)e##name(env, val >> 32) << 32) | \ + (uint64_t)e##name(env, val); \ + } +/* evfscfsi */ +HELPER_SPE_VECTOR_CONV(fscfsi); +/* evfscfui */ +HELPER_SPE_VECTOR_CONV(fscfui); +/* evfscfuf */ +HELPER_SPE_VECTOR_CONV(fscfuf); +/* evfscfsf */ +HELPER_SPE_VECTOR_CONV(fscfsf); +/* evfsctsi */ +HELPER_SPE_VECTOR_CONV(fsctsi); +/* evfsctui */ +HELPER_SPE_VECTOR_CONV(fsctui); +/* evfsctsiz */ +HELPER_SPE_VECTOR_CONV(fsctsiz); +/* evfsctuiz */ +HELPER_SPE_VECTOR_CONV(fsctuiz); +/* evfsctsf */ +HELPER_SPE_VECTOR_CONV(fsctsf); +/* evfsctuf */ +HELPER_SPE_VECTOR_CONV(fsctuf); + +/* Single-precision floating-point arithmetic */ +static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + u1.f = float32_add(u1.f, u2.f, &env->vec_status); + return u1.l; +} + +static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + u1.f = float32_sub(u1.f, u2.f, &env->vec_status); + return u1.l; +} + +static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + u1.f = float32_mul(u1.f, u2.f, &env->vec_status); + return u1.l; +} + +static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + u1.f = float32_div(u1.f, u2.f, &env->vec_status); + return u1.l; +} + +#define HELPER_SPE_SINGLE_ARITH(name) \ + uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \ + { \ + return e##name(env, op1, op2); \ + } +/* efsadd */ +HELPER_SPE_SINGLE_ARITH(fsadd); +/* efssub */ +HELPER_SPE_SINGLE_ARITH(fssub); +/* efsmul */ +HELPER_SPE_SINGLE_ARITH(fsmul); +/* efsdiv */ +HELPER_SPE_SINGLE_ARITH(fsdiv); + +#define 
HELPER_SPE_VECTOR_ARITH(name) \ + uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \ + { \ + return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \ + (uint64_t)e##name(env, op1, op2); \ + } +/* evfsadd */ +HELPER_SPE_VECTOR_ARITH(fsadd); +/* evfssub */ +HELPER_SPE_VECTOR_ARITH(fssub); +/* evfsmul */ +HELPER_SPE_VECTOR_ARITH(fsmul); +/* evfsdiv */ +HELPER_SPE_VECTOR_ARITH(fsdiv); + +/* Single-precision floating-point comparisons */ +static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0; +} + +static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4; +} + +static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + CPU_FloatU u1, u2; + + u1.l = op1; + u2.l = op2; + return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0; +} + +static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + /* XXX: TODO: ignore special values (NaN, infinites, ...) */ + return efscmplt(env, op1, op2); +} + +static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + /* XXX: TODO: ignore special values (NaN, infinites, ...) */ + return efscmpgt(env, op1, op2); +} + +static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2) +{ + /* XXX: TODO: ignore special values (NaN, infinites, ...) */ + return efscmpeq(env, op1, op2); +} + +#define HELPER_SINGLE_SPE_CMP(name) \ + uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \ + { \ + return e##name(env, op1, op2); \ + } +/* efststlt */ +HELPER_SINGLE_SPE_CMP(fststlt); +/* efststgt */ +HELPER_SINGLE_SPE_CMP(fststgt); +/* efststeq */ +HELPER_SINGLE_SPE_CMP(fststeq); +/* efscmplt */ +HELPER_SINGLE_SPE_CMP(fscmplt); +/* efscmpgt */ +HELPER_SINGLE_SPE_CMP(fscmpgt); +/* efscmpeq */ +HELPER_SINGLE_SPE_CMP(fscmpeq); + +static inline uint32_t evcmp_merge(int t0, int t1) +{ + return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1); +} + +#define HELPER_VECTOR_SPE_CMP(name) \ + uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \ + { \ + return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32), \ + e##name(env, op1, op2)); \ + } +/* evfststlt */ +HELPER_VECTOR_SPE_CMP(fststlt); +/* evfststgt */ +HELPER_VECTOR_SPE_CMP(fststgt); +/* evfststeq */ +HELPER_VECTOR_SPE_CMP(fststeq); +/* evfscmplt */ +HELPER_VECTOR_SPE_CMP(fscmplt); +/* evfscmpgt */ +HELPER_VECTOR_SPE_CMP(fscmpgt); +/* evfscmpeq */ +HELPER_VECTOR_SPE_CMP(fscmpeq); + +/* Double-precision floating-point conversion */ +uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val) +{ + CPU_DoubleU u; + + u.d = int32_to_float64(val, &env->vec_status); + + return u.ll; +} + +uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.d = int64_to_float64(val, &env->vec_status); + + return u.ll; +} + +uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val) +{ + CPU_DoubleU u; + + u.d = uint32_to_float64(val, &env->vec_status); + + return u.ll; +} + +uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.d = uint64_to_float64(val, &env->vec_status); + + return u.ll; +} + +uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float64_is_any_nan(u.d))) { + return 
0; + } + + return float64_to_int32(u.d, &env->vec_status); +} + +uint32_t helper_efdctui(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_uint32(u.d, &env->vec_status); +} + +uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_int32_round_to_zero(u.d, &env->vec_status); +} + +uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_int64_round_to_zero(u.d, &env->vec_status); +} + +uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_uint32_round_to_zero(u.d, &env->vec_status); +} + +uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + + u.ll = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + + return float64_to_uint64_round_to_zero(u.d, &env->vec_status); +} + +uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val) +{ + CPU_DoubleU u; + float64 tmp; + + u.d = int32_to_float64(val, &env->vec_status); + tmp = int64_to_float64(1ULL << 32, &env->vec_status); + u.d = float64_div(u.d, tmp, &env->vec_status); + + return u.ll; +} + +uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val) +{ + CPU_DoubleU u; + float64 tmp; + + u.d = uint32_to_float64(val, &env->vec_status); + tmp = int64_to_float64(1ULL << 32, &env->vec_status); + u.d = float64_div(u.d, tmp, &env->vec_status); + + return u.ll; +} + +uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + float64 tmp; + + u.ll = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + tmp = uint64_to_float64(1ULL << 32, &env->vec_status); + u.d = float64_mul(u.d, tmp, &env->vec_status); + + return float64_to_int32(u.d, &env->vec_status); +} + +uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u; + float64 tmp; + + u.ll = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(float64_is_any_nan(u.d))) { + return 0; + } + tmp = uint64_to_float64(1ULL << 32, &env->vec_status); + u.d = float64_mul(u.d, tmp, &env->vec_status); + + return float64_to_uint32(u.d, &env->vec_status); +} + +uint32_t helper_efscfd(CPUPPCState *env, uint64_t val) +{ + CPU_DoubleU u1; + CPU_FloatU u2; + + u1.ll = val; + u2.f = float64_to_float32(u1.d, &env->vec_status); + + return u2.l; +} + +uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val) +{ + CPU_DoubleU u2; + CPU_FloatU u1; + + u1.l = val; + u2.d = float32_to_float64(u1.f, &env->vec_status); + + return u2.ll; +} + +/* Double precision fixed-point arithmetic */ +uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + u1.d = float64_add(u1.d, u2.d, &env->vec_status); + return u1.ll; +} + +uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + u1.d = float64_sub(u1.d, u2.d, 
&env->vec_status); + return u1.ll; +} + +uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + u1.d = float64_mul(u1.d, u2.d, &env->vec_status); + return u1.ll; +} + +uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + u1.d = float64_div(u1.d, u2.d, &env->vec_status); + return u1.ll; +} + +/* Double precision floating point helpers */ +uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0; +} + +uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4; +} + +uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + CPU_DoubleU u1, u2; + + u1.ll = op1; + u2.ll = op2; + return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0; +} + +uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) */ + return helper_efdtstlt(env, op1, op2); +} + +uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) */ + return helper_efdtstgt(env, op1, op2); +} + +uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) */ + return helper_efdtsteq(env, op1, op2); +} + +#define float64_to_float64(x, env) x + + +/* + * VSX_ADD_SUB - VSX floating point add/subract + * name - instruction mnemonic + * op - operation (add or sub) + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + * sfprf - set FPRF + */ +#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \ +void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \ + ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + t.fld = tp##_##op(xa->fld, xb->fld, &tstat); \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + float_invalid_op_addsub(env, sfprf, GETPC(), \ + tp##_classify(xa->fld) | \ + tp##_classify(xb->fld)); \ + } \ + \ + if (r2sp) { \ + t.fld = helper_frsp(env, t.fld); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf_float64(env, t.fld); \ + } \ + } \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0) +VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1) +VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0) +VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0) +VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0) +VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1) +VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0) +VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0) + +void helper_xsaddqp(CPUPPCState *env, uint32_t opcode, + ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) +{ + ppc_vsr_t t = *xt; + float_status tstat; + + helper_reset_fpstatus(env); + + tstat = env->fp_status; + if (unlikely(Rc(opcode) != 0)) { + tstat.float_rounding_mode = float_round_to_odd; + } + + set_float_exception_flags(0, &tstat); + 
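+    /*
+     * With Rc set (the round-to-odd xsaddqpo form), the addition runs
+     * under the float_round_to_odd mode selected above.
+     */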
t.f128 = float128_add(xa->f128, xb->f128, &tstat); + env->fp_status.float_exception_flags |= tstat.float_exception_flags; + + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { + float_invalid_op_addsub(env, 1, GETPC(), + float128_classify(xa->f128) | + float128_classify(xb->f128)); + } + + helper_compute_fprf_float128(env, t.f128); + + *xt = t; + do_float_check_status(env, GETPC()); +} + +/* + * VSX_MUL - VSX floating point multiply + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + * sfprf - set FPRF + */ +#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ + ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + t.fld = tp##_mul(xa->fld, xb->fld, &tstat); \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + float_invalid_op_mul(env, sfprf, GETPC(), \ + tp##_classify(xa->fld) | \ + tp##_classify(xb->fld)); \ + } \ + \ + if (r2sp) { \ + t.fld = helper_frsp(env, t.fld); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf_float64(env, t.fld); \ + } \ + } \ + \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0) +VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1) +VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0) +VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0) + +void helper_xsmulqp(CPUPPCState *env, uint32_t opcode, + ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) +{ + ppc_vsr_t t = *xt; + float_status tstat; + + helper_reset_fpstatus(env); + tstat = env->fp_status; + if (unlikely(Rc(opcode) != 0)) { + tstat.float_rounding_mode = float_round_to_odd; + } + + set_float_exception_flags(0, &tstat); + t.f128 = float128_mul(xa->f128, xb->f128, &tstat); + env->fp_status.float_exception_flags |= tstat.float_exception_flags; + + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { + float_invalid_op_mul(env, 1, GETPC(), + float128_classify(xa->f128) | + float128_classify(xb->f128)); + } + helper_compute_fprf_float128(env, t.f128); + + *xt = t; + do_float_check_status(env, GETPC()); +} + +/* + * VSX_DIV - VSX floating point divide + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + * sfprf - set FPRF + */ +#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ + ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + t.fld = tp##_div(xa->fld, xb->fld, &tstat); \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + float_invalid_op_div(env, sfprf, GETPC(), \ + tp##_classify(xa->fld) | \ + tp##_classify(xb->fld)); \ + } \ + if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { \ + float_zero_divide_excp(env, GETPC()); \ + } \ + \ + if (r2sp) { \ + t.fld = helper_frsp(env, t.fld); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf_float64(env, t.fld); \ + } \ + } \ + \ + *xt = t; \ + 
do_float_check_status(env, GETPC()); \ +} + +VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0) +VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1) +VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0) +VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0) + +void helper_xsdivqp(CPUPPCState *env, uint32_t opcode, + ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) +{ + ppc_vsr_t t = *xt; + float_status tstat; + + helper_reset_fpstatus(env); + tstat = env->fp_status; + if (unlikely(Rc(opcode) != 0)) { + tstat.float_rounding_mode = float_round_to_odd; + } + + set_float_exception_flags(0, &tstat); + t.f128 = float128_div(xa->f128, xb->f128, &tstat); + env->fp_status.float_exception_flags |= tstat.float_exception_flags; + + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { + float_invalid_op_div(env, 1, GETPC(), + float128_classify(xa->f128) | + float128_classify(xb->f128)); + } + if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { + float_zero_divide_excp(env, GETPC()); + } + + helper_compute_fprf_float128(env, t.f128); + *xt = t; + do_float_check_status(env, GETPC()); +} + +/* + * VSX_RE - VSX floating point reciprocal estimate + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + * sfprf - set FPRF + */ +#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status); \ + \ + if (r2sp) { \ + t.fld = helper_frsp(env, t.fld); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf_float64(env, t.fld); \ + } \ + } \ + \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0) +VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1) +VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0) +VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0) + +/* + * VSX_SQRT - VSX floating point square root + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + * sfprf - set FPRF + */ +#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + t.fld = tp##_sqrt(xb->fld, &tstat); \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \ + float_invalid_op_vxsqrt(env, sfprf, GETPC()); \ + } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + } \ + \ + if (r2sp) { \ + t.fld = helper_frsp(env, t.fld); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf_float64(env, t.fld); \ + } \ + } \ + \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0) +VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1) +VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0) +VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0) + +/* + *VSX_RSQRTE - VSX floating point reciprocal square root estimate + * op - instruction mnemonic + * nels - 
number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + * sfprf - set FPRF + */ +#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + t.fld = tp##_sqrt(xb->fld, &tstat); \ + t.fld = tp##_div(tp##_one, t.fld, &tstat); \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \ + float_invalid_op_vxsqrt(env, sfprf, GETPC()); \ + } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + } \ + \ + if (r2sp) { \ + t.fld = helper_frsp(env, t.fld); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf_float64(env, t.fld); \ + } \ + } \ + \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0) +VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1) +VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0) +VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0) + +/* + * VSX_TDIV - VSX floating point test for divide + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + * emin - minimum unbiased exponent + * emax - maximum unbiased exponent + * nbits - number of fraction bits + */ +#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits) \ +void helper_##op(CPUPPCState *env, uint32_t opcode, \ + ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + int i; \ + int fe_flag = 0; \ + int fg_flag = 0; \ + \ + for (i = 0; i < nels; i++) { \ + if (unlikely(tp##_is_infinity(xa->fld) || \ + tp##_is_infinity(xb->fld) || \ + tp##_is_zero(xb->fld))) { \ + fe_flag = 1; \ + fg_flag = 1; \ + } else { \ + int e_a = ppc_##tp##_get_unbiased_exp(xa->fld); \ + int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \ + \ + if (unlikely(tp##_is_any_nan(xa->fld) || \ + tp##_is_any_nan(xb->fld))) { \ + fe_flag = 1; \ + } else if ((e_b <= emin) || (e_b >= (emax - 2))) { \ + fe_flag = 1; \ + } else if (!tp##_is_zero(xa->fld) && \ + (((e_a - e_b) >= emax) || \ + ((e_a - e_b) <= (emin + 1)) || \ + (e_a <= (emin + nbits)))) { \ + fe_flag = 1; \ + } \ + \ + if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \ + /* \ + * XB is not zero because of the above check and so \ + * must be denormalized. \ + */ \ + fg_flag = 1; \ + } \ + } \ + } \ + \ + env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 
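2 : 0); \
+}
+
+/*
+ * For example, xstdivdp with a zero divisor or an infinite operand takes
+ * the first branch above and sets both fe and fg, so the target CR field
+ * becomes 0b1110: the 0x8 bit is always set, fg lands in the 0x4 bit and
+ * fe in the 0x2 bit.
+ */
+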
+VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
+VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
+VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
+
+/*
+ * VSX_TSQRT - VSX floating point test for square root
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * emin - minimum unbiased exponent
+ * nbits - number of fraction bits
+ */
+#define VSX_TSQRT(op, nels, tp, fld, emin, nbits) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb) \
+{ \
+    int i; \
+    int fe_flag = 0; \
+    int fg_flag = 0; \
+ \
+    for (i = 0; i < nels; i++) { \
+        if (unlikely(tp##_is_infinity(xb->fld) || \
+                     tp##_is_zero(xb->fld))) { \
+            fe_flag = 1; \
+            fg_flag = 1; \
+        } else { \
+            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
+ \
+            if (unlikely(tp##_is_any_nan(xb->fld))) { \
+                fe_flag = 1; \
+            } else if (unlikely(tp##_is_zero(xb->fld))) { \
+                fe_flag = 1; \
+            } else if (unlikely(tp##_is_neg(xb->fld))) { \
+                fe_flag = 1; \
+            } else if (!tp##_is_zero(xb->fld) && \
+                       (e_b <= (emin + nbits))) { \
+                fe_flag = 1; \
+            } \
+ \
+            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
+                /* \
+                 * XB is not zero because of the above check and \
+                 * therefore must be denormalized. \
+                 */ \
+                fg_flag = 1; \
+            } \
+        } \
+    } \
+ \
+    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
+}
+
+VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
+VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
+VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
+
+/*
+ * VSX_MADD - VSX floating point multiply/add variations
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * maddflgs - flags for the float*muladd routine that control the
+ *     various forms (madd, msub, nmadd, nmsub)
+ * sfprf - set FPRF
+ * r2sp - round intermediate result to single precision
+ */
+#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+                 ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \
+{ \
+    ppc_vsr_t t = *xt; \
+    int i; \
+ \
+    helper_reset_fpstatus(env); \
+ \
+    for (i = 0; i < nels; i++) { \
+        float_status tstat = env->fp_status; \
+        set_float_exception_flags(0, &tstat); \
+        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
+            /* \
+             * Avoid double rounding errors by rounding the intermediate \
+             * result to odd.
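(i.e. truncate, then fold the sticky inexact flag into the low bit)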
\ + */ \ + set_float_rounding_mode(float_round_to_zero, &tstat); \ + t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \ + maddflgs, &tstat); \ + t.fld |= (get_float_exception_flags(&tstat) & \ + float_flag_inexact) != 0; \ + } else { \ + t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \ + maddflgs, &tstat); \ + } \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + tp##_maddsub_update_excp(env, xa->fld, b->fld, \ + c->fld, maddflgs, GETPC()); \ + } \ + \ + if (r2sp) { \ + t.fld = helper_frsp(env, t.fld); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf_float64(env, t.fld); \ + } \ + } \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0) +VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0) +VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0) +VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0) +VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1) +VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1) +VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1) +VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1) + +VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0) +VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0) +VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0) +VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0) + +VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0) +VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0) +VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0) +VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0) + +/* + * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision + * op - instruction mnemonic + * cmp - comparison operation + * exp - expected result of comparison + * svxvc - set VXVC bit + */ +#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ + ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \ + \ + if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \ + float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \ + vxsnan_flag = true; \ + if (fpscr_ve == 0 && svxvc) { \ + vxvc_flag = true; \ + } \ + } else if (svxvc) { \ + vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \ + float64_is_quiet_nan(xb->VsrD(0), &env->fp_status); \ + } \ + if (vxsnan_flag) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + if (vxvc_flag) { \ + float_invalid_op_vxvc(env, 0, GETPC()); \ + } \ + vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \ + \ + if (!vex_flag) { \ + if (float64_##cmp(xb->VsrD(0), xa->VsrD(0), \ + &env->fp_status) == exp) { \ + t.VsrD(0) = -1; \ + t.VsrD(1) = 0; \ + } else { \ + t.VsrD(0) = 0; \ + t.VsrD(1) = 0; \ + } \ + } \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0) +VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1) +VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1) +VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0) + +void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode, + ppc_vsr_t *xa, ppc_vsr_t *xb) +{ + int64_t exp_a, exp_b; + uint32_t cc; + + exp_a = extract64(xa->VsrD(0), 52, 11); + exp_b = extract64(xb->VsrD(0), 52, 11); + + if (unlikely(float64_is_any_nan(xa->VsrD(0)) || + float64_is_any_nan(xb->VsrD(0)))) { + cc = CRF_SO; + } else { + if (exp_a < exp_b) { + cc = CRF_LT; + } else if (exp_a > exp_b) { + cc = CRF_GT; 
+ } else { + cc = CRF_EQ; + } + } + + env->fpscr &= ~FP_FPCC; + env->fpscr |= cc << FPSCR_FPCC; + env->crf[BF(opcode)] = cc; + + do_float_check_status(env, GETPC()); +} + +void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode, + ppc_vsr_t *xa, ppc_vsr_t *xb) +{ + int64_t exp_a, exp_b; + uint32_t cc; + + exp_a = extract64(xa->VsrD(0), 48, 15); + exp_b = extract64(xb->VsrD(0), 48, 15); + + if (unlikely(float128_is_any_nan(xa->f128) || + float128_is_any_nan(xb->f128))) { + cc = CRF_SO; + } else { + if (exp_a < exp_b) { + cc = CRF_LT; + } else if (exp_a > exp_b) { + cc = CRF_GT; + } else { + cc = CRF_EQ; + } + } + + env->fpscr &= ~FP_FPCC; + env->fpscr |= cc << FPSCR_FPCC; + env->crf[BF(opcode)] = cc; + + do_float_check_status(env, GETPC()); +} + +#define VSX_SCALAR_CMP(op, ordered) \ +void helper_##op(CPUPPCState *env, uint32_t opcode, \ + ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + uint32_t cc = 0; \ + bool vxsnan_flag = false, vxvc_flag = false; \ + \ + helper_reset_fpstatus(env); \ + \ + if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \ + float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \ + vxsnan_flag = true; \ + cc = CRF_SO; \ + if (fpscr_ve == 0 && ordered) { \ + vxvc_flag = true; \ + } \ + } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \ + float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) { \ + cc = CRF_SO; \ + if (ordered) { \ + vxvc_flag = true; \ + } \ + } \ + if (vxsnan_flag) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + if (vxvc_flag) { \ + float_invalid_op_vxvc(env, 0, GETPC()); \ + } \ + \ + if (float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \ + cc |= CRF_LT; \ + } else if (!float64_le(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \ + cc |= CRF_GT; \ + } else { \ + cc |= CRF_EQ; \ + } \ + \ + env->fpscr &= ~FP_FPCC; \ + env->fpscr |= cc << FPSCR_FPCC; \ + env->crf[BF(opcode)] = cc; \ + \ + do_float_check_status(env, GETPC()); \ +} + +VSX_SCALAR_CMP(xscmpodp, 1) +VSX_SCALAR_CMP(xscmpudp, 0) + +#define VSX_SCALAR_CMPQ(op, ordered) \ +void helper_##op(CPUPPCState *env, uint32_t opcode, \ + ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + uint32_t cc = 0; \ + bool vxsnan_flag = false, vxvc_flag = false; \ + \ + helper_reset_fpstatus(env); \ + \ + if (float128_is_signaling_nan(xa->f128, &env->fp_status) || \ + float128_is_signaling_nan(xb->f128, &env->fp_status)) { \ + vxsnan_flag = true; \ + cc = CRF_SO; \ + if (fpscr_ve == 0 && ordered) { \ + vxvc_flag = true; \ + } \ + } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) || \ + float128_is_quiet_nan(xb->f128, &env->fp_status)) { \ + cc = CRF_SO; \ + if (ordered) { \ + vxvc_flag = true; \ + } \ + } \ + if (vxsnan_flag) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + if (vxvc_flag) { \ + float_invalid_op_vxvc(env, 0, GETPC()); \ + } \ + \ + if (float128_lt(xa->f128, xb->f128, &env->fp_status)) { \ + cc |= CRF_LT; \ + } else if (!float128_le(xa->f128, xb->f128, &env->fp_status)) { \ + cc |= CRF_GT; \ + } else { \ + cc |= CRF_EQ; \ + } \ + \ + env->fpscr &= ~FP_FPCC; \ + env->fpscr |= cc << FPSCR_FPCC; \ + env->crf[BF(opcode)] = cc; \ + \ + do_float_check_status(env, GETPC()); \ +} + +VSX_SCALAR_CMPQ(xscmpoqp, 1) +VSX_SCALAR_CMPQ(xscmpuqp, 0) + +/* + * VSX_MAX_MIN - VSX floating point maximum/minimum + * name - instruction mnemonic + * op - operation (max or min) + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + */ +#define VSX_MAX_MIN(name, op, nels, tp, fld) \ +void helper_##name(CPUPPCState *env, 
ppc_vsr_t *xt, \ + ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + for (i = 0; i < nels; i++) { \ + t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status); \ + if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) || \ + tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + } \ + \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0)) +VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i)) +VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i)) +VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0)) +VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i)) +VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i)) + +#define VSX_MAX_MINC(name, max) \ +void helper_##name(CPUPPCState *env, uint32_t opcode, \ + ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + bool vxsnan_flag = false, vex_flag = false; \ + \ + if (unlikely(float64_is_any_nan(xa->VsrD(0)) || \ + float64_is_any_nan(xb->VsrD(0)))) { \ + if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \ + float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \ + vxsnan_flag = true; \ + } \ + t.VsrD(0) = xb->VsrD(0); \ + } else if ((max && \ + !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \ + (!max && \ + float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \ + t.VsrD(0) = xa->VsrD(0); \ + } else { \ + t.VsrD(0) = xb->VsrD(0); \ + } \ + \ + vex_flag = fpscr_ve & vxsnan_flag; \ + if (vxsnan_flag) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + if (!vex_flag) { \ + *xt = t; \ + } \ +} \ + +VSX_MAX_MINC(xsmaxcdp, 1); +VSX_MAX_MINC(xsmincdp, 0); + +#define VSX_MAX_MINJ(name, max) \ +void helper_##name(CPUPPCState *env, uint32_t opcode, \ + ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + bool vxsnan_flag = false, vex_flag = false; \ + \ + if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \ + if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) { \ + vxsnan_flag = true; \ + } \ + t.VsrD(0) = xa->VsrD(0); \ + } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) { \ + if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \ + vxsnan_flag = true; \ + } \ + t.VsrD(0) = xb->VsrD(0); \ + } else if (float64_is_zero(xa->VsrD(0)) && \ + float64_is_zero(xb->VsrD(0))) { \ + if (max) { \ + if (!float64_is_neg(xa->VsrD(0)) || \ + !float64_is_neg(xb->VsrD(0))) { \ + t.VsrD(0) = 0ULL; \ + } else { \ + t.VsrD(0) = 0x8000000000000000ULL; \ + } \ + } else { \ + if (float64_is_neg(xa->VsrD(0)) || \ + float64_is_neg(xb->VsrD(0))) { \ + t.VsrD(0) = 0x8000000000000000ULL; \ + } else { \ + t.VsrD(0) = 0ULL; \ + } \ + } \ + } else if ((max && \ + !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \ + (!max && \ + float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \ + t.VsrD(0) = xa->VsrD(0); \ + } else { \ + t.VsrD(0) = xb->VsrD(0); \ + } \ + \ + vex_flag = fpscr_ve & vxsnan_flag; \ + if (vxsnan_flag) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + if (!vex_flag) { \ + *xt = t; \ + } \ +} \ + +VSX_MAX_MINJ(xsmaxjdp, 1); +VSX_MAX_MINJ(xsminjdp, 0); + +/* + * VSX_CMP - VSX floating point compare + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + * cmp - comparison operation + * svxvc - set VXVC bit + * exp - expected result of comparison + */ +#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \ +uint32_t helper_##op(CPUPPCState *env, 
ppc_vsr_t *xt, \ + ppc_vsr_t *xa, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + uint32_t crf6 = 0; \ + int i; \ + int all_true = 1; \ + int all_false = 1; \ + \ + for (i = 0; i < nels; i++) { \ + if (unlikely(tp##_is_any_nan(xa->fld) || \ + tp##_is_any_nan(xb->fld))) { \ + if (tp##_is_signaling_nan(xa->fld, &env->fp_status) || \ + tp##_is_signaling_nan(xb->fld, &env->fp_status)) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + if (svxvc) { \ + float_invalid_op_vxvc(env, 0, GETPC()); \ + } \ + t.fld = 0; \ + all_true = 0; \ + } else { \ + if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) { \ + t.fld = -1; \ + all_false = 0; \ + } else { \ + t.fld = 0; \ + all_true = 0; \ + } \ + } \ + } \ + \ + *xt = t; \ + crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \ + return crf6; \ +} + +VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1) +VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1) +VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1) +VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0) +VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1) +VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1) +VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1) +VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0) + +/* + * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * stp - source type (float32 or float64) + * ttp - target type (float32 or float64) + * sfld - source vsr_t field + * tfld - target vsr_t field (f32 or f64) + * sfprf - set FPRF + */ +#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + for (i = 0; i < nels; i++) { \ + t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \ + if (unlikely(stp##_is_signaling_nan(xb->sfld, \ + &env->fp_status))) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + t.tfld = ttp##_snan_to_qnan(t.tfld); \ + } \ + if (sfprf) { \ + helper_compute_fprf_##ttp(env, t.tfld); \ + } \ + } \ + \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1) +VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1) +VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0) +VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0) + +/* + * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * stp - source type (float32 or float64) + * ttp - target type (float32 or float64) + * sfld - source vsr_t field + * tfld - target vsr_t field (f32 or f64) + * sfprf - set FPRF + */ +#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \ +void helper_##op(CPUPPCState *env, uint32_t opcode, \ + ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + for (i = 0; i < nels; i++) { \ + t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \ + if (unlikely(stp##_is_signaling_nan(xb->sfld, \ + &env->fp_status))) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + t.tfld = ttp##_snan_to_qnan(t.tfld); \ + } \ + if (sfprf) { \ + helper_compute_fprf_##ttp(env, t.tfld); \ + } \ + } \ + \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1) + +/* + * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion + * involving one half precision value + * op - instruction 
mnemonic + * nels - number of elements (1, 2 or 4) + * stp - source type + * ttp - target type + * sfld - source vsr_t field + * tfld - target vsr_t field + * sfprf - set FPRF + */ +#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = { 0 }; \ + int i; \ + \ + for (i = 0; i < nels; i++) { \ + t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \ + if (unlikely(stp##_is_signaling_nan(xb->sfld, \ + &env->fp_status))) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + t.tfld = ttp##_snan_to_qnan(t.tfld); \ + } \ + if (sfprf) { \ + helper_compute_fprf_##ttp(env, t.tfld); \ + } \ + } \ + \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1) +VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1) +VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0) +VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0) + +/* + * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be + * added to this later. + */ +void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode, + ppc_vsr_t *xt, ppc_vsr_t *xb) +{ + ppc_vsr_t t = { 0 }; + float_status tstat; + + tstat = env->fp_status; + if (unlikely(Rc(opcode) != 0)) { + tstat.float_rounding_mode = float_round_to_odd; + } + + t.VsrD(0) = float128_to_float64(xb->f128, &tstat); + env->fp_status.float_exception_flags |= tstat.float_exception_flags; + if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) { + float_invalid_op_vxsnan(env, GETPC()); + t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0)); + } + helper_compute_fprf_float64(env, t.VsrD(0)); + + *xt = t; + do_float_check_status(env, GETPC()); +} + +uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb) +{ + uint64_t result, sign, exp, frac; + + float_status tstat = env->fp_status; + set_float_exception_flags(0, &tstat); + + sign = extract64(xb, 63, 1); + exp = extract64(xb, 52, 11); + frac = extract64(xb, 0, 52) | 0x10000000000000ULL; + + if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) { + /* DP denormal operand. */ + /* Exponent override to DP min exp. */ + exp = 1; + /* Implicit bit override to 0. */ + frac = deposit64(frac, 53, 1, 0); + } + + if (unlikely(exp < 897 && frac != 0)) { + /* SP tiny operand. */ + if (897 - exp > 63) { + frac = 0; + } else { + /* Denormalize until exp = SP min exp. */ + frac >>= (897 - exp); + } + /* Exponent override to SP min exp - 1. */ + exp = 896; + } + + result = sign << 31; + result |= extract64(exp, 10, 1) << 30; + result |= extract64(exp, 0, 7) << 23; + result |= extract64(frac, 29, 23); + + /* hardware replicates result to both words of the doubleword result. 
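+     * E.g. float64 1.0 = 0x3FF0000000000000: sign 0, biased exponent
+     * 0x3FF, fraction 0. The low 7 exponent bits (0x7F) and the top 23
+     * fraction bits (0) form the float32 pattern 0x3F800000, so the
+     * helper returns 0x3F8000003F800000.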
*/ + return (result << 32) | result; +} + +uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb) +{ + float_status tstat = env->fp_status; + set_float_exception_flags(0, &tstat); + + return float32_to_float64(xb >> 32, &tstat); +} + +/* + * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * stp - source type (float32 or float64) + * ttp - target type (int32, uint32, int64 or uint64) + * sfld - source vsr_t field + * tfld - target vsr_t field + * rnan - resulting NaN + */ +#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + int all_flags = env->fp_status.float_exception_flags, flags; \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + for (i = 0; i < nels; i++) { \ + env->fp_status.float_exception_flags = 0; \ + t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \ + flags = env->fp_status.float_exception_flags; \ + if (unlikely(flags & float_flag_invalid)) { \ + float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \ + t.tfld = rnan; \ + } \ + all_flags |= flags; \ + } \ + \ + *xt = t; \ + env->fp_status.float_exception_flags = all_flags; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \ + 0x8000000000000000ULL) +VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \ + 0x80000000U) +VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL) +VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U) +VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \ + 0x8000000000000000ULL) +VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \ + 0x80000000U) +VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL) +VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U) +VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \ + 0x8000000000000000ULL) +VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U) +VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL) +VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U) + +/* + * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion + * op - instruction mnemonic + * stp - source type (float32 or float64) + * ttp - target type (int32, uint32, int64 or uint64) + * sfld - source vsr_t field + * tfld - target vsr_t field + * rnan - resulting NaN + */ +#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan) \ +void helper_##op(CPUPPCState *env, uint32_t opcode, \ + ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = { 0 }; \ + \ + t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \ + if (env->fp_status.float_exception_flags & float_flag_invalid) { \ + float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \ + t.tfld = rnan; \ + } \ + \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \ + 0x8000000000000000ULL) + +VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \ + 0xffffffff80000000ULL) +VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL) +VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL) + +/* + * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion + * op - instruction mnemonic + * nels - number of elements 
(1, 2 or 4)
+ * stp - source type (int32, uint32, int64 or uint64)
+ * ttp - target type (float32 or float64)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field
+ * sfprf - set FPRF
+ * r2sp - round intermediate result to single precision
+ */
+#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+    ppc_vsr_t t = *xt; \
+    int i; \
+ \
+    for (i = 0; i < nels; i++) { \
+        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+        if (r2sp) { \
+            t.tfld = helper_frsp(env, t.tfld); \
+        } \
+        if (sfprf) { \
+            helper_compute_fprf_float64(env, t.tfld); \
+        } \
+    } \
+ \
+    *xt = t; \
+    do_float_check_status(env, GETPC()); \
+}
+
+VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
+
+/*
+ * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
+ * op - instruction mnemonic
+ * stp - source type (int32, uint32, int64 or uint64)
+ * ttp - target type (float128)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field
+ */
+#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+                 ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+    ppc_vsr_t t = *xt; \
+ \
+    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+    helper_compute_fprf_##ttp(env, t.tfld); \
+ \
+    *xt = t; \
+    do_float_check_status(env, GETPC()); \
+}
+
+VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
+VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
+
+/*
+ * For "use current rounding mode", define a value that will not be
+ * one of the existing rounding mode enums.
+ */ +#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \ + float_round_up + float_round_to_zero) + +/* + * VSX_ROUND - VSX floating point round + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + * rmode - rounding mode + * sfprf - set FPRF + */ +#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \ +{ \ + ppc_vsr_t t = *xt; \ + int i; \ + \ + if (rmode != FLOAT_ROUND_CURRENT) { \ + set_float_rounding_mode(rmode, &env->fp_status); \ + } \ + \ + for (i = 0; i < nels; i++) { \ + if (unlikely(tp##_is_signaling_nan(xb->fld, \ + &env->fp_status))) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + t.fld = tp##_snan_to_qnan(xb->fld); \ + } else { \ + t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \ + } \ + if (sfprf) { \ + helper_compute_fprf_float64(env, t.fld); \ + } \ + } \ + \ + /* \ + * If this is not a "use current rounding mode" instruction, \ + * then inhibit setting of the XX bit and restore rounding \ + * mode from FPSCR \ + */ \ + if (rmode != FLOAT_ROUND_CURRENT) { \ + fpscr_set_rounding_mode(env); \ + env->fp_status.float_exception_flags &= ~float_flag_inexact; \ + } \ + \ + *xt = t; \ + do_float_check_status(env, GETPC()); \ +} + +VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1) +VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1) +VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1) +VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1) +VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1) + +VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0) +VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0) +VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0) +VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0) +VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0) + +VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0) +VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0) +VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0) +VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0) +VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0) + +uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb) +{ + helper_reset_fpstatus(env); + + uint64_t xt = helper_frsp(env, xb); + + helper_compute_fprf_float64(env, xt); + do_float_check_status(env, GETPC()); + return xt; +} + +#define VSX_XXPERM(op, indexed) \ +void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ + ppc_vsr_t *xa, ppc_vsr_t *pcv) \ +{ \ + ppc_vsr_t t = *xt; \ + int i, idx; \ + \ + for (i = 0; i < 16; i++) { \ + idx = pcv->VsrB(i) & 0x1F; \ + if (indexed) { \ + idx = 31 - idx; \ + } \ + t.VsrB(i) = (idx <= 15) ? 
xa->VsrB(idx) \ + : xt->VsrB(idx - 16); \ + } \ + *xt = t; \ +} + +VSX_XXPERM(xxperm, 0) +VSX_XXPERM(xxpermr, 1) + +void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) +{ + ppc_vsr_t t = { 0 }; + uint32_t exp, i, fraction; + + for (i = 0; i < 4; i++) { + exp = (xb->VsrW(i) >> 23) & 0xFF; + fraction = xb->VsrW(i) & 0x7FFFFF; + if (exp != 0 && exp != 255) { + t.VsrW(i) = fraction | 0x00800000; + } else { + t.VsrW(i) = fraction; + } + } + *xt = t; +} + +/* + * VSX_TEST_DC - VSX floating point test data class + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * xbn - VSR register number + * tp - type (float32 or float64) + * fld - vsr_t field (VsrD(*) or VsrW(*)) + * tfld - target vsr_t field (VsrD(*) or VsrW(*)) + * fld_max - target field max + * scrf - set result in CR and FPCC + */ +#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t *xt = &env->vsr[xT(opcode)]; \ + ppc_vsr_t *xb = &env->vsr[xbn]; \ + ppc_vsr_t t = { 0 }; \ + uint32_t i, sign, dcmx; \ + uint32_t cc, match = 0; \ + \ + if (!scrf) { \ + dcmx = DCMX_XV(opcode); \ + } else { \ + t = *xt; \ + dcmx = DCMX(opcode); \ + } \ + \ + for (i = 0; i < nels; i++) { \ + sign = tp##_is_neg(xb->fld); \ + if (tp##_is_any_nan(xb->fld)) { \ + match = extract32(dcmx, 6, 1); \ + } else if (tp##_is_infinity(xb->fld)) { \ + match = extract32(dcmx, 4 + !sign, 1); \ + } else if (tp##_is_zero(xb->fld)) { \ + match = extract32(dcmx, 2 + !sign, 1); \ + } else if (tp##_is_zero_or_denormal(xb->fld)) { \ + match = extract32(dcmx, 0 + !sign, 1); \ + } \ + \ + if (scrf) { \ + cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT; \ + env->fpscr &= ~FP_FPCC; \ + env->fpscr |= cc << FPSCR_FPCC; \ + env->crf[BF(opcode)] = cc; \ + } else { \ + t.tfld = match ? 
fld_max : 0; \ + } \ + match = 0; \ + } \ + if (!scrf) { \ + *xt = t; \ + } \ +} + +VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0) +VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0) +VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1) +VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1) + +void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb) +{ + uint32_t dcmx, sign, exp; + uint32_t cc, match = 0, not_sp = 0; + + dcmx = DCMX(opcode); + exp = (xb->VsrD(0) >> 52) & 0x7FF; + + sign = float64_is_neg(xb->VsrD(0)); + if (float64_is_any_nan(xb->VsrD(0))) { + match = extract32(dcmx, 6, 1); + } else if (float64_is_infinity(xb->VsrD(0))) { + match = extract32(dcmx, 4 + !sign, 1); + } else if (float64_is_zero(xb->VsrD(0))) { + match = extract32(dcmx, 2 + !sign, 1); + } else if (float64_is_zero_or_denormal(xb->VsrD(0)) || + (exp > 0 && exp < 0x381)) { + match = extract32(dcmx, 0 + !sign, 1); + } + + not_sp = !float64_eq(xb->VsrD(0), + float32_to_float64( + float64_to_float32(xb->VsrD(0), &env->fp_status), + &env->fp_status), &env->fp_status); + + cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT; + env->fpscr &= ~FP_FPCC; + env->fpscr |= cc << FPSCR_FPCC; + env->crf[BF(opcode)] = cc; +} + +void helper_xsrqpi(CPUPPCState *env, uint32_t opcode, + ppc_vsr_t *xt, ppc_vsr_t *xb) +{ + ppc_vsr_t t = { 0 }; + uint8_t r = Rrm(opcode); + uint8_t ex = Rc(opcode); + uint8_t rmc = RMC(opcode); + uint8_t rmode = 0; + float_status tstat; + + helper_reset_fpstatus(env); + + if (r == 0 && rmc == 0) { + rmode = float_round_ties_away; + } else if (r == 0 && rmc == 0x3) { + rmode = fpscr_rn; + } else if (r == 1) { + switch (rmc) { + case 0: + rmode = float_round_nearest_even; + break; + case 1: + rmode = float_round_to_zero; + break; + case 2: + rmode = float_round_up; + break; + case 3: + rmode = float_round_down; + break; + default: + abort(); + } + } + + tstat = env->fp_status; + set_float_exception_flags(0, &tstat); + set_float_rounding_mode(rmode, &tstat); + t.f128 = float128_round_to_int(xb->f128, &tstat); + env->fp_status.float_exception_flags |= tstat.float_exception_flags; + + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { + if (float128_is_signaling_nan(xb->f128, &tstat)) { + float_invalid_op_vxsnan(env, GETPC()); + t.f128 = float128_snan_to_qnan(t.f128); + } + } + + if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) { + env->fp_status.float_exception_flags &= ~float_flag_inexact; + } + + helper_compute_fprf_float128(env, t.f128); + do_float_check_status(env, GETPC()); + *xt = t; +} + +void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode, + ppc_vsr_t *xt, ppc_vsr_t *xb) +{ + ppc_vsr_t t = { 0 }; + uint8_t r = Rrm(opcode); + uint8_t rmc = RMC(opcode); + uint8_t rmode = 0; + floatx80 round_res; + float_status tstat; + + helper_reset_fpstatus(env); + + if (r == 0 && rmc == 0) { + rmode = float_round_ties_away; + } else if (r == 0 && rmc == 0x3) { + rmode = fpscr_rn; + } else if (r == 1) { + switch (rmc) { + case 0: + rmode = float_round_nearest_even; + break; + case 1: + rmode = float_round_to_zero; + break; + case 2: + rmode = float_round_up; + break; + case 3: + rmode = float_round_down; + break; + default: + abort(); + } + } + + tstat = env->fp_status; + set_float_exception_flags(0, &tstat); + set_float_rounding_mode(rmode, &tstat); + round_res = float128_to_floatx80(xb->f128, &tstat); + t.f128 = floatx80_to_float128(round_res, 
&tstat); + env->fp_status.float_exception_flags |= tstat.float_exception_flags; + + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { + if (float128_is_signaling_nan(xb->f128, &tstat)) { + float_invalid_op_vxsnan(env, GETPC()); + t.f128 = float128_snan_to_qnan(t.f128); + } + } + + helper_compute_fprf_float128(env, t.f128); + *xt = t; + do_float_check_status(env, GETPC()); +} + +void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode, + ppc_vsr_t *xt, ppc_vsr_t *xb) +{ + ppc_vsr_t t = { 0 }; + float_status tstat; + + helper_reset_fpstatus(env); + + tstat = env->fp_status; + if (unlikely(Rc(opcode) != 0)) { + tstat.float_rounding_mode = float_round_to_odd; + } + + set_float_exception_flags(0, &tstat); + t.f128 = float128_sqrt(xb->f128, &tstat); + env->fp_status.float_exception_flags |= tstat.float_exception_flags; + + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { + if (float128_is_signaling_nan(xb->f128, &tstat)) { + float_invalid_op_vxsnan(env, GETPC()); + t.f128 = float128_snan_to_qnan(xb->f128); + } else if (float128_is_quiet_nan(xb->f128, &tstat)) { + t.f128 = xb->f128; + } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) { + float_invalid_op_vxsqrt(env, 1, GETPC()); + t.f128 = float128_default_nan(&env->fp_status); + } + } + + helper_compute_fprf_float128(env, t.f128); + *xt = t; + do_float_check_status(env, GETPC()); +} + +void helper_xssubqp(CPUPPCState *env, uint32_t opcode, + ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) +{ + ppc_vsr_t t = *xt; + float_status tstat; + + helper_reset_fpstatus(env); + + tstat = env->fp_status; + if (unlikely(Rc(opcode) != 0)) { + tstat.float_rounding_mode = float_round_to_odd; + } + + set_float_exception_flags(0, &tstat); + t.f128 = float128_sub(xa->f128, xb->f128, &tstat); + env->fp_status.float_exception_flags |= tstat.float_exception_flags; + + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { + float_invalid_op_addsub(env, 1, GETPC(), + float128_classify(xa->f128) | + float128_classify(xb->f128)); + } + + helper_compute_fprf_float128(env, t.f128); + *xt = t; + do_float_check_status(env, GETPC()); +} diff --git a/qemu/target/ppc/helper.h b/qemu/target/ppc/helper.h new file mode 100644 index 00000000..9700011b --- /dev/null +++ b/qemu/target/ppc/helper.h @@ -0,0 +1,761 @@ +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) + +DEF_HELPER_FLAGS_3(raise_exception_err, TCG_CALL_NO_WG, void, env, i32, i32) +DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, void, env, i32) +DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32) +#if defined(TARGET_PPC64) +DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32) +#endif + +DEF_HELPER_2(store_msr, void, env, tl) +DEF_HELPER_1(rfi, void, env) +DEF_HELPER_1(rfsvc, void, env) +DEF_HELPER_1(40x_rfci, void, env) +DEF_HELPER_1(rfci, void, env) +DEF_HELPER_1(rfdi, void, env) +DEF_HELPER_1(rfmci, void, env) +#if defined(TARGET_PPC64) +DEF_HELPER_2(pminsn, void, env, i32) +DEF_HELPER_1(rfid, void, env) +DEF_HELPER_1(hrfid, void, env) +DEF_HELPER_2(store_lpcr, void, env, tl) +DEF_HELPER_2(store_pcr, void, env, tl) +#endif +DEF_HELPER_1(check_tlb_flush_local, void, env) +DEF_HELPER_1(check_tlb_flush_global, void, env) + +DEF_HELPER_3(lmw, void, env, tl, i32) +DEF_HELPER_FLAGS_3(stmw, TCG_CALL_NO_WG, void, env, tl, i32) +DEF_HELPER_4(lsw, void, env, tl, i32, i32) +DEF_HELPER_5(lswx, void, env, tl, i32, i32, i32) +DEF_HELPER_FLAGS_4(stsw, TCG_CALL_NO_WG, void, env, tl, i32, i32) +DEF_HELPER_FLAGS_3(dcbz, TCG_CALL_NO_WG, void, env, tl, 
i32) +DEF_HELPER_FLAGS_3(dcbzep, TCG_CALL_NO_WG, void, env, tl, i32) +DEF_HELPER_FLAGS_2(icbi, TCG_CALL_NO_WG, void, env, tl) +DEF_HELPER_FLAGS_2(icbiep, TCG_CALL_NO_WG, void, env, tl) +DEF_HELPER_5(lscbx, tl, env, tl, i32, i32, i32) + +#if defined(TARGET_PPC64) +DEF_HELPER_4(divdeu, i64, env, i64, i64, i32) +DEF_HELPER_4(divde, i64, env, i64, i64, i32) +#endif +DEF_HELPER_4(divweu, tl, env, tl, tl, i32) +DEF_HELPER_4(divwe, tl, env, tl, tl, i32) + +DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_3(sraw, tl, env, tl, tl) +#if defined(TARGET_PPC64) +DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl) +DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_3(srad, tl, env, tl, tl) +DEF_HELPER_0(darn32, tl) +DEF_HELPER_0(darn64, tl) +#endif + +DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(cntlzw32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_2(brinc, TCG_CALL_NO_RWG_SE, tl, tl, tl) + +DEF_HELPER_1(float_check_status, void, env) +DEF_HELPER_1(reset_fpstatus, void, env) +DEF_HELPER_2(compute_fprf_float64, void, env, i64) +DEF_HELPER_3(store_fpscr, void, env, i64, i32) +DEF_HELPER_2(fpscr_clrbit, void, env, i32) +DEF_HELPER_2(fpscr_setbit, void, env, i32) +DEF_HELPER_FLAGS_1(todouble, TCG_CALL_NO_RWG_SE, i64, i32) +DEF_HELPER_FLAGS_1(tosingle, TCG_CALL_NO_RWG_SE, i32, i64) + +DEF_HELPER_4(fcmpo, void, env, i64, i64, i32) +DEF_HELPER_4(fcmpu, void, env, i64, i64, i32) + +DEF_HELPER_2(fctiw, i64, env, i64) +DEF_HELPER_2(fctiwu, i64, env, i64) +DEF_HELPER_2(fctiwz, i64, env, i64) +DEF_HELPER_2(fctiwuz, i64, env, i64) +DEF_HELPER_2(fcfid, i64, env, i64) +DEF_HELPER_2(fcfidu, i64, env, i64) +DEF_HELPER_2(fcfids, i64, env, i64) +DEF_HELPER_2(fcfidus, i64, env, i64) +DEF_HELPER_2(fctid, i64, env, i64) +DEF_HELPER_2(fctidu, i64, env, i64) +DEF_HELPER_2(fctidz, i64, env, i64) +DEF_HELPER_2(fctiduz, i64, env, i64) +DEF_HELPER_2(frsp, i64, env, i64) +DEF_HELPER_2(frin, i64, env, i64) +DEF_HELPER_2(friz, i64, env, i64) +DEF_HELPER_2(frip, i64, env, i64) +DEF_HELPER_2(frim, i64, env, i64) + +DEF_HELPER_3(fadd, f64, env, f64, f64) +DEF_HELPER_3(fsub, f64, env, f64, f64) +DEF_HELPER_3(fmul, f64, env, f64, f64) +DEF_HELPER_3(fdiv, f64, env, f64, f64) +DEF_HELPER_4(fmadd, i64, env, i64, i64, i64) +DEF_HELPER_4(fmsub, i64, env, i64, i64, i64) +DEF_HELPER_4(fnmadd, i64, env, i64, i64, i64) +DEF_HELPER_4(fnmsub, i64, env, i64, i64, i64) +DEF_HELPER_2(fsqrt, f64, env, f64) +DEF_HELPER_2(fre, i64, env, i64) +DEF_HELPER_2(fres, i64, env, i64) +DEF_HELPER_2(frsqrte, i64, env, i64) +DEF_HELPER_4(fsel, i64, env, i64, i64, i64) + +DEF_HELPER_FLAGS_2(ftdiv, TCG_CALL_NO_RWG_SE, i32, i64, i64) +DEF_HELPER_FLAGS_1(ftsqrt, TCG_CALL_NO_RWG_SE, i32, i64) + +#define dh_alias_avr ptr +#define dh_ctype_avr ppc_avr_t * +#define dh_is_signed_avr dh_is_signed_ptr + +#define dh_alias_vsr ptr +#define dh_ctype_vsr ppc_vsr_t * +#define dh_is_signed_vsr dh_is_signed_ptr + +DEF_HELPER_3(vavgub, void, avr, avr, avr) +DEF_HELPER_3(vavguh, void, avr, avr, avr) +DEF_HELPER_3(vavguw, void, avr, avr, avr) +DEF_HELPER_3(vabsdub, void, avr, avr, avr) +DEF_HELPER_3(vabsduh, void, avr, avr, avr) +DEF_HELPER_3(vabsduw, void, avr, avr, avr) +DEF_HELPER_3(vavgsb, void, avr, avr, avr) +DEF_HELPER_3(vavgsh, void, avr, avr, avr) +DEF_HELPER_3(vavgsw, void, avr, avr, avr) +DEF_HELPER_4(vcmpequb, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequh, void, env, avr, 
avr, avr) +DEF_HELPER_4(vcmpequw, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequd, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpneb, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpneh, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpnew, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpnezb, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpnezh, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpnezw, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtub, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtuh, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtuw, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtud, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsb, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsh, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsw, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsd, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpeqfp, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgefp, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtfp, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpbfp, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequb_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequh_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequw_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpequd_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpneb_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpneh_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpnew_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpnezb_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpnezh_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpnezw_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtub_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtuh_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtuw_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtud_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsb_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsh_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsw_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtsd_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpeqfp_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgefp_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpgtfp_dot, void, env, avr, avr, avr) +DEF_HELPER_4(vcmpbfp_dot, void, env, avr, avr, avr) +DEF_HELPER_3(vmrglb, void, avr, avr, avr) +DEF_HELPER_3(vmrglh, void, avr, avr, avr) +DEF_HELPER_3(vmrglw, void, avr, avr, avr) +DEF_HELPER_3(vmrghb, void, avr, avr, avr) +DEF_HELPER_3(vmrghh, void, avr, avr, avr) +DEF_HELPER_3(vmrghw, void, avr, avr, avr) +DEF_HELPER_3(vmulesb, void, avr, avr, avr) +DEF_HELPER_3(vmulesh, void, avr, avr, avr) +DEF_HELPER_3(vmulesw, void, avr, avr, avr) +DEF_HELPER_3(vmuleub, void, avr, avr, avr) +DEF_HELPER_3(vmuleuh, void, avr, avr, avr) +DEF_HELPER_3(vmuleuw, void, avr, avr, avr) +DEF_HELPER_3(vmulosb, void, avr, avr, avr) +DEF_HELPER_3(vmulosh, void, avr, avr, avr) +DEF_HELPER_3(vmulosw, void, avr, avr, avr) +DEF_HELPER_3(vmuloub, void, avr, avr, avr) +DEF_HELPER_3(vmulouh, void, avr, avr, avr) +DEF_HELPER_3(vmulouw, void, avr, avr, avr) +DEF_HELPER_3(vmuluwm, void, avr, avr, avr) +DEF_HELPER_3(vslo, void, avr, avr, avr) +DEF_HELPER_3(vsro, void, avr, avr, avr) +DEF_HELPER_3(vsrv, void, avr, avr, avr) +DEF_HELPER_3(vslv, void, avr, avr, avr) +DEF_HELPER_3(vaddcuw, void, avr, avr, avr) +DEF_HELPER_2(vprtybw, void, avr, avr) +DEF_HELPER_2(vprtybd, void, avr, avr) +DEF_HELPER_2(vprtybq, void, avr, avr) +DEF_HELPER_3(vsubcuw, void, avr, avr, avr) +DEF_HELPER_FLAGS_5(vaddsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_FLAGS_5(vaddshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) 
+DEF_HELPER_FLAGS_5(vaddsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_FLAGS_5(vsubsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_FLAGS_5(vsubshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_FLAGS_5(vsubsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_FLAGS_5(vaddubs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_FLAGS_5(vadduhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_FLAGS_5(vadduws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_FLAGS_5(vsububs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_FLAGS_5(vsubuhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_FLAGS_5(vsubuws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) +DEF_HELPER_3(vadduqm, void, avr, avr, avr) +DEF_HELPER_4(vaddecuq, void, avr, avr, avr, avr) +DEF_HELPER_4(vaddeuqm, void, avr, avr, avr, avr) +DEF_HELPER_3(vaddcuq, void, avr, avr, avr) +DEF_HELPER_3(vsubuqm, void, avr, avr, avr) +DEF_HELPER_4(vsubecuq, void, avr, avr, avr, avr) +DEF_HELPER_4(vsubeuqm, void, avr, avr, avr, avr) +DEF_HELPER_3(vsubcuq, void, avr, avr, avr) +DEF_HELPER_3(vrlb, void, avr, avr, avr) +DEF_HELPER_3(vrlh, void, avr, avr, avr) +DEF_HELPER_3(vrlw, void, avr, avr, avr) +DEF_HELPER_3(vrld, void, avr, avr, avr) +DEF_HELPER_4(vsldoi, void, avr, avr, avr, i32) +DEF_HELPER_3(vextractub, void, avr, avr, i32) +DEF_HELPER_3(vextractuh, void, avr, avr, i32) +DEF_HELPER_3(vextractuw, void, avr, avr, i32) +DEF_HELPER_3(vextractd, void, avr, avr, i32) +DEF_HELPER_3(vinsertb, void, avr, avr, i32) +DEF_HELPER_3(vinserth, void, avr, avr, i32) +DEF_HELPER_3(vinsertw, void, avr, avr, i32) +DEF_HELPER_3(vinsertd, void, avr, avr, i32) +DEF_HELPER_2(vextsb2w, void, avr, avr) +DEF_HELPER_2(vextsh2w, void, avr, avr) +DEF_HELPER_2(vextsb2d, void, avr, avr) +DEF_HELPER_2(vextsh2d, void, avr, avr) +DEF_HELPER_2(vextsw2d, void, avr, avr) +DEF_HELPER_2(vnegw, void, avr, avr) +DEF_HELPER_2(vnegd, void, avr, avr) +DEF_HELPER_2(vupkhpx, void, avr, avr) +DEF_HELPER_2(vupklpx, void, avr, avr) +DEF_HELPER_2(vupkhsb, void, avr, avr) +DEF_HELPER_2(vupkhsh, void, avr, avr) +DEF_HELPER_2(vupkhsw, void, avr, avr) +DEF_HELPER_2(vupklsb, void, avr, avr) +DEF_HELPER_2(vupklsh, void, avr, avr) +DEF_HELPER_2(vupklsw, void, avr, avr) +DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vpermr, void, env, avr, avr, avr, avr) +DEF_HELPER_4(vpkshss, void, env, avr, avr, avr) +DEF_HELPER_4(vpkshus, void, env, avr, avr, avr) +DEF_HELPER_4(vpkswss, void, env, avr, avr, avr) +DEF_HELPER_4(vpkswus, void, env, avr, avr, avr) +DEF_HELPER_4(vpksdss, void, env, avr, avr, avr) +DEF_HELPER_4(vpksdus, void, env, avr, avr, avr) +DEF_HELPER_4(vpkuhus, void, env, avr, avr, avr) +DEF_HELPER_4(vpkuwus, void, env, avr, avr, avr) +DEF_HELPER_4(vpkudus, void, env, avr, avr, avr) +DEF_HELPER_4(vpkuhum, void, env, avr, avr, avr) +DEF_HELPER_4(vpkuwum, void, env, avr, avr, avr) +DEF_HELPER_4(vpkudum, void, env, avr, avr, avr) +DEF_HELPER_3(vpkpx, void, avr, avr, avr) +DEF_HELPER_5(vmhaddshs, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmhraddshs, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmsumuhm, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmsumuhs, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmsumshm, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vmsumshs, void, env, avr, avr, avr, avr) 
+DEF_HELPER_4(vmladduhm, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_2(mtvscr, TCG_CALL_NO_RWG, void, env, i32) +DEF_HELPER_FLAGS_1(mfvscr, TCG_CALL_NO_RWG, i32, env) +DEF_HELPER_3(lvebx, void, env, avr, tl) +DEF_HELPER_3(lvehx, void, env, avr, tl) +DEF_HELPER_3(lvewx, void, env, avr, tl) +DEF_HELPER_3(stvebx, void, env, avr, tl) +DEF_HELPER_3(stvehx, void, env, avr, tl) +DEF_HELPER_3(stvewx, void, env, avr, tl) +#if defined(TARGET_PPC64) +DEF_HELPER_4(lxvl, void, env, tl, vsr, tl) +DEF_HELPER_4(lxvll, void, env, tl, vsr, tl) +DEF_HELPER_4(stxvl, void, env, tl, vsr, tl) +DEF_HELPER_4(stxvll, void, env, tl, vsr, tl) +#endif +DEF_HELPER_4(vsumsws, void, env, avr, avr, avr) +DEF_HELPER_4(vsum2sws, void, env, avr, avr, avr) +DEF_HELPER_4(vsum4sbs, void, env, avr, avr, avr) +DEF_HELPER_4(vsum4shs, void, env, avr, avr, avr) +DEF_HELPER_4(vsum4ubs, void, env, avr, avr, avr) +DEF_HELPER_4(vaddfp, void, env, avr, avr, avr) +DEF_HELPER_4(vsubfp, void, env, avr, avr, avr) +DEF_HELPER_4(vmaxfp, void, env, avr, avr, avr) +DEF_HELPER_4(vminfp, void, env, avr, avr, avr) +DEF_HELPER_3(vrefp, void, env, avr, avr) +DEF_HELPER_3(vrsqrtefp, void, env, avr, avr) +DEF_HELPER_3(vrlwmi, void, avr, avr, avr) +DEF_HELPER_3(vrldmi, void, avr, avr, avr) +DEF_HELPER_3(vrldnm, void, avr, avr, avr) +DEF_HELPER_3(vrlwnm, void, avr, avr, avr) +DEF_HELPER_5(vmaddfp, void, env, avr, avr, avr, avr) +DEF_HELPER_5(vnmsubfp, void, env, avr, avr, avr, avr) +DEF_HELPER_3(vexptefp, void, env, avr, avr) +DEF_HELPER_3(vlogefp, void, env, avr, avr) +DEF_HELPER_3(vrfim, void, env, avr, avr) +DEF_HELPER_3(vrfin, void, env, avr, avr) +DEF_HELPER_3(vrfip, void, env, avr, avr) +DEF_HELPER_3(vrfiz, void, env, avr, avr) +DEF_HELPER_4(vcfux, void, env, avr, avr, i32) +DEF_HELPER_4(vcfsx, void, env, avr, avr, i32) +DEF_HELPER_4(vctuxs, void, env, avr, avr, i32) +DEF_HELPER_4(vctsxs, void, env, avr, avr, i32) + +DEF_HELPER_2(vclzb, void, avr, avr) +DEF_HELPER_2(vclzh, void, avr, avr) +DEF_HELPER_2(vctzb, void, avr, avr) +DEF_HELPER_2(vctzh, void, avr, avr) +DEF_HELPER_2(vctzw, void, avr, avr) +DEF_HELPER_2(vctzd, void, avr, avr) +DEF_HELPER_2(vpopcntb, void, avr, avr) +DEF_HELPER_2(vpopcnth, void, avr, avr) +DEF_HELPER_2(vpopcntw, void, avr, avr) +DEF_HELPER_2(vpopcntd, void, avr, avr) +DEF_HELPER_1(vclzlsbb, tl, avr) +DEF_HELPER_1(vctzlsbb, tl, avr) +DEF_HELPER_3(vbpermd, void, avr, avr, avr) +DEF_HELPER_3(vbpermq, void, avr, avr, avr) +DEF_HELPER_3(vpmsumb, void, avr, avr, avr) +DEF_HELPER_3(vpmsumh, void, avr, avr, avr) +DEF_HELPER_3(vpmsumw, void, avr, avr, avr) +DEF_HELPER_3(vpmsumd, void, avr, avr, avr) +DEF_HELPER_2(vextublx, tl, tl, avr) +DEF_HELPER_2(vextuhlx, tl, tl, avr) +DEF_HELPER_2(vextuwlx, tl, tl, avr) +DEF_HELPER_2(vextubrx, tl, tl, avr) +DEF_HELPER_2(vextuhrx, tl, tl, avr) +DEF_HELPER_2(vextuwrx, tl, tl, avr) + +DEF_HELPER_2(vsbox, void, avr, avr) +DEF_HELPER_3(vcipher, void, avr, avr, avr) +DEF_HELPER_3(vcipherlast, void, avr, avr, avr) +DEF_HELPER_3(vncipher, void, avr, avr, avr) +DEF_HELPER_3(vncipherlast, void, avr, avr, avr) +DEF_HELPER_3(vshasigmaw, void, avr, avr, i32) +DEF_HELPER_3(vshasigmad, void, avr, avr, i32) +DEF_HELPER_4(vpermxor, void, avr, avr, avr, avr) + +DEF_HELPER_4(bcdadd, i32, avr, avr, avr, i32) +DEF_HELPER_4(bcdsub, i32, avr, avr, avr, i32) +DEF_HELPER_3(bcdcfn, i32, avr, avr, i32) +DEF_HELPER_3(bcdctn, i32, avr, avr, i32) +DEF_HELPER_3(bcdcfz, i32, avr, avr, i32) +DEF_HELPER_3(bcdctz, i32, avr, avr, i32) +DEF_HELPER_3(bcdcfsq, i32, avr, avr, i32) +DEF_HELPER_3(bcdctsq, i32, avr, avr, i32) 
+DEF_HELPER_4(bcdcpsgn, i32, avr, avr, avr, i32) +DEF_HELPER_3(bcdsetsgn, i32, avr, avr, i32) +DEF_HELPER_4(bcds, i32, avr, avr, avr, i32) +DEF_HELPER_4(bcdus, i32, avr, avr, avr, i32) +DEF_HELPER_4(bcdsr, i32, avr, avr, avr, i32) +DEF_HELPER_4(bcdtrunc, i32, avr, avr, avr, i32) +DEF_HELPER_4(bcdutrunc, i32, avr, avr, avr, i32) + +DEF_HELPER_4(xsadddp, void, env, vsr, vsr, vsr) +DEF_HELPER_5(xsaddqp, void, env, i32, vsr, vsr, vsr) +DEF_HELPER_4(xssubdp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xsmuldp, void, env, vsr, vsr, vsr) +DEF_HELPER_5(xsmulqp, void, env, i32, vsr, vsr, vsr) +DEF_HELPER_4(xsdivdp, void, env, vsr, vsr, vsr) +DEF_HELPER_5(xsdivqp, void, env, i32, vsr, vsr, vsr) +DEF_HELPER_3(xsredp, void, env, vsr, vsr) +DEF_HELPER_3(xssqrtdp, void, env, vsr, vsr) +DEF_HELPER_3(xsrsqrtedp, void, env, vsr, vsr) +DEF_HELPER_4(xstdivdp, void, env, i32, vsr, vsr) +DEF_HELPER_3(xstsqrtdp, void, env, i32, vsr) +DEF_HELPER_5(xsmadddp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xsmsubdp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xsnmadddp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xsnmsubdp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_4(xscmpeqdp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xscmpgtdp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xscmpgedp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xscmpnedp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xscmpexpdp, void, env, i32, vsr, vsr) +DEF_HELPER_4(xscmpexpqp, void, env, i32, vsr, vsr) +DEF_HELPER_4(xscmpodp, void, env, i32, vsr, vsr) +DEF_HELPER_4(xscmpudp, void, env, i32, vsr, vsr) +DEF_HELPER_4(xscmpoqp, void, env, i32, vsr, vsr) +DEF_HELPER_4(xscmpuqp, void, env, i32, vsr, vsr) +DEF_HELPER_4(xsmaxdp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xsmindp, void, env, vsr, vsr, vsr) +DEF_HELPER_5(xsmaxcdp, void, env, i32, vsr, vsr, vsr) +DEF_HELPER_5(xsmincdp, void, env, i32, vsr, vsr, vsr) +DEF_HELPER_5(xsmaxjdp, void, env, i32, vsr, vsr, vsr) +DEF_HELPER_5(xsminjdp, void, env, i32, vsr, vsr, vsr) +DEF_HELPER_3(xscvdphp, void, env, vsr, vsr) +DEF_HELPER_4(xscvdpqp, void, env, i32, vsr, vsr) +DEF_HELPER_3(xscvdpsp, void, env, vsr, vsr) +DEF_HELPER_2(xscvdpspn, i64, env, i64) +DEF_HELPER_4(xscvqpdp, void, env, i32, vsr, vsr) +DEF_HELPER_4(xscvqpsdz, void, env, i32, vsr, vsr) +DEF_HELPER_4(xscvqpswz, void, env, i32, vsr, vsr) +DEF_HELPER_4(xscvqpudz, void, env, i32, vsr, vsr) +DEF_HELPER_4(xscvqpuwz, void, env, i32, vsr, vsr) +DEF_HELPER_3(xscvhpdp, void, env, vsr, vsr) +DEF_HELPER_4(xscvsdqp, void, env, i32, vsr, vsr) +DEF_HELPER_3(xscvspdp, void, env, vsr, vsr) +DEF_HELPER_2(xscvspdpn, i64, env, i64) +DEF_HELPER_3(xscvdpsxds, void, env, vsr, vsr) +DEF_HELPER_3(xscvdpsxws, void, env, vsr, vsr) +DEF_HELPER_3(xscvdpuxds, void, env, vsr, vsr) +DEF_HELPER_3(xscvdpuxws, void, env, vsr, vsr) +DEF_HELPER_3(xscvsxddp, void, env, vsr, vsr) +DEF_HELPER_3(xscvuxdsp, void, env, vsr, vsr) +DEF_HELPER_3(xscvsxdsp, void, env, vsr, vsr) +DEF_HELPER_4(xscvudqp, void, env, i32, vsr, vsr) +DEF_HELPER_3(xscvuxddp, void, env, vsr, vsr) +DEF_HELPER_3(xststdcsp, void, env, i32, vsr) +DEF_HELPER_2(xststdcdp, void, env, i32) +DEF_HELPER_2(xststdcqp, void, env, i32) +DEF_HELPER_3(xsrdpi, void, env, vsr, vsr) +DEF_HELPER_3(xsrdpic, void, env, vsr, vsr) +DEF_HELPER_3(xsrdpim, void, env, vsr, vsr) +DEF_HELPER_3(xsrdpip, void, env, vsr, vsr) +DEF_HELPER_3(xsrdpiz, void, env, vsr, vsr) +DEF_HELPER_4(xsrqpi, void, env, i32, vsr, vsr) +DEF_HELPER_4(xsrqpxp, void, env, i32, vsr, vsr) +DEF_HELPER_4(xssqrtqp, void, env, i32, vsr, vsr) +DEF_HELPER_5(xssubqp, void, env, i32, vsr, vsr, vsr) + 
+DEF_HELPER_4(xsaddsp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xssubsp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xsmulsp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xsdivsp, void, env, vsr, vsr, vsr) +DEF_HELPER_3(xsresp, void, env, vsr, vsr) +DEF_HELPER_2(xsrsp, i64, env, i64) +DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr) +DEF_HELPER_3(xsrsqrtesp, void, env, vsr, vsr) +DEF_HELPER_5(xsmaddsp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xsmsubsp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xsnmaddsp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xsnmsubsp, void, env, vsr, vsr, vsr, vsr) + +DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xvmuldp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xvdivdp, void, env, vsr, vsr, vsr) +DEF_HELPER_3(xvredp, void, env, vsr, vsr) +DEF_HELPER_3(xvsqrtdp, void, env, vsr, vsr) +DEF_HELPER_3(xvrsqrtedp, void, env, vsr, vsr) +DEF_HELPER_4(xvtdivdp, void, env, i32, vsr, vsr) +DEF_HELPER_3(xvtsqrtdp, void, env, i32, vsr) +DEF_HELPER_5(xvmadddp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xvmsubdp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xvnmadddp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xvnmsubdp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_4(xvmaxdp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xvmindp, void, env, vsr, vsr, vsr) +DEF_HELPER_FLAGS_4(xvcmpeqdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) +DEF_HELPER_FLAGS_4(xvcmpgedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) +DEF_HELPER_FLAGS_4(xvcmpgtdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) +DEF_HELPER_FLAGS_4(xvcmpnedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) +DEF_HELPER_3(xvcvdpsp, void, env, vsr, vsr) +DEF_HELPER_3(xvcvdpsxds, void, env, vsr, vsr) +DEF_HELPER_3(xvcvdpsxws, void, env, vsr, vsr) +DEF_HELPER_3(xvcvdpuxds, void, env, vsr, vsr) +DEF_HELPER_3(xvcvdpuxws, void, env, vsr, vsr) +DEF_HELPER_3(xvcvsxddp, void, env, vsr, vsr) +DEF_HELPER_3(xvcvuxddp, void, env, vsr, vsr) +DEF_HELPER_3(xvcvsxwdp, void, env, vsr, vsr) +DEF_HELPER_3(xvcvuxwdp, void, env, vsr, vsr) +DEF_HELPER_3(xvrdpi, void, env, vsr, vsr) +DEF_HELPER_3(xvrdpic, void, env, vsr, vsr) +DEF_HELPER_3(xvrdpim, void, env, vsr, vsr) +DEF_HELPER_3(xvrdpip, void, env, vsr, vsr) +DEF_HELPER_3(xvrdpiz, void, env, vsr, vsr) + +DEF_HELPER_4(xvaddsp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xvsubsp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xvmulsp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xvdivsp, void, env, vsr, vsr, vsr) +DEF_HELPER_3(xvresp, void, env, vsr, vsr) +DEF_HELPER_3(xvsqrtsp, void, env, vsr, vsr) +DEF_HELPER_3(xvrsqrtesp, void, env, vsr, vsr) +DEF_HELPER_4(xvtdivsp, void, env, i32, vsr, vsr) +DEF_HELPER_3(xvtsqrtsp, void, env, i32, vsr) +DEF_HELPER_5(xvmaddsp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xvmsubsp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xvnmaddsp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_5(xvnmsubsp, void, env, vsr, vsr, vsr, vsr) +DEF_HELPER_4(xvmaxsp, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xvminsp, void, env, vsr, vsr, vsr) +DEF_HELPER_FLAGS_4(xvcmpeqsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) +DEF_HELPER_FLAGS_4(xvcmpgesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) +DEF_HELPER_FLAGS_4(xvcmpgtsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) +DEF_HELPER_FLAGS_4(xvcmpnesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) +DEF_HELPER_3(xvcvspdp, void, env, vsr, vsr) +DEF_HELPER_3(xvcvsphp, void, env, vsr, vsr) +DEF_HELPER_3(xvcvhpsp, void, env, vsr, vsr) +DEF_HELPER_3(xvcvspsxds, void, env, vsr, vsr) +DEF_HELPER_3(xvcvspsxws, void, env, vsr, vsr) 
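Note: the DEF_HELPER_FLAGS_* variants above (e.g. for xvcmpeqdp) additionally attach TCG call-flag hints. As a sketch of the flag aliases as defined in upstream tcg/tcg.h (not shown in this patch): a helper flagged this way promises not to touch TCG globals (the guest registers the translator maps), so TCG may keep those values live in host registers across the call.

/* Sketch of the tcg.h convenience aliases assumed above. */
#define TCG_CALL_NO_RWG     TCG_CALL_NO_READ_GLOBALS   /* implies no write either */
#define TCG_CALL_NO_WG      TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_RWG_SE  (TCG_CALL_NO_RWG | TCG_CALL_NO_SE) /* also side-effect free */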
+DEF_HELPER_3(xvcvspuxds, void, env, vsr, vsr) +DEF_HELPER_3(xvcvspuxws, void, env, vsr, vsr) +DEF_HELPER_3(xvcvsxdsp, void, env, vsr, vsr) +DEF_HELPER_3(xvcvuxdsp, void, env, vsr, vsr) +DEF_HELPER_3(xvcvsxwsp, void, env, vsr, vsr) +DEF_HELPER_3(xvcvuxwsp, void, env, vsr, vsr) +DEF_HELPER_2(xvtstdcsp, void, env, i32) +DEF_HELPER_2(xvtstdcdp, void, env, i32) +DEF_HELPER_3(xvrspi, void, env, vsr, vsr) +DEF_HELPER_3(xvrspic, void, env, vsr, vsr) +DEF_HELPER_3(xvrspim, void, env, vsr, vsr) +DEF_HELPER_3(xvrspip, void, env, vsr, vsr) +DEF_HELPER_3(xvrspiz, void, env, vsr, vsr) +DEF_HELPER_4(xxperm, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr) +DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32) +DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32) +DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr) + +DEF_HELPER_2(efscfsi, i32, env, i32) +DEF_HELPER_2(efscfui, i32, env, i32) +DEF_HELPER_2(efscfuf, i32, env, i32) +DEF_HELPER_2(efscfsf, i32, env, i32) +DEF_HELPER_2(efsctsi, i32, env, i32) +DEF_HELPER_2(efsctui, i32, env, i32) +DEF_HELPER_2(efsctsiz, i32, env, i32) +DEF_HELPER_2(efsctuiz, i32, env, i32) +DEF_HELPER_2(efsctsf, i32, env, i32) +DEF_HELPER_2(efsctuf, i32, env, i32) +DEF_HELPER_2(evfscfsi, i64, env, i64) +DEF_HELPER_2(evfscfui, i64, env, i64) +DEF_HELPER_2(evfscfuf, i64, env, i64) +DEF_HELPER_2(evfscfsf, i64, env, i64) +DEF_HELPER_2(evfsctsi, i64, env, i64) +DEF_HELPER_2(evfsctui, i64, env, i64) +DEF_HELPER_2(evfsctsiz, i64, env, i64) +DEF_HELPER_2(evfsctuiz, i64, env, i64) +DEF_HELPER_2(evfsctsf, i64, env, i64) +DEF_HELPER_2(evfsctuf, i64, env, i64) +DEF_HELPER_3(efsadd, i32, env, i32, i32) +DEF_HELPER_3(efssub, i32, env, i32, i32) +DEF_HELPER_3(efsmul, i32, env, i32, i32) +DEF_HELPER_3(efsdiv, i32, env, i32, i32) +DEF_HELPER_3(evfsadd, i64, env, i64, i64) +DEF_HELPER_3(evfssub, i64, env, i64, i64) +DEF_HELPER_3(evfsmul, i64, env, i64, i64) +DEF_HELPER_3(evfsdiv, i64, env, i64, i64) +DEF_HELPER_3(efststlt, i32, env, i32, i32) +DEF_HELPER_3(efststgt, i32, env, i32, i32) +DEF_HELPER_3(efststeq, i32, env, i32, i32) +DEF_HELPER_3(efscmplt, i32, env, i32, i32) +DEF_HELPER_3(efscmpgt, i32, env, i32, i32) +DEF_HELPER_3(efscmpeq, i32, env, i32, i32) +DEF_HELPER_3(evfststlt, i32, env, i64, i64) +DEF_HELPER_3(evfststgt, i32, env, i64, i64) +DEF_HELPER_3(evfststeq, i32, env, i64, i64) +DEF_HELPER_3(evfscmplt, i32, env, i64, i64) +DEF_HELPER_3(evfscmpgt, i32, env, i64, i64) +DEF_HELPER_3(evfscmpeq, i32, env, i64, i64) +DEF_HELPER_2(efdcfsi, i64, env, i32) +DEF_HELPER_2(efdcfsid, i64, env, i64) +DEF_HELPER_2(efdcfui, i64, env, i32) +DEF_HELPER_2(efdcfuid, i64, env, i64) +DEF_HELPER_2(efdctsi, i32, env, i64) +DEF_HELPER_2(efdctui, i32, env, i64) +DEF_HELPER_2(efdctsiz, i32, env, i64) +DEF_HELPER_2(efdctsidz, i64, env, i64) +DEF_HELPER_2(efdctuiz, i32, env, i64) +DEF_HELPER_2(efdctuidz, i64, env, i64) +DEF_HELPER_2(efdcfsf, i64, env, i32) +DEF_HELPER_2(efdcfuf, i64, env, i32) +DEF_HELPER_2(efdctsf, i32, env, i64) +DEF_HELPER_2(efdctuf, i32, env, i64) +DEF_HELPER_2(efscfd, i32, env, i64) +DEF_HELPER_2(efdcfs, i64, env, i32) +DEF_HELPER_3(efdadd, i64, env, i64, i64) +DEF_HELPER_3(efdsub, i64, env, i64, i64) +DEF_HELPER_3(efdmul, i64, env, i64, i64) +DEF_HELPER_3(efddiv, i64, env, i64, i64) +DEF_HELPER_3(efdtstlt, i32, env, i64, i64) +DEF_HELPER_3(efdtstgt, i32, env, i64, i64) +DEF_HELPER_3(efdtsteq, i32, env, i64, i64) +DEF_HELPER_3(efdcmplt, i32, env, i64, i64) +DEF_HELPER_3(efdcmpgt, i32, env, i64, i64) +DEF_HELPER_3(efdcmpeq, i32, env, i64, i64) + +DEF_HELPER_2(4xx_tlbre_hi, tl, 
env, tl) +DEF_HELPER_2(4xx_tlbre_lo, tl, env, tl) +DEF_HELPER_3(4xx_tlbwe_hi, void, env, tl, tl) +DEF_HELPER_3(4xx_tlbwe_lo, void, env, tl, tl) +DEF_HELPER_2(4xx_tlbsx, tl, env, tl) +DEF_HELPER_3(440_tlbre, tl, env, i32, tl) +DEF_HELPER_4(440_tlbwe, void, env, i32, tl, tl) +DEF_HELPER_2(440_tlbsx, tl, env, tl) +DEF_HELPER_1(booke206_tlbre, void, env) +DEF_HELPER_1(booke206_tlbwe, void, env) +DEF_HELPER_2(booke206_tlbsx, void, env, tl) +DEF_HELPER_2(booke206_tlbivax, void, env, tl) +DEF_HELPER_2(booke206_tlbilx0, void, env, tl) +DEF_HELPER_2(booke206_tlbilx1, void, env, tl) +DEF_HELPER_2(booke206_tlbilx3, void, env, tl) +DEF_HELPER_2(booke206_tlbflush, void, env, tl) +DEF_HELPER_3(booke_setpid, void, env, i32, tl) +DEF_HELPER_2(booke_set_eplc, void, env, tl) +DEF_HELPER_2(booke_set_epsc, void, env, tl) +DEF_HELPER_2(6xx_tlbd, void, env, tl) +DEF_HELPER_2(6xx_tlbi, void, env, tl) +DEF_HELPER_2(74xx_tlbd, void, env, tl) +DEF_HELPER_2(74xx_tlbi, void, env, tl) +DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl) +#if defined(TARGET_PPC64) +DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl) +DEF_HELPER_2(load_slb_esid, tl, env, tl) +DEF_HELPER_2(load_slb_vsid, tl, env, tl) +DEF_HELPER_2(find_slb_vsid, tl, env, tl) +DEF_HELPER_FLAGS_2(slbia, TCG_CALL_NO_RWG, void, env, i32) +DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(slbieg, TCG_CALL_NO_RWG, void, env, tl) +#endif +DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl) +DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl) + +DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_2(msgsnd, void, env, tl) +DEF_HELPER_2(msgclr, void, env, tl) +DEF_HELPER_2(book3s_msgsnd, void, env, tl) +DEF_HELPER_2(book3s_msgclr, void, env, tl) + +DEF_HELPER_4(dlmzb, tl, env, tl, tl, i32) +DEF_HELPER_FLAGS_2(clcs, TCG_CALL_NO_RWG_SE, tl, env, i32) +DEF_HELPER_2(rac, tl, env, tl) +DEF_HELPER_3(div, tl, env, tl, tl) +DEF_HELPER_3(divo, tl, env, tl, tl) +DEF_HELPER_3(divs, tl, env, tl, tl) +DEF_HELPER_3(divso, tl, env, tl, tl) + +DEF_HELPER_2(load_dcr, tl, env, tl) +DEF_HELPER_3(store_dcr, void, env, tl, tl) + +DEF_HELPER_2(load_dump_spr, void, env, i32) +DEF_HELPER_2(store_dump_spr, void, env, i32) +DEF_HELPER_4(fscr_facility_check, void, env, i32, i32, i32) +DEF_HELPER_4(msr_facility_check, void, env, i32, i32, i32) +DEF_HELPER_FLAGS_1(load_tbl, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_1(load_tbu, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_1(load_atbl, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_1(load_atbu, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_1(load_vtb, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_1(load_601_rtcl, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_1(load_601_rtcu, TCG_CALL_NO_RWG, tl, env) + +#if defined(TARGET_PPC64) +DEF_HELPER_FLAGS_1(load_purr, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_2(store_purr, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_2(store_ptcr, void, env, tl) +DEF_HELPER_FLAGS_1(load_dpdes, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_2(store_dpdes, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_2(book3s_msgsndp, void, env, tl) +DEF_HELPER_2(book3s_msgclrp, void, env, tl) +#endif +DEF_HELPER_2(store_sdr1, void, env, tl) +DEF_HELPER_2(store_pidr, void, env, tl) +DEF_HELPER_2(store_lpidr, void, env, tl) +DEF_HELPER_FLAGS_2(store_tbl, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(store_tbu, TCG_CALL_NO_RWG, void, env, tl) 
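Note: the "tl" argument/return type used throughout the TLB/SLB/SPR declarations above is the guest-word-sized integer. A sketch, assuming the usual mapping in upstream exec/helper-head.h (not shown in this patch):

/* Sketch: "tl" follows TARGET_LONG_BITS, so these helpers take and
 * return 32-bit values on ppc builds and 64-bit values on ppc64 builds.
 */
#if TARGET_LONG_BITS == 32
# define dh_alias_tl i32
#else
# define dh_alias_tl i64
#endif
#define dh_ctype_tl target_ulong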
+DEF_HELPER_FLAGS_2(store_atbl, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(store_atbu, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(store_601_rtcl, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(store_601_rtcu, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_1(load_decr, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_2(store_decr, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_1(load_hdecr, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_2(store_hdecr, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(store_vtb, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(store_tbu40, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_2(store_hid0_601, void, env, tl) +DEF_HELPER_3(store_403_pbr, void, env, i32, tl) +DEF_HELPER_FLAGS_1(load_40x_pit, TCG_CALL_NO_RWG, tl, env) +DEF_HELPER_FLAGS_2(store_40x_pit, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_2(store_40x_dbcr0, void, env, tl) +DEF_HELPER_2(store_40x_sler, void, env, tl) +DEF_HELPER_FLAGS_2(store_booke_tcr, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(store_booke_tsr, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_3(store_ibatl, void, env, i32, tl) +DEF_HELPER_3(store_ibatu, void, env, i32, tl) +DEF_HELPER_3(store_dbatl, void, env, i32, tl) +DEF_HELPER_3(store_dbatu, void, env, i32, tl) +DEF_HELPER_3(store_601_batl, void, env, i32, tl) +DEF_HELPER_3(store_601_batu, void, env, i32, tl) + +#define dh_alias_fprp ptr +#define dh_ctype_fprp ppc_fprp_t * +#define dh_is_signed_fprp dh_is_signed_ptr + +DEF_HELPER_4(dadd, void, env, fprp, fprp, fprp) +DEF_HELPER_4(daddq, void, env, fprp, fprp, fprp) +DEF_HELPER_4(dsub, void, env, fprp, fprp, fprp) +DEF_HELPER_4(dsubq, void, env, fprp, fprp, fprp) +DEF_HELPER_4(dmul, void, env, fprp, fprp, fprp) +DEF_HELPER_4(dmulq, void, env, fprp, fprp, fprp) +DEF_HELPER_4(ddiv, void, env, fprp, fprp, fprp) +DEF_HELPER_4(ddivq, void, env, fprp, fprp, fprp) +DEF_HELPER_3(dcmpo, i32, env, fprp, fprp) +DEF_HELPER_3(dcmpoq, i32, env, fprp, fprp) +DEF_HELPER_3(dcmpu, i32, env, fprp, fprp) +DEF_HELPER_3(dcmpuq, i32, env, fprp, fprp) +DEF_HELPER_3(dtstdc, i32, env, fprp, i32) +DEF_HELPER_3(dtstdcq, i32, env, fprp, i32) +DEF_HELPER_3(dtstdg, i32, env, fprp, i32) +DEF_HELPER_3(dtstdgq, i32, env, fprp, i32) +DEF_HELPER_3(dtstex, i32, env, fprp, fprp) +DEF_HELPER_3(dtstexq, i32, env, fprp, fprp) +DEF_HELPER_3(dtstsf, i32, env, fprp, fprp) +DEF_HELPER_3(dtstsfq, i32, env, fprp, fprp) +DEF_HELPER_3(dtstsfi, i32, env, i32, fprp) +DEF_HELPER_3(dtstsfiq, i32, env, i32, fprp) +DEF_HELPER_5(dquai, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(dquaiq, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(dqua, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(dquaq, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(drrnd, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(drrndq, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(drintx, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(drintxq, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(drintn, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(drintnq, void, env, fprp, fprp, i32, i32) +DEF_HELPER_3(dctdp, void, env, fprp, fprp) +DEF_HELPER_3(dctqpq, void, env, fprp, fprp) +DEF_HELPER_3(drsp, void, env, fprp, fprp) +DEF_HELPER_3(drdpq, void, env, fprp, fprp) +DEF_HELPER_3(dcffix, void, env, fprp, fprp) +DEF_HELPER_3(dcffixq, void, env, fprp, fprp) +DEF_HELPER_3(dctfix, void, env, fprp, fprp) +DEF_HELPER_3(dctfixq, void, env, fprp, fprp) +DEF_HELPER_4(ddedpd, void, env, fprp, fprp, i32) +DEF_HELPER_4(ddedpdq, void, env, fprp, fprp, i32) +DEF_HELPER_4(denbcd, void, env, fprp, fprp, 
i32) +DEF_HELPER_4(denbcdq, void, env, fprp, fprp, i32) +DEF_HELPER_3(dxex, void, env, fprp, fprp) +DEF_HELPER_3(dxexq, void, env, fprp, fprp) +DEF_HELPER_4(diex, void, env, fprp, fprp, fprp) +DEF_HELPER_4(diexq, void, env, fprp, fprp, fprp) +DEF_HELPER_4(dscri, void, env, fprp, fprp, i32) +DEF_HELPER_4(dscriq, void, env, fprp, fprp, i32) +DEF_HELPER_4(dscli, void, env, fprp, fprp, i32) +DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32) + +DEF_HELPER_1(tbegin, void, env) +DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env) + +#ifdef TARGET_PPC64 +DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32) +DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32) +DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG, + void, env, tl, i64, i64, i32) +DEF_HELPER_FLAGS_5(stq_be_parallel, TCG_CALL_NO_WG, + void, env, tl, i64, i64, i32) +DEF_HELPER_5(stqcx_le_parallel, i32, env, tl, i64, i64, i32) +DEF_HELPER_5(stqcx_be_parallel, i32, env, tl, i64, i64, i32) +#endif diff --git a/qemu/target/ppc/helper_regs.h b/qemu/target/ppc/helper_regs.h new file mode 100644 index 00000000..e931eb61 --- /dev/null +++ b/qemu/target/ppc/helper_regs.h @@ -0,0 +1,184 @@ +/* + * PowerPC emulation special registers manipulation helpers for qemu. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef HELPER_REGS_H +#define HELPER_REGS_H + +#include "exec/exec-all.h" + +/* Swap temporary saved registers with GPRs */ +static inline void hreg_swap_gpr_tgpr(CPUPPCState *env) +{ + target_ulong tmp; + + tmp = env->gpr[0]; + env->gpr[0] = env->tgpr[0]; + env->tgpr[0] = tmp; + tmp = env->gpr[1]; + env->gpr[1] = env->tgpr[1]; + env->tgpr[1] = tmp; + tmp = env->gpr[2]; + env->gpr[2] = env->tgpr[2]; + env->tgpr[2] = tmp; + tmp = env->gpr[3]; + env->gpr[3] = env->tgpr[3]; + env->tgpr[3] = tmp; +} + +static inline void hreg_compute_mem_idx(CPUPPCState *env) +{ + /* + * This is our encoding for server processors. The architecture + * specifies that there is no such thing as userspace with + * translation off, however it appears that MacOS does it and some + * 32-bit CPUs support it. Weird... + * + * 0 = Guest User space virtual mode + * 1 = Guest Kernel space virtual mode + * 2 = Guest User space real mode + * 3 = Guest Kernel space real mode + * 4 = HV User space virtual mode + * 5 = HV Kernel space virtual mode + * 6 = HV User space real mode + * 7 = HV Kernel space real mode + * + * For BookE, we need 8 MMU modes as follows: + * + * 0 = AS 0 HV User space + * 1 = AS 0 HV Kernel space + * 2 = AS 1 HV User space + * 3 = AS 1 HV Kernel space + * 4 = AS 0 Guest User space + * 5 = AS 0 Guest Kernel space + * 6 = AS 1 Guest User space + * 7 = AS 1 Guest Kernel space + */ + if (env->mmu_model & POWERPC_MMU_BOOKE) { + env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1; + env->immu_idx += msr_is ? 2 : 0; + env->dmmu_idx += msr_ds ? 2 : 0; + env->immu_idx += msr_gs ?
4 : 0; + env->dmmu_idx += msr_gs ? 4 : 0; + } else { + env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1; + env->immu_idx += msr_ir ? 0 : 2; + env->dmmu_idx += msr_dr ? 0 : 2; + env->immu_idx += msr_hv ? 4 : 0; + env->dmmu_idx += msr_hv ? 4 : 0; + } +} + +static inline void hreg_compute_hflags(CPUPPCState *env) +{ + target_ulong hflags_mask; + + /* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */ + hflags_mask = (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA) | + (1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE) | (1 << MSR_BE) | + (1 << MSR_LE) | (1 << MSR_VSX) | (1 << MSR_IR) | (1 << MSR_DR); + hflags_mask |= (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB; + hreg_compute_mem_idx(env); + env->hflags = env->msr & hflags_mask; + /* Merge with hflags coming from other registers */ + env->hflags |= env->hflags_nmsr; +} + +static inline void cpu_interrupt_exittb(CPUState *cs) +{ + cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); +} + +static inline int hreg_store_msr(CPUPPCState *env, target_ulong value, + int alter_hv) +{ + int excp; + CPUState *cs = env_cpu(env); + + excp = 0; + value &= env->msr_mask; + + /* Neither mtmsr nor guest state can alter HV */ + if (!alter_hv || !(env->msr & MSR_HVB)) { + value &= ~MSR_HVB; + value |= env->msr & MSR_HVB; + } + if (((value >> MSR_IR) & 1) != msr_ir || + ((value >> MSR_DR) & 1) != msr_dr) { + cpu_interrupt_exittb(cs); + } + if ((env->mmu_model & POWERPC_MMU_BOOKE) && + ((value >> MSR_GS) & 1) != msr_gs) { + cpu_interrupt_exittb(cs); + } + if (unlikely((env->flags & POWERPC_FLAG_TGPR) && + ((value ^ env->msr) & (1 << MSR_TGPR)))) { + /* Swap temporary saved registers with GPRs */ + hreg_swap_gpr_tgpr(env); + } + if (unlikely(((value >> MSR_EP) & 1) != msr_ep)) { + /* Change the exception prefix on PowerPC 601 */ + env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000; + } + /* + * If PR=1 then EE, IR and DR must be 1 + * + * Note: We only enforce this on 64-bit server processors. + * It appears that: + * - 32-bit implementations support PR=1 and EE/DR/IR=0 and MacOS + * exploits it. + * - 64-bit embedded implementations do not need any operation to be + * performed when PR is set. + */ + if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) { + value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR); + } + + env->msr = value; + hreg_compute_hflags(env); + + if (unlikely(msr_pow == 1)) { + if (!env->pending_interrupts && (*env->check_pow)(env)) { + cs->halted = 1; + excp = EXCP_HALTED; + } + } + + return excp; +} + +static inline void check_tlb_flush(CPUPPCState *env, bool global) +{ + CPUState *cs = env_cpu(env); + + /* Handle global flushes first */ + if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) { + env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH; + env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH; + tlb_flush_all_cpus_synced(cs); + return; + } + + /* Then handle local ones */ + if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) { + env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH; + tlb_flush(cs); + } +} + +#endif /* HELPER_REGS_H */ diff --git a/qemu/target/ppc/int_helper.c b/qemu/target/ppc/int_helper.c new file mode 100644 index 00000000..c6ead3e1 --- /dev/null +++ b/qemu/target/ppc/int_helper.c @@ -0,0 +1,2973 @@ +/* + * PowerPC integer and vector emulation helpers for QEMU.
+ * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "crypto/aes.h" +#include "fpu/softfloat.h" +#include "qemu/guest-random.h" + +#include "helper_regs.h" +/*****************************************************************************/ +/* Fixed point operations helpers */ + +static inline void helper_update_ov_legacy(CPUPPCState *env, int ov) +{ + if (unlikely(ov)) { + env->so = env->ov = 1; + } else { + env->ov = 0; + } +} + +target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb, + uint32_t oe) +{ + uint64_t rt = 0; + int overflow = 0; + + uint64_t dividend = (uint64_t)ra << 32; + uint64_t divisor = (uint32_t)rb; + + if (unlikely(divisor == 0)) { + overflow = 1; + } else { + rt = dividend / divisor; + overflow = rt > UINT32_MAX; + } + + if (unlikely(overflow)) { + rt = 0; /* Undefined */ + } + + if (oe) { + helper_update_ov_legacy(env, overflow); + } + + return (target_ulong)rt; +} + +target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb, + uint32_t oe) +{ + int64_t rt = 0; + int overflow = 0; + + int64_t dividend = (int64_t)ra << 32; + int64_t divisor = (int64_t)((int32_t)rb); + +#ifdef _MSC_VER + if (unlikely((divisor == 0) || + ((divisor == (0ULL - 1ULL)) && (dividend == INT64_MIN)))) { +#else + if (unlikely((divisor == 0) || + ((divisor == -1ull) && (dividend == INT64_MIN)))) { +#endif + overflow = 1; + } else { + rt = dividend / divisor; + overflow = rt != (int32_t)rt; + } + + if (unlikely(overflow)) { + rt = 0; /* Undefined */ + } + + if (oe) { + helper_update_ov_legacy(env, overflow); + } + + return (target_ulong)rt; +} + +#if defined(TARGET_PPC64) + +uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) +{ + uint64_t rt = 0; + int overflow = 0; + + overflow = divu128(&rt, &ra, rb); + + if (unlikely(overflow)) { + rt = 0; /* Undefined */ + } + + if (oe) { + helper_update_ov_legacy(env, overflow); + } + + return rt; +} + +uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe) +{ + int64_t rt = 0; + int64_t ra = (int64_t)rau; + int64_t rb = (int64_t)rbu; + int overflow = divs128(&rt, &ra, rb); + + if (unlikely(overflow)) { + rt = 0; /* Undefined */ + } + + if (oe) { + helper_update_ov_legacy(env, overflow); + } + + return rt; +} + +#endif + + +#if defined(TARGET_PPC64) +/* if x = 0xab, returns 0xabababababababab */ +#define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff)) + +/* + * subtract 1 from each byte, and with inverse, check if MSB is set at each + * byte. + * i.e.
((0x00 - 0x01) & ~(0x00)) & 0x80 + * (0xFF & 0xFF) & 0x80 = 0x80 (zero found) + */ +#define haszero(v) (((v) - pattern(0x01)) & ~(v) & pattern(0x80)) + +/* When you XOR the pattern and there is a match, that byte will be zero */ +#define hasvalue(x, n) (haszero((x) ^ pattern(n))) + +uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb) +{ + return hasvalue(rb, ra) ? CRF_GT : 0; +} + +#undef pattern +#undef haszero +#undef hasvalue + +/* + * Return a random number. + */ +uint64_t helper_darn32(void) +{ + uint32_t ret; + + if (qemu_guest_getrandom(&ret, sizeof(ret)) < 0) { + return -1; + } + + return ret; +} + +uint64_t helper_darn64(void) +{ + uint64_t ret; + + if (qemu_guest_getrandom(&ret, sizeof(ret)) < 0) { + return -1; + } + + return ret; +} + +uint64_t helper_bpermd(uint64_t rs, uint64_t rb) +{ + int i; + uint64_t ra = 0; + + for (i = 0; i < 8; i++) { + int index = (rs >> (i * 8)) & 0xFF; + if (index < 64) { + if (rb & PPC_BIT(index)) { + ra |= 1ULL << i; + } + } + } + return ra; +} + +#endif + +target_ulong helper_cmpb(target_ulong rs, target_ulong rb) +{ + target_ulong mask = 0xff; + target_ulong ra = 0; + int i; + + for (i = 0; i < sizeof(target_ulong); i++) { + if ((rs & mask) == (rb & mask)) { + ra |= mask; + } + mask <<= 8; + } + return ra; +} + +/* shift right arithmetic helper */ +target_ulong helper_sraw(CPUPPCState *env, target_ulong value, + target_ulong shift) +{ + int32_t ret; + + if (likely(!(shift & 0x20))) { + if (likely((uint32_t)shift != 0)) { + shift &= 0x1f; + ret = (int32_t)value >> shift; + if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) { + env->ca32 = env->ca = 0; + } else { + env->ca32 = env->ca = 1; + } + } else { + ret = (int32_t)value; + env->ca32 = env->ca = 0; + } + } else { + ret = (int32_t)value >> 31; + env->ca32 = env->ca = (ret != 0); + } + return (target_long)ret; +} + +#if defined(TARGET_PPC64) +target_ulong helper_srad(CPUPPCState *env, target_ulong value, + target_ulong shift) +{ + int64_t ret; + + if (likely(!(shift & 0x40))) { + if (likely((uint64_t)shift != 0)) { + shift &= 0x3f; + ret = (int64_t)value >> shift; + if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) { + env->ca32 = env->ca = 0; + } else { + env->ca32 = env->ca = 1; + } + } else { + ret = (int64_t)value; + env->ca32 = env->ca = 0; + } + } else { + ret = (int64_t)value >> 63; + env->ca32 = env->ca = (ret != 0); + } + return ret; +} +#endif + +#if defined(TARGET_PPC64) +target_ulong helper_popcntb(target_ulong val) +{ + /* Note that we don't fold past bytes */ + val = (val & 0x5555555555555555ULL) + ((val >> 1) & + 0x5555555555555555ULL); + val = (val & 0x3333333333333333ULL) + ((val >> 2) & + 0x3333333333333333ULL); + val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & + 0x0f0f0f0f0f0f0f0fULL); + return val; +} + +target_ulong helper_popcntw(target_ulong val) +{ + /* Note that we don't fold past words. 
*/ + val = (val & 0x5555555555555555ULL) + ((val >> 1) & + 0x5555555555555555ULL); + val = (val & 0x3333333333333333ULL) + ((val >> 2) & + 0x3333333333333333ULL); + val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & + 0x0f0f0f0f0f0f0f0fULL); + val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) & + 0x00ff00ff00ff00ffULL); + val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) & + 0x0000ffff0000ffffULL); + return val; +} +#else +target_ulong helper_popcntb(target_ulong val) +{ + /* Note that we don't fold past bytes */ + val = (val & 0x55555555) + ((val >> 1) & 0x55555555); + val = (val & 0x33333333) + ((val >> 2) & 0x33333333); + val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f); + return val; +} +#endif + +/*****************************************************************************/ +/* PowerPC 601 specific instructions (POWER bridge) */ +target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2) +{ + uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ]; + + if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || + (int32_t)arg2 == 0) { + env->spr[SPR_MQ] = 0; + return INT32_MIN; + } else { + env->spr[SPR_MQ] = tmp % arg2; + return tmp / (int32_t)arg2; + } +} + +target_ulong helper_divo(CPUPPCState *env, target_ulong arg1, + target_ulong arg2) +{ + uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ]; + + if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || + (int32_t)arg2 == 0) { + env->so = env->ov = 1; + env->spr[SPR_MQ] = 0; + return INT32_MIN; + } else { + env->spr[SPR_MQ] = tmp % arg2; + tmp /= (int32_t)arg2; + if ((int32_t)tmp != tmp) { + env->so = env->ov = 1; + } else { + env->ov = 0; + } + return tmp; + } +} + +target_ulong helper_divs(CPUPPCState *env, target_ulong arg1, + target_ulong arg2) +{ + if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || + (int32_t)arg2 == 0) { + env->spr[SPR_MQ] = 0; + return INT32_MIN; + } else { + env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2; + return (int32_t)arg1 / (int32_t)arg2; + } +} + +target_ulong helper_divso(CPUPPCState *env, target_ulong arg1, + target_ulong arg2) +{ + if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) || + (int32_t)arg2 == 0) { + env->so = env->ov = 1; + env->spr[SPR_MQ] = 0; + return INT32_MIN; + } else { + env->ov = 0; + env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2; + return (int32_t)arg1 / (int32_t)arg2; + } +} + +/*****************************************************************************/ +/* 602 specific instructions */ +/* mfrom is the craziest instruction ever seen, imho! */ +/* The real implementation uses a ROM table; do the same. */ +/* + * Extremely decomposed: + * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5 + */ +target_ulong helper_602_mfrom(target_ulong arg) +{ + if (likely(arg < 602)) { +#include "mfrom_table.inc.c" + return mfrom_ROM_table[arg]; + } else { + return 0; + } +} + +/*****************************************************************************/ +/* Altivec extension helpers */ +#if defined(HOST_WORDS_BIGENDIAN) +#define VECTOR_FOR_INORDER_I(index, element) \ + for (index = 0; index < ARRAY_SIZE(r->element); index++) +#else +#define VECTOR_FOR_INORDER_I(index, element) \ + for (index = ARRAY_SIZE(r->element) - 1; index >= 0; index--) +#endif + +/* Saturating arithmetic helpers.
*/ +#define SATCVT(from, to, from_type, to_type, min, max) \ + static inline to_type cvt##from##to(from_type x, int *sat) \ + { \ + to_type r; \ + \ + if (x < (from_type)min) { \ + r = min; \ + *sat = 1; \ + } else if (x > (from_type)max) { \ + r = max; \ + *sat = 1; \ + } else { \ + r = x; \ + } \ + return r; \ + } +#define SATCVTU(from, to, from_type, to_type, min, max) \ + static inline to_type cvt##from##to(from_type x, int *sat) \ + { \ + to_type r; \ + \ + if (x > (from_type)max) { \ + r = max; \ + *sat = 1; \ + } else { \ + r = x; \ + } \ + return r; \ + } +SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX) +SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX) +SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX) + +SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX) +SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX) +SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX) +SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX) +SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX) +SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX) +#undef SATCVT +#undef SATCVTU + +void helper_mtvscr(CPUPPCState *env, uint32_t vscr) +{ + env->vscr = vscr & ~(1u << VSCR_SAT); + /* Which bit we set is completely arbitrary, but clear the rest. */ + env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT); + env->vscr_sat.u64[1] = 0; + set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status); +} + +uint32_t helper_mfvscr(CPUPPCState *env) +{ + uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0; + return env->vscr | (sat << VSCR_SAT); +} + +static inline void set_vscr_sat(CPUPPCState *env) +{ + /* The choice of non-zero value is arbitrary. */ + env->vscr_sat.u32[0] = 1; +} + +void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->u32); i++) { + r->u32[i] = ~a->u32[i] < b->u32[i]; + } +} + +/* vprtybw */ +void helper_vprtybw(ppc_avr_t *r, ppc_avr_t *b) +{ + int i; + for (i = 0; i < ARRAY_SIZE(r->u32); i++) { + uint64_t res = b->u32[i] ^ (b->u32[i] >> 16); + res ^= res >> 8; + r->u32[i] = res & 1; + } +} + +/* vprtybd */ +void helper_vprtybd(ppc_avr_t *r, ppc_avr_t *b) +{ + int i; + for (i = 0; i < ARRAY_SIZE(r->u64); i++) { + uint64_t res = b->u64[i] ^ (b->u64[i] >> 32); + res ^= res >> 16; + res ^= res >> 8; + r->u64[i] = res & 1; + } +} + +/* vprtybq */ +void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b) +{ + uint64_t res = b->u64[0] ^ b->u64[1]; + res ^= res >> 32; + res ^= res >> 16; + res ^= res >> 8; + r->VsrD(1) = res & 1; + r->VsrD(0) = 0; +} + +#define VARITH_DO(name, op, element) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + r->element[i] = a->element[i] op b->element[i]; \ + } \ + } +VARITH_DO(muluwm, *, u32) +#undef VARITH_DO +#undef VARITH + +#define VARITHFP(suffix, func) \ + void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \ + ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ + r->f32[i] = func(a->f32[i], b->f32[i], &env->vec_status); \ + } \ + } +VARITHFP(addfp, float32_add) +VARITHFP(subfp, float32_sub) +VARITHFP(minfp, float32_min) +VARITHFP(maxfp, float32_max) +#undef VARITHFP + +#define VARITHFPFMA(suffix, type) \ + void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \ + ppc_avr_t *b, ppc_avr_t *c) \ + { \ + int i; \ + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ + r->f32[i] = float32_muladd(a->f32[i], c->f32[i], b->f32[i], \ + type, &env->vec_status); \ + } \ + } 
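Note: the macro-generated Altivec helpers can be hard to read. As a sketch only (not part of the patch), the instantiation VARITHFPFMA(maddfp, 0) just below expands to roughly:

/* Sketch of the expansion: a fused multiply-add over the four float32
 * lanes, computing a*c + b per lane under the vector FP status.
 */
void helper_vmaddfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                    ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
        r->f32[i] = float32_muladd(a->f32[i], c->f32[i], b->f32[i],
                                   0, &env->vec_status);
    }
}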
+VARITHFPFMA(maddfp, 0); +VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c); +#undef VARITHFPFMA + +#define VARITHSAT_CASE(type, op, cvt, element) \ + { \ + type result = (type)a->element[i] op (type)b->element[i]; \ + r->element[i] = cvt(result, &sat); \ + } + +#define VARITHSAT_DO(name, op, optype, cvt, element) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *vscr_sat, \ + ppc_avr_t *a, ppc_avr_t *b, uint32_t desc) \ + { \ + int sat = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + VARITHSAT_CASE(optype, op, cvt, element); \ + } \ + if (sat) { \ + vscr_sat->u32[0] = 1; \ + } \ + } +#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \ + VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \ + VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element) +#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \ + VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \ + VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element) +VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb) +VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh) +VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw) +VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub) +VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh) +VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw) +#undef VARITHSAT_CASE +#undef VARITHSAT_DO +#undef VARITHSAT_SIGNED +#undef VARITHSAT_UNSIGNED + +#define VAVG_DO(name, element, etype) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \ + r->element[i] = x >> 1; \ + } \ + } + +#define VAVG(type, signed_element, signed_type, unsigned_element, \ + unsigned_type) \ + VAVG_DO(avgs##type, signed_element, signed_type) \ + VAVG_DO(avgu##type, unsigned_element, unsigned_type) +VAVG(b, s8, int16_t, u8, uint16_t) +VAVG(h, s16, int32_t, u16, uint32_t) +VAVG(w, s32, int64_t, u32, uint64_t) +#undef VAVG_DO +#undef VAVG + +#define VABSDU_DO(name, element) \ +void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ +{ \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + r->element[i] = (a->element[i] > b->element[i]) ? \ + (a->element[i] - b->element[i]) : \ + (b->element[i] - a->element[i]); \ + } \ +} + +/* + * VABSDU - Vector absolute difference unsigned + * name - instruction mnemonic suffix (b: byte, h: halfword, w: word) + * element - element type to access from vector + */ +#define VABSDU(type, element) \ + VABSDU_DO(absdu##type, element) +VABSDU(b, u8) +VABSDU(h, u16) +VABSDU(w, u32) +#undef VABSDU_DO +#undef VABSDU + +#define VCF(suffix, cvt, element) \ + void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *b, uint32_t uim) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ + float32 t = cvt(b->element[i], &env->vec_status); \ + r->f32[i] = float32_scalbn(t, 0 - uim, &env->vec_status); \ + } \ + } +VCF(ux, uint32_to_float32, u32) +VCF(sx, int32_to_float32, s32) +#undef VCF + +#define VCMP_DO(suffix, compare, element, record) \ + void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *a, ppc_avr_t *b) \ + { \ + uint64_t ones = (uint64_t)-1; \ + uint64_t all = ones; \ + uint64_t none = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + uint64_t result = (a->element[i] compare b->element[i] ? 
\ + ones : 0x0); \ + switch (sizeof(a->element[0])) { \ + case 8: \ + r->u64[i] = result; \ + break; \ + case 4: \ + r->u32[i] = result; \ + break; \ + case 2: \ + r->u16[i] = result; \ + break; \ + case 1: \ + r->u8[i] = result; \ + break; \ + } \ + all &= result; \ + none |= result; \ + } \ + if (record) { \ + env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ + } \ + } +#define VCMP(suffix, compare, element) \ + VCMP_DO(suffix, compare, element, 0) \ + VCMP_DO(suffix##_dot, compare, element, 1) +VCMP(equb, ==, u8) +VCMP(equh, ==, u16) +VCMP(equw, ==, u32) +VCMP(equd, ==, u64) +VCMP(gtub, >, u8) +VCMP(gtuh, >, u16) +VCMP(gtuw, >, u32) +VCMP(gtud, >, u64) +VCMP(gtsb, >, s8) +VCMP(gtsh, >, s16) +VCMP(gtsw, >, s32) +VCMP(gtsd, >, s64) +#undef VCMP_DO +#undef VCMP + +#define VCMPNE_DO(suffix, element, etype, cmpzero, record) \ +void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *a, ppc_avr_t *b) \ +{ \ + etype ones = (etype)-1; \ + etype all = ones; \ + etype result, none = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + if (cmpzero) { \ + result = ((a->element[i] == 0) \ + || (b->element[i] == 0) \ + || (a->element[i] != b->element[i]) ? \ + ones : 0x0); \ + } else { \ + result = (a->element[i] != b->element[i]) ? ones : 0x0; \ + } \ + r->element[i] = result; \ + all &= result; \ + none |= result; \ + } \ + if (record) { \ + env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ + } \ +} + +/* + * VCMPNEZ - Vector compare not equal to zero + * suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word) + * element - element type to access from vector + */ +#define VCMPNE(suffix, element, etype, cmpzero) \ + VCMPNE_DO(suffix, element, etype, cmpzero, 0) \ + VCMPNE_DO(suffix##_dot, element, etype, cmpzero, 1) +VCMPNE(zb, u8, uint8_t, 1) +VCMPNE(zh, u16, uint16_t, 1) +VCMPNE(zw, u32, uint32_t, 1) +VCMPNE(b, u8, uint8_t, 0) +VCMPNE(h, u16, uint16_t, 0) +VCMPNE(w, u32, uint32_t, 0) +#undef VCMPNE_DO +#undef VCMPNE + +#define VCMPFP_DO(suffix, compare, order, record) \ + void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *a, ppc_avr_t *b) \ + { \ + uint32_t ones = (uint32_t)-1; \ + uint32_t all = ones; \ + uint32_t none = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ + uint32_t result; \ + int rel = float32_compare_quiet(a->f32[i], b->f32[i], \ + &env->vec_status); \ + if (rel == float_relation_unordered) { \ + result = 0; \ + } else if (rel compare order) { \ + result = ones; \ + } else { \ + result = 0; \ + } \ + r->u32[i] = result; \ + all &= result; \ + none |= result; \ + } \ + if (record) { \ + env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \ + } \ + } +#define VCMPFP(suffix, compare, order) \ + VCMPFP_DO(suffix, compare, order, 0) \ + VCMPFP_DO(suffix##_dot, compare, order, 1) +VCMPFP(eqfp, ==, float_relation_equal) +VCMPFP(gefp, !=, float_relation_less) +VCMPFP(gtfp, ==, float_relation_greater) +#undef VCMPFP_DO +#undef VCMPFP + +static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r, + ppc_avr_t *a, ppc_avr_t *b, int record) +{ + int i; + int all_in = 0; + + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { + int le_rel = float32_compare_quiet(a->f32[i], b->f32[i], + &env->vec_status); + if (le_rel == float_relation_unordered) { + r->u32[i] = 0xc0000000; + all_in = 1; + } else { + float32 bneg = float32_chs(b->f32[i]); + int ge_rel = float32_compare_quiet(a->f32[i], bneg, + &env->vec_status); + int le = le_rel != float_relation_greater; + int ge = ge_rel != 
float_relation_less; + + r->u32[i] = ((!le) << 31) | ((!ge) << 30); + all_in |= (!le | !ge); + } + } + if (record) { + env->crf[6] = (all_in == 0) << 1; + } +} + +void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + vcmpbfp_internal(env, r, a, b, 0); +} + +void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b) +{ + vcmpbfp_internal(env, r, a, b, 1); +} + +#define VCT(suffix, satcvt, element) \ + void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *b, uint32_t uim) \ + { \ + int i; \ + int sat = 0; \ + float_status s = env->vec_status; \ + \ + set_float_rounding_mode(float_round_to_zero, &s); \ + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ + if (float32_is_any_nan(b->f32[i])) { \ + r->element[i] = 0; \ + } else { \ + float64 t = float32_to_float64(b->f32[i], &s); \ + int64_t j; \ + \ + t = float64_scalbn(t, uim, &s); \ + j = float64_to_int64(t, &s); \ + r->element[i] = satcvt(j, &sat); \ + } \ + } \ + if (sat) { \ + set_vscr_sat(env); \ + } \ + } +VCT(uxs, cvtsduw, u32) +VCT(sxs, cvtsdsw, s32) +#undef VCT + +target_ulong helper_vclzlsbb(ppc_avr_t *r) +{ + target_ulong count = 0; + int i; + for (i = 0; i < ARRAY_SIZE(r->u8); i++) { + if (r->VsrB(i) & 0x01) { + break; + } + count++; + } + return count; +} + +target_ulong helper_vctzlsbb(ppc_avr_t *r) +{ + target_ulong count = 0; + int i; + for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) { + if (r->VsrB(i) & 0x01) { + break; + } + count++; + } + return count; +} + +void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int sat = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(r->s16); i++) { + int32_t prod = a->s16[i] * b->s16[i]; + int32_t t = (int32_t)c->s16[i] + (prod >> 15); + + r->s16[i] = cvtswsh(t, &sat); + } + + if (sat) { + set_vscr_sat(env); + } +} + +void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int sat = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(r->s16); i++) { + int32_t prod = a->s16[i] * b->s16[i] + 0x00004000; + int32_t t = (int32_t)c->s16[i] + (prod >> 15); + r->s16[i] = cvtswsh(t, &sat); + } + + if (sat) { + set_vscr_sat(env); + } +} + +void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->s16); i++) { + int32_t prod = a->s16[i] * b->s16[i]; + r->s16[i] = (int16_t) (prod + c->s16[i]); + } +} + +#define VMRG_DO(name, element, access, ofs) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + ppc_avr_t result; \ + int i, half = ARRAY_SIZE(r->element) / 2; \ + \ + for (i = 0; i < half; i++) { \ + result.access(i * 2 + 0) = a->access(i + ofs); \ + result.access(i * 2 + 1) = b->access(i + ofs); \ + } \ + *r = result; \ + } + +#define VMRG(suffix, element, access) \ + VMRG_DO(mrgl##suffix, element, access, half) \ + VMRG_DO(mrgh##suffix, element, access, 0) +VMRG(b, u8, VsrB) +VMRG(h, u16, VsrH) +VMRG(w, u32, VsrW) +#undef VMRG_DO +#undef VMRG + +void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int32_t prod[16]; + int i; + + for (i = 0; i < ARRAY_SIZE(r->s8); i++) { + prod[i] = (int32_t)a->s8[i] * b->u8[i]; + } + + VECTOR_FOR_INORDER_I(i, s32) { + r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] + + prod[4 * i + 2] + prod[4 * i + 3]; + } +} + +void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int32_t prod[8]; + int i; + + for (i = 0; i < ARRAY_SIZE(r->s16); 
i++) { + prod[i] = a->s16[i] * b->s16[i]; + } + + VECTOR_FOR_INORDER_I(i, s32) { + r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1]; + } +} + +void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + int32_t prod[8]; + int i; + int sat = 0; + + for (i = 0; i < ARRAY_SIZE(r->s16); i++) { + prod[i] = (int32_t)a->s16[i] * b->s16[i]; + } + + VECTOR_FOR_INORDER_I(i, s32) { + int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1]; + + r->u32[i] = cvtsdsw(t, &sat); + } + + if (sat) { + set_vscr_sat(env); + } +} + +void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + uint16_t prod[16]; + int i; + + for (i = 0; i < ARRAY_SIZE(r->u8); i++) { + prod[i] = a->u8[i] * b->u8[i]; + } + + VECTOR_FOR_INORDER_I(i, u32) { + r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] + + prod[4 * i + 2] + prod[4 * i + 3]; + } +} + +void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + uint32_t prod[8]; + int i; + + for (i = 0; i < ARRAY_SIZE(r->u16); i++) { + prod[i] = a->u16[i] * b->u16[i]; + } + + VECTOR_FOR_INORDER_I(i, u32) { + r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1]; + } +} + +void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, + ppc_avr_t *b, ppc_avr_t *c) +{ + uint32_t prod[8]; + int i; + int sat = 0; + + for (i = 0; i < ARRAY_SIZE(r->u16); i++) { + prod[i] = a->u16[i] * b->u16[i]; + } + + VECTOR_FOR_INORDER_I(i, s32) { + uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1]; + + r->u32[i] = cvtuduw(t, &sat); + } + + if (sat) { + set_vscr_sat(env); + } +} + +#define VMUL_DO_EVN(name, mul_element, mul_access, prod_access, cast) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->mul_element); i += 2) { \ + r->prod_access(i >> 1) = (cast)a->mul_access(i) * \ + (cast)b->mul_access(i); \ + } \ + } + +#define VMUL_DO_ODD(name, mul_element, mul_access, prod_access, cast) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->mul_element); i += 2) { \ + r->prod_access(i >> 1) = (cast)a->mul_access(i + 1) * \ + (cast)b->mul_access(i + 1); \ + } \ + } + +#define VMUL(suffix, mul_element, mul_access, prod_access, cast) \ + VMUL_DO_EVN(mule##suffix, mul_element, mul_access, prod_access, cast) \ + VMUL_DO_ODD(mulo##suffix, mul_element, mul_access, prod_access, cast) +VMUL(sb, s8, VsrSB, VsrSH, int16_t) +VMUL(sh, s16, VsrSH, VsrSW, int32_t) +VMUL(sw, s32, VsrSW, VsrSD, int64_t) +VMUL(ub, u8, VsrB, VsrH, uint16_t) +VMUL(uh, u16, VsrH, VsrW, uint32_t) +VMUL(uw, u32, VsrW, VsrD, uint64_t) +#undef VMUL_DO_EVN +#undef VMUL_DO_ODD +#undef VMUL + +void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, + ppc_avr_t *c) +{ + ppc_avr_t result; + int i; + + for (i = 0; i < ARRAY_SIZE(r->u8); i++) { + int s = c->VsrB(i) & 0x1f; + int index = s & 0xf; + + if (s & 0x10) { + result.VsrB(i) = b->VsrB(index); + } else { + result.VsrB(i) = a->VsrB(index); + } + } + *r = result; +} + +void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, + ppc_avr_t *c) +{ + ppc_avr_t result; + int i; + + for (i = 0; i < ARRAY_SIZE(r->u8); i++) { + int s = c->VsrB(i) & 0x1f; + int index = 15 - (s & 0xf); + + if (s & 0x10) { + result.VsrB(i) = a->VsrB(index); + } else { + result.VsrB(i) = b->VsrB(index); + } + } + *r = result; +} + +#if defined(HOST_WORDS_BIGENDIAN) +#define 
VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)]) +#define VBPERMD_INDEX(i) (i) +#define VBPERMQ_DW(index) (((index) & 0x40) != 0) +#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1)) +#else +#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)]) +#define VBPERMD_INDEX(i) (1 - i) +#define VBPERMQ_DW(index) (((index) & 0x40) == 0) +#define EXTRACT_BIT(avr, i, index) \ + (extract64((avr)->u64[1 - i], 63 - index, 1)) +#endif + +void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i, j; + ppc_avr_t result = { .u64 = { 0, 0 } }; + VECTOR_FOR_INORDER_I(i, u64) { + for (j = 0; j < 8; j++) { + int index = VBPERMQ_INDEX(b, (i * 8) + j); + if (index < 64 && EXTRACT_BIT(a, i, index)) { + result.u64[VBPERMD_INDEX(i)] |= (0x80 >> j); + } + } + } + *r = result; +} + +void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i; + uint64_t perm = 0; + + VECTOR_FOR_INORDER_I(i, u8) { + int index = VBPERMQ_INDEX(b, i); + + if (index < 128) { + uint64_t mask = (1ull << (63 - (index & 0x3F))); + if (a->u64[VBPERMQ_DW(index)] & mask) { + perm |= (0x8000 >> i); + } + } + } + + r->VsrD(0) = perm; + r->VsrD(1) = 0; +} + +#undef VBPERMQ_INDEX +#undef VBPERMQ_DW + +#define PMSUM(name, srcfld, trgfld, trgtyp) \ +void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ +{ \ + int i, j; \ + trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])]; \ + \ + VECTOR_FOR_INORDER_I(i, srcfld) { \ + prod[i] = 0; \ + for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) { \ + if (a->srcfld[i] & (1ull << j)) { \ + prod[i] ^= ((trgtyp)b->srcfld[i] << j); \ + } \ + } \ + } \ + \ + VECTOR_FOR_INORDER_I(i, trgfld) { \ + r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1]; \ + } \ +} + +PMSUM(vpmsumb, u8, u16, uint16_t) +PMSUM(vpmsumh, u16, u32, uint32_t) +PMSUM(vpmsumw, u32, u64, uint64_t) + +void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + +#ifdef CONFIG_INT128 + int i, j; + __uint128_t prod[2]; + + VECTOR_FOR_INORDER_I(i, u64) { + prod[i] = 0; + for (j = 0; j < 64; j++) { + if (a->u64[i] & (1ull << j)) { + prod[i] ^= (((__uint128_t)b->u64[i]) << j); + } + } + } + + r->u128 = prod[0] ^ prod[1]; + +#else + int i, j; + ppc_avr_t prod[2]; + + VECTOR_FOR_INORDER_I(i, u64) { + prod[i].VsrD(1) = prod[i].VsrD(0) = 0; + for (j = 0; j < 64; j++) { + if (a->u64[i] & (1ull << j)) { + ppc_avr_t bshift; + if (j == 0) { + bshift.VsrD(0) = 0; + bshift.VsrD(1) = b->u64[i]; + } else { + bshift.VsrD(0) = b->u64[i] >> (64 - j); + bshift.VsrD(1) = b->u64[i] << j; + } + prod[i].VsrD(1) ^= bshift.VsrD(1); + prod[i].VsrD(0) ^= bshift.VsrD(0); + } + } + } + + r->VsrD(1) = prod[0].VsrD(1) ^ prod[1].VsrD(1); + r->VsrD(0) = prod[0].VsrD(0) ^ prod[1].VsrD(0); +#endif +} + + +#if defined(HOST_WORDS_BIGENDIAN) +#define PKBIG 1 +#else +#define PKBIG 0 +#endif +void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i, j; + ppc_avr_t result; +#if defined(HOST_WORDS_BIGENDIAN) + const ppc_avr_t *x[2] = { a, b }; +#else + const ppc_avr_t *x[2] = { b, a }; +#endif + + VECTOR_FOR_INORDER_I(i, u64) { + VECTOR_FOR_INORDER_I(j, u32) { + uint32_t e = x[i]->u32[j]; + + result.u16[4 * i + j] = (((e >> 9) & 0xfc00) | + ((e >> 6) & 0x3e0) | + ((e >> 3) & 0x1f)); + } + } + *r = result; +} + +#define VPK(suffix, from, to, cvt, dosat) \ + void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + int sat = 0; \ + ppc_avr_t result; \ + ppc_avr_t *a0 = PKBIG ? a : b; \ + ppc_avr_t *a1 = PKBIG ? 
b : a; \ + \ + VECTOR_FOR_INORDER_I(i, from) { \ + result.to[i] = cvt(a0->from[i], &sat); \ + result.to[i + ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);\ + } \ + *r = result; \ + if (dosat && sat) { \ + set_vscr_sat(env); \ + } \ + } +#define I(x, y) (x) +VPK(shss, s16, s8, cvtshsb, 1) +VPK(shus, s16, u8, cvtshub, 1) +VPK(swss, s32, s16, cvtswsh, 1) +VPK(swus, s32, u16, cvtswuh, 1) +VPK(sdss, s64, s32, cvtsdsw, 1) +VPK(sdus, s64, u32, cvtsduw, 1) +VPK(uhus, u16, u8, cvtuhub, 1) +VPK(uwus, u32, u16, cvtuwuh, 1) +VPK(udus, u64, u32, cvtuduw, 1) +VPK(uhum, u16, u8, I, 0) +VPK(uwum, u32, u16, I, 0) +VPK(udum, u64, u32, I, 0) +#undef I +#undef VPK +#undef PKBIG + +void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { + r->f32[i] = float32_div(float32_one, b->f32[i], &env->vec_status); + } +} + +#define VRFI(suffix, rounding) \ + void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r, \ + ppc_avr_t *b) \ + { \ + int i; \ + float_status s = env->vec_status; \ + \ + set_float_rounding_mode(rounding, &s); \ + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \ + r->f32[i] = float32_round_to_int (b->f32[i], &s); \ + } \ + } +VRFI(n, float_round_nearest_even) +VRFI(m, float_round_down) +VRFI(p, float_round_up) +VRFI(z, float_round_to_zero) +#undef VRFI + +#define VROTATE(suffix, element, mask) \ + void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + unsigned int shift = b->element[i] & mask; \ + r->element[i] = (a->element[i] << shift) | \ + (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \ + } \ + } +VROTATE(b, u8, 0x7) +VROTATE(h, u16, 0xF) +VROTATE(w, u32, 0x1F) +VROTATE(d, u64, 0x3F) +#undef VROTATE + +void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { + float32 t = float32_sqrt(b->f32[i], &env->vec_status); + + r->f32[i] = float32_div(float32_one, t, &env->vec_status); + } +} + +#define VRLMI(name, size, element, insert) \ +void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ +{ \ + int i; \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + uint##size##_t src1 = a->element[i]; \ + uint##size##_t src2 = b->element[i]; \ + uint##size##_t src3 = r->element[i]; \ + uint##size##_t begin, end, shift, mask, rot_val; \ + \ + shift = extract##size(src2, 0, 6); \ + end = extract##size(src2, 8, 6); \ + begin = extract##size(src2, 16, 6); \ + rot_val = rol##size(src1, shift); \ + mask = mask_u##size(begin, end); \ + if (insert) { \ + r->element[i] = (rot_val & mask) | (src3 & ~mask); \ + } else { \ + r->element[i] = (rot_val & mask); \ + } \ + } \ +} + +VRLMI(vrldmi, 64, u64, 1); +VRLMI(vrlwmi, 32, u32, 1); +VRLMI(vrldnm, 64, u64, 0); +VRLMI(vrlwnm, 32, u32, 0); + +void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, + ppc_avr_t *c) +{ + r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]); + r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]); +} + +void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { + r->f32[i] = float32_exp2(b->f32[i], &env->vec_status); + } +} + +void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->f32); i++) { + r->f32[i] = float32_log2(b->f32[i], &env->vec_status); + } +} + +#if defined(HOST_WORDS_BIGENDIAN) +#define VEXTU_X_DO(name, size, left) \ + target_ulong 
glue(helper_, name)(target_ulong a, ppc_avr_t *b) \ + { \ + int index; \ + if (left) { \ + index = (a & 0xf) * 8; \ + } else { \ + index = ((15 - (a & 0xf) + 1) * 8) - size; \ + } \ + return int128_getlo(int128_rshift(b->s128, index)) & \ + MAKE_64BIT_MASK(0, size); \ + } +#else +#define VEXTU_X_DO(name, size, left) \ + target_ulong glue(helper_, name)(target_ulong a, ppc_avr_t *b) \ + { \ + int index; \ + if (left) { \ + index = ((15 - (a & 0xf) + 1) * 8) - size; \ + } else { \ + index = (a & 0xf) * 8; \ + } \ + return int128_getlo(int128_rshift(b->s128, index)) & \ + MAKE_64BIT_MASK(0, size); \ + } +#endif + +VEXTU_X_DO(vextublx, 8, 1) +VEXTU_X_DO(vextuhlx, 16, 1) +VEXTU_X_DO(vextuwlx, 32, 1) +VEXTU_X_DO(vextubrx, 8, 0) +VEXTU_X_DO(vextuhrx, 16, 0) +VEXTU_X_DO(vextuwrx, 32, 0) +#undef VEXTU_X_DO + +void helper_vslv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i; + unsigned int shift, bytes, size; + + size = ARRAY_SIZE(r->u8); + for (i = 0; i < size; i++) { + shift = b->VsrB(i) & 0x7; /* extract shift value */ + bytes = (a->VsrB(i) << 8) + /* extract adjacent bytes */ + (((i + 1) < size) ? a->VsrB(i + 1) : 0); + r->VsrB(i) = (bytes << shift) >> 8; /* shift and store result */ + } +} + +void helper_vsrv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i; + unsigned int shift, bytes; + + /* + * Use reverse order, as destination and source registers can be + * the same. The result is computed in place, saving a temporary; + * reverse order guarantees that already-computed bytes are not + * fed back into the computation. + */ + for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) { + shift = b->VsrB(i) & 0x7; /* extract shift value */ + bytes = ((i ? a->VsrB(i - 1) : 0) << 8) + a->VsrB(i); + /* extract adjacent bytes */ + r->VsrB(i) = (bytes >> shift) & 0xFF; /* shift and store result */ + } +} + +void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift) +{ + int sh = shift & 0xf; + int i; + ppc_avr_t result; + + for (i = 0; i < ARRAY_SIZE(r->u8); i++) { + int index = sh + i; + if (index > 0xf) { + result.VsrB(i) = b->VsrB(index - 0x10); + } else { + result.VsrB(i) = a->VsrB(index); + } + } + *r = result; +} + +void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int sh = (b->VsrB(0xf) >> 3) & 0xf; + +#if defined(HOST_WORDS_BIGENDIAN) + memmove(&r->u8[0], &a->u8[sh], 16 - sh); + memset(&r->u8[16 - sh], 0, sh); +#else + memmove(&r->u8[sh], &a->u8[0], 16 - sh); + memset(&r->u8[0], 0, sh); +#endif +} + +#if defined(HOST_WORDS_BIGENDIAN) +#define VINSERT(suffix, element) \ + void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ + { \ + memmove(&r->u8[index], &b->u8[8 - sizeof(r->element[0])], \ + sizeof(r->element[0])); \ + } +#else +#define VINSERT(suffix, element) \ + void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ + { \ + uint32_t d = (16 - index) - sizeof(r->element[0]); \ + memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \ + } +#endif +VINSERT(b, u8) +VINSERT(h, u16) +VINSERT(w, u32) +VINSERT(d, u64) +#undef VINSERT +#if defined(HOST_WORDS_BIGENDIAN) +#define VEXTRACT(suffix, element) \ + void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ + { \ + uint32_t es = sizeof(r->element[0]); \ + memmove(&r->u8[8 - es], &b->u8[index], es); \ + memset(&r->u8[8], 0, 8); \ + memset(&r->u8[0], 0, 8 - es); \ + } +#else +#define VEXTRACT(suffix, element) \ + void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ + { \ + uint32_t es = sizeof(r->element[0]); \ + uint32_t s = (16 - index) - es; \ + memmove(&r->u8[8],
&b->u8[s], es); \ + memset(&r->u8[0], 0, 8); \ + memset(&r->u8[8 + es], 0, 8 - es); \ + } +#endif +VEXTRACT(ub, u8) +VEXTRACT(uh, u16) +VEXTRACT(uw, u32) +VEXTRACT(d, u64) +#undef VEXTRACT + +void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt, + ppc_vsr_t *xb, uint32_t index) +{ + ppc_vsr_t t = { 0 }; + size_t es = sizeof(uint32_t); + uint32_t ext_index; + int i; + + ext_index = index; + for (i = 0; i < es; i++, ext_index++) { + t.VsrB(8 - es + i) = xb->VsrB(ext_index % 16); + } + + *xt = t; +} + +void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt, + ppc_vsr_t *xb, uint32_t index) +{ + ppc_vsr_t t = *xt; + size_t es = sizeof(uint32_t); + int ins_index, i = 0; + + ins_index = index; + for (i = 0; i < es && ins_index < 16; i++, ins_index++) { + t.VsrB(ins_index) = xb->VsrB(8 - es + i); + } + + *xt = t; +} + +#define VEXT_SIGNED(name, element, cast) \ +void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \ +{ \ + int i; \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + r->element[i] = (cast)b->element[i]; \ + } \ +} +VEXT_SIGNED(vextsb2w, s32, int8_t) +VEXT_SIGNED(vextsb2d, s64, int8_t) +VEXT_SIGNED(vextsh2w, s32, int16_t) +VEXT_SIGNED(vextsh2d, s64, int16_t) +VEXT_SIGNED(vextsw2d, s64, int32_t) +#undef VEXT_SIGNED + +#define VNEG(name, element) \ +void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \ +{ \ + int i; \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + r->element[i] = -b->element[i]; \ + } \ +} +VNEG(vnegw, s32) +VNEG(vnegd, s64) +#undef VNEG + +void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int sh = (b->VsrB(0xf) >> 3) & 0xf; + +#if defined(HOST_WORDS_BIGENDIAN) + memmove(&r->u8[sh], &a->u8[0], 16 - sh); + memset(&r->u8[0], 0, sh); +#else + memmove(&r->u8[0], &a->u8[sh], 16 - sh); + memset(&r->u8[16 - sh], 0, sh); +#endif +} + +void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(r->u32); i++) { + r->u32[i] = a->u32[i] >= b->u32[i]; + } +} + +void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int64_t t; + int i, upper; + ppc_avr_t result; + int sat = 0; + + upper = ARRAY_SIZE(r->s32) - 1; + t = (int64_t)b->VsrSW(upper); + for (i = 0; i < ARRAY_SIZE(r->s32); i++) { + t += a->VsrSW(i); + result.VsrSW(i) = 0; + } + result.VsrSW(upper) = cvtsdsw(t, &sat); + *r = result; + + if (sat) { + set_vscr_sat(env); + } +} + +void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i, j, upper; + ppc_avr_t result; + int sat = 0; + + upper = 1; + for (i = 0; i < ARRAY_SIZE(r->u64); i++) { + int64_t t = (int64_t)b->VsrSW(upper + i * 2); + + result.VsrD(i) = 0; + for (j = 0; j < ARRAY_SIZE(r->u64); j++) { + t += a->VsrSW(2 * i + j); + } + result.VsrSW(upper + i * 2) = cvtsdsw(t, &sat); + } + + *r = result; + if (sat) { + set_vscr_sat(env); + } +} + +void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int i, j; + int sat = 0; + + for (i = 0; i < ARRAY_SIZE(r->s32); i++) { + int64_t t = (int64_t)b->s32[i]; + + for (j = 0; j < ARRAY_SIZE(r->s32); j++) { + t += a->s8[4 * i + j]; + } + r->s32[i] = cvtsdsw(t, &sat); + } + + if (sat) { + set_vscr_sat(env); + } +} + +void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + int sat = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(r->s32); i++) { + int64_t t = (int64_t)b->s32[i]; + + t += a->s16[2 * i] + a->s16[2 * i + 1]; + r->s32[i] = cvtsdsw(t, &sat); + } + + if (sat) { + set_vscr_sat(env); + } +} + +void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, 
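/*
 * The vsum* helpers above accumulate in 64 bits and then narrow through
 * cvtsdsw()/cvtuduw(), which clamp the sum and latch VSCR[SAT] on
 * overflow.  A sketch of what such a narrowing clamp does, assuming the
 * cvt* routines follow this shape (the name here is illustrative):
 */
#include <stdint.h>

static int32_t cvt_s64_to_s32_sat(int64_t x, int *sat)
{
    if (x > INT32_MAX) {
        *sat = 1;               /* recorded later via set_vscr_sat() */
        return INT32_MAX;
    }
    if (x < INT32_MIN) {
        *sat = 1;
        return INT32_MIN;
    }
    return (int32_t)x;
}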
ppc_avr_t *a, ppc_avr_t *b) +{ + int i, j; + int sat = 0; + + for (i = 0; i < ARRAY_SIZE(r->u32); i++) { + uint64_t t = (uint64_t)b->u32[i]; + + for (j = 0; j < ARRAY_SIZE(r->u32); j++) { + t += a->u8[4 * i + j]; + } + r->u32[i] = cvtuduw(t, &sat); + } + + if (sat) { + set_vscr_sat(env); + } +} + +#if defined(HOST_WORDS_BIGENDIAN) +#define UPKHI 1 +#define UPKLO 0 +#else +#define UPKHI 0 +#define UPKLO 1 +#endif +#define VUPKPX(suffix, hi) \ + void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \ + { \ + int i; \ + ppc_avr_t result; \ + \ + for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \ + uint16_t e = b->u16[hi ? i : i + 4]; \ + uint8_t a = (e >> 15) ? 0xff : 0; \ + uint8_t r = (e >> 10) & 0x1f; \ + uint8_t g = (e >> 5) & 0x1f; \ + uint8_t b = e & 0x1f; \ + \ + result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \ + } \ + *r = result; \ + } +VUPKPX(lpx, UPKLO) +VUPKPX(hpx, UPKHI) +#undef VUPKPX + +#define VUPK(suffix, unpacked, packee, hi) \ + void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \ + { \ + int i; \ + ppc_avr_t result; \ + \ + if (hi) { \ + for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \ + result.unpacked[i] = b->packee[i]; \ + } \ + } else { \ + for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \ + i++) { \ + result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \ + } \ + } \ + *r = result; \ + } +VUPK(hsb, s16, s8, UPKHI) +VUPK(hsh, s32, s16, UPKHI) +VUPK(hsw, s64, s32, UPKHI) +VUPK(lsb, s16, s8, UPKLO) +VUPK(lsh, s32, s16, UPKLO) +VUPK(lsw, s64, s32, UPKLO) +#undef VUPK +#undef UPKHI +#undef UPKLO + +#define VGENERIC_DO(name, element) \ + void helper_v##name(ppc_avr_t *r, ppc_avr_t *b) \ + { \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(r->element); i++) { \ + r->element[i] = name(b->element[i]); \ + } \ + } + +#define clzb(v) ((v) ? clz32((uint32_t)(v) << 24) : 8) +#define clzh(v) ((v) ? clz32((uint32_t)(v) << 16) : 16) + +VGENERIC_DO(clzb, u8) +VGENERIC_DO(clzh, u16) + +#undef clzb +#undef clzh + +#define ctzb(v) ((v) ? ctz32(v) : 8) +#define ctzh(v) ((v) ? 
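/*
 * clzb()/clzh() above widen a narrow element into the top of a 32-bit
 * word so that a single clz32() primitive serves every element size;
 * the ternary handles the all-zero case explicitly.  A standalone
 * check, substituting the GCC/Clang builtin for QEMU's clz32():
 */
#include <assert.h>
#include <stdint.h>

static inline int my_clz32(uint32_t v)
{
    return v ? __builtin_clz(v) : 32;   /* __builtin_clz(0) is undefined */
}

#define clzb(v) ((v) ? my_clz32((uint32_t)(v) << 24) : 8)

int main(void)
{
    assert(clzb(0x80) == 0);   /* 1000 0000 */
    assert(clzb(0x10) == 3);   /* 0001 0000 */
    assert(clzb(0x00) == 8);   /* an all-zero byte has 8 leading zeros */
    return 0;
}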
ctz32(v) : 16) +#define ctzw(v) ctz32((v)) +#define ctzd(v) ctz64((v)) + +VGENERIC_DO(ctzb, u8) +VGENERIC_DO(ctzh, u16) +VGENERIC_DO(ctzw, u32) +VGENERIC_DO(ctzd, u64) + +#undef ctzb +#undef ctzh +#undef ctzw +#undef ctzd + +#define popcntb(v) ctpop8(v) +#define popcnth(v) ctpop16(v) +#define popcntw(v) ctpop32(v) +#define popcntd(v) ctpop64(v) + +VGENERIC_DO(popcntb, u8) +VGENERIC_DO(popcnth, u16) +VGENERIC_DO(popcntw, u32) +VGENERIC_DO(popcntd, u64) + +#undef popcntb +#undef popcnth +#undef popcntw +#undef popcntd + +#undef VGENERIC_DO + +#if defined(HOST_WORDS_BIGENDIAN) +#define QW_ONE { .u64 = { 0, 1 } } +#else +#define QW_ONE { .u64 = { 1, 0 } } +#endif + +#ifndef CONFIG_INT128 + +static inline void avr_qw_not(ppc_avr_t *t, ppc_avr_t a) +{ + t->u64[0] = ~a.u64[0]; + t->u64[1] = ~a.u64[1]; +} + +static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b) +{ + if (a.VsrD(0) < b.VsrD(0)) { + return -1; + } else if (a.VsrD(0) > b.VsrD(0)) { + return 1; + } else if (a.VsrD(1) < b.VsrD(1)) { + return -1; + } else if (a.VsrD(1) > b.VsrD(1)) { + return 1; + } else { + return 0; + } +} + +static void avr_qw_add(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b) +{ + t->VsrD(1) = a.VsrD(1) + b.VsrD(1); + t->VsrD(0) = a.VsrD(0) + b.VsrD(0) + + (~a.VsrD(1) < b.VsrD(1)); +} + +static int avr_qw_addc(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b) +{ + ppc_avr_t not_a; + t->VsrD(1) = a.VsrD(1) + b.VsrD(1); + t->VsrD(0) = a.VsrD(0) + b.VsrD(0) + + (~a.VsrD(1) < b.VsrD(1)); + avr_qw_not(¬_a, a); + return avr_qw_cmpu(not_a, b) < 0; +} + +#endif + +void helper_vadduqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ +#ifdef CONFIG_INT128 + r->u128 = a->u128 + b->u128; +#else + avr_qw_add(r, *a, *b); +#endif +} + +void helper_vaddeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +{ +#ifdef CONFIG_INT128 + r->u128 = a->u128 + b->u128 + (c->u128 & 1); +#else + + if (c->VsrD(1) & 1) { + ppc_avr_t tmp; + + tmp.VsrD(0) = 0; + tmp.VsrD(1) = c->VsrD(1) & 1; + avr_qw_add(&tmp, *a, tmp); + avr_qw_add(r, tmp, *b); + } else { + avr_qw_add(r, *a, *b); + } +#endif +} + +void helper_vaddcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ +#ifdef CONFIG_INT128 + r->u128 = (~a->u128 < b->u128); +#else + ppc_avr_t not_a; + + avr_qw_not(¬_a, *a); + + r->VsrD(0) = 0; + r->VsrD(1) = (avr_qw_cmpu(not_a, *b) < 0); +#endif +} + +void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +{ +#ifdef CONFIG_INT128 + int carry_out = (~a->u128 < b->u128); + if (!carry_out && (c->u128 & 1)) { + carry_out = ((a->u128 + b->u128 + 1) == 0) && + ((a->u128 != 0) || (b->u128 != 0)); + } + r->u128 = carry_out; +#else + + int carry_in = c->VsrD(1) & 1; + int carry_out = 0; + ppc_avr_t tmp; + + carry_out = avr_qw_addc(&tmp, *a, *b); + + if (!carry_out && carry_in) { + ppc_avr_t one = QW_ONE; + carry_out = avr_qw_addc(&tmp, tmp, one); + } + r->VsrD(0) = 0; + r->VsrD(1) = carry_out; +#endif +} + +void helper_vsubuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ +#ifdef CONFIG_INT128 + r->u128 = a->u128 - b->u128; +#else + ppc_avr_t tmp; + ppc_avr_t one = QW_ONE; + + avr_qw_not(&tmp, *b); + avr_qw_add(&tmp, *a, tmp); + avr_qw_add(r, tmp, one); +#endif +} + +void helper_vsubeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +{ +#ifdef CONFIG_INT128 + r->u128 = a->u128 + ~b->u128 + (c->u128 & 1); +#else + ppc_avr_t tmp, sum; + + avr_qw_not(&tmp, *b); + avr_qw_add(&sum, *a, tmp); + + tmp.VsrD(0) = 0; + tmp.VsrD(1) = c->VsrD(1) & 1; + avr_qw_add(r, sum, tmp); +#endif +} + +void helper_vsubcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ +#ifdef 
CONFIG_INT128 + r->u128 = (~a->u128 < ~b->u128) || + (a->u128 + ~b->u128 == (__uint128_t)-1); +#else + int carry = (avr_qw_cmpu(*a, *b) > 0); + if (!carry) { + ppc_avr_t tmp; + avr_qw_not(&tmp, *b); + avr_qw_add(&tmp, *a, tmp); +#ifdef _MSC_VER + carry = ((tmp.VsrSD(0) == (0ULL - 1ULL)) && (tmp.VsrSD(1) == (0ULL - 1ULL))); +#else + carry = ((tmp.VsrSD(0) == -1ull) && (tmp.VsrSD(1) == -1ull)); +#endif + } + r->VsrD(0) = 0; + r->VsrD(1) = carry; +#endif +} + +void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +{ +#ifdef CONFIG_INT128 + r->u128 = + (~a->u128 < ~b->u128) || + ((c->u128 & 1) && (a->u128 + ~b->u128 == (__uint128_t)-1)); +#else + int carry_in = c->VsrD(1) & 1; + int carry_out = (avr_qw_cmpu(*a, *b) > 0); + if (!carry_out && carry_in) { + ppc_avr_t tmp; + avr_qw_not(&tmp, *b); + avr_qw_add(&tmp, *a, tmp); +#ifdef _MSC_VER + carry_out = ((tmp.VsrD(0) == (0ULL - 1ULL)) && (tmp.VsrD(1) == (0ULL - 1ULL))); +#else + carry_out = ((tmp.VsrD(0) == -1ull) && (tmp.VsrD(1) == -1ull)); +#endif + } + + r->VsrD(0) = 0; + r->VsrD(1) = carry_out; +#endif +} + +#define BCD_PLUS_PREF_1 0xC +#define BCD_PLUS_PREF_2 0xF +#define BCD_PLUS_ALT_1 0xA +#define BCD_NEG_PREF 0xD +#define BCD_NEG_ALT 0xB +#define BCD_PLUS_ALT_2 0xE +#define NATIONAL_PLUS 0x2B +#define NATIONAL_NEG 0x2D + +#define BCD_DIG_BYTE(n) (15 - ((n) / 2)) + +static int bcd_get_sgn(ppc_avr_t *bcd) +{ + switch (bcd->VsrB(BCD_DIG_BYTE(0)) & 0xF) { + case BCD_PLUS_PREF_1: + case BCD_PLUS_PREF_2: + case BCD_PLUS_ALT_1: + case BCD_PLUS_ALT_2: + { + return 1; + } + + case BCD_NEG_PREF: + case BCD_NEG_ALT: + { + return -1; + } + + default: + { + return 0; + } + } +} + +static int bcd_preferred_sgn(int sgn, int ps) +{ + if (sgn >= 0) { + return (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2; + } else { + return BCD_NEG_PREF; + } +} + +static uint8_t bcd_get_digit(ppc_avr_t *bcd, int n, int *invalid) +{ + uint8_t result; + if (n & 1) { + result = bcd->VsrB(BCD_DIG_BYTE(n)) >> 4; + } else { + result = bcd->VsrB(BCD_DIG_BYTE(n)) & 0xF; + } + + if (unlikely(result > 9)) { + *invalid = true; + } + return result; +} + +static void bcd_put_digit(ppc_avr_t *bcd, uint8_t digit, int n) +{ + if (n & 1) { + bcd->VsrB(BCD_DIG_BYTE(n)) &= 0x0F; + bcd->VsrB(BCD_DIG_BYTE(n)) |= (digit << 4); + } else { + bcd->VsrB(BCD_DIG_BYTE(n)) &= 0xF0; + bcd->VsrB(BCD_DIG_BYTE(n)) |= digit; + } +} + +static bool bcd_is_valid(ppc_avr_t *bcd) +{ + int i; + int invalid = 0; + + if (bcd_get_sgn(bcd) == 0) { + return false; + } + + for (i = 1; i < 32; i++) { + bcd_get_digit(bcd, i, &invalid); + if (unlikely(invalid)) { + return false; + } + } + return true; +} + +static int bcd_cmp_zero(ppc_avr_t *bcd) +{ + if (bcd->VsrD(0) == 0 && (bcd->VsrD(1) >> 4) == 0) { + return CRF_EQ; + } else { + return (bcd_get_sgn(bcd) == 1) ? 
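/*
 * Without a native 128-bit type, avr_qw_add() above detects the
 * low-word carry with "~a < b": a + b wraps past UINT64_MAX exactly
 * when b exceeds the remaining headroom UINT64_MAX - a, which is ~a.
 * A standalone check of that predicate (names are illustrative):
 */
#include <assert.h>
#include <stdint.h>

static int add_carries(uint64_t a, uint64_t b)
{
    return ~a < b;              /* equivalent to b > UINT64_MAX - a */
}

int main(void)
{
    assert(add_carries(UINT64_MAX, 1));
    assert(!add_carries(UINT64_MAX, 0));
    assert(add_carries(1ULL << 63, 1ULL << 63));
    return 0;
}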
CRF_GT : CRF_LT; + } +} + +static uint16_t get_national_digit(ppc_avr_t *reg, int n) +{ + return reg->VsrH(7 - n); +} + +static void set_national_digit(ppc_avr_t *reg, uint8_t val, int n) +{ + reg->VsrH(7 - n) = val; +} + +static int bcd_cmp_mag(ppc_avr_t *a, ppc_avr_t *b) +{ + int i; + int invalid = 0; + for (i = 31; i > 0; i--) { + uint8_t dig_a = bcd_get_digit(a, i, &invalid); + uint8_t dig_b = bcd_get_digit(b, i, &invalid); + if (unlikely(invalid)) { + return 0; /* doesn't matter */ + } else if (dig_a > dig_b) { + return 1; + } else if (dig_a < dig_b) { + return -1; + } + } + + return 0; +} + +static void bcd_add_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid, + int *overflow) +{ + int carry = 0; + int i; + for (i = 1; i <= 31; i++) { + uint8_t digit = bcd_get_digit(a, i, invalid) + + bcd_get_digit(b, i, invalid) + carry; + if (digit > 9) { + carry = 1; + digit -= 10; + } else { + carry = 0; + } + + bcd_put_digit(t, digit, i); + } + + *overflow = carry; +} + +static void bcd_sub_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid, + int *overflow) +{ + int carry = 0; + int i; + + for (i = 1; i <= 31; i++) { + uint8_t digit = bcd_get_digit(a, i, invalid) - + bcd_get_digit(b, i, invalid) + carry; + if (digit & 0x80) { + carry = -1; + digit += 10; + } else { + carry = 0; + } + + bcd_put_digit(t, digit, i); + } + + *overflow = carry; +} + +uint32_t helper_bcdadd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) +{ + + int sgna = bcd_get_sgn(a); + int sgnb = bcd_get_sgn(b); + int invalid = (sgna == 0) || (sgnb == 0); + int overflow = 0; + uint32_t cr = 0; + ppc_avr_t result = { .u64 = { 0, 0 } }; + + if (!invalid) { + if (sgna == sgnb) { + result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgna, ps); + bcd_add_mag(&result, a, b, &invalid, &overflow); + cr = bcd_cmp_zero(&result); + } else { + int magnitude = bcd_cmp_mag(a, b); + if (magnitude > 0) { + result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgna, ps); + bcd_sub_mag(&result, a, b, &invalid, &overflow); + cr = (sgna > 0) ? CRF_GT : CRF_LT; + } else if (magnitude < 0) { + result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgnb, ps); + bcd_sub_mag(&result, b, a, &invalid, &overflow); + cr = (sgnb > 0) ? CRF_GT : CRF_LT; + } else { + result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(0, ps); + cr = CRF_EQ; + } + } + } + + if (unlikely(invalid)) { + result.VsrD(0) = result.VsrD(1) = -1; + cr = CRF_SO; + } else if (overflow) { + cr |= CRF_SO; + } + + *r = result; + + return cr; +} + +uint32_t helper_bcdsub(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) +{ + ppc_avr_t bcopy = *b; + int sgnb = bcd_get_sgn(b); + if (sgnb < 0) { + bcd_put_digit(&bcopy, BCD_PLUS_PREF_1, 0); + } else if (sgnb > 0) { + bcd_put_digit(&bcopy, BCD_NEG_PREF, 0); + } + /* else invalid ... defer to bcdadd code for proper handling */ + + return helper_bcdadd(r, a, &bcopy, ps); +} + +uint32_t helper_bcdcfn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) +{ + int i; + int cr = 0; + uint16_t national = 0; + uint16_t sgnb = get_national_digit(b, 0); + ppc_avr_t ret = { .u64 = { 0, 0 } }; + int invalid = (sgnb != NATIONAL_PLUS && sgnb != NATIONAL_NEG); + + for (i = 1; i < 8; i++) { + national = get_national_digit(b, i); + if (unlikely(national < 0x30 || national > 0x39)) { + invalid = 1; + break; + } + + bcd_put_digit(&ret, national & 0xf, i); + } + + if (sgnb == NATIONAL_PLUS) { + bcd_put_digit(&ret, (ps == 0) ? 
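/*
 * All of the bcd* helpers address the 31 packed digits through
 * BCD_DIG_BYTE(): digit 0 is the sign nibble in the low half of byte
 * 15, and successive digits alternate high/low nibbles while walking
 * toward byte 0.  A standalone illustration of that addressing:
 */
#include <stdio.h>

#define BCD_DIG_BYTE(n) (15 - ((n) / 2))

int main(void)
{
    int n;

    for (n = 0; n < 32; n++) {
        /* odd digits use the high nibble, even digits the low nibble */
        printf("digit %2d -> byte %2d, %s nibble\n",
               n, BCD_DIG_BYTE(n), (n & 1) ? "high" : "low");
    }
    return 0;
}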
BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2, 0); + } else { + bcd_put_digit(&ret, BCD_NEG_PREF, 0); + } + + cr = bcd_cmp_zero(&ret); + + if (unlikely(invalid)) { + cr = CRF_SO; + } + + *r = ret; + + return cr; +} + +uint32_t helper_bcdctn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) +{ + int i; + int cr = 0; + int sgnb = bcd_get_sgn(b); + int invalid = (sgnb == 0); + ppc_avr_t ret = { .u64 = { 0, 0 } }; + + int ox_flag = (b->VsrD(0) != 0) || ((b->VsrD(1) >> 32) != 0); + + for (i = 1; i < 8; i++) { + set_national_digit(&ret, 0x30 + bcd_get_digit(b, i, &invalid), i); + + if (unlikely(invalid)) { + break; + } + } + set_national_digit(&ret, (sgnb == -1) ? NATIONAL_NEG : NATIONAL_PLUS, 0); + + cr = bcd_cmp_zero(b); + + if (ox_flag) { + cr |= CRF_SO; + } + + if (unlikely(invalid)) { + cr = CRF_SO; + } + + *r = ret; + + return cr; +} + +uint32_t helper_bcdcfz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) +{ + int i; + int cr = 0; + int invalid = 0; + int zone_digit = 0; + int zone_lead = ps ? 0xF : 0x3; + int digit = 0; + ppc_avr_t ret = { .u64 = { 0, 0 } }; + int sgnb = b->VsrB(BCD_DIG_BYTE(0)) >> 4; + + if (unlikely((sgnb < 0xA) && ps)) { + invalid = 1; + } + + for (i = 0; i < 16; i++) { + zone_digit = i ? b->VsrB(BCD_DIG_BYTE(i * 2)) >> 4 : zone_lead; + digit = b->VsrB(BCD_DIG_BYTE(i * 2)) & 0xF; + if (unlikely(zone_digit != zone_lead || digit > 0x9)) { + invalid = 1; + break; + } + + bcd_put_digit(&ret, digit, i + 1); + } + + if ((ps && (sgnb == 0xB || sgnb == 0xD)) || + (!ps && (sgnb & 0x4))) { + bcd_put_digit(&ret, BCD_NEG_PREF, 0); + } else { + bcd_put_digit(&ret, BCD_PLUS_PREF_1, 0); + } + + cr = bcd_cmp_zero(&ret); + + if (unlikely(invalid)) { + cr = CRF_SO; + } + + *r = ret; + + return cr; +} + +uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) +{ + int i; + int cr = 0; + uint8_t digit = 0; + int sgnb = bcd_get_sgn(b); + int zone_lead = (ps) ? 0xF0 : 0x30; + int invalid = (sgnb == 0); + ppc_avr_t ret = { .u64 = { 0, 0 } }; + + int ox_flag = ((b->VsrD(0) >> 4) != 0); + + for (i = 0; i < 16; i++) { + digit = bcd_get_digit(b, i + 1, &invalid); + + if (unlikely(invalid)) { + break; + } + + ret.VsrB(BCD_DIG_BYTE(i * 2)) = zone_lead + digit; + } + + if (ps) { + bcd_put_digit(&ret, (sgnb == 1) ? 0xC : 0xD, 1); + } else { + bcd_put_digit(&ret, (sgnb == 1) ? 
0x3 : 0x7, 1); + } + + cr = bcd_cmp_zero(b); + + if (ox_flag) { + cr |= CRF_SO; + } + + if (unlikely(invalid)) { + cr = CRF_SO; + } + + *r = ret; + + return cr; +} + +uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) +{ + int i; + int cr = 0; + uint64_t lo_value; + uint64_t hi_value; + ppc_avr_t ret = { .u64 = { 0, 0 } }; + + if (b->VsrSD(0) < 0) { + lo_value = -b->VsrSD(1); + hi_value = ~b->VsrD(0) + !lo_value; + bcd_put_digit(&ret, 0xD, 0); + } else { + lo_value = b->VsrD(1); + hi_value = b->VsrD(0); + bcd_put_digit(&ret, bcd_preferred_sgn(0, ps), 0); + } + + if (divu128(&lo_value, &hi_value, 1000000000000000ULL) || + lo_value > 9999999999999999ULL) { + cr = CRF_SO; + } + + for (i = 1; i < 16; hi_value /= 10, i++) { + bcd_put_digit(&ret, hi_value % 10, i); + } + + for (; i < 32; lo_value /= 10, i++) { + bcd_put_digit(&ret, lo_value % 10, i); + } + + cr |= bcd_cmp_zero(&ret); + + *r = ret; + + return cr; +} + +uint32_t helper_bcdctsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) +{ + uint8_t i; + int cr; + uint64_t carry; + uint64_t unused; + uint64_t lo_value; + uint64_t hi_value = 0; + int sgnb = bcd_get_sgn(b); + int invalid = (sgnb == 0); + + lo_value = bcd_get_digit(b, 31, &invalid); + for (i = 30; i > 0; i--) { + mulu64(&lo_value, &carry, lo_value, 10ULL); + mulu64(&hi_value, &unused, hi_value, 10ULL); + lo_value += bcd_get_digit(b, i, &invalid); + hi_value += carry; + + if (unlikely(invalid)) { + break; + } + } + + if (sgnb == -1) { +#ifdef _MSC_VER + r->VsrSD(1) = 0 - lo_value; +#else + r->VsrSD(1) = -lo_value; +#endif + r->VsrSD(0) = ~hi_value + !r->VsrSD(1); + } else { + r->VsrSD(1) = lo_value; + r->VsrSD(0) = hi_value; + } + + cr = bcd_cmp_zero(b); + + if (unlikely(invalid)) { + cr = CRF_SO; + } + + return cr; +} + +uint32_t helper_bcdcpsgn(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) +{ + int i; + int invalid = 0; + + if (bcd_get_sgn(a) == 0 || bcd_get_sgn(b) == 0) { + return CRF_SO; + } + + *r = *a; + bcd_put_digit(r, b->VsrB(BCD_DIG_BYTE(0)) & 0xF, 0); + + for (i = 1; i < 32; i++) { + bcd_get_digit(a, i, &invalid); + bcd_get_digit(b, i, &invalid); + if (unlikely(invalid)) { + return CRF_SO; + } + } + + return bcd_cmp_zero(r); +} + +uint32_t helper_bcdsetsgn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) +{ + int sgnb = bcd_get_sgn(b); + + *r = *b; + bcd_put_digit(r, bcd_preferred_sgn(sgnb, ps), 0); + + if (bcd_is_valid(b) == false) { + return CRF_SO; + } + + return bcd_cmp_zero(r); +} + +uint32_t helper_bcds(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) +{ + int cr; + int i = a->VsrSB(7); + bool ox_flag = false; + int sgnb = bcd_get_sgn(b); + ppc_avr_t ret = *b; + ret.VsrD(1) &= ~0xf; + + if (bcd_is_valid(b) == false) { + return CRF_SO; + } + + if (unlikely(i > 31)) { + i = 31; + } else if (unlikely(i < -31)) { + i = -31; + } + + if (i > 0) { + ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag); + } else { + urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4); + } + bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0); + + *r = ret; + + cr = bcd_cmp_zero(r); + if (ox_flag) { + cr |= CRF_SO; + } + + return cr; +} + +uint32_t helper_bcdus(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) +{ + int cr; + int i; + int invalid = 0; + bool ox_flag = false; + ppc_avr_t ret = *b; + + for (i = 0; i < 32; i++) { + bcd_get_digit(b, i, &invalid); + + if (unlikely(invalid)) { + return CRF_SO; + } + } + + i = a->VsrSB(7); + if (i >= 32) { + ox_flag = true; + ret.VsrD(1) = ret.VsrD(0) = 0; + } else if (i <= -32) { + ret.VsrD(1) = ret.VsrD(0) = 0; + } else if (i > 0) { + 
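/*
 * The BCD shift helpers here read a signed digit count from byte 7 of
 * the first source vector and move the magnitude four bits per decimal
 * digit; ulshift()/urshift() carry the bits across the two 64-bit
 * halves and flag anything shifted out.  A sketch of such a 128-bit
 * left shift, assuming a count below 128 (shl128 is an illustrative
 * name):
 */
#include <stdint.h>

static void shl128(uint64_t *lo, uint64_t *hi, unsigned shift, int *overflow)
{
    if (shift == 0) {
        return;
    }
    if (shift < 64) {
        *overflow |= (*hi >> (64 - shift)) != 0;    /* bits lost off the top */
        *hi = (*hi << shift) | (*lo >> (64 - shift));
        *lo <<= shift;
    } else {
        unsigned s = shift - 64;                    /* 64 <= shift < 128 */
        *overflow |= *hi != 0 || (s != 0 && (*lo >> (64 - s)) != 0);
        *hi = *lo << s;
        *lo = 0;
    }
}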
ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag); + } else { + urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4); + } + *r = ret; + + cr = bcd_cmp_zero(r); + if (ox_flag) { + cr |= CRF_SO; + } + + return cr; +} + +uint32_t helper_bcdsr(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) +{ + int cr; + int unused = 0; + int invalid = 0; + bool ox_flag = false; + int sgnb = bcd_get_sgn(b); + ppc_avr_t ret = *b; + ret.VsrD(1) &= ~0xf; + + int i = a->VsrSB(7); + ppc_avr_t bcd_one; + + bcd_one.VsrD(0) = 0; + bcd_one.VsrD(1) = 0x10; + + if (bcd_is_valid(b) == false) { + return CRF_SO; + } + + if (unlikely(i > 31)) { + i = 31; + } else if (unlikely(i < -31)) { + i = -31; + } + + if (i > 0) { + ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag); + } else { + urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4); + + if (bcd_get_digit(&ret, 0, &invalid) >= 5) { + bcd_add_mag(&ret, &ret, &bcd_one, &invalid, &unused); + } + } + bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0); + + cr = bcd_cmp_zero(&ret); + if (ox_flag) { + cr |= CRF_SO; + } + *r = ret; + + return cr; +} + +uint32_t helper_bcdtrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) +{ + uint64_t mask; + uint32_t ox_flag = 0; + int i = a->VsrSH(3) + 1; + ppc_avr_t ret = *b; + + if (bcd_is_valid(b) == false) { + return CRF_SO; + } + + if (i > 16 && i < 32) { + mask = (uint64_t)-1 >> (128 - i * 4); + if (ret.VsrD(0) & ~mask) { + ox_flag = CRF_SO; + } + + ret.VsrD(0) &= mask; + } else if (i >= 0 && i <= 16) { + mask = (uint64_t)-1 >> (64 - i * 4); + if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) { + ox_flag = CRF_SO; + } + + ret.VsrD(1) &= mask; + ret.VsrD(0) = 0; + } + bcd_put_digit(&ret, bcd_preferred_sgn(bcd_get_sgn(b), ps), 0); + *r = ret; + + return bcd_cmp_zero(&ret) | ox_flag; +} + +uint32_t helper_bcdutrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps) +{ + int i; + uint64_t mask; + uint32_t ox_flag = 0; + int invalid = 0; + ppc_avr_t ret = *b; + + for (i = 0; i < 32; i++) { + bcd_get_digit(b, i, &invalid); + + if (unlikely(invalid)) { + return CRF_SO; + } + } + + i = a->VsrSH(3); + if (i > 16 && i < 33) { + mask = (uint64_t)-1 >> (128 - i * 4); + if (ret.VsrD(0) & ~mask) { + ox_flag = CRF_SO; + } + + ret.VsrD(0) &= mask; + } else if (i > 0 && i <= 16) { + mask = (uint64_t)-1 >> (64 - i * 4); + if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) { + ox_flag = CRF_SO; + } + + ret.VsrD(1) &= mask; + ret.VsrD(0) = 0; + } else if (i == 0) { + if (ret.VsrD(0) || ret.VsrD(1)) { + ox_flag = CRF_SO; + } + ret.VsrD(0) = ret.VsrD(1) = 0; + } + + *r = ret; + if (r->VsrD(0) == 0 && r->VsrD(1) == 0) { + return ox_flag | CRF_EQ; + } + + return ox_flag | CRF_GT; +} + +void helper_vsbox(ppc_avr_t *r, ppc_avr_t *a) +{ + int i; + VECTOR_FOR_INORDER_I(i, u8) { + r->u8[i] = AES_sbox[a->u8[i]]; + } +} + +void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + ppc_avr_t result; + int i; + + VECTOR_FOR_INORDER_I(i, u32) { + result.VsrW(i) = b->VsrW(i) ^ + (AES_Te0[a->VsrB(AES_shifts[4 * i + 0])] ^ + AES_Te1[a->VsrB(AES_shifts[4 * i + 1])] ^ + AES_Te2[a->VsrB(AES_shifts[4 * i + 2])] ^ + AES_Te3[a->VsrB(AES_shifts[4 * i + 3])]); + } + *r = result; +} + +void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + ppc_avr_t result; + int i; + + VECTOR_FOR_INORDER_I(i, u8) { + result.VsrB(i) = b->VsrB(i) ^ (AES_sbox[a->VsrB(AES_shifts[i])]); + } + *r = result; +} + +void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + /* This differs from what is written in ISA V2.07. The RTL is */ + /* incorrect and will be fixed in V2.07B. 
*/ + int i; + ppc_avr_t tmp; + + VECTOR_FOR_INORDER_I(i, u8) { + tmp.VsrB(i) = b->VsrB(i) ^ AES_isbox[a->VsrB(AES_ishifts[i])]; + } + + VECTOR_FOR_INORDER_I(i, u32) { + r->VsrW(i) = + AES_imc[tmp.VsrB(4 * i + 0)][0] ^ + AES_imc[tmp.VsrB(4 * i + 1)][1] ^ + AES_imc[tmp.VsrB(4 * i + 2)][2] ^ + AES_imc[tmp.VsrB(4 * i + 3)][3]; + } +} + +void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +{ + ppc_avr_t result; + int i; + + VECTOR_FOR_INORDER_I(i, u8) { + result.VsrB(i) = b->VsrB(i) ^ (AES_isbox[a->VsrB(AES_ishifts[i])]); + } + *r = result; +} + +void helper_vshasigmaw(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six) +{ + int st = (st_six & 0x10) != 0; + int six = st_six & 0xF; + int i; + + for (i = 0; i < ARRAY_SIZE(r->u32); i++) { + if (st == 0) { + if ((six & (0x8 >> i)) == 0) { + r->VsrW(i) = ror32(a->VsrW(i), 7) ^ + ror32(a->VsrW(i), 18) ^ + (a->VsrW(i) >> 3); + } else { /* six.bit[i] == 1 */ + r->VsrW(i) = ror32(a->VsrW(i), 17) ^ + ror32(a->VsrW(i), 19) ^ + (a->VsrW(i) >> 10); + } + } else { /* st == 1 */ + if ((six & (0x8 >> i)) == 0) { + r->VsrW(i) = ror32(a->VsrW(i), 2) ^ + ror32(a->VsrW(i), 13) ^ + ror32(a->VsrW(i), 22); + } else { /* six.bit[i] == 1 */ + r->VsrW(i) = ror32(a->VsrW(i), 6) ^ + ror32(a->VsrW(i), 11) ^ + ror32(a->VsrW(i), 25); + } + } + } +} + +void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six) +{ + int st = (st_six & 0x10) != 0; + int six = st_six & 0xF; + int i; + + for (i = 0; i < ARRAY_SIZE(r->u64); i++) { + if (st == 0) { + if ((six & (0x8 >> (2 * i))) == 0) { + r->VsrD(i) = ror64(a->VsrD(i), 1) ^ + ror64(a->VsrD(i), 8) ^ + (a->VsrD(i) >> 7); + } else { /* six.bit[2*i] == 1 */ + r->VsrD(i) = ror64(a->VsrD(i), 19) ^ + ror64(a->VsrD(i), 61) ^ + (a->VsrD(i) >> 6); + } + } else { /* st == 1 */ + if ((six & (0x8 >> (2 * i))) == 0) { + r->VsrD(i) = ror64(a->VsrD(i), 28) ^ + ror64(a->VsrD(i), 34) ^ + ror64(a->VsrD(i), 39); + } else { /* six.bit[2*i] == 1 */ + r->VsrD(i) = ror64(a->VsrD(i), 14) ^ + ror64(a->VsrD(i), 18) ^ + ror64(a->VsrD(i), 41); + } + } + } +} + +void helper_vpermxor(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +{ + ppc_avr_t result; + int i; + + for (i = 0; i < ARRAY_SIZE(r->u8); i++) { + int indexA = c->VsrB(i) >> 4; + int indexB = c->VsrB(i) & 0xF; + + result.VsrB(i) = a->VsrB(indexA) ^ b->VsrB(indexB); + } + *r = result; +} + +#undef VECTOR_FOR_INORDER_I + +/*****************************************************************************/ +/* SPE extension helpers */ +/* Use a table to make this quicker */ +static const uint8_t hbrev[16] = { + 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE, + 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF, +}; + +static inline uint8_t byte_reverse(uint8_t val) +{ + return hbrev[val >> 4] | (hbrev[val & 0xF] << 4); +} + +static inline uint32_t word_reverse(uint32_t val) +{ + return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) | + (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24); +} + +#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */ +target_ulong helper_brinc(target_ulong arg1, target_ulong arg2) +{ + uint32_t a, b, d, mask; + + mask = UINT32_MAX >> (32 - MASKBITS); + a = arg1 & mask; + b = arg2 & mask; + d = word_reverse(1 + word_reverse(a | ~b)); + return (arg1 & ~mask) | (d & b); +} + +uint32_t helper_cntlsw32(uint32_t val) +{ + if (val & 0x80000000) { + return clz32(~val); + } else { + return clz32(val); + } +} + +uint32_t helper_cntlzw32(uint32_t val) +{ + return clz32(val); +} + +/* 440 specific */ +target_ulong helper_dlmzb(CPUPPCState *env, 
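/*
 * byte_reverse() above mirrors a byte's bits with one 16-entry table by
 * reversing each nibble and swapping the halves; word_reverse() then
 * builds the 32-bit mirror that brinc needs.  A standalone check:
 */
#include <assert.h>
#include <stdint.h>

static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

int main(void)
{
    assert(byte_reverse(0x2D) == 0xB4);   /* 00101101 -> 10110100 */
    assert(byte_reverse(0x80) == 0x01);
    return 0;
}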
target_ulong high, + target_ulong low, uint32_t update_Rc) +{ + target_ulong mask; + int i; + + i = 1; + for (mask = 0xFF000000; mask != 0; mask = mask >> 8) { + if ((high & mask) == 0) { + if (update_Rc) { + env->crf[0] = 0x4; + } + goto done; + } + i++; + } + for (mask = 0xFF000000; mask != 0; mask = mask >> 8) { + if ((low & mask) == 0) { + if (update_Rc) { + env->crf[0] = 0x8; + } + goto done; + } + i++; + } + i = 8; + if (update_Rc) { + env->crf[0] = 0x2; + } + done: + env->xer = (env->xer & ~0x7F) | i; + if (update_Rc) { + env->crf[0] |= xer_so; + } + return i; +} diff --git a/qemu/target/ppc/internal.h b/qemu/target/ppc/internal.h new file mode 100644 index 00000000..e4b2d242 --- /dev/null +++ b/qemu/target/ppc/internal.h @@ -0,0 +1,216 @@ +/* + * PowerPC internal definitions for qemu. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef PPC_INTERNAL_H +#define PPC_INTERNAL_H + +#define FUNC_MASK(name, ret_type, size, max_val) \ +static inline ret_type name(uint##size##_t start, \ + uint##size##_t end) \ +{ \ + ret_type ret, max_bit = size - 1; \ + \ + if (likely(start == 0)) { \ + ret = max_val << (max_bit - end); \ + } else if (likely(end == max_bit)) { \ + ret = max_val >> start; \ + } else { \ + ret = (((uint##size##_t)(0ULL - 1ULL)) >> (start)) ^ \ + (((uint##size##_t)(0ULL - 1ULL) >> (end)) >> 1); \ + if (unlikely(start > end)) { \ + return ~ret; \ + } \ + } \ + \ + return ret; \ +} + +#if defined(TARGET_PPC64) +FUNC_MASK(MASK, target_ulong, 64, UINT64_MAX); +#else +FUNC_MASK(MASK, target_ulong, 32, UINT32_MAX); +#endif +FUNC_MASK(mask_u32, uint32_t, 32, UINT32_MAX); +FUNC_MASK(mask_u64, uint64_t, 64, UINT64_MAX); + +/*****************************************************************************/ +/*** Instruction decoding ***/ +#define EXTRACT_HELPER(name, shift, nb) \ +static inline uint32_t name(uint32_t opcode) \ +{ \ + return extract32(opcode, shift, nb); \ +} + +#define EXTRACT_SHELPER(name, shift, nb) \ +static inline int32_t name(uint32_t opcode) \ +{ \ + return sextract32(opcode, shift, nb); \ +} + +#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2) \ +static inline uint32_t name(uint32_t opcode) \ +{ \ + return extract32(opcode, shift1, nb1) << nb2 | \ + extract32(opcode, shift2, nb2); \ +} + +#define EXTRACT_HELPER_SPLIT_3(name, \ + d0_bits, shift_op_d0, shift_d0, \ + d1_bits, shift_op_d1, shift_d1, \ + d2_bits, shift_op_d2, shift_d2) \ +static inline int16_t name(uint32_t opcode) \ +{ \ + return \ + (((opcode >> (shift_op_d0)) & ((1 << (d0_bits)) - 1)) << (shift_d0)) | \ + (((opcode >> (shift_op_d1)) & ((1 << (d1_bits)) - 1)) << (shift_d1)) | \ + (((opcode >> (shift_op_d2)) & ((1 << (d2_bits)) - 1)) << (shift_d2)); \ +} + + +/* Opcode part 1 */ +EXTRACT_HELPER(opc1, 26, 6); +/* Opcode part 2 */ +EXTRACT_HELPER(opc2, 1, 5); +/* Opcode part 3 */ +EXTRACT_HELPER(opc3, 6, 5); +/* Opcode part 4 */ +EXTRACT_HELPER(opc4, 16, 5); +/* Update Cr0 flags */
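/*
 * FUNC_MASK above generates PowerPC-convention masks: bit 0 is the most
 * significant bit, mask(start, end) sets bits start..end, and start >
 * end wraps around by inverting the complementary mask.  A standalone
 * copy of the 32-bit instance with spot checks (UINT32_MAX stands in
 * for the MSVC-friendly (0ULL - 1ULL) cast, and the likely() hints are
 * dropped):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t mask_u32(uint32_t start, uint32_t end)
{
    uint32_t ret, max_bit = 31;

    if (start == 0) {
        ret = UINT32_MAX << (max_bit - end);
    } else if (end == max_bit) {
        ret = UINT32_MAX >> start;
    } else {
        ret = (UINT32_MAX >> start) ^ ((UINT32_MAX >> end) >> 1);
        if (start > end) {
            return ~ret;                  /* wrap-around mask */
        }
    }
    return ret;
}

int main(void)
{
    assert(mask_u32(0, 7)   == 0xFF000000u);   /* PPC bits 0-7: high byte */
    assert(mask_u32(24, 31) == 0x000000FFu);   /* PPC bits 24-31: low byte */
    assert(mask_u32(28, 3)  == 0xF000000Fu);   /* wrap-around */
    return 0;
}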
+EXTRACT_HELPER(Rc, 0, 1); +/* Update Cr6 flags (Altivec) */ +EXTRACT_HELPER(Rc21, 10, 1); +/* Destination */ +EXTRACT_HELPER(rD, 21, 5); +/* Source */ +EXTRACT_HELPER(rS, 21, 5); +/* First operand */ +EXTRACT_HELPER(rA, 16, 5); +/* Second operand */ +EXTRACT_HELPER(rB, 11, 5); +/* Third operand */ +EXTRACT_HELPER(rC, 6, 5); +/*** Get CRn ***/ +EXTRACT_HELPER(crfD, 23, 3); +EXTRACT_HELPER(BF, 23, 3); +EXTRACT_HELPER(crfS, 18, 3); +EXTRACT_HELPER(crbD, 21, 5); +EXTRACT_HELPER(crbA, 16, 5); +EXTRACT_HELPER(crbB, 11, 5); +/* SPR / TBL */ +EXTRACT_HELPER(_SPR, 11, 10); +static inline uint32_t SPR(uint32_t opcode) +{ + uint32_t sprn = _SPR(opcode); + + return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5); +} +/*** Get constants ***/ +/* 16 bits signed immediate value */ +EXTRACT_SHELPER(SIMM, 0, 16); +/* 16 bits unsigned immediate value */ +EXTRACT_HELPER(UIMM, 0, 16); +/* 5 bits signed immediate value */ +EXTRACT_SHELPER(SIMM5, 16, 5); +/* 5 bits signed immediate value */ +EXTRACT_HELPER(UIMM5, 16, 5); +/* 4 bits unsigned immediate value */ +EXTRACT_HELPER(UIMM4, 16, 4); +/* Bit count */ +EXTRACT_HELPER(NB, 11, 5); +/* Shift count */ +EXTRACT_HELPER(SH, 11, 5); +/* lwat/stwat/ldat/lwat */ +EXTRACT_HELPER(FC, 11, 5); +/* Vector shift count */ +EXTRACT_HELPER(VSH, 6, 4); +/* Mask start */ +EXTRACT_HELPER(MB, 6, 5); +/* Mask end */ +EXTRACT_HELPER(ME, 1, 5); +/* Trap operand */ +EXTRACT_HELPER(TO, 21, 5); + +EXTRACT_HELPER(CRM, 12, 8); + +EXTRACT_HELPER(SR, 16, 4); + +/* mtfsf/mtfsfi */ +EXTRACT_HELPER(FPBF, 23, 3); +EXTRACT_HELPER(FPIMM, 12, 4); +EXTRACT_HELPER(FPL, 25, 1); +EXTRACT_HELPER(FPFLM, 17, 8); +EXTRACT_HELPER(FPW, 16, 1); + +/* mffscrni */ +EXTRACT_HELPER(RM, 11, 2); + +/* addpcis */ +EXTRACT_HELPER_SPLIT_3(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0) +#if defined(TARGET_PPC64) +/* darn */ +EXTRACT_HELPER(L, 16, 2); +#endif + +/*** Jump target decoding ***/ +/* Immediate address */ +static inline target_ulong LI(uint32_t opcode) +{ + return (opcode >> 0) & 0x03FFFFFC; +} + +static inline uint32_t BD(uint32_t opcode) +{ + return (opcode >> 0) & 0xFFFC; +} + +EXTRACT_HELPER(BO, 21, 5); +EXTRACT_HELPER(BI, 16, 5); +/* Absolute/relative address */ +EXTRACT_HELPER(AA, 1, 1); +/* Link */ +EXTRACT_HELPER(LK, 0, 1); + +/* DFP Z22-form */ +EXTRACT_HELPER(DCM, 10, 6) + +/* DFP Z23-form */ +EXTRACT_HELPER(RMC, 9, 2) +EXTRACT_HELPER(Rrm, 16, 1) + +EXTRACT_HELPER_SPLIT(DQxT, 3, 1, 21, 5); +EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5); +EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5); +EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5); +EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5); +EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5); +EXTRACT_HELPER(DM, 8, 2); +EXTRACT_HELPER(UIM, 16, 2); +EXTRACT_HELPER(SHW, 8, 2); +EXTRACT_HELPER(SP, 19, 2); +EXTRACT_HELPER(IMM8, 11, 8); +EXTRACT_HELPER(DCMX, 16, 7); +EXTRACT_HELPER_SPLIT_3(DCMX_XV, 5, 16, 0, 1, 2, 5, 1, 6, 6); + +void helper_compute_fprf_float16(CPUPPCState *env, float16 arg); +void helper_compute_fprf_float32(CPUPPCState *env, float32 arg); +void helper_compute_fprf_float128(CPUPPCState *env, float128 arg); + +/* Raise a data fault alignment exception for the specified virtual address */ +void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); +#endif /* PPC_INTERNAL_H */ diff --git a/qemu/target/ppc/kvm_ppc.h b/qemu/target/ppc/kvm_ppc.h new file mode 100644 index 00000000..80b1294c --- /dev/null +++ b/qemu/target/ppc/kvm_ppc.h @@ -0,0 +1,467 @@ +/* + * Copyright 2008 IBM Corporation. 
+ * Authors: Hollis Blanchard + * + * This work is licensed under the GNU GPL license version 2 or later. + * + */ + +#ifndef KVM_PPC_H +#define KVM_PPC_H + +#define TYPE_HOST_POWERPC_CPU POWERPC_CPU_TYPE_NAME("host") + +#ifdef CONFIG_KVM + +uint32_t kvmppc_get_tbfreq(void); +uint64_t kvmppc_get_clockfreq(void); +bool kvmppc_get_host_model(char **buf); +bool kvmppc_get_host_serial(char **buf); +int kvmppc_get_hasidle(CPUPPCState *env); +int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len); +int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level); +void kvmppc_enable_logical_ci_hcalls(void); +void kvmppc_enable_set_mode_hcall(void); +void kvmppc_enable_clear_ref_mod_hcalls(void); +void kvmppc_enable_h_page_init(void); +void kvmppc_set_papr(PowerPCCPU *cpu); +int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr); +void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy); +bool kvmppc_get_fwnmi(void); +int kvmppc_set_fwnmi(void); +int kvmppc_smt_threads(void); +void kvmppc_error_append_smt_possible_hint(Error *const *errp); +int kvmppc_set_smt_threads(int smt); +int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits); +int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits); +int kvmppc_set_tcr(PowerPCCPU *cpu); +int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu); +target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu, + bool radix, bool gtse, + uint64_t proc_tbl); +void kvmppc_svm_off(Error **errp); +bool kvmppc_spapr_use_multitce(void); +int kvmppc_spapr_enable_inkernel_multitce(void); +void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift, + uint64_t bus_offset, uint32_t nb_table, + int *pfd, bool need_vfio); +int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size); +int kvmppc_reset_htab(int shift_hint); +uint64_t kvmppc_vrma_limit(unsigned int hash_shift); +bool kvmppc_has_cap_spapr_vfio(void); + +bool kvmppc_has_cap_epr(void); +int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function); +int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp); +int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns); +int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index, + uint16_t n_valid, uint16_t n_invalid); +void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n); +void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1); +bool kvmppc_has_cap_fixup_hcalls(void); +bool kvmppc_has_cap_htm(void); +bool kvmppc_has_cap_mmu_radix(void); +bool kvmppc_has_cap_mmu_hash_v3(void); +bool kvmppc_has_cap_xive(void); +int kvmppc_get_cap_safe_cache(void); +int kvmppc_get_cap_safe_bounds_check(void); +int kvmppc_get_cap_safe_indirect_branch(void); +int kvmppc_get_cap_count_cache_flush_assist(void); +bool kvmppc_has_cap_nested_kvm_hv(void); +int kvmppc_set_cap_nested_kvm_hv(int enable); +int kvmppc_get_cap_large_decr(void); +int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable); +int kvmppc_enable_hwrng(void); +int kvmppc_put_books_sregs(PowerPCCPU *cpu); +PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void); +void kvmppc_check_papr_resize_hpt(Error **errp); +int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift); +int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift); +bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu); + +bool kvmppc_hpt_needs_host_contiguous_pages(void); +void kvm_check_mmu(PowerPCCPU *cpu, Error **errp); +void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online); +void 
kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset); + +int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run); + +#else + +static inline uint32_t kvmppc_get_tbfreq(void) +{ + return 0; +} + +static inline bool kvmppc_get_host_model(char **buf) +{ + return false; +} + +static inline bool kvmppc_get_host_serial(char **buf) +{ + return false; +} + +static inline uint64_t kvmppc_get_clockfreq(void) +{ + return 0; +} + +static inline uint32_t kvmppc_get_vmx(void) +{ + return 0; +} + +static inline uint32_t kvmppc_get_dfp(void) +{ + return 0; +} + +static inline int kvmppc_get_hasidle(CPUPPCState *env) +{ + return 0; +} + +static inline int kvmppc_get_hypercall(CPUPPCState *env, + uint8_t *buf, int buf_len) +{ + return -1; +} + +static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level) +{ + return -1; +} + +static inline void kvmppc_enable_logical_ci_hcalls(void) +{ +} + +static inline void kvmppc_enable_set_mode_hcall(void) +{ +} + +static inline void kvmppc_enable_clear_ref_mod_hcalls(void) +{ +} + +static inline void kvmppc_enable_h_page_init(void) +{ +} + +static inline void kvmppc_set_papr(PowerPCCPU *cpu) +{ +} + +static inline int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr) +{ + return 0; +} + +static inline void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy) +{ +} + +static inline bool kvmppc_get_fwnmi(void) +{ + return false; +} + +static inline int kvmppc_set_fwnmi(void) +{ + return -1; +} + +static inline int kvmppc_smt_threads(void) +{ + return 1; +} + +static inline void kvmppc_error_append_smt_possible_hint(Error *const *errp) +{ + return; +} + +static inline int kvmppc_set_smt_threads(int smt) +{ + return 0; +} + +static inline int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits) +{ + return 0; +} + +static inline int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits) +{ + return 0; +} + +static inline int kvmppc_set_tcr(PowerPCCPU *cpu) +{ + return 0; +} + +static inline int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu) +{ + return -1; +} + +static inline target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu, + bool radix, bool gtse, + uint64_t proc_tbl) +{ + return 0; +} + +static inline void kvmppc_svm_off(Error **errp) +{ + return; +} + +static inline void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, + unsigned int online) +{ + return; +} + +static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset) +{ +} + +static inline bool kvmppc_spapr_use_multitce(void) +{ + return false; +} + +static inline int kvmppc_spapr_enable_inkernel_multitce(void) +{ + return -1; +} + +static inline void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift, + uint64_t bus_offset, + uint32_t nb_table, + int *pfd, bool need_vfio) +{ + return NULL; +} + +static inline int kvmppc_remove_spapr_tce(void *table, int pfd, + uint32_t nb_table) +{ + return -1; +} + +static inline int kvmppc_reset_htab(int shift_hint) +{ + return 0; +} + +static inline uint64_t kvmppc_vrma_limit(unsigned int hash_shift) +{ + g_assert_not_reached(); +} + +static inline bool kvmppc_hpt_needs_host_contiguous_pages(void) +{ + return false; +} + +static inline void kvm_check_mmu(PowerPCCPU *cpu, Error **errp) +{ +} + +static inline bool kvmppc_has_cap_spapr_vfio(void) +{ + return false; +} + +static inline bool kvmppc_has_cap_epr(void) +{ + return false; +} + +static inline int kvmppc_define_rtas_kernel_token(uint32_t token, + const char *function) +{ + return -1; +} + +static inline int kvmppc_get_htab_fd(bool write, uint64_t index, Error 
**errp) +{ + return -1; +} + +static inline int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, + int64_t max_ns) +{ + abort(); +} + +static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index, + uint16_t n_valid, uint16_t n_invalid) +{ + abort(); +} + +static inline void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, + hwaddr ptex, int n) +{ + abort(); +} + +static inline void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1) +{ + abort(); +} + +static inline bool kvmppc_has_cap_fixup_hcalls(void) +{ + abort(); +} + +static inline bool kvmppc_has_cap_htm(void) +{ + return false; +} + +static inline bool kvmppc_has_cap_mmu_radix(void) +{ + return false; +} + +static inline bool kvmppc_has_cap_mmu_hash_v3(void) +{ + return false; +} + +static inline bool kvmppc_has_cap_xive(void) +{ + return false; +} + +static inline int kvmppc_get_cap_safe_cache(void) +{ + return 0; +} + +static inline int kvmppc_get_cap_safe_bounds_check(void) +{ + return 0; +} + +static inline int kvmppc_get_cap_safe_indirect_branch(void) +{ + return 0; +} + +static inline int kvmppc_get_cap_count_cache_flush_assist(void) +{ + return 0; +} + +static inline bool kvmppc_has_cap_nested_kvm_hv(void) +{ + return false; +} + +static inline int kvmppc_set_cap_nested_kvm_hv(int enable) +{ + return -1; +} + +static inline int kvmppc_get_cap_large_decr(void) +{ + return 0; +} + +static inline int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable) +{ + return -1; +} + +static inline int kvmppc_enable_hwrng(void) +{ + return -1; +} + +static inline int kvmppc_put_books_sregs(PowerPCCPU *cpu) +{ + abort(); +} + +static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void) +{ + return NULL; +} + +static inline void kvmppc_check_papr_resize_hpt(Error **errp) +{ + return; +} + +static inline int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, + target_ulong flags, int shift) +{ + return -ENOSYS; +} + +static inline int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, + target_ulong flags, int shift) +{ + return -ENOSYS; +} + +static inline bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu) +{ + return false; +} + +#endif + +#ifndef CONFIG_KVM + +#define kvmppc_eieio() do { } while (0) + +static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len) +{ +} + +static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len) +{ +} + +#else /* CONFIG_KVM */ + +#define kvmppc_eieio() \ + do { \ + if (kvm_enabled()) { \ + asm volatile("eieio" : : : "memory"); \ + } \ + } while (0) + +/* Store data cache blocks back to memory */ +static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len) +{ + uint8_t *p; + + for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) { + asm volatile("dcbst 0,%0" : : "r"(p) : "memory"); + } +} + +/* Invalidate instruction cache blocks */ +static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len) +{ + uint8_t *p; + + for (p = addr; p < addr + len; p += cpu->env.icache_line_size) { + asm volatile("icbi 0,%0" : : "r"(p)); + } +} + +#endif /* CONFIG_KVM */ + +#endif /* KVM_PPC_H */ diff --git a/qemu/target/ppc/machine.c b/qemu/target/ppc/machine.c new file mode 100644 index 00000000..71e2d940 --- /dev/null +++ b/qemu/target/ppc/machine.c @@ -0,0 +1,852 @@ +#if 0 +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "helper_regs.h" +#include "mmu-hash64.h" +#include "kvm_ppc.h" +#include "exec/helper-proto.h" + +static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) +{ + 
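/*
 * kvm_ppc.h gives every KVM entry point a !CONFIG_KVM twin above, so
 * target code calls kvmppc_*() unconditionally and the preprocessor
 * selects either the real implementation or an inline stub that returns
 * a neutral value (or aborts where reaching the call without KVM would
 * be a logic error).  The same pattern in miniature, with hypothetical
 * names:
 */
#ifdef CONFIG_FEATURE
int feature_query(void);              /* real version lives in feature.c */
#else
static inline int feature_query(void)
{
    return 0;                         /* harmless default when compiled out */
}
#endif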
PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + unsigned int i, j; + target_ulong sdr1; + uint32_t fpscr, vscr; +#if defined(TARGET_PPC64) + int32_t slb_nr; +#endif + target_ulong xer; + + for (i = 0; i < 32; i++) { + qemu_get_betls(f, &env->gpr[i]); + } +#if !defined(TARGET_PPC64) + for (i = 0; i < 32; i++) { + qemu_get_betls(f, &env->gprh[i]); + } +#endif + qemu_get_betls(f, &env->lr); + qemu_get_betls(f, &env->ctr); + for (i = 0; i < 8; i++) { + qemu_get_be32s(f, &env->crf[i]); + } + qemu_get_betls(f, &xer); + cpu_write_xer(env, xer); + qemu_get_betls(f, &env->reserve_addr); + qemu_get_betls(f, &env->msr); + for (i = 0; i < 4; i++) { + qemu_get_betls(f, &env->tgpr[i]); + } + for (i = 0; i < 32; i++) { + union { + float64 d; + uint64_t l; + } u; + u.l = qemu_get_be64(f); + *cpu_fpr_ptr(env, i) = u.d; + } + qemu_get_be32s(f, &fpscr); + env->fpscr = fpscr; + qemu_get_sbe32s(f, &env->access_type); +#if defined(TARGET_PPC64) + qemu_get_betls(f, &env->spr[SPR_ASR]); + qemu_get_sbe32s(f, &slb_nr); +#endif + qemu_get_betls(f, &sdr1); + for (i = 0; i < 32; i++) { + qemu_get_betls(f, &env->sr[i]); + } + for (i = 0; i < 2; i++) { + for (j = 0; j < 8; j++) { + qemu_get_betls(f, &env->DBAT[i][j]); + } + } + for (i = 0; i < 2; i++) { + for (j = 0; j < 8; j++) { + qemu_get_betls(f, &env->IBAT[i][j]); + } + } + qemu_get_sbe32s(f, &env->nb_tlb); + qemu_get_sbe32s(f, &env->tlb_per_way); + qemu_get_sbe32s(f, &env->nb_ways); + qemu_get_sbe32s(f, &env->last_way); + qemu_get_sbe32s(f, &env->id_tlbs); + qemu_get_sbe32s(f, &env->nb_pids); + if (env->tlb.tlb6) { + /* XXX assumes 6xx */ + for (i = 0; i < env->nb_tlb; i++) { + qemu_get_betls(f, &env->tlb.tlb6[i].pte0); + qemu_get_betls(f, &env->tlb.tlb6[i].pte1); + qemu_get_betls(f, &env->tlb.tlb6[i].EPN); + } + } + for (i = 0; i < 4; i++) { + qemu_get_betls(f, &env->pb[i]); + } + for (i = 0; i < 1024; i++) { + qemu_get_betls(f, &env->spr[i]); + } + if (!cpu->vhyp) { + ppc_store_sdr1(env, sdr1); + } + qemu_get_be32s(f, &vscr); + helper_mtvscr(env, vscr); + qemu_get_be64s(f, &env->spe_acc); + qemu_get_be32s(f, &env->spe_fscr); + qemu_get_betls(f, &env->msr_mask); + qemu_get_be32s(f, &env->flags); + qemu_get_sbe32s(f, &env->error_code); + qemu_get_be32s(f, &env->pending_interrupts); + qemu_get_be32s(f, &env->irq_input_state); + for (i = 0; i < POWERPC_EXCP_NB; i++) { + qemu_get_betls(f, &env->excp_vectors[i]); + } + qemu_get_betls(f, &env->excp_prefix); + qemu_get_betls(f, &env->ivor_mask); + qemu_get_betls(f, &env->ivpr_mask); + qemu_get_betls(f, &env->hreset_vector); + qemu_get_betls(f, &env->nip); + qemu_get_betls(f, &env->hflags); + qemu_get_betls(f, &env->hflags_nmsr); + qemu_get_sbe32(f); /* Discard unused mmu_idx */ + qemu_get_sbe32(f); /* Discard unused power_mode */ + + /* Recompute mmu indices */ + hreg_compute_mem_idx(env); + + return 0; +} + +static int get_avr(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + ppc_avr_t *v = pv; + + v->u64[0] = qemu_get_be64(f); + v->u64[1] = qemu_get_be64(f); + + return 0; +} + +static int put_avr(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, QJSON *vmdesc) +{ + ppc_avr_t *v = pv; + + qemu_put_be64(f, v->u64[0]); + qemu_put_be64(f, v->u64[1]); + return 0; +} + +static const VMStateInfo vmstate_info_avr = { + .name = "avr", + .get = get_avr, + .put = put_avr, +}; + +#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t) + +#define VMSTATE_AVR_ARRAY(_f, _s, _n) \ + VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0) + +static 
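/*
 * get_avr()/put_avr() above are VMStateInfo callbacks: when a register
 * has no flat in-memory field to serialize, a get/put pair streams it
 * big-endian half by half and a VMSTATE_*_ARRAY binding points the
 * field table at it.  A minimal sketch of the same hook for a
 * hypothetical two-word value, assuming only the QEMUFile/VMStateInfo
 * interfaces already used in this file:
 */
typedef struct {
    uint64_t hi, lo;                   /* hypothetical 128-bit register */
} pair128_t;

static int get_pair128(QEMUFile *f, void *pv, size_t size,
                       const VMStateField *field)
{
    pair128_t *v = pv;

    v->hi = qemu_get_be64(f);          /* most significant half first */
    v->lo = qemu_get_be64(f);
    return 0;
}

static int put_pair128(QEMUFile *f, void *pv, size_t size,
                       const VMStateField *field, QJSON *vmdesc)
{
    pair128_t *v = pv;

    qemu_put_be64(f, v->hi);
    qemu_put_be64(f, v->lo);
    return 0;
}

static const VMStateInfo vmstate_info_pair128 = {
    .name = "pair128",
    .get = get_pair128,
    .put = put_pair128,
};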
int get_fpr(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + ppc_vsr_t *v = pv; + + v->VsrD(0) = qemu_get_be64(f); + + return 0; +} + +static int put_fpr(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, QJSON *vmdesc) +{ + ppc_vsr_t *v = pv; + + qemu_put_be64(f, v->VsrD(0)); + return 0; +} + +static const VMStateInfo vmstate_info_fpr = { + .name = "fpr", + .get = get_fpr, + .put = put_fpr, +}; + +#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t) + +#define VMSTATE_FPR_ARRAY(_f, _s, _n) \ + VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0) + +static int get_vsr(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + ppc_vsr_t *v = pv; + + v->VsrD(1) = qemu_get_be64(f); + + return 0; +} + +static int put_vsr(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, QJSON *vmdesc) +{ + ppc_vsr_t *v = pv; + + qemu_put_be64(f, v->VsrD(1)); + return 0; +} + +static const VMStateInfo vmstate_info_vsr = { + .name = "vsr", + .get = get_vsr, + .put = put_vsr, +}; + +#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t) + +#define VMSTATE_VSR_ARRAY(_f, _s, _n) \ + VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0) + +static bool cpu_pre_2_8_migration(void *opaque, int version_id) +{ + PowerPCCPU *cpu = opaque; + + return cpu->pre_2_8_migration; +} + +#if defined(TARGET_PPC64) +static bool cpu_pre_3_0_migration(void *opaque, int version_id) +{ + PowerPCCPU *cpu = opaque; + + return cpu->pre_3_0_migration; +} +#endif + +static int cpu_pre_save(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + int i; + uint64_t insns_compat_mask = + PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB + | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES + | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES + | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT + | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ + | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC + | PPC_64B | PPC_64BX | PPC_ALTIVEC + | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD; + uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX + | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 + | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 + | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 + | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 + | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM; + + env->spr[SPR_LR] = env->lr; + env->spr[SPR_CTR] = env->ctr; + env->spr[SPR_XER] = cpu_read_xer(env); +#if defined(TARGET_PPC64) + env->spr[SPR_CFAR] = env->cfar; +#endif + env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr; + + for (i = 0; (i < 4) && (i < env->nb_BATs); i++) { + env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i]; + env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i]; + env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i]; + env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i]; + } + for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) { + env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4]; + env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4]; + env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4]; + env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4]; + } + + /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */ + if (cpu->pre_2_8_migration) { + /* + * Mask out bits that got added to msr_mask since the versions + * which stupidly included it in the migration stream. 
+ */ + target_ulong metamask = 0 +#if defined(TARGET_PPC64) + | (1ULL << MSR_TS0) + | (1ULL << MSR_TS1) +#endif + ; + cpu->mig_msr_mask = env->msr_mask & ~metamask; + cpu->mig_insns_flags = env->insns_flags & insns_compat_mask; + /* + * CPU models supported by old machines all have + * PPC_MEM_TLBIE, so we set it unconditionally to allow + * backward migration from a POWER9 host to a POWER8 host. + */ + cpu->mig_insns_flags |= PPC_MEM_TLBIE; + cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2; + cpu->mig_nb_BATs = env->nb_BATs; + } + if (cpu->pre_3_0_migration) { + if (cpu->hash64_opts) { + cpu->mig_slb_nr = cpu->hash64_opts->slb_size; + } + } + + return 0; +} + +/* + * Determine if a given PVR is a "close enough" match to the CPU + * object. For TCG and KVM PR it would probably be sufficient to + * require an exact PVR match. However for KVM HV the user is + * restricted to a PVR exactly matching the host CPU. The correct way + * to handle this is to put the guest into an architected + * compatibility mode. However, to allow a more forgiving transition + * and migration from before this was widely done, we allow migration + * between sufficiently similar PVRs, as determined by the CPU class's + * pvr_match() hook. + */ +static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + if (pvr == pcc->pvr) { + return true; + } + return pcc->pvr_match(pcc, pvr); +} + +static int cpu_post_load(void *opaque, int version_id) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + int i; + target_ulong msr; + + /* + * If we're operating in compat mode, we should be ok as long as + * the destination supports the same compatibility mode. + * + * Otherwise, however, we require that the destination has exactly + * the same CPU model as the source. + */ + +#if defined(TARGET_PPC64) + if (cpu->compat_pvr) { + uint32_t compat_pvr = cpu->compat_pvr; + Error *local_err = NULL; + + cpu->compat_pvr = 0; + ppc_set_compat(cpu, compat_pvr, &local_err); + if (local_err) { + error_report_err(local_err); + return -1; + } + } else +#endif + { + if (!pvr_match(cpu, env->spr[SPR_PVR])) { + return -1; + } + } + + /* + * If we're running with KVM HV, there is a chance that the guest + * is running with KVM HV and its kernel does not have the + * capability of dealing with a different PVR other than this + * exact host PVR in KVM_SET_SREGS. If that happens, the + * guest freezes after migration. + * + * The function kvmppc_pvr_workaround_required does this verification + * by first checking if the kernel has the cap, returning true immediately + * if that is the case. Otherwise, it checks if we're running in KVM PR. + * If the guest kernel does not have the cap and we're not running KVM-PR + * (so, it is running KVM-HV), we need to ensure that KVM_SET_SREGS will + * receive the PVR it expects as a workaround. 
+ * + */ + if (kvmppc_pvr_workaround_required(cpu)) { + env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value; + } + + env->lr = env->spr[SPR_LR]; + env->ctr = env->spr[SPR_CTR]; + cpu_write_xer(env, env->spr[SPR_XER]); +#if defined(TARGET_PPC64) + env->cfar = env->spr[SPR_CFAR]; +#endif + env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR]; + + for (i = 0; (i < 4) && (i < env->nb_BATs); i++) { + env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i]; + env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1]; + env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i]; + env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1]; + } + for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) { + env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i]; + env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1]; + env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i]; + env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1]; + } + + if (!cpu->vhyp) { + ppc_store_sdr1(env, env->spr[SPR_SDR1]); + } + + /* + * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB + * before restoring + */ + msr = env->msr; + env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB); + ppc_store_msr(env, msr); + + hreg_compute_mem_idx(env); + + return 0; +} + +static bool fpu_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + return cpu->env.insns_flags & PPC_FLOAT; +} + +static const VMStateDescription vmstate_fpu = { + .name = "cpu/fpu", + .version_id = 1, + .minimum_version_id = 1, + .needed = fpu_needed, + .fields = (VMStateField[]) { + VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32), + VMSTATE_UINTTL(env.fpscr, PowerPCCPU), + VMSTATE_END_OF_LIST() + }, +}; + +static bool altivec_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + return cpu->env.insns_flags & PPC_ALTIVEC; +} + +static int get_vscr(QEMUFile *f, void *opaque, size_t size, + const VMStateField *field) +{ + PowerPCCPU *cpu = opaque; + helper_mtvscr(&cpu->env, qemu_get_be32(f)); + return 0; +} + +static int put_vscr(QEMUFile *f, void *opaque, size_t size, + const VMStateField *field, QJSON *vmdesc) +{ + PowerPCCPU *cpu = opaque; + qemu_put_be32(f, helper_mfvscr(&cpu->env)); + return 0; +} + +static const VMStateInfo vmstate_vscr = { + .name = "cpu/altivec/vscr", + .get = get_vscr, + .put = put_vscr, +}; + +static const VMStateDescription vmstate_altivec = { + .name = "cpu/altivec", + .version_id = 1, + .minimum_version_id = 1, + .needed = altivec_needed, + .fields = (VMStateField[]) { + VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32), + /* + * Save the architecture value of the vscr, not the internally + * expanded version. Since this architecture value does not + * exist in memory to be stored, this requires a bit of hoop + * jumping. We want OFFSET=0 so that we effectively pass CPU + * to the helper functions. 
+ */ + { + .name = "vscr", + .version_id = 0, + .size = sizeof(uint32_t), + .info = &vmstate_vscr, + .flags = VMS_SINGLE, + .offset = 0 + }, + VMSTATE_END_OF_LIST() + }, +}; + +static bool vsx_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + return cpu->env.insns_flags2 & PPC2_VSX; +} + +static const VMStateDescription vmstate_vsx = { + .name = "cpu/vsx", + .version_id = 1, + .minimum_version_id = 1, + .needed = vsx_needed, + .fields = (VMStateField[]) { + VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32), + VMSTATE_END_OF_LIST() + }, +}; + +#ifdef TARGET_PPC64 +/* Transactional memory state */ +static bool tm_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + return msr_ts; +} + +static const VMStateDescription vmstate_tm = { + .name = "cpu/tm", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .needed = tm_needed, + .fields = (VMStateField []) { + VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32), + VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64), + VMSTATE_UINT64(env.tm_cr, PowerPCCPU), + VMSTATE_UINT64(env.tm_lr, PowerPCCPU), + VMSTATE_UINT64(env.tm_ctr, PowerPCCPU), + VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU), + VMSTATE_UINT64(env.tm_amr, PowerPCCPU), + VMSTATE_UINT64(env.tm_ppr, PowerPCCPU), + VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU), + VMSTATE_UINT32(env.tm_vscr, PowerPCCPU), + VMSTATE_UINT64(env.tm_dscr, PowerPCCPU), + VMSTATE_UINT64(env.tm_tar, PowerPCCPU), + VMSTATE_END_OF_LIST() + }, +}; +#endif + +static bool sr_needed(void *opaque) +{ +#ifdef TARGET_PPC64 + PowerPCCPU *cpu = opaque; + + return !(cpu->env.mmu_model & POWERPC_MMU_64); +#else + return true; +#endif +} + +static const VMStateDescription vmstate_sr = { + .name = "cpu/sr", + .version_id = 1, + .minimum_version_id = 1, + .needed = sr_needed, + .fields = (VMStateField[]) { + VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32), + VMSTATE_END_OF_LIST() + }, +}; + +#ifdef TARGET_PPC64 +static int get_slbe(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + ppc_slb_t *v = pv; + + v->esid = qemu_get_be64(f); + v->vsid = qemu_get_be64(f); + + return 0; +} + +static int put_slbe(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, QJSON *vmdesc) +{ + ppc_slb_t *v = pv; + + qemu_put_be64(f, v->esid); + qemu_put_be64(f, v->vsid); + return 0; +} + +static const VMStateInfo vmstate_info_slbe = { + .name = "slbe", + .get = get_slbe, + .put = put_slbe, +}; + +#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t) + +#define VMSTATE_SLB_ARRAY(_f, _s, _n) \ + VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0) + +static bool slb_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + /* We don't support any of the old segment table based 64-bit CPUs */ + return cpu->env.mmu_model & POWERPC_MMU_64; +} + +static int slb_post_load(void *opaque, int version_id) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + int i; + + /* + * We've pulled in the raw esid and vsid values from the migration + * stream, but we need to recompute the page size pointers + */ + for (i = 0; i < cpu->hash64_opts->slb_size; i++) { + if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) { + /* Migration source had bad values in its SLB */ + return -1; + } + } + + return 0; +} + +static const VMStateDescription vmstate_slb = { + .name = "cpu/slb", + .version_id = 1, + .minimum_version_id = 1, + .needed = slb_needed, + .post_load = slb_post_load, + .fields = (VMStateField[]) { + VMSTATE_INT32_TEST(mig_slb_nr, 
PowerPCCPU, cpu_pre_3_0_migration), + VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES), + VMSTATE_END_OF_LIST() + } +}; +#endif /* TARGET_PPC64 */ + +static const VMStateDescription vmstate_tlb6xx_entry = { + .name = "cpu/tlb6xx_entry", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINTTL(pte0, ppc6xx_tlb_t), + VMSTATE_UINTTL(pte1, ppc6xx_tlb_t), + VMSTATE_UINTTL(EPN, ppc6xx_tlb_t), + VMSTATE_END_OF_LIST() + }, +}; + +static bool tlb6xx_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + + return env->nb_tlb && (env->tlb_type == TLB_6XX); +} + +static const VMStateDescription vmstate_tlb6xx = { + .name = "cpu/tlb6xx", + .version_id = 1, + .minimum_version_id = 1, + .needed = tlb6xx_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), + VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU, + env.nb_tlb, + vmstate_tlb6xx_entry, + ppc6xx_tlb_t), + VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_tlbemb_entry = { + .name = "cpu/tlbemb_entry", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(RPN, ppcemb_tlb_t), + VMSTATE_UINTTL(EPN, ppcemb_tlb_t), + VMSTATE_UINTTL(PID, ppcemb_tlb_t), + VMSTATE_UINTTL(size, ppcemb_tlb_t), + VMSTATE_UINT32(prot, ppcemb_tlb_t), + VMSTATE_UINT32(attr, ppcemb_tlb_t), + VMSTATE_END_OF_LIST() + }, +}; + +static bool tlbemb_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + + return env->nb_tlb && (env->tlb_type == TLB_EMB); +} + +static bool pbr403_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + uint32_t pvr = cpu->env.spr[SPR_PVR]; + + return (pvr & 0xffff0000) == 0x00200000; +} + +static const VMStateDescription vmstate_pbr403 = { + .name = "cpu/pbr403", + .version_id = 1, + .minimum_version_id = 1, + .needed = pbr403_needed, + .fields = (VMStateField[]) { + VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_tlbemb = { + .name = "cpu/tlb6xx", + .version_id = 1, + .minimum_version_id = 1, + .needed = tlbemb_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), + VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU, + env.nb_tlb, + vmstate_tlbemb_entry, + ppcemb_tlb_t), + /* 403 protection registers */ + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_pbr403, + NULL + } +}; + +static const VMStateDescription vmstate_tlbmas_entry = { + .name = "cpu/tlbmas_entry", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(mas8, ppcmas_tlb_t), + VMSTATE_UINT32(mas1, ppcmas_tlb_t), + VMSTATE_UINT64(mas2, ppcmas_tlb_t), + VMSTATE_UINT64(mas7_3, ppcmas_tlb_t), + VMSTATE_END_OF_LIST() + }, +}; + +static bool tlbmas_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + + return env->nb_tlb && (env->tlb_type == TLB_MAS); +} + +static const VMStateDescription vmstate_tlbmas = { + .name = "cpu/tlbmas", + .version_id = 1, + .minimum_version_id = 1, + .needed = tlbmas_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), + VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU, + env.nb_tlb, + vmstate_tlbmas_entry, + ppcmas_tlb_t), + VMSTATE_END_OF_LIST() + } +}; + +static bool compat_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + 
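+    /*
+     * compat_pvr implies a virtual hypervisor (vhyp) machine; the
+     * assertion below checks that invariant.
+     */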
+    assert(!(cpu->compat_pvr && !cpu->vhyp));
+    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
+}
+
+static const VMStateDescription vmstate_compat = {
+    .name = "cpu/compat",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = compat_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+const VMStateDescription vmstate_ppc_cpu = {
+    .name = "cpu",
+    .version_id = 5,
+    .minimum_version_id = 5,
+    .minimum_version_id_old = 4,
+    .load_state_old = cpu_load_old,
+    .pre_save = cpu_pre_save,
+    .post_load = cpu_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */
+
+        /* User mode architected state */
+        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
+#if !defined(TARGET_PPC64)
+        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
+#endif
+        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
+        VMSTATE_UINTTL(env.nip, PowerPCCPU),
+
+        /* SPRs */
+        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
+        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),
+
+        /* Reservation */
+        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),
+
+        /* Supervisor mode architected state */
+        VMSTATE_UINTTL(env.msr, PowerPCCPU),
+
+        /* Internal state */
+        VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
+        /* FIXME: access_type? */
+
+        /* Sanity checking */
+        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
+        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
+        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
+                            cpu_pre_2_8_migration),
+        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (const VMStateDescription*[]) {
+        &vmstate_fpu,
+        &vmstate_altivec,
+        &vmstate_vsx,
+        &vmstate_sr,
+#ifdef TARGET_PPC64
+        &vmstate_tm,
+        &vmstate_slb,
+#endif /* TARGET_PPC64 */
+        &vmstate_tlb6xx,
+        &vmstate_tlbemb,
+        &vmstate_tlbmas,
+        &vmstate_compat,
+        NULL
+    }
+};
+#endif
diff --git a/qemu/target/ppc/mem_helper.c b/qemu/target/ppc/mem_helper.c
new file mode 100644
index 00000000..33111f7a
--- /dev/null
+++ b/qemu/target/ppc/mem_helper.c
@@ -0,0 +1,628 @@
+/*
+ * PowerPC memory access emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "helper_regs.h" +#include "exec/cpu_ldst.h" +#include "tcg/tcg.h" +#include "internal.h" +#include "qemu/atomic128.h" + +/* #define DEBUG_OP */ + +static inline bool needs_byteswap(const CPUPPCState *env) +{ +#if defined(TARGET_WORDS_BIGENDIAN) + return msr_le; +#else + return !msr_le; +#endif +} + +/*****************************************************************************/ +/* Memory load and stores */ + +static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr, + target_long arg) +{ +#if defined(TARGET_PPC64) + if (!msr_is_64bit(env, env->msr)) { + return (uint32_t)(addr + arg); + } else +#endif + { + return addr + arg; + } +} + +static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb, + MMUAccessType access_type, int mmu_idx, + uintptr_t raddr) +{ + char *host1, *host2; + uint32_t nb_pg1, nb_pg2; + +#ifdef _MSC_VER + nb_pg1 = 0 - (addr | TARGET_PAGE_MASK); +#else + nb_pg1 = -(addr | TARGET_PAGE_MASK); +#endif + if (likely(nb <= nb_pg1)) { + /* The entire operation is on a single page. */ + return probe_access(env, addr, nb, access_type, mmu_idx, raddr); + } + + /* The operation spans two pages. */ + nb_pg2 = nb - nb_pg1; + host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr); + addr = addr_add(env, addr, nb_pg1); + host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr); + + /* If the two host pages are contiguous, optimize. */ + if (host2 == host1 + nb_pg1) { + return host1; + } + return NULL; +} + +void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg) +{ + uintptr_t raddr = GETPC(); + int mmu_idx = cpu_mmu_index(env, false); + char *host = probe_contiguous(env, addr, (32 - reg) * 4, + MMU_DATA_LOAD, mmu_idx, raddr); + + if (likely(host)) { + /* Fast path -- the entire operation is in RAM at host. */ + for (; reg < 32; reg++) { + env->gpr[reg] = (uint32_t)ldl_be_p(host); + host += 4; + } + } else { + /* Slow path -- at least some of the operation requires i/o. */ + for (; reg < 32; reg++) { + env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr); + addr = addr_add(env, addr, 4); + } + } +} + +void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg) +{ + uintptr_t raddr = GETPC(); + int mmu_idx = cpu_mmu_index(env, false); + char *host = probe_contiguous(env, addr, (32 - reg) * 4, + MMU_DATA_STORE, mmu_idx, raddr); + + if (likely(host)) { + /* Fast path -- the entire operation is in RAM at host. */ + for (; reg < 32; reg++) { + stl_be_p(host, env->gpr[reg]); + host += 4; + } + } else { + /* Slow path -- at least some of the operation requires i/o. */ + for (; reg < 32; reg++) { + cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr); + addr = addr_add(env, addr, 4); + } + } +} + +static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, + uint32_t reg, uintptr_t raddr) +{ + int mmu_idx; + char *host; + uint32_t val; + + if (unlikely(nb == 0)) { + return; + } + + mmu_idx = cpu_mmu_index(env, false); + host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr); + + if (likely(host)) { + /* Fast path -- the entire operation is in RAM at host. 
*/
+        for (; nb > 3; nb -= 4) {
+            env->gpr[reg] = (uint32_t)ldl_be_p(host);
+            reg = (reg + 1) % 32;
+            host += 4;
+        }
+        switch (nb) {
+        default:
+            return;
+        case 1:
+            val = ldub_p(host) << 24;
+            break;
+        case 2:
+            val = lduw_be_p(host) << 16;
+            break;
+        case 3:
+            val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
+            break;
+        }
+    } else {
+        /* Slow path -- at least some of the operation requires i/o. */
+        for (; nb > 3; nb -= 4) {
+            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
+            reg = (reg + 1) % 32;
+            addr = addr_add(env, addr, 4);
+        }
+        switch (nb) {
+        default:
+            return;
+        case 1:
+            val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
+            break;
+        case 2:
+            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
+            break;
+        case 3:
+            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
+            addr = addr_add(env, addr, 2);
+            val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
+            break;
+        }
+    }
+    env->gpr[reg] = val;
+}
+
+void helper_lsw(CPUPPCState *env, target_ulong addr,
+                uint32_t nb, uint32_t reg)
+{
+    do_lsw(env, addr, nb, reg, GETPC());
+}
+
+/*
+ * PPC32 specification says we must generate an exception if rA is in
+ * the range of registers to be loaded. On the other hand, IBM says
+ * this is valid, but rA won't be loaded. For now, I'll follow the
+ * spec...
+ */
+void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
+                 uint32_t ra, uint32_t rb)
+{
+    if (likely(xer_bc != 0)) {
+        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
+        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
+                     lsw_reg_in_range(reg, num_used_regs, rb))) {
+            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+                                   POWERPC_EXCP_INVAL |
+                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
+        } else {
+            do_lsw(env, addr, xer_bc, reg, GETPC());
+        }
+    }
+}
+
+void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
+                 uint32_t reg)
+{
+    uintptr_t raddr = GETPC();
+    int mmu_idx;
+    char *host;
+    uint32_t val;
+
+    if (unlikely(nb == 0)) {
+        return;
+    }
+
+    mmu_idx = cpu_mmu_index(env, false);
+    host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);
+
+    if (likely(host)) {
+        /* Fast path -- the entire operation is in RAM at host. */
+        for (; nb > 3; nb -= 4) {
+            stl_be_p(host, env->gpr[reg]);
+            reg = (reg + 1) % 32;
+            host += 4;
+        }
+        val = env->gpr[reg];
+        switch (nb) {
+        case 1:
+            stb_p(host, val >> 24);
+            break;
+        case 2:
+            stw_be_p(host, val >> 16);
+            break;
+        case 3:
+            stw_be_p(host, val >> 16);
+            stb_p(host + 2, val >> 8);
+            break;
+        }
+    } else {
+        for (; nb > 3; nb -= 4) {
+            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
+            reg = (reg + 1) % 32;
+            addr = addr_add(env, addr, 4);
+        }
+        val = env->gpr[reg];
+        switch (nb) {
+        case 1:
+            cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
+            break;
+        case 2:
+            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
+            break;
+        case 3:
+            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
+            addr = addr_add(env, addr, 2);
+            cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
+            break;
+        }
+    }
+}
+
+static void dcbz_common(CPUPPCState *env, target_ulong addr,
+                        uint32_t opcode, bool epid, uintptr_t retaddr)
+{
+    target_ulong mask, dcbz_size = env->dcache_line_size;
+    uint32_t i;
+    void *haddr;
+    int mmu_idx = epid ? 
PPC_TLB_EPID_STORE : env->dmmu_idx; + +#if defined(TARGET_PPC64) + /* Check for dcbz vs dcbzl on 970 */ + if (env->excp_model == POWERPC_EXCP_970 && + !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) { + dcbz_size = 32; + } +#endif + + /* Align address */ + mask = ~(dcbz_size - 1); + addr &= mask; + + /* Check reservation */ + if ((env->reserve_addr & mask) == addr) { +#ifdef _MSC_VER + env->reserve_addr = (target_ulong)(0ULL - 1ULL); +#else + env->reserve_addr = (target_ulong)-1ULL; +#endif + } + + /* Try fast path translate */ + haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr); + if (haddr) { + memset(haddr, 0, dcbz_size); + } else { + /* Slow path */ + for (i = 0; i < dcbz_size; i += 8) { + cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr); + } + } +} + +void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode) +{ + dcbz_common(env, addr, opcode, false, GETPC()); +} + +void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode) +{ + dcbz_common(env, addr, opcode, true, GETPC()); +} + +void helper_icbi(CPUPPCState *env, target_ulong addr) +{ + addr &= ~(env->dcache_line_size - 1); + /* + * Invalidate one cache line : + * PowerPC specification says this is to be treated like a load + * (not a fetch) by the MMU. To be sure it will be so, + * do the load "by hand". + */ + cpu_ldl_data_ra(env, addr, GETPC()); +} + +void helper_icbiep(CPUPPCState *env, target_ulong addr) +{ + /* See comments above */ + addr &= ~(env->dcache_line_size - 1); + cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC()); +} + +/* XXX: to be tested */ +target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg, + uint32_t ra, uint32_t rb) +{ + int i, c, d; + + d = 24; + for (i = 0; i < xer_bc; i++) { + c = cpu_ldub_data_ra(env, addr, GETPC()); + addr = addr_add(env, addr, 1); + /* ra (if not 0) and rb are never modified */ + if (likely(reg != rb && (ra == 0 || reg != ra))) { + env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d); + } + if (unlikely(c == xer_cmp)) { + break; + } + if (likely(d != 0)) { + d -= 8; + } else { + d = 24; + reg++; + reg = reg & 0x1F; + } + } + return i; +} + +#ifdef TARGET_PPC64 +uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr, + uint32_t opidx) +{ + Int128 ret; + + /* We will have raised EXCP_ATOMIC from the translator. */ + assert(HAVE_ATOMIC128); + ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC()); + env->retxh = int128_gethi(ret); + return int128_getlo(ret); +} + +uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr, + uint32_t opidx) +{ + Int128 ret; + + /* We will have raised EXCP_ATOMIC from the translator. */ + assert(HAVE_ATOMIC128); + ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC()); + env->retxh = int128_gethi(ret); + return int128_getlo(ret); +} + +void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr, + uint64_t lo, uint64_t hi, uint32_t opidx) +{ + Int128 val; + + /* We will have raised EXCP_ATOMIC from the translator. */ + assert(HAVE_ATOMIC128); + val = int128_make128(lo, hi); + helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC()); +} + +void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr, + uint64_t lo, uint64_t hi, uint32_t opidx) +{ + Int128 val; + + /* We will have raised EXCP_ATOMIC from the translator. 
*/ + assert(HAVE_ATOMIC128); + val = int128_make128(lo, hi); + helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC()); +} + +uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr, + uint64_t new_lo, uint64_t new_hi, + uint32_t opidx) +{ + bool success = false; + + /* We will have raised EXCP_ATOMIC from the translator. */ + assert(HAVE_CMPXCHG128); + + if (likely(addr == env->reserve_addr)) { + Int128 oldv, cmpv, newv; + + cmpv = int128_make128(env->reserve_val2, env->reserve_val); + newv = int128_make128(new_lo, new_hi); + oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, + opidx, GETPC()); + success = int128_eq(oldv, cmpv); + } + env->reserve_addr = -1; + return env->so + success * CRF_EQ_BIT; +} + +uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr, + uint64_t new_lo, uint64_t new_hi, + uint32_t opidx) +{ + bool success = false; + + /* We will have raised EXCP_ATOMIC from the translator. */ + assert(HAVE_CMPXCHG128); + + if (likely(addr == env->reserve_addr)) { + Int128 oldv, cmpv, newv; + + cmpv = int128_make128(env->reserve_val2, env->reserve_val); + newv = int128_make128(new_lo, new_hi); + oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, + opidx, GETPC()); + success = int128_eq(oldv, cmpv); + } + env->reserve_addr = -1; + return env->so + success * CRF_EQ_BIT; +} +#endif + +/*****************************************************************************/ +/* Altivec extension helpers */ +#if defined(HOST_WORDS_BIGENDIAN) +#define HI_IDX 0 +#define LO_IDX 1 +#else +#define HI_IDX 1 +#define LO_IDX 0 +#endif + +/* + * We use msr_le to determine index ordering in a vector. However, + * byteswapping is not simply controlled by msr_le. We also need to + * take into account endianness of the target. This is done for the + * little-endian PPC64 user-mode target. + */ + +#define LVE(name, access, swap, element) \ + void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ + target_ulong addr) \ + { \ + size_t n_elems = ARRAY_SIZE(r->element); \ + int adjust = HI_IDX * (n_elems - 1); \ + int sh = sizeof(r->element[0]) >> 1; \ + int index = (addr & 0xf) >> sh; \ + if (msr_le) { \ + index = n_elems - index - 1; \ + } \ + \ + if (needs_byteswap(env)) { \ + r->element[LO_IDX ? index : (adjust - index)] = \ + swap(access(env, addr, GETPC())); \ + } else { \ + r->element[LO_IDX ? index : (adjust - index)] = \ + access(env, addr, GETPC()); \ + } \ + } +#define I(x) (x) +LVE(lvebx, cpu_ldub_data_ra, I, u8) +LVE(lvehx, cpu_lduw_data_ra, bswap16, u16) +LVE(lvewx, cpu_ldl_data_ra, bswap32, u32) +#undef I +#undef LVE + +#define STVE(name, access, swap, element) \ + void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ + target_ulong addr) \ + { \ + size_t n_elems = ARRAY_SIZE(r->element); \ + int adjust = HI_IDX * (n_elems - 1); \ + int sh = sizeof(r->element[0]) >> 1; \ + int index = (addr & 0xf) >> sh; \ + if (msr_le) { \ + index = n_elems - index - 1; \ + } \ + \ + if (needs_byteswap(env)) { \ + access(env, addr, swap(r->element[LO_IDX ? index : \ + (adjust - index)]), \ + GETPC()); \ + } else { \ + access(env, addr, r->element[LO_IDX ? 
index :                                  \
+                                   (adjust - index)], GETPC());      \
+        }                                                            \
+    }
+#define I(x) (x)
+STVE(stvebx, cpu_stb_data_ra, I, u8)
+STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
+STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
+#undef I
+#undef STVE
+
+#ifdef TARGET_PPC64
+#define GET_NB(rb) ((rb >> 56) & 0xFF)
+
+#define VSX_LXVL(name, lj)                                            \
+void helper_##name(CPUPPCState *env, target_ulong addr,              \
+                   ppc_vsr_t *xt, target_ulong rb)                   \
+{                                                                    \
+    ppc_vsr_t t;                                                     \
+    uint64_t nb = GET_NB(rb);                                        \
+    int i;                                                           \
+                                                                     \
+    t.s128 = int128_zero();                                          \
+    if (nb) {                                                        \
+        nb = (nb >= 16) ? 16 : nb;                                   \
+        if (msr_le && !lj) {                                         \
+            for (i = 16; i > 16 - nb; i--) {                         \
+                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
+                addr = addr_add(env, addr, 1);                       \
+            }                                                        \
+        } else {                                                     \
+            for (i = 0; i < nb; i++) {                               \
+                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());    \
+                addr = addr_add(env, addr, 1);                       \
+            }                                                        \
+        }                                                            \
+    }                                                                \
+    *xt = t;                                                         \
+}
+
+VSX_LXVL(lxvl, 0)
+VSX_LXVL(lxvll, 1)
+#undef VSX_LXVL
+
+#define VSX_STXVL(name, lj)                                       \
+void helper_##name(CPUPPCState *env, target_ulong addr,          \
+                   ppc_vsr_t *xt, target_ulong rb)               \
+{                                                                \
+    target_ulong nb = GET_NB(rb);                                \
+    int i;                                                       \
+                                                                 \
+    if (!nb) {                                                   \
+        return;                                                  \
+    }                                                            \
+                                                                 \
+    nb = (nb >= 16) ? 16 : nb;                                   \
+    if (msr_le && !lj) {                                         \
+        for (i = 16; i > 16 - nb; i--) {                         \
+            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
+            addr = addr_add(env, addr, 1);                       \
+        }                                                        \
+    } else {                                                     \
+        for (i = 0; i < nb; i++) {                               \
+            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());    \
+            addr = addr_add(env, addr, 1);                       \
+        }                                                        \
+    }                                                            \
+}
+
+VSX_STXVL(stxvl, 0)
+VSX_STXVL(stxvll, 1)
+#undef VSX_STXVL
+#undef GET_NB
+#endif /* TARGET_PPC64 */
+
+#undef HI_IDX
+#undef LO_IDX
+
+void helper_tbegin(CPUPPCState *env)
+{
+    /*
+     * As a degenerate implementation, always fail tbegin. The reason
+     * given is "Nesting overflow". The "persistent" bit is set,
+     * providing a hint to the error handler to not retry. The TFIAR
+     * captures the address of the failure, which is this tbegin
+     * instruction. Instruction execution will continue with the next
+     * instruction in memory, which is precisely what we want.
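+     * A guest that checks CR0 after tbegin. will therefore always
+     * take its non-transactional fallback path.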
+     */
+
+    env->spr[SPR_TEXASR] =
+        (1ULL << TEXASR_FAILURE_PERSISTENT) |
+        (1ULL << TEXASR_NESTING_OVERFLOW) |
+        (msr_hv << TEXASR_PRIVILEGE_HV) |
+        (msr_pr << TEXASR_PRIVILEGE_PR) |
+        (1ULL << TEXASR_FAILURE_SUMMARY) |
+        (1ULL << TEXASR_TFIAR_EXACT);
+    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
+    env->spr[SPR_TFHAR] = env->nip + 4;
+    env->crf[0] = 0xB; /* 0b1011 = transaction failure */
+}
diff --git a/qemu/target/ppc/mfrom_table.inc.c b/qemu/target/ppc/mfrom_table.inc.c
new file mode 100644
index 00000000..1653b974
--- /dev/null
+++ b/qemu/target/ppc/mfrom_table.inc.c
@@ -0,0 +1,78 @@
+static const uint8_t mfrom_ROM_table[602] = {
+    77, 77, 76, 76, 75, 75, 74, 74,
+    73, 73, 72, 72, 71, 71, 70, 70,
+    69, 69, 68, 68, 68, 67, 67, 66,
+    66, 65, 65, 64, 64, 64, 63, 63,
+    62, 62, 61, 61, 61, 60, 60, 59,
+    59, 58, 58, 58, 57, 57, 56, 56,
+    56, 55, 55, 54, 54, 54, 53, 53,
+    53, 52, 52, 51, 51, 51, 50, 50,
+    50, 49, 49, 49, 48, 48, 47, 47,
+    47, 46, 46, 46, 45, 45, 45, 44,
+    44, 44, 43, 43, 43, 42, 42, 42,
+    42, 41, 41, 41, 40, 40, 40, 39,
+    39, 39, 39, 38, 38, 38, 37, 37,
+    37, 37, 36, 36, 36, 35, 35, 35,
+    35, 34, 34, 34, 34, 33, 33, 33,
+    33, 32, 32, 32, 32, 31, 31, 31,
+    31, 30, 30, 30, 30, 29, 29, 29,
+    29, 28, 28, 28, 28, 28, 27, 27,
+    27, 27, 26, 26, 26, 26, 26, 25,
+    25, 25, 25, 25, 24, 24, 24, 24,
+    24, 23, 23, 23, 23, 23, 23, 22,
+    22, 22, 22, 22, 21, 21, 21, 21,
+    21, 21, 20, 20, 20, 20, 20, 20,
+    19, 19, 19, 19, 19, 19, 19, 18,
+    18, 18, 18, 18, 18, 17, 17, 17,
+    17, 17, 17, 17, 16, 16, 16, 16,
+    16, 16, 16, 16, 15, 15, 15, 15,
+    15, 15, 15, 15, 14, 14, 14, 14,
+    14, 14, 14, 14, 13, 13, 13, 13,
+    13, 13, 13, 13, 13, 12, 12, 12,
+    12, 12, 12, 12, 12, 12, 12, 11,
+    11, 11, 11, 11, 11, 11, 11, 11,
+    11, 11, 10, 10, 10, 10, 10, 10,
+    10, 10, 10, 10, 10, 9, 9, 9,
+    9, 9, 9, 9, 9, 9, 9, 9,
+    9, 9, 8, 8, 8, 8, 8, 8,
+    8, 8, 8, 8, 8, 8, 8, 8,
+    7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7,
+    7, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 4,
+    4, 4, 4, 4, 4, 4, 4, 3,
+    3, 3, 3, 3, 3, 3, 3, 3,
+    3, 3, 3, 3, 3, 3, 3, 3,
+    3, 3, 3, 3, 3, 3, 3, 3,
+    3, 3, 3, 3, 3, 3, 3, 3,
+    3, 3, 3, 3, 3, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 0,
+};
diff --git a/qemu/target/ppc/mfrom_table_gen.c b/qemu/target/ppc/mfrom_table_gen.c
new file mode 100644
index 00000000..f96c4268
--- /dev/null
+++ b/qemu/target/ppc/mfrom_table_gen.c
@@ -0,0 +1,34 @@
+#define _GNU_SOURCE
+#include "qemu/osdep.h"
+#include <math.h>
+
+int main(void)
+{
+    double d;
+    uint8_t n;
+    int i;
+
+    printf("static const uint8_t mfrom_ROM_table[602] =\n{\n    ");
+    for (i = 0; i < 602; i++) {
+        /*
+         * Extremely decomposed:
+         *                    -T0 / 256
+         * T0 = 256 * log10(10          + 1.0) + 0.5
+         */
+        d = -i;
+        d /= 256.0;
+        d = exp10(d);
+        d += 1.0;
+        d = log10(d);
+        d *= 256;
+        d += 0.5;
+        n = d;
+        printf("%3d, ", n);
+        if ((i & 7) == 7) {
+            printf("\n    ");
+        }
+    }
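+    /*
+     * The emitted table matches mfrom_table.inc.c above, which the
+     * 602's mfrom helper #includes.
+     */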
+    printf("\n};\n");
+
+    return 0;
+}
diff --git a/qemu/target/ppc/misc_helper.c b/qemu/target/ppc/misc_helper.c
new file mode 100644
index 00000000..3744d2cb
--- /dev/null
+++ b/qemu/target/ppc/misc_helper.c
@@ -0,0 +1,308 @@
+/*
+ * Miscellaneous PowerPC emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+#include "helper_regs.h"
+
+/*****************************************************************************/
+/* SPR accesses */
+void helper_load_dump_spr(CPUPPCState *env, uint32_t sprn)
+{
+#if 0
+    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
+             env->spr[sprn]);
+#endif
+}
+
+void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn)
+{
+#if 0
+    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
+             env->spr[sprn]);
+#endif
+}
+
+#ifdef TARGET_PPC64
+static void raise_hv_fu_exception(CPUPPCState *env, uint32_t bit,
+                                  const char *caller, uint32_t cause,
+                                  uintptr_t raddr)
+{
+    qemu_log_mask(CPU_LOG_INT, "HV Facility %d is unavailable (%s)\n",
+                  bit, caller);
+
+    env->spr[SPR_HFSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);
+
+    raise_exception_err_ra(env, POWERPC_EXCP_HV_FU, cause, raddr);
+}
+
+static void raise_fu_exception(CPUPPCState *env, uint32_t bit,
+                               uint32_t sprn, uint32_t cause,
+                               uintptr_t raddr)
+{
+    // qemu_log("Facility SPR %d is unavailable (SPR FSCR:%d)\n", sprn, bit);
+
+    env->spr[SPR_FSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);
+    cause &= FSCR_IC_MASK;
+    env->spr[SPR_FSCR] |= (target_ulong)cause << FSCR_IC_POS;
+
+    raise_exception_err_ra(env, POWERPC_EXCP_FU, 0, raddr);
+}
+#endif
+
+void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit,
+                                 const char *caller, uint32_t cause)
+{
+#ifdef TARGET_PPC64
+    if ((env->msr_mask & MSR_HVB) && !msr_hv &&
+        !(env->spr[SPR_HFSCR] & (1ULL << bit))) {
+        raise_hv_fu_exception(env, bit, caller, cause, GETPC());
+    }
+#endif
+}
+
+void helper_fscr_facility_check(CPUPPCState *env, uint32_t bit,
+                                uint32_t sprn, uint32_t cause)
+{
+#ifdef TARGET_PPC64
+    if (env->spr[SPR_FSCR] & (1ULL << bit)) {
+        /* Facility is enabled, continue */
+        return;
+    }
+    raise_fu_exception(env, bit, sprn, cause, GETPC());
+#endif
+}
+
+void helper_msr_facility_check(CPUPPCState *env, uint32_t bit,
+                               uint32_t sprn, uint32_t cause)
+{
+#ifdef TARGET_PPC64
+    if (env->msr & (1ULL << bit)) {
+        /* Facility is enabled, continue */
+        return;
+    }
+    raise_fu_exception(env, bit, sprn, cause, GETPC());
+#endif
+}
+
+void helper_store_sdr1(CPUPPCState *env, target_ulong val)
+{
+    if (env->spr[SPR_SDR1] != val) {
+        ppc_store_sdr1(env, val);
+        tlb_flush(env_cpu(env));
+    }
+}
+
+#if defined(TARGET_PPC64)
+void helper_store_ptcr(CPUPPCState *env, target_ulong val)
+{
+    if (env->spr[SPR_PTCR] != val) {
+        ppc_store_ptcr(env, val);
+        
tlb_flush(env_cpu(env)); + } +} + +void helper_store_pcr(CPUPPCState *env, target_ulong value) +{ + PowerPCCPU *cpu = env_archcpu(env); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + env->spr[SPR_PCR] = value & pcc->pcr_mask; +} + +/* + * DPDES register is shared. Each bit reflects the state of the + * doorbell interrupt of a thread of the same core. + */ +target_ulong helper_load_dpdes(CPUPPCState *env) +{ + target_ulong dpdes = 0; + + helper_hfscr_facility_check(env, HFSCR_MSGP, "load DPDES", HFSCR_IC_MSGP); + + /* TODO: TCG supports only one thread */ + if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { + dpdes = 1; + } + + return dpdes; +} + +void helper_store_dpdes(CPUPPCState *env, target_ulong val) +{ + PowerPCCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + + helper_hfscr_facility_check(env, HFSCR_MSGP, "store DPDES", HFSCR_IC_MSGP); + + /* TODO: TCG supports only one thread */ + if (val & ~0x1) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid DPDES register value " + TARGET_FMT_lx"\n", val); + return; + } + + if (val & 0x1) { + env->pending_interrupts |= 1 << PPC_INTERRUPT_DOORBELL; + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); + } +} +#endif /* defined(TARGET_PPC64) */ + +void helper_store_pidr(CPUPPCState *env, target_ulong val) +{ + env->spr[SPR_BOOKS_PID] = val; + tlb_flush(env_cpu(env)); +} + +void helper_store_lpidr(CPUPPCState *env, target_ulong val) +{ + env->spr[SPR_LPIDR] = val; + + /* + * We need to flush the TLB on LPID changes as we only tag HV vs + * guest in TCG TLB. Also the quadrants means the HV will + * potentially access and cache entries for the current LPID as + * well. + */ + tlb_flush(env_cpu(env)); +} + +void helper_store_hid0_601(CPUPPCState *env, target_ulong val) +{ + target_ulong hid0; + + hid0 = env->spr[SPR_HID0]; + if ((val ^ hid0) & 0x00000008) { + /* Change current endianness */ + env->hflags &= ~(1 << MSR_LE); + env->hflags_nmsr &= ~(1 << MSR_LE); + env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE); + env->hflags |= env->hflags_nmsr; + // qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__, + // val & 0x8 ? 'l' : 'b', env->hflags); + } + env->spr[SPR_HID0] = (uint32_t)val; +} + +void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value) +{ + if (likely(env->pb[num] != value)) { + env->pb[num] = value; + /* Should be optimized */ + tlb_flush(env_cpu(env)); + } +} + +void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val) +{ + store_40x_dbcr0(env, val); +} + +void helper_store_40x_sler(CPUPPCState *env, target_ulong val) +{ + store_40x_sler(env, val); +} + +/*****************************************************************************/ +/* PowerPC 601 specific instructions (POWER bridge) */ + +target_ulong helper_clcs(CPUPPCState *env, uint32_t arg) +{ + switch (arg) { + case 0x0CUL: + /* Instruction cache line size */ + return env->icache_line_size; + break; + case 0x0DUL: + /* Data cache line size */ + return env->dcache_line_size; + break; + case 0x0EUL: + /* Minimum cache line size */ + return (env->icache_line_size < env->dcache_line_size) ? + env->icache_line_size : env->dcache_line_size; + break; + case 0x0FUL: + /* Maximum cache line size */ + return (env->icache_line_size > env->dcache_line_size) ? 
+               env->icache_line_size : env->dcache_line_size;
+        break;
+    default:
+        /* Undefined */
+        return 0;
+        break;
+    }
+}
+
+/*****************************************************************************/
+/* Special registers manipulation */
+
+/* GDBstub can read and write MSR... */
+void ppc_store_msr(CPUPPCState *env, target_ulong value)
+{
+    hreg_store_msr(env, value, 0);
+}
+
+/*
+ * This code is lifted from MacOnLinux. It is called whenever THRM1, 2
+ * or 3 is read and fixes up the values in such a way that MacOS does
+ * not hang. These registers exist on some 75x and 74xx
+ * processors.
+ */
+void helper_fixup_thrm(CPUPPCState *env)
+{
+    target_ulong v, t;
+    int i;
+
+#define THRM1_TIN       (1 << 31)
+#define THRM1_TIV       (1 << 30)
+#define THRM1_THRES(x)  (((x) & 0x7f) << 23)
+#define THRM1_TID       (1 << 2)
+#define THRM1_TIE       (1 << 1)
+#define THRM1_V         (1 << 0)
+#define THRM3_E         (1 << 0)
+
+    if (!(env->spr[SPR_THRM3] & THRM3_E)) {
+        return;
+    }
+
+    /* Note: Thermal interrupts are unimplemented */
+    for (i = SPR_THRM1; i <= SPR_THRM2; i++) {
+        v = env->spr[i];
+        if (!(v & THRM1_V)) {
+            continue;
+        }
+        v |= THRM1_TIV;
+        v &= ~THRM1_TIN;
+        t = v & THRM1_THRES(127);
+        if ((v & THRM1_TID) && t < THRM1_THRES(24)) {
+            v |= THRM1_TIN;
+        }
+        if (!(v & THRM1_TID) && t > THRM1_THRES(24)) {
+            v |= THRM1_TIN;
+        }
+        env->spr[i] = v;
+    }
+}
diff --git a/qemu/target/ppc/mmu-book3s-v3.c b/qemu/target/ppc/mmu-book3s-v3.c
new file mode 100644
index 00000000..6daace65
--- /dev/null
+++ b/qemu/target/ppc/mmu-book3s-v3.c
@@ -0,0 +1,66 @@
+/*
+ * PowerPC ISAV3 BookS emulation generic mmu helpers for qemu.
+ *
+ * Copyright (c) 2017 Suraj Jitindar Singh, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "mmu-hash64.h"
+#include "mmu-book3s-v3.h"
+#include "mmu-radix64.h"
+
+int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
+                              int mmu_idx)
+{
+    if (ppc64_v3_radix(cpu)) { /* Guest uses radix */
+        return ppc_radix64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx);
+    } else { /* Guest uses hash */
+        return ppc_hash64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx);
+    }
+}
+
+hwaddr ppc64_v3_get_phys_page_debug(PowerPCCPU *cpu, vaddr eaddr)
+{
+    if (ppc64_v3_radix(cpu)) {
+        return ppc_radix64_get_phys_page_debug(cpu, eaddr);
+    } else {
+        return ppc_hash64_get_phys_page_debug(cpu, eaddr);
+    }
+}
+
+bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry)
+{
+    uint64_t patb = cpu->env.spr[SPR_PTCR] & PTCR_PATB;
+    uint64_t pats = cpu->env.spr[SPR_PTCR] & PTCR_PATS;
+
+    /* Calculate number of entries */
+    pats = 1ull << (pats + 12 - 4);
+    if (pats <= lpid) {
+        return false;
+    }
+
+    /* Grab entry */
+    patb += 16 * lpid;
+#ifdef UNICORN_ARCH_POSTFIX
+    entry->dw0 = glue(ldq_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, patb);
+    entry->dw1 = glue(ldq_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, patb + 8);
+#else
+    entry->dw0 = ldq_phys(cpu->env.uc, CPU(cpu)->as, patb);
+    entry->dw1 = ldq_phys(cpu->env.uc, CPU(cpu)->as, patb + 8);
+#endif
+    return true;
+}
diff --git a/qemu/target/ppc/mmu-book3s-v3.h b/qemu/target/ppc/mmu-book3s-v3.h
new file mode 100644
index 00000000..ca7f80ca
--- /dev/null
+++ b/qemu/target/ppc/mmu-book3s-v3.h
@@ -0,0 +1,121 @@
+/*
+ * PowerPC ISAV3 BookS emulation generic mmu definitions for qemu.
+ *
+ * Copyright (c) 2017 Suraj Jitindar Singh, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_MMU_BOOK3S_V3_H
+#define PPC_MMU_BOOK3S_V3_H
+
+#include "mmu-hash64.h"
+
+/*
+ * Partition table definitions
+ */
+#define PTCR_PATB               0x0FFFFFFFFFFFF000ULL /* Partition Table Base */
+#define PTCR_PATS               0x000000000000001FULL /* Partition Table Size */
+
+/* Partition Table Entry Fields */
+#define PATE0_HR 0x8000000000000000
+
+/*
+ * WARNING: This field doesn't actually exist in the final version of
+ * the architecture and is unused by hardware. However, qemu uses it
+ * as an indication of a radix guest in the pseudo-PATB entry that it
+ * maintains for SPAPR guests and in the migration stream, so we need
+ * to keep it around.
+ */
+#define PATE1_GR 0x8000000000000000
+
+/* Process Table Entry */
+struct prtb_entry {
+    uint64_t prtbe0, prtbe1;
+};
+
+#ifdef TARGET_PPC64
+
+static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu)
+{
+    return !!(cpu->env.spr[SPR_LPCR] & LPCR_UPRT);
+}
+
+bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid,
+                       ppc_v3_pate_t *entry);
+
+/*
+ * The LPCR:HR bit is a shortcut that avoids having to
+ * dig out the partition table in the fast path. This is
+ * also how the HW uses it.
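+ * (The PATE0_HR bit in the partition table entry carries the same
+ * radix/hash indication.)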
+ */
+static inline bool ppc64_v3_radix(PowerPCCPU *cpu)
+{
+    return !!(cpu->env.spr[SPR_LPCR] & LPCR_HR);
+}
+
+hwaddr ppc64_v3_get_phys_page_debug(PowerPCCPU *cpu, vaddr eaddr);
+
+int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
+                              int mmu_idx);
+
+static inline hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu)
+{
+    uint64_t base;
+
+#if 0
+    if (cpu->vhyp) {
+        return 0;
+    }
+#endif
+    if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
+        ppc_v3_pate_t pate;
+
+        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
+            return 0;
+        }
+        base = pate.dw0;
+    } else {
+        base = cpu->env.spr[SPR_SDR1];
+    }
+    return base & SDR_64_HTABORG;
+}
+
+static inline hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu)
+{
+    uint64_t base;
+
+#if 0
+    if (cpu->vhyp) {
+        PPCVirtualHypervisorClass *vhc =
+            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+        return vhc->hpt_mask(cpu->vhyp);
+    }
+#endif
+    if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
+        ppc_v3_pate_t pate;
+
+        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
+            return 0;
+        }
+        base = pate.dw0;
+    } else {
+        base = cpu->env.spr[SPR_SDR1];
+    }
+    return (1ULL << ((base & SDR_64_HTABSIZE) + 18 - 7)) - 1;
+}
+
+#endif /* TARGET_PPC64 */
+
+#endif /* PPC_MMU_BOOK3S_V3_H */
diff --git a/qemu/target/ppc/mmu-hash32.c b/qemu/target/ppc/mmu-hash32.c
new file mode 100644
index 00000000..a757df2c
--- /dev/null
+++ b/qemu/target/ppc/mmu-hash32.c
@@ -0,0 +1,606 @@
+/*
+ * PowerPC MMU, TLB and BAT emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright (c) 2013 David Gibson, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "mmu-hash32.h"
+
+/* #define DEBUG_BATS */
+
+#ifdef DEBUG_BATS
+#  define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+#  define LOG_BATS(...) do { } while (0)
+#endif
+
+struct mmu_ctx_hash32 {
+    hwaddr raddr;      /* Real address */
+    int prot;          /* Protection bits */
+    int key;           /* Access key */
+};
+
+static int ppc_hash32_pp_prot(int key, int pp, int nx)
+{
+    int prot;
+
+    if (key == 0) {
+        switch (pp) {
+        case 0x0:
+        case 0x1:
+        case 0x2:
+            prot = PAGE_READ | PAGE_WRITE;
+            break;
+
+        case 0x3:
+            prot = PAGE_READ;
+            break;
+
+        default:
+            abort();
+        }
+    } else {
+        switch (pp) {
+        case 0x0:
+            prot = 0;
+            break;
+
+        case 0x1:
+        case 0x3:
+            prot = PAGE_READ;
+            break;
+
+        case 0x2:
+            prot = PAGE_READ | PAGE_WRITE;
+            break;
+
+        default:
+            abort();
+        }
+    }
+    if (nx == 0) {
+        prot |= PAGE_EXEC;
+    }
+
+    return prot;
+}
+
+static int ppc_hash32_pte_prot(PowerPCCPU *cpu,
+                               target_ulong sr, ppc_hash_pte32_t pte)
+{
+    CPUPPCState *env = &cpu->env;
+    unsigned pp, key;
+
+    key = !!(msr_pr ? 
(sr & SR32_KP) : (sr & SR32_KS)); + pp = pte.pte1 & HPTE32_R_PP; + + return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX)); +} + +static target_ulong hash32_bat_size(PowerPCCPU *cpu, + target_ulong batu, target_ulong batl) +{ + CPUPPCState *env = &cpu->env; + + if ((msr_pr && !(batu & BATU32_VP)) + || (!msr_pr && !(batu & BATU32_VS))) { + return 0; + } + + return BATU32_BEPI & ~((batu & BATU32_BL) << 15); +} + +static int hash32_bat_prot(PowerPCCPU *cpu, + target_ulong batu, target_ulong batl) +{ + int pp, prot; + + prot = 0; + pp = batl & BATL32_PP; + if (pp != 0) { + prot = PAGE_READ | PAGE_EXEC; + if (pp == 0x2) { + prot |= PAGE_WRITE; + } + } + return prot; +} + +static target_ulong hash32_bat_601_size(PowerPCCPU *cpu, + target_ulong batu, target_ulong batl) +{ + if (!(batl & BATL32_601_V)) { + return 0; + } + + return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17); +} + +static int hash32_bat_601_prot(PowerPCCPU *cpu, + target_ulong batu, target_ulong batl) +{ + CPUPPCState *env = &cpu->env; + int key, pp; + + pp = batu & BATU32_601_PP; + if (msr_pr == 0) { + key = !!(batu & BATU32_601_KS); + } else { + key = !!(batu & BATU32_601_KP); + } + return ppc_hash32_pp_prot(key, pp, 0); +} + +static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx, + int *prot) +{ + CPUPPCState *env = &cpu->env; + target_ulong *BATlt, *BATut; + int i; + + LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, + rwx == 2 ? 'I' : 'D', ea); + if (rwx == 2) { + BATlt = env->IBAT[1]; + BATut = env->IBAT[0]; + } else { + BATlt = env->DBAT[1]; + BATut = env->DBAT[0]; + } + for (i = 0; i < env->nb_BATs; i++) { + target_ulong batu = BATut[i]; + target_ulong batl = BATlt[i]; + target_ulong mask; + + if (unlikely(env->mmu_model == POWERPC_MMU_601)) { + mask = hash32_bat_601_size(cpu, batu, batl); + } else { + mask = hash32_bat_size(cpu, batu, batl); + } + LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n", __func__, + type == ACCESS_CODE ? 'I' : 'D', i, ea, batu, batl); + + if (mask && ((ea & mask) == (batu & BATU32_BEPI))) { + hwaddr raddr = (batl & mask) | (ea & ~mask); + + if (unlikely(env->mmu_model == POWERPC_MMU_601)) { + *prot = hash32_bat_601_prot(cpu, batu, batl); + } else { + *prot = hash32_bat_prot(cpu, batu, batl); + } + + return raddr & TARGET_PAGE_MASK; + } + } + + /* No hit */ +#if defined(DEBUG_BATS) + if (qemu_log_enabled()) { + LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", ea); + for (i = 0; i < 4; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & BATU32_BEPIU; + BEPIl = *BATu & BATU32_BEPIL; + bl = (*BATu & 0x00001FFC) << 15; + LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " + TARGET_FMT_lx " " TARGET_FMT_lx "\n", + __func__, type == ACCESS_CODE ? 'I' : 'D', i, ea, + *BATu, *BATl, BEPIu, BEPIl, bl); + } + } +#endif + + return -1; +} + +static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr, + target_ulong eaddr, int rwx, + hwaddr *raddr, int *prot) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS)); + + qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); + + if ((sr & 0x1FF00000) >> 20 == 0x07f) { + /* + * Memory-forced I/O controller interface access + * + * If T=1 and BUID=x'07F', the 601 performs a memory access + * to SR[28-31] LA[4-31], bypassing all protection mechanisms. 
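+         * The physical address is thus SR[28-31] || EA[4-31], which
+         * is exactly what the code below computes.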
+         */
+        *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return 0;
+    }
+
+    if (rwx == 2) {
+        /* No code fetch is allowed in direct-store areas */
+        cs->exception_index = POWERPC_EXCP_ISI;
+        env->error_code = 0x10000000;
+        return 1;
+    }
+
+    switch (env->access_type) {
+    case ACCESS_INT:
+        /* Integer load/store: only access allowed */
+        break;
+    case ACCESS_FLOAT:
+        /* Floating point load/store */
+        cs->exception_index = POWERPC_EXCP_ALIGN;
+        env->error_code = POWERPC_EXCP_ALIGN_FP;
+        env->spr[SPR_DAR] = eaddr;
+        return 1;
+    case ACCESS_RES:
+        /* lwarx, ldarx or stwcx. */
+        env->error_code = 0;
+        env->spr[SPR_DAR] = eaddr;
+        if (rwx == 1) {
+            env->spr[SPR_DSISR] = 0x06000000;
+        } else {
+            env->spr[SPR_DSISR] = 0x04000000;
+        }
+        return 1;
+    case ACCESS_CACHE:
+        /*
+         * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
+         *
+         * Should make the instruction do a no-op. As it already does
+         * a no-op, it's quite easy :-)
+         */
+        *raddr = eaddr;
+        return 0;
+    case ACCESS_EXT:
+        /* eciwx or ecowx */
+        cs->exception_index = POWERPC_EXCP_DSI;
+        env->error_code = 0;
+        env->spr[SPR_DAR] = eaddr;
+        if (rwx == 1) {
+            env->spr[SPR_DSISR] = 0x06100000;
+        } else {
+            env->spr[SPR_DSISR] = 0x04100000;
+        }
+        return 1;
+    default:
+        cpu_abort(cs, "ERROR: instruction should not need "
+                  "address translation\n");
+    }
+    if ((rwx == 1 || key != 1) && (rwx == 0 || key != 0)) {
+        *raddr = eaddr;
+        return 0;
+    } else {
+        cs->exception_index = POWERPC_EXCP_DSI;
+        env->error_code = 0;
+        env->spr[SPR_DAR] = eaddr;
+        if (rwx == 1) {
+            env->spr[SPR_DSISR] = 0x0a000000;
+        } else {
+            env->spr[SPR_DSISR] = 0x08000000;
+        }
+        return 1;
+    }
+}
+
+hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
+{
+    target_ulong mask = ppc_hash32_hpt_mask(cpu);
+
+    return (hash * HASH_PTEG_SIZE_32) & mask;
+}
+
+static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
+                                     bool secondary, target_ulong ptem,
+                                     ppc_hash_pte32_t *pte)
+{
+    hwaddr pte_offset = pteg_off;
+    target_ulong pte0, pte1;
+    int i;
+
+    for (i = 0; i < HPTES_PER_GROUP; i++) {
+        pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
+        /*
+         * pte0 contains the valid bit and must be read before pte1,
+         * otherwise we might see an old pte1 with a new valid bit and
+         * thus an inconsistent hpte value
+         */
+        smp_rmb();
+        pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);
+
+        if ((pte0 & HPTE32_V_VALID)
+            && (secondary == !!(pte0 & HPTE32_V_SECONDARY))
+            && HPTE32_V_COMPARE(pte0, ptem)) {
+            pte->pte0 = pte0;
+            pte->pte1 = pte1;
+            return pte_offset;
+        }
+
+        pte_offset += HASH_PTE_SIZE_32;
+    }
+
+    return -1;
+}
+
+static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1)
+{
+    target_ulong base = ppc_hash32_hpt_base(cpu);
+    hwaddr offset = pte_offset + 6;
+
+    /* The HW performs a non-atomic byte update */
+#ifdef UNICORN_ARCH_POSTFIX
+    glue(stb_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
+#else
+    stb_phys(cpu->env.uc, CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
+#endif
+}
+
+static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1)
+{
+    target_ulong base = ppc_hash32_hpt_base(cpu);
+    hwaddr offset = pte_offset + 7;
+
+    /* The HW performs a non-atomic byte update */
+#ifdef UNICORN_ARCH_POSTFIX
+    glue(stb_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
+#else
+    stb_phys(cpu->env.uc, CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
+#endif
+}
+
+static hwaddr 
ppc_hash32_htab_lookup(PowerPCCPU *cpu, + target_ulong sr, target_ulong eaddr, + ppc_hash_pte32_t *pte) +{ + hwaddr pteg_off, pte_offset; + hwaddr hash; + uint32_t vsid, pgidx, ptem; + + vsid = sr & SR32_VSID; + pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS; + hash = vsid ^ pgidx; + ptem = (vsid << 7) | (pgidx >> 10); + +#if 0 + /* Page address translation */ + qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx + " htab_mask " TARGET_FMT_plx + " hash " TARGET_FMT_plx "\n", + ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); + + /* Primary PTEG lookup */ + qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx + " vsid=%" PRIx32 " ptem=%" PRIx32 + " hash=" TARGET_FMT_plx "\n", + ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), + vsid, ptem, hash); +#endif + pteg_off = get_pteg_offset32(cpu, hash); + pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte); + if (pte_offset == -1) { + /* Secondary PTEG lookup */ +#if 0 + qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx + " vsid=%" PRIx32 " api=%" PRIx32 + " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu), + ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash); +#endif + pteg_off = get_pteg_offset32(cpu, ~hash); + pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte); + } + + return pte_offset; +} + +static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte, + target_ulong eaddr) +{ + hwaddr rpn = pte.pte1 & HPTE32_R_RPN; + hwaddr mask = ~TARGET_PAGE_MASK; + + return (rpn & ~mask) | (eaddr & mask); +} + +int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, + int mmu_idx) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + target_ulong sr; + hwaddr pte_offset; + ppc_hash_pte32_t pte; + int prot; + const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; + hwaddr raddr; + + assert((rwx == 0) || (rwx == 1) || (rwx == 2)); + + /* 1. Handle real mode accesses */ + if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { + /* Translation is off */ + raddr = eaddr; + tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, + PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, + TARGET_PAGE_SIZE); + return 0; + } + + /* 2. Check Block Address Translation entries (BATs) */ + if (env->nb_BATs != 0) { + raddr = ppc_hash32_bat_lookup(cpu, eaddr, rwx, &prot); + if (raddr != -1) { + if (need_prot[rwx] & ~prot) { + if (rwx == 2) { + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x08000000; + } else { + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = eaddr; + if (rwx == 1) { + env->spr[SPR_DSISR] = 0x0a000000; + } else { + env->spr[SPR_DSISR] = 0x08000000; + } + } + return 1; + } + + tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, + raddr & TARGET_PAGE_MASK, prot, mmu_idx, + TARGET_PAGE_SIZE); + return 0; + } + } + + /* 3. Look up the Segment Register */ + sr = env->sr[eaddr >> 28]; + + /* 4. Handle direct store segments */ + if (sr & SR32_T) { + if (ppc_hash32_direct_store(cpu, sr, eaddr, rwx, + &raddr, &prot) == 0) { + tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, + raddr & TARGET_PAGE_MASK, prot, mmu_idx, + TARGET_PAGE_SIZE); + return 0; + } else { + return 1; + } + } + + /* 5. Check for segment level no-execute violation */ + if ((rwx == 2) && (sr & SR32_NX)) { + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x10000000; + return 1; + } + + /* 6. 
Locate the PTE in the hash table */ + pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte); + if (pte_offset == -1) { + if (rwx == 2) { + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x40000000; + } else { + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = eaddr; + if (rwx == 1) { + env->spr[SPR_DSISR] = 0x42000000; + } else { + env->spr[SPR_DSISR] = 0x40000000; + } + } + + return 1; + } + qemu_log_mask(CPU_LOG_MMU, + "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset); + + /* 7. Check access permissions */ + + prot = ppc_hash32_pte_prot(cpu, sr, pte); + + if (need_prot[rwx] & ~prot) { + /* Access right violation */ + qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); + if (rwx == 2) { + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x08000000; + } else { + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = eaddr; + if (rwx == 1) { + env->spr[SPR_DSISR] = 0x0a000000; + } else { + env->spr[SPR_DSISR] = 0x08000000; + } + } + return 1; + } + + qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); + + /* 8. Update PTE referenced and changed bits if necessary */ + + if (!(pte.pte1 & HPTE32_R_R)) { + ppc_hash32_set_r(cpu, pte_offset, pte.pte1); + } + if (!(pte.pte1 & HPTE32_R_C)) { + if (rwx == 1) { + ppc_hash32_set_c(cpu, pte_offset, pte.pte1); + } else { + /* + * Treat the page as read-only for now, so that a later write + * will pass through this function again to set the C bit + */ + prot &= ~PAGE_WRITE; + } + } + + /* 9. Determine the real address from the PTE */ + + raddr = ppc_hash32_pte_raddr(sr, pte, eaddr); + + tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, + prot, mmu_idx, TARGET_PAGE_SIZE); + + return 0; +} + +hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr) +{ + CPUPPCState *env = &cpu->env; + target_ulong sr; + hwaddr pte_offset; + ppc_hash_pte32_t pte; + int prot; + + if (msr_dr == 0) { + /* Translation is off */ + return eaddr; + } + + if (env->nb_BATs != 0) { + hwaddr raddr = ppc_hash32_bat_lookup(cpu, eaddr, 0, &prot); + if (raddr != -1) { + return raddr; + } + } + + sr = env->sr[eaddr >> 28]; + + if (sr & SR32_T) { + /* FIXME: Add suitable debug support for Direct Store segments */ + return -1; + } + + pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte); + if (pte_offset == -1) { + return -1; + } + + return ppc_hash32_pte_raddr(sr, pte, eaddr) & TARGET_PAGE_MASK; +} diff --git a/qemu/target/ppc/mmu-hash32.h b/qemu/target/ppc/mmu-hash32.h new file mode 100644 index 00000000..9b59eed4 --- /dev/null +++ b/qemu/target/ppc/mmu-hash32.h @@ -0,0 +1,130 @@ +#ifndef MMU_HASH32_H +#define MMU_HASH32_H + +hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash); +hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr); +int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw, + int mmu_idx); + +/* + * Segment register definitions + */ + +#define SR32_T 0x80000000 +#define SR32_KS 0x40000000 +#define SR32_KP 0x20000000 +#define SR32_NX 0x10000000 +#define SR32_VSID 0x00ffffff + +/* + * Block Address Translation (BAT) definitions + */ + +#define BATU32_BEPI 0xfffe0000 +#define BATU32_BL 0x00001ffc +#define BATU32_VS 0x00000002 +#define BATU32_VP 0x00000001 + + +#define BATL32_BRPN 0xfffe0000 +#define BATL32_WIMG 0x00000078 +#define BATL32_PP 0x00000003 + +/* PowerPC 601 has slightly different BAT registers */ + +#define BATU32_601_KS 0x00000008 +#define BATU32_601_KP 0x00000004 +#define 
BATU32_601_PP 0x00000003 + +#define BATL32_601_V 0x00000040 +#define BATL32_601_BL 0x0000003f + +/* + * Hash page table definitions + */ +#define SDR_32_HTABORG 0xFFFF0000UL +#define SDR_32_HTABMASK 0x000001FFUL + +#define HPTES_PER_GROUP 8 +#define HASH_PTE_SIZE_32 8 +#define HASH_PTEG_SIZE_32 (HASH_PTE_SIZE_32 * HPTES_PER_GROUP) + +#define HPTE32_V_VALID 0x80000000 +#define HPTE32_V_VSID 0x7fffff80 +#define HPTE32_V_SECONDARY 0x00000040 +#define HPTE32_V_API 0x0000003f +#define HPTE32_V_COMPARE(x, y) (!(((x) ^ (y)) & 0x7fffffbf)) + +#define HPTE32_R_RPN 0xfffff000 +#define HPTE32_R_R 0x00000100 +#define HPTE32_R_C 0x00000080 +#define HPTE32_R_W 0x00000040 +#define HPTE32_R_I 0x00000020 +#define HPTE32_R_M 0x00000010 +#define HPTE32_R_G 0x00000008 +#define HPTE32_R_WIMG 0x00000078 +#define HPTE32_R_PP 0x00000003 + +static inline hwaddr ppc_hash32_hpt_base(PowerPCCPU *cpu) +{ + return cpu->env.spr[SPR_SDR1] & SDR_32_HTABORG; +} + +static inline hwaddr ppc_hash32_hpt_mask(PowerPCCPU *cpu) +{ + return ((cpu->env.spr[SPR_SDR1] & SDR_32_HTABMASK) << 16) | 0xFFFF; +} + +static inline target_ulong ppc_hash32_load_hpte0(PowerPCCPU *cpu, + hwaddr pte_offset) +{ + target_ulong base = ppc_hash32_hpt_base(cpu); + +#ifdef UNICORN_ARCH_POSTFIX + return glue(ldl_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + pte_offset); +#else + return ldl_phys(cpu->env.uc, CPU(cpu)->as, base + pte_offset); +#endif +} + +static inline target_ulong ppc_hash32_load_hpte1(PowerPCCPU *cpu, + hwaddr pte_offset) +{ + target_ulong base = ppc_hash32_hpt_base(cpu); + +#ifdef UNICORN_ARCH_POSTFIX + return glue(ldl_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2); +#else + return ldl_phys(cpu->env.uc, CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2); +#endif +} + +static inline void ppc_hash32_store_hpte0(PowerPCCPU *cpu, + hwaddr pte_offset, target_ulong pte0) +{ + target_ulong base = ppc_hash32_hpt_base(cpu); + +#ifdef UNICORN_ARCH_POSTFIX + glue(stl_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + pte_offset, pte0); +#else + stl_phys(cpu->env.uc, CPU(cpu)->as, base + pte_offset, pte0); +#endif +} + +static inline void ppc_hash32_store_hpte1(PowerPCCPU *cpu, + hwaddr pte_offset, target_ulong pte1) +{ + target_ulong base = ppc_hash32_hpt_base(cpu); + +#ifdef UNICORN_ARCH_POSTFIX + glue(stl_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1); +#else + stl_phys(cpu->env.uc, CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1); +#endif +} + +typedef struct { + uint32_t pte0, pte1; +} ppc_hash_pte32_t; + +#endif /* MMU_HASH32_H */ diff --git a/qemu/target/ppc/mmu-hash64.c b/qemu/target/ppc/mmu-hash64.c new file mode 100644 index 00000000..78e5540e --- /dev/null +++ b/qemu/target/ppc/mmu-hash64.c @@ -0,0 +1,1289 @@ +/* + * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * Copyright (c) 2013 David Gibson, IBM Corporation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "mmu-hash64.h" +#include "mmu-book3s-v3.h" + +/* #define DEBUG_SLB */ + +#ifdef DEBUG_SLB +# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) +#else +# define LOG_SLB(...) do { } while (0) +#endif + +/* + * SLB handling + */ + +static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr) +{ + CPUPPCState *env = &cpu->env; + uint64_t esid_256M, esid_1T; + int n; + + LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr); + + esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V; + esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V; + + for (n = 0; n < cpu->hash64_opts->slb_size; n++) { + ppc_slb_t *slb = &env->slb[n]; + + LOG_SLB("%s: slot %d %016" PRIx64 " %016" + PRIx64 "\n", __func__, n, slb->esid, slb->vsid); + /* + * We check for 1T matches on all MMUs here - if the MMU + * doesn't have 1T segment support, we will have prevented 1T + * entries from being inserted in the slbmte code. + */ + if (((slb->esid == esid_256M) && + ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M)) + || ((slb->esid == esid_1T) && + ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) { + return slb; + } + } + + return NULL; +} + +void dump_slb(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + int i; + uint64_t slbe, slbv; + +#if 0 + cpu_synchronize_state(CPU(cpu)); + + qemu_printf("SLB\tESID\t\t\tVSID\n"); +#endif + for (i = 0; i < cpu->hash64_opts->slb_size; i++) { + slbe = env->slb[i].esid; + slbv = env->slb[i].vsid; + if (slbe == 0 && slbv == 0) { + continue; + } +#if 0 + qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n", + i, slbe, slbv); +#endif + } +} + +void helper_slbia(CPUPPCState *env, uint32_t ih) +{ + PowerPCCPU *cpu = env_archcpu(env); + int starting_entry; + int n; + + /* + * slbia must always flush all TLB (which is equivalent to ERAT in ppc + * architecture). Matching on SLB_ESID_V is not good enough, because slbmte + * can overwrite a valid SLB without flushing its lookaside information. + * + * It would be possible to keep the TLB in synch with the SLB by flushing + * when a valid entry is overwritten by slbmte, and therefore slbia would + * not have to flush unless it evicts a valid SLB entry. However it is + * expected that slbmte is more common than slbia, and slbia is usually + * going to evict valid SLB entries, so that tradeoff is unlikely to be a + * good one. + * + * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate + * the same SLB entries (everything but entry 0), but differ in what + * "lookaside information" is invalidated. TCG can ignore this and flush + * everything. + * + * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are + * invalidated. 
+ */ + + env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; + + starting_entry = 1; /* default for IH=0,1,2,6 */ + + if (env->mmu_model == POWERPC_MMU_3_00) { + switch (ih) { + case 0x7: + /* invalidate no SLBs, but all lookaside information */ + return; + + case 0x3: + case 0x4: + /* also considers SLB entry 0 */ + starting_entry = 0; + break; + + case 0x5: + /* treat undefined values as ih==0, and warn */ + qemu_log_mask(LOG_GUEST_ERROR, + "slbia undefined IH field %u.\n", ih); + break; + + default: + /* 0,1,2,6 */ + break; + } + } + + for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) { + ppc_slb_t *slb = &env->slb[n]; + + if (!(slb->esid & SLB_ESID_V)) { + continue; + } + if (env->mmu_model == POWERPC_MMU_3_00) { + if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) { + /* preserves entries with a class value of 0 */ + continue; + } + } + + slb->esid &= ~SLB_ESID_V; + } +} + +static void __helper_slbie(CPUPPCState *env, target_ulong addr, + target_ulong global) +{ + PowerPCCPU *cpu = env_archcpu(env); + ppc_slb_t *slb; + + slb = slb_lookup(cpu, addr); + if (!slb) { + return; + } + + if (slb->esid & SLB_ESID_V) { + slb->esid &= ~SLB_ESID_V; + + /* + * XXX: given the fact that segment size is 256 MB or 1TB, + * and we still don't have a tlb_flush_mask(env, n, mask) + * in QEMU, we just invalidate all TLBs + */ + env->tlb_need_flush |= + (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH); + } +} + +void helper_slbie(CPUPPCState *env, target_ulong addr) +{ + __helper_slbie(env, addr, false); +} + +void helper_slbieg(CPUPPCState *env, target_ulong addr) +{ + __helper_slbie(env, addr, true); +} + +int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot, + target_ulong esid, target_ulong vsid) +{ + CPUPPCState *env = &cpu->env; + ppc_slb_t *slb = &env->slb[slot]; + const PPCHash64SegmentPageSizes *sps = NULL; + int i; + + if (slot >= cpu->hash64_opts->slb_size) { + return -1; /* Bad slot number */ + } + if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) { + return -1; /* Reserved bits set */ + } + if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) { + return -1; /* Bad segment size */ + } + if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) { + return -1; /* 1T segment on MMU that doesn't support it */ + } + + for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { + const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i]; + + if (!sps1->page_shift) { + break; + } + + if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) { + sps = sps1; + break; + } + } + + if (!sps) { +#if 0 + error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu + " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx, + slot, esid, vsid); +#endif + return -1; + } + + slb->esid = esid; + slb->vsid = vsid; + slb->sps = sps; + +#if 0 + LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx + " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid, + slb->esid, slb->vsid); +#endif + + return 0; +} + +static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb, + target_ulong *rt) +{ + CPUPPCState *env = &cpu->env; + int slot = rb & 0xfff; + ppc_slb_t *slb = &env->slb[slot]; + + if (slot >= cpu->hash64_opts->slb_size) { + return -1; + } + + *rt = slb->esid; + return 0; +} + +static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb, + target_ulong *rt) +{ + CPUPPCState *env = &cpu->env; + int slot = rb & 0xfff; + ppc_slb_t *slb = &env->slb[slot]; + + if (slot >= cpu->hash64_opts->slb_size) { + return -1; + } + + *rt = slb->vsid; + return 0; +} + +static int 
ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb, + target_ulong *rt) +{ + CPUPPCState *env = &cpu->env; + ppc_slb_t *slb; + + if (!msr_is_64bit(env, env->msr)) { + rb &= 0xffffffff; + } + slb = slb_lookup(cpu, rb); + if (slb == NULL) { +#ifdef _MSC_VER + *rt = (target_ulong)(0UL - 1UL); +#else + *rt = (target_ulong)-1ul; +#endif + } else { + *rt = slb->vsid; + } + return 0; +} + +void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) +{ + PowerPCCPU *cpu = env_archcpu(env); + + if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL, GETPC()); + } +} + +target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) +{ + PowerPCCPU *cpu = env_archcpu(env); + target_ulong rt = 0; + + if (ppc_load_slb_esid(cpu, rb, &rt) < 0) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL, GETPC()); + } + return rt; +} + +target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb) +{ + PowerPCCPU *cpu = env_archcpu(env); + target_ulong rt = 0; + + if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL, GETPC()); + } + return rt; +} + +target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb) +{ + PowerPCCPU *cpu = env_archcpu(env); + target_ulong rt = 0; + + if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL, GETPC()); + } + return rt; +} + +/* Check No-Execute or Guarded Storage */ +static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu, + ppc_hash_pte64_t pte) +{ + /* Exec permissions CANNOT take away read or write permissions */ + return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ? + PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC; +} + +/* Check Basic Storage Protection */ +static int ppc_hash64_pte_prot(PowerPCCPU *cpu, + ppc_slb_t *slb, ppc_hash_pte64_t pte) +{ + CPUPPCState *env = &cpu->env; + unsigned pp, key; + /* + * Some pp bit combinations have undefined behaviour, so default + * to no access in those cases + */ + int prot = 0; + + key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP) + : (slb->vsid & SLB_VSID_KS)); + pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61); + + if (key == 0) { + switch (pp) { + case 0x0: + case 0x1: + case 0x2: + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + break; + + case 0x3: + case 0x6: + prot = PAGE_READ | PAGE_EXEC; + break; + } + } else { + switch (pp) { + case 0x0: + case 0x6: + break; + + case 0x1: + case 0x3: + prot = PAGE_READ | PAGE_EXEC; + break; + + case 0x2: + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + break; + } + } + + return prot; +} + +/* Check the instruction access permissions specified in the IAMR */ +static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key) +{ + CPUPPCState *env = &cpu->env; + int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3; + + /* + * An instruction fetch is permitted if the IAMR bit is 0. + * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit + * can only take away EXEC permissions not READ or WRITE permissions. + * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since + * EXEC permissions are allowed. + */ + return (iamr_bits & 0x1) ? 
PAGE_READ | PAGE_WRITE : + PAGE_READ | PAGE_WRITE | PAGE_EXEC; +} + +static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte) +{ + CPUPPCState *env = &cpu->env; + int key, amrbits; + int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + + /* Only recent MMUs implement Virtual Page Class Key Protection */ + if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) { + return prot; + } + + key = HPTE64_R_KEY(pte.pte1); + amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3; + + /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */ + /* env->spr[SPR_AMR]); */ + + /* + * A store is permitted if the AMR bit is 0. Remove write + * protection if it is set. + */ + if (amrbits & 0x2) { + prot &= ~PAGE_WRITE; + } + /* + * A load is permitted if the AMR bit is 0. Remove read + * protection if it is set. + */ + if (amrbits & 0x1) { + prot &= ~PAGE_READ; + } + + switch (env->mmu_model) { + /* + * MMU version 2.07 and later support IAMR + * Check if the IAMR allows the instruction access - it will return + * PAGE_EXEC if it doesn't (and thus that bit will be cleared) or 0 + * if it does (and prot will be unchanged indicating execution support). + */ + case POWERPC_MMU_2_07: + case POWERPC_MMU_3_00: + prot &= ppc_hash64_iamr_prot(cpu, key); + break; + default: + break; + } + + return prot; +} + +const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu, + hwaddr ptex, int n) +{ + hwaddr pte_offset = ptex * HASH_PTE_SIZE_64; + hwaddr base; + hwaddr plen = n * HASH_PTE_SIZE_64; + const ppc_hash_pte64_t *hptes; + +#if 0 + if (cpu->vhyp) { + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + return vhc->map_hptes(cpu->vhyp, ptex, n); + } +#endif + base = ppc_hash64_hpt_base(cpu); + + if (!base) { + return NULL; + } + + hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false, + MEMTXATTRS_UNSPECIFIED); + if (plen < (n * HASH_PTE_SIZE_64)) { + fprintf(stderr, "%s: Unable to map all requested HPTEs\n", __func__); + } + return hptes; +} + +void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes, + hwaddr ptex, int n) +{ +#if 0 + if (cpu->vhyp) { + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n); + return; + } +#endif + + address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64, + false, n * HASH_PTE_SIZE_64); +} + +static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps, + uint64_t pte0, uint64_t pte1) +{ + int i; + + if (!(pte0 & HPTE64_V_LARGE)) { + if (sps->page_shift != 12) { + /* 4kiB page in a non 4kiB segment */ + return 0; + } + /* Normal 4kiB page */ + return 12; + } + + for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { + const PPCHash64PageSize *ps = &sps->enc[i]; + uint64_t mask; + + if (!ps->page_shift) { + break; + } + + if (ps->page_shift == 12) { + /* L bit is set so this can't be a 4kiB page */ + continue; + } + + mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN; + + if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) { + return ps->page_shift; + } + } + + return 0; /* Bad page size encoding */ +} + +static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1) +{ + /* Insert B into pte0 */ + *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) | + ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) << + (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT)); + + /* Remove B from pte1 */ + *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK; +} + + +static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, + const 
PPCHash64SegmentPageSizes *sps, + target_ulong ptem, + ppc_hash_pte64_t *pte, unsigned *pshift) +{ + int i; + const ppc_hash_pte64_t *pteg; + target_ulong pte0, pte1; + target_ulong ptex; + + ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP; + pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP); + if (!pteg) { + return -1; + } + for (i = 0; i < HPTES_PER_GROUP; i++) { + pte0 = ppc_hash64_hpte0(cpu, pteg, i); + /* + * pte0 contains the valid bit and must be read before pte1, + * otherwise we might see an old pte1 with a new valid bit and + * thus an inconsistent hpte value + */ + smp_rmb(); + pte1 = ppc_hash64_hpte1(cpu, pteg, i); + + /* Convert format if necessary */ + if (cpu->env.mmu_model == POWERPC_MMU_3_00) { + ppc64_v3_new_to_old_hpte(&pte0, &pte1); + } + + /* This compares V, B, H (secondary) and the AVPN */ + if (HPTE64_V_COMPARE(pte0, ptem)) { + *pshift = hpte_page_shift(sps, pte0, pte1); + /* + * If there is no match, ignore the PTE, it could simply + * be for a different segment size encoding and the + * architecture specifies we should not match. Linux will + * potentially leave behind PTEs for the wrong base page + * size when demoting segments. + */ + if (*pshift == 0) { + continue; + } + /* + * We don't do anything with pshift yet as qemu TLB only + * deals with 4K pages anyway + */ + pte->pte0 = pte0; + pte->pte1 = pte1; + ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP); + return ptex + i; + } + } + ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP); + /* + * We didn't find a valid entry. + */ + return -1; +} + +static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu, + ppc_slb_t *slb, target_ulong eaddr, + ppc_hash_pte64_t *pte, unsigned *pshift) +{ + CPUPPCState *env = &cpu->env; + hwaddr hash, ptex; + uint64_t vsid, epnmask, epn, ptem; + const PPCHash64SegmentPageSizes *sps = slb->sps; + + /* + * The SLB store path should prevent any bad page size encodings + * getting in there, so: + */ + assert(sps); + + /* If ISL is set in LPCR we need to clamp the page size to 4K */ + if (env->spr[SPR_LPCR] & LPCR_ISL) { + /* We assume that when using TCG, 4k is first entry of SPS */ + sps = &cpu->hash64_opts->sps[0]; + assert(sps->page_shift == 12); + } + + epnmask = ~((1ULL << sps->page_shift) - 1); + + if (slb->vsid & SLB_VSID_B) { + /* 1TB segment */ + vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; + epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask; + hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift); + } else { + /* 256M segment */ + vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; + epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask; + hash = vsid ^ (epn >> sps->page_shift); + } + ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN); + ptem |= HPTE64_V_VALID; + + /* Page address translation */ + qemu_log_mask(CPU_LOG_MMU, + "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx + " hash " TARGET_FMT_plx "\n", + ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash); + + /* Primary PTEG lookup */ + qemu_log_mask(CPU_LOG_MMU, + "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx + " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx + " hash=" TARGET_FMT_plx "\n", + ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), + vsid, ptem, hash); + ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift); + + if (ptex == -1) { + /* Secondary PTEG lookup */ + ptem |= HPTE64_V_SECONDARY; + qemu_log_mask(CPU_LOG_MMU, + "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx + " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx + " hash=" TARGET_FMT_plx "\n", 
ppc_hash64_hpt_base(cpu), + ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash); + + ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift); + } + + return ptex; +} + +unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu, + uint64_t pte0, uint64_t pte1) +{ + int i; + + if (!(pte0 & HPTE64_V_LARGE)) { + return 12; + } + + /* + * The encodings in env->sps need to be carefully chosen so that + * this gives an unambiguous result. + */ + for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { + const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i]; + unsigned shift; + + if (!sps->page_shift) { + break; + } + + shift = hpte_page_shift(sps, pte0, pte1); + if (shift) { + return shift; + } + } + + return 0; +} + +static bool ppc_hash64_use_vrma(CPUPPCState *env) +{ + switch (env->mmu_model) { + case POWERPC_MMU_3_00: + /* + * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR + * register no longer exist + */ + return true; + + default: + return !!(env->spr[SPR_LPCR] & LPCR_VPM0); + } +} + +static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code) +{ + CPUPPCState *env = &POWERPC_CPU(cs)->env; + bool vpm; + + if (msr_ir) { + vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1); + } else { + vpm = ppc_hash64_use_vrma(env); + } + if (vpm && !msr_hv) { + cs->exception_index = POWERPC_EXCP_HISI; + } else { + cs->exception_index = POWERPC_EXCP_ISI; + } + env->error_code = error_code; +} + +static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr) +{ + CPUPPCState *env = &POWERPC_CPU(cs)->env; + bool vpm; + + if (msr_dr) { + vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1); + } else { + vpm = ppc_hash64_use_vrma(env); + } + if (vpm && !msr_hv) { + cs->exception_index = POWERPC_EXCP_HDSI; + env->spr[SPR_HDAR] = dar; + env->spr[SPR_HDSISR] = dsisr; + } else { + cs->exception_index = POWERPC_EXCP_DSI; + env->spr[SPR_DAR] = dar; + env->spr[SPR_DSISR] = dsisr; + } + env->error_code = 0; +} + + +static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) +{ + hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16; + +#if 0 + if (cpu->vhyp) { + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + vhc->hpte_set_r(cpu->vhyp, ptex, pte1); + return; + } +#endif + base = ppc_hash64_hpt_base(cpu); + + + /* The HW performs a non-atomic byte update */ +#ifdef UNICORN_ARCH_POSTFIX + glue(stb_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01); +#else + stb_phys(cpu->env.uc, CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01); +#endif +} + +static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) +{ + hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15; + +#if 0 + if (cpu->vhyp) { + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + vhc->hpte_set_c(cpu->vhyp, ptex, pte1); + return; + } +#endif + base = ppc_hash64_hpt_base(cpu); + + /* The HW performs a non-atomic byte update */ +#ifdef UNICORN_ARCH_POSTFIX + glue(stb_phys, UNICORN_ARCH_POSTFIX)(cpu->env.uc, CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80); +#else + stb_phys(cpu->env.uc, CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80); +#endif +} + +static target_ulong rmls_limit(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + /* + * In theory the meanings of RMLS values are implementation + * dependent. In practice, this seems to have been the set from + * POWER4+..POWER8, and RMLS is no longer supported in POWER9. + * + * Unsupported values mean the OS has shot itself in the + * foot. 
Return a 0-sized RMA in this case, which we expect
+     * to trigger an immediate DSI or ISI
+     */
+    static const target_ulong rma_sizes[16] = {
+        [0] = 256 * GiB,
+        [1] = 16 * GiB,
+        [2] = 1 * GiB,
+        [3] = 64 * MiB,
+        [4] = 256 * MiB,
+        [7] = 128 * MiB,
+        [8] = 32 * MiB,
+    };
+    target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;
+
+    return rma_sizes[rmls];
+}
+
+static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
+{
+    CPUPPCState *env = &cpu->env;
+    target_ulong lpcr = env->spr[SPR_LPCR];
+    uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+    target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
+    int i;
+
+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
+
+        if (!sps->page_shift) {
+            break;
+        }
+
+        if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
+            slb->esid = SLB_ESID_V;
+            slb->vsid = vsid;
+            slb->sps = sps;
+            return 0;
+        }
+    }
+
+#if 0
+    error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
+                 TARGET_FMT_lx"\n", lpcr);
+#endif
+
+    return -1;
+}
+
+int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
+                                int rwx, int mmu_idx)
+{
+    CPUState *cs = CPU(cpu);
+    CPUPPCState *env = &cpu->env;
+    ppc_slb_t vrma_slbe;
+    ppc_slb_t *slb;
+    unsigned apshift;
+    hwaddr ptex;
+    ppc_hash_pte64_t pte;
+    int exec_prot, pp_prot, amr_prot, prot;
+    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
+    hwaddr raddr;
+
+    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
+
+    /*
+     * Note on LPCR usage: 970 uses HID4, but our special variant of
+     * store_spr copies relevant fields into env->spr[SPR_LPCR].
+     * Similarly we filter unimplemented bits when storing into LPCR
+     * depending on the MMU version. This code can thus just use the
+     * LPCR "as-is".
+     */
+
+    /* 1. Handle real mode accesses */
+    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
+        /*
+         * Translation is supposedly "off", but in real mode the top 4
+         * effective address bits are (mostly) ignored
+         */
+        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
+
+#if 0
+        if (cpu->vhyp) {
+            /*
+             * In virtual hypervisor mode, there's nothing to do:
+             * EA == GPA == qemu guest address
+             */
+        } else
+#endif
+        if (msr_hv || !env->has_hv_mode) {
+            /* In HV mode, add HRMOR if top EA bit is clear */
+            if (!(eaddr >> 63)) {
+                raddr |= env->spr[SPR_HRMOR];
+            }
+        } else if (ppc_hash64_use_vrma(env)) {
+            /* Emulated VRMA mode */
+            slb = &vrma_slbe;
+            if (build_vrma_slbe(cpu, slb) != 0) {
+                /* Invalid VRMA setup, machine check */
+                cs->exception_index = POWERPC_EXCP_MCHECK;
+                env->error_code = 0;
+                return 1;
+            }
+
+            goto skip_slb_search;
+        } else {
+            target_ulong limit = rmls_limit(cpu);
+
+            /* Emulated old-style RMO mode, bounds check against RMLS */
+            if (raddr >= limit) {
+                if (rwx == 2) {
+                    ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
+                } else {
+                    int dsisr = DSISR_PROTFAULT;
+                    if (rwx == 1) {
+                        dsisr |= DSISR_ISSTORE;
+                    }
+                    ppc_hash64_set_dsi(cs, eaddr, dsisr);
+                }
+                return 1;
+            }
+
+            raddr |= env->spr[SPR_RMOR];
+        }
+        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
+                     TARGET_PAGE_SIZE);
+        return 0;
+    }
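+
+    /*
+     * Worked example for the RMLS path above, using the rma_sizes[]
+     * table in rmls_limit(): with LPCR[RMLS] = 3 the limit is 64 * MiB,
+     * so a real-mode load at raddr >= 0x04000000 fails the bounds check
+     * and raises a DSI with DSISR_PROTFAULT (a fetch raises an ISI with
+     * SRR1_PROTFAULT instead).
+     */
+
+    /* 2.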
Translation is on, so look up the SLB */ + slb = slb_lookup(cpu, eaddr); + if (!slb) { + /* No entry found, check if in-memory segment tables are in use */ + if (ppc64_use_proc_tbl(cpu)) { + /* TODO - Unsupported */ + fprintf(stderr, "Segment Table Support Unimplemented"); + exit(1); + } + /* Segment still not found, generate the appropriate interrupt */ + if (rwx == 2) { + cs->exception_index = POWERPC_EXCP_ISEG; + env->error_code = 0; + } else { + cs->exception_index = POWERPC_EXCP_DSEG; + env->error_code = 0; + env->spr[SPR_DAR] = eaddr; + } + return 1; + } + +skip_slb_search: + + /* 3. Check for segment level no-execute violation */ + if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) { + ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD); + return 1; + } + + /* 4. Locate the PTE in the hash table */ + ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift); + if (ptex == -1) { + if (rwx == 2) { + ppc_hash64_set_isi(cs, SRR1_NOPTE); + } else { + int dsisr = DSISR_NOPTE; + if (rwx == 1) { + dsisr |= DSISR_ISSTORE; + } + ppc_hash64_set_dsi(cs, eaddr, dsisr); + } + return 1; + } + qemu_log_mask(CPU_LOG_MMU, + "found PTE at index %08" HWADDR_PRIx "\n", ptex); + + /* 5. Check access permissions */ + + exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte); + pp_prot = ppc_hash64_pte_prot(cpu, slb, pte); + amr_prot = ppc_hash64_amr_prot(cpu, pte); + prot = exec_prot & pp_prot & amr_prot; + + if ((need_prot[rwx] & ~prot) != 0) { + /* Access right violation */ + qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); + if (rwx == 2) { + int srr1 = 0; + if (PAGE_EXEC & ~exec_prot) { + srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */ + } else if (PAGE_EXEC & ~pp_prot) { + srr1 |= SRR1_PROTFAULT; /* Access violates access authority */ + } + if (PAGE_EXEC & ~amr_prot) { + srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */ + } + ppc_hash64_set_isi(cs, srr1); + } else { + int dsisr = 0; + if (need_prot[rwx] & ~pp_prot) { + dsisr |= DSISR_PROTFAULT; + } + if (rwx == 1) { + dsisr |= DSISR_ISSTORE; + } + if (need_prot[rwx] & ~amr_prot) { + dsisr |= DSISR_AMR; + } + ppc_hash64_set_dsi(cs, eaddr, dsisr); + } + return 1; + } + + qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); + + /* 6. Update PTE referenced and changed bits if necessary */ + + if (!(pte.pte1 & HPTE64_R_R)) { + ppc_hash64_set_r(cpu, ptex, pte.pte1); + } + if (!(pte.pte1 & HPTE64_R_C)) { + if (rwx == 1) { + ppc_hash64_set_c(cpu, ptex, pte.pte1); + } else { + /* + * Treat the page as read-only for now, so that a later write + * will pass through this function again to set the C bit + */ + prot &= ~PAGE_WRITE; + } + } + + /* 7. 
Determine the real address from the PTE */ + + raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr); + + tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, + prot, mmu_idx, 1ULL << apshift); + + return 0; +} + +hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr) +{ + CPUPPCState *env = &cpu->env; + ppc_slb_t vrma_slbe; + ppc_slb_t *slb; + hwaddr ptex, raddr; + ppc_hash_pte64_t pte; + unsigned apshift; + + /* Handle real mode */ + if (msr_dr == 0) { + /* In real mode the top 4 effective address bits are ignored */ + raddr = addr & 0x0FFFFFFFFFFFFFFFULL; + +#if 0 + if (cpu->vhyp) { + /* + * In virtual hypervisor mode, there's nothing to do: + * EA == GPA == qemu guest address + */ + return raddr; + } else +#endif + if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) { + /* In HV mode, add HRMOR if top EA bit is clear */ + return raddr | env->spr[SPR_HRMOR]; + } else if (ppc_hash64_use_vrma(env)) { + /* Emulated VRMA mode */ + slb = &vrma_slbe; + if (build_vrma_slbe(cpu, slb) != 0) { + return -1; + } + } else { + target_ulong limit = rmls_limit(cpu); + + /* Emulated old-style RMO mode, bounds check against RMLS */ + if (raddr >= limit) { + return -1; + } + return raddr | env->spr[SPR_RMOR]; + } + } else { + slb = slb_lookup(cpu, addr); + if (!slb) { + return -1; + } + } + + ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift); + if (ptex == -1) { + return -1; + } + + return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr) + & TARGET_PAGE_MASK; +} + +void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex, + target_ulong pte0, target_ulong pte1) +{ + /* + * XXX: given the fact that there are too many segments to + * invalidate, and we still don't have a tlb_flush_mask(env, n, + * mask) in QEMU, we just invalidate all TLBs + */ + cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH; +} + +void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + CPUPPCState *env = &cpu->env; + + env->spr[SPR_LPCR] = val & pcc->lpcr_mask; +} + +void helper_store_lpcr(CPUPPCState *env, target_ulong val) +{ + PowerPCCPU *cpu = env_archcpu(env); + + ppc_store_lpcr(cpu, val); +} + +void ppc_hash64_init(PowerPCCPU *cpu) +{ +#ifndef NDEBUG + CPUPPCState *env = &cpu->env; +#endif + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + if (!pcc->hash64_opts) { + assert(!(env->mmu_model & POWERPC_MMU_64)); + return; + } + + cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts)); +} + +void ppc_hash64_finalize(PowerPCCPU *cpu) +{ + g_free(cpu->hash64_opts); +} + +const PPCHash64Options ppc_hash64_opts_basic = { + .flags = 0, + .slb_size = 64, + .sps = { + { .page_shift = 12, /* 4K */ + .slb_enc = 0, + .enc = { { .page_shift = 12, .pte_enc = 0 } } + }, + { .page_shift = 24, /* 16M */ + .slb_enc = 0x100, + .enc = { { .page_shift = 24, .pte_enc = 0 } } + }, + }, +}; + +const PPCHash64Options ppc_hash64_opts_POWER7 = { + .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE, + .slb_size = 32, + .sps = { + { + .page_shift = 12, /* 4K */ + .slb_enc = 0, + .enc = { { .page_shift = 12, .pte_enc = 0 }, + { .page_shift = 16, .pte_enc = 0x7 }, + { .page_shift = 24, .pte_enc = 0x38 }, }, + }, + { + .page_shift = 16, /* 64K */ + .slb_enc = SLB_VSID_64K, + .enc = { { .page_shift = 16, .pte_enc = 0x1 }, + { .page_shift = 24, .pte_enc = 0x8 }, }, + }, + { + .page_shift = 24, /* 16M */ + .slb_enc = SLB_VSID_16M, + .enc = { { .page_shift = 24, .pte_enc = 0 }, }, + 
}, + { + .page_shift = 34, /* 16G */ + .slb_enc = SLB_VSID_16G, + .enc = { { .page_shift = 34, .pte_enc = 0x3 }, }, + }, + } +}; + +void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, + bool (*cb)(void *, uint32_t, uint32_t), + void *opaque) +{ + PPCHash64Options *opts = cpu->hash64_opts; + int i; + int n = 0; + bool ci_largepage = false; + + assert(opts); + + n = 0; + for (i = 0; i < ARRAY_SIZE(opts->sps); i++) { + PPCHash64SegmentPageSizes *sps = &opts->sps[i]; + int j; + int m = 0; + + assert(n <= i); + + if (!sps->page_shift) { + break; + } + + for (j = 0; j < ARRAY_SIZE(sps->enc); j++) { + PPCHash64PageSize *ps = &sps->enc[j]; + + assert(m <= j); + if (!ps->page_shift) { + break; + } + + if (cb(opaque, sps->page_shift, ps->page_shift)) { + if (ps->page_shift >= 16) { + ci_largepage = true; + } + sps->enc[m++] = *ps; + } + } + + /* Clear rest of the row */ + for (j = m; j < ARRAY_SIZE(sps->enc); j++) { + memset(&sps->enc[j], 0, sizeof(sps->enc[j])); + } + + if (m) { + n++; + } + } + + /* Clear the rest of the table */ + for (i = n; i < ARRAY_SIZE(opts->sps); i++) { + memset(&opts->sps[i], 0, sizeof(opts->sps[i])); + } + + if (!ci_largepage) { + opts->flags &= ~PPC_HASH64_CI_LARGEPAGE; + } +} diff --git a/qemu/target/ppc/mmu-hash64.h b/qemu/target/ppc/mmu-hash64.h new file mode 100644 index 00000000..98498d7d --- /dev/null +++ b/qemu/target/ppc/mmu-hash64.h @@ -0,0 +1,161 @@ +#ifndef MMU_HASH64_H +#define MMU_HASH64_H + +#ifdef TARGET_PPC64 +void dump_slb(PowerPCCPU *cpu); +int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot, + target_ulong esid, target_ulong vsid); +hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr); +int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw, + int mmu_idx); +void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, + target_ulong pte_index, + target_ulong pte0, target_ulong pte1); +unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu, + uint64_t pte0, uint64_t pte1); +void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val); +void ppc_hash64_init(PowerPCCPU *cpu); +void ppc_hash64_finalize(PowerPCCPU *cpu); +void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, + bool (*cb)(void *, uint32_t, uint32_t), + void *opaque); +#endif + +/* + * SLB definitions + */ + +/* Bits in the SLB ESID word */ +#define SLB_ESID_ESID 0xFFFFFFFFF0000000ULL +#define SLB_ESID_V 0x0000000008000000ULL /* valid */ + +/* Bits in the SLB VSID word */ +#define SLB_VSID_SHIFT 12 +#define SLB_VSID_SHIFT_1T 24 +#define SLB_VSID_SSIZE_SHIFT 62 +#define SLB_VSID_B 0xc000000000000000ULL +#define SLB_VSID_B_256M 0x0000000000000000ULL +#define SLB_VSID_B_1T 0x4000000000000000ULL +#define SLB_VSID_VSID 0x3FFFFFFFFFFFF000ULL +#define SLB_VSID_VRMA (0x0001FFFFFF000000ULL | SLB_VSID_B_1T) +#define SLB_VSID_PTEM (SLB_VSID_B | SLB_VSID_VSID) +#define SLB_VSID_KS 0x0000000000000800ULL +#define SLB_VSID_KP 0x0000000000000400ULL +#define SLB_VSID_N 0x0000000000000200ULL /* no-execute */ +#define SLB_VSID_L 0x0000000000000100ULL +#define SLB_VSID_C 0x0000000000000080ULL /* class */ +#define SLB_VSID_LP 0x0000000000000030ULL +#define SLB_VSID_ATTR 0x0000000000000FFFULL +#define SLB_VSID_LLP_MASK (SLB_VSID_L | SLB_VSID_LP) +#define SLB_VSID_4K 0x0000000000000000ULL +#define SLB_VSID_64K 0x0000000000000110ULL +#define SLB_VSID_16M 0x0000000000000100ULL +#define SLB_VSID_16G 0x0000000000000120ULL + +/* + * Hash page table definitions + */ + +#define SDR_64_HTABORG 0x0FFFFFFFFFFC0000ULL +#define SDR_64_HTABSIZE 0x000000000000001FULL + +#define PATE0_HTABORG 0x0FFFFFFFFFFC0000ULL 
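+
+/*
+ * Sizing example implied by the constants below (assuming the usual
+ * SDR1 layout, where HTABSIZE counts hash bits beyond the minimum):
+ * one PTEG holds HPTES_PER_GROUP HPTEs of HASH_PTE_SIZE_64 bytes each,
+ * i.e. 8 * 16 = 128 bytes.  The architectural minimum hash table of
+ * 256 KiB (SDR1[HTABSIZE] = 0) therefore contains 256 KiB / 128 = 2048
+ * PTEGs, and each HTABSIZE increment doubles the table.
+ */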
+#define HPTES_PER_GROUP 8
+#define HASH_PTE_SIZE_64 16
+#define HASH_PTEG_SIZE_64 (HASH_PTE_SIZE_64 * HPTES_PER_GROUP)
+
+#define HPTE64_V_SSIZE SLB_VSID_B
+#define HPTE64_V_SSIZE_256M SLB_VSID_B_256M
+#define HPTE64_V_SSIZE_1T SLB_VSID_B_1T
+#define HPTE64_V_SSIZE_SHIFT 62
+#define HPTE64_V_AVPN_SHIFT 7
+#define HPTE64_V_AVPN 0x3fffffffffffff80ULL
+#define HPTE64_V_AVPN_VAL(x) (((x) & HPTE64_V_AVPN) >> HPTE64_V_AVPN_SHIFT)
+#define HPTE64_V_COMPARE(x, y) (!(((x) ^ (y)) & 0xffffffffffffff83ULL))
+#define HPTE64_V_BOLTED 0x0000000000000010ULL
+#define HPTE64_V_LARGE 0x0000000000000004ULL
+#define HPTE64_V_SECONDARY 0x0000000000000002ULL
+#define HPTE64_V_VALID 0x0000000000000001ULL
+
+#define HPTE64_R_PP0 0x8000000000000000ULL
+#define HPTE64_R_TS 0x4000000000000000ULL
+#define HPTE64_R_KEY_HI 0x3000000000000000ULL
+#define HPTE64_R_RPN_SHIFT 12
+#define HPTE64_R_RPN 0x0ffffffffffff000ULL
+#define HPTE64_R_FLAGS 0x00000000000003ffULL
+#define HPTE64_R_PP 0x0000000000000003ULL
+#define HPTE64_R_N 0x0000000000000004ULL
+#define HPTE64_R_G 0x0000000000000008ULL
+#define HPTE64_R_M 0x0000000000000010ULL
+#define HPTE64_R_I 0x0000000000000020ULL
+#define HPTE64_R_W 0x0000000000000040ULL
+#define HPTE64_R_WIMG 0x0000000000000078ULL
+#define HPTE64_R_C 0x0000000000000080ULL
+#define HPTE64_R_R 0x0000000000000100ULL
+#define HPTE64_R_KEY_LO 0x0000000000000e00ULL
+#define HPTE64_R_KEY(x) ((((x) & HPTE64_R_KEY_HI) >> 57) | \
+                         (((x) & HPTE64_R_KEY_LO) >> 9))
+
+#define HPTE64_V_1TB_SEG 0x4000000000000000ULL
+#define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL
+
+/* Format changes for ARCH v3 */
+#define HPTE64_V_COMMON_BITS 0x000fffffffffffffULL
+#define HPTE64_R_3_0_SSIZE_SHIFT 58
+#define HPTE64_R_3_0_SSIZE_MASK (3ULL << HPTE64_R_3_0_SSIZE_SHIFT)
+
+struct ppc_hash_pte64 {
+    uint64_t pte0, pte1;
+};
+
+const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
+                                             hwaddr ptex, int n);
+void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
+                            hwaddr ptex, int n);
+
+static inline uint64_t ppc_hash64_hpte0(PowerPCCPU *cpu,
+                                        const ppc_hash_pte64_t *hptes, int i)
+{
+    return ldq_p(&(hptes[i].pte0));
+}
+
+static inline uint64_t ppc_hash64_hpte1(PowerPCCPU *cpu,
+                                        const ppc_hash_pte64_t *hptes, int i)
+{
+    return ldq_p(&(hptes[i].pte1));
+}
+
+/*
+ * MMU Options
+ */
+
+struct PPCHash64PageSize {
+    uint32_t page_shift;  /* Page shift (or 0) */
+    uint32_t pte_enc;     /* Encoding in the HPTE (>>12) */
+};
+typedef struct PPCHash64PageSize PPCHash64PageSize;
+
+struct PPCHash64SegmentPageSizes {
+    uint32_t page_shift;  /* Base page shift of segment (or 0) */
+    uint32_t slb_enc;     /* SLB encoding for BookS */
+    PPCHash64PageSize enc[PPC_PAGE_SIZES_MAX_SZ];
+};
+
+struct PPCHash64Options {
+#define PPC_HASH64_1TSEG 0x00001
+#define PPC_HASH64_AMR 0x00002
+#define PPC_HASH64_CI_LARGEPAGE 0x00004
+    unsigned flags;
+    unsigned slb_size;
+    PPCHash64SegmentPageSizes sps[PPC_PAGE_SIZES_MAX_SZ];
+};
+
+extern const PPCHash64Options ppc_hash64_opts_basic;
+extern const PPCHash64Options ppc_hash64_opts_POWER7;
+
+static inline bool ppc_hash64_has(PowerPCCPU *cpu, unsigned feature)
+{
+    return !!(cpu->hash64_opts->flags & feature);
+}
+
+#endif /* MMU_HASH64_H */
diff --git a/qemu/target/ppc/mmu-radix64.c b/qemu/target/ppc/mmu-radix64.c
new file mode 100644
index 00000000..5dff912c
--- /dev/null
+++ b/qemu/target/ppc/mmu-radix64.c
@@ -0,0 +1,398 @@
+/*
+ * PowerPC Radix MMU emulation helpers for QEMU.
+ * + * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "mmu-radix64.h" +#include "mmu-book3s-v3.h" + +static bool ppc_radix64_get_fully_qualified_addr(CPUPPCState *env, vaddr eaddr, + uint64_t *lpid, uint64_t *pid) +{ + if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */ + switch (eaddr & R_EADDR_QUADRANT) { + case R_EADDR_QUADRANT0: + *lpid = 0; + *pid = env->spr[SPR_BOOKS_PID]; + break; + case R_EADDR_QUADRANT1: + *lpid = env->spr[SPR_LPIDR]; + *pid = env->spr[SPR_BOOKS_PID]; + break; + case R_EADDR_QUADRANT2: + *lpid = env->spr[SPR_LPIDR]; + *pid = 0; + break; + case R_EADDR_QUADRANT3: + *lpid = 0; + *pid = 0; + break; + } + } else { /* !MSR[HV] -> Guest */ + switch (eaddr & R_EADDR_QUADRANT) { + case R_EADDR_QUADRANT0: /* Guest application */ + *lpid = env->spr[SPR_LPIDR]; + *pid = env->spr[SPR_BOOKS_PID]; + break; + case R_EADDR_QUADRANT1: /* Illegal */ + case R_EADDR_QUADRANT2: + return false; + case R_EADDR_QUADRANT3: /* Guest OS */ + *lpid = env->spr[SPR_LPIDR]; + *pid = 0; /* pid set to 0 -> addresses guest operating system */ + break; + } + } + + return true; +} + +static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + + if (rwx == 2) { /* Instruction Segment Interrupt */ + cs->exception_index = POWERPC_EXCP_ISEG; + } else { /* Data Segment Interrupt */ + cs->exception_index = POWERPC_EXCP_DSEG; + env->spr[SPR_DAR] = eaddr; + } + env->error_code = 0; +} + +static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr, + uint32_t cause) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + + if (rwx == 2) { /* Instruction Storage Interrupt */ + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = cause; + } else { /* Data Storage Interrupt */ + cs->exception_index = POWERPC_EXCP_DSI; + if (rwx == 1) { /* Write -> Store */ + cause |= DSISR_ISSTORE; + } + env->spr[SPR_DSISR] = cause; + env->spr[SPR_DAR] = eaddr; + env->error_code = 0; + } +} + + +static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte, + int *fault_cause, int *prot) +{ + CPUPPCState *env = &cpu->env; + const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC }; + + /* Check Page Attributes (pte58:59) */ + if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) { + /* + * Radix PTE entries with the non-idempotent I/O attribute are treated + * as guarded storage + */ + *fault_cause |= SRR1_NOEXEC_GUARD; + return true; + } + + /* Determine permissions allowed by Encoded Access Authority */ + if ((pte & R_PTE_EAA_PRIV) && msr_pr) { /* Insufficient Privilege */ + *prot = 0; + } else if (msr_pr || (pte & R_PTE_EAA_PRIV)) { + *prot = ppc_radix64_get_prot_eaa(pte); + } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) */ + *prot = 
ppc_radix64_get_prot_eaa(pte); + *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */ + } + + /* Check if requested access type is allowed */ + if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */ + *fault_cause |= DSISR_PROTFAULT; + return true; + } + + return false; +} + +static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte, + hwaddr pte_addr, int *prot) +{ + CPUState *cs = CPU(cpu); + uint64_t npte; + + npte = pte | R_PTE_R; /* Always set reference bit */ + + if (rwx == 1) { /* Store/Write */ + npte |= R_PTE_C; /* Set change bit */ + } else { + /* + * Treat the page as read-only for now, so that a later write + * will pass through this function again to set the C bit. + */ + *prot &= ~PAGE_WRITE; + } + + if (pte ^ npte) { /* If pte has changed then write it back */ +#ifdef UNICORN_ARCH_POSTFIX + glue(stq_phys, UNICORN_ARCH_POSTFIX)(cs->uc, cs->as, pte_addr, npte); +#else + stq_phys(cs->uc, cs->as, pte_addr, npte); +#endif + } +} + +static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr, + uint64_t base_addr, uint64_t nls, + hwaddr *raddr, int *psize, + int *fault_cause, hwaddr *pte_addr) +{ + CPUState *cs = CPU(cpu); + uint64_t index, pde; + + if (nls < 5) { /* Directory maps less than 2**5 entries */ + *fault_cause |= DSISR_R_BADCONFIG; + return 0; + } + + /* Read page entry from guest address space */ + index = eaddr >> (*psize - nls); /* Shift */ + index &= ((1UL << nls) - 1); /* Mask */ +#ifdef UNICORN_ARCH_POSTFIX + pde = glue(ldq_phys, UNICORN_ARCH_POSTFIX)(cs->uc, cs->as, base_addr + (index * sizeof(pde))); +#else + pde = ldq_phys(cs->uc, cs->as, base_addr + (index * sizeof(pde))); +#endif + if (!(pde & R_PTE_VALID)) { /* Invalid Entry */ + *fault_cause |= DSISR_NOPTE; + return 0; + } + + *psize -= nls; + + /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */ + if (pde & R_PTE_LEAF) { + uint64_t rpn = pde & R_PTE_RPN; + uint64_t mask = (1UL << *psize) - 1; + + /* Or high bits of rpn and low bits to ea to form whole real addr */ + *raddr = (rpn & ~mask) | (eaddr & mask); + *pte_addr = base_addr + (index * sizeof(pde)); + return pde; + } + + /* Next Level of Radix Tree */ + return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS, + raddr, psize, fault_cause, pte_addr); +} + +static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate) +{ + CPUPPCState *env = &cpu->env; + + if (!(pate->dw0 & PATE0_HR)) { + return false; + } + if (lpid == 0 && !msr_hv) { + return false; + } + /* More checks ... 
*/ + return true; +} + +int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, + int mmu_idx) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; +#if 0 + PPCVirtualHypervisorClass *vhc; +#endif + hwaddr raddr, pte_addr; + uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte; + int page_size, prot, fault_cause = 0; + ppc_v3_pate_t pate; + + assert((rwx == 0) || (rwx == 1) || (rwx == 2)); + + /* HV or virtual hypervisor Real Mode Access */ + if ((msr_hv) && + (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0)))) { + /* In real mode top 4 effective addr bits (mostly) ignored */ + raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; + + /* In HV mode, add HRMOR if top EA bit is clear */ + if (msr_hv || !env->has_hv_mode) { + if (!(eaddr >> 63)) { + raddr |= env->spr[SPR_HRMOR]; + } + } + tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, + PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, + TARGET_PAGE_SIZE); + return 0; + } + + /* + * Check UPRT (we avoid the check in real mode to deal with + * transitional states during kexec. + */ +#if 0 + if (!ppc64_use_proc_tbl(cpu)) { + qemu_log_mask(LOG_GUEST_ERROR, + "LPCR:UPRT not set in radix mode ! LPCR=" + TARGET_FMT_lx "\n", env->spr[SPR_LPCR]); + } +#endif + + /* Virtual Mode Access - get the fully qualified address */ + if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) { + ppc_radix64_raise_segi(cpu, rwx, eaddr); + return 1; + } + + /* Get Process Table */ +#if 0 + if (cpu->vhyp) { + vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + vhc->get_pate(cpu->vhyp, &pate); + } else { +#endif + if (!ppc64_v3_get_pate(cpu, lpid, &pate)) { + ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); + return 1; + } + if (!validate_pate(cpu, lpid, &pate)) { + ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG); + } + /* We don't support guest mode yet */ + if (lpid != 0) { + fprintf(stderr, "PowerNV guest support Unimplemented"); + exit(1); + } +#if 0 + } +#endif + + /* Index Process Table by PID to Find Corresponding Process Table Entry */ + offset = pid * sizeof(struct prtb_entry); + size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); + if (offset >= size) { + /* offset exceeds size of the process table */ + ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); + return 1; + } +#ifdef UNICORN_ARCH_POSTFIX + prtbe0 = glue(ldq_phys, UNICORN_ARCH_POSTFIX)(cs->uc, cs->as, (pate.dw1 & PATE1_R_PRTB) + offset); +#else + prtbe0 = ldq_phys(cs->uc, cs->as, (pate.dw1 & PATE1_R_PRTB) + offset); +#endif + + /* Walk Radix Tree from Process Table Entry to Convert EA to RA */ + page_size = PRTBE_R_GET_RTS(prtbe0); + pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK, + prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS, + &raddr, &page_size, &fault_cause, &pte_addr); + if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) { + /* Couldn't get pte or access denied due to protection */ + ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); + return 1; + } + + /* Update Reference and Change Bits */ + ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot); + + tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, + prot, mmu_idx, 1ULL << page_size); + return 0; +} + +hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; +#if 0 + PPCVirtualHypervisorClass *vhc; +#endif + hwaddr raddr, pte_addr; + uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte; + int page_size, fault_cause = 0; + ppc_v3_pate_t pate; + + /* 
Handle Real Mode */ + if (msr_dr == 0) { + /* In real mode top 4 effective addr bits (mostly) ignored */ + return eaddr & 0x0FFFFFFFFFFFFFFFULL; + } + + /* Virtual Mode Access - get the fully qualified address */ + if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) { + return -1; + } + + /* Get Process Table */ +#if 0 + if (cpu->vhyp) { + vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + vhc->get_pate(cpu->vhyp, &pate); + } else { +#endif + if (!ppc64_v3_get_pate(cpu, lpid, &pate)) { + return -1; + } + if (!validate_pate(cpu, lpid, &pate)) { + return -1; + } + /* We don't support guest mode yet */ + if (lpid != 0) { + fprintf(stderr, "PowerNV guest support Unimplemented"); + exit(1); + } +#if 0 + } +#endif + + /* Index Process Table by PID to Find Corresponding Process Table Entry */ + offset = pid * sizeof(struct prtb_entry); + size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); + if (offset >= size) { + /* offset exceeds size of the process table */ + return -1; + } +#ifdef UNICORN_ARCH_POSTFIX + prtbe0 = glue(ldq_phys, UNICORN_ARCH_POSTFIX)(cs->uc, cs->as, (pate.dw1 & PATE1_R_PRTB) + offset); +#else + prtbe0 = ldq_phys(cs->uc, cs->as, (pate.dw1 & PATE1_R_PRTB) + offset); +#endif + + /* Walk Radix Tree from Process Table Entry to Convert EA to RA */ + page_size = PRTBE_R_GET_RTS(prtbe0); + pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK, + prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS, + &raddr, &page_size, &fault_cause, &pte_addr); + if (!pte) { + return -1; + } + + return raddr & TARGET_PAGE_MASK; +} diff --git a/qemu/target/ppc/mmu-radix64.h b/qemu/target/ppc/mmu-radix64.h new file mode 100644 index 00000000..3e4b2354 --- /dev/null +++ b/qemu/target/ppc/mmu-radix64.h @@ -0,0 +1,69 @@ +#ifndef MMU_RADIX64_H +#define MMU_RADIX64_H + +/* Radix Quadrants */ +#define R_EADDR_MASK 0x3FFFFFFFFFFFFFFF +#define R_EADDR_QUADRANT 0xC000000000000000 +#define R_EADDR_QUADRANT0 0x0000000000000000 +#define R_EADDR_QUADRANT1 0x4000000000000000 +#define R_EADDR_QUADRANT2 0x8000000000000000 +#define R_EADDR_QUADRANT3 0xC000000000000000 + +/* Radix Partition Table Entry Fields */ +#define PATE1_R_PRTB 0x0FFFFFFFFFFFF000 +#define PATE1_R_PRTS 0x000000000000001F + +/* Radix Process Table Entry Fields */ +#define PRTBE_R_GET_RTS(rts) \ + ((((rts >> 58) & 0x18) | ((rts >> 5) & 0x7)) + 31) +#define PRTBE_R_RPDB 0x0FFFFFFFFFFFFF00 +#define PRTBE_R_RPDS 0x000000000000001F + +/* Radix Page Directory/Table Entry Fields */ +#define R_PTE_VALID 0x8000000000000000 +#define R_PTE_LEAF 0x4000000000000000 +#define R_PTE_SW0 0x2000000000000000 +#define R_PTE_RPN 0x01FFFFFFFFFFF000 +#define R_PTE_SW1 0x0000000000000E00 +#define R_GET_SW(sw) (((sw >> 58) & 0x8) | ((sw >> 9) & 0x7)) +#define R_PTE_R 0x0000000000000100 +#define R_PTE_C 0x0000000000000080 +#define R_PTE_ATT 0x0000000000000030 +#define R_PTE_ATT_NORMAL 0x0000000000000000 +#define R_PTE_ATT_SAO 0x0000000000000010 +#define R_PTE_ATT_NI_IO 0x0000000000000020 +#define R_PTE_ATT_TOLERANT_IO 0x0000000000000030 +#define R_PTE_EAA_PRIV 0x0000000000000008 +#define R_PTE_EAA_R 0x0000000000000004 +#define R_PTE_EAA_RW 0x0000000000000002 +#define R_PTE_EAA_X 0x0000000000000001 +#define R_PDE_NLB PRTBE_R_RPDB +#define R_PDE_NLS PRTBE_R_RPDS + +#ifdef TARGET_PPC64 + +int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, + int mmu_idx); +hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr); + +static inline int ppc_radix64_get_prot_eaa(uint64_t pte) +{ + return (pte & R_PTE_EAA_R ? 
PAGE_READ : 0) | + (pte & R_PTE_EAA_RW ? PAGE_READ | PAGE_WRITE : 0) | + (pte & R_PTE_EAA_X ? PAGE_EXEC : 0); +} + +static inline int ppc_radix64_get_prot_amr(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + int amr = env->spr[SPR_AMR] >> 62; /* We only care about key0 AMR63:62 */ + int iamr = env->spr[SPR_IAMR] >> 62; /* We only care about key0 IAMR63:62 */ + + return (amr & 0x2 ? 0 : PAGE_WRITE) | /* Access denied if bit is set */ + (amr & 0x1 ? 0 : PAGE_READ) | + (iamr & 0x1 ? 0 : PAGE_EXEC); +} + +#endif /* TARGET_PPC64 */ + +#endif /* MMU_RADIX64_H */ diff --git a/qemu/target/ppc/mmu_helper.c b/qemu/target/ppc/mmu_helper.c new file mode 100644 index 00000000..45e0d2d7 --- /dev/null +++ b/qemu/target/ppc/mmu_helper.c @@ -0,0 +1,3117 @@ +/* + * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "mmu-hash64.h" +#include "mmu-hash32.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "helper_regs.h" +#include "mmu-book3s-v3.h" +#include "mmu-radix64.h" + +/* #define DEBUG_MMU */ +/* #define DEBUG_BATS */ +/* #define DEBUG_SOFTWARE_TLB */ +/* #define DUMP_PAGE_TABLES */ +/* #define FLUSH_ALL_TLBS */ + +#ifdef DEBUG_MMU +# define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0) +#else +# define LOG_MMU_STATE(cpu) do { } while (0) +#endif + +#ifdef DEBUG_SOFTWARE_TLB +# define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) +#else +# define LOG_SWTLB(...) do { } while (0) +#endif + +#ifdef DEBUG_BATS +# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) +#else +# define LOG_BATS(...) do { } while (0) +#endif + +/*****************************************************************************/ +/* PowerPC MMU emulation */ + +/* Context used internally during MMU translations */ +typedef struct mmu_ctx_t mmu_ctx_t; +struct mmu_ctx_t { + hwaddr raddr; /* Real address */ + hwaddr eaddr; /* Effective address */ + int prot; /* Protection bits */ + hwaddr hash[2]; /* Pagetable hash values */ + target_ulong ptem; /* Virtual segment ID | API */ + int key; /* Access key */ + int nx; /* Non-execute area */ +}; + +/* Common routines used by software and hardware TLBs emulation */ +static inline int pte_is_valid(target_ulong pte0) +{ + return pte0 & 0x80000000 ? 
1 : 0;
+}
+
+static inline void pte_invalidate(target_ulong *pte0)
+{
+    *pte0 &= ~0x80000000;
+}
+
+#define PTE_PTEM_MASK 0x7FFFFFBF
+#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
+
+static int pp_check(int key, int pp, int nx)
+{
+    int access;
+
+    /* Compute access rights */
+    access = 0;
+    if (key == 0) {
+        switch (pp) {
+        case 0x0:
+        case 0x1:
+        case 0x2:
+            access |= PAGE_WRITE;
+            /* fall through */
+        case 0x3:
+            access |= PAGE_READ;
+            break;
+        }
+    } else {
+        switch (pp) {
+        case 0x0:
+            access = 0;
+            break;
+        case 0x1:
+        case 0x3:
+            access = PAGE_READ;
+            break;
+        case 0x2:
+            access = PAGE_READ | PAGE_WRITE;
+            break;
+        }
+    }
+    if (nx == 0) {
+        access |= PAGE_EXEC;
+    }
+
+    return access;
+}
+
+static int check_prot(int prot, int rw, int access_type)
+{
+    int ret;
+
+    if (access_type == ACCESS_CODE) {
+        if (prot & PAGE_EXEC) {
+            ret = 0;
+        } else {
+            ret = -2;
+        }
+    } else if (rw) {
+        if (prot & PAGE_WRITE) {
+            ret = 0;
+        } else {
+            ret = -2;
+        }
+    } else {
+        if (prot & PAGE_READ) {
+            ret = 0;
+        } else {
+            ret = -2;
+        }
+    }
+
+    return ret;
+}
+
+static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
+                                       target_ulong pte1, int h,
+                                       int rw, int type)
+{
+    target_ulong ptem, mmask;
+    int access, ret, pteh, ptev, pp;
+
+    ret = -1;
+    /* Check validity and table match */
+    ptev = pte_is_valid(pte0);
+    pteh = (pte0 >> 6) & 1;
+    if (ptev && h == pteh) {
+        /* Check vsid & api */
+        ptem = pte0 & PTE_PTEM_MASK;
+        mmask = PTE_CHECK_MASK;
+        pp = pte1 & 0x00000003;
+        if (ptem == ctx->ptem) {
+#ifdef _MSC_VER
+            if (ctx->raddr != (hwaddr)(0ULL - 1ULL)) {
+#else
+            if (ctx->raddr != (hwaddr)-1ULL) {
+#endif
+                /* all matches should have equal RPN, WIMG & PP */
+                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
+                    qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
+                    return -3;
+                }
+            }
+            /* Compute access rights */
+            access = pp_check(ctx->key, pp, ctx->nx);
+            /* Keep the matching PTE information */
+            ctx->raddr = pte1;
+            ctx->prot = access;
+            ret = check_prot(ctx->prot, rw, type);
+            if (ret == 0) {
+                /* Access granted */
+                qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
+            } else {
+                /* Access right violation */
+                qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
+            }
+        }
+    }
+
+    return ret;
+}
+
+static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
+                            int ret, int rw)
+{
+    int store = 0;
+
+    /* Update page flags */
+    if (!(*pte1p & 0x00000100)) {
+        /* Update accessed flag */
+        *pte1p |= 0x00000100;
+        store = 1;
+    }
+    if (!(*pte1p & 0x00000080)) {
+        if (rw == 1 && ret == 0) {
+            /* Update changed flag */
+            *pte1p |= 0x00000080;
+            store = 1;
+        } else {
+            /* Force page fault for first write access */
+            ctx->prot &= ~PAGE_WRITE;
+        }
+    }
+
+    return store;
+}
+
+/* Software driven TLB helpers */
+static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
+                                    int way, int is_code)
+{
+    int nr;
+
+    /* Select TLB num in a way from address */
+    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
+    /* Select TLB way */
+    nr += env->tlb_per_way * way;
+    /* 6xx have separate TLBs for instructions and data */
+    if (is_code && env->id_tlbs == 1) {
+        nr += env->nb_tlb;
+    }
+
+    return nr;
+}
+
+static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
+{
+    ppc6xx_tlb_t *tlb;
+    int nr, max;
+
+    /* LOG_SWTLB("Invalidate all TLBs\n"); */
+    /* Invalidate all defined software TLB */
+    max = env->nb_tlb;
+    if (env->id_tlbs == 1) {
+        max *= 2;
+    }
+    for (nr = 0; nr < max; nr++) {
+        tlb = &env->tlb.tlb6[nr];
+        pte_invalidate(&tlb->pte0);
+    }
+    tlb_flush(env_cpu(env));
+}
+
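+/*
+ * Worked example for pp_check()/check_prot() above, kept compiled out
+ * like the other reference-only #if 0 blocks in this port.  ACCESS_CODE
+ * is the access type used above; ACCESS_INT is assumed to be the
+ * corresponding integer load/store access type.
+ */
+#if 0
+static void pp_check_example(void)
+{
+    /* Supervisor access (key = 1) to a PP = 0b01 page: read-only */
+    int prot = pp_check(1, 0x1, 0);
+
+    assert(prot == (PAGE_READ | PAGE_EXEC));
+    assert(check_prot(prot, 0, ACCESS_INT) == 0);   /* load allowed */
+    assert(check_prot(prot, 1, ACCESS_INT) == -2);  /* store denied */
+    assert(check_prot(prot, 0, ACCESS_CODE) == 0);  /* fetch allowed */
+}
+#endif
+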
+static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
+                                               target_ulong eaddr,
+                                               int is_code, int match_epn)
+{
+#if !defined(FLUSH_ALL_TLBS)
+    CPUState *cs = env_cpu(env);
+    ppc6xx_tlb_t *tlb;
+    int way, nr;
+
+    /* Invalidate ITLB + DTLB, all ways */
+    for (way = 0; way < env->nb_ways; way++) {
+        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
+        tlb = &env->tlb.tlb6[nr];
+        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
+            LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
+                      env->nb_tlb, eaddr);
+            pte_invalidate(&tlb->pte0);
+            tlb_flush_page(cs, tlb->EPN);
+        }
+    }
+#else
+    /* XXX: the PowerPC specification says this is valid as well */
+    ppc6xx_tlb_invalidate_all(env);
+#endif
+}
+
+static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
+                                              target_ulong eaddr, int is_code)
+{
+    ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
+}
+
+static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
+                             int is_code, target_ulong pte0, target_ulong pte1)
+{
+    ppc6xx_tlb_t *tlb;
+    int nr;
+
+    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
+    tlb = &env->tlb.tlb6[nr];
+    LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
+              " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
+    /* Invalidate any pending reference in QEMU for this virtual address */
+    ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
+    tlb->pte0 = pte0;
+    tlb->pte1 = pte1;
+    tlb->EPN = EPN;
+    /* Store last way for LRU mechanism */
+    env->last_way = way;
+}
+
+static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
+                                   target_ulong eaddr, int rw, int access_type)
+{
+    ppc6xx_tlb_t *tlb;
+    int nr, best, way;
+    int ret;
+
+    best = -1;
+    ret = -1; /* No TLB found */
+    for (way = 0; way < env->nb_ways; way++) {
+        nr = ppc6xx_tlb_getnum(env, eaddr, way,
+                               access_type == ACCESS_CODE ? 1 : 0);
+        tlb = &env->tlb.tlb6[nr];
+        /* This test "emulates" the PTE index match for hardware TLBs */
+        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
+            LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
+                      "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
+                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
+                      tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
+            continue;
+        }
+        LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
+                  TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
+                  pte_is_valid(tlb->pte0) ? "valid" : "inval",
+                  tlb->EPN, eaddr, tlb->pte1,
+                  rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
+        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
+                                     0, rw, access_type)) {
+        case -3:
+            /* TLB inconsistency */
+            return -1;
+        case -2:
+            /* Access violation */
+            ret = -2;
+            best = nr;
+            break;
+        case -1:
+        default:
+            /* No match */
+            break;
+        case 0:
+            /* access granted */
+            /*
+             * XXX: we should keep looping to check the consistency of
+             * all TLBs, but we can speed the whole thing up since the
+             * result would be undefined if the TLBs are not consistent.
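+             * (Concretely: if both ways held valid but conflicting
+             * mappings for the same EPN, real hardware could return
+             * either one, so stopping at the first hit is fine.)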
+ */ + ret = 0; + best = nr; + goto done; + } + } + if (best != -1) { + done: + LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", + ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); + /* Update page flags */ + pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw); + } + + return ret; +} + +/* Perform BAT hit & translation */ +static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, + int *validp, int *protp, target_ulong *BATu, + target_ulong *BATl) +{ + target_ulong bl; + int pp, valid, prot; + + bl = (*BATu & 0x00001FFC) << 15; + valid = 0; + prot = 0; + if (((msr_pr == 0) && (*BATu & 0x00000002)) || + ((msr_pr != 0) && (*BATu & 0x00000001))) { + valid = 1; + pp = *BATl & 0x00000003; + if (pp != 0) { + prot = PAGE_READ | PAGE_EXEC; + if (pp == 0x2) { + prot |= PAGE_WRITE; + } + } + } + *blp = bl; + *validp = valid; + *protp = prot; +} + +static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong virtual, int rw, int type) +{ + target_ulong *BATlt, *BATut, *BATu, *BATl; + target_ulong BEPIl, BEPIu, bl; + int i, valid, prot; + int ret = -1; + + LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, + type == ACCESS_CODE ? 'I' : 'D', virtual); + switch (type) { + case ACCESS_CODE: + BATlt = env->IBAT[1]; + BATut = env->IBAT[0]; + break; + default: + BATlt = env->DBAT[1]; + BATut = env->DBAT[0]; + break; + } + for (i = 0; i < env->nb_BATs; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & 0xF0000000; + BEPIl = *BATu & 0x0FFE0000; + bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); + LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n", __func__, + type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl); + if ((virtual & 0xF0000000) == BEPIu && + ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { + /* BAT matches */ + if (valid != 0) { + /* Get physical address */ + ctx->raddr = (*BATl & 0xF0000000) | + ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | + (virtual & 0x0001F000); + /* Compute access rights */ + ctx->prot = prot; + ret = check_prot(ctx->prot, rw, type); + if (ret == 0) { + LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", + i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', + ctx->prot & PAGE_WRITE ? 'W' : '-'); + } + break; + } + } + } + if (ret < 0) { +#if defined(DEBUG_BATS) + if (qemu_log_enabled()) { + LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual); + for (i = 0; i < 4; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & 0xF0000000; + BEPIl = *BATu & 0x0FFE0000; + bl = (*BATu & 0x00001FFC) << 15; + LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " + TARGET_FMT_lx " " TARGET_FMT_lx "\n", + __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual, + *BATu, *BATl, BEPIu, BEPIl, bl); + } + } +#endif + } + /* No hit */ + return ret; +} + +/* Perform segment based translation */ +static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, int rw, int type) +{ +#if 0 + PowerPCCPU *cpu = env_archcpu(env); +#endif + hwaddr hash; + target_ulong vsid; + int ds, pr, target_page_bits; + int ret; + target_ulong sr, pgidx; + + pr = msr_pr; + ctx->eaddr = eaddr; + + sr = env->sr[eaddr >> 28]; + ctx->key = (((sr & 0x20000000) && (pr != 0)) || + ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; + ds = sr & 0x80000000 ? 1 : 0; + ctx->nx = sr & 0x10000000 ? 
1 : 0;
+    vsid = sr & 0x00FFFFFF;
+    target_page_bits = TARGET_PAGE_BITS;
+#if 0
+    qemu_log_mask(CPU_LOG_MMU,
+                  "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
+                  " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
+                  " ir=%d dr=%d pr=%d %d t=%d\n",
+                  eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
+                  (int)msr_dr, pr != 0 ? 1 : 0, rw, type);
+#endif
+    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
+    hash = vsid ^ pgidx;
+    ctx->ptem = (vsid << 7) | (pgidx >> 10);
+
+#if 0
+    qemu_log_mask(CPU_LOG_MMU,
+                  "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
+                  ctx->key, ds, ctx->nx, vsid);
+#endif
+    ret = -1;
+    if (!ds) {
+        /* Check if instruction fetch is allowed, if needed */
+        if (type != ACCESS_CODE || ctx->nx == 0) {
+            /* Page address translation */
+#if 0
+            qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
+                          " htab_mask " TARGET_FMT_plx
+                          " hash " TARGET_FMT_plx "\n",
+                          ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
+#endif
+            ctx->hash[0] = hash;
+            ctx->hash[1] = ~hash;
+
+            /* Initialize real address with an invalid value */
+#ifdef _MSC_VER
+            ctx->raddr = (hwaddr)(0ULL - 1ULL);
+#else
+            ctx->raddr = (hwaddr)-1ULL;
+#endif
+            /* Software TLB search */
+            ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
+#if defined(DUMP_PAGE_TABLES)
+            if (qemu_loglevel_mask(CPU_LOG_MMU)) {
+                CPUState *cs = env_cpu(env);
+                hwaddr curaddr;
+                uint32_t a0, a1, a2, a3;
+
+#if 0
+                qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
+                         "\n", ppc_hash32_hpt_base(cpu),
+                         ppc_hash32_hpt_mask(env) + 0x80);
+#endif
+                for (curaddr = ppc_hash32_hpt_base(cpu);
+                     curaddr < (ppc_hash32_hpt_base(cpu)
+                                + ppc_hash32_hpt_mask(cpu) + 0x80);
+                     curaddr += 16) {
+                    a0 = ldl_phys(cs->as, curaddr);
+                    a1 = ldl_phys(cs->as, curaddr + 4);
+                    a2 = ldl_phys(cs->as, curaddr + 8);
+                    a3 = ldl_phys(cs->as, curaddr + 12);
+                    if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
+                        qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
+                                 curaddr, a0, a1, a2, a3);
+                    }
+                }
+            }
+#endif
+        } else {
+#if 0
+            qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
+#endif
+            ret = -3;
+        }
+    } else {
+        target_ulong sr;
+
+#if 0
+        qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
+#endif
+        /* Direct-store segment: absolutely *BUGGY* for now */
+
+        /*
+         * Direct-store implies a 32-bit MMU.
+         * Check the Segment Register's bus unit ID (BUID).
+         */
+        sr = env->sr[eaddr >> 28];
+        if ((sr & 0x1FF00000) >> 20 == 0x07f) {
+            /*
+             * Memory-forced I/O controller interface access
+             *
+             * If T=1 and BUID=x'07F', the 601 performs a memory
+             * access to SR[28-31] LA[4-31], bypassing all protection
+             * mechanisms.
+             */
+            ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
+            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+            return 0;
+        }
+
+        switch (type) {
+        case ACCESS_INT:
+            /* Integer load/store: the only access allowed */
+            break;
+        case ACCESS_CODE:
+            /* No code fetch is allowed in direct-store areas */
+            return -4;
+        case ACCESS_FLOAT:
+            /* Floating point load/store */
+            return -4;
+        case ACCESS_RES:
+            /* lwarx, ldarx or stwcx. */
+            return -4;
+        case ACCESS_CACHE:
+            /*
+             * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
+             *
+             * These should make the instruction a no-op. As it already
+             * does nothing here, it's quite easy :-)
+             */
+            ctx->raddr = eaddr;
+            return 0;
+        case ACCESS_EXT:
+            /* eciwx or ecowx */
+            return -4;
+        default:
+#if 0
+            qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
+                          "address translation\n");
+#endif
+            return -4;
+        }
+        if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
+            ctx->raddr = eaddr;
+            ret = 2;
+        } else {
+            ret = -2;
+        }
+    }
+
+    return ret;
+}
+
+/* Generic TLB check function for embedded PowerPC implementations */
+static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
+                            hwaddr *raddrp,
+                            target_ulong address, uint32_t pid, int ext,
+                            int i)
+{
+    target_ulong mask;
+
+    /* Check valid flag */
+    if (!(tlb->prot & PAGE_VALID)) {
+        return -1;
+    }
+    mask = ~(tlb->size - 1);
+    LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
+              " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
+              mask, (uint32_t)tlb->PID, tlb->prot);
+    /* Check PID */
+    if (tlb->PID != 0 && tlb->PID != pid) {
+        return -1;
+    }
+    /* Check effective address */
+    if ((address & mask) != tlb->EPN) {
+        return -1;
+    }
+    *raddrp = (tlb->RPN & mask) | (address & ~mask);
+    if (ext) {
+        /* Extend the physical address to 36 bits */
+        *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
+    }
+
+    return 0;
+}
+
+/* Generic TLB search function for PowerPC embedded implementations */
+static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
+                             uint32_t pid)
+{
+    ppcemb_tlb_t *tlb;
+    hwaddr raddr;
+    int i, ret;
+
+    /* Default return value is no match */
+    ret = -1;
+    for (i = 0; i < env->nb_tlb; i++) {
+        tlb = &env->tlb.tlbe[i];
+        if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
+            ret = i;
+            break;
+        }
+    }
+
+    return ret;
+}
+
+/* Helpers specific to PowerPC 40x implementations */
+static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
+{
+    ppcemb_tlb_t *tlb;
+    int i;
+
+    for (i = 0; i < env->nb_tlb; i++) {
+        tlb = &env->tlb.tlbe[i];
+        tlb->prot &= ~PAGE_VALID;
+    }
+    tlb_flush(env_cpu(env));
+}
+
+static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
+                                       target_ulong address, int rw,
+                                       int access_type)
+{
+    ppcemb_tlb_t *tlb;
+    hwaddr raddr;
+    int i, ret, zsel, zpr, pr;
+
+    ret = -1;
+#ifdef _MSC_VER
+    raddr = (hwaddr)(0ULL - 1ULL);
+#else
+    raddr = (hwaddr)-1ULL;
+#endif
+    pr = msr_pr;
+    for (i = 0; i < env->nb_tlb; i++) {
+        tlb = &env->tlb.tlbe[i];
+        if (ppcemb_tlb_check(env, tlb, &raddr, address,
+                             env->spr[SPR_40x_PID], 0, i) < 0) {
+            continue;
+        }
+        zsel = (tlb->attr >> 4) & 0xF;
+        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
+        LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
+                  __func__, i, zsel, zpr, rw, tlb->attr);
+        /* Check execute enable bit */
+        switch (zpr) {
+        case 0x2:
+            if (pr != 0) {
+                goto check_perms;
+            }
+            /* fall through */
+        case 0x3:
+            /* All accesses granted */
+            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+            ret = 0;
+            break;
+        case 0x0:
+            if (pr != 0) {
+                /* Raise Zone protection fault.
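+                 *
+                 * (Worked example, not in the original comment: zone
+                 * field zsel selects two bits of SPR_40x_ZPR starting
+                 * from the top, i.e. (ZPR >> (30 - 2 * zsel)) & 3, so
+                 * zsel 0 lives in bits 31:30; ZPR = 0x55555555 puts
+                 * every zone in state 0x1 and routes all accesses
+                 * through check_perms.)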
*/ + env->spr[SPR_40x_ESR] = 1 << 22; + ctx->prot = 0; + ret = -2; + break; + } + /* fall through */ + case 0x1: + check_perms: + /* Check from TLB entry */ + ctx->prot = tlb->prot; + ret = check_prot(ctx->prot, rw, access_type); + if (ret == -2) { + env->spr[SPR_40x_ESR] = 0; + } + break; + } + if (ret >= 0) { + ctx->raddr = raddr; + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + return 0; + } + } + LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + + return ret; +} + +void store_40x_sler(CPUPPCState *env, uint32_t val) +{ + /* XXX: TO BE FIXED */ + if (val != 0x00000000) { + cpu_abort(env_cpu(env), + "Little-endian regions are not supported by now\n"); + } + env->spr[SPR_405_SLER] = val; +} + +static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, + hwaddr *raddr, int *prot, + target_ulong address, int rw, + int access_type, int i) +{ + int ret, prot2; + + if (ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID], + !env->nb_pids, i) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID1] && + ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID2] && + ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { + goto found_tlb; + } + + LOG_SWTLB("%s: TLB entry not found\n", __func__); + return -1; + +found_tlb: + + if (msr_pr != 0) { + prot2 = tlb->prot & 0xF; + } else { + prot2 = (tlb->prot >> 4) & 0xF; + } + + /* Check the address space */ + if (access_type == ACCESS_CODE) { + if (msr_ir != (tlb->attr & 1)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if (prot2 & PAGE_EXEC) { + LOG_SWTLB("%s: good TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); + ret = -3; + } else { + if (msr_dr != (tlb->attr & 1)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { + LOG_SWTLB("%s: found TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); + ret = -2; + } + + return ret; +} + +static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong address, int rw, + int access_type) +{ + ppcemb_tlb_t *tlb; + hwaddr raddr; + int i, ret; + + ret = -1; +#ifdef _MSC_VER + raddr = (hwaddr)(0ULL - 1ULL); +#else + raddr = (hwaddr)-1ULL; +#endif + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw, + access_type, i); + if (ret != -1) { + break; + } + } + + if (ret >= 0) { + ctx->raddr = raddr; + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + } else { + LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + } + + return ret; +} + +static void booke206_flush_tlb(CPUPPCState *env, int flags, + const int check_iprot) +{ + int tlb_size; + int i, j; + ppcmas_tlb_t *tlb = env->tlb.tlbm; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + if (flags & (1 << i)) { + tlb_size = booke206_tlb_size(env, i); + for (j = 0; j < tlb_size; j++) { + if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) { + tlb[j].mas1 &= 
~MAS1_VALID; + } + } + } + tlb += booke206_tlb_size(env, i); + } + + tlb_flush(env_cpu(env)); +} + +static hwaddr booke206_tlb_to_page_size(CPUPPCState *env, + ppcmas_tlb_t *tlb) +{ + int tlbm_size; + + tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + + return 1024ULL << tlbm_size; +} + +/* TLB check function for MAS based SoftTLBs */ +static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, + hwaddr *raddrp, target_ulong address, + uint32_t pid) +{ + hwaddr mask; + uint32_t tlb_pid; + + if (!msr_cm) { + /* In 32bit mode we can only address 32bit EAs */ + address = (uint32_t)address; + } + + /* Check valid flag */ + if (!(tlb->mas1 & MAS1_VALID)) { + return -1; + } + + mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); + LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" + PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" + PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask, + tlb->mas7_3, tlb->mas8); + + /* Check PID */ + tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; + if (tlb_pid != 0 && tlb_pid != pid) { + return -1; + } + + /* Check effective address */ + if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { + return -1; + } + + if (raddrp) { + *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); + } + + return 0; +} + +static bool is_epid_mmu(int mmu_idx) +{ + return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; +} + +static uint32_t mmubooke206_esr(int mmu_idx, bool rw) +{ + uint32_t esr = 0; + if (rw) { + esr |= ESR_ST; + } + if (is_epid_mmu(mmu_idx)) { + esr |= ESR_EPID; + } + return esr; +} + +/* + * Get EPID register given the mmu_idx. If this is regular load, + * construct the EPID access bits from current processor state + * + * Get the effective AS and PR bits and the PID. The PID is returned + * only if EPID load is requested, otherwise the caller must detect + * the correct EPID. Return true if valid EPID is returned. 
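+ * (Example for clarity, not in the original comment: for
+ * PPC_TLB_EPID_LOAD the AS, PR and PID bits are taken from
+ * SPR_BOOKE_EPLC instead of MSR/PID0, so a supervisor "load by
+ * external PID" is permission-checked as the user context that
+ * EPLC describes.)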
+ */ +static bool mmubooke206_get_as(CPUPPCState *env, + int mmu_idx, uint32_t *epid_out, + bool *as_out, bool *pr_out) +{ + if (is_epid_mmu(mmu_idx)) { + uint32_t epidr; + if (mmu_idx == PPC_TLB_EPID_STORE) { + epidr = env->spr[SPR_BOOKE_EPSC]; + } else { + epidr = env->spr[SPR_BOOKE_EPLC]; + } + *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; + *as_out = !!(epidr & EPID_EAS); + *pr_out = !!(epidr & EPID_EPR); + return true; + } else { + *as_out = msr_ds; + *pr_out = msr_pr; + return false; + } +} + +/* Check if the tlb found by hashing really matches */ +static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, + hwaddr *raddr, int *prot, + target_ulong address, int rw, + int access_type, int mmu_idx) +{ + int ret; + int prot2 = 0; + uint32_t epid; + bool as, pr; + bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); + + if (!use_epid) { + if (ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID]) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID1] && + ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID1]) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID2] && + ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID2]) >= 0) { + goto found_tlb; + } + } else { + if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { + goto found_tlb; + } + } + + LOG_SWTLB("%s: TLB entry not found\n", __func__); + return -1; + +found_tlb: + + if (pr) { + if (tlb->mas7_3 & MAS3_UR) { + prot2 |= PAGE_READ; + } + if (tlb->mas7_3 & MAS3_UW) { + prot2 |= PAGE_WRITE; + } + if (tlb->mas7_3 & MAS3_UX) { + prot2 |= PAGE_EXEC; + } + } else { + if (tlb->mas7_3 & MAS3_SR) { + prot2 |= PAGE_READ; + } + if (tlb->mas7_3 & MAS3_SW) { + prot2 |= PAGE_WRITE; + } + if (tlb->mas7_3 & MAS3_SX) { + prot2 |= PAGE_EXEC; + } + } + + /* Check the address space and permissions */ + if (access_type == ACCESS_CODE) { + /* There is no way to fetch code using epid load */ + assert(!use_epid); + if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if (prot2 & PAGE_EXEC) { + LOG_SWTLB("%s: good TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); + ret = -3; + } else { + if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { + LOG_SWTLB("%s: found TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); + ret = -2; + } + + return ret; +} + +static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong address, int rw, + int access_type, int mmu_idx) +{ + ppcmas_tlb_t *tlb; + hwaddr raddr; + int i, j, ret; + + ret = -1; +#ifdef _MSC_VER + raddr = (hwaddr)(0ULL - 1ULL); +#else + raddr = (hwaddr)-1ULL; +#endif + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int ways = booke206_tlb_ways(env, i); + + for (j = 0; j < ways; j++) { + tlb = booke206_get_tlbm(env, i, address, j); + if (!tlb) { + continue; + } + ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, + rw, access_type, mmu_idx); + if (ret != -1) { + goto found_tlb; + } + } + } + +found_tlb: + + if (ret >= 0) { + ctx->raddr = raddr; +#if 0 + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); +#endif + } else { +#if 0 + 
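+        /*
+         * (Worked example, not in the original sources:
+         * mmubooke206_check_tlb() above builds prot2 from MAS7_3; an
+         * entry carrying MAS3_SR | MAS3_SW | MAS3_UX gives supervisor
+         * read/write and user execute-only, so a user-mode store to it
+         * lands in this refused branch with ret = -2.)
+         */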
LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); +#endif + } + + return ret; +} + +#if 0 +static const char *book3e_tsize_to_str[32] = { + "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", + "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", + "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", + "1T", "2T" +}; + +static void mmubooke_dump_mmu(CPUPPCState *env) +{ + ppcemb_tlb_t *entry; + int i; + + if (kvm_enabled() && !env->kvm_sw_tlb) { + qemu_printf("Cannot access KVM TLB\n"); + return; + } + + qemu_printf("\nTLB:\n"); + qemu_printf("Effective Physical Size PID Prot " + "Attr\n"); + + entry = &env->tlb.tlbe[0]; + for (i = 0; i < env->nb_tlb; i++, entry++) { + hwaddr ea, pa; + target_ulong mask; + uint64_t size = (uint64_t)entry->size; + char size_buf[20]; + + /* Check valid flag */ + if (!(entry->prot & PAGE_VALID)) { + continue; + } + + mask = ~(entry->size - 1); + ea = entry->EPN & mask; + pa = entry->RPN & mask; + /* Extend the physical address to 36 bits */ + pa |= (hwaddr)(entry->RPN & 0xF) << 32; + if (size >= 1 * MiB) { + snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB); + } else { + snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB); + } + qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", + (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, + entry->prot, entry->attr); + } +} + +static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset, + int tlbsize) +{ + ppcmas_tlb_t *entry; + int i; + + qemu_printf("\nTLB%d:\n", tlbn); + qemu_printf("Effective Physical Size TID TS SRWX" + " URWX WIMGE U0123\n"); + + entry = &env->tlb.tlbm[offset]; + for (i = 0; i < tlbsize; i++, entry++) { + hwaddr ea, pa, size; + int tsize; + + if (!(entry->mas1 & MAS1_VALID)) { + continue; + } + + tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + size = 1024ULL << tsize; + ea = entry->mas2 & ~(size - 1); + pa = entry->mas7_3 & ~(size - 1); + + qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" + "U%c%c%c %c%c%c%c%c U%c%c%c%c\n", + (uint64_t)ea, (uint64_t)pa, + book3e_tsize_to_str[tsize], + (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, + (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT, + entry->mas7_3 & MAS3_SR ? 'R' : '-', + entry->mas7_3 & MAS3_SW ? 'W' : '-', + entry->mas7_3 & MAS3_SX ? 'X' : '-', + entry->mas7_3 & MAS3_UR ? 'R' : '-', + entry->mas7_3 & MAS3_UW ? 'W' : '-', + entry->mas7_3 & MAS3_UX ? 'X' : '-', + entry->mas2 & MAS2_W ? 'W' : '-', + entry->mas2 & MAS2_I ? 'I' : '-', + entry->mas2 & MAS2_M ? 'M' : '-', + entry->mas2 & MAS2_G ? 'G' : '-', + entry->mas2 & MAS2_E ? 'E' : '-', + entry->mas7_3 & MAS3_U0 ? '0' : '-', + entry->mas7_3 & MAS3_U1 ? '1' : '-', + entry->mas7_3 & MAS3_U2 ? '2' : '-', + entry->mas7_3 & MAS3_U3 ? 
'3' : '-'); + } +} + +static void mmubooke206_dump_mmu(CPUPPCState *env) +{ + int offset = 0; + int i; + +#if 0 + if (kvm_enabled() && !env->kvm_sw_tlb) { + qemu_printf("Cannot access KVM TLB\n"); + return; + } +#endif + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int size = booke206_tlb_size(env, i); + + if (size == 0) { + continue; + } + + mmubooke206_dump_one_tlb(env, i, offset, size); + offset += size; + } +} + +static void mmu6xx_dump_BATs(CPUPPCState *env, int type) +{ + target_ulong *BATlt, *BATut, *BATu, *BATl; + target_ulong BEPIl, BEPIu, bl; + int i; + + switch (type) { + case ACCESS_CODE: + BATlt = env->IBAT[1]; + BATut = env->IBAT[0]; + break; + default: + BATlt = env->DBAT[1]; + BATut = env->DBAT[0]; + break; + } + + for (i = 0; i < env->nb_BATs; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & 0xF0000000; + BEPIl = *BATu & 0x0FFE0000; + bl = (*BATu & 0x00001FFC) << 15; + qemu_printf("%s BAT%d BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " + TARGET_FMT_lx " " TARGET_FMT_lx "\n", + type == ACCESS_CODE ? "code" : "data", i, + *BATu, *BATl, BEPIu, BEPIl, bl); + } +} + +static void mmu6xx_dump_mmu(CPUPPCState *env) +{ + PowerPCCPU *cpu = env_archcpu(env); + ppc6xx_tlb_t *tlb; + target_ulong sr; + int type, way, entry, i; + + qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu)); + qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu)); + + qemu_printf("\nSegment registers:\n"); + for (i = 0; i < 32; i++) { + sr = env->sr[i]; + if (sr & 0x80000000) { + qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x " + "CNTLR_SPEC=0x%05x\n", i, + sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, + sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF), + (uint32_t)(sr & 0xFFFFF)); + } else { + qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i, + sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0, + sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0, + (uint32_t)(sr & 0x00FFFFFF)); + } + } + + qemu_printf("\nBATs:\n"); + mmu6xx_dump_BATs(env, ACCESS_INT); + mmu6xx_dump_BATs(env, ACCESS_CODE); + + if (env->id_tlbs != 1) { + qemu_printf("ERROR: 6xx MMU should have separated TLB" + " for code and data\n"); + } + + qemu_printf("\nTLBs [EPN EPN + SIZE]\n"); + + for (type = 0; type < 2; type++) { + for (way = 0; way < env->nb_ways; way++) { + for (entry = env->nb_tlb * type + env->tlb_per_way * way; + entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1)); + entry++) { + + tlb = &env->tlb.tlb6[entry]; + qemu_printf("%s TLB %02d/%02d way:%d %s [" + TARGET_FMT_lx " " TARGET_FMT_lx "]\n", + type ? "code" : "data", entry % env->nb_tlb, + env->nb_tlb, way, + pte_is_valid(tlb->pte0) ? 
"valid" : "inval", + tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE); + } + } + } +} +#endif + +void dump_mmu(CPUPPCState *env) +{ +#if 0 + switch (env->mmu_model) { + case POWERPC_MMU_BOOKE: + mmubooke_dump_mmu(env); + break; + case POWERPC_MMU_BOOKE206: + mmubooke206_dump_mmu(env); + break; + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + mmu6xx_dump_mmu(env); + break; +#if defined(TARGET_PPC64) + case POWERPC_MMU_64B: + case POWERPC_MMU_2_03: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_07: + dump_slb(env_archcpu(env)); + break; + case POWERPC_MMU_3_00: + if (ppc64_v3_radix(env_archcpu(env))) { + /* TODO - Unsupported */ + } else { + dump_slb(env_archcpu(env)); + break; + } +#endif + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); + } +#endif +} + +static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, int rw) +{ + int in_plb, ret; + + ctx->raddr = eaddr; + ctx->prot = PAGE_READ | PAGE_EXEC; + ret = 0; + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_REAL: + case POWERPC_MMU_BOOKE: + ctx->prot |= PAGE_WRITE; + break; + + case POWERPC_MMU_SOFT_4xx_Z: + if (unlikely(msr_pe != 0)) { + /* + * 403 family add some particular protections, using + * PBL/PBU registers for accesses with no translation. + */ + in_plb = + /* Check PLB validity */ + (env->pb[0] < env->pb[1] && + /* and address in plb area */ + eaddr >= env->pb[0] && eaddr < env->pb[1]) || + (env->pb[2] < env->pb[3] && + eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0; + if (in_plb ^ msr_px) { + /* Access in protected area */ + if (rw == 1) { + /* Access is not allowed */ + ret = -2; + } + } else { + /* Read-write access is allowed */ + ctx->prot |= PAGE_WRITE; + } + } + break; + + default: + /* Caller's checks mean we should never get here for other models */ + abort(); + return -1; + } + + return ret; +} + +static int get_physical_address_wtlb( + CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, int rw, int access_type, + int mmu_idx) +{ + int ret = -1; + bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0) + || (access_type != ACCESS_CODE && msr_dr == 0); + + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + if (real_mode) { + ret = check_physical(env, ctx, eaddr, rw); + } else { + /* Try to find a BAT */ + if (env->nb_BATs != 0) { + ret = get_bat_6xx_tlb(env, ctx, eaddr, rw, access_type); + } + if (ret < 0) { + /* We didn't match any BAT entry or don't have BATs */ + ret = get_segment_6xx_tlb(env, ctx, eaddr, rw, access_type); + } + } + break; + + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + if (real_mode) { + ret = check_physical(env, ctx, eaddr, rw); + } else { + ret = mmu40x_get_physical_address(env, ctx, eaddr, + rw, access_type); + } + break; + case POWERPC_MMU_BOOKE: + ret = mmubooke_get_physical_address(env, ctx, eaddr, + rw, access_type); + break; + case POWERPC_MMU_BOOKE206: + ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, + access_type, mmu_idx); + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_REAL: + if (real_mode) { + ret = check_physical(env, ctx, eaddr, rw); + } else { + cpu_abort(env_cpu(env), + "PowerPC in real mode do not do any translation\n"); + return -1; + } + break; + default: + cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); + return -1; + } + + return ret; +} + +static 
int get_physical_address( + CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, int rw, int access_type) +{ + return get_physical_address_wtlb(env, ctx, eaddr, rw, access_type, 0); +} + +hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + mmu_ctx_t ctx; + + switch (env->mmu_model) { +#if defined(TARGET_PPC64) + case POWERPC_MMU_64B: + case POWERPC_MMU_2_03: + case POWERPC_MMU_2_06: + case POWERPC_MMU_2_07: + return ppc_hash64_get_phys_page_debug(cpu, addr); + case POWERPC_MMU_3_00: + return ppc64_v3_get_phys_page_debug(cpu, addr); +#endif + + case POWERPC_MMU_32B: + case POWERPC_MMU_601: + return ppc_hash32_get_phys_page_debug(cpu, addr); + + default: + ; + } + + if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) { + + /* + * Some MMUs have separate TLBs for code and data. If we only + * try an ACCESS_INT, we may not be able to read instructions + * mapped by code TLBs, so we also try a ACCESS_CODE. + */ + if (unlikely(get_physical_address(env, &ctx, addr, 0, + ACCESS_CODE) != 0)) { + return -1; + } + } + + return ctx.raddr & TARGET_PAGE_MASK; +} + +static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, + int rw, int mmu_idx) +{ + uint32_t epid; + bool as, pr; + uint32_t missed_tid = 0; + bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); + if (rw == 2) { + as = msr_ir; + } + env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; + env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; + env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; + env->spr[SPR_BOOKE_MAS3] = 0; + env->spr[SPR_BOOKE_MAS6] = 0; + env->spr[SPR_BOOKE_MAS7] = 0; + + /* AS */ + if (as) { + env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; + env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; + } + + env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; + env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; + + if (!use_epid) { + switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { + case MAS4_TIDSELD_PID0: + missed_tid = env->spr[SPR_BOOKE_PID]; + break; + case MAS4_TIDSELD_PID1: + missed_tid = env->spr[SPR_BOOKE_PID1]; + break; + case MAS4_TIDSELD_PID2: + missed_tid = env->spr[SPR_BOOKE_PID2]; + break; + } + env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; + } else { + missed_tid = epid; + env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; + } + env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); + + + /* next victim logic */ + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; + env->last_way++; + env->last_way &= booke206_tlb_ways(env, 0) - 1; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; +} + +/* Perform address translation */ +static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, + int rw, int mmu_idx) +{ + CPUState *cs = env_cpu(env); + PowerPCCPU *cpu = POWERPC_CPU(cs); + mmu_ctx_t ctx; + int access_type; + int ret = 0; + + if (rw == 2) { + /* code access */ + rw = 0; + access_type = ACCESS_CODE; + } else { + /* data access */ + access_type = env->access_type; + } + ret = get_physical_address_wtlb(env, &ctx, address, rw, + access_type, mmu_idx); + if (ret == 0) { + tlb_set_page(cs, address & TARGET_PAGE_MASK, + ctx.raddr & TARGET_PAGE_MASK, ctx.prot, + mmu_idx, TARGET_PAGE_SIZE); + ret = 0; + } else if (ret < 0) { + LOG_MMU_STATE(cs); + if (access_type == ACCESS_CODE) { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + switch (env->mmu_model) { + case 
POWERPC_MMU_SOFT_6xx: + cs->exception_index = POWERPC_EXCP_IFTLB; + env->error_code = 1 << 18; + env->spr[SPR_IMISS] = address; + env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; + goto tlb_miss; + case POWERPC_MMU_SOFT_74xx: + cs->exception_index = POWERPC_EXCP_IFTLB; + goto tlb_miss_74xx; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + cs->exception_index = POWERPC_EXCP_ITLB; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = address; + env->spr[SPR_40x_ESR] = 0x00000000; + break; + case POWERPC_MMU_BOOKE206: + booke206_update_mas_tlb_miss(env, address, 2, mmu_idx); + /* fall through */ + case POWERPC_MMU_BOOKE: + cs->exception_index = POWERPC_EXCP_ITLB; + env->error_code = 0; + env->spr[SPR_BOOKE_DEAR] = address; + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, 0); + return -1; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_REAL: + cpu_abort(cs, "PowerPC in real mode should never raise " + "any MMU exceptions\n"); + return -1; + default: + cpu_abort(cs, "Unknown or invalid MMU model\n"); + return -1; + } + break; + case -2: + /* Access rights violation */ + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x08000000; + break; + case -3: + /* No execute protection violation */ + if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { + env->spr[SPR_BOOKE_ESR] = 0x00000000; + } + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x10000000; + break; + case -4: + /* Direct store exception */ + /* No code fetch is allowed in direct-store areas */ + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x10000000; + break; + } + } else { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + if (rw == 1) { + cs->exception_index = POWERPC_EXCP_DSTLB; + env->error_code = 1 << 16; + } else { + cs->exception_index = POWERPC_EXCP_DLTLB; + env->error_code = 0; + } + env->spr[SPR_DMISS] = address; + env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; + tlb_miss: + env->error_code |= ctx.key << 19; + env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + + get_pteg_offset32(cpu, ctx.hash[0]); + env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + + get_pteg_offset32(cpu, ctx.hash[1]); + break; + case POWERPC_MMU_SOFT_74xx: + if (rw == 1) { + cs->exception_index = POWERPC_EXCP_DSTLB; + } else { + cs->exception_index = POWERPC_EXCP_DLTLB; + } + tlb_miss_74xx: + /* Implement LRU algorithm */ + env->error_code = ctx.key << 19; + env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) | + ((env->last_way + 1) & (env->nb_ways - 1)); + env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; + break; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + cs->exception_index = POWERPC_EXCP_DTLB; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = address; + if (rw) { + env->spr[SPR_40x_ESR] = 0x00800000; + } else { + env->spr[SPR_40x_ESR] = 0x00000000; + } + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE206: + booke206_update_mas_tlb_miss(env, address, rw, mmu_idx); + /* fall through */ + case POWERPC_MMU_BOOKE: + cs->exception_index = POWERPC_EXCP_DTLB; + env->error_code = 0; + env->spr[SPR_BOOKE_DEAR] = address; + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw); + return -1; + case POWERPC_MMU_REAL: + cpu_abort(cs, "PowerPC in real mode should never raise " + "any MMU exceptions\n"); + return -1; + 
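+            /*
+             * (Note added for clarity, not in the original: in the 6xx
+             * software-miss path above, SPR_HASH1/SPR_HASH2 hold the
+             * physical addresses of the primary and secondary PTE
+             * groups; each group is 64 bytes, so get_pteg_offset32()
+             * yields (hash * 64) wrapped by the hash table mask, with
+             * the secondary hash being the bitwise inverse of the
+             * primary one.)
+             */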
default: + cpu_abort(cs, "Unknown or invalid MMU model\n"); + return -1; + } + break; + case -2: + /* Access rights violation */ + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + if (env->mmu_model == POWERPC_MMU_SOFT_4xx + || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { + env->spr[SPR_40x_DEAR] = address; + if (rw) { + env->spr[SPR_40x_ESR] |= 0x00800000; + } + } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { + env->spr[SPR_BOOKE_DEAR] = address; + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw); + } else { + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x0A000000; + } else { + env->spr[SPR_DSISR] = 0x08000000; + } + } + break; + case -4: + /* Direct store exception */ + switch (access_type) { + case ACCESS_FLOAT: + /* Floating point load/store */ + cs->exception_index = POWERPC_EXCP_ALIGN; + env->error_code = POWERPC_EXCP_ALIGN_FP; + env->spr[SPR_DAR] = address; + break; + case ACCESS_RES: + /* lwarx, ldarx or stwcx. */ + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x06000000; + } else { + env->spr[SPR_DSISR] = 0x04000000; + } + break; + case ACCESS_EXT: + /* eciwx or ecowx */ + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x06100000; + } else { + env->spr[SPR_DSISR] = 0x04100000; + } + break; + default: + printf("DSI: invalid exception (%d)\n", ret); + cs->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = + POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; + env->spr[SPR_DAR] = address; + break; + } + break; + } + } + ret = 1; + } + + return ret; +} + +/*****************************************************************************/ +/* BATs management */ +#if !defined(FLUSH_ALL_TLBS) +static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, + target_ulong mask) +{ + CPUState *cs = env_cpu(env); + target_ulong base, end, page; + + base = BATu & ~0x0001FFFF; + end = base + mask + 0x00020000; + if (((end - base) >> TARGET_PAGE_BITS) > 1024) { + /* Flushing 1024 4K pages is slower than a complete flush */ + LOG_BATS("Flush all BATs\n"); + tlb_flush(CPU(cs)); + LOG_BATS("Flush done\n"); + return; + } + LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" + TARGET_FMT_lx ")\n", base, end, mask); + for (page = base; page != end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(cs, page); + } + LOG_BATS("Flush done\n"); +} +#endif + +static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, + target_ulong value) +{ + LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID, + nr, ul == 0 ? 
'u' : 'l', value, env->nip); +} + +void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + target_ulong mask; +#if defined(FLUSH_ALL_TLBS) + PowerPCCPU *cpu = env_archcpu(env); +#endif + + dump_store_bat(env, 'I', 0, nr, value); + if (env->IBAT[0][nr] != value) { + mask = (value << 15) & 0x0FFE0000UL; +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#endif + /* + * When storing valid upper BAT, mask BEPI and BRPN and + * invalidate all TLBs covered by this BAT + */ + mask = (value << 15) & 0x0FFE0000UL; + env->IBAT[0][nr] = (value & 0x00001FFFUL) | + (value & ~0x0001FFFFUL & ~mask); + env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) | + (env->IBAT[1][nr] & ~0x0001FFFF & ~mask); +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + tlb_flush(env_cpu(env)); +#endif + } +} + +void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + dump_store_bat(env, 'I', 1, nr, value); + env->IBAT[1][nr] = value; +} + +void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + target_ulong mask; +#if defined(FLUSH_ALL_TLBS) + PowerPCCPU *cpu = env_archcpu(env); +#endif + + dump_store_bat(env, 'D', 0, nr, value); + if (env->DBAT[0][nr] != value) { + /* + * When storing valid upper BAT, mask BEPI and BRPN and + * invalidate all TLBs covered by this BAT + */ + mask = (value << 15) & 0x0FFE0000UL; +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->DBAT[0][nr], mask); +#endif + mask = (value << 15) & 0x0FFE0000UL; + env->DBAT[0][nr] = (value & 0x00001FFFUL) | + (value & ~0x0001FFFFUL & ~mask); + env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) | + (env->DBAT[1][nr] & ~0x0001FFFF & ~mask); +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->DBAT[0][nr], mask); +#else + tlb_flush(env_cpu(env)); +#endif + } +} + +void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + dump_store_bat(env, 'D', 1, nr, value); + env->DBAT[1][nr] = value; +} + +void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value) +{ + target_ulong mask; +#if defined(FLUSH_ALL_TLBS) + PowerPCCPU *cpu = env_archcpu(env); + int do_inval; +#endif + + dump_store_bat(env, 'I', 0, nr, value); + if (env->IBAT[0][nr] != value) { +#if defined(FLUSH_ALL_TLBS) + do_inval = 0; +#endif + mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; + if (env->IBAT[1][nr] & 0x40) { + /* Invalidate BAT only if it is valid */ +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + do_inval = 1; +#endif + } + /* + * When storing valid upper BAT, mask BEPI and BRPN and + * invalidate all TLBs covered by this BAT + */ + env->IBAT[0][nr] = (value & 0x00001FFFUL) | + (value & ~0x0001FFFFUL & ~mask); + env->DBAT[0][nr] = env->IBAT[0][nr]; + if (env->IBAT[1][nr] & 0x40) { +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + do_inval = 1; +#endif + } +#if defined(FLUSH_ALL_TLBS) + if (do_inval) { + tlb_flush(env_cpu(env)); + } +#endif + } +} + +void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value) +{ +#if !defined(FLUSH_ALL_TLBS) + target_ulong mask; +#else + PowerPCCPU *cpu = env_archcpu(env); + int do_inval; +#endif + + dump_store_bat(env, 'I', 1, nr, value); + if (env->IBAT[1][nr] != value) { +#if defined(FLUSH_ALL_TLBS) + do_inval = 0; +#endif + if (env->IBAT[1][nr] & 0x40) { +#if !defined(FLUSH_ALL_TLBS) + mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; + do_invalidate_BAT(env, 
env->IBAT[0][nr], mask);
+#else
+            do_inval = 1;
+#endif
+        }
+        if (value & 0x40) {
+#if !defined(FLUSH_ALL_TLBS)
+            mask = (value << 17) & 0x0FFE0000UL;
+            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+            do_inval = 1;
+#endif
+        }
+        env->IBAT[1][nr] = value;
+        env->DBAT[1][nr] = value;
+#if defined(FLUSH_ALL_TLBS)
+        if (do_inval) {
+            tlb_flush(env_cpu(env));
+        }
+#endif
+    }
+}
+
+/*****************************************************************************/
+/* TLB management */
+void ppc_tlb_invalidate_all(CPUPPCState *env)
+{
+#if defined(TARGET_PPC64)
+    if (env->mmu_model & POWERPC_MMU_64) {
+        env->tlb_need_flush = 0;
+        tlb_flush(env_cpu(env));
+    } else
+#endif /* defined(TARGET_PPC64) */
+    switch (env->mmu_model) {
+    case POWERPC_MMU_SOFT_6xx:
+    case POWERPC_MMU_SOFT_74xx:
+        ppc6xx_tlb_invalidate_all(env);
+        break;
+    case POWERPC_MMU_SOFT_4xx:
+    case POWERPC_MMU_SOFT_4xx_Z:
+        ppc4xx_tlb_invalidate_all(env);
+        break;
+    case POWERPC_MMU_REAL:
+        cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
+        break;
+    case POWERPC_MMU_MPC8xx:
+        /* XXX: TODO */
+        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
+        break;
+    case POWERPC_MMU_BOOKE:
+        tlb_flush(env_cpu(env));
+        break;
+    case POWERPC_MMU_BOOKE206:
+        booke206_flush_tlb(env, -1, 0);
+        break;
+    case POWERPC_MMU_32B:
+    case POWERPC_MMU_601:
+        env->tlb_need_flush = 0;
+        tlb_flush(env_cpu(env));
+        break;
+    default:
+        /* XXX: TODO */
+        cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
+        break;
+    }
+}
+
+void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
+{
+#if !defined(FLUSH_ALL_TLBS)
+    addr &= TARGET_PAGE_MASK;
+#if defined(TARGET_PPC64)
+    if (env->mmu_model & POWERPC_MMU_64) {
+        /* tlbie invalidates TLBs for all segments */
+        /*
+         * XXX: given the fact that there are too many segments to invalidate,
+         * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
+         * we just invalidate all TLBs
+         */
+        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+    } else
+#endif /* defined(TARGET_PPC64) */
+    switch (env->mmu_model) {
+    case POWERPC_MMU_SOFT_6xx:
+    case POWERPC_MMU_SOFT_74xx:
+        ppc6xx_tlb_invalidate_virt(env, addr, 0);
+        if (env->id_tlbs == 1) {
+            ppc6xx_tlb_invalidate_virt(env, addr, 1);
+        }
+        break;
+    case POWERPC_MMU_32B:
+    case POWERPC_MMU_601:
+        /*
+         * Actual CPUs invalidate entire congruence classes based on
+         * the geometry of their TLBs and some OSes take that into
+         * account; we just mark the TLB to be flushed later (context
+         * synchronizing event or sync instruction on 32-bit).
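+         * (Illustration, not in the original comment: on a 2-way,
+         * 32-set TLB a tlbie would drop both ways of the set indexed
+         * by the effective address, whatever VSID they were mapped
+         * under.)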
+         */
+        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+        break;
+    default:
+        /* Should never reach here with other MMU models */
+        assert(0);
+    }
+#else
+    ppc_tlb_invalidate_all(env);
+#endif
+}
+
+/*****************************************************************************/
+/* Special registers manipulation */
+void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
+{
+#if 0
+    PowerPCCPU *cpu = env_archcpu(env);
+    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
+    assert(!cpu->vhyp);
+#endif
+#if defined(TARGET_PPC64)
+    if (env->mmu_model & POWERPC_MMU_64) {
+        target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
+        target_ulong htabsize = value & SDR_64_HTABSIZE;
+
+        if (value & ~sdr_mask) {
+#if 0
+            error_report("Invalid bits 0x"TARGET_FMT_lx" set in SDR1",
+                         value & ~sdr_mask);
+#endif
+            value &= sdr_mask;
+        }
+        if (htabsize > 28) {
+#if 0
+            error_report("Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
+                         htabsize);
+#endif
+            return;
+        }
+    }
+#endif /* defined(TARGET_PPC64) */
+    /* FIXME: Should check for valid HTABMASK values in 32-bit case */
+    env->spr[SPR_SDR1] = value;
+}
+
+#if defined(TARGET_PPC64)
+void ppc_store_ptcr(CPUPPCState *env, target_ulong value)
+{
+#if 0
+    PowerPCCPU *cpu = env_archcpu(env);
+#endif
+    target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
+    target_ulong patbsize = value & PTCR_PATS;
+
+#if 0
+    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
+
+    assert(!cpu->vhyp);
+#endif
+    assert(env->mmu_model & POWERPC_MMU_3_00);
+
+    if (value & ~ptcr_mask) {
+#if 0
+        error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
+                     value & ~ptcr_mask);
+#endif
+        value &= ptcr_mask;
+    }
+
+    if (patbsize > 24) {
+#if 0
+        error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
+                     " stored in PTCR", patbsize);
+#endif
+        return;
+    }
+
+    env->spr[SPR_PTCR] = value;
+}
+
+#endif /* defined(TARGET_PPC64) */
+
+/* Segment registers load and store */
+target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
+{
+#if defined(TARGET_PPC64)
+    if (env->mmu_model & POWERPC_MMU_64) {
+        /* XXX */
+        return 0;
+    }
+#endif
+    return env->sr[sr_num];
+}
+
+void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
+{
+#if 0
+    qemu_log_mask(CPU_LOG_MMU,
+                  "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
+                  (int)srnum, value, env->sr[srnum]);
+#endif
+#if defined(TARGET_PPC64)
+    if (env->mmu_model & POWERPC_MMU_64) {
+        PowerPCCPU *cpu = env_archcpu(env);
+        uint64_t esid, vsid;
+
+        /* ESID = srnum */
+        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
+
+        /* VSID = VSID */
+        vsid = (value & 0xfffffff) << 12;
+        /* flags = flags */
+        vsid |= ((value >> 27) & 0xf) << 8;
+
+        ppc_store_slb(cpu, srnum, esid, vsid);
+    } else
+#endif
+    if (env->sr[srnum] != value) {
+        env->sr[srnum] = value;
+        /*
+         * Invalidating 256 MB of virtual memory in 4 kB pages takes way
+         * longer than flushing the whole TLB.
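+         * (Concretely: one segment register covers 256 MB, i.e.
+         * 256 MB / 4 kB = 65536 pages, so per-page invalidation would
+         * mean 65536 tlb_flush_page() calls for a single store.)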
+ */ +#if !defined(FLUSH_ALL_TLBS) && 0 + { + target_ulong page, end; + /* Invalidate 256 MB of virtual memory */ + page = (16 << 20) * srnum; + end = page + (16 << 20); + for (; page != end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(env_cpu(env), page); + } + } +#else + env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; +#endif + } +} + +/* TLB management */ +void helper_tlbia(CPUPPCState *env) +{ + ppc_tlb_invalidate_all(env); +} + +void helper_tlbie(CPUPPCState *env, target_ulong addr) +{ + ppc_tlb_invalidate_one(env, addr); +} + +void helper_tlbiva(CPUPPCState *env, target_ulong addr) +{ + /* tlbiva instruction only exists on BookE */ + assert(env->mmu_model == POWERPC_MMU_BOOKE); + /* XXX: TODO */ + cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n"); +} + +/* Software driven TLBs management */ +/* PowerPC 602/603 software TLB load instructions helpers */ +static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) +{ + target_ulong RPN, CMP, EPN; + int way; + + RPN = env->spr[SPR_RPA]; + if (is_code) { + CMP = env->spr[SPR_ICMP]; + EPN = env->spr[SPR_IMISS]; + } else { + CMP = env->spr[SPR_DCMP]; + EPN = env->spr[SPR_DMISS]; + } + way = (env->spr[SPR_SRR1] >> 17) & 1; + (void)EPN; /* avoid a compiler warning */ +#if 0 + LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx + " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, + RPN, way); +#endif + /* Store this TLB */ + ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), + way, is_code, CMP, RPN); +} + +void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN) +{ + do_6xx_tlb(env, EPN, 0); +} + +void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN) +{ + do_6xx_tlb(env, EPN, 1); +} + +/* PowerPC 74xx software TLB load instructions helpers */ +static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code) +{ + target_ulong RPN, CMP, EPN; + int way; + + RPN = env->spr[SPR_PTELO]; + CMP = env->spr[SPR_PTEHI]; + EPN = env->spr[SPR_TLBMISS] & ~0x3; + way = env->spr[SPR_TLBMISS] & 0x3; + (void)EPN; /* avoid a compiler warning */ + LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx + " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP, + RPN, way); + /* Store this TLB */ + ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK), + way, is_code, CMP, RPN); +} + +void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN) +{ + do_74xx_tlb(env, EPN, 0); +} + +void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN) +{ + do_74xx_tlb(env, EPN, 1); +} + +/*****************************************************************************/ +/* PowerPC 601 specific instructions (POWER bridge) */ + +target_ulong helper_rac(CPUPPCState *env, target_ulong addr) +{ + mmu_ctx_t ctx; + int nb_BATs; + target_ulong ret = 0; + + /* + * We don't have to generate many instances of this instruction, + * as rac is supervisor only. 
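+     * (Illustration, not part of the original comment: the code keeps
+     * translation active but temporarily forces nb_BATs to 0, so the
+     * returned real address comes from segment/TLB translation only,
+     * and 0 is returned when translation fails.)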
+ * + * XXX: FIX THIS: Pretend we have no BAT + */ + nb_BATs = env->nb_BATs; + env->nb_BATs = 0; + if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) { + ret = ctx.raddr; + } + env->nb_BATs = nb_BATs; + return ret; +} + +static inline target_ulong booke_tlb_to_page_size(int size) +{ + return 1024 << (2 * size); +} + +static inline int booke_page_size_to_tlb(target_ulong page_size) +{ + int size; + + switch (page_size) { + case 0x00000400UL: + size = 0x0; + break; + case 0x00001000UL: + size = 0x1; + break; + case 0x00004000UL: + size = 0x2; + break; + case 0x00010000UL: + size = 0x3; + break; + case 0x00040000UL: + size = 0x4; + break; + case 0x00100000UL: + size = 0x5; + break; + case 0x00400000UL: + size = 0x6; + break; + case 0x01000000UL: + size = 0x7; + break; + case 0x04000000UL: + size = 0x8; + break; + case 0x10000000UL: + size = 0x9; + break; + case 0x40000000UL: + size = 0xA; + break; +#if defined(TARGET_PPC64) + case 0x000100000000ULL: + size = 0xB; + break; + case 0x000400000000ULL: + size = 0xC; + break; + case 0x001000000000ULL: + size = 0xD; + break; + case 0x004000000000ULL: + size = 0xE; + break; + case 0x010000000000ULL: + size = 0xF; + break; +#endif + default: + size = -1; + break; + } + + return size; +} + +/* Helpers for 4xx TLB management */ +#define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */ + +#define PPC4XX_TLBHI_V 0x00000040 +#define PPC4XX_TLBHI_E 0x00000020 +#define PPC4XX_TLBHI_SIZE_MIN 0 +#define PPC4XX_TLBHI_SIZE_MAX 7 +#define PPC4XX_TLBHI_SIZE_DEFAULT 1 +#define PPC4XX_TLBHI_SIZE_SHIFT 7 +#define PPC4XX_TLBHI_SIZE_MASK 0x00000007 + +#define PPC4XX_TLBLO_EX 0x00000200 +#define PPC4XX_TLBLO_WR 0x00000100 +#define PPC4XX_TLBLO_ATTR_MASK 0x000000FF +#define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00 + +target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry) +{ + ppcemb_tlb_t *tlb; + target_ulong ret; + int size; + + entry &= PPC4XX_TLB_ENTRY_MASK; + tlb = &env->tlb.tlbe[entry]; + ret = tlb->EPN; + if (tlb->prot & PAGE_VALID) { + ret |= PPC4XX_TLBHI_V; + } + size = booke_page_size_to_tlb(tlb->size); + if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) { + size = PPC4XX_TLBHI_SIZE_DEFAULT; + } + ret |= size << PPC4XX_TLBHI_SIZE_SHIFT; + env->spr[SPR_40x_PID] = tlb->PID; + return ret; +} + +target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry) +{ + ppcemb_tlb_t *tlb; + target_ulong ret; + + entry &= PPC4XX_TLB_ENTRY_MASK; + tlb = &env->tlb.tlbe[entry]; + ret = tlb->RPN; + if (tlb->prot & PAGE_EXEC) { + ret |= PPC4XX_TLBLO_EX; + } + if (tlb->prot & PAGE_WRITE) { + ret |= PPC4XX_TLBLO_WR; + } + return ret; +} + +void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, + target_ulong val) +{ + CPUState *cs = env_cpu(env); + ppcemb_tlb_t *tlb; + target_ulong page, end; + + LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry, + val); + entry &= PPC4XX_TLB_ENTRY_MASK; + tlb = &env->tlb.tlbe[entry]; + /* Invalidate previous TLB (if it's valid) */ + if (tlb->prot & PAGE_VALID) { + end = tlb->EPN + tlb->size; + LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end " + TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end); + for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(cs, page); + } + } + tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT) + & PPC4XX_TLBHI_SIZE_MASK); + /* + * We cannot handle TLB size < TARGET_PAGE_SIZE. 
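+     * (Worked example: the SIZE field encodes 1024 << (2 * SIZE) bytes,
+     * so SIZE = 0 is a 1 kB page, smaller than the 4 kB TARGET_PAGE_SIZE,
+     * while SIZE = 1..7 cover 4 kB up to 16 MB and are representable.)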
+     * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
+     */
+    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
+        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
+                  "is not supported (%d)\n"
+                  "Please implement TARGET_PAGE_BITS_VARY\n",
+                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
+    }
+    tlb->EPN = val & ~(tlb->size - 1);
+    if (val & PPC4XX_TLBHI_V) {
+        tlb->prot |= PAGE_VALID;
+        if (val & PPC4XX_TLBHI_E) {
+            /* XXX: TO BE FIXED */
+            cpu_abort(cs,
+                      "Little-endian TLB entries are not supported yet\n");
+        }
+    } else {
+        tlb->prot &= ~PAGE_VALID;
+    }
+    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
+    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
+              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
+              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+              tlb->prot & PAGE_READ ? 'r' : '-',
+              tlb->prot & PAGE_WRITE ? 'w' : '-',
+              tlb->prot & PAGE_EXEC ? 'x' : '-',
+              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+    /* Invalidate new TLB (if valid) */
+    if (tlb->prot & PAGE_VALID) {
+        end = tlb->EPN + tlb->size;
+        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
+                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
+        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
+            tlb_flush_page(cs, page);
+        }
+    }
+}
+
+void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
+                         target_ulong val)
+{
+    ppcemb_tlb_t *tlb;
+
+    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
+              val);
+    entry &= PPC4XX_TLB_ENTRY_MASK;
+    tlb = &env->tlb.tlbe[entry];
+    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
+    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
+    tlb->prot = PAGE_READ;
+    if (val & PPC4XX_TLBLO_EX) {
+        tlb->prot |= PAGE_EXEC;
+    }
+    if (val & PPC4XX_TLBLO_WR) {
+        tlb->prot |= PAGE_WRITE;
+    }
+    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
+              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
+              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+              tlb->prot & PAGE_READ ? 'r' : '-',
+              tlb->prot & PAGE_WRITE ? 'w' : '-',
+              tlb->prot & PAGE_EXEC ? 'x' : '-',
+              tlb->prot & PAGE_VALID ?
'v' : '-', (int)tlb->PID); +} + +target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address) +{ + return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]); +} + +/* PowerPC 440 TLB management */ +void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry, + target_ulong value) +{ + ppcemb_tlb_t *tlb; + target_ulong EPN, RPN, size; + int do_flush_tlbs; + + LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n", + __func__, word, (int)entry, value); + do_flush_tlbs = 0; + entry &= 0x3F; + tlb = &env->tlb.tlbe[entry]; + switch (word) { + default: + /* Just here to please gcc */ + case 0: + EPN = value & 0xFFFFFC00; + if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) { + do_flush_tlbs = 1; + } + tlb->EPN = EPN; + size = booke_tlb_to_page_size((value >> 4) & 0xF); + if ((tlb->prot & PAGE_VALID) && tlb->size < size) { + do_flush_tlbs = 1; + } + tlb->size = size; + tlb->attr &= ~0x1; + tlb->attr |= (value >> 8) & 1; + if (value & 0x200) { + tlb->prot |= PAGE_VALID; + } else { + if (tlb->prot & PAGE_VALID) { + tlb->prot &= ~PAGE_VALID; + do_flush_tlbs = 1; + } + } + tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF; + if (do_flush_tlbs) { + tlb_flush(env_cpu(env)); + } + break; + case 1: + RPN = value & 0xFFFFFC0F; + if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) { + tlb_flush(env_cpu(env)); + } + tlb->RPN = RPN; + break; + case 2: + tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00); + tlb->prot = tlb->prot & PAGE_VALID; + if (value & 0x1) { + tlb->prot |= PAGE_READ << 4; + } + if (value & 0x2) { + tlb->prot |= PAGE_WRITE << 4; + } + if (value & 0x4) { + tlb->prot |= PAGE_EXEC << 4; + } + if (value & 0x8) { + tlb->prot |= PAGE_READ; + } + if (value & 0x10) { + tlb->prot |= PAGE_WRITE; + } + if (value & 0x20) { + tlb->prot |= PAGE_EXEC; + } + break; + } +} + +target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word, + target_ulong entry) +{ + ppcemb_tlb_t *tlb; + target_ulong ret; + int size; + + entry &= 0x3F; + tlb = &env->tlb.tlbe[entry]; + switch (word) { + default: + /* Just here to please gcc */ + case 0: + ret = tlb->EPN; + size = booke_page_size_to_tlb(tlb->size); + if (size < 0 || size > 0xF) { + size = 1; + } + ret |= size << 4; + if (tlb->attr & 0x1) { + ret |= 0x100; + } + if (tlb->prot & PAGE_VALID) { + ret |= 0x200; + } + env->spr[SPR_440_MMUCR] &= ~0x000000FF; + env->spr[SPR_440_MMUCR] |= tlb->PID; + break; + case 1: + ret = tlb->RPN; + break; + case 2: + ret = tlb->attr & ~0x1; + if (tlb->prot & (PAGE_READ << 4)) { + ret |= 0x1; + } + if (tlb->prot & (PAGE_WRITE << 4)) { + ret |= 0x2; + } + if (tlb->prot & (PAGE_EXEC << 4)) { + ret |= 0x4; + } + if (tlb->prot & PAGE_READ) { + ret |= 0x8; + } + if (tlb->prot & PAGE_WRITE) { + ret |= 0x10; + } + if (tlb->prot & PAGE_EXEC) { + ret |= 0x20; + } + break; + } + return ret; +} + +target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address) +{ + return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF); +} + +/* PowerPC BookE 2.06 TLB management */ + +static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env) +{ + uint32_t tlbncfg = 0; + int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT; + int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK); + int tlb; + + tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; + tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb]; + + if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) { + cpu_abort(env_cpu(env), "we don't support HES yet\n"); + } + + return booke206_get_tlbm(env, tlb, ea, esel); 
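+    /*
+     * Worked example of the MAS0 decode above, assuming the usual e500
+     * field placement (TLBSEL at bit 28, ESEL at bit 16); the value is
+     * illustrative only: MAS0 = 0x10030000 gives tlb = 1 and esel = 3,
+     * i.e. entry 3 of TLB1.
+     */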
+} + +void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid) +{ + env->spr[pidn] = pid; + /* changing PIDs means we're in a different address space now */ + tlb_flush(env_cpu(env)); +} + +void helper_booke_set_eplc(CPUPPCState *env, target_ulong val) +{ + env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK; + tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD); +} +void helper_booke_set_epsc(CPUPPCState *env, target_ulong val) +{ + env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK; + tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE); +} + +static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb) +{ + if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) { + tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK); + } else { + tlb_flush(env_cpu(env)); + } +} + +void helper_booke206_tlbwe(CPUPPCState *env) +{ + uint32_t tlbncfg, tlbn; + ppcmas_tlb_t *tlb; + uint32_t size_tlb, size_ps; + target_ulong mask; + + + switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) { + case MAS0_WQ_ALWAYS: + /* good to go, write that entry */ + break; + case MAS0_WQ_COND: + /* XXX check if reserved */ + if (0) { + return; + } + break; + case MAS0_WQ_CLR_RSRV: + /* XXX clear entry */ + return; + default: + /* no idea what to do */ + return; + } + + if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) && + !msr_gs) { + /* XXX we don't support direct LRAT setting yet */ + fprintf(stderr, "cpu: don't support LRAT setting yet\n"); + return; + } + + tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT; + tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn]; + + tlb = booke206_cur_tlb(env); + + if (!tlb) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_INVAL, GETPC()); + } + + /* check that we support the targeted size */ + size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + size_ps = booke206_tlbnps(env, tlbn); + if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) && + !(size_ps & (1 << size_tlb))) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_INVAL, GETPC()); + } + + if (msr_gs) { + cpu_abort(env_cpu(env), "missing HV implementation\n"); + } + + if (tlb->mas1 & MAS1_VALID) { + /* + * Invalidate the page in QEMU TLB if it was a valid entry. + * + * In "PowerPC e500 Core Family Reference Manual, Rev. 1", + * Section "12.4.2 TLB Write Entry (tlbwe) Instruction": + * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf) + * + * "Note that when an L2 TLB entry is written, it may be displacing an + * already valid entry in the same L2 TLB location (a victim). If a + * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1 + * TLB entry is automatically invalidated."
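+     *
+     * flush_page() mirrors that rule on the QEMU side: a single
+     * tlb_flush_page() when the victim entry spans exactly one target
+     * page, a full tlb_flush() otherwise.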
+ */ + flush_page(env, tlb); + } + + tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) | + env->spr[SPR_BOOKE_MAS3]; + tlb->mas1 = env->spr[SPR_BOOKE_MAS1]; + + if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { + /* For TLB which has a fixed size TSIZE is ignored with MAV2 */ + booke206_fixed_size_tlbn(env, tlbn, tlb); + } else { + if (!(tlbncfg & TLBnCFG_AVAIL)) { + /* force !AVAIL TLB entries to correct page size */ + tlb->mas1 &= ~MAS1_TSIZE_MASK; + /* XXX can be configured in MMUCSR0 */ + tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12; + } + } + + /* Make a mask from TLB size to discard invalid bits in EPN field */ + mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); + /* Add a mask for page attributes */ + mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E; + + if (!msr_cm) { + /* + * Executing a tlbwe instruction in 32-bit mode will set bits + * 0:31 of the TLB EPN field to zero. + */ + mask &= 0xffffffff; + } + + tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask; + + if (!(tlbncfg & TLBnCFG_IPROT)) { + /* no IPROT supported by TLB */ + tlb->mas1 &= ~MAS1_IPROT; + } + + flush_page(env, tlb); +} + +static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb) +{ + int tlbn = booke206_tlbm_to_tlbn(env, tlb); + int way = booke206_tlbm_to_way(env, tlb); + + env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT; + env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; + + env->spr[SPR_BOOKE_MAS1] = tlb->mas1; + env->spr[SPR_BOOKE_MAS2] = tlb->mas2; + env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3; + env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32; +} + +void helper_booke206_tlbre(CPUPPCState *env) +{ + ppcmas_tlb_t *tlb = NULL; + + tlb = booke206_cur_tlb(env); + if (!tlb) { + env->spr[SPR_BOOKE_MAS1] = 0; + } else { + booke206_tlb_to_mas(env, tlb); + } +} + +void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address) +{ + ppcmas_tlb_t *tlb = NULL; + int i, j; + hwaddr raddr; + uint32_t spid, sas; + + spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT; + sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int ways = booke206_tlb_ways(env, i); + + for (j = 0; j < ways; j++) { + tlb = booke206_get_tlbm(env, i, address, j); + + if (!tlb) { + continue; + } + + if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) { + continue; + } + + if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { + continue; + } + + booke206_tlb_to_mas(env, tlb); + return; + } + } + + /* no entry found, fill with defaults */ + env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; + env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; + env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; + env->spr[SPR_BOOKE_MAS3] = 0; + env->spr[SPR_BOOKE_MAS7] = 0; + + if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) { + env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; + } + + env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16) + << MAS1_TID_SHIFT; + + /* next victim logic */ + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; + env->last_way++; + env->last_way &= booke206_tlb_ways(env, 0) - 1; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; +} + +static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn, + uint32_t ea) +{ + int i; + int ways = booke206_tlb_ways(env, tlbn); + target_ulong mask; + + for (i = 0; i < ways; i++) { + ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i); + 
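+        /*
+         * Skip absent ways, then match the EPN at this entry's own page
+         * granularity; entries with IPROT set are never invalidated here.
+         */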
if (!tlb) { + continue; + } + mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); + if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) && + !(tlb->mas1 & MAS1_IPROT)) { + tlb->mas1 &= ~MAS1_VALID; + } + } +} + +void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address) +{ + CPUState *cs = env_cpu(env); + + if (address & 0x4) { + /* flush all entries */ + if (address & 0x8) { + /* flush all of TLB1 */ + booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1); + } else { + /* flush all of TLB0 */ + booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0); + } + return; + } + + if (address & 0x8) { + /* flush TLB1 entries */ + booke206_invalidate_ea_tlb(env, 1, address); + tlb_flush(cs); + } else { + /* flush TLB0 entries */ + booke206_invalidate_ea_tlb(env, 0, address); + tlb_flush_page(cs, address & MAS2_EPN_MASK); + } +} + +void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address) +{ + /* XXX missing LPID handling */ + booke206_flush_tlb(env, -1, 1); +} + +void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address) +{ + int i, j; + int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); + ppcmas_tlb_t *tlb = env->tlb.tlbm; + int tlb_size; + + /* XXX missing LPID handling */ + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + tlb_size = booke206_tlb_size(env, i); + for (j = 0; j < tlb_size; j++) { + if (!(tlb[j].mas1 & MAS1_IPROT) && + ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) { + tlb[j].mas1 &= ~MAS1_VALID; + } + } + tlb += booke206_tlb_size(env, i); + } + tlb_flush(env_cpu(env)); +} + +void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address) +{ + int i, j; + ppcmas_tlb_t *tlb; + int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); + int pid = tid >> MAS6_SPID_SHIFT; + int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS; + int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? 
MAS1_IND : 0; + /* XXX check for unsupported isize and raise an invalid opcode then */ + int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK; + /* XXX implement MAV2 handling */ + bool mav2 = false; + + /* XXX missing LPID handling */ + /* flush by pid and ea */ + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int ways = booke206_tlb_ways(env, i); + + for (j = 0; j < ways; j++) { + tlb = booke206_get_tlbm(env, i, address, j); + if (!tlb) { + continue; + } + if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) || + (tlb->mas1 & MAS1_IPROT) || + ((tlb->mas1 & MAS1_IND) != ind) || + ((tlb->mas8 & MAS8_TGS) != sgs)) { + continue; + } + if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) { + /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */ + continue; + } + /* XXX e500mc doesn't match SAS, but other cores might */ + tlb->mas1 &= ~MAS1_VALID; + } + } + tlb_flush(env_cpu(env)); +} + +void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type) +{ + int flags = 0; + + if (type & 2) { + flags |= BOOKE206_FLUSH_TLB1; + } + + if (type & 4) { + flags |= BOOKE206_FLUSH_TLB0; + } + + booke206_flush_tlb(env, flags, 1); +} + + +void helper_check_tlb_flush_local(CPUPPCState *env) +{ + check_tlb_flush(env, false); +} + +void helper_check_tlb_flush_global(CPUPPCState *env) +{ + check_tlb_flush(env, true); +} + +/*****************************************************************************/ + +bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); + CPUPPCState *env = &cpu->env; + int ret; + + if (pcc->handle_mmu_fault) { + ret = pcc->handle_mmu_fault(cpu, addr, access_type, mmu_idx); + } else { + ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx); + } + if (unlikely(ret != 0)) { + if (probe) { + return false; + } + raise_exception_err_ra(env, cs->exception_index, env->error_code, + retaddr); + } + return true; +} diff --git a/qemu/target/ppc/timebase_helper.c b/qemu/target/ppc/timebase_helper.c new file mode 100644 index 00000000..8dc0a419 --- /dev/null +++ b/qemu/target/ppc/timebase_helper.c @@ -0,0 +1,206 @@ +/* + * PowerPC emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" + +/*****************************************************************************/ +/* SPR accesses */ + +target_ulong helper_load_tbl(CPUPPCState *env) +{ + return (target_ulong)cpu_ppc_load_tbl(env); +} + +target_ulong helper_load_tbu(CPUPPCState *env) +{ + return cpu_ppc_load_tbu(env); +} + +target_ulong helper_load_atbl(CPUPPCState *env) +{ + return (target_ulong)cpu_ppc_load_atbl(env); +} + +target_ulong helper_load_atbu(CPUPPCState *env) +{ + return cpu_ppc_load_atbu(env); +} + +target_ulong helper_load_vtb(CPUPPCState *env) +{ + return cpu_ppc_load_vtb(env); +} + +#if defined(TARGET_PPC64) +target_ulong helper_load_purr(CPUPPCState *env) +{ + return (target_ulong)cpu_ppc_load_purr(env); +} + +void helper_store_purr(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_purr(env, val); +} +#endif + +target_ulong helper_load_601_rtcl(CPUPPCState *env) +{ + return cpu_ppc601_load_rtcl(env); +} + +target_ulong helper_load_601_rtcu(CPUPPCState *env) +{ + return cpu_ppc601_load_rtcu(env); +} + +void helper_store_tbl(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_tbl(env, val); +} + +void helper_store_tbu(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_tbu(env, val); +} + +void helper_store_atbl(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_atbl(env, val); +} + +void helper_store_atbu(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_atbu(env, val); +} + +void helper_store_601_rtcl(CPUPPCState *env, target_ulong val) +{ + cpu_ppc601_store_rtcl(env, val); +} + +void helper_store_601_rtcu(CPUPPCState *env, target_ulong val) +{ + cpu_ppc601_store_rtcu(env, val); +} + +target_ulong helper_load_decr(CPUPPCState *env) +{ + return cpu_ppc_load_decr(env); +} + +void helper_store_decr(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_decr(env, val); +} + +target_ulong helper_load_hdecr(CPUPPCState *env) +{ + return cpu_ppc_load_hdecr(env); +} + +void helper_store_hdecr(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_hdecr(env, val); +} + +void helper_store_vtb(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_vtb(env, val); +} + +void helper_store_tbu40(CPUPPCState *env, target_ulong val) +{ + cpu_ppc_store_tbu40(env, val); +} + +target_ulong helper_load_40x_pit(CPUPPCState *env) +{ + return load_40x_pit(env); +} + +void helper_store_40x_pit(CPUPPCState *env, target_ulong val) +{ + store_40x_pit(env, val); +} + +void helper_store_booke_tcr(CPUPPCState *env, target_ulong val) +{ + store_booke_tcr(env, val); +} + +void helper_store_booke_tsr(CPUPPCState *env, target_ulong val) +{ + store_booke_tsr(env, val); +} + +/*****************************************************************************/ +/* Embedded PowerPC specific helpers */ + +/* XXX: to be improved to check access rights when in user-mode */ +target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn) +{ + uint32_t val = 0; + + if (unlikely(env->dcr_env == NULL)) { + qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n"); + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_INVAL, GETPC()); + } else { + int ret; + + ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val); + if (unlikely(ret != 0)) { +#if 0 + qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n", + (uint32_t)dcrn, (uint32_t)dcrn); +#endif + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_PRIV_REG, GETPC()); + } + } + return val; +} + 
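+/*
+ * Illustrative sketch only (kept disabled): DCRs are device registers
+ * reached indirectly through dcrn, so the board model registers
+ * per-register callbacks and ppc_dcr_read()/ppc_dcr_write() dispatch to
+ * them, returning non-zero for unmapped dcrn values, which the helpers
+ * in this file turn into a program interrupt.  The device type below is
+ * hypothetical; the registration hook is named as in mainline QEMU.
+ */
+#if 0
+typedef struct MyDev { uint32_t regs[16]; } MyDev;    /* hypothetical */
+
+static uint32_t my_dev_dcr_read(void *opaque, int dcrn)
+{
+    /* return the register selected by the low bits of dcrn */
+    return ((MyDev *)opaque)->regs[dcrn & 0xf];
+}
+
+static void my_dev_dcr_write(void *opaque, int dcrn, uint32_t val)
+{
+    ((MyDev *)opaque)->regs[dcrn & 0xf] = val;
+}
+
+/* wired up once at board init, e.g.:
+ *     ppc_dcr_register(env, dcrn, dev, my_dev_dcr_read, my_dev_dcr_write);
+ */
+#endif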
+void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val) +{ + if (unlikely(env->dcr_env == NULL)) { +#if 0 + qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n"); +#endif + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_INVAL_INVAL, GETPC()); + } else { + int ret; + ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val); + if (unlikely(ret != 0)) { +#if 0 + qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n", + (uint32_t)dcrn, (uint32_t)dcrn); +#endif + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | + POWERPC_EXCP_PRIV_REG, GETPC()); + } + } +} diff --git a/qemu/target/ppc/translate.c b/qemu/target/ppc/translate.c new file mode 100644 index 00000000..8cfebab2 --- /dev/null +++ b/qemu/target/ppc/translate.c @@ -0,0 +1,7751 @@ +/* + * PowerPC emulation for qemu: main translation routines. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * Copyright (C) 2011 Freescale Semiconductor, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "exec/exec-all.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" +#include "qemu/host-utils.h" +#include "exec/cpu_ldst.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "exec/translator.h" +#include "qemu/atomic128.h" + + +#define CPU_SINGLE_STEP 0x1 +#define CPU_BRANCH_STEP 0x2 +#define GDBSTUB_SINGLE_STEP 0x4 + +/* Include definitions for instructions classes and implementations flags */ +/* #define PPC_DEBUG_DISAS */ +/* #define DO_PPC_STATISTICS */ + +#ifdef PPC_DEBUG_DISAS +# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) +#else +# define LOG_DISAS(...) 
do { } while (0) +#endif +/*****************************************************************************/ +/* Code translation helpers */ + +/* global register indexes */ +static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */ + + 10 * 4 + 22 * 5 /* SPE GPRh */ + + 8 * 5 /* CRF */]; +static TCGv cpu_gpr[32]; +static TCGv cpu_gprh[32]; +static TCGv_i32 cpu_crf[8]; +static TCGv cpu_nip; +static TCGv cpu_msr; +static TCGv cpu_ctr; +static TCGv cpu_lr; +#if defined(TARGET_PPC64) +static TCGv cpu_cfar; +#endif +static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32; +static TCGv cpu_reserve; +static TCGv cpu_reserve_val; +static TCGv cpu_fpscr; +static TCGv_i32 cpu_access_type; + +#include "exec/gen-icount.h" + +void ppc_translate_init(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + int i; + char *p; + size_t cpu_reg_names_size; + + p = cpu_reg_names; + cpu_reg_names_size = sizeof(cpu_reg_names); + + for (i = 0; i < 8; i++) { + snprintf(p, cpu_reg_names_size, "crf%d", i); + cpu_crf[i] = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, crf[i]), p); + p += 5; + cpu_reg_names_size -= 5; + } + + for (i = 0; i < 32; i++) { + snprintf(p, cpu_reg_names_size, "r%d", i); + cpu_gpr[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, gpr[i]), p); + p += (i < 10) ? 3 : 4; + cpu_reg_names_size -= (i < 10) ? 3 : 4; + snprintf(p, cpu_reg_names_size, "r%dH", i); + cpu_gprh[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, gprh[i]), p); + p += (i < 10) ? 4 : 5; + cpu_reg_names_size -= (i < 10) ? 4 : 5; + } + + cpu_nip = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, nip), "nip"); + + cpu_msr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, msr), "msr"); + + cpu_ctr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, ctr), "ctr"); + + cpu_lr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, lr), "lr"); + +#if defined(TARGET_PPC64) + cpu_cfar = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, cfar), "cfar"); +#endif + + cpu_xer = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, xer), "xer"); + cpu_so = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, so), "SO"); + cpu_ov = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, ov), "OV"); + cpu_ca = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, ca), "CA"); + cpu_ov32 = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, ov32), "OV32"); + cpu_ca32 = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, ca32), "CA32"); + + cpu_reserve = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, reserve_addr), + "reserve_addr"); + cpu_reserve_val = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, reserve_val), + "reserve_val"); + + cpu_fpscr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, fpscr), "fpscr"); + + cpu_access_type = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUPPCState, access_type), + "access_type"); +} + +/* internal defines */ +struct DisasContext { + DisasContextBase base; + uint32_t opcode; + uint32_t exception; + /* Routine used to access memory */ + bool pr, hv, dr, le_mode; + bool lazy_tlb_flush; + bool need_access_type; + int mem_idx; + int access_type; + /* Translation flags */ + MemOp default_tcg_memop_mask; +#if defined(TARGET_PPC64) + bool sf_mode; + bool 
has_cfar; +#endif + bool fpu_enabled; + bool altivec_enabled; + bool vsx_enabled; + bool spe_enabled; + bool tm_enabled; + bool gtse; + ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */ + int singlestep_enabled; + uint32_t flags; + uint64_t insns_flags; + uint64_t insns_flags2; + + // Unicorn + struct uc_struct *uc; +}; + +/* Return true iff byteswap is needed in a scalar memop */ +static inline bool need_byteswap(const DisasContext *ctx) +{ +#if defined(TARGET_WORDS_BIGENDIAN) + return ctx->le_mode; +#else + return !ctx->le_mode; +#endif +} + +/* True when active word size < size of target_long. */ +#ifdef TARGET_PPC64 +# define NARROW_MODE(C) (!(C)->sf_mode) +#else +# define NARROW_MODE(C) 0 +#endif + +struct opc_handler_t { + /* invalid bits for instruction 1 (Rc(opcode) == 0) */ + uint32_t inval1; + /* invalid bits for instruction 2 (Rc(opcode) == 1) */ + uint32_t inval2; + /* instruction type */ + uint64_t type; + /* extended instruction type */ + uint64_t type2; + /* handler */ + void (*handler)(DisasContext *ctx); +#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) + const char *oname; +#endif +#if defined(DO_PPC_STATISTICS) + uint64_t count; +#endif +}; + +/* SPR load/store helpers */ +static inline void gen_load_spr(TCGContext *tcg_ctx, TCGv t, int reg) +{ + tcg_gen_ld_tl(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUPPCState, spr[reg])); +} + +static inline void gen_store_spr(TCGContext *tcg_ctx, int reg, TCGv t) +{ + tcg_gen_st_tl(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUPPCState, spr[reg])); +} + +static inline void gen_set_access_type(DisasContext *ctx, int access_type) +{ + if (ctx->need_access_type && ctx->access_type != access_type) { + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_movi_i32(tcg_ctx, cpu_access_type, access_type); + ctx->access_type = access_type; + } +} + +static inline void gen_update_nip(DisasContext *ctx, target_ulong nip) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (NARROW_MODE(ctx)) { + nip = (uint32_t)nip; + } + tcg_gen_movi_tl(tcg_ctx, cpu_nip, nip); +} + +static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0, t1; + + /* + * These are all synchronous exceptions, we set the PC back to the + * faulting instruction + */ + if (ctx->exception == POWERPC_EXCP_NONE) { + gen_update_nip(ctx, ctx->base.pc_next - 4); + } + t0 = tcg_const_i32(tcg_ctx, excp); + t1 = tcg_const_i32(tcg_ctx, error); +#ifdef UNICORN_ARCH_POSTFIX + glue(gen_helper_raise_exception_err, UNICORN_ARCH_POSTFIX)(tcg_ctx, tcg_ctx->cpu_env, t0, t1); +#else + gen_helper_raise_exception_err(tcg_ctx, tcg_ctx->cpu_env, t0, t1); +#endif + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + ctx->exception = (excp); +} + +static void gen_exception(DisasContext *ctx, uint32_t excp) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0; + + /* + * These are all synchronous exceptions, we set the PC back to the + * faulting instruction + */ + if (ctx->exception == POWERPC_EXCP_NONE) { + gen_update_nip(ctx, ctx->base.pc_next - 4); + } + t0 = tcg_const_i32(tcg_ctx, excp); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, t0); + tcg_temp_free_i32(tcg_ctx, t0); + ctx->exception = (excp); +} + +static void gen_exception_nip(DisasContext *ctx, uint32_t excp, + target_ulong nip) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0; + + gen_update_nip(ctx, nip); + t0 = tcg_const_i32(tcg_ctx, excp); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, t0); + 
tcg_temp_free_i32(tcg_ctx, t0); + ctx->exception = (excp); +} + +/* + * Tells the caller what is the appropriate exception to generate and prepares + * SPR registers for this exception. + * + * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or + * POWERPC_EXCP_DEBUG (on BookE). + */ +static uint32_t gen_prep_dbgex(DisasContext *ctx) +{ + if (ctx->flags & POWERPC_FLAG_DE) { + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong dbsr = 0; + if (ctx->singlestep_enabled & CPU_SINGLE_STEP) { + dbsr = DBCR0_ICMP; + } else { + /* Must have been branch */ + dbsr = DBCR0_BRT; + } + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_load_spr(tcg_ctx, t0, SPR_BOOKE_DBSR); + tcg_gen_ori_tl(tcg_ctx, t0, t0, dbsr); + gen_store_spr(tcg_ctx, SPR_BOOKE_DBSR, t0); + tcg_temp_free(tcg_ctx, t0); + return POWERPC_EXCP_DEBUG; + } else { + return POWERPC_EXCP_TRACE; + } +} + +static void gen_debug_exception(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0; + + /* + * These are all synchronous exceptions, we set the PC back to the + * faulting instruction + */ + if ((ctx->exception != POWERPC_EXCP_BRANCH) && + (ctx->exception != POWERPC_EXCP_SYNC)) { + gen_update_nip(ctx, ctx->base.pc_next); + } + t0 = tcg_const_i32(tcg_ctx, EXCP_DEBUG); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, t0); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static inline void gen_inval_exception(DisasContext *ctx, uint32_t error) +{ + /* Will be converted to program check if needed */ + gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error); +} + +static inline void gen_priv_exception(DisasContext *ctx, uint32_t error) +{ + gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error); +} + +static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error) +{ + /* Will be converted to program check if needed */ + gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error); +} + +/* Stop translation */ +static inline void gen_stop_exception(DisasContext *ctx) +{ + gen_update_nip(ctx, ctx->base.pc_next); + ctx->exception = POWERPC_EXCP_STOP; +} + +/* No need to update nip here, as execution flow will change */ +static inline void gen_sync_exception(DisasContext *ctx) +{ + ctx->exception = POWERPC_EXCP_SYNC; +} + +#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ +GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE) + +#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \ +GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2) + +#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \ +GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE) + +#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \ +GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2) + +#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2) \ +GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2) + +#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \ +GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) + +typedef struct opcode_t { + unsigned char opc1, opc2, opc3, opc4; +#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */ + unsigned char pad[4]; +#endif + opc_handler_t handler; + const char *oname; +} opcode_t; + +/* Helpers for priv. 
check */ +#define GEN_PRIV \ + do { \ + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \ + } while (0) + +#define CHK_HV \ + do { \ + if (unlikely(ctx->pr || !ctx->hv)) { \ + GEN_PRIV; \ + } \ + } while (0) +#define CHK_SV \ + do { \ + if (unlikely(ctx->pr)) { \ + GEN_PRIV; \ + } \ + } while (0) +#define CHK_HVRM \ + do { \ + if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \ + GEN_PRIV; \ + } \ + } while (0) + +#define CHK_NONE + +/*****************************************************************************/ +/* PowerPC instructions table */ + +#if defined(DO_PPC_STATISTICS) +#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \ +{ \ + .opc1 = op1, \ + .opc2 = op2, \ + .opc3 = op3, \ + .opc4 = 0xff, \ + .handler = { \ + .inval1 = invl, \ + .type = _typ, \ + .type2 = _typ2, \ + .handler = &gen_##name, \ + .oname = stringify(name), \ + }, \ + .oname = stringify(name), \ +} +#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \ +{ \ + .opc1 = op1, \ + .opc2 = op2, \ + .opc3 = op3, \ + .opc4 = 0xff, \ + .handler = { \ + .inval1 = invl1, \ + .inval2 = invl2, \ + .type = _typ, \ + .type2 = _typ2, \ + .handler = &gen_##name, \ + .oname = stringify(name), \ + }, \ + .oname = stringify(name), \ +} +#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \ +{ \ + .opc1 = op1, \ + .opc2 = op2, \ + .opc3 = op3, \ + .opc4 = 0xff, \ + .handler = { \ + .inval1 = invl, \ + .type = _typ, \ + .type2 = _typ2, \ + .handler = &gen_##name, \ + .oname = onam, \ + }, \ + .oname = onam, \ +} +#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \ +{ \ + .opc1 = op1, \ + .opc2 = op2, \ + .opc3 = op3, \ + .opc4 = op4, \ + .handler = { \ + .inval1 = invl, \ + .type = _typ, \ + .type2 = _typ2, \ + .handler = &gen_##name, \ + .oname = stringify(name), \ + }, \ + .oname = stringify(name), \ +} +#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \ +{ \ + .opc1 = op1, \ + .opc2 = op2, \ + .opc3 = op3, \ + .opc4 = op4, \ + .handler = { \ + .inval1 = invl, \ + .type = _typ, \ + .type2 = _typ2, \ + .handler = &gen_##name, \ + .oname = onam, \ + }, \ + .oname = onam, \ +} +#else +#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \ +{ \ + .opc1 = op1, \ + .opc2 = op2, \ + .opc3 = op3, \ + .opc4 = 0xff, \ + .handler = { \ + .inval1 = invl, \ + .type = _typ, \ + .type2 = _typ2, \ + .handler = &gen_##name, \ + }, \ + .oname = stringify(name), \ +} +#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \ +{ \ + .opc1 = op1, \ + .opc2 = op2, \ + .opc3 = op3, \ + .opc4 = 0xff, \ + .handler = { \ + .inval1 = invl1, \ + .inval2 = invl2, \ + .type = _typ, \ + .type2 = _typ2, \ + .handler = &gen_##name, \ + }, \ + .oname = stringify(name), \ +} +#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \ +{ \ + .opc1 = op1, \ + .opc2 = op2, \ + .opc3 = op3, \ + .opc4 = 0xff, \ + .handler = { \ + .inval1 = invl, \ + .type = _typ, \ + .type2 = _typ2, \ + .handler = &gen_##name, \ + }, \ + .oname = onam, \ +} +#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \ +{ \ + .opc1 = op1, \ + .opc2 = op2, \ + .opc3 = op3, \ + .opc4 = op4, \ + .handler = { \ + .inval1 = invl, \ + .type = _typ, \ + .type2 = _typ2, \ + .handler = &gen_##name, \ + }, \ + .oname = stringify(name), \ +} +#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \ +{ \ + .opc1 = op1, \ + .opc2 = op2, \ + .opc3 = op3, \ + .opc4 = op4, \ + .handler = { \ + .inval1 = invl, \ + .type = _typ, \ + .type2 = _typ2, \ + .handler = 
&gen_##name, \ + }, \ + .oname = onam, \ +} +#endif + +/* Invalid instruction */ +static void gen_invalid(DisasContext *ctx) +{ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); +} + +static opc_handler_t invalid_handler = { + .inval1 = 0xFFFFFFFF, + .inval2 = 0xFFFFFFFF, + .type = PPC_NONE, + .type2 = PPC_NONE, + .handler = gen_invalid, +}; + +/*** Integer comparison ***/ + +static inline void gen_op_cmp(TCGContext *tcg_ctx, TCGv arg0, TCGv arg1, int s, int crf) +{ + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, t0, CRF_EQ); + tcg_gen_movi_tl(tcg_ctx, t1, CRF_LT); + tcg_gen_movcond_tl(tcg_ctx, (s ? TCG_COND_LT : TCG_COND_LTU), + t0, arg0, arg1, t1, t0); + tcg_gen_movi_tl(tcg_ctx, t1, CRF_GT); + tcg_gen_movcond_tl(tcg_ctx, (s ? TCG_COND_GT : TCG_COND_GTU), + t0, arg0, arg1, t1, t0); + + tcg_gen_trunc_tl_i32(tcg_ctx, t, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[crf], cpu_so); + tcg_gen_or_i32(tcg_ctx, cpu_crf[crf], cpu_crf[crf], t); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t); +} + +static inline void gen_op_cmpi(TCGContext *tcg_ctx, TCGv arg0, target_ulong arg1, int s, int crf) +{ + TCGv t0 = tcg_const_tl(tcg_ctx, arg1); + gen_op_cmp(tcg_ctx, arg0, t0, s, crf); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_op_cmp32(TCGContext *tcg_ctx, TCGv arg0, TCGv arg1, int s, int crf) +{ + TCGv t0, t1; + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + if (s) { + tcg_gen_ext32s_tl(tcg_ctx, t0, arg0); + tcg_gen_ext32s_tl(tcg_ctx, t1, arg1); + } else { + tcg_gen_ext32u_tl(tcg_ctx, t0, arg0); + tcg_gen_ext32u_tl(tcg_ctx, t1, arg1); + } + gen_op_cmp(tcg_ctx, t0, t1, s, crf); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_op_cmpi32(TCGContext *tcg_ctx, TCGv arg0, target_ulong arg1, int s, int crf) +{ + TCGv t0 = tcg_const_tl(tcg_ctx, arg1); + gen_op_cmp32(tcg_ctx, arg0, t0, s, crf); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (NARROW_MODE(ctx)) { + gen_op_cmpi32(tcg_ctx, reg, 0, 1, 0); + } else { + gen_op_cmpi(tcg_ctx, reg, 0, 1, 0); + } +} + +/* cmp */ +static void gen_cmp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) { + gen_op_cmp(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], + 1, crfD(ctx->opcode)); + } else { + gen_op_cmp32(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], + 1, crfD(ctx->opcode)); + } +} + +/* cmpi */ +static void gen_cmpi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) { + gen_op_cmpi(tcg_ctx, cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode), + 1, crfD(ctx->opcode)); + } else { + gen_op_cmpi32(tcg_ctx, cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode), + 1, crfD(ctx->opcode)); + } +} + +/* cmpl */ +static void gen_cmpl(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) { + gen_op_cmp(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], + 0, crfD(ctx->opcode)); + } else { + gen_op_cmp32(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], + 0, crfD(ctx->opcode)); + } +} + +/* cmpli */ +static void gen_cmpli(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if ((ctx->opcode & 
0x00200000) && (ctx->insns_flags & PPC_64B)) { + gen_op_cmpi(tcg_ctx, cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode), + 0, crfD(ctx->opcode)); + } else { + gen_op_cmpi32(tcg_ctx, cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode), + 0, crfD(ctx->opcode)); + } +} + +/* cmprb - range comparison: isupper, isalpha, islower */ +static void gen_cmprb(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 src1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 src2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 src2lo = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 src2hi = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)]; + + tcg_gen_trunc_tl_i32(tcg_ctx, src1, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_trunc_tl_i32(tcg_ctx, src2, cpu_gpr[rB(ctx->opcode)]); + + tcg_gen_andi_i32(tcg_ctx, src1, src1, 0xFF); + tcg_gen_ext8u_i32(tcg_ctx, src2lo, src2); + tcg_gen_shri_i32(tcg_ctx, src2, src2, 8); + tcg_gen_ext8u_i32(tcg_ctx, src2hi, src2); + + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LEU, src2lo, src2lo, src1); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LEU, src2hi, src1, src2hi); + tcg_gen_and_i32(tcg_ctx, crf, src2lo, src2hi); + + if (ctx->opcode & 0x00200000) { + tcg_gen_shri_i32(tcg_ctx, src2, src2, 8); + tcg_gen_ext8u_i32(tcg_ctx, src2lo, src2); + tcg_gen_shri_i32(tcg_ctx, src2, src2, 8); + tcg_gen_ext8u_i32(tcg_ctx, src2hi, src2); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LEU, src2lo, src2lo, src1); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LEU, src2hi, src1, src2hi); + tcg_gen_and_i32(tcg_ctx, src2lo, src2lo, src2hi); + tcg_gen_or_i32(tcg_ctx, crf, crf, src2lo); + } + tcg_gen_shli_i32(tcg_ctx, crf, crf, CRF_GT_BIT); + tcg_temp_free_i32(tcg_ctx, src1); + tcg_temp_free_i32(tcg_ctx, src2); + tcg_temp_free_i32(tcg_ctx, src2lo); + tcg_temp_free_i32(tcg_ctx, src2hi); +} + +#if defined(TARGET_PPC64) +/* cmpeqb */ +static void gen_cmpeqb(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_cmpeqb(tcg_ctx, cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); +} +#endif + +/* isel (PowerPC 2.03 specification) */ +static void gen_isel(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t bi = rC(ctx->opcode); + uint32_t mask = 0x08 >> (bi & 0x03); + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv zr; + + tcg_gen_extu_i32_tl(tcg_ctx, t0, cpu_crf[bi >> 2]); + tcg_gen_andi_tl(tcg_ctx, t0, t0, mask); + + zr = tcg_const_tl(tcg_ctx, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr, + rA(ctx->opcode) ?
cpu_gpr[rA(ctx->opcode)] : zr, + cpu_gpr[rB(ctx->opcode)]); + tcg_temp_free(tcg_ctx, zr); + tcg_temp_free(tcg_ctx, t0); +} + +/* cmpb: PowerPC 2.05 specification */ +static void gen_cmpb(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_cmpb(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); +} + +/*** Integer arithmetic ***/ + +static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, + TCGv arg1, TCGv arg2, int sub) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + + tcg_gen_xor_tl(tcg_ctx, cpu_ov, arg0, arg2); + tcg_gen_xor_tl(tcg_ctx, t0, arg1, arg2); + if (sub) { + tcg_gen_and_tl(tcg_ctx, cpu_ov, cpu_ov, t0); + } else { + tcg_gen_andc_tl(tcg_ctx, cpu_ov, cpu_ov, t0); + } + tcg_temp_free(tcg_ctx, t0); + if (NARROW_MODE(ctx)) { + tcg_gen_extract_tl(tcg_ctx, cpu_ov, cpu_ov, 31, 1); + if (is_isa300(ctx)) { + tcg_gen_mov_tl(tcg_ctx, cpu_ov32, cpu_ov); + } + } else { + if (is_isa300(ctx)) { + tcg_gen_extract_tl(tcg_ctx, cpu_ov32, cpu_ov, 31, 1); + } + tcg_gen_extract_tl(tcg_ctx, cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1); + } + tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); +} + +static inline void gen_op_arith_compute_ca32(DisasContext *ctx, + TCGv res, TCGv arg0, TCGv arg1, + TCGv ca32, int sub) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + if (!is_isa300(ctx)) { + return; + } + + t0 = tcg_temp_new(tcg_ctx); + if (sub) { + tcg_gen_eqv_tl(tcg_ctx, t0, arg0, arg1); + } else { + tcg_gen_xor_tl(tcg_ctx, t0, arg0, arg1); + } + tcg_gen_xor_tl(tcg_ctx, t0, t0, res); + tcg_gen_extract_tl(tcg_ctx, ca32, t0, 32, 1); + tcg_temp_free(tcg_ctx, t0); +} + +/* Common add function */ +static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, + TCGv arg2, TCGv ca, TCGv ca32, + bool add_ca, bool compute_ca, + bool compute_ov, bool compute_rc0) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = ret; + + if (compute_ca || compute_ov) { + t0 = tcg_temp_new(tcg_ctx); + } + + if (compute_ca) { + if (NARROW_MODE(ctx)) { + /* + * Caution: a non-obvious corner case of the spec is that + * we must produce the *entire* 64-bit addition, but + * produce the carry into bit 32. 
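+             *
+             * The XOR pair below recovers it: t1 = arg1 ^ arg2 is the sum
+             * with all carries suppressed, so t0 ^ t1 has bit k set exactly
+             * when a carry propagated into bit k, and extracting bit 32
+             * yields CA.  E.g. 0xFFFFFFFF + 1: t0 = 0x100000000,
+             * t1 = 0xFFFFFFFE, t0 ^ t1 = 0x1FFFFFFFE, whose bit 32 is 1.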
+ */ + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_xor_tl(tcg_ctx, t1, arg1, arg2); /* add without carry */ + tcg_gen_add_tl(tcg_ctx, t0, arg1, arg2); + if (add_ca) { + tcg_gen_add_tl(tcg_ctx, t0, t0, ca); + } + tcg_gen_xor_tl(tcg_ctx, ca, t0, t1); /* bits changed w/ carry */ + tcg_temp_free(tcg_ctx, t1); + tcg_gen_extract_tl(tcg_ctx, ca, ca, 32, 1); + if (is_isa300(ctx)) { + tcg_gen_mov_tl(tcg_ctx, ca32, ca); + } + } else { + TCGv zero = tcg_const_tl(tcg_ctx, 0); + if (add_ca) { + tcg_gen_add2_tl(tcg_ctx, t0, ca, arg1, zero, ca, zero); + tcg_gen_add2_tl(tcg_ctx, t0, ca, t0, ca, arg2, zero); + } else { + tcg_gen_add2_tl(tcg_ctx, t0, ca, arg1, zero, arg2, zero); + } + gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0); + tcg_temp_free(tcg_ctx, zero); + } + } else { + tcg_gen_add_tl(tcg_ctx, t0, arg1, arg2); + if (add_ca) { + tcg_gen_add_tl(tcg_ctx, t0, t0, ca); + } + } + + if (compute_ov) { + gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0); + } + if (unlikely(compute_rc0)) { + gen_set_Rc0(ctx, t0); + } + + if (t0 != ret) { + tcg_gen_mov_tl(tcg_ctx, ret, t0); + tcg_temp_free(tcg_ctx, t0); + } +} +/* Add functions with two operands */ +#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ + ca, glue(ca, 32), \ + add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ +} +/* Add functions with one operand and one immediate */ +#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca, \ + add_ca, compute_ca, compute_ov) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv t0 = tcg_const_tl(tcg_ctx, const_val); \ + gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ + cpu_gpr[rA(ctx->opcode)], t0, \ + ca, glue(ca, 32), \ + add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ + tcg_temp_free(tcg_ctx, t0); \ +} + +/* add add. addo addo. */ +GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0) +GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1) +/* addc addc. addco addco. */ +GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0) +GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1) +/* adde adde. addeo addeo. */ +GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0) +GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1) +/* addme addme. addmeo addmeo. */ +GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0) +GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1) +/* addex */ +GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0); +/* addze addze. 
addzeo addzeo.*/ +GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0) +GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1) +/* addi */ +static void gen_addi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_long simm = SIMM(ctx->opcode); + + if (rA(ctx->opcode) == 0) { + /* li case */ + tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], simm); + } else { + tcg_gen_addi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], + cpu_gpr[rA(ctx->opcode)], simm); + } +} +/* addic addic.*/ +static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv c = tcg_const_tl(tcg_ctx, SIMM(ctx->opcode)); + gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0); + tcg_temp_free(tcg_ctx, c); +} + +static void gen_addic(DisasContext *ctx) +{ + gen_op_addic(ctx, 0); +} + +static void gen_addic_(DisasContext *ctx) +{ + gen_op_addic(ctx, 1); +} + +/* addis */ +static void gen_addis(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_long simm = SIMM(ctx->opcode); + + if (rA(ctx->opcode) == 0) { + /* lis case */ + tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], simm << 16); + } else { + tcg_gen_addi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], + cpu_gpr[rA(ctx->opcode)], simm << 16); + } +} + +/* addpcis */ +static void gen_addpcis(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_long d = DX(ctx->opcode); + + tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], ctx->base.pc_next + (d << 16)); +} + +static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, + TCGv arg2, int sign, int compute_ov) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t0, arg1); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, arg2); + if (sign) { + tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_i32(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_i32(tcg_ctx, t2, t2, t3); + tcg_gen_movi_i32(tcg_ctx, t3, 0); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_i32(tcg_ctx, t3, t0, t1); + tcg_gen_extu_i32_tl(tcg_ctx, ret, t3); + } else { + tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t2, t1, 0); + tcg_gen_movi_i32(tcg_ctx, t3, 0); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_divu_i32(tcg_ctx, t3, t0, t1); + tcg_gen_extu_i32_tl(tcg_ctx, ret, t3); + } + if (compute_ov) { + tcg_gen_extu_i32_tl(tcg_ctx, cpu_ov, t2); + if (is_isa300(ctx)) { + tcg_gen_extu_i32_tl(tcg_ctx, cpu_ov32, t2); + } + tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); + } + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, ret); + } +} +/* Div functions */ +#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \ + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ + sign, compute_ov); \ +} +/* divwu divwu. divwuo divwuo. */ +GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0); +GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1); +/* divw divw. divwo divwo. 
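+ * For the two invalid cases (divisor zero, or INT_MIN / -1 when signed)
+ * the quotient is undefined and only OV matters for the o forms:
+ * gen_op_arith_divw() above folds both conditions into t2, substitutes a
+ * safe divisor via movcond so the host division cannot trap, and copies
+ * t2 into OV/OV32.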
*/ +GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0); +GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1); + +/* div[wd]eu[o][.] */ +#define GEN_DIVE(name, hlpr, compute_ov) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, compute_ov); \ + gen_helper_##hlpr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, \ + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \ + tcg_temp_free_i32(tcg_ctx, t0); \ + if (unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \ + } \ +} + +GEN_DIVE(divweu, divweu, 0); +GEN_DIVE(divweuo, divweu, 1); +GEN_DIVE(divwe, divwe, 0); +GEN_DIVE(divweo, divwe, 1); + +#if defined(TARGET_PPC64) +static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1, + TCGv arg2, int sign, int compute_ov) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_mov_i64(tcg_ctx, t0, arg1); + tcg_gen_mov_i64(tcg_ctx, t1, arg2); + if (sign) { + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t2, t0, INT64_MIN); + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_i64(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_i64(tcg_ctx, t2, t2, t3); + tcg_gen_movi_i64(tcg_ctx, t3, 0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_i64(tcg_ctx, ret, t0, t1); + } else { + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t2, t1, 0); + tcg_gen_movi_i64(tcg_ctx, t3, 0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_divu_i64(tcg_ctx, ret, t0, t1); + } + if (compute_ov) { + tcg_gen_mov_tl(tcg_ctx, cpu_ov, t2); + if (is_isa300(ctx)) { + tcg_gen_mov_tl(tcg_ctx, cpu_ov32, t2); + } + tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); + + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, ret); + } +} + +#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \ + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ + sign, compute_ov); \ +} +/* divdu divdu. divduo divduo. */ +GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0); +GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1); +/* divd divd. divdo divdo. 
*/ +GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0); +GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1); + +GEN_DIVE(divdeu, divdeu, 0); +GEN_DIVE(divdeuo, divdeu, 1); +GEN_DIVE(divde, divde, 0); +GEN_DIVE(divdeo, divde, 1); +#endif + +static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1, + TCGv arg2, int sign) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t0, arg1); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, arg2); + if (sign) { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_i32(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_i32(tcg_ctx, t2, t2, t3); + tcg_gen_movi_i32(tcg_ctx, t3, 0); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_rem_i32(tcg_ctx, t3, t0, t1); + tcg_gen_ext_i32_tl(tcg_ctx, ret, t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } else { + TCGv_i32 t2 = tcg_const_i32(tcg_ctx, 1); + TCGv_i32 t3 = tcg_const_i32(tcg_ctx, 0); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, t1, t1, t3, t2, t1); + tcg_gen_remu_i32(tcg_ctx, t3, t0, t1); + tcg_gen_extu_i32_tl(tcg_ctx, ret, t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); +} + +#define GEN_INT_ARITH_MODW(name, opc3, sign) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)], \ + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ + sign); \ +} + +GEN_INT_ARITH_MODW(moduw, 0x08, 0); +GEN_INT_ARITH_MODW(modsw, 0x18, 1); + +#if defined(TARGET_PPC64) +static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1, + TCGv arg2, int sign) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_mov_i64(tcg_ctx, t0, arg1); + tcg_gen_mov_i64(tcg_ctx, t1, arg2); + if (sign) { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t2, t0, INT64_MIN); + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_i64(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_i64(tcg_ctx, t2, t2, t3); + tcg_gen_movi_i64(tcg_ctx, t3, 0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_rem_i64(tcg_ctx, ret, t0, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); + } else { + TCGv_i64 t2 = tcg_const_i64(tcg_ctx, 1); + TCGv_i64 t3 = tcg_const_i64(tcg_ctx, 0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t1, t1, t3, t2, t1); + tcg_gen_remu_i64(tcg_ctx, ret, t0, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +#define GEN_INT_ARITH_MODD(name, opc3, sign) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)], \ + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ + sign); \ +} + +GEN_INT_ARITH_MODD(modud, 0x08, 0); +GEN_INT_ARITH_MODD(modsd, 0x18, 1); +#endif + +/* mulhw mulhw. 
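+ * mulhw keeps only the high half of the signed 32 x 32 product:
+ * tcg_gen_muls2_i32() below produces the (low, high) pair in (t0, t1)
+ * and just t1 is written back to rD.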
*/ +static void gen_mulhw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); + tcg_gen_muls2_i32(tcg_ctx, t0, t1, t0, t1); + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* mulhwu mulhwu. */ +static void gen_mulhwu(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); + tcg_gen_mulu2_i32(tcg_ctx, t0, t1, t0, t1); + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* mullw mullw. */ +static void gen_mullw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#if defined(TARGET_PPC64) + TCGv_i64 t0, t1; + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_ext32s_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); + tcg_gen_mul_i64(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +#else + tcg_gen_mul_i32(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); +#endif + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* mullwo mullwo. */ +static void gen_mullwo(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); + tcg_gen_muls2_i32(tcg_ctx, t0, t1, t0, t1); +#if defined(TARGET_PPC64) + tcg_gen_concat_i32_i64(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, t1); +#else + tcg_gen_mov_i32(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); +#endif + + tcg_gen_sari_i32(tcg_ctx, t0, t0, 31); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_NE, t0, t0, t1); + tcg_gen_extu_i32_tl(tcg_ctx, cpu_ov, t0); + if (is_isa300(ctx)) { + tcg_gen_mov_tl(tcg_ctx, cpu_ov32, cpu_ov); + } + tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); + + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* mulli */ +static void gen_mulli(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_muli_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + SIMM(ctx->opcode)); +} + +#if defined(TARGET_PPC64) +/* mulhd mulhd. */ +static void gen_mulhd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv lo = tcg_temp_new(tcg_ctx); + tcg_gen_muls2_tl(tcg_ctx, lo, cpu_gpr[rD(ctx->opcode)], + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + tcg_temp_free(tcg_ctx, lo); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* mulhdu mulhdu. 
*/ +static void gen_mulhdu(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv lo = tcg_temp_new(tcg_ctx); + tcg_gen_mulu2_tl(tcg_ctx, lo, cpu_gpr[rD(ctx->opcode)], + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + tcg_temp_free(tcg_ctx, lo); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* mulld mulld. */ +static void gen_mulld(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_mul_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* mulldo mulldo. */ +static void gen_mulldo(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_muls2_i64(tcg_ctx, t0, t1, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); + tcg_gen_mov_i64(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); + + tcg_gen_sari_i64(tcg_ctx, t0, t0, 63); + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, cpu_ov, t0, t1); + if (is_isa300(ctx)) { + tcg_gen_mov_tl(tcg_ctx, cpu_ov32, cpu_ov); + } + tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} +#endif + +/* Common subf function */ +static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, + TCGv arg2, bool add_ca, bool compute_ca, + bool compute_ov, bool compute_rc0) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = ret; + + if (compute_ca || compute_ov) { + t0 = tcg_temp_new(tcg_ctx); + } + + if (compute_ca) { + /* dest = ~arg1 + arg2 [+ ca]. */ + if (NARROW_MODE(ctx)) { + /* + * Caution: a non-obvious corner case of the spec is that + * we must produce the *entire* 64-bit addition, but + * produce the carry into bit 32. + */ + TCGv inv1 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_not_tl(tcg_ctx, inv1, arg1); + if (add_ca) { + tcg_gen_add_tl(tcg_ctx, t0, arg2, cpu_ca); + } else { + tcg_gen_addi_tl(tcg_ctx, t0, arg2, 1); + } + tcg_gen_xor_tl(tcg_ctx, t1, arg2, inv1); /* add without carry */ + tcg_gen_add_tl(tcg_ctx, t0, t0, inv1); + tcg_temp_free(tcg_ctx, inv1); + tcg_gen_xor_tl(tcg_ctx, cpu_ca, t0, t1); /* bits changes w/ carry */ + tcg_temp_free(tcg_ctx, t1); + tcg_gen_extract_tl(tcg_ctx, cpu_ca, cpu_ca, 32, 1); + if (is_isa300(ctx)) { + tcg_gen_mov_tl(tcg_ctx, cpu_ca32, cpu_ca); + } + } else if (add_ca) { + TCGv zero, inv1 = tcg_temp_new(tcg_ctx); + tcg_gen_not_tl(tcg_ctx, inv1, arg1); + zero = tcg_const_tl(tcg_ctx, 0); + tcg_gen_add2_tl(tcg_ctx, t0, cpu_ca, arg2, zero, cpu_ca, zero); + tcg_gen_add2_tl(tcg_ctx, t0, cpu_ca, t0, cpu_ca, inv1, zero); + gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0); + tcg_temp_free(tcg_ctx, zero); + tcg_temp_free(tcg_ctx, inv1); + } else { + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GEU, cpu_ca, arg2, arg1); + tcg_gen_sub_tl(tcg_ctx, t0, arg2, arg1); + gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1); + } + } else if (add_ca) { + /* + * Since we're ignoring carry-out, we can simplify the + * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. 
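+ * (In two's complement ~arg1 = -arg1 - 1, hence
+ * ~arg1 + arg2 + ca = arg2 - arg1 + ca - 1.)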
+ */
+        tcg_gen_sub_tl(tcg_ctx, t0, arg2, arg1);
+        tcg_gen_add_tl(tcg_ctx, t0, t0, cpu_ca);
+        tcg_gen_subi_tl(tcg_ctx, t0, t0, 1);
+    } else {
+        tcg_gen_sub_tl(tcg_ctx, t0, arg2, arg1);
+    }
+
+    if (compute_ov) {
+        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
+    }
+    if (unlikely(compute_rc0)) {
+        gen_set_Rc0(ctx, t0);
+    }
+
+    if (t0 != ret) {
+        tcg_gen_mov_tl(tcg_ctx, ret, t0);
+        tcg_temp_free(tcg_ctx, t0);
+    }
+}
+/* Subtract functions with two operands */
+#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)      \
+static void glue(gen_, name)(DisasContext *ctx)                             \
+{                                                                           \
+    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                        \
+                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
+                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));     \
+}
+/* Subtract functions with one operand and one immediate */
+#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                     \
+                                 add_ca, compute_ca, compute_ov)            \
+static void glue(gen_, name)(DisasContext *ctx)                             \
+{                                                                           \
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;                                 \
+    TCGv t0 = tcg_const_tl(tcg_ctx, const_val);                             \
+    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                        \
+                      cpu_gpr[rA(ctx->opcode)], t0,                         \
+                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));     \
+    tcg_temp_free(tcg_ctx, t0);                                             \
+}
+/* subf subf. subfo subfo. */
+GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
+GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
+/* subfc subfc. subfco subfco. */
+GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
+/* subfe subfe. subfeo subfeo. */
+GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
+GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
+/* subfme subfme. subfmeo subfmeo. */
+GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
+/* subfze subfze. subfzeo subfzeo. */
+GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
+
+/* subfic */
+static void gen_subfic(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv c = tcg_const_tl(tcg_ctx, SIMM(ctx->opcode));
+    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+                      c, 0, 1, 0, 0);
+    tcg_temp_free(tcg_ctx, c);
+}
+
+/* neg neg. nego nego. */
+static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv zero = tcg_const_tl(tcg_ctx, 0);
+    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
+    tcg_temp_free(tcg_ctx, zero);
+}
+
+static void gen_neg(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    tcg_gen_neg_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+    if (unlikely(Rc(ctx->opcode))) {
+        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+    }
+}
+
+static void gen_nego(DisasContext *ctx)
+{
+    gen_op_arith_neg(ctx, 1);
+}
+
+/*** Integer logical ***/
+#define GEN_LOGICAL2(name, tcg_op, opc, type)                               \
+static void glue(gen_, name)(DisasContext *ctx)                             \
+{                                                                           \
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;                                 \
+    tcg_op(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],     \
+           cpu_gpr[rB(ctx->opcode)]);                                       \
+    if (unlikely(Rc(ctx->opcode) != 0))                                     \
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                         \
+}
+
+#define GEN_LOGICAL1(name, tcg_op, opc, type)                               \
+static void glue(gen_, name)(DisasContext *ctx)                             \
+{                                                                           \
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;                                 \
+    tcg_op(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);    \
+    if (unlikely(Rc(ctx->opcode) != 0))                                     \
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                         \
+}
+
+/* and & and.
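+ * (Each GEN_LOGICAL2 line below expands to a single TCG op computing
+ * rA = rS <op> rB, plus a CR0 update when the Rc bit is set.)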
*/ +GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER); +/* andc & andc. */ +GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER); + +/* andi. */ +static void gen_andi_(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_andi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], + UIMM(ctx->opcode)); + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); +} + +/* andis. */ +static void gen_andis_(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_andi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], + UIMM(ctx->opcode) << 16); + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); +} + +/* cntlzw */ +static void gen_cntlzw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t, cpu_gpr[rS(ctx->opcode)]); + tcg_gen_clzi_i32(tcg_ctx, t, t, 32); + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t); + tcg_temp_free_i32(tcg_ctx, t); + + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* cnttzw */ +static void gen_cnttzw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t, cpu_gpr[rS(ctx->opcode)]); + tcg_gen_ctzi_i32(tcg_ctx, t, t, 32); + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t); + tcg_temp_free_i32(tcg_ctx, t); + + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* eqv & eqv. */ +GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER); +/* extsb & extsb. */ +GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER); +/* extsh & extsh. */ +GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER); +/* nand & nand. */ +GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER); +/* nor & nor. */ +GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER); + +#if defined(TARGET_PPC64) +static void gen_pause(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, 0); +#ifdef _MSC_VER + tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, + 0 - offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); +#else + tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, + -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); +#endif + tcg_temp_free_i32(tcg_ctx, t0); + + /* Stop translation, this gives other CPUs a chance to run */ + gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); +} +#endif /* defined(TARGET_PPC64) */ + +/* or & or. */ +static void gen_or(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rs, ra, rb; + + rs = rS(ctx->opcode); + ra = rA(ctx->opcode); + rb = rB(ctx->opcode); + /* Optimisation for mr. 
ri case */
+    if (rs != ra || rs != rb) {
+        if (rs != rb) {
+            tcg_gen_or_tl(tcg_ctx, cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
+        } else {
+            tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], cpu_gpr[rs]);
+        }
+        if (unlikely(Rc(ctx->opcode) != 0)) {
+            gen_set_Rc0(ctx, cpu_gpr[ra]);
+        }
+    } else if (unlikely(Rc(ctx->opcode) != 0)) {
+        gen_set_Rc0(ctx, cpu_gpr[rs]);
+#if defined(TARGET_PPC64)
+    } else if (rs != 0) { /* 0 is nop */
+        int prio = 0;
+
+        switch (rs) {
+        case 1:
+            /* Set process priority to low */
+            prio = 2;
+            break;
+        case 6:
+            /* Set process priority to medium-low */
+            prio = 3;
+            break;
+        case 2:
+            /* Set process priority to normal */
+            prio = 4;
+            break;
+        case 31:
+            if (!ctx->pr) {
+                /* Set process priority to very low */
+                prio = 1;
+            }
+            break;
+        case 5:
+            if (!ctx->pr) {
+                /* Set process priority to medium-high */
+                prio = 5;
+            }
+            break;
+        case 3:
+            if (!ctx->pr) {
+                /* Set process priority to high */
+                prio = 6;
+            }
+            break;
+        case 7:
+            if (ctx->hv && !ctx->pr) {
+                /* Set process priority to very high */
+                prio = 7;
+            }
+            break;
+        default:
+            break;
+        }
+        if (prio) {
+            TCGv t0 = tcg_temp_new(tcg_ctx);
+            gen_load_spr(tcg_ctx, t0, SPR_PPR);
+            tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x001C000000000000ULL);
+            tcg_gen_ori_tl(tcg_ctx, t0, t0, ((uint64_t)prio) << 50);
+            gen_store_spr(tcg_ctx, SPR_PPR, t0);
+            tcg_temp_free(tcg_ctx, t0);
+        }
+        /*
+         * Pause out of TCG otherwise spin loops with smt_low eat too
+         * much CPU and the kernel hangs. This applies to all
+         * encodings other than no-op, e.g., miso(rs=26), yield(27),
+         * mdoio(29), mdoom(30), and all currently undefined.
+         */
+        gen_pause(ctx);
+#endif
+    }
+}
+/* orc & orc. */
+GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
+
+/* xor & xor. */
+static void gen_xor(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    /* Optimisation for "set to zero" case */
+    if (rS(ctx->opcode) != rB(ctx->opcode)) {
+        tcg_gen_xor_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+                       cpu_gpr[rB(ctx->opcode)]);
+    } else {
+        tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], 0);
+    }
+    if (unlikely(Rc(ctx->opcode) != 0)) {
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+    }
+}
+
+/* ori */
+static void gen_ori(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    target_ulong uimm = UIMM(ctx->opcode);
+
+    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+        /* NOP */
+        return;
+    }
+    tcg_gen_ori_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
+}
+
+/* oris */
+static void gen_oris(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    target_ulong uimm = UIMM(ctx->opcode);
+
+    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+        /* NOP */
+        return;
+    }
+    tcg_gen_ori_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+                   uimm << 16);
+}
+
+/* xori */
+static void gen_xori(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    target_ulong uimm = UIMM(ctx->opcode);
+
+    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+        /* NOP */
+        return;
+    }
+    tcg_gen_xori_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
+}
+
+/* xoris */
+static void gen_xoris(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    target_ulong uimm = UIMM(ctx->opcode);
+
+    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+        /* NOP */
+        return;
+    }
+    tcg_gen_xori_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+                    uimm << 16);
+}
+
+/* popcntb : PowerPC 2.03 specification */
+static void gen_popcntb(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx =
ctx->uc->tcg_ctx; + gen_helper_popcntb(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); +} + +static void gen_popcntw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#if defined(TARGET_PPC64) + gen_helper_popcntw(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); +#else + tcg_gen_ctpop_i32(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); +#endif +} + +#if defined(TARGET_PPC64) +/* popcntd: PowerPC 2.06 specification */ +static void gen_popcntd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_ctpop_i64(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); +} +#endif + +/* prtyw: PowerPC 2.05 specification */ +static void gen_prtyw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv ra = cpu_gpr[rA(ctx->opcode)]; + TCGv rs = cpu_gpr[rS(ctx->opcode)]; + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_shri_tl(tcg_ctx, t0, rs, 16); + tcg_gen_xor_tl(tcg_ctx, ra, rs, t0); + tcg_gen_shri_tl(tcg_ctx, t0, ra, 8); + tcg_gen_xor_tl(tcg_ctx, ra, ra, t0); + tcg_gen_andi_tl(tcg_ctx, ra, ra, (target_ulong)0x100000001ULL); + tcg_temp_free(tcg_ctx, t0); +} + +#if defined(TARGET_PPC64) +/* prtyd: PowerPC 2.05 specification */ +static void gen_prtyd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv ra = cpu_gpr[rA(ctx->opcode)]; + TCGv rs = cpu_gpr[rS(ctx->opcode)]; + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_shri_tl(tcg_ctx, t0, rs, 32); + tcg_gen_xor_tl(tcg_ctx, ra, rs, t0); + tcg_gen_shri_tl(tcg_ctx, t0, ra, 16); + tcg_gen_xor_tl(tcg_ctx, ra, ra, t0); + tcg_gen_shri_tl(tcg_ctx, t0, ra, 8); + tcg_gen_xor_tl(tcg_ctx, ra, ra, t0); + tcg_gen_andi_tl(tcg_ctx, ra, ra, 1); + tcg_temp_free(tcg_ctx, t0); +} +#endif + +#if defined(TARGET_PPC64) +/* bpermd */ +static void gen_bpermd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_bpermd(tcg_ctx, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); +} +#endif + +#if defined(TARGET_PPC64) +/* extsw & extsw. */ +GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B); + +/* cntlzd */ +static void gen_cntlzd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_clzi_i64(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* cnttzd */ +static void gen_cnttzd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_ctzi_i64(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* darn */ +static void gen_darn(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int l = L(ctx->opcode); + + if (l > 2) { + tcg_gen_movi_i64(tcg_ctx, cpu_gpr[rD(ctx->opcode)], -1); + } else { + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + if (l == 0) { + gen_helper_darn32(tcg_ctx, cpu_gpr[rD(ctx->opcode)]); + } else { + /* Return 64-bit random for both CRN and RRN */ + gen_helper_darn64(tcg_ctx, cpu_gpr[rD(ctx->opcode)]); + } + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_stop_exception(ctx); + } + } +} +#endif + +/*** Integer rotate ***/ + +/* rlwimi & rlwimi. 
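+ * Sketch of the fast path below: when sh == 31 - me and mb <= me the
+ * insertion is a contiguous bit-field deposit; e.g. (illustrative
+ * operands) rlwimi rA,rS,8,16,23 copies rS bits 7:0 into rA bits 15:8
+ * and leaves the rest of rA unchanged.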
*/ +static void gen_rlwimi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; + TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; + uint32_t sh = SH(ctx->opcode); + uint32_t mb = MB(ctx->opcode); + uint32_t me = ME(ctx->opcode); + + if (sh == (31 - me) && mb <= me) { + tcg_gen_deposit_tl(tcg_ctx, t_ra, t_ra, t_rs, sh, me - mb + 1); + } else { + target_ulong mask; + TCGv t1; + +#if defined(TARGET_PPC64) + mb += 32; + me += 32; +#endif + mask = MASK(mb, me); + + t1 = tcg_temp_new(tcg_ctx); + if (mask <= 0xffffffffu) { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t0, t_rs); + tcg_gen_rotli_i32(tcg_ctx, t0, t0, sh); + tcg_gen_extu_i32_tl(tcg_ctx, t1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } else { +#if defined(TARGET_PPC64) + tcg_gen_deposit_i64(tcg_ctx, t1, t_rs, t_rs, 32, 32); + tcg_gen_rotli_i64(tcg_ctx, t1, t1, sh); +#else + g_assert_not_reached(); +#endif + } + + tcg_gen_andi_tl(tcg_ctx, t1, t1, mask); + tcg_gen_andi_tl(tcg_ctx, t_ra, t_ra, ~mask); + tcg_gen_or_tl(tcg_ctx, t_ra, t_ra, t1); + tcg_temp_free(tcg_ctx, t1); + } + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, t_ra); + } +} + +/* rlwinm & rlwinm. */ +static void gen_rlwinm(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; + TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; + int sh = SH(ctx->opcode); + int mb = MB(ctx->opcode); + int me = ME(ctx->opcode); + int len = me - mb + 1; + int rsh = (32 - sh) & 31; + + if (sh != 0 && len > 0 && me == (31 - sh)) { + tcg_gen_deposit_z_tl(tcg_ctx, t_ra, t_rs, sh, len); + } else if (me == 31 && rsh + len <= 32) { + tcg_gen_extract_tl(tcg_ctx, t_ra, t_rs, rsh, len); + } else { + target_ulong mask; +#if defined(TARGET_PPC64) + mb += 32; + me += 32; +#endif + mask = MASK(mb, me); + if (mask <= 0xffffffffu) { + if (sh == 0) { + tcg_gen_andi_tl(tcg_ctx, t_ra, t_rs, mask); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t0, t_rs); + tcg_gen_rotli_i32(tcg_ctx, t0, t0, sh); + tcg_gen_andi_i32(tcg_ctx, t0, t0, mask); + tcg_gen_extu_i32_tl(tcg_ctx, t_ra, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } + } else { +#if defined(TARGET_PPC64) + tcg_gen_deposit_i64(tcg_ctx, t_ra, t_rs, t_rs, 32, 32); + tcg_gen_rotli_i64(tcg_ctx, t_ra, t_ra, sh); + tcg_gen_andi_i64(tcg_ctx, t_ra, t_ra, mask); +#else + g_assert_not_reached(); +#endif + } + } + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, t_ra); + } +} + +/* rlwnm & rlwnm. 
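+ * Computes rA = ROTL32(rS, rB & 31) & MASK(mb, me); e.g. (illustrative
+ * operands) rlwnm rA,rS,rB,0,31 degenerates to a plain variable 32-bit
+ * rotate, since the mask keeps all 32 bits.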
*/ +static void gen_rlwnm(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; + TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; + TCGv t_rb = cpu_gpr[rB(ctx->opcode)]; + uint32_t mb = MB(ctx->opcode); + uint32_t me = ME(ctx->opcode); + target_ulong mask; + +#if defined(TARGET_PPC64) + mb += 32; + me += 32; +#endif + mask = MASK(mb, me); + + if (mask <= 0xffffffffu) { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t0, t_rb); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, t_rs); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 0x1f); + tcg_gen_rotl_i32(tcg_ctx, t1, t1, t0); + tcg_gen_extu_i32_tl(tcg_ctx, t_ra, t1); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + } else { +#if defined(TARGET_PPC64) + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_andi_i64(tcg_ctx, t0, t_rb, 0x1f); + tcg_gen_deposit_i64(tcg_ctx, t_ra, t_rs, t_rs, 32, 32); + tcg_gen_rotl_i64(tcg_ctx, t_ra, t_ra, t0); + tcg_temp_free_i64(tcg_ctx, t0); +#else + g_assert_not_reached(); +#endif + } + + tcg_gen_andi_tl(tcg_ctx, t_ra, t_ra, mask); + + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, t_ra); + } +} + +#if defined(TARGET_PPC64) +#define GEN_PPC64_R2(name, opc1, opc2) \ +static void glue(gen_, name##0)(DisasContext *ctx) \ +{ \ + gen_##name(ctx, 0); \ +} \ + \ +static void glue(gen_, name##1)(DisasContext *ctx) \ +{ \ + gen_##name(ctx, 1); \ +} +#define GEN_PPC64_R4(name, opc1, opc2) \ +static void glue(gen_, name##0)(DisasContext *ctx) \ +{ \ + gen_##name(ctx, 0, 0); \ +} \ + \ +static void glue(gen_, name##1)(DisasContext *ctx) \ +{ \ + gen_##name(ctx, 0, 1); \ +} \ + \ +static void glue(gen_, name##2)(DisasContext *ctx) \ +{ \ + gen_##name(ctx, 1, 0); \ +} \ + \ +static void glue(gen_, name##3)(DisasContext *ctx) \ +{ \ + gen_##name(ctx, 1, 1); \ +} + +static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; + TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; + int len = me - mb + 1; + int rsh = (64 - sh) & 63; + + if (sh != 0 && len > 0 && me == (63 - sh)) { + tcg_gen_deposit_z_tl(tcg_ctx, t_ra, t_rs, sh, len); + } else if (me == 63 && rsh + len <= 64) { + tcg_gen_extract_tl(tcg_ctx, t_ra, t_rs, rsh, len); + } else { + tcg_gen_rotli_tl(tcg_ctx, t_ra, t_rs, sh); + tcg_gen_andi_tl(tcg_ctx, t_ra, t_ra, MASK(mb, me)); + } + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, t_ra); + } +} + +/* rldicl - rldicl. */ +static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn) +{ + uint32_t sh, mb; + + sh = SH(ctx->opcode) | (shn << 5); + mb = MB(ctx->opcode) | (mbn << 5); + gen_rldinm(ctx, mb, 63, sh); +} +GEN_PPC64_R4(rldicl, 0x1E, 0x00); + +/* rldicr - rldicr. */ +static inline void gen_rldicr(DisasContext *ctx, int men, int shn) +{ + uint32_t sh, me; + + sh = SH(ctx->opcode) | (shn << 5); + me = MB(ctx->opcode) | (men << 5); + gen_rldinm(ctx, 0, me, sh); +} +GEN_PPC64_R4(rldicr, 0x1E, 0x02); + +/* rldic - rldic. 
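+ * (With mb = 0 the mask is MASK(0, 63 - sh), which clears the low sh
+ * bits after the rotate, so rldic rA,rS,n,0 acts as a left shift by n,
+ * the same as rldicr rA,rS,n,63-n.)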
*/ +static inline void gen_rldic(DisasContext *ctx, int mbn, int shn) +{ + uint32_t sh, mb; + + sh = SH(ctx->opcode) | (shn << 5); + mb = MB(ctx->opcode) | (mbn << 5); + gen_rldinm(ctx, mb, 63 - sh, sh); +} +GEN_PPC64_R4(rldic, 0x1E, 0x04); + +static void gen_rldnm(DisasContext *ctx, int mb, int me) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; + TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; + TCGv t_rb = cpu_gpr[rB(ctx->opcode)]; + TCGv t0; + + t0 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, t_rb, 0x3f); + tcg_gen_rotl_tl(tcg_ctx, t_ra, t_rs, t0); + tcg_temp_free(tcg_ctx, t0); + + tcg_gen_andi_tl(tcg_ctx, t_ra, t_ra, MASK(mb, me)); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, t_ra); + } +} + +/* rldcl - rldcl. */ +static inline void gen_rldcl(DisasContext *ctx, int mbn) +{ + uint32_t mb; + + mb = MB(ctx->opcode) | (mbn << 5); + gen_rldnm(ctx, mb, 63); +} +GEN_PPC64_R2(rldcl, 0x1E, 0x08); + +/* rldcr - rldcr. */ +static inline void gen_rldcr(DisasContext *ctx, int men) +{ + uint32_t me; + + me = MB(ctx->opcode) | (men << 5); + gen_rldnm(ctx, 0, me); +} +GEN_PPC64_R2(rldcr, 0x1E, 0x09); + +/* rldimi - rldimi. */ +static void gen_rldimi(DisasContext *ctx, int mbn, int shn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; + TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; + uint32_t sh = SH(ctx->opcode) | (shn << 5); + uint32_t mb = MB(ctx->opcode) | (mbn << 5); + uint32_t me = 63 - sh; + + if (mb <= me) { + tcg_gen_deposit_tl(tcg_ctx, t_ra, t_ra, t_rs, sh, me - mb + 1); + } else { + target_ulong mask = MASK(mb, me); + TCGv t1 = tcg_temp_new(tcg_ctx); + + tcg_gen_rotli_tl(tcg_ctx, t1, t_rs, sh); + tcg_gen_andi_tl(tcg_ctx, t1, t1, mask); + tcg_gen_andi_tl(tcg_ctx, t_ra, t_ra, ~mask); + tcg_gen_or_tl(tcg_ctx, t_ra, t_ra, t1); + tcg_temp_free(tcg_ctx, t1); + } + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, t_ra); + } +} +GEN_PPC64_R4(rldimi, 0x1E, 0x06); +#endif + +/*** Integer shift ***/ + +/* slw & slw. */ +static void gen_slw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + t0 = tcg_temp_new(tcg_ctx); + /* AND rS with a mask that is 0 when rB >= 0x20 */ +#if defined(TARGET_PPC64) + tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x3a); + tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x3f); +#else + tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1a); + tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x1f); +#endif + tcg_gen_andc_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); + t1 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1f); + tcg_gen_shl_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_gen_ext32u_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* sraw & sraw. */ +static void gen_sraw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_sraw(tcg_ctx, cpu_gpr[rA(ctx->opcode)], tcg_ctx->cpu_env, + cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* srawi & srawi. 
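+ * CA is set below only when the source is negative and a 1 bit is
+ * shifted out; this is what lets the srawi + addze idiom implement a
+ * signed division by 2^sh that rounds toward zero.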
*/ +static void gen_srawi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int sh = SH(ctx->opcode); + TCGv dst = cpu_gpr[rA(ctx->opcode)]; + TCGv src = cpu_gpr[rS(ctx->opcode)]; + if (sh == 0) { + tcg_gen_ext32s_tl(tcg_ctx, dst, src); + tcg_gen_movi_tl(tcg_ctx, cpu_ca, 0); + if (is_isa300(ctx)) { + tcg_gen_movi_tl(tcg_ctx, cpu_ca32, 0); + } + } else { + TCGv t0; + tcg_gen_ext32s_tl(tcg_ctx, dst, src); + tcg_gen_andi_tl(tcg_ctx, cpu_ca, dst, (1ULL << sh) - 1); + t0 = tcg_temp_new(tcg_ctx); + tcg_gen_sari_tl(tcg_ctx, t0, dst, TARGET_LONG_BITS - 1); + tcg_gen_and_tl(tcg_ctx, cpu_ca, cpu_ca, t0); + tcg_temp_free(tcg_ctx, t0); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_NE, cpu_ca, cpu_ca, 0); + if (is_isa300(ctx)) { + tcg_gen_mov_tl(tcg_ctx, cpu_ca32, cpu_ca); + } + tcg_gen_sari_tl(tcg_ctx, dst, dst, sh); + } + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, dst); + } +} + +/* srw & srw. */ +static void gen_srw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + t0 = tcg_temp_new(tcg_ctx); + /* AND rS with a mask that is 0 when rB >= 0x20 */ +#if defined(TARGET_PPC64) + tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x3a); + tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x3f); +#else + tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1a); + tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x1f); +#endif + tcg_gen_andc_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + t1 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1f); + tcg_gen_shr_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +#if defined(TARGET_PPC64) +/* sld & sld. */ +static void gen_sld(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + t0 = tcg_temp_new(tcg_ctx); + /* AND rS with a mask that is 0 when rB >= 0x40 */ + tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x39); + tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x3f); + tcg_gen_andc_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); + t1 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x3f); + tcg_gen_shl_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* srad & srad. */ +static void gen_srad(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_srad(tcg_ctx, cpu_gpr[rA(ctx->opcode)], tcg_ctx->cpu_env, + cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} +/* sradi & sradi. 
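+ * (The 6-bit shift count is reassembled below as SH(opcode) + (n << 5),
+ * with n supplying the top bit that does not fit in the SH field.)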
*/ +static inline void gen_sradi(DisasContext *ctx, int n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int sh = SH(ctx->opcode) + (n << 5); + TCGv dst = cpu_gpr[rA(ctx->opcode)]; + TCGv src = cpu_gpr[rS(ctx->opcode)]; + if (sh == 0) { + tcg_gen_mov_tl(tcg_ctx, dst, src); + tcg_gen_movi_tl(tcg_ctx, cpu_ca, 0); + if (is_isa300(ctx)) { + tcg_gen_movi_tl(tcg_ctx, cpu_ca32, 0); + } + } else { + TCGv t0; + tcg_gen_andi_tl(tcg_ctx, cpu_ca, src, (1ULL << sh) - 1); + t0 = tcg_temp_new(tcg_ctx); + tcg_gen_sari_tl(tcg_ctx, t0, src, TARGET_LONG_BITS - 1); + tcg_gen_and_tl(tcg_ctx, cpu_ca, cpu_ca, t0); + tcg_temp_free(tcg_ctx, t0); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_NE, cpu_ca, cpu_ca, 0); + if (is_isa300(ctx)) { + tcg_gen_mov_tl(tcg_ctx, cpu_ca32, cpu_ca); + } + tcg_gen_sari_tl(tcg_ctx, dst, src, sh); + } + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, dst); + } +} + +static void gen_sradi0(DisasContext *ctx) +{ + gen_sradi(ctx, 0); +} + +static void gen_sradi1(DisasContext *ctx) +{ + gen_sradi(ctx, 1); +} + +/* extswsli & extswsli. */ +static inline void gen_extswsli(DisasContext *ctx, int n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int sh = SH(ctx->opcode) + (n << 5); + TCGv dst = cpu_gpr[rA(ctx->opcode)]; + TCGv src = cpu_gpr[rS(ctx->opcode)]; + + tcg_gen_ext32s_tl(tcg_ctx, dst, src); + tcg_gen_shli_tl(tcg_ctx, dst, dst, sh); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, dst); + } +} + +static void gen_extswsli0(DisasContext *ctx) +{ + gen_extswsli(ctx, 0); +} + +static void gen_extswsli1(DisasContext *ctx) +{ + gen_extswsli(ctx, 1); +} + +/* srd & srd. */ +static void gen_srd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + t0 = tcg_temp_new(tcg_ctx); + /* AND rS with a mask that is 0 when rB >= 0x40 */ + tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x39); + tcg_gen_sari_tl(tcg_ctx, t0, t0, 0x3f); + tcg_gen_andc_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); + t1 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x3f); + tcg_gen_shr_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} +#endif + +/*** Addressing modes ***/ +/* Register indirect with immediate index : EA = (rA|0) + SIMM */ +static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA, + target_long maskl) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_long simm = SIMM(ctx->opcode); + + simm &= ~maskl; + if (rA(ctx->opcode) == 0) { + if (NARROW_MODE(ctx)) { + simm = (uint32_t)simm; + } + tcg_gen_movi_tl(tcg_ctx, EA, simm); + } else if (likely(simm != 0)) { + tcg_gen_addi_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)], simm); + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(tcg_ctx, EA, EA); + } + } else { + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)]); + } else { + tcg_gen_mov_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)]); + } + } +} + +static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (rA(ctx->opcode) == 0) { + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(tcg_ctx, EA, cpu_gpr[rB(ctx->opcode)]); + } else { + tcg_gen_mov_tl(tcg_ctx, EA, cpu_gpr[rB(ctx->opcode)]); + } + } else { + tcg_gen_add_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(tcg_ctx, EA, EA); + } + } +} + +static inline void 
gen_addr_register(DisasContext *ctx, TCGv EA) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (rA(ctx->opcode) == 0) { + tcg_gen_movi_tl(tcg_ctx, EA, 0); + } else if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)]); + } else { + tcg_gen_mov_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)]); + } +} + +static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1, + target_long val) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_addi_tl(tcg_ctx, ret, arg1, val); + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(tcg_ctx, ret, ret); + } +} + +static inline void gen_align_no_le(DisasContext *ctx) +{ + gen_exception_err(ctx, POWERPC_EXCP_ALIGN, + (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE); +} + +/*** Integer load ***/ +#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask) +#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP)) + +#define GEN_QEMU_LOAD_TL(ldop, op) \ +static void glue(gen_qemu_, ldop)(DisasContext *ctx, \ + TCGv val, \ + TCGv addr) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + tcg_gen_qemu_ld_tl(tcg_ctx, val, addr, ctx->mem_idx, op); \ +} + +GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB)) +GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW)) +GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW)) +GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL)) +GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL)) + +GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW)) +GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL)) + +#define GEN_QEMU_LOAD_64(ldop, op) \ +static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \ + TCGv_i64 val, \ + TCGv addr) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + tcg_gen_qemu_ld_i64(tcg_ctx, val, addr, ctx->mem_idx, op); \ +} + +GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB)) +GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW)) +GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL)) +GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL)) +GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q)) + +#if defined(TARGET_PPC64) +GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q)) +#endif + +#define GEN_QEMU_STORE_TL(stop, op) \ +static void glue(gen_qemu_, stop)(DisasContext *ctx, \ + TCGv val, \ + TCGv addr) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + tcg_gen_qemu_st_tl(tcg_ctx, val, addr, ctx->mem_idx, op); \ +} + +GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB)) +GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW)) +GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL)) + +GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW)) +GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL)) + +#define GEN_QEMU_STORE_64(stop, op) \ +static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \ + TCGv_i64 val, \ + TCGv addr) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + tcg_gen_qemu_st_i64(tcg_ctx, val, addr, ctx->mem_idx, op); \ +} + +GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB)) +GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW)) +GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL)) +GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q)) + +#if defined(TARGET_PPC64) +GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q)) +#endif + +#define GEN_LD(name, ldop, opc, type) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_imm_index(ctx, EA, 0); \ + gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ +} + +#define GEN_LDU(name, ldop, opc, type) \ +static void glue(gen_, name##u)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + if (unlikely(rA(ctx->opcode) == 0 || \ + 
rA(ctx->opcode) == rD(ctx->opcode))) { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + if (type == PPC_64B) \ + gen_addr_imm_index(ctx, EA, 0x03); \ + else \ + gen_addr_imm_index(ctx, EA, 0); \ + gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ +} + +#define GEN_LDUX(name, ldop, opc2, opc3, type) \ +static void glue(gen_, name##ux)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + if (unlikely(rA(ctx->opcode) == 0 || \ + rA(ctx->opcode) == rD(ctx->opcode))) { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ +} + +#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \ +static void glue(gen_, name##x)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + chk; \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ +} + +#define GEN_LDX(name, ldop, opc2, opc3, type) \ + GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE) + +#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type) \ + GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM) + +#define GEN_LDS(name, ldop, op, type) \ +GEN_LD(name, ldop, op | 0x20, type); \ +GEN_LDU(name, ldop, op | 0x21, type); \ +GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \ +GEN_LDX(name, ldop, 0x17, op | 0x00, type) + +/* lbz lbzu lbzux lbzx */ +GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER); +/* lha lhau lhaux lhax */ +GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER); +/* lhz lhzu lhzux lhzx */ +GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER); +/* lwz lwzu lwzux lwzx */ +GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER); + +#define GEN_LDEPX(name, ldop, opc2, opc3) \ +static void glue(gen_, name##epx)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + CHK_SV; \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + tcg_gen_qemu_ld_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\ + tcg_temp_free(tcg_ctx, EA); \ +} + +GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) +GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08) +GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00) +#if defined(TARGET_PPC64) +GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00) +#endif + +#if defined(TARGET_PPC64) +/* lwaux */ +GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B); +/* lwax */ +GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B); +/* ldux */ +GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B); +/* ldx */ +GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B); + +/* CI load/store variants */ +GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST) +GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST) +GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) +GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) + +static void gen_ld(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + if (Rc(ctx->opcode)) { + if (unlikely(rA(ctx->opcode) == 0 || + rA(ctx->opcode) == rD(ctx->opcode))) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + return; + } + } + 
gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_imm_index(ctx, EA, 0x03); + if (ctx->opcode & 0x02) { + /* lwa (lwau is undefined) */ + gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA); + } else { + /* ld - ldu */ + gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA); + } + if (Rc(ctx->opcode)) { + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); + } + tcg_temp_free(tcg_ctx, EA); +} + +/* lq */ +static void gen_lq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int ra, rd; + TCGv EA, hi, lo; + + /* lq is a legal user mode instruction starting in ISA 2.07 */ + bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; + bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; + + if (!legal_in_user_mode && ctx->pr) { + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return; + } + + if (!le_is_supported && ctx->le_mode) { + gen_align_no_le(ctx); + return; + } + ra = rA(ctx->opcode); + rd = rD(ctx->opcode); + if (unlikely((rd & 1) || rd == ra)) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + return; + } + + gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_imm_index(ctx, EA, 0x0F); + + /* Note that the low part is always in RD+1, even in LE mode. */ + lo = cpu_gpr[rd + 1]; + hi = cpu_gpr[rd]; + + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + if (HAVE_ATOMIC128) { + TCGv_i32 oi = tcg_temp_new_i32(tcg_ctx); + if (ctx->le_mode) { + tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); + gen_helper_lq_le_parallel(tcg_ctx, lo, tcg_ctx->cpu_env, EA, oi); + } else { + tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); + gen_helper_lq_be_parallel(tcg_ctx, lo, tcg_ctx->cpu_env, EA, oi); + } + tcg_temp_free_i32(tcg_ctx, oi); + tcg_gen_ld_i64(tcg_ctx, hi, tcg_ctx->cpu_env, offsetof(CPUPPCState, retxh)); + } else { + /* Restart with exclusive lock. 
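+ * A sketch of what follows (QEMU's generic fallback, not specific to
+ * this patch): gen_helper_exit_atomic raises EXCP_ATOMIC and the block
+ * is re-executed serially, so both 64-bit halves of lq are read
+ * atomically.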
*/ + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + } + } else if (ctx->le_mode) { + tcg_gen_qemu_ld_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_LEQ); + gen_addr_add(ctx, EA, EA, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_LEQ); + } else { + tcg_gen_qemu_ld_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_BEQ); + gen_addr_add(ctx, EA, EA, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_BEQ); + } + tcg_temp_free(tcg_ctx, EA); +} +#endif + +/*** Integer store ***/ +#define GEN_ST(name, stop, opc, type) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_imm_index(ctx, EA, 0); \ + gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ +} + +#define GEN_STU(name, stop, opc, type) \ +static void glue(gen_, stop##u)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + if (unlikely(rA(ctx->opcode) == 0)) { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + if (type == PPC_64B) \ + gen_addr_imm_index(ctx, EA, 0x03); \ + else \ + gen_addr_imm_index(ctx, EA, 0); \ + gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ +} + +#define GEN_STUX(name, stop, opc2, opc3, type) \ +static void glue(gen_, name##ux)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + if (unlikely(rA(ctx->opcode) == 0)) { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ +} + +#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \ +static void glue(gen_, name##x)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + chk; \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ +} +#define GEN_STX(name, stop, opc2, opc3, type) \ + GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE) + +#define GEN_STX_HVRM(name, stop, opc2, opc3, type) \ + GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM) + +#define GEN_STS(name, stop, op, type) \ +GEN_ST(name, stop, op | 0x20, type); \ +GEN_STU(name, stop, op | 0x21, type); \ +GEN_STUX(name, stop, 0x17, op | 0x01, type); \ +GEN_STX(name, stop, 0x17, op | 0x00, type) + +/* stb stbu stbux stbx */ +GEN_STS(stb, st8, 0x06, PPC_INTEGER); +/* sth sthu sthux sthx */ +GEN_STS(sth, st16, 0x0C, PPC_INTEGER); +/* stw stwu stwux stwx */ +GEN_STS(stw, st32, 0x04, PPC_INTEGER); + +#define GEN_STEPX(name, stop, opc2, opc3) \ +static void glue(gen_, name##epx)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + CHK_SV; \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + tcg_gen_qemu_st_tl(tcg_ctx, \ + cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop); \ + tcg_temp_free(tcg_ctx, EA); \ +} + +GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) +GEN_STEPX(sth, 
DEF_MEMOP(MO_UW), 0x1F, 0x0C) +GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04) +#if defined(TARGET_PPC64) +GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1d, 0x04) +#endif + +#if defined(TARGET_PPC64) +GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B); +GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B); +GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST) +GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST) +GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST) +GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST) + +static void gen_std(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rs; + TCGv EA; + + rs = rS(ctx->opcode); + if ((ctx->opcode & 0x3) == 0x2) { /* stq */ + bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; + bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; + TCGv hi, lo; + + if (!(ctx->insns_flags & PPC_64BX)) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + } + + if (!legal_in_user_mode && ctx->pr) { + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return; + } + + if (!le_is_supported && ctx->le_mode) { + gen_align_no_le(ctx); + return; + } + + if (unlikely(rs & 1)) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + return; + } + gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_imm_index(ctx, EA, 0x03); + + /* Note that the low part is always in RS+1, even in LE mode. */ + lo = cpu_gpr[rs + 1]; + hi = cpu_gpr[rs]; + + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + if (HAVE_ATOMIC128) { + TCGv_i32 oi = tcg_temp_new_i32(tcg_ctx); + if (ctx->le_mode) { + tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); + gen_helper_stq_le_parallel(tcg_ctx, tcg_ctx->cpu_env, EA, lo, hi, oi); + } else { + tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); + gen_helper_stq_be_parallel(tcg_ctx, tcg_ctx->cpu_env, EA, lo, hi, oi); + } + tcg_temp_free_i32(tcg_ctx, oi); + } else { + /* Restart with exclusive lock. 
*/
+                gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
+                ctx->base.is_jmp = DISAS_NORETURN;
+            }
+        } else if (ctx->le_mode) {
+            tcg_gen_qemu_st_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_LEQ);
+            gen_addr_add(ctx, EA, EA, 8);
+            tcg_gen_qemu_st_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_LEQ);
+        } else {
+            tcg_gen_qemu_st_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_BEQ);
+            gen_addr_add(ctx, EA, EA, 8);
+            tcg_gen_qemu_st_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_BEQ);
+        }
+        tcg_temp_free(tcg_ctx, EA);
+    } else {
+        /* std / stdu */
+        if (Rc(ctx->opcode)) {
+            if (unlikely(rA(ctx->opcode) == 0)) {
+                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+                return;
+            }
+        }
+        gen_set_access_type(ctx, ACCESS_INT);
+        EA = tcg_temp_new(tcg_ctx);
+        gen_addr_imm_index(ctx, EA, 0x03);
+        gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
+        if (Rc(ctx->opcode)) {
+            tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA);
+        }
+        tcg_temp_free(tcg_ctx, EA);
+    }
+}
+#endif
+/*** Integer load and store with byte reverse ***/
+
+/* lhbrx */
+GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
+
+/* lwbrx */
+GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
+
+#if defined(TARGET_PPC64)
+/* ldbrx */
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
+/* stdbrx */
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
+#endif /* TARGET_PPC64 */
+
+/* sthbrx */
+GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
+/* stwbrx */
+GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
+
+/*** Integer load and store multiple ***/
+
+/* lmw */
+static void gen_lmw(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+    TCGv_i32 t1;
+
+    if (ctx->le_mode) {
+        gen_align_no_le(ctx);
+        return;
+    }
+    gen_set_access_type(ctx, ACCESS_INT);
+    t0 = tcg_temp_new(tcg_ctx);
+    t1 = tcg_const_i32(tcg_ctx, rD(ctx->opcode));
+    gen_addr_imm_index(ctx, t0, 0);
+    gen_helper_lmw(tcg_ctx, tcg_ctx->cpu_env, t0, t1);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free_i32(tcg_ctx, t1);
+}
+
+/* stmw */
+static void gen_stmw(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+    TCGv_i32 t1;
+
+    if (ctx->le_mode) {
+        gen_align_no_le(ctx);
+        return;
+    }
+    gen_set_access_type(ctx, ACCESS_INT);
+    t0 = tcg_temp_new(tcg_ctx);
+    t1 = tcg_const_i32(tcg_ctx, rS(ctx->opcode));
+    gen_addr_imm_index(ctx, t0, 0);
+    gen_helper_stmw(tcg_ctx, tcg_ctx->cpu_env, t0, t1);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free_i32(tcg_ctx, t1);
+}
+
+/*** Integer load and store strings ***/
+
+/* lswi */
+/*
+ * The PowerPC32 specification says we must generate an exception if rA
+ * is in the range of registers to be loaded. On the other hand, IBM
+ * says this is valid, but rA won't be loaded. For now, I'll follow the
+ * spec...
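+ * As implemented below: nb == 0 is read as 32 bytes, and
+ * DIV_ROUND_UP(nb, 4) consecutive registers starting at rD are filled,
+ * e.g. lswi rD,rA,7 loads two registers.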
+ */
+static void gen_lswi(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+    TCGv_i32 t1, t2;
+    int nb = NB(ctx->opcode);
+    int start = rD(ctx->opcode);
+    int ra = rA(ctx->opcode);
+    int nr;
+
+    if (ctx->le_mode) {
+        gen_align_no_le(ctx);
+        return;
+    }
+    if (nb == 0) {
+        nb = 32;
+    }
+    nr = DIV_ROUND_UP(nb, 4);
+    if (unlikely(lsw_reg_in_range(start, nr, ra))) {
+        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
+        return;
+    }
+    gen_set_access_type(ctx, ACCESS_INT);
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_register(ctx, t0);
+    t1 = tcg_const_i32(tcg_ctx, nb);
+    t2 = tcg_const_i32(tcg_ctx, start);
+    gen_helper_lsw(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free_i32(tcg_ctx, t1);
+    tcg_temp_free_i32(tcg_ctx, t2);
+}
+
+/* lswx */
+static void gen_lswx(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+    TCGv_i32 t1, t2, t3;
+
+    if (ctx->le_mode) {
+        gen_align_no_le(ctx);
+        return;
+    }
+    gen_set_access_type(ctx, ACCESS_INT);
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, t0);
+    t1 = tcg_const_i32(tcg_ctx, rD(ctx->opcode));
+    t2 = tcg_const_i32(tcg_ctx, rA(ctx->opcode));
+    t3 = tcg_const_i32(tcg_ctx, rB(ctx->opcode));
+    gen_helper_lswx(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2, t3);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free_i32(tcg_ctx, t1);
+    tcg_temp_free_i32(tcg_ctx, t2);
+    tcg_temp_free_i32(tcg_ctx, t3);
+}
+
+/* stswi */
+static void gen_stswi(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+    TCGv_i32 t1, t2;
+    int nb = NB(ctx->opcode);
+
+    if (ctx->le_mode) {
+        gen_align_no_le(ctx);
+        return;
+    }
+    gen_set_access_type(ctx, ACCESS_INT);
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_register(ctx, t0);
+    if (nb == 0) {
+        nb = 32;
+    }
+    t1 = tcg_const_i32(tcg_ctx, nb);
+    t2 = tcg_const_i32(tcg_ctx, rS(ctx->opcode));
+    gen_helper_stsw(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free_i32(tcg_ctx, t1);
+    tcg_temp_free_i32(tcg_ctx, t2);
+}
+
+/* stswx */
+static void gen_stswx(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+    TCGv_i32 t1, t2;
+
+    if (ctx->le_mode) {
+        gen_align_no_le(ctx);
+        return;
+    }
+    gen_set_access_type(ctx, ACCESS_INT);
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, t0);
+    t1 = tcg_temp_new_i32(tcg_ctx);
+    tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_xer);
+    tcg_gen_andi_i32(tcg_ctx, t1, t1, 0x7F);
+    t2 = tcg_const_i32(tcg_ctx, rS(ctx->opcode));
+    gen_helper_stsw(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free_i32(tcg_ctx, t1);
+    tcg_temp_free_i32(tcg_ctx, t2);
+}
+
+/*** Memory synchronisation ***/
+/* eieio */
+static void gen_eieio(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGBar bar = TCG_MO_LD_ST;
+
+    /*
+     * POWER9 has an eieio instruction variant using bit 6 as a hint to
+     * tell the CPU it is a store-forwarding barrier.
+     */
+    if (ctx->opcode & 0x2000000) {
+        /*
+         * The ISA says that "Reserved fields in instructions are
+         * ignored by the processor". So bit 6 is ignored on non-POWER9
+         * CPUs, but as this is not an encoding software should be
+         * using, complain to the user.
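+ * (When the hint is honoured, the code below selects a
+ * store-against-load barrier, TCG_MO_ST_LD, instead of the default
+ * TCG_MO_LD_ST.)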
+         */
+        if (!(ctx->insns_flags2 & PPC2_ISA300)) {
+            qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
+                          TARGET_FMT_lx "\n", ctx->base.pc_next - 4);
+        } else {
+            bar = TCG_MO_ST_LD;
+        }
+    }
+
+    tcg_gen_mb(tcg_ctx, bar | TCG_BAR_SC);
+}
+
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv_i32 t;
+    TCGLabel *l;
+
+    if (!ctx->lazy_tlb_flush) {
+        return;
+    }
+    l = gen_new_label(tcg_ctx);
+    t = tcg_temp_new_i32(tcg_ctx);
+    tcg_gen_ld_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+    tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, t, 0, l);
+    if (global) {
+        gen_helper_check_tlb_flush_global(tcg_ctx, tcg_ctx->cpu_env);
+    } else {
+        gen_helper_check_tlb_flush_local(tcg_ctx, tcg_ctx->cpu_env);
+    }
+    gen_set_label(tcg_ctx, l);
+    tcg_temp_free_i32(tcg_ctx, t);
+}
+
+/* isync */
+static void gen_isync(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    /*
+     * We need to check for a pending TLB flush. This can only happen
+     * in kernel mode, so check MSR_PR.
+     */
+    if (!ctx->pr) {
+        gen_check_tlb_flush(ctx, false);
+    }
+    tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC);
+    gen_stop_exception(ctx);
+}
+
+#define MEMOP_GET_SIZE(x) (1ULL << ((x) & MO_SIZE))
+
+static void gen_load_locked(DisasContext *ctx, MemOp memop)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
+    TCGv t0 = tcg_temp_new(tcg_ctx);
+
+    gen_set_access_type(ctx, ACCESS_RES);
+    gen_addr_reg_index(ctx, t0);
+    tcg_gen_qemu_ld_tl(tcg_ctx, gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
+    tcg_gen_mov_tl(tcg_ctx, cpu_reserve, t0);
+    tcg_gen_mov_tl(tcg_ctx, cpu_reserve_val, gpr);
+    tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ);
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+#define LARX(name, memop)                  \
+static void gen_##name(DisasContext *ctx)  \
+{                                          \
+    gen_load_locked(ctx, memop);           \
+}
+
+/* lbarx lharx lwarx */
+LARX(lbarx, DEF_MEMOP(MO_UB))
+LARX(lharx, DEF_MEMOP(MO_UW))
+LARX(lwarx, DEF_MEMOP(MO_UL))
+
+static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
+                                      TCGv EA, TCGCond cond, int addend)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t = tcg_temp_new(tcg_ctx);
+    TCGv t2 = tcg_temp_new(tcg_ctx);
+    TCGv u = tcg_temp_new(tcg_ctx);
+
+    tcg_gen_qemu_ld_tl(tcg_ctx, t, EA, ctx->mem_idx, memop);
+    tcg_gen_addi_tl(tcg_ctx, t2, EA, MEMOP_GET_SIZE(memop));
+    tcg_gen_qemu_ld_tl(tcg_ctx, t2, t2, ctx->mem_idx, memop);
+    tcg_gen_addi_tl(tcg_ctx, u, t, addend);
+
+    /* E.g. for fetch and increment bounded... */
+    /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
+    tcg_gen_movcond_tl(tcg_ctx, cond, u, t, t2, u, t);
+    tcg_gen_qemu_st_tl(tcg_ctx, u, EA, ctx->mem_idx, memop);
+
+    /* RT = (t != t2 ?
t : u = 1<<(s*8-1)) */ + tcg_gen_movi_tl(tcg_ctx, u, 1ULL << (MEMOP_GET_SIZE(memop) * 8 - 1)); + tcg_gen_movcond_tl(tcg_ctx, cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u); + + tcg_temp_free(tcg_ctx, t); + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, u); +} + +static void gen_ld_atomic(DisasContext *ctx, MemOp memop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t gpr_FC = FC(ctx->opcode); + TCGv EA = tcg_temp_new(tcg_ctx); + int rt = rD(ctx->opcode); + bool need_serial; + TCGv src, dst; + + gen_addr_register(ctx, EA); + dst = cpu_gpr[rt]; + src = cpu_gpr[(rt + 1) & 31]; + + need_serial = false; + memop |= MO_ALIGN; + switch (gpr_FC) { + case 0: /* Fetch and add */ + tcg_gen_atomic_fetch_add_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); + break; + case 1: /* Fetch and xor */ + tcg_gen_atomic_fetch_xor_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); + break; + case 2: /* Fetch and or */ + tcg_gen_atomic_fetch_or_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); + break; + case 3: /* Fetch and 'and' */ + tcg_gen_atomic_fetch_and_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); + break; + case 4: /* Fetch and max unsigned */ + tcg_gen_atomic_fetch_umax_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); + break; + case 5: /* Fetch and max signed */ + tcg_gen_atomic_fetch_smax_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); + break; + case 6: /* Fetch and min unsigned */ + tcg_gen_atomic_fetch_umin_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); + break; + case 7: /* Fetch and min signed */ + tcg_gen_atomic_fetch_smin_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); + break; + case 8: /* Swap */ + tcg_gen_atomic_xchg_tl(tcg_ctx, dst, EA, src, ctx->mem_idx, memop); + break; + + case 16: /* Compare and swap not equal */ + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + need_serial = true; + } else { + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + tcg_gen_qemu_ld_tl(tcg_ctx, t0, EA, ctx->mem_idx, memop); + if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) { + tcg_gen_mov_tl(tcg_ctx, t1, src); + } else { + tcg_gen_ext32u_tl(tcg_ctx, t1, src); + } + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t0, t1, + cpu_gpr[(rt + 2) & 31], t0); + tcg_gen_qemu_st_tl(tcg_ctx, t1, EA, ctx->mem_idx, memop); + tcg_gen_mov_tl(tcg_ctx, dst, t0); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + } + break; + + case 24: /* Fetch and increment bounded */ + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + need_serial = true; + } else { + gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1); + } + break; + case 25: /* Fetch and increment equal */ + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + need_serial = true; + } else { + gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1); + } + break; + case 28: /* Fetch and decrement bounded */ + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + need_serial = true; + } else { + gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1); + } + break; + + default: + /* invoke data storage error handler */ + gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); + } + tcg_temp_free(tcg_ctx, EA); + + if (need_serial) { + /* Restart with exclusive lock. 
*/ + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + } +} + +static void gen_lwat(DisasContext *ctx) +{ + gen_ld_atomic(ctx, DEF_MEMOP(MO_UL)); +} + +#ifdef TARGET_PPC64 +static void gen_ldat(DisasContext *ctx) +{ + gen_ld_atomic(ctx, DEF_MEMOP(MO_Q)); +} +#endif + +static void gen_st_atomic(DisasContext *ctx, MemOp memop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t gpr_FC = FC(ctx->opcode); + TCGv EA = tcg_temp_new(tcg_ctx); + TCGv src, discard; + + gen_addr_register(ctx, EA); + src = cpu_gpr[rD(ctx->opcode)]; + discard = tcg_temp_new(tcg_ctx); + + memop |= MO_ALIGN; + switch (gpr_FC) { + case 0: /* add and Store */ + tcg_gen_atomic_add_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); + break; + case 1: /* xor and Store */ + tcg_gen_atomic_xor_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); + break; + case 2: /* Or and Store */ + tcg_gen_atomic_or_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); + break; + case 3: /* 'and' and Store */ + tcg_gen_atomic_and_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); + break; + case 4: /* Store max unsigned */ + tcg_gen_atomic_umax_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); + break; + case 5: /* Store max signed */ + tcg_gen_atomic_smax_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); + break; + case 6: /* Store min unsigned */ + tcg_gen_atomic_umin_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); + break; + case 7: /* Store min signed */ + tcg_gen_atomic_smin_fetch_tl(tcg_ctx, discard, EA, src, ctx->mem_idx, memop); + break; + case 24: /* Store twin */ + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + /* Restart with exclusive lock. */ + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + } else { + TCGv t = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv s = tcg_temp_new(tcg_ctx); + TCGv s2 = tcg_temp_new(tcg_ctx); + TCGv ea_plus_s = tcg_temp_new(tcg_ctx); + + tcg_gen_qemu_ld_tl(tcg_ctx, t, EA, ctx->mem_idx, memop); + tcg_gen_addi_tl(tcg_ctx, ea_plus_s, EA, MEMOP_GET_SIZE(memop)); + tcg_gen_qemu_ld_tl(tcg_ctx, t2, ea_plus_s, ctx->mem_idx, memop); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, s, t, t2, src, t); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, s2, t, t2, src, t2); + tcg_gen_qemu_st_tl(tcg_ctx, s, EA, ctx->mem_idx, memop); + tcg_gen_qemu_st_tl(tcg_ctx, s2, ea_plus_s, ctx->mem_idx, memop); + + tcg_temp_free(tcg_ctx, ea_plus_s); + tcg_temp_free(tcg_ctx, s2); + tcg_temp_free(tcg_ctx, s); + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t); + } + break; + default: + /* invoke data storage error handler */ + gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); + } + tcg_temp_free(tcg_ctx, discard); + tcg_temp_free(tcg_ctx, EA); +} + +static void gen_stwat(DisasContext *ctx) +{ + gen_st_atomic(ctx, DEF_MEMOP(MO_UL)); +} + +#ifdef TARGET_PPC64 +static void gen_stdat(DisasContext *ctx) +{ + gen_st_atomic(ctx, DEF_MEMOP(MO_Q)); +} +#endif + +static void gen_conditional_store(DisasContext *ctx, MemOp memop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGv t0 = tcg_temp_new(tcg_ctx); + int reg = rS(ctx->opcode); + + gen_set_access_type(ctx, ACCESS_RES); + gen_addr_reg_index(ctx, t0); + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, t0, cpu_reserve, l1); + tcg_temp_free(tcg_ctx, t0); + + t0 = tcg_temp_new(tcg_ctx); + tcg_gen_atomic_cmpxchg_tl(tcg_ctx, t0, cpu_reserve, cpu_reserve_val, + 
cpu_gpr[reg], ctx->mem_idx, + DEF_MEMOP(memop) | MO_ALIGN); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, t0, t0, cpu_reserve_val); + tcg_gen_shli_tl(tcg_ctx, t0, t0, CRF_EQ_BIT); + tcg_gen_or_tl(tcg_ctx, t0, t0, cpu_so); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], t0); + tcg_temp_free(tcg_ctx, t0); + tcg_gen_br(tcg_ctx, l2); + + gen_set_label(tcg_ctx, l1); + + /* + * Address mismatch implies failure. But we still need to provide + * the memory barrier semantics of the instruction. + */ + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so); + + gen_set_label(tcg_ctx, l2); + tcg_gen_movi_tl(tcg_ctx, cpu_reserve, -1); +} + +#define STCX(name, memop) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + gen_conditional_store(ctx, memop); \ +} + +STCX(stbcx_, DEF_MEMOP(MO_UB)) +STCX(sthcx_, DEF_MEMOP(MO_UW)) +STCX(stwcx_, DEF_MEMOP(MO_UL)) + +#if defined(TARGET_PPC64) +/* ldarx */ +LARX(ldarx, DEF_MEMOP(MO_Q)) +/* stdcx. */ +STCX(stdcx_, DEF_MEMOP(MO_Q)) + +/* lqarx */ +static void gen_lqarx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rd = rD(ctx->opcode); + TCGv EA, hi, lo; + + if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) || + (rd == rB(ctx->opcode)))) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + return; + } + + gen_set_access_type(ctx, ACCESS_RES); + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + + /* Note that the low part is always in RD+1, even in LE mode. */ + lo = cpu_gpr[rd + 1]; + hi = cpu_gpr[rd]; + + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + if (HAVE_ATOMIC128) { + TCGv_i32 oi = tcg_temp_new_i32(tcg_ctx); + if (ctx->le_mode) { + tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_LEQ | MO_ALIGN_16, + ctx->mem_idx)); + gen_helper_lq_le_parallel(tcg_ctx, lo, tcg_ctx->cpu_env, EA, oi); + } else { + tcg_gen_movi_i32(tcg_ctx, oi, make_memop_idx(MO_BEQ | MO_ALIGN_16, + ctx->mem_idx)); + gen_helper_lq_be_parallel(tcg_ctx, lo, tcg_ctx->cpu_env, EA, oi); + } + tcg_temp_free_i32(tcg_ctx, oi); + tcg_gen_ld_i64(tcg_ctx, hi, tcg_ctx->cpu_env, offsetof(CPUPPCState, retxh)); + } else { + /* Restart with exclusive lock. */ + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + tcg_temp_free(tcg_ctx, EA); + return; + } + } else if (ctx->le_mode) { + tcg_gen_qemu_ld_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16); + tcg_gen_mov_tl(tcg_ctx, cpu_reserve, EA); + gen_addr_add(ctx, EA, EA, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_LEQ); + } else { + tcg_gen_qemu_ld_i64(tcg_ctx, hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16); + tcg_gen_mov_tl(tcg_ctx, cpu_reserve, EA); + gen_addr_add(ctx, EA, EA, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, lo, EA, ctx->mem_idx, MO_BEQ); + } + tcg_temp_free(tcg_ctx, EA); + + tcg_gen_st_tl(tcg_ctx, hi, tcg_ctx->cpu_env, offsetof(CPUPPCState, reserve_val)); + tcg_gen_st_tl(tcg_ctx, lo, tcg_ctx->cpu_env, offsetof(CPUPPCState, reserve_val2)); +} + +/* stqcx. */ +static void gen_stqcx_(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rs = rS(ctx->opcode); + TCGv EA, hi, lo; + + if (unlikely(rs & 1)) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + return; + } + + gen_set_access_type(ctx, ACCESS_RES); + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + + /* Note that the low part is always in RS+1, even in LE mode. 
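+     * (rs has already been checked to be even above, so the rs + 1 access
+     * stays within the register pair.)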
*/
+    lo = cpu_gpr[rs + 1];
+    hi = cpu_gpr[rs];
+
+    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+        if (HAVE_CMPXCHG128) {
+            TCGv_i32 oi = tcg_const_i32(tcg_ctx, DEF_MEMOP(MO_Q) | MO_ALIGN_16);
+            if (ctx->le_mode) {
+                gen_helper_stqcx_le_parallel(tcg_ctx, cpu_crf[0], tcg_ctx->cpu_env,
+                                             EA, lo, hi, oi);
+            } else {
+                gen_helper_stqcx_be_parallel(tcg_ctx, cpu_crf[0], tcg_ctx->cpu_env,
+                                             EA, lo, hi, oi);
+            }
+            tcg_temp_free_i32(tcg_ctx, oi);
+        } else {
+            /* Restart with exclusive lock. */
+            gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
+            ctx->base.is_jmp = DISAS_NORETURN;
+        }
+        tcg_temp_free(tcg_ctx, EA);
+    } else {
+        TCGLabel *lab_fail = gen_new_label(tcg_ctx);
+        TCGLabel *lab_over = gen_new_label(tcg_ctx);
+        TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
+        TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
+
+        tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, EA, cpu_reserve, lab_fail);
+        tcg_temp_free(tcg_ctx, EA);
+
+        gen_qemu_ld64_i64(ctx, t0, cpu_reserve);
+        tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, (ctx->le_mode
+                       ? offsetof(CPUPPCState, reserve_val2)
+                       : offsetof(CPUPPCState, reserve_val)));
+        tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, t0, t1, lab_fail);
+
+        tcg_gen_addi_i64(tcg_ctx, t0, cpu_reserve, 8);
+        gen_qemu_ld64_i64(ctx, t0, t0);
+        tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, (ctx->le_mode
+                       ? offsetof(CPUPPCState, reserve_val)
+                       : offsetof(CPUPPCState, reserve_val2)));
+        tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, t0, t1, lab_fail);
+
+        /* Success */
+        gen_qemu_st64_i64(ctx, ctx->le_mode ? lo : hi, cpu_reserve);
+        tcg_gen_addi_i64(tcg_ctx, t0, cpu_reserve, 8);
+        gen_qemu_st64_i64(ctx, ctx->le_mode ? hi : lo, t0);
+
+        tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so);
+        tcg_gen_ori_i32(tcg_ctx, cpu_crf[0], cpu_crf[0], CRF_EQ);
+        tcg_gen_br(tcg_ctx, lab_over);
+
+        gen_set_label(tcg_ctx, lab_fail);
+        tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so);
+
+        gen_set_label(tcg_ctx, lab_over);
+        tcg_gen_movi_tl(tcg_ctx, cpu_reserve, -1);
+        tcg_temp_free_i64(tcg_ctx, t0);
+        tcg_temp_free_i64(tcg_ctx, t1);
+    }
+}
+#endif /* defined(TARGET_PPC64) */
+
+/* sync */
+static void gen_sync(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    uint32_t l = (ctx->opcode >> 21) & 3;
+
+    /*
+     * We may need to check for a pending TLB flush.
+     *
+     * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
+     *
+     * Additionally, this can only happen in kernel mode, so check
+     * MSR_PR as well.
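+     *
+     * (For reference: the L field decoded above selects plain sync (l == 0),
+     * lwsync (l == 1) or ptesync (l == 2); only ptesync orders page table
+     * updates, hence the l == 2 test below.)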
+ */ + if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) { + gen_check_tlb_flush(ctx, true); + } + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); +} + +/* wait */ +static void gen_wait(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, 1); +#ifdef _MSC_VER + tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, + 0 - offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); +#else + tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, + -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); +#endif + tcg_temp_free_i32(tcg_ctx, t0); + /* Stop translation, as the CPU is supposed to sleep from now */ + gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); +} + +#if defined(TARGET_PPC64) +static void gen_doze(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t; + + CHK_HV; + t = tcg_const_i32(tcg_ctx, PPC_PM_DOZE); + gen_helper_pminsn(tcg_ctx, tcg_ctx->cpu_env, t); + tcg_temp_free_i32(tcg_ctx, t); + /* Stop translation, as the CPU is supposed to sleep from now */ + gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); +} + +static void gen_nap(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t; + + CHK_HV; + t = tcg_const_i32(tcg_ctx, PPC_PM_NAP); + gen_helper_pminsn(tcg_ctx, tcg_ctx->cpu_env, t); + tcg_temp_free_i32(tcg_ctx, t); + /* Stop translation, as the CPU is supposed to sleep from now */ + gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); +} + +static void gen_stop(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t; + + CHK_HV; + t = tcg_const_i32(tcg_ctx, PPC_PM_STOP); + gen_helper_pminsn(tcg_ctx, tcg_ctx->cpu_env, t); + tcg_temp_free_i32(tcg_ctx, t); + /* Stop translation, as the CPU is supposed to sleep from now */ + gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); +} + +static void gen_sleep(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t; + + CHK_HV; + t = tcg_const_i32(tcg_ctx, PPC_PM_SLEEP); + gen_helper_pminsn(tcg_ctx, tcg_ctx->cpu_env, t); + tcg_temp_free_i32(tcg_ctx, t); + /* Stop translation, as the CPU is supposed to sleep from now */ + gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); +} + +static void gen_rvwinkle(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t; + + CHK_HV; + t = tcg_const_i32(tcg_ctx, PPC_PM_RVWINKLE); + gen_helper_pminsn(tcg_ctx, tcg_ctx->cpu_env, t); + tcg_temp_free_i32(tcg_ctx, t); + /* Stop translation, as the CPU is supposed to sleep from now */ + gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); +} +#endif /* #if defined(TARGET_PPC64) */ + +static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip) +{ +#if defined(TARGET_PPC64) + if (ctx->has_cfar) { + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_movi_tl(tcg_ctx, cpu_cfar, nip); + } +#endif +} + +static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) +{ + if (unlikely(ctx->singlestep_enabled)) { + return false; + } + + return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); +} + +static void gen_lookup_and_goto_ptr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int sse = ctx->singlestep_enabled; + if (unlikely(sse)) { + if (sse & GDBSTUB_SINGLE_STEP) { + gen_debug_exception(ctx); + } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) { + uint32_t excp = gen_prep_dbgex(ctx); + gen_exception(ctx, excp); + } + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + } else { + tcg_gen_lookup_and_goto_ptr(tcg_ctx); + } +} + +/*** Branch ***/ +static void 
gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    if (NARROW_MODE(ctx)) {
+        dest = (uint32_t) dest;
+    }
+    if (use_goto_tb(ctx, dest)) {
+        tcg_gen_goto_tb(tcg_ctx, n);
+        tcg_gen_movi_tl(tcg_ctx, cpu_nip, dest & ~3);
+        tcg_gen_exit_tb(tcg_ctx, ctx->base.tb, n);
+    } else {
+        tcg_gen_movi_tl(tcg_ctx, cpu_nip, dest & ~3);
+        gen_lookup_and_goto_ptr(ctx);
+    }
+}
+
+static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    if (NARROW_MODE(ctx)) {
+        nip = (uint32_t)nip;
+    }
+    tcg_gen_movi_tl(tcg_ctx, cpu_lr, nip);
+}
+
+/* b ba bl bla */
+static void gen_b(DisasContext *ctx)
+{
+    target_ulong li, target;
+
+    ctx->exception = POWERPC_EXCP_BRANCH;
+    /* sign extend LI */
+    li = LI(ctx->opcode);
+    li = (li ^ 0x02000000) - 0x02000000;
+    if (likely(AA(ctx->opcode) == 0)) {
+        target = ctx->base.pc_next + li - 4;
+    } else {
+        target = li;
+    }
+    if (LK(ctx->opcode)) {
+        gen_setlr(ctx, ctx->base.pc_next);
+    }
+    gen_update_cfar(ctx, ctx->base.pc_next - 4);
+    gen_goto_tb(ctx, 0, target);
+}
+
+#define BCOND_IM  0
+#define BCOND_LR  1
+#define BCOND_CTR 2
+#define BCOND_TAR 3
+
+static void gen_bcond(DisasContext *ctx, int type)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    uint32_t bo = BO(ctx->opcode);
+    TCGLabel *l1;
+    TCGv target;
+    ctx->exception = POWERPC_EXCP_BRANCH;
+
+    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
+        target = tcg_temp_local_new(tcg_ctx);
+        if (type == BCOND_CTR) {
+            tcg_gen_mov_tl(tcg_ctx, target, cpu_ctr);
+        } else if (type == BCOND_TAR) {
+            gen_load_spr(tcg_ctx, target, SPR_TAR);
+        } else {
+            tcg_gen_mov_tl(tcg_ctx, target, cpu_lr);
+        }
+    } else {
+        target = NULL;
+    }
+    if (LK(ctx->opcode)) {
+        gen_setlr(ctx, ctx->base.pc_next);
+    }
+    l1 = gen_new_label(tcg_ctx);
+    if ((bo & 0x4) == 0) {
+        /* Decrement and test CTR */
+        TCGv temp = tcg_temp_new(tcg_ctx);
+
+        if (type == BCOND_CTR) {
+            /*
+             * All ISAs up to v3 describe this form of bcctr as invalid but
+             * some processors, i.e. 64-bit server processors compliant with
+             * arch 2.x, do implement a "test and decrement" logic instead,
+             * as described in their respective UMs. This logic involves CTR
+             * acting as both the branch target and a counter, which makes
+             * it basically useless and thus never used in real code.
+             *
+             * This form was hence chosen to trigger an extra
+             * micro-architectural side-effect on real HW needed for the
+             * Spectre v2 workaround. It is up to guests that implement
+             * such a workaround, i.e. Linux, to use this form in a way
+             * that just triggers the side-effect without doing anything
+             * else harmful.
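+             *
+             * (Illustrative usage, not taken from this code: such a guest
+             * loads CTR with the real branch destination and then issues
+             * the decrement-and-test bcctr form, so the indirect branch
+             * and the count-cache side-effect happen together.)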
+ */ + if (unlikely(!is_book3s_arch2x(ctx))) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, target); + return; + } + + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(tcg_ctx, temp, cpu_ctr); + } else { + tcg_gen_mov_tl(tcg_ctx, temp, cpu_ctr); + } + if (bo & 0x2) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, temp, 0, l1); + } else { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, temp, 0, l1); + } + tcg_gen_subi_tl(tcg_ctx, cpu_ctr, cpu_ctr, 1); + } else { + tcg_gen_subi_tl(tcg_ctx, cpu_ctr, cpu_ctr, 1); + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(tcg_ctx, temp, cpu_ctr); + } else { + tcg_gen_mov_tl(tcg_ctx, temp, cpu_ctr); + } + if (bo & 0x2) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, temp, 0, l1); + } else { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, temp, 0, l1); + } + } + tcg_temp_free(tcg_ctx, temp); + } + if ((bo & 0x10) == 0) { + /* Test CR */ + uint32_t bi = BI(ctx->opcode); + uint32_t mask = 0x08 >> (bi & 0x03); + TCGv_i32 temp = tcg_temp_new_i32(tcg_ctx); + + if (bo & 0x8) { + tcg_gen_andi_i32(tcg_ctx, temp, cpu_crf[bi >> 2], mask); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, temp, 0, l1); + } else { + tcg_gen_andi_i32(tcg_ctx, temp, cpu_crf[bi >> 2], mask); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, temp, 0, l1); + } + tcg_temp_free_i32(tcg_ctx, temp); + } + gen_update_cfar(ctx, ctx->base.pc_next - 4); + if (type == BCOND_IM) { + target_ulong li = (target_long)((int16_t)(BD(ctx->opcode))); + if (likely(AA(ctx->opcode) == 0)) { + gen_goto_tb(ctx, 0, ctx->base.pc_next + li - 4); + } else { + gen_goto_tb(ctx, 0, li); + } + } else { + if (NARROW_MODE(ctx)) { + tcg_gen_andi_tl(tcg_ctx, cpu_nip, target, (uint32_t)~3); + } else { + tcg_gen_andi_tl(tcg_ctx, cpu_nip, target, ~3); + } + gen_lookup_and_goto_ptr(ctx); + tcg_temp_free(tcg_ctx, target); + } + if ((bo & 0x14) != 0x14) { + /* fallthrough case */ + gen_set_label(tcg_ctx, l1); + gen_goto_tb(ctx, 1, ctx->base.pc_next); + } +} + +static void gen_bc(DisasContext *ctx) +{ + gen_bcond(ctx, BCOND_IM); +} + +static void gen_bcctr(DisasContext *ctx) +{ + gen_bcond(ctx, BCOND_CTR); +} + +static void gen_bclr(DisasContext *ctx) +{ + gen_bcond(ctx, BCOND_LR); +} + +static void gen_bctar(DisasContext *ctx) +{ + gen_bcond(ctx, BCOND_TAR); +} + +/*** Condition register logical ***/ +#define GEN_CRLOGIC(name, tcg_op, opc) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + uint8_t bitmask; \ + int sh; \ + TCGv_i32 t0, t1; \ + sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03); \ + t0 = tcg_temp_new_i32(tcg_ctx); \ + if (sh > 0) \ + tcg_gen_shri_i32(tcg_ctx, t0, cpu_crf[crbA(ctx->opcode) >> 2], sh); \ + else if (sh < 0) \ + tcg_gen_shli_i32(tcg_ctx, t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh); \ + else \ + tcg_gen_mov_i32(tcg_ctx, t0, cpu_crf[crbA(ctx->opcode) >> 2]); \ + t1 = tcg_temp_new_i32(tcg_ctx); \ + sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03); \ + if (sh > 0) \ + tcg_gen_shri_i32(tcg_ctx, t1, cpu_crf[crbB(ctx->opcode) >> 2], sh); \ + else if (sh < 0) \ + tcg_gen_shli_i32(tcg_ctx, t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh); \ + else \ + tcg_gen_mov_i32(tcg_ctx, t1, cpu_crf[crbB(ctx->opcode) >> 2]); \ + tcg_op(tcg_ctx, t0, t0, t1); \ + bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03); \ + tcg_gen_andi_i32(tcg_ctx, t0, t0, bitmask); \ + tcg_gen_andi_i32(tcg_ctx, t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask); \ + tcg_gen_or_i32(tcg_ctx, cpu_crf[crbD(ctx->opcode) >> 2], t0, t1); \ + tcg_temp_free_i32(tcg_ctx, 
t0); \ + tcg_temp_free_i32(tcg_ctx, t1); \ +} + +/* crand */ +GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08); +/* crandc */ +GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04); +/* creqv */ +GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09); +/* crnand */ +GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07); +/* crnor */ +GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01); +/* cror */ +GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E); +/* crorc */ +GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D); +/* crxor */ +GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06); + +/* mcrf */ +static void gen_mcrf(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_mov_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]); +} + +/*** System linkage ***/ + +/* rfi (supervisor only) */ +static void gen_rfi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + /* + * This instruction doesn't exist anymore on 64-bit server + * processors compliant with arch 2.x + */ + if (is_book3s_arch2x(ctx)) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + return; + } + /* Restore CPU state */ + CHK_SV; + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_update_cfar(ctx, ctx->base.pc_next - 4); + gen_helper_rfi(tcg_ctx, tcg_ctx->cpu_env); + gen_sync_exception(ctx); +} + +#if defined(TARGET_PPC64) +static void gen_rfid(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + /* Restore CPU state */ + CHK_SV; + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_update_cfar(ctx, ctx->base.pc_next - 4); + gen_helper_rfid(tcg_ctx, tcg_ctx->cpu_env); + gen_sync_exception(ctx); +} + +static void gen_hrfid(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + /* Restore CPU state */ + CHK_HV; + gen_helper_hrfid(tcg_ctx, tcg_ctx->cpu_env); + gen_sync_exception(ctx); +} +#endif + +/* sc */ +#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL +static void gen_sc(DisasContext *ctx) +{ + uint32_t lev; + + lev = (ctx->opcode >> 5) & 0x7F; + gen_exception_err(ctx, POWERPC_SYSCALL, lev); +} + +/*** Trap ***/ + +/* Check for unconditional traps (always or never) */ +static bool check_unconditional_trap(DisasContext *ctx) +{ + /* Trap never */ + if (TO(ctx->opcode) == 0) { + return true; + } + /* Trap always */ + if (TO(ctx->opcode) == 31) { + gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP); + return true; + } + return false; +} + +/* tw */ +static void gen_tw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0; + + if (check_unconditional_trap(ctx)) { + return; + } + t0 = tcg_const_i32(tcg_ctx, TO(ctx->opcode)); + gen_helper_tw(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], + t0); + tcg_temp_free_i32(tcg_ctx, t0); +} + +/* twi */ +static void gen_twi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + TCGv_i32 t1; + + if (check_unconditional_trap(ctx)) { + return; + } + t0 = tcg_const_tl(tcg_ctx, SIMM(ctx->opcode)); + t1 = tcg_const_i32(tcg_ctx, TO(ctx->opcode)); + gen_helper_tw(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); +} + +#if defined(TARGET_PPC64) +/* td */ +static void gen_td(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0; + + if (check_unconditional_trap(ctx)) { + return; + } + t0 = tcg_const_i32(tcg_ctx, TO(ctx->opcode)); + gen_helper_td(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], + t0); + tcg_temp_free_i32(tcg_ctx, 
t0); +} + +/* tdi */ +static void gen_tdi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + TCGv_i32 t1; + + if (check_unconditional_trap(ctx)) { + return; + } + t0 = tcg_const_tl(tcg_ctx, SIMM(ctx->opcode)); + t1 = tcg_const_i32(tcg_ctx, TO(ctx->opcode)); + gen_helper_td(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); +} +#endif + +/*** Processor control ***/ + +static void gen_read_xer(DisasContext *ctx, TCGv dst) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + tcg_gen_mov_tl(tcg_ctx, dst, cpu_xer); + tcg_gen_shli_tl(tcg_ctx, t0, cpu_so, XER_SO); + tcg_gen_shli_tl(tcg_ctx, t1, cpu_ov, XER_OV); + tcg_gen_shli_tl(tcg_ctx, t2, cpu_ca, XER_CA); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_gen_or_tl(tcg_ctx, dst, dst, t2); + tcg_gen_or_tl(tcg_ctx, dst, dst, t0); + if (is_isa300(ctx)) { + tcg_gen_shli_tl(tcg_ctx, t0, cpu_ov32, XER_OV32); + tcg_gen_or_tl(tcg_ctx, dst, dst, t0); + tcg_gen_shli_tl(tcg_ctx, t0, cpu_ca32, XER_CA32); + tcg_gen_or_tl(tcg_ctx, dst, dst, t0); + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); +} + +static void gen_write_xer(TCGContext *tcg_ctx, TCGv src) +{ + /* Write all flags, while reading back check for isa300 */ + tcg_gen_andi_tl(tcg_ctx, cpu_xer, src, + ~((1u << XER_SO) | + (1u << XER_OV) | (1u << XER_OV32) | + (1u << XER_CA) | (1u << XER_CA32))); + tcg_gen_extract_tl(tcg_ctx, cpu_ov32, src, XER_OV32, 1); + tcg_gen_extract_tl(tcg_ctx, cpu_ca32, src, XER_CA32, 1); + tcg_gen_extract_tl(tcg_ctx, cpu_so, src, XER_SO, 1); + tcg_gen_extract_tl(tcg_ctx, cpu_ov, src, XER_OV, 1); + tcg_gen_extract_tl(tcg_ctx, cpu_ca, src, XER_CA, 1); +} + +/* mcrxr */ +static void gen_mcrxr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)]; + + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_so); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_ov); + tcg_gen_trunc_tl_i32(tcg_ctx, dst, cpu_ca); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 3); + tcg_gen_shli_i32(tcg_ctx, t1, t1, 2); + tcg_gen_shli_i32(tcg_ctx, dst, dst, 1); + tcg_gen_or_i32(tcg_ctx, dst, dst, t0); + tcg_gen_or_i32(tcg_ctx, dst, dst, t1); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + + tcg_gen_movi_tl(tcg_ctx, cpu_so, 0); + tcg_gen_movi_tl(tcg_ctx, cpu_ov, 0); + tcg_gen_movi_tl(tcg_ctx, cpu_ca, 0); +} + +#ifdef TARGET_PPC64 +/* mcrxrx */ +static void gen_mcrxrx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)]; + + /* copy OV and OV32 */ + tcg_gen_shli_tl(tcg_ctx, t0, cpu_ov, 1); + tcg_gen_or_tl(tcg_ctx, t0, t0, cpu_ov32); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 2); + /* copy CA and CA32 */ + tcg_gen_shli_tl(tcg_ctx, t1, cpu_ca, 1); + tcg_gen_or_tl(tcg_ctx, t1, t1, cpu_ca32); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_gen_trunc_tl_i32(tcg_ctx, dst, t0); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} +#endif + +/* mfcr mfocrf */ +static void gen_mfcr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t crm, crn; + + if (likely(ctx->opcode & 0x00100000)) { + crm = CRM(ctx->opcode); + if (likely(crm && ((crm & (crm - 1)) == 0))) { + crn = ctz32(crm); + tcg_gen_extu_i32_tl(tcg_ctx, 
cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]); + tcg_gen_shli_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], + cpu_gpr[rD(ctx->opcode)], crn * 4); + } + } else { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, t0, cpu_crf[0]); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); + tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[1]); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); + tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[2]); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); + tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[3]); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); + tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[4]); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); + tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[5]); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); + tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[6]); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 4); + tcg_gen_or_i32(tcg_ctx, t0, t0, cpu_crf[7]); + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +/* mfmsr */ +static void gen_mfmsr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_msr); +} + +static void spr_noaccess(DisasContext *ctx, int gprn, int sprn) +{ +} +#define SPR_NOACCESS (&spr_noaccess) + +/* mfspr */ +static inline void gen_op_mfspr(DisasContext *ctx) +{ + void (*read_cb)(DisasContext *ctx, int gprn, int sprn); + uint32_t sprn = SPR(ctx->opcode); + + if (ctx->pr) { + read_cb = ctx->spr_cb[sprn].uea_read; + } else if (ctx->hv) { + read_cb = ctx->spr_cb[sprn].hea_read; + } else { + read_cb = ctx->spr_cb[sprn].oea_read; + } + + if (likely(read_cb != NULL)) { + if (likely(read_cb != SPR_NOACCESS)) { + (*read_cb)(ctx, rD(ctx->opcode), sprn); + } else { + /* Privilege exception */ + /* + * This is a hack to avoid warnings when running Linux: + * this OS breaks the PowerPC virtualisation model, + * allowing userland application to read the PVR + */ + if (sprn != SPR_PVR) { + qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr " + "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn, + ctx->base.pc_next - 4); + } + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); + } + } else { + /* ISA 2.07 defines these as no-ops */ + if ((ctx->insns_flags2 & PPC2_ISA207S) && + (sprn >= 808 && sprn <= 811)) { + /* This is a nop */ + return; + } + /* Not defined */ + qemu_log_mask(LOG_GUEST_ERROR, + "Trying to read invalid spr %d (0x%03x) at " + TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); + + /* + * The behaviour depends on MSR:PR and SPR# bit 0x10, it can + * generate a priv, a hv emu or a no-op + */ + if (sprn & 0x10) { + if (ctx->pr) { + gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + } + } else { + if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) { + gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + } + } + } +} + +static void gen_mfspr(DisasContext *ctx) +{ + gen_op_mfspr(ctx); +} + +/* mftb */ +static void gen_mftb(DisasContext *ctx) +{ + gen_op_mfspr(ctx); +} + +/* mtcrf mtocrf*/ +static void gen_mtcrf(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t crm, crn; + + crm = CRM(ctx->opcode); + if (likely((ctx->opcode & 0x00100000))) { + if (crm && ((crm & (crm - 1)) == 0)) { + TCGv_i32 temp = tcg_temp_new_i32(tcg_ctx); + crn = ctz32(crm); + tcg_gen_trunc_tl_i32(tcg_ctx, temp, cpu_gpr[rS(ctx->opcode)]); + tcg_gen_shri_i32(tcg_ctx, temp, temp, crn * 4); + tcg_gen_andi_i32(tcg_ctx, cpu_crf[7 - crn], temp, 0xf); + tcg_temp_free_i32(tcg_ctx, temp); + } + } else { + TCGv_i32 temp = tcg_temp_new_i32(tcg_ctx); + 
tcg_gen_trunc_tl_i32(tcg_ctx, temp, cpu_gpr[rS(ctx->opcode)]); + for (crn = 0 ; crn < 8 ; crn++) { + if (crm & (1 << crn)) { + tcg_gen_shri_i32(tcg_ctx, cpu_crf[7 - crn], temp, crn * 4); + tcg_gen_andi_i32(tcg_ctx, cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf); + } + } + tcg_temp_free_i32(tcg_ctx, temp); + } +} + +/* mtmsr */ +#if defined(TARGET_PPC64) +static void gen_mtmsrd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + if (ctx->opcode & 0x00010000) { + /* L=1 form only updates EE and RI */ + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], + (1 << MSR_RI) | (1 << MSR_EE)); + tcg_gen_andi_tl(tcg_ctx, t1, cpu_msr, + ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); + tcg_gen_or_tl(tcg_ctx, t1, t1, t0); + + gen_helper_store_msr(tcg_ctx, tcg_ctx->cpu_env, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + + } else { + /* + * XXX: we need to update nip before the store if we enter + * power saving mode, we will exit the loop directly from + * ppc_store_msr + */ + gen_update_nip(ctx, ctx->base.pc_next); + gen_helper_store_msr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rS(ctx->opcode)]); + } + /* Must stop the translation as machine state (may have) changed */ + gen_stop_exception(ctx); +} +#endif /* defined(TARGET_PPC64) */ + +static void gen_mtmsr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + if (ctx->opcode & 0x00010000) { + /* L=1 form only updates EE and RI */ + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], + (1 << MSR_RI) | (1 << MSR_EE)); + tcg_gen_andi_tl(tcg_ctx, t1, cpu_msr, + ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); + tcg_gen_or_tl(tcg_ctx, t1, t1, t0); + + gen_helper_store_msr(tcg_ctx, tcg_ctx->cpu_env, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + + } else { + TCGv msr = tcg_temp_new(tcg_ctx); + + /* + * XXX: we need to update nip before the store if we enter + * power saving mode, we will exit the loop directly from + * ppc_store_msr + */ + gen_update_nip(ctx, ctx->base.pc_next); +#if defined(TARGET_PPC64) + tcg_gen_deposit_tl(tcg_ctx, msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32); +#else + tcg_gen_mov_tl(tcg_ctx, msr, cpu_gpr[rS(ctx->opcode)]); +#endif + gen_helper_store_msr(tcg_ctx, tcg_ctx->cpu_env, msr); + tcg_temp_free(tcg_ctx, msr); + } + /* Must stop the translation as machine state (may have) changed */ + gen_stop_exception(ctx); +} + +/* mtspr */ +static void gen_mtspr(DisasContext *ctx) +{ + void (*write_cb)(DisasContext *ctx, int sprn, int gprn); + uint32_t sprn = SPR(ctx->opcode); + + if (ctx->pr) { + write_cb = ctx->spr_cb[sprn].uea_write; + } else if (ctx->hv) { + write_cb = ctx->spr_cb[sprn].hea_write; + } else { + write_cb = ctx->spr_cb[sprn].oea_write; + } + + if (likely(write_cb != NULL)) { + if (likely(write_cb != SPR_NOACCESS)) { + (*write_cb)(ctx, sprn, rS(ctx->opcode)); + } else { + /* Privilege exception */ + qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr " + "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn, + ctx->base.pc_next - 4); + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); + } + } else { + /* ISA 2.07 defines these as no-ops */ + if ((ctx->insns_flags2 & PPC2_ISA207S) && + (sprn >= 808 && sprn <= 811)) { + /* This is a nop */ + 
return;
+        }
+
+        /* Not defined */
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Trying to write invalid spr %d (0x%03x) at "
+                      TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);
+
+        /*
+         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
+         * generate a priv, a hv emu or a no-op
+         */
+        if (sprn & 0x10) {
+            if (ctx->pr) {
+                gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+            }
+        } else {
+            if (ctx->pr || sprn == 0) {
+                gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+            }
+        }
+    }
+}
+
+#if defined(TARGET_PPC64)
+/* setb */
+static void gen_setb(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+    TCGv_i32 t8 = tcg_temp_new_i32(tcg_ctx);
+    TCGv_i32 tm1 = tcg_temp_new_i32(tcg_ctx);
+    int crf = crfS(ctx->opcode);
+
+    tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_GEU, t0, cpu_crf[crf], 4);
+    tcg_gen_movi_i32(tcg_ctx, t8, 8);
+    tcg_gen_movi_i32(tcg_ctx, tm1, -1);
+    tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
+    tcg_gen_ext_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0);
+
+    tcg_temp_free_i32(tcg_ctx, t0);
+    tcg_temp_free_i32(tcg_ctx, t8);
+    tcg_temp_free_i32(tcg_ctx, tm1);
+}
+#endif
+
+/*** Cache management ***/
+
+/* dcbf */
+static void gen_dcbf(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    /* XXX: specification says this is treated as a load by the MMU */
+    TCGv t0;
+    gen_set_access_type(ctx, ACCESS_CACHE);
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, t0);
+    gen_qemu_ld8u(ctx, t0, t0);
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+/* dcbfep (external PID dcbf) */
+static void gen_dcbfep(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    /* XXX: specification says this is treated as a load by the MMU */
+    TCGv t0;
+    CHK_SV;
+    gen_set_access_type(ctx, ACCESS_CACHE);
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, t0);
+    tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+/* dcbi (Supervisor only) */
+static void gen_dcbi(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv EA, val;
+
+    CHK_SV;
+    EA = tcg_temp_new(tcg_ctx);
+    gen_set_access_type(ctx, ACCESS_CACHE);
+    gen_addr_reg_index(ctx, EA);
+    val = tcg_temp_new(tcg_ctx);
+    /* XXX: specification says this should be treated as a store by the MMU */
+    gen_qemu_ld8u(ctx, val, EA);
+    gen_qemu_st8(ctx, val, EA);
+    tcg_temp_free(tcg_ctx, val);
+    tcg_temp_free(tcg_ctx, EA);
+}
+
+/* dcbst */
+static void gen_dcbst(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    /* XXX: specification says this is treated as a load by the MMU */
+    TCGv t0;
+    gen_set_access_type(ctx, ACCESS_CACHE);
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, t0);
+    gen_qemu_ld8u(ctx, t0, t0);
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+/* dcbstep (external PID dcbst) */
+static void gen_dcbstep(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    /* XXX: specification says this is treated as a load by the MMU */
+    TCGv t0;
+    gen_set_access_type(ctx, ACCESS_CACHE);
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, t0);
+    tcg_gen_qemu_ld_tl(tcg_ctx, t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+/* dcbt */
+static void gen_dcbt(DisasContext *ctx)
+{
+    /*
+     * interpreted as no-op
+     * XXX: specification says this is treated as a load by the MMU but
+     * does not generate any exception
+     */
+}
+
+/* dcbtep */
+static void gen_dcbtep(DisasContext *ctx)
+{
+    /*
+     * interpreted as no-op
+     * XXX: specification says this is treated as a load by the MMU but
+     * does not generate any exception
+     */
+}
+
+/* dcbtst */
+static void gen_dcbtst(DisasContext *ctx)
+{
+    /*
+     * interpreted as no-op
+     * XXX: specification says this is treated as a load by the MMU but
+     * does not generate any exception
+     */
+}
+
+/* dcbtstep */
+static void gen_dcbtstep(DisasContext *ctx)
+{
+    /*
+     * interpreted as no-op
+     * XXX: specification says this is treated as a load by the MMU but
+     * does not generate any exception
+     */
+}
+
+/* dcbtls */
+static void gen_dcbtls(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    /* Always fails locking the cache */
+    TCGv t0 = tcg_temp_new(tcg_ctx);
+    gen_load_spr(tcg_ctx, t0, SPR_Exxx_L1CSR0);
+    tcg_gen_ori_tl(tcg_ctx, t0, t0, L1CSR0_CUL);
+    gen_store_spr(tcg_ctx, SPR_Exxx_L1CSR0, t0);
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+/* dcbz */
+static void gen_dcbz(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv tcgv_addr;
+    TCGv_i32 tcgv_op;
+
+    gen_set_access_type(ctx, ACCESS_CACHE);
+    tcgv_addr = tcg_temp_new(tcg_ctx);
+    tcgv_op = tcg_const_i32(tcg_ctx, ctx->opcode & 0x03FF000);
+    gen_addr_reg_index(ctx, tcgv_addr);
+    gen_helper_dcbz(tcg_ctx, tcg_ctx->cpu_env, tcgv_addr, tcgv_op);
+    tcg_temp_free(tcg_ctx, tcgv_addr);
+    tcg_temp_free_i32(tcg_ctx, tcgv_op);
+}
+
+/* dcbzep */
+static void gen_dcbzep(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv tcgv_addr;
+    TCGv_i32 tcgv_op;
+
+    gen_set_access_type(ctx, ACCESS_CACHE);
+    tcgv_addr = tcg_temp_new(tcg_ctx);
+    tcgv_op = tcg_const_i32(tcg_ctx, ctx->opcode & 0x03FF000);
+    gen_addr_reg_index(ctx, tcgv_addr);
+    gen_helper_dcbzep(tcg_ctx, tcg_ctx->cpu_env, tcgv_addr, tcgv_op);
+    tcg_temp_free(tcg_ctx, tcgv_addr);
+    tcg_temp_free_i32(tcg_ctx, tcgv_op);
+}
+
+/* dst / dstt */
+static void gen_dst(DisasContext *ctx)
+{
+    if (rA(ctx->opcode) == 0) {
+        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+    } else {
+        /* interpreted as no-op */
+    }
+}
+
+/* dstst / dststt */
+static void gen_dstst(DisasContext *ctx)
+{
+    if (rA(ctx->opcode) == 0) {
+        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+    } else {
+        /* interpreted as no-op */
+    }
+}
+
+/* dss / dssall */
+static void gen_dss(DisasContext *ctx)
+{
+    /* interpreted as no-op */
+}
+
+/* icbi */
+static void gen_icbi(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+    gen_set_access_type(ctx, ACCESS_CACHE);
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, t0);
+    gen_helper_icbi(tcg_ctx, tcg_ctx->cpu_env, t0);
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+/* icbiep */
+static void gen_icbiep(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+    gen_set_access_type(ctx, ACCESS_CACHE);
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, t0);
+    gen_helper_icbiep(tcg_ctx, tcg_ctx->cpu_env, t0);
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+/* Optional: */
+/* dcba */
+static void gen_dcba(DisasContext *ctx)
+{
+    /*
+     * interpreted as no-op
+     * XXX: specification says this is treated as a store by the MMU
+     * but does not generate any exception
+     */
+}
+
+/*** Segment register manipulation ***/
+/* Supervisor only: */
+
+/* mfsr */
+static void gen_mfsr(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+
+    CHK_SV;
+    t0 = tcg_const_tl(tcg_ctx, SR(ctx->opcode));
+    gen_helper_load_sr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0);
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+/* mfsrin */
+static void gen_mfsrin(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv
t0; + + CHK_SV; + t0 = tcg_temp_new(tcg_ctx); + tcg_gen_extract_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 28, 4); + gen_helper_load_sr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); +} + +/* mtsr */ +static void gen_mtsr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + CHK_SV; + t0 = tcg_const_tl(tcg_ctx, SR(ctx->opcode)); + gen_helper_store_sr(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); + tcg_temp_free(tcg_ctx, t0); +} + +/* mtsrin */ +static void gen_mtsrin(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + CHK_SV; + + t0 = tcg_temp_new(tcg_ctx); + tcg_gen_extract_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 28, 4); + gen_helper_store_sr(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[rD(ctx->opcode)]); + tcg_temp_free(tcg_ctx, t0); +} + +#if defined(TARGET_PPC64) +/* Specific implementation for PowerPC 64 "bridge" emulation using SLB */ + +/* mfsr */ +static void gen_mfsr_64b(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + CHK_SV; + t0 = tcg_const_tl(tcg_ctx, SR(ctx->opcode)); + gen_helper_load_sr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); +} + +/* mfsrin */ +static void gen_mfsrin_64b(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + CHK_SV; + t0 = tcg_temp_new(tcg_ctx); + tcg_gen_extract_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 28, 4); + gen_helper_load_sr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); +} + +/* mtsr */ +static void gen_mtsr_64b(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + CHK_SV; + t0 = tcg_const_tl(tcg_ctx, SR(ctx->opcode)); + gen_helper_store_sr(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); + tcg_temp_free(tcg_ctx, t0); +} + +/* mtsrin */ +static void gen_mtsrin_64b(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + CHK_SV; + t0 = tcg_temp_new(tcg_ctx); + tcg_gen_extract_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 28, 4); + gen_helper_store_sr(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); + tcg_temp_free(tcg_ctx, t0); +} + +/* slbmte */ +static void gen_slbmte(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + gen_helper_store_slb(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)]); +} + +static void gen_slbmfee(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + gen_helper_load_slb_esid(tcg_ctx, cpu_gpr[rS(ctx->opcode)], tcg_ctx->cpu_env, + cpu_gpr[rB(ctx->opcode)]); +} + +static void gen_slbmfev(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + gen_helper_load_slb_vsid(tcg_ctx, cpu_gpr[rS(ctx->opcode)], tcg_ctx->cpu_env, + cpu_gpr[rB(ctx->opcode)]); +} + +static void gen_slbfee_(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l1, *l2; + + if (unlikely(ctx->pr)) { + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); + return; + } + gen_helper_find_slb_vsid(tcg_ctx, cpu_gpr[rS(ctx->opcode)], tcg_ctx->cpu_env, + cpu_gpr[rB(ctx->opcode)]); + l1 = gen_new_label(tcg_ctx); + l2 = gen_new_label(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, cpu_gpr[rS(ctx->opcode)], -1, l1); + tcg_gen_ori_i32(tcg_ctx, cpu_crf[0], cpu_crf[0], CRF_EQ); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rS(ctx->opcode)], 0); + 
gen_set_label(tcg_ctx, l2); +} +#endif /* defined(TARGET_PPC64) */ + +/*** Lookaside buffer management ***/ +/* Optional & supervisor only: */ + +/* tlbia */ +static void gen_tlbia(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_HV; + + gen_helper_tlbia(tcg_ctx, tcg_ctx->cpu_env); +} + +/* tlbiel */ +static void gen_tlbiel(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + gen_helper_tlbie(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); +} + +/* tlbie */ +static void gen_tlbie(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t1; + + if (ctx->gtse) { + CHK_SV; /* If gtse is set then tlbie is supervisor privileged */ + } else { + CHK_HV; /* Else hypervisor privileged */ + } + + if (NARROW_MODE(ctx)) { + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_ext32u_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)]); + gen_helper_tlbie(tcg_ctx, tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); + } else { + gen_helper_tlbie(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); + } + t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, offsetof(CPUPPCState, tlb_need_flush)); + tcg_gen_ori_i32(tcg_ctx, t1, t1, TLB_NEED_GLOBAL_FLUSH); + tcg_gen_st_i32(tcg_ctx, t1, tcg_ctx->cpu_env, offsetof(CPUPPCState, tlb_need_flush)); + tcg_temp_free_i32(tcg_ctx, t1); +} + +/* tlbsync */ +static void gen_tlbsync(DisasContext *ctx) +{ + if (ctx->gtse) { + CHK_SV; /* If gtse is set then tlbsync is supervisor privileged */ + } else { + CHK_HV; /* Else hypervisor privileged */ + } + + /* BookS does both ptesync and tlbsync make tlbsync a nop for server */ + if (ctx->insns_flags & PPC_BOOKE) { + gen_check_tlb_flush(ctx, true); + } +} + +#if defined(TARGET_PPC64) +/* slbia */ +static void gen_slbia(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t ih = (ctx->opcode >> 21) & 0x7; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, ih); + + CHK_SV; + + gen_helper_slbia(tcg_ctx, tcg_ctx->cpu_env, t0); + tcg_temp_free_i32(tcg_ctx, t0); +} + +/* slbie */ +static void gen_slbie(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + gen_helper_slbie(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); +} + +/* slbieg */ +static void gen_slbieg(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + gen_helper_slbieg(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); +} + +/* slbsync */ +static void gen_slbsync(DisasContext *ctx) +{ + CHK_SV; + gen_check_tlb_flush(ctx, true); +} + +#endif /* defined(TARGET_PPC64) */ + +/*** External control ***/ +/* Optional: */ + +/* eciwx */ +static void gen_eciwx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + /* Should check EAR[E] ! */ + gen_set_access_type(ctx, ACCESS_EXT); + t0 = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, t0); + tcg_gen_qemu_ld_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx, + DEF_MEMOP(MO_UL | MO_ALIGN)); + tcg_temp_free(tcg_ctx, t0); +} + +/* ecowx */ +static void gen_ecowx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + /* Should check EAR[E] ! */ + gen_set_access_type(ctx, ACCESS_EXT); + t0 = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, t0); + tcg_gen_qemu_st_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx, + DEF_MEMOP(MO_UL | MO_ALIGN)); + tcg_temp_free(tcg_ctx, t0); +} + +/* PowerPC 601 specific instructions */ + +/* abs - abs. 
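+ * (Informal summary of the code below: rD = |rA|, so e.g. rA = -1
+ * yields rD = 1; with Rc set ("abs."), CR0 is then set from the result.)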
*/ +static void gen_abs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv d = cpu_gpr[rD(ctx->opcode)]; + TCGv a = cpu_gpr[rA(ctx->opcode)]; + + tcg_gen_abs_tl(tcg_ctx, d, a); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, d); + } +} + +/* abso - abso. */ +static void gen_abso(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv d = cpu_gpr[rD(ctx->opcode)]; + TCGv a = cpu_gpr[rA(ctx->opcode)]; + + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cpu_ov, a, 0x80000000); + tcg_gen_abs_tl(tcg_ctx, d, a); + tcg_gen_or_tl(tcg_ctx, cpu_so, cpu_so, cpu_ov); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, d); + } +} + +/* clcs */ +static void gen_clcs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, rA(ctx->opcode)); + gen_helper_clcs(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); + tcg_temp_free_i32(tcg_ctx, t0); + /* Rc=1 sets CR0 to an undefined state */ +} + +/* div - div. */ +static void gen_div(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_div(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* divo - divo. */ +static void gen_divo(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_divo(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* divs - divs. */ +static void gen_divs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_divs(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* divso - divso. */ +static void gen_divso(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_divso(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* doz - doz. */ +static void gen_doz(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], + cpu_gpr[rA(ctx->opcode)], l1); + tcg_gen_sub_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], + cpu_gpr[rA(ctx->opcode)]); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], 0); + gen_set_label(tcg_ctx, l2); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* dozo - dozo. 
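+ * (Informal summary: same "difference or zero" result as doz above, but
+ * the xor/andc sequence below additionally detects signed overflow of
+ * the subtraction and raises XER[OV] and XER[SO].)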
*/ +static void gen_dozo(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + /* Start with XER OV disabled, the most likely case */ + tcg_gen_movi_tl(tcg_ctx, cpu_ov, 0); + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], + cpu_gpr[rA(ctx->opcode)], l1); + tcg_gen_sub_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + tcg_gen_xor_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + tcg_gen_xor_tl(tcg_ctx, t2, cpu_gpr[rA(ctx->opcode)], t0); + tcg_gen_andc_tl(tcg_ctx, t1, t1, t2); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l2); + tcg_gen_movi_tl(tcg_ctx, cpu_ov, 1); + tcg_gen_movi_tl(tcg_ctx, cpu_so, 1); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], 0); + gen_set_label(tcg_ctx, l2); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* dozi */ +static void gen_dozi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_long simm = SIMM(ctx->opcode); + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_LT, cpu_gpr[rA(ctx->opcode)], simm, l1); + tcg_gen_subfi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], simm, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], 0); + gen_set_label(tcg_ctx, l2); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* lscbx - lscbx. */ +static void gen_lscbx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv_i32 t1 = tcg_const_i32(tcg_ctx, rD(ctx->opcode)); + TCGv_i32 t2 = tcg_const_i32(tcg_ctx, rA(ctx->opcode)); + TCGv_i32 t3 = tcg_const_i32(tcg_ctx, rB(ctx->opcode)); + + gen_addr_reg_index(ctx, t0); + gen_helper_lscbx(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1, t2, t3); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + tcg_gen_andi_tl(tcg_ctx, cpu_xer, cpu_xer, ~0x7F); + tcg_gen_or_tl(tcg_ctx, cpu_xer, cpu_xer, t0); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, t0); + } + tcg_temp_free(tcg_ctx, t0); +} + +/* maskg - maskg. 
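+ * (Informal summary: builds a 32-bit mask from the two 5-bit bit indices
+ * held in rS and rB; the conditional negation on the branch below handles
+ * the case where the two indices wrap around.)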
*/ +static void gen_maskg(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_movi_tl(tcg_ctx, t3, 0xFFFFFFFF); + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1F); + tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], 0x1F); + tcg_gen_addi_tl(tcg_ctx, t2, t0, 1); + tcg_gen_shr_tl(tcg_ctx, t2, t3, t2); + tcg_gen_shr_tl(tcg_ctx, t3, t3, t1); + tcg_gen_xor_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t2, t3); + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, t0, t1, l1); + tcg_gen_neg_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + gen_set_label(tcg_ctx, l1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t3); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* maskir - maskir. */ +static void gen_maskir(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_and_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + tcg_gen_andc_tl(tcg_ctx, t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* mul - mul. */ +static void gen_mul(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_extu_tl_i64(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); + tcg_gen_trunc_i64_tl(tcg_ctx, t2, t0); + gen_store_spr(tcg_ctx, SPR_MQ, t2); + tcg_gen_shri_i64(tcg_ctx, t1, t0, 32); + tcg_gen_trunc_i64_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* mulo - mulo. */ +static void gen_mulo(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + /* Start with XER OV disabled, the most likely case */ + tcg_gen_movi_tl(tcg_ctx, cpu_ov, 0); + tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_extu_tl_i64(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); + tcg_gen_trunc_i64_tl(tcg_ctx, t2, t0); + gen_store_spr(tcg_ctx, SPR_MQ, t2); + tcg_gen_shri_i64(tcg_ctx, t1, t0, 32); + tcg_gen_trunc_i64_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1); + tcg_gen_ext32s_i64(tcg_ctx, t1, t0); + tcg_gen_brcond_i64(tcg_ctx, TCG_COND_EQ, t0, t1, l1); + tcg_gen_movi_tl(tcg_ctx, cpu_ov, 1); + tcg_gen_movi_tl(tcg_ctx, cpu_so, 1); + gen_set_label(tcg_ctx, l1); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); + } +} + +/* nabs - nabs. 
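+ * (Informal summary: rD = -|rA|, so e.g. rA = 5 gives rD = -5; as the
+ * nabso variant notes below, this can never overflow.)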
*/ +static void gen_nabs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv d = cpu_gpr[rD(ctx->opcode)]; + TCGv a = cpu_gpr[rA(ctx->opcode)]; + + tcg_gen_abs_tl(tcg_ctx, d, a); + tcg_gen_neg_tl(tcg_ctx, d, d); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, d); + } +} + +/* nabso - nabso. */ +static void gen_nabso(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv d = cpu_gpr[rD(ctx->opcode)]; + TCGv a = cpu_gpr[rA(ctx->opcode)]; + + tcg_gen_abs_tl(tcg_ctx, d, a); + tcg_gen_neg_tl(tcg_ctx, d, d); + /* nabs never overflows */ + tcg_gen_movi_tl(tcg_ctx, cpu_ov, 0); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, d); + } +} + +/* rlmi - rlmi. */ +static void gen_rlmi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t mb = MB(ctx->opcode); + uint32_t me = ME(ctx->opcode); + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1F); + tcg_gen_rotl_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); + tcg_gen_andi_tl(tcg_ctx, t0, t0, MASK(mb, me)); + tcg_gen_andi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + ~MASK(mb, me)); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0); + tcg_temp_free(tcg_ctx, t0); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* rrib - rrib. */ +static void gen_rrib(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1F); + tcg_gen_movi_tl(tcg_ctx, t1, 0x80000000); + tcg_gen_shr_tl(tcg_ctx, t1, t1, t0); + tcg_gen_shr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); + tcg_gen_and_tl(tcg_ctx, t0, t0, t1); + tcg_gen_andc_tl(tcg_ctx, t1, cpu_gpr[rA(ctx->opcode)], t1); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* sle - sle. */ +static void gen_sle(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1F); + tcg_gen_shl_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t1); + tcg_gen_subfi_tl(tcg_ctx, t1, 32, t1); + tcg_gen_shr_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], t1); + tcg_gen_or_tl(tcg_ctx, t1, t0, t1); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); + gen_store_spr(tcg_ctx, SPR_MQ, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* sleq - sleq. 
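+ * (Informal summary: rS is rotated left by rB & 0x1F; the rotation is
+ * saved to MQ, while rA combines the rotated bits, under the mask built
+ * in t2, with bits taken from the previous MQ value.)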
*/ +static void gen_sleq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1F); + tcg_gen_movi_tl(tcg_ctx, t2, 0xFFFFFFFF); + tcg_gen_shl_tl(tcg_ctx, t2, t2, t0); + tcg_gen_rotl_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); + gen_load_spr(tcg_ctx, t1, SPR_MQ); + gen_store_spr(tcg_ctx, SPR_MQ, t0); + tcg_gen_and_tl(tcg_ctx, t0, t0, t2); + tcg_gen_andc_tl(tcg_ctx, t1, t1, t2); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* sliq - sliq. */ +static void gen_sliq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int sh = SH(ctx->opcode); + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_shli_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], sh); + tcg_gen_shri_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); + tcg_gen_or_tl(tcg_ctx, t1, t0, t1); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); + gen_store_spr(tcg_ctx, SPR_MQ, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* slliq - slliq. */ +static void gen_slliq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int sh = SH(ctx->opcode); + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_rotli_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], sh); + gen_load_spr(tcg_ctx, t1, SPR_MQ); + gen_store_spr(tcg_ctx, SPR_MQ, t0); + tcg_gen_andi_tl(tcg_ctx, t0, t0, (0xFFFFFFFFU << sh)); + tcg_gen_andi_tl(tcg_ctx, t1, t1, ~(0xFFFFFFFFU << sh)); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* sllq - sllq. */ +static void gen_sllq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_local_new(tcg_ctx); + TCGv t2 = tcg_temp_local_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t2, cpu_gpr[rB(ctx->opcode)], 0x1F); + tcg_gen_movi_tl(tcg_ctx, t1, 0xFFFFFFFF); + tcg_gen_shl_tl(tcg_ctx, t1, t1, t2); + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x20); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t0, 0, l1); + gen_load_spr(tcg_ctx, t0, SPR_MQ); + tcg_gen_and_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_shl_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t2); + gen_load_spr(tcg_ctx, t2, SPR_MQ); + tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + gen_set_label(tcg_ctx, l2); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* slq - slq. 
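Shift left with MQ update: rA = rS << n with the rotated value saved to MQ; if rB has the 0x20 bit set (a shift amount of 32 or more), the result is forced to zero.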
 */
+static void gen_slq(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGLabel *l1 = gen_new_label(tcg_ctx);
+    TCGv t0 = tcg_temp_new(tcg_ctx);
+    TCGv t1 = tcg_temp_new(tcg_ctx);
+    tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+    tcg_gen_shl_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t1);
+    tcg_gen_subfi_tl(tcg_ctx, t1, 32, t1);
+    tcg_gen_shr_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], t1);
+    tcg_gen_or_tl(tcg_ctx, t1, t0, t1);
+    gen_store_spr(tcg_ctx, SPR_MQ, t1);
+    tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x20);
+    tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0);
+    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1);
+    tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], 0);
+    gen_set_label(tcg_ctx, l1);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free(tcg_ctx, t1);
+    if (unlikely(Rc(ctx->opcode) != 0)) {
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+    }
+}
+
+/* sraiq - sraiq. */
+static void gen_sraiq(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    int sh = SH(ctx->opcode);
+    TCGLabel *l1 = gen_new_label(tcg_ctx);
+    TCGv t0 = tcg_temp_new(tcg_ctx);
+    TCGv t1 = tcg_temp_new(tcg_ctx);
+    tcg_gen_shri_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], sh);
+    tcg_gen_shli_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+    tcg_gen_or_tl(tcg_ctx, t0, t0, t1);
+    gen_store_spr(tcg_ctx, SPR_MQ, t0);
+    tcg_gen_movi_tl(tcg_ctx, cpu_ca, 0);
+    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1);
+    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
+    tcg_gen_movi_tl(tcg_ctx, cpu_ca, 1);
+    gen_set_label(tcg_ctx, l1);
+    tcg_gen_sari_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free(tcg_ctx, t1);
+    if (unlikely(Rc(ctx->opcode) != 0)) {
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+    }
+}
+
+/* sraq - sraq. */
+static void gen_sraq(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGLabel *l1 = gen_new_label(tcg_ctx);
+    TCGLabel *l2 = gen_new_label(tcg_ctx);
+    TCGv t0 = tcg_temp_new(tcg_ctx);
+    TCGv t1 = tcg_temp_local_new(tcg_ctx);
+    TCGv t2 = tcg_temp_local_new(tcg_ctx);
+    tcg_gen_andi_tl(tcg_ctx, t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+    tcg_gen_shr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t2);
+    tcg_gen_sar_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], t2);
+    tcg_gen_subfi_tl(tcg_ctx, t2, 32, t2);
+    tcg_gen_shl_tl(tcg_ctx, t2, cpu_gpr[rS(ctx->opcode)], t2);
+    tcg_gen_or_tl(tcg_ctx, t0, t0, t2);
+    gen_store_spr(tcg_ctx, SPR_MQ, t0);
+    /* A shift amount of 32 or more (rB bit 0x20) yields all sign bits */
+    tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t0, 0, l1);
+    tcg_gen_mov_tl(tcg_ctx, t2, cpu_gpr[rS(ctx->opcode)]);
+    tcg_gen_sari_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], 31);
+    gen_set_label(tcg_ctx, l1);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t1);
+    tcg_gen_movi_tl(tcg_ctx, cpu_ca, 0);
+    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l2);
+    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t2, 0, l2);
+    tcg_gen_movi_tl(tcg_ctx, cpu_ca, 1);
+    gen_set_label(tcg_ctx, l2);
+    tcg_temp_free(tcg_ctx, t1);
+    tcg_temp_free(tcg_ctx, t2);
+    if (unlikely(Rc(ctx->opcode) != 0)) {
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+    }
+}
+
+/* sre - sre.
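Shift right extended: rA = rS >> n while MQ receives the full rotated value, so the bits shifted out are preserved.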
*/ +static void gen_sre(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1F); + tcg_gen_shr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t1); + tcg_gen_subfi_tl(tcg_ctx, t1, 32, t1); + tcg_gen_shl_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], t1); + tcg_gen_or_tl(tcg_ctx, t1, t0, t1); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); + gen_store_spr(tcg_ctx, SPR_MQ, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* srea - srea. */ +static void gen_srea(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1F); + tcg_gen_rotr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t1); + gen_store_spr(tcg_ctx, SPR_MQ, t0); + tcg_gen_sar_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* sreq */ +static void gen_sreq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x1F); + tcg_gen_movi_tl(tcg_ctx, t1, 0xFFFFFFFF); + tcg_gen_shr_tl(tcg_ctx, t1, t1, t0); + tcg_gen_rotr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t0); + gen_load_spr(tcg_ctx, t2, SPR_MQ); + gen_store_spr(tcg_ctx, SPR_MQ, t0); + tcg_gen_and_tl(tcg_ctx, t0, t0, t1); + tcg_gen_andc_tl(tcg_ctx, t2, t2, t1); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t2); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* sriq */ +static void gen_sriq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int sh = SH(ctx->opcode); + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_shri_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], sh); + tcg_gen_shli_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], 32 - sh); + tcg_gen_or_tl(tcg_ctx, t1, t0, t1); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); + gen_store_spr(tcg_ctx, SPR_MQ, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* srliq */ +static void gen_srliq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int sh = SH(ctx->opcode); + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + tcg_gen_rotri_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], sh); + gen_load_spr(tcg_ctx, t1, SPR_MQ); + gen_store_spr(tcg_ctx, SPR_MQ, t0); + tcg_gen_andi_tl(tcg_ctx, t0, t0, (0xFFFFFFFFU >> sh)); + tcg_gen_andi_tl(tcg_ctx, t1, t1, ~(0xFFFFFFFFU >> sh)); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); + } +} + +/* srlq */ +static void gen_srlq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGv t0 = 
tcg_temp_local_new(tcg_ctx);
+    TCGv t1 = tcg_temp_local_new(tcg_ctx);
+    TCGv t2 = tcg_temp_local_new(tcg_ctx);
+    tcg_gen_andi_tl(tcg_ctx, t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+    tcg_gen_movi_tl(tcg_ctx, t1, 0xFFFFFFFF);
+    tcg_gen_shr_tl(tcg_ctx, t2, t1, t2);
+    tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t0, 0, l1);
+    gen_load_spr(tcg_ctx, t0, SPR_MQ);
+    tcg_gen_and_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t2);
+    tcg_gen_br(tcg_ctx, l2);
+    gen_set_label(tcg_ctx, l1);
+    tcg_gen_shr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t2);
+    tcg_gen_and_tl(tcg_ctx, t0, t0, t2);
+    gen_load_spr(tcg_ctx, t1, SPR_MQ);
+    tcg_gen_andc_tl(tcg_ctx, t1, t1, t2);
+    tcg_gen_or_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0, t1);
+    gen_set_label(tcg_ctx, l2);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free(tcg_ctx, t1);
+    tcg_temp_free(tcg_ctx, t2);
+    if (unlikely(Rc(ctx->opcode) != 0)) {
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+    }
+}
+
+/* srq */
+static void gen_srq(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGLabel *l1 = gen_new_label(tcg_ctx);
+    TCGv t0 = tcg_temp_new(tcg_ctx);
+    TCGv t1 = tcg_temp_new(tcg_ctx);
+    tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+    tcg_gen_shr_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], t1);
+    tcg_gen_subfi_tl(tcg_ctx, t1, 32, t1);
+    tcg_gen_shl_tl(tcg_ctx, t1, cpu_gpr[rS(ctx->opcode)], t1);
+    tcg_gen_or_tl(tcg_ctx, t1, t0, t1);
+    gen_store_spr(tcg_ctx, SPR_MQ, t1);
+    tcg_gen_andi_tl(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)], 0x20);
+    tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0);
+    /* A shift amount of 32 or more (rB bit 0x20) zeroes the result */
+    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1);
+    tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], 0);
+    gen_set_label(tcg_ctx, l1);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free(tcg_ctx, t1);
+    if (unlikely(Rc(ctx->opcode) != 0)) {
+        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+    }
+}
+
+/* PowerPC 602 specific instructions */
+
+/* dsa */
+static void gen_dsa(DisasContext *ctx)
+{
+    /* XXX: TODO */
+    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* esa */
+static void gen_esa(DisasContext *ctx)
+{
+    /* XXX: TODO */
+    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* mfrom */
+static void gen_mfrom(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    gen_helper_602_mfrom(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* 602 - 603 - G2 TLB management */
+
+/* tlbld */
+static void gen_tlbld_6xx(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    gen_helper_6xx_tlbd(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]);
+}
+
+/* tlbli */
+static void gen_tlbli_6xx(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    gen_helper_6xx_tlbi(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]);
+}
+
+/* 74xx TLB management */
+
+/* tlbld */
+static void gen_tlbld_74xx(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    gen_helper_74xx_tlbd(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]);
+}
+
+/* tlbli */
+static void gen_tlbli_74xx(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    gen_helper_74xx_tlbi(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]);
+}
+
+/* POWER instructions not in PowerPC 601 */
+
+/* clf */
+static void gen_clf(DisasContext *ctx)
+{
+    /* Cache line flush: implemented as no-op */
+}
+
+/* cli */
+static void gen_cli(DisasContext *ctx)
+{
+    /* Cache line invalidate: privileged and treated as no-op */
+    CHK_SV;
+}
+
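+/*
+ * Note: TCG does not model the caches, so these POWER cache-control
+ * instructions (clf, cli above, dclst below) only perform their
+ * privilege checks; the cache operation itself has no architecturally
+ * visible effect under emulation.
+ */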
+/* dclst */ +static void gen_dclst(DisasContext *ctx) +{ + /* Data cache line store: treated as no-op */ +} + +static void gen_mfsri(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int ra = rA(ctx->opcode); + int rd = rD(ctx->opcode); + TCGv t0; + + CHK_SV; + t0 = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, t0); + tcg_gen_extract_tl(tcg_ctx, t0, t0, 28, 4); + gen_helper_load_sr(tcg_ctx, cpu_gpr[rd], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); + if (ra != 0 && ra != rd) { + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], cpu_gpr[rd]); + } +} + +static void gen_rac(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + CHK_SV; + t0 = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, t0); + gen_helper_rac(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_rfsvc(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + gen_helper_rfsvc(tcg_ctx, tcg_ctx->cpu_env); + gen_sync_exception(ctx); +} + +/* svc is not implemented for now */ + +/* BookE specific instructions */ + +/* XXX: not implemented on 440 ? */ +static void gen_mfapidi(DisasContext *ctx) +{ + /* XXX: TODO */ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); +} + +/* XXX: not implemented on 440 ? */ +static void gen_tlbiva(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + CHK_SV; + t0 = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, t0); + gen_helper_tlbiva(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); + tcg_temp_free(tcg_ctx, t0); +} + +/* All 405 MAC instructions are translated here */ +static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3, + int ra, int rb, int rt, int Rc) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0, t1; + + t0 = tcg_temp_local_new(tcg_ctx); + t1 = tcg_temp_local_new(tcg_ctx); + + switch (opc3 & 0x0D) { + case 0x05: + /* macchw - macchw. - macchwo - macchwo. */ + /* macchws - macchws. - macchwso - macchwso. */ + /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */ + /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */ + /* mulchw - mulchw. */ + tcg_gen_ext16s_tl(tcg_ctx, t0, cpu_gpr[ra]); + tcg_gen_sari_tl(tcg_ctx, t1, cpu_gpr[rb], 16); + tcg_gen_ext16s_tl(tcg_ctx, t1, t1); + break; + case 0x04: + /* macchwu - macchwu. - macchwuo - macchwuo. */ + /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */ + /* mulchwu - mulchwu. */ + tcg_gen_ext16u_tl(tcg_ctx, t0, cpu_gpr[ra]); + tcg_gen_shri_tl(tcg_ctx, t1, cpu_gpr[rb], 16); + tcg_gen_ext16u_tl(tcg_ctx, t1, t1); + break; + case 0x01: + /* machhw - machhw. - machhwo - machhwo. */ + /* machhws - machhws. - machhwso - machhwso. */ + /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */ + /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */ + /* mulhhw - mulhhw. */ + tcg_gen_sari_tl(tcg_ctx, t0, cpu_gpr[ra], 16); + tcg_gen_ext16s_tl(tcg_ctx, t0, t0); + tcg_gen_sari_tl(tcg_ctx, t1, cpu_gpr[rb], 16); + tcg_gen_ext16s_tl(tcg_ctx, t1, t1); + break; + case 0x00: + /* machhwu - machhwu. - machhwuo - machhwuo. */ + /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */ + /* mulhhwu - mulhhwu. */ + tcg_gen_shri_tl(tcg_ctx, t0, cpu_gpr[ra], 16); + tcg_gen_ext16u_tl(tcg_ctx, t0, t0); + tcg_gen_shri_tl(tcg_ctx, t1, cpu_gpr[rb], 16); + tcg_gen_ext16u_tl(tcg_ctx, t1, t1); + break; + case 0x0D: + /* maclhw - maclhw. - maclhwo - maclhwo. */ + /* maclhws - maclhws. - maclhwso - maclhwso. */ + /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */ + /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. 
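All of these use the sign-extended low halfwords of rA and rB as operands.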
*/ + /* mullhw - mullhw. */ + tcg_gen_ext16s_tl(tcg_ctx, t0, cpu_gpr[ra]); + tcg_gen_ext16s_tl(tcg_ctx, t1, cpu_gpr[rb]); + break; + case 0x0C: + /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */ + /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */ + /* mullhwu - mullhwu. */ + tcg_gen_ext16u_tl(tcg_ctx, t0, cpu_gpr[ra]); + tcg_gen_ext16u_tl(tcg_ctx, t1, cpu_gpr[rb]); + break; + } + if (opc2 & 0x04) { + /* (n)multiply-and-accumulate (0x0C / 0x0E) */ + tcg_gen_mul_tl(tcg_ctx, t1, t0, t1); + if (opc2 & 0x02) { + /* nmultiply-and-accumulate (0x0E) */ + tcg_gen_sub_tl(tcg_ctx, t0, cpu_gpr[rt], t1); + } else { + /* multiply-and-accumulate (0x0C) */ + tcg_gen_add_tl(tcg_ctx, t0, cpu_gpr[rt], t1); + } + + if (opc3 & 0x12) { + /* Check overflow and/or saturate */ + TCGLabel *l1 = gen_new_label(tcg_ctx); + + if (opc3 & 0x10) { + /* Start with XER OV disabled, the most likely case */ + tcg_gen_movi_tl(tcg_ctx, cpu_ov, 0); + } + if (opc3 & 0x01) { + /* Signed */ + tcg_gen_xor_tl(tcg_ctx, t1, cpu_gpr[rt], t1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_gen_xor_tl(tcg_ctx, t1, cpu_gpr[rt], t0); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_LT, t1, 0, l1); + if (opc3 & 0x02) { + /* Saturate */ + tcg_gen_sari_tl(tcg_ctx, t0, cpu_gpr[rt], 31); + tcg_gen_xori_tl(tcg_ctx, t0, t0, 0x7fffffff); + } + } else { + /* Unsigned */ + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GEU, t0, t1, l1); + if (opc3 & 0x02) { + /* Saturate */ + tcg_gen_movi_tl(tcg_ctx, t0, UINT32_MAX); + } + } + if (opc3 & 0x10) { + /* Check overflow */ + tcg_gen_movi_tl(tcg_ctx, cpu_ov, 1); + tcg_gen_movi_tl(tcg_ctx, cpu_so, 1); + } + gen_set_label(tcg_ctx, l1); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rt], t0); + } + } else { + tcg_gen_mul_tl(tcg_ctx, cpu_gpr[rt], t0, t1); + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + if (unlikely(Rc) != 0) { + /* Update Rc0 */ + gen_set_Rc0(ctx, cpu_gpr[rt]); + } +} + +#define GEN_MAC_HANDLER(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode), \ + rD(ctx->opcode), Rc(ctx->opcode)); \ +} + +/* macchw - macchw. */ +GEN_MAC_HANDLER(macchw, 0x0C, 0x05); +/* macchwo - macchwo. */ +GEN_MAC_HANDLER(macchwo, 0x0C, 0x15); +/* macchws - macchws. */ +GEN_MAC_HANDLER(macchws, 0x0C, 0x07); +/* macchwso - macchwso. */ +GEN_MAC_HANDLER(macchwso, 0x0C, 0x17); +/* macchwsu - macchwsu. */ +GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06); +/* macchwsuo - macchwsuo. */ +GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16); +/* macchwu - macchwu. */ +GEN_MAC_HANDLER(macchwu, 0x0C, 0x04); +/* macchwuo - macchwuo. */ +GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14); +/* machhw - machhw. */ +GEN_MAC_HANDLER(machhw, 0x0C, 0x01); +/* machhwo - machhwo. */ +GEN_MAC_HANDLER(machhwo, 0x0C, 0x11); +/* machhws - machhws. */ +GEN_MAC_HANDLER(machhws, 0x0C, 0x03); +/* machhwso - machhwso. */ +GEN_MAC_HANDLER(machhwso, 0x0C, 0x13); +/* machhwsu - machhwsu. */ +GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02); +/* machhwsuo - machhwsuo. */ +GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12); +/* machhwu - machhwu. */ +GEN_MAC_HANDLER(machhwu, 0x0C, 0x00); +/* machhwuo - machhwuo. */ +GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10); +/* maclhw - maclhw. */ +GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D); +/* maclhwo - maclhwo. */ +GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D); +/* maclhws - maclhws. */ +GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F); +/* maclhwso - maclhwso. */ +GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F); +/* maclhwu - maclhwu. */ +GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C); +/* maclhwuo - maclhwuo. 
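This expands (via GEN_MAC_HANDLER above) to a gen_maclhwuo() stub calling gen_405_mulladd_insn(ctx, 0x0C, 0x1C, ...); in opc3, bit 0x10 selects the XER[OV]/[SO]-updating "o" form and the Rc field selects the CR0-updating "." form.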
*/ +GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C); +/* maclhwsu - maclhwsu. */ +GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E); +/* maclhwsuo - maclhwsuo. */ +GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E); +/* nmacchw - nmacchw. */ +GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05); +/* nmacchwo - nmacchwo. */ +GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15); +/* nmacchws - nmacchws. */ +GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07); +/* nmacchwso - nmacchwso. */ +GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17); +/* nmachhw - nmachhw. */ +GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01); +/* nmachhwo - nmachhwo. */ +GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11); +/* nmachhws - nmachhws. */ +GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03); +/* nmachhwso - nmachhwso. */ +GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13); +/* nmaclhw - nmaclhw. */ +GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D); +/* nmaclhwo - nmaclhwo. */ +GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D); +/* nmaclhws - nmaclhws. */ +GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F); +/* nmaclhwso - nmaclhwso. */ +GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F); + +/* mulchw - mulchw. */ +GEN_MAC_HANDLER(mulchw, 0x08, 0x05); +/* mulchwu - mulchwu. */ +GEN_MAC_HANDLER(mulchwu, 0x08, 0x04); +/* mulhhw - mulhhw. */ +GEN_MAC_HANDLER(mulhhw, 0x08, 0x01); +/* mulhhwu - mulhhwu. */ +GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00); +/* mullhw - mullhw. */ +GEN_MAC_HANDLER(mullhw, 0x08, 0x0D); +/* mullhwu - mullhwu. */ +GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C); + +/* mfdcr */ +static void gen_mfdcr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv dcrn; + + CHK_SV; + dcrn = tcg_const_tl(tcg_ctx, SPR(ctx->opcode)); + gen_helper_load_dcr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, dcrn); + tcg_temp_free(tcg_ctx, dcrn); +} + +/* mtdcr */ +static void gen_mtdcr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv dcrn; + + CHK_SV; + dcrn = tcg_const_tl(tcg_ctx, SPR(ctx->opcode)); + gen_helper_store_dcr(tcg_ctx, tcg_ctx->cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]); + tcg_temp_free(tcg_ctx, dcrn); +} + +/* mfdcrx */ +/* XXX: not implemented on 440 ? */ +static void gen_mfdcrx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + gen_helper_load_dcr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, + cpu_gpr[rA(ctx->opcode)]); + /* Note: Rc update flag set leads to undefined state of Rc0 */ +} + +/* mtdcrx */ +/* XXX: not implemented on 440 ? 
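Unlike mtdcr, which encodes the DCR number in the instruction's SPR field, mtdcrx takes the DCR number from rA at run time.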
 */
+static void gen_mtdcrx(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    gen_helper_store_dcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)],
+                         cpu_gpr[rS(ctx->opcode)]);
+    /* Note: Rc update flag set leads to undefined state of Rc0 */
+}
+
+/* mfdcrux (PPC 460): user-mode access to DCR */
+static void gen_mfdcrux(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    gen_helper_load_dcr(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env,
+                        cpu_gpr[rA(ctx->opcode)]);
+    /* Note: Rc update flag set leads to undefined state of Rc0 */
+}
+
+/* mtdcrux (PPC 460): user-mode access to DCR */
+static void gen_mtdcrux(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    gen_helper_store_dcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)],
+                         cpu_gpr[rS(ctx->opcode)]);
+    /* Note: Rc update flag set leads to undefined state of Rc0 */
+}
+
+/* dccci */
+static void gen_dccci(DisasContext *ctx)
+{
+    CHK_SV;
+    /* interpreted as no-op */
+}
+
+/* dcread */
+static void gen_dcread(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv EA, val;
+
+    CHK_SV;
+    gen_set_access_type(ctx, ACCESS_CACHE);
+    EA = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, EA);
+    val = tcg_temp_new(tcg_ctx);
+    gen_qemu_ld32u(ctx, val, EA);
+    tcg_temp_free(tcg_ctx, val);
+    tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], EA);
+    tcg_temp_free(tcg_ctx, EA);
+}
+
+/* icbt */
+static void gen_icbt_40x(DisasContext *ctx)
+{
+    /*
+     * interpreted as no-op
+     * XXX: the specification says this is treated as a load by the MMU
+     * but does not generate any exception
+     */
+}
+
+/* iccci */
+static void gen_iccci(DisasContext *ctx)
+{
+    CHK_SV;
+    /* interpreted as no-op */
+}
+
+/* icread */
+static void gen_icread(DisasContext *ctx)
+{
+    CHK_SV;
+    /* interpreted as no-op */
+}
+
+/* rfci (supervisor only) */
+static void gen_rfci_40x(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    /* Restore CPU state */
+    gen_helper_40x_rfci(tcg_ctx, tcg_ctx->cpu_env);
+    gen_sync_exception(ctx);
+}
+
+static void gen_rfci(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    /* Restore CPU state */
+    gen_helper_rfci(tcg_ctx, tcg_ctx->cpu_env);
+    gen_sync_exception(ctx);
+}
+
+/* BookE specific */
+
+/* XXX: not implemented on 440 ? */
+static void gen_rfdi(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    /* Restore CPU state */
+    gen_helper_rfdi(tcg_ctx, tcg_ctx->cpu_env);
+    gen_sync_exception(ctx);
+}
+
+/* XXX: not implemented on 440 ? */
+static void gen_rfmci(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    /* Restore CPU state */
+    gen_helper_rfmci(tcg_ctx, tcg_ctx->cpu_env);
+    gen_sync_exception(ctx);
+}
+
+/* TLB management - PowerPC 405 implementation */
+
+/* tlbre */
+static void gen_tlbre_40x(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    switch (rB(ctx->opcode)) {
+    case 0:
+        gen_helper_4xx_tlbre_hi(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env,
+                                cpu_gpr[rA(ctx->opcode)]);
+        break;
+    case 1:
+        gen_helper_4xx_tlbre_lo(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env,
+                                cpu_gpr[rA(ctx->opcode)]);
+        break;
+    default:
+        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+        break;
+    }
+}
+
+/* tlbsx - tlbsx.
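Searches the TLB for the computed effective address; when Rc is set, CR0 is built from XER[SO] plus the EQ bit, which is set only if the helper found a matching entry (rD != -1).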
*/ +static void gen_tlbsx_40x(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + CHK_SV; + t0 = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, t0); + gen_helper_4xx_tlbsx(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); + if (Rc(ctx->opcode)) { + TCGLabel *l1 = gen_new_label(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1); + tcg_gen_ori_i32(tcg_ctx, cpu_crf[0], cpu_crf[0], 0x02); + gen_set_label(tcg_ctx, l1); + } +} + +/* tlbwe */ +static void gen_tlbwe_40x(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + switch (rB(ctx->opcode)) { + case 0: + gen_helper_4xx_tlbwe_hi(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)]); + break; + case 1: + gen_helper_4xx_tlbwe_lo(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)]); + break; + default: + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + break; + } +} + +/* TLB management - PowerPC 440 implementation */ + +/* tlbre */ +static void gen_tlbre_440(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + + switch (rB(ctx->opcode)) { + case 0: + case 1: + case 2: + { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, rB(ctx->opcode)); + gen_helper_440_tlbre(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, + t0, cpu_gpr[rA(ctx->opcode)]); + tcg_temp_free_i32(tcg_ctx, t0); + } + break; + default: + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + break; + } +} + +/* tlbsx - tlbsx. */ +static void gen_tlbsx_440(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + CHK_SV; + t0 = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, t0); + gen_helper_440_tlbsx(tcg_ctx, cpu_gpr[rD(ctx->opcode)], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); + if (Rc(ctx->opcode)) { + TCGLabel *l1 = gen_new_label(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[0], cpu_so); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1); + tcg_gen_ori_i32(tcg_ctx, cpu_crf[0], cpu_crf[0], 0x02); + gen_set_label(tcg_ctx, l1); + } +} + +/* tlbwe */ +static void gen_tlbwe_440(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + switch (rB(ctx->opcode)) { + case 0: + case 1: + case 2: + { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, rB(ctx->opcode)); + gen_helper_440_tlbwe(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rS(ctx->opcode)]); + tcg_temp_free_i32(tcg_ctx, t0); + } + break; + default: + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + break; + } +} + +/* TLB management - PowerPC BookE 2.06 implementation */ + +/* tlbre */ +static void gen_tlbre_booke206(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + gen_helper_booke206_tlbre(tcg_ctx, tcg_ctx->cpu_env); +} + +/* tlbsx - tlbsx. 
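In the BookE 2.06 MMU the search result is delivered through the MAS registers rather than a GPR, so the helper below only needs the effective address.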
 */
+static void gen_tlbsx_booke206(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+
+    CHK_SV;
+    /* EA = (rA|0) + rB */
+    if (rA(ctx->opcode)) {
+        t0 = tcg_temp_new(tcg_ctx);
+        tcg_gen_mov_tl(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]);
+    } else {
+        t0 = tcg_const_tl(tcg_ctx, 0);
+    }
+
+    tcg_gen_add_tl(tcg_ctx, t0, t0, cpu_gpr[rB(ctx->opcode)]);
+    gen_helper_booke206_tlbsx(tcg_ctx, tcg_ctx->cpu_env, t0);
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+/* tlbwe */
+static void gen_tlbwe_booke206(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    gen_helper_booke206_tlbwe(tcg_ctx, tcg_ctx->cpu_env);
+}
+
+static void gen_tlbivax_booke206(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+
+    CHK_SV;
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, t0);
+    gen_helper_booke206_tlbivax(tcg_ctx, tcg_ctx->cpu_env, t0);
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+static void gen_tlbilx_booke206(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+
+    CHK_SV;
+    t0 = tcg_temp_new(tcg_ctx);
+    gen_addr_reg_index(ctx, t0);
+
+    switch ((ctx->opcode >> 21) & 0x3) {
+    case 0:
+        gen_helper_booke206_tlbilx0(tcg_ctx, tcg_ctx->cpu_env, t0);
+        break;
+    case 1:
+        gen_helper_booke206_tlbilx1(tcg_ctx, tcg_ctx->cpu_env, t0);
+        break;
+    case 3:
+        gen_helper_booke206_tlbilx3(tcg_ctx, tcg_ctx->cpu_env, t0);
+        break;
+    default:
+        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+        break;
+    }
+
+    tcg_temp_free(tcg_ctx, t0);
+}
+
+
+/* wrtee */
+static void gen_wrtee(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0;
+
+    CHK_SV;
+    t0 = tcg_temp_new(tcg_ctx);
+    tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
+    tcg_gen_andi_tl(tcg_ctx, cpu_msr, cpu_msr, ~(1 << MSR_EE));
+    tcg_gen_or_tl(tcg_ctx, cpu_msr, cpu_msr, t0);
+    tcg_temp_free(tcg_ctx, t0);
+    /*
+     * Stop translation to have a chance to raise an exception if we
+     * just set msr_ee to 1
+     */
+    gen_stop_exception(ctx);
+}
+
+/* wrteei */
+static void gen_wrteei(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_SV;
+    if (ctx->opcode & 0x00008000) {
+        tcg_gen_ori_tl(tcg_ctx, cpu_msr, cpu_msr, (1 << MSR_EE));
+        /* Stop translation to have a chance to raise an exception */
+        gen_stop_exception(ctx);
+    } else {
+        tcg_gen_andi_tl(tcg_ctx, cpu_msr, cpu_msr, ~(1 << MSR_EE));
+    }
+}
+
+/* PowerPC 440 specific instructions */
+
+/* dlmzb */
+static void gen_dlmzb(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, Rc(ctx->opcode));
+    gen_helper_dlmzb(tcg_ctx, cpu_gpr[rA(ctx->opcode)], tcg_ctx->cpu_env,
+                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
+    tcg_temp_free_i32(tcg_ctx, t0);
+}
+
+/* mbar replaces eieio on 440 */
+static void gen_mbar(DisasContext *ctx)
+{
+    /* interpreted as no-op */
+}
+
+/* msync replaces sync on 440 */
+static void gen_msync_4xx(DisasContext *ctx)
+{
+    /* Only e500 seems to treat reserved bits as invalid */
+    if ((ctx->insns_flags2 & PPC2_BOOKE206) &&
+        (ctx->opcode & 0x03FFF801)) {
+        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+    }
+    /* otherwise interpreted as no-op */
+}
+
+/* icbt */
+static void gen_icbt_440(DisasContext *ctx)
+{
+    /*
+     * interpreted as no-op
+     * XXX: the specification says this is treated as a load by the MMU
+     * but does not generate any exception
+     */
+}
+
+/* Embedded Processor Control */
+
+static void gen_msgclr(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    CHK_HV;
+    if (is_book3s_arch2x(ctx)) {
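+        /* Book3S v2.x cores implement msgclr as a doorbell operation with its own helper */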
gen_helper_book3s_msgclr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); + } else { + gen_helper_msgclr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); + } +} + +static void gen_msgsnd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_HV; + if (is_book3s_arch2x(ctx)) { + gen_helper_book3s_msgsnd(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); + } else { + gen_helper_msgsnd(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); + } +} + +#if defined(TARGET_PPC64) +static void gen_msgclrp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + gen_helper_book3s_msgclrp(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); +} + +static void gen_msgsndp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CHK_SV; + gen_helper_book3s_msgsndp(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[rB(ctx->opcode)]); +} +#endif + +static void gen_msgsync(DisasContext *ctx) +{ + CHK_HV; + /* interpreted as no-op */ +} + +#if defined(TARGET_PPC64) +static void gen_maddld(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_mul_i64(tcg_ctx, t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + tcg_gen_add_i64(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* maddhd maddhdu */ +static void gen_maddhd_maddhdu(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 lo = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 hi = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + if (Rc(ctx->opcode)) { + tcg_gen_mulu2_i64(tcg_ctx, lo, hi, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); + tcg_gen_movi_i64(tcg_ctx, t1, 0); + } else { + tcg_gen_muls2_i64(tcg_ctx, lo, hi, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rB(ctx->opcode)]); + tcg_gen_sari_i64(tcg_ctx, t1, cpu_gpr[rC(ctx->opcode)], 63); + } + tcg_gen_add2_i64(tcg_ctx, t1, cpu_gpr[rD(ctx->opcode)], lo, hi, + cpu_gpr[rC(ctx->opcode)], t1); + tcg_temp_free_i64(tcg_ctx, lo); + tcg_temp_free_i64(tcg_ctx, hi); + tcg_temp_free_i64(tcg_ctx, t1); +} +#endif /* defined(TARGET_PPC64) */ + +static void gen_tbegin(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->tm_enabled)) { + gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); + return; + } + gen_helper_tbegin(tcg_ctx, tcg_ctx->cpu_env); +} + +#define GEN_TM_NOOP(name) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + if (unlikely(!ctx->tm_enabled)) { \ + gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \ + return; \ + } \ + /* \ + * Because tbegin always fails in QEMU, these user \ + * space instructions all have a simple implementation: \ + * \ + * CR[0] = 0b0 || MSR[TS] || 0b0 \ + * = 0b0 || 0b00 || 0b0 \ + */ \ + tcg_gen_movi_i32(tcg_ctx, cpu_crf[0], 0); \ +} + +GEN_TM_NOOP(tend); +GEN_TM_NOOP(tabort); +GEN_TM_NOOP(tabortwc); +GEN_TM_NOOP(tabortwci); +GEN_TM_NOOP(tabortdc); +GEN_TM_NOOP(tabortdci); +GEN_TM_NOOP(tsr); + +static inline void gen_cp_abort(DisasContext *ctx) +{ + /* Do Nothing */ +} + +#define GEN_CP_PASTE_NOOP(name) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + /* \ + * Generate invalid exception until we have an \ + * implementation of the copy paste facility \ + */ \ + gen_invalid(ctx); \ +} + +GEN_CP_PASTE_NOOP(copy) +GEN_CP_PASTE_NOOP(paste) + +static void gen_tcheck(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->tm_enabled)) { + 
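/* TM facility unavailable: raise the facility-unavailable interrupt */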
gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
+        return;
+    }
+    /*
+     * Because tbegin always fails, the tcheck implementation is
+     * simple:
+     *
+     * CR[CRF] = TDOOMED || MSR[TS] || 0b0
+     *         = 0b1 || 0b00 || 0b0
+     */
+    tcg_gen_movi_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], 0x8);
+}
+
+#define GEN_TM_PRIV_NOOP(name)                                         \
+static inline void gen_##name(DisasContext *ctx)                       \
+{                                                                      \
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;                            \
+    CHK_SV;                                                            \
+    if (unlikely(!ctx->tm_enabled)) {                                  \
+        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);           \
+        return;                                                        \
+    }                                                                  \
+    /*                                                                 \
+     * Because tbegin always fails, the implementation is              \
+     * simple:                                                         \
+     *                                                                 \
+     *   CR[0] = 0b0 || MSR[TS] || 0b0                                 \
+     *         = 0b0 || 0b00 || 0b0                                    \
+     */                                                                \
+    tcg_gen_movi_i32(tcg_ctx, cpu_crf[0], 0);                          \
+}
+
+GEN_TM_PRIV_NOOP(treclaim);
+GEN_TM_PRIV_NOOP(trechkpt);
+
+static inline void get_fpr(TCGContext *tcg_ctx, TCGv_i64 dst, int regno)
+{
+    tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, fpr_offset(regno));
+}
+
+static inline void set_fpr(TCGContext *tcg_ctx, int regno, TCGv_i64 src)
+{
+    tcg_gen_st_i64(tcg_ctx, src, tcg_ctx->cpu_env, fpr_offset(regno));
+}
+
+static inline void get_avr64(TCGContext *tcg_ctx, TCGv_i64 dst, int regno, bool high)
+{
+    tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, avr64_offset(regno, high));
+}
+
+static inline void set_avr64(TCGContext *tcg_ctx, int regno, TCGv_i64 src, bool high)
+{
+    tcg_gen_st_i64(tcg_ctx, src, tcg_ctx->cpu_env, avr64_offset(regno, high));
+}
+
+#include "translate/fp-impl.inc.c"
+
+#include "translate/vmx-impl.inc.c"
+
+#include "translate/vsx-impl.inc.c"
+
+#include "translate/dfp-impl.inc.c"
+
+#include "translate/spe-impl.inc.c"
+
+/* Handles lfdp, lxsd, lxssp */
+static void gen_dform39(DisasContext *ctx)
+{
+    switch (ctx->opcode & 0x3) {
+    case 0: /* lfdp */
+        if (ctx->insns_flags2 & PPC2_ISA205) {
+            return gen_lfdp(ctx);
+        }
+        break;
+    case 2: /* lxsd */
+        if (ctx->insns_flags2 & PPC2_ISA300) {
+            return gen_lxsd(ctx);
+        }
+        break;
+    case 3: /* lxssp */
+        if (ctx->insns_flags2 & PPC2_ISA300) {
+            return gen_lxssp(ctx);
+        }
+        break;
+    }
+    return gen_invalid(ctx);
+}
+
+/* handles stfdp, lxv, stxsd, stxssp, stxv */
+static void gen_dform3D(DisasContext *ctx)
+{
+    if ((ctx->opcode & 3) == 1) { /* DQ-FORM */
+        switch (ctx->opcode & 0x7) {
+        case 1: /* lxv */
+            if (ctx->insns_flags2 & PPC2_ISA300) {
+                return gen_lxv(ctx);
+            }
+            break;
+        case 5: /* stxv */
+            if (ctx->insns_flags2 & PPC2_ISA300) {
+                return gen_stxv(ctx);
+            }
+            break;
+        }
+    } else { /* DS-FORM */
+        switch (ctx->opcode & 0x3) {
+        case 0: /* stfdp */
+            if (ctx->insns_flags2 & PPC2_ISA205) {
+                return gen_stfdp(ctx);
+            }
+            break;
+        case 2: /* stxsd */
+            if (ctx->insns_flags2 & PPC2_ISA300) {
+                return gen_stxsd(ctx);
+            }
+            break;
+        case 3: /* stxssp */
+            if (ctx->insns_flags2 & PPC2_ISA300) {
+                return gen_stxssp(ctx);
+            }
+            break;
+        }
+    }
+    return gen_invalid(ctx);
+}
+
+static opcode_t opcodes[] = {
+GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
+GEN_HANDLER(cmp, 0x1F, 0x00, 0x00, 0x00400000, PPC_INTEGER),
+GEN_HANDLER(cmpi, 0x0B, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
+GEN_HANDLER(cmpl, 0x1F, 0x00, 0x01, 0x00400001, PPC_INTEGER),
+GEN_HANDLER(cmpli, 0x0A, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
+#endif
+GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
+GEN_HANDLER(addi, 0x0E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(addis, 0x0F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER_E(addpcis, 0x13, 0x2, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), +GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER), +GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER), +GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER), +GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER), +GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +#if defined(TARGET_PPC64) +GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B), +#endif +GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER), +GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER), +GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER), +GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300), +GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER), +GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER), +GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB), +GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD), +GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205), +#if defined(TARGET_PPC64) +GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD), +GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B), +GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205), +GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206), +#endif +GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER), +GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER), +GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER), +GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER), +#if defined(TARGET_PPC64) +GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B), +GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B), +GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B), +GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B), +GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B), +GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000, + PPC_NONE, PPC2_ISA300), +GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000, + PPC_NONE, PPC2_ISA300), +#endif +#if defined(TARGET_PPC64) 
+GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B), +GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX), +GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B), +#endif +/* handles lfdp, lxsd, lxssp */ +GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), +/* handles stfdp, lxv, stxsd, stxssp, stxv */ +GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), +GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), +GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING), +GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING), +GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING), +GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING), +GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO), +GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM), +GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206), +GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206), +GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES), +GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206), +GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206), +GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES), +#if defined(TARGET_PPC64) +GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B), +GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207), +GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B), +GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207), +#endif +GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC), +GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT), +GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039FF801, PPC_NONE, PPC2_ISA300), +GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW), +GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW), +GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW), +GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW), +GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207), +GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER), +GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW), +#if defined(TARGET_PPC64) +GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B), +GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), +GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), +GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), +GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206), +GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H), +#endif +GEN_HANDLER(sc, 0x11, 0xFF, 0xFF, 0x03FFF01D, PPC_FLOW), +GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW), +GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW), +#if defined(TARGET_PPC64) +GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B), +GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B), +#endif +GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC), +GEN_HANDLER(mfcr, 0x1F, 
0x13, 0x00, 0x00000801, PPC_MISC), +GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC), +GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC), +GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB), +GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC), +#if defined(TARGET_PPC64) +GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B), +GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300), +#endif +GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC), +GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC), +GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE), +GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE), +GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE), +GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE), +GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE), +GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206), +GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ), +GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC), +GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC), +GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC), +GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI), +GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA), +GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT), +GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT), +GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT), +GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT), +#if defined(TARGET_PPC64) +GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B), +GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001, + PPC_SEGMENT_64B), +GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B), +GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001, + PPC_SEGMENT_64B), +GEN_HANDLER2(slbmte, "slbmte", 0x1F, 0x12, 0x0C, 0x001F0001, PPC_SEGMENT_64B), +GEN_HANDLER2(slbmfee, "slbmfee", 0x1F, 0x13, 0x1C, 0x001F0001, PPC_SEGMENT_64B), +GEN_HANDLER2(slbmfev, "slbmfev", 0x1F, 0x13, 0x1A, 0x001F0001, PPC_SEGMENT_64B), +GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B), +#endif +GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA), +/* + * XXX Those instructions will need to be handled differently for + * different ISA versions + */ +GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE), +GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE), +GEN_HANDLER_E(tlbiel, 0x1F, 0x12, 0x08, 0x00100001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(tlbie, 0x1F, 0x12, 0x09, 0x00100001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC), +#if defined(TARGET_PPC64) +GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI), +GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI), +GEN_HANDLER_E(slbieg, 0x1F, 
0x12, 0x0E, 0x001F0001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(slbsync, 0x1F, 0x12, 0x0A, 0x03FFF801, PPC_NONE, PPC2_ISA300), +#endif +GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN), +GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN), +GEN_HANDLER(abs, 0x1F, 0x08, 0x0B, 0x0000F800, PPC_POWER_BR), +GEN_HANDLER(abso, 0x1F, 0x08, 0x1B, 0x0000F800, PPC_POWER_BR), +GEN_HANDLER(clcs, 0x1F, 0x10, 0x13, 0x0000F800, PPC_POWER_BR), +GEN_HANDLER(div, 0x1F, 0x0B, 0x0A, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(divo, 0x1F, 0x0B, 0x1A, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(divs, 0x1F, 0x0B, 0x0B, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(divso, 0x1F, 0x0B, 0x1B, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(doz, 0x1F, 0x08, 0x08, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(dozo, 0x1F, 0x08, 0x18, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(dozi, 0x09, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(lscbx, 0x1F, 0x15, 0x08, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(maskg, 0x1F, 0x1D, 0x00, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(maskir, 0x1F, 0x1D, 0x10, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(mul, 0x1F, 0x0B, 0x03, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(mulo, 0x1F, 0x0B, 0x13, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(nabs, 0x1F, 0x08, 0x0F, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(nabso, 0x1F, 0x08, 0x1F, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(rlmi, 0x16, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(rrib, 0x1F, 0x19, 0x10, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(sle, 0x1F, 0x19, 0x04, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(sleq, 0x1F, 0x19, 0x06, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(sliq, 0x1F, 0x18, 0x05, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(slliq, 0x1F, 0x18, 0x07, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(sllq, 0x1F, 0x18, 0x06, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(slq, 0x1F, 0x18, 0x04, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(sraiq, 0x1F, 0x18, 0x1D, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(sraq, 0x1F, 0x18, 0x1C, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(sre, 0x1F, 0x19, 0x14, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(srea, 0x1F, 0x19, 0x1C, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(sreq, 0x1F, 0x19, 0x16, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(sriq, 0x1F, 0x18, 0x15, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(srliq, 0x1F, 0x18, 0x17, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(srlq, 0x1F, 0x18, 0x16, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(srq, 0x1F, 0x18, 0x14, 0x00000000, PPC_POWER_BR), +GEN_HANDLER(dsa, 0x1F, 0x14, 0x13, 0x03FFF801, PPC_602_SPEC), +GEN_HANDLER(esa, 0x1F, 0x14, 0x12, 0x03FFF801, PPC_602_SPEC), +GEN_HANDLER(mfrom, 0x1F, 0x09, 0x08, 0x03E0F801, PPC_602_SPEC), +GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB), +GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB), +GEN_HANDLER2(tlbld_74xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_74xx_TLB), +GEN_HANDLER2(tlbli_74xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_74xx_TLB), +GEN_HANDLER(clf, 0x1F, 0x16, 0x03, 0x03E00000, PPC_POWER), +GEN_HANDLER(cli, 0x1F, 0x16, 0x0F, 0x03E00000, PPC_POWER), +GEN_HANDLER(dclst, 0x1F, 0x16, 0x13, 0x03E00000, PPC_POWER), +GEN_HANDLER(mfsri, 0x1F, 0x13, 0x13, 0x00000001, PPC_POWER), +GEN_HANDLER(rac, 0x1F, 0x12, 0x19, 0x00000001, PPC_POWER), +GEN_HANDLER(rfsvc, 0x13, 0x12, 0x02, 0x03FFF0001, PPC_POWER), +GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2), +GEN_HANDLER(lfqu, 0x39, 0xFF, 0xFF, 0x00000003, PPC_POWER2), +GEN_HANDLER(lfqux, 0x1F, 0x17, 0x19, 0x00000001, PPC_POWER2), +GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, 
PPC_POWER2), +GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2), +GEN_HANDLER(stfqu, 0x3D, 0xFF, 0xFF, 0x00000003, PPC_POWER2), +GEN_HANDLER(stfqux, 0x1F, 0x17, 0x1D, 0x00000001, PPC_POWER2), +GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2), +GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI), +GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA), +GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR), +GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR), +GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX), +GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX), +GEN_HANDLER(mfdcrux, 0x1F, 0x03, 0x09, 0x00000000, PPC_DCRUX), +GEN_HANDLER(mtdcrux, 0x1F, 0x03, 0x0D, 0x00000000, PPC_DCRUX), +GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON), +GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON), +GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT), +GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON), +GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON), +GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP), +GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206), +GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI), +GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI), +GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB), +GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB), +GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB), +GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE), +GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE), +GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE), +GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, + PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, + PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, + PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001, + PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001, + PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER2_E(msgsnd, "msgsnd", 0x1F, 0x0E, 0x06, 0x03ff0001, + PPC_NONE, PPC2_PRCNTL), +GEN_HANDLER2_E(msgclr, "msgclr", 0x1F, 0x0E, 0x07, 0x03ff0001, + PPC_NONE, PPC2_PRCNTL), +GEN_HANDLER2_E(msgsync, "msgsync", 0x1F, 0x16, 0x1B, 0x00000000, + PPC_NONE, PPC2_PRCNTL), +GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE), +GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE), +GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC), +GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801, + PPC_BOOKE, PPC2_BOOKE206), +GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE), +GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001, + PPC_BOOKE, PPC2_BOOKE206), +GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, + PPC_440_SPEC), +GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC), +GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC), +GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC), +GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC), +GEN_HANDLER(vmladduhm, 0x04, 0x11, 0xFF, 0x00000000, PPC_ALTIVEC), +#if defined(TARGET_PPC64) +GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE, 
+ PPC2_ISA300), +GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), +GEN_HANDLER2_E(msgsndp, "msgsndp", 0x1F, 0x0E, 0x04, 0x03ff0001, + PPC_NONE, PPC2_ISA207S), +GEN_HANDLER2_E(msgclrp, "msgclrp", 0x1F, 0x0E, 0x05, 0x03ff0001, + PPC_NONE, PPC2_ISA207S), +#endif + +#undef GEN_INT_ARITH_ADD +#undef GEN_INT_ARITH_ADD_CONST +#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \ +GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER), +#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \ + add_ca, compute_ca, compute_ov) \ +GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER), +GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0) +GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1) +GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0) +GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1) +GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0) +GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1) +GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0) +GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1) +GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300), +GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0) +GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1) + +#undef GEN_INT_ARITH_DIVW +#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ +GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER) +GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0), +GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1), +GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0), +GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1), +GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206), +GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206), +GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206), +GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206), +GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300), + +#if defined(TARGET_PPC64) +#undef GEN_INT_ARITH_DIVD +#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ +GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) +GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0), +GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1), +GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0), +GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1), + +GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206), +GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206), +GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206), +GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206), +GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300), + +#undef GEN_INT_ARITH_MUL_HELPER +#define GEN_INT_ARITH_MUL_HELPER(name, opc3) \ +GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) +GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00), +GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02), +GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17), +#endif + +#undef GEN_INT_ARITH_SUBF +#undef GEN_INT_ARITH_SUBF_CONST +#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \ +GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER), +#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \ + add_ca, compute_ca, compute_ov) \ +GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER), +GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) +GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1) +GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0) +GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1) +GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0) 
+GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1) +GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0) +GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1) +GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0) +GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) + +#undef GEN_LOGICAL1 +#undef GEN_LOGICAL2 +#define GEN_LOGICAL2(name, tcg_op, opc, type) \ +GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type) +#define GEN_LOGICAL1(name, tcg_op, opc, type) \ +GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type) +GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER), +GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER), +GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER), +GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER), +GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER), +GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER), +GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER), +GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER), +#if defined(TARGET_PPC64) +GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B), +#endif + +#if defined(TARGET_PPC64) +#undef GEN_PPC64_R2 +#undef GEN_PPC64_R4 +#define GEN_PPC64_R2(name, opc1, opc2) \ +GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\ +GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \ + PPC_64B) +#define GEN_PPC64_R4(name, opc1, opc2) \ +GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\ +GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000, \ + PPC_64B), \ +GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \ + PPC_64B), \ +GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000, \ + PPC_64B) +GEN_PPC64_R4(rldicl, 0x1E, 0x00), +GEN_PPC64_R4(rldicr, 0x1E, 0x02), +GEN_PPC64_R4(rldic, 0x1E, 0x04), +GEN_PPC64_R2(rldcl, 0x1E, 0x08), +GEN_PPC64_R2(rldcr, 0x1E, 0x09), +GEN_PPC64_R4(rldimi, 0x1E, 0x06), +#endif + +#undef GEN_LD +#undef GEN_LDU +#undef GEN_LDUX +#undef GEN_LDX_E +#undef GEN_LDS +#define GEN_LD(name, ldop, opc, type) \ +GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), +#define GEN_LDU(name, ldop, opc, type) \ +GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), +#define GEN_LDUX(name, ldop, opc2, opc3, type) \ +GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type), +#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \ +GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2), +#define GEN_LDS(name, ldop, op, type) \ +GEN_LD(name, ldop, op | 0x20, type) \ +GEN_LDU(name, ldop, op | 0x21, type) \ +GEN_LDUX(name, ldop, 0x17, op | 0x01, type) \ +GEN_LDX(name, ldop, 0x17, op | 0x00, type) + +GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER) +GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER) +GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER) +GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER) +#if defined(TARGET_PPC64) +GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B) +GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B) +GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B) +GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B) +GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE) + +/* HV/P7 and later only */ +GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST) +GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST) +GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) +GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) +#endif +GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER) +GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER) + +/* External PID based load */ +#undef GEN_LDEPX +#define GEN_LDEPX(name, ldop, opc2, 
opc3) \ +GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \ + 0x00000001, PPC_NONE, PPC2_BOOKE206), + +GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) +GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08) +GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00) +#if defined(TARGET_PPC64) +GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00) +#endif + +#undef GEN_ST +#undef GEN_STU +#undef GEN_STUX +#undef GEN_STX_E +#undef GEN_STS +#define GEN_ST(name, stop, opc, type) \ +GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), +#define GEN_STU(name, stop, opc, type) \ +GEN_HANDLER(stop##u, opc, 0xFF, 0xFF, 0x00000000, type), +#define GEN_STUX(name, stop, opc2, opc3, type) \ +GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type), +#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \ +GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2), +#define GEN_STS(name, stop, op, type) \ +GEN_ST(name, stop, op | 0x20, type) \ +GEN_STU(name, stop, op | 0x21, type) \ +GEN_STUX(name, stop, 0x17, op | 0x01, type) \ +GEN_STX(name, stop, 0x17, op | 0x00, type) + +GEN_STS(stb, st8, 0x06, PPC_INTEGER) +GEN_STS(sth, st16, 0x0C, PPC_INTEGER) +GEN_STS(stw, st32, 0x04, PPC_INTEGER) +#if defined(TARGET_PPC64) +GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B) +GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B) +GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE) +GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST) +GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST) +GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST) +GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST) +#endif +GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER) +GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER) + +#undef GEN_STEPX +#define GEN_STEPX(name, ldop, opc2, opc3) \ +GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \ + 0x00000001, PPC_NONE, PPC2_BOOKE206), + +GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) +GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C) +GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04) +#if defined(TARGET_PPC64) +GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1D, 0x04) +#endif + +#undef GEN_CRLOGIC +#define GEN_CRLOGIC(name, tcg_op, opc) \ +GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER) +GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08), +GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04), +GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09), +GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07), +GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01), +GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E), +GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D), +GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06), + +#undef GEN_MAC_HANDLER +#define GEN_MAC_HANDLER(name, opc2, opc3) \ +GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC) +GEN_MAC_HANDLER(macchw, 0x0C, 0x05), +GEN_MAC_HANDLER(macchwo, 0x0C, 0x15), +GEN_MAC_HANDLER(macchws, 0x0C, 0x07), +GEN_MAC_HANDLER(macchwso, 0x0C, 0x17), +GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06), +GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16), +GEN_MAC_HANDLER(macchwu, 0x0C, 0x04), +GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14), +GEN_MAC_HANDLER(machhw, 0x0C, 0x01), +GEN_MAC_HANDLER(machhwo, 0x0C, 0x11), +GEN_MAC_HANDLER(machhws, 0x0C, 0x03), +GEN_MAC_HANDLER(machhwso, 0x0C, 0x13), +GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02), +GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12), +GEN_MAC_HANDLER(machhwu, 0x0C, 0x00), +GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10), +GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D), +GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D), +GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F), +GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F), +GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C), +GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C), +GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E), 
+GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E), +GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05), +GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15), +GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07), +GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17), +GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01), +GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11), +GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03), +GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13), +GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D), +GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D), +GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F), +GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F), +GEN_MAC_HANDLER(mulchw, 0x08, 0x05), +GEN_MAC_HANDLER(mulchwu, 0x08, 0x04), +GEN_MAC_HANDLER(mulhhw, 0x08, 0x01), +GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00), +GEN_MAC_HANDLER(mullhw, 0x08, 0x0D), +GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C), + +GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, + PPC_NONE, PPC2_TM), +GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, + PPC_NONE, PPC2_TM), +GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, + PPC_NONE, PPC2_TM), +GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, + PPC_NONE, PPC2_TM), +GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, + PPC_NONE, PPC2_TM), +GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, + PPC_NONE, PPC2_TM), +GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, + PPC_NONE, PPC2_TM), +GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, + PPC_NONE, PPC2_TM), +GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, + PPC_NONE, PPC2_TM), +GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, + PPC_NONE, PPC2_TM), +GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, + PPC_NONE, PPC2_TM), + +#include "translate/fp-ops.inc.c" + +#include "translate/vmx-ops.inc.c" + +#include "translate/vsx-ops.inc.c" + +#include "translate/dfp-ops.inc.c" + +#include "translate/spe-ops.inc.c" +}; + +#include "helper_regs.h" +#include "translate_init.inc.c" + +static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + CPUPPCState *env = cs->env_ptr; + int bound; + + // unicorn setup + ctx->uc = cs->uc; + + ctx->exception = POWERPC_EXCP_NONE; + ctx->spr_cb = env->spr_cb; + ctx->pr = msr_pr; + ctx->mem_idx = env->dmmu_idx; + ctx->dr = msr_dr; + ctx->hv = msr_hv || !env->has_hv_mode; + ctx->insns_flags = env->insns_flags; + ctx->insns_flags2 = env->insns_flags2; + ctx->access_type = -1; + ctx->need_access_type = !(env->mmu_model & POWERPC_MMU_64B); + ctx->le_mode = !!(env->hflags & (1 << MSR_LE)); + ctx->default_tcg_memop_mask = ctx->le_mode ?
MO_LE : MO_BE; + ctx->flags = env->flags; +#if defined(TARGET_PPC64) + ctx->sf_mode = msr_is_64bit(env, env->msr); + ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR); +#endif + ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B + || env->mmu_model == POWERPC_MMU_601 + || (env->mmu_model & POWERPC_MMU_64B); + + ctx->fpu_enabled = !!msr_fp; + if ((env->flags & POWERPC_FLAG_SPE) && msr_spe) { + ctx->spe_enabled = !!msr_spe; + } else { + ctx->spe_enabled = false; + } + if ((env->flags & POWERPC_FLAG_VRE) && msr_vr) { + ctx->altivec_enabled = !!msr_vr; + } else { + ctx->altivec_enabled = false; + } + if ((env->flags & POWERPC_FLAG_VSX) && msr_vsx) { + ctx->vsx_enabled = !!msr_vsx; + } else { + ctx->vsx_enabled = false; + } +#if defined(TARGET_PPC64) + if ((env->flags & POWERPC_FLAG_TM) && msr_tm) { + ctx->tm_enabled = !!msr_tm; + } else { + ctx->tm_enabled = false; + } +#endif + ctx->gtse = !!(env->spr[SPR_LPCR] & LPCR_GTSE); + if ((env->flags & POWERPC_FLAG_SE) && msr_se) { + ctx->singlestep_enabled = CPU_SINGLE_STEP; + } else { + ctx->singlestep_enabled = 0; + } + if ((env->flags & POWERPC_FLAG_BE) && msr_be) { + ctx->singlestep_enabled |= CPU_BRANCH_STEP; + } + if ((env->flags & POWERPC_FLAG_DE) && msr_de) { + ctx->singlestep_enabled = 0; + target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0]; + if (dbcr0 & DBCR0_ICMP) { + ctx->singlestep_enabled |= CPU_SINGLE_STEP; + } + if (dbcr0 & DBCR0_BRT) { + ctx->singlestep_enabled |= CPU_BRANCH_STEP; + } + } + if (unlikely(ctx->base.singlestep_enabled)) { + ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP; + } +#if defined(DO_SINGLE_STEP) && 0 + /* Single step trace mode */ + msr_se = 1; +#endif + +#ifdef _MSC_VER + bound = (0 - (ctx->base.pc_first | TARGET_PAGE_MASK)) / 4; +#else + bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; +#endif + ctx->base.max_insns = MIN(ctx->base.max_insns, bound); +} + +static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs) +{ +} + +static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) +{ + TCGContext *tcg_ctx = cs->uc->tcg_ctx; + + tcg_gen_insn_start(tcg_ctx, dcbase->pc_next); +} + +static bool ppc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, + const CPUBreakpoint *bp) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + + gen_debug_exception(ctx); + dcbase->is_jmp = DISAS_NORETURN; + /* + * The address covered by the breakpoint must be included in + * [tb->pc, tb->pc + tb->size) in order for it to be properly + * cleared -- thus we increment the PC here so that the logic + * setting tb->size below does the right thing.
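+ * (PPC instructions are a fixed 4 bytes, which is why pc_next is advanced by exactly 4.)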
+ */ + ctx->base.pc_next += 4; + return true; +} + +static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = ctx->uc; + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = cs->env_ptr; + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + opc_handler_t **table, *handler; + + LOG_DISAS("----------------\n"); + LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n", + ctx->base.pc_next, ctx->mem_idx, (int)msr_ir); + + // Unicorn: end address tells us to stop emulation + if (ctx->base.pc_next == uc->addr_end) { + gen_wait(ctx); + return; + } + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, ctx->base.pc_next)) { + gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, uc, ctx->base.pc_next); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + ctx->opcode = translator_ldl_swap(tcg_ctx, env, ctx->base.pc_next, + need_byteswap(ctx)); + + LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n", + ctx->opcode, opc1(ctx->opcode), opc2(ctx->opcode), + opc3(ctx->opcode), opc4(ctx->opcode), + ctx->le_mode ? "little" : "big"); + + ctx->base.pc_next += 4; + + table = cpu->opcodes; + handler = table[opc1(ctx->opcode)]; + if (is_indirect_opcode(handler)) { + table = ind_table(handler); + handler = table[opc2(ctx->opcode)]; + if (is_indirect_opcode(handler)) { + table = ind_table(handler); + handler = table[opc3(ctx->opcode)]; + if (is_indirect_opcode(handler)) { + table = ind_table(handler); + handler = table[opc4(ctx->opcode)]; + } + } + } + /* Is opcode *REALLY* valid ? */ + if (unlikely(handler->handler == &gen_invalid)) { + qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: " + "%02x - %02x - %02x - %02x (%08x) " + TARGET_FMT_lx " %d\n", + opc1(ctx->opcode), opc2(ctx->opcode), + opc3(ctx->opcode), opc4(ctx->opcode), + ctx->opcode, ctx->base.pc_next - 4, (int)msr_ir); + } else { + uint32_t inval; + + if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE) + && Rc(ctx->opcode))) { + inval = handler->inval2; + } else { + inval = handler->inval1; + } + + if (unlikely((ctx->opcode & inval) != 0)) { + qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: " + "%02x - %02x - %02x - %02x (%08x) " + TARGET_FMT_lx "\n", ctx->opcode & inval, + opc1(ctx->opcode), opc2(ctx->opcode), + opc3(ctx->opcode), opc4(ctx->opcode), + ctx->opcode, ctx->base.pc_next - 4); + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + ctx->base.is_jmp = DISAS_NORETURN; + return; + } + } + (*(handler->handler))(ctx); +#if defined(DO_PPC_STATISTICS) + handler->count++; +#endif + /* Check trace mode exceptions */ + if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP && + (ctx->base.pc_next <= 0x100 || ctx->base.pc_next > 0xF00) && + ctx->exception != POWERPC_SYSCALL && + ctx->exception != POWERPC_EXCP_TRAP && + ctx->exception != POWERPC_EXCP_BRANCH)) { + uint32_t excp = gen_prep_dbgex(ctx); + gen_exception_nip(ctx, excp, ctx->base.pc_next); + } + + ctx->base.is_jmp = ctx->exception == POWERPC_EXCP_NONE ? 
+ DISAS_NEXT : DISAS_NORETURN; +} + +static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = cs->uc->tcg_ctx; + + + if (ctx->exception == POWERPC_EXCP_NONE) { + gen_goto_tb(ctx, 0, ctx->base.pc_next); + } else if (ctx->exception != POWERPC_EXCP_BRANCH) { + if (unlikely(ctx->base.singlestep_enabled)) { + gen_debug_exception(ctx); + } + /* Generate the return instruction */ + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + } +} + +static const TranslatorOps ppc_tr_ops = { + .init_disas_context = ppc_tr_init_disas_context, + .tb_start = ppc_tr_tb_start, + .insn_start = ppc_tr_insn_start, + .breakpoint_check = ppc_tr_breakpoint_check, + .translate_insn = ppc_tr_translate_insn, + .tb_stop = ppc_tr_tb_stop, +}; + +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +{ + DisasContext ctx; + + memset(&ctx, 0, sizeof(ctx)); + translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns); +} + +void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb, + target_ulong *data) +{ + env->nip = data[0]; +} diff --git a/qemu/target/ppc/translate/dfp-impl.inc.c b/qemu/target/ppc/translate/dfp-impl.inc.c new file mode 100644 index 00000000..172c1c6e --- /dev/null +++ b/qemu/target/ppc/translate/dfp-impl.inc.c @@ -0,0 +1,240 @@ +/*** Decimal Floating Point ***/ + +static inline TCGv_ptr gen_fprp_ptr(TCGContext *tcg_ctx, int reg) +{ + TCGv_ptr r = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, r, tcg_ctx->cpu_env, offsetof(CPUPPCState, vsr[reg].u64[0])); + return r; +} + +#define GEN_DFP_T_A_B_Rc(name) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rd, ra, rb; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_update_nip(ctx, ctx->base.pc_next - 4); \ + rd = gen_fprp_ptr(tcg_ctx, rD(ctx->opcode)); \ + ra = gen_fprp_ptr(tcg_ctx, rA(ctx->opcode)); \ + rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb); \ + if (unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ +} + +#define GEN_DFP_BF_A_B(name) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr ra, rb; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_update_nip(ctx, ctx->base.pc_next - 4); \ + ra = gen_fprp_ptr(tcg_ctx, rA(ctx->opcode)); \ + rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, cpu_crf[crfD(ctx->opcode)], \ + tcg_ctx->cpu_env, ra, rb); \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ +} + +#define GEN_DFP_BF_I_B(name) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 uim; \ + TCGv_ptr rb; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_update_nip(ctx, ctx->base.pc_next - 4); \ + uim = tcg_const_i32(tcg_ctx, UIMM5(ctx->opcode)); \ + rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, cpu_crf[crfD(ctx->opcode)], \ + tcg_ctx->cpu_env, uim, rb); \ + tcg_temp_free_i32(tcg_ctx, uim); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ +} + +#define GEN_DFP_BF_A_DCM(name) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; \ + TCGv_ptr ra; \ + TCGv_i32 dcm; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_update_nip(ctx, ctx->base.pc_next - 4); \ + ra = gen_fprp_ptr(tcg_ctx, rA(ctx->opcode)); \ + dcm = tcg_const_i32(tcg_ctx, DCM(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, cpu_crf[crfD(ctx->opcode)], \ + tcg_ctx->cpu_env, ra, dcm); \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_i32(tcg_ctx, dcm); \ +} + +#define GEN_DFP_T_B_U32_U32_Rc(name, u32f1, u32f2) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rt, rb; \ + TCGv_i32 u32_1, u32_2; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_update_nip(ctx, ctx->base.pc_next - 4); \ + rt = gen_fprp_ptr(tcg_ctx, rD(ctx->opcode)); \ + rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ + u32_1 = tcg_const_i32(tcg_ctx, u32f1(ctx->opcode)); \ + u32_2 = tcg_const_i32(tcg_ctx, u32f2(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rt, rb, u32_1, u32_2); \ + if (unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(tcg_ctx, rt); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_i32(tcg_ctx, u32_1); \ + tcg_temp_free_i32(tcg_ctx, u32_2); \ +} + +#define GEN_DFP_T_A_B_I32_Rc(name, i32fld) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rt, ra, rb; \ + TCGv_i32 i32; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_update_nip(ctx, ctx->base.pc_next - 4); \ + rt = gen_fprp_ptr(tcg_ctx, rD(ctx->opcode)); \ + ra = gen_fprp_ptr(tcg_ctx, rA(ctx->opcode)); \ + rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ + i32 = tcg_const_i32(tcg_ctx, i32fld(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rt, ra, rb, i32); \ + if (unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(tcg_ctx, rt); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_i32(tcg_ctx, i32); \ + } + +#define GEN_DFP_T_B_Rc(name) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rt, rb; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_update_nip(ctx, ctx->base.pc_next - 4); \ + rt = gen_fprp_ptr(tcg_ctx, rD(ctx->opcode)); \ + rb = gen_fprp_ptr(tcg_ctx, rB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rt, rb); \ + if (unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(tcg_ctx, rt); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + } + +#define GEN_DFP_T_FPR_I32_Rc(name, fprfld, i32fld) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rt, rs; \ + TCGv_i32 i32; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_update_nip(ctx, ctx->base.pc_next - 4); \ + rt = gen_fprp_ptr(tcg_ctx, rD(ctx->opcode)); \ + rs = gen_fprp_ptr(tcg_ctx, fprfld(ctx->opcode)); \ + i32 = tcg_const_i32(tcg_ctx, i32fld(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rt, rs, i32); \ + if (unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(tcg_ctx, rt); \ + tcg_temp_free_ptr(tcg_ctx, rs); \ + tcg_temp_free_i32(tcg_ctx, i32); \ +} + +GEN_DFP_T_A_B_Rc(dadd) 
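+/* The quad (*q) forms below operate on an even/odd pair of FPRs holding a single 128-bit DFP value; the corresponding Tp/Ap/Bp ("pair") inval masks are defined in dfp-ops.inc.c. */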
+GEN_DFP_T_A_B_Rc(daddq) +GEN_DFP_T_A_B_Rc(dsub) +GEN_DFP_T_A_B_Rc(dsubq) +GEN_DFP_T_A_B_Rc(dmul) +GEN_DFP_T_A_B_Rc(dmulq) +GEN_DFP_T_A_B_Rc(ddiv) +GEN_DFP_T_A_B_Rc(ddivq) +GEN_DFP_BF_A_B(dcmpu) +GEN_DFP_BF_A_B(dcmpuq) +GEN_DFP_BF_A_B(dcmpo) +GEN_DFP_BF_A_B(dcmpoq) +GEN_DFP_BF_A_DCM(dtstdc) +GEN_DFP_BF_A_DCM(dtstdcq) +GEN_DFP_BF_A_DCM(dtstdg) +GEN_DFP_BF_A_DCM(dtstdgq) +GEN_DFP_BF_A_B(dtstex) +GEN_DFP_BF_A_B(dtstexq) +GEN_DFP_BF_A_B(dtstsf) +GEN_DFP_BF_A_B(dtstsfq) +GEN_DFP_BF_I_B(dtstsfi) +GEN_DFP_BF_I_B(dtstsfiq) +GEN_DFP_T_B_U32_U32_Rc(dquai, SIMM5, RMC) +GEN_DFP_T_B_U32_U32_Rc(dquaiq, SIMM5, RMC) +GEN_DFP_T_A_B_I32_Rc(dqua, RMC) +GEN_DFP_T_A_B_I32_Rc(dquaq, RMC) +GEN_DFP_T_A_B_I32_Rc(drrnd, RMC) +GEN_DFP_T_A_B_I32_Rc(drrndq, RMC) +GEN_DFP_T_B_U32_U32_Rc(drintx, FPW, RMC) +GEN_DFP_T_B_U32_U32_Rc(drintxq, FPW, RMC) +GEN_DFP_T_B_U32_U32_Rc(drintn, FPW, RMC) +GEN_DFP_T_B_U32_U32_Rc(drintnq, FPW, RMC) +GEN_DFP_T_B_Rc(dctdp) +GEN_DFP_T_B_Rc(dctqpq) +GEN_DFP_T_B_Rc(drsp) +GEN_DFP_T_B_Rc(drdpq) +GEN_DFP_T_B_Rc(dcffix) +GEN_DFP_T_B_Rc(dcffixq) +GEN_DFP_T_B_Rc(dctfix) +GEN_DFP_T_B_Rc(dctfixq) +GEN_DFP_T_FPR_I32_Rc(ddedpd, rB, SP) +GEN_DFP_T_FPR_I32_Rc(ddedpdq, rB, SP) +GEN_DFP_T_FPR_I32_Rc(denbcd, rB, SP) +GEN_DFP_T_FPR_I32_Rc(denbcdq, rB, SP) +GEN_DFP_T_B_Rc(dxex) +GEN_DFP_T_B_Rc(dxexq) +GEN_DFP_T_A_B_Rc(diex) +GEN_DFP_T_A_B_Rc(diexq) +GEN_DFP_T_FPR_I32_Rc(dscli, rA, DCM) +GEN_DFP_T_FPR_I32_Rc(dscliq, rA, DCM) +GEN_DFP_T_FPR_I32_Rc(dscri, rA, DCM) +GEN_DFP_T_FPR_I32_Rc(dscriq, rA, DCM) + +#undef GEN_DFP_T_A_B_Rc +#undef GEN_DFP_BF_A_B +#undef GEN_DFP_BF_A_DCM +#undef GEN_DFP_T_B_U32_U32_Rc +#undef GEN_DFP_T_A_B_I32_Rc +#undef GEN_DFP_T_B_Rc +#undef GEN_DFP_T_FPR_I32_Rc diff --git a/qemu/target/ppc/translate/dfp-ops.inc.c b/qemu/target/ppc/translate/dfp-ops.inc.c new file mode 100644 index 00000000..6ef38e57 --- /dev/null +++ b/qemu/target/ppc/translate/dfp-ops.inc.c @@ -0,0 +1,165 @@ +#define _GEN_DFP_LONG(name, op1, op2, mask) \ +GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_DFP) + +#define _GEN_DFP_LONG_300(name, op1, op2, mask) \ +GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_ISA300) + +#define _GEN_DFP_LONGx2(name, op1, op2, mask) \ +GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ +GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP) + +#define _GEN_DFP_LONGx4(name, op1, op2, mask) \ +GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ +GEN_HANDLER_E(name, 0x3B, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \ +GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \ +GEN_HANDLER_E(name, 0x3B, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP) + +#define _GEN_DFP_QUAD(name, op1, op2, mask) \ +GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_DFP) + +#define _GEN_DFP_QUAD_300(name, op1, op2, mask) \ +GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_ISA300) + +#define _GEN_DFP_QUADx2(name, op1, op2, mask) \ +GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ +GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP) + +#define _GEN_DFP_QUADx4(name, op1, op2, mask) \ +GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ +GEN_HANDLER_E(name, 0x3F, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \ +GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \ +GEN_HANDLER_E(name, 0x3F, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP) + +#define GEN_DFP_T_A_B_Rc(name, op1, op2) \ +_GEN_DFP_LONG(name, op1, op2, 0x00000000) + +#define 
GEN_DFP_Tp_Ap_Bp_Rc(name, op1, op2) \ +_GEN_DFP_QUAD(name, op1, op2, 0x00210800) + +#define GEN_DFP_Tp_A_Bp_Rc(name, op1, op2) \ +_GEN_DFP_QUAD(name, op1, op2, 0x00200800) + +#define GEN_DFP_T_B_Rc(name, op1, op2) \ +_GEN_DFP_LONG(name, op1, op2, 0x001F0000) + +#define GEN_DFP_Tp_Bp_Rc(name, op1, op2) \ +_GEN_DFP_QUAD(name, op1, op2, 0x003F0800) + +#define GEN_DFP_Tp_B_Rc(name, op1, op2) \ +_GEN_DFP_QUAD(name, op1, op2, 0x003F0000) + +#define GEN_DFP_T_Bp_Rc(name, op1, op2) \ +_GEN_DFP_QUAD(name, op1, op2, 0x001F0800) + +#define GEN_DFP_BF_A_B(name, op1, op2) \ +_GEN_DFP_LONG(name, op1, op2, 0x00000001) + +#define GEN_DFP_BF_A_B_300(name, op1, op2) \ +_GEN_DFP_LONG_300(name, op1, op2, 0x00400001) + +#define GEN_DFP_BF_Ap_Bp(name, op1, op2) \ +_GEN_DFP_QUAD(name, op1, op2, 0x00610801) + +#define GEN_DFP_BF_A_Bp(name, op1, op2) \ +_GEN_DFP_QUAD(name, op1, op2, 0x00600801) + +#define GEN_DFP_BF_A_Bp_300(name, op1, op2) \ +_GEN_DFP_QUAD_300(name, op1, op2, 0x00400001) + +#define GEN_DFP_BF_A_DCM(name, op1, op2) \ +_GEN_DFP_LONGx2(name, op1, op2, 0x00600001) + +#define GEN_DFP_BF_Ap_DCM(name, op1, op2) \ +_GEN_DFP_QUADx2(name, op1, op2, 0x00610001) + +#define GEN_DFP_T_A_B_RMC_Rc(name, op1, op2) \ +_GEN_DFP_LONGx4(name, op1, op2, 0x00000000) + +#define GEN_DFP_Tp_Ap_Bp_RMC_Rc(name, op1, op2) \ +_GEN_DFP_QUADx4(name, op1, op2, 0x02010800) + +#define GEN_DFP_Tp_A_Bp_RMC_Rc(name, op1, op2) \ +_GEN_DFP_QUADx4(name, op1, op2, 0x02000800) + +#define GEN_DFP_TE_T_B_RMC_Rc(name, op1, op2) \ +_GEN_DFP_LONGx4(name, op1, op2, 0x00000000) + +#define GEN_DFP_TE_Tp_Bp_RMC_Rc(name, op1, op2) \ +_GEN_DFP_QUADx4(name, op1, op2, 0x00200800) + +#define GEN_DFP_R_T_B_RMC_Rc(name, op1, op2) \ +_GEN_DFP_LONGx4(name, op1, op2, 0x001E0000) + +#define GEN_DFP_R_Tp_Bp_RMC_Rc(name, op1, op2) \ +_GEN_DFP_QUADx4(name, op1, op2, 0x003E0800) + +#define GEN_DFP_SP_T_B_Rc(name, op1, op2) \ +_GEN_DFP_LONG(name, op1, op2, 0x00070000) + +#define GEN_DFP_SP_Tp_Bp_Rc(name, op1, op2) \ +_GEN_DFP_QUAD(name, op1, op2, 0x00270800) + +#define GEN_DFP_S_T_B_Rc(name, op1, op2) \ +_GEN_DFP_LONG(name, op1, op2, 0x000F0000) + +#define GEN_DFP_S_Tp_Bp_Rc(name, op1, op2) \ +_GEN_DFP_QUAD(name, op1, op2, 0x002F0800) + +#define GEN_DFP_T_A_SH_Rc(name, op1, op2) \ +_GEN_DFP_LONGx2(name, op1, op2, 0x00000000) + +#define GEN_DFP_Tp_Ap_SH_Rc(name, op1, op2) \ +_GEN_DFP_QUADx2(name, op1, op2, 0x00210000) + +GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00), +GEN_DFP_Tp_Ap_Bp_Rc(daddq, 0x02, 0x00), +GEN_DFP_T_A_B_Rc(dsub, 0x02, 0x10), +GEN_DFP_Tp_Ap_Bp_Rc(dsubq, 0x02, 0x10), +GEN_DFP_T_A_B_Rc(dmul, 0x02, 0x01), +GEN_DFP_Tp_Ap_Bp_Rc(dmulq, 0x02, 0x01), +GEN_DFP_T_A_B_Rc(ddiv, 0x02, 0x11), +GEN_DFP_Tp_Ap_Bp_Rc(ddivq, 0x02, 0x11), +GEN_DFP_BF_A_B(dcmpu, 0x02, 0x14), +GEN_DFP_BF_Ap_Bp(dcmpuq, 0x02, 0x14), +GEN_DFP_BF_A_B(dcmpo, 0x02, 0x04), +GEN_DFP_BF_Ap_Bp(dcmpoq, 0x02, 0x04), +GEN_DFP_BF_A_DCM(dtstdc, 0x02, 0x06), +GEN_DFP_BF_Ap_DCM(dtstdcq, 0x02, 0x06), +GEN_DFP_BF_A_DCM(dtstdg, 0x02, 0x07), +GEN_DFP_BF_Ap_DCM(dtstdgq, 0x02, 0x07), +GEN_DFP_BF_A_B(dtstex, 0x02, 0x05), +GEN_DFP_BF_Ap_Bp(dtstexq, 0x02, 0x05), +GEN_DFP_BF_A_B(dtstsf, 0x02, 0x15), +GEN_DFP_BF_A_Bp(dtstsfq, 0x02, 0x15), +GEN_DFP_BF_A_B_300(dtstsfi, 0x03, 0x15), +GEN_DFP_BF_A_Bp_300(dtstsfiq, 0x03, 0x15), +GEN_DFP_TE_T_B_RMC_Rc(dquai, 0x03, 0x02), +GEN_DFP_TE_Tp_Bp_RMC_Rc(dquaiq, 0x03, 0x02), +GEN_DFP_T_A_B_RMC_Rc(dqua, 0x03, 0x00), +GEN_DFP_Tp_Ap_Bp_RMC_Rc(dquaq, 0x03, 0x00), +GEN_DFP_T_A_B_RMC_Rc(drrnd, 0x03, 0x01), +GEN_DFP_Tp_A_Bp_RMC_Rc(drrndq, 0x03, 0x01), +GEN_DFP_R_T_B_RMC_Rc(drintx, 0x03, 0x03), 
+GEN_DFP_R_Tp_Bp_RMC_Rc(drintxq, 0x03, 0x03), +GEN_DFP_R_T_B_RMC_Rc(drintn, 0x03, 0x07), +GEN_DFP_R_Tp_Bp_RMC_Rc(drintnq, 0x03, 0x07), +GEN_DFP_T_B_Rc(dctdp, 0x02, 0x08), +GEN_DFP_Tp_B_Rc(dctqpq, 0x02, 0x08), +GEN_DFP_T_B_Rc(drsp, 0x02, 0x18), +GEN_DFP_Tp_Bp_Rc(drdpq, 0x02, 0x18), +GEN_DFP_T_B_Rc(dcffix, 0x02, 0x19), +GEN_DFP_Tp_B_Rc(dcffixq, 0x02, 0x19), +GEN_DFP_T_B_Rc(dctfix, 0x02, 0x09), +GEN_DFP_T_Bp_Rc(dctfixq, 0x02, 0x09), +GEN_DFP_SP_T_B_Rc(ddedpd, 0x02, 0x0a), +GEN_DFP_SP_Tp_Bp_Rc(ddedpdq, 0x02, 0x0a), +GEN_DFP_S_T_B_Rc(denbcd, 0x02, 0x1a), +GEN_DFP_S_Tp_Bp_Rc(denbcdq, 0x02, 0x1a), +GEN_DFP_T_B_Rc(dxex, 0x02, 0x0b), +GEN_DFP_T_Bp_Rc(dxexq, 0x02, 0x0b), +GEN_DFP_T_A_B_Rc(diex, 0x02, 0x1b), +GEN_DFP_Tp_A_Bp_Rc(diexq, 0x02, 0x1b), +GEN_DFP_T_A_SH_Rc(dscli, 0x02, 0x02), +GEN_DFP_Tp_Ap_SH_Rc(dscliq, 0x02, 0x02), +GEN_DFP_T_A_SH_Rc(dscri, 0x02, 0x03), +GEN_DFP_Tp_Ap_SH_Rc(dscriq, 0x02, 0x03), diff --git a/qemu/target/ppc/translate/fp-impl.inc.c b/qemu/target/ppc/translate/fp-impl.inc.c new file mode 100644 index 00000000..58155f21 --- /dev/null +++ b/qemu/target/ppc/translate/fp-impl.inc.c @@ -0,0 +1,1554 @@ +/* + * translate-fp.c + * + * Standard FPU translation + */ + +static inline void gen_reset_fpstatus(TCGContext *tcg_ctx) +{ + gen_helper_reset_fpstatus(tcg_ctx, tcg_ctx->cpu_env); +} + +static inline void gen_compute_fprf_float64(TCGContext *tcg_ctx, TCGv_i64 arg) +{ + gen_helper_compute_fprf_float64(tcg_ctx, tcg_ctx->cpu_env, arg); + gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); +} + +#if defined(TARGET_PPC64) +static void gen_set_cr1_from_fpscr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, tmp, cpu_fpscr); + tcg_gen_shri_i32(tcg_ctx, cpu_crf[1], tmp, 28); + tcg_temp_free_i32(tcg_ctx, tmp); +} +#else +static void gen_set_cr1_from_fpscr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_shri_tl(tcg_ctx, cpu_crf[1], cpu_fpscr, 28); +} +#endif + +/*** Floating-Point arithmetic ***/ +#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \ +static void gen_f##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0; \ + TCGv_i64 t1; \ + TCGv_i64 t2; \ + TCGv_i64 t3; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + t1 = tcg_temp_new_i64(tcg_ctx); \ + t2 = tcg_temp_new_i64(tcg_ctx); \ + t3 = tcg_temp_new_i64(tcg_ctx); \ + gen_reset_fpstatus(tcg_ctx); \ + get_fpr(tcg_ctx, t0, rA(ctx->opcode)); \ + get_fpr(tcg_ctx, t1, rC(ctx->opcode)); \ + get_fpr(tcg_ctx, t2, rB(ctx->opcode)); \ + gen_helper_f##op(tcg_ctx, t3, tcg_ctx->cpu_env, t0, t1, t2); \ + if (isfloat) { \ + gen_helper_frsp(tcg_ctx, t3, tcg_ctx->cpu_env, t3); \ + } \ + set_fpr(tcg_ctx, rD(ctx->opcode), t3); \ + if (set_fprf) { \ + gen_compute_fprf_float64(tcg_ctx, t3); \ + } \ + if (unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i64(tcg_ctx, t1); \ + tcg_temp_free_i64(tcg_ctx, t2); \ + tcg_temp_free_i64(tcg_ctx, t3); \ +} + +#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \ +_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \ +_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type); + +#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \ +static void gen_f##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0; \ + TCGv_i64 
t1; \ + TCGv_i64 t2; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + t1 = tcg_temp_new_i64(tcg_ctx); \ + t2 = tcg_temp_new_i64(tcg_ctx); \ + gen_reset_fpstatus(tcg_ctx); \ + get_fpr(tcg_ctx, t0, rA(ctx->opcode)); \ + get_fpr(tcg_ctx, t1, rB(ctx->opcode)); \ + gen_helper_f##op(tcg_ctx, t2, tcg_ctx->cpu_env, t0, t1); \ + if (isfloat) { \ + gen_helper_frsp(tcg_ctx, t2, tcg_ctx->cpu_env, t2); \ + } \ + set_fpr(tcg_ctx, rD(ctx->opcode), t2); \ + if (set_fprf) { \ + gen_compute_fprf_float64(tcg_ctx, t2); \ + } \ + if (unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i64(tcg_ctx, t1); \ + tcg_temp_free_i64(tcg_ctx, t2); \ +} +#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \ +_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \ +_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type); + +#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \ +static void gen_f##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0; \ + TCGv_i64 t1; \ + TCGv_i64 t2; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + t1 = tcg_temp_new_i64(tcg_ctx); \ + t2 = tcg_temp_new_i64(tcg_ctx); \ + gen_reset_fpstatus(tcg_ctx); \ + get_fpr(tcg_ctx, t0, rA(ctx->opcode)); \ + get_fpr(tcg_ctx, t1, rC(ctx->opcode)); \ + gen_helper_f##op(tcg_ctx, t2, tcg_ctx->cpu_env, t0, t1); \ + if (isfloat) { \ + gen_helper_frsp(tcg_ctx, t2, tcg_ctx->cpu_env, t2); \ + } \ + set_fpr(tcg_ctx, rD(ctx->opcode), t2); \ + if (set_fprf) { \ + gen_compute_fprf_float64(tcg_ctx, t2); \ + } \ + if (unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i64(tcg_ctx, t1); \ + tcg_temp_free_i64(tcg_ctx, t2); \ +} +#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \ +_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \ +_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type); + +#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \ +static void gen_f##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0; \ + TCGv_i64 t1; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + t1 = tcg_temp_new_i64(tcg_ctx); \ + gen_reset_fpstatus(tcg_ctx); \ + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); \ + gen_helper_f##name(tcg_ctx, t1, tcg_ctx->cpu_env, t0); \ + set_fpr(tcg_ctx, rD(ctx->opcode), t1); \ + if (set_fprf) { \ + gen_compute_fprf_float64(tcg_ctx, t1); \ + } \ + if (unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i64(tcg_ctx, t1); \ +} + +#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \ +static void gen_f##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0; \ + TCGv_i64 t1; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + t1 = tcg_temp_new_i64(tcg_ctx); \ + gen_reset_fpstatus(tcg_ctx); \ + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); \ + gen_helper_f##name(tcg_ctx, t1, tcg_ctx->cpu_env, t0); \ + set_fpr(tcg_ctx, rD(ctx->opcode), t1); \ + if (set_fprf) { \ + gen_compute_fprf_float64(tcg_ctx, t1); \ + } \ + if 
(unlikely(Rc(ctx->opcode) != 0)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i64(tcg_ctx, t1); \ +} + +/* fadd - fadds */ +GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT); +/* fdiv - fdivs */ +GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT); +/* fmul - fmuls */ +GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT); + +/* fre */ +GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT); + +/* fres */ +GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES); + +/* frsqrte */ +GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE); + +/* frsqrtes */ +static void gen_frsqrtes(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_reset_fpstatus(tcg_ctx); + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); + gen_helper_frsqrte(tcg_ctx, t1, tcg_ctx->cpu_env, t0); + gen_helper_frsp(tcg_ctx, t1, tcg_ctx->cpu_env, t1); + set_fpr(tcg_ctx, rD(ctx->opcode), t1); + gen_compute_fprf_float64(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_cr1_from_fpscr(ctx); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* fsel */ +_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL); +/* fsub - fsubs */ +GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT); +/* Optional: */ + +/* fsqrt */ +static void gen_fsqrt(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_reset_fpstatus(tcg_ctx); + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); + gen_helper_fsqrt(tcg_ctx, t1, tcg_ctx->cpu_env, t0); + set_fpr(tcg_ctx, rD(ctx->opcode), t1); + gen_compute_fprf_float64(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_cr1_from_fpscr(ctx); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static void gen_fsqrts(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_reset_fpstatus(tcg_ctx); + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); + gen_helper_fsqrt(tcg_ctx, t1, tcg_ctx->cpu_env, t0); + gen_helper_frsp(tcg_ctx, t1, tcg_ctx->cpu_env, t1); + set_fpr(tcg_ctx, rD(ctx->opcode), t1); + gen_compute_fprf_float64(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + gen_set_cr1_from_fpscr(ctx); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/*** Floating-Point multiply-and-add ***/ +/* fmadd - fmadds */ +GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT); +/* fmsub - fmsubs */ +GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT); +/* fnmadd - fnmadds */ +GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT); +/* fnmsub - fnmsubs */ +GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT); + +/*** Floating-Point round & convert ***/ +/* fctiw */ +GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT); +/* fctiwu */ +GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206); +/* fctiwz */ +GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT); +/* fctiwuz */ +GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206); +/* frsp */ +GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT); +/* fcfid */ +GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64); +/* fcfids */ +GEN_FLOAT_B(cfids,
0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206); +/* fcfidu */ +GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206); +/* fcfidus */ +GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206); +/* fctid */ +GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64); +/* fctidu */ +GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206); +/* fctidz */ +GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64); +/* fctiduz */ +GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206); + +/* frin */ +GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT); +/* friz */ +GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT); +/* frip */ +GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT); +/* frim */ +GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT); + +static void gen_ftdiv(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t0, rA(ctx->opcode)); + get_fpr(tcg_ctx, t1, rB(ctx->opcode)); + gen_helper_ftdiv(tcg_ctx, cpu_crf[crfD(ctx->opcode)], t0, t1); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static void gen_ftsqrt(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); + gen_helper_ftsqrt(tcg_ctx, cpu_crf[crfD(ctx->opcode)], t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/*** Floating-Point compare ***/ + +/* fcmpo */ +static void gen_fcmpo(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 crf; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_reset_fpstatus(tcg_ctx); + crf = tcg_const_i32(tcg_ctx, crfD(ctx->opcode)); + get_fpr(tcg_ctx, t0, rA(ctx->opcode)); + get_fpr(tcg_ctx, t1, rB(ctx->opcode)); + gen_helper_fcmpo(tcg_ctx, tcg_ctx->cpu_env, t0, t1, crf); + tcg_temp_free_i32(tcg_ctx, crf); + gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* fcmpu */ +static void gen_fcmpu(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 crf; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_reset_fpstatus(tcg_ctx); + crf = tcg_const_i32(tcg_ctx, crfD(ctx->opcode)); + get_fpr(tcg_ctx, t0, rA(ctx->opcode)); + get_fpr(tcg_ctx, t1, rB(ctx->opcode)); + gen_helper_fcmpu(tcg_ctx, tcg_ctx->cpu_env, t0, t1, crf); + tcg_temp_free_i32(tcg_ctx, crf); + gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/*** Floating-point move ***/ +/* fabs */ +/* XXX: beware that fabs never checks for NaNs nor updates FPSCR */ +static void gen_fabs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); + tcg_gen_andi_i64(tcg_ctx, t1, t0, ~(1ULL << 63)); + set_fpr(tcg_ctx, rD(ctx->opcode), t1); + if
(unlikely(Rc(ctx->opcode))) { + gen_set_cr1_from_fpscr(ctx); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* fmr - fmr. */ +/* XXX: beware that fmr never checks for NaNs nor updates FPSCR */ +static void gen_fmr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + if (unlikely(Rc(ctx->opcode))) { + gen_set_cr1_from_fpscr(ctx); + } + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* fnabs */ +/* XXX: beware that fnabs never checks for NaNs nor updates FPSCR */ +static void gen_fnabs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); + tcg_gen_ori_i64(tcg_ctx, t1, t0, 1ULL << 63); + set_fpr(tcg_ctx, rD(ctx->opcode), t1); + if (unlikely(Rc(ctx->opcode))) { + gen_set_cr1_from_fpscr(ctx); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* fneg */ +/* XXX: beware that fneg never checks for NaNs nor updates FPSCR */ +static void gen_fneg(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); + tcg_gen_xori_i64(tcg_ctx, t1, t0, 1ULL << 63); + set_fpr(tcg_ctx, rD(ctx->opcode), t1); + if (unlikely(Rc(ctx->opcode))) { + gen_set_cr1_from_fpscr(ctx); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* fcpsgn: PowerPC 2.05 specification */ +/* XXX: beware that fcpsgn never checks for NaNs nor updates FPSCR */ +static void gen_fcpsgn(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + TCGv_i64 t1; + TCGv_i64 t2; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + t2 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t0, rA(ctx->opcode)); + get_fpr(tcg_ctx, t1, rB(ctx->opcode)); + tcg_gen_deposit_i64(tcg_ctx, t2, t0, t1, 0, 63); + set_fpr(tcg_ctx, rD(ctx->opcode), t2); + if (unlikely(Rc(ctx->opcode))) { + gen_set_cr1_from_fpscr(ctx); + } + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +} + +static void gen_fmrgew(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 b0; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + b0 = tcg_temp_new_i64(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); + tcg_gen_shri_i64(tcg_ctx, b0, t0, 32); + get_fpr(tcg_ctx, t0, rA(ctx->opcode)); + tcg_gen_deposit_i64(tcg_ctx, t1, t0, b0, 0, 32); + set_fpr(tcg_ctx, rD(ctx->opcode), t1); + tcg_temp_free_i64(tcg_ctx, b0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static void gen_fmrgow(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + TCGv_i64 t1; + TCGv_i64 t2; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx,
POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + t2 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t0, rB(ctx->opcode)); + get_fpr(tcg_ctx, t1, rA(ctx->opcode)); + tcg_gen_deposit_i64(tcg_ctx, t2, t0, t1, 32, 32); + set_fpr(tcg_ctx, rD(ctx->opcode), t2); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +} + +/*** Floating-Point status & ctrl register ***/ + +/* mcrfs */ +static void gen_mcrfs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv tmp = tcg_temp_new(tcg_ctx); + TCGv_i32 tmask; + TCGv_i64 tnew_fpscr = tcg_temp_new_i64(tcg_ctx); + int bfa; + int nibble; + int shift; + + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + bfa = crfS(ctx->opcode); + nibble = 7 - bfa; + shift = 4 * nibble; + tcg_gen_shri_tl(tcg_ctx, tmp, cpu_fpscr, shift); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], tmp); + tcg_gen_andi_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], + 0xf); + tcg_temp_free(tcg_ctx, tmp); + tcg_gen_extu_tl_i64(tcg_ctx, tnew_fpscr, cpu_fpscr); + /* Only the exception bits (including FX) should be cleared if read */ + tcg_gen_andi_i64(tcg_ctx, tnew_fpscr, tnew_fpscr, + ~((0xF << shift) & FP_EX_CLEAR_BITS)); + /* FEX and VX need to be updated, so don't set fpscr directly */ + tmask = tcg_const_i32(tcg_ctx, 1 << nibble); + gen_helper_store_fpscr(tcg_ctx, tcg_ctx->cpu_env, tnew_fpscr, tmask); + tcg_temp_free_i32(tcg_ctx, tmask); + tcg_temp_free_i64(tcg_ctx, tnew_fpscr); +} + +/* mffs */ +static void gen_mffs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + gen_reset_fpstatus(tcg_ctx); + tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_fpscr); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + if (unlikely(Rc(ctx->opcode))) { + gen_set_cr1_from_fpscr(ctx); + } + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* mffsl */ +static void gen_mffsl(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + + if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { + return; + } + + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + gen_reset_fpstatus(tcg_ctx); + tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_fpscr); + /* Mask everything except mode, status, and enables. */ + tcg_gen_andi_i64(tcg_ctx, t0, t0, FP_DRN | FP_STATUS | FP_ENABLES | FP_RN); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* mffsce */ +static void gen_mffsce(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + TCGv_i32 mask; + + if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { + return; + } + + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + + t0 = tcg_temp_new_i64(tcg_ctx); + + gen_reset_fpstatus(tcg_ctx); + tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_fpscr); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + + /* Clear exception enable bits in the FPSCR. 
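+ The unmodified FPSCR value has already been copied to FRT by the set_fpr() above.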
*/ + tcg_gen_andi_i64(tcg_ctx, t0, t0, ~FP_ENABLES); + mask = tcg_const_i32(tcg_ctx, 0x0003); + gen_helper_store_fpscr(tcg_ctx, tcg_ctx->cpu_env, t0, mask); + + tcg_temp_free_i32(tcg_ctx, mask); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static void gen_helper_mffscrn(DisasContext *ctx, TCGv_i64 t1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i32 mask = tcg_const_i32(tcg_ctx, 0x0001); + + gen_reset_fpstatus(tcg_ctx); + tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_fpscr); + tcg_gen_andi_i64(tcg_ctx, t0, t0, FP_DRN | FP_ENABLES | FP_RN); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + + /* Mask FPSCR value to clear RN. */ + tcg_gen_andi_i64(tcg_ctx, t0, t0, ~FP_RN); + + /* Merge RN into FPSCR value. */ + tcg_gen_or_i64(tcg_ctx, t0, t0, t1); + + gen_helper_store_fpscr(tcg_ctx, tcg_ctx->cpu_env, t0, mask); + + tcg_temp_free_i32(tcg_ctx, mask); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* mffscrn */ +static void gen_mffscrn(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t1; + + if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { + return; + } + + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + + t1 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t1, rB(ctx->opcode)); + /* Mask FRB to get just RN. */ + tcg_gen_andi_i64(tcg_ctx, t1, t1, FP_RN); + + gen_helper_mffscrn(ctx, t1); + + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* mffscrni */ +static void gen_mffscrni(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t1; + + if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { + return; + } + + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + + t1 = tcg_const_i64(tcg_ctx, (uint64_t)RM(ctx->opcode)); + + gen_helper_mffscrn(ctx, t1); + + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* mtfsb0 */ +static void gen_mtfsb0(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t crb; + + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + crb = 31 - crbD(ctx->opcode); + gen_reset_fpstatus(tcg_ctx); + if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) { + TCGv_i32 t0; + t0 = tcg_const_i32(tcg_ctx, crb); + gen_helper_fpscr_clrbit(tcg_ctx, tcg_ctx->cpu_env, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } + if (unlikely(Rc(ctx->opcode) != 0)) { + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[1], cpu_fpscr); + tcg_gen_shri_i32(tcg_ctx, cpu_crf[1], cpu_crf[1], FPSCR_OX); + } +} + +/* mtfsb1 */ +static void gen_mtfsb1(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t crb; + + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + crb = 31 - crbD(ctx->opcode); + gen_reset_fpstatus(tcg_ctx); + /* XXX: we pretend we can only do IEEE floating-point computations */ + if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) { + TCGv_i32 t0; + t0 = tcg_const_i32(tcg_ctx, crb); + gen_helper_fpscr_setbit(tcg_ctx, tcg_ctx->cpu_env, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } + if (unlikely(Rc(ctx->opcode) != 0)) { + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[1], cpu_fpscr); + tcg_gen_shri_i32(tcg_ctx, cpu_crf[1], cpu_crf[1], FPSCR_OX); + } + /* We can raise a deferred exception */ + gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); +} + +/* mtfsf */ +static void gen_mtfsf(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0; + TCGv_i64 t1; + int flm, l, w; + + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); 
+ return; + } + flm = FPFLM(ctx->opcode); + l = FPL(ctx->opcode); + w = FPW(ctx->opcode); + if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + return; + } + gen_reset_fpstatus(tcg_ctx); + if (l) { + t0 = tcg_const_i32(tcg_ctx, (ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff); + } else { + t0 = tcg_const_i32(tcg_ctx, flm << (w * 8)); + } + t1 = tcg_temp_new_i64(tcg_ctx); + get_fpr(tcg_ctx, t1, rB(ctx->opcode)); + gen_helper_store_fpscr(tcg_ctx, tcg_ctx->cpu_env, t1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + if (unlikely(Rc(ctx->opcode) != 0)) { + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[1], cpu_fpscr); + tcg_gen_shri_i32(tcg_ctx, cpu_crf[1], cpu_crf[1], FPSCR_OX); + } + /* We can raise a deferred exception */ + gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* mtfsfi */ +static void gen_mtfsfi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int bf, sh, w; + TCGv_i64 t0; + TCGv_i32 t1; + + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + w = FPW(ctx->opcode); + bf = FPBF(ctx->opcode); + if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); + return; + } + sh = (8 * w) + 7 - bf; + gen_reset_fpstatus(tcg_ctx); + t0 = tcg_const_i64(tcg_ctx, ((uint64_t)FPIMM(ctx->opcode)) << (4 * sh)); + t1 = tcg_const_i32(tcg_ctx, 1 << sh); + gen_helper_store_fpscr(tcg_ctx, tcg_ctx->cpu_env, t0, t1); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + if (unlikely(Rc(ctx->opcode) != 0)) { + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_crf[1], cpu_fpscr); + tcg_gen_shri_i32(tcg_ctx, cpu_crf[1], cpu_crf[1], FPSCR_OX); + } + /* We can raise a deferred exception */ + gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); +} + +/*** Floating-point load ***/ +#define GEN_LDF(name, ldop, opc, type) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 t0; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_FLOAT); \ + EA = tcg_temp_new(tcg_ctx); \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + gen_addr_imm_index(ctx, EA, 0); \ + gen_qemu_##ldop(ctx, t0, EA); \ + set_fpr(tcg_ctx, rD(ctx->opcode), t0); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} + +#define GEN_LDUF(name, ldop, opc, type) \ +static void glue(gen_, name##u)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 t0; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + if (unlikely(rA(ctx->opcode) == 0)) { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_FLOAT); \ + EA = tcg_temp_new(tcg_ctx); \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + gen_addr_imm_index(ctx, EA, 0); \ + gen_qemu_##ldop(ctx, t0, EA); \ + set_fpr(tcg_ctx, rD(ctx->opcode), t0); \ + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} + +#define GEN_LDUXF(name, ldop, opc, type) \ +static void glue(gen_, name##ux)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 t0; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + if 
(unlikely(rA(ctx->opcode) == 0)) { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_FLOAT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + gen_qemu_##ldop(ctx, t0, EA); \ + set_fpr(tcg_ctx, rD(ctx->opcode), t0); \ + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} + +#define GEN_LDXF(name, ldop, opc2, opc3, type) \ +static void glue(gen_, name##x)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 t0; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_FLOAT); \ + EA = tcg_temp_new(tcg_ctx); \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + gen_qemu_##ldop(ctx, t0, EA); \ + set_fpr(tcg_ctx, rD(ctx->opcode), t0); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} + +#define GEN_LDFS(name, ldop, op, type) \ +GEN_LDF(name, ldop, op | 0x20, type); \ +GEN_LDUF(name, ldop, op | 0x21, type); \ +GEN_LDUXF(name, ldop, op | 0x01, type); \ +GEN_LDXF(name, ldop, 0x17, op | 0x00, type) + +static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_qemu_ld_i32(tcg_ctx, tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL)); + gen_helper_todouble(tcg_ctx, dest, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + + /* lfd lfdu lfdux lfdx */ +GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT); + /* lfs lfsu lfsux lfsx */ +GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT); + +/* lfdepx (external PID lfdx) */ +static void gen_lfdepx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 t0; + CHK_SV; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + gen_set_access_type(ctx, ACCESS_FLOAT); + EA = tcg_temp_new(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + gen_addr_reg_index(ctx, EA); + tcg_gen_qemu_ld_i64(tcg_ctx, t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_Q)); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* lfdp */ +static void gen_lfdp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 t0; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + gen_set_access_type(ctx, ACCESS_FLOAT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_imm_index(ctx, EA, 0); + t0 = tcg_temp_new_i64(tcg_ctx); + /* + * We only need to swap high and low halves. gen_qemu_ld64_i64 + * does necessary 64-bit byteswap already. 
+ */ + if (unlikely(ctx->le_mode)) { + gen_qemu_ld64_i64(ctx, t0, EA); + set_fpr(tcg_ctx, rD(ctx->opcode) + 1, t0); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + gen_qemu_ld64_i64(ctx, t0, EA); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + } else { + gen_qemu_ld64_i64(ctx, t0, EA); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + gen_qemu_ld64_i64(ctx, t0, EA); + set_fpr(tcg_ctx, rD(ctx->opcode) + 1, t0); + } + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* lfdpx */ +static void gen_lfdpx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 t0; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + gen_set_access_type(ctx, ACCESS_FLOAT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + t0 = tcg_temp_new_i64(tcg_ctx); + /* + * We only need to swap high and low halves. gen_qemu_ld64_i64 + * does necessary 64-bit byteswap already. + */ + if (unlikely(ctx->le_mode)) { + gen_qemu_ld64_i64(ctx, t0, EA); + set_fpr(tcg_ctx, rD(ctx->opcode) + 1, t0); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + gen_qemu_ld64_i64(ctx, t0, EA); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + } else { + gen_qemu_ld64_i64(ctx, t0, EA); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + gen_qemu_ld64_i64(ctx, t0, EA); + set_fpr(tcg_ctx, rD(ctx->opcode) + 1, t0); + } + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* lfiwax */ +static void gen_lfiwax(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv t0; + TCGv_i64 t1; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + gen_set_access_type(ctx, ACCESS_FLOAT); + EA = tcg_temp_new(tcg_ctx); + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_addr_reg_index(ctx, EA); + gen_qemu_ld32s(ctx, t0, EA); + tcg_gen_ext_tl_i64(tcg_ctx, t1, t0); + set_fpr(tcg_ctx, rD(ctx->opcode), t1); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* lfiwzx */ +static void gen_lfiwzx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 t0; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + gen_set_access_type(ctx, ACCESS_FLOAT); + EA = tcg_temp_new(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + gen_addr_reg_index(ctx, EA); + gen_qemu_ld32u_i64(ctx, t0, EA); + set_fpr(tcg_ctx, rD(ctx->opcode), t0); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, t0); +} +/*** Floating-point store ***/ +#define GEN_STF(name, stop, opc, type) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 t0; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_FLOAT); \ + EA = tcg_temp_new(tcg_ctx); \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + gen_addr_imm_index(ctx, EA, 0); \ + get_fpr(tcg_ctx, t0, rS(ctx->opcode)); \ + gen_qemu_##stop(ctx, t0, EA); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} + +#define GEN_STUF(name, stop, opc, type) \ +static void glue(gen_, name##u)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 t0; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + if (unlikely(rA(ctx->opcode) == 0)) { \ + gen_inval_exception(ctx, 
POWERPC_EXCP_INVAL_INVAL); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_FLOAT); \ + EA = tcg_temp_new(tcg_ctx); \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + gen_addr_imm_index(ctx, EA, 0); \ + get_fpr(tcg_ctx, t0, rS(ctx->opcode)); \ + gen_qemu_##stop(ctx, t0, EA); \ + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} + +#define GEN_STUXF(name, stop, opc, type) \ +static void glue(gen_, name##ux)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 t0; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + if (unlikely(rA(ctx->opcode) == 0)) { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_FLOAT); \ + EA = tcg_temp_new(tcg_ctx); \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + get_fpr(tcg_ctx, t0, rS(ctx->opcode)); \ + gen_qemu_##stop(ctx, t0, EA); \ + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], EA); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} + +#define GEN_STXF(name, stop, opc2, opc3, type) \ +static void glue(gen_, name##x)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 t0; \ + if (unlikely(!ctx->fpu_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_FPU); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_FLOAT); \ + EA = tcg_temp_new(tcg_ctx); \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + get_fpr(tcg_ctx, t0, rS(ctx->opcode)); \ + gen_qemu_##stop(ctx, t0, EA); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} + +#define GEN_STFS(name, stop, op, type) \ +GEN_STF(name, stop, op | 0x20, type); \ +GEN_STUF(name, stop, op | 0x21, type); \ +GEN_STUXF(name, stop, op | 0x01, type); \ +GEN_STXF(name, stop, 0x17, op | 0x00, type) + +static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + gen_helper_tosingle(tcg_ctx, tmp, src); + tcg_gen_qemu_st_i32(tcg_ctx, tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL)); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +/* stfd stfdu stfdux stfdx */ +GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT); +/* stfs stfsu stfsux stfsx */ +GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT); + +/* stfdepx (external PID lfdx) */ +static void gen_stfdepx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 t0; + CHK_SV; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + gen_set_access_type(ctx, ACCESS_FLOAT); + EA = tcg_temp_new(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + gen_addr_reg_index(ctx, EA); + get_fpr(tcg_ctx, t0, rD(ctx->opcode)); + tcg_gen_qemu_st_i64(tcg_ctx, t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_Q)); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* stfdp */ +static void gen_stfdp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 t0; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + gen_set_access_type(ctx, ACCESS_FLOAT); + EA = tcg_temp_new(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + gen_addr_imm_index(ctx, EA, 0); + /* + * We only need to swap high and low halves. gen_qemu_st64_i64 + * does necessary 64-bit byteswap already. 
+ */ + if (unlikely(ctx->le_mode)) { + get_fpr(tcg_ctx, t0, rD(ctx->opcode) + 1); + gen_qemu_st64_i64(ctx, t0, EA); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + get_fpr(tcg_ctx, t0, rD(ctx->opcode)); + gen_qemu_st64_i64(ctx, t0, EA); + } else { + get_fpr(tcg_ctx, t0, rD(ctx->opcode)); + gen_qemu_st64_i64(ctx, t0, EA); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + get_fpr(tcg_ctx, t0, rD(ctx->opcode) + 1); + gen_qemu_st64_i64(ctx, t0, EA); + } + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* stfdpx */ +static void gen_stfdpx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 t0; + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + gen_set_access_type(ctx, ACCESS_FLOAT); + EA = tcg_temp_new(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + gen_addr_reg_index(ctx, EA); + /* + * We only need to swap high and low halves. gen_qemu_st64_i64 + * does necessary 64-bit byteswap already. + */ + if (unlikely(ctx->le_mode)) { + get_fpr(tcg_ctx, t0, rD(ctx->opcode) + 1); + gen_qemu_st64_i64(ctx, t0, EA); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + get_fpr(tcg_ctx, t0, rD(ctx->opcode)); + gen_qemu_st64_i64(ctx, t0, EA); + } else { + get_fpr(tcg_ctx, t0, rD(ctx->opcode)); + gen_qemu_st64_i64(ctx, t0, EA); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + get_fpr(tcg_ctx, t0, rD(ctx->opcode) + 1); + gen_qemu_st64_i64(ctx, t0, EA); + } + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* Optional: */ +static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_trunc_i64_tl(tcg_ctx, t0, arg1), + gen_qemu_st32(ctx, t0, arg2); + tcg_temp_free(tcg_ctx, t0); +} +/* stfiwx */ +GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX); + +/* POWER2 specific instructions */ +/* Quad manipulation (load/store two floats at a time) */ + +/* lfq */ +static void gen_lfq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rd = rD(ctx->opcode); + TCGv t0; + TCGv_i64 t1; + gen_set_access_type(ctx, ACCESS_FLOAT); + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_addr_imm_index(ctx, t0, 0); + gen_qemu_ld64_i64(ctx, t1, t0); + set_fpr(tcg_ctx, rd, t1); + gen_addr_add(ctx, t0, t0, 8); + gen_qemu_ld64_i64(ctx, t1, t0); + set_fpr(tcg_ctx, (rd + 1) % 32, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* lfqu */ +static void gen_lfqu(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int ra = rA(ctx->opcode); + int rd = rD(ctx->opcode); + TCGv t0, t1; + TCGv_i64 t2; + gen_set_access_type(ctx, ACCESS_FLOAT); + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + t2 = tcg_temp_new_i64(tcg_ctx); + gen_addr_imm_index(ctx, t0, 0); + gen_qemu_ld64_i64(ctx, t2, t0); + set_fpr(tcg_ctx, rd, t2); + gen_addr_add(ctx, t1, t0, 8); + gen_qemu_ld64_i64(ctx, t2, t1); + set_fpr(tcg_ctx, (rd + 1) % 32, t2); + if (ra != 0) { + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], t0); + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +} + +/* lfqux */ +static void gen_lfqux(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int ra = rA(ctx->opcode); + int rd = rD(ctx->opcode); + gen_set_access_type(ctx, ACCESS_FLOAT); + TCGv t0, t1; + TCGv_i64 t2; + t2 = tcg_temp_new_i64(tcg_ctx); + t0 = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, t0); + gen_qemu_ld64_i64(ctx, t2, t0); + set_fpr(tcg_ctx, rd, t2); + t1 = 
tcg_temp_new(tcg_ctx); + gen_addr_add(ctx, t1, t0, 8); + gen_qemu_ld64_i64(ctx, t2, t1); + set_fpr(tcg_ctx, (rd + 1) % 32, t2); + tcg_temp_free(tcg_ctx, t1); + if (ra != 0) { + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], t0); + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t2); +} + +/* lfqx */ +static void gen_lfqx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rd = rD(ctx->opcode); + TCGv t0; + TCGv_i64 t1; + gen_set_access_type(ctx, ACCESS_FLOAT); + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_addr_reg_index(ctx, t0); + gen_qemu_ld64_i64(ctx, t1, t0); + set_fpr(tcg_ctx, rd, t1); + gen_addr_add(ctx, t0, t0, 8); + gen_qemu_ld64_i64(ctx, t1, t0); + set_fpr(tcg_ctx, (rd + 1) % 32, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* stfq */ +static void gen_stfq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rd = rD(ctx->opcode); + TCGv t0; + TCGv_i64 t1; + gen_set_access_type(ctx, ACCESS_FLOAT); + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_addr_imm_index(ctx, t0, 0); + get_fpr(tcg_ctx, t1, rd); + gen_qemu_st64_i64(ctx, t1, t0); + gen_addr_add(ctx, t0, t0, 8); + get_fpr(tcg_ctx, t1, (rd + 1) % 32); + gen_qemu_st64_i64(ctx, t1, t0); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* stfqu */ +static void gen_stfqu(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int ra = rA(ctx->opcode); + int rd = rD(ctx->opcode); + TCGv t0, t1; + TCGv_i64 t2; + gen_set_access_type(ctx, ACCESS_FLOAT); + t2 = tcg_temp_new_i64(tcg_ctx); + t0 = tcg_temp_new(tcg_ctx); + gen_addr_imm_index(ctx, t0, 0); + get_fpr(tcg_ctx, t2, rd); + gen_qemu_st64_i64(ctx, t2, t0); + t1 = tcg_temp_new(tcg_ctx); + gen_addr_add(ctx, t1, t0, 8); + get_fpr(tcg_ctx, t2, (rd + 1) % 32); + gen_qemu_st64_i64(ctx, t2, t1); + tcg_temp_free(tcg_ctx, t1); + if (ra != 0) { + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], t0); + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t2); +} + +/* stfqux */ +static void gen_stfqux(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int ra = rA(ctx->opcode); + int rd = rD(ctx->opcode); + TCGv t0, t1; + TCGv_i64 t2; + gen_set_access_type(ctx, ACCESS_FLOAT); + t2 = tcg_temp_new_i64(tcg_ctx); + t0 = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, t0); + get_fpr(tcg_ctx, t2, rd); + gen_qemu_st64_i64(ctx, t2, t0); + t1 = tcg_temp_new(tcg_ctx); + gen_addr_add(ctx, t1, t0, 8); + get_fpr(tcg_ctx, t2, (rd + 1) % 32); + gen_qemu_st64_i64(ctx, t2, t1); + tcg_temp_free(tcg_ctx, t1); + if (ra != 0) { + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[ra], t0); + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t2); +} + +/* stfqx */ +static void gen_stfqx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rd = rD(ctx->opcode); + TCGv t0; + TCGv_i64 t1; + gen_set_access_type(ctx, ACCESS_FLOAT); + t1 = tcg_temp_new_i64(tcg_ctx); + t0 = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, t0); + get_fpr(tcg_ctx, t1, rd); + gen_qemu_st64_i64(ctx, t1, t0); + gen_addr_add(ctx, t0, t0, 8); + get_fpr(tcg_ctx, t1, (rd + 1) % 32); + gen_qemu_st64_i64(ctx, t1, t0); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +#undef _GEN_FLOAT_ACB +#undef GEN_FLOAT_ACB +#undef _GEN_FLOAT_AB +#undef GEN_FLOAT_AB +#undef _GEN_FLOAT_AC +#undef GEN_FLOAT_AC +#undef GEN_FLOAT_B +#undef GEN_FLOAT_BS + +#undef GEN_LDF +#undef GEN_LDUF +#undef GEN_LDUXF +#undef GEN_LDXF +#undef GEN_LDFS + +#undef GEN_STF +#undef GEN_STUF +#undef 
GEN_STUXF +#undef GEN_STXF +#undef GEN_STFS diff --git a/qemu/target/ppc/translate/fp-ops.inc.c b/qemu/target/ppc/translate/fp-ops.inc.c new file mode 100644 index 00000000..88fab656 --- /dev/null +++ b/qemu/target/ppc/translate/fp-ops.inc.c @@ -0,0 +1,119 @@ +#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \ +GEN_HANDLER(f##name, op1, op2, 0xFF, 0x00000000, type) +#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \ +_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type), \ +_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type) +#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \ +GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type) +#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \ +_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type), \ +_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type) +#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \ +GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type) +#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \ +_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type), \ +_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type) +#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \ +GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type) +#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \ +GEN_HANDLER(f##name, op1, op2, 0xFF, 0x001F07C0, type) + +GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT), +GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT), +GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT), +GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT), +GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES), +GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE), +_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL), +GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT), +GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT), +GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT), +GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT), +GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT), +GEN_HANDLER_E(ftdiv, 0x3F, 0x00, 0x04, 1, PPC_NONE, PPC2_FP_TST_ISA206), +GEN_HANDLER_E(ftsqrt, 0x3F, 0x00, 0x05, 1, PPC_NONE, PPC2_FP_TST_ISA206), +GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT), +GEN_HANDLER_E(fctiwu, 0x3F, 0x0E, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206), +GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT), +GEN_HANDLER_E(fctiwuz, 0x3F, 0x0F, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206), +GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT), +GEN_HANDLER_E(fcfid, 0x3F, 0x0E, 0x1A, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64), +GEN_HANDLER_E(fcfids, 0x3B, 0x0E, 0x1A, 0, PPC_NONE, PPC2_FP_CVT_ISA206), +GEN_HANDLER_E(fcfidu, 0x3F, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206), +GEN_HANDLER_E(fcfidus, 0x3B, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206), +GEN_HANDLER_E(fctid, 0x3F, 0x0E, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64), +GEN_HANDLER_E(fctidu, 0x3F, 0x0E, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206), +GEN_HANDLER_E(fctidz, 0x3F, 0x0F, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64), +GEN_HANDLER_E(fctiduz, 0x3F, 0x0F, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206), +GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT), +GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT), +GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT), +GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT), + +#define GEN_LDF(name, ldop, opc, type) \ +GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), +#define GEN_LDUF(name, ldop, opc, type) \ +GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), +#define GEN_LDUXF(name, ldop, opc, type) \ +GEN_HANDLER(name##ux, 0x1F, 0x17, 
opc, 0x00000001, type), +#define GEN_LDXF(name, ldop, opc2, opc3, type) \ +GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type), +#define GEN_LDFS(name, ldop, op, type) \ +GEN_LDF(name, ldop, op | 0x20, type) \ +GEN_LDUF(name, ldop, op | 0x21, type) \ +GEN_LDUXF(name, ldop, op | 0x01, type) \ +GEN_LDXF(name, ldop, 0x17, op | 0x00, type) + +GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT) +GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT) +GEN_HANDLER_E(lfdepx, 0x1F, 0x1F, 0x12, 0x00000001, PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205), +GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206), +GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205), + +#define GEN_STF(name, stop, opc, type) \ +GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), +#define GEN_STUF(name, stop, opc, type) \ +GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), +#define GEN_STUXF(name, stop, opc, type) \ +GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type), +#define GEN_STXF(name, stop, opc2, opc3, type) \ +GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type), +#define GEN_STFS(name, stop, op, type) \ +GEN_STF(name, stop, op | 0x20, type) \ +GEN_STUF(name, stop, op | 0x21, type) \ +GEN_STUXF(name, stop, op | 0x01, type) \ +GEN_STXF(name, stop, 0x17, op | 0x00, type) + +GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT) +GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT) +GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX) +GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206), +GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205), + +GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES), +GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT), +GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT), +GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT), +GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT), +GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT), +GEN_HANDLER(fmr, 0x3F, 0x08, 0x02, 0x001F0000, PPC_FLOAT), +GEN_HANDLER(fnabs, 0x3F, 0x08, 0x04, 0x001F0000, PPC_FLOAT), +GEN_HANDLER(fneg, 0x3F, 0x08, 0x01, 0x001F0000, PPC_FLOAT), +GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205), +GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207), +GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207), +GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT), +GEN_HANDLER_E_2(mffs, 0x3F, 0x07, 0x12, 0x00, 0x00000000, PPC_FLOAT, PPC_NONE), +GEN_HANDLER_E_2(mffsce, 0x3F, 0x07, 0x12, 0x01, 0x00000000, PPC_FLOAT, + PPC2_ISA300), +GEN_HANDLER_E_2(mffsl, 0x3F, 0x07, 0x12, 0x18, 0x00000000, PPC_FLOAT, + PPC2_ISA300), +GEN_HANDLER_E_2(mffscrn, 0x3F, 0x07, 0x12, 0x16, 0x00000000, PPC_FLOAT, + PPC_NONE), +GEN_HANDLER_E_2(mffscrni, 0x3F, 0x07, 0x12, 0x17, 0x00000000, PPC_FLOAT, + PPC_NONE), +GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT), +GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT), +GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT), +GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006e0800, PPC_FLOAT), diff --git a/qemu/target/ppc/translate/spe-impl.inc.c b/qemu/target/ppc/translate/spe-impl.inc.c new file mode 100644 index 00000000..3b229861 --- /dev/null +++ b/qemu/target/ppc/translate/spe-impl.inc.c @@ -0,0 +1,1283 @@ +/* + * translate-spe.c + * + * Freescale SPE extension translation + */ + +/*** SPE extension ***/ +/* Register moves */ + 
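Throughout the SPE code below, a 64-bit "ev" register is modeled as a pair of 32-bit halves: cpu_gprh[reg] holds bits 32..63 and cpu_gpr[reg] holds bits 0..31, with gen_load_gpr64/gen_store_gpr64 (defined a few lines down) converting between the pair and a single TCGv_i64. A plain-C sketch of that round trip; ev_pack and ev_unpack are illustrative names only:

#include <stdint.h>

/* Mirrors tcg_gen_concat_tl_i64 as used by gen_load_gpr64: combine the
 * {high, low} halves of an SPE register pair into one 64-bit value. */
static uint64_t ev_pack(uint32_t hi, uint32_t lo)
{
    return ((uint64_t)hi << 32) | lo;
}

/* Mirrors tcg_gen_extr_i64_tl as used by gen_store_gpr64: split a 64-bit
 * value back into the {high, low} pair. */
static void ev_unpack(uint64_t v, uint32_t *hi, uint32_t *lo)
{
    *hi = (uint32_t)(v >> 32);
    *lo = (uint32_t)v;
}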
+static inline void gen_evmra(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + /* tmp := rA_lo + rA_hi << 32 */ + tcg_gen_concat_tl_i64(tcg_ctx, tmp, cpu_gpr[rA(ctx->opcode)], + cpu_gprh[rA(ctx->opcode)]); + + /* spe_acc := tmp */ + tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); + tcg_temp_free_i64(tcg_ctx, tmp); + + /* rD := rA */ + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); +} + +static inline void gen_load_gpr64(TCGContext *tcg_ctx, TCGv_i64 t, int reg) +{ + tcg_gen_concat_tl_i64(tcg_ctx, t, cpu_gpr[reg], cpu_gprh[reg]); +} + +static inline void gen_store_gpr64(TCGContext *tcg_ctx, int reg, TCGv_i64 t) +{ + tcg_gen_extr_i64_tl(tcg_ctx, cpu_gpr[reg], cpu_gprh[reg], t); +} + +#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \ +static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ +{ \ + if (Rc(ctx->opcode)) \ + gen_##name1(ctx); \ + else \ + gen_##name0(ctx); \ +} + +/* Handler for undefined SPE opcodes */ +static inline void gen_speundef(DisasContext *ctx) +{ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); +} + +/* SPE logic */ +#define GEN_SPEOP_LOGIC2(name, tcg_op) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + tcg_op(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \ + cpu_gpr[rB(ctx->opcode)]); \ + tcg_op(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], \ + cpu_gprh[rB(ctx->opcode)]); \ +} + +GEN_SPEOP_LOGIC2(evand, tcg_gen_and_tl); +GEN_SPEOP_LOGIC2(evandc, tcg_gen_andc_tl); +GEN_SPEOP_LOGIC2(evxor, tcg_gen_xor_tl); +GEN_SPEOP_LOGIC2(evor, tcg_gen_or_tl); +GEN_SPEOP_LOGIC2(evnor, tcg_gen_nor_tl); +GEN_SPEOP_LOGIC2(eveqv, tcg_gen_eqv_tl); +GEN_SPEOP_LOGIC2(evorc, tcg_gen_orc_tl); +GEN_SPEOP_LOGIC2(evnand, tcg_gen_nand_tl); + +/* SPE logic immediate */ +#define GEN_SPEOP_TCG_LOGIC_IMM2(name, tcg_opi) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 t0; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + t0 = tcg_temp_new_i32(tcg_ctx); \ + \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); \ + tcg_opi(tcg_ctx, t0, t0, rB(ctx->opcode)); \ + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ + \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gprh[rA(ctx->opcode)]); \ + tcg_opi(tcg_ctx, t0, t0, rB(ctx->opcode)); \ + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); \ + \ + tcg_temp_free_i32(tcg_ctx, t0); \ +} +GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32); +GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32); +GEN_SPEOP_TCG_LOGIC_IMM2(evsrwis, tcg_gen_sari_i32); +GEN_SPEOP_TCG_LOGIC_IMM2(evrlwi, tcg_gen_rotli_i32); + +/* SPE arithmetic */ +#define GEN_SPEOP_ARITH1(name, tcg_op) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 t0; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + t0 = tcg_temp_new_i32(tcg_ctx); \ + \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); \ + tcg_op(tcg_ctx, t0, t0); \ + 
tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ + \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gprh[rA(ctx->opcode)]); \ + tcg_op(tcg_ctx, t0, t0); \ + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); \ + \ + tcg_temp_free_i32(tcg_ctx, t0); \ +} + +GEN_SPEOP_ARITH1(evabs, tcg_gen_abs_i32); +GEN_SPEOP_ARITH1(evneg, tcg_gen_neg_i32); +GEN_SPEOP_ARITH1(evextsb, tcg_gen_ext8s_i32); +GEN_SPEOP_ARITH1(evextsh, tcg_gen_ext16s_i32); +static inline void gen_op_evrndw(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1) +{ + tcg_gen_addi_i32(tcg_ctx, ret, arg1, 0x8000); + tcg_gen_ext16u_i32(tcg_ctx, ret, ret); +} +GEN_SPEOP_ARITH1(evrndw, gen_op_evrndw); +GEN_SPEOP_ARITH1(evcntlsw, gen_helper_cntlsw32); +GEN_SPEOP_ARITH1(evcntlzw, gen_helper_cntlzw32); + +#define GEN_SPEOP_ARITH2(name, tcg_op) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 t0, t1; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + t0 = tcg_temp_new_i32(tcg_ctx); \ + t1 = tcg_temp_new_i32(tcg_ctx); \ + \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); \ + tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); \ + tcg_op(tcg_ctx, t0, t0, t1); \ + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ + \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gprh[rA(ctx->opcode)]); \ + tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gprh[rB(ctx->opcode)]); \ + tcg_op(tcg_ctx, t0, t0, t1); \ + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); \ + \ + tcg_temp_free_i32(tcg_ctx, t0); \ + tcg_temp_free_i32(tcg_ctx, t1); \ +} + +static inline void gen_op_evsrwu(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGv_i32 t0 = tcg_temp_local_new_i32(tcg_ctx); + + /* No error here: 6 bits are used */ + tcg_gen_andi_i32(tcg_ctx, t0, arg2, 0x3F); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, t0, 32, l1); + tcg_gen_shr_i32(tcg_ctx, ret, arg1, t0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_i32(tcg_ctx, ret, 0); + gen_set_label(tcg_ctx, l2); + tcg_temp_free_i32(tcg_ctx, t0); +} +GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu); +static inline void gen_op_evsrws(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGv_i32 t0 = tcg_temp_local_new_i32(tcg_ctx); + + /* No error here: 6 bits are used */ + tcg_gen_andi_i32(tcg_ctx, t0, arg2, 0x3F); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, t0, 32, l1); + tcg_gen_sar_i32(tcg_ctx, ret, arg1, t0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_i32(tcg_ctx, ret, 0); + gen_set_label(tcg_ctx, l2); + tcg_temp_free_i32(tcg_ctx, t0); +} +GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws); +static inline void gen_op_evslw(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGv_i32 t0 = tcg_temp_local_new_i32(tcg_ctx); + + /* No error here: 6 bits are used */ + tcg_gen_andi_i32(tcg_ctx, t0, arg2, 0x3F); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, t0, 32, l1); + tcg_gen_shl_i32(tcg_ctx, ret, arg1, t0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_i32(tcg_ctx, ret, 0); + gen_set_label(tcg_ctx, l2); + tcg_temp_free_i32(tcg_ctx, t0); +} +GEN_SPEOP_ARITH2(evslw, gen_op_evslw); +static inline void 
gen_op_evrlw(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, t0, arg2, 0x1F); + tcg_gen_rotl_i32(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); +} +GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw); +static inline void gen_evmergehi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); +} +GEN_SPEOP_ARITH2(evaddw, tcg_gen_add_i32); +static inline void gen_op_evsubf(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_sub_i32(tcg_ctx, ret, arg2, arg1); +} +GEN_SPEOP_ARITH2(evsubfw, gen_op_evsubf); + +/* SPE arithmetic immediate */ +#define GEN_SPEOP_ARITH_IMM2(name, tcg_op) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 t0; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + t0 = tcg_temp_new_i32(tcg_ctx); \ + \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)]); \ + tcg_op(tcg_ctx, t0, t0, rA(ctx->opcode)); \ + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ + \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gprh[rB(ctx->opcode)]); \ + tcg_op(tcg_ctx, t0, t0, rA(ctx->opcode)); \ + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); \ + \ + tcg_temp_free_i32(tcg_ctx, t0); \ +} +GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32); +GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32); + +/* SPE comparison */ +#define GEN_SPEOP_COMP(name, tcg_cond) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + TCGLabel *l1 = gen_new_label(tcg_ctx); \ + TCGLabel *l2 = gen_new_label(tcg_ctx); \ + TCGLabel *l3 = gen_new_label(tcg_ctx); \ + TCGLabel *l4 = gen_new_label(tcg_ctx); \ + \ + tcg_gen_ext32s_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); \ + tcg_gen_ext32s_tl(tcg_ctx, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ + tcg_gen_ext32s_tl(tcg_ctx, cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); \ + tcg_gen_ext32s_tl(tcg_ctx, cpu_gprh[rB(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); \ + \ + tcg_gen_brcond_tl(tcg_ctx, tcg_cond, cpu_gpr[rA(ctx->opcode)], \ + cpu_gpr[rB(ctx->opcode)], l1); \ + tcg_gen_movi_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], 0); \ + tcg_gen_br(tcg_ctx, l2); \ + gen_set_label(tcg_ctx, l1); \ + tcg_gen_movi_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], \ + CRF_CL | CRF_CH_OR_CL | CRF_CH_AND_CL); \ + gen_set_label(tcg_ctx, l2); \ + tcg_gen_brcond_tl(tcg_ctx, tcg_cond, cpu_gprh[rA(ctx->opcode)], \ + cpu_gprh[rB(ctx->opcode)], l3); \ + tcg_gen_andi_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \ + ~(CRF_CH | CRF_CH_AND_CL)); \ + tcg_gen_br(tcg_ctx, l4); \ + gen_set_label(tcg_ctx, l3); \ + tcg_gen_ori_i32(tcg_ctx, cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \ + CRF_CH | CRF_CH_OR_CL); \ + gen_set_label(tcg_ctx, l4); \ +} +GEN_SPEOP_COMP(evcmpgtu, TCG_COND_GTU); +GEN_SPEOP_COMP(evcmpgts, TCG_COND_GT); +GEN_SPEOP_COMP(evcmpltu, TCG_COND_LTU); +GEN_SPEOP_COMP(evcmplts, TCG_COND_LT); +GEN_SPEOP_COMP(evcmpeq, TCG_COND_EQ); + +/* SPE misc */ +static inline void
gen_brinc(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + /* Note: brinc is usable even if SPE is disabled */ + gen_helper_brinc(tcg_ctx, cpu_gpr[rD(ctx->opcode)], + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); +} + +static inline void gen_evmergelo(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); +} + +static inline void gen_evmergehilo(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); +} + +static inline void gen_evmergelohi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + if (rD(ctx->opcode) == rA(ctx->opcode)) { + TCGv tmp = tcg_temp_new(tcg_ctx); + tcg_gen_mov_tl(tcg_ctx, tmp, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], tmp); + tcg_temp_free(tcg_ctx, tmp); + } else { + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + } +} + +static inline void gen_evsplati(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint64_t imm = ((int32_t)(rA(ctx->opcode) << 27)) >> 27; + + tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], imm); + tcg_gen_movi_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], imm); +} + +static inline void gen_evsplatfi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint64_t imm = rA(ctx->opcode) << 27; + + tcg_gen_movi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], imm); + tcg_gen_movi_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], imm); +} + +static inline void gen_evsel(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + TCGLabel *l3 = gen_new_label(tcg_ctx); + TCGLabel *l4 = gen_new_label(tcg_ctx); + TCGv_i32 t0 = tcg_temp_local_new_i32(tcg_ctx); + + tcg_gen_andi_i32(tcg_ctx, t0, cpu_crf[ctx->opcode & 0x07], 1 << 3); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, t0, 0, l1); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); + gen_set_label(tcg_ctx, l2); + tcg_gen_andi_i32(tcg_ctx, t0, cpu_crf[ctx->opcode & 0x07], 1 << 2); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, t0, 0, l3); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + tcg_gen_br(tcg_ctx, l4); + gen_set_label(tcg_ctx, l3); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); + gen_set_label(tcg_ctx, l4); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void gen_evsel0(DisasContext *ctx) +{ + gen_evsel(ctx); +} + +static void gen_evsel1(DisasContext *ctx) +{ + gen_evsel(ctx); +} + +static void gen_evsel2(DisasContext *ctx) +{ + gen_evsel(ctx); +} + +static void gen_evsel3(DisasContext *ctx) +{ + gen_evsel(ctx); +} + +/* 
Multiply */ + +static inline void gen_evmwumi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0, t1; + + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + + /* t0 := rA; t1 := rB */ + tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_ext32u_i64(tcg_ctx, t0, t0); + tcg_gen_extu_tl_i64(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); + tcg_gen_ext32u_i64(tcg_ctx, t1, t1); + + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); /* t0 := rA * rB */ + + gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); /* rD := t0 */ + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static inline void gen_evmwumia(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 tmp; + + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + + gen_evmwumi(ctx); /* rD := rA * rB */ + + tmp = tcg_temp_new_i64(tcg_ctx); + + /* acc := rD */ + gen_load_gpr64(tcg_ctx, tmp, rD(ctx->opcode)); + tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static inline void gen_evmwumiaa(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 acc; + TCGv_i64 tmp; + + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + + gen_evmwumi(ctx); /* rD := rA * rB */ + + acc = tcg_temp_new_i64(tcg_ctx); + tmp = tcg_temp_new_i64(tcg_ctx); + + /* tmp := rD */ + gen_load_gpr64(tcg_ctx, tmp, rD(ctx->opcode)); + + /* Load acc */ + tcg_gen_ld_i64(tcg_ctx, acc, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); + + /* acc := tmp + acc */ + tcg_gen_add_i64(tcg_ctx, acc, acc, tmp); + + /* Store acc */ + tcg_gen_st_i64(tcg_ctx, acc, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); + + /* rD := acc */ + gen_store_gpr64(tcg_ctx, rD(ctx->opcode), acc); + + tcg_temp_free_i64(tcg_ctx, acc); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static inline void gen_evmwsmi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0, t1; + + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + + /* t0 := rA; t1 := rB */ + tcg_gen_extu_tl_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + tcg_gen_extu_tl_i64(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); + tcg_gen_ext32s_i64(tcg_ctx, t1, t1); + + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); /* t0 := rA * rB */ + + gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); /* rD := t0 */ + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static inline void gen_evmwsmia(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 tmp; + + gen_evmwsmi(ctx); /* rD := rA * rB */ + + tmp = tcg_temp_new_i64(tcg_ctx); + + /* acc := rD */ + gen_load_gpr64(tcg_ctx, tmp, rD(ctx->opcode)); + tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); + + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static inline void gen_evmwsmiaa(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 acc = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + gen_evmwsmi(ctx); /* rD := rA * rB */ + + acc = tcg_temp_new_i64(tcg_ctx); + tmp = tcg_temp_new_i64(tcg_ctx); + + /* tmp := rD */ + gen_load_gpr64(tcg_ctx, tmp, rD(ctx->opcode)); + + /* Load acc */ + tcg_gen_ld_i64(tcg_ctx, acc, 
tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); + + /* acc := tmp + acc */ + tcg_gen_add_i64(tcg_ctx, acc, acc, tmp); + + /* Store acc */ + tcg_gen_st_i64(tcg_ctx, acc, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_acc)); + + /* rD := acc */ + gen_store_gpr64(tcg_ctx, rD(ctx->opcode), acc); + + tcg_temp_free_i64(tcg_ctx, acc); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// +GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); +GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// +GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); +GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); //// +GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); //// +GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); //// +GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE); // +GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE); +GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); //// +GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// +GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE); //// +GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE); //// +GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); //// +GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// +GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE); //// +GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// +GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); +GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE); // +GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE); +GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE); //// +GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE); //// +GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE); //// +GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE); //// +GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE); //// + +/* SPE load and stores */ +static inline void gen_addr_spe_imm_index(DisasContext *ctx, TCGv EA, int sh) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong uimm = rB(ctx->opcode); + + if (rA(ctx->opcode) == 0) { + tcg_gen_movi_tl(tcg_ctx, EA, uimm << sh); + } else { + tcg_gen_addi_tl(tcg_ctx, EA, cpu_gpr[rA(ctx->opcode)], uimm << sh); + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(tcg_ctx, EA, EA); + } + } +} + +static inline void gen_op_evldd(DisasContext *ctx, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + gen_qemu_ld64_i64(ctx, t0, addr); + gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static inline void gen_op_evldw(DisasContext *ctx, TCGv addr) +{ + gen_qemu_ld32u(ctx, cpu_gprh[rD(ctx->opcode)], addr); + gen_addr_add(ctx, addr, addr, 4); + gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], addr); +} + +static inline void gen_op_evldh(DisasContext 
*ctx, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_qemu_ld16u(ctx, t0, addr); + tcg_gen_shli_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0, 16); + gen_addr_add(ctx, addr, addr, 2); + gen_qemu_ld16u(ctx, t0, addr); + tcg_gen_or_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0); + gen_addr_add(ctx, addr, addr, 2); + gen_qemu_ld16u(ctx, t0, addr); + tcg_gen_shli_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, 16); + gen_addr_add(ctx, addr, addr, 2); + gen_qemu_ld16u(ctx, t0, addr); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_qemu_ld16u(ctx, t0, addr); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 16); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_qemu_ld16u(ctx, t0, addr); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_qemu_ld16s(ctx, t0, addr); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_qemu_ld16u(ctx, t0, addr); + tcg_gen_shli_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0, 16); + gen_addr_add(ctx, addr, addr, 2); + gen_qemu_ld16u(ctx, t0, addr); + tcg_gen_shli_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, 16); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr) +{ + gen_qemu_ld16u(ctx, cpu_gprh[rD(ctx->opcode)], addr); + gen_addr_add(ctx, addr, addr, 2); + gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr); +} + +static inline void gen_op_evlwhos(DisasContext *ctx, TCGv addr) +{ + gen_qemu_ld16s(ctx, cpu_gprh[rD(ctx->opcode)], addr); + gen_addr_add(ctx, addr, addr, 2); + gen_qemu_ld16s(ctx, cpu_gpr[rD(ctx->opcode)], addr); +} + +static inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_qemu_ld32u(ctx, t0, addr); + tcg_gen_mov_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0); + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_qemu_ld16u(ctx, t0, addr); + tcg_gen_shli_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], t0, 16); + tcg_gen_or_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0); + gen_addr_add(ctx, addr, addr, 2); + gen_qemu_ld16u(ctx, t0, addr); + tcg_gen_shli_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0, 16); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr) +{ + TCGContext *tcg_ctx =
ctx->uc->tcg_ctx; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + gen_load_gpr64(tcg_ctx, t0, rS(ctx->opcode)); + gen_qemu_st64_i64(ctx, t0, addr); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static inline void gen_op_evstdw(DisasContext *ctx, TCGv addr) +{ + gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr); + gen_addr_add(ctx, addr, addr, 4); + gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr); +} + +static inline void gen_op_evstdh(DisasContext *ctx, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_shri_tl(tcg_ctx, t0, cpu_gprh[rS(ctx->opcode)], 16); + gen_qemu_st16(ctx, t0, addr); + gen_addr_add(ctx, addr, addr, 2); + gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr); + gen_addr_add(ctx, addr, addr, 2); + tcg_gen_shri_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], 16); + gen_qemu_st16(ctx, t0, addr); + tcg_temp_free(tcg_ctx, t0); + gen_addr_add(ctx, addr, addr, 2); + gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr); +} + +static inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_shri_tl(tcg_ctx, t0, cpu_gprh[rS(ctx->opcode)], 16); + gen_qemu_st16(ctx, t0, addr); + gen_addr_add(ctx, addr, addr, 2); + tcg_gen_shri_tl(tcg_ctx, t0, cpu_gpr[rS(ctx->opcode)], 16); + gen_qemu_st16(ctx, t0, addr); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_op_evstwho(DisasContext *ctx, TCGv addr) +{ + gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr); + gen_addr_add(ctx, addr, addr, 2); + gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr); +} + +static inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr) +{ + gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr); +} + +static inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr) +{ + gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr); +} + +#define GEN_SPEOP_LDST(name, opc2, sh) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv t0; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_INT); \ + t0 = tcg_temp_new(tcg_ctx); \ + if (Rc(ctx->opcode)) { \ + gen_addr_spe_imm_index(ctx, t0, sh); \ + } else { \ + gen_addr_reg_index(ctx, t0); \ + } \ + gen_op_##name(ctx, t0); \ + tcg_temp_free(tcg_ctx, t0); \ +} + +GEN_SPEOP_LDST(evldd, 0x00, 3); +GEN_SPEOP_LDST(evldw, 0x01, 3); +GEN_SPEOP_LDST(evldh, 0x02, 3); +GEN_SPEOP_LDST(evlhhesplat, 0x04, 1); +GEN_SPEOP_LDST(evlhhousplat, 0x06, 1); +GEN_SPEOP_LDST(evlhhossplat, 0x07, 1); +GEN_SPEOP_LDST(evlwhe, 0x08, 2); +GEN_SPEOP_LDST(evlwhou, 0x0A, 2); +GEN_SPEOP_LDST(evlwhos, 0x0B, 2); +GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2); +GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2); + +GEN_SPEOP_LDST(evstdd, 0x10, 3); +GEN_SPEOP_LDST(evstdw, 0x11, 3); +GEN_SPEOP_LDST(evstdh, 0x12, 3); +GEN_SPEOP_LDST(evstwhe, 0x18, 2); +GEN_SPEOP_LDST(evstwho, 0x1A, 2); +GEN_SPEOP_LDST(evstwwe, 0x1C, 2); +GEN_SPEOP_LDST(evstwwo, 0x1E, 2); + +/* Multiply and add - TODO */ +#if 0 +GEN_SPE(speundef, evmhessf, 0x01, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);// +GEN_SPE(speundef, evmhossf, 0x03, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmheumi, evmhesmi, 0x04, 0x10, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhesmf, 0x05, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmhoumi, evmhosmi, 0x06, 0x10, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhosmf, 0x07, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhessfa, 0x11, 0x10, 
0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhossfa, 0x13, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmheumia, evmhesmia, 0x14, 0x10, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhesmfa, 0x15, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmhoumia, evmhosmia, 0x16, 0x10, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhosmfa, 0x17, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE); + +GEN_SPE(speundef, evmwhssf, 0x03, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmwlumi, speundef, 0x04, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE); +GEN_SPE(evmwhumi, evmwhsmi, 0x06, 0x11, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwhsmf, 0x07, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwssf, 0x09, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwsmf, 0x0D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwhssfa, 0x13, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmwlumia, speundef, 0x14, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE); +GEN_SPE(evmwhumia, evmwhsmia, 0x16, 0x11, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwhsmfa, 0x17, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwssfa, 0x19, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwsmfa, 0x1D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE); + +GEN_SPE(evadduiaaw, evaddsiaaw, 0x00, 0x13, 0x0000F800, 0x0000F800, PPC_SPE); +GEN_SPE(evsubfusiaaw, evsubfssiaaw, 0x01, 0x13, 0x0000F800, 0x0000F800, PPC_SPE); +GEN_SPE(evaddumiaaw, evaddsmiaaw, 0x04, 0x13, 0x0000F800, 0x0000F800, PPC_SPE); +GEN_SPE(evsubfumiaaw, evsubfsmiaaw, 0x05, 0x13, 0x0000F800, 0x0000F800, PPC_SPE); +GEN_SPE(evdivws, evdivwu, 0x06, 0x13, 0x00000000, 0x00000000, PPC_SPE); + +GEN_SPE(evmheusiaaw, evmhessiaaw, 0x00, 0x14, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhessfaaw, 0x01, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmhousiaaw, evmhossiaaw, 0x02, 0x14, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhossfaaw, 0x03, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmheumiaaw, evmhesmiaaw, 0x04, 0x14, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhesmfaaw, 0x05, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmhoumiaaw, evmhosmiaaw, 0x06, 0x14, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhosmfaaw, 0x07, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmhegumiaa, evmhegsmiaa, 0x14, 0x14, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhegsmfaa, 0x15, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmhogumiaa, evmhogsmiaa, 0x16, 0x14, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhogsmfaa, 0x17, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE); + +GEN_SPE(evmwlusiaaw, evmwlssiaaw, 0x00, 0x15, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(evmwlumiaaw, evmwlsmiaaw, 0x04, 0x15, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwssfaa, 0x09, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwsmfaa, 0x0D, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE); + +GEN_SPE(evmheusianw, evmhessianw, 0x00, 0x16, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhessfanw, 0x01, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmhousianw, evmhossianw, 0x02, 0x16, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhossfanw, 0x03, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmheumianw, evmhesmianw, 0x04, 0x16, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhesmfanw, 0x05, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmhoumianw, evmhosmianw, 0x06, 0x16, 0x00000000, 0x00000000, PPC_SPE); 
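/*
 * While these entries stay disabled, it may help to spell out what one of
 * them would generate. Each GEN_SPE(name0, name1, ...) entry pairs two
 * mnemonics on a single opcode slot and dispatches on the opcode's Rc bit,
 * so per the GEN_SPE macro defined earlier in this file,
 * GEN_SPE(evmheumi, evmhesmi, 0x04, 0x10, ...) would expand to roughly the
 * sketch below (illustration only, not additional functionality):
 *
 *     static void gen_evmheumi_evmhesmi(DisasContext *ctx)
 *     {
 *         if (Rc(ctx->opcode)) {
 *             gen_evmhesmi(ctx);   // Rc = 1: signed form
 *         } else {
 *             gen_evmheumi(ctx);   // Rc = 0: unsigned form
 *         }
 *     }
 */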
+GEN_SPE(speundef, evmhosmfanw, 0x07, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmhegumian, evmhegsmian, 0x14, 0x16, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhegsmfan, 0x15, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmhigumian, evmhigsmian, 0x16, 0x16, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmhogsmfan, 0x17, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE); + +GEN_SPE(evmwlusianw, evmwlssianw, 0x00, 0x17, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(evmwlumianw, evmwlsmianw, 0x04, 0x17, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwssfan, 0x09, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE); +GEN_SPE(evmwumian, evmwsmian, 0x0C, 0x17, 0x00000000, 0x00000000, PPC_SPE); +GEN_SPE(speundef, evmwsmfan, 0x0D, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE); +#endif + +/*** SPE floating-point extension ***/ +#define GEN_SPEFPUOP_CONV_32_32(name) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(tcg_ctx, t0, tcg_ctx->cpu_env, t0); \ + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ + tcg_temp_free_i32(tcg_ctx, t0); \ +} +#define GEN_SPEFPUOP_CONV_32_64(name) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); \ + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); \ + gen_load_gpr64(tcg_ctx, t0, rB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, t1, tcg_ctx->cpu_env, t0); \ + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t1); \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i32(tcg_ctx, t1); \ +} +#define GEN_SPEFPUOP_CONV_64_32(name) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); \ + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); \ + tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(tcg_ctx, t0, tcg_ctx->cpu_env, t1); \ + gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i32(tcg_ctx, t1); \ +} +#define GEN_SPEFPUOP_CONV_64_64(name) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); \ + gen_load_gpr64(tcg_ctx, t0, rB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, t0, tcg_ctx->cpu_env, t0); \ + gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} +#define GEN_SPEFPUOP_ARITH2_32_32(name) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 t0, t1; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + t0 = tcg_temp_new_i32(tcg_ctx); \ + t1 = tcg_temp_new_i32(tcg_ctx); \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); \ + tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); \ + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], t0); \ + \ + tcg_temp_free_i32(tcg_ctx, t0); \ + tcg_temp_free_i32(tcg_ctx, t1); \ +} +#define GEN_SPEFPUOP_ARITH2_64_64(name) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0, t1; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + t0 = 
tcg_temp_new_i64(tcg_ctx); \ + t1 = tcg_temp_new_i64(tcg_ctx); \ + gen_load_gpr64(tcg_ctx, t0, rA(ctx->opcode)); \ + gen_load_gpr64(tcg_ctx, t1, rB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); \ + gen_store_gpr64(tcg_ctx, rD(ctx->opcode), t0); \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i64(tcg_ctx, t1); \ +} +#define GEN_SPEFPUOP_COMP_32(name) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 t0, t1; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + t0 = tcg_temp_new_i32(tcg_ctx); \ + t1 = tcg_temp_new_i32(tcg_ctx); \ + \ + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); \ + tcg_gen_trunc_tl_i32(tcg_ctx, t1, cpu_gpr[rB(ctx->opcode)]); \ + gen_helper_##name(tcg_ctx, cpu_crf[crfD(ctx->opcode)], tcg_ctx->cpu_env, t0, t1); \ + \ + tcg_temp_free_i32(tcg_ctx, t0); \ + tcg_temp_free_i32(tcg_ctx, t1); \ +} +#define GEN_SPEFPUOP_COMP_64(name) \ +static inline void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0, t1; \ + if (unlikely(!ctx->spe_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_SPEU); \ + return; \ + } \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + t1 = tcg_temp_new_i64(tcg_ctx); \ + gen_load_gpr64(tcg_ctx, t0, rA(ctx->opcode)); \ + gen_load_gpr64(tcg_ctx, t1, rB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, cpu_crf[crfD(ctx->opcode)], tcg_ctx->cpu_env, t0, t1); \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i64(tcg_ctx, t1); \ +} + +/* Single precision floating-point vectors operations */ +/* Arithmetic */ +GEN_SPEFPUOP_ARITH2_64_64(evfsadd); +GEN_SPEFPUOP_ARITH2_64_64(evfssub); +GEN_SPEFPUOP_ARITH2_64_64(evfsmul); +GEN_SPEFPUOP_ARITH2_64_64(evfsdiv); +static inline void gen_evfsabs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_andi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + ~0x80000000); + tcg_gen_andi_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], + ~0x80000000); +} + +static inline void gen_evfsnabs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_ori_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + 0x80000000); + tcg_gen_ori_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], + 0x80000000); +} + +static inline void gen_evfsneg(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_xori_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + 0x80000000); + tcg_gen_xori_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], + 0x80000000); +} + +/* Conversion */ +GEN_SPEFPUOP_CONV_64_64(evfscfui); +GEN_SPEFPUOP_CONV_64_64(evfscfsi); +GEN_SPEFPUOP_CONV_64_64(evfscfuf); +GEN_SPEFPUOP_CONV_64_64(evfscfsf); +GEN_SPEFPUOP_CONV_64_64(evfsctui); +GEN_SPEFPUOP_CONV_64_64(evfsctsi); +GEN_SPEFPUOP_CONV_64_64(evfsctuf); +GEN_SPEFPUOP_CONV_64_64(evfsctsf); +GEN_SPEFPUOP_CONV_64_64(evfsctuiz); +GEN_SPEFPUOP_CONV_64_64(evfsctsiz); + +/* Comparison */ +GEN_SPEFPUOP_COMP_64(evfscmpgt); +GEN_SPEFPUOP_COMP_64(evfscmplt); +GEN_SPEFPUOP_COMP_64(evfscmpeq); +GEN_SPEFPUOP_COMP_64(evfststgt); +GEN_SPEFPUOP_COMP_64(evfststlt); 
+GEN_SPEFPUOP_COMP_64(evfststeq); + +/* Opcodes definitions */ +GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); // +GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); // +GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); // +GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); // +GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); // +GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); // +GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // +GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // +GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // +GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // +GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); // +GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); // +GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); // +GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); // + +/* Single precision floating-point operations */ +/* Arithmetic */ +GEN_SPEFPUOP_ARITH2_32_32(efsadd); +GEN_SPEFPUOP_ARITH2_32_32(efssub); +GEN_SPEFPUOP_ARITH2_32_32(efsmul); +GEN_SPEFPUOP_ARITH2_32_32(efsdiv); +static inline void gen_efsabs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_andi_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + (target_long)~0x80000000LL); +} + +static inline void gen_efsnabs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_ori_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + 0x80000000); +} + +static inline void gen_efsneg(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_xori_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], + 0x80000000); +} + +/* Conversion */ +GEN_SPEFPUOP_CONV_32_32(efscfui); +GEN_SPEFPUOP_CONV_32_32(efscfsi); +GEN_SPEFPUOP_CONV_32_32(efscfuf); +GEN_SPEFPUOP_CONV_32_32(efscfsf); +GEN_SPEFPUOP_CONV_32_32(efsctui); +GEN_SPEFPUOP_CONV_32_32(efsctsi); +GEN_SPEFPUOP_CONV_32_32(efsctuf); +GEN_SPEFPUOP_CONV_32_32(efsctsf); +GEN_SPEFPUOP_CONV_32_32(efsctuiz); +GEN_SPEFPUOP_CONV_32_32(efsctsiz); +GEN_SPEFPUOP_CONV_32_64(efscfd); + +/* Comparison */ +GEN_SPEFPUOP_COMP_32(efscmpgt); +GEN_SPEFPUOP_COMP_32(efscmplt); +GEN_SPEFPUOP_COMP_32(efscmpeq); +GEN_SPEFPUOP_COMP_32(efststgt); +GEN_SPEFPUOP_COMP_32(efststlt); +GEN_SPEFPUOP_COMP_32(efststeq); + +/* Opcodes definitions */ +GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); // +GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); // +GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); // +GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); // +GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); // +GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE); // +GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, 
PPC_SPE_SINGLE); // +GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // +GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // +GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); // +GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); // +GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); // +GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); // +GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); // + +/* Double precision floating-point operations */ +/* Arithmetic */ +GEN_SPEFPUOP_ARITH2_64_64(efdadd); +GEN_SPEFPUOP_ARITH2_64_64(efdsub); +GEN_SPEFPUOP_ARITH2_64_64(efdmul); +GEN_SPEFPUOP_ARITH2_64_64(efddiv); +static inline void gen_efdabs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + tcg_gen_andi_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], + ~0x80000000); +} + +static inline void gen_efdnabs(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + tcg_gen_ori_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], + 0x80000000); +} + +static inline void gen_efdneg(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (unlikely(!ctx->spe_enabled)) { + gen_exception(ctx, POWERPC_EXCP_SPEU); + return; + } + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); + tcg_gen_xori_tl(tcg_ctx, cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], + 0x80000000); +} + +/* Conversion */ +GEN_SPEFPUOP_CONV_64_32(efdcfui); +GEN_SPEFPUOP_CONV_64_32(efdcfsi); +GEN_SPEFPUOP_CONV_64_32(efdcfuf); +GEN_SPEFPUOP_CONV_64_32(efdcfsf); +GEN_SPEFPUOP_CONV_32_64(efdctui); +GEN_SPEFPUOP_CONV_32_64(efdctsi); +GEN_SPEFPUOP_CONV_32_64(efdctuf); +GEN_SPEFPUOP_CONV_32_64(efdctsf); +GEN_SPEFPUOP_CONV_32_64(efdctuiz); +GEN_SPEFPUOP_CONV_32_64(efdctsiz); +GEN_SPEFPUOP_CONV_64_32(efdcfs); +GEN_SPEFPUOP_CONV_64_64(efdcfuid); +GEN_SPEFPUOP_CONV_64_64(efdcfsid); +GEN_SPEFPUOP_CONV_64_64(efdctuidz); +GEN_SPEFPUOP_CONV_64_64(efdctsidz); + +/* Comparison */ +GEN_SPEFPUOP_COMP_64(efdcmpgt); +GEN_SPEFPUOP_COMP_64(efdcmplt); +GEN_SPEFPUOP_COMP_64(efdcmpeq); +GEN_SPEFPUOP_COMP_64(efdtstgt); +GEN_SPEFPUOP_COMP_64(efdtstlt); +GEN_SPEFPUOP_COMP_64(efdtsteq); + +/* Opcodes definitions */ +GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); // +GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); // +GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE); // +GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE); // +GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); // +GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); // +GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); // +GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE); // +GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); // +GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); // 
+GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); // +GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); // +GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); // +GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); // +GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); // +GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE); // + +#undef GEN_SPE +#undef GEN_SPEOP_LDST diff --git a/qemu/target/ppc/translate/spe-ops.inc.c b/qemu/target/ppc/translate/spe-ops.inc.c new file mode 100644 index 00000000..7efe8b87 --- /dev/null +++ b/qemu/target/ppc/translate/spe-ops.inc.c @@ -0,0 +1,105 @@ +GEN_HANDLER2(evsel0, "evsel", 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE), +GEN_HANDLER2(evsel1, "evsel", 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE), +GEN_HANDLER2(evsel2, "evsel", 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE), +GEN_HANDLER2(evsel3, "evsel", 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE), + +#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \ + GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, PPC_NONE) +GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), +GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), +GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), +GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), +GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE), +GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE), +GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE), +GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE), +GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE), +GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE), +GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), +GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE), +GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE), +GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE), +GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE), +GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE), +GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE), +GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), +GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE), +GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE), +GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), +GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE), +GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE), +GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE), +GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE), +GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE), +GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE), +GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE), +GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE), + +GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE), +GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE), +GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, 
PPC_SPE_SINGLE), +GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE), +GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE), +GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE), +GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE), +GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE), +GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE), +GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE), +GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE), +GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE), +GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE), +GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE), + +GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE), +GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE), +GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE), +GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE), +GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE), +GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE), +GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE), +GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE), +GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE), +GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE), +GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE), +GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE), +GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE), +GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE), + +GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE), +GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE), +GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE), +GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE), +GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE), +GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE), +GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE), +GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE), +GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE), +GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE), +GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE), +GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE), +GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE), +GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE), +GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE), +GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE), + +#define GEN_SPEOP_LDST(name, opc2, sh) \ +GEN_HANDLER(name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE) +GEN_SPEOP_LDST(evldd, 0x00, 3), +GEN_SPEOP_LDST(evldw, 0x01, 3), +GEN_SPEOP_LDST(evldh, 0x02, 3), 
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1), +GEN_SPEOP_LDST(evlhhousplat, 0x06, 1), +GEN_SPEOP_LDST(evlhhossplat, 0x07, 1), +GEN_SPEOP_LDST(evlwhe, 0x08, 2), +GEN_SPEOP_LDST(evlwhou, 0x0A, 2), +GEN_SPEOP_LDST(evlwhos, 0x0B, 2), +GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2), +GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2), + +GEN_SPEOP_LDST(evstdd, 0x10, 3), +GEN_SPEOP_LDST(evstdw, 0x11, 3), +GEN_SPEOP_LDST(evstdh, 0x12, 3), +GEN_SPEOP_LDST(evstwhe, 0x18, 2), +GEN_SPEOP_LDST(evstwho, 0x1A, 2), +GEN_SPEOP_LDST(evstwwe, 0x1C, 2), +GEN_SPEOP_LDST(evstwwo, 0x1E, 2), diff --git a/qemu/target/ppc/translate/vmx-impl.inc.c b/qemu/target/ppc/translate/vmx-impl.inc.c new file mode 100644 index 00000000..9d4211dd --- /dev/null +++ b/qemu/target/ppc/translate/vmx-impl.inc.c @@ -0,0 +1,1606 @@ +/* + * translate/vmx-impl.c + * + * Altivec/VMX translation + */ + +/*** Altivec vector extension ***/ +/* Altivec registers moves */ + +static inline TCGv_ptr gen_avr_ptr(TCGContext *tcg_ctx, int reg) +{ + TCGv_ptr r = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, r, tcg_ctx->cpu_env, avr_full_offset(reg)); + return r; +} + +#define GEN_VR_LDX(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 avr; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_INT); \ + avr = tcg_temp_new_i64(tcg_ctx); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + tcg_gen_andi_tl(tcg_ctx, EA, EA, ~0xf); \ + /* \ + * We only need to swap high and low halves. gen_qemu_ld64_i64 \ + * does necessary 64-bit byteswap already. \ + */ \ + if (ctx->le_mode) { \ + gen_qemu_ld64_i64(ctx, avr, EA); \ + set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); \ + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ + gen_qemu_ld64_i64(ctx, avr, EA); \ + set_avr64(tcg_ctx, rD(ctx->opcode), avr, true); \ + } else { \ + gen_qemu_ld64_i64(ctx, avr, EA); \ + set_avr64(tcg_ctx, rD(ctx->opcode), avr, true); \ + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ + gen_qemu_ld64_i64(ctx, avr, EA); \ + set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); \ + } \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, avr); \ +} + +#define GEN_VR_STX(name, opc2, opc3) \ +static void gen_st##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 avr; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_INT); \ + avr = tcg_temp_new_i64(tcg_ctx); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + tcg_gen_andi_tl(tcg_ctx, EA, EA, ~0xf); \ + /* \ + * We only need to swap high and low halves. gen_qemu_st64_i64 \ + * does necessary 64-bit byteswap already. 
\ + */ \ + if (ctx->le_mode) { \ + get_avr64(tcg_ctx, avr, rD(ctx->opcode), false); \ + gen_qemu_st64_i64(ctx, avr, EA); \ + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ + get_avr64(tcg_ctx, avr, rD(ctx->opcode), true); \ + gen_qemu_st64_i64(ctx, avr, EA); \ + } else { \ + get_avr64(tcg_ctx, avr, rD(ctx->opcode), true); \ + gen_qemu_st64_i64(ctx, avr, EA); \ + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ + get_avr64(tcg_ctx, avr, rD(ctx->opcode), false); \ + gen_qemu_st64_i64(ctx, avr, EA); \ + } \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, avr); \ +} + +#define GEN_VR_LVE(name, opc2, opc3, size) \ +static void gen_lve##name(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_ptr rs; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + if (size > 1) { \ + tcg_gen_andi_tl(tcg_ctx, EA, EA, ~(size - 1)); \ + } \ + rs = gen_avr_ptr(tcg_ctx, rS(ctx->opcode)); \ + gen_helper_lve##name(tcg_ctx, tcg_ctx->cpu_env, rs, EA); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_ptr(tcg_ctx, rs); \ + } + +#define GEN_VR_STVE(name, opc2, opc3, size) \ +static void gen_stve##name(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_ptr rs; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + if (size > 1) { \ + tcg_gen_andi_tl(tcg_ctx, EA, EA, ~(size - 1)); \ + } \ + rs = gen_avr_ptr(tcg_ctx, rS(ctx->opcode)); \ + gen_helper_stve##name(tcg_ctx, tcg_ctx->cpu_env, rs, EA); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_ptr(tcg_ctx, rs); \ + } + +GEN_VR_LDX(lvx, 0x07, 0x03); +/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */ +GEN_VR_LDX(lvxl, 0x07, 0x0B); + +GEN_VR_LVE(bx, 0x07, 0x00, 1); +GEN_VR_LVE(hx, 0x07, 0x01, 2); +GEN_VR_LVE(wx, 0x07, 0x02, 4); + +GEN_VR_STX(svx, 0x07, 0x07); +/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */ +GEN_VR_STX(svxl, 0x07, 0x0F); + +GEN_VR_STVE(bx, 0x07, 0x04, 1); +GEN_VR_STVE(hx, 0x07, 0x05, 2); +GEN_VR_STVE(wx, 0x07, 0x06, 4); + +static void gen_mfvscr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t; + TCGv_i64 avr; + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + avr = tcg_temp_new_i64(tcg_ctx); + tcg_gen_movi_i64(tcg_ctx, avr, 0); + set_avr64(tcg_ctx, rD(ctx->opcode), avr, true); + t = tcg_temp_new_i32(tcg_ctx); + gen_helper_mfvscr(tcg_ctx, t, tcg_ctx->cpu_env); + tcg_gen_extu_i32_i64(tcg_ctx, avr, t); + set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); + tcg_temp_free_i32(tcg_ctx, t); + tcg_temp_free_i64(tcg_ctx, avr); +} + +static void gen_mtvscr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 val; + int bofs; + + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + + val = tcg_temp_new_i32(tcg_ctx); + bofs = avr_full_offset(rB(ctx->opcode)); +#ifdef HOST_WORDS_BIGENDIAN + bofs += 3 * 4; +#endif + + tcg_gen_ld_i32(tcg_ctx, val, tcg_ctx->cpu_env, bofs); + gen_helper_mtvscr(tcg_ctx, tcg_ctx->cpu_env, val); + tcg_temp_free_i32(tcg_ctx, val); +} + +#define GEN_VX_VMUL10(name, add_cin, ret_carry) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx =
ctx->uc->tcg_ctx; \ + TCGv_i64 t0; \ + TCGv_i64 t1; \ + TCGv_i64 t2; \ + TCGv_i64 avr; \ + TCGv_i64 ten, z; \ + \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + t1 = tcg_temp_new_i64(tcg_ctx); \ + t2 = tcg_temp_new_i64(tcg_ctx); \ + avr = tcg_temp_new_i64(tcg_ctx); \ + ten = tcg_const_i64(tcg_ctx, 10); \ + z = tcg_const_i64(tcg_ctx, 0); \ + \ + if (add_cin) { \ + get_avr64(tcg_ctx, avr, rA(ctx->opcode), false); \ + tcg_gen_mulu2_i64(tcg_ctx, t0, t1, avr, ten); \ + get_avr64(tcg_ctx, avr, rB(ctx->opcode), false); \ + tcg_gen_andi_i64(tcg_ctx, t2, avr, 0xF); \ + tcg_gen_add2_i64(tcg_ctx, avr, t2, t0, t1, t2, z); \ + set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); \ + } else { \ + get_avr64(tcg_ctx, avr, rA(ctx->opcode), false); \ + tcg_gen_mulu2_i64(tcg_ctx, avr, t2, avr, ten); \ + set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); \ + } \ + \ + if (ret_carry) { \ + get_avr64(tcg_ctx, avr, rA(ctx->opcode), true); \ + tcg_gen_mulu2_i64(tcg_ctx, t0, t1, avr, ten); \ + tcg_gen_add2_i64(tcg_ctx, t0, avr, t0, t1, t2, z); \ + set_avr64(tcg_ctx, rD(ctx->opcode), avr, false); \ + set_avr64(tcg_ctx, rD(ctx->opcode), z, true); \ + } else { \ + get_avr64(tcg_ctx, avr, rA(ctx->opcode), true); \ + tcg_gen_mul_i64(tcg_ctx, t0, avr, ten); \ + tcg_gen_add_i64(tcg_ctx, avr, t0, t2); \ + set_avr64(tcg_ctx, rD(ctx->opcode), avr, true); \ + } \ + \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i64(tcg_ctx, t1); \ + tcg_temp_free_i64(tcg_ctx, t2); \ + tcg_temp_free_i64(tcg_ctx, avr); \ + tcg_temp_free_i64(tcg_ctx, ten); \ + tcg_temp_free_i64(tcg_ctx, z); \ +} \ + +GEN_VX_VMUL10(vmul10uq, 0, 0); +GEN_VX_VMUL10(vmul10euq, 1, 0); +GEN_VX_VMUL10(vmul10cuq, 0, 1); +GEN_VX_VMUL10(vmul10ecuq, 1, 1); + +#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + \ + tcg_op(tcg_ctx, vece, \ + avr_full_offset(rD(ctx->opcode)), \ + avr_full_offset(rA(ctx->opcode)), \ + avr_full_offset(rB(ctx->opcode)), \ + 16, 16); \ +} + +/* Logical operations */ +GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16); +GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17); +GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18); +GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19); +GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20); +GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26); +GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22); +GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21); + +#define GEN_VXFORM(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr ra, rb, rd; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, rd, ra, rb); \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ +} + +#define GEN_VXFORM_TRANS(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + trans_##name(ctx); \ +} + +#define GEN_VXFORM_ENV(name, opc2, opc3) \ +static void glue(gen_, 
name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr ra, rb, rd; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb); \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ +} + +#define GEN_VXFORM3(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr ra, rb, rc, rd; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rc = gen_avr_ptr(tcg_ctx, rC(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, rd, ra, rb, rc); \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rc); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ +} + +/* + * Support for Altivec instruction pairs that use bit 31 (Rc) as + * an opcode bit. In general, these pairs come from different + * versions of the ISA, so we must also support a pair of flags for + * each instruction. + */ +#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \ +static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ +{ \ + if ((Rc(ctx->opcode) == 0) && \ + ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \ + gen_##name0(ctx); \ + } else if ((Rc(ctx->opcode) == 1) && \ + ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \ + gen_##name1(ctx); \ + } else { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + } \ +} + +/* + * We use this macro if one instruction is realized with direct + * translation, and the second one with a helper.
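+ * For example, vmrgow below is translated directly, while its pair vextuwlx is implemented with a helper.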
+ */ +#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\ +static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ +{ \ + if ((Rc(ctx->opcode) == 0) && \ + ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + trans_##name0(ctx); \ + } else if ((Rc(ctx->opcode) == 1) && \ + ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \ + gen_##name1(ctx); \ + } else { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + } \ +} + +/* Adds support to provide invalid mask */ +#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0, \ + name1, flg1, flg2_1, inval1) \ +static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ +{ \ + if ((Rc(ctx->opcode) == 0) && \ + ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) && \ + !(ctx->opcode & inval0)) { \ + gen_##name0(ctx); \ + } else if ((Rc(ctx->opcode) == 1) && \ + ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \ + !(ctx->opcode & inval1)) { \ + gen_##name1(ctx); \ + } else { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + } \ +} + +#define GEN_VXFORM_HETRO(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rb; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ +} + +GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0); +GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0, \ + vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800) +GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1); +GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE, \ + vmul10ecuq, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2); +GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3); +GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16); +GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17); +GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18); +GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19); +GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0); +GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1); +GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2); +GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3); +GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4); +GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5); +GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6); +GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7); +GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8); +GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9); +GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10); +GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11); +GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12); +GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13); +GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14); +GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15); +GEN_VXFORM(vavgub, 1, 16); +GEN_VXFORM(vabsdub, 1, 16); +GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \ + vabsdub, PPC_NONE, PPC2_ISA300) +GEN_VXFORM(vavguh, 1, 17); +GEN_VXFORM(vabsduh, 1, 17); +GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \ + vabsduh, PPC_NONE, PPC2_ISA300) +GEN_VXFORM(vavguw, 1, 18); +GEN_VXFORM(vabsduw, 1, 18); +GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \ + vabsduw, PPC_NONE, 
PPC2_ISA300) +GEN_VXFORM(vavgsb, 1, 20); +GEN_VXFORM(vavgsh, 1, 21); +GEN_VXFORM(vavgsw, 1, 22); +GEN_VXFORM(vmrghb, 6, 0); +GEN_VXFORM(vmrghh, 6, 1); +GEN_VXFORM(vmrghw, 6, 2); +GEN_VXFORM(vmrglb, 6, 4); +GEN_VXFORM(vmrglh, 6, 5); +GEN_VXFORM(vmrglw, 6, 6); + +static void trans_vmrgew(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int VT = rD(ctx->opcode); + int VA = rA(ctx->opcode); + int VB = rB(ctx->opcode); + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 avr = tcg_temp_new_i64(tcg_ctx); + + get_avr64(tcg_ctx, avr, VB, true); + tcg_gen_shri_i64(tcg_ctx, tmp, avr, 32); + get_avr64(tcg_ctx, avr, VA, true); + tcg_gen_deposit_i64(tcg_ctx, avr, avr, tmp, 0, 32); + set_avr64(tcg_ctx, VT, avr, true); + + get_avr64(tcg_ctx, avr, VB, false); + tcg_gen_shri_i64(tcg_ctx, tmp, avr, 32); + get_avr64(tcg_ctx, avr, VA, false); + tcg_gen_deposit_i64(tcg_ctx, avr, avr, tmp, 0, 32); + set_avr64(tcg_ctx, VT, avr, false); + + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_temp_free_i64(tcg_ctx, avr); +} + +static void trans_vmrgow(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int VT = rD(ctx->opcode); + int VA = rA(ctx->opcode); + int VB = rB(ctx->opcode); + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 avr = tcg_temp_new_i64(tcg_ctx); + + get_avr64(tcg_ctx, t0, VB, true); + get_avr64(tcg_ctx, t1, VA, true); + tcg_gen_deposit_i64(tcg_ctx, avr, t0, t1, 32, 32); + set_avr64(tcg_ctx, VT, avr, true); + + get_avr64(tcg_ctx, t0, VB, false); + get_avr64(tcg_ctx, t1, VA, false); + tcg_gen_deposit_i64(tcg_ctx, avr, t0, t1, 32, 32); + set_avr64(tcg_ctx, VT, avr, false); + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, avr); +} + +/* + * lvsl VRT,RA,RB - Load Vector for Shift Left + * + * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31]. + * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F. + * Bytes sh:sh+15 of X are placed into vD. + */ +static void trans_lvsl(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int VT = rD(ctx->opcode); + TCGv_i64 result = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 sh = tcg_temp_new_i64(tcg_ctx); + TCGv EA = tcg_temp_new(tcg_ctx); + + /* Get sh(from description) by anding EA with 0xf. */ + gen_addr_reg_index(ctx, EA); + tcg_gen_extu_tl_i64(tcg_ctx, sh, EA); + tcg_gen_andi_i64(tcg_ctx, sh, sh, 0xfULL); + + /* + * Create bytes sh:sh+7 of X(from description) and place them in + * higher doubleword of vD. + */ + tcg_gen_muli_i64(tcg_ctx, sh, sh, 0x0101010101010101ULL); + tcg_gen_addi_i64(tcg_ctx, result, sh, 0x0001020304050607ull); + set_avr64(tcg_ctx, VT, result, true); + /* + * Create bytes sh+8:sh+15 of X(from description) and place them in + * lower doubleword of vD. + */ + tcg_gen_addi_i64(tcg_ctx, result, sh, 0x08090a0b0c0d0e0fULL); + set_avr64(tcg_ctx, VT, result, false); + + tcg_temp_free_i64(tcg_ctx, result); + tcg_temp_free_i64(tcg_ctx, sh); + tcg_temp_free(tcg_ctx, EA); +} + +/* + * lvsr VRT,RA,RB - Load Vector for Shift Right + * + * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31]. + * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F. + * Bytes (16-sh):(31-sh) of X are placed into vD. 
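+ * For example, with sh = 3 vD receives bytes 13:28 of X, i.e. 0x0D0E0F1011121314 || 0x15161718191A1B1C.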
+ */ +static void trans_lvsr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int VT = rD(ctx->opcode); + TCGv_i64 result = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 sh = tcg_temp_new_i64(tcg_ctx); + TCGv EA = tcg_temp_new(tcg_ctx); + + + /* Get sh(from description) by anding EA with 0xf. */ + gen_addr_reg_index(ctx, EA); + tcg_gen_extu_tl_i64(tcg_ctx, sh, EA); + tcg_gen_andi_i64(tcg_ctx, sh, sh, 0xfULL); + + /* + * Create bytes (16-sh):(23-sh) of X(from description) and place them in + * higher doubleword of vD. + */ + tcg_gen_muli_i64(tcg_ctx, sh, sh, 0x0101010101010101ULL); + tcg_gen_subfi_i64(tcg_ctx, result, 0x1011121314151617ULL, sh); + set_avr64(tcg_ctx, VT, result, true); + /* + * Create bytes (24-sh):(31-sh) of X(from description) and place them in + * lower doubleword of vD. + */ + tcg_gen_subfi_i64(tcg_ctx, result, 0x18191a1b1c1d1e1fULL, sh); + set_avr64(tcg_ctx, VT, result, false); + + tcg_temp_free_i64(tcg_ctx, result); + tcg_temp_free_i64(tcg_ctx, sh); + tcg_temp_free(tcg_ctx, EA); +} + +/* + * vsl VRT,VRA,VRB - Vector Shift Left + * + * Shift the 128-bit value of vA left by the amount specified in bits 125-127 of vB. + * The lowest 3 bits in each byte element of register vB must be identical, or the + * result is undefined. + */ +static void trans_vsl(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int VT = rD(ctx->opcode); + int VA = rA(ctx->opcode); + int VB = rB(ctx->opcode); + TCGv_i64 avr = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 sh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 carry = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + /* Place bits 125-127 of vB in 'sh'. */ + get_avr64(tcg_ctx, avr, VB, false); + tcg_gen_andi_i64(tcg_ctx, sh, avr, 0x07ULL); + + /* + * Save highest 'sh' bits of lower doubleword element of vA in variable + * 'carry' and perform shift on lower doubleword. + */ + get_avr64(tcg_ctx, avr, VA, false); + tcg_gen_subfi_i64(tcg_ctx, tmp, 32, sh); + tcg_gen_shri_i64(tcg_ctx, carry, avr, 32); + tcg_gen_shr_i64(tcg_ctx, carry, carry, tmp); + tcg_gen_shl_i64(tcg_ctx, avr, avr, sh); + set_avr64(tcg_ctx, VT, avr, false); + + /* + * Perform shift on higher doubleword element of vA and replace lowest + * 'sh' bits with 'carry'. + */ + get_avr64(tcg_ctx, avr, VA, true); + tcg_gen_shl_i64(tcg_ctx, avr, avr, sh); + tcg_gen_or_i64(tcg_ctx, avr, avr, carry); + set_avr64(tcg_ctx, VT, avr, true); + + tcg_temp_free_i64(tcg_ctx, avr); + tcg_temp_free_i64(tcg_ctx, sh); + tcg_temp_free_i64(tcg_ctx, carry); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +/* + * vsr VRT,VRA,VRB - Vector Shift Right + * + * Shift the 128-bit value of vA right by the amount specified in bits 125-127 of vB. + * The lowest 3 bits in each byte element of register vB must be identical, or the + * result is undefined. + */ +static void trans_vsr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int VT = rD(ctx->opcode); + int VA = rA(ctx->opcode); + int VB = rB(ctx->opcode); + TCGv_i64 avr = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 sh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 carry = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + /* Place bits 125-127 of vB in 'sh'. */ + get_avr64(tcg_ctx, avr, VB, false); + tcg_gen_andi_i64(tcg_ctx, sh, avr, 0x07ULL); + + /* + * Save lowest 'sh' bits of higher doubleword element of vA in variable + * 'carry' and perform shift on higher doubleword.
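+ * (The shli by 32 followed by shl by 32-sh moves those bits into the top 'sh' bit positions of 'carry', and yields 0 when sh is 0, avoiding an out-of-range 64-bit shift.)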
+ */ + get_avr64(tcg_ctx, avr, VA, true); + tcg_gen_subfi_i64(tcg_ctx, tmp, 32, sh); + tcg_gen_shli_i64(tcg_ctx, carry, avr, 32); + tcg_gen_shl_i64(tcg_ctx, carry, carry, tmp); + tcg_gen_shr_i64(tcg_ctx, avr, avr, sh); + set_avr64(tcg_ctx, VT, avr, true); + /* + * Perform shift on lower doubleword element of vA and replace highest + * 'sh' bits with 'carry'. + */ + get_avr64(tcg_ctx, avr, VA, false); + tcg_gen_shr_i64(tcg_ctx, avr, avr, sh); + tcg_gen_or_i64(tcg_ctx, avr, avr, carry); + set_avr64(tcg_ctx, VT, avr, false); + + tcg_temp_free_i64(tcg_ctx, avr); + tcg_temp_free_i64(tcg_ctx, sh); + tcg_temp_free_i64(tcg_ctx, carry); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +/* + * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword + * + * All ith bits (i in range 1 to 8) of each byte of each doubleword element in the + * source register are concatenated and placed into the ith byte of the corresponding + * doubleword element of the destination register. + * + * The following solution processes both doubleword elements of the source register + * in parallel, in order to reduce the number of instructions needed (that's why + * arrays are used): + * First, both doubleword elements of source register vB are placed in the + * appropriate element of array avr. Bits are gathered in 2x8 iterations (2 for + * loops). In the first iteration, bit 1 of byte 1, bit 2 of byte 2, ... bit 8 of + * byte 8 are already in their final spots, so avr[i], i={0,1} can be and-ed with + * tcg_mask. For every following iteration, both avr[i] and tcg_mask have to be + * shifted right by 7 and 8 places, respectively, to bring bit 1 of byte 2, + * bit 2 of byte 3, ... bit 7 of byte 8 into their final spots, so the shifted avr + * values (saved in tmp) can be and-ed with the new value of tcg_mask. After the + * first 8 iterations (the first loop), all the first bits are in their final + * places, all second bits except the second bit of the eighth byte are in their + * places, and so on; only one eighth bit (the one from the eighth byte) is in its + * place. The second loop does all operations symmetrically, to bring the other + * half of the bits into their final spots. Results for the first and second + * doubleword elements are saved in result[0] and result[1] respectively. In the + * end those results are saved in the appropriate doubleword element of + * destination register vD.
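+ * In effect, each doubleword element is treated as an 8x8 bit matrix that gets transposed.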
+ */ +static void trans_vgbbd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int VT = rD(ctx->opcode); + int VB = rB(ctx->opcode); + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + uint64_t mask = 0x8040201008040201ULL; + int i, j; + + TCGv_i64 result[2]; + result[0] = tcg_temp_new_i64(tcg_ctx); + result[1] = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 avr[2]; + avr[0] = tcg_temp_new_i64(tcg_ctx); + avr[1] = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_mask = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_movi_i64(tcg_ctx, tcg_mask, mask); + for (j = 0; j < 2; j++) { + get_avr64(tcg_ctx, avr[j], VB, j); + tcg_gen_and_i64(tcg_ctx, result[j], avr[j], tcg_mask); + } + for (i = 1; i < 8; i++) { + tcg_gen_movi_i64(tcg_ctx, tcg_mask, mask >> (i * 8)); + for (j = 0; j < 2; j++) { + tcg_gen_shri_i64(tcg_ctx, tmp, avr[j], i * 7); + tcg_gen_and_i64(tcg_ctx, tmp, tmp, tcg_mask); + tcg_gen_or_i64(tcg_ctx, result[j], result[j], tmp); + } + } + for (i = 1; i < 8; i++) { + tcg_gen_movi_i64(tcg_ctx, tcg_mask, mask << (i * 8)); + for (j = 0; j < 2; j++) { + tcg_gen_shli_i64(tcg_ctx, tmp, avr[j], i * 7); + tcg_gen_and_i64(tcg_ctx, tmp, tmp, tcg_mask); + tcg_gen_or_i64(tcg_ctx, result[j], result[j], tmp); + } + } + for (j = 0; j < 2; j++) { + set_avr64(tcg_ctx, VT, result[j], j); + } + + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_temp_free_i64(tcg_ctx, tcg_mask); + tcg_temp_free_i64(tcg_ctx, result[0]); + tcg_temp_free_i64(tcg_ctx, result[1]); + tcg_temp_free_i64(tcg_ctx, avr[0]); + tcg_temp_free_i64(tcg_ctx, avr[1]); +} + +/* + * vclzw VRT,VRB - Vector Count Leading Zeros Word + * + * Count the number of leading zero bits of each word element in the source + * register and place the result in the corresponding word element of the + * destination register. + */ +static void trans_vclzw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int VT = rD(ctx->opcode); + int VB = rB(ctx->opcode); + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + int i; + + /* Perform count for every word element using tcg_gen_clzi_i32. */ + for (i = 0; i < 4; i++) { + tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, + offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4); + tcg_gen_clzi_i32(tcg_ctx, tmp, tmp, 32); + tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, + offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4); + } + + tcg_temp_free_i32(tcg_ctx, tmp); +} + +/* + * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword + * + * Count the number of leading zero bits of each doubleword element in the + * source register and place the result in the corresponding doubleword element + * of the destination register.
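+ * A zero doubleword yields 64, since tcg_gen_clzi_i64 is passed 64 as the result for a zero input.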
+ */ +static void trans_vclzd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int VT = rD(ctx->opcode); + int VB = rB(ctx->opcode); + TCGv_i64 avr = tcg_temp_new_i64(tcg_ctx); + + /* high doubleword */ + get_avr64(tcg_ctx, avr, VB, true); + tcg_gen_clzi_i64(tcg_ctx, avr, avr, 64); + set_avr64(tcg_ctx, VT, avr, true); + + /* low doubleword */ + get_avr64(tcg_ctx, avr, VB, false); + tcg_gen_clzi_i64(tcg_ctx, avr, avr, 64); + set_avr64(tcg_ctx, VT, avr, false); + + tcg_temp_free_i64(tcg_ctx, avr); +} + +GEN_VXFORM(vmuloub, 4, 0); +GEN_VXFORM(vmulouh, 4, 1); +GEN_VXFORM(vmulouw, 4, 2); +GEN_VXFORM(vmuluwm, 4, 2); +GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE, + vmuluwm, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM(vmulosb, 4, 4); +GEN_VXFORM(vmulosh, 4, 5); +GEN_VXFORM(vmulosw, 4, 6); +GEN_VXFORM(vmuleub, 4, 8); +GEN_VXFORM(vmuleuh, 4, 9); +GEN_VXFORM(vmuleuw, 4, 10); +GEN_VXFORM(vmulesb, 4, 12); +GEN_VXFORM(vmulesh, 4, 13); +GEN_VXFORM(vmulesw, 4, 14); +GEN_VXFORM_V(vslb, MO_8, tcg_gen_gvec_shlv, 2, 4); +GEN_VXFORM_V(vslh, MO_16, tcg_gen_gvec_shlv, 2, 5); +GEN_VXFORM_V(vslw, MO_32, tcg_gen_gvec_shlv, 2, 6); +GEN_VXFORM(vrlwnm, 2, 6); +GEN_VXFORM_DUAL(vslw, PPC_ALTIVEC, PPC_NONE, \ + vrlwnm, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_V(vsld, MO_64, tcg_gen_gvec_shlv, 2, 23); +GEN_VXFORM_V(vsrb, MO_8, tcg_gen_gvec_shrv, 2, 8); +GEN_VXFORM_V(vsrh, MO_16, tcg_gen_gvec_shrv, 2, 9); +GEN_VXFORM_V(vsrw, MO_32, tcg_gen_gvec_shrv, 2, 10); +GEN_VXFORM_V(vsrd, MO_64, tcg_gen_gvec_shrv, 2, 27); +GEN_VXFORM_V(vsrab, MO_8, tcg_gen_gvec_sarv, 2, 12); +GEN_VXFORM_V(vsrah, MO_16, tcg_gen_gvec_sarv, 2, 13); +GEN_VXFORM_V(vsraw, MO_32, tcg_gen_gvec_sarv, 2, 14); +GEN_VXFORM_V(vsrad, MO_64, tcg_gen_gvec_sarv, 2, 15); +GEN_VXFORM(vsrv, 2, 28); +GEN_VXFORM(vslv, 2, 29); +GEN_VXFORM(vslo, 6, 16); +GEN_VXFORM(vsro, 6, 17); +GEN_VXFORM(vaddcuw, 0, 6); +GEN_VXFORM(vsubcuw, 0, 22); + +#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3) \ +static void glue(glue(gen_, NAME), _vec)(TCGContext *tcg_ctx, unsigned vece, TCGv_vec t, \ + TCGv_vec sat, TCGv_vec a, \ + TCGv_vec b) \ +{ \ + TCGv_vec x = tcg_temp_new_vec_matching(tcg_ctx, t); \ + glue(glue(tcg_gen_, NORM), _vec)(tcg_ctx, VECE, x, a, b); \ + glue(glue(tcg_gen_, SAT), _vec)(tcg_ctx, VECE, t, a, b); \ + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_NE, VECE, x, x, t); \ + tcg_gen_or_vec(tcg_ctx, VECE, sat, sat, x); \ + tcg_temp_free_vec(tcg_ctx, x); \ +} \ +static void glue(gen_, NAME)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + static const TCGOpcode vecop_list[] = { \ + glue(glue(INDEX_op_, NORM), _vec), \ + glue(glue(INDEX_op_, SAT), _vec), \ + INDEX_op_cmp_vec, 0 \ + }; \ + static const GVecGen4 g = { \ + .fniv = glue(glue(gen_, NAME), _vec), \ + .fno = glue(gen_helper_, NAME), \ + .opt_opc = vecop_list, \ + .write_aofs = true, \ + .vece = VECE, \ + }; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + tcg_gen_gvec_4(tcg_ctx, avr_full_offset(rD(ctx->opcode)), \ + offsetof(CPUPPCState, vscr_sat), \ + avr_full_offset(rA(ctx->opcode)), \ + avr_full_offset(rB(ctx->opcode)), \ + 16, 16, &g); \ +} + +GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8); +GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0, \ + vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800) +GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9); +GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \ + vmul10euq, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10); +GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 
12); +GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13); +GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14); +GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24); +GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25); +GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26); +GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28); +GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29); +GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30); +GEN_VXFORM(vadduqm, 0, 4); +GEN_VXFORM(vaddcuq, 0, 5); +GEN_VXFORM3(vaddeuqm, 30, 0); +GEN_VXFORM3(vaddecuq, 30, 0); +GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \ + vaddecuq, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM(vsubuqm, 0, 20); +GEN_VXFORM(vsubcuq, 0, 21); +GEN_VXFORM3(vsubeuqm, 31, 0); +GEN_VXFORM3(vsubecuq, 31, 0); +GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \ + vsubecuq, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM(vrlb, 2, 0); +GEN_VXFORM(vrlh, 2, 1); +GEN_VXFORM(vrlw, 2, 2); +GEN_VXFORM(vrlwmi, 2, 2); +GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \ + vrlwmi, PPC_NONE, PPC2_ISA300) +GEN_VXFORM(vrld, 2, 3); +GEN_VXFORM(vrldmi, 2, 3); +GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \ + vrldmi, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_TRANS(vsl, 2, 7); +GEN_VXFORM(vrldnm, 2, 7); +GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \ + vrldnm, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_TRANS(vsr, 2, 11); +GEN_VXFORM_ENV(vpkuhum, 7, 0); +GEN_VXFORM_ENV(vpkuwum, 7, 1); +GEN_VXFORM_ENV(vpkudum, 7, 17); +GEN_VXFORM_ENV(vpkuhus, 7, 2); +GEN_VXFORM_ENV(vpkuwus, 7, 3); +GEN_VXFORM_ENV(vpkudus, 7, 19); +GEN_VXFORM_ENV(vpkshus, 7, 4); +GEN_VXFORM_ENV(vpkswus, 7, 5); +GEN_VXFORM_ENV(vpksdus, 7, 21); +GEN_VXFORM_ENV(vpkshss, 7, 6); +GEN_VXFORM_ENV(vpkswss, 7, 7); +GEN_VXFORM_ENV(vpksdss, 7, 23); +GEN_VXFORM(vpkpx, 7, 12); +GEN_VXFORM_ENV(vsum4ubs, 4, 24); +GEN_VXFORM_ENV(vsum4sbs, 4, 28); +GEN_VXFORM_ENV(vsum4shs, 4, 25); +GEN_VXFORM_ENV(vsum2sws, 4, 26); +GEN_VXFORM_ENV(vsumsws, 4, 30); +GEN_VXFORM_ENV(vaddfp, 5, 0); +GEN_VXFORM_ENV(vsubfp, 5, 1); +GEN_VXFORM_ENV(vmaxfp, 5, 16); +GEN_VXFORM_ENV(vminfp, 5, 17); +GEN_VXFORM_HETRO(vextublx, 6, 24) +GEN_VXFORM_HETRO(vextuhlx, 6, 25) +GEN_VXFORM_HETRO(vextuwlx, 6, 26) +GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207, + vextuwlx, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_HETRO(vextubrx, 6, 28) +GEN_VXFORM_HETRO(vextuhrx, 6, 29) +GEN_VXFORM_HETRO(vextuwrx, 6, 30) +GEN_VXFORM_TRANS(lvsl, 6, 31) +GEN_VXFORM_TRANS(lvsr, 6, 32) +GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207, + vextuwrx, PPC_NONE, PPC2_ISA300) + +#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr ra, rb, rd; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + gen_helper_##opname(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb); \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + } + +#define GEN_VXRFORM(name, opc2, opc3) \ + GEN_VXRFORM1(name, name, #name, opc2, opc3) \ + GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4))) + +/* + * Support for Altivec instructions that use bit 31 (Rc) as an opcode + * bit but also use bit 21 as an actual Rc bit. In general, these pairs + * come from different versions of the ISA, so we must also support a + * pair of flags for each instruction.
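+ * For example, vcmpequb (Rc = 0) pairs with vcmpneb (Rc = 1), and bit 21 selects the dot form (vcmpequb. / vcmpneb.) that also records the result in CR6.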
+ */ +#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \ +static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ +{ \ + if ((Rc(ctx->opcode) == 0) && \ + ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \ + if (Rc21(ctx->opcode) == 0) { \ + gen_##name0(ctx); \ + } else { \ + gen_##name0##_(ctx); \ + } \ + } else if ((Rc(ctx->opcode) == 1) && \ + ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \ + if (Rc21(ctx->opcode) == 0) { \ + gen_##name1(ctx); \ + } else { \ + gen_##name1##_(ctx); \ + } \ + } else { \ + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ + } \ +} + +GEN_VXRFORM(vcmpequb, 3, 0) +GEN_VXRFORM(vcmpequh, 3, 1) +GEN_VXRFORM(vcmpequw, 3, 2) +GEN_VXRFORM(vcmpequd, 3, 3) +GEN_VXRFORM(vcmpnezb, 3, 4) +GEN_VXRFORM(vcmpnezh, 3, 5) +GEN_VXRFORM(vcmpnezw, 3, 6) +GEN_VXRFORM(vcmpgtsb, 3, 12) +GEN_VXRFORM(vcmpgtsh, 3, 13) +GEN_VXRFORM(vcmpgtsw, 3, 14) +GEN_VXRFORM(vcmpgtsd, 3, 15) +GEN_VXRFORM(vcmpgtub, 3, 8) +GEN_VXRFORM(vcmpgtuh, 3, 9) +GEN_VXRFORM(vcmpgtuw, 3, 10) +GEN_VXRFORM(vcmpgtud, 3, 11) +GEN_VXRFORM(vcmpeqfp, 3, 3) +GEN_VXRFORM(vcmpgefp, 3, 7) +GEN_VXRFORM(vcmpgtfp, 3, 11) +GEN_VXRFORM(vcmpbfp, 3, 15) +GEN_VXRFORM(vcmpneb, 3, 0) +GEN_VXRFORM(vcmpneh, 3, 1) +GEN_VXRFORM(vcmpnew, 3, 2) + +GEN_VXRFORM_DUAL(vcmpequb, PPC_ALTIVEC, PPC_NONE, \ + vcmpneb, PPC_NONE, PPC2_ISA300) +GEN_VXRFORM_DUAL(vcmpequh, PPC_ALTIVEC, PPC_NONE, \ + vcmpneh, PPC_NONE, PPC2_ISA300) +GEN_VXRFORM_DUAL(vcmpequw, PPC_ALTIVEC, PPC_NONE, \ + vcmpnew, PPC_NONE, PPC2_ISA300) +GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \ + vcmpequd, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \ + vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \ + vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207) + +#define GEN_VXFORM_DUPI(name, tcg_op, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + int simm; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + simm = SIMM5(ctx->opcode); \ + tcg_op(tcg_ctx, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);\ + } + +GEN_VXFORM_DUPI(vspltisb, tcg_gen_gvec_dup8i, 6, 12); +GEN_VXFORM_DUPI(vspltish, tcg_gen_gvec_dup16i, 6, 13); +GEN_VXFORM_DUPI(vspltisw, tcg_gen_gvec_dup32i, 6, 14); + +#define GEN_VXFORM_NOA(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rb, rd; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, rd, rb); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + } + +#define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rb, rd; \ + \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rd, rb); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + } + +#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rb, rd; \ + if 
(unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, rd, rb); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + } + +#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rb; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, cpu_gpr[rD(ctx->opcode)], rb); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + } +GEN_VXFORM_NOA(vupkhsb, 7, 8); +GEN_VXFORM_NOA(vupkhsh, 7, 9); +GEN_VXFORM_NOA(vupkhsw, 7, 25); +GEN_VXFORM_NOA(vupklsb, 7, 10); +GEN_VXFORM_NOA(vupklsh, 7, 11); +GEN_VXFORM_NOA(vupklsw, 7, 27); +GEN_VXFORM_NOA(vupkhpx, 7, 13); +GEN_VXFORM_NOA(vupklpx, 7, 15); +GEN_VXFORM_NOA_ENV(vrefp, 5, 4); +GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5); +GEN_VXFORM_NOA_ENV(vexptefp, 5, 6); +GEN_VXFORM_NOA_ENV(vlogefp, 5, 7); +GEN_VXFORM_NOA_ENV(vrfim, 5, 11); +GEN_VXFORM_NOA_ENV(vrfin, 5, 8); +GEN_VXFORM_NOA_ENV(vrfip, 5, 10); +GEN_VXFORM_NOA_ENV(vrfiz, 5, 9); +GEN_VXFORM_NOA(vprtybw, 1, 24); +GEN_VXFORM_NOA(vprtybd, 1, 24); +GEN_VXFORM_NOA(vprtybq, 1, 24); + +static void gen_vsplt(DisasContext *ctx, int vece) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int uimm, dofs, bofs; + + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + + uimm = UIMM5(ctx->opcode); + bofs = avr_full_offset(rB(ctx->opcode)); + dofs = avr_full_offset(rD(ctx->opcode)); + + /* Experimental testing shows that hardware masks the immediate. 
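+ * The masking below reduces (uimm << vece) modulo the 16-byte
+ * register; on little-endian hosts the byte offset is additionally
+ * XORed with 15 and re-aligned to the element size, translating the
+ * ISA's big-endian element numbering into host addressing.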
*/ + bofs += (uimm << vece) & 15; +#ifndef HOST_WORDS_BIGENDIAN + bofs ^= 15; + bofs &= ~((1 << vece) - 1); +#endif + + tcg_gen_gvec_dup_mem(tcg_ctx, vece, dofs, bofs, 16, 16); +} + +#define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); } + +#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rb, rd; \ + TCGv_i32 uimm; \ + \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + uimm = tcg_const_i32(tcg_ctx, UIMM5(ctx->opcode)); \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, rd, rb, uimm); \ + tcg_temp_free_i32(tcg_ctx, uimm); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + } + +#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rb, rd; \ + uint8_t uimm = UIMM4(ctx->opcode); \ + TCGv_i32 t0; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + if (uimm > splat_max) { \ + uimm = 0; \ + } \ + t0 = tcg_temp_new_i32(tcg_ctx); \ + tcg_gen_movi_i32(tcg_ctx, t0, uimm); \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, rd, rb, t0); \ + tcg_temp_free_i32(tcg_ctx, t0); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + } + +GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8); +GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9); +GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10); +GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15); +GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14); +GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12); +GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8); +GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15); +GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14); +GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12); +GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8); +GEN_VXFORM_UIMM_ENV(vcfux, 5, 12); +GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13); +GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14); +GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15); +GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE, + vextractub, PPC_NONE, PPC2_ISA300); +GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE, + vextractuh, PPC_NONE, PPC2_ISA300); +GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE, + vextractuw, PPC_NONE, PPC2_ISA300); +GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE, + vinsertb, PPC_NONE, PPC2_ISA300); +GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE, + vinserth, PPC_NONE, PPC2_ISA300); +GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE, + vinsertw, PPC_NONE, PPC2_ISA300); + +static void gen_vsldoi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_ptr ra, rb, rd; + TCGv_i32 sh; + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); + sh = tcg_const_i32(tcg_ctx, VSH(ctx->opcode)); + gen_helper_vsldoi(tcg_ctx, rd, ra, rb, sh); + tcg_temp_free_ptr(tcg_ctx, ra); + tcg_temp_free_ptr(tcg_ctx, rb); + tcg_temp_free_ptr(tcg_ctx, rd); + tcg_temp_free_i32(tcg_ctx, sh); +} + +#define GEN_VAFORM_PAIRED(name0, name1, opc2) \ +static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; \ + TCGv_ptr ra, rb, rc, rd; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rc = gen_avr_ptr(tcg_ctx, rC(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + if (Rc(ctx->opcode)) { \ + gen_helper_##name1(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb, rc); \ + } else { \ + gen_helper_##name0(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb, rc); \ + } \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rc); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + } + +GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16) + +static void gen_vmladduhm(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_ptr ra, rb, rc, rd; + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); + rc = gen_avr_ptr(tcg_ctx, rC(ctx->opcode)); + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); + gen_helper_vmladduhm(tcg_ctx, rd, ra, rb, rc); + tcg_temp_free_ptr(tcg_ctx, ra); + tcg_temp_free_ptr(tcg_ctx, rb); + tcg_temp_free_ptr(tcg_ctx, rc); + tcg_temp_free_ptr(tcg_ctx, rd); +} + +static void gen_vpermr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_ptr ra, rb, rc, rd; + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); + rc = gen_avr_ptr(tcg_ctx, rC(ctx->opcode)); + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); + gen_helper_vpermr(tcg_ctx, tcg_ctx->cpu_env, rd, ra, rb, rc); + tcg_temp_free_ptr(tcg_ctx, ra); + tcg_temp_free_ptr(tcg_ctx, rb); + tcg_temp_free_ptr(tcg_ctx, rc); + tcg_temp_free_ptr(tcg_ctx, rd); +} + +GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18) +GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19) +GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20) +GEN_VAFORM_PAIRED(vsel, vperm, 21) +GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23) + +GEN_VXFORM_NOA(vclzb, 1, 28) +GEN_VXFORM_NOA(vclzh, 1, 29) +GEN_VXFORM_TRANS(vclzw, 1, 30) +GEN_VXFORM_TRANS(vclzd, 1, 31) +GEN_VXFORM_NOA_2(vnegw, 1, 24, 6) +GEN_VXFORM_NOA_2(vnegd, 1, 24, 7) +GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16) +GEN_VXFORM_NOA_2(vextsh2w, 1, 24, 17) +GEN_VXFORM_NOA_2(vextsb2d, 1, 24, 24) +GEN_VXFORM_NOA_2(vextsh2d, 1, 24, 25) +GEN_VXFORM_NOA_2(vextsw2d, 1, 24, 26) +GEN_VXFORM_NOA_2(vctzb, 1, 24, 28) +GEN_VXFORM_NOA_2(vctzh, 1, 24, 29) +GEN_VXFORM_NOA_2(vctzw, 1, 24, 30) +GEN_VXFORM_NOA_2(vctzd, 1, 24, 31) +GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0) +GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1) +GEN_VXFORM_NOA(vpopcntb, 1, 28) +GEN_VXFORM_NOA(vpopcnth, 1, 29) +GEN_VXFORM_NOA(vpopcntw, 1, 30) +GEN_VXFORM_NOA(vpopcntd, 1, 31) +GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \ + vpopcntb, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \ + vpopcnth, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \ + vpopcntw, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \ + vpopcntd, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM(vbpermd, 6, 23); +GEN_VXFORM(vbpermq, 6, 21); +GEN_VXFORM_TRANS(vgbbd, 6, 20); +GEN_VXFORM(vpmsumb, 4, 16) +GEN_VXFORM(vpmsumh, 4, 17) +GEN_VXFORM(vpmsumw, 4, 18) +GEN_VXFORM(vpmsumd, 4, 19) + +#define GEN_BCD(op) \ +static void gen_##op(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ 
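+ /* The BCD helpers report their status in CR field 6 (cpu_crf[6]) \
+ * and take the PS (preferred sign) selector, extracted from the \
+ * opcode below via mask 0x200, as a constant i32 operand. */ \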
+ TCGv_ptr ra, rb, rd; \ + TCGv_i32 ps; \ + \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + \ + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + \ + ps = tcg_const_i32(tcg_ctx, (ctx->opcode & 0x200) != 0); \ + \ + gen_helper_##op(tcg_ctx, cpu_crf[6], rd, ra, rb, ps); \ + \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + tcg_temp_free_i32(tcg_ctx, ps); \ +} + +#define GEN_BCD2(op) \ +static void gen_##op(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr rd, rb; \ + TCGv_i32 ps; \ + \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + \ + rb = gen_avr_ptr(tcg_ctx, rB(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + \ + ps = tcg_const_i32(tcg_ctx, (ctx->opcode & 0x200) != 0); \ + \ + gen_helper_##op(tcg_ctx, cpu_crf[6], rd, rb, ps); \ + \ + tcg_temp_free_ptr(tcg_ctx, rb); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + tcg_temp_free_i32(tcg_ctx, ps); \ +} + +GEN_BCD(bcdadd) +GEN_BCD(bcdsub) +GEN_BCD2(bcdcfn) +GEN_BCD2(bcdctn) +GEN_BCD2(bcdcfz) +GEN_BCD2(bcdctz) +GEN_BCD2(bcdcfsq) +GEN_BCD2(bcdctsq) +GEN_BCD2(bcdsetsgn) +GEN_BCD(bcdcpsgn); +GEN_BCD(bcds); +GEN_BCD(bcdus); +GEN_BCD(bcdsr); +GEN_BCD(bcdtrunc); +GEN_BCD(bcdutrunc); + +static void gen_xpnd04_1(DisasContext *ctx) +{ + switch (opc4(ctx->opcode)) { + case 0: + gen_bcdctsq(ctx); + break; + case 2: + gen_bcdcfsq(ctx); + break; + case 4: + gen_bcdctz(ctx); + break; + case 5: + gen_bcdctn(ctx); + break; + case 6: + gen_bcdcfz(ctx); + break; + case 7: + gen_bcdcfn(ctx); + break; + case 31: + gen_bcdsetsgn(ctx); + break; + default: + gen_invalid(ctx); + break; + } +} + +static void gen_xpnd04_2(DisasContext *ctx) +{ + switch (opc4(ctx->opcode)) { + case 0: + gen_bcdctsq(ctx); + break; + case 2: + gen_bcdcfsq(ctx); + break; + case 4: + gen_bcdctz(ctx); + break; + case 6: + gen_bcdcfz(ctx); + break; + case 7: + gen_bcdcfn(ctx); + break; + case 31: + gen_bcdsetsgn(ctx); + break; + default: + gen_invalid(ctx); + break; + } +} + + +GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \ + xpnd04_1, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \ + xpnd04_2, PPC_NONE, PPC2_ISA300) + +GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \ + bcdadd, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \ + bcdadd, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \ + bcdsub, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \ + bcdsub, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \ + bcdcpsgn, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \ + bcds, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \ + bcdus, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \ + bcdtrunc, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \ + bcdtrunc, PPC_NONE, PPC2_ISA300) +GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \ + bcdutrunc, PPC_NONE, PPC2_ISA300) + + +static void gen_vsbox(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_ptr ra, rd; + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); + rd = gen_avr_ptr(tcg_ctx, 
rD(ctx->opcode)); + gen_helper_vsbox(tcg_ctx, rd, ra); + tcg_temp_free_ptr(tcg_ctx, ra); + tcg_temp_free_ptr(tcg_ctx, rd); +} + +GEN_VXFORM(vcipher, 4, 20) +GEN_VXFORM(vcipherlast, 4, 20) +GEN_VXFORM(vncipher, 4, 21) +GEN_VXFORM(vncipherlast, 4, 21) + +GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207, + vcipherlast, PPC_NONE, PPC2_ALTIVEC_207) +GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207, + vncipherlast, PPC_NONE, PPC2_ALTIVEC_207) + +#define VSHASIGMA(op) \ +static void gen_##op(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr ra, rd; \ + TCGv_i32 st_six; \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + ra = gen_avr_ptr(tcg_ctx, rA(ctx->opcode)); \ + rd = gen_avr_ptr(tcg_ctx, rD(ctx->opcode)); \ + st_six = tcg_const_i32(tcg_ctx, rB(ctx->opcode)); \ + gen_helper_##op(tcg_ctx, rd, ra, st_six); \ + tcg_temp_free_ptr(tcg_ctx, ra); \ + tcg_temp_free_ptr(tcg_ctx, rd); \ + tcg_temp_free_i32(tcg_ctx, st_six); \ +} + +VSHASIGMA(vshasigmaw) +VSHASIGMA(vshasigmad) + +GEN_VXFORM3(vpermxor, 22, 0xFF) +GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE, + vpermxor, PPC_NONE, PPC2_ALTIVEC_207) + +#undef GEN_VR_LDX +#undef GEN_VR_STX +#undef GEN_VR_LVE +#undef GEN_VR_STVE + +#undef GEN_VX_LOGICAL +#undef GEN_VX_LOGICAL_207 +#undef GEN_VXFORM +#undef GEN_VXFORM_207 +#undef GEN_VXFORM_DUAL +#undef GEN_VXRFORM_DUAL +#undef GEN_VXRFORM1 +#undef GEN_VXRFORM +#undef GEN_VXFORM_DUPI +#undef GEN_VXFORM_NOA +#undef GEN_VXFORM_UIMM +#undef GEN_VAFORM_PAIRED + +#undef GEN_BCD2 diff --git a/qemu/target/ppc/translate/vmx-ops.inc.c b/qemu/target/ppc/translate/vmx-ops.inc.c new file mode 100644 index 00000000..84e05fb8 --- /dev/null +++ b/qemu/target/ppc/translate/vmx-ops.inc.c @@ -0,0 +1,301 @@ +#define GEN_VR_LDX(name, opc2, opc3) \ +GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) +#define GEN_VR_STX(name, opc2, opc3) \ +GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) +#define GEN_VR_LVE(name, opc2, opc3) \ + GEN_HANDLER(lve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) +#define GEN_VR_STVE(name, opc2, opc3) \ + GEN_HANDLER(stve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) +GEN_VR_LDX(lvx, 0x07, 0x03), +GEN_VR_LDX(lvxl, 0x07, 0x0B), +GEN_VR_LVE(bx, 0x07, 0x00), +GEN_VR_LVE(hx, 0x07, 0x01), +GEN_VR_LVE(wx, 0x07, 0x02), +GEN_VR_STX(svx, 0x07, 0x07), +GEN_VR_STX(svxl, 0x07, 0x0F), +GEN_VR_STVE(bx, 0x07, 0x04), +GEN_VR_STVE(hx, 0x07, 0x05), +GEN_VR_STVE(wx, 0x07, 0x06), + +#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \ +GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC) + +#define GEN_VX_LOGICAL_207(name, tcg_op, opc2, opc3) \ +GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207) + +GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16), +GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17), +GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18), +GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19), +GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20), +GEN_VX_LOGICAL_207(veqv, tcg_gen_eqv_i64, 2, 26), +GEN_VX_LOGICAL_207(vnand, tcg_gen_nand_i64, 2, 22), +GEN_VX_LOGICAL_207(vorc, tcg_gen_orc_i64, 2, 21), + +#define GEN_VXFORM(name, opc2, opc3) \ +GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC) + +#define GEN_VXFORM_207(name, opc2, opc3) \ +GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207) + +#define GEN_VXFORM_300(name, opc2, opc3) \ +GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300) + +#define GEN_VXFORM_300_EXT(name, opc2, 
opc3, inval) \ +GEN_HANDLER_E(name, 0x04, opc2, opc3, inval, PPC_NONE, PPC2_ISA300) + +#define GEN_VXFORM_300_EO(name, opc2, opc3, opc4) \ +GEN_HANDLER_E_2(name, 0x04, opc2, opc3, opc4, 0x00000000, PPC_NONE, \ + PPC2_ISA300) + +#define GEN_VXFORM_DUAL(name0, name1, opc2, opc3, type0, type1) \ +GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, type0, type1) + +#define GEN_VXRFORM_DUAL(name0, name1, opc2, opc3, tp0, tp1) \ +GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, tp0, tp1), \ +GEN_HANDLER_E(name0##_##name1, 0x4, opc2, (opc3 | 0x10), 0x00000000, tp0, tp1), + +GEN_VXFORM_DUAL(vaddubm, vmul10cuq, 0, 0, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_DUAL(vadduhm, vmul10ecuq, 0, 1, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM(vadduwm, 0, 2), +GEN_VXFORM_207(vaddudm, 0, 3), +GEN_VXFORM_DUAL(vsububm, bcdadd, 0, 16, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_DUAL(vsubuhm, bcdsub, 0, 17, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_DUAL(vsubuwm, bcdus, 0, 18, PPC_ALTIVEC, PPC2_ISA300), +GEN_VXFORM_DUAL(vsubudm, bcds, 0, 19, PPC2_ALTIVEC_207, PPC2_ISA300), +GEN_VXFORM_300(bcds, 0, 27), +GEN_VXFORM(vmaxub, 1, 0), +GEN_VXFORM(vmaxuh, 1, 1), +GEN_VXFORM(vmaxuw, 1, 2), +GEN_VXFORM_207(vmaxud, 1, 3), +GEN_VXFORM(vmaxsb, 1, 4), +GEN_VXFORM(vmaxsh, 1, 5), +GEN_VXFORM(vmaxsw, 1, 6), +GEN_VXFORM_207(vmaxsd, 1, 7), +GEN_VXFORM(vminub, 1, 8), +GEN_VXFORM(vminuh, 1, 9), +GEN_VXFORM(vminuw, 1, 10), +GEN_VXFORM_207(vminud, 1, 11), +GEN_VXFORM(vminsb, 1, 12), +GEN_VXFORM(vminsh, 1, 13), +GEN_VXFORM(vminsw, 1, 14), +GEN_VXFORM_207(vminsd, 1, 15), +GEN_VXFORM_DUAL(vavgub, vabsdub, 1, 16, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_DUAL(vavguh, vabsduh, 1, 17, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_DUAL(vavguw, vabsduw, 1, 18, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM(vavgsb, 1, 20), +GEN_VXFORM(vavgsh, 1, 21), +GEN_VXFORM(vavgsw, 1, 22), +GEN_VXFORM(vmrghb, 6, 0), +GEN_VXFORM(vmrghh, 6, 1), +GEN_VXFORM(vmrghw, 6, 2), +GEN_VXFORM(vmrglb, 6, 4), +GEN_VXFORM(vmrglh, 6, 5), +GEN_VXFORM(vmrglw, 6, 6), +GEN_VXFORM_300(vextublx, 6, 24), +GEN_VXFORM_300(vextuhlx, 6, 25), +GEN_VXFORM_DUAL(vmrgow, vextuwlx, 6, 26, PPC_NONE, PPC2_ALTIVEC_207), +GEN_VXFORM_300(vextubrx, 6, 28), +GEN_VXFORM_300(vextuhrx, 6, 29), +GEN_VXFORM_DUAL(vmrgew, vextuwrx, 6, 30, PPC_NONE, PPC2_ALTIVEC_207), +GEN_VXFORM(vmuloub, 4, 0), +GEN_VXFORM(vmulouh, 4, 1), +GEN_VXFORM_DUAL(vmulouw, vmuluwm, 4, 2, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM(vmulosb, 4, 4), +GEN_VXFORM(vmulosh, 4, 5), +GEN_VXFORM_207(vmulosw, 4, 6), +GEN_VXFORM(vmuleub, 4, 8), +GEN_VXFORM(vmuleuh, 4, 9), +GEN_VXFORM_207(vmuleuw, 4, 10), +GEN_VXFORM(vmulesb, 4, 12), +GEN_VXFORM(vmulesh, 4, 13), +GEN_VXFORM_207(vmulesw, 4, 14), +GEN_VXFORM(vslb, 2, 4), +GEN_VXFORM(vslh, 2, 5), +GEN_VXFORM_DUAL(vslw, vrlwnm, 2, 6, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_207(vsld, 2, 23), +GEN_VXFORM(vsrb, 2, 8), +GEN_VXFORM(vsrh, 2, 9), +GEN_VXFORM(vsrw, 2, 10), +GEN_VXFORM_207(vsrd, 2, 27), +GEN_VXFORM(vsrab, 2, 12), +GEN_VXFORM(vsrah, 2, 13), +GEN_VXFORM(vsraw, 2, 14), +GEN_VXFORM_207(vsrad, 2, 15), +GEN_VXFORM_300(vsrv, 2, 28), +GEN_VXFORM_300(vslv, 2, 29), +GEN_VXFORM(vslo, 6, 16), +GEN_VXFORM(vsro, 6, 17), +GEN_VXFORM(vaddcuw, 0, 6), +GEN_HANDLER_E_2(vprtybw, 0x4, 0x1, 0x18, 8, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E_2(vprtybd, 0x4, 0x1, 0x18, 9, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E_2(vprtybq, 0x4, 0x1, 0x18, 10, 0, PPC_NONE, PPC2_ISA300), + +GEN_VXFORM_DUAL(vsubcuw, xpnd04_1, 0, 22, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_300(bcdsr, 0, 23), +GEN_VXFORM_300(bcdsr, 0, 31), +GEN_VXFORM_DUAL(vaddubs, vmul10uq, 0, 8, PPC_ALTIVEC, 
PPC_NONE), +GEN_VXFORM_DUAL(vadduhs, vmul10euq, 0, 9, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM(vadduws, 0, 10), +GEN_VXFORM(vaddsbs, 0, 12), +GEN_VXFORM_DUAL(vaddshs, bcdcpsgn, 0, 13, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM(vaddsws, 0, 14), +GEN_VXFORM_DUAL(vsububs, bcdadd, 0, 24, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_DUAL(vsubuhs, bcdsub, 0, 25, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM(vsubuws, 0, 26), +GEN_VXFORM_DUAL(vsubsbs, bcdtrunc, 0, 28, PPC_ALTIVEC, PPC2_ISA300), +GEN_VXFORM(vsubshs, 0, 29), +GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_207(vadduqm, 0, 4), +GEN_VXFORM_207(vaddcuq, 0, 5), +GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207), +GEN_VXFORM_DUAL(vsubuqm, bcdtrunc, 0, 20, PPC2_ALTIVEC_207, PPC2_ISA300), +GEN_VXFORM_DUAL(vsubcuq, bcdutrunc, 0, 21, PPC2_ALTIVEC_207, PPC2_ISA300), +GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207), +GEN_VXFORM(vrlb, 2, 0), +GEN_VXFORM(vrlh, 2, 1), +GEN_VXFORM_DUAL(vrlw, vrlwmi, 2, 2, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM_DUAL(vrld, vrldmi, 2, 3, PPC_NONE, PPC2_ALTIVEC_207), +GEN_VXFORM_DUAL(vsl, vrldnm, 2, 7, PPC_ALTIVEC, PPC_NONE), +GEN_VXFORM(vsr, 2, 11), +GEN_VXFORM(vpkuhum, 7, 0), +GEN_VXFORM(vpkuwum, 7, 1), +GEN_VXFORM_207(vpkudum, 7, 17), +GEN_VXFORM(vpkuhus, 7, 2), +GEN_VXFORM(vpkuwus, 7, 3), +GEN_VXFORM_207(vpkudus, 7, 19), +GEN_VXFORM(vpkshus, 7, 4), +GEN_VXFORM(vpkswus, 7, 5), +GEN_VXFORM_207(vpksdus, 7, 21), +GEN_VXFORM(vpkshss, 7, 6), +GEN_VXFORM(vpkswss, 7, 7), +GEN_VXFORM_207(vpksdss, 7, 23), +GEN_VXFORM(vpkpx, 7, 12), +GEN_VXFORM(vsum4ubs, 4, 24), +GEN_VXFORM(vsum4sbs, 4, 28), +GEN_VXFORM(vsum4shs, 4, 25), +GEN_VXFORM(vsum2sws, 4, 26), +GEN_VXFORM(vsumsws, 4, 30), +GEN_VXFORM(vaddfp, 5, 0), +GEN_VXFORM(vsubfp, 5, 1), +GEN_VXFORM(vmaxfp, 5, 16), +GEN_VXFORM(vminfp, 5, 17), + +#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \ + GEN_HANDLER2(name, str, 0x4, opc2, opc3, 0x00000000, PPC_ALTIVEC), +#define GEN_VXRFORM1_300(opname, name, str, opc2, opc3) \ +GEN_HANDLER2_E(name, str, 0x4, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300), +#define GEN_VXRFORM(name, opc2, opc3) \ + GEN_VXRFORM1(name, name, #name, opc2, opc3) \ + GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4))) +#define GEN_VXRFORM_300(name, opc2, opc3) \ + GEN_VXRFORM1_300(name, name, #name, opc2, opc3) \ + GEN_VXRFORM1_300(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4))) + +GEN_VXRFORM_300(vcmpnezb, 3, 4) +GEN_VXRFORM_300(vcmpnezh, 3, 5) +GEN_VXRFORM_300(vcmpnezw, 3, 6) +GEN_VXRFORM(vcmpgtsb, 3, 12) +GEN_VXRFORM(vcmpgtsh, 3, 13) +GEN_VXRFORM(vcmpgtsw, 3, 14) +GEN_VXRFORM(vcmpgtub, 3, 8) +GEN_VXRFORM(vcmpgtuh, 3, 9) +GEN_VXRFORM(vcmpgtuw, 3, 10) +GEN_VXRFORM_DUAL(vcmpeqfp, vcmpequd, 3, 3, PPC_ALTIVEC, PPC_NONE) +GEN_VXRFORM(vcmpgefp, 3, 7) +GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE) +GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE) +GEN_VXRFORM_DUAL(vcmpequb, vcmpneb, 3, 0, PPC_ALTIVEC, PPC_NONE) +GEN_VXRFORM_DUAL(vcmpequh, vcmpneh, 3, 1, PPC_ALTIVEC, PPC_NONE) +GEN_VXRFORM_DUAL(vcmpequw, vcmpnew, 3, 2, PPC_ALTIVEC, PPC_NONE) + +#define GEN_VXFORM_DUAL_INV(name0, name1, opc2, opc3, inval0, inval1, type) \ +GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, \ + PPC_NONE) +GEN_VXFORM_DUAL_INV(vspltb, vextractub, 6, 8, 0x00000000, 0x100000, + PPC_ALTIVEC), +GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000, + PPC_ALTIVEC), +GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000, + 
PPC_ALTIVEC), +GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000), +GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000, + PPC_ALTIVEC), +GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000, + PPC_ALTIVEC), +GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000, + PPC_ALTIVEC), +GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000), +GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06), +GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07), +GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10), +GEN_VXFORM_300_EO(vextsh2w, 0x01, 0x18, 0x11), +GEN_VXFORM_300_EO(vextsb2d, 0x01, 0x18, 0x18), +GEN_VXFORM_300_EO(vextsh2d, 0x01, 0x18, 0x19), +GEN_VXFORM_300_EO(vextsw2d, 0x01, 0x18, 0x1A), +GEN_VXFORM_300_EO(vctzb, 0x01, 0x18, 0x1C), +GEN_VXFORM_300_EO(vctzh, 0x01, 0x18, 0x1D), +GEN_VXFORM_300_EO(vctzw, 0x01, 0x18, 0x1E), +GEN_VXFORM_300_EO(vctzd, 0x01, 0x18, 0x1F), +GEN_VXFORM_300_EO(vclzlsbb, 0x01, 0x18, 0x0), +GEN_VXFORM_300_EO(vctzlsbb, 0x01, 0x18, 0x1), +GEN_VXFORM_300(vpermr, 0x1D, 0xFF), + +#define GEN_VXFORM_NOA(name, opc2, opc3) \ + GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC) +GEN_VXFORM_NOA(vupkhsb, 7, 8), +GEN_VXFORM_NOA(vupkhsh, 7, 9), +GEN_VXFORM_207(vupkhsw, 7, 25), +GEN_VXFORM_NOA(vupklsb, 7, 10), +GEN_VXFORM_NOA(vupklsh, 7, 11), +GEN_VXFORM_207(vupklsw, 7, 27), +GEN_VXFORM_NOA(vupkhpx, 7, 13), +GEN_VXFORM_NOA(vupklpx, 7, 15), +GEN_VXFORM_NOA(vrefp, 5, 4), +GEN_VXFORM_NOA(vrsqrtefp, 5, 5), +GEN_VXFORM_NOA(vexptefp, 5, 6), +GEN_VXFORM_NOA(vlogefp, 5, 7), +GEN_VXFORM_NOA(vrfim, 5, 11), +GEN_VXFORM_NOA(vrfin, 5, 8), +GEN_VXFORM_NOA(vrfip, 5, 10), +GEN_VXFORM_NOA(vrfiz, 5, 9), + +#define GEN_VXFORM_UIMM(name, opc2, opc3) \ + GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC) +GEN_VXFORM_UIMM(vcfux, 5, 12), +GEN_VXFORM_UIMM(vcfsx, 5, 13), +GEN_VXFORM_UIMM(vctuxs, 5, 14), +GEN_VXFORM_UIMM(vctsxs, 5, 15), + + +#define GEN_VAFORM_PAIRED(name0, name1, opc2) \ + GEN_HANDLER(name0##_##name1, 0x04, opc2, 0xFF, 0x00000000, PPC_ALTIVEC) +GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16), +GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18), +GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19), +GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20), +GEN_VAFORM_PAIRED(vsel, vperm, 21), +GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23), + +GEN_VXFORM_DUAL(vclzb, vpopcntb, 1, 28, PPC_NONE, PPC2_ALTIVEC_207), +GEN_VXFORM_DUAL(vclzh, vpopcnth, 1, 29, PPC_NONE, PPC2_ALTIVEC_207), +GEN_VXFORM_DUAL(vclzw, vpopcntw, 1, 30, PPC_NONE, PPC2_ALTIVEC_207), +GEN_VXFORM_DUAL(vclzd, vpopcntd, 1, 31, PPC_NONE, PPC2_ALTIVEC_207), + +GEN_VXFORM_300(vbpermd, 6, 23), +GEN_VXFORM_207(vbpermq, 6, 21), +GEN_VXFORM_207(vgbbd, 6, 20), +GEN_VXFORM_207(vpmsumb, 4, 16), +GEN_VXFORM_207(vpmsumh, 4, 17), +GEN_VXFORM_207(vpmsumw, 4, 18), +GEN_VXFORM_207(vpmsumd, 4, 19), + +GEN_VXFORM_207(vsbox, 4, 23), + +GEN_VXFORM_DUAL(vcipher, vcipherlast, 4, 20, PPC_NONE, PPC2_ALTIVEC_207), +GEN_VXFORM_DUAL(vncipher, vncipherlast, 4, 21, PPC_NONE, PPC2_ALTIVEC_207), + +GEN_VXFORM_207(vshasigmaw, 1, 26), +GEN_VXFORM_207(vshasigmad, 1, 27), + +GEN_VXFORM_DUAL(vsldoi, vpermxor, 22, 0xFF, PPC_ALTIVEC, PPC_NONE), diff --git a/qemu/target/ppc/translate/vsx-impl.inc.c b/qemu/target/ppc/translate/vsx-impl.inc.c new file mode 100644 index 00000000..41c6b3ca --- /dev/null +++ b/qemu/target/ppc/translate/vsx-impl.inc.c @@ -0,0 +1,2118 @@ +/*** VSX extension ***/ + +static inline void get_cpu_vsrh(TCGContext *tcg_ctx, TCGv_i64 dst, int n) +{ + tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, vsr64_offset(n, true)); +} + +static inline void get_cpu_vsrl(TCGContext 
*tcg_ctx, TCGv_i64 dst, int n) +{ + tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, vsr64_offset(n, false)); +} + +static inline void set_cpu_vsrh(TCGContext *tcg_ctx, int n, TCGv_i64 src) +{ + tcg_gen_st_i64(tcg_ctx, src, tcg_ctx->cpu_env, vsr64_offset(n, true)); +} + +static inline void set_cpu_vsrl(TCGContext *tcg_ctx, int n, TCGv_i64 src) +{ + tcg_gen_st_i64(tcg_ctx, src, tcg_ctx->cpu_env, vsr64_offset(n, false)); +} + +static inline TCGv_ptr gen_vsr_ptr(TCGContext *tcg_ctx, int reg) +{ + TCGv_ptr r = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, r, tcg_ctx->cpu_env, vsr_full_offset(reg)); + return r; +} + +#define VSX_LOAD_SCALAR(name, operation) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 t0; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + gen_qemu_##operation(ctx, t0, EA); \ + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); \ + /* NOTE: cpu_vsrl is undefined */ \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} + +VSX_LOAD_SCALAR(lxsdx, ld64_i64) +VSX_LOAD_SCALAR(lxsiwax, ld32s_i64) +VSX_LOAD_SCALAR(lxsibzx, ld8u_i64) +VSX_LOAD_SCALAR(lxsihzx, ld16u_i64) +VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64) +VSX_LOAD_SCALAR(lxsspx, ld32fs) + +static void gen_lxvd2x(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 t0; + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + gen_qemu_ld64_i64(ctx, t0, EA); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + gen_qemu_ld64_i64(ctx, t0, EA); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), t0); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static void gen_lxvdsx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 t0; + TCGv_i64 t1; + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + gen_qemu_ld64_i64(ctx, t0, EA); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); + tcg_gen_mov_i64(tcg_ctx, t1, t0); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), t1); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static void gen_lxvw4x(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 xth; + TCGv_i64 xtl; + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + + gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + + gen_addr_reg_index(ctx, EA); + if (ctx->le_mode) { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_qemu_ld_i64(tcg_ctx, t0, EA, ctx->mem_idx, MO_LEQ); + tcg_gen_shri_i64(tcg_ctx, t1, t0, 32); + tcg_gen_deposit_i64(tcg_ctx, xth, t1, t0, 32, 32); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, t0, EA, ctx->mem_idx, MO_LEQ); + tcg_gen_shri_i64(tcg_ctx, t1, t0, 32); + tcg_gen_deposit_i64(tcg_ctx, 
xtl, t1, t0, 32, 32); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + } else { + tcg_gen_qemu_ld_i64(tcg_ctx, xth, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, xtl, EA, ctx->mem_idx, MO_BEQ); + } + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); +} + +static void gen_bswap16x8(TCGContext *tcg_ctx, TCGv_i64 outh, TCGv_i64 outl, + TCGv_i64 inh, TCGv_i64 inl) +{ + TCGv_i64 mask = tcg_const_i64(tcg_ctx, 0x00FF00FF00FF00FF); + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */ + tcg_gen_and_i64(tcg_ctx, t0, inh, mask); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 8); + tcg_gen_shri_i64(tcg_ctx, t1, inh, 8); + tcg_gen_and_i64(tcg_ctx, t1, t1, mask); + tcg_gen_or_i64(tcg_ctx, outh, t0, t1); + + /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */ + tcg_gen_and_i64(tcg_ctx, t0, inl, mask); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 8); + tcg_gen_shri_i64(tcg_ctx, t1, inl, 8); + tcg_gen_and_i64(tcg_ctx, t1, t1, mask); + tcg_gen_or_i64(tcg_ctx, outl, t0, t1); + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, mask); +} + +static void gen_bswap32x4(TCGContext *tcg_ctx, TCGv_i64 outh, TCGv_i64 outl, + TCGv_i64 inh, TCGv_i64 inl) +{ + TCGv_i64 hi = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 lo = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_bswap64_i64(tcg_ctx, hi, inh); + tcg_gen_bswap64_i64(tcg_ctx, lo, inl); + tcg_gen_shri_i64(tcg_ctx, outh, hi, 32); + tcg_gen_deposit_i64(tcg_ctx, outh, outh, hi, 32, 32); + tcg_gen_shri_i64(tcg_ctx, outl, lo, 32); + tcg_gen_deposit_i64(tcg_ctx, outl, outl, lo, 32, 32); + + tcg_temp_free_i64(tcg_ctx, hi); + tcg_temp_free_i64(tcg_ctx, lo); +} + +static void gen_lxvh8x(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 xth; + TCGv_i64 xtl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + gen_set_access_type(ctx, ACCESS_INT); + + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + tcg_gen_qemu_ld_i64(tcg_ctx, xth, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, xtl, EA, ctx->mem_idx, MO_BEQ); + if (ctx->le_mode) { + gen_bswap16x8(tcg_ctx, xth, xtl, xth, xtl); + } + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); +} + +static void gen_lxvb16x(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 xth; + TCGv_i64 xtl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + tcg_gen_qemu_ld_i64(tcg_ctx, xth, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, xtl, EA, ctx->mem_idx, MO_BEQ); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); +} + +#define VSX_VECTOR_LOAD(name, op, 
indexed) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + int xt; \ + TCGv EA; \ + TCGv_i64 xth; \ + TCGv_i64 xtl; \ + \ + if (indexed) { \ + xt = xT(ctx->opcode); \ + } else { \ + xt = DQxT(ctx->opcode); \ + } \ + \ + if (xt < 32) { \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + } else { \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + } \ + xth = tcg_temp_new_i64(tcg_ctx); \ + xtl = tcg_temp_new_i64(tcg_ctx); \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + if (indexed) { \ + gen_addr_reg_index(ctx, EA); \ + } else { \ + gen_addr_imm_index(ctx, EA, 0x0F); \ + } \ + if (ctx->le_mode) { \ + tcg_gen_qemu_##op(tcg_ctx, xtl, EA, ctx->mem_idx, MO_LEQ); \ + set_cpu_vsrl(tcg_ctx, xt, xtl); \ + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ + tcg_gen_qemu_##op(tcg_ctx, xth, EA, ctx->mem_idx, MO_LEQ); \ + set_cpu_vsrh(tcg_ctx, xt, xth); \ + } else { \ + tcg_gen_qemu_##op(tcg_ctx, xth, EA, ctx->mem_idx, MO_BEQ); \ + set_cpu_vsrh(tcg_ctx, xt, xth); \ + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ + tcg_gen_qemu_##op(tcg_ctx, xtl, EA, ctx->mem_idx, MO_BEQ); \ + set_cpu_vsrl(tcg_ctx, xt, xtl); \ + } \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, xth); \ + tcg_temp_free_i64(tcg_ctx, xtl); \ +} + +VSX_VECTOR_LOAD(lxv, ld_i64, 0) +VSX_VECTOR_LOAD(lxvx, ld_i64, 1) + +#define VSX_VECTOR_STORE(name, op, indexed) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + int xt; \ + TCGv EA; \ + TCGv_i64 xth; \ + TCGv_i64 xtl; \ + \ + if (indexed) { \ + xt = xT(ctx->opcode); \ + } else { \ + xt = DQxT(ctx->opcode); \ + } \ + \ + if (xt < 32) { \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + } else { \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + } \ + xth = tcg_temp_new_i64(tcg_ctx); \ + xtl = tcg_temp_new_i64(tcg_ctx); \ + get_cpu_vsrh(tcg_ctx, xth, xt); \ + get_cpu_vsrl(tcg_ctx, xtl, xt); \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + if (indexed) { \ + gen_addr_reg_index(ctx, EA); \ + } else { \ + gen_addr_imm_index(ctx, EA, 0x0F); \ + } \ + if (ctx->le_mode) { \ + tcg_gen_qemu_##op(tcg_ctx, xtl, EA, ctx->mem_idx, MO_LEQ); \ + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ + tcg_gen_qemu_##op(tcg_ctx, xth, EA, ctx->mem_idx, MO_LEQ); \ + } else { \ + tcg_gen_qemu_##op(tcg_ctx, xth, EA, ctx->mem_idx, MO_BEQ); \ + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); \ + tcg_gen_qemu_##op(tcg_ctx, xtl, EA, ctx->mem_idx, MO_BEQ); \ + } \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, xth); \ + tcg_temp_free_i64(tcg_ctx, xtl); \ +} + +VSX_VECTOR_STORE(stxv, st_i64, 0) +VSX_VECTOR_STORE(stxvx, st_i64, 1) + +#ifdef TARGET_PPC64 +#define VSX_VECTOR_LOAD_STORE_LENGTH(name) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_ptr xt; \ + \ + if (xT(ctx->opcode) < 32) { \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + } else { \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + } \ + EA = tcg_temp_new(tcg_ctx); \ + xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ + gen_set_access_type(ctx, ACCESS_INT); \ + gen_addr_register(ctx, EA); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, EA, 
xt, cpu_gpr[rB(ctx->opcode)]); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_ptr(tcg_ctx, xt); \ +} + +VSX_VECTOR_LOAD_STORE_LENGTH(lxvl) +VSX_VECTOR_LOAD_STORE_LENGTH(lxvll) +VSX_VECTOR_LOAD_STORE_LENGTH(stxvl) +VSX_VECTOR_LOAD_STORE_LENGTH(stxvll) +#endif + +#define VSX_LOAD_SCALAR_DS(name, operation) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 xth; \ + \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + xth = tcg_temp_new_i64(tcg_ctx); \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_imm_index(ctx, EA, 0x03); \ + gen_qemu_##operation(ctx, xth, EA); \ + set_cpu_vsrh(tcg_ctx, rD(ctx->opcode) + 32, xth); \ + /* NOTE: cpu_vsrl is undefined */ \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, xth); \ +} + +VSX_LOAD_SCALAR_DS(lxsd, ld64_i64) +VSX_LOAD_SCALAR_DS(lxssp, ld32fs) + +#define VSX_STORE_SCALAR(name, operation) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 t0; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_reg_index(ctx, EA); \ + get_cpu_vsrh(tcg_ctx, t0, xS(ctx->opcode)); \ + gen_qemu_##operation(ctx, t0, EA); \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, t0); \ +} + +VSX_STORE_SCALAR(stxsdx, st64_i64) + +VSX_STORE_SCALAR(stxsibx, st8_i64) +VSX_STORE_SCALAR(stxsihx, st16_i64) +VSX_STORE_SCALAR(stxsiwx, st32_i64) +VSX_STORE_SCALAR(stxsspx, st32fs) + +static void gen_stxvd2x(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 t0; + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + t0 = tcg_temp_new_i64(tcg_ctx); + gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + get_cpu_vsrh(tcg_ctx, t0, xS(ctx->opcode)); + gen_qemu_st64_i64(ctx, t0, EA); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + get_cpu_vsrl(tcg_ctx, t0, xS(ctx->opcode)); + gen_qemu_st64_i64(ctx, t0, EA); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static void gen_stxvw4x(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 xsh; + TCGv_i64 xsl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xsh = tcg_temp_new_i64(tcg_ctx); + xsl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xsh, xS(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xsl, xS(ctx->opcode)); + gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + if (ctx->le_mode) { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_shri_i64(tcg_ctx, t0, xsh, 32); + tcg_gen_deposit_i64(tcg_ctx, t1, t0, xsh, 32, 32); + tcg_gen_qemu_st_i64(tcg_ctx, t1, EA, ctx->mem_idx, MO_LEQ); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + tcg_gen_shri_i64(tcg_ctx, t0, xsl, 32); + tcg_gen_deposit_i64(tcg_ctx, t1, t0, xsl, 32, 32); + tcg_gen_qemu_st_i64(tcg_ctx, t1, EA, ctx->mem_idx, MO_LEQ); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + } else { + tcg_gen_qemu_st_i64(tcg_ctx, xsh, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + tcg_gen_qemu_st_i64(tcg_ctx, xsl, EA, ctx->mem_idx, MO_BEQ); + } + 
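+ /* For LE, the two words in each doubleword were pre-swapped above:
+ * the MO_LEQ store byte-reverses the whole doubleword, and the swap
+ * restores the word order that stxvw4x specifies. */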
tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, xsh); + tcg_temp_free_i64(tcg_ctx, xsl); +} + +static void gen_stxvh8x(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 xsh; + TCGv_i64 xsl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xsh = tcg_temp_new_i64(tcg_ctx); + xsl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xsh, xS(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xsl, xS(ctx->opcode)); + gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + if (ctx->le_mode) { + TCGv_i64 outh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 outl = tcg_temp_new_i64(tcg_ctx); + + gen_bswap16x8(tcg_ctx, outh, outl, xsh, xsl); + tcg_gen_qemu_st_i64(tcg_ctx, outh, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + tcg_gen_qemu_st_i64(tcg_ctx, outl, EA, ctx->mem_idx, MO_BEQ); + tcg_temp_free_i64(tcg_ctx, outh); + tcg_temp_free_i64(tcg_ctx, outl); + } else { + tcg_gen_qemu_st_i64(tcg_ctx, xsh, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + tcg_gen_qemu_st_i64(tcg_ctx, xsl, EA, ctx->mem_idx, MO_BEQ); + } + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, xsh); + tcg_temp_free_i64(tcg_ctx, xsl); +} + +static void gen_stxvb16x(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv EA; + TCGv_i64 xsh; + TCGv_i64 xsl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xsh = tcg_temp_new_i64(tcg_ctx); + xsl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xsh, xS(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xsl, xS(ctx->opcode)); + gen_set_access_type(ctx, ACCESS_INT); + EA = tcg_temp_new(tcg_ctx); + gen_addr_reg_index(ctx, EA); + tcg_gen_qemu_st_i64(tcg_ctx, xsh, EA, ctx->mem_idx, MO_BEQ); + tcg_gen_addi_tl(tcg_ctx, EA, EA, 8); + tcg_gen_qemu_st_i64(tcg_ctx, xsl, EA, ctx->mem_idx, MO_BEQ); + tcg_temp_free(tcg_ctx, EA); + tcg_temp_free_i64(tcg_ctx, xsh); + tcg_temp_free_i64(tcg_ctx, xsl); +} + +#define VSX_STORE_SCALAR_DS(name, operation) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv EA; \ + TCGv_i64 xth; \ + \ + if (unlikely(!ctx->altivec_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VPU); \ + return; \ + } \ + xth = tcg_temp_new_i64(tcg_ctx); \ + get_cpu_vsrh(tcg_ctx, xth, rD(ctx->opcode) + 32); \ + gen_set_access_type(ctx, ACCESS_INT); \ + EA = tcg_temp_new(tcg_ctx); \ + gen_addr_imm_index(ctx, EA, 0x03); \ + gen_qemu_##operation(ctx, xth, EA); \ + /* NOTE: cpu_vsrl is undefined */ \ + tcg_temp_free(tcg_ctx, EA); \ + tcg_temp_free_i64(tcg_ctx, xth); \ +} + +VSX_STORE_SCALAR_DS(stxsd, st64_i64) +VSX_STORE_SCALAR_DS(stxssp, st32fs) + +static void gen_mfvsrwz(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (xS(ctx->opcode) < 32) { + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + } else { + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + } + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 xsh = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xsh, xS(ctx->opcode)); + tcg_gen_ext32u_i64(tcg_ctx, tmp, xsh); + tcg_gen_trunc_i64_tl(tcg_ctx, cpu_gpr[rA(ctx->opcode)], tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_temp_free_i64(tcg_ctx, xsh); +} + +static void gen_mtvsrwa(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (xS(ctx->opcode) < 32) { + if (unlikely(!ctx->fpu_enabled)) { + 
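+ /* VSRs 0-31 overlay the FPRs, so moves targeting them need the
+ * FPU; VSRs 32-63 overlay the VRs and need Altivec instead. */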
gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + } else { + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + } + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 xsh = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_tl_i64(tcg_ctx, tmp, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_ext32s_i64(tcg_ctx, xsh, tmp); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xsh); + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_temp_free_i64(tcg_ctx, xsh); +} + +static void gen_mtvsrwz(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (xS(ctx->opcode) < 32) { + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + } else { + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + } + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 xsh = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_tl_i64(tcg_ctx, tmp, cpu_gpr[rA(ctx->opcode)]); + tcg_gen_ext32u_i64(tcg_ctx, xsh, tmp); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xsh); + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_temp_free_i64(tcg_ctx, xsh); +} + +#if defined(TARGET_PPC64) +static void gen_mfvsrd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + if (xS(ctx->opcode) < 32) { + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + } else { + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + } + t0 = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, t0, xS(ctx->opcode)); + tcg_gen_mov_i64(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static void gen_mtvsrd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + if (xS(ctx->opcode) < 32) { + if (unlikely(!ctx->fpu_enabled)) { + gen_exception(ctx, POWERPC_EXCP_FPU); + return; + } + } else { + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + } + t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_mov_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static void gen_mfvsrld(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + if (xS(ctx->opcode) < 32) { + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + } else { + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + } + t0 = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrl(tcg_ctx, t0, xS(ctx->opcode)); + tcg_gen_mov_i64(tcg_ctx, cpu_gpr[rA(ctx->opcode)], t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static void gen_mtvsrdd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + if (xT(ctx->opcode) < 32) { + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + } else { + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + } + + t0 = tcg_temp_new_i64(tcg_ctx); + if (!rA(ctx->opcode)) { + tcg_gen_movi_i64(tcg_ctx, t0, 0); + } else { + tcg_gen_mov_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)]); + } + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); + + tcg_gen_mov_i64(tcg_ctx, t0, cpu_gpr[rB(ctx->opcode)]); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static void gen_mtvsrws(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0; + if (xT(ctx->opcode) < 32) { + if (unlikely(!ctx->vsx_enabled)) { + 
gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + } else { + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + } + + t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_deposit_i64(tcg_ctx, t0, cpu_gpr[rA(ctx->opcode)], + cpu_gpr[rA(ctx->opcode)], 32, 32); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), t0); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +#endif + +static void gen_xxpermdi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xh, xl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + + xh = tcg_temp_new_i64(tcg_ctx); + xl = tcg_temp_new_i64(tcg_ctx); + + if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) || + (xT(ctx->opcode) == xB(ctx->opcode)))) { + if ((DM(ctx->opcode) & 2) == 0) { + get_cpu_vsrh(tcg_ctx, xh, xA(ctx->opcode)); + } else { + get_cpu_vsrl(tcg_ctx, xh, xA(ctx->opcode)); + } + if ((DM(ctx->opcode) & 1) == 0) { + get_cpu_vsrh(tcg_ctx, xl, xB(ctx->opcode)); + } else { + get_cpu_vsrl(tcg_ctx, xl, xB(ctx->opcode)); + } + + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xh); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xl); + } else { + if ((DM(ctx->opcode) & 2) == 0) { + get_cpu_vsrh(tcg_ctx, xh, xA(ctx->opcode)); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xh); + } else { + get_cpu_vsrl(tcg_ctx, xh, xA(ctx->opcode)); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xh); + } + if ((DM(ctx->opcode) & 1) == 0) { + get_cpu_vsrh(tcg_ctx, xl, xB(ctx->opcode)); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xl); + } else { + get_cpu_vsrl(tcg_ctx, xl, xB(ctx->opcode)); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xl); + } + } + tcg_temp_free_i64(tcg_ctx, xh); + tcg_temp_free_i64(tcg_ctx, xl); +} + +#define OP_ABS 1 +#define OP_NABS 2 +#define OP_NEG 3 +#define OP_CPSGN 4 +#define SGN_MASK_DP 0x8000000000000000ull +#define SGN_MASK_SP 0x8000000080000000ull + +#define VSX_SCALAR_MOVE(name, op, sgn_mask) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 xb, sgm; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + xb = tcg_temp_new_i64(tcg_ctx); \ + sgm = tcg_temp_new_i64(tcg_ctx); \ + get_cpu_vsrh(tcg_ctx, xb, xB(ctx->opcode)); \ + tcg_gen_movi_i64(tcg_ctx, sgm, sgn_mask); \ + switch (op) { \ + case OP_ABS: { \ + tcg_gen_andc_i64(tcg_ctx, xb, xb, sgm); \ + break; \ + } \ + case OP_NABS: { \ + tcg_gen_or_i64(tcg_ctx, xb, xb, sgm); \ + break; \ + } \ + case OP_NEG: { \ + tcg_gen_xor_i64(tcg_ctx, xb, xb, sgm); \ + break; \ + } \ + case OP_CPSGN: { \ + TCGv_i64 xa = tcg_temp_new_i64(tcg_ctx); \ + get_cpu_vsrh(tcg_ctx, xa, xA(ctx->opcode)); \ + tcg_gen_and_i64(tcg_ctx, xa, xa, sgm); \ + tcg_gen_andc_i64(tcg_ctx, xb, xb, sgm); \ + tcg_gen_or_i64(tcg_ctx, xb, xb, xa); \ + tcg_temp_free_i64(tcg_ctx, xa); \ + break; \ + } \ + } \ + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xb); \ + tcg_temp_free_i64(tcg_ctx, xb); \ + tcg_temp_free_i64(tcg_ctx, sgm); \ + } + +VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP) +VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP) +VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP) +VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP) + +#define VSX_SCALAR_MOVE_QP(name, op, sgn_mask) \ +static void glue(gen_, name)(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + int xa; \ + int xt = rD(ctx->opcode) + 32; \ + int xb = rB(ctx->opcode) + 32; \ + TCGv_i64 xah, xbh, xbl, sgm, tmp; \ + \ + if (unlikely(!ctx->vsx_enabled)) { \ + 
gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + xbh = tcg_temp_new_i64(tcg_ctx); \ + xbl = tcg_temp_new_i64(tcg_ctx); \ + sgm = tcg_temp_new_i64(tcg_ctx); \ + tmp = tcg_temp_new_i64(tcg_ctx); \ + get_cpu_vsrh(tcg_ctx, xbh, xb); \ + get_cpu_vsrl(tcg_ctx, xbl, xb); \ + tcg_gen_movi_i64(tcg_ctx, sgm, sgn_mask); \ + switch (op) { \ + case OP_ABS: \ + tcg_gen_andc_i64(tcg_ctx, xbh, xbh, sgm); \ + break; \ + case OP_NABS: \ + tcg_gen_or_i64(tcg_ctx, xbh, xbh, sgm); \ + break; \ + case OP_NEG: \ + tcg_gen_xor_i64(tcg_ctx, xbh, xbh, sgm); \ + break; \ + case OP_CPSGN: \ + xah = tcg_temp_new_i64(tcg_ctx); \ + xa = rA(ctx->opcode) + 32; \ + get_cpu_vsrh(tcg_ctx, tmp, xa); \ + tcg_gen_and_i64(tcg_ctx, xah, tmp, sgm); \ + tcg_gen_andc_i64(tcg_ctx, xbh, xbh, sgm); \ + tcg_gen_or_i64(tcg_ctx, xbh, xbh, xah); \ + tcg_temp_free_i64(tcg_ctx, xah); \ + break; \ + } \ + set_cpu_vsrh(tcg_ctx, xt, xbh); \ + set_cpu_vsrl(tcg_ctx, xt, xbl); \ + tcg_temp_free_i64(tcg_ctx, xbl); \ + tcg_temp_free_i64(tcg_ctx, xbh); \ + tcg_temp_free_i64(tcg_ctx, sgm); \ + tcg_temp_free_i64(tcg_ctx, tmp); \ +} + +VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP) +VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP) +VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP) +VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP) + +#define VSX_VECTOR_MOVE(name, op, sgn_mask) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 xbh, xbl, sgm; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + xbh = tcg_temp_new_i64(tcg_ctx); \ + xbl = tcg_temp_new_i64(tcg_ctx); \ + sgm = tcg_temp_new_i64(tcg_ctx); \ + get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); \ + get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); \ + tcg_gen_movi_i64(tcg_ctx, sgm, sgn_mask); \ + switch (op) { \ + case OP_ABS: { \ + tcg_gen_andc_i64(tcg_ctx, xbh, xbh, sgm); \ + tcg_gen_andc_i64(tcg_ctx, xbl, xbl, sgm); \ + break; \ + } \ + case OP_NABS: { \ + tcg_gen_or_i64(tcg_ctx, xbh, xbh, sgm); \ + tcg_gen_or_i64(tcg_ctx, xbl, xbl, sgm); \ + break; \ + } \ + case OP_NEG: { \ + tcg_gen_xor_i64(tcg_ctx, xbh, xbh, sgm); \ + tcg_gen_xor_i64(tcg_ctx, xbl, xbl, sgm); \ + break; \ + } \ + case OP_CPSGN: { \ + TCGv_i64 xah = tcg_temp_new_i64(tcg_ctx); \ + TCGv_i64 xal = tcg_temp_new_i64(tcg_ctx); \ + get_cpu_vsrh(tcg_ctx, xah, xA(ctx->opcode)); \ + get_cpu_vsrl(tcg_ctx, xal, xA(ctx->opcode)); \ + tcg_gen_and_i64(tcg_ctx, xah, xah, sgm); \ + tcg_gen_and_i64(tcg_ctx, xal, xal, sgm); \ + tcg_gen_andc_i64(tcg_ctx, xbh, xbh, sgm); \ + tcg_gen_andc_i64(tcg_ctx, xbl, xbl, sgm); \ + tcg_gen_or_i64(tcg_ctx, xbh, xbh, xah); \ + tcg_gen_or_i64(tcg_ctx, xbl, xbl, xal); \ + tcg_temp_free_i64(tcg_ctx, xah); \ + tcg_temp_free_i64(tcg_ctx, xal); \ + break; \ + } \ + } \ + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xbh); \ + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xbl); \ + tcg_temp_free_i64(tcg_ctx, xbh); \ + tcg_temp_free_i64(tcg_ctx, xbl); \ + tcg_temp_free_i64(tcg_ctx, sgm); \ + } + +VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP) +VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP) +VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP) +VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP) +VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP) +VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP) +VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP) +VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP) + +#define VSX_CMP(name, op1, op2, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; \ + TCGv_i32 ignored; \ + TCGv_ptr xt, xa, xb; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ + xa = gen_vsr_ptr(tcg_ctx, xA(ctx->opcode)); \ + xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ + if ((ctx->opcode >> (31 - 21)) & 1) { \ + gen_helper_##name(tcg_ctx, cpu_crf[6], tcg_ctx->cpu_env, xt, xa, xb); \ + } else { \ + ignored = tcg_temp_new_i32(tcg_ctx); \ + gen_helper_##name(tcg_ctx, ignored, tcg_ctx->cpu_env, xt, xa, xb); \ + tcg_temp_free_i32(tcg_ctx, ignored); \ + } \ + gen_helper_float_check_status(tcg_ctx, tcg_ctx->cpu_env); \ + tcg_temp_free_ptr(tcg_ctx, xt); \ + tcg_temp_free_ptr(tcg_ctx, xa); \ + tcg_temp_free_ptr(tcg_ctx, xb); \ +} + +VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX) +VSX_CMP(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX) +VSX_CMP(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX) +VSX_CMP(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300) +VSX_CMP(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX) +VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX) +VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX) +VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX) + +static void gen_xscvqpdp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 opc; + TCGv_ptr xt, xb; + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + opc = tcg_const_i32(tcg_ctx, ctx->opcode); + xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); + xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); + gen_helper_xscvqpdp(tcg_ctx, tcg_ctx->cpu_env, opc, xt, xb); + tcg_temp_free_i32(tcg_ctx, opc); + tcg_temp_free_ptr(tcg_ctx, xt); + tcg_temp_free_ptr(tcg_ctx, xb); +} + +#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 opc; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc); \ + tcg_temp_free_i32(tcg_ctx, opc); \ +} + +#define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr xt, xa, xb; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ + xa = gen_vsr_ptr(tcg_ctx, xA(ctx->opcode)); \ + xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, xt, xa, xb); \ + tcg_temp_free_ptr(tcg_ctx, xt); \ + tcg_temp_free_ptr(tcg_ctx, xa); \ + tcg_temp_free_ptr(tcg_ctx, xb); \ +} + +#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr xt, xb; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ + xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, xt, xb); \ + tcg_temp_free_ptr(tcg_ctx, xt); \ + tcg_temp_free_ptr(tcg_ctx, xb); \ +} + +#define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 opc; \ + TCGv_ptr xa, xb; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ + xa = gen_vsr_ptr(tcg_ctx, 
xA(ctx->opcode)); \ + xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc, xa, xb); \ + tcg_temp_free_i32(tcg_ctx, opc); \ + tcg_temp_free_ptr(tcg_ctx, xa); \ + tcg_temp_free_ptr(tcg_ctx, xb); \ +} + +#define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 opc; \ + TCGv_ptr xb; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ + xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc, xb); \ + tcg_temp_free_i32(tcg_ctx, opc); \ + tcg_temp_free_ptr(tcg_ctx, xb); \ +} + +#define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 opc; \ + TCGv_ptr xt, xa, xb; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ + xt = gen_vsr_ptr(tcg_ctx, rD(ctx->opcode) + 32); \ + xa = gen_vsr_ptr(tcg_ctx, rA(ctx->opcode) + 32); \ + xb = gen_vsr_ptr(tcg_ctx, rB(ctx->opcode) + 32); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc, xt, xa, xb); \ + tcg_temp_free_i32(tcg_ctx, opc); \ + tcg_temp_free_ptr(tcg_ctx, xt); \ + tcg_temp_free_ptr(tcg_ctx, xa); \ + tcg_temp_free_ptr(tcg_ctx, xb); \ +} + +#define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 opc; \ + TCGv_ptr xt, xb; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ + xt = gen_vsr_ptr(tcg_ctx, rD(ctx->opcode) + 32); \ + xb = gen_vsr_ptr(tcg_ctx, rB(ctx->opcode) + 32); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc, xt, xb); \ + tcg_temp_free_i32(tcg_ctx, opc); \ + tcg_temp_free_ptr(tcg_ctx, xt); \ + tcg_temp_free_ptr(tcg_ctx, xb); \ +} + +#define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i32 opc; \ + TCGv_ptr xa, xb; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + opc = tcg_const_i32(tcg_ctx, ctx->opcode); \ + xa = gen_vsr_ptr(tcg_ctx, rA(ctx->opcode) + 32); \ + xb = gen_vsr_ptr(tcg_ctx, rB(ctx->opcode) + 32); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, opc, xa, xb); \ + tcg_temp_free_i32(tcg_ctx, opc); \ + tcg_temp_free_ptr(tcg_ctx, xa); \ + tcg_temp_free_ptr(tcg_ctx, xb); \ +} + +#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 t0; \ + TCGv_i64 t1; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + t0 = tcg_temp_new_i64(tcg_ctx); \ + t1 = tcg_temp_new_i64(tcg_ctx); \ + get_cpu_vsrh(tcg_ctx, t0, xB(ctx->opcode)); \ + gen_helper_##name(tcg_ctx, t1, tcg_ctx->cpu_env, t0); \ + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t1); \ + tcg_temp_free_i64(tcg_ctx, t0); \ + tcg_temp_free_i64(tcg_ctx, t1); \ +} + +GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX) +GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300) +GEN_VSX_HELPER_X3(xssubdp, 0x00, 0x05, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xsmuldp, 0x00, 0x06, 0, PPC2_VSX) 
+GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300) +GEN_VSX_HELPER_X3(xsdivdp, 0x00, 0x07, 0, PPC2_VSX) +GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300) +GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX) +GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX) +GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300) +GEN_VSX_HELPER_X3(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300) +GEN_VSX_HELPER_X3(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300) +GEN_VSX_HELPER_X3(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300) +GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300) +GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300) +GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX) +GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX) +GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX) +GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX) +GEN_VSX_HELPER_R3(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300) +GEN_VSX_HELPER_R3(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300) +GEN_VSX_HELPER_R3(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300) +GEN_VSX_HELPER_R3(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300) +GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300) +GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX) +GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300) +GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207) +GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300) +GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300) +GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300) +GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300) +GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300) +GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300) +GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX) +GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207) +GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX) +GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300) +GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX) +GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207) +GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300) +GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300) +GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300) +GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300) +GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207) +GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207) +GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207) +GEN_VSX_HELPER_X3(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207) +GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207) +GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207) +GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207) +GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207) +GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207) 
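[reviewer note] The GEN_VSX_HELPER_* one-liners above expand mechanically into full translator functions; the op1/op2/inval/type arguments are never used by the generated body and only mirror the opcode-table entries in vsx-ops.inc.c. As an illustration (not additional patch content), GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX) expands to:

    static void gen_xssqrtdp(DisasContext *ctx)
    {
        TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
        TCGv_ptr xt, xb;
        if (unlikely(!ctx->vsx_enabled)) {
            /* raise the VSX-unavailable interrupt and bail out */
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
        xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode));  /* target VSR */
        xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode));  /* source VSR */
        gen_helper_xssqrtdp(tcg_ctx, tcg_ctx->cpu_env, xt, xb);
        tcg_temp_free_ptr(tcg_ctx, xt);
        tcg_temp_free_ptr(tcg_ctx, xb);
    }

The actual arithmetic lives in the out-of-line helper; the generated stub only checks MSR[VSX], builds pointers into the VSR file, and calls it.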
+GEN_VSX_HELPER_X1(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300) +GEN_VSX_HELPER_2(xststdcdp, 0x14, 0x16, 0, PPC2_ISA300) +GEN_VSX_HELPER_2(xststdcqp, 0x04, 0x16, 0, PPC2_ISA300) + +GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX) +GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX) +GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX) + +GEN_VSX_HELPER_X3(xvaddsp, 0x00, 0x08, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xvsubsp, 0x00, 0x09, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX) +GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX) +GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300) +GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300) +GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX) +GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX) +GEN_VSX_HELPER_2(xvtstdcsp, 0x14, 0x1A, 0, PPC2_VSX) +GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX) +GEN_VSX_HELPER_X3(xxperm, 0x08, 0x03, 0, PPC2_ISA300) +GEN_VSX_HELPER_X3(xxpermr, 0x08, 0x07, 0, PPC2_ISA300) + +#define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr xt, xa, b, c; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + xt = gen_vsr_ptr(tcg_ctx, 
xT(ctx->opcode)); \ + xa = gen_vsr_ptr(tcg_ctx, xA(ctx->opcode)); \ + if (ctx->opcode & PPC_BIT32(25)) { \ + /* \ + * AxT + B \ + */ \ + b = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ + c = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ + } else { \ + /* \ + * AxB + T \ + */ \ + b = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \ + c = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \ + } \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, xt, xa, b, c); \ + tcg_temp_free_ptr(tcg_ctx, xt); \ + tcg_temp_free_ptr(tcg_ctx, xa); \ + tcg_temp_free_ptr(tcg_ctx, b); \ + tcg_temp_free_ptr(tcg_ctx, c); \ +} + +GEN_VSX_HELPER_VSX_MADD(xsmadddp, 0x04, 0x04, 0x05, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xsmsubdp, 0x04, 0x06, 0x07, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xsnmadddp, 0x04, 0x14, 0x15, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xsnmsubdp, 0x04, 0x16, 0x17, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xsmaddsp, 0x04, 0x00, 0x01, 0, PPC2_VSX207) +GEN_VSX_HELPER_VSX_MADD(xsmsubsp, 0x04, 0x02, 0x03, 0, PPC2_VSX207) +GEN_VSX_HELPER_VSX_MADD(xsnmaddsp, 0x04, 0x10, 0x11, 0, PPC2_VSX207) +GEN_VSX_HELPER_VSX_MADD(xsnmsubsp, 0x04, 0x12, 0x13, 0, PPC2_VSX207) +GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX) +GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX) + +static void gen_xxbrd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xth; + TCGv_i64 xtl; + TCGv_i64 xbh; + TCGv_i64 xbl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + xbh = tcg_temp_new_i64(tcg_ctx); + xbl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); + + tcg_gen_bswap64_i64(tcg_ctx, xth, xbh); + tcg_gen_bswap64_i64(tcg_ctx, xtl, xbl); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xbh); + tcg_temp_free_i64(tcg_ctx, xbl); +} + +static void gen_xxbrh(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xth; + TCGv_i64 xtl; + TCGv_i64 xbh; + TCGv_i64 xbl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + xbh = tcg_temp_new_i64(tcg_ctx); + xbl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); + + gen_bswap16x8(tcg_ctx, xth, xtl, xbh, xbl); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xbh); + tcg_temp_free_i64(tcg_ctx, xbl); +} + +static void gen_xxbrq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xth; + TCGv_i64 xtl; + TCGv_i64 xbh; + TCGv_i64 xbl; + TCGv_i64 t0; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = 
tcg_temp_new_i64(tcg_ctx); + xbh = tcg_temp_new_i64(tcg_ctx); + xbl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); + t0 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_bswap64_i64(tcg_ctx, t0, xbl); + tcg_gen_bswap64_i64(tcg_ctx, xtl, xbh); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + tcg_gen_mov_i64(tcg_ctx, xth, t0); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xbh); + tcg_temp_free_i64(tcg_ctx, xbl); +} + +static void gen_xxbrw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xth; + TCGv_i64 xtl; + TCGv_i64 xbh; + TCGv_i64 xbl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + xbh = tcg_temp_new_i64(tcg_ctx); + xbl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); + + gen_bswap32x4(tcg_ctx, xth, xtl, xbh, xbl); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xbh); + tcg_temp_free_i64(tcg_ctx, xbl); +} + +#define VSX_LOGICAL(name, vece, tcg_op) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + tcg_op(tcg_ctx, vece, vsr_full_offset(xT(ctx->opcode)), \ + vsr_full_offset(xA(ctx->opcode)), \ + vsr_full_offset(xB(ctx->opcode)), 16, 16); \ + } + +VSX_LOGICAL(xxland, MO_64, tcg_gen_gvec_and) +VSX_LOGICAL(xxlandc, MO_64, tcg_gen_gvec_andc) +VSX_LOGICAL(xxlor, MO_64, tcg_gen_gvec_or) +VSX_LOGICAL(xxlxor, MO_64, tcg_gen_gvec_xor) +VSX_LOGICAL(xxlnor, MO_64, tcg_gen_gvec_nor) +VSX_LOGICAL(xxleqv, MO_64, tcg_gen_gvec_eqv) +VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand) +VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc) + +#define VSX_XXMRG(name, high) \ +static void glue(gen_, name)(DisasContext *ctx) \ + { \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_i64 a0, a1, b0, b1, tmp; \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } \ + a0 = tcg_temp_new_i64(tcg_ctx); \ + a1 = tcg_temp_new_i64(tcg_ctx); \ + b0 = tcg_temp_new_i64(tcg_ctx); \ + b1 = tcg_temp_new_i64(tcg_ctx); \ + tmp = tcg_temp_new_i64(tcg_ctx); \ + if (high) { \ + get_cpu_vsrh(tcg_ctx, a0, xA(ctx->opcode)); \ + get_cpu_vsrh(tcg_ctx, a1, xA(ctx->opcode)); \ + get_cpu_vsrh(tcg_ctx, b0, xB(ctx->opcode)); \ + get_cpu_vsrh(tcg_ctx, b1, xB(ctx->opcode)); \ + } else { \ + get_cpu_vsrl(tcg_ctx, a0, xA(ctx->opcode)); \ + get_cpu_vsrl(tcg_ctx, a1, xA(ctx->opcode)); \ + get_cpu_vsrl(tcg_ctx, b0, xB(ctx->opcode)); \ + get_cpu_vsrl(tcg_ctx, b1, xB(ctx->opcode)); \ + } \ + tcg_gen_shri_i64(tcg_ctx, a0, a0, 32); \ + tcg_gen_shri_i64(tcg_ctx, b0, b0, 32); \ + tcg_gen_deposit_i64(tcg_ctx, tmp, b0, a0, 32, 32); \ + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), tmp); \ + tcg_gen_deposit_i64(tcg_ctx, tmp, b1, a1, 32, 32); \ + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), tmp); \ + tcg_temp_free_i64(tcg_ctx, a0); \ + tcg_temp_free_i64(tcg_ctx, a1); \ + tcg_temp_free_i64(tcg_ctx, b0); \ + tcg_temp_free_i64(tcg_ctx, b1); \ + tcg_temp_free_i64(tcg_ctx, tmp); \ + } + +VSX_XXMRG(xxmrghw, 1) +VSX_XXMRG(xxmrglw, 0) + 
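[reviewer note] The VSX_XXMRG shift/deposit sequence above is easier to follow against a plain-integer reference model. The sketch below is illustration only; vsr128 and xxmrg_words are hypothetical names, not part of the patch. Word lanes 0..3 of the result are A.w0, B.w0, A.w1, B.w1 for xxmrghw, and A.w2, B.w2, A.w3, B.w3 for xxmrglw:

    #include <stdint.h>

    /* A 128-bit VSR as two doublewords in register order:
       hi holds words 0..1, lo holds words 2..3. */
    typedef struct { uint64_t hi, lo; } vsr128;

    /* Hypothetical reference model of xxmrghw (high != 0) / xxmrglw. */
    static vsr128 xxmrg_words(vsr128 a, vsr128 b, int high)
    {
        uint64_t ad = high ? a.hi : a.lo;  /* doubleword holding the lanes */
        uint64_t bd = high ? b.hi : b.lo;
        vsr128 t;
        t.hi = (ad & 0xffffffff00000000ull) | (bd >> 32);  /* A.wN   : B.wN   */
        t.lo = (ad << 32) | (bd & 0x00000000ffffffffull);  /* A.wN+1 : B.wN+1 */
        return t;
    }

This matches the generated TCG: a0/b0 are shifted right 32 to expose the even word, and deposit places the A word above the B word in each output doubleword.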
+static void gen_xxsel(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rt = xT(ctx->opcode); + int ra = xA(ctx->opcode); + int rb = xB(ctx->opcode); + int rc = xC(ctx->opcode); + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + tcg_gen_gvec_bitsel(tcg_ctx, MO_64, vsr_full_offset(rt), vsr_full_offset(rc), + vsr_full_offset(rb), vsr_full_offset(ra), 16, 16); +} + +static void gen_xxspltw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int rt = xT(ctx->opcode); + int rb = xB(ctx->opcode); + int uim = UIM(ctx->opcode); + int tofs, bofs; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + + tofs = vsr_full_offset(rt); + bofs = vsr_full_offset(rb); + bofs += uim << MO_32; +#ifndef HOST_WORDS_BIG_ENDIAN + bofs ^= 8 | 4; +#endif + + tcg_gen_gvec_dup_mem(tcg_ctx, MO_32, tofs, bofs, 16, 16); +} + +#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff)) + +static void gen_xxspltib(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t uim8 = IMM8(ctx->opcode); + int rt = xT(ctx->opcode); + + if (rt < 32) { + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + } else { + if (unlikely(!ctx->altivec_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VPU); + return; + } + } + tcg_gen_gvec_dup8i(tcg_ctx, vsr_full_offset(rt), 16, 16, uim8); +} + +static void gen_xxsldwi(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xth, xtl; + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + + switch (SHW(ctx->opcode)) { + case 0: { + get_cpu_vsrh(tcg_ctx, xth, xA(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xtl, xA(ctx->opcode)); + break; + } + case 1: { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xth, xA(ctx->opcode)); + tcg_gen_shli_i64(tcg_ctx, xth, xth, 32); + get_cpu_vsrl(tcg_ctx, t0, xA(ctx->opcode)); + tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); + tcg_gen_or_i64(tcg_ctx, xth, xth, t0); + get_cpu_vsrl(tcg_ctx, xtl, xA(ctx->opcode)); + tcg_gen_shli_i64(tcg_ctx, xtl, xtl, 32); + get_cpu_vsrh(tcg_ctx, t0, xB(ctx->opcode)); + tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); + tcg_gen_or_i64(tcg_ctx, xtl, xtl, t0); + tcg_temp_free_i64(tcg_ctx, t0); + break; + } + case 2: { + get_cpu_vsrl(tcg_ctx, xth, xA(ctx->opcode)); + get_cpu_vsrh(tcg_ctx, xtl, xB(ctx->opcode)); + break; + } + case 3: { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrl(tcg_ctx, xth, xA(ctx->opcode)); + tcg_gen_shli_i64(tcg_ctx, xth, xth, 32); + get_cpu_vsrh(tcg_ctx, t0, xB(ctx->opcode)); + tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); + tcg_gen_or_i64(tcg_ctx, xth, xth, t0); + get_cpu_vsrh(tcg_ctx, xtl, xB(ctx->opcode)); + tcg_gen_shli_i64(tcg_ctx, xtl, xtl, 32); + get_cpu_vsrl(tcg_ctx, t0, xB(ctx->opcode)); + tcg_gen_shri_i64(tcg_ctx, t0, t0, 32); + tcg_gen_or_i64(tcg_ctx, xtl, xtl, t0); + tcg_temp_free_i64(tcg_ctx, t0); + break; + } + } + + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); +} + +#define VSX_EXTRACT_INSERT(name) \ +static void gen_##name(DisasContext *ctx) \ +{ \ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; \ + TCGv_ptr xt, xb; \ + TCGv_i32 t0; \ + TCGv_i64 t1; \ + uint8_t uimm = UIMM4(ctx->opcode); \ + \ + if (unlikely(!ctx->vsx_enabled)) { \ + gen_exception(ctx, POWERPC_EXCP_VSXU); \ + return; \ + } 
\
+    xt = gen_vsr_ptr(tcg_ctx, xT(ctx->opcode)); \
+    xb = gen_vsr_ptr(tcg_ctx, xB(ctx->opcode)); \
+    t0 = tcg_temp_new_i32(tcg_ctx); \
+    t1 = tcg_temp_new_i64(tcg_ctx); \
+    /* \
+     * uimm > 15 is out of bounds; for 12 < uimm <= 15 the \
+     * helper handles it as the hardware does \
+     */ \
+    if (uimm > 15) { \
+        tcg_gen_movi_i64(tcg_ctx, t1, 0); \
+        set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), t1); \
+        set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), t1); \
+        tcg_temp_free_ptr(tcg_ctx, xb); \
+        tcg_temp_free_ptr(tcg_ctx, xt); \
+        tcg_temp_free_i32(tcg_ctx, t0); \
+        tcg_temp_free_i64(tcg_ctx, t1); \
+        return; \
+    } \
+    tcg_gen_movi_i32(tcg_ctx, t0, uimm); \
+    gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, xt, xb, t0); \
+    tcg_temp_free_ptr(tcg_ctx, xb); \
+    tcg_temp_free_ptr(tcg_ctx, xt); \
+    tcg_temp_free_i32(tcg_ctx, t0); \
+    tcg_temp_free_i64(tcg_ctx, t1); \
+}
+
+VSX_EXTRACT_INSERT(xxextractuw)
+VSX_EXTRACT_INSERT(xxinsertw)
+
+#ifdef TARGET_PPC64
+static void gen_xsxexpdp(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv rt = cpu_gpr[rD(ctx->opcode)];
+    TCGv_i64 t0;
+    if (unlikely(!ctx->vsx_enabled)) {
+        gen_exception(ctx, POWERPC_EXCP_VSXU);
+        return;
+    }
+    t0 = tcg_temp_new_i64(tcg_ctx);
+    get_cpu_vsrh(tcg_ctx, t0, xB(ctx->opcode));
+    tcg_gen_extract_i64(tcg_ctx, rt, t0, 52, 11);
+    tcg_temp_free_i64(tcg_ctx, t0);
+}
+
+static void gen_xsxexpqp(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv_i64 xth;
+    TCGv_i64 xtl;
+    TCGv_i64 xbh;
+
+    if (unlikely(!ctx->vsx_enabled)) {
+        gen_exception(ctx, POWERPC_EXCP_VSXU);
+        return;
+    }
+    xth = tcg_temp_new_i64(tcg_ctx);
+    xtl = tcg_temp_new_i64(tcg_ctx);
+    xbh = tcg_temp_new_i64(tcg_ctx);
+    get_cpu_vsrh(tcg_ctx, xbh, rB(ctx->opcode) + 32);
+
+    tcg_gen_extract_i64(tcg_ctx, xth, xbh, 48, 15);
+    set_cpu_vsrh(tcg_ctx, rD(ctx->opcode) + 32, xth);
+    tcg_gen_movi_i64(tcg_ctx, xtl, 0);
+    set_cpu_vsrl(tcg_ctx, rD(ctx->opcode) + 32, xtl);
+
+    tcg_temp_free_i64(tcg_ctx, xbh);
+    tcg_temp_free_i64(tcg_ctx, xth);
+    tcg_temp_free_i64(tcg_ctx, xtl);
+}
+
+static void gen_xsiexpdp(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv_i64 xth;
+    TCGv ra = cpu_gpr[rA(ctx->opcode)];
+    TCGv rb = cpu_gpr[rB(ctx->opcode)];
+    TCGv_i64 t0;
+
+    if (unlikely(!ctx->vsx_enabled)) {
+        gen_exception(ctx, POWERPC_EXCP_VSXU);
+        return;
+    }
+    t0 = tcg_temp_new_i64(tcg_ctx);
+    xth = tcg_temp_new_i64(tcg_ctx);
+    tcg_gen_andi_i64(tcg_ctx, xth, ra, 0x800FFFFFFFFFFFFF);
+    tcg_gen_andi_i64(tcg_ctx, t0, rb, 0x7FF);
+    tcg_gen_shli_i64(tcg_ctx, t0, t0, 52);
+    tcg_gen_or_i64(tcg_ctx, xth, xth, t0);
+    set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth);
+    /* dword[1] is undefined */
+    tcg_temp_free_i64(tcg_ctx, t0);
+    tcg_temp_free_i64(tcg_ctx, xth);
+}
+
+static void gen_xsiexpqp(DisasContext *ctx)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv_i64 xth;
+    TCGv_i64 xtl;
+    TCGv_i64 xah;
+    TCGv_i64 xal;
+    TCGv_i64 xbh;
+    TCGv_i64 t0;
+
+    if (unlikely(!ctx->vsx_enabled)) {
+        gen_exception(ctx, POWERPC_EXCP_VSXU);
+        return;
+    }
+    xth = tcg_temp_new_i64(tcg_ctx);
+    xtl = tcg_temp_new_i64(tcg_ctx);
+    xah = tcg_temp_new_i64(tcg_ctx);
+    xal = tcg_temp_new_i64(tcg_ctx);
+    get_cpu_vsrh(tcg_ctx, xah, rA(ctx->opcode) + 32);
+    get_cpu_vsrl(tcg_ctx, xal, rA(ctx->opcode) + 32);
+    xbh = tcg_temp_new_i64(tcg_ctx);
+    get_cpu_vsrh(tcg_ctx, xbh, rB(ctx->opcode) + 32);
+    t0 = tcg_temp_new_i64(tcg_ctx);
+
+    tcg_gen_andi_i64(tcg_ctx, xth, xah, 0x8000FFFFFFFFFFFF);
+    tcg_gen_andi_i64(tcg_ctx, t0, xbh, 0x7FFF);
+    tcg_gen_shli_i64(tcg_ctx, t0, t0, 48);
+    tcg_gen_or_i64(tcg_ctx, xth, xth, t0);
+    set_cpu_vsrh(tcg_ctx, rD(ctx->opcode) + 32, xth);
+    tcg_gen_mov_i64(tcg_ctx, xtl, xal);
+    set_cpu_vsrl(tcg_ctx, rD(ctx->opcode) + 32, xtl);
+
+
tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xah); + tcg_temp_free_i64(tcg_ctx, xal); + tcg_temp_free_i64(tcg_ctx, xbh); +} + +static void gen_xsxsigdp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv rt = cpu_gpr[rD(ctx->opcode)]; + TCGv_i64 t0, t1, zr, nan, exp; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + exp = tcg_temp_new_i64(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + zr = tcg_const_i64(tcg_ctx, 0); + nan = tcg_const_i64(tcg_ctx, 2047); + + get_cpu_vsrh(tcg_ctx, t1, xB(ctx->opcode)); + tcg_gen_extract_i64(tcg_ctx, exp, t1, 52, 11); + tcg_gen_movi_i64(tcg_ctx, t0, 0x0010000000000000); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, zr, zr, t0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, nan, zr, t0); + get_cpu_vsrh(tcg_ctx, t1, xB(ctx->opcode)); + tcg_gen_deposit_i64(tcg_ctx, rt, t0, t1, 0, 52); + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, exp); + tcg_temp_free_i64(tcg_ctx, zr); + tcg_temp_free_i64(tcg_ctx, nan); +} + +static void gen_xsxsigqp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t0, zr, nan, exp; + TCGv_i64 xth; + TCGv_i64 xtl; + TCGv_i64 xbh; + TCGv_i64 xbl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + xbh = tcg_temp_new_i64(tcg_ctx); + xbl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xbh, rB(ctx->opcode) + 32); + get_cpu_vsrl(tcg_ctx, xbl, rB(ctx->opcode) + 32); + exp = tcg_temp_new_i64(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + zr = tcg_const_i64(tcg_ctx, 0); + nan = tcg_const_i64(tcg_ctx, 32767); + + tcg_gen_extract_i64(tcg_ctx, exp, xbh, 48, 15); + tcg_gen_movi_i64(tcg_ctx, t0, 0x0001000000000000); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, zr, zr, t0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, nan, zr, t0); + tcg_gen_deposit_i64(tcg_ctx, xth, t0, xbh, 0, 48); + set_cpu_vsrh(tcg_ctx, rD(ctx->opcode) + 32, xth); + tcg_gen_mov_i64(tcg_ctx, xtl, xbl); + set_cpu_vsrl(tcg_ctx, rD(ctx->opcode) + 32, xtl); + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, exp); + tcg_temp_free_i64(tcg_ctx, zr); + tcg_temp_free_i64(tcg_ctx, nan); + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xbh); + tcg_temp_free_i64(tcg_ctx, xbl); +} +#endif + +static void gen_xviexpsp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xth; + TCGv_i64 xtl; + TCGv_i64 xah; + TCGv_i64 xal; + TCGv_i64 xbh; + TCGv_i64 xbl; + TCGv_i64 t0; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + xah = tcg_temp_new_i64(tcg_ctx); + xal = tcg_temp_new_i64(tcg_ctx); + xbh = tcg_temp_new_i64(tcg_ctx); + xbl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xah, xA(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xal, xA(ctx->opcode)); + get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); + t0 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_andi_i64(tcg_ctx, xth, xah, 0x807FFFFF807FFFFF); + tcg_gen_andi_i64(tcg_ctx, t0, xbh, 0xFF000000FF); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 23); + tcg_gen_or_i64(tcg_ctx, xth, xth, t0); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), 
xth); + tcg_gen_andi_i64(tcg_ctx, xtl, xal, 0x807FFFFF807FFFFF); + tcg_gen_andi_i64(tcg_ctx, t0, xbl, 0xFF000000FF); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 23); + tcg_gen_or_i64(tcg_ctx, xtl, xtl, t0); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xah); + tcg_temp_free_i64(tcg_ctx, xal); + tcg_temp_free_i64(tcg_ctx, xbh); + tcg_temp_free_i64(tcg_ctx, xbl); +} + +static void gen_xviexpdp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xth; + TCGv_i64 xtl; + TCGv_i64 xah; + TCGv_i64 xal; + TCGv_i64 xbh; + TCGv_i64 xbl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + xah = tcg_temp_new_i64(tcg_ctx); + xal = tcg_temp_new_i64(tcg_ctx); + xbh = tcg_temp_new_i64(tcg_ctx); + xbl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xah, xA(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xal, xA(ctx->opcode)); + get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); + + tcg_gen_deposit_i64(tcg_ctx, xth, xah, xbh, 52, 11); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + + tcg_gen_deposit_i64(tcg_ctx, xtl, xal, xbl, 52, 11); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xah); + tcg_temp_free_i64(tcg_ctx, xal); + tcg_temp_free_i64(tcg_ctx, xbh); + tcg_temp_free_i64(tcg_ctx, xbl); +} + +static void gen_xvxexpsp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xth; + TCGv_i64 xtl; + TCGv_i64 xbh; + TCGv_i64 xbl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + xbh = tcg_temp_new_i64(tcg_ctx); + xbl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); + + tcg_gen_shri_i64(tcg_ctx, xth, xbh, 23); + tcg_gen_andi_i64(tcg_ctx, xth, xth, 0xFF000000FF); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + tcg_gen_shri_i64(tcg_ctx, xtl, xbl, 23); + tcg_gen_andi_i64(tcg_ctx, xtl, xtl, 0xFF000000FF); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xbh); + tcg_temp_free_i64(tcg_ctx, xbl); +} + +static void gen_xvxexpdp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xth; + TCGv_i64 xtl; + TCGv_i64 xbh; + TCGv_i64 xbl; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + xbh = tcg_temp_new_i64(tcg_ctx); + xbl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); + + tcg_gen_extract_i64(tcg_ctx, xth, xbh, 52, 11); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + tcg_gen_extract_i64(tcg_ctx, xtl, xbl, 52, 11); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xbh); + tcg_temp_free_i64(tcg_ctx, xbl); +} + +GEN_VSX_HELPER_X2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300) + +static void gen_xvxsigdp(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 xth; + TCGv_i64 xtl; + TCGv_i64 xbh; + TCGv_i64 xbl; 
+ TCGv_i64 t0, zr, nan, exp; + + if (unlikely(!ctx->vsx_enabled)) { + gen_exception(ctx, POWERPC_EXCP_VSXU); + return; + } + xth = tcg_temp_new_i64(tcg_ctx); + xtl = tcg_temp_new_i64(tcg_ctx); + xbh = tcg_temp_new_i64(tcg_ctx); + xbl = tcg_temp_new_i64(tcg_ctx); + get_cpu_vsrh(tcg_ctx, xbh, xB(ctx->opcode)); + get_cpu_vsrl(tcg_ctx, xbl, xB(ctx->opcode)); + exp = tcg_temp_new_i64(tcg_ctx); + t0 = tcg_temp_new_i64(tcg_ctx); + zr = tcg_const_i64(tcg_ctx, 0); + nan = tcg_const_i64(tcg_ctx, 2047); + + tcg_gen_extract_i64(tcg_ctx, exp, xbh, 52, 11); + tcg_gen_movi_i64(tcg_ctx, t0, 0x0010000000000000); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, zr, zr, t0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, nan, zr, t0); + tcg_gen_deposit_i64(tcg_ctx, xth, t0, xbh, 0, 52); + set_cpu_vsrh(tcg_ctx, xT(ctx->opcode), xth); + + tcg_gen_extract_i64(tcg_ctx, exp, xbl, 52, 11); + tcg_gen_movi_i64(tcg_ctx, t0, 0x0010000000000000); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, zr, zr, t0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t0, exp, nan, zr, t0); + tcg_gen_deposit_i64(tcg_ctx, xtl, t0, xbl, 0, 52); + set_cpu_vsrl(tcg_ctx, xT(ctx->opcode), xtl); + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, exp); + tcg_temp_free_i64(tcg_ctx, zr); + tcg_temp_free_i64(tcg_ctx, nan); + tcg_temp_free_i64(tcg_ctx, xth); + tcg_temp_free_i64(tcg_ctx, xtl); + tcg_temp_free_i64(tcg_ctx, xbh); + tcg_temp_free_i64(tcg_ctx, xbl); +} + +#undef GEN_XX2FORM +#undef GEN_XX3FORM +#undef GEN_XX2IFORM +#undef GEN_XX3_RC_FORM +#undef GEN_XX3FORM_DM +#undef VSX_LOGICAL diff --git a/qemu/target/ppc/translate/vsx-ops.inc.c b/qemu/target/ppc/translate/vsx-ops.inc.c new file mode 100644 index 00000000..7fd3942b --- /dev/null +++ b/qemu/target/ppc/translate/vsx-ops.inc.c @@ -0,0 +1,401 @@ +GEN_HANDLER_E(lxsdx, 0x1F, 0x0C, 0x12, 0, PPC_NONE, PPC2_VSX), +GEN_HANDLER_E(lxsiwax, 0x1F, 0x0C, 0x02, 0, PPC_NONE, PPC2_VSX207), +GEN_HANDLER_E(lxsiwzx, 0x1F, 0x0C, 0x00, 0, PPC_NONE, PPC2_VSX207), +GEN_HANDLER_E(lxsibzx, 0x1F, 0x0D, 0x18, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(lxsihzx, 0x1F, 0x0D, 0x19, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(lxsspx, 0x1F, 0x0C, 0x10, 0, PPC_NONE, PPC2_VSX207), +GEN_HANDLER_E(lxvd2x, 0x1F, 0x0C, 0x1A, 0, PPC_NONE, PPC2_VSX), +GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX), +GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX), +GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(lxvx, 0x1F, 0x0C, 0x08, 0x00000040, PPC_NONE, PPC2_ISA300), +#if defined(TARGET_PPC64) +GEN_HANDLER_E(lxvl, 0x1F, 0x0D, 0x08, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(lxvll, 0x1F, 0x0D, 0x09, 0, PPC_NONE, PPC2_ISA300), +#endif + +GEN_HANDLER_E(stxsdx, 0x1F, 0xC, 0x16, 0, PPC_NONE, PPC2_VSX), +GEN_HANDLER_E(stxsibx, 0x1F, 0xD, 0x1C, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(stxsihx, 0x1F, 0xD, 0x1D, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(stxsiwx, 0x1F, 0xC, 0x04, 0, PPC_NONE, PPC2_VSX207), +GEN_HANDLER_E(stxsspx, 0x1F, 0xC, 0x14, 0, PPC_NONE, PPC2_VSX207), +GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX), +GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX), +GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(stxvx, 0x1F, 0x0C, 0x0C, 0, PPC_NONE, PPC2_ISA300), +#if defined(TARGET_PPC64) +GEN_HANDLER_E(stxvl, 0x1F, 0x0D, 0x0C, 0, PPC_NONE, 
PPC2_ISA300), +GEN_HANDLER_E(stxvll, 0x1F, 0x0D, 0x0D, 0, PPC_NONE, PPC2_ISA300), +#endif + +GEN_HANDLER_E(mfvsrwz, 0x1F, 0x13, 0x03, 0x0000F800, PPC_NONE, PPC2_VSX207), +GEN_HANDLER_E(mtvsrwa, 0x1F, 0x13, 0x06, 0x0000F800, PPC_NONE, PPC2_VSX207), +GEN_HANDLER_E(mtvsrwz, 0x1F, 0x13, 0x07, 0x0000F800, PPC_NONE, PPC2_VSX207), +#if defined(TARGET_PPC64) +GEN_HANDLER_E(mfvsrd, 0x1F, 0x13, 0x01, 0x0000F800, PPC_NONE, PPC2_VSX207), +GEN_HANDLER_E(mtvsrd, 0x1F, 0x13, 0x05, 0x0000F800, PPC_NONE, PPC2_VSX207), +GEN_HANDLER_E(mfvsrld, 0X1F, 0x13, 0x09, 0x0000F800, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(mtvsrdd, 0X1F, 0x13, 0x0D, 0x0, PPC_NONE, PPC2_ISA300), +GEN_HANDLER_E(mtvsrws, 0x1F, 0x13, 0x0C, 0x0000F800, PPC_NONE, PPC2_ISA300), +#endif + +#define GEN_XX1FORM(name, opc2, opc3, fl2) \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2) + +#define GEN_XX2FORM(name, opc2, opc3, fl2) \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2) + +#define GEN_XX2FORM_EXT(name, opc2, opc3, fl2) \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0x00100000, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0x00100000, PPC_NONE, fl2) + +#define GEN_XX2FORM_EO(name, opc2, opc3, opc4, fl2) \ +GEN_HANDLER2_E_2(name, #name, 0x3C, opc2 | 0, opc3, opc4, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E_2(name, #name, 0x3C, opc2 | 1, opc3, opc4, 0, PPC_NONE, fl2) + +#define GEN_XX3FORM(name, opc2, opc3, fl2) \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2) + +#define GEN_XX3FORM_NAME(name, opcname, opc2, opc3, fl2) \ +GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2) + +#define GEN_XX2IFORM(name, opc2, opc3, fl2) \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 1, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 1, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 1, PPC_NONE, fl2) + +#define GEN_XX3_RC_FORM(name, opc2, opc3, fl2) \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x00, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x00, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x00, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x00, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x10, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x10, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x10, 0, PPC_NONE, fl2), \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x10, 0, PPC_NONE, fl2) + +#define GEN_XX3FORM_DM(name, opc2, opc3) \ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x00, 0, PPC_NONE, 
PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\ +GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x0C, 0, PPC_NONE, PPC2_VSX) + +#define GEN_VSX_XFORM_300(name, opc2, opc3, inval) \ +GEN_HANDLER_E(name, 0x3F, opc2, opc3, inval, PPC_NONE, PPC2_ISA300) + +#define GEN_VSX_XFORM_300_EO(name, opc2, opc3, opc4, inval) \ +GEN_HANDLER_E_2(name, 0x3F, opc2, opc3, opc4, inval, PPC_NONE, PPC2_ISA300) + +#define GEN_VSX_Z23FORM_300(name, opc2, opc3, opc4, inval) \ +GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x00, opc4 | 0x0, inval), \ +GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x08, opc4 | 0x0, inval), \ +GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x10, opc4 | 0x0, inval), \ +GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x18, opc4 | 0x0, inval), \ +GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x00, opc4 | 0x1, inval), \ +GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x08, opc4 | 0x1, inval), \ +GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x10, opc4 | 0x1, inval), \ +GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x18, opc4 | 0x1, inval) + +GEN_VSX_Z23FORM_300(xsrqpi, 0x05, 0x0, 0x0, 0x0), +GEN_VSX_Z23FORM_300(xsrqpxp, 0x05, 0x1, 0x0, 0x0), +GEN_VSX_XFORM_300_EO(xssqrtqp, 0x04, 0x19, 0x1B, 0x0), +GEN_VSX_XFORM_300(xssubqp, 0x04, 0x10, 0x0), + +GEN_XX2FORM(xsabsdp, 0x12, 0x15, PPC2_VSX), +GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX), +GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX), +GEN_XX3FORM(xscpsgndp, 0x00, 0x16, PPC2_VSX), + +GEN_VSX_XFORM_300_EO(xsabsqp, 0x04, 0x19, 0x00, 0x00000001), +GEN_VSX_XFORM_300_EO(xsnabsqp, 0x04, 0x19, 0x08, 0x00000001), +GEN_VSX_XFORM_300_EO(xsnegqp, 0x04, 0x19, 0x10, 0x00000001), +GEN_VSX_XFORM_300(xscpsgnqp, 0x04, 0x03, 0x00000001), +GEN_VSX_XFORM_300_EO(xscvdpqp, 0x04, 0x1A, 0x16, 0x00000001), +GEN_VSX_XFORM_300_EO(xscvqpdp, 0x04, 0x1A, 0x14, 0x0), +GEN_VSX_XFORM_300_EO(xscvqpsdz, 0x04, 0x1A, 0x19, 0x00000001), +GEN_VSX_XFORM_300_EO(xscvqpswz, 0x04, 0x1A, 0x09, 0x00000001), +GEN_VSX_XFORM_300_EO(xscvqpudz, 0x04, 0x1A, 0x11, 0x00000001), +GEN_VSX_XFORM_300_EO(xscvqpuwz, 0x04, 0x1A, 0x01, 0x00000001), + +#ifdef TARGET_PPC64 +GEN_XX2FORM_EO(xsxexpdp, 0x16, 0x15, 0x00, PPC2_ISA300), +GEN_VSX_XFORM_300_EO(xsxexpqp, 0x04, 0x19, 0x02, 0x00000001), +GEN_XX2FORM_EO(xsxsigdp, 0x16, 0x15, 0x01, PPC2_ISA300), +GEN_VSX_XFORM_300_EO(xsxsigqp, 0x04, 0x19, 0x12, 0x00000001), +GEN_HANDLER_E(xsiexpdp, 0x3C, 0x16, 0x1C, 0, PPC_NONE, PPC2_ISA300), +GEN_VSX_XFORM_300(xsiexpqp, 0x4, 0x1B, 0x00000001), +#endif + +GEN_XX2FORM(xststdcdp, 0x14, 0x16, PPC2_ISA300), +GEN_XX2FORM(xststdcsp, 0x14, 0x12, PPC2_ISA300), +GEN_VSX_XFORM_300(xststdcqp, 0x04, 0x16, 0x00000001), + +GEN_XX3FORM(xviexpsp, 0x00, 
0x1B, PPC2_ISA300), +GEN_XX3FORM(xviexpdp, 0x00, 0x1F, PPC2_ISA300), +GEN_XX2FORM_EO(xvxexpdp, 0x16, 0x1D, 0x00, PPC2_ISA300), +GEN_XX2FORM_EO(xvxsigdp, 0x16, 0x1D, 0x01, PPC2_ISA300), +GEN_XX2FORM_EO(xvxexpsp, 0x16, 0x1D, 0x08, PPC2_ISA300), +GEN_XX2FORM_EO(xvxsigsp, 0x16, 0x1D, 0x09, PPC2_ISA300), + +/* DCMX = bit[25] << 6 | bit[29] << 5 | bit[11:15] */ +#define GEN_XX2FORM_DCMX(name, opc2, opc3, fl2) \ +GEN_XX3FORM(name, opc2, opc3 | 0, fl2), \ +GEN_XX3FORM(name, opc2, opc3 | 1, fl2) + +GEN_XX2FORM_DCMX(xvtstdcdp, 0x14, 0x1E, PPC2_ISA300), +GEN_XX2FORM_DCMX(xvtstdcsp, 0x14, 0x1A, PPC2_ISA300), + +GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX), +GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX), +GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX), +GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX), +GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX), +GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX), +GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX), +GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX), + +GEN_XX3FORM(xsadddp, 0x00, 0x04, PPC2_VSX), +GEN_VSX_XFORM_300(xsaddqp, 0x04, 0x00, 0x0), +GEN_XX3FORM(xssubdp, 0x00, 0x05, PPC2_VSX), +GEN_XX3FORM(xsmuldp, 0x00, 0x06, PPC2_VSX), +GEN_VSX_XFORM_300(xsmulqp, 0x04, 0x01, 0x0), +GEN_XX3FORM(xsdivdp, 0x00, 0x07, PPC2_VSX), +GEN_XX2FORM(xsredp, 0x14, 0x05, PPC2_VSX), +GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX), +GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX), +GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX), +GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX), +GEN_XX3FORM_NAME(xsmadddp, "xsmaddadp", 0x04, 0x04, PPC2_VSX), +GEN_XX3FORM_NAME(xsmadddp, "xsmaddmdp", 0x04, 0x05, PPC2_VSX), +GEN_XX3FORM_NAME(xsmsubdp, "xsmsubadp", 0x04, 0x06, PPC2_VSX), +GEN_XX3FORM_NAME(xsmsubdp, "xsmsubmdp", 0x04, 0x07, PPC2_VSX), +GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddadp", 0x04, 0x14, PPC2_VSX), +GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddmdp", 0x04, 0x15, PPC2_VSX), +GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubadp", 0x04, 0x16, PPC2_VSX), +GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubmdp", 0x04, 0x17, PPC2_VSX), +GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300), +GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300), +GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300), +GEN_XX3FORM(xscmpnedp, 0x0C, 0x03, PPC2_ISA300), +GEN_XX3FORM(xscmpexpdp, 0x0C, 0x07, PPC2_ISA300), +GEN_VSX_XFORM_300(xscmpexpqp, 0x04, 0x05, 0x00600001), +GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX), +GEN_XX2IFORM(xscmpudp, 0x0C, 0x04, PPC2_VSX), +GEN_VSX_XFORM_300(xscmpoqp, 0x04, 0x04, 0x00600001), +GEN_VSX_XFORM_300(xscmpuqp, 0x04, 0x14, 0x00600001), +GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX), +GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX), +GEN_XX3FORM(xsmaxcdp, 0x00, 0x10, PPC2_ISA300), +GEN_XX3FORM(xsmincdp, 0x00, 0x11, PPC2_ISA300), +GEN_XX3FORM(xsmaxjdp, 0x00, 0x12, PPC2_ISA300), +GEN_XX3FORM(xsminjdp, 0x00, 0x13, PPC2_ISA300), +GEN_XX2FORM_EO(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300), +GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX), +GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207), +GEN_XX2FORM_EO(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300), +GEN_VSX_XFORM_300_EO(xscvsdqp, 0x04, 0x1A, 0x0A, 0x00000001), +GEN_XX2FORM(xscvspdp, 0x12, 0x14, PPC2_VSX), +GEN_XX2FORM(xscvspdpn, 0x16, 0x14, PPC2_VSX207), +GEN_XX2FORM(xscvdpsxds, 0x10, 0x15, PPC2_VSX), +GEN_XX2FORM(xscvdpsxws, 0x10, 0x05, PPC2_VSX), +GEN_XX2FORM(xscvdpuxds, 0x10, 0x14, PPC2_VSX), +GEN_XX2FORM(xscvdpuxws, 0x10, 0x04, PPC2_VSX), +GEN_XX2FORM(xscvsxddp, 0x10, 0x17, PPC2_VSX), +GEN_VSX_XFORM_300_EO(xscvudqp, 0x04, 0x1A, 0x02, 0x00000001), +GEN_XX2FORM(xscvuxddp, 0x10, 0x16, PPC2_VSX), +GEN_XX2FORM(xsrdpi, 0x12, 0x04, 
PPC2_VSX), +GEN_XX2FORM(xsrdpic, 0x16, 0x06, PPC2_VSX), +GEN_XX2FORM(xsrdpim, 0x12, 0x07, PPC2_VSX), +GEN_XX2FORM(xsrdpip, 0x12, 0x06, PPC2_VSX), +GEN_XX2FORM(xsrdpiz, 0x12, 0x05, PPC2_VSX), + +GEN_XX3FORM(xsaddsp, 0x00, 0x00, PPC2_VSX207), +GEN_XX3FORM(xssubsp, 0x00, 0x01, PPC2_VSX207), +GEN_XX3FORM(xsmulsp, 0x00, 0x02, PPC2_VSX207), +GEN_XX3FORM(xsdivsp, 0x00, 0x03, PPC2_VSX207), +GEN_VSX_XFORM_300(xsdivqp, 0x04, 0x11, 0x0), +GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207), +GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207), +GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207), +GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207), +GEN_XX3FORM_NAME(xsmaddsp, "xsmaddasp", 0x04, 0x00, PPC2_VSX207), +GEN_XX3FORM_NAME(xsmaddsp, "xsmaddmsp", 0x04, 0x01, PPC2_VSX207), +GEN_XX3FORM_NAME(xsmsubsp, "xsmsubasp", 0x04, 0x02, PPC2_VSX207), +GEN_XX3FORM_NAME(xsmsubsp, "xsmsubmsp", 0x04, 0x03, PPC2_VSX207), +GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddasp", 0x04, 0x10, PPC2_VSX207), +GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddmsp", 0x04, 0x11, PPC2_VSX207), +GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubasp", 0x04, 0x12, PPC2_VSX207), +GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubmsp", 0x04, 0x13, PPC2_VSX207), +GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207), +GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207), + +GEN_XX3FORM(xvadddp, 0x00, 0x0C, PPC2_VSX), +GEN_XX3FORM(xvsubdp, 0x00, 0x0D, PPC2_VSX), +GEN_XX3FORM(xvmuldp, 0x00, 0x0E, PPC2_VSX), +GEN_XX3FORM(xvdivdp, 0x00, 0x0F, PPC2_VSX), +GEN_XX2FORM(xvredp, 0x14, 0x0D, PPC2_VSX), +GEN_XX2FORM(xvsqrtdp, 0x16, 0x0C, PPC2_VSX), +GEN_XX2FORM(xvrsqrtedp, 0x14, 0x0C, PPC2_VSX), +GEN_XX3FORM(xvtdivdp, 0x14, 0x0F, PPC2_VSX), +GEN_XX2FORM(xvtsqrtdp, 0x14, 0x0E, PPC2_VSX), +GEN_XX3FORM_NAME(xvmadddp, "xvmaddadp", 0x04, 0x0C, PPC2_VSX), +GEN_XX3FORM_NAME(xvmadddp, "xvmaddmdp", 0x04, 0x0D, PPC2_VSX), +GEN_XX3FORM_NAME(xvmsubdp, "xvmsubadp", 0x04, 0x0E, PPC2_VSX), +GEN_XX3FORM_NAME(xvmsubdp, "xvmsubmdp", 0x04, 0x0F, PPC2_VSX), +GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddadp", 0x04, 0x1C, PPC2_VSX), +GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddmdp", 0x04, 0x1D, PPC2_VSX), +GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubadp", 0x04, 0x1E, PPC2_VSX), +GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubmdp", 0x04, 0x1F, PPC2_VSX), +GEN_XX3FORM(xvmaxdp, 0x00, 0x1C, PPC2_VSX), +GEN_XX3FORM(xvmindp, 0x00, 0x1D, PPC2_VSX), +GEN_XX3_RC_FORM(xvcmpeqdp, 0x0C, 0x0C, PPC2_VSX), +GEN_XX3_RC_FORM(xvcmpgtdp, 0x0C, 0x0D, PPC2_VSX), +GEN_XX3_RC_FORM(xvcmpgedp, 0x0C, 0x0E, PPC2_VSX), +GEN_XX3_RC_FORM(xvcmpnedp, 0x0C, 0x0F, PPC2_ISA300), +GEN_XX2FORM(xvcvdpsp, 0x12, 0x18, PPC2_VSX), +GEN_XX2FORM(xvcvdpsxds, 0x10, 0x1D, PPC2_VSX), +GEN_XX2FORM(xvcvdpsxws, 0x10, 0x0D, PPC2_VSX), +GEN_XX2FORM(xvcvdpuxds, 0x10, 0x1C, PPC2_VSX), +GEN_XX2FORM(xvcvdpuxws, 0x10, 0x0C, PPC2_VSX), +GEN_XX2FORM(xvcvsxddp, 0x10, 0x1F, PPC2_VSX), +GEN_XX2FORM(xvcvuxddp, 0x10, 0x1E, PPC2_VSX), +GEN_XX2FORM(xvcvsxwdp, 0x10, 0x0F, PPC2_VSX), +GEN_XX2FORM(xvcvuxwdp, 0x10, 0x0E, PPC2_VSX), +GEN_XX2FORM(xvrdpi, 0x12, 0x0C, PPC2_VSX), +GEN_XX2FORM(xvrdpic, 0x16, 0x0E, PPC2_VSX), +GEN_XX2FORM(xvrdpim, 0x12, 0x0F, PPC2_VSX), +GEN_XX2FORM(xvrdpip, 0x12, 0x0E, PPC2_VSX), +GEN_XX2FORM(xvrdpiz, 0x12, 0x0D, PPC2_VSX), + +GEN_XX3FORM(xvaddsp, 0x00, 0x08, PPC2_VSX), +GEN_XX3FORM(xvsubsp, 0x00, 0x09, PPC2_VSX), +GEN_XX3FORM(xvmulsp, 0x00, 0x0A, PPC2_VSX), +GEN_XX3FORM(xvdivsp, 0x00, 0x0B, PPC2_VSX), +GEN_XX2FORM(xvresp, 0x14, 0x09, PPC2_VSX), +GEN_XX2FORM(xvsqrtsp, 0x16, 0x08, PPC2_VSX), +GEN_XX2FORM(xvrsqrtesp, 0x14, 0x08, PPC2_VSX), +GEN_XX3FORM(xvtdivsp, 0x14, 0x0B, PPC2_VSX), +GEN_XX2FORM(xvtsqrtsp, 0x14, 0x0A, 
PPC2_VSX), +GEN_XX3FORM_NAME(xvmaddsp, "xvmaddasp", 0x04, 0x08, PPC2_VSX), +GEN_XX3FORM_NAME(xvmaddsp, "xvmaddmsp", 0x04, 0x09, PPC2_VSX), +GEN_XX3FORM_NAME(xvmsubsp, "xvmsubasp", 0x04, 0x0A, PPC2_VSX), +GEN_XX3FORM_NAME(xvmsubsp, "xvmsubmsp", 0x04, 0x0B, PPC2_VSX), +GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddasp", 0x04, 0x18, PPC2_VSX), +GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddmsp", 0x04, 0x19, PPC2_VSX), +GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubasp", 0x04, 0x1A, PPC2_VSX), +GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubmsp", 0x04, 0x1B, PPC2_VSX), +GEN_XX3FORM(xvmaxsp, 0x00, 0x18, PPC2_VSX), +GEN_XX3FORM(xvminsp, 0x00, 0x19, PPC2_VSX), +GEN_XX3_RC_FORM(xvcmpeqsp, 0x0C, 0x08, PPC2_VSX), +GEN_XX3_RC_FORM(xvcmpgtsp, 0x0C, 0x09, PPC2_VSX), +GEN_XX3_RC_FORM(xvcmpgesp, 0x0C, 0x0A, PPC2_VSX), +GEN_XX3_RC_FORM(xvcmpnesp, 0x0C, 0x0B, PPC2_ISA300), +GEN_XX2FORM(xvcvspdp, 0x12, 0x1C, PPC2_VSX), +GEN_XX2FORM(xvcvspsxds, 0x10, 0x19, PPC2_VSX), +GEN_XX2FORM(xvcvspsxws, 0x10, 0x09, PPC2_VSX), +GEN_XX2FORM(xvcvspuxds, 0x10, 0x18, PPC2_VSX), +GEN_XX2FORM(xvcvspuxws, 0x10, 0x08, PPC2_VSX), +GEN_XX2FORM(xvcvsxdsp, 0x10, 0x1B, PPC2_VSX), +GEN_XX2FORM(xvcvuxdsp, 0x10, 0x1A, PPC2_VSX), +GEN_XX2FORM(xvcvsxwsp, 0x10, 0x0B, PPC2_VSX), +GEN_XX2FORM(xvcvuxwsp, 0x10, 0x0A, PPC2_VSX), +GEN_XX2FORM(xvrspi, 0x12, 0x08, PPC2_VSX), +GEN_XX2FORM(xvrspic, 0x16, 0x0A, PPC2_VSX), +GEN_XX2FORM(xvrspim, 0x12, 0x0B, PPC2_VSX), +GEN_XX2FORM(xvrspip, 0x12, 0x0A, PPC2_VSX), +GEN_XX2FORM(xvrspiz, 0x12, 0x09, PPC2_VSX), +GEN_XX2FORM_EO(xxbrh, 0x16, 0x1D, 0x07, PPC2_ISA300), +GEN_XX2FORM_EO(xxbrw, 0x16, 0x1D, 0x0F, PPC2_ISA300), +GEN_XX2FORM_EO(xxbrd, 0x16, 0x1D, 0x17, PPC2_ISA300), +GEN_XX2FORM_EO(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300), +GEN_XX2FORM_EO(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300), +GEN_XX2FORM_EO(xxbrq, 0x16, 0x1D, 0x1F, PPC2_ISA300), + +#define VSX_LOGICAL(name, opc2, opc3, fl2) \ +GEN_XX3FORM(name, opc2, opc3, fl2) + +VSX_LOGICAL(xxland, 0x8, 0x10, PPC2_VSX), +VSX_LOGICAL(xxlandc, 0x8, 0x11, PPC2_VSX), +VSX_LOGICAL(xxlor, 0x8, 0x12, PPC2_VSX), +VSX_LOGICAL(xxlxor, 0x8, 0x13, PPC2_VSX), +VSX_LOGICAL(xxlnor, 0x8, 0x14, PPC2_VSX), +VSX_LOGICAL(xxleqv, 0x8, 0x17, PPC2_VSX207), +VSX_LOGICAL(xxlnand, 0x8, 0x16, PPC2_VSX207), +VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207), +GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX), +GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX), +GEN_XX3FORM(xxperm, 0x08, 0x03, PPC2_ISA300), +GEN_XX3FORM(xxpermr, 0x08, 0x07, PPC2_ISA300), +GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX), +GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300), +GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00), +GEN_XX2FORM_EXT(xxextractuw, 0x0A, 0x0A, PPC2_ISA300), +GEN_XX2FORM_EXT(xxinsertw, 0x0A, 0x0B, PPC2_ISA300), + +#define GEN_XXSEL_ROW(opc3) \ +GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x18, opc3, 0, PPC_NONE, PPC2_VSX), \ +GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x19, opc3, 0, PPC_NONE, PPC2_VSX), \ +GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1A, opc3, 0, PPC_NONE, PPC2_VSX), \ +GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1B, opc3, 0, PPC_NONE, PPC2_VSX), \ +GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1C, opc3, 0, PPC_NONE, PPC2_VSX), \ +GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1D, opc3, 0, PPC_NONE, PPC2_VSX), \ +GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1E, opc3, 0, PPC_NONE, PPC2_VSX), \ +GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1F, opc3, 0, PPC_NONE, PPC2_VSX), \ + +GEN_XXSEL_ROW(0x00) +GEN_XXSEL_ROW(0x01) +GEN_XXSEL_ROW(0x02) +GEN_XXSEL_ROW(0x03) +GEN_XXSEL_ROW(0x04) +GEN_XXSEL_ROW(0x05) +GEN_XXSEL_ROW(0x06) +GEN_XXSEL_ROW(0x07) +GEN_XXSEL_ROW(0x08) +GEN_XXSEL_ROW(0x09) +GEN_XXSEL_ROW(0x0A) 
+GEN_XXSEL_ROW(0x0B)
+GEN_XXSEL_ROW(0x0C)
+GEN_XXSEL_ROW(0x0D)
+GEN_XXSEL_ROW(0x0E)
+GEN_XXSEL_ROW(0x0F)
+GEN_XXSEL_ROW(0x10)
+GEN_XXSEL_ROW(0x11)
+GEN_XXSEL_ROW(0x12)
+GEN_XXSEL_ROW(0x13)
+GEN_XXSEL_ROW(0x14)
+GEN_XXSEL_ROW(0x15)
+GEN_XXSEL_ROW(0x16)
+GEN_XXSEL_ROW(0x17)
+GEN_XXSEL_ROW(0x18)
+GEN_XXSEL_ROW(0x19)
+GEN_XXSEL_ROW(0x1A)
+GEN_XXSEL_ROW(0x1B)
+GEN_XXSEL_ROW(0x1C)
+GEN_XXSEL_ROW(0x1D)
+GEN_XXSEL_ROW(0x1E)
+GEN_XXSEL_ROW(0x1F)
+
+GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),
diff --git a/qemu/target/ppc/translate_init.inc.c b/qemu/target/ppc/translate_init.inc.c
new file mode 100644
index 00000000..bc2152d1
--- /dev/null
+++ b/qemu/target/ppc/translate_init.inc.c
@@ -0,0 +1,11202 @@
+/*
+ * PowerPC CPU initialization for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "sysemu/cpus.h"
+#include "sysemu/tcg.h"
+#include "cpu-models.h"
+#include "mmu-hash32.h"
+#include "mmu-hash64.h"
+#include "hw/ppc/ppc.h"
+#include "mmu-book3s-v3.h"
+#include "qemu/cutils.h"
+#include "fpu/softfloat.h"
+#include "disas/dis-asm.h"
+
+/*
+ * Generic callbacks:
+ * do nothing but store/retrieve spr value
+ */
+static void spr_load_dump_spr(TCGContext *tcg_ctx, int sprn)
+{
+#ifdef PPC_DUMP_SPR_ACCESSES
+    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, sprn);
+    gen_helper_load_dump_spr(tcg_ctx, tcg_ctx->cpu_env, t0);
+    tcg_temp_free_i32(tcg_ctx, t0);
+#endif
+}
+
+static void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    gen_load_spr(tcg_ctx, cpu_gpr[gprn], sprn);
+    spr_load_dump_spr(tcg_ctx, sprn);
+}
+
+static void spr_store_dump_spr(TCGContext *tcg_ctx, int sprn)
+{
+#ifdef PPC_DUMP_SPR_ACCESSES
+    TCGv_i32 t0 = tcg_const_i32(tcg_ctx, sprn);
+    gen_helper_store_dump_spr(tcg_ctx, tcg_ctx->cpu_env, t0);
+    tcg_temp_free_i32(tcg_ctx, t0);
+#endif
+}
+
+static void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    gen_store_spr(tcg_ctx, sprn, cpu_gpr[gprn]);
+    spr_store_dump_spr(tcg_ctx, sprn);
+}
+
+static void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
+{
+#ifdef TARGET_PPC64
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0 = tcg_temp_new(tcg_ctx);
+    tcg_gen_ext32u_tl(tcg_ctx, t0, cpu_gpr[gprn]);
+    gen_store_spr(tcg_ctx, sprn, t0);
+    tcg_temp_free(tcg_ctx, t0);
+    spr_store_dump_spr(tcg_ctx, sprn);
+#else
+    spr_write_generic(ctx, sprn, gprn);
+#endif
+}
+
+static void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
+{
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    TCGv t0 = tcg_temp_new(tcg_ctx);
+    TCGv t1 = tcg_temp_new(tcg_ctx);
+    gen_load_spr(tcg_ctx, t0, sprn);
+    tcg_gen_neg_tl(tcg_ctx, t1, cpu_gpr[gprn]);
+    tcg_gen_and_tl(tcg_ctx, t0, t0, t1);
+    gen_store_spr(tcg_ctx, sprn, t0);
+    tcg_temp_free(tcg_ctx, t0);
+    tcg_temp_free(tcg_ctx, t1);
+}
+
+static void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
+{
+}
+
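[reviewer note] The generic callbacks above funnel everything through gen_load_spr/gen_store_spr. For context, a minimal sketch of what those wrappers are assumed to look like, mirroring upstream QEMU's definitions with the explicit TCGContext parameter this port threads through; the exact signatures are defined elsewhere in the patch and are an assumption here:

    /* Assumed shape of the SPR access wrappers: each SPR, unless a
       dedicated callback overrides it, simply lives in env->spr[]. */
    static inline void gen_load_spr(TCGContext *tcg_ctx, TCGv t, int reg)
    {
        tcg_gen_ld_tl(tcg_ctx, t, tcg_ctx->cpu_env,
                      offsetof(CPUPPCState, spr[reg]));
    }

    static inline void gen_store_spr(TCGContext *tcg_ctx, int reg, TCGv t)
    {
        tcg_gen_st_tl(tcg_ctx, t, tcg_ctx->cpu_env,
                      offsetof(CPUPPCState, spr[reg]));
    }

Registers with side effects (XER, LR, CTR, DECR, the BATs, and so on) get the dedicated read/write callbacks that follow instead of the generic pair.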
+/* SPR common to all PowerPC */ +/* XER */ +static void spr_read_xer(DisasContext *ctx, int gprn, int sprn) +{ + gen_read_xer(ctx, cpu_gpr[gprn]); +} + +static void spr_write_xer(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_write_xer(tcg_ctx, cpu_gpr[gprn]); +} + +/* LR */ +static void spr_read_lr(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[gprn], cpu_lr); +} + +static void spr_write_lr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, cpu_lr, cpu_gpr[gprn]); +} + +/* CFAR */ +#if defined(TARGET_PPC64) +static void spr_read_cfar(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[gprn], cpu_cfar); +} + +static void spr_write_cfar(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, cpu_cfar, cpu_gpr[gprn]); +} +#endif + +/* CTR */ +static void spr_read_ctr(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, cpu_gpr[gprn], cpu_ctr); +} + +static void spr_write_ctr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, cpu_ctr, cpu_gpr[gprn]); +} + +/* User read access to SPR */ +/* USPRx */ +/* UMMCRx */ +/* UPMCx */ +/* USIA */ +/* UDECR */ +static void spr_read_ureg(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_load_spr(tcg_ctx, cpu_gpr[gprn], sprn + 0x10); +} + +#if defined(TARGET_PPC64) +static void spr_write_ureg(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_store_spr(tcg_ctx, sprn + 0x10, cpu_gpr[gprn]); +} +#endif + +/* SPR common to all non-embedded PowerPC */ +/* DECR */ +static void spr_read_decr(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_load_decr(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_stop_exception(ctx); + } +} + +static void spr_write_decr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_store_decr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_stop_exception(ctx); + } +} + +/* SPR common to all non-embedded PowerPC, except 601 */ +/* Time base */ +static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_load_tbl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_end(tcg_ctx); + gen_stop_exception(ctx); + } +} + +static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_load_tbu(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_end(tcg_ctx); + gen_stop_exception(ctx); + } +} + +#if 0 +// ATTRIBUTE_UNUSED +static void spr_read_atbl(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + 
gen_helper_load_atbl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); +} + +// ATTRIBUTE_UNUSED +static void spr_read_atbu(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_load_atbu(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); +} +#endif + +static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_store_tbl(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_end(tcg_ctx); + gen_stop_exception(ctx); + } +} + +static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_store_tbu(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_end(tcg_ctx); + gen_stop_exception(ctx); + } +} + +#if 0 +// ATTRIBUTE_UNUSED +static void spr_write_atbl(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_atbl(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +// ATTRIBUTE_UNUSED +static void spr_write_atbu(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_atbu(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} +#endif + +#if defined(TARGET_PPC64) +// ATTRIBUTE_UNUSED +static void spr_read_purr(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_load_purr(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); +} + +static void spr_write_purr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_purr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +/* HDECR */ +static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_load_hdecr(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_end(tcg_ctx); + gen_stop_exception(ctx); + } +} + +static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_start(tcg_ctx); + } + gen_helper_store_hdecr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { + gen_io_end(tcg_ctx); + gen_stop_exception(ctx); + } +} + +static void spr_read_vtb(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_load_vtb(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); +} + +static void spr_write_vtb(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_vtb(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_tbu40(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +#endif + +/* IBAT0U...IBAT7U */ +/* IBAT0L...IBAT7L */ +static void spr_read_ibat(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env, + offsetof(CPUPPCState, + IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); +} + +static void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +
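/*
 * The classic BAT SPRs interleave upper and lower words (IBAT0U, IBAT0L,
 * IBAT1U, ... are consecutive SPR numbers), so (sprn & 1) selects the
 * upper (0) or lower (1) half while (sprn - SPR_IBATnU) / 2 recovers the
 * BAT index.  Worked example, assuming the usual numbering with
 * SPR_IBAT0U = 528: sprn = 531 (IBAT1L) gives (531 - 528) / 2 = 1 and
 * 531 & 1 = 1, i.e. IBAT[1][1], the lower word of IBAT1.  BATs 4-7 sit in
 * a separate, non-contiguous SPR block, hence the "+ 4" in the _h variants
 * such as this one.
 */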
tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env, + offsetof(CPUPPCState, + IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4])); +} + +static void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_IBAT0U) / 2); + gen_helper_store_ibatu(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, ((sprn - SPR_IBAT4U) / 2) + 4); + gen_helper_store_ibatu(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_IBAT0L) / 2); + gen_helper_store_ibatl(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, ((sprn - SPR_IBAT4L) / 2) + 4); + gen_helper_store_ibatl(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +/* DBAT0U...DBAT7U */ +/* DBAT0L...DBAT7L */ +static void spr_read_dbat(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env, + offsetof(CPUPPCState, + DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2])); +} + +static void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env, + offsetof(CPUPPCState, + DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4])); +} + +static void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_DBAT0U) / 2); + gen_helper_store_dbatu(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, ((sprn - SPR_DBAT4U) / 2) + 4); + gen_helper_store_dbatu(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_DBAT0L) / 2); + gen_helper_store_dbatl(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, ((sprn - SPR_DBAT4L) / 2) + 4); + gen_helper_store_dbatl(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +/* SDR1 */ +static void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_sdr1(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +#if defined(TARGET_PPC64) +/* 64 bits PowerPC specific SPRs */ +/* PIDR */ +static void spr_write_pidr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_pidr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn) +{ + 
TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_lpidr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void spr_read_hior(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env, offsetof(CPUPPCState, excp_prefix)); +} + +static void spr_write_hior(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[gprn], 0x3FFFFF00000ULL); + tcg_gen_st_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, excp_prefix)); + tcg_temp_free(tcg_ctx, t0); +} +static void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_ptcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void spr_write_pcr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_pcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +/* DPDES */ +static void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_load_dpdes(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); +} + +static void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_dpdes(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} +#endif + +/* PowerPC 601 specific registers */ +/* RTC */ +static void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_load_601_rtcl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); +} + +static void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_load_601_rtcu(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); +} + +static void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_601_rtcu(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_601_rtcl(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_hid0_601(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); + /* Must stop the translation as endianness may have changed */ + gen_stop_exception(ctx); +} + +/* Unified bats */ +static void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env, + offsetof(CPUPPCState, + IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); +} + +static void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_IBAT0U) / 2); + gen_helper_store_601_batl(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, (sprn - SPR_IBAT0U) / 2); + gen_helper_store_601_batu(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +/* PowerPC 40x specific registers */ +static void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#ifdef UNICORN_ARCH_POSTFIX + 
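/*
 * UNICORN_ARCH_POSTFIX appears to be Unicorn's per-target symbol suffix:
 * since several qemu targets can be linked into one library, helper symbols
 * get a target tag pasted on through qemu's glue() token-pasting macro.
 * A hypothetical expansion with a "_ppc64" postfix:
 *
 *     glue(gen_helper_load_40x_pit, _ppc64)(tcg_ctx, ...)
 *         expands to gen_helper_load_40x_pit_ppc64(tcg_ctx, ...)
 *
 * The #else branch is the plain single-target spelling.
 */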
glue(gen_helper_load_40x_pit, UNICORN_ARCH_POSTFIX)(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); +#else + gen_helper_load_40x_pit(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env); +#endif +} + +static void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#ifdef UNICORN_ARCH_POSTFIX + glue(gen_helper_store_40x_pit, UNICORN_ARCH_POSTFIX)(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +#else + gen_helper_store_40x_pit(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +#endif +} + +static void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_store_spr(tcg_ctx, sprn, cpu_gpr[gprn]); +#ifdef UNICORN_ARCH_POSTFIX + glue(gen_helper_store_40x_dbcr0, UNICORN_ARCH_POSTFIX)(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +#else + gen_helper_store_40x_dbcr0(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +#endif + /* We must stop translation as we may have rebooted */ + gen_stop_exception(ctx); +} + +static void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#ifdef UNICORN_ARCH_POSTFIX + glue(gen_helper_store_40x_sler, UNICORN_ARCH_POSTFIX)(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +#else + gen_helper_store_40x_sler(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +#endif +} + +static void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_booke_tcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_store_booke_tsr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +/* PowerPC 403 specific registers */ +/* PBL1 / PBU1 / PBL2 / PBU2 */ +static void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_ld_tl(tcg_ctx, cpu_gpr[gprn], tcg_ctx->cpu_env, + offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1])); +} + +static void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, sprn - SPR_403_PBL1); + gen_helper_store_403_pbr(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void spr_write_pir(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[gprn], 0xF); + gen_store_spr(tcg_ctx, SPR_PIR, t0); + tcg_temp_free(tcg_ctx, t0); +} + +/* SPE specific registers */ +static void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_fscr)); + tcg_gen_extu_i32_tl(tcg_ctx, cpu_gpr[gprn], t0); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t0, cpu_gpr[gprn]); + tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, spe_fscr)); + tcg_temp_free_i32(tcg_ctx, t0); +} + +/* Callback used to write the exception vector base */ +static void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, ivpr_mask)); + 
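/*
 * Only the bits set in env->ivpr_mask are architecturally writable in the
 * vector prefix; the AND below drops the rest before the result is stored
 * to both excp_prefix and the SPR itself.  For example, with the common
 * BookE mask of 0xFFFF0000, a guest write of 0x12345678 would leave a
 * prefix of 0x12340000.
 */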
tcg_gen_and_tl(tcg_ctx, t0, t0, cpu_gpr[gprn]); + tcg_gen_st_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, excp_prefix)); + gen_store_spr(tcg_ctx, sprn, t0); + tcg_temp_free(tcg_ctx, t0); +} + +static void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int sprn_offs; + + if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) { + sprn_offs = sprn - SPR_BOOKE_IVOR0; + } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) { + sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32; + } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) { + sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38; + } else { + printf("Trying to write an unknown exception vector %d %03x\n", + sprn, sprn); + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); + return; + } + + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, ivor_mask)); + tcg_gen_and_tl(tcg_ctx, t0, t0, cpu_gpr[gprn]); + tcg_gen_st_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs])); + gen_store_spr(tcg_ctx, sprn, t0); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void vscr_init(CPUPPCState *env, uint32_t val) +{ + /* Altivec always uses round-to-nearest */ + set_float_rounding_mode(float_round_nearest_even, &env->vec_status); + helper_mtvscr(env, val); +} + +#define spr_register_kvm(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, one_reg_id, initial_value) \ + _spr_register(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, oea_read, oea_write, initial_value) +#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, hea_read, hea_write, \ + one_reg_id, initial_value) \ + _spr_register(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, hea_read, hea_write, initial_value) + +#define spr_register(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, initial_value) \ + spr_register_kvm(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, 0, initial_value) + +#define spr_register_hv(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, hea_read, hea_write, \ + initial_value) \ + spr_register_kvm_hv(env, num, name, uea_read, uea_write, \ + oea_read, oea_write, hea_read, hea_write, \ + 0, initial_value) + +static inline void _spr_register(CPUPPCState *env, int num, + const char *name, + void (*uea_read)(DisasContext *ctx, + int gprn, int sprn), + void (*uea_write)(DisasContext *ctx, + int sprn, int gprn), + void (*oea_read)(DisasContext *ctx, + int gprn, int sprn), + void (*oea_write)(DisasContext *ctx, + int sprn, int gprn), + void (*hea_read)(DisasContext *opaque, + int gprn, int sprn), + void (*hea_write)(DisasContext *opaque, + int sprn, int gprn), +#if defined(CONFIG_KVM) + uint64_t one_reg_id, +#endif + target_ulong initial_value) +{ + ppc_spr_t *spr; + + spr = &env->spr_cb[num]; + if (spr->name != NULL || env->spr[num] != 0x00000000 || + spr->oea_read != NULL || spr->oea_write != NULL || + spr->uea_read != NULL || spr->uea_write != NULL) { + printf("Error: Trying to register SPR %d (%03x) twice !\n", num, num); + exit(1); + } +#if defined(PPC_DEBUG_SPR) + printf("*** register spr %d (%03x) %s val " TARGET_FMT_lx "\n", num, num, + name, initial_value); +#endif + spr->name = name; + spr->uea_read = uea_read; + spr->uea_write = uea_write; + spr->oea_read = oea_read; + spr->oea_write = oea_write; + spr->hea_read = hea_read; + spr->hea_write = hea_write; +#if defined(CONFIG_KVM) + spr->one_reg_id = one_reg_id, 
+#endif + env->spr[num] = spr->default_value = initial_value; +} + +/* Generic PowerPC SPRs */ +static void gen_spr_generic(CPUPPCState *env) +{ + /* Integer processing */ + spr_register(env, SPR_XER, "XER", + &spr_read_xer, &spr_write_xer, + &spr_read_xer, &spr_write_xer, + 0x00000000); + /* Branch control */ + spr_register(env, SPR_LR, "LR", + &spr_read_lr, &spr_write_lr, + &spr_read_lr, &spr_write_lr, + 0x00000000); + spr_register(env, SPR_CTR, "CTR", + &spr_read_ctr, &spr_write_ctr, + &spr_read_ctr, &spr_write_ctr, + 0x00000000); + /* Interrupt processing */ + spr_register(env, SPR_SRR0, "SRR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SRR1, "SRR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Processor control */ + spr_register(env, SPR_SPRG0, "SPRG0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG1, "SPRG1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG2, "SPRG2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG3, "SPRG3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +/* SPR common to all non-embedded PowerPC, including 601 */ +static void gen_spr_ne_601(CPUPPCState *env) +{ + /* Exception processing */ + spr_register_kvm(env, SPR_DSISR, "DSISR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DSISR, 0x00000000); + spr_register_kvm(env, SPR_DAR, "DAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DAR, 0x00000000); + /* Timer */ + spr_register(env, SPR_DECR, "DECR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_decr, &spr_write_decr, + 0x00000000); +} + +/* Storage Description Register 1 */ +static void gen_spr_sdr1(CPUPPCState *env) +{ + if (env->has_hv_mode) { + /* + * SDR1 is a hypervisor resource on CPUs which have a + * hypervisor mode + */ + spr_register_hv(env, SPR_SDR1, "SDR1", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_sdr1, + 0x00000000); + } else { + spr_register(env, SPR_SDR1, "SDR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_sdr1, + 0x00000000); + } +} + +/* BATs 0-3 */ +static void gen_low_BATs(CPUPPCState *env) +{ + spr_register(env, SPR_IBAT0U, "IBAT0U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatu, + 0x00000000); + spr_register(env, SPR_IBAT0L, "IBAT0L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatl, + 0x00000000); + spr_register(env, SPR_IBAT1U, "IBAT1U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatu, + 0x00000000); + spr_register(env, SPR_IBAT1L, "IBAT1L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatl, + 0x00000000); + spr_register(env, SPR_IBAT2U, "IBAT2U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatu, + 0x00000000); + spr_register(env, SPR_IBAT2L, "IBAT2L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatl, + 0x00000000); + spr_register(env, SPR_IBAT3U, "IBAT3U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatu, + 0x00000000); + spr_register(env, SPR_IBAT3L, "IBAT3L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat, &spr_write_ibatl, + 0x00000000); + spr_register(env, SPR_DBAT0U, "DBAT0U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat,
&spr_write_dbatu, + 0x00000000); + spr_register(env, SPR_DBAT0L, "DBAT0L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatl, + 0x00000000); + spr_register(env, SPR_DBAT1U, "DBAT1U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatu, + 0x00000000); + spr_register(env, SPR_DBAT1L, "DBAT1L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatl, + 0x00000000); + spr_register(env, SPR_DBAT2U, "DBAT2U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatu, + 0x00000000); + spr_register(env, SPR_DBAT2L, "DBAT2L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatl, + 0x00000000); + spr_register(env, SPR_DBAT3U, "DBAT3U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatu, + 0x00000000); + spr_register(env, SPR_DBAT3L, "DBAT3L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat, &spr_write_dbatl, + 0x00000000); + env->nb_BATs += 4; +} + +/* BATs 4-7 */ +static void gen_high_BATs(CPUPPCState *env) +{ + spr_register(env, SPR_IBAT4U, "IBAT4U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatu_h, + 0x00000000); + spr_register(env, SPR_IBAT4L, "IBAT4L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatl_h, + 0x00000000); + spr_register(env, SPR_IBAT5U, "IBAT5U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatu_h, + 0x00000000); + spr_register(env, SPR_IBAT5L, "IBAT5L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatl_h, + 0x00000000); + spr_register(env, SPR_IBAT6U, "IBAT6U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatu_h, + 0x00000000); + spr_register(env, SPR_IBAT6L, "IBAT6L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatl_h, + 0x00000000); + spr_register(env, SPR_IBAT7U, "IBAT7U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatu_h, + 0x00000000); + spr_register(env, SPR_IBAT7L, "IBAT7L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_ibat_h, &spr_write_ibatl_h, + 0x00000000); + spr_register(env, SPR_DBAT4U, "DBAT4U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatu_h, + 0x00000000); + spr_register(env, SPR_DBAT4L, "DBAT4L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatl_h, + 0x00000000); + spr_register(env, SPR_DBAT5U, "DBAT5U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatu_h, + 0x00000000); + spr_register(env, SPR_DBAT5L, "DBAT5L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatl_h, + 0x00000000); + spr_register(env, SPR_DBAT6U, "DBAT6U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatu_h, + 0x00000000); + spr_register(env, SPR_DBAT6L, "DBAT6L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatl_h, + 0x00000000); + spr_register(env, SPR_DBAT7U, "DBAT7U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatu_h, + 0x00000000); + spr_register(env, SPR_DBAT7L, "DBAT7L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dbat_h, &spr_write_dbatl_h, + 0x00000000); + env->nb_BATs += 4; +} + +/* Generic PowerPC time base */ +static void gen_tbl(CPUPPCState *env) +{ + spr_register(env, SPR_VTBL, "TBL", + &spr_read_tbl, SPR_NOACCESS, + &spr_read_tbl, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_TBL, "TBL", + &spr_read_tbl, SPR_NOACCESS, + &spr_read_tbl, &spr_write_tbl, + 0x00000000); + spr_register(env, SPR_VTBU, "TBU", + &spr_read_tbu, SPR_NOACCESS, + &spr_read_tbu, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_TBU, "TBU", + &spr_read_tbu, SPR_NOACCESS, + &spr_read_tbu, 
&spr_write_tbu, + 0x00000000); +} + +/* Software table search registers */ +static void gen_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) +{ + env->nb_tlb = nb_tlbs; + env->nb_ways = nb_ways; + env->id_tlbs = 1; + env->tlb_type = TLB_6XX; + spr_register(env, SPR_DMISS, "DMISS", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_DCMP, "DCMP", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_HASH1, "HASH1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_HASH2, "HASH2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_IMISS, "IMISS", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_ICMP, "ICMP", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_RPA, "RPA", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +/* SPR common to MPC755 and G2 */ +static void gen_spr_G2_755(CPUPPCState *env) +{ + /* SGPRs */ + spr_register(env, SPR_SPRG4, "SPRG4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG5, "SPRG5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG6, "SPRG6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG7, "SPRG7", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +/* SPR common to all 7xx PowerPC implementations */ +static void gen_spr_7xx(CPUPPCState *env) +{ + /* Breakpoints */ + /* XXX : not implemented */ + spr_register_kvm(env, SPR_DABR, "DABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DABR, 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_IABR, "IABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Cache management */ + /* XXX : not implemented */ + spr_register(env, SPR_ICTC, "ICTC", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Performance monitors */ + /* XXX : not implemented */ + spr_register(env, SPR_7XX_MMCR0, "MMCR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_MMCR1, "MMCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC1, "PMC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC2, "PMC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC3, "PMC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC4, "PMC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_SIAR, "SIAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UMMCR0, "UMMCR0", +
&spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UMMCR1, "UMMCR1", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC1, "UPMC1", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC2, "UPMC2", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC3, "UPMC3", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC4, "UPMC4", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_USIAR, "USIAR", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* External access control */ + /* XXX : not implemented */ + spr_register(env, SPR_EAR, "EAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +#ifdef TARGET_PPC64 +static void spr_write_amr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + + /* + * Note, the HV=1 PR=0 case is handled earlier by simply using + * spr_write_generic for HV mode in the SPR table + */ + + /* Build insertion mask into t1 based on context */ + if (ctx->pr) { + gen_load_spr(tcg_ctx, t1, SPR_UAMOR); + } else { + gen_load_spr(tcg_ctx, t1, SPR_AMOR); + } + + /* Mask new bits into t2 */ + tcg_gen_and_tl(tcg_ctx, t2, t1, cpu_gpr[gprn]); + + /* Load AMR and clear new bits in t0 */ + gen_load_spr(tcg_ctx, t0, SPR_AMR); + tcg_gen_andc_tl(tcg_ctx, t0, t0, t1); + + /* Or'in new bits and write it out */ + tcg_gen_or_tl(tcg_ctx, t0, t0, t2); + gen_store_spr(tcg_ctx, SPR_AMR, t0); + spr_store_dump_spr(SPR_AMR); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); +} + +static void spr_write_uamor(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + + /* + * Note, the HV=1 case is handled earlier by simply using + * spr_write_generic for HV mode in the SPR table + */ + + /* Build insertion mask into t1 based on context */ + gen_load_spr(tcg_ctx, t1, SPR_AMOR); + + /* Mask new bits into t2 */ + tcg_gen_and_tl(tcg_ctx, t2, t1, cpu_gpr[gprn]); + + /* Load AMR and clear new bits in t0 */ + gen_load_spr(tcg_ctx, t0, SPR_UAMOR); + tcg_gen_andc_tl(tcg_ctx, t0, t0, t1); + + /* Or'in new bits and write it out */ + tcg_gen_or_tl(tcg_ctx, t0, t0, t2); + gen_store_spr(tcg_ctx, SPR_UAMOR, t0); + spr_store_dump_spr(SPR_UAMOR); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); +} + +static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + + /* + * Note, the HV=1 case is handled earlier by simply using + * spr_write_generic for HV mode in the SPR table + */ + + /* Build insertion mask into t1 based on context */ + gen_load_spr(tcg_ctx, t1, SPR_AMOR); + + /* Mask new bits into t2 */ + tcg_gen_and_tl(tcg_ctx, 
t2, t1, cpu_gpr[gprn]); + + /* Load AMR and clear new bits in t0 */ + gen_load_spr(tcg_ctx, t0, SPR_IAMR); + tcg_gen_andc_tl(tcg_ctx, t0, t0, t1); + + /* Or'in new bits and write it out */ + tcg_gen_or_tl(tcg_ctx, t0, t0, t2); + gen_store_spr(tcg_ctx, SPR_IAMR, t0); + spr_store_dump_spr(SPR_IAMR); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); +} + +static void gen_spr_amr(CPUPPCState *env) +{ + /* + * Virtual Page Class Key protection + * + * The AMR is accessible either via SPR 13 or SPR 29. 13 is + * userspace accessible, 29 is privileged. So we only need to set + * the kvm ONE_REG id on one of them, we use 29 + */ + spr_register(env, SPR_UAMR, "UAMR", + &spr_read_generic, &spr_write_amr, + &spr_read_generic, &spr_write_amr, + 0); + spr_register_kvm_hv(env, SPR_AMR, "AMR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_amr, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_AMR, 0); + spr_register_kvm_hv(env, SPR_UAMOR, "UAMOR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_uamor, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_UAMOR, 0); + spr_register_hv(env, SPR_AMOR, "AMOR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0); +} + +static void gen_spr_iamr(CPUPPCState *env) +{ + spr_register_kvm_hv(env, SPR_IAMR, "IAMR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_iamr, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_IAMR, 0); +} +#endif /* TARGET_PPC64 */ + +static void spr_read_thrm(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_fixup_thrm(tcg_ctx, tcg_ctx->cpu_env); + gen_load_spr(tcg_ctx, cpu_gpr[gprn], sprn); + spr_load_dump_spr(tcg_ctx, sprn); +} + +static void gen_spr_thrm(CPUPPCState *env) +{ + /* Thermal management */ + /* XXX : not implemented */ + spr_register(env, SPR_THRM1, "THRM1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_thrm, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_THRM2, "THRM2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_thrm, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_THRM3, "THRM3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_thrm, &spr_write_generic, + 0x00000000); +} + +/* SPR specific to PowerPC 604 implementation */ +static void gen_spr_604(CPUPPCState *env) +{ + /* Processor identification */ + spr_register(env, SPR_PIR, "PIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pir, + 0x00000000); + /* Breakpoints */ + /* XXX : not implemented */ + spr_register(env, SPR_IABR, "IABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register_kvm(env, SPR_DABR, "DABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DABR, 0x00000000); + /* Performance counters */ + /* XXX : not implemented */ + spr_register(env, SPR_7XX_MMCR0, "MMCR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC1, "PMC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC2, "PMC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_SIAR, "SIAR", + SPR_NOACCESS, SPR_NOACCESS, + 
&spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_SDA, "SDA", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* External access control */ + /* XXX : not implemented */ + spr_register(env, SPR_EAR, "EAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +/* SPR specific to PowerPC 603 implementation */ +static void gen_spr_603(CPUPPCState *env) +{ + /* External access control */ + /* XXX : not implemented */ + spr_register(env, SPR_EAR, "EAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Breakpoints */ + /* XXX : not implemented */ + spr_register(env, SPR_IABR, "IABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + +} + +/* SPR specific to PowerPC G2 implementation */ +static void gen_spr_G2(CPUPPCState *env) +{ + /* Memory base address */ + /* MBAR */ + /* XXX : not implemented */ + spr_register(env, SPR_MBAR, "MBAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Exception processing */ + spr_register(env, SPR_BOOKE_CSRR0, "CSRR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_CSRR1, "CSRR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Breakpoints */ + /* XXX : not implemented */ + spr_register(env, SPR_DABR, "DABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_DABR2, "DABR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_IABR, "IABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_IABR2, "IABR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_IBCR, "IBCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_DBCR, "DBCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +/* SPR specific to PowerPC 602 implementation */ +static void gen_spr_602(CPUPPCState *env) +{ + /* ESA registers */ + /* XXX : not implemented */ + spr_register(env, SPR_SER, "SER", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_SEBR, "SEBR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_ESASRR, "ESASRR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Floating point status */ + /* XXX : not implemented */ + spr_register(env, SPR_SP, "SP", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_LT, "LT", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Watchdog timer */ + /* XXX : not implemented */ + spr_register(env, SPR_TCR, "TCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Interrupt base */ + spr_register(env, SPR_IBR, "IBR", + SPR_NOACCESS, SPR_NOACCESS, 
+ &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_IABR, "IABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +/* SPR specific to PowerPC 601 implementation */ +static void gen_spr_601(CPUPPCState *env) +{ + /* Multiplication/division register */ + /* MQ */ + spr_register(env, SPR_MQ, "MQ", + &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* RTC registers */ + spr_register(env, SPR_601_RTCU, "RTCU", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_601_rtcu, + 0x00000000); + spr_register(env, SPR_601_VRTCU, "RTCU", + &spr_read_601_rtcu, SPR_NOACCESS, + &spr_read_601_rtcu, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_601_RTCL, "RTCL", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_601_rtcl, + 0x00000000); + spr_register(env, SPR_601_VRTCL, "RTCL", + &spr_read_601_rtcl, SPR_NOACCESS, + &spr_read_601_rtcl, SPR_NOACCESS, + 0x00000000); + /* Timer */ + spr_register(env, SPR_601_UDECR, "UDECR", + &spr_read_decr, SPR_NOACCESS, + &spr_read_decr, SPR_NOACCESS, + 0x00000000); + /* External access control */ + /* XXX : not implemented */ + spr_register(env, SPR_EAR, "EAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + spr_register(env, SPR_IBAT0U, "IBAT0U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_601_ubat, &spr_write_601_ubatu, + 0x00000000); + spr_register(env, SPR_IBAT0L, "IBAT0L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_601_ubat, &spr_write_601_ubatl, + 0x00000000); + spr_register(env, SPR_IBAT1U, "IBAT1U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_601_ubat, &spr_write_601_ubatu, + 0x00000000); + spr_register(env, SPR_IBAT1L, "IBAT1L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_601_ubat, &spr_write_601_ubatl, + 0x00000000); + spr_register(env, SPR_IBAT2U, "IBAT2U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_601_ubat, &spr_write_601_ubatu, + 0x00000000); + spr_register(env, SPR_IBAT2L, "IBAT2L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_601_ubat, &spr_write_601_ubatl, + 0x00000000); + spr_register(env, SPR_IBAT3U, "IBAT3U", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_601_ubat, &spr_write_601_ubatu, + 0x00000000); + spr_register(env, SPR_IBAT3L, "IBAT3L", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_601_ubat, &spr_write_601_ubatl, + 0x00000000); + env->nb_BATs = 4; +} + +static void gen_spr_74xx(CPUPPCState *env) +{ + /* Processor identification */ + spr_register(env, SPR_PIR, "PIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pir, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_74XX_MMCR2, "MMCR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_74XX_UMMCR2, "UMMCR2", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX: not implemented */ + spr_register(env, SPR_BAMR, "BAMR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MSSCR0, "MSSCR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, 
SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Altivec */ + spr_register(env, SPR_VRSAVE, "VRSAVE", + &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, spr_access_nop, + 0x00000000); + /* Not strictly an SPR */ + vscr_init(env, 0x00010000); +} + +static void gen_l3_ctrl(CPUPPCState *env) +{ + /* L3CR */ + /* XXX : not implemented */ + spr_register(env, SPR_L3CR, "L3CR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* L3ITCR0 */ + /* XXX : not implemented */ + spr_register(env, SPR_L3ITCR0, "L3ITCR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* L3PM */ + /* XXX : not implemented */ + spr_register(env, SPR_L3PM, "L3PM", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_74xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) +{ + env->nb_tlb = nb_tlbs; + env->nb_ways = nb_ways; + env->id_tlbs = 1; + env->tlb_type = TLB_6XX; + /* XXX : not implemented */ + spr_register(env, SPR_PTEHI, "PTEHI", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_PTELO, "PTELO", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_TLBMISS, "TLBMISS", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE); + gen_store_spr(tcg_ctx, sprn, t0); + tcg_temp_free(tcg_ctx, t0); +} + +static void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_tl(tcg_ctx, t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE); + gen_store_spr(tcg_ctx, sprn, t0); + tcg_temp_free(tcg_ctx, t0); +} + +static void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_booke206_tlbflush(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, sprn); + gen_helper_booke_setpid(tcg_ctx, tcg_ctx->cpu_env, t0, cpu_gpr[gprn]); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void spr_write_eplc(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_booke_set_eplc(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void spr_write_epsc(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_helper_booke_set_epsc(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void gen_spr_usprg3(CPUPPCState *env) +{ + spr_register(env, SPR_USPRG3, "USPRG3", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); +} + +static void gen_spr_usprgh(CPUPPCState *env) +{ + spr_register(env, SPR_USPRG4, "USPRG4", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_USPRG5, "USPRG5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, 
+ 0x00000000); + spr_register(env, SPR_USPRG6, "USPRG6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_USPRG7, "USPRG7", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); +} + +/* PowerPC BookE SPR */ +static void gen_spr_BookE(CPUPPCState *env, uint64_t ivor_mask) +{ + const char *ivor_names[64] = { + "IVOR0", "IVOR1", "IVOR2", "IVOR3", + "IVOR4", "IVOR5", "IVOR6", "IVOR7", + "IVOR8", "IVOR9", "IVOR10", "IVOR11", + "IVOR12", "IVOR13", "IVOR14", "IVOR15", + "IVOR16", "IVOR17", "IVOR18", "IVOR19", + "IVOR20", "IVOR21", "IVOR22", "IVOR23", + "IVOR24", "IVOR25", "IVOR26", "IVOR27", + "IVOR28", "IVOR29", "IVOR30", "IVOR31", + "IVOR32", "IVOR33", "IVOR34", "IVOR35", + "IVOR36", "IVOR37", "IVOR38", "IVOR39", + "IVOR40", "IVOR41", "IVOR42", "IVOR43", + "IVOR44", "IVOR45", "IVOR46", "IVOR47", + "IVOR48", "IVOR49", "IVOR50", "IVOR51", + "IVOR52", "IVOR53", "IVOR54", "IVOR55", + "IVOR56", "IVOR57", "IVOR58", "IVOR59", + "IVOR60", "IVOR61", "IVOR62", "IVOR63", + }; +#define SPR_BOOKE_IVORxx (-1) + int ivor_sprn[64] = { + SPR_BOOKE_IVOR0, SPR_BOOKE_IVOR1, SPR_BOOKE_IVOR2, SPR_BOOKE_IVOR3, + SPR_BOOKE_IVOR4, SPR_BOOKE_IVOR5, SPR_BOOKE_IVOR6, SPR_BOOKE_IVOR7, + SPR_BOOKE_IVOR8, SPR_BOOKE_IVOR9, SPR_BOOKE_IVOR10, SPR_BOOKE_IVOR11, + SPR_BOOKE_IVOR12, SPR_BOOKE_IVOR13, SPR_BOOKE_IVOR14, SPR_BOOKE_IVOR15, + SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, + SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, + SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, + SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, + SPR_BOOKE_IVOR32, SPR_BOOKE_IVOR33, SPR_BOOKE_IVOR34, SPR_BOOKE_IVOR35, + SPR_BOOKE_IVOR36, SPR_BOOKE_IVOR37, SPR_BOOKE_IVOR38, SPR_BOOKE_IVOR39, + SPR_BOOKE_IVOR40, SPR_BOOKE_IVOR41, SPR_BOOKE_IVOR42, SPR_BOOKE_IVORxx, + SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, + SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, + SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, + SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, + SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, + }; + int i; + + /* Interrupt processing */ + spr_register(env, SPR_BOOKE_CSRR0, "CSRR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_CSRR1, "CSRR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Debug */ + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC1, "IAC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC2, "IAC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DAC1, "DAC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DAC2, "DAC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DBCR0, "DBCR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_40x_dbcr0, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DBCR1, "DBCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, 
&spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DBCR2, "DBCR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_DSRR0, "DSRR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_DSRR1, "DSRR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DBSR, "DBSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_clear, + 0x00000000); + spr_register(env, SPR_BOOKE_DEAR, "DEAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_ESR, "ESR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_IVPR, "IVPR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_excp_prefix, + 0x00000000); + /* Exception vectors */ + for (i = 0; i < 64; i++) { + if (ivor_mask & (1ULL << i)) { + if (ivor_sprn[i] == SPR_BOOKE_IVORxx) { + fprintf(stderr, "ERROR: IVOR %d SPR is not defined\n", i); + exit(1); + } + spr_register(env, ivor_sprn[i], ivor_names[i], + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_excp_vector, + 0x00000000); + } + } + spr_register(env, SPR_BOOKE_PID, "PID", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke_pid, + 0x00000000); + spr_register(env, SPR_BOOKE_TCR, "TCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke_tcr, + 0x00000000); + spr_register(env, SPR_BOOKE_TSR, "TSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke_tsr, + 0x00000000); + /* Timer */ + spr_register(env, SPR_DECR, "DECR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_decr, &spr_write_decr, + 0x00000000); + spr_register(env, SPR_BOOKE_DECAR, "DECAR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_generic, + 0x00000000); + /* SPRGs */ + spr_register(env, SPR_USPRG0, "USPRG0", + &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG4, "SPRG4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG5, "SPRG5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG6, "SPRG6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG7, "SPRG7", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_SPRG8, "SPRG8", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_SPRG9, "SPRG9", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static inline uint32_t gen_tlbncfg(uint32_t assoc, uint32_t minsize, + uint32_t maxsize, uint32_t flags, + uint32_t nentries) +{ + return (assoc << TLBnCFG_ASSOC_SHIFT) | + (minsize << TLBnCFG_MINSIZE_SHIFT) | + (maxsize << TLBnCFG_MAXSIZE_SHIFT) | + flags | nentries; +} + +/* BookE 2.06 storage control registers */ +static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask, + uint32_t *tlbncfg, uint32_t mmucfg) +{ + const char *mas_names[8] = { + "MAS0", "MAS1", "MAS2", "MAS3", "MAS4", "MAS5", "MAS6", "MAS7", + }; + int mas_sprn[8] = { + SPR_BOOKE_MAS0, SPR_BOOKE_MAS1, SPR_BOOKE_MAS2, 
SPR_BOOKE_MAS3, + SPR_BOOKE_MAS4, SPR_BOOKE_MAS5, SPR_BOOKE_MAS6, SPR_BOOKE_MAS7, + }; + int i; + + /* TLB assist registers */ + /* XXX : not implemented */ + for (i = 0; i < 8; i++) { + void (*uea_write)(DisasContext *ctx, int sprn, int gprn) = + &spr_write_generic32; + if (i == 2 && (mas_mask & (1 << i)) && (env->insns_flags & PPC_64B)) { + uea_write = &spr_write_generic; + } + if (mas_mask & (1 << i)) { + spr_register(env, mas_sprn[i], mas_names[i], + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, uea_write, + 0x00000000); + } + } + if (env->nb_pids > 1) { + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_PID1, "PID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke_pid, + 0x00000000); + } + if (env->nb_pids > 2) { + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_PID2, "PID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke_pid, + 0x00000000); + } + + spr_register(env, SPR_BOOKE_EPLC, "EPLC", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_eplc, + 0x00000000); + spr_register(env, SPR_BOOKE_EPSC, "EPSC", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_epsc, + 0x00000000); + + /* XXX : not implemented */ + spr_register(env, SPR_MMUCFG, "MMUCFG", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + mmucfg); + switch (env->nb_ways) { + case 4: + spr_register(env, SPR_BOOKE_TLB3CFG, "TLB3CFG", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + tlbncfg[3]); + /* Fallthru */ + case 3: + spr_register(env, SPR_BOOKE_TLB2CFG, "TLB2CFG", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + tlbncfg[2]); + /* Fallthru */ + case 2: + spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + tlbncfg[1]); + /* Fallthru */ + case 1: + spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + tlbncfg[0]); + /* Fallthru */ + case 0: + default: + break; + } + + gen_spr_usprgh(env); +} + +/* SPR specific to PowerPC 440 implementation */ +static void gen_spr_440(CPUPPCState *env) +{ + /* Cache control */ + /* XXX : not implemented */ + spr_register(env, SPR_440_DNV0, "DNV0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_DNV1, "DNV1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_DNV2, "DNV2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_DNV3, "DNV3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_DTV0, "DTV0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_DTV1, "DTV1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_DTV2, "DTV2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_DTV3, "DTV3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_DVLIM, "DVLIM", + SPR_NOACCESS, SPR_NOACCESS, + 
&spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_INV0, "INV0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_INV1, "INV1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_INV2, "INV2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_INV3, "INV3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_ITV0, "ITV0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_ITV1, "ITV1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_ITV2, "ITV2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_ITV3, "ITV3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_IVLIM, "IVLIM", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Cache debug */ + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DCDBTRH, "DCDBTRH", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DCDBTRL, "DCDBTRL", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_ICDBTRH, "ICDBTRH", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_ICDBTRL, "ICDBTRL", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_DBDR, "DBDR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Processor control */ + spr_register(env, SPR_4xx_CCR0, "CCR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_440_RSTCFG, "RSTCFG", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* Storage control */ + spr_register(env, SPR_440_MMUCR, "MMUCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +/* SPR shared between PowerPC 40x implementations */ +static void gen_spr_40x(CPUPPCState *env) +{ + /* Cache */ + /* not emulated, as QEMU does not emulate caches */ + spr_register(env, SPR_40x_DCCR, "DCCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* not emulated, as QEMU does not emulate caches */ + spr_register(env, SPR_40x_ICCR, "ICCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* not emulated, as QEMU does not emulate caches */ + spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /*
Exception */ + spr_register(env, SPR_40x_DEAR, "DEAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_40x_ESR, "ESR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_40x_EVPR, "EVPR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_excp_prefix, + 0x00000000); + spr_register(env, SPR_40x_SRR2, "SRR2", + &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_40x_SRR3, "SRR3", + &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Timers */ + spr_register(env, SPR_40x_PIT, "PIT", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_40x_pit, &spr_write_40x_pit, + 0x00000000); + spr_register(env, SPR_40x_TCR, "TCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke_tcr, + 0x00000000); + spr_register(env, SPR_40x_TSR, "TSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke_tsr, + 0x00000000); +} + +/* SPR specific to PowerPC 405 implementation */ +static void gen_spr_405(CPUPPCState *env) +{ + /* MMU */ + spr_register(env, SPR_40x_PID, "PID", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_4xx_CCR0, "CCR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00700000); + /* Debug interface */ + /* XXX : not implemented */ + spr_register(env, SPR_40x_DBCR0, "DBCR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_40x_dbcr0, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_405_DBCR1, "DBCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_40x_DBSR, "DBSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_clear, + /* Last reset was system reset */ + 0x00000300); + /* XXX : not implemented */ + spr_register(env, SPR_40x_DAC1, "DAC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_40x_DAC2, "DAC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_405_DVC1, "DVC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_405_DVC2, "DVC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_40x_IAC1, "IAC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_40x_IAC2, "IAC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_405_IAC3, "IAC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_405_IAC4, "IAC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Storage control */ + /* XXX: TODO: not implemented */ + spr_register(env, SPR_405_SLER, "SLER", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_40x_sler, + 0x00000000); + spr_register(env, SPR_40x_ZPR, "ZPR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, 
SPR_405_SU0R, "SU0R", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* SPRG */ + spr_register(env, SPR_USPRG0, "USPRG0", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG4, "SPRG4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG5, "SPRG5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG6, "SPRG6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_SPRG7, "SPRG7", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + gen_spr_usprgh(env); +} + +/* SPR shared between PowerPC 401 & 403 implementations */ +static void gen_spr_401_403(CPUPPCState *env) +{ + /* Time base */ + spr_register(env, SPR_403_VTBL, "TBL", + &spr_read_tbl, SPR_NOACCESS, + &spr_read_tbl, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_403_TBL, "TBL", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_tbl, + 0x00000000); + spr_register(env, SPR_403_VTBU, "TBU", + &spr_read_tbu, SPR_NOACCESS, + &spr_read_tbu, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_403_TBU, "TBU", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_tbu, + 0x00000000); + /* Debug */ + /* not emulated, as QEMU does not emulate caches */ + spr_register(env, SPR_403_CDBCR, "CDBCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +/* SPR specific to PowerPC 401 implementation */ +static void gen_spr_401(CPUPPCState *env) +{ + /* Debug interface */ + /* XXX : not implemented */ + spr_register(env, SPR_40x_DBCR0, "DBCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_40x_dbcr0, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_40x_DBSR, "DBSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_clear, + /* Last reset was system reset */ + 0x00000300); + /* XXX : not implemented */ + spr_register(env, SPR_40x_DAC1, "DAC", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_40x_IAC1, "IAC", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Storage control */ + /* XXX: TODO: not implemented */ + spr_register(env, SPR_405_SLER, "SLER", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_40x_sler, + 0x00000000); + /* not emulated, as QEMU never does speculative access */ + spr_register(env, SPR_40x_SGR, "SGR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0xFFFFFFFF); + /* not emulated, as QEMU does not emulate caches */ + spr_register(env, SPR_40x_DCWR, "DCWR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_spr_401x2(CPUPPCState *env) +{ + gen_spr_401(env); + spr_register(env, SPR_40x_PID, "PID", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_40x_ZPR, "ZPR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +/* SPR specific to PowerPC 403 implementation */ +static void gen_spr_403(CPUPPCState *env) +{ + /* Debug interface */ + /* XXX : not implemented */ + 
spr_register(env, SPR_40x_DBSR, "DBSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_clear, + /* Last reset was system reset */ + 0x00000300); + /* XXX : not implemented */ + spr_register(env, SPR_40x_DAC1, "DAC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_40x_DAC2, "DAC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_40x_IAC1, "IAC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_40x_IAC2, "IAC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_spr_403_real(CPUPPCState *env) +{ + spr_register(env, SPR_403_PBL1, "PBL1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_403_pbr, &spr_write_403_pbr, + 0x00000000); + spr_register(env, SPR_403_PBU1, "PBU1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_403_pbr, &spr_write_403_pbr, + 0x00000000); + spr_register(env, SPR_403_PBL2, "PBL2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_403_pbr, &spr_write_403_pbr, + 0x00000000); + spr_register(env, SPR_403_PBU2, "PBU2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_403_pbr, &spr_write_403_pbr, + 0x00000000); +} + +static void gen_spr_403_mmu(CPUPPCState *env) +{ + /* MMU */ + spr_register(env, SPR_40x_PID, "PID", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_40x_ZPR, "ZPR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +/* SPR specific to PowerPC compression coprocessor extension */ +static void gen_spr_compress(CPUPPCState *env) +{ + /* XXX : not implemented */ + spr_register(env, SPR_401_SKR, "SKR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +#if defined(TODO_USER_ONLY) +static void gen_spr_5xx_8xx(CPUPPCState *env) +{ + /* Exception processing */ + spr_register_kvm(env, SPR_DSISR, "DSISR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DSISR, 0x00000000); + spr_register_kvm(env, SPR_DAR, "DAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DAR, 0x00000000); + /* Timer */ + spr_register(env, SPR_DECR, "DECR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_decr, &spr_write_decr, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_EIE, "EIE", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_EID, "EID", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_NRI, "NRI", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPA, "CMPA", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPB, "CMPB", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPC, "CMPC", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPD, "CMPD", + SPR_NOACCESS, SPR_NOACCESS, + 
&spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_ECR, "ECR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_DER, "DER", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_COUNTA, "COUNTA", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_COUNTB, "COUNTB", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPE, "CMPE", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPF, "CMPF", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPG, "CMPG", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_CMPH, "CMPH", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_LCTRL1, "LCTRL1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_LCTRL2, "LCTRL2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_BAR, "BAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_DPDR, "DPDR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_IMMR, "IMMR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_spr_5xx(CPUPPCState *env) +{ + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_GRA, "MI_GRA", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_GRA, "L2U_GRA", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RPCU_BBCMCR, "L2U_BBCMCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_MCR, "L2U_MCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RBA0, "MI_RBA0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RBA1, "MI_RBA1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RBA2, "MI_RBA2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RBA3, "MI_RBA3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, 
SPR_RCPU_L2U_RBA0, "L2U_RBA0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RBA1, "L2U_RBA1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RBA2, "L2U_RBA2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RBA3, "L2U_RBA3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RA0, "MI_RA0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RA1, "MI_RA1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RA2, "MI_RA2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_MI_RA3, "MI_RA3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RA0, "L2U_RA0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RA1, "L2U_RA1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RA2, "L2U_RA2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_L2U_RA3, "L2U_RA3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_RCPU_FPECR, "FPECR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_spr_8xx(CPUPPCState *env) +{ + /* XXX : not implemented */ + spr_register(env, SPR_MPC_IC_CST, "IC_CST", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_IC_ADR, "IC_ADR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_IC_DAT, "IC_DAT", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_DC_CST, "DC_CST", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_DC_ADR, "DC_ADR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_DC_DAT, "DC_DAT", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_CTR, "MI_CTR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_AP, "MI_AP", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_EPN, "MI_EPN", + SPR_NOACCESS, SPR_NOACCESS, + 
&spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_TWC, "MI_TWC", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_RPN, "MI_RPN", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_DBCAM, "MI_DBCAM", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_DBRAM0, "MI_DBRAM0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MI_DBRAM1, "MI_DBRAM1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_CTR, "MD_CTR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_CASID, "MD_CASID", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_AP, "MD_AP", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_EPN, "MD_EPN", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_TWB, "MD_TWB", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_TWC, "MD_TWC", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_RPN, "MD_RPN", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_TW, "MD_TW", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_DBCAM, "MD_DBCAM", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_DBRAM0, "MD_DBRAM0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MPC_MD_DBRAM1, "MD_DBRAM1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} +#endif + +/* + * AMR => SPR 29 (Power 2.04) + * CTRL => SPR 136 (Power 2.04) + * CTRL => SPR 152 (Power 2.04) + * SCOMC => SPR 276 (64 bits ?) + * SCOMD => SPR 277 (64 bits ?) + * TBU40 => SPR 286 (Power 2.04 hypv) + * HSPRG0 => SPR 304 (Power 2.04 hypv) + * HSPRG1 => SPR 305 (Power 2.04 hypv) + * HDSISR => SPR 306 (Power 2.04 hypv) + * HDAR => SPR 307 (Power 2.04 hypv) + * PURR => SPR 309 (Power 2.04 hypv) + * HDEC => SPR 310 (Power 2.04 hypv) + * HIOR => SPR 311 (hypv) + * RMOR => SPR 312 (970) + * HRMOR => SPR 313 (Power 2.04 hypv) + * HSRR0 => SPR 314 (Power 2.04 hypv) + * HSRR1 => SPR 315 (Power 2.04 hypv) + * LPIDR => SPR 317 (970) + * EPR => SPR 702 (Power 2.04 emb) + * perf => 768-783 (Power 2.04) + * perf => 784-799 (Power 2.04) + * PPR => SPR 896 (Power 2.04) + * DABRX => 1015 (Power 2.04 hypv) + * FPECR => SPR 1022 (?) + * ... 
and more (thermal management, performance counters, ...) + */ + +/*****************************************************************************/ +/* Exception vectors models */ +static void init_excp_4xx_real(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000; + env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010; + env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020; + env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000; + env->ivor_mask = 0x0000FFF0UL; + env->ivpr_mask = 0xFFFF0000UL; + /* Hardware reset vector */ + env->hreset_vector = 0xFFFFFFFCUL; +} + +static void init_excp_4xx_softmmu(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000; + env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010; + env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020; + env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001100; + env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001200; + env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000; + env->ivor_mask = 0x0000FFF0UL; + env->ivpr_mask = 0xFFFF0000UL; + /* Hardware reset vector */ + env->hreset_vector = 0xFFFFFFFCUL; +} + +#if defined(TODO_USER_ONLY) +static void init_excp_MPC5xx(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00; + env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001000; + env->excp_vectors[POWERPC_EXCP_DABR] = 0x00001C00; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001C00; + env->excp_vectors[POWERPC_EXCP_MEXTBR] = 0x00001E00; + env->excp_vectors[POWERPC_EXCP_NMEXTBR] = 0x00001F00; + env->ivor_mask = 0x0000FFF0UL; + env->ivpr_mask = 0xFFFF0000UL; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +static void init_excp_MPC8xx(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + 
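+ /*
+  * Editor's note on the duplicate offsets in this table (and in the
+  * MPC5xx table above): FPU and DECR share 0x00000900, and DABR/IABR
+  * below share 0x00001C00. The duplicates are present in the source
+  * table as written, not transcription errors.
+  */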
env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00; + env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001000; + env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001100; + env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001200; + env->excp_vectors[POWERPC_EXCP_ITLBE] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_DTLBE] = 0x00001400; + env->excp_vectors[POWERPC_EXCP_DABR] = 0x00001C00; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001C00; + env->excp_vectors[POWERPC_EXCP_MEXTBR] = 0x00001E00; + env->excp_vectors[POWERPC_EXCP_NMEXTBR] = 0x00001F00; + env->ivor_mask = 0x0000FFF0UL; + env->ivpr_mask = 0xFFFF0000UL; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} +#endif + +static void init_excp_G2(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000A00; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; + env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; + env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +static void init_excp_e200(CPUPPCState *env, target_ulong ivpr_mask) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000FFC; + env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_SPEU] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_EFPDI] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_EFPRI] = 0x00000000; + env->ivor_mask = 0x0000FFF7UL; + env->ivpr_mask = ivpr_mask; + /* Hardware reset vector */ + env->hreset_vector = 0xFFFFFFFCUL; +} + +static void init_excp_BookE(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000; + 
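+ /*
+  * The zero vectors above are placeholders: BookE guests program the
+  * vectors at run time through IVPR/IVORn, filtered by the
+  * ivpr_mask/ivor_mask values set below. As an illustrative example,
+  * with IVPR = 0x00010000 and IVOR2 = 0x0060, a data storage exception
+  * would be dispatched to 0x00010060.
+  */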
env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000; + env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000; + env->ivor_mask = 0x0000FFF0UL; + env->ivpr_mask = 0xFFFF0000UL; + /* Hardware reset vector */ + env->hreset_vector = 0xFFFFFFFCUL; +} + +static void init_excp_601(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_IO] = 0x00000A00; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_RUNM] = 0x00002000; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +static void init_excp_602(CPUPPCState *env) +{ + /* XXX: exception prefix has a special behavior on 602 */ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; + env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; + env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; + env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001500; + env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001600; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +static void init_excp_603(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; + env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; + env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +static void init_excp_604(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 
0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +static void init_excp_7x0(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; + env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +static void init_excp_750cl(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +static void init_excp_750cx(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +/* XXX: Check if this is correct */ +static void init_excp_7x5(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + 
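+ /*
+  * Compared with init_excp_7x0() above, this table adds the software
+  * TLB miss vectors IFTLB/DLTLB/DSTLB at 0x00001000/0x00001100/
+  * 0x00001200, which is what distinguishes the 745/755 variants.
+  */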
env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; + env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; + env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; + env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; + env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +static void init_excp_7400(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; + env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; + env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600; + env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +static void init_excp_7450(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; + env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20; + env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000; + env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100; + env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; + env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600; + /* Hardware reset vector */ + env->hreset_vector = 0x00000100UL; +} + +#if defined(TARGET_PPC64) +static void init_excp_970(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_ISEG] = 
0x00000480; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_HDECR] = 0x00000980; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; + env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20; + env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; + env->excp_vectors[POWERPC_EXCP_MAINT] = 0x00001600; + env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001700; + env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001800; + /* Hardware reset vector */ + env->hreset_vector = 0x0000000000000100ULL; +} + +static void init_excp_POWER7(CPUPPCState *env) +{ + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; + env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; + env->excp_vectors[POWERPC_EXCP_ISEG] = 0x00000480; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; + env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; + env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; + env->excp_vectors[POWERPC_EXCP_HDECR] = 0x00000980; + env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; + env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; + env->excp_vectors[POWERPC_EXCP_HDSI] = 0x00000E00; + env->excp_vectors[POWERPC_EXCP_HISI] = 0x00000E20; + env->excp_vectors[POWERPC_EXCP_HV_EMU] = 0x00000E40; + env->excp_vectors[POWERPC_EXCP_HV_MAINT] = 0x00000E60; + env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; + env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20; + env->excp_vectors[POWERPC_EXCP_VSXU] = 0x00000F40; + /* Hardware reset vector */ + env->hreset_vector = 0x0000000000000100ULL; +} + +static void init_excp_POWER8(CPUPPCState *env) +{ + init_excp_POWER7(env); + + env->excp_vectors[POWERPC_EXCP_SDOOR] = 0x00000A00; + env->excp_vectors[POWERPC_EXCP_FU] = 0x00000F60; + env->excp_vectors[POWERPC_EXCP_HV_FU] = 0x00000F80; + env->excp_vectors[POWERPC_EXCP_SDOOR_HV] = 0x00000E80; +} + +static void init_excp_POWER9(CPUPPCState *env) +{ + init_excp_POWER8(env); + + env->excp_vectors[POWERPC_EXCP_HVIRT] = 0x00000EA0; +} + +static void init_excp_POWER10(CPUPPCState *env) +{ + init_excp_POWER9(env); +} + +#endif + +/*****************************************************************************/ +/* Power management enable checks */ +static int check_pow_none(CPUPPCState *env) +{ + return 0; +} + +static int check_pow_nocheck(CPUPPCState *env) +{ + return 1; +} + +static int check_pow_hid0(CPUPPCState *env) +{ + if (env->spr[SPR_HID0] & 0x00E00000) { + return 1; + } + + return 0; +} + +static int check_pow_hid0_74xx(CPUPPCState *env) +{ + if (env->spr[SPR_HID0] & 0x00600000) { + return 1; + } + + return 0; +} + +static bool ppc_cpu_interrupts_big_endian_always(PowerPCCPU *cpu) +{ + return true; +} + +#ifdef TARGET_PPC64 +static bool ppc_cpu_interrupts_big_endian_lpcr(PowerPCCPU *cpu) +{ + return !(cpu->env.spr[SPR_LPCR] & LPCR_ILE); +} +#endif + +/*****************************************************************************/ +/* PowerPC implementations definitions */ + +#define POWERPC_FAMILY_NAME(_name) \ + 
glue(glue(ppc_, _name), _cpu_family_class_init) + +#define POWERPC_FAMILY(_name) \ + static void \ + glue(glue(ppc_, _name), _cpu_family_class_init)(CPUClass *, void *); \ + \ + static void glue(glue(ppc_, _name), _cpu_family_class_init) + +static void init_proc_401(CPUPPCState *env) +{ + gen_spr_40x(env); + gen_spr_401_403(env); + gen_spr_401(env); + init_excp_4xx_real(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc40x_irq_init(env_archcpu(env)); + + SET_FIT_PERIOD(12, 16, 20, 24); + SET_WDT_PERIOD(16, 20, 24, 28); +} + +POWERPC_FAMILY(401)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 401"; + pcc->init_proc = init_proc_401; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | + PPC_WRTEE | PPC_DCR | + PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | + PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_4xx_COMMON | PPC_40x_EXCP; + pcc->msr_mask = (1ull << MSR_KEY) | + (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_ME) | + (1ull << MSR_DE) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_REAL; + pcc->excp_model = POWERPC_EXCP_40x; + pcc->bus_model = PPC_FLAGS_INPUT_401; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_401x2(CPUPPCState *env) +{ + gen_spr_40x(env); + gen_spr_401_403(env); + gen_spr_401x2(env); + gen_spr_compress(env); + /* Memory management */ + env->nb_tlb = 64; + env->nb_ways = 1; + env->id_tlbs = 0; + env->tlb_type = TLB_EMB; + init_excp_4xx_softmmu(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc40x_irq_init(env_archcpu(env)); + + SET_FIT_PERIOD(12, 16, 20, 24); + SET_WDT_PERIOD(16, 20, 24, 28); +} + +POWERPC_FAMILY(401x2)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 401x2"; + pcc->init_proc = init_proc_401x2; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_DCR | PPC_WRTEE | + PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | + PPC_4xx_COMMON | PPC_40x_EXCP; + pcc->msr_mask = (1ull << 20) | + (1ull << MSR_KEY) | + (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_ME) | + (1ull << MSR_DE) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; + pcc->excp_model = POWERPC_EXCP_40x; + pcc->bus_model = PPC_FLAGS_INPUT_401; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | + POWERPC_FLAG_BUS_CLK; +} + +#if 0 +static void init_proc_401x3(CPUPPCState *env) +{ + gen_spr_40x(env); + gen_spr_401_403(env); + gen_spr_401(env); + gen_spr_401x2(env); + gen_spr_compress(env); + init_excp_4xx_softmmu(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc40x_irq_init(env_archcpu(env)); + + SET_FIT_PERIOD(12, 16, 20, 24); + SET_WDT_PERIOD(16, 20, 24, 28); +} + +POWERPC_FAMILY(401x3)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 401x3"; + pcc->init_proc = init_proc_401x3; + pcc->check_pow = check_pow_nocheck; + 
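+ /*
+  * Editor's note: pcc->insns_flags below is a capability mask; the
+  * translator only accepts an opcode whose instruction-group flag
+  * (PPC_MFTB, PPC_40x_TLB, ...) is set here, so each family class
+  * simply ORs together the groups its cores implement.
+  */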
pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_DCR | PPC_WRTEE | + PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | + PPC_4xx_COMMON | PPC_40x_EXCP; + pcc->msr_mask = (1ull << 20) | + (1ull << MSR_KEY) | + (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_ME) | + (1ull << MSR_DWE) | + (1ull << MSR_DE) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; + pcc->excp_model = POWERPC_EXCP_40x; + pcc->bus_model = PPC_FLAGS_INPUT_401; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | + POWERPC_FLAG_BUS_CLK; +} +#endif + +static void init_proc_IOP480(CPUPPCState *env) +{ + gen_spr_40x(env); + gen_spr_401_403(env); + gen_spr_401x2(env); + gen_spr_compress(env); + /* Memory management */ + env->nb_tlb = 64; + env->nb_ways = 1; + env->id_tlbs = 0; + env->tlb_type = TLB_EMB; + init_excp_4xx_softmmu(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc40x_irq_init(env_archcpu(env)); + + SET_FIT_PERIOD(8, 12, 16, 20); + SET_WDT_PERIOD(16, 20, 24, 28); +} + +POWERPC_FAMILY(IOP480)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "IOP480"; + pcc->init_proc = init_proc_IOP480; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | + PPC_DCR | PPC_WRTEE | + PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | + PPC_4xx_COMMON | PPC_40x_EXCP; + pcc->msr_mask = (1ull << 20) | + (1ull << MSR_KEY) | + (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_ME) | + (1ull << MSR_DE) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; + pcc->excp_model = POWERPC_EXCP_40x; + pcc->bus_model = PPC_FLAGS_INPUT_401; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_403(CPUPPCState *env) +{ + gen_spr_40x(env); + gen_spr_401_403(env); + gen_spr_403(env); + gen_spr_403_real(env); + init_excp_4xx_real(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc40x_irq_init(env_archcpu(env)); + + SET_FIT_PERIOD(8, 12, 16, 20); + SET_WDT_PERIOD(16, 20, 24, 28); +} + +POWERPC_FAMILY(403)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 403"; + pcc->init_proc = init_proc_403; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | + PPC_DCR | PPC_WRTEE | + PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | + PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_4xx_COMMON | PPC_40x_EXCP; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_ME) | + (1ull << MSR_PE) | + (1ull << MSR_PX) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_REAL; + pcc->excp_model = POWERPC_EXCP_40x; + pcc->bus_model = PPC_FLAGS_INPUT_401; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_403GCX(CPUPPCState 
*env) +{ + gen_spr_40x(env); + gen_spr_401_403(env); + gen_spr_403(env); + gen_spr_403_real(env); + gen_spr_403_mmu(env); + /* Bus access control */ + /* not emulated, as QEMU never does speculative access */ + spr_register(env, SPR_40x_SGR, "SGR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0xFFFFFFFF); + /* not emulated, as QEMU does not emulate caches */ + spr_register(env, SPR_40x_DCWR, "DCWR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + env->nb_tlb = 64; + env->nb_ways = 1; + env->id_tlbs = 0; + env->tlb_type = TLB_EMB; + init_excp_4xx_softmmu(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc40x_irq_init(env_archcpu(env)); + + SET_FIT_PERIOD(8, 12, 16, 20); + SET_WDT_PERIOD(16, 20, 24, 28); +} + +POWERPC_FAMILY(403GCX)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 403 GCX"; + pcc->init_proc = init_proc_403GCX; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | + PPC_DCR | PPC_WRTEE | + PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | + PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | + PPC_4xx_COMMON | PPC_40x_EXCP; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_ME) | + (1ull << MSR_PE) | + (1ull << MSR_PX) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z; + pcc->excp_model = POWERPC_EXCP_40x; + pcc->bus_model = PPC_FLAGS_INPUT_401; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_405(CPUPPCState *env) +{ + /* Time base */ + gen_tbl(env); + gen_spr_40x(env); + gen_spr_405(env); + /* Bus access control */ + /* not emulated, as QEMU never does speculative access */ + spr_register(env, SPR_40x_SGR, "SGR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0xFFFFFFFF); + /* not emulated, as QEMU does not emulate caches */ + spr_register(env, SPR_40x_DCWR, "DCWR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + env->nb_tlb = 64; + env->nb_ways = 1; + env->id_tlbs = 0; + env->tlb_type = TLB_EMB; + + init_excp_4xx_softmmu(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc40x_irq_init(env_archcpu(env)); + + SET_FIT_PERIOD(8, 12, 16, 20); + SET_WDT_PERIOD(16, 20, 24, 28); +} + +POWERPC_FAMILY(405)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 405"; + pcc->init_proc = init_proc_405; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_DCR | PPC_WRTEE | + PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC | + PPC_4xx_COMMON | PPC_405_MAC | PPC_40x_EXCP; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_DWE) | + (1ull << MSR_DE) | + (1ull << MSR_IR) | + (1ull << MSR_DR); + pcc->mmu_model = POWERPC_MMU_SOFT_4xx; + pcc->excp_model = POWERPC_EXCP_40x; + pcc->bus_model = PPC_FLAGS_INPUT_405; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | 
POWERPC_FLAG_DWE | + POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_440EP(CPUPPCState *env) +{ + /* Time base */ + gen_tbl(env); + gen_spr_BookE(env, 0x000000000000FFFFULL); + gen_spr_440(env); + gen_spr_usprgh(env); + /* Processor identification */ + spr_register(env, SPR_BOOKE_PIR, "PIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pir, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC3, "IAC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC4, "IAC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DVC1, "DVC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DVC2, "DVC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_MCSR, "MCSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_CCR1, "CCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + env->nb_tlb = 64; + env->nb_ways = 1; + env->id_tlbs = 0; + env->tlb_type = TLB_EMB; + + init_excp_BookE(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + ppc40x_irq_init(env_archcpu(env)); + + SET_FIT_PERIOD(12, 16, 20, 24); + SET_WDT_PERIOD(20, 24, 28, 32); +} + +POWERPC_FAMILY(440EP)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 440 EP"; + pcc->init_proc = init_proc_440EP; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | + PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_DCR | PPC_WRTEE | PPC_RFMCI | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_TLBSYNC | PPC_MFTB | + PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | + PPC_440_SPEC; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_DWE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR); + pcc->mmu_model = POWERPC_MMU_BOOKE; + pcc->excp_model = POWERPC_EXCP_BOOKE; + pcc->bus_model = PPC_FLAGS_INPUT_BookE; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | + POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; +} + +POWERPC_FAMILY(460EX)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 460 EX"; + pcc->init_proc = init_proc_440EP; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | + PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_DCR | PPC_DCRX | PPC_WRTEE | PPC_RFMCI | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_TLBSYNC | PPC_MFTB | + PPC_BOOKE | 
PPC_4xx_COMMON | PPC_405_MAC | + PPC_440_SPEC; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_DWE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR); + pcc->mmu_model = POWERPC_MMU_BOOKE; + pcc->excp_model = POWERPC_EXCP_BOOKE; + pcc->bus_model = PPC_FLAGS_INPUT_BookE; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | + POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; +} + +#if defined(TODO_USER_ONLY) +static void init_proc_440GP(CPUPPCState *env) +{ + /* Time base */ + gen_tbl(env); + gen_spr_BookE(env, 0x000000000000FFFFULL); + gen_spr_440(env); + gen_spr_usprgh(env); + /* Processor identification */ + spr_register(env, SPR_BOOKE_PIR, "PIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pir, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC3, "IAC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC4, "IAC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DVC1, "DVC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DVC2, "DVC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + env->nb_tlb = 64; + env->nb_ways = 1; + env->id_tlbs = 0; + env->tlb_type = TLB_EMB; + + init_excp_BookE(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* XXX: TODO: allocate internal IRQ controller */ + + SET_FIT_PERIOD(12, 16, 20, 24); + SET_WDT_PERIOD(20, 24, 28, 32); +} + +POWERPC_FAMILY(440GP)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 440 GP"; + pcc->init_proc = init_proc_440GP; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | + PPC_DCR | PPC_DCRX | PPC_WRTEE | PPC_MFAPIDI | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_TLBSYNC | PPC_TLBIVA | PPC_MFTB | + PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | + PPC_440_SPEC; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_DWE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR); + pcc->mmu_model = POWERPC_MMU_BOOKE; + pcc->excp_model = POWERPC_EXCP_BOOKE; + pcc->bus_model = PPC_FLAGS_INPUT_BookE; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | + POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; +} +#endif + +#if 0 +static void init_proc_440x4(CPUPPCState *env) +{ + /* Time base */ + gen_tbl(env); + gen_spr_BookE(env, 0x000000000000FFFFULL); + gen_spr_440(env); + gen_spr_usprgh(env); + /* Processor identification */ + spr_register(env, SPR_BOOKE_PIR, "PIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pir, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC3, "IAC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC4, "IAC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 
0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DVC1, "DVC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DVC2, "DVC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + env->nb_tlb = 64; + env->nb_ways = 1; + env->id_tlbs = 0; + env->tlb_type = TLB_EMB; + + init_excp_BookE(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* XXX: TODO: allocate internal IRQ controller */ + + SET_FIT_PERIOD(12, 16, 20, 24); + SET_WDT_PERIOD(20, 24, 28, 32); +} + +POWERPC_FAMILY(440x4)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 440x4"; + pcc->init_proc = init_proc_440x4; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | + PPC_DCR | PPC_WRTEE | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_TLBSYNC | PPC_MFTB | + PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC | + PPC_440_SPEC; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_DWE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR); + pcc->mmu_model = POWERPC_MMU_BOOKE; + pcc->excp_model = POWERPC_EXCP_BOOKE; + pcc->bus_model = PPC_FLAGS_INPUT_BookE; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE | + POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; +} +#endif + +static void init_proc_440x5(CPUPPCState *env) +{ + /* Time base */ + gen_tbl(env); + gen_spr_BookE(env, 0x000000000000FFFFULL); + gen_spr_440(env); + gen_spr_usprgh(env); + /* Processor identification */ + spr_register(env, SPR_BOOKE_PIR, "PIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pir, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC3, "IAC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC4, "IAC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DVC1, "DVC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_DVC2, "DVC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_MCSR, "MCSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_440_CCR1, "CCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + env->nb_tlb = 64; + env->nb_ways = 1; + env->id_tlbs = 0; + env->tlb_type = TLB_EMB; + + init_excp_BookE(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + ppc40x_irq_init(env_archcpu(env)); + + SET_FIT_PERIOD(12, 16, 20, 24); + SET_WDT_PERIOD(20, 24, 28, 32); +} + +POWERPC_FAMILY(440x5)(CPUClass *oc, void *data) +{ + 
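+    /*
+     * Editorial note, not part of the imported QEMU code: every
+     * POWERPC_FAMILY() block in this file follows the same pattern.
+     * It only fills in PowerPCCPUClass hooks and feature masks; the
+     * real per-CPU setup happens later, when Unicorn instantiates the
+     * CPU and runs pcc->init_proc against a concrete CPUPPCState.
+     * A minimal sketch of the pattern, with illustrative values:
+     *
+     *     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+     *     pcc->init_proc   = init_proc_440x5;   // SPR/TLB/IRQ setup
+     *     pcc->insns_flags = PPC_INSNS_BASE;    // gates the decoder
+     *     pcc->msr_mask    = (1ull << MSR_EE);  // writable MSR bits
+     */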
+    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+// dc->desc = "PowerPC 440x5";
+    pcc->init_proc = init_proc_440x5;
+    pcc->check_pow = check_pow_nocheck;
+    pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+                       PPC_DCR | PPC_WRTEE | PPC_RFMCI |
+                       PPC_CACHE | PPC_CACHE_ICBI |
+                       PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+                       PPC_MEM_TLBSYNC | PPC_MFTB |
+                       PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+                       PPC_440_SPEC;
+    pcc->msr_mask = (1ull << MSR_POW) |
+                    (1ull << MSR_CE) |
+                    (1ull << MSR_EE) |
+                    (1ull << MSR_PR) |
+                    (1ull << MSR_FP) |
+                    (1ull << MSR_ME) |
+                    (1ull << MSR_FE0) |
+                    (1ull << MSR_DWE) |
+                    (1ull << MSR_DE) |
+                    (1ull << MSR_FE1) |
+                    (1ull << MSR_IR) |
+                    (1ull << MSR_DR);
+    pcc->mmu_model = POWERPC_MMU_BOOKE;
+    pcc->excp_model = POWERPC_EXCP_BOOKE;
+    pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+    pcc->bfd_mach = bfd_mach_ppc_403;
+    pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+                 POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+POWERPC_FAMILY(440x5wDFPU)(CPUClass *oc, void *data)
+{
+    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+// dc->desc = "PowerPC 440x5 with double precision FPU";
+    pcc->init_proc = init_proc_440x5;
+    pcc->check_pow = check_pow_nocheck;
+    pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+                       PPC_FLOAT | PPC_FLOAT_FSQRT |
+                       PPC_FLOAT_STFIWX |
+                       PPC_DCR | PPC_WRTEE | PPC_RFMCI |
+                       PPC_CACHE | PPC_CACHE_ICBI |
+                       PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+                       PPC_MEM_TLBSYNC | PPC_MFTB |
+                       PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+                       PPC_440_SPEC;
+    pcc->insns_flags2 = PPC2_FP_CVT_S64;
+    pcc->msr_mask = (1ull << MSR_POW) |
+                    (1ull << MSR_CE) |
+                    (1ull << MSR_EE) |
+                    (1ull << MSR_PR) |
+                    (1ull << MSR_FP) |
+                    (1ull << MSR_ME) |
+                    (1ull << MSR_FE0) |
+                    (1ull << MSR_DWE) |
+                    (1ull << MSR_DE) |
+                    (1ull << MSR_FE1) |
+                    (1ull << MSR_IR) |
+                    (1ull << MSR_DR);
+    pcc->mmu_model = POWERPC_MMU_BOOKE;
+    pcc->excp_model = POWERPC_EXCP_BOOKE;
+    pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+    pcc->bfd_mach = bfd_mach_ppc_403;
+    pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+                 POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+#if defined(TODO_USER_ONLY)
+static void init_proc_MPC5xx(CPUPPCState *env)
+{
+    /* Time base */
+    gen_tbl(env);
+    gen_spr_5xx_8xx(env);
+    gen_spr_5xx(env);
+    init_excp_MPC5xx(env);
+    env->dcache_line_size = 32;
+    env->icache_line_size = 32;
+    /* XXX: TODO: allocate internal IRQ controller */
+}
+
+POWERPC_FAMILY(MPC5xx)(CPUClass *oc, void *data)
+{
+    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+// dc->desc = "Freescale 5xx cores (aka RCPU)";
+    pcc->init_proc = init_proc_MPC5xx;
+    pcc->check_pow = check_pow_none;
+    pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+                       PPC_MEM_EIEIO | PPC_MEM_SYNC |
+                       PPC_CACHE_ICBI | PPC_FLOAT | PPC_FLOAT_STFIWX |
+                       PPC_MFTB;
+    pcc->msr_mask = (1ull << MSR_ILE) |
+                    (1ull << MSR_EE) |
+                    (1ull << MSR_PR) |
+                    (1ull << MSR_FP) |
+                    (1ull << MSR_ME) |
+                    (1ull << MSR_FE0) |
+                    (1ull << MSR_SE) |
+                    (1ull << MSR_DE) |
+                    (1ull << MSR_FE1) |
+                    (1ull << MSR_EP) |
+                    (1ull << MSR_RI) |
+                    (1ull << MSR_LE);
+    pcc->mmu_model = POWERPC_MMU_REAL;
+    pcc->excp_model = POWERPC_EXCP_603;
+    pcc->bus_model = PPC_FLAGS_INPUT_RCPU;
+    pcc->bfd_mach = bfd_mach_ppc_505;
+    pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+                 POWERPC_FLAG_BUS_CLK;
+}
+#endif
+
+#if defined(TODO_USER_ONLY)
+static void init_proc_MPC8xx(CPUPPCState *env)
+{
+    /* Time base */
+    gen_tbl(env);
+    gen_spr_5xx_8xx(env);
+    gen_spr_8xx(env);
+    init_excp_MPC8xx(env);
+    env->dcache_line_size = 32;
+    env->icache_line_size = 32;
+    /* XXX: TODO: allocate internal IRQ controller */
+}
+
+POWERPC_FAMILY(MPC8xx)(CPUClass *oc, void
*data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "Freescale 8xx cores (aka PowerQUICC)"; + pcc->init_proc = init_proc_MPC8xx; + pcc->check_pow = check_pow_none; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | + PPC_MEM_EIEIO | PPC_MEM_SYNC | + PPC_CACHE_ICBI | PPC_MFTB; + pcc->msr_mask = (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_MPC8xx; + pcc->excp_model = POWERPC_EXCP_603; + pcc->bus_model = PPC_FLAGS_INPUT_RCPU; + pcc->bfd_mach = bfd_mach_ppc_860; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_BUS_CLK; +} +#endif + +/* Freescale 82xx cores (aka PowerQUICC-II) */ + +static void init_proc_G2(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_G2_755(env); + gen_spr_G2(env); + /* Time base */ + gen_tbl(env); + /* External access control */ + /* XXX : not implemented */ + spr_register(env, SPR_EAR, "EAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Hardware implementation register */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_high_BATs(env); + gen_6xx_7xx_soft_tlb(env, 64, 2); + init_excp_G2(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(G2)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC G2"; + pcc->init_proc = init_proc_G2; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_TGPR) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_AL) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_RI); + pcc->mmu_model = POWERPC_MMU_SOFT_6xx; + pcc->excp_model = POWERPC_EXCP_G2; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_ec603e; + pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_G2LE(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_G2_755(env); + gen_spr_G2(env); + /* Time base */ + gen_tbl(env); + /* External access control */ + /* XXX : not implemented */ + spr_register(env, SPR_EAR, "EAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Hardware implementation register */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + 
&spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + /* Memory management */ + gen_low_BATs(env); + gen_high_BATs(env); + gen_6xx_7xx_soft_tlb(env, 64, 2); + init_excp_G2(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(G2LE)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC G2LE"; + pcc->init_proc = init_proc_G2LE; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_TGPR) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_AL) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_6xx; + pcc->excp_model = POWERPC_EXCP_G2; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_ec603e; + pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_e200(CPUPPCState *env) +{ + /* Time base */ + gen_tbl(env); + gen_spr_BookE(env, 0x000000070000FFFFULL); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR", + &spr_read_spefscr, &spr_write_spefscr, + &spr_read_spefscr, &spr_write_spefscr, + 0x00000000); + /* Memory management */ + gen_spr_BookE206(env, 0x0000005D, NULL, 0); + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_ALTCTXCR, "ALTCTXCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_BUCSR, "BUCSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_CTXCR, "CTXCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_DBCNT, "DBCNT", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_DBCR3, "DBCR3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0", + &spr_read_generic, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, 
&spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_L1FINV0, "L1FINV0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC3, "IAC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_IAC4, "IAC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MMUCSR0, "MMUCSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); /* TOFIX */ + spr_register(env, SPR_BOOKE_DSRR0, "DSRR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_DSRR1, "DSRR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + env->nb_tlb = 64; + env->nb_ways = 1; + env->id_tlbs = 0; + env->tlb_type = TLB_EMB; + + init_excp_e200(env, 0xFFFF0000UL); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* XXX: TODO: allocate internal IRQ controller */ +} + +POWERPC_FAMILY(e200)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "e200 core"; + pcc->init_proc = init_proc_e200; + pcc->check_pow = check_pow_hid0; + /* + * XXX: unimplemented instructions: + * dcblc + * dcbtlst + * dcbtstls + * icblc + * icbtls + * tlbivax + * all SPE multiply-accumulate instructions + */ + pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | + PPC_SPE | PPC_SPE_SINGLE | + PPC_WRTEE | PPC_RFDI | + PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_TLBSYNC | PPC_TLBIVAX | + PPC_BOOKE; + pcc->msr_mask = (1ull << MSR_UCLE) | + (1ull << MSR_SPE) | + (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_DWE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR); + pcc->mmu_model = POWERPC_MMU_BOOKE206; + pcc->excp_model = POWERPC_EXCP_BOOKE; + pcc->bus_model = PPC_FLAGS_INPUT_BookE; + pcc->bfd_mach = bfd_mach_ppc_860; + pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE | + POWERPC_FLAG_UBLE | POWERPC_FLAG_DE | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_e300(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_603(env); + /* Time base */ + gen_tbl(env); + /* hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Breakpoints */ + /* XXX : not implemented */ + spr_register(env, SPR_DABR, "DABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX 
: not implemented */ + spr_register(env, SPR_DABR2, "DABR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_IABR2, "IABR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_IBCR, "IBCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_DBCR, "DBCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_high_BATs(env); + gen_6xx_7xx_soft_tlb(env, 64, 2); + init_excp_603(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(e300)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "e300 core"; + pcc->init_proc = init_proc_e300; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_TGPR) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_AL) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_6xx; + pcc->excp_model = POWERPC_EXCP_603; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_603; + pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; +} + +static void spr_write_mas73(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv val = tcg_temp_new(tcg_ctx); + tcg_gen_ext32u_tl(tcg_ctx, val, cpu_gpr[gprn]); + gen_store_spr(tcg_ctx, SPR_BOOKE_MAS3, val); + tcg_gen_shri_tl(tcg_ctx, val, cpu_gpr[gprn], 32); + gen_store_spr(tcg_ctx, SPR_BOOKE_MAS7, val); + tcg_temp_free(tcg_ctx, val); +} + +static void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv mas7 = tcg_temp_new(tcg_ctx); + TCGv mas3 = tcg_temp_new(tcg_ctx); + gen_load_spr(tcg_ctx, mas7, SPR_BOOKE_MAS7); + tcg_gen_shli_tl(tcg_ctx, mas7, mas7, 32); + gen_load_spr(tcg_ctx, mas3, SPR_BOOKE_MAS3); + tcg_gen_or_tl(tcg_ctx, cpu_gpr[gprn], mas3, mas7); + tcg_temp_free(tcg_ctx, mas3); + tcg_temp_free(tcg_ctx, mas7); +} + +enum fsl_e500_version { + fsl_e500v1, + fsl_e500v2, + fsl_e500mc, + fsl_e5500, + fsl_e6500, +}; + +static void init_proc_e500(CPUPPCState *env, int version) +{ + uint32_t tlbncfg[2]; + uint64_t ivor_mask; + uint64_t ivpr_mask = 0xFFFF0000ULL; + uint32_t l1cfg0 = 0x3800 /* 8 ways */ + | 0x0020; /* 32 kb */ + uint32_t l1cfg1 = 0x3800 /* 8 ways */ + | 0x0020; /* 32 kb */ + uint32_t mmucfg = 0; + int i; + + /* Time base */ + gen_tbl(env); + /* + * XXX The e500 doesn't implement IVOR7 and IVOR9, but doesn't + * complain when accessing them. 
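+     * Each set bit in the ivor_mask handed to gen_spr_BookE() below
+     * registers one IVORn SPR. The commented-out mask would hide the
+     * unimplemented IVOR7/IVOR9 (bits 7 and 9 clear), but since real
+     * parts tolerate accesses to them, the switch below keeps them
+     * visible: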
+ * gen_spr_BookE(env, 0x0000000F0000FD7FULL); + */ + switch (version) { + case fsl_e500v1: + case fsl_e500v2: + default: + ivor_mask = 0x0000000F0000FFFFULL; + break; + case fsl_e500mc: + case fsl_e5500: + ivor_mask = 0x000003FE0000FFFFULL; + break; + case fsl_e6500: + ivor_mask = 0x000003FF0000FFFFULL; + break; + } + gen_spr_BookE(env, ivor_mask); + gen_spr_usprg3(env); + /* Processor identification */ + spr_register(env, SPR_BOOKE_PIR, "PIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pir, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR", + &spr_read_spefscr, &spr_write_spefscr, + &spr_read_spefscr, &spr_write_spefscr, + 0x00000000); + /* Memory management */ + env->nb_pids = 3; + env->nb_ways = 2; + env->id_tlbs = 0; + switch (version) { + case fsl_e500v1: + tlbncfg[0] = gen_tlbncfg(2, 1, 1, 0, 256); + tlbncfg[1] = gen_tlbncfg(16, 1, 9, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); + break; + case fsl_e500v2: + tlbncfg[0] = gen_tlbncfg(4, 1, 1, 0, 512); + tlbncfg[1] = gen_tlbncfg(16, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16); + break; + case fsl_e500mc: + case fsl_e5500: + tlbncfg[0] = gen_tlbncfg(4, 1, 1, 0, 512); + tlbncfg[1] = gen_tlbncfg(64, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 64); + break; + case fsl_e6500: + mmucfg = 0x6510B45; + env->nb_pids = 1; + tlbncfg[0] = 0x08052400; + tlbncfg[1] = 0x40028040; + break; + default: + cpu_abort(env_cpu(env), "Unknown CPU: " TARGET_FMT_lx "\n", + env->spr[SPR_PVR]); + } + + /* Cache sizes */ + switch (version) { + case fsl_e500v1: + case fsl_e500v2: + env->dcache_line_size = 32; + env->icache_line_size = 32; + break; + case fsl_e500mc: + case fsl_e5500: + env->dcache_line_size = 64; + env->icache_line_size = 64; + l1cfg0 |= 0x1000000; /* 64 byte cache block size */ + l1cfg1 |= 0x1000000; /* 64 byte cache block size */ + break; + case fsl_e6500: + env->dcache_line_size = 32; + env->icache_line_size = 32; + l1cfg0 |= 0x0F83820; + l1cfg1 |= 0x0B83820; + break; + default: + cpu_abort(env_cpu(env), "Unknown CPU: " TARGET_FMT_lx "\n", + env->spr[SPR_PVR]); + } + gen_spr_BookE206(env, 0x000000DF, tlbncfg, mmucfg); + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_BBEAR, "BBEAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_BBTAR, "BBTAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_MCAR, "MCAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_BOOKE_MCSR, "MCSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_NPIDR, "NPIDR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_BUCSR, "BUCSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0", + &spr_read_generic, SPR_NOACCESS, + &spr_read_generic, 
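+                 /* Editorial note: both write hooks are SPR_NOACCESS,
+                  * so L1CFG0 is read-only at every privilege level and
+                  * resets to the l1cfg0 geometry word built above. */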
SPR_NOACCESS, + l1cfg0); + spr_register(env, SPR_Exxx_L1CFG1, "L1CFG1", + &spr_read_generic, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + l1cfg1); + spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_e500_l1csr0, + 0x00000000); + spr_register(env, SPR_Exxx_L1CSR1, "L1CSR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_e500_l1csr1, + 0x00000000); + spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_MMUCSR0, "MMUCSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke206_mmucsr0, + 0x00000000); + spr_register(env, SPR_BOOKE_EPR, "EPR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + /* XXX better abstract into Emb.xxx features */ + if ((version == fsl_e5500) || (version == fsl_e6500)) { + spr_register(env, SPR_BOOKE_EPCR, "EPCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BOOKE_MAS7_MAS3, "MAS7_MAS3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_mas73, &spr_write_mas73, + 0x00000000); + ivpr_mask = (target_ulong)~0xFFFFULL; + } + + if (version == fsl_e6500) { + /* Thread identification */ + spr_register(env, SPR_TIR, "TIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_BOOKE_TLB0PS, "TLB0PS", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000004); + spr_register(env, SPR_BOOKE_TLB1PS, "TLB1PS", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x7FFFFFFC); + } + + env->nb_tlb = 0; + env->tlb_type = TLB_MAS; + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + env->nb_tlb += booke206_tlb_size(env, i); + } + + init_excp_e200(env, ivpr_mask); + /* Allocate hardware IRQ controller */ + ppce500_irq_init(env_archcpu(env)); +} + +static void init_proc_e500v1(CPUPPCState *env) +{ + init_proc_e500(env, fsl_e500v1); +} + +POWERPC_FAMILY(e500v1)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "e500v1 core"; + pcc->init_proc = init_proc_e500v1; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | + PPC_SPE | PPC_SPE_SINGLE | + PPC_WRTEE | PPC_RFDI | + PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC; + pcc->insns_flags2 = PPC2_BOOKE206; + pcc->msr_mask = (1ull << MSR_UCLE) | + (1ull << MSR_SPE) | + (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_DWE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR); + pcc->mmu_model = POWERPC_MMU_BOOKE206; + pcc->excp_model = POWERPC_EXCP_BOOKE; + pcc->bus_model = PPC_FLAGS_INPUT_BookE; + pcc->bfd_mach = bfd_mach_ppc_860; + pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE | + POWERPC_FLAG_UBLE | POWERPC_FLAG_DE | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_e500v2(CPUPPCState *env) +{ + init_proc_e500(env, fsl_e500v2); +} + +POWERPC_FAMILY(e500v2)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "e500v2 core"; + pcc->init_proc = init_proc_e500v2; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = 
PPC_INSNS_BASE | PPC_ISEL | + PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE | + PPC_WRTEE | PPC_RFDI | + PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC; + pcc->insns_flags2 = PPC2_BOOKE206; + pcc->msr_mask = (1ull << MSR_UCLE) | + (1ull << MSR_SPE) | + (1ull << MSR_POW) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_DWE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR); +#if 0 + pcc->mmu_model = POWERPC_MMU_BOOKE206; +#else + /* disable mmu */ + pcc->mmu_model = POWERPC_MMU_REAL; +#endif + pcc->excp_model = POWERPC_EXCP_BOOKE; + pcc->bus_model = PPC_FLAGS_INPUT_BookE; + pcc->bfd_mach = bfd_mach_ppc_860; + pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE | + POWERPC_FLAG_UBLE | POWERPC_FLAG_DE | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_e500mc(CPUPPCState *env) +{ + init_proc_e500(env, fsl_e500mc); +} + +POWERPC_FAMILY(e500mc)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "e500mc core"; + pcc->init_proc = init_proc_e500mc; + pcc->check_pow = check_pow_none; + pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB | + PPC_WRTEE | PPC_RFDI | PPC_RFMCI | + PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_FLOAT | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_FSEL | + PPC_FLOAT_STFIWX | PPC_WAIT | + PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC; + pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL; + pcc->msr_mask = (1ull << MSR_GS) | + (1ull << MSR_UCLE) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PX) | + (1ull << MSR_RI); + pcc->mmu_model = POWERPC_MMU_BOOKE206; + pcc->excp_model = POWERPC_EXCP_BOOKE; + pcc->bus_model = PPC_FLAGS_INPUT_BookE; + /* FIXME: figure out the correct flag for e500mc */ + pcc->bfd_mach = bfd_mach_ppc_e500; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +#ifdef TARGET_PPC64 +static void init_proc_e5500(CPUPPCState *env) +{ + init_proc_e500(env, fsl_e5500); +} + +POWERPC_FAMILY(e5500)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "e5500 core"; + pcc->init_proc = init_proc_e5500; + pcc->check_pow = check_pow_none; + pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB | + PPC_WRTEE | PPC_RFDI | PPC_RFMCI | + PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_FLOAT | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_FSEL | + PPC_FLOAT_STFIWX | PPC_WAIT | + PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC | + PPC_64B | PPC_POPCNTB | PPC_POPCNTWD; + pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL | PPC2_PERM_ISA206 | \ + PPC2_FP_CVT_S64; + pcc->msr_mask = (1ull << MSR_CM) | + (1ull << MSR_GS) | + (1ull << MSR_UCLE) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PX) | + (1ull << MSR_RI); + pcc->mmu_model = POWERPC_MMU_BOOKE206; + pcc->excp_model = POWERPC_EXCP_BOOKE; + pcc->bus_model = PPC_FLAGS_INPUT_BookE; + /* FIXME: figure out the correct flag for e5500 */ + pcc->bfd_mach = 
bfd_mach_ppc_e500; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_e6500(CPUPPCState *env) +{ + init_proc_e500(env, fsl_e6500); +} + +POWERPC_FAMILY(e6500)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "e6500 core"; + pcc->init_proc = init_proc_e6500; + pcc->check_pow = check_pow_none; + pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB | + PPC_WRTEE | PPC_RFDI | PPC_RFMCI | + PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | + PPC_CACHE_DCBZ | PPC_CACHE_DCBA | + PPC_FLOAT | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_FSEL | + PPC_FLOAT_STFIWX | PPC_WAIT | + PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC | + PPC_64B | PPC_POPCNTB | PPC_POPCNTWD | PPC_ALTIVEC; + pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL | PPC2_PERM_ISA206 | \ + PPC2_FP_CVT_S64 | PPC2_ATOMIC_ISA206; + pcc->msr_mask = (1ull << MSR_CM) | + (1ull << MSR_GS) | + (1ull << MSR_UCLE) | + (1ull << MSR_CE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IS) | + (1ull << MSR_DS) | + (1ull << MSR_PX) | + (1ull << MSR_RI) | + (1ull << MSR_VR); + pcc->mmu_model = POWERPC_MMU_BOOKE206; + pcc->excp_model = POWERPC_EXCP_BOOKE; + pcc->bus_model = PPC_FLAGS_INPUT_BookE; + pcc->bfd_mach = bfd_mach_ppc_e500; + pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_VRE; +} + +#endif + +/* Non-embedded PowerPC */ + +#define POWERPC_MSRR_601 (0x0000000000001040ULL) + +static void init_proc_601(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_601(env); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_hid0_601, + 0x80010080); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_601_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_601_HID5, "HID5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + init_excp_601(env); + /* + * XXX: beware that dcache line size is 64 + * but dcbz uses 32 bytes "sectors" + * XXX: this breaks clcs instruction ! 
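+     * (Hence the compromise below: dcache_line_size is reported as 32
+     * so that dcbz clears one architected "sector", at the price of
+     * clcs seeing the wrong d-cache line size.)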
+ */ + env->dcache_line_size = 32; + env->icache_line_size = 64; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(601)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 601"; + pcc->init_proc = init_proc_601; + pcc->check_pow = check_pow_none; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR | + PPC_FLOAT | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR); + pcc->mmu_model = POWERPC_MMU_601; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_601; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_601; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK; +} + +#define POWERPC_MSRR_601v (0x0000000000001040ULL) + +static void init_proc_601v(CPUPPCState *env) +{ + init_proc_601(env); + /* XXX : not implemented */ + spr_register(env, SPR_601_HID15, "HID15", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +POWERPC_FAMILY(601v)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 601v"; + pcc->init_proc = init_proc_601v; + pcc->check_pow = check_pow_none; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR | + PPC_FLOAT | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR); + pcc->mmu_model = POWERPC_MMU_601; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_601; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK; +} + +static void init_proc_602(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_602(env); + /* Time base */ + gen_tbl(env); + /* hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_6xx_7xx_soft_tlb(env, 64, 2); + init_excp_602(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(602)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 602"; + pcc->init_proc = init_proc_602; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_6xx_TLB | PPC_MEM_TLBSYNC | + PPC_SEGMENT | PPC_602_SPEC; + pcc->msr_mask = (1ull << MSR_VSX) | + (1ull << MSR_SA) | + (1ull << MSR_POW) | + (1ull << MSR_TGPR) | + 
(1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + /* XXX: 602 MMU is quite specific. Should add a special case */ + pcc->mmu_model = POWERPC_MMU_SOFT_6xx; + pcc->excp_model = POWERPC_EXCP_602; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_602; + pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_603(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_603(env); + /* Time base */ + gen_tbl(env); + /* hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_6xx_7xx_soft_tlb(env, 64, 2); + init_excp_603(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(603)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 603"; + pcc->init_proc = init_proc_603; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_TGPR) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_6xx; + pcc->excp_model = POWERPC_EXCP_603; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_603; + pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_603E(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_603(env); + /* Time base */ + gen_tbl(env); + /* hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_6xx_7xx_soft_tlb(env, 64, 2); + init_excp_603(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(603E)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 603e"; + pcc->init_proc = init_proc_603E; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + 
PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_TGPR) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_6xx; + pcc->excp_model = POWERPC_EXCP_603E; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_ec603e; + pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_604(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_604(env); + /* Time base */ + gen_tbl(env); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + init_excp_604(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(604)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 604"; + pcc->init_proc = init_proc_604; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_604; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_604; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_604E(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_604(env); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_MMCR1, "MMCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC3, "PMC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC4, "PMC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Time base */ + gen_tbl(env); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + 
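+    /*
+     * Editorial note on the spr_register() calls used throughout this
+     * file (a description of the existing pattern, not new behaviour):
+     * the two callback pairs are the user-mode and supervisor-mode
+     * accessors, so a typical privileged-only SPR looks like
+     *
+     *     spr_register(env, SPR_HID0, "HID0",
+     *                  SPR_NOACCESS, SPR_NOACCESS,        // user r/w
+     *                  &spr_read_generic, &spr_write_generic, // OS r/w
+     *                  0x00000000);                       // reset value
+     *
+     * where the generic hooks simply load/store env->spr[SPR_HID0].
+     */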
init_excp_604(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(604E)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 604E"; + pcc->init_proc = init_proc_604E; + pcc->check_pow = check_pow_nocheck; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_604; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_604; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_740(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* Time base */ + gen_tbl(env); + /* Thermal management */ + gen_spr_thrm(env); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + init_excp_7x0(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(740)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 740"; + pcc->init_proc = init_proc_740; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_7x0; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_750; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_750(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, spr_access_nop, + 0x00000000); + /* Time base */ + gen_tbl(env); + /* Thermal management 
*/ + gen_spr_thrm(env); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + /* + * XXX: high BATs are also present but are known to be bugged on + * die version 1.x + */ + init_excp_7x0(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(750)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 750"; + pcc->init_proc = init_proc_750; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_7x0; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_750; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_750cl(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, spr_access_nop, + 0x00000000); + /* Time base */ + gen_tbl(env); + /* Thermal management */ + /* Those registers are fake on 750CL */ + spr_register(env, SPR_THRM1, "THRM1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_THRM2, "THRM2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_THRM3, "THRM3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX: not implemented */ + spr_register(env, SPR_750_TDCL, "TDCL", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_750_TDCH, "TDCH", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* DMA */ + /* XXX : not implemented */ + spr_register(env, SPR_750_WPAR, "WPAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_750_DMAL, "DMAL", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_750_DMAU, "DMAU", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, 
"HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_750CL_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_750CL_HID4, "HID4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Quantization registers */ + /* XXX : not implemented */ + spr_register(env, SPR_750_GQR0, "GQR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_750_GQR1, "GQR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_750_GQR2, "GQR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_750_GQR3, "GQR3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_750_GQR4, "GQR4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_750_GQR5, "GQR5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_750_GQR6, "GQR6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_750_GQR7, "GQR7", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + /* PowerPC 750cl has 8 DBATs and 8 IBATs */ + gen_high_BATs(env); + init_excp_750cl(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(750cl)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 750 CL"; + pcc->init_proc = init_proc_750cl; + pcc->check_pow = check_pow_hid0; + /* + * XXX: not implemented: + * cache lock instructions: + * dcbz_l + * floating point paired instructions + * psq_lux + * psq_lx + * psq_stux + * psq_stx + * ps_abs + * ps_add + * ps_cmpo0 + * ps_cmpo1 + * ps_cmpu0 + * ps_cmpu1 + * ps_div + * ps_madd + * ps_madds0 + * ps_madds1 + * ps_merge00 + * ps_merge01 + * ps_merge10 + * ps_merge11 + * ps_mr + * ps_msub + * ps_mul + * ps_muls0 + * ps_muls1 + * ps_nabs + * ps_neg + * ps_nmadd + * ps_nmsub + * ps_res + * ps_rsqrte + * ps_sel + * ps_sub + * ps_sum0 + * ps_sum1 + */ + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_7x0; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + 
pcc->bfd_mach = bfd_mach_ppc_750; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_750cx(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, spr_access_nop, + 0x00000000); + /* Time base */ + gen_tbl(env); + /* Thermal management */ + gen_spr_thrm(env); + /* This register is not implemented but is present for compatibility */ + spr_register(env, SPR_SDA, "SDA", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + /* PowerPC 750cx has 8 DBATs and 8 IBATs */ + gen_high_BATs(env); + init_excp_750cx(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(750cx)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 750CX"; + pcc->init_proc = init_proc_750cx; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_7x0; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_750; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_750fx(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, spr_access_nop, + 0x00000000); + /* Time base */ + gen_tbl(env); + /* Thermal management */ + gen_spr_thrm(env); + /* XXX : not implemented */ + spr_register(env, SPR_750_THRM4, "THRM4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_750FX_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + /* PowerPC 750fx & 
750gx has 8 DBATs and 8 IBATs */ + gen_high_BATs(env); + init_excp_7x0(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(750fx)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 750FX"; + pcc->init_proc = init_proc_750fx; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_7x0; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_750; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_750gx(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* XXX : not implemented (XXX: different from 750fx) */ + spr_register(env, SPR_L2CR, "L2CR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, spr_access_nop, + 0x00000000); + /* Time base */ + gen_tbl(env); + /* Thermal management */ + gen_spr_thrm(env); + /* XXX : not implemented */ + spr_register(env, SPR_750_THRM4, "THRM4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Hardware implementation registers */ + /* XXX : not implemented (XXX: different from 750fx) */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented (XXX: different from 750fx) */ + spr_register(env, SPR_750FX_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + /* PowerPC 750fx & 750gx has 8 DBATs and 8 IBATs */ + gen_high_BATs(env); + init_excp_7x0(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(750gx)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 750GX"; + pcc->init_proc = init_proc_750gx; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << 
MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_7x0; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_750; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_745(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + gen_spr_G2_755(env); + /* Time base */ + gen_tbl(env); + /* Thermal management */ + gen_spr_thrm(env); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_high_BATs(env); + gen_6xx_7xx_soft_tlb(env, 64, 2); + init_excp_7x5(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(745)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 745"; + pcc->init_proc = init_proc_745; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_6xx; + pcc->excp_model = POWERPC_EXCP_7x5; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_750; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_755(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + gen_spr_G2_755(env); + /* Time base */ + gen_tbl(env); + /* L2 cache control */ + /* XXX : not implemented */ + spr_register(env, SPR_L2CR, "L2CR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, spr_access_nop, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_L2PMCR, "L2PMCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Thermal management */ + gen_spr_thrm(env); + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_HID2, "HID2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory 
management */ + gen_low_BATs(env); + gen_high_BATs(env); + gen_6xx_7xx_soft_tlb(env, 64, 2); + init_excp_7x5(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(755)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 755"; + pcc->init_proc = init_proc_755; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB | + PPC_SEGMENT | PPC_EXTERN; + pcc->msr_mask = (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_6xx; + pcc->excp_model = POWERPC_EXCP_7x5; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_750; + pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE | + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_7400(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* Time base */ + gen_tbl(env); + /* 74xx specific SPR */ + gen_spr_74xx(env); + /* XXX : not implemented */ + spr_register(env, SPR_UBAMR, "UBAMR", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX: this seems not implemented on all revisions. */ + /* XXX : not implemented */ + spr_register(env, SPR_MSSCR1, "MSSCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Thermal management */ + gen_spr_thrm(env); + /* Memory management */ + gen_low_BATs(env); + init_excp_7400(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(7400)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 7400 (aka G4)"; + pcc->init_proc = init_proc_7400; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBA | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_MEM_TLBIA | + PPC_SEGMENT | PPC_EXTERN | + PPC_ALTIVEC; + pcc->msr_mask = (1ull << MSR_VR) | + (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_74xx; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_7400; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_7410(CPUPPCState *env) +{ + gen_spr_ne_601(env); + 
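/* + * Each gen_spr_* helper below fills env->spr_cb[] via spr_register(): + * an entry holds separate user-mode and supervisor-mode read/write + * callbacks plus a reset value, and SPR_NOACCESS marks accesses that + * must raise an exception instead. + */ +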
gen_spr_sdr1(env); + gen_spr_7xx(env); + /* Time base */ + gen_tbl(env); + /* 74xx specific SPR */ + gen_spr_74xx(env); + /* XXX : not implemented */ + spr_register(env, SPR_UBAMR, "UBAMR", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* Thermal management */ + gen_spr_thrm(env); + /* L2PMCR */ + /* XXX : not implemented */ + spr_register(env, SPR_L2PMCR, "L2PMCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* LDSTDB */ + /* XXX : not implemented */ + spr_register(env, SPR_LDSTDB, "LDSTDB", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + init_excp_7400(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(7410)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 7410 (aka G4)"; + pcc->init_proc = init_proc_7410; + pcc->check_pow = check_pow_hid0; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBA | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_MEM_TLBIA | + PPC_SEGMENT | PPC_EXTERN | + PPC_ALTIVEC; + pcc->msr_mask = (1ull << MSR_VR) | + (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_74xx; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_7400; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_7440(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* Time base */ + gen_tbl(env); + /* 74xx specific SPR */ + gen_spr_74xx(env); + /* XXX : not implemented */ + spr_register(env, SPR_UBAMR, "UBAMR", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* LDSTCR */ + /* XXX : not implemented */ + spr_register(env, SPR_LDSTCR, "LDSTCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* ICTRL */ + /* XXX : not implemented */ + spr_register(env, SPR_ICTRL, "ICTRL", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* MSSSR0 */ + /* XXX : not implemented */ + spr_register(env, SPR_MSSSR0, "MSSSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* PMC */ + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC5, "PMC5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, 
SPR_7XX_UPMC6, "UPMC6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_74xx_soft_tlb(env, 128, 2); + init_excp_7450(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(7440)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 7440 (aka G4)"; + pcc->init_proc = init_proc_7440; + pcc->check_pow = check_pow_hid0_74xx; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBA | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_SEGMENT | PPC_EXTERN | + PPC_ALTIVEC; + pcc->msr_mask = (1ull << MSR_VR) | + (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_74xx; + pcc->excp_model = POWERPC_EXCP_74xx; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_7400; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_7450(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* Time base */ + gen_tbl(env); + /* 74xx specific SPR */ + gen_spr_74xx(env); + /* Level 3 cache control */ + gen_l3_ctrl(env); + /* L3ITCR1 */ + /* XXX : not implemented */ + spr_register(env, SPR_L3ITCR1, "L3ITCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* L3ITCR2 */ + /* XXX : not implemented */ + spr_register(env, SPR_L3ITCR2, "L3ITCR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* L3ITCR3 */ + /* XXX : not implemented */ + spr_register(env, SPR_L3ITCR3, "L3ITCR3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* L3OHCR */ + /* XXX : not implemented */ + spr_register(env, SPR_L3OHCR, "L3OHCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_UBAMR, "UBAMR", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* LDSTCR */ + /* XXX : not implemented */ + spr_register(env, SPR_LDSTCR, "LDSTCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* ICTRL */ + /* XXX : not implemented */ + spr_register(env, SPR_ICTRL, "ICTRL", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* MSSSR0 */ + /* XXX : not implemented */ + spr_register(env, SPR_MSSSR0, "MSSSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* PMC */ + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC5, "PMC5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not 
implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_74xx_soft_tlb(env, 128, 2); + init_excp_7450(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(7450)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 7450 (aka G4)"; + pcc->init_proc = init_proc_7450; + pcc->check_pow = check_pow_hid0_74xx; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBA | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_SEGMENT | PPC_EXTERN | + PPC_ALTIVEC; + pcc->msr_mask = (1ull << MSR_VR) | + (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_74xx; + pcc->excp_model = POWERPC_EXCP_74xx; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_7400; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_7445(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* Time base */ + gen_tbl(env); + /* 74xx specific SPR */ + gen_spr_74xx(env); + /* LDSTCR */ + /* XXX : not implemented */ + spr_register(env, SPR_LDSTCR, "LDSTCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* ICTRL */ + /* XXX : not implemented */ + spr_register(env, SPR_ICTRL, "ICTRL", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* MSSSR0 */ + /* XXX : not implemented */ + spr_register(env, SPR_MSSSR0, "MSSSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* PMC */ + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC5, "PMC5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* SPRGs */ + spr_register(env, SPR_SPRG4, "SPRG4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG4, "USPRG4", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG5, "SPRG5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, 
SPR_USPRG5, "USPRG5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG6, "SPRG6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG6, "USPRG6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG7, "SPRG7", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG7, "USPRG7", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_high_BATs(env); + gen_74xx_soft_tlb(env, 128, 2); + init_excp_7450(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(7445)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 7445 (aka G4)"; + pcc->init_proc = init_proc_7445; + pcc->check_pow = check_pow_hid0_74xx; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBA | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_SEGMENT | PPC_EXTERN | + PPC_ALTIVEC; + pcc->msr_mask = (1ull << MSR_VR) | + (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_74xx; + pcc->excp_model = POWERPC_EXCP_74xx; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_7400; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK; +} + +static void init_proc_7455(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* Time base */ + gen_tbl(env); + /* 74xx specific SPR */ + gen_spr_74xx(env); + /* Level 3 cache control */ + gen_l3_ctrl(env); + /* LDSTCR */ + /* XXX : not implemented */ + spr_register(env, SPR_LDSTCR, "LDSTCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* ICTRL */ + /* XXX : not implemented */ + spr_register(env, SPR_ICTRL, "ICTRL", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* MSSSR0 */ + /* XXX : not implemented */ + spr_register(env, SPR_MSSSR0, "MSSSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* PMC */ + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC5, "PMC5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* SPRGs */ + spr_register(env, 
SPR_SPRG4, "SPRG4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG4, "USPRG4", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG5, "SPRG5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG5, "USPRG5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG6, "SPRG6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG6, "USPRG6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG7, "SPRG7", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG7, "USPRG7", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_high_BATs(env); + gen_74xx_soft_tlb(env, 128, 2); + init_excp_7450(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(7455)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 7455 (aka G4)"; + pcc->init_proc = init_proc_7455; + pcc->check_pow = check_pow_hid0_74xx; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBA | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_SEGMENT | PPC_EXTERN | + PPC_ALTIVEC; + pcc->msr_mask = (1ull << MSR_VR) | + (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_74xx; + pcc->excp_model = POWERPC_EXCP_74xx; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_7400; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK; +} + +#if 0 +static void init_proc_7457(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* Time base */ + gen_tbl(env); + /* 74xx specific SPR */ + gen_spr_74xx(env); + /* Level 3 cache control */ + gen_l3_ctrl(env); + /* L3ITCR1 */ + /* XXX : not implemented */ + spr_register(env, SPR_L3ITCR1, "L3ITCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* L3ITCR2 */ + /* XXX : not implemented */ + spr_register(env, SPR_L3ITCR2, "L3ITCR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* L3ITCR3 */ + /* XXX : not implemented */ + spr_register(env, SPR_L3ITCR3, "L3ITCR3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* L3OHCR */ + /* XXX : not implemented */ + spr_register(env, SPR_L3OHCR, "L3OHCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* LDSTCR */ + /* XXX : not implemented */ + spr_register(env, SPR_LDSTCR, "LDSTCR", + SPR_NOACCESS, 
SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* ICTRL */ + /* XXX : not implemented */ + spr_register(env, SPR_ICTRL, "ICTRL", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* MSSSR0 */ + /* XXX : not implemented */ + spr_register(env, SPR_MSSSR0, "MSSSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* PMC */ + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC5, "PMC5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* SPRGs */ + spr_register(env, SPR_SPRG4, "SPRG4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG4, "USPRG4", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG5, "SPRG5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG5, "USPRG5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG6, "SPRG6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG6, "USPRG6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG7, "SPRG7", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG7, "USPRG7", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_high_BATs(env); + gen_74xx_soft_tlb(env, 128, 2); + init_excp_7450(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(7457)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 7457 (aka G4)"; + pcc->init_proc = init_proc_7457; + pcc->check_pow = check_pow_hid0_74xx; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBA | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_SEGMENT | PPC_EXTERN | + PPC_ALTIVEC; + pcc->msr_mask = (1ull << MSR_VR) | + (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_SOFT_74xx; + pcc->excp_model = POWERPC_EXCP_74xx; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_7400; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + 
POWERPC_FLAG_BUS_CLK; +} +#endif + +static void init_proc_e600(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_spr_sdr1(env); + gen_spr_7xx(env); + /* Time base */ + gen_tbl(env); + /* 74xx specific SPR */ + gen_spr_74xx(env); + /* XXX : not implemented */ + spr_register(env, SPR_UBAMR, "UBAMR", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_LDSTCR, "LDSTCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_ICTRL, "ICTRL", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_MSSSR0, "MSSSR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC5, "PMC5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC5, "UPMC5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_PMC6, "PMC6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* XXX : not implemented */ + spr_register(env, SPR_7XX_UPMC6, "UPMC6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* SPRGs */ + spr_register(env, SPR_SPRG4, "SPRG4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG4, "USPRG4", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG5, "SPRG5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG5, "USPRG5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG6, "SPRG6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG6, "USPRG6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_SPRG7, "SPRG7", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_USPRG7, "USPRG7", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); + /* Memory management */ + gen_low_BATs(env); + gen_high_BATs(env); + gen_74xx_soft_tlb(env, 128, 2); + init_excp_7450(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc6xx_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(e600)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC e600"; + pcc->init_proc = init_proc_e600; + pcc->check_pow = check_pow_hid0_74xx; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | + PPC_CACHE_DCBA | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_MEM_TLBIA | PPC_74xx_TLB | + PPC_SEGMENT | PPC_EXTERN | + PPC_ALTIVEC; + pcc->insns_flags2 = PPC_NONE; + pcc->msr_mask = (1ull << MSR_VR) | + (1ull << MSR_POW) | + (1ull << MSR_ILE) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + 
(1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_EP) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->mmu_model = POWERPC_MMU_32B; + pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault; + pcc->excp_model = POWERPC_EXCP_74xx; + pcc->bus_model = PPC_FLAGS_INPUT_6xx; + pcc->bfd_mach = bfd_mach_ppc_7400; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK; +} + +#if defined(TARGET_PPC64) +#define POWERPC970_HID5_INIT 0x00000000 + +static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn, + int bit, int sprn, int cause) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t1 = tcg_const_i32(tcg_ctx, bit); + TCGv_i32 t2 = tcg_const_i32(tcg_ctx, sprn); + TCGv_i32 t3 = tcg_const_i32(tcg_ctx, cause); + + gen_helper_fscr_facility_check(tcg_ctx, tcg_ctx->cpu_env, t1, t2, t3); + + tcg_temp_free_i32(tcg_ctx, t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t1); +} + +static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn, + int bit, int sprn, int cause) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t1 = tcg_const_i32(tcg_ctx, bit); + TCGv_i32 t2 = tcg_const_i32(tcg_ctx, sprn); + TCGv_i32 t3 = tcg_const_i32(tcg_ctx, cause); + + gen_helper_msr_facility_check(tcg_ctx, tcg_ctx->cpu_env, t1, t2, t3); + + tcg_temp_free_i32(tcg_ctx, t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t1); +} + +static void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv spr_up = tcg_temp_new(tcg_ctx); + TCGv spr = tcg_temp_new(tcg_ctx); + + gen_load_spr(tcg_ctx, spr, sprn - 1); + tcg_gen_shri_tl(tcg_ctx, spr_up, spr, 32); + tcg_gen_ext32u_tl(tcg_ctx, cpu_gpr[gprn], spr_up); + + tcg_temp_free(tcg_ctx, spr); + tcg_temp_free(tcg_ctx, spr_up); +} + +static void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv spr = tcg_temp_new(tcg_ctx); + + gen_load_spr(tcg_ctx, spr, sprn - 1); + tcg_gen_deposit_tl(tcg_ctx, spr, spr, cpu_gpr[gprn], 32, 32); + gen_store_spr(tcg_ctx, sprn - 1, spr); + + tcg_temp_free(tcg_ctx, spr); +} + +static int check_pow_970(CPUPPCState *env) +{ + if (env->spr[SPR_HID0] & (HID0_DEEPNAP | HID0_DOZE | HID0_NAP)) { + return 1; + } + + return 0; +} + +static void gen_spr_970_hid(CPUPPCState *env) +{ + /* Hardware implementation registers */ + /* XXX : not implemented */ + spr_register(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_clear, + 0x60000000); + spr_register(env, SPR_HID1, "HID1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_970_HID5, "HID5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + POWERPC970_HID5_INIT); +} + +static void gen_spr_970_hior(CPUPPCState *env) +{ + spr_register(env, SPR_HIOR, "SPR_HIOR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_hior, &spr_write_hior, + 0x00000000); +} + +static void gen_spr_book3s_ctrl(CPUPPCState *env) +{ + spr_register(env, SPR_CTRL, "SPR_CTRL", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_UCTRL, "SPR_UCTRL", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, SPR_NOACCESS, + 0x00000000); +} + +static void gen_spr_book3s_altivec(CPUPPCState *env) +{ + if (!(env->insns_flags & PPC_ALTIVEC)) { + return; + } + + 
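/* + * VRSAVE only exists when the model advertises AltiVec (guarded above). + * spr_register_kvm() additionally records a KVM one-reg id; in this + * TCG-only port that id is carried along but never used. + */ +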
spr_register_kvm(env, SPR_VRSAVE, "VRSAVE", + &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_VRSAVE, 0x00000000); + + /* + * Can't find information on what this should be on reset. This + * value is the one used by 74xx processors. + */ + vscr_init(env, 0x00010000); +} + +static void gen_spr_book3s_dbg(CPUPPCState *env) +{ + /* + * TODO: different specs define different scopes for these, + * will have to address this: + * 970: super/write and super/read + * powerisa 2.03..2.04: hypv/write and super/read. + * powerisa 2.05 and newer: hypv/write and hypv/read. + */ + spr_register_kvm(env, SPR_DABR, "DABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DABR, 0x00000000); + spr_register_kvm(env, SPR_DABRX, "DABRX", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DABRX, 0x00000000); +} + +static void gen_spr_book3s_207_dbg(CPUPPCState *env) +{ + spr_register_kvm_hv(env, SPR_DAWR, "DAWR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DAWR, 0x00000000); + spr_register_kvm_hv(env, SPR_DAWRX, "DAWRX", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DAWRX, 0x00000000); + spr_register_kvm_hv(env, SPR_CIABR, "CIABR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_CIABR, 0x00000000); +} + +static void gen_spr_970_dbg(CPUPPCState *env) +{ + /* Breakpoints */ + spr_register(env, SPR_IABR, "IABR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_spr_book3s_pmu_sup(CPUPPCState *env) +{ + spr_register_kvm(env, SPR_POWER_MMCR0, "MMCR0", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_MMCR0, 0x00000000); + spr_register_kvm(env, SPR_POWER_MMCR1, "MMCR1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_MMCR1, 0x00000000); + spr_register_kvm(env, SPR_POWER_MMCRA, "MMCRA", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_MMCRA, 0x00000000); + spr_register_kvm(env, SPR_POWER_PMC1, "PMC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PMC1, 0x00000000); + spr_register_kvm(env, SPR_POWER_PMC2, "PMC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PMC2, 0x00000000); + spr_register_kvm(env, SPR_POWER_PMC3, "PMC3", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PMC3, 0x00000000); + spr_register_kvm(env, SPR_POWER_PMC4, "PMC4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PMC4, 0x00000000); + spr_register_kvm(env, SPR_POWER_PMC5, "PMC5", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PMC5, 0x00000000); + spr_register_kvm(env, SPR_POWER_PMC6, "PMC6", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PMC6, 0x00000000); + spr_register_kvm(env, SPR_POWER_SIAR, "SIAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_SIAR, 0x00000000); + spr_register_kvm(env, SPR_POWER_SDAR, "SDAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_SDAR, 0x00000000); +} + +static void gen_spr_book3s_pmu_user(CPUPPCState *env) +{ + spr_register(env, 
SPR_POWER_UMMCR0, "UMMCR0", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_POWER_UMMCR1, "UMMCR1", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_POWER_UMMCRA, "UMMCRA", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_POWER_UPMC1, "UPMC1", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_POWER_UPMC2, "UPMC2", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_POWER_UPMC3, "UPMC3", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_POWER_UPMC4, "UPMC4", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_POWER_UPMC5, "UPMC5", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_POWER_UPMC6, "UPMC6", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_POWER_USIAR, "USIAR", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_POWER_USDAR, "USDAR", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); +} + +static void gen_spr_970_pmu_sup(CPUPPCState *env) +{ + spr_register_kvm(env, SPR_970_PMC7, "PMC7", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PMC7, 0x00000000); + spr_register_kvm(env, SPR_970_PMC8, "PMC8", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PMC8, 0x00000000); +} + +static void gen_spr_970_pmu_user(CPUPPCState *env) +{ + spr_register(env, SPR_970_UPMC7, "UPMC7", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + spr_register(env, SPR_970_UPMC8, "UPMC8", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); +} + +static void gen_spr_power8_pmu_sup(CPUPPCState *env) +{ + spr_register_kvm(env, SPR_POWER_MMCR2, "MMCR2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_MMCR2, 0x00000000); + spr_register_kvm(env, SPR_POWER_MMCRS, "MMCRS", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_MMCRS, 0x00000000); + spr_register_kvm(env, SPR_POWER_SIER, "SIER", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_SIER, 0x00000000); + spr_register_kvm(env, SPR_POWER_SPMC1, "SPMC1", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_SPMC1, 0x00000000); + spr_register_kvm(env, SPR_POWER_SPMC2, "SPMC2", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_SPMC2, 0x00000000); + spr_register_kvm(env, SPR_TACR, "TACR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_TACR, 0x00000000); + spr_register_kvm(env, SPR_TCSCR, "TCSCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_TCSCR, 0x00000000); + spr_register_kvm(env, SPR_CSIGR, "CSIGR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_CSIGR, 0x00000000); +} + +static void gen_spr_power8_pmu_user(CPUPPCState *env) +{ + spr_register(env, SPR_POWER_UMMCR2, "UMMCR2", + &spr_read_ureg, SPR_NOACCESS, + &spr_read_ureg, &spr_write_ureg, + 0x00000000); + 
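/* + * UMMCR2 above is wired through spr_read_ureg/spr_write_ureg, which + * redirect user SPR number n to the privileged SPR at n + 0x10 (so + * UMMCR2 aliases MMCR2); USIER below uses the plain generic callbacks. + */ +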
spr_register(env, SPR_POWER_USIER, "USIER", + &spr_read_generic, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_spr_power5p_ear(CPUPPCState *env) +{ + /* External access control */ + spr_register(env, SPR_EAR, "EAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_spr_power5p_tb(CPUPPCState *env) +{ + /* TBU40 (High 40 bits of the Timebase register) */ + spr_register_hv(env, SPR_TBU40, "TBU40", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, &spr_write_tbu40, + 0x00000000); +} + +static void spr_write_hmer(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv hmer = tcg_temp_new(tcg_ctx); + + gen_load_spr(tcg_ctx, hmer, sprn); + tcg_gen_and_tl(tcg_ctx, hmer, cpu_gpr[gprn], hmer); + gen_store_spr(tcg_ctx, sprn, hmer); + spr_store_dump_spr(sprn); + tcg_temp_free(tcg_ctx, hmer); +} + +static void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_helper_store_lpcr(tcg_ctx, tcg_ctx->cpu_env, cpu_gpr[gprn]); +} + +static void gen_spr_970_lpar(CPUPPCState *env) +{ + /* + * PPC970: HID4 covers things later controlled by the LPCR and + * RMOR in later CPUs, but with a different encoding. We only + * support the 970 in "Apple mode" which has all hypervisor + * facilities disabled by strapping, so we can basically just + * ignore it + */ + spr_register(env, SPR_970_HID4, "HID4", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_spr_power5p_lpar(CPUPPCState *env) +{ + /* Logical partitioning */ + spr_register_kvm_hv(env, SPR_LPCR, "LPCR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_lpcr, + KVM_REG_PPC_LPCR, LPCR_LPES0 | LPCR_LPES1); + spr_register_hv(env, SPR_HDEC, "HDEC", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_hdecr, &spr_write_hdecr, 0); +} + +static void gen_spr_book3s_ids(CPUPPCState *env) +{ + /* FIXME: Will need to deal with thread vs core only SPRs */ + + /* Processor identification */ + spr_register_hv(env, SPR_PIR, "PIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + &spr_read_generic, NULL, + 0x00000000); + spr_register_hv(env, SPR_HID0, "HID0", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_TSCR, "TSCR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_HMER, "HMER", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_hmer, + 0x00000000); + spr_register_hv(env, SPR_HMEER, "HMEER", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_TFMR, "TFMR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_LPIDR, "LPIDR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_lpidr, + 0x00000000); + spr_register_hv(env, SPR_HFSCR, "HFSCR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_MMCRC, "MMCRC", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, 
&spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_MMCRH, "MMCRH", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_HSPRG0, "HSPRG0", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_HSPRG1, "HSPRG1", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_HSRR0, "HSRR0", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_HSRR1, "HSRR1", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_HDAR, "HDAR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_HDSISR, "HDSISR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register_hv(env, SPR_HRMOR, "HRMOR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_spr_rmor(CPUPPCState *env) +{ + spr_register_hv(env, SPR_RMOR, "RMOR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); +} + +static void gen_spr_power8_ids(CPUPPCState *env) +{ + /* Thread identification */ + spr_register(env, SPR_TIR, "TIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0x00000000); +} + +static void gen_spr_book3s_purr(CPUPPCState *env) +{ + /* PURR & SPURR: Hack - treat these as aliases for the TB for now */ + spr_register_kvm_hv(env, SPR_PURR, "PURR", + &spr_read_purr, SPR_NOACCESS, + &spr_read_purr, SPR_NOACCESS, + &spr_read_purr, &spr_write_purr, + KVM_REG_PPC_PURR, 0x00000000); + spr_register_kvm_hv(env, SPR_SPURR, "SPURR", + &spr_read_purr, SPR_NOACCESS, + &spr_read_purr, SPR_NOACCESS, + &spr_read_purr, &spr_write_purr, + KVM_REG_PPC_SPURR, 0x00000000); +} + +static void gen_spr_power6_dbg(CPUPPCState *env) +{ + spr_register(env, SPR_CFAR, "SPR_CFAR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_cfar, &spr_write_cfar, + 0x00000000); +} + +static void gen_spr_power5p_common(CPUPPCState *env) +{ + spr_register_kvm(env, SPR_PPR, "PPR", + &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PPR, 0x00000000); +} + +static void gen_spr_power6_common(CPUPPCState *env) +{ + spr_register_kvm(env, SPR_DSCR, "SPR_DSCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_DSCR, 0x00000000); + /* + * Register PCR to report POWERPC_EXCP_PRIV_REG instead of + * POWERPC_EXCP_INVAL_SPR in userspace. Permit hypervisor access. 
+ */ + spr_register_hv(env, SPR_PCR, "PCR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pcr, + 0x00000000); +} + +static void spr_read_tar(DisasContext *ctx, int gprn, int sprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); + spr_read_generic(ctx, gprn, sprn); +} + +static void spr_write_tar(DisasContext *ctx, int sprn, int gprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); + spr_write_generic(ctx, sprn, gprn); +} + +static void gen_spr_power8_tce_address_control(CPUPPCState *env) +{ + spr_register_kvm(env, SPR_TAR, "TAR", + &spr_read_tar, &spr_write_tar, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_TAR, 0x00000000); +} + +static void spr_read_tm(DisasContext *ctx, int gprn, int sprn) +{ + gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); + spr_read_generic(ctx, gprn, sprn); +} + +static void spr_write_tm(DisasContext *ctx, int sprn, int gprn) +{ + gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); + spr_write_generic(ctx, sprn, gprn); +} + +static void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn) +{ + gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); + spr_read_prev_upper32(ctx, gprn, sprn); +} + +static void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn) +{ + gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); + spr_write_prev_upper32(ctx, sprn, gprn); +} + +static void gen_spr_power8_tm(CPUPPCState *env) +{ + spr_register_kvm(env, SPR_TFHAR, "TFHAR", + &spr_read_tm, &spr_write_tm, + &spr_read_tm, &spr_write_tm, + KVM_REG_PPC_TFHAR, 0x00000000); + spr_register_kvm(env, SPR_TFIAR, "TFIAR", + &spr_read_tm, &spr_write_tm, + &spr_read_tm, &spr_write_tm, + KVM_REG_PPC_TFIAR, 0x00000000); + spr_register_kvm(env, SPR_TEXASR, "TEXASR", + &spr_read_tm, &spr_write_tm, + &spr_read_tm, &spr_write_tm, + KVM_REG_PPC_TEXASR, 0x00000000); + spr_register(env, SPR_TEXASRU, "TEXASRU", + &spr_read_tm_upper32, &spr_write_tm_upper32, + &spr_read_tm_upper32, &spr_write_tm_upper32, + 0x00000000); +} + +static void spr_read_ebb(DisasContext *ctx, int gprn, int sprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); + spr_read_generic(ctx, gprn, sprn); +} + +static void spr_write_ebb(DisasContext *ctx, int sprn, int gprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); + spr_write_generic(ctx, sprn, gprn); +} + +static void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); + spr_read_prev_upper32(ctx, gprn, sprn); +} + +static void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn) +{ + gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); + spr_write_prev_upper32(ctx, sprn, gprn); +} + +static void gen_spr_power8_ebb(CPUPPCState *env) +{ + spr_register(env, SPR_BESCRS, "BESCRS", + &spr_read_ebb, &spr_write_ebb, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BESCRSU, "BESCRSU", + &spr_read_ebb_upper32, &spr_write_ebb_upper32, + &spr_read_prev_upper32, &spr_write_prev_upper32, + 0x00000000); + spr_register(env, SPR_BESCRR, "BESCRR", + &spr_read_ebb, &spr_write_ebb, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_BESCRRU, "BESCRRU", + &spr_read_ebb_upper32, &spr_write_ebb_upper32, + &spr_read_prev_upper32, &spr_write_prev_upper32, + 0x00000000); + spr_register_kvm(env, SPR_EBBHR, "EBBHR", + 
&spr_read_ebb, &spr_write_ebb, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_EBBHR, 0x00000000); + spr_register_kvm(env, SPR_EBBRR, "EBBRR", + &spr_read_ebb, &spr_write_ebb, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_EBBRR, 0x00000000); + spr_register_kvm(env, SPR_BESCR, "BESCR", + &spr_read_ebb, &spr_write_ebb, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_BESCR, 0x00000000); +} + +/* Virtual Time Base */ +static void gen_spr_vtb(CPUPPCState *env) +{ + spr_register_kvm_hv(env, SPR_VTB, "VTB", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_vtb, SPR_NOACCESS, + &spr_read_vtb, &spr_write_vtb, + KVM_REG_PPC_VTB, 0x00000000); +} + +static void gen_spr_power8_fscr(CPUPPCState *env) +{ + target_ulong initval = 0; + spr_register_kvm(env, SPR_FSCR, "FSCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_FSCR, initval); +} + +static void gen_spr_power8_pspb(CPUPPCState *env) +{ + spr_register_kvm(env, SPR_PSPB, "PSPB", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic32, + KVM_REG_PPC_PSPB, 0); +} + +static void gen_spr_power8_dpdes(CPUPPCState *env) +{ + /* Directed Privileged Door-bell Exception State, used for IPI */ + spr_register_kvm_hv(env, SPR_DPDES, "DPDES", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_dpdes, SPR_NOACCESS, + &spr_read_dpdes, &spr_write_dpdes, + KVM_REG_PPC_DPDES, 0x00000000); +} + +static void gen_spr_power8_ic(CPUPPCState *env) +{ + spr_register_hv(env, SPR_IC, "IC", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0); +} + +static void gen_spr_power8_book4(CPUPPCState *env) +{ + /* Add a number of P8 book4 registers */ + spr_register_kvm(env, SPR_ACOP, "ACOP", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_ACOP, 0); + spr_register_kvm(env, SPR_BOOKS_PID, "PID", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pidr, + KVM_REG_PPC_PID, 0); + spr_register_kvm(env, SPR_WORT, "WORT", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_WORT, 0); +} + +static void gen_spr_power7_book4(CPUPPCState *env) +{ + /* Add a number of P7 book4 registers */ + spr_register_kvm(env, SPR_ACOP, "ACOP", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_ACOP, 0); + spr_register_kvm(env, SPR_BOOKS_PID, "PID", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PID, 0); +} + +static void gen_spr_power8_rpr(CPUPPCState *env) +{ + spr_register_hv(env, SPR_RPR, "RPR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000103070F1F3F); +} + +static void gen_spr_power9_mmu(CPUPPCState *env) +{ + /* Partition Table Control */ + spr_register_kvm_hv(env, SPR_PTCR, "PTCR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_ptcr, + KVM_REG_PPC_PTCR, 0x00000000); + /* Address Segment Descriptor Register */ + spr_register_hv(env, SPR_ASDR, "ASDR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x0000000000000000); +} + +static void init_proc_book3s_common(CPUPPCState *env) +{ + gen_spr_ne_601(env); + gen_tbl(env); + gen_spr_usprg3(env); + gen_spr_book3s_altivec(env); + gen_spr_book3s_pmu_sup(env); + gen_spr_book3s_pmu_user(env); + gen_spr_book3s_ctrl(env); +} + +static void init_proc_970(CPUPPCState *env) +{ + /* Common Registers */ + init_proc_book3s_common(env); + 
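/* + * init_proc_book3s_common() above registers the SPRs shared by every + * Book3S model (time base, PMU counters and their user aliases, CTRL, + * AltiVec VRSAVE); gen_spr_sdr1() below adds SDR1, which holds the + * hashed-page-table base used by the 64-bit hash MMU. + */ +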
gen_spr_sdr1(env); + gen_spr_book3s_dbg(env); + + /* 970 Specific Registers */ + gen_spr_970_hid(env); + gen_spr_970_hior(env); + gen_low_BATs(env); + gen_spr_970_pmu_sup(env); + gen_spr_970_pmu_user(env); + gen_spr_970_lpar(env); + gen_spr_970_dbg(env); + + /* env variables */ + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_970(env); + ppc970_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(970)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->desc = "PowerPC 970"; + pcc->init_proc = init_proc_970; + pcc->check_pow = check_pow_970; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_64B | PPC_ALTIVEC | + PPC_SEGMENT_64B | PPC_SLBI; + pcc->insns_flags2 = PPC2_FP_CVT_S64; + pcc->msr_mask = (1ull << MSR_SF) | + (1ull << MSR_VR) | + (1ull << MSR_POW) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI); + pcc->mmu_model = POWERPC_MMU_64B; + pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; + pcc->hash64_opts = &ppc_hash64_opts_basic; + pcc->excp_model = POWERPC_EXCP_970; + pcc->bus_model = PPC_FLAGS_INPUT_970; + pcc->bfd_mach = bfd_mach_ppc64; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK; + pcc->l1_dcache_size = 0x8000; + pcc->l1_icache_size = 0x10000; +} + +static void init_proc_power5plus(CPUPPCState *env) +{ + /* Common Registers */ + init_proc_book3s_common(env); + gen_spr_sdr1(env); + gen_spr_book3s_dbg(env); + + /* POWER5+ Specific Registers */ + gen_spr_970_hid(env); + gen_spr_970_hior(env); + gen_low_BATs(env); + gen_spr_970_pmu_sup(env); + gen_spr_970_pmu_user(env); + gen_spr_power5p_common(env); + gen_spr_power5p_lpar(env); + gen_spr_power5p_ear(env); + gen_spr_power5p_tb(env); + + /* env variables */ + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_970(env); + ppc970_irq_init(env_archcpu(env)); +} + +POWERPC_FAMILY(POWER5P)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + +// dc->fw_name = "PowerPC,POWER5"; +// dc->desc = "POWER5+"; + pcc->init_proc = init_proc_power5plus; + pcc->check_pow = check_pow_970; + pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_STFIWX | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_64B | + PPC_SEGMENT_64B | PPC_SLBI; + pcc->insns_flags2 = PPC2_FP_CVT_S64; + pcc->msr_mask = (1ull << MSR_SF) | + (1ull << MSR_VR) | + (1ull << MSR_POW) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI); + pcc->lpcr_mask = LPCR_RMLS | LPCR_ILE | LPCR_LPES0 | LPCR_LPES1 | + LPCR_RMI | LPCR_HDICE; + pcc->mmu_model = POWERPC_MMU_2_03; + pcc->handle_mmu_fault = 
ppc_hash64_handle_mmu_fault; + pcc->hash64_opts = &ppc_hash64_opts_basic; + pcc->lrg_decr_bits = 32; + pcc->excp_model = POWERPC_EXCP_970; + pcc->bus_model = PPC_FLAGS_INPUT_970; + pcc->bfd_mach = bfd_mach_ppc64; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK; + pcc->l1_dcache_size = 0x8000; + pcc->l1_icache_size = 0x10000; +} + +static void init_proc_POWER7(CPUPPCState *env) +{ + /* Common Registers */ + init_proc_book3s_common(env); + gen_spr_sdr1(env); + gen_spr_book3s_dbg(env); + + /* POWER7 Specific Registers */ + gen_spr_book3s_ids(env); + gen_spr_rmor(env); + gen_spr_amr(env); + gen_spr_book3s_purr(env); + gen_spr_power5p_common(env); + gen_spr_power5p_lpar(env); + gen_spr_power5p_ear(env); + gen_spr_power5p_tb(env); + gen_spr_power6_common(env); + gen_spr_power6_dbg(env); + gen_spr_power7_book4(env); + + /* env variables */ + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_POWER7(env); + ppcPOWER7_irq_init(env_archcpu(env)); +} + +static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr) +{ + if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7P_BASE) { + return true; + } + if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7_BASE) { + return true; + } + return false; +} + +static bool cpu_has_work_POWER7(CPUState *cs) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + + if (cs->halted) { + if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { + return false; + } + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && + (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) { + return true; + } + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && + (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) { + return true; + } + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) && + (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { + return true; + } + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) && + (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { + return true; + } + if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { + return true; + } + return false; + } else { + return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); + } +} + +POWERPC_FAMILY(POWER7)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + +// dc->fw_name = "PowerPC,POWER7"; +// dc->desc = "POWER7"; + pcc->pvr_match = ppc_pvr_match_power7; + pcc->pcr_mask = PCR_VEC_DIS | PCR_VSX_DIS | PCR_COMPAT_2_05; + pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05; + pcc->init_proc = init_proc_POWER7; + pcc->check_pow = check_pow_nocheck; + cc->has_work = cpu_has_work_POWER7; + pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_FRSQRTES | + PPC_FLOAT_STFIWX | + PPC_FLOAT_EXT | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | + PPC_SEGMENT_64B | PPC_SLBI | + PPC_POPCNTB | PPC_POPCNTWD | + PPC_CILDST; + pcc->insns_flags2 = PPC2_VSX | PPC2_DFP | PPC2_DBRX | PPC2_ISA205 | + PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | + PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | + PPC2_FP_TST_ISA206 | PPC2_FP_CVT_S64 | + PPC2_PM_ISA206; + pcc->msr_mask = (1ull << MSR_SF) | + (1ull << MSR_VR) | + (1ull << MSR_VSX) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + 
(1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->lpcr_mask = LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD | + LPCR_VRMASD | LPCR_RMLS | LPCR_ILE | + LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 | + LPCR_MER | LPCR_TC | + LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE; + pcc->lpcr_pm = LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2; + pcc->mmu_model = POWERPC_MMU_2_06; + pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; + pcc->hash64_opts = &ppc_hash64_opts_POWER7; + pcc->lrg_decr_bits = 32; + pcc->excp_model = POWERPC_EXCP_POWER7; + pcc->bus_model = PPC_FLAGS_INPUT_POWER7; + pcc->bfd_mach = bfd_mach_ppc64; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | + POWERPC_FLAG_VSX; + pcc->l1_dcache_size = 0x8000; + pcc->l1_icache_size = 0x8000; + pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; +} + +static void init_proc_POWER8(CPUPPCState *env) +{ + /* Common Registers */ + init_proc_book3s_common(env); + gen_spr_sdr1(env); + gen_spr_book3s_207_dbg(env); + + /* POWER8 Specific Registers */ + gen_spr_book3s_ids(env); + gen_spr_rmor(env); + gen_spr_amr(env); + gen_spr_iamr(env); + gen_spr_book3s_purr(env); + gen_spr_power5p_common(env); + gen_spr_power5p_lpar(env); + gen_spr_power5p_ear(env); + gen_spr_power5p_tb(env); + gen_spr_power6_common(env); + gen_spr_power6_dbg(env); + gen_spr_power8_tce_address_control(env); + gen_spr_power8_ids(env); + gen_spr_power8_ebb(env); + gen_spr_power8_fscr(env); + gen_spr_power8_pmu_sup(env); + gen_spr_power8_pmu_user(env); + gen_spr_power8_tm(env); + gen_spr_power8_pspb(env); + gen_spr_power8_dpdes(env); + gen_spr_vtb(env); + gen_spr_power8_ic(env); + gen_spr_power8_book4(env); + gen_spr_power8_rpr(env); + + /* env variables */ + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_POWER8(env); + ppcPOWER7_irq_init(env_archcpu(env)); +} + +static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr) +{ + if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8NVL_BASE) { + return true; + } + if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8E_BASE) { + return true; + } + if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8_BASE) { + return true; + } + return false; +} + +static bool cpu_has_work_POWER8(CPUState *cs) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + + if (cs->halted) { + if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { + return false; + } + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) { + return true; + } + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) { + return true; + } + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { + return true; + } + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { + return true; + } + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) { + return true; + } + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) && + (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) { + return true; + } + if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { + return true; + } 
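+        /*
+         * No pending interrupt matched a wake-up condition enabled by
+         * the LPCR PECE bits checked above, so the core stays halted.
+         */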
+ return false; + } else { + return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); + } +} + +POWERPC_FAMILY(POWER8)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + +// dc->fw_name = "PowerPC,POWER8"; +// dc->desc = "POWER8"; + pcc->pvr_match = ppc_pvr_match_power8; + pcc->pcr_mask = PCR_TM_DIS | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; + pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; + pcc->init_proc = init_proc_POWER8; + pcc->check_pow = check_pow_nocheck; + cc->has_work = cpu_has_work_POWER8; + pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_FRSQRTES | + PPC_FLOAT_STFIWX | + PPC_FLOAT_EXT | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | + PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | + PPC_SEGMENT_64B | PPC_SLBI | + PPC_POPCNTB | PPC_POPCNTWD | + PPC_CILDST; + pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | + PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | + PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | + PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | + PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | + PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | + PPC2_TM | PPC2_PM_ISA206; + pcc->msr_mask = (1ull << MSR_SF) | + (1ull << MSR_HV) | + (1ull << MSR_TM) | + (1ull << MSR_VR) | + (1ull << MSR_VSX) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_TS0) | + (1ull << MSR_TS1) | + (1ull << MSR_LE); + pcc->lpcr_mask = LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV | + LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE | + LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 | + LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 | + LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE; + pcc->lpcr_pm = LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 | + LPCR_P8_PECE3 | LPCR_P8_PECE4; + pcc->mmu_model = POWERPC_MMU_2_07; + pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; + pcc->hash64_opts = &ppc_hash64_opts_POWER7; + pcc->lrg_decr_bits = 32; + pcc->n_host_threads = 8; + pcc->excp_model = POWERPC_EXCP_POWER8; + pcc->bus_model = PPC_FLAGS_INPUT_POWER7; + pcc->bfd_mach = bfd_mach_ppc64; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | + POWERPC_FLAG_VSX | POWERPC_FLAG_TM; + pcc->l1_dcache_size = 0x8000; + pcc->l1_icache_size = 0x8000; + pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; +} + +/* + * Radix pg sizes and AP encodings for dt node ibm,processor-radix-AP-encodings + * Encoded as array of int_32s in the form: + * 0bxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyy + * x -> AP encoding + * y -> radix mode supported page size (encoded as a shift) + */ +static struct ppc_radix_page_info POWER9_radix_page_info = { + .count = 4, + .entries = { + 0x0000000c, /* 4K - enc: 0x0 */ + 0xa0000010, /* 64K - enc: 0x5 */ + 0x20000015, /* 2M - enc: 0x1 */ + 0x4000001e /* 1G - enc: 0x2 */ + } +}; + +static void init_proc_POWER9(CPUPPCState *env) +{ + /* Common Registers */ + init_proc_book3s_common(env); + gen_spr_book3s_207_dbg(env); + + /* POWER8 Specific Registers */ + gen_spr_book3s_ids(env); + gen_spr_amr(env); + gen_spr_iamr(env); + 
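+    /* SPR sets inherited from POWER5+/POWER6, then the POWER8 additions */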
gen_spr_book3s_purr(env); + gen_spr_power5p_common(env); + gen_spr_power5p_lpar(env); + gen_spr_power5p_ear(env); + gen_spr_power5p_tb(env); + gen_spr_power6_common(env); + gen_spr_power6_dbg(env); + gen_spr_power8_tce_address_control(env); + gen_spr_power8_ids(env); + gen_spr_power8_ebb(env); + gen_spr_power8_fscr(env); + gen_spr_power8_pmu_sup(env); + gen_spr_power8_pmu_user(env); + gen_spr_power8_tm(env); + gen_spr_power8_pspb(env); + gen_spr_power8_dpdes(env); + gen_spr_vtb(env); + gen_spr_power8_ic(env); + gen_spr_power8_book4(env); + gen_spr_power8_rpr(env); + gen_spr_power9_mmu(env); + + /* POWER9 Specific registers */ + spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL, + spr_read_generic, spr_write_generic, + KVM_REG_PPC_TIDR, 0); + + /* FIXME: Filter fields properly based on privilege level */ + spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, + spr_read_generic, spr_write_generic, + KVM_REG_PPC_PSSCR, 0); + + /* env variables */ + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_POWER9(env); + ppcPOWER9_irq_init(env_archcpu(env)); +} + +static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr) +{ + if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER9_BASE) { + return true; + } + return false; +} + +static bool cpu_has_work_POWER9(CPUState *cs) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + + if (cs->halted) { + uint64_t psscr = env->spr[SPR_PSSCR]; + + if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { + return false; + } + + /* If EC is clear, just return true on any pending interrupt */ + if (!(psscr & PSSCR_EC)) { + return true; + } + /* External Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && + (env->spr[SPR_LPCR] & LPCR_EEE)) { + bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (heic == 0 || !msr_hv || msr_pr) { + return true; + } + } + /* Decrementer Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && + (env->spr[SPR_LPCR] & LPCR_DEE)) { + return true; + } + /* Machine Check or Hypervisor Maintenance Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK | + 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) { + return true; + } + /* Privileged Doorbell Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) && + (env->spr[SPR_LPCR] & LPCR_PDEE)) { + return true; + } + /* Hypervisor Doorbell Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) && + (env->spr[SPR_LPCR] & LPCR_HDEE)) { + return true; + } + /* Hypervisor virtualization exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HVIRT)) && + (env->spr[SPR_LPCR] & LPCR_HVEE)) { + return true; + } + if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { + return true; + } + return false; + } else { + return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); + } +} + +POWERPC_FAMILY(POWER9)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + +// dc->fw_name = "PowerPC,POWER9"; +// dc->desc = "POWER9"; + pcc->pvr_match = ppc_pvr_match_power9; + pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07; + pcc->pcr_supported = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | + PCR_COMPAT_2_05; + pcc->init_proc = init_proc_POWER9; + pcc->check_pow = check_pow_nocheck; + cc->has_work = cpu_has_work_POWER9; + pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | 
PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_FRSQRTES | + PPC_FLOAT_STFIWX | + PPC_FLOAT_EXT | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBSYNC | + PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | + PPC_SEGMENT_64B | PPC_SLBI | + PPC_POPCNTB | PPC_POPCNTWD | + PPC_CILDST; + pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | + PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | + PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | + PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | + PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | + PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | + PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL; + pcc->msr_mask = (1ull << MSR_SF) | + (1ull << MSR_HV) | + (1ull << MSR_TM) | + (1ull << MSR_VR) | + (1ull << MSR_VSX) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | + (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | + LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | + (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | + LPCR_DEE | LPCR_OEE)) + | LPCR_MER | LPCR_GTSE | LPCR_TC | + LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE; + pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; + pcc->mmu_model = POWERPC_MMU_3_00; + pcc->handle_mmu_fault = ppc64_v3_handle_mmu_fault; + /* segment page size remain the same */ + pcc->hash64_opts = &ppc_hash64_opts_POWER7; + pcc->radix_page_info = &POWER9_radix_page_info; + pcc->lrg_decr_bits = 56; + pcc->n_host_threads = 4; + pcc->excp_model = POWERPC_EXCP_POWER9; + pcc->bus_model = PPC_FLAGS_INPUT_POWER9; + pcc->bfd_mach = bfd_mach_ppc64; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | + POWERPC_FLAG_VSX | POWERPC_FLAG_TM; + pcc->l1_dcache_size = 0x8000; + pcc->l1_icache_size = 0x8000; + pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; +} + +/* + * Radix pg sizes and AP encodings for dt node ibm,processor-radix-AP-encodings + * Encoded as array of int_32s in the form: + * 0bxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyy + * x -> AP encoding + * y -> radix mode supported page size (encoded as a shift) + */ +static struct ppc_radix_page_info POWER10_radix_page_info = { + .count = 4, + .entries = { + 0x0000000c, /* 4K - enc: 0x0 */ + 0xa0000010, /* 64K - enc: 0x5 */ + 0x20000015, /* 2M - enc: 0x1 */ + 0x4000001e /* 1G - enc: 0x2 */ + } +}; + +static void init_proc_POWER10(CPUPPCState *env) +{ + /* Common Registers */ + init_proc_book3s_common(env); + gen_spr_book3s_207_dbg(env); + + /* POWER8 Specific Registers */ + gen_spr_book3s_ids(env); + gen_spr_amr(env); + gen_spr_iamr(env); + gen_spr_book3s_purr(env); + gen_spr_power5p_common(env); + gen_spr_power5p_lpar(env); + gen_spr_power5p_ear(env); + gen_spr_power6_common(env); + gen_spr_power6_dbg(env); + gen_spr_power8_tce_address_control(env); + gen_spr_power8_ids(env); + gen_spr_power8_ebb(env); + gen_spr_power8_fscr(env); + gen_spr_power8_pmu_sup(env); + gen_spr_power8_pmu_user(env); + gen_spr_power8_tm(env); + gen_spr_power8_pspb(env); + gen_spr_vtb(env); + gen_spr_power8_ic(env); + gen_spr_power8_book4(env); + gen_spr_power8_rpr(env); + gen_spr_power9_mmu(env); + + /* POWER9 
Specific registers */ + spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL, + spr_read_generic, spr_write_generic, + KVM_REG_PPC_TIDR, 0); + + /* FIXME: Filter fields properly based on privilege level */ + spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, + spr_read_generic, spr_write_generic, + KVM_REG_PPC_PSSCR, 0); + + /* env variables */ + env->dcache_line_size = 128; + env->icache_line_size = 128; + + /* Allocate hardware IRQ controller */ + init_excp_POWER10(env); + ppcPOWER9_irq_init(env_archcpu(env)); +} + +static bool ppc_pvr_match_power10(PowerPCCPUClass *pcc, uint32_t pvr) +{ + if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER10_BASE) { + return true; + } + return false; +} + +static bool cpu_has_work_POWER10(CPUState *cs) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + + if (cs->halted) { + uint64_t psscr = env->spr[SPR_PSSCR]; + + if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { + return false; + } + + /* If EC is clear, just return true on any pending interrupt */ + if (!(psscr & PSSCR_EC)) { + return true; + } + /* External Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && + (env->spr[SPR_LPCR] & LPCR_EEE)) { + bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (heic == 0 || !msr_hv || msr_pr) { + return true; + } + } + /* Decrementer Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) && + (env->spr[SPR_LPCR] & LPCR_DEE)) { + return true; + } + /* Machine Check or Hypervisor Maintenance Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK | + 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) { + return true; + } + /* Privileged Doorbell Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) && + (env->spr[SPR_LPCR] & LPCR_PDEE)) { + return true; + } + /* Hypervisor Doorbell Exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) && + (env->spr[SPR_LPCR] & LPCR_HDEE)) { + return true; + } + /* Hypervisor virtualization exception */ + if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HVIRT)) && + (env->spr[SPR_LPCR] & LPCR_HVEE)) { + return true; + } + if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) { + return true; + } + return false; + } else { + return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); + } +} + +POWERPC_FAMILY(POWER10)(CPUClass *oc, void *data) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + +// dc->fw_name = "PowerPC,POWER10"; +// dc->desc = "POWER10"; + pcc->pvr_match = ppc_pvr_match_power10; + pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07 | + PCR_COMPAT_3_00; + pcc->pcr_supported = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | + PCR_COMPAT_2_06 | PCR_COMPAT_2_05; + pcc->init_proc = init_proc_POWER10; + pcc->check_pow = check_pow_nocheck; + cc->has_work = cpu_has_work_POWER10; + pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | + PPC_FLOAT_FRSQRTES | + PPC_FLOAT_STFIWX | + PPC_FLOAT_EXT | + PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | + PPC_MEM_SYNC | PPC_MEM_EIEIO | + PPC_MEM_TLBSYNC | + PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | + PPC_SEGMENT_64B | PPC_SLBI | + PPC_POPCNTB | PPC_POPCNTWD | + PPC_CILDST; + pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | + PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | + PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | + PPC2_FP_TST_ISA206 
| PPC2_BCTAR_ISA207 | + PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | + PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | + PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL; + pcc->msr_mask = (1ull << MSR_SF) | + (1ull << MSR_HV) | + (1ull << MSR_TM) | + (1ull << MSR_VR) | + (1ull << MSR_VSX) | + (1ull << MSR_EE) | + (1ull << MSR_PR) | + (1ull << MSR_FP) | + (1ull << MSR_ME) | + (1ull << MSR_FE0) | + (1ull << MSR_SE) | + (1ull << MSR_DE) | + (1ull << MSR_FE1) | + (1ull << MSR_IR) | + (1ull << MSR_DR) | + (1ull << MSR_PMM) | + (1ull << MSR_RI) | + (1ull << MSR_LE); + pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | + (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | + LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | + (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | + LPCR_DEE | LPCR_OEE)) + | LPCR_MER | LPCR_GTSE | LPCR_TC | + LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE; + pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; + pcc->mmu_model = POWERPC_MMU_3_00; + pcc->handle_mmu_fault = ppc64_v3_handle_mmu_fault; + /* segment page size remain the same */ + pcc->hash64_opts = &ppc_hash64_opts_POWER7; + pcc->radix_page_info = &POWER10_radix_page_info; + pcc->lrg_decr_bits = 56; + pcc->excp_model = POWERPC_EXCP_POWER9; + pcc->bus_model = PPC_FLAGS_INPUT_POWER9; + pcc->bfd_mach = bfd_mach_ppc64; + pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | + POWERPC_FLAG_BE | POWERPC_FLAG_PMM | + POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | + POWERPC_FLAG_VSX | POWERPC_FLAG_TM; + pcc->l1_dcache_size = 0x8000; + pcc->l1_icache_size = 0x8000; + pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; +} + +#if 0 +void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp) +{ + CPUPPCState *env = &cpu->env; + + cpu->vhyp = vhyp; + + /* + * With a virtual hypervisor mode we never allow the CPU to go + * hypervisor mode itself + */ + env->msr_mask &= ~MSR_HVB; +} +#endif + +#endif /* defined(TARGET_PPC64) */ + +/*****************************************************************************/ +/* Generic CPU instantiation routine */ +static void init_ppc_proc(PowerPCCPU *cpu) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + CPUPPCState *env = &cpu->env; + int i; + + env->irq_inputs = NULL; + /* Set all exception vectors to an invalid address */ + for (i = 0; i < POWERPC_EXCP_NB; i++) { +#ifdef _MSC_VER + env->excp_vectors[i] = (target_ulong)(0ULL - 1ULL); +#else + env->excp_vectors[i] = (target_ulong)(-1ULL); +#endif + } + env->ivor_mask = 0x00000000; + env->ivpr_mask = 0x00000000; + /* Default MMU definitions */ + env->nb_BATs = 0; + env->nb_tlb = 0; + env->nb_ways = 0; + env->tlb_type = TLB_NONE; + + /* Register SPR common to all PowerPC implementations */ + gen_spr_generic(env); + spr_register(env, SPR_PVR, "PVR", + /* Linux permits userspace to read PVR */ + SPR_NOACCESS, + SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + pcc->pvr); + /* Register SVR if it's defined to anything else than POWERPC_SVR_NONE */ + if (pcc->svr != POWERPC_SVR_NONE) { + if (pcc->svr & POWERPC_SVR_E500) { + spr_register(env, SPR_E500_SVR, "SVR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + pcc->svr & ~POWERPC_SVR_E500); + } else { + spr_register(env, SPR_SVR, "SVR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + pcc->svr); + } + } + /* PowerPC implementation specific initialisations (SPRs, timers, ...) 
*/ + (*pcc->init_proc)(env); + +#if 0 + ppc_gdb_gen_spr_xml(cpu); +#endif + + /* MSR bits & flags consistency checks */ + if (env->msr_mask & (1 << 25)) { + switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) { + case POWERPC_FLAG_SPE: + case POWERPC_FLAG_VRE: + break; + default: + fprintf(stderr, "PowerPC MSR definition inconsistency\n" + "Should define POWERPC_FLAG_SPE or POWERPC_FLAG_VRE\n"); + exit(1); + } + } else if (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) { + fprintf(stderr, "PowerPC MSR definition inconsistency\n" + "Should not define POWERPC_FLAG_SPE nor POWERPC_FLAG_VRE\n"); + exit(1); + } + if (env->msr_mask & (1 << 17)) { + switch (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) { + case POWERPC_FLAG_TGPR: + case POWERPC_FLAG_CE: + break; + default: + fprintf(stderr, "PowerPC MSR definition inconsistency\n" + "Should define POWERPC_FLAG_TGPR or POWERPC_FLAG_CE\n"); + exit(1); + } + } else if (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) { + fprintf(stderr, "PowerPC MSR definition inconsistency\n" + "Should not define POWERPC_FLAG_TGPR nor POWERPC_FLAG_CE\n"); + exit(1); + } + if (env->msr_mask & (1 << 10)) { + switch (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE | + POWERPC_FLAG_UBLE)) { + case POWERPC_FLAG_SE: + case POWERPC_FLAG_DWE: + case POWERPC_FLAG_UBLE: + break; + default: + fprintf(stderr, "PowerPC MSR definition inconsistency\n" + "Should define POWERPC_FLAG_SE or POWERPC_FLAG_DWE or " + "POWERPC_FLAG_UBLE\n"); + exit(1); + } + } else if (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE | + POWERPC_FLAG_UBLE)) { + fprintf(stderr, "PowerPC MSR definition inconsistency\n" + "Should not define POWERPC_FLAG_SE nor POWERPC_FLAG_DWE nor " + "POWERPC_FLAG_UBLE\n"); + exit(1); + } + if (env->msr_mask & (1 << 9)) { + switch (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) { + case POWERPC_FLAG_BE: + case POWERPC_FLAG_DE: + break; + default: + fprintf(stderr, "PowerPC MSR definition inconsistency\n" + "Should define POWERPC_FLAG_BE or POWERPC_FLAG_DE\n"); + exit(1); + } + } else if (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) { + fprintf(stderr, "PowerPC MSR definition inconsistency\n" + "Should not define POWERPC_FLAG_BE nor POWERPC_FLAG_DE\n"); + exit(1); + } + if (env->msr_mask & (1 << 2)) { + switch (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) { + case POWERPC_FLAG_PX: + case POWERPC_FLAG_PMM: + break; + default: + fprintf(stderr, "PowerPC MSR definition inconsistency\n" + "Should define POWERPC_FLAG_PX or POWERPC_FLAG_PMM\n"); + exit(1); + } + } else if (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) { + fprintf(stderr, "PowerPC MSR definition inconsistency\n" + "Should not define POWERPC_FLAG_PX nor POWERPC_FLAG_PMM\n"); + exit(1); + } + if ((env->flags & (POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_BUS_CLK)) == 0) { + fprintf(stderr, "PowerPC flags inconsistency\n" + "Should define the time-base and decrementer clock source\n"); + exit(1); + } + /* Allocate TLBs buffer when needed */ + if (env->nb_tlb != 0) { + int nb_tlb = env->nb_tlb; + if (env->id_tlbs != 0) { + nb_tlb *= 2; + } + switch (env->tlb_type) { + case TLB_6XX: + env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, nb_tlb); + break; + case TLB_EMB: + env->tlb.tlbe = g_new0(ppcemb_tlb_t, nb_tlb); + break; + case TLB_MAS: + env->tlb.tlbm = g_new0(ppcmas_tlb_t, nb_tlb); + break; + } + /* Pre-compute some useful values */ + env->tlb_per_way = env->nb_tlb / env->nb_ways; + } +#if 0 + if (env->irq_inputs == NULL) { + warn_report("no internal IRQ controller registered." 
+ " Attempt QEMU to crash very soon !"); + } + if (env->check_pow == NULL) { + warn_report("no power management check handler registered." + " Attempt QEMU to crash very soon !"); + } +#endif +} + +#if defined(PPC_DUMP_CPU) +static void dump_ppc_sprs(CPUPPCState *env) +{ + ppc_spr_t *spr; + uint32_t sr, sw; + uint32_t ur, uw; + int i, j, n; + + printf("Special purpose registers:\n"); + for (i = 0; i < 32; i++) { + for (j = 0; j < 32; j++) { + n = (i << 5) | j; + spr = &env->spr_cb[n]; + uw = spr->uea_write != NULL && spr->uea_write != SPR_NOACCESS; + ur = spr->uea_read != NULL && spr->uea_read != SPR_NOACCESS; + sw = spr->oea_write != NULL && spr->oea_write != SPR_NOACCESS; + sr = spr->oea_read != NULL && spr->oea_read != SPR_NOACCESS; + if (sw || sr || uw || ur) { + printf("SPR: %4d (%03x) %-8s s%c%c u%c%c\n", + (i << 5) | j, (i << 5) | j, spr->name, + sw ? 'w' : '-', sr ? 'r' : '-', + uw ? 'w' : '-', ur ? 'r' : '-'); + } + } + } + fflush(stdout); + fflush(stderr); +} +#endif + +/*****************************************************************************/ + +/* Opcode types */ +enum { + PPC_DIRECT = 0, /* Opcode routine */ + PPC_INDIRECT = 1, /* Indirect opcode table */ +}; + +#define PPC_OPCODE_MASK 0x3 + +static inline int is_indirect_opcode(void *handler) +{ + return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT; +} + +static inline opc_handler_t **ind_table(void *handler) +{ + return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK); +} + +/* Instruction table creation */ +/* Opcodes tables creation */ +static void fill_new_table(opc_handler_t **table, int len) +{ + int i; + + for (i = 0; i < len; i++) { + table[i] = &invalid_handler; + } +} + +static int create_new_table(opc_handler_t **table, unsigned char idx) +{ + opc_handler_t **tmp; + + tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN); + fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN); + table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT); + + return 0; +} + +static int insert_in_table(opc_handler_t **table, unsigned char idx, + opc_handler_t *handler) +{ + if (table[idx] != &invalid_handler) { + return -1; + } + table[idx] = handler; + + return 0; +} + +static int register_direct_insn(opc_handler_t **ppc_opcodes, + unsigned char idx, opc_handler_t *handler) +{ + if (insert_in_table(ppc_opcodes, idx, handler) < 0) { + printf("*** ERROR: opcode %02x already assigned in main " + "opcode table\n", idx); +#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) + printf(" Registered handler '%s' - new handler '%s'\n", + ppc_opcodes[idx]->oname, handler->oname); +#endif + return -1; + } + + return 0; +} + +static int register_ind_in_table(opc_handler_t **table, + unsigned char idx1, unsigned char idx2, + opc_handler_t *handler) +{ + if (table[idx1] == &invalid_handler) { + if (create_new_table(table, idx1) < 0) { + printf("*** ERROR: unable to create indirect table " + "idx=%02x\n", idx1); + return -1; + } + } else { + if (!is_indirect_opcode(table[idx1])) { + printf("*** ERROR: idx %02x already assigned to a direct " + "opcode\n", idx1); +#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) + printf(" Registered handler '%s' - new handler '%s'\n", + ind_table(table[idx1])[idx2]->oname, handler->oname); +#endif + return -1; + } + } + if (handler != NULL && + insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) { + printf("*** ERROR: opcode %02x already assigned in " + "opcode table %02x\n", idx2, idx1); +#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU) + printf(" Registered 
handler '%s' - new handler '%s'\n", + ind_table(table[idx1])[idx2]->oname, handler->oname); +#endif + return -1; + } + + return 0; +} + +static int register_ind_insn(opc_handler_t **ppc_opcodes, + unsigned char idx1, unsigned char idx2, + opc_handler_t *handler) +{ + return register_ind_in_table(ppc_opcodes, idx1, idx2, handler); +} + +static int register_dblind_insn(opc_handler_t **ppc_opcodes, + unsigned char idx1, unsigned char idx2, + unsigned char idx3, opc_handler_t *handler) +{ + if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) { + printf("*** ERROR: unable to join indirect table idx " + "[%02x-%02x]\n", idx1, idx2); + return -1; + } + if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3, + handler) < 0) { + printf("*** ERROR: unable to insert opcode " + "[%02x-%02x-%02x]\n", idx1, idx2, idx3); + return -1; + } + + return 0; +} + +static int register_trplind_insn(opc_handler_t **ppc_opcodes, + unsigned char idx1, unsigned char idx2, + unsigned char idx3, unsigned char idx4, + opc_handler_t *handler) +{ + opc_handler_t **table; + + if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) { + printf("*** ERROR: unable to join indirect table idx " + "[%02x-%02x]\n", idx1, idx2); + return -1; + } + table = ind_table(ppc_opcodes[idx1]); + if (register_ind_in_table(table, idx2, idx3, NULL) < 0) { + printf("*** ERROR: unable to join 2nd-level indirect table idx " + "[%02x-%02x-%02x]\n", idx1, idx2, idx3); + return -1; + } + table = ind_table(table[idx2]); + if (register_ind_in_table(table, idx3, idx4, handler) < 0) { + printf("*** ERROR: unable to insert opcode " + "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4); + return -1; + } + return 0; +} +static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn) +{ + if (insn->opc2 != 0xFF) { + if (insn->opc3 != 0xFF) { + if (insn->opc4 != 0xFF) { + if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2, + insn->opc3, insn->opc4, + &insn->handler) < 0) { + return -1; + } + } else { + if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2, + insn->opc3, &insn->handler) < 0) { + return -1; + } + } + } else { + if (register_ind_insn(ppc_opcodes, insn->opc1, + insn->opc2, &insn->handler) < 0) { + return -1; + } + } + } else { + if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) { + return -1; + } + } + + return 0; +} + +static int test_opcode_table(opc_handler_t **table, int len) +{ + int i, count, tmp; + + for (i = 0, count = 0; i < len; i++) { + /* Consistency fixup */ + if (table[i] == NULL) { + table[i] = &invalid_handler; + } + if (table[i] != &invalid_handler) { + if (is_indirect_opcode(table[i])) { + tmp = test_opcode_table(ind_table(table[i]), + PPC_CPU_INDIRECT_OPCODES_LEN); + if (tmp == 0) { + free(table[i]); + table[i] = &invalid_handler; + } else { + count++; + } + } else { + count++; + } + } + } + + return count; +} + +static void fix_opcode_tables(opc_handler_t **ppc_opcodes) +{ + if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) { + printf("*** WARNING: no opcode defined !\n"); + } +} + +/*****************************************************************************/ +static int create_ppc_opcodes(PowerPCCPU *cpu) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + opcode_t *opc; + + fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN); + for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) { + if (((opc->handler.type & pcc->insns_flags) != 0) || + ((opc->handler.type2 & pcc->insns_flags2) != 0)) { + if (register_insn(cpu->opcodes, 
opc) < 0) { +#if 0 + error_setg(errp, "ERROR initializing PowerPC instruction " + "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2, + opc->opc3); +#endif + return 1; + } + } + } + fix_opcode_tables(cpu->opcodes); + fflush(stdout); + fflush(stderr); + return 0; +} + +#if defined(PPC_DUMP_CPU) +static void dump_ppc_insns(CPUPPCState *env) +{ + opc_handler_t **table, *handler; + const char *p, *q; + uint8_t opc1, opc2, opc3, opc4; + + printf("Instructions set:\n"); + /* opc1 is 6 bits long */ + for (opc1 = 0x00; opc1 < PPC_CPU_OPCODES_LEN; opc1++) { + table = env->opcodes; + handler = table[opc1]; + if (is_indirect_opcode(handler)) { + /* opc2 is 5 bits long */ + for (opc2 = 0; opc2 < PPC_CPU_INDIRECT_OPCODES_LEN; opc2++) { + table = env->opcodes; + handler = env->opcodes[opc1]; + table = ind_table(handler); + handler = table[opc2]; + if (is_indirect_opcode(handler)) { + table = ind_table(handler); + /* opc3 is 5 bits long */ + for (opc3 = 0; opc3 < PPC_CPU_INDIRECT_OPCODES_LEN; + opc3++) { + handler = table[opc3]; + if (is_indirect_opcode(handler)) { + table = ind_table(handler); + /* opc4 is 5 bits long */ + for (opc4 = 0; opc4 < PPC_CPU_INDIRECT_OPCODES_LEN; + opc4++) { + handler = table[opc4]; + if (handler->handler != &gen_invalid) { + printf("INSN: %02x %02x %02x %02x -- " + "(%02d %04d %02d) : %s\n", + opc1, opc2, opc3, opc4, + opc1, (opc3 << 5) | opc2, opc4, + handler->oname); + } + } + } else { + if (handler->handler != &gen_invalid) { + /* Special hack to properly dump SPE insns */ + p = strchr(handler->oname, '_'); + if (p == NULL) { + printf("INSN: %02x %02x %02x (%02d %04d) : " + "%s\n", + opc1, opc2, opc3, opc1, + (opc3 << 5) | opc2, + handler->oname); + } else { + q = "speundef"; + if ((p - handler->oname) != strlen(q) + || (memcmp(handler->oname, q, strlen(q)) + != 0)) { + /* First instruction */ + printf("INSN: %02x %02x %02x" + "(%02d %04d) : %.*s\n", + opc1, opc2 << 1, opc3, opc1, + (opc3 << 6) | (opc2 << 1), + (int)(p - handler->oname), + handler->oname); + } + if (strcmp(p + 1, q) != 0) { + /* Second instruction */ + printf("INSN: %02x %02x %02x " + "(%02d %04d) : %s\n", opc1, + (opc2 << 1) | 1, opc3, opc1, + (opc3 << 6) | (opc2 << 1) | 1, + p + 1); + } + } + } + } + } + } else { + if (handler->handler != &gen_invalid) { + printf("INSN: %02x %02x -- (%02d %04d) : %s\n", + opc1, opc2, opc1, opc2, handler->oname); + } + } + } + } else { + if (handler->handler != &gen_invalid) { + printf("INSN: %02x -- -- (%02d ----) : %s\n", + opc1, opc1, handler->oname); + } + } + } +} +#endif + +#if 0 +static bool avr_need_swap(CPUPPCState *env) +{ +#ifdef HOST_WORDS_BIGENDIAN + return msr_le; +#else + return !msr_le; +#endif +} + +static int gdb_find_spr_idx(CPUPPCState *env, int n) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) { + ppc_spr_t *spr = &env->spr_cb[i]; + + if (spr->name && spr->gdb_id == n) { + return i; + } + } + return -1; +} + +static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n) +{ + int reg; + int len; + + reg = gdb_find_spr_idx(env, n); + if (reg < 0) { + return 0; + } + + len = TARGET_LONG_SIZE; + gdb_get_regl(buf, env->spr[reg]); + ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, len), len); + return len; +} + +static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +{ + int reg; + int len; + + reg = gdb_find_spr_idx(env, n); + if (reg < 0) { + return 0; + } + + len = TARGET_LONG_SIZE; + ppc_maybe_bswap_register(env, mem_buf, len); + env->spr[reg] = ldn_p(mem_buf, len); + + return len; +} + +static int 
gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n) +{ + uint8_t *mem_buf; + if (n < 32) { + gdb_get_reg64(buf, *cpu_fpr_ptr(env, n)); + mem_buf = gdb_get_reg_ptr(buf, 8); + ppc_maybe_bswap_register(env, mem_buf, 8); + return 8; + } + if (n == 32) { + gdb_get_reg32(buf, env->fpscr); + mem_buf = gdb_get_reg_ptr(buf, 4); + ppc_maybe_bswap_register(env, mem_buf, 4); + return 4; + } + return 0; +} + +static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +{ + if (n < 32) { + ppc_maybe_bswap_register(env, mem_buf, 8); + *cpu_fpr_ptr(env, n) = ldfq_p(mem_buf); + return 8; + } + if (n == 32) { + ppc_maybe_bswap_register(env, mem_buf, 4); + helper_store_fpscr(env, ldl_p(mem_buf), 0xffffffff); + return 4; + } + return 0; +} + +static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n) +{ + uint8_t *mem_buf; + + if (n < 32) { + ppc_avr_t *avr = cpu_avr_ptr(env, n); + if (!avr_need_swap(env)) { + gdb_get_reg128(buf, avr->u64[0] , avr->u64[1]); + } else { + gdb_get_reg128(buf, avr->u64[1] , avr->u64[0]); + } + mem_buf = gdb_get_reg_ptr(buf, 16); + ppc_maybe_bswap_register(env, mem_buf, 8); + ppc_maybe_bswap_register(env, mem_buf + 8, 8); + return 16; + } + if (n == 32) { + gdb_get_reg32(buf, helper_mfvscr(env)); + mem_buf = gdb_get_reg_ptr(buf, 4); + ppc_maybe_bswap_register(env, mem_buf, 4); + return 4; + } + if (n == 33) { + gdb_get_reg32(buf, (uint32_t)env->spr[SPR_VRSAVE]); + mem_buf = gdb_get_reg_ptr(buf, 4); + ppc_maybe_bswap_register(env, mem_buf, 4); + return 4; + } + return 0; +} + +static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +{ + if (n < 32) { + ppc_avr_t *avr = cpu_avr_ptr(env, n); + ppc_maybe_bswap_register(env, mem_buf, 8); + ppc_maybe_bswap_register(env, mem_buf + 8, 8); + if (!avr_need_swap(env)) { + avr->u64[0] = ldq_p(mem_buf); + avr->u64[1] = ldq_p(mem_buf + 8); + } else { + avr->u64[1] = ldq_p(mem_buf); + avr->u64[0] = ldq_p(mem_buf + 8); + } + return 16; + } + if (n == 32) { + ppc_maybe_bswap_register(env, mem_buf, 4); + helper_mtvscr(env, ldl_p(mem_buf)); + return 4; + } + if (n == 33) { + ppc_maybe_bswap_register(env, mem_buf, 4); + env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf); + return 4; + } + return 0; +} + +static int gdb_get_spe_reg(CPUPPCState *env, GByteArray *buf, int n) +{ + if (n < 32) { +#if defined(TARGET_PPC64) + gdb_get_reg32(buf, env->gpr[n] >> 32); + ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 4), 4); +#else + gdb_get_reg32(buf, env->gprh[n]); +#endif + return 4; + } + if (n == 32) { + gdb_get_reg64(buf, env->spe_acc); + ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8); + return 8; + } + if (n == 33) { + gdb_get_reg32(buf, env->spe_fscr); + ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 4), 4); + return 4; + } + return 0; +} + +static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n) +{ + if (n < 32) { +#if defined(TARGET_PPC64) + target_ulong lo = (uint32_t)env->gpr[n]; + target_ulong hi; + + ppc_maybe_bswap_register(env, mem_buf, 4); + + hi = (target_ulong)ldl_p(mem_buf) << 32; + env->gpr[n] = lo | hi; +#else + env->gprh[n] = ldl_p(mem_buf); +#endif + return 4; + } + if (n == 32) { + ppc_maybe_bswap_register(env, mem_buf, 8); + env->spe_acc = ldq_p(mem_buf); + return 8; + } + if (n == 33) { + ppc_maybe_bswap_register(env, mem_buf, 4); + env->spe_fscr = ldl_p(mem_buf); + return 4; + } + return 0; +} + +static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n) +{ + if (n < 32) { + gdb_get_reg64(buf, *cpu_vsrl_ptr(env, n)); + ppc_maybe_bswap_register(env, 
gdb_get_reg_ptr(buf, 8), 8);
+        return 8;
+    }
+    return 0;
+}
+
+static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+    if (n < 32) {
+        ppc_maybe_bswap_register(env, mem_buf, 8);
+        *cpu_vsrl_ptr(env, n) = ldq_p(mem_buf);
+        return 8;
+    }
+    return 0;
+}
+#endif
+
+static int ppc_fixup_cpu(PowerPCCPU *cpu)
+{
+    CPUPPCState *env = &cpu->env;
+
+    /*
+     * TCG doesn't (yet) emulate some groups of instructions that are
+     * implemented on some otherwise supported CPUs (e.g. VSX and
+     * decimal floating point instructions on POWER7). We remove
+     * unsupported instruction groups from the cpu state's instruction
+     * masks and hope the guest can cope. For at least the pseries
+     * machine, the unavailability of these instructions can be
+     * advertised to the guest via the device tree.
+     */
+    if ((env->insns_flags & ~PPC_TCG_INSNS)
+        || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
+#if 0
+        warn_report("Disabling some instructions which are not "
+                    "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
+                    env->insns_flags & ~PPC_TCG_INSNS,
+                    env->insns_flags2 & ~PPC_TCG_INSNS2);
+#endif
+    }
+    env->insns_flags &= PPC_TCG_INSNS;
+    env->insns_flags2 &= PPC_TCG_INSNS2;
+    return 0;
+}
+
+static void ppc_cpu_realize(struct uc_struct *uc, CPUState *dev)
+{
+    CPUState *cs = CPU(dev);
+    PowerPCCPU *cpu = POWERPC_CPU(dev);
+#if defined(PPC_DUMP_CPU)
+    /* pcc is only used by the PPC_DUMP_CPU dump below */
+    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+#endif
+
+    cpu_exec_realizefn(cs);
+    if (cpu->vcpu_id == UNASSIGNED_CPU_INDEX) {
+        cpu->vcpu_id = cs->cpu_index;
+    }
+
+    if (ppc_fixup_cpu(cpu) != 0) {
+        goto unrealize;
+    }
+
+    if (create_ppc_opcodes(cpu) != 0) {
+        goto unrealize;
+    }
+
+    init_ppc_proc(cpu);
+
+#if defined(PPC_DUMP_CPU)
+    {
+        CPUPPCState *env = &cpu->env;
+        const char *mmu_model, *excp_model, *bus_model;
+        switch (env->mmu_model) {
+        case POWERPC_MMU_32B:
+            mmu_model = "PowerPC 32";
+            break;
+        case POWERPC_MMU_SOFT_6xx:
+            mmu_model = "PowerPC 6xx/7xx with software driven TLBs";
+            break;
+        case POWERPC_MMU_SOFT_74xx:
+            mmu_model = "PowerPC 74xx with software driven TLBs";
+            break;
+        case POWERPC_MMU_SOFT_4xx:
+            mmu_model = "PowerPC 4xx with software driven TLBs";
+            break;
+        case POWERPC_MMU_SOFT_4xx_Z:
+            mmu_model = "PowerPC 4xx with software driven TLBs "
+                "and zone protection";
+            break;
+        case POWERPC_MMU_REAL:
+            mmu_model = "PowerPC real mode only";
+            break;
+        case POWERPC_MMU_MPC8xx:
+            mmu_model = "PowerPC MPC8xx";
+            break;
+        case POWERPC_MMU_BOOKE:
+            mmu_model = "PowerPC BookE";
+            break;
+        case POWERPC_MMU_BOOKE206:
+            mmu_model = "PowerPC BookE 2.06";
+            break;
+        case POWERPC_MMU_601:
+            mmu_model = "PowerPC 601";
+            break;
+#if defined(TARGET_PPC64)
+        case POWERPC_MMU_64B:
+            mmu_model = "PowerPC 64";
+            break;
+#endif
+        default:
+            mmu_model = "Unknown or invalid";
+            break;
+        }
+        switch (env->excp_model) {
+        case POWERPC_EXCP_STD:
+            excp_model = "PowerPC";
+            break;
+        case POWERPC_EXCP_40x:
+            excp_model = "PowerPC 40x";
+            break;
+        case POWERPC_EXCP_601:
+            excp_model = "PowerPC 601";
+            break;
+        case POWERPC_EXCP_602:
+            excp_model = "PowerPC 602";
+            break;
+        case POWERPC_EXCP_603:
+            excp_model = "PowerPC 603";
+            break;
+        case POWERPC_EXCP_603E:
+            excp_model = "PowerPC 603e";
+            break;
+        case POWERPC_EXCP_604:
+            excp_model = "PowerPC 604";
+            break;
+        case POWERPC_EXCP_7x0:
+            excp_model = "PowerPC 740/750";
+            break;
+        case POWERPC_EXCP_7x5:
+            excp_model = "PowerPC 745/755";
+            break;
+        case POWERPC_EXCP_74xx:
+            excp_model = "PowerPC 74xx";
+            break;
+        case POWERPC_EXCP_BOOKE:
+            excp_model = "PowerPC BookE";
+            break;
+#if defined(TARGET_PPC64)
+        case POWERPC_EXCP_970:
+ excp_model = "PowerPC 970"; + break; +#endif + default: + excp_model = "Unknown or invalid"; + break; + } + switch (env->bus_model) { + case PPC_FLAGS_INPUT_6xx: + bus_model = "PowerPC 6xx"; + break; + case PPC_FLAGS_INPUT_BookE: + bus_model = "PowerPC BookE"; + break; + case PPC_FLAGS_INPUT_405: + bus_model = "PowerPC 405"; + break; + case PPC_FLAGS_INPUT_401: + bus_model = "PowerPC 401/403"; + break; + case PPC_FLAGS_INPUT_RCPU: + bus_model = "RCPU / MPC8xx"; + break; +#if defined(TARGET_PPC64) + case PPC_FLAGS_INPUT_970: + bus_model = "PowerPC 970"; + break; +#endif + default: + bus_model = "Unknown or invalid"; + break; + } + printf("PowerPC %-12s : PVR %08x MSR %016" PRIx64 "\n" + " MMU model : %s\n", + object_class_get_name(OBJECT_CLASS(pcc)), + pcc->pvr, pcc->msr_mask, mmu_model); + if (env->tlb.tlb6) { + printf(" %d %s TLB in %d ways\n", + env->nb_tlb, env->id_tlbs ? "splitted" : "merged", + env->nb_ways); + } + printf(" Exceptions model : %s\n" + " Bus model : %s\n", + excp_model, bus_model); + printf(" MSR features :\n"); + if (env->flags & POWERPC_FLAG_SPE) { + printf(" signal processing engine enable" + "\n"); + } else if (env->flags & POWERPC_FLAG_VRE) { + printf(" vector processor enable\n"); + } + if (env->flags & POWERPC_FLAG_TGPR) { + printf(" temporary GPRs\n"); + } else if (env->flags & POWERPC_FLAG_CE) { + printf(" critical input enable\n"); + } + if (env->flags & POWERPC_FLAG_SE) { + printf(" single-step trace mode\n"); + } else if (env->flags & POWERPC_FLAG_DWE) { + printf(" debug wait enable\n"); + } else if (env->flags & POWERPC_FLAG_UBLE) { + printf(" user BTB lock enable\n"); + } + if (env->flags & POWERPC_FLAG_BE) { + printf(" branch-step trace mode\n"); + } else if (env->flags & POWERPC_FLAG_DE) { + printf(" debug interrupt enable\n"); + } + if (env->flags & POWERPC_FLAG_PX) { + printf(" inclusive protection\n"); + } else if (env->flags & POWERPC_FLAG_PMM) { + printf(" performance monitor mark\n"); + } + if (env->flags == POWERPC_FLAG_NONE) { + printf(" none\n"); + } + printf(" Time-base/decrementer clock source: %s\n", + env->flags & POWERPC_FLAG_RTC_CLK ? 
"RTC clock" : "bus clock"); + dump_ppc_insns(env); + dump_ppc_sprs(env); + fflush(stdout); + } +#endif + return; + +unrealize: + cpu_exec_unrealizefn(cs); +} + +void ppc_cpu_unrealize(CPUState *dev) +{ + PowerPCCPU *cpu = POWERPC_CPU(dev); + opc_handler_t **table, **table_2; + int i, j, k; + + for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) { + if (cpu->opcodes[i] == &invalid_handler) { + continue; + } + if (is_indirect_opcode(cpu->opcodes[i])) { + table = ind_table(cpu->opcodes[i]); + for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) { + if (table[j] == &invalid_handler) { + continue; + } + if (is_indirect_opcode(table[j])) { + table_2 = ind_table(table[j]); + for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) { + if (table_2[k] != &invalid_handler && + is_indirect_opcode(table_2[k])) { + g_free((opc_handler_t *)((uintptr_t)table_2[k] & + ~PPC_INDIRECT)); + } + } + g_free((opc_handler_t *)((uintptr_t)table[j] & + ~PPC_INDIRECT)); + } + } + g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] & + ~PPC_INDIRECT)); + } + } +} + +static void ppc_cpu_set_pc(CPUState *cs, vaddr value) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + + cpu->env.nip = value; +} + +static bool ppc_cpu_has_work(CPUState *cs) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + + return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD); +} + +static void ppc_cpu_reset(CPUState *dev) +{ + CPUState *s = CPU(dev); + PowerPCCPU *cpu = POWERPC_CPU(s); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + CPUPPCState *env = &cpu->env; + target_ulong msr; + int i; + + pcc->parent_reset(dev); + + msr = (target_ulong)0; + msr |= (target_ulong)MSR_HVB; + msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */ + msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */ + msr |= (target_ulong)1 << MSR_EP; +#if defined(DO_SINGLE_STEP) && 0 + /* Single step trace mode */ + msr |= (target_ulong)1 << MSR_SE; + msr |= (target_ulong)1 << MSR_BE; +#endif + +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + msr |= (1ULL << MSR_SF); + } +#endif + + hreg_store_msr(env, msr, 1); + + env->nip = env->hreset_vector | env->excp_prefix; + if (env->mmu_model != POWERPC_MMU_REAL) { + ppc_tlb_invalidate_all(env); + } + + hreg_compute_hflags(env); +#ifdef _MSC_VER + env->reserve_addr = (target_ulong)(0ULL - 1ULL); +#else + env->reserve_addr = (target_ulong)-1ULL; +#endif + /* Be sure no exception or interrupt is pending */ + env->pending_interrupts = 0; + s->exception_index = POWERPC_EXCP_NONE; + env->error_code = 0; + ppc_irq_reset(cpu); + + /* tininess for underflow is detected before rounding */ + set_float_detect_tininess(float_tininess_before_rounding, + &env->fp_status); + + for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) { + ppc_spr_t *spr = &env->spr_cb[i]; + + if (!spr->name) { + continue; + } + env->spr[i] = spr->default_value; + } +} + +#if 0 +static bool ppc_cpu_is_big_endian(CPUState *cs) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + +// cpu_synchronize_state(cs); + + return !msr_le; +} +#endif + +static void ppc_cpu_exec_enter(CPUState *cs) +{ +#if 0 + PowerPCCPU *cpu = POWERPC_CPU(cs); + + if (cpu->vhyp) { + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + vhc->cpu_exec_enter(cpu->vhyp, cpu); + } +#endif +} + +static void ppc_cpu_exec_exit(CPUState *cs) +{ +#if 0 + PowerPCCPU *cpu = POWERPC_CPU(cs); + + if (cpu->vhyp) { + PPCVirtualHypervisorClass *vhc = + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); + vhc->cpu_exec_exit(cpu->vhyp, cpu); + } 
+#endif
+}
+
+static void ppc_cpu_instance_init(struct uc_struct *uc, CPUState *obj)
+{
+    PowerPCCPU *cpu = POWERPC_CPU(obj);
+    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+    CPUPPCState *env = &cpu->env;
+
+    env->uc = uc;
+    cpu_set_cpustate_pointers(cpu);
+    cpu->vcpu_id = UNASSIGNED_CPU_INDEX;
+
+    env->msr_mask = pcc->msr_mask;
+    env->mmu_model = pcc->mmu_model;
+    env->excp_model = pcc->excp_model;
+    env->bus_model = pcc->bus_model;
+    env->insns_flags = pcc->insns_flags;
+    env->insns_flags2 = pcc->insns_flags2;
+    env->flags = pcc->flags;
+    env->bfd_mach = pcc->bfd_mach;
+    env->check_pow = pcc->check_pow;
+
+    /*
+     * Mark HV mode as supported if the CPU has an MSR_HV bit in the
+     * msr_mask. The mask can later be cleared by PAPR mode but the hv
+     * mode support will remain, thus enforcing that we cannot use
+     * priv. instructions in guest in PAPR mode. For 970 we currently
+     * simply don't set HV in msr_mask thus simulating an "Apple mode"
+     * 970. If we ever want to support 970 HV mode, we'll have to add
+     * a processor attribute of some sort.
+     */
+    env->has_hv_mode = !!(env->msr_mask & MSR_HVB);
+
+#ifdef TARGET_PPC64
+    ppc_hash64_init(cpu);
+#endif
+}
+
+void ppc_cpu_instance_finalize(CPUState *obj)
+{
+#ifdef TARGET_PPC64
+    PowerPCCPU *cpu = POWERPC_CPU(obj);
+
+    ppc_hash64_finalize(cpu);
+#endif
+}
+
+static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr)
+{
+    return pcc->pvr == pvr;
+}
+
+#if 0
+static gchar *ppc_gdb_arch_name(CPUState *cs)
+{
+#if defined(TARGET_PPC64)
+    return g_strdup("powerpc:common64");
+#else
+    return g_strdup("powerpc:common");
+#endif
+}
+
+static void ppc_disas_set_info(CPUState *cs, disassemble_info *info)
+{
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    CPUPPCState *env = &cpu->env;
+
+    if ((env->hflags >> MSR_LE) & 1) {
+        info->endian = BFD_ENDIAN_LITTLE;
+    }
+    info->mach = env->bfd_mach;
+    if (!env->bfd_mach) {
+#ifdef TARGET_PPC64
+        info->mach = bfd_mach_ppc64;
+#else
+        info->mach = bfd_mach_ppc;
+#endif
+    }
+    info->disassembler_options = (char *)"any";
+    info->print_insn = print_insn_ppc;
+
+    info->cap_arch = CS_ARCH_PPC;
+#ifdef TARGET_PPC64
+    info->cap_mode = CS_MODE_64;
+#endif
+}
+
+static Property ppc_cpu_properties[] = {
+    DEFINE_PROP_BOOL("pre-2.8-migration", PowerPCCPU, pre_2_8_migration, false),
+    DEFINE_PROP_BOOL("pre-2.10-migration", PowerPCCPU, pre_2_10_migration,
+                     false),
+    DEFINE_PROP_BOOL("pre-3.0-migration", PowerPCCPU, pre_3_0_migration,
+                     false),
+    DEFINE_PROP_END_OF_LIST(),
+};
+#endif
+
+static void ppc_cpu_class_init(struct uc_struct *uc, CPUClass *oc)
+{
+    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+    CPUClass *cc = CPU_CLASS(oc);
+#if 0
+    DeviceClass *dc = DEVICE_CLASS(oc);
+
+    device_class_set_parent_realize(dc, ppc_cpu_realize,
+                                    &pcc->parent_realize);
+    device_class_set_parent_unrealize(dc, ppc_cpu_unrealize,
+                                      &pcc->parent_unrealize);
+#endif
+    pcc->pvr_match = ppc_pvr_match_default;
+    pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_always;
+#if 0
+    device_class_set_props(dc, ppc_cpu_properties);
+
+    device_class_set_parent_reset(dc, ppc_cpu_reset, &pcc->parent_reset);
+
+    cc->class_by_name = ppc_cpu_class_by_name;
+    pcc->parent_parse_features = cc->parse_features;
+    cc->parse_features = ppc_cpu_parse_featurestr;
+#endif
+    /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */
+    pcc->parent_reset = cc->reset;
+    /* overwrite the CPUClass->reset to arch reset: ppc_cpu_reset().
*/ + cc->reset = ppc_cpu_reset; + + cc->has_work = ppc_cpu_has_work; + cc->do_interrupt = ppc_cpu_do_interrupt; + cc->cpu_exec_interrupt = ppc_cpu_exec_interrupt; +#if 0 + cc->dump_state = ppc_cpu_dump_state; + cc->dump_statistics = ppc_cpu_dump_statistics; +#endif + cc->set_pc = ppc_cpu_set_pc; +#if 0 + cc->gdb_read_register = ppc_cpu_gdb_read_register; + cc->gdb_write_register = ppc_cpu_gdb_write_register; +#endif + cc->do_unaligned_access = ppc_cpu_do_unaligned_access; + cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug; +#if 0 + cc->vmsd = &vmstate_ppc_cpu; + cc->write_elf64_note = ppc64_cpu_write_elf64_note; + cc->write_elf32_note = ppc32_cpu_write_elf32_note; + + cc->gdb_num_core_regs = 71; + cc->gdb_get_dynamic_xml = ppc_gdb_get_dynamic_xml; +#ifdef USE_APPLE_GDB + cc->gdb_read_register = ppc_cpu_gdb_read_register_apple; + cc->gdb_write_register = ppc_cpu_gdb_write_register_apple; + cc->gdb_num_core_regs = 71 + 32; +#endif + + cc->gdb_arch_name = ppc_gdb_arch_name; +#if defined(TARGET_PPC64) + cc->gdb_core_xml_file = "power64-core.xml"; +#else + cc->gdb_core_xml_file = "power-core.xml"; +#endif + cc->virtio_is_big_endian = ppc_cpu_is_big_endian; +#endif + cc->tcg_initialize = ppc_translate_init; + cc->tlb_fill = ppc_cpu_tlb_fill; + cc->cpu_exec_enter = ppc_cpu_exec_enter; + cc->cpu_exec_exit = ppc_cpu_exec_exit; + +#if 0 + cc->disas_set_info = ppc_disas_set_info; + dc->fw_name = "PowerPC,UNKNOWN"; +#endif +} + +/* PowerPC CPU definitions from cpu-models.c*/ +typedef struct PowerPCCPUInfo { + const char *name; + uint32_t pvr; + uint32_t svr; + void (*cpu_family_class_init)(CPUClass *oc, void *data); +} PowerPCCPUInfo; + +#define POWERPC_DEF_SVR(_name, _desc, _pvr, _svr, _type) \ + { _name, _pvr, _svr, POWERPC_FAMILY_NAME(_type) }, + +#define POWERPC_DEF(_name, _pvr, _type, _desc) \ + POWERPC_DEF_SVR(_name, _desc, _pvr, POWERPC_SVR_NONE, _type) + + +static const PowerPCCPUInfo ppc_cpus[] = { + /* Embedded PowerPC */ + /* PowerPC 401 family */ + POWERPC_DEF("401", CPU_POWERPC_401, 401, + "Generic PowerPC 401") + /* PowerPC 401 cores */ + POWERPC_DEF("401a1", CPU_POWERPC_401A1, 401, + "PowerPC 401A1") + POWERPC_DEF("401b2", CPU_POWERPC_401B2, 401x2, + "PowerPC 401B2") + POWERPC_DEF("401c2", CPU_POWERPC_401C2, 401x2, + "PowerPC 401C2") + POWERPC_DEF("401d2", CPU_POWERPC_401D2, 401x2, + "PowerPC 401D2") + POWERPC_DEF("401e2", CPU_POWERPC_401E2, 401x2, + "PowerPC 401E2") + POWERPC_DEF("401f2", CPU_POWERPC_401F2, 401x2, + "PowerPC 401F2") + /* XXX: to be checked */ + POWERPC_DEF("401g2", CPU_POWERPC_401G2, 401x2, + "PowerPC 401G2") + /* PowerPC 401 microcontrollers */ + POWERPC_DEF("iop480", CPU_POWERPC_IOP480, IOP480, + "IOP480 (401 microcontroller)") + POWERPC_DEF("cobra", CPU_POWERPC_COBRA, 401, + "IBM Processor for Network Resources") + /* PowerPC 403 family */ + /* PowerPC 403 microcontrollers */ + POWERPC_DEF("403ga", CPU_POWERPC_403GA, 403, + "PowerPC 403 GA") + POWERPC_DEF("403gb", CPU_POWERPC_403GB, 403, + "PowerPC 403 GB") + POWERPC_DEF("403gc", CPU_POWERPC_403GC, 403, + "PowerPC 403 GC") + POWERPC_DEF("403gcx", CPU_POWERPC_403GCX, 403GCX, + "PowerPC 403 GCX") + /* PowerPC 405 family */ + /* PowerPC 405 cores */ + POWERPC_DEF("405d2", CPU_POWERPC_405D2, 405, + "PowerPC 405 D2") + POWERPC_DEF("405d4", CPU_POWERPC_405D4, 405, + "PowerPC 405 D4") + /* PowerPC 405 microcontrollers */ + POWERPC_DEF("405cra", CPU_POWERPC_405CRa, 405, + "PowerPC 405 CRa") + POWERPC_DEF("405crb", CPU_POWERPC_405CRb, 405, + "PowerPC 405 CRb") + POWERPC_DEF("405crc", CPU_POWERPC_405CRc, 405, + "PowerPC 
405 CRc") + POWERPC_DEF("405ep", CPU_POWERPC_405EP, 405, + "PowerPC 405 EP") + POWERPC_DEF("405ez", CPU_POWERPC_405EZ, 405, + "PowerPC 405 EZ") + POWERPC_DEF("405gpa", CPU_POWERPC_405GPa, 405, + "PowerPC 405 GPa") + POWERPC_DEF("405gpb", CPU_POWERPC_405GPb, 405, + "PowerPC 405 GPb") + POWERPC_DEF("405gpc", CPU_POWERPC_405GPc, 405, + "PowerPC 405 GPc") + POWERPC_DEF("405gpd", CPU_POWERPC_405GPd, 405, + "PowerPC 405 GPd") + POWERPC_DEF("405gpr", CPU_POWERPC_405GPR, 405, + "PowerPC 405 GPR") + POWERPC_DEF("405lp", CPU_POWERPC_405LP, 405, + "PowerPC 405 LP") + POWERPC_DEF("npe405h", CPU_POWERPC_NPE405H, 405, + "Npe405 H") + POWERPC_DEF("npe405h2", CPU_POWERPC_NPE405H2, 405, + "Npe405 H2") + POWERPC_DEF("npe405l", CPU_POWERPC_NPE405L, 405, + "Npe405 L") + POWERPC_DEF("npe4gs3", CPU_POWERPC_NPE4GS3, 405, + "Npe4GS3") + /* PowerPC 401/403/405 based set-top-box microcontrollers */ + POWERPC_DEF("stb03", CPU_POWERPC_STB03, 405, + "STB03xx") + POWERPC_DEF("stb04", CPU_POWERPC_STB04, 405, + "STB04xx") + POWERPC_DEF("stb25", CPU_POWERPC_STB25, 405, + "STB25xx") + /* Xilinx PowerPC 405 cores */ + POWERPC_DEF("x2vp4", CPU_POWERPC_X2VP4, 405, + NULL) + POWERPC_DEF("x2vp20", CPU_POWERPC_X2VP20, 405, + NULL) + /* PowerPC 440 family */ +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440", CPU_POWERPC_440, 440GP, + "Generic PowerPC 440") +#endif + /* PowerPC 440 cores */ + POWERPC_DEF("440-xilinx", CPU_POWERPC_440_XILINX, 440x5, + "PowerPC 440 Xilinx 5") + + POWERPC_DEF("440-xilinx-w-dfpu", CPU_POWERPC_440_XILINX, 440x5wDFPU, + "PowerPC 440 Xilinx 5 With a Double Prec. FPU") + /* PowerPC 440 microcontrollers */ + POWERPC_DEF("440epa", CPU_POWERPC_440EPa, 440EP, + "PowerPC 440 EPa") + POWERPC_DEF("440epb", CPU_POWERPC_440EPb, 440EP, + "PowerPC 440 EPb") + POWERPC_DEF("440epx", CPU_POWERPC_440EPX, 440EP, + "PowerPC 440 EPX") + POWERPC_DEF("460exb", CPU_POWERPC_460EXb, 460EX, + "PowerPC 460 EXb") +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gpb", CPU_POWERPC_440GPb, 440GP, + "PowerPC 440 GPb") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gpc", CPU_POWERPC_440GPc, 440GP, + "PowerPC 440 GPc") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gra", CPU_POWERPC_440GRa, 440x5, + "PowerPC 440 GRa") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440grx", CPU_POWERPC_440GRX, 440x5, + "PowerPC 440 GRX") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gxa", CPU_POWERPC_440GXa, 440EP, + "PowerPC 440 GXa") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gxb", CPU_POWERPC_440GXb, 440EP, + "PowerPC 440 GXb") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gxc", CPU_POWERPC_440GXc, 440EP, + "PowerPC 440 GXc") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440gxf", CPU_POWERPC_440GXf, 440EP, + "PowerPC 440 GXf") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440sp", CPU_POWERPC_440SP, 440EP, + "PowerPC 440 SP") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440sp2", CPU_POWERPC_440SP2, 440EP, + "PowerPC 440 SP2") +#endif +#if defined(TODO_USER_ONLY) + POWERPC_DEF("440spe", CPU_POWERPC_440SPE, 440EP, + "PowerPC 440 SPE") +#endif + /* Freescale embedded PowerPC cores */ + /* MPC5xx family (aka RCPU) */ +#if defined(TODO_USER_ONLY) + POWERPC_DEF("mpc5xx", CPU_POWERPC_MPC5xx, MPC5xx, + "Generic MPC5xx core") +#endif + /* MPC8xx family (aka PowerQUICC) */ +#if defined(TODO_USER_ONLY) + POWERPC_DEF("mpc8xx", CPU_POWERPC_MPC8xx, MPC8xx, + "Generic MPC8xx core") +#endif + /* MPC82xx family (aka PowerQUICC-II) */ + POWERPC_DEF("g2", CPU_POWERPC_G2, G2, + "PowerPC G2 core") + 
POWERPC_DEF("g2h4", CPU_POWERPC_G2H4, G2, + "PowerPC G2 H4 core") + POWERPC_DEF("g2gp", CPU_POWERPC_G2gp, G2, + "PowerPC G2 GP core") + POWERPC_DEF("g2ls", CPU_POWERPC_G2ls, G2, + "PowerPC G2 LS core") + POWERPC_DEF("g2hip3", CPU_POWERPC_G2_HIP3, G2, + "PowerPC G2 HiP3 core") + POWERPC_DEF("g2hip4", CPU_POWERPC_G2_HIP4, G2, + "PowerPC G2 HiP4 core") + POWERPC_DEF("mpc603", CPU_POWERPC_MPC603, 603E, + "PowerPC MPC603 core") + POWERPC_DEF("g2le", CPU_POWERPC_G2LE, G2LE, + "PowerPC G2le core (same as G2 plus little-endian mode support)") + POWERPC_DEF("g2legp", CPU_POWERPC_G2LEgp, G2LE, + "PowerPC G2LE GP core") + POWERPC_DEF("g2lels", CPU_POWERPC_G2LEls, G2LE, + "PowerPC G2LE LS core") + POWERPC_DEF("g2legp1", CPU_POWERPC_G2LEgp1, G2LE, + "PowerPC G2LE GP1 core") + POWERPC_DEF("g2legp3", CPU_POWERPC_G2LEgp3, G2LE, + "PowerPC G2LE GP3 core") + /* PowerPC G2 microcontrollers */ + POWERPC_DEF_SVR("mpc5200_v10", "MPC5200 v1.0", + CPU_POWERPC_MPC5200_v10, POWERPC_SVR_5200_v10, G2LE) + POWERPC_DEF_SVR("mpc5200_v11", "MPC5200 v1.1", + CPU_POWERPC_MPC5200_v11, POWERPC_SVR_5200_v11, G2LE) + POWERPC_DEF_SVR("mpc5200_v12", "MPC5200 v1.2", + CPU_POWERPC_MPC5200_v12, POWERPC_SVR_5200_v12, G2LE) + POWERPC_DEF_SVR("mpc5200b_v20", "MPC5200B v2.0", + CPU_POWERPC_MPC5200B_v20, POWERPC_SVR_5200B_v20, G2LE) + POWERPC_DEF_SVR("mpc5200b_v21", "MPC5200B v2.1", + CPU_POWERPC_MPC5200B_v21, POWERPC_SVR_5200B_v21, G2LE) + /* e200 family */ + POWERPC_DEF("e200z5", CPU_POWERPC_e200z5, e200, + "PowerPC e200z5 core") + POWERPC_DEF("e200z6", CPU_POWERPC_e200z6, e200, + "PowerPC e200z6 core") + /* e300 family */ + POWERPC_DEF("e300c1", CPU_POWERPC_e300c1, e300, + "PowerPC e300c1 core") + POWERPC_DEF("e300c2", CPU_POWERPC_e300c2, e300, + "PowerPC e300c2 core") + POWERPC_DEF("e300c3", CPU_POWERPC_e300c3, e300, + "PowerPC e300c3 core") + POWERPC_DEF("e300c4", CPU_POWERPC_e300c4, e300, + "PowerPC e300c4 core") + /* PowerPC e300 microcontrollers */ + POWERPC_DEF_SVR("mpc8343", "MPC8343", + CPU_POWERPC_MPC834x, POWERPC_SVR_8343, e300) + POWERPC_DEF_SVR("mpc8343a", "MPC8343A", + CPU_POWERPC_MPC834x, POWERPC_SVR_8343A, e300) + POWERPC_DEF_SVR("mpc8343e", "MPC8343E", + CPU_POWERPC_MPC834x, POWERPC_SVR_8343E, e300) + POWERPC_DEF_SVR("mpc8343ea", "MPC8343EA", + CPU_POWERPC_MPC834x, POWERPC_SVR_8343EA, e300) + POWERPC_DEF_SVR("mpc8347t", "MPC8347T", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347T, e300) + POWERPC_DEF_SVR("mpc8347p", "MPC8347P", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347P, e300) + POWERPC_DEF_SVR("mpc8347at", "MPC8347AT", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347AT, e300) + POWERPC_DEF_SVR("mpc8347ap", "MPC8347AP", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347AP, e300) + POWERPC_DEF_SVR("mpc8347et", "MPC8347ET", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347ET, e300) + POWERPC_DEF_SVR("mpc8347ep", "MPC8343EP", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347EP, e300) + POWERPC_DEF_SVR("mpc8347eat", "MPC8347EAT", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAT, e300) + POWERPC_DEF_SVR("mpc8347eap", "MPC8343EAP", + CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAP, e300) + POWERPC_DEF_SVR("mpc8349", "MPC8349", + CPU_POWERPC_MPC834x, POWERPC_SVR_8349, e300) + POWERPC_DEF_SVR("mpc8349a", "MPC8349A", + CPU_POWERPC_MPC834x, POWERPC_SVR_8349A, e300) + POWERPC_DEF_SVR("mpc8349e", "MPC8349E", + CPU_POWERPC_MPC834x, POWERPC_SVR_8349E, e300) + POWERPC_DEF_SVR("mpc8349ea", "MPC8349EA", + CPU_POWERPC_MPC834x, POWERPC_SVR_8349EA, e300) + POWERPC_DEF_SVR("mpc8377", "MPC8377", + CPU_POWERPC_MPC837x, POWERPC_SVR_8377, e300) + POWERPC_DEF_SVR("mpc8377e", "MPC8377E", + 
CPU_POWERPC_MPC837x, POWERPC_SVR_8377E, e300) + POWERPC_DEF_SVR("mpc8378", "MPC8378", + CPU_POWERPC_MPC837x, POWERPC_SVR_8378, e300) + POWERPC_DEF_SVR("mpc8378e", "MPC8378E", + CPU_POWERPC_MPC837x, POWERPC_SVR_8378E, e300) + POWERPC_DEF_SVR("mpc8379", "MPC8379", + CPU_POWERPC_MPC837x, POWERPC_SVR_8379, e300) + POWERPC_DEF_SVR("mpc8379e", "MPC8379E", + CPU_POWERPC_MPC837x, POWERPC_SVR_8379E, e300) + /* e500 family */ + POWERPC_DEF_SVR("e500_v10", "PowerPC e500 v1.0 core", + CPU_POWERPC_e500v1_v10, POWERPC_SVR_E500, e500v1) + POWERPC_DEF_SVR("e500_v20", "PowerPC e500 v2.0 core", + CPU_POWERPC_e500v1_v20, POWERPC_SVR_E500, e500v1) + POWERPC_DEF_SVR("e500v2_v10", "PowerPC e500v2 v1.0 core", + CPU_POWERPC_e500v2_v10, POWERPC_SVR_E500, e500v2) + POWERPC_DEF_SVR("e500v2_v20", "PowerPC e500v2 v2.0 core", + CPU_POWERPC_e500v2_v20, POWERPC_SVR_E500, e500v2) + POWERPC_DEF_SVR("e500v2_v21", "PowerPC e500v2 v2.1 core", + CPU_POWERPC_e500v2_v21, POWERPC_SVR_E500, e500v2) + POWERPC_DEF_SVR("e500v2_v22", "PowerPC e500v2 v2.2 core", + CPU_POWERPC_e500v2_v22, POWERPC_SVR_E500, e500v2) + POWERPC_DEF_SVR("e500v2_v30", "PowerPC e500v2 v3.0 core", + CPU_POWERPC_e500v2_v30, POWERPC_SVR_E500, e500v2) + POWERPC_DEF_SVR("e500mc", "e500mc", + CPU_POWERPC_e500mc, POWERPC_SVR_E500, e500mc) +#ifdef TARGET_PPC64 + POWERPC_DEF_SVR("e5500", "e5500", + CPU_POWERPC_e5500, POWERPC_SVR_E500, e5500) + POWERPC_DEF_SVR("e6500", "e6500", + CPU_POWERPC_e6500, POWERPC_SVR_E500, e6500) +#endif + /* PowerPC e500 microcontrollers */ + POWERPC_DEF_SVR("mpc8533_v10", "MPC8533 v1.0", + CPU_POWERPC_MPC8533_v10, POWERPC_SVR_8533_v10, e500v2) + POWERPC_DEF_SVR("mpc8533_v11", "MPC8533 v1.1", + CPU_POWERPC_MPC8533_v11, POWERPC_SVR_8533_v11, e500v2) + POWERPC_DEF_SVR("mpc8533e_v10", "MPC8533E v1.0", + CPU_POWERPC_MPC8533E_v10, POWERPC_SVR_8533E_v10, e500v2) + POWERPC_DEF_SVR("mpc8533e_v11", "MPC8533E v1.1", + CPU_POWERPC_MPC8533E_v11, POWERPC_SVR_8533E_v11, e500v2) + POWERPC_DEF_SVR("mpc8540_v10", "MPC8540 v1.0", + CPU_POWERPC_MPC8540_v10, POWERPC_SVR_8540_v10, e500v1) + POWERPC_DEF_SVR("mpc8540_v20", "MPC8540 v2.0", + CPU_POWERPC_MPC8540_v20, POWERPC_SVR_8540_v20, e500v1) + POWERPC_DEF_SVR("mpc8540_v21", "MPC8540 v2.1", + CPU_POWERPC_MPC8540_v21, POWERPC_SVR_8540_v21, e500v1) + POWERPC_DEF_SVR("mpc8541_v10", "MPC8541 v1.0", + CPU_POWERPC_MPC8541_v10, POWERPC_SVR_8541_v10, e500v1) + POWERPC_DEF_SVR("mpc8541_v11", "MPC8541 v1.1", + CPU_POWERPC_MPC8541_v11, POWERPC_SVR_8541_v11, e500v1) + POWERPC_DEF_SVR("mpc8541e_v10", "MPC8541E v1.0", + CPU_POWERPC_MPC8541E_v10, POWERPC_SVR_8541E_v10, e500v1) + POWERPC_DEF_SVR("mpc8541e_v11", "MPC8541E v1.1", + CPU_POWERPC_MPC8541E_v11, POWERPC_SVR_8541E_v11, e500v1) + POWERPC_DEF_SVR("mpc8543_v10", "MPC8543 v1.0", + CPU_POWERPC_MPC8543_v10, POWERPC_SVR_8543_v10, e500v2) + POWERPC_DEF_SVR("mpc8543_v11", "MPC8543 v1.1", + CPU_POWERPC_MPC8543_v11, POWERPC_SVR_8543_v11, e500v2) + POWERPC_DEF_SVR("mpc8543_v20", "MPC8543 v2.0", + CPU_POWERPC_MPC8543_v20, POWERPC_SVR_8543_v20, e500v2) + POWERPC_DEF_SVR("mpc8543_v21", "MPC8543 v2.1", + CPU_POWERPC_MPC8543_v21, POWERPC_SVR_8543_v21, e500v2) + POWERPC_DEF_SVR("mpc8543e_v10", "MPC8543E v1.0", + CPU_POWERPC_MPC8543E_v10, POWERPC_SVR_8543E_v10, e500v2) + POWERPC_DEF_SVR("mpc8543e_v11", "MPC8543E v1.1", + CPU_POWERPC_MPC8543E_v11, POWERPC_SVR_8543E_v11, e500v2) + POWERPC_DEF_SVR("mpc8543e_v20", "MPC8543E v2.0", + CPU_POWERPC_MPC8543E_v20, POWERPC_SVR_8543E_v20, e500v2) + POWERPC_DEF_SVR("mpc8543e_v21", "MPC8543E v2.1", + CPU_POWERPC_MPC8543E_v21, POWERPC_SVR_8543E_v21, 
e500v2) + POWERPC_DEF_SVR("mpc8544_v10", "MPC8544 v1.0", + CPU_POWERPC_MPC8544_v10, POWERPC_SVR_8544_v10, e500v2) + POWERPC_DEF_SVR("mpc8544_v11", "MPC8544 v1.1", + CPU_POWERPC_MPC8544_v11, POWERPC_SVR_8544_v11, e500v2) + POWERPC_DEF_SVR("mpc8544e_v10", "MPC8544E v1.0", + CPU_POWERPC_MPC8544E_v10, POWERPC_SVR_8544E_v10, e500v2) + POWERPC_DEF_SVR("mpc8544e_v11", "MPC8544E v1.1", + CPU_POWERPC_MPC8544E_v11, POWERPC_SVR_8544E_v11, e500v2) + POWERPC_DEF_SVR("mpc8545_v20", "MPC8545 v2.0", + CPU_POWERPC_MPC8545_v20, POWERPC_SVR_8545_v20, e500v2) + POWERPC_DEF_SVR("mpc8545_v21", "MPC8545 v2.1", + CPU_POWERPC_MPC8545_v21, POWERPC_SVR_8545_v21, e500v2) + POWERPC_DEF_SVR("mpc8545e_v20", "MPC8545E v2.0", + CPU_POWERPC_MPC8545E_v20, POWERPC_SVR_8545E_v20, e500v2) + POWERPC_DEF_SVR("mpc8545e_v21", "MPC8545E v2.1", + CPU_POWERPC_MPC8545E_v21, POWERPC_SVR_8545E_v21, e500v2) + POWERPC_DEF_SVR("mpc8547e_v20", "MPC8547E v2.0", + CPU_POWERPC_MPC8547E_v20, POWERPC_SVR_8547E_v20, e500v2) + POWERPC_DEF_SVR("mpc8547e_v21", "MPC8547E v2.1", + CPU_POWERPC_MPC8547E_v21, POWERPC_SVR_8547E_v21, e500v2) + POWERPC_DEF_SVR("mpc8548_v10", "MPC8548 v1.0", + CPU_POWERPC_MPC8548_v10, POWERPC_SVR_8548_v10, e500v2) + POWERPC_DEF_SVR("mpc8548_v11", "MPC8548 v1.1", + CPU_POWERPC_MPC8548_v11, POWERPC_SVR_8548_v11, e500v2) + POWERPC_DEF_SVR("mpc8548_v20", "MPC8548 v2.0", + CPU_POWERPC_MPC8548_v20, POWERPC_SVR_8548_v20, e500v2) + POWERPC_DEF_SVR("mpc8548_v21", "MPC8548 v2.1", + CPU_POWERPC_MPC8548_v21, POWERPC_SVR_8548_v21, e500v2) + POWERPC_DEF_SVR("mpc8548e_v10", "MPC8548E v1.0", + CPU_POWERPC_MPC8548E_v10, POWERPC_SVR_8548E_v10, e500v2) + POWERPC_DEF_SVR("mpc8548e_v11", "MPC8548E v1.1", + CPU_POWERPC_MPC8548E_v11, POWERPC_SVR_8548E_v11, e500v2) + POWERPC_DEF_SVR("mpc8548e_v20", "MPC8548E v2.0", + CPU_POWERPC_MPC8548E_v20, POWERPC_SVR_8548E_v20, e500v2) + POWERPC_DEF_SVR("mpc8548e_v21", "MPC8548E v2.1", + CPU_POWERPC_MPC8548E_v21, POWERPC_SVR_8548E_v21, e500v2) + POWERPC_DEF_SVR("mpc8555_v10", "MPC8555 v1.0", + CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v2) + POWERPC_DEF_SVR("mpc8555_v11", "MPC8555 v1.1", + CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v2) + POWERPC_DEF_SVR("mpc8555e_v10", "MPC8555E v1.0", + CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v2) + POWERPC_DEF_SVR("mpc8555e_v11", "MPC8555E v1.1", + CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v2) + POWERPC_DEF_SVR("mpc8560_v10", "MPC8560 v1.0", + CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v2) + POWERPC_DEF_SVR("mpc8560_v20", "MPC8560 v2.0", + CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v2) + POWERPC_DEF_SVR("mpc8560_v21", "MPC8560 v2.1", + CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v2) + POWERPC_DEF_SVR("mpc8567", "MPC8567", + CPU_POWERPC_MPC8567, POWERPC_SVR_8567, e500v2) + POWERPC_DEF_SVR("mpc8567e", "MPC8567E", + CPU_POWERPC_MPC8567E, POWERPC_SVR_8567E, e500v2) + POWERPC_DEF_SVR("mpc8568", "MPC8568", + CPU_POWERPC_MPC8568, POWERPC_SVR_8568, e500v2) + POWERPC_DEF_SVR("mpc8568e", "MPC8568E", + CPU_POWERPC_MPC8568E, POWERPC_SVR_8568E, e500v2) + POWERPC_DEF_SVR("mpc8572", "MPC8572", + CPU_POWERPC_MPC8572, POWERPC_SVR_8572, e500v2) + POWERPC_DEF_SVR("mpc8572e", "MPC8572E", + CPU_POWERPC_MPC8572E, POWERPC_SVR_8572E, e500v2) + /* e600 family */ + POWERPC_DEF("e600", CPU_POWERPC_e600, e600, + "PowerPC e600 core") + /* PowerPC e600 microcontrollers */ + POWERPC_DEF_SVR("mpc8610", "MPC8610", + CPU_POWERPC_MPC8610, POWERPC_SVR_8610, e600) + POWERPC_DEF_SVR("mpc8641", "MPC8641", + CPU_POWERPC_MPC8641, POWERPC_SVR_8641, e600) + 
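+/*
+ * pvr identifies the silicon core while svr identifies the SoC variant:
+ * the many MPC85xx entries above share a handful of e500v1/e500v2 PVRs
+ * and differ only in their POWERPC_SVR_* value. A hypothetical lookup
+ * helper, equivalent to the loop in cpu_ppc_init() below:
+ *
+ *     static const PowerPCCPUInfo *ppc_find_cpu(const char *name)
+ *     {
+ *         int i;
+ *         for (i = 0; i < ARRAY_SIZE(ppc_cpus); i++) {
+ *             if (strcmp(ppc_cpus[i].name, name) == 0) {
+ *                 return &ppc_cpus[i];
+ *             }
+ *         }
+ *         return NULL;  /* unknown model string */
+ *     }
+ */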
POWERPC_DEF_SVR("mpc8641d", "MPC8641D", + CPU_POWERPC_MPC8641D, POWERPC_SVR_8641D, e600) + /* 32 bits "classic" PowerPC */ + /* PowerPC 6xx family */ + POWERPC_DEF("601_v0", CPU_POWERPC_601_v0, 601, + "PowerPC 601v0") + POWERPC_DEF("601_v1", CPU_POWERPC_601_v1, 601, + "PowerPC 601v1") + POWERPC_DEF("601_v2", CPU_POWERPC_601_v2, 601v, + "PowerPC 601v2") + POWERPC_DEF("602", CPU_POWERPC_602, 602, + "PowerPC 602") + POWERPC_DEF("603", CPU_POWERPC_603, 603, + "PowerPC 603") + POWERPC_DEF("603e_v1.1", CPU_POWERPC_603E_v11, 603E, + "PowerPC 603e v1.1") + POWERPC_DEF("603e_v1.2", CPU_POWERPC_603E_v12, 603E, + "PowerPC 603e v1.2") + POWERPC_DEF("603e_v1.3", CPU_POWERPC_603E_v13, 603E, + "PowerPC 603e v1.3") + POWERPC_DEF("603e_v1.4", CPU_POWERPC_603E_v14, 603E, + "PowerPC 603e v1.4") + POWERPC_DEF("603e_v2.2", CPU_POWERPC_603E_v22, 603E, + "PowerPC 603e v2.2") + POWERPC_DEF("603e_v3", CPU_POWERPC_603E_v3, 603E, + "PowerPC 603e v3") + POWERPC_DEF("603e_v4", CPU_POWERPC_603E_v4, 603E, + "PowerPC 603e v4") + POWERPC_DEF("603e_v4.1", CPU_POWERPC_603E_v41, 603E, + "PowerPC 603e v4.1") + POWERPC_DEF("603e7", CPU_POWERPC_603E7, 603E, + "PowerPC 603e (aka PID7)") + POWERPC_DEF("603e7t", CPU_POWERPC_603E7t, 603E, + "PowerPC 603e7t") + POWERPC_DEF("603e7v", CPU_POWERPC_603E7v, 603E, + "PowerPC 603e7v") + POWERPC_DEF("603e7v1", CPU_POWERPC_603E7v1, 603E, + "PowerPC 603e7v1") + POWERPC_DEF("603e7v2", CPU_POWERPC_603E7v2, 603E, + "PowerPC 603e7v2") + POWERPC_DEF("603p", CPU_POWERPC_603P, 603E, + "PowerPC 603p (aka PID7v)") + POWERPC_DEF("604", CPU_POWERPC_604, 604, + "PowerPC 604") + POWERPC_DEF("604e_v1.0", CPU_POWERPC_604E_v10, 604E, + "PowerPC 604e v1.0") + POWERPC_DEF("604e_v2.2", CPU_POWERPC_604E_v22, 604E, + "PowerPC 604e v2.2") + POWERPC_DEF("604e_v2.4", CPU_POWERPC_604E_v24, 604E, + "PowerPC 604e v2.4") + POWERPC_DEF("604r", CPU_POWERPC_604R, 604E, + "PowerPC 604r (aka PIDA)") + /* PowerPC 7xx family */ + POWERPC_DEF("740_v1.0", CPU_POWERPC_7x0_v10, 740, + "PowerPC 740 v1.0 (G3)") + POWERPC_DEF("750_v1.0", CPU_POWERPC_7x0_v10, 750, + "PowerPC 750 v1.0 (G3)") + POWERPC_DEF("740_v2.0", CPU_POWERPC_7x0_v20, 740, + "PowerPC 740 v2.0 (G3)") + POWERPC_DEF("750_v2.0", CPU_POWERPC_7x0_v20, 750, + "PowerPC 750 v2.0 (G3)") + POWERPC_DEF("740_v2.1", CPU_POWERPC_7x0_v21, 740, + "PowerPC 740 v2.1 (G3)") + POWERPC_DEF("750_v2.1", CPU_POWERPC_7x0_v21, 750, + "PowerPC 750 v2.1 (G3)") + POWERPC_DEF("740_v2.2", CPU_POWERPC_7x0_v22, 740, + "PowerPC 740 v2.2 (G3)") + POWERPC_DEF("750_v2.2", CPU_POWERPC_7x0_v22, 750, + "PowerPC 750 v2.2 (G3)") + POWERPC_DEF("740_v3.0", CPU_POWERPC_7x0_v30, 740, + "PowerPC 740 v3.0 (G3)") + POWERPC_DEF("750_v3.0", CPU_POWERPC_7x0_v30, 750, + "PowerPC 750 v3.0 (G3)") + POWERPC_DEF("740_v3.1", CPU_POWERPC_7x0_v31, 740, + "PowerPC 740 v3.1 (G3)") + POWERPC_DEF("750_v3.1", CPU_POWERPC_7x0_v31, 750, + "PowerPC 750 v3.1 (G3)") + POWERPC_DEF("740e", CPU_POWERPC_740E, 740, + "PowerPC 740E (G3)") + POWERPC_DEF("750e", CPU_POWERPC_750E, 750, + "PowerPC 750E (G3)") + POWERPC_DEF("740p", CPU_POWERPC_7x0P, 740, + "PowerPC 740P (G3)") + POWERPC_DEF("750p", CPU_POWERPC_7x0P, 750, + "PowerPC 750P (G3)") + POWERPC_DEF("750cl_v1.0", CPU_POWERPC_750CL_v10, 750cl, + "PowerPC 750CL v1.0") + POWERPC_DEF("750cl_v2.0", CPU_POWERPC_750CL_v20, 750cl, + "PowerPC 750CL v2.0") + POWERPC_DEF("750cx_v1.0", CPU_POWERPC_750CX_v10, 750cx, + "PowerPC 750CX v1.0 (G3 embedded)") + POWERPC_DEF("750cx_v2.0", CPU_POWERPC_750CX_v20, 750cx, + "PowerPC 750CX v2.1 (G3 embedded)") + POWERPC_DEF("750cx_v2.1", CPU_POWERPC_750CX_v21, 
750cx, + "PowerPC 750CX v2.1 (G3 embedded)") + POWERPC_DEF("750cx_v2.2", CPU_POWERPC_750CX_v22, 750cx, + "PowerPC 750CX v2.2 (G3 embedded)") + POWERPC_DEF("750cxe_v2.1", CPU_POWERPC_750CXE_v21, 750cx, + "PowerPC 750CXe v2.1 (G3 embedded)") + POWERPC_DEF("750cxe_v2.2", CPU_POWERPC_750CXE_v22, 750cx, + "PowerPC 750CXe v2.2 (G3 embedded)") + POWERPC_DEF("750cxe_v2.3", CPU_POWERPC_750CXE_v23, 750cx, + "PowerPC 750CXe v2.3 (G3 embedded)") + POWERPC_DEF("750cxe_v2.4", CPU_POWERPC_750CXE_v24, 750cx, + "PowerPC 750CXe v2.4 (G3 embedded)") + POWERPC_DEF("750cxe_v2.4b", CPU_POWERPC_750CXE_v24b, 750cx, + "PowerPC 750CXe v2.4b (G3 embedded)") + POWERPC_DEF("750cxe_v3.0", CPU_POWERPC_750CXE_v30, 750cx, + "PowerPC 750CXe v3.0 (G3 embedded)") + POWERPC_DEF("750cxe_v3.1", CPU_POWERPC_750CXE_v31, 750cx, + "PowerPC 750CXe v3.1 (G3 embedded)") + POWERPC_DEF("750cxe_v3.1b", CPU_POWERPC_750CXE_v31b, 750cx, + "PowerPC 750CXe v3.1b (G3 embedded)") + POWERPC_DEF("750cxr", CPU_POWERPC_750CXR, 750cx, + "PowerPC 750CXr (G3 embedded)") + POWERPC_DEF("750fl", CPU_POWERPC_750FL, 750fx, + "PowerPC 750FL (G3 embedded)") + POWERPC_DEF("750fx_v1.0", CPU_POWERPC_750FX_v10, 750fx, + "PowerPC 750FX v1.0 (G3 embedded)") + POWERPC_DEF("750fx_v2.0", CPU_POWERPC_750FX_v20, 750fx, + "PowerPC 750FX v2.0 (G3 embedded)") + POWERPC_DEF("750fx_v2.1", CPU_POWERPC_750FX_v21, 750fx, + "PowerPC 750FX v2.1 (G3 embedded)") + POWERPC_DEF("750fx_v2.2", CPU_POWERPC_750FX_v22, 750fx, + "PowerPC 750FX v2.2 (G3 embedded)") + POWERPC_DEF("750fx_v2.3", CPU_POWERPC_750FX_v23, 750fx, + "PowerPC 750FX v2.3 (G3 embedded)") + POWERPC_DEF("750gl", CPU_POWERPC_750GL, 750gx, + "PowerPC 750GL (G3 embedded)") + POWERPC_DEF("750gx_v1.0", CPU_POWERPC_750GX_v10, 750gx, + "PowerPC 750GX v1.0 (G3 embedded)") + POWERPC_DEF("750gx_v1.1", CPU_POWERPC_750GX_v11, 750gx, + "PowerPC 750GX v1.1 (G3 embedded)") + POWERPC_DEF("750gx_v1.2", CPU_POWERPC_750GX_v12, 750gx, + "PowerPC 750GX v1.2 (G3 embedded)") + POWERPC_DEF("750l_v2.0", CPU_POWERPC_750L_v20, 750, + "PowerPC 750L v2.0 (G3 embedded)") + POWERPC_DEF("750l_v2.1", CPU_POWERPC_750L_v21, 750, + "PowerPC 750L v2.1 (G3 embedded)") + POWERPC_DEF("750l_v2.2", CPU_POWERPC_750L_v22, 750, + "PowerPC 750L v2.2 (G3 embedded)") + POWERPC_DEF("750l_v3.0", CPU_POWERPC_750L_v30, 750, + "PowerPC 750L v3.0 (G3 embedded)") + POWERPC_DEF("750l_v3.2", CPU_POWERPC_750L_v32, 750, + "PowerPC 750L v3.2 (G3 embedded)") + POWERPC_DEF("745_v1.0", CPU_POWERPC_7x5_v10, 745, + "PowerPC 745 v1.0") + POWERPC_DEF("755_v1.0", CPU_POWERPC_7x5_v10, 755, + "PowerPC 755 v1.0") + POWERPC_DEF("745_v1.1", CPU_POWERPC_7x5_v11, 745, + "PowerPC 745 v1.1") + POWERPC_DEF("755_v1.1", CPU_POWERPC_7x5_v11, 755, + "PowerPC 755 v1.1") + POWERPC_DEF("745_v2.0", CPU_POWERPC_7x5_v20, 745, + "PowerPC 745 v2.0") + POWERPC_DEF("755_v2.0", CPU_POWERPC_7x5_v20, 755, + "PowerPC 755 v2.0") + POWERPC_DEF("745_v2.1", CPU_POWERPC_7x5_v21, 745, + "PowerPC 745 v2.1") + POWERPC_DEF("755_v2.1", CPU_POWERPC_7x5_v21, 755, + "PowerPC 755 v2.1") + POWERPC_DEF("745_v2.2", CPU_POWERPC_7x5_v22, 745, + "PowerPC 745 v2.2") + POWERPC_DEF("755_v2.2", CPU_POWERPC_7x5_v22, 755, + "PowerPC 755 v2.2") + POWERPC_DEF("745_v2.3", CPU_POWERPC_7x5_v23, 745, + "PowerPC 745 v2.3") + POWERPC_DEF("755_v2.3", CPU_POWERPC_7x5_v23, 755, + "PowerPC 755 v2.3") + POWERPC_DEF("745_v2.4", CPU_POWERPC_7x5_v24, 745, + "PowerPC 745 v2.4") + POWERPC_DEF("755_v2.4", CPU_POWERPC_7x5_v24, 755, + "PowerPC 755 v2.4") + POWERPC_DEF("745_v2.5", CPU_POWERPC_7x5_v25, 745, + "PowerPC 745 v2.5") + POWERPC_DEF("755_v2.5", 
CPU_POWERPC_7x5_v25, 755, + "PowerPC 755 v2.5") + POWERPC_DEF("745_v2.6", CPU_POWERPC_7x5_v26, 745, + "PowerPC 745 v2.6") + POWERPC_DEF("755_v2.6", CPU_POWERPC_7x5_v26, 755, + "PowerPC 755 v2.6") + POWERPC_DEF("745_v2.7", CPU_POWERPC_7x5_v27, 745, + "PowerPC 745 v2.7") + POWERPC_DEF("755_v2.7", CPU_POWERPC_7x5_v27, 755, + "PowerPC 755 v2.7") + POWERPC_DEF("745_v2.8", CPU_POWERPC_7x5_v28, 745, + "PowerPC 745 v2.8") + POWERPC_DEF("755_v2.8", CPU_POWERPC_7x5_v28, 755, + "PowerPC 755 v2.8") + /* PowerPC 74xx family */ + POWERPC_DEF("7400_v1.0", CPU_POWERPC_7400_v10, 7400, + "PowerPC 7400 v1.0 (G4)") + POWERPC_DEF("7400_v1.1", CPU_POWERPC_7400_v11, 7400, + "PowerPC 7400 v1.1 (G4)") + POWERPC_DEF("7400_v2.0", CPU_POWERPC_7400_v20, 7400, + "PowerPC 7400 v2.0 (G4)") + POWERPC_DEF("7400_v2.1", CPU_POWERPC_7400_v21, 7400, + "PowerPC 7400 v2.1 (G4)") + POWERPC_DEF("7400_v2.2", CPU_POWERPC_7400_v22, 7400, + "PowerPC 7400 v2.2 (G4)") + POWERPC_DEF("7400_v2.6", CPU_POWERPC_7400_v26, 7400, + "PowerPC 7400 v2.6 (G4)") + POWERPC_DEF("7400_v2.7", CPU_POWERPC_7400_v27, 7400, + "PowerPC 7400 v2.7 (G4)") + POWERPC_DEF("7400_v2.8", CPU_POWERPC_7400_v28, 7400, + "PowerPC 7400 v2.8 (G4)") + POWERPC_DEF("7400_v2.9", CPU_POWERPC_7400_v29, 7400, + "PowerPC 7400 v2.9 (G4)") + POWERPC_DEF("7410_v1.0", CPU_POWERPC_7410_v10, 7410, + "PowerPC 7410 v1.0 (G4)") + POWERPC_DEF("7410_v1.1", CPU_POWERPC_7410_v11, 7410, + "PowerPC 7410 v1.1 (G4)") + POWERPC_DEF("7410_v1.2", CPU_POWERPC_7410_v12, 7410, + "PowerPC 7410 v1.2 (G4)") + POWERPC_DEF("7410_v1.3", CPU_POWERPC_7410_v13, 7410, + "PowerPC 7410 v1.3 (G4)") + POWERPC_DEF("7410_v1.4", CPU_POWERPC_7410_v14, 7410, + "PowerPC 7410 v1.4 (G4)") + POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7400, + "PowerPC 7448 v1.0 (G4)") + POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7400, + "PowerPC 7448 v1.1 (G4)") + POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7400, + "PowerPC 7448 v2.0 (G4)") + POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7400, + "PowerPC 7448 v2.1 (G4)") + POWERPC_DEF("7450_v1.0", CPU_POWERPC_7450_v10, 7450, + "PowerPC 7450 v1.0 (G4)") + POWERPC_DEF("7450_v1.1", CPU_POWERPC_7450_v11, 7450, + "PowerPC 7450 v1.1 (G4)") + POWERPC_DEF("7450_v1.2", CPU_POWERPC_7450_v12, 7450, + "PowerPC 7450 v1.2 (G4)") + POWERPC_DEF("7450_v2.0", CPU_POWERPC_7450_v20, 7450, + "PowerPC 7450 v2.0 (G4)") + POWERPC_DEF("7450_v2.1", CPU_POWERPC_7450_v21, 7450, + "PowerPC 7450 v2.1 (G4)") + POWERPC_DEF("7441_v2.1", CPU_POWERPC_7450_v21, 7440, + "PowerPC 7441 v2.1 (G4)") + POWERPC_DEF("7441_v2.3", CPU_POWERPC_74x1_v23, 7440, + "PowerPC 7441 v2.3 (G4)") + POWERPC_DEF("7451_v2.3", CPU_POWERPC_74x1_v23, 7450, + "PowerPC 7451 v2.3 (G4)") + POWERPC_DEF("7441_v2.10", CPU_POWERPC_74x1_v210, 7440, + "PowerPC 7441 v2.10 (G4)") + POWERPC_DEF("7451_v2.10", CPU_POWERPC_74x1_v210, 7450, + "PowerPC 7451 v2.10 (G4)") + POWERPC_DEF("7445_v1.0", CPU_POWERPC_74x5_v10, 7445, + "PowerPC 7445 v1.0 (G4)") + POWERPC_DEF("7455_v1.0", CPU_POWERPC_74x5_v10, 7455, + "PowerPC 7455 v1.0 (G4)") + POWERPC_DEF("7445_v2.1", CPU_POWERPC_74x5_v21, 7445, + "PowerPC 7445 v2.1 (G4)") + POWERPC_DEF("7455_v2.1", CPU_POWERPC_74x5_v21, 7455, + "PowerPC 7455 v2.1 (G4)") + POWERPC_DEF("7445_v3.2", CPU_POWERPC_74x5_v32, 7445, + "PowerPC 7445 v3.2 (G4)") + POWERPC_DEF("7455_v3.2", CPU_POWERPC_74x5_v32, 7455, + "PowerPC 7455 v3.2 (G4)") + POWERPC_DEF("7445_v3.3", CPU_POWERPC_74x5_v33, 7445, + "PowerPC 7445 v3.3 (G4)") + POWERPC_DEF("7455_v3.3", CPU_POWERPC_74x5_v33, 7455, + "PowerPC 7455 v3.3 (G4)") + POWERPC_DEF("7445_v3.4", 
CPU_POWERPC_74x5_v34, 7445, + "PowerPC 7445 v3.4 (G4)") + POWERPC_DEF("7455_v3.4", CPU_POWERPC_74x5_v34, 7455, + "PowerPC 7455 v3.4 (G4)") + POWERPC_DEF("7447_v1.0", CPU_POWERPC_74x7_v10, 7445, + "PowerPC 7447 v1.0 (G4)") + POWERPC_DEF("7457_v1.0", CPU_POWERPC_74x7_v10, 7455, + "PowerPC 7457 v1.0 (G4)") + POWERPC_DEF("7447_v1.1", CPU_POWERPC_74x7_v11, 7445, + "PowerPC 7447 v1.1 (G4)") + POWERPC_DEF("7457_v1.1", CPU_POWERPC_74x7_v11, 7455, + "PowerPC 7457 v1.1 (G4)") + POWERPC_DEF("7457_v1.2", CPU_POWERPC_74x7_v12, 7455, + "PowerPC 7457 v1.2 (G4)") + POWERPC_DEF("7447a_v1.0", CPU_POWERPC_74x7A_v10, 7445, + "PowerPC 7447A v1.0 (G4)") + POWERPC_DEF("7457a_v1.0", CPU_POWERPC_74x7A_v10, 7455, + "PowerPC 7457A v1.0 (G4)") + POWERPC_DEF("7447a_v1.1", CPU_POWERPC_74x7A_v11, 7445, + "PowerPC 7447A v1.1 (G4)") + POWERPC_DEF("7457a_v1.1", CPU_POWERPC_74x7A_v11, 7455, + "PowerPC 7457A v1.1 (G4)") + POWERPC_DEF("7447a_v1.2", CPU_POWERPC_74x7A_v12, 7445, + "PowerPC 7447A v1.2 (G4)") + POWERPC_DEF("7457a_v1.2", CPU_POWERPC_74x7A_v12, 7455, + "PowerPC 7457A v1.2 (G4)") + /* 64 bits PowerPC */ +#if defined(TARGET_PPC64) + POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970, + "PowerPC 970 v2.2") + POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970, + "PowerPC 970FX v1.0 (G5)") + POWERPC_DEF("970fx_v2.0", CPU_POWERPC_970FX_v20, 970, + "PowerPC 970FX v2.0 (G5)") + POWERPC_DEF("970fx_v2.1", CPU_POWERPC_970FX_v21, 970, + "PowerPC 970FX v2.1 (G5)") + POWERPC_DEF("970fx_v3.0", CPU_POWERPC_970FX_v30, 970, + "PowerPC 970FX v3.0 (G5)") + POWERPC_DEF("970fx_v3.1", CPU_POWERPC_970FX_v31, 970, + "PowerPC 970FX v3.1 (G5)") + POWERPC_DEF("970mp_v1.0", CPU_POWERPC_970MP_v10, 970, + "PowerPC 970MP v1.0") + POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970, + "PowerPC 970MP v1.1") + POWERPC_DEF("power5+_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P, + "POWER5+ v2.1") + POWERPC_DEF("power7_v2.3", CPU_POWERPC_POWER7_v23, POWER7, + "POWER7 v2.3") + POWERPC_DEF("power7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7, + "POWER7+ v2.1") + POWERPC_DEF("power8e_v2.1", CPU_POWERPC_POWER8E_v21, POWER8, + "POWER8E v2.1") + POWERPC_DEF("power8_v2.0", CPU_POWERPC_POWER8_v20, POWER8, + "POWER8 v2.0") + POWERPC_DEF("power8nvl_v1.0", CPU_POWERPC_POWER8NVL_v10, POWER8, + "POWER8NVL v1.0") + POWERPC_DEF("power9_v1.0", CPU_POWERPC_POWER9_DD1, POWER9, + "POWER9 v1.0") + POWERPC_DEF("power9_v2.0", CPU_POWERPC_POWER9_DD20, POWER9, + "POWER9 v2.0") + POWERPC_DEF("power10_v1.0", CPU_POWERPC_POWER10_DD1, POWER10, + "POWER10 v1.0") +#endif /* defined (TARGET_PPC64) */ +}; + +PowerPCCPU *cpu_ppc_init(struct uc_struct *uc, const char *cpu_model) +{ + int i; + PowerPCCPU *cpu; + CPUState *cs; + CPUClass *cc; + PowerPCCPUClass *pcc; + + if (cpu_model == NULL) { +#ifdef TARGET_PPC64 + cpu_model = "power10_v1.0"; +#else +// cpu_model = "e500v2_v10"; + cpu_model = "7457a_v1.2"; +#endif + } + + cpu = malloc(sizeof(*cpu)); + if (cpu == NULL) { + return NULL; + } + memset(cpu, 0, sizeof(*cpu)); + + cs = (CPUState *)cpu; + cc = (CPUClass *)&cpu->cc; + cs->cc = cc; + cs->uc = uc; + uc->cpu = (CPUState *)cpu; + + /* init CPUClass */ + cpu_class_init(uc, cc); + /* init PowerPCCPUClass */ + ppc_cpu_class_init(uc, cc); + /* init PowerPC family class */ + pcc = &cpu->cc; + for (i = 0; i < ARRAY_SIZE(ppc_cpus); i++) { + if (strcmp(cpu_model, ppc_cpus[i].name) == 0) { + pcc->pvr = ppc_cpus[i].pvr; + pcc->svr = ppc_cpus[i].svr; + if (ppc_cpus[i].cpu_family_class_init) { + ppc_cpus[i].cpu_family_class_init(cc, uc); + } + break; + } + } + /* init CPUState */ + cpu_common_initfn(uc, 
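+/*
+ * Unicorn replaces QOM object construction with direct calls: CPUClass
+ * and PowerPCCPUClass init, the family class_init matched by name above,
+ * then instance init and realize below. This mirrors what object_new()
+ * and the qdev realize machinery would do in upstream QEMU for
+ * "-cpu <model>".
+ */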
cs); + /* init PowerPCCPU */ + ppc_cpu_instance_init(uc, cs); + /* init PowerPC types */ + /* postinit PowerPCCPU */ + /* realize PowerPCCPU */ + ppc_cpu_realize(uc, cs); + /* realize CPUState */ + + // init address space + cpu_address_space_init(cs, 0, cs->memory); + + qemu_init_vcpu(cs); + + return cpu; +} diff --git a/qemu/target/ppc/unicorn.c b/qemu/target/ppc/unicorn.c new file mode 100644 index 00000000..66b7da7d --- /dev/null +++ b/qemu/target/ppc/unicorn.c @@ -0,0 +1,225 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + +#include "qemu/osdep.h" +#include "hw/ppc/ppc.h" +#include "sysemu/cpus.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" +#include "unicorn.h" + +#ifdef TARGET_PPC64 +typedef uint64_t ppcreg_t; +#else +typedef uint32_t ppcreg_t; +#endif + +static uint64_t ppc_mem_redirect(uint64_t address) +{ +/* // kseg0 range masks off high address bit + if (address >= 0x80000000 && address <= 0x9fffffff) + return address & 0x7fffffff; + + // kseg1 range masks off top 3 address bits + if (address >= 0xa0000000 && address <= 0xbfffffff) { + return address & 0x1fffffff; + }*/ + + // no redirect + return address; +} + +static void ppc_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUPPCState *)uc->cpu->env_ptr)->nip = address; +} + +void ppc_cpu_instance_finalize(CPUState *obj); +void ppc_cpu_unrealize(CPUState *dev); +static void ppc_release(void *ctx) +{ + int i; + TCGContext *tcg_ctx = (TCGContext *)ctx; + PowerPCCPU *cpu = (PowerPCCPU *)tcg_ctx->uc->cpu; + CPUTLBDesc *d = cpu->neg.tlb.d; + CPUTLBDescFast *f = cpu->neg.tlb.f; + CPUTLBDesc *desc; + CPUTLBDescFast *fast; + + release_common(ctx); + for (i = 0; i < NB_MMU_MODES; i++) { + desc = &(d[i]); + fast = &(f[i]); + g_free(desc->iotlb); + g_free(fast->table); + } + + for (i = 0; i < 32; i++) { + g_free(tcg_ctx->cpu_gpr[i]); + } +// g_free(tcg_ctx->cpu_PC); + g_free(tcg_ctx->btarget); + g_free(tcg_ctx->bcond); + g_free(tcg_ctx->cpu_dspctrl); + +// g_free(tcg_ctx->tb_ctx.tbs); + + ppc_cpu_instance_finalize(tcg_ctx->uc->cpu); + ppc_cpu_unrealize(tcg_ctx->uc->cpu); +} + +void ppc_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env; + env = uc->cpu->env_ptr; + memset(env->gpr, 0, sizeof(env->gpr)); + + env->nip = 0; +} + +static void reg_read(CPUPPCState *env, unsigned int regid, void *value) +{ + if (regid >= UC_PPC_REG_0 && regid <= UC_PPC_REG_31) + *(ppcreg_t *)value = env->gpr[regid - UC_PPC_REG_0]; + else { + switch(regid) { + default: break; + case UC_PPC_REG_PC: + *(ppcreg_t *)value = env->nip; + break; +/* case UC_PPC_REG_CP0_CONFIG3: + *(mipsreg_t *)value = env->CP0_Config3; + break; + case UC_MIPS_REG_CP0_USERLOCAL: + *(mipsreg_t *)value = env->active_tc.CP0_UserLocal; + break; */ + } + } + + return; +} + +static void reg_write(CPUPPCState *env, unsigned int regid, const void *value) +{ + if (regid >= UC_PPC_REG_0 && regid <= UC_PPC_REG_31) + env->gpr[regid - UC_PPC_REG_0] = *(ppcreg_t *)value; + else { + switch(regid) { + default: break; + case UC_PPC_REG_PC: + env->nip = *(ppcreg_t *)value; + break; +/* case UC_MIPS_REG_CP0_CONFIG3: + env->CP0_Config3 = *(mipsreg_t *)value; + break; + case UC_MIPS_REG_CP0_USERLOCAL: + env->active_tc.CP0_UserLocal = *(mipsreg_t *)value; + break; */ + } + } + + return; +} + +int ppc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUPPCState *env = &(POWERPC_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = 
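+/*
+ * reg_read()/reg_write() map UC_PPC_REG_0..UC_PPC_REG_31 onto env->gpr[]
+ * and UC_PPC_REG_PC onto env->nip. A minimal host-side sketch exercising
+ * them through the public API (UC_PPC_REG_3 assumed from
+ * include/unicorn/ppc.h; error checks elided):
+ *
+ *     uc_engine *uc;
+ *     uint32_t r3 = 0;
+ *     uc_open(UC_ARCH_PPC, UC_MODE_PPC32 | UC_MODE_BIG_ENDIAN, &uc);
+ *     uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL);
+ *     uc_mem_write(uc, 0x1000, "\x38\x60\x00\x2a", 4);  // li r3, 42
+ *     uc_emu_start(uc, 0x1000, 0x1004, 0, 0);
+ *     uc_reg_read(uc, UC_PPC_REG_3, &r3);               // r3 == 42
+ *     uc_close(uc);
+ */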
regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +int ppc_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) +{ + CPUPPCState *env = &(POWERPC_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + if (regid == UC_PPC_REG_PC) { + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + } + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_PPC64 +int ppc64_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +#else +int ppc_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +#endif +{ + CPUPPCState *env = (CPUPPCState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_PPC64 +int ppc64_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +#else +int ppc_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +#endif +{ + CPUPPCState *env = (CPUPPCState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + } + + return 0; +} + +PowerPCCPU *cpu_ppc_init(struct uc_struct *uc, const char *cpu_model); +static int ppc_cpus_init(struct uc_struct *uc, const char *cpu_model) +{ + PowerPCCPU *cpu; + + cpu = cpu_ppc_init(uc, cpu_model); + if (cpu == NULL) { + return -1; + } + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_PPC64 +void ppc64_uc_init(struct uc_struct* uc) +#else +void ppc_uc_init(struct uc_struct* uc) +#endif +{ + uc->reg_read = ppc_reg_read; + uc->reg_write = ppc_reg_write; + uc->reg_reset = ppc_reg_reset; + uc->release = ppc_release; + uc->set_pc = ppc_set_pc; + uc->mem_redirect = ppc_mem_redirect; + uc->cpus_init = ppc_cpus_init; + uc->cpu_context_size = offsetof(CPUPPCState, uc); + uc_common_init(uc); +} diff --git a/qemu/target/ppc/unicorn.h b/qemu/target/ppc/unicorn.h new file mode 100644 index 00000000..ef3bcf6e --- /dev/null +++ b/qemu/target/ppc/unicorn.h @@ -0,0 +1,20 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#ifndef UC_QEMU_TARGET_PPC_H +#define UC_QEMU_TARGET_PPC_H + +// functions to read & write registers +int ppc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); +int ppc_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); + +int ppc_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int ppc_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); +int ppc64_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int ppc64_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); + +void ppc_reg_reset(struct uc_struct *uc); + +void ppc_uc_init(struct uc_struct* uc); +void ppc64_uc_init(struct uc_struct* uc); +#endif diff --git a/qemu/target/riscv/README b/qemu/target/riscv/README new file mode 100644 index 00000000..448fb65a --- /dev/null +++ b/qemu/target/riscv/README @@ -0,0 +1,4 @@ +code under riscv32/ is from riscv32-softmmu/target/riscv/*.inc.c +code under riscv64/ is from riscv64-softmmu/target/riscv/*.inc.c + +WARNING: these code are autogen from 
scripts/decodetree.py, DO NOT modify them. diff --git a/qemu/target/riscv/cpu-param.h b/qemu/target/riscv/cpu-param.h new file mode 100644 index 00000000..664fc1d3 --- /dev/null +++ b/qemu/target/riscv/cpu-param.h @@ -0,0 +1,23 @@ +/* + * RISC-V cpu parameters for qemu. + * + * Copyright (c) 2017-2018 SiFive, Inc. + * SPDX-License-Identifier: GPL-2.0+ + */ + +#ifndef RISCV_CPU_PARAM_H +#define RISCV_CPU_PARAM_H 1 + +#if defined(TARGET_RISCV64) +# define TARGET_LONG_BITS 64 +# define TARGET_PHYS_ADDR_SPACE_BITS 56 /* 44-bit PPN */ +# define TARGET_VIRT_ADDR_SPACE_BITS 48 /* sv48 */ +#elif defined(TARGET_RISCV32) +# define TARGET_LONG_BITS 32 +# define TARGET_PHYS_ADDR_SPACE_BITS 34 /* 22-bit PPN */ +# define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */ +#endif +#define TARGET_PAGE_BITS 12 /* 4 KiB Pages */ +#define NB_MMU_MODES 4 + +#endif diff --git a/qemu/target/riscv/cpu.c b/qemu/target/riscv/cpu.c new file mode 100644 index 00000000..7b83d5ef --- /dev/null +++ b/qemu/target/riscv/cpu.c @@ -0,0 +1,390 @@ +/* + * QEMU RISC-V CPU + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017-2018 SiFive, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/ctype.h" +#include "qemu/log.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "fpu/softfloat-helpers.h" + +#include + +/* RISC-V CPU definitions */ + +// static const char riscv_exts[26] = "IEMAFDQCLBJTPVNSUHKORWXYZG"; + +const char * const riscv_int_regnames[] = { + "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", + "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", + "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", + "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", + "x28/t3", "x29/t4", "x30/t5", "x31/t6" +}; + +const char * const riscv_fpr_regnames[] = { + "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", + "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", + "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", + "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", + "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", + "f30/ft10", "f31/ft11" +}; + +static void set_misa(CPURISCVState *env, target_ulong misa) +{ + env->misa_mask = env->misa = misa; +} + +static void set_priv_version(CPURISCVState *env, int priv_ver) +{ + env->priv_ver = priv_ver; +} + +static void set_feature(CPURISCVState *env, int feature) +{ + env->features |= (1ULL << feature); +} + +static void set_resetvec(CPURISCVState *env, int resetvec) +{ + env->resetvec = resetvec; +} + +static void riscv_any_cpu_init(CPUState *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + set_misa(env, RVXLEN | RVI | RVM | RVA | RVF | RVD | RVC | RVU); + set_priv_version(env, PRIV_VERSION_1_11_0); + set_resetvec(env, DEFAULT_RSTVEC); +} + +#if defined(TARGET_RISCV32) +// rv32 +static void riscv_base32_cpu_init(CPUState *obj) +{ + 
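+    /*
+     * For reference, misa encodes one bit per extension letter (bit 0 =
+     * 'A', bit 8 = 'I', ...) with the XLEN field in the top two bits,
+     * per the RV() macro in cpu.h:
+     *
+     *     #define RV(x) ((target_ulong)1 << (x - 'A'))
+     *     // e.g. rv32imacu => RV32 | RVI | RVM | RVA | RVC | RVU
+     */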
CPURISCVState *env = &RISCV_CPU(obj)->env; + /* We set this in the realise function */ + set_misa(env, 0); +} + +// sifive-u34 +static void rv32gcsu_priv1_10_0_cpu_init(CPUState *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + set_priv_version(env, PRIV_VERSION_1_10_0); + set_resetvec(env, DEFAULT_RSTVEC); + set_feature(env, RISCV_FEATURE_MMU); + set_feature(env, RISCV_FEATURE_PMP); +} + +// sifive-e31 +static void rv32imacu_nommu_cpu_init(CPUState *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + set_misa(env, RV32 | RVI | RVM | RVA | RVC | RVU); + set_priv_version(env, PRIV_VERSION_1_10_0); + set_resetvec(env, DEFAULT_RSTVEC); + set_feature(env, RISCV_FEATURE_PMP); +} + +#elif defined(TARGET_RISCV64) +// rv64 +static void riscv_base64_cpu_init(CPUState *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + /* We set this in the realise function */ + set_misa(env, 0); +} + +// sifive-u54 +static void rv64gcsu_priv1_10_0_cpu_init(CPUState *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + set_priv_version(env, PRIV_VERSION_1_10_0); + set_resetvec(env, DEFAULT_RSTVEC); + set_feature(env, RISCV_FEATURE_MMU); + set_feature(env, RISCV_FEATURE_PMP); +} + +// sifive-e51 +static void rv64imacu_nommu_cpu_init(CPUState *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + set_misa(env, RV64 | RVI | RVM | RVA | RVC | RVU); + set_priv_version(env, PRIV_VERSION_1_10_0); + set_resetvec(env, DEFAULT_RSTVEC); + set_feature(env, RISCV_FEATURE_PMP); +} +#endif + +static void riscv_cpu_set_pc(CPUState *cs, vaddr value) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + env->pc = value; +} + +static void riscv_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + env->pc = tb->pc; +} + +static bool riscv_cpu_has_work(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + /* + * Definition of the WFI instruction requires it to ignore the privilege + * mode and delegation registers, but respect individual enables + */ + return (env->mip & env->mie) != 0; +} + +void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb, + target_ulong *data) +{ + env->pc = data[0]; +} + +static void riscv_cpu_reset(CPUState *dev) +{ + CPUState *cs = CPU(dev); + RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); + CPURISCVState *env = &cpu->env; + + mcc->parent_reset(cs); + + env->priv = PRV_M; + env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); + env->mcause = 0; + env->pc = env->resetvec; + + cs->exception_index = EXCP_NONE; + env->load_res = -1; + set_default_nan_mode(1, &env->fp_status); +} + +static void riscv_cpu_realize(struct uc_struct *uc, CPUState *dev) +{ + CPUState *cs = CPU(dev); + RISCVCPU *cpu = RISCV_CPU(dev); + CPURISCVState *env = &cpu->env; + int priv_version = PRIV_VERSION_1_11_0; + target_ulong target_misa = 0; + + cpu_exec_realizefn(cs); + + if (cpu->cfg.priv_spec) { + if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { + priv_version = PRIV_VERSION_1_11_0; + } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { + priv_version = PRIV_VERSION_1_10_0; + } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.9.1")) { + priv_version = PRIV_VERSION_1_09_1; + } else { + // error_setg(errp, "Unsupported privilege spec version '%s'", cpu->cfg.priv_spec); + return; + } + } + + set_priv_version(env, 
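+/*
+ * Note: g_strcmp0() is the NULL-safe strcmp from glib_compat; combined
+ * with the if (cpu->cfg.priv_spec) guard above, an unset priv_spec
+ * simply keeps the PRIV_VERSION_1_11_0 default, e.g.:
+ *
+ *     g_strcmp0(NULL, "v1.10.0");  // nonzero, no NULL dereference
+ */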
priv_version); + set_resetvec(env, DEFAULT_RSTVEC); + + if (cpu->cfg.mmu) { + set_feature(env, RISCV_FEATURE_MMU); + } + + if (cpu->cfg.pmp) { + set_feature(env, RISCV_FEATURE_PMP); + } + + /* If misa isn't set (rv32 and rv64 machines) set it here */ + if (!env->misa) { + /* Do some ISA extension error checking */ + if (cpu->cfg.ext_i && cpu->cfg.ext_e) { + //error_setg(errp, "I and E extensions are incompatible"); + return; + } + + if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) { + // error_setg(errp, "Either I or E extension must be set"); + return; + } + + if (cpu->cfg.ext_g && !(cpu->cfg.ext_i & cpu->cfg.ext_m & + cpu->cfg.ext_a & cpu->cfg.ext_f & cpu->cfg.ext_d)) { + // warn_report("Setting G will also set IMAFD"); + cpu->cfg.ext_i = true; + cpu->cfg.ext_m = true; + cpu->cfg.ext_a = true; + cpu->cfg.ext_f = true; + cpu->cfg.ext_d = true; + } + + /* Set the ISA extensions, checks should have happened above */ + if (cpu->cfg.ext_i) { + target_misa |= RVI; + } + if (cpu->cfg.ext_e) { + target_misa |= RVE; + } + if (cpu->cfg.ext_m) { + target_misa |= RVM; + } + if (cpu->cfg.ext_a) { + target_misa |= RVA; + } + if (cpu->cfg.ext_f) { + target_misa |= RVF; + } + if (cpu->cfg.ext_d) { + target_misa |= RVD; + } + if (cpu->cfg.ext_c) { + target_misa |= RVC; + } + if (cpu->cfg.ext_s) { + target_misa |= RVS; + } + if (cpu->cfg.ext_u) { + target_misa |= RVU; + } + if (cpu->cfg.ext_h) { + target_misa |= RVH; + } + + set_misa(env, RVXLEN | target_misa); + } + + cpu_reset(cs); +} + +static void riscv_cpu_init(struct uc_struct *uc, CPUState *obj) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + CPURISCVState *env = &cpu->env; + + // unicorn + env->uc = uc; + + cpu_set_cpustate_pointers(cpu); +} + +static void riscv_cpu_class_init(struct uc_struct *uc, CPUClass *c, void *data) +{ + RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); + CPUClass *cc = CPU_CLASS(c); + + mcc->parent_reset = cc->reset; + cc->reset = riscv_cpu_reset; + + cc->has_work = riscv_cpu_has_work; + cc->do_interrupt = riscv_cpu_do_interrupt; + cc->cpu_exec_interrupt = riscv_cpu_exec_interrupt; + cc->set_pc = riscv_cpu_set_pc; + cc->synchronize_from_tb = riscv_cpu_synchronize_from_tb; + cc->do_unaligned_access = riscv_cpu_do_unaligned_access; + cc->tcg_initialize = riscv_translate_init; + cc->tlb_fill = riscv_cpu_tlb_fill; +} + +typedef struct CPUModelInfo { + const char *name; + void (*initfn)(CPUState *obj); +} CPUModelInfo; + +static const CPUModelInfo cpu_models[] = { + {TYPE_RISCV_CPU_ANY, riscv_any_cpu_init}, +#ifdef TARGET_RISCV32 + {TYPE_RISCV_CPU_BASE32, riscv_base32_cpu_init}, + {TYPE_RISCV_CPU_SIFIVE_E31, rv32imacu_nommu_cpu_init}, + {TYPE_RISCV_CPU_SIFIVE_U34, rv32gcsu_priv1_10_0_cpu_init}, +#endif +#ifdef TARGET_RISCV64 + {TYPE_RISCV_CPU_BASE64, riscv_base64_cpu_init}, + {TYPE_RISCV_CPU_SIFIVE_E51, rv64imacu_nommu_cpu_init}, + {TYPE_RISCV_CPU_SIFIVE_U54, rv64gcsu_priv1_10_0_cpu_init}, +#endif +}; + +RISCVCPU *cpu_riscv_init(struct uc_struct *uc, const char *cpu_model) +{ + RISCVCPU *cpu; + CPUState *cs; + CPUClass *cc; + int i; + + cpu = calloc(1, sizeof(*cpu)); + if (cpu == NULL) { + return NULL; + } + +#ifdef TARGET_RISCV32 + if (!cpu_model) { + cpu_model = TYPE_RISCV_CPU_SIFIVE_U34; + } +#else + /* TARGET_RISCV64 */ + if (!cpu_model) { + cpu_model = TYPE_RISCV_CPU_SIFIVE_U54; + } +#endif + + cs = (CPUState *)cpu; + cc = (CPUClass *)&cpu->cc; + cs->cc = cc; + cs->uc = uc; + uc->cpu = (CPUState *)cpu; + + /* init CPUClass */ + cpu_class_init(uc, cc); + + /* init RISCVCPUClass */ + riscv_cpu_class_init(uc, cc, NULL); + + /* init CPUState */ + 
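+/*
+ * As on the PPC side, QOM construction is open-coded here. Reaching this
+ * path from the public API is a one-liner (a sketch; constants from
+ * include/unicorn/unicorn.h):
+ *
+ *     uc_engine *uc;
+ *     uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc);  // default: sifive-u54
+ */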
cpu_common_initfn(uc, cs);
+
+    /* init CPU */
+    riscv_cpu_init(uc, cs);
+
+    /* init specific CPU model */
+    for (i = 0; i < ARRAY_SIZE(cpu_models); i++) {
+        if (strcmp(cpu_model, cpu_models[i].name) == 0) {
+            cpu_models[i].initfn(cs);
+            break;
+        }
+    }
+
+    /* realize CPU */
+    riscv_cpu_realize(uc, cs);
+
+    // init address space
+    cpu_address_space_init(cs, 0, cs->memory);
+
+    qemu_init_vcpu(cs);
+
+    return cpu;
+}
diff --git a/qemu/target/riscv/cpu.h b/qemu/target/riscv/cpu.h
new file mode 100644
index 00000000..b94516eb
--- /dev/null
+++ b/qemu/target/riscv/cpu.h
@@ -0,0 +1,380 @@
+/*
+ * QEMU RISC-V CPU
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RISCV_CPU_H
+#define RISCV_CPU_H
+
+#include "hw/core/cpu.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat-types.h"
+
+typedef struct TCGContext TCGContext;
+
+#define TYPE_RISCV_CPU "riscv-cpu"
+
+#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
+#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
+#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
+
+#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any")
+#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
+#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
+#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
+#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51")
+#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34")
+#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54")
+
+#define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2))
+#define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2))
+
+#if defined(TARGET_RISCV32)
+#define RVXLEN RV32
+#elif defined(TARGET_RISCV64)
+#define RVXLEN RV64
+#endif
+
+#define RV(x) ((target_ulong)1 << (x - 'A'))
+
+#define RVI RV('I')
+#define RVE RV('E') /* E and I are mutually exclusive */
+#define RVM RV('M')
+#define RVA RV('A')
+#define RVF RV('F')
+#define RVD RV('D')
+#define RVC RV('C')
+#define RVS RV('S')
+#define RVU RV('U')
+#define RVH RV('H')
+
+/* S extension denotes that Supervisor mode exists, however it is possible
+   to have a core that supports S mode but does not have an MMU, and there
+   is currently no bit in misa to indicate whether an MMU exists or not,
+   so a cpu features bitfield is required, likewise for optional PMP support */
+enum {
+    RISCV_FEATURE_MMU,
+    RISCV_FEATURE_PMP,
+    RISCV_FEATURE_MISA
+};
+
+#define PRIV_VERSION_1_09_1 0x00010901
+#define PRIV_VERSION_1_10_0 0x00011000
+#define PRIV_VERSION_1_11_0 0x00011100
+
+#define TRANSLATE_PMP_FAIL 2
+#define TRANSLATE_FAIL 1
+#define TRANSLATE_SUCCESS 0
+#define MMU_USER_IDX 3
+
+#define MAX_RISCV_PMPS (16)
+
+typedef struct CPURISCVState CPURISCVState;
+
+#include "pmp.h"
+
+struct CPURISCVState {
+    target_ulong gpr[32];
+    uint64_t fpr[32]; /* assume both F and D extensions */
+    target_ulong pc;
+    target_ulong load_res;
+    target_ulong load_val;
+
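+    /*
+     * load_res/load_val back the LR/SC (A extension) reservation: LR
+     * records the reserved address in load_res and the loaded value in
+     * load_val, and SC succeeds only while that reservation holds; the
+     * reset code above sets load_res = -1 to mean "no reservation".
+     */
+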
target_ulong frm; + + target_ulong badaddr; + target_ulong guest_phys_fault_addr; + + target_ulong priv_ver; + target_ulong misa; + target_ulong misa_mask; + + uint32_t features; + + target_ulong priv; + /* This contains QEMU specific information about the virt state. */ + target_ulong virt; + target_ulong resetvec; + + target_ulong mhartid; + target_ulong mstatus; + + target_ulong mip; + +#ifdef TARGET_RISCV32 + target_ulong mstatush; +#endif + + uint32_t miclaim; + + target_ulong mie; + target_ulong mideleg; + + target_ulong sptbr; /* until: priv-1.9.1 */ + target_ulong satp; /* since: priv-1.10.0 */ + target_ulong sbadaddr; + target_ulong mbadaddr; + target_ulong medeleg; + + target_ulong stvec; + target_ulong sepc; + target_ulong scause; + + target_ulong mtvec; + target_ulong mepc; + target_ulong mcause; + target_ulong mtval; /* since: priv-1.10.0 */ + + /* Hypervisor CSRs */ + target_ulong hstatus; + target_ulong hedeleg; + target_ulong hideleg; + target_ulong hcounteren; + target_ulong htval; + target_ulong htinst; + target_ulong hgatp; + uint64_t htimedelta; + + /* Virtual CSRs */ + target_ulong vsstatus; + target_ulong vstvec; + target_ulong vsscratch; + target_ulong vsepc; + target_ulong vscause; + target_ulong vstval; + target_ulong vsatp; +#ifdef TARGET_RISCV32 + target_ulong vsstatush; +#endif + + target_ulong mtval2; + target_ulong mtinst; + + /* HS Backup CSRs */ + target_ulong stvec_hs; + target_ulong sscratch_hs; + target_ulong sepc_hs; + target_ulong scause_hs; + target_ulong stval_hs; + target_ulong satp_hs; + target_ulong mstatus_hs; +#ifdef TARGET_RISCV32 + target_ulong mstatush_hs; +#endif + + target_ulong scounteren; + target_ulong mcounteren; + + target_ulong sscratch; + target_ulong mscratch; + + /* temporary htif regs */ + uint64_t mfromhost; + uint64_t mtohost; + uint64_t timecmp; + + /* physical memory protection */ + pmp_table_t pmp_state; + + /* machine specific rdtime callback */ + uint64_t (*rdtime_fn)(void); + + /* True if in debugger mode. */ + bool debugger; + + float_status fp_status; + + /* Fields from here on are preserved across CPU reset. */ + QEMUTimer *timer; /* Internal timer */ + + // Unicorn engine + struct uc_struct *uc; +}; + +/** + * RISCVCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * A RISCV CPU model. + */ +typedef struct RISCVCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + void (*parent_reset)(CPUState *cpu); +} RISCVCPUClass; + +/** + * RISCVCPU: + * @env: #CPURISCVState + * + * A RISCV CPU. 
+ */ +typedef struct RISCVCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + CPUNegativeOffsetState neg; + CPURISCVState env; + + /* Configuration Settings */ + struct { + bool ext_i; + bool ext_e; + bool ext_g; + bool ext_m; + bool ext_a; + bool ext_f; + bool ext_d; + bool ext_c; + bool ext_s; + bool ext_u; + bool ext_h; + bool ext_counters; + bool ext_ifencei; + bool ext_icsr; + + char *priv_spec; + char *user_spec; + bool mmu; + bool pmp; + } cfg; + + struct RISCVCPUClass cc; +} RISCVCPU; + +#define RISCV_CPU(obj) ((RISCVCPU *)obj) +#define RISCV_CPU_CLASS(klass) ((RISCVCPUClass *)klass) +#define RISCV_CPU_GET_CLASS(obj) (&((RISCVCPU *)obj)->cc) + +static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) +{ + return (env->misa & ext) != 0; +} + +static inline bool riscv_feature(CPURISCVState *env, int feature) +{ + return env->features & (1ULL << feature); +} + +#include "cpu_user.h" +#include "cpu_bits.h" + +extern const char * const riscv_int_regnames[]; +extern const char * const riscv_fpr_regnames[]; +extern const char * const riscv_excp_names[]; +extern const char * const riscv_intr_names[]; + +void riscv_cpu_do_interrupt(CPUState *cpu); +int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); +int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); +bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request); +bool riscv_cpu_fp_enabled(CPURISCVState *env); +bool riscv_cpu_virt_enabled(CPURISCVState *env); +void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable); +bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env); +void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable); +int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch); +hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr); +bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr); + +#define cpu_mmu_index riscv_cpu_mmu_index + +void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env); +int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts); +uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value); +#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */ +void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void)); + +void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv); + +void riscv_translate_init(struct uc_struct *uc); +void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env, + uint32_t exception, uintptr_t pc); + +target_ulong riscv_cpu_get_fflags(CPURISCVState *env); +void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong); + +#define TB_FLAGS_MMU_MASK 3 +#define TB_FLAGS_MSTATUS_FS MSTATUS_FS + +static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *flags) +{ + *pc = env->pc; + *cs_base = 0; + *flags = cpu_mmu_index(env, 0); + if (riscv_cpu_fp_enabled(env)) { + *flags |= env->mstatus & MSTATUS_FS; + } +} + +int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask); +int riscv_csrrw_debug(CPURISCVState 
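+/*
+ * riscv_csrrw() implements the masked read-modify-write shared by all
+ * Zicsr instructions: the old CSR value is returned through *ret_value
+ * and only bits in write_mask are updated, roughly
+ *
+ *     old = csr;
+ *     csr = (old & ~write_mask) | (new_value & write_mask);
+ *
+ * so CSRRS/CSRRC pass the rs1 bits as the mask, CSRRW passes ~0, and a
+ * pure read (the riscv_csr_read() helper below) uses write_mask == 0.
+ */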
*env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask); + +static inline void riscv_csr_write(CPURISCVState *env, int csrno, + target_ulong val) +{ + riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS)); +} + +static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno) +{ + target_ulong val = 0; + riscv_csrrw(env, csrno, &val, 0, 0); + return val; +} + +typedef int (*riscv_csr_predicate_fn)(CPURISCVState *env, int csrno); +typedef int (*riscv_csr_read_fn)(CPURISCVState *env, int csrno, + target_ulong *ret_value); +typedef int (*riscv_csr_write_fn)(CPURISCVState *env, int csrno, + target_ulong new_value); +typedef int (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, + target_ulong *ret_value, target_ulong new_value, target_ulong write_mask); + +typedef struct { + riscv_csr_predicate_fn predicate; + riscv_csr_read_fn read; + riscv_csr_write_fn write; + riscv_csr_op_fn op; +} riscv_csr_operations; + +void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops); +void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops); + +void riscv_cpu_register_gdb_regs_for_features(CPUState *cs); + +typedef CPURISCVState CPUArchState; +typedef RISCVCPU ArchCPU; + +#include "exec/cpu-all.h" + +#endif /* RISCV_CPU_H */ diff --git a/qemu/target/riscv/cpu_bits.h b/qemu/target/riscv/cpu_bits.h new file mode 100644 index 00000000..f40ea261 --- /dev/null +++ b/qemu/target/riscv/cpu_bits.h @@ -0,0 +1,578 @@ +/* RISC-V ISA constants */ + +#ifndef TARGET_RISCV_CPU_BITS_H +#define TARGET_RISCV_CPU_BITS_H + +#define get_field(reg, mask) (((reg) & \ + (target_ulong)(mask)) / ((mask) & ~((mask) << 1))) +#define set_field(reg, mask, val) (((reg) & ~(target_ulong)(mask)) | \ + (((target_ulong)(val) * ((mask) & ~((mask) << 1))) & \ + (target_ulong)(mask))) + +/* Floating point round mode */ +#define FSR_RD_SHIFT 5 +#define FSR_RD (0x7 << FSR_RD_SHIFT) + +/* Floating point accrued exception flags */ +#define FPEXC_NX 0x01 +#define FPEXC_UF 0x02 +#define FPEXC_OF 0x04 +#define FPEXC_DZ 0x08 +#define FPEXC_NV 0x10 + +/* Floating point status register bits */ +#define FSR_AEXC_SHIFT 0 +#define FSR_NVA (FPEXC_NV << FSR_AEXC_SHIFT) +#define FSR_OFA (FPEXC_OF << FSR_AEXC_SHIFT) +#define FSR_UFA (FPEXC_UF << FSR_AEXC_SHIFT) +#define FSR_DZA (FPEXC_DZ << FSR_AEXC_SHIFT) +#define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT) +#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA) + +/* Control and Status Registers */ + +/* User Trap Setup */ +#define CSR_USTATUS 0x000 +#define CSR_UIE 0x004 +#define CSR_UTVEC 0x005 + +/* User Trap Handling */ +#define CSR_USCRATCH 0x040 +#define CSR_UEPC 0x041 +#define CSR_UCAUSE 0x042 +#define CSR_UTVAL 0x043 +#define CSR_UIP 0x044 + +/* User Floating-Point CSRs */ +#define CSR_FFLAGS 0x001 +#define CSR_FRM 0x002 +#define CSR_FCSR 0x003 + +/* User Timers and Counters */ +#define CSR_CYCLE 0xc00 +#define CSR_TIME 0xc01 +#define CSR_INSTRET 0xc02 +#define CSR_HPMCOUNTER3 0xc03 +#define CSR_HPMCOUNTER4 0xc04 +#define CSR_HPMCOUNTER5 0xc05 +#define CSR_HPMCOUNTER6 0xc06 +#define CSR_HPMCOUNTER7 0xc07 +#define CSR_HPMCOUNTER8 0xc08 +#define CSR_HPMCOUNTER9 0xc09 +#define CSR_HPMCOUNTER10 0xc0a +#define CSR_HPMCOUNTER11 0xc0b +#define CSR_HPMCOUNTER12 0xc0c +#define CSR_HPMCOUNTER13 0xc0d +#define CSR_HPMCOUNTER14 0xc0e +#define CSR_HPMCOUNTER15 0xc0f +#define CSR_HPMCOUNTER16 0xc10 +#define CSR_HPMCOUNTER17 0xc11 +#define CSR_HPMCOUNTER18 0xc12 +#define CSR_HPMCOUNTER19 0xc13 +#define CSR_HPMCOUNTER20 0xc14 +#define 
CSR_HPMCOUNTER21 0xc15 +#define CSR_HPMCOUNTER22 0xc16 +#define CSR_HPMCOUNTER23 0xc17 +#define CSR_HPMCOUNTER24 0xc18 +#define CSR_HPMCOUNTER25 0xc19 +#define CSR_HPMCOUNTER26 0xc1a +#define CSR_HPMCOUNTER27 0xc1b +#define CSR_HPMCOUNTER28 0xc1c +#define CSR_HPMCOUNTER29 0xc1d +#define CSR_HPMCOUNTER30 0xc1e +#define CSR_HPMCOUNTER31 0xc1f +#define CSR_CYCLEH 0xc80 +#define CSR_TIMEH 0xc81 +#define CSR_INSTRETH 0xc82 +#define CSR_HPMCOUNTER3H 0xc83 +#define CSR_HPMCOUNTER4H 0xc84 +#define CSR_HPMCOUNTER5H 0xc85 +#define CSR_HPMCOUNTER6H 0xc86 +#define CSR_HPMCOUNTER7H 0xc87 +#define CSR_HPMCOUNTER8H 0xc88 +#define CSR_HPMCOUNTER9H 0xc89 +#define CSR_HPMCOUNTER10H 0xc8a +#define CSR_HPMCOUNTER11H 0xc8b +#define CSR_HPMCOUNTER12H 0xc8c +#define CSR_HPMCOUNTER13H 0xc8d +#define CSR_HPMCOUNTER14H 0xc8e +#define CSR_HPMCOUNTER15H 0xc8f +#define CSR_HPMCOUNTER16H 0xc90 +#define CSR_HPMCOUNTER17H 0xc91 +#define CSR_HPMCOUNTER18H 0xc92 +#define CSR_HPMCOUNTER19H 0xc93 +#define CSR_HPMCOUNTER20H 0xc94 +#define CSR_HPMCOUNTER21H 0xc95 +#define CSR_HPMCOUNTER22H 0xc96 +#define CSR_HPMCOUNTER23H 0xc97 +#define CSR_HPMCOUNTER24H 0xc98 +#define CSR_HPMCOUNTER25H 0xc99 +#define CSR_HPMCOUNTER26H 0xc9a +#define CSR_HPMCOUNTER27H 0xc9b +#define CSR_HPMCOUNTER28H 0xc9c +#define CSR_HPMCOUNTER29H 0xc9d +#define CSR_HPMCOUNTER30H 0xc9e +#define CSR_HPMCOUNTER31H 0xc9f + +/* Machine Timers and Counters */ +#define CSR_MCYCLE 0xb00 +#define CSR_MINSTRET 0xb02 +#define CSR_MCYCLEH 0xb80 +#define CSR_MINSTRETH 0xb82 + +/* Machine Information Registers */ +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 +#define CSR_MHARTID 0xf14 + +/* Machine Trap Setup */ +#define CSR_MSTATUS 0x300 +#define CSR_MISA 0x301 +#define CSR_MEDELEG 0x302 +#define CSR_MIDELEG 0x303 +#define CSR_MIE 0x304 +#define CSR_MTVEC 0x305 +#define CSR_MCOUNTEREN 0x306 + +/* 32-bit only */ +#define CSR_MSTATUSH 0x310 + +/* Legacy Counter Setup (priv v1.9.1) */ +/* Update to #define CSR_MCOUNTINHIBIT 0x320 for 1.11.0 */ +#define CSR_MUCOUNTEREN 0x320 +#define CSR_MSCOUNTEREN 0x321 +#define CSR_MHCOUNTEREN 0x322 + +/* Machine Trap Handling */ +#define CSR_MSCRATCH 0x340 +#define CSR_MEPC 0x341 +#define CSR_MCAUSE 0x342 +#define CSR_MTVAL 0x343 +#define CSR_MIP 0x344 + +/* Legacy Machine Trap Handling (priv v1.9.1) */ +#define CSR_MBADADDR 0x343 + +/* Supervisor Trap Setup */ +#define CSR_SSTATUS 0x100 +#define CSR_SEDELEG 0x102 +#define CSR_SIDELEG 0x103 +#define CSR_SIE 0x104 +#define CSR_STVEC 0x105 +#define CSR_SCOUNTEREN 0x106 + +/* Supervisor Trap Handling */ +#define CSR_SSCRATCH 0x140 +#define CSR_SEPC 0x141 +#define CSR_SCAUSE 0x142 +#define CSR_STVAL 0x143 +#define CSR_SIP 0x144 + +/* Legacy Supervisor Trap Handling (priv v1.9.1) */ +#define CSR_SBADADDR 0x143 + +/* Supervisor Protection and Translation */ +#define CSR_SPTBR 0x180 +#define CSR_SATP 0x180 + +/* Hypervisor CSRs */ +#define CSR_HSTATUS 0x600 +#define CSR_HEDELEG 0x602 +#define CSR_HIDELEG 0x603 +#define CSR_HIE 0x604 +#define CSR_HCOUNTEREN 0x606 +#define CSR_HTVAL 0x643 +#define CSR_HIP 0x644 +#define CSR_HTINST 0x64A +#define CSR_HGATP 0x680 +#define CSR_HTIMEDELTA 0x605 +#define CSR_HTIMEDELTAH 0x615 + +#if defined(TARGET_RISCV32) +#define HGATP_MODE SATP32_MODE +#define HGATP_VMID SATP32_ASID +#define HGATP_PPN SATP32_PPN +#endif +#if defined(TARGET_RISCV64) +#define HGATP_MODE SATP64_MODE +#define HGATP_VMID SATP64_ASID +#define HGATP_PPN SATP64_PPN +#endif + +/* Virtual CSRs */ +#define CSR_VSSTATUS 0x200 +#define CSR_VSIE 0x204 +#define
CSR_VSTVEC 0x205 +#define CSR_VSSCRATCH 0x240 +#define CSR_VSEPC 0x241 +#define CSR_VSCAUSE 0x242 +#define CSR_VSTVAL 0x243 +#define CSR_VSIP 0x244 +#define CSR_VSATP 0x280 + +#define CSR_MTINST 0x34a +#define CSR_MTVAL2 0x34b + +/* Physical Memory Protection */ +#define CSR_PMPCFG0 0x3a0 +#define CSR_PMPCFG1 0x3a1 +#define CSR_PMPCFG2 0x3a2 +#define CSR_PMPCFG3 0x3a3 +#define CSR_PMPADDR0 0x3b0 +#define CSR_PMPADDR1 0x3b1 +#define CSR_PMPADDR2 0x3b2 +#define CSR_PMPADDR3 0x3b3 +#define CSR_PMPADDR4 0x3b4 +#define CSR_PMPADDR5 0x3b5 +#define CSR_PMPADDR6 0x3b6 +#define CSR_PMPADDR7 0x3b7 +#define CSR_PMPADDR8 0x3b8 +#define CSR_PMPADDR9 0x3b9 +#define CSR_PMPADDR10 0x3ba +#define CSR_PMPADDR11 0x3bb +#define CSR_PMPADDR12 0x3bc +#define CSR_PMPADDR13 0x3bd +#define CSR_PMPADDR14 0x3be +#define CSR_PMPADDR15 0x3bf + +/* Debug/Trace Registers (shared with Debug Mode) */ +#define CSR_TSELECT 0x7a0 +#define CSR_TDATA1 0x7a1 +#define CSR_TDATA2 0x7a2 +#define CSR_TDATA3 0x7a3 + +/* Debug Mode Registers */ +#define CSR_DCSR 0x7b0 +#define CSR_DPC 0x7b1 +#define CSR_DSCRATCH 0x7b2 + +/* Performance Counters */ +#define CSR_MHPMCOUNTER3 0xb03 +#define CSR_MHPMCOUNTER4 0xb04 +#define CSR_MHPMCOUNTER5 0xb05 +#define CSR_MHPMCOUNTER6 0xb06 +#define CSR_MHPMCOUNTER7 0xb07 +#define CSR_MHPMCOUNTER8 0xb08 +#define CSR_MHPMCOUNTER9 0xb09 +#define CSR_MHPMCOUNTER10 0xb0a +#define CSR_MHPMCOUNTER11 0xb0b +#define CSR_MHPMCOUNTER12 0xb0c +#define CSR_MHPMCOUNTER13 0xb0d +#define CSR_MHPMCOUNTER14 0xb0e +#define CSR_MHPMCOUNTER15 0xb0f +#define CSR_MHPMCOUNTER16 0xb10 +#define CSR_MHPMCOUNTER17 0xb11 +#define CSR_MHPMCOUNTER18 0xb12 +#define CSR_MHPMCOUNTER19 0xb13 +#define CSR_MHPMCOUNTER20 0xb14 +#define CSR_MHPMCOUNTER21 0xb15 +#define CSR_MHPMCOUNTER22 0xb16 +#define CSR_MHPMCOUNTER23 0xb17 +#define CSR_MHPMCOUNTER24 0xb18 +#define CSR_MHPMCOUNTER25 0xb19 +#define CSR_MHPMCOUNTER26 0xb1a +#define CSR_MHPMCOUNTER27 0xb1b +#define CSR_MHPMCOUNTER28 0xb1c +#define CSR_MHPMCOUNTER29 0xb1d +#define CSR_MHPMCOUNTER30 0xb1e +#define CSR_MHPMCOUNTER31 0xb1f +#define CSR_MHPMEVENT3 0x323 +#define CSR_MHPMEVENT4 0x324 +#define CSR_MHPMEVENT5 0x325 +#define CSR_MHPMEVENT6 0x326 +#define CSR_MHPMEVENT7 0x327 +#define CSR_MHPMEVENT8 0x328 +#define CSR_MHPMEVENT9 0x329 +#define CSR_MHPMEVENT10 0x32a +#define CSR_MHPMEVENT11 0x32b +#define CSR_MHPMEVENT12 0x32c +#define CSR_MHPMEVENT13 0x32d +#define CSR_MHPMEVENT14 0x32e +#define CSR_MHPMEVENT15 0x32f +#define CSR_MHPMEVENT16 0x330 +#define CSR_MHPMEVENT17 0x331 +#define CSR_MHPMEVENT18 0x332 +#define CSR_MHPMEVENT19 0x333 +#define CSR_MHPMEVENT20 0x334 +#define CSR_MHPMEVENT21 0x335 +#define CSR_MHPMEVENT22 0x336 +#define CSR_MHPMEVENT23 0x337 +#define CSR_MHPMEVENT24 0x338 +#define CSR_MHPMEVENT25 0x339 +#define CSR_MHPMEVENT26 0x33a +#define CSR_MHPMEVENT27 0x33b +#define CSR_MHPMEVENT28 0x33c +#define CSR_MHPMEVENT29 0x33d +#define CSR_MHPMEVENT30 0x33e +#define CSR_MHPMEVENT31 0x33f +#define CSR_MHPMCOUNTER3H 0xb83 +#define CSR_MHPMCOUNTER4H 0xb84 +#define CSR_MHPMCOUNTER5H 0xb85 +#define CSR_MHPMCOUNTER6H 0xb86 +#define CSR_MHPMCOUNTER7H 0xb87 +#define CSR_MHPMCOUNTER8H 0xb88 +#define CSR_MHPMCOUNTER9H 0xb89 +#define CSR_MHPMCOUNTER10H 0xb8a +#define CSR_MHPMCOUNTER11H 0xb8b +#define CSR_MHPMCOUNTER12H 0xb8c +#define CSR_MHPMCOUNTER13H 0xb8d +#define CSR_MHPMCOUNTER14H 0xb8e +#define CSR_MHPMCOUNTER15H 0xb8f +#define CSR_MHPMCOUNTER16H 0xb90 +#define CSR_MHPMCOUNTER17H 0xb91 +#define CSR_MHPMCOUNTER18H 0xb92 +#define CSR_MHPMCOUNTER19H 0xb93 +#define 
CSR_MHPMCOUNTER20H 0xb94 +#define CSR_MHPMCOUNTER21H 0xb95 +#define CSR_MHPMCOUNTER22H 0xb96 +#define CSR_MHPMCOUNTER23H 0xb97 +#define CSR_MHPMCOUNTER24H 0xb98 +#define CSR_MHPMCOUNTER25H 0xb99 +#define CSR_MHPMCOUNTER26H 0xb9a +#define CSR_MHPMCOUNTER27H 0xb9b +#define CSR_MHPMCOUNTER28H 0xb9c +#define CSR_MHPMCOUNTER29H 0xb9d +#define CSR_MHPMCOUNTER30H 0xb9e +#define CSR_MHPMCOUNTER31H 0xb9f + +/* Legacy Machine Protection and Translation (priv v1.9.1) */ +#define CSR_MBASE 0x380 +#define CSR_MBOUND 0x381 +#define CSR_MIBASE 0x382 +#define CSR_MIBOUND 0x383 +#define CSR_MDBASE 0x384 +#define CSR_MDBOUND 0x385 + +/* mstatus CSR bits */ +#define MSTATUS_UIE 0x00000001 +#define MSTATUS_SIE 0x00000002 +#define MSTATUS_MIE 0x00000008 +#define MSTATUS_UPIE 0x00000010 +#define MSTATUS_SPIE 0x00000020 +#define MSTATUS_MPIE 0x00000080 +#define MSTATUS_SPP 0x00000100 +#define MSTATUS_MPP 0x00001800 +#define MSTATUS_FS 0x00006000 +#define MSTATUS_XS 0x00018000 +#define MSTATUS_MPRV 0x00020000 +#define MSTATUS_PUM 0x00040000 /* until: priv-1.9.1 */ +#define MSTATUS_SUM 0x00040000 /* since: priv-1.10 */ +#define MSTATUS_MXR 0x00080000 +#define MSTATUS_VM 0x1F000000 /* until: priv-1.9.1 */ +#define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */ +#define MSTATUS_TW 0x20000000 /* since: priv-1.10 */ +#define MSTATUS_TSR 0x40000000 /* since: priv-1.10 */ +#if defined(TARGET_RISCV64) +#define MSTATUS_MTL 0x4000000000ULL +#define MSTATUS_MPV 0x8000000000ULL +#elif defined(TARGET_RISCV32) +#define MSTATUS_MTL 0x00000040 +#define MSTATUS_MPV 0x00000080 +#endif + +#ifdef TARGET_RISCV32 +# define MSTATUS_MPV_ISSET(env) get_field(env->mstatush, MSTATUS_MPV) +#else +# define MSTATUS_MPV_ISSET(env) get_field(env->mstatus, MSTATUS_MPV) +#endif + +#define MSTATUS64_UXL 0x0000000300000000ULL +#define MSTATUS64_SXL 0x0000000C00000000ULL + +#define MSTATUS32_SD 0x80000000 +#define MSTATUS64_SD 0x8000000000000000ULL + +#define MISA32_MXL 0xC0000000 +#define MISA64_MXL 0xC000000000000000ULL + +#define MXL_RV32 1 +#define MXL_RV64 2 +#define MXL_RV128 3 + +#if defined(TARGET_RISCV32) +#define MSTATUS_SD MSTATUS32_SD +#define MISA_MXL MISA32_MXL +#define MXL_VAL MXL_RV32 +#elif defined(TARGET_RISCV64) +#define MSTATUS_SD MSTATUS64_SD +#define MISA_MXL MISA64_MXL +#define MXL_VAL MXL_RV64 +#endif + +/* sstatus CSR bits */ +#define SSTATUS_UIE 0x00000001 +#define SSTATUS_SIE 0x00000002 +#define SSTATUS_UPIE 0x00000010 +#define SSTATUS_SPIE 0x00000020 +#define SSTATUS_SPP 0x00000100 +#define SSTATUS_FS 0x00006000 +#define SSTATUS_XS 0x00018000 +#define SSTATUS_PUM 0x00040000 /* until: priv-1.9.1 */ +#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */ +#define SSTATUS_MXR 0x00080000 + +#define SSTATUS32_SD 0x80000000 +#define SSTATUS64_SD 0x8000000000000000ULL + +#if defined(TARGET_RISCV32) +#define SSTATUS_SD SSTATUS32_SD +#elif defined(TARGET_RISCV64) +#define SSTATUS_SD SSTATUS64_SD +#endif + +/* hstatus CSR bits */ +#define HSTATUS_SPRV 0x00000001 +#define HSTATUS_SPV 0x00000080 +#define HSTATUS_SP2P 0x00000100 +#define HSTATUS_SP2V 0x00000200 +#define HSTATUS_VTVM 0x00100000 +#define HSTATUS_VTSR 0x00400000 + +#define HSTATUS32_WPRI 0xFF8FF87E +#define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL + +#if defined(TARGET_RISCV32) +#define HSTATUS_WPRI HSTATUS32_WPRI +#elif defined(TARGET_RISCV64) +#define HSTATUS_WPRI HSTATUS64_WPRI +#endif + +/* Privilege modes */ +#define PRV_U 0 +#define PRV_S 1 +#define PRV_H 2 /* Reserved */ +#define PRV_M 3 + +/* Virtualisation Register Fields */ +#define VIRT_ONOFF 1 +/* This is used to
save state for when we take an exception. If this is set + * that means that we want to force a HS level exception (no matter what the + * delegation is set to). This will occur for things such as a second level + * page table fault. + */ +#define FORCE_HS_EXCEP 2 + +/* RV32 satp CSR field masks */ +#define SATP32_MODE 0x80000000 +#define SATP32_ASID 0x7fc00000 +#define SATP32_PPN 0x003fffff + +/* RV64 satp CSR field masks */ +#define SATP64_MODE 0xF000000000000000ULL +#define SATP64_ASID 0x0FFFF00000000000ULL +#define SATP64_PPN 0x00000FFFFFFFFFFFULL + +#if defined(TARGET_RISCV32) +#define SATP_MODE SATP32_MODE +#define SATP_ASID SATP32_ASID +#define SATP_PPN SATP32_PPN +#endif +#if defined(TARGET_RISCV64) +#define SATP_MODE SATP64_MODE +#define SATP_ASID SATP64_ASID +#define SATP_PPN SATP64_PPN +#endif + +/* VM modes (mstatus.vm) privileged ISA 1.9.1 */ +#define VM_1_09_MBARE 0 +#define VM_1_09_MBB 1 +#define VM_1_09_MBBID 2 +#define VM_1_09_SV32 8 +#define VM_1_09_SV39 9 +#define VM_1_09_SV48 10 + +/* VM modes (satp.mode) privileged ISA 1.10 */ +#define VM_1_10_MBARE 0 +#define VM_1_10_SV32 1 +#define VM_1_10_SV39 8 +#define VM_1_10_SV48 9 +#define VM_1_10_SV57 10 +#define VM_1_10_SV64 11 + +/* Page table entry (PTE) fields */ +#define PTE_V 0x001 /* Valid */ +#define PTE_R 0x002 /* Read */ +#define PTE_W 0x004 /* Write */ +#define PTE_X 0x008 /* Execute */ +#define PTE_U 0x010 /* User */ +#define PTE_G 0x020 /* Global */ +#define PTE_A 0x040 /* Accessed */ +#define PTE_D 0x080 /* Dirty */ +#define PTE_SOFT 0x300 /* Reserved for Software */ + +/* Page table PPN shift amount */ +#define PTE_PPN_SHIFT 10 + +/* Leaf page shift amount */ +#define PGSHIFT 12 + +/* Default Reset Vector address */ +#define DEFAULT_RSTVEC 0x1000 + +/* Exception causes */ +#define EXCP_NONE -1 /* sentinel value */ +#define RISCV_EXCP_INST_ADDR_MIS 0x0 +#define RISCV_EXCP_INST_ACCESS_FAULT 0x1 +#define RISCV_EXCP_ILLEGAL_INST 0x2 +#define RISCV_EXCP_BREAKPOINT 0x3 +#define RISCV_EXCP_LOAD_ADDR_MIS 0x4 +#define RISCV_EXCP_LOAD_ACCESS_FAULT 0x5 +#define RISCV_EXCP_STORE_AMO_ADDR_MIS 0x6 +#define RISCV_EXCP_STORE_AMO_ACCESS_FAULT 0x7 +#define RISCV_EXCP_U_ECALL 0x8 +#define RISCV_EXCP_S_ECALL 0x9 +#define RISCV_EXCP_VS_ECALL 0xa +#define RISCV_EXCP_M_ECALL 0xb +#define RISCV_EXCP_INST_PAGE_FAULT 0xc /* since: priv-1.10.0 */ +#define RISCV_EXCP_LOAD_PAGE_FAULT 0xd /* since: priv-1.10.0 */ +#define RISCV_EXCP_STORE_PAGE_FAULT 0xf /* since: priv-1.10.0 */ +#define RISCV_EXCP_INST_GUEST_PAGE_FAULT 0x14 +#define RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT 0x15 +#define RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT 0x17 +#define RISCV_EXCP_UNICORN_END 0x8888 + +#define RISCV_EXCP_INT_FLAG 0x80000000 +#define RISCV_EXCP_INT_MASK 0x7fffffff + +/* Interrupt causes */ +#define IRQ_U_SOFT 0 +#define IRQ_S_SOFT 1 +#define IRQ_VS_SOFT 2 +#define IRQ_M_SOFT 3 +#define IRQ_U_TIMER 4 +#define IRQ_S_TIMER 5 +#define IRQ_VS_TIMER 6 +#define IRQ_M_TIMER 7 +#define IRQ_U_EXT 8 +#define IRQ_S_EXT 9 +#define IRQ_VS_EXT 10 +#define IRQ_M_EXT 11 + +/* mip masks */ +#define MIP_USIP (1 << IRQ_U_SOFT) +#define MIP_SSIP (1 << IRQ_S_SOFT) +#define MIP_VSSIP (1 << IRQ_VS_SOFT) +#define MIP_MSIP (1 << IRQ_M_SOFT) +#define MIP_UTIP (1 << IRQ_U_TIMER) +#define MIP_STIP (1 << IRQ_S_TIMER) +#define MIP_VSTIP (1 << IRQ_VS_TIMER) +#define MIP_MTIP (1 << IRQ_M_TIMER) +#define MIP_UEIP (1 << IRQ_U_EXT) +#define MIP_SEIP (1 << IRQ_S_EXT) +#define MIP_VSEIP (1 << IRQ_VS_EXT) +#define MIP_MEIP (1 << IRQ_M_EXT) + +/* sip masks */ +#define SIP_SSIP MIP_SSIP +#define
SIP_STIP MIP_STIP +#define SIP_SEIP MIP_SEIP + +/* MIE masks */ +#define MIE_SEIE (1 << IRQ_S_EXT) +#define MIE_UEIE (1 << IRQ_U_EXT) +#define MIE_STIE (1 << IRQ_S_TIMER) +#define MIE_UTIE (1 << IRQ_U_TIMER) +#define MIE_SSIE (1 << IRQ_S_SOFT) +#define MIE_USIE (1 << IRQ_U_SOFT) +#endif diff --git a/qemu/target/riscv/cpu_helper.c b/qemu/target/riscv/cpu_helper.c new file mode 100644 index 00000000..bb2c3d86 --- /dev/null +++ b/qemu/target/riscv/cpu_helper.c @@ -0,0 +1,978 @@ +/* + * RISC-V CPU helpers for qemu. + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017-2018 SiFive, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "tcg/tcg-op.h" + +int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) +{ + return env->priv; +} + +static int riscv_cpu_local_irq_pending(CPURISCVState *env) +{ + target_ulong irqs; + + target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE); + target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE); + target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE); + + target_ulong pending = env->mip & env->mie & + ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); + target_ulong vspending = (env->mip & env->mie & + (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)); + + target_ulong mie = env->priv < PRV_M || + (env->priv == PRV_M && mstatus_mie); + target_ulong sie = env->priv < PRV_S || + (env->priv == PRV_S && mstatus_sie); + target_ulong hs_sie = env->priv < PRV_S || + (env->priv == PRV_S && hs_mstatus_sie); + + if (riscv_cpu_virt_enabled(env)) { +#ifdef _MSC_VER + target_ulong pending_hs_irq = pending & (0 - hs_sie); +#else + target_ulong pending_hs_irq = pending & -hs_sie; +#endif + + if (pending_hs_irq) { + riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP); + return ctz64(pending_hs_irq); + } + + pending = vspending; + } + +#ifdef _MSC_VER + irqs = (pending & ~env->mideleg & (0 - mie)) | (pending & env->mideleg & (0 - sie)); +#else + irqs = (pending & ~env->mideleg & -mie) | (pending & env->mideleg & -sie); +#endif + + if (irqs) { + return ctz64(irqs); /* since non-zero */ + } else { + return EXCP_NONE; /* indicates no pending interrupt */ + } +} + +bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + if (interrupt_request & CPU_INTERRUPT_HARD) { + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + int interruptno = riscv_cpu_local_irq_pending(env); + if (interruptno >= 0) { + cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno; + riscv_cpu_do_interrupt(cs); + return true; + } + } + return false; + } + +/* Return true if floating point support is currently enabled */ +bool riscv_cpu_fp_enabled(CPURISCVState *env) +{ + if (env->mstatus & MSTATUS_FS) { + if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) { + return false; + } + return true; + } + + return false; +} + +void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) +{
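The (0 - mie) / -mie arithmetic in riscv_cpu_local_irq_pending above is the BOOL_TO_MASK idiom declared in cpu.h: negating a 0/1 flag yields an all-zeroes or all-ones word, so each enable bit selects or discards a whole class of pending interrupts without a branch. A standalone demo of that trick (not the patch's code; the pending/delegation values are made up):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Negating a 0/1 flag gives 0x0 or 0xffffffffffffffff, so
// "pending & (0 - enabled)" keeps either nothing or everything.
int main(void)
{
    uint64_t pending = 0x222;   // SSIP | STIP | SEIP (bits 1, 5, 9)
    uint64_t mideleg = 0x222;   // all three delegated to S-mode

    uint64_t mie = 0;           // M-level interrupts globally disabled
    uint64_t sie = 1;           // S-level interrupts globally enabled

    uint64_t irqs = (pending & ~mideleg & (0 - mie)) |
                    (pending & mideleg & (0 - sie));

    assert(irqs == 0x222);      // only the delegated, S-enabled bits survive
    printf("irqs = 0x%llx\n", (unsigned long long)irqs);
    return 0;
}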
+ target_ulong mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS | + MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE; + bool current_virt = riscv_cpu_virt_enabled(env); + + g_assert(riscv_has_ext(env, RVH)); + +#if defined(TARGET_RISCV64) + mstatus_mask |= MSTATUS64_UXL; +#endif + + if (current_virt) { + /* Current V=1 and we are about to change to V=0 */ + env->vsstatus = env->mstatus & mstatus_mask; + env->mstatus &= ~mstatus_mask; + env->mstatus |= env->mstatus_hs; + +#if defined(TARGET_RISCV32) + env->vsstatush = env->mstatush; + env->mstatush |= env->mstatush_hs; +#endif + + env->vstvec = env->stvec; + env->stvec = env->stvec_hs; + + env->vsscratch = env->sscratch; + env->sscratch = env->sscratch_hs; + + env->vsepc = env->sepc; + env->sepc = env->sepc_hs; + + env->vscause = env->scause; + env->scause = env->scause_hs; + + env->vstval = env->sbadaddr; + env->sbadaddr = env->stval_hs; + + env->vsatp = env->satp; + env->satp = env->satp_hs; + } else { + /* Current V=0 and we are about to change to V=1 */ + env->mstatus_hs = env->mstatus & mstatus_mask; + env->mstatus &= ~mstatus_mask; + env->mstatus |= env->vsstatus; + +#if defined(TARGET_RISCV32) + env->mstatush_hs = env->mstatush; + env->mstatush |= env->vsstatush; +#endif + + env->stvec_hs = env->stvec; + env->stvec = env->vstvec; + + env->sscratch_hs = env->sscratch; + env->sscratch = env->vsscratch; + + env->sepc_hs = env->sepc; + env->sepc = env->vsepc; + + env->scause_hs = env->scause; + env->scause = env->vscause; + + env->stval_hs = env->sbadaddr; + env->sbadaddr = env->vstval; + + env->satp_hs = env->satp; + env->satp = env->vsatp; + } +} + +bool riscv_cpu_virt_enabled(CPURISCVState *env) +{ + if (!riscv_has_ext(env, RVH)) { + return false; + } + + return get_field(env->virt, VIRT_ONOFF); +} + +void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) +{ + if (!riscv_has_ext(env, RVH)) { + return; + } + + /* Flush the TLB on all virt mode changes. 
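Every CSR handled by riscv_cpu_swap_hypervisor_regs above follows the same save/install pattern: the world being left parks its live value in its shadow slot, and the world being entered has its shadow installed. A reduced single-CSR model of that pattern, using a stand-in struct rather than CPURISCVState (sketch only):

#include <stdint.h>

typedef struct {
    uint64_t stvec;     // the live, architecturally visible CSR
    uint64_t vstvec;    // VS-level shadow (guest copy)
    uint64_t stvec_hs;  // HS-level shadow (host copy)
} sketch_env_t;

// On V=1 -> V=0 the live value belongs to the guest, so park it in vs*;
// on V=0 -> V=1 it belongs to the host, so park it in *_hs.
static void sketch_swap_stvec(sketch_env_t *env, int leaving_virt)
{
    if (leaving_virt) {
        env->vstvec = env->stvec;    // save guest copy
        env->stvec = env->stvec_hs;  // install host copy
    } else {
        env->stvec_hs = env->stvec;  // save host copy
        env->stvec = env->vstvec;    // install guest copy
    }
}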
*/ + if (get_field(env->virt, VIRT_ONOFF) != enable) { + tlb_flush(env_cpu(env)); + } + + env->virt = set_field(env->virt, VIRT_ONOFF, enable); +} + +bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env) +{ + if (!riscv_has_ext(env, RVH)) { + return false; + } + + return get_field(env->virt, FORCE_HS_EXCEP); +} + +void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable) +{ + if (!riscv_has_ext(env, RVH)) { + return; + } + + env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable); +} + +int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts) +{ + CPURISCVState *env = &cpu->env; + if (env->miclaim & interrupts) { + return -1; + } else { + env->miclaim |= interrupts; + return 0; + } +} + +uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value) +{ + CPURISCVState *env = &cpu->env; + CPUState *cs = CPU(cpu); + uint32_t old = env->mip; + + env->mip = (env->mip & ~mask) | (value & mask); + + if (env->mip) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } + + return old; +} + +void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void)) +{ + env->rdtime_fn = fn; +} + +void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) +{ + if (newpriv > PRV_M) { + g_assert_not_reached(); + } + if (newpriv == PRV_H) { + newpriv = PRV_U; + } + /* tlb_flush is unnecessary as mode is contained in mmu_idx */ + env->priv = newpriv; + + /* + * Clear the load reservation - otherwise a reservation placed in one + * context/process can be used by another, resulting in an SC succeeding + * incorrectly. Version 2.2 of the ISA specification explicitly requires + * this behaviour, while later revisions say that the kernel "should" use + * an SC instruction to force the yielding of a load reservation on a + * preemptive context switch. As a result, do both. + */ + env->load_res = -1; +} + +/* get_physical_address - get the physical address for this virtual address + * + * Do a page table walk to obtain the physical address corresponding to a + * virtual address. Returns 0 if the translation was successful + * + * Adapted from Spike's mmu_t::translate and mmu_t::walk + * + * @env: CPURISCVState + * @physical: This will be set to the calculated physical address + * @prot: The returned protection attributes + * @addr: The virtual address to be translated + * @access_type: The type of MMU access + * @mmu_idx: Indicates current privilege level + * @first_stage: Are we in first stage translation? + * Second stage is used for hypervisor guest translation + * @two_stage: Are we going to perform two stage translation + */ +static int get_physical_address(CPURISCVState *env, hwaddr *physical, + int *prot, target_ulong addr, + int access_type, int mmu_idx, + bool first_stage, bool two_stage) +{ + /* NOTE: the env->pc value visible here will not be + * correct, but the value visible to the exception handler + * (riscv_cpu_do_interrupt) is correct */ + MemTxResult res; + MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + int mode = mmu_idx; + bool use_background = false; + hwaddr base; + int levels = 0, ptidxbits = 0, ptesize = 0, vm, sum, mxr, widened; + + + /* + * Check if we should use the background registers for the two + * stage translation. We don't need to check if we actually need + * two stage translation as that happened before this function + * was called. Background registers will be used if the guest has + * forced a two stage translation to be on (in HS or M mode). 
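The walk below leans heavily on the get_field/set_field macros from cpu_bits.h, whose divide-by-lowest-bit trick avoids having to encode shift amounts separately from masks: ((mask) & ~((mask) << 1)) isolates the lowest set bit of a contiguous mask, and dividing by that bit is the same as shifting right by its position. A standalone worked example using the MSTATUS_MPP mask (bits 12:11), not the patch's code:

#include <assert.h>
#include <stdint.h>

// Same arithmetic as get_field in cpu_bits.h, on plain uint64_t.
#define SK_GET_FIELD(reg, mask) (((reg) & (uint64_t)(mask)) / ((mask) & ~((mask) << 1)))
#define SK_MSTATUS_MPP 0x00001800ULL  // bits 12:11; lowest mask bit is 0x800

int main(void)
{
    uint64_t mstatus = 0;

    // set_field equivalent: place PRV_M (3) into the MPP field.
    mstatus = (mstatus & ~SK_MSTATUS_MPP) |
              ((3ULL * (SK_MSTATUS_MPP & ~(SK_MSTATUS_MPP << 1))) & SK_MSTATUS_MPP);

    assert(mstatus == 0x1800);                          // both MPP bits set
    assert(SK_GET_FIELD(mstatus, SK_MSTATUS_MPP) == 3); // 0x1800 / 0x800
    return 0;
}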
+ */ + if (mode == PRV_M && access_type != MMU_INST_FETCH) { + if (get_field(env->mstatus, MSTATUS_MPRV)) { + mode = get_field(env->mstatus, MSTATUS_MPP); + + if (riscv_has_ext(env, RVH) && + MSTATUS_MPV_ISSET(env)) { + use_background = true; + } + } + } + + if (mode == PRV_S && access_type != MMU_INST_FETCH && + riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) { + if (get_field(env->hstatus, HSTATUS_SPRV)) { + mode = get_field(env->mstatus, SSTATUS_SPP); + use_background = true; + } + } + + if (first_stage == false) { + /* We are in stage 2 translation, this is similar to stage 1. */ + /* Stage 2 is always taken as U-mode */ + mode = PRV_U; + } + + if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) { + *physical = addr; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TRANSLATE_SUCCESS; + } + + *prot = 0; + + if (first_stage == true) { + mxr = get_field(env->mstatus, MSTATUS_MXR); + } else { + mxr = get_field(env->vsstatus, MSTATUS_MXR); + } + + if (env->priv_ver >= PRIV_VERSION_1_10_0) { + if (first_stage == true) { + if (use_background) { + base = (hwaddr)get_field(env->vsatp, SATP_PPN) << PGSHIFT; + vm = get_field(env->vsatp, SATP_MODE); + } else { + base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT; + vm = get_field(env->satp, SATP_MODE); + } + widened = 0; + } else { + base = (hwaddr)get_field(env->hgatp, HGATP_PPN) << PGSHIFT; + vm = get_field(env->hgatp, HGATP_MODE); + widened = 2; + } + sum = get_field(env->mstatus, MSTATUS_SUM); + switch (vm) { + case VM_1_10_SV32: + levels = 2; ptidxbits = 10; ptesize = 4; break; + case VM_1_10_SV39: + levels = 3; ptidxbits = 9; ptesize = 8; break; + case VM_1_10_SV48: + levels = 4; ptidxbits = 9; ptesize = 8; break; + case VM_1_10_SV57: + levels = 5; ptidxbits = 9; ptesize = 8; break; + case VM_1_10_MBARE: + *physical = addr; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TRANSLATE_SUCCESS; + default: + g_assert_not_reached(); + } + } else { + widened = 0; + base = (hwaddr)(env->sptbr) << PGSHIFT; + sum = !get_field(env->mstatus, MSTATUS_PUM); + vm = get_field(env->mstatus, MSTATUS_VM); + switch (vm) { + case VM_1_09_SV32: + levels = 2; ptidxbits = 10; ptesize = 4; break; + case VM_1_09_SV39: + levels = 3; ptidxbits = 9; ptesize = 8; break; + case VM_1_09_SV48: + levels = 4; ptidxbits = 9; ptesize = 8; break; + case VM_1_09_MBARE: + *physical = addr; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TRANSLATE_SUCCESS; + default: + g_assert_not_reached(); + } + } + + CPUState *cs = env_cpu(env); + int va_bits = PGSHIFT + levels * ptidxbits + widened; + target_ulong mask, masked_msbs; + + if (TARGET_LONG_BITS > (va_bits - 1)) { + mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; + } else { + mask = 0; + } + masked_msbs = (addr >> (va_bits - 1)) & mask; + + if (masked_msbs != 0 && masked_msbs != mask) { + return TRANSLATE_FAIL; + } + + int ptshift = (levels - 1) * ptidxbits; + int i; + +#if !TCG_OVERSIZED_GUEST +restart: +#endif + for (i = 0; i < levels; i++, ptshift -= ptidxbits) { + target_ulong idx; + if (i == 0) { + idx = (addr >> (PGSHIFT + ptshift)) & + ((1 << (ptidxbits + widened)) - 1); + } else { + idx = (addr >> (PGSHIFT + ptshift)) & + ((1 << ptidxbits) - 1); + } + + /* check that physical address of PTE is legal */ + hwaddr pte_addr; + + if (two_stage && first_stage) { + hwaddr vbase; + + /* Do the second stage translation on the base PTE address. 
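The mask/masked_msbs test above implements the canonical-address rule: with va_bits of usable virtual address, every bit above va_bits - 1 must replicate the top VA bit, and the walk fails before touching memory otherwise. A standalone check for Sv39, where levels = 3 and ptidxbits = 9 give va_bits = 12 + 3 * 9 = 39 (assumed 64-bit target_ulong):

#include <assert.h>
#include <stdint.h>

// Mirrors the mask/masked_msbs computation in get_physical_address.
static int sketch_sv39_canonical(uint64_t addr)
{
    int va_bits = 12 + 3 * 9;                          // PGSHIFT + levels * ptidxbits
    uint64_t mask = (1ULL << (64 - (va_bits - 1))) - 1;
    uint64_t msbs = (addr >> (va_bits - 1)) & mask;
    return msbs == 0 || msbs == mask;                  // all-zeroes or all-ones
}

int main(void)
{
    assert(sketch_sv39_canonical(0x0000003fffffffffULL));   // top of low half
    assert(sketch_sv39_canonical(0xffffffc000000000ULL));   // bottom of high half
    assert(!sketch_sv39_canonical(0x0000004000000000ULL));  // non-canonical
    return 0;
}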
*/ + get_physical_address(env, &vbase, prot, base, access_type, + mmu_idx, false, true); + + pte_addr = vbase + idx * ptesize; + } else { + pte_addr = base + idx * ptesize; + } + + if (riscv_feature(env, RISCV_FEATURE_PMP) && + !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong), + 1 << MMU_DATA_LOAD, PRV_S)) { + return TRANSLATE_PMP_FAIL; + } + +#if defined(TARGET_RISCV32) +#ifdef UNICORN_ARCH_POSTFIX + target_ulong pte = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pte_addr, attrs, &res); +#else + target_ulong pte = address_space_ldl(cs->as->uc, cs->as, pte_addr, attrs, &res); +#endif +#elif defined(TARGET_RISCV64) +#ifdef UNICORN_ARCH_POSTFIX + target_ulong pte = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pte_addr, attrs, &res); +#else + target_ulong pte = address_space_ldq(cs->as->uc, cs->as, pte_addr, attrs, &res); +#endif +#endif + if (res != MEMTX_OK) { + return TRANSLATE_FAIL; + } + + hwaddr ppn = pte >> PTE_PPN_SHIFT; + + if (!(pte & PTE_V)) { + /* Invalid PTE */ + return TRANSLATE_FAIL; + } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { + /* Inner PTE, continue walking */ + base = ppn << PGSHIFT; + } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) { + /* Reserved leaf PTE flags: PTE_W */ + return TRANSLATE_FAIL; + } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) { + /* Reserved leaf PTE flags: PTE_W + PTE_X */ + return TRANSLATE_FAIL; + } else if ((pte & PTE_U) && ((mode != PRV_U) && + (!sum || access_type == MMU_INST_FETCH))) { + /* User PTE flags when not U mode and mstatus.SUM is not set, + or the access type is an instruction fetch */ + return TRANSLATE_FAIL; + } else if (!(pte & PTE_U) && (mode != PRV_S)) { + /* Supervisor PTE flags when not S mode */ + return TRANSLATE_FAIL; + } else if (ppn & ((1ULL << ptshift) - 1)) { + /* Misaligned PPN */ + return TRANSLATE_FAIL; + } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) || + ((pte & PTE_X) && mxr))) { + /* Read access check failed */ + return TRANSLATE_FAIL; + } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) { + /* Write access check failed */ + return TRANSLATE_FAIL; + } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) { + /* Fetch access check failed */ + return TRANSLATE_FAIL; + } else { + /* if necessary, set accessed and dirty bits. */ + target_ulong updated_pte = pte | PTE_A | + (access_type == MMU_DATA_STORE ? PTE_D : 0); + + /* Page table updates need to be atomic with MTTCG enabled */ + if (updated_pte != pte) { + /* + * - if accessed or dirty bits need updating, and the PTE is + * in RAM, then we do so atomically with a compare and swap. + * - if the PTE is in IO space or ROM, then it can't be updated + * and we return TRANSLATE_FAIL. + * - if the PTE changed by the time we went to update it, then + * it is no longer valid and we must re-walk the page table. 
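The chain of flag checks above is easier to follow against a concrete PTE. Using the PTE_* layout from cpu_bits.h (low 10 bits are flags, the PPN starts at PTE_PPN_SHIFT = 10), a made-up Sv39 leaf PTE decodes like this:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    // PPN = 0x80000, flags = D|A|X|W|R|V (0xcf)
    uint64_t pte = (0x80000ULL << 10) | 0xcf;

    assert(pte & 0x001);                      // PTE_V: entry is valid
    assert((pte & 0x00e) != 0);               // R/W/X set -> leaf, not a pointer
    assert((pte >> 10) == 0x80000);           // PPN recovered via PTE_PPN_SHIFT
    assert(((pte >> 10) << 12) == 0x80000000ULL);  // page base = PPN << PGSHIFT
    return 0;
}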
+ */ + MemoryRegion *mr; + hwaddr l = sizeof(target_ulong), addr1; + mr = address_space_translate(cs->as, pte_addr, + &addr1, &l, false, MEMTXATTRS_UNSPECIFIED); + if (memory_region_is_ram(mr)) { + target_ulong *pte_pa = + qemu_map_ram_ptr(mr->uc, mr->ram_block, addr1); +#if TCG_OVERSIZED_GUEST + /* MTTCG is not enabled on oversized TCG guests so + * page table updates do not need to be atomic */ + *pte_pa = pte = updated_pte; +#else + target_ulong old_pte = +#ifdef _MSC_VER + atomic_cmpxchg((long *)pte_pa, pte, updated_pte); +#else + atomic_cmpxchg(pte_pa, pte, updated_pte); +#endif + if (old_pte != pte) { + goto restart; + } else { + pte = updated_pte; + } +#endif + } else { + /* misconfigured PTE in ROM (AD bits are not preset) or + * PTE is in IO space and can't be updated atomically */ + return TRANSLATE_FAIL; + } + } + + /* for superpage mappings, make a fake leaf PTE for the TLB's + benefit. */ + target_ulong vpn = addr >> PGSHIFT; + if (i == 0) { + *physical = (ppn | (vpn & ((1L << (ptshift + widened)) - 1))) << + PGSHIFT; + } else { + *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT; + } + + /* set permissions on the TLB entry */ + if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) { + *prot |= PAGE_READ; + } + if ((pte & PTE_X)) { + *prot |= PAGE_EXEC; + } + /* add write permission on stores or if the page is already dirty, + so that we TLB miss on later writes to update the dirty bit */ + if ((pte & PTE_W) && + (access_type == MMU_DATA_STORE || (pte & PTE_D))) { + *prot |= PAGE_WRITE; + } + return TRANSLATE_SUCCESS; + } + } + return TRANSLATE_FAIL; +} + +static void raise_mmu_exception(CPURISCVState *env, target_ulong address, + MMUAccessType access_type, bool pmp_violation, + bool first_stage) +{ + CPUState *cs = env_cpu(env); + int page_fault_exceptions; + if (first_stage) { + page_fault_exceptions = + (env->priv_ver >= PRIV_VERSION_1_10_0) && + get_field(env->satp, SATP_MODE) != VM_1_10_MBARE && + !pmp_violation; + } else { + page_fault_exceptions = + get_field(env->hgatp, HGATP_MODE) != VM_1_10_MBARE && + !pmp_violation; + } + switch (access_type) { + case MMU_INST_FETCH: + if (riscv_cpu_virt_enabled(env) && !first_stage) { + cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT; + } else { + cs->exception_index = page_fault_exceptions ? + RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT; + } + break; + case MMU_DATA_LOAD: + if (riscv_cpu_virt_enabled(env) && !first_stage) { + cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT; + } else { + cs->exception_index = page_fault_exceptions ? + RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT; + } + break; + case MMU_DATA_STORE: + if (riscv_cpu_virt_enabled(env) && !first_stage) { + cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT; + } else { + cs->exception_index = page_fault_exceptions ? 
+ RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT; + } + break; + default: + g_assert_not_reached(); + } + env->badaddr = address; +} + +hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + hwaddr phys_addr; + int prot; + int mmu_idx = cpu_mmu_index(&cpu->env, false); + + if (get_physical_address(env, &phys_addr, &prot, addr, 0, mmu_idx, + true, riscv_cpu_virt_enabled(env))) { + return -1; + } + + if (riscv_cpu_virt_enabled(env)) { + if (get_physical_address(env, &phys_addr, &prot, phys_addr, + 0, mmu_idx, false, true)) { + return -1; + } + } + + return phys_addr; +} + +void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + + if (access_type == MMU_DATA_STORE) { + cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT; + } else { + cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT; + } + + env->badaddr = addr; + riscv_raise_exception(&cpu->env, cs->exception_index, retaddr); +} + +void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + switch (access_type) { + case MMU_INST_FETCH: + cs->exception_index = RISCV_EXCP_INST_ADDR_MIS; + break; + case MMU_DATA_LOAD: + cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS; + break; + case MMU_DATA_STORE: + cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS; + break; + default: + g_assert_not_reached(); + } + env->badaddr = addr; + riscv_raise_exception(env, cs->exception_index, retaddr); +} + +bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + vaddr im_address; + hwaddr pa = 0; + int prot; + bool pmp_violation = false; + bool m_mode_two_stage = false; + bool hs_mode_two_stage = false; + bool first_stage_error = true; + int ret = TRANSLATE_FAIL; + int mode = mmu_idx; + + env->guest_phys_fault_addr = 0; + + qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n", + __func__, address, access_type, mmu_idx); + + /* + * Determine if we are in M mode and MPRV is set or in HS mode and SPRV is + * set and we want to access a virtualisation address.
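One detail of riscv_cpu_tlb_fill below worth a worked example: when the second translation stage fails, the faulting guest-physical address is stored in guest_phys_fault_addr shifted right by two, which is the htval/mtval2 format the hypervisor extension specifies. A standalone round-trip check with made-up addresses (not the patch's code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t im_address = 0x80123000ULL;  // guest-physical page from stage 1
    uint64_t address    = 0x00000fecULL;  // faulting VA, page offset 0xfec
    uint64_t page_mask  = 0xfffULL;       // TARGET_PAGE_SIZE - 1

    // Same expression tlb_fill uses to fill env->guest_phys_fault_addr.
    uint64_t stored = (im_address | (address & page_mask)) >> 2;
    assert(stored == 0x80123fecULL >> 2);

    // A trap handler recovers the (4-byte aligned) GPA by shifting back.
    assert((stored << 2) == 0x80123fecULL);
    return 0;
}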
+ */ + if (riscv_has_ext(env, RVH)) { + m_mode_two_stage = env->priv == PRV_M && + access_type != MMU_INST_FETCH && + get_field(env->mstatus, MSTATUS_MPRV) && + MSTATUS_MPV_ISSET(env); + + hs_mode_two_stage = env->priv == PRV_S && + !riscv_cpu_virt_enabled(env) && + access_type != MMU_INST_FETCH && + get_field(env->hstatus, HSTATUS_SPRV) && + get_field(env->hstatus, HSTATUS_SPV); + } + + if (mode == PRV_M && access_type != MMU_INST_FETCH) { + if (get_field(env->mstatus, MSTATUS_MPRV)) { + mode = get_field(env->mstatus, MSTATUS_MPP); + } + } + + if (riscv_cpu_virt_enabled(env) || m_mode_two_stage || hs_mode_two_stage) { + /* Two stage lookup */ + ret = get_physical_address(env, &pa, &prot, address, access_type, + mmu_idx, true, true); + + qemu_log_mask(CPU_LOG_MMU, + "%s 1st-stage address=%" VADDR_PRIx " ret %d physical " + TARGET_FMT_plx " prot %d\n", + __func__, address, ret, pa, prot); + + if (ret != TRANSLATE_FAIL) { + /* Second stage lookup */ + im_address = pa; + + ret = get_physical_address(env, &pa, &prot, im_address, + access_type, mmu_idx, false, true); + + qemu_log_mask(CPU_LOG_MMU, + "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical " + TARGET_FMT_plx " prot %d\n", + __func__, im_address, ret, pa, prot); + + if (riscv_feature(env, RISCV_FEATURE_PMP) && + (ret == TRANSLATE_SUCCESS) && + !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) { + ret = TRANSLATE_PMP_FAIL; + } + + if (ret != TRANSLATE_SUCCESS) { + /* + * Guest physical address translation failed, this is a HS + * level exception + */ + first_stage_error = false; + env->guest_phys_fault_addr = (im_address | + (address & + (TARGET_PAGE_SIZE - 1))) >> 2; + } + } + } else { + /* Single stage lookup */ + ret = get_physical_address(env, &pa, &prot, address, access_type, + mmu_idx, true, false); + + qemu_log_mask(CPU_LOG_MMU, + "%s address=%" VADDR_PRIx " ret %d physical " + TARGET_FMT_plx " prot %d\n", + __func__, address, ret, pa, prot); + } + + if (riscv_feature(env, RISCV_FEATURE_PMP) && + (ret == TRANSLATE_SUCCESS) && + !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) { + ret = TRANSLATE_PMP_FAIL; + } + if (ret == TRANSLATE_PMP_FAIL) { + pmp_violation = true; + } + + if (ret == TRANSLATE_SUCCESS) { + tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK, + prot, mmu_idx, TARGET_PAGE_SIZE); + return true; + } else if (probe) { + return false; + } else { + raise_mmu_exception(env, address, access_type, pmp_violation, first_stage_error); + riscv_raise_exception(env, cs->exception_index, retaddr); + } + + return true; +} + +/* + * Handle Traps + * + * Adapted from Spike's processor_t::take_trap. + * + */ +void riscv_cpu_do_interrupt(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + bool force_hs_execp = riscv_cpu_force_hs_excep_enabled(env); + target_ulong s; + + /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide + * so we mask off the MSB and separate into trap type and cause. + */ + bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG); + target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK; + target_ulong deleg = async ? 
env->mideleg : env->medeleg; + target_ulong tval = 0; + target_ulong htval = 0; + target_ulong mtval2 = 0; + + if (!async) { + /* set tval to badaddr for traps with address information */ + switch (cause) { + case RISCV_EXCP_INST_GUEST_PAGE_FAULT: + case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT: + case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT: + force_hs_execp = true; + /* fallthrough */ + case RISCV_EXCP_INST_ADDR_MIS: + case RISCV_EXCP_INST_ACCESS_FAULT: + case RISCV_EXCP_LOAD_ADDR_MIS: + case RISCV_EXCP_STORE_AMO_ADDR_MIS: + case RISCV_EXCP_LOAD_ACCESS_FAULT: + case RISCV_EXCP_STORE_AMO_ACCESS_FAULT: + case RISCV_EXCP_INST_PAGE_FAULT: + case RISCV_EXCP_LOAD_PAGE_FAULT: + case RISCV_EXCP_STORE_PAGE_FAULT: + tval = env->badaddr; + break; + default: + break; + } + /* ecall is dispatched as one cause so translate based on mode */ + if (cause == RISCV_EXCP_U_ECALL) { + assert(env->priv <= 3); + + if (env->priv == PRV_M) { + cause = RISCV_EXCP_M_ECALL; + } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) { + cause = RISCV_EXCP_VS_ECALL; + } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) { + cause = RISCV_EXCP_S_ECALL; + } else if (env->priv == PRV_U) { + cause = RISCV_EXCP_U_ECALL; + } + } + } + + if (env->priv <= PRV_S && + cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) { + /* handle the trap in S-mode */ + if (riscv_has_ext(env, RVH)) { + target_ulong hdeleg = async ? env->hideleg : env->hedeleg; + + if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) && + !force_hs_execp) { + /* + * See if we need to adjust cause. Yes if it's a VS mode interrupt, + * no if the hypervisor has delegated one of HS mode's interrupts. + */ + if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT || + cause == IRQ_VS_EXT) + cause = cause - 1; + /* Trap to VS mode */ + } else if (riscv_cpu_virt_enabled(env)) { + /* Trap into HS mode, from virt */ + riscv_cpu_swap_hypervisor_regs(env); + env->hstatus = set_field(env->hstatus, HSTATUS_SP2V, + get_field(env->hstatus, HSTATUS_SPV)); + env->hstatus = set_field(env->hstatus, HSTATUS_SP2P, + get_field(env->mstatus, SSTATUS_SPP)); + env->hstatus = set_field(env->hstatus, HSTATUS_SPV, + riscv_cpu_virt_enabled(env)); + + htval = env->guest_phys_fault_addr; + + riscv_cpu_set_virt_enabled(env, 0); + riscv_cpu_set_force_hs_excep(env, 0); + } else { + /* Trap into HS mode */ + env->hstatus = set_field(env->hstatus, HSTATUS_SP2V, + get_field(env->hstatus, HSTATUS_SPV)); + env->hstatus = set_field(env->hstatus, HSTATUS_SP2P, + get_field(env->mstatus, SSTATUS_SPP)); + env->hstatus = set_field(env->hstatus, HSTATUS_SPV, + riscv_cpu_virt_enabled(env)); + + htval = env->guest_phys_fault_addr; + } + } + + s = env->mstatus; + s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ? + get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv)); + s = set_field(s, MSTATUS_SPP, env->priv); + s = set_field(s, MSTATUS_SIE, 0); + env->mstatus = s; + env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1)); + env->sepc = env->pc; + env->sbadaddr = tval; + env->htval = htval; + env->pc = (env->stvec >> 2 << 2) + + ((async && (env->stvec & 3) == 1) ?
cause * 4 : 0); + riscv_cpu_set_mode(env, PRV_S); + } else { + /* handle the trap in M-mode */ + if (riscv_has_ext(env, RVH)) { + if (riscv_cpu_virt_enabled(env)) { + riscv_cpu_swap_hypervisor_regs(env); + } +#ifdef TARGET_RISCV32 + env->mstatush = set_field(env->mstatush, MSTATUS_MPV, + riscv_cpu_virt_enabled(env)); + env->mstatush = set_field(env->mstatush, MSTATUS_MTL, + riscv_cpu_force_hs_excep_enabled(env)); +#else + env->mstatus = set_field(env->mstatus, MSTATUS_MPV, + riscv_cpu_virt_enabled(env)); + env->mstatus = set_field(env->mstatus, MSTATUS_MTL, + riscv_cpu_force_hs_excep_enabled(env)); +#endif + + mtval2 = env->guest_phys_fault_addr; + + /* Trapping to M mode, virt is disabled */ + riscv_cpu_set_virt_enabled(env, 0); + riscv_cpu_set_force_hs_excep(env, 0); + } + + s = env->mstatus; + s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ? + get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv)); + s = set_field(s, MSTATUS_MPP, env->priv); + s = set_field(s, MSTATUS_MIE, 0); + env->mstatus = s; + env->mcause = cause | ~(((target_ulong)-1) >> async); + env->mepc = env->pc; + env->mbadaddr = tval; + env->mtval2 = mtval2; + env->pc = (env->mtvec >> 2 << 2) + + ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); + riscv_cpu_set_mode(env, PRV_M); + } + + /* NOTE: it is not necessary to yield load reservations here. It is only + * necessary for an SC from "another hart" to cause a load reservation + * to be yielded. Refer to the memory consistency model section of the + * RISC-V ISA Specification. + */ + + cs->exception_index = EXCP_NONE; /* mark handled to qemu */ +} diff --git a/qemu/target/riscv/cpu_user.h b/qemu/target/riscv/cpu_user.h new file mode 100644 index 00000000..02afad60 --- /dev/null +++ b/qemu/target/riscv/cpu_user.h @@ -0,0 +1,19 @@ +#ifndef TARGET_RISCV_CPU_USER_H +#define TARGET_RISCV_CPU_USER_H + +#define xRA 1 /* return address (aka link register) */ +#define xSP 2 /* stack pointer */ +#define xGP 3 /* global pointer */ +#define xTP 4 /* thread pointer */ + +#define xA0 10 /* gpr[10-17] are syscall arguments */ +#define xA1 11 +#define xA2 12 +#define xA3 13 +#define xA4 14 +#define xA5 15 +#define xA6 16 +#define xA7 17 /* syscall number for RVI ABI */ +#define xT0 5 /* syscall number for RVE ABI */ + +#endif diff --git a/qemu/target/riscv/csr.c b/qemu/target/riscv/csr.c new file mode 100644 index 00000000..785ef26d --- /dev/null +++ b/qemu/target/riscv/csr.c @@ -0,0 +1,1604 @@ +/* + * RISC-V Control and Status Registers. + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017-2018 SiFive, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>.
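A note on the env->pc arithmetic in riscv_cpu_do_interrupt above: xtvec keeps its mode in the low two bits, so (tvec >> 2 << 2) recovers the aligned base, and mode 1 (vectored) sends asynchronous traps to base + 4 * cause while everything else lands at the base. A standalone worked example (not the patch's code):

#include <assert.h>
#include <stdint.h>

// Same dispatch arithmetic as riscv_cpu_do_interrupt, on plain uint64_t.
static uint64_t sketch_trap_pc(uint64_t tvec, uint64_t cause, int async)
{
    uint64_t base = tvec >> 2 << 2;      // strip the two mode bits
    int vectored = (tvec & 3) == 1;      // mode 0 = direct, 1 = vectored
    return base + ((async && vectored) ? cause * 4 : 0);
}

int main(void)
{
    // Vectored machine timer interrupt (cause 7): base + 0x1c.
    assert(sketch_trap_pc(0x80000001ULL, 7, 1) == 0x8000001cULL);
    // Synchronous traps always land at the base, even in vectored mode.
    assert(sketch_trap_pc(0x80000001ULL, 2, 0) == 0x80000000ULL);
    return 0;
}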
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "cpu.h" +#include "exec/exec-all.h" + +static int fs(CPURISCVState *env, int csrno); +static int read_fflags(CPURISCVState *env, int csrno, target_ulong *val); +static int write_fflags(CPURISCVState *env, int csrno, target_ulong val); +static int read_frm(CPURISCVState *env, int csrno, target_ulong *val); +static int write_frm(CPURISCVState *env, int csrno, target_ulong val); +static int read_fcsr(CPURISCVState *env, int csrno, target_ulong *val); +static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val); +static int ctr(CPURISCVState *env, int csrno); +static int read_instret(CPURISCVState *env, int csrno, target_ulong *val); +static int read_time(CPURISCVState *env, int csrno, target_ulong *val); +static int any(CPURISCVState *env, int csrno); +static int read_zero(CPURISCVState *env, int csrno, target_ulong *val); +static int read_mhartid(CPURISCVState *env, int csrno, target_ulong *val); +static int read_mstatus(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mstatus(CPURISCVState *env, int csrno, target_ulong val); +static int read_misa(CPURISCVState *env, int csrno, target_ulong *val); +static int write_misa(CPURISCVState *env, int csrno, target_ulong val); +static int read_mideleg(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mideleg(CPURISCVState *env, int csrno, target_ulong val); +static int read_medeleg(CPURISCVState *env, int csrno, target_ulong *val); +static int write_medeleg(CPURISCVState *env, int csrno, target_ulong val); +static int read_mie(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mie(CPURISCVState *env, int csrno, target_ulong val); +static int read_mtvec(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mtvec(CPURISCVState *env, int csrno, target_ulong val); +static int read_mcounteren(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mcounteren(CPURISCVState *env, int csrno, target_ulong val); +static int read_mucounteren(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mucounteren(CPURISCVState *env, int csrno, target_ulong val); +static int read_mscounteren(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mscounteren(CPURISCVState *env, int csrno, target_ulong val); +static int read_mscratch(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mscratch(CPURISCVState *env, int csrno, target_ulong val); +static int read_mepc(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mepc(CPURISCVState *env, int csrno, target_ulong val); +static int read_mcause(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mcause(CPURISCVState *env, int csrno, target_ulong val); +static int read_mbadaddr(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mbadaddr(CPURISCVState *env, int csrno, target_ulong val); +static int rmw_mip(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask); +static int smode(CPURISCVState *env, int csrno); +static int read_sstatus(CPURISCVState *env, int csrno, target_ulong *val); +static int write_sstatus(CPURISCVState *env, int csrno, target_ulong val); +static int read_sie(CPURISCVState *env, int csrno, target_ulong *val); +static int write_sie(CPURISCVState *env, int csrno, target_ulong val); +static int read_stvec(CPURISCVState *env, int csrno, target_ulong *val); +static int write_stvec(CPURISCVState *env, 
int csrno, target_ulong val); +static int read_scounteren(CPURISCVState *env, int csrno, target_ulong *val); +static int write_scounteren(CPURISCVState *env, int csrno, target_ulong val); +static int read_sscratch(CPURISCVState *env, int csrno, target_ulong *val); +static int write_sscratch(CPURISCVState *env, int csrno, target_ulong val); +static int read_sepc(CPURISCVState *env, int csrno, target_ulong *val); +static int write_sepc(CPURISCVState *env, int csrno, target_ulong val); +static int read_scause(CPURISCVState *env, int csrno, target_ulong *val); +static int write_scause(CPURISCVState *env, int csrno, target_ulong val); +static int read_sbadaddr(CPURISCVState *env, int csrno, target_ulong *val); +static int write_sbadaddr(CPURISCVState *env, int csrno, target_ulong val); +static int rmw_sip(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask); +static int read_satp(CPURISCVState *env, int csrno, target_ulong *val); +static int write_satp(CPURISCVState *env, int csrno, target_ulong val); +static int read_hstatus(CPURISCVState *env, int csrno, target_ulong *val); +static int write_hstatus(CPURISCVState *env, int csrno, target_ulong val); +static int hmode(CPURISCVState *env, int csrno); +static int read_hedeleg(CPURISCVState *env, int csrno, target_ulong *val); +static int write_hedeleg(CPURISCVState *env, int csrno, target_ulong val); +static int read_hideleg(CPURISCVState *env, int csrno, target_ulong *val); +static int write_hideleg(CPURISCVState *env, int csrno, target_ulong val); +static int rmw_hip(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask); +static int read_hie(CPURISCVState *env, int csrno, target_ulong *val); +static int write_hie(CPURISCVState *env, int csrno, target_ulong val); +static int read_hcounteren(CPURISCVState *env, int csrno, target_ulong *val); +static int write_hcounteren(CPURISCVState *env, int csrno, target_ulong val); +static int read_htval(CPURISCVState *env, int csrno, target_ulong *val); +static int write_htval(CPURISCVState *env, int csrno, target_ulong val); +static int read_htinst(CPURISCVState *env, int csrno, target_ulong *val); +static int write_htinst(CPURISCVState *env, int csrno, target_ulong val); +static int read_hgatp(CPURISCVState *env, int csrno, target_ulong *val); +static int write_hgatp(CPURISCVState *env, int csrno, target_ulong val); +static int read_htimedelta(CPURISCVState *env, int csrno, target_ulong *val); +static int write_htimedelta(CPURISCVState *env, int csrno, target_ulong val); +static int read_vsstatus(CPURISCVState *env, int csrno, target_ulong *val); +static int write_vsstatus(CPURISCVState *env, int csrno, target_ulong val); +static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask); +static int read_vsie(CPURISCVState *env, int csrno, target_ulong *val); +static int write_vsie(CPURISCVState *env, int csrno, target_ulong val); +static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val); +static int write_vstvec(CPURISCVState *env, int csrno, target_ulong val); +static int read_vsscratch(CPURISCVState *env, int csrno, target_ulong *val); +static int write_vsscratch(CPURISCVState *env, int csrno, target_ulong val); +static int read_vsepc(CPURISCVState *env, int csrno, target_ulong *val); +static int write_vsepc(CPURISCVState *env, int csrno, target_ulong val); +static int read_vscause(CPURISCVState *env, int csrno, 
target_ulong *val); +static int write_vscause(CPURISCVState *env, int csrno, target_ulong val); +static int read_vstval(CPURISCVState *env, int csrno, target_ulong *val); +static int write_vstval(CPURISCVState *env, int csrno, target_ulong val); +static int read_vsatp(CPURISCVState *env, int csrno, target_ulong *val); +static int write_vsatp(CPURISCVState *env, int csrno, target_ulong val); +static int read_mtval2(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mtval2(CPURISCVState *env, int csrno, target_ulong val); +static int read_mtinst(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mtinst(CPURISCVState *env, int csrno, target_ulong val); +static int read_pmpcfg(CPURISCVState *env, int csrno, target_ulong *val); +static int write_pmpcfg(CPURISCVState *env, int csrno, target_ulong val); +static int read_pmpaddr(CPURISCVState *env, int csrno, target_ulong *val); +static int write_pmpaddr(CPURISCVState *env, int csrno, target_ulong val); +static int pmp(CPURISCVState *env, int csrno); + +#if defined(TARGET_RISCV32) +static int read_instreth(CPURISCVState *env, int csrno, target_ulong *val); +static int read_timeh(CPURISCVState *env, int csrno, target_ulong *val); +static int read_mstatush(CPURISCVState *env, int csrno, target_ulong *val); +static int write_mstatush(CPURISCVState *env, int csrno, target_ulong val); +static int read_htimedeltah(CPURISCVState *env, int csrno, target_ulong *val); +static int write_htimedeltah(CPURISCVState *env, int csrno, target_ulong val); +#endif + +/* CSR function table constants */ +enum { + CSR_TABLE_SIZE = 0x1000 +}; + +/* Control and Status Register function table */ +static riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { + /* User Floating-Point CSRs */ + [CSR_FFLAGS] = { fs, read_fflags, write_fflags }, + [CSR_FRM] = { fs, read_frm, write_frm }, + [CSR_FCSR] = { fs, read_fcsr, write_fcsr }, + + /* User Timers and Counters */ + [CSR_CYCLE] = { ctr, read_instret }, + [CSR_INSTRET] = { ctr, read_instret }, +#if defined(TARGET_RISCV32) + [CSR_CYCLEH] = { ctr, read_instreth }, + [CSR_INSTRETH] = { ctr, read_instreth }, +#endif + + /* In privileged mode, the monitor will have to emulate TIME CSRs only if + * rdtime callback is not provided by machine/platform emulation */ + [CSR_TIME] = { ctr, read_time }, +#if defined(TARGET_RISCV32) + [CSR_TIMEH] = { ctr, read_timeh }, +#endif + + /* Machine Timers and Counters */ + [CSR_MCYCLE] = { any, read_instret }, + [CSR_MINSTRET] = { any, read_instret }, +#if defined(TARGET_RISCV32) + [CSR_MCYCLEH] = { any, read_instreth }, + [CSR_MINSTRETH] = { any, read_instreth }, +#endif + + /* Machine Information Registers */ + [CSR_MVENDORID] = { any, read_zero }, + [CSR_MARCHID] = { any, read_zero }, + [CSR_MIMPID] = { any, read_zero }, + [CSR_MHARTID] = { any, read_mhartid }, + + /* Machine Trap Setup */ + [CSR_MSTATUS] = { any, read_mstatus, write_mstatus }, + [CSR_MISA] = { any, read_misa, write_misa }, + [CSR_MIDELEG] = { any, read_mideleg, write_mideleg }, + [CSR_MEDELEG] = { any, read_medeleg, write_medeleg }, + [CSR_MIE] = { any, read_mie, write_mie }, + [CSR_MTVEC] = { any, read_mtvec, write_mtvec }, + [CSR_MCOUNTEREN] = { any, read_mcounteren, write_mcounteren }, + +#if defined(TARGET_RISCV32) + [CSR_MSTATUSH] = { any, read_mstatush, write_mstatush }, +#endif + + /* Legacy Counter Setup (priv v1.9.1) */ + [CSR_MUCOUNTEREN] = { any, read_mucounteren, write_mucounteren }, + [CSR_MSCOUNTEREN] = { any, read_mscounteren, write_mscounteren }, + + /* Machine Trap Handling */ + 
[CSR_MSCRATCH] = { any, read_mscratch, write_mscratch }, + [CSR_MEPC] = { any, read_mepc, write_mepc }, + [CSR_MCAUSE] = { any, read_mcause, write_mcause }, + [CSR_MBADADDR] = { any, read_mbadaddr, write_mbadaddr }, + [CSR_MIP] = { any, NULL, NULL, rmw_mip }, + + /* Supervisor Trap Setup */ + [CSR_SSTATUS] = { smode, read_sstatus, write_sstatus }, + [CSR_SIE] = { smode, read_sie, write_sie }, + [CSR_STVEC] = { smode, read_stvec, write_stvec }, + [CSR_SCOUNTEREN] = { smode, read_scounteren, write_scounteren }, + + /* Supervisor Trap Handling */ + [CSR_SSCRATCH] = { smode, read_sscratch, write_sscratch }, + [CSR_SEPC] = { smode, read_sepc, write_sepc }, + [CSR_SCAUSE] = { smode, read_scause, write_scause }, + [CSR_SBADADDR] = { smode, read_sbadaddr, write_sbadaddr }, + [CSR_SIP] = { smode, NULL, NULL, rmw_sip }, + + /* Supervisor Protection and Translation */ + [CSR_SATP] = { smode, read_satp, write_satp }, + + [CSR_HSTATUS] = { hmode, read_hstatus, write_hstatus }, + [CSR_HEDELEG] = { hmode, read_hedeleg, write_hedeleg }, + [CSR_HIDELEG] = { hmode, read_hideleg, write_hideleg }, + [CSR_HIP] = { hmode, NULL, NULL, rmw_hip }, + [CSR_HIE] = { hmode, read_hie, write_hie }, + [CSR_HCOUNTEREN] = { hmode, read_hcounteren, write_hcounteren }, + [CSR_HTVAL] = { hmode, read_htval, write_htval }, + [CSR_HTINST] = { hmode, read_htinst, write_htinst }, + [CSR_HGATP] = { hmode, read_hgatp, write_hgatp }, + [CSR_HTIMEDELTA] = { hmode, read_htimedelta, write_htimedelta }, +#if defined(TARGET_RISCV32) + [CSR_HTIMEDELTAH] = { hmode, read_htimedeltah, write_htimedeltah}, +#endif + + [CSR_VSSTATUS] = { hmode, read_vsstatus, write_vsstatus }, + [CSR_VSIP] = { hmode, NULL, NULL, rmw_vsip }, + [CSR_VSIE] = { hmode, read_vsie, write_vsie }, + [CSR_VSTVEC] = { hmode, read_vstvec, write_vstvec }, + [CSR_VSSCRATCH] = { hmode, read_vsscratch, write_vsscratch }, + [CSR_VSEPC] = { hmode, read_vsepc, write_vsepc }, + [CSR_VSCAUSE] = { hmode, read_vscause, write_vscause }, + [CSR_VSTVAL] = { hmode, read_vstval, write_vstval }, + [CSR_VSATP] = { hmode, read_vsatp, write_vsatp }, + + [CSR_MTVAL2] = { hmode, read_mtval2, write_mtval2 }, + [CSR_MTINST] = { hmode, read_mtinst, write_mtinst }, + + /* Physical Memory Protection */ + [CSR_PMPCFG0] = { pmp, read_pmpcfg, write_pmpcfg }, + [CSR_PMPCFG1] = { pmp, read_pmpcfg, write_pmpcfg }, + [CSR_PMPCFG2] = { pmp, read_pmpcfg, write_pmpcfg }, + [CSR_PMPCFG3] = { pmp, read_pmpcfg, write_pmpcfg }, + + [CSR_PMPADDR0] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR1] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR2] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR3] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR4] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR5] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR6] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR7] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR8] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR9] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR10] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR11] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR12] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR13] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR14] = { pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR15] = { pmp, read_pmpaddr, write_pmpaddr }, + + /* Performance Counters */ + [CSR_HPMCOUNTER3] = { ctr, read_zero }, + [CSR_HPMCOUNTER4] = { ctr, read_zero }, + [CSR_HPMCOUNTER5] = { ctr, read_zero }, + [CSR_HPMCOUNTER6] = { ctr, 
read_zero }, + [CSR_HPMCOUNTER7] = { ctr, read_zero }, + [CSR_HPMCOUNTER8] = { ctr, read_zero }, + [CSR_HPMCOUNTER9] = { ctr, read_zero }, + [CSR_HPMCOUNTER10] = { ctr, read_zero }, + [CSR_HPMCOUNTER11] = { ctr, read_zero }, + [CSR_HPMCOUNTER12] = { ctr, read_zero }, + [CSR_HPMCOUNTER13] = { ctr, read_zero }, + [CSR_HPMCOUNTER14] = { ctr, read_zero }, + [CSR_HPMCOUNTER15] = { ctr, read_zero }, + [CSR_HPMCOUNTER16] = { ctr, read_zero }, + [CSR_HPMCOUNTER17] = { ctr, read_zero }, + [CSR_HPMCOUNTER18] = { ctr, read_zero }, + [CSR_HPMCOUNTER19] = { ctr, read_zero }, + [CSR_HPMCOUNTER20] = { ctr, read_zero }, + [CSR_HPMCOUNTER21] = { ctr, read_zero }, + [CSR_HPMCOUNTER22] = { ctr, read_zero }, + [CSR_HPMCOUNTER23] = { ctr, read_zero }, + [CSR_HPMCOUNTER24] = { ctr, read_zero }, + [CSR_HPMCOUNTER25] = { ctr, read_zero }, + [CSR_HPMCOUNTER26] = { ctr, read_zero }, + [CSR_HPMCOUNTER27] = { ctr, read_zero }, + [CSR_HPMCOUNTER28] = { ctr, read_zero }, + [CSR_HPMCOUNTER29] = { ctr, read_zero }, + [CSR_HPMCOUNTER30] = { ctr, read_zero }, + [CSR_HPMCOUNTER31] = { ctr, read_zero }, + + [CSR_MHPMCOUNTER3] = { any, read_zero }, + [CSR_MHPMCOUNTER4] = { any, read_zero }, + [CSR_MHPMCOUNTER5] = { any, read_zero }, + [CSR_MHPMCOUNTER6] = { any, read_zero }, + [CSR_MHPMCOUNTER7] = { any, read_zero }, + [CSR_MHPMCOUNTER8] = { any, read_zero }, + [CSR_MHPMCOUNTER9] = { any, read_zero }, + [CSR_MHPMCOUNTER10] = { any, read_zero }, + [CSR_MHPMCOUNTER11] = { any, read_zero }, + [CSR_MHPMCOUNTER12] = { any, read_zero }, + [CSR_MHPMCOUNTER13] = { any, read_zero }, + [CSR_MHPMCOUNTER14] = { any, read_zero }, + [CSR_MHPMCOUNTER15] = { any, read_zero }, + [CSR_MHPMCOUNTER16] = { any, read_zero }, + [CSR_MHPMCOUNTER17] = { any, read_zero }, + [CSR_MHPMCOUNTER18] = { any, read_zero }, + [CSR_MHPMCOUNTER19] = { any, read_zero }, + [CSR_MHPMCOUNTER20] = { any, read_zero }, + [CSR_MHPMCOUNTER21] = { any, read_zero }, + [CSR_MHPMCOUNTER22] = { any, read_zero }, + [CSR_MHPMCOUNTER23] = { any, read_zero }, + [CSR_MHPMCOUNTER24] = { any, read_zero }, + [CSR_MHPMCOUNTER25] = { any, read_zero }, + [CSR_MHPMCOUNTER26] = { any, read_zero }, + [CSR_MHPMCOUNTER27] = { any, read_zero }, + [CSR_MHPMCOUNTER28] = { any, read_zero }, + [CSR_MHPMCOUNTER29] = { any, read_zero }, + [CSR_MHPMCOUNTER30] = { any, read_zero }, + [CSR_MHPMCOUNTER31] = { any, read_zero }, + + [CSR_MHPMEVENT3] = { any, read_zero }, + [CSR_MHPMEVENT4] = { any, read_zero }, + [CSR_MHPMEVENT5] = { any, read_zero }, + [CSR_MHPMEVENT6] = { any, read_zero }, + [CSR_MHPMEVENT7] = { any, read_zero }, + [CSR_MHPMEVENT8] = { any, read_zero }, + [CSR_MHPMEVENT9] = { any, read_zero }, + [CSR_MHPMEVENT10] = { any, read_zero }, + [CSR_MHPMEVENT11] = { any, read_zero }, + [CSR_MHPMEVENT12] = { any, read_zero }, + [CSR_MHPMEVENT13] = { any, read_zero }, + [CSR_MHPMEVENT14] = { any, read_zero }, + [CSR_MHPMEVENT15] = { any, read_zero }, + [CSR_MHPMEVENT16] = { any, read_zero }, + [CSR_MHPMEVENT17] = { any, read_zero }, + [CSR_MHPMEVENT18] = { any, read_zero }, + [CSR_MHPMEVENT19] = { any, read_zero }, + [CSR_MHPMEVENT20] = { any, read_zero }, + [CSR_MHPMEVENT21] = { any, read_zero }, + [CSR_MHPMEVENT22] = { any, read_zero }, + [CSR_MHPMEVENT23] = { any, read_zero }, + [CSR_MHPMEVENT24] = { any, read_zero }, + [CSR_MHPMEVENT25] = { any, read_zero }, + [CSR_MHPMEVENT26] = { any, read_zero }, + [CSR_MHPMEVENT27] = { any, read_zero }, + [CSR_MHPMEVENT28] = { any, read_zero }, + [CSR_MHPMEVENT29] = { any, read_zero }, + [CSR_MHPMEVENT30] = { any, read_zero }, + [CSR_MHPMEVENT31] = { 
any, read_zero }, + +#if defined(TARGET_RISCV32) + [CSR_HPMCOUNTER3H] = { ctr, read_zero }, + [CSR_HPMCOUNTER4H] = { ctr, read_zero }, + [CSR_HPMCOUNTER5H] = { ctr, read_zero }, + [CSR_HPMCOUNTER6H] = { ctr, read_zero }, + [CSR_HPMCOUNTER7H] = { ctr, read_zero }, + [CSR_HPMCOUNTER8H] = { ctr, read_zero }, + [CSR_HPMCOUNTER9H] = { ctr, read_zero }, + [CSR_HPMCOUNTER10H] = { ctr, read_zero }, + [CSR_HPMCOUNTER11H] = { ctr, read_zero }, + [CSR_HPMCOUNTER12H] = { ctr, read_zero }, + [CSR_HPMCOUNTER13H] = { ctr, read_zero }, + [CSR_HPMCOUNTER14H] = { ctr, read_zero }, + [CSR_HPMCOUNTER15H] = { ctr, read_zero }, + [CSR_HPMCOUNTER16H] = { ctr, read_zero }, + [CSR_HPMCOUNTER17H] = { ctr, read_zero }, + [CSR_HPMCOUNTER18H] = { ctr, read_zero }, + [CSR_HPMCOUNTER19H] = { ctr, read_zero }, + [CSR_HPMCOUNTER20H] = { ctr, read_zero }, + [CSR_HPMCOUNTER21H] = { ctr, read_zero }, + [CSR_HPMCOUNTER22H] = { ctr, read_zero }, + [CSR_HPMCOUNTER23H] = { ctr, read_zero }, + [CSR_HPMCOUNTER24H] = { ctr, read_zero }, + [CSR_HPMCOUNTER25H] = { ctr, read_zero }, + [CSR_HPMCOUNTER26H] = { ctr, read_zero }, + [CSR_HPMCOUNTER27H] = { ctr, read_zero }, + [CSR_HPMCOUNTER28H] = { ctr, read_zero }, + [CSR_HPMCOUNTER29H] = { ctr, read_zero }, + [CSR_HPMCOUNTER30H] = { ctr, read_zero }, + [CSR_HPMCOUNTER31H] = { ctr, read_zero }, + + [CSR_MHPMCOUNTER3H] = { any, read_zero }, + [CSR_MHPMCOUNTER4H] = { any, read_zero }, + [CSR_MHPMCOUNTER5H] = { any, read_zero }, + [CSR_MHPMCOUNTER6H] = { any, read_zero }, + [CSR_MHPMCOUNTER7H] = { any, read_zero }, + [CSR_MHPMCOUNTER8H] = { any, read_zero }, + [CSR_MHPMCOUNTER9H] = { any, read_zero }, + [CSR_MHPMCOUNTER10H] = { any, read_zero }, + [CSR_MHPMCOUNTER11H] = { any, read_zero }, + [CSR_MHPMCOUNTER12H] = { any, read_zero }, + [CSR_MHPMCOUNTER13H] = { any, read_zero }, + [CSR_MHPMCOUNTER14H] = { any, read_zero }, + [CSR_MHPMCOUNTER15H] = { any, read_zero }, + [CSR_MHPMCOUNTER16H] = { any, read_zero }, + [CSR_MHPMCOUNTER17H] = { any, read_zero }, + [CSR_MHPMCOUNTER18H] = { any, read_zero }, + [CSR_MHPMCOUNTER19H] = { any, read_zero }, + [CSR_MHPMCOUNTER20H] = { any, read_zero }, + [CSR_MHPMCOUNTER21H] = { any, read_zero }, + [CSR_MHPMCOUNTER22H] = { any, read_zero }, + [CSR_MHPMCOUNTER23H] = { any, read_zero }, + [CSR_MHPMCOUNTER24H] = { any, read_zero }, + [CSR_MHPMCOUNTER25H] = { any, read_zero }, + [CSR_MHPMCOUNTER26H] = { any, read_zero }, + [CSR_MHPMCOUNTER27H] = { any, read_zero }, + [CSR_MHPMCOUNTER28H] = { any, read_zero }, + [CSR_MHPMCOUNTER29H] = { any, read_zero }, + [CSR_MHPMCOUNTER30H] = { any, read_zero }, + [CSR_MHPMCOUNTER31H] = { any, read_zero }, +#endif +}; + +/* CSR function table public API */ +void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops) +{ + *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)]; +} + +void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops) +{ + csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops; +} + +/* Predicates */ +static int fs(CPURISCVState *env, int csrno) +{ + if (!env->debugger && !riscv_cpu_fp_enabled(env)) { + return -1; + } + return 0; +} + +static int ctr(CPURISCVState *env, int csrno) +{ + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); + uint32_t ctr_en = ~0u; + + if (!cpu->cfg.ext_counters) { + /* The Counters extension is not enabled */ + return -1; + } + + /* + * The counters are always enabled at run time on newer priv specs, as the + * CSR has changed from controlling that the counters can be read to + * controlling that the counters increment.
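+ * For example, on priv v1.9.1 a U-mode rdcycle succeeds only when the
+ * corresponding bit is set in both mcounteren and scounteren, which is
+ * exactly what the masking below checks.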
+ */ + if (env->priv_ver > PRIV_VERSION_1_09_1) { + return 0; + } + + if (env->priv < PRV_M) { + ctr_en &= env->mcounteren; + } + if (env->priv < PRV_S) { + ctr_en &= env->scounteren; + } + if (!(ctr_en & (1u << (csrno & 31)))) { + return -1; + } + return 0; +} + +static int any(CPURISCVState *env, int csrno) +{ + return 0; +} + +static int smode(CPURISCVState *env, int csrno) +{ + return -!riscv_has_ext(env, RVS); +} + +static int hmode(CPURISCVState *env, int csrno) +{ + if (riscv_has_ext(env, RVS) && + riscv_has_ext(env, RVH)) { + /* Hypervisor extension is supported */ + if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) || + env->priv == PRV_M) { + return 0; + } + } + + return -1; +} + +static int pmp(CPURISCVState *env, int csrno) +{ + return -!riscv_feature(env, RISCV_FEATURE_PMP); +} + +/* User Floating-Point CSRs */ +static int read_fflags(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (!env->debugger && !riscv_cpu_fp_enabled(env)) { + return -1; + } + *val = riscv_cpu_get_fflags(env); + return 0; +} + +static int write_fflags(CPURISCVState *env, int csrno, target_ulong val) +{ + if (!env->debugger && !riscv_cpu_fp_enabled(env)) { + return -1; + } + env->mstatus |= MSTATUS_FS; + riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT)); + return 0; +} + +static int read_frm(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (!env->debugger && !riscv_cpu_fp_enabled(env)) { + return -1; + } + *val = env->frm; + return 0; +} + +static int write_frm(CPURISCVState *env, int csrno, target_ulong val) +{ + if (!env->debugger && !riscv_cpu_fp_enabled(env)) { + return -1; + } + env->mstatus |= MSTATUS_FS; + env->frm = val & (FSR_RD >> FSR_RD_SHIFT); + return 0; +} + +static int read_fcsr(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (!env->debugger && !riscv_cpu_fp_enabled(env)) { + return -1; + } + *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT) + | (env->frm << FSR_RD_SHIFT); + return 0; +} + +static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val) +{ + if (!env->debugger && !riscv_cpu_fp_enabled(env)) { + return -1; + } + env->mstatus |= MSTATUS_FS; + env->frm = (val & FSR_RD) >> FSR_RD_SHIFT; + riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT); + return 0; +} + +/* User Timers and Counters */ +static int read_instret(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = cpu_get_host_ticks(); + + return 0; +} + +#if defined(TARGET_RISCV32) +static int read_instreth(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = cpu_get_host_ticks() >> 32; + + return 0; +} +#endif /* TARGET_RISCV32 */ + +static int read_time(CPURISCVState *env, int csrno, target_ulong *val) +{ + uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0; + + if (!env->rdtime_fn) { + return -1; + } + + *val = env->rdtime_fn() + delta; + return 0; +} + +#if defined(TARGET_RISCV32) +static int read_timeh(CPURISCVState *env, int csrno, target_ulong *val) +{ + uint64_t delta = riscv_cpu_virt_enabled(env) ? 
env->htimedelta : 0; + + if (!env->rdtime_fn) { + return -1; + } + + *val = (env->rdtime_fn() + delta) >> 32; + return 0; +} +#endif + +/* Machine constants */ + +#define M_MODE_INTERRUPTS (MIP_MSIP | MIP_MTIP | MIP_MEIP) +#define S_MODE_INTERRUPTS (MIP_SSIP | MIP_STIP | MIP_SEIP) +#define VS_MODE_INTERRUPTS (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP) + +static const target_ulong delegable_ints = S_MODE_INTERRUPTS | + VS_MODE_INTERRUPTS; +static const target_ulong all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS | + VS_MODE_INTERRUPTS; +static const target_ulong delegable_excps = + (1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | + (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | + (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | + (1ULL << (RISCV_EXCP_BREAKPOINT)) | + (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | + (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | + (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | + (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | + (1ULL << (RISCV_EXCP_U_ECALL)) | + (1ULL << (RISCV_EXCP_S_ECALL)) | + (1ULL << (RISCV_EXCP_VS_ECALL)) | + (1ULL << (RISCV_EXCP_M_ECALL)) | + (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | + (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | + (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | + (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | + (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | + (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)); +static const target_ulong sstatus_v1_9_mask = SSTATUS_SIE | SSTATUS_SPIE | + SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS | + SSTATUS_SUM | SSTATUS_SD; +static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE | + SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS | + SSTATUS_SUM | SSTATUS_MXR | SSTATUS_SD; +static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP; +static const target_ulong hip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP; +static const target_ulong vsip_writable_mask = MIP_VSSIP; + +#if defined(TARGET_RISCV32) +static const char valid_vm_1_09[16] = { + [VM_1_09_MBARE] = 1, + [VM_1_09_SV32] = 1, +}; +static const char valid_vm_1_10[16] = { + [VM_1_10_MBARE] = 1, + [VM_1_10_SV32] = 1 +}; +#elif defined(TARGET_RISCV64) +static const char valid_vm_1_09[16] = { + [VM_1_09_MBARE] = 1, + [VM_1_09_SV39] = 1, + [VM_1_09_SV48] = 1, +}; +static const char valid_vm_1_10[16] = { + [VM_1_10_MBARE] = 1, + [VM_1_10_SV39] = 1, + [VM_1_10_SV48] = 1, + [VM_1_10_SV57] = 1 +}; +#endif + +/* Machine Information Registers */ +static int read_zero(CPURISCVState *env, int csrno, target_ulong *val) +{ + return *val = 0; +} + +static int read_mhartid(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mhartid; + return 0; +} + +/* Machine Trap Setup */ +static int read_mstatus(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mstatus; + return 0; +} + +static int validate_vm(CPURISCVState *env, target_ulong vm) +{ + return (env->priv_ver >= PRIV_VERSION_1_10_0) ? 
+ valid_vm_1_10[vm & 0xf] : valid_vm_1_09[vm & 0xf]; +} + +static int write_mstatus(CPURISCVState *env, int csrno, target_ulong val) +{ + target_ulong mstatus = env->mstatus; + target_ulong mask = 0; + int dirty; + + /* flush tlb on mstatus fields that affect VM */ + if (env->priv_ver <= PRIV_VERSION_1_09_1) { + if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | + MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_VM)) { + tlb_flush(env_cpu(env)); + } + mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | + MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM | + MSTATUS_MPP | MSTATUS_MXR | + (validate_vm(env, get_field(val, MSTATUS_VM)) ? + MSTATUS_VM : 0); + } + if (env->priv_ver >= PRIV_VERSION_1_10_0) { + if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV | + MSTATUS_MPRV | MSTATUS_SUM)) { + tlb_flush(env_cpu(env)); + } + mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | + MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM | + MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR | + MSTATUS_TW; +#if defined(TARGET_RISCV64) + /* + * RV32: MPV and MTL are not in mstatus. The current plan is to + * add them to mstatush. For now, we just don't support it. + */ + mask |= MSTATUS_MTL | MSTATUS_MPV; +#endif + } + + mstatus = (mstatus & ~mask) | (val & mask); + + dirty = ((mstatus & MSTATUS_FS) == MSTATUS_FS) | + ((mstatus & MSTATUS_XS) == MSTATUS_XS); + mstatus = set_field(mstatus, MSTATUS_SD, dirty); + env->mstatus = mstatus; + + return 0; +} + +#ifdef TARGET_RISCV32 +static int read_mstatush(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mstatush; + return 0; +} + +static int write_mstatush(CPURISCVState *env, int csrno, target_ulong val) +{ + if ((val ^ env->mstatush) & (MSTATUS_MPV)) { + tlb_flush(env_cpu(env)); + } + + val &= MSTATUS_MPV | MSTATUS_MTL; + + env->mstatush = val; + + return 0; +} +#endif + +static int read_misa(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->misa; + return 0; +} + +static int write_misa(CPURISCVState *env, int csrno, target_ulong val) +{ + if (!riscv_feature(env, RISCV_FEATURE_MISA)) { + /* drop write to misa */ + return 0; + } + + /* 'I' or 'E' must be present */ + if (!(val & (RVI | RVE))) { + /* It is not, drop write to misa */ + return 0; + } + + /* 'E' excludes all other extensions */ + if (val & RVE) { + /* when we support 'E' we can do "val = RVE;" however + * for now we just drop writes if 'E' is present. 
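+ * (Neither this import nor the QEMU it tracks implements the RV32E
+ * sixteen-register base ISA, hence writes enabling 'E' are dropped.)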
+ */ + return 0; + } + + /* Mask extensions that are not supported by this hart */ + val &= env->misa_mask; + + /* Mask extensions that are not supported by QEMU */ + val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + + /* 'D' depends on 'F', so clear 'D' if 'F' is not present */ + if ((val & RVD) && !(val & RVF)) { + val &= ~RVD; + } + + /* Suppress 'C' if next instruction is not aligned + * TODO: this should check next_pc + */ + if ((val & RVC) && (GETPC() & ~3) != 0) { + val &= ~RVC; + } + + /* misa.MXL writes are not supported by QEMU */ + val = (env->misa & MISA_MXL) | (val & ~MISA_MXL); + + /* flush translation cache */ + if (val != env->misa) { + tb_flush(env_cpu(env)); + } + + env->misa = val; + + return 0; +} + +static int read_medeleg(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->medeleg; + return 0; +} + +static int write_medeleg(CPURISCVState *env, int csrno, target_ulong val) +{ + env->medeleg = (env->medeleg & ~delegable_excps) | (val & delegable_excps); + return 0; +} + +static int read_mideleg(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mideleg; + return 0; +} + +static int write_mideleg(CPURISCVState *env, int csrno, target_ulong val) +{ + env->mideleg = (env->mideleg & ~delegable_ints) | (val & delegable_ints); + if (riscv_has_ext(env, RVH)) { + env->mideleg |= VS_MODE_INTERRUPTS; + } + return 0; +} + +static int read_mie(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mie; + return 0; +} + +static int write_mie(CPURISCVState *env, int csrno, target_ulong val) +{ + env->mie = (env->mie & ~all_ints) | (val & all_ints); + return 0; +} + +static int read_mtvec(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mtvec; + return 0; +} + +static int write_mtvec(CPURISCVState *env, int csrno, target_ulong val) +{ + /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ + if ((val & 3) < 2) { + env->mtvec = val; + } else { + qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n"); + } + return 0; +} + +static int read_mcounteren(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (env->priv_ver < PRIV_VERSION_1_10_0) { + return -1; + } + *val = env->mcounteren; + return 0; +} + +static int write_mcounteren(CPURISCVState *env, int csrno, target_ulong val) +{ + if (env->priv_ver < PRIV_VERSION_1_10_0) { + return -1; + } + env->mcounteren = val; + return 0; +} + +/* This register is replaced with CSR_MCOUNTINHIBIT in 1.11.0 */ +static int read_mscounteren(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (env->priv_ver > PRIV_VERSION_1_09_1 + && env->priv_ver < PRIV_VERSION_1_11_0) { + return -1; + } + *val = env->mcounteren; + return 0; +} + +/* This register is replaced with CSR_MCOUNTINHIBIT in 1.11.0 */ +static int write_mscounteren(CPURISCVState *env, int csrno, target_ulong val) +{ + if (env->priv_ver > PRIV_VERSION_1_09_1 + && env->priv_ver < PRIV_VERSION_1_11_0) { + return -1; + } + env->mcounteren = val; + return 0; +} + +static int read_mucounteren(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (env->priv_ver > PRIV_VERSION_1_09_1) { + return -1; + } + *val = env->scounteren; + return 0; +} + +static int write_mucounteren(CPURISCVState *env, int csrno, target_ulong val) +{ + if (env->priv_ver > PRIV_VERSION_1_09_1) { + return -1; + } + env->scounteren = val; + return 0; +} + +/* Machine Trap Handling */ +static int read_mscratch(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mscratch; + return 0; +} + +static int
write_mscratch(CPURISCVState *env, int csrno, target_ulong val) +{ + env->mscratch = val; + return 0; +} + +static int read_mepc(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mepc; + return 0; +} + +static int write_mepc(CPURISCVState *env, int csrno, target_ulong val) +{ + env->mepc = val; + return 0; +} + +static int read_mcause(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mcause; + return 0; +} + +static int write_mcause(CPURISCVState *env, int csrno, target_ulong val) +{ + env->mcause = val; + return 0; +} + +static int read_mbadaddr(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mbadaddr; + return 0; +} + +static int write_mbadaddr(CPURISCVState *env, int csrno, target_ulong val) +{ + env->mbadaddr = val; + return 0; +} + +static int rmw_mip(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) +{ + RISCVCPU *cpu = env_archcpu(env); + /* Allow software control of delegable interrupts not claimed by hardware */ + target_ulong mask = write_mask & delegable_ints & ~env->miclaim; + uint32_t old_mip; + + if (mask) { + old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask)); + } else { + old_mip = env->mip; + } + + if (ret_value) { + *ret_value = old_mip; + } + + return 0; +} + +/* Supervisor Trap Setup */ +static int read_sstatus(CPURISCVState *env, int csrno, target_ulong *val) +{ + target_ulong mask = ((env->priv_ver >= PRIV_VERSION_1_10_0) ? + sstatus_v1_10_mask : sstatus_v1_9_mask); + *val = env->mstatus & mask; + return 0; +} + +static int write_sstatus(CPURISCVState *env, int csrno, target_ulong val) +{ + target_ulong mask = ((env->priv_ver >= PRIV_VERSION_1_10_0) ? + sstatus_v1_10_mask : sstatus_v1_9_mask); + target_ulong newval = (env->mstatus & ~mask) | (val & mask); + return write_mstatus(env, CSR_MSTATUS, newval); +} + +static int read_sie(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (riscv_cpu_virt_enabled(env)) { + /* Tell the guest the VS bits, shifted to the S bit locations */ + *val = (env->mie & env->mideleg & VS_MODE_INTERRUPTS) >> 1; + } else { + *val = env->mie & env->mideleg; + } + return 0; +} + +static int write_sie(CPURISCVState *env, int csrno, target_ulong val) +{ + target_ulong newval; + + if (riscv_cpu_virt_enabled(env)) { + /* Shift the guests S bits to VS */ + newval = (env->mie & ~VS_MODE_INTERRUPTS) | + ((val << 1) & VS_MODE_INTERRUPTS); + } else { + newval = (env->mie & ~S_MODE_INTERRUPTS) | (val & S_MODE_INTERRUPTS); + } + + return write_mie(env, CSR_MIE, newval); +} + +static int read_stvec(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->stvec; + return 0; +} + +static int write_stvec(CPURISCVState *env, int csrno, target_ulong val) +{ + /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ + if ((val & 3) < 2) { + env->stvec = val; + } else { + qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n"); + } + return 0; +} + +static int read_scounteren(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (env->priv_ver < PRIV_VERSION_1_10_0) { + return -1; + } + *val = env->scounteren; + return 0; +} + +static int write_scounteren(CPURISCVState *env, int csrno, target_ulong val) +{ + if (env->priv_ver < PRIV_VERSION_1_10_0) { + return -1; + } + env->scounteren = val; + return 0; +} + +/* Supervisor Trap Handling */ +static int read_sscratch(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->sscratch; + return 0; +} + +static int write_sscratch(CPURISCVState 
*env, int csrno, target_ulong val) +{ + env->sscratch = val; + return 0; +} + +static int read_sepc(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->sepc; + return 0; +} + +static int write_sepc(CPURISCVState *env, int csrno, target_ulong val) +{ + env->sepc = val; + return 0; +} + +static int read_scause(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->scause; + return 0; +} + +static int write_scause(CPURISCVState *env, int csrno, target_ulong val) +{ + env->scause = val; + return 0; +} + +static int read_sbadaddr(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->sbadaddr; + return 0; +} + +static int write_sbadaddr(CPURISCVState *env, int csrno, target_ulong val) +{ + env->sbadaddr = val; + return 0; +} + +static int rmw_sip(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) +{ + int ret; + + if (riscv_cpu_virt_enabled(env)) { + /* Shift the new values to line up with the VS bits */ + ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value << 1, + (write_mask & sip_writable_mask) << 1 & env->mideleg); + ret &= vsip_writable_mask; + ret >>= 1; + } else { + ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value, + write_mask & env->mideleg & sip_writable_mask); + } + + *ret_value &= env->mideleg; + return ret; +} + +/* Supervisor Protection and Translation */ +static int read_satp(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (!riscv_feature(env, RISCV_FEATURE_MMU)) { + *val = 0; + } else if (env->priv_ver >= PRIV_VERSION_1_10_0) { + if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { + return -1; + } else { + *val = env->satp; + } + } else { + *val = env->sptbr; + } + return 0; +} + +static int write_satp(CPURISCVState *env, int csrno, target_ulong val) +{ + if (!riscv_feature(env, RISCV_FEATURE_MMU)) { + return 0; + } + if (env->priv_ver <= PRIV_VERSION_1_09_1 && (val ^ env->sptbr)) { + tlb_flush(env_cpu(env)); + env->sptbr = val & (((target_ulong) + 1 << (TARGET_PHYS_ADDR_SPACE_BITS - PGSHIFT)) - 1); + } + if (env->priv_ver >= PRIV_VERSION_1_10_0 && + validate_vm(env, get_field(val, SATP_MODE)) && + ((val ^ env->satp) & (SATP_MODE | SATP_ASID | SATP_PPN))) + { + if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) { + return -1; + } else { + if((val ^ env->satp) & SATP_ASID) { + tlb_flush(env_cpu(env)); + } + env->satp = val; + } + } + return 0; +} + +/* Hypervisor Extensions */ +static int read_hstatus(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->hstatus; + return 0; +} + +static int write_hstatus(CPURISCVState *env, int csrno, target_ulong val) +{ + env->hstatus = val; + return 0; +} + +static int read_hedeleg(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->hedeleg; + return 0; +} + +static int write_hedeleg(CPURISCVState *env, int csrno, target_ulong val) +{ + env->hedeleg = val; + return 0; +} + +static int read_hideleg(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->hideleg; + return 0; +} + +static int write_hideleg(CPURISCVState *env, int csrno, target_ulong val) +{ + env->hideleg = val; + return 0; +} + +static int rmw_hip(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) +{ + int ret = rmw_mip(env, 0, ret_value, new_value, + write_mask & hip_writable_mask); + + return ret; +} + +static int read_hie(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mie & VS_MODE_INTERRUPTS; + return 0; +} + +static int 
write_hie(CPURISCVState *env, int csrno, target_ulong val) +{ + target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) | (val & VS_MODE_INTERRUPTS); + return write_mie(env, CSR_MIE, newval); +} + +static int read_hcounteren(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->hcounteren; + return 0; +} + +static int write_hcounteren(CPURISCVState *env, int csrno, target_ulong val) +{ + env->hcounteren = val; + return 0; +} + +static int read_htval(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->htval; + return 0; +} + +static int write_htval(CPURISCVState *env, int csrno, target_ulong val) +{ + env->htval = val; + return 0; +} + +static int read_htinst(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->htinst; + return 0; +} + +static int write_htinst(CPURISCVState *env, int csrno, target_ulong val) +{ + env->htinst = val; + return 0; +} + +static int read_hgatp(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->hgatp; + return 0; +} + +static int write_hgatp(CPURISCVState *env, int csrno, target_ulong val) +{ + env->hgatp = val; + return 0; +} + +static int read_htimedelta(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (!env->rdtime_fn) { + return -1; + } + +#if defined(TARGET_RISCV32) + *val = env->htimedelta & 0xffffffff; +#else + *val = env->htimedelta; +#endif + return 0; +} + +static int write_htimedelta(CPURISCVState *env, int csrno, target_ulong val) +{ + if (!env->rdtime_fn) { + return -1; + } + +#if defined(TARGET_RISCV32) + env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val); +#else + env->htimedelta = val; +#endif + return 0; +} + +#if defined(TARGET_RISCV32) +static int read_htimedeltah(CPURISCVState *env, int csrno, target_ulong *val) +{ + if (!env->rdtime_fn) { + return -1; + } + + *val = env->htimedelta >> 32; + return 0; +} + +static int write_htimedeltah(CPURISCVState *env, int csrno, target_ulong val) +{ + if (!env->rdtime_fn) { + return -1; + } + + env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val); + return 0; +} +#endif + +/* Virtual CSR Registers */ +static int read_vsstatus(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vsstatus; + return 0; +} + +static int write_vsstatus(CPURISCVState *env, int csrno, target_ulong val) +{ + env->vsstatus = val; + return 0; +} + +static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) +{ + int ret = rmw_mip(env, 0, ret_value, new_value, + write_mask & env->mideleg & vsip_writable_mask); + return ret; +} + +static int read_vsie(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mie & env->mideleg & VS_MODE_INTERRUPTS; + return 0; +} + +static int write_vsie(CPURISCVState *env, int csrno, target_ulong val) +{ + target_ulong newval = (env->mie & ~env->mideleg) | (val & env->mideleg & MIP_VSSIP); + return write_mie(env, CSR_MIE, newval); +} + +static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vstvec; + return 0; +} + +static int write_vstvec(CPURISCVState *env, int csrno, target_ulong val) +{ + env->vstvec = val; + return 0; +} + +static int read_vsscratch(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vsscratch; + return 0; +} + +static int write_vsscratch(CPURISCVState *env, int csrno, target_ulong val) +{ + env->vsscratch = val; + return 0; +} + +static int read_vsepc(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vsepc; + return 0; +} + 
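+/*
+ * The vs* accessors here are plain backing stores: the swap between the
+ * S-mode and VS-mode register views on virtualisation entry/exit happens
+ * outside this file (in the upstream QEMU this import tracks, via
+ * riscv_cpu_swap_hypervisor_regs() in cpu_helper.c).
+ */
+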
+static int write_vsepc(CPURISCVState *env, int csrno, target_ulong val) +{ + env->vsepc = val; + return 0; +} + +static int read_vscause(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vscause; + return 0; +} + +static int write_vscause(CPURISCVState *env, int csrno, target_ulong val) +{ + env->vscause = val; + return 0; +} + +static int read_vstval(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vstval; + return 0; +} + +static int write_vstval(CPURISCVState *env, int csrno, target_ulong val) +{ + env->vstval = val; + return 0; +} + +static int read_vsatp(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vsatp; + return 0; +} + +static int write_vsatp(CPURISCVState *env, int csrno, target_ulong val) +{ + env->vsatp = val; + return 0; +} + +static int read_mtval2(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mtval2; + return 0; +} + +static int write_mtval2(CPURISCVState *env, int csrno, target_ulong val) +{ + env->mtval2 = val; + return 0; +} + +static int read_mtinst(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->mtinst; + return 0; +} + +static int write_mtinst(CPURISCVState *env, int csrno, target_ulong val) +{ + env->mtinst = val; + return 0; +} + +/* Physical Memory Protection */ +static int read_pmpcfg(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0); + return 0; +} + +static int write_pmpcfg(CPURISCVState *env, int csrno, target_ulong val) +{ + pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val); + return 0; +} + +static int read_pmpaddr(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0); + return 0; +} + +static int write_pmpaddr(CPURISCVState *env, int csrno, target_ulong val) +{ + pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val); + return 0; +} + +/* + * riscv_csrrw - read and/or update control and status register + * + * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0); + * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1); + * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value); + * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value); + */ + +int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) +{ + int ret; + target_ulong old_value; + RISCVCPU *cpu = env_archcpu(env); + + /* check privileges and return -1 if check fails */ + int effective_priv = env->priv; + int read_only = get_field(csrno, 0xC00) == 3; + + if (riscv_has_ext(env, RVH) && + env->priv == PRV_S && + !riscv_cpu_virt_enabled(env)) { + /* + * We are in S mode without virtualisation, therefore we are in HS Mode. + * Add 1 to the effective privilege level to allow us to access the + * Hypervisor CSRs. + */ + effective_priv++; + } + + if ((write_mask && read_only) || + (!env->debugger && (effective_priv < get_field(csrno, 0x300)))) { + return -1; + } + + /* ensure the CSR extension is enabled.
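+ * (i.e. the Zicsr extension, modelled via cpu->cfg.ext_icsr)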
*/ + if (!cpu->cfg.ext_icsr) { + return -1; + } + + /* check predicate */ + if (!csr_ops[csrno].predicate || csr_ops[csrno].predicate(env, csrno) < 0) { + return -1; + } + + /* execute combined read/write operation if it exists */ + if (csr_ops[csrno].op) { + return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask); + } + + /* if no accessor exists then return failure */ + if (!csr_ops[csrno].read) { + return -1; + } + + /* read old value */ + ret = csr_ops[csrno].read(env, csrno, &old_value); + if (ret < 0) { + return ret; + } + + /* write value if writable and write mask set, otherwise drop writes */ + if (write_mask) { + new_value = (old_value & ~write_mask) | (new_value & write_mask); + if (csr_ops[csrno].write) { + ret = csr_ops[csrno].write(env, csrno, new_value); + if (ret < 0) { + return ret; + } + } + } + + /* return old value */ + if (ret_value) { + *ret_value = old_value; + } + + return 0; +} + +/* + * Debugger support. If not in user mode, set env->debugger before the + * riscv_csrrw call and clear it after the call. + */ +int riscv_csrrw_debug(CPURISCVState *env, int csrno, target_ulong *ret_value, + target_ulong new_value, target_ulong write_mask) +{ + int ret; + env->debugger = true; + ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask); + env->debugger = false; + return ret; +} + diff --git a/qemu/target/riscv/fpu_helper.c b/qemu/target/riscv/fpu_helper.c new file mode 100644 index 00000000..3fb6684b --- /dev/null +++ b/qemu/target/riscv/fpu_helper.c @@ -0,0 +1,371 @@ +/* + * RISC-V FPU Emulation Helpers for QEMU. + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "qemu/host-utils.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "fpu/softfloat.h" + +target_ulong riscv_cpu_get_fflags(CPURISCVState *env) +{ + int soft = get_float_exception_flags(&env->fp_status); + target_ulong hard = 0; + + hard |= (soft & float_flag_inexact) ? FPEXC_NX : 0; + hard |= (soft & float_flag_underflow) ? FPEXC_UF : 0; + hard |= (soft & float_flag_overflow) ? FPEXC_OF : 0; + hard |= (soft & float_flag_divbyzero) ? FPEXC_DZ : 0; + hard |= (soft & float_flag_invalid) ? FPEXC_NV : 0; + + return hard; +} + +void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong hard) +{ + int soft = 0; + + soft |= (hard & FPEXC_NX) ? float_flag_inexact : 0; + soft |= (hard & FPEXC_UF) ? float_flag_underflow : 0; + soft |= (hard & FPEXC_OF) ? float_flag_overflow : 0; + soft |= (hard & FPEXC_DZ) ? float_flag_divbyzero : 0; + soft |= (hard & FPEXC_NV) ?
float_flag_invalid : 0; + + set_float_exception_flags(soft, &env->fp_status); +} + +void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm) +{ + int softrm; + + if (rm == 7) { + rm = env->frm; + } + switch (rm) { + case 0: + softrm = float_round_nearest_even; + break; + case 1: + softrm = float_round_to_zero; + break; + case 2: + softrm = float_round_down; + break; + case 3: + softrm = float_round_up; + break; + case 4: + softrm = float_round_ties_away; + break; + default: + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + } + + set_float_rounding_mode(softrm, &env->fp_status); +} + +uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return float32_muladd(frs1, frs2, frs3, 0, &env->fp_status); +} + +uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return float64_muladd(frs1, frs2, frs3, 0, &env->fp_status); +} + +uint64_t helper_fmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return float32_muladd(frs1, frs2, frs3, float_muladd_negate_c, + &env->fp_status); +} + +uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return float64_muladd(frs1, frs2, frs3, float_muladd_negate_c, + &env->fp_status); +} + +uint64_t helper_fnmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return float32_muladd(frs1, frs2, frs3, float_muladd_negate_product, + &env->fp_status); +} + +uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return float64_muladd(frs1, frs2, frs3, float_muladd_negate_product, + &env->fp_status); +} + +uint64_t helper_fnmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return float32_muladd(frs1, frs2, frs3, float_muladd_negate_c | + float_muladd_negate_product, &env->fp_status); +} + +uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2, + uint64_t frs3) +{ + return float64_muladd(frs1, frs2, frs3, float_muladd_negate_c | + float_muladd_negate_product, &env->fp_status); +} + +uint64_t helper_fadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float32_add(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float32_sub(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fmul_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float32_mul(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float32_div(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fmin_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float32_minnum(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fmax_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float32_maxnum(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t frs1) +{ + return float32_sqrt(frs1, &env->fp_status); +} + +target_ulong helper_fle_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float32_le(frs1, frs2, &env->fp_status); +} + +target_ulong helper_flt_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float32_lt(frs1, frs2, &env->fp_status); +} + +target_ulong helper_feq_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float32_eq_quiet(frs1, frs2, &env->fp_status); +} + +target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t frs1) +{ + return float32_to_int32(frs1, 
&env->fp_status); +} + +target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t frs1) +{ + return (int32_t)float32_to_uint32(frs1, &env->fp_status); +} + +#if defined(TARGET_RISCV64) +uint64_t helper_fcvt_l_s(CPURISCVState *env, uint64_t frs1) +{ + return float32_to_int64(frs1, &env->fp_status); +} + +uint64_t helper_fcvt_lu_s(CPURISCVState *env, uint64_t frs1) +{ + return float32_to_uint64(frs1, &env->fp_status); +} +#endif + +uint64_t helper_fcvt_s_w(CPURISCVState *env, target_ulong rs1) +{ + return int32_to_float32((int32_t)rs1, &env->fp_status); +} + +uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1) +{ + return uint32_to_float32((uint32_t)rs1, &env->fp_status); +} + +#if defined(TARGET_RISCV64) +uint64_t helper_fcvt_s_l(CPURISCVState *env, uint64_t rs1) +{ + return int64_to_float32(rs1, &env->fp_status); +} + +uint64_t helper_fcvt_s_lu(CPURISCVState *env, uint64_t rs1) +{ + return uint64_to_float32(rs1, &env->fp_status); +} +#endif + +target_ulong helper_fclass_s(uint64_t frs1) +{ + float32 f = frs1; + bool sign = float32_is_neg(f); + + if (float32_is_infinity(f)) { + return sign ? 1 << 0 : 1 << 7; + } else if (float32_is_zero(f)) { + return sign ? 1 << 3 : 1 << 4; + } else if (float32_is_zero_or_denormal(f)) { + return sign ? 1 << 2 : 1 << 5; + } else if (float32_is_any_nan(f)) { + float_status s = { 0 }; /* for snan_bit_is_one */ + return float32_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; + } else { + return sign ? 1 << 1 : 1 << 6; + } +} + +uint64_t helper_fadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float64_add(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float64_sub(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fmul_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float64_mul(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fdiv_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float64_div(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float64_minnum(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float64_maxnum(frs1, frs2, &env->fp_status); +} + +uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1) +{ + return float64_to_float32(rs1, &env->fp_status); +} + +uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1) +{ + return float32_to_float64(rs1, &env->fp_status); +} + +uint64_t helper_fsqrt_d(CPURISCVState *env, uint64_t frs1) +{ + return float64_sqrt(frs1, &env->fp_status); +} + +target_ulong helper_fle_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float64_le(frs1, frs2, &env->fp_status); +} + +target_ulong helper_flt_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float64_lt(frs1, frs2, &env->fp_status); +} + +target_ulong helper_feq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) +{ + return float64_eq_quiet(frs1, frs2, &env->fp_status); +} + +target_ulong helper_fcvt_w_d(CPURISCVState *env, uint64_t frs1) +{ + return float64_to_int32(frs1, &env->fp_status); +} + +target_ulong helper_fcvt_wu_d(CPURISCVState *env, uint64_t frs1) +{ + return (int32_t)float64_to_uint32(frs1, &env->fp_status); +} + +#if defined(TARGET_RISCV64) +uint64_t helper_fcvt_l_d(CPURISCVState *env, uint64_t frs1) +{ + return float64_to_int64(frs1, &env->fp_status); +} + +uint64_t helper_fcvt_lu_d(CPURISCVState *env, uint64_t frs1) +{ + return float64_to_uint64(frs1, 
&env->fp_status); +} +#endif + +uint64_t helper_fcvt_d_w(CPURISCVState *env, target_ulong rs1) +{ + return int32_to_float64((int32_t)rs1, &env->fp_status); +} + +uint64_t helper_fcvt_d_wu(CPURISCVState *env, target_ulong rs1) +{ + return uint32_to_float64((uint32_t)rs1, &env->fp_status); +} + +#if defined(TARGET_RISCV64) +uint64_t helper_fcvt_d_l(CPURISCVState *env, uint64_t rs1) +{ + return int64_to_float64(rs1, &env->fp_status); +} + +uint64_t helper_fcvt_d_lu(CPURISCVState *env, uint64_t rs1) +{ + return uint64_to_float64(rs1, &env->fp_status); +} +#endif + +target_ulong helper_fclass_d(uint64_t frs1) +{ + float64 f = frs1; + bool sign = float64_is_neg(f); + + if (float64_is_infinity(f)) { + return sign ? 1 << 0 : 1 << 7; + } else if (float64_is_zero(f)) { + return sign ? 1 << 3 : 1 << 4; + } else if (float64_is_zero_or_denormal(f)) { + return sign ? 1 << 2 : 1 << 5; + } else if (float64_is_any_nan(f)) { + float_status s = { 0 }; /* for snan_bit_is_one */ + return float64_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; + } else { + return sign ? 1 << 1 : 1 << 6; + } +} diff --git a/qemu/target/riscv/helper.h b/qemu/target/riscv/helper.h new file mode 100644 index 00000000..fddc79c7 --- /dev/null +++ b/qemu/target/riscv/helper.h @@ -0,0 +1,79 @@ +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) +DEF_HELPER_1(uc_riscv_exit, void, env) + +/* Exceptions */ +DEF_HELPER_2(raise_exception, noreturn, env, i32) + +/* Floating Point - rounding mode */ +DEF_HELPER_FLAGS_2(set_rounding_mode, TCG_CALL_NO_WG, void, env, i32) + +/* Floating Point - fused */ +DEF_HELPER_FLAGS_4(fmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fnmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fnmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fnmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fnmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) + +/* Floating Point - Single Precision */ +DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_3(fle_s, TCG_CALL_NO_RWG, tl, env, i64, i64) +DEF_HELPER_FLAGS_3(flt_s, TCG_CALL_NO_RWG, tl, env, i64, i64) +DEF_HELPER_FLAGS_3(feq_s, TCG_CALL_NO_RWG, tl, env, i64, i64) +DEF_HELPER_FLAGS_2(fcvt_w_s, TCG_CALL_NO_RWG, tl, env, i64) +DEF_HELPER_FLAGS_2(fcvt_wu_s, TCG_CALL_NO_RWG, tl, env, i64) +#if defined(TARGET_RISCV64) +DEF_HELPER_FLAGS_2(fcvt_l_s, TCG_CALL_NO_RWG, tl, env, i64) +DEF_HELPER_FLAGS_2(fcvt_lu_s, TCG_CALL_NO_RWG, tl, env, i64) +#endif +DEF_HELPER_FLAGS_2(fcvt_s_w, TCG_CALL_NO_RWG, i64, env, tl) +DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl) +#if defined(TARGET_RISCV64) +DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl) +DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl) +#endif +DEF_HELPER_FLAGS_1(fclass_s, TCG_CALL_NO_RWG_SE, tl, i64) + +/* Floating Point - Double Precision */ +DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64) 
+DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_2(fcvt_s_d, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvt_d_s, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_3(fle_d, TCG_CALL_NO_RWG, tl, env, i64, i64) +DEF_HELPER_FLAGS_3(flt_d, TCG_CALL_NO_RWG, tl, env, i64, i64) +DEF_HELPER_FLAGS_3(feq_d, TCG_CALL_NO_RWG, tl, env, i64, i64) +DEF_HELPER_FLAGS_2(fcvt_w_d, TCG_CALL_NO_RWG, tl, env, i64) +DEF_HELPER_FLAGS_2(fcvt_wu_d, TCG_CALL_NO_RWG, tl, env, i64) +#if defined(TARGET_RISCV64) +DEF_HELPER_FLAGS_2(fcvt_l_d, TCG_CALL_NO_RWG, tl, env, i64) +DEF_HELPER_FLAGS_2(fcvt_lu_d, TCG_CALL_NO_RWG, tl, env, i64) +#endif +DEF_HELPER_FLAGS_2(fcvt_d_w, TCG_CALL_NO_RWG, i64, env, tl) +DEF_HELPER_FLAGS_2(fcvt_d_wu, TCG_CALL_NO_RWG, i64, env, tl) +#if defined(TARGET_RISCV64) +DEF_HELPER_FLAGS_2(fcvt_d_l, TCG_CALL_NO_RWG, i64, env, tl) +DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, tl) +#endif +DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64) + +/* Special functions */ +DEF_HELPER_3(csrrw, tl, env, tl, tl) +DEF_HELPER_4(csrrs, tl, env, tl, tl, tl) +DEF_HELPER_4(csrrc, tl, env, tl, tl, tl) +DEF_HELPER_2(sret, tl, env, tl) +DEF_HELPER_2(mret, tl, env, tl) +DEF_HELPER_1(wfi, void, env) +DEF_HELPER_1(tlb_flush, void, env) diff --git a/qemu/target/riscv/insn_trans/trans_privileged.inc.c b/qemu/target/riscv/insn_trans/trans_privileged.inc.c new file mode 100644 index 00000000..05662b21 --- /dev/null +++ b/qemu/target/riscv/insn_trans/trans_privileged.inc.c @@ -0,0 +1,133 @@ +/* + * RISC-V translation routines for the RISC-V privileged instructions. + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de + * Bastian Koppelmann, kbastian@mail.uni-paderborn.de + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +static bool trans_ecall(DisasContext *ctx, arg_ecall *a) +{ + /* always generates U-level ECALL, fixed in do_interrupt handler */ + generate_exception(ctx, RISCV_EXCP_U_ECALL); + exit_tb(ctx); /* no chaining */ + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} + +static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a) +{ + /* generate a breakpoint exception */ + generate_exception(ctx, RISCV_EXCP_BREAKPOINT); + exit_tb(ctx); /* no chaining */ + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} + +static bool trans_uret(DisasContext *ctx, arg_uret *a) +{ + return false; +} + +static bool trans_sret(DisasContext *ctx, arg_sret *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); + + if (has_ext(ctx, RVS)) { + gen_helper_sret(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_env, tcg_ctx->cpu_pc); + exit_tb(ctx); /* no chaining */ + ctx->base.is_jmp = DISAS_NORETURN; + } else { + return false; + } + return true; +} + +static bool trans_mret(DisasContext *ctx, arg_mret *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); + gen_helper_mret(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_env, tcg_ctx->cpu_pc); + exit_tb(ctx); /* no chaining */ + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} + +static bool trans_wfi(DisasContext *ctx, arg_wfi *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->pc_succ_insn); + gen_helper_wfi(tcg_ctx, tcg_ctx->cpu_env); + return true; +} + +static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->priv_ver >= PRIV_VERSION_1_10_0) { + gen_helper_tlb_flush(tcg_ctx, tcg_ctx->cpu_env); + return true; + } + return false; +} + +static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->priv_ver <= PRIV_VERSION_1_09_1) { + gen_helper_tlb_flush(tcg_ctx, tcg_ctx->cpu_env); + return true; + } + return false; +} + +static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->priv_ver >= PRIV_VERSION_1_10_0 && + has_ext(ctx, RVH)) { + /* Hypervisor extension exists */ + /* + * if (env->priv == PRV_M || + * (env->priv == PRV_S && + * !riscv_cpu_virt_enabled(env) && + * get_field(ctx->mstatus_fs, MSTATUS_TVM))) { + */ + gen_helper_tlb_flush(tcg_ctx, tcg_ctx->cpu_env); + return true; + /* } */ + } + return false; +} + +static bool trans_hfence_bvma(DisasContext *ctx, arg_sfence_vma *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->priv_ver >= PRIV_VERSION_1_10_0 && + has_ext(ctx, RVH)) { + /* Hypervisor extension exists */ + /* + * if (env->priv == PRV_M || + * (env->priv == PRV_S && + * !riscv_cpu_virt_enabled(env) && + * get_field(ctx->mstatus_fs, MSTATUS_TVM))) { + */ + gen_helper_tlb_flush(tcg_ctx, tcg_ctx->cpu_env); + return true; + /* } */ + } + return false; +} diff --git a/qemu/target/riscv/insn_trans/trans_rva.inc.c b/qemu/target/riscv/insn_trans/trans_rva.inc.c new file mode 100644 index 00000000..99368ac1 --- /dev/null +++ b/qemu/target/riscv/insn_trans/trans_rva.inc.c @@ -0,0 +1,227 @@ +/* + * RISC-V translation routines for the RV64A Standard Extension.
+ * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de + * Bastian Koppelmann, kbastian@mail.uni-paderborn.de + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv src1 = tcg_temp_new(tcg_ctx); + /* Put addr in load_res, data in load_val. */ + gen_get_gpr(tcg_ctx, src1, a->rs1); + if (a->rl) { + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_STRL); + } + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->load_val, src1, ctx->mem_idx, mop); + if (a->aq) { + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_LDAQ); + } + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->load_res, src1); + gen_set_gpr(tcg_ctx, a->rd, tcg_ctx->load_val); + + tcg_temp_free(tcg_ctx, src1); + return true; +} + +static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv src1 = tcg_temp_new(tcg_ctx); + TCGv src2 = tcg_temp_new(tcg_ctx); + TCGv dat = tcg_temp_new(tcg_ctx); + TCGLabel *l1 = gen_new_label(tcg_ctx); + TCGLabel *l2 = gen_new_label(tcg_ctx); + + gen_get_gpr(tcg_ctx, src1, a->rs1); + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->load_res, src1, l1); + + gen_get_gpr(tcg_ctx, src2, a->rs2); + /* + * Note that the TCG atomic primitives are sequentially consistent, + * so we can ignore AQ/RL along this path. + */ + tcg_gen_atomic_cmpxchg_tl(tcg_ctx, src1, tcg_ctx->load_res, tcg_ctx->load_val, src2, + ctx->mem_idx, mop); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, dat, src1, tcg_ctx->load_val); + gen_set_gpr(tcg_ctx, a->rd, dat); + tcg_gen_br(tcg_ctx, l2); + + gen_set_label(tcg_ctx, l1); + /* + * Address comparison failure. However, we still need to + * provide the memory barrier implied by AQ/RL. + */ + tcg_gen_mb(tcg_ctx, TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL); + tcg_gen_movi_tl(tcg_ctx, dat, 1); + gen_set_gpr(tcg_ctx, a->rd, dat); + + gen_set_label(tcg_ctx, l2); + /* + * Clear the load reservation, since an SC must fail if there is + * an SC to any address in between an LR and SC pair.
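+ * (-1 is a sentinel no aligned address can match, so the brcond at the
+ * top of gen_sc keeps taking the failure path until a new LR re-arms
+ * load_res.)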
+ */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->load_res, -1); + + tcg_temp_free(tcg_ctx, dat); + tcg_temp_free(tcg_ctx, src1); + tcg_temp_free(tcg_ctx, src2); + return true; +} + +static bool gen_amo(DisasContext *ctx, arg_atomic *a, + void(*func)(TCGContext *, TCGv, TCGv, TCGv, TCGArg, MemOp), + MemOp mop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv src1 = tcg_temp_new(tcg_ctx); + TCGv src2 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, src1, a->rs1); + gen_get_gpr(tcg_ctx, src2, a->rs2); + + (*func)(tcg_ctx, src2, src1, src2, ctx->mem_idx, mop); + + gen_set_gpr(tcg_ctx, a->rd, src2); + tcg_temp_free(tcg_ctx, src1); + tcg_temp_free(tcg_ctx, src2); + return true; +} + +static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_lr(ctx, a, (MO_ALIGN | MO_TESL)); +} + +static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_sc(ctx, a, (MO_ALIGN | MO_TESL)); +} + +static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL)); +} + +static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL)); +} + +static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL)); +} + +static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL)); +} + +static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL)); +} + +static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL)); +} + +static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL)); +} + +static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL)); +} + +static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) +{ + REQUIRE_EXT(ctx, RVA); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL)); +} + +#ifdef TARGET_RISCV64 + +static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) +{ + return gen_lr(ctx, a, MO_ALIGN | MO_TEQ); +} + +static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a) +{ + return gen_sc(ctx, a, (MO_ALIGN | MO_TEQ)); +} + +static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) +{ + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEQ)); +} + +static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) +{ + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEQ)); +} + +static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) +{ + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEQ)); +} + +static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) +{ + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEQ)); +} + +static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) +{ + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEQ)); +} + +static bool trans_amomin_d(DisasContext 
*ctx, arg_amomin_d *a) +{ + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEQ)); +} + +static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) +{ + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEQ)); +} + +static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) +{ + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEQ)); +} + +static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) +{ + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEQ)); +} +#endif diff --git a/qemu/target/riscv/insn_trans/trans_rvd.inc.c b/qemu/target/riscv/insn_trans/trans_rvd.inc.c new file mode 100644 index 00000000..2e643d51 --- /dev/null +++ b/qemu/target/riscv/insn_trans/trans_rvd.inc.c @@ -0,0 +1,473 @@ +/* + * RISC-V translation routines for the RV64D Standard Extension. + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de + * Bastian Koppelmann, kbastian@mail.uni-paderborn.de + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +static bool trans_fld(DisasContext *ctx, arg_fld *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); + + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEQ); + + mark_fs_dirty(ctx); + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fsd(DisasContext *ctx, arg_fsd *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); + + tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEQ); + + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_set_rm(ctx, a->rm); + gen_helper_fmadd_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], + tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_set_rm(ctx, a->rm); + gen_helper_fmsub_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], + tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_set_rm(ctx, a->rm); + gen_helper_fnmsub_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], + tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); + 
mark_fs_dirty(ctx);
+    return true;
+}
+
+static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a)
+{
+    REQUIRE_FPU;
+    REQUIRE_EXT(ctx, RVD);
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    gen_set_rm(ctx, a->rm);
+    gen_helper_fnmadd_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1],
+                        tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]);
+    mark_fs_dirty(ctx);
+    return true;
+}
+
+static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
+{
+    REQUIRE_FPU;
+    REQUIRE_EXT(ctx, RVD);
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+
+    gen_set_rm(ctx, a->rm);
+    gen_helper_fadd_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env,
+                      tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]);
+
+    mark_fs_dirty(ctx);
+    return true;
+}
+
+static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
+{
+    REQUIRE_FPU;
+    REQUIRE_EXT(ctx, RVD);
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+
+    gen_set_rm(ctx, a->rm);
+    gen_helper_fsub_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env,
+                      tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]);
+
+    mark_fs_dirty(ctx);
+    return true;
+}
+
+static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
+{
+    REQUIRE_FPU;
+    REQUIRE_EXT(ctx, RVD);
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+
+    gen_set_rm(ctx, a->rm);
+    gen_helper_fmul_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env,
+                      tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]);
+
+    mark_fs_dirty(ctx);
+    return true;
+}
+
+static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
+{
+    REQUIRE_FPU;
+    REQUIRE_EXT(ctx, RVD);
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+
+    gen_set_rm(ctx, a->rm);
+    gen_helper_fdiv_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env,
+                      tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]);
+
+    mark_fs_dirty(ctx);
+    return true;
+}
+
+static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a)
+{
+    REQUIRE_FPU;
+    REQUIRE_EXT(ctx, RVD);
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+
+    gen_set_rm(ctx, a->rm);
+    gen_helper_fsqrt_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]);
+
+    mark_fs_dirty(ctx);
+    return true;
+}
+
+static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a)
+{
+    REQUIRE_FPU;
+    REQUIRE_EXT(ctx, RVD);
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    if (a->rs1 == a->rs2) { /* FMOV */
+        tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1]);
+    } else {
+        tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs2],
+                            tcg_ctx->cpu_fpr[a->rs1], 0, 63);
+    }
+    mark_fs_dirty(ctx);
+    return true;
+}
+
+static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a)
+{
+    REQUIRE_FPU;
+    REQUIRE_EXT(ctx, RVD);
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    if (a->rs1 == a->rs2) { /* FNEG */
+        tcg_gen_xori_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], INT64_MIN);
+    } else {
+        TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
+        tcg_gen_not_i64(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs2]);
+        tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0, tcg_ctx->cpu_fpr[a->rs1], 0, 63);
+        tcg_temp_free_i64(tcg_ctx, t0);
+    }
+    mark_fs_dirty(ctx);
+    return true;
+}
+
+static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a)
+{
+    REQUIRE_FPU;
+    REQUIRE_EXT(ctx, RVD);
+    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
+    if (a->rs1 == a->rs2) { /* FABS */
+        tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], ~INT64_MIN);
+    } else {
+        TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
+        tcg_gen_andi_i64(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs2], INT64_MIN);
+        tcg_gen_xor_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], t0);
+
tcg_temp_free_i64(tcg_ctx, t0); + } + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_helper_fmin_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, + tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_helper_fmax_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, + tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_s_d(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_d_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + + mark_fs_dirty(ctx); + return true; +} + +static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_helper_feq_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_helper_flt_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_helper_fle_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_helper_fclass_d(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs1]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_w_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_wu_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + + return 
true; +} + +static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_d_w(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); + + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_d_wu(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); + + mark_fs_dirty(ctx); + return true; +} + +#ifdef TARGET_RISCV64 + +static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_l_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_lu_d(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fmv_x_d(DisasContext *ctx, arg_fmv_x_d *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_set_gpr(tcg_ctx, a->rd, tcg_ctx->cpu_fpr[a->rs1]); + return true; +} + +static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_d_l(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_d_lu(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); + tcg_temp_free(tcg_ctx, t0); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVD); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0); + tcg_temp_free(tcg_ctx, t0); + mark_fs_dirty(ctx); + return true; +} +#endif diff --git a/qemu/target/riscv/insn_trans/trans_rvf.inc.c b/qemu/target/riscv/insn_trans/trans_rvf.inc.c new file mode 100644 index 00000000..de044bfe --- /dev/null +++ b/qemu/target/riscv/insn_trans/trans_rvf.inc.c @@ -0,0 +1,474 @@ +/* + * RISC-V translation routines for the RV64F Standard Extension. 
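+ *
+ * Single-precision operands live in the 64-bit cpu_fpr registers and are
+ * kept NaN-boxed (upper 32 bits set to ones) when narrower values are
+ * loaded, as the ISA requires; see trans_flw below.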
+ * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de + * Bastian Koppelmann, kbastian@mail.uni-paderborn.de + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#define REQUIRE_FPU do {\ + if (ctx->mstatus_fs == 0) \ + return false; \ +} while (0) + +static bool trans_flw(DisasContext *ctx, arg_flw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); + + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEUL); + /* RISC-V requires NaN-boxing of narrower width floating point values */ + tcg_gen_ori_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rd], 0xffffffff00000000ULL); + + tcg_temp_free(tcg_ctx, t0); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsw(DisasContext *ctx, arg_fsw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); + + tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEUL); + + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_set_rm(ctx, a->rm); + gen_helper_fmadd_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], + tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_set_rm(ctx, a->rm); + gen_helper_fmsub_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], + tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_set_rm(ctx, a->rm); + gen_helper_fnmsub_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], + tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + gen_set_rm(ctx, a->rm); + gen_helper_fnmadd_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], + tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs3]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_set_rm(ctx, a->rm); + gen_helper_fadd_s(tcg_ctx, 
tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, + tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_set_rm(ctx, a->rm); + gen_helper_fsub_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, + tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_set_rm(ctx, a->rm); + gen_helper_fmul_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, + tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_set_rm(ctx, a->rm); + gen_helper_fdiv_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, + tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_set_rm(ctx, a->rm); + gen_helper_fsqrt_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (a->rs1 == a->rs2) { /* FMOV */ + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1]); + } else { /* FSGNJ */ + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs2], tcg_ctx->cpu_fpr[a->rs1], + 0, 31); + } + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (a->rs1 == a->rs2) { /* FNEG */ + tcg_gen_xori_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], INT32_MIN); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_not_i64(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs2]); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0, tcg_ctx->cpu_fpr[a->rs1], 0, 31); + tcg_temp_free_i64(tcg_ctx, t0); + } + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (a->rs1 == a->rs2) { /* FABS */ + tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], ~INT32_MIN); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_andi_i64(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs2], INT32_MIN); + tcg_gen_xor_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_fpr[a->rs1], t0); + tcg_temp_free_i64(tcg_ctx, t0); + } + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_helper_fmin_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], + tcg_ctx->cpu_fpr[a->rs2]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + 
gen_helper_fmax_s(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], + tcg_ctx->cpu_fpr[a->rs2]); + mark_fs_dirty(ctx); + return true; +} + +static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_w_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_wu_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a) +{ + /* NOTE: This was FMV.X.S in an earlier version of the ISA spec! */ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + +#if defined(TARGET_RISCV64) + tcg_gen_ext32s_tl(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs1]); +#else + tcg_gen_extrl_i64_i32(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs1]); +#endif + + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_helper_feq_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_helper_flt_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_helper_fle_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1], tcg_ctx->cpu_fpr[a->rs2]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_helper_fclass_s(tcg_ctx, t0, tcg_ctx->cpu_fpr[a->rs1]); + + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_s_w(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); + + mark_fs_dirty(ctx); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + + gen_set_rm(ctx, a->rm); + 
gen_helper_fcvt_s_wu(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); + + mark_fs_dirty(ctx); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a) +{ + /* NOTE: This was FMV.S.X in an earlier version of the ISA spec! */ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + +#if defined(TARGET_RISCV64) + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0); +#else + tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], t0); +#endif + + mark_fs_dirty(ctx); + tcg_temp_free(tcg_ctx, t0); + + return true; +} + +#ifdef TARGET_RISCV64 +static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_l_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_lu_s(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_ctx->cpu_fpr[a->rs1]); + gen_set_gpr(tcg_ctx, a->rd, t0); + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_s_l(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); + + mark_fs_dirty(ctx); + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a) +{ + REQUIRE_FPU; + REQUIRE_EXT(ctx, RVF); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + + gen_set_rm(ctx, a->rm); + gen_helper_fcvt_s_lu(tcg_ctx, tcg_ctx->cpu_fpr[a->rd], tcg_ctx->cpu_env, t0); + + mark_fs_dirty(ctx); + tcg_temp_free(tcg_ctx, t0); + return true; +} +#endif diff --git a/qemu/target/riscv/insn_trans/trans_rvi.inc.c b/qemu/target/riscv/insn_trans/trans_rvi.inc.c new file mode 100644 index 00000000..186ed98e --- /dev/null +++ b/qemu/target/riscv/insn_trans/trans_rvi.inc.c @@ -0,0 +1,613 @@ +/* + * RISC-V translation routines for the RVXI Base Integer Instruction Set. + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de + * Bastian Koppelmann, kbastian@mail.uni-paderborn.de + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
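+ *
+ * Loads and stores funnel through gen_load/gen_store with a MemOp
+ * argument; most ALU instructions are thin wrappers around the shared
+ * gen_arith, gen_arith_imm_* and gen_shift helpers.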
+ */ + +static bool trans_illegal(DisasContext *ctx, arg_empty *a) +{ + gen_exception_illegal(ctx); + + return true; +} + +static bool trans_lui(DisasContext *ctx, arg_lui *a) +{ + if (a->rd != 0) { + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[a->rd], a->imm); + } + return true; +} + +static bool trans_auipc(DisasContext *ctx, arg_auipc *a) +{ + if (a->rd != 0) { + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[a->rd], a->imm + ctx->base.pc_next); + } + return true; +} + +static bool trans_jal(DisasContext *ctx, arg_jal *a) +{ + gen_jal(ctx, a->rd, a->imm); + return true; +} + +static bool trans_jalr(DisasContext *ctx, arg_jalr *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + /* no chaining with JALR */ + TCGLabel *misaligned = NULL; + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, tcg_ctx->cpu_pc, a->rs1); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_pc, a->imm); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_pc, (target_ulong)-2); + + if (!has_ext(ctx, RVC)) { + misaligned = gen_new_label(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, tcg_ctx->cpu_pc, 0x2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, 0x0, misaligned); + } + + if (a->rd != 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[a->rd], ctx->pc_succ_insn); + } + lookup_and_goto_ptr(ctx); + + if (misaligned) { + gen_set_label(tcg_ctx, misaligned); + gen_exception_inst_addr_mis(ctx); + } + ctx->base.is_jmp = DISAS_NORETURN; + + tcg_temp_free(tcg_ctx, t0); + return true; +} + +static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGLabel *l = gen_new_label(tcg_ctx); + TCGv source1, source2; + source1 = tcg_temp_new(tcg_ctx); + source2 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, source1, a->rs1); + gen_get_gpr(tcg_ctx, source2, a->rs2); + + tcg_gen_brcond_tl(tcg_ctx, cond, source1, source2, l); + gen_goto_tb(ctx, 1, ctx->pc_succ_insn); + gen_set_label(tcg_ctx, l); /* branch taken */ + + if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) { + /* misaligned */ + gen_exception_inst_addr_mis(ctx); + } else { + gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm); + } + ctx->base.is_jmp = DISAS_NORETURN; + + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + + return true; +} + +static bool trans_beq(DisasContext *ctx, arg_beq *a) +{ + return gen_branch(ctx, a, TCG_COND_EQ); +} + +static bool trans_bne(DisasContext *ctx, arg_bne *a) +{ + return gen_branch(ctx, a, TCG_COND_NE); +} + +static bool trans_blt(DisasContext *ctx, arg_blt *a) +{ + return gen_branch(ctx, a, TCG_COND_LT); +} + +static bool trans_bge(DisasContext *ctx, arg_bge *a) +{ + return gen_branch(ctx, a, TCG_COND_GE); +} + +static bool trans_bltu(DisasContext *ctx, arg_bltu *a) +{ + return gen_branch(ctx, a, TCG_COND_LTU); +} + +static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a) +{ + return gen_branch(ctx, a, TCG_COND_GEU); +} + +static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); + + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, memop); + gen_set_gpr(tcg_ctx, a->rd, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + return true; +} + +static bool trans_lb(DisasContext *ctx, arg_lb *a) +{ + return gen_load(ctx, a, MO_SB); +} + +static bool 
trans_lh(DisasContext *ctx, arg_lh *a) +{ + return gen_load(ctx, a, MO_TESW); +} + +static bool trans_lw(DisasContext *ctx, arg_lw *a) +{ + return gen_load(ctx, a, MO_TESL); +} + +static bool trans_lbu(DisasContext *ctx, arg_lbu *a) +{ + return gen_load(ctx, a, MO_UB); +} + +static bool trans_lhu(DisasContext *ctx, arg_lhu *a) +{ + return gen_load(ctx, a, MO_TEUW); +} + +static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv dat = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, a->rs1); + tcg_gen_addi_tl(tcg_ctx, t0, t0, a->imm); + gen_get_gpr(tcg_ctx, dat, a->rs2); + + tcg_gen_qemu_st_tl(tcg_ctx, dat, t0, ctx->mem_idx, memop); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, dat); + return true; +} + + +static bool trans_sb(DisasContext *ctx, arg_sb *a) +{ + return gen_store(ctx, a, MO_SB); +} + +static bool trans_sh(DisasContext *ctx, arg_sh *a) +{ + return gen_store(ctx, a, MO_TESW); +} + +static bool trans_sw(DisasContext *ctx, arg_sw *a) +{ + return gen_store(ctx, a, MO_TESL); +} + +#ifdef TARGET_RISCV64 +static bool trans_lwu(DisasContext *ctx, arg_lwu *a) +{ + return gen_load(ctx, a, MO_TEUL); +} + +static bool trans_ld(DisasContext *ctx, arg_ld *a) +{ + return gen_load(ctx, a, MO_TEQ); +} + +static bool trans_sd(DisasContext *ctx, arg_sd *a) +{ + return gen_store(ctx, a, MO_TEQ); +} +#endif + +static bool trans_addi(DisasContext *ctx, arg_addi *a) +{ + return gen_arith_imm_fn(ctx, a, &tcg_gen_addi_tl); +} + +static void gen_slt(TCGContext *tcg_ctx, TCGv ret, TCGv s1, TCGv s2) +{ + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, ret, s1, s2); +} + +static void gen_sltu(TCGContext *tcg_ctx, TCGv ret, TCGv s1, TCGv s2) +{ + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LTU, ret, s1, s2); +} + + +static bool trans_slti(DisasContext *ctx, arg_slti *a) +{ + return gen_arith_imm_tl(ctx, a, &gen_slt); +} + +static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a) +{ + return gen_arith_imm_tl(ctx, a, &gen_sltu); +} + +static bool trans_xori(DisasContext *ctx, arg_xori *a) +{ + return gen_arith_imm_fn(ctx, a, &tcg_gen_xori_tl); +} +static bool trans_ori(DisasContext *ctx, arg_ori *a) +{ + return gen_arith_imm_fn(ctx, a, &tcg_gen_ori_tl); +} +static bool trans_andi(DisasContext *ctx, arg_andi *a) +{ + return gen_arith_imm_fn(ctx, a, &tcg_gen_andi_tl); +} +static bool trans_slli(DisasContext *ctx, arg_slli *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (a->shamt >= TARGET_LONG_BITS) { + return false; + } + + if (a->rd != 0) { + TCGv t = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t, a->rs1); + + tcg_gen_shli_tl(tcg_ctx, t, t, a->shamt); + + gen_set_gpr(tcg_ctx, a->rd, t); + tcg_temp_free(tcg_ctx, t); + } /* NOP otherwise */ + return true; +} + +static bool trans_srli(DisasContext *ctx, arg_srli *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (a->shamt >= TARGET_LONG_BITS) { + return false; + } + + if (a->rd != 0) { + TCGv t = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t, a->rs1); + + tcg_gen_shri_tl(tcg_ctx, t, t, a->shamt); + gen_set_gpr(tcg_ctx, a->rd, t); + tcg_temp_free(tcg_ctx, t); + } /* NOP otherwise */ + return true; +} + +static bool trans_srai(DisasContext *ctx, arg_srai *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (a->shamt >= TARGET_LONG_BITS) { + return false; + } + + if (a->rd != 0) { + TCGv t = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t, a->rs1); + + tcg_gen_sari_tl(tcg_ctx, t, t, a->shamt); + gen_set_gpr(tcg_ctx, a->rd, t); + tcg_temp_free(tcg_ctx, t); + } /* 
NOP otherwise */ + return true; +} + +static bool trans_add(DisasContext *ctx, arg_add *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &tcg_gen_add_tl); +} + +static bool trans_sub(DisasContext *ctx, arg_sub *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &tcg_gen_sub_tl); +} + +static bool trans_sll(DisasContext *ctx, arg_sll *a) +{ + return gen_shift(ctx, a, &tcg_gen_shl_tl); +} + +static bool trans_slt(DisasContext *ctx, arg_slt *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &gen_slt); +} + +static bool trans_sltu(DisasContext *ctx, arg_sltu *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &gen_sltu); +} + +static bool trans_xor(DisasContext *ctx, arg_xor *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &tcg_gen_xor_tl); +} + +static bool trans_srl(DisasContext *ctx, arg_srl *a) +{ + return gen_shift(ctx, a, &tcg_gen_shr_tl); +} + +static bool trans_sra(DisasContext *ctx, arg_sra *a) +{ + return gen_shift(ctx, a, &tcg_gen_sar_tl); +} + +static bool trans_or(DisasContext *ctx, arg_or *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &tcg_gen_or_tl); +} + +static bool trans_and(DisasContext *ctx, arg_and *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &tcg_gen_and_tl); +} + +#ifdef TARGET_RISCV64 +static bool trans_addiw(DisasContext *ctx, arg_addiw *a) +{ + return gen_arith_imm_tl(ctx, a, &gen_addw); +} + +static bool trans_slliw(DisasContext *ctx, arg_slliw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1; + source1 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, source1, a->rs1); + + tcg_gen_shli_tl(tcg_ctx, source1, source1, a->shamt); + tcg_gen_ext32s_tl(tcg_ctx, source1, source1); + gen_set_gpr(tcg_ctx, a->rd, source1); + + tcg_temp_free(tcg_ctx, source1); + return true; +} + +static bool trans_srliw(DisasContext *ctx, arg_srliw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t, a->rs1); + tcg_gen_extract_tl(tcg_ctx, t, t, a->shamt, 32 - a->shamt); + /* sign-extend for W instructions */ + tcg_gen_ext32s_tl(tcg_ctx, t, t); + gen_set_gpr(tcg_ctx, a->rd, t); + tcg_temp_free(tcg_ctx, t); + return true; +} + +static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t, a->rs1); + tcg_gen_sextract_tl(tcg_ctx, t, t, a->shamt, 32 - a->shamt); + gen_set_gpr(tcg_ctx, a->rd, t); + tcg_temp_free(tcg_ctx, t); + return true; +} + +static bool trans_addw(DisasContext *ctx, arg_addw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &gen_addw); +} + +static bool trans_subw(DisasContext *ctx, arg_subw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &gen_subw); +} + +static bool trans_sllw(DisasContext *ctx, arg_sllw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1 = tcg_temp_new(tcg_ctx); + TCGv source2 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, source1, a->rs1); + gen_get_gpr(tcg_ctx, source2, a->rs2); + + tcg_gen_andi_tl(tcg_ctx, source2, source2, 0x1F); + tcg_gen_shl_tl(tcg_ctx, source1, source1, source2); + + tcg_gen_ext32s_tl(tcg_ctx, source1, source1); + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + return true; +} + +static bool trans_srlw(DisasContext *ctx, 
arg_srlw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1 = tcg_temp_new(tcg_ctx); + TCGv source2 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, source1, a->rs1); + gen_get_gpr(tcg_ctx, source2, a->rs2); + + /* clear upper 32 */ + tcg_gen_ext32u_tl(tcg_ctx, source1, source1); + tcg_gen_andi_tl(tcg_ctx, source2, source2, 0x1F); + tcg_gen_shr_tl(tcg_ctx, source1, source1, source2); + + tcg_gen_ext32s_tl(tcg_ctx, source1, source1); + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + return true; +} + +static bool trans_sraw(DisasContext *ctx, arg_sraw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1 = tcg_temp_new(tcg_ctx); + TCGv source2 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, source1, a->rs1); + gen_get_gpr(tcg_ctx, source2, a->rs2); + + /* + * first, trick to get it to act like working on 32 bits (get rid of + * upper 32, sign extend to fill space) + */ + tcg_gen_ext32s_tl(tcg_ctx, source1, source1); + tcg_gen_andi_tl(tcg_ctx, source2, source2, 0x1F); + tcg_gen_sar_tl(tcg_ctx, source1, source1, source2); + + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + + return true; +} +#endif + +static bool trans_fence(DisasContext *ctx, arg_fence *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + /* FENCE is a full memory barrier. */ + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); + return true; +} + +static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (!ctx->ext_ifencei) { + return false; + } + + /* + * FENCE_I is a no-op in QEMU, + * however we need to end the translation block + */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->pc_succ_insn); + exit_tb(ctx); + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} + +#define RISCV_OP_CSR_PRE do {\ + source1 = tcg_temp_new(tcg_ctx); \ + csr_store = tcg_temp_new(tcg_ctx); \ + dest = tcg_temp_new(tcg_ctx); \ + rs1_pass = tcg_temp_new(tcg_ctx); \ + gen_get_gpr(tcg_ctx, source1, a->rs1); \ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); \ + tcg_gen_movi_tl(tcg_ctx, rs1_pass, a->rs1); \ + tcg_gen_movi_tl(tcg_ctx, csr_store, a->csr); \ + gen_io_start(tcg_ctx);\ +} while (0) + +#define RISCV_OP_CSR_POST do {\ + gen_set_gpr(tcg_ctx, a->rd, dest); \ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->pc_succ_insn); \ + exit_tb(ctx); \ + ctx->base.is_jmp = DISAS_NORETURN; \ + tcg_temp_free(tcg_ctx, source1); \ + tcg_temp_free(tcg_ctx, csr_store); \ + tcg_temp_free(tcg_ctx, dest); \ + tcg_temp_free(tcg_ctx, rs1_pass); \ +} while (0) + + +static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1, csr_store, dest, rs1_pass; + RISCV_OP_CSR_PRE; + gen_helper_csrrw(tcg_ctx, dest, tcg_ctx->cpu_env, source1, csr_store); + RISCV_OP_CSR_POST; + return true; +} + +static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1, csr_store, dest, rs1_pass; + RISCV_OP_CSR_PRE; + gen_helper_csrrs(tcg_ctx, dest, tcg_ctx->cpu_env, source1, csr_store, rs1_pass); + RISCV_OP_CSR_POST; + return true; +} + +static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1, csr_store, dest, rs1_pass; + RISCV_OP_CSR_PRE; + gen_helper_csrrc(tcg_ctx, dest, tcg_ctx->cpu_env, source1, csr_store, rs1_pass); + RISCV_OP_CSR_POST; + return true; +} + +static bool trans_csrrwi(DisasContext 
*ctx, arg_csrrwi *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1, csr_store, dest, rs1_pass; + RISCV_OP_CSR_PRE; + gen_helper_csrrw(tcg_ctx, dest, tcg_ctx->cpu_env, rs1_pass, csr_store); + RISCV_OP_CSR_POST; + return true; +} + +static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1, csr_store, dest, rs1_pass; + RISCV_OP_CSR_PRE; + gen_helper_csrrs(tcg_ctx, dest, tcg_ctx->cpu_env, rs1_pass, csr_store, rs1_pass); + RISCV_OP_CSR_POST; + return true; +} + +static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1, csr_store, dest, rs1_pass; + RISCV_OP_CSR_PRE; + gen_helper_csrrc(tcg_ctx, dest, tcg_ctx->cpu_env, rs1_pass, csr_store, rs1_pass); + RISCV_OP_CSR_POST; + return true; +} diff --git a/qemu/target/riscv/insn_trans/trans_rvm.inc.c b/qemu/target/riscv/insn_trans/trans_rvm.inc.c new file mode 100644 index 00000000..1346010a --- /dev/null +++ b/qemu/target/riscv/insn_trans/trans_rvm.inc.c @@ -0,0 +1,133 @@ +/* + * RISC-V translation routines for the RV64M Standard Extension. + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de + * Bastian Koppelmann, kbastian@mail.uni-paderborn.de + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
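+ *
+ * Every translator below is gated on REQUIRE_EXT(ctx, RVM); mulh and
+ * mulhu expand to TCG's double-width muls2/mulu2 and keep only the
+ * high half of the product.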
+ */ + + +static bool trans_mul(DisasContext *ctx, arg_mul *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &tcg_gen_mul_tl); +} + +static bool trans_mulh(DisasContext *ctx, arg_mulh *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + REQUIRE_EXT(ctx, RVM); + TCGv source1 = tcg_temp_new(tcg_ctx); + TCGv source2 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, source1, a->rs1); + gen_get_gpr(tcg_ctx, source2, a->rs2); + + tcg_gen_muls2_tl(tcg_ctx, source2, source1, source1, source2); + + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + return true; +} + +static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &gen_mulhsu); +} + +static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + REQUIRE_EXT(ctx, RVM); + TCGv source1 = tcg_temp_new(tcg_ctx); + TCGv source2 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, source1, a->rs1); + gen_get_gpr(tcg_ctx, source2, a->rs2); + + tcg_gen_mulu2_tl(tcg_ctx, source2, source1, source1, source2); + + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + return true; +} + +static bool trans_div(DisasContext *ctx, arg_div *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &gen_div); +} + +static bool trans_divu(DisasContext *ctx, arg_divu *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &gen_divu); +} + +static bool trans_rem(DisasContext *ctx, arg_rem *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &gen_rem); +} + +static bool trans_remu(DisasContext *ctx, arg_remu *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &gen_remu); +} + +#ifdef TARGET_RISCV64 +static bool trans_mulw(DisasContext *ctx, arg_mulw *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith(tcg_ctx, a, &gen_mulw); +} + +static bool trans_divw(DisasContext *ctx, arg_divw *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith_div_w(tcg_ctx, a, &gen_div); +} + +static bool trans_divuw(DisasContext *ctx, arg_divuw *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith_div_uw(tcg_ctx, a, &gen_divu); +} + +static bool trans_remw(DisasContext *ctx, arg_remw *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith_div_w(tcg_ctx, a, &gen_rem); +} + +static bool trans_remuw(DisasContext *ctx, arg_remuw *a) +{ + REQUIRE_EXT(ctx, RVM); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + return gen_arith_div_uw(tcg_ctx, a, &gen_remu); +} +#endif diff --git a/qemu/target/riscv/instmap.h b/qemu/target/riscv/instmap.h new file mode 100644 index 00000000..40b6d2b6 --- /dev/null +++ b/qemu/target/riscv/instmap.h @@ -0,0 +1,369 @@ +/* + * RISC-V emulation for qemu: Instruction decode helpers + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef TARGET_RISCV_INSTMAP_H +#define TARGET_RISCV_INSTMAP_H + +#define MASK_OP_MAJOR(op) (op & 0x7F) +enum { + /* rv32i, rv64i, rv32m */ + OPC_RISC_LUI = (0x37), + OPC_RISC_AUIPC = (0x17), + OPC_RISC_JAL = (0x6F), + OPC_RISC_JALR = (0x67), + OPC_RISC_BRANCH = (0x63), + OPC_RISC_LOAD = (0x03), + OPC_RISC_STORE = (0x23), + OPC_RISC_ARITH_IMM = (0x13), + OPC_RISC_ARITH = (0x33), + OPC_RISC_FENCE = (0x0F), + OPC_RISC_SYSTEM = (0x73), + + /* rv64i, rv64m */ + OPC_RISC_ARITH_IMM_W = (0x1B), + OPC_RISC_ARITH_W = (0x3B), + + /* rv32a, rv64a */ + OPC_RISC_ATOMIC = (0x2F), + + /* floating point */ + OPC_RISC_FP_LOAD = (0x7), + OPC_RISC_FP_STORE = (0x27), + + OPC_RISC_FMADD = (0x43), + OPC_RISC_FMSUB = (0x47), + OPC_RISC_FNMSUB = (0x4B), + OPC_RISC_FNMADD = (0x4F), + + OPC_RISC_FP_ARITH = (0x53), +}; + +#define MASK_OP_ARITH(op) (MASK_OP_MAJOR(op) | (op & ((0x7 << 12) | \ + (0x7F << 25)))) +enum { + OPC_RISC_ADD = OPC_RISC_ARITH | (0x0 << 12) | (0x00 << 25), + OPC_RISC_SUB = OPC_RISC_ARITH | (0x0 << 12) | (0x20 << 25), + OPC_RISC_SLL = OPC_RISC_ARITH | (0x1 << 12) | (0x00 << 25), + OPC_RISC_SLT = OPC_RISC_ARITH | (0x2 << 12) | (0x00 << 25), + OPC_RISC_SLTU = OPC_RISC_ARITH | (0x3 << 12) | (0x00 << 25), + OPC_RISC_XOR = OPC_RISC_ARITH | (0x4 << 12) | (0x00 << 25), + OPC_RISC_SRL = OPC_RISC_ARITH | (0x5 << 12) | (0x00 << 25), + OPC_RISC_SRA = OPC_RISC_ARITH | (0x5 << 12) | (0x20 << 25), + OPC_RISC_OR = OPC_RISC_ARITH | (0x6 << 12) | (0x00 << 25), + OPC_RISC_AND = OPC_RISC_ARITH | (0x7 << 12) | (0x00 << 25), + + /* RV64M */ + OPC_RISC_MUL = OPC_RISC_ARITH | (0x0 << 12) | (0x01 << 25), + OPC_RISC_MULH = OPC_RISC_ARITH | (0x1 << 12) | (0x01 << 25), + OPC_RISC_MULHSU = OPC_RISC_ARITH | (0x2 << 12) | (0x01 << 25), + OPC_RISC_MULHU = OPC_RISC_ARITH | (0x3 << 12) | (0x01 << 25), + + OPC_RISC_DIV = OPC_RISC_ARITH | (0x4 << 12) | (0x01 << 25), + OPC_RISC_DIVU = OPC_RISC_ARITH | (0x5 << 12) | (0x01 << 25), + OPC_RISC_REM = OPC_RISC_ARITH | (0x6 << 12) | (0x01 << 25), + OPC_RISC_REMU = OPC_RISC_ARITH | (0x7 << 12) | (0x01 << 25), +}; + + +#define MASK_OP_ARITH_IMM(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) +enum { + OPC_RISC_ADDI = OPC_RISC_ARITH_IMM | (0x0 << 12), + OPC_RISC_SLTI = OPC_RISC_ARITH_IMM | (0x2 << 12), + OPC_RISC_SLTIU = OPC_RISC_ARITH_IMM | (0x3 << 12), + OPC_RISC_XORI = OPC_RISC_ARITH_IMM | (0x4 << 12), + OPC_RISC_ORI = OPC_RISC_ARITH_IMM | (0x6 << 12), + OPC_RISC_ANDI = OPC_RISC_ARITH_IMM | (0x7 << 12), + OPC_RISC_SLLI = OPC_RISC_ARITH_IMM | (0x1 << 12), /* additional part of + IMM */ + OPC_RISC_SHIFT_RIGHT_I = OPC_RISC_ARITH_IMM | (0x5 << 12) /* SRAI, SRLI */ +}; + +#define MASK_OP_BRANCH(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) +enum { + OPC_RISC_BEQ = OPC_RISC_BRANCH | (0x0 << 12), + OPC_RISC_BNE = OPC_RISC_BRANCH | (0x1 << 12), + OPC_RISC_BLT = OPC_RISC_BRANCH | (0x4 << 12), + OPC_RISC_BGE = OPC_RISC_BRANCH | (0x5 << 12), + OPC_RISC_BLTU = OPC_RISC_BRANCH | (0x6 << 12), + OPC_RISC_BGEU = OPC_RISC_BRANCH | (0x7 << 12) +}; + +enum { + OPC_RISC_ADDIW = OPC_RISC_ARITH_IMM_W | (0x0 << 12), + OPC_RISC_SLLIW = OPC_RISC_ARITH_IMM_W | (0x1 << 12), /* additional part of + IMM */ + OPC_RISC_SHIFT_RIGHT_IW = OPC_RISC_ARITH_IMM_W 
| (0x5 << 12) /* SRAI, SRLI + */ +}; + +enum { + OPC_RISC_ADDW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x00 << 25), + OPC_RISC_SUBW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x20 << 25), + OPC_RISC_SLLW = OPC_RISC_ARITH_W | (0x1 << 12) | (0x00 << 25), + OPC_RISC_SRLW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x00 << 25), + OPC_RISC_SRAW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x20 << 25), + + /* RV64M */ + OPC_RISC_MULW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x01 << 25), + OPC_RISC_DIVW = OPC_RISC_ARITH_W | (0x4 << 12) | (0x01 << 25), + OPC_RISC_DIVUW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x01 << 25), + OPC_RISC_REMW = OPC_RISC_ARITH_W | (0x6 << 12) | (0x01 << 25), + OPC_RISC_REMUW = OPC_RISC_ARITH_W | (0x7 << 12) | (0x01 << 25), +}; + +#define MASK_OP_LOAD(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) +enum { + OPC_RISC_LB = OPC_RISC_LOAD | (0x0 << 12), + OPC_RISC_LH = OPC_RISC_LOAD | (0x1 << 12), + OPC_RISC_LW = OPC_RISC_LOAD | (0x2 << 12), + OPC_RISC_LD = OPC_RISC_LOAD | (0x3 << 12), + OPC_RISC_LBU = OPC_RISC_LOAD | (0x4 << 12), + OPC_RISC_LHU = OPC_RISC_LOAD | (0x5 << 12), + OPC_RISC_LWU = OPC_RISC_LOAD | (0x6 << 12), +}; + +#define MASK_OP_STORE(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) +enum { + OPC_RISC_SB = OPC_RISC_STORE | (0x0 << 12), + OPC_RISC_SH = OPC_RISC_STORE | (0x1 << 12), + OPC_RISC_SW = OPC_RISC_STORE | (0x2 << 12), + OPC_RISC_SD = OPC_RISC_STORE | (0x3 << 12), +}; + +#define MASK_OP_JALR(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) +/* no enum since OPC_RISC_JALR is the actual value */ + +#define MASK_OP_ATOMIC(op) \ + (MASK_OP_MAJOR(op) | (op & ((0x7 << 12) | (0x7F << 25)))) +#define MASK_OP_ATOMIC_NO_AQ_RL_SZ(op) \ + (MASK_OP_MAJOR(op) | (op & (0x1F << 27))) + +enum { + OPC_RISC_LR = OPC_RISC_ATOMIC | (0x02 << 27), + OPC_RISC_SC = OPC_RISC_ATOMIC | (0x03 << 27), + OPC_RISC_AMOSWAP = OPC_RISC_ATOMIC | (0x01 << 27), + OPC_RISC_AMOADD = OPC_RISC_ATOMIC | (0x00 << 27), + OPC_RISC_AMOXOR = OPC_RISC_ATOMIC | (0x04 << 27), + OPC_RISC_AMOAND = OPC_RISC_ATOMIC | (0x0C << 27), + OPC_RISC_AMOOR = OPC_RISC_ATOMIC | (0x08 << 27), + OPC_RISC_AMOMIN = OPC_RISC_ATOMIC | (0x10 << 27), + OPC_RISC_AMOMAX = OPC_RISC_ATOMIC | (0x14 << 27), + OPC_RISC_AMOMINU = OPC_RISC_ATOMIC | (0x18 << 27), + OPC_RISC_AMOMAXU = OPC_RISC_ATOMIC | (0x1C << 27), +}; + +#define MASK_OP_SYSTEM(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) +enum { + OPC_RISC_ECALL = OPC_RISC_SYSTEM | (0x0 << 12), + OPC_RISC_EBREAK = OPC_RISC_SYSTEM | (0x0 << 12), + OPC_RISC_ERET = OPC_RISC_SYSTEM | (0x0 << 12), + OPC_RISC_MRTS = OPC_RISC_SYSTEM | (0x0 << 12), + OPC_RISC_MRTH = OPC_RISC_SYSTEM | (0x0 << 12), + OPC_RISC_HRTS = OPC_RISC_SYSTEM | (0x0 << 12), + OPC_RISC_WFI = OPC_RISC_SYSTEM | (0x0 << 12), + OPC_RISC_SFENCEVM = OPC_RISC_SYSTEM | (0x0 << 12), + + OPC_RISC_CSRRW = OPC_RISC_SYSTEM | (0x1 << 12), + OPC_RISC_CSRRS = OPC_RISC_SYSTEM | (0x2 << 12), + OPC_RISC_CSRRC = OPC_RISC_SYSTEM | (0x3 << 12), + OPC_RISC_CSRRWI = OPC_RISC_SYSTEM | (0x5 << 12), + OPC_RISC_CSRRSI = OPC_RISC_SYSTEM | (0x6 << 12), + OPC_RISC_CSRRCI = OPC_RISC_SYSTEM | (0x7 << 12), +}; + +#define MASK_OP_FP_LOAD(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) +enum { + OPC_RISC_FLW = OPC_RISC_FP_LOAD | (0x2 << 12), + OPC_RISC_FLD = OPC_RISC_FP_LOAD | (0x3 << 12), +}; + +#define MASK_OP_FP_STORE(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12))) +enum { + OPC_RISC_FSW = OPC_RISC_FP_STORE | (0x2 << 12), + OPC_RISC_FSD = OPC_RISC_FP_STORE | (0x3 << 12), +}; + +#define MASK_OP_FP_FMADD(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25))) +enum { + OPC_RISC_FMADD_S = OPC_RISC_FMADD | (0x0 << 25), + 
OPC_RISC_FMADD_D = OPC_RISC_FMADD | (0x1 << 25), +}; + +#define MASK_OP_FP_FMSUB(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25))) +enum { + OPC_RISC_FMSUB_S = OPC_RISC_FMSUB | (0x0 << 25), + OPC_RISC_FMSUB_D = OPC_RISC_FMSUB | (0x1 << 25), +}; + +#define MASK_OP_FP_FNMADD(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25))) +enum { + OPC_RISC_FNMADD_S = OPC_RISC_FNMADD | (0x0 << 25), + OPC_RISC_FNMADD_D = OPC_RISC_FNMADD | (0x1 << 25), +}; + +#define MASK_OP_FP_FNMSUB(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25))) +enum { + OPC_RISC_FNMSUB_S = OPC_RISC_FNMSUB | (0x0 << 25), + OPC_RISC_FNMSUB_D = OPC_RISC_FNMSUB | (0x1 << 25), +}; + +#define MASK_OP_FP_ARITH(op) (MASK_OP_MAJOR(op) | (op & (0x7F << 25))) +enum { + /* float */ + OPC_RISC_FADD_S = OPC_RISC_FP_ARITH | (0x0 << 25), + OPC_RISC_FSUB_S = OPC_RISC_FP_ARITH | (0x4 << 25), + OPC_RISC_FMUL_S = OPC_RISC_FP_ARITH | (0x8 << 25), + OPC_RISC_FDIV_S = OPC_RISC_FP_ARITH | (0xC << 25), + + OPC_RISC_FSGNJ_S = OPC_RISC_FP_ARITH | (0x10 << 25), + OPC_RISC_FSGNJN_S = OPC_RISC_FP_ARITH | (0x10 << 25), + OPC_RISC_FSGNJX_S = OPC_RISC_FP_ARITH | (0x10 << 25), + + OPC_RISC_FMIN_S = OPC_RISC_FP_ARITH | (0x14 << 25), + OPC_RISC_FMAX_S = OPC_RISC_FP_ARITH | (0x14 << 25), + + OPC_RISC_FSQRT_S = OPC_RISC_FP_ARITH | (0x2C << 25), + + OPC_RISC_FEQ_S = OPC_RISC_FP_ARITH | (0x50 << 25), + OPC_RISC_FLT_S = OPC_RISC_FP_ARITH | (0x50 << 25), + OPC_RISC_FLE_S = OPC_RISC_FP_ARITH | (0x50 << 25), + + OPC_RISC_FCVT_W_S = OPC_RISC_FP_ARITH | (0x60 << 25), + OPC_RISC_FCVT_WU_S = OPC_RISC_FP_ARITH | (0x60 << 25), + OPC_RISC_FCVT_L_S = OPC_RISC_FP_ARITH | (0x60 << 25), + OPC_RISC_FCVT_LU_S = OPC_RISC_FP_ARITH | (0x60 << 25), + + OPC_RISC_FCVT_S_W = OPC_RISC_FP_ARITH | (0x68 << 25), + OPC_RISC_FCVT_S_WU = OPC_RISC_FP_ARITH | (0x68 << 25), + OPC_RISC_FCVT_S_L = OPC_RISC_FP_ARITH | (0x68 << 25), + OPC_RISC_FCVT_S_LU = OPC_RISC_FP_ARITH | (0x68 << 25), + + OPC_RISC_FMV_X_S = OPC_RISC_FP_ARITH | (0x70 << 25), + OPC_RISC_FCLASS_S = OPC_RISC_FP_ARITH | (0x70 << 25), + + OPC_RISC_FMV_S_X = OPC_RISC_FP_ARITH | (0x78 << 25), + + /* double */ + OPC_RISC_FADD_D = OPC_RISC_FP_ARITH | (0x1 << 25), + OPC_RISC_FSUB_D = OPC_RISC_FP_ARITH | (0x5 << 25), + OPC_RISC_FMUL_D = OPC_RISC_FP_ARITH | (0x9 << 25), + OPC_RISC_FDIV_D = OPC_RISC_FP_ARITH | (0xD << 25), + + OPC_RISC_FSGNJ_D = OPC_RISC_FP_ARITH | (0x11 << 25), + OPC_RISC_FSGNJN_D = OPC_RISC_FP_ARITH | (0x11 << 25), + OPC_RISC_FSGNJX_D = OPC_RISC_FP_ARITH | (0x11 << 25), + + OPC_RISC_FMIN_D = OPC_RISC_FP_ARITH | (0x15 << 25), + OPC_RISC_FMAX_D = OPC_RISC_FP_ARITH | (0x15 << 25), + + OPC_RISC_FCVT_S_D = OPC_RISC_FP_ARITH | (0x20 << 25), + + OPC_RISC_FCVT_D_S = OPC_RISC_FP_ARITH | (0x21 << 25), + + OPC_RISC_FSQRT_D = OPC_RISC_FP_ARITH | (0x2D << 25), + + OPC_RISC_FEQ_D = OPC_RISC_FP_ARITH | (0x51 << 25), + OPC_RISC_FLT_D = OPC_RISC_FP_ARITH | (0x51 << 25), + OPC_RISC_FLE_D = OPC_RISC_FP_ARITH | (0x51 << 25), + + OPC_RISC_FCVT_W_D = OPC_RISC_FP_ARITH | (0x61 << 25), + OPC_RISC_FCVT_WU_D = OPC_RISC_FP_ARITH | (0x61 << 25), + OPC_RISC_FCVT_L_D = OPC_RISC_FP_ARITH | (0x61 << 25), + OPC_RISC_FCVT_LU_D = OPC_RISC_FP_ARITH | (0x61 << 25), + + OPC_RISC_FCVT_D_W = OPC_RISC_FP_ARITH | (0x69 << 25), + OPC_RISC_FCVT_D_WU = OPC_RISC_FP_ARITH | (0x69 << 25), + OPC_RISC_FCVT_D_L = OPC_RISC_FP_ARITH | (0x69 << 25), + OPC_RISC_FCVT_D_LU = OPC_RISC_FP_ARITH | (0x69 << 25), + + OPC_RISC_FMV_X_D = OPC_RISC_FP_ARITH | (0x71 << 25), + OPC_RISC_FCLASS_D = OPC_RISC_FP_ARITH | (0x71 << 25), + + OPC_RISC_FMV_D_X = OPC_RISC_FP_ARITH | (0x79 << 25), +}; + +#define GET_B_IMM(inst) 
((extract32(inst, 8, 4) << 1) \ + | (extract32(inst, 25, 6) << 5) \ + | (extract32(inst, 7, 1) << 11) \ + | (sextract64(inst, 31, 1) << 12)) + +#define GET_STORE_IMM(inst) ((extract32(inst, 7, 5)) \ + | (sextract64(inst, 25, 7) << 5)) + +#define GET_JAL_IMM(inst) ((extract32(inst, 21, 10) << 1) \ + | (extract32(inst, 20, 1) << 11) \ + | (extract32(inst, 12, 8) << 12) \ + | (sextract64(inst, 31, 1) << 20)) + +#define GET_RM(inst) extract32(inst, 12, 3) +#define GET_RS3(inst) extract32(inst, 27, 5) +#define GET_RS1(inst) extract32(inst, 15, 5) +#define GET_RS2(inst) extract32(inst, 20, 5) +#define GET_RD(inst) extract32(inst, 7, 5) +#define GET_IMM(inst) sextract64(inst, 20, 12) + +/* RVC decoding macros */ +#define GET_C_IMM(inst) (extract32(inst, 2, 5) \ + | (sextract64(inst, 12, 1) << 5)) +#define GET_C_ZIMM(inst) (extract32(inst, 2, 5) \ + | (extract32(inst, 12, 1) << 5)) +#define GET_C_ADDI4SPN_IMM(inst) ((extract32(inst, 6, 1) << 2) \ + | (extract32(inst, 5, 1) << 3) \ + | (extract32(inst, 11, 2) << 4) \ + | (extract32(inst, 7, 4) << 6)) +#define GET_C_ADDI16SP_IMM(inst) ((extract32(inst, 6, 1) << 4) \ + | (extract32(inst, 2, 1) << 5) \ + | (extract32(inst, 5, 1) << 6) \ + | (extract32(inst, 3, 2) << 7) \ + | (sextract64(inst, 12, 1) << 9)) +#define GET_C_LWSP_IMM(inst) ((extract32(inst, 4, 3) << 2) \ + | (extract32(inst, 12, 1) << 5) \ + | (extract32(inst, 2, 2) << 6)) +#define GET_C_LDSP_IMM(inst) ((extract32(inst, 5, 2) << 3) \ + | (extract32(inst, 12, 1) << 5) \ + | (extract32(inst, 2, 3) << 6)) +#define GET_C_SWSP_IMM(inst) ((extract32(inst, 9, 4) << 2) \ + | (extract32(inst, 7, 2) << 6)) +#define GET_C_SDSP_IMM(inst) ((extract32(inst, 10, 3) << 3) \ + | (extract32(inst, 7, 3) << 6)) +#define GET_C_LW_IMM(inst) ((extract32(inst, 6, 1) << 2) \ + | (extract32(inst, 10, 3) << 3) \ + | (extract32(inst, 5, 1) << 6)) +#define GET_C_LD_IMM(inst) ((extract16(inst, 10, 3) << 3) \ + | (extract16(inst, 5, 2) << 6)) +#define GET_C_J_IMM(inst) ((extract32(inst, 3, 3) << 1) \ + | (extract32(inst, 11, 1) << 4) \ + | (extract32(inst, 2, 1) << 5) \ + | (extract32(inst, 7, 1) << 6) \ + | (extract32(inst, 6, 1) << 7) \ + | (extract32(inst, 9, 2) << 8) \ + | (extract32(inst, 8, 1) << 10) \ + | (sextract64(inst, 12, 1) << 11)) +#define GET_C_B_IMM(inst) ((extract32(inst, 3, 2) << 1) \ + | (extract32(inst, 10, 2) << 3) \ + | (extract32(inst, 2, 1) << 5) \ + | (extract32(inst, 5, 2) << 6) \ + | (sextract64(inst, 12, 1) << 8)) +#define GET_C_SIMM3(inst) extract32(inst, 10, 3) +#define GET_C_RD(inst) GET_RD(inst) +#define GET_C_RS1(inst) GET_RD(inst) +#define GET_C_RS2(inst) extract32(inst, 2, 5) +#define GET_C_RS1S(inst) (8 + extract16(inst, 7, 3)) +#define GET_C_RS2S(inst) (8 + extract16(inst, 2, 3)) + +#endif diff --git a/qemu/target/riscv/op_helper.c b/qemu/target/riscv/op_helper.c new file mode 100644 index 00000000..05dd67ae --- /dev/null +++ b/qemu/target/riscv/op_helper.c @@ -0,0 +1,208 @@ +/* + * RISC-V Emulation Helpers for QEMU. + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017-2018 SiFive, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+/* Exceptions processing helpers */
+void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
+                                         uint32_t exception, uintptr_t pc)
+{
+    CPUState *cs = env_cpu(env);
+    qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception);
+    cs->exception_index = exception;
+    cpu_loop_exit_restore(cs, pc);
+}
+
+void helper_raise_exception(CPURISCVState *env, uint32_t exception)
+{
+    riscv_raise_exception(env, exception, 0);
+}
+
+target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
+                          target_ulong csr)
+{
+    target_ulong val = 0;
+    if (riscv_csrrw(env, csr, &val, src, -1) < 0) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+    return val;
+}
+
+target_ulong helper_csrrs(CPURISCVState *env, target_ulong src,
+                          target_ulong csr, target_ulong rs1_pass)
+{
+    target_ulong val = 0;
+    if (riscv_csrrw(env, csr, &val, -1, rs1_pass ? src : 0) < 0) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+    return val;
+}
+
+target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
+                          target_ulong csr, target_ulong rs1_pass)
+{
+    target_ulong val = 0;
+    if (riscv_csrrw(env, csr, &val, 0, rs1_pass ? src : 0) < 0) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+    return val;
+}
+
+target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
+{
+    target_ulong prev_priv, prev_virt, mstatus;
+
+    if (!(env->priv >= PRV_S)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+
+    target_ulong retpc = env->sepc;
+    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
+    }
+
+    if (env->priv_ver >= PRIV_VERSION_1_10_0 &&
+        get_field(env->mstatus, MSTATUS_TSR) && !(env->priv >= PRV_M)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+
+    mstatus = env->mstatus;
+
+    if (riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) {
+        /* We support the Hypervisor extension and virtualisation is disabled */
+        target_ulong hstatus = env->hstatus;
+
+        prev_priv = get_field(mstatus, MSTATUS_SPP);
+        prev_virt = get_field(hstatus, HSTATUS_SPV);
+
+        hstatus = set_field(hstatus, HSTATUS_SPV,
+                            get_field(hstatus, HSTATUS_SP2V));
+        mstatus = set_field(mstatus, MSTATUS_SPP,
+                            get_field(hstatus, HSTATUS_SP2P));
+        hstatus = set_field(hstatus, HSTATUS_SP2V, 0);
+        hstatus = set_field(hstatus, HSTATUS_SP2P, 0);
+        mstatus = set_field(mstatus, SSTATUS_SIE,
+                            get_field(mstatus, SSTATUS_SPIE));
+        mstatus = set_field(mstatus, SSTATUS_SPIE, 1);
+
+        env->mstatus = mstatus;
+        env->hstatus = hstatus;
+
+        if (prev_virt) {
+            riscv_cpu_swap_hypervisor_regs(env);
+        }
+
+        riscv_cpu_set_virt_enabled(env, prev_virt);
+    } else {
+        prev_priv = get_field(mstatus, MSTATUS_SPP);
+
+        mstatus = set_field(mstatus,
+                            env->priv_ver >= PRIV_VERSION_1_10_0 ?
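+                            /*
+                             * From priv spec v1.10 on, sret always restores
+                             * SPIE into the dedicated SIE bit; older spec
+                             * versions kept one xIE bit per mode in mstatus
+                             * bits 3:0 (UIE at bit 0, SIE at bit 1, ...), so
+                             * the fallback shifts UIE up to the IE bit of the
+                             * privilege mode being returned to.
+                             */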
+                            MSTATUS_SIE : MSTATUS_UIE << prev_priv,
+                            get_field(mstatus, MSTATUS_SPIE));
+        mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
+        mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
+        env->mstatus = mstatus;
+    }
+
+    riscv_cpu_set_mode(env, prev_priv);
+
+    return retpc;
+}
+
+target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
+{
+    if (!(env->priv >= PRV_M)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+
+    target_ulong retpc = env->mepc;
+    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
+    }
+
+    target_ulong mstatus = env->mstatus;
+    target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
+    target_ulong prev_virt = MSTATUS_MPV_ISSET(env);
+    mstatus = set_field(mstatus,
+                        env->priv_ver >= PRIV_VERSION_1_10_0 ?
+                        MSTATUS_MIE : MSTATUS_UIE << prev_priv,
+                        get_field(mstatus, MSTATUS_MPIE));
+    mstatus = set_field(mstatus, MSTATUS_MPIE, 1);
+    mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
+#ifdef TARGET_RISCV32
+    env->mstatush = set_field(env->mstatush, MSTATUS_MPV, 0);
+#else
+    mstatus = set_field(mstatus, MSTATUS_MPV, 0);
+#endif
+    env->mstatus = mstatus;
+    riscv_cpu_set_mode(env, prev_priv);
+
+    if (riscv_has_ext(env, RVH)) {
+        if (prev_virt) {
+            riscv_cpu_swap_hypervisor_regs(env);
+        }
+
+        riscv_cpu_set_virt_enabled(env, prev_virt);
+    }
+
+    return retpc;
+}
+
+void helper_wfi(CPURISCVState *env)
+{
+    CPUState *cs = env_cpu(env);
+
+    if ((env->priv == PRV_S &&
+        env->priv_ver >= PRIV_VERSION_1_10_0 &&
+        get_field(env->mstatus, MSTATUS_TW)) ||
+        riscv_cpu_virt_enabled(env)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    } else {
+        cs->halted = 1;
+        cs->exception_index = EXCP_HLT;
+        cpu_loop_exit(cs);
+    }
+}
+
+void helper_tlb_flush(CPURISCVState *env)
+{
+    CPUState *cs = env_cpu(env);
+    if (!(env->priv >= PRV_S) ||
+        (env->priv == PRV_S &&
+         env->priv_ver >= PRIV_VERSION_1_10_0 &&
+         get_field(env->mstatus, MSTATUS_TVM))) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    } else {
+        tlb_flush(cs);
+    }
+}
+
+void helper_uc_riscv_exit(CPURISCVState *env)
+{
+    CPUState *cs = env_cpu(env);
+
+    cs->exception_index = EXCP_HLT;
+    cs->halted = 1;
+    cpu_loop_exit(cs);
+}
\ No newline at end of file
diff --git a/qemu/target/riscv/pmp.c b/qemu/target/riscv/pmp.c
new file mode 100644
index 00000000..888b99c8
--- /dev/null
+++ b/qemu/target/riscv/pmp.c
@@ -0,0 +1,379 @@
+/*
+ * QEMU RISC-V PMP (Physical Memory Protection)
+ *
+ * Author: Daire McNamara, daire.mcnamara@emdalo.com
+ *         Ivan Griffin, ivan.griffin@emdalo.com
+ *
+ * This provides a RISC-V Physical Memory Protection implementation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * PMP (Physical Memory Protection) is as-of-yet unused and needs testing.
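+ *
+ * For orientation (a recap of the priv spec, not behaviour specific to
+ * this port): each hart has up to MAX_RISCV_PMPS entries; a pmpaddr CSR
+ * holds bits [XLEN+1:2] of an address, and each pmpcfg CSR packs one
+ * 8-bit config per entry (R/W/X permission bits, a 2-bit address-matching
+ * mode and a lock bit), sizeof(target_ulong) entries per CSR.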
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "cpu.h" + +static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index, + uint8_t val); +static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index); +static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index); + +/* + * Accessor method to extract address matching type 'a field' from cfg reg + */ +static inline uint8_t pmp_get_a_field(uint8_t cfg) +{ + uint8_t a = cfg >> 3; + return a & 0x3; +} + +/* + * Check whether a PMP is locked or not. + */ +static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index) +{ + + if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) { + return 1; + } + + /* Top PMP has no 'next' to check */ + if ((pmp_index + 1u) >= MAX_RISCV_PMPS) { + return 0; + } + + /* In TOR mode, need to check the lock bit of the next pmp + * (if there is a next) + */ + const uint8_t a_field = + pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg); + if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) && + (PMP_AMATCH_TOR == a_field)) { + return 1; + } + + return 0; +} + +/* + * Count the number of active rules. + */ +static inline uint32_t pmp_get_num_rules(CPURISCVState *env) +{ + return env->pmp_state.num_rules; +} + +/* + * Accessor to get the cfg reg for a specific PMP/HART + */ +static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index) +{ + if (pmp_index < MAX_RISCV_PMPS) { + return env->pmp_state.pmp[pmp_index].cfg_reg; + } + + return 0; +} + + +/* + * Accessor to set the cfg reg for a specific PMP/HART + * Bounds checks and relevant lock bit. + */ +static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) +{ + if (pmp_index < MAX_RISCV_PMPS) { + if (!pmp_is_locked(env, pmp_index)) { + env->pmp_state.pmp[pmp_index].cfg_reg = val; + pmp_update_rule(env, pmp_index); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n"); + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "ignoring pmpcfg write - out of bounds\n"); + } +} + +static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea) +{ + /* + aaaa...aaa0 8-byte NAPOT range + aaaa...aa01 16-byte NAPOT range + aaaa...a011 32-byte NAPOT range + ... + aa01...1111 2^XLEN-byte NAPOT range + a011...1111 2^(XLEN+1)-byte NAPOT range + 0111...1111 2^(XLEN+2)-byte NAPOT range + 1111...1111 Reserved + */ + if (a == -1) { + *sa = 0u; + *ea = -1; + return; + } else { + target_ulong t1 = ctz64(~a); + target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2; + target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1; + *sa = base; + *ea = base + range; + } +} + + +/* Convert cfg/addr reg values here into simple 'sa' --> start address and 'ea' + * end address values. 
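+ * For example (illustrative numbers): a NAPOT entry whose pmpaddr is
+ * 0x200003ff has ten trailing one bits, so pmp_decode_napot() above
+ * yields sa = 0x80000000 and ea = 0x80001fff, i.e. an 8 KiB region;
+ * a TOR entry instead uses the previous pmpaddr << 2 as sa and its own
+ * pmpaddr << 2, minus 1, as ea.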
+ * This function is called relatively infrequently whereas the check that + * an address is within a pmp rule is called often, so optimise that one + */ +static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index) +{ + int i; + + env->pmp_state.num_rules = 0; + + uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg; + target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg; + target_ulong prev_addr = 0u; + target_ulong sa = 0u; + target_ulong ea = 0u; + + if (pmp_index >= 1u) { + prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg; + } + + switch (pmp_get_a_field(this_cfg)) { + case PMP_AMATCH_OFF: + sa = 0u; + ea = -1; + break; + + case PMP_AMATCH_TOR: + sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */ + ea = (this_addr << 2) - 1u; + break; + + case PMP_AMATCH_NA4: + sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */ + ea = (this_addr + 4u) - 1u; + break; + + case PMP_AMATCH_NAPOT: + pmp_decode_napot(this_addr, &sa, &ea); + break; + + default: + sa = 0u; + ea = 0u; + break; + } + + env->pmp_state.addr[pmp_index].sa = sa; + env->pmp_state.addr[pmp_index].ea = ea; + + for (i = 0; i < MAX_RISCV_PMPS; i++) { + const uint8_t a_field = + pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); + if (PMP_AMATCH_OFF != a_field) { + env->pmp_state.num_rules++; + } + } +} + +static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr) +{ + int result = 0; + + if ((addr >= env->pmp_state.addr[pmp_index].sa) + && (addr <= env->pmp_state.addr[pmp_index].ea)) { + result = 1; + } else { + result = 0; + } + + return result; +} + + +/* + * Public Interface + */ + +/* + * Check if the address has required RWX privs to complete desired operation + */ +bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, + target_ulong size, pmp_priv_t privs, target_ulong mode) +{ + int i = 0; + int ret = -1; + int pmp_size = 0; + target_ulong s = 0; + target_ulong e = 0; + pmp_priv_t allowed_privs = 0; + + /* Short cut if no rules */ + if (0 == pmp_get_num_rules(env)) { + return true; + } + + /* + * if size is unknown (0), assume that all bytes + * from addr to the end of the page will be accessed. + */ + if (size == 0) { +#ifdef _MSC_VER + pmp_size = 0 - (addr | TARGET_PAGE_MASK); +#else + pmp_size = -(addr | TARGET_PAGE_MASK); +#endif + } else { + pmp_size = size; + } + + /* 1.10 draft priv spec states there is an implicit order + from low to high */ + for (i = 0; i < MAX_RISCV_PMPS; i++) { + s = pmp_is_in_range(env, i, addr); + e = pmp_is_in_range(env, i, addr + pmp_size - 1); + + /* partially inside */ + if ((s + e) == 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "pmp violation - access is partially inside\n"); + ret = 0; + break; + } + + /* fully inside */ + const uint8_t a_field = + pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); + + /* + * If the PMP entry is not off and the address is in range, do the priv + * check + */ + if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) { + allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC; + if ((mode != PRV_M) || pmp_is_locked(env, i)) { + allowed_privs &= env->pmp_state.pmp[i].cfg_reg; + } + + if ((privs & allowed_privs) == privs) { + ret = 1; + break; + } else { + ret = 0; + break; + } + } + } + + /* No rule matched */ + if (ret == -1) { + if (mode == PRV_M) { + ret = 1; /* Privileged spec v1.10 states if no PMP entry matches an + * M-Mode access, the access succeeds */ + } else { + ret = 0; /* Other modes are not allowed to succeed if they don't + * match a rule, but there are rules. 
We've checked for
+                      * no rule earlier in this function. */
+        }
+    }
+
+    return ret == 1 ? true : false;
+}
+
+
+/*
+ * Handle a write to a pmpcfg CSR
+ */
+void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
+                      target_ulong val)
+{
+    int i;
+    uint8_t cfg_val;
+
+    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "ignoring pmpcfg write - incorrect address\n");
+        return;
+    }
+
+    for (i = 0; i < sizeof(target_ulong); i++) {
+        cfg_val = (val >> 8 * i) & 0xff;
+        pmp_write_cfg(env, (reg_index * sizeof(target_ulong)) + i,
+                      cfg_val);
+    }
+}
+
+
+/*
+ * Handle a read from a pmpcfg CSR
+ */
+target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
+{
+    int i;
+    target_ulong cfg_val = 0;
+    target_ulong val = 0;
+
+    for (i = 0; i < sizeof(target_ulong); i++) {
+        val = pmp_read_cfg(env, (reg_index * sizeof(target_ulong)) + i);
+        cfg_val |= (val << (i * 8));
+    }
+
+    return cfg_val;
+}
+
+
+/*
+ * Handle a write to a pmpaddr CSR
+ */
+void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
+                       target_ulong val)
+{
+    if (addr_index < MAX_RISCV_PMPS) {
+        if (!pmp_is_locked(env, addr_index)) {
+            env->pmp_state.pmp[addr_index].addr_reg = val;
+            pmp_update_rule(env, addr_index);
+        } else {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "ignoring pmpaddr write - locked\n");
+        }
+    } else {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "ignoring pmpaddr write - out of bounds\n");
+    }
+}
+
+
+/*
+ * Handle a read from a pmpaddr CSR
+ */
+target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
+{
+    target_ulong val = 0;
+
+    if (addr_index < MAX_RISCV_PMPS) {
+        val = env->pmp_state.pmp[addr_index].addr_reg;
+    } else {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "ignoring pmpaddr read - out of bounds\n");
+    }
+
+    return val;
+}
diff --git a/qemu/target/riscv/pmp.h b/qemu/target/riscv/pmp.h
new file mode 100644
index 00000000..8e197931
--- /dev/null
+++ b/qemu/target/riscv/pmp.h
@@ -0,0 +1,64 @@
+/*
+ * QEMU RISC-V PMP (Physical Memory Protection)
+ *
+ * Author: Daire McNamara, daire.mcnamara@emdalo.com
+ *         Ivan Griffin, ivan.griffin@emdalo.com
+ *
+ * This provides a RISC-V Physical Memory Protection interface
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#ifndef RISCV_PMP_H +#define RISCV_PMP_H + +typedef enum { + PMP_READ = 1 << 0, + PMP_WRITE = 1 << 1, + PMP_EXEC = 1 << 2, + PMP_LOCK = 1 << 7 +} pmp_priv_t; + +typedef enum { + PMP_AMATCH_OFF, /* Null (off) */ + PMP_AMATCH_TOR, /* Top of Range */ + PMP_AMATCH_NA4, /* Naturally aligned four-byte region */ + PMP_AMATCH_NAPOT /* Naturally aligned power-of-two region */ +} pmp_am_t; + +typedef struct { + target_ulong addr_reg; + uint8_t cfg_reg; +} pmp_entry_t; + +typedef struct { + target_ulong sa; + target_ulong ea; +} pmp_addr_t; + +typedef struct { + pmp_entry_t pmp[MAX_RISCV_PMPS]; + pmp_addr_t addr[MAX_RISCV_PMPS]; + uint32_t num_rules; +} pmp_table_t; + +void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index, + target_ulong val); +target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index); +void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, + target_ulong val); +target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index); +bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, + target_ulong size, pmp_priv_t priv, target_ulong mode); + +#endif diff --git a/qemu/target/riscv/riscv32/decode_insn16.inc.c b/qemu/target/riscv/riscv32/decode_insn16.inc.c new file mode 100644 index 00000000..66ebf612 --- /dev/null +++ b/qemu/target/riscv/riscv32/decode_insn16.inc.c @@ -0,0 +1,477 @@ +/* This file is autogenerated by scripts/decodetree.py. */ + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wredundant-decls" +# ifdef __clang__ +# pragma GCC diagnostic ignored "-Wtypedef-redefinition" +# endif +#endif + +typedef arg_empty arg_illegal; +static bool trans_illegal(DisasContext *ctx, arg_illegal *a); +typedef arg_i arg_addi; +static bool trans_addi(DisasContext *ctx, arg_addi *a); +typedef arg_i arg_fld; +static bool trans_fld(DisasContext *ctx, arg_fld *a); +typedef arg_i arg_lw; +static bool trans_lw(DisasContext *ctx, arg_lw *a); +typedef arg_s arg_fsd; +static bool trans_fsd(DisasContext *ctx, arg_fsd *a); +typedef arg_s arg_sw; +static bool trans_sw(DisasContext *ctx, arg_sw *a); +typedef arg_u arg_lui; +static bool trans_lui(DisasContext *ctx, arg_lui *a); +typedef arg_shift arg_srli; +static bool trans_srli(DisasContext *ctx, arg_srli *a); +typedef arg_shift arg_srai; +static bool trans_srai(DisasContext *ctx, arg_srai *a); +typedef arg_i arg_andi; +static bool trans_andi(DisasContext *ctx, arg_andi *a); +typedef arg_r arg_sub; +static bool trans_sub(DisasContext *ctx, arg_sub *a); +typedef arg_r arg_xor; +static bool trans_xor(DisasContext *ctx, arg_xor *a); +typedef arg_r arg_or; +static bool trans_or(DisasContext *ctx, arg_or *a); +typedef arg_r arg_and; +static bool trans_and(DisasContext *ctx, arg_and *a); +typedef arg_j arg_jal; +static bool trans_jal(DisasContext *ctx, arg_jal *a); +typedef arg_b arg_beq; +static bool trans_beq(DisasContext *ctx, arg_beq *a); +typedef arg_b arg_bne; +static bool trans_bne(DisasContext *ctx, arg_bne *a); +typedef arg_shift arg_slli; +static bool trans_slli(DisasContext *ctx, arg_slli *a); +typedef arg_i arg_jalr; +static bool trans_jalr(DisasContext *ctx, arg_jalr *a); +typedef arg_empty arg_ebreak; +static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a); +typedef arg_r arg_add; +static bool trans_add(DisasContext *ctx, arg_add *a); +typedef arg_i arg_flw; +static bool trans_flw(DisasContext *ctx, arg_flw *a); +typedef arg_s arg_fsw; +static bool trans_fsw(DisasContext *ctx, arg_fsw *a); + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE 
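+/*
+ * Decode flow sketch (illustrative, worked out by hand): the 16-bit word
+ * 0x4501 is "c.li a0, 0". decode_insn16() below matches it in the
+ * quadrant-01/funct3-010 case, decode_insn16_extract_c_li() fills arg_i
+ * with rd = 10, rs1 = 0, imm = 0, and the compressed instruction is then
+ * handled by the same trans_addi() used for the 32-bit "addi" -- RVC
+ * forms decode as aliases of their full-size counterparts.
+ */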
+# pragma GCC diagnostic pop +#endif + +static void decode_insn16_extract_c_addi16sp(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_4(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 2, 1)), 2, 30, extract32(insn, 5, 1)), 3, 29, extract32(insn, 3, 2)), 5, 27, sextract32(insn, 12, 1))); + a->rs1 = 2; + a->rd = 2; +} + +static void decode_insn16_extract_c_addi4spn(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_2(ctx, deposit32(deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 5, 1)), 2, 30, extract32(insn, 11, 2)), 4, 28, extract32(insn, 7, 4))); + a->rs1 = 2; + a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); +} + +static void decode_insn16_extract_c_andi(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); +} + +static void decode_insn16_extract_c_jalr(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = 0; + a->rs1 = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_ldsp(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_3(ctx, deposit32(deposit32(extract32(insn, 5, 2), 2, 30, extract32(insn, 12, 1)), 3, 29, extract32(insn, 2, 3))); + a->rs1 = 2; + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_li(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); + a->rs1 = 0; + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_lui(DisasContext *ctx, arg_u *a, uint16_t insn) +{ + a->imm = ex_shift_12(ctx, deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1))); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_lwsp(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 4, 3), 3, 29, extract32(insn, 12, 1)), 4, 28, extract32(insn, 2, 2))); + a->rs1 = 2; + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_mv(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = 0; + a->rs1 = extract32(insn, 2, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_sdsp(DisasContext *ctx, arg_s *a, uint16_t insn) +{ + a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 7, 3))); + a->rs1 = 2; + a->rs2 = extract32(insn, 2, 5); +} + +static void decode_insn16_extract_c_shift(DisasContext *ctx, arg_shift *a, uint16_t insn) +{ + a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->shamt = ex_rvc_shifti(ctx, deposit32(extract32(insn, 2, 5), 5, 27, extract32(insn, 12, 1))); +} + +static void decode_insn16_extract_c_shift2(DisasContext *ctx, arg_shift *a, uint16_t insn) +{ + a->rd = extract32(insn, 7, 5); + a->rs1 = extract32(insn, 7, 5); + a->shamt = ex_rvc_shifti(ctx, deposit32(extract32(insn, 2, 5), 5, 27, extract32(insn, 12, 1))); +} + +static void decode_insn16_extract_c_swsp(DisasContext *ctx, arg_s *a, uint16_t insn) +{ + a->imm = ex_shift_2(ctx, deposit32(extract32(insn, 9, 4), 4, 28, extract32(insn, 7, 2))); + a->rs1 = 2; + a->rs2 = extract32(insn, 2, 5); +} + +static void decode_insn16_extract_cb_z(DisasContext *ctx, arg_b *a, uint16_t insn) +{ + a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 3, 2), 2, 30, extract32(insn, 10, 
2)), 4, 28, extract32(insn, 2, 1)), 5, 27, extract32(insn, 5, 2)), 7, 25, sextract32(insn, 12, 1))); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rs2 = 0; +} + +static void decode_insn16_extract_ci(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); + a->rs1 = extract32(insn, 7, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_cj(DisasContext *ctx, arg_j *a, uint16_t insn) +{ + a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(deposit32(deposit32(deposit32(deposit32(extract32(insn, 3, 3), 3, 29, extract32(insn, 11, 1)), 4, 28, extract32(insn, 2, 1)), 5, 27, extract32(insn, 7, 1)), 6, 26, extract32(insn, 6, 1)), 7, 25, extract32(insn, 9, 2)), 9, 23, extract32(insn, 8, 1)), 10, 22, sextract32(insn, 12, 1))); +} + +static void decode_insn16_extract_cl_d(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 5, 2))); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); +} + +static void decode_insn16_extract_cl_w(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 10, 3)), 4, 28, extract32(insn, 5, 1))); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); +} + +static void decode_insn16_extract_cr(DisasContext *ctx, arg_r *a, uint16_t insn) +{ + a->rs2 = extract32(insn, 2, 5); + a->rs1 = extract32(insn, 7, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_cs_2(DisasContext *ctx, arg_r *a, uint16_t insn) +{ + a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); +} + +static void decode_insn16_extract_cs_d(DisasContext *ctx, arg_s *a, uint16_t insn) +{ + a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 5, 2))); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); +} + +static void decode_insn16_extract_cs_w(DisasContext *ctx, arg_s *a, uint16_t insn) +{ + a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 10, 3)), 4, 28, extract32(insn, 5, 1))); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); +} + +static void decode_insn16_extract_decode_insn16_Fmt_22(DisasContext *ctx, arg_empty *a, uint16_t insn) +{ +} + +static bool decode_insn16(DisasContext *ctx, uint16_t insn) +{ + union { + arg_b f_b; + arg_empty f_empty; + arg_i f_i; + arg_j f_j; + arg_r f_r; + arg_s f_s; + arg_shift f_shift; + arg_u f_u; + } u; + + switch (insn & 0x0000e003) { + case 0x00000000: + /* 000..... ......00 */ + if ((insn & 0x00001fe0) == 0x00000000) { + /* 00000000 000...00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:87 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + ctx->invalid = true; + if (trans_illegal(ctx, &u.f_empty)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:88 */ + decode_insn16_extract_c_addi4spn(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + return false; + case 0x00000001: + /* 000..... 
......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:96 */ + decode_insn16_extract_ci(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + return false; + case 0x00000002: + /* 000..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:115 */ + decode_insn16_extract_c_shift2(ctx, &u.f_shift, insn); + if (trans_slli(ctx, &u.f_shift)) return true; + return false; + case 0x00002000: + /* 001..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:90 */ + decode_insn16_extract_cl_d(ctx, &u.f_i, insn); + if (trans_fld(ctx, &u.f_i)) return true; + return false; + case 0x00002001: + /* 001..... ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-32.decode:24 */ + decode_insn16_extract_cj(ctx, &u.f_j, insn); + u.f_j.rd = 1; + if (trans_jal(ctx, &u.f_j)) return true; + return false; + case 0x00002002: + /* 001..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:116 */ + decode_insn16_extract_c_ldsp(ctx, &u.f_i, insn); + if (trans_fld(ctx, &u.f_i)) return true; + return false; + case 0x00004000: + /* 010..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:91 */ + decode_insn16_extract_cl_w(ctx, &u.f_i, insn); + if (trans_lw(ctx, &u.f_i)) return true; + return false; + case 0x00004001: + /* 010..... ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:97 */ + decode_insn16_extract_c_li(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + return false; + case 0x00004002: + /* 010..... ......10 */ + if ((insn & 0x00000f80) == 0x00000000) { + /* 010.0000 0.....10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:118 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + if (trans_illegal(ctx, &u.f_empty)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:119 */ + decode_insn16_extract_c_lwsp(ctx, &u.f_i, insn); + if (trans_lw(ctx, &u.f_i)) return true; + return false; + case 0x00006000: + /* 011..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-32.decode:20 */ + decode_insn16_extract_cl_w(ctx, &u.f_i, insn); + if (trans_flw(ctx, &u.f_i)) return true; + return false; + case 0x00006001: + /* 011..... ......01 */ + if ((insn & 0x0000107c) == 0x00000000) { + /* 0110.... .0000001 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:99 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + if (trans_illegal(ctx, &u.f_empty)) return true; + } + if ((insn & 0x00000f80) == 0x00000100) { + /* 011.0001 0.....01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:100 */ + decode_insn16_extract_c_addi16sp(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:101 */ + decode_insn16_extract_c_lui(ctx, &u.f_u, insn); + if (trans_lui(ctx, &u.f_u)) return true; + return false; + case 0x00006002: + /* 011..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-32.decode:27 */ + decode_insn16_extract_c_lwsp(ctx, &u.f_i, insn); + if (trans_flw(ctx, &u.f_i)) return true; + return false; + case 0x00008001: + /* 100..... ......01 */ + switch ((insn >> 10) & 0x3) { + case 0x0: + /* 100.00.. 
......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:103 */ + decode_insn16_extract_c_shift(ctx, &u.f_shift, insn); + if (trans_srli(ctx, &u.f_shift)) return true; + return false; + case 0x1: + /* 100.01.. ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:104 */ + decode_insn16_extract_c_shift(ctx, &u.f_shift, insn); + if (trans_srai(ctx, &u.f_shift)) return true; + return false; + case 0x2: + /* 100.10.. ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:105 */ + decode_insn16_extract_c_andi(ctx, &u.f_i, insn); + if (trans_andi(ctx, &u.f_i)) return true; + return false; + case 0x3: + /* 100.11.. ......01 */ + decode_insn16_extract_cs_2(ctx, &u.f_r, insn); + switch (insn & 0x00001060) { + case 0x00000000: + /* 100011.. .00...01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:106 */ + if (trans_sub(ctx, &u.f_r)) return true; + return false; + case 0x00000020: + /* 100011.. .01...01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:107 */ + if (trans_xor(ctx, &u.f_r)) return true; + return false; + case 0x00000040: + /* 100011.. .10...01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:108 */ + if (trans_or(ctx, &u.f_r)) return true; + return false; + case 0x00000060: + /* 100011.. .11...01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:109 */ + if (trans_and(ctx, &u.f_r)) return true; + return false; + } + return false; + } + return false; + case 0x00008002: + /* 100..... ......10 */ + switch ((insn >> 12) & 0x1) { + case 0x0: + /* 1000.... ......10 */ + if ((insn & 0x00000ffc) == 0x00000000) { + /* 10000000 00000010 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:122 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + if (trans_illegal(ctx, &u.f_empty)) return true; + } + if ((insn & 0x0000007c) == 0x00000000) { + /* 1000.... .0000010 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:123 */ + decode_insn16_extract_c_jalr(ctx, &u.f_i, insn); + u.f_i.rd = 0; + if (trans_jalr(ctx, &u.f_i)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:124 */ + decode_insn16_extract_c_mv(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* 1001.... ......10 */ + if ((insn & 0x00000ffc) == 0x00000000) { + /* 10010000 00000010 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:127 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + if (trans_ebreak(ctx, &u.f_empty)) return true; + } + if ((insn & 0x0000007c) == 0x00000000) { + /* 1001.... .0000010 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:128 */ + decode_insn16_extract_c_jalr(ctx, &u.f_i, insn); + u.f_i.rd = 1; + if (trans_jalr(ctx, &u.f_i)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:129 */ + decode_insn16_extract_cr(ctx, &u.f_r, insn); + if (trans_add(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x0000a000: + /* 101..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:92 */ + decode_insn16_extract_cs_d(ctx, &u.f_s, insn); + if (trans_fsd(ctx, &u.f_s)) return true; + return false; + case 0x0000a001: + /* 101..... 
......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:110 */ + decode_insn16_extract_cj(ctx, &u.f_j, insn); + u.f_j.rd = 0; + if (trans_jal(ctx, &u.f_j)) return true; + return false; + case 0x0000a002: + /* 101..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:131 */ + decode_insn16_extract_c_sdsp(ctx, &u.f_s, insn); + if (trans_fsd(ctx, &u.f_s)) return true; + return false; + case 0x0000c000: + /* 110..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:93 */ + decode_insn16_extract_cs_w(ctx, &u.f_s, insn); + if (trans_sw(ctx, &u.f_s)) return true; + return false; + case 0x0000c001: + /* 110..... ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:111 */ + decode_insn16_extract_cb_z(ctx, &u.f_b, insn); + if (trans_beq(ctx, &u.f_b)) return true; + return false; + case 0x0000c002: + /* 110..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:132 */ + decode_insn16_extract_c_swsp(ctx, &u.f_s, insn); + if (trans_sw(ctx, &u.f_s)) return true; + return false; + case 0x0000e000: + /* 111..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-32.decode:21 */ + decode_insn16_extract_cs_w(ctx, &u.f_s, insn); + if (trans_fsw(ctx, &u.f_s)) return true; + return false; + case 0x0000e001: + /* 111..... ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:112 */ + decode_insn16_extract_cb_z(ctx, &u.f_b, insn); + if (trans_bne(ctx, &u.f_b)) return true; + return false; + case 0x0000e002: + /* 111..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-32.decode:28 */ + decode_insn16_extract_c_swsp(ctx, &u.f_s, insn); + if (trans_fsw(ctx, &u.f_s)) return true; + return false; + } + return false; +} diff --git a/qemu/target/riscv/riscv32/decode_insn32.inc.c b/qemu/target/riscv/riscv32/decode_insn32.inc.c new file mode 100644 index 00000000..c4c25de1 --- /dev/null +++ b/qemu/target/riscv/riscv32/decode_insn32.inc.c @@ -0,0 +1,1430 @@ +/* This file is autogenerated by scripts/decodetree.py. 
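+ *
+ * It declares one argument struct per instruction format, one field
+ * extractor per format, and a decode_insn32() dispatcher that switches
+ * on opcode/funct bits and hands the unpacked fields to per-instruction
+ * trans_*() callbacks; the trans_* bodies live in the translator sources
+ * (the insn_trans/*.inc.c files, in QEMU's layout).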
*/ + +typedef struct { + int aq; + int rd; + int rl; + int rs1; + int rs2; +} arg_atomic; + +typedef struct { + int imm; + int rs1; + int rs2; +} arg_b; + +typedef struct { + int rd; + int rm; + int rs1; + int rs2; + int rs3; +} arg_decode_insn3210; + +typedef struct { + int rd; + int rm; + int rs1; + int rs2; +} arg_decode_insn3211; + +typedef struct { + int rd; + int rm; + int rs1; +} arg_decode_insn3212; + +typedef struct { + int rd; + int rs1; +} arg_decode_insn3213; + +typedef struct { + int rs1; + int rs2; +} arg_decode_insn3214; + +typedef struct { + int rs1; +} arg_decode_insn3215; + +typedef struct { + int pred; + int succ; +} arg_decode_insn3216; + +typedef struct { + int csr; + int rd; + int rs1; +} arg_decode_insn329; + +typedef struct { +#ifdef _MSC_VER + int dummy; // MSVC does not allow empty struct +#endif +} arg_empty; + +typedef struct { + int imm; + int rd; + int rs1; +} arg_i; + +typedef struct { + int imm; + int rd; +} arg_j; + +typedef struct { + int rd; + int rs1; + int rs2; +} arg_r; + +typedef struct { + int imm; + int rs1; + int rs2; +} arg_s; + +typedef struct { + int rd; + int rs1; + int shamt; +} arg_shift; + +typedef struct { + int imm; + int rd; +} arg_u; + +typedef arg_empty arg_ecall; +static bool trans_ecall(DisasContext *ctx, arg_ecall *a); +typedef arg_empty arg_ebreak; +static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a); +typedef arg_empty arg_uret; +static bool trans_uret(DisasContext *ctx, arg_uret *a); +typedef arg_empty arg_sret; +static bool trans_sret(DisasContext *ctx, arg_sret *a); +typedef arg_empty arg_mret; +static bool trans_mret(DisasContext *ctx, arg_mret *a); +typedef arg_empty arg_wfi; +static bool trans_wfi(DisasContext *ctx, arg_wfi *a); +typedef arg_decode_insn3214 arg_hfence_gvma; +static bool trans_hfence_gvma(DisasContext *ctx, arg_hfence_gvma *a); +typedef arg_decode_insn3214 arg_hfence_bvma; +static bool trans_hfence_bvma(DisasContext *ctx, arg_hfence_bvma *a); +typedef arg_decode_insn3214 arg_sfence_vma; +static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a); +typedef arg_decode_insn3215 arg_sfence_vm; +static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a); +typedef arg_u arg_lui; +static bool trans_lui(DisasContext *ctx, arg_lui *a); +typedef arg_u arg_auipc; +static bool trans_auipc(DisasContext *ctx, arg_auipc *a); +typedef arg_j arg_jal; +static bool trans_jal(DisasContext *ctx, arg_jal *a); +typedef arg_i arg_jalr; +static bool trans_jalr(DisasContext *ctx, arg_jalr *a); +typedef arg_b arg_beq; +static bool trans_beq(DisasContext *ctx, arg_beq *a); +typedef arg_b arg_bne; +static bool trans_bne(DisasContext *ctx, arg_bne *a); +typedef arg_b arg_blt; +static bool trans_blt(DisasContext *ctx, arg_blt *a); +typedef arg_b arg_bge; +static bool trans_bge(DisasContext *ctx, arg_bge *a); +typedef arg_b arg_bltu; +static bool trans_bltu(DisasContext *ctx, arg_bltu *a); +typedef arg_b arg_bgeu; +static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a); +typedef arg_i arg_lb; +static bool trans_lb(DisasContext *ctx, arg_lb *a); +typedef arg_i arg_lh; +static bool trans_lh(DisasContext *ctx, arg_lh *a); +typedef arg_i arg_lw; +static bool trans_lw(DisasContext *ctx, arg_lw *a); +typedef arg_i arg_lbu; +static bool trans_lbu(DisasContext *ctx, arg_lbu *a); +typedef arg_i arg_lhu; +static bool trans_lhu(DisasContext *ctx, arg_lhu *a); +typedef arg_s arg_sb; +static bool trans_sb(DisasContext *ctx, arg_sb *a); +typedef arg_s arg_sh; +static bool trans_sh(DisasContext *ctx, arg_sh *a); +typedef arg_s arg_sw; 
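+/*
+ * Each instruction aliases the argument struct of its format (e.g. every
+ * I-type load above shares arg_i), so one extractor per format can serve
+ * many trans_*() callbacks while each callback keeps its own argument
+ * type name.
+ */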
+static bool trans_sw(DisasContext *ctx, arg_sw *a); +typedef arg_i arg_addi; +static bool trans_addi(DisasContext *ctx, arg_addi *a); +typedef arg_i arg_slti; +static bool trans_slti(DisasContext *ctx, arg_slti *a); +typedef arg_i arg_sltiu; +static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a); +typedef arg_i arg_xori; +static bool trans_xori(DisasContext *ctx, arg_xori *a); +typedef arg_i arg_ori; +static bool trans_ori(DisasContext *ctx, arg_ori *a); +typedef arg_i arg_andi; +static bool trans_andi(DisasContext *ctx, arg_andi *a); +typedef arg_shift arg_slli; +static bool trans_slli(DisasContext *ctx, arg_slli *a); +typedef arg_shift arg_srli; +static bool trans_srli(DisasContext *ctx, arg_srli *a); +typedef arg_shift arg_srai; +static bool trans_srai(DisasContext *ctx, arg_srai *a); +typedef arg_r arg_add; +static bool trans_add(DisasContext *ctx, arg_add *a); +typedef arg_r arg_sub; +static bool trans_sub(DisasContext *ctx, arg_sub *a); +typedef arg_r arg_sll; +static bool trans_sll(DisasContext *ctx, arg_sll *a); +typedef arg_r arg_slt; +static bool trans_slt(DisasContext *ctx, arg_slt *a); +typedef arg_r arg_sltu; +static bool trans_sltu(DisasContext *ctx, arg_sltu *a); +typedef arg_r arg_xor; +static bool trans_xor(DisasContext *ctx, arg_xor *a); +typedef arg_r arg_srl; +static bool trans_srl(DisasContext *ctx, arg_srl *a); +typedef arg_r arg_sra; +static bool trans_sra(DisasContext *ctx, arg_sra *a); +typedef arg_r arg_or; +static bool trans_or(DisasContext *ctx, arg_or *a); +typedef arg_r arg_and; +static bool trans_and(DisasContext *ctx, arg_and *a); +typedef arg_decode_insn3216 arg_fence; +static bool trans_fence(DisasContext *ctx, arg_fence *a); +typedef arg_empty arg_fence_i; +static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a); +typedef arg_decode_insn329 arg_csrrw; +static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a); +typedef arg_decode_insn329 arg_csrrs; +static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a); +typedef arg_decode_insn329 arg_csrrc; +static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a); +typedef arg_decode_insn329 arg_csrrwi; +static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a); +typedef arg_decode_insn329 arg_csrrsi; +static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a); +typedef arg_decode_insn329 arg_csrrci; +static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a); +typedef arg_r arg_mul; +static bool trans_mul(DisasContext *ctx, arg_mul *a); +typedef arg_r arg_mulh; +static bool trans_mulh(DisasContext *ctx, arg_mulh *a); +typedef arg_r arg_mulhsu; +static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a); +typedef arg_r arg_mulhu; +static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a); +typedef arg_r arg_div; +static bool trans_div(DisasContext *ctx, arg_div *a); +typedef arg_r arg_divu; +static bool trans_divu(DisasContext *ctx, arg_divu *a); +typedef arg_r arg_rem; +static bool trans_rem(DisasContext *ctx, arg_rem *a); +typedef arg_r arg_remu; +static bool trans_remu(DisasContext *ctx, arg_remu *a); +typedef arg_atomic arg_lr_w; +static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a); +typedef arg_atomic arg_sc_w; +static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a); +typedef arg_atomic arg_amoswap_w; +static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a); +typedef arg_atomic arg_amoadd_w; +static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a); +typedef arg_atomic arg_amoxor_w; +static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a); +typedef arg_atomic arg_amoand_w; 
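+/*
+ * arg_atomic additionally carries the aq/rl (acquire/release) ordering
+ * bits, taken from insn[26] and insn[25] by the atom_ld/atom_st
+ * extractors further down; lr.w is decoded with rs2 forced to 0.
+ */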
+static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a); +typedef arg_atomic arg_amoor_w; +static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a); +typedef arg_atomic arg_amomin_w; +static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a); +typedef arg_atomic arg_amomax_w; +static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a); +typedef arg_atomic arg_amominu_w; +static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a); +typedef arg_atomic arg_amomaxu_w; +static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a); +typedef arg_i arg_flw; +static bool trans_flw(DisasContext *ctx, arg_flw *a); +typedef arg_s arg_fsw; +static bool trans_fsw(DisasContext *ctx, arg_fsw *a); +typedef arg_decode_insn3210 arg_fmadd_s; +static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a); +typedef arg_decode_insn3210 arg_fmsub_s; +static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a); +typedef arg_decode_insn3210 arg_fnmsub_s; +static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a); +typedef arg_decode_insn3210 arg_fnmadd_s; +static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a); +typedef arg_decode_insn3211 arg_fadd_s; +static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a); +typedef arg_decode_insn3211 arg_fsub_s; +static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a); +typedef arg_decode_insn3211 arg_fmul_s; +static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a); +typedef arg_decode_insn3211 arg_fdiv_s; +static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a); +typedef arg_decode_insn3212 arg_fsqrt_s; +static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a); +typedef arg_r arg_fsgnj_s; +static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a); +typedef arg_r arg_fsgnjn_s; +static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a); +typedef arg_r arg_fsgnjx_s; +static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a); +typedef arg_r arg_fmin_s; +static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a); +typedef arg_r arg_fmax_s; +static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a); +typedef arg_decode_insn3212 arg_fcvt_w_s; +static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a); +typedef arg_decode_insn3212 arg_fcvt_wu_s; +static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a); +typedef arg_decode_insn3213 arg_fmv_x_w; +static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a); +typedef arg_r arg_feq_s; +static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a); +typedef arg_r arg_flt_s; +static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a); +typedef arg_r arg_fle_s; +static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a); +typedef arg_decode_insn3213 arg_fclass_s; +static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a); +typedef arg_decode_insn3212 arg_fcvt_s_w; +static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a); +typedef arg_decode_insn3212 arg_fcvt_s_wu; +static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a); +typedef arg_decode_insn3213 arg_fmv_w_x; +static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a); +typedef arg_i arg_fld; +static bool trans_fld(DisasContext *ctx, arg_fld *a); +typedef arg_s arg_fsd; +static bool trans_fsd(DisasContext *ctx, arg_fsd *a); +typedef arg_decode_insn3210 arg_fmadd_d; +static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a); +typedef arg_decode_insn3210 arg_fmsub_d; +static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a); +typedef arg_decode_insn3210 arg_fnmsub_d; +static bool 
trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a); +typedef arg_decode_insn3210 arg_fnmadd_d; +static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a); +typedef arg_decode_insn3211 arg_fadd_d; +static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a); +typedef arg_decode_insn3211 arg_fsub_d; +static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a); +typedef arg_decode_insn3211 arg_fmul_d; +static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a); +typedef arg_decode_insn3211 arg_fdiv_d; +static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a); +typedef arg_decode_insn3212 arg_fsqrt_d; +static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a); +typedef arg_r arg_fsgnj_d; +static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a); +typedef arg_r arg_fsgnjn_d; +static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a); +typedef arg_r arg_fsgnjx_d; +static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a); +typedef arg_r arg_fmin_d; +static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a); +typedef arg_r arg_fmax_d; +static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a); +typedef arg_decode_insn3212 arg_fcvt_s_d; +static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a); +typedef arg_decode_insn3212 arg_fcvt_d_s; +static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a); +typedef arg_r arg_feq_d; +static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a); +typedef arg_r arg_flt_d; +static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a); +typedef arg_r arg_fle_d; +static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a); +typedef arg_decode_insn3213 arg_fclass_d; +static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a); +typedef arg_decode_insn3212 arg_fcvt_w_d; +static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a); +typedef arg_decode_insn3212 arg_fcvt_wu_d; +static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a); +typedef arg_decode_insn3212 arg_fcvt_d_w; +static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a); +typedef arg_decode_insn3212 arg_fcvt_d_wu; +static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a); + +static void decode_insn32_extract_atom_ld(DisasContext *ctx, arg_atomic *a, uint32_t insn) +{ + a->aq = extract32(insn, 26, 1); + a->rl = extract32(insn, 25, 1); + a->rs2 = 0; + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_atom_st(DisasContext *ctx, arg_atomic *a, uint32_t insn) +{ + a->aq = extract32(insn, 26, 1); + a->rl = extract32(insn, 25, 1); + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_b(DisasContext *ctx, arg_b *a, uint32_t insn) +{ + a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(extract32(insn, 8, 4), 4, 28, extract32(insn, 25, 6)), 10, 22, extract32(insn, 7, 1)), 11, 21, sextract32(insn, 31, 1))); + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_csr(DisasContext *ctx, arg_decode_insn329 *a, uint32_t insn) +{ + a->csr = extract32(insn, 20, 12); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_decode_insn32_Fmt_18(DisasContext *ctx, arg_empty *a, uint32_t insn) +{ +} + +static void decode_insn32_extract_decode_insn32_Fmt_19(DisasContext *ctx, arg_decode_insn3216 *a, uint32_t insn) +{ + a->pred = extract32(insn, 24, 4); + a->succ = extract32(insn, 20, 4); +} + +static void 
decode_insn32_extract_hfence_bvma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) +{ + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_hfence_gvma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) +{ + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_i(DisasContext *ctx, arg_i *a, uint32_t insn) +{ + a->imm = sextract32(insn, 20, 12); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_j(DisasContext *ctx, arg_j *a, uint32_t insn) +{ + a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(extract32(insn, 21, 10), 10, 22, extract32(insn, 20, 1)), 11, 21, extract32(insn, 12, 8)), 19, 13, sextract32(insn, 31, 1))); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_r(DisasContext *ctx, arg_r *a, uint32_t insn) +{ + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_r2(DisasContext *ctx, arg_decode_insn3213 *a, uint32_t insn) +{ + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_r2_rm(DisasContext *ctx, arg_decode_insn3212 *a, uint32_t insn) +{ + a->rs1 = extract32(insn, 15, 5); + a->rm = extract32(insn, 12, 3); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_r4_rm(DisasContext *ctx, arg_decode_insn3210 *a, uint32_t insn) +{ + a->rs3 = extract32(insn, 27, 5); + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); + a->rm = extract32(insn, 12, 3); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_r_rm(DisasContext *ctx, arg_decode_insn3211 *a, uint32_t insn) +{ + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); + a->rm = extract32(insn, 12, 3); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_s(DisasContext *ctx, arg_s *a, uint32_t insn) +{ + a->imm = deposit32(extract32(insn, 7, 5), 5, 27, sextract32(insn, 25, 7)); + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_sfence_vm(DisasContext *ctx, arg_decode_insn3215 *a, uint32_t insn) +{ + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_sfence_vma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) +{ + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_sh(DisasContext *ctx, arg_shift *a, uint32_t insn) +{ + a->shamt = extract32(insn, 20, 10); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_u(DisasContext *ctx, arg_u *a, uint32_t insn) +{ + a->imm = ex_shift_12(ctx, sextract32(insn, 12, 20)); + a->rd = extract32(insn, 7, 5); +} + +static bool decode_insn32(DisasContext *ctx, uint32_t insn) +{ + union { + arg_atomic f_atomic; + arg_b f_b; + arg_decode_insn3210 f_decode_insn3210; + arg_decode_insn3211 f_decode_insn3211; + arg_decode_insn3212 f_decode_insn3212; + arg_decode_insn3213 f_decode_insn3213; + arg_decode_insn3214 f_decode_insn3214; + arg_decode_insn3215 f_decode_insn3215; + arg_decode_insn3216 f_decode_insn3216; + arg_decode_insn329 f_decode_insn329; + arg_empty f_empty; + arg_i f_i; + arg_j f_j; + arg_r f_r; + arg_s f_s; + arg_shift f_shift; + arg_u f_u; + } u; + + switch (insn & 0x0000007f) { + case 0x00000003: + /* ........ ........ ........ 
.0000011 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:96 */ + if (trans_lb(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* ........ ........ .001.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:97 */ + if (trans_lh(ctx, &u.f_i)) return true; + return false; + case 0x2: + /* ........ ........ .010.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:98 */ + if (trans_lw(ctx, &u.f_i)) return true; + return false; + case 0x4: + /* ........ ........ .100.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:99 */ + if (trans_lbu(ctx, &u.f_i)) return true; + return false; + case 0x5: + /* ........ ........ .101.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:100 */ + if (trans_lhu(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x00000007: + /* ........ ........ ........ .0000111 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + switch ((insn >> 12) & 0x7) { + case 0x2: + /* ........ ........ .010.... .0000111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:156 */ + if (trans_flw(ctx, &u.f_i)) return true; + return false; + case 0x3: + /* ........ ........ .011.... .0000111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:184 */ + if (trans_fld(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x0000000f: + /* ........ ........ ........ .0001111 */ + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .0001111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:123 */ + decode_insn32_extract_decode_insn32_Fmt_19(ctx, &u.f_decode_insn3216, insn); + if (trans_fence(ctx, &u.f_decode_insn3216)) return true; + return false; + case 0x1: + /* ........ ........ .001.... .0001111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:124 */ + decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); + if (trans_fence_i(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x00000013: + /* ........ ........ ........ .0010011 */ + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:104 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* ........ ........ .001.... .0010011 */ + decode_insn32_extract_sh(ctx, &u.f_shift, insn); + switch ((insn >> 30) & 0x3) { + case 0x0: + /* 00...... ........ .001.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:110 */ + if (trans_slli(ctx, &u.f_shift)) return true; + return false; + } + return false; + case 0x2: + /* ........ ........ .010.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:105 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_slti(ctx, &u.f_i)) return true; + return false; + case 0x3: + /* ........ ........ .011.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:106 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_sltiu(ctx, &u.f_i)) return true; + return false; + case 0x4: + /* ........ 
........ .100.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:107 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_xori(ctx, &u.f_i)) return true; + return false; + case 0x5: + /* ........ ........ .101.... .0010011 */ + decode_insn32_extract_sh(ctx, &u.f_shift, insn); + switch ((insn >> 30) & 0x3) { + case 0x0: + /* 00...... ........ .101.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:111 */ + if (trans_srli(ctx, &u.f_shift)) return true; + return false; + case 0x1: + /* 01...... ........ .101.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:112 */ + if (trans_srai(ctx, &u.f_shift)) return true; + return false; + } + return false; + case 0x6: + /* ........ ........ .110.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:108 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_ori(ctx, &u.f_i)) return true; + return false; + case 0x7: + /* ........ ........ .111.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:109 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_andi(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x00000017: + /* ........ ........ ........ .0010111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:87 */ + decode_insn32_extract_u(ctx, &u.f_u, insn); + if (trans_auipc(ctx, &u.f_u)) return true; + return false; + case 0x00000023: + /* ........ ........ ........ .0100011 */ + decode_insn32_extract_s(ctx, &u.f_s, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .0100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:101 */ + if (trans_sb(ctx, &u.f_s)) return true; + return false; + case 0x1: + /* ........ ........ .001.... .0100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:102 */ + if (trans_sh(ctx, &u.f_s)) return true; + return false; + case 0x2: + /* ........ ........ .010.... .0100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:103 */ + if (trans_sw(ctx, &u.f_s)) return true; + return false; + } + return false; + case 0x00000027: + /* ........ ........ ........ .0100111 */ + decode_insn32_extract_s(ctx, &u.f_s, insn); + switch ((insn >> 12) & 0x7) { + case 0x2: + /* ........ ........ .010.... .0100111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:157 */ + if (trans_fsw(ctx, &u.f_s)) return true; + return false; + case 0x3: + /* ........ ........ .011.... .0100111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:185 */ + if (trans_fsd(ctx, &u.f_s)) return true; + return false; + } + return false; + case 0x0000002f: + /* ........ ........ ........ .0101111 */ + switch (insn & 0xf8007000) { + case 0x00002000: + /* 00000... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:146 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoadd_w(ctx, &u.f_atomic)) return true; + return false; + case 0x08002000: + /* 00001... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:145 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoswap_w(ctx, &u.f_atomic)) return true; + return false; + case 0x10002000: + /* 00010... ........ .010.... 
.0101111 */ + decode_insn32_extract_atom_ld(ctx, &u.f_atomic, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 00010..0 0000.... .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:143 */ + if (trans_lr_w(ctx, &u.f_atomic)) return true; + return false; + } + return false; + case 0x18002000: + /* 00011... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:144 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_sc_w(ctx, &u.f_atomic)) return true; + return false; + case 0x20002000: + /* 00100... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:147 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoxor_w(ctx, &u.f_atomic)) return true; + return false; + case 0x40002000: + /* 01000... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:149 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoor_w(ctx, &u.f_atomic)) return true; + return false; + case 0x60002000: + /* 01100... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:148 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoand_w(ctx, &u.f_atomic)) return true; + return false; + case 0x80002000: + /* 10000... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:150 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amomin_w(ctx, &u.f_atomic)) return true; + return false; + case 0xa0002000: + /* 10100... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:151 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amomax_w(ctx, &u.f_atomic)) return true; + return false; + case 0xc0002000: + /* 11000... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:152 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amominu_w(ctx, &u.f_atomic)) return true; + return false; + case 0xe0002000: + /* 11100... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:153 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amomaxu_w(ctx, &u.f_atomic)) return true; + return false; + } + return false; + case 0x00000033: + /* ........ ........ ........ .0110011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch (insn & 0xfe007000) { + case 0x00000000: + /* 0000000. ........ .000.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:113 */ + if (trans_add(ctx, &u.f_r)) return true; + return false; + case 0x00001000: + /* 0000000. ........ .001.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:115 */ + if (trans_sll(ctx, &u.f_r)) return true; + return false; + case 0x00002000: + /* 0000000. ........ .010.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:116 */ + if (trans_slt(ctx, &u.f_r)) return true; + return false; + case 0x00003000: + /* 0000000. ........ .011.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:117 */ + if (trans_sltu(ctx, &u.f_r)) return true; + return false; + case 0x00004000: + /* 0000000. ........ .100.... 
.0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:118 */ + if (trans_xor(ctx, &u.f_r)) return true; + return false; + case 0x00005000: + /* 0000000. ........ .101.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:119 */ + if (trans_srl(ctx, &u.f_r)) return true; + return false; + case 0x00006000: + /* 0000000. ........ .110.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:121 */ + if (trans_or(ctx, &u.f_r)) return true; + return false; + case 0x00007000: + /* 0000000. ........ .111.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:122 */ + if (trans_and(ctx, &u.f_r)) return true; + return false; + case 0x02000000: + /* 0000001. ........ .000.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:133 */ + if (trans_mul(ctx, &u.f_r)) return true; + return false; + case 0x02001000: + /* 0000001. ........ .001.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:134 */ + if (trans_mulh(ctx, &u.f_r)) return true; + return false; + case 0x02002000: + /* 0000001. ........ .010.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:135 */ + if (trans_mulhsu(ctx, &u.f_r)) return true; + return false; + case 0x02003000: + /* 0000001. ........ .011.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:136 */ + if (trans_mulhu(ctx, &u.f_r)) return true; + return false; + case 0x02004000: + /* 0000001. ........ .100.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:137 */ + if (trans_div(ctx, &u.f_r)) return true; + return false; + case 0x02005000: + /* 0000001. ........ .101.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:138 */ + if (trans_divu(ctx, &u.f_r)) return true; + return false; + case 0x02006000: + /* 0000001. ........ .110.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:139 */ + if (trans_rem(ctx, &u.f_r)) return true; + return false; + case 0x02007000: + /* 0000001. ........ .111.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:140 */ + if (trans_remu(ctx, &u.f_r)) return true; + return false; + case 0x40000000: + /* 0100000. ........ .000.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:114 */ + if (trans_sub(ctx, &u.f_r)) return true; + return false; + case 0x40005000: + /* 0100000. ........ .101.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:120 */ + if (trans_sra(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x00000037: + /* ........ ........ ........ .0110111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:86 */ + decode_insn32_extract_u(ctx, &u.f_u, insn); + if (trans_lui(ctx, &u.f_u)) return true; + return false; + case 0x00000043: + /* ........ ........ ........ .1000011 */ + decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); + switch ((insn >> 25) & 0x3) { + case 0x0: + /* .....00. ........ ........ .1000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:158 */ + if (trans_fmadd_s(ctx, &u.f_decode_insn3210)) return true; + return false; + case 0x1: + /* .....01. ........ ........ 
.1000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:186 */ + if (trans_fmadd_d(ctx, &u.f_decode_insn3210)) return true; + return false; + } + return false; + case 0x00000047: + /* ........ ........ ........ .1000111 */ + decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); + switch ((insn >> 25) & 0x3) { + case 0x0: + /* .....00. ........ ........ .1000111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:159 */ + if (trans_fmsub_s(ctx, &u.f_decode_insn3210)) return true; + return false; + case 0x1: + /* .....01. ........ ........ .1000111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:187 */ + if (trans_fmsub_d(ctx, &u.f_decode_insn3210)) return true; + return false; + } + return false; + case 0x0000004b: + /* ........ ........ ........ .1001011 */ + decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); + switch ((insn >> 25) & 0x3) { + case 0x0: + /* .....00. ........ ........ .1001011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:160 */ + if (trans_fnmsub_s(ctx, &u.f_decode_insn3210)) return true; + return false; + case 0x1: + /* .....01. ........ ........ .1001011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:188 */ + if (trans_fnmsub_d(ctx, &u.f_decode_insn3210)) return true; + return false; + } + return false; + case 0x0000004f: + /* ........ ........ ........ .1001111 */ + decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); + switch ((insn >> 25) & 0x3) { + case 0x0: + /* .....00. ........ ........ .1001111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:161 */ + if (trans_fnmadd_s(ctx, &u.f_decode_insn3210)) return true; + return false; + case 0x1: + /* .....01. ........ ........ .1001111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:189 */ + if (trans_fnmadd_d(ctx, &u.f_decode_insn3210)) return true; + return false; + } + return false; + case 0x00000053: + /* ........ ........ ........ .1010011 */ + switch ((insn >> 25) & 0x7f) { + case 0x0: + /* 0000000. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:162 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fadd_s(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x1: + /* 0000001. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:190 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fadd_d(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x4: + /* 0000100. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:163 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fsub_s(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x5: + /* 0000101. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:191 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fsub_d(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x8: + /* 0001000. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:164 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fmul_s(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x9: + /* 0001001. ........ 
........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:192 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fmul_d(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0xc: + /* 0001100. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:165 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fdiv_s(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0xd: + /* 0001101. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:193 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fdiv_d(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x10: + /* 0010000. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 0010000. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:167 */ + if (trans_fsgnj_s(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 0010000. ........ .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:168 */ + if (trans_fsgnjn_s(ctx, &u.f_r)) return true; + return false; + case 0x2: + /* 0010000. ........ .010.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:169 */ + if (trans_fsgnjx_s(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x11: + /* 0010001. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 0010001. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:195 */ + if (trans_fsgnj_d(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 0010001. ........ .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:196 */ + if (trans_fsgnjn_d(ctx, &u.f_r)) return true; + return false; + case 0x2: + /* 0010001. ........ .010.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:197 */ + if (trans_fsgnjx_d(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x14: + /* 0010100. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 0010100. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:170 */ + if (trans_fmin_s(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 0010100. ........ .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:171 */ + if (trans_fmax_s(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x15: + /* 0010101. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 0010101. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:198 */ + if (trans_fmin_d(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 0010101. ........ .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:199 */ + if (trans_fmax_d(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x20: + /* 0100000. ........ ........ 
.1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x1: + /* 01000000 0001.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:200 */ + if (trans_fcvt_s_d(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x21: + /* 0100001. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 01000010 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:201 */ + if (trans_fcvt_d_s(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x2c: + /* 0101100. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 01011000 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:166 */ + if (trans_fsqrt_s(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x2d: + /* 0101101. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 01011010 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:194 */ + if (trans_fsqrt_d(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x50: + /* 1010000. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 1010000. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:177 */ + if (trans_fle_s(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 1010000. ........ .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:176 */ + if (trans_flt_s(ctx, &u.f_r)) return true; + return false; + case 0x2: + /* 1010000. ........ .010.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:175 */ + if (trans_feq_s(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x51: + /* 1010001. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 1010001. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:204 */ + if (trans_fle_d(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 1010001. ........ .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:203 */ + if (trans_flt_d(ctx, &u.f_r)) return true; + return false; + case 0x2: + /* 1010001. ........ .010.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:202 */ + if (trans_feq_d(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x60: + /* 1100000. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 11000000 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:172 */ + if (trans_fcvt_w_s(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x1: + /* 11000000 0001.... ........ 
.1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:173 */ + if (trans_fcvt_wu_s(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x61: + /* 1100001. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 11000010 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:206 */ + if (trans_fcvt_w_d(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x1: + /* 11000010 0001.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:207 */ + if (trans_fcvt_wu_d(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x68: + /* 1101000. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 11010000 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:179 */ + if (trans_fcvt_s_w(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x1: + /* 11010000 0001.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:180 */ + if (trans_fcvt_s_wu(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x69: + /* 1101001. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 11010010 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:208 */ + if (trans_fcvt_d_w(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x1: + /* 11010010 0001.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:209 */ + if (trans_fcvt_d_wu(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x70: + /* 1110000. ........ ........ .1010011 */ + decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); + switch (insn & 0x01f07000) { + case 0x00000000: + /* 11100000 0000.... .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:174 */ + if (trans_fmv_x_w(ctx, &u.f_decode_insn3213)) return true; + return false; + case 0x00001000: + /* 11100000 0000.... .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:178 */ + if (trans_fclass_s(ctx, &u.f_decode_insn3213)) return true; + return false; + } + return false; + case 0x71: + /* 1110001. ........ ........ .1010011 */ + decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); + switch (insn & 0x01f07000) { + case 0x00001000: + /* 11100010 0000.... .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:205 */ + if (trans_fclass_d(ctx, &u.f_decode_insn3213)) return true; + return false; + } + return false; + case 0x78: + /* 1111000. ........ ........ .1010011 */ + decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); + switch (insn & 0x01f07000) { + case 0x00000000: + /* 11110000 0000.... .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:181 */ + if (trans_fmv_w_x(ctx, &u.f_decode_insn3213)) return true; + return false; + } + return false; + } + return false; + case 0x00000063: + /* ........ ........ ........ 
.1100011 */ + decode_insn32_extract_b(ctx, &u.f_b, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:90 */ + if (trans_beq(ctx, &u.f_b)) return true; + return false; + case 0x1: + /* ........ ........ .001.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:91 */ + if (trans_bne(ctx, &u.f_b)) return true; + return false; + case 0x4: + /* ........ ........ .100.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:92 */ + if (trans_blt(ctx, &u.f_b)) return true; + return false; + case 0x5: + /* ........ ........ .101.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:93 */ + if (trans_bge(ctx, &u.f_b)) return true; + return false; + case 0x6: + /* ........ ........ .110.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:94 */ + if (trans_bltu(ctx, &u.f_b)) return true; + return false; + case 0x7: + /* ........ ........ .111.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:95 */ + if (trans_bgeu(ctx, &u.f_b)) return true; + return false; + } + return false; + case 0x00000067: + /* ........ ........ ........ .1100111 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .1100111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:89 */ + if (trans_jalr(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x0000006f: + /* ........ ........ ........ .1101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:88 */ + decode_insn32_extract_j(ctx, &u.f_j, insn); + if (trans_jal(ctx, &u.f_j)) return true; + return false; + case 0x00000073: + /* ........ ........ ........ .1110011 */ + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .1110011 */ + switch (insn & 0xfe000f80) { + case 0x00000000: + /* 0000000. ........ .0000000 01110011 */ + decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); + switch ((insn >> 15) & 0x3ff) { + case 0x0: + /* 00000000 00000000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:74 */ + if (trans_ecall(ctx, &u.f_empty)) return true; + return false; + case 0x20: + /* 00000000 00010000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:75 */ + if (trans_ebreak(ctx, &u.f_empty)) return true; + return false; + case 0x40: + /* 00000000 00100000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:76 */ + if (trans_uret(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x10000000: + /* 0001000. ........ .0000000 01110011 */ + switch ((insn >> 20) & 0x1f) { + case 0x2: + /* 00010000 0010.... .0000000 01110011 */ + decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); + switch ((insn >> 15) & 0x1f) { + case 0x0: + /* 00010000 00100000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:77 */ + if (trans_sret(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x4: + /* 00010000 0100.... 
.0000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:83 */ + decode_insn32_extract_sfence_vm(ctx, &u.f_decode_insn3215, insn); + if (trans_sfence_vm(ctx, &u.f_decode_insn3215)) return true; + return false; + case 0x5: + /* 00010000 0101.... .0000000 01110011 */ + decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); + switch ((insn >> 15) & 0x1f) { + case 0x0: + /* 00010000 01010000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:79 */ + if (trans_wfi(ctx, &u.f_empty)) return true; + return false; + } + return false; + } + return false; + case 0x12000000: + /* 0001001. ........ .0000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:82 */ + decode_insn32_extract_sfence_vma(ctx, &u.f_decode_insn3214, insn); + if (trans_sfence_vma(ctx, &u.f_decode_insn3214)) return true; + return false; + case 0x22000000: + /* 0010001. ........ .0000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:81 */ + decode_insn32_extract_hfence_bvma(ctx, &u.f_decode_insn3214, insn); + if (trans_hfence_bvma(ctx, &u.f_decode_insn3214)) return true; + return false; + case 0x30000000: + /* 0011000. ........ .0000000 01110011 */ + decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); + switch ((insn >> 15) & 0x3ff) { + case 0x40: + /* 00110000 00100000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:78 */ + if (trans_mret(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x62000000: + /* 0110001. ........ .0000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:80 */ + decode_insn32_extract_hfence_gvma(ctx, &u.f_decode_insn3214, insn); + if (trans_hfence_gvma(ctx, &u.f_decode_insn3214)) return true; + return false; + } + return false; + case 0x1: + /* ........ ........ .001.... .1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:125 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrw(ctx, &u.f_decode_insn329)) return true; + return false; + case 0x2: + /* ........ ........ .010.... .1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:126 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrs(ctx, &u.f_decode_insn329)) return true; + return false; + case 0x3: + /* ........ ........ .011.... .1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:127 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrc(ctx, &u.f_decode_insn329)) return true; + return false; + case 0x5: + /* ........ ........ .101.... .1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:128 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrwi(ctx, &u.f_decode_insn329)) return true; + return false; + case 0x6: + /* ........ ........ .110.... .1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:129 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrsi(ctx, &u.f_decode_insn329)) return true; + return false; + case 0x7: + /* ........ ........ .111.... 
.1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:130 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrci(ctx, &u.f_decode_insn329)) return true; + return false; + } + return false; + } + return false; +} diff --git a/qemu/target/riscv/riscv64/decode_insn16.inc.c b/qemu/target/riscv/riscv64/decode_insn16.inc.c new file mode 100644 index 00000000..71938856 --- /dev/null +++ b/qemu/target/riscv/riscv64/decode_insn16.inc.c @@ -0,0 +1,504 @@ +/* This file is autogenerated by scripts/decodetree.py. */ + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wredundant-decls" +# ifdef __clang__ +# pragma GCC diagnostic ignored "-Wtypedef-redefinition" +# endif +#endif + +typedef arg_empty arg_illegal; +static bool trans_illegal(DisasContext *ctx, arg_illegal *a); +typedef arg_i arg_addi; +static bool trans_addi(DisasContext *ctx, arg_addi *a); +typedef arg_i arg_fld; +static bool trans_fld(DisasContext *ctx, arg_fld *a); +typedef arg_i arg_lw; +static bool trans_lw(DisasContext *ctx, arg_lw *a); +typedef arg_s arg_fsd; +static bool trans_fsd(DisasContext *ctx, arg_fsd *a); +typedef arg_s arg_sw; +static bool trans_sw(DisasContext *ctx, arg_sw *a); +typedef arg_u arg_lui; +static bool trans_lui(DisasContext *ctx, arg_lui *a); +typedef arg_shift arg_srli; +static bool trans_srli(DisasContext *ctx, arg_srli *a); +typedef arg_shift arg_srai; +static bool trans_srai(DisasContext *ctx, arg_srai *a); +typedef arg_i arg_andi; +static bool trans_andi(DisasContext *ctx, arg_andi *a); +typedef arg_r arg_sub; +static bool trans_sub(DisasContext *ctx, arg_sub *a); +typedef arg_r arg_xor; +static bool trans_xor(DisasContext *ctx, arg_xor *a); +typedef arg_r arg_or; +static bool trans_or(DisasContext *ctx, arg_or *a); +typedef arg_r arg_and; +static bool trans_and(DisasContext *ctx, arg_and *a); +typedef arg_j arg_jal; +static bool trans_jal(DisasContext *ctx, arg_jal *a); +typedef arg_b arg_beq; +static bool trans_beq(DisasContext *ctx, arg_beq *a); +typedef arg_b arg_bne; +static bool trans_bne(DisasContext *ctx, arg_bne *a); +typedef arg_shift arg_slli; +static bool trans_slli(DisasContext *ctx, arg_slli *a); +typedef arg_i arg_jalr; +static bool trans_jalr(DisasContext *ctx, arg_jalr *a); +typedef arg_empty arg_ebreak; +static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a); +typedef arg_r arg_add; +static bool trans_add(DisasContext *ctx, arg_add *a); +typedef arg_i arg_ld; +static bool trans_ld(DisasContext *ctx, arg_ld *a); +typedef arg_s arg_sd; +static bool trans_sd(DisasContext *ctx, arg_sd *a); +typedef arg_i arg_addiw; +static bool trans_addiw(DisasContext *ctx, arg_addiw *a); +typedef arg_r arg_subw; +static bool trans_subw(DisasContext *ctx, arg_subw *a); +typedef arg_r arg_addw; +static bool trans_addw(DisasContext *ctx, arg_addw *a); + +#ifdef CONFIG_PRAGMA_DIAGNOSTIC_AVAILABLE +# pragma GCC diagnostic pop +#endif + +static void decode_insn16_extract_c_addi16sp(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_4(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 2, 1)), 2, 30, extract32(insn, 5, 1)), 3, 29, extract32(insn, 3, 2)), 5, 27, sextract32(insn, 12, 1))); + a->rs1 = 2; + a->rd = 2; +} + +static void decode_insn16_extract_c_addi4spn(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_2(ctx, deposit32(deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 5, 1)), 2, 30, 
extract32(insn, 11, 2)), 4, 28, extract32(insn, 7, 4))); + a->rs1 = 2; + a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); +} + +static void decode_insn16_extract_c_andi(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); +} + +static void decode_insn16_extract_c_jalr(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = 0; + a->rs1 = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_ldsp(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_3(ctx, deposit32(deposit32(extract32(insn, 5, 2), 2, 30, extract32(insn, 12, 1)), 3, 29, extract32(insn, 2, 3))); + a->rs1 = 2; + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_li(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); + a->rs1 = 0; + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_lui(DisasContext *ctx, arg_u *a, uint16_t insn) +{ + a->imm = ex_shift_12(ctx, deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1))); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_lwsp(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 4, 3), 3, 29, extract32(insn, 12, 1)), 4, 28, extract32(insn, 2, 2))); + a->rs1 = 2; + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_mv(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = 0; + a->rs1 = extract32(insn, 2, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_c_sdsp(DisasContext *ctx, arg_s *a, uint16_t insn) +{ + a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 7, 3))); + a->rs1 = 2; + a->rs2 = extract32(insn, 2, 5); +} + +static void decode_insn16_extract_c_shift(DisasContext *ctx, arg_shift *a, uint16_t insn) +{ + a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->shamt = ex_rvc_shifti(ctx, deposit32(extract32(insn, 2, 5), 5, 27, extract32(insn, 12, 1))); +} + +static void decode_insn16_extract_c_shift2(DisasContext *ctx, arg_shift *a, uint16_t insn) +{ + a->rd = extract32(insn, 7, 5); + a->rs1 = extract32(insn, 7, 5); + a->shamt = ex_rvc_shifti(ctx, deposit32(extract32(insn, 2, 5), 5, 27, extract32(insn, 12, 1))); +} + +static void decode_insn16_extract_c_swsp(DisasContext *ctx, arg_s *a, uint16_t insn) +{ + a->imm = ex_shift_2(ctx, deposit32(extract32(insn, 9, 4), 4, 28, extract32(insn, 7, 2))); + a->rs1 = 2; + a->rs2 = extract32(insn, 2, 5); +} + +static void decode_insn16_extract_cb_z(DisasContext *ctx, arg_b *a, uint16_t insn) +{ + a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(deposit32(extract32(insn, 3, 2), 2, 30, extract32(insn, 10, 2)), 4, 28, extract32(insn, 2, 1)), 5, 27, extract32(insn, 5, 2)), 7, 25, sextract32(insn, 12, 1))); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rs2 = 0; +} + +static void decode_insn16_extract_ci(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = deposit32(extract32(insn, 2, 5), 5, 27, sextract32(insn, 12, 1)); + a->rs1 = extract32(insn, 7, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_cj(DisasContext *ctx, arg_j *a, uint16_t insn) +{ + a->imm = ex_shift_1(ctx, 
deposit32(deposit32(deposit32(deposit32(deposit32(deposit32(deposit32(extract32(insn, 3, 3), 3, 29, extract32(insn, 11, 1)), 4, 28, extract32(insn, 2, 1)), 5, 27, extract32(insn, 7, 1)), 6, 26, extract32(insn, 6, 1)), 7, 25, extract32(insn, 9, 2)), 9, 23, extract32(insn, 8, 1)), 10, 22, sextract32(insn, 12, 1))); +} + +static void decode_insn16_extract_cl_d(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 5, 2))); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); +} + +static void decode_insn16_extract_cl_w(DisasContext *ctx, arg_i *a, uint16_t insn) +{ + a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 10, 3)), 4, 28, extract32(insn, 5, 1))); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rd = ex_rvc_register(ctx, extract32(insn, 2, 3)); +} + +static void decode_insn16_extract_cr(DisasContext *ctx, arg_r *a, uint16_t insn) +{ + a->rs2 = extract32(insn, 2, 5); + a->rs1 = extract32(insn, 7, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn16_extract_cs_2(DisasContext *ctx, arg_r *a, uint16_t insn) +{ + a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rd = ex_rvc_register(ctx, extract32(insn, 7, 3)); +} + +static void decode_insn16_extract_cs_d(DisasContext *ctx, arg_s *a, uint16_t insn) +{ + a->imm = ex_shift_3(ctx, deposit32(extract32(insn, 10, 3), 3, 29, extract32(insn, 5, 2))); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); +} + +static void decode_insn16_extract_cs_w(DisasContext *ctx, arg_s *a, uint16_t insn) +{ + a->imm = ex_shift_2(ctx, deposit32(deposit32(extract32(insn, 6, 1), 1, 31, extract32(insn, 10, 3)), 4, 28, extract32(insn, 5, 1))); + a->rs1 = ex_rvc_register(ctx, extract32(insn, 7, 3)); + a->rs2 = ex_rvc_register(ctx, extract32(insn, 2, 3)); +} + +static void decode_insn16_extract_decode_insn16_Fmt_22(DisasContext *ctx, arg_empty *a, uint16_t insn) +{ +} + +static bool decode_insn16(DisasContext *ctx, uint16_t insn) +{ + union { + arg_b f_b; + arg_empty f_empty; + arg_i f_i; + arg_j f_j; + arg_r f_r; + arg_s f_s; + arg_shift f_shift; + arg_u f_u; + } u; + + switch (insn & 0x0000e003) { + case 0x00000000: + /* 000..... ......00 */ + if ((insn & 0x00001fe0) == 0x00000000) { + /* 00000000 000...00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:87 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + ctx->invalid = true; + if (trans_illegal(ctx, &u.f_empty)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:88 */ + decode_insn16_extract_c_addi4spn(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + return false; + case 0x00000001: + /* 000..... ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:96 */ + decode_insn16_extract_ci(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + return false; + case 0x00000002: + /* 000..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:115 */ + decode_insn16_extract_c_shift2(ctx, &u.f_shift, insn); + if (trans_slli(ctx, &u.f_shift)) return true; + return false; + case 0x00002000: + /* 001..... 
......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:90 */ + decode_insn16_extract_cl_d(ctx, &u.f_i, insn); + if (trans_fld(ctx, &u.f_i)) return true; + return false; + case 0x00002001: + /* 001..... ......01 */ + if ((insn & 0x00000f80) == 0x00000000) { + /* 001.0000 0.....01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:25 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + if (trans_illegal(ctx, &u.f_empty)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:26 */ + decode_insn16_extract_ci(ctx, &u.f_i, insn); + if (trans_addiw(ctx, &u.f_i)) return true; + return false; + case 0x00002002: + /* 001..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:116 */ + decode_insn16_extract_c_ldsp(ctx, &u.f_i, insn); + if (trans_fld(ctx, &u.f_i)) return true; + return false; + case 0x00004000: + /* 010..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:91 */ + decode_insn16_extract_cl_w(ctx, &u.f_i, insn); + if (trans_lw(ctx, &u.f_i)) return true; + return false; + case 0x00004001: + /* 010..... ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:97 */ + decode_insn16_extract_c_li(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + return false; + case 0x00004002: + /* 010..... ......10 */ + if ((insn & 0x00000f80) == 0x00000000) { + /* 010.0000 0.....10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:118 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + if (trans_illegal(ctx, &u.f_empty)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:119 */ + decode_insn16_extract_c_lwsp(ctx, &u.f_i, insn); + if (trans_lw(ctx, &u.f_i)) return true; + return false; + case 0x00006000: + /* 011..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:20 */ + decode_insn16_extract_cl_d(ctx, &u.f_i, insn); + if (trans_ld(ctx, &u.f_i)) return true; + return false; + case 0x00006001: + /* 011..... ......01 */ + if ((insn & 0x0000107c) == 0x00000000) { + /* 0110.... .0000001 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:99 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + if (trans_illegal(ctx, &u.f_empty)) return true; + } + if ((insn & 0x00000f80) == 0x00000100) { + /* 011.0001 0.....01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:100 */ + decode_insn16_extract_c_addi16sp(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:101 */ + decode_insn16_extract_c_lui(ctx, &u.f_u, insn); + if (trans_lui(ctx, &u.f_u)) return true; + return false; + case 0x00006002: + /* 011..... ......10 */ + if ((insn & 0x00000f80) == 0x00000000) { + /* 011.0000 0.....10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:33 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + if (trans_illegal(ctx, &u.f_empty)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:34 */ + decode_insn16_extract_c_ldsp(ctx, &u.f_i, insn); + if (trans_ld(ctx, &u.f_i)) return true; + return false; + case 0x00008001: + /* 100..... 
......01 */ + switch ((insn >> 10) & 0x3) { + case 0x0: + /* 100.00.. ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:103 */ + decode_insn16_extract_c_shift(ctx, &u.f_shift, insn); + if (trans_srli(ctx, &u.f_shift)) return true; + return false; + case 0x1: + /* 100.01.. ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:104 */ + decode_insn16_extract_c_shift(ctx, &u.f_shift, insn); + if (trans_srai(ctx, &u.f_shift)) return true; + return false; + case 0x2: + /* 100.10.. ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:105 */ + decode_insn16_extract_c_andi(ctx, &u.f_i, insn); + if (trans_andi(ctx, &u.f_i)) return true; + return false; + case 0x3: + /* 100.11.. ......01 */ + decode_insn16_extract_cs_2(ctx, &u.f_r, insn); + switch (insn & 0x00001060) { + case 0x00000000: + /* 100011.. .00...01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:106 */ + if (trans_sub(ctx, &u.f_r)) return true; + return false; + case 0x00000020: + /* 100011.. .01...01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:107 */ + if (trans_xor(ctx, &u.f_r)) return true; + return false; + case 0x00000040: + /* 100011.. .10...01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:108 */ + if (trans_or(ctx, &u.f_r)) return true; + return false; + case 0x00000060: + /* 100011.. .11...01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:109 */ + if (trans_and(ctx, &u.f_r)) return true; + return false; + case 0x00001000: + /* 100111.. .00...01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:28 */ + if (trans_subw(ctx, &u.f_r)) return true; + return false; + case 0x00001020: + /* 100111.. .01...01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:29 */ + if (trans_addw(ctx, &u.f_r)) return true; + return false; + } + return false; + } + return false; + case 0x00008002: + /* 100..... ......10 */ + switch ((insn >> 12) & 0x1) { + case 0x0: + /* 1000.... ......10 */ + if ((insn & 0x00000ffc) == 0x00000000) { + /* 10000000 00000010 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:122 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + if (trans_illegal(ctx, &u.f_empty)) return true; + } + if ((insn & 0x0000007c) == 0x00000000) { + /* 1000.... .0000010 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:123 */ + decode_insn16_extract_c_jalr(ctx, &u.f_i, insn); + u.f_i.rd = 0; + if (trans_jalr(ctx, &u.f_i)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:124 */ + decode_insn16_extract_c_mv(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* 1001.... ......10 */ + if ((insn & 0x00000ffc) == 0x00000000) { + /* 10010000 00000010 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:127 */ + decode_insn16_extract_decode_insn16_Fmt_22(ctx, &u.f_empty, insn); + if (trans_ebreak(ctx, &u.f_empty)) return true; + } + if ((insn & 0x0000007c) == 0x00000000) { + /* 1001.... 
.0000010 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:128 */ + decode_insn16_extract_c_jalr(ctx, &u.f_i, insn); + u.f_i.rd = 1; + if (trans_jalr(ctx, &u.f_i)) return true; + } + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:129 */ + decode_insn16_extract_cr(ctx, &u.f_r, insn); + if (trans_add(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x0000a000: + /* 101..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:92 */ + decode_insn16_extract_cs_d(ctx, &u.f_s, insn); + if (trans_fsd(ctx, &u.f_s)) return true; + return false; + case 0x0000a001: + /* 101..... ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:110 */ + decode_insn16_extract_cj(ctx, &u.f_j, insn); + u.f_j.rd = 0; + if (trans_jal(ctx, &u.f_j)) return true; + return false; + case 0x0000a002: + /* 101..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:131 */ + decode_insn16_extract_c_sdsp(ctx, &u.f_s, insn); + if (trans_fsd(ctx, &u.f_s)) return true; + return false; + case 0x0000c000: + /* 110..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:93 */ + decode_insn16_extract_cs_w(ctx, &u.f_s, insn); + if (trans_sw(ctx, &u.f_s)) return true; + return false; + case 0x0000c001: + /* 110..... ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:111 */ + decode_insn16_extract_cb_z(ctx, &u.f_b, insn); + if (trans_beq(ctx, &u.f_b)) return true; + return false; + case 0x0000c002: + /* 110..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:132 */ + decode_insn16_extract_c_swsp(ctx, &u.f_s, insn); + if (trans_sw(ctx, &u.f_s)) return true; + return false; + case 0x0000e000: + /* 111..... ......00 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:21 */ + decode_insn16_extract_cs_d(ctx, &u.f_s, insn); + if (trans_sd(ctx, &u.f_s)) return true; + return false; + case 0x0000e001: + /* 111..... ......01 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16.decode:112 */ + decode_insn16_extract_cb_z(ctx, &u.f_b, insn); + if (trans_bne(ctx, &u.f_b)) return true; + return false; + case 0x0000e002: + /* 111..... ......10 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn16-64.decode:36 */ + decode_insn16_extract_c_sdsp(ctx, &u.f_s, insn); + if (trans_sd(ctx, &u.f_s)) return true; + return false; + } + return false; +} diff --git a/qemu/target/riscv/riscv64/decode_insn32.inc.c b/qemu/target/riscv/riscv64/decode_insn32.inc.c new file mode 100644 index 00000000..b5d78960 --- /dev/null +++ b/qemu/target/riscv/riscv64/decode_insn32.inc.c @@ -0,0 +1,1749 @@ +/* This file is autogenerated by scripts/decodetree.py. 
*/
+
+typedef struct {
+    int aq;
+    int rd;
+    int rl;
+    int rs1;
+    int rs2;
+} arg_atomic;
+
+typedef struct {
+    int imm;
+    int rs1;
+    int rs2;
+} arg_b;
+
+typedef struct {
+    int rd;
+    int rm;
+    int rs1;
+    int rs2;
+    int rs3;
+} arg_decode_insn3210;
+
+typedef struct {
+    int rd;
+    int rm;
+    int rs1;
+    int rs2;
+} arg_decode_insn3211;
+
+typedef struct {
+    int rd;
+    int rm;
+    int rs1;
+} arg_decode_insn3212;
+
+typedef struct {
+    int rd;
+    int rs1;
+} arg_decode_insn3213;
+
+typedef struct {
+    int rs1;
+    int rs2;
+} arg_decode_insn3214;
+
+typedef struct {
+    int rs1;
+} arg_decode_insn3215;
+
+typedef struct {
+    int pred;
+    int succ;
+} arg_decode_insn3216;
+
+typedef struct {
+    int csr;
+    int rd;
+    int rs1;
+} arg_decode_insn329;
+
+typedef struct {
+#ifdef _MSC_VER
+    int dummy; // MSVC does not allow empty struct
+#endif
+} arg_empty;
+
+typedef struct {
+    int imm;
+    int rd;
+    int rs1;
+} arg_i;
+
+typedef struct {
+    int imm;
+    int rd;
+} arg_j;
+
+typedef struct {
+    int rd;
+    int rs1;
+    int rs2;
+} arg_r;
+
+typedef struct {
+    int imm;
+    int rs1;
+    int rs2;
+} arg_s;
+
+typedef struct {
+    int rd;
+    int rs1;
+    int shamt;
+} arg_shift;
+
+typedef struct {
+    int imm;
+    int rd;
+} arg_u;
+
+typedef arg_empty arg_ecall;
+static bool trans_ecall(DisasContext *ctx, arg_ecall *a);
+typedef arg_empty arg_ebreak;
+static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a);
+typedef arg_empty arg_uret;
+static bool trans_uret(DisasContext *ctx, arg_uret *a);
+typedef arg_empty arg_sret;
+static bool trans_sret(DisasContext *ctx, arg_sret *a);
+typedef arg_empty arg_mret;
+static bool trans_mret(DisasContext *ctx, arg_mret *a);
+typedef arg_empty arg_wfi;
+static bool trans_wfi(DisasContext *ctx, arg_wfi *a);
+typedef arg_decode_insn3214 arg_hfence_gvma;
+static bool trans_hfence_gvma(DisasContext *ctx, arg_hfence_gvma *a);
+typedef arg_decode_insn3214 arg_hfence_bvma;
+static bool trans_hfence_bvma(DisasContext *ctx, arg_hfence_bvma *a);
+typedef arg_decode_insn3214 arg_sfence_vma;
+static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a);
+typedef arg_decode_insn3215 arg_sfence_vm;
+static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a);
+typedef arg_u arg_lui;
+static bool trans_lui(DisasContext *ctx, arg_lui *a);
+typedef arg_u arg_auipc;
+static bool trans_auipc(DisasContext *ctx, arg_auipc *a);
+typedef arg_j arg_jal;
+static bool trans_jal(DisasContext *ctx, arg_jal *a);
+typedef arg_i arg_jalr;
+static bool trans_jalr(DisasContext *ctx, arg_jalr *a);
+typedef arg_b arg_beq;
+static bool trans_beq(DisasContext *ctx, arg_beq *a);
+typedef arg_b arg_bne;
+static bool trans_bne(DisasContext *ctx, arg_bne *a);
+typedef arg_b arg_blt;
+static bool trans_blt(DisasContext *ctx, arg_blt *a);
+typedef arg_b arg_bge;
+static bool trans_bge(DisasContext *ctx, arg_bge *a);
+typedef arg_b arg_bltu;
+static bool trans_bltu(DisasContext *ctx, arg_bltu *a);
+typedef arg_b arg_bgeu;
+static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a);
+typedef arg_i arg_lb;
+static bool trans_lb(DisasContext *ctx, arg_lb *a);
+typedef arg_i arg_lh;
+static bool trans_lh(DisasContext *ctx, arg_lh *a);
+typedef arg_i arg_lw;
+static bool trans_lw(DisasContext *ctx, arg_lw *a);
+typedef arg_i arg_lbu;
+static bool trans_lbu(DisasContext *ctx, arg_lbu *a);
+typedef arg_i arg_lhu;
+static bool trans_lhu(DisasContext *ctx, arg_lhu *a);
+typedef arg_s arg_sb;
+static bool trans_sb(DisasContext *ctx, arg_sb *a);
+typedef arg_s arg_sh;
+static bool trans_sh(DisasContext *ctx, arg_sh *a);
+typedef arg_s arg_sw;
+static bool trans_sw(DisasContext *ctx, arg_sw *a);
+typedef arg_i arg_addi;
+static bool trans_addi(DisasContext *ctx, arg_addi *a);
+typedef arg_i arg_slti;
+static bool trans_slti(DisasContext *ctx, arg_slti *a);
+typedef arg_i arg_sltiu;
+static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a);
+typedef arg_i arg_xori;
+static bool trans_xori(DisasContext *ctx, arg_xori *a);
+typedef arg_i arg_ori;
+static bool trans_ori(DisasContext *ctx, arg_ori *a);
+typedef arg_i arg_andi;
+static bool trans_andi(DisasContext *ctx, arg_andi *a);
+typedef arg_shift arg_slli;
+static bool trans_slli(DisasContext *ctx, arg_slli *a);
+typedef arg_shift arg_srli;
+static bool trans_srli(DisasContext *ctx, arg_srli *a);
+typedef arg_shift arg_srai;
+static bool trans_srai(DisasContext *ctx, arg_srai *a);
+typedef arg_r arg_add;
+static bool trans_add(DisasContext *ctx, arg_add *a);
+typedef arg_r arg_sub;
+static bool trans_sub(DisasContext *ctx, arg_sub *a);
+typedef arg_r arg_sll;
+static bool trans_sll(DisasContext *ctx, arg_sll *a);
+typedef arg_r arg_slt;
+static bool trans_slt(DisasContext *ctx, arg_slt *a);
+typedef arg_r arg_sltu;
+static bool trans_sltu(DisasContext *ctx, arg_sltu *a);
+typedef arg_r arg_xor;
+static bool trans_xor(DisasContext *ctx, arg_xor *a);
+typedef arg_r arg_srl;
+static bool trans_srl(DisasContext *ctx, arg_srl *a);
+typedef arg_r arg_sra;
+static bool trans_sra(DisasContext *ctx, arg_sra *a);
+typedef arg_r arg_or;
+static bool trans_or(DisasContext *ctx, arg_or *a);
+typedef arg_r arg_and;
+static bool trans_and(DisasContext *ctx, arg_and *a);
+typedef arg_decode_insn3216 arg_fence;
+static bool trans_fence(DisasContext *ctx, arg_fence *a);
+typedef arg_empty arg_fence_i;
+static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a);
+typedef arg_decode_insn329 arg_csrrw;
+static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a);
+typedef arg_decode_insn329 arg_csrrs;
+static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a);
+typedef arg_decode_insn329 arg_csrrc;
+static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a);
+typedef arg_decode_insn329 arg_csrrwi;
+static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a);
+typedef arg_decode_insn329 arg_csrrsi;
+static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a);
+typedef arg_decode_insn329 arg_csrrci;
+static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a);
+typedef arg_r arg_mul;
+static bool trans_mul(DisasContext *ctx, arg_mul *a);
+typedef arg_r arg_mulh;
+static bool trans_mulh(DisasContext *ctx, arg_mulh *a);
+typedef arg_r arg_mulhsu;
+static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a);
+typedef arg_r arg_mulhu;
+static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a);
+typedef arg_r arg_div;
+static bool trans_div(DisasContext *ctx, arg_div *a);
+typedef arg_r arg_divu;
+static bool trans_divu(DisasContext *ctx, arg_divu *a);
+typedef arg_r arg_rem;
+static bool trans_rem(DisasContext *ctx, arg_rem *a);
+typedef arg_r arg_remu;
+static bool trans_remu(DisasContext *ctx, arg_remu *a);
+typedef arg_atomic arg_lr_w;
+static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a);
+typedef arg_atomic arg_sc_w;
+static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a);
+typedef arg_atomic arg_amoswap_w;
+static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a);
+typedef arg_atomic arg_amoadd_w;
+static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a);
+typedef arg_atomic arg_amoxor_w;
+static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a);
+typedef arg_atomic arg_amoand_w;
+static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a);
+typedef arg_atomic arg_amoor_w;
+static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a);
+typedef arg_atomic arg_amomin_w;
+static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a);
+typedef arg_atomic arg_amomax_w;
+static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a);
+typedef arg_atomic arg_amominu_w;
+static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a);
+typedef arg_atomic arg_amomaxu_w;
+static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a);
+typedef arg_i arg_flw;
+static bool trans_flw(DisasContext *ctx, arg_flw *a);
+typedef arg_s arg_fsw;
+static bool trans_fsw(DisasContext *ctx, arg_fsw *a);
+typedef arg_decode_insn3210 arg_fmadd_s;
+static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a);
+typedef arg_decode_insn3210 arg_fmsub_s;
+static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a);
+typedef arg_decode_insn3210 arg_fnmsub_s;
+static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a);
+typedef arg_decode_insn3210 arg_fnmadd_s;
+static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a);
+typedef arg_decode_insn3211 arg_fadd_s;
+static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a);
+typedef arg_decode_insn3211 arg_fsub_s;
+static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a);
+typedef arg_decode_insn3211 arg_fmul_s;
+static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a);
+typedef arg_decode_insn3211 arg_fdiv_s;
+static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a);
+typedef arg_decode_insn3212 arg_fsqrt_s;
+static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a);
+typedef arg_r arg_fsgnj_s;
+static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a);
+typedef arg_r arg_fsgnjn_s;
+static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a);
+typedef arg_r arg_fsgnjx_s;
+static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a);
+typedef arg_r arg_fmin_s;
+static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a);
+typedef arg_r arg_fmax_s;
+static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a);
+typedef arg_decode_insn3212 arg_fcvt_w_s;
+static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a);
+typedef arg_decode_insn3212 arg_fcvt_wu_s;
+static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a);
+typedef arg_decode_insn3213 arg_fmv_x_w;
+static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a);
+typedef arg_r arg_feq_s;
+static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a);
+typedef arg_r arg_flt_s;
+static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a);
+typedef arg_r arg_fle_s;
+static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a);
+typedef arg_decode_insn3213 arg_fclass_s;
+static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a);
+typedef arg_decode_insn3212 arg_fcvt_s_w;
+static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a);
+typedef arg_decode_insn3212 arg_fcvt_s_wu;
+static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a);
+typedef arg_decode_insn3213 arg_fmv_w_x;
+static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a);
+typedef arg_i arg_fld;
+static bool trans_fld(DisasContext *ctx, arg_fld *a);
+typedef arg_s arg_fsd;
+static bool trans_fsd(DisasContext *ctx, arg_fsd *a);
+typedef arg_decode_insn3210 arg_fmadd_d;
+static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a);
+typedef arg_decode_insn3210 arg_fmsub_d;
+static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a);
+typedef arg_decode_insn3210 arg_fnmsub_d;
+static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a);
+typedef arg_decode_insn3210 arg_fnmadd_d;
+static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a);
+typedef arg_decode_insn3211 arg_fadd_d;
+static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a);
+typedef arg_decode_insn3211 arg_fsub_d;
+static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a);
+typedef arg_decode_insn3211 arg_fmul_d;
+static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a);
+typedef arg_decode_insn3211 arg_fdiv_d;
+static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a);
+typedef arg_decode_insn3212 arg_fsqrt_d;
+static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a);
+typedef arg_r arg_fsgnj_d;
+static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a);
+typedef arg_r arg_fsgnjn_d;
+static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a);
+typedef arg_r arg_fsgnjx_d;
+static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a);
+typedef arg_r arg_fmin_d;
+static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a);
+typedef arg_r arg_fmax_d;
+static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a);
+typedef arg_decode_insn3212 arg_fcvt_s_d;
+static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a);
+typedef arg_decode_insn3212 arg_fcvt_d_s;
+static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a);
+typedef arg_r arg_feq_d;
+static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a);
+typedef arg_r arg_flt_d;
+static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a);
+typedef arg_r arg_fle_d;
+static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a);
+typedef arg_decode_insn3213 arg_fclass_d;
+static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a);
+typedef arg_decode_insn3212 arg_fcvt_w_d;
+static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a);
+typedef arg_decode_insn3212 arg_fcvt_wu_d;
+static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a);
+typedef arg_decode_insn3212 arg_fcvt_d_w;
+static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a);
+typedef arg_decode_insn3212 arg_fcvt_d_wu;
+static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a);
+typedef arg_i arg_lwu;
+static bool trans_lwu(DisasContext *ctx, arg_lwu *a);
+typedef arg_i arg_ld;
+static bool trans_ld(DisasContext *ctx, arg_ld *a);
+typedef arg_s arg_sd;
+static bool trans_sd(DisasContext *ctx, arg_sd *a);
+typedef arg_i arg_addiw;
+static bool trans_addiw(DisasContext *ctx, arg_addiw *a);
+typedef arg_shift arg_slliw;
+static bool trans_slliw(DisasContext *ctx, arg_slliw *a);
+typedef arg_shift arg_srliw;
+static bool trans_srliw(DisasContext *ctx, arg_srliw *a);
+typedef arg_shift arg_sraiw;
+static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a);
+typedef arg_r arg_addw;
+static bool trans_addw(DisasContext *ctx, arg_addw *a);
+typedef arg_r arg_subw;
+static bool trans_subw(DisasContext *ctx, arg_subw *a);
+typedef arg_r arg_sllw;
+static bool trans_sllw(DisasContext *ctx, arg_sllw *a);
+typedef arg_r arg_srlw;
+static bool trans_srlw(DisasContext *ctx, arg_srlw *a);
+typedef arg_r arg_sraw;
+static bool trans_sraw(DisasContext *ctx, arg_sraw *a);
+typedef arg_r arg_mulw;
+static bool trans_mulw(DisasContext *ctx, arg_mulw *a);
+typedef arg_r arg_divw;
+static bool trans_divw(DisasContext *ctx, arg_divw *a);
+typedef arg_r arg_divuw;
+static bool trans_divuw(DisasContext *ctx, arg_divuw *a);
+typedef arg_r arg_remw;
+static bool trans_remw(DisasContext *ctx, arg_remw *a);
+typedef arg_r arg_remuw;
+static bool trans_remuw(DisasContext *ctx, arg_remuw *a);
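+
+/*
+ * Decodetree pairs every instruction with an argument-struct typedef and a
+ * trans_<insn>() hook: decode_insn32() below extracts the operand fields
+ * into the struct and dispatches to the hook, which returns true once it
+ * has emitted TCG code for the instruction. As a rough, purely
+ * illustrative sketch (the real bodies live in translate.c and the files
+ * it includes, and may differ):
+ *
+ *     static bool trans_addi(DisasContext *ctx, arg_addi *a)
+ *     {
+ *         // decode_insn32() has already filled a->rd, a->rs1 and a->imm
+ *         // (the sign-extended 12-bit I-type immediate); emit TCG ops
+ *         // for gpr[a->rd] = gpr[a->rs1] + a->imm, then report success.
+ *         return true;
+ *     }
+ *
+ * The decode_insn32_extract_* helpers further down reassemble immediates
+ * that the ISA scatters across the instruction word. For the B-type
+ * branch offset, extract32(insn, 8, 4) recovers imm[4:1],
+ * extract32(insn, 25, 6) imm[10:5], extract32(insn, 7, 1) imm[11],
+ * sextract32(insn, 31, 1) the sign bits, and ex_shift_1() shifts the
+ * result left to restore the implicit zero in imm[0].
+ */
+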
+typedef arg_atomic arg_lr_d; +static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a); +typedef arg_atomic arg_sc_d; +static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a); +typedef arg_atomic arg_amoswap_d; +static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a); +typedef arg_atomic arg_amoadd_d; +static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a); +typedef arg_atomic arg_amoxor_d; +static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a); +typedef arg_atomic arg_amoand_d; +static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a); +typedef arg_atomic arg_amoor_d; +static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a); +typedef arg_atomic arg_amomin_d; +static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a); +typedef arg_atomic arg_amomax_d; +static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a); +typedef arg_atomic arg_amominu_d; +static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a); +typedef arg_atomic arg_amomaxu_d; +static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a); +typedef arg_decode_insn3212 arg_fcvt_l_s; +static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a); +typedef arg_decode_insn3212 arg_fcvt_lu_s; +static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a); +typedef arg_decode_insn3212 arg_fcvt_s_l; +static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a); +typedef arg_decode_insn3212 arg_fcvt_s_lu; +static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a); +typedef arg_decode_insn3212 arg_fcvt_l_d; +static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a); +typedef arg_decode_insn3212 arg_fcvt_lu_d; +static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a); +typedef arg_decode_insn3213 arg_fmv_x_d; +static bool trans_fmv_x_d(DisasContext *ctx, arg_fmv_x_d *a); +typedef arg_decode_insn3212 arg_fcvt_d_l; +static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a); +typedef arg_decode_insn3212 arg_fcvt_d_lu; +static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a); +typedef arg_decode_insn3213 arg_fmv_d_x; +static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a); + +static void decode_insn32_extract_atom_ld(DisasContext *ctx, arg_atomic *a, uint32_t insn) +{ + a->aq = extract32(insn, 26, 1); + a->rl = extract32(insn, 25, 1); + a->rs2 = 0; + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_atom_st(DisasContext *ctx, arg_atomic *a, uint32_t insn) +{ + a->aq = extract32(insn, 26, 1); + a->rl = extract32(insn, 25, 1); + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_b(DisasContext *ctx, arg_b *a, uint32_t insn) +{ + a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(extract32(insn, 8, 4), 4, 28, extract32(insn, 25, 6)), 10, 22, extract32(insn, 7, 1)), 11, 21, sextract32(insn, 31, 1))); + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_csr(DisasContext *ctx, arg_decode_insn329 *a, uint32_t insn) +{ + a->csr = extract32(insn, 20, 12); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_decode_insn32_Fmt_18(DisasContext *ctx, arg_empty *a, uint32_t insn) +{ +} + +static void decode_insn32_extract_decode_insn32_Fmt_19(DisasContext *ctx, arg_decode_insn3216 *a, uint32_t insn) +{ + a->pred = extract32(insn, 24, 4); + a->succ = extract32(insn, 20, 4); +} + +static void 
decode_insn32_extract_hfence_bvma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) +{ + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_hfence_gvma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) +{ + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_i(DisasContext *ctx, arg_i *a, uint32_t insn) +{ + a->imm = sextract32(insn, 20, 12); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_j(DisasContext *ctx, arg_j *a, uint32_t insn) +{ + a->imm = ex_shift_1(ctx, deposit32(deposit32(deposit32(extract32(insn, 21, 10), 10, 22, extract32(insn, 20, 1)), 11, 21, extract32(insn, 12, 8)), 19, 13, sextract32(insn, 31, 1))); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_r(DisasContext *ctx, arg_r *a, uint32_t insn) +{ + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_r2(DisasContext *ctx, arg_decode_insn3213 *a, uint32_t insn) +{ + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_r2_rm(DisasContext *ctx, arg_decode_insn3212 *a, uint32_t insn) +{ + a->rs1 = extract32(insn, 15, 5); + a->rm = extract32(insn, 12, 3); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_r4_rm(DisasContext *ctx, arg_decode_insn3210 *a, uint32_t insn) +{ + a->rs3 = extract32(insn, 27, 5); + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); + a->rm = extract32(insn, 12, 3); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_r_rm(DisasContext *ctx, arg_decode_insn3211 *a, uint32_t insn) +{ + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); + a->rm = extract32(insn, 12, 3); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_s(DisasContext *ctx, arg_s *a, uint32_t insn) +{ + a->imm = deposit32(extract32(insn, 7, 5), 5, 27, sextract32(insn, 25, 7)); + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_sfence_vm(DisasContext *ctx, arg_decode_insn3215 *a, uint32_t insn) +{ + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_sfence_vma(DisasContext *ctx, arg_decode_insn3214 *a, uint32_t insn) +{ + a->rs2 = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); +} + +static void decode_insn32_extract_sh(DisasContext *ctx, arg_shift *a, uint32_t insn) +{ + a->shamt = extract32(insn, 20, 10); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_sh5(DisasContext *ctx, arg_shift *a, uint32_t insn) +{ + a->shamt = extract32(insn, 20, 5); + a->rs1 = extract32(insn, 15, 5); + a->rd = extract32(insn, 7, 5); +} + +static void decode_insn32_extract_u(DisasContext *ctx, arg_u *a, uint32_t insn) +{ + a->imm = ex_shift_12(ctx, sextract32(insn, 12, 20)); + a->rd = extract32(insn, 7, 5); +} + +static bool decode_insn32(DisasContext *ctx, uint32_t insn) +{ + union { + arg_atomic f_atomic; + arg_b f_b; + arg_decode_insn3210 f_decode_insn3210; + arg_decode_insn3211 f_decode_insn3211; + arg_decode_insn3212 f_decode_insn3212; + arg_decode_insn3213 f_decode_insn3213; + arg_decode_insn3214 f_decode_insn3214; + arg_decode_insn3215 f_decode_insn3215; + arg_decode_insn3216 f_decode_insn3216; + arg_decode_insn329 f_decode_insn329; + arg_empty f_empty; + arg_i 
f_i; + arg_j f_j; + arg_r f_r; + arg_s f_s; + arg_shift f_shift; + arg_u f_u; + } u; + + switch (insn & 0x0000007f) { + case 0x00000003: + /* ........ ........ ........ .0000011 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:96 */ + if (trans_lb(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* ........ ........ .001.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:97 */ + if (trans_lh(ctx, &u.f_i)) return true; + return false; + case 0x2: + /* ........ ........ .010.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:98 */ + if (trans_lw(ctx, &u.f_i)) return true; + return false; + case 0x3: + /* ........ ........ .011.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:28 */ + if (trans_ld(ctx, &u.f_i)) return true; + return false; + case 0x4: + /* ........ ........ .100.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:99 */ + if (trans_lbu(ctx, &u.f_i)) return true; + return false; + case 0x5: + /* ........ ........ .101.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:100 */ + if (trans_lhu(ctx, &u.f_i)) return true; + return false; + case 0x6: + /* ........ ........ .110.... .0000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:27 */ + if (trans_lwu(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x00000007: + /* ........ ........ ........ .0000111 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + switch ((insn >> 12) & 0x7) { + case 0x2: + /* ........ ........ .010.... .0000111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:156 */ + if (trans_flw(ctx, &u.f_i)) return true; + return false; + case 0x3: + /* ........ ........ .011.... .0000111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:184 */ + if (trans_fld(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x0000000f: + /* ........ ........ ........ .0001111 */ + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .0001111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:123 */ + decode_insn32_extract_decode_insn32_Fmt_19(ctx, &u.f_decode_insn3216, insn); + if (trans_fence(ctx, &u.f_decode_insn3216)) return true; + return false; + case 0x1: + /* ........ ........ .001.... .0001111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:124 */ + decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); + if (trans_fence_i(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x00000013: + /* ........ ........ ........ .0010011 */ + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:104 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_addi(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* ........ ........ .001.... .0010011 */ + decode_insn32_extract_sh(ctx, &u.f_shift, insn); + switch ((insn >> 30) & 0x3) { + case 0x0: + /* 00...... ........ .001.... 
.0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:110 */ + if (trans_slli(ctx, &u.f_shift)) return true; + return false; + } + return false; + case 0x2: + /* ........ ........ .010.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:105 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_slti(ctx, &u.f_i)) return true; + return false; + case 0x3: + /* ........ ........ .011.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:106 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_sltiu(ctx, &u.f_i)) return true; + return false; + case 0x4: + /* ........ ........ .100.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:107 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_xori(ctx, &u.f_i)) return true; + return false; + case 0x5: + /* ........ ........ .101.... .0010011 */ + decode_insn32_extract_sh(ctx, &u.f_shift, insn); + switch ((insn >> 30) & 0x3) { + case 0x0: + /* 00...... ........ .101.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:111 */ + if (trans_srli(ctx, &u.f_shift)) return true; + return false; + case 0x1: + /* 01...... ........ .101.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:112 */ + if (trans_srai(ctx, &u.f_shift)) return true; + return false; + } + return false; + case 0x6: + /* ........ ........ .110.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:108 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_ori(ctx, &u.f_i)) return true; + return false; + case 0x7: + /* ........ ........ .111.... .0010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:109 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_andi(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x00000017: + /* ........ ........ ........ .0010111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:87 */ + decode_insn32_extract_u(ctx, &u.f_u, insn); + if (trans_auipc(ctx, &u.f_u)) return true; + return false; + case 0x0000001b: + /* ........ ........ ........ .0011011 */ + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .0011011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:30 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + if (trans_addiw(ctx, &u.f_i)) return true; + return false; + case 0x1: + /* ........ ........ .001.... .0011011 */ + decode_insn32_extract_sh5(ctx, &u.f_shift, insn); + switch ((insn >> 25) & 0x7f) { + case 0x0: + /* 0000000. ........ .001.... .0011011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:31 */ + if (trans_slliw(ctx, &u.f_shift)) return true; + return false; + } + return false; + case 0x5: + /* ........ ........ .101.... .0011011 */ + decode_insn32_extract_sh5(ctx, &u.f_shift, insn); + switch ((insn >> 25) & 0x7f) { + case 0x0: + /* 0000000. ........ .101.... .0011011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:32 */ + if (trans_srliw(ctx, &u.f_shift)) return true; + return false; + case 0x20: + /* 0100000. ........ .101.... 
.0011011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:33 */ + if (trans_sraiw(ctx, &u.f_shift)) return true; + return false; + } + return false; + } + return false; + case 0x00000023: + /* ........ ........ ........ .0100011 */ + decode_insn32_extract_s(ctx, &u.f_s, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .0100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:101 */ + if (trans_sb(ctx, &u.f_s)) return true; + return false; + case 0x1: + /* ........ ........ .001.... .0100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:102 */ + if (trans_sh(ctx, &u.f_s)) return true; + return false; + case 0x2: + /* ........ ........ .010.... .0100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:103 */ + if (trans_sw(ctx, &u.f_s)) return true; + return false; + case 0x3: + /* ........ ........ .011.... .0100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:29 */ + if (trans_sd(ctx, &u.f_s)) return true; + return false; + } + return false; + case 0x00000027: + /* ........ ........ ........ .0100111 */ + decode_insn32_extract_s(ctx, &u.f_s, insn); + switch ((insn >> 12) & 0x7) { + case 0x2: + /* ........ ........ .010.... .0100111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:157 */ + if (trans_fsw(ctx, &u.f_s)) return true; + return false; + case 0x3: + /* ........ ........ .011.... .0100111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:185 */ + if (trans_fsd(ctx, &u.f_s)) return true; + return false; + } + return false; + case 0x0000002f: + /* ........ ........ ........ .0101111 */ + switch (insn & 0xf8007000) { + case 0x00002000: + /* 00000... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:146 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoadd_w(ctx, &u.f_atomic)) return true; + return false; + case 0x00003000: + /* 00000... ........ .011.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:51 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoadd_d(ctx, &u.f_atomic)) return true; + return false; + case 0x08002000: + /* 00001... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:145 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoswap_w(ctx, &u.f_atomic)) return true; + return false; + case 0x08003000: + /* 00001... ........ .011.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:50 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoswap_d(ctx, &u.f_atomic)) return true; + return false; + case 0x10002000: + /* 00010... ........ .010.... .0101111 */ + decode_insn32_extract_atom_ld(ctx, &u.f_atomic, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 00010..0 0000.... .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:143 */ + if (trans_lr_w(ctx, &u.f_atomic)) return true; + return false; + } + return false; + case 0x10003000: + /* 00010... ........ .011.... .0101111 */ + decode_insn32_extract_atom_ld(ctx, &u.f_atomic, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 00010..0 0000.... .011.... 
.0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:48 */ + if (trans_lr_d(ctx, &u.f_atomic)) return true; + return false; + } + return false; + case 0x18002000: + /* 00011... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:144 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_sc_w(ctx, &u.f_atomic)) return true; + return false; + case 0x18003000: + /* 00011... ........ .011.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:49 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_sc_d(ctx, &u.f_atomic)) return true; + return false; + case 0x20002000: + /* 00100... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:147 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoxor_w(ctx, &u.f_atomic)) return true; + return false; + case 0x20003000: + /* 00100... ........ .011.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:52 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoxor_d(ctx, &u.f_atomic)) return true; + return false; + case 0x40002000: + /* 01000... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:149 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoor_w(ctx, &u.f_atomic)) return true; + return false; + case 0x40003000: + /* 01000... ........ .011.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:54 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoor_d(ctx, &u.f_atomic)) return true; + return false; + case 0x60002000: + /* 01100... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:148 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoand_w(ctx, &u.f_atomic)) return true; + return false; + case 0x60003000: + /* 01100... ........ .011.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:53 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amoand_d(ctx, &u.f_atomic)) return true; + return false; + case 0x80002000: + /* 10000... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:150 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amomin_w(ctx, &u.f_atomic)) return true; + return false; + case 0x80003000: + /* 10000... ........ .011.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:55 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amomin_d(ctx, &u.f_atomic)) return true; + return false; + case 0xa0002000: + /* 10100... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:151 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amomax_w(ctx, &u.f_atomic)) return true; + return false; + case 0xa0003000: + /* 10100... ........ .011.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:56 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amomax_d(ctx, &u.f_atomic)) return true; + return false; + case 0xc0002000: + /* 11000... ........ .010.... 
.0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:152 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amominu_w(ctx, &u.f_atomic)) return true; + return false; + case 0xc0003000: + /* 11000... ........ .011.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:57 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amominu_d(ctx, &u.f_atomic)) return true; + return false; + case 0xe0002000: + /* 11100... ........ .010.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:153 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amomaxu_w(ctx, &u.f_atomic)) return true; + return false; + case 0xe0003000: + /* 11100... ........ .011.... .0101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:58 */ + decode_insn32_extract_atom_st(ctx, &u.f_atomic, insn); + if (trans_amomaxu_d(ctx, &u.f_atomic)) return true; + return false; + } + return false; + case 0x00000033: + /* ........ ........ ........ .0110011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch (insn & 0xfe007000) { + case 0x00000000: + /* 0000000. ........ .000.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:113 */ + if (trans_add(ctx, &u.f_r)) return true; + return false; + case 0x00001000: + /* 0000000. ........ .001.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:115 */ + if (trans_sll(ctx, &u.f_r)) return true; + return false; + case 0x00002000: + /* 0000000. ........ .010.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:116 */ + if (trans_slt(ctx, &u.f_r)) return true; + return false; + case 0x00003000: + /* 0000000. ........ .011.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:117 */ + if (trans_sltu(ctx, &u.f_r)) return true; + return false; + case 0x00004000: + /* 0000000. ........ .100.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:118 */ + if (trans_xor(ctx, &u.f_r)) return true; + return false; + case 0x00005000: + /* 0000000. ........ .101.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:119 */ + if (trans_srl(ctx, &u.f_r)) return true; + return false; + case 0x00006000: + /* 0000000. ........ .110.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:121 */ + if (trans_or(ctx, &u.f_r)) return true; + return false; + case 0x00007000: + /* 0000000. ........ .111.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:122 */ + if (trans_and(ctx, &u.f_r)) return true; + return false; + case 0x02000000: + /* 0000001. ........ .000.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:133 */ + if (trans_mul(ctx, &u.f_r)) return true; + return false; + case 0x02001000: + /* 0000001. ........ .001.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:134 */ + if (trans_mulh(ctx, &u.f_r)) return true; + return false; + case 0x02002000: + /* 0000001. ........ .010.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:135 */ + if (trans_mulhsu(ctx, &u.f_r)) return true; + return false; + case 0x02003000: + /* 0000001. ........ .011.... 
.0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:136 */ + if (trans_mulhu(ctx, &u.f_r)) return true; + return false; + case 0x02004000: + /* 0000001. ........ .100.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:137 */ + if (trans_div(ctx, &u.f_r)) return true; + return false; + case 0x02005000: + /* 0000001. ........ .101.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:138 */ + if (trans_divu(ctx, &u.f_r)) return true; + return false; + case 0x02006000: + /* 0000001. ........ .110.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:139 */ + if (trans_rem(ctx, &u.f_r)) return true; + return false; + case 0x02007000: + /* 0000001. ........ .111.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:140 */ + if (trans_remu(ctx, &u.f_r)) return true; + return false; + case 0x40000000: + /* 0100000. ........ .000.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:114 */ + if (trans_sub(ctx, &u.f_r)) return true; + return false; + case 0x40005000: + /* 0100000. ........ .101.... .0110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:120 */ + if (trans_sra(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x00000037: + /* ........ ........ ........ .0110111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:86 */ + decode_insn32_extract_u(ctx, &u.f_u, insn); + if (trans_lui(ctx, &u.f_u)) return true; + return false; + case 0x0000003b: + /* ........ ........ ........ .0111011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch (insn & 0xfe007000) { + case 0x00000000: + /* 0000000. ........ .000.... .0111011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:34 */ + if (trans_addw(ctx, &u.f_r)) return true; + return false; + case 0x00001000: + /* 0000000. ........ .001.... .0111011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:36 */ + if (trans_sllw(ctx, &u.f_r)) return true; + return false; + case 0x00005000: + /* 0000000. ........ .101.... .0111011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:37 */ + if (trans_srlw(ctx, &u.f_r)) return true; + return false; + case 0x02000000: + /* 0000001. ........ .000.... .0111011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:41 */ + if (trans_mulw(ctx, &u.f_r)) return true; + return false; + case 0x02004000: + /* 0000001. ........ .100.... .0111011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:42 */ + if (trans_divw(ctx, &u.f_r)) return true; + return false; + case 0x02005000: + /* 0000001. ........ .101.... .0111011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:43 */ + if (trans_divuw(ctx, &u.f_r)) return true; + return false; + case 0x02006000: + /* 0000001. ........ .110.... .0111011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:44 */ + if (trans_remw(ctx, &u.f_r)) return true; + return false; + case 0x02007000: + /* 0000001. ........ .111.... .0111011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:45 */ + if (trans_remuw(ctx, &u.f_r)) return true; + return false; + case 0x40000000: + /* 0100000. ........ .000.... 
.0111011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:35 */ + if (trans_subw(ctx, &u.f_r)) return true; + return false; + case 0x40005000: + /* 0100000. ........ .101.... .0111011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:38 */ + if (trans_sraw(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x00000043: + /* ........ ........ ........ .1000011 */ + decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); + switch ((insn >> 25) & 0x3) { + case 0x0: + /* .....00. ........ ........ .1000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:158 */ + if (trans_fmadd_s(ctx, &u.f_decode_insn3210)) return true; + return false; + case 0x1: + /* .....01. ........ ........ .1000011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:186 */ + if (trans_fmadd_d(ctx, &u.f_decode_insn3210)) return true; + return false; + } + return false; + case 0x00000047: + /* ........ ........ ........ .1000111 */ + decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); + switch ((insn >> 25) & 0x3) { + case 0x0: + /* .....00. ........ ........ .1000111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:159 */ + if (trans_fmsub_s(ctx, &u.f_decode_insn3210)) return true; + return false; + case 0x1: + /* .....01. ........ ........ .1000111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:187 */ + if (trans_fmsub_d(ctx, &u.f_decode_insn3210)) return true; + return false; + } + return false; + case 0x0000004b: + /* ........ ........ ........ .1001011 */ + decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); + switch ((insn >> 25) & 0x3) { + case 0x0: + /* .....00. ........ ........ .1001011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:160 */ + if (trans_fnmsub_s(ctx, &u.f_decode_insn3210)) return true; + return false; + case 0x1: + /* .....01. ........ ........ .1001011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:188 */ + if (trans_fnmsub_d(ctx, &u.f_decode_insn3210)) return true; + return false; + } + return false; + case 0x0000004f: + /* ........ ........ ........ .1001111 */ + decode_insn32_extract_r4_rm(ctx, &u.f_decode_insn3210, insn); + switch ((insn >> 25) & 0x3) { + case 0x0: + /* .....00. ........ ........ .1001111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:161 */ + if (trans_fnmadd_s(ctx, &u.f_decode_insn3210)) return true; + return false; + case 0x1: + /* .....01. ........ ........ .1001111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:189 */ + if (trans_fnmadd_d(ctx, &u.f_decode_insn3210)) return true; + return false; + } + return false; + case 0x00000053: + /* ........ ........ ........ .1010011 */ + switch ((insn >> 25) & 0x7f) { + case 0x0: + /* 0000000. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:162 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fadd_s(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x1: + /* 0000001. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:190 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fadd_d(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x4: + /* 0000100. ........ ........ 
.1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:163 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fsub_s(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x5: + /* 0000101. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:191 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fsub_d(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x8: + /* 0001000. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:164 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fmul_s(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x9: + /* 0001001. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:192 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fmul_d(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0xc: + /* 0001100. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:165 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fdiv_s(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0xd: + /* 0001101. ........ ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:193 */ + decode_insn32_extract_r_rm(ctx, &u.f_decode_insn3211, insn); + if (trans_fdiv_d(ctx, &u.f_decode_insn3211)) return true; + return false; + case 0x10: + /* 0010000. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 0010000. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:167 */ + if (trans_fsgnj_s(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 0010000. ........ .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:168 */ + if (trans_fsgnjn_s(ctx, &u.f_r)) return true; + return false; + case 0x2: + /* 0010000. ........ .010.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:169 */ + if (trans_fsgnjx_s(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x11: + /* 0010001. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 0010001. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:195 */ + if (trans_fsgnj_d(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 0010001. ........ .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:196 */ + if (trans_fsgnjn_d(ctx, &u.f_r)) return true; + return false; + case 0x2: + /* 0010001. ........ .010.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:197 */ + if (trans_fsgnjx_d(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x14: + /* 0010100. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 0010100. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:170 */ + if (trans_fmin_s(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 0010100. ........ .001.... 
.1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:171 */ + if (trans_fmax_s(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x15: + /* 0010101. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 0010101. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:198 */ + if (trans_fmin_d(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 0010101. ........ .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:199 */ + if (trans_fmax_d(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x20: + /* 0100000. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x1: + /* 01000000 0001.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:200 */ + if (trans_fcvt_s_d(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x21: + /* 0100001. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 01000010 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:201 */ + if (trans_fcvt_d_s(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x2c: + /* 0101100. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 01011000 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:166 */ + if (trans_fsqrt_s(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x2d: + /* 0101101. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 01011010 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:194 */ + if (trans_fsqrt_d(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x50: + /* 1010000. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 1010000. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:177 */ + if (trans_fle_s(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 1010000. ........ .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:176 */ + if (trans_flt_s(ctx, &u.f_r)) return true; + return false; + case 0x2: + /* 1010000. ........ .010.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:175 */ + if (trans_feq_s(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x51: + /* 1010001. ........ ........ .1010011 */ + decode_insn32_extract_r(ctx, &u.f_r, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* 1010001. ........ .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:204 */ + if (trans_fle_d(ctx, &u.f_r)) return true; + return false; + case 0x1: + /* 1010001. ........ .001.... 
.1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:203 */ + if (trans_flt_d(ctx, &u.f_r)) return true; + return false; + case 0x2: + /* 1010001. ........ .010.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:202 */ + if (trans_feq_d(ctx, &u.f_r)) return true; + return false; + } + return false; + case 0x60: + /* 1100000. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 11000000 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:172 */ + if (trans_fcvt_w_s(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x1: + /* 11000000 0001.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:173 */ + if (trans_fcvt_wu_s(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x2: + /* 11000000 0010.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:61 */ + if (trans_fcvt_l_s(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x3: + /* 11000000 0011.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:62 */ + if (trans_fcvt_lu_s(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x61: + /* 1100001. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 11000010 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:206 */ + if (trans_fcvt_w_d(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x1: + /* 11000010 0001.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:207 */ + if (trans_fcvt_wu_d(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x2: + /* 11000010 0010.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:67 */ + if (trans_fcvt_l_d(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x3: + /* 11000010 0011.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:68 */ + if (trans_fcvt_lu_d(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x68: + /* 1101000. ........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 11010000 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:179 */ + if (trans_fcvt_s_w(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x1: + /* 11010000 0001.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:180 */ + if (trans_fcvt_s_wu(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x2: + /* 11010000 0010.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:63 */ + if (trans_fcvt_s_l(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x3: + /* 11010000 0011.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:64 */ + if (trans_fcvt_s_lu(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x69: + /* 1101001. 
........ ........ .1010011 */ + decode_insn32_extract_r2_rm(ctx, &u.f_decode_insn3212, insn); + switch ((insn >> 20) & 0x1f) { + case 0x0: + /* 11010010 0000.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:208 */ + if (trans_fcvt_d_w(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x1: + /* 11010010 0001.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:209 */ + if (trans_fcvt_d_wu(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x2: + /* 11010010 0010.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:70 */ + if (trans_fcvt_d_l(ctx, &u.f_decode_insn3212)) return true; + return false; + case 0x3: + /* 11010010 0011.... ........ .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:71 */ + if (trans_fcvt_d_lu(ctx, &u.f_decode_insn3212)) return true; + return false; + } + return false; + case 0x70: + /* 1110000. ........ ........ .1010011 */ + decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); + switch (insn & 0x01f07000) { + case 0x00000000: + /* 11100000 0000.... .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:174 */ + if (trans_fmv_x_w(ctx, &u.f_decode_insn3213)) return true; + return false; + case 0x00001000: + /* 11100000 0000.... .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:178 */ + if (trans_fclass_s(ctx, &u.f_decode_insn3213)) return true; + return false; + } + return false; + case 0x71: + /* 1110001. ........ ........ .1010011 */ + decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); + switch (insn & 0x01f07000) { + case 0x00000000: + /* 11100010 0000.... .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:69 */ + if (trans_fmv_x_d(ctx, &u.f_decode_insn3213)) return true; + return false; + case 0x00001000: + /* 11100010 0000.... .001.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:205 */ + if (trans_fclass_d(ctx, &u.f_decode_insn3213)) return true; + return false; + } + return false; + case 0x78: + /* 1111000. ........ ........ .1010011 */ + decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); + switch (insn & 0x01f07000) { + case 0x00000000: + /* 11110000 0000.... .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:181 */ + if (trans_fmv_w_x(ctx, &u.f_decode_insn3213)) return true; + return false; + } + return false; + case 0x79: + /* 1111001. ........ ........ .1010011 */ + decode_insn32_extract_r2(ctx, &u.f_decode_insn3213, insn); + switch (insn & 0x01f07000) { + case 0x00000000: + /* 11110010 0000.... .000.... .1010011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32-64.decode:72 */ + if (trans_fmv_d_x(ctx, &u.f_decode_insn3213)) return true; + return false; + } + return false; + } + return false; + case 0x00000063: + /* ........ ........ ........ .1100011 */ + decode_insn32_extract_b(ctx, &u.f_b, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:90 */ + if (trans_beq(ctx, &u.f_b)) return true; + return false; + case 0x1: + /* ........ ........ .001.... 
.1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:91 */ + if (trans_bne(ctx, &u.f_b)) return true; + return false; + case 0x4: + /* ........ ........ .100.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:92 */ + if (trans_blt(ctx, &u.f_b)) return true; + return false; + case 0x5: + /* ........ ........ .101.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:93 */ + if (trans_bge(ctx, &u.f_b)) return true; + return false; + case 0x6: + /* ........ ........ .110.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:94 */ + if (trans_bltu(ctx, &u.f_b)) return true; + return false; + case 0x7: + /* ........ ........ .111.... .1100011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:95 */ + if (trans_bgeu(ctx, &u.f_b)) return true; + return false; + } + return false; + case 0x00000067: + /* ........ ........ ........ .1100111 */ + decode_insn32_extract_i(ctx, &u.f_i, insn); + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .1100111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:89 */ + if (trans_jalr(ctx, &u.f_i)) return true; + return false; + } + return false; + case 0x0000006f: + /* ........ ........ ........ .1101111 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:88 */ + decode_insn32_extract_j(ctx, &u.f_j, insn); + if (trans_jal(ctx, &u.f_j)) return true; + return false; + case 0x00000073: + /* ........ ........ ........ .1110011 */ + switch ((insn >> 12) & 0x7) { + case 0x0: + /* ........ ........ .000.... .1110011 */ + switch (insn & 0xfe000f80) { + case 0x00000000: + /* 0000000. ........ .0000000 01110011 */ + decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); + switch ((insn >> 15) & 0x3ff) { + case 0x0: + /* 00000000 00000000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:74 */ + if (trans_ecall(ctx, &u.f_empty)) return true; + return false; + case 0x20: + /* 00000000 00010000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:75 */ + if (trans_ebreak(ctx, &u.f_empty)) return true; + return false; + case 0x40: + /* 00000000 00100000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:76 */ + if (trans_uret(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x10000000: + /* 0001000. ........ .0000000 01110011 */ + switch ((insn >> 20) & 0x1f) { + case 0x2: + /* 00010000 0010.... .0000000 01110011 */ + decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); + switch ((insn >> 15) & 0x1f) { + case 0x0: + /* 00010000 00100000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:77 */ + if (trans_sret(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x4: + /* 00010000 0100.... .0000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:83 */ + decode_insn32_extract_sfence_vm(ctx, &u.f_decode_insn3215, insn); + if (trans_sfence_vm(ctx, &u.f_decode_insn3215)) return true; + return false; + case 0x5: + /* 00010000 0101.... 
.0000000 01110011 */ + decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); + switch ((insn >> 15) & 0x1f) { + case 0x0: + /* 00010000 01010000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:79 */ + if (trans_wfi(ctx, &u.f_empty)) return true; + return false; + } + return false; + } + return false; + case 0x12000000: + /* 0001001. ........ .0000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:82 */ + decode_insn32_extract_sfence_vma(ctx, &u.f_decode_insn3214, insn); + if (trans_sfence_vma(ctx, &u.f_decode_insn3214)) return true; + return false; + case 0x22000000: + /* 0010001. ........ .0000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:81 */ + decode_insn32_extract_hfence_bvma(ctx, &u.f_decode_insn3214, insn); + if (trans_hfence_bvma(ctx, &u.f_decode_insn3214)) return true; + return false; + case 0x30000000: + /* 0011000. ........ .0000000 01110011 */ + decode_insn32_extract_decode_insn32_Fmt_18(ctx, &u.f_empty, insn); + switch ((insn >> 15) & 0x3ff) { + case 0x40: + /* 00110000 00100000 00000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:78 */ + if (trans_mret(ctx, &u.f_empty)) return true; + return false; + } + return false; + case 0x62000000: + /* 0110001. ........ .0000000 01110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:80 */ + decode_insn32_extract_hfence_gvma(ctx, &u.f_decode_insn3214, insn); + if (trans_hfence_gvma(ctx, &u.f_decode_insn3214)) return true; + return false; + } + return false; + case 0x1: + /* ........ ........ .001.... .1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:125 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrw(ctx, &u.f_decode_insn329)) return true; + return false; + case 0x2: + /* ........ ........ .010.... .1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:126 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrs(ctx, &u.f_decode_insn329)) return true; + return false; + case 0x3: + /* ........ ........ .011.... .1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:127 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrc(ctx, &u.f_decode_insn329)) return true; + return false; + case 0x5: + /* ........ ........ .101.... .1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:128 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrwi(ctx, &u.f_decode_insn329)) return true; + return false; + case 0x6: + /* ........ ........ .110.... .1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:129 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrsi(ctx, &u.f_decode_insn329)) return true; + return false; + case 0x7: + /* ........ ........ .111.... 
.1110011 */ + /* /home/me/projects/unicorn2/qemu-5.0.0-build/target/riscv/insn32.decode:130 */ + decode_insn32_extract_csr(ctx, &u.f_decode_insn329, insn); + if (trans_csrrci(ctx, &u.f_decode_insn329)) return true; + return false; + } + return false; + } + return false; +} diff --git a/qemu/target/riscv/translate.c b/qemu/target/riscv/translate.c new file mode 100644 index 00000000..7aa55188 --- /dev/null +++ b/qemu/target/riscv/translate.c @@ -0,0 +1,959 @@ +/* + * RISC-V emulation for qemu: main translation routines. + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "cpu.h" +#include "tcg/tcg-op.h" +#include "exec/cpu_ldst.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "exec/translator.h" + +#include "instmap.h" + +#include "unicorn/platform.h" +#include "uc_priv.h" + +#include "exec/gen-icount.h" + +/* + * Unicorn: Special disas state for exiting in the middle of tb. + */ +#define DISAS_UC_EXIT DISAS_TARGET_6 + +typedef struct DisasContext { + DisasContextBase base; + /* pc_succ_insn points to the instruction following base.pc_next */ + target_ulong pc_succ_insn; + target_ulong priv_ver; + bool virt_enabled; + uint32_t opcode; + uint32_t mstatus_fs; + uint32_t misa; + uint32_t mem_idx; + /* Remember the rounding mode encoded in the previous fp instruction, + which we have already installed into env->fp_status. Or -1 for + no previous fp instruction. Note that we exit the TB when writing + to any system register, which includes CSR_FRM, so we do not have + to reset this known value. */ + int frm; + bool ext_ifencei; + + // Unicorn + struct uc_struct *uc; + bool invalid; // invalid instruction, discovered by translator +} DisasContext;
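
For orientation: the tcg_memop_lookup table just below is indexed by the funct3 field of a load/store encoding. A minimal sketch of the extraction in plain C (the helper name and the sample word are ours, not part of the patch):

    #include <stdint.h>

    /* funct3 occupies bits 14:12 of a 32-bit RISC-V encoding; for
     * loads it selects size and signedness. For example, 0x0000a503
     * is "lw a0, 0(ra)", whose funct3 is 2 and maps to MO_TESL. */
    static unsigned funct3(uint32_t insn)
    {
        return (insn >> 12) & 0x7;
    }
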
+ +#ifdef TARGET_RISCV64 +/* convert riscv funct3 to qemu memop for load/store */ +static const int tcg_memop_lookup[8] = { + // [0 ... 7] = -1, + [0] = MO_SB, + [1] = MO_TESW, + [2] = MO_TESL, + [3] = MO_TEQ, + [4] = MO_UB, + [5] = MO_TEUW, + [6] = MO_TEUL, + [7] = -1, +}; +#endif + +#ifdef TARGET_RISCV64 +#define CASE_OP_32_64(X) case X: case glue(X, W) +#else +#define CASE_OP_32_64(X) case X +#endif + +static inline bool has_ext(DisasContext *ctx, uint32_t ext) +{ + return ctx->misa & ext; +} + +static void generate_exception(DisasContext *ctx, int excp) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, excp); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); + tcg_temp_free_i32(tcg_ctx, helper_tmp); + ctx->base.is_jmp = DISAS_NORETURN; +} + +static void generate_exception_mbadaddr(DisasContext *ctx, int excp) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); + tcg_gen_st_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_env, offsetof(CPURISCVState, badaddr)); + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, excp); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); + tcg_temp_free_i32(tcg_ctx, helper_tmp); + ctx->base.is_jmp = DISAS_NORETURN; +} + +static void gen_exception_debug(TCGContext *tcg_ctx) +{ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, EXCP_DEBUG); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); + tcg_temp_free_i32(tcg_ctx, helper_tmp); +} + +/* Wrapper around tcg_gen_exit_tb that handles single stepping */ +static void exit_tb(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (ctx->base.singlestep_enabled) { + gen_exception_debug(tcg_ctx); + } else { + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + } +} + +/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */ +static void lookup_and_goto_ptr(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (ctx->base.singlestep_enabled) { + gen_exception_debug(tcg_ctx); + } else { + tcg_gen_lookup_and_goto_ptr(tcg_ctx); + } +} + +static void gen_exception_illegal(DisasContext *ctx) +{ + generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST); +} + +static void gen_exception_inst_addr_mis(DisasContext *ctx) +{ + generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS); +} + +static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) +{ + if (unlikely(ctx->base.singlestep_enabled)) { + return false; + } + + return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); +} + +static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (use_goto_tb(ctx, dest)) { + /* chaining is only allowed when the jump is to the same page */ + tcg_gen_goto_tb(tcg_ctx, n); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, dest); + + /* No need to check for single stepping here as use_goto_tb() will + * return false in case of single stepping. + */ + tcg_gen_exit_tb(tcg_ctx, ctx->base.tb, n); + } else { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, dest); + lookup_and_goto_ptr(ctx); + } +} + +/* Wrapper for getting reg values - need to check if reg is zero since + * cpu_gpr[0] is not actually allocated + */ +static inline void gen_get_gpr(TCGContext *tcg_ctx, TCGv t, int reg_num) +{ + if (reg_num == 0) { + tcg_gen_movi_tl(tcg_ctx, t, 0); + } else { + tcg_gen_mov_tl(tcg_ctx, t, tcg_ctx->cpu_gpr[reg_num]); + } +} + +/* Wrapper for setting reg values - need to check if reg is zero since + * cpu_gpr[0] is not actually allocated.
this is more for safety purposes, + * since we usually avoid calling the OP_TYPE_gen function if we see a write to + * $zero + */ +static inline void gen_set_gpr(TCGContext *tcg_ctx, int reg_num_dst, TCGv t) +{ + if (reg_num_dst != 0) { + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr[reg_num_dst], t); + } +} + +static void gen_mulhsu(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, TCGv arg2) +{ + TCGv rl = tcg_temp_new(tcg_ctx); + TCGv rh = tcg_temp_new(tcg_ctx); + + tcg_gen_mulu2_tl(tcg_ctx, rl, rh, arg1, arg2); + /* fix up for one negative */ + tcg_gen_sari_tl(tcg_ctx, rl, arg1, TARGET_LONG_BITS - 1); + tcg_gen_and_tl(tcg_ctx, rl, rl, arg2); + tcg_gen_sub_tl(tcg_ctx, ret, rh, rl); + + tcg_temp_free(tcg_ctx, rl); + tcg_temp_free(tcg_ctx, rh); +} + +static void gen_div(TCGContext *tcg_ctx, TCGv ret, TCGv source1, TCGv source2) +{ + TCGv cond1, cond2, zeroreg, resultopt1; + /* + * Handle by altering args to tcg_gen_div to produce req'd results: + * For overflow: want source1 in source1 and 1 in source2 + * For div by zero: want -1 in source1 and 1 in source2 -> -1 result + */ + cond1 = tcg_temp_new(tcg_ctx); + cond2 = tcg_temp_new(tcg_ctx); + zeroreg = tcg_const_tl(tcg_ctx, 0); + resultopt1 = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, resultopt1, (target_ulong)-1); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond2, source2, (target_ulong)(~0L)); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond1, source1, + ((target_ulong)1) << (TARGET_LONG_BITS - 1)); + tcg_gen_and_tl(tcg_ctx, cond1, cond1, cond2); /* cond1 = overflow */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */ + /* if div by zero, set source1 to -1, otherwise don't change */ + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source1, cond2, zeroreg, source1, + resultopt1); + /* if overflow or div by zero, set source2 to 1, else don't change */ + tcg_gen_or_tl(tcg_ctx, cond1, cond1, cond2); + tcg_gen_movi_tl(tcg_ctx, resultopt1, (target_ulong)1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source2, cond1, zeroreg, source2, + resultopt1); + tcg_gen_div_tl(tcg_ctx, ret, source1, source2); + + tcg_temp_free(tcg_ctx, cond1); + tcg_temp_free(tcg_ctx, cond2); + tcg_temp_free(tcg_ctx, zeroreg); + tcg_temp_free(tcg_ctx, resultopt1); +} + +static void gen_divu(TCGContext *tcg_ctx, TCGv ret, TCGv source1, TCGv source2) +{ + TCGv cond1, zeroreg, resultopt1; + cond1 = tcg_temp_new(tcg_ctx); + + zeroreg = tcg_const_tl(tcg_ctx, 0); + resultopt1 = tcg_temp_new(tcg_ctx); + + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond1, source2, 0); + tcg_gen_movi_tl(tcg_ctx, resultopt1, (target_ulong)-1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source1, cond1, zeroreg, source1, + resultopt1); + tcg_gen_movi_tl(tcg_ctx, resultopt1, (target_ulong)1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source2, cond1, zeroreg, source2, + resultopt1); + tcg_gen_divu_tl(tcg_ctx, ret, source1, source2); + + tcg_temp_free(tcg_ctx, cond1); + tcg_temp_free(tcg_ctx, zeroreg); + tcg_temp_free(tcg_ctx, resultopt1); +} + +static void gen_rem(TCGContext *tcg_ctx, TCGv ret, TCGv source1, TCGv source2) +{ + TCGv cond1, cond2, zeroreg, resultopt1; + + cond1 = tcg_temp_new(tcg_ctx); + cond2 = tcg_temp_new(tcg_ctx); + zeroreg = tcg_const_tl(tcg_ctx, 0); + resultopt1 = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, resultopt1, 1L); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond2, source2, (target_ulong)-1); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond1, source1, + (target_ulong)1 << (TARGET_LONG_BITS - 1)); + tcg_gen_and_tl(tcg_ctx, cond2, cond1, 
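
gen_div() above (and gen_divu()/gen_rem()/gen_remu() around it) reproduces the RISC-V M-extension edge cases without branching, by steering the operands fed to the TCG divide op. The architectural semantics being reproduced, as a plain-C reference sketch (helper names are ours):

    #include <stdint.h>

    /* RISC-V defines integer division to never trap: dividing by
     * zero yields all-ones (-1), and the signed overflow case
     * INT64_MIN / -1 returns the dividend. The remainder returns
     * the dividend on a zero divisor and 0 on overflow. */
    static int64_t riscv_div(int64_t a, int64_t b)
    {
        if (b == 0) return -1;
        if (a == INT64_MIN && b == -1) return a;
        return a / b;
    }

    static int64_t riscv_rem(int64_t a, int64_t b)
    {
        if (b == 0) return a;
        if (a == INT64_MIN && b == -1) return 0;
        return a % b;
    }
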
cond2); /* cond2 = overflow */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */ + /* if overflow or div by zero, set source2 to 1, else don't change */ + tcg_gen_or_tl(tcg_ctx, cond2, cond1, cond2); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source2, cond2, zeroreg, source2, + resultopt1); + tcg_gen_rem_tl(tcg_ctx, resultopt1, source1, source2); + /* if div by zero, just return the original dividend */ + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, ret, cond1, zeroreg, resultopt1, + source1); + + tcg_temp_free(tcg_ctx, cond1); + tcg_temp_free(tcg_ctx, cond2); + tcg_temp_free(tcg_ctx, zeroreg); + tcg_temp_free(tcg_ctx, resultopt1); +} + +static void gen_remu(TCGContext *tcg_ctx, TCGv ret, TCGv source1, TCGv source2) +{ + TCGv cond1, zeroreg, resultopt1; + cond1 = tcg_temp_new(tcg_ctx); + zeroreg = tcg_const_tl(tcg_ctx, 0); + resultopt1 = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, resultopt1, (target_ulong)1); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, cond1, source2, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, source2, cond1, zeroreg, source2, + resultopt1); + tcg_gen_remu_tl(tcg_ctx, resultopt1, source1, source2); + /* if div by zero, just return the original dividend */ + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, ret, cond1, zeroreg, resultopt1, + source1); + + tcg_temp_free(tcg_ctx, cond1); + tcg_temp_free(tcg_ctx, zeroreg); + tcg_temp_free(tcg_ctx, resultopt1); +} + +static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong next_pc; + + /* check misaligned: */ + next_pc = ctx->base.pc_next + imm; + if (!has_ext(ctx, RVC)) { + if ((next_pc & 0x3) != 0) { + gen_exception_inst_addr_mis(ctx); + return; + } + } + if (rd != 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr[rd], ctx->pc_succ_insn); + } + + gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */ + ctx->base.is_jmp = DISAS_NORETURN; +} + +#ifdef TARGET_RISCV64 +static void gen_load_c(DisasContext *ctx, uint32_t opc, int rd, int rs1, + target_long imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, rs1); + tcg_gen_addi_tl(tcg_ctx, t0, t0, imm); + int memop = tcg_memop_lookup[(opc >> 12) & 0x7]; + + if (memop < 0) { + gen_exception_illegal(ctx); + return; + } + + tcg_gen_qemu_ld_tl(tcg_ctx, t1, t0, ctx->mem_idx, memop); + gen_set_gpr(tcg_ctx, rd, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_store_c(DisasContext *ctx, uint32_t opc, int rs1, int rs2, + target_long imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv dat = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, t0, rs1); + tcg_gen_addi_tl(tcg_ctx, t0, t0, imm); + gen_get_gpr(tcg_ctx, dat, rs2); + int memop = tcg_memop_lookup[(opc >> 12) & 0x7]; + + if (memop < 0) { + gen_exception_illegal(ctx); + return; + } + + tcg_gen_qemu_st_tl(tcg_ctx, dat, t0, ctx->mem_idx, memop); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, dat); +} +#endif + +/* The states of mstatus_fs are: + * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty + * We will have already diagnosed disabled state, + * and need to turn initial/clean into dirty. + */ +static void mark_fs_dirty(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv tmp; + if (ctx->mstatus_fs == MSTATUS_FS) { + return; + } + /* Remember the state change for the rest of the TB.
*/ + ctx->mstatus_fs = MSTATUS_FS; + + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_ld_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPURISCVState, mstatus)); + tcg_gen_ori_tl(tcg_ctx, tmp, tmp, MSTATUS_FS | MSTATUS_SD); + tcg_gen_st_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPURISCVState, mstatus)); + + if (ctx->virt_enabled) { + tcg_gen_ld_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPURISCVState, mstatus_hs)); + tcg_gen_ori_tl(tcg_ctx, tmp, tmp, MSTATUS_FS | MSTATUS_SD); + tcg_gen_st_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPURISCVState, mstatus_hs)); + } + tcg_temp_free(tcg_ctx, tmp); +} + +#if !defined(TARGET_RISCV64) +static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd, + int rs1, target_long imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + if (ctx->mstatus_fs == 0) { + gen_exception_illegal(ctx); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, rs1); + tcg_gen_addi_tl(tcg_ctx, t0, t0, imm); + + switch (opc) { + case OPC_RISC_FLW: + if (!has_ext(ctx, RVF)) { + goto do_illegal; + } + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL); + /* RISC-V requires NaN-boxing of narrower width floating point values */ + tcg_gen_ori_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd], tcg_ctx->cpu_fpr[rd], 0xffffffff00000000ULL); + break; + case OPC_RISC_FLD: + if (!has_ext(ctx, RVD)) { + goto do_illegal; + } + tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ); + break; + do_illegal: + default: + gen_exception_illegal(ctx); + break; + } + tcg_temp_free(tcg_ctx, t0); + + mark_fs_dirty(ctx); +} + +static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1, + int rs2, target_long imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + + if (ctx->mstatus_fs == 0) { + gen_exception_illegal(ctx); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + gen_get_gpr(tcg_ctx, t0, rs1); + tcg_gen_addi_tl(tcg_ctx, t0, t0, imm); + + switch (opc) { + case OPC_RISC_FSW: + if (!has_ext(ctx, RVF)) { + goto do_illegal; + } + tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL); + break; + case OPC_RISC_FSD: + if (!has_ext(ctx, RVD)) { + goto do_illegal; + } + tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ); + break; + do_illegal: + default: + gen_exception_illegal(ctx); + break; + } + + tcg_temp_free(tcg_ctx, t0); +} +#endif + +static void gen_set_rm(DisasContext *ctx, int rm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0; + + if (ctx->frm == rm) { + return; + } + ctx->frm = rm; + t0 = tcg_const_i32(tcg_ctx, rm); + gen_helper_set_rounding_mode(tcg_ctx, tcg_ctx->cpu_env, t0); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void decode_RV32_64C0(DisasContext *ctx, uint16_t opcode) +{ + uint8_t funct3 = extract16(opcode, 13, 3); + uint8_t rd_rs2 = GET_C_RS2S(opcode); + uint8_t rs1s = GET_C_RS1S(opcode); + + switch (funct3) { + case 3: +#if defined(TARGET_RISCV64) + /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/ + gen_load_c(ctx, OPC_RISC_LD, rd_rs2, rs1s, + GET_C_LD_IMM(opcode)); +#else + /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/ + gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s, + GET_C_LW_IMM(opcode)); +#endif + break; + case 7: +#if defined(TARGET_RISCV64) + /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/ + gen_store_c(ctx, OPC_RISC_SD, rs1s, rd_rs2, + GET_C_LD_IMM(opcode)); +#else + /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/ + gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2, + GET_C_LW_IMM(opcode)); +#endif + break; + } +} + +static void 
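
Two of the target rules enforced above are compact enough to restate in plain C, as an illustration only (the names are ours): gen_jal() rejects misaligned targets when the C extension is absent, and the FLW path in gen_fp_load() NaN-boxes single-precision values in the 64-bit FP registers.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Without the C extension, jump targets must be 4-byte aligned
     * or an instruction-address-misaligned exception is raised. */
    static bool jal_target_ok(uint64_t next_pc, bool have_rvc)
    {
        return have_rvc || (next_pc & 0x3) == 0;
    }

    /* A 32-bit float stored into a 64-bit FP register is NaN-boxed
     * by forcing the upper 32 bits to all-ones, matching the
     * tcg_gen_ori_i64(..., 0xffffffff00000000ULL) above. */
    static uint64_t nanbox_s(float f)
    {
        uint32_t bits;
        memcpy(&bits, &f, sizeof(bits));
        return 0xffffffff00000000ULL | (uint64_t)bits;
    }
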
decode_RV32_64C(DisasContext *ctx, uint16_t opcode) +{ + uint8_t op = extract16(opcode, 0, 2); + + switch (op) { + case 0: + decode_RV32_64C0(ctx, opcode); + break; + } +} + +#define EX_SH(amount) \ + static int ex_shift_##amount(DisasContext *ctx, int imm) \ + { \ + return imm << amount; \ + } +EX_SH(1) +EX_SH(2) +EX_SH(3) +EX_SH(4) +EX_SH(12) + +#define REQUIRE_EXT(ctx, ext) do { \ + if (!has_ext(ctx, ext)) { \ + return false; \ + } \ +} while (0) + +static int ex_rvc_register(DisasContext *ctx, int reg) +{ + return 8 + reg; +} + +static int ex_rvc_shifti(DisasContext *ctx, int imm) +{ + /* For RV128 a shamt of 0 means a shift by 64. */ + return imm ? imm : 64; +} + +/* Include the auto-generated decoder for 32 bit insn */ +#ifdef TARGET_RISCV32 +#include "riscv32/decode_insn32.inc.c" +#else +#include "riscv64/decode_insn32.inc.c" +#endif + +static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, + void (*func)(TCGContext *, TCGv, TCGv, target_long)) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1; + source1 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, source1, a->rs1); + + (*func)(tcg_ctx, source1, source1, a->imm); + + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + return true; +} + +static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, + void (*func)(TCGContext *, TCGv, TCGv, TCGv)) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1, source2; + source1 = tcg_temp_new(tcg_ctx); + source2 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, source1, a->rs1); + tcg_gen_movi_tl(tcg_ctx, source2, a->imm); + + (*func)(tcg_ctx, source1, source1, source2); + + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + return true; +} + +#ifdef TARGET_RISCV64 +static void gen_addw(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, TCGv arg2) +{ + tcg_gen_add_tl(tcg_ctx, ret, arg1, arg2); + tcg_gen_ext32s_tl(tcg_ctx, ret, ret); +} + +static void gen_subw(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, TCGv arg2) +{ + tcg_gen_sub_tl(tcg_ctx, ret, arg1, arg2); + tcg_gen_ext32s_tl(tcg_ctx, ret, ret); +} + +static void gen_mulw(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, TCGv arg2) +{ + tcg_gen_mul_tl(tcg_ctx, ret, arg1, arg2); + tcg_gen_ext32s_tl(tcg_ctx, ret, ret); +} + +static bool gen_arith_div_w(TCGContext *tcg_ctx, arg_r *a, + void(*func)(TCGContext *, TCGv, TCGv, TCGv)) +{ + TCGv source1, source2; + source1 = tcg_temp_new(tcg_ctx); + source2 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, source1, a->rs1); + gen_get_gpr(tcg_ctx, source2, a->rs2); + tcg_gen_ext32s_tl(tcg_ctx, source1, source1); + tcg_gen_ext32s_tl(tcg_ctx, source2, source2); + + (*func)(tcg_ctx, source1, source1, source2); + + tcg_gen_ext32s_tl(tcg_ctx, source1, source1); + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + return true; +} + +static bool gen_arith_div_uw(TCGContext *tcg_ctx, arg_r *a, + void(*func)(TCGContext *, TCGv, TCGv, TCGv)) +{ + TCGv source1, source2; + source1 = tcg_temp_new(tcg_ctx); + source2 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, source1, a->rs1); + gen_get_gpr(tcg_ctx, source2, a->rs2); + tcg_gen_ext32u_tl(tcg_ctx, source1, source1); + tcg_gen_ext32u_tl(tcg_ctx, source2, source2); + + (*func)(tcg_ctx, source1, source1, source2); + + tcg_gen_ext32s_tl(tcg_ctx, source1, source1); + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + return true; +} + +#endif + +static bool 
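
ex_rvc_register() and ex_rvc_shifti() above capture two small RVC encoding conventions; spelled out as a sketch (names are ours):

    /* Compressed instructions carry 3-bit register fields that can
     * only name x8-x15 (s0/s1, a0-a5), hence the "8 +" rebase. */
    static int rvc_reg(int field3)
    {
        return 8 + field3;    /* 0 -> x8 (s0), ..., 7 -> x15 (a5) */
    }

    /* In the RV128 encoding a shift amount of 0 stands for 64. */
    static int rvc_shamt(int imm)
    {
        return imm ? imm : 64;
    }
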
gen_arith(TCGContext *tcg_ctx, arg_r *a, + void(*func)(TCGContext *, TCGv, TCGv, TCGv)) +{ + TCGv source1, source2; + source1 = tcg_temp_new(tcg_ctx); + source2 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, source1, a->rs1); + gen_get_gpr(tcg_ctx, source2, a->rs2); + + (*func)(tcg_ctx, source1, source1, source2); + + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + return true; +} + +static bool gen_shift(DisasContext *ctx, arg_r *a, + void(*func)(TCGContext *, TCGv, TCGv, TCGv)) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv source1 = tcg_temp_new(tcg_ctx); + TCGv source2 = tcg_temp_new(tcg_ctx); + + gen_get_gpr(tcg_ctx, source1, a->rs1); + gen_get_gpr(tcg_ctx, source2, a->rs2); + + tcg_gen_andi_tl(tcg_ctx, source2, source2, TARGET_LONG_BITS - 1); + (*func)(tcg_ctx, source1, source1, source2); + + gen_set_gpr(tcg_ctx, a->rd, source1); + tcg_temp_free(tcg_ctx, source1); + tcg_temp_free(tcg_ctx, source2); + return true; +} + +/* Include insn module translation function */ +#include "insn_trans/trans_rvi.inc.c" +#include "insn_trans/trans_rvm.inc.c" +#include "insn_trans/trans_rva.inc.c" +#include "insn_trans/trans_rvf.inc.c" +#include "insn_trans/trans_rvd.inc.c" +#include "insn_trans/trans_privileged.inc.c" + +/* Include the auto-generated decoder for 16 bit insn */ +#ifdef TARGET_RISCV32 +#include "riscv32/decode_insn16.inc.c" +#else +#include "riscv64/decode_insn16.inc.c" +#endif + +static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + /* check for compressed insn */ + if (extract16(opcode, 0, 2) != 3) { + if (!has_ext(ctx, RVC)) { + gen_exception_illegal(ctx); + } else { + ctx->invalid = false; + ctx->pc_succ_insn = ctx->base.pc_next + 2; + if (!decode_insn16(ctx, opcode)) { + /* fall back to old decoder */ + decode_RV32_64C(ctx, opcode); + } else { + // invalid instruction does not advance PC + if (ctx->invalid) { + ctx->pc_succ_insn -= 2; + } + } + } + } else { + uint32_t opcode32 = opcode; + opcode32 = deposit32(opcode32, 16, 16, + translator_lduw(tcg_ctx, env, ctx->base.pc_next + 2)); + ctx->pc_succ_insn = ctx->base.pc_next + 4; + if (!decode_insn32(ctx, opcode32)) { + ctx->pc_succ_insn = ctx->base.pc_next - 4; + gen_exception_illegal(ctx); + } + } +} + +static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + CPURISCVState *env = cs->env_ptr; + RISCVCPU *cpu = RISCV_CPU(cs); + + // unicorn setup + ctx->uc = cs->uc; + + ctx->pc_succ_insn = ctx->base.pc_first; + ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK; + ctx->mstatus_fs = ctx->base.tb->flags & TB_FLAGS_MSTATUS_FS; + ctx->priv_ver = env->priv_ver; + + if (riscv_has_ext(env, RVH)) { + ctx->virt_enabled = riscv_cpu_virt_enabled(env); + if (env->priv_ver == PRV_M && + get_field(env->mstatus, MSTATUS_MPRV) && + MSTATUS_MPV_ISSET(env)) { + ctx->virt_enabled = true; + } else if (env->priv == PRV_S && + !riscv_cpu_virt_enabled(env) && + get_field(env->hstatus, HSTATUS_SPRV) && + get_field(env->hstatus, HSTATUS_SPV)) { + ctx->virt_enabled = true; + } + } else { + ctx->virt_enabled = false; + } + + ctx->misa = env->misa; + ctx->frm = -1; /* unknown rounding mode */ + ctx->ext_ifencei = cpu->cfg.ext_ifencei; +} + +static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu) +{ +} + +static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *ctx = 
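
decode_opc() above distinguishes compressed from full-width instructions by the two low bits of the first halfword. The rule in isolation, assuming RV32/RV64 where an instruction is only ever 2 or 4 bytes:

    #include <stdint.h>

    /* Encodings whose low two bits are not 0b11 are 16-bit RVC
     * instructions; otherwise a second halfword is fetched and the
     * 32-bit decoder runs, as in decode_opc(). */
    static int insn_length(uint16_t first_halfword)
    {
        return ((first_halfword & 0x3) == 0x3) ? 4 : 2;
    }
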
container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_insn_start(tcg_ctx, ctx->base.pc_next); +} + +static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, + const CPUBreakpoint *bp) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, ctx->base.pc_next); + ctx->base.is_jmp = DISAS_NORETURN; + gen_exception_debug(tcg_ctx); + /* The address covered by the breakpoint must be included in + [tb->pc, tb->pc + tb->size) in order for it to be + properly cleared -- thus we increment the PC here so that + the logic setting tb->size below does the right thing. */ + ctx->base.pc_next += 4; + return true; +} + +static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = ctx->uc; + TCGContext *tcg_ctx = uc->tcg_ctx; + CPURISCVState *env = cpu->env_ptr; + uint16_t opcode16 = translator_lduw(tcg_ctx, env, ctx->base.pc_next); + TCGOp *tcg_op, *prev_op = NULL; + bool insn_hook = false; + + // Unicorn: end address tells us to stop emulation + if (ctx->base.pc_next == ctx->uc->addr_end) { + // Unicorn: We have to exit current execution here. + dcbase->is_jmp = DISAS_UC_EXIT; + } else { + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, ctx->base.pc_next)) { + // save the last op + prev_op = tcg_last_op(tcg_ctx); + insn_hook = true; + gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, uc, ctx->base.pc_next); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + decode_opc(env, ctx, opcode16); + + if (insn_hook) { + // Unicorn: patch the callback to have the proper instruction size. + if (prev_op) { + // As explained further up in the function where prev_op is + // assigned, we move forward in the tail queue, so we're modifying the + // move instruction generated by gen_uc_tracecode() that contains + // the instruction size to assign the proper size (replacing 0xF1F1F1F1).
+ tcg_op = QTAILQ_NEXT(prev_op, link); + } else { + // this instruction is the first emulated code ever, + // so the instruction operand is the first operand + tcg_op = QTAILQ_FIRST(&tcg_ctx->ops); + } + + tcg_op->args[1] = ctx->pc_succ_insn - ctx->base.pc_next; + } + + ctx->base.pc_next = ctx->pc_succ_insn; + + if (ctx->base.is_jmp == DISAS_NEXT) { + target_ulong page_start; + + page_start = ctx->base.pc_first & TARGET_PAGE_MASK; + if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) { + ctx->base.is_jmp = DISAS_TOO_MANY; + } + } + } +} + +static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + + switch (ctx->base.is_jmp) { + case DISAS_TOO_MANY: + gen_goto_tb(ctx, 0, ctx->base.pc_next); + break; + case DISAS_NORETURN: + break; + case DISAS_UC_EXIT: + gen_helper_uc_riscv_exit(ctx->uc->tcg_ctx, ctx->uc->tcg_ctx->cpu_env); + break; + default: + g_assert_not_reached(); + } +} + +static const TranslatorOps riscv_tr_ops = { + .init_disas_context = riscv_tr_init_disas_context, + .tb_start = riscv_tr_tb_start, + .insn_start = riscv_tr_insn_start, + .breakpoint_check = riscv_tr_breakpoint_check, + .translate_insn = riscv_tr_translate_insn, + .tb_stop = riscv_tr_tb_stop, +}; + +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +{ + DisasContext ctx; + + memset(&ctx, 0, sizeof(ctx)); + translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns); +} + +void riscv_translate_init(struct uc_struct *uc) +{ + int i; + TCGContext *tcg_ctx = uc->tcg_ctx; + + /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */ + /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */ + /* registers, unless you specifically block reads/writes to reg 0 */ + tcg_ctx->cpu_gpr[0] = NULL; + + for (i = 1; i < 32; i++) { + tcg_ctx->cpu_gpr[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]); + } + + for (i = 0; i < 32; i++) { + tcg_ctx->cpu_fpr[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]); + } + + tcg_ctx->cpu_pc = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPURISCVState, pc), "pc"); + tcg_ctx->load_res = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPURISCVState, load_res), + "load_res"); + tcg_ctx->load_val = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPURISCVState, load_val), + "load_val"); +} diff --git a/qemu/target/riscv/unicorn.c b/qemu/target/riscv/unicorn.c new file mode 100644 index 00000000..4bf9f2b7 --- /dev/null +++ b/qemu/target/riscv/unicorn.c @@ -0,0 +1,351 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + +#include "uc_priv.h" +#include "sysemu/cpus.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "cpu_bits.h" +#include +#include "unicorn.h" + +RISCVCPU *cpu_riscv_init(struct uc_struct *uc, const char *cpu_model); + +static void riscv_set_pc(struct uc_struct *uc, uint64_t address) +{ + RISCV_CPU(uc->cpu)->env.pc = address; +} + +static void riscv_release(void *ctx) +{ + int i; + TCGContext *tcg_ctx = (TCGContext *)ctx; + RISCVCPU *cpu = (RISCVCPU *)tcg_ctx->uc->cpu; + CPUTLBDesc *d = cpu->neg.tlb.d; + CPUTLBDescFast *f = cpu->neg.tlb.f; + CPUTLBDesc *desc; + CPUTLBDescFast *fast; + + release_common(ctx); + for (i = 0; i < NB_MMU_MODES; i++) { + desc = &(d[i]); + fast = &(f[i]); + g_free(desc->iotlb); + g_free(fast->table); 
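
The args[1] patch in riscv_tr_translate_insn() above is what makes the size parameter of a UC_HOOK_CODE callback accurate: 2 for a compressed instruction, 4 for a full-width one. A hypothetical callback against the public API:

    #include <inttypes.h>
    #include <stdio.h>
    #include <unicorn/unicorn.h>

    /* The size argument is the value patched over the 0xF1F1F1F1
     * placeholder emitted by gen_uc_tracecode(). */
    static void hook_code(uc_engine *uc, uint64_t address, uint32_t size,
                          void *user_data)
    {
        printf("insn at 0x%" PRIx64 ", %u bytes\n", address, size);
    }
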
+ } +} + +void riscv_reg_reset(struct uc_struct *uc) +{ +} + +static void reg_read(CPURISCVState *env, unsigned int regid, void *value) +{ + switch(regid) { + case UC_RISCV_REG_X0: + case UC_RISCV_REG_X1: + case UC_RISCV_REG_X2: + case UC_RISCV_REG_X3: + case UC_RISCV_REG_X4: + case UC_RISCV_REG_X5: + case UC_RISCV_REG_X6: + case UC_RISCV_REG_X7: + case UC_RISCV_REG_X8: + case UC_RISCV_REG_X9: + case UC_RISCV_REG_X10: + case UC_RISCV_REG_X11: + case UC_RISCV_REG_X12: + case UC_RISCV_REG_X13: + case UC_RISCV_REG_X14: + case UC_RISCV_REG_X15: + case UC_RISCV_REG_X16: + case UC_RISCV_REG_X17: + case UC_RISCV_REG_X18: + case UC_RISCV_REG_X19: + case UC_RISCV_REG_X20: + case UC_RISCV_REG_X21: + case UC_RISCV_REG_X22: + case UC_RISCV_REG_X23: + case UC_RISCV_REG_X24: + case UC_RISCV_REG_X25: + case UC_RISCV_REG_X26: + case UC_RISCV_REG_X27: + case UC_RISCV_REG_X28: + case UC_RISCV_REG_X29: + case UC_RISCV_REG_X30: + case UC_RISCV_REG_X31: +#ifdef TARGET_RISCV64 + *(int64_t *)value = env->gpr[regid - UC_RISCV_REG_X0]; +#else + *(int32_t *)value = env->gpr[regid - UC_RISCV_REG_X0]; +#endif + break; + case UC_RISCV_REG_PC: +#ifdef TARGET_RISCV64 + *(int64_t *)value = env->pc; +#else + *(int32_t *)value = env->pc; +#endif + break; + + case UC_RISCV_REG_F0: // "ft0" + case UC_RISCV_REG_F1: // "ft1" + case UC_RISCV_REG_F2: // "ft2" + case UC_RISCV_REG_F3: // "ft3" + case UC_RISCV_REG_F4: // "ft4" + case UC_RISCV_REG_F5: // "ft5" + case UC_RISCV_REG_F6: // "ft6" + case UC_RISCV_REG_F7: // "ft7" + case UC_RISCV_REG_F8: // "fs0" + case UC_RISCV_REG_F9: // "fs1" + case UC_RISCV_REG_F10: // "fa0" + case UC_RISCV_REG_F11: // "fa1" + case UC_RISCV_REG_F12: // "fa2" + case UC_RISCV_REG_F13: // "fa3" + case UC_RISCV_REG_F14: // "fa4" + case UC_RISCV_REG_F15: // "fa5" + case UC_RISCV_REG_F16: // "fa6" + case UC_RISCV_REG_F17: // "fa7" + case UC_RISCV_REG_F18: // "fs2" + case UC_RISCV_REG_F19: // "fs3" + case UC_RISCV_REG_F20: // "fs4" + case UC_RISCV_REG_F21: // "fs5" + case UC_RISCV_REG_F22: // "fs6" + case UC_RISCV_REG_F23: // "fs7" + case UC_RISCV_REG_F24: // "fs8" + case UC_RISCV_REG_F25: // "fs9" + case UC_RISCV_REG_F26: // "fs10" + case UC_RISCV_REG_F27: // "fs11" + case UC_RISCV_REG_F28: // "ft8" + case UC_RISCV_REG_F29: // "ft9" + case UC_RISCV_REG_F30: // "ft10" + case UC_RISCV_REG_F31: // "ft11" +#ifdef TARGET_RISCV64 + *(int64_t *)value = env->fpr[regid - UC_RISCV_REG_F0]; +#else + *(int32_t *)value = env->fpr[regid - UC_RISCV_REG_F0]; +#endif + break; + default: + break; + } + + return; +} + +static void reg_write(CPURISCVState *env, unsigned int regid, const void *value) +{ + switch(regid) { + case UC_RISCV_REG_X0: + case UC_RISCV_REG_X1: + case UC_RISCV_REG_X2: + case UC_RISCV_REG_X3: + case UC_RISCV_REG_X4: + case UC_RISCV_REG_X5: + case UC_RISCV_REG_X6: + case UC_RISCV_REG_X7: + case UC_RISCV_REG_X8: + case UC_RISCV_REG_X9: + case UC_RISCV_REG_X10: + case UC_RISCV_REG_X11: + case UC_RISCV_REG_X12: + case UC_RISCV_REG_X13: + case UC_RISCV_REG_X14: + case UC_RISCV_REG_X15: + case UC_RISCV_REG_X16: + case UC_RISCV_REG_X17: + case UC_RISCV_REG_X18: + case UC_RISCV_REG_X19: + case UC_RISCV_REG_X20: + case UC_RISCV_REG_X21: + case UC_RISCV_REG_X22: + case UC_RISCV_REG_X23: + case UC_RISCV_REG_X24: + case UC_RISCV_REG_X25: + case UC_RISCV_REG_X26: + case UC_RISCV_REG_X27: + case UC_RISCV_REG_X28: + case UC_RISCV_REG_X29: + case UC_RISCV_REG_X30: + case UC_RISCV_REG_X31: +#ifdef TARGET_RISCV64 + env->gpr[regid - UC_RISCV_REG_X0] = *(uint64_t *)value; +#else + env->gpr[regid - UC_RISCV_REG_X0] = 
*(uint32_t *)value; +#endif + break; + case UC_RISCV_REG_PC: +#ifdef TARGET_RISCV64 + env->pc = *(uint64_t *)value; +#else + env->pc = *(uint32_t *)value; +#endif + break; + case UC_RISCV_REG_F0: // "ft0" + case UC_RISCV_REG_F1: // "ft1" + case UC_RISCV_REG_F2: // "ft2" + case UC_RISCV_REG_F3: // "ft3" + case UC_RISCV_REG_F4: // "ft4" + case UC_RISCV_REG_F5: // "ft5" + case UC_RISCV_REG_F6: // "ft6" + case UC_RISCV_REG_F7: // "ft7" + case UC_RISCV_REG_F8: // "fs0" + case UC_RISCV_REG_F9: // "fs1" + case UC_RISCV_REG_F10: // "fa0" + case UC_RISCV_REG_F11: // "fa1" + case UC_RISCV_REG_F12: // "fa2" + case UC_RISCV_REG_F13: // "fa3" + case UC_RISCV_REG_F14: // "fa4" + case UC_RISCV_REG_F15: // "fa5" + case UC_RISCV_REG_F16: // "fa6" + case UC_RISCV_REG_F17: // "fa7" + case UC_RISCV_REG_F18: // "fs2" + case UC_RISCV_REG_F19: // "fs3" + case UC_RISCV_REG_F20: // "fs4" + case UC_RISCV_REG_F21: // "fs5" + case UC_RISCV_REG_F22: // "fs6" + case UC_RISCV_REG_F23: // "fs7" + case UC_RISCV_REG_F24: // "fs8" + case UC_RISCV_REG_F25: // "fs9" + case UC_RISCV_REG_F26: // "fs10" + case UC_RISCV_REG_F27: // "fs11" + case UC_RISCV_REG_F28: // "ft8" + case UC_RISCV_REG_F29: // "ft9" + case UC_RISCV_REG_F30: // "ft10" + case UC_RISCV_REG_F31: // "ft11" +#ifdef TARGET_RISCV64 + env->fpr[regid - UC_RISCV_REG_F0] = *(uint64_t *)value; +#else + env->fpr[regid - UC_RISCV_REG_F0] = *(uint32_t *)value; +#endif + break; + default: + break; + } +} + +int riscv_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPURISCVState *env = &(RISCV_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +int riscv_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) +{ + CPURISCVState *env = &(RISCV_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + if(regid == UC_RISCV_REG_PC){ + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + } + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_RISCV32 +int riscv32_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +#else + /* TARGET_RISCV64 */ +int riscv64_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +#endif +{ + CPURISCVState *env = (CPURISCVState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_RISCV32 +int riscv32_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +#else + /* TARGET_RISCV64 */ +int riscv64_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +#endif +{ + CPURISCVState *env = (CPURISCVState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + } + + return 0; +} + +static bool riscv_stop_interrupt(struct uc_struct *uc, int intno) +{ + // detect stop exception + switch(intno){ + default: + return false; + case RISCV_EXCP_UNICORN_END: + return true; + case RISCV_EXCP_BREAKPOINT: + uc->invalid_error = UC_ERR_EXCEPTION; + return true; + } +} + +static bool riscv_insn_hook_validate(uint32_t insn_enum) +{ + return false; +} + +static int 
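
reg_read()/reg_write() above back the public register API; note that riscv_reg_write() additionally stops emulation when UC_RISCV_REG_PC is written, so that stale translation blocks are not executed. A minimal usage sketch (register choice and values are arbitrary):

    #include <unicorn/unicorn.h>

    int main(void)
    {
        uc_engine *uc;
        uint64_t t0 = 0x1234;

        if (uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc) != UC_ERR_OK)
            return 1;
        uc_reg_write(uc, UC_RISCV_REG_X5, &t0);  /* x5 is the ABI t0 */
        t0 = 0;
        uc_reg_read(uc, UC_RISCV_REG_X5, &t0);   /* reads back 0x1234 */
        uc_close(uc);
        return 0;
    }
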
riscv_cpus_init(struct uc_struct *uc, const char *cpu_model) +{ + + RISCVCPU *cpu; + + cpu = cpu_riscv_init(uc, cpu_model); + if (cpu == NULL) { + return -1; + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_RISCV32 +void riscv32_uc_init(struct uc_struct* uc) +#else + /* TARGET_RISCV64 */ +void riscv64_uc_init(struct uc_struct* uc) +#endif +{ + uc->reg_read = riscv_reg_read; + uc->reg_write = riscv_reg_write; + uc->reg_reset = riscv_reg_reset; + uc->release = riscv_release; + uc->set_pc = riscv_set_pc; + uc->stop_interrupt = riscv_stop_interrupt; + uc->insn_hook_validate = riscv_insn_hook_validate; + uc->cpus_init = riscv_cpus_init; + uc->cpu_context_size = offsetof(CPURISCVState, rdtime_fn); + uc_common_init(uc); +} diff --git a/qemu/target/riscv/unicorn.h b/qemu/target/riscv/unicorn.h new file mode 100644 index 00000000..58371737 --- /dev/null +++ b/qemu/target/riscv/unicorn.h @@ -0,0 +1,21 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + +#ifndef UC_QEMU_TARGET_RISCV_H +#define UC_QEMU_TARGET_RISCV_H + +// functions to read & write registers +int riscv_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); +int riscv_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); + +int riscv32_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int riscv32_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); +int riscv64_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int riscv64_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); + +void riscv_reg_reset(struct uc_struct *uc); + +void riscv32_uc_init(struct uc_struct* uc); +void riscv64_uc_init(struct uc_struct* uc); +#endif diff --git a/qemu/target/sparc/asi.h b/qemu/target/sparc/asi.h new file mode 100644 index 00000000..bb58735d --- /dev/null +++ b/qemu/target/sparc/asi.h @@ -0,0 +1,312 @@ +#ifndef SPARC_ASI_H +#define SPARC_ASI_H + +/* asi.h: Address Space Identifier values for the sparc. + * + * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu) + * + * Pioneer work for sun4m: Paul Hatchman (paul@sfe.com.au) + * Joint edition for sun4c+sun4m: Pete A. Zaitcev + */ + +/* The first batch are for the sun4c. */ + +#define ASI_NULL1 0x00 +#define ASI_NULL2 0x01 + +/* sun4c and sun4 control registers and mmu/vac ops */ +#define ASI_CONTROL 0x02 +#define ASI_SEGMAP 0x03 +#define ASI_PTE 0x04 +#define ASI_HWFLUSHSEG 0x05 +#define ASI_HWFLUSHPAGE 0x06 +#define ASI_REGMAP 0x06 +#define ASI_HWFLUSHCONTEXT 0x07 + +#define ASI_USERTXT 0x08 +#define ASI_KERNELTXT 0x09 +#define ASI_USERDATA 0x0a +#define ASI_KERNELDATA 0x0b + +/* VAC Cache flushing on sun4c and sun4 */ +#define ASI_FLUSHSEG 0x0c +#define ASI_FLUSHPG 0x0d +#define ASI_FLUSHCTX 0x0e + +/* SPARCstation-5: only 6 bits are decoded. */ +/* wo = Write Only, rw = Read Write; */ +/* ss = Single Size, as = All Sizes; */ +#define ASI_M_RES00 0x00 /* Don't touch... */ +#define ASI_M_UNA01 0x01 /* Same here... 
*/ +#define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */ +#define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */ +#define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */ +#define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */ +#define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */ +#define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */ +#define ASI_M_USERTXT 0x08 /* Same as ASI_USERTXT; rw, as */ +#define ASI_M_KERNELTXT 0x09 /* Same as ASI_KERNELTXT; rw, as */ +#define ASI_M_USERDATA 0x0A /* Same as ASI_USERDATA; rw, as */ +#define ASI_M_KERNELDATA 0x0B /* Same as ASI_KERNELDATA; rw, as */ +#define ASI_M_TXTC_TAG 0x0C /* Instruction Cache Tag; rw, ss */ +#define ASI_M_TXTC_DATA 0x0D /* Instruction Cache Data; rw, ss */ +#define ASI_M_DATAC_TAG 0x0E /* Data Cache Tag; rw, ss */ +#define ASI_M_DATAC_DATA 0x0F /* Data Cache Data; rw, ss */ + +/* The following cache flushing ASIs work only with the 'sta' + * instruction. Results are unpredictable for 'swap' and 'ldstuba', + * so don't do it. + */ + +/* These ASI flushes affect external caches too. */ +#define ASI_M_FLUSH_PAGE 0x10 /* Flush I&D Cache Line (page); wo, ss */ +#define ASI_M_FLUSH_SEG 0x11 /* Flush I&D Cache Line (seg); wo, ss */ +#define ASI_M_FLUSH_REGION 0x12 /* Flush I&D Cache Line (region); wo, ss */ +#define ASI_M_FLUSH_CTX 0x13 /* Flush I&D Cache Line (context); wo, ss */ +#define ASI_M_FLUSH_USER 0x14 /* Flush I&D Cache Line (user); wo, ss */ + +/* Block-copy operations are available only on certain V8 cpus. */ +#define ASI_M_BCOPY 0x17 /* Block copy */ + +/* These affect only the ICACHE and are Ross HyperSparc and TurboSparc specific. */ +#define ASI_M_IFLUSH_PAGE 0x18 /* Flush I Cache Line (page); wo, ss */ +#define ASI_M_IFLUSH_SEG 0x19 /* Flush I Cache Line (seg); wo, ss */ +#define ASI_M_IFLUSH_REGION 0x1A /* Flush I Cache Line (region); wo, ss */ +#define ASI_M_IFLUSH_CTX 0x1B /* Flush I Cache Line (context); wo, ss */ +#define ASI_M_IFLUSH_USER 0x1C /* Flush I Cache Line (user); wo, ss */ + +/* Block-fill operations are available on certain V8 cpus */ +#define ASI_M_BFILL 0x1F + +/* This allows direct access to main memory, actually 0x20 to 0x2f are + * the available ASI's for physical ram pass-through, but I don't have + * any idea what the other ones do.... + */ + +#define ASI_M_BYPASS 0x20 /* Reference MMU bypass; rw, as */ +#define ASI_M_FBMEM 0x29 /* Graphics card frame buffer access */ +#define ASI_M_VMEUS 0x2A /* VME user 16-bit access */ +#define ASI_M_VMEPS 0x2B /* VME priv 16-bit access */ +#define ASI_M_VMEUT 0x2C /* VME user 32-bit access */ +#define ASI_M_VMEPT 0x2D /* VME priv 32-bit access */ +#define ASI_M_SBUS 0x2E /* Direct SBus access */ +#define ASI_M_CTL 0x2F /* Control Space (ECC and MXCC are here) */ + + +/* This is ROSS HyperSparc only. */ +#define ASI_M_FLUSH_IWHOLE 0x31 /* Flush entire ICACHE; wo, ss */ + +/* Tsunami/Viking/TurboSparc i/d cache flash clear. 
*/ +#define ASI_M_IC_FLCLEAR 0x36 +#define ASI_M_DC_FLCLEAR 0x37 + +#define ASI_M_DCDR 0x39 /* Data Cache Diagnostics Register rw, ss */ + +#define ASI_M_VIKING_TMP1 0x40 /* Emulation temporary 1 on Viking */ +/* only available on SuperSparc I */ +/* #define ASI_M_VIKING_TMP2 0x41 */ /* Emulation temporary 2 on Viking */ + +#define ASI_M_ACTION 0x4c /* Breakpoint Action Register (GNU/Viking) */ + +/* LEON ASI */ +#define ASI_LEON_NOCACHE 0x01 + +#define ASI_LEON_DCACHE_MISS 0x01 + +#define ASI_LEON_CACHEREGS 0x02 +#define ASI_LEON_IFLUSH 0x10 +#define ASI_LEON_DFLUSH 0x11 + +#define ASI_LEON_MMUFLUSH 0x18 +#define ASI_LEON_MMUREGS 0x19 +#define ASI_LEON_BYPASS 0x1c +#define ASI_LEON_FLUSH_PAGE 0x10 + +/* V9 Architecture mandatory ASIs. */ +#define ASI_N 0x04 /* Nucleus */ +#define ASI_NL 0x0c /* Nucleus, little endian */ +#define ASI_AIUP 0x10 /* Primary, user */ +#define ASI_AIUS 0x11 /* Secondary, user */ +#define ASI_AIUPL 0x18 /* Primary, user, little endian */ +#define ASI_AIUSL 0x19 /* Secondary, user, little endian */ +#define ASI_P 0x80 /* Primary, implicit */ +#define ASI_S 0x81 /* Secondary, implicit */ +#define ASI_PNF 0x82 /* Primary, no fault */ +#define ASI_SNF 0x83 /* Secondary, no fault */ +#define ASI_PL 0x88 /* Primary, implicit, l-endian */ +#define ASI_SL 0x89 /* Secondary, implicit, l-endian */ +#define ASI_PNFL 0x8a /* Primary, no fault, l-endian */ +#define ASI_SNFL 0x8b /* Secondary, no fault, l-endian */ + +/* SpitFire and later extended ASIs. The "(III)" marker designates + * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates + * Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific + * ASIs, "(4V)" designates SUN4V specific ASIs. "(NG4)" designates SPARC-T4 + * and later ASIs. + */ +#define ASI_REAL 0x14 /* Real address, cachable */ +#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */ +#define ASI_REAL_IO 0x15 /* Real address, non-cachable */ +#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */ +#define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */ +#define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */ +#define ASI_REAL_L 0x1c /* Real address, cachable, LE */ +#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/ +#define ASI_REAL_IO_L 0x1d /* Real address, non-cachable, LE */ +#define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */ +#define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/ +#define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */ +#define ASI_SCRATCHPAD 0x20 /* (4V) Scratch Pad Registers */ +#define ASI_MMU 0x21 /* (4V) MMU Context Registers */ +#define ASI_TWINX_AIUP 0x22 /* twin load, primary user */ +#define ASI_TWINX_AIUS 0x23 /* twin load, secondary user */ +#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load, + * secondary, user + */ +#define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */ +#define ASI_QUEUE 0x25 /* (4V) Interrupt Queue Registers */ +#define ASI_TWINX_REAL 0x26 /* twin load, real, cachable */ +#define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */ +#define ASI_TWINX_N 0x27 /* twin load, nucleus */ +#define ASI_TWINX_AIUP_L 0x2a /* twin load, primary user, LE */ +#define ASI_TWINX_AIUS_L 0x2b /* twin load, secondary user, LE */ +#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */ +#define ASI_TWINX_REAL_L 0x2e /* twin load, real, cachable, LE */ +#define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */ +#define ASI_TWINX_NL 0x2f /* twin load,
nucleus, LE */ +#define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */ +#define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */ +#define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */ +#define ASI_PCACHE_SNOOP_TAG 0x33 /* (III) PCache snoop tag RAM diag */ +#define ASI_QUAD_LDD_PHYS 0x34 /* (III+) PADDR, qword load */ +#define ASI_WCACHE_VALID_BITS 0x38 /* (III) WCache Valid Bits diag */ +#define ASI_WCACHE_DATA 0x39 /* (III) WCache data RAM diag */ +#define ASI_WCACHE_TAG 0x3a /* (III) WCache tag RAM diag */ +#define ASI_WCACHE_SNOOP_TAG 0x3b /* (III) WCache snoop tag RAM diag */ +#define ASI_QUAD_LDD_PHYS_L 0x3c /* (III+) PADDR, qw-load, l-endian */ +#define ASI_SRAM_FAST_INIT 0x40 /* (III+) Fast SRAM init */ +#define ASI_CORE_AVAILABLE 0x41 /* (CMT) LP Available */ +#define ASI_CORE_ENABLE_STAT 0x41 /* (CMT) LP Enable Status */ +#define ASI_CORE_ENABLE 0x41 /* (CMT) LP Enable RW */ +#define ASI_XIR_STEERING 0x41 /* (CMT) XIR Steering RW */ +#define ASI_CORE_RUNNING_RW 0x41 /* (CMT) LP Running RW */ +#define ASI_CORE_RUNNING_W1S 0x41 /* (CMT) LP Running Write-One Set */ +#define ASI_CORE_RUNNING_W1C 0x41 /* (CMT) LP Running Write-One Clr */ +#define ASI_CORE_RUNNING_STAT 0x41 /* (CMT) LP Running Status */ +#define ASI_CMT_ERROR_STEERING 0x41 /* (CMT) Error Steering RW */ +#define ASI_DCACHE_INVALIDATE 0x42 /* (III) DCache Invalidate diag */ +#define ASI_DCACHE_UTAG 0x43 /* (III) DCache uTag diag */ +#define ASI_DCACHE_SNOOP_TAG 0x44 /* (III) DCache snoop tag RAM diag */ +#define ASI_LSU_CONTROL 0x45 /* Load-store control unit */ +#define ASI_DCU_CONTROL_REG 0x45 /* (III) DCache Unit Control reg */ +#define ASI_DCACHE_DATA 0x46 /* DCache data-ram diag access */ +#define ASI_DCACHE_TAG 0x47 /* Dcache tag/valid ram diag access*/ +#define ASI_INTR_DISPATCH_STAT 0x48 /* IRQ vector dispatch status */ +#define ASI_INTR_RECEIVE 0x49 /* IRQ vector receive status */ +#define ASI_UPA_CONFIG 0x4a /* UPA config space */ +#define ASI_JBUS_CONFIG 0x4a /* (IIIi) JBUS Config Register */ +#define ASI_SAFARI_CONFIG 0x4a /* (III) Safari Config Register */ +#define ASI_SAFARI_ADDRESS 0x4a /* (III) Safari Address Register */ +#define ASI_ESTATE_ERROR_EN 0x4b /* E-cache error enable space */ +#define ASI_AFSR 0x4c /* Async fault status register */ +#define ASI_AFAR 0x4d /* Async fault address register */ +#define ASI_EC_TAG_DATA 0x4e /* E-cache tag/valid ram diag acc */ +#define ASI_HYP_SCRATCHPAD 0x4f /* (4V) Hypervisor scratchpad */ +#define ASI_IMMU 0x50 /* Insn-MMU main register space */ +#define ASI_IMMU_TSB_8KB_PTR 0x51 /* Insn-MMU 8KB TSB pointer reg */ +#define ASI_IMMU_TSB_64KB_PTR 0x52 /* Insn-MMU 64KB TSB pointer reg */ +#define ASI_ITLB_DATA_IN 0x54 /* Insn-MMU TLB data in reg */ +#define ASI_ITLB_DATA_ACCESS 0x55 /* Insn-MMU TLB data access reg */ +#define ASI_ITLB_TAG_READ 0x56 /* Insn-MMU TLB tag read reg */ +#define ASI_IMMU_DEMAP 0x57 /* Insn-MMU TLB demap */ +#define ASI_DMMU 0x58 /* Data-MMU main register space */ +#define ASI_DMMU_TSB_8KB_PTR 0x59 /* Data-MMU 8KB TSB pointer reg */ +#define ASI_DMMU_TSB_64KB_PTR 0x5a /* Data-MMU 64KB TSB pointer reg */ +#define ASI_DMMU_TSB_DIRECT_PTR 0x5b /* Data-MMU TSB direct pointer reg */ +#define ASI_DTLB_DATA_IN 0x5c /* Data-MMU TLB data in reg */ +#define ASI_DTLB_DATA_ACCESS 0x5d /* Data-MMU TLB data access reg */ +#define ASI_DTLB_TAG_READ 0x5e /* Data-MMU TLB tag read reg */ +#define ASI_DMMU_DEMAP 0x5f /* Data-MMU TLB demap */ +#define ASI_IIU_INST_TRAP 0x60 /* (III) Instruction Breakpoint */ +#define ASI_INTR_ID
0x63 /* (CMT) Interrupt ID register */ +#define ASI_CORE_ID 0x63 /* (CMT) LP ID register */ +#define ASI_CESR_ID 0x63 /* (CMT) CESR ID register */ +#define ASI_IC_INSTR 0x66 /* Insn cache instruction ram diag */ +#define ASI_IC_TAG 0x67 /* Insn cache tag/valid ram diag */ +#define ASI_IC_STAG 0x68 /* (III) Insn cache snoop tag ram */ +#define ASI_IC_PRE_DECODE 0x6e /* Insn cache pre-decode ram diag */ +#define ASI_IC_NEXT_FIELD 0x6f /* Insn cache next-field ram diag */ +#define ASI_BRPRED_ARRAY 0x6f /* (III) Branch Prediction RAM diag*/ +#define ASI_BLK_AIUP 0x70 /* Primary, user, block load/store */ +#define ASI_BLK_AIUS 0x71 /* Secondary, user, block ld/st */ +#define ASI_MCU_CTRL_REG 0x72 /* (III) Memory controller regs */ +#define ASI_EC_DATA 0x74 /* (III) E-cache data staging reg */ +#define ASI_EC_CTRL 0x75 /* (III) E-cache control reg */ +#define ASI_EC_W 0x76 /* E-cache diag write access */ +#define ASI_UDB_ERROR_W 0x77 /* External UDB error regs W */ +#define ASI_UDB_CONTROL_W 0x77 /* External UDB control regs W */ +#define ASI_INTR_W 0x77 /* IRQ vector dispatch write */ +#define ASI_INTR_DATAN_W 0x77 /* (III) Out irq vector data reg N */ +#define ASI_INTR_DISPATCH_W 0x77 /* (III) Interrupt vector dispatch */ +#define ASI_BLK_AIUPL 0x78 /* Primary, user, little, blk ld/st*/ +#define ASI_BLK_AIUSL 0x79 /* Secondary, user, little, blk ld/st*/ +#define ASI_EC_R 0x7e /* E-cache diag read access */ +#define ASI_UDBH_ERROR_R 0x7f /* External UDB error regs rd hi */ +#define ASI_UDBL_ERROR_R 0x7f /* External UDB error regs rd low */ +#define ASI_UDBH_CONTROL_R 0x7f /* External UDB control regs rd hi */ +#define ASI_UDBL_CONTROL_R 0x7f /* External UDB control regs rd low*/ +#define ASI_INTR_R 0x7f /* IRQ vector dispatch read */ +#define ASI_INTR_DATAN_R 0x7f /* (III) In irq vector data reg N */ +#define ASI_PIC 0xb0 /* (NG4) PIC registers */ +#define ASI_PST8_P 0xc0 /* Primary, 8 8-bit, partial */ +#define ASI_PST8_S 0xc1 /* Secondary, 8 8-bit, partial */ +#define ASI_PST16_P 0xc2 /* Primary, 4 16-bit, partial */ +#define ASI_PST16_S 0xc3 /* Secondary, 4 16-bit, partial */ +#define ASI_PST32_P 0xc4 /* Primary, 2 32-bit, partial */ +#define ASI_PST32_S 0xc5 /* Secondary, 2 32-bit, partial */ +#define ASI_PST8_PL 0xc8 /* Primary, 8 8-bit, partial, L */ +#define ASI_PST8_SL 0xc9 /* Secondary, 8 8-bit, partial, L */ +#define ASI_PST16_PL 0xca /* Primary, 4 16-bit, partial, L */ +#define ASI_PST16_SL 0xcb /* Secondary, 4 16-bit, partial, L */ +#define ASI_PST32_PL 0xcc /* Primary, 2 32-bit, partial, L */ +#define ASI_PST32_SL 0xcd /* Secondary, 2 32-bit, partial, L */ +#define ASI_FL8_P 0xd0 /* Primary, 1 8-bit, fpu ld/st */ +#define ASI_FL8_S 0xd1 /* Secondary, 1 8-bit, fpu ld/st */ +#define ASI_FL16_P 0xd2 /* Primary, 1 16-bit, fpu ld/st */ +#define ASI_FL16_S 0xd3 /* Secondary, 1 16-bit, fpu ld/st */ +#define ASI_FL8_PL 0xd8 /* Primary, 1 8-bit, fpu ld/st, L */ +#define ASI_FL8_SL 0xd9 /* Secondary, 1 8-bit, fpu ld/st, L*/ +#define ASI_FL16_PL 0xda /* Primary, 1 16-bit, fpu ld/st, L */ +#define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/ +#define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */ +#define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */ +#define ASI_TWINX_P 0xe2 /* twin load, primary implicit */ +#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load, + * primary, implicit */ +#define ASI_TWINX_S 0xe3 /* twin load, secondary implicit */ +#define ASI_BLK_INIT_QUAD_LDD_S 0xe3 /* (NG) init-store, twin load, + * secondary, implicit */ +#define
ASI_TWINX_PL 0xea /* twin load, primary implicit, LE */ +#define ASI_TWINX_SL 0xeb /* twin load, secondary implicit, LE */ +#define ASI_BLK_P 0xf0 /* Primary, blk ld/st */ +#define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */ +#define ASI_ST_BLKINIT_MRU_P 0xf2 /* (NG4) init-store, twin load, + * Most-Recently-Used, primary, + * implicit + */ +#define ASI_ST_BLKINIT_MRU_S 0xf2 /* (NG4) init-store, twin load, + * Most-Recently-Used, secondary, + * implicit + */ +#define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */ +#define ASI_BLK_SL 0xf9 /* Secondary, blk ld/st, little */ +#define ASI_ST_BLKINIT_MRU_PL 0xfa /* (NG4) init-store, twin load, + * Most-Recently-Used, primary, + * implicit, little-endian + */ +#define ASI_ST_BLKINIT_MRU_SL 0xfb /* (NG4) init-store, twin load, + * Most-Recently-Used, secondary, + * implicit, little-endian + */ + +#endif /* SPARC_ASI_H */ diff --git a/qemu/target-sparc/cc_helper.c b/qemu/target/sparc/cc_helper.c similarity index 85% rename from qemu/target-sparc/cc_helper.c rename to qemu/target/sparc/cc_helper.c index 69823b70..a410a0b9 100644 --- a/qemu/target-sparc/cc_helper.c +++ b/qemu/target/sparc/cc_helper.c @@ -17,14 +17,10 @@ * License along with this library; if not, see . */ +#include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -static uint32_t compute_null(CPUSPARCState *env) -{ - return 0; -} - static uint32_t compute_all_flags(CPUSPARCState *env) { return env->psr & PSR_ICC; @@ -204,10 +200,7 @@ static uint32_t compute_all_addx_xcc(CPUSPARCState *env) static uint32_t compute_C_addx_xcc(CPUSPARCState *env) { - uint32_t ret; - - ret = get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2); - return ret; + return get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2); } #endif @@ -223,10 +216,7 @@ static uint32_t compute_all_addx(CPUSPARCState *env) static uint32_t compute_C_addx(CPUSPARCState *env) { - uint32_t ret; - - ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2); - return ret; + return get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2); } static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2) @@ -369,10 +359,7 @@ static uint32_t compute_all_subx_xcc(CPUSPARCState *env) static uint32_t compute_C_subx_xcc(CPUSPARCState *env) { - uint32_t ret; - - ret = get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2); - return ret; + return get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2); } #endif @@ -388,10 +375,7 @@ static uint32_t compute_all_subx(CPUSPARCState *env) static uint32_t compute_C_subx(CPUSPARCState *env) { - uint32_t ret; - - ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2); - return ret; + return get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2); } static uint32_t compute_all_tsub(CPUSPARCState *env) @@ -438,35 +422,33 @@ typedef struct CCTable { static const CCTable icc_table[CC_OP_NB] = { /* CC_OP_DYNAMIC should never happen */ - { compute_null, compute_null }, - { compute_all_flags, compute_C_flags }, - { compute_all_div, compute_C_div }, - { compute_all_add, compute_C_add }, - { compute_all_addx, compute_C_addx }, - { compute_all_tadd, compute_C_add }, - { compute_all_taddtv, compute_C_add }, - { compute_all_sub, compute_C_sub }, - { compute_all_subx, compute_C_subx }, - { compute_all_tsub, compute_C_sub }, - { compute_all_tsubtv, compute_C_sub }, - { compute_all_logic, compute_C_logic }, + [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags }, + [CC_OP_DIV] = { compute_all_div, compute_C_div }, + [CC_OP_ADD] = { compute_all_add, compute_C_add }, + [CC_OP_ADDX] = { compute_all_addx, compute_C_addx }, + [CC_OP_TADD] = { compute_all_tadd, compute_C_add }, + [CC_OP_TADDTV] = { 
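
The icc_table/xcc_table rework in this hunk replaces positional entries with C99 designated initializers, so the CC_OP_DYNAMIC slot (index 0) is left zero-initialized and the old compute_null placeholders can be dropped. The pattern in isolation (names are ours):

    #include <assert.h>

    typedef int (*compute_fn)(void);
    static int all_flags(void) { return 1; }

    /* Index 0 is never named, so it is implicitly zero-initialized
     * (a NULL function pointer), matching "should never happen". */
    static const compute_fn table[3] = {
        [1] = all_flags,
    };

    int main(void)
    {
        assert(table[0] == 0 && table[1] == all_flags);
        return 0;
    }
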
compute_all_taddtv, compute_C_add }, + [CC_OP_SUB] = { compute_all_sub, compute_C_sub }, + [CC_OP_SUBX] = { compute_all_subx, compute_C_subx }, + [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub }, + [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub }, + [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic }, }; #ifdef TARGET_SPARC64 static const CCTable xcc_table[CC_OP_NB] = { /* CC_OP_DYNAMIC should never happen */ - { compute_null, compute_null }, - { compute_all_flags_xcc, compute_C_flags_xcc }, - { compute_all_logic_xcc, compute_C_logic }, - { compute_all_add_xcc, compute_C_add_xcc }, - { compute_all_addx_xcc, compute_C_addx_xcc }, - { compute_all_add_xcc, compute_C_add_xcc }, - { compute_all_add_xcc, compute_C_add_xcc }, - { compute_all_sub_xcc, compute_C_sub_xcc }, - { compute_all_subx_xcc, compute_C_subx_xcc }, - { compute_all_sub_xcc, compute_C_sub_xcc }, - { compute_all_sub_xcc, compute_C_sub_xcc }, - { compute_all_logic_xcc, compute_C_logic }, + [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc }, + [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic }, + [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc }, + [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc }, + [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc }, + [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc }, + [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc }, + [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc }, + [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc }, + [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc }, + [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic }, }; #endif @@ -485,8 +467,5 @@ void helper_compute_psr(CPUSPARCState *env) uint32_t helper_compute_C_icc(CPUSPARCState *env) { - uint32_t ret; - - ret = icc_table[CC_OP].compute_c(env) >> PSR_CARRY_SHIFT; - return ret; + return icc_table[CC_OP].compute_c(env) >> PSR_CARRY_SHIFT; } diff --git a/qemu/target/sparc/cpu-param.h b/qemu/target/sparc/cpu-param.h new file mode 100644 index 00000000..4746d894 --- /dev/null +++ b/qemu/target/sparc/cpu-param.h @@ -0,0 +1,28 @@ +/* + * Sparc cpu parameters for qemu. + * + * SPDX-License-Identifier: LGPL-2.0+ + */ + +#ifndef SPARC_CPU_PARAM_H +#define SPARC_CPU_PARAM_H 1 + +#ifdef TARGET_SPARC64 +# define TARGET_LONG_BITS 64 +# define TARGET_PAGE_BITS 13 /* 8k */ +# define TARGET_PHYS_ADDR_SPACE_BITS 41 +# ifdef TARGET_ABI32 +# define TARGET_VIRT_ADDR_SPACE_BITS 32 +# else +# define TARGET_VIRT_ADDR_SPACE_BITS 44 +# endif +# define NB_MMU_MODES 6 +#else +# define TARGET_LONG_BITS 32 +# define TARGET_PAGE_BITS 12 /* 4k */ +# define TARGET_PHYS_ADDR_SPACE_BITS 36 +# define TARGET_VIRT_ADDR_SPACE_BITS 32 +# define NB_MMU_MODES 3 +#endif + +#endif diff --git a/qemu/target/sparc/cpu-qom.h b/qemu/target/sparc/cpu-qom.h new file mode 100644 index 00000000..8483ce06 --- /dev/null +++ b/qemu/target/sparc/cpu-qom.h @@ -0,0 +1,48 @@ +/* + * QEMU SPARC CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
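/* [editor's note] A minimal sketch of how the cc_helper.c tables above are
 * consumed, assuming `env` is any live CPUSPARCState. Switching to C99
 * designated initializers ties each CCTable entry to its CC_OP_* enumerator,
 * so reordering or extending the enum can no longer silently misalign the
 * dispatch; the old positional form also needed a compute_null stub for
 * CC_OP_DYNAMIC, whereas the designated array simply leaves that slot NULL
 * (it must never be reached). Consumers dispatch through the table instead
 * of recomputing flags after every arithmetic instruction: */
uint32_t all_icc = icc_table[CC_OP].compute_all(env); /* every icc bit   */
uint32_t carry   = icc_table[CC_OP].compute_c(env);   /* carry bit only  */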
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ +#ifndef QEMU_SPARC_CPU_QOM_H +#define QEMU_SPARC_CPU_QOM_H + +#include "hw/core/cpu.h" + +#define SPARC_CPU(obj) ((SPARCCPU *)obj) +#define SPARC_CPU_CLASS(klass) ((SPARCCPUClass *)klass) +#define SPARC_CPU_GET_CLASS(obj) (&((SPARCCPU *)obj)->cc) + +typedef struct sparc_def_t sparc_def_t; +/** + * SPARCCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * A SPARC CPU model. + */ +typedef struct SPARCCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + void (*parent_reset)(CPUState *cpu); + const sparc_def_t *cpu_def; +} SPARCCPUClass; + +typedef struct SPARCCPU SPARCCPU; + +#endif diff --git a/qemu/target/sparc/cpu.c b/qemu/target/sparc/cpu.c new file mode 100644 index 00000000..6d7f232d --- /dev/null +++ b/qemu/target/sparc/cpu.c @@ -0,0 +1,571 @@ +/* + * Sparc CPU init helpers + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" + +static void sparc_cpu_reset(CPUState *dev) +{ + CPUState *s = CPU(dev); + SPARCCPU *cpu = SPARC_CPU(s); + SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(cpu); + CPUSPARCState *env = &cpu->env; + + scc->parent_reset(dev); + + memset(env, 0, offsetof(CPUSPARCState, end_reset_fields)); + env->cwp = 0; +#ifndef TARGET_SPARC64 + env->wim = 1; +#endif + env->regwptr = env->regbase + (env->cwp * 16); + CC_OP = CC_OP_FLAGS; +#if !defined(TARGET_SPARC64) + env->psret = 0; + env->psrs = 1; + env->psrps = 1; +#endif +#ifdef TARGET_SPARC64 + env->pstate = PS_PRIV | PS_RED | PS_PEF; + if (!cpu_has_hypervisor(env)) { + env->pstate |= PS_AG; + } + env->hpstate = cpu_has_hypervisor(env) ? 
HS_PRIV : 0; + env->tl = env->maxtl; + env->gl = 2; + cpu_tsptr(env)->tt = TT_POWER_ON_RESET; + env->lsu = 0; +#else + env->mmuregs[0] &= ~(MMU_E | MMU_NF); + env->mmuregs[0] |= env->def.mmu_bm; +#endif + env->pc = 0; + env->npc = env->pc + 4; + env->cache_control = 0; +} + +static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + if (interrupt_request & CPU_INTERRUPT_HARD) { + SPARCCPU *cpu = SPARC_CPU(cs); + CPUSPARCState *env = &cpu->env; + + if (cpu_interrupts_enabled(env) && env->interrupt_index > 0) { + int pil = env->interrupt_index & 0xf; + int type = env->interrupt_index & 0xf0; + + if (type != TT_EXTINT || cpu_pil_allowed(env, pil)) { + cs->exception_index = env->interrupt_index; + sparc_cpu_do_interrupt(cs); + return true; + } + } + } + return false; +} + +void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu) +{ +#if !defined(TARGET_SPARC64) + env->mxccregs[7] = ((cpu + 8) & 0xf) << 24; +#endif +} + +static const sparc_def_t sparc_defs[] = { +#ifdef TARGET_SPARC64 + { + .name = "Fujitsu Sparc64", + .iu_version = ((0x04ULL << 48) | (0x02ULL << 32) | (0ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 4, + .maxtl = 4, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "Fujitsu Sparc64 III", + .iu_version = ((0x04ULL << 48) | (0x03ULL << 32) | (0ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 5, + .maxtl = 4, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "Fujitsu Sparc64 IV", + .iu_version = ((0x04ULL << 48) | (0x04ULL << 32) | (0ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "Fujitsu Sparc64 V", + .iu_version = ((0x04ULL << 48) | (0x05ULL << 32) | (0x51ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI UltraSparc I", + .iu_version = ((0x17ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI UltraSparc II", + .iu_version = ((0x17ULL << 48) | (0x11ULL << 32) | (0x20ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI UltraSparc IIi", + .iu_version = ((0x17ULL << 48) | (0x12ULL << 32) | (0x91ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI UltraSparc IIe", + .iu_version = ((0x17ULL << 48) | (0x13ULL << 32) | (0x14ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "Sun UltraSparc III", + .iu_version = ((0x3eULL << 48) | (0x14ULL << 32) | (0x34ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "Sun UltraSparc III Cu", + .iu_version = ((0x3eULL << 48) | (0x15ULL << 32) | (0x41ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_3, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "Sun UltraSparc IIIi", + .iu_version = ((0x3eULL << 48) | (0x16ULL << 32) | (0x34ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 8, + .maxtl = 
5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "Sun UltraSparc IV", + .iu_version = ((0x3eULL << 48) | (0x18ULL << 32) | (0x31ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_4, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "Sun UltraSparc IV+", + .iu_version = ((0x3eULL << 48) | (0x19ULL << 32) | (0x22ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_CMT, + }, + { + .name = "Sun UltraSparc IIIi+", + .iu_version = ((0x3eULL << 48) | (0x22ULL << 32) | (0ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_3, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "Sun UltraSparc T1", + /* defined in sparc_ifu_fdp.v and ctu.h */ + .iu_version = ((0x3eULL << 48) | (0x23ULL << 32) | (0x02ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_sun4v, + .nwindows = 8, + .maxtl = 6, + .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT + | CPU_FEATURE_GL, + }, + { + .name = "Sun UltraSparc T2", + /* defined in tlu_asi_ctl.v and n2_revid_cust.v */ + .iu_version = ((0x3eULL << 48) | (0x24ULL << 32) | (0x02ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_sun4v, + .nwindows = 8, + .maxtl = 6, + .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT + | CPU_FEATURE_GL, + }, + { + .name = "NEC UltraSparc I", + .iu_version = ((0x22ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)), + .fpu_version = 0x00000000, + .mmu_version = mmu_us_12, + .nwindows = 8, + .maxtl = 5, + .features = CPU_DEFAULT_FEATURES, + }, +#else + { + .name = "Fujitsu MB86904", + .iu_version = 0x04 << 24, /* Impl 0, ver 4 */ + .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ + .mmu_version = 0x04 << 24, /* Impl 0, ver 4 */ + .mmu_bm = 0x00004000, + .mmu_ctpr_mask = 0x00ffffc0, + .mmu_cxr_mask = 0x000000ff, + .mmu_sfsr_mask = 0x00016fff, + .mmu_trcr_mask = 0x00ffffff, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "Fujitsu MB86907", + .iu_version = 0x05 << 24, /* Impl 0, ver 5 */ + .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ + .mmu_version = 0x05 << 24, /* Impl 0, ver 5 */ + .mmu_bm = 0x00004000, + .mmu_ctpr_mask = 0xffffffc0, + .mmu_cxr_mask = 0x000000ff, + .mmu_sfsr_mask = 0x00016fff, + .mmu_trcr_mask = 0xffffffff, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI MicroSparc I", + .iu_version = 0x41000000, + .fpu_version = 4 << 17, + .mmu_version = 0x41000000, + .mmu_bm = 0x00004000, + .mmu_ctpr_mask = 0x007ffff0, + .mmu_cxr_mask = 0x0000003f, + .mmu_sfsr_mask = 0x00016fff, + .mmu_trcr_mask = 0x0000003f, + .nwindows = 7, + .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL | + CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | + CPU_FEATURE_FMUL, + }, + { + .name = "TI MicroSparc II", + .iu_version = 0x42000000, + .fpu_version = 4 << 17, + .mmu_version = 0x02000000, + .mmu_bm = 0x00004000, + .mmu_ctpr_mask = 0x00ffffc0, + .mmu_cxr_mask = 0x000000ff, + .mmu_sfsr_mask = 0x00016fff, + .mmu_trcr_mask = 0x00ffffff, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI MicroSparc IIep", + .iu_version = 0x42000000, + .fpu_version = 4 << 17, + .mmu_version = 0x04000000, + .mmu_bm = 0x00004000, + .mmu_ctpr_mask = 0x00ffffc0, + .mmu_cxr_mask = 0x000000ff, + .mmu_sfsr_mask = 0x00016bff, + .mmu_trcr_mask = 0x00ffffff, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES, + }, + { + 
.name = "TI SuperSparc 40", /* STP1020NPGA */ + .iu_version = 0x41000000, /* SuperSPARC 2.x */ + .fpu_version = 0 << 17, + .mmu_version = 0x00000800, /* SuperSPARC 2.x, no MXCC */ + .mmu_bm = 0x00002000, + .mmu_ctpr_mask = 0xffffffc0, + .mmu_cxr_mask = 0x0000ffff, + .mmu_sfsr_mask = 0xffffffff, + .mmu_trcr_mask = 0xffffffff, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI SuperSparc 50", /* STP1020PGA */ + .iu_version = 0x40000000, /* SuperSPARC 3.x */ + .fpu_version = 0 << 17, + .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */ + .mmu_bm = 0x00002000, + .mmu_ctpr_mask = 0xffffffc0, + .mmu_cxr_mask = 0x0000ffff, + .mmu_sfsr_mask = 0xffffffff, + .mmu_trcr_mask = 0xffffffff, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI SuperSparc 51", + .iu_version = 0x40000000, /* SuperSPARC 3.x */ + .fpu_version = 0 << 17, + .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */ + .mmu_bm = 0x00002000, + .mmu_ctpr_mask = 0xffffffc0, + .mmu_cxr_mask = 0x0000ffff, + .mmu_sfsr_mask = 0xffffffff, + .mmu_trcr_mask = 0xffffffff, + .mxcc_version = 0x00000104, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI SuperSparc 60", /* STP1020APGA */ + .iu_version = 0x40000000, /* SuperSPARC 3.x */ + .fpu_version = 0 << 17, + .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */ + .mmu_bm = 0x00002000, + .mmu_ctpr_mask = 0xffffffc0, + .mmu_cxr_mask = 0x0000ffff, + .mmu_sfsr_mask = 0xffffffff, + .mmu_trcr_mask = 0xffffffff, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI SuperSparc 61", + .iu_version = 0x44000000, /* SuperSPARC 3.x */ + .fpu_version = 0 << 17, + .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */ + .mmu_bm = 0x00002000, + .mmu_ctpr_mask = 0xffffffc0, + .mmu_cxr_mask = 0x0000ffff, + .mmu_sfsr_mask = 0xffffffff, + .mmu_trcr_mask = 0xffffffff, + .mxcc_version = 0x00000104, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "TI SuperSparc II", + .iu_version = 0x40000000, /* SuperSPARC II 1.x */ + .fpu_version = 0 << 17, + .mmu_version = 0x08000000, /* SuperSPARC II 1.x, MXCC */ + .mmu_bm = 0x00002000, + .mmu_ctpr_mask = 0xffffffc0, + .mmu_cxr_mask = 0x0000ffff, + .mmu_sfsr_mask = 0xffffffff, + .mmu_trcr_mask = 0xffffffff, + .mxcc_version = 0x00000104, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES, + }, + { + .name = "LEON2", + .iu_version = 0xf2000000, + .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ + .mmu_version = 0xf2000000, + .mmu_bm = 0x00004000, + .mmu_ctpr_mask = 0x007ffff0, + .mmu_cxr_mask = 0x0000003f, + .mmu_sfsr_mask = 0xffffffff, + .mmu_trcr_mask = 0xffffffff, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN, + }, + { + .name = "LEON3", + .iu_version = 0xf3000000, + .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */ + .mmu_version = 0xf3000000, + .mmu_bm = 0x00000000, + .mmu_ctpr_mask = 0xfffffffc, + .mmu_cxr_mask = 0x000000ff, + .mmu_sfsr_mask = 0xffffffff, + .mmu_trcr_mask = 0xffffffff, + .nwindows = 8, + .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN | + CPU_FEATURE_ASR17 | CPU_FEATURE_CACHE_CTRL | CPU_FEATURE_POWERDOWN | + CPU_FEATURE_CASA, + }, +#endif +}; + +static void sparc_cpu_set_pc(CPUState *cs, vaddr value) +{ + SPARCCPU *cpu = SPARC_CPU(cs); + + cpu->env.pc = value; + cpu->env.npc = value + 4; +} + +static void sparc_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) +{ + SPARCCPU *cpu = SPARC_CPU(cs); + + cpu->env.pc = tb->pc; + cpu->env.npc = tb->cs_base; +} + +static bool 
sparc_cpu_has_work(CPUState *cs) +{ + SPARCCPU *cpu = SPARC_CPU(cs); + CPUSPARCState *env = &cpu->env; + + return (cs->interrupt_request & CPU_INTERRUPT_HARD) && + cpu_interrupts_enabled(env); +} + +static void sparc_cpu_realizefn(struct uc_struct *uc, CPUState *dev) +{ + CPUState *cs = CPU(dev); + SPARCCPU *cpu = SPARC_CPU(dev); + CPUSPARCState *env = &cpu->env; + + env->version = env->def.iu_version; + env->fsr = env->def.fpu_version; + env->nwindows = env->def.nwindows; +#if !defined(TARGET_SPARC64) + env->mmuregs[0] |= env->def.mmu_version; + cpu_sparc_set_id(env, 0); + env->mxccregs[7] |= env->def.mxcc_version; +#else + env->mmu_version = env->def.mmu_version; + env->maxtl = env->def.maxtl; + env->version |= env->def.maxtl << 8; + env->version |= env->def.nwindows - 1; +#endif + + cpu_exec_realizefn(cs); +} + +static void sparc_cpu_initfn(struct uc_struct *uc, CPUState *obj) +{ + SPARCCPU *cpu = SPARC_CPU(obj); + SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(obj); + CPUSPARCState *env = &cpu->env; + + env->uc = uc; + + cpu_set_cpustate_pointers(cpu); + + if (scc->cpu_def) { + env->def = *scc->cpu_def; + } +} + +static void sparc_cpu_class_init(struct uc_struct *uc, CPUClass *oc) +{ + SPARCCPUClass *scc = SPARC_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + + /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */ + scc->parent_reset = cc->reset; + /* overwrite the CPUClass->reset to arch reset: sparc_cpu_reset(). */ + cc->reset = sparc_cpu_reset; + cc->has_work = sparc_cpu_has_work; + cc->do_interrupt = sparc_cpu_do_interrupt; + cc->cpu_exec_interrupt = sparc_cpu_exec_interrupt; + cc->set_pc = sparc_cpu_set_pc; + cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb; + cc->tlb_fill = sparc_cpu_tlb_fill; + cc->do_unaligned_access = sparc_cpu_do_unaligned_access; + cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug; + cc->tcg_initialize = sparc_tcg_init; +} + +SPARCCPU *cpu_sparc_init(struct uc_struct *uc, const char *cpu_model) +{ + SPARCCPU *cpu; + CPUState *cs; + CPUClass *cc; + SPARCCPUClass *scc; + int i; + + if (cpu_model == NULL) { +#ifdef TARGET_SPARC64 + cpu_model = "Sun UltraSparc IV"; +#else + cpu_model = "LEON3"; +#endif + } + + cpu = malloc(sizeof(*cpu)); + if (cpu == NULL) { + return NULL; + } + memset(cpu, 0, sizeof(*cpu)); + + cs = (CPUState *)cpu; + cc = (CPUClass *)&cpu->cc; + cs->cc = cc; + cs->uc = uc; + uc->cpu = cs; + + /* init CPUClass */ + cpu_class_init(uc, cc); + /* init SPARCCPUClass */ + sparc_cpu_class_init(uc, cc); + /* init CPUState */ + cpu_common_initfn(uc, cs); + /* init SPARC types scc->def */ + scc = SPARC_CPU_CLASS(cc); + for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) { + if (strcmp(cpu_model, sparc_defs[i].name) == 0) { + scc->cpu_def = &sparc_defs[i]; + break; + } + } + if (i == ARRAY_SIZE(sparc_defs)) { + free(cpu); + return NULL; + } + /* init SPARCCPU */ + sparc_cpu_initfn(uc, cs); + /* realize SPARCCPU */ + sparc_cpu_realizefn(uc, cs); + /* realize CPUState */ + + // init address space + cpu_address_space_init(cs, 0, cs->memory); + + qemu_init_vcpu(cs); + + return cpu; +} diff --git a/qemu/target-sparc/cpu.h b/qemu/target/sparc/cpu.h similarity index 75% rename from qemu/target-sparc/cpu.h rename to qemu/target/sparc/cpu.h index 2f8ed15f..97b47cce 100644 --- a/qemu/target-sparc/cpu.h +++ b/qemu/target/sparc/cpu.h @@ -1,46 +1,51 @@ -#ifndef CPU_SPARC_H -#define CPU_SPARC_H +#ifndef SPARC_CPU_H +#define SPARC_CPU_H -#include "config.h" -#include "qemu-common.h" #include "qemu/bswap.h" - -#define ALIGNED_ONLY - -#if 
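/* [editor's note] With QOM removed, cpu_sparc_init() above runs class init,
 * instance init and realize as one straight-line sequence and returns NULL
 * for an unknown model name or allocation failure. A minimal usage sketch,
 * assuming `uc` is a live uc_struct from Unicorn's machine bring-up: */
SPARCCPU *cpu = cpu_sparc_init(uc, "LEON3"); /* NULL model picks a default */
if (cpu == NULL) {
    return NULL;                    /* unknown CPU model or out of memory */
}
CPUSPARCState *env = &cpu->env;     /* architectural state lives inline  */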
!defined(TARGET_SPARC64) -#define TARGET_LONG_BITS 32 -#define TARGET_DPREGS 16 -#define TARGET_PAGE_BITS 12 /* 4k */ -#define TARGET_PHYS_ADDR_SPACE_BITS 36 -#define TARGET_VIRT_ADDR_SPACE_BITS 32 -#else -#define TARGET_LONG_BITS 64 -#define TARGET_DPREGS 32 -#define TARGET_PAGE_BITS 13 /* 8k */ -#define TARGET_PHYS_ADDR_SPACE_BITS 41 -# ifdef TARGET_ABI32 -# define TARGET_VIRT_ADDR_SPACE_BITS 32 -# else -# define TARGET_VIRT_ADDR_SPACE_BITS 44 -# endif -#endif - -#define CPUArchState struct CPUSPARCState - +#include "cpu-qom.h" #include "exec/cpu-defs.h" -#include "fpu/softfloat.h" - -#define TARGET_HAS_ICE 1 - #if !defined(TARGET_SPARC64) -#define ELF_MACHINE EM_SPARC +#define TARGET_DPREGS 16 #else -#define ELF_MACHINE EM_SPARCV9 +#define TARGET_DPREGS 32 #endif /*#define EXCP_INTERRUPT 0x100*/ +/* Windowed register indexes. */ +enum { + WREG_O0, + WREG_O1, + WREG_O2, + WREG_O3, + WREG_O4, + WREG_O5, + WREG_O6, + WREG_O7, + + WREG_L0, + WREG_L1, + WREG_L2, + WREG_L3, + WREG_L4, + WREG_L5, + WREG_L6, + WREG_L7, + + WREG_I0, + WREG_I1, + WREG_I2, + WREG_I3, + WREG_I4, + WREG_I5, + WREG_I6, + WREG_I7, + + WREG_SP = WREG_O6, + WREG_FP = WREG_I6, +}; + /* trap definitions */ #ifndef TARGET_SPARC64 #define TT_TFAULT 0x01 @@ -76,6 +81,8 @@ #define TT_DATA_ACCESS 0x32 #define TT_UNALIGNED 0x34 #define TT_PRIV_ACT 0x37 +#define TT_INSN_REAL_TRANSLATION_MISS 0x3e +#define TT_DATA_REAL_TRANSLATION_MISS 0x3f #define TT_EXTINT 0x40 #define TT_IVEC 0x60 #define TT_TMISS 0x64 @@ -85,6 +92,7 @@ #define TT_FILL 0xc0 #define TT_WOTHER (1 << 5) #define TT_TRAP 0x100 +#define TT_HTRAP 0x180 #endif #define PSR_NEG_SHIFT 23 @@ -110,6 +118,11 @@ #define CC_DST (env->cc_dst) #define CC_OP (env->cc_op) +/* Even though lazy evaluation of CPU condition codes tends to be less + * important on RISC systems where condition codes are only updated + * when explicitly requested, SPARC uses it to update 32-bit and 64-bit + * condition codes. 
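/* [editor's note] Concretely, "lazy" means translated code only records the
 * last flag-setting operation and its operands; PSR.icc is materialized on
 * demand. A sketch of the contract the comment above describes, with
 * hypothetical src1/src2 operands of an add: */
env->cc_src  = src1;
env->cc_src2 = src2;
env->cc_dst  = src1 + src2;   /* the raw result                     */
env->cc_op   = CC_OP_ADD;     /* defer N/Z/V/C computation          */
/* ... only when PSR is actually read: */
helper_compute_psr(env);      /* folds icc_table[cc_op] into env->psr */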
+ */ enum { CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ CC_OP_FLAGS, /* all cc are back in status register */ @@ -227,10 +240,7 @@ enum { #define MIN_NWINDOWS 3 #define MAX_NWINDOWS 32 -#if !defined(TARGET_SPARC64) -#define NB_MMU_MODES 2 -#else -#define NB_MMU_MODES 6 +#ifdef TARGET_SPARC64 typedef struct trap_state { uint64_t tpc; uint64_t tnpc; @@ -238,8 +248,9 @@ typedef struct trap_state { uint32_t tt; } trap_state; #endif +#define TARGET_INSN_START_EXTRA_WORDS 1 -typedef struct sparc_def_t { +struct sparc_def_t { const char *name; target_ulong iu_version; uint32_t fpu_version; @@ -253,7 +264,7 @@ typedef struct sparc_def_t { uint32_t features; uint32_t nwindows; uint32_t maxtl; -} sparc_def_t; +}; #define CPU_FEATURE_FLOAT (1 << 0) #define CPU_FEATURE_FLOAT128 (1 << 1) @@ -297,6 +308,7 @@ enum { #define TTE_VALID_BIT (1ULL << 63) #define TTE_NFO_BIT (1ULL << 60) +#define TTE_IE_BIT (1ULL << 59) #define TTE_USED_BIT (1ULL << 41) #define TTE_LOCKED_BIT (1ULL << 6) #define TTE_SIDEEFFECT_BIT (1ULL << 3) @@ -304,21 +316,43 @@ enum { #define TTE_W_OK_BIT (1ULL << 1) #define TTE_GLOBAL_BIT (1ULL << 0) +#define TTE_NFO_BIT_UA2005 (1ULL << 62) +#define TTE_USED_BIT_UA2005 (1ULL << 47) +#define TTE_LOCKED_BIT_UA2005 (1ULL << 61) +#define TTE_SIDEEFFECT_BIT_UA2005 (1ULL << 11) +#define TTE_PRIV_BIT_UA2005 (1ULL << 8) +#define TTE_W_OK_BIT_UA2005 (1ULL << 6) + #define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT) #define TTE_IS_NFO(tte) ((tte) & TTE_NFO_BIT) +#define TTE_IS_IE(tte) ((tte) & TTE_IE_BIT) #define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT) #define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT) #define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT) +#define TTE_IS_SIDEEFFECT_UA2005(tte) ((tte) & TTE_SIDEEFFECT_BIT_UA2005) #define TTE_IS_PRIV(tte) ((tte) & TTE_PRIV_BIT) #define TTE_IS_W_OK(tte) ((tte) & TTE_W_OK_BIT) + +#define TTE_IS_NFO_UA2005(tte) ((tte) & TTE_NFO_BIT_UA2005) +#define TTE_IS_USED_UA2005(tte) ((tte) & TTE_USED_BIT_UA2005) +#define TTE_IS_LOCKED_UA2005(tte) ((tte) & TTE_LOCKED_BIT_UA2005) +#define TTE_IS_SIDEEFFECT_UA2005(tte) ((tte) & TTE_SIDEEFFECT_BIT_UA2005) +#define TTE_IS_PRIV_UA2005(tte) ((tte) & TTE_PRIV_BIT_UA2005) +#define TTE_IS_W_OK_UA2005(tte) ((tte) & TTE_W_OK_BIT_UA2005) + #define TTE_IS_GLOBAL(tte) ((tte) & TTE_GLOBAL_BIT) #define TTE_SET_USED(tte) ((tte) |= TTE_USED_BIT) #define TTE_SET_UNUSED(tte) ((tte) &= ~TTE_USED_BIT) #define TTE_PGSIZE(tte) (((tte) >> 61) & 3ULL) +#define TTE_PGSIZE_UA2005(tte) ((tte) & 7ULL) #define TTE_PA(tte) ((tte) & 0x1ffffffe000ULL) +/* UltraSPARC T1 specific */ +#define TLB_UST1_IS_REAL_BIT (1ULL << 9) /* Real translation entry */ +#define TLB_UST1_IS_SUN4V_BIT (1ULL << 10) /* sun4u/sun4v TTE format switch */ + #define SFSR_NF_BIT (1ULL << 24) /* JPS1 NoFault */ #define SFSR_TM_BIT (1ULL << 15) /* JPS1 TLB Miss */ #define SFSR_FT_VA_IMMU_BIT (1ULL << 13) /* USIIi VA out of range (IMMU) */ @@ -362,6 +396,9 @@ enum { #define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */ #define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */ +#define CONVERT_BIT(X, SRC, DST) \ + (SRC > DST ? 
(X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC)) + typedef struct SparcTLBEntry { uint64_t tag; uint64_t tte; @@ -373,18 +410,33 @@ struct CPUTimer uint32_t frequency; uint32_t disabled; uint64_t disabled_mask; + uint32_t npt; + uint64_t npt_mask; int64_t clock_offset; QEMUTimer *qtimer; }; typedef struct CPUTimer CPUTimer; -struct QEMUFile; -void cpu_put_timer(struct QEMUFile *f, CPUTimer *s); -void cpu_get_timer(struct QEMUFile *f, CPUTimer *s); - typedef struct CPUSPARCState CPUSPARCState; - +#if defined(TARGET_SPARC64) +typedef union { + uint64_t mmuregs[16]; + struct { + uint64_t tsb_tag_target; + uint64_t mmu_primary_context; + uint64_t mmu_secondary_context; + uint64_t sfsr; + uint64_t sfar; + uint64_t tsb; + uint64_t tag_access; + uint64_t virtual_watchpoint; + uint64_t physical_watchpoint; + uint64_t sun4v_ctx_config[2]; + uint64_t sun4v_tsb_pointers[4]; + }; +} SparcV9MMU; +#endif struct CPUSPARCState { target_ulong gregs[8]; /* general registers */ target_ulong *regwptr; /* pointer to current register window */ @@ -423,7 +475,12 @@ struct CPUSPARCState { /* NOTE: we allow 8 more registers to handle wrapping */ target_ulong regbase[MAX_NWINDOWS * 16 + 8]; - CPU_COMMON + /* Fields up to this point are cleared by a CPU reset */ +#ifdef _MSC_VER + int end_reset_fields; +#else + struct {} end_reset_fields; +#endif /* Fields from here on are preserved across CPU reset. */ target_ulong version; @@ -434,31 +491,8 @@ struct CPUSPARCState { uint64_t lsu; #define DMMU_E 0x8 #define IMMU_E 0x4 - //typedef struct SparcMMU - union { - uint64_t immuregs[16]; - struct { - uint64_t tsb_tag_target; - uint64_t unused_mmu_primary_context; // use DMMU - uint64_t unused_mmu_secondary_context; // use DMMU - uint64_t sfsr; - uint64_t sfar; - uint64_t tsb; - uint64_t tag_access; - } immu; - }; - union { - uint64_t dmmuregs[16]; - struct { - uint64_t tsb_tag_target; - uint64_t mmu_primary_context; - uint64_t mmu_secondary_context; - uint64_t sfsr; - uint64_t sfar; - uint64_t tsb; - uint64_t tag_access; - } dmmu; - }; + SparcV9MMU immu; + SparcV9MMU dmmu; SparcTLBEntry itlb[64]; SparcTLBEntry dtlb[64]; uint32_t mmu_version; @@ -488,6 +522,7 @@ struct CPUSPARCState { uint64_t bgregs[8]; /* backup for normal global registers */ uint64_t igregs[8]; /* interrupt general registers */ uint64_t mgregs[8]; /* mmu general registers */ + uint64_t glregs[8 * MAXTL_MAX]; uint64_t fprs; uint64_t tick_cmpr, stick_cmpr; CPUTimer *tick, *stick; @@ -497,6 +532,7 @@ struct CPUSPARCState { uint32_t gl; // UA2005 /* UA 2005 hyperprivileged registers */ uint64_t hpstate, htstate[MAXTL_MAX], hintp, htba, hver, hstick_cmpr, ssr; + uint64_t scratch[8]; CPUTimer *hstick; // UA 2005 /* Interrupt vector registers */ uint64_t ivec_status; @@ -507,52 +543,80 @@ struct CPUSPARCState { #define SOFTINT_INTRMASK (0xFFFE) #define SOFTINT_REG_MASK (SOFTINT_STIMER|SOFTINT_INTRMASK|SOFTINT_TIMER) #endif - sparc_def_t *def; - - //void *irq_manager; - //void (*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno); + sparc_def_t def; /* Leon3 cache control */ uint32_t cache_control; + void *irq_manager; + void (*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno); + // Unicorn engine struct uc_struct *uc; }; -#include "cpu-qom.h" +/** + * SPARCCPU: + * @env: #CPUSPARCState + * + * A SPARC CPU. 
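/* [editor's note] The SparcV9MMU union declared above gives the ASI
 * load/store helpers a flat mmuregs[16] view while the MMU code keeps named
 * fields; both views alias the same storage. A sketch for a TARGET_SPARC64
 * build; index 3 follows from the declared field order (tsb_tag_target,
 * primary ctx, secondary ctx, sfsr, ...), assuming the usual field-by-field
 * struct layout that QEMU relies on: */
static inline uint64_t dmmu_sfsr_by_index(CPUSPARCState *env)
{
    return env->dmmu.mmuregs[3];   /* same storage as env->dmmu.sfsr */
}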
+ */ +struct SPARCCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUNegativeOffsetState neg; + CPUSPARCState env; + + struct SPARCCPUClass cc; +}; + + +void sparc_cpu_do_interrupt(CPUState *cpu); +hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, + uintptr_t retaddr); +#ifdef _MSC_VER +void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t); +#else +void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t) QEMU_NORETURN; +#endif #ifndef NO_CPU_IO_DEFS /* cpu_init.c */ -SPARCCPU *cpu_sparc_init(struct uc_struct *uc, const char *cpu_model); void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu); -void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf); +void sparc_cpu_list(void); /* mmu_helper.c */ -int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, - int mmu_idx); +bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev); -void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env); -#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) +#if !defined(TARGET_SPARC64) int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr, uint8_t *buf, int len, bool is_write); #endif /* translate.c */ -void gen_intermediate_code_init(CPUSPARCState *env); +void sparc_tcg_init(struct uc_struct *uc); /* cpu-exec.c */ -int cpu_sparc_exec(struct uc_struct *uc, CPUSPARCState *s); /* win_helper.c */ target_ulong cpu_get_psr(CPUSPARCState *env1); void cpu_put_psr(CPUSPARCState *env1, target_ulong val); +void cpu_put_psr_raw(CPUSPARCState *env1, target_ulong val); #ifdef TARGET_SPARC64 target_ulong cpu_get_ccr(CPUSPARCState *env1); void cpu_put_ccr(CPUSPARCState *env1, target_ulong val); target_ulong cpu_get_cwp64(CPUSPARCState *env1); void cpu_put_cwp64(CPUSPARCState *env1, int cwp); void cpu_change_pstate(CPUSPARCState *env1, uint32_t new_pstate); +void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl); #endif int cpu_cwp_inc(CPUSPARCState *env1, int cwp); int cpu_cwp_dec(CPUSPARCState *env1, int cwp); @@ -561,11 +625,8 @@ void cpu_set_cwp(CPUSPARCState *env1, int new_cwp); /* int_helper.c */ void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno); -/* sun4m.c, sun4u.c */ -void cpu_check_irqs(CPUSPARCState *env); - /* leon3.c */ -void leon3_irq_ack(void *irq_manager, int intno); +// void leon3_irq_ack(void *irq_manager, int intno); #if defined (TARGET_SPARC64) @@ -587,60 +648,43 @@ static inline int tlb_compare_context(const SparcTLBEntry *tlb, #endif /* cpu-exec.c */ -#if !defined(CONFIG_USER_ONLY) -void sparc_cpu_unassigned_access(CPUState *cpu, hwaddr addr, - bool is_write, bool is_exec, int is_asi, - unsigned size); +void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr); #if defined(TARGET_SPARC64) hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, int mmu_idx); #endif -#endif + int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc); -#ifndef NO_CPU_IO_DEFS -static inline CPUSPARCState *cpu_init(struct uc_struct *uc, const char *cpu_model) -{ - SPARCCPU *cpu = cpu_sparc_init(uc, cpu_model); - if (cpu == NULL) { - return NULL; - } - return 
&cpu->env; -} -#endif +#define SPARC_CPU_TYPE_SUFFIX "-" TYPE_SPARC_CPU +#define SPARC_CPU_TYPE_NAME(model) model SPARC_CPU_TYPE_SUFFIX +#define CPU_RESOLVING_TYPE TYPE_SPARC_CPU -#define cpu_exec cpu_sparc_exec -#define cpu_gen_code cpu_sparc_gen_code #define cpu_signal_handler cpu_sparc_signal_handler #define cpu_list sparc_cpu_list -#define CPU_SAVE_VERSION 7 - /* MMU modes definitions */ #if defined (TARGET_SPARC64) #define MMU_USER_IDX 0 -#define MMU_MODE0_SUFFIX _user #define MMU_USER_SECONDARY_IDX 1 -#define MMU_MODE1_SUFFIX _user_secondary #define MMU_KERNEL_IDX 2 -#define MMU_MODE2_SUFFIX _kernel #define MMU_KERNEL_SECONDARY_IDX 3 -#define MMU_MODE3_SUFFIX _kernel_secondary #define MMU_NUCLEUS_IDX 4 -#define MMU_MODE4_SUFFIX _nucleus -#define MMU_HYPV_IDX 5 -#define MMU_MODE5_SUFFIX _hypv +#define MMU_PHYS_IDX 5 #else #define MMU_USER_IDX 0 -#define MMU_MODE0_SUFFIX _user #define MMU_KERNEL_IDX 1 -#define MMU_MODE1_SUFFIX _kernel +#define MMU_PHYS_IDX 2 #endif #if defined (TARGET_SPARC64) static inline int cpu_has_hypervisor(CPUSPARCState *env1) { - return env1->def->features & CPU_FEATURE_HYPV; + return env1->def.features & CPU_FEATURE_HYPV; } static inline int cpu_hypervisor_mode(CPUSPARCState *env1) @@ -652,20 +696,32 @@ static inline int cpu_supervisor_mode(CPUSPARCState *env1) { return env1->pstate & PS_PRIV; } +#else +static inline int cpu_supervisor_mode(CPUSPARCState *env1) +{ + return env1->psrs; +} #endif -static inline int cpu_mmu_index(CPUSPARCState *env1) +static inline int cpu_mmu_index(CPUSPARCState *env, bool ifetch) { -#if defined(CONFIG_USER_ONLY) - return MMU_USER_IDX; -#elif !defined(TARGET_SPARC64) - return env1->psrs; +#if !defined(TARGET_SPARC64) + if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */ + return MMU_PHYS_IDX; + } else { + return env->psrs; + } #else - if (env1->tl > 0) { + /* IMMU or DMMU disabled. */ + if (ifetch + ? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0 + : (env->lsu & DMMU_E) == 0) { + return MMU_PHYS_IDX; + } else if (cpu_hypervisor_mode(env)) { + return MMU_PHYS_IDX; + } else if (env->tl > 0) { return MMU_NUCLEUS_IDX; - } else if (cpu_hypervisor_mode(env1)) { - return MMU_HYPV_IDX; - } else if (cpu_supervisor_mode(env1)) { + } else if (cpu_supervisor_mode(env)) { return MMU_KERNEL_IDX; } else { return MMU_USER_IDX; @@ -679,8 +735,9 @@ static inline int cpu_interrupts_enabled(CPUSPARCState *env1) if (env1->psret != 0) return 1; #else - if (env1->pstate & PS_IE) + if ((env1->pstate & PS_IE) && !cpu_hypervisor_mode(env1)) { return 1; + } #endif return 0; @@ -696,6 +753,9 @@ static inline int cpu_pil_allowed(CPUSPARCState *env1, int pil) #endif } +typedef CPUSPARCState CPUArchState; +typedef SPARCCPU ArchCPU; + #include "exec/cpu-all.h" #ifdef TARGET_SPARC64 @@ -706,43 +766,44 @@ void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit); trap_state* cpu_tsptr(CPUSPARCState* env); #endif -#define TB_FLAG_FPU_ENABLED (1 << 4) -#define TB_FLAG_AM_ENABLED (1 << 5) +#define TB_FLAG_MMU_MASK 7 +#define TB_FLAG_FPU_ENABLED (1 << 4) +#define TB_FLAG_AM_ENABLED (1 << 5) +#define TB_FLAG_SUPER (1 << 6) +#define TB_FLAG_HYPER (1 << 7) +#define TB_FLAG_ASI_SHIFT 24 static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc, - target_ulong *cs_base, int *flags) + target_ulong *cs_base, uint32_t *pflags) { + uint32_t flags; *pc = env->pc; *cs_base = env->npc; + flags = cpu_mmu_index(env, false); + if (cpu_supervisor_mode(env)) { + flags |= TB_FLAG_SUPER; + } #ifdef TARGET_SPARC64 - // AM . Combined FPU enable bits . 
PRIV . DMMU enabled . IMMU enabled - *flags = (env->pstate & PS_PRIV) /* 2 */ - | ((env->lsu & (DMMU_E | IMMU_E)) >> 2) /* 1, 0 */ - | ((env->tl & 0xff) << 8) - | (env->dmmu.mmu_primary_context << 16); /* 16... */ if (env->pstate & PS_AM) { - *flags |= TB_FLAG_AM_ENABLED; + flags |= TB_FLAG_AM_ENABLED; } - if ((env->def->features & CPU_FEATURE_FLOAT) && (env->pstate & PS_PEF) + if ((env->def.features & CPU_FEATURE_FLOAT) + && (env->pstate & PS_PEF) && (env->fprs & FPRS_FEF)) { - *flags |= TB_FLAG_FPU_ENABLED; + flags |= TB_FLAG_FPU_ENABLED; } + flags |= env->asi << TB_FLAG_ASI_SHIFT; #else - // FPU enable . Supervisor - *flags = env->psrs; - if ((env->def->features & CPU_FEATURE_FLOAT) && env->psref) { - *flags |= TB_FLAG_FPU_ENABLED; + if ((env->def.features & CPU_FEATURE_FLOAT) && env->psref) { + flags |= TB_FLAG_FPU_ENABLED; } #endif + *pflags = flags; } static inline bool tb_fpu_enabled(int tb_flags) { -#if defined(CONFIG_USER_ONLY) - return true; -#else return tb_flags & TB_FLAG_FPU_ENABLED; -#endif } static inline bool tb_am_enabled(int tb_flags) @@ -754,6 +815,6 @@ static inline bool tb_am_enabled(int tb_flags) #endif } -#include "exec/exec-all.h" +SPARCCPU *cpu_sparc_init(struct uc_struct *uc, const char *cpu_model); #endif diff --git a/qemu/target-sparc/fop_helper.c b/qemu/target/sparc/fop_helper.c similarity index 59% rename from qemu/target-sparc/fop_helper.c rename to qemu/target/sparc/fop_helper.c index ee4592ef..9eb9b757 100644 --- a/qemu/target-sparc/fop_helper.c +++ b/qemu/target/sparc/fop_helper.c @@ -17,49 +17,63 @@ * License along with this library; if not, see . */ +#include "qemu/osdep.h" #include "cpu.h" +#include "exec/exec-all.h" #include "exec/helper-proto.h" +#include "fpu/softfloat.h" #define QT0 (env->qt0) #define QT1 (env->qt1) -static void check_ieee_exceptions(CPUSPARCState *env) +static target_ulong do_check_ieee_exceptions(CPUSPARCState *env, uintptr_t ra) { - target_ulong status; + target_ulong status = get_float_exception_flags(&env->fp_status); + target_ulong fsr = env->fsr; + + if (unlikely(status)) { + /* Keep exception flags clear for next time. */ + set_float_exception_flags(0, &env->fp_status); - status = get_float_exception_flags(&env->fp_status); - if (status) { /* Copy IEEE 754 flags into FSR */ if (status & float_flag_invalid) { - env->fsr |= FSR_NVC; + fsr |= FSR_NVC; } if (status & float_flag_overflow) { - env->fsr |= FSR_OFC; + fsr |= FSR_OFC; } if (status & float_flag_underflow) { - env->fsr |= FSR_UFC; + fsr |= FSR_UFC; } if (status & float_flag_divbyzero) { - env->fsr |= FSR_DZC; + fsr |= FSR_DZC; } if (status & float_flag_inexact) { - env->fsr |= FSR_NXC; + fsr |= FSR_NXC; } - if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) { - /* Unmasked exception, generate a trap */ - env->fsr |= FSR_FTT_IEEE_EXCP; - helper_raise_exception(env, TT_FP_EXCP); + if ((fsr & FSR_CEXC_MASK) & ((fsr & FSR_TEM_MASK) >> 23)) { + CPUState *cs = env_cpu(env); + + /* Unmasked exception, generate a trap. Note that while + the helper is marked as NO_WG, we can get away with + writing to cpu state along the exception path, since + TCG generated code will never see the write. 
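/* [editor's note] The scheme described in the surrounding comment inverts
 * the old flow: float op helpers no longer clear and check flags themselves;
 * translated code calls helper_check_ieee_exceptions() afterwards and stores
 * its return value back to env->fsr. A hypothetical emitted sequence for a
 * single FADDS, written as C pseudocode: */
dst = helper_fadds(env, src1, src2);          /* may raise fp_status flags  */
env->fsr = helper_check_ieee_exceptions(env); /* folds flags, traps if unmasked */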
*/ + env->fsr = fsr | FSR_FTT_IEEE_EXCP; + cs->exception_index = TT_FP_EXCP; + cpu_loop_exit_restore(cs, ra); } else { /* Accumulate exceptions */ - env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5; + fsr |= (fsr & FSR_CEXC_MASK) << 5; } } + + return fsr; } -static inline void clear_float_exceptions(CPUSPARCState *env) +target_ulong helper_check_ieee_exceptions(CPUSPARCState *env) { - set_float_exception_flags(0, &env->fp_status); + return do_check_ieee_exceptions(env, GETPC()); } #define F_HELPER(name, p) void helper_f##name##p(CPUSPARCState *env) @@ -68,26 +82,16 @@ static inline void clear_float_exceptions(CPUSPARCState *env) float32 helper_f ## name ## s (CPUSPARCState *env, float32 src1, \ float32 src2) \ { \ - float32 ret; \ - clear_float_exceptions(env); \ - ret = float32_ ## name (src1, src2, &env->fp_status); \ - check_ieee_exceptions(env); \ - return ret; \ + return float32_ ## name (src1, src2, &env->fp_status); \ } \ float64 helper_f ## name ## d (CPUSPARCState * env, float64 src1,\ float64 src2) \ { \ - float64 ret; \ - clear_float_exceptions(env); \ - ret = float64_ ## name (src1, src2, &env->fp_status); \ - check_ieee_exceptions(env); \ - return ret; \ + return float64_ ## name (src1, src2, &env->fp_status); \ } \ F_HELPER(name, q) \ { \ - clear_float_exceptions(env); \ QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \ - check_ieee_exceptions(env); \ } F_BINOP(add); @@ -98,22 +102,16 @@ F_BINOP(div); float64 helper_fsmuld(CPUSPARCState *env, float32 src1, float32 src2) { - float64 ret; - clear_float_exceptions(env); - ret = float64_mul(float32_to_float64(src1, &env->fp_status), - float32_to_float64(src2, &env->fp_status), - &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float64_mul(float32_to_float64(src1, &env->fp_status), + float32_to_float64(src2, &env->fp_status), + &env->fp_status); } void helper_fdmulq(CPUSPARCState *env, float64 src1, float64 src2) { - clear_float_exceptions(env); QT0 = float128_mul(float64_to_float128(src1, &env->fp_status), float64_to_float128(src2, &env->fp_status), &env->fp_status); - check_ieee_exceptions(env); } float32 helper_fnegs(float32 src) @@ -136,48 +134,32 @@ F_HELPER(neg, q) /* Integer to float conversion. */ float32 helper_fitos(CPUSPARCState *env, int32_t src) { - /* Inexact error possible converting int to float. */ - float32 ret; - clear_float_exceptions(env); - ret = int32_to_float32(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return int32_to_float32(src, &env->fp_status); } float64 helper_fitod(CPUSPARCState *env, int32_t src) { - /* No possible exceptions converting int to double. */ return int32_to_float64(src, &env->fp_status); } void helper_fitoq(CPUSPARCState *env, int32_t src) { - /* No possible exceptions converting int to long double. */ QT0 = int32_to_float128(src, &env->fp_status); } #ifdef TARGET_SPARC64 float32 helper_fxtos(CPUSPARCState *env, int64_t src) { - float32 ret; - clear_float_exceptions(env); - ret = int64_to_float32(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return int64_to_float32(src, &env->fp_status); } float64 helper_fxtod(CPUSPARCState *env, int64_t src) { - float64 ret; - clear_float_exceptions(env); - ret = int64_to_float64(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return int64_to_float64(src, &env->fp_status); } void helper_fxtoq(CPUSPARCState *env, int64_t src) { - /* No possible exceptions converting long long to long double. 
*/ QT0 = int64_to_float128(src, &env->fp_status); } #endif @@ -186,108 +168,64 @@ void helper_fxtoq(CPUSPARCState *env, int64_t src) /* floating point conversion */ float32 helper_fdtos(CPUSPARCState *env, float64 src) { - float32 ret; - clear_float_exceptions(env); - ret = float64_to_float32(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float64_to_float32(src, &env->fp_status); } float64 helper_fstod(CPUSPARCState *env, float32 src) { - float64 ret; - clear_float_exceptions(env); - ret = float32_to_float64(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float32_to_float64(src, &env->fp_status); } float32 helper_fqtos(CPUSPARCState *env) { - float32 ret; - clear_float_exceptions(env); - ret = float128_to_float32(QT1, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float128_to_float32(QT1, &env->fp_status); } void helper_fstoq(CPUSPARCState *env, float32 src) { - clear_float_exceptions(env); QT0 = float32_to_float128(src, &env->fp_status); - check_ieee_exceptions(env); } float64 helper_fqtod(CPUSPARCState *env) { - float64 ret; - clear_float_exceptions(env); - ret = float128_to_float64(QT1, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float128_to_float64(QT1, &env->fp_status); } void helper_fdtoq(CPUSPARCState *env, float64 src) { - clear_float_exceptions(env); QT0 = float64_to_float128(src, &env->fp_status); - check_ieee_exceptions(env); } /* Float to integer conversion. */ int32_t helper_fstoi(CPUSPARCState *env, float32 src) { - int32_t ret; - clear_float_exceptions(env); - ret = float32_to_int32_round_to_zero(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float32_to_int32_round_to_zero(src, &env->fp_status); } int32_t helper_fdtoi(CPUSPARCState *env, float64 src) { - int32_t ret; - clear_float_exceptions(env); - ret = float64_to_int32_round_to_zero(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float64_to_int32_round_to_zero(src, &env->fp_status); } int32_t helper_fqtoi(CPUSPARCState *env) { - int32_t ret; - clear_float_exceptions(env); - ret = float128_to_int32_round_to_zero(QT1, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float128_to_int32_round_to_zero(QT1, &env->fp_status); } #ifdef TARGET_SPARC64 int64_t helper_fstox(CPUSPARCState *env, float32 src) { - int64_t ret; - clear_float_exceptions(env); - ret = float32_to_int64_round_to_zero(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float32_to_int64_round_to_zero(src, &env->fp_status); } int64_t helper_fdtox(CPUSPARCState *env, float64 src) { - int64_t ret; - clear_float_exceptions(env); - ret = float64_to_int64_round_to_zero(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float64_to_int64_round_to_zero(src, &env->fp_status); } int64_t helper_fqtox(CPUSPARCState *env) { - int64_t ret; - clear_float_exceptions(env); - ret = float128_to_int64_round_to_zero(QT1, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float128_to_int64_round_to_zero(QT1, &env->fp_status); } #endif @@ -310,87 +248,79 @@ void helper_fabsq(CPUSPARCState *env) float32 helper_fsqrts(CPUSPARCState *env, float32 src) { - float32 ret; - clear_float_exceptions(env); - ret = float32_sqrt(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float32_sqrt(src, &env->fp_status); } float64 helper_fsqrtd(CPUSPARCState *env, float64 src) { - float64 ret; - clear_float_exceptions(env); - 
ret = float64_sqrt(src, &env->fp_status); - check_ieee_exceptions(env); - return ret; + return float64_sqrt(src, &env->fp_status); } void helper_fsqrtq(CPUSPARCState *env) { - clear_float_exceptions(env); QT0 = float128_sqrt(QT1, &env->fp_status); - check_ieee_exceptions(env); } #define GEN_FCMP(name, size, reg1, reg2, FS, E) \ - void glue(helper_, name) (CPUSPARCState *env) \ + target_ulong glue(helper_, name) (CPUSPARCState *env) \ { \ int ret; \ - clear_float_exceptions(env); \ + target_ulong fsr; \ if (E) { \ ret = glue(size, _compare)(reg1, reg2, &env->fp_status); \ } else { \ ret = glue(size, _compare_quiet)(reg1, reg2, \ &env->fp_status); \ } \ - check_ieee_exceptions(env); \ + fsr = do_check_ieee_exceptions(env, GETPC()); \ switch (ret) { \ case float_relation_unordered: \ - env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \ - env->fsr |= FSR_NVA; \ + fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \ + fsr |= FSR_NVA; \ break; \ case float_relation_less: \ - env->fsr &= ~(FSR_FCC1) << FS; \ - env->fsr |= FSR_FCC0 << FS; \ + fsr &= ~(FSR_FCC1) << FS; \ + fsr |= FSR_FCC0 << FS; \ break; \ case float_relation_greater: \ - env->fsr &= ~(FSR_FCC0) << FS; \ - env->fsr |= FSR_FCC1 << FS; \ + fsr &= ~(FSR_FCC0) << FS; \ + fsr |= FSR_FCC1 << FS; \ break; \ default: \ - env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \ + fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \ break; \ } \ + return fsr; \ } #define GEN_FCMP_T(name, size, FS, E) \ - void glue(helper_, name)(CPUSPARCState *env, size src1, size src2) \ + target_ulong glue(helper_, name)(CPUSPARCState *env, size src1, size src2)\ { \ int ret; \ - clear_float_exceptions(env); \ + target_ulong fsr; \ if (E) { \ ret = glue(size, _compare)(src1, src2, &env->fp_status); \ } else { \ ret = glue(size, _compare_quiet)(src1, src2, \ &env->fp_status); \ } \ - check_ieee_exceptions(env); \ + fsr = do_check_ieee_exceptions(env, GETPC()); \ switch (ret) { \ case float_relation_unordered: \ - env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \ + fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \ break; \ case float_relation_less: \ - env->fsr &= ~(FSR_FCC1 << FS); \ - env->fsr |= FSR_FCC0 << FS; \ + fsr &= ~(FSR_FCC1 << FS); \ + fsr |= FSR_FCC0 << FS; \ break; \ case float_relation_greater: \ - env->fsr &= ~(FSR_FCC0 << FS); \ - env->fsr |= FSR_FCC1 << FS; \ + fsr &= ~(FSR_FCC0 << FS); \ + fsr |= FSR_FCC1 << FS; \ break; \ default: \ - env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \ + fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \ break; \ } \ + return fsr; \ } GEN_FCMP_T(fcmps, float32, 0, 0); @@ -430,11 +360,11 @@ GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1); #undef GEN_FCMP_T #undef GEN_FCMP -static inline void set_fsr(CPUSPARCState *env) +static void set_fsr(CPUSPARCState *env, target_ulong fsr) { int rnd_mode; - switch (env->fsr & FSR_RD_MASK) { + switch (fsr & FSR_RD_MASK) { case FSR_RD_NEAREST: rnd_mode = float_round_nearest_even; break; @@ -452,16 +382,20 @@ static inline void set_fsr(CPUSPARCState *env) set_float_rounding_mode(rnd_mode, &env->fp_status); } -void helper_ldfsr(CPUSPARCState *env, uint32_t new_fsr) +target_ulong helper_ldfsr(CPUSPARCState *env, target_ulong old_fsr, + uint32_t new_fsr) { - env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK); - set_fsr(env); + old_fsr = (new_fsr & FSR_LDFSR_MASK) | (old_fsr & FSR_LDFSR_OLDMASK); + set_fsr(env, old_fsr); + return old_fsr; } #ifdef TARGET_SPARC64 -void helper_ldxfsr(CPUSPARCState *env, uint64_t new_fsr) +target_ulong helper_ldxfsr(CPUSPARCState *env, target_ulong old_fsr, + uint64_t new_fsr) { - env->fsr = (new_fsr & 
FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK); - set_fsr(env); + old_fsr = (new_fsr & FSR_LDXFSR_MASK) | (old_fsr & FSR_LDXFSR_OLDMASK); + set_fsr(env, old_fsr); + return old_fsr; } #endif diff --git a/qemu/target-sparc/helper.c b/qemu/target/sparc/helper.c similarity index 70% rename from qemu/target-sparc/helper.c rename to qemu/target/sparc/helper.c index 602a8194..bb868743 100644 --- a/qemu/target-sparc/helper.c +++ b/qemu/target/sparc/helper.c @@ -17,14 +17,23 @@ * License along with this library; if not, see . */ +#include "qemu/osdep.h" #include "cpu.h" +#include "exec/exec-all.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" -#include "sysemu/sysemu.h" + +void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra) +{ + CPUState *cs = env_cpu(env); + + cs->exception_index = tt; + cpu_loop_exit_restore(cs, ra); +} void helper_raise_exception(CPUSPARCState *env, int tt) { - CPUState *cs = CPU(sparc_env_get_cpu(env)); + CPUState *cs = env_cpu(env); cs->exception_index = tt; cpu_loop_exit(cs); @@ -32,56 +41,51 @@ void helper_raise_exception(CPUSPARCState *env, int tt) void helper_debug(CPUSPARCState *env) { - CPUState *cs = CPU(sparc_env_get_cpu(env)); + CPUState *cs = env_cpu(env); cs->exception_index = EXCP_DEBUG; cpu_loop_exit(cs); } #ifdef TARGET_SPARC64 -target_ulong helper_popc(target_ulong val) -{ - return ctpop64(val); -} - void helper_tick_set_count(void *opaque, uint64_t count) { -#if !defined(CONFIG_USER_ONLY) // cpu_tick_set_count(opaque, count); -#endif } -uint64_t helper_tick_get_count(void *opaque) +uint64_t helper_tick_get_count(CPUSPARCState *env, void *opaque, int mem_idx) { -#if !defined(CONFIG_USER_ONLY) - return 0; //cpu_tick_get_count(opaque); -#else return 0; + +#if 0 + CPUTimer *timer = opaque; + + if (timer->npt && mem_idx < MMU_KERNEL_IDX) { + cpu_raise_exception_ra(env, TT_PRIV_INSN, GETPC()); + } + + return cpu_tick_get_count(timer); #endif } void helper_tick_set_limit(void *opaque, uint64_t limit) { -#if !defined(CONFIG_USER_ONLY) // cpu_tick_set_limit(opaque, limit); -#endif } #endif -static target_ulong helper_udiv_common(CPUSPARCState *env, target_ulong a, - target_ulong b, int cc) +static target_ulong do_udiv(CPUSPARCState *env, target_ulong a, + target_ulong b, int cc, uintptr_t ra) { - SPARCCPU *cpu = sparc_env_get_cpu(env); int overflow = 0; uint64_t x0; uint32_t x1; - x0 = (a & 0xffffffff) | ((uint64_t) (env->y) << 32); + x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32); x1 = (b & 0xffffffff); if (x1 == 0) { - cpu_restore_state(CPU(cpu), GETPC()); - helper_raise_exception(env, TT_DIV_ZERO); + cpu_raise_exception_ra(env, TT_DIV_ZERO, ra); } x0 = x0 / x1; @@ -100,28 +104,26 @@ static target_ulong helper_udiv_common(CPUSPARCState *env, target_ulong a, target_ulong helper_udiv(CPUSPARCState *env, target_ulong a, target_ulong b) { - return helper_udiv_common(env, a, b, 0); + return do_udiv(env, a, b, 0, GETPC()); } target_ulong helper_udiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b) { - return helper_udiv_common(env, a, b, 1); + return do_udiv(env, a, b, 1, GETPC()); } -static target_ulong helper_sdiv_common(CPUSPARCState *env, target_ulong a, - target_ulong b, int cc) +static target_ulong do_sdiv(CPUSPARCState *env, target_ulong a, + target_ulong b, int cc, uintptr_t ra) { - SPARCCPU *cpu = sparc_env_get_cpu(env); int overflow = 0; int64_t x0; int32_t x1; - x0 = (a & 0xffffffff) | ((uint64_t) (env->y) << 32); + x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32); x1 = (b & 0xffffffff); if (x1 == 0) { - 
cpu_restore_state(CPU(cpu), GETPC()); - helper_raise_exception(env, TT_DIV_ZERO); + cpu_raise_exception_ra(env, TT_DIV_ZERO, ra); } else if (x1 == -1 && x0 == INT64_MIN) { x0 = INT32_MAX; overflow = 1; @@ -143,12 +145,12 @@ static target_ulong helper_sdiv_common(CPUSPARCState *env, target_ulong a, target_ulong helper_sdiv(CPUSPARCState *env, target_ulong a, target_ulong b) { - return helper_sdiv_common(env, a, b, 0); + return do_sdiv(env, a, b, 0, GETPC()); } target_ulong helper_sdiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b) { - return helper_sdiv_common(env, a, b, 1); + return do_sdiv(env, a, b, 1, GETPC()); } #ifdef TARGET_SPARC64 @@ -156,10 +158,7 @@ int64_t helper_sdivx(CPUSPARCState *env, int64_t a, int64_t b) { if (b == 0) { /* Raise divide by zero trap. */ - SPARCCPU *cpu = sparc_env_get_cpu(env); - - cpu_restore_state(CPU(cpu), GETPC()); - helper_raise_exception(env, TT_DIV_ZERO); + cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC()); } else if (b == -1) { /* Avoid overflow trap with i386 divide insn. */ return -a; @@ -172,10 +171,7 @@ uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b) { if (b == 0) { /* Raise divide by zero trap. */ - SPARCCPU *cpu = sparc_env_get_cpu(env); - - cpu_restore_state(CPU(cpu), GETPC()); - helper_raise_exception(env, TT_DIV_ZERO); + cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC()); } return a / b; } @@ -184,7 +180,6 @@ uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b) target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1, target_ulong src2) { - SPARCCPU *cpu = sparc_env_get_cpu(env); target_ulong dst; /* Tag overflow occurs if either input has bits 0 or 1 set. */ @@ -207,14 +202,12 @@ target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1, return dst; tag_overflow: - cpu_restore_state(CPU(cpu), GETPC()); - helper_raise_exception(env, TT_TOVF); + cpu_raise_exception_ra(env, TT_TOVF, GETPC()); } target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1, target_ulong src2) { - SPARCCPU *cpu = sparc_env_get_cpu(env); target_ulong dst; /* Tag overflow occurs if either input has bits 0 or 1 set. 
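/* [editor's note] The tag test the comment above refers to reduces to a
 * single mask: a valid SPARC tagged value has its two low (tag) bits clear,
 * so a nonzero tag in either operand forces the trap path. Sketch: */
if ((src1 | src2) & 0x3) {
    goto tag_overflow;   /* -> cpu_raise_exception_ra(env, TT_TOVF, GETPC()) */
}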
*/ @@ -237,14 +230,13 @@ target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1, return dst; tag_overflow: - cpu_restore_state(CPU(cpu), GETPC()); - helper_raise_exception(env, TT_TOVF); + cpu_raise_exception_ra(env, TT_TOVF, GETPC()); } -//#ifndef TARGET_SPARC64 +#ifndef TARGET_SPARC64 void helper_power_down(CPUSPARCState *env) { - CPUState *cs = CPU(sparc_env_get_cpu(env)); + CPUState *cs = env_cpu(env); cs->halted = 1; cs->exception_index = EXCP_HLT; @@ -252,4 +244,4 @@ void helper_power_down(CPUSPARCState *env) env->npc = env->pc + 4; cpu_loop_exit(cs); } -//#endif +#endif diff --git a/qemu/target/sparc/helper.h b/qemu/target/sparc/helper.h new file mode 100644 index 00000000..649d9fd1 --- /dev/null +++ b/qemu/target/sparc/helper.h @@ -0,0 +1,168 @@ +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) + +#ifndef TARGET_SPARC64 +DEF_HELPER_1(rett, void, env) +DEF_HELPER_2(wrpsr, void, env, tl) +DEF_HELPER_1(rdpsr, tl, env) +DEF_HELPER_1(power_down, void, env) +#else +DEF_HELPER_FLAGS_2(wrpil, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_2(wrgl, void, env, tl) +DEF_HELPER_2(wrpstate, void, env, tl) +DEF_HELPER_1(done, void, env) +DEF_HELPER_1(retry, void, env) +DEF_HELPER_FLAGS_1(flushw, TCG_CALL_NO_WG, void, env) +DEF_HELPER_FLAGS_1(saved, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_1(restored, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_1(rdccr, tl, env) +DEF_HELPER_2(wrccr, void, env, tl) +DEF_HELPER_1(rdcwp, tl, env) +DEF_HELPER_2(wrcwp, void, env, tl) +DEF_HELPER_FLAGS_2(array8, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(set_softint, TCG_CALL_NO_RWG, void, env, i64) +DEF_HELPER_FLAGS_2(clear_softint, TCG_CALL_NO_RWG, void, env, i64) +DEF_HELPER_FLAGS_2(write_softint, TCG_CALL_NO_RWG, void, env, i64) +DEF_HELPER_FLAGS_2(tick_set_count, TCG_CALL_NO_RWG, void, ptr, i64) +DEF_HELPER_FLAGS_3(tick_get_count, TCG_CALL_NO_WG, i64, env, ptr, int) +DEF_HELPER_FLAGS_2(tick_set_limit, TCG_CALL_NO_RWG, void, ptr, i64) +#endif +DEF_HELPER_FLAGS_3(check_align, TCG_CALL_NO_WG, void, env, tl, i32) +DEF_HELPER_1(debug, void, env) +DEF_HELPER_1(save, void, env) +DEF_HELPER_1(restore, void, env) +DEF_HELPER_3(udiv, tl, env, tl, tl) +DEF_HELPER_3(udiv_cc, tl, env, tl, tl) +DEF_HELPER_3(sdiv, tl, env, tl, tl) +DEF_HELPER_3(sdiv_cc, tl, env, tl, tl) +DEF_HELPER_3(taddcctv, tl, env, tl, tl) +DEF_HELPER_3(tsubcctv, tl, env, tl, tl) +#ifdef TARGET_SPARC64 +DEF_HELPER_FLAGS_3(sdivx, TCG_CALL_NO_WG, s64, env, s64, s64) +DEF_HELPER_FLAGS_3(udivx, TCG_CALL_NO_WG, i64, env, i64, i64) +#endif +DEF_HELPER_FLAGS_4(ld_asi, TCG_CALL_NO_WG, i64, env, tl, int, i32) +DEF_HELPER_FLAGS_5(st_asi, TCG_CALL_NO_WG, void, env, tl, i64, int, i32) +DEF_HELPER_FLAGS_1(check_ieee_exceptions, TCG_CALL_NO_WG, tl, env) +DEF_HELPER_FLAGS_3(ldfsr, TCG_CALL_NO_RWG, tl, env, tl, i32) +DEF_HELPER_FLAGS_1(fabss, TCG_CALL_NO_RWG_SE, f32, f32) +DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, f32, env, f32) +DEF_HELPER_FLAGS_2(fsqrtd, TCG_CALL_NO_RWG, f64, env, f64) +DEF_HELPER_FLAGS_3(fcmps, TCG_CALL_NO_WG, tl, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmpd, TCG_CALL_NO_WG, tl, env, f64, f64) +DEF_HELPER_FLAGS_3(fcmpes, TCG_CALL_NO_WG, tl, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmped, TCG_CALL_NO_WG, tl, env, f64, f64) +DEF_HELPER_FLAGS_1(fsqrtq, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_1(fcmpq, TCG_CALL_NO_WG, tl, env) +DEF_HELPER_FLAGS_1(fcmpeq, TCG_CALL_NO_WG, tl, env) +#ifdef TARGET_SPARC64 +DEF_HELPER_FLAGS_3(ldxfsr, TCG_CALL_NO_RWG, tl, env, tl, i64) +DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_NO_RWG_SE, f64, f64) 
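/* [editor's note] Each DEF_HELPER_FLAGS_n() line above is expanded by
 * exec/helper-proto.h into a C prototype and by the helper-gen headers into
 * TCG call glue; the type letters are TCG shorthand (tl = target_ulong,
 * f32 = float32, s64 = int64_t, ...). The fcmps entry, for instance,
 * corresponds to: */
target_ulong helper_fcmps(CPUSPARCState *env, float32 src1, float32 src2);
/* TCG_CALL_NO_WG declares that the helper does not write TCG-visible
 * globals except by raising an exception, letting the code generator keep
 * more values live in registers across the call. */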
+DEF_HELPER_FLAGS_3(fcmps_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmps_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmps_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmpd_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64) +DEF_HELPER_FLAGS_3(fcmpd_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64) +DEF_HELPER_FLAGS_3(fcmpd_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64) +DEF_HELPER_FLAGS_3(fcmpes_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmpes_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmpes_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32) +DEF_HELPER_FLAGS_3(fcmped_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64) +DEF_HELPER_FLAGS_3(fcmped_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64) +DEF_HELPER_FLAGS_3(fcmped_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64) +DEF_HELPER_FLAGS_1(fabsq, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_1(fcmpq_fcc1, TCG_CALL_NO_WG, tl, env) +DEF_HELPER_FLAGS_1(fcmpq_fcc2, TCG_CALL_NO_WG, tl, env) +DEF_HELPER_FLAGS_1(fcmpq_fcc3, TCG_CALL_NO_WG, tl, env) +DEF_HELPER_FLAGS_1(fcmpeq_fcc1, TCG_CALL_NO_WG, tl, env) +DEF_HELPER_FLAGS_1(fcmpeq_fcc2, TCG_CALL_NO_WG, tl, env) +DEF_HELPER_FLAGS_1(fcmpeq_fcc3, TCG_CALL_NO_WG, tl, env) +#endif +DEF_HELPER_2(raise_exception, noreturn, env, int) +#define F_HELPER_0_1(name) \ + DEF_HELPER_FLAGS_1(f ## name, TCG_CALL_NO_RWG, void, env) + +DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_RWG, f64, env, f64, f64) +DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_RWG, f64, env, f64, f64) +DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_RWG, f64, env, f64, f64) +DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_RWG, f64, env, f64, f64) +F_HELPER_0_1(addq) +F_HELPER_0_1(subq) +F_HELPER_0_1(mulq) +F_HELPER_0_1(divq) + +DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_RWG, f32, env, f32, f32) +DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_RWG, f32, env, f32, f32) +DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_RWG, f32, env, f32, f32) +DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_RWG, f32, env, f32, f32) + +DEF_HELPER_FLAGS_3(fsmuld, TCG_CALL_NO_RWG, f64, env, f32, f32) +DEF_HELPER_FLAGS_3(fdmulq, TCG_CALL_NO_RWG, void, env, f64, f64) + +DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_NO_RWG_SE, f32, f32) +DEF_HELPER_FLAGS_2(fitod, TCG_CALL_NO_RWG_SE, f64, env, s32) +DEF_HELPER_FLAGS_2(fitoq, TCG_CALL_NO_RWG, void, env, s32) + +DEF_HELPER_FLAGS_2(fitos, TCG_CALL_NO_RWG, f32, env, s32) + +#ifdef TARGET_SPARC64 +DEF_HELPER_FLAGS_1(fnegd, TCG_CALL_NO_RWG_SE, f64, f64) +DEF_HELPER_FLAGS_1(fnegq, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_2(fxtos, TCG_CALL_NO_RWG, f32, env, s64) +DEF_HELPER_FLAGS_2(fxtod, TCG_CALL_NO_RWG, f64, env, s64) +DEF_HELPER_FLAGS_2(fxtoq, TCG_CALL_NO_RWG, void, env, s64) +#endif +DEF_HELPER_FLAGS_2(fdtos, TCG_CALL_NO_RWG, f32, env, f64) +DEF_HELPER_FLAGS_2(fstod, TCG_CALL_NO_RWG, f64, env, f32) +DEF_HELPER_FLAGS_1(fqtos, TCG_CALL_NO_RWG, f32, env) +DEF_HELPER_FLAGS_2(fstoq, TCG_CALL_NO_RWG, void, env, f32) +DEF_HELPER_FLAGS_1(fqtod, TCG_CALL_NO_RWG, f64, env) +DEF_HELPER_FLAGS_2(fdtoq, TCG_CALL_NO_RWG, void, env, f64) +DEF_HELPER_FLAGS_2(fstoi, TCG_CALL_NO_RWG, s32, env, f32) +DEF_HELPER_FLAGS_2(fdtoi, TCG_CALL_NO_RWG, s32, env, f64) +DEF_HELPER_FLAGS_1(fqtoi, TCG_CALL_NO_RWG, s32, env) +#ifdef TARGET_SPARC64 +DEF_HELPER_FLAGS_2(fstox, TCG_CALL_NO_RWG, s64, env, f32) +DEF_HELPER_FLAGS_2(fdtox, TCG_CALL_NO_RWG, s64, env, f64) +DEF_HELPER_FLAGS_1(fqtox, TCG_CALL_NO_RWG, s64, env) + +DEF_HELPER_FLAGS_2(fpmerge, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8x16, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8x16al, TCG_CALL_NO_RWG_SE, i64, 
i64, i64) +DEF_HELPER_FLAGS_2(fmul8x16au, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmuld8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmuld8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fexpand, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_3(pdist, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) +DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_NO_RWG_SE, i32, i64, i64) +DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) +DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_NO_RWG_SE, i32, i64, i64) +DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) +#define VIS_HELPER(name) \ + DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_NO_RWG_SE, \ + i64, i64, i64) \ + DEF_HELPER_FLAGS_2(f ## name ## 16s, TCG_CALL_NO_RWG_SE, \ + i32, i32, i32) \ + DEF_HELPER_FLAGS_2(f ## name ## 32, TCG_CALL_NO_RWG_SE, \ + i64, i64, i64) \ + DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \ + i32, i32, i32) + +VIS_HELPER(padd) +VIS_HELPER(psub) +#define VIS_CMPHELPER(name) \ + DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \ + i64, i64, i64) \ + DEF_HELPER_FLAGS_2(f##name##32, TCG_CALL_NO_RWG_SE, \ + i64, i64, i64) +VIS_CMPHELPER(cmpgt) +VIS_CMPHELPER(cmpeq) +VIS_CMPHELPER(cmple) +VIS_CMPHELPER(cmpne) +#endif +#undef F_HELPER_0_1 +#undef VIS_HELPER +#undef VIS_CMPHELPER +DEF_HELPER_1(compute_psr, void, env) +DEF_HELPER_FLAGS_1(compute_C_icc, TCG_CALL_NO_WG_SE, i32, env) diff --git a/qemu/target-sparc/int32_helper.c b/qemu/target/sparc/int32_helper.c similarity index 85% rename from qemu/target-sparc/int32_helper.c rename to qemu/target/sparc/int32_helper.c index 4a34a710..30893d7a 100644 --- a/qemu/target-sparc/int32_helper.c +++ b/qemu/target/sparc/int32_helper.c @@ -17,13 +17,14 @@ * License along with this library; if not, see . 
*/ +#include "qemu/osdep.h" #include "cpu.h" -#include "sysemu/sysemu.h" +//#include "sysemu/runstate.h" void sparc_cpu_do_interrupt(CPUState *cs) { - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; int cwp, intno = cs->exception_index; @@ -32,18 +33,16 @@ void sparc_cpu_do_interrupt(CPUState *cs) cpu_get_psr(env); } -#if !defined(CONFIG_USER_ONLY) if (env->psret == 0) { if (cs->exception_index == 0x80 && - env->def->features & CPU_FEATURE_TA0_SHUTDOWN) { - qemu_system_shutdown_request(); + env->def.features & CPU_FEATURE_TA0_SHUTDOWN) { + // qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } else { cpu_abort(cs, "Trap 0x%02x while interrupts disabled, Error state", cs->exception_index); } return; } -#endif env->psret = 0; cwp = cpu_cwp_dec(env, env->cwp - 1); cpu_set_cwp(env, cwp); @@ -55,9 +54,13 @@ void sparc_cpu_do_interrupt(CPUState *cs) env->pc = env->tbr; env->npc = env->pc + 4; cs->exception_index = -1; + + /* IRQ acknowledgment */ + if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) { + env->qemu_irq_ack(env, env->irq_manager, intno); + } } -#if !defined(CONFIG_USER_ONLY) static void leon3_cache_control_int(CPUSPARCState *env) { uint32_t state = 0; @@ -67,7 +70,6 @@ static void leon3_cache_control_int(CPUSPARCState *env) state = env->cache_control & CACHE_STATE_MASK; if (state == CACHE_ENABLED) { state = CACHE_FROZEN; - //trace_int_helper_icache_freeze(); } env->cache_control &= ~CACHE_STATE_MASK; @@ -79,7 +81,6 @@ static void leon3_cache_control_int(CPUSPARCState *env) state = (env->cache_control >> 2) & CACHE_STATE_MASK; if (state == CACHE_ENABLED) { state = CACHE_FROZEN; - //trace_int_helper_dcache_freeze(); } env->cache_control &= ~(CACHE_STATE_MASK << 2); @@ -89,7 +90,6 @@ static void leon3_cache_control_int(CPUSPARCState *env) void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno) { - //leon3_irq_ack(irq_manager, intno); + // leon3_irq_ack(irq_manager, intno); leon3_cache_control_int(env); } -#endif diff --git a/qemu/target-sparc/int64_helper.c b/qemu/target/sparc/int64_helper.c similarity index 60% rename from qemu/target-sparc/int64_helper.c rename to qemu/target/sparc/int64_helper.c index 7eba49c3..601c769f 100644 --- a/qemu/target-sparc/int64_helper.c +++ b/qemu/target/sparc/int64_helper.c @@ -17,13 +17,13 @@ * License along with this library; if not, see . 
*/ +#include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" - void sparc_cpu_do_interrupt(CPUState *cs) { - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; int intno = cs->exception_index; trap_state *tsptr; @@ -33,13 +33,11 @@ void sparc_cpu_do_interrupt(CPUState *cs) cpu_get_psr(env); } -#if !defined(CONFIG_USER_ONLY) if (env->tl >= env->maxtl) { cpu_abort(cs, "Trap 0x%04x while trap level (%d) >= MAXTL (%d)," " Error state", cs->exception_index, env->tl, env->maxtl); return; } -#endif if (env->tl < env->maxtl - 1) { env->tl++; } else { @@ -57,18 +55,64 @@ void sparc_cpu_do_interrupt(CPUState *cs) tsptr->tnpc = env->npc; tsptr->tt = intno; + if (cpu_has_hypervisor(env)) { + env->htstate[env->tl] = env->hpstate; + /* XXX OpenSPARC T1 - UltraSPARC T3 have MAXPTL=2 + but this may change in the future */ + if (env->tl > 2) { + env->hpstate |= HS_PRIV; + } + } + + if (env->def.features & CPU_FEATURE_GL) { + tsptr->tstate |= (env->gl & 7ULL) << 40; + cpu_gl_switch_gregs(env, env->gl + 1); + env->gl++; + } + switch (intno) { case TT_IVEC: - cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG); + if (!cpu_has_hypervisor(env)) { + cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG); + } break; case TT_TFAULT: case TT_DFAULT: - case TT_TMISS: case TT_TMISS+1: case TT_TMISS+2: case TT_TMISS+3: - case TT_DMISS: case TT_DMISS+1: case TT_DMISS+2: case TT_DMISS+3: - case TT_DPROT: case TT_DPROT+1: case TT_DPROT+2: case TT_DPROT+3: - cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG); + case TT_TMISS: + case TT_TMISS + 1: + case TT_TMISS + 2: + case TT_TMISS + 3: + + case TT_DMISS: + case TT_DMISS + 1: + case TT_DMISS + 2: + case TT_DMISS + 3: + + case TT_DPROT: + case TT_DPROT + 1: + case TT_DPROT + 2: + case TT_DPROT + 3: + + if (cpu_has_hypervisor(env)) { + env->hpstate |= HS_PRIV; + env->pstate = PS_PEF | PS_PRIV; + } else { + cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG); + } break; + // case TT_INSN_REAL_TRANSLATION_MISS ... TT_DATA_REAL_TRANSLATION_MISS: + // case TT_HTRAP ... TT_HTRAP + 127: + // env->hpstate |= HS_PRIV; + // break; default: + if (intno >= TT_INSN_REAL_TRANSLATION_MISS && intno <= TT_DATA_REAL_TRANSLATION_MISS) { + env->hpstate |= HS_PRIV; + break; + } + if (intno >= TT_HTRAP && intno <= TT_HTRAP + 127) { + env->hpstate |= HS_PRIV; + break; + } cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_AG); break; } @@ -80,9 +124,13 @@ void sparc_cpu_do_interrupt(CPUState *cs) } else if ((intno & 0x1c0) == TT_FILL) { cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1)); } - env->tbr &= ~0x7fffULL; - env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5); - env->pc = env->tbr; + + if (cpu_hypervisor_mode(env)) { + env->pc = (env->htba & ~0x3fffULL) | (intno << 5); + } else { + env->pc = env->tbr & ~0x7fffULL; + env->pc |= ((env->tl > 1) ? 
1 << 14 : 0) | (intno << 5); + } env->npc = env->pc + 4; cs->exception_index = -1; } @@ -96,11 +144,9 @@ static bool do_modify_softint(CPUSPARCState *env, uint32_t value) { if (env->softint != value) { env->softint = value; -#if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - //cpu_check_irqs(env); + // cpu_check_irqs(env); } -#endif return true; } return false; @@ -109,20 +155,20 @@ static bool do_modify_softint(CPUSPARCState *env, uint32_t value) void helper_set_softint(CPUSPARCState *env, uint64_t value) { if (do_modify_softint(env, env->softint | (uint32_t)value)) { - //trace_int_helper_set_softint(env->softint); + // trace_int_helper_set_softint(env->softint); } } void helper_clear_softint(CPUSPARCState *env, uint64_t value) { if (do_modify_softint(env, env->softint & (uint32_t)~value)) { - //trace_int_helper_clear_softint(env->softint); + // trace_int_helper_clear_softint(env->softint); } } void helper_write_softint(CPUSPARCState *env, uint64_t value) { if (do_modify_softint(env, (uint32_t)value)) { - //trace_int_helper_write_softint(env->softint); + // trace_int_helper_write_softint(env->softint); } } diff --git a/qemu/target/sparc/ldst_helper.c b/qemu/target/sparc/ldst_helper.c new file mode 100644 index 00000000..df4fe058 --- /dev/null +++ b/qemu/target/sparc/ldst_helper.c @@ -0,0 +1,1878 @@ +/* + * Helpers for loads and stores + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "tcg/tcg.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "asi.h" + +//#define DEBUG_MMU +//#define DEBUG_MXCC +//#define DEBUG_UNALIGNED +//#define DEBUG_UNASSIGNED +//#define DEBUG_ASI +//#define DEBUG_CACHE_CONTROL + +#ifdef DEBUG_MMU +#define DPRINTF_MMU(fmt, ...) \ + do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF_MMU(fmt, ...) do {} while (0) +#endif + +#ifdef DEBUG_MXCC +#define DPRINTF_MXCC(fmt, ...) \ + do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF_MXCC(fmt, ...) do {} while (0) +#endif + +#ifdef DEBUG_ASI +#define DPRINTF_ASI(fmt, ...) \ + do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0) +#endif + +#ifdef DEBUG_CACHE_CONTROL +#define DPRINTF_CACHE_CONTROL(fmt, ...) \ + do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF_CACHE_CONTROL(fmt, ...) 
do {} while (0) +#endif + +#ifdef TARGET_SPARC64 +#ifndef TARGET_ABI32 +#define AM_CHECK(env1) ((env1)->pstate & PS_AM) +#else +#define AM_CHECK(env1) (1) +#endif +#endif + +#define QT0 (env->qt0) +#define QT1 (env->qt1) + +#if defined(TARGET_SPARC64) +/* Calculates TSB pointer value for fault page size + * UltraSPARC IIi has fixed sizes (8k or 64k) for the page pointers + * UA2005 holds the page size configuration in mmu_ctx registers */ +static uint64_t ultrasparc_tsb_pointer(CPUSPARCState *env, + const SparcV9MMU *mmu, const int idx) +{ + uint64_t tsb_register; + int page_size; + if (cpu_has_hypervisor(env)) { + int tsb_index = 0; + int ctx = mmu->tag_access & 0x1fffULL; + uint64_t ctx_register = mmu->sun4v_ctx_config[ctx ? 1 : 0]; + tsb_index = idx; + tsb_index |= ctx ? 2 : 0; + page_size = idx ? ctx_register >> 8 : ctx_register; + page_size &= 7; + tsb_register = mmu->sun4v_tsb_pointers[tsb_index]; + } else { + page_size = idx; + tsb_register = mmu->tsb; + } + int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0; + int tsb_size = tsb_register & 0xf; + + uint64_t tsb_base_mask = (~0x1fffULL) << tsb_size; + + /* move va bits to correct position, + * the context bits will be masked out later */ + uint64_t va = mmu->tag_access >> (3 * page_size + 9); + + /* calculate tsb_base mask and adjust va if split is in use */ + if (tsb_split) { + if (idx == 0) { + va &= ~(1ULL << (13 + tsb_size)); + } else { + va |= (1ULL << (13 + tsb_size)); + } + tsb_base_mask <<= 1; + } + + return ((tsb_register & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL; +} + +/* Calculates tag target register value by reordering bits + in tag access register */ +static uint64_t ultrasparc_tag_target(uint64_t tag_access_register) +{ + return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22); +} + +static void replace_tlb_entry(SparcTLBEntry *tlb, + uint64_t tlb_tag, uint64_t tlb_tte, + CPUSPARCState *env) +{ + target_ulong mask, size, va, offset; + + /* flush page range if translation is valid */ + if (TTE_IS_VALID(tlb->tte)) { + CPUState *cs = env_cpu(env); + + size = 8192ULL << 3 * TTE_PGSIZE(tlb->tte); + mask = 1ULL + ~size; + + va = tlb->tag & mask; + + for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) { + tlb_flush_page(cs, va + offset); + } + } + + tlb->tag = tlb_tag; + tlb->tte = tlb_tte; +} + +static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr, + const char *strmmu, CPUSPARCState *env1) +{ + unsigned int i; + target_ulong mask; + uint64_t context; + + int is_demap_context = (demap_addr >> 6) & 1; + + /* demap context */ + switch ((demap_addr >> 4) & 3) { + case 0: /* primary */ + context = env1->dmmu.mmu_primary_context; + break; + case 1: /* secondary */ + context = env1->dmmu.mmu_secondary_context; + break; + case 2: /* nucleus */ + context = 0; + break; + case 3: /* reserved */ + default: + return; + } + + for (i = 0; i < 64; i++) { + if (TTE_IS_VALID(tlb[i].tte)) { + + if (is_demap_context) { + /* will remove non-global entries matching context value */ + if (TTE_IS_GLOBAL(tlb[i].tte) || + !tlb_compare_context(&tlb[i], context)) { + continue; + } + } else { + /* demap page + will remove any entry matching VA */ + mask = 0xffffffffffffe000ULL; + mask <<= 3 * ((tlb[i].tte >> 61) & 3); + + if (!compare_masked(demap_addr, tlb[i].tag, mask)) { + continue; + } + + /* entry should be global or matching context value */ + if (!TTE_IS_GLOBAL(tlb[i].tte) && + !tlb_compare_context(&tlb[i], context)) { + continue; + } + } + + replace_tlb_entry(&tlb[i], 0, 0, env1); +#ifdef 
DEBUG_MMU + DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i); + dump_mmu(env1); +#endif + } + } +} + +static uint64_t sun4v_tte_to_sun4u(CPUSPARCState *env, uint64_t tag, + uint64_t sun4v_tte) +{ + uint64_t sun4u_tte; + if (!(cpu_has_hypervisor(env) && (tag & TLB_UST1_IS_SUN4V_BIT))) { + /* is already in the sun4u format */ + return sun4v_tte; + } + sun4u_tte = TTE_PA(sun4v_tte) | (sun4v_tte & TTE_VALID_BIT); + sun4u_tte |= (sun4v_tte & 3ULL) << 61; /* TTE_PGSIZE */ + sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_NFO_BIT_UA2005, TTE_NFO_BIT); + sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_USED_BIT_UA2005, TTE_USED_BIT); + sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_W_OK_BIT_UA2005, TTE_W_OK_BIT); + sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_SIDEEFFECT_BIT_UA2005, + TTE_SIDEEFFECT_BIT); + sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_PRIV_BIT_UA2005, TTE_PRIV_BIT); + sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_LOCKED_BIT_UA2005, TTE_LOCKED_BIT); + return sun4u_tte; +} + +static void replace_tlb_1bit_lru(SparcTLBEntry *tlb, + uint64_t tlb_tag, uint64_t tlb_tte, + const char *strmmu, CPUSPARCState *env1, + uint64_t addr) +{ + unsigned int i, replace_used; + + tlb_tte = sun4v_tte_to_sun4u(env1, addr, tlb_tte); + if (cpu_has_hypervisor(env1)) { + uint64_t new_vaddr = tlb_tag & ~0x1fffULL; + uint64_t new_size = 8192ULL << 3 * TTE_PGSIZE(tlb_tte); + uint32_t new_ctx = tlb_tag & 0x1fffU; + for (i = 0; i < 64; i++) { + uint32_t ctx = tlb[i].tag & 0x1fffU; + /* check if new mapping overlaps an existing one */ + if (new_ctx == ctx) { + uint64_t vaddr = tlb[i].tag & ~0x1fffULL; + uint64_t size = 8192ULL << 3 * TTE_PGSIZE(tlb[i].tte); + if (new_vaddr == vaddr + || (new_vaddr < vaddr + size + && vaddr < new_vaddr + new_size)) { + DPRINTF_MMU("auto demap entry [%d] %lx->%lx\n", i, vaddr, + new_vaddr); + replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); + return; + } + } + + } + } + /* Try replacing invalid entry */ + for (i = 0; i < 64; i++) { + if (!TTE_IS_VALID(tlb[i].tte)) { + replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); +#ifdef DEBUG_MMU + DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i); + dump_mmu(env1); +#endif + return; + } + } + + /* All entries are valid, try replacing unlocked entry */ + + for (replace_used = 0; replace_used < 2; ++replace_used) { + + /* Used entries are not replaced on first pass */ + + for (i = 0; i < 64; i++) { + if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) { + + replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); +#ifdef DEBUG_MMU + DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n", + strmmu, (replace_used ? 
"used" : "unused"), i); + dump_mmu(env1); +#endif + return; + } + } + + /* Now reset used bit and search for unused entries again */ + + for (i = 0; i < 64; i++) { + TTE_SET_UNUSED(tlb[i].tte); + } + } + +#ifdef DEBUG_MMU + DPRINTF_MMU("%s lru replacement: no free entries available, " + "replacing the last one\n", strmmu); +#endif + /* corner case: the last entry is replaced anyway */ + replace_tlb_entry(&tlb[63], tlb_tag, tlb_tte, env1); +} + +#endif + +#ifdef TARGET_SPARC64 +/* returns true if access using this ASI is to have address translated by MMU + otherwise access is to raw physical address */ +/* TODO: check sparc32 bits */ +static inline int is_translating_asi(int asi) +{ + /* Ultrasparc IIi translating asi + - note this list is defined by cpu implementation + */ +#define XRANGE(x, a, b) (x >=a && x <= b) + if (XRANGE(asi, 0x04, 0x11)) + return 1; + if (XRANGE(asi, 0x16, 0x19)) + return 1; + if (XRANGE(asi, 0x1E, 0x1F)) + return 1; + if (XRANGE(asi, 0x24, 0x2C)) + return 1; + if (XRANGE(asi, 0x70, 0x73)) + return 1; + if (XRANGE(asi, 0x78, 0x79)) + return 1; + if (XRANGE(asi, 0x80, 0xFF)) + return 1; +#undef XRANGE + + return 0; +} + +static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr) +{ + if (AM_CHECK(env1)) { + addr &= 0xffffffffULL; + } + return addr; +} + +static inline target_ulong asi_address_mask(CPUSPARCState *env, + int asi, target_ulong addr) +{ + if (is_translating_asi(asi)) { + addr = address_mask(env, addr); + } + return addr; +} + +static inline void do_check_asi(CPUSPARCState *env, int asi, uintptr_t ra) +{ + /* ASIs >= 0x80 are user mode. + * ASIs >= 0x30 are hyper mode (or super if hyper is not available). + * ASIs <= 0x2f are super mode. + */ + if (asi < 0x80 + && !cpu_hypervisor_mode(env) + && (!cpu_supervisor_mode(env) + || (asi >= 0x30 && cpu_has_hypervisor(env)))) { + cpu_raise_exception_ra(env, TT_PRIV_ACT, ra); + } +} +#endif + +static void do_check_align(CPUSPARCState *env, target_ulong addr, + uint32_t align, uintptr_t ra) +{ + if (addr & align) { +#ifdef DEBUG_UNALIGNED + printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx + "\n", addr, env->pc); +#endif + cpu_raise_exception_ra(env, TT_UNALIGNED, ra); + } +} + +void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align) +{ + do_check_align(env, addr, align, GETPC()); +} + +#if !defined(TARGET_SPARC64) && defined(DEBUG_MXCC) +static void dump_mxcc(CPUSPARCState *env) +{ + printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 + "\n", + env->mxccdata[0], env->mxccdata[1], + env->mxccdata[2], env->mxccdata[3]); + printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 + "\n" + " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 + "\n", + env->mxccregs[0], env->mxccregs[1], + env->mxccregs[2], env->mxccregs[3], + env->mxccregs[4], env->mxccregs[5], + env->mxccregs[6], env->mxccregs[7]); +} +#endif + +#if defined(TARGET_SPARC64) && defined(DEBUG_ASI) +static void dump_asi(const char *txt, target_ulong addr, int asi, int size, + uint64_t r1) +{ + switch (size) { + case 1: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt, + addr, asi, r1 & 0xff); + break; + case 2: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt, + addr, asi, r1 & 0xffff); + break; + case 4: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt, + addr, asi, r1 & 0xffffffff); + break; + case 8: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt, + 
addr, asi, r1); + break; + } +} +#endif + +#ifndef TARGET_SPARC64 +static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr, + bool is_write, bool is_exec, int is_asi, + unsigned size, uintptr_t retaddr) +{ + SPARCCPU *cpu = SPARC_CPU(cs); + CPUSPARCState *env = &cpu->env; + int fault_type; + +#ifdef DEBUG_UNASSIGNED + if (is_asi) { + printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx + " asi 0x%02x from " TARGET_FMT_lx "\n", + is_exec ? "exec" : is_write ? "write" : "read", size, + size == 1 ? "" : "s", addr, is_asi, env->pc); + } else { + printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx + " from " TARGET_FMT_lx "\n", + is_exec ? "exec" : is_write ? "write" : "read", size, + size == 1 ? "" : "s", addr, env->pc); + } +#endif + /* Don't overwrite translation and access faults */ + fault_type = (env->mmuregs[3] & 0x1c) >> 2; + if ((fault_type > 4) || (fault_type == 0)) { + env->mmuregs[3] = 0; /* Fault status register */ + if (is_asi) { + env->mmuregs[3] |= 1 << 16; + } + if (env->psrs) { + env->mmuregs[3] |= 1 << 5; + } + if (is_exec) { + env->mmuregs[3] |= 1 << 6; + } + if (is_write) { + env->mmuregs[3] |= 1 << 7; + } + env->mmuregs[3] |= (5 << 2) | 2; + /* SuperSPARC will never place instruction fault addresses in the FAR */ + if (!is_exec) { + env->mmuregs[4] = addr; /* Fault address register */ + } + } + /* overflow (same type fault was not read before another fault) */ + if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) { + env->mmuregs[3] |= 1; + } + + if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) { + int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS; + cpu_raise_exception_ra(env, tt, retaddr); + } + + /* + * flush neverland mappings created during no-fault mode, + * so the sequential MMU faults report proper fault types + */ + if (env->mmuregs[0] & MMU_NF) { + tlb_flush(cs); + } +} +#else +static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr, + bool is_write, bool is_exec, int is_asi, + unsigned size, uintptr_t retaddr) +{ + SPARCCPU *cpu = SPARC_CPU(cs); + CPUSPARCState *env = &cpu->env; + +#ifdef DEBUG_UNASSIGNED + printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx + "\n", addr, env->pc); +#endif + + if (is_exec) { /* XXX has_hypervisor */ + if (env->lsu & (IMMU_E)) { + cpu_raise_exception_ra(env, TT_CODE_ACCESS, retaddr); + } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) { + cpu_raise_exception_ra(env, TT_INSN_REAL_TRANSLATION_MISS, retaddr); + } + } else { + if (env->lsu & (DMMU_E)) { + cpu_raise_exception_ra(env, TT_DATA_ACCESS, retaddr); + } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) { + cpu_raise_exception_ra(env, TT_DATA_REAL_TRANSLATION_MISS, retaddr); + } + } +} +#endif + +#ifndef TARGET_SPARC64 + +/* Leon3 cache control */ + +static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr, + uint64_t val, int size) +{ + DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n", + addr, val, size); + + if (size != 4) { + DPRINTF_CACHE_CONTROL("32bits only\n"); + return; + } + + switch (addr) { + case 0x00: /* Cache control */ + + /* These values must always be read as zeros */ + val &= ~CACHE_CTRL_FD; + val &= ~CACHE_CTRL_FI; + val &= ~CACHE_CTRL_IB; + val &= ~CACHE_CTRL_IP; + val &= ~CACHE_CTRL_DP; + + env->cache_control = val; + break; + case 0x04: /* Instruction cache configuration */ + case 0x08: /* Data cache configuration */ + /* Read Only */ + break; + default: + DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr); + break; + 
}; +} + +static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr, + int size) +{ + uint64_t ret = 0; + + if (size != 4) { + DPRINTF_CACHE_CONTROL("32bits only\n"); + return 0; + } + + switch (addr) { + case 0x00: /* Cache control */ + ret = env->cache_control; + break; + + /* Configuration registers are read-only and always keep these + predefined values */ + + case 0x04: /* Instruction cache configuration */ + ret = 0x10220000; + break; + case 0x08: /* Data cache configuration */ + ret = 0x18220000; + break; + default: + DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr); + break; + }; + DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n", + addr, ret, size); + return ret; +} + +uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, + int asi, uint32_t memop) +{ + int size = 1 << (memop & MO_SIZE); + int sign = memop & MO_SIGN; + CPUState *cs = env_cpu(env); + uint64_t ret = 0; +#if defined(DEBUG_MXCC) || defined(DEBUG_ASI) + uint32_t last_addr = addr; +#endif + + do_check_align(env, addr, size - 1, GETPC()); + switch (asi) { + case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */ + /* case ASI_LEON_CACHEREGS: Leon3 cache control */ + switch (addr) { + case 0x00: /* Leon3 Cache Control */ + case 0x08: /* Leon3 Instruction Cache config */ + case 0x0C: /* Leon3 Data Cache config */ + if (env->def.features & CPU_FEATURE_CACHE_CTRL) { + ret = leon3_cache_control_ld(env, addr, size); + } + break; + case 0x01c00a00: /* MXCC control register */ + if (size == 8) { + ret = env->mxccregs[3]; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00a04: /* MXCC control register */ + if (size == 4) { + ret = env->mxccregs[3]; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00c00: /* Module reset register */ + if (size == 8) { + ret = env->mxccregs[5]; + /* should we do something here? 
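Upstream QEMU also just returns the stored value, with no reset side effect.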
*/ + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00f00: /* MBus port address register */ + if (size == 8) { + ret = env->mxccregs[7]; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + default: + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented address, size: %d\n", addr, + size); + break; + } + DPRINTF_MXCC("asi = %d, size = %d, sign = %d, " + "addr = %08x -> ret = %" PRIx64 "," + "addr = %08x\n", asi, size, sign, last_addr, ret, addr); +#ifdef DEBUG_MXCC + dump_mxcc(env); +#endif + break; + case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */ + case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */ + { + int mmulev; + + mmulev = (addr >> 8) & 15; + if (mmulev > 4) { + ret = 0; + } else { + ret = mmu_probe(env, addr, mmulev); + } + DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n", + addr, mmulev, ret); + } + break; + case ASI_M_MMUREGS: /* SuperSparc MMU regs */ + case ASI_LEON_MMUREGS: /* LEON3 MMU regs */ + { + int reg = (addr >> 8) & 0x1f; + + ret = env->mmuregs[reg]; + if (reg == 3) { /* Fault status cleared on read */ + env->mmuregs[3] = 0; + } else if (reg == 0x13) { /* Fault status read */ + ret = env->mmuregs[3]; + } else if (reg == 0x14) { /* Fault address read */ + ret = env->mmuregs[4]; + } + DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret); + } + break; + case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */ + case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */ + case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */ + break; + case ASI_KERNELTXT: /* Supervisor code access */ + switch (size) { + case 1: + ret = cpu_ldub_code(env, addr); + break; + case 2: + ret = cpu_lduw_code(env, addr); + break; + default: + case 4: + ret = cpu_ldl_code(env, addr); + break; + case 8: + ret = cpu_ldq_code(env, addr); + break; + } + break; + case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */ + case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */ + case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */ + case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */ + break; + case 0x21: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x22: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x23: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x24: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x25: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x26: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x27: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x28: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x29: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2a: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2b: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2c: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2d: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2e: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + { + MemTxResult result; + hwaddr access_addr = (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32); + + switch (size) { + case 1: + ret = glue(address_space_ldub, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, + MEMTXATTRS_UNSPECIFIED, &result); + break; + case 2: + ret = glue(address_space_lduw, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, + MEMTXATTRS_UNSPECIFIED, &result); + break; + default: + case 4: + ret = glue(address_space_ldl, 
UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, + MEMTXATTRS_UNSPECIFIED, &result); + break; + case 8: + ret = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, + MEMTXATTRS_UNSPECIFIED, &result); + break; + } + + if (result != MEMTX_OK) { + sparc_raise_mmu_fault(cs, access_addr, false, false, false, + size, GETPC()); + } + break; + } + case 0x30: /* Turbosparc secondary cache diagnostic */ + case 0x31: /* Turbosparc RAM snoop */ + case 0x32: /* Turbosparc page table descriptor diagnostic */ + case 0x39: /* data cache diagnostic register */ + ret = 0; + break; + case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */ + { + int reg = (addr >> 8) & 3; + + switch (reg) { + case 0: /* Breakpoint Value (Addr) */ + ret = env->mmubpregs[reg]; + break; + case 1: /* Breakpoint Mask */ + ret = env->mmubpregs[reg]; + break; + case 2: /* Breakpoint Control */ + ret = env->mmubpregs[reg]; + break; + case 3: /* Breakpoint Status */ + ret = env->mmubpregs[reg]; + env->mmubpregs[reg] = 0ULL; + break; + } + DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg, + ret); + } + break; + case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ + ret = env->mmubpctrv; + break; + case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ + ret = env->mmubpctrc; + break; + case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ + ret = env->mmubpctrs; + break; + case 0x4c: /* SuperSPARC MMU Breakpoint Action */ + ret = env->mmubpaction; + break; + case ASI_USERTXT: /* User code access, XXX */ + default: + sparc_raise_mmu_fault(cs, addr, false, false, asi, size, GETPC()); + ret = 0; + break; + + case ASI_USERDATA: /* User data access */ + case ASI_KERNELDATA: /* Supervisor data access */ + case ASI_P: /* Implicit primary context data access (v9 only?) */ + case ASI_M_BYPASS: /* MMU passthrough */ + case ASI_LEON_BYPASS: /* LEON MMU passthrough */ + /* These are always handled inline. */ + g_assert_not_reached(); + } + if (sign) { + switch (size) { + case 1: + ret = (int8_t) ret; + break; + case 2: + ret = (int16_t) ret; + break; + case 4: + ret = (int32_t) ret; + break; + default: + break; + } + } +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return ret; +} + +void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, + int asi, uint32_t memop) +{ + int size = 1 << (memop & MO_SIZE); + CPUState *cs = env_cpu(env); + + do_check_align(env, addr, size - 1, GETPC()); + switch (asi) { + case ASI_M_MXCC: /* SuperSparc MXCC registers, or... 
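the Leon3 cache control registers, which share ASI 0x02 and are distinguished by the address checked below.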
*/ + /* case ASI_LEON_CACHEREGS: Leon3 cache control */ + switch (addr) { + case 0x00: /* Leon3 Cache Control */ + case 0x08: /* Leon3 Instruction Cache config */ + case 0x0C: /* Leon3 Data Cache config */ + if (env->def.features & CPU_FEATURE_CACHE_CTRL) { + leon3_cache_control_st(env, addr, val, size); + } + break; + + case 0x01c00000: /* MXCC stream data register 0 */ + if (size == 8) { + env->mxccdata[0] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00008: /* MXCC stream data register 1 */ + if (size == 8) { + env->mxccdata[1] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00010: /* MXCC stream data register 2 */ + if (size == 8) { + env->mxccdata[2] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00018: /* MXCC stream data register 3 */ + if (size == 8) { + env->mxccdata[3] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00100: /* MXCC stream source */ + { + int i; + + if (size == 8) { + env->mxccregs[0] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + + for (i = 0; i < 4; i++) { + MemTxResult result; + hwaddr access_addr = (env->mxccregs[0] & 0xffffffffULL) + 8 * i; + + env->mxccdata[i] = glue(address_space_ldq, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, + access_addr, + MEMTXATTRS_UNSPECIFIED, + &result); + if (result != MEMTX_OK) { + /* TODO: investigate whether this is the right behaviour */ + sparc_raise_mmu_fault(cs, access_addr, false, false, + false, size, GETPC()); + } + } + break; + } + case 0x01c00200: /* MXCC stream destination */ + { + int i; + + if (size == 8) { + env->mxccregs[1] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + + for (i = 0; i < 4; i++) { + MemTxResult result; + hwaddr access_addr = (env->mxccregs[1] & 0xffffffffULL) + 8 * i; + + glue(address_space_stq, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, env->mxccdata[i], + MEMTXATTRS_UNSPECIFIED, &result); + + if (result != MEMTX_OK) { + /* TODO: investigate whether this is the right behaviour */ + sparc_raise_mmu_fault(cs, access_addr, true, false, + false, size, GETPC()); + } + } + break; + } + case 0x01c00a00: /* MXCC control register */ + if (size == 8) { + env->mxccregs[3] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00a04: /* MXCC control register */ + if (size == 4) { + env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL) + | val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00e00: /* MXCC error register */ + /* writing a 1 bit clears the error */ + if (size == 8) { + env->mxccregs[6] &= ~val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00f00: /* MBus port address register */ + if (size == 8) { + env->mxccregs[7] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + default: + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented address, size: %d\n", addr, + size); + break; + } + DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n", + asi, size, 
addr, val); +#ifdef DEBUG_MXCC + dump_mxcc(env); +#endif + break; + case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */ + case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */ + { + int mmulev; + + mmulev = (addr >> 8) & 15; + DPRINTF_MMU("mmu flush level %d\n", mmulev); + switch (mmulev) { + case 0: /* flush page */ + tlb_flush_page(cs, addr & 0xfffff000); + break; + case 1: /* flush segment (256k) */ + case 2: /* flush region (16M) */ + case 3: /* flush context (4G) */ + case 4: /* flush entire */ + tlb_flush(cs); + break; + default: + break; + } +#ifdef DEBUG_MMU + dump_mmu(env); +#endif + } + break; + case ASI_M_MMUREGS: /* write MMU regs */ + case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */ + { + int reg = (addr >> 8) & 0x1f; + uint32_t oldreg; + + oldreg = env->mmuregs[reg]; + switch (reg) { + case 0: /* Control Register */ + env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) | + (val & 0x00ffffff); + /* Mappings generated during no-fault mode + are invalid in normal mode. */ + if ((oldreg ^ env->mmuregs[reg]) + & (MMU_NF | env->def.mmu_bm)) { + tlb_flush(cs); + } + break; + case 1: /* Context Table Pointer Register */ + env->mmuregs[reg] = val & env->def.mmu_ctpr_mask; + break; + case 2: /* Context Register */ + env->mmuregs[reg] = val & env->def.mmu_cxr_mask; + if (oldreg != env->mmuregs[reg]) { + /* we flush when the MMU context changes because + QEMU has no MMU context support */ + tlb_flush(cs); + } + break; + case 3: /* Synchronous Fault Status Register with Clear */ + case 4: /* Synchronous Fault Address Register */ + break; + case 0x10: /* TLB Replacement Control Register */ + env->mmuregs[reg] = val & env->def.mmu_trcr_mask; + break; + case 0x13: /* Synchronous Fault Status Register with Read + and Clear */ + env->mmuregs[3] = val & env->def.mmu_sfsr_mask; + break; + case 0x14: /* Synchronous Fault Address Register */ + env->mmuregs[4] = val; + break; + default: + env->mmuregs[reg] = val; + break; + } + if (oldreg != env->mmuregs[reg]) { + DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n", + reg, oldreg, env->mmuregs[reg]); + } +#ifdef DEBUG_MMU + dump_mmu(env); +#endif + } + break; + case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */ + case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */ + case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */ + break; + case ASI_M_TXTC_TAG: /* I-cache tag */ + case ASI_M_TXTC_DATA: /* I-cache data */ + case ASI_M_DATAC_TAG: /* D-cache tag */ + case ASI_M_DATAC_DATA: /* D-cache data */ + case ASI_M_FLUSH_PAGE: /* I/D-cache flush page */ + case ASI_M_FLUSH_SEG: /* I/D-cache flush segment */ + case ASI_M_FLUSH_REGION: /* I/D-cache flush region */ + case ASI_M_FLUSH_CTX: /* I/D-cache flush context */ + case ASI_M_FLUSH_USER: /* I/D-cache flush user */ + break; + case 0x21: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x22: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x23: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x24: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x25: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x26: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x27: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x28: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x29: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2a: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2b: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2c: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2d: /* MMU passthrough, 0x100000000 to 
0xfffffffff */ + case 0x2e: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */ + { + MemTxResult result; + hwaddr access_addr = (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32); + + switch (size) { + case 1: + glue(address_space_stb, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, val, + MEMTXATTRS_UNSPECIFIED, &result); + break; + case 2: + glue(address_space_stw, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, val, + MEMTXATTRS_UNSPECIFIED, &result); + break; + case 4: + default: + glue(address_space_stl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, val, + MEMTXATTRS_UNSPECIFIED, &result); + break; + case 8: + glue(address_space_stq, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, access_addr, val, + MEMTXATTRS_UNSPECIFIED, &result); + break; + } + if (result != MEMTX_OK) { + sparc_raise_mmu_fault(cs, access_addr, true, false, false, + size, GETPC()); + } + } + break; + case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */ + case 0x31: /* store buffer data, Ross RT620 I-cache flush or + Turbosparc snoop RAM */ + case 0x32: /* store buffer control or Turbosparc page table + descriptor diagnostic */ + case 0x36: /* I-cache flash clear */ + case 0x37: /* D-cache flash clear */ + break; + case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/ + { + int reg = (addr >> 8) & 3; + + switch (reg) { + case 0: /* Breakpoint Value (Addr) */ + env->mmubpregs[reg] = (val & 0xfffffffffULL); + break; + case 1: /* Breakpoint Mask */ + env->mmubpregs[reg] = (val & 0xfffffffffULL); + break; + case 2: /* Breakpoint Control */ + env->mmubpregs[reg] = (val & 0x7fULL); + break; + case 3: /* Breakpoint Status */ + env->mmubpregs[reg] = (val & 0xfULL); + break; + } + DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg, + env->mmuregs[reg]); + } + break; + case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ + env->mmubpctrv = val & 0xffffffff; + break; + case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ + env->mmubpctrc = val & 0x3; + break; + case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ + env->mmubpctrs = val & 0x3; + break; + case 0x4c: /* SuperSPARC MMU Breakpoint Action */ + env->mmubpaction = val & 0x1fff; + break; + case ASI_USERTXT: /* User code access, XXX */ + case ASI_KERNELTXT: /* Supervisor code access, XXX */ + default: + sparc_raise_mmu_fault(cs, addr, true, false, asi, size, GETPC()); + break; + + case ASI_USERDATA: /* User data access */ + case ASI_KERNELDATA: /* Supervisor data access */ + case ASI_P: + case ASI_M_BYPASS: /* MMU passthrough */ + case ASI_LEON_BYPASS: /* LEON MMU passthrough */ + case ASI_M_BCOPY: /* Block copy, sta access */ + case ASI_M_BFILL: /* Block fill, stda access */ + /* These are always handled inline. */ + g_assert_not_reached(); + } +#ifdef DEBUG_ASI + dump_asi("write", addr, asi, size, val); +#endif +} + +#else /* TARGET_SPARC64 */ + +uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, + int asi, uint32_t memop) +{ + int size = 1 << (memop & MO_SIZE); + int sign = memop & MO_SIGN; + CPUState *cs = env_cpu(env); + uint64_t ret = 0; +#if defined(DEBUG_ASI) + target_ulong last_addr = addr; +#endif + + asi &= 0xff; + + do_check_asi(env, asi, GETPC()); + do_check_align(env, addr, size - 1, GETPC()); + addr = asi_address_mask(env, asi, addr); + + switch (asi) { + case ASI_PNF: + case ASI_PNFL: + case ASI_SNF: + case ASI_SNFL: + { + TCGMemOpIdx oi; + int idx = (env->pstate & PS_PRIV + ? (asi & 1 ? 
MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX) + : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX)); + +#ifdef _MSC_VER + if (cpu_get_phys_page_nofault(env, addr, idx) == 0xffffffffffffffffULL) { +#else + if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) { +#endif +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + /* exception_index is set in get_physical_address_data. */ + cpu_raise_exception_ra(env, cs->exception_index, GETPC()); + } + oi = make_memop_idx(memop, idx); + switch (size) { + case 1: + ret = helper_ret_ldub_mmu(env, addr, oi, GETPC()); + break; + case 2: + if (asi & 8) { + ret = helper_le_lduw_mmu(env, addr, oi, GETPC()); + } else { + ret = helper_be_lduw_mmu(env, addr, oi, GETPC()); + } + break; + case 4: + if (asi & 8) { + ret = helper_le_ldul_mmu(env, addr, oi, GETPC()); + } else { + ret = helper_be_ldul_mmu(env, addr, oi, GETPC()); + } + break; + case 8: + if (asi & 8) { + ret = helper_le_ldq_mmu(env, addr, oi, GETPC()); + } else { + ret = helper_be_ldq_mmu(env, addr, oi, GETPC()); + } + break; + default: + g_assert_not_reached(); + } + } + break; + + case ASI_AIUP: /* As if user primary */ + case ASI_AIUS: /* As if user secondary */ + case ASI_AIUPL: /* As if user primary LE */ + case ASI_AIUSL: /* As if user secondary LE */ + case ASI_P: /* Primary */ + case ASI_S: /* Secondary */ + case ASI_PL: /* Primary LE */ + case ASI_SL: /* Secondary LE */ + case ASI_REAL: /* Bypass */ + case ASI_REAL_IO: /* Bypass, non-cacheable */ + case ASI_REAL_L: /* Bypass LE */ + case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */ + case ASI_N: /* Nucleus */ + case ASI_NL: /* Nucleus Little Endian (LE) */ + case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */ + case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */ + case ASI_TWINX_AIUP: /* As if user primary, twinx */ + case ASI_TWINX_AIUS: /* As if user secondary, twinx */ + case ASI_TWINX_REAL: /* Real address, twinx */ + case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */ + case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */ + case ASI_TWINX_REAL_L: /* Real address, twinx, LE */ + case ASI_TWINX_N: /* Nucleus, twinx */ + case ASI_TWINX_NL: /* Nucleus, twinx, LE */ + /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */ + case ASI_TWINX_P: /* Primary, twinx */ + case ASI_TWINX_PL: /* Primary, twinx, LE */ + case ASI_TWINX_S: /* Secondary, twinx */ + case ASI_TWINX_SL: /* Secondary, twinx, LE */ + /* These are always handled inline. 
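The translator emits direct TCG memory operations for them, so reaching this slow-path helper indicates a translation bug; hence the assertion below.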
*/ + g_assert_not_reached(); + + case ASI_UPA_CONFIG: /* UPA config */ + /* XXX */ + break; + case ASI_LSU_CONTROL: /* LSU */ + ret = env->lsu; + break; + case ASI_IMMU: /* I-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + switch (reg) { + case 0: + /* 0x00 I-TSB Tag Target register */ + ret = ultrasparc_tag_target(env->immu.tag_access); + break; + case 3: /* SFSR */ + ret = env->immu.sfsr; + break; + case 5: /* TSB access */ + ret = env->immu.tsb; + break; + case 6: + /* 0x30 I-TSB Tag Access register */ + ret = env->immu.tag_access; + break; + default: + sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC()); + ret = 0; + } + break; + } + case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */ + { + /* env->immuregs[5] holds I-MMU TSB register value + env->immuregs[6] holds I-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env, &env->immu, 0); + break; + } + case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */ + { + /* env->immuregs[5] holds I-MMU TSB register value + env->immuregs[6] holds I-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env, &env->immu, 1); + break; + } + case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->itlb[reg].tte; + break; + } + case ASI_ITLB_TAG_READ: /* I-MMU tag read */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->itlb[reg].tag; + break; + } + case ASI_DMMU: /* D-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + switch (reg) { + case 0: + /* 0x00 D-TSB Tag Target register */ + ret = ultrasparc_tag_target(env->dmmu.tag_access); + break; + case 1: /* 0x08 Primary Context */ + ret = env->dmmu.mmu_primary_context; + break; + case 2: /* 0x10 Secondary Context */ + ret = env->dmmu.mmu_secondary_context; + break; + case 3: /* SFSR */ + ret = env->dmmu.sfsr; + break; + case 4: /* 0x20 SFAR */ + ret = env->dmmu.sfar; + break; + case 5: /* 0x28 TSB access */ + ret = env->dmmu.tsb; + break; + case 6: /* 0x30 D-TSB Tag Access register */ + ret = env->dmmu.tag_access; + break; + case 7: + ret = env->dmmu.virtual_watchpoint; + break; + case 8: + ret = env->dmmu.physical_watchpoint; + break; + default: + sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC()); + ret = 0; + } + break; + } + case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */ + { + /* env->dmmuregs[5] holds D-MMU TSB register value + env->dmmuregs[6] holds D-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env, &env->dmmu, 0); + break; + } + case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */ + { + /* env->dmmuregs[5] holds D-MMU TSB register value + env->dmmuregs[6] holds D-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env, &env->dmmu, 1); + break; + } + case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->dtlb[reg].tte; + break; + } + case ASI_DTLB_TAG_READ: /* D-MMU tag read */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->dtlb[reg].tag; + break; + } + case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */ + break; + case ASI_INTR_RECEIVE: /* Interrupt data receive */ + ret = env->ivec_status; + break; + case ASI_INTR_R: /* Incoming interrupt vector, RO */ + { + int reg = (addr >> 4) & 0x3; + if (reg < 3) { + ret = env->ivec_data[reg]; + } + break; + } + case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */ + if (unlikely((addr >= 0x20) && (addr < 0x30))) { + /* Hyperprivileged access only */ + sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC()); + } + /* fall through */ + case 
ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */ + { + unsigned int i = (addr >> 3) & 0x7; + ret = env->scratch[i]; + break; + } + case ASI_MMU: /* UA2005 Context ID registers */ + switch ((addr >> 3) & 0x3) { + case 1: + ret = env->dmmu.mmu_primary_context; + break; + case 2: + ret = env->dmmu.mmu_secondary_context; + break; + default: + sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); + } + break; + case ASI_DCACHE_DATA: /* D-cache data */ + case ASI_DCACHE_TAG: /* D-cache tag access */ + case ASI_ESTATE_ERROR_EN: /* E-cache error enable */ + case ASI_AFSR: /* E-cache asynchronous fault status */ + case ASI_AFAR: /* E-cache asynchronous fault address */ + case ASI_EC_TAG_DATA: /* E-cache tag data */ + case ASI_IC_INSTR: /* I-cache instruction access */ + case ASI_IC_TAG: /* I-cache tag access */ + case ASI_IC_PRE_DECODE: /* I-cache predecode */ + case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */ + case ASI_EC_W: /* E-cache tag */ + case ASI_EC_R: /* E-cache tag */ + break; + case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */ + case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */ + case ASI_IMMU_DEMAP: /* I-MMU demap, WO */ + case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */ + case ASI_DMMU_DEMAP: /* D-MMU demap, WO */ + case ASI_INTR_W: /* Interrupt vector, WO */ + default: + sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC()); + ret = 0; + break; + } + + /* Convert to signed number */ + if (sign) { + switch (size) { + case 1: + ret = (int8_t) ret; + break; + case 2: + ret = (int16_t) ret; + break; + case 4: + ret = (int32_t) ret; + break; + default: + break; + } + } +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return ret; +} + +void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, + int asi, uint32_t memop) +{ + int size = 1 << (memop & MO_SIZE); + CPUState *cs = env_cpu(env); + +#ifdef DEBUG_ASI + dump_asi("write", addr, asi, size, val); +#endif + + asi &= 0xff; + + do_check_asi(env, asi, GETPC()); + do_check_align(env, addr, size - 1, GETPC()); + addr = asi_address_mask(env, asi, addr); + + switch (asi) { + case ASI_AIUP: /* As if user primary */ + case ASI_AIUS: /* As if user secondary */ + case ASI_AIUPL: /* As if user primary LE */ + case ASI_AIUSL: /* As if user secondary LE */ + case ASI_P: /* Primary */ + case ASI_S: /* Secondary */ + case ASI_PL: /* Primary LE */ + case ASI_SL: /* Secondary LE */ + case ASI_REAL: /* Bypass */ + case ASI_REAL_IO: /* Bypass, non-cacheable */ + case ASI_REAL_L: /* Bypass LE */ + case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */ + case ASI_N: /* Nucleus */ + case ASI_NL: /* Nucleus Little Endian (LE) */ + case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */ + case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */ + case ASI_TWINX_AIUP: /* As if user primary, twinx */ + case ASI_TWINX_AIUS: /* As if user secondary, twinx */ + case ASI_TWINX_REAL: /* Real address, twinx */ + case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */ + case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */ + case ASI_TWINX_REAL_L: /* Real address, twinx, LE */ + case ASI_TWINX_N: /* Nucleus, twinx */ + case ASI_TWINX_NL: /* Nucleus, twinx, LE */ + /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */ + case ASI_TWINX_P: /* Primary, twinx */ + case ASI_TWINX_PL: /* Primary, twinx, LE */ + case ASI_TWINX_S: /* Secondary, twinx */ + case ASI_TWINX_SL: /* Secondary, twinx, LE */ + /* These are always handled inline. 
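Stores through these ASIs likewise become direct TCG memory operations and should never reach this out-of-line helper.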
*/ + g_assert_not_reached(); + /* these ASIs have different functions on UltraSPARC-IIIi + * and UA2005 CPUs. Use the explicit numbers to avoid confusion + */ + case 0x31: + case 0x32: + case 0x39: + case 0x3a: + if (cpu_has_hypervisor(env)) { + /* UA2005 + * ASI_DMMU_CTX_ZERO_TSB_BASE_PS0 + * ASI_DMMU_CTX_ZERO_TSB_BASE_PS1 + * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS0 + * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS1 + */ + int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2); + env->dmmu.sun4v_tsb_pointers[idx] = val; + } else { + helper_raise_exception(env, TT_ILL_INSN); + } + break; + case 0x33: + case 0x3b: + if (cpu_has_hypervisor(env)) { + /* UA2005 + * ASI_DMMU_CTX_ZERO_CONFIG + * ASI_DMMU_CTX_NONZERO_CONFIG + */ + env->dmmu.sun4v_ctx_config[(asi & 8) >> 3] = val; + } else { + helper_raise_exception(env, TT_ILL_INSN); + } + break; + case 0x35: + case 0x36: + case 0x3d: + case 0x3e: + if (cpu_has_hypervisor(env)) { + /* UA2005 + * ASI_IMMU_CTX_ZERO_TSB_BASE_PS0 + * ASI_IMMU_CTX_ZERO_TSB_BASE_PS1 + * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS0 + * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS1 + */ + int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2); + env->immu.sun4v_tsb_pointers[idx] = val; + } else { + helper_raise_exception(env, TT_ILL_INSN); + } + break; + case 0x37: + case 0x3f: + if (cpu_has_hypervisor(env)) { + /* UA2005 + * ASI_IMMU_CTX_ZERO_CONFIG + * ASI_IMMU_CTX_NONZERO_CONFIG + */ + env->immu.sun4v_ctx_config[(asi & 8) >> 3] = val; + } else { + helper_raise_exception(env, TT_ILL_INSN); + } + break; + case ASI_UPA_CONFIG: /* UPA config */ + /* XXX */ + return; + case ASI_LSU_CONTROL: /* LSU */ + env->lsu = val & (DMMU_E | IMMU_E); + return; + case ASI_IMMU: /* I-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + uint64_t oldreg; + + oldreg = env->immu.mmuregs[reg]; + switch (reg) { + case 0: /* RO */ + return; + case 1: /* Not in I-MMU */ + case 2: + return; + case 3: /* SFSR */ + if ((val & 1) == 0) { + val = 0; /* Clear SFSR */ + } + env->immu.sfsr = val; + break; + case 4: /* RO */ + return; + case 5: /* TSB access */ + DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", env->immu.tsb, val); + env->immu.tsb = val; + break; + case 6: /* Tag access */ + env->immu.tag_access = val; + break; + case 7: + case 8: + return; + default: + sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); + break; + } + + if (oldreg != env->immu.mmuregs[reg]) { + DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", reg, oldreg, env->immuregs[reg]); + } +#ifdef DEBUG_MMU + dump_mmu(env); +#endif + return; + } + case ASI_ITLB_DATA_IN: /* I-MMU data in */ + /* ignore real translation entries */ + if (!(addr & TLB_UST1_IS_REAL_BIT)) { + replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, + val, "immu", env, addr); + } + return; + case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */ + { + /* TODO: auto demap */ + + unsigned int i = (addr >> 3) & 0x3f; + + /* ignore real translation entries */ + if (!(addr & TLB_UST1_IS_REAL_BIT)) { + replace_tlb_entry(&env->itlb[i], env->immu.tag_access, + sun4v_tte_to_sun4u(env, addr, val), env); + } +#ifdef DEBUG_MMU + DPRINTF_MMU("immu data access replaced entry [%i]\n", i); + dump_mmu(env); +#endif + return; + } + case ASI_IMMU_DEMAP: /* I-MMU demap */ + demap_tlb(env->itlb, addr, "immu", env); + return; + case ASI_DMMU: /* D-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + uint64_t oldreg; + + oldreg = env->dmmu.mmuregs[reg]; + switch (reg) { + case 0: /* RO */ + case 4: + return; + case 3: /* SFSR */ + if ((val & 1) == 0) { + val = 0; /* Clear SFSR, Fault 
address */ + env->dmmu.sfar = 0; + } + env->dmmu.sfsr = val; + break; + case 1: /* Primary context */ + env->dmmu.mmu_primary_context = val; + /* can be optimized to only flush MMU_USER_IDX + and MMU_KERNEL_IDX entries */ + tlb_flush(cs); + break; + case 2: /* Secondary context */ + env->dmmu.mmu_secondary_context = val; + /* can be optimized to only flush MMU_USER_SECONDARY_IDX + and MMU_KERNEL_SECONDARY_IDX entries */ + tlb_flush(cs); + break; + case 5: /* TSB access */ + DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", env->dmmu.tsb, val); + env->dmmu.tsb = val; + break; + case 6: /* Tag access */ + env->dmmu.tag_access = val; + break; + case 7: /* Virtual Watchpoint */ + env->dmmu.virtual_watchpoint = val; + break; + case 8: /* Physical Watchpoint */ + env->dmmu.physical_watchpoint = val; + break; + default: + sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); + break; + } + + if (oldreg != env->dmmu.mmuregs[reg]) { + DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]); + } +#ifdef DEBUG_MMU + dump_mmu(env); +#endif + return; + } + case ASI_DTLB_DATA_IN: /* D-MMU data in */ + /* ignore real translation entries */ + if (!(addr & TLB_UST1_IS_REAL_BIT)) { + replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, + val, "dmmu", env, addr); + } + return; + case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */ + { + unsigned int i = (addr >> 3) & 0x3f; + + /* ignore real translation entries */ + if (!(addr & TLB_UST1_IS_REAL_BIT)) { + replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, + sun4v_tte_to_sun4u(env, addr, val), env); + } +#ifdef DEBUG_MMU + DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i); + dump_mmu(env); +#endif + return; + } + case ASI_DMMU_DEMAP: /* D-MMU demap */ + demap_tlb(env->dtlb, addr, "dmmu", env); + return; + case ASI_INTR_RECEIVE: /* Interrupt data receive */ + env->ivec_status = val & 0x20; + return; + case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */ + if (unlikely((addr >= 0x20) && (addr < 0x30))) { + /* Hyperprivileged access only */ + sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); + } + /* fall through */ + case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */ + { + unsigned int i = (addr >> 3) & 0x7; + env->scratch[i] = val; + return; + } + case ASI_MMU: /* UA2005 Context ID registers */ + { + switch ((addr >> 3) & 0x3) { + case 1: + env->dmmu.mmu_primary_context = val; + env->immu.mmu_primary_context = val; + tlb_flush_by_mmuidx(cs, + (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX)); + break; + case 2: + env->dmmu.mmu_secondary_context = val; + env->immu.mmu_secondary_context = val; + tlb_flush_by_mmuidx(cs, + (1 << MMU_USER_SECONDARY_IDX) | + (1 << MMU_KERNEL_SECONDARY_IDX)); + break; + default: + sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); + } + } + return; + case ASI_QUEUE: /* UA2005 CPU mondo queue */ + case ASI_DCACHE_DATA: /* D-cache data */ + case ASI_DCACHE_TAG: /* D-cache tag access */ + case ASI_ESTATE_ERROR_EN: /* E-cache error enable */ + case ASI_AFSR: /* E-cache asynchronous fault status */ + case ASI_AFAR: /* E-cache asynchronous fault address */ + case ASI_EC_TAG_DATA: /* E-cache tag data */ + case ASI_IC_INSTR: /* I-cache instruction access */ + case ASI_IC_TAG: /* I-cache tag access */ + case ASI_IC_PRE_DECODE: /* I-cache predecode */ + case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. 
*/ + case ASI_EC_W: /* E-cache tag */ + case ASI_EC_R: /* E-cache tag */ + return; + case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */ + case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */ + case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */ + case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */ + case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */ + case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */ + case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */ + case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */ + case ASI_INTR_R: /* Incoming interrupt vector, RO */ + case ASI_PNF: /* Primary no-fault, RO */ + case ASI_SNF: /* Secondary no-fault, RO */ + case ASI_PNFL: /* Primary no-fault LE, RO */ + case ASI_SNFL: /* Secondary no-fault LE, RO */ + default: + sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC()); + return; + } +} +#endif /* TARGET_SPARC64 */ + +void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr) +{ + bool is_write = access_type == MMU_DATA_STORE; + bool is_exec = access_type == MMU_INST_FETCH; + bool is_asi = false; + + sparc_raise_mmu_fault(cs, physaddr, is_write, is_exec, + is_asi, size, retaddr); +} + +void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, + uintptr_t retaddr) +{ + SPARCCPU *cpu = SPARC_CPU(cs); + CPUSPARCState *env = &cpu->env; + +#ifdef DEBUG_UNALIGNED + printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx + "\n", addr, env->pc); +#endif + cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); +} diff --git a/qemu/target-sparc/mmu_helper.c b/qemu/target/sparc/mmu_helper.c similarity index 66% rename from qemu/target-sparc/mmu_helper.c rename to qemu/target/sparc/mmu_helper.c index 9c3b2ce0..2d719fe9 100644 --- a/qemu/target-sparc/mmu_helper.c +++ b/qemu/target/sparc/mmu_helper.c @@ -17,26 +17,12 @@ * License along with this library; if not, see . 
*/ +#include "qemu/osdep.h" #include "cpu.h" -#include "exec/address-spaces.h" +#include "exec/exec-all.h" /* Sparc MMU emulation */ -#if defined(CONFIG_USER_ONLY) - -int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, - int mmu_idx) -{ - if (rw & 2) { - cs->exception_index = TT_TFAULT; - } else { - cs->exception_index = TT_DFAULT; - } - return 1; -} - -#else - #ifndef TARGET_SPARC64 /* * Sparc V8 Reference MMU (SRMMU) @@ -76,7 +62,7 @@ static const int perm_table[2][8] = { }; static int get_physical_address(CPUSPARCState *env, hwaddr *physical, - int *prot, int *access_index, + int *prot, int *access_index, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx, target_ulong *page_size) { @@ -85,14 +71,15 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, uint32_t pde; int error_code = 0, is_dirty, is_user; unsigned long page_offset; - CPUState *cs = CPU(sparc_env_get_cpu(env)); + CPUState *cs = env_cpu(env); + MemTxResult result; is_user = mmu_idx == MMU_USER_IDX; - if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */ + if (mmu_idx == MMU_PHYS_IDX) { *page_size = TARGET_PAGE_SIZE; /* Boot mode: instruction fetches are taken from PROM */ - if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) { + if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) { *physical = env->prom_addr | (address & 0x7ffffULL); *prot = PAGE_READ | PAGE_EXEC; return 0; @@ -108,7 +95,10 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ /* Context base + context number */ pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); - pde = ldl_phys(cs->as, pde_ptr); + pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + return 4 << 2; /* Translation fault, L = 0 */ + } /* Ctx pde */ switch (pde & PTE_ENTRYTYPE_MASK) { @@ -120,7 +110,11 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, return 4 << 2; case 1: /* L0 PDE */ pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); - pde = ldl_phys(cs->as, pde_ptr); + pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */ + } switch (pde & PTE_ENTRYTYPE_MASK) { default: @@ -130,7 +124,11 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, return (1 << 8) | (4 << 2); case 1: /* L1 PDE */ pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); - pde = ldl_phys(cs->as, pde_ptr); + pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */ + } switch (pde & PTE_ENTRYTYPE_MASK) { default: @@ -140,7 +138,11 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, return (2 << 8) | (4 << 2); case 1: /* L2 PDE */ pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); - pde = ldl_phys(cs->as, pde_ptr); + pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */ + } switch (pde & PTE_ENTRYTYPE_MASK) { default: @@ -197,27 +199,38 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, } /* Perform address translation */ -int 
sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, - int mmu_idx) +bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) { - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; hwaddr paddr; target_ulong vaddr; target_ulong page_size; int error_code = 0, prot, access_index; + MemTxAttrs attrs = { 0 }; + + /* + * TODO: If we ever need tlb_vaddr_to_host for this target, + * then we must figure out how to manipulate FSR and FAR + * when both MMU_NF and probe are set. In the meantime, + * do not support this use case. + */ + assert(!probe); address &= TARGET_PAGE_MASK; - error_code = get_physical_address(env, &paddr, &prot, &access_index, - address, rw, mmu_idx, &page_size); + error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, + address, access_type, + mmu_idx, &page_size); vaddr = address; - if (error_code == 0) { -#ifdef DEBUG_MMU - printf("Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr " - TARGET_FMT_lx "\n", address, paddr, vaddr); -#endif + if (likely(error_code == 0)) { + qemu_log_mask(CPU_LOG_MMU, + "Translate at %" VADDR_PRIx " -> " + TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n", + address, paddr, vaddr); tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); - return 0; + return true; } if (env->mmuregs[3]) { /* Fault status register */ @@ -233,27 +246,36 @@ int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, switching to normal mode. */ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); - return 0; + return true; } else { - if (rw & 2) { + if (access_type == MMU_INST_FETCH) { cs->exception_index = TT_TFAULT; } else { cs->exception_index = TT_DFAULT; } - return 1; + cpu_loop_exit_restore(cs, retaddr); } } target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) { - CPUState *cs = CPU(sparc_env_get_cpu(env)); + CPUState *cs = env_cpu(env); hwaddr pde_ptr; uint32_t pde; + MemTxResult result; + + /* + * TODO: MMU probe operations are supposed to set the fault + * status registers, but we don't do this. 
+ */ /* Context base + context number */ pde_ptr = (hwaddr)(env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); - pde = ldl_phys(cs->as, pde_ptr); + pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + return 0; + } switch (pde & PTE_ENTRYTYPE_MASK) { default: @@ -266,7 +288,11 @@ target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) return pde; } pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); - pde = ldl_phys(cs->as, pde_ptr); + pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + return 0; + } switch (pde & PTE_ENTRYTYPE_MASK) { default: @@ -280,7 +306,11 @@ target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) return pde; } pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); - pde = ldl_phys(cs->as, pde_ptr); + pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + return 0; + } switch (pde & PTE_ENTRYTYPE_MASK) { default: @@ -294,7 +324,11 @@ target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) return pde; } pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); - pde = ldl_phys(cs->as, pde_ptr); + pde = glue(address_space_ldl, UNICORN_ARCH_POSTFIX)(cs->as->uc, cs->as, pde_ptr, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + return 0; + } switch (pde & PTE_ENTRYTYPE_MASK) { default: @@ -311,47 +345,6 @@ target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) return 0; } -void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env) -{ - CPUState *cs = CPU(sparc_env_get_cpu(env)); - target_ulong va, va1, va2; - unsigned int n, m, o; - hwaddr pde_ptr, pa; - uint32_t pde; - - pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); - pde = ldl_phys(cs->as, pde_ptr); - (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n", - (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]); - for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) { - pde = mmu_probe(env, va, 2); - if (pde) { - pa = cpu_get_phys_page_debug(cs, va); - (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx - " PDE: " TARGET_FMT_lx "\n", va, pa, pde); - for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) { - pde = mmu_probe(env, va1, 1); - if (pde) { - pa = cpu_get_phys_page_debug(cs, va1); - (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: " - TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n", - va1, pa, pde); - for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) { - pde = mmu_probe(env, va2, 0); - if (pde) { - pa = cpu_get_phys_page_debug(cs, va2); - (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: " - TARGET_FMT_plx " PTE: " - TARGET_FMT_lx "\n", - va2, pa, pde); - } - } - } - } - } - } -} - /* Gdb expects all registers windows to be flushed in ram. This function handles * reads (and only reads) in stack frames as if windows were flushed. We assume * that the sparc ABI is followed. 
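For reference, the fault codes built by the SRMMU walk above follow the SPARC V8 fault status layout: a failed load of the level-L table entry returns (L << 8) | (4 << 2), i.e. the faulting table level in bits 9:8 and fault type 4 (translation error) in bits 4:2, the same encoding the fault status register code in sparc_cpu_tlb_fill() consumes. A minimal standalone sketch (the decode helper below is ours, for illustration only; it is not part of this patch):

    #include <stdio.h>

    /* Decode an error_code as returned by get_physical_address() above. */
    static void decode_srmmu_error(int error_code)
    {
        int level = (error_code >> 8) & 0x3; /* table level that faulted */
        int ft    = (error_code >> 2) & 0x7; /* 4 == translation error */
        printf("SRMMU fault: level=%d, fault_type=%d\n", level, ft);
    }

    int main(void)
    {
        decode_srmmu_error((2 << 8) | (4 << 2)); /* prints: level=2, fault_type=4 */
        return 0;
    }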
@@ -359,7 +352,7 @@ void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env) int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address, uint8_t *buf, int len, bool is_write) { - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; target_ulong addr = address; int i; @@ -454,23 +447,11 @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, uint64_t address, uint64_t context, hwaddr *physical) { - uint64_t mask; - - switch (TTE_PGSIZE(tlb->tte)) { - default: - case 0x0: /* 8k */ - mask = 0xffffffffffffe000ULL; - break; - case 0x1: /* 64k */ - mask = 0xffffffffffff0000ULL; - break; - case 0x2: /* 512k */ - mask = 0xfffffffffff80000ULL; - break; - case 0x3: /* 4M */ - mask = 0xffffffffffc00000ULL; - break; - } +#ifdef _MSC_VER + uint64_t mask = 0 - (8192ULL << 3 * TTE_PGSIZE(tlb->tte)); +#else + uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte)); +#endif /* valid, context match, virtual address match? */ if (TTE_IS_VALID(tlb->tte) && @@ -484,31 +465,29 @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, return 0; } -static int get_physical_address_data(CPUSPARCState *env, - hwaddr *physical, int *prot, +static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, + int *prot, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx) { - CPUState *cs = CPU(sparc_env_get_cpu(env)); + CPUState *cs = env_cpu(env); unsigned int i; uint64_t context; uint64_t sfsr = 0; - - int is_user = (mmu_idx == MMU_USER_IDX || - mmu_idx == MMU_USER_SECONDARY_IDX); - - if ((env->lsu & DMMU_E) == 0) { /* DMMU disabled */ - *physical = ultrasparc_truncate_physical(address); - *prot = PAGE_READ | PAGE_WRITE; - return 0; - } + bool is_user = false; switch (mmu_idx) { + case MMU_PHYS_IDX: + g_assert_not_reached(); case MMU_USER_IDX: + is_user = true; + /* fallthru */ case MMU_KERNEL_IDX: context = env->dmmu.mmu_primary_context & 0x1fff; sfsr |= SFSR_CT_PRIMARY; break; case MMU_USER_SECONDARY_IDX: + is_user = true; + /* fallthru */ case MMU_KERNEL_SECONDARY_IDX: context = env->dmmu.mmu_secondary_context & 0x1fff; sfsr |= SFSR_CT_SECONDARY; @@ -532,12 +511,15 @@ static int get_physical_address_data(CPUSPARCState *env, if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { int do_fault = 0; + if (TTE_IS_IE(env->dtlb[i].tte)) { + attrs->byte_swap = true; + } + /* access ok? 
*/ /* multiple bits in SFSR.FT may be set on TT_DFAULT */ if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { do_fault = 1; sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */ - //trace_mmu_helper_dfault(address, context, mmu_idx, env->tl); } if (rw == 4) { if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) { @@ -557,8 +539,6 @@ static int get_physical_address_data(CPUSPARCState *env, } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) { do_fault = 1; cs->exception_index = TT_DPROT; - - //trace_mmu_helper_dprot(address, context, mmu_idx, env->tl); } if (!do_fault) { @@ -592,8 +572,6 @@ static int get_physical_address_data(CPUSPARCState *env, } } - //trace_mmu_helper_dmiss(address, context); - /* * On MMU misses: * - UltraSPARC IIi: SFSR and SFAR unmodified @@ -604,22 +582,29 @@ static int get_physical_address_data(CPUSPARCState *env, return 1; } -static int get_physical_address_code(CPUSPARCState *env, - hwaddr *physical, int *prot, +static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, + int *prot, MemTxAttrs *attrs, target_ulong address, int mmu_idx) { - CPUState *cs = CPU(sparc_env_get_cpu(env)); + CPUState *cs = env_cpu(env); unsigned int i; uint64_t context; + bool is_user = false; - int is_user = (mmu_idx == MMU_USER_IDX || - mmu_idx == MMU_USER_SECONDARY_IDX); - - if ((env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0) { - /* IMMU disabled */ - *physical = ultrasparc_truncate_physical(address); - *prot = PAGE_EXEC; - return 0; + switch (mmu_idx) { + case MMU_PHYS_IDX: + case MMU_USER_SECONDARY_IDX: + case MMU_KERNEL_SECONDARY_IDX: + g_assert_not_reached(); + case MMU_USER_IDX: + is_user = true; + /* fallthru */ + case MMU_KERNEL_IDX: + context = env->dmmu.mmu_primary_context & 0x1fff; + break; + default: + context = 0; + break; } if (env->tl == 0) { @@ -656,8 +641,6 @@ static int get_physical_address_code(CPUSPARCState *env, env->immu.tag_access = (address & ~0x1fffULL) | context; - //trace_mmu_helper_tfault(address, context); - return 1; } *prot = PAGE_EXEC; @@ -666,8 +649,6 @@ static int get_physical_address_code(CPUSPARCState *env, } } - //trace_mmu_helper_tmiss(address, context); - /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ env->immu.tag_access = (address & ~0x1fffULL) | context; cs->exception_index = TT_TMISS; @@ -675,7 +656,7 @@ static int get_physical_address_code(CPUSPARCState *env, } static int get_physical_address(CPUSPARCState *env, hwaddr *physical, - int *prot, int *access_index, + int *prot, int *access_index, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx, target_ulong *page_size) { @@ -683,140 +664,66 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, everything when an entry is evicted. 
*/ *page_size = TARGET_PAGE_SIZE; +#if 0 /* safety net to catch wrong softmmu index use from dynamic code */ if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { if (rw == 2) { - //trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx, - // env->dmmu.mmu_primary_context, - // env->dmmu.mmu_secondary_context, - // address); + trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx, + env->dmmu.mmu_primary_context, + env->dmmu.mmu_secondary_context, + address); } else { - //trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx, - // env->dmmu.mmu_primary_context, - // env->dmmu.mmu_secondary_context, - // address); + trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx, + env->dmmu.mmu_primary_context, + env->dmmu.mmu_secondary_context, + address); } } +#endif + + if (mmu_idx == MMU_PHYS_IDX) { + *physical = ultrasparc_truncate_physical(address); + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return 0; + } if (rw == 2) { - return get_physical_address_code(env, physical, prot, address, + return get_physical_address_code(env, physical, prot, attrs, address, mmu_idx); } else { - return get_physical_address_data(env, physical, prot, address, rw, - mmu_idx); + return get_physical_address_data(env, physical, prot, attrs, address, + rw, mmu_idx); } } /* Perform address translation */ -int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, - int mmu_idx) +bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) { - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; target_ulong vaddr; hwaddr paddr; target_ulong page_size; + MemTxAttrs attrs = { 0 }; int error_code = 0, prot, access_index; address &= TARGET_PAGE_MASK; - error_code = get_physical_address(env, &paddr, &prot, &access_index, - address, rw, mmu_idx, &page_size); - if (error_code == 0) { + error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, + address, access_type, + mmu_idx, &page_size); + if (likely(error_code == 0)) { vaddr = address; - //trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl, - // env->dmmu.mmu_primary_context, - // env->dmmu.mmu_secondary_context); - - tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); - return 0; + tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx, + page_size); + return true; } - /* XXX */ - return 1; -} - -void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env) -{ - unsigned int i; - const char *mask; - - (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %" - PRId64 "\n", - env->dmmu.mmu_primary_context, - env->dmmu.mmu_secondary_context); - if ((env->lsu & DMMU_E) == 0) { - (*cpu_fprintf)(f, "DMMU disabled\n"); - } else { - (*cpu_fprintf)(f, "DMMU dump\n"); - for (i = 0; i < 64; i++) { - switch (TTE_PGSIZE(env->dtlb[i].tte)) { - default: - case 0x0: - mask = " 8k"; - break; - case 0x1: - mask = " 64k"; - break; - case 0x2: - mask = "512k"; - break; - case 0x3: - mask = " 4M"; - break; - } - if (TTE_IS_VALID(env->dtlb[i].tte)) { - (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx" - ", %s, %s, %s, %s, ctx %" PRId64 " %s\n", - i, - env->dtlb[i].tag & (uint64_t)~0x1fffULL, - TTE_PA(env->dtlb[i].tte), - mask, - TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user", - TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", - TTE_IS_LOCKED(env->dtlb[i].tte) ? - "locked" : "unlocked", - env->dtlb[i].tag & (uint64_t)0x1fffULL, - TTE_IS_GLOBAL(env->dtlb[i].tte) ? 
- "global" : "local"); - } - } - } - if ((env->lsu & IMMU_E) == 0) { - (*cpu_fprintf)(f, "IMMU disabled\n"); - } else { - (*cpu_fprintf)(f, "IMMU dump\n"); - for (i = 0; i < 64; i++) { - switch (TTE_PGSIZE(env->itlb[i].tte)) { - default: - case 0x0: - mask = " 8k"; - break; - case 0x1: - mask = " 64k"; - break; - case 0x2: - mask = "512k"; - break; - case 0x3: - mask = " 4M"; - break; - } - if (TTE_IS_VALID(env->itlb[i].tte)) { - (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx" - ", %s, %s, %s, ctx %" PRId64 " %s\n", - i, - env->itlb[i].tag & (uint64_t)~0x1fffULL, - TTE_PA(env->itlb[i].tte), - mask, - TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user", - TTE_IS_LOCKED(env->itlb[i].tte) ? - "locked" : "unlocked", - env->itlb[i].tag & (uint64_t)0x1fffULL, - TTE_IS_GLOBAL(env->itlb[i].tte) ? - "global" : "local"); - } - } + if (probe) { + return false; } + cpu_loop_exit_restore(cs, retaddr); } #endif /* TARGET_SPARC64 */ @@ -826,9 +733,10 @@ static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, { target_ulong page_size; int prot, access_index; + MemTxAttrs attrs = { 0 }; - return get_physical_address(env, phys, &prot, &access_index, addr, rw, - mmu_idx, &page_size); + return get_physical_address(env, phys, &prot, &access_index, &attrs, addr, + rw, mmu_idx, &page_size); } #if defined(TARGET_SPARC64) @@ -846,22 +754,15 @@ hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { - SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; hwaddr phys_addr; - int mmu_idx = cpu_mmu_index(env); - MemoryRegionSection section; + int mmu_idx = cpu_mmu_index(env, false); if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { return -1; } } - section = memory_region_find(get_system_memory(cs->uc), phys_addr, 1); - memory_region_unref(section.mr); - if (!int128_nz(section.size)) { - return -1; - } return phys_addr; } -#endif diff --git a/qemu/target-sparc/translate.c b/qemu/target/sparc/translate.c similarity index 64% rename from qemu/target-sparc/translate.c rename to qemu/target/sparc/translate.c index f39a70ea..c6f3d9cd 100644 --- a/qemu/target-sparc/translate.c +++ b/qemu/target/sparc/translate.c @@ -18,44 +18,55 @@ License along with this library; if not, see . 
*/ -#include -#include -#include -#include -#include "unicorn/platform.h" +#include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "tcg-op.h" +#include "exec/exec-all.h" +#include "tcg/tcg-op.h" #include "exec/cpu_ldst.h" #include "exec/helper-gen.h" -#include "exec/gen-icount.h" +#include "exec/translator.h" +#include "asi.h" + + +#define DEBUG_DISAS #define DYNAMIC_PC 1 /* dynamic pc value */ #define JUMP_PC 2 /* dynamic pc value which takes only two values according to jump_pc[T2] */ +#define DISAS_EXIT DISAS_TARGET_0 + +#include "exec/gen-icount.h" typedef struct DisasContext { + DisasContextBase base; target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */ target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */ target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */ - int is_br; int mem_idx; - int fpu_enabled; - int address_mask_32bit; - int singlestep; + bool fpu_enabled; + bool address_mask_32bit; + bool supervisor; +#ifdef TARGET_SPARC64 + bool hypervisor; +#endif + uint32_t cc_op; /* current CC operation */ - struct TranslationBlock *tb; sparc_def_t *def; TCGv_i32 t32[3]; - TCGv ttl[6]; + TCGv ttl[5]; int n_t32; int n_ttl; +#ifdef TARGET_SPARC64 + int fprs_dirty; + int asi; +#endif - // Unicorn engine + // Unicorn struct uc_struct *uc; } DisasContext; @@ -91,7 +102,7 @@ typedef struct { static int sign_extend(int x, int len) { len = 32 - len; - return ((int)(((unsigned int)x) << len)) >> len; + return (x << len) >> len; } #define IS_IMM (insn & (1<<13)) @@ -99,6 +110,7 @@ static int sign_extend(int x, int len) static inline TCGv_i32 get_temp_i32(DisasContext *dc) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 t; assert(dc->n_t32 < ARRAY_SIZE(dc->t32)); dc->t32[dc->n_t32++] = t = tcg_temp_new_i32(tcg_ctx); @@ -117,8 +129,14 @@ static inline TCGv get_temp_tl(DisasContext *dc) static inline void gen_update_fprs_dirty(DisasContext *dc, int rd) { #if defined(TARGET_SPARC64) + int bit = (rd < 32) ? 1 : 2; TCGContext *tcg_ctx = dc->uc->tcg_ctx; - tcg_gen_ori_i32(tcg_ctx, tcg_ctx->cpu_fprs, tcg_ctx->cpu_fprs, (rd < 32) ? 1 : 2); + /* If we know we've already set this bit within the TB, + we can avoid setting it again. 
*/ + if (!(dc->fprs_dirty & bit)) { + dc->fprs_dirty |= bit; + tcg_gen_ori_i32(tcg_ctx, tcg_ctx->cpu_fprs, tcg_ctx->cpu_fprs, bit); + } #endif } @@ -128,23 +146,18 @@ static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src) TCGContext *tcg_ctx = dc->uc->tcg_ctx; #if TCG_TARGET_REG_BITS == 32 if (src & 1) { - return TCGV_LOW(tcg_ctx->cpu_fpr[src / 2]); + return TCGV_LOW(tcg_ctx, tcg_ctx->cpu_fpr[src / 2]); } else { - return TCGV_HIGH(tcg_ctx->cpu_fpr[src / 2]); + return TCGV_HIGH(tcg_ctx, tcg_ctx->cpu_fpr[src / 2]); } #else + TCGv_i32 ret = get_temp_i32(dc); if (src & 1) { - return MAKE_TCGV_I32(GET_TCGV_I64(tcg_ctx->cpu_fpr[src / 2])); + tcg_gen_extrl_i64_i32(tcg_ctx, ret, tcg_ctx->cpu_fpr[src / 2]); } else { - TCGv_i32 ret = get_temp_i32(dc); - TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->cpu_fpr[src / 2], 32); - tcg_gen_trunc_i64_i32(tcg_ctx, ret, t); - tcg_temp_free_i64(tcg_ctx, t); - - return ret; + tcg_gen_extrh_i64_i32(tcg_ctx, ret, tcg_ctx->cpu_fpr[src / 2]); } + return ret; #endif } @@ -153,12 +166,12 @@ static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v) TCGContext *tcg_ctx = dc->uc->tcg_ctx; #if TCG_TARGET_REG_BITS == 32 if (dst & 1) { - tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx->cpu_fpr[dst / 2]), v); + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2]), v); } else { - tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx->cpu_fpr[dst / 2]), v); + tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2]), v); } #else - TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v)); + TCGv_i64 t = (TCGv_i64)v; tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], tcg_ctx->cpu_fpr[dst / 2], t, (dst & 1 ? 0 : 32), 32); #endif @@ -191,34 +204,56 @@ static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst) return tcg_ctx->cpu_fpr[DFPREG(dst) / 2]; } -static void gen_op_load_fpr_QT0(DisasContext *dc, unsigned int src) +static void gen_op_load_fpr_QT0(TCGContext *tcg_ctx, unsigned int src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + offsetof(CPU_QuadU, ll.upper)); tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + offsetof(CPU_QuadU, ll.lower)); } -static void gen_op_load_fpr_QT1(DisasContext *dc, unsigned int src) +static void gen_op_load_fpr_QT1(TCGContext *tcg_ctx, unsigned int src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt1) + offsetof(CPU_QuadU, ll.upper)); tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt1) + offsetof(CPU_QuadU, ll.lower)); } -static void gen_op_store_QT0_fpr(DisasContext *dc, unsigned int dst) +static void gen_op_store_QT0_fpr(TCGContext *tcg_ctx, unsigned int dst) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + offsetof(CPU_QuadU, ll.upper)); tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + offsetof(CPU_QuadU, ll.lower)); } +static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, + TCGv_i64 v1, TCGv_i64 v2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + dst = QFPREG(dst); + + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], v1); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2 + 1], v2); + gen_update_fprs_dirty(dc, dst); +} + 
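The quad-register helpers above address cpu_fpr[] in 64-bit halves, which is why gen_store_fpr_Q writes cpu_fpr[dst / 2] and cpu_fpr[dst / 2 + 1]. The register numbers reaching these helpers have already been folded by QFPREG(); as a standalone illustration (assuming the DFPREG/QFPREG definitions from QEMU's target/sparc/cpu.h, which this patch does not show):

    #include <stdio.h>

    /* Assumed from target/sparc/cpu.h: bit 0 of the instruction's register
     * field acts as bit 5, selecting the upper bank %f32..%f62 on sparc64. */
    #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
    #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))

    int main(void)
    {
        int rds[] = { 0, 4, 1, 5 };
        for (int i = 0; i < 4; i++) {
            int q = QFPREG(rds[i]);
            printf("rd=%d -> %%f%d -> cpu_fpr[%d] and cpu_fpr[%d]\n",
                   rds[i], q, q / 2, q / 2 + 1);
        }
        return 0;
    }

So an rd field of 1 names %f32, stored in cpu_fpr[16] and cpu_fpr[17]; on 32-bit TCG hosts the same packing is visible in gen_load_fpr_F, which picks the TCGV_LOW/TCGV_HIGH halves of the 64-bit element.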
#ifdef TARGET_SPARC64 +static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + src = QFPREG(src); + return tcg_ctx->cpu_fpr[src / 2]; +} + +static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + src = QFPREG(src); + return tcg_ctx->cpu_fpr[src / 2 + 1]; +} + static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; @@ -232,17 +267,11 @@ static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs) #endif /* moves */ -#ifdef CONFIG_USER_ONLY -#define supervisor(dc) 0 #ifdef TARGET_SPARC64 -#define hypervisor(dc) 0 -#endif +#define hypervisor(dc) (dc->hypervisor) +#define supervisor(dc) (dc->supervisor | dc->hypervisor) #else -#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX) -#ifdef TARGET_SPARC64 -#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX) -#else -#endif +#define supervisor(dc) (dc->supervisor) #endif #ifdef TARGET_SPARC64 @@ -265,140 +294,112 @@ static inline void gen_address_mask(DisasContext *dc, TCGv addr) static inline TCGv gen_load_gpr(DisasContext *dc, int reg) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; - if (reg == 0 || reg >= 8) { - TCGv t = get_temp_tl(dc); - if (reg == 0) { - tcg_gen_movi_tl(tcg_ctx, t, 0); - } else { - tcg_gen_ld_tl(tcg_ctx, t, tcg_ctx->cpu_regwptr, (reg - 8) * sizeof(target_ulong)); - } - return t; + if (reg > 0) { + assert(reg < 32); + return tcg_ctx->cpu_regs[reg]; } else { - TCGv **cpu_gregs = (TCGv **)tcg_ctx->cpu_gregs; - return *cpu_gregs[reg]; + TCGv t = get_temp_tl(dc); + tcg_gen_movi_tl(tcg_ctx, t, 0); + return t; } } static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; if (reg > 0) { - if (reg < 8) { - TCGv **cpu_gregs = (TCGv **)tcg_ctx->cpu_gregs; - tcg_gen_mov_tl(tcg_ctx, *cpu_gregs[reg], v); - } else { - tcg_gen_st_tl(tcg_ctx, v, tcg_ctx->cpu_regwptr, (reg - 8) * sizeof(target_ulong)); - } + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + assert(reg < 32); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_regs[reg], v); } } static inline TCGv gen_dest_gpr(DisasContext *dc, int reg) { - if (reg == 0 || reg >= 8) { - return get_temp_tl(dc); - } else { + if (reg > 0) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; - TCGv **cpu_gregs = (TCGv **)tcg_ctx->cpu_gregs; - return *cpu_gregs[reg]; + assert(reg < 32); + return tcg_ctx->cpu_regs[reg]; + } else { + return get_temp_tl(dc); } } +static inline bool use_goto_tb(DisasContext *s, target_ulong pc, + target_ulong npc) +{ + // if (unlikely(s->base.singlestep_enabled || singlestep)) { + if (unlikely(s->base.singlestep_enabled)) { + return false; + } + + return (pc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) && + (npc & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK); +} + static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc, target_ulong npc) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - TranslationBlock *tb; - - tb = s->tb; - if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) && - (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) && - !s->singlestep) { + if (use_goto_tb(s, pc, npc)) { /* jump to same page: we can use a direct jump */ tcg_gen_goto_tb(tcg_ctx, tb_num); - tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, pc); - tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, npc); - tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + tb_num); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, pc); + tcg_gen_movi_tl(tcg_ctx, 
tcg_ctx->cpu_npc, npc); + tcg_gen_exit_tb(tcg_ctx, s->base.tb, tb_num); } else { /* jump to another page: currently not optimized */ - tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, pc); - tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, npc); - tcg_gen_exit_tb(tcg_ctx, 0); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, pc); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_npc, npc); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); } } // XXX suboptimal -static inline void gen_mov_reg_N(DisasContext *dc, TCGv reg, TCGv_i32 src) +static inline void gen_mov_reg_N(TCGContext *tcg_ctx, TCGv reg, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_extu_i32_tl(tcg_ctx, reg, src); - tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_NEG_SHIFT); - tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); + tcg_gen_extract_tl(tcg_ctx, reg, reg, PSR_NEG_SHIFT, 1); }
-static inline void gen_mov_reg_Z(DisasContext *dc, TCGv reg, TCGv_i32 src) +static inline void gen_mov_reg_Z(TCGContext *tcg_ctx, TCGv reg, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_extu_i32_tl(tcg_ctx, reg, src); - tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_ZERO_SHIFT); - tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); + tcg_gen_extract_tl(tcg_ctx, reg, reg, PSR_ZERO_SHIFT, 1); }
-static inline void gen_mov_reg_V(DisasContext *dc, TCGv reg, TCGv_i32 src) +static inline void gen_mov_reg_V(TCGContext *tcg_ctx, TCGv reg, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_extu_i32_tl(tcg_ctx, reg, src); - tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_OVF_SHIFT); - tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); + tcg_gen_extract_tl(tcg_ctx, reg, reg, PSR_OVF_SHIFT, 1); }
-static inline void gen_mov_reg_C(DisasContext *dc, TCGv reg, TCGv_i32 src) +static inline void gen_mov_reg_C(TCGContext *tcg_ctx, TCGv reg, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_extu_i32_tl(tcg_ctx, reg, src); - tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_CARRY_SHIFT); - tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); + tcg_gen_extract_tl(tcg_ctx, reg, reg, PSR_CARRY_SHIFT, 1); }
-#if 0 -static inline void gen_op_addi_cc(DisasContext *dc, TCGv dst, TCGv src1, target_long src2) +static inline void gen_op_add_cc(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); - tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); - tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, src2); - tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst); -} -#endif - -static inline void gen_op_add_cc(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2) -{ - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); - tcg_gen_add_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src2); - tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, src1); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, src2); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src2); + tcg_gen_mov_tl(tcg_ctx, dst, tcg_ctx->cpu_cc_dst); }
-static TCGv_i32 gen_add32_carry32(DisasContext *dc) +static TCGv_i32 gen_add32_carry32(TCGContext *tcg_ctx) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 carry_32, cc_src1_32, cc_src2_32; /* Carry is computed from a previous add: (dst < src) */ #if TARGET_LONG_BITS == 64 cc_src1_32 =
tcg_temp_new_i32(tcg_ctx); cc_src2_32 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, cc_src1_32, *(TCGv *)tcg_ctx->cpu_cc_dst); - tcg_gen_trunc_i64_i32(tcg_ctx, cc_src2_32, *(TCGv *)tcg_ctx->cpu_cc_src); + tcg_gen_extrl_i64_i32(tcg_ctx, cc_src1_32, tcg_ctx->cpu_cc_dst); + tcg_gen_extrl_i64_i32(tcg_ctx, cc_src2_32, tcg_ctx->cpu_cc_src); #else - cc_src1_32 = *(TCGv *)tcg_ctx->cpu_cc_dst; - cc_src2_32 = *(TCGv *)tcg_ctx->cpu_cc_src; + cc_src1_32 = tcg_ctx->cpu_cc_dst; + cc_src2_32 = tcg_ctx->cpu_cc_src; #endif carry_32 = tcg_temp_new_i32(tcg_ctx); @@ -412,20 +413,19 @@ static TCGv_i32 gen_add32_carry32(DisasContext *dc) return carry_32; } -static TCGv_i32 gen_sub32_carry32(DisasContext *dc) +static TCGv_i32 gen_sub32_carry32(TCGContext *tcg_ctx) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 carry_32, cc_src1_32, cc_src2_32; /* Carry is computed from a previous borrow: (src1 < src2) */ #if TARGET_LONG_BITS == 64 cc_src1_32 = tcg_temp_new_i32(tcg_ctx); cc_src2_32 = tcg_temp_new_i32(tcg_ctx); - tcg_gen_trunc_i64_i32(tcg_ctx, cc_src1_32, *(TCGv *)tcg_ctx->cpu_cc_src); - tcg_gen_trunc_i64_i32(tcg_ctx, cc_src2_32, *(TCGv *)tcg_ctx->cpu_cc_src2); + tcg_gen_extrl_i64_i32(tcg_ctx, cc_src1_32, tcg_ctx->cpu_cc_src); + tcg_gen_extrl_i64_i32(tcg_ctx, cc_src2_32, tcg_ctx->cpu_cc_src2); #else - cc_src1_32 = *(TCGv *)tcg_ctx->cpu_cc_src; - cc_src2_32 = *(TCGv *)tcg_ctx->cpu_cc_src2; + cc_src1_32 = tcg_ctx->cpu_cc_src; + cc_src2_32 = tcg_ctx->cpu_cc_src2; #endif carry_32 = tcg_temp_new_i32(tcg_ctx); @@ -451,7 +451,7 @@ static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, case CC_OP_LOGIC: /* Carry is known to be zero. Fall back to plain ADD. */ if (update_cc) { - gen_op_add_cc(dc, dst, src1, src2); + gen_op_add_cc(tcg_ctx, dst, src1, src2); } else { tcg_gen_add_tl(tcg_ctx, dst, src1, src2); } @@ -466,17 +466,17 @@ static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, Ideally we'd combine this operation with the add that generated the carry in the first place. 
*/ carry = tcg_temp_new(tcg_ctx); - tcg_gen_add2_tl(tcg_ctx, carry, dst, *(TCGv *)tcg_ctx->cpu_cc_src, src1, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); + tcg_gen_add2_tl(tcg_ctx, carry, dst, tcg_ctx->cpu_cc_src, src1, tcg_ctx->cpu_cc_src2, src2); tcg_temp_free(tcg_ctx, carry); goto add_done; } - carry_32 = gen_add32_carry32(dc); + carry_32 = gen_add32_carry32(tcg_ctx); break; case CC_OP_SUB: case CC_OP_TSUB: case CC_OP_TSUBTV: - carry_32 = gen_sub32_carry32(dc); + carry_32 = gen_sub32_carry32(tcg_ctx); break; default: @@ -503,40 +503,20 @@ static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, add_done: if (update_cc) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, src1); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, src2); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADDX); dc->cc_op = CC_OP_ADDX; } } -#if 0 -static inline void gen_op_subi_cc(DisasContext *dc, TCGv dst, TCGv src1, target_long src2) +static inline void gen_op_sub_cc(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); - tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); - if (src2 == 0) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, src1); - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); - dc->cc_op = CC_OP_LOGIC; - } else { - tcg_gen_subi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, src2); - tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB); - dc->cc_op = CC_OP_SUB; - } - tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst); -} -#endif - -static inline void gen_op_sub_cc(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2) -{ - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); - tcg_gen_sub_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src2); - tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, src1); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, src2); + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src2); + tcg_gen_mov_tl(tcg_ctx, dst, tcg_ctx->cpu_cc_dst); } static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, @@ -551,7 +531,7 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, case CC_OP_LOGIC: /* Carry is known to be zero. Fall back to plain SUB. */ if (update_cc) { - gen_op_sub_cc(dc, dst, src1, src2); + gen_op_sub_cc(tcg_ctx, dst, src1, src2); } else { tcg_gen_sub_tl(tcg_ctx, dst, src1, src2); } @@ -560,7 +540,7 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, case CC_OP_ADD: case CC_OP_TADD: case CC_OP_TADDTV: - carry_32 = gen_add32_carry32(dc); + carry_32 = gen_add32_carry32(tcg_ctx); break; case CC_OP_SUB: @@ -572,11 +552,11 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, Ideally we'd combine this operation with the add that generated the carry in the first place. 
*/ carry = tcg_temp_new(tcg_ctx); - tcg_gen_sub2_tl(tcg_ctx, carry, dst, *(TCGv *)tcg_ctx->cpu_cc_src, src1, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); + tcg_gen_sub2_tl(tcg_ctx, carry, dst, tcg_ctx->cpu_cc_src, src1, tcg_ctx->cpu_cc_src2, src2); tcg_temp_free(tcg_ctx, carry); goto sub_done; } - carry_32 = gen_sub32_carry32(dc); + carry_32 = gen_sub32_carry32(tcg_ctx); break; default: @@ -603,17 +583,16 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, sub_done: if (update_cc) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, src1); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, src2); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUBX); dc->cc_op = CC_OP_SUBX; } } -static inline void gen_op_mulscc(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2) +static inline void gen_op_mulscc(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv r_temp, zero, t0; r_temp = tcg_temp_new(tcg_ctx); @@ -624,48 +603,43 @@ static inline void gen_op_mulscc(DisasContext *dc, TCGv dst, TCGv src1, TCGv src T1 = 0; */ zero = tcg_const_tl(tcg_ctx, 0); - tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1, 0xffffffff); - tcg_gen_andi_tl(tcg_ctx, r_temp, *(TCGv *)tcg_ctx->cpu_y, 0x1); - tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2, 0xffffffff); - tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *(TCGv *)tcg_ctx->cpu_cc_src2, r_temp, zero, - zero, *(TCGv *)tcg_ctx->cpu_cc_src2); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src, src1, 0xffffffff); + tcg_gen_andi_tl(tcg_ctx, r_temp, tcg_ctx->cpu_y, 0x1); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, src2, 0xffffffff); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_cc_src2, r_temp, zero, + zero, tcg_ctx->cpu_cc_src2); tcg_temp_free(tcg_ctx, zero); // b2 = T0 & 1; // env->y = (b2 << 31) | (env->y >> 1); - tcg_gen_andi_tl(tcg_ctx, r_temp, *(TCGv *)tcg_ctx->cpu_cc_src, 0x1); - tcg_gen_shli_tl(tcg_ctx, r_temp, r_temp, 31); - tcg_gen_shri_tl(tcg_ctx, t0, *(TCGv *)tcg_ctx->cpu_y, 1); - tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x7fffffff); - tcg_gen_or_tl(tcg_ctx, t0, t0, r_temp); - tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_y, t0, 0xffffffff); + tcg_gen_extract_tl(tcg_ctx, t0, tcg_ctx->cpu_y, 1, 31); + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_y, t0, tcg_ctx->cpu_cc_src, 31, 1); // b1 = N ^ V; - gen_mov_reg_N(dc, t0, tcg_ctx->cpu_psr); - gen_mov_reg_V(dc, r_temp, tcg_ctx->cpu_psr); + gen_mov_reg_N(tcg_ctx, t0, tcg_ctx->cpu_psr); + gen_mov_reg_V(tcg_ctx, r_temp, tcg_ctx->cpu_psr); tcg_gen_xor_tl(tcg_ctx, t0, t0, r_temp); tcg_temp_free(tcg_ctx, r_temp); // T0 = (b1 << 31) | (T0 >> 1); // src1 = T0; tcg_gen_shli_tl(tcg_ctx, t0, t0, 31); - tcg_gen_shri_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src, 1); - tcg_gen_or_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src, t0); + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, 1); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src, t0); tcg_temp_free(tcg_ctx, t0); - tcg_gen_add_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src2); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, tcg_ctx->cpu_cc_src, tcg_ctx->cpu_cc_src2); - tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst); + 
tcg_gen_mov_tl(tcg_ctx, dst, tcg_ctx->cpu_cc_dst); } -static inline void gen_op_multiply(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2, int sign_ext) +static inline void gen_op_multiply(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2, int sign_ext) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; #if TARGET_LONG_BITS == 32 if (sign_ext) { - tcg_gen_muls2_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_y, src1, src2); + tcg_gen_muls2_tl(tcg_ctx, dst, tcg_ctx->cpu_y, src1, src2); } else { - tcg_gen_mulu2_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_y, src1, src2); + tcg_gen_mulu2_tl(tcg_ctx, dst, tcg_ctx->cpu_y, src1, src2); } #else TCGv t0 = tcg_temp_new_i64(tcg_ctx); @@ -683,148 +657,136 @@ static inline void gen_op_multiply(DisasContext *dc, TCGv dst, TCGv src1, TCGv s tcg_temp_free(tcg_ctx, t0); tcg_temp_free(tcg_ctx, t1); - tcg_gen_shri_i64(tcg_ctx, *(TCGv *)tcg_ctx->cpu_y, dst, 32); + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_y, dst, 32); #endif } -static inline void gen_op_umul(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2) +static inline void gen_op_umul(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2) { /* zero-extend truncated operands before multiplication */ - gen_op_multiply(dc, dst, src1, src2, 0); + gen_op_multiply(tcg_ctx, dst, src1, src2, 0); } -static inline void gen_op_smul(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2) +static inline void gen_op_smul(TCGContext *tcg_ctx, TCGv dst, TCGv src1, TCGv src2) { /* sign-extend truncated operands before multiplication */ - gen_op_multiply(dc, dst, src1, src2, 1); + gen_op_multiply(tcg_ctx, dst, src1, src2, 1); } // 1 -static inline void gen_op_eval_ba(DisasContext *dc, TCGv dst) +static inline void gen_op_eval_ba(TCGContext *tcg_ctx, TCGv dst) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, dst, 1); } // Z -static inline void gen_op_eval_be(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_be(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - gen_mov_reg_Z(dc, dst, src); + gen_mov_reg_Z(tcg_ctx, dst, src); } // Z | (N ^ V) -static inline void gen_op_eval_ble(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_ble(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); - gen_mov_reg_N(dc, t0, src); - gen_mov_reg_V(dc, dst, src); + gen_mov_reg_N(tcg_ctx, t0, src); + gen_mov_reg_V(tcg_ctx, dst, src); tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); - gen_mov_reg_Z(dc, t0, src); + gen_mov_reg_Z(tcg_ctx, t0, src); tcg_gen_or_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // N ^ V -static inline void gen_op_eval_bl(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bl(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); - gen_mov_reg_V(dc, t0, src); - gen_mov_reg_N(dc, dst, src); + gen_mov_reg_V(tcg_ctx, t0, src); + gen_mov_reg_N(tcg_ctx, dst, src); tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // C | Z -static inline void gen_op_eval_bleu(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bleu(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); - gen_mov_reg_Z(dc, t0, src); - gen_mov_reg_C(dc, dst, src); + gen_mov_reg_Z(tcg_ctx, t0, src); + gen_mov_reg_C(tcg_ctx, dst, src); tcg_gen_or_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // C -static inline void gen_op_eval_bcs(DisasContext *dc, TCGv dst, 
TCGv_i32 src) +static inline void gen_op_eval_bcs(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - gen_mov_reg_C(dc, dst, src); + gen_mov_reg_C(tcg_ctx, dst, src); } // V -static inline void gen_op_eval_bvs(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bvs(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - gen_mov_reg_V(dc, dst, src); + gen_mov_reg_V(tcg_ctx, dst, src); } // 0 -static inline void gen_op_eval_bn(DisasContext *dc, TCGv dst) +static inline void gen_op_eval_bn(TCGContext *tcg_ctx, TCGv dst) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_movi_tl(tcg_ctx, dst, 0); } // N -static inline void gen_op_eval_bneg(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bneg(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - gen_mov_reg_N(dc, dst, src); + gen_mov_reg_N(tcg_ctx, dst, src); } // !Z -static inline void gen_op_eval_bne(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bne(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - gen_mov_reg_Z(dc, dst, src); + gen_mov_reg_Z(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !(Z | (N ^ V)) -static inline void gen_op_eval_bg(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bg(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - gen_op_eval_ble(dc, dst, src); + gen_op_eval_ble(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !(N ^ V) -static inline void gen_op_eval_bge(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bge(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - gen_op_eval_bl(dc, dst, src); + gen_op_eval_bl(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !(C | Z) -static inline void gen_op_eval_bgu(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bgu(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - gen_op_eval_bleu(dc, dst, src); + gen_op_eval_bleu(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !C -static inline void gen_op_eval_bcc(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bcc(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - gen_mov_reg_C(dc, dst, src); + gen_mov_reg_C(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !N -static inline void gen_op_eval_bpos(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bpos(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - gen_mov_reg_N(dc, dst, src); + gen_mov_reg_N(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } // !V -static inline void gen_op_eval_bvc(DisasContext *dc, TCGv dst, TCGv_i32 src) +static inline void gen_op_eval_bvc(TCGContext *tcg_ctx, TCGv dst, TCGv_i32 src) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - gen_mov_reg_V(dc, dst, src); + gen_mov_reg_V(tcg_ctx, dst, src); tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); } @@ -835,174 +797,160 @@ static inline void gen_op_eval_bvc(DisasContext *dc, TCGv dst, TCGv_i32 src) 2 > 3 unordered */ -static inline void gen_mov_reg_FCC0(DisasContext *dc, TCGv reg, TCGv src, +static inline void gen_mov_reg_FCC0(TCGContext *tcg_ctx, TCGv reg, TCGv src, unsigned int fcc_offset) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_shri_tl(tcg_ctx, reg, src, FSR_FCC0_SHIFT + fcc_offset); tcg_gen_andi_tl(tcg_ctx, reg, reg, 
0x1); } -static inline void gen_mov_reg_FCC1(DisasContext *dc, TCGv reg, TCGv src, +static inline void gen_mov_reg_FCC1(TCGContext *tcg_ctx, TCGv reg, TCGv src, unsigned int fcc_offset) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; tcg_gen_shri_tl(tcg_ctx, reg, src, FSR_FCC1_SHIFT + fcc_offset); tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); } // !0: FCC0 | FCC1 -static inline void gen_op_eval_fbne(DisasContext *dc, TCGv dst, TCGv src, +static inline void gen_op_eval_fbne(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); - gen_mov_reg_FCC0(dc, dst, src, fcc_offset); - gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); + gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_or_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // 1 or 2: FCC0 ^ FCC1 -static inline void gen_op_eval_fblg(DisasContext *dc, TCGv dst, TCGv src, +static inline void gen_op_eval_fblg(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); - gen_mov_reg_FCC0(dc, dst, src, fcc_offset); - gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); + gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // 1 or 3: FCC0 -static inline void gen_op_eval_fbul(DisasContext *dc, TCGv dst, TCGv src, +static inline void gen_op_eval_fbul(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { - gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); } // 1: FCC0 & !FCC1 -static inline void gen_op_eval_fbl(DisasContext *dc, TCGv dst, TCGv src, +static inline void gen_op_eval_fbl(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); - gen_mov_reg_FCC0(dc, dst, src, fcc_offset); - gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); + gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_andc_tl(tcg_ctx, dst, dst, t0); tcg_temp_free(tcg_ctx, t0); } // 2 or 3: FCC1 -static inline void gen_op_eval_fbug(DisasContext *dc, TCGv dst, TCGv src, +static inline void gen_op_eval_fbug(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { - gen_mov_reg_FCC1(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(tcg_ctx, dst, src, fcc_offset); } // 2: !FCC0 & FCC1 -static inline void gen_op_eval_fbg(DisasContext *dc, TCGv dst, TCGv src, +static inline void gen_op_eval_fbg(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); - gen_mov_reg_FCC0(dc, dst, src, fcc_offset); - gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); + gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_andc_tl(tcg_ctx, dst, t0, dst); tcg_temp_free(tcg_ctx, t0); } // 3: FCC0 & FCC1 -static inline void gen_op_eval_fbu(DisasContext *dc, TCGv dst, TCGv src, +static inline void gen_op_eval_fbu(TCGContext *tcg_ctx, TCGv dst, TCGv src, unsigned int fcc_offset) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv t0 = tcg_temp_new(tcg_ctx); - gen_mov_reg_FCC0(dc, dst, src, fcc_offset); - gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset); + gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset); tcg_gen_and_tl(tcg_ctx, dst, 
 
 // 0: !(FCC0 | FCC1)
-static inline void gen_op_eval_fbe(DisasContext *dc, TCGv dst, TCGv src,
+static inline void gen_op_eval_fbe(TCGContext *tcg_ctx, TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv t0 = tcg_temp_new(tcg_ctx);
-    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
-    gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
+    gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset);
+    gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset);
     tcg_gen_or_tl(tcg_ctx, dst, dst, t0);
     tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
     tcg_temp_free(tcg_ctx, t0);
 }
 
 // 0 or 3: !(FCC0 ^ FCC1)
-static inline void gen_op_eval_fbue(DisasContext *dc, TCGv dst, TCGv src,
+static inline void gen_op_eval_fbue(TCGContext *tcg_ctx, TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv t0 = tcg_temp_new(tcg_ctx);
-    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
-    gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
+    gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset);
+    gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset);
     tcg_gen_xor_tl(tcg_ctx, dst, dst, t0);
     tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
     tcg_temp_free(tcg_ctx, t0);
 }
 
 // 0 or 2: !FCC0
-static inline void gen_op_eval_fbge(DisasContext *dc, TCGv dst, TCGv src,
+static inline void gen_op_eval_fbge(TCGContext *tcg_ctx, TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
+    gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset);
     tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
 }
 
 // !1: !(FCC0 & !FCC1)
-static inline void gen_op_eval_fbuge(DisasContext *dc, TCGv dst, TCGv src,
+static inline void gen_op_eval_fbuge(TCGContext *tcg_ctx, TCGv dst, TCGv src,
                                      unsigned int fcc_offset)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv t0 = tcg_temp_new(tcg_ctx);
-    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
-    gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
+    gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset);
+    gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset);
     tcg_gen_andc_tl(tcg_ctx, dst, dst, t0);
     tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
     tcg_temp_free(tcg_ctx, t0);
 }
 
 // 0 or 1: !FCC1
-static inline void gen_op_eval_fble(DisasContext *dc, TCGv dst, TCGv src,
+static inline void gen_op_eval_fble(TCGContext *tcg_ctx, TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    gen_mov_reg_FCC1(dc, dst, src, fcc_offset);
+    gen_mov_reg_FCC1(tcg_ctx, dst, src, fcc_offset);
     tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
 }
 
 // !2: !(!FCC0 & FCC1)
-static inline void gen_op_eval_fbule(DisasContext *dc, TCGv dst, TCGv src,
+static inline void gen_op_eval_fbule(TCGContext *tcg_ctx, TCGv dst, TCGv src,
                                      unsigned int fcc_offset)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv t0 = tcg_temp_new(tcg_ctx);
-    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
-    gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
+    gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset);
+    gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset);
     tcg_gen_andc_tl(tcg_ctx, dst, t0, dst);
     tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
     tcg_temp_free(tcg_ctx, t0);
 }
 
 // !3: !(FCC0 & FCC1)
-static inline void gen_op_eval_fbo(DisasContext *dc, TCGv dst, TCGv src,
+static inline void gen_op_eval_fbo(TCGContext *tcg_ctx, TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv t0 = tcg_temp_new(tcg_ctx);
-    gen_mov_reg_FCC0(dc, dst, src, fcc_offset);
-    gen_mov_reg_FCC1(dc, t0, src, fcc_offset);
+    gen_mov_reg_FCC0(tcg_ctx, dst, src, fcc_offset);
+    gen_mov_reg_FCC1(tcg_ctx, t0, src, fcc_offset);
     tcg_gen_and_tl(tcg_ctx, dst, dst, t0);
     tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1);
     tcg_temp_free(tcg_ctx, t0);
 }
@@ -1012,9 +960,7 @@ static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                                target_ulong pc2, TCGv r_cond)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    int l1;
-
-    l1 = gen_new_label(tcg_ctx);
+    TCGLabel *l1 = gen_new_label(tcg_ctx);
 
     tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, r_cond, 0, l1);
 
@@ -1024,20 +970,46 @@ static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
     gen_goto_tb(dc, 1, pc2, pc2 + 4);
 }
 
-static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
-                                target_ulong pc2, TCGv r_cond)
+static void gen_branch_a(DisasContext *dc, target_ulong pc1)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    int l1;
+    TCGLabel *l1 = gen_new_label(tcg_ctx);
+    target_ulong npc = dc->npc;
 
-    l1 = gen_new_label(tcg_ctx);
+    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_cond, 0, l1);
 
-    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, r_cond, 0, l1);
-
-    gen_goto_tb(dc, 0, pc2, pc1);
+    gen_goto_tb(dc, 0, npc, pc1);
 
     gen_set_label(tcg_ctx, l1);
-    gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
+    gen_goto_tb(dc, 1, npc + 4, npc + 8);
+
+    dc->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_branch_n(DisasContext *dc, target_ulong pc1)
+{
+    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
+    target_ulong npc = dc->npc;
+
+    if (likely(npc != DYNAMIC_PC)) {
+        dc->pc = npc;
+        dc->jump_pc[0] = pc1;
+        dc->jump_pc[1] = npc + 4;
+        dc->npc = JUMP_PC;
+    } else {
+        TCGv t, z;
+
+        tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc);
+
+        tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_npc, tcg_ctx->cpu_npc, 4);
+        t = tcg_const_tl(tcg_ctx, pc1);
+        z = tcg_const_tl(tcg_ctx, 0);
+        tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_npc, tcg_ctx->cpu_cond, z, t, tcg_ctx->cpu_npc);
+        tcg_temp_free(tcg_ctx, t);
+        tcg_temp_free(tcg_ctx, z);
+
+        dc->pc = DYNAMIC_PC;
+    }
 }
 
 static inline void gen_generic_branch(DisasContext *dc)
@@ -1047,7 +1019,7 @@ static inline void gen_generic_branch(DisasContext *dc)
     TCGv npc1 = tcg_const_tl(tcg_ctx, dc->jump_pc[1]);
     TCGv zero = tcg_const_tl(tcg_ctx, 0);
 
-    tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->cpu_npc, *(TCGv *)tcg_ctx->cpu_cond, zero, npc0, npc1);
+    tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_npc, tcg_ctx->cpu_cond, zero, npc0, npc1);
 
     tcg_temp_free(tcg_ctx, npc0);
     tcg_temp_free(tcg_ctx, npc1);
@@ -1071,14 +1043,14 @@ static inline void save_npc(DisasContext *dc)
         gen_generic_branch(dc);
         dc->npc = DYNAMIC_PC;
     } else if (dc->npc != DYNAMIC_PC) {
-        tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, dc->npc);
+        tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_npc, dc->npc);
     }
 }
 
 static inline void update_psr(DisasContext *dc)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     if (dc->cc_op != CC_OP_FLAGS) {
+        TCGContext *tcg_ctx = dc->uc->tcg_ctx;
         dc->cc_op = CC_OP_FLAGS;
         gen_helper_compute_psr(tcg_ctx, tcg_ctx->cpu_env);
     }
@@ -1087,30 +1059,48 @@ static inline void update_psr(DisasContext *dc)
 static inline void save_state(DisasContext *dc)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, dc->pc);
+    tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, dc->pc);
     save_npc(dc);
 }
 
+static void gen_exception(DisasContext *dc, int which)
+{
+    TCGv_i32 t;
+    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
+
+    save_state(dc);
+    t = tcg_const_i32(tcg_ctx, which);
+    gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, t);
+    tcg_temp_free_i32(tcg_ctx, t);
+    dc->base.is_jmp = DISAS_NORETURN;
+}
+
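[Editor's note: the gen_branch_a/gen_branch_n split above implements SPARC's delayed branches: gen_branch_a covers the annulling case, gen_branch_n the non-annulling one, with dc->npc tracking the delay slot. A minimal host-C model of the architectural pc/npc update follows; the SparcPcState struct and function names are hypothetical, purely illustrative.]

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t pc, npc; } SparcPcState;

/* Conditional branch with annul bit 'a': the delay slot (at npc) executes
 * when the branch is taken; with a==1 and the branch not taken, the delay
 * slot is annulled (skipped), matching gen_goto_tb(dc, 1, npc+4, npc+8). */
static void branch_cc(SparcPcState *s, uint64_t target, bool taken, bool a)
{
    if (taken) {
        s->pc = s->npc;         /* delay slot executes */
        s->npc = target;
    } else if (a) {
        s->pc = s->npc + 4;     /* annulled: skip the delay slot */
        s->npc = s->pc + 4;
    } else {
        s->pc = s->npc;         /* delay slot executes, then fall through */
        s->npc = s->pc + 4;
    }
}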
+static void gen_check_align(TCGContext *tcg_ctx, TCGv addr, int mask)
+{
+    TCGv_i32 r_mask = tcg_const_i32(tcg_ctx, mask);
+    gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, addr, r_mask);
+    tcg_temp_free_i32(tcg_ctx, r_mask);
+}
+
 static inline void gen_mov_pc_npc(DisasContext *dc)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     if (dc->npc == JUMP_PC) {
         gen_generic_branch(dc);
-        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc);
+        tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc);
         dc->pc = DYNAMIC_PC;
     } else if (dc->npc == DYNAMIC_PC) {
-        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc);
+        tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc);
         dc->pc = DYNAMIC_PC;
     } else {
         dc->pc = dc->npc;
     }
 }
 
-static inline void gen_op_next_insn(DisasContext *dc)
+static inline void gen_op_next_insn(TCGContext *tcg_ctx)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc);
-    tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, *(TCGv *)tcg_ctx->cpu_npc, 4);
+    tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc);
+    tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_npc, tcg_ctx->cpu_npc, 4);
 }
 
 static void free_compare(TCGContext *tcg_ctx, DisasCompare *cmp)
@@ -1123,7 +1113,8 @@ static void free_compare(TCGContext *tcg_ctx, DisasCompare *cmp)
     }
 }
 
-static void gen_compare(DisasContext *dc, DisasCompare *cmp, bool xcc, unsigned int cond)
+static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
+                        DisasContext *dc)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     static int subcc_cond[16] = {
@@ -1188,12 +1179,12 @@ static void gen_compare(DisasContext *dc, DisasCompare *cmp, bool xcc, unsigned
         if (!xcc) {
             cmp->g1 = false;
             cmp->c1 = tcg_temp_new(tcg_ctx);
-            tcg_gen_ext32s_tl(tcg_ctx, cmp->c1, *(TCGv *)tcg_ctx->cpu_cc_dst);
+            tcg_gen_ext32s_tl(tcg_ctx, cmp->c1, tcg_ctx->cpu_cc_dst);
             break;
         }
 #endif
         cmp->g1 = true;
-        cmp->c1 = *(TCGv *)tcg_ctx->cpu_cc_dst;
+        cmp->c1 = tcg_ctx->cpu_cc_dst;
         break;
 
     case CC_OP_SUB:
@@ -1217,14 +1208,14 @@ static void gen_compare(DisasContext *dc, DisasCompare *cmp, bool xcc, unsigned
             cmp->g1 = cmp->g2 = false;
             cmp->c1 = tcg_temp_new(tcg_ctx);
             cmp->c2 = tcg_temp_new(tcg_ctx);
-            tcg_gen_ext32s_tl(tcg_ctx, cmp->c1, *(TCGv *)tcg_ctx->cpu_cc_src);
-            tcg_gen_ext32s_tl(tcg_ctx, cmp->c2, *(TCGv *)tcg_ctx->cpu_cc_src2);
+            tcg_gen_ext32s_tl(tcg_ctx, cmp->c1, tcg_ctx->cpu_cc_src);
+            tcg_gen_ext32s_tl(tcg_ctx, cmp->c2, tcg_ctx->cpu_cc_src2);
             break;
         }
 #endif
         cmp->g1 = cmp->g2 = true;
-        cmp->c1 = *(TCGv *)tcg_ctx->cpu_cc_src;
-        cmp->c2 = *(TCGv *)tcg_ctx->cpu_cc_src2;
+        cmp->c1 = tcg_ctx->cpu_cc_src;
+        cmp->c2 = tcg_ctx->cpu_cc_src2;
         break;
     }
     break;
@@ -1245,61 +1236,60 @@ static void gen_compare(DisasContext *dc, DisasCompare *cmp, bool xcc, unsigned
         switch (cond) {
         case 0x0:
-            gen_op_eval_bn(dc, r_dst);
+            gen_op_eval_bn(tcg_ctx, r_dst);
             break;
         case 0x1:
-            gen_op_eval_be(dc, r_dst, r_src);
+            gen_op_eval_be(tcg_ctx, r_dst, r_src);
             break;
         case 0x2:
-            gen_op_eval_ble(dc, r_dst, r_src);
+            gen_op_eval_ble(tcg_ctx, r_dst, r_src);
             break;
         case 0x3:
-            gen_op_eval_bl(dc, r_dst, r_src);
+            gen_op_eval_bl(tcg_ctx, r_dst, r_src);
            break;
         case 0x4:
-            gen_op_eval_bleu(dc, r_dst, r_src);
+            gen_op_eval_bleu(tcg_ctx, r_dst, r_src);
            break;
        case 0x5:
-            gen_op_eval_bcs(dc, r_dst, r_src);
+            gen_op_eval_bcs(tcg_ctx, r_dst, r_src);
            break;
        case 0x6:
-            gen_op_eval_bneg(dc, r_dst, r_src);
+            gen_op_eval_bneg(tcg_ctx, r_dst, r_src);
            break;
        case 0x7:
-            gen_op_eval_bvs(dc, r_dst, r_src);
+            gen_op_eval_bvs(tcg_ctx, r_dst, r_src);
            break;
        case 0x8:
-            gen_op_eval_ba(dc, r_dst);
+            gen_op_eval_ba(tcg_ctx, r_dst);
            break;
        case 0x9:
-            gen_op_eval_bne(dc, r_dst, r_src);
+            gen_op_eval_bne(tcg_ctx, r_dst, r_src);
            break;
        case 0xa:
-            gen_op_eval_bg(dc, r_dst, r_src);
+            gen_op_eval_bg(tcg_ctx, r_dst, r_src);
            break;
        case 0xb:
-            gen_op_eval_bge(dc, r_dst, r_src);
+            gen_op_eval_bge(tcg_ctx, r_dst, r_src);
            break;
        case 0xc:
-            gen_op_eval_bgu(dc, r_dst, r_src);
+            gen_op_eval_bgu(tcg_ctx, r_dst, r_src);
            break;
        case 0xd:
-            gen_op_eval_bcc(dc, r_dst, r_src);
+            gen_op_eval_bcc(tcg_ctx, r_dst, r_src);
            break;
        case 0xe:
-            gen_op_eval_bpos(dc, r_dst, r_src);
+            gen_op_eval_bpos(tcg_ctx, r_dst, r_src);
            break;
        case 0xf:
-            gen_op_eval_bvc(dc, r_dst, r_src);
+            gen_op_eval_bvc(tcg_ctx, r_dst, r_src);
            break;
        }
        break;
    }
 }
 
-static void gen_fcompare(DisasContext *dc, DisasCompare *cmp, unsigned int cc, unsigned int cond)
+static void gen_fcompare(TCGContext *tcg_ctx, DisasCompare *cmp, unsigned int cc, unsigned int cond)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     unsigned int offset;
     TCGv r_dst;
 
@@ -1328,61 +1318,62 @@ static void gen_fcompare(DisasContext *dc, DisasCompare *cmp, unsigned int cc, u
    switch (cond) {
    case 0x0:
-        gen_op_eval_bn(dc, r_dst);
+        gen_op_eval_bn(tcg_ctx, r_dst);
        break;
    case 0x1:
-        gen_op_eval_fbne(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbne(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0x2:
-        gen_op_eval_fblg(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fblg(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0x3:
-        gen_op_eval_fbul(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbul(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0x4:
-        gen_op_eval_fbl(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbl(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0x5:
-        gen_op_eval_fbug(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbug(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0x6:
-        gen_op_eval_fbg(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbg(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0x7:
-        gen_op_eval_fbu(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbu(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0x8:
-        gen_op_eval_ba(dc, r_dst);
+        gen_op_eval_ba(tcg_ctx, r_dst);
        break;
    case 0x9:
-        gen_op_eval_fbe(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbe(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0xa:
-        gen_op_eval_fbue(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbue(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0xb:
-        gen_op_eval_fbge(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbge(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0xc:
-        gen_op_eval_fbuge(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbuge(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0xd:
-        gen_op_eval_fble(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fble(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0xe:
-        gen_op_eval_fbule(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbule(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    case 0xf:
-        gen_op_eval_fbo(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset);
+        gen_op_eval_fbo(tcg_ctx, r_dst, tcg_ctx->cpu_fsr, offset);
        break;
    }
 }
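[Editor's note: the fbne..fbo helpers above all decode the same 2-bit FCC field of the FSR, whose values (per the // comments earlier in this file) are 0 = equal, 1 = less, 2 = greater, 3 = unordered; bit 0 is FCC0 and bit 1 is FCC1. A plain-C restatement of three of the bit tests, sketch only:]

/* fcc is the 2-bit condition field; returns nonzero if the branch is taken. */
static int eval_fbne(unsigned fcc) { return (fcc & 1) | (fcc >> 1); }       /* !0 */
static int eval_fblg(unsigned fcc) { return (fcc & 1) ^ (fcc >> 1); }       /* 1 or 2 */
static int eval_fbe(unsigned fcc)  { return ((fcc & 1) | (fcc >> 1)) ^ 1; } /* 0 */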
 
-static void gen_cond(DisasContext *dc, TCGv r_dst, unsigned int cc, unsigned int cond)
+static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
+                     DisasContext *dc)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     DisasCompare cmp;
-    gen_compare(dc, &cmp, cc, cond);
+    gen_compare(&cmp, cc, cond, dc);
 
     /* The interface is to return a boolean in r_dst. */
     if (cmp.is_bool) {
@@ -1394,11 +1385,10 @@ static void gen_cond(DisasContext *dc, TCGv r_dst, unsigned int cc, unsigned int
     free_compare(tcg_ctx, &cmp);
 }
 
-static void gen_fcond(DisasContext *dc, TCGv r_dst, unsigned int cc, unsigned int cond)
+static void gen_fcond(TCGContext *tcg_ctx, TCGv r_dst, unsigned int cc, unsigned int cond)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     DisasCompare cmp;
-    gen_fcompare(dc, &cmp, cc, cond);
+    gen_fcompare(tcg_ctx, &cmp, cc, cond);
 
     /* The interface is to return a boolean in r_dst. */
     if (cmp.is_bool) {
@@ -1423,9 +1413,8 @@ static const int gen_tcg_cond_reg[8] = {
     TCG_COND_LT,
 };
 
-static void gen_compare_reg(DisasContext *dc, DisasCompare *cmp, int cond, TCGv r_src)
+static void gen_compare_reg(TCGContext *tcg_ctx, DisasCompare *cmp, int cond, TCGv r_src)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
     cmp->is_bool = false;
     cmp->g1 = true;
@@ -1434,11 +1423,10 @@ static void gen_compare_reg(DisasContext *dc, DisasCompare *cmp, int cond, TCGv
     cmp->c2 = tcg_const_tl(tcg_ctx, 0);
 }
 
-static inline void gen_cond_reg(DisasContext *dc, TCGv r_dst, int cond, TCGv r_src)
+static inline void gen_cond_reg(TCGContext *tcg_ctx, TCGv r_dst, int cond, TCGv r_src)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     DisasCompare cmp;
-    gen_compare_reg(dc, &cmp, cond, r_src);
+    gen_compare_reg(tcg_ctx, &cmp, cond, r_src);
 
     /* The interface is to return a boolean in r_dst. */
     tcg_gen_setcond_tl(tcg_ctx, cmp.cond, r_dst, cmp.c1, cmp.c2);
@@ -1475,24 +1463,15 @@ static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
-            tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc);
+            tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc);
        }
    } else {
        flush_cond(dc);
-        gen_cond(dc, *(TCGv *)tcg_ctx->cpu_cond, cc, cond);
+        gen_cond(tcg_ctx->cpu_cond, cc, cond, dc);
        if (a) {
-            gen_branch_a(dc, target, dc->npc, *(TCGv *)tcg_ctx->cpu_cond);
-            dc->is_br = 1;
+            gen_branch_a(dc, target);
        } else {
-            dc->pc = dc->npc;
-            dc->jump_pc[0] = target;
-            if (unlikely(dc->npc == DYNAMIC_PC)) {
-                dc->jump_pc[1] = DYNAMIC_PC;
-                tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc, 4);
-            } else {
-                dc->jump_pc[1] = dc->npc + 4;
-                dc->npc = JUMP_PC;
-            }
+            gen_branch_n(dc, target);
        }
    }
 }
@@ -1525,24 +1504,15 @@ static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
-            tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc);
+            tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_pc, tcg_ctx->cpu_npc);
        }
    } else {
        flush_cond(dc);
-        gen_fcond(dc, *(TCGv *)tcg_ctx->cpu_cond, cc, cond);
+        gen_fcond(tcg_ctx, tcg_ctx->cpu_cond, cc, cond);
        if (a) {
-            gen_branch_a(dc, target, dc->npc, *(TCGv *)tcg_ctx->cpu_cond);
-            dc->is_br = 1;
+            gen_branch_a(dc, target);
        } else {
-            dc->pc = dc->npc;
-            dc->jump_pc[0] = target;
-            if (unlikely(dc->npc == DYNAMIC_PC)) {
-                dc->jump_pc[1] = DYNAMIC_PC;
-                tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc, 4);
-            } else {
-                dc->jump_pc[1] = dc->npc + 4;
-                dc->npc = JUMP_PC;
-            }
+            gen_branch_n(dc, target);
        }
    }
 }
@@ -1559,214 +1529,179 @@ static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
-    gen_cond_reg(dc, *(TCGv *)tcg_ctx->cpu_cond, cond, r_reg);
+    gen_cond_reg(tcg_ctx, tcg_ctx->cpu_cond, cond, r_reg);
    if (a) {
-        gen_branch_a(dc, target, dc->npc, *(TCGv *)tcg_ctx->cpu_cond);
-        dc->is_br = 1;
+        gen_branch_a(dc, target);
    } else {
-        dc->pc = dc->npc;
-        dc->jump_pc[0] = target;
-        if (unlikely(dc->npc == DYNAMIC_PC)) {
-            dc->jump_pc[1] = DYNAMIC_PC;
-            tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc, 4);
-        } else {
-            dc->jump_pc[1] = dc->npc + 4;
-            dc->npc = JUMP_PC;
-        }
+        gen_branch_n(dc, target);
    }
 }
 
-static inline void gen_op_fcmps(DisasContext *dc, int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+static inline void gen_op_fcmps(TCGContext *tcg_ctx, int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
-        gen_helper_fcmps(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmps(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 1:
-        gen_helper_fcmps_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmps_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 2:
-        gen_helper_fcmps_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmps_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 3:
-        gen_helper_fcmps_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmps_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    }
 }
 
-static inline void gen_op_fcmpd(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+static inline void gen_op_fcmpd(TCGContext *tcg_ctx, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
-        gen_helper_fcmpd(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmpd(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 1:
-        gen_helper_fcmpd_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmpd_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 2:
-        gen_helper_fcmpd_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmpd_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 3:
-        gen_helper_fcmpd_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmpd_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    }
 }
 
-static inline void gen_op_fcmpq(DisasContext *dc, int fccno)
+static inline void gen_op_fcmpq(TCGContext *tcg_ctx, int fccno)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
-        gen_helper_fcmpq(tcg_ctx, tcg_ctx->cpu_env);
+        gen_helper_fcmpq(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
        break;
    case 1:
-        gen_helper_fcmpq_fcc1(tcg_ctx, tcg_ctx->cpu_env);
+        gen_helper_fcmpq_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
        break;
    case 2:
-        gen_helper_fcmpq_fcc2(tcg_ctx, tcg_ctx->cpu_env);
+        gen_helper_fcmpq_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
        break;
    case 3:
-        gen_helper_fcmpq_fcc3(tcg_ctx, tcg_ctx->cpu_env);
+        gen_helper_fcmpq_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
        break;
    }
 }
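[Editor's note: every fcmp call site above gains tcg_ctx->cpu_fsr as the first argument after the context; in generated gen_helper_* wrappers that slot is the return-value destination, so the comparison helpers now presumably return the updated FSR instead of writing env->fsr internally. A hedged, compilable model of what such a helper computes — fcmps_model and the bit positions (fcc0 at FSR bits 11:10) are illustrative assumptions, not the actual helper:]

#include <stdint.h>

static uint64_t fcmps_model(uint64_t fsr, float a, float b)
{
    /* 0 = equal, 1 = less, 2 = greater, 3 = unordered (NaN operand) */
    unsigned fcc = (a == b) ? 0 : (a < b) ? 1 : (a > b) ? 2 : 3;
    fsr &= ~(0x3ULL << 10);          /* clear the fcc0 field (bits 11:10) */
    fsr |= (uint64_t)fcc << 10;
    return fsr;                      /* returned, mirroring the cpu_fsr output */
}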
 
-static inline void gen_op_fcmpes(DisasContext *dc, int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+static inline void gen_op_fcmpes(TCGContext *tcg_ctx, int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
-        gen_helper_fcmpes(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmpes(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 1:
-        gen_helper_fcmpes_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmpes_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 2:
-        gen_helper_fcmpes_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmpes_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 3:
-        gen_helper_fcmpes_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmpes_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    }
 }
 
-static inline void gen_op_fcmped(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+static inline void gen_op_fcmped(TCGContext *tcg_ctx, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
-        gen_helper_fcmped(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmped(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 1:
-        gen_helper_fcmped_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmped_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 2:
-        gen_helper_fcmped_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmped_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    case 3:
-        gen_helper_fcmped_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+        gen_helper_fcmped_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
        break;
    }
 }
 
-static inline void gen_op_fcmpeq(DisasContext *dc, int fccno)
+static inline void gen_op_fcmpeq(TCGContext *tcg_ctx, int fccno)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    switch (fccno) {
    case 0:
-        gen_helper_fcmpeq(tcg_ctx, tcg_ctx->cpu_env);
+        gen_helper_fcmpeq(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
        break;
    case 1:
-        gen_helper_fcmpeq_fcc1(tcg_ctx, tcg_ctx->cpu_env);
+        gen_helper_fcmpeq_fcc1(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
        break;
    case 2:
-        gen_helper_fcmpeq_fcc2(tcg_ctx, tcg_ctx->cpu_env);
+        gen_helper_fcmpeq_fcc2(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
        break;
    case 3:
-        gen_helper_fcmpeq_fcc3(tcg_ctx, tcg_ctx->cpu_env);
+        gen_helper_fcmpeq_fcc3(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
        break;
    }
 }
 
 #else
-static inline void gen_op_fcmps(DisasContext *dc, int fccno, TCGv r_rs1, TCGv r_rs2)
+static inline void gen_op_fcmps(TCGContext *tcg_ctx, int fccno, TCGv r_rs1, TCGv r_rs2)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    gen_helper_fcmps(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+    gen_helper_fcmps(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
 }
 
-static inline void gen_op_fcmpd(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+static inline void gen_op_fcmpd(TCGContext *tcg_ctx, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    gen_helper_fcmpd(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+    gen_helper_fcmpd(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
 }
 
-static inline void gen_op_fcmpq(DisasContext *dc, int fccno)
+static inline void gen_op_fcmpq(TCGContext *tcg_ctx, int fccno)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    gen_helper_fcmpq(tcg_ctx, tcg_ctx->cpu_env);
+    gen_helper_fcmpq(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 }
 
-static inline void gen_op_fcmpes(DisasContext *dc, int fccno, TCGv r_rs1, TCGv r_rs2)
+static inline void gen_op_fcmpes(TCGContext *tcg_ctx, int fccno, TCGv r_rs1, TCGv r_rs2)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    gen_helper_fcmpes(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+    gen_helper_fcmpes(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
 }
 
-static inline void gen_op_fcmped(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+static inline void gen_op_fcmped(TCGContext *tcg_ctx, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    gen_helper_fcmped(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2);
+    gen_helper_fcmped(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, r_rs1, r_rs2);
 }
 
-static inline void gen_op_fcmpeq(DisasContext *dc, int fccno)
+static inline void gen_op_fcmpeq(TCGContext *tcg_ctx, int fccno)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    gen_helper_fcmpeq(tcg_ctx, tcg_ctx->cpu_env);
+    gen_helper_fcmpeq(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 }
 #endif
 
-static inline void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
+static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    TCGv_i32 r_const;
-
-    tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_fsr, *(TCGv *)tcg_ctx->cpu_fsr, FSR_FTT_NMASK);
-    tcg_gen_ori_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_fsr, *(TCGv *)tcg_ctx->cpu_fsr, fsr_flags);
-    r_const = tcg_const_i32(tcg_ctx, TT_FP_EXCP);
-    gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const);
-    tcg_temp_free_i32(tcg_ctx, r_const);
+    tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_fsr, FSR_FTT_NMASK);
+    tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_fsr, fsr_flags);
+    gen_exception(dc, TT_FP_EXCP);
 }
 
 static int gen_trap_ifnofpu(DisasContext *dc)
 {
-#if !defined(CONFIG_USER_ONLY)
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     if (!dc->fpu_enabled) {
-        TCGv_i32 r_const;
-
-        save_state(dc);
-        r_const = tcg_const_i32(tcg_ctx, TT_NFPU_INSN);
-        gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const);
-        tcg_temp_free_i32(tcg_ctx, r_const);
-        dc->is_br = 1;
+        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
-#endif
    return 0;
 }
 
-static inline void gen_op_clear_ieee_excp_and_FTT(DisasContext *dc)
+static inline void gen_op_clear_ieee_excp_and_FTT(TCGContext *tcg_ctx)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_fsr, *(TCGv *)tcg_ctx->cpu_fsr, FSR_FTT_CEXC_NMASK);
+    tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_fsr, FSR_FTT_CEXC_NMASK);
 }
 
 static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
-                              void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr, TCGv_i32))
+                              void (*gen)(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i32))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i32 dst, src;
@@ -1775,12 +1710,13 @@ static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
     dst = gen_dest_fpr_F(dc);
 
     gen(tcg_ctx, dst, tcg_ctx->cpu_env, src);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
     gen_store_fpr_F(dc, rd, dst);
 }
 
 static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
-                                 void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32))
+                                 void (*gen)(TCGContext *, TCGv_i32, TCGv_i32))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i32 dst, src;
@@ -1794,7 +1730,7 @@ static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
 }
 
 static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
-                               void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
+                               void (*gen)(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i32 dst, src1, src2;
@@ -1804,13 +1740,14 @@ static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
     dst = gen_dest_fpr_F(dc);
 
     gen(tcg_ctx, dst, tcg_ctx->cpu_env, src1, src2);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
     gen_store_fpr_F(dc, rd, dst);
 }
 
 #ifdef TARGET_SPARC64
 static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
-                                  void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32, TCGv_i32))
+                                  void (*gen)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i32 dst, src1, src2;
@@ -1819,14 +1756,14 @@ static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
     src2 = gen_load_fpr_F(dc, rs2);
     dst = gen_dest_fpr_F(dc);
 
-    gen(tcg_ctx, dst, src1, src2);
+    gen(tcg_ctx, dst, src1, src2);
 
     gen_store_fpr_F(dc, rd, dst);
 }
 #endif
 
 static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
-                              void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i64))
+                              void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i64))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 dst, src;
@@ -1835,13 +1772,14 @@ static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
     dst = gen_dest_fpr_D(dc, rd);
 
     gen(tcg_ctx, dst, tcg_ctx->cpu_env, src);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
     gen_store_fpr_D(dc, rd, dst);
 }
 
 #ifdef TARGET_SPARC64
 static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
-                                 void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64))
+                                 void (*gen)(TCGContext *, TCGv_i64, TCGv_i64))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 dst, src;
@@ -1856,7 +1794,7 @@ static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
 #endif
 
 static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
-                               void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
+                               void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 dst, src1, src2;
@@ -1866,13 +1804,14 @@ static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
     dst = gen_dest_fpr_D(dc, rd);
 
     gen(tcg_ctx, dst, tcg_ctx->cpu_env, src1, src2);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
     gen_store_fpr_D(dc, rd, dst);
 }
 
 #ifdef TARGET_SPARC64
 static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
-                                  void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64))
+                                  void (*gen)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 dst, src1, src2;
@@ -1887,7 +1826,7 @@ static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
 }
 
 static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
-                                   void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+                                   void (*gen)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 dst, src1, src2;
@@ -1896,13 +1835,13 @@ static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
     src2 = gen_load_fpr_D(dc, rs2);
     dst = gen_dest_fpr_D(dc, rd);
 
-    gen(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_gsr, src1, src2);
+    gen(tcg_ctx, dst, tcg_ctx->cpu_gsr, src1, src2);
 
     gen_store_fpr_D(dc, rd, dst);
 }
 
 static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
-                                   void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+                                   void (*gen)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 dst, src0, src1, src2;
@@ -1919,46 +1858,48 @@ static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
 #endif
 
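[Editor's note: the recurring "+ gen_helper_check_ieee_exceptions(...)" line added to each gen_fop_* wrapper folds the deferred IEEE exception check into every FP operation, updating cpu_fsr in place. A hedged host-C model of what such a check plausibly does, assuming a softfloat-style accrued-flags word; FpuModel and the mask values are illustrative, with the FSR cexc field assumed at bits 4:0:]

#include <stdint.h>

typedef struct { uint64_t fsr; unsigned float_exc_flags; } FpuModel;

static uint64_t check_ieee_exceptions_model(FpuModel *f)
{
    unsigned cexc = f->float_exc_flags & 0x1f;  /* current exception bits */
    f->float_exc_flags = 0;                     /* consumed */
    f->fsr = (f->fsr & ~0x1fULL) | cexc;        /* latch into FSR.cexc */
    /* the real helper would also trap when an enabled (TEM) bit is set */
    return f->fsr;                              /* mirrors the cpu_fsr output */
}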
 static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
-                              void (*gen)(TCGContext *tcg_ctx, TCGv_ptr))
+                              void (*gen)(TCGContext *, TCGv_ptr))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
 
-    gen_op_load_fpr_QT1(dc, QFPREG(rs));
+    gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs));
 
     gen(tcg_ctx, tcg_ctx->cpu_env);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
-    gen_op_store_QT0_fpr(dc, QFPREG(rd));
+    gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd));
     gen_update_fprs_dirty(dc, QFPREG(rd));
 }
 
 #ifdef TARGET_SPARC64
 static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
-                                 void (*gen)(TCGContext *tcg_ctx, TCGv_ptr))
+                                 void (*gen)(TCGContext *, TCGv_ptr))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
 
-    gen_op_load_fpr_QT1(dc, QFPREG(rs));
+    gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs));
 
     gen(tcg_ctx, tcg_ctx->cpu_env);
 
-    gen_op_store_QT0_fpr(dc, QFPREG(rd));
+    gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd));
     gen_update_fprs_dirty(dc, QFPREG(rd));
 }
 #endif
 
 static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
-                               void (*gen)(TCGContext *tcg_ctx, TCGv_ptr))
+                               void (*gen)(TCGContext *, TCGv_ptr))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
 
-    gen_op_load_fpr_QT0(dc, QFPREG(rs1));
-    gen_op_load_fpr_QT1(dc, QFPREG(rs2));
+    gen_op_load_fpr_QT0(tcg_ctx, QFPREG(rs1));
+    gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs2));
 
     gen(tcg_ctx, tcg_ctx->cpu_env);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
-    gen_op_store_QT0_fpr(dc, QFPREG(rd));
+    gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd));
     gen_update_fprs_dirty(dc, QFPREG(rd));
 }
 
 static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
-                               void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
+                               void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 dst;
@@ -1969,12 +1910,13 @@ static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
     dst = gen_dest_fpr_D(dc, rd);
 
     gen(tcg_ctx, dst, tcg_ctx->cpu_env, src1, src2);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
     gen_store_fpr_D(dc, rd, dst);
 }
 
 static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
-                               void (*gen)(TCGContext *tcg_ctx, TCGv_ptr, TCGv_i64, TCGv_i64))
+                               void (*gen)(TCGContext *, TCGv_ptr, TCGv_i64, TCGv_i64))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 src1, src2;
@@ -1983,14 +1925,15 @@ static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
     src2 = gen_load_fpr_D(dc, rs2);
 
     gen(tcg_ctx, tcg_ctx->cpu_env, src1, src2);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
-    gen_op_store_QT0_fpr(dc, QFPREG(rd));
+    gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd));
     gen_update_fprs_dirty(dc, QFPREG(rd));
 }
 
 #ifdef TARGET_SPARC64
 static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
-                              void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i32))
+                              void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i32))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 dst;
@@ -2000,13 +1943,14 @@ static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
     dst = gen_dest_fpr_D(dc, rd);
 
     gen(tcg_ctx, dst, tcg_ctx->cpu_env, src);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
     gen_store_fpr_D(dc, rd, dst);
 }
 #endif
 
 static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
-                                 void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i32))
+                                 void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr, TCGv_i32))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 dst;
@@ -2021,7 +1965,7 @@ static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
 }
 
 static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
-                              void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr, TCGv_i64))
+                              void (*gen)(TCGContext *, TCGv_i32, TCGv_ptr, TCGv_i64))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i32 dst;
@@ -2031,40 +1975,43 @@ static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
     dst = gen_dest_fpr_F(dc);
 
     gen(tcg_ctx, dst, tcg_ctx->cpu_env, src);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
     gen_store_fpr_F(dc, rd, dst);
 }
 
 static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
-                              void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr))
+                              void (*gen)(TCGContext *, TCGv_i32, TCGv_ptr))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i32 dst;
 
-    gen_op_load_fpr_QT1(dc, QFPREG(rs));
+    gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs));
     dst = gen_dest_fpr_F(dc);
 
     gen(tcg_ctx, dst, tcg_ctx->cpu_env);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
     gen_store_fpr_F(dc, rd, dst);
 }
 
 static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
-                              void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr))
+                              void (*gen)(TCGContext *, TCGv_i64, TCGv_ptr))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 dst;
 
-    gen_op_load_fpr_QT1(dc, QFPREG(rs));
+    gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs));
     dst = gen_dest_fpr_D(dc, rd);
 
     gen(tcg_ctx, dst, tcg_ctx->cpu_env);
+    gen_helper_check_ieee_exceptions(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env);
 
     gen_store_fpr_D(dc, rd, dst);
 }
 
 static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
-                                 void (*gen)(TCGContext *tcg_ctx, TCGv_ptr, TCGv_i32))
+                                 void (*gen)(TCGContext *, TCGv_ptr, TCGv_i32))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i32 src;
@@ -2073,12 +2020,12 @@ static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
 
     gen(tcg_ctx, tcg_ctx->cpu_env, src);
 
-    gen_op_store_QT0_fpr(dc, QFPREG(rd));
+    gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd));
     gen_update_fprs_dirty(dc, QFPREG(rd));
 }
 
 static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
-                                 void (*gen)(TCGContext *tcg_ctx, TCGv_ptr, TCGv_i64))
+                                 void (*gen)(TCGContext *, TCGv_ptr, TCGv_i64))
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
     TCGv_i64 src;
@@ -2087,282 +2034,913 @@ static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
 
     gen(tcg_ctx, tcg_ctx->cpu_env, src);
 
-    gen_op_store_QT0_fpr(dc, QFPREG(rd));
+    gen_op_store_QT0_fpr(tcg_ctx, QFPREG(rd));
     gen_update_fprs_dirty(dc, QFPREG(rd));
 }
 
+static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
+                     TCGv addr, int mmu_idx, MemOp memop)
+{
+    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
+    gen_address_mask(dc, addr);
+    tcg_gen_atomic_xchg_tl(tcg_ctx, dst, addr, src, mmu_idx, memop);
+}
+
+static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
+{
+    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
+    TCGv m1 = tcg_const_tl(tcg_ctx, 0xff);
+    gen_address_mask(dc, addr);
+    tcg_gen_atomic_xchg_tl(tcg_ctx, dst, addr, m1, mmu_idx, MO_UB);
+    tcg_temp_free(tcg_ctx, m1);
+}
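[Editor's note: gen_swap and gen_ldstub above lower SWAP and LDSTUB to a single tcg_gen_atomic_xchg_tl, so both stay atomic under multi-threaded TCG instead of being a separate load and store. Their host-level meaning in C11 atomics, sketch only:]

#include <stdatomic.h>
#include <stdint.h>

static uint8_t ldstub_model(_Atomic uint8_t *p)
{
    return atomic_exchange(p, 0xff);   /* store 0xff, return the old byte */
}

static uint32_t swap_model(_Atomic uint32_t *p, uint32_t v)
{
    return atomic_exchange(p, v);      /* exchange word, return the old value */
}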
 
 /* asi moves */
-#ifdef TARGET_SPARC64
-static inline TCGv_i32 gen_get_asi(DisasContext *dc, int insn, TCGv r_addr)
-{
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
+typedef enum {
+    GET_ASI_HELPER,
+    GET_ASI_EXCP,
+    GET_ASI_DIRECT,
+    GET_ASI_DTWINX,
+    GET_ASI_BLOCK,
+    GET_ASI_SHORT,
+    GET_ASI_BCOPY,
+    GET_ASI_BFILL,
+} ASIType;
+
+typedef struct {
+    ASIType type;
     int asi;
-    TCGv_i32 r_asi;
+    int mem_idx;
+    MemOp memop;
+} DisasASI;
 
+static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
+{
+    int asi = GET_FIELD(insn, 19, 26);
+    ASIType type = GET_ASI_HELPER;
+    int mem_idx = dc->mem_idx;
+
+#ifndef TARGET_SPARC64
+    /* Before v9, all asis are immediate and privileged.  */
     if (IS_IMM) {
-        r_asi = tcg_temp_new_i32(tcg_ctx);
-        tcg_gen_mov_i32(tcg_ctx, r_asi, tcg_ctx->cpu_asi);
+        gen_exception(dc, TT_ILL_INSN);
+        type = GET_ASI_EXCP;
+    } else if (supervisor(dc)
+               /* Note that LEON accepts ASI_USERDATA in user mode, for
+                  use with CASA.  Also note that previous versions of
+                  QEMU allowed (and old versions of gcc emitted) ASI_P
+                  for LEON, which is incorrect.  */
+               || (asi == ASI_USERDATA
+                   && (dc->def->features & CPU_FEATURE_CASA))) {
+        switch (asi) {
+        case ASI_USERDATA:    /* User data access */
+            mem_idx = MMU_USER_IDX;
+            type = GET_ASI_DIRECT;
+            break;
+        case ASI_KERNELDATA: /* Supervisor data access */
+            mem_idx = MMU_KERNEL_IDX;
+            type = GET_ASI_DIRECT;
+            break;
+        case ASI_M_BYPASS:    /* MMU passthrough */
+        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
+            mem_idx = MMU_PHYS_IDX;
+            type = GET_ASI_DIRECT;
+            break;
+        case ASI_M_BCOPY: /* Block copy, sta access */
+            mem_idx = MMU_KERNEL_IDX;
+            type = GET_ASI_BCOPY;
+            break;
+        case ASI_M_BFILL: /* Block fill, stda access */
+            mem_idx = MMU_KERNEL_IDX;
+            type = GET_ASI_BFILL;
+            break;
+        }
+
+        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
+         * permissions check in get_physical_address(..).
+         */
+        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
     } else {
-        asi = GET_FIELD(insn, 19, 26);
-        r_asi = tcg_const_i32(tcg_ctx, asi);
+        gen_exception(dc, TT_PRIV_INSN);
+        type = GET_ASI_EXCP;
     }
-    return r_asi;
+#else
+    if (IS_IMM) {
+        asi = dc->asi;
+    }
+    /* With v9, all asis below 0x80 are privileged.  */
+    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
+       down that bit into DisasContext.  For the moment that's ok,
+       since the direct implementations below don't have any ASIs
+       in the restricted [0x30, 0x7f] range, and the check will be
+       done properly in the helper.  */
+    if (!supervisor(dc) && asi < 0x80) {
+        gen_exception(dc, TT_PRIV_ACT);
+        type = GET_ASI_EXCP;
+    } else {
+        switch (asi) {
+        case ASI_REAL:       /* Bypass */
+        case ASI_REAL_IO:    /* Bypass, non-cacheable */
+        case ASI_REAL_L:     /* Bypass LE */
+        case ASI_REAL_IO_L:  /* Bypass, non-cacheable LE */
+        case ASI_TWINX_REAL:   /* Real address, twinx */
+        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
+        case ASI_QUAD_LDD_PHYS:
+        case ASI_QUAD_LDD_PHYS_L:
+            mem_idx = MMU_PHYS_IDX;
+            break;
+        case ASI_N:  /* Nucleus */
+        case ASI_NL: /* Nucleus LE */
+        case ASI_TWINX_N:
+        case ASI_TWINX_NL:
+        case ASI_NUCLEUS_QUAD_LDD:
+        case ASI_NUCLEUS_QUAD_LDD_L:
+            if (hypervisor(dc)) {
+                mem_idx = MMU_PHYS_IDX;
+            } else {
+                mem_idx = MMU_NUCLEUS_IDX;
+            }
+            break;
+        case ASI_AIUP:  /* As if user primary */
+        case ASI_AIUPL: /* As if user primary LE */
+        case ASI_TWINX_AIUP:
+        case ASI_TWINX_AIUP_L:
+        case ASI_BLK_AIUP_4V:
+        case ASI_BLK_AIUP_L_4V:
+        case ASI_BLK_AIUP:
+        case ASI_BLK_AIUPL:
+            mem_idx = MMU_USER_IDX;
+            break;
+        case ASI_AIUS:  /* As if user secondary */
+        case ASI_AIUSL: /* As if user secondary LE */
+        case ASI_TWINX_AIUS:
+        case ASI_TWINX_AIUS_L:
+        case ASI_BLK_AIUS_4V:
+        case ASI_BLK_AIUS_L_4V:
+        case ASI_BLK_AIUS:
+        case ASI_BLK_AIUSL:
+            mem_idx = MMU_USER_SECONDARY_IDX;
+            break;
+        case ASI_S:  /* Secondary */
+        case ASI_SL: /* Secondary LE */
+        case ASI_TWINX_S:
+        case ASI_TWINX_SL:
+        case ASI_BLK_COMMIT_S:
+        case ASI_BLK_S:
+        case ASI_BLK_SL:
+        case ASI_FL8_S:
+        case ASI_FL8_SL:
+        case ASI_FL16_S:
+        case ASI_FL16_SL:
+            if (mem_idx == MMU_USER_IDX) {
+                mem_idx = MMU_USER_SECONDARY_IDX;
+            } else if (mem_idx == MMU_KERNEL_IDX) {
+                mem_idx = MMU_KERNEL_SECONDARY_IDX;
+            }
+            break;
+        case ASI_P:  /* Primary */
+        case ASI_PL: /* Primary LE */
+        case ASI_TWINX_P:
+        case ASI_TWINX_PL:
+        case ASI_BLK_COMMIT_P:
+        case ASI_BLK_P:
+        case ASI_BLK_PL:
+        case ASI_FL8_P:
+        case ASI_FL8_PL:
+        case ASI_FL16_P:
+        case ASI_FL16_PL:
+            break;
+        }
+        switch (asi) {
+        case ASI_REAL:
+        case ASI_REAL_IO:
+        case ASI_REAL_L:
+        case ASI_REAL_IO_L:
+        case ASI_N:
+        case ASI_NL:
+        case ASI_AIUP:
+        case ASI_AIUPL:
+        case ASI_AIUS:
+        case ASI_AIUSL:
+        case ASI_S:
+        case ASI_SL:
+        case ASI_P:
+        case ASI_PL:
+            type = GET_ASI_DIRECT;
+            break;
+        case ASI_TWINX_REAL:
+        case ASI_TWINX_REAL_L:
+        case ASI_TWINX_N:
+        case ASI_TWINX_NL:
+        case ASI_TWINX_AIUP:
+        case ASI_TWINX_AIUP_L:
+        case ASI_TWINX_AIUS:
+        case ASI_TWINX_AIUS_L:
+        case ASI_TWINX_P:
+        case ASI_TWINX_PL:
+        case ASI_TWINX_S:
+        case ASI_TWINX_SL:
+        case ASI_QUAD_LDD_PHYS:
+        case ASI_QUAD_LDD_PHYS_L:
+        case ASI_NUCLEUS_QUAD_LDD:
+        case ASI_NUCLEUS_QUAD_LDD_L:
+            type = GET_ASI_DTWINX;
+            break;
+        case ASI_BLK_COMMIT_P:
+        case ASI_BLK_COMMIT_S:
+        case ASI_BLK_AIUP_4V:
+        case ASI_BLK_AIUP_L_4V:
+        case ASI_BLK_AIUP:
+        case ASI_BLK_AIUPL:
+        case ASI_BLK_AIUS_4V:
+        case ASI_BLK_AIUS_L_4V:
+        case ASI_BLK_AIUS:
+        case ASI_BLK_AIUSL:
+        case ASI_BLK_S:
+        case ASI_BLK_SL:
+        case ASI_BLK_P:
+        case ASI_BLK_PL:
+            type = GET_ASI_BLOCK;
+            break;
+        case ASI_FL8_S:
+        case ASI_FL8_SL:
+        case ASI_FL8_P:
+        case ASI_FL8_PL:
+            memop = MO_UB;
+            type = GET_ASI_SHORT;
+            break;
+        case ASI_FL16_S:
+        case ASI_FL16_SL:
+        case ASI_FL16_P:
+        case ASI_FL16_PL:
+            memop = MO_TEUW;
+            type = GET_ASI_SHORT;
+            break;
+        }
+        /* The little-endian asis all have bit 3 set.  */
+        if (asi & 8) {
+            memop ^= MO_BSWAP;
+        }
+    }
+#endif
+
+    return (DisasASI){ type, asi, mem_idx, memop };
 }
 
-static inline void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, int size,
-                              int sign)
+static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
+                       int insn, MemOp memop)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    TCGv_i32 r_asi, r_size, r_sign;
+    DisasASI da = get_asi(dc, insn, memop);
 
-    r_asi = gen_get_asi(dc, insn, addr);
-    r_size = tcg_const_i32(tcg_ctx, size);
-    r_sign = tcg_const_i32(tcg_ctx, sign);
-    gen_helper_ld_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign);
-    tcg_temp_free_i32(tcg_ctx, r_sign);
-    tcg_temp_free_i32(tcg_ctx, r_size);
-    tcg_temp_free_i32(tcg_ctx, r_asi);
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+    case GET_ASI_DTWINX: /* Reserved for ldda.  */
+        gen_exception(dc, TT_ILL_INSN);
+        break;
+    case GET_ASI_DIRECT:
+        gen_address_mask(dc, addr);
+        tcg_gen_qemu_ld_tl(tcg_ctx, dst, addr, da.mem_idx, da.memop);
+        break;
+    default:
+        {
+            TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, memop);
+
+            save_state(dc);
+#ifdef TARGET_SPARC64
+            gen_helper_ld_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, r_asi, r_mop);
+#else
+            {
+                TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);
+                gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_mop);
+                tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64);
+                tcg_temp_free_i64(tcg_ctx, t64);
+            }
+#endif
+            tcg_temp_free_i32(tcg_ctx, r_mop);
+            tcg_temp_free_i32(tcg_ctx, r_asi);
+        }
+        break;
+    }
 }
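[Editor's note: the new get_asi() replaces the old "always call a helper" scheme with a decode step: common ASIs become GET_ASI_DIRECT and are emitted as ordinary inline guest loads/stores with the right mem_idx and endianness, while unusual ASIs keep the helper slow path. A compilable toy mirroring that decode-then-dispatch shape; everything here is illustrative, not Unicorn/QEMU API, with ASI_P assumed to be 0x80 per SPARC V9:]

#include <stdio.h>

typedef enum { MODEL_ASI_HELPER, MODEL_ASI_EXCP, MODEL_ASI_DIRECT } ASITypeModel;
typedef struct { ASITypeModel type; int mem_idx; } DisasASIModel;

static DisasASIModel decode_asi(int asi, int supervisor)
{
    if (!supervisor && asi < 0x80) {              /* v9: low ASIs are privileged */
        return (DisasASIModel){ MODEL_ASI_EXCP, 0 };
    }
    if (asi == 0x80) {                            /* ASI_P: primary, fast path */
        return (DisasASIModel){ MODEL_ASI_DIRECT, 1 };
    }
    return (DisasASIModel){ MODEL_ASI_HELPER, 1 };/* anything else: slow path */
}

int main(void)
{
    DisasASIModel da = decode_asi(0x80, 0);
    printf("type=%d mem_idx=%d\n", da.type, da.mem_idx);
    return 0;
}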
 
-static inline void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, int insn, int size)
+static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
+                       int insn, MemOp memop)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    TCGv_i32 r_asi, r_size;
+    DisasASI da = get_asi(dc, insn, memop);
 
-    r_asi = gen_get_asi(dc, insn, addr);
-    r_size = tcg_const_i32(tcg_ctx, size);
-    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, src, r_asi, r_size);
-    tcg_temp_free_i32(tcg_ctx, r_size);
-    tcg_temp_free_i32(tcg_ctx, r_asi);
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+    case GET_ASI_DTWINX: /* Reserved for stda.  */
+#ifndef TARGET_SPARC64
+        gen_exception(dc, TT_ILL_INSN);
+        break;
+#else
+        if (!(dc->def->features & CPU_FEATURE_HYPV)) {
+            /* Pre OpenSPARC CPUs don't have these */
+            gen_exception(dc, TT_ILL_INSN);
+            return;
+        }
+        /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
+         * are ST_BLKINIT_ ASIs */
+        /* fall through */
+#endif
+    case GET_ASI_DIRECT:
+        gen_address_mask(dc, addr);
+        tcg_gen_qemu_st_tl(tcg_ctx, src, addr, da.mem_idx, da.memop);
+        break;
+#if !defined(TARGET_SPARC64)
+    case GET_ASI_BCOPY:
+        /* Copy 32 bytes from the address in SRC to ADDR.  */
+        /* ??? The original qemu code suggests 4-byte alignment, dropping
+           the low bits, but the only place I can see this used is in the
+           Linux kernel with 32 byte alignment, which would make more sense
+           as a cacheline-style operation.  */
+        {
+            TCGv saddr = tcg_temp_new(tcg_ctx);
+            TCGv daddr = tcg_temp_new(tcg_ctx);
+            TCGv four = tcg_const_tl(tcg_ctx, 4);
+            TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx);
+            int i;
+
+            tcg_gen_andi_tl(tcg_ctx, saddr, src, -4);
+            tcg_gen_andi_tl(tcg_ctx, daddr, addr, -4);
+            for (i = 0; i < 32; i += 4) {
+                /* Since the loads and stores are paired, allow the
+                   copy to happen in the host endianness.  */
+                tcg_gen_qemu_ld_i32(tcg_ctx, tmp, saddr, da.mem_idx, MO_UL);
+                tcg_gen_qemu_st_i32(tcg_ctx, tmp, daddr, da.mem_idx, MO_UL);
+                tcg_gen_add_tl(tcg_ctx, saddr, saddr, four);
+                tcg_gen_add_tl(tcg_ctx, daddr, daddr, four);
+            }
+
+            tcg_temp_free(tcg_ctx, saddr);
+            tcg_temp_free(tcg_ctx, daddr);
+            tcg_temp_free(tcg_ctx, four);
+            tcg_temp_free_i32(tcg_ctx, tmp);
+        }
+        break;
+#endif
+    default:
+        {
+            TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, memop & MO_SIZE);
+
+            save_state(dc);
+#ifdef TARGET_SPARC64
+            gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, src, r_asi, r_mop);
+#else
+            {
+                TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);
+                tcg_gen_extu_tl_i64(tcg_ctx, t64, src);
+                gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_mop);
+                tcg_temp_free_i64(tcg_ctx, t64);
+            }
+#endif
+            tcg_temp_free_i32(tcg_ctx, r_mop);
+            tcg_temp_free_i32(tcg_ctx, r_asi);
+
+            /* A write to a TLB register may alter page maps.  End the TB. */
+            dc->npc = DYNAMIC_PC;
+        }
+        break;
+    }
 }
 
-static inline void gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
+                         TCGv addr, int insn)
 {
-    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    TCGv_i32 r_asi, r_size, r_rd;
+    DisasASI da = get_asi(dc, insn, MO_TEUL);
 
-    r_asi = gen_get_asi(dc, insn, addr);
-    r_size = tcg_const_i32(tcg_ctx, size);
-    r_rd = tcg_const_i32(tcg_ctx, rd);
-    gen_helper_ldf_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_asi, r_size, r_rd);
-    tcg_temp_free_i32(tcg_ctx, r_rd);
-    tcg_temp_free_i32(tcg_ctx, r_size);
-    tcg_temp_free_i32(tcg_ctx, r_asi);
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+    case GET_ASI_DIRECT:
+        gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
+        break;
+    default:
+        /* ??? Should be DAE_invalid_asi.  */
+        gen_exception(dc, TT_DATA_ACCESS);
+        break;
+    }
 }
 
-static inline void gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
+                        int insn, int rd)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    TCGv_i32 r_asi, r_size, r_rd;
+    DisasASI da = get_asi(dc, insn, MO_TEUL);
+    TCGv oldv;
 
-    r_asi = gen_get_asi(dc, insn, addr);
-    r_size = tcg_const_i32(tcg_ctx, size);
-    r_rd = tcg_const_i32(tcg_ctx, rd);
-    gen_helper_stf_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_asi, r_size, r_rd);
-    tcg_temp_free_i32(tcg_ctx, r_rd);
-    tcg_temp_free_i32(tcg_ctx, r_size);
-    tcg_temp_free_i32(tcg_ctx, r_asi);
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        return;
+    case GET_ASI_DIRECT:
+        oldv = tcg_temp_new(tcg_ctx);
+        tcg_gen_atomic_cmpxchg_tl(tcg_ctx, oldv, addr, cmpv, gen_load_gpr(dc, rd),
+                                  da.mem_idx, da.memop);
+        gen_store_gpr(dc, rd, oldv);
+        tcg_temp_free(tcg_ctx, oldv);
+        break;
+    default:
+        /* ??? Should be DAE_invalid_asi.  */
+        gen_exception(dc, TT_DATA_ACCESS);
+        break;
+    }
 }
 
-static inline void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int insn)
+static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    TCGv_i32 r_asi, r_size, r_sign;
-    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);
+    DisasASI da = get_asi(dc, insn, MO_UB);
 
-    r_asi = gen_get_asi(dc, insn, addr);
-    r_size = tcg_const_i32(tcg_ctx, 4);
-    r_sign = tcg_const_i32(tcg_ctx, 0);
-    gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign);
-    tcg_temp_free_i32(tcg_ctx, r_sign);
-    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, src, r_asi, r_size);
-    tcg_temp_free_i32(tcg_ctx, r_size);
-    tcg_temp_free_i32(tcg_ctx, r_asi);
-    tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64);
-    tcg_temp_free_i64(tcg_ctx, t64);
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+    case GET_ASI_DIRECT:
+        gen_ldstub(dc, dst, addr, da.mem_idx);
+        break;
+    default:
+        /* ??? In theory, this should be raise DAE_invalid_asi.
+           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
+        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
+            gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env);
+        } else {
+            TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, MO_UB);
+            TCGv_i64 s64, t64;
+
+            save_state(dc);
+            t64 = tcg_temp_new_i64(tcg_ctx);
+            gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_mop);
+
+            s64 = tcg_const_i64(tcg_ctx, 0xff);
+            gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, s64, r_asi, r_mop);
+            tcg_temp_free_i64(tcg_ctx, s64);
+            tcg_temp_free_i32(tcg_ctx, r_mop);
+            tcg_temp_free_i32(tcg_ctx, r_asi);
+
+            tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64);
+            tcg_temp_free_i64(tcg_ctx, t64);
+
+            /* End the TB.  */
+            dc->npc = DYNAMIC_PC;
+        }
+        break;
+    }
 }
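[Editor's note: the CF_PARALLEL branch above exists because a helper-based load followed by a store cannot be atomic when other vCPU threads run concurrently; gen_helper_exit_atomic bails out so the instruction is re-executed in an exclusive context. A comment-level C sketch of that decision, names illustrative:]

#include <stdbool.h>

typedef enum { EMIT_EXIT_ATOMIC, EMIT_HELPER_LD_ST } LdstubFallback;

/* Mirrors the tb_cflags(...) & CF_PARALLEL split in gen_ldstub_asi. */
static LdstubFallback pick_ldstub_fallback(bool parallel_tb)
{
    return parallel_tb ? EMIT_EXIT_ATOMIC   /* restart insn with vCPUs stopped */
                       : EMIT_HELPER_LD_ST; /* ld_asi then st_asi, then end TB */
}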
 
-static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
-                                int insn, int rd)
+#ifdef TARGET_SPARC64
+static void gen_ldf_asi(DisasContext *dc, TCGv addr,
+                        int insn, int size, int rd)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    TCGv_i32 r_asi, r_rd;
+    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+    TCGv_i32 d32;
+    TCGv_i64 d64;
 
-    r_asi = gen_get_asi(dc, insn, addr);
-    r_rd = tcg_const_i32(tcg_ctx, rd);
-    gen_helper_ldda_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_asi, r_rd);
-    tcg_temp_free_i32(tcg_ctx, r_rd);
-    tcg_temp_free_i32(tcg_ctx, r_asi);
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+
+    case GET_ASI_DIRECT:
+        gen_address_mask(dc, addr);
+        switch (size) {
+        case 4:
+            d32 = gen_dest_fpr_F(dc);
+            tcg_gen_qemu_ld_i32(tcg_ctx, d32, addr, da.mem_idx, da.memop);
+            gen_store_fpr_F(dc, rd, d32);
+            break;
+        case 8:
+            tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], addr, da.mem_idx,
+                                da.memop | MO_ALIGN_4);
+            break;
+        case 16:
+            d64 = tcg_temp_new_i64(tcg_ctx);
+            tcg_gen_qemu_ld_i64(tcg_ctx, d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
+            tcg_gen_addi_tl(tcg_ctx, addr, addr, 8);
+            tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd/2+1], addr, da.mem_idx,
+                                da.memop | MO_ALIGN_4);
+            tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], d64);
+            tcg_temp_free_i64(tcg_ctx, d64);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+
+    case GET_ASI_BLOCK:
+        /* Valid for lddfa on aligned registers only.  */
+        if (size == 8 && (rd & 7) == 0) {
+            MemOp memop;
+            TCGv eight;
+            int i;
+
+            gen_address_mask(dc, addr);
+
+            /* The first operation checks required alignment.  */
+            memop = da.memop | MO_ALIGN_64;
+            eight = tcg_const_tl(tcg_ctx, 8);
+            for (i = 0; ; ++i) {
+                tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2 + i], addr,
+                                    da.mem_idx, memop);
+                if (i == 7) {
+                    break;
+                }
+                tcg_gen_add_tl(tcg_ctx, addr, addr, eight);
+                memop = da.memop;
+            }
+            tcg_temp_free(tcg_ctx, eight);
+        } else {
+            gen_exception(dc, TT_ILL_INSN);
+        }
+        break;
+
+    case GET_ASI_SHORT:
+        /* Valid for lddfa only.  */
+        if (size == 8) {
+            gen_address_mask(dc, addr);
+            tcg_gen_qemu_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+        } else {
+            gen_exception(dc, TT_ILL_INSN);
+        }
+        break;
+
+    default:
+        {
+            TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi);
+            TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, da.memop);
+
+            save_state(dc);
+            /* According to the table in the UA2011 manual, the only
+               other asis that are valid for ldfa/lddfa/ldqfa are
+               the NO_FAULT asis.  We still need a helper for these,
+               but we can just use the integer asi helper for them.  */
+            switch (size) {
+            case 4:
+                d64 = tcg_temp_new_i64(tcg_ctx);
+                gen_helper_ld_asi(tcg_ctx, d64, tcg_ctx->cpu_env, addr, r_asi, r_mop);
+                d32 = gen_dest_fpr_F(dc);
+                tcg_gen_extrl_i64_i32(tcg_ctx, d32, d64);
+                tcg_temp_free_i64(tcg_ctx, d64);
+                gen_store_fpr_F(dc, rd, d32);
+                break;
+            case 8:
+                gen_helper_ld_asi(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], tcg_ctx->cpu_env, addr, r_asi, r_mop);
+                break;
+            case 16:
+                d64 = tcg_temp_new_i64(tcg_ctx);
+                gen_helper_ld_asi(tcg_ctx, d64, tcg_ctx->cpu_env, addr, r_asi, r_mop);
+                tcg_gen_addi_tl(tcg_ctx, addr, addr, 8);
+                gen_helper_ld_asi(tcg_ctx, tcg_ctx->cpu_fpr[rd/2+1], tcg_ctx->cpu_env, addr, r_asi, r_mop);
+                tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], d64);
+                tcg_temp_free_i64(tcg_ctx, d64);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+            tcg_temp_free_i32(tcg_ctx, r_mop);
+            tcg_temp_free_i32(tcg_ctx, r_asi);
+        }
+        break;
+    }
 }
 
-static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
-                                int insn, int rd)
+static void gen_stf_asi(DisasContext *dc, TCGv addr,
+                        int insn, int size, int rd)
 {
     TCGContext *tcg_ctx = dc->uc->tcg_ctx;
-    TCGv_i32 r_asi, r_size;
+    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+    TCGv_i32 d32;
+
+    switch (da.type) {
+    case GET_ASI_EXCP:
+        break;
+
+    case GET_ASI_DIRECT:
+        gen_address_mask(dc, addr);
+        switch (size) {
+        case 4:
+            d32 = gen_load_fpr_F(dc, rd);
+            tcg_gen_qemu_st_i32(tcg_ctx, d32, addr, da.mem_idx, da.memop);
+            break;
+        case 8:
+            tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], addr, da.mem_idx,
+                                da.memop | MO_ALIGN_4);
+            break;
+        case 16:
+            /* Only 4-byte alignment required.  However, it is legal for the
+               cpu to signal the alignment fault, and the OS trap handler is
+               required to fix it up.  Requiring 16-byte alignment here avoids
+               having to probe the second page before performing the first
+               write.  */
+            tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], addr, da.mem_idx,
+                                da.memop | MO_ALIGN_16);
+            tcg_gen_addi_tl(tcg_ctx, addr, addr, 8);
+            tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+
+    case GET_ASI_BLOCK:
+        /* Valid for stdfa on aligned registers only.  */
+        if (size == 8 && (rd & 7) == 0) {
+            MemOp memop;
+            TCGv eight;
+            int i;
+
+            gen_address_mask(dc, addr);
+
+            /* The first operation checks required alignment.  */
*/ + memop = da.memop | MO_ALIGN_64; + eight = tcg_const_tl(tcg_ctx, 8); + for (i = 0; ; ++i) { + tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2 + i], addr, + da.mem_idx, memop); + if (i == 7) { + break; + } + tcg_gen_add_tl(tcg_ctx, addr, addr, eight); + memop = da.memop; + } + tcg_temp_free(tcg_ctx, eight); + } else { + gen_exception(dc, TT_ILL_INSN); + } + break; + + case GET_ASI_SHORT: + /* Valid for stdfa only. */ + if (size == 8) { + gen_address_mask(dc, addr); + tcg_gen_qemu_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], addr, da.mem_idx, da.memop); + } else { + gen_exception(dc, TT_ILL_INSN); + } + break; + + default: + /* According to the table in the UA2011 manual, the only + other asis that are valid for stfa/stdfa/stqfa are + the PST* asis, which aren't currently handled. */ + gen_exception(dc, TT_ILL_INSN); + break; + } +} + +static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + DisasASI da = get_asi(dc, insn, MO_TEQ); + TCGv_i64 hi = gen_dest_gpr(dc, rd); + TCGv_i64 lo = gen_dest_gpr(dc, rd + 1); + + switch (da.type) { + case GET_ASI_EXCP: + return; + + case GET_ASI_DTWINX: + gen_address_mask(dc, addr); + tcg_gen_qemu_ld_i64(tcg_ctx, hi, addr, da.mem_idx, da.memop | MO_ALIGN_16); + tcg_gen_addi_tl(tcg_ctx, addr, addr, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, lo, addr, da.mem_idx, da.memop); + break; + + case GET_ASI_DIRECT: + { + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + gen_address_mask(dc, addr); + tcg_gen_qemu_ld_i64(tcg_ctx, tmp, addr, da.mem_idx, da.memop); + + /* Note that LE ldda acts as if each 32-bit register + result is byte swapped. Having just performed one + 64-bit bswap, we need now to swap the writebacks. */ + if ((da.memop & MO_BSWAP) == MO_TE) { + tcg_gen_extr32_i64(tcg_ctx, lo, hi, tmp); + } else { + tcg_gen_extr32_i64(tcg_ctx, hi, lo, tmp); + } + tcg_temp_free_i64(tcg_ctx, tmp); + } + break; + + default: + /* ??? In theory we've handled all of the ASIs that are valid + for ldda, and this should raise DAE_invalid_asi. However, + real hardware allows others. This can be seen with e.g. + FreeBSD 10.3 wrt ASI_IC_TAG. */ + { + TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); + TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, da.memop); + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + save_state(dc); + gen_helper_ld_asi(tcg_ctx, tmp, tcg_ctx->cpu_env, addr, r_asi, r_mop); + tcg_temp_free_i32(tcg_ctx, r_asi); + tcg_temp_free_i32(tcg_ctx, r_mop); + + /* See above. */ + if ((da.memop & MO_BSWAP) == MO_TE) { + tcg_gen_extr32_i64(tcg_ctx, lo, hi, tmp); + } else { + tcg_gen_extr32_i64(tcg_ctx, hi, lo, tmp); + } + tcg_temp_free_i64(tcg_ctx, tmp); + } + break; + } + + gen_store_gpr(dc, rd, hi); + gen_store_gpr(dc, rd + 1, lo); +} + +static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, + int insn, int rd) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + DisasASI da = get_asi(dc, insn, MO_TEQ); TCGv lo = gen_load_gpr(dc, rd + 1); + + switch (da.type) { + case GET_ASI_EXCP: + break; + + case GET_ASI_DTWINX: + gen_address_mask(dc, addr); + tcg_gen_qemu_st_i64(tcg_ctx, hi, addr, da.mem_idx, da.memop | MO_ALIGN_16); + tcg_gen_addi_tl(tcg_ctx, addr, addr, 8); + tcg_gen_qemu_st_i64(tcg_ctx, lo, addr, da.mem_idx, da.memop); + break; + + case GET_ASI_DIRECT: + { + TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); + + /* Note that LE stda acts as if each 32-bit register result is + byte swapped. We will perform one 64-bit LE store, so now + we must swap the order of the construction. 
*/ + if ((da.memop & MO_BSWAP) == MO_TE) { + tcg_gen_concat32_i64(tcg_ctx, t64, lo, hi); + } else { + tcg_gen_concat32_i64(tcg_ctx, t64, hi, lo); + } + gen_address_mask(dc, addr); + tcg_gen_qemu_st_i64(tcg_ctx, t64, addr, da.mem_idx, da.memop); + tcg_temp_free_i64(tcg_ctx, t64); + } + break; + + default: + /* ??? In theory we've handled all of the ASIs that are valid + for stda, and this should raise DAE_invalid_asi. */ + { + TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); + TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, da.memop); + TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); + + /* See above. */ + if ((da.memop & MO_BSWAP) == MO_TE) { + tcg_gen_concat32_i64(tcg_ctx, t64, lo, hi); + } else { + tcg_gen_concat32_i64(tcg_ctx, t64, hi, lo); + } + + save_state(dc); + gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_mop); + tcg_temp_free_i32(tcg_ctx, r_mop); + tcg_temp_free_i32(tcg_ctx, r_asi); + tcg_temp_free_i64(tcg_ctx, t64); + } + break; + } +} + +static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, + int insn, int rd) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + DisasASI da = get_asi(dc, insn, MO_TEQ); + TCGv oldv; + + switch (da.type) { + case GET_ASI_EXCP: + return; + case GET_ASI_DIRECT: + oldv = tcg_temp_new(tcg_ctx); + tcg_gen_atomic_cmpxchg_tl(tcg_ctx, oldv, addr, cmpv, gen_load_gpr(dc, rd), + da.mem_idx, da.memop); + gen_store_gpr(dc, rd, oldv); + tcg_temp_free(tcg_ctx, oldv); + break; + default: + /* ??? Should be DAE_invalid_asi. */ + gen_exception(dc, TT_DATA_ACCESS); + break; + } +} + +#else +static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12, + whereby "rd + 1" elicits "error: array subscript is above array". + Since we have already asserted that rd is even, the semantics + are unchanged. 
*/ + TCGv lo = gen_dest_gpr(dc, rd | 1); + TCGv hi = gen_dest_gpr(dc, rd); TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); + DisasASI da = get_asi(dc, insn, MO_TEQ); - tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, hi); - r_asi = gen_get_asi(dc, insn, addr); - r_size = tcg_const_i32(tcg_ctx, 8); - gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_size); - tcg_temp_free_i32(tcg_ctx, r_size); - tcg_temp_free_i32(tcg_ctx, r_asi); - tcg_temp_free_i64(tcg_ctx, t64); -} - -static inline void gen_casx_asi(DisasContext *dc, TCGv addr, - TCGv val2, int insn, int rd) -{ - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - TCGv val1 = gen_load_gpr(dc, rd); - TCGv dst = gen_dest_gpr(dc, rd); - TCGv_i32 r_asi = gen_get_asi(dc, insn, addr); - - gen_helper_casx_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, val1, val2, r_asi); - tcg_temp_free_i32(tcg_ctx, r_asi); - gen_store_gpr(dc, rd, dst); -} - -#elif !defined(CONFIG_USER_ONLY) - -static inline void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, int size, - int sign) -{ - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - TCGv_i32 r_asi, r_size, r_sign; - TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); - - r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26)); - r_size = tcg_const_i32(tcg_ctx, size); - r_sign = tcg_const_i32(tcg_ctx, sign); - gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign); - tcg_temp_free_i32(tcg_ctx, r_sign); - tcg_temp_free_i32(tcg_ctx, r_size); - tcg_temp_free_i32(tcg_ctx, r_asi); - tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64); - tcg_temp_free_i64(tcg_ctx, t64); -} - -static inline void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, int insn, int size) -{ - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - TCGv_i32 r_asi, r_size; - TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); - - tcg_gen_extu_tl_i64(tcg_ctx, t64, src); - r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26)); - r_size = tcg_const_i32(tcg_ctx, size); - gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_size); - tcg_temp_free_i32(tcg_ctx, r_size); - tcg_temp_free_i32(tcg_ctx, r_asi); - tcg_temp_free_i64(tcg_ctx, t64); -} - -static inline void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int insn) -{ - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - TCGv_i32 r_asi, r_size, r_sign; - TCGv_i64 r_val, t64; - - r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26)); - r_size = tcg_const_i32(tcg_ctx, 4); - r_sign = tcg_const_i32(tcg_ctx, 0); - t64 = tcg_temp_new_i64(tcg_ctx); - gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign); - tcg_temp_free(tcg_ctx, r_sign); - r_val = tcg_temp_new_i64(tcg_ctx); - tcg_gen_extu_tl_i64(tcg_ctx, r_val, src); - gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_val, r_asi, r_size); - tcg_temp_free_i64(tcg_ctx, r_val); - tcg_temp_free_i32(tcg_ctx, r_size); - tcg_temp_free_i32(tcg_ctx, r_asi); - tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64); - tcg_temp_free_i64(tcg_ctx, t64); -} - -static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr, - int insn, int rd) -{ - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - TCGv_i32 r_asi, r_size, r_sign; - TCGv t; - TCGv_i64 t64; - - r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26)); - r_size = tcg_const_i32(tcg_ctx, 8); - r_sign = tcg_const_i32(tcg_ctx, 0); - t64 = tcg_temp_new_i64(tcg_ctx); - gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign); - tcg_temp_free_i32(tcg_ctx, r_sign); - tcg_temp_free_i32(tcg_ctx, r_size); - tcg_temp_free_i32(tcg_ctx, r_asi); - - t = gen_dest_gpr(dc, rd + 1); - 
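/* Editorial sketch, not part of the patch: gen_casx_asi above lowers the
 * V9 CASXA instruction to a single tcg_gen_atomic_cmpxchg_tl. The
 * guest-visible semantics it implements are, in plain C (function and
 * parameter names are illustrative only; uint64_t per <stdint.h>):
 */
static uint64_t casx_model(uint64_t *mem, uint64_t cmpv, uint64_t newv)
{
    uint64_t oldv = *mem;   /* r[rd] always receives the old memory word */
    if (oldv == cmpv) {     /* cmpv comes from r[rs2] */
        *mem = newv;        /* the store happens only on a match */
    }
    return oldv;
}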
tcg_gen_trunc_i64_tl(tcg_ctx, t, t64); - gen_store_gpr(dc, rd + 1, t); - - tcg_gen_shri_i64(tcg_ctx, t64, t64, 32); - tcg_gen_trunc_i64_tl(tcg_ctx, hi, t64); + switch (da.type) { + case GET_ASI_EXCP: + tcg_temp_free_i64(tcg_ctx, t64); + return; + case GET_ASI_DIRECT: + gen_address_mask(dc, addr); + tcg_gen_qemu_ld_i64(tcg_ctx, t64, addr, da.mem_idx, da.memop); + break; + default: + { + TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); + TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, MO_Q); + + save_state(dc); + gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_mop); + tcg_temp_free_i32(tcg_ctx, r_mop); + tcg_temp_free_i32(tcg_ctx, r_asi); + } + break; + } + + tcg_gen_extr_i64_i32(tcg_ctx, lo, hi, t64); tcg_temp_free_i64(tcg_ctx, t64); + gen_store_gpr(dc, rd | 1, lo); gen_store_gpr(dc, rd, hi); } -static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, - int insn, int rd) +static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, + int insn, int rd) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; - TCGv_i32 r_asi, r_size; + DisasASI da = get_asi(dc, insn, MO_TEQ); TCGv lo = gen_load_gpr(dc, rd + 1); TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, hi); - r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26)); - r_size = tcg_const_i32(tcg_ctx, 8); - gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_size); - tcg_temp_free_i32(tcg_ctx, r_size); - tcg_temp_free_i32(tcg_ctx, r_asi); + + switch (da.type) { + case GET_ASI_EXCP: + break; + case GET_ASI_DIRECT: + gen_address_mask(dc, addr); + tcg_gen_qemu_st_i64(tcg_ctx, t64, addr, da.mem_idx, da.memop); + break; + case GET_ASI_BFILL: + /* Store 32 bytes of T64 to ADDR. */ + /* ??? The original qemu code suggests 8-byte alignment, dropping + the low bits, but the only place I can see this used is in the + Linux kernel with 32 byte alignment, which would make more sense + as a cacheline-style operation. 
*/ + { + TCGv d_addr = tcg_temp_new(tcg_ctx); + TCGv eight = tcg_const_tl(tcg_ctx, 8); + int i; + + tcg_gen_andi_tl(tcg_ctx, d_addr, addr, -8); + for (i = 0; i < 32; i += 8) { + tcg_gen_qemu_st_i64(tcg_ctx, t64, d_addr, da.mem_idx, da.memop); + tcg_gen_add_tl(tcg_ctx, d_addr, d_addr, eight); + } + + tcg_temp_free(tcg_ctx, d_addr); + tcg_temp_free(tcg_ctx, eight); + } + break; + default: + { + TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, da.asi); + TCGv_i32 r_mop = tcg_const_i32(tcg_ctx, MO_Q); + + save_state(dc); + gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_mop); + tcg_temp_free_i32(tcg_ctx, r_mop); + tcg_temp_free_i32(tcg_ctx, r_asi); + } + break; + } + tcg_temp_free_i64(tcg_ctx, t64); } #endif -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) -static inline void gen_cas_asi(DisasContext *dc, TCGv addr, - TCGv val2, int insn, int rd) -{ - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - TCGv val1 = gen_load_gpr(dc, rd); - TCGv dst = gen_dest_gpr(dc, rd); -#ifdef TARGET_SPARC64 - TCGv_i32 r_asi = gen_get_asi(dc, insn, addr); -#else - TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26)); -#endif - - gen_helper_cas_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, val1, val2, r_asi); - tcg_temp_free_i32(tcg_ctx, r_asi); - gen_store_gpr(dc, rd, dst); -} - -static inline void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) -{ - TCGContext *tcg_ctx = dc->uc->tcg_ctx; - TCGv_i64 r_val; - TCGv_i32 r_asi, r_size; - - gen_ld_asi(dc, dst, addr, insn, 1, 0); - - r_val = tcg_const_i64(tcg_ctx, 0xffULL); - r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26)); - r_size = tcg_const_i32(tcg_ctx, 1); - gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_val, r_asi, r_size); - tcg_temp_free_i32(tcg_ctx, r_size); - tcg_temp_free_i32(tcg_ctx, r_asi); - tcg_temp_free_i64(tcg_ctx, r_val); -} -#endif - static TCGv get_src1(DisasContext *dc, unsigned int insn) { unsigned int rs1 = GET_FIELD(insn, 13, 17); @@ -2394,11 +2972,11 @@ static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) the later. 
*/ c32 = tcg_temp_new_i32(tcg_ctx); if (cmp->is_bool) { - tcg_gen_trunc_i64_i32(tcg_ctx, c32, cmp->c1); + tcg_gen_extrl_i64_i32(tcg_ctx, c32, cmp->c1); } else { TCGv_i64 c64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_setcond_i64(tcg_ctx, cmp->cond, c64, cmp->c1, cmp->c2); - tcg_gen_trunc_i64_i32(tcg_ctx, c32, c64); + tcg_gen_extrl_i64_i32(tcg_ctx, c32, c64); tcg_temp_free_i64(tcg_ctx, c64); } @@ -2438,9 +3016,8 @@ static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs) gen_update_fprs_dirty(dc, qd); } -static inline void gen_load_trap_state_at_tl(DisasContext *dc, TCGv_ptr r_tsptr, TCGv_ptr cpu_env) +static inline void gen_load_trap_state_at_tl(TCGContext *tcg_ctx, TCGv_ptr r_tsptr, TCGv_env cpu_env) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv_i32 r_tl = tcg_temp_new_i32(tcg_ctx); /* load env->tl into r_tl */ @@ -2473,9 +3050,9 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, int shift, imask, omask; if (cc) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, s1); - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, s2); - tcg_gen_sub_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, s1, s2); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src, s1); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_src2, s2); + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, s1, s2); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB); dc->cc_op = CC_OP_SUB; } @@ -2568,9 +3145,8 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, tcg_temp_free(tcg_ctx, t2); } -static void gen_alignaddr(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, bool left) +static void gen_alignaddr(TCGContext *tcg_ctx, TCGv dst, TCGv s1, TCGv s2, bool left) { - TCGContext *tcg_ctx = dc->uc->tcg_ctx; TCGv tmp = tcg_temp_new(tcg_ctx); tcg_gen_add_tl(tcg_ctx, tmp, s1, s2); @@ -2578,7 +3154,7 @@ static void gen_alignaddr(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, bool lef if (left) { tcg_gen_neg_tl(tcg_ctx, tmp, tmp); } - tcg_gen_deposit_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_gsr, *(TCGv *)tcg_ctx->cpu_gsr, tmp, 0, 3); + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gsr, tcg_ctx->cpu_gsr, tmp, 0, 3); tcg_temp_free(tcg_ctx, tmp); } @@ -2617,7 +3193,7 @@ static void gen_faligndata(TCGContext *tcg_ctx, TCGv dst, TCGv gsr, TCGv s1, TCG goto nfpu_insn; /* before an instruction, dc->pc must be static */ -static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_insn) +static void disas_sparc_insn(DisasContext * dc, unsigned int insn) { TCGContext *tcg_ctx = dc->uc->tcg_ctx; unsigned int opc, rs1, rs2, rd; @@ -2626,17 +3202,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64; target_long simm; - if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { - tcg_gen_debug_insn_start(tcg_ctx, dc->pc); - } - - // Unicorn: trace this instruction on request - if (hook_insn && HOOK_EXISTS_BOUNDED(dc->uc, UC_HOOK_CODE, dc->pc)) { - gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, dc->uc, dc->pc); - // the callback might want to stop emulation immediately - check_exit_request(tcg_ctx); - } - opc = GET_FIELD(insn, 0, 1); rd = GET_FIELD(insn, 2, 6); @@ -2668,7 +3233,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins target = GET_FIELD_SP(insn, 0, 13) | (GET_FIELD_SP(insn, 20, 21) << 14); target = sign_extend(target, 16); - target = (int32_t)((uint32_t)target << 2); + target <<= 2; cpu_src1 = get_src1(dc, insn); do_branch_reg(dc, target, insn, cpu_src1); goto jmp_insn; @@ -2681,7 
+3246,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins } target = GET_FIELD_SP(insn, 0, 18); target = sign_extend(target, 19); - target = (int32_t)((uint32_t)target << 2); + target <<= 2; do_fbranch(dc, target, insn, cc); goto jmp_insn; } @@ -2695,7 +3260,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins { target = GET_FIELD(insn, 10, 31); target = sign_extend(target, 22); - target = (int32_t)((uint32_t)target << 2); + target <<= 2; do_branch(dc, target, insn, 0); goto jmp_insn; } @@ -2706,7 +3271,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins } target = GET_FIELD(insn, 10, 31); target = sign_extend(target, 22); - target = (int32_t)((uint32_t)target << 2); + target <<= 2; do_fbranch(dc, target, insn, 0); goto jmp_insn; } @@ -2728,7 +3293,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins break; case 1: /*CALL*/ { - target_long target = (int)(((unsigned int)(GET_FIELDs(insn, 2, 31))) << 2); + target_long target = GET_FIELDs(insn, 2, 31) << 2; TCGv o7 = gen_dest_gpr(dc, 15); tcg_gen_movi_tl(tcg_ctx, o7, dc->pc); @@ -2752,7 +3317,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins if (xop == 0x3a) { /* generate trap */ int cond = GET_FIELD(insn, 3, 6); TCGv_i32 trap; - int l1 = -1, mask; + TCGLabel *l1 = NULL; + int mask; if (cond == 0) { /* Trap never. */ @@ -2768,14 +3334,14 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins /* V9 icc/xcc */ int cc = GET_FIELD_SP(insn, 11, 12); if (cc == 0) { - gen_compare(dc, &cmp, 0, cond); + gen_compare(&cmp, 0, cond, dc); } else if (cc == 2) { - gen_compare(dc, &cmp, 1, cond); + gen_compare(&cmp, 1, cond, dc); } else { goto illegal_insn; } #else - gen_compare(dc, &cmp, 0, cond); + gen_compare(&cmp, 0, cond, dc); #endif l1 = gen_new_label(tcg_ctx); tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(cmp.cond), @@ -2793,7 +3359,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins rs1 = GET_FIELD_SP(insn, 14, 18); if (IS_IMM) { - rs2 = GET_FIELD_SP(insn, 0, 6); + rs2 = GET_FIELD_SP(insn, 0, 7); if (rs1 == 0) { tcg_gen_movi_i32(tcg_ctx, trap, (rs2 & mask) + TT_TRAP); /* Signal that the trap value is fully constant. */ @@ -2821,7 +3387,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins if (cond == 8) { /* An unconditional trap ends the TB. */ - dc->is_br = 1; + dc->base.is_jmp = DISAS_NORETURN; goto jmp_insn; } else { /* A conditional trap falls through to the next insn. 
*/ @@ -2833,14 +3399,42 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins switch(rs1) { case 0: /* rdy */ #ifndef TARGET_SPARC64 - /* undefined in the SPARCv8 manual, rdy on the microSPARC II */ - case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07: - case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: - /* stbar in the SPARCv8 manual, rdy on the microSPARC II */ - case 0x0f: - /* implementation-dependent in the SPARCv8 manual, rdy on the microSPARC II */ - case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17: - case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: + case 0x01: /* undefined in the SPARCv8 + manual, rdy on the microSPARC II */ + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0a: + case 0x0b: + case 0x0c: + case 0x0d: + case 0x0e: + + case 0x0f: /* stbar in the SPARCv8 manual, + rdy on the microSPARC II */ + case 0x10: /* implementation-dependent in the + SPARCv8 manual, rdy on the microSPARC II */ + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1a: + case 0x1b: + case 0x1c: + case 0x1d: + case 0x1e: + case 0x1f: + /* Read Asr17 */ if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) { TCGv t = gen_dest_gpr(dc, rd); @@ -2850,7 +3444,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins break; } #endif - gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_y); + gen_store_gpr(dc, rd, tcg_ctx->cpu_y); break; #ifdef TARGET_SPARC64 case 0x2: /* V9 rdccr */ @@ -2859,19 +3453,29 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins gen_store_gpr(dc, rd, cpu_dst); break; case 0x3: /* V9 rdasi */ - tcg_gen_ext_i32_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_asi); + tcg_gen_movi_tl(tcg_ctx, cpu_dst, dc->asi); gen_store_gpr(dc, rd, cpu_dst); break; case 0x4: /* V9 rdtick */ { TCGv_ptr r_tickptr; + TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(tcg_ctx); + r_const = tcg_const_i32(tcg_ctx, dc->mem_idx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, tick)); - gen_helper_tick_get_count(tcg_ctx, cpu_dst, r_tickptr); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } + gen_helper_tick_get_count(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, r_tickptr, + r_const); tcg_temp_free_ptr(tcg_ctx, r_tickptr); + tcg_temp_free_i32(tcg_ctx, r_const); gen_store_gpr(dc, rd, cpu_dst); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_end(); + } } break; case 0x5: /* V9 rdpc */ @@ -2895,29 +3499,51 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } - gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_gsr); + gen_store_gpr(dc, rd, tcg_ctx->cpu_gsr); break; case 0x16: /* Softint */ - tcg_gen_ext_i32_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_softint); + tcg_gen_ld32s_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, softint)); gen_store_gpr(dc, rd, cpu_dst); break; case 0x17: /* Tick compare */ - gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_tick_cmpr); + gen_store_gpr(dc, rd, tcg_ctx->cpu_tick_cmpr); break; case 0x18: /* System tick */ { TCGv_ptr r_tickptr; + TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(tcg_ctx); + r_const = tcg_const_i32(tcg_ctx, dc->mem_idx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, stick)); 
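/* Editorial note, not part of the patch: Unicorn does not use QEMU's
 * icount mode, so the gen_io_start()/gen_io_end() bracketing that
 * upstream places around tick reads is left commented out throughout
 * this file; the tb_cflags(dc->base.tb) & CF_USE_ICOUNT tests are kept
 * so the control flow stays recognizably close to upstream QEMU. */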
- gen_helper_tick_get_count(tcg_ctx, cpu_dst, r_tickptr); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } + gen_helper_tick_get_count(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, r_tickptr, + r_const); tcg_temp_free_ptr(tcg_ctx, r_tickptr); + tcg_temp_free_i32(tcg_ctx, r_const); gen_store_gpr(dc, rd, cpu_dst); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_end(); + } } break; case 0x19: /* System tick compare */ - gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_stick_cmpr); + gen_store_gpr(dc, rd, tcg_ctx->cpu_stick_cmpr); + break; + case 0x1a: /* UltraSPARC-T1 Strand status */ + /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe + * this ASR as impl. dep + */ + CHECK_IU_FEATURE(dc, HYPV); + { + TCGv t = gen_dest_gpr(dc, rd); + tcg_gen_movi_tl(tcg_ctx, t, 1UL); + gen_store_gpr(dc, rd, t); + } break; case 0x10: /* Performance Control */ case 0x11: /* Performance Instrumentation Counter */ @@ -2928,7 +3554,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins default: goto illegal_insn; } -#if !defined(CONFIG_USER_ONLY) } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */ #ifndef TARGET_SPARC64 if (!supervisor(dc)) { @@ -2943,22 +3568,23 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins rs1 = GET_FIELD(insn, 13, 17); switch (rs1) { case 0: // hpstate - // gen_op_rdhpstate(); + tcg_gen_ld_i64(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, hpstate)); break; case 1: // htstate // gen_op_rdhtstate(); break; case 3: // hintp - tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_hintp); + tcg_gen_mov_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_hintp); break; case 5: // htba - tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_htba); + tcg_gen_mov_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_htba); break; case 6: // hver - tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_hver); + tcg_gen_mov_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_hver); break; case 31: // hstick_cmpr - tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_hstick_cmpr); + tcg_gen_mov_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_hstick_cmpr); break; default: goto illegal_insn; @@ -2979,7 +3605,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); - gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tpc)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); @@ -2990,7 +3616,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); - gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tnpc)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); @@ -3001,7 +3627,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); - gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tstate)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); @@ -3011,7 +3637,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins { TCGv_ptr r_tsptr = tcg_temp_new_ptr(tcg_ctx); - gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + 
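/* Editorial sketch, not part of the patch: gen_load_trap_state_at_tl,
 * used by the rdpr/wrpr cases in this block, materializes a pointer to
 * the trap-state slot for the current trap level. Equivalent C, assuming
 * the usual CPUSPARCState layout (a ts[] array indexed by tl):
 */
static inline trap_state *trap_state_at_tl(CPUSPARCState *env)
{
    return &env->ts[env->tl];   /* one trap_state record per trap level */
}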
gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tt)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); @@ -3020,16 +3646,26 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 4: // tick { TCGv_ptr r_tickptr; + TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(tcg_ctx); + r_const = tcg_const_i32(tcg_ctx, dc->mem_idx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, tick)); - gen_helper_tick_get_count(tcg_ctx, cpu_tmp0, r_tickptr); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } + gen_helper_tick_get_count(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + r_tickptr, r_const); tcg_temp_free_ptr(tcg_ctx, r_tickptr); + tcg_temp_free_i32(tcg_ctx, r_const); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_end(); + } } break; case 5: // tba - tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_tbr); + tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_tbr); break; case 6: // pstate tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, @@ -3075,40 +3711,38 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins CHECK_IU_FEATURE(dc, HYPV); if (!hypervisor(dc)) goto priv_insn; - tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_ssr); + tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_ssr); break; case 31: // ver - tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_ver); + tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_ver); break; case 15: // fq default: goto illegal_insn; } #else - tcg_gen_ext_i32_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_wim); + tcg_gen_ext_i32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_wim); #endif gen_store_gpr(dc, rd, cpu_tmp0); break; } else if (xop == 0x2b) { /* rdtbr / V9 flushw */ #ifdef TARGET_SPARC64 - save_state(dc); gen_helper_flushw(tcg_ctx, tcg_ctx->cpu_env); #else if (!supervisor(dc)) goto priv_insn; - gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_tbr); + gen_store_gpr(dc, rd, tcg_ctx->cpu_tbr); #endif break; -#endif } else if (xop == 0x34) { /* FPU Operations */ if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } - gen_op_clear_ieee_excp_and_FTT(dc); + gen_op_clear_ieee_excp_and_FTT(tcg_ctx); rs1 = GET_FIELD(insn, 13, 17); rs2 = GET_FIELD(insn, 27, 31); xop = GET_FIELD(insn, 18, 26); - save_state(dc); + switch (xop) { case 0x1: /* fmovs */ cpu_src1_32 = gen_load_fpr_F(dc, rs2); @@ -3279,11 +3913,10 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } - gen_op_clear_ieee_excp_and_FTT(dc); + gen_op_clear_ieee_excp_and_FTT(tcg_ctx); rs1 = GET_FIELD(insn, 13, 17); rs2 = GET_FIELD(insn, 27, 31); xop = GET_FIELD(insn, 18, 26); - save_state(dc); #ifdef TARGET_SPARC64 #define FMOVR(sz) \ @@ -3291,7 +3924,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins DisasCompare cmp; \ cond = GET_FIELD_SP(insn, 10, 12); \ cpu_src1 = get_src1(dc, insn); \ - gen_compare_reg(dc, &cmp, cond, cpu_src1); \ + gen_compare_reg(tcg_ctx, &cmp, cond, cpu_src1); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ free_compare(tcg_ctx, &cmp); \ } while (0) @@ -3315,7 +3948,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins do { \ DisasCompare cmp; \ cond = GET_FIELD_SP(insn, 14, 17); \ - gen_fcompare(dc, &cmp, fcc, cond); \ + gen_fcompare(tcg_ctx, &cmp, fcc, cond); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ free_compare(tcg_ctx, &cmp); \ } while (0) @@ -3365,7 +3998,7 @@ static void 
disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins do { \ DisasCompare cmp; \ cond = GET_FIELD_SP(insn, 14, 17); \ - gen_compare(dc, &cmp, xcc, cond); \ + gen_compare(&cmp, xcc, cond, dc); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ free_compare(tcg_ctx, &cmp); \ } while (0) @@ -3395,34 +4028,34 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 0x51: /* fcmps, V9 %fcc */ cpu_src1_32 = gen_load_fpr_F(dc, rs1); cpu_src2_32 = gen_load_fpr_F(dc, rs2); - gen_op_fcmps(dc, rd & 3, cpu_src1_32, cpu_src2_32); + gen_op_fcmps(tcg_ctx, rd & 3, cpu_src1_32, cpu_src2_32); break; case 0x52: /* fcmpd, V9 %fcc */ cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_op_fcmpd(dc, rd & 3, cpu_src1_64, cpu_src2_64); + gen_op_fcmpd(tcg_ctx, rd & 3, cpu_src1_64, cpu_src2_64); break; case 0x53: /* fcmpq, V9 %fcc */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT0(dc, QFPREG(rs1)); - gen_op_load_fpr_QT1(dc, QFPREG(rs2)); - gen_op_fcmpq(dc, rd & 3); + gen_op_load_fpr_QT0(tcg_ctx, QFPREG(rs1)); + gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs2)); + gen_op_fcmpq(tcg_ctx, rd & 3); break; case 0x55: /* fcmpes, V9 %fcc */ cpu_src1_32 = gen_load_fpr_F(dc, rs1); cpu_src2_32 = gen_load_fpr_F(dc, rs2); - gen_op_fcmpes(dc, rd & 3, cpu_src1_32, cpu_src2_32); + gen_op_fcmpes(tcg_ctx, rd & 3, cpu_src1_32, cpu_src2_32); break; case 0x56: /* fcmped, V9 %fcc */ cpu_src1_64 = gen_load_fpr_D(dc, rs1); cpu_src2_64 = gen_load_fpr_D(dc, rs2); - gen_op_fcmped(dc, rd & 3, cpu_src1_64, cpu_src2_64); + gen_op_fcmped(tcg_ctx, rd & 3, cpu_src1_64, cpu_src2_64); break; case 0x57: /* fcmpeq, V9 %fcc */ CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT0(dc, QFPREG(rs1)); - gen_op_load_fpr_QT1(dc, QFPREG(rs2)); - gen_op_fcmpeq(dc, rd & 3); + gen_op_load_fpr_QT0(tcg_ctx, QFPREG(rs1)); + gen_op_load_fpr_QT1(tcg_ctx, QFPREG(rs2)); + gen_op_fcmpeq(tcg_ctx, rd & 3); break; default: goto illegal_insn; @@ -3542,7 +4175,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins switch (xop & ~0x10) { case 0x0: /* add */ if (xop & 0x10) { - gen_op_add_cc(dc, cpu_dst, cpu_src1, cpu_src2); + gen_op_add_cc(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADD); dc->cc_op = CC_OP_ADD; } else { @@ -3552,7 +4185,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 0x1: /* and */ tcg_gen_and_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } @@ -3560,7 +4193,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 0x2: /* or */ tcg_gen_or_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } @@ -3568,14 +4201,14 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 0x3: /* xor */ tcg_gen_xor_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } break; case 0x4: /* sub 
*/ if (xop & 0x10) { - gen_op_sub_cc(dc, cpu_dst, cpu_src1, cpu_src2); + gen_op_sub_cc(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB); dc->cc_op = CC_OP_SUB; } else { @@ -3585,7 +4218,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 0x5: /* andn */ tcg_gen_andc_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } @@ -3593,7 +4226,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 0x6: /* orn */ tcg_gen_orc_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } @@ -3601,7 +4234,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 0x7: /* xorn */ tcg_gen_eqv_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } @@ -3617,18 +4250,18 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins #endif case 0xa: /* umul */ CHECK_IU_FEATURE(dc, MUL); - gen_op_umul(dc, cpu_dst, cpu_src1, cpu_src2); + gen_op_umul(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } break; case 0xb: /* smul */ CHECK_IU_FEATURE(dc, MUL); - gen_op_smul(dc, cpu_dst, cpu_src1, cpu_src2); + gen_op_smul(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); if (xop & 0x10) { - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_cc_dst, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); dc->cc_op = CC_OP_LOGIC; } @@ -3673,13 +4306,13 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins cpu_src2 = get_src2(dc, insn); switch (xop) { case 0x20: /* taddcc */ - gen_op_add_cc(dc, cpu_dst, cpu_src1, cpu_src2); + gen_op_add_cc(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_TADD); dc->cc_op = CC_OP_TADD; break; case 0x21: /* tsubcc */ - gen_op_sub_cc(dc, cpu_dst, cpu_src1, cpu_src2); + gen_op_sub_cc(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_TSUB); dc->cc_op = CC_OP_TSUB; @@ -3698,7 +4331,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins break; case 0x24: /* mulscc */ update_psr(dc); - gen_op_mulscc(dc, cpu_dst, cpu_src1, cpu_src2); + gen_op_mulscc(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADD); dc->cc_op = CC_OP_ADD; @@ -3744,16 +4377,46 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins switch(rd) { case 0: /* wry */ tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); - tcg_gen_andi_tl(tcg_ctx, *(TCGv 
*)tcg_ctx->cpu_y, cpu_tmp0, 0xffffffff); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_y, cpu_tmp0, 0xffffffff); break; #ifndef TARGET_SPARC64 - /* undefined in the SPARCv8 manual, nop on the microSPARC II */ - case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07: - case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: case 0x0f: - - /* implementation-dependent in the SPARCv8 manual, nop on the microSPARC II */ - case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17: - case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: + case 0x01: /* undefined in the + SPARCv8 manual, nop + on the microSPARC II */ + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0a: + case 0x0b: + case 0x0c: + case 0x0d: + case 0x0e: + case 0x0f: + + case 0x10: /* implementation-dependent + in the SPARCv8 + manual, nop on the + microSPARC II */ + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1a: + case 0x1b: + case 0x1c: + case 0x1d: + case 0x1e: + case 0x1f: if ((rd == 0x13) && (dc->def->features & CPU_FEATURE_POWERDOWN)) { /* LEON3 power-down */ @@ -3771,28 +4434,33 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 0x3: /* V9 wrasi */ tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, 0xff); - tcg_gen_trunc_tl_i32(tcg_ctx, tcg_ctx->cpu_asi, cpu_tmp0); + tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, asi)); + /* End TB to notice changed ASI. */ + save_state(dc); + gen_op_next_insn(tcg_ctx); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + dc->base.is_jmp = DISAS_NORETURN; break; case 0x6: /* V9 wrfprs */ tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); tcg_gen_trunc_tl_i32(tcg_ctx, tcg_ctx->cpu_fprs, cpu_tmp0); + dc->fprs_dirty = 0; save_state(dc); - gen_op_next_insn(dc); - tcg_gen_exit_tb(tcg_ctx, 0); - dc->is_br = 1; + gen_op_next_insn(tcg_ctx); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + dc->base.is_jmp = DISAS_NORETURN; break; case 0xf: /* V9 sir, nop if user */ -#if !defined(CONFIG_USER_ONLY) if (supervisor(dc)) { ; // XXX } -#endif break; case 0x13: /* Graphics Status */ if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } - tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_gsr, cpu_src1, cpu_src2); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_gsr, cpu_src1, cpu_src2); break; case 0x14: /* Softint set */ if (!supervisor(dc)) @@ -3813,28 +4481,29 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins gen_helper_write_softint(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); break; case 0x17: /* Tick compare */ -#if !defined(CONFIG_USER_ONLY) if (!supervisor(dc)) goto illegal_insn; -#endif { TCGv_ptr r_tickptr; - tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tick_cmpr, cpu_src1, + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_tick_cmpr, cpu_src1, cpu_src2); r_tickptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, tick)); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } gen_helper_tick_set_limit(tcg_ctx, r_tickptr, - *(TCGv *)tcg_ctx->cpu_tick_cmpr); + tcg_ctx->cpu_tick_cmpr); tcg_temp_free_ptr(tcg_ctx, r_tickptr); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; } break; case 0x18: /* System tick */ -#if !defined(CONFIG_USER_ONLY) if (!supervisor(dc)) goto 
illegal_insn; -#endif { TCGv_ptr r_tickptr; @@ -3843,27 +4512,35 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins r_tickptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, stick)); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } gen_helper_tick_set_count(tcg_ctx, r_tickptr, cpu_tmp0); tcg_temp_free_ptr(tcg_ctx, r_tickptr); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; } break; case 0x19: /* System tick compare */ -#if !defined(CONFIG_USER_ONLY) if (!supervisor(dc)) goto illegal_insn; -#endif { TCGv_ptr r_tickptr; - tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_stick_cmpr, cpu_src1, + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_stick_cmpr, cpu_src1, cpu_src2); r_tickptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, offsetof(CPUSPARCState, stick)); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } gen_helper_tick_set_limit(tcg_ctx, r_tickptr, - *(TCGv *)tcg_ctx->cpu_stick_cmpr); + tcg_ctx->cpu_stick_cmpr); tcg_temp_free_ptr(tcg_ctx, r_tickptr); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; } break; @@ -3877,7 +4554,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins } } break; -#if !defined(CONFIG_USER_ONLY) case 0x31: /* wrpsr, V9 saved, restored */ { if (!supervisor(dc)) @@ -3905,9 +4581,9 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_FLAGS); dc->cc_op = CC_OP_FLAGS; save_state(dc); - gen_op_next_insn(dc); - tcg_gen_exit_tb(tcg_ctx, 0); - dc->is_br = 1; + gen_op_next_insn(tcg_ctx); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + dc->base.is_jmp = DISAS_NORETURN; #endif } break; @@ -3924,7 +4600,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); - gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tpc)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); @@ -3935,7 +4611,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); - gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tnpc)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); @@ -3946,7 +4622,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); - gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tstate)); @@ -3958,7 +4634,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins TCGv_ptr r_tsptr; r_tsptr = tcg_temp_new_ptr(tcg_ctx); - gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + gen_load_trap_state_at_tl(tcg_ctx, r_tsptr, tcg_ctx->cpu_env); tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, r_tsptr, offsetof(trap_state, tt)); tcg_temp_free_ptr(tcg_ctx, r_tsptr); @@ -3971,17 +4647,28 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins r_tickptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, 
tcg_ctx->cpu_env, offsetof(CPUSPARCState, tick)); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } gen_helper_tick_set_count(tcg_ctx, r_tickptr, cpu_tmp0); tcg_temp_free_ptr(tcg_ctx, r_tickptr); + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; } break; case 5: // tba - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tbr, cpu_tmp0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_tbr, cpu_tmp0); break; case 6: // pstate save_state(dc); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } gen_helper_wrpstate(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_end(); + } dc->npc = DYNAMIC_PC; break; case 7: // tl @@ -3991,7 +4678,13 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins dc->npc = DYNAMIC_PC; break; case 8: // pil + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } gen_helper_wrpil(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_end(); + } break; case 9: // cwp gen_helper_wrcwp(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); @@ -4023,22 +4716,21 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins break; case 16: // UA2005 gl CHECK_IU_FEATURE(dc, GL); - tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, - offsetof(CPUSPARCState, gl)); + gen_helper_wrgl(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); break; case 26: // UA2005 strand status CHECK_IU_FEATURE(dc, HYPV); if (!hypervisor(dc)) goto priv_insn; - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_ssr, cpu_tmp0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_ssr, cpu_tmp0); break; default: goto illegal_insn; } #else - tcg_gen_trunc_tl_i32(tcg_ctx, *(TCGv *)tcg_ctx->cpu_wim, cpu_tmp0); + tcg_gen_trunc_tl_i32(tcg_ctx, tcg_ctx->cpu_wim, cpu_tmp0); if (dc->def->nwindows != 32) { - tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_wim, *(TCGv *)tcg_ctx->cpu_wim, + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_wim, tcg_ctx->cpu_wim, (1 << dc->def->nwindows) - 1); } #endif @@ -4049,7 +4741,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins #ifndef TARGET_SPARC64 if (!supervisor(dc)) goto priv_insn; - tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tbr, cpu_src1, cpu_src2); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_tbr, cpu_src1, cpu_src2); #else CHECK_IU_FEATURE(dc, HYPV); if (!hypervisor(dc)) @@ -4058,32 +4750,42 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); switch (rd) { case 0: // hpstate - // XXX gen_op_wrhpstate(); + tcg_gen_st_i64(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, + hpstate)); save_state(dc); - gen_op_next_insn(dc); - tcg_gen_exit_tb(tcg_ctx, 0); - dc->is_br = 1; + gen_op_next_insn(tcg_ctx); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + dc->base.is_jmp = DISAS_NORETURN; break; case 1: // htstate // XXX gen_op_wrhtstate(); break; case 3: // hintp - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_hintp, cpu_tmp0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_hintp, cpu_tmp0); break; case 5: // htba - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_htba, cpu_tmp0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_htba, cpu_tmp0); break; case 31: // hstick_cmpr { TCGv_ptr r_tickptr; - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_hstick_cmpr, cpu_tmp0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_hstick_cmpr, cpu_tmp0); r_tickptr = tcg_temp_new_ptr(tcg_ctx); tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, 
offsetof(CPUSPARCState, hstick)); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } gen_helper_tick_set_limit(tcg_ctx, r_tickptr, - *(TCGv *)tcg_ctx->cpu_hstick_cmpr); + tcg_ctx->cpu_hstick_cmpr); tcg_temp_free_ptr(tcg_ctx, r_tickptr); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_end(); + } + /* End TB to handle timer interrupt */ + dc->base.is_jmp = DISAS_EXIT; } break; case 6: // hver readonly @@ -4093,7 +4795,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins #endif } break; -#endif #ifdef TARGET_SPARC64 case 0x2c: /* V9 movcc */ { @@ -4104,14 +4805,14 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins if (insn & (1 << 18)) { if (cc == 0) { - gen_compare(dc, &cmp, 0, cond); + gen_compare(&cmp, 0, cond, dc); } else if (cc == 2) { - gen_compare(dc, &cmp, 1, cond); + gen_compare(&cmp, 1, cond, dc); } else { goto illegal_insn; } } else { - gen_fcompare(dc, &cmp, cc, cond); + gen_fcompare(tcg_ctx, &cmp, cc, cond); } /* The get_src2 above loaded the normal 13-bit @@ -4135,7 +4836,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins gen_store_gpr(dc, rd, cpu_dst); break; case 0x2e: /* V9 popc */ - gen_helper_popc(tcg_ctx, cpu_dst, cpu_src2); + tcg_gen_ctpop_tl(tcg_ctx, cpu_dst, cpu_src2); gen_store_gpr(dc, rd, cpu_dst); break; case 0x2f: /* V9 movr */ @@ -4144,7 +4845,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins DisasCompare cmp; TCGv dst; - gen_compare_reg(dc, &cmp, cond, cpu_src1); + gen_compare_reg(tcg_ctx, &cmp, cond, cpu_src1); /* The get_src2 above loaded the normal 13-bit immediate field, not the 10-bit field we have @@ -4288,14 +4989,14 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); - gen_alignaddr(dc, cpu_dst, cpu_src1, cpu_src2, 0); + gen_alignaddr(tcg_ctx, cpu_dst, cpu_src1, cpu_src2, 0); gen_store_gpr(dc, rd, cpu_dst); break; case 0x01a: /* VIS I alignaddrl */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); - gen_alignaddr(dc, cpu_dst, cpu_src1, cpu_src2, 1); + gen_alignaddr(tcg_ctx, cpu_dst, cpu_src1, cpu_src2, 1); gen_store_gpr(dc, rd, cpu_dst); break; case 0x019: /* VIS II bmask */ @@ -4303,7 +5004,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins cpu_src1 = gen_load_gpr(dc, rs1); cpu_src2 = gen_load_gpr(dc, rs2); tcg_gen_add_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); - tcg_gen_deposit_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_gsr, *(TCGv *)tcg_ctx->cpu_gsr, cpu_dst, 32, 32); + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gsr, tcg_ctx->cpu_gsr, cpu_dst, 32, 32); gen_store_gpr(dc, rd, cpu_dst); break; case 0x020: /* VIS I fcmple16 */ @@ -4398,14 +5099,14 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs2); cpu_dst_32 = gen_dest_fpr_F(dc); - gen_helper_fpack16(tcg_ctx, cpu_dst_32, *(TCGv *)tcg_ctx->cpu_gsr, cpu_src1_64); + gen_helper_fpack16(tcg_ctx, cpu_dst_32, tcg_ctx->cpu_gsr, cpu_src1_64); gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x03d: /* VIS I fpackfix */ CHECK_FPU_FEATURE(dc, VIS1); cpu_src1_64 = gen_load_fpr_D(dc, rs2); cpu_dst_32 = gen_dest_fpr_F(dc); - gen_helper_fpackfix(tcg_ctx, cpu_dst_32, *(TCGv *)tcg_ctx->cpu_gsr, cpu_src1_64); + gen_helper_fpackfix(tcg_ctx, cpu_dst_32, 
tcg_ctx->cpu_gsr, cpu_src1_64); gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x03e: /* VIS I pdist */ @@ -4618,8 +5319,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins #endif #ifdef TARGET_SPARC64 } else if (xop == 0x39) { /* V9 return */ - TCGv_i32 r_const; - save_state(dc); cpu_src1 = get_src1(dc, insn); cpu_tmp0 = get_temp_tl(dc); @@ -4637,10 +5336,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins } gen_helper_restore(tcg_ctx, tcg_ctx->cpu_env); gen_mov_pc_npc(dc); - r_const = tcg_const_i32(tcg_ctx, 3); - gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, cpu_tmp0); + gen_check_align(tcg_ctx, cpu_tmp0, 3); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_npc, cpu_tmp0); dc->npc = DYNAMIC_PC; goto jmp_insn; #endif @@ -4662,33 +5359,25 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins switch (xop) { case 0x38: /* jmpl */ { - TCGv t; - TCGv_i32 r_const; - - t = gen_dest_gpr(dc, rd); + TCGv t = gen_dest_gpr(dc, rd); tcg_gen_movi_tl(tcg_ctx, t, dc->pc); gen_store_gpr(dc, rd, t); + gen_mov_pc_npc(dc); - r_const = tcg_const_i32(tcg_ctx, 3); - gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); + gen_check_align(tcg_ctx, cpu_tmp0, 3); gen_address_mask(dc, cpu_tmp0); - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, cpu_tmp0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_npc, cpu_tmp0); dc->npc = DYNAMIC_PC; } goto jmp_insn; -#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) +#if !defined(TARGET_SPARC64) case 0x39: /* rett, V9 return */ { - TCGv_i32 r_const; - if (!supervisor(dc)) goto priv_insn; gen_mov_pc_npc(dc); - r_const = tcg_const_i32(tcg_ctx, 3); - gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); - tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, cpu_tmp0); + gen_check_align(tcg_ctx, cpu_tmp0, 3); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_npc, cpu_tmp0); dc->npc = DYNAMIC_PC; gen_helper_rett(tcg_ctx, tcg_ctx->cpu_env); } @@ -4700,16 +5389,14 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins /* nop */ break; case 0x3c: /* save */ - save_state(dc); gen_helper_save(tcg_ctx, tcg_ctx->cpu_env); gen_store_gpr(dc, rd, cpu_tmp0); break; case 0x3d: /* restore */ - save_state(dc); gen_helper_restore(tcg_ctx, tcg_ctx->cpu_env); gen_store_gpr(dc, rd, cpu_tmp0); break; -#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64) +#if defined(TARGET_SPARC64) case 0x3e: /* V9 done/retry */ { switch (rd) { @@ -4718,14 +5405,26 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins goto priv_insn; dc->npc = DYNAMIC_PC; dc->pc = DYNAMIC_PC; + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } gen_helper_done(tcg_ctx, tcg_ctx->cpu_env); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_end(); + } goto jmp_insn; case 1: if (!supervisor(dc)) goto priv_insn; dc->npc = DYNAMIC_PC; dc->pc = DYNAMIC_PC; + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_start(); + } gen_helper_retry(tcg_ctx, tcg_ctx->cpu_env); + if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { + // gen_io_end(); + } goto jmp_insn; default: goto illegal_insn; @@ -4769,31 +5468,25 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins switch (xop) { case 0x0: /* ld, V9 lduw, load unsigned word */ 
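/* Editorial note, not part of the patch: every load/store case below
 * first passes the effective address through gen_address_mask(). On
 * sparc64 that truncates the address to 32 bits when PSTATE.AM is set,
 * roughly:
 *
 *     if (AM_CHECK(dc)) {
 *         tcg_gen_andi_tl(tcg_ctx, addr, addr, 0xffffffffULL);
 *     }
 *
 * and it is a no-op on sparc32. */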
gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld32u(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld32u(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x1: /* ldub, load unsigned byte */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld8u(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld8u(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x2: /* lduh, load unsigned halfword */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld16u(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld16u(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x3: /* ldd, load double word */ if (rd & 1) goto illegal_insn; else { - TCGv_i32 r_const; TCGv_i64 t64; - save_state(dc); - r_const = tcg_const_i32(tcg_ctx, 7); - /* XXX remove alignment check */ - gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); gen_address_mask(dc, cpu_addr); t64 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_qemu_ld64(dc->uc, t64, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld64(tcg_ctx, t64, cpu_addr, dc->mem_idx); tcg_gen_trunc_i64_tl(tcg_ctx, cpu_val, t64); tcg_gen_ext32u_tl(tcg_ctx, cpu_val, cpu_val); gen_store_gpr(dc, rd + 1, cpu_val); @@ -4805,118 +5498,49 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins break; case 0x9: /* ldsb, load signed byte */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld8s(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld8s(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0xa: /* ldsh, load signed halfword */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld16s(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld16s(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; - case 0xd: /* ldstub -- XXX: should be atomically */ - { - TCGv r_const; - - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld8s(dc->uc, cpu_val, cpu_addr, dc->mem_idx); - r_const = tcg_const_tl(tcg_ctx, 0xff); - tcg_gen_qemu_st8(dc->uc, r_const, cpu_addr, dc->mem_idx); - tcg_temp_free(tcg_ctx, r_const); - } + case 0xd: /* ldstub */ + gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx); break; case 0x0f: /* swap, swap register with memory. 
Also atomically */ - { - TCGv t0 = get_temp_tl(dc); - CHECK_IU_FEATURE(dc, SWAP); - cpu_src1 = gen_load_gpr(dc, rd); - gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld32u(dc->uc, t0, cpu_addr, dc->mem_idx); - tcg_gen_qemu_st32(dc->uc, cpu_src1, cpu_addr, dc->mem_idx); - tcg_gen_mov_tl(tcg_ctx, cpu_val, t0); - } + CHECK_IU_FEATURE(dc, SWAP); + cpu_src1 = gen_load_gpr(dc, rd); + gen_swap(dc, cpu_val, cpu_src1, cpu_addr, + dc->mem_idx, MO_TEUL); break; -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) case 0x10: /* lda, V9 lduwa, load word alternate */ -#ifndef TARGET_SPARC64 - if (IS_IMM) - goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - save_state(dc); - gen_ld_asi(dc, cpu_val, cpu_addr, insn, 4, 0); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL); break; case 0x11: /* lduba, load unsigned byte alternate */ -#ifndef TARGET_SPARC64 - if (IS_IMM) - goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - save_state(dc); - gen_ld_asi(dc, cpu_val, cpu_addr, insn, 1, 0); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB); break; case 0x12: /* lduha, load unsigned halfword alternate */ -#ifndef TARGET_SPARC64 - if (IS_IMM) - goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - save_state(dc); - gen_ld_asi(dc, cpu_val, cpu_addr, insn, 2, 0); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW); break; case 0x13: /* ldda, load double word alternate */ -#ifndef TARGET_SPARC64 - if (IS_IMM) + if (rd & 1) { goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - if (rd & 1) - goto illegal_insn; - save_state(dc); - gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd); + } + gen_ldda_asi(dc, cpu_addr, insn, rd); goto skip_move; case 0x19: /* ldsba, load signed byte alternate */ -#ifndef TARGET_SPARC64 - if (IS_IMM) - goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - save_state(dc); - gen_ld_asi(dc, cpu_val, cpu_addr, insn, 1, 1); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB); break; case 0x1a: /* ldsha, load signed halfword alternate */ -#ifndef TARGET_SPARC64 - if (IS_IMM) - goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - save_state(dc); - gen_ld_asi(dc, cpu_val, cpu_addr, insn, 2, 1); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW); break; case 0x1d: /* ldstuba -- XXX: should be atomically */ -#ifndef TARGET_SPARC64 - if (IS_IMM) - goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - save_state(dc); gen_ldstub_asi(dc, cpu_val, cpu_addr, insn); break; case 0x1f: /* swapa, swap reg with alt. memory. 
Also atomically */ CHECK_IU_FEATURE(dc, SWAP); -#ifndef TARGET_SPARC64 - if (IS_IMM) - goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - save_state(dc); cpu_src1 = gen_load_gpr(dc, rd); gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn); break; @@ -4927,23 +5551,20 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 0x33: /* lddc */ goto ncp_insn; #endif -#endif #ifdef TARGET_SPARC64 case 0x08: /* V9 ldsw */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld32s(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld32s(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x0b: /* V9 ldx */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_ld64(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld64(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x18: /* V9 ldswa */ - save_state(dc); - gen_ld_asi(dc, cpu_val, cpu_addr, insn, 4, 1); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL); break; case 0x1b: /* V9 ldxa */ - save_state(dc); - gen_ld_asi(dc, cpu_val, cpu_addr, insn, 8, 0); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ); break; case 0x2d: /* V9 prefetch, no effect */ goto skip_move; @@ -4951,7 +5572,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } - save_state(dc); gen_ldf_asi(dc, cpu_addr, insn, 4, rd); gen_update_fprs_dirty(dc, rd); goto skip_move; @@ -4959,7 +5579,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } - save_state(dc); gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd)); gen_update_fprs_dirty(dc, DFPREG(rd)); goto skip_move; @@ -4970,7 +5589,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } - save_state(dc); gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd)); gen_update_fprs_dirty(dc, QFPREG(rd)); goto skip_move; @@ -4979,23 +5597,17 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins goto illegal_insn; } gen_store_gpr(dc, rd, cpu_val); -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) skip_move: ; -#endif } else if (xop >= 0x20 && xop < 0x24) { - TCGv t0; - if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } - save_state(dc); switch (xop) { case 0x20: /* ldf, load fpreg */ gen_address_mask(dc, cpu_addr); - t0 = get_temp_tl(dc); - tcg_gen_qemu_ld32u(dc->uc, t0, cpu_addr, dc->mem_idx); cpu_dst_32 = gen_dest_fpr_F(dc); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_dst_32, t0); + tcg_gen_qemu_ld_i32(tcg_ctx, cpu_dst_32, cpu_addr, + dc->mem_idx, MO_TEUL); gen_store_fpr_F(dc, rd, cpu_dst_32); break; case 0x21: /* ldfsr, V9 ldxfsr */ @@ -5003,35 +5615,37 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins gen_address_mask(dc, cpu_addr); if (rd == 1) { TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_qemu_ld64(dc->uc, t64, cpu_addr, dc->mem_idx); - gen_helper_ldxfsr(tcg_ctx, tcg_ctx->cpu_env, t64); + tcg_gen_qemu_ld_i64(tcg_ctx, t64, cpu_addr, + dc->mem_idx, MO_TEQ); + gen_helper_ldxfsr(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, tcg_ctx->cpu_fsr, t64); tcg_temp_free_i64(tcg_ctx, t64); break; } #endif cpu_dst_32 = get_temp_i32(dc); - t0 = get_temp_tl(dc); - tcg_gen_qemu_ld32u(dc->uc, t0, cpu_addr, dc->mem_idx); - tcg_gen_trunc_tl_i32(tcg_ctx, cpu_dst_32, t0); - gen_helper_ldfsr(tcg_ctx, tcg_ctx->cpu_env, cpu_dst_32); + tcg_gen_qemu_ld_i32(tcg_ctx, cpu_dst_32, cpu_addr, + dc->mem_idx, MO_TEUL); + 
gen_helper_ldfsr(tcg_ctx, tcg_ctx->cpu_fsr, tcg_ctx->cpu_env, tcg_ctx->cpu_fsr, cpu_dst_32); break; case 0x22: /* ldqf, load quad fpreg */ - { - TCGv_i32 r_const; - - CHECK_FPU_FEATURE(dc, FLOAT128); - r_const = tcg_const_i32(tcg_ctx, dc->mem_idx); - gen_address_mask(dc, cpu_addr); - gen_helper_ldqf(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); - gen_op_store_QT0_fpr(dc, QFPREG(rd)); - gen_update_fprs_dirty(dc, QFPREG(rd)); - } + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_address_mask(dc, cpu_addr); + cpu_src1_64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(tcg_ctx, cpu_src1_64, cpu_addr, dc->mem_idx, + MO_TEQ | MO_ALIGN_4); + tcg_gen_addi_tl(tcg_ctx, cpu_addr, cpu_addr, 8); + cpu_src2_64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(tcg_ctx, cpu_src2_64, cpu_addr, dc->mem_idx, + MO_TEQ | MO_ALIGN_4); + gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64); + tcg_temp_free_i64(tcg_ctx, cpu_src1_64); + tcg_temp_free_i64(tcg_ctx, cpu_src2_64); break; case 0x23: /* lddf, load double fpreg */ gen_address_mask(dc, cpu_addr); cpu_dst_64 = gen_dest_fpr_D(dc, rd); - tcg_gen_qemu_ld64(dc->uc, cpu_dst_64, cpu_addr, dc->mem_idx); + tcg_gen_qemu_ld_i64(tcg_ctx, cpu_dst_64, cpu_addr, dc->mem_idx, + MO_TEQ | MO_ALIGN_4); gen_store_fpr_D(dc, rd, cpu_dst_64); break; default: @@ -5044,96 +5658,53 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins switch (xop) { case 0x4: /* st, store word */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st32(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st32(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x5: /* stb, store byte */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st8(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st8(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x6: /* sth, store halfword */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st16(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st16(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x7: /* std, store double word */ if (rd & 1) goto illegal_insn; else { - TCGv_i32 r_const; TCGv_i64 t64; TCGv lo; - save_state(dc); gen_address_mask(dc, cpu_addr); - r_const = tcg_const_i32(tcg_ctx, 7); - /* XXX remove alignment check */ - gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); lo = gen_load_gpr(dc, rd + 1); - t64 = tcg_temp_new_i64(tcg_ctx); tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, cpu_val); - tcg_gen_qemu_st64(dc->uc, t64, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st64(tcg_ctx, t64, cpu_addr, dc->mem_idx); tcg_temp_free_i64(tcg_ctx, t64); } break; -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) case 0x14: /* sta, V9 stwa, store word alternate */ -#ifndef TARGET_SPARC64 - if (IS_IMM) - goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - save_state(dc); - gen_st_asi(dc, cpu_val, cpu_addr, insn, 4); - dc->npc = DYNAMIC_PC; + gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL); break; case 0x15: /* stba, store byte alternate */ -#ifndef TARGET_SPARC64 - if (IS_IMM) - goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - save_state(dc); - gen_st_asi(dc, cpu_val, cpu_addr, insn, 1); - dc->npc = DYNAMIC_PC; + gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB); break; case 0x16: /* stha, store halfword alternate */ -#ifndef TARGET_SPARC64 - if (IS_IMM) - goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - save_state(dc); - gen_st_asi(dc, cpu_val, cpu_addr, insn, 2); - dc->npc = 
DYNAMIC_PC; + gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW); break; case 0x17: /* stda, store double word alternate */ -#ifndef TARGET_SPARC64 - if (IS_IMM) + if (rd & 1) { goto illegal_insn; - if (!supervisor(dc)) - goto priv_insn; -#endif - if (rd & 1) - goto illegal_insn; - else { - save_state(dc); - gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd); } + gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd); break; -#endif #ifdef TARGET_SPARC64 case 0x0e: /* V9 stx */ gen_address_mask(dc, cpu_addr); - tcg_gen_qemu_st64(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st64(tcg_ctx, cpu_val, cpu_addr, dc->mem_idx); break; case 0x1e: /* V9 stxa */ - save_state(dc); - gen_st_asi(dc, cpu_val, cpu_addr, insn, 8); - dc->npc = DYNAMIC_PC; + gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ); break; #endif default: @@ -5143,69 +5714,62 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } - save_state(dc); switch (xop) { case 0x24: /* stf, store fpreg */ - { - TCGv t = get_temp_tl(dc); - gen_address_mask(dc, cpu_addr); - cpu_src1_32 = gen_load_fpr_F(dc, rd); - tcg_gen_ext_i32_tl(tcg_ctx, t, cpu_src1_32); - tcg_gen_qemu_st32(dc->uc, t, cpu_addr, dc->mem_idx); - } + gen_address_mask(dc, cpu_addr); + cpu_src1_32 = gen_load_fpr_F(dc, rd); + tcg_gen_qemu_st_i32(tcg_ctx, cpu_src1_32, cpu_addr, + dc->mem_idx, MO_TEUL); break; case 0x25: /* stfsr, V9 stxfsr */ { - TCGv t = get_temp_tl(dc); - - tcg_gen_ld_tl(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUSPARCState, fsr)); #ifdef TARGET_SPARC64 gen_address_mask(dc, cpu_addr); if (rd == 1) { - tcg_gen_qemu_st64(dc->uc, t, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st64(tcg_ctx, tcg_ctx->cpu_fsr, cpu_addr, dc->mem_idx); break; } #endif - tcg_gen_qemu_st32(dc->uc, t, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st32(tcg_ctx, tcg_ctx->cpu_fsr, cpu_addr, dc->mem_idx); } break; case 0x26: #ifdef TARGET_SPARC64 /* V9 stqf, store quad fpreg */ - { - TCGv_i32 r_const; - - CHECK_FPU_FEATURE(dc, FLOAT128); - gen_op_load_fpr_QT0(dc, QFPREG(rd)); - r_const = tcg_const_i32(tcg_ctx, dc->mem_idx); - gen_address_mask(dc, cpu_addr); - gen_helper_stqf(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); - } + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_address_mask(dc, cpu_addr); + /* ??? While stqf only requires 4-byte alignment, it is + legal for the cpu to signal the unaligned exception. + The OS trap handler is then required to fix it up. + For qemu, this avoids having to probe the second page + before performing the first write. 
*/ + cpu_src1_64 = gen_load_fpr_Q0(dc, rd); + tcg_gen_qemu_st_i64(tcg_ctx, cpu_src1_64, cpu_addr, + dc->mem_idx, MO_TEQ | MO_ALIGN_16); + tcg_gen_addi_tl(tcg_ctx, cpu_addr, cpu_addr, 8); + cpu_src2_64 = gen_load_fpr_Q1(dc, rd); + tcg_gen_qemu_st_i64(tcg_ctx, cpu_src2_64, cpu_addr, + dc->mem_idx, MO_TEQ); break; #else /* !TARGET_SPARC64 */ /* stdfq, store floating point queue */ -#if defined(CONFIG_USER_ONLY) - goto illegal_insn; -#else if (!supervisor(dc)) goto priv_insn; if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } goto nfq_insn; -#endif #endif case 0x27: /* stdf, store double fpreg */ gen_address_mask(dc, cpu_addr); cpu_src1_64 = gen_load_fpr_D(dc, rd); - tcg_gen_qemu_st64(dc->uc, cpu_src1_64, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st_i64(tcg_ctx, cpu_src1_64, cpu_addr, dc->mem_idx, + MO_TEQ | MO_ALIGN_4); break; default: goto illegal_insn; } } else if (xop > 0x33 && xop < 0x3f) { - save_state(dc); switch (xop) { #ifdef TARGET_SPARC64 case 0x34: /* V9 stfa */ @@ -5216,15 +5780,10 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins break; case 0x36: /* V9 stqfa */ { - TCGv_i32 r_const; - CHECK_FPU_FEATURE(dc, FLOAT128); if (gen_trap_ifnofpu(dc)) { goto jmp_insn; } - r_const = tcg_const_i32(tcg_ctx, 7); - gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd)); } break; @@ -5246,22 +5805,14 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins case 0x37: /* stdc */ goto ncp_insn; #endif -#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) case 0x3c: /* V9 or LEON3 casa */ #ifndef TARGET_SPARC64 CHECK_IU_FEATURE(dc, CASA); - if (IS_IMM) { - goto illegal_insn; - } - if (!supervisor(dc)) { - goto priv_insn; - } #endif rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd); break; -#endif default: goto illegal_insn; } @@ -5274,11 +5825,11 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins /* default case for non jump instructions */ if (dc->npc == DYNAMIC_PC) { dc->pc = DYNAMIC_PC; - gen_op_next_insn(dc); + gen_op_next_insn(tcg_ctx); } else if (dc->npc == JUMP_PC) { /* we can do a static jump */ - gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], *(TCGv *)tcg_ctx->cpu_cond); - dc->is_br = 1; + gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], tcg_ctx->cpu_cond); + dc->base.is_jmp = DISAS_NORETURN; } else { dc->pc = dc->npc; dc->npc = dc->npc + 4; @@ -5286,63 +5837,25 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins jmp_insn: goto egress; illegal_insn: - { - TCGv_i32 r_const; - - save_state(dc); - r_const = tcg_const_i32(tcg_ctx, TT_ILL_INSN); - gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); - dc->is_br = 1; - } + gen_exception(dc, TT_ILL_INSN); goto egress; unimp_flush: - { - TCGv_i32 r_const; - - save_state(dc); - r_const = tcg_const_i32(tcg_ctx, TT_UNIMP_FLUSH); - gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); - dc->is_br = 1; - } + gen_exception(dc, TT_UNIMP_FLUSH); goto egress; -#if !defined(CONFIG_USER_ONLY) priv_insn: - { - TCGv_i32 r_const; - - save_state(dc); - r_const = tcg_const_i32(tcg_ctx, TT_PRIV_INSN); - gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const); - tcg_temp_free_i32(tcg_ctx, r_const); - dc->is_br = 1; - } + gen_exception(dc, TT_PRIV_INSN); goto egress; -#endif
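/* An illustrative sketch (assumed, not taken from this patch): the
 * gen_exception() helper that the hunks above substitute for the
 * open-coded blocks presumably consolidates exactly the removed
 * sequence, roughly:
 *
 *     static void gen_exception(DisasContext *dc, int which)
 *     {
 *         TCGContext *tcg_ctx = dc->uc->tcg_ctx;
 *         TCGv_i32 t;
 *
 *         save_state(dc);            // flush pc/npc before raising
 *         t = tcg_const_i32(tcg_ctx, which);
 *         gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, t);
 *         tcg_temp_free_i32(tcg_ctx, t);
 *         dc->base.is_jmp = DISAS_NORETURN;   // translation block ends here
 *     }
 */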
nfpu_insn: - save_state(dc); gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP); - dc->is_br = 1; goto egress; -#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) +#if !defined(TARGET_SPARC64) nfq_insn: - save_state(dc); gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR); - dc->is_br = 1; goto egress; #endif #ifndef TARGET_SPARC64 ncp_insn: - { - TCGv r_const; - - save_state(dc); - r_const = tcg_const_i32(tcg_ctx, TT_NCP_INSN); - gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const); - tcg_temp_free(tcg_ctx, r_const); - dc->is_br = 1; - } + gen_exception(dc, TT_NCP_INSN); goto egress; #endif egress: @@ -5362,356 +5875,289 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_ins } } -static inline void gen_intermediate_code_internal(SPARCCPU *cpu, - TranslationBlock *tb, - bool spc) +static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { - CPUState *cs = CPU(cpu); - CPUSPARCState *env = &cpu->env; - target_ulong pc_start, last_pc; - uint16_t *gen_opc_end; - DisasContext dc1, *dc = &dc1; - CPUBreakpoint *bp; - int j, lj = -1; - int num_insns = 0; - int max_insns; - unsigned int insn; - TCGContext *tcg_ctx = env->uc->tcg_ctx; - bool block_full = false; + DisasContext *dc = container_of(dcbase, DisasContext, base); + CPUSPARCState *env = cs->env_ptr; + int bound; - memset(dc, 0, sizeof(DisasContext)); - dc->uc = env->uc; - dc->tb = tb; - pc_start = tb->pc; - dc->pc = pc_start; - last_pc = dc->pc; - dc->npc = (target_ulong) tb->cs_base; + // unicorn setup + dc->uc = cs->uc; + + dc->pc = dc->base.pc_first; + dc->npc = (target_ulong)dc->base.tb->cs_base; dc->cc_op = CC_OP_DYNAMIC; - dc->mem_idx = cpu_mmu_index(env); - dc->def = env->def; - dc->fpu_enabled = tb_fpu_enabled(tb->flags); - dc->address_mask_32bit = tb_am_enabled(tb->flags); - dc->singlestep = (cs->singlestep_enabled); // || singlestep); - gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE; + dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK; + dc->def = &env->def; + dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags); + dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags); + dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0; +#ifdef TARGET_SPARC64 + dc->fprs_dirty = 0; + dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff; +#endif + /* + * if we reach a page boundary, we stop generation so that the + * PC of a TT_TFAULT exception is always in the right page + */ +#ifdef _MSC_VER + bound = (0 - (dc->base.pc_first | TARGET_PAGE_MASK)) / 4; +#else + bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; +#endif + dc->base.max_insns = MIN(dc->base.max_insns, bound); +} +static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs) +{ +} - // early check to see if the address of this block is the until address - if (pc_start == env->uc->addr_end) { - gen_tb_start(tcg_ctx); - gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env); - goto done_generating; +static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + if (dc->npc & JUMP_PC) { + assert(dc->jump_pc[1] == dc->pc + 4); + tcg_gen_insn_start(tcg_ctx, dc->pc, dc->jump_pc[0] | JUMP_PC); + } else { + tcg_gen_insn_start(tcg_ctx, dc->pc, dc->npc); } +} - max_insns = tb->cflags & CF_COUNT_MASK; - if (max_insns == 0) - max_insns = CF_COUNT_MASK; +static bool sparc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, + const CPUBreakpoint *bp) +{ + DisasContext *dc = 
container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; - // Unicorn: early check to see if the address of this block is the until address - if (tb->pc == env->uc->addr_end) { - gen_tb_start(tcg_ctx); + if (dc->pc != dc->base.pc_first) { save_state(dc); + } + gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + dc->base.is_jmp = DISAS_NORETURN; + /* update pc_next so that the current instruction is included in tb->size */ + dc->base.pc_next += 4; + return true; +} + +static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = dc->uc; + TCGContext *tcg_ctx = uc->tcg_ctx; + CPUSPARCState *env = cs->env_ptr; + unsigned int insn; + + // Unicorn: end address tells us to stop emulation + if (dc->pc == uc->addr_end) { +#ifndef TARGET_SPARC64 gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env); - goto done_generating; +#endif + dcbase->is_jmp = DISAS_NORETURN; + return; } - // Unicorn: trace this block on request - // Only hook this block if it is not broken from previous translation due to - // full translation cache - if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) { - // save block address to see if we need to patch block size later - env->uc->block_addr = pc_start; - env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1; - gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start); + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(uc, UC_HOOK_CODE, dc->pc)) { + gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, uc, dc->pc); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); } - gen_tb_start(tcg_ctx); - do { - if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { - QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { - if (bp->pc == dc->pc) { - if (dc->pc != pc_start) - save_state(dc); - gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); - tcg_gen_exit_tb(tcg_ctx, 0); - dc->is_br = 1; - goto exit_gen_loop; - } - } - } - if (spc) { - qemu_log("Search PC...\n"); - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - if (lj < j) { - lj++; - while (lj < j) - tcg_ctx->gen_opc_instr_start[lj++] = 0; - tcg_ctx->gen_opc_pc[lj] = dc->pc; - tcg_ctx->gen_opc_npc[lj] = dc->npc; - tcg_ctx->gen_opc_instr_start[lj] = 1; - tcg_ctx->gen_opc_icount[lj] = num_insns; - } - } - //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) - // gen_io_start(); - // Unicorn: end address tells us to stop emulation - if (dc->pc == dc->uc->addr_end) { - save_state(dc); - gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env); - break; - } else { - last_pc = dc->pc; - insn = cpu_ldl_code(env, dc->pc); - } + insn = translator_ldl(tcg_ctx, env, dc->pc); + dc->base.pc_next += 4; + disas_sparc_insn(dc, insn); - disas_sparc_insn(dc, insn, true); - num_insns++; + if (dc->base.is_jmp == DISAS_NORETURN) { + return; + } + if (dc->pc != dc->base.pc_next) { + dc->base.is_jmp = DISAS_TOO_MANY; + } +} - if (dc->is_br) - break; - /* if the next PC is different, we abort now */ - if (dc->pc != (last_pc + 4)) - break; +static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; - /* if we reach a page boundary, we stop generation so that the - PC of a TT_TFAULT exception is always in the right page */ - if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0) - break; - 
/* if single step mode, we generate only one instruction and - generate an exception */ - if (dc->singlestep) { - break; - } - } while ((tcg_ctx->gen_opc_ptr < gen_opc_end) && - (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) && - num_insns < max_insns); - - /* if too long translation, save this info */ - if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns) - block_full = true; - - exit_gen_loop: - //if (tb->cflags & CF_LAST_IO) { - // gen_io_end(); - //} - if (!dc->is_br) { + switch (dc->base.is_jmp) { + case DISAS_NEXT: + case DISAS_TOO_MANY: if (dc->pc != DYNAMIC_PC && (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) { /* static PC and NPC: we can use direct chaining */ gen_goto_tb(dc, 0, dc->pc, dc->npc); } else { if (dc->pc != DYNAMIC_PC) { - tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, dc->pc); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_pc, dc->pc); } save_npc(dc); - tcg_gen_exit_tb(tcg_ctx, 0); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); } + break; + + case DISAS_NORETURN: + break; + + case DISAS_EXIT: + /* Exit TB */ + save_state(dc); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + break; + + default: + g_assert_not_reached(); } - -done_generating: - gen_tb_end(tcg_ctx, tb, num_insns); - *tcg_ctx->gen_opc_ptr = INDEX_op_end; - if (spc) { - j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; - lj++; - while (lj <= j) - tcg_ctx->gen_opc_instr_start[lj++] = 0; -#if 0 - log_page_dump(); -#endif - tcg_ctx->gen_opc_jump_pc[0] = dc->jump_pc[0]; - tcg_ctx->gen_opc_jump_pc[1] = dc->jump_pc[1]; - } else { - tb->size = last_pc + 4 - pc_start; - tb->icount = num_insns; - } - - env->uc->block_full = block_full; } -void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb) +static const TranslatorOps sparc_tr_ops = { + .init_disas_context = sparc_tr_init_disas_context, + .tb_start = sparc_tr_tb_start, + .insn_start = sparc_tr_insn_start, + .breakpoint_check = sparc_tr_breakpoint_check, + .translate_insn = sparc_tr_translate_insn, + .tb_stop = sparc_tr_tb_stop, +}; + +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) { - gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, false); + DisasContext dc = { 0 }; + + translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns); } -void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb) +void sparc_tcg_init(struct uc_struct *uc) { - gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, true); -} + TCGContext *tcg_ctx = uc->tcg_ctx; -void gen_intermediate_code_init(CPUSPARCState *env) -{ - TCGContext *tcg_ctx = env->uc->tcg_ctx; - struct uc_struct *uc = env->uc; - unsigned int i; - static const char * const gregnames[8] = { - NULL, // g0 not used - "g1", - "g2", - "g3", - "g4", - "g5", - "g6", - "g7", + static const char gregnames[32][4] = { + "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7", + "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7", + "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7", + "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7", }; - static const char * const fregnames[32] = { + static const char fregnames[32][4] = { "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14", "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30", "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", }; - /* init various static tables */ - tcg_ctx->cpu_env = tcg_global_reg_new_ptr(tcg_ctx, TCG_AREG0, "env"); - tcg_ctx->cpu_regwptr = tcg_global_mem_new_ptr(tcg_ctx, TCG_AREG0, - offsetof(CPUSPARCState, regwptr), - "regwptr"); + static struct { 
TCGv_i32 *ptr; int off; const char *name; } r32[] = { + { NULL /* &tcg_ctx->cpu_cc_op */, offsetof(CPUSPARCState, cc_op), "cc_op" }, + { NULL /* &tcg_ctx->cpu_psr */, offsetof(CPUSPARCState, psr), "psr" }, #ifdef TARGET_SPARC64 - tcg_ctx->cpu_xcc = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, xcc), - "xcc"); - tcg_ctx->cpu_asi = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, asi), - "asi"); - tcg_ctx->cpu_fprs = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, fprs), - "fprs"); - - if (!uc->init_tcg) - tcg_ctx->cpu_gsr = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_gsr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, gsr), - "gsr"); - - if (!uc->init_tcg) - tcg_ctx->cpu_tick_cmpr = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_tick_cmpr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUSPARCState, tick_cmpr), - "tick_cmpr"); - - if (!uc->init_tcg) - tcg_ctx->cpu_stick_cmpr = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_stick_cmpr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUSPARCState, stick_cmpr), - "stick_cmpr"); - - if (!uc->init_tcg) - tcg_ctx->cpu_hstick_cmpr = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_hstick_cmpr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUSPARCState, hstick_cmpr), - "hstick_cmpr"); - - if (!uc->init_tcg) - tcg_ctx->cpu_hintp = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_hintp = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, hintp), - "hintp"); - - if (!uc->init_tcg) - tcg_ctx->cpu_htba = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_htba = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, htba), - "htba"); - - if (!uc->init_tcg) - tcg_ctx->cpu_hver = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_hver = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, hver), - "hver"); - - if (!uc->init_tcg) - tcg_ctx->cpu_ssr = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_ssr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUSPARCState, ssr), "ssr"); - - if (!uc->init_tcg) - tcg_ctx->cpu_ver = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_ver = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUSPARCState, version), "ver"); - - tcg_ctx->cpu_softint = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, - offsetof(CPUSPARCState, softint), - "softint"); + { NULL /* &tcg_ctx->cpu_xcc */, offsetof(CPUSPARCState, xcc), "xcc" }, + { NULL /* &tcg_ctx->cpu_fprs */, offsetof(CPUSPARCState, fprs), "fprs" }, #else - if (!uc->init_tcg) - tcg_ctx->cpu_wim = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_wim = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, wim), - "wim"); + { NULL /* &tcg_ctx->cpu_wim */, offsetof(CPUSPARCState, wim), "wim" }, +#endif + }; + + static struct { TCGv *ptr; int off; const char *name; } rtl[] = { + { NULL /* &tcg_ctx->cpu_cond */, offsetof(CPUSPARCState, cond), "cond" }, + { NULL /* &tcg_ctx->cpu_cc_src */, offsetof(CPUSPARCState, cc_src), "cc_src" }, + { NULL /* &tcg_ctx->cpu_cc_src2 */, offsetof(CPUSPARCState, cc_src2), "cc_src2" }, + { NULL /* &tcg_ctx->cpu_cc_dst */, offsetof(CPUSPARCState, cc_dst), "cc_dst" }, + { NULL /* &tcg_ctx->cpu_fsr */, offsetof(CPUSPARCState, fsr), "fsr" }, + { NULL /* &tcg_ctx->cpu_pc */, offsetof(CPUSPARCState, pc), "pc" }, + { NULL /* &tcg_ctx->cpu_npc */, offsetof(CPUSPARCState, npc), "npc" }, + { NULL /* &tcg_ctx->cpu_y */, offsetof(CPUSPARCState, y), "y" }, + { NULL /* &tcg_ctx->cpu_tbr */, offsetof(CPUSPARCState, tbr), "tbr" }, +#ifdef 
TARGET_SPARC64 + { NULL /* &tcg_ctx->cpu_gsr */, offsetof(CPUSPARCState, gsr), "gsr" }, + { NULL /* &tcg_ctx->cpu_tick_cmpr */, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" }, + { NULL /* &tcg_ctx->cpu_stick_cmpr */, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" }, + { NULL /* &tcg_ctx->cpu_hstick_cmpr */, offsetof(CPUSPARCState, hstick_cmpr), "hstick_cmpr" }, + { NULL /* &tcg_ctx->cpu_hintp */, offsetof(CPUSPARCState, hintp), "hintp" }, + { NULL /* &tcg_ctx->cpu_htba */, offsetof(CPUSPARCState, htba), "htba" }, + { NULL /* &tcg_ctx->cpu_hver */, offsetof(CPUSPARCState, hver), "hver" }, + { NULL /* &tcg_ctx->cpu_ssr */, offsetof(CPUSPARCState, ssr), "ssr" }, + { NULL /* &tcg_ctx->cpu_ver */, offsetof(CPUSPARCState, version), "ver" }, +#endif + }; + unsigned int i; + + r32[0].ptr = &tcg_ctx->cpu_cc_op; + r32[1].ptr = &tcg_ctx->cpu_psr; +#ifdef TARGET_SPARC64 + r32[2].ptr = &tcg_ctx->cpu_xcc; + r32[3].ptr = &tcg_ctx->cpu_fprs; +#else + r32[2].ptr = &tcg_ctx->cpu_wim; #endif - if (!uc->init_tcg) - tcg_ctx->cpu_cond = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_cond = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cond), - "cond"); - - if (!uc->init_tcg) - tcg_ctx->cpu_cc_src = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_cc_src) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cc_src), - "cc_src"); - - if (!uc->init_tcg) - tcg_ctx->cpu_cc_src2 = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_cc_src2) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUSPARCState, cc_src2), - "cc_src2"); - - if (!uc->init_tcg) - tcg_ctx->cpu_cc_dst = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_cc_dst = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cc_dst), - "cc_dst"); - - tcg_ctx->cpu_cc_op = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cc_op), - "cc_op"); - tcg_ctx->cpu_psr = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, psr), - "psr"); - - if (!uc->init_tcg) - tcg_ctx->cpu_fsr = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_fsr) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, fsr), - "fsr"); - - if (!uc->init_tcg) - tcg_ctx->sparc_cpu_pc = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->sparc_cpu_pc = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, pc), - "pc"); - - if (!uc->init_tcg) - tcg_ctx->cpu_npc = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_npc = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, npc), - "npc"); - - if (!uc->init_tcg) - tcg_ctx->cpu_y = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_y = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, y), "y"); -#ifndef CONFIG_USER_ONLY - if (!uc->init_tcg) - tcg_ctx->cpu_tbr = g_malloc0(sizeof(TCGv)); - *(TCGv *)tcg_ctx->cpu_tbr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, tbr), - "tbr"); + rtl[0].ptr = &tcg_ctx->cpu_cond; + rtl[1].ptr = &tcg_ctx->cpu_cc_src; + rtl[2].ptr = &tcg_ctx->cpu_cc_src2; + rtl[3].ptr = &tcg_ctx->cpu_cc_dst; + rtl[4].ptr = &tcg_ctx->cpu_fsr; + rtl[5].ptr = &tcg_ctx->cpu_pc; + rtl[6].ptr = &tcg_ctx->cpu_npc; + rtl[7].ptr = &tcg_ctx->cpu_y; + rtl[8].ptr = &tcg_ctx->cpu_tbr; +#ifdef TARGET_SPARC64 + rtl[9].ptr = &tcg_ctx->cpu_gsr; + rtl[10].ptr = &tcg_ctx->cpu_tick_cmpr; + rtl[11].ptr = &tcg_ctx->cpu_stick_cmpr; + rtl[12].ptr = &tcg_ctx->cpu_hstick_cmpr; + rtl[13].ptr = &tcg_ctx->cpu_hintp; + rtl[14].ptr = &tcg_ctx->cpu_htba; + rtl[15].ptr = &tcg_ctx->cpu_hver; + rtl[16].ptr = &tcg_ctx->cpu_ssr; + rtl[17].ptr = 
&tcg_ctx->cpu_ver; #endif - if (!uc->init_tcg) { - for (i = 0; i < 8; i++) { - tcg_ctx->cpu_gregs[i] = g_malloc0(sizeof(TCGv)); - *((TCGv *)tcg_ctx->cpu_gregs[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, - offsetof(CPUSPARCState, gregs[i]), - gregnames[i]); - } + + tcg_ctx->cpu_regwptr = tcg_global_mem_new_ptr(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, regwptr), + "regwptr"); + + for (i = 0; i < ARRAY_SIZE(r32); ++i) { + *r32[i].ptr = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, r32[i].off, r32[i].name); + } + + for (i = 0; i < ARRAY_SIZE(rtl); ++i) { + *rtl[i].ptr = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, rtl[i].off, rtl[i].name); + } + + tcg_ctx->cpu_regs[0] = NULL; + for (i = 1; i < 8; ++i) { + tcg_ctx->cpu_regs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, gregs[i]), + gregnames[i]); + } + + for (i = 8; i < 32; ++i) { + tcg_ctx->cpu_regs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_regwptr, + (i - 8) * sizeof(target_ulong), + gregnames[i]); } for (i = 0; i < TARGET_DPREGS; i++) { - tcg_ctx->cpu_fpr[i] = tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, - offsetof(CPUSPARCState, fpr[i]), - fregnames[i]); + tcg_ctx->cpu_fpr[i] = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, fpr[i]), + fregnames[i]); } - - uc->init_tcg = true; } -void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos) +void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, + target_ulong *data) { - TCGContext *tcg_ctx = env->uc->tcg_ctx; - target_ulong npc; - npc = tcg_ctx->gen_opc_npc[pc_pos]; - if (npc == 1) { + target_ulong pc = data[0]; + target_ulong npc = data[1]; + + env->pc = pc; + if (npc == DYNAMIC_PC) { /* dynamic NPC: already stored */ - } else if (npc == 2) { + } else if (npc & JUMP_PC) { /* jump PC: use 'cond' and the jump targets of the translation */ if (env->cond) { - env->npc = tcg_ctx->gen_opc_jump_pc[0]; + env->npc = npc & ~3; } else { - env->npc = tcg_ctx->gen_opc_jump_pc[1]; + env->npc = pc + 4; } } else { env->npc = npc; diff --git a/qemu/target/sparc/unicorn.c b/qemu/target/sparc/unicorn.c new file mode 100644 index 00000000..aad52ebb --- /dev/null +++ b/qemu/target/sparc/unicorn.c @@ -0,0 +1,191 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + +#include "sysemu/cpus.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" +#include "unicorn.h" + +static bool sparc_stop_interrupt(struct uc_struct *uc, int intno) +{ + switch(intno) { + default: + return false; + case TT_ILL_INSN: + return true; + } +} + +static void sparc_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUSPARCState *)uc->cpu->env_ptr)->pc = address; + ((CPUSPARCState *)uc->cpu->env_ptr)->npc = address + 4; +} + +static void sparc_release(void *ctx) +{ + int i; + TCGContext *tcg_ctx = (TCGContext *)ctx; + SPARCCPU *cpu = (SPARCCPU *)tcg_ctx->uc->cpu; + CPUTLBDesc *d = cpu->neg.tlb.d; + CPUTLBDescFast *f = cpu->neg.tlb.f; + CPUTLBDesc *desc; + CPUTLBDescFast *fast; + + release_common(ctx); + for (i = 0; i < NB_MMU_MODES; i++) { + desc = &(d[i]); + fast = &(f[i]); + g_free(desc->iotlb); + g_free(fast->table); + } +} + +void sparc_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env = uc->cpu->env_ptr; + + memset(env->gregs, 0, sizeof(env->gregs)); + memset(env->fpr, 0, sizeof(env->fpr)); + memset(env->regbase, 0, sizeof(env->regbase)); + + env->pc = 0; + env->npc = 0; + env->regwptr = env->regbase; +} + +static void 
reg_read(CPUSPARCState *env, unsigned int regid, void *value) +{ + if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) + *(int32_t *)value = env->gregs[regid - UC_SPARC_REG_G0]; + else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) + *(int32_t *)value = env->regwptr[regid - UC_SPARC_REG_O0]; + else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) + *(int32_t *)value = env->regwptr[8 + regid - UC_SPARC_REG_L0]; + else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) + *(int32_t *)value = env->regwptr[16 + regid - UC_SPARC_REG_I0]; + else { + switch(regid) { + default: break; + case UC_SPARC_REG_PC: + *(int32_t *)value = env->pc; + break; + } + } + + return; +} + +static void reg_write(CPUSPARCState *env, unsigned int regid, const void *value) +{ + if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) + env->gregs[regid - UC_SPARC_REG_G0] = *(uint32_t *)value; + else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) + env->regwptr[regid - UC_SPARC_REG_O0] = *(uint32_t *)value; + else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) + env->regwptr[8 + regid - UC_SPARC_REG_L0] = *(uint32_t *)value; + else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) + env->regwptr[16 + regid - UC_SPARC_REG_I0] = *(uint32_t *)value; + else { + switch(regid) { + default: break; + case UC_SPARC_REG_PC: + env->pc = *(uint32_t *)value; + env->npc = *(uint32_t *)value + 4; + break; + } + } + + return; +} + +int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUSPARCState *env = &(SPARC_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +int sparc_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) +{ + CPUSPARCState *env = &(SPARC_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + if( regid == UC_SPARC_REG_PC){ + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + } + } + + return 0; +} + +DEFAULT_VISIBILITY +int sparc_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +{ + CPUSPARCState *env = (CPUSPARCState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +DEFAULT_VISIBILITY +int sparc_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +{ + CPUSPARCState *env = (CPUSPARCState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + } + + return 0; +} + +static int sparc_cpus_init(struct uc_struct *uc, const char *cpu_model) +{ + SPARCCPU *cpu; + + cpu = cpu_sparc_init(uc, cpu_model); + if (cpu == NULL) { + return -1; + } + return 0; +} + +DEFAULT_VISIBILITY +void sparc_uc_init(struct uc_struct* uc) +{ + uc->release = sparc_release; + uc->reg_read = sparc_reg_read; + uc->reg_write = sparc_reg_write; + uc->reg_reset = sparc_reg_reset; + uc->set_pc = sparc_set_pc; + uc->stop_interrupt = sparc_stop_interrupt; + uc->cpus_init = sparc_cpus_init; + uc->cpu_context_size = offsetof(CPUSPARCState, irq_manager); + uc_common_init(uc); +} diff --git a/qemu/target-sparc/unicorn.h b/qemu/target/sparc/unicorn.h similarity index 55% rename 
from qemu/target-sparc/unicorn.h rename to qemu/target/sparc/unicorn.h index 2140f286..2e90e0e3 100644 --- a/qemu/target-sparc/unicorn.h +++ b/qemu/target/sparc/unicorn.h @@ -8,12 +8,13 @@ int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); int sparc_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); +int sparc_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int sparc_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); +int sparc64_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count); +int sparc64_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count); + void sparc_reg_reset(struct uc_struct *uc); void sparc_uc_init(struct uc_struct* uc); void sparc64_uc_init(struct uc_struct* uc); - -extern const int SPARC_REGS_STORAGE_SIZE; -extern const int SPARC64_REGS_STORAGE_SIZE; - #endif diff --git a/qemu/target/sparc/unicorn64.c b/qemu/target/sparc/unicorn64.c new file mode 100644 index 00000000..02bb573a --- /dev/null +++ b/qemu/target/sparc/unicorn64.c @@ -0,0 +1,202 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ + +#include "sysemu/cpus.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" +#include "unicorn.h" + +const int SPARC64_REGS_STORAGE_SIZE = offsetof(CPUSPARCState, irq_manager); + +static bool sparc_stop_interrupt(struct uc_struct *uc, int intno) +{ + switch(intno) { + default: + return false; + case TT_ILL_INSN: + return true; + } +} + +static void sparc_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUSPARCState *)uc->cpu->env_ptr)->pc = address; + ((CPUSPARCState *)uc->cpu->env_ptr)->npc = address + 4; +} + +static void sparc_release(void *ctx) +{ + release_common(ctx); + +#if 0 + int i; + TCGContext *tcg_ctx = (TCGContext *) ctx; + SPARCCPU *cpu = SPARC_CPU(tcg_ctx->uc->cpu); + CPUSPARCState *env = &cpu->env; + + g_free(tcg_ctx->cpu_wim); + g_free(tcg_ctx->cpu_cond); + g_free(tcg_ctx->cpu_cc_src); + g_free(tcg_ctx->cpu_cc_src2); + g_free(tcg_ctx->cpu_cc_dst); + g_free(tcg_ctx->cpu_fsr); + g_free(tcg_ctx->sparc_cpu_pc); + g_free(tcg_ctx->cpu_npc); + g_free(tcg_ctx->cpu_y); + g_free(tcg_ctx->cpu_tbr); + + for (i = 0; i < 8; i++) { + g_free(tcg_ctx->cpu_gregs[i]); + } + for (i = 0; i < 32; i++) { + g_free(tcg_ctx->cpu_gpr[i]); + } + + g_free(tcg_ctx->cpu_PC); + g_free(tcg_ctx->btarget); + g_free(tcg_ctx->bcond); + g_free(tcg_ctx->cpu_dspctrl); + + g_free(tcg_ctx->tb_ctx.tbs); + + g_free(env->def); +#endif +} + +void sparc_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env = uc->cpu->env_ptr; + + memset(env->gregs, 0, sizeof(env->gregs)); + memset(env->fpr, 0, sizeof(env->fpr)); + memset(env->regbase, 0, sizeof(env->regbase)); + + env->pc = 0; + env->npc = 0; + env->regwptr = env->regbase; +} + +static void reg_read(CPUSPARCState *env, unsigned int regid, void *value) +{ + if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) + *(int64_t *)value = env->gregs[regid - UC_SPARC_REG_G0]; + else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) + *(int64_t *)value = env->regwptr[regid - UC_SPARC_REG_O0]; + else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) + *(int64_t *)value = env->regwptr[8 + regid - UC_SPARC_REG_L0]; + else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) + *(int64_t *)value = env->regwptr[16 + regid - UC_SPARC_REG_I0]; + 
else { + switch(regid) { + default: break; + case UC_SPARC_REG_PC: + *(int64_t *)value = env->pc; + break; + } + } +} + +static void reg_write(CPUSPARCState *env, unsigned int regid, const void *value) +{ + if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) + env->gregs[regid - UC_SPARC_REG_G0] = *(uint64_t *)value; + else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) + env->regwptr[regid - UC_SPARC_REG_O0] = *(uint64_t *)value; + else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) + env->regwptr[8 + regid - UC_SPARC_REG_L0] = *(uint64_t *)value; + else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) + env->regwptr[16 + regid - UC_SPARC_REG_I0] = *(uint64_t *)value; + else { + switch(regid) { + default: break; + case UC_SPARC_REG_PC: + env->pc = *(uint64_t *)value; + env->npc = *(uint64_t *)value + 4; + break; + } + } +} + +int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUSPARCState *env = &(SPARC_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +int sparc_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count) +{ + CPUSPARCState *env = &(SPARC_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + } + + return 0; +} + +DEFAULT_VISIBILITY +int sparc64_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count) +{ + CPUSPARCState *env = (CPUSPARCState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +DEFAULT_VISIBILITY +int sparc64_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count) +{ + CPUSPARCState *env = (CPUSPARCState *)ctx->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + } + + return 0; +} + +static int sparc_cpus_init(struct uc_struct *uc, const char *cpu_model) +{ + SPARCCPU *cpu; + + cpu = cpu_sparc_init(uc, cpu_model); + if (cpu == NULL) { + return -1; + } + return 0; +} + +DEFAULT_VISIBILITY +void sparc64_uc_init(struct uc_struct* uc) +{ + uc->release = sparc_release; + uc->reg_read = sparc_reg_read; + uc->reg_write = sparc_reg_write; + uc->reg_reset = sparc_reg_reset; + uc->set_pc = sparc_set_pc; + uc->stop_interrupt = sparc_stop_interrupt; + uc->cpus_init = sparc_cpus_init; + uc_common_init(uc); +} diff --git a/qemu/target-sparc/vis_helper.c b/qemu/target/sparc/vis_helper.c similarity index 99% rename from qemu/target-sparc/vis_helper.c rename to qemu/target/sparc/vis_helper.c index 383cc8bd..8a9b763d 100644 --- a/qemu/target-sparc/vis_helper.c +++ b/qemu/target/sparc/vis_helper.c @@ -17,6 +17,7 @@ * License along with this library; if not, see . */ +#include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" @@ -447,7 +448,7 @@ uint32_t helper_fpackfix(uint64_t gsr, uint64_t rs2) for (word = 0; word < 2; word++) { uint32_t val; int32_t src = rs2 >> (word * 32); - int64_t scaled = src << scale; + int64_t scaled = (int64_t)src << scale; int64_t from_fixed = scaled >> 16; val = (from_fixed < -32768 ? 
-32768 : diff --git a/qemu/target-sparc/win_helper.c b/qemu/target/sparc/win_helper.c similarity index 75% rename from qemu/target-sparc/win_helper.c rename to qemu/target/sparc/win_helper.c index f077273c..58e57c1b 100644 --- a/qemu/target-sparc/win_helper.c +++ b/qemu/target/sparc/win_helper.c @@ -17,7 +17,9 @@ * License along with this library; if not, see . */ +#include "qemu/osdep.h" #include "cpu.h" +#include "exec/exec-all.h" #include "exec/helper-proto.h" static inline void memcpy32(target_ulong *dst, const target_ulong *src) @@ -63,23 +65,29 @@ target_ulong cpu_get_psr(CPUSPARCState *env) #endif } -void cpu_put_psr(CPUSPARCState *env, target_ulong val) +void cpu_put_psr_raw(CPUSPARCState *env, target_ulong val) { env->psr = val & PSR_ICC; #if !defined(TARGET_SPARC64) env->psref = (val & PSR_EF) ? 1 : 0; env->psrpil = (val & PSR_PIL) >> 8; -#endif -#if ((!defined(TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY)) - //cpu_check_irqs(env); -#endif -#if !defined(TARGET_SPARC64) env->psrs = (val & PSR_S) ? 1 : 0; env->psrps = (val & PSR_PS) ? 1 : 0; env->psret = (val & PSR_ET) ? 1 : 0; - cpu_set_cwp(env, val & PSR_CWP); #endif env->cc_op = CC_OP_FLAGS; +#if !defined(TARGET_SPARC64) + cpu_set_cwp(env, val & PSR_CWP); +#endif +} + +/* Called with BQL held */ +void cpu_put_psr(CPUSPARCState *env, target_ulong val) +{ + cpu_put_psr_raw(env, val); +#if !defined(TARGET_SPARC64) + // cpu_check_irqs(env); +#endif } int cpu_cwp_inc(CPUSPARCState *env, int cwp) @@ -104,13 +112,13 @@ void helper_rett(CPUSPARCState *env) unsigned int cwp; if (env->psret == 1) { - helper_raise_exception(env, TT_ILL_INSN); + cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC()); } env->psret = 1; cwp = cpu_cwp_inc(env, env->cwp + 1) ; if (env->wim & (1 << cwp)) { - helper_raise_exception(env, TT_WIN_UNF); + cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC()); } cpu_set_cwp(env, cwp); env->psrs = env->psrps; @@ -124,7 +132,7 @@ void helper_save(CPUSPARCState *env) cwp = cpu_cwp_dec(env, env->cwp - 1); if (env->wim & (1 << cwp)) { - helper_raise_exception(env, TT_WIN_OVF); + cpu_raise_exception_ra(env, TT_WIN_OVF, GETPC()); } cpu_set_cwp(env, cwp); } @@ -135,7 +143,7 @@ void helper_restore(CPUSPARCState *env) cwp = cpu_cwp_inc(env, env->cwp + 1); if (env->wim & (1 << cwp)) { - helper_raise_exception(env, TT_WIN_UNF); + cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC()); } cpu_set_cwp(env, cwp); } @@ -143,8 +151,9 @@ void helper_restore(CPUSPARCState *env) void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr) { if ((new_psr & PSR_CWP) >= env->nwindows) { - helper_raise_exception(env, TT_ILL_INSN); + cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC()); } else { + /* cpu_put_psr may trigger interrupts, hence BQL */ cpu_put_psr(env, new_psr); } } @@ -163,14 +172,14 @@ void helper_save(CPUSPARCState *env) cwp = cpu_cwp_dec(env, env->cwp - 1); if (env->cansave == 0) { - helper_raise_exception(env, TT_SPILL | (env->otherwin != 0 ? - (TT_WOTHER | - ((env->wstate & 0x38) >> 1)) : - ((env->wstate & 0x7) << 2))); + int tt = TT_SPILL | (env->otherwin != 0 + ? 
(TT_WOTHER | ((env->wstate & 0x38) >> 1)) + : ((env->wstate & 0x7) << 2)); + cpu_raise_exception_ra(env, tt, GETPC()); } else { if (env->cleanwin - env->canrestore == 0) { /* XXX Clean windows without trap */ - helper_raise_exception(env, TT_CLRWIN); + cpu_raise_exception_ra(env, TT_CLRWIN, GETPC()); } else { env->cansave--; env->canrestore++; @@ -185,10 +194,10 @@ void helper_restore(CPUSPARCState *env) cwp = cpu_cwp_inc(env, env->cwp + 1); if (env->canrestore == 0) { - helper_raise_exception(env, TT_FILL | (env->otherwin != 0 ? - (TT_WOTHER | - ((env->wstate & 0x38) >> 1)) : - ((env->wstate & 0x7) << 2))); + int tt = TT_FILL | (env->otherwin != 0 + ? (TT_WOTHER | ((env->wstate & 0x38) >> 1)) + : ((env->wstate & 0x7) << 2)); + cpu_raise_exception_ra(env, tt, GETPC()); } else { env->cansave++; env->canrestore--; @@ -199,10 +208,10 @@ void helper_restore(CPUSPARCState *env) void helper_flushw(CPUSPARCState *env) { if (env->cansave != env->nwindows - 2) { - helper_raise_exception(env, TT_SPILL | (env->otherwin != 0 ? - (TT_WOTHER | - ((env->wstate & 0x38) >> 1)) : - ((env->wstate & 0x7) << 2))); + int tt = TT_SPILL | (env->otherwin != 0 + ? (TT_WOTHER | ((env->wstate & 0x38) >> 1)) + : ((env->wstate & 0x7) << 2)); + cpu_raise_exception_ra(env, tt, GETPC()); } } @@ -282,9 +291,12 @@ void helper_wrcwp(CPUSPARCState *env, target_ulong new_cwp) static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate) { + if (env->def.features & CPU_FEATURE_GL) { + return env->glregs + (env->gl & 7) * 8; + } + switch (pstate) { default: - //trace_win_helper_gregset_error(pstate); /* pass through to normal set of global registers */ case 0: return env->bgregs; @@ -297,29 +309,51 @@ static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate) } } +static inline uint64_t *get_gl_gregset(CPUSPARCState *env, uint32_t gl) +{ + return env->glregs + (gl & 7) * 8; +} + +/* Switch global register bank */ +void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl) +{ + uint64_t *src, *dst; + src = get_gl_gregset(env, new_gl); + dst = get_gl_gregset(env, env->gl); + + if (src != dst) { + memcpy32(dst, env->gregs); + memcpy32(env->gregs, src); + } +} + +void helper_wrgl(CPUSPARCState *env, target_ulong new_gl) +{ + cpu_gl_switch_gregs(env, new_gl & 7); + env->gl = new_gl & 7; +} + void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate) { uint32_t pstate_regs, new_pstate_regs; uint64_t *src, *dst; - if (env->def->features & CPU_FEATURE_GL) { - /* PS_AG is not implemented in this case */ - new_pstate &= ~PS_AG; + if (env->def.features & CPU_FEATURE_GL) { + /* PS_AG, IG and MG are not implemented in this case */ + new_pstate &= ~(PS_AG | PS_IG | PS_MG); + env->pstate = new_pstate; + return; } pstate_regs = env->pstate & 0xc01; new_pstate_regs = new_pstate & 0xc01; if (new_pstate_regs != pstate_regs) { - //trace_win_helper_switch_pstate(pstate_regs, new_pstate_regs); - /* Switch global register bank */ src = get_gregset(env, new_pstate_regs); dst = get_gregset(env, pstate_regs); memcpy32(dst, env->gregs); memcpy32(env->gregs, src); - } else { - //trace_win_helper_no_switch_pstate(new_pstate_regs); } env->pstate = new_pstate; } @@ -328,24 +362,18 @@ void helper_wrpstate(CPUSPARCState *env, target_ulong new_state) { cpu_change_pstate(env, new_state & 0xf3f); -#if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { // cpu_check_irqs(env); } -#endif } void helper_wrpil(CPUSPARCState *env, target_ulong new_pil) { -#if !defined(CONFIG_USER_ONLY) - //trace_win_helper_wrpil(env->psrpil, 
(uint32_t)new_pil); - env->psrpil = new_pil; if (cpu_interrupts_enabled(env)) { // cpu_check_irqs(env); } -#endif } void helper_done(CPUSPARCState *env) @@ -358,15 +386,17 @@ void helper_done(CPUSPARCState *env) env->asi = (tsptr->tstate >> 24) & 0xff; cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f); cpu_put_cwp64(env, tsptr->tstate & 0xff); + if (cpu_has_hypervisor(env)) { + uint32_t new_gl = (tsptr->tstate >> 40) & 7; + env->hpstate = env->htstate[env->tl]; + cpu_gl_switch_gregs(env, new_gl); + env->gl = new_gl; + } env->tl--; - //trace_win_helper_done(env->tl); - -#if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { // cpu_check_irqs(env); } -#endif } void helper_retry(CPUSPARCState *env) @@ -379,14 +409,16 @@ void helper_retry(CPUSPARCState *env) env->asi = (tsptr->tstate >> 24) & 0xff; cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f); cpu_put_cwp64(env, tsptr->tstate & 0xff); + if (cpu_has_hypervisor(env)) { + uint32_t new_gl = (tsptr->tstate >> 40) & 7; + env->hpstate = env->htstate[env->tl]; + cpu_gl_switch_gregs(env, new_gl); + env->gl = new_gl; + } env->tl--; - //trace_win_helper_retry(env->tl); - -#if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { // cpu_check_irqs(env); } -#endif } #endif diff --git a/qemu/tcg/LICENSE b/qemu/tcg/LICENSE deleted file mode 100644 index be817fa1..00000000 --- a/qemu/tcg/LICENSE +++ /dev/null @@ -1,3 +0,0 @@ -All the files in this directory and subdirectories are released under -a BSD like license (see header in each file). No other license is -accepted. diff --git a/qemu/tcg/README b/qemu/tcg/README index a550ff17..bfa2e4ed 100644 --- a/qemu/tcg/README +++ b/qemu/tcg/README @@ -8,6 +8,11 @@ in the QOP code generator written by Paul Brook. 2) Definitions +TCG receives RISC-like "TCG ops" and performs some optimizations on them, +including liveness analysis and trivial constant expression +evaluation. TCG ops are then implemented in the host CPU back end, +also known as the TCG "target". + The TCG "target" is the architecture for which we generate the code. It is of course not the same as the "target" of QEMU which is the emulated architecture. As TCG started as a generic C backend used @@ -96,7 +101,7 @@ This can be overridden using the following function modifiers: canonical locations before calling the helper. - TCG_CALL_NO_WRITE_GLOBALS means that the helper does not modify any globals. They will only be saved to their canonical location before calling helpers, - but they won't be reloaded afterwise. + but they won't be reloaded afterwards. - TCG_CALL_NO_SIDE_EFFECTS means that the call to the function is removed if the return value is not used. @@ -241,6 +246,14 @@ t0=~(t1|t2) t0=t1|~t2 +* clz_i32/i64 t0, t1, t2 + +t0 = t1 ? clz(t1) : t2 + +* ctz_i32/i64 t0, t1, t2 + +t0 = t1 ? ctz(t1) : t2 + ********* Shifts/Rotates * shl_i32/i64 t0, t1, t2 @@ -309,16 +322,45 @@ The bitfield is described by POS/LEN, which are immediate values: LEN - the length of the bitfield POS - the position of the first bit, counting from the LSB -For example, pos=8, len=4 indicates a 4-bit field at bit 8. -This operation would be equivalent to +For example, "deposit_i32 dest, t1, t2, 8, 4" indicates a 4-bit field +at bit 8. This operation would be equivalent to dest = (t1 & ~0x0f00) | ((t2 << 8) & 0x0f00) -* trunc_shr_i32 t0, t1, pos +* extract_i32/i64 dest, t1, pos, len +* sextract_i32/i64 dest, t1, pos, len -For 64-bit hosts only, right shift the 64-bit input T1 by POS and -truncate to 32-bit output T0. 
Depending on the host, this may be -a simple mov/shift, or may require additional canonicalization. +Extract a bitfield from T1, placing the result in DEST. +The bitfield is described by POS/LEN, which are immediate values, +as above for deposit. For extract_*, the result will be extended +to the left with zeros; for sextract_*, the result will be extended +to the left with copies of the bitfield sign bit at pos + len - 1. + +For example, "sextract_i32 dest, t1, 8, 4" indicates a 4-bit field +at bit 8. This operation would be equivalent to + + dest = (t1 << 20) >> 28 + +(using an arithmetic right shift). + +* extract2_i32/i64 dest, t1, t2, pos + +For N = {32,64}, extract an N-bit quantity from the concatenation +of t2:t1, beginning at pos. The tcg_gen_extract2_{i32,i64} expander +accepts 0 <= pos <= N as inputs. The backend code generator will +not see either 0 or N as inputs for these opcodes. + +* extrl_i64_i32 t0, t1 + +For 64-bit hosts only, extract the low 32-bits of input T1 and place it +into 32-bit output T0. Depending on the host, this may be a simple move, +or may require additional canonicalization. + +* extrh_i64_i32 t0, t1 + +For 64-bit hosts only, extract the high 32-bits of input T1 and place it +into 32-bit output T0. Depending on the host, this may be a simple shift, +or may require additional canonicalization. ********* Conditional moves @@ -396,6 +438,31 @@ double-word product T0. The latter is returned in two single-word outputs. Similar to mulu2, except the two inputs T1 and T2 are signed. +* mulsh_i32/i64 t0, t1, t2 +* muluh_i32/i64 t0, t1, t2 + +Provide the high part of a signed or unsigned multiply, respectively. +If mulu2/muls2 are not provided by the backend, the tcg-op generator +can obtain the same results by emitting a pair of +opcodes, mul+muluh/mulsh. + +********* Memory Barrier support + +* mb <$arg> + +Generate a target memory barrier instruction to ensure memory ordering as +enforced by a corresponding guest memory barrier instruction. The ordering +enforced by the backend may be stricter than the ordering required by the guest. +It cannot be weaker. This opcode takes a constant argument which is required to +generate the appropriate barrier instruction. The backend should take care to +emit the target barrier instruction only when necessary, i.e. for SMP guests and +when MTTCG is enabled. + +The guest translators should generate this opcode for all guest instructions +which have ordering side effects. + +Please see docs/devel/atomics.txt for more information on memory barriers. + ********* 64-bit guest on 32-bit host support The following opcodes are internal to TCG. Thus they are to be implemented by @@ -425,6 +492,14 @@ current TB was linked to this TB. Otherwise execute the next instructions. Only indices 0 and 1 are valid and tcg_gen_goto_tb may be issued at most once with each slot index per TB. +* lookup_and_goto_ptr tb_addr + +Look up a TB address ('tb_addr') and jump to it if valid. If not valid, +jump to the TCG epilogue to go back to the exec loop. + +This operation is optional. If the TCG backend does not implement the +goto_ptr opcode, emitting this op is equivalent to emitting exit_tb(0). + * qemu_ld_i32/i64 t0, t1, flags, memidx * qemu_st_i32/i64 t0, t1, flags, memidx Both t0 and t1 may be split into little-endian ordered pairs of registers if dealing with 64-bit quantities on a 32-bit host. The memidx selects the qemu tlb index to use (e.g. user or kernel access).
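As a rough illustration (a sketch, not text from this patch), a front end reaches these two opcodes through the tcg_gen_qemu_ld/st_* expanders. The SPARC translator above, for instance, emits a big-endian 64-bit load with 4-byte alignment as

    tcg_gen_qemu_ld_i64(tcg_ctx, cpu_dst_64, cpu_addr, dc->mem_idx,
                        MO_TEQ | MO_ALIGN_4);

which is lowered to a qemu_ld_i64 op carrying those MemOp flags and the current mem_idx.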
-The flags are the TCGMemOp bits, selecting the sign, width, and endianness
+The flags are the MemOp bits, selecting the sign, width, and endianness
 of the memory access. For a 32-bit host, qemu_ld/st_i64 is guaranteed
 to only be used with a 64-bit memory access specified in flags.
 
+********* Host vector operations
+
+All of the vector ops have two parameters, TCGOP_VECL & TCGOP_VECE.
+The former specifies the length of the vector in log2 64-bit units; the
+latter specifies the length of the element (if applicable) in log2 8-bit units.
+E.g. VECL=1 -> 64 << 1 -> v128, and VECE=2 -> 1 << 2 -> i32.
+
+* mov_vec v0, v1
+* ld_vec v0, t1
+* st_vec v0, t1
+
+  Move, load and store.
+
+* dup_vec v0, r1
+
+  Duplicate the low N bits of R1 into VECL/VECE copies across V0.
+
+* dupi_vec v0, c
+
+  Similarly, for a constant.
+  Smaller values will be replicated to host register size by the expanders.
+
+* dup2_vec v0, r1, r2
+
+  Duplicate r2:r1 into VECL/64 copies across V0. This opcode is
+  only present for 32-bit hosts.
+
+* add_vec v0, v1, v2
+
+  v0 = v1 + v2, in elements across the vector.
+
+* sub_vec v0, v1, v2
+
+  Similarly, v0 = v1 - v2.
+
+* mul_vec v0, v1, v2
+
+  Similarly, v0 = v1 * v2.
+
+* neg_vec v0, v1
+
+  Similarly, v0 = -v1.
+
+* abs_vec v0, v1
+
+  Similarly, v0 = v1 < 0 ? -v1 : v1, in elements across the vector.
+
+* smin_vec:
+* umin_vec:
+
+  Similarly, v0 = MIN(v1, v2), for signed and unsigned element types.
+
+* smax_vec:
+* umax_vec:
+
+  Similarly, v0 = MAX(v1, v2), for signed and unsigned element types.
+
+* ssadd_vec:
+* sssub_vec:
+* usadd_vec:
+* ussub_vec:
+
+  Signed and unsigned saturating addition and subtraction. If the true
+  result is not representable within the element type, the element is
+  set to the minimum or maximum value for the type.
+
+* and_vec v0, v1, v2
+* or_vec v0, v1, v2
+* xor_vec v0, v1, v2
+* andc_vec v0, v1, v2
+* orc_vec v0, v1, v2
+* not_vec v0, v1
+
+  Similarly, logical operations with and without complement.
+  Note that VECE is unused.
+
+* shli_vec v0, v1, i2
+* shls_vec v0, v1, s2
+
+  Shift all elements from v1 by a scalar i2/s2. I.e.
+
+  for (i = 0; i < VECL/VECE; ++i) {
+    v0[i] = v1[i] << s2;
+  }
+
+* shri_vec v0, v1, i2
+* sari_vec v0, v1, i2
+* shrs_vec v0, v1, s2
+* sars_vec v0, v1, s2
+
+  Similarly for logical and arithmetic right shift.
+
+* shlv_vec v0, v1, v2
+
+  Shift elements from v1 by elements from v2. I.e.
+
+  for (i = 0; i < VECL/VECE; ++i) {
+    v0[i] = v1[i] << v2[i];
+  }
+
+* shrv_vec v0, v1, v2
+* sarv_vec v0, v1, v2
+
+  Similarly for logical and arithmetic right shift.
+
+* cmp_vec v0, v1, v2, cond
+
+  Compare vectors by element, storing -1 for true and 0 for false.
+
+* bitsel_vec v0, v1, v2, v3
+
+  Bitwise select, v0 = (v2 & v1) | (v3 & ~v1), across the entire vector.
+
+* cmpsel_vec v0, c1, c2, v3, v4, cond
+
+  Select elements based on comparison results:
+  for (i = 0; i < n; ++i) {
+    v0[i] = (c1[i] cond c2[i]) ? v3[i] : v4[i].
+  }
+
 *********
 
 Note 1: Some shortcuts are defined when the last operand is known to be
@@ -454,8 +649,9 @@ function tcg_gen_xxx(args).
 
 4) Backend
 
-tcg-target.h contains the target specific definitions. tcg-target.c
-contains the target specific code.
+tcg-target.h contains the target specific definitions. tcg-target.inc.c
+contains the target specific code; it is #included by tcg/tcg.c, rather
+than being a standalone C file.
 
 4.1) Assumptions
 
@@ -466,13 +662,25 @@ On a 32 bit target, all 64 bit operations are converted to 32 bits.
 A few specific operations must be implemented to allow it (see
 add2_i32, sub2_i32, brcond2_i32).
 
+On a 64 bit target, the values are transferred between 32 and 64-bit
+registers using the following ops:
+- trunc_shr_i64_i32
+- ext_i32_i64
+- extu_i32_i64
+
+They ensure that the values are correctly truncated or extended when
+moved from a 32-bit to a 64-bit register or vice-versa (modeled in the
+plain-C sketch below). Note that trunc_shr_i64_i32 is an optional op.
+It is not necessary to implement it if all the following conditions
+are met:
+- 64-bit registers can hold 32-bit values
+- 32-bit values in a 64-bit register do not need to stay zero or
+  sign extended
+- all 32-bit TCG ops ignore the high part of 64-bit registers
+
 Floating point operations are not supported in this version. A
 previous incarnation of the code generator had full support of them,
 but it is better to concentrate on integer operations first.
 
-On a 64 bit target, no assumption is made in TCG about the storage of
-the 32 bit values in 64 bit registers.
-
 4.2) Constraints
 
 GCC like constraints are used to define the constraints of every
@@ -482,24 +690,29 @@ version. Aliases are specified in the input operands as for GCC.
 
 The same register may be used for both an input and an output, even when
 they are not explicitly aliased. If an op expands to multiple target
 instructions then care must be taken to avoid clobbering input values.
-GCC style "early clobber" outputs are not currently supported.
+GCC style "early clobber" outputs are supported, with '&'.
 
 A target can define specific register or constant constraints. If an
 operation uses a constant input constraint which does not allow all
 constants, it must also accept registers in order to have a fallback.
+The constraint 'i' is defined generically to accept any constant.
+The constraint 'r' is not defined generically, but is consistently
+used by each backend to indicate all registers.
 
 The movi_i32 and movi_i64 operations must accept any constants.
 
 The mov_i32 and mov_i64 operations must accept any registers of the
 same type.
 
-The ld/st instructions must accept signed 32 bit constant offsets. It
-can be implemented by reserving a specific register to compute the
-address if the offset is too big.
+The ld/st/sti instructions must accept signed 32 bit constant offsets.
+This can be implemented by reserving a specific register in which to
+compute the address if the offset is too big.
 
 The ld/st instructions must accept any destination (ld) or source (st)
 register.
 
+The sti instruction may fail if it cannot store the given constant.
+
 4.3) Function call assumptions
 
 - The only supported types for parameters and return value are: 32 and
diff --git a/qemu/tcg/TODO b/qemu/tcg/TODO
deleted file mode 100644
index 07478477..00000000
--- a/qemu/tcg/TODO
+++ /dev/null
@@ -1,14 +0,0 @@
-- Add new instructions such as: clz, ctz, popcnt.
-
-- See if it is worth exporting mul2, mulu2, div2, divu2.
-
-- Support of globals saved in fixed registers between TBs.
-
-Ideas:
-
-- Move the slow part of the qemu_ld/st ops after the end of the TB.
-
-- Change exception syntax to get closer to QOP system (exception
-  parameters given with a specific instruction).
-
-- Add float and vector support.
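
To make section 4.1's 32/64-bit transfer ops concrete, here is a minimal
plain-C model of their semantics. This is an editor's sketch, not code
from this patch; trunc_shr_i64_i32 is the op that the opcode list earlier
in the README splits into extrl_i64_i32/extrh_i64_i32.

  #include <stdint.h>

  /* ext_i32_i64: the 64-bit register receives the sign-extended value. */
  static int64_t ext_i32_i64(int32_t t1) { return (int64_t)t1; }

  /* extu_i32_i64: the 64-bit register receives the zero-extended value. */
  static uint64_t extu_i32_i64(uint32_t t1) { return (uint64_t)t1; }

  /* trunc_shr_i64_i32: shift right by pos, keep the low 32 bits.
     pos = 0 and pos = 32 correspond to extrl_i64_i32 and extrh_i64_i32. */
  static uint32_t trunc_shr_i64_i32(uint64_t t1, unsigned pos)
  {
      return (uint32_t)(t1 >> pos);
  }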
diff --git a/qemu/tcg/aarch64/tcg-target.c b/qemu/tcg/aarch64/tcg-target.c deleted file mode 100644 index ce8360f6..00000000 --- a/qemu/tcg/aarch64/tcg-target.c +++ /dev/null @@ -1,1814 +0,0 @@ -/* - * Initial TCG Implementation for aarch64 - * - * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH - * Written by Claudio Fontana - * - * This work is licensed under the terms of the GNU GPL, version 2 or - * (at your option) any later version. - * - * See the COPYING file in the top-level directory for details. - */ - -#include "tcg-be-ldst.h" -#include "qemu/bitops.h" - -/* We're going to re-use TCGType in setting of the SF bit, which controls - the size of the operation performed. If we know the values match, it - makes things much cleaner. */ -QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1); - -#ifndef NDEBUG -static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { - "%x0", "%x1", "%x2", "%x3", "%x4", "%x5", "%x6", "%x7", - "%x8", "%x9", "%x10", "%x11", "%x12", "%x13", "%x14", "%x15", - "%x16", "%x17", "%x18", "%x19", "%x20", "%x21", "%x22", "%x23", - "%x24", "%x25", "%x26", "%x27", "%x28", "%fp", "%x30", "%sp", -}; -#endif /* NDEBUG */ - -static const int tcg_target_reg_alloc_order[] = { - TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, - TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, - TCG_REG_X28, /* we will reserve this for GUEST_BASE if configured */ - - TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, - TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, - TCG_REG_X16, TCG_REG_X17, - - TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, - TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, - - /* X18 reserved by system */ - /* X19 reserved for AREG0 */ - /* X29 reserved as fp */ - /* X30 reserved as temporary */ -}; - -static const int tcg_target_call_iarg_regs[8] = { - TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, - TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7 -}; -static const int tcg_target_call_oarg_regs[1] = { - TCG_REG_X0 -}; - -#define TCG_REG_TMP TCG_REG_X30 - -#ifndef CONFIG_SOFTMMU -# ifdef CONFIG_USE_GUEST_BASE -# define TCG_REG_GUEST_BASE TCG_REG_X28 -# else -# define TCG_REG_GUEST_BASE TCG_REG_XZR -# endif -#endif - -static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target) -{ - ptrdiff_t offset = target - code_ptr; - assert(offset == sextract64(offset, 0, 26)); - /* read instruction, mask away previous PC_REL26 parameter contents, - set the proper offset, then write back the instruction. 
*/ - *code_ptr = deposit32(*code_ptr, 0, 26, offset); -} - -static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target) -{ - ptrdiff_t offset = target - code_ptr; - assert(offset == sextract64(offset, 0, 19)); - *code_ptr = deposit32(*code_ptr, 5, 19, offset); -} - -static inline void patch_reloc(tcg_insn_unit *code_ptr, int type, - intptr_t value, intptr_t addend) -{ - assert(addend == 0); - switch (type) { - case R_AARCH64_JUMP26: - case R_AARCH64_CALL26: - reloc_pc26(code_ptr, (tcg_insn_unit *)value); - break; - case R_AARCH64_CONDBR19: - reloc_pc19(code_ptr, (tcg_insn_unit *)value); - break; - default: - tcg_abort(); - } -} - -#define TCG_CT_CONST_AIMM 0x100 -#define TCG_CT_CONST_LIMM 0x200 -#define TCG_CT_CONST_ZERO 0x400 -#define TCG_CT_CONST_MONE 0x800 - -/* parse target specific constraints */ -static int target_parse_constraint(TCGArgConstraint *ct, - const char **pct_str) -{ - const char *ct_str = *pct_str; - - switch (ct_str[0]) { - case 'r': - ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1); - break; - case 'l': /* qemu_ld / qemu_st address, data_reg */ - ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1); -#ifdef CONFIG_SOFTMMU - /* x0 and x1 will be overwritten when reading the tlb entry, - and x2, and x3 for helper args, better to avoid using them. */ - tcg_regset_reset_reg(ct->u.regs, TCG_REG_X0); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_X1); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_X2); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3); -#endif - break; - case 'A': /* Valid for arithmetic immediate (positive or negative). */ - ct->ct |= TCG_CT_CONST_AIMM; - break; - case 'L': /* Valid for logical immediate. */ - ct->ct |= TCG_CT_CONST_LIMM; - break; - case 'M': /* minus one */ - ct->ct |= TCG_CT_CONST_MONE; - break; - case 'Z': /* zero */ - ct->ct |= TCG_CT_CONST_ZERO; - break; - default: - return -1; - } - - ct_str++; - *pct_str = ct_str; - return 0; -} - -static inline bool is_aimm(uint64_t val) -{ - return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0; -} - -static inline bool is_limm(uint64_t val) -{ - /* Taking a simplified view of the logical immediates for now, ignoring - the replication that can happen across the field. Match bit patterns - of the forms - 0....01....1 - 0..01..10..0 - and their inverses. */ - - /* Make things easier below, by testing the form with msb clear. 
*/ - if ((int64_t)val < 0) { - val = ~val; - } - if (val == 0) { - return false; - } - val += val & -val; - return (val & (val - 1)) == 0; -} - -static int tcg_target_const_match(tcg_target_long val, TCGType type, - const TCGArgConstraint *arg_ct) -{ - int ct = arg_ct->ct; - - if (ct & TCG_CT_CONST) { - return 1; - } - if (type == TCG_TYPE_I32) { - val = (int32_t)val; - } - if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) { - return 1; - } - if ((ct & TCG_CT_CONST_LIMM) && is_limm(val)) { - return 1; - } - if ((ct & TCG_CT_CONST_ZERO) && val == 0) { - return 1; - } - if ((ct & TCG_CT_CONST_MONE) && val == -1) { - return 1; - } - - return 0; -} - -enum aarch64_cond_code { - COND_EQ = 0x0, - COND_NE = 0x1, - COND_CS = 0x2, /* Unsigned greater or equal */ - COND_HS = COND_CS, /* ALIAS greater or equal */ - COND_CC = 0x3, /* Unsigned less than */ - COND_LO = COND_CC, /* ALIAS Lower */ - COND_MI = 0x4, /* Negative */ - COND_PL = 0x5, /* Zero or greater */ - COND_VS = 0x6, /* Overflow */ - COND_VC = 0x7, /* No overflow */ - COND_HI = 0x8, /* Unsigned greater than */ - COND_LS = 0x9, /* Unsigned less or equal */ - COND_GE = 0xa, - COND_LT = 0xb, - COND_GT = 0xc, - COND_LE = 0xd, - COND_AL = 0xe, - COND_NV = 0xf, /* behaves like COND_AL here */ -}; - -static const enum aarch64_cond_code tcg_cond_to_aarch64[] = { - [TCG_COND_EQ] = COND_EQ, - [TCG_COND_NE] = COND_NE, - [TCG_COND_LT] = COND_LT, - [TCG_COND_GE] = COND_GE, - [TCG_COND_LE] = COND_LE, - [TCG_COND_GT] = COND_GT, - /* unsigned */ - [TCG_COND_LTU] = COND_LO, - [TCG_COND_GTU] = COND_HI, - [TCG_COND_GEU] = COND_HS, - [TCG_COND_LEU] = COND_LS, -}; - -typedef enum { - LDST_ST = 0, /* store */ - LDST_LD = 1, /* load */ - LDST_LD_S_X = 2, /* load and sign-extend into Xt */ - LDST_LD_S_W = 3, /* load and sign-extend into Wt */ -} AArch64LdstType; - -/* We encode the format of the insn into the beginning of the name, so that - we can have the preprocessor help "typecheck" the insn vs the output - function. Arm didn't provide us with nice names for the formats, so we - use the section number of the architecture reference manual in which the - instruction group is described. */ -typedef enum { - /* Compare and branch (immediate). */ - I3201_CBZ = 0x34000000, - I3201_CBNZ = 0x35000000, - - /* Conditional branch (immediate). */ - I3202_B_C = 0x54000000, - - /* Unconditional branch (immediate). */ - I3206_B = 0x14000000, - I3206_BL = 0x94000000, - - /* Unconditional branch (register). */ - I3207_BR = 0xd61f0000, - I3207_BLR = 0xd63f0000, - I3207_RET = 0xd65f0000, - - /* Load/store register. Described here as 3.3.12, but the helper - that emits them can transform to 3.3.10 or 3.3.13. 
*/ - I3312_STRB = 0x38000000 | LDST_ST << 22 | MO_8 << 30, - I3312_STRH = 0x38000000 | LDST_ST << 22 | MO_16 << 30, - I3312_STRW = 0x38000000 | LDST_ST << 22 | MO_32 << 30, - I3312_STRX = 0x38000000 | LDST_ST << 22 | MO_64 << 30, - - I3312_LDRB = 0x38000000 | LDST_LD << 22 | MO_8 << 30, - I3312_LDRH = 0x38000000 | LDST_LD << 22 | MO_16 << 30, - I3312_LDRW = 0x38000000 | LDST_LD << 22 | MO_32 << 30, - I3312_LDRX = 0x38000000 | LDST_LD << 22 | MO_64 << 30, - - I3312_LDRSBW = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30, - I3312_LDRSHW = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30, - - I3312_LDRSBX = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30, - I3312_LDRSHX = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30, - I3312_LDRSWX = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30, - - I3312_TO_I3310 = 0x00206800, - I3312_TO_I3313 = 0x01000000, - - /* Load/store register pair instructions. */ - I3314_LDP = 0x28400000, - I3314_STP = 0x28000000, - - /* Add/subtract immediate instructions. */ - I3401_ADDI = 0x11000000, - I3401_ADDSI = 0x31000000, - I3401_SUBI = 0x51000000, - I3401_SUBSI = 0x71000000, - - /* Bitfield instructions. */ - I3402_BFM = 0x33000000, - I3402_SBFM = 0x13000000, - I3402_UBFM = 0x53000000, - - /* Extract instruction. */ - I3403_EXTR = 0x13800000, - - /* Logical immediate instructions. */ - I3404_ANDI = 0x12000000, - I3404_ORRI = 0x32000000, - I3404_EORI = 0x52000000, - - /* Move wide immediate instructions. */ - I3405_MOVN = 0x12800000, - I3405_MOVZ = 0x52800000, - I3405_MOVK = 0x72800000, - - /* PC relative addressing instructions. */ - I3406_ADR = 0x10000000, - I3406_ADRP = 0x90000000, - - /* Add/subtract shifted register instructions (without a shift). */ - I3502_ADD = 0x0b000000, - I3502_ADDS = 0x2b000000, - I3502_SUB = 0x4b000000, - I3502_SUBS = 0x6b000000, - - /* Add/subtract shifted register instructions (with a shift). */ - I3502S_ADD_LSL = I3502_ADD, - - /* Add/subtract with carry instructions. */ - I3503_ADC = 0x1a000000, - I3503_SBC = 0x5a000000, - - /* Conditional select instructions. */ - I3506_CSEL = 0x1a800000, - I3506_CSINC = 0x1a800400, - - /* Data-processing (1 source) instructions. */ - I3507_REV16 = 0x5ac00400, - I3507_REV32 = 0x5ac00800, - I3507_REV64 = 0x5ac00c00, - - /* Data-processing (2 source) instructions. */ - I3508_LSLV = 0x1ac02000, - I3508_LSRV = 0x1ac02400, - I3508_ASRV = 0x1ac02800, - I3508_RORV = 0x1ac02c00, - I3508_SMULH = 0x9b407c00, - I3508_UMULH = 0x9bc07c00, - I3508_UDIV = 0x1ac00800, - I3508_SDIV = 0x1ac00c00, - - /* Data-processing (3 source) instructions. */ - I3509_MADD = 0x1b000000, - I3509_MSUB = 0x1b008000, - - /* Logical shifted register instructions (without a shift). */ - I3510_AND = 0x0a000000, - I3510_BIC = 0x0a200000, - I3510_ORR = 0x2a000000, - I3510_ORN = 0x2a200000, - I3510_EOR = 0x4a000000, - I3510_EON = 0x4a200000, - I3510_ANDS = 0x6a000000, -} AArch64Insn; - -static inline uint32_t tcg_in32(TCGContext *s) -{ - uint32_t v = *(uint32_t *)s->code_ptr; - return v; -} - -/* Emit an opcode with "type-checking" of the format. */ -#define tcg_out_insn(S, FMT, OP, ...) 
\ - glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__) - -static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rt, int imm19) -{ - tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt); -} - -static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn, - TCGCond c, int imm19) -{ - tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5); -} - -static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26) -{ - tcg_out32(s, insn | (imm26 & 0x03ffffff)); -} - -static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn) -{ - tcg_out32(s, insn | rn << 5); -} - -static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn, - TCGReg r1, TCGReg r2, TCGReg rn, - tcg_target_long ofs, bool pre, bool w) -{ - insn |= 1u << 31; /* ext */ - insn |= pre << 24; - insn |= w << 23; - - assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0); - insn |= (ofs & (0x7f << 3)) << (15 - 3); - - tcg_out32(s, insn | r2 << 10 | rn << 5 | r1); -} - -static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rd, TCGReg rn, uint64_t aimm) -{ - if (aimm > 0xfff) { - assert((aimm & 0xfff) == 0); - aimm >>= 12; - assert(aimm <= 0xfff); - aimm |= 1 << 12; /* apply LSL 12 */ - } - tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd); -} - -/* This function can be used for both 3.4.2 (Bitfield) and 3.4.4 - (Logical immediate). Both insn groups have N, IMMR and IMMS fields - that feed the DecodeBitMasks pseudo function. */ -static void tcg_out_insn_3402(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rd, TCGReg rn, int n, int immr, int imms) -{ - tcg_out32(s, insn | ext << 31 | n << 22 | immr << 16 | imms << 10 - | rn << 5 | rd); -} - -#define tcg_out_insn_3404 tcg_out_insn_3402 - -static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rd, TCGReg rn, TCGReg rm, int imms) -{ - tcg_out32(s, insn | ext << 31 | ext << 22 | rm << 16 | imms << 10 - | rn << 5 | rd); -} - -/* This function is used for the Move (wide immediate) instruction group. - Note that SHIFT is a full shift count, not the 2 bit HW field. */ -static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rd, uint16_t half, unsigned shift) -{ - assert((shift & ~0x30) == 0); - tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd); -} - -static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn, - TCGReg rd, int64_t disp) -{ - tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd); -} - -/* This function is for both 3.5.2 (Add/Subtract shifted register), for - the rare occasion when we actually want to supply a shift amount. */ -static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn, - TCGType ext, TCGReg rd, TCGReg rn, - TCGReg rm, int imm6) -{ - tcg_out32(s, insn | ext << 31 | rm << 16 | imm6 << 10 | rn << 5 | rd); -} - -/* This function is for 3.5.2 (Add/subtract shifted register), - and 3.5.10 (Logical shifted register), for the vast majorty of cases - when we don't want to apply a shift. Thus it can also be used for - 3.5.3 (Add/subtract with carry) and 3.5.8 (Data processing 2 source). 
*/ -static void tcg_out_insn_3502(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rd, TCGReg rn, TCGReg rm) -{ - tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd); -} - -#define tcg_out_insn_3503 tcg_out_insn_3502 -#define tcg_out_insn_3508 tcg_out_insn_3502 -#define tcg_out_insn_3510 tcg_out_insn_3502 - -static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rd, TCGReg rn, TCGReg rm, TCGCond c) -{ - tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd - | tcg_cond_to_aarch64[c] << 12); -} - -static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rd, TCGReg rn) -{ - tcg_out32(s, insn | ext << 31 | rn << 5 | rd); -} - -static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra) -{ - tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd); -} - -static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn, - TCGReg rd, TCGReg base, TCGReg regoff) -{ - /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ - tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 | base << 5 | rd); -} - - -static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn, - TCGReg rd, TCGReg rn, intptr_t offset) -{ - tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | rd); -} - -static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn, - TCGReg rd, TCGReg rn, uintptr_t scaled_uimm) -{ - /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ - tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10 | rn << 5 | rd); -} - -/* Register to register move using ORR (shifted register with no shift). */ -static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm) -{ - tcg_out_insn(s, 3510, ORR, ext, rd, TCG_REG_XZR, rm); -} - -/* Register to register move using ADDI (move to/from SP). */ -static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) -{ - tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0); -} - -/* This function is used for the Logical (immediate) instruction group. - The value of LIMM must satisfy IS_LIMM. See the comment above about - only supporting simplified logical immediates. */ -static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext, - TCGReg rd, TCGReg rn, uint64_t limm) -{ - unsigned h, l, r, c; - - assert(is_limm(limm)); - - h = clz64(limm); - l = ctz64(limm); - if (l == 0) { - r = 0; /* form 0....01....1 */ - c = ctz64(~limm) - 1; - if (h == 0) { - r = clz64(~limm); /* form 1..10..01..1 */ - c += r; - } - } else { - r = 64 - l; /* form 1....10....0 or 0..01..10..0 */ - c = r - h - 1; - } - if (ext == TCG_TYPE_I32) { - r &= 31; - c &= 31; - } - - tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c); -} - -static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, - tcg_target_long value) -{ - AArch64Insn insn; - int i, wantinv, shift; - tcg_target_long svalue = value; - tcg_target_long ivalue = ~value; - tcg_target_long imask; - - /* For 32-bit values, discard potential garbage in value. For 64-bit - values within [2**31, 2**32-1], we can create smaller sequences by - interpreting this as a negative 32-bit number, while ensuring that - the high 32 bits are cleared by setting SF=0. */ - if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) { - svalue = (int32_t)value; - value = (uint32_t)value; - ivalue = (uint32_t)ivalue; - type = TCG_TYPE_I32; - } - - /* Speed things up by handling the common case of small positive - and negative values specially. 
*/ - if ((value & ~0xffffull) == 0) { - tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0); - return; - } else if ((ivalue & ~0xffffull) == 0) { - tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0); - return; - } - - /* Check for bitfield immediates. For the benefit of 32-bit quantities, - use the sign-extended value. That lets us match rotated values such - as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff. */ - if (is_limm(svalue)) { - tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue); - return; - } - - /* Look for host pointer values within 4G of the PC. This happens - often when loading pointers to QEMU's own data structures. */ - if (type == TCG_TYPE_I64) { - tcg_target_long disp = (value >> 12) - ((intptr_t)s->code_ptr >> 12); - if (disp == sextract64(disp, 0, 21)) { - tcg_out_insn(s, 3406, ADRP, rd, disp); - if (value & 0xfff) { - tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff); - } - return; - } - } - - /* Would it take fewer insns to begin with MOVN? For the value and its - inverse, count the number of 16-bit lanes that are 0. */ - for (i = wantinv = imask = 0; i < 64; i += 16) { - tcg_target_long mask = 0xffffull << i; - if ((value & mask) == 0) { - wantinv -= 1; - } - if ((ivalue & mask) == 0) { - wantinv += 1; - imask |= mask; - } - } - - /* If we had more 0xffff than 0x0000, invert VALUE and use MOVN. */ - insn = I3405_MOVZ; - if (wantinv > 0) { - value = ivalue; - insn = I3405_MOVN; - } - - /* Find the lowest lane that is not 0x0000. */ - shift = ctz64(value) & (63 & -16); - tcg_out_insn_3405(s, insn, type, rd, value >> shift, shift); - - if (wantinv > 0) { - /* Re-invert the value, so MOVK sees non-inverted bits. */ - value = ~value; - /* Clear out all the 0xffff lanes. */ - value ^= imask; - } - /* Clear out the lane that we just set. */ - value &= ~(0xffffUL << shift); - - /* Iterate until all lanes have been set, and thus cleared from VALUE. */ - while (value) { - shift = ctz64(value) & (63 & -16); - tcg_out_insn(s, 3405, MOVK, type, rd, value >> shift, shift); - value &= ~(0xffffUL << shift); - } -} - -/* Define something more legible for general use. */ -#define tcg_out_ldst_r tcg_out_insn_3310 - -static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, - TCGReg rd, TCGReg rn, intptr_t offset) -{ - TCGMemOp size = (uint32_t)insn >> 30; - - /* If the offset is naturally aligned and in range, then we can - use the scaled uimm12 encoding */ - if (offset >= 0 && !(offset & ((1 << size) - 1))) { - uintptr_t scaled_uimm = offset >> size; - if (scaled_uimm <= 0xfff) { - tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm); - return; - } - } - - /* Small signed offsets can use the unscaled encoding. */ - if (offset >= -256 && offset < 256) { - tcg_out_insn_3312(s, insn, rd, rn, offset); - return; - } - - /* Worst-case scenario, move offset to temp register, use reg offset. */ - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset); - tcg_out_ldst_r(s, insn, rd, rn, TCG_REG_TMP); -} - -static inline void tcg_out_mov(TCGContext *s, - TCGType type, TCGReg ret, TCGReg arg) -{ - if (ret != arg) { - tcg_out_movr(s, type, ret, arg); - } -} - -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) -{ - tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_LDRW : I3312_LDRX, - arg, arg1, arg2); -} - -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) -{ - tcg_out_ldst(s, type == TCG_TYPE_I32 ? 
I3312_STRW : I3312_STRX, - arg, arg1, arg2); -} - -static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd, - TCGReg rn, unsigned int a, unsigned int b) -{ - tcg_out_insn(s, 3402, BFM, ext, rd, rn, ext, a, b); -} - -static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd, - TCGReg rn, unsigned int a, unsigned int b) -{ - tcg_out_insn(s, 3402, UBFM, ext, rd, rn, ext, a, b); -} - -static inline void tcg_out_sbfm(TCGContext *s, TCGType ext, TCGReg rd, - TCGReg rn, unsigned int a, unsigned int b) -{ - tcg_out_insn(s, 3402, SBFM, ext, rd, rn, ext, a, b); -} - -static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, - TCGReg rn, TCGReg rm, unsigned int a) -{ - tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a); -} - -static inline void tcg_out_shl(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rn, unsigned int m) -{ - int bits = ext ? 64 : 32; - int max = bits - 1; - tcg_out_ubfm(s, ext, rd, rn, bits - (m & max), max - (m & max)); -} - -static inline void tcg_out_shr(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rn, unsigned int m) -{ - int max = ext ? 63 : 31; - tcg_out_ubfm(s, ext, rd, rn, m & max, max); -} - -static inline void tcg_out_sar(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rn, unsigned int m) -{ - int max = ext ? 63 : 31; - tcg_out_sbfm(s, ext, rd, rn, m & max, max); -} - -static inline void tcg_out_rotr(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rn, unsigned int m) -{ - int max = ext ? 63 : 31; - tcg_out_extr(s, ext, rd, rn, rn, m & max); -} - -static inline void tcg_out_rotl(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rn, unsigned int m) -{ - int bits = ext ? 64 : 32; - int max = bits - 1; - tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max)); -} - -static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd, - TCGReg rn, unsigned lsb, unsigned width) -{ - unsigned size = ext ? 64 : 32; - unsigned a = (size - lsb) & (size - 1); - unsigned b = width - 1; - tcg_out_bfm(s, ext, rd, rn, a, b); -} - -static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a, - tcg_target_long b, bool const_b) -{ - if (const_b) { - /* Using CMP or CMN aliases. */ - if (b >= 0) { - tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b); - } else { - tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b); - } - } else { - /* Using CMP alias SUBS wzr, Wn, Wm */ - tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b); - } -} - -static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target) -{ - ptrdiff_t offset = target - s->code_ptr; - assert(offset == sextract64(offset, 0, 26)); - tcg_out_insn(s, 3206, B, offset); -} - -static inline void tcg_out_goto_noaddr(TCGContext *s) -{ - /* We pay attention here to not modify the branch target by reading from - the buffer. This ensure that caches and memory are kept coherent during - retranslation. Mask away possible garbage in the high bits for the - first translation, while keeping the offset bits for retranslation. */ - uint32_t old = tcg_in32(s); - tcg_out_insn(s, 3206, B, old); -} - -static inline void tcg_out_goto_cond_noaddr(TCGContext *s, TCGCond c) -{ - /* See comments in tcg_out_goto_noaddr. 
*/ - uint32_t old = tcg_in32(s) >> 5; - tcg_out_insn(s, 3202, B_C, c, old); -} - -static inline void tcg_out_callr(TCGContext *s, TCGReg reg) -{ - tcg_out_insn(s, 3207, BLR, reg); -} - -static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *target) -{ - ptrdiff_t offset = target - s->code_ptr; - if (offset == sextract64(offset, 0, 26)) { - tcg_out_insn(s, 3206, BL, offset); - } else { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); - tcg_out_callr(s, TCG_REG_TMP); - } -} - -void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr) -{ - tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr; - tcg_insn_unit *target = (tcg_insn_unit *)addr; - - reloc_pc26(code_ptr, target); - flush_icache_range(jmp_addr, jmp_addr + 4); -} - -static inline void tcg_out_goto_label(TCGContext *s, int label_index) -{ - TCGLabel *l = &s->labels[label_index]; - - if (!l->has_value) { - tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, label_index, 0); - tcg_out_goto_noaddr(s); - } else { - tcg_out_goto(s, l->u.value_ptr); - } -} - -static void tcg_out_brcond(TCGContext *s, TCGMemOp ext, TCGCond c, TCGArg a, - TCGArg b, bool b_const, int label) -{ - TCGLabel *l = &s->labels[label]; - intptr_t offset; - bool need_cmp; - - if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) { - need_cmp = false; - } else { - need_cmp = true; - tcg_out_cmp(s, ext, a, b, b_const); - } - - if (!l->has_value) { - tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, label, 0); - offset = tcg_in32(s) >> 5; - } else { - offset = l->u.value_ptr - s->code_ptr; - assert(offset == sextract64(offset, 0, 19)); - } - - if (need_cmp) { - tcg_out_insn(s, 3202, B_C, c, offset); - } else if (c == TCG_COND_EQ) { - tcg_out_insn(s, 3201, CBZ, ext, a, offset); - } else { - tcg_out_insn(s, 3201, CBNZ, ext, a, offset); - } -} - -static inline void tcg_out_rev64(TCGContext *s, TCGReg rd, TCGReg rn) -{ - tcg_out_insn(s, 3507, REV64, TCG_TYPE_I64, rd, rn); -} - -static inline void tcg_out_rev32(TCGContext *s, TCGReg rd, TCGReg rn) -{ - tcg_out_insn(s, 3507, REV32, TCG_TYPE_I32, rd, rn); -} - -static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) -{ - tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); -} - -static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, - TCGReg rd, TCGReg rn) -{ - /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ - int bits = (8 << s_bits) - 1; - tcg_out_sbfm(s, ext, rd, rn, 0, bits); -} - -static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits, - TCGReg rd, TCGReg rn) -{ - /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ - int bits = (8 << s_bits) - 1; - tcg_out_ubfm(s, 0, rd, rn, 0, bits); -} - -static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, - TCGReg rn, int64_t aimm) -{ - if (aimm >= 0) { - tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm); - } else { - tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm); - } -} - -static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl, - TCGReg rh, TCGReg al, TCGReg ah, - tcg_target_long bl, tcg_target_long bh, - bool const_bl, bool const_bh, bool sub) -{ - TCGReg orig_rl = rl; - AArch64Insn insn; - - if (rl == ah || (!const_bh && rl == bh)) { - rl = TCG_REG_TMP; - } - - if (const_bl) { - insn = I3401_ADDSI; - if ((bl < 0) ^ sub) { - insn = I3401_SUBSI; - bl = -bl; - } - tcg_out_insn_3401(s, insn, ext, rl, al, bl); - } else { - tcg_out_insn_3502(s, sub ? 
I3502_SUBS : I3502_ADDS, ext, rl, al, bl); - } - - insn = I3503_ADC; - if (const_bh) { - /* Note that the only two constants we support are 0 and -1, and - that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. */ - if ((bh != 0) ^ sub) { - insn = I3503_SBC; - } - bh = TCG_REG_XZR; - } else if (sub) { - insn = I3503_SBC; - } - tcg_out_insn_3503(s, insn, ext, rh, ah, bh); - - tcg_out_mov(s, ext, orig_rl, rl); -} - -#ifdef CONFIG_SOFTMMU -/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * int mmu_idx, uintptr_t ra) - */ -static void * const qemu_ld_helpers[16] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEQ] = helper_be_ldq_mmu, -}; - -/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, int mmu_idx, uintptr_t ra) - */ -static void * const qemu_st_helpers[16] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEQ] = helper_be_stq_mmu, -}; - -static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target) -{ - ptrdiff_t offset = tcg_pcrel_diff(s, target); - assert(offset == sextract64(offset, 0, 21)); - tcg_out_insn(s, 3406, ADR, rd, offset); -} - -static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) -{ - TCGMemOp opc = lb->opc; - TCGMemOp size = opc & MO_SIZE; - - reloc_pc19(lb->label_ptr[0], s->code_ptr); - - tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0); - tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index); - tcg_out_adr(s, TCG_REG_X3, lb->raddr); - tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); - if (opc & MO_SIGN) { - tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0); - } else { - tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0); - } - - tcg_out_goto(s, lb->raddr); -} - -static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) -{ - TCGMemOp opc = lb->opc; - TCGMemOp size = opc & MO_SIZE; - - reloc_pc19(lb->label_ptr[0], s->code_ptr); - - tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0); - tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); - tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg); - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index); - tcg_out_adr(s, TCG_REG_X4, lb->raddr); - tcg_out_call(s, qemu_st_helpers[opc]); - tcg_out_goto(s, lb->raddr); -} - -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, - TCGType ext, TCGReg data_reg, TCGReg addr_reg, - int mem_index, tcg_insn_unit *raddr, - tcg_insn_unit *label_ptr) -{ - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->opc = opc; - label->type = ext; - label->datalo_reg = data_reg; - label->addrlo_reg = addr_reg; - label->mem_index = mem_index; - label->raddr = raddr; - label->label_ptr[0] = label_ptr; -} - -/* Load and compare a TLB entry, emitting the conditional jump to the - slow path for the failure case, which will be patched later when finalizing - the slow path. Generated code returns the host addend in X1, - clobbers X0,X2,X3,TMP. 
*/ -static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits, - tcg_insn_unit **label_ptr, int mem_index, - bool is_read) -{ - TCGReg base = TCG_AREG0; - int tlb_offset = is_read ? - offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) - : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write); - - /* Extract the TLB index from the address into X0. - X0 = - addr_reg */ - tcg_out_ubfm(s, TARGET_LONG_BITS == 64, TCG_REG_X0, addr_reg, - TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS); - - /* Store the page mask part of the address and the low s_bits into X3. - Later this allows checking for equality and alignment at the same time. - X3 = addr_reg & (PAGE_MASK | ((1 << s_bits) - 1)) */ - tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, TCG_REG_X3, - addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); - - /* Add any "high bits" from the tlb offset to the env address into X2, - to take advantage of the LSL12 form of the ADDI instruction. - X2 = env + (tlb_offset & 0xfff000) */ - if (tlb_offset & 0xfff000) { - tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_X2, base, - tlb_offset & 0xfff000); - base = TCG_REG_X2; - } - - /* Merge the tlb index contribution into X2. - X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */ - tcg_out_insn(s, 3502S, ADD_LSL, TCG_TYPE_I64, TCG_REG_X2, base, - TCG_REG_X0, CPU_TLB_ENTRY_BITS); - - /* Merge "low bits" from tlb offset, load the tlb comparator into X0. - X0 = load [X2 + (tlb_offset & 0x000fff)] */ - tcg_out_ldst(s, TARGET_LONG_BITS == 32 ? I3312_LDRW : I3312_LDRX, - TCG_REG_X0, TCG_REG_X2, tlb_offset & 0xfff); - - /* Load the tlb addend. Do that early to avoid stalling. - X1 = load [X2 + (tlb_offset & 0xfff) + offsetof(addend)] */ - tcg_out_ldst(s, I3312_LDRX, TCG_REG_X1, TCG_REG_X2, - (tlb_offset & 0xfff) + (offsetof(CPUTLBEntry, addend)) - - (is_read ? offsetof(CPUTLBEntry, addr_read) - : offsetof(CPUTLBEntry, addr_write))); - - /* Perform the address comparison. */ - tcg_out_cmp(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, TCG_REG_X3, 0); - - /* If not equal, we jump to the slow path. */ - *label_ptr = s->code_ptr; - tcg_out_goto_cond_noaddr(s, TCG_COND_NE); -} - -#endif /* CONFIG_SOFTMMU */ - -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, - TCGReg data_r, TCGReg addr_r, TCGReg off_r) -{ - const TCGMemOp bswap = memop & MO_BSWAP; - - switch (memop & MO_SSIZE) { - case MO_UB: - tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, off_r); - break; - case MO_SB: - tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW, - data_r, addr_r, off_r); - break; - case MO_UW: - tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r); - if (bswap) { - tcg_out_rev16(s, data_r, data_r); - } - break; - case MO_SW: - if (bswap) { - tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r); - tcg_out_rev16(s, data_r, data_r); - tcg_out_sxt(s, ext, MO_16, data_r, data_r); - } else { - tcg_out_ldst_r(s, ext ? 
I3312_LDRSHX : I3312_LDRSHW, - data_r, addr_r, off_r); - } - break; - case MO_UL: - tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r); - if (bswap) { - tcg_out_rev32(s, data_r, data_r); - } - break; - case MO_SL: - if (bswap) { - tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r); - tcg_out_rev32(s, data_r, data_r); - tcg_out_sxt(s, TCG_TYPE_I64, MO_32, data_r, data_r); - } else { - tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, off_r); - } - break; - case MO_Q: - tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, off_r); - if (bswap) { - tcg_out_rev64(s, data_r, data_r); - } - break; - default: - tcg_abort(); - } -} - -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, - TCGReg data_r, TCGReg addr_r, TCGReg off_r) -{ - const TCGMemOp bswap = memop & MO_BSWAP; - - switch (memop & MO_SIZE) { - case MO_8: - tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, off_r); - break; - case MO_16: - if (bswap && data_r != TCG_REG_XZR) { - tcg_out_rev16(s, TCG_REG_TMP, data_r); - data_r = TCG_REG_TMP; - } - tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, off_r); - break; - case MO_32: - if (bswap && data_r != TCG_REG_XZR) { - tcg_out_rev32(s, TCG_REG_TMP, data_r); - data_r = TCG_REG_TMP; - } - tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, off_r); - break; - case MO_64: - if (bswap && data_r != TCG_REG_XZR) { - tcg_out_rev64(s, TCG_REG_TMP, data_r); - data_r = TCG_REG_TMP; - } - tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, off_r); - break; - default: - tcg_abort(); - } -} - -static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOp memop, TCGType ext, int mem_index) -{ -#ifdef CONFIG_SOFTMMU - TCGMemOp s_bits = memop & MO_SIZE; - tcg_insn_unit *label_ptr; - - tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1); - tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_REG_X1); - add_qemu_ldst_label(s, true, memop, ext, data_reg, addr_reg, - mem_index, s->code_ptr, label_ptr); -#else /* !CONFIG_SOFTMMU */ - tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, - GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR); -#endif /* CONFIG_SOFTMMU */ -} - -static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOp memop, int mem_index) -{ -#ifdef CONFIG_SOFTMMU - TCGMemOp s_bits = memop & MO_SIZE; - tcg_insn_unit *label_ptr; - - tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0); - tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1); - add_qemu_ldst_label(s, false, memop, s_bits == MO_64, data_reg, addr_reg, - mem_index, s->code_ptr, label_ptr); -#else /* !CONFIG_SOFTMMU */ - tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, - GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR); -#endif /* CONFIG_SOFTMMU */ -} - -static tcg_insn_unit *tb_ret_addr; - -static void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) -{ - /* 99% of the time, we can signal the use of extension registers - by looking to see if the opcode handles 64-bit data. */ - TCGType ext = (s->tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0; - - /* Hoist the loads of the most common arguments. */ - TCGArg a0 = args[0]; - TCGArg a1 = args[1]; - TCGArg a2 = args[2]; - int c2 = const_args[2]; - - /* Some operands are defined with "rZ" constraint, a register or - the zero register. These need not actually test args[I] == 0. */ -#define REG0(I) (const_args[I] ? 
TCG_REG_XZR : (TCGReg)args[I]) - - switch (opc) { - case INDEX_op_exit_tb: - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0); - tcg_out_goto(s, tb_ret_addr); - break; - - case INDEX_op_goto_tb: -#ifndef USE_DIRECT_JUMP -#error "USE_DIRECT_JUMP required for aarch64" -#endif - assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */ - s->tb_jmp_offset[a0] = tcg_current_code_size(s); - /* actual branch destination will be patched by - aarch64_tb_set_jmp_target later, beware retranslation. */ - tcg_out_goto_noaddr(s); - s->tb_next_offset[a0] = tcg_current_code_size(s); - break; - - case INDEX_op_br: - tcg_out_goto_label(s, a0); - break; - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - tcg_out_ldst(s, I3312_LDRB, a0, a1, a2); - break; - case INDEX_op_ld8s_i32: - tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2); - break; - case INDEX_op_ld8s_i64: - tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2); - break; - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - tcg_out_ldst(s, I3312_LDRH, a0, a1, a2); - break; - case INDEX_op_ld16s_i32: - tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2); - break; - case INDEX_op_ld16s_i64: - tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2); - break; - case INDEX_op_ld_i32: - case INDEX_op_ld32u_i64: - tcg_out_ldst(s, I3312_LDRW, a0, a1, a2); - break; - case INDEX_op_ld32s_i64: - tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2); - break; - case INDEX_op_ld_i64: - tcg_out_ldst(s, I3312_LDRX, a0, a1, a2); - break; - - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2); - break; - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2); - break; - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2); - break; - case INDEX_op_st_i64: - tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2); - break; - - case INDEX_op_add_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_add_i64: - if (c2) { - tcg_out_addsubi(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2); - } - break; - - case INDEX_op_sub_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_sub_i64: - if (c2) { - tcg_out_addsubi(s, ext, a0, a1, -a2); - } else { - tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2); - } - break; - - case INDEX_op_neg_i64: - case INDEX_op_neg_i32: - tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1); - break; - - case INDEX_op_and_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_and_i64: - if (c2) { - tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3510, AND, ext, a0, a1, a2); - } - break; - - case INDEX_op_andc_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_andc_i64: - if (c2) { - tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2); - } else { - tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2); - } - break; - - case INDEX_op_or_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_or_i64: - if (c2) { - tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2); - } - break; - - case INDEX_op_orc_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_orc_i64: - if (c2) { - tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2); - } else { - tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2); - } - break; - - case INDEX_op_xor_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_xor_i64: - if (c2) { - tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2); - } - break; - - case INDEX_op_eqv_i32: - a2 = 
(int32_t)a2; - /* FALLTHRU */ - case INDEX_op_eqv_i64: - if (c2) { - tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2); - } else { - tcg_out_insn(s, 3510, EON, ext, a0, a1, a2); - } - break; - - case INDEX_op_not_i64: - case INDEX_op_not_i32: - tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1); - break; - - case INDEX_op_mul_i64: - case INDEX_op_mul_i32: - tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR); - break; - - case INDEX_op_div_i64: - case INDEX_op_div_i32: - tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2); - break; - case INDEX_op_divu_i64: - case INDEX_op_divu_i32: - tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2); - break; - - case INDEX_op_rem_i64: - case INDEX_op_rem_i32: - tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2); - tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); - break; - case INDEX_op_remu_i64: - case INDEX_op_remu_i32: - tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2); - tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); - break; - - case INDEX_op_shl_i64: - case INDEX_op_shl_i32: - if (c2) { - tcg_out_shl(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2); - } - break; - - case INDEX_op_shr_i64: - case INDEX_op_shr_i32: - if (c2) { - tcg_out_shr(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2); - } - break; - - case INDEX_op_sar_i64: - case INDEX_op_sar_i32: - if (c2) { - tcg_out_sar(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2); - } - break; - - case INDEX_op_rotr_i64: - case INDEX_op_rotr_i32: - if (c2) { - tcg_out_rotr(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2); - } - break; - - case INDEX_op_rotl_i64: - case INDEX_op_rotl_i32: - if (c2) { - tcg_out_rotl(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2); - tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP); - } - break; - - case INDEX_op_brcond_i32: - a1 = (int32_t)a1; - /* FALLTHRU */ - case INDEX_op_brcond_i64: - tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], args[3]); - break; - - case INDEX_op_setcond_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_setcond_i64: - tcg_out_cmp(s, ext, a1, a2, c2); - /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). 
*/ - tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR, - TCG_REG_XZR, tcg_invert_cond(args[3])); - break; - - case INDEX_op_movcond_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_movcond_i64: - tcg_out_cmp(s, ext, a1, a2, c2); - tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]); - break; - - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, a0, a1, a2, ext, args[3]); - break; - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, REG0(0), a1, a2, args[3]); - break; - - case INDEX_op_bswap64_i64: - tcg_out_rev64(s, a0, a1); - break; - case INDEX_op_bswap32_i64: - case INDEX_op_bswap32_i32: - tcg_out_rev32(s, a0, a1); - break; - case INDEX_op_bswap16_i64: - case INDEX_op_bswap16_i32: - tcg_out_rev16(s, a0, a1); - break; - - case INDEX_op_ext8s_i64: - case INDEX_op_ext8s_i32: - tcg_out_sxt(s, ext, MO_8, a0, a1); - break; - case INDEX_op_ext16s_i64: - case INDEX_op_ext16s_i32: - tcg_out_sxt(s, ext, MO_16, a0, a1); - break; - case INDEX_op_ext32s_i64: - tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1); - break; - case INDEX_op_ext8u_i64: - case INDEX_op_ext8u_i32: - tcg_out_uxt(s, MO_8, a0, a1); - break; - case INDEX_op_ext16u_i64: - case INDEX_op_ext16u_i32: - tcg_out_uxt(s, MO_16, a0, a1); - break; - case INDEX_op_ext32u_i64: - tcg_out_movr(s, TCG_TYPE_I32, a0, a1); - break; - - case INDEX_op_deposit_i64: - case INDEX_op_deposit_i32: - tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]); - break; - - case INDEX_op_add2_i32: - tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), - (int32_t)args[4], args[5], const_args[4], - const_args[5], false); - break; - case INDEX_op_add2_i64: - tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], - args[5], const_args[4], const_args[5], false); - break; - case INDEX_op_sub2_i32: - tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), - (int32_t)args[4], args[5], const_args[4], - const_args[5], true); - break; - case INDEX_op_sub2_i64: - tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], - args[5], const_args[4], const_args[5], true); - break; - - case INDEX_op_muluh_i64: - tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2); - break; - case INDEX_op_mulsh_i64: - tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2); - break; - - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ - case INDEX_op_movi_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. 
*/ - default: - tcg_abort(); - } - -#undef REG0 -} - -static const TCGTargetOpDef aarch64_op_defs[] = { - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - { INDEX_op_br, { } }, - - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - { INDEX_op_ld8u_i64, { "r", "r" } }, - { INDEX_op_ld8s_i64, { "r", "r" } }, - { INDEX_op_ld16u_i64, { "r", "r" } }, - { INDEX_op_ld16s_i64, { "r", "r" } }, - { INDEX_op_ld32u_i64, { "r", "r" } }, - { INDEX_op_ld32s_i64, { "r", "r" } }, - { INDEX_op_ld_i64, { "r", "r" } }, - - { INDEX_op_st8_i32, { "rZ", "r" } }, - { INDEX_op_st16_i32, { "rZ", "r" } }, - { INDEX_op_st_i32, { "rZ", "r" } }, - { INDEX_op_st8_i64, { "rZ", "r" } }, - { INDEX_op_st16_i64, { "rZ", "r" } }, - { INDEX_op_st32_i64, { "rZ", "r" } }, - { INDEX_op_st_i64, { "rZ", "r" } }, - - { INDEX_op_add_i32, { "r", "r", "rA" } }, - { INDEX_op_add_i64, { "r", "r", "rA" } }, - { INDEX_op_sub_i32, { "r", "r", "rA" } }, - { INDEX_op_sub_i64, { "r", "r", "rA" } }, - { INDEX_op_mul_i32, { "r", "r", "r" } }, - { INDEX_op_mul_i64, { "r", "r", "r" } }, - { INDEX_op_div_i32, { "r", "r", "r" } }, - { INDEX_op_div_i64, { "r", "r", "r" } }, - { INDEX_op_divu_i32, { "r", "r", "r" } }, - { INDEX_op_divu_i64, { "r", "r", "r" } }, - { INDEX_op_rem_i32, { "r", "r", "r" } }, - { INDEX_op_rem_i64, { "r", "r", "r" } }, - { INDEX_op_remu_i32, { "r", "r", "r" } }, - { INDEX_op_remu_i64, { "r", "r", "r" } }, - { INDEX_op_and_i32, { "r", "r", "rL" } }, - { INDEX_op_and_i64, { "r", "r", "rL" } }, - { INDEX_op_or_i32, { "r", "r", "rL" } }, - { INDEX_op_or_i64, { "r", "r", "rL" } }, - { INDEX_op_xor_i32, { "r", "r", "rL" } }, - { INDEX_op_xor_i64, { "r", "r", "rL" } }, - { INDEX_op_andc_i32, { "r", "r", "rL" } }, - { INDEX_op_andc_i64, { "r", "r", "rL" } }, - { INDEX_op_orc_i32, { "r", "r", "rL" } }, - { INDEX_op_orc_i64, { "r", "r", "rL" } }, - { INDEX_op_eqv_i32, { "r", "r", "rL" } }, - { INDEX_op_eqv_i64, { "r", "r", "rL" } }, - - { INDEX_op_neg_i32, { "r", "r" } }, - { INDEX_op_neg_i64, { "r", "r" } }, - { INDEX_op_not_i32, { "r", "r" } }, - { INDEX_op_not_i64, { "r", "r" } }, - - { INDEX_op_shl_i32, { "r", "r", "ri" } }, - { INDEX_op_shr_i32, { "r", "r", "ri" } }, - { INDEX_op_sar_i32, { "r", "r", "ri" } }, - { INDEX_op_rotl_i32, { "r", "r", "ri" } }, - { INDEX_op_rotr_i32, { "r", "r", "ri" } }, - { INDEX_op_shl_i64, { "r", "r", "ri" } }, - { INDEX_op_shr_i64, { "r", "r", "ri" } }, - { INDEX_op_sar_i64, { "r", "r", "ri" } }, - { INDEX_op_rotl_i64, { "r", "r", "ri" } }, - { INDEX_op_rotr_i64, { "r", "r", "ri" } }, - - { INDEX_op_brcond_i32, { "r", "rA" } }, - { INDEX_op_brcond_i64, { "r", "rA" } }, - { INDEX_op_setcond_i32, { "r", "r", "rA" } }, - { INDEX_op_setcond_i64, { "r", "r", "rA" } }, - { INDEX_op_movcond_i32, { "r", "r", "rA", "rZ", "rZ" } }, - { INDEX_op_movcond_i64, { "r", "r", "rA", "rZ", "rZ" } }, - - { INDEX_op_qemu_ld_i32, { "r", "l" } }, - { INDEX_op_qemu_ld_i64, { "r", "l" } }, - { INDEX_op_qemu_st_i32, { "lZ", "l" } }, - { INDEX_op_qemu_st_i64, { "lZ", "l" } }, - - { INDEX_op_bswap16_i32, { "r", "r" } }, - { INDEX_op_bswap32_i32, { "r", "r" } }, - { INDEX_op_bswap16_i64, { "r", "r" } }, - { INDEX_op_bswap32_i64, { "r", "r" } }, - { INDEX_op_bswap64_i64, { "r", "r" } }, - - { INDEX_op_ext8s_i32, { "r", "r" } }, - { INDEX_op_ext16s_i32, { "r", "r" } }, - { INDEX_op_ext8u_i32, { "r", "r" } }, - { INDEX_op_ext16u_i32, { "r", "r" } }, - - { INDEX_op_ext8s_i64, { 
"r", "r" } }, - { INDEX_op_ext16s_i64, { "r", "r" } }, - { INDEX_op_ext32s_i64, { "r", "r" } }, - { INDEX_op_ext8u_i64, { "r", "r" } }, - { INDEX_op_ext16u_i64, { "r", "r" } }, - { INDEX_op_ext32u_i64, { "r", "r" } }, - - { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, - { INDEX_op_deposit_i64, { "r", "0", "rZ" } }, - - { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, - { INDEX_op_add2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, - { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, - { INDEX_op_sub2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, - - { INDEX_op_muluh_i64, { "r", "r", "r" } }, - { INDEX_op_mulsh_i64, { "r", "r", "r" } }, - - { -1 }, -}; - -static void tcg_target_init(TCGContext *s) -{ - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); - - tcg_regset_set32(s->tcg_target_call_clobber_regs, 0, - (1 << TCG_REG_X0) | (1 << TCG_REG_X1) | - (1 << TCG_REG_X2) | (1 << TCG_REG_X3) | - (1 << TCG_REG_X4) | (1 << TCG_REG_X5) | - (1 << TCG_REG_X6) | (1 << TCG_REG_X7) | - (1 << TCG_REG_X8) | (1 << TCG_REG_X9) | - (1 << TCG_REG_X10) | (1 << TCG_REG_X11) | - (1 << TCG_REG_X12) | (1 << TCG_REG_X13) | - (1 << TCG_REG_X14) | (1 << TCG_REG_X15) | - (1 << TCG_REG_X16) | (1 << TCG_REG_X17) | - (1 << TCG_REG_X18) | (1 << TCG_REG_X30)); - - tcg_regset_clear(s->reserved_regs); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */ - - tcg_add_target_add_op_defs(s, aarch64_op_defs); -} - -/* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). */ -#define PUSH_SIZE ((30 - 19 + 1) * 8) - -#define FRAME_SIZE \ - ((PUSH_SIZE \ - + TCG_STATIC_CALL_ARGS_SIZE \ - + CPU_TEMP_BUF_NLONGS * sizeof(long) \ - + TCG_TARGET_STACK_ALIGN - 1) \ - & ~(TCG_TARGET_STACK_ALIGN - 1)) - -/* We're expecting a 2 byte uleb128 encoded value. */ -QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); - -/* We're expecting to use a single ADDI insn. */ -QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff); - -static void tcg_target_qemu_prologue(TCGContext *s) -{ - TCGReg r; - - /* Push (FP, LR) and allocate space for all saved registers. */ - tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR, - TCG_REG_SP, -PUSH_SIZE, 1, 1); - - /* Set up frame pointer for canonical unwinding. */ - tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP); - - /* Store callee-preserved regs x19..x28. */ - for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { - int ofs = (r - TCG_REG_X19 + 2) * 8; - tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0); - } - - /* Make stack space for TCG locals. */ - tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, - FRAME_SIZE - PUSH_SIZE); - - /* Inform TCG about how to find TCG locals with register, offset, size. */ - tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, - CPU_TEMP_BUF_NLONGS * sizeof(long)); - -#if defined(CONFIG_USE_GUEST_BASE) - if (GUEST_BASE) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, GUEST_BASE); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); - } -#endif - - tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); - tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]); - - tb_ret_addr = s->code_ptr; - - /* Remove TCG locals stack space. 
*/ - tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, - FRAME_SIZE - PUSH_SIZE); - - /* Restore registers x19..x28. */ - for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { - int ofs = (r - TCG_REG_X19 + 2) * 8; - tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0); - } - - /* Pop (FP, LR), restore SP to previous frame. */ - tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR, - TCG_REG_SP, PUSH_SIZE, 0, 1); - tcg_out_insn(s, 3207, RET, TCG_REG_LR); -} - -#define ELF_HOST_MACHINE EM_AARCH64 diff --git a/qemu/tcg/aarch64/tcg-target.h b/qemu/tcg/aarch64/tcg-target.h index 60c7493a..13993a70 100644 --- a/qemu/tcg/aarch64/tcg-target.h +++ b/qemu/tcg/aarch64/tcg-target.h @@ -10,10 +10,15 @@ * See the COPYING file in the top-level directory for details. */ -#ifndef TCG_TARGET_AARCH64 -#define TCG_TARGET_AARCH64 1 +#ifndef AARCH64_TCG_TARGET_H +#define AARCH64_TCG_TARGET_H + +#if defined(__APPLE__) +#include <libkern/OSCacheControl.h> +#endif #define TCG_TARGET_INSN_UNIT_SIZE 4 +#define TCG_TARGET_TLB_DISPLACEMENT_BITS 24 #undef TCG_TARGET_STACK_GROWSUP typedef enum { @@ -30,13 +35,22 @@ typedef enum { TCG_REG_SP = 31, TCG_REG_XZR = 31, + TCG_REG_V0 = 32, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, + TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, + TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, + TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, + TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, + TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, + TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, + TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, + /* Aliases. */ TCG_REG_FP = TCG_REG_X29, TCG_REG_LR = TCG_REG_X30, TCG_AREG0 = TCG_REG_X19, } TCGReg; -#define TCG_TARGET_NB_REGS 32 +#define TCG_TARGET_NB_REGS 64 /* used for function call generation */ #define TCG_REG_CALL_STACK TCG_REG_SP @@ -61,7 +75,13 @@ typedef enum { #define TCG_TARGET_HAS_eqv_i32 1 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 +#define TCG_TARGET_HAS_clz_i32 1 +#define TCG_TARGET_HAS_ctz_i32 1 +#define TCG_TARGET_HAS_ctpop_i32 0 #define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_extract_i32 1 +#define TCG_TARGET_HAS_sextract_i32 1 +#define TCG_TARGET_HAS_extract2_i32 1 #define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_add2_i32 1 #define TCG_TARGET_HAS_sub2_i32 1 @@ -69,7 +89,9 @@ typedef enum { #define TCG_TARGET_HAS_muls2_i32 0 #define TCG_TARGET_HAS_muluh_i32 0 #define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_trunc_shr_i32 0 +#define TCG_TARGET_HAS_extrl_i64_i32 0 +#define TCG_TARGET_HAS_extrh_i64_i32 0 +#define TCG_TARGET_HAS_goto_ptr 1 #define TCG_TARGET_HAS_div_i64 1 #define TCG_TARGET_HAS_rem_i64 1 @@ -90,7 +112,13 @@ typedef enum { #define TCG_TARGET_HAS_eqv_i64 1 #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_clz_i64 1 +#define TCG_TARGET_HAS_ctz_i64 1 +#define TCG_TARGET_HAS_ctpop_i64 0 #define TCG_TARGET_HAS_deposit_i64 1 +#define TCG_TARGET_HAS_extract_i64 1 +#define TCG_TARGET_HAS_sextract_i64 1 +#define TCG_TARGET_HAS_extract2_i64 1 #define TCG_TARGET_HAS_movcond_i64 1 #define TCG_TARGET_HAS_add2_i64 1 #define TCG_TARGET_HAS_sub2_i64 1 @@ -98,10 +126,48 @@ typedef enum { #define TCG_TARGET_HAS_muls2_i64 0 #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 +#define TCG_TARGET_HAS_direct_jump 1 + +#define TCG_TARGET_HAS_v64 1 +#define TCG_TARGET_HAS_v128 1 +#define TCG_TARGET_HAS_v256 0 + +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec 1 +#define TCG_TARGET_HAS_not_vec 1 +#define 
TCG_TARGET_HAS_neg_vec 1 +#define TCG_TARGET_HAS_abs_vec 1 +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 0 +#define TCG_TARGET_HAS_shv_vec 1 +#define TCG_TARGET_HAS_cmp_vec 1 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 1 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec 1 +#define TCG_TARGET_HAS_cmpsel_vec 0 + +#define TCG_TARGET_DEFAULT_MO (0) +#define TCG_TARGET_HAS_MEMORY_BSWAP 1 static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { +#if defined(__APPLE__) + /* + * On Apple silicon, sys_icache_invalidate() must be used to flush + * freshly written JIT code from the instruction cache; on Intel-based + * Mac computers it does nothing. + * Source: https://developer.apple.com/documentation/apple_silicon/porting_just-in-time_compilers_to_apple_silicon?language=objc + */ + sys_icache_invalidate((char *)start, stop - start); +#else __builtin___clear_cache((char *)start, (char *)stop); +#endif } -#endif /* TCG_TARGET_AARCH64 */ +void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t); + +#ifdef CONFIG_SOFTMMU +#define TCG_TARGET_NEED_LDST_LABELS +#endif +#define TCG_TARGET_NEED_POOL_LABELS + +#endif /* AARCH64_TCG_TARGET_H */ diff --git a/qemu/tcg/aarch64/tcg-target.inc.c b/qemu/tcg/aarch64/tcg-target.inc.c new file mode 100644 index 00000000..85185611 --- /dev/null +++ b/qemu/tcg/aarch64/tcg-target.inc.c @@ -0,0 +1,2927 @@ +/* + * Initial TCG Implementation for aarch64 + * + * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH + * Written by Claudio Fontana + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + */ + +#include "../tcg-pool.inc.c" +#include "qemu/bitops.h" + +/* We're going to re-use TCGType in setting of the SF bit, which controls + the size of the operation performed. If we know the values match, it + makes things much cleaner. */ +QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1); + +#ifdef CONFIG_DEBUG_TCG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", + "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", + "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", + "x24", "x25", "x26", "x27", "x28", "fp", "x30", "sp", + + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", +}; +#endif /* CONFIG_DEBUG_TCG */ + +static const int tcg_target_reg_alloc_order[] = { + TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, + TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, + TCG_REG_X28, /* we will reserve this for guest_base if configured */ + + TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, + TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, + TCG_REG_X16, TCG_REG_X17, + + TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, + TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, + + /* X18 reserved by system */ + /* X19 reserved for AREG0 */ + /* X29 reserved as fp */ + /* X30 reserved as temporary */ + + TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, + TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, + /* V8 - V15 are call-saved, and skipped. 
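Per AAPCS64 only the low 64 bits of v8-v15 are preserved across + calls, so the allocator simply avoids them rather than saving them.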
*/ + TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, + TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, + TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, + TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, +}; + +static const int tcg_target_call_iarg_regs[8] = { + TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, + TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7 +}; +static const int tcg_target_call_oarg_regs[1] = { + TCG_REG_X0 +}; + +#define TCG_REG_TMP TCG_REG_X30 +#define TCG_VEC_TMP TCG_REG_V31 + +#ifndef CONFIG_SOFTMMU +/* Note that XZR cannot be encoded in the address base register slot, + as that actually encodes SP. So if we need to zero-extend the guest + address, via the address index register slot, we need to load even + a zero guest base into a register. */ +#define USE_GUEST_BASE (guest_base != 0 || TARGET_LONG_BITS == 32) +#define TCG_REG_GUEST_BASE TCG_REG_X28 +#endif + +static inline bool reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target) +{ + ptrdiff_t offset = target - code_ptr; + if (offset == sextract64(offset, 0, 26)) { + /* read instruction, mask away previous PC_REL26 parameter contents, + set the proper offset, then write back the instruction. */ + *code_ptr = deposit32(*code_ptr, 0, 26, offset); + return true; + } + return false; +} + +static inline bool reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target) +{ + ptrdiff_t offset = target - code_ptr; + if (offset == sextract64(offset, 0, 19)) { + *code_ptr = deposit32(*code_ptr, 5, 19, offset); + return true; + } + return false; +} + +static inline bool patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + tcg_debug_assert(addend == 0); + switch (type) { + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + return reloc_pc26(code_ptr, (tcg_insn_unit *)value); + case R_AARCH64_CONDBR19: + return reloc_pc19(code_ptr, (tcg_insn_unit *)value); + default: + g_assert_not_reached(); + } +} + +#define TCG_CT_CONST_AIMM 0x100 +#define TCG_CT_CONST_LIMM 0x200 +#define TCG_CT_CONST_ZERO 0x400 +#define TCG_CT_CONST_MONE 0x800 +#define TCG_CT_CONST_ORRI 0x1000 +#define TCG_CT_CONST_ANDI 0x2000 + +/* parse target specific constraints */ +static const char *target_parse_constraint(TCGArgConstraint *ct, + const char *ct_str, TCGType type) +{ + switch (*ct_str++) { + case 'r': /* general registers */ + ct->ct |= TCG_CT_REG; + ct->u.regs |= 0xffffffffu; + break; + case 'w': /* advsimd registers */ + ct->ct |= TCG_CT_REG; + ct->u.regs |= 0xffffffff00000000ull; + break; + case 'l': /* qemu_ld / qemu_st address, data_reg */ + ct->ct |= TCG_CT_REG; + ct->u.regs = 0xffffffffu; +#ifdef CONFIG_SOFTMMU + /* x0 and x1 will be overwritten when reading the tlb entry, + and x2 and x3 for helper args; better to avoid using them. */ + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X0); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X1); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X2); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3); +#endif + break; + case 'A': /* Valid for arithmetic immediate (positive or negative). */ + ct->ct |= TCG_CT_CONST_AIMM; + break; + case 'L': /* Valid for logical immediate. 
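E.g. a contiguous run of ones such as 0x0000fff0 qualifies; see + is_limm() below for the simplified pattern actually accepted.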
*/ + ct->ct |= TCG_CT_CONST_LIMM; + break; + case 'M': /* minus one */ + ct->ct |= TCG_CT_CONST_MONE; + break; + case 'O': /* vector orr/bic immediate */ + ct->ct |= TCG_CT_CONST_ORRI; + break; + case 'N': /* vector orr/bic immediate, inverted */ + ct->ct |= TCG_CT_CONST_ANDI; + break; + case 'Z': /* zero */ + ct->ct |= TCG_CT_CONST_ZERO; + break; + default: + return NULL; + } + return ct_str; +} + +/* Match a constant valid for addition (12-bit, optionally shifted). */ +static inline bool is_aimm(uint64_t val) +{ + return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0; +} + +/* Match a constant valid for logical operations. */ +static inline bool is_limm(uint64_t val) +{ + /* Taking a simplified view of the logical immediates for now, ignoring + the replication that can happen across the field. Match bit patterns + of the forms + 0....01....1 + 0..01..10..0 + and their inverses. */ + + /* Make things easier below, by testing the form with msb clear. */ + if ((int64_t)val < 0) { + val = ~val; + } + if (val == 0) { + return false; + } + val += val & -val; + return (val & (val - 1)) == 0; +} + +/* Return true if v16 is a valid 16-bit shifted immediate. */ +static bool is_shimm16(uint16_t v16, int *cmode, int *imm8) +{ + if (v16 == (v16 & 0xff)) { + *cmode = 0x8; + *imm8 = v16 & 0xff; + return true; + } else if (v16 == (v16 & 0xff00)) { + *cmode = 0xa; + *imm8 = v16 >> 8; + return true; + } + return false; +} + +/* Return true if v32 is a valid 32-bit shifted immediate. */ +static bool is_shimm32(uint32_t v32, int *cmode, int *imm8) +{ + if (v32 == (v32 & 0xff)) { + *cmode = 0x0; + *imm8 = v32 & 0xff; + return true; + } else if (v32 == (v32 & 0xff00)) { + *cmode = 0x2; + *imm8 = (v32 >> 8) & 0xff; + return true; + } else if (v32 == (v32 & 0xff0000)) { + *cmode = 0x4; + *imm8 = (v32 >> 16) & 0xff; + return true; + } else if (v32 == (v32 & 0xff000000)) { + *cmode = 0x6; + *imm8 = v32 >> 24; + return true; + } + return false; +} + +/* Return true if v32 is a valid 32-bit shifting ones immediate. */ +static bool is_soimm32(uint32_t v32, int *cmode, int *imm8) +{ + if ((v32 & 0xffff00ff) == 0xff) { + *cmode = 0xc; + *imm8 = (v32 >> 8) & 0xff; + return true; + } else if ((v32 & 0xff00ffff) == 0xffff) { + *cmode = 0xd; + *imm8 = (v32 >> 16) & 0xff; + return true; + } + return false; +} + +/* Return true if v32 is a valid float32 immediate. */ +static bool is_fimm32(uint32_t v32, int *cmode, int *imm8) +{ + if (extract32(v32, 0, 19) == 0 + && (extract32(v32, 25, 6) == 0x20 + || extract32(v32, 25, 6) == 0x1f)) { + *cmode = 0xf; + *imm8 = (extract32(v32, 31, 1) << 7) + | (extract32(v32, 25, 1) << 6) + | extract32(v32, 19, 6); + return true; + } + return false; +} + +/* Return true if v64 is a valid float64 immediate. */ +static bool is_fimm64(uint64_t v64, int *cmode, int *imm8) +{ + if (extract64(v64, 0, 48) == 0 + && (extract64(v64, 54, 9) == 0x100 + || extract64(v64, 54, 9) == 0x0ff)) { + *cmode = 0xf; + *imm8 = (extract64(v64, 63, 1) << 7) + | (extract64(v64, 54, 1) << 6) + | extract64(v64, 48, 6); + return true; + } + return false; +} + +/* + * Return non-zero if v32 can be formed by MOVI+ORR. + * Place the parameters for MOVI in (cmode, imm8). + * Return the cmode for ORR; the imm8 can be had via extraction from v32. + */ +static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8) +{ + int i; + + for (i = 6; i > 0; i -= 2) { + /* Mask out one byte we can add with ORR. 
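That is, search for a byte at bit position i * 4 whose removal leaves + a pattern loadable by a single MOVI-class insn; i is then the cmode + for the ORR (or BIC) that restores the byte.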
*/ + uint32_t tmp = v32 & ~(0xffu << (i * 4)); + if (is_shimm32(tmp, cmode, imm8) || + is_soimm32(tmp, cmode, imm8)) { + break; + } + } + return i; +} + +/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */ +static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8) +{ + if (v32 == deposit32(v32, 16, 16, v32)) { + return is_shimm16(v32, cmode, imm8); + } else { + return is_shimm32(v32, cmode, imm8); + } +} + +static int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct = arg_ct->ct; + + if (ct & TCG_CT_CONST) { + return 1; + } + if (type == TCG_TYPE_I32) { + val = (int32_t)val; + } + if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) { + return 1; + } + if ((ct & TCG_CT_CONST_LIMM) && is_limm(val)) { + return 1; + } + if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return 1; + } + if ((ct & TCG_CT_CONST_MONE) && val == -1) { + return 1; + } + + switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) { + case 0: + break; + case TCG_CT_CONST_ANDI: + val = ~val; + /* fallthru */ + case TCG_CT_CONST_ORRI: + if (val == deposit64(val, 32, 32, val)) { + int cmode, imm8; + return is_shimm1632(val, &cmode, &imm8); + } + break; + default: + /* Both bits should not be set for the same insn. */ + g_assert_not_reached(); + } + + return 0; +} + +enum aarch64_cond_code { + COND_EQ = 0x0, + COND_NE = 0x1, + COND_CS = 0x2, /* Unsigned greater or equal */ + COND_HS = COND_CS, /* ALIAS greater or equal */ + COND_CC = 0x3, /* Unsigned less than */ + COND_LO = COND_CC, /* ALIAS Lower */ + COND_MI = 0x4, /* Negative */ + COND_PL = 0x5, /* Zero or greater */ + COND_VS = 0x6, /* Overflow */ + COND_VC = 0x7, /* No overflow */ + COND_HI = 0x8, /* Unsigned greater than */ + COND_LS = 0x9, /* Unsigned less or equal */ + COND_GE = 0xa, + COND_LT = 0xb, + COND_GT = 0xc, + COND_LE = 0xd, + COND_AL = 0xe, + COND_NV = 0xf, /* behaves like COND_AL here */ +}; + +static const enum aarch64_cond_code tcg_cond_to_aarch64[] = { + [TCG_COND_EQ] = COND_EQ, + [TCG_COND_NE] = COND_NE, + [TCG_COND_LT] = COND_LT, + [TCG_COND_GE] = COND_GE, + [TCG_COND_LE] = COND_LE, + [TCG_COND_GT] = COND_GT, + /* unsigned */ + [TCG_COND_LTU] = COND_LO, + [TCG_COND_GTU] = COND_HI, + [TCG_COND_GEU] = COND_HS, + [TCG_COND_LEU] = COND_LS, +}; + +typedef enum { + LDST_ST = 0, /* store */ + LDST_LD = 1, /* load */ + LDST_LD_S_X = 2, /* load and sign-extend into Xt */ + LDST_LD_S_W = 3, /* load and sign-extend into Wt */ +} AArch64LdstType; + +/* We encode the format of the insn into the beginning of the name, so that + we can have the preprocessor help "typecheck" the insn vs the output + function. Arm didn't provide us with nice names for the formats, so we + use the section number of the architecture reference manual in which the + instruction group is described. */ +typedef enum { + /* Compare and branch (immediate). */ + I3201_CBZ = 0x34000000, + I3201_CBNZ = 0x35000000, + + /* Conditional branch (immediate). */ + I3202_B_C = 0x54000000, + + /* Unconditional branch (immediate). */ + I3206_B = 0x14000000, + I3206_BL = 0x94000000, + + /* Unconditional branch (register). */ + I3207_BR = 0xd61f0000, + I3207_BLR = 0xd63f0000, + I3207_RET = 0xd65f0000, + + /* AdvSIMD load/store single structure. */ + I3303_LD1R = 0x0d40c000, + + /* Load literal for loading the address at pc-relative offset */ + I3305_LDR = 0x58000000, + I3305_LDR_v64 = 0x5c000000, + I3305_LDR_v128 = 0x9c000000, + + /* Load/store register. 
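The MemOp size (MO_8..MO_64) is pre-merged into bits 30-31 and the + AArch64LdstType opc field into bits 22-23 of each constant below.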
Described here as 3.3.12, but the helper + that emits them can transform to 3.3.10 or 3.3.13. */ + I3312_STRB = 0x38000000 | LDST_ST << 22 | MO_8 << 30, + I3312_STRH = 0x38000000 | LDST_ST << 22 | MO_16 << 30, + I3312_STRW = 0x38000000 | LDST_ST << 22 | MO_32 << 30, + I3312_STRX = 0x38000000 | LDST_ST << 22 | MO_64 << 30, + + I3312_LDRB = 0x38000000 | LDST_LD << 22 | MO_8 << 30, + I3312_LDRH = 0x38000000 | LDST_LD << 22 | MO_16 << 30, + I3312_LDRW = 0x38000000 | LDST_LD << 22 | MO_32 << 30, + I3312_LDRX = 0x38000000 | LDST_LD << 22 | MO_64 << 30, + + I3312_LDRSBW = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30, + I3312_LDRSHW = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30, + + I3312_LDRSBX = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30, + I3312_LDRSHX = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30, + I3312_LDRSWX = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30, + + I3312_LDRVS = 0x3c000000 | LDST_LD << 22 | MO_32 << 30, + I3312_STRVS = 0x3c000000 | LDST_ST << 22 | MO_32 << 30, + + I3312_LDRVD = 0x3c000000 | LDST_LD << 22 | MO_64 << 30, + I3312_STRVD = 0x3c000000 | LDST_ST << 22 | MO_64 << 30, + + I3312_LDRVQ = 0x3c000000 | 3 << 22 | 0 << 30, + I3312_STRVQ = 0x3c000000 | 2 << 22 | 0 << 30, + + I3312_TO_I3310 = 0x00200800, + I3312_TO_I3313 = 0x01000000, + + /* Load/store register pair instructions. */ + I3314_LDP = 0x28400000, + I3314_STP = 0x28000000, + + /* Add/subtract immediate instructions. */ + I3401_ADDI = 0x11000000, + I3401_ADDSI = 0x31000000, + I3401_SUBI = 0x51000000, + I3401_SUBSI = 0x71000000, + + /* Bitfield instructions. */ + I3402_BFM = 0x33000000, + I3402_SBFM = 0x13000000, + I3402_UBFM = 0x53000000, + + /* Extract instruction. */ + I3403_EXTR = 0x13800000, + + /* Logical immediate instructions. */ + I3404_ANDI = 0x12000000, + I3404_ORRI = 0x32000000, + I3404_EORI = 0x52000000, + + /* Move wide immediate instructions. */ + I3405_MOVN = 0x12800000, + I3405_MOVZ = 0x52800000, + I3405_MOVK = 0x72800000, + + /* PC relative addressing instructions. */ + I3406_ADR = 0x10000000, + I3406_ADRP = 0x90000000, + + /* Add/subtract shifted register instructions (without a shift). */ + I3502_ADD = 0x0b000000, + I3502_ADDS = 0x2b000000, + I3502_SUB = 0x4b000000, + I3502_SUBS = 0x6b000000, + + /* Add/subtract shifted register instructions (with a shift). */ + I3502S_ADD_LSL = I3502_ADD, + + /* Add/subtract with carry instructions. */ + I3503_ADC = 0x1a000000, + I3503_SBC = 0x5a000000, + + /* Conditional select instructions. */ + I3506_CSEL = 0x1a800000, + I3506_CSINC = 0x1a800400, + I3506_CSINV = 0x5a800000, + I3506_CSNEG = 0x5a800400, + + /* Data-processing (1 source) instructions. */ + I3507_CLZ = 0x5ac01000, + I3507_RBIT = 0x5ac00000, + I3507_REV16 = 0x5ac00400, + I3507_REV32 = 0x5ac00800, + I3507_REV64 = 0x5ac00c00, + + /* Data-processing (2 source) instructions. */ + I3508_LSLV = 0x1ac02000, + I3508_LSRV = 0x1ac02400, + I3508_ASRV = 0x1ac02800, + I3508_RORV = 0x1ac02c00, + I3508_SMULH = 0x9b407c00, + I3508_UMULH = 0x9bc07c00, + I3508_UDIV = 0x1ac00800, + I3508_SDIV = 0x1ac00c00, + + /* Data-processing (3 source) instructions. */ + I3509_MADD = 0x1b000000, + I3509_MSUB = 0x1b008000, + + /* Logical shifted register instructions (without a shift). */ + I3510_AND = 0x0a000000, + I3510_BIC = 0x0a200000, + I3510_ORR = 0x2a000000, + I3510_ORN = 0x2a200000, + I3510_EOR = 0x4a000000, + I3510_EON = 0x4a200000, + I3510_ANDS = 0x6a000000, + + /* Logical shifted register instructions (with a shift). 
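Bits 22-23 encode the shift type, so ORing in (1 << 22) selects LSR; + the shift amount itself rides in the imm6 field at bit 10.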
*/ + I3502S_AND_LSR = I3510_AND | (1 << 22), + + /* AdvSIMD copy */ + I3605_DUP = 0x0e000400, + I3605_INS = 0x4e001c00, + I3605_UMOV = 0x0e003c00, + + /* AdvSIMD modified immediate */ + I3606_MOVI = 0x0f000400, + I3606_MVNI = 0x2f000400, + I3606_BIC = 0x2f001400, + I3606_ORR = 0x0f001400, + + /* AdvSIMD shift by immediate */ + I3614_SSHR = 0x0f000400, + I3614_SSRA = 0x0f001400, + I3614_SHL = 0x0f005400, + I3614_USHR = 0x2f000400, + I3614_USRA = 0x2f001400, + + /* AdvSIMD three same. */ + I3616_ADD = 0x0e208400, + I3616_AND = 0x0e201c00, + I3616_BIC = 0x0e601c00, + I3616_BIF = 0x2ee01c00, + I3616_BIT = 0x2ea01c00, + I3616_BSL = 0x2e601c00, + I3616_EOR = 0x2e201c00, + I3616_MUL = 0x0e209c00, + I3616_ORR = 0x0ea01c00, + I3616_ORN = 0x0ee01c00, + I3616_SUB = 0x2e208400, + I3616_CMGT = 0x0e203400, + I3616_CMGE = 0x0e203c00, + I3616_CMTST = 0x0e208c00, + I3616_CMHI = 0x2e203400, + I3616_CMHS = 0x2e203c00, + I3616_CMEQ = 0x2e208c00, + I3616_SMAX = 0x0e206400, + I3616_SMIN = 0x0e206c00, + I3616_SSHL = 0x0e204400, + I3616_SQADD = 0x0e200c00, + I3616_SQSUB = 0x0e202c00, + I3616_UMAX = 0x2e206400, + I3616_UMIN = 0x2e206c00, + I3616_UQADD = 0x2e200c00, + I3616_UQSUB = 0x2e202c00, + I3616_USHL = 0x2e204400, + + /* AdvSIMD two-reg misc. */ + I3617_CMGT0 = 0x0e208800, + I3617_CMEQ0 = 0x0e209800, + I3617_CMLT0 = 0x0e20a800, + I3617_CMGE0 = 0x2e208800, + I3617_CMLE0 = 0x2e20a800, + I3617_NOT = 0x2e205800, + I3617_ABS = 0x0e20b800, + I3617_NEG = 0x2e20b800, + + /* System instructions. */ + NOP = 0xd503201f, + DMB_ISH = 0xd50338bf, + DMB_LD = 0x00000100, + DMB_ST = 0x00000200, +} AArch64Insn; + +static inline uint32_t tcg_in32(TCGContext *s) +{ + uint32_t v = *(uint32_t *)s->code_ptr; + return v; +} + +/* Emit an opcode with "type-checking" of the format. */ +#define tcg_out_insn(S, FMT, OP, ...) 
\ + glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__) + +static void tcg_out_insn_3303(TCGContext *s, AArch64Insn insn, bool q, + TCGReg rt, TCGReg rn, unsigned size) +{ + tcg_out32(s, insn | (rt & 0x1f) | (rn << 5) | (size << 10) | (q << 30)); +} + +static void tcg_out_insn_3305(TCGContext *s, AArch64Insn insn, + int imm19, TCGReg rt) +{ + tcg_out32(s, insn | (imm19 & 0x7ffff) << 5 | rt); +} + +static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rt, int imm19) +{ + tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt); +} + +static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn, + TCGCond c, int imm19) +{ + tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5); +} + +static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26) +{ + tcg_out32(s, insn | (imm26 & 0x03ffffff)); +} + +static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn) +{ + tcg_out32(s, insn | rn << 5); +} + +static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn, + TCGReg r1, TCGReg r2, TCGReg rn, + tcg_target_long ofs, bool pre, bool w) +{ + insn |= 1u << 31; /* ext */ + insn |= pre << 24; + insn |= w << 23; + + tcg_debug_assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0); + insn |= (ofs & (0x7f << 3)) << (15 - 3); + + tcg_out32(s, insn | r2 << 10 | rn << 5 | r1); +} + +static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, uint64_t aimm) +{ + if (aimm > 0xfff) { + tcg_debug_assert((aimm & 0xfff) == 0); + aimm >>= 12; + tcg_debug_assert(aimm <= 0xfff); + aimm |= 1 << 12; /* apply LSL 12 */ + } + tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd); +} + +/* This function can be used for both 3.4.2 (Bitfield) and 3.4.4 + (Logical immediate). Both insn groups have N, IMMR and IMMS fields + that feed the DecodeBitMasks pseudo function. */ +static void tcg_out_insn_3402(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, int n, int immr, int imms) +{ + tcg_out32(s, insn | ext << 31 | n << 22 | immr << 16 | imms << 10 + | rn << 5 | rd); +} + +#define tcg_out_insn_3404 tcg_out_insn_3402 + +static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, TCGReg rm, int imms) +{ + tcg_out32(s, insn | ext << 31 | ext << 22 | rm << 16 | imms << 10 + | rn << 5 | rd); +} + +/* This function is used for the Move (wide immediate) instruction group. + Note that SHIFT is a full shift count, not the 2-bit HW field. */ +static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, uint16_t half, unsigned shift) +{ + tcg_debug_assert((shift & ~0x30) == 0); + tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd); +} + +static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn, + TCGReg rd, int64_t disp) +{ + tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd); +} + +/* This function is for 3.5.2 (Add/Subtract shifted register), for + the rare occasion when we actually want to supply a shift amount. */ +static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn, + TCGType ext, TCGReg rd, TCGReg rn, + TCGReg rm, int imm6) +{ + tcg_out32(s, insn | ext << 31 | rm << 16 | imm6 << 10 | rn << 5 | rd); +} + +/* This function is for 3.5.2 (Add/subtract shifted register), + and 3.5.10 (Logical shifted register), for the vast majority of cases + when we don't want to apply a shift. 
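(With no shift supplied, imm6 is simply 0, i.e. LSL #0.)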
Thus it can also be used for + 3.5.3 (Add/subtract with carry) and 3.5.8 (Data processing 2 source). */ +static void tcg_out_insn_3502(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, TCGReg rm) +{ + tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd); +} + +#define tcg_out_insn_3503 tcg_out_insn_3502 +#define tcg_out_insn_3508 tcg_out_insn_3502 +#define tcg_out_insn_3510 tcg_out_insn_3502 + +static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, TCGReg rm, TCGCond c) +{ + tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd + | tcg_cond_to_aarch64[c] << 12); +} + +static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn) +{ + tcg_out32(s, insn | ext << 31 | rn << 5 | rd); +} + +static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra) +{ + tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd); +} + +static void tcg_out_insn_3605(TCGContext *s, AArch64Insn insn, bool q, + TCGReg rd, TCGReg rn, int dst_idx, int src_idx) +{ + /* Note that bit 11 set means general register input. Therefore + we can handle both register sets with one function. */ + tcg_out32(s, insn | q << 30 | (dst_idx << 16) | (src_idx << 11) + | (rd & 0x1f) | (~rn & 0x20) << 6 | (rn & 0x1f) << 5); +} + +static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q, + TCGReg rd, bool op, int cmode, uint8_t imm8) +{ + tcg_out32(s, insn | q << 30 | op << 29 | cmode << 12 | (rd & 0x1f) + | (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5); +} + +static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q, + TCGReg rd, TCGReg rn, unsigned immhb) +{ + tcg_out32(s, insn | q << 30 | immhb << 16 + | (rn & 0x1f) << 5 | (rd & 0x1f)); +} + +static void tcg_out_insn_3616(TCGContext *s, AArch64Insn insn, bool q, + unsigned size, TCGReg rd, TCGReg rn, TCGReg rm) +{ + tcg_out32(s, insn | q << 30 | (size << 22) | (rm & 0x1f) << 16 + | (rn & 0x1f) << 5 | (rd & 0x1f)); +} + +static void tcg_out_insn_3617(TCGContext *s, AArch64Insn insn, bool q, + unsigned size, TCGReg rd, TCGReg rn) +{ + tcg_out32(s, insn | q << 30 | (size << 22) + | (rn & 0x1f) << 5 | (rd & 0x1f)); +} + +static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg base, TCGType ext, + TCGReg regoff) +{ + /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ + tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 | + 0x4000 | ext << 13 | base << 5 | (rd & 0x1f)); +} + +static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg rn, intptr_t offset) +{ + tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | (rd & 0x1f)); +} + +static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg rn, uintptr_t scaled_uimm) +{ + /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ + tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10 + | rn << 5 | (rd & 0x1f)); +} + +/* Register to register move using ORR (shifted register with no shift). */ +static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm) +{ + tcg_out_insn(s, 3510, ORR, ext, rd, TCG_REG_XZR, rm); +} + +/* Register to register move using ADDI (move to/from SP). */ +static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) +{ + tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0); +} + +/* This function is used for the Logical (immediate) instruction group. + The value of LIMM must satisfy IS_LIMM. 
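As a worked example, limm = 0x0000fff0 has h = 48 leading and l = 4 + trailing zeros, giving r = 60 and c = 11: twelve ones rotated right + by 60 positions.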
See the comment above about + only supporting simplified logical immediates. */ +static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, uint64_t limm) +{ + unsigned h, l, r, c; + + tcg_debug_assert(is_limm(limm)); + + h = clz64(limm); + l = ctz64(limm); + if (l == 0) { + r = 0; /* form 0....01....1 */ + c = ctz64(~limm) - 1; + if (h == 0) { + r = clz64(~limm); /* form 1..10..01..1 */ + c += r; + } + } else { + r = 64 - l; /* form 1....10....0 or 0..01..10..0 */ + c = r - h - 1; + } + if (ext == TCG_TYPE_I32) { + r &= 31; + c &= 31; + } + + tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c); +} + +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, + TCGReg rd, tcg_target_long v64) +{ + bool q = type == TCG_TYPE_V128; + int cmode, imm8, i; + + /* Test all bytes equal first. */ + if (v64 == dup_const(MO_8, v64)) { + imm8 = (uint8_t)v64; + tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0xe, imm8); + return; + } + + /* + * Test all bytes 0x00 or 0xff second. This can match cases that + * might otherwise take 2 or 3 insns for MO_16 or MO_32 below. + */ + for (i = imm8 = 0; i < 8; i++) { + uint8_t byte = v64 >> (i * 8); + if (byte == 0xff) { + imm8 |= 1 << i; + } else if (byte != 0) { + goto fail_bytes; + } + } + tcg_out_insn(s, 3606, MOVI, q, rd, 1, 0xe, imm8); + return; + fail_bytes: + + /* + * Tests for various replications. For each element width, if we + * cannot find an expansion there's no point checking a larger + * width because we already know by replication it cannot match. + */ + if (v64 == dup_const(MO_16, v64)) { + uint16_t v16 = v64; + + if (is_shimm16(v16, &cmode, &imm8)) { + tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8); + return; + } + if (is_shimm16(~v16, &cmode, &imm8)) { + tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8); + return; + } + + /* + * Otherwise, all remaining constants can be loaded in two insns: + * rd = v16 & 0xff, rd |= v16 & 0xff00. + */ + tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0x8, v16 & 0xff); + tcg_out_insn(s, 3606, ORR, q, rd, 0, 0xa, v16 >> 8); + return; + } else if (v64 == dup_const(MO_32, v64)) { + uint32_t v32 = v64; + uint32_t n32 = ~v32; + + if (is_shimm32(v32, &cmode, &imm8) || + is_soimm32(v32, &cmode, &imm8) || + is_fimm32(v32, &cmode, &imm8)) { + tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8); + return; + } + if (is_shimm32(n32, &cmode, &imm8) || + is_soimm32(n32, &cmode, &imm8)) { + tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8); + return; + } + + /* + * Restrict the set of constants to those we can load with + * two instructions. Others we load from the pool. + */ + i = is_shimm32_pair(v32, &cmode, &imm8); + if (i) { + tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8); + tcg_out_insn(s, 3606, ORR, q, rd, 0, i, extract32(v32, i * 4, 8)); + return; + } + i = is_shimm32_pair(n32, &cmode, &imm8); + if (i) { + tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8); + tcg_out_insn(s, 3606, BIC, q, rd, 0, i, extract32(n32, i * 4, 8)); + return; + } + } else if (is_fimm64(v64, &cmode, &imm8)) { + tcg_out_insn(s, 3606, MOVI, q, rd, 1, cmode, imm8); + return; + } + + /* + * As a last resort, load from the constant pool. Sadly there + * is no LD1R (literal), so store the full 16-byte vector. 
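A TCG_TYPE_V64 constant, by contrast, needs only an 8-byte pool entry + and the LDR_v64 literal form.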
+ */ + if (type == TCG_TYPE_V128) { + new_pool_l2(s, R_AARCH64_CONDBR19, s->code_ptr, 0, v64, v64); + tcg_out_insn(s, 3305, LDR_v128, 0, rd); + } else { + new_pool_label(s, v64, R_AARCH64_CONDBR19, s->code_ptr, 0); + tcg_out_insn(s, 3305, LDR_v64, 0, rd); + } +} + +static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg rd, TCGReg rs) +{ + int is_q = type - TCG_TYPE_V64; + tcg_out_insn(s, 3605, DUP, is_q, rd, rs, 1 << vece, 0); + return true; +} + +static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg r, TCGReg base, intptr_t offset) +{ + TCGReg temp = TCG_REG_TMP; + + if (offset < -0xffffff || offset > 0xffffff) { + tcg_out_movi(s, TCG_TYPE_PTR, temp, offset); + tcg_out_insn(s, 3502, ADD, 1, temp, temp, base); + base = temp; + } else { + AArch64Insn add_insn = I3401_ADDI; + + if (offset < 0) { + add_insn = I3401_SUBI; + offset = -offset; + } + if (offset & 0xfff000) { + tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff000); + base = temp; + } + if (offset & 0xfff) { + tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff); + base = temp; + } + } + tcg_out_insn(s, 3303, LD1R, type == TCG_TYPE_V128, r, base, vece); + return true; +} + +static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, + tcg_target_long value) +{ + tcg_target_long svalue = value; + tcg_target_long ivalue = ~value; + tcg_target_long t0, t1, t2; + int s0, s1; + AArch64Insn opc; + + switch (type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + tcg_debug_assert(rd < 32); + break; + + case TCG_TYPE_V64: + case TCG_TYPE_V128: + tcg_debug_assert(rd >= 32); + tcg_out_dupi_vec(s, type, rd, value); + return; + + default: + g_assert_not_reached(); + } + + /* For 32-bit values, discard potential garbage in value. For 64-bit + values within [2**31, 2**32-1], we can create smaller sequences by + interpreting this as a negative 32-bit number, while ensuring that + the high 32 bits are cleared by setting SF=0. */ + if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) { + svalue = (int32_t)value; + value = (uint32_t)value; + ivalue = (uint32_t)ivalue; + type = TCG_TYPE_I32; + } + + /* Speed things up by handling the common case of small positive + and negative values specially. */ + if ((value & ~0xffffull) == 0) { + tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0); + return; + } else if ((ivalue & ~0xffffull) == 0) { + tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0); + return; + } + + /* Check for bitfield immediates. For the benefit of 32-bit quantities, + use the sign-extended value. That lets us match rotated values such + as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff. */ + if (is_limm(svalue)) { + tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue); + return; + } + + /* Look for host pointer values within 4G of the PC. This happens + often when loading pointers to QEMU's own data structures. */ + if (type == TCG_TYPE_I64) { + tcg_target_long disp = value - (intptr_t)s->code_ptr; + if (disp == sextract64(disp, 0, 21)) { + tcg_out_insn(s, 3406, ADR, rd, disp); + return; + } + disp = (value >> 12) - ((intptr_t)s->code_ptr >> 12); + if (disp == sextract64(disp, 0, 21)) { + tcg_out_insn(s, 3406, ADRP, rd, disp); + if (value & 0xfff) { + tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff); + } + return; + } + } + + /* Would it take fewer insns to begin with MOVN? 
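If at least half of the 64 bits are set, the inverted value likely + leaves fewer non-zero 16-bit chunks to patch with MOVK.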
*/ + if (ctpop64(value) >= 32) { + t0 = ivalue; + opc = I3405_MOVN; + } else { + t0 = value; + opc = I3405_MOVZ; + } + s0 = ctz64(t0) & (63 & -16); + t1 = t0 & ~(0xffffUL << s0); + s1 = ctz64(t1) & (63 & -16); + t2 = t1 & ~(0xffffUL << s1); + if (t2 == 0) { + tcg_out_insn_3405(s, opc, type, rd, t0 >> s0, s0); + if (t1 != 0) { + tcg_out_insn(s, 3405, MOVK, type, rd, value >> s1, s1); + } + return; + } + + /* For more than 2 insns, dump it into the constant pool. */ + new_pool_label(s, value, R_AARCH64_CONDBR19, s->code_ptr, 0); + tcg_out_insn(s, 3305, LDR, 0, rd); +} + +/* Define something more legible for general use. */ +#define tcg_out_ldst_r tcg_out_insn_3310 + +static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, TCGReg rd, + TCGReg rn, intptr_t offset, int lgsize) +{ + /* If the offset is naturally aligned and in range, then we can + use the scaled uimm12 encoding */ + if (offset >= 0 && !(offset & ((1 << lgsize) - 1))) { + uintptr_t scaled_uimm = offset >> lgsize; + if (scaled_uimm <= 0xfff) { + tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm); + return; + } + } + + /* Small signed offsets can use the unscaled encoding. */ + if (offset >= -256 && offset < 256) { + tcg_out_insn_3312(s, insn, rd, rn, offset); + return; + } + + /* Worst-case scenario, move offset to temp register, use reg offset. */ + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset); + tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP); +} + +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) +{ + if (ret == arg) { + return true; + } + switch (type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + if (ret < 32 && arg < 32) { + tcg_out_movr(s, type, ret, arg); + break; + } else if (ret < 32) { + tcg_out_insn(s, 3605, UMOV, type, ret, arg, 0, 0); + break; + } else if (arg < 32) { + tcg_out_insn(s, 3605, INS, 0, ret, arg, 4 << type, 0); + break; + } + /* FALLTHRU */ + + case TCG_TYPE_V64: + tcg_debug_assert(ret >= 32 && arg >= 32); + tcg_out_insn(s, 3616, ORR, 0, 0, ret, arg, arg); + break; + case TCG_TYPE_V128: + tcg_debug_assert(ret >= 32 && arg >= 32); + tcg_out_insn(s, 3616, ORR, 1, 0, ret, arg, arg); + break; + + default: + g_assert_not_reached(); + } + return true; +} + +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, + TCGReg base, intptr_t ofs) +{ + AArch64Insn insn; + int lgsz; + + switch (type) { + case TCG_TYPE_I32: + insn = (ret < 32 ? I3312_LDRW : I3312_LDRVS); + lgsz = 2; + break; + case TCG_TYPE_I64: + insn = (ret < 32 ? I3312_LDRX : I3312_LDRVD); + lgsz = 3; + break; + case TCG_TYPE_V64: + insn = I3312_LDRVD; + lgsz = 3; + break; + case TCG_TYPE_V128: + insn = I3312_LDRVQ; + lgsz = 4; + break; + default: + g_assert_not_reached(); + } + tcg_out_ldst(s, insn, ret, base, ofs, lgsz); +} + +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src, + TCGReg base, intptr_t ofs) +{ + AArch64Insn insn; + int lgsz; + + switch (type) { + case TCG_TYPE_I32: + insn = (src < 32 ? I3312_STRW : I3312_STRVS); + lgsz = 2; + break; + case TCG_TYPE_I64: + insn = (src < 32 ? 
I3312_STRX : I3312_STRVD); + lgsz = 3; + break; + case TCG_TYPE_V64: + insn = I3312_STRVD; + lgsz = 3; + break; + case TCG_TYPE_V128: + insn = I3312_STRVQ; + lgsz = 4; + break; + default: + g_assert_not_reached(); + } + tcg_out_ldst(s, insn, src, base, ofs, lgsz); +} + +static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) +{ + if (type <= TCG_TYPE_I64 && val == 0) { + tcg_out_st(s, type, TCG_REG_XZR, base, ofs); + return true; + } + return false; +} + +static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd, + TCGReg rn, unsigned int a, unsigned int b) +{ + tcg_out_insn(s, 3402, BFM, ext, rd, rn, ext, a, b); +} + +static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd, + TCGReg rn, unsigned int a, unsigned int b) +{ + tcg_out_insn(s, 3402, UBFM, ext, rd, rn, ext, a, b); +} + +static inline void tcg_out_sbfm(TCGContext *s, TCGType ext, TCGReg rd, + TCGReg rn, unsigned int a, unsigned int b) +{ + tcg_out_insn(s, 3402, SBFM, ext, rd, rn, ext, a, b); +} + +static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, + TCGReg rn, TCGReg rm, unsigned int a) +{ + tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a); +} + +static inline void tcg_out_shl(TCGContext *s, TCGType ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int bits = ext ? 64 : 32; + int max = bits - 1; + tcg_out_ubfm(s, ext, rd, rn, bits - (m & max), max - (m & max)); +} + +static inline void tcg_out_shr(TCGContext *s, TCGType ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int max = ext ? 63 : 31; + tcg_out_ubfm(s, ext, rd, rn, m & max, max); +} + +static inline void tcg_out_sar(TCGContext *s, TCGType ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int max = ext ? 63 : 31; + tcg_out_sbfm(s, ext, rd, rn, m & max, max); +} + +static inline void tcg_out_rotr(TCGContext *s, TCGType ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int max = ext ? 63 : 31; + tcg_out_extr(s, ext, rd, rn, rn, m & max); +} + +static inline void tcg_out_rotl(TCGContext *s, TCGType ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int bits = ext ? 64 : 32; + int max = bits - 1; + tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max)); +} + +static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd, + TCGReg rn, unsigned lsb, unsigned width) +{ + unsigned size = ext ? 64 : 32; + unsigned a = (size - lsb) & (size - 1); + unsigned b = width - 1; + tcg_out_bfm(s, ext, rd, rn, a, b); +} + +static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a, + tcg_target_long b, bool const_b) +{ + if (const_b) { + /* Using CMP or CMN aliases. 
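CMP Rn, #imm is SUBS XZR, Rn, #imm, so a negative immediate is + negated and folded into CMN (ADDS XZR, Rn, #-imm) instead.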
*/ + if (b >= 0) { + tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b); + } else { + tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b); + } + } else { + /* Using CMP alias SUBS wzr, Wn, Wm */ + tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b); + } +} + +static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target) +{ + ptrdiff_t offset = target - s->code_ptr; + tcg_debug_assert(offset == sextract64(offset, 0, 26)); + tcg_out_insn(s, 3206, B, offset); +} + +static inline void tcg_out_goto_long(TCGContext *s, tcg_insn_unit *target) +{ + ptrdiff_t offset = target - s->code_ptr; + if (offset == sextract64(offset, 0, 26)) { + tcg_out_insn(s, 3206, BL, offset); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); + tcg_out_insn(s, 3207, BR, TCG_REG_TMP); + } +} + +static inline void tcg_out_callr(TCGContext *s, TCGReg reg) +{ + tcg_out_insn(s, 3207, BLR, reg); +} + +static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *target) +{ + ptrdiff_t offset = target - s->code_ptr; + if (offset == sextract64(offset, 0, 26)) { + tcg_out_insn(s, 3206, BL, offset); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); + tcg_out_callr(s, TCG_REG_TMP); + } +} + +void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, + uintptr_t addr) +{ + tcg_insn_unit i1, i2; + TCGType rt = TCG_TYPE_I64; + TCGReg rd = TCG_REG_TMP; + uint64_t pair; + + ptrdiff_t offset = addr - jmp_addr; + + if (offset == sextract64(offset, 0, 26)) { + i1 = I3206_B | ((offset >> 2) & 0x3ffffff); + i2 = NOP; + } else { + offset = (addr >> 12) - (jmp_addr >> 12); + + /* patch ADRP */ + i1 = I3406_ADRP | (offset & 3) << 29 | (offset & 0x1ffffc) << (5 - 2) | rd; + /* patch ADDI */ + i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd; + } + pair = (uint64_t)i2 << 32 | i1; + atomic_set((uint64_t *)jmp_addr, pair); + flush_icache_range(jmp_addr, jmp_addr + 8); +} + +static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l) +{ + if (!l->has_value) { + tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, l, 0); + tcg_out_insn(s, 3206, B, 0); + } else { + tcg_out_goto(s, l->u.value_ptr); + } +} + +static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a, + TCGArg b, bool b_const, TCGLabel *l) +{ + intptr_t offset; + bool need_cmp; + + if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) { + need_cmp = false; + } else { + need_cmp = true; + tcg_out_cmp(s, ext, a, b, b_const); + } + + if (!l->has_value) { + tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0); + offset = tcg_in32(s) >> 5; + } else { + offset = l->u.value_ptr - s->code_ptr; + tcg_debug_assert(offset == sextract64(offset, 0, 19)); + } + + if (need_cmp) { + tcg_out_insn(s, 3202, B_C, c, offset); + } else if (c == TCG_COND_EQ) { + tcg_out_insn(s, 3201, CBZ, ext, a, offset); + } else { + tcg_out_insn(s, 3201, CBNZ, ext, a, offset); + } +} + +static inline void tcg_out_rev64(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_insn(s, 3507, REV64, TCG_TYPE_I64, rd, rn); +} + +static inline void tcg_out_rev32(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_insn(s, 3507, REV32, TCG_TYPE_I32, rd, rn); +} + +static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); +} + +static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits, + TCGReg rd, TCGReg rn) +{ + /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ + int bits = (8 << s_bits) - 1; + tcg_out_sbfm(s, ext, rd, rn, 0, 
bits); +} + +static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits, + TCGReg rd, TCGReg rn) +{ + /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ + int bits = (8 << s_bits) - 1; + tcg_out_ubfm(s, 0, rd, rn, 0, bits); +} + +static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, + TCGReg rn, int64_t aimm) +{ + if (aimm >= 0) { + tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm); + } else { + tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm); + } +} + +static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl, + TCGReg rh, TCGReg al, TCGReg ah, + tcg_target_long bl, tcg_target_long bh, + bool const_bl, bool const_bh, bool sub) +{ + TCGReg orig_rl = rl; + AArch64Insn insn; + + if (rl == ah || (!const_bh && rl == bh)) { + rl = TCG_REG_TMP; + } + + if (const_bl) { + insn = I3401_ADDSI; + if ((bl < 0) ^ sub) { + insn = I3401_SUBSI; + bl = -bl; + } + if (unlikely(al == TCG_REG_XZR)) { + /* ??? We want to allow al to be zero for the benefit of + negation via subtraction. However, that leaves open the + possibility of adding 0+const in the low part, and the + immediate add instructions encode XSP not XZR. Don't try + anything more elaborate here than loading another zero. */ + al = TCG_REG_TMP; + tcg_out_movi(s, ext, al, 0); + } + tcg_out_insn_3401(s, insn, ext, rl, al, bl); + } else { + tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl); + } + + insn = I3503_ADC; + if (const_bh) { + /* Note that the only two constants we support are 0 and -1, and + that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. */ + if ((bh != 0) ^ sub) { + insn = I3503_SBC; + } + bh = TCG_REG_XZR; + } else if (sub) { + insn = I3503_SBC; + } + tcg_out_insn_3503(s, insn, ext, rh, ah, bh); + + tcg_out_mov(s, ext, orig_rl, rl); +} + +static inline void tcg_out_mb(TCGContext *s, TCGArg a0) +{ + static const uint32_t sync[] = { + [0 ... TCG_MO_ALL] = DMB_ISH | DMB_LD | DMB_ST, + [TCG_MO_ST_ST] = DMB_ISH | DMB_ST, + [TCG_MO_LD_LD] = DMB_ISH | DMB_LD, + [TCG_MO_LD_ST] = DMB_ISH | DMB_LD, + [TCG_MO_LD_ST | TCG_MO_LD_LD] = DMB_ISH | DMB_LD, + }; + tcg_out32(s, sync[a0 & TCG_MO_ALL]); +} + +static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d, + TCGReg a0, TCGArg b, bool const_b, bool is_ctz) +{ + TCGReg a1 = a0; + if (is_ctz) { + a1 = TCG_REG_TMP; + tcg_out_insn(s, 3507, RBIT, ext, a1, a0); + } + if (const_b && b == (ext ? 
64 : 32)) { + tcg_out_insn(s, 3507, CLZ, ext, d, a1); + } else { + AArch64Insn sel = I3506_CSEL; + + tcg_out_cmp(s, ext, a0, 0, 1); + tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP, a1); + + if (const_b) { + if (b == -1) { + b = TCG_REG_XZR; + sel = I3506_CSINV; + } else if (b == 0) { + b = TCG_REG_XZR; + } else { + tcg_out_movi(s, ext, d, b); + b = d; + } + } + tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP, b, TCG_COND_NE); + } +} + +#ifdef CONFIG_SOFTMMU +#include "../tcg-ldst.inc.c" + +/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, + * TCGMemOpIdx oi, uintptr_t ra) + */ +static void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, +}; + +/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, + * uintxx_t val, TCGMemOpIdx oi, + * uintptr_t ra) + */ +static void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, +}; + +static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target) +{ + ptrdiff_t offset = tcg_pcrel_diff(s, target); + tcg_debug_assert(offset == sextract64(offset, 0, 21)); + tcg_out_insn(s, 3406, ADR, rd, offset); +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + TCGMemOpIdx oi = lb->oi; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; + + if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { + return false; + } + + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0); + tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, oi); + tcg_out_adr(s, TCG_REG_X3, lb->raddr); + tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); + if (opc & MO_SIGN) { + tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0); + } else { + tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0); + } + + tcg_out_goto(s, lb->raddr); + return true; +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + TCGMemOpIdx oi = lb->oi; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; + + if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) { + return false; + } + + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0); + tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); + tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, oi); + tcg_out_adr(s, TCG_REG_X4, lb->raddr); + tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); + tcg_out_goto(s, lb->raddr); + return true; +} + +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, + TCGType ext, TCGReg data_reg, TCGReg addr_reg, + tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->oi = oi; + label->type = ext; + label->datalo_reg = data_reg; + label->addrlo_reg = addr_reg; + label->raddr = raddr; + label->label_ptr[0] = label_ptr; +} + +/* We expect to use a 7-bit scaled negative offset from ENV. */ +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512); + +/* These offsets are built into the LDP below. 
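tcg_out_tlb_read() fetches {mask, table} with a single LDP at + TLB_MASK_TABLE_OFS, so the pair must sit at offsets 0 and 8.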
*/ +QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); +QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); + +/* Load and compare a TLB entry, emitting the conditional jump to the + slow path for the failure case, which will be patched later when finalizing + the slow path. Generated code returns the host addend in X1, + clobbers X0,X2,X3,TMP. */ +static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, + tcg_insn_unit **label_ptr, int mem_index, + bool is_read) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = s->uc; +#endif + unsigned a_bits = get_alignment_bits(opc); + unsigned s_bits = opc & MO_SIZE; + unsigned a_mask = (1u << a_bits) - 1; + unsigned s_mask = (1u << s_bits) - 1; + TCGReg x3; + TCGType mask_type; + uint64_t compare_mask; + + mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32 + ? TCG_TYPE_I64 : TCG_TYPE_I32); + + /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */ + tcg_out_insn(s, 3314, LDP, TCG_REG_X0, TCG_REG_X1, TCG_AREG0, + TLB_MASK_TABLE_OFS(mem_index), 1, 0); + + /* Extract the TLB index from the address into X0. */ + tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64, + TCG_REG_X0, TCG_REG_X0, addr_reg, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + + /* Add the tlb_table pointer, creating the CPUTLBEntry address into X1. */ + tcg_out_insn(s, 3502, ADD, 1, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0); + + /* Load the tlb comparator into X0, and the fast path addend into X1. */ + tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_X0, TCG_REG_X1, is_read + ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write)); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_X1, TCG_REG_X1, + offsetof(CPUTLBEntry, addend)); + + /* For aligned accesses, we check the first byte and include the alignment + bits within the address. For unaligned access, we check that we don't + cross pages using the address of the last byte of the access. */ + if (a_bits >= s_bits) { + x3 = addr_reg; + } else { + tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64, + TCG_REG_X3, addr_reg, s_mask - a_mask); + x3 = TCG_REG_X3; + } + compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; + + /* Store the page mask part of the address into X3. */ + tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, + TCG_REG_X3, x3, compare_mask); + + /* Perform the address comparison. */ + tcg_out_cmp(s, TARGET_LONG_BITS == 64, TCG_REG_X0, TCG_REG_X3, 0); + + /* If not equal, we jump to the slow path. */ + *label_ptr = s->code_ptr; + tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0); +} + +#endif /* CONFIG_SOFTMMU */ + +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, + TCGReg data_r, TCGReg addr_r, + TCGType otype, TCGReg off_r) +{ + const MemOp bswap = memop & MO_BSWAP; + + switch (memop & MO_SSIZE) { + case MO_UB: + tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r); + break; + case MO_SB: + tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW, + data_r, addr_r, otype, off_r); + break; + case MO_UW: + tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r); + if (bswap) { + tcg_out_rev16(s, data_r, data_r); + } + break; + case MO_SW: + if (bswap) { + tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r); + tcg_out_rev16(s, data_r, data_r); + tcg_out_sxt(s, ext, MO_16, data_r, data_r); + } else { + tcg_out_ldst_r(s, (ext ? 
I3312_LDRSHX : I3312_LDRSHW), + data_r, addr_r, otype, off_r); + } + break; + case MO_UL: + tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r); + if (bswap) { + tcg_out_rev32(s, data_r, data_r); + } + break; + case MO_SL: + if (bswap) { + tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r); + tcg_out_rev32(s, data_r, data_r); + tcg_out_sxt(s, TCG_TYPE_I64, MO_32, data_r, data_r); + } else { + tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r); + } + break; + case MO_Q: + tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r); + if (bswap) { + tcg_out_rev64(s, data_r, data_r); + } + break; + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, + TCGReg data_r, TCGReg addr_r, + TCGType otype, TCGReg off_r) +{ + const MemOp bswap = memop & MO_BSWAP; + + switch (memop & MO_SIZE) { + case MO_8: + tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r); + break; + case MO_16: + if (bswap && data_r != TCG_REG_XZR) { + tcg_out_rev16(s, TCG_REG_TMP, data_r); + data_r = TCG_REG_TMP; + } + tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r); + break; + case MO_32: + if (bswap && data_r != TCG_REG_XZR) { + tcg_out_rev32(s, TCG_REG_TMP, data_r); + data_r = TCG_REG_TMP; + } + tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r); + break; + case MO_64: + if (bswap && data_r != TCG_REG_XZR) { + tcg_out_rev64(s, TCG_REG_TMP, data_r); + data_r = TCG_REG_TMP; + } + tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r); + break; + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + TCGMemOpIdx oi, TCGType ext) +{ + MemOp memop = get_memop(oi); + const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; +#ifdef CONFIG_SOFTMMU + unsigned mem_index = get_mmuidx(oi); + tcg_insn_unit *label_ptr; + + tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1); + tcg_out_qemu_ld_direct(s, memop, ext, data_reg, + TCG_REG_X1, otype, addr_reg); + add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg, + s->code_ptr, label_ptr); +#else /* !CONFIG_SOFTMMU */ + if (USE_GUEST_BASE) { + tcg_out_qemu_ld_direct(s, memop, ext, data_reg, + TCG_REG_GUEST_BASE, otype, addr_reg); + } else { + tcg_out_qemu_ld_direct(s, memop, ext, data_reg, + addr_reg, TCG_TYPE_I64, TCG_REG_XZR); + } +#endif /* CONFIG_SOFTMMU */ +} + +static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + TCGMemOpIdx oi) +{ + MemOp memop = get_memop(oi); + const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; +#ifdef CONFIG_SOFTMMU + unsigned mem_index = get_mmuidx(oi); + tcg_insn_unit *label_ptr; + + tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0); + tcg_out_qemu_st_direct(s, memop, data_reg, + TCG_REG_X1, otype, addr_reg); + add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64, + data_reg, addr_reg, s->code_ptr, label_ptr); +#else /* !CONFIG_SOFTMMU */ + if (USE_GUEST_BASE) { + tcg_out_qemu_st_direct(s, memop, data_reg, + TCG_REG_GUEST_BASE, otype, addr_reg); + } else { + tcg_out_qemu_st_direct(s, memop, data_reg, + addr_reg, TCG_TYPE_I64, TCG_REG_XZR); + } +#endif /* CONFIG_SOFTMMU */ +} + +static tcg_insn_unit *tb_ret_addr; + +static void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) +{ + /* 99% of the time, we can signal the use of extension registers + by looking to see if the opcode handles 64-bit data. 
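The QEMU_BUILD_BUG_ON near the top of this file guarantees that this + TCGType value can be fed straight through as the SF bit.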
*/ + TCGType ext = (s->tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0; + + /* Hoist the loads of the most common arguments. */ + TCGArg a0 = args[0]; + TCGArg a1 = args[1]; + TCGArg a2 = args[2]; + int c2 = const_args[2]; + + /* Some operands are defined with "rZ" constraint, a register or + the zero register. These need not actually test args[I] == 0. */ +#define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I]) + + switch (opc) { + case INDEX_op_exit_tb: + /* Reuse the zeroing that exists for goto_ptr. */ + if (a0 == 0) { + tcg_out_goto_long(s, s->code_gen_epilogue); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0); + tcg_out_goto_long(s, tb_ret_addr); + } + break; + + case INDEX_op_goto_tb: + if (s->tb_jmp_insn_offset != NULL) { + /* TCG_TARGET_HAS_direct_jump */ + /* Ensure that ADRP+ADD are 8-byte aligned so that an atomic + write can be used to patch the target address. */ + if ((uintptr_t)s->code_ptr & 7) { + tcg_out32(s, NOP); + } + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); + /* actual branch destination will be patched by + tb_target_set_jmp_target later. */ + tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0); + tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0); + } else { + /* !TCG_TARGET_HAS_direct_jump */ + tcg_debug_assert(s->tb_jmp_target_addr != NULL); + intptr_t offset = tcg_pcrel_diff(s, (s->tb_jmp_target_addr + a0)) >> 2; + tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP); + } + tcg_out_insn(s, 3207, BR, TCG_REG_TMP); + set_jmp_reset_offset(s, a0); + break; + + case INDEX_op_goto_ptr: + tcg_out_insn(s, 3207, BR, a0); + break; + + case INDEX_op_br: + tcg_out_goto_label(s, arg_label(a0)); + break; + + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + tcg_out_ldst(s, I3312_LDRB, a0, a1, a2, 0); + break; + case INDEX_op_ld8s_i32: + tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2, 0); + break; + case INDEX_op_ld8s_i64: + tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2, 0); + break; + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + tcg_out_ldst(s, I3312_LDRH, a0, a1, a2, 1); + break; + case INDEX_op_ld16s_i32: + tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2, 1); + break; + case INDEX_op_ld16s_i64: + tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2, 1); + break; + case INDEX_op_ld_i32: + case INDEX_op_ld32u_i64: + tcg_out_ldst(s, I3312_LDRW, a0, a1, a2, 2); + break; + case INDEX_op_ld32s_i64: + tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2, 2); + break; + case INDEX_op_ld_i64: + tcg_out_ldst(s, I3312_LDRX, a0, a1, a2, 3); + break; + + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2, 0); + break; + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2, 1); + break; + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2, 2); + break; + case INDEX_op_st_i64: + tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2, 3); + break; + + case INDEX_op_add_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_add_i64: + if (c2) { + tcg_out_addsubi(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2); + } + break; + + case INDEX_op_sub_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_sub_i64: + if (c2) { + tcg_out_addsubi(s, ext, a0, a1, -a2); + } else { + tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2); + } + break; + + case INDEX_op_neg_i64: + case INDEX_op_neg_i32: + tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1); + break; + + case INDEX_op_and_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_and_i64: + if 
(c2) { + tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3510, AND, ext, a0, a1, a2); + } + break; + + case INDEX_op_andc_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_andc_i64: + if (c2) { + tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2); + } else { + tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2); + } + break; + + case INDEX_op_or_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_or_i64: + if (c2) { + tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2); + } + break; + + case INDEX_op_orc_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_orc_i64: + if (c2) { + tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2); + } else { + tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2); + } + break; + + case INDEX_op_xor_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_xor_i64: + if (c2) { + tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2); + } + break; + + case INDEX_op_eqv_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_eqv_i64: + if (c2) { + tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2); + } else { + tcg_out_insn(s, 3510, EON, ext, a0, a1, a2); + } + break; + + case INDEX_op_not_i64: + case INDEX_op_not_i32: + tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1); + break; + + case INDEX_op_mul_i64: + case INDEX_op_mul_i32: + tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR); + break; + + case INDEX_op_div_i64: + case INDEX_op_div_i32: + tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2); + break; + case INDEX_op_divu_i64: + case INDEX_op_divu_i32: + tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2); + break; + + case INDEX_op_rem_i64: + case INDEX_op_rem_i32: + tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2); + tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); + break; + case INDEX_op_remu_i64: + case INDEX_op_remu_i32: + tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2); + tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); + break; + + case INDEX_op_shl_i64: + case INDEX_op_shl_i32: + if (c2) { + tcg_out_shl(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2); + } + break; + + case INDEX_op_shr_i64: + case INDEX_op_shr_i32: + if (c2) { + tcg_out_shr(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2); + } + break; + + case INDEX_op_sar_i64: + case INDEX_op_sar_i32: + if (c2) { + tcg_out_sar(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2); + } + break; + + case INDEX_op_rotr_i64: + case INDEX_op_rotr_i32: + if (c2) { + tcg_out_rotr(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2); + } + break; + + case INDEX_op_rotl_i64: + case INDEX_op_rotl_i32: + if (c2) { + tcg_out_rotl(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2); + tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP); + } + break; + + case INDEX_op_clz_i64: + case INDEX_op_clz_i32: + tcg_out_cltz(s, ext, a0, a1, a2, c2, false); + break; + case INDEX_op_ctz_i64: + case INDEX_op_ctz_i32: + tcg_out_cltz(s, ext, a0, a1, a2, c2, true); + break; + + case INDEX_op_brcond_i32: + a1 = (int32_t)a1; + /* FALLTHRU */ + case INDEX_op_brcond_i64: + tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3])); + break; + + case INDEX_op_setcond_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_setcond_i64: + tcg_out_cmp(s, ext, a1, a2, c2); + /* Use CSET alias of CSINC 
Wd, WZR, WZR, invert(cond). */ + tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR, + TCG_REG_XZR, tcg_invert_cond(args[3])); + break; + + case INDEX_op_movcond_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_movcond_i64: + tcg_out_cmp(s, ext, a1, a2, c2); + tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]); + break; + + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, a0, a1, a2, ext); + break; + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, REG0(0), a1, a2); + break; + + case INDEX_op_bswap64_i64: + tcg_out_rev64(s, a0, a1); + break; + case INDEX_op_bswap32_i64: + case INDEX_op_bswap32_i32: + tcg_out_rev32(s, a0, a1); + break; + case INDEX_op_bswap16_i64: + case INDEX_op_bswap16_i32: + tcg_out_rev16(s, a0, a1); + break; + + case INDEX_op_ext8s_i64: + case INDEX_op_ext8s_i32: + tcg_out_sxt(s, ext, MO_8, a0, a1); + break; + case INDEX_op_ext16s_i64: + case INDEX_op_ext16s_i32: + tcg_out_sxt(s, ext, MO_16, a0, a1); + break; + case INDEX_op_ext_i32_i64: + case INDEX_op_ext32s_i64: + tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1); + break; + case INDEX_op_ext8u_i64: + case INDEX_op_ext8u_i32: + tcg_out_uxt(s, MO_8, a0, a1); + break; + case INDEX_op_ext16u_i64: + case INDEX_op_ext16u_i32: + tcg_out_uxt(s, MO_16, a0, a1); + break; + case INDEX_op_extu_i32_i64: + case INDEX_op_ext32u_i64: + tcg_out_movr(s, TCG_TYPE_I32, a0, a1); + break; + + case INDEX_op_deposit_i64: + case INDEX_op_deposit_i32: + tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]); + break; + + case INDEX_op_extract_i64: + case INDEX_op_extract_i32: + tcg_out_ubfm(s, ext, a0, a1, a2, a2 + args[3] - 1); + break; + + case INDEX_op_sextract_i64: + case INDEX_op_sextract_i32: + tcg_out_sbfm(s, ext, a0, a1, a2, a2 + args[3] - 1); + break; + + case INDEX_op_extract2_i64: + case INDEX_op_extract2_i32: + tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]); + break; + + case INDEX_op_add2_i32: + tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), + (int32_t)args[4], args[5], const_args[4], + const_args[5], false); + break; + case INDEX_op_add2_i64: + tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], + args[5], const_args[4], const_args[5], false); + break; + case INDEX_op_sub2_i32: + tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), + (int32_t)args[4], args[5], const_args[4], + const_args[5], true); + break; + case INDEX_op_sub2_i64: + tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], + args[5], const_args[4], const_args[5], true); + break; + + case INDEX_op_muluh_i64: + tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2); + break; + case INDEX_op_mulsh_i64: + tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2); + break; + + case INDEX_op_mb: + tcg_out_mb(s, a0); + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. 
*/ + default: + g_assert_not_reached(); + } + +#undef REG0 +} + +static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, + unsigned vecl, unsigned vece, + const TCGArg *args, const int *const_args) +{ + static const AArch64Insn cmp_insn[16] = { + [TCG_COND_EQ] = I3616_CMEQ, + [TCG_COND_GT] = I3616_CMGT, + [TCG_COND_GE] = I3616_CMGE, + [TCG_COND_GTU] = I3616_CMHI, + [TCG_COND_GEU] = I3616_CMHS, + }; + static const AArch64Insn cmp0_insn[16] = { + [TCG_COND_EQ] = I3617_CMEQ0, + [TCG_COND_GT] = I3617_CMGT0, + [TCG_COND_GE] = I3617_CMGE0, + [TCG_COND_LT] = I3617_CMLT0, + [TCG_COND_LE] = I3617_CMLE0, + }; + + TCGType type = vecl + TCG_TYPE_V64; + unsigned is_q = vecl; + TCGArg a0, a1, a2, a3; + int cmode, imm8; + + a0 = args[0]; + a1 = args[1]; + a2 = args[2]; + + switch (opc) { + case INDEX_op_ld_vec: + tcg_out_ld(s, type, a0, a1, a2); + break; + case INDEX_op_st_vec: + tcg_out_st(s, type, a0, a1, a2); + break; + case INDEX_op_dupm_vec: + tcg_out_dupm_vec(s, type, vece, a0, a1, a2); + break; + case INDEX_op_add_vec: + tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2); + break; + case INDEX_op_sub_vec: + tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2); + break; + case INDEX_op_mul_vec: + tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2); + break; + case INDEX_op_neg_vec: + tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1); + break; + case INDEX_op_abs_vec: + tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1); + break; + case INDEX_op_and_vec: + if (const_args[2]) { + is_shimm1632(~a2, &cmode, &imm8); + if (a0 == a1) { + tcg_out_insn(s, 3606, BIC, is_q, a0, 0, cmode, imm8); + return; + } + tcg_out_insn(s, 3606, MVNI, is_q, a0, 0, cmode, imm8); + a2 = a0; + } + tcg_out_insn(s, 3616, AND, is_q, 0, a0, a1, a2); + break; + case INDEX_op_or_vec: + if (const_args[2]) { + is_shimm1632(a2, &cmode, &imm8); + if (a0 == a1) { + tcg_out_insn(s, 3606, ORR, is_q, a0, 0, cmode, imm8); + return; + } + tcg_out_insn(s, 3606, MOVI, is_q, a0, 0, cmode, imm8); + a2 = a0; + } + tcg_out_insn(s, 3616, ORR, is_q, 0, a0, a1, a2); + break; + case INDEX_op_andc_vec: + if (const_args[2]) { + is_shimm1632(a2, &cmode, &imm8); + if (a0 == a1) { + tcg_out_insn(s, 3606, BIC, is_q, a0, 0, cmode, imm8); + return; + } + tcg_out_insn(s, 3606, MOVI, is_q, a0, 0, cmode, imm8); + a2 = a0; + } + tcg_out_insn(s, 3616, BIC, is_q, 0, a0, a1, a2); + break; + case INDEX_op_orc_vec: + if (const_args[2]) { + is_shimm1632(~a2, &cmode, &imm8); + if (a0 == a1) { + tcg_out_insn(s, 3606, ORR, is_q, a0, 0, cmode, imm8); + return; + } + tcg_out_insn(s, 3606, MVNI, is_q, a0, 0, cmode, imm8); + a2 = a0; + } + tcg_out_insn(s, 3616, ORN, is_q, 0, a0, a1, a2); + break; + case INDEX_op_xor_vec: + tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2); + break; + case INDEX_op_ssadd_vec: + tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2); + break; + case INDEX_op_sssub_vec: + tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2); + break; + case INDEX_op_usadd_vec: + tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2); + break; + case INDEX_op_ussub_vec: + tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2); + break; + case INDEX_op_smax_vec: + tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2); + break; + case INDEX_op_smin_vec: + tcg_out_insn(s, 3616, SMIN, is_q, vece, a0, a1, a2); + break; + case INDEX_op_umax_vec: + tcg_out_insn(s, 3616, UMAX, is_q, vece, a0, a1, a2); + break; + case INDEX_op_umin_vec: + tcg_out_insn(s, 3616, UMIN, is_q, vece, a0, a1, a2); + break; + case INDEX_op_not_vec: + tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1); + break; + case 
INDEX_op_shli_vec: + tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece)); + break; + case INDEX_op_shri_vec: + tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2); + break; + case INDEX_op_sari_vec: + tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2); + break; + case INDEX_op_shlv_vec: + tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2); + break; + case INDEX_op_aa64_sshl_vec: + tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2); + break; + case INDEX_op_cmp_vec: + { + TCGCond cond = args[3]; + AArch64Insn insn; + + if (cond == TCG_COND_NE) { + if (const_args[2]) { + tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1); + } else { + tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2); + tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0); + } + } else { + if (const_args[2]) { + insn = cmp0_insn[cond]; + if (insn) { + tcg_out_insn_3617(s, insn, is_q, vece, a0, a1); + break; + } + tcg_out_dupi_vec(s, type, TCG_VEC_TMP, 0); + a2 = TCG_VEC_TMP; + } + insn = cmp_insn[cond]; + if (insn == 0) { + TCGArg t; + t = a1, a1 = a2, a2 = t; + cond = tcg_swap_cond(cond); + insn = cmp_insn[cond]; + tcg_debug_assert(insn != 0); + } + tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2); + } + } + break; + + case INDEX_op_bitsel_vec: + a3 = args[3]; + if (a0 == a3) { + tcg_out_insn(s, 3616, BIT, is_q, 0, a0, a2, a1); + } else if (a0 == a2) { + tcg_out_insn(s, 3616, BIF, is_q, 0, a0, a3, a1); + } else { + if (a0 != a1) { + tcg_out_mov(s, type, a0, a1); + } + tcg_out_insn(s, 3616, BSL, is_q, 0, a0, a2, a3); + } + break; + + case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ + case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ + case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ + default: + g_assert_not_reached(); + } +} + +int tcg_can_emit_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece) +{ + switch (opc) { + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_andc_vec: + case INDEX_op_orc_vec: + case INDEX_op_neg_vec: + case INDEX_op_abs_vec: + case INDEX_op_not_vec: + case INDEX_op_cmp_vec: + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + case INDEX_op_sari_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: + case INDEX_op_shlv_vec: + case INDEX_op_bitsel_vec: + return 1; + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + return -1; + case INDEX_op_mul_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + return vece < MO_64; + + default: + return 0; + } +} + +void tcg_expand_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, + TCGArg a0, ...) +{ + va_list va; + TCGv_vec v0, v1, v2, t1; + + va_start(va, a0); + v0 = temp_tcgv_vec(tcg_ctx, arg_temp(a0)); + v1 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); + v2 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); + + switch (opc) { + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + /* Right shifts are negative left shifts for AArch64. */ + t1 = tcg_temp_new_vec(tcg_ctx, type); + tcg_gen_neg_vec(tcg_ctx, vece, t1, v2); + opc = (opc == INDEX_op_shrv_vec + ? 
INDEX_op_shlv_vec : INDEX_op_aa64_sshl_vec); + vec_gen_3(tcg_ctx, opc, type, vece, tcgv_vec_arg(tcg_ctx, v0), + tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t1)); + tcg_temp_free_vec(tcg_ctx, t1); + break; + + default: + g_assert_not_reached(); + } + + va_end(va); +} + +static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) +{ + static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; + static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; + static const TCGTargetOpDef w_w = { .args_ct_str = { "w", "w" } }; + static const TCGTargetOpDef w_r = { .args_ct_str = { "w", "r" } }; + static const TCGTargetOpDef w_wr = { .args_ct_str = { "w", "wr" } }; + static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } }; + static const TCGTargetOpDef r_rA = { .args_ct_str = { "r", "rA" } }; + static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; + static const TCGTargetOpDef lZ_l = { .args_ct_str = { "lZ", "l" } }; + static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; + static const TCGTargetOpDef w_w_w = { .args_ct_str = { "w", "w", "w" } }; + static const TCGTargetOpDef w_w_wO = { .args_ct_str = { "w", "w", "wO" } }; + static const TCGTargetOpDef w_w_wN = { .args_ct_str = { "w", "w", "wN" } }; + static const TCGTargetOpDef w_w_wZ = { .args_ct_str = { "w", "w", "wZ" } }; + static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; + static const TCGTargetOpDef r_r_rA = { .args_ct_str = { "r", "r", "rA" } }; + static const TCGTargetOpDef r_r_rL = { .args_ct_str = { "r", "r", "rL" } }; + static const TCGTargetOpDef r_r_rAL + = { .args_ct_str = { "r", "r", "rAL" } }; + static const TCGTargetOpDef dep + = { .args_ct_str = { "r", "0", "rZ" } }; + static const TCGTargetOpDef ext2 + = { .args_ct_str = { "r", "rZ", "rZ" } }; + static const TCGTargetOpDef movc + = { .args_ct_str = { "r", "r", "rA", "rZ", "rZ" } }; + static const TCGTargetOpDef add2 + = { .args_ct_str = { "r", "r", "rZ", "rZ", "rA", "rMZ" } }; + static const TCGTargetOpDef w_w_w_w + = { .args_ct_str = { "w", "w", "w", "w" } }; + + switch (op) { + case INDEX_op_goto_ptr: + return &r; + + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld_i64: + case INDEX_op_neg_i32: + case INDEX_op_neg_i64: + case INDEX_op_not_i32: + case INDEX_op_not_i64: + case INDEX_op_bswap16_i32: + case INDEX_op_bswap32_i32: + case INDEX_op_bswap16_i64: + case INDEX_op_bswap32_i64: + case INDEX_op_bswap64_i64: + case INDEX_op_ext8s_i32: + case INDEX_op_ext16s_i32: + case INDEX_op_ext8u_i32: + case INDEX_op_ext16u_i32: + case INDEX_op_ext8s_i64: + case INDEX_op_ext16s_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extract_i32: + case INDEX_op_extract_i64: + case INDEX_op_sextract_i32: + case INDEX_op_sextract_i64: + return &r_r; + + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + return &rZ_r; + + case INDEX_op_add_i32: + case INDEX_op_add_i64: + case INDEX_op_sub_i32: + case INDEX_op_sub_i64: + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + return &r_r_rA; + + 
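/*
 * Illustrative sketch (editorial note, not part of the original patch):
 * the "rA" constraint returned for add/sub/setcond above admits AArch64
 * arithmetic immediates, i.e. a 12-bit value optionally shifted left by
 * 12 bits, while the logical ops further below use "rL" for the
 * bitmask-encodable logical immediates.  A minimal check for the
 * arithmetic form, with a hypothetical helper name, would be:
 *
 *     static bool example_is_aimm(uint64_t v)
 *     {
 *         return (v & ~0xfffull) == 0 || (v & ~0xfff000ull) == 0;
 *     }
 *
 * e.g. 0x123 and 0x123000 are encodable, but 0x123456 is not, so such a
 * constant must first be materialized into a register.
 */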
case INDEX_op_mul_i32: + case INDEX_op_mul_i64: + case INDEX_op_div_i32: + case INDEX_op_div_i64: + case INDEX_op_divu_i32: + case INDEX_op_divu_i64: + case INDEX_op_rem_i32: + case INDEX_op_rem_i64: + case INDEX_op_remu_i32: + case INDEX_op_remu_i64: + case INDEX_op_muluh_i64: + case INDEX_op_mulsh_i64: + return &r_r_r; + + case INDEX_op_and_i32: + case INDEX_op_and_i64: + case INDEX_op_or_i32: + case INDEX_op_or_i64: + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + case INDEX_op_eqv_i32: + case INDEX_op_eqv_i64: + return &r_r_rL; + + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + case INDEX_op_rotl_i32: + case INDEX_op_rotr_i32: + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i64: + return &r_r_ri; + + case INDEX_op_clz_i32: + case INDEX_op_ctz_i32: + case INDEX_op_clz_i64: + case INDEX_op_ctz_i64: + return &r_r_rAL; + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + return &r_rA; + + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: + return &movc; + + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + return &r_l; + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + return &lZ_l; + + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + return &dep; + + case INDEX_op_extract2_i32: + case INDEX_op_extract2_i64: + return &ext2; + + case INDEX_op_add2_i32: + case INDEX_op_add2_i64: + case INDEX_op_sub2_i32: + case INDEX_op_sub2_i64: + return &add2; + + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_mul_vec: + case INDEX_op_xor_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + case INDEX_op_aa64_sshl_vec: + return &w_w_w; + case INDEX_op_not_vec: + case INDEX_op_neg_vec: + case INDEX_op_abs_vec: + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + case INDEX_op_sari_vec: + return &w_w; + case INDEX_op_ld_vec: + case INDEX_op_st_vec: + case INDEX_op_dupm_vec: + return &w_r; + case INDEX_op_dup_vec: + return &w_wr; + case INDEX_op_or_vec: + case INDEX_op_andc_vec: + return &w_w_wO; + case INDEX_op_and_vec: + case INDEX_op_orc_vec: + return &w_w_wN; + case INDEX_op_cmp_vec: + return &w_w_wZ; + case INDEX_op_bitsel_vec: + return &w_w_w_w; + + default: + return NULL; + } +} + +static void tcg_target_init(TCGContext *s) +{ + s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu; + s->tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu; + s->tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull; + s->tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull; + + s->tcg_target_call_clobber_regs = -1ull; + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X19); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X20); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X21); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X22); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X23); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X24); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X25); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X26); + 
tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X27); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X28); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_X29); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V8); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V9); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V10); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V11); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V12); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V13); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V14); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_V15); + + s->reserved_regs = 0; + tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */ + tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP); +} + +/* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). */ +#define PUSH_SIZE ((30 - 19 + 1) * 8) + +#define FRAME_SIZE \ + ((PUSH_SIZE \ + + TCG_STATIC_CALL_ARGS_SIZE \ + + CPU_TEMP_BUF_NLONGS * sizeof(long) \ + + TCG_TARGET_STACK_ALIGN - 1) \ + & ~(TCG_TARGET_STACK_ALIGN - 1)) + +/* We're expecting a 2 byte uleb128 encoded value. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); + +/* We're expecting to use a single ADDI insn. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff); + +static void tcg_target_qemu_prologue(TCGContext *s) +{ + TCGReg r; + + /* Push (FP, LR) and allocate space for all saved registers. */ + tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR, + TCG_REG_SP, -PUSH_SIZE, 1, 1); + + /* Set up frame pointer for canonical unwinding. */ + tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP); + + /* Store callee-preserved regs x19..x28. */ + for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { + int ofs = (r - TCG_REG_X19 + 2) * 8; + tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0); + } + + /* Make stack space for TCG locals. */ + tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, + FRAME_SIZE - PUSH_SIZE); + + /* Inform TCG about how to find TCG locals with register, offset, size. */ + tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, + CPU_TEMP_BUF_NLONGS * sizeof(long)); + +#if !defined(CONFIG_SOFTMMU) + if (USE_GUEST_BASE) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); + } +#endif + + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]); + + /* + * Return path for goto_ptr. Set return value to 0, a-la exit_tb, + * and fall through to the rest of the epilogue. + */ + s->code_gen_epilogue = s->code_ptr; + tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0); + + /* TB epilogue */ + tb_ret_addr = s->code_ptr; + + /* Remove TCG locals stack space. */ + tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, + FRAME_SIZE - PUSH_SIZE); + + /* Restore registers x19..x28. */ + for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { + int ofs = (r - TCG_REG_X19 + 2) * 8; + tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0); + } + + /* Pop (FP, LR), restore SP to previous frame. 
*/ + tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR, + TCG_REG_SP, PUSH_SIZE, 0, 1); + tcg_out_insn(s, 3207, RET, TCG_REG_LR); +} + +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + int i; + for (i = 0; i < count; ++i) { + p[i] = NOP; + } +} + +typedef struct { + DebugFrameHeader h; + uint8_t fde_def_cfa[4]; + uint8_t fde_reg_ofs[24]; +} DebugFrame; + +#define ELF_HOST_MACHINE EM_AARCH64 + +static const DebugFrame debug_frame = { + .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ + .h.cie.id = -1, + .h.cie.version = 1, + .h.cie.code_align = 1, + .h.cie.data_align = 0x78, /* sleb128 -8 */ + .h.cie.return_column = TCG_REG_LR, + + /* Total FDE size does not include the "len" member. */ + .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), + + .fde_def_cfa = { + 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ + (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ + (FRAME_SIZE >> 7) + }, + .fde_reg_ofs = { + 0x80 + 28, 1, /* DW_CFA_offset, x28, -8 */ + 0x80 + 27, 2, /* DW_CFA_offset, x27, -16 */ + 0x80 + 26, 3, /* DW_CFA_offset, x26, -24 */ + 0x80 + 25, 4, /* DW_CFA_offset, x25, -32 */ + 0x80 + 24, 5, /* DW_CFA_offset, x24, -40 */ + 0x80 + 23, 6, /* DW_CFA_offset, x23, -48 */ + 0x80 + 22, 7, /* DW_CFA_offset, x22, -56 */ + 0x80 + 21, 8, /* DW_CFA_offset, x21, -64 */ + 0x80 + 20, 9, /* DW_CFA_offset, x20, -72 */ + 0x80 + 19, 10, /* DW_CFA_offset, x19, -80 */ + 0x80 + 30, 11, /* DW_CFA_offset, lr, -88 */ + 0x80 + 29, 12, /* DW_CFA_offset, fp, -96 */ + } +}; + +void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) +{ + tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); +} diff --git a/qemu/tcg/aarch64/tcg-target.opc.h b/qemu/tcg/aarch64/tcg-target.opc.h new file mode 100644 index 00000000..26bfd9c4 --- /dev/null +++ b/qemu/tcg/aarch64/tcg-target.opc.h @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2019 Linaro + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ + +DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC) diff --git a/qemu/tcg/arm/tcg-target.h b/qemu/tcg/arm/tcg-target.h index a6ea9763..17e77137 100644 --- a/qemu/tcg/arm/tcg-target.h +++ b/qemu/tcg/arm/tcg-target.h @@ -22,11 +22,44 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ -#ifndef TCG_TARGET_ARM -#define TCG_TARGET_ARM 1 + +#ifndef ARM_TCG_TARGET_H +#define ARM_TCG_TARGET_H + +/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise.
*/ +#ifndef __ARM_ARCH +# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7EM__) +# define __ARM_ARCH 7 +# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) +# define __ARM_ARCH 6 +# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \ + || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \ + || defined(__ARM_ARCH_5TEJ__) +# define __ARM_ARCH 5 +# else +# define __ARM_ARCH 4 +# endif +#endif + +extern int arm_arch; + +#if defined(__ARM_ARCH_5T__) \ + || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__) +# define use_armv5t_instructions 1 +#else +# define use_armv5t_instructions use_armv6_instructions +#endif + +#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6) +#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7) #undef TCG_TARGET_STACK_GROWSUP #define TCG_TARGET_INSN_UNIT_SIZE 4 +#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 typedef enum { TCG_REG_R0 = 0, @@ -52,8 +85,7 @@ typedef enum { #ifdef __ARM_ARCH_EXT_IDIV__ #define use_idiv_instructions 1 #else -extern bool use_idiv_instructions_rt; -#define use_idiv_instructions use_idiv_instructions_rt +extern bool use_idiv_instructions; #endif @@ -78,7 +110,13 @@ extern bool use_idiv_instructions_rt; #define TCG_TARGET_HAS_eqv_i32 0 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 -#define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_clz_i32 use_armv5t_instructions +#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions +#define TCG_TARGET_HAS_ctpop_i32 0 +#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions +#define TCG_TARGET_HAS_extract_i32 use_armv7_instructions +#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions +#define TCG_TARGET_HAS_extract2_i32 1 #define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_mulu2_i32 1 #define TCG_TARGET_HAS_muls2_i32 1 @@ -86,24 +124,27 @@ extern bool use_idiv_instructions_rt; #define TCG_TARGET_HAS_mulsh_i32 0 #define TCG_TARGET_HAS_div_i32 use_idiv_instructions #define TCG_TARGET_HAS_rem_i32 0 - -extern bool tcg_target_deposit_valid(int ofs, int len); -#define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid +#define TCG_TARGET_HAS_goto_ptr 1 +#define TCG_TARGET_HAS_direct_jump 0 enum { TCG_AREG0 = TCG_REG_R6, }; +#define TCG_TARGET_DEFAULT_MO (0) +#define TCG_TARGET_HAS_MEMORY_BSWAP 1 + static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { -#if QEMU_GNUC_PREREQ(4, 1) __builtin___clear_cache((char *) start, (char *) stop); -#else - register uintptr_t _beg __asm("a1") = start; - register uintptr_t _end __asm("a2") = stop; - register uintptr_t _flg __asm("a3") = 0; - __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg)); -#endif } +/* not defined -- call should be eliminated at compile time */ +void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t); + +#ifdef CONFIG_SOFTMMU +#define TCG_TARGET_NEED_LDST_LABELS +#endif +#define TCG_TARGET_NEED_POOL_LABELS + #endif diff --git a/qemu/tcg/arm/tcg-target.c b/qemu/tcg/arm/tcg-target.inc.c similarity index 69% rename from qemu/tcg/arm/tcg-target.c rename to qemu/tcg/arm/tcg-target.inc.c index a5feaff3..48ce5461 100644 --- a/qemu/tcg/arm/tcg-target.c +++ b/qemu/tcg/arm/tcg-target.inc.c @@ -23,41 +23,12 @@ */ #include "elf.h" -#include "tcg-be-ldst.h" +#include "../tcg-pool.inc.c" -/* The __ARM_ARCH define is 
provided by gcc 4.8. Construct it otherwise. */ -#ifndef __ARM_ARCH -# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ - || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ - || defined(__ARM_ARCH_7EM__) -# define __ARM_ARCH 7 -# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ - || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \ - || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) -# define __ARM_ARCH 6 -# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \ - || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \ - || defined(__ARM_ARCH_5TEJ__) -# define __ARM_ARCH 5 -# else -# define __ARM_ARCH 4 -# endif -#endif +int arm_arch = __ARM_ARCH; -static int arm_arch = __ARM_ARCH; - -#if defined(__ARM_ARCH_5T__) \ - || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__) -# define use_armv5t_instructions 1 -#else -# define use_armv5t_instructions use_armv6_instructions -#endif - -#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6) -#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7) - -#ifndef __ARM_ARCH_EXT_IDIV__ -bool use_idiv_instructions_rt; +#ifndef use_idiv_instructions +bool use_idiv_instructions; #endif /* ??? Ought to think about changing CONFIG_SOFTMMU to always defined. */ @@ -67,7 +38,7 @@ bool use_idiv_instructions_rt; # define USING_SOFTMMU 0 #endif -#ifndef NDEBUG +#ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "%r0", "%r1", @@ -115,18 +86,147 @@ static const int tcg_target_call_oarg_regs[2] = { #define TCG_REG_TMP TCG_REG_R12 -static inline void reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target) +enum arm_cond_code_e { + COND_EQ = 0x0, + COND_NE = 0x1, + COND_CS = 0x2, /* Unsigned greater or equal */ + COND_CC = 0x3, /* Unsigned less than */ + COND_MI = 0x4, /* Negative */ + COND_PL = 0x5, /* Zero or greater */ + COND_VS = 0x6, /* Overflow */ + COND_VC = 0x7, /* No overflow */ + COND_HI = 0x8, /* Unsigned greater than */ + COND_LS = 0x9, /* Unsigned less or equal */ + COND_GE = 0xa, + COND_LT = 0xb, + COND_GT = 0xc, + COND_LE = 0xd, + COND_AL = 0xe, +}; + +#define TO_CPSR (1 << 20) + +#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00) +#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20) +#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40) +#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60) +#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10) +#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30) +#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50) +#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70) + +typedef enum { + ARITH_AND = 0x0 << 21, + ARITH_EOR = 0x1 << 21, + ARITH_SUB = 0x2 << 21, + ARITH_RSB = 0x3 << 21, + ARITH_ADD = 0x4 << 21, + ARITH_ADC = 0x5 << 21, + ARITH_SBC = 0x6 << 21, + ARITH_RSC = 0x7 << 21, + ARITH_TST = 0x8 << 21 | TO_CPSR, + ARITH_CMP = 0xa << 21 | TO_CPSR, + ARITH_CMN = 0xb << 21 | TO_CPSR, + ARITH_ORR = 0xc << 21, + ARITH_MOV = 0xd << 21, + ARITH_BIC = 0xe << 21, + ARITH_MVN = 0xf << 21, + + INSN_CLZ = 0x016f0f10, + INSN_RBIT = 0x06ff0f30, + + INSN_LDR_IMM = 0x04100000, + INSN_LDR_REG = 0x06100000, + INSN_STR_IMM = 0x04000000, + INSN_STR_REG = 0x06000000, + + INSN_LDRH_IMM = 0x005000b0, + INSN_LDRH_REG = 0x001000b0, + INSN_LDRSH_IMM = 0x005000f0, + INSN_LDRSH_REG = 0x001000f0, + INSN_STRH_IMM = 0x004000b0, + INSN_STRH_REG = 0x000000b0, + + INSN_LDRB_IMM = 0x04500000, + INSN_LDRB_REG = 0x06500000, + INSN_LDRSB_IMM = 0x005000d0, + INSN_LDRSB_REG = 0x001000d0, + INSN_STRB_IMM = 0x04400000, + INSN_STRB_REG = 0x06400000, + + INSN_LDRD_IMM = 0x004000d0, + 
INSN_LDRD_REG = 0x000000d0, + INSN_STRD_IMM = 0x004000f0, + INSN_STRD_REG = 0x000000f0, + + INSN_DMB_ISH = 0xf57ff05b, + INSN_DMB_MCR = 0xee070fba, + + /* Architected nop introduced in v6k. */ + /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this + also Just So Happened to do nothing on pre-v6k so that we + don't need to conditionalize it? */ + INSN_NOP_v6k = 0xe320f000, + /* Otherwise the assembler uses mov r0,r0 */ + INSN_NOP_v4 = (COND_AL << 28) | ARITH_MOV, +} ARMInsn; + +#define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4) + +static const uint8_t tcg_cond_to_arm_cond[] = { + [TCG_COND_EQ] = COND_EQ, + [TCG_COND_NE] = COND_NE, + [TCG_COND_LT] = COND_LT, + [TCG_COND_GE] = COND_GE, + [TCG_COND_LE] = COND_LE, + [TCG_COND_GT] = COND_GT, + /* unsigned */ + [TCG_COND_LTU] = COND_CC, + [TCG_COND_GEU] = COND_CS, + [TCG_COND_LEU] = COND_LS, + [TCG_COND_GTU] = COND_HI, +}; + +static inline bool reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target) { ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2; - *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff); + if (offset == sextract32(offset, 0, 24)) { + *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff); + return true; + } + return false; } -static void patch_reloc(tcg_insn_unit *code_ptr, int type, +static inline bool reloc_pc13(tcg_insn_unit *code_ptr, tcg_insn_unit *target) +{ + ptrdiff_t offset = tcg_ptr_byte_diff(target, code_ptr) - 8; + + if (offset >= -0xfff && offset <= 0xfff) { + tcg_insn_unit insn = *code_ptr; + bool u = (offset >= 0); + if (!u) { + offset = -offset; + } + insn = deposit32(insn, 23, 1, u); + insn = deposit32(insn, 0, 12, offset); + *code_ptr = insn; + return true; + } + return false; +} + +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { - assert(type == R_ARM_PC24); - assert(addend == 0); - reloc_pc24(code_ptr, (tcg_insn_unit *)value); + tcg_debug_assert(addend == 0); + + if (type == R_ARM_PC24) { + return reloc_pc24(code_ptr, (tcg_insn_unit *)value); + } else if (type == R_ARM_PC13) { + return reloc_pc13(code_ptr, (tcg_insn_unit *)value); + } else { + g_assert_not_reached(); + } } #define TCG_CT_CONST_ARM 0x100 @@ -135,12 +235,10 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type, #define TCG_CT_CONST_ZERO 0x800 /* parse target specific constraints */ -static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +static const char *target_parse_constraint(TCGArgConstraint *ct, + const char *ct_str, TCGType type) { - const char *ct_str; - - ct_str = *pct_str; - switch (ct_str[0]) { + switch (*ct_str++) { case 'I': ct->ct |= TCG_CT_CONST_ARM; break; @@ -156,19 +254,20 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) case 'r': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); + ct->u.regs = 0xffff; break; /* qemu_ld address */ case 'l': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); + ct->u.regs = 0xffff; #ifdef CONFIG_SOFTMMU /* r0-r2,lr will be overwritten when reading the tlb entry, so don't use these. 
*/ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14); #endif break; @@ -176,7 +275,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) /* qemu_st address & data */ case 's': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); + ct->u.regs = 0xffff; /* r0-r2 will be overwritten when reading the tlb entry (softmmu only) and r0-r1 doing the byte swapping, so don't use these. */ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); @@ -193,12 +292,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) break; default: - return -1; + return NULL; } - ct_str++; - *pct_str = ct_str; - - return 0; + return ct_str; } static inline uint32_t rotl(uint32_t val, int n) @@ -263,118 +359,12 @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type, } } -#define TO_CPSR (1 << 20) - -typedef enum { - ARITH_AND = 0x0 << 21, - ARITH_EOR = 0x1 << 21, - ARITH_SUB = 0x2 << 21, - ARITH_RSB = 0x3 << 21, - ARITH_ADD = 0x4 << 21, - ARITH_ADC = 0x5 << 21, - ARITH_SBC = 0x6 << 21, - ARITH_RSC = 0x7 << 21, - ARITH_TST = 0x8 << 21 | TO_CPSR, - ARITH_CMP = 0xa << 21 | TO_CPSR, - ARITH_CMN = 0xb << 21 | TO_CPSR, - ARITH_ORR = 0xc << 21, - ARITH_MOV = 0xd << 21, - ARITH_BIC = 0xe << 21, - ARITH_MVN = 0xf << 21, - - INSN_LDR_IMM = 0x04100000, - INSN_LDR_REG = 0x06100000, - INSN_STR_IMM = 0x04000000, - INSN_STR_REG = 0x06000000, - - INSN_LDRH_IMM = 0x005000b0, - INSN_LDRH_REG = 0x001000b0, - INSN_LDRSH_IMM = 0x005000f0, - INSN_LDRSH_REG = 0x001000f0, - INSN_STRH_IMM = 0x004000b0, - INSN_STRH_REG = 0x000000b0, - - INSN_LDRB_IMM = 0x04500000, - INSN_LDRB_REG = 0x06500000, - INSN_LDRSB_IMM = 0x005000d0, - INSN_LDRSB_REG = 0x001000d0, - INSN_STRB_IMM = 0x04400000, - INSN_STRB_REG = 0x06400000, - - INSN_LDRD_IMM = 0x004000d0, - INSN_LDRD_REG = 0x000000d0, - INSN_STRD_IMM = 0x004000f0, - INSN_STRD_REG = 0x000000f0, -} ARMInsn; - -#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00) -#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20) -#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40) -#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60) -#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10) -#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30) -#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50) -#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70) - -enum arm_cond_code_e { - COND_EQ = 0x0, - COND_NE = 0x1, - COND_CS = 0x2, /* Unsigned greater or equal */ - COND_CC = 0x3, /* Unsigned less than */ - COND_MI = 0x4, /* Negative */ - COND_PL = 0x5, /* Zero or greater */ - COND_VS = 0x6, /* Overflow */ - COND_VC = 0x7, /* No overflow */ - COND_HI = 0x8, /* Unsigned greater than */ - COND_LS = 0x9, /* Unsigned less or equal */ - COND_GE = 0xa, - COND_LT = 0xb, - COND_GT = 0xc, - COND_LE = 0xd, - COND_AL = 0xe, -}; - -static const uint8_t tcg_cond_to_arm_cond[] = { - [TCG_COND_EQ] = COND_EQ, - [TCG_COND_NE] = COND_NE, - [TCG_COND_LT] = COND_LT, - [TCG_COND_GE] = COND_GE, - [TCG_COND_LE] = COND_LE, - [TCG_COND_GT] = COND_GT, - /* unsigned */ - [TCG_COND_LTU] = COND_CC, - [TCG_COND_GEU] = COND_CS, - [TCG_COND_LEU] = COND_LS, - [TCG_COND_GTU] = COND_HI, -}; - -static inline void tcg_out_bx(TCGContext *s, int cond, int rn) -{ - tcg_out32(s, (cond << 28) | 0x012fff10 | rn); -} - static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset) { tcg_out32(s, (cond << 28) | 0x0a000000 | (((offset 
- 8) >> 2) & 0x00ffffff)); } -static inline void tcg_out_b_noaddr(TCGContext *s, int cond) -{ - /* We pay attention here to not modify the branch target by masking - the corresponding bytes. This ensure that caches and memory are - kept coherent during retranslation. */ - tcg_out32(s, deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0a)); -} - -static inline void tcg_out_bl_noaddr(TCGContext *s, int cond) -{ - /* We pay attention here to not modify the branch target by masking - the corresponding bytes. This ensure that caches and memory are - kept coherent during retranslation. */ - tcg_out32(s, deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0b)); -} - static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset) { tcg_out32(s, (cond << 28) | 0x0b000000 | @@ -401,16 +391,7 @@ static inline void tcg_out_dat_reg(TCGContext *s, static inline void tcg_out_nop(TCGContext *s) { - if (use_armv7_instructions) { - /* Architected nop introduced in v6k. */ - /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this - also Just So Happened to do nothing on pre-v6k so that we - don't need to conditionalize it? */ - tcg_out32(s, 0xe320f000); - } else { - /* Prior to that the assembler uses mov r0, r0. */ - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 0, 0, 0, SHIFT_IMM_LSL(0)); - } + tcg_out32(s, INSN_NOP); } static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm) @@ -421,6 +402,18 @@ static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm) } } +static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn) +{ + /* Unless the C portion of QEMU is compiled as thumb, we don't + actually need true BX semantics; merely a branch to an address + held in a register. */ + if (use_armv5t_instructions) { + tcg_out32(s, (cond << 28) | 0x012fff10 | rn); + } else { + tcg_out_mov_reg(s, cond, TCG_REG_PC, rn); + } +} + static inline void tcg_out_dat_imm(TCGContext *s, int cond, int opc, int rd, int rn, int im) { @@ -428,25 +421,218 @@ static inline void tcg_out_dat_imm(TCGContext *s, (rn << 16) | (rd << 12) | im); } +/* Note that this routine is used for both LDR and LDRH formats, so we do + not wish to include an immediate shift at this point. 
*/ +static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, + TCGReg rn, TCGReg rm, bool u, bool p, bool w) +{ + tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) + | (w << 21) | (rn << 16) | (rt << 12) | rm); +} + +static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, + TCGReg rn, int imm8, bool p, bool w) +{ + bool u = 1; + if (imm8 < 0) { + imm8 = -imm8; + u = 0; + } + tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | + (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf)); +} + +static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, + TCGReg rn, int imm12, bool p, bool w) +{ + bool u = 1; + if (imm12 < 0) { + imm12 = -imm12; + u = 0; + } + tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | + (rn << 16) | (rt << 12) | imm12); +} + +static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, int imm12) +{ + tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0); +} + +static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, int imm12) +{ + tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0); +} + +static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0); +} + +static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0); +} + +static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, int imm8) +{ + tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0); +} + +static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); +} + +static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1); +} + +static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, int imm8) +{ + tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); +} + +static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0); +} + +/* Register pre-increment with base writeback. 
*/ +static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1); +} + +static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1); +} + +static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, int imm8) +{ + tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0); +} + +static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, int imm8) +{ + tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0); +} + +static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0); +} + +static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0); +} + +static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, int imm8) +{ + tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0); +} + +static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0); +} + +static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, int imm12) +{ + tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0); +} + +static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, int imm12) +{ + tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0); +} + +static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0); +} + +static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0); +} + +static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, int imm8) +{ + tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0); +} + +static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt, + TCGReg rn, TCGReg rm) +{ + tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0); +} + +static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg) +{ + new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0); + tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0); +} + static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg) { - int rot, opc, rn; + int rot, diff, opc, sh1, sh2; + uint32_t tt0, tt1, tt2; - /* For armv7, make sure not to use movw+movt when mov/mvn would do. - Speed things up by only checking when movt would be required. - Prior to armv7, have one go at fully rotated immediates before - doing the decomposition thing below. */ - if (!use_armv7_instructions || (arg & 0xffff0000)) { - rot = encode_imm(arg); + /* Check a single MOV/MVN before anything else. */ + rot = encode_imm(arg); + if (rot >= 0) { + tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, + rotl(arg, rot) | (rot << 7)); + return; + } + rot = encode_imm(~arg); + if (rot >= 0) { + tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, + rotl(~arg, rot) | (rot << 7)); + return; + } + + /* Check for a pc-relative address. This will usually be the TB, + or within the TB, which is immediately before the code block. 
*/ + diff = arg - ((intptr_t)s->code_ptr + 8); + if (diff >= 0) { + rot = encode_imm(diff); if (rot >= 0) { - tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, - rotl(arg, rot) | (rot << 7)); + tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, + rotl(diff, rot) | (rot << 7)); return; } - rot = encode_imm(~arg); + } else { + rot = encode_imm(-diff); if (rot >= 0) { - tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, - rotl(~arg, rot) | (rot << 7)); + tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, + rotl(-diff, rot) | (rot << 7)); return; } } @@ -464,24 +650,30 @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg) return; } - /* TODO: This is very suboptimal, we can easily have a constant - pool somewhere after all the instructions. */ + /* Look for sequences of two insns. If we have lots of 1's, we can + shorten the sequence by beginning with mvn and then clearing + higher bits with eor. */ + tt0 = arg; opc = ARITH_MOV; - rn = 0; - /* If we have lots of leading 1's, we can shorten the sequence by - beginning with mvn and then clearing higher bits with eor. */ - if (clz32(~arg) > clz32(arg)) { - opc = ARITH_MVN, arg = ~arg; + if (ctpop32(arg) > 16) { + tt0 = ~arg; + opc = ARITH_MVN; + } + sh1 = ctz32(tt0) & ~1; + tt1 = tt0 & ~(0xff << sh1); + sh2 = ctz32(tt1) & ~1; + tt2 = tt1 & ~(0xff << sh2); + if (tt2 == 0) { + rot = ((32 - sh1) << 7) & 0xf00; + tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot); + rot = ((32 - sh2) << 7) & 0xf00; + tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd, + ((tt0 >> sh2) & 0xff) | rot); + return; } - do { - int i = ctz32(arg) & ~1; - rot = ((32 - i) << 7) & 0xf00; - tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot); - arg &= ~(0xff << i); - opc = ARITH_EOR; - rn = rd; - } while (arg); + /* Otherwise, drop it into the constant pool. */ + tcg_out_movi_pool(s, cond, rd, arg); } static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst, @@ -492,7 +684,7 @@ static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst, */ if (rhs_is_const) { int rot = encode_imm(rhs); - assert(rot >= 0); + tcg_debug_assert(rot >= 0); tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7)); } else { tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); @@ -511,7 +703,7 @@ static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv, if (rot < 0) { rhs = ~rhs; rot = encode_imm(rhs); - assert(rot >= 0); + tcg_debug_assert(rot >= 0); opc = opinv; } tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7)); @@ -532,7 +724,7 @@ static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg, if (rot < 0) { rhs = -rhs; rot = encode_imm(rhs); - assert(rot >= 0); + tcg_debug_assert(rot >= 0); opc = opneg; } tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7)); @@ -718,16 +910,6 @@ static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn) } } -bool tcg_target_deposit_valid(int ofs, int len) -{ - /* ??? Without bfi, we could improve over generic code by combining - the right-shift from a non-zero ofs with the orr. We do run into - problems when rd == rs, and the mask generated from ofs+len doesn't - fit into an immediate. We would have to be careful not to pessimize - wrt the optimizations performed on the expanded code. 
*/ - return use_armv7_instructions; -} - static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd, TCGArg a1, int ofs, int len, bool const_a1) { @@ -740,170 +922,20 @@ static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd, | (ofs << 7) | ((ofs + len - 1) << 16)); } -/* Note that this routine is used for both LDR and LDRH formats, so we do - not wish to include an immediate shift at this point. */ -static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, - TCGReg rn, TCGReg rm, bool u, bool p, bool w) +static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd, + TCGArg a1, int ofs, int len) { - tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) - | (w << 21) | (rn << 16) | (rt << 12) | rm); + /* ubfx */ + tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1 + | (ofs << 7) | ((len - 1) << 16)); } -static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, - TCGReg rn, int imm8, bool p, bool w) +static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd, + TCGArg a1, int ofs, int len) { - bool u = 1; - if (imm8 < 0) { - imm8 = -imm8; - u = 0; - } - tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | - (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf)); -} - -static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt, - TCGReg rn, int imm12, bool p, bool w) -{ - bool u = 1; - if (imm12 < 0) { - imm12 = -imm12; - u = 0; - } - tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) | - (rn << 16) | (rt << 12) | imm12); -} - -static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm12) -{ - tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0); -} - -static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm12) -{ - tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0); -} - -static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0); -} - -static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0); -} - -static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) -{ - tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0); -} - -static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); -} - -static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) -{ - tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); -} - -static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0); -} - -/* Register pre-increment with base writeback. 
*/ -static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1); -} - -static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1); -} - -static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) -{ - tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0); -} - -static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) -{ - tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0); -} - -static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0); -} - -static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0); -} - -static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) -{ - tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0); -} - -static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0); -} - -static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm12) -{ - tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0); -} - -static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm12) -{ - tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0); -} - -static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0); -} - -static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0); -} - -static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, int imm8) -{ - tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0); -} - -static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt, - TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0); + /* sbfx */ + tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1 + | (ofs << 7) | ((len - 1) << 16)); } static inline void tcg_out_ld32u(TCGContext *s, int cond, @@ -990,7 +1022,7 @@ static inline void tcg_out_st8(TCGContext *s, int cond, * with the code buffer limited to 16MB we wouldn't need the long case. * But we also use it for the tail-call to the qemu_ld/st helpers, which does. 
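The direct-branch fast path in tcg_out_goto below depends on the displacement fitting ARM's B encoding, which holds a signed 24-bit word offset, i.e. roughly +/-32MB around pc. A hedged sketch of that range test (assuming the usual ARM convention that pc reads as the instruction address plus 8):

    /* `from` and `to` are byte addresses of host code. */
    static bool b_displacement_ok(intptr_t from, intptr_t to)
    {
        intptr_t disp = to - (from + 8);   /* pc-relative */
        return (disp & 3) == 0 && disp >= -(1 << 25) && disp < (1 << 25);
    }

When the test fails, the new code falls back to loading the target through the constant pool rather than the old movi32-plus-bx sequence.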
*/ -static inline void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr) +static void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr) { intptr_t addri = (intptr_t)addr; ptrdiff_t disp = tcg_pcrel_diff(s, addr); @@ -999,16 +1031,7 @@ static inline void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr) tcg_out_b(s, cond, disp); return; } - - tcg_out_movi32(s, cond, TCG_REG_TMP, addri); - if (use_armv5t_instructions) { - tcg_out_bx(s, cond, TCG_REG_TMP); - } else { - if (addri & 1) { - tcg_abort(); - } - tcg_out_mov_reg(s, cond, TCG_REG_PC, TCG_REG_TMP); - } + tcg_out_movi_pool(s, cond, TCG_REG_PC, addri); } /* The call case is mostly used for helpers - so it's not unreasonable @@ -1032,25 +1055,84 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr) tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri); tcg_out_blx(s, COND_AL, TCG_REG_TMP); } else { - tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4); - tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4); - tcg_out32(s, addri); + /* ??? Know that movi_pool emits exactly 1 insn. */ + tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0); + tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri); } } -static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index) +static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l) { - TCGLabel *l = &s->labels[label_index]; - if (l->has_value) { tcg_out_goto(s, cond, l->u.value_ptr); } else { - tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 0); - tcg_out_b_noaddr(s, cond); + tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0); + tcg_out_b(s, cond, 0); + } +} + +static inline void tcg_out_mb(TCGContext *s, TCGArg a0) +{ + if (use_armv7_instructions) { + tcg_out32(s, INSN_DMB_ISH); + } else if (use_armv6_instructions) { + tcg_out32(s, INSN_DMB_MCR); + } +} + +static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args, + const int *const_args) +{ + TCGReg al = args[0]; + TCGReg ah = args[1]; + TCGArg bl = args[2]; + TCGArg bh = args[3]; + TCGCond cond = args[4]; + int const_bl = const_args[2]; + int const_bh = const_args[3]; + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_NE: + case TCG_COND_LTU: + case TCG_COND_LEU: + case TCG_COND_GTU: + case TCG_COND_GEU: + /* We perform a conditional comparison. If the high half is + equal, then overwrite the flags with the comparison of the + low half. The resulting flags cover the whole. */ + tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh); + tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl); + return cond; + + case TCG_COND_LT: + case TCG_COND_GE: + /* We perform a double-word subtraction and examine the result. + We do not actually need the result of the subtract, so the + low part "subtract" is a compare. For the high half we have + no choice but to compute into a temporary. */ + tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl); + tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR, + TCG_REG_TMP, ah, bh, const_bh); + return cond; + + case TCG_COND_LE: + case TCG_COND_GT: + /* Similar, but with swapped arguments, via reversed subtract. */
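To see what the flag sequences in tcg_out_cmp2 compute, here is the unsigned case written out as plain C (a model for illustration only): if the high halves are equal the comparison is decided by the low halves, otherwise by the high halves.

    /* Model of the CMP/CMPEQ pair for TCG_COND_LTU on a 64-bit value
       split into 32-bit halves. */
    static bool ltu64_halves(uint32_t al, uint32_t ah,
                             uint32_t bl, uint32_t bh)
    {
        return (ah == bh) ? (al < bl) : (ah < bh);
    }

The signed LT/GE cases instead perform a full 64-bit subtract (CMP on the low half, SBC on the high half) and read the sign/overflow flags; LE/GT reuse the same trick with the operands reversed, which is why tcg_swap_cond is returned below.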
+ tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, + TCG_REG_TMP, al, bl, const_bl); + tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR, + TCG_REG_TMP, ah, bh, const_bh); + return tcg_swap_cond(cond); + + default: + g_assert_not_reached(); } } #ifdef CONFIG_SOFTMMU +#include "../tcg-ldst.inc.c" + /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ @@ -1102,7 +1184,7 @@ static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg) \ } else { \ int ofs = (argreg - 4) * 4; \ EXT_ARG; \ - assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE); \ + tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE); \ tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs); \ } \ return argreg + 1; \ @@ -1139,115 +1221,142 @@ static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg, #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS) -/* We're expecting to use an 8-bit immediate and to mask. */ -QEMU_BUILD_BUG_ON(CPU_TLB_BITS > 8); +/* We expect to use a 9-bit sign-magnitude negative offset from ENV. */ +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256); -/* We're expecting to use an 8-bit immediate add + 8-bit ldrd offset. - Using the offset of the second entry in the last tlb table ensures - that we can index all of the elements of the first entry. */ -QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1]) - > 0xffff); +/* These offsets are built into the LDRD below. */ +QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); +QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); /* Load and compare a TLB entry, leaving the flags set. Returns the register containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - TCGMemOp s_bits, int mem_index, bool is_load) + MemOp opc, int mem_index, bool is_load) { - TCGReg base = TCG_AREG0; - int cmp_off = - (is_load - ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) - : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write)); - int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend); +#ifdef TARGET_ARM + struct uc_struct *uc = s->uc; +#endif + int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write)); + int fast_off = TLB_MASK_TABLE_OFS(mem_index); + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); + int table_off = fast_off + offsetof(CPUTLBDescFast, table); + unsigned s_bits = opc & MO_SIZE; + unsigned a_bits = get_alignment_bits(opc); - /* Should generate something like the following: * shr tmp, addrlo, #TARGET_PAGE_BITS (1) * add r2, env, #high * and r0, tmp, #(CPU_TLB_SIZE - 1) (2) * add r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS (3) * ldr r0, [r2, #cmp] (4) * tst addrlo, #s_mask * ldr r2, [r2, #add] (5) * cmpeq r0, tmp, lsl #TARGET_PAGE_BITS + /* + * We don't support inline unaligned accesses, but we can easily + * support overalignment checks. */ - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, - 0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS)); - - /* We checked that the offset is contained within 16 bits above.
*/ - if (add_off > 0xfff || (use_armv6_instructions && cmp_off > 0xff)) { - tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R2, base, - (24 << 7) | (cmp_off >> 8)); - base = TCG_REG_R2; - add_off -= cmp_off & 0xff00; - cmp_off &= 0xff; + if (a_bits < s_bits) { + a_bits = s_bits; } - tcg_out_dat_imm(s, COND_AL, ARITH_AND, - TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1); - tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R2, base, - TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS)); - - /* Load the tlb comparator. Use ldrd if needed and available, - but due to how the pointer needs setting up, ldm isn't useful. - Base arm5 doesn't have ldrd, but armv5te does. */ - if (use_armv6_instructions && TARGET_LONG_BITS == 64) { - tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off); + /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */ + if (use_armv6_instructions) { + tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); } else { - tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off); - if (TARGET_LONG_BITS == 64) { - tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2, cmp_off + 4); + tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off); + tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off); + } + + /* Extract the tlb index from the address into R0. */ + tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, + SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)); + + /* + * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. + * Load the tlb comparator into R2/R3 and the fast path addend into R1. + */ + if (cmp_off == 0) { + if (use_armv6_instructions && TARGET_LONG_BITS == 64) { + tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); + } else { + tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); + } + } else { + tcg_out_dat_reg(s, COND_AL, ARITH_ADD, + TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); + if (use_armv6_instructions && TARGET_LONG_BITS == 64) { + tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); + } else { + tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); } } - - /* Check alignment. */ - if (s_bits) { - tcg_out_dat_imm(s, COND_AL, ARITH_TST, - 0, addrlo, (1 << s_bits) - 1); + if (!use_armv6_instructions && TARGET_LONG_BITS == 64) { + tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4); } /* Load the tlb addend. */ - tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R2, add_off); + tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, + offsetof(CPUTLBEntry, addend)); - tcg_out_dat_reg(s, (s_bits ? COND_EQ : COND_AL), ARITH_CMP, 0, - TCG_REG_R0, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS)); + /* + * Check alignment, check comparators. + * Do this in no more than 3 insns. Use MOVW for v7, if possible, + * to reduce the number of sequential conditional instructions. + * Almost all guests have at least 4k pages, which means that we need + * to clear at least 9 bits even for an 8-byte memory, which means it + * isn't worth checking for an immediate operand for BIC. 
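The lookup emitted above can be summarized in C. The fast-path descriptor holds a mask sized to the TLB and a table pointer; the index comes from the page-number bits of the guest address, pre-scaled by the entry size so it can be added straight to the table base. A simplified model (names and sizes are illustrative, not the real QEMU definitions):

    typedef uint32_t target_ulong;      /* 32-bit guest assumed */
    typedef struct { target_ulong addr_read, addr_write, addr_code;
                     uintptr_t addend; } TLBEntryModel;
    typedef struct { uintptr_t mask; TLBEntryModel *table; } TLBFastModel;

    enum { PAGE_BITS = 12, ENTRY_BITS = 4 };   /* illustrative values */

    static TLBEntryModel *tlb_entry_for(TLBFastModel *f, target_ulong addr)
    {
        uintptr_t ofs = (addr >> (PAGE_BITS - ENTRY_BITS)) & f->mask;
        return (TLBEntryModel *)((uintptr_t)f->table + ofs);
    }

The emitted AND performs the shift-and-mask in a single instruction, the LDRD variants pull the comparator out of the selected entry, and the addend is loaded afterwards from CPUTLBEntry.addend.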
+ */ + if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) { + tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1)); - if (TARGET_LONG_BITS == 64) { - tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, - TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0)); + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask); + tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, + addrlo, TCG_REG_TMP, 0); + tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); + } else { + if (a_bits) { + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, + (1 << a_bits) - 1); + } + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo, + SHIFT_IMM_LSR(TARGET_PAGE_BITS)); + tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP, + 0, TCG_REG_R2, TCG_REG_TMP, + SHIFT_IMM_LSL(TARGET_PAGE_BITS)); } - return TCG_REG_R2; + if (TARGET_LONG_BITS == 64) { + tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); + } + + return TCG_REG_R1; } /* Record the context of a call to the out of line helper code for the slow path for a load or store, so that we can later generate the correct helper code. */ -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, TCGReg datalo, TCGReg datahi, TCGReg addrlo, - TCGReg addrhi, int mem_index, - tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) + TCGReg addrhi, tcg_insn_unit *raddr, + tcg_insn_unit *label_ptr) { TCGLabelQemuLdst *label = new_ldst_label(s); label->is_ld = is_ld; - label->opc = opc; + label->oi = oi; label->datalo_reg = datalo; label->datahi_reg = datahi; label->addrlo_reg = addrlo; label->addrhi_reg = addrhi; - label->mem_index = mem_index; label->raddr = raddr; label->label_ptr[0] = label_ptr; } -static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; - TCGMemOp opc = lb->opc; + TCGMemOpIdx oi = lb->oi; + MemOp opc = get_memop(oi); void *func; - reloc_pc24(lb->label_ptr[0], s->code_ptr); + if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { + return false; + } argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0); if (TARGET_LONG_BITS == 64) { @@ -1255,16 +1364,16 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } else { argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg); } - argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index); + argreg = tcg_out_arg_imm32(s, argreg, oi); argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); /* For armv6 we can use the canonical unsigned helpers and minimize icache usage. For pre-armv6, use the signed helpers since we do not have a single insn sign-extend. 
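The helper tables indexed below are keyed by MemOp bits. For orientation, a sketch of the flag layout those masks assume (values restated to mirror QEMU's MemOp enum; consult the real definition before relying on them):

    enum {
        MO_8     = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
        MO_SIZE  = 3,                 /* mask: log2 of access size */
        MO_SIGN  = 4,                 /* sign-extend the load */
        MO_BSWAP = 8,                 /* byte-swapped access */
        MO_SSIZE = MO_SIZE | MO_SIGN,
    };

    /* armv6+ keeps only size and endianness and sign-extends inline;
       pre-armv6 selects a signed helper when one exists:
       idx = use_armv6 ? (opc & (MO_BSWAP | MO_SIZE))
                       : (opc & (MO_BSWAP | MO_SSIZE));          */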
*/ if (use_armv6_instructions) { - func = qemu_ld_helpers[opc & ~MO_SIGN]; + func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]; } else { - func = qemu_ld_helpers[opc]; + func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]; if (opc & MO_SIGN) { opc = MO_UL; } } @@ -1299,14 +1408,18 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } tcg_out_goto(s, COND_AL, lb->raddr); + return true; } -static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; - TCGMemOp opc = lb->opc; + TCGMemOpIdx oi = lb->oi; + MemOp opc = get_memop(oi); - reloc_pc24(lb->label_ptr[0], s->code_ptr); + if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) { + return false; + } argreg = TCG_REG_R0; argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0); @@ -1334,19 +1447,20 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) break; } - argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index); + argreg = tcg_out_arg_imm32(s, argreg, oi); argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); /* Tail-call to the helper, which will return to the fast path. */ - tcg_out_goto(s, COND_AL, qemu_st_helpers[opc]); + tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); + return true; } #endif /* SOFTMMU */ -static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addend) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SSIZE) { case MO_UB: @@ -1403,11 +1517,11 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, } } -static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SSIZE) { case MO_UB: @@ -1464,8 +1578,9 @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { - TCGReg addrlo, datalo, datahi, addrhi QEMU_UNUSED_VAR; - TCGMemOp opc; + TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); + TCGMemOpIdx oi; + MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; @@ -1476,24 +1591,25 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) datahi = (is64 ? *args++ : 0); addrlo = *args++; addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); - opc = *args++; + oi = *args++; + opc = get_memop(oi); #ifdef CONFIG_SOFTMMU - mem_index = *args; - addend = tcg_out_tlb_read(s, addrlo, addrhi, opc & MO_SIZE, mem_index, 1); + mem_index = get_mmuidx(oi); + addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1); /* This is a conditional BL only to load a pointer within this opcode into LR for the slow path. We will not be using the value for a tail call.
*/ label_ptr = s->code_ptr; - tcg_out_bl_noaddr(s, COND_NE); + tcg_out_bl(s, COND_NE, 0); tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend); - add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi, - mem_index, s->code_ptr, label_ptr); + add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, + s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ - if (GUEST_BASE) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE); + if (guest_base) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base); tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP); } else { tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo); @@ -1501,11 +1617,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) #endif } -static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, +static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addend) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SIZE) { case MO_8: @@ -1546,11 +1662,11 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, } } -static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, +static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, TCGReg addrlo) { - TCGMemOp bswap = opc & MO_BSWAP; + MemOp bswap = opc & MO_BSWAP; switch (opc & MO_SIZE) { case MO_8: @@ -1593,8 +1709,9 @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { - TCGReg addrlo, datalo, datahi, addrhi QEMU_UNUSED_VAR; - TCGMemOp opc; + TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); + TCGMemOpIdx oi; + MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; TCGReg addend; @@ -1605,23 +1722,24 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) datahi = (is64 ? *args++ : 0); addrlo = *args++; addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); - opc = *args++; + oi = *args++; + opc = get_memop(oi); #ifdef CONFIG_SOFTMMU - mem_index = *args; - addend = tcg_out_tlb_read(s, addrlo, addrhi, opc & MO_SIZE, mem_index, 0); + mem_index = get_mmuidx(oi); + addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0); tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend); /* The conditional call must come last, as we're going to return here. 
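Throughout this conversion the separate (opc, mem_index) pair is replaced by a single TCGMemOpIdx. A sketch of the packing that get_memop()/get_mmuidx() undo (this mirrors QEMU's helpers, with the mmu index in the low four bits, but is restated here for illustration):

    typedef unsigned TCGMemOpIdxModel;

    static TCGMemOpIdxModel make_oi(unsigned memop, unsigned mmu_idx)
    {
        return (memop << 4) | (mmu_idx & 15);
    }

    static unsigned oi_memop(TCGMemOpIdxModel oi)  { return oi >> 4; }
    static unsigned oi_mmuidx(TCGMemOpIdxModel oi) { return oi & 15; }

Carrying one value keeps the slow-path label small and lets the helper receive the complete operation descriptor in a single argument register.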
*/ label_ptr = s->code_ptr; - tcg_out_bl_noaddr(s, COND_NE); + tcg_out_bl(s, COND_NE, 0); - add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi, - mem_index, s->code_ptr, label_ptr); + add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, + s->code_ptr, label_ptr); #else /* !CONFIG_SOFTMMU */ - if (GUEST_BASE) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE); + if (guest_base) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base); tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi, addrlo, TCG_REG_TMP); } else { @@ -1630,7 +1748,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) #endif } -static tcg_insn_unit *tb_ret_addr; +static void tcg_out_epilogue(TCGContext *s); static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) @@ -1640,24 +1758,37 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, switch (opc) { case INDEX_op_exit_tb: - tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]); - tcg_out_goto(s, COND_AL, tb_ret_addr); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]); + tcg_out_epilogue(s); break; case INDEX_op_goto_tb: - if (s->tb_jmp_offset) { - /* Direct jump method */ - s->tb_jmp_offset[args[0]] = tcg_current_code_size(s); - tcg_out_b_noaddr(s, COND_AL); - } else { + { /* Indirect jump method */ - intptr_t ptr = (intptr_t)(s->tb_next + args[0]); - tcg_out_movi32(s, COND_AL, TCG_REG_R0, ptr & ~0xfff); - tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, ptr & 0xfff); + intptr_t ptr, dif, dil; + TCGReg base = TCG_REG_PC; + + tcg_debug_assert(s->tb_jmp_insn_offset == 0); + ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]); + dif = ptr - ((intptr_t)s->code_ptr + 8); + dil = sextract32(dif, 0, 12); + if (dif != dil) { + /* The TB is close, but outside the 12 bits addressable by + the load. We can extend this to 20 bits with a sub of a + shifted immediate from pc. In the vastly unlikely event + the code requires more than 1MB, we'll use 2 insns and + be no worse off. 
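The goto_tb path above splits the displacement into a low part that fits the LDR immediate and a remainder materialized with movi32. sextract32, used for that split, can be written as follows (a generic sketch; QEMU's own version is equivalent):

    /* Take `len` bits of `value` starting at `pos`, sign-extended. */
    static int32_t sextract32_model(uint32_t value, int pos, int len)
    {
        return (int32_t)(value << (32 - len - pos)) >> (32 - len);
    }

With dil = sextract32(dif, 0, 12), the address is rebuilt as base + dil, where base is either pc itself (when dif == dil) or ptr - dil loaded into R0; the signed 12-bit slice always fits LDR's +/-4095 offset field.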
*/ + base = TCG_REG_R0; + tcg_out_movi32(s, COND_AL, base, ptr - dil); + } + tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil); + set_jmp_reset_offset(s, args[0]); } - s->tb_next_offset[args[0]] = tcg_current_code_size(s); + break; + case INDEX_op_goto_ptr: + tcg_out_bx(s, COND_AL, args[0]); break; case INDEX_op_br: - tcg_out_goto_label(s, COND_AL, args[0]); + tcg_out_goto_label(s, COND_AL, arg_label(args[0])); break; case INDEX_op_ld8u_i32: @@ -1818,25 +1949,33 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } break; + case INDEX_op_ctz_i32: + tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0); + a1 = TCG_REG_TMP; + goto do_clz; + + case INDEX_op_clz_i32: + a1 = args[1]; + do_clz: + a0 = args[0]; + a2 = args[2]; + c = const_args[2]; + if (c && a2 == 32) { + tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0); + break; + } + tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0); + tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0); + if (c || a0 != a2) { + tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c); + } + break; + case INDEX_op_brcond_i32: tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, args[0], args[1], const_args[1]); - tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]); - break; - case INDEX_op_brcond2_i32: - /* The resulting conditions are: * TCG_COND_EQ --> a0 == a2 && a1 == a3, * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3, * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3, * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3), * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3), * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3, */ - tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, - args[1], args[3], const_args[3]); - tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0, - args[0], args[2], const_args[2]); - tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]); + tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], + arg_label(args[3])); break; case INDEX_op_setcond_i32: tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, @@ -1846,15 +1985,15 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])], ARITH_MOV, args[0], 0, 0); break; + + case INDEX_op_brcond2_i32: + c = tcg_out_cmp2(s, args, const_args); + tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5])); + break; case INDEX_op_setcond2_i32: - /* See brcond2_i32 comment */ - tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, - args[2], args[4], const_args[4]); - tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0, - args[1], args[3], const_args[3]); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]], - ARITH_MOV, args[0], 0, 1); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])], + c = tcg_out_cmp2(s, args + 1, const_args + 1); + tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1); + tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], ARITH_MOV, args[0], 0, 0); break; @@ -1892,6 +2031,33 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_deposit(s, COND_AL, args[0], args[2], args[3], args[4], const_args[2]); break; + case INDEX_op_extract_i32: + tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]); + break; + case INDEX_op_sextract_i32: + tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]); + break; + case INDEX_op_extract2_i32: + /* ??? These optimizations vs zero should be generic. */ + /* ???
But we can't substitute 2 for 1 in the opcode stream yet. */ + if (const_args[1]) { + if (const_args[2]) { + tcg_out_movi(s, TCG_TYPE_REG, args[0], 0); + } else { + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, + args[2], SHIFT_IMM_LSL(32 - args[3])); + } + } else if (const_args[2]) { + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, + args[1], SHIFT_IMM_LSR(args[3])); + } else { + /* We can do extract2 in 2 insns, vs the 3 required otherwise. */ + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, + args[2], SHIFT_IMM_LSL(32 - args[3])); + tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP, + args[1], SHIFT_IMM_LSR(args[3])); + } + break; case INDEX_op_div_i32: tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]); @@ -1900,6 +2066,10 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]); break; + case INDEX_op_mb: + tcg_out_mb(s, args[0]); + break; + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ case INDEX_op_call: /* Always emitted via tcg_out_call. */ @@ -1908,83 +2078,133 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } } -static const TCGTargetOpDef arm_op_defs[] = { - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - { INDEX_op_br, { } }, +static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) +{ + static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; + static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; + static const TCGTargetOpDef s_s = { .args_ct_str = { "s", "s" } }; + static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } }; + static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; + static const TCGTargetOpDef r_r_l = { .args_ct_str = { "r", "r", "l" } }; + static const TCGTargetOpDef r_l_l = { .args_ct_str = { "r", "l", "l" } }; + static const TCGTargetOpDef s_s_s = { .args_ct_str = { "s", "s", "s" } }; + static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; + static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; + static const TCGTargetOpDef r_r_rIN + = { .args_ct_str = { "r", "r", "rIN" } }; + static const TCGTargetOpDef r_r_rIK + = { .args_ct_str = { "r", "r", "rIK" } }; + static const TCGTargetOpDef r_r_r_r + = { .args_ct_str = { "r", "r", "r", "r" } }; + static const TCGTargetOpDef r_r_l_l + = { .args_ct_str = { "r", "r", "l", "l" } }; + static const TCGTargetOpDef s_s_s_s + = { .args_ct_str = { "s", "s", "s", "s" } }; + static const TCGTargetOpDef br + = { .args_ct_str = { "r", "rIN" } }; + static const TCGTargetOpDef ext2 + = { .args_ct_str = { "r", "rZ", "rZ" } }; + static const TCGTargetOpDef dep + = { .args_ct_str = { "r", "0", "rZ" } }; + static const TCGTargetOpDef movc + = { .args_ct_str = { "r", "r", "rIN", "rIK", "0" } }; + static const TCGTargetOpDef add2 + = { .args_ct_str = { "r", "r", "r", "r", "rIN", "rIK" } }; + static const TCGTargetOpDef sub2 + = { .args_ct_str = { "r", "r", "rI", "rI", "rIN", "rIK" } }; + static const TCGTargetOpDef br2 + = { .args_ct_str = { "r", "r", "rI", "rI" } }; + static const TCGTargetOpDef setc2 + = { .args_ct_str = { "r", "r", "r", "rI", "rI" } }; - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - { INDEX_op_st8_i32, { "r", "r" } }, - { INDEX_op_st16_i32, { "r", "r" } }, - { INDEX_op_st_i32, { "r", "r" } }, 
+ switch (op) { + case INDEX_op_goto_ptr: + return &r; - /* TODO: "r", "r", "ri" */ - { INDEX_op_add_i32, { "r", "r", "rIN" } }, - { INDEX_op_sub_i32, { "r", "rI", "rIN" } }, - { INDEX_op_mul_i32, { "r", "r", "r" } }, - { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } }, - { INDEX_op_muls2_i32, { "r", "r", "r", "r" } }, - { INDEX_op_and_i32, { "r", "r", "rIK" } }, - { INDEX_op_andc_i32, { "r", "r", "rIK" } }, - { INDEX_op_or_i32, { "r", "r", "rI" } }, - { INDEX_op_xor_i32, { "r", "r", "rI" } }, - { INDEX_op_neg_i32, { "r", "r" } }, - { INDEX_op_not_i32, { "r", "r" } }, + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_neg_i32: + case INDEX_op_not_i32: + case INDEX_op_bswap16_i32: + case INDEX_op_bswap32_i32: + case INDEX_op_ext8s_i32: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16u_i32: + case INDEX_op_extract_i32: + case INDEX_op_sextract_i32: + return &r_r; - { INDEX_op_shl_i32, { "r", "r", "ri" } }, - { INDEX_op_shr_i32, { "r", "r", "ri" } }, - { INDEX_op_sar_i32, { "r", "r", "ri" } }, - { INDEX_op_rotl_i32, { "r", "r", "ri" } }, - { INDEX_op_rotr_i32, { "r", "r", "ri" } }, + case INDEX_op_add_i32: + case INDEX_op_sub_i32: + case INDEX_op_setcond_i32: + return &r_r_rIN; + case INDEX_op_and_i32: + case INDEX_op_andc_i32: + case INDEX_op_clz_i32: + case INDEX_op_ctz_i32: + return &r_r_rIK; + case INDEX_op_mul_i32: + case INDEX_op_div_i32: + case INDEX_op_divu_i32: + return &r_r_r; + case INDEX_op_mulu2_i32: + case INDEX_op_muls2_i32: + return &r_r_r_r; + case INDEX_op_or_i32: + case INDEX_op_xor_i32: + return &r_r_rI; + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + case INDEX_op_rotl_i32: + case INDEX_op_rotr_i32: + return &r_r_ri; - { INDEX_op_brcond_i32, { "r", "rIN" } }, - { INDEX_op_setcond_i32, { "r", "r", "rIN" } }, - { INDEX_op_movcond_i32, { "r", "r", "rIN", "rIK", "0" } }, + case INDEX_op_brcond_i32: + return &br; + case INDEX_op_deposit_i32: + return &dep; + case INDEX_op_extract2_i32: + return &ext2; + case INDEX_op_movcond_i32: + return &movc; + case INDEX_op_add2_i32: + return &add2; + case INDEX_op_sub2_i32: + return &sub2; + case INDEX_op_brcond2_i32: + return &br2; + case INDEX_op_setcond2_i32: + return &setc2; - { INDEX_op_add2_i32, { "r", "r", "r", "r", "rIN", "rIK" } }, - { INDEX_op_sub2_i32, { "r", "r", "rI", "rI", "rIN", "rIK" } }, - { INDEX_op_brcond2_i32, { "r", "r", "rIN", "rIN" } }, - { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } }, + case INDEX_op_qemu_ld_i32: + return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l; + case INDEX_op_qemu_ld_i64: + return TARGET_LONG_BITS == 32 ? &r_r_l : &r_r_l_l; + case INDEX_op_qemu_st_i32: + return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s; + case INDEX_op_qemu_st_i64: + return TARGET_LONG_BITS == 32 ? 
&s_s_s : &s_s_s_s; -#if TARGET_LONG_BITS == 32 - { INDEX_op_qemu_ld_i32, { "r", "l" } }, - { INDEX_op_qemu_ld_i64, { "r", "r", "l" } }, - { INDEX_op_qemu_st_i32, { "s", "s" } }, - { INDEX_op_qemu_st_i64, { "s", "s", "s" } }, -#else - { INDEX_op_qemu_ld_i32, { "r", "l", "l" } }, - { INDEX_op_qemu_ld_i64, { "r", "r", "l", "l" } }, - { INDEX_op_qemu_st_i32, { "s", "s", "s" } }, - { INDEX_op_qemu_st_i64, { "s", "s", "s", "s" } }, -#endif - - { INDEX_op_bswap16_i32, { "r", "r" } }, - { INDEX_op_bswap32_i32, { "r", "r" } }, - - { INDEX_op_ext8s_i32, { "r", "r" } }, - { INDEX_op_ext16s_i32, { "r", "r" } }, - { INDEX_op_ext16u_i32, { "r", "r" } }, - - { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, - - { INDEX_op_div_i32, { "r", "r", "r" } }, - { INDEX_op_divu_i32, { "r", "r", "r" } }, - - { -1 }, -}; + default: + return NULL; + } +} static void tcg_target_init(TCGContext *s) { /* Only probe for the platform and capabilities if we haven't already determined maximum values at compile time. */ -#ifndef __ARM_ARCH_EXT_IDIV__ +#ifndef use_idiv_instructions { unsigned long hwcap = qemu_getauxval(AT_HWCAP); - use_idiv_instructions_rt = (hwcap & HWCAP_ARM_IDIVA) != 0; + use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0; } #endif if (__ARM_ARCH < 7) { @@ -1994,21 +2214,20 @@ static void tcg_target_init(TCGContext *s) } } - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); - tcg_regset_set32(s->tcg_target_call_clobber_regs, 0, - (1 << TCG_REG_R0) | - (1 << TCG_REG_R1) | - (1 << TCG_REG_R2) | - (1 << TCG_REG_R3) | - (1 << TCG_REG_R12) | - (1 << TCG_REG_R14)); + s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; - tcg_regset_clear(s->reserved_regs); + s->tcg_target_call_clobber_regs = 0; + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R0); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R1); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R2); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R3); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R12); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R14); + + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); - - tcg_add_target_add_op_defs(s, arm_op_defs); } static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, @@ -2023,10 +2242,17 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, tcg_out_st32(s, COND_AL, arg, arg1, arg2); } -static inline void tcg_out_mov(TCGContext *s, TCGType type, +static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) +{ + return false; +} + +static inline bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0)); + tcg_out_mov_reg(s, COND_AL, ret, arg); + return true; } static inline void tcg_out_movi(TCGContext *s, TCGType type, @@ -2035,6 +2261,14 @@ static inline void tcg_out_movi(TCGContext *s, TCGType type, tcg_out_movi32(s, COND_AL, ret, arg); } +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + int i; + for (i = 0; i < count; ++i) { + p[i] = INSN_NOP; + } +} + /* Compute frame size via macros, to share between tcg_target_qemu_prologue and tcg_register_jit.
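The FRAME_SIZE macro that follows uses the standard power-of-two round-up idiom. In isolation (a restatement, with `align` assumed to be a power of two):

    /* Round x up to the next multiple of align. */
    #define ALIGN_UP(x, align)  (((x) + (align) - 1) & -(align))

FRAME_SIZE is thus the callee-save push size plus the call-argument and temp areas, rounded up to TCG_TARGET_STACK_ALIGN; keeping it a compile-time constant is what lets the prologue, the epilogue, and the debug-frame description below all share it.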
*/ @@ -2047,33 +2281,85 @@ static inline void tcg_out_movi(TCGContext *s, TCGType type, + TCG_TARGET_STACK_ALIGN - 1) \ & -TCG_TARGET_STACK_ALIGN) +#define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE) + static void tcg_target_qemu_prologue(TCGContext *s) { - int stack_addend; - /* Calling convention requires us to save r4-r11 and lr. */ /* stmdb sp!, { r4 - r11, lr } */ tcg_out32(s, (COND_AL << 28) | 0x092d4ff0); /* Reserve callee argument and tcg temp space. */ - stack_addend = FRAME_SIZE - PUSH_SIZE; - tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK, - TCG_REG_CALL_STACK, stack_addend, 1); + TCG_REG_CALL_STACK, STACK_ADDEND, 1); tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, CPU_TEMP_BUF_NLONGS * sizeof(long)); tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]); - tb_ret_addr = s->code_ptr; - /* Epilogue. We branch here via tb_ret_addr. */ + /* + * Return path for goto_ptr. Set return value to 0, a-la exit_tb, + * and fall through to the rest of the epilogue. + */ + s->code_gen_epilogue = s->code_ptr; + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0); + tcg_out_epilogue(s); +} + +static void tcg_out_epilogue(TCGContext *s) +{ + /* Release local stack frame. */ tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK, - TCG_REG_CALL_STACK, stack_addend, 1); + TCG_REG_CALL_STACK, STACK_ADDEND, 1); /* ldmia sp!, { r4 - r11, pc } */ tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0); } +typedef struct { + DebugFrameHeader h; + uint8_t fde_def_cfa[4]; + uint8_t fde_reg_ofs[18]; +} DebugFrame; + #define ELF_HOST_MACHINE EM_ARM + +/* We're expecting a 2 byte uleb128 encoded value. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); + +static const DebugFrame debug_frame = { + .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ + .h.cie.id = -1, + .h.cie.version = 1, + .h.cie.code_align = 1, + .h.cie.data_align = 0x7c, /* sleb128 -4 */ + .h.cie.return_column = 14, + + /* Total FDE size does not include the "len" member. */ + .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), + + .fde_def_cfa = { + 12, 13, /* DW_CFA_def_cfa sp, ... */ + (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ + (FRAME_SIZE >> 7) + }, + .fde_reg_ofs = { + /* The following must match the stmdb in the prologue. 
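The two fde_def_cfa bytes below encode FRAME_SIZE as a ULEB128 quantity, 7 bits per byte with the top bit marking continuation, which is why the build-time check above limits FRAME_SIZE to 14 bits. A generic encoder for comparison (a sketch, not part of the patch):

    static int uleb128_encode(uint32_t value, uint8_t *out)
    {
        int n = 0;
        do {
            uint8_t byte = value & 0x7f;
            value >>= 7;
            out[n++] = byte | (value ? 0x80 : 0);
        } while (value);
        return n;
    }

The initializer hard-codes the two-byte form, (FRAME_SIZE & 0x7f) | 0x80 followed by FRAME_SIZE >> 7.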
*/ + 0x8e, 1, /* DW_CFA_offset, lr, -4 */ + 0x8b, 2, /* DW_CFA_offset, r11, -8 */ + 0x8a, 3, /* DW_CFA_offset, r10, -12 */ + 0x89, 4, /* DW_CFA_offset, r9, -16 */ + 0x88, 5, /* DW_CFA_offset, r8, -20 */ + 0x87, 6, /* DW_CFA_offset, r7, -24 */ + 0x86, 7, /* DW_CFA_offset, r6, -28 */ + 0x85, 8, /* DW_CFA_offset, r5, -32 */ + 0x84, 9, /* DW_CFA_offset, r4, -36 */ + } +}; + +void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) +{ + tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); +} diff --git a/qemu/tcg/i386/tcg-target.c b/qemu/tcg/i386/tcg-target.c deleted file mode 100644 index fefe85a9..00000000 --- a/qemu/tcg/i386/tcg-target.c +++ /dev/null @@ -1,2471 +0,0 @@ -/* - * Tiny Code Generator for QEMU - * - * Copyright (c) 2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "tcg-be-ldst.h" - -#ifndef NDEBUG -static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { -#if TCG_TARGET_REG_BITS == 64 - "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi", - "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", -#else - "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi", -#endif -}; -#endif - -static const int tcg_target_reg_alloc_order[] = { -#if TCG_TARGET_REG_BITS == 64 - TCG_REG_RBP, - TCG_REG_RBX, - TCG_REG_R12, - TCG_REG_R13, - TCG_REG_R14, - TCG_REG_R15, - TCG_REG_R10, - TCG_REG_R11, - TCG_REG_R9, - TCG_REG_R8, - TCG_REG_RCX, - TCG_REG_RDX, - TCG_REG_RSI, - TCG_REG_RDI, - TCG_REG_RAX, -#else - TCG_REG_EBX, - TCG_REG_ESI, - TCG_REG_EDI, - TCG_REG_EBP, - TCG_REG_ECX, - TCG_REG_EDX, - TCG_REG_EAX, -#endif -}; - -static const int tcg_target_call_iarg_regs[] = { -#if TCG_TARGET_REG_BITS == 64 -#if (defined(_WIN64) || defined(__CYGWIN__)) - TCG_REG_RCX, - TCG_REG_RDX, -#else - TCG_REG_RDI, - TCG_REG_RSI, - TCG_REG_RDX, - TCG_REG_RCX, -#endif - TCG_REG_R8, - TCG_REG_R9, -#else -#ifdef _MSC_VER -#ifdef _UC_MSVC_ARRAY_DUMMY -#error "DUP DEF _UC_MSVC_ARRAY_DUMMY" -#endif -#define _UC_MSVC_ARRAY_DUMMY - /* 32 bit mode uses stack based calling convention (GCC default). - We add a dummy value here for MSVC compatibility for the error: - "error C2466: cannot allocate an array of constant size 0" - The "tcg_target_call_iarg_regs" array is not accessed when - TCG_TARGET_REG_BITS == 32 - */ - 0, -#endif -#endif -}; - -static const int tcg_target_call_oarg_regs[] = { - TCG_REG_EAX, -#if TCG_TARGET_REG_BITS == 32 - TCG_REG_EDX -#endif -}; - -/* Constants we accept. 
*/ -#define TCG_CT_CONST_S32 0x100 -#define TCG_CT_CONST_U32 0x200 -#define TCG_CT_CONST_I32 0x400 - -/* Registers used with L constraint, which are the first argument - registers on x86_64, and two random call clobbered registers on - i386. */ -#if TCG_TARGET_REG_BITS == 64 -# define TCG_REG_L0 tcg_target_call_iarg_regs[0] -# define TCG_REG_L1 tcg_target_call_iarg_regs[1] -#else -# define TCG_REG_L0 TCG_REG_EAX -# define TCG_REG_L1 TCG_REG_EDX -#endif - -/* The host compiler should supply <cpuid.h> to enable runtime features - detection, as we're not going to go so far as our own inline assembly. - If not available, default values will be assumed. */ -#if defined(CONFIG_CPUID_H) -#ifdef _MSC_VER -#include <intrin.h> -/* %ecx */ -#define bit_MOVBE (1 << 22) -/* %edx */ -#define bit_CMOV (1 << 15) -/* Extended Features (%eax == 7) */ -#define bit_BMI (1 << 3) -#define bit_BMI2 (1 << 8) -#else -#include <cpuid.h> -#endif -#endif - -/* For 32-bit, we are going to attempt to determine at runtime whether cmov is available. */ -#if TCG_TARGET_REG_BITS == 64 -# define have_cmov 1 -#elif defined(CONFIG_CPUID_H) && defined(bit_CMOV) -static bool have_cmov; -#else -# define have_cmov 0 -#endif - -/* We need this symbol in tcg-target.h, and we can't properly conditionalize - it there. Therefore we always define the variable. */ -bool have_bmi1; - -#if defined(CONFIG_CPUID_H) && defined(bit_BMI2) -static bool have_bmi2; -#else -static bool have_bmi2 = 0; -#endif - -static void patch_reloc(tcg_insn_unit *code_ptr, int type, - intptr_t value, intptr_t addend) -{ - value += addend; - switch(type) { - case R_386_PC32: - value -= (uintptr_t)code_ptr; - if (value != (int32_t)value) { - tcg_abort(); - } - tcg_patch32(code_ptr, value); - break; - case R_386_PC8: - value -= (uintptr_t)code_ptr; - if (value != (int8_t)value) { - tcg_abort(); - } - tcg_patch8(code_ptr, value); - break; - default: - tcg_abort(); - } -} - -/* parse target specific constraints */ -static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) -{ - const char *ct_str; - - ct_str = *pct_str; - switch(ct_str[0]) { - case 'a': - ct->ct |= TCG_CT_REG; - tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX); - break; - case 'b': - ct->ct |= TCG_CT_REG; - tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX); - break; - case 'c': - case_c: - ct->ct |= TCG_CT_REG; - tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX); - break; - case 'd': - ct->ct |= TCG_CT_REG; - tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX); - break; - case 'S': - ct->ct |= TCG_CT_REG; - tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI); - break; - case 'D': - ct->ct |= TCG_CT_REG; - tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI); - break; - case 'q': - ct->ct |= TCG_CT_REG; - if (TCG_TARGET_REG_BITS == 64) { - tcg_regset_set32(ct->u.regs, 0, 0xffff); - } else { - tcg_regset_set32(ct->u.regs, 0, 0xf); - } - break; - case 'Q': - ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xf); - break; - case 'r': - case_r: - ct->ct |= TCG_CT_REG; - if (TCG_TARGET_REG_BITS == 64) { - tcg_regset_set32(ct->u.regs, 0, 0xffff); - } else { - tcg_regset_set32(ct->u.regs, 0, 0xff); - } - break; - case 'C': - /* With SHRX et al, we need not use ECX as shift count register.
*/ - if (have_bmi2) { - goto case_r; - } else { - goto case_c; - } - - /* qemu_ld/st address constraint */ - case 'L': - ct->ct |= TCG_CT_REG; - if (TCG_TARGET_REG_BITS == 64) { - tcg_regset_set32(ct->u.regs, 0, 0xffff); - } else { - tcg_regset_set32(ct->u.regs, 0, 0xff); - } - tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1); - break; - - case 'e': - ct->ct |= TCG_CT_CONST_S32; - break; - case 'Z': - ct->ct |= TCG_CT_CONST_U32; - break; - case 'I': - ct->ct |= TCG_CT_CONST_I32; - break; - - default: - return -1; - } - ct_str++; - *pct_str = ct_str; - return 0; -} - -/* test if a constant matches the constraint */ -static inline int tcg_target_const_match(tcg_target_long val, TCGType type, - const TCGArgConstraint *arg_ct) -{ - int ct = arg_ct->ct; - if (ct & TCG_CT_CONST) { - return 1; - } - if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { - return 1; - } - if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { - return 1; - } - if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) { - return 1; - } - return 0; -} - -#if TCG_TARGET_REG_BITS == 64 -# define LOWREGMASK(x) ((x) & 7) -#else -# define LOWREGMASK(x) (x) -#endif - -#define P_EXT 0x100 /* 0x0f opcode prefix */ -#define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */ -#define P_DATA16 0x400 /* 0x66 opcode prefix */ -#if TCG_TARGET_REG_BITS == 64 -# define P_ADDR32 0x800 /* 0x67 opcode prefix */ -# define P_REXW 0x1000 /* Set REX.W = 1 */ -# define P_REXB_R 0x2000 /* REG field as byte register */ -# define P_REXB_RM 0x4000 /* R/M field as byte register */ -# define P_GS 0x8000 /* gs segment override */ -#else -# define P_ADDR32 0 -# define P_REXW 0 -# define P_REXB_R 0 -# define P_REXB_RM 0 -# define P_GS 0 -#endif -#define P_SIMDF3 0x10000 /* 0xf3 opcode prefix */ -#define P_SIMDF2 0x20000 /* 0xf2 opcode prefix */ - -#define OPC_ARITH_EvIz (0x81) -#define OPC_ARITH_EvIb (0x83) -#define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */ -#define OPC_ANDN (0xf2 | P_EXT38) -#define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3)) -#define OPC_BSWAP (0xc8 | P_EXT) -#define OPC_CALL_Jz (0xe8) -#define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */ -#define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3)) -#define OPC_DEC_r32 (0x48) -#define OPC_IMUL_GvEv (0xaf | P_EXT) -#define OPC_IMUL_GvEvIb (0x6b) -#define OPC_IMUL_GvEvIz (0x69) -#define OPC_INC_r32 (0x40) -#define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */ -#define OPC_JCC_short (0x70) /* ... plus condition code */ -#define OPC_JMP_long (0xe9) -#define OPC_JMP_short (0xeb) -#define OPC_LEA (0x8d) -#define OPC_MOVB_EvGv (0x88) /* stores, more or less */ -#define OPC_MOVL_EvGv (0x89) /* stores, more or less */ -#define OPC_MOVL_GvEv (0x8b) /* loads, more or less */ -#define OPC_MOVB_EvIz (0xc6) -#define OPC_MOVL_EvIz (0xc7) -#define OPC_MOVL_Iv (0xb8) -#define OPC_MOVBE_GyMy (0xf0 | P_EXT38) -#define OPC_MOVBE_MyGy (0xf1 | P_EXT38) -#define OPC_MOVSBL (0xbe | P_EXT) -#define OPC_MOVSWL (0xbf | P_EXT) -#define OPC_MOVSLQ (0x63 | P_REXW) -#define OPC_MOVZBL (0xb6 | P_EXT) -#define OPC_MOVZWL (0xb7 | P_EXT) -#define OPC_POP_r32 (0x58) -#define OPC_PUSH_r32 (0x50) -#define OPC_PUSH_Iv (0x68) -#define OPC_PUSH_Ib (0x6a) -#define OPC_RET (0xc3) -#define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... 
plus cc */ -#define OPC_SHIFT_1 (0xd1) -#define OPC_SHIFT_Ib (0xc1) -#define OPC_SHIFT_cl (0xd3) -#define OPC_SARX (0xf7 | P_EXT38 | P_SIMDF3) -#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16) -#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2) -#define OPC_TESTL (0x85) -#define OPC_XCHG_ax_r32 (0x90) - -#define OPC_GRP3_Ev (0xf7) -#define OPC_GRP5 (0xff) - -/* Group 1 opcode extensions for 0x80-0x83. - These are also used as modifiers for OPC_ARITH. */ -#define ARITH_ADD 0 -#define ARITH_OR 1 -#define ARITH_ADC 2 -#define ARITH_SBB 3 -#define ARITH_AND 4 -#define ARITH_SUB 5 -#define ARITH_XOR 6 -#define ARITH_CMP 7 - -/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */ -#define SHIFT_ROL 0 -#define SHIFT_ROR 1 -#define SHIFT_SHL 4 -#define SHIFT_SHR 5 -#define SHIFT_SAR 7 - -/* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */ -#define EXT3_NOT 2 -#define EXT3_NEG 3 -#define EXT3_MUL 4 -#define EXT3_IMUL 5 -#define EXT3_DIV 6 -#define EXT3_IDIV 7 - -/* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */ -#define EXT5_INC_Ev 0 -#define EXT5_DEC_Ev 1 -#define EXT5_CALLN_Ev 2 -#define EXT5_JMPN_Ev 4 - -/* Condition codes to be added to OPC_JCC_{long,short}. */ -#define JCC_JMP (-1) -#define JCC_JO 0x0 -#define JCC_JNO 0x1 -#define JCC_JB 0x2 -#define JCC_JAE 0x3 -#define JCC_JE 0x4 -#define JCC_JNE 0x5 -#define JCC_JBE 0x6 -#define JCC_JA 0x7 -#define JCC_JS 0x8 -#define JCC_JNS 0x9 -#define JCC_JP 0xa -#define JCC_JNP 0xb -#define JCC_JL 0xc -#define JCC_JGE 0xd -#define JCC_JLE 0xe -#define JCC_JG 0xf - -static const uint8_t tcg_cond_to_jcc[] = { -#ifdef _MSC_VER - 0, // TCG_COND_NEVER - 0, // TCG_COND_ALWAYS - JCC_JL, // TCG_COND_LT - JCC_JGE, // TCG_COND_GE - JCC_JB, // TCG_COND_LTU - JCC_JAE, // TCG_COND_GEU - 0, // n/a - 0, // n/a - JCC_JE, // TCG_COND_EQ - JCC_JNE, // TCG_COND_NE - JCC_JLE, // TCG_COND_LE - JCC_JG, // TCG_COND_GT - JCC_JBE, // TCG_COND_LEU - JCC_JA, // TCG_COND_GTU - 0, // n/a - 0, // n/a -#else - [TCG_COND_EQ] = JCC_JE, - [TCG_COND_NE] = JCC_JNE, - [TCG_COND_LT] = JCC_JL, - [TCG_COND_GE] = JCC_JGE, - [TCG_COND_LE] = JCC_JLE, - [TCG_COND_GT] = JCC_JG, - [TCG_COND_LTU] = JCC_JB, - [TCG_COND_GEU] = JCC_JAE, - [TCG_COND_LEU] = JCC_JBE, - [TCG_COND_GTU] = JCC_JA, -#endif -}; - -#if TCG_TARGET_REG_BITS == 64 -static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x) -{ - int rex; - - if (opc & P_GS) { - tcg_out8(s, 0x65); - } - if (opc & P_DATA16) { - /* We should never be asking for both 16 and 64-bit operation. */ - assert((opc & P_REXW) == 0); - tcg_out8(s, 0x66); - } - if (opc & P_ADDR32) { - tcg_out8(s, 0x67); - } - - rex = 0; - rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */ - rex |= (r & 8) >> 1; /* REX.R */ - rex |= (x & 8) >> 2; /* REX.X */ - rex |= (rm & 8) >> 3; /* REX.B */ - - /* P_REXB_{R,RM} indicates that the given register is the low byte. - For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do, - as otherwise the encoding indicates %[abcd]h. Note that the values - that are ORed in merely indicate that the REX byte must be present; - those bits get discarded in output. */ - rex |= opc & (r >= 4 ? P_REXB_R : 0); - rex |= opc & (rm >= 4 ? 
P_REXB_RM : 0); - - if (rex) { - tcg_out8(s, (uint8_t)(rex | 0x40)); - } - - if (opc & (P_EXT | P_EXT38)) { - tcg_out8(s, 0x0f); - if (opc & P_EXT38) { - tcg_out8(s, 0x38); - } - } - - tcg_out8(s, opc); -} -#else -static void tcg_out_opc(TCGContext *s, int opc) -{ - if (opc & P_DATA16) { - tcg_out8(s, 0x66); - } - if (opc & (P_EXT | P_EXT38)) { - tcg_out8(s, 0x0f); - if (opc & P_EXT38) { - tcg_out8(s, 0x38); - } - } - tcg_out8(s, opc); -} -/* Discard the register arguments to tcg_out_opc early, so as not to penalize - the 32-bit compilation paths. This method works with all versions of gcc, - whereas relying on optimization may not be able to exclude them. */ -#define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc) -#endif - -static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm) -{ - tcg_out_opc(s, opc, r, rm, 0); - tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); -} - -static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm) -{ - int tmp; - - if ((opc & (P_REXW | P_EXT | P_EXT38)) || (rm & 8)) { - /* Three byte VEX prefix. */ - tcg_out8(s, 0xc4); - - /* VEX.m-mmmm */ - if (opc & P_EXT38) { - tmp = 2; - } else if (opc & P_EXT) { - tmp = 1; - } else { - tcg_abort(); - } - tmp |= 0x40; /* VEX.X */ - tmp |= (r & 8 ? 0 : 0x80); /* VEX.R */ - tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */ - tcg_out8(s, tmp); - - tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */ - } else { - /* Two byte VEX prefix. */ - tcg_out8(s, 0xc5); - - tmp = (r & 8 ? 0 : 0x80); /* VEX.R */ - } - /* VEX.pp */ - if (opc & P_DATA16) { - tmp |= 1; /* 0x66 */ - } else if (opc & P_SIMDF3) { - tmp |= 2; /* 0xf3 */ - } else if (opc & P_SIMDF2) { - tmp |= 3; /* 0xf2 */ - } - tmp |= (~v & 15) << 3; /* VEX.vvvv */ - tcg_out8(s, tmp); - tcg_out8(s, opc); - tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); -} - -/* Output an opcode with a full "rm + (index<<shift) + offset" address mode. - We handle either RM and INDEX missing with a negative value. In 64-bit - mode for absolute addresses, ~RM is the size of the immediate operand - that will follow the instruction. */ - -static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm, - int index, int shift, intptr_t offset) -{ - int mod, len; - - if (index < 0 && rm < 0) { - if (TCG_TARGET_REG_BITS == 64) { - /* Try for a rip-relative addressing mode. This has replaced - the 32-bit-mode absolute addressing encoding. */ - intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm; - intptr_t disp = offset - pc; - if (disp == (int32_t)disp) { - tcg_out_opc(s, opc, r, 0, 0); - tcg_out8(s, (LOWREGMASK(r) << 3) | 5); - tcg_out32(s, disp); - return; - } - - /* Try for an absolute address encoding. This requires the use of the MODRM+SIB encoding and is therefore larger than rip-relative addressing. */ - if (offset == (int32_t)offset) { - tcg_out_opc(s, opc, r, 0, 0); - tcg_out8(s, (LOWREGMASK(r) << 3) | 4); - tcg_out8(s, (4 << 3) | 5); - tcg_out32(s, offset); - return; - } - - /* ??? The memory isn't directly addressable. */ - tcg_abort(); - } else { - /* Absolute address. */ - tcg_out_opc(s, opc, r, 0, 0); - tcg_out8(s, (r << 3) | 5); - tcg_out32(s, offset); - return; - } - } - - /* Find the length of the immediate addend. Note that the encoding that would be used for (%ebp) indicates absolute addressing. */ - if (rm < 0) { - mod = 0, len = 4, rm = 5; - } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) { - mod = 0, len = 0; - } else if (offset == (int8_t)offset) { - mod = 0x40, len = 1; - } else { - mod = 0x80, len = 4; - } - - /* Use a single byte MODRM format if possible. Note that the encoding that would be used for %esp is the escape to the two byte form. */ - if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) { - /* Single byte MODRM format. */ - tcg_out_opc(s, opc, r, rm, 0); - tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); - } else { - /* Two byte MODRM+SIB format. */ - - /* Note that the encoding that would place %esp into the index - field indicates no index register. In 64-bit mode, the REX.X - bit counts, so %r12 can be used as the index.
*/ - if (index < 0) { - index = 4; - } else { - assert(index != TCG_REG_ESP); - } - - tcg_out_opc(s, opc, r, rm, index); - tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4); - tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm)); - } - - if (len == 1) { - tcg_out8(s, offset); - } else if (len == 4) { - tcg_out32(s, offset); - } -} - -/* A simplification of the above with no index or shift. */ -static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, - int rm, intptr_t offset) -{ - tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset); -} - -/* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */ -static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src) -{ - /* Propagate an opcode prefix, such as P_REXW. */ - int ext = subop & ~0x7; - subop &= 0x7; - - tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src); -} - -static inline void tcg_out_mov(TCGContext *s, TCGType type, - TCGReg ret, TCGReg arg) -{ - if (arg != ret) { - int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0); - tcg_out_modrm(s, opc, ret, arg); - } -} - -static void tcg_out_movi(TCGContext *s, TCGType type, - TCGReg ret, tcg_target_long arg) -{ - tcg_target_long diff; - - if (arg == 0) { - tgen_arithr(s, ARITH_XOR, ret, ret); - return; - } - if (arg == (uint32_t)arg || type == TCG_TYPE_I32) { - tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0); - tcg_out32(s, arg); - return; - } - if (arg == (int32_t)arg) { - tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret); - tcg_out32(s, arg); - return; - } - - /* Try a 7 byte pc-relative lea before the 10 byte movq. */ - diff = arg - ((uintptr_t)s->code_ptr + 7); - if (diff == (int32_t)diff) { - tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0); - tcg_out8(s, (LOWREGMASK(ret) << 3) | 5); - tcg_out32(s, diff); - return; - } - - tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0); - tcg_out64(s, arg); -} - -static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val) -{ - if (val == (int8_t)val) { - tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0); - tcg_out8(s, val); - } else if (val == (int32_t)val) { - tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0); - tcg_out32(s, val); - } else { - tcg_abort(); - } -} - -static inline void tcg_out_push(TCGContext *s, int reg) -{ - tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0); -} - -static inline void tcg_out_pop(TCGContext *s, int reg) -{ - tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0); -} - -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, - TCGReg arg1, intptr_t arg2) -{ - int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0); - tcg_out_modrm_offset(s, opc, ret, arg1, arg2); -} - -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) -{ - int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0); - tcg_out_modrm_offset(s, opc, arg, arg1, arg2); -} - -static inline void tcg_out_sti(TCGContext *s, TCGType type, TCGReg base, - tcg_target_long ofs, tcg_target_long val) -{ - int opc = OPC_MOVL_EvIz + (type == TCG_TYPE_I64 ? P_REXW : 0); - tcg_out_modrm_offset(s, opc, 0, base, ofs); - tcg_out32(s, val); -} - -static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count) -{ - /* Propagate an opcode prefix, such as P_DATA16. 
*/ - int ext = subopc & ~0x7; - subopc &= 0x7; - - if (count == 1) { - tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg); - } else { - tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg); - tcg_out8(s, count); - } -} - -static inline void tcg_out_bswap32(TCGContext *s, int reg) -{ - tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0); -} - -static inline void tcg_out_rolw_8(TCGContext *s, int reg) -{ - tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8); -} - -static inline void tcg_out_ext8u(TCGContext *s, int dest, int src) -{ - /* movzbl */ - assert(src < 4 || TCG_TARGET_REG_BITS == 64); - tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src); -} - -static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw) -{ - /* movsbl */ - assert(src < 4 || TCG_TARGET_REG_BITS == 64); - tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src); -} - -static inline void tcg_out_ext16u(TCGContext *s, int dest, int src) -{ - /* movzwl */ - tcg_out_modrm(s, OPC_MOVZWL, dest, src); -} - -static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw) -{ - /* movsw[lq] */ - tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src); -} - -static inline void tcg_out_ext32u(TCGContext *s, int dest, int src) -{ - /* 32-bit mov zero extends. */ - tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src); -} - -static inline void tcg_out_ext32s(TCGContext *s, int dest, int src) -{ - tcg_out_modrm(s, OPC_MOVSLQ, dest, src); -} - -static inline void tcg_out_bswap64(TCGContext *s, int reg) -{ - tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0); -} - -static void tgen_arithi(TCGContext *s, int c, int r0, - tcg_target_long val, int cf) -{ - int rexw = 0; - - if (TCG_TARGET_REG_BITS == 64) { - rexw = c & -8; - c &= 7; - } - - /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce - partial flags update stalls on Pentium4 and are not recommended - by current Intel optimization manuals. */ - if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) { - int is_inc = (c == ARITH_ADD) ^ (val < 0); - if (TCG_TARGET_REG_BITS == 64) { - /* The single-byte increment encodings are re-tasked as the - REX prefixes. Use the MODRM encoding. */ - tcg_out_modrm(s, OPC_GRP5 + rexw, - (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0); - } else { - tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0); - } - return; - } - - if (c == ARITH_AND) { - if (TCG_TARGET_REG_BITS == 64) { - if (val == 0xffffffffu) { - tcg_out_ext32u(s, r0, r0); - return; - } - if (val == (uint32_t)val) { - /* AND with no high bits set can use a 32-bit operation. */ - rexw = 0; - } - } - if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) { - tcg_out_ext8u(s, r0, r0); - return; - } - if (val == 0xffffu) { - tcg_out_ext16u(s, r0, r0); - return; - } - } - - if (val == (int8_t)val) { - tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0); - tcg_out8(s, val); - return; - } - if (rexw == 0 || val == (int32_t)val) { - tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0); - tcg_out32(s, val); - return; - } - - tcg_abort(); -} - -static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) -{ - if (val != 0) { - tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0); - } -} - -/* Use SMALL != 0 to force a short forward branch. 
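- A short branch is the two-byte rel8 form; if the displacement does not fit, the tcg_abort() below fires, so callers may only force it for targets known to be near.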
*/ -static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int smallflag) -{ - int32_t val, val1; - TCGLabel *l = &s->labels[label_index]; - - if (l->has_value) { - val = tcg_pcrel_diff(s, l->u.value_ptr); - val1 = val - 2; - if ((int8_t)val1 == val1) { - if (opc == -1) { - tcg_out8(s, OPC_JMP_short); - } else { - tcg_out8(s, OPC_JCC_short + opc); - } - tcg_out8(s, val1); - } else { - if (smallflag) { - tcg_abort(); - } - if (opc == -1) { - tcg_out8(s, OPC_JMP_long); - tcg_out32(s, val - 5); - } else { - tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0); - tcg_out32(s, val - 6); - } - } - } else if (smallflag) { - if (opc == -1) { - tcg_out8(s, OPC_JMP_short); - } else { - tcg_out8(s, OPC_JCC_short + opc); - } - tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1); - s->code_ptr += 1; - } else { - if (opc == -1) { - tcg_out8(s, OPC_JMP_long); - } else { - tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0); - } - tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4); - s->code_ptr += 4; - } -} - -static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2, - int const_arg2, int rexw) -{ - if (const_arg2) { - if (arg2 == 0) { - /* test r, r */ - tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1); - } else { - tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0); - } - } else { - tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2); - } -} - -static void tcg_out_brcond32(TCGContext *s, TCGCond cond, - TCGArg arg1, TCGArg arg2, int const_arg2, - int label_index, int smallflag) -{ - tcg_out_cmp(s, arg1, arg2, const_arg2, 0); - tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, smallflag); -} - -#if TCG_TARGET_REG_BITS == 64 -static void tcg_out_brcond64(TCGContext *s, TCGCond cond, - TCGArg arg1, TCGArg arg2, int const_arg2, - int label_index, int smallflag) -{ - tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW); - tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, smallflag); -} -#else -/* XXX: we implement it at the target level to avoid having to - handle cross basic blocks temporaries */ -static void tcg_out_brcond2(TCGContext *s, const TCGArg *args, - const int *const_args, int smallflag) -{ - int label_next; - label_next = gen_new_label(s); - switch(args[4]) { - case TCG_COND_EQ: - tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2], - label_next, 1); - tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3], - args[5], smallflag); - break; - case TCG_COND_NE: - tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2], - args[5], smallflag); - tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3], - args[5], smallflag); - break; - case TCG_COND_LT: - tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3], - args[5], smallflag); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2], - args[5], smallflag); - break; - case TCG_COND_LE: - tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3], - args[5], smallflag); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2], - args[5], smallflag); - break; - case TCG_COND_GT: - tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3], - args[5], smallflag); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2], - args[5], smallflag); - break; - case TCG_COND_GE: - tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3], - args[5], smallflag); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond32(s, 
TCG_COND_GEU, args[0], args[2], const_args[2], - args[5], smallflag); - break; - case TCG_COND_LTU: - tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3], - args[5], smallflag); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2], - args[5], smallflag); - break; - case TCG_COND_LEU: - tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3], - args[5], smallflag); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2], - args[5], smallflag); - break; - case TCG_COND_GTU: - tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3], - args[5], smallflag); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2], - args[5], smallflag); - break; - case TCG_COND_GEU: - tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3], - args[5], smallflag); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2], - args[5], smallflag); - break; - default: - tcg_abort(); - } - tcg_out_label(s, label_next, s->code_ptr); -} -#endif - -static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest, - TCGArg arg1, TCGArg arg2, int const_arg2) -{ - tcg_out_cmp(s, arg1, arg2, const_arg2, 0); - tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest); - tcg_out_ext8u(s, dest, dest); -} - -#if TCG_TARGET_REG_BITS == 64 -static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest, - TCGArg arg1, TCGArg arg2, int const_arg2) -{ - tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW); - tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest); - tcg_out_ext8u(s, dest, dest); -} -#else -static void tcg_out_setcond2(TCGContext *s, const TCGArg *args, - const int *const_args) -{ - TCGArg new_args[6]; - int label_true, label_over; - - memcpy(new_args, args+1, 5*sizeof(TCGArg)); - - if (args[0] == args[1] || args[0] == args[2] - || (!const_args[3] && args[0] == args[3]) - || (!const_args[4] && args[0] == args[4])) { - /* When the destination overlaps with one of the argument - registers, don't do anything tricky. */ - label_true = gen_new_label(s); - label_over = gen_new_label(s); - - new_args[5] = label_true; - tcg_out_brcond2(s, new_args, const_args+1, 1); - - tcg_out_movi(s, TCG_TYPE_I32, args[0], 0); - tcg_out_jxx(s, JCC_JMP, label_over, 1); - tcg_out_label(s, label_true, s->code_ptr); - - tcg_out_movi(s, TCG_TYPE_I32, args[0], 1); - tcg_out_label(s, label_over, s->code_ptr); - } else { - /* When the destination does not overlap one of the arguments, - clear the destination first, jump if cond false, and emit an - increment in the true case. This results in smaller code. 
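- (Compared with the overlap case above, this saves the extra unconditional jump and the second movi.)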
*/ - - tcg_out_movi(s, TCG_TYPE_I32, args[0], 0); - - label_over = gen_new_label(s); - new_args[4] = tcg_invert_cond(new_args[4]); - new_args[5] = label_over; - tcg_out_brcond2(s, new_args, const_args+1, 1); - - tgen_arithi(s, ARITH_ADD, args[0], 1, 0); - tcg_out_label(s, label_over, s->code_ptr); - } -} -#endif - -static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest, - TCGArg c1, TCGArg c2, int const_c2, - TCGArg v1) -{ - tcg_out_cmp(s, c1, c2, const_c2, 0); - if (have_cmov) { - tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1); - } else { - int over = gen_new_label(s); - tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1); - tcg_out_mov(s, TCG_TYPE_I32, dest, v1); - tcg_out_label(s, over, s->code_ptr); - } -} - -#if TCG_TARGET_REG_BITS == 64 -static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest, - TCGArg c1, TCGArg c2, int const_c2, - TCGArg v1) -{ - tcg_out_cmp(s, c1, c2, const_c2, P_REXW); - tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1); -} -#endif - -static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest) -{ - intptr_t disp = tcg_pcrel_diff(s, dest) - 5; - - if (disp == (int32_t)disp) { - tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0); - tcg_out32(s, disp); - } else { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, (uintptr_t)dest); - tcg_out_modrm(s, OPC_GRP5, - call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10); - } -} - -static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) -{ - tcg_out_branch(s, 1, dest); -} - -static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest) -{ - tcg_out_branch(s, 0, dest); -} - -#if defined(CONFIG_SOFTMMU) -/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * int mmu_idx, uintptr_t ra) - */ -static void * const qemu_ld_helpers[16] = { -#ifdef _MSC_VER - helper_ret_ldub_mmu, // MO_UB -# ifdef HOST_WORDS_BIGENDIAN - helper_be_lduw_mmu, // MO_BEUW - helper_be_ldul_mmu, // MO_BEUL - helper_be_ldq_mmu, // MO_BEQ - 0, // MO_SB - 0, // MO_BESW - 0, // MO_BESL - 0, // n/a - 0, // n/a - helper_le_lduw_mmu, // MO_LEUW - helper_le_ldul_mmu, // MO_LEUL - helper_le_ldq_mmu, // MO_LEQ - 0, // n/a - 0, // MO_LESW - 0, // MO_LESL - 0, // n/a -# else // !HOST_WORDS_BIGENDIAN - helper_le_lduw_mmu, // MO_LEUW - helper_le_ldul_mmu, // MO_LEUL - helper_le_ldq_mmu, // MO_LEQ - 0, // MO_SB - 0, // MO_LESW - 0, // MO_LESL - 0, // n/a - 0, // n/a - helper_be_lduw_mmu, // MO_BEUW - helper_be_ldul_mmu, // MO_BEUL - helper_be_ldq_mmu, // MO_BEQ - 0, // n/a - 0, // MO_BESW - 0, // MO_BESL - 0, // n/a -# endif // HOST_WORDS_BIGENDIAN - -#else //_MSC_VER - [MO_UB] = helper_ret_ldub_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEQ] = helper_be_ldq_mmu, -#endif // _MSC_VER -}; - -/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, int mmu_idx, uintptr_t ra) - */ -static void * const qemu_st_helpers[16] = { -#ifdef _MSC_VER - helper_ret_stb_mmu, // MO_UB -# ifdef HOST_WORDS_BIGENDIAN - helper_be_stw_mmu, // MO_BEUW - helper_be_stl_mmu, // MO_BEUL - helper_be_stq_mmu, // MO_BEQ - 0, // MO_SB - 0, // MO_BESW - 0, // MO_BESL - 0, // n/a - 0, // n/a - helper_le_stw_mmu, // MO_LEUW - helper_le_stl_mmu, // MO_LEUL - helper_le_stq_mmu, // MO_LEQ - 0, // n/a - 0, // MO_LESW - 0, // MO_LESL - 0, // n/a -# else // !HOST_WORDS_BIGENDIAN - helper_le_stw_mmu, // MO_LEUW - 
helper_le_stl_mmu, // MO_LEUL - helper_le_stq_mmu, // MO_LEQ - 0, // MO_SB - 0, // MO_LESW - 0, // MO_LESL - 0, // n/a - 0, // n/a - helper_be_stw_mmu, // MO_BEUW - helper_be_stl_mmu, // MO_BEUL - helper_be_stq_mmu, // MO_BEQ - 0, // n/a - 0, // MO_BESW - 0, // MO_BESL - 0, // n/a -# endif // HOST_WORDS_BIGENDIAN - -#else //_MSC_VER - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEQ] = helper_be_stq_mmu, -#endif // _MSC_VER -}; - -/* Perform the TLB load and compare. - - Inputs: - ADDRLO and ADDRHI contain the low and high part of the address. - - MEM_INDEX and S_BITS are the memory context and log2 size of the load. - - WHICH is the offset into the CPUTLBEntry structure of the slot to read. - This should be offsetof addr_read or addr_write. - - Outputs: - LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses) - positions of the displacements of forward jumps to the TLB miss case. - - Second argument register is loaded with the low part of the address. - In the TLB hit case, it has been adjusted as indicated by the TLB - and so is a host address. In the TLB miss case, it continues to - hold a guest address. - - First argument register is clobbered. */ - -static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - int mem_index, TCGMemOp s_bits, - tcg_insn_unit **label_ptr, int which) -{ - const TCGReg r0 = TCG_REG_L0; - const TCGReg r1 = TCG_REG_L1; - TCGType ttype = TCG_TYPE_I32; - TCGType htype = TCG_TYPE_I32; - int trexw = 0, hrexw = 0; - - if (TCG_TARGET_REG_BITS == 64) { - if (TARGET_LONG_BITS == 64) { - ttype = TCG_TYPE_I64; - trexw = P_REXW; - } - if (TCG_TYPE_PTR == TCG_TYPE_I64) { - htype = TCG_TYPE_I64; - hrexw = P_REXW; - } - } - - tcg_out_mov(s, htype, r0, addrlo); - tcg_out_mov(s, ttype, r1, addrlo); - - tcg_out_shifti(s, SHIFT_SHR + hrexw, r0, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); - - tgen_arithi(s, ARITH_AND + trexw, r1, - TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0); - tgen_arithi(s, ARITH_AND + hrexw, r0, - (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0); - - tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0, - offsetof(CPUArchState, tlb_table[mem_index][0]) - + which); - - /* cmp 0(r0), r1 */ - tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0); - - /* Prepare for both the fast path add of the tlb addend, and the slow - path function argument setup. There are two cases worth note: - For 32-bit guest and x86_64 host, MOVL zero-extends the guest address - before the fastpath ADDQ below. For 64-bit guest and x32 host, MOVQ - copies the entire guest address for the slow path, while truncation - for the 32-bit host happens with the fastpath ADDL below. */ - tcg_out_mov(s, ttype, r1, addrlo); - - // Unicorn: fast path if hookmem is not enable - if (!HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ) && !HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE)) - tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); - else - tcg_out_opc(s, OPC_JMP_long, 0, 0, 0); /* slow_path */ - label_ptr[0] = s->code_ptr; - s->code_ptr += 4; - - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { - /* cmp 4(r0), addrhi */ - tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4); - - /* jne slow_path */ - tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); - label_ptr[1] = s->code_ptr; - s->code_ptr += 4; - } - - /* TLB Hit. 
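- At this point r0 points at the matching CPUTLBEntry and r1 still holds the guest address; adding the TLB addend below turns r1 into a host address.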
*/ - - /* add addend(r0), r1 */ - tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0, - offsetof(CPUTLBEntry, addend) - which); -} - -/* - * Record the context of a call to the out of line helper code for the slow path - * for a load or store, so that we can later generate the correct helper code - */ -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - int mem_index, tcg_insn_unit *raddr, - tcg_insn_unit **label_ptr) -{ - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->opc = opc; - label->datalo_reg = datalo; - label->datahi_reg = datahi; - label->addrlo_reg = addrlo; - label->addrhi_reg = addrhi; - label->mem_index = mem_index; - label->raddr = raddr; - label->label_ptr[0] = label_ptr[0]; - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { - label->label_ptr[1] = label_ptr[1]; - } -} - -/* - * Generate code for the slow path for a load at the end of block - */ -static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - TCGMemOp opc = l->opc; - TCGReg data_reg; - tcg_insn_unit **label_ptr = &l->label_ptr[0]; - - /* resolve label address */ - tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4); - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { - tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4); - } - - if (TCG_TARGET_REG_BITS == 32) { - int ofs = 0; - - tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); - ofs += 4; - - tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); - ofs += 4; - - if (TARGET_LONG_BITS == 64) { - tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index); - ofs += 4; - - tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr); - } else { - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); - /* The second argument is already loaded with addrlo. */ - tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], - l->mem_index); - tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3], - (uintptr_t)l->raddr); - } - - tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); // qq - - data_reg = l->datalo_reg; - switch (opc & MO_SSIZE) { - case MO_SB: - tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW); - break; - case MO_SW: - tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW); - break; -#if TCG_TARGET_REG_BITS == 64 - case MO_SL: - tcg_out_ext32s(s, data_reg, TCG_REG_EAX); - break; -#endif - case MO_UB: - case MO_UW: - /* Note that the helpers have zero-extended to tcg_target_long. 
*/ - case MO_UL: - tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); - break; - case MO_Q: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX); - } else if (data_reg == TCG_REG_EDX) { - /* xchg %edx, %eax */ - tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0); - tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX); - } else { - tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); - tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX); - } - break; - default: - tcg_abort(); - } - - /* Jump to the code corresponding to next IR of qemu_st */ - tcg_out_jmp(s, l->raddr); -} - -/* - * Generate code for the slow path for a store at the end of block - */ -static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - TCGMemOp opc = l->opc; - TCGMemOp s_bits = opc & MO_SIZE; - tcg_insn_unit **label_ptr = &l->label_ptr[0]; - TCGReg retaddr; - - /* resolve label address */ - tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4); - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { - tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4); - } - - if (TCG_TARGET_REG_BITS == 32) { - int ofs = 0; - - tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); - ofs += 4; - - tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); - ofs += 4; - - if (TARGET_LONG_BITS == 64) { - tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs); - ofs += 4; - - if (s_bits == MO_64) { - tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index); - ofs += 4; - - retaddr = TCG_REG_EAX; - tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr); - tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs); - } else { - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); - /* The second argument is already loaded with addrlo. */ - tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), - tcg_target_call_iarg_regs[2], l->datalo_reg); - tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], - l->mem_index); - - if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) { - retaddr = tcg_target_call_iarg_regs[4]; - tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); - } else { - retaddr = TCG_REG_RAX; - tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); - tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, - TCG_TARGET_CALL_STACK_OFFSET); - } - } - - /* "Tail call" to the helper, with the return address back inline. 
*/ - tcg_out_push(s, retaddr); - tcg_out_jmp(s, qemu_st_helpers[opc]); -} -#elif defined(__x86_64__) && defined(__linux__) -# include <asm/prctl.h> -# include <sys/prctl.h> - -int arch_prctl(int code, unsigned long addr); - -static inline void setup_guest_base_seg(TCGContext *s) -{ - if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) { - s->guest_base_flags = P_GS; - } -} -#else -static inline void setup_guest_base_seg(TCGContext *s) { } -#endif /* SOFTMMU */ - -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg base, intptr_t ofs, int seg, - TCGMemOp memop) -{ - const TCGMemOp real_bswap = memop & MO_BSWAP; - TCGMemOp bswap = real_bswap; - int movop = OPC_MOVL_GvEv; - - if (s->have_movbe && real_bswap) { - bswap = 0; - movop = OPC_MOVBE_GyMy; - } - - switch (memop & MO_SSIZE) { - case MO_UB: - tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs); - break; - case MO_SB: - tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs); - break; - case MO_UW: - tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs); - if (real_bswap) { - tcg_out_rolw_8(s, datalo); - } - break; - case MO_SW: - if (real_bswap) { - if (s->have_movbe) { - tcg_out_modrm_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg, - datalo, base, ofs); - } else { - tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs); - tcg_out_rolw_8(s, datalo); - } - tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo); - } else { - tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW + seg, - datalo, base, ofs); - } - break; - case MO_UL: - tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs); - if (bswap) { - tcg_out_bswap32(s, datalo); - } - break; -#if TCG_TARGET_REG_BITS == 64 - case MO_SL: - if (real_bswap) { - tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs); - if (bswap) { - tcg_out_bswap32(s, datalo); - } - tcg_out_ext32s(s, datalo, datalo); - } else { - tcg_out_modrm_offset(s, OPC_MOVSLQ + seg, datalo, base, ofs); - } - break; -#endif - case MO_Q: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs); - if (bswap) { - tcg_out_bswap64(s, datalo); - } - } else { - if (real_bswap) { - int t = datalo; - datalo = datahi; - datahi = t; - } - if (base != datalo) { - tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs); - tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs + 4); - } else { - tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs + 4); - tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs); - } - if (bswap) { - tcg_out_bswap32(s, datalo); - tcg_out_bswap32(s, datahi); - } - } - break; - default: - tcg_abort(); - } -} - -/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and - EAX. It will be useful once fixed registers globals are less - common. */ -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) -{ - TCGReg datalo, datahi, addrlo; - TCGReg addrhi QEMU_UNUSED_VAR; - TCGMemOp opc; -#if defined(CONFIG_SOFTMMU) - int mem_index; - TCGMemOp s_bits; - tcg_insn_unit *label_ptr[2]; -#endif - - datalo = *args++; - datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); - addrlo = *args++; - addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); - opc = *args++; - -#if defined(CONFIG_SOFTMMU) - mem_index = *args++; - s_bits = opc & MO_SIZE; - - tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits, - label_ptr, offsetof(CPUTLBEntry, addr_read)); - - /* TLB Hit.
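- TCG_REG_L1 now holds the host address computed by tcg_out_tlb_load(), so the direct load below uses it with a zero offset.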
*/ - tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc); - - /* Record the current context of a load into ldst label */ - add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi, - mem_index, s->code_ptr, label_ptr); -#else - { - int32_t offset = GUEST_BASE; - TCGReg base = addrlo; - int seg = 0; - - /* ??? We assume all operations have left us with register contents - that are zero extended. So far this appears to be true. If we - want to enforce this, we can either do an explicit zero-extension - here, or (if GUEST_BASE == 0, or a segment register is in use) - use the ADDR32 prefix. For now, do nothing. */ - if (GUEST_BASE && s->guest_base_flags) { - seg = s->guest_base_flags; - offset = 0; - } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE); - tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base); - base = TCG_REG_L1; - offset = 0; - } - - tcg_out_qemu_ld_direct(s, datalo, datahi, base, offset, seg, opc); - } -#endif -} - -static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg base, intptr_t ofs, int seg, - TCGMemOp memop) -{ - /* ??? Ideally we wouldn't need a scratch register. For user-only, - we could perform the bswap twice to restore the original value - instead of moving to the scratch. But as it is, the L constraint - means that TCG_REG_L0 is definitely free here. */ - const TCGReg scratch = TCG_REG_L0; - const TCGMemOp real_bswap = memop & MO_BSWAP; - TCGMemOp bswap = real_bswap; - int movop = OPC_MOVL_EvGv; - - if (s->have_movbe && real_bswap) { - bswap = 0; - movop = OPC_MOVBE_MyGy; - } - - switch (memop & MO_SIZE) { - case MO_8: - /* In 32-bit mode, 8-bit stores can only happen from [abcd]x. - Use the scratch register if necessary. 
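- (Only AL/CL/DL/BL can be addressed as byte registers without a REX prefix, and REX does not exist in 32-bit mode, hence the datalo >= 4 check.)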
*/ - if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) { - tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); - datalo = scratch; - } - tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg, - datalo, base, ofs); - break; - case MO_16: - if (bswap) { - tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); - tcg_out_rolw_8(s, scratch); - datalo = scratch; - } - tcg_out_modrm_offset(s, movop + P_DATA16 + seg, datalo, base, ofs); - break; - case MO_32: - if (bswap) { - tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); - tcg_out_bswap32(s, scratch); - datalo = scratch; - } - tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs); - break; - case MO_64: - if (TCG_TARGET_REG_BITS == 64) { - if (bswap) { - tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo); - tcg_out_bswap64(s, scratch); - datalo = scratch; - } - tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs); - } else if (bswap) { - tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi); - tcg_out_bswap32(s, scratch); - tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs); - tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); - tcg_out_bswap32(s, scratch); - tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4); - } else { - if (real_bswap) { - int t = datalo; - datalo = datahi; - datahi = t; - } - tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs); - tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs+4); - } - break; - default: - tcg_abort(); - } -} - -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) -{ - TCGReg datalo, datahi, addrlo; - TCGReg addrhi QEMU_UNUSED_VAR; - TCGMemOp opc; -#if defined(CONFIG_SOFTMMU) - int mem_index; - TCGMemOp s_bits; - tcg_insn_unit *label_ptr[2]; -#endif - - datalo = *args++; - datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); - addrlo = *args++; - addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); - opc = *args++; - -#if defined(CONFIG_SOFTMMU) - mem_index = *args++; - s_bits = opc & MO_SIZE; - - tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits, - label_ptr, offsetof(CPUTLBEntry, addr_write)); - - /* TLB Hit. */ - tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc); - - /* Record the current context of a store into ldst label */ - add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi, - mem_index, s->code_ptr, label_ptr); -#else - { - int32_t offset = GUEST_BASE; - TCGReg base = addrlo; - int seg = 0; - - /* ??? We assume all operations have left us with register contents - that are zero extended. So far this appears to be true. If we - want to enforce this, we can either do an explicit zero-extension - here, or (if GUEST_BASE == 0, or a segment register is in use) - use the ADDR32 prefix. For now, do nothing. 
*/ - if (GUEST_BASE && s->guest_base_flags) { - seg = s->guest_base_flags; - offset = 0; - } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) { - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE); - tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base); - base = TCG_REG_L1; - offset = 0; - } - - tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc); - } -#endif -} - -static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg *args, const int *const_args) -{ - int c, vexop, rexw = 0; - -#if TCG_TARGET_REG_BITS == 64 -# define OP_32_64(x) \ - case glue(glue(INDEX_op_, x), _i64): \ - rexw = P_REXW; /* FALLTHRU */ \ - case glue(glue(INDEX_op_, x), _i32) -#else -# define OP_32_64(x) \ - case glue(glue(INDEX_op_, x), _i32) -#endif - - switch(opc) { - case INDEX_op_exit_tb: - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]); - tcg_out_jmp(s, s->tb_ret_addr); - break; - case INDEX_op_goto_tb: - if (s->tb_jmp_offset) { - /* direct jump method */ - tcg_out8(s, OPC_JMP_long); /* jmp im */ - s->tb_jmp_offset[args[0]] = tcg_current_code_size(s); - tcg_out32(s, 0); - } else { - /* indirect jump method */ - tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1, - (intptr_t)(s->tb_next + args[0])); - } - s->tb_next_offset[args[0]] = tcg_current_code_size(s); - break; - case INDEX_op_br: - tcg_out_jxx(s, JCC_JMP, args[0], 0); - break; - OP_32_64(ld8u): - /* Note that we can ignore REXW for the zero-extend to 64-bit. */ - tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]); - break; - OP_32_64(ld8s): - tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]); - break; - OP_32_64(ld16u): - /* Note that we can ignore REXW for the zero-extend to 64-bit. */ - tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]); - break; - OP_32_64(ld16s): - tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]); - break; -#if TCG_TARGET_REG_BITS == 64 - case INDEX_op_ld32u_i64: -#endif - case INDEX_op_ld_i32: - tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]); - break; - - OP_32_64(st8): - if (const_args[0]) { - tcg_out_modrm_offset(s, OPC_MOVB_EvIz, - 0, args[1], args[2]); - tcg_out8(s, args[0]); - } else { - tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, - args[0], args[1], args[2]); - } - break; - OP_32_64(st16): - if (const_args[0]) { - tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, - 0, args[1], args[2]); - tcg_out16(s, args[0]); - } else { - tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, - args[0], args[1], args[2]); - } - break; -#if TCG_TARGET_REG_BITS == 64 - case INDEX_op_st32_i64: -#endif - case INDEX_op_st_i32: - if (const_args[0]) { - tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, args[1], args[2]); - tcg_out32(s, args[0]); - } else { - tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); - } - break; - - OP_32_64(add): - /* For 3-operand addition, use LEA. */ - if (args[0] != args[1]) { - TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0; - - if (const_args[2]) { - c3 = a2, a2 = -1; - } else if (a0 == a2) { - /* Watch out for dest = src + dest, since we've removed - the matching constraint on the add. 
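- LEA cannot be used when a0 == a2, so fall back to a plain ADD of a1, which is safe because addition commutes.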
*/ - tgen_arithr(s, ARITH_ADD + rexw, a0, a1); - break; - } - - tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3); - break; - } - c = ARITH_ADD; - goto gen_arith; - OP_32_64(sub): - c = ARITH_SUB; - goto gen_arith; - OP_32_64(and): - c = ARITH_AND; - goto gen_arith; - OP_32_64(or): - c = ARITH_OR; - goto gen_arith; - OP_32_64(xor): - c = ARITH_XOR; - goto gen_arith; - gen_arith: - if (const_args[2]) { - tgen_arithi(s, c + rexw, args[0], args[2], 0); - } else { - tgen_arithr(s, c + rexw, args[0], args[2]); - } - break; - - OP_32_64(andc): - if (const_args[2]) { - tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, - args[0], args[1]); - tgen_arithi(s, ARITH_AND + rexw, args[0], ~args[2], 0); - } else { - tcg_out_vex_modrm(s, OPC_ANDN + rexw, args[0], args[2], args[1]); - } - break; - - OP_32_64(mul): - if (const_args[2]) { - int32_t val; - val = args[2]; - if (val == (int8_t)val) { - tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]); - tcg_out8(s, val); - } else { - tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]); - tcg_out32(s, val); - } - } else { - tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]); - } - break; - - OP_32_64(div2): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]); - break; - OP_32_64(divu2): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]); - break; - - OP_32_64(shl): - c = SHIFT_SHL; - vexop = OPC_SHLX; - goto gen_shift_maybe_vex; - OP_32_64(shr): - c = SHIFT_SHR; - vexop = OPC_SHRX; - goto gen_shift_maybe_vex; - OP_32_64(sar): - c = SHIFT_SAR; - vexop = OPC_SARX; - goto gen_shift_maybe_vex; - OP_32_64(rotl): - c = SHIFT_ROL; - goto gen_shift; - OP_32_64(rotr): - c = SHIFT_ROR; - goto gen_shift; - gen_shift_maybe_vex: - if (have_bmi2 && !const_args[2]) { - tcg_out_vex_modrm(s, vexop + rexw, args[0], args[2], args[1]); - break; - } - /* FALLTHRU */ - gen_shift: - if (const_args[2]) { - tcg_out_shifti(s, c + rexw, args[0], args[2]); - } else { - tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]); - } - break; - - case INDEX_op_brcond_i32: - tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1], - args[3], 0); - break; - case INDEX_op_setcond_i32: - tcg_out_setcond32(s, args[3], args[0], args[1], - args[2], const_args[2]); - break; - case INDEX_op_movcond_i32: - tcg_out_movcond32(s, args[5], args[0], args[1], - args[2], const_args[2], args[3]); - break; - - OP_32_64(bswap16): - tcg_out_rolw_8(s, args[0]); - break; - OP_32_64(bswap32): - tcg_out_bswap32(s, args[0]); - break; - - OP_32_64(neg): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]); - break; - OP_32_64(not): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]); - break; - - OP_32_64(ext8s): - tcg_out_ext8s(s, args[0], args[1], rexw); - break; - OP_32_64(ext16s): - tcg_out_ext16s(s, args[0], args[1], rexw); - break; - OP_32_64(ext8u): - tcg_out_ext8u(s, args[0], args[1]); - break; - OP_32_64(ext16u): - tcg_out_ext16u(s, args[0], args[1]); - break; - - case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, args, 0); - break; - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args, 1); - break; - case INDEX_op_qemu_st_i32: - tcg_out_qemu_st(s, args, 0); - break; - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args, 1); - break; - - OP_32_64(mulu2): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]); - break; - OP_32_64(muls2): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]); - break; - OP_32_64(add2): - if (const_args[4]) { - tgen_arithi(s, ARITH_ADD + rexw, args[0], args[4], 1); - } else { - tgen_arithr(s, ARITH_ADD + rexw, 
args[0], args[4]); - } - if (const_args[5]) { - tgen_arithi(s, ARITH_ADC + rexw, args[1], args[5], 1); - } else { - tgen_arithr(s, ARITH_ADC + rexw, args[1], args[5]); - } - break; - OP_32_64(sub2): - if (const_args[4]) { - tgen_arithi(s, ARITH_SUB + rexw, args[0], args[4], 1); - } else { - tgen_arithr(s, ARITH_SUB + rexw, args[0], args[4]); - } - if (const_args[5]) { - tgen_arithi(s, ARITH_SBB + rexw, args[1], args[5], 1); - } else { - tgen_arithr(s, ARITH_SBB + rexw, args[1], args[5]); - } - break; - -#if TCG_TARGET_REG_BITS == 32 - case INDEX_op_brcond2_i32: - tcg_out_brcond2(s, args, const_args, 0); - break; - case INDEX_op_setcond2_i32: - tcg_out_setcond2(s, args, const_args); - break; -#else /* TCG_TARGET_REG_BITS == 64 */ - case INDEX_op_ld32s_i64: - tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]); - break; - case INDEX_op_ld_i64: - tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]); - break; - case INDEX_op_st_i64: - if (const_args[0]) { - tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, - 0, args[1], args[2]); - tcg_out32(s, args[0]); - } else { - tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]); - } - break; - - case INDEX_op_brcond_i64: - tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1], - args[3], 0); - break; - case INDEX_op_setcond_i64: - tcg_out_setcond64(s, args[3], args[0], args[1], - args[2], const_args[2]); - break; - case INDEX_op_movcond_i64: - tcg_out_movcond64(s, args[5], args[0], args[1], - args[2], const_args[2], args[3]); - break; - - case INDEX_op_bswap64_i64: - tcg_out_bswap64(s, args[0]); - break; - case INDEX_op_ext32u_i64: - tcg_out_ext32u(s, args[0], args[1]); - break; - case INDEX_op_ext32s_i64: - tcg_out_ext32s(s, args[0], args[1]); - break; -#endif - - OP_32_64(deposit): - if (args[3] == 0 && args[4] == 8) { - /* load bits 0..7 */ - tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, - args[2], args[0]); - } else if (args[3] == 8 && args[4] == 8) { - /* load bits 8..15 */ - tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4); - } else if (args[3] == 0 && args[4] == 16) { - /* load bits 0..15 */ - tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]); - } else { - tcg_abort(); - } - break; - - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ - case INDEX_op_movi_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. 
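- These opcodes are expanded by common code before tcg_out_op() is reached, so hitting them here is a bug and falls through to the abort below.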
*/ - default: - tcg_abort(); - } - -#undef OP_32_64 -} - -static const TCGTargetOpDef x86_op_defs[] = { - { INDEX_op_exit_tb, { NULL } }, - { INDEX_op_goto_tb, { NULL } }, - { INDEX_op_br, { NULL } }, - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - { INDEX_op_st8_i32, { "qi", "r" } }, - { INDEX_op_st16_i32, { "ri", "r" } }, - { INDEX_op_st_i32, { "ri", "r" } }, - - { INDEX_op_add_i32, { "r", "r", "ri" } }, - { INDEX_op_sub_i32, { "r", "0", "ri" } }, - { INDEX_op_mul_i32, { "r", "0", "ri" } }, - { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } }, - { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } }, - { INDEX_op_and_i32, { "r", "0", "ri" } }, - { INDEX_op_or_i32, { "r", "0", "ri" } }, - { INDEX_op_xor_i32, { "r", "0", "ri" } }, - { INDEX_op_andc_i32, { "r", "r", "ri" } }, - - { INDEX_op_shl_i32, { "r", "0", "Ci" } }, - { INDEX_op_shr_i32, { "r", "0", "Ci" } }, - { INDEX_op_sar_i32, { "r", "0", "Ci" } }, - { INDEX_op_rotl_i32, { "r", "0", "ci" } }, - { INDEX_op_rotr_i32, { "r", "0", "ci" } }, - - { INDEX_op_brcond_i32, { "r", "ri" } }, - - { INDEX_op_bswap16_i32, { "r", "0" } }, - { INDEX_op_bswap32_i32, { "r", "0" } }, - - { INDEX_op_neg_i32, { "r", "0" } }, - - { INDEX_op_not_i32, { "r", "0" } }, - - { INDEX_op_ext8s_i32, { "r", "q" } }, - { INDEX_op_ext16s_i32, { "r", "r" } }, - { INDEX_op_ext8u_i32, { "r", "q" } }, - { INDEX_op_ext16u_i32, { "r", "r" } }, - - { INDEX_op_setcond_i32, { "q", "r", "ri" } }, - - { INDEX_op_deposit_i32, { "Q", "0", "Q" } }, - { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } }, - - { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } }, - { INDEX_op_muls2_i32, { "a", "d", "a", "r" } }, - { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } }, - { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } }, - -#if TCG_TARGET_REG_BITS == 32 - { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } }, - { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } }, -#else - { INDEX_op_ld8u_i64, { "r", "r" } }, - { INDEX_op_ld8s_i64, { "r", "r" } }, - { INDEX_op_ld16u_i64, { "r", "r" } }, - { INDEX_op_ld16s_i64, { "r", "r" } }, - { INDEX_op_ld32u_i64, { "r", "r" } }, - { INDEX_op_ld32s_i64, { "r", "r" } }, - { INDEX_op_ld_i64, { "r", "r" } }, - { INDEX_op_st8_i64, { "ri", "r" } }, - { INDEX_op_st16_i64, { "ri", "r" } }, - { INDEX_op_st32_i64, { "ri", "r" } }, - { INDEX_op_st_i64, { "re", "r" } }, - - { INDEX_op_add_i64, { "r", "r", "re" } }, - { INDEX_op_mul_i64, { "r", "0", "re" } }, - { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } }, - { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } }, - { INDEX_op_sub_i64, { "r", "0", "re" } }, - { INDEX_op_and_i64, { "r", "0", "reZ" } }, - { INDEX_op_or_i64, { "r", "0", "re" } }, - { INDEX_op_xor_i64, { "r", "0", "re" } }, - { INDEX_op_andc_i64, { "r", "r", "rI" } }, - - { INDEX_op_shl_i64, { "r", "0", "Ci" } }, - { INDEX_op_shr_i64, { "r", "0", "Ci" } }, - { INDEX_op_sar_i64, { "r", "0", "Ci" } }, - { INDEX_op_rotl_i64, { "r", "0", "ci" } }, - { INDEX_op_rotr_i64, { "r", "0", "ci" } }, - - { INDEX_op_brcond_i64, { "r", "re" } }, - { INDEX_op_setcond_i64, { "r", "r", "re" } }, - - { INDEX_op_bswap16_i64, { "r", "0" } }, - { INDEX_op_bswap32_i64, { "r", "0" } }, - { INDEX_op_bswap64_i64, { "r", "0" } }, - { INDEX_op_neg_i64, { "r", "0" } }, - { INDEX_op_not_i64, { "r", "0" } }, - - { INDEX_op_ext8s_i64, { "r", "r" } }, - { INDEX_op_ext16s_i64, { "r", "r" } }, - { INDEX_op_ext32s_i64, { "r", 
"r" } }, - { INDEX_op_ext8u_i64, { "r", "r" } }, - { INDEX_op_ext16u_i64, { "r", "r" } }, - { INDEX_op_ext32u_i64, { "r", "r" } }, - - { INDEX_op_deposit_i64, { "Q", "0", "Q" } }, - { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } }, - - { INDEX_op_mulu2_i64, { "a", "d", "a", "r" } }, - { INDEX_op_muls2_i64, { "a", "d", "a", "r" } }, - { INDEX_op_add2_i64, { "r", "r", "0", "1", "re", "re" } }, - { INDEX_op_sub2_i64, { "r", "r", "0", "1", "re", "re" } }, -#endif - -#if TCG_TARGET_REG_BITS == 64 - { INDEX_op_qemu_ld_i32, { "r", "L" } }, - { INDEX_op_qemu_st_i32, { "L", "L" } }, - { INDEX_op_qemu_ld_i64, { "r", "L" } }, - { INDEX_op_qemu_st_i64, { "L", "L" } }, -#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - { INDEX_op_qemu_ld_i32, { "r", "L" } }, - { INDEX_op_qemu_st_i32, { "L", "L" } }, - { INDEX_op_qemu_ld_i64, { "r", "r", "L" } }, - { INDEX_op_qemu_st_i64, { "L", "L", "L" } }, -#else - { INDEX_op_qemu_ld_i32, { "r", "L", "L" } }, - { INDEX_op_qemu_st_i32, { "L", "L", "L" } }, - { INDEX_op_qemu_ld_i64, { "r", "r", "L", "L" } }, - { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } }, -#endif - { -1 }, -}; - -static int tcg_target_callee_save_regs[] = { -#if TCG_TARGET_REG_BITS == 64 - TCG_REG_RBP, - TCG_REG_RBX, -#if (defined(_WIN64) || defined(__CYGWIN__)) - TCG_REG_RDI, - TCG_REG_RSI, -#endif - TCG_REG_R12, - TCG_REG_R13, - TCG_REG_R14, /* Currently used for the global env. */ - TCG_REG_R15, -#else - TCG_REG_EBP, /* Currently used for the global env. */ - TCG_REG_EBX, - TCG_REG_ESI, - TCG_REG_EDI, -#endif -}; - -/* Compute frame size via macros, to share between tcg_target_qemu_prologue - and tcg_register_jit. */ - -#define PUSH_SIZE \ - ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \ - * (TCG_TARGET_REG_BITS / 8)) - -#define FRAME_SIZE \ - ((PUSH_SIZE \ - + TCG_STATIC_CALL_ARGS_SIZE \ - + CPU_TEMP_BUF_NLONGS * sizeof(long) \ - + TCG_TARGET_STACK_ALIGN - 1) \ - & ~(TCG_TARGET_STACK_ALIGN - 1)) - -/* Generate global QEMU prologue and epilogue code */ -static void tcg_target_qemu_prologue(TCGContext *s) -{ - int i, stack_addend; - - /* TB prologue */ - - /* Reserve some stack space, also for TCG temps. */ - stack_addend = FRAME_SIZE - PUSH_SIZE; - tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, - CPU_TEMP_BUF_NLONGS * sizeof(long)); - - /* Save all callee saved registers. */ - for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { - tcg_out_push(s, tcg_target_callee_save_regs[i]); - } - -#if TCG_TARGET_REG_BITS == 32 - tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, - (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4); - tcg_out_addi(s, TCG_REG_ESP, -stack_addend); - /* jmp *tb. */ - tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP, - (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4 - + stack_addend); -#else - tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); - tcg_out_addi(s, TCG_REG_ESP, -stack_addend); - /* jmp *tb. */ - tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]); -#endif - - /* TB epilogue */ - s->tb_ret_addr = s->code_ptr; - - tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend); - - for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) { - tcg_out_pop(s, tcg_target_callee_save_regs[i]); - } - tcg_out_opc(s, OPC_RET, 0, 0, 0); - -#if !defined(CONFIG_SOFTMMU) - /* Try to set up a segment register to point to GUEST_BASE. 
*/ - if (GUEST_BASE) { - setup_guest_base_seg(s); - } -#endif -} - -static void tcg_target_init(TCGContext *s) -{ -#ifdef CONFIG_CPUID_H - unsigned a, b, c, d; - int max; - -#ifdef _MSC_VER - int cpu_info[4]; - __cpuid(cpu_info, 0); - max = cpu_info[0]; -#else - max = __get_cpuid_max(0, 0); -#endif - - if (max >= 1) { -#ifdef _MSC_VER - __cpuid(cpu_info, 1); - a = cpu_info[0]; - b = cpu_info[1]; - c = cpu_info[2]; - d = cpu_info[3]; -#else - __cpuid(1, a, b, c, d); -#endif -#ifndef have_cmov - /* For 32-bit, 99% certainty that we're running on hardware that - supports cmov, but we still need to check. In case cmov is not - available, we'll use a small forward branch. */ - have_cmov = (d & bit_CMOV) != 0; -#endif -#ifndef have_movbe - /* MOVBE is only available on Intel Atom and Haswell CPUs, so we - need to probe for it. */ - s->have_movbe = (c & bit_MOVBE) != 0; -#endif - } - - if (max >= 7) { - /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */ -#ifdef _MSC_VER - __cpuidex(cpu_info, 7, 0); -#else - __cpuid_count(7, 0, a, b, c, d); -#endif -#ifdef bit_BMI - have_bmi1 = (b & bit_BMI) != 0; -#endif -#ifndef have_bmi2 - have_bmi2 = (b & bit_BMI2) != 0; -#endif - } -#endif - - if (TCG_TARGET_REG_BITS == 64) { - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff); - } else { - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff); - } - - tcg_regset_clear(s->tcg_target_call_clobber_regs); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_EAX); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_EDX); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_ECX); - if (TCG_TARGET_REG_BITS == 64) { -#if !(defined(_WIN64) || defined(__CYGWIN__)) - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_RDI); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_RSI); -#endif - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R8); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R9); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R10); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R11); - } - - tcg_regset_clear(s->reserved_regs); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); - - tcg_add_target_add_op_defs(s, x86_op_defs); -} diff --git a/qemu/tcg/i386/tcg-target.h b/qemu/tcg/i386/tcg-target.h index fdea43b7..24ba5d19 100644 --- a/qemu/tcg/i386/tcg-target.h +++ b/qemu/tcg/i386/tcg-target.h @@ -21,17 +21,19 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ -#ifndef TCG_TARGET_I386 -#define TCG_TARGET_I386 1 + +#ifndef I386_TCG_TARGET_H +#define I386_TCG_TARGET_H #define TCG_TARGET_INSN_UNIT_SIZE 1 +#define TCG_TARGET_TLB_DISPLACEMENT_BITS 31 #ifdef __x86_64__ # define TCG_TARGET_REG_BITS 64 -# define TCG_TARGET_NB_REGS 16 +# define TCG_TARGET_NB_REGS 32 #else # define TCG_TARGET_REG_BITS 32 -# define TCG_TARGET_NB_REGS 8 +# define TCG_TARGET_NB_REGS 24 #endif typedef enum { @@ -54,6 +56,26 @@ typedef enum { TCG_REG_R13, TCG_REG_R14, TCG_REG_R15, + + TCG_REG_XMM0, + TCG_REG_XMM1, + TCG_REG_XMM2, + TCG_REG_XMM3, + TCG_REG_XMM4, + TCG_REG_XMM5, + TCG_REG_XMM6, + TCG_REG_XMM7, + + /* 64-bit registers; likewise always define. 
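+ They are only usable on 64-bit hosts, but defining them unconditionally keeps the register numbering stable and avoids more ifdefs.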
*/ + TCG_REG_XMM8, + TCG_REG_XMM9, + TCG_REG_XMM10, + TCG_REG_XMM11, + TCG_REG_XMM12, + TCG_REG_XMM13, + TCG_REG_XMM14, + TCG_REG_XMM15, + TCG_REG_RAX = TCG_REG_EAX, TCG_REG_RCX = TCG_REG_ECX, TCG_REG_RDX = TCG_REG_EDX, @@ -62,18 +84,23 @@ typedef enum { TCG_REG_RBP = TCG_REG_EBP, TCG_REG_RSI = TCG_REG_ESI, TCG_REG_RDI = TCG_REG_EDI, + + TCG_AREG0 = TCG_REG_EBP, + TCG_REG_CALL_STACK = TCG_REG_ESP } TCGReg; /* used for function call generation */ -#define TCG_REG_CALL_STACK TCG_REG_ESP #define TCG_TARGET_STACK_ALIGN 16 -#if defined(_WIN64) || (defined(__CYGWIN__) && defined(__x86_64__)) +#if defined(_WIN64) #define TCG_TARGET_CALL_STACK_OFFSET 32 #else #define TCG_TARGET_CALL_STACK_OFFSET 0 #endif extern bool have_bmi1; +extern bool have_popcnt; +extern bool have_avx1; +extern bool have_avx2; /* optional instructions */ #define TCG_TARGET_HAS_div2_i32 1 @@ -91,7 +118,13 @@ extern bool have_bmi1; #define TCG_TARGET_HAS_eqv_i32 0 #define TCG_TARGET_HAS_nand_i32 0 #define TCG_TARGET_HAS_nor_i32 0 +#define TCG_TARGET_HAS_clz_i32 1 +#define TCG_TARGET_HAS_ctz_i32 1 +#define TCG_TARGET_HAS_ctpop_i32 have_popcnt #define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_extract_i32 1 +#define TCG_TARGET_HAS_sextract_i32 1 +#define TCG_TARGET_HAS_extract2_i32 1 #define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_add2_i32 1 #define TCG_TARGET_HAS_sub2_i32 1 @@ -99,9 +132,13 @@ extern bool have_bmi1; #define TCG_TARGET_HAS_muls2_i32 1 #define TCG_TARGET_HAS_muluh_i32 0 #define TCG_TARGET_HAS_mulsh_i32 0 +#define TCG_TARGET_HAS_goto_ptr 1 +#define TCG_TARGET_HAS_direct_jump 1 #if TCG_TARGET_REG_BITS == 64 -#define TCG_TARGET_HAS_trunc_shr_i32 0 +/* Keep target addresses zero-extended in a register. */ +#define TCG_TARGET_HAS_extrl_i64_i32 (TARGET_LONG_BITS == 32) +#define TCG_TARGET_HAS_extrh_i64_i32 (TARGET_LONG_BITS == 32) #define TCG_TARGET_HAS_div2_i64 1 #define TCG_TARGET_HAS_rot_i64 1 #define TCG_TARGET_HAS_ext8s_i64 1 @@ -120,7 +157,13 @@ extern bool have_bmi1; #define TCG_TARGET_HAS_eqv_i64 0 #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_clz_i64 1 +#define TCG_TARGET_HAS_ctz_i64 1 +#define TCG_TARGET_HAS_ctpop_i64 have_popcnt #define TCG_TARGET_HAS_deposit_i64 1 +#define TCG_TARGET_HAS_extract_i64 1 +#define TCG_TARGET_HAS_sextract_i64 0 +#define TCG_TARGET_HAS_extract2_i64 1 #define TCG_TARGET_HAS_movcond_i64 1 #define TCG_TARGET_HAS_add2_i64 1 #define TCG_TARGET_HAS_sub2_i64 1 @@ -130,19 +173,63 @@ extern bool have_bmi1; #define TCG_TARGET_HAS_mulsh_i64 0 #endif +/* We do not support older SSE systems, only beginning with AVX1. 
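+ This is why TCG_TARGET_HAS_v64/v128 below are gated on have_avx1 and v256 on have_avx2.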
*/ +#define TCG_TARGET_HAS_v64 have_avx1 +#define TCG_TARGET_HAS_v128 have_avx1 +#define TCG_TARGET_HAS_v256 have_avx2 + +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec 0 +#define TCG_TARGET_HAS_not_vec 0 +#define TCG_TARGET_HAS_neg_vec 0 +#define TCG_TARGET_HAS_abs_vec 1 +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 1 +#define TCG_TARGET_HAS_shv_vec have_avx2 +#define TCG_TARGET_HAS_cmp_vec 1 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 1 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec 0 +#define TCG_TARGET_HAS_cmpsel_vec -1 + #define TCG_TARGET_deposit_i32_valid(ofs, len) \ (((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \ ((ofs) == 0 && (len) == 16)) #define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid -#if TCG_TARGET_REG_BITS == 64 -# define TCG_AREG0 TCG_REG_R14 -#else -# define TCG_AREG0 TCG_REG_EBP -#endif +/* Check for the possibility of high-byte extraction and, for 64-bit, + zero-extending 32-bit right-shift. */ +#define TCG_TARGET_extract_i32_valid(ofs, len) ((ofs) == 8 && (len) == 8) +#define TCG_TARGET_extract_i64_valid(ofs, len) \ + (((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32) static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { } +static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, + uintptr_t jmp_addr, uintptr_t addr) +{ + /* patch the branch destination */ + *(int32_t *)jmp_addr = addr - (jmp_addr + 4); + /* no need to flush icache explicitly */ +} + +/* This defines the natural memory order supported by this + * architecture before guarantees made by various barrier + * instructions. + * + * The x86 has a pretty strong memory ordering which only really + * allows for some stores to be re-ordered after loads. + */ +#include "tcg/tcg-mo.h" + +#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) + +#define TCG_TARGET_HAS_MEMORY_BSWAP 1 + +#define TCG_TARGET_NEED_LDST_LABELS +#define TCG_TARGET_NEED_POOL_LABELS + #endif diff --git a/qemu/tcg/i386/tcg-target.inc.c b/qemu/tcg/i386/tcg-target.inc.c new file mode 100644 index 00000000..260de96c --- /dev/null +++ b/qemu/tcg/i386/tcg-target.inc.c @@ -0,0 +1,3918 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "../tcg-pool.inc.c" + +#ifdef CONFIG_DEBUG_TCG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { +#if TCG_TARGET_REG_BITS == 64 + "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi", +#else + "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi", +#endif + "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", + "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", +#if TCG_TARGET_REG_BITS == 64 + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", +#endif +}; +#endif + +static const int tcg_target_reg_alloc_order[] = { +#if TCG_TARGET_REG_BITS == 64 + TCG_REG_RBP, + TCG_REG_RBX, + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R14, + TCG_REG_R15, + TCG_REG_R10, + TCG_REG_R11, + TCG_REG_R9, + TCG_REG_R8, + TCG_REG_RCX, + TCG_REG_RDX, + TCG_REG_RSI, + TCG_REG_RDI, + TCG_REG_RAX, +#else + TCG_REG_EBX, + TCG_REG_ESI, + TCG_REG_EDI, + TCG_REG_EBP, + TCG_REG_ECX, + TCG_REG_EDX, + TCG_REG_EAX, +#endif + TCG_REG_XMM0, + TCG_REG_XMM1, + TCG_REG_XMM2, + TCG_REG_XMM3, + TCG_REG_XMM4, + TCG_REG_XMM5, +#ifndef _WIN64 + /* The Win64 ABI has xmm6-xmm15 as caller-saves, and we do not save + any of them. Therefore only allow xmm0-xmm5 to be allocated. */ + TCG_REG_XMM6, + TCG_REG_XMM7, +#if TCG_TARGET_REG_BITS == 64 + TCG_REG_XMM8, + TCG_REG_XMM9, + TCG_REG_XMM10, + TCG_REG_XMM11, + TCG_REG_XMM12, + TCG_REG_XMM13, + TCG_REG_XMM14, + TCG_REG_XMM15, +#endif +#endif +}; + +static const int tcg_target_call_iarg_regs[] = { +#if TCG_TARGET_REG_BITS == 64 +#if defined(_WIN64) + TCG_REG_RCX, + TCG_REG_RDX, +#else + TCG_REG_RDI, + TCG_REG_RSI, + TCG_REG_RDX, + TCG_REG_RCX, +#endif + TCG_REG_R8, + TCG_REG_R9, +#else + /* 32 bit mode uses stack based calling convention (GCC default). */ +#ifdef _MSC_VER + 0, // MSVC needs dummy value to avoid empty array +#endif +#endif +}; + +static const int tcg_target_call_oarg_regs[] = { + TCG_REG_EAX, +#if TCG_TARGET_REG_BITS == 32 + TCG_REG_EDX +#endif +}; + +/* Constants we accept. */ +#define TCG_CT_CONST_S32 0x100 +#define TCG_CT_CONST_U32 0x200 +#define TCG_CT_CONST_I32 0x400 +#define TCG_CT_CONST_WSZ 0x800 + +/* Registers used with L constraint, which are the first argument + registers on x86_64, and two random call clobbered registers on + i386. */ +#if TCG_TARGET_REG_BITS == 64 +# define TCG_REG_L0 tcg_target_call_iarg_regs[0] +# define TCG_REG_L1 tcg_target_call_iarg_regs[1] +#else +# define TCG_REG_L0 TCG_REG_EAX +# define TCG_REG_L1 TCG_REG_EDX +#endif + +/* The host compiler should supply <cpuid.h> to enable runtime features + detection, as we're not going to go so far as our own inline assembly. + If not available, default values will be assumed. */ +#if defined(CONFIG_CPUID_H) +#include "qemu/cpuid.h" +#endif + +/* For 64-bit, we always know that CMOV is available. */ +#if TCG_TARGET_REG_BITS == 64 +# define have_cmov 1 +#elif defined(CONFIG_CPUID_H) +static bool have_cmov; +#else +# define have_cmov 0 +#endif + +/* We need these symbols in tcg-target.h, and we can't properly conditionalize + it there. Therefore we always define the variable.
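+ When cpuid probing is unavailable, they simply remain false.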
*/ +bool have_bmi1; +bool have_popcnt; +bool have_avx1; +bool have_avx2; + +#ifdef CONFIG_CPUID_H +static bool have_movbe; +static bool have_bmi2; +static bool have_lzcnt; +#else +# define have_movbe 0 +# define have_bmi2 0 +# define have_lzcnt 0 +#endif + +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + value += addend; + switch(type) { + case R_386_PC32: + value -= (uintptr_t)code_ptr; + if (value != (int32_t)value) { + return false; + } + /* FALLTHRU */ + case R_386_32: + tcg_patch32(code_ptr, value); + break; + case R_386_PC8: + value -= (uintptr_t)code_ptr; + if (value != (int8_t)value) { + return false; + } + tcg_patch8(code_ptr, value); + break; + default: + tcg_abort(); + } + return true; +} + +#if TCG_TARGET_REG_BITS == 64 +#define ALL_GENERAL_REGS 0x0000ffffu +#define ALL_VECTOR_REGS 0xffff0000u +#else +#define ALL_GENERAL_REGS 0x000000ffu +#define ALL_VECTOR_REGS 0x00ff0000u +#endif + +/* parse target specific constraints */ +static const char *target_parse_constraint(TCGArgConstraint *ct, + const char *ct_str, TCGType type) +{ + switch(*ct_str++) { + case 'a': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX); + break; + case 'b': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX); + break; + case 'c': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX); + break; + case 'd': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX); + break; + case 'S': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI); + break; + case 'D': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI); + break; + case 'q': + /* A register that can be used as a byte operand. */ + ct->ct |= TCG_CT_REG; + ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf; + break; + case 'Q': + /* A register with an addressable second byte (e.g. %ah). */ + ct->ct |= TCG_CT_REG; + ct->u.regs = 0xf; + break; + case 'r': + /* A general register. */ + ct->ct |= TCG_CT_REG; + ct->u.regs |= ALL_GENERAL_REGS; + break; + case 'W': + /* With TZCNT/LZCNT, we can have operand-size as an input. */ + ct->ct |= TCG_CT_CONST_WSZ; + break; + case 'x': + /* A vector register. */ + ct->ct |= TCG_CT_REG; + ct->u.regs |= ALL_VECTOR_REGS; + break; + + /* qemu_ld/st address constraint */ + case 'L': + ct->ct |= TCG_CT_REG; + ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff; + tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1); + break; + + case 'e': + ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_S32); + break; + case 'Z': + ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_U32); + break; + case 'I': + ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_I32); + break; + + default: + return NULL; + } + return ct_str; +} + +/* test if a constant matches the constraint */ +static inline int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct = arg_ct->ct; + if (ct & TCG_CT_CONST) { + return 1; + } + if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { + return 1; + } + if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { + return 1; + } + if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) { + return 1; + } + if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 
32 : 64)) { + return 1; + } + return 0; +} + +# define LOWREGMASK(x) ((x) & 7) + +#define P_EXT 0x100 /* 0x0f opcode prefix */ +#define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */ +#define P_DATA16 0x400 /* 0x66 opcode prefix */ +#if TCG_TARGET_REG_BITS == 64 +# define P_REXW 0x1000 /* Set REX.W = 1 */ +# define P_REXB_R 0x2000 /* REG field as byte register */ +# define P_REXB_RM 0x4000 /* R/M field as byte register */ +# define P_GS 0x8000 /* gs segment override */ +#else +# define P_REXW 0 +# define P_REXB_R 0 +# define P_REXB_RM 0 +# define P_GS 0 +#endif +#define P_EXT3A 0x10000 /* 0x0f 0x3a opcode prefix */ +#define P_SIMDF3 0x20000 /* 0xf3 opcode prefix */ +#define P_SIMDF2 0x40000 /* 0xf2 opcode prefix */ +#define P_VEXL 0x80000 /* Set VEX.L = 1 */ + +#define OPC_ARITH_EvIz (0x81) +#define OPC_ARITH_EvIb (0x83) +#define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */ +#define OPC_ANDN (0xf2 | P_EXT38) +#define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3)) +#define OPC_AND_GvEv (OPC_ARITH_GvEv | (ARITH_AND << 3)) +#define OPC_BLENDPS (0x0c | P_EXT3A | P_DATA16) +#define OPC_BSF (0xbc | P_EXT) +#define OPC_BSR (0xbd | P_EXT) +#define OPC_BSWAP (0xc8 | P_EXT) +#define OPC_CALL_Jz (0xe8) +#define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */ +#define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3)) +#define OPC_DEC_r32 (0x48) +#define OPC_IMUL_GvEv (0xaf | P_EXT) +#define OPC_IMUL_GvEvIb (0x6b) +#define OPC_IMUL_GvEvIz (0x69) +#define OPC_INC_r32 (0x40) +#define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */ +#define OPC_JCC_short (0x70) /* ... plus condition code */ +#define OPC_JMP_long (0xe9) +#define OPC_JMP_short (0xeb) +#define OPC_LEA (0x8d) +#define OPC_LZCNT (0xbd | P_EXT | P_SIMDF3) +#define OPC_MOVB_EvGv (0x88) /* stores, more or less */ +#define OPC_MOVL_EvGv (0x89) /* stores, more or less */ +#define OPC_MOVL_GvEv (0x8b) /* loads, more or less */ +#define OPC_MOVB_EvIz (0xc6) +#define OPC_MOVL_EvIz (0xc7) +#define OPC_MOVL_Iv (0xb8) +#define OPC_MOVBE_GyMy (0xf0 | P_EXT38) +#define OPC_MOVBE_MyGy (0xf1 | P_EXT38) +#define OPC_MOVD_VyEy (0x6e | P_EXT | P_DATA16) +#define OPC_MOVD_EyVy (0x7e | P_EXT | P_DATA16) +#define OPC_MOVDDUP (0x12 | P_EXT | P_SIMDF2) +#define OPC_MOVDQA_VxWx (0x6f | P_EXT | P_DATA16) +#define OPC_MOVDQA_WxVx (0x7f | P_EXT | P_DATA16) +#define OPC_MOVDQU_VxWx (0x6f | P_EXT | P_SIMDF3) +#define OPC_MOVDQU_WxVx (0x7f | P_EXT | P_SIMDF3) +#define OPC_MOVQ_VqWq (0x7e | P_EXT | P_SIMDF3) +#define OPC_MOVQ_WqVq (0xd6 | P_EXT | P_DATA16) +#define OPC_MOVSBL (0xbe | P_EXT) +#define OPC_MOVSWL (0xbf | P_EXT) +#define OPC_MOVSLQ (0x63 | P_REXW) +#define OPC_MOVZBL (0xb6 | P_EXT) +#define OPC_MOVZWL (0xb7 | P_EXT) +#define OPC_PABSB (0x1c | P_EXT38 | P_DATA16) +#define OPC_PABSW (0x1d | P_EXT38 | P_DATA16) +#define OPC_PABSD (0x1e | P_EXT38 | P_DATA16) +#define OPC_PACKSSDW (0x6b | P_EXT | P_DATA16) +#define OPC_PACKSSWB (0x63 | P_EXT | P_DATA16) +#define OPC_PACKUSDW (0x2b | P_EXT38 | P_DATA16) +#define OPC_PACKUSWB (0x67 | P_EXT | P_DATA16) +#define OPC_PADDB (0xfc | P_EXT | P_DATA16) +#define OPC_PADDW (0xfd | P_EXT | P_DATA16) +#define OPC_PADDD (0xfe | P_EXT | P_DATA16) +#define OPC_PADDQ (0xd4 | P_EXT | P_DATA16) +#define OPC_PADDSB (0xec | P_EXT | P_DATA16) +#define OPC_PADDSW (0xed | P_EXT | P_DATA16) +#define OPC_PADDUB (0xdc | P_EXT | P_DATA16) +#define OPC_PADDUW (0xdd | P_EXT | P_DATA16) +#define OPC_PAND (0xdb | P_EXT | P_DATA16) +#define OPC_PANDN (0xdf | P_EXT | P_DATA16) +#define OPC_PBLENDW (0x0e | P_EXT3A | P_DATA16) 
+#define OPC_PCMPEQB (0x74 | P_EXT | P_DATA16) +#define OPC_PCMPEQW (0x75 | P_EXT | P_DATA16) +#define OPC_PCMPEQD (0x76 | P_EXT | P_DATA16) +#define OPC_PCMPEQQ (0x29 | P_EXT38 | P_DATA16) +#define OPC_PCMPGTB (0x64 | P_EXT | P_DATA16) +#define OPC_PCMPGTW (0x65 | P_EXT | P_DATA16) +#define OPC_PCMPGTD (0x66 | P_EXT | P_DATA16) +#define OPC_PCMPGTQ (0x37 | P_EXT38 | P_DATA16) +#define OPC_PMAXSB (0x3c | P_EXT38 | P_DATA16) +#define OPC_PMAXSW (0xee | P_EXT | P_DATA16) +#define OPC_PMAXSD (0x3d | P_EXT38 | P_DATA16) +#define OPC_PMAXUB (0xde | P_EXT | P_DATA16) +#define OPC_PMAXUW (0x3e | P_EXT38 | P_DATA16) +#define OPC_PMAXUD (0x3f | P_EXT38 | P_DATA16) +#define OPC_PMINSB (0x38 | P_EXT38 | P_DATA16) +#define OPC_PMINSW (0xea | P_EXT | P_DATA16) +#define OPC_PMINSD (0x39 | P_EXT38 | P_DATA16) +#define OPC_PMINUB (0xda | P_EXT | P_DATA16) +#define OPC_PMINUW (0x3a | P_EXT38 | P_DATA16) +#define OPC_PMINUD (0x3b | P_EXT38 | P_DATA16) +#define OPC_PMOVSXBW (0x20 | P_EXT38 | P_DATA16) +#define OPC_PMOVSXWD (0x23 | P_EXT38 | P_DATA16) +#define OPC_PMOVSXDQ (0x25 | P_EXT38 | P_DATA16) +#define OPC_PMOVZXBW (0x30 | P_EXT38 | P_DATA16) +#define OPC_PMOVZXWD (0x33 | P_EXT38 | P_DATA16) +#define OPC_PMOVZXDQ (0x35 | P_EXT38 | P_DATA16) +#define OPC_PMULLW (0xd5 | P_EXT | P_DATA16) +#define OPC_PMULLD (0x40 | P_EXT38 | P_DATA16) +#define OPC_POR (0xeb | P_EXT | P_DATA16) +#define OPC_PSHUFB (0x00 | P_EXT38 | P_DATA16) +#define OPC_PSHUFD (0x70 | P_EXT | P_DATA16) +#define OPC_PSHUFLW (0x70 | P_EXT | P_SIMDF2) +#define OPC_PSHUFHW (0x70 | P_EXT | P_SIMDF3) +#define OPC_PSHIFTW_Ib (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */ +#define OPC_PSHIFTD_Ib (0x72 | P_EXT | P_DATA16) /* /2 /6 /4 */ +#define OPC_PSHIFTQ_Ib (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */ +#define OPC_PSLLW (0xf1 | P_EXT | P_DATA16) +#define OPC_PSLLD (0xf2 | P_EXT | P_DATA16) +#define OPC_PSLLQ (0xf3 | P_EXT | P_DATA16) +#define OPC_PSRAW (0xe1 | P_EXT | P_DATA16) +#define OPC_PSRAD (0xe2 | P_EXT | P_DATA16) +#define OPC_PSRLW (0xd1 | P_EXT | P_DATA16) +#define OPC_PSRLD (0xd2 | P_EXT | P_DATA16) +#define OPC_PSRLQ (0xd3 | P_EXT | P_DATA16) +#define OPC_PSUBB (0xf8 | P_EXT | P_DATA16) +#define OPC_PSUBW (0xf9 | P_EXT | P_DATA16) +#define OPC_PSUBD (0xfa | P_EXT | P_DATA16) +#define OPC_PSUBQ (0xfb | P_EXT | P_DATA16) +#define OPC_PSUBSB (0xe8 | P_EXT | P_DATA16) +#define OPC_PSUBSW (0xe9 | P_EXT | P_DATA16) +#define OPC_PSUBUB (0xd8 | P_EXT | P_DATA16) +#define OPC_PSUBUW (0xd9 | P_EXT | P_DATA16) +#define OPC_PUNPCKLBW (0x60 | P_EXT | P_DATA16) +#define OPC_PUNPCKLWD (0x61 | P_EXT | P_DATA16) +#define OPC_PUNPCKLDQ (0x62 | P_EXT | P_DATA16) +#define OPC_PUNPCKLQDQ (0x6c | P_EXT | P_DATA16) +#define OPC_PUNPCKHBW (0x68 | P_EXT | P_DATA16) +#define OPC_PUNPCKHWD (0x69 | P_EXT | P_DATA16) +#define OPC_PUNPCKHDQ (0x6a | P_EXT | P_DATA16) +#define OPC_PUNPCKHQDQ (0x6d | P_EXT | P_DATA16) +#define OPC_PXOR (0xef | P_EXT | P_DATA16) +#define OPC_POP_r32 (0x58) +#define OPC_POPCNT (0xb8 | P_EXT | P_SIMDF3) +#define OPC_PUSH_r32 (0x50) +#define OPC_PUSH_Iv (0x68) +#define OPC_PUSH_Ib (0x6a) +#define OPC_RET (0xc3) +#define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... 
plus cc */ +#define OPC_SHIFT_1 (0xd1) +#define OPC_SHIFT_Ib (0xc1) +#define OPC_SHIFT_cl (0xd3) +#define OPC_SARX (0xf7 | P_EXT38 | P_SIMDF3) +#define OPC_SHUFPS (0xc6 | P_EXT) +#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16) +#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2) +#define OPC_SHRD_Ib (0xac | P_EXT) +#define OPC_TESTL (0x85) +#define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3) +#define OPC_UD2 (0x0b | P_EXT) +#define OPC_VPBLENDD (0x02 | P_EXT3A | P_DATA16) +#define OPC_VPBLENDVB (0x4c | P_EXT3A | P_DATA16) +#define OPC_VPINSRB (0x20 | P_EXT3A | P_DATA16) +#define OPC_VPINSRW (0xc4 | P_EXT | P_DATA16) +#define OPC_VBROADCASTSS (0x18 | P_EXT38 | P_DATA16) +#define OPC_VBROADCASTSD (0x19 | P_EXT38 | P_DATA16) +#define OPC_VPBROADCASTB (0x78 | P_EXT38 | P_DATA16) +#define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16) +#define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16) +#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16) +#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_REXW) +#define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL) +#define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16) +#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_REXW) +#define OPC_VPSRAVD (0x46 | P_EXT38 | P_DATA16) +#define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16) +#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_REXW) +#define OPC_VZEROUPPER (0x77 | P_EXT) +#define OPC_XCHG_ax_r32 (0x90) + +#define OPC_GRP3_Ev (0xf7) +#define OPC_GRP5 (0xff) +#define OPC_GRP14 (0x73 | P_EXT | P_DATA16) + +/* Group 1 opcode extensions for 0x80-0x83. + These are also used as modifiers for OPC_ARITH. */ +#define ARITH_ADD 0 +#define ARITH_OR 1 +#define ARITH_ADC 2 +#define ARITH_SBB 3 +#define ARITH_AND 4 +#define ARITH_SUB 5 +#define ARITH_XOR 6 +#define ARITH_CMP 7 + +/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */ +#define SHIFT_ROL 0 +#define SHIFT_ROR 1 +#define SHIFT_SHL 4 +#define SHIFT_SHR 5 +#define SHIFT_SAR 7 + +/* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */ +#define EXT3_NOT 2 +#define EXT3_NEG 3 +#define EXT3_MUL 4 +#define EXT3_IMUL 5 +#define EXT3_DIV 6 +#define EXT3_IDIV 7 + +/* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */ +#define EXT5_INC_Ev 0 +#define EXT5_DEC_Ev 1 +#define EXT5_CALLN_Ev 2 +#define EXT5_JMPN_Ev 4 + +/* Condition codes to be added to OPC_JCC_{long,short}. */ +#define JCC_JMP (-1) +#define JCC_JO 0x0 +#define JCC_JNO 0x1 +#define JCC_JB 0x2 +#define JCC_JAE 0x3 +#define JCC_JE 0x4 +#define JCC_JNE 0x5 +#define JCC_JBE 0x6 +#define JCC_JA 0x7 +#define JCC_JS 0x8 +#define JCC_JNS 0x9 +#define JCC_JP 0xa +#define JCC_JNP 0xb +#define JCC_JL 0xc +#define JCC_JGE 0xd +#define JCC_JLE 0xe +#define JCC_JG 0xf + +static const uint8_t tcg_cond_to_jcc[] = { + [TCG_COND_EQ] = JCC_JE, + [TCG_COND_NE] = JCC_JNE, + [TCG_COND_LT] = JCC_JL, + [TCG_COND_GE] = JCC_JGE, + [TCG_COND_LE] = JCC_JLE, + [TCG_COND_GT] = JCC_JG, + [TCG_COND_LTU] = JCC_JB, + [TCG_COND_GEU] = JCC_JAE, + [TCG_COND_LEU] = JCC_JBE, + [TCG_COND_GTU] = JCC_JA, +}; + +#if TCG_TARGET_REG_BITS == 64 +static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x) +{ + int rex; + + if (opc & P_GS) { + tcg_out8(s, 0x65); + } + if (opc & P_DATA16) { + /* We should never be asking for both 16 and 64-bit operation. */ + tcg_debug_assert((opc & P_REXW) == 0); + tcg_out8(s, 0x66); + } + if (opc & P_SIMDF3) { + tcg_out8(s, 0xf3); + } else if (opc & P_SIMDF2) { + tcg_out8(s, 0xf2); + } + + rex = 0; + rex |= (opc & P_REXW) ? 
0x8 : 0x0;                  /* REX.W */
+    rex |= (r & 8) >> 1;        /* REX.R */
+    rex |= (x & 8) >> 2;        /* REX.X */
+    rex |= (rm & 8) >> 3;       /* REX.B */
+
+    /* P_REXB_{R,RM} indicates that the given register is the low byte.
+       For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
+       as otherwise the encoding indicates %[abcd]h.  Note that the values
+       that are ORed in merely indicate that the REX byte must be present;
+       those bits get discarded in output. */
+    rex |= opc & (r >= 4 ? P_REXB_R : 0);
+    rex |= opc & (rm >= 4 ? P_REXB_RM : 0);
+
+    if (rex) {
+        tcg_out8(s, (uint8_t)(rex | 0x40));
+    }
+
+    if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
+        tcg_out8(s, 0x0f);
+        if (opc & P_EXT38) {
+            tcg_out8(s, 0x38);
+        } else if (opc & P_EXT3A) {
+            tcg_out8(s, 0x3a);
+        }
+    }
+
+    tcg_out8(s, opc);
+}
+#else
+static void tcg_out_opc(TCGContext *s, int opc)
+{
+    if (opc & P_DATA16) {
+        tcg_out8(s, 0x66);
+    }
+    if (opc & P_SIMDF3) {
+        tcg_out8(s, 0xf3);
+    } else if (opc & P_SIMDF2) {
+        tcg_out8(s, 0xf2);
+    }
+    if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
+        tcg_out8(s, 0x0f);
+        if (opc & P_EXT38) {
+            tcg_out8(s, 0x38);
+        } else if (opc & P_EXT3A) {
+            tcg_out8(s, 0x3a);
+        }
+    }
+    tcg_out8(s, opc);
+}
+/* Discard the register arguments to tcg_out_opc early, so as not to penalize
+   the 32-bit compilation paths.  This method works with all versions of gcc,
+   whereas relying on optimization may not be able to exclude them. */
+#define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc)
+#endif
+
+static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
+{
+    tcg_out_opc(s, opc, r, rm, 0);
+    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
+}
+
+static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
+                            int rm, int index)
+{
+    int tmp = 0;
+
+    /* Use the two byte form if possible, which cannot encode
+       VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT. */
+    if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT
+        && ((rm | index) & 8) == 0) {
+        /* Two byte VEX prefix. */
+        tcg_out8(s, 0xc5);
+
+        tmp = (r & 8 ? 0 : 0x80);          /* VEX.R */
+    } else {
+        /* Three byte VEX prefix. */
+        tcg_out8(s, 0xc4);
+
+        /* VEX.m-mmmm */
+        if (opc & P_EXT3A) {
+            tmp = 3;
+        } else if (opc & P_EXT38) {
+            tmp = 2;
+        } else if (opc & P_EXT) {
+            tmp = 1;
+        } else {
+            g_assert_not_reached();
+        }
+        tmp |= (r & 8 ? 0 : 0x80);         /* VEX.R */
+        tmp |= (index & 8 ? 0 : 0x40);     /* VEX.X */
+        tmp |= (rm & 8 ? 0 : 0x20);        /* VEX.B */
+        tcg_out8(s, tmp);
+
+        tmp = (opc & P_REXW ? 0x80 : 0);   /* VEX.W */
+    }
+
+    tmp |= (opc & P_VEXL ? 0x04 : 0);      /* VEX.L */
+    /* VEX.pp */
+    if (opc & P_DATA16) {
+        tmp |= 1;                          /* 0x66 */
+    } else if (opc & P_SIMDF3) {
+        tmp |= 2;                          /* 0xf3 */
+    } else if (opc & P_SIMDF2) {
+        tmp |= 3;                          /* 0xf2 */
+    }
+    tmp |= (~v & 15) << 3;                 /* VEX.vvvv */
+    tcg_out8(s, tmp);
+    tcg_out8(s, opc);
+}
+
+static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
+{
+    tcg_out_vex_opc(s, opc, r, v, rm, 0);
+    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
+}
+
+/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
+   We handle either RM and INDEX missing with a negative value.  In 64-bit
+   mode for absolute addresses, ~RM is the size of the immediate operand
+   that will follow the instruction. */
+
+static void tcg_out_sib_offset(TCGContext *s, int r, int rm, int index,
+                               int shift, intptr_t offset)
+{
+    int mod, len;
+
+    if (index < 0 && rm < 0) {
+        if (TCG_TARGET_REG_BITS == 64) {
+            /* Try for a rip-relative addressing mode.  This has replaced
+               the 32-bit-mode absolute addressing encoding. */
+            intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
+            intptr_t disp = offset - pc;
+            if (disp == (int32_t)disp) {
+                tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
+                tcg_out32(s, disp);
+                return;
+            }
+
+            /* Try for an absolute address encoding.  This requires the
+               use of the MODRM+SIB encoding and is therefore larger than
+               rip-relative addressing. */
+            if (offset == (int32_t)offset) {
+                tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
+                tcg_out8(s, (4 << 3) | 5);
+                tcg_out32(s, offset);
+                return;
+            }
+
+            /* ??? The memory isn't directly addressable.
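+               Reaching a 64-bit absolute address that does not fit in
+               32 bits would need a scratch register, which this encoder
+               does not have at its disposal.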
*/ + g_assert_not_reached(); + } else { + /* Absolute address. */ + tcg_out8(s, (r << 3) | 5); + tcg_out32(s, offset); + return; + } + } + + /* Find the length of the immediate addend. Note that the encoding + that would be used for (%ebp) indicates absolute addressing. */ + if (rm < 0) { + mod = 0, len = 4, rm = 5; + } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) { + mod = 0, len = 0; + } else if (offset == (int8_t)offset) { + mod = 0x40, len = 1; + } else { + mod = 0x80, len = 4; + } + + /* Use a single byte MODRM format if possible. Note that the encoding + that would be used for %esp is the escape to the two byte form. */ + if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) { + /* Single byte MODRM format. */ + tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); + } else { + /* Two byte MODRM+SIB format. */ + + /* Note that the encoding that would place %esp into the index + field indicates no index register. In 64-bit mode, the REX.X + bit counts, so %r12 can be used as the index. */ + if (index < 0) { + index = 4; + } else { + tcg_debug_assert(index != TCG_REG_ESP); + } + + tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4); + tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm)); + } + + if (len == 1) { + tcg_out8(s, offset); + } else if (len == 4) { + tcg_out32(s, offset); + } +} + +static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm, + int index, int shift, intptr_t offset) +{ + tcg_out_opc(s, opc, r, rm < 0 ? 0 : rm, index < 0 ? 0 : index); + tcg_out_sib_offset(s, r, rm, index, shift, offset); +} + +static void tcg_out_vex_modrm_sib_offset(TCGContext *s, int opc, int r, int v, + int rm, int index, int shift, + intptr_t offset) +{ + tcg_out_vex_opc(s, opc, r, v, rm < 0 ? 0 : rm, index < 0 ? 0 : index); + tcg_out_sib_offset(s, r, rm, index, shift, offset); +} + +/* A simplification of the above with no index or shift. */ +static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, + int rm, intptr_t offset) +{ + tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset); +} + +static inline void tcg_out_vex_modrm_offset(TCGContext *s, int opc, int r, + int v, int rm, intptr_t offset) +{ + tcg_out_vex_modrm_sib_offset(s, opc, r, v, rm, -1, 0, offset); +} + +/* Output an opcode with an expected reference to the constant pool. */ +static inline void tcg_out_modrm_pool(TCGContext *s, int opc, int r) +{ + tcg_out_opc(s, opc, r, 0, 0); + /* Absolute for 32-bit, pc-relative for 64-bit. */ + tcg_out8(s, LOWREGMASK(r) << 3 | 5); + tcg_out32(s, 0); +} + +/* Output an opcode with an expected reference to the constant pool. */ +static inline void tcg_out_vex_modrm_pool(TCGContext *s, int opc, int r) +{ + tcg_out_vex_opc(s, opc, r, 0, 0, 0); + /* Absolute for 32-bit, pc-relative for 64-bit. */ + tcg_out8(s, LOWREGMASK(r) << 3 | 5); + tcg_out32(s, 0); +} + +/* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */ +static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src) +{ + /* Propagate an opcode prefix, such as P_REXW. 
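       Only the low three bits name the ARITH_* subopcode; the higher bits
       are prefix flags that get folded back into the opcode below.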
*/ + int ext = subop & ~0x7; + subop &= 0x7; + + tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src); +} + +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) +{ + int rexw = 0; + + if (arg == ret) { + return true; + } + switch (type) { + case TCG_TYPE_I64: + rexw = P_REXW; + /* fallthru */ + case TCG_TYPE_I32: + if (ret < 16) { + if (arg < 16) { + tcg_out_modrm(s, OPC_MOVL_GvEv + rexw, ret, arg); + } else { + tcg_out_vex_modrm(s, OPC_MOVD_EyVy + rexw, arg, 0, ret); + } + } else { + if (arg < 16) { + tcg_out_vex_modrm(s, OPC_MOVD_VyEy + rexw, ret, 0, arg); + } else { + tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg); + } + } + break; + + case TCG_TYPE_V64: + tcg_debug_assert(ret >= 16 && arg >= 16); + tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg); + break; + case TCG_TYPE_V128: + tcg_debug_assert(ret >= 16 && arg >= 16); + tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx, ret, 0, arg); + break; + case TCG_TYPE_V256: + tcg_debug_assert(ret >= 16 && arg >= 16); + tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx | P_VEXL, ret, 0, arg); + break; + + default: + g_assert_not_reached(); + } + return true; +} + +static const int avx2_dup_insn[4] = { + OPC_VPBROADCASTB, OPC_VPBROADCASTW, + OPC_VPBROADCASTD, OPC_VPBROADCASTQ, +}; + +static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg r, TCGReg a) +{ + if (have_avx2) { + int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0); + tcg_out_vex_modrm(s, avx2_dup_insn[vece] + vex_l, r, 0, a); + } else { + switch (vece) { + case MO_8: + /* ??? With zero in a register, use PSHUFB. */ + tcg_out_vex_modrm(s, OPC_PUNPCKLBW, r, a, a); + a = r; + /* FALLTHRU */ + case MO_16: + tcg_out_vex_modrm(s, OPC_PUNPCKLWD, r, a, a); + a = r; + /* FALLTHRU */ + case MO_32: + tcg_out_vex_modrm(s, OPC_PSHUFD, r, 0, a); + /* imm8 operand: all output lanes selected from input lane 0. */ + tcg_out8(s, 0); + break; + case MO_64: + tcg_out_vex_modrm(s, OPC_PUNPCKLQDQ, r, a, a); + break; + default: + g_assert_not_reached(); + } + } + return true; +} + +static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg r, TCGReg base, intptr_t offset) +{ + if (have_avx2) { + int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0); + tcg_out_vex_modrm_offset(s, avx2_dup_insn[vece] + vex_l, + r, 0, base, offset); + } else { + switch (vece) { + case MO_64: + tcg_out_vex_modrm_offset(s, OPC_MOVDDUP, r, 0, base, offset); + break; + case MO_32: + tcg_out_vex_modrm_offset(s, OPC_VBROADCASTSS, r, 0, base, offset); + break; + case MO_16: + tcg_out_vex_modrm_offset(s, OPC_VPINSRW, r, r, base, offset); + tcg_out8(s, 0); /* imm8 */ + tcg_out_dup_vec(s, type, vece, r, r); + break; + case MO_8: + tcg_out_vex_modrm_offset(s, OPC_VPINSRB, r, r, base, offset); + tcg_out8(s, 0); /* imm8 */ + tcg_out_dup_vec(s, type, vece, r, r); + break; + default: + g_assert_not_reached(); + } + } + return true; +} + +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long arg) +{ + int vex_l = (type == TCG_TYPE_V256 ? 
P_VEXL : 0); + + if (arg == 0) { + tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret); + return; + } + if (arg == -1) { + tcg_out_vex_modrm(s, OPC_PCMPEQB + vex_l, ret, ret, ret); + return; + } + + if (TCG_TARGET_REG_BITS == 64) { + if (type == TCG_TYPE_V64) { + tcg_out_vex_modrm_pool(s, OPC_MOVQ_VqWq, ret); + } else if (have_avx2) { + tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTQ + vex_l, ret); + } else { + tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret); + } + new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4); + } else { + if (have_avx2) { + tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTW + vex_l, ret); + } else { + tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret); + } + new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0); + } +} + +static void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long arg) +{ + tcg_target_long diff; + + switch (type) { + case TCG_TYPE_I32: +#if TCG_TARGET_REG_BITS == 64 + case TCG_TYPE_I64: +#endif + if (ret < 16) { + break; + } + /* fallthru */ + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + tcg_debug_assert(ret >= 16); + tcg_out_dupi_vec(s, type, ret, arg); + return; + default: + g_assert_not_reached(); + } + + if (arg == 0) { + tgen_arithr(s, ARITH_XOR, ret, ret); + return; + } + if (arg == (uint32_t)arg || type == TCG_TYPE_I32) { + tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0); + tcg_out32(s, arg); + return; + } + if (arg == (int32_t)arg) { + tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret); + tcg_out32(s, arg); + return; + } + + /* Try a 7 byte pc-relative lea before the 10 byte movq. */ + diff = arg - ((uintptr_t)s->code_ptr + 7); + if (diff == (int32_t)diff) { + tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0); + tcg_out8(s, (LOWREGMASK(ret) << 3) | 5); + tcg_out32(s, diff); + return; + } + + tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0); + tcg_out64(s, arg); +} + +static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val) +{ + if (val == (int8_t)val) { + tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0); + tcg_out8(s, val); + } else if (val == (int32_t)val) { + tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0); + tcg_out32(s, val); + } else { + tcg_abort(); + } +} + +static inline void tcg_out_mb(TCGContext *s, TCGArg a0) +{ + /* Given the strength of x86 memory ordering, we only need care for + store-load ordering. Experimentally, "lock orl $0,0(%esp)" is + faster than "mfence", so don't bother with the sse insn. */ + if (a0 & TCG_MO_ST_LD) { + tcg_out8(s, 0xf0); + tcg_out_modrm_offset(s, OPC_ARITH_EvIb, ARITH_OR, TCG_REG_ESP, 0); + tcg_out8(s, 0); + } +} + +static inline void tcg_out_push(TCGContext *s, int reg) +{ + tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0); +} + +static inline void tcg_out_pop(TCGContext *s, int reg) +{ + tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0); +} + +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, + TCGReg arg1, intptr_t arg2) +{ + switch (type) { + case TCG_TYPE_I32: + if (ret < 16) { + tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2); + } else { + tcg_out_vex_modrm_offset(s, OPC_MOVD_VyEy, ret, 0, arg1, arg2); + } + break; + case TCG_TYPE_I64: + if (ret < 16) { + tcg_out_modrm_offset(s, OPC_MOVL_GvEv | P_REXW, ret, arg1, arg2); + break; + } + /* FALLTHRU */ + case TCG_TYPE_V64: + /* There is no instruction that can validate 8-byte alignment. 
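           The MOVQ emitted below simply tolerates an unaligned address.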
 */
+        tcg_debug_assert(ret >= 16);
+        tcg_out_vex_modrm_offset(s, OPC_MOVQ_VqWq, ret, 0, arg1, arg2);
+        break;
+    case TCG_TYPE_V128:
+        /*
+         * The gvec infrastructure asserts that v128 vector loads
+         * and stores use a 16-byte aligned offset.  Validate that the
+         * final pointer is aligned by using an insn that will SIGSEGV.
+         */
+        tcg_debug_assert(ret >= 16);
+        tcg_out_vex_modrm_offset(s, OPC_MOVDQA_VxWx, ret, 0, arg1, arg2);
+        break;
+    case TCG_TYPE_V256:
+        /*
+         * The gvec infrastructure only requires 16-byte alignment,
+         * so here we must use an unaligned load.
+         */
+        tcg_debug_assert(ret >= 16);
+        tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx | P_VEXL,
+                                 ret, 0, arg1, arg2);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
+                       TCGReg arg1, intptr_t arg2)
+{
+    switch (type) {
+    case TCG_TYPE_I32:
+        if (arg < 16) {
+            tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
+        } else {
+            tcg_out_vex_modrm_offset(s, OPC_MOVD_EyVy, arg, 0, arg1, arg2);
+        }
+        break;
+    case TCG_TYPE_I64:
+        if (arg < 16) {
+            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_REXW, arg, arg1, arg2);
+            break;
+        }
+        /* FALLTHRU */
+    case TCG_TYPE_V64:
+        /* There is no instruction that can validate 8-byte alignment. */
+        tcg_debug_assert(arg >= 16);
+        tcg_out_vex_modrm_offset(s, OPC_MOVQ_WqVq, arg, 0, arg1, arg2);
+        break;
+    case TCG_TYPE_V128:
+        /*
+         * The gvec infrastructure asserts that v128 vector loads
+         * and stores use a 16-byte aligned offset.  Validate that the
+         * final pointer is aligned by using an insn that will SIGSEGV.
+         */
+        tcg_debug_assert(arg >= 16);
+        tcg_out_vex_modrm_offset(s, OPC_MOVDQA_WxVx, arg, 0, arg1, arg2);
+        break;
+    case TCG_TYPE_V256:
+        /*
+         * The gvec infrastructure only requires 16-byte alignment,
+         * so here we must use an unaligned store.
+         */
+        tcg_debug_assert(arg >= 16);
+        tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx | P_VEXL,
+                                 arg, 0, arg1, arg2);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+                        TCGReg base, intptr_t ofs)
+{
+    int rexw = 0;
+    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
+        if (val != (int32_t)val) {
+            return false;
+        }
+        rexw = P_REXW;
+    } else if (type != TCG_TYPE_I32) {
+        return false;
+    }
+    tcg_out_modrm_offset(s, OPC_MOVL_EvIz | rexw, 0, base, ofs);
+    tcg_out32(s, val);
+    return true;
+}
+
+static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
+{
+    /* Propagate an opcode prefix, such as P_DATA16.
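+       Only the low three bits name the SHIFT_* subopcode; the higher
+       bits are prefix flags passed through to tcg_out_modrm().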
*/ + int ext = subopc & ~0x7; + subopc &= 0x7; + + if (count == 1) { + tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg); + } else { + tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg); + tcg_out8(s, count); + } +} + +static inline void tcg_out_bswap32(TCGContext *s, int reg) +{ + tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0); +} + +static inline void tcg_out_rolw_8(TCGContext *s, int reg) +{ + tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8); +} + +static inline void tcg_out_ext8u(TCGContext *s, int dest, int src) +{ + /* movzbl */ + tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64); + tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src); +} + +static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw) +{ + /* movsbl */ + tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64); + tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src); +} + +static inline void tcg_out_ext16u(TCGContext *s, int dest, int src) +{ + /* movzwl */ + tcg_out_modrm(s, OPC_MOVZWL, dest, src); +} + +static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw) +{ + /* movsw[lq] */ + tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src); +} + +static inline void tcg_out_ext32u(TCGContext *s, int dest, int src) +{ + /* 32-bit mov zero extends. */ + tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src); +} + +static inline void tcg_out_ext32s(TCGContext *s, int dest, int src) +{ + tcg_out_modrm(s, OPC_MOVSLQ, dest, src); +} + +static inline void tcg_out_bswap64(TCGContext *s, int reg) +{ + tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0); +} + +static void tgen_arithi(TCGContext *s, int c, int r0, + tcg_target_long val, int cf) +{ + int rexw = 0; + + if (TCG_TARGET_REG_BITS == 64) { + rexw = c & -8; + c &= 7; + } + + /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce + partial flags update stalls on Pentium4 and are not recommended + by current Intel optimization manuals. */ + if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) { + int is_inc = (c == ARITH_ADD) ^ (val < 0); + if (TCG_TARGET_REG_BITS == 64) { + /* The single-byte increment encodings are re-tasked as the + REX prefixes. Use the MODRM encoding. */ + tcg_out_modrm(s, OPC_GRP5 + rexw, + (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0); + } else { + tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0); + } + return; + } + + if (c == ARITH_AND) { + if (TCG_TARGET_REG_BITS == 64) { + if (val == 0xffffffffu) { + tcg_out_ext32u(s, r0, r0); + return; + } + if (val == (uint32_t)val) { + /* AND with no high bits set can use a 32-bit operation. */ + rexw = 0; + } + } + if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) { + tcg_out_ext8u(s, r0, r0); + return; + } + if (val == 0xffffu) { + tcg_out_ext16u(s, r0, r0); + return; + } + } + + if (val == (int8_t)val) { + tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0); + tcg_out8(s, val); + return; + } + if (rexw == 0 || val == (int32_t)val) { + tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0); + tcg_out32(s, val); + return; + } + + tcg_abort(); +} + +static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) +{ + if (val != 0) { + tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0); + } +} + +/* Use SMALL != 0 to force a short forward branch. 
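   A short (rel8) branch is smaller, but the caller must then guarantee
   that the target is in range; the tcg_abort() below catches misuse.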
*/ +static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int _small) +{ + int32_t val, val1; + + if (l->has_value) { + val = tcg_pcrel_diff(s, l->u.value_ptr); + val1 = val - 2; + if ((int8_t)val1 == val1) { + if (opc == -1) { + tcg_out8(s, OPC_JMP_short); + } else { + tcg_out8(s, OPC_JCC_short + opc); + } + tcg_out8(s, val1); + } else { + if (_small) { + tcg_abort(); + } + if (opc == -1) { + tcg_out8(s, OPC_JMP_long); + tcg_out32(s, val - 5); + } else { + tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0); + tcg_out32(s, val - 6); + } + } + } else if (_small) { + if (opc == -1) { + tcg_out8(s, OPC_JMP_short); + } else { + tcg_out8(s, OPC_JCC_short + opc); + } + tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1); + s->code_ptr += 1; + } else { + if (opc == -1) { + tcg_out8(s, OPC_JMP_long); + } else { + tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0); + } + tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4); + s->code_ptr += 4; + } +} + +static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2, + int const_arg2, int rexw) +{ + if (const_arg2) { + if (arg2 == 0) { + /* test r, r */ + tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1); + } else { + tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0); + } + } else { + tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2); + } +} + +static void tcg_out_brcond32(TCGContext *s, TCGCond cond, + TCGArg arg1, TCGArg arg2, int const_arg2, + TCGLabel *label, int _small) +{ + tcg_out_cmp(s, arg1, arg2, const_arg2, 0); + tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, _small); +} + +#if TCG_TARGET_REG_BITS == 64 +static void tcg_out_brcond64(TCGContext *s, TCGCond cond, + TCGArg arg1, TCGArg arg2, int const_arg2, + TCGLabel *label, int _small) +{ + tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW); + tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, _small); +} +#else +/* XXX: we implement it at the target level to avoid having to + handle cross basic blocks temporaries */ +static void tcg_out_brcond2(TCGContext *s, const TCGArg *args, + const int *const_args, int _small) +{ + TCGLabel *label_next = gen_new_label(s); + TCGLabel *label_this = arg_label(args[5]); + + switch(args[4]) { + case TCG_COND_EQ: + tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2], + label_next, 1); + tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3], + label_this, _small); + break; + case TCG_COND_NE: + tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2], + label_this, _small); + tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3], + label_this, _small); + break; + case TCG_COND_LT: + tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3], + label_this, _small); + tcg_out_jxx(s, JCC_JNE, label_next, 1); + tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2], + label_this, _small); + break; + case TCG_COND_LE: + tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3], + label_this, _small); + tcg_out_jxx(s, JCC_JNE, label_next, 1); + tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2], + label_this, _small); + break; + case TCG_COND_GT: + tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3], + label_this, _small); + tcg_out_jxx(s, JCC_JNE, label_next, 1); + tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2], + label_this, _small); + break; + case TCG_COND_GE: + tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3], + label_this, _small); + tcg_out_jxx(s, JCC_JNE, label_next, 1); + tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2], + label_this, _small); + 
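/* The unsigned cases below follow the same pattern: test the
           high words first, then the low words when they are equal. */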
break; + case TCG_COND_LTU: + tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3], + label_this, _small); + tcg_out_jxx(s, JCC_JNE, label_next, 1); + tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2], + label_this, _small); + break; + case TCG_COND_LEU: + tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3], + label_this, _small); + tcg_out_jxx(s, JCC_JNE, label_next, 1); + tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2], + label_this, _small); + break; + case TCG_COND_GTU: + tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3], + label_this, _small); + tcg_out_jxx(s, JCC_JNE, label_next, 1); + tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2], + label_this, _small); + break; + case TCG_COND_GEU: + tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3], + label_this, _small); + tcg_out_jxx(s, JCC_JNE, label_next, 1); + tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2], + label_this, _small); + break; + default: + tcg_abort(); + } + tcg_out_label(s, label_next, s->code_ptr); +} +#endif + +static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest, + TCGArg arg1, TCGArg arg2, int const_arg2) +{ + tcg_out_cmp(s, arg1, arg2, const_arg2, 0); + tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest); + tcg_out_ext8u(s, dest, dest); +} + +#if TCG_TARGET_REG_BITS == 64 +static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest, + TCGArg arg1, TCGArg arg2, int const_arg2) +{ + tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW); + tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest); + tcg_out_ext8u(s, dest, dest); +} +#else +static void tcg_out_setcond2(TCGContext *s, const TCGArg *args, + const int *const_args) +{ + TCGArg new_args[6]; + TCGLabel *label_true, *label_over; + + memcpy(new_args, args+1, 5*sizeof(TCGArg)); + + if (args[0] == args[1] || args[0] == args[2] + || (!const_args[3] && args[0] == args[3]) + || (!const_args[4] && args[0] == args[4])) { + /* When the destination overlaps with one of the argument + registers, don't do anything tricky. */ + label_true = gen_new_label(s); + label_over = gen_new_label(s); + + new_args[5] = label_arg(label_true); + tcg_out_brcond2(s, new_args, const_args+1, 1); + + tcg_out_movi(s, TCG_TYPE_I32, args[0], 0); + tcg_out_jxx(s, JCC_JMP, label_over, 1); + tcg_out_label(s, label_true, s->code_ptr); + + tcg_out_movi(s, TCG_TYPE_I32, args[0], 1); + tcg_out_label(s, label_over, s->code_ptr); + } else { + /* When the destination does not overlap one of the arguments, + clear the destination first, jump if cond false, and emit an + increment in the true case. This results in smaller code. 
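           (In effect xor + conditional branch + inc, instead of the
           branch/mov/jmp/mov sequence of the overlapping case above.)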
 */
+
+        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
+
+        label_over = gen_new_label(s);
+        new_args[4] = tcg_invert_cond(new_args[4]);
+        new_args[5] = label_arg(label_over);
+        tcg_out_brcond2(s, new_args, const_args+1, 1);
+
+        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
+        tcg_out_label(s, label_over, s->code_ptr);
+    }
+}
+#endif
+
+static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
+                         TCGReg dest, TCGReg v1)
+{
+    if (have_cmov) {
+        tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
+    } else {
+        TCGLabel *over = gen_new_label(s);
+        tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
+        tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
+        tcg_out_label(s, over, s->code_ptr);
+    }
+}
+
+static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
+                              TCGReg c1, TCGArg c2, int const_c2,
+                              TCGReg v1)
+{
+    tcg_out_cmp(s, c1, c2, const_c2, 0);
+    tcg_out_cmov(s, cond, 0, dest, v1);
+}
+
+#if TCG_TARGET_REG_BITS == 64
+static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
+                              TCGReg c1, TCGArg c2, int const_c2,
+                              TCGReg v1)
+{
+    tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
+    tcg_out_cmov(s, cond, P_REXW, dest, v1);
+}
+#endif
+
+static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
+                        TCGArg arg2, bool const_a2)
+{
+    if (have_bmi1) {
+        tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
+        if (const_a2) {
+            tcg_debug_assert(arg2 == (rexw ? 64 : 32));
+        } else {
+            tcg_debug_assert(dest != arg2);
+            tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
+        }
+    } else {
+        tcg_debug_assert(dest != arg2);
+        tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
+        tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
+    }
+}
+
+static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
+                        TCGArg arg2, bool const_a2)
+{
+    if (have_lzcnt) {
+        tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
+        if (const_a2) {
+            tcg_debug_assert(arg2 == (rexw ? 64 : 32));
+        } else {
+            tcg_debug_assert(dest != arg2);
+            tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
+        }
+    } else {
+        tcg_debug_assert(!const_a2);
+        tcg_debug_assert(dest != arg1);
+        tcg_debug_assert(dest != arg2);
+
+        /* Recall that the output of BSR is the index not the count. */
+        tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
+        tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
+
+        /* Since we have destroyed the flags from BSR, we have to re-test. */
+        tcg_out_cmp(s, arg1, 0, 1, rexw);
+        tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
+    }
+}
+
+static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
+{
+    intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
+
+    if (disp == (int32_t)disp) {
+        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
+        tcg_out32(s, disp);
+    } else {
+        /* rip-relative addressing into the constant pool.
+           This is 6 + 8 = 14 bytes, as compared to using an
+           immediate load 10 + 6 = 16 bytes, plus we may
+           be able to re-use the pool constant for more calls. */
+        tcg_out_opc(s, OPC_GRP5, 0, 0, 0);
+        tcg_out8(s, (call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev) << 3 | 5);
+        new_pool_label(s, (uintptr_t)dest, R_386_PC32, s->code_ptr, -4);
+        tcg_out32(s, 0);
+    }
+}
+
+static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
+{
+    tcg_out_branch(s, 1, dest);
+}
+
+static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
+{
+    tcg_out_branch(s, 0, dest);
+}
+
+static void tcg_out_nopn(TCGContext *s, int n)
+{
+    int i;
+    /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
+     * "xchg %eax,%eax", forming "xchg %ax,%ax".
All cores accept the + * duplicate prefix, and all of the interesting recent cores can + * decode and discard the duplicates in a single cycle. + */ + tcg_debug_assert(n >= 1); + for (i = 1; i < n; ++i) { + tcg_out8(s, 0x66); + } + tcg_out8(s, 0x90); +} + +#include "../tcg-ldst.inc.c" + +/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, + * int mmu_idx, uintptr_t ra) + */ +static void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, +}; + +/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, + * uintxx_t val, int mmu_idx, uintptr_t ra) + */ +static void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, +}; + +/* Perform the TLB load and compare. + + Inputs: + ADDRLO and ADDRHI contain the low and high part of the address. + + MEM_INDEX and S_BITS are the memory context and log2 size of the load. + + WHICH is the offset into the CPUTLBEntry structure of the slot to read. + This should be offsetof addr_read or addr_write. + + Outputs: + LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses) + positions of the displacements of forward jumps to the TLB miss case. + + Second argument register is loaded with the low part of the address. + In the TLB hit case, it has been adjusted as indicated by the TLB + and so is a host address. In the TLB miss case, it continues to + hold a guest address. + + First argument register is clobbered. */ + +static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, + int mem_index, MemOp opc, + tcg_insn_unit **label_ptr, int which) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = s->uc; +#endif + const TCGReg r0 = TCG_REG_L0; + const TCGReg r1 = TCG_REG_L1; + TCGType ttype = TCG_TYPE_I32; + TCGType tlbtype = TCG_TYPE_I32; + int trexw = 0, hrexw = 0, tlbrexw = 0; + unsigned a_bits = get_alignment_bits(opc); + unsigned s_bits = opc & MO_SIZE; + unsigned a_mask = (1 << a_bits) - 1; + unsigned s_mask = (1 << s_bits) - 1; + target_ulong tlb_mask; + + if (TCG_TARGET_REG_BITS == 64) { + if (TARGET_LONG_BITS == 64) { + ttype = TCG_TYPE_I64; + trexw = P_REXW; + } + if (TCG_TYPE_PTR == TCG_TYPE_I64) { + hrexw = P_REXW; + if (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32) { + tlbtype = TCG_TYPE_I64; + tlbrexw = P_REXW; + } + } + } + + tcg_out_mov(s, tlbtype, r0, addrlo); + tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + + tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0, + TLB_MASK_TABLE_OFS(mem_index) + + offsetof(CPUTLBDescFast, mask)); + + tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0, + TLB_MASK_TABLE_OFS(mem_index) + + offsetof(CPUTLBDescFast, table)); + + /* If the required alignment is at least as large as the access, simply + copy the address and mask. For lesser alignments, check that we don't + cross pages for the complete access. 
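       The LEA below folds (access size - alignment) into the address, so
       the single masked compare rejects both misaligned and page-crossing
       accesses.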
 */
+    if (a_bits >= s_bits) {
+        tcg_out_mov(s, ttype, r1, addrlo);
+    } else {
+        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
+    }
+    tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
+    tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
+
+    /* cmp 0(r0), r1 */
+    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which);
+
+    /* Prepare for both the fast path add of the tlb addend, and the slow
+       path function argument setup. */
+    tcg_out_mov(s, ttype, r1, addrlo);
+
+    // Unicorn: fast path if hookmem is not enabled
+    if (!HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ) && !HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE))
+        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
+    else
+        /* slow_path, so data access will go via load_helper() */
+        tcg_out_opc(s, OPC_JMP_long, 0, 0, 0);
+
+    label_ptr[0] = s->code_ptr;
+    s->code_ptr += 4;
+
+    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+        /* cmp 4(r0), addrhi */
+        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4);
+
+        /* jne slow_path */
+        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
+        label_ptr[1] = s->code_ptr;
+        s->code_ptr += 4;
+    }
+
+    /* TLB Hit. */
+
+    /* add addend(r0), r1 */
+    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
+                         offsetof(CPUTLBEntry, addend));
+}
+
+/*
+ * Record the context of a call to the out of line helper code for the slow path
+ * for a load or store, so that we can later generate the correct helper code
+ */
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
+                                TCGMemOpIdx oi,
+                                TCGReg datalo, TCGReg datahi,
+                                TCGReg addrlo, TCGReg addrhi,
+                                tcg_insn_unit *raddr,
+                                tcg_insn_unit **label_ptr)
+{
+    TCGLabelQemuLdst *label = new_ldst_label(s);
+
+    label->is_ld = is_ld;
+    label->oi = oi;
+    label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+    label->datalo_reg = datalo;
+    label->datahi_reg = datahi;
+    label->addrlo_reg = addrlo;
+    label->addrhi_reg = addrhi;
+    label->raddr = raddr;
+    label->label_ptr[0] = label_ptr[0];
+    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+        label->label_ptr[1] = label_ptr[1];
+    }
+}
+
+/*
+ * Generate code for the slow path for a load at the end of block
+ */
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+    TCGMemOpIdx oi = l->oi;
+    MemOp opc = get_memop(oi);
+    TCGReg data_reg;
+    tcg_insn_unit **label_ptr = &l->label_ptr[0];
+    int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0);
+
+    /* resolve label address */
+    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
+    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
+    }
+
+    if (TCG_TARGET_REG_BITS == 32) {
+        int ofs = 0;
+
+        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        if (TARGET_LONG_BITS == 64) {
+            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+            ofs += 4;
+        }
+
+        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
+    } else {
+        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+        /* The second argument is already loaded with addrlo.
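+           by tcg_out_tlb_load(), so only the oi and return-address
+           arguments remain to be set up.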
*/ + tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi); + tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3], + (uintptr_t)l->raddr); + } + + tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); + + data_reg = l->datalo_reg; + switch (opc & MO_SSIZE) { + case MO_SB: + tcg_out_ext8s(s, data_reg, TCG_REG_EAX, rexw); + break; + case MO_SW: + tcg_out_ext16s(s, data_reg, TCG_REG_EAX, rexw); + break; +#if TCG_TARGET_REG_BITS == 64 + case MO_SL: + tcg_out_ext32s(s, data_reg, TCG_REG_EAX); + break; +#endif + case MO_UB: + case MO_UW: + /* Note that the helpers have zero-extended to tcg_target_long. */ + case MO_UL: + tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); + break; + case MO_Q: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX); + } else if (data_reg == TCG_REG_EDX) { + /* xchg %edx, %eax */ + tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0); + tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX); + } else { + tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); + tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX); + } + break; + default: + tcg_abort(); + } + + /* Jump to the code corresponding to next IR of qemu_st */ + tcg_out_jmp(s, l->raddr); + return true; +} + +/* + * Generate code for the slow path for a store at the end of block + */ +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + TCGMemOpIdx oi = l->oi; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; + tcg_insn_unit **label_ptr = &l->label_ptr[0]; + TCGReg retaddr; + + /* resolve label address */ + tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4); + if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { + tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4); + } + + if (TCG_TARGET_REG_BITS == 32) { + int ofs = 0; + + tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); + ofs += 4; + + tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); + ofs += 4; + + if (TARGET_LONG_BITS == 64) { + tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); + ofs += 4; + } + + tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs); + ofs += 4; + + if (s_bits == MO_64) { + tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs); + ofs += 4; + } + + tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs); + ofs += 4; + + retaddr = TCG_REG_EAX; + tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); + tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs); + } else { + tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); + /* The second argument is already loaded with addrlo. */ + tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), + tcg_target_call_iarg_regs[2], l->datalo_reg); + tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi); + + if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) { + retaddr = tcg_target_call_iarg_regs[4]; + tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); + } else { + retaddr = TCG_REG_RAX; + tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); + tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, + TCG_TARGET_CALL_STACK_OFFSET); + } + } + + /* "Tail call" to the helper, with the return address back inline. 
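       Pushing the return address and jumping lets the helper's own RET
       transfer control straight back to the translated code.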
*/ + tcg_out_push(s, retaddr); + tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); + return true; +} + +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg base, int index, intptr_t ofs, + int seg, bool is64, MemOp memop) +{ + const MemOp real_bswap = memop & MO_BSWAP; + MemOp bswap = real_bswap; + int rexw = is64 * P_REXW; + int movop = OPC_MOVL_GvEv; + + if (have_movbe && real_bswap) { + bswap = 0; + movop = OPC_MOVBE_GyMy; + } + + switch (memop & MO_SSIZE) { + case MO_UB: + tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo, + base, index, 0, ofs); + break; + case MO_SB: + tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo, + base, index, 0, ofs); + break; + case MO_UW: + tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo, + base, index, 0, ofs); + if (real_bswap) { + tcg_out_rolw_8(s, datalo); + } + break; + case MO_SW: + if (real_bswap) { + if (have_movbe) { + tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg, + datalo, base, index, 0, ofs); + } else { + tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo, + base, index, 0, ofs); + tcg_out_rolw_8(s, datalo); + } + tcg_out_modrm(s, OPC_MOVSWL + rexw, datalo, datalo); + } else { + tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg, + datalo, base, index, 0, ofs); + } + break; + case MO_UL: + tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); + if (bswap) { + tcg_out_bswap32(s, datalo); + } + break; +#if TCG_TARGET_REG_BITS == 64 + case MO_SL: + if (real_bswap) { + tcg_out_modrm_sib_offset(s, movop + seg, datalo, + base, index, 0, ofs); + if (bswap) { + tcg_out_bswap32(s, datalo); + } + tcg_out_ext32s(s, datalo, datalo); + } else { + tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo, + base, index, 0, ofs); + } + break; +#endif + case MO_Q: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo, + base, index, 0, ofs); + if (bswap) { + tcg_out_bswap64(s, datalo); + } + } else { + if (real_bswap) { + int t = datalo; + datalo = datahi; + datahi = t; + } + if (base != datalo) { + tcg_out_modrm_sib_offset(s, movop + seg, datalo, + base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, movop + seg, datahi, + base, index, 0, ofs + 4); + } else { + tcg_out_modrm_sib_offset(s, movop + seg, datahi, + base, index, 0, ofs + 4); + tcg_out_modrm_sib_offset(s, movop + seg, datalo, + base, index, 0, ofs); + } + if (bswap) { + tcg_out_bswap32(s, datalo); + tcg_out_bswap32(s, datahi); + } + } + break; + default: + tcg_abort(); + } +} + +/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and + EAX. It will be useful once fixed registers globals are less + common. */ +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) +{ + TCGReg datalo, datahi, addrlo; + TCGReg addrhi QEMU_UNUSED_VAR; + TCGMemOpIdx oi; + MemOp opc; + int mem_index; + tcg_insn_unit *label_ptr[2]; + + datalo = *args++; + datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); + addrlo = *args++; + addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); + oi = *args++; + opc = get_memop(oi); + + mem_index = get_mmuidx(oi); + + tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc, + label_ptr, offsetof(CPUTLBEntry, addr_read)); + + /* TLB Hit. 
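 TCG_REG_L1 now holds the host address for the load.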
*/ + tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc); + + /* Record the current context of a load into ldst label */ + add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi, + s->code_ptr, label_ptr); +} + +static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg base, int index, intptr_t ofs, + int seg, MemOp memop) +{ + /* ??? Ideally we wouldn't need a scratch register. For user-only, + we could perform the bswap twice to restore the original value + instead of moving to the scratch. But as it is, the L constraint + means that TCG_REG_L0 is definitely free here. */ + const TCGReg scratch = TCG_REG_L0; + const MemOp real_bswap = memop & MO_BSWAP; + MemOp bswap = real_bswap; + int movop = OPC_MOVL_EvGv; + + if (have_movbe && real_bswap) { + bswap = 0; + movop = OPC_MOVBE_MyGy; + } + + switch (memop & MO_SIZE) { + case MO_8: + /* In 32-bit mode, 8-bit stores can only happen from [abcd]x. + Use the scratch register if necessary. */ + if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) { + tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); + datalo = scratch; + } + tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg, + datalo, base, index, 0, ofs); + break; + case MO_16: + if (bswap) { + tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); + tcg_out_rolw_8(s, scratch); + datalo = scratch; + } + tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo, + base, index, 0, ofs); + break; + case MO_32: + if (bswap) { + tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); + tcg_out_bswap32(s, scratch); + datalo = scratch; + } + tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs); + break; + case MO_64: + if (TCG_TARGET_REG_BITS == 64) { + if (bswap) { + tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo); + tcg_out_bswap64(s, scratch); + datalo = scratch; + } + tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo, + base, index, 0, ofs); + } else if (bswap) { + tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi); + tcg_out_bswap32(s, scratch); + tcg_out_modrm_sib_offset(s, OPC_MOVL_EvGv + seg, scratch, + base, index, 0, ofs); + tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); + tcg_out_bswap32(s, scratch); + tcg_out_modrm_sib_offset(s, OPC_MOVL_EvGv + seg, scratch, + base, index, 0, ofs + 4); + } else { + if (real_bswap) { + int t = datalo; + datalo = datahi; + datahi = t; + } + tcg_out_modrm_sib_offset(s, movop + seg, datalo, + base, index, 0, ofs); + tcg_out_modrm_sib_offset(s, movop + seg, datahi, + base, index, 0, ofs + 4); + } + break; + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) +{ + TCGReg datalo, datahi, addrlo; + TCGReg addrhi QEMU_UNUSED_VAR; + TCGMemOpIdx oi; + MemOp opc; + int mem_index; + tcg_insn_unit *label_ptr[2]; + + datalo = *args++; + datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); + addrlo = *args++; + addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); + oi = *args++; + opc = get_memop(oi); + + mem_index = get_mmuidx(oi); + + tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc, + label_ptr, offsetof(CPUTLBEntry, addr_write)); + + /* TLB Hit. 
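 As for the load above, TCG_REG_L1 now holds the host address.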
*/ + tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc); + + /* Record the current context of a store into ldst label */ + add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi, + s->code_ptr, label_ptr); +} + +static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg *args, const int *const_args) +{ + TCGArg a0, a1, a2; + int c, const_a2, vexop, rexw = 0; + +#if TCG_TARGET_REG_BITS == 64 +# define OP_32_64(x) \ + case glue(glue(INDEX_op_, x), _i64): \ + rexw = P_REXW; /* FALLTHRU */ \ + case glue(glue(INDEX_op_, x), _i32) +#else +# define OP_32_64(x) \ + case glue(glue(INDEX_op_, x), _i32) +#endif + + /* Hoist the loads of the most common arguments. */ + a0 = args[0]; + a1 = args[1]; + a2 = args[2]; + const_a2 = const_args[2]; + + switch (opc) { + case INDEX_op_exit_tb: + /* Reuse the zeroing that exists for goto_ptr. */ + if (a0 == 0) { + tcg_out_jmp(s, s->code_gen_epilogue); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0); + tcg_out_jmp(s, s->tb_ret_addr); + } + break; + case INDEX_op_goto_tb: + if (s->tb_jmp_insn_offset) { + /* direct jump method */ + int gap; + /* jump displacement must be aligned for atomic patching; + * see if we need to add extra nops before jump + */ + gap = tcg_pcrel_diff(s, (void *)(QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4))); + if (gap != 1) { + tcg_out_nopn(s, gap - 1); + } + tcg_out8(s, OPC_JMP_long); /* jmp im */ + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); + tcg_out32(s, 0); + } else { + /* indirect jump method */ + tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1, + (intptr_t)(s->tb_jmp_target_addr + a0)); + } + set_jmp_reset_offset(s, a0); + break; + case INDEX_op_goto_ptr: + /* jmp to the given host address (could be epilogue) */ + tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0); + break; + case INDEX_op_br: + tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0); + break; + OP_32_64(ld8u): + /* Note that we can ignore REXW for the zero-extend to 64-bit. */ + tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2); + break; + OP_32_64(ld8s): + tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2); + break; + OP_32_64(ld16u): + /* Note that we can ignore REXW for the zero-extend to 64-bit. */ + tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2); + break; + OP_32_64(ld16s): + tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2); + break; +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_ld32u_i64: +#endif + case INDEX_op_ld_i32: + tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2); + break; + + OP_32_64(st8): + if (const_args[0]) { + tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2); + tcg_out8(s, a0); + } else { + tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2); + } + break; + OP_32_64(st16): + if (const_args[0]) { + tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2); + tcg_out16(s, a0); + } else { + tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2); + } + break; +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_st32_i64: +#endif + case INDEX_op_st_i32: + if (const_args[0]) { + tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2); + tcg_out32(s, a0); + } else { + tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2); + } + break; + + OP_32_64(add): + /* For 3-operand addition, use LEA. */ + if (a0 != a1) { + TCGArg c3 = 0; + if (const_a2) { + c3 = a2, a2 = -1; + } else if (a0 == a2) { + /* Watch out for dest = src + dest, since we've removed + the matching constraint on the add. 
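+              In that case fall back to a plain ADD.  For the remaining
+              cases the addition folds into a single LEA, schematically:
+                  add  a0, a1, $imm  ->  lea  imm(a1), a0
+                  add  a0, a1, a2    ->  lea  (a1, a2), a0
+              which leaves the source registers untouched.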
*/ + tgen_arithr(s, ARITH_ADD + rexw, a0, a1); + break; + } + + tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3); + break; + } + c = ARITH_ADD; + goto gen_arith; + OP_32_64(sub): + c = ARITH_SUB; + goto gen_arith; + OP_32_64(and): + c = ARITH_AND; + goto gen_arith; + OP_32_64(or): + c = ARITH_OR; + goto gen_arith; + OP_32_64(xor): + c = ARITH_XOR; + goto gen_arith; + gen_arith: + if (const_a2) { + tgen_arithi(s, c + rexw, a0, a2, 0); + } else { + tgen_arithr(s, c + rexw, a0, a2); + } + break; + + OP_32_64(andc): + if (const_a2) { + tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1); + tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0); + } else { + tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1); + } + break; + + OP_32_64(mul): + if (const_a2) { + int32_t val; + val = a2; + if (val == (int8_t)val) { + tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0); + tcg_out8(s, val); + } else { + tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0); + tcg_out32(s, val); + } + } else { + tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2); + } + break; + + OP_32_64(div2): + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]); + break; + OP_32_64(divu2): + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]); + break; + + OP_32_64(shl): + /* For small constant 3-operand shift, use LEA. */ + if (const_a2 && a0 != a1 && (a2 - 1) < 3) { + if (a2 - 1 == 0) { + /* shl $1,a1,a0 -> lea (a1,a1),a0 */ + tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0); + } else { + /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */ + tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0); + } + break; + } + c = SHIFT_SHL; + vexop = OPC_SHLX; + goto gen_shift_maybe_vex; + OP_32_64(shr): + c = SHIFT_SHR; + vexop = OPC_SHRX; + goto gen_shift_maybe_vex; + OP_32_64(sar): + c = SHIFT_SAR; + vexop = OPC_SARX; + goto gen_shift_maybe_vex; + OP_32_64(rotl): + c = SHIFT_ROL; + goto gen_shift; + OP_32_64(rotr): + c = SHIFT_ROR; + goto gen_shift; + gen_shift_maybe_vex: + if (have_bmi2) { + if (!const_a2) { + tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1); + break; + } + tcg_out_mov(s, rexw ? 
TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1); + } + /* FALLTHRU */ + gen_shift: + if (const_a2) { + tcg_out_shifti(s, c + rexw, a0, a2); + } else { + tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0); + } + break; + + OP_32_64(ctz): + tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]); + break; + OP_32_64(clz): + tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]); + break; + OP_32_64(ctpop): + tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1); + break; + + case INDEX_op_brcond_i32: + tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0); + break; + case INDEX_op_setcond_i32: + tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2); + break; + case INDEX_op_movcond_i32: + tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]); + break; + + OP_32_64(bswap16): + tcg_out_rolw_8(s, a0); + break; + OP_32_64(bswap32): + tcg_out_bswap32(s, a0); + break; + + OP_32_64(neg): + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0); + break; + OP_32_64(not): + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0); + break; + + OP_32_64(ext8s): + tcg_out_ext8s(s, a0, a1, rexw); + break; + OP_32_64(ext16s): + tcg_out_ext16s(s, a0, a1, rexw); + break; + OP_32_64(ext8u): + tcg_out_ext8u(s, a0, a1); + break; + OP_32_64(ext16u): + tcg_out_ext16u(s, a0, a1); + break; + + case INDEX_op_qemu_ld_i32: + tcg_out_qemu_ld(s, args, 0); + break; + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, args, 1); + break; + case INDEX_op_qemu_st_i32: + tcg_out_qemu_st(s, args, 0); + break; + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, args, 1); + break; + + OP_32_64(mulu2): + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]); + break; + OP_32_64(muls2): + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]); + break; + OP_32_64(add2): + if (const_args[4]) { + tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1); + } else { + tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]); + } + if (const_args[5]) { + tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1); + } else { + tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]); + } + break; + OP_32_64(sub2): + if (const_args[4]) { + tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1); + } else { + tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]); + } + if (const_args[5]) { + tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1); + } else { + tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]); + } + break; + +#if TCG_TARGET_REG_BITS == 32 + case INDEX_op_brcond2_i32: + tcg_out_brcond2(s, args, const_args, 0); + break; + case INDEX_op_setcond2_i32: + tcg_out_setcond2(s, args, const_args); + break; +#else /* TCG_TARGET_REG_BITS == 64 */ + case INDEX_op_ld32s_i64: + tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2); + break; + case INDEX_op_ld_i64: + tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2); + break; + case INDEX_op_st_i64: + if (const_args[0]) { + tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2); + tcg_out32(s, a0); + } else { + tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2); + } + break; + + case INDEX_op_brcond_i64: + tcg_out_brcond64(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0); + break; + case INDEX_op_setcond_i64: + tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2); + break; + case INDEX_op_movcond_i64: + tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]); + break; + + case INDEX_op_bswap64_i64: + tcg_out_bswap64(s, a0); + break; + case INDEX_op_extu_i32_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_extrl_i64_i32: + tcg_out_ext32u(s, a0, a1); + break; + case INDEX_op_ext_i32_i64: + case INDEX_op_ext32s_i64: + tcg_out_ext32s(s, a0, a1); + break; + case 
INDEX_op_extrh_i64_i32: + tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32); + break; +#endif + + OP_32_64(deposit): + if (args[3] == 0 && args[4] == 8) { + /* load bits 0..7 */ + tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0); + } else if (args[3] == 8 && args[4] == 8) { + /* load bits 8..15 */ + tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4); + } else if (args[3] == 0 && args[4] == 16) { + /* load bits 0..15 */ + tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0); + } else { + tcg_abort(); + } + break; + + case INDEX_op_extract_i64: + if (a2 + args[3] == 32) { + /* This is a 32-bit zero-extending right shift. */ + tcg_out_mov(s, TCG_TYPE_I32, a0, a1); + tcg_out_shifti(s, SHIFT_SHR, a0, a2); + break; + } + /* FALLTHRU */ + case INDEX_op_extract_i32: + /* On the off-chance that we can use the high-byte registers. + Otherwise we emit the same ext16 + shift pattern that we + would have gotten from the normal tcg-op.c expansion. */ + tcg_debug_assert(a2 == 8 && args[3] == 8); + if (a1 < 4 && a0 < 8) { + tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4); + } else { + tcg_out_ext16u(s, a0, a1); + tcg_out_shifti(s, SHIFT_SHR, a0, 8); + } + break; + + case INDEX_op_sextract_i32: + /* We don't implement sextract_i64, as we cannot sign-extend to + 64-bits without using the REX prefix that explicitly excludes + access to the high-byte registers. */ + tcg_debug_assert(a2 == 8 && args[3] == 8); + if (a1 < 4 && a0 < 8) { + tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4); + } else { + tcg_out_ext16s(s, a0, a1, 0); + tcg_out_shifti(s, SHIFT_SAR, a0, 8); + } + break; + + OP_32_64(extract2): + /* Note that SHRD outputs to the r/m operand. */ + tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0); + tcg_out8(s, args[3]); + break; + + case INDEX_op_mb: + tcg_out_mb(s, a0); + break; + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. 
*/ + default: + tcg_abort(); + } + +#undef OP_32_64 +} + +static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, + unsigned vecl, unsigned vece, + const TCGArg *args, const int *const_args) +{ + static int const add_insn[4] = { + OPC_PADDB, OPC_PADDW, OPC_PADDD, OPC_PADDQ + }; + static int const ssadd_insn[4] = { + OPC_PADDSB, OPC_PADDSW, OPC_UD2, OPC_UD2 + }; + static int const usadd_insn[4] = { + OPC_PADDUB, OPC_PADDUW, OPC_UD2, OPC_UD2 + }; + static int const sub_insn[4] = { + OPC_PSUBB, OPC_PSUBW, OPC_PSUBD, OPC_PSUBQ + }; + static int const sssub_insn[4] = { + OPC_PSUBSB, OPC_PSUBSW, OPC_UD2, OPC_UD2 + }; + static int const ussub_insn[4] = { + OPC_PSUBUB, OPC_PSUBUW, OPC_UD2, OPC_UD2 + }; + static int const mul_insn[4] = { + OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_UD2 + }; + static int const shift_imm_insn[4] = { + OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib + }; + static int const cmpeq_insn[4] = { + OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ + }; + static int const cmpgt_insn[4] = { + OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ + }; + static int const punpckl_insn[4] = { + OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ + }; + static int const punpckh_insn[4] = { + OPC_PUNPCKHBW, OPC_PUNPCKHWD, OPC_PUNPCKHDQ, OPC_PUNPCKHQDQ + }; + static int const packss_insn[4] = { + OPC_PACKSSWB, OPC_PACKSSDW, OPC_UD2, OPC_UD2 + }; + static int const packus_insn[4] = { + OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2 + }; + static int const smin_insn[4] = { + OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_UD2 + }; + static int const smax_insn[4] = { + OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_UD2 + }; + static int const umin_insn[4] = { + OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_UD2 + }; + static int const umax_insn[4] = { + OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2 + }; + static int const shlv_insn[4] = { + /* TODO: AVX512 adds support for MO_16. */ + OPC_UD2, OPC_UD2, OPC_VPSLLVD, OPC_VPSLLVQ + }; + static int const shrv_insn[4] = { + /* TODO: AVX512 adds support for MO_16. */ + OPC_UD2, OPC_UD2, OPC_VPSRLVD, OPC_VPSRLVQ + }; + static int const sarv_insn[4] = { + /* TODO: AVX512 adds support for MO_16, MO_64. */ + OPC_UD2, OPC_UD2, OPC_VPSRAVD, OPC_UD2 + }; + static int const shls_insn[4] = { + OPC_UD2, OPC_PSLLW, OPC_PSLLD, OPC_PSLLQ + }; + static int const shrs_insn[4] = { + OPC_UD2, OPC_PSRLW, OPC_PSRLD, OPC_PSRLQ + }; + static int const sars_insn[4] = { + OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_UD2 + }; + static int const abs_insn[4] = { + /* TODO: AVX512 adds support for MO_64. 
*/ + OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_UD2 + }; + + TCGType type = vecl + TCG_TYPE_V64; + int insn = 0, sub; + TCGArg a0, a1, a2; + + a0 = args[0]; + a1 = args[1]; + a2 = args[2]; + + switch (opc) { + case INDEX_op_add_vec: + insn = add_insn[vece]; + goto gen_simd; + case INDEX_op_ssadd_vec: + insn = ssadd_insn[vece]; + goto gen_simd; + case INDEX_op_usadd_vec: + insn = usadd_insn[vece]; + goto gen_simd; + case INDEX_op_sub_vec: + insn = sub_insn[vece]; + goto gen_simd; + case INDEX_op_sssub_vec: + insn = sssub_insn[vece]; + goto gen_simd; + case INDEX_op_ussub_vec: + insn = ussub_insn[vece]; + goto gen_simd; + case INDEX_op_mul_vec: + insn = mul_insn[vece]; + goto gen_simd; + case INDEX_op_and_vec: + insn = OPC_PAND; + goto gen_simd; + case INDEX_op_or_vec: + insn = OPC_POR; + goto gen_simd; + case INDEX_op_xor_vec: + insn = OPC_PXOR; + goto gen_simd; + case INDEX_op_smin_vec: + insn = smin_insn[vece]; + goto gen_simd; + case INDEX_op_umin_vec: + insn = umin_insn[vece]; + goto gen_simd; + case INDEX_op_smax_vec: + insn = smax_insn[vece]; + goto gen_simd; + case INDEX_op_umax_vec: + insn = umax_insn[vece]; + goto gen_simd; + case INDEX_op_shlv_vec: + insn = shlv_insn[vece]; + goto gen_simd; + case INDEX_op_shrv_vec: + insn = shrv_insn[vece]; + goto gen_simd; + case INDEX_op_sarv_vec: + insn = sarv_insn[vece]; + goto gen_simd; + case INDEX_op_shls_vec: + insn = shls_insn[vece]; + goto gen_simd; + case INDEX_op_shrs_vec: + insn = shrs_insn[vece]; + goto gen_simd; + case INDEX_op_sars_vec: + insn = sars_insn[vece]; + goto gen_simd; + case INDEX_op_x86_punpckl_vec: + insn = punpckl_insn[vece]; + goto gen_simd; + case INDEX_op_x86_punpckh_vec: + insn = punpckh_insn[vece]; + goto gen_simd; + case INDEX_op_x86_packss_vec: + insn = packss_insn[vece]; + goto gen_simd; + case INDEX_op_x86_packus_vec: + insn = packus_insn[vece]; + goto gen_simd; +#if TCG_TARGET_REG_BITS == 32 + case INDEX_op_dup2_vec: + /* First merge the two 32-bit inputs to a single 64-bit element. */ + tcg_out_vex_modrm(s, OPC_PUNPCKLDQ, a0, a1, a2); + /* Then replicate the 64-bit elements across the rest of the vector. 
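+           After the PUNPCKLDQ above, a1 occupies the low half and a2 the
+           high half of element 0, matching dup2_vec's (low, high) input
+           order; a V64 result already consists of just that element,
+           hence the type check below.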
*/ + if (type != TCG_TYPE_V64) { + tcg_out_dup_vec(s, type, MO_64, a0, a0); + } + break; +#endif + case INDEX_op_abs_vec: + insn = abs_insn[vece]; + a2 = a1; + a1 = 0; + goto gen_simd; + gen_simd: + tcg_debug_assert(insn != OPC_UD2); + if (type == TCG_TYPE_V256) { + insn |= P_VEXL; + } + tcg_out_vex_modrm(s, insn, a0, a1, a2); + break; + + case INDEX_op_cmp_vec: + sub = args[3]; + if (sub == TCG_COND_EQ) { + insn = cmpeq_insn[vece]; + } else if (sub == TCG_COND_GT) { + insn = cmpgt_insn[vece]; + } else { + g_assert_not_reached(); + } + goto gen_simd; + + case INDEX_op_andc_vec: + insn = OPC_PANDN; + if (type == TCG_TYPE_V256) { + insn |= P_VEXL; + } + tcg_out_vex_modrm(s, insn, a0, a2, a1); + break; + + case INDEX_op_shli_vec: + sub = 6; + goto gen_shift; + case INDEX_op_shri_vec: + sub = 2; + goto gen_shift; + case INDEX_op_sari_vec: + tcg_debug_assert(vece != MO_64); + sub = 4; + gen_shift: + tcg_debug_assert(vece != MO_8); + insn = shift_imm_insn[vece]; + if (type == TCG_TYPE_V256) { + insn |= P_VEXL; + } + tcg_out_vex_modrm(s, insn, sub, a0, a1); + tcg_out8(s, a2); + break; + + case INDEX_op_ld_vec: + tcg_out_ld(s, type, a0, a1, a2); + break; + case INDEX_op_st_vec: + tcg_out_st(s, type, a0, a1, a2); + break; + case INDEX_op_dupm_vec: + tcg_out_dupm_vec(s, type, vece, a0, a1, a2); + break; + + case INDEX_op_x86_shufps_vec: + insn = OPC_SHUFPS; + sub = args[3]; + goto gen_simd_imm8; + case INDEX_op_x86_blend_vec: + if (vece == MO_16) { + insn = OPC_PBLENDW; + } else if (vece == MO_32) { + insn = (have_avx2 ? OPC_VPBLENDD : OPC_BLENDPS); + } else { + g_assert_not_reached(); + } + sub = args[3]; + goto gen_simd_imm8; + case INDEX_op_x86_vperm2i128_vec: + insn = OPC_VPERM2I128; + sub = args[3]; + goto gen_simd_imm8; + gen_simd_imm8: + if (type == TCG_TYPE_V256) { + insn |= P_VEXL; + } + tcg_out_vex_modrm(s, insn, a0, a1, a2); + tcg_out8(s, sub); + break; + + case INDEX_op_x86_vpblendvb_vec: + insn = OPC_VPBLENDVB; + if (type == TCG_TYPE_V256) { + insn |= P_VEXL; + } + tcg_out_vex_modrm(s, insn, a0, a1, a2); + tcg_out8(s, args[3] << 4); + break; + + case INDEX_op_x86_psrldq_vec: + tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1); + tcg_out8(s, a2); + break; + + case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ + case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ + case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. 
*/
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
+    static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } };
+    static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } };
+    static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } };
+    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
+    static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } };
+    static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } };
+    static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
+    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
+    static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
+    static const TCGTargetOpDef r_0_r = { .args_ct_str = { "r", "0", "r" } };
+    static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
+    static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
+    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
+    static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
+    static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
+    static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
+    static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
+    static const TCGTargetOpDef r_r_L_L
+        = { .args_ct_str = { "r", "r", "L", "L" } };
+    static const TCGTargetOpDef L_L_L_L
+        = { .args_ct_str = { "L", "L", "L", "L" } };
+    static const TCGTargetOpDef x_x = { .args_ct_str = { "x", "x" } };
+    static const TCGTargetOpDef x_x_x = { .args_ct_str = { "x", "x", "x" } };
+    static const TCGTargetOpDef x_x_x_x
+        = { .args_ct_str = { "x", "x", "x", "x" } };
+    static const TCGTargetOpDef x_r = { .args_ct_str = { "x", "r" } };
+
+    switch (op) {
+    case INDEX_op_goto_ptr:
+        return &r;
+
+    case INDEX_op_ld8u_i32:
+    case INDEX_op_ld8u_i64:
+    case INDEX_op_ld8s_i32:
+    case INDEX_op_ld8s_i64:
+    case INDEX_op_ld16u_i32:
+    case INDEX_op_ld16u_i64:
+    case INDEX_op_ld16s_i32:
+    case INDEX_op_ld16s_i64:
+    case INDEX_op_ld_i32:
+    case INDEX_op_ld32u_i64:
+    case INDEX_op_ld32s_i64:
+    case INDEX_op_ld_i64:
+        return &r_r;
+
+    case INDEX_op_st8_i32:
+    case INDEX_op_st8_i64:
+        return &qi_r;
+    case INDEX_op_st16_i32:
+    case INDEX_op_st16_i64:
+    case INDEX_op_st_i32:
+    case INDEX_op_st32_i64:
+        return &ri_r;
+    case INDEX_op_st_i64:
+        return &re_r;
+
+    case INDEX_op_add_i32:
+    case INDEX_op_add_i64:
+        return &r_r_re;
+    case INDEX_op_sub_i32:
+    case INDEX_op_sub_i64:
+    case INDEX_op_mul_i32:
+    case INDEX_op_mul_i64:
+    case INDEX_op_or_i32:
+    case INDEX_op_or_i64:
+    case INDEX_op_xor_i32:
+    case INDEX_op_xor_i64:
+        return &r_0_re;
+
+    case INDEX_op_and_i32:
+    case INDEX_op_and_i64:
+        {
+            static const TCGTargetOpDef and
+                = { .args_ct_str = { "r", "0", "reZ" } };
+            return &and;
+        }
+        break;
+    case INDEX_op_andc_i32:
+    case INDEX_op_andc_i64:
+        {
+            static const TCGTargetOpDef andc
+                = { .args_ct_str = { "r", "r", "rI" } };
+            return &andc;
+        }
+        break;
+
+    case INDEX_op_shl_i32:
+    case INDEX_op_shl_i64:
+    case INDEX_op_shr_i32:
+    case INDEX_op_shr_i64:
+    case INDEX_op_sar_i32:
+    case INDEX_op_sar_i64:
+        return have_bmi2 ?
&r_r_ri : &r_0_ci; + case INDEX_op_rotl_i32: + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i32: + case INDEX_op_rotr_i64: + return &r_0_ci; + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + return &r_re; + + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + case INDEX_op_bswap32_i32: + case INDEX_op_bswap32_i64: + case INDEX_op_bswap64_i64: + case INDEX_op_neg_i32: + case INDEX_op_neg_i64: + case INDEX_op_not_i32: + case INDEX_op_not_i64: + case INDEX_op_extrh_i64_i32: + return &r_0; + + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + return &r_q; + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_extract_i32: + case INDEX_op_extract_i64: + case INDEX_op_sextract_i32: + case INDEX_op_ctpop_i32: + case INDEX_op_ctpop_i64: + return &r_r; + case INDEX_op_extract2_i32: + case INDEX_op_extract2_i64: + return &r_0_r; + + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + { + static const TCGTargetOpDef dep + = { .args_ct_str = { "Q", "0", "Q" } }; + return &dep; + } + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + { + static const TCGTargetOpDef setc + = { .args_ct_str = { "q", "r", "re" } }; + return &setc; + } + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: + { + static const TCGTargetOpDef movc + = { .args_ct_str = { "r", "r", "re", "r", "0" } }; + return &movc; + } + case INDEX_op_div2_i32: + case INDEX_op_div2_i64: + case INDEX_op_divu2_i32: + case INDEX_op_divu2_i64: + { + static const TCGTargetOpDef div2 + = { .args_ct_str = { "a", "d", "0", "1", "r" } }; + return &div2; + } + case INDEX_op_mulu2_i32: + case INDEX_op_mulu2_i64: + case INDEX_op_muls2_i32: + case INDEX_op_muls2_i64: + { + static const TCGTargetOpDef mul2 + = { .args_ct_str = { "a", "d", "a", "r" } }; + return &mul2; + } + case INDEX_op_add2_i32: + case INDEX_op_add2_i64: + case INDEX_op_sub2_i32: + case INDEX_op_sub2_i64: + { + static const TCGTargetOpDef arith2 + = { .args_ct_str = { "r", "r", "0", "1", "re", "re" } }; + return &arith2; + } + case INDEX_op_ctz_i32: + case INDEX_op_ctz_i64: + { + static const TCGTargetOpDef ctz[2] = { + { .args_ct_str = { "&r", "r", "r" } }, + { .args_ct_str = { "&r", "r", "rW" } }, + }; + return &ctz[have_bmi1]; + } + case INDEX_op_clz_i32: + case INDEX_op_clz_i64: + { + static const TCGTargetOpDef clz[2] = { + { .args_ct_str = { "&r", "r", "r" } }, + { .args_ct_str = { "&r", "r", "rW" } }, + }; + return &clz[have_lzcnt]; + } + + case INDEX_op_qemu_ld_i32: + return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L; + case INDEX_op_qemu_st_i32: + return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L; + case INDEX_op_qemu_ld_i64: + return (TCG_TARGET_REG_BITS == 64 ? &r_L + : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L + : &r_r_L_L); + case INDEX_op_qemu_st_i64: + return (TCG_TARGET_REG_BITS == 64 ? &L_L + : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 
&L_L_L + : &L_L_L_L); + + case INDEX_op_brcond2_i32: + { + static const TCGTargetOpDef b2 + = { .args_ct_str = { "r", "r", "ri", "ri" } }; + return &b2; + } + case INDEX_op_setcond2_i32: + { + static const TCGTargetOpDef s2 + = { .args_ct_str = { "r", "r", "r", "ri", "ri" } }; + return &s2; + } + + case INDEX_op_ld_vec: + case INDEX_op_st_vec: + case INDEX_op_dupm_vec: + return &x_r; + + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_mul_vec: + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_andc_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_usadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_ussub_vec: + case INDEX_op_smin_vec: + case INDEX_op_umin_vec: + case INDEX_op_smax_vec: + case INDEX_op_umax_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + case INDEX_op_shls_vec: + case INDEX_op_shrs_vec: + case INDEX_op_sars_vec: + case INDEX_op_cmp_vec: + case INDEX_op_x86_shufps_vec: + case INDEX_op_x86_blend_vec: + case INDEX_op_x86_packss_vec: + case INDEX_op_x86_packus_vec: + case INDEX_op_x86_vperm2i128_vec: + case INDEX_op_x86_punpckl_vec: + case INDEX_op_x86_punpckh_vec: +#if TCG_TARGET_REG_BITS == 32 + case INDEX_op_dup2_vec: +#endif + return &x_x_x; + case INDEX_op_abs_vec: + case INDEX_op_dup_vec: + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + case INDEX_op_sari_vec: + case INDEX_op_x86_psrldq_vec: + return &x_x; + case INDEX_op_x86_vpblendvb_vec: + return &x_x_x_x; + + default: + break; + } + return NULL; +} + +int tcg_can_emit_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece) +{ + switch (opc) { + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_andc_vec: + return 1; + case INDEX_op_cmp_vec: + case INDEX_op_cmpsel_vec: + return -1; + + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + /* We must expand the operation for MO_8. */ + return vece == MO_8 ? -1 : 1; + + case INDEX_op_sari_vec: + /* We must expand the operation for MO_8. */ + if (vece == MO_8) { + return -1; + } + /* We can emulate this for MO_64, but it does not pay off + unless we're producing at least 4 values. */ + if (vece == MO_64) { + return type >= TCG_TYPE_V256 ? -1 : 0; + } + return 1; + + case INDEX_op_shls_vec: + case INDEX_op_shrs_vec: + return vece >= MO_16; + case INDEX_op_sars_vec: + return vece >= MO_16 && vece <= MO_32; + + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + return have_avx2 && vece >= MO_32; + case INDEX_op_sarv_vec: + return have_avx2 && vece == MO_32; + + case INDEX_op_mul_vec: + if (vece == MO_8) { + /* We can expand the operation for MO_8. */ + return -1; + } + if (vece == MO_64) { + return 0; + } + return 1; + + case INDEX_op_ssadd_vec: + case INDEX_op_usadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_ussub_vec: + return vece <= MO_16; + case INDEX_op_smin_vec: + case INDEX_op_smax_vec: + case INDEX_op_umin_vec: + case INDEX_op_umax_vec: + case INDEX_op_abs_vec: + return vece <= MO_32; + + default: + return 0; + } +} + +static void expand_vec_shi(TCGContext *tcg_ctx, TCGType type, unsigned vece, bool shr, + TCGv_vec v0, TCGv_vec v1, TCGArg imm) +{ + TCGv_vec t1, t2; + + tcg_debug_assert(vece == MO_8); + + t1 = tcg_temp_new_vec(tcg_ctx, type); + t2 = tcg_temp_new_vec(tcg_ctx, type); + + /* Unpack to W, shift, and repack. Tricky bits: + (1) Use punpck*bw x,x to produce DDCCBBAA, + i.e. duplicate in other half of the 16-bit lane. 
+ (2) For right-shift, add 8 so that the high half of + the lane becomes zero. For left-shift, we must + shift up and down again. + (3) Step 2 leaves high half zero such that PACKUSWB + (pack with unsigned saturation) does not modify + the quantity. */ + vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v1)); + vec_gen_3(tcg_ctx, INDEX_op_x86_punpckh_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v1)); + + if (shr) { + tcg_gen_shri_vec(tcg_ctx, MO_16, t1, t1, imm + 8); + tcg_gen_shri_vec(tcg_ctx, MO_16, t2, t2, imm + 8); + } else { + tcg_gen_shli_vec(tcg_ctx, MO_16, t1, t1, imm + 8); + tcg_gen_shli_vec(tcg_ctx, MO_16, t2, t2, imm + 8); + tcg_gen_shri_vec(tcg_ctx, MO_16, t1, t1, 8); + tcg_gen_shri_vec(tcg_ctx, MO_16, t2, t2, 8); + } + + vec_gen_3(tcg_ctx, INDEX_op_x86_packus_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t2)); + tcg_temp_free_vec(tcg_ctx, t1); + tcg_temp_free_vec(tcg_ctx, t2); +} + +static void expand_vec_sari(TCGContext *tcg_ctx, TCGType type, unsigned vece, + TCGv_vec v0, TCGv_vec v1, TCGArg imm) +{ + TCGv_vec t1, t2; + + switch (vece) { + case MO_8: + /* Unpack to W, shift, and repack, as in expand_vec_shi. */ + t1 = tcg_temp_new_vec(tcg_ctx, type); + t2 = tcg_temp_new_vec(tcg_ctx, type); + vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v1)); + vec_gen_3(tcg_ctx, INDEX_op_x86_punpckh_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v1)); + tcg_gen_sari_vec(tcg_ctx, MO_16, t1, t1, imm + 8); + tcg_gen_sari_vec(tcg_ctx, MO_16, t2, t2, imm + 8); + vec_gen_3(tcg_ctx, INDEX_op_x86_packss_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t2)); + tcg_temp_free_vec(tcg_ctx, t1); + tcg_temp_free_vec(tcg_ctx, t2); + break; + + case MO_64: + if (imm <= 32) { + /* + * We can emulate a small sign extend by performing an arithmetic + * 32-bit shift and overwriting the high half of a 64-bit logical + * shift. Note that the ISA says shift of 32 is valid, but TCG + * does not, so we have to bound the smaller shift -- we get the + * same result in the high half either way. + */ + t1 = tcg_temp_new_vec(tcg_ctx, type); + tcg_gen_sari_vec(tcg_ctx, MO_32, t1, v1, MIN(imm, 31)); + tcg_gen_shri_vec(tcg_ctx, MO_64, v0, v1, imm); + vec_gen_4(tcg_ctx, INDEX_op_x86_blend_vec, type, MO_32, + tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, v0), + tcgv_vec_arg(tcg_ctx, t1), 0xaa); + tcg_temp_free_vec(tcg_ctx, t1); + } else { + /* Otherwise we will need to use a compare vs 0 to produce + * the sign-extend, shift and merge. + */ + t1 = tcg_const_zeros_vec(tcg_ctx, type); + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_GT, MO_64, t1, t1, v1); + tcg_gen_shri_vec(tcg_ctx, MO_64, v0, v1, imm); + tcg_gen_shli_vec(tcg_ctx, MO_64, t1, t1, 64 - imm); + tcg_gen_or_vec(tcg_ctx, MO_64, v0, v0, t1); + tcg_temp_free_vec(tcg_ctx, t1); + } + break; + + default: + g_assert_not_reached(); + } +} + +static void expand_vec_mul(TCGContext *tcg_ctx, TCGType type, unsigned vece, + TCGv_vec v0, TCGv_vec v1, TCGv_vec v2) +{ + TCGv_vec t1, t2, t3, t4; + + tcg_debug_assert(vece == MO_8); + + /* + * Unpack v1 bytes to words, 0 | x. + * Unpack v2 bytes to words, y | 0. + * This leaves the 8-bit result, x * y, with 8 bits of right padding. 
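+     * For one 16-bit lane this means (x, y being the input bytes):
+     *     t1 = 00:x, t2 = y:00, so t1 * t2 = (x*y) << 8 (mod 2^16),
+     * leaving the usable low byte of the product in the high byte of
+     * the lane with the low 8 bits already zero.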
+ * Shift logical right by 8 bits to clear the high 8 bytes before + * using an unsigned saturated pack. + * + * The difference between the V64, V128 and V256 cases is merely how + * we distribute the expansion between temporaries. + */ + switch (type) { + case TCG_TYPE_V64: + t1 = tcg_temp_new_vec(tcg_ctx, TCG_TYPE_V128); + t2 = tcg_temp_new_vec(tcg_ctx, TCG_TYPE_V128); + tcg_gen_dup16i_vec(tcg_ctx, t2, 0); + vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8, + tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t2)); + vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8, + tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, v2)); + tcg_gen_mul_vec(tcg_ctx, MO_16, t1, t1, t2); + tcg_gen_shri_vec(tcg_ctx, MO_16, t1, t1, 8); + vec_gen_3(tcg_ctx, INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8, + tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t1)); + tcg_temp_free_vec(tcg_ctx, t1); + tcg_temp_free_vec(tcg_ctx, t2); + break; + + case TCG_TYPE_V128: + case TCG_TYPE_V256: + t1 = tcg_temp_new_vec(tcg_ctx, type); + t2 = tcg_temp_new_vec(tcg_ctx, type); + t3 = tcg_temp_new_vec(tcg_ctx, type); + t4 = tcg_temp_new_vec(tcg_ctx, type); + tcg_gen_dup16i_vec(tcg_ctx, t4, 0); + vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t4)); + vec_gen_3(tcg_ctx, INDEX_op_x86_punpckl_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, t2), tcgv_vec_arg(tcg_ctx, t4), tcgv_vec_arg(tcg_ctx, v2)); + vec_gen_3(tcg_ctx, INDEX_op_x86_punpckh_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, t3), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t4)); + vec_gen_3(tcg_ctx, INDEX_op_x86_punpckh_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, t4), tcgv_vec_arg(tcg_ctx, t4), tcgv_vec_arg(tcg_ctx, v2)); + tcg_gen_mul_vec(tcg_ctx, MO_16, t1, t1, t2); + tcg_gen_mul_vec(tcg_ctx, MO_16, t3, t3, t4); + tcg_gen_shri_vec(tcg_ctx, MO_16, t1, t1, 8); + tcg_gen_shri_vec(tcg_ctx, MO_16, t3, t3, 8); + vec_gen_3(tcg_ctx, INDEX_op_x86_packus_vec, type, MO_8, + tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t3)); + tcg_temp_free_vec(tcg_ctx, t1); + tcg_temp_free_vec(tcg_ctx, t2); + tcg_temp_free_vec(tcg_ctx, t3); + tcg_temp_free_vec(tcg_ctx, t4); + break; + + default: + g_assert_not_reached(); + } +} + +static bool expand_vec_cmp_noinv(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2, TCGCond cond) +{ + enum { + NEED_INV = 1, + NEED_SWAP = 2, + NEED_BIAS = 4, + NEED_UMIN = 8, + NEED_UMAX = 16, + }; + TCGv_vec t1, t2; + uint8_t fixup = 0; + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_GT: + fixup = 0; + break; + case TCG_COND_NE: + case TCG_COND_LE: + fixup = NEED_INV; + break; + case TCG_COND_LT: + fixup = NEED_SWAP; + break; + case TCG_COND_GE: + fixup = NEED_SWAP | NEED_INV; + break; + case TCG_COND_LEU: + if (vece <= MO_32) { + fixup = NEED_UMIN; + } else { + fixup = NEED_BIAS | NEED_INV; + } + break; + case TCG_COND_GTU: + if (vece <= MO_32) { + fixup = NEED_UMIN | NEED_INV; + } else { + fixup = NEED_BIAS; + } + break; + case TCG_COND_GEU: + if (vece <= MO_32) { + fixup = NEED_UMAX; + } else { + fixup = NEED_BIAS | NEED_SWAP | NEED_INV; + } + break; + case TCG_COND_LTU: + if (vece <= MO_32) { + fixup = NEED_UMAX | NEED_INV; + } else { + fixup = NEED_BIAS | NEED_SWAP; + } + break; + default: + g_assert_not_reached(); + } + + if (fixup & NEED_INV) { + cond = tcg_invert_cond(cond); + } + if (fixup & 
NEED_SWAP) { + t1 = v1, v1 = v2, v2 = t1; + cond = tcg_swap_cond(cond); + } + + t1 = t2 = NULL; + if (fixup & (NEED_UMIN | NEED_UMAX)) { + t1 = tcg_temp_new_vec(tcg_ctx, type); + if (fixup & NEED_UMIN) { + tcg_gen_umin_vec(tcg_ctx, vece, t1, v1, v2); + } else { + tcg_gen_umax_vec(tcg_ctx, vece, t1, v1, v2); + } + v2 = t1; + cond = TCG_COND_EQ; + } else if (fixup & NEED_BIAS) { + t1 = tcg_temp_new_vec(tcg_ctx, type); + t2 = tcg_temp_new_vec(tcg_ctx, type); + tcg_gen_dupi_vec(tcg_ctx, vece, t2, 1ull << ((8 << vece) - 1)); + tcg_gen_sub_vec(tcg_ctx, vece, t1, v1, t2); + tcg_gen_sub_vec(tcg_ctx, vece, t2, v2, t2); + v1 = t1; + v2 = t2; + cond = tcg_signed_cond(cond); + } + + tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT); + /* Expand directly; do not recurse. */ + vec_gen_4(tcg_ctx, INDEX_op_cmp_vec, type, vece, + tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v2), cond); + + if (t1) { + tcg_temp_free_vec(tcg_ctx, t1); + if (t2) { + tcg_temp_free_vec(tcg_ctx, t2); + } + } + return fixup & NEED_INV; +} + +static void expand_vec_cmp(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2, TCGCond cond) +{ + if (expand_vec_cmp_noinv(tcg_ctx, type, vece, v0, v1, v2, cond)) { + tcg_gen_not_vec(tcg_ctx, vece, v0, v0); + } +} + +static void expand_vec_cmpsel(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec c1, TCGv_vec c2, + TCGv_vec v3, TCGv_vec v4, TCGCond cond) +{ + TCGv_vec t = tcg_temp_new_vec(tcg_ctx, type); + + if (expand_vec_cmp_noinv(tcg_ctx, type, vece, t, c1, c2, cond)) { + /* Invert the sense of the compare by swapping arguments. */ + TCGv_vec x; + x = v3, v3 = v4, v4 = x; + } + vec_gen_4(tcg_ctx, INDEX_op_x86_vpblendvb_vec, type, vece, + tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, v4), + tcgv_vec_arg(tcg_ctx, v3), tcgv_vec_arg(tcg_ctx, t)); + tcg_temp_free_vec(tcg_ctx, t); +} + +void tcg_expand_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, + TCGArg a0, ...) +{ + va_list va; + TCGArg a2; + TCGv_vec v0, v1, v2, v3, v4; + + va_start(va, a0); + v0 = temp_tcgv_vec(tcg_ctx, arg_temp(a0)); + v1 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); + a2 = va_arg(va, TCGArg); + + switch (opc) { + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + expand_vec_shi(tcg_ctx, type, vece, opc == INDEX_op_shri_vec, v0, v1, a2); + break; + + case INDEX_op_sari_vec: + expand_vec_sari(tcg_ctx, type, vece, v0, v1, a2); + break; + + case INDEX_op_mul_vec: + v2 = temp_tcgv_vec(tcg_ctx, arg_temp(a2)); + expand_vec_mul(tcg_ctx, type, vece, v0, v1, v2); + break; + + case INDEX_op_cmp_vec: + v2 = temp_tcgv_vec(tcg_ctx, arg_temp(a2)); + expand_vec_cmp(tcg_ctx, type, vece, v0, v1, v2, va_arg(va, TCGArg)); + break; + + case INDEX_op_cmpsel_vec: + v2 = temp_tcgv_vec(tcg_ctx, arg_temp(a2)); + v3 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); + v4 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); + expand_vec_cmpsel(tcg_ctx, type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg)); + break; + + default: + break; + } + + va_end(va); +} + +static const int tcg_target_callee_save_regs[] = { +#if TCG_TARGET_REG_BITS == 64 + TCG_REG_RBP, + TCG_REG_RBX, +#if defined(_WIN64) + TCG_REG_RDI, + TCG_REG_RSI, +#endif + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R14, /* Currently used for the global env. */ + TCG_REG_R15, +#else + TCG_REG_EBP, /* Currently used for the global env. 
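+                     (TCG_AREG0 on 32-bit hosts, playing the same role
+                      %r14 does in the 64-bit list above)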
*/ + TCG_REG_EBX, + TCG_REG_ESI, + TCG_REG_EDI, +#endif +}; + +/* Compute frame size via macros, to share between tcg_target_qemu_prologue + and tcg_register_jit. */ + +#define PUSH_SIZE \ + ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \ + * (TCG_TARGET_REG_BITS / 8)) + +#define FRAME_SIZE \ + ((PUSH_SIZE \ + + TCG_STATIC_CALL_ARGS_SIZE \ + + CPU_TEMP_BUF_NLONGS * sizeof(long) \ + + TCG_TARGET_STACK_ALIGN - 1) \ + & ~(TCG_TARGET_STACK_ALIGN - 1)) + +/* Generate global QEMU prologue and epilogue code */ +static void tcg_target_qemu_prologue(TCGContext *s) +{ + int i, stack_addend; + + /* TB prologue */ + + /* Reserve some stack space, also for TCG temps. */ + stack_addend = FRAME_SIZE - PUSH_SIZE; + tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, + CPU_TEMP_BUF_NLONGS * sizeof(long)); + + /* Save all callee saved registers. */ + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { + tcg_out_push(s, tcg_target_callee_save_regs[i]); + } + +#if TCG_TARGET_REG_BITS == 32 + tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, + (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4); + tcg_out_addi(s, TCG_REG_ESP, -stack_addend); + /* jmp *tb. */ + tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP, + (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4 + + stack_addend); +#else + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + tcg_out_addi(s, TCG_REG_ESP, -stack_addend); + /* jmp *tb. */ + tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]); +#endif + + /* + * Return path for goto_ptr. Set return value to 0, a-la exit_tb, + * and fall through to the rest of the epilogue. + */ + s->code_gen_epilogue = s->code_ptr; + tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0); + + /* TB epilogue */ + s->tb_ret_addr = s->code_ptr; + + tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend); + + if (have_avx2) { + tcg_out_vex_opc(s, OPC_VZEROUPPER, 0, 0, 0, 0); + } + for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) { + tcg_out_pop(s, tcg_target_callee_save_regs[i]); + } + tcg_out_opc(s, OPC_RET, 0, 0, 0); +} + +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + memset(p, 0x90, count); +} + +static void tcg_target_init(TCGContext *s) +{ +#ifdef CONFIG_CPUID_H + unsigned a, b, c, d, b7 = 0; + int max; + +#ifdef _MSC_VER + int cpu_info[4]; + __cpuid(cpu_info, 0); + max = cpu_info[0]; +#else + max = __get_cpuid_max(0, 0); +#endif + + if (max >= 7) { + /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */ +#ifdef _MSC_VER + __cpuid(cpu_info, 7); + a = cpu_info[0]; + b7 = cpu_info[1]; + c = cpu_info[2]; + d = cpu_info[3]; +#else + __cpuid_count(7, 0, a, b7, c, d); +#endif + have_bmi1 = (b7 & bit_BMI) != 0; + have_bmi2 = (b7 & bit_BMI2) != 0; + } + + if (max >= 1) { +#ifdef _MSC_VER + __cpuid(cpu_info, 1); + a = cpu_info[0]; + b = cpu_info[1]; + c = cpu_info[2]; + d = cpu_info[3]; +#else + __cpuid(1, a, b, c, d); +#endif +#ifndef have_cmov + /* For 32-bit, 99% certainty that we're running on hardware that + supports cmov, but we still need to check. In case cmov is not + available, we'll use a small forward branch. */ + have_cmov = (d & bit_CMOV) != 0; +#endif + + /* MOVBE is only available on Intel Atom and Haswell CPUs, so we + need to probe for it. 
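+           (bit_MOVBE and bit_POPCNT come from the compiler's cpuid.h and
+           test CPUID.01H:ECX bits 22 and 23, respectively.)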
*/
+        have_movbe = (c & bit_MOVBE) != 0;
+        have_popcnt = (c & bit_POPCNT) != 0;
+
+#ifdef _MSC_VER
+        // FIXME: detect AVX1 & AVX2: https://gist.github.com/hi2p-perim/7855506
+        have_avx1 = true;
+        have_avx2 = true;
+#else
+        /* There are a number of things we must check before we can be
+           sure of not hitting invalid opcode.  */
+        if (c & bit_OSXSAVE) {
+            unsigned xcrl, xcrh;
+            /* The xgetbv instruction is not available to older versions of
+             * the assembler, so we encode the instruction manually.
+             */
+            asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0));
+            if ((xcrl & 6) == 6) {
+                have_avx1 = (c & bit_AVX) != 0;
+                have_avx2 = (b7 & bit_AVX2) != 0;
+            }
+        }
+#endif
+    }
+
+#ifdef _MSC_VER
+    __cpuid(cpu_info, 0x80000000);
+    max = cpu_info[0];
+#else
+    max = __get_cpuid_max(0x80000000, 0);
+#endif
+    if (max >= 1) {
+#ifdef _MSC_VER
+        __cpuid(cpu_info, 0x80000001);
+        a = cpu_info[0];
+        b = cpu_info[1];
+        c = cpu_info[2];
+        d = cpu_info[3];
+#else
+        __cpuid(0x80000001, a, b, c, d);
+#endif
+        /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs.  */
+        have_lzcnt = (c & bit_LZCNT) != 0;
+    }
+#endif /* CONFIG_CPUID_H */
+
+    s->tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
+    if (TCG_TARGET_REG_BITS == 64) {
+        s->tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
+    }
+    if (have_avx1) {
+        s->tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
+        s->tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
+    }
+    if (have_avx2) {
+        s->tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
+    }
+
+    s->tcg_target_call_clobber_regs = ALL_VECTOR_REGS;
+    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_EAX);
+    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_EDX);
+    tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_ECX);
+    if (TCG_TARGET_REG_BITS == 64) {
+#if !defined(_WIN64)
+        tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_RDI);
+        tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_RSI);
+#endif
+        tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R8);
+        tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R9);
+        tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R10);
+        tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R11);
+    }
+
+    s->reserved_regs = 0;
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
+}
+
+typedef struct {
+    DebugFrameHeader h;
+    uint8_t fde_def_cfa[4];
+    uint8_t fde_reg_ofs[14];
+} DebugFrame;
+
+/* We're expecting a 2 byte uleb128 encoded value.  */
+QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
+
+#if !defined(__ELF__)
+    /* Host machine without ELF.  */
+#elif TCG_TARGET_REG_BITS == 64
+#define ELF_HOST_MACHINE EM_X86_64
+static const DebugFrame debug_frame = {
+    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
+    .h.cie.id = -1,
+    .h.cie.version = 1,
+    .h.cie.code_align = 1,
+    .h.cie.data_align = 0x78,             /* sleb128 -8 */
+    .h.cie.return_column = 16,
+
+    /* Total FDE size does not include the "len" member.  */
+    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
+
+    .fde_def_cfa = {
+        12, 7,                          /* DW_CFA_def_cfa %rsp, ...  */
+        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
+        (FRAME_SIZE >> 7)
+    },
+    .fde_reg_ofs = {
+        0x90, 1,                        /* DW_CFA_offset, %rip, -8 */
+        /* The following ordering must match tcg_target_callee_save_regs.
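+           Each pair is DW_CFA_offset (0x80 | DWARF regno) followed by the
+           distance from the CFA as a uleb128, factored by data_align (-8):
+           e.g. 0x86, 2 below records %rbp (DWARF register 6) saved at
+           CFA-16.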
*/ + 0x86, 2, /* DW_CFA_offset, %rbp, -16 */ + 0x83, 3, /* DW_CFA_offset, %rbx, -24 */ + 0x8c, 4, /* DW_CFA_offset, %r12, -32 */ + 0x8d, 5, /* DW_CFA_offset, %r13, -40 */ + 0x8e, 6, /* DW_CFA_offset, %r14, -48 */ + 0x8f, 7, /* DW_CFA_offset, %r15, -56 */ + } +}; +#else +#define ELF_HOST_MACHINE EM_386 +static const DebugFrame debug_frame = { + .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ + .h.cie.id = -1, + .h.cie.version = 1, + .h.cie.code_align = 1, + .h.cie.data_align = 0x7c, /* sleb128 -4 */ + .h.cie.return_column = 8, + + /* Total FDE size does not include the "len" member. */ + .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), + + .fde_def_cfa = { + 12, 4, /* DW_CFA_def_cfa %esp, ... */ + (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ + (FRAME_SIZE >> 7) + }, + .fde_reg_ofs = { + 0x88, 1, /* DW_CFA_offset, %eip, -4 */ + /* The following ordering must match tcg_target_callee_save_regs. */ + 0x85, 2, /* DW_CFA_offset, %ebp, -8 */ + 0x83, 3, /* DW_CFA_offset, %ebx, -12 */ + 0x86, 4, /* DW_CFA_offset, %esi, -16 */ + 0x87, 5, /* DW_CFA_offset, %edi, -20 */ + } +}; +#endif + +#if defined(ELF_HOST_MACHINE) +void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) +{ + tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); +} +#endif diff --git a/qemu/tcg/i386/tcg-target.opc.h b/qemu/tcg/i386/tcg-target.opc.h new file mode 100644 index 00000000..13129418 --- /dev/null +++ b/qemu/tcg/i386/tcg-target.opc.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2019 Linaro + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. 
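+ * Each entry is DEF(name, nb_oargs, nb_iargs, nb_cargs, flags); for
+ * instance x86_shufps_vec takes two vector inputs plus one constant
+ * (the imm8 selector) and produces a single output.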
+ */ + +DEF(x86_shufps_vec, 1, 2, 1, IMPLVEC) +DEF(x86_vpblendvb_vec, 1, 3, 0, IMPLVEC) +DEF(x86_blend_vec, 1, 2, 1, IMPLVEC) +DEF(x86_packss_vec, 1, 2, 0, IMPLVEC) +DEF(x86_packus_vec, 1, 2, 0, IMPLVEC) +DEF(x86_psrldq_vec, 1, 1, 1, IMPLVEC) +DEF(x86_vperm2i128_vec, 1, 2, 1, IMPLVEC) +DEF(x86_punpckl_vec, 1, 2, 0, IMPLVEC) +DEF(x86_punpckh_vec, 1, 2, 0, IMPLVEC) diff --git a/qemu/tcg/ia64/tcg-target.c b/qemu/tcg/ia64/tcg-target.c deleted file mode 100644 index a7a681c7..00000000 --- a/qemu/tcg/ia64/tcg-target.c +++ /dev/null @@ -1,2446 +0,0 @@ -/* - * Tiny Code Generator for QEMU - * - * Copyright (c) 2009-2010 Aurelien Jarno - * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -/* - * Register definitions - */ - -#ifndef NDEBUG -static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { - "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", - "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", - "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", - "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", - "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39", - "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", - "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55", - "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63", -}; -#endif - -#ifdef CONFIG_USE_GUEST_BASE -#define TCG_GUEST_BASE_REG TCG_REG_R55 -#else -#define TCG_GUEST_BASE_REG TCG_REG_R0 -#endif -#ifndef GUEST_BASE -#define GUEST_BASE 0 -#endif - -/* Branch registers */ -enum { - TCG_REG_B0 = 0, - TCG_REG_B1, - TCG_REG_B2, - TCG_REG_B3, - TCG_REG_B4, - TCG_REG_B5, - TCG_REG_B6, - TCG_REG_B7, -}; - -/* Floating point registers */ -enum { - TCG_REG_F0 = 0, - TCG_REG_F1, - TCG_REG_F2, - TCG_REG_F3, - TCG_REG_F4, - TCG_REG_F5, - TCG_REG_F6, - TCG_REG_F7, - TCG_REG_F8, - TCG_REG_F9, - TCG_REG_F10, - TCG_REG_F11, - TCG_REG_F12, - TCG_REG_F13, - TCG_REG_F14, - TCG_REG_F15, -}; - -/* Predicate registers */ -enum { - TCG_REG_P0 = 0, - TCG_REG_P1, - TCG_REG_P2, - TCG_REG_P3, - TCG_REG_P4, - TCG_REG_P5, - TCG_REG_P6, - TCG_REG_P7, - TCG_REG_P8, - TCG_REG_P9, - TCG_REG_P10, - TCG_REG_P11, - TCG_REG_P12, - TCG_REG_P13, - TCG_REG_P14, - TCG_REG_P15, -}; - -/* Application registers */ -enum { - TCG_REG_PFS = 64, -}; - -static const int tcg_target_reg_alloc_order[] = { - TCG_REG_R35, - TCG_REG_R36, - TCG_REG_R37, - TCG_REG_R38, - TCG_REG_R39, - TCG_REG_R40, - TCG_REG_R41, - TCG_REG_R42, - TCG_REG_R43, - TCG_REG_R44, - TCG_REG_R45, - TCG_REG_R46, - TCG_REG_R47, - TCG_REG_R48, - TCG_REG_R49, - TCG_REG_R50, - TCG_REG_R51, - TCG_REG_R52, - TCG_REG_R53, - TCG_REG_R54, - TCG_REG_R55, - TCG_REG_R14, - TCG_REG_R15, - TCG_REG_R16, - TCG_REG_R17, - TCG_REG_R18, - TCG_REG_R19, - TCG_REG_R20, - TCG_REG_R21, - TCG_REG_R22, - TCG_REG_R23, - TCG_REG_R24, - TCG_REG_R25, - TCG_REG_R26, - TCG_REG_R27, - TCG_REG_R28, - TCG_REG_R29, - TCG_REG_R30, - TCG_REG_R31, - TCG_REG_R56, - TCG_REG_R57, - TCG_REG_R58, - TCG_REG_R59, - TCG_REG_R60, - TCG_REG_R61, - TCG_REG_R62, - TCG_REG_R63, - TCG_REG_R8, - TCG_REG_R9, - TCG_REG_R10, - TCG_REG_R11 -}; - -static const int tcg_target_call_iarg_regs[8] = { - TCG_REG_R56, - TCG_REG_R57, - TCG_REG_R58, - TCG_REG_R59, - TCG_REG_R60, - TCG_REG_R61, - TCG_REG_R62, - TCG_REG_R63, -}; - -static const int tcg_target_call_oarg_regs[] = { - TCG_REG_R8 -}; - -/* - * opcode formation - */ - -/* bundle templates: stops (double bar in the IA64 manual) are marked with - an uppercase letter. 
*/ -enum { - mii = 0x00, - miI = 0x01, - mIi = 0x02, - mII = 0x03, - mlx = 0x04, - mLX = 0x05, - mmi = 0x08, - mmI = 0x09, - Mmi = 0x0a, - MmI = 0x0b, - mfi = 0x0c, - mfI = 0x0d, - mmf = 0x0e, - mmF = 0x0f, - mib = 0x10, - miB = 0x11, - mbb = 0x12, - mbB = 0x13, - bbb = 0x16, - bbB = 0x17, - mmb = 0x18, - mmB = 0x19, - mfb = 0x1c, - mfB = 0x1d, -}; - -enum { - OPC_ADD_A1 = 0x10000000000ull, - OPC_AND_A1 = 0x10060000000ull, - OPC_AND_A3 = 0x10160000000ull, - OPC_ANDCM_A1 = 0x10068000000ull, - OPC_ANDCM_A3 = 0x10168000000ull, - OPC_ADDS_A4 = 0x10800000000ull, - OPC_ADDL_A5 = 0x12000000000ull, - OPC_ALLOC_M34 = 0x02c00000000ull, - OPC_BR_DPTK_FEW_B1 = 0x08400000000ull, - OPC_BR_SPTK_MANY_B1 = 0x08000001000ull, - OPC_BR_CALL_SPNT_FEW_B3 = 0x0a200000000ull, - OPC_BR_SPTK_MANY_B4 = 0x00100001000ull, - OPC_BR_CALL_SPTK_MANY_B5 = 0x02100001000ull, - OPC_BR_RET_SPTK_MANY_B4 = 0x00108001100ull, - OPC_BRL_SPTK_MANY_X3 = 0x18000001000ull, - OPC_BRL_CALL_SPNT_MANY_X4 = 0x1a200001000ull, - OPC_BRL_CALL_SPTK_MANY_X4 = 0x1a000001000ull, - OPC_CMP_LT_A6 = 0x18000000000ull, - OPC_CMP_LTU_A6 = 0x1a000000000ull, - OPC_CMP_EQ_A6 = 0x1c000000000ull, - OPC_CMP4_LT_A6 = 0x18400000000ull, - OPC_CMP4_LTU_A6 = 0x1a400000000ull, - OPC_CMP4_EQ_A6 = 0x1c400000000ull, - OPC_DEP_I14 = 0x0ae00000000ull, - OPC_DEP_I15 = 0x08000000000ull, - OPC_DEP_Z_I12 = 0x0a600000000ull, - OPC_EXTR_I11 = 0x0a400002000ull, - OPC_EXTR_U_I11 = 0x0a400000000ull, - OPC_FCVT_FX_TRUNC_S1_F10 = 0x004d0000000ull, - OPC_FCVT_FXU_TRUNC_S1_F10 = 0x004d8000000ull, - OPC_FCVT_XF_F11 = 0x000e0000000ull, - OPC_FMA_S1_F1 = 0x10400000000ull, - OPC_FNMA_S1_F1 = 0x18400000000ull, - OPC_FRCPA_S1_F6 = 0x00600000000ull, - OPC_GETF_SIG_M19 = 0x08708000000ull, - OPC_LD1_M1 = 0x08000000000ull, - OPC_LD1_M3 = 0x0a000000000ull, - OPC_LD2_M1 = 0x08040000000ull, - OPC_LD2_M3 = 0x0a040000000ull, - OPC_LD4_M1 = 0x08080000000ull, - OPC_LD4_M3 = 0x0a080000000ull, - OPC_LD8_M1 = 0x080c0000000ull, - OPC_LD8_M3 = 0x0a0c0000000ull, - OPC_MUX1_I3 = 0x0eca0000000ull, - OPC_NOP_B9 = 0x04008000000ull, - OPC_NOP_F16 = 0x00008000000ull, - OPC_NOP_I18 = 0x00008000000ull, - OPC_NOP_M48 = 0x00008000000ull, - OPC_MOV_I21 = 0x00e00100000ull, - OPC_MOV_RET_I21 = 0x00e00500000ull, - OPC_MOV_I22 = 0x00188000000ull, - OPC_MOV_I_I26 = 0x00150000000ull, - OPC_MOVL_X2 = 0x0c000000000ull, - OPC_OR_A1 = 0x10070000000ull, - OPC_OR_A3 = 0x10170000000ull, - OPC_SETF_EXP_M18 = 0x0c748000000ull, - OPC_SETF_SIG_M18 = 0x0c708000000ull, - OPC_SHL_I7 = 0x0f240000000ull, - OPC_SHR_I5 = 0x0f220000000ull, - OPC_SHR_U_I5 = 0x0f200000000ull, - OPC_SHRP_I10 = 0x0ac00000000ull, - OPC_SXT1_I29 = 0x000a0000000ull, - OPC_SXT2_I29 = 0x000a8000000ull, - OPC_SXT4_I29 = 0x000b0000000ull, - OPC_ST1_M4 = 0x08c00000000ull, - OPC_ST2_M4 = 0x08c40000000ull, - OPC_ST4_M4 = 0x08c80000000ull, - OPC_ST8_M4 = 0x08cc0000000ull, - OPC_SUB_A1 = 0x10028000000ull, - OPC_SUB_A3 = 0x10128000000ull, - OPC_UNPACK4_L_I2 = 0x0f860000000ull, - OPC_XMA_L_F2 = 0x1d000000000ull, - OPC_XOR_A1 = 0x10078000000ull, - OPC_XOR_A3 = 0x10178000000ull, - OPC_ZXT1_I29 = 0x00080000000ull, - OPC_ZXT2_I29 = 0x00088000000ull, - OPC_ZXT4_I29 = 0x00090000000ull, - - INSN_NOP_M = OPC_NOP_M48, /* nop.m 0 */ - INSN_NOP_I = OPC_NOP_I18, /* nop.i 0 */ -}; - -static inline uint64_t tcg_opc_a1(int qp, uint64_t opc, int r1, - int r2, int r3) -{ - return opc - | ((r3 & 0x7f) << 20) - | ((r2 & 0x7f) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_a3(int qp, uint64_t opc, int r1, - uint64_t imm, int r3) -{ - return opc - | ((imm & 
0x80) << 29) /* s */ - | ((imm & 0x7f) << 13) /* imm7b */ - | ((r3 & 0x7f) << 20) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_a4(int qp, uint64_t opc, int r1, - uint64_t imm, int r3) -{ - return opc - | ((imm & 0x2000) << 23) /* s */ - | ((imm & 0x1f80) << 20) /* imm6d */ - | ((imm & 0x007f) << 13) /* imm7b */ - | ((r3 & 0x7f) << 20) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_a5(int qp, uint64_t opc, int r1, - uint64_t imm, int r3) -{ - return opc - | ((imm & 0x200000) << 15) /* s */ - | ((imm & 0x1f0000) << 6) /* imm5c */ - | ((imm & 0x00ff80) << 20) /* imm9d */ - | ((imm & 0x00007f) << 13) /* imm7b */ - | ((r3 & 0x03) << 20) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_a6(int qp, uint64_t opc, int p1, - int p2, int r2, int r3) -{ - return opc - | ((p2 & 0x3f) << 27) - | ((r3 & 0x7f) << 20) - | ((r2 & 0x7f) << 13) - | ((p1 & 0x3f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_b1(int qp, uint64_t opc, uint64_t imm) -{ - return opc - | ((imm & 0x100000) << 16) /* s */ - | ((imm & 0x0fffff) << 13) /* imm20b */ - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_b3(int qp, uint64_t opc, int b1, uint64_t imm) -{ - return opc - | ((imm & 0x100000) << 16) /* s */ - | ((imm & 0x0fffff) << 13) /* imm20b */ - | ((b1 & 0x7) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_b4(int qp, uint64_t opc, int b2) -{ - return opc - | ((b2 & 0x7) << 13) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_b5(int qp, uint64_t opc, int b1, int b2) -{ - return opc - | ((b2 & 0x7) << 13) - | ((b1 & 0x7) << 6) - | (qp & 0x3f); -} - - -static inline uint64_t tcg_opc_b9(int qp, uint64_t opc, uint64_t imm) -{ - return opc - | ((imm & 0x100000) << 16) /* i */ - | ((imm & 0x0fffff) << 6) /* imm20a */ - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_f1(int qp, uint64_t opc, int f1, - int f3, int f4, int f2) -{ - return opc - | ((f4 & 0x7f) << 27) - | ((f3 & 0x7f) << 20) - | ((f2 & 0x7f) << 13) - | ((f1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_f2(int qp, uint64_t opc, int f1, - int f3, int f4, int f2) -{ - return opc - | ((f4 & 0x7f) << 27) - | ((f3 & 0x7f) << 20) - | ((f2 & 0x7f) << 13) - | ((f1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_f6(int qp, uint64_t opc, int f1, - int p2, int f2, int f3) -{ - return opc - | ((p2 & 0x3f) << 27) - | ((f3 & 0x7f) << 20) - | ((f2 & 0x7f) << 13) - | ((f1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_f10(int qp, uint64_t opc, int f1, int f2) -{ - return opc - | ((f2 & 0x7f) << 13) - | ((f1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_f11(int qp, uint64_t opc, int f1, int f2) -{ - return opc - | ((f2 & 0x7f) << 13) - | ((f1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_f16(int qp, uint64_t opc, uint64_t imm) -{ - return opc - | ((imm & 0x100000) << 16) /* i */ - | ((imm & 0x0fffff) << 6) /* imm20a */ - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i2(int qp, uint64_t opc, int r1, - int r2, int r3) -{ - return opc - | ((r3 & 0x7f) << 20) - | ((r2 & 0x7f) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i3(int qp, uint64_t opc, int r1, - int r2, int mbtype) -{ - return opc - | ((mbtype & 0x0f) << 20) - | ((r2 & 0x7f) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i5(int qp, uint64_t opc, int r1, - int r3, int r2) -{ - return opc - | ((r3 & 0x7f) << 20) - | ((r2 & 
0x7f) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i7(int qp, uint64_t opc, int r1, - int r2, int r3) -{ - return opc - | ((r3 & 0x7f) << 20) - | ((r2 & 0x7f) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i10(int qp, uint64_t opc, int r1, - int r2, int r3, uint64_t count) -{ - return opc - | ((count & 0x3f) << 27) - | ((r3 & 0x7f) << 20) - | ((r2 & 0x7f) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i11(int qp, uint64_t opc, int r1, - int r3, uint64_t pos, uint64_t len) -{ - return opc - | ((len & 0x3f) << 27) - | ((r3 & 0x7f) << 20) - | ((pos & 0x3f) << 14) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i12(int qp, uint64_t opc, int r1, - int r2, uint64_t pos, uint64_t len) -{ - return opc - | ((len & 0x3f) << 27) - | ((pos & 0x3f) << 20) - | ((r2 & 0x7f) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i14(int qp, uint64_t opc, int r1, uint64_t imm, - int r3, uint64_t pos, uint64_t len) -{ - return opc - | ((imm & 0x01) << 36) - | ((len & 0x3f) << 27) - | ((r3 & 0x7f) << 20) - | ((pos & 0x3f) << 14) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i15(int qp, uint64_t opc, int r1, int r2, - int r3, uint64_t pos, uint64_t len) -{ - return opc - | ((pos & 0x3f) << 31) - | ((len & 0x0f) << 27) - | ((r3 & 0x7f) << 20) - | ((r2 & 0x7f) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i18(int qp, uint64_t opc, uint64_t imm) -{ - return opc - | ((imm & 0x100000) << 16) /* i */ - | ((imm & 0x0fffff) << 6) /* imm20a */ - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i21(int qp, uint64_t opc, int b1, - int r2, uint64_t imm) -{ - return opc - | ((imm & 0x1ff) << 24) - | ((r2 & 0x7f) << 13) - | ((b1 & 0x7) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i22(int qp, uint64_t opc, int r1, int b2) -{ - return opc - | ((b2 & 0x7) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i26(int qp, uint64_t opc, int ar3, int r2) -{ - return opc - | ((ar3 & 0x7f) << 20) - | ((r2 & 0x7f) << 13) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_i29(int qp, uint64_t opc, int r1, int r3) -{ - return opc - | ((r3 & 0x7f) << 20) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_l2(uint64_t imm) -{ - return (imm & 0x7fffffffffc00000ull) >> 22; -} - -static inline uint64_t tcg_opc_l3(uint64_t imm) -{ - return (imm & 0x07fffffffff00000ull) >> 18; -} - -#define tcg_opc_l4 tcg_opc_l3 - -static inline uint64_t tcg_opc_m1(int qp, uint64_t opc, int r1, int r3) -{ - return opc - | ((r3 & 0x7f) << 20) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_m3(int qp, uint64_t opc, int r1, - int r3, uint64_t imm) -{ - return opc - | ((imm & 0x100) << 28) /* s */ - | ((imm & 0x080) << 20) /* i */ - | ((imm & 0x07f) << 13) /* imm7b */ - | ((r3 & 0x7f) << 20) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_m4(int qp, uint64_t opc, int r2, int r3) -{ - return opc - | ((r3 & 0x7f) << 20) - | ((r2 & 0x7f) << 13) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_m18(int qp, uint64_t opc, int f1, int r2) -{ - return opc - | ((r2 & 0x7f) << 13) - | ((f1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_m19(int qp, uint64_t opc, int r1, int f2) -{ - return opc - | ((f2 & 0x7f) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline 
uint64_t tcg_opc_m34(int qp, uint64_t opc, int r1, - int sof, int sol, int sor) -{ - return opc - | ((sor & 0x0f) << 27) - | ((sol & 0x7f) << 20) - | ((sof & 0x7f) << 13) - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_m48(int qp, uint64_t opc, uint64_t imm) -{ - return opc - | ((imm & 0x100000) << 16) /* i */ - | ((imm & 0x0fffff) << 6) /* imm20a */ - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_x2(int qp, uint64_t opc, - int r1, uint64_t imm) -{ - return opc - | ((imm & 0x8000000000000000ull) >> 27) /* i */ - | (imm & 0x0000000000200000ull) /* ic */ - | ((imm & 0x00000000001f0000ull) << 6) /* imm5c */ - | ((imm & 0x000000000000ff80ull) << 20) /* imm9d */ - | ((imm & 0x000000000000007full) << 13) /* imm7b */ - | ((r1 & 0x7f) << 6) - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_x3(int qp, uint64_t opc, uint64_t imm) -{ - return opc - | ((imm & 0x0800000000000000ull) >> 23) /* i */ - | ((imm & 0x00000000000fffffull) << 13) /* imm20b */ - | (qp & 0x3f); -} - -static inline uint64_t tcg_opc_x4(int qp, uint64_t opc, int b1, uint64_t imm) -{ - return opc - | ((imm & 0x0800000000000000ull) >> 23) /* i */ - | ((imm & 0x00000000000fffffull) << 13) /* imm20b */ - | ((b1 & 0x7) << 6) - | (qp & 0x3f); -} - - -/* - * Relocations - Note that we never encode branches elsewhere than slot 2. - */ - -static void reloc_pcrel21b_slot2(tcg_insn_unit *pc, tcg_insn_unit *target) -{ - uint64_t imm = target - pc; - - pc->hi = (pc->hi & 0xf700000fffffffffull) - | ((imm & 0x100000) << 39) /* s */ - | ((imm & 0x0fffff) << 36); /* imm20b */ -} - -static uint64_t get_reloc_pcrel21b_slot2(tcg_insn_unit *pc) -{ - int64_t high = pc->hi; - - return ((high >> 39) & 0x100000) + /* s */ - ((high >> 36) & 0x0fffff); /* imm20b */ -} - -static void patch_reloc(tcg_insn_unit *code_ptr, int type, - intptr_t value, intptr_t addend) -{ - assert(addend == 0); - assert(type == R_IA64_PCREL21B); - reloc_pcrel21b_slot2(code_ptr, (tcg_insn_unit *)value); -} - -/* - * Constraints - */ - -/* parse target specific constraints */ -static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) -{ - const char *ct_str; - - ct_str = *pct_str; - switch(ct_str[0]) { - case 'r': - ct->ct |= TCG_CT_REG; - tcg_regset_set(ct->u.regs, 0xffffffffffffffffull); - break; - case 'I': - ct->ct |= TCG_CT_CONST_S22; - break; - case 'S': - ct->ct |= TCG_CT_REG; - tcg_regset_set(ct->u.regs, 0xffffffffffffffffull); -#if defined(CONFIG_SOFTMMU) - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R56); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R57); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R58); -#endif - break; - case 'Z': - /* We are cheating a bit here, using the fact that the register - r0 is also the register number 0. Hence there is no need - to check for const_args in each instruction. 
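The relocation above touches only the s and imm20b fields in the bundle's high word, leaving the rest of slot 2 intact. A minimal round-trip sketch of that field surgery, using the same masks as reloc_pcrel21b_slot2 and get_reloc_pcrel21b_slot2 (the function names here are illustrative, not part of the source):

#include <assert.h>
#include <stdint.h>

/* Insert a 21-bit displacement (s + imm20b) into slot 2 of a bundle's
   high word, exactly as reloc_pcrel21b_slot2 does. */
static uint64_t patch_imm21(uint64_t hi, uint64_t imm)
{
    return (hi & 0xf700000fffffffffull)
           | ((imm & 0x100000) << 39)    /* s: sign of the displacement */
           | ((imm & 0x0fffff) << 36);   /* imm20b: low 20 bits */
}

/* Read the field back, as get_reloc_pcrel21b_slot2 does. */
static uint64_t read_imm21(uint64_t hi)
{
    return ((hi >> 39) & 0x100000) | ((hi >> 36) & 0x0fffff);
}

int main(void)
{
    uint64_t hi = 0xffffffffffffffffull;   /* pre-existing slot-2 bits */
    uint64_t disp = 0x123456;              /* any 21-bit value */
    assert(read_imm21(patch_imm21(hi, disp)) == disp);
    return 0;
}

Any 21-bit value survives the insert/extract pair, which is what lets the branch emitters below stash an unresolved label's previous field value back into the bundle during retranslation.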
*/ - ct->ct |= TCG_CT_CONST_ZERO; - break; - default: - return -1; - } - ct_str++; - *pct_str = ct_str; - return 0; -} - -/* test if a constant matches the constraint */ -static inline int tcg_target_const_match(tcg_target_long val, TCGType type, - const TCGArgConstraint *arg_ct) -{ - int ct; - ct = arg_ct->ct; - if (ct & TCG_CT_CONST) - return 1; - else if ((ct & TCG_CT_CONST_ZERO) && val == 0) - return 1; - else if ((ct & TCG_CT_CONST_S22) && val == ((int32_t)val << 10) >> 10) - return 1; - else - return 0; -} - -/* - * Code generation - */ - -static tcg_insn_unit *tb_ret_addr; - -static inline void tcg_out_bundle(TCGContext *s, int template, - uint64_t slot0, uint64_t slot1, - uint64_t slot2) -{ - template &= 0x1f; /* 5 bits */ - slot0 &= 0x1ffffffffffull; /* 41 bits */ - slot1 &= 0x1ffffffffffull; /* 41 bits */ - slot2 &= 0x1ffffffffffull; /* 41 bits */ - - *s->code_ptr++ = (tcg_insn_unit){ - (slot1 << 46) | (slot0 << 5) | template, - (slot2 << 23) | (slot1 >> 18) - }; -} - -static inline uint64_t tcg_opc_mov_a(int qp, TCGReg dst, TCGReg src) -{ - return tcg_opc_a4(qp, OPC_ADDS_A4, dst, 0, src); -} - -static inline void tcg_out_mov(TCGContext *s, TCGType type, - TCGReg ret, TCGReg arg) -{ - tcg_out_bundle(s, mmI, - INSN_NOP_M, - INSN_NOP_M, - tcg_opc_mov_a(TCG_REG_P0, ret, arg)); -} - -static inline uint64_t tcg_opc_movi_a(int qp, TCGReg dst, int64_t src) -{ - assert(src == sextract64(src, 0, 22)); - return tcg_opc_a5(qp, OPC_ADDL_A5, dst, src, TCG_REG_R0); -} - -static inline void tcg_out_movi(TCGContext *s, TCGType type, - TCGReg reg, tcg_target_long arg) -{ - tcg_out_bundle(s, mLX, - INSN_NOP_M, - tcg_opc_l2 (arg), - tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, reg, arg)); -} - -static void tcg_out_br(TCGContext *s, int label_index) -{ - TCGLabel *l = &s->labels[label_index]; - uint64_t imm; - - /* We pay attention here to not modify the branch target by reading - the existing value and using it again. This ensure that caches and - memory are kept coherent during retranslation. */ - if (l->has_value) { - imm = l->u.value_ptr - s->code_ptr; - } else { - imm = get_reloc_pcrel21b_slot2(s->code_ptr); - tcg_out_reloc(s, s->code_ptr, R_IA64_PCREL21B, label_index, 0); - } - - tcg_out_bundle(s, mmB, - INSN_NOP_M, - INSN_NOP_M, - tcg_opc_b1(TCG_REG_P0, OPC_BR_SPTK_MANY_B1, imm)); -} - -static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *desc) -{ - uintptr_t func = desc->lo, gp = desc->hi, disp; - - /* Look through the function descriptor. */ - tcg_out_bundle(s, mlx, - INSN_NOP_M, - tcg_opc_l2 (gp), - tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, TCG_REG_R1, gp)); - disp = (tcg_insn_unit *)func - s->code_ptr; - tcg_out_bundle(s, mLX, - INSN_NOP_M, - tcg_opc_l4 (disp), - tcg_opc_x4 (TCG_REG_P0, OPC_BRL_CALL_SPTK_MANY_X4, - TCG_REG_B0, disp)); -} - -static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg) -{ - uint64_t imm, opc1; - - /* At least arg == 0 is a common operation. 
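tcg_out_bundle below packs a 5-bit template and three 41-bit slots into the two 64-bit halves of a 16-byte bundle, with slot 1 split 18/23 across the boundary. A self-contained sketch of the packing and of reassembling the straddling slot (pack_bundle and unpack_slot1 are illustrative names):

#include <assert.h>
#include <stdint.h>

#define SLOT_MASK 0x1ffffffffffull   /* 41 bits per slot */

/* Pack template + three slots the way tcg_out_bundle does:
   lo = slot1[17:0] : slot0 : template, hi = slot2 : slot1[40:18]. */
static void pack_bundle(uint64_t out[2], unsigned tmpl,
                        uint64_t s0, uint64_t s1, uint64_t s2)
{
    s0 &= SLOT_MASK; s1 &= SLOT_MASK; s2 &= SLOT_MASK;
    out[0] = (s1 << 46) | (s0 << 5) | (tmpl & 0x1f);
    out[1] = (s2 << 23) | (s1 >> 18);
}

/* Reassemble the slot that straddles the two halves. */
static uint64_t unpack_slot1(const uint64_t b[2])
{
    return ((b[0] >> 46) | (b[1] << 18)) & SLOT_MASK;
}

int main(void)
{
    uint64_t b[2];
    uint64_t s1 = 0x1234567890aull;        /* fits in 41 bits */
    pack_bundle(b, 0x0f /* mmF */, 0, s1, 0);
    assert(unpack_slot1(b) == s1);
    return 0;
}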
*/ - if (arg == sextract64(arg, 0, 22)) { - opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R8, arg); - } else { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg); - opc1 = INSN_NOP_M; - } - - imm = tb_ret_addr - s->code_ptr; - - tcg_out_bundle(s, mLX, - opc1, - tcg_opc_l3 (imm), - tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, imm)); -} - -static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg) -{ - if (s->tb_jmp_offset) { - /* direct jump method */ - tcg_abort(); - } else { - /* indirect jump method */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, - (tcg_target_long)(s->tb_next + arg)); - tcg_out_bundle(s, MmI, - tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, - TCG_REG_R2, TCG_REG_R2), - INSN_NOP_M, - tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6, - TCG_REG_R2, 0)); - tcg_out_bundle(s, mmB, - INSN_NOP_M, - INSN_NOP_M, - tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4, - TCG_REG_B6)); - } - s->tb_next_offset[arg] = tcg_current_code_size(s); -} - -static inline void tcg_out_jmp(TCGContext *s, TCGArg addr) -{ - tcg_out_bundle(s, mmI, - INSN_NOP_M, - INSN_NOP_M, - tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6, addr, 0)); - tcg_out_bundle(s, mmB, - INSN_NOP_M, - INSN_NOP_M, - tcg_opc_b4(TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6)); -} - -static inline void tcg_out_ld_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg, - TCGArg arg1, tcg_target_long arg2) -{ - if (arg2 == ((int16_t)arg2 >> 2) << 2) { - tcg_out_bundle(s, MmI, - tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, - TCG_REG_R2, arg2, arg1), - tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2), - INSN_NOP_I); - } else { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2); - tcg_out_bundle(s, MmI, - tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, - TCG_REG_R2, TCG_REG_R2, arg1), - tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2), - INSN_NOP_I); - } -} - -static inline void tcg_out_st_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg, - TCGArg arg1, tcg_target_long arg2) -{ - if (arg2 == ((int16_t)arg2 >> 2) << 2) { - tcg_out_bundle(s, MmI, - tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, - TCG_REG_R2, arg2, arg1), - tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2), - INSN_NOP_I); - } else { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2); - tcg_out_bundle(s, MmI, - tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, - TCG_REG_R2, TCG_REG_R2, arg1), - tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2), - INSN_NOP_I); - } -} - -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) -{ - if (type == TCG_TYPE_I32) { - tcg_out_ld_rel(s, OPC_LD4_M1, arg, arg1, arg2); - } else { - tcg_out_ld_rel(s, OPC_LD8_M1, arg, arg1, arg2); - } -} - -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) -{ - if (type == TCG_TYPE_I32) { - tcg_out_st_rel(s, OPC_ST4_M4, arg, arg1, arg2); - } else { - tcg_out_st_rel(s, OPC_ST8_M4, arg, arg1, arg2); - } -} - -static inline void tcg_out_alu(TCGContext *s, uint64_t opc_a1, uint64_t opc_a3, - TCGReg ret, TCGArg arg1, int const_arg1, - TCGArg arg2, int const_arg2) -{ - uint64_t opc1 = 0, opc2 = 0, opc3 = 0; - - if (const_arg2 && arg2 != 0) { - opc2 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R3, arg2); - arg2 = TCG_REG_R3; - } - if (const_arg1 && arg1 != 0) { - if (opc_a3 && arg1 == (int8_t)arg1) { - opc3 = tcg_opc_a3(TCG_REG_P0, opc_a3, ret, arg1, arg2); - } else { - opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, arg1); - arg1 = TCG_REG_R2; - } - } - if (opc3 == 0) { - opc3 = tcg_opc_a1(TCG_REG_P0, opc_a1, ret, arg1, arg2); - } - - tcg_out_bundle(s, (opc1 || opc2 ? mII : miI), - opc1 ? 
opc1 : INSN_NOP_M, - opc2 ? opc2 : INSN_NOP_I, - opc3); -} - -static inline void tcg_out_add(TCGContext *s, TCGReg ret, TCGReg arg1, - TCGArg arg2, int const_arg2) -{ - if (const_arg2 && arg2 == sextract64(arg2, 0, 14)) { - tcg_out_bundle(s, mmI, - INSN_NOP_M, - INSN_NOP_M, - tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, arg2, arg1)); - } else { - tcg_out_alu(s, OPC_ADD_A1, 0, ret, arg1, 0, arg2, const_arg2); - } -} - -static inline void tcg_out_sub(TCGContext *s, TCGReg ret, TCGArg arg1, - int const_arg1, TCGArg arg2, int const_arg2) -{ - if (!const_arg1 && const_arg2 && -arg2 == sextract64(-arg2, 0, 14)) { - tcg_out_bundle(s, mmI, - INSN_NOP_M, - INSN_NOP_M, - tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, -arg2, arg1)); - } else { - tcg_out_alu(s, OPC_SUB_A1, OPC_SUB_A3, ret, - arg1, const_arg1, arg2, const_arg2); - } -} - -static inline void tcg_out_eqv(TCGContext *s, TCGArg ret, - TCGArg arg1, int const_arg1, - TCGArg arg2, int const_arg2) -{ - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_a1 (TCG_REG_P0, OPC_XOR_A1, ret, arg1, arg2), - tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret)); -} - -static inline void tcg_out_nand(TCGContext *s, TCGArg ret, - TCGArg arg1, int const_arg1, - TCGArg arg2, int const_arg2) -{ - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_a1 (TCG_REG_P0, OPC_AND_A1, ret, arg1, arg2), - tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret)); -} - -static inline void tcg_out_nor(TCGContext *s, TCGArg ret, - TCGArg arg1, int const_arg1, - TCGArg arg2, int const_arg2) -{ - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, arg2), - tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret)); -} - -static inline void tcg_out_orc(TCGContext *s, TCGArg ret, - TCGArg arg1, int const_arg1, - TCGArg arg2, int const_arg2) -{ - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, TCG_REG_R2, -1, arg2), - tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, TCG_REG_R2)); -} - -static inline void tcg_out_mul(TCGContext *s, TCGArg ret, - TCGArg arg1, TCGArg arg2) -{ - tcg_out_bundle(s, mmI, - tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F6, arg1), - tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F7, arg2), - INSN_NOP_I); - tcg_out_bundle(s, mmF, - INSN_NOP_M, - INSN_NOP_M, - tcg_opc_f2 (TCG_REG_P0, OPC_XMA_L_F2, TCG_REG_F6, TCG_REG_F6, - TCG_REG_F7, TCG_REG_F0)); - tcg_out_bundle(s, miI, - tcg_opc_m19(TCG_REG_P0, OPC_GETF_SIG_M19, ret, TCG_REG_F6), - INSN_NOP_I, - INSN_NOP_I); -} - -static inline void tcg_out_sar_i32(TCGContext *s, TCGArg ret, TCGArg arg1, - TCGArg arg2, int const_arg2) -{ - if (const_arg2) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11, - ret, arg1, arg2, 31 - arg2)); - } else { - tcg_out_bundle(s, mII, - tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, - TCG_REG_R3, 0x1f, arg2), - tcg_opc_i29(TCG_REG_P0, OPC_SXT4_I29, TCG_REG_R2, arg1), - tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret, - TCG_REG_R2, TCG_REG_R3)); - } -} - -static inline void tcg_out_sar_i64(TCGContext *s, TCGArg ret, TCGArg arg1, - TCGArg arg2, int const_arg2) -{ - if (const_arg2) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11, - ret, arg1, arg2, 63 - arg2)); - } else { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret, arg1, arg2)); - } -} - -static inline void tcg_out_shl_i32(TCGContext *s, TCGArg ret, TCGArg arg1, - TCGArg arg2, int const_arg2) -{ - if (const_arg2) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - 
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, - arg1, 63 - arg2, 31 - arg2)); - } else { - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R2, - 0x1f, arg2), - tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret, - arg1, TCG_REG_R2)); - } -} - -static inline void tcg_out_shl_i64(TCGContext *s, TCGArg ret, TCGArg arg1, - TCGArg arg2, int const_arg2) -{ - if (const_arg2) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, - arg1, 63 - arg2, 63 - arg2)); - } else { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret, - arg1, arg2)); - } -} - -static inline void tcg_out_shr_i32(TCGContext *s, TCGArg ret, TCGArg arg1, - TCGArg arg2, int const_arg2) -{ - if (const_arg2) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret, - arg1, arg2, 31 - arg2)); - } else { - tcg_out_bundle(s, mII, - tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3, - 0x1f, arg2), - tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, TCG_REG_R2, arg1), - tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret, - TCG_REG_R2, TCG_REG_R3)); - } -} - -static inline void tcg_out_shr_i64(TCGContext *s, TCGArg ret, TCGArg arg1, - TCGArg arg2, int const_arg2) -{ - if (const_arg2) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret, - arg1, arg2, 63 - arg2)); - } else { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret, - arg1, arg2)); - } -} - -static inline void tcg_out_rotl_i32(TCGContext *s, TCGArg ret, TCGArg arg1, - TCGArg arg2, int const_arg2) -{ - if (const_arg2) { - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2, - TCG_REG_R2, arg1, arg1), - tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret, - TCG_REG_R2, 32 - arg2, 31)); - } else { - tcg_out_bundle(s, miI, - INSN_NOP_M, - tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2, - TCG_REG_R2, arg1, arg1), - tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3, - 0x1f, arg2)); - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R3, - 0x20, TCG_REG_R3), - tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret, - TCG_REG_R2, TCG_REG_R3)); - } -} - -static inline void tcg_out_rotl_i64(TCGContext *s, TCGArg ret, TCGArg arg1, - TCGArg arg2, int const_arg2) -{ - if (const_arg2) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1, - arg1, 0x40 - arg2)); - } else { - tcg_out_bundle(s, mII, - tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R2, - 0x40, arg2), - tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R3, - arg1, arg2), - tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R2, - arg1, TCG_REG_R2)); - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, - TCG_REG_R2, TCG_REG_R3)); - } -} - -static inline void tcg_out_rotr_i32(TCGContext *s, TCGArg ret, TCGArg arg1, - TCGArg arg2, int const_arg2) -{ - if (const_arg2) { - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2, - TCG_REG_R2, arg1, arg1), - tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret, - TCG_REG_R2, arg2, 31)); - } else { - tcg_out_bundle(s, mII, - tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3, - 0x1f, arg2), - tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2, - TCG_REG_R2, arg1, arg1), - tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret, - TCG_REG_R2, TCG_REG_R3)); - } -} - -static inline void tcg_out_rotr_i64(TCGContext *s, TCGArg ret, TCGArg arg1, - TCGArg arg2, int const_arg2) -{ 
- if (const_arg2) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1, - arg1, arg2)); - } else { - tcg_out_bundle(s, mII, - tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R2, - 0x40, arg2), - tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R3, - arg1, arg2), - tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R2, - arg1, TCG_REG_R2)); - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, - TCG_REG_R2, TCG_REG_R3)); - } -} - -static const uint64_t opc_ext_i29[8] = { - OPC_ZXT1_I29, OPC_ZXT2_I29, OPC_ZXT4_I29, 0, - OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0 -}; - -static inline uint64_t tcg_opc_ext_i(int qp, TCGMemOp opc, TCGReg d, TCGReg s) -{ - if ((opc & MO_SIZE) == MO_64) { - return tcg_opc_mov_a(qp, d, s); - } else { - return tcg_opc_i29(qp, opc_ext_i29[opc & MO_SSIZE], d, s); - } -} - -static inline void tcg_out_ext(TCGContext *s, uint64_t opc_i29, - TCGArg ret, TCGArg arg) -{ - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i29(TCG_REG_P0, opc_i29, ret, arg)); -} - -static inline uint64_t tcg_opc_bswap64_i(int qp, TCGReg d, TCGReg s) -{ - return tcg_opc_i3(qp, OPC_MUX1_I3, d, s, 0xb); -} - -static inline void tcg_out_bswap16(TCGContext *s, TCGArg ret, TCGArg arg) -{ - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 15, 15), - tcg_opc_bswap64_i(TCG_REG_P0, ret, ret)); -} - -static inline void tcg_out_bswap32(TCGContext *s, TCGArg ret, TCGArg arg) -{ - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 31, 31), - tcg_opc_bswap64_i(TCG_REG_P0, ret, ret)); -} - -static inline void tcg_out_bswap64(TCGContext *s, TCGArg ret, TCGArg arg) -{ - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_bswap64_i(TCG_REG_P0, ret, arg)); -} - -static inline void tcg_out_deposit(TCGContext *s, TCGArg ret, TCGArg a1, - TCGArg a2, int const_a2, int pos, int len) -{ - uint64_t i1 = 0, i2 = 0; - int cpos = 63 - pos, lm1 = len - 1; - - if (const_a2) { - /* Truncate the value of a constant a2 to the width of the field. */ - int mask = (1u << len) - 1; - a2 &= mask; - - if (a2 == 0 || a2 == mask) { - /* 1-bit signed constant inserted into register. */ - i2 = tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, ret, a2, a1, cpos, lm1); - } else { - /* Otherwise, load any constant into a temporary. Do this into - the first I slot to help out with cross-unit delays. */ - i1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, a2); - a2 = TCG_REG_R2; - } - } - if (i2 == 0) { - i2 = tcg_opc_i15(TCG_REG_P0, OPC_DEP_I15, ret, a2, a1, cpos, lm1); - } - tcg_out_bundle(s, (i1 ? mII : miI), - INSN_NOP_M, - i1 ? 
i1 : INSN_NOP_I, - i2); -} - -static inline uint64_t tcg_opc_cmp_a(int qp, TCGCond cond, TCGArg arg1, - TCGArg arg2, int cmp4) -{ - uint64_t opc_eq_a6, opc_lt_a6, opc_ltu_a6; - - if (cmp4) { - opc_eq_a6 = OPC_CMP4_EQ_A6; - opc_lt_a6 = OPC_CMP4_LT_A6; - opc_ltu_a6 = OPC_CMP4_LTU_A6; - } else { - opc_eq_a6 = OPC_CMP_EQ_A6; - opc_lt_a6 = OPC_CMP_LT_A6; - opc_ltu_a6 = OPC_CMP_LTU_A6; - } - - switch (cond) { - case TCG_COND_EQ: - return tcg_opc_a6 (qp, opc_eq_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2); - case TCG_COND_NE: - return tcg_opc_a6 (qp, opc_eq_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2); - case TCG_COND_LT: - return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2); - case TCG_COND_LTU: - return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2); - case TCG_COND_GE: - return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2); - case TCG_COND_GEU: - return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2); - case TCG_COND_LE: - return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P7, TCG_REG_P6, arg2, arg1); - case TCG_COND_LEU: - return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P7, TCG_REG_P6, arg2, arg1); - case TCG_COND_GT: - return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P6, TCG_REG_P7, arg2, arg1); - case TCG_COND_GTU: - return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P6, TCG_REG_P7, arg2, arg1); - default: - tcg_abort(); - break; - } -} - -static inline void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, - TCGReg arg2, int label_index, int cmp4) -{ - TCGLabel *l = &s->labels[label_index]; - uint64_t imm; - - /* We pay attention here to not modify the branch target by reading - the existing value and using it again. This ensure that caches and - memory are kept coherent during retranslation. */ - if (l->has_value) { - imm = l->u.value_ptr - s->code_ptr; - } else { - imm = get_reloc_pcrel21b_slot2(s->code_ptr); - tcg_out_reloc(s, s->code_ptr, R_IA64_PCREL21B, label_index, 0); - } - - tcg_out_bundle(s, miB, - INSN_NOP_M, - tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4), - tcg_opc_b1(TCG_REG_P6, OPC_BR_DPTK_FEW_B1, imm)); -} - -static inline void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg ret, - TCGArg arg1, TCGArg arg2, int cmp4) -{ - tcg_out_bundle(s, MmI, - tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4), - tcg_opc_movi_a(TCG_REG_P6, ret, 1), - tcg_opc_movi_a(TCG_REG_P7, ret, 0)); -} - -static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret, - TCGArg c1, TCGArg c2, - TCGArg v1, int const_v1, - TCGArg v2, int const_v2, int cmp4) -{ - uint64_t opc1, opc2; - - if (const_v1) { - opc1 = tcg_opc_movi_a(TCG_REG_P6, ret, v1); - } else if (ret == v1) { - opc1 = INSN_NOP_M; - } else { - opc1 = tcg_opc_mov_a(TCG_REG_P6, ret, v1); - } - if (const_v2) { - opc2 = tcg_opc_movi_a(TCG_REG_P7, ret, v2); - } else if (ret == v2) { - opc2 = INSN_NOP_I; - } else { - opc2 = tcg_opc_mov_a(TCG_REG_P7, ret, v2); - } - - tcg_out_bundle(s, MmI, - tcg_opc_cmp_a(TCG_REG_P0, cond, c1, c2, cmp4), - opc1, - opc2); -} - -#if defined(CONFIG_SOFTMMU) -/* We're expecting to use an signed 22-bit immediate add. */ -QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1]) - > 0x1fffff) - -/* Load and compare a TLB entry, and return the result in (p6, p7). - R2 is loaded with the addend TLB entry. - R57 is loaded with the address, zero extented on 32-bit targets. - R1, R3 are clobbered, leaving R56 free for... - BSWAP_1, BSWAP_2 and I-slot insns for swapping data for store. 
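The pseudo-assembly comment here is the whole algorithm; in plain C it is an index, a tag compare, and an addend. A simplified sketch (the constants and struct are illustrative, and the alignment bits that the real dep keeps in the tag compare are ignored here):

#include <assert.h>
#include <stdint.h>

/* Illustrative parameters; the real ones come from the target config. */
#define TARGET_PAGE_BITS 12
#define CPU_TLB_BITS     8
#define TLB_PAGE_MASK    (~((1ull << TARGET_PAGE_BITS) - 1))

typedef struct {
    uint64_t addr_read;   /* tag: guest page address of the translation */
    uint64_t addend;      /* host minus guest address for that page */
} TLBEntry;

static int tlb_lookup(const TLBEntry *tlb, uint64_t vaddr, uint64_t *host)
{
    unsigned idx = (vaddr >> TARGET_PAGE_BITS) & ((1u << CPU_TLB_BITS) - 1);
    if (tlb[idx].addr_read == (vaddr & TLB_PAGE_MASK)) {
        *host = vaddr + tlb[idx].addend;   /* hit: fast path, predicate p6 */
        return 1;
    }
    return 0;                              /* miss: slow path, predicate p7 */
}

int main(void)
{
    static TLBEntry tlb[1u << CPU_TLB_BITS];
    uint64_t vaddr = 0x40001234, host = 0;
    unsigned idx = (vaddr >> TARGET_PAGE_BITS) & ((1u << CPU_TLB_BITS) - 1);
    tlb[idx].addr_read = vaddr & TLB_PAGE_MASK;
    tlb[idx].addend    = 0x7f0000000000ull;
    assert(tlb_lookup(tlb, vaddr, &host) && host == vaddr + 0x7f0000000000ull);
    return 0;
}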
*/ -static inline void tcg_out_qemu_tlb(TCGContext *s, TCGReg addr_reg, - TCGMemOp s_bits, int off_rw, int off_add, - uint64_t bswap1, uint64_t bswap2) -{ - /* - .mii - mov r2 = off_rw - extr.u r3 = addr_reg, ... # extract tlb page - zxt4 r57 = addr_reg # or mov for 64-bit guest - ;; - .mii - addl r2 = r2, areg0 - shl r3 = r3, cteb # via dep.z - dep r1 = 0, r57, ... # zero page ofs, keep align - ;; - .mmi - add r2 = r2, r3 - ;; - ld4 r3 = [r2], off_add-off_rw # or ld8 for 64-bit guest - nop - ;; - .mmi - nop - cmp.eq p6, p7 = r3, r58 - nop - ;; - */ - tcg_out_bundle(s, miI, - tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, off_rw), - tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R3, - addr_reg, TARGET_PAGE_BITS, CPU_TLB_BITS - 1), - tcg_opc_ext_i(TCG_REG_P0, - TARGET_LONG_BITS == 32 ? MO_UL : MO_Q, - TCG_REG_R57, addr_reg)); - tcg_out_bundle(s, miI, - tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, - TCG_REG_R2, TCG_AREG0), - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R3, - TCG_REG_R3, 63 - CPU_TLB_ENTRY_BITS, - 63 - CPU_TLB_ENTRY_BITS), - tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R1, 0, - TCG_REG_R57, 63 - s_bits, - TARGET_PAGE_BITS - s_bits - 1)); - tcg_out_bundle(s, MmI, - tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, - TCG_REG_R2, TCG_REG_R2, TCG_REG_R3), - tcg_opc_m3 (TCG_REG_P0, - (TARGET_LONG_BITS == 32 - ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R3, - TCG_REG_R2, off_add - off_rw), - bswap1); - tcg_out_bundle(s, mmI, - tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R2, TCG_REG_R2), - tcg_opc_a6 (TCG_REG_P0, OPC_CMP_EQ_A6, TCG_REG_P6, - TCG_REG_P7, TCG_REG_R1, TCG_REG_R3), - bswap2); -} - -#define TCG_MAX_QEMU_LDST 640 - -typedef struct TCGLabelQemuLdst { - bool is_ld; - TCGMemOp size; - tcg_insn_unit *label_ptr; /* label pointers to be updated */ -} TCGLabelQemuLdst; - -typedef struct TCGBackendData { - int nb_ldst_labels; - TCGLabelQemuLdst ldst_labels[TCG_MAX_QEMU_LDST]; -} TCGBackendData; - -static inline void tcg_out_tb_init(TCGContext *s) -{ - s->be->nb_ldst_labels = 0; -} - -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, - tcg_insn_unit *label_ptr) -{ - TCGBackendData *be = s->be; - TCGLabelQemuLdst *l = &be->ldst_labels[be->nb_ldst_labels++]; - - assert(be->nb_ldst_labels <= TCG_MAX_QEMU_LDST); - l->is_ld = is_ld; - l->size = opc & MO_SIZE; - l->label_ptr = label_ptr; -} - -static void tcg_out_tb_finalize(TCGContext *s) -{ - static const void * const helpers[8] = { - helper_ret_stb_mmu, - helper_le_stw_mmu, - helper_le_stl_mmu, - helper_le_stq_mmu, - helper_ret_ldub_mmu, - helper_le_lduw_mmu, - helper_le_ldul_mmu, - helper_le_ldq_mmu, - }; - tcg_insn_unit *thunks[8] = { }; - TCGBackendData *be = s->be; - size_t i, n = be->nb_ldst_labels; - - for (i = 0; i < n; i++) { - TCGLabelQemuLdst *l = &be->ldst_labels[i]; - long x = l->is_ld * 4 + l->size; - tcg_insn_unit *dest = thunks[x]; - - /* The out-of-line thunks are all the same; load the return address - from B0, load the GP, and branch to the code. Note that we are - always post-call, so the register window has rolled, so we're - using incomming parameter register numbers, not outgoing. */ - if (dest == NULL) { - uintptr_t *desc = (uintptr_t *)helpers[x]; - uintptr_t func = desc[0], gp = desc[1], disp; - - thunks[x] = dest = s->code_ptr; - - tcg_out_bundle(s, mlx, - INSN_NOP_M, - tcg_opc_l2 (gp), - tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, - TCG_REG_R1, gp)); - tcg_out_bundle(s, mii, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22, - l->is_ld ? 
TCG_REG_R35 : TCG_REG_R36, - TCG_REG_B0)); - disp = (tcg_insn_unit *)func - s->code_ptr; - tcg_out_bundle(s, mLX, - INSN_NOP_M, - tcg_opc_l3 (disp), - tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, disp)); - } - - reloc_pcrel21b_slot2(l->label_ptr, dest); - } -} - -static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args) -{ - static const uint64_t opc_ld_m1[4] = { - OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1 - }; - int addr_reg, data_reg, mem_index; - TCGMemOp opc, s_bits; - uint64_t fin1, fin2; - tcg_insn_unit *label_ptr; - - data_reg = args[0]; - addr_reg = args[1]; - opc = args[2]; - mem_index = args[3]; - s_bits = opc & MO_SIZE; - - /* Read the TLB entry */ - tcg_out_qemu_tlb(s, addr_reg, s_bits, - offsetof(CPUArchState, tlb_table[mem_index][0].addr_read), - offsetof(CPUArchState, tlb_table[mem_index][0].addend), - INSN_NOP_I, INSN_NOP_I); - - /* P6 is the fast path, and P7 the slow path */ - - fin2 = 0; - if (opc & MO_BSWAP) { - fin1 = tcg_opc_bswap64_i(TCG_REG_P0, data_reg, TCG_REG_R8); - if (s_bits < MO_64) { - int shift = 64 - (8 << s_bits); - fin2 = (opc & MO_SIGN ? OPC_EXTR_I11 : OPC_EXTR_U_I11); - fin2 = tcg_opc_i11(TCG_REG_P0, fin2, - data_reg, data_reg, shift, 63 - shift); - } - } else { - fin1 = tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8); - } - - tcg_out_bundle(s, mmI, - tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0), - tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2, - TCG_REG_R2, TCG_REG_R57), - tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index)); - label_ptr = s->code_ptr; - tcg_out_bundle(s, miB, - tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits], - TCG_REG_R8, TCG_REG_R2), - INSN_NOP_I, - tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0, - get_reloc_pcrel21b_slot2(label_ptr))); - - add_qemu_ldst_label(s, 1, opc, label_ptr); - - /* Note that we always use LE helper functions, so the bswap insns - here for the fast path also apply to the slow path. */ - tcg_out_bundle(s, (fin2 ? mII : miI), - INSN_NOP_M, - fin1, - fin2 ? fin2 : INSN_NOP_I); -} - -static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) -{ - static const uint64_t opc_st_m4[4] = { - OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4 - }; - TCGReg addr_reg, data_reg; - int mem_index; - uint64_t pre1, pre2; - TCGMemOp opc, s_bits; - tcg_insn_unit *label_ptr; - - data_reg = args[0]; - addr_reg = args[1]; - opc = args[2]; - mem_index = args[3]; - s_bits = opc & MO_SIZE; - - /* Note that we always use LE helper functions, so the bswap insns - that are here for the fast path also apply to the slow path, - and move the data into the argument register. */ - pre2 = INSN_NOP_I; - if (opc & MO_BSWAP) { - pre1 = tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R58, data_reg); - if (s_bits < MO_64) { - int shift = 64 - (8 << s_bits); - pre2 = tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, - TCG_REG_R58, TCG_REG_R58, shift, 63 - shift); - } - } else { - /* Just move the data into place for the slow path. 
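Because the helpers are little-endian, a big-endian guest access is byte-swapped on the fast path: the value is bswap64'd into the top of the register and one extract shifts it back down, sign- or zero-extending as it goes. A stand-alone version of that fixup, with the GCC/Clang builtin __builtin_bswap64 standing in for the mux1-based swap (function name is illustrative):

#include <assert.h>
#include <stdint.h>

/* After bswap64, a 1/2/4-byte loaded value sits in the top bits of the
   register; a single extract shifts it down and widens it, matching the
   fin1/fin2 pair above (size_log2 is the MO_SIZE field, here 0..2). */
static uint64_t bswap_and_extract(uint64_t reg, int size_log2, int is_signed)
{
    int shift = 64 - (8 << size_log2);
    uint64_t v = __builtin_bswap64(reg);
    return is_signed ? (uint64_t)((int64_t)v >> shift) : v >> shift;
}

int main(void)
{
    /* A big-endian guest halfword 0xfeff is read by a little-endian ld2
       as 0xfffe; bswap + extract recovers and widens it. */
    assert(bswap_and_extract(0xfffe, 1, 0) == 0xfeff);
    assert(bswap_and_extract(0xfffe, 1, 1) == 0xfffffffffffffeffull);
    return 0;
}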
*/ - pre1 = tcg_opc_ext_i(TCG_REG_P0, opc, TCG_REG_R58, data_reg); - } - - tcg_out_qemu_tlb(s, addr_reg, s_bits, - offsetof(CPUArchState, tlb_table[mem_index][0].addr_write), - offsetof(CPUArchState, tlb_table[mem_index][0].addend), - pre1, pre2); - - /* P6 is the fast path, and P7 the slow path */ - tcg_out_bundle(s, mmI, - tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0), - tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2, - TCG_REG_R2, TCG_REG_R57), - tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index)); - label_ptr = s->code_ptr; - tcg_out_bundle(s, miB, - tcg_opc_m4 (TCG_REG_P6, opc_st_m4[s_bits], - TCG_REG_R58, TCG_REG_R2), - INSN_NOP_I, - tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0, - get_reloc_pcrel21b_slot2(label_ptr))); - - add_qemu_ldst_label(s, 0, opc, label_ptr); -} - -#else /* !CONFIG_SOFTMMU */ -# include "tcg-be-null.h" - -static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args) -{ - static uint64_t const opc_ld_m1[4] = { - OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1 - }; - int addr_reg, data_reg; - TCGMemOp opc, s_bits, bswap; - - data_reg = args[0]; - addr_reg = args[1]; - opc = args[2]; - s_bits = opc & MO_SIZE; - bswap = opc & MO_BSWAP; - -#if TARGET_LONG_BITS == 32 - if (GUEST_BASE != 0) { - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, - TCG_REG_R3, addr_reg), - tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, - TCG_GUEST_BASE_REG, TCG_REG_R3)); - } else { - tcg_out_bundle(s, miI, - INSN_NOP_M, - tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, - TCG_REG_R2, addr_reg), - INSN_NOP_I); - } - - if (!bswap) { - if (!(opc & MO_SIGN)) { - tcg_out_bundle(s, miI, - tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], - data_reg, TCG_REG_R2), - INSN_NOP_I, - INSN_NOP_I); - } else { - tcg_out_bundle(s, mII, - tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], - data_reg, TCG_REG_R2), - INSN_NOP_I, - tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg)); - } - } else if (s_bits == MO_64) { - tcg_out_bundle(s, mII, - tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], - data_reg, TCG_REG_R2), - INSN_NOP_I, - tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg)); - } else { - if (s_bits == MO_16) { - tcg_out_bundle(s, mII, - tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], - data_reg, TCG_REG_R2), - INSN_NOP_I, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, - data_reg, data_reg, 15, 15)); - } else { - tcg_out_bundle(s, mII, - tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], - data_reg, TCG_REG_R2), - INSN_NOP_I, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, - data_reg, data_reg, 31, 31)); - } - if (!(opc & MO_SIGN)) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg)); - } else { - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg), - tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg)); - } - } -#else - if (GUEST_BASE != 0) { - tcg_out_bundle(s, MmI, - tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, - TCG_GUEST_BASE_REG, addr_reg), - tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], - data_reg, TCG_REG_R2), - INSN_NOP_I); - } else { - tcg_out_bundle(s, mmI, - INSN_NOP_M, - tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], - data_reg, addr_reg), - INSN_NOP_I); - } - - if (bswap && s_bits == MO_16) { - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, - data_reg, data_reg, 15, 15), - tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg)); - } else if (bswap && s_bits == MO_32) { - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, - data_reg, data_reg, 31, 31), 
- tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg)); - } else if (bswap && s_bits == MO_64) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg)); - } - if (opc & MO_SIGN) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg)); - } -#endif -} - -static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) -{ - static uint64_t const opc_st_m4[4] = { - OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4 - }; - int addr_reg, data_reg; -#if TARGET_LONG_BITS == 64 - uint64_t add_guest_base; -#endif - TCGMemOp opc, s_bits, bswap; - - data_reg = args[0]; - addr_reg = args[1]; - opc = args[2]; - s_bits = opc & MO_SIZE; - bswap = opc & MO_BSWAP; - -#if TARGET_LONG_BITS == 32 - if (GUEST_BASE != 0) { - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, - TCG_REG_R3, addr_reg), - tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, - TCG_GUEST_BASE_REG, TCG_REG_R3)); - } else { - tcg_out_bundle(s, miI, - INSN_NOP_M, - tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, - TCG_REG_R2, addr_reg), - INSN_NOP_I); - } - - if (bswap) { - if (s_bits == MO_16) { - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, - TCG_REG_R3, data_reg, 15, 15), - tcg_opc_bswap64_i(TCG_REG_P0, - TCG_REG_R3, TCG_REG_R3)); - data_reg = TCG_REG_R3; - } else if (s_bits == MO_32) { - tcg_out_bundle(s, mII, - INSN_NOP_M, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, - TCG_REG_R3, data_reg, 31, 31), - tcg_opc_bswap64_i(TCG_REG_P0, - TCG_REG_R3, TCG_REG_R3)); - data_reg = TCG_REG_R3; - } else if (s_bits == MO_64) { - tcg_out_bundle(s, miI, - INSN_NOP_M, - INSN_NOP_I, - tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R3, data_reg)); - data_reg = TCG_REG_R3; - } - } - tcg_out_bundle(s, mmI, - tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits], - data_reg, TCG_REG_R2), - INSN_NOP_M, - INSN_NOP_I); -#else - if (GUEST_BASE != 0) { - add_guest_base = tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, - TCG_GUEST_BASE_REG, addr_reg); - addr_reg = TCG_REG_R2; - } else { - add_guest_base = INSN_NOP_M; - } - - if (!bswap) { - tcg_out_bundle(s, (GUEST_BASE ? 
MmI : mmI), - add_guest_base, - tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits], - data_reg, addr_reg), - INSN_NOP_I); - } else { - if (s_bits == MO_16) { - tcg_out_bundle(s, mII, - add_guest_base, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, - TCG_REG_R3, data_reg, 15, 15), - tcg_opc_bswap64_i(TCG_REG_P0, - TCG_REG_R3, TCG_REG_R3)); - data_reg = TCG_REG_R3; - } else if (s_bits == MO_32) { - tcg_out_bundle(s, mII, - add_guest_base, - tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, - TCG_REG_R3, data_reg, 31, 31), - tcg_opc_bswap64_i(TCG_REG_P0, - TCG_REG_R3, TCG_REG_R3)); - data_reg = TCG_REG_R3; - } else if (s_bits == MO_64) { - tcg_out_bundle(s, miI, - add_guest_base, - INSN_NOP_I, - tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R3, data_reg)); - data_reg = TCG_REG_R3; - } - tcg_out_bundle(s, miI, - tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits], - data_reg, addr_reg), - INSN_NOP_I, - INSN_NOP_I); - } -#endif -} - -#endif - -static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg *args, const int *const_args) -{ - switch(opc) { - case INDEX_op_exit_tb: - tcg_out_exit_tb(s, args[0]); - break; - case INDEX_op_br: - tcg_out_br(s, args[0]); - break; - case INDEX_op_goto_tb: - tcg_out_goto_tb(s, args[0]); - break; - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - tcg_out_ld_rel(s, OPC_LD1_M1, args[0], args[1], args[2]); - break; - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - tcg_out_ld_rel(s, OPC_LD1_M1, args[0], args[1], args[2]); - tcg_out_ext(s, OPC_SXT1_I29, args[0], args[0]); - break; - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - tcg_out_ld_rel(s, OPC_LD2_M1, args[0], args[1], args[2]); - break; - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - tcg_out_ld_rel(s, OPC_LD2_M1, args[0], args[1], args[2]); - tcg_out_ext(s, OPC_SXT2_I29, args[0], args[0]); - break; - case INDEX_op_ld_i32: - case INDEX_op_ld32u_i64: - tcg_out_ld_rel(s, OPC_LD4_M1, args[0], args[1], args[2]); - break; - case INDEX_op_ld32s_i64: - tcg_out_ld_rel(s, OPC_LD4_M1, args[0], args[1], args[2]); - tcg_out_ext(s, OPC_SXT4_I29, args[0], args[0]); - break; - case INDEX_op_ld_i64: - tcg_out_ld_rel(s, OPC_LD8_M1, args[0], args[1], args[2]); - break; - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - tcg_out_st_rel(s, OPC_ST1_M4, args[0], args[1], args[2]); - break; - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - tcg_out_st_rel(s, OPC_ST2_M4, args[0], args[1], args[2]); - break; - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - tcg_out_st_rel(s, OPC_ST4_M4, args[0], args[1], args[2]); - break; - case INDEX_op_st_i64: - tcg_out_st_rel(s, OPC_ST8_M4, args[0], args[1], args[2]); - break; - - case INDEX_op_add_i32: - case INDEX_op_add_i64: - tcg_out_add(s, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_sub_i32: - case INDEX_op_sub_i64: - tcg_out_sub(s, args[0], args[1], const_args[1], args[2], const_args[2]); - break; - - case INDEX_op_and_i32: - case INDEX_op_and_i64: - /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. 
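The swap noted in the comment below exists because the A3 immediate form encodes its 8-bit signed constant in the first operand slot, while TCG canonicalizes constants into the second operand of commutative ops. A reduced sketch of the form selection inside tcg_out_alu (it omits the r0-as-zero shortcut and ops without an A3 form; all names are illustrative):

#include <assert.h>
#include <stdint.h>

typedef enum {
    FORM_A1_REG_REG,     /* both operands already in registers */
    FORM_A3_IMM_REG,     /* 8-bit signed immediate fits the A3 encoding */
    FORM_MOVI_THEN_A1    /* materialize the constant into a temp first */
} AluForm;

static AluForm pick_form(int64_t arg1, int const_arg1)
{
    if (!const_arg1) {
        return FORM_A1_REG_REG;
    }
    if (arg1 == (int8_t)arg1) {
        return FORM_A3_IMM_REG;
    }
    return FORM_MOVI_THEN_A1;
}

int main(void)
{
    assert(pick_form(0, 0)      == FORM_A1_REG_REG);
    assert(pick_form(-128, 1)   == FORM_A3_IMM_REG);
    assert(pick_form(0x1234, 1) == FORM_MOVI_THEN_A1);
    return 0;
}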
*/ - tcg_out_alu(s, OPC_AND_A1, OPC_AND_A3, args[0], - args[2], const_args[2], args[1], const_args[1]); - break; - case INDEX_op_andc_i32: - case INDEX_op_andc_i64: - tcg_out_alu(s, OPC_ANDCM_A1, OPC_ANDCM_A3, args[0], - args[1], const_args[1], args[2], const_args[2]); - break; - case INDEX_op_eqv_i32: - case INDEX_op_eqv_i64: - tcg_out_eqv(s, args[0], args[1], const_args[1], - args[2], const_args[2]); - break; - case INDEX_op_nand_i32: - case INDEX_op_nand_i64: - tcg_out_nand(s, args[0], args[1], const_args[1], - args[2], const_args[2]); - break; - case INDEX_op_nor_i32: - case INDEX_op_nor_i64: - tcg_out_nor(s, args[0], args[1], const_args[1], - args[2], const_args[2]); - break; - case INDEX_op_or_i32: - case INDEX_op_or_i64: - /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. */ - tcg_out_alu(s, OPC_OR_A1, OPC_OR_A3, args[0], - args[2], const_args[2], args[1], const_args[1]); - break; - case INDEX_op_orc_i32: - case INDEX_op_orc_i64: - tcg_out_orc(s, args[0], args[1], const_args[1], - args[2], const_args[2]); - break; - case INDEX_op_xor_i32: - case INDEX_op_xor_i64: - /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. */ - tcg_out_alu(s, OPC_XOR_A1, OPC_XOR_A3, args[0], - args[2], const_args[2], args[1], const_args[1]); - break; - - case INDEX_op_mul_i32: - case INDEX_op_mul_i64: - tcg_out_mul(s, args[0], args[1], args[2]); - break; - - case INDEX_op_sar_i32: - tcg_out_sar_i32(s, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_sar_i64: - tcg_out_sar_i64(s, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_shl_i32: - tcg_out_shl_i32(s, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_shl_i64: - tcg_out_shl_i64(s, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_shr_i32: - tcg_out_shr_i32(s, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_shr_i64: - tcg_out_shr_i64(s, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_rotl_i32: - tcg_out_rotl_i32(s, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_rotl_i64: - tcg_out_rotl_i64(s, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_rotr_i32: - tcg_out_rotr_i32(s, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_rotr_i64: - tcg_out_rotr_i64(s, args[0], args[1], args[2], const_args[2]); - break; - - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - tcg_out_ext(s, OPC_SXT1_I29, args[0], args[1]); - break; - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - tcg_out_ext(s, OPC_ZXT1_I29, args[0], args[1]); - break; - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - tcg_out_ext(s, OPC_SXT2_I29, args[0], args[1]); - break; - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - tcg_out_ext(s, OPC_ZXT2_I29, args[0], args[1]); - break; - case INDEX_op_ext32s_i64: - tcg_out_ext(s, OPC_SXT4_I29, args[0], args[1]); - break; - case INDEX_op_ext32u_i64: - tcg_out_ext(s, OPC_ZXT4_I29, args[0], args[1]); - break; - - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: - tcg_out_bswap16(s, args[0], args[1]); - break; - case INDEX_op_bswap32_i32: - case INDEX_op_bswap32_i64: - tcg_out_bswap32(s, args[0], args[1]); - break; - case INDEX_op_bswap64_i64: - tcg_out_bswap64(s, args[0], args[1]); - break; - - case INDEX_op_deposit_i32: - case INDEX_op_deposit_i64: - tcg_out_deposit(s, args[0], args[1], args[2], const_args[2], - args[3], args[4]); - break; - - case INDEX_op_brcond_i32: - tcg_out_brcond(s, args[2], args[0], args[1], args[3], 1); - break; - 
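For constant 32-bit rotates the backend avoids a shift-shift-or sequence: unpack4.l duplicates the value into both halves of a 64-bit register, after which any 32-bit window of that register is a rotation, and a single extr.u selects it. The same trick in portable C (function name is illustrative):

#include <assert.h>
#include <stdint.h>

/* rotl32(x, n): duplicate x into both halves of a 64-bit word (what
   unpack4.l builds), then the 32-bit window at bit 32-n is the result
   (what extr.u selects). */
static uint32_t rotl32_via_window(uint32_t x, unsigned n)   /* 0 < n < 32 */
{
    uint64_t doubled = ((uint64_t)x << 32) | x;
    return (uint32_t)(doubled >> (32 - n));
}

int main(void)
{
    assert(rotl32_via_window(0x80000001u, 1) == 0x00000003u);
    assert(rotl32_via_window(0x80000001u, 4) == 0x00000018u);
    return 0;
}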
case INDEX_op_brcond_i64: - tcg_out_brcond(s, args[2], args[0], args[1], args[3], 0); - break; - case INDEX_op_setcond_i32: - tcg_out_setcond(s, args[3], args[0], args[1], args[2], 1); - break; - case INDEX_op_setcond_i64: - tcg_out_setcond(s, args[3], args[0], args[1], args[2], 0); - break; - case INDEX_op_movcond_i32: - tcg_out_movcond(s, args[5], args[0], args[1], args[2], - args[3], const_args[3], args[4], const_args[4], 1); - break; - case INDEX_op_movcond_i64: - tcg_out_movcond(s, args[5], args[0], args[1], args[2], - args[3], const_args[3], args[4], const_args[4], 0); - break; - - case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, args); - break; - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args); - break; - case INDEX_op_qemu_st_i32: - tcg_out_qemu_st(s, args); - break; - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args); - break; - - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ - case INDEX_op_movi_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - default: - tcg_abort(); - } -} - -static const TCGTargetOpDef ia64_op_defs[] = { - { INDEX_op_br, { } }, - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - { INDEX_op_st8_i32, { "rZ", "r" } }, - { INDEX_op_st16_i32, { "rZ", "r" } }, - { INDEX_op_st_i32, { "rZ", "r" } }, - - { INDEX_op_add_i32, { "r", "rZ", "rI" } }, - { INDEX_op_sub_i32, { "r", "rI", "rI" } }, - - { INDEX_op_and_i32, { "r", "rI", "rI" } }, - { INDEX_op_andc_i32, { "r", "rI", "rI" } }, - { INDEX_op_eqv_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_nand_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_nor_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_or_i32, { "r", "rI", "rI" } }, - { INDEX_op_orc_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_xor_i32, { "r", "rI", "rI" } }, - - { INDEX_op_mul_i32, { "r", "rZ", "rZ" } }, - - { INDEX_op_sar_i32, { "r", "rZ", "ri" } }, - { INDEX_op_shl_i32, { "r", "rZ", "ri" } }, - { INDEX_op_shr_i32, { "r", "rZ", "ri" } }, - { INDEX_op_rotl_i32, { "r", "rZ", "ri" } }, - { INDEX_op_rotr_i32, { "r", "rZ", "ri" } }, - - { INDEX_op_ext8s_i32, { "r", "rZ"} }, - { INDEX_op_ext8u_i32, { "r", "rZ"} }, - { INDEX_op_ext16s_i32, { "r", "rZ"} }, - { INDEX_op_ext16u_i32, { "r", "rZ"} }, - - { INDEX_op_bswap16_i32, { "r", "rZ" } }, - { INDEX_op_bswap32_i32, { "r", "rZ" } }, - - { INDEX_op_brcond_i32, { "rZ", "rZ" } }, - { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rI", "rI" } }, - - { INDEX_op_ld8u_i64, { "r", "r" } }, - { INDEX_op_ld8s_i64, { "r", "r" } }, - { INDEX_op_ld16u_i64, { "r", "r" } }, - { INDEX_op_ld16s_i64, { "r", "r" } }, - { INDEX_op_ld32u_i64, { "r", "r" } }, - { INDEX_op_ld32s_i64, { "r", "r" } }, - { INDEX_op_ld_i64, { "r", "r" } }, - { INDEX_op_st8_i64, { "rZ", "r" } }, - { INDEX_op_st16_i64, { "rZ", "r" } }, - { INDEX_op_st32_i64, { "rZ", "r" } }, - { INDEX_op_st_i64, { "rZ", "r" } }, - - { INDEX_op_add_i64, { "r", "rZ", "rI" } }, - { INDEX_op_sub_i64, { "r", "rI", "rI" } }, - - { INDEX_op_and_i64, { "r", "rI", "rI" } }, - { INDEX_op_andc_i64, { "r", "rI", "rI" } }, - { INDEX_op_eqv_i64, { "r", "rZ", "rZ" } }, - { INDEX_op_nand_i64, { "r", "rZ", "rZ" } }, - { INDEX_op_nor_i64, { "r", "rZ", "rZ" } }, - { INDEX_op_or_i64, { "r", "rI", "rI" } }, - { INDEX_op_orc_i64, { "r", "rZ", "rZ" 
} }, - { INDEX_op_xor_i64, { "r", "rI", "rI" } }, - - { INDEX_op_mul_i64, { "r", "rZ", "rZ" } }, - - { INDEX_op_sar_i64, { "r", "rZ", "ri" } }, - { INDEX_op_shl_i64, { "r", "rZ", "ri" } }, - { INDEX_op_shr_i64, { "r", "rZ", "ri" } }, - { INDEX_op_rotl_i64, { "r", "rZ", "ri" } }, - { INDEX_op_rotr_i64, { "r", "rZ", "ri" } }, - - { INDEX_op_ext8s_i64, { "r", "rZ"} }, - { INDEX_op_ext8u_i64, { "r", "rZ"} }, - { INDEX_op_ext16s_i64, { "r", "rZ"} }, - { INDEX_op_ext16u_i64, { "r", "rZ"} }, - { INDEX_op_ext32s_i64, { "r", "rZ"} }, - { INDEX_op_ext32u_i64, { "r", "rZ"} }, - - { INDEX_op_bswap16_i64, { "r", "rZ" } }, - { INDEX_op_bswap32_i64, { "r", "rZ" } }, - { INDEX_op_bswap64_i64, { "r", "rZ" } }, - - { INDEX_op_brcond_i64, { "rZ", "rZ" } }, - { INDEX_op_setcond_i64, { "r", "rZ", "rZ" } }, - { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rI", "rI" } }, - - { INDEX_op_deposit_i32, { "r", "rZ", "ri" } }, - { INDEX_op_deposit_i64, { "r", "rZ", "ri" } }, - - { INDEX_op_qemu_ld_i32, { "r", "r" } }, - { INDEX_op_qemu_ld_i64, { "r", "r" } }, - { INDEX_op_qemu_st_i32, { "SZ", "r" } }, - { INDEX_op_qemu_st_i64, { "SZ", "r" } }, - - { -1 }, -}; - -/* Generate global QEMU prologue and epilogue code */ -static void tcg_target_qemu_prologue(TCGContext *s) -{ - int frame_size; - - /* reserve some stack space */ - frame_size = TCG_STATIC_CALL_ARGS_SIZE + - CPU_TEMP_BUF_NLONGS * sizeof(long); - frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) & - ~(TCG_TARGET_STACK_ALIGN - 1); - tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, - CPU_TEMP_BUF_NLONGS * sizeof(long)); - - /* First emit adhoc function descriptor */ - *s->code_ptr = (tcg_insn_unit){ - (uint64_t)(s->code_ptr + 1), /* entry point */ - 0 /* skip gp */ - }; - s->code_ptr++; - - /* prologue */ - tcg_out_bundle(s, miI, - tcg_opc_m34(TCG_REG_P0, OPC_ALLOC_M34, - TCG_REG_R34, 32, 24, 0), - INSN_NOP_I, - tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, - TCG_REG_B6, TCG_REG_R33, 0)); - - /* ??? If GUEST_BASE < 0x200000, we could load the register via - an ADDL in the M slot of the next bundle. 
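The prologue above begins by emitting a two-word function descriptor rather than code, because an IA-64 function pointer designates an (entry, gp) pair; tcg_out_call earlier reads the same pair back out of desc->lo and desc->hi. A sketch of the convention (the struct and names are illustrative):

#include <assert.h>
#include <stdint.h>

/* An IA-64 function pointer designates a descriptor, not code. */
typedef struct {
    uint64_t entry;   /* word 0: address of the first bundle */
    uint64_t gp;      /* word 1: global pointer the callee expects in r1 */
} FnDescriptor;

int main(void)
{
    uint64_t code[4] = { 0 };                 /* stand-in for emitted bundles */
    /* The prologue's "adhoc" descriptor: entry points just past the
       16-byte descriptor itself, gp is left as 0 ("skip gp"). */
    FnDescriptor fd = { (uint64_t)(uintptr_t)&code[2], 0 };
    /* A caller's view: load fd.gp into r1, then branch to fd.entry. */
    assert(fd.entry == (uint64_t)(uintptr_t)(code + 2) && fd.gp == 0);
    return 0;
}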
*/ - if (GUEST_BASE != 0) { - tcg_out_bundle(s, mlx, - INSN_NOP_M, - tcg_opc_l2 (GUEST_BASE), - tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, - TCG_GUEST_BASE_REG, GUEST_BASE)); - tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); - } - - tcg_out_bundle(s, miB, - tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4, - TCG_REG_R12, -frame_size, TCG_REG_R12), - tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22, - TCG_REG_R33, TCG_REG_B0), - tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6)); - - /* epilogue */ - tb_ret_addr = s->code_ptr; - tcg_out_bundle(s, miI, - INSN_NOP_M, - tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, - TCG_REG_B0, TCG_REG_R33, 0), - tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4, - TCG_REG_R12, frame_size, TCG_REG_R12)); - tcg_out_bundle(s, miB, - INSN_NOP_M, - tcg_opc_i26(TCG_REG_P0, OPC_MOV_I_I26, - TCG_REG_PFS, TCG_REG_R34), - tcg_opc_b4 (TCG_REG_P0, OPC_BR_RET_SPTK_MANY_B4, - TCG_REG_B0)); -} - -static void tcg_target_init(TCGContext *s) -{ - tcg_regset_set(s->tcg_target_available_regs[TCG_TYPE_I32], - 0xffffffffffffffffull); - tcg_regset_set(s->tcg_target_available_regs[TCG_TYPE_I64], - 0xffffffffffffffffull); - - tcg_regset_clear(s->tcg_target_call_clobber_regs); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R8); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R9); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R10); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R11); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R14); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R15); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R16); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R17); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R18); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R19); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R20); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R21); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R22); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R23); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R24); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R25); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R26); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R27); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R28); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R29); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R30); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R31); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R56); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R57); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R58); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R59); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R60); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R61); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R62); - tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R63); - - tcg_regset_clear(s->reserved_regs); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* zero register */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* global pointer */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* internal use */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* internal use */ - 
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12); /* stack pointer */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R33); /* return address */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R34); /* PFS */ - - /* The following 4 are not in use, are call-saved, but *not* saved - by the prologue. Therefore we cannot use them without modifying - the prologue. There doesn't seem to be any good reason to use - these as opposed to the windowed registers. */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R4); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R5); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R6); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_R7); - - tcg_add_target_add_op_defs(s, ia64_op_defs); -} diff --git a/qemu/tcg/ia64/tcg-target.h b/qemu/tcg/ia64/tcg-target.h deleted file mode 100644 index b8b2693f..00000000 --- a/qemu/tcg/ia64/tcg-target.h +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Tiny Code Generator for QEMU - * - * Copyright (c) 2009-2010 Aurelien Jarno - * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
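The register bookkeeping in tcg_target_init above is all mask arithmetic: with TCG_TARGET_NB_REGS = 64 a register set is a single 64-bit word and set/reset/test are one-bit operations. A sketch of those semantics (helper names are mine, not the TCG API):

#include <assert.h>
#include <stdint.h>

typedef uint64_t RegSet;   /* one bit per register, TCG_TARGET_NB_REGS = 64 */

static void regset_set(RegSet *s, int reg)   { *s |=  1ull << reg; }
static void regset_reset(RegSet *s, int reg) { *s &= ~(1ull << reg); }
static int  regset_test(RegSet s, int reg)   { return (s >> reg) & 1; }

int main(void)
{
    RegSet reserved = 0;
    regset_set(&reserved, 0);     /* r0: hardwired zero */
    regset_set(&reserved, 12);    /* r12: stack pointer */
    regset_reset(&reserved, 12);
    assert(regset_test(reserved, 0) && !regset_test(reserved, 12));
    return 0;
}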
- */ -#ifndef TCG_TARGET_IA64 -#define TCG_TARGET_IA64 1 - -#define TCG_TARGET_INSN_UNIT_SIZE 16 -typedef struct { - uint64_t QEMU_ALIGN(16, lo); - uint64_t hi; -} tcg_insn_unit; - -/* We only map the first 64 registers */ -#define TCG_TARGET_NB_REGS 64 -typedef enum { - TCG_REG_R0 = 0, - TCG_REG_R1, - TCG_REG_R2, - TCG_REG_R3, - TCG_REG_R4, - TCG_REG_R5, - TCG_REG_R6, - TCG_REG_R7, - TCG_REG_R8, - TCG_REG_R9, - TCG_REG_R10, - TCG_REG_R11, - TCG_REG_R12, - TCG_REG_R13, - TCG_REG_R14, - TCG_REG_R15, - TCG_REG_R16, - TCG_REG_R17, - TCG_REG_R18, - TCG_REG_R19, - TCG_REG_R20, - TCG_REG_R21, - TCG_REG_R22, - TCG_REG_R23, - TCG_REG_R24, - TCG_REG_R25, - TCG_REG_R26, - TCG_REG_R27, - TCG_REG_R28, - TCG_REG_R29, - TCG_REG_R30, - TCG_REG_R31, - TCG_REG_R32, - TCG_REG_R33, - TCG_REG_R34, - TCG_REG_R35, - TCG_REG_R36, - TCG_REG_R37, - TCG_REG_R38, - TCG_REG_R39, - TCG_REG_R40, - TCG_REG_R41, - TCG_REG_R42, - TCG_REG_R43, - TCG_REG_R44, - TCG_REG_R45, - TCG_REG_R46, - TCG_REG_R47, - TCG_REG_R48, - TCG_REG_R49, - TCG_REG_R50, - TCG_REG_R51, - TCG_REG_R52, - TCG_REG_R53, - TCG_REG_R54, - TCG_REG_R55, - TCG_REG_R56, - TCG_REG_R57, - TCG_REG_R58, - TCG_REG_R59, - TCG_REG_R60, - TCG_REG_R61, - TCG_REG_R62, - TCG_REG_R63, - - TCG_AREG0 = TCG_REG_R32, -} TCGReg; - -#define TCG_CT_CONST_ZERO 0x100 -#define TCG_CT_CONST_S22 0x200 - -/* used for function call generation */ -#define TCG_REG_CALL_STACK TCG_REG_R12 -#define TCG_TARGET_STACK_ALIGN 16 -#define TCG_TARGET_CALL_STACK_OFFSET 16 - -/* optional instructions */ -#define TCG_TARGET_HAS_div_i32 0 -#define TCG_TARGET_HAS_rem_i32 0 -#define TCG_TARGET_HAS_div_i64 0 -#define TCG_TARGET_HAS_rem_i64 0 -#define TCG_TARGET_HAS_andc_i32 1 -#define TCG_TARGET_HAS_andc_i64 1 -#define TCG_TARGET_HAS_bswap16_i32 1 -#define TCG_TARGET_HAS_bswap16_i64 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_bswap32_i64 1 -#define TCG_TARGET_HAS_bswap64_i64 1 -#define TCG_TARGET_HAS_eqv_i32 1 -#define TCG_TARGET_HAS_eqv_i64 1 -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_ext8s_i64 1 -#define TCG_TARGET_HAS_ext16s_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext8u_i32 1 -#define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_ext8u_i64 1 -#define TCG_TARGET_HAS_ext16u_i64 1 -#define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_nand_i32 1 -#define TCG_TARGET_HAS_nand_i64 1 -#define TCG_TARGET_HAS_nor_i32 1 -#define TCG_TARGET_HAS_nor_i64 1 -#define TCG_TARGET_HAS_orc_i32 1 -#define TCG_TARGET_HAS_orc_i64 1 -#define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_rot_i64 1 -#define TCG_TARGET_HAS_movcond_i32 1 -#define TCG_TARGET_HAS_movcond_i64 1 -#define TCG_TARGET_HAS_deposit_i32 1 -#define TCG_TARGET_HAS_deposit_i64 1 -#define TCG_TARGET_HAS_add2_i32 0 -#define TCG_TARGET_HAS_add2_i64 0 -#define TCG_TARGET_HAS_sub2_i32 0 -#define TCG_TARGET_HAS_sub2_i64 0 -#define TCG_TARGET_HAS_mulu2_i32 0 -#define TCG_TARGET_HAS_mulu2_i64 0 -#define TCG_TARGET_HAS_muls2_i32 0 -#define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_muluh_i64 0 -#define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_mulsh_i64 0 -#define TCG_TARGET_HAS_trunc_shr_i32 0 - -#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16) -#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16) - -/* optional instructions automatically implemented */ -#define TCG_TARGET_HAS_neg_i32 0 /* sub r1, r0, r3 */ -#define TCG_TARGET_HAS_neg_i64 0 /* sub r1, r0, r3 */ -#define 
TCG_TARGET_HAS_not_i32 0 /* xor r1, -1, r3 */ -#define TCG_TARGET_HAS_not_i64 0 /* xor r1, -1, r3 */ - -static inline void flush_icache_range(uintptr_t start, uintptr_t stop) -{ - start = start & ~(32UL - 1UL); - stop = (stop + (32UL - 1UL)) & ~(32UL - 1UL); - - for (; start < stop; start += 32UL) { - asm volatile ("fc.i %0" :: "r" (start)); - } - asm volatile (";;sync.i;;srlz.i;;"); -} - -#endif diff --git a/qemu/tcg/mips/tcg-target.c b/qemu/tcg/mips/tcg-target.c deleted file mode 100644 index 3b53bb7b..00000000 --- a/qemu/tcg/mips/tcg-target.c +++ /dev/null @@ -1,1816 +0,0 @@ -/* - * Tiny Code Generator for QEMU - * - * Copyright (c) 2008-2009 Arnaud Patard - * Copyright (c) 2009 Aurelien Jarno - * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "tcg-be-ldst.h" - -#ifdef HOST_WORDS_BIGENDIAN -# define MIPS_BE 1 -#else -# define MIPS_BE 0 -#endif - -#define LO_OFF (MIPS_BE * 4) -#define HI_OFF (4 - LO_OFF) - -#ifndef NDEBUG -static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { - "zero", - "at", - "v0", - "v1", - "a0", - "a1", - "a2", - "a3", - "t0", - "t1", - "t2", - "t3", - "t4", - "t5", - "t6", - "t7", - "s0", - "s1", - "s2", - "s3", - "s4", - "s5", - "s6", - "s7", - "t8", - "t9", - "k0", - "k1", - "gp", - "sp", - "s8", - "ra", -}; -#endif - -#define TCG_TMP0 TCG_REG_AT -#define TCG_TMP1 TCG_REG_T9 - -/* check if we really need so many registers :P */ -static const TCGReg tcg_target_reg_alloc_order[] = { - /* Call saved registers. */ - TCG_REG_S0, - TCG_REG_S1, - TCG_REG_S2, - TCG_REG_S3, - TCG_REG_S4, - TCG_REG_S5, - TCG_REG_S6, - TCG_REG_S7, - TCG_REG_S8, - - /* Call clobbered registers. */ - TCG_REG_T0, - TCG_REG_T1, - TCG_REG_T2, - TCG_REG_T3, - TCG_REG_T4, - TCG_REG_T5, - TCG_REG_T6, - TCG_REG_T7, - TCG_REG_T8, - TCG_REG_T9, - TCG_REG_V1, - TCG_REG_V0, - - /* Argument registers, opposite order of allocation. */ - TCG_REG_A3, - TCG_REG_A2, - TCG_REG_A1, - TCG_REG_A0, -}; - -static const TCGReg tcg_target_call_iarg_regs[4] = { - TCG_REG_A0, - TCG_REG_A1, - TCG_REG_A2, - TCG_REG_A3 -}; - -static const TCGReg tcg_target_call_oarg_regs[2] = { - TCG_REG_V0, - TCG_REG_V1 -}; - -static tcg_insn_unit *tb_ret_addr; - -static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target) -{ - /* Let the compiler perform the right-shift as part of the arithmetic. 
*/ - ptrdiff_t disp = target - (pc + 1); - assert(disp == (int16_t)disp); - return disp & 0xffff; -} - -static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target) -{ - *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target)); -} - -static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target) -{ - assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0); - return ((uintptr_t)target >> 2) & 0x3ffffff; -} - -static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target) -{ - *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target)); -} - -static void patch_reloc(tcg_insn_unit *code_ptr, int type, - intptr_t value, intptr_t addend) -{ - assert(type == R_MIPS_PC16); - assert(addend == 0); - reloc_pc16(code_ptr, (tcg_insn_unit *)value); -} - -#define TCG_CT_CONST_ZERO 0x100 -#define TCG_CT_CONST_U16 0x200 /* Unsigned 16-bit: 0 - 0xffff. */ -#define TCG_CT_CONST_S16 0x400 /* Signed 16-bit: -32768 - 32767 */ -#define TCG_CT_CONST_P2M1 0x800 /* Power of 2 minus 1. */ -#define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */ - -static inline bool is_p2m1(tcg_target_long val) -{ - return val && ((val + 1) & val) == 0; -} - -/* parse target specific constraints */ -static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) -{ - const char *ct_str; - - ct_str = *pct_str; - switch(ct_str[0]) { - case 'r': - ct->ct |= TCG_CT_REG; - tcg_regset_set(ct->u.regs, 0xffffffff); - break; - case 'L': /* qemu_ld output arg constraint */ - ct->ct |= TCG_CT_REG; - tcg_regset_set(ct->u.regs, 0xffffffff); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_V0); - break; - case 'l': /* qemu_ld input arg constraint */ - ct->ct |= TCG_CT_REG; - tcg_regset_set(ct->u.regs, 0xffffffff); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); -#if defined(CONFIG_SOFTMMU) - if (TARGET_LONG_BITS == 64) { - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2); - } -#endif - break; - case 'S': /* qemu_st constraint */ - ct->ct |= TCG_CT_REG; - tcg_regset_set(ct->u.regs, 0xffffffff); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); -#if defined(CONFIG_SOFTMMU) - if (TARGET_LONG_BITS == 32) { - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1); - } else { - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3); - } -#endif - break; - case 'I': - ct->ct |= TCG_CT_CONST_U16; - break; - case 'J': - ct->ct |= TCG_CT_CONST_S16; - break; - case 'K': - ct->ct |= TCG_CT_CONST_P2M1; - break; - case 'N': - ct->ct |= TCG_CT_CONST_N16; - break; - case 'Z': - /* We are cheating a bit here, using the fact that the register - ZERO is also the register number 0. Hence there is no need - to check for const_args in each instruction. 
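[Aside on the 'K' constraint above: it accepts "power of 2 minus 1" constants because an AND with such a mask is a pure bitfield extract, which MIPS32R2 encodes as a single EXT instruction. A minimal standalone host-C sketch of that equivalence, not backend code:]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same test as the backend's is_p2m1(): val is 2^n - 1 for some n >= 1. */
static int is_p2m1(int64_t val)
{
    return val && ((val + 1) & val) == 0;
}

/* ANDing with a 2^n - 1 mask extracts the low n bits, which MIPS32R2
   can do in one EXT rt, rs, 0, n. */
static uint32_t ext_low_bits(uint32_t rs, int n)
{
    return n == 32 ? rs : (rs & ((1u << n) - 1));
}

int main(void)
{
    assert(is_p2m1(0xff) && is_p2m1(0x7fff));
    assert(!is_p2m1(0xff00) && !is_p2m1(0));
    uint32_t rs = 0xdeadbeef;
    assert((rs & 0xfff) == ext_low_bits(rs, 12));
    printf("AND with a p2m1 mask == single bitfield extract\n");
    return 0;
}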
*/ - ct->ct |= TCG_CT_CONST_ZERO; - break; - default: - return -1; - } - ct_str++; - *pct_str = ct_str; - return 0; -} - -/* test if a constant matches the constraint */ -static inline int tcg_target_const_match(tcg_target_long val, TCGType type, - const TCGArgConstraint *arg_ct) -{ - int ct; - ct = arg_ct->ct; - if (ct & TCG_CT_CONST) { - return 1; - } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { - return 1; - } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) { - return 1; - } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { - return 1; - } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) { - return 1; - } else if ((ct & TCG_CT_CONST_P2M1) - && use_mips32r2_instructions && is_p2m1(val)) { - return 1; - } - return 0; -} - -/* instruction opcodes */ -typedef enum { - OPC_J = 0x02 << 26, - OPC_JAL = 0x03 << 26, - OPC_BEQ = 0x04 << 26, - OPC_BNE = 0x05 << 26, - OPC_BLEZ = 0x06 << 26, - OPC_BGTZ = 0x07 << 26, - OPC_ADDIU = 0x09 << 26, - OPC_SLTI = 0x0A << 26, - OPC_SLTIU = 0x0B << 26, - OPC_ANDI = 0x0C << 26, - OPC_ORI = 0x0D << 26, - OPC_XORI = 0x0E << 26, - OPC_LUI = 0x0F << 26, - OPC_LB = 0x20 << 26, - OPC_LH = 0x21 << 26, - OPC_LW = 0x23 << 26, - OPC_LBU = 0x24 << 26, - OPC_LHU = 0x25 << 26, - OPC_LWU = 0x27 << 26, - OPC_SB = 0x28 << 26, - OPC_SH = 0x29 << 26, - OPC_SW = 0x2B << 26, - - OPC_SPECIAL = 0x00 << 26, - OPC_SLL = OPC_SPECIAL | 0x00, - OPC_SRL = OPC_SPECIAL | 0x02, - OPC_ROTR = OPC_SPECIAL | (0x01 << 21) | 0x02, - OPC_SRA = OPC_SPECIAL | 0x03, - OPC_SLLV = OPC_SPECIAL | 0x04, - OPC_SRLV = OPC_SPECIAL | 0x06, - OPC_ROTRV = OPC_SPECIAL | (0x01 << 6) | 0x06, - OPC_SRAV = OPC_SPECIAL | 0x07, - OPC_JR = OPC_SPECIAL | 0x08, - OPC_JALR = OPC_SPECIAL | 0x09, - OPC_MOVZ = OPC_SPECIAL | 0x0A, - OPC_MOVN = OPC_SPECIAL | 0x0B, - OPC_MFHI = OPC_SPECIAL | 0x10, - OPC_MFLO = OPC_SPECIAL | 0x12, - OPC_MULT = OPC_SPECIAL | 0x18, - OPC_MULTU = OPC_SPECIAL | 0x19, - OPC_DIV = OPC_SPECIAL | 0x1A, - OPC_DIVU = OPC_SPECIAL | 0x1B, - OPC_ADDU = OPC_SPECIAL | 0x21, - OPC_SUBU = OPC_SPECIAL | 0x23, - OPC_AND = OPC_SPECIAL | 0x24, - OPC_OR = OPC_SPECIAL | 0x25, - OPC_XOR = OPC_SPECIAL | 0x26, - OPC_NOR = OPC_SPECIAL | 0x27, - OPC_SLT = OPC_SPECIAL | 0x2A, - OPC_SLTU = OPC_SPECIAL | 0x2B, - - OPC_REGIMM = 0x01 << 26, - OPC_BLTZ = OPC_REGIMM | (0x00 << 16), - OPC_BGEZ = OPC_REGIMM | (0x01 << 16), - - OPC_SPECIAL2 = 0x1c << 26, - OPC_MUL = OPC_SPECIAL2 | 0x002, - - OPC_SPECIAL3 = 0x1f << 26, - OPC_EXT = OPC_SPECIAL3 | 0x000, - OPC_INS = OPC_SPECIAL3 | 0x004, - OPC_WSBH = OPC_SPECIAL3 | 0x0a0, - OPC_SEB = OPC_SPECIAL3 | 0x420, - OPC_SEH = OPC_SPECIAL3 | 0x620, -} MIPSInsn; - -/* - * Type reg - */ -static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc, - TCGReg rd, TCGReg rs, TCGReg rt) -{ - int32_t inst; - - inst = opc; - inst |= (rs & 0x1F) << 21; - inst |= (rt & 0x1F) << 16; - inst |= (rd & 0x1F) << 11; - tcg_out32(s, inst); -} - -/* - * Type immediate - */ -static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc, - TCGReg rt, TCGReg rs, TCGArg imm) -{ - int32_t inst; - - inst = opc; - inst |= (rs & 0x1F) << 21; - inst |= (rt & 0x1F) << 16; - inst |= (imm & 0xffff); - tcg_out32(s, inst); -} - -/* - * Type bitfield - */ -static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt, - TCGReg rs, int msb, int lsb) -{ - int32_t inst; - - inst = opc; - inst |= (rs & 0x1F) << 21; - inst |= (rt & 0x1F) << 16; - inst |= (msb & 0x1F) << 11; - inst |= (lsb & 0x1F) << 6; - tcg_out32(s, inst); -} - -/* - * Type branch - */ -static inline void 
tcg_out_opc_br(TCGContext *s, MIPSInsn opc, - TCGReg rt, TCGReg rs) -{ - /* We pay attention here to not modify the branch target by reading - the existing value and using it again. This ensure that caches and - memory are kept coherent during retranslation. */ - uint16_t offset = (uint16_t)*s->code_ptr; - - tcg_out_opc_imm(s, opc, rt, rs, offset); -} - -/* - * Type sa - */ -static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc, - TCGReg rd, TCGReg rt, TCGArg sa) -{ - int32_t inst; - - inst = opc; - inst |= (rt & 0x1F) << 16; - inst |= (rd & 0x1F) << 11; - inst |= (sa & 0x1F) << 6; - tcg_out32(s, inst); - -} - -/* - * Type jump. - * Returns true if the branch was in range and the insn was emitted. - */ -static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target) -{ - uintptr_t dest = (uintptr_t)target; - uintptr_t from = (uintptr_t)s->code_ptr + 4; - int32_t inst; - - /* The pc-region branch happens within the 256MB region of - the delay slot (thus the +4). */ - if ((from ^ dest) & -(1 << 28)) { - return false; - } - assert((dest & 3) == 0); - - inst = opc; - inst |= (dest >> 2) & 0x3ffffff; - tcg_out32(s, inst); - return true; -} - -static inline void tcg_out_nop(TCGContext *s) -{ - tcg_out32(s, 0); -} - -static inline void tcg_out_mov(TCGContext *s, TCGType type, - TCGReg ret, TCGReg arg) -{ - /* Simple reg-reg move, optimising out the 'do nothing' case */ - if (ret != arg) { - tcg_out_opc_reg(s, OPC_ADDU, ret, arg, TCG_REG_ZERO); - } -} - -static inline void tcg_out_movi(TCGContext *s, TCGType type, - TCGReg reg, tcg_target_long arg) -{ - if (arg == (int16_t)arg) { - tcg_out_opc_imm(s, OPC_ADDIU, reg, TCG_REG_ZERO, arg); - } else if (arg == (uint16_t)arg) { - tcg_out_opc_imm(s, OPC_ORI, reg, TCG_REG_ZERO, arg); - } else { - tcg_out_opc_imm(s, OPC_LUI, reg, TCG_REG_ZERO, arg >> 16); - if (arg & 0xffff) { - tcg_out_opc_imm(s, OPC_ORI, reg, reg, arg & 0xffff); - } - } -} - -static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); - } else { - /* ret and arg can't be register at */ - if (ret == TCG_TMP0 || arg == TCG_TMP0) { - tcg_abort(); - } - - tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8); - tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00); - tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); - } -} - -static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); - tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret); - } else { - /* ret and arg can't be register at */ - if (ret == TCG_TMP0 || arg == TCG_TMP0) { - tcg_abort(); - } - - tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); - tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); - tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); - } -} - -static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); - tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16); - } else { - /* ret and arg must be different and can't be register at */ - if (ret == arg || ret == TCG_TMP0 || arg == TCG_TMP0) { - tcg_abort(); - } - - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); - - tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 24); - tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); - - tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, arg, 0xff00); - tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8); - tcg_out_opc_reg(s, OPC_OR, 
ret, ret, TCG_TMP0); - - tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); - tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0xff00); - tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); - } -} - -static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg); - } else { - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); - tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24); - } -} - -static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg); - } else { - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16); - tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); - } -} - -static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data, - TCGReg addr, intptr_t ofs) -{ - int16_t lo = ofs; - if (ofs != lo) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo); - if (addr != TCG_REG_ZERO) { - tcg_out_opc_reg(s, OPC_ADDU, TCG_TMP0, TCG_TMP0, addr); - } - addr = TCG_TMP0; - } - tcg_out_opc_imm(s, opc, data, addr, lo); -} - -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) -{ - tcg_out_ldst(s, OPC_LW, arg, arg1, arg2); -} - -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) -{ - tcg_out_ldst(s, OPC_SW, arg, arg1, arg2); -} - -static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val) -{ - if (val == (int16_t)val) { - tcg_out_opc_imm(s, OPC_ADDIU, reg, reg, val); - } else { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, val); - tcg_out_opc_reg(s, OPC_ADDU, reg, reg, TCG_TMP0); - } -} - -/* Bit 0 set if inversion required; bit 1 set if swapping required. */ -#define MIPS_CMP_INV 1 -#define MIPS_CMP_SWAP 2 - -static const uint8_t mips_cmp_map[16] = { - [TCG_COND_LT] = 0, - [TCG_COND_LTU] = 0, - [TCG_COND_GE] = MIPS_CMP_INV, - [TCG_COND_GEU] = MIPS_CMP_INV, - [TCG_COND_LE] = MIPS_CMP_INV | MIPS_CMP_SWAP, - [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP, - [TCG_COND_GT] = MIPS_CMP_SWAP, - [TCG_COND_GTU] = MIPS_CMP_SWAP, -}; - -static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg arg1, TCGReg arg2) -{ - MIPSInsn s_opc = OPC_SLTU; - int cmp_map; - - switch (cond) { - case TCG_COND_EQ: - if (arg2 != 0) { - tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2); - arg1 = ret; - } - tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1); - break; - - case TCG_COND_NE: - if (arg2 != 0) { - tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2); - arg1 = ret; - } - tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1); - break; - - case TCG_COND_LT: - case TCG_COND_GE: - case TCG_COND_LE: - case TCG_COND_GT: - s_opc = OPC_SLT; - /* FALLTHRU */ - - case TCG_COND_LTU: - case TCG_COND_GEU: - case TCG_COND_LEU: - case TCG_COND_GTU: - cmp_map = mips_cmp_map[cond]; - if (cmp_map & MIPS_CMP_SWAP) { - TCGReg t = arg1; - arg1 = arg2; - arg2 = t; - } - tcg_out_opc_reg(s, s_opc, ret, arg1, arg2); - if (cmp_map & MIPS_CMP_INV) { - tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); - } - break; - - default: - tcg_abort(); - break; - } -} - -static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, - TCGReg arg2, int label_index) -{ - static const MIPSInsn b_zero[16] = { - [TCG_COND_LT] = OPC_BLTZ, - [TCG_COND_GT] = OPC_BGTZ, - [TCG_COND_LE] = OPC_BLEZ, - [TCG_COND_GE] = OPC_BGEZ, - }; - - TCGLabel *l; - MIPSInsn s_opc = OPC_SLTU; - MIPSInsn b_opc; - int cmp_map; - - switch (cond) { - case TCG_COND_EQ: - b_opc = OPC_BEQ; - break; - case TCG_COND_NE: - b_opc = OPC_BNE; - break; - 
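[Aside: MIPS only provides "set on less than" (SLT/SLTU), so the mips_cmp_map table above folds the other ordered conditions into one SLT plus an optional operand swap and/or a final XORI with 1. A standalone host-C sketch of the same reduction, exhaustively checked over a small range; all names are local to the example:]

#include <assert.h>
#include <stdint.h>

#define CMP_INV  1   /* xor the result with 1 afterwards */
#define CMP_SWAP 2   /* swap the operands before the compare */

enum cond { LT, GE, LE, GT };

/* Same encoding idea as the backend's mips_cmp_map (signed cases only). */
static const uint8_t cmp_map[4] = {
    [LT] = 0,
    [GE] = CMP_INV,
    [LE] = CMP_INV | CMP_SWAP,
    [GT] = CMP_SWAP,
};

static int slt(int32_t a, int32_t b) { return a < b; }  /* models SLT */

static int setcond(enum cond c, int32_t a, int32_t b)
{
    if (cmp_map[c] & CMP_SWAP) { int32_t t = a; a = b; b = t; }
    int r = slt(a, b);
    if (cmp_map[c] & CMP_INV) r ^= 1;
    return r;
}

int main(void)
{
    for (int32_t a = -2; a <= 2; a++) {
        for (int32_t b = -2; b <= 2; b++) {
            assert(setcond(LT, a, b) == (a < b));
            assert(setcond(GE, a, b) == (a >= b));
            assert(setcond(LE, a, b) == (a <= b));
            assert(setcond(GT, a, b) == (a > b));
        }
    }
    return 0;
}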
- case TCG_COND_LT: - case TCG_COND_GT: - case TCG_COND_LE: - case TCG_COND_GE: - if (arg2 == 0) { - b_opc = b_zero[cond]; - arg2 = arg1; - arg1 = 0; - break; - } - s_opc = OPC_SLT; - /* FALLTHRU */ - - case TCG_COND_LTU: - case TCG_COND_GTU: - case TCG_COND_LEU: - case TCG_COND_GEU: - cmp_map = mips_cmp_map[cond]; - if (cmp_map & MIPS_CMP_SWAP) { - TCGReg t = arg1; - arg1 = arg2; - arg2 = t; - } - tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2); - b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE); - arg1 = TCG_TMP0; - arg2 = TCG_REG_ZERO; - break; - - default: - tcg_abort(); - break; - } - - tcg_out_opc_br(s, b_opc, arg1, arg2); - l = &s->labels[label_index]; - if (l->has_value) { - reloc_pc16(s->code_ptr - 1, l->u.value_ptr); - } else { - tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, label_index, 0); - } - tcg_out_nop(s); -} - -static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1, - TCGReg al, TCGReg ah, - TCGReg bl, TCGReg bh) -{ - /* Merge highpart comparison into AH. */ - if (bh != 0) { - if (ah != 0) { - tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh); - ah = tmp0; - } else { - ah = bh; - } - } - /* Merge lowpart comparison into AL. */ - if (bl != 0) { - if (al != 0) { - tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl); - al = tmp1; - } else { - al = bl; - } - } - /* Merge high and low part comparisons into AL. */ - if (ah != 0) { - if (al != 0) { - tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al); - al = tmp0; - } else { - al = ah; - } - } - return al; -} - -static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) -{ - TCGReg tmp0 = TCG_TMP0; - TCGReg tmp1 = ret; - - assert(ret != TCG_TMP0); - if (ret == ah || ret == bh) { - assert(ret != TCG_TMP1); - tmp1 = TCG_TMP1; - } - - switch (cond) { - case TCG_COND_EQ: - case TCG_COND_NE: - tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh); - tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO); - break; - - default: - tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh); - tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl); - tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0); - tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh); - tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0); - break; - } -} - -static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, - TCGReg bl, TCGReg bh, int label_index) -{ - TCGCond b_cond = TCG_COND_NE; - TCGReg tmp = TCG_TMP1; - - /* With branches, we emit between 4 and 9 insns with 2 or 3 branches. - With setcond, we emit between 3 and 10 insns and only 1 branch, - which ought to get better branch prediction. */ - switch (cond) { - case TCG_COND_EQ: - case TCG_COND_NE: - b_cond = cond; - tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh); - break; - - default: - /* Minimize code size by preferring a compare not requiring INV. */ - if (mips_cmp_map[cond] & MIPS_CMP_INV) { - cond = tcg_invert_cond(cond); - b_cond = TCG_COND_EQ; - } - tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh); - break; - } - - tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, label_index); -} - -static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg c1, TCGReg c2, TCGReg v) -{ - MIPSInsn m_opc = OPC_MOVN; - - switch (cond) { - case TCG_COND_EQ: - m_opc = OPC_MOVZ; - /* FALLTHRU */ - case TCG_COND_NE: - if (c2 != 0) { - tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2); - c1 = TCG_TMP0; - } - break; - - default: - /* Minimize code size by preferring a compare not requiring INV. 
*/ - if (mips_cmp_map[cond] & MIPS_CMP_INV) { - cond = tcg_invert_cond(cond); - m_opc = OPC_MOVZ; - } - tcg_out_setcond(s, cond, TCG_TMP0, c1, c2); - c1 = TCG_TMP0; - break; - } - - tcg_out_opc_reg(s, m_opc, ret, v, c1); -} - -static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail) -{ - /* Note that the ABI requires the called function's address to be - loaded into T9, even if a direct branch is in range. */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg); - - /* But do try a direct branch, allowing the cpu better insn prefetch. */ - if (tail) { - if (!tcg_out_opc_jmp(s, OPC_J, arg)) { - tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0); - } - } else { - if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) { - tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0); - } - } -} - -static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg) -{ - tcg_out_call_int(s, arg, false); - tcg_out_nop(s); -} - -#if defined(CONFIG_SOFTMMU) -static void * const qemu_ld_helpers[16] = { - [MO_UB] = helper_ret_ldub_mmu, - [MO_SB] = helper_ret_ldsb_mmu, - [MO_LEUW] = helper_le_lduw_mmu, - [MO_LESW] = helper_le_ldsw_mmu, - [MO_LEUL] = helper_le_ldul_mmu, - [MO_LEQ] = helper_le_ldq_mmu, - [MO_BEUW] = helper_be_lduw_mmu, - [MO_BESW] = helper_be_ldsw_mmu, - [MO_BEUL] = helper_be_ldul_mmu, - [MO_BEQ] = helper_be_ldq_mmu, -}; - -static void * const qemu_st_helpers[16] = { - [MO_UB] = helper_ret_stb_mmu, - [MO_LEUW] = helper_le_stw_mmu, - [MO_LEUL] = helper_le_stl_mmu, - [MO_LEQ] = helper_le_stq_mmu, - [MO_BEUW] = helper_be_stw_mmu, - [MO_BEUL] = helper_be_stl_mmu, - [MO_BEQ] = helper_be_stq_mmu, -}; - -/* Helper routines for marshalling helper function arguments into - * the correct registers and stack. - * I is where we want to put this argument, and is updated and returned - * for the next call. ARG is the argument itself. - * - * We provide routines for arguments which are: immediate, 32 bit - * value in register, 16 and 8 bit values in register (which must be zero - * extended before use) and 64 bit value in a lo:hi register pair. - */ - -static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg) -{ - if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { - tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg); - } else { - tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i); - } - return i + 1; -} - -static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg) -{ - TCGReg tmp = TCG_TMP0; - if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { - tmp = tcg_target_call_iarg_regs[i]; - } - tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff); - return tcg_out_call_iarg_reg(s, i, tmp); -} - -static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg) -{ - TCGReg tmp = TCG_TMP0; - if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { - tmp = tcg_target_call_iarg_regs[i]; - } - tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff); - return tcg_out_call_iarg_reg(s, i, tmp); -} - -static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg) -{ - TCGReg tmp = TCG_TMP0; - if (arg == 0) { - tmp = TCG_REG_ZERO; - } else { - if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { - tmp = tcg_target_call_iarg_regs[i]; - } - tcg_out_movi(s, TCG_TYPE_REG, tmp, arg); - } - return tcg_out_call_iarg_reg(s, i, tmp); -} - -static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah) -{ - i = (i + 1) & ~1; - i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al)); - i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah)); - return i; -} - -/* Perform the tlb comparison operation. 
The complete host address is - placed in BASE. Clobbers AT, T0, A0. */ -static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, - TCGReg addrh, int mem_index, TCGMemOp s_bits, - tcg_insn_unit *label_ptr[2], bool is_load) -{ - int cmp_off - = (is_load - ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) - : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write)); - int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend); - - tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addrl, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); - tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0, - (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); - tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0); - - /* Compensate for very large offsets. */ - if (add_off >= 0x8000) { - /* Most target env are smaller than 32k; none are larger than 64k. - Simplify the logic here merely to offset by 0x7ff0, giving us a - range just shy of 64k. Check this assumption. */ - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, - tlb_table[NB_MMU_MODES - 1][1]) - > 0x7ff0 + 0x7fff); - tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_A0, TCG_REG_A0, 0x7ff0); - cmp_off -= 0x7ff0; - add_off -= 0x7ff0; - } - - /* Load the tlb comparator. */ - tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off + LO_OFF); - if (TARGET_LONG_BITS == 64) { - tcg_out_opc_imm(s, OPC_LW, base, TCG_REG_A0, cmp_off + HI_OFF); - } - - /* Mask the page bits, keeping the alignment bits to compare against. - In between, load the tlb addend for the fast path. */ - tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, - TARGET_PAGE_MASK | ((1 << s_bits) - 1)); - tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off); - tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl); - - label_ptr[0] = s->code_ptr; - tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); - - if (TARGET_LONG_BITS == 64) { - /* delay slot */ - tcg_out_nop(s); - - label_ptr[1] = s->code_ptr; - tcg_out_opc_br(s, OPC_BNE, addrh, base); - } - - /* delay slot */ - tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl); -} - -static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc, - TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - int mem_index, void *raddr, - tcg_insn_unit *label_ptr[2]) -{ - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->opc = opc; - label->datalo_reg = datalo; - label->datahi_reg = datahi; - label->addrlo_reg = addrlo; - label->addrhi_reg = addrhi; - label->mem_index = mem_index; - label->raddr = raddr; - label->label_ptr[0] = label_ptr[0]; - if (TARGET_LONG_BITS == 64) { - label->label_ptr[1] = label_ptr[1]; - } -} - -static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - TCGMemOp opc = l->opc; - TCGReg v0; - int i; - - /* resolve label address */ - reloc_pc16(l->label_ptr[0], s->code_ptr); - if (TARGET_LONG_BITS == 64) { - reloc_pc16(l->label_ptr[1], s->code_ptr); - } - - i = 1; - if (TARGET_LONG_BITS == 64) { - i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg); - } else { - i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg); - } - i = tcg_out_call_iarg_imm(s, i, l->mem_index); - i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr); - tcg_out_call_int(s, qemu_ld_helpers[opc], false); - /* delay slot */ - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); - - v0 = l->datalo_reg; - if ((opc & MO_SIZE) == MO_64) { - /* We eliminated V0 from the possible output registers, so it - cannot be clobbered here. So we must move V1 first. 
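[Aside: tcg_out_tlb_load above emits the classic softmmu fast path: shift-and-mask the guest address into a TLB index, compare the page-masked address (alignment bits kept, so unaligned accesses miss) against the stored tag, and on a hit add the cached addend to form the host pointer. A host-C model of that lookup; the geometry constants are hypothetical stand-ins for the real cpu-defs.h values:]

#include <stddef.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 12
#define CPU_TLB_SIZE     256
#define TARGET_PAGE_MASK (~((1u << TARGET_PAGE_BITS) - 1))

typedef struct {
    uint32_t  addr_read;  /* page-aligned tag of the cached guest page */
    uintptr_t addend;     /* host_page - guest_page, valid on a hit */
} TLBEntry;

/* Mirrors the emitted sequence: SRL+ANDI to index the TLB, compare the
   page-masked address (low s_bits kept so unaligned accesses miss),
   then ADDU the addend to form the host address on a hit. */
static void *tlb_lookup(TLBEntry *tlb, uint32_t addr, int s_bits)
{
    unsigned idx = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    uint32_t tag = addr & (TARGET_PAGE_MASK | ((1u << s_bits) - 1));

    if (tag != tlb[idx].addr_read) {
        return NULL;  /* miss: the real code branches to a helper call */
    }
    return (void *)(tlb[idx].addend + addr);
}

int main(void)
{
    static TLBEntry tlb[CPU_TLB_SIZE];
    static uint8_t host_page[1 << TARGET_PAGE_BITS];
    uint32_t guest = 0x40002000;
    unsigned idx = (guest >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    tlb[idx].addr_read = guest & TARGET_PAGE_MASK;
    tlb[idx].addend = (uintptr_t)host_page - guest;

    /* An aligned 4-byte load hits; a misaligned one falls back. */
    return (tlb_lookup(tlb, guest + 0x10, 2) == host_page + 0x10 &&
            tlb_lookup(tlb, guest + 0x11, 2) == NULL) ? 0 : 1;
}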
*/ - if (MIPS_BE) { - tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1); - v0 = l->datahi_reg; - } else { - tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1); - } - } - - reloc_pc16(s->code_ptr, l->raddr); - tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO); - /* delay slot */ - tcg_out_mov(s, TCG_TYPE_REG, v0, TCG_REG_V0); -} - -static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - TCGMemOp opc = l->opc; - TCGMemOp s_bits = opc & MO_SIZE; - int i; - - /* resolve label address */ - reloc_pc16(l->label_ptr[0], s->code_ptr); - if (TARGET_LONG_BITS == 64) { - reloc_pc16(l->label_ptr[1], s->code_ptr); - } - - i = 1; - if (TARGET_LONG_BITS == 64) { - i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg); - } else { - i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg); - } - switch (s_bits) { - case MO_8: - i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg); - break; - case MO_16: - i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg); - break; - case MO_32: - i = tcg_out_call_iarg_reg(s, i, l->datalo_reg); - break; - case MO_64: - i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg); - break; - default: - tcg_abort(); - } - i = tcg_out_call_iarg_imm(s, i, l->mem_index); - - /* Tail call to the store helper. Thus force the return address - computation to take place in the return address register. */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr); - i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA); - tcg_out_call_int(s, qemu_st_helpers[opc], true); - /* delay slot */ - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); -} -#endif - -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg base, TCGMemOp opc) -{ - switch (opc) { - case MO_UB: - tcg_out_opc_imm(s, OPC_LBU, datalo, base, 0); - break; - case MO_SB: - tcg_out_opc_imm(s, OPC_LB, datalo, base, 0); - break; - case MO_UW | MO_BSWAP: - tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0); - tcg_out_bswap16(s, datalo, TCG_TMP1); - break; - case MO_UW: - tcg_out_opc_imm(s, OPC_LHU, datalo, base, 0); - break; - case MO_SW | MO_BSWAP: - tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0); - tcg_out_bswap16s(s, datalo, TCG_TMP1); - break; - case MO_SW: - tcg_out_opc_imm(s, OPC_LH, datalo, base, 0); - break; - case MO_UL | MO_BSWAP: - tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 0); - tcg_out_bswap32(s, datalo, TCG_TMP1); - break; - case MO_UL: - tcg_out_opc_imm(s, OPC_LW, datalo, base, 0); - break; - case MO_Q | MO_BSWAP: - tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, HI_OFF); - tcg_out_bswap32(s, datalo, TCG_TMP1); - tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, LO_OFF); - tcg_out_bswap32(s, datahi, TCG_TMP1); - break; - case MO_Q: - tcg_out_opc_imm(s, OPC_LW, datalo, base, LO_OFF); - tcg_out_opc_imm(s, OPC_LW, datahi, base, HI_OFF); - break; - default: - tcg_abort(); - } -} - -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) -{ - TCGReg addr_regl, addr_regh QEMU_UNUSED_VAR; - TCGReg data_regl, data_regh; - TCGMemOp opc; -#if defined(CONFIG_SOFTMMU) - tcg_insn_unit *label_ptr[2]; - int mem_index; - TCGMemOp s_bits; -#endif - /* Note that we've eliminated V0 from the output registers, - so we won't overwrite the base register during loading. */ - TCGReg base = TCG_REG_V0; - - data_regl = *args++; - data_regh = (is_64 ? *args++ : 0); - addr_regl = *args++; - addr_regh = (TARGET_LONG_BITS == 64 ? 
*args++ : 0); - opc = *args++; - -#if defined(CONFIG_SOFTMMU) - mem_index = *args; - s_bits = opc & MO_SIZE; - - tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index, - s_bits, label_ptr, 1); - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc); - add_qemu_ldst_label(s, 1, opc, data_regl, data_regh, addr_regl, addr_regh, - mem_index, s->code_ptr, label_ptr); -#else - if (GUEST_BASE == 0 && data_regl != addr_regl) { - base = addr_regl; - } else if (GUEST_BASE == (int16_t)GUEST_BASE) { - tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE); - } else { - tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE); - tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl); - } - tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc); -#endif -} - -static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg base, TCGMemOp opc) -{ - switch (opc) { - case MO_8: - tcg_out_opc_imm(s, OPC_SB, datalo, base, 0); - break; - - case MO_16 | MO_BSWAP: - tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, datalo, 0xffff); - tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1); - datalo = TCG_TMP1; - /* FALLTHRU */ - case MO_16: - tcg_out_opc_imm(s, OPC_SH, datalo, base, 0); - break; - - case MO_32 | MO_BSWAP: - tcg_out_bswap32(s, TCG_TMP1, datalo); - datalo = TCG_TMP1; - /* FALLTHRU */ - case MO_32: - tcg_out_opc_imm(s, OPC_SW, datalo, base, 0); - break; - - case MO_64 | MO_BSWAP: - tcg_out_bswap32(s, TCG_TMP1, datalo); - tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, HI_OFF); - tcg_out_bswap32(s, TCG_TMP1, datahi); - tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, LO_OFF); - break; - case MO_64: - tcg_out_opc_imm(s, OPC_SW, datalo, base, LO_OFF); - tcg_out_opc_imm(s, OPC_SW, datahi, base, HI_OFF); - break; - - default: - tcg_abort(); - } -} - -static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al, - TCGReg ah, TCGArg bl, TCGArg bh, bool cbl, - bool cbh, bool is_sub) -{ - TCGReg th = TCG_TMP1; - - /* If we have a negative constant such that negating it would - make the high part zero, we can (usually) eliminate one insn. */ - if (cbl && cbh && bh == -1 && bl != 0) { - bl = -bl; - bh = 0; - is_sub = !is_sub; - } - - /* By operating on the high part first, we get to use the final - carry operation to move back from the temporary. */ - if (!cbh) { - tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh); - } else if (bh != 0 || ah == rl) { - tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh)); - } else { - th = ah; - } - - /* Note that tcg optimization should eliminate the bl == 0 case. */ - if (is_sub) { - if (cbl) { - tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl); - tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl); - } else { - tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl); - tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl); - } - tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0); - } else { - if (cbl) { - tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl); - tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl); - } else { - tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl); - tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl)); - } - tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0); - } -} - -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) -{ - TCGReg addr_regl, addr_regh QEMU_UNUSED_VAR; - TCGReg data_regl, data_regh, base; - TCGMemOp opc; -#if defined(CONFIG_SOFTMMU) - tcg_insn_unit *label_ptr[2]; - int mem_index; - TCGMemOp s_bits; -#endif - - data_regl = *args++; - data_regh = (is_64 ? *args++ : 0); - addr_regl = *args++; - addr_regh = (TARGET_LONG_BITS == 64 ? 
*args++ : 0); - opc = *args++; - -#if defined(CONFIG_SOFTMMU) - mem_index = *args; - s_bits = opc & 3; - - /* Note that we eliminated the helper's address argument, - so we can reuse that for the base. */ - base = (TARGET_LONG_BITS == 32 ? TCG_REG_A1 : TCG_REG_A2); - tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index, - s_bits, label_ptr, 0); - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); - add_qemu_ldst_label(s, 0, opc, data_regl, data_regh, addr_regl, addr_regh, - mem_index, s->code_ptr, label_ptr); -#else - if (GUEST_BASE == 0) { - base = addr_regl; - } else { - base = TCG_REG_A0; - if (GUEST_BASE == (int16_t)GUEST_BASE) { - tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE); - } else { - tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE); - tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl); - } - } - tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); -#endif -} - -static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg *args, const int *const_args) -{ - MIPSInsn i1, i2; - TCGArg a0, a1, a2; - int c2; - - a0 = args[0]; - a1 = args[1]; - a2 = args[2]; - c2 = const_args[2]; - - switch (opc) { - case INDEX_op_exit_tb: - { - TCGReg b0 = TCG_REG_ZERO; - - if (a0 & ~0xffff) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff); - b0 = TCG_REG_V0; - } - if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, - (uintptr_t)tb_ret_addr); - tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); - } - tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff); - } - break; - case INDEX_op_goto_tb: - if (s->tb_jmp_offset) { - /* direct jump method */ - s->tb_jmp_offset[a0] = tcg_current_code_size(s); - /* Avoid clobbering the address during retranslation. */ - tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff)); - } else { - /* indirect jump method */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO, - (uintptr_t)(s->tb_next + a0)); - tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); - } - tcg_out_nop(s); - s->tb_next_offset[a0] = tcg_current_code_size(s); - break; - case INDEX_op_br: - tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, a0); - break; - - case INDEX_op_ld8u_i32: - i1 = OPC_LBU; - goto do_ldst; - case INDEX_op_ld8s_i32: - i1 = OPC_LB; - goto do_ldst; - case INDEX_op_ld16u_i32: - i1 = OPC_LHU; - goto do_ldst; - case INDEX_op_ld16s_i32: - i1 = OPC_LH; - goto do_ldst; - case INDEX_op_ld_i32: - i1 = OPC_LW; - goto do_ldst; - case INDEX_op_st8_i32: - i1 = OPC_SB; - goto do_ldst; - case INDEX_op_st16_i32: - i1 = OPC_SH; - goto do_ldst; - case INDEX_op_st_i32: - i1 = OPC_SW; - do_ldst: - tcg_out_ldst(s, i1, a0, a1, a2); - break; - - case INDEX_op_add_i32: - i1 = OPC_ADDU, i2 = OPC_ADDIU; - goto do_binary; - case INDEX_op_or_i32: - i1 = OPC_OR, i2 = OPC_ORI; - goto do_binary; - case INDEX_op_xor_i32: - i1 = OPC_XOR, i2 = OPC_XORI; - do_binary: - if (c2) { - tcg_out_opc_imm(s, i2, a0, a1, a2); - break; - } - do_binaryv: - tcg_out_opc_reg(s, i1, a0, a1, a2); - break; - - case INDEX_op_sub_i32: - if (c2) { - tcg_out_opc_imm(s, OPC_ADDIU, a0, a1, -a2); - break; - } - i1 = OPC_SUBU; - goto do_binary; - case INDEX_op_and_i32: - if (c2 && a2 != (uint16_t)a2) { - int msb = ctz32(~a2) - 1; - assert(use_mips32r2_instructions); - assert(is_p2m1(a2)); - tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0); - break; - } - i1 = OPC_AND, i2 = OPC_ANDI; - goto do_binary; - case INDEX_op_nor_i32: - i1 = OPC_NOR; - goto do_binaryv; - - case INDEX_op_mul_i32: - if (use_mips32_instructions) { - tcg_out_opc_reg(s, OPC_MUL, a0, 
a1, a2); - break; - } - i1 = OPC_MULT, i2 = OPC_MFLO; - goto do_hilo1; - case INDEX_op_mulsh_i32: - i1 = OPC_MULT, i2 = OPC_MFHI; - goto do_hilo1; - case INDEX_op_muluh_i32: - i1 = OPC_MULTU, i2 = OPC_MFHI; - goto do_hilo1; - case INDEX_op_div_i32: - i1 = OPC_DIV, i2 = OPC_MFLO; - goto do_hilo1; - case INDEX_op_divu_i32: - i1 = OPC_DIVU, i2 = OPC_MFLO; - goto do_hilo1; - case INDEX_op_rem_i32: - i1 = OPC_DIV, i2 = OPC_MFHI; - goto do_hilo1; - case INDEX_op_remu_i32: - i1 = OPC_DIVU, i2 = OPC_MFHI; - do_hilo1: - tcg_out_opc_reg(s, i1, 0, a1, a2); - tcg_out_opc_reg(s, i2, a0, 0, 0); - break; - - case INDEX_op_muls2_i32: - i1 = OPC_MULT; - goto do_hilo2; - case INDEX_op_mulu2_i32: - i1 = OPC_MULTU; - do_hilo2: - tcg_out_opc_reg(s, i1, 0, a2, args[3]); - tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0); - tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0); - break; - - case INDEX_op_not_i32: - i1 = OPC_NOR; - goto do_unary; - case INDEX_op_bswap16_i32: - i1 = OPC_WSBH; - goto do_unary; - case INDEX_op_ext8s_i32: - i1 = OPC_SEB; - goto do_unary; - case INDEX_op_ext16s_i32: - i1 = OPC_SEH; - do_unary: - tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1); - break; - - case INDEX_op_sar_i32: - i1 = OPC_SRAV, i2 = OPC_SRA; - goto do_shift; - case INDEX_op_shl_i32: - i1 = OPC_SLLV, i2 = OPC_SLL; - goto do_shift; - case INDEX_op_shr_i32: - i1 = OPC_SRLV, i2 = OPC_SRL; - goto do_shift; - case INDEX_op_rotr_i32: - i1 = OPC_ROTRV, i2 = OPC_ROTR; - do_shift: - if (c2) { - tcg_out_opc_sa(s, i2, a0, a1, a2); - } else { - tcg_out_opc_reg(s, i1, a0, a2, a1); - } - break; - case INDEX_op_rotl_i32: - if (c2) { - tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2); - } else { - tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2); - tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1); - } - break; - - case INDEX_op_bswap32_i32: - tcg_out_opc_reg(s, OPC_WSBH, a0, 0, a1); - tcg_out_opc_sa(s, OPC_ROTR, a0, a0, 16); - break; - - case INDEX_op_deposit_i32: - tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]); - break; - - case INDEX_op_brcond_i32: - tcg_out_brcond(s, a2, a0, a1, args[3]); - break; - case INDEX_op_brcond2_i32: - tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], args[5]); - break; - - case INDEX_op_movcond_i32: - tcg_out_movcond(s, args[5], a0, a1, a2, args[3]); - break; - - case INDEX_op_setcond_i32: - tcg_out_setcond(s, args[3], a0, a1, a2); - break; - case INDEX_op_setcond2_i32: - tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); - break; - - case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, args, false); - break; - case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args, true); - break; - case INDEX_op_qemu_st_i32: - tcg_out_qemu_st(s, args, false); - break; - case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args, true); - break; - - case INDEX_op_add2_i32: - tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], - const_args[4], const_args[5], false); - break; - case INDEX_op_sub2_i32: - tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], - const_args[4], const_args[5], true); - break; - - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ - case INDEX_op_call: /* Always emitted via tcg_out_call. 
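[Aside: the add2/sub2 cases above rely on tcg_out_addsub2, which reconstructs the carry flag MIPS lacks by using SLTU: after the low-part ADDU, the carry out is exactly "result unsigned-less-than one addend". A host-C rendering of the emitted add sequence, a sketch rather than the backend code:]

#include <assert.h>
#include <stdint.h>

/* The emitted add2 sequence, in host C:
   ADDU rl, al, bl;  SLTU t, rl, bl;  ADDU rh, (ah + bh), t. */
static void add2(uint32_t *rl, uint32_t *rh,
                 uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh)
{
    uint32_t lo = al + bl;
    uint32_t carry = lo < bl;  /* SLTU: 1 iff the low add wrapped */

    *rl = lo;
    *rh = ah + bh + carry;
}

int main(void)
{
    uint32_t rl, rh;

    /* 0x1ffffffff + 0x200000001, split into 32-bit halves. */
    add2(&rl, &rh, 0xffffffffu, 0x1u, 0x1u, 0x2u);
    assert((((uint64_t)rh << 32) | rl) == 0x1ffffffffULL + 0x200000001ULL);
    return 0;
}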
*/ - default: - tcg_abort(); - } -} - -static const TCGTargetOpDef mips_op_defs[] = { - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - { INDEX_op_br, { } }, - - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - { INDEX_op_st8_i32, { "rZ", "r" } }, - { INDEX_op_st16_i32, { "rZ", "r" } }, - { INDEX_op_st_i32, { "rZ", "r" } }, - - { INDEX_op_add_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_mul_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } }, - { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } }, - { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_div_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_divu_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_rem_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_remu_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_sub_i32, { "r", "rZ", "rN" } }, - - { INDEX_op_and_i32, { "r", "rZ", "rIK" } }, - { INDEX_op_nor_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_not_i32, { "r", "rZ" } }, - { INDEX_op_or_i32, { "r", "rZ", "rIZ" } }, - { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } }, - - { INDEX_op_shl_i32, { "r", "rZ", "ri" } }, - { INDEX_op_shr_i32, { "r", "rZ", "ri" } }, - { INDEX_op_sar_i32, { "r", "rZ", "ri" } }, - { INDEX_op_rotr_i32, { "r", "rZ", "ri" } }, - { INDEX_op_rotl_i32, { "r", "rZ", "ri" } }, - - { INDEX_op_bswap16_i32, { "r", "r" } }, - { INDEX_op_bswap32_i32, { "r", "r" } }, - - { INDEX_op_ext8s_i32, { "r", "rZ" } }, - { INDEX_op_ext16s_i32, { "r", "rZ" } }, - - { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, - - { INDEX_op_brcond_i32, { "rZ", "rZ" } }, - { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } }, - { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } }, - { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } }, - - { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } }, - { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } }, - { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } }, - -#if TARGET_LONG_BITS == 32 - { INDEX_op_qemu_ld_i32, { "L", "lZ" } }, - { INDEX_op_qemu_st_i32, { "SZ", "SZ" } }, - { INDEX_op_qemu_ld_i64, { "L", "L", "lZ" } }, - { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } }, -#else - { INDEX_op_qemu_ld_i32, { "L", "lZ", "lZ" } }, - { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } }, - { INDEX_op_qemu_ld_i64, { "L", "L", "lZ", "lZ" } }, - { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } }, -#endif - { -1 }, -}; - -static int tcg_target_callee_save_regs[] = { - TCG_REG_S0, /* used for the global env (TCG_AREG0) */ - TCG_REG_S1, - TCG_REG_S2, - TCG_REG_S3, - TCG_REG_S4, - TCG_REG_S5, - TCG_REG_S6, - TCG_REG_S7, - TCG_REG_S8, - TCG_REG_RA, /* should be last for ABI compliance */ -}; - -/* The Linux kernel doesn't provide any information about the available - instruction set. Probe it using a signal handler. 
*/ - -#include <signal.h> - -#ifndef use_movnz_instructions -bool use_movnz_instructions = false; -#endif - -#ifndef use_mips32_instructions -bool use_mips32_instructions = false; -#endif - -#ifndef use_mips32r2_instructions -bool use_mips32r2_instructions = false; -#endif - -static volatile sig_atomic_t got_sigill; - -static void sigill_handler(int signo, siginfo_t *si, void *data) -{ - /* Skip the faulty instruction */ - ucontext_t *uc = (ucontext_t *)data; - uc->uc_mcontext.pc += 4; - - got_sigill = 1; -} - -static void tcg_target_detect_isa(void) -{ - struct sigaction sa_old, sa_new; - - memset(&sa_new, 0, sizeof(sa_new)); - sa_new.sa_flags = SA_SIGINFO; - sa_new.sa_sigaction = sigill_handler; - sigaction(SIGILL, &sa_new, &sa_old); - - /* Probe for movn/movz, necessary to implement movcond. */ -#ifndef use_movnz_instructions - got_sigill = 0; - asm volatile(".set push\n" - ".set mips32\n" - "movn $zero, $zero, $zero\n" - "movz $zero, $zero, $zero\n" - ".set pop\n" - : : : ); - use_movnz_instructions = !got_sigill; -#endif - - /* Probe for MIPS32 instructions. As no subsetting is allowed - by the specification, it is only necessary to probe for one - of the instructions. */ -#ifndef use_mips32_instructions - got_sigill = 0; - asm volatile(".set push\n" - ".set mips32\n" - "mul $zero, $zero\n" - ".set pop\n" - : : : ); - use_mips32_instructions = !got_sigill; -#endif - - /* Probe for MIPS32r2 instructions if MIPS32 instructions are - available. As no subsetting is allowed by the specification, - it is only necessary to probe for one of the instructions. */ -#ifndef use_mips32r2_instructions - if (use_mips32_instructions) { - got_sigill = 0; - asm volatile(".set push\n" - ".set mips32r2\n" - "seb $zero, $zero\n" - ".set pop\n" - : : : ); - use_mips32r2_instructions = !got_sigill; - } -#endif - - sigaction(SIGILL, &sa_old, NULL); -} - -/* Generate global QEMU prologue and epilogue code */ -static void tcg_target_qemu_prologue(TCGContext *s) -{ - int i, frame_size; - - /* reserve some stack space, also for TCG temps.
*/ - frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4 - + TCG_STATIC_CALL_ARGS_SIZE - + CPU_TEMP_BUF_NLONGS * sizeof(long); - frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) & - ~(TCG_TARGET_STACK_ALIGN - 1); - tcg_set_frame(s, TCG_REG_SP, ARRAY_SIZE(tcg_target_callee_save_regs) * 4 - + TCG_STATIC_CALL_ARGS_SIZE, - CPU_TEMP_BUF_NLONGS * sizeof(long)); - - /* TB prologue */ - tcg_out_addi(s, TCG_REG_SP, -frame_size); - for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) { - tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i], - TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4); - } - - /* Call generated code */ - tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0); - tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); - tb_ret_addr = s->code_ptr; - - /* TB epilogue */ - for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) { - tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i], - TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4); - } - - tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); - tcg_out_addi(s, TCG_REG_SP, frame_size); -} - -static void tcg_target_init(TCGContext *s) -{ - tcg_target_detect_isa(); - tcg_regset_set(s->tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff); - tcg_regset_set(s->tcg_target_call_clobber_regs, - (1 << TCG_REG_V0) | - (1 << TCG_REG_V1) | - (1 << TCG_REG_A0) | - (1 << TCG_REG_A1) | - (1 << TCG_REG_A2) | - (1 << TCG_REG_A3) | - (1 << TCG_REG_T0) | - (1 << TCG_REG_T1) | - (1 << TCG_REG_T2) | - (1 << TCG_REG_T3) | - (1 << TCG_REG_T4) | - (1 << TCG_REG_T5) | - (1 << TCG_REG_T6) | - (1 << TCG_REG_T7) | - (1 << TCG_REG_T8) | - (1 << TCG_REG_T9)); - - tcg_regset_clear(s->reserved_regs); - tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */ - tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* internal use */ - tcg_regset_set_reg(s->reserved_regs, TCG_TMP1); /* internal use */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */ - tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */ - - tcg_add_target_add_op_defs(s, mips_op_defs); -} - -void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) -{ - uint32_t *ptr = (uint32_t *)jmp_addr; - *ptr = deposit32(*ptr, 0, 26, addr >> 2); - flush_icache_range(jmp_addr, jmp_addr + 4); -} diff --git a/qemu/tcg/mips/tcg-target.h b/qemu/tcg/mips/tcg-target.h index c88a1c92..c6b091d8 100644 --- a/qemu/tcg/mips/tcg-target.h +++ b/qemu/tcg/mips/tcg-target.h @@ -23,10 +23,20 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
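[Aside: tb_set_jmp_target1 above retargets a direct-jump TB link by rewriting only the 26-bit field of the already-emitted J instruction, leaving the opcode bits intact, then flushing the icache for that word. A standalone sketch of the field surgery, with deposit32 reimplemented locally to match QEMU's semantics:]

#include <assert.h>
#include <stdint.h>

/* Local stand-in for QEMU's deposit32(): replace LEN bits at POS. */
static uint32_t deposit32(uint32_t value, int pos, int len, uint32_t field)
{
    uint32_t mask = (len == 32 ? ~0u : ((1u << len) - 1)) << pos;
    return (value & ~mask) | ((field << pos) & mask);
}

#define OPC_J (0x02u << 26)

int main(void)
{
    uint32_t insn = OPC_J;         /* a J instruction with a stale target */
    uint32_t addr = 0x00400120;    /* new destination, word-aligned */

    insn = deposit32(insn, 0, 26, addr >> 2);
    assert((insn >> 26) == 0x02);               /* opcode preserved */
    assert(((insn & 0x3ffffff) << 2) == addr);  /* target re-encoded */
    return 0;
}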
*/ -#ifndef TCG_TARGET_MIPS -#define TCG_TARGET_MIPS 1 + +#ifndef MIPS_TCG_TARGET_H +#define MIPS_TCG_TARGET_H + +#if _MIPS_SIM == _ABIO32 +# define TCG_TARGET_REG_BITS 32 +#elif _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64 +# define TCG_TARGET_REG_BITS 64 +#else +# error "Unknown ABI" +#endif #define TCG_TARGET_INSN_UNIT_SIZE 4 +#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 #define TCG_TARGET_NB_REGS 32 typedef enum { @@ -68,9 +78,13 @@ typedef enum { } TCGReg; /* used for function call generation */ -#define TCG_TARGET_STACK_ALIGN 8 -#define TCG_TARGET_CALL_STACK_OFFSET 16 -#define TCG_TARGET_CALL_ALIGN_ARGS 1 +#define TCG_TARGET_STACK_ALIGN 16 +#if _MIPS_SIM == _ABIO32 +# define TCG_TARGET_CALL_STACK_OFFSET 16 +#else +# define TCG_TARGET_CALL_STACK_OFFSET 0 +#endif +#define TCG_TARGET_CALL_ALIGN_ARGS 1 /* MOVN/MOVZ instructions detection */ #if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \ @@ -95,6 +109,13 @@ extern bool use_mips32_instructions; extern bool use_mips32r2_instructions; #endif +/* MIPS32R6 instruction set detection */ +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 6) +#define use_mips32r6_instructions 1 +#else +#define use_mips32r6_instructions 0 +#endif + /* optional instructions */ #define TCG_TARGET_HAS_div_i32 1 #define TCG_TARGET_HAS_rem_i32 1 @@ -104,34 +125,97 @@ extern bool use_mips32r2_instructions; #define TCG_TARGET_HAS_orc_i32 0 #define TCG_TARGET_HAS_eqv_i32 0 #define TCG_TARGET_HAS_nand_i32 0 -#define TCG_TARGET_HAS_mulu2_i32 1 -#define TCG_TARGET_HAS_muls2_i32 1 +#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions) +#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions) #define TCG_TARGET_HAS_muluh_i32 1 #define TCG_TARGET_HAS_mulsh_i32 1 +#define TCG_TARGET_HAS_bswap32_i32 1 +#define TCG_TARGET_HAS_goto_ptr 1 +#define TCG_TARGET_HAS_direct_jump 1 + +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_add2_i32 0 +#define TCG_TARGET_HAS_sub2_i32 0 +#define TCG_TARGET_HAS_extrl_i64_i32 1 +#define TCG_TARGET_HAS_extrh_i64_i32 1 +#define TCG_TARGET_HAS_div_i64 1 +#define TCG_TARGET_HAS_rem_i64 1 +#define TCG_TARGET_HAS_not_i64 1 +#define TCG_TARGET_HAS_nor_i64 1 +#define TCG_TARGET_HAS_andc_i64 0 +#define TCG_TARGET_HAS_orc_i64 0 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_add2_i64 0 +#define TCG_TARGET_HAS_sub2_i64 0 +#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions) +#define TCG_TARGET_HAS_muls2_i64 (!use_mips32r6_instructions) +#define TCG_TARGET_HAS_muluh_i64 1 +#define TCG_TARGET_HAS_mulsh_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#endif /* optional instructions detected at runtime */ #define TCG_TARGET_HAS_movcond_i32 use_movnz_instructions #define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions -#define TCG_TARGET_HAS_bswap32_i32 use_mips32r2_instructions #define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions +#define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions +#define TCG_TARGET_HAS_sextract_i32 0 +#define TCG_TARGET_HAS_extract2_i32 0 #define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions #define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions #define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions +#define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions +#define TCG_TARGET_HAS_ctz_i32 0 +#define TCG_TARGET_HAS_ctpop_i32 0 + +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_movcond_i64 use_movnz_instructions +#define TCG_TARGET_HAS_bswap16_i64 use_mips32r2_instructions +#define 
TCG_TARGET_HAS_bswap32_i64 use_mips32r2_instructions +#define TCG_TARGET_HAS_bswap64_i64 use_mips32r2_instructions +#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions +#define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions +#define TCG_TARGET_HAS_sextract_i64 0 +#define TCG_TARGET_HAS_extract2_i64 0 +#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions +#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions +#define TCG_TARGET_HAS_rot_i64 use_mips32r2_instructions +#define TCG_TARGET_HAS_clz_i64 use_mips32r2_instructions +#define TCG_TARGET_HAS_ctz_i64 0 +#define TCG_TARGET_HAS_ctpop_i64 0 +#endif /* optional instructions automatically implemented */ #define TCG_TARGET_HAS_neg_i32 0 /* sub rd, zero, rt */ #define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */ #define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */ +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_neg_i64 0 /* sub rd, zero, rt */ +#define TCG_TARGET_HAS_ext8u_i64 0 /* andi rt, rs, 0xff */ +#define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */ +#endif + #ifdef __OpenBSD__ #include <machine/sysarch.h> #else #include <sys/cachectl.h> #endif +#define TCG_TARGET_DEFAULT_MO (0) +#define TCG_TARGET_HAS_MEMORY_BSWAP 1 + static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { cacheflush ((void *)start, stop-start, ICACHE); } +void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t); + +#ifdef CONFIG_SOFTMMU +#define TCG_TARGET_NEED_LDST_LABELS +#endif + #endif diff --git a/qemu/tcg/mips/tcg-target.inc.c b/qemu/tcg/mips/tcg-target.inc.c new file mode 100644 index 00000000..ed5a9356 --- /dev/null +++ b/qemu/tcg/mips/tcg-target.inc.c @@ -0,0 +1,2714 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008-2009 Arnaud Patard + * Copyright (c) 2009 Aurelien Jarno + * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifdef HOST_WORDS_BIGENDIAN +# define MIPS_BE 1 +#else +# define MIPS_BE 0 +#endif + +#if TCG_TARGET_REG_BITS == 32 +# define LO_OFF (MIPS_BE * 4) +# define HI_OFF (4 - LO_OFF) +#else +/* To assert at compile-time that these values are never used + for TCG_TARGET_REG_BITS == 64.
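[Aside: the link_error() idiom that follows turns an "impossible" macro use into a build failure. The function is declared but defined nowhere, so any use of LO_OFF/HI_OFF that survives dead-code elimination on a 64-bit build fails at link time instead of silently computing garbage. A tiny illustration, assuming an optimizing build so the if (0) arm is folded away:]

/* Declared but defined nowhere: any surviving reference breaks the link. */
int link_error(void);

#define EXAMPLE_REG_BITS 64   /* pretend we are on the 64-bit build */

#if EXAMPLE_REG_BITS == 32
# define LO_OFF 0
#else
# define LO_OFF link_error()
#endif

int main(void)
{
    if (0) {
        /* Dead on this build: an optimizing compiler folds the branch
           away, so the undefined symbol never reaches the linker. */
        return LO_OFF;
    }
    return 0;  /* any *reachable* LO_OFF use would fail at link time */
}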
*/ +int link_error(void); +# define LO_OFF link_error() +# define HI_OFF link_error() +#endif + +#ifdef CONFIG_DEBUG_TCG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "zero", + "at", + "v0", + "v1", + "a0", + "a1", + "a2", + "a3", + "t0", + "t1", + "t2", + "t3", + "t4", + "t5", + "t6", + "t7", + "s0", + "s1", + "s2", + "s3", + "s4", + "s5", + "s6", + "s7", + "t8", + "t9", + "k0", + "k1", + "gp", + "sp", + "s8", + "ra", +}; +#endif + +#define TCG_TMP0 TCG_REG_AT +#define TCG_TMP1 TCG_REG_T9 +#define TCG_TMP2 TCG_REG_T8 +#define TCG_TMP3 TCG_REG_T7 + +#ifndef CONFIG_SOFTMMU +#define TCG_GUEST_BASE_REG TCG_REG_S1 +#endif + +/* check if we really need so many registers :P */ +static const int tcg_target_reg_alloc_order[] = { + /* Call saved registers. */ + TCG_REG_S0, + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + + /* Call clobbered registers. */ + TCG_REG_T4, + TCG_REG_T5, + TCG_REG_T6, + TCG_REG_T7, + TCG_REG_T8, + TCG_REG_T9, + TCG_REG_V1, + TCG_REG_V0, + + /* Argument registers, opposite order of allocation. */ + TCG_REG_T3, + TCG_REG_T2, + TCG_REG_T1, + TCG_REG_T0, + TCG_REG_A3, + TCG_REG_A2, + TCG_REG_A1, + TCG_REG_A0, +}; + +static const TCGReg tcg_target_call_iarg_regs[] = { + TCG_REG_A0, + TCG_REG_A1, + TCG_REG_A2, + TCG_REG_A3, +#if _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64 + TCG_REG_T0, + TCG_REG_T1, + TCG_REG_T2, + TCG_REG_T3, +#endif +}; + +static const TCGReg tcg_target_call_oarg_regs[2] = { + TCG_REG_V0, + TCG_REG_V1 +}; + +static tcg_insn_unit *tb_ret_addr; +static tcg_insn_unit *bswap32_addr; +static tcg_insn_unit *bswap32u_addr; +static tcg_insn_unit *bswap64_addr; + +static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + /* Let the compiler perform the right-shift as part of the arithmetic. */ + ptrdiff_t disp = target - (pc + 1); + tcg_debug_assert(disp == (int16_t)disp); + return disp & 0xffff; +} + +static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target)); +} + +static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + tcg_debug_assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0); + return ((uintptr_t)target >> 2) & 0x3ffffff; +} + +static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target)); +} + +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + tcg_debug_assert(type == R_MIPS_PC16); + tcg_debug_assert(addend == 0); + reloc_pc16(code_ptr, (tcg_insn_unit *)value); + return true; +} + +#define TCG_CT_CONST_ZERO 0x100 +#define TCG_CT_CONST_U16 0x200 /* Unsigned 16-bit: 0 - 0xffff. */ +#define TCG_CT_CONST_S16 0x400 /* Signed 16-bit: -32768 - 32767 */ +#define TCG_CT_CONST_P2M1 0x800 /* Power of 2 minus 1. 
*/ +#define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */ +#define TCG_CT_CONST_WSZ 0x2000 /* word size */ + +static inline bool is_p2m1(tcg_target_long val) +{ + return val && ((val + 1) & val) == 0; +} + +/* parse target specific constraints */ +static const char *target_parse_constraint(TCGArgConstraint *ct, + const char *ct_str, TCGType type) +{ + switch(*ct_str++) { + case 'r': + ct->ct |= TCG_CT_REG; + ct->u.regs = 0xffffffff; + break; + case 'L': /* qemu_ld input arg constraint */ + ct->ct |= TCG_CT_REG; + ct->u.regs = 0xffffffff; + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); +#if defined(CONFIG_SOFTMMU) + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2); + } +#endif + break; + case 'S': /* qemu_st constraint */ + ct->ct |= TCG_CT_REG; + ct->u.regs = 0xffffffff; + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); +#if defined(CONFIG_SOFTMMU) + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3); + } else { + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1); + } +#endif + break; + case 'I': + ct->ct |= TCG_CT_CONST_U16; + break; + case 'J': + ct->ct |= TCG_CT_CONST_S16; + break; + case 'K': + ct->ct |= TCG_CT_CONST_P2M1; + break; + case 'N': + ct->ct |= TCG_CT_CONST_N16; + break; + case 'W': + ct->ct |= TCG_CT_CONST_WSZ; + break; + case 'Z': + /* We are cheating a bit here, using the fact that the register + ZERO is also the register number 0. Hence there is no need + to check for const_args in each instruction. */ + ct->ct |= TCG_CT_CONST_ZERO; + break; + default: + return NULL; + } + return ct_str; +} + +/* test if a constant matches the constraint */ +static inline int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct; + ct = arg_ct->ct; + if (ct & TCG_CT_CONST) { + return 1; + } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return 1; + } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) { + return 1; + } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { + return 1; + } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) { + return 1; + } else if ((ct & TCG_CT_CONST_P2M1) + && use_mips32r2_instructions && is_p2m1(val)) { + return 1; + } else if ((ct & TCG_CT_CONST_WSZ) + && val == (type == TCG_TYPE_I32 ? 
32 : 64)) { + return 1; + } + return 0; +} + +/* instruction opcodes */ +typedef enum { + OPC_J = 002 << 26, + OPC_JAL = 003 << 26, + OPC_BEQ = 004 << 26, + OPC_BNE = 005 << 26, + OPC_BLEZ = 006 << 26, + OPC_BGTZ = 007 << 26, + OPC_ADDIU = 011 << 26, + OPC_SLTI = 012 << 26, + OPC_SLTIU = 013 << 26, + OPC_ANDI = 014 << 26, + OPC_ORI = 015 << 26, + OPC_XORI = 016 << 26, + OPC_LUI = 017 << 26, + OPC_DADDIU = 031 << 26, + OPC_LB = 040 << 26, + OPC_LH = 041 << 26, + OPC_LW = 043 << 26, + OPC_LBU = 044 << 26, + OPC_LHU = 045 << 26, + OPC_LWU = 047 << 26, + OPC_SB = 050 << 26, + OPC_SH = 051 << 26, + OPC_SW = 053 << 26, + OPC_LD = 067 << 26, + OPC_SD = 077 << 26, + + OPC_SPECIAL = 000 << 26, + OPC_SLL = OPC_SPECIAL | 000, + OPC_SRL = OPC_SPECIAL | 002, + OPC_ROTR = OPC_SPECIAL | 002 | (1 << 21), + OPC_SRA = OPC_SPECIAL | 003, + OPC_SLLV = OPC_SPECIAL | 004, + OPC_SRLV = OPC_SPECIAL | 006, + OPC_ROTRV = OPC_SPECIAL | 006 | 0100, + OPC_SRAV = OPC_SPECIAL | 007, + OPC_JR_R5 = OPC_SPECIAL | 010, + OPC_JALR = OPC_SPECIAL | 011, + OPC_MOVZ = OPC_SPECIAL | 012, + OPC_MOVN = OPC_SPECIAL | 013, + OPC_SYNC = OPC_SPECIAL | 017, + OPC_MFHI = OPC_SPECIAL | 020, + OPC_MFLO = OPC_SPECIAL | 022, + OPC_DSLLV = OPC_SPECIAL | 024, + OPC_DSRLV = OPC_SPECIAL | 026, + OPC_DROTRV = OPC_SPECIAL | 026 | 0100, + OPC_DSRAV = OPC_SPECIAL | 027, + OPC_MULT = OPC_SPECIAL | 030, + OPC_MUL_R6 = OPC_SPECIAL | 030 | 0200, + OPC_MUH = OPC_SPECIAL | 030 | 0300, + OPC_MULTU = OPC_SPECIAL | 031, + OPC_MULU = OPC_SPECIAL | 031 | 0200, + OPC_MUHU = OPC_SPECIAL | 031 | 0300, + OPC_DIV = OPC_SPECIAL | 032, + OPC_DIV_R6 = OPC_SPECIAL | 032 | 0200, + OPC_MOD = OPC_SPECIAL | 032 | 0300, + OPC_DIVU = OPC_SPECIAL | 033, + OPC_DIVU_R6 = OPC_SPECIAL | 033 | 0200, + OPC_MODU = OPC_SPECIAL | 033 | 0300, + OPC_DMULT = OPC_SPECIAL | 034, + OPC_DMUL = OPC_SPECIAL | 034 | 0200, + OPC_DMUH = OPC_SPECIAL | 034 | 0300, + OPC_DMULTU = OPC_SPECIAL | 035, + OPC_DMULU = OPC_SPECIAL | 035 | 0200, + OPC_DMUHU = OPC_SPECIAL | 035 | 0300, + OPC_DDIV = OPC_SPECIAL | 036, + OPC_DDIV_R6 = OPC_SPECIAL | 036 | 0200, + OPC_DMOD = OPC_SPECIAL | 036 | 0300, + OPC_DDIVU = OPC_SPECIAL | 037, + OPC_DDIVU_R6 = OPC_SPECIAL | 037 | 0200, + OPC_DMODU = OPC_SPECIAL | 037 | 0300, + OPC_ADDU = OPC_SPECIAL | 041, + OPC_SUBU = OPC_SPECIAL | 043, + OPC_AND = OPC_SPECIAL | 044, + OPC_OR = OPC_SPECIAL | 045, + OPC_XOR = OPC_SPECIAL | 046, + OPC_NOR = OPC_SPECIAL | 047, + OPC_SLT = OPC_SPECIAL | 052, + OPC_SLTU = OPC_SPECIAL | 053, + OPC_DADDU = OPC_SPECIAL | 055, + OPC_DSUBU = OPC_SPECIAL | 057, + OPC_SELEQZ = OPC_SPECIAL | 065, + OPC_SELNEZ = OPC_SPECIAL | 067, + OPC_DSLL = OPC_SPECIAL | 070, + OPC_DSRL = OPC_SPECIAL | 072, + OPC_DROTR = OPC_SPECIAL | 072 | (1 << 21), + OPC_DSRA = OPC_SPECIAL | 073, + OPC_DSLL32 = OPC_SPECIAL | 074, + OPC_DSRL32 = OPC_SPECIAL | 076, + OPC_DROTR32 = OPC_SPECIAL | 076 | (1 << 21), + OPC_DSRA32 = OPC_SPECIAL | 077, + OPC_CLZ_R6 = OPC_SPECIAL | 0120, + OPC_DCLZ_R6 = OPC_SPECIAL | 0122, + + OPC_REGIMM = 001 << 26, + OPC_BLTZ = OPC_REGIMM | (000 << 16), + OPC_BGEZ = OPC_REGIMM | (001 << 16), + + OPC_SPECIAL2 = 034 << 26, + OPC_MUL_R5 = OPC_SPECIAL2 | 002, + OPC_CLZ = OPC_SPECIAL2 | 040, + OPC_DCLZ = OPC_SPECIAL2 | 044, + + OPC_SPECIAL3 = 037 << 26, + OPC_EXT = OPC_SPECIAL3 | 000, + OPC_DEXTM = OPC_SPECIAL3 | 001, + OPC_DEXTU = OPC_SPECIAL3 | 002, + OPC_DEXT = OPC_SPECIAL3 | 003, + OPC_INS = OPC_SPECIAL3 | 004, + OPC_DINSM = OPC_SPECIAL3 | 005, + OPC_DINSU = OPC_SPECIAL3 | 006, + OPC_DINS = OPC_SPECIAL3 | 007, + OPC_WSBH = OPC_SPECIAL3 | 00240, + OPC_DSBH 
= OPC_SPECIAL3 | 00244, + OPC_DSHD = OPC_SPECIAL3 | 00544, + OPC_SEB = OPC_SPECIAL3 | 02040, + OPC_SEH = OPC_SPECIAL3 | 03040, + + /* MIPS r6 doesn't have JR, JALR should be used instead */ + OPC_JR = use_mips32r6_instructions ? OPC_JALR : OPC_JR_R5, + + /* + * MIPS r6 replaces MUL with an alternative encoding which is + * backwards-compatible at the assembly level. + */ + OPC_MUL = use_mips32r6_instructions ? OPC_MUL_R6 : OPC_MUL_R5, + + /* MIPS r6 introduced names for weaker variants of SYNC. These are + backward compatible to previous architecture revisions. */ + OPC_SYNC_WMB = OPC_SYNC | 0x04 << 6, + OPC_SYNC_MB = OPC_SYNC | 0x10 << 6, + OPC_SYNC_ACQUIRE = OPC_SYNC | 0x11 << 6, + OPC_SYNC_RELEASE = OPC_SYNC | 0x12 << 6, + OPC_SYNC_RMB = OPC_SYNC | 0x13 << 6, + + /* Aliases for convenience. */ + ALIAS_PADD = sizeof(void *) == 4 ? OPC_ADDU : OPC_DADDU, + ALIAS_PADDI = sizeof(void *) == 4 ? OPC_ADDIU : OPC_DADDIU, + ALIAS_TSRL = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32 + ? OPC_SRL : OPC_DSRL, +} MIPSInsn; + +/* + * Type reg + */ +static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc, + TCGReg rd, TCGReg rs, TCGReg rt) +{ + int32_t inst; + + inst = opc; + inst |= (rs & 0x1F) << 21; + inst |= (rt & 0x1F) << 16; + inst |= (rd & 0x1F) << 11; + tcg_out32(s, inst); +} + +/* + * Type immediate + */ +static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc, + TCGReg rt, TCGReg rs, TCGArg imm) +{ + int32_t inst; + + inst = opc; + inst |= (rs & 0x1F) << 21; + inst |= (rt & 0x1F) << 16; + inst |= (imm & 0xffff); + tcg_out32(s, inst); +} + +/* + * Type bitfield + */ +static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt, + TCGReg rs, int msb, int lsb) +{ + int32_t inst; + + inst = opc; + inst |= (rs & 0x1F) << 21; + inst |= (rt & 0x1F) << 16; + inst |= (msb & 0x1F) << 11; + inst |= (lsb & 0x1F) << 6; + tcg_out32(s, inst); +} + +static inline void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm, + MIPSInsn oph, TCGReg rt, TCGReg rs, + int msb, int lsb) +{ + if (lsb >= 32) { + opc = oph; + msb -= 32; + lsb -= 32; + } else if (msb >= 32) { + opc = opm; + msb -= 32; + } + tcg_out_opc_bf(s, opc, rt, rs, msb, lsb); +} + +/* + * Type branch + */ +static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc, + TCGReg rt, TCGReg rs) +{ + tcg_out_opc_imm(s, opc, rt, rs, 0); +} + +/* + * Type sa + */ +static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc, + TCGReg rd, TCGReg rt, TCGArg sa) +{ + int32_t inst; + + inst = opc; + inst |= (rt & 0x1F) << 16; + inst |= (rd & 0x1F) << 11; + inst |= (sa & 0x1F) << 6; + tcg_out32(s, inst); + +} + +static void tcg_out_opc_sa64(TCGContext *s, MIPSInsn opc1, MIPSInsn opc2, + TCGReg rd, TCGReg rt, TCGArg sa) +{ + int32_t inst; + + inst = (sa & 32 ? opc2 : opc1); + inst |= (rt & 0x1F) << 16; + inst |= (rd & 0x1F) << 11; + inst |= (sa & 0x1F) << 6; + tcg_out32(s, inst); +} + +/* + * Type jump. + * Returns true if the branch was in range and the insn was emitted. + */ +static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target) +{ + uintptr_t dest = (uintptr_t)target; + uintptr_t from = (uintptr_t)s->code_ptr + 4; + int32_t inst; + + /* The pc-region branch happens within the 256MB region of + the delay slot (thus the +4). 
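+      Concretely: a J/JAL target replaces the low 28 bits of the
+      delay-slot PC, so source and destination must agree in the top 4
+      address bits.  E.g. from = 0x20001004 reaches any dest within
+      0x20000000..0x2fffffff but not 0x30000000; the (from ^ dest) test
+      below rejects exactly those out-of-region targets.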
*/ + if ((from ^ dest) & -(1 << 28)) { + return false; + } + tcg_debug_assert((dest & 3) == 0); + + inst = opc; + inst |= (dest >> 2) & 0x3ffffff; + tcg_out32(s, inst); + return true; +} + +static inline void tcg_out_nop(TCGContext *s) +{ + tcg_out32(s, 0); +} + +static inline void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) +{ + tcg_out_opc_sa64(s, OPC_DSLL, OPC_DSLL32, rd, rt, sa); +} + +static inline void tcg_out_dsrl(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) +{ + tcg_out_opc_sa64(s, OPC_DSRL, OPC_DSRL32, rd, rt, sa); +} + +static inline void tcg_out_dsra(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) +{ + tcg_out_opc_sa64(s, OPC_DSRA, OPC_DSRA32, rd, rt, sa); +} + +static inline bool tcg_out_mov(TCGContext *s, TCGType type, + TCGReg ret, TCGReg arg) +{ + /* Simple reg-reg move, optimising out the 'do nothing' case */ + if (ret != arg) { + tcg_out_opc_reg(s, OPC_OR, ret, arg, TCG_REG_ZERO); + } + return true; +} + +static void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long arg) +{ + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { + arg = (int32_t)arg; + } + if (arg == (int16_t)arg) { + tcg_out_opc_imm(s, OPC_ADDIU, ret, TCG_REG_ZERO, arg); + return; + } + if (arg == (uint16_t)arg) { + tcg_out_opc_imm(s, OPC_ORI, ret, TCG_REG_ZERO, arg); + return; + } + if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) { + tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16); + } else { + tcg_out_movi(s, TCG_TYPE_I32, ret, arg >> 31 >> 1); + if (arg & 0xffff0000ull) { + tcg_out_dsll(s, ret, ret, 16); + tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg >> 16); + tcg_out_dsll(s, ret, ret, 16); + } else { + tcg_out_dsll(s, ret, ret, 32); + } + } + if (arg & 0xffff) { + tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff); + } +} + +static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); + } else { + /* ret and arg can't be register at */ + if (ret == TCG_TMP0 || arg == TCG_TMP0) { + tcg_abort(); + } + + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); + tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8); + tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00); + tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); + } +} + +static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); + tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret); + } else { + /* ret and arg can't be register at */ + if (ret == TCG_TMP0 || arg == TCG_TMP0) { + tcg_abort(); + } + + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); + tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); + tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); + tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); + } +} + +static void tcg_out_bswap_subr(TCGContext *s, tcg_insn_unit *sub) +{ + bool ok = tcg_out_opc_jmp(s, OPC_JAL, sub); + tcg_debug_assert(ok); +} + +static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); + tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16); + } else { + tcg_out_bswap_subr(s, bswap32_addr); + /* delay slot -- never omit the insn, like tcg_out_mov might. 
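+           The JAL to the bswap helper has just been emitted, so the
+           next insn executes in its branch delay slot.  tcg_out_mov
+           emits nothing when ret == arg, which could let an unrelated
+           insn slide into the slot; the explicit OR with $zero always
+           fills it while copying arg into the helper's input TMP0.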
*/ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO); + tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3); + } +} + +static void tcg_out_bswap32u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg); + tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret); + tcg_out_dsrl(s, ret, ret, 32); + } else { + tcg_out_bswap_subr(s, bswap32u_addr); + /* delay slot -- never omit the insn, like tcg_out_mov might. */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO); + tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3); + } +} + +static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg); + tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret); + } else { + tcg_out_bswap_subr(s, bswap64_addr); + /* delay slot -- never omit the insn, like tcg_out_mov might. */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO); + tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3); + } +} + +static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg); + } else { + tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); + tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24); + } +} + +static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg); + } else { + tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16); + tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); + } +} + +static inline void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0); + } else { + tcg_out_dsll(s, ret, arg, 32); + tcg_out_dsrl(s, ret, ret, 32); + } +} + +static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data, + TCGReg addr, intptr_t ofs) +{ + int16_t lo = ofs; + if (ofs != lo) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo); + if (addr != TCG_REG_ZERO) { + tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP0, TCG_TMP0, addr); + } + addr = TCG_TMP0; + } + tcg_out_opc_imm(s, opc, data, addr, lo); +} + +static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + MIPSInsn opc = OPC_LD; + if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { + opc = OPC_LW; + } + tcg_out_ldst(s, opc, arg, arg1, arg2); +} + +static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + MIPSInsn opc = OPC_SD; + if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { + opc = OPC_SW; + } + tcg_out_ldst(s, opc, arg, arg1, arg2); +} + +static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) +{ + if (val == 0) { + tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); + return true; + } + return false; +} + +static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al, + TCGReg ah, TCGArg bl, TCGArg bh, bool cbl, + bool cbh, bool is_sub) +{ + TCGReg th = TCG_TMP1; + + /* If we have a negative constant such that negating it would + make the high part zero, we can (usually) eliminate one insn. */ + if (cbl && cbh && bh == -1 && bl != 0) { + bl = -bl; + bh = 0; + is_sub = !is_sub; + } + + /* By operating on the high part first, we get to use the final + carry operation to move back from the temporary. */ + if (!cbh) { + tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh); + } else if (bh != 0 || ah == rl) { + tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? 
-bh : bh)); + } else { + th = ah; + } + + /* Note that tcg optimization should eliminate the bl == 0 case. */ + if (is_sub) { + if (cbl) { + tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl); + tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl); + } else { + tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl); + tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl); + } + tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0); + } else { + if (cbl) { + tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl); + tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl); + } else if (rl == al && rl == bl) { + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, TCG_TARGET_REG_BITS - 1); + tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl); + } else { + tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl); + tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl)); + } + tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0); + } +} + +/* Bit 0 set if inversion required; bit 1 set if swapping required. */ +#define MIPS_CMP_INV 1 +#define MIPS_CMP_SWAP 2 + +static const uint8_t mips_cmp_map[16] = { + [TCG_COND_LT] = 0, + [TCG_COND_LTU] = 0, + [TCG_COND_GE] = MIPS_CMP_INV, + [TCG_COND_GEU] = MIPS_CMP_INV, + [TCG_COND_LE] = MIPS_CMP_INV | MIPS_CMP_SWAP, + [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP, + [TCG_COND_GT] = MIPS_CMP_SWAP, + [TCG_COND_GTU] = MIPS_CMP_SWAP, +}; + +static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg arg1, TCGReg arg2) +{ + MIPSInsn s_opc = OPC_SLTU; + int cmp_map; + + switch (cond) { + case TCG_COND_EQ: + if (arg2 != 0) { + tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2); + arg1 = ret; + } + tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1); + break; + + case TCG_COND_NE: + if (arg2 != 0) { + tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2); + arg1 = ret; + } + tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1); + break; + + case TCG_COND_LT: + case TCG_COND_GE: + case TCG_COND_LE: + case TCG_COND_GT: + s_opc = OPC_SLT; + /* FALLTHRU */ + + case TCG_COND_LTU: + case TCG_COND_GEU: + case TCG_COND_LEU: + case TCG_COND_GTU: + cmp_map = mips_cmp_map[cond]; + if (cmp_map & MIPS_CMP_SWAP) { + TCGReg t = arg1; + arg1 = arg2; + arg2 = t; + } + tcg_out_opc_reg(s, s_opc, ret, arg1, arg2); + if (cmp_map & MIPS_CMP_INV) { + tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); + } + break; + + default: + tcg_abort(); + break; + } +} + +static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, + TCGReg arg2, TCGLabel *l) +{ + static const MIPSInsn b_zero[16] = { + [TCG_COND_LT] = OPC_BLTZ, + [TCG_COND_GT] = OPC_BGTZ, + [TCG_COND_LE] = OPC_BLEZ, + [TCG_COND_GE] = OPC_BGEZ, + }; + + MIPSInsn s_opc = OPC_SLTU; + MIPSInsn b_opc; + int cmp_map; + + switch (cond) { + case TCG_COND_EQ: + b_opc = OPC_BEQ; + break; + case TCG_COND_NE: + b_opc = OPC_BNE; + break; + + case TCG_COND_LT: + case TCG_COND_GT: + case TCG_COND_LE: + case TCG_COND_GE: + if (arg2 == 0) { + b_opc = b_zero[cond]; + arg2 = arg1; + arg1 = 0; + break; + } + s_opc = OPC_SLT; + /* FALLTHRU */ + + case TCG_COND_LTU: + case TCG_COND_GTU: + case TCG_COND_LEU: + case TCG_COND_GEU: + cmp_map = mips_cmp_map[cond]; + if (cmp_map & MIPS_CMP_SWAP) { + TCGReg t = arg1; + arg1 = arg2; + arg2 = t; + } + tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2); + b_opc = (cmp_map & MIPS_CMP_INV ? 
OPC_BEQ : OPC_BNE); + arg1 = TCG_TMP0; + arg2 = TCG_REG_ZERO; + break; + + default: + tcg_abort(); + break; + } + + tcg_out_opc_br(s, b_opc, arg1, arg2); + if (l->has_value) { + reloc_pc16(s->code_ptr - 1, l->u.value_ptr); + } else { + tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0); + } + tcg_out_nop(s); +} + +static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1, + TCGReg al, TCGReg ah, + TCGReg bl, TCGReg bh) +{ + /* Merge highpart comparison into AH. */ + if (bh != 0) { + if (ah != 0) { + tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh); + ah = tmp0; + } else { + ah = bh; + } + } + /* Merge lowpart comparison into AL. */ + if (bl != 0) { + if (al != 0) { + tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl); + al = tmp1; + } else { + al = bl; + } + } + /* Merge high and low part comparisons into AL. */ + if (ah != 0) { + if (al != 0) { + tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al); + al = tmp0; + } else { + al = ah; + } + } + return al; +} + +static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) +{ + TCGReg tmp0 = TCG_TMP0; + TCGReg tmp1 = ret; + + tcg_debug_assert(ret != TCG_TMP0); + if (ret == ah || ret == bh) { + tcg_debug_assert(ret != TCG_TMP1); + tmp1 = TCG_TMP1; + } + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_NE: + tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh); + tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO); + break; + + default: + tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh); + tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl); + tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0); + tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh); + tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0); + break; + } +} + +static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, + TCGReg bl, TCGReg bh, TCGLabel *l) +{ + TCGCond b_cond = TCG_COND_NE; + TCGReg tmp = TCG_TMP1; + + /* With branches, we emit between 4 and 9 insns with 2 or 3 branches. + With setcond, we emit between 3 and 10 insns and only 1 branch, + which ought to get better branch prediction. */ + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_NE: + b_cond = cond; + tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh); + break; + + default: + /* Minimize code size by preferring a compare not requiring INV. */ + if (mips_cmp_map[cond] & MIPS_CMP_INV) { + cond = tcg_invert_cond(cond); + b_cond = TCG_COND_EQ; + } + tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh); + break; + } + + tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, l); +} + +static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2) +{ + bool eqz = false; + + /* If one of the values is zero, put it last to match SEL*Z instructions */ + if (use_mips32r6_instructions && v1 == 0) { + v1 = v2; + v2 = 0; + cond = tcg_invert_cond(cond); + } + + switch (cond) { + case TCG_COND_EQ: + eqz = true; + /* FALLTHRU */ + case TCG_COND_NE: + if (c2 != 0) { + tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2); + c1 = TCG_TMP0; + } + break; + + default: + /* Minimize code size by preferring a compare not requiring INV. */ + if (mips_cmp_map[cond] & MIPS_CMP_INV) { + cond = tcg_invert_cond(cond); + eqz = true; + } + tcg_out_setcond(s, cond, TCG_TMP0, c1, c2); + c1 = TCG_TMP0; + break; + } + + if (use_mips32r6_instructions) { + MIPSInsn m_opc_t = eqz ? OPC_SELEQZ : OPC_SELNEZ; + MIPSInsn m_opc_f = eqz ? 
OPC_SELNEZ : OPC_SELEQZ; + + if (v2 != 0) { + tcg_out_opc_reg(s, m_opc_f, TCG_TMP1, v2, c1); + } + tcg_out_opc_reg(s, m_opc_t, ret, v1, c1); + if (v2 != 0) { + tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP1); + } + } else { + MIPSInsn m_opc = eqz ? OPC_MOVZ : OPC_MOVN; + + tcg_out_opc_reg(s, m_opc, ret, v1, c1); + + /* This should be guaranteed via constraints */ + tcg_debug_assert(v2 == ret); + } +} + +static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail) +{ + /* Note that the ABI requires the called function's address to be + loaded into T9, even if a direct branch is in range. */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg); + + /* But do try a direct branch, allowing the cpu better insn prefetch. */ + if (tail) { + if (!tcg_out_opc_jmp(s, OPC_J, arg)) { + tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0); + } + } else { + if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) { + tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0); + } + } +} + +static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg) +{ + tcg_out_call_int(s, arg, false); + tcg_out_nop(s); +} + +#if defined(CONFIG_SOFTMMU) +#include "../tcg-ldst.inc.c" + +static void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_SB] = helper_ret_ldsb_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LESW] = helper_le_ldsw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BESW] = helper_be_ldsw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, +#if TCG_TARGET_REG_BITS == 64 + [MO_LESL] = helper_le_ldsl_mmu, + [MO_BESL] = helper_be_ldsl_mmu, +#endif +}; + +static void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, +}; + +/* Helper routines for marshalling helper function arguments into + * the correct registers and stack. + * I is where we want to put this argument, and is updated and returned + * for the next call. ARG is the argument itself. + * + * We provide routines for arguments which are: immediate, 32 bit + * value in register, 16 and 8 bit values in register (which must be zero + * extended before use) and 64 bit value in a lo:hi register pair. + */ + +static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg) +{ + if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { + tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg); + } else { + /* For N32 and N64, the initial offset is different. But there + we also have 8 argument register so we don't run out here. 
*/ + tcg_debug_assert(TCG_TARGET_REG_BITS == 32); + tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i); + } + return i + 1; +} + +static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg) +{ + TCGReg tmp = TCG_TMP0; + if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { + tmp = tcg_target_call_iarg_regs[i]; + } + tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff); + return tcg_out_call_iarg_reg(s, i, tmp); +} + +static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg) +{ + TCGReg tmp = TCG_TMP0; + if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { + tmp = tcg_target_call_iarg_regs[i]; + } + tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff); + return tcg_out_call_iarg_reg(s, i, tmp); +} + +static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg) +{ + TCGReg tmp = TCG_TMP0; + if (arg == 0) { + tmp = TCG_REG_ZERO; + } else { + if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { + tmp = tcg_target_call_iarg_regs[i]; + } + tcg_out_movi(s, TCG_TYPE_REG, tmp, arg); + } + return tcg_out_call_iarg_reg(s, i, tmp); +} + +static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah) +{ + tcg_debug_assert(TCG_TARGET_REG_BITS == 32); + i = (i + 1) & ~1; + i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al)); + i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah)); + return i; +} + +/* We expect to use a 16-bit negative offset from ENV. */ +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); + +/* + * Perform the tlb comparison operation. + * The complete host address is placed in BASE. + * Clobbers TMP0, TMP1, TMP2, TMP3. + */ +static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, + TCGReg addrh, TCGMemOpIdx oi, + tcg_insn_unit *label_ptr[2], bool is_load) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = s->uc; +#endif + MemOp opc = get_memop(oi); + unsigned s_bits = opc & MO_SIZE; + unsigned a_bits = get_alignment_bits(opc); + int mem_index = get_mmuidx(oi); + int fast_off = TLB_MASK_TABLE_OFS(mem_index); + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); + int table_off = fast_off + offsetof(CPUTLBDescFast, table); + int add_off = offsetof(CPUTLBEntry, addend); + int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write)); + target_ulong mask; + + /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off); + + /* Extract the TLB index from the address into TMP3. */ + tcg_out_opc_sa(s, ALIAS_TSRL, TCG_TMP3, addrl, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0); + + /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */ + tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); + + /* We don't currently support unaligned accesses. + We could do so with mips32r6. */ + if (a_bits < s_bits) { + a_bits = s_bits; + } + + /* Mask the page bits, keeping the alignment bits to compare against. */ + mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1); + + /* Load the (low-half) tlb comparator. */ + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF); + tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask); + } else { + tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD + : TCG_TARGET_REG_BITS == 64 ? 
OPC_LWU : OPC_LW), + TCG_TMP0, TCG_TMP3, cmp_off); + tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask); + /* No second compare is required here; + load the tlb addend for the fast path. */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); + } + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl); + + /* Zero extend a 32-bit guest address for a 64-bit host. */ + if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { + tcg_out_ext32u(s, base, addrl); + addrl = base; + } + + label_ptr[0] = s->code_ptr; + tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); + + /* Load and test the high half tlb comparator. */ + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + /* delay slot */ + tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); + + /* Load the tlb addend for the fast path. */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off); + + label_ptr[1] = s->code_ptr; + tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0); + } + + /* delay slot */ + tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl); +} + +static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, + TCGType ext, + TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addrhi, + void *raddr, tcg_insn_unit *label_ptr[2]) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->oi = oi; + label->type = ext; + label->datalo_reg = datalo; + label->datahi_reg = datahi; + label->addrlo_reg = addrlo; + label->addrhi_reg = addrhi; + label->raddr = raddr; + label->label_ptr[0] = label_ptr[0]; + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + label->label_ptr[1] = label_ptr[1]; + } +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + TCGMemOpIdx oi = l->oi; + MemOp opc = get_memop(oi); + TCGReg v0; + int i; + + /* resolve label address */ + reloc_pc16(l->label_ptr[0], s->code_ptr); + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + reloc_pc16(l->label_ptr[1], s->code_ptr); + } + + i = 1; + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg); + } else { + i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg); + } + i = tcg_out_call_iarg_imm(s, i, oi); + i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr); + tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)], false); + /* delay slot */ + tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); + + v0 = l->datalo_reg; + if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) { + /* We eliminated V0 from the possible output registers, so it + cannot be clobbered here. So we must move V1 first. 
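+           Concretely: on a 32-bit host a 64-bit load helper returns
+           its result in the V0:V1 pair.  One destination half is
+           copied from V1 here and the other from V0 in the branch
+           delay slot below; because the constraints exclude V0 from
+           the destination pair, the V1 copy cannot clobber a value
+           that is still to be read.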
*/ + if (MIPS_BE) { + tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1); + v0 = l->datahi_reg; + } else { + tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1); + } + } + + tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO); + reloc_pc16(s->code_ptr - 1, l->raddr); + + /* delay slot */ + if (TCG_TARGET_REG_BITS == 64 && l->type == TCG_TYPE_I32) { + /* we always sign-extend 32-bit loads */ + tcg_out_opc_sa(s, OPC_SLL, v0, TCG_REG_V0, 0); + } else { + tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO); + } + return true; +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + TCGMemOpIdx oi = l->oi; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; + int i; + + /* resolve label address */ + reloc_pc16(l->label_ptr[0], s->code_ptr); + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + reloc_pc16(l->label_ptr[1], s->code_ptr); + } + + i = 1; + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg); + } else { + i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg); + } + switch (s_bits) { + case MO_8: + i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg); + break; + case MO_16: + i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg); + break; + case MO_32: + i = tcg_out_call_iarg_reg(s, i, l->datalo_reg); + break; + case MO_64: + if (TCG_TARGET_REG_BITS == 32) { + i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg); + } else { + i = tcg_out_call_iarg_reg(s, i, l->datalo_reg); + } + break; + default: + tcg_abort(); + } + i = tcg_out_call_iarg_imm(s, i, oi); + + /* Tail call to the store helper. Thus force the return address + computation to take place in the return address register. */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr); + i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA); + tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true); + /* delay slot */ + tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); + return true; +} +#endif + +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, + TCGReg base, MemOp opc, bool is_64) +{ + switch (opc & (MO_SSIZE | MO_BSWAP)) { + case MO_UB: + tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); + break; + case MO_SB: + tcg_out_opc_imm(s, OPC_LB, lo, base, 0); + break; + case MO_UW | MO_BSWAP: + tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0); + tcg_out_bswap16(s, lo, TCG_TMP1); + break; + case MO_UW: + tcg_out_opc_imm(s, OPC_LHU, lo, base, 0); + break; + case MO_SW | MO_BSWAP: + tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0); + tcg_out_bswap16s(s, lo, TCG_TMP1); + break; + case MO_SW: + tcg_out_opc_imm(s, OPC_LH, lo, base, 0); + break; + case MO_UL | MO_BSWAP: + if (TCG_TARGET_REG_BITS == 64 && is_64) { + if (use_mips32r2_instructions) { + tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); + tcg_out_bswap32u(s, lo, lo); + } else { + tcg_out_bswap_subr(s, bswap32u_addr); + /* delay slot */ + tcg_out_opc_imm(s, OPC_LWU, TCG_TMP0, base, 0); + tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3); + } + break; + } + /* FALLTHRU */ + case MO_SL | MO_BSWAP: + if (use_mips32r2_instructions) { + tcg_out_opc_imm(s, OPC_LW, lo, base, 0); + tcg_out_bswap32(s, lo, lo); + } else { + tcg_out_bswap_subr(s, bswap32_addr); + /* delay slot */ + tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0); + tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_TMP3); + } + break; + case MO_UL: + if (TCG_TARGET_REG_BITS == 64 && is_64) { + tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); + break; + } + /* FALLTHRU */ + case MO_SL: + tcg_out_opc_imm(s, OPC_LW, lo, base, 0); + 
break; + case MO_Q | MO_BSWAP: + if (TCG_TARGET_REG_BITS == 64) { + if (use_mips32r2_instructions) { + tcg_out_opc_imm(s, OPC_LD, lo, base, 0); + tcg_out_bswap64(s, lo, lo); + } else { + tcg_out_bswap_subr(s, bswap64_addr); + /* delay slot */ + tcg_out_opc_imm(s, OPC_LD, TCG_TMP0, base, 0); + tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3); + } + } else if (use_mips32r2_instructions) { + tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0); + tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 4); + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0); + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1); + tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16); + tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16); + } else { + tcg_out_bswap_subr(s, bswap32_addr); + /* delay slot */ + tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0); + tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 4); + tcg_out_bswap_subr(s, bswap32_addr); + /* delay slot */ + tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3); + tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3); + } + break; + case MO_Q: + /* Prefer to load from offset 0 first, but allow for overlap. */ + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_opc_imm(s, OPC_LD, lo, base, 0); + } else if (MIPS_BE ? hi != base : lo == base) { + tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF); + tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF); + } else { + tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF); + tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF); + } + break; + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) +{ + TCGReg addr_regl, addr_regh __attribute__((unused)); + TCGReg data_regl, data_regh; + TCGMemOpIdx oi; + MemOp opc; +#if defined(CONFIG_SOFTMMU) + tcg_insn_unit *label_ptr[2]; +#endif + TCGReg base = TCG_REG_A0; + + data_regl = *args++; + data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); + addr_regl = *args++; + addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); + oi = *args++; + opc = get_memop(oi); + +#if defined(CONFIG_SOFTMMU) + tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1); + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); + add_qemu_ldst_label(s, 1, oi, + (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), + data_regl, data_regh, addr_regl, addr_regh, + s->code_ptr, label_ptr); +#else + if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { + tcg_out_ext32u(s, base, addr_regl); + addr_regl = base; + } + if (guest_base == 0 && data_regl != addr_regl) { + base = addr_regl; + } else if (guest_base == (int16_t)guest_base) { + tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base); + } else { + tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); + } + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); +#endif +} + +static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, + TCGReg base, MemOp opc) +{ + /* Don't clutter the code below with checks to avoid bswapping ZERO. 
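+       Handling it once up front instead: storing a constant zero uses
+       $zero for both halves, and byte-swapping zero is a no-op, so
+       clearing MO_BSWAP here lets e.g. a swapped 32-bit store of zero
+       fall through to a plain SW $zero with no WSBH or rotate insns.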
*/ + if ((lo | hi) == 0) { + opc &= ~MO_BSWAP; + } + + switch (opc & (MO_SIZE | MO_BSWAP)) { + case MO_8: + tcg_out_opc_imm(s, OPC_SB, lo, base, 0); + break; + + case MO_16 | MO_BSWAP: + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, lo, 0xffff); + tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1); + lo = TCG_TMP1; + /* FALLTHRU */ + case MO_16: + tcg_out_opc_imm(s, OPC_SH, lo, base, 0); + break; + + case MO_32 | MO_BSWAP: + tcg_out_bswap32(s, TCG_TMP3, lo); + lo = TCG_TMP3; + /* FALLTHRU */ + case MO_32: + tcg_out_opc_imm(s, OPC_SW, lo, base, 0); + break; + + case MO_64 | MO_BSWAP: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_bswap64(s, TCG_TMP3, lo); + tcg_out_opc_imm(s, OPC_SD, TCG_TMP3, base, 0); + } else if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? lo : hi); + tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? hi : lo); + tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16); + tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16); + tcg_out_opc_imm(s, OPC_SW, TCG_TMP0, base, 0); + tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, 4); + } else { + tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi); + tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 0); + tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo); + tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 4); + } + break; + case MO_64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_opc_imm(s, OPC_SD, lo, base, 0); + } else { + tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? hi : lo, base, 0); + tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? lo : hi, base, 4); + } + break; + + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) +{ + TCGReg addr_regl, addr_regh __attribute__((unused)); + TCGReg data_regl, data_regh; + TCGMemOpIdx oi; + MemOp opc; +#if defined(CONFIG_SOFTMMU) + tcg_insn_unit *label_ptr[2]; +#endif + TCGReg base = TCG_REG_A0; + + data_regl = *args++; + data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); + addr_regl = *args++; + addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); + oi = *args++; + opc = get_memop(oi); + +#if defined(CONFIG_SOFTMMU) + tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0); + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); + add_qemu_ldst_label(s, 0, oi, + (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), + data_regl, data_regh, addr_regl, addr_regh, + s->code_ptr, label_ptr); +#else + base = TCG_REG_A0; + if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { + tcg_out_ext32u(s, base, addr_regl); + addr_regl = base; + } + if (guest_base == 0) { + base = addr_regl; + } else if (guest_base == (int16_t)guest_base) { + tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base); + } else { + tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl); + } + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); +#endif +} + +static void tcg_out_mb(TCGContext *s, TCGArg a0) +{ + static const MIPSInsn sync[] = { + /* Note that SYNC_MB is a slightly weaker than SYNC 0, + as the former is an ordering barrier and the latter + is a completion barrier. */ + [0 ... 
TCG_MO_ALL] = OPC_SYNC_MB, + [TCG_MO_LD_LD] = OPC_SYNC_RMB, + [TCG_MO_ST_ST] = OPC_SYNC_WMB, + [TCG_MO_LD_ST] = OPC_SYNC_RELEASE, + [TCG_MO_LD_ST | TCG_MO_ST_ST] = OPC_SYNC_RELEASE, + [TCG_MO_LD_ST | TCG_MO_LD_LD] = OPC_SYNC_ACQUIRE, + }; + tcg_out32(s, sync[a0 & TCG_MO_ALL]); +} + +static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6, + int width, TCGReg a0, TCGReg a1, TCGArg a2) +{ + if (use_mips32r6_instructions) { + if (a2 == width) { + tcg_out_opc_reg(s, opcv6, a0, a1, 0); + } else { + tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0); + tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0); + } + } else { + if (a2 == width) { + tcg_out_opc_reg(s, opcv2, a0, a1, a1); + } else if (a0 == a2) { + tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1); + tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1); + } else if (a0 != a1) { + tcg_out_opc_reg(s, opcv2, a0, a1, a1); + tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1); + } else { + tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1); + tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1); + tcg_out_mov(s, TCG_TYPE_REG, a0, TCG_TMP0); + } + } +} + +static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg *args, const int *const_args) +{ + MIPSInsn i1, i2; + TCGArg a0, a1, a2; + int c2; + + a0 = args[0]; + a1 = args[1]; + a2 = args[2]; + c2 = const_args[2]; + + switch (opc) { + case INDEX_op_exit_tb: + { + TCGReg b0 = TCG_REG_ZERO; + + a0 = (intptr_t)a0; + if (a0 & ~0xffff) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff); + b0 = TCG_REG_V0; + } + if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, + (uintptr_t)tb_ret_addr); + tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); + } + tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff); + } + break; + case INDEX_op_goto_tb: + if (s->tb_jmp_insn_offset) { + /* direct jump method */ + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); + /* Avoid clobbering the address during retranslation. 
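+               On retranslation the buffer already holds a J insn whose
+               26-bit target field may have been patched to point at a
+               live destination; OR-ing those existing low bits back
+               into the re-emitted opcode preserves the patched target
+               rather than resetting the jump.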
*/ + tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff)); + } else { + /* indirect jump method */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO, + (uintptr_t)(s->tb_jmp_target_addr + a0)); + tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); + } + tcg_out_nop(s); + set_jmp_reset_offset(s, a0); + break; + case INDEX_op_goto_ptr: + /* jmp to the given host address (could be epilogue) */ + tcg_out_opc_reg(s, OPC_JR, 0, a0, 0); + tcg_out_nop(s); + break; + case INDEX_op_br: + tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, + arg_label(a0)); + break; + + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + i1 = OPC_LBU; + goto do_ldst; + case INDEX_op_ld8s_i32: + case INDEX_op_ld8s_i64: + i1 = OPC_LB; + goto do_ldst; + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + i1 = OPC_LHU; + goto do_ldst; + case INDEX_op_ld16s_i32: + case INDEX_op_ld16s_i64: + i1 = OPC_LH; + goto do_ldst; + case INDEX_op_ld_i32: + case INDEX_op_ld32s_i64: + i1 = OPC_LW; + goto do_ldst; + case INDEX_op_ld32u_i64: + i1 = OPC_LWU; + goto do_ldst; + case INDEX_op_ld_i64: + i1 = OPC_LD; + goto do_ldst; + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + i1 = OPC_SB; + goto do_ldst; + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + i1 = OPC_SH; + goto do_ldst; + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + i1 = OPC_SW; + goto do_ldst; + case INDEX_op_st_i64: + i1 = OPC_SD; + do_ldst: + tcg_out_ldst(s, i1, a0, a1, a2); + break; + + case INDEX_op_add_i32: + i1 = OPC_ADDU, i2 = OPC_ADDIU; + goto do_binary; + case INDEX_op_add_i64: + i1 = OPC_DADDU, i2 = OPC_DADDIU; + goto do_binary; + case INDEX_op_or_i32: + case INDEX_op_or_i64: + i1 = OPC_OR, i2 = OPC_ORI; + goto do_binary; + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + i1 = OPC_XOR, i2 = OPC_XORI; + do_binary: + if (c2) { + tcg_out_opc_imm(s, i2, a0, a1, a2); + break; + } + do_binaryv: + tcg_out_opc_reg(s, i1, a0, a1, a2); + break; + + case INDEX_op_sub_i32: + i1 = OPC_SUBU, i2 = OPC_ADDIU; + goto do_subtract; + case INDEX_op_sub_i64: + i1 = OPC_DSUBU, i2 = OPC_DADDIU; + do_subtract: + if (c2) { + tcg_out_opc_imm(s, i2, a0, a1, -a2); + break; + } + goto do_binaryv; + case INDEX_op_and_i32: + if (c2 && a2 != (uint16_t)a2) { + int msb = ctz32(~a2) - 1; + tcg_debug_assert(use_mips32r2_instructions); + tcg_debug_assert(is_p2m1(a2)); + tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0); + break; + } + i1 = OPC_AND, i2 = OPC_ANDI; + goto do_binary; + case INDEX_op_and_i64: + if (c2 && a2 != (uint16_t)a2) { + int msb = ctz64(~a2) - 1; + tcg_debug_assert(use_mips32r2_instructions); + tcg_debug_assert(is_p2m1(a2)); + tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0); + break; + } + i1 = OPC_AND, i2 = OPC_ANDI; + goto do_binary; + case INDEX_op_nor_i32: + case INDEX_op_nor_i64: + i1 = OPC_NOR; + goto do_binaryv; + + case INDEX_op_mul_i32: + if (use_mips32_instructions) { + tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2); + break; + } + i1 = OPC_MULT, i2 = OPC_MFLO; + goto do_hilo1; + case INDEX_op_mulsh_i32: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2); + break; + } + i1 = OPC_MULT, i2 = OPC_MFHI; + goto do_hilo1; + case INDEX_op_muluh_i32: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_MUHU, a0, a1, a2); + break; + } + i1 = OPC_MULTU, i2 = OPC_MFHI; + goto do_hilo1; + case INDEX_op_div_i32: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2); + break; + } + i1 = OPC_DIV, i2 = OPC_MFLO; + goto do_hilo1; + case INDEX_op_divu_i32: + if (use_mips32r6_instructions) { + 
tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2); + break; + } + i1 = OPC_DIVU, i2 = OPC_MFLO; + goto do_hilo1; + case INDEX_op_rem_i32: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2); + break; + } + i1 = OPC_DIV, i2 = OPC_MFHI; + goto do_hilo1; + case INDEX_op_remu_i32: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2); + break; + } + i1 = OPC_DIVU, i2 = OPC_MFHI; + goto do_hilo1; + case INDEX_op_mul_i64: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2); + break; + } + i1 = OPC_DMULT, i2 = OPC_MFLO; + goto do_hilo1; + case INDEX_op_mulsh_i64: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_DMUH, a0, a1, a2); + break; + } + i1 = OPC_DMULT, i2 = OPC_MFHI; + goto do_hilo1; + case INDEX_op_muluh_i64: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_DMUHU, a0, a1, a2); + break; + } + i1 = OPC_DMULTU, i2 = OPC_MFHI; + goto do_hilo1; + case INDEX_op_div_i64: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2); + break; + } + i1 = OPC_DDIV, i2 = OPC_MFLO; + goto do_hilo1; + case INDEX_op_divu_i64: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2); + break; + } + i1 = OPC_DDIVU, i2 = OPC_MFLO; + goto do_hilo1; + case INDEX_op_rem_i64: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2); + break; + } + i1 = OPC_DDIV, i2 = OPC_MFHI; + goto do_hilo1; + case INDEX_op_remu_i64: + if (use_mips32r6_instructions) { + tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2); + break; + } + i1 = OPC_DDIVU, i2 = OPC_MFHI; + do_hilo1: + tcg_out_opc_reg(s, i1, 0, a1, a2); + tcg_out_opc_reg(s, i2, a0, 0, 0); + break; + + case INDEX_op_muls2_i32: + i1 = OPC_MULT; + goto do_hilo2; + case INDEX_op_mulu2_i32: + i1 = OPC_MULTU; + goto do_hilo2; + case INDEX_op_muls2_i64: + i1 = OPC_DMULT; + goto do_hilo2; + case INDEX_op_mulu2_i64: + i1 = OPC_DMULTU; + do_hilo2: + tcg_out_opc_reg(s, i1, 0, a2, args[3]); + tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0); + tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0); + break; + + case INDEX_op_not_i32: + case INDEX_op_not_i64: + i1 = OPC_NOR; + goto do_unary; + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + i1 = OPC_WSBH; + goto do_unary; + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + i1 = OPC_SEB; + goto do_unary; + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + i1 = OPC_SEH; + do_unary: + tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1); + break; + + case INDEX_op_bswap32_i32: + tcg_out_bswap32(s, a0, a1); + break; + case INDEX_op_bswap32_i64: + tcg_out_bswap32u(s, a0, a1); + break; + case INDEX_op_bswap64_i64: + tcg_out_bswap64(s, a0, a1); + break; + case INDEX_op_extrh_i64_i32: + tcg_out_dsra(s, a0, a1, 32); + break; + case INDEX_op_ext32s_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extrl_i64_i32: + tcg_out_opc_sa(s, OPC_SLL, a0, a1, 0); + break; + case INDEX_op_ext32u_i64: + case INDEX_op_extu_i32_i64: + tcg_out_ext32u(s, a0, a1); + break; + + case INDEX_op_sar_i32: + i1 = OPC_SRAV, i2 = OPC_SRA; + goto do_shift; + case INDEX_op_shl_i32: + i1 = OPC_SLLV, i2 = OPC_SLL; + goto do_shift; + case INDEX_op_shr_i32: + i1 = OPC_SRLV, i2 = OPC_SRL; + goto do_shift; + case INDEX_op_rotr_i32: + i1 = OPC_ROTRV, i2 = OPC_ROTR; + do_shift: + if (c2) { + tcg_out_opc_sa(s, i2, a0, a1, a2); + break; + } + do_shiftv: + tcg_out_opc_reg(s, i1, a0, a2, a1); + break; + case INDEX_op_rotl_i32: + if (c2) { + tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2); + } else { + tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, 
TCG_REG_ZERO, a2); + tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1); + } + break; + case INDEX_op_sar_i64: + if (c2) { + tcg_out_dsra(s, a0, a1, a2); + break; + } + i1 = OPC_DSRAV; + goto do_shiftv; + case INDEX_op_shl_i64: + if (c2) { + tcg_out_dsll(s, a0, a1, a2); + break; + } + i1 = OPC_DSLLV; + goto do_shiftv; + case INDEX_op_shr_i64: + if (c2) { + tcg_out_dsrl(s, a0, a1, a2); + break; + } + i1 = OPC_DSRLV; + goto do_shiftv; + case INDEX_op_rotr_i64: + if (c2) { + tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2); + break; + } + i1 = OPC_DROTRV; + goto do_shiftv; + case INDEX_op_rotl_i64: + if (c2) { + tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, 64 - a2); + } else { + tcg_out_opc_reg(s, OPC_DSUBU, TCG_TMP0, TCG_REG_ZERO, a2); + tcg_out_opc_reg(s, OPC_DROTRV, a0, TCG_TMP0, a1); + } + break; + + case INDEX_op_clz_i32: + tcg_out_clz(s, OPC_CLZ, OPC_CLZ_R6, 32, a0, a1, a2); + break; + case INDEX_op_clz_i64: + tcg_out_clz(s, OPC_DCLZ, OPC_DCLZ_R6, 64, a0, a1, a2); + break; + + case INDEX_op_deposit_i32: + tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]); + break; + case INDEX_op_deposit_i64: + tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2, + args[3] + args[4] - 1, args[3]); + break; + case INDEX_op_extract_i32: + tcg_out_opc_bf(s, OPC_EXT, a0, a1, args[3] - 1, a2); + break; + case INDEX_op_extract_i64: + tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, + args[3] - 1, a2); + break; + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); + break; + case INDEX_op_brcond2_i32: + tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5])); + break; + + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: + tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]); + break; + + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + tcg_out_setcond(s, args[3], a0, a1, a2); + break; + case INDEX_op_setcond2_i32: + tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); + break; + + case INDEX_op_qemu_ld_i32: + tcg_out_qemu_ld(s, args, false); + break; + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, args, true); + break; + case INDEX_op_qemu_st_i32: + tcg_out_qemu_st(s, args, false); + break; + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, args, true); + break; + + case INDEX_op_add2_i32: + tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], + const_args[4], const_args[5], false); + break; + case INDEX_op_sub2_i32: + tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], + const_args[4], const_args[5], true); + break; + + case INDEX_op_mb: + tcg_out_mb(s, a0); + break; + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. 
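+       These opcodes are lowered by the common TCG code paths named
+       above and are never dispatched to the backend, so they
+       deliberately fall through to the tcg_abort() below: reaching
+       them here is a bug.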
*/ + default: + tcg_abort(); + } +} + +static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) +{ + static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; + static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; + static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; + static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; + static const TCGTargetOpDef SZ_S = { .args_ct_str = { "SZ", "S" } }; + static const TCGTargetOpDef rZ_rZ = { .args_ct_str = { "rZ", "rZ" } }; + static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } }; + static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } }; + static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; + static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; + static const TCGTargetOpDef r_r_rJ = { .args_ct_str = { "r", "r", "rJ" } }; + static const TCGTargetOpDef SZ_S_S = { .args_ct_str = { "SZ", "S", "S" } }; + static const TCGTargetOpDef SZ_SZ_S + = { .args_ct_str = { "SZ", "SZ", "S" } }; + static const TCGTargetOpDef SZ_SZ_S_S + = { .args_ct_str = { "SZ", "SZ", "S", "S" } }; + static const TCGTargetOpDef r_rZ_rN + = { .args_ct_str = { "r", "rZ", "rN" } }; + static const TCGTargetOpDef r_rZ_rZ + = { .args_ct_str = { "r", "rZ", "rZ" } }; + static const TCGTargetOpDef r_r_rIK + = { .args_ct_str = { "r", "r", "rIK" } }; + static const TCGTargetOpDef r_r_rWZ + = { .args_ct_str = { "r", "r", "rWZ" } }; + static const TCGTargetOpDef r_r_r_r + = { .args_ct_str = { "r", "r", "r", "r" } }; + static const TCGTargetOpDef r_r_L_L + = { .args_ct_str = { "r", "r", "L", "L" } }; + static const TCGTargetOpDef dep + = { .args_ct_str = { "r", "0", "rZ" } }; + static const TCGTargetOpDef movc + = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "0" } }; + static const TCGTargetOpDef movc_r6 + = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } }; + static const TCGTargetOpDef add2 + = { .args_ct_str = { "r", "r", "rZ", "rZ", "rN", "rN" } }; + static const TCGTargetOpDef br2 + = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } }; + static const TCGTargetOpDef setc2 + = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } }; + + switch (op) { + case INDEX_op_goto_ptr: + return &r; + + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_not_i32: + case INDEX_op_bswap16_i32: + case INDEX_op_bswap32_i32: + case INDEX_op_ext8s_i32: + case INDEX_op_ext16s_i32: + case INDEX_op_extract_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld_i64: + case INDEX_op_not_i64: + case INDEX_op_bswap16_i64: + case INDEX_op_bswap32_i64: + case INDEX_op_bswap64_i64: + case INDEX_op_ext8s_i64: + case INDEX_op_ext16s_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_extrh_i64_i32: + case INDEX_op_extract_i64: + return &r_r; + + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + return &rZ_r; + + case INDEX_op_add_i32: + case INDEX_op_add_i64: + return &r_r_rJ; + case INDEX_op_sub_i32: + case INDEX_op_sub_i64: + return &r_rZ_rN; + case INDEX_op_mul_i32: + case INDEX_op_mulsh_i32: + case INDEX_op_muluh_i32: + case INDEX_op_div_i32: + 
case INDEX_op_divu_i32: + case INDEX_op_rem_i32: + case INDEX_op_remu_i32: + case INDEX_op_nor_i32: + case INDEX_op_setcond_i32: + case INDEX_op_mul_i64: + case INDEX_op_mulsh_i64: + case INDEX_op_muluh_i64: + case INDEX_op_div_i64: + case INDEX_op_divu_i64: + case INDEX_op_rem_i64: + case INDEX_op_remu_i64: + case INDEX_op_nor_i64: + case INDEX_op_setcond_i64: + return &r_rZ_rZ; + case INDEX_op_muls2_i32: + case INDEX_op_mulu2_i32: + case INDEX_op_muls2_i64: + case INDEX_op_mulu2_i64: + return &r_r_r_r; + case INDEX_op_and_i32: + case INDEX_op_and_i64: + return &r_r_rIK; + case INDEX_op_or_i32: + case INDEX_op_xor_i32: + case INDEX_op_or_i64: + case INDEX_op_xor_i64: + return &r_r_rI; + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + case INDEX_op_rotr_i32: + case INDEX_op_rotl_i32: + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + case INDEX_op_rotr_i64: + case INDEX_op_rotl_i64: + return &r_r_ri; + case INDEX_op_clz_i32: + case INDEX_op_clz_i64: + return &r_r_rWZ; + + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + return &dep; + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + return &rZ_rZ; + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: + return use_mips32r6_instructions ? &movc_r6 : &movc; + + case INDEX_op_add2_i32: + case INDEX_op_sub2_i32: + return &add2; + case INDEX_op_setcond2_i32: + return &setc2; + case INDEX_op_brcond2_i32: + return &br2; + + case INDEX_op_qemu_ld_i32: + return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 + ? &r_L : &r_L_L); + case INDEX_op_qemu_st_i32: + return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 + ? &SZ_S : &SZ_S_S); + case INDEX_op_qemu_ld_i64: + return (TCG_TARGET_REG_BITS == 64 ? &r_L + : TARGET_LONG_BITS == 32 ? &r_r_L : &r_r_L_L); + case INDEX_op_qemu_st_i64: + return (TCG_TARGET_REG_BITS == 64 ? &SZ_S + : TARGET_LONG_BITS == 32 ? &SZ_SZ_S : &SZ_SZ_S_S); + + default: + return NULL; + } +} + +static const int tcg_target_callee_save_regs[] = { + TCG_REG_S0, /* used for the global env (TCG_AREG0) */ + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + TCG_REG_RA, /* should be last for ABI compliance */ +}; + +/* The Linux kernel doesn't provide any information about the available + instruction set. Probe it using a signal handler. */ + + +#ifndef use_movnz_instructions +bool use_movnz_instructions = false; +#endif + +#ifndef use_mips32_instructions +bool use_mips32_instructions = false; +#endif + +#ifndef use_mips32r2_instructions +bool use_mips32r2_instructions = false; +#endif + +static volatile sig_atomic_t got_sigill; + +static void sigill_handler(int signo, siginfo_t *si, void *data) +{ + /* Skip the faulty instruction */ + ucontext_t *uc = (ucontext_t *)data; + uc->uc_mcontext.pc += 4; + + got_sigill = 1; +} + +static void tcg_target_detect_isa(void) +{ + struct sigaction sa_old, sa_new; + + memset(&sa_new, 0, sizeof(sa_new)); + sa_new.sa_flags = SA_SIGINFO; + sa_new.sa_sigaction = sigill_handler; + sigaction(SIGILL, &sa_new, &sa_old); + + /* Probe for movn/movz, necessary to implement movcond. */ +#ifndef use_movnz_instructions + got_sigill = 0; + asm volatile(".set push\n" + ".set mips32\n" + "movn $zero, $zero, $zero\n" + "movz $zero, $zero, $zero\n" + ".set pop\n" + : : : ); + use_movnz_instructions = !got_sigill; +#endif + + /* Probe for MIPS32 instructions. As no subsetting is allowed + by the specification, it is only necessary to probe for one + of the instructions. 
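+       The probe pattern is the same in each case: clear got_sigill,
+       execute one representative insn (mul here) under the SIGILL
+       handler installed above, and read back whether it trapped; the
+       handler advances the faulting PC by 4, so an unsupported insn is
+       skipped rather than re-faulting.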
*/ +#ifndef use_mips32_instructions + got_sigill = 0; + asm volatile(".set push\n" + ".set mips32\n" + "mul $zero, $zero\n" + ".set pop\n" + : : : ); + use_mips32_instructions = !got_sigill; +#endif + + /* Probe for MIPS32r2 instructions if MIPS32 instructions are + available. As no subsetting is allowed by the specification, + it is only necessary to probe for one of the instructions. */ +#ifndef use_mips32r2_instructions + if (use_mips32_instructions) { + got_sigill = 0; + asm volatile(".set push\n" + ".set mips32r2\n" + "seb $zero, $zero\n" + ".set pop\n" + : : : ); + use_mips32r2_instructions = !got_sigill; + } +#endif + + sigaction(SIGILL, &sa_old, NULL); +} + +static tcg_insn_unit *align_code_ptr(TCGContext *s) +{ + uintptr_t p = (uintptr_t)s->code_ptr; + if (p & 15) { + p = (p + 15) & -16; + s->code_ptr = (void *)p; + } + return s->code_ptr; +} + +/* Stack frame parameters. */ +#define REG_SIZE (TCG_TARGET_REG_BITS / 8) +#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) +#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) + +#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ + + TCG_TARGET_STACK_ALIGN - 1) \ + & -TCG_TARGET_STACK_ALIGN) +#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) + +/* We're expecting to be able to use an immediate for frame allocation. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7fff); + +/* Generate global QEMU prologue and epilogue code */ +static void tcg_target_qemu_prologue(TCGContext *s) +{ + int i; + + tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); + + /* TB prologue */ + tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { + tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], + TCG_REG_SP, SAVE_OFS + i * REG_SIZE); + } + +#ifndef CONFIG_SOFTMMU + if (guest_base) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); + tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); + } +#endif + + /* Call generated code */ + tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0); + /* delay slot */ + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + + /* + * Return path for goto_ptr. Set return value to 0, a-la exit_tb, + * and fall through to the rest of the epilogue. + */ + s->code_gen_epilogue = s->code_ptr; + tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_V0, TCG_REG_ZERO); + + /* TB epilogue */ + tb_ret_addr = s->code_ptr; + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { + tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], + TCG_REG_SP, SAVE_OFS + i * REG_SIZE); + } + + tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); + /* delay slot */ + tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); + + if (use_mips32r2_instructions) { + return; + } + + /* Bswap subroutines: Input in TCG_TMP0, output in TCG_TMP3; + clobbers TCG_TMP1, TCG_TMP2. */ + + /* + * bswap32 -- 32-bit swap (signed result for mips64). a0 = abcd. 
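+ * Worked example (illustrative input): for a0 = 0x11223344, i.e.
+ * a=0x11 b=0x22 c=0x33 d=0x44, t3 builds up through the steps below as
+ * 0x44000000, 0x44000011, 0x44330011 and finally 0x44332211.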
+ */ + bswap32_addr = align_code_ptr(s); + /* t3 = (ssss)d000 */ + tcg_out_opc_sa(s, OPC_SLL, TCG_TMP3, TCG_TMP0, 24); + /* t1 = 000a */ + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 24); + /* t2 = 00c0 */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00); + /* t3 = d00a */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); + /* t1 = 0abc */ + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8); + /* t2 = 0c00 */ + tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8); + /* t1 = 00b0 */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00); + /* t3 = dc0a */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2); + tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); + /* t3 = dcba -- delay slot */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); + + if (TCG_TARGET_REG_BITS == 32) { + return; + } + + /* + * bswap32u -- unsigned 32-bit swap. a0 = ....abcd. + */ + bswap32u_addr = align_code_ptr(s); + /* t1 = (0000)000d */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP0, 0xff); + /* t3 = 000a */ + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, TCG_TMP0, 24); + /* t1 = (0000)d000 */ + tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24); + /* t2 = 00c0 */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00); + /* t3 = d00a */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); + /* t1 = 0abc */ + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8); + /* t2 = 0c00 */ + tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8); + /* t1 = 00b0 */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00); + /* t3 = dc0a */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2); + tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); + /* t3 = dcba -- delay slot */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); + + /* + * bswap64 -- 64-bit swap. 
a0 = abcdefgh + */ + bswap64_addr = align_code_ptr(s); + /* t3 = h0000000 */ + tcg_out_dsll(s, TCG_TMP3, TCG_TMP0, 56); + /* t1 = 0000000a */ + tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 56); + + /* t2 = 000000g0 */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00); + /* t3 = h000000a */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); + /* t1 = 00000abc */ + tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 40); + /* t2 = 0g000000 */ + tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40); + /* t1 = 000000b0 */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00); + + /* t3 = hg00000a */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2); + /* t2 = 0000abcd */ + tcg_out_dsrl(s, TCG_TMP2, TCG_TMP0, 32); + /* t3 = hg0000ba */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); + + /* t1 = 000000c0 */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP2, 0xff00); + /* t2 = 0000000d */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP2, 0x00ff); + /* t1 = 00000c00 */ + tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 8); + /* t2 = 0000d000 */ + tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 24); + + /* t3 = hg000cba */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); + /* t1 = 00abcdef */ + tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 16); + /* t3 = hg00dcba */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2); + + /* t2 = 0000000f */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP1, 0x00ff); + /* t1 = 000000e0 */ + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00); + /* t2 = 00f00000 */ + tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40); + /* t1 = 000e0000 */ + tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24); + + /* t3 = hgf0dcba */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2); + tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); + /* t3 = hgfedcba -- delay slot */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); +} + +static void tcg_target_init(TCGContext *s) +{ + tcg_target_detect_isa(); + s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; + if (TCG_TARGET_REG_BITS == 64) { + s->tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; + } + + s->tcg_target_call_clobber_regs = 0; + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V0); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V1); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_A0); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_A1); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_A2); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_A3); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T0); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T1); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T2); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T3); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T4); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T5); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T6); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T7); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T8); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_T9); + + s->reserved_regs = 0; + tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */ + tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* internal use */ + tcg_regset_set_reg(s->reserved_regs, 
TCG_TMP1);   /* internal use */
+    tcg_regset_set_reg(s->reserved_regs, TCG_TMP2);   /* internal use */
+    tcg_regset_set_reg(s->reserved_regs, TCG_TMP3);   /* internal use */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
+    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */
+}
+
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
+                              uintptr_t addr)
+{
+    atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
+    flush_icache_range(jmp_addr, jmp_addr + 4);
+}
+
+typedef struct {
+    DebugFrameHeader h;
+    uint8_t fde_def_cfa[4];
+    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
+} DebugFrame;
+
+#define ELF_HOST_MACHINE EM_MIPS
+/* GDB doesn't appear to require proper setting of ELF_HOST_FLAGS,
+   which is good because they're really quite complicated for MIPS. */
+
+static const DebugFrame debug_frame = {
+    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
+    .h.cie.id = -1,
+    .h.cie.version = 1,
+    .h.cie.code_align = 1,
+    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
+    .h.cie.return_column = TCG_REG_RA,
+
+    /* Total FDE size does not include the "len" member. */
+    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
+
+    .fde_def_cfa = {
+        12, TCG_REG_SP,             /* DW_CFA_def_cfa sp, ... */
+        (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
+        (FRAME_SIZE >> 7)
+    },
+    .fde_reg_ofs = {
+        0x80 + 16, 9,               /* DW_CFA_offset, s0, -72 */
+        0x80 + 17, 8,               /* DW_CFA_offset, s1, -64 */
+        0x80 + 18, 7,               /* DW_CFA_offset, s2, -56 */
+        0x80 + 19, 6,               /* DW_CFA_offset, s3, -48 */
+        0x80 + 20, 5,               /* DW_CFA_offset, s4, -40 */
+        0x80 + 21, 4,               /* DW_CFA_offset, s5, -32 */
+        0x80 + 22, 3,               /* DW_CFA_offset, s6, -24 */
+        0x80 + 30, 2,               /* DW_CFA_offset, s8, -16 */
+        0x80 + 31, 1,               /* DW_CFA_offset, ra, -8 */
+    }
+};
+
+void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size)
+{
+    tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame));
+}
diff --git a/qemu/tcg/optimize.c b/qemu/tcg/optimize.c
index 8693ebff..e747d78a 100644
--- a/qemu/tcg/optimize.c
+++ b/qemu/tcg/optimize.c
@@ -23,128 +23,137 @@
  * THE SOFTWARE.
  */
 
-#include "config.h"
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "qemu-common.h"
-#include "tcg-op.h"
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
 
 #define CASE_OP_32_64(x)                        \
         glue(glue(case INDEX_op_, x), _i32):    \
         glue(glue(case INDEX_op_, x), _i64)
 
-/* Reset TEMP's state to TCG_TEMP_UNDEF.  If TEMP only had one copy, remove
-   the copy flag from the left temp.  */
-static void reset_temp(TCGContext *s, TCGArg temp)
-{
-    struct tcg_temp_info *temps = s->temps2;
+#define CASE_OP_32_64_VEC(x)                    \
+        glue(glue(case INDEX_op_, x), _i32):    \
+        glue(glue(case INDEX_op_, x), _i64):    \
+        glue(glue(case INDEX_op_, x), _vec)
 
-    if (temps[temp].state == TCG_TEMP_COPY) {
-        if (temps[temp].prev_copy == temps[temp].next_copy) {
-            temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF;
-        } else {
-            temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
-            temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
-        }
-    }
-    temps[temp].state = TCG_TEMP_UNDEF;
-    temps[temp].mask = -1;
+struct tcg_temp_info {
+    bool is_const;
+    TCGTemp *prev_copy;
+    TCGTemp *next_copy;
+    tcg_target_ulong val;
+    tcg_target_ulong mask;
+};
+
+static inline struct tcg_temp_info *ts_info(TCGTemp *ts)
+{
+    return ts->state_ptr;
 }
 
-/* Reset all temporaries, given that there are NB_TEMPS of them.  */
-static void reset_all_temps(TCGContext *s, int nb_temps)
+static inline struct tcg_temp_info *arg_info(TCGArg arg)
 {
-    struct tcg_temp_info *temps = s->temps2;
-    int i;
+    return ts_info(arg_temp(arg));
+}
 
-    for (i = 0; i < nb_temps; i++) {
-        temps[i].state = TCG_TEMP_UNDEF;
-        temps[i].mask = -1;
+static inline bool ts_is_const(TCGTemp *ts)
+{
+    return ts_info(ts)->is_const;
+}
+
+static inline bool arg_is_const(TCGArg arg)
+{
+    return ts_is_const(arg_temp(arg));
+}
+
+static inline bool ts_is_copy(TCGTemp *ts)
+{
+    return ts_info(ts)->next_copy != ts;
+}
+
+/* Reset TEMP's state, possibly removing the temp from the list of copies.  */
+static void reset_ts(TCGTemp *ts)
+{
+    struct tcg_temp_info *ti = ts_info(ts);
+    struct tcg_temp_info *pi = ts_info(ti->prev_copy);
+    struct tcg_temp_info *ni = ts_info(ti->next_copy);
+
+    ni->prev_copy = ti->prev_copy;
+    pi->next_copy = ti->next_copy;
+    ti->next_copy = ts;
+    ti->prev_copy = ts;
+    ti->is_const = false;
+    ti->mask = -1;
+}
+
+static void reset_temp(TCGArg arg)
+{
+    reset_ts(arg_temp(arg));
+}
+
+/* Initialize and activate a temporary.  */
+static void init_ts_info(TCGContext *tcg_ctx, struct tcg_temp_info *infos,
+                         TCGTempSet *temps_used, TCGTemp *ts)
+{
+    size_t idx = temp_idx(tcg_ctx, ts);
+    if (!test_bit(idx, temps_used->l)) {
+        struct tcg_temp_info *ti = &infos[idx];
+
+        ts->state_ptr = ti;
+        ti->next_copy = ts;
+        ti->prev_copy = ts;
+        ti->is_const = false;
+        ti->mask = -1;
+        set_bit(idx, temps_used->l);
     }
 }
 
-static int op_bits(TCGContext *s, TCGOpcode op)
+static void init_arg_info(TCGContext *tcg_ctx, struct tcg_temp_info *infos,
+                          TCGTempSet *temps_used, TCGArg arg)
 {
-    const TCGOpDef *def = &s->tcg_op_defs[op];
-    return def->flags & TCG_OPF_64BIT ? 64 : 32;
+    init_ts_info(tcg_ctx, infos, temps_used, arg_temp(arg));
 }
 
-static TCGOpcode op_to_mov(TCGContext *s, TCGOpcode op)
+static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
 {
-    switch (op_bits(s, op)) {
-    case 32:
-        return INDEX_op_mov_i32;
-    case 64:
-        return INDEX_op_mov_i64;
-    default:
-        fprintf(stderr, "op_to_mov: unexpected return value of "
-                "function op_bits.\n");
-        tcg_abort();
-    }
-}
-
-static TCGOpcode op_to_movi(TCGContext *s, TCGOpcode op)
-{
-    switch (op_bits(s, op)) {
-    case 32:
-        return INDEX_op_movi_i32;
-    case 64:
-        return INDEX_op_movi_i64;
-    default:
-        fprintf(stderr, "op_to_movi: unexpected return value of "
-                "function op_bits.\n");
-        tcg_abort();
-    }
-}
-
-static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
-{
-    struct tcg_temp_info *temps = s->temps2;
-    TCGArg i;
+    TCGTemp *i;
 
     /* If this is already a global, we can't do better.  */
-    if (temp < (unsigned int)s->nb_globals) {
-        return temp;
+    if (ts->temp_global) {
+        return ts;
     }
 
     /* Search for a global first.  */
-    for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
-        if (i < (unsigned int)s->nb_globals) {
+    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
+        if (i->temp_global) {
             return i;
         }
     }
 
     /* If it is a temp, search for a temp local.  */
-    if (!s->temps[temp].temp_local) {
-        for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
-            if (s->temps[i].temp_local) {
+    if (!ts->temp_local) {
+        for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
+            if (i->temp_local) {
                 return i;
             }
         }
     }
 
     /* Failure to find a better representation, return the same temp.
*/ - return temp; + return ts; } -static bool temps_are_copies(TCGContext *s, TCGArg arg1, TCGArg arg2) +static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2) { - struct tcg_temp_info *temps = s->temps2; - TCGArg i; + TCGTemp *i; - if (arg1 == arg2) { + if (ts1 == ts2) { return true; } - if (temps[arg1].state != TCG_TEMP_COPY - || temps[arg2].state != TCG_TEMP_COPY) { + if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) { return false; } - for (i = temps[arg1].next_copy ; i != arg1 ; i = temps[i].next_copy) { - if (i == arg2) { + for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) { + if (i == ts2) { return true; } } @@ -152,63 +161,90 @@ static bool temps_are_copies(TCGContext *s, TCGArg arg1, TCGArg arg2) return false; } -static void tcg_opt_gen_mov(TCGContext *s, int op_index, TCGArg *gen_args, - TCGOpcode old_op, TCGArg dst, TCGArg src) +static bool args_are_copies(TCGArg arg1, TCGArg arg2) { - struct tcg_temp_info *temps = s->temps2; - TCGOpcode new_op = op_to_mov(s, old_op); - tcg_target_ulong mask; - - s->gen_opc_buf[op_index] = new_op; - - reset_temp(s, dst); - mask = temps[src].mask; - if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { - /* High bits of the destination are now garbage. */ - mask |= ~0xffffffffull; - } - temps[dst].mask = mask; - - assert(temps[src].state != TCG_TEMP_CONST); - - if (s->temps[src].type == s->temps[dst].type) { - if (temps[src].state != TCG_TEMP_COPY) { - temps[src].state = TCG_TEMP_COPY; - temps[src].next_copy = src; - temps[src].prev_copy = src; - } - temps[dst].state = TCG_TEMP_COPY; - temps[dst].next_copy = temps[src].next_copy; - temps[dst].prev_copy = src; - temps[temps[dst].next_copy].prev_copy = dst; - temps[src].next_copy = dst; - } - - gen_args[0] = dst; - gen_args[1] = src; + return ts_are_copies(arg_temp(arg1), arg_temp(arg2)); } -static void tcg_opt_gen_movi(TCGContext *s, int op_index, TCGArg *gen_args, - TCGOpcode old_op, TCGArg dst, TCGArg val) +static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg val) { - struct tcg_temp_info *temps = s->temps2; - TCGOpcode new_op = op_to_movi(s, old_op); + const TCGOpDef *def; + TCGOpcode new_op; tcg_target_ulong mask; + struct tcg_temp_info *di = arg_info(dst); - s->gen_opc_buf[op_index] = new_op; + def = &s->tcg_op_defs[op->opc]; + if (def->flags & TCG_OPF_VECTOR) { + new_op = INDEX_op_dupi_vec; + } else if (def->flags & TCG_OPF_64BIT) { + new_op = INDEX_op_movi_i64; + } else { + new_op = INDEX_op_movi_i32; + } + op->opc = new_op; + /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ + op->args[0] = dst; + op->args[1] = val; - reset_temp(s, dst); - temps[dst].state = TCG_TEMP_CONST; - temps[dst].val = val; + reset_temp(dst); + di->is_const = true; + di->val = val; mask = val; + if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) { + /* High bits of the destination are now garbage. 
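+           A movi_i32 on a 64-bit host leaves bits 32..63 of the
+           register undefined, so the known-bits mask must treat them
+           as possibly set.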
*/ + mask |= ~0xffffffffull; + } + di->mask = mask; +} + +static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) +{ + TCGTemp *dst_ts = arg_temp(dst); + TCGTemp *src_ts = arg_temp(src); + const TCGOpDef *def; + struct tcg_temp_info *di; + struct tcg_temp_info *si; + tcg_target_ulong mask; + TCGOpcode new_op; + + if (ts_are_copies(dst_ts, src_ts)) { + tcg_op_remove(s, op); + return; + } + + reset_ts(dst_ts); + di = ts_info(dst_ts); + si = ts_info(src_ts); + def = &s->tcg_op_defs[op->opc]; + if (def->flags & TCG_OPF_VECTOR) { + new_op = INDEX_op_mov_vec; + } else if (def->flags & TCG_OPF_64BIT) { + new_op = INDEX_op_mov_i64; + } else { + new_op = INDEX_op_mov_i32; + } + op->opc = new_op; + /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ + op->args[0] = dst; + op->args[1] = src; + + mask = si->mask; if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { /* High bits of the destination are now garbage. */ mask |= ~0xffffffffull; } - temps[dst].mask = mask; + di->mask = mask; - gen_args[0] = dst; - gen_args[1] = val; + if (src_ts->type == dst_ts->type) { + struct tcg_temp_info *ni = ts_info(si->next_copy); + + di->next_copy = si->next_copy; + di->prev_copy = src_ts; + ni->prev_copy = dst_ts; + si->next_copy = dst_ts; + di->is_const = si->is_const; + di->val = si->val; + } } static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) @@ -243,7 +279,6 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) case INDEX_op_shr_i32: return (uint32_t)x >> (y & 31); - case INDEX_op_trunc_shr_i32: case INDEX_op_shr_i64: return (uint64_t)x >> (y & 63); @@ -257,19 +292,23 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) return ror32(x, y & 31); case INDEX_op_rotr_i64: - return (TCGArg)ror64(x, y & 63); + return ror64(x, y & 63); case INDEX_op_rotl_i32: return rol32(x, y & 31); case INDEX_op_rotl_i64: - return (TCGArg)rol64(x, y & 63); + return rol64(x, y & 63); CASE_OP_32_64(not): return ~x; CASE_OP_32_64(neg): - return 0-x; +#ifdef _MSC_VER + return (0 - x); +#else + return -x; +#endif CASE_OP_32_64(andc): return x & ~y; @@ -286,6 +325,24 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) CASE_OP_32_64(nor): return ~(x | y); + case INDEX_op_clz_i32: + return (uint32_t)x ? clz32(x) : y; + + case INDEX_op_clz_i64: + return x ? clz64(x) : y; + + case INDEX_op_ctz_i32: + return (uint32_t)x ? ctz32(x) : y; + + case INDEX_op_ctz_i64: + return x ? 
ctz64(x) : y; + + case INDEX_op_ctpop_i32: + return ctpop32(x); + + case INDEX_op_ctpop_i64: + return ctpop64(x); + CASE_OP_32_64(ext8s): return (int8_t)x; @@ -298,12 +355,27 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) CASE_OP_32_64(ext16u): return (uint16_t)x; + CASE_OP_32_64(bswap16): + return bswap16(x); + + CASE_OP_32_64(bswap32): + return bswap32(x); + + case INDEX_op_bswap64_i64: + return bswap64(x); + + case INDEX_op_ext_i32_i64: case INDEX_op_ext32s_i64: return (int32_t)x; + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: case INDEX_op_ext32u_i64: return (uint32_t)x; + case INDEX_op_extrh_i64_i32: + return (uint64_t)x >> 32; + case INDEX_op_muluh_i32: return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32; case INDEX_op_mulsh_i32: @@ -311,10 +383,10 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) case INDEX_op_muluh_i64: mulu64(&l64, &h64, x, y); - return (TCGArg)h64; + return h64; case INDEX_op_mulsh_i64: muls64(&l64, &h64, x, y); - return (TCGArg)h64; + return h64; case INDEX_op_div_i32: /* Avoid crashing on divide by zero, otherwise undefined. */ @@ -344,9 +416,10 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) static TCGArg do_constant_folding(TCGContext *s, TCGOpcode op, TCGArg x, TCGArg y) { + const TCGOpDef *def = &s->tcg_op_defs[op]; TCGArg res = do_constant_folding_2(op, x, y); - if (op_bits(s, op) == 32) { - res &= 0xffffffff; + if (!(def->flags & TCG_OPF_64BIT)) { + res = (int32_t)res; } return res; } @@ -432,20 +505,19 @@ static bool do_constant_folding_cond_eq(TCGCond c) static TCGArg do_constant_folding_cond(TCGContext *s, TCGOpcode op, TCGArg x, TCGArg y, TCGCond c) { - struct tcg_temp_info *temps = s->temps2; - - if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) { - switch (op_bits(s, op)) { - case 32: - return do_constant_folding_cond_32(temps[x].val, temps[y].val, c); - case 64: - return do_constant_folding_cond_64(temps[x].val, temps[y].val, c); - default: - tcg_abort(); + tcg_target_ulong xv = arg_info(x)->val; + tcg_target_ulong yv = arg_info(y)->val; + if (arg_is_const(x) && arg_is_const(y)) { + const TCGOpDef *def = &s->tcg_op_defs[op]; + tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR)); + if (def->flags & TCG_OPF_64BIT) { + return do_constant_folding_cond_64(xv, yv, c); + } else { + return do_constant_folding_cond_32(xv, yv, c); } - } else if (temps_are_copies(s, x, y)) { + } else if (args_are_copies(x, y)) { return do_constant_folding_cond_eq(c); - } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) { + } else if (arg_is_const(y) && yv == 0) { switch (c) { case TCG_COND_LTU: return 0; @@ -454,28 +526,26 @@ static TCGArg do_constant_folding_cond(TCGContext *s, TCGOpcode op, TCGArg x, default: return 2; } - } else { - return 2; } + return 2; } /* Return 2 if the condition can't be simplified, and the result of the condition (0 or 1) if it can */ -static TCGArg do_constant_folding_cond2(TCGContext *s, TCGArg *p1, TCGArg *p2, TCGCond c) +static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) { - struct tcg_temp_info *temps = s->temps2; - TCGArg al = p1[0], ah = p1[1]; TCGArg bl = p2[0], bh = p2[1]; - if (temps[bl].state == TCG_TEMP_CONST - && temps[bh].state == TCG_TEMP_CONST) { - uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val; + if (arg_is_const(bl) && arg_is_const(bh)) { + tcg_target_ulong blv = arg_info(bl)->val; + tcg_target_ulong bhv = arg_info(bh)->val; + uint64_t b = deposit64(blv, 32, 32, 
bhv); - if (temps[al].state == TCG_TEMP_CONST - && temps[ah].state == TCG_TEMP_CONST) { - uint64_t a; - a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val; + if (arg_is_const(al) && arg_is_const(ah)) { + tcg_target_ulong alv = arg_info(al)->val; + tcg_target_ulong ahv = arg_info(ah)->val; + uint64_t a = deposit64(alv, 32, 32, ahv); return do_constant_folding_cond_64(a, b, c); } if (b == 0) { @@ -489,20 +559,18 @@ static TCGArg do_constant_folding_cond2(TCGContext *s, TCGArg *p1, TCGArg *p2, T } } } - if (temps_are_copies(s, al, bl) && temps_are_copies(s, ah, bh)) { + if (args_are_copies(al, bl) && args_are_copies(ah, bh)) { return do_constant_folding_cond_eq(c); } return 2; } -static bool swap_commutative(TCGContext *s, TCGArg dest, TCGArg *p1, TCGArg *p2) +static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2) { - struct tcg_temp_info *temps = s->temps2; TCGArg a1 = *p1, a2 = *p2; int sum = 0; - - sum += temps[a1].state == TCG_TEMP_CONST; - sum -= temps[a2].state == TCG_TEMP_CONST; + sum += arg_is_const(a1); + sum -= arg_is_const(a2); /* Prefer the constant in second argument, and then the form op a, a, b, which is better handled on non-RISC hosts. */ @@ -514,15 +582,13 @@ static bool swap_commutative(TCGContext *s, TCGArg dest, TCGArg *p1, TCGArg *p2) return false; } -static bool swap_commutative2(TCGContext *s, TCGArg *p1, TCGArg *p2) +static bool swap_commutative2(TCGArg *p1, TCGArg *p2) { - struct tcg_temp_info *temps = s->temps2; int sum = 0; - - sum += temps[p1[0]].state == TCG_TEMP_CONST; - sum += temps[p1[1]].state == TCG_TEMP_CONST; - sum -= temps[p2[0]].state == TCG_TEMP_CONST; - sum -= temps[p2[1]].state == TCG_TEMP_CONST; + sum += arg_is_const(p1[0]); + sum += arg_is_const(p1[1]); + sum -= arg_is_const(p2[0]); + sum -= arg_is_const(p2[1]); if (sum > 0) { TCGArg t; t = p1[0], p1[0] = p2[0], p2[0] = t; @@ -533,12 +599,12 @@ static bool swap_commutative2(TCGContext *s, TCGArg *p1, TCGArg *p2) } /* Propagate constants and copies, fold constant expressions. */ -static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, - TCGArg *args, TCGOpDef *tcg_op_defs) +void tcg_optimize(TCGContext *s) { - struct tcg_temp_info *temps = s->temps2; - int nb_ops, op_index, nb_temps, nb_globals; - TCGArg *gen_args; + int nb_temps, nb_globals; + TCGOp *op, *op_next, *prev_mb = NULL; + struct tcg_temp_info *infos; + TCGTempSet temps_used; /* Array VALS has an element for each temp. If this temp holds a constant then its value is kept in VALS' element. 
@@ -547,89 +613,94 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, nb_temps = s->nb_temps; nb_globals = s->nb_globals; - reset_all_temps(s, nb_temps); + bitmap_zero(temps_used.l, nb_temps); + infos = tcg_malloc(s, sizeof(struct tcg_temp_info) * nb_temps); - nb_ops = tcg_opc_ptr - s->gen_opc_buf; - if (nb_ops > OPC_BUF_SIZE) { - return NULL; - } - gen_args = args; - for (op_index = 0; op_index < nb_ops; op_index++) { - TCGOpcode op = s->gen_opc_buf[op_index]; - const TCGOpDef *def = &tcg_op_defs[op]; + QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { tcg_target_ulong mask, partmask, affected; - int nb_oargs, nb_iargs, nb_args, i; + int nb_oargs, nb_iargs, i; TCGArg tmp; + TCGOpcode opc = op->opc; + const TCGOpDef *def = &s->tcg_op_defs[opc]; - if (op == INDEX_op_call) { - *gen_args++ = tmp = *args++; - nb_oargs = tmp >> 16; - nb_iargs = tmp & 0xffff; - nb_args = nb_oargs + nb_iargs + def->nb_cargs; + /* Count the arguments, and initialize the temps that are + going to be used */ + if (opc == INDEX_op_call) { + nb_oargs = TCGOP_CALLO(op); + nb_iargs = TCGOP_CALLI(op); + for (i = 0; i < nb_oargs + nb_iargs; i++) { + TCGTemp *ts = arg_temp(op->args[i]); + if (ts) { + init_ts_info(s, infos, &temps_used, ts); + } + } } else { nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; - nb_args = def->nb_args; + for (i = 0; i < nb_oargs + nb_iargs; i++) { + init_arg_info(s, infos, &temps_used, op->args[i]); + } } /* Do copy propagation */ for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { - if (temps[args[i]].state == TCG_TEMP_COPY) { - args[i] = find_better_copy(s, args[i]); + TCGTemp *ts = arg_temp(op->args[i]); + if (ts && ts_is_copy(ts)) { + op->args[i] = temp_arg(find_better_copy(s, ts)); } } /* For commutative operations make constant second argument */ - switch (op) { - CASE_OP_32_64(add): - CASE_OP_32_64(mul): - CASE_OP_32_64(and): - CASE_OP_32_64(or): - CASE_OP_32_64(xor): + switch (opc) { + CASE_OP_32_64_VEC(add): + CASE_OP_32_64_VEC(mul): + CASE_OP_32_64_VEC(and): + CASE_OP_32_64_VEC(or): + CASE_OP_32_64_VEC(xor): CASE_OP_32_64(eqv): CASE_OP_32_64(nand): CASE_OP_32_64(nor): CASE_OP_32_64(muluh): CASE_OP_32_64(mulsh): - swap_commutative(s, args[0], &args[1], &args[2]); + swap_commutative(op->args[0], &op->args[1], &op->args[2]); break; CASE_OP_32_64(brcond): - if (swap_commutative(s, -1, &args[0], &args[1])) { - args[2] = tcg_swap_cond(args[2]); + if (swap_commutative(-1, &op->args[0], &op->args[1])) { + op->args[2] = tcg_swap_cond(op->args[2]); } break; CASE_OP_32_64(setcond): - if (swap_commutative(s, args[0], &args[1], &args[2])) { - args[3] = tcg_swap_cond(args[3]); + if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { + op->args[3] = tcg_swap_cond(op->args[3]); } break; CASE_OP_32_64(movcond): - if (swap_commutative(s, -1, &args[1], &args[2])) { - args[5] = tcg_swap_cond(args[5]); + if (swap_commutative(-1, &op->args[1], &op->args[2])) { + op->args[5] = tcg_swap_cond(op->args[5]); } /* For movcond, we canonicalize the "false" input reg to match the destination reg so that the tcg backend can implement a "move if true" operation. 
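                For example, "movcond_i32 d, c1, c2, vtrue, d, cond" only
                has to copy vtrue into d when cond holds, which maps
                directly onto a host conditional-move instruction.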
*/ - if (swap_commutative(s, args[0], &args[4], &args[3])) { - args[5] = tcg_invert_cond(args[5]); + if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { + op->args[5] = tcg_invert_cond(op->args[5]); } break; CASE_OP_32_64(add2): - swap_commutative(s, args[0], &args[2], &args[4]); - swap_commutative(s, args[1], &args[3], &args[5]); + swap_commutative(op->args[0], &op->args[2], &op->args[4]); + swap_commutative(op->args[1], &op->args[3], &op->args[5]); break; CASE_OP_32_64(mulu2): CASE_OP_32_64(muls2): - swap_commutative(s, args[0], &args[2], &args[3]); + swap_commutative(op->args[0], &op->args[2], &op->args[3]); break; case INDEX_op_brcond2_i32: - if (swap_commutative2(s, &args[0], &args[2])) { - args[4] = tcg_swap_cond(args[4]); + if (swap_commutative2(&op->args[0], &op->args[2])) { + op->args[4] = tcg_swap_cond(op->args[4]); } break; case INDEX_op_setcond2_i32: - if (swap_commutative2(s, &args[1], &args[3])) { - args[5] = tcg_swap_cond(args[5]); + if (swap_commutative2(&op->args[1], &op->args[3])) { + op->args[5] = tcg_swap_cond(op->args[5]); } break; default: @@ -638,81 +709,83 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, /* Simplify expressions for "shift/rot r, 0, a => movi r, 0", and "sub r, 0, a => neg r, a" case. */ - switch (op) { + switch (opc) { CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): - if (temps[args[1]].state == TCG_TEMP_CONST - && temps[args[1]].val == 0) { - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0); - args += 3; - gen_args += 2; + if (arg_is_const(op->args[1]) + && arg_info(op->args[1])->val == 0) { + tcg_opt_gen_movi(s, op, op->args[0], 0); continue; } break; - CASE_OP_32_64(sub): + CASE_OP_32_64_VEC(sub): { TCGOpcode neg_op; bool have_neg; - if (temps[args[2]].state == TCG_TEMP_CONST) { + if (arg_is_const(op->args[2])) { /* Proceed with possible constant folding. 
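                   (With both inputs constant, the generic constant
                   folding further down reduces the sub to a movi.)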
*/ break; } - if (op == INDEX_op_sub_i32) { + if (opc == INDEX_op_sub_i32) { neg_op = INDEX_op_neg_i32; have_neg = TCG_TARGET_HAS_neg_i32; - } else { + } else if (opc == INDEX_op_sub_i64) { neg_op = INDEX_op_neg_i64; have_neg = TCG_TARGET_HAS_neg_i64; + } else if (TCG_TARGET_HAS_neg_vec) { + TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64; + unsigned vece = TCGOP_VECE(op); + neg_op = INDEX_op_neg_vec; + have_neg = tcg_can_emit_vec_op(s, neg_op, type, vece) > 0; + } else { + break; } if (!have_neg) { break; } - if (temps[args[1]].state == TCG_TEMP_CONST - && temps[args[1]].val == 0) { - s->gen_opc_buf[op_index] = neg_op; - reset_temp(s, args[0]); - gen_args[0] = args[0]; - gen_args[1] = args[2]; - args += 3; - gen_args += 2; + if (arg_is_const(op->args[1]) + && arg_info(op->args[1])->val == 0) { + op->opc = neg_op; + reset_temp(op->args[0]); + op->args[1] = op->args[2]; continue; } } break; - CASE_OP_32_64(xor): + CASE_OP_32_64_VEC(xor): CASE_OP_32_64(nand): - if (temps[args[1]].state != TCG_TEMP_CONST - && temps[args[2]].state == TCG_TEMP_CONST - && temps[args[2]].val == -1) { + if (!arg_is_const(op->args[1]) + && arg_is_const(op->args[2]) + && arg_info(op->args[2])->val == -1) { i = 1; goto try_not; } break; CASE_OP_32_64(nor): - if (temps[args[1]].state != TCG_TEMP_CONST - && temps[args[2]].state == TCG_TEMP_CONST - && temps[args[2]].val == 0) { + if (!arg_is_const(op->args[1]) + && arg_is_const(op->args[2]) + && arg_info(op->args[2])->val == 0) { i = 1; goto try_not; } break; - CASE_OP_32_64(andc): - if (temps[args[2]].state != TCG_TEMP_CONST - && temps[args[1]].state == TCG_TEMP_CONST - && temps[args[1]].val == -1) { + CASE_OP_32_64_VEC(andc): + if (!arg_is_const(op->args[2]) + && arg_is_const(op->args[1]) + && arg_info(op->args[1])->val == -1) { i = 2; goto try_not; } break; - CASE_OP_32_64(orc): + CASE_OP_32_64_VEC(orc): CASE_OP_32_64(eqv): - if (temps[args[2]].state != TCG_TEMP_CONST - && temps[args[1]].state == TCG_TEMP_CONST - && temps[args[1]].val == 0) { + if (!arg_is_const(op->args[2]) + && arg_is_const(op->args[1]) + && arg_info(op->args[1])->val == 0) { i = 2; goto try_not; } @@ -722,7 +795,10 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, TCGOpcode not_op; bool have_not; - if (def->flags & TCG_OPF_64BIT) { + if (def->flags & TCG_OPF_VECTOR) { + not_op = INDEX_op_not_vec; + have_not = TCG_TARGET_HAS_not_vec; + } else if (def->flags & TCG_OPF_64BIT) { not_op = INDEX_op_not_i64; have_not = TCG_TARGET_HAS_not_i64; } else { @@ -732,12 +808,9 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, if (!have_not) { break; } - s->gen_opc_buf[op_index] = not_op; - reset_temp(s, args[0]); - gen_args[0] = args[0]; - gen_args[1] = args[i]; - args += 3; - gen_args += 2; + op->opc = not_op; + reset_temp(op->args[0]); + op->args[1] = op->args[i]; continue; } default: @@ -745,41 +818,34 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, } /* Simplify expression for "op r, a, const => mov r, a" cases */ - switch (op) { - CASE_OP_32_64(add): - CASE_OP_32_64(sub): + switch (opc) { + CASE_OP_32_64_VEC(add): + CASE_OP_32_64_VEC(sub): + CASE_OP_32_64_VEC(or): + CASE_OP_32_64_VEC(xor): + CASE_OP_32_64_VEC(andc): CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): - CASE_OP_32_64(or): - CASE_OP_32_64(xor): - CASE_OP_32_64(andc): - if (temps[args[1]].state != TCG_TEMP_CONST - && temps[args[2]].state == TCG_TEMP_CONST - && temps[args[2]].val == 0) { - goto do_mov3; + if 
(!arg_is_const(op->args[1]) + && arg_is_const(op->args[2]) + && arg_info(op->args[2])->val == 0) { + tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); + continue; } break; - CASE_OP_32_64(and): - CASE_OP_32_64(orc): + CASE_OP_32_64_VEC(and): + CASE_OP_32_64_VEC(orc): CASE_OP_32_64(eqv): - if (temps[args[1]].state != TCG_TEMP_CONST - && temps[args[2]].state == TCG_TEMP_CONST - && temps[args[2]].val == -1) { - goto do_mov3; + if (!arg_is_const(op->args[1]) + && arg_is_const(op->args[2]) + && arg_info(op->args[2])->val == -1) { + tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); + continue; } break; - do_mov3: - if (temps_are_copies(s, args[0], args[1])) { - s->gen_opc_buf[op_index] = INDEX_op_nop; - } else { - tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]); - gen_args += 2; - } - args += 3; - continue; default: break; } @@ -788,23 +854,23 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, output argument is supported. */ mask = -1; affected = -1; - switch (op) { + switch (opc) { CASE_OP_32_64(ext8s): - if ((temps[args[1]].mask & 0x80) != 0) { + if ((arg_info(op->args[1])->mask & 0x80) != 0) { break; } CASE_OP_32_64(ext8u): mask = 0xff; goto and_const; CASE_OP_32_64(ext16s): - if ((temps[args[1]].mask & 0x8000) != 0) { + if ((arg_info(op->args[1])->mask & 0x8000) != 0) { break; } CASE_OP_32_64(ext16u): mask = 0xffff; goto and_const; case INDEX_op_ext32s_i64: - if ((temps[args[1]].mask & 0x80000000) != 0) { + if ((arg_info(op->args[1])->mask & 0x80000000) != 0) { break; } case INDEX_op_ext32u_i64: @@ -812,75 +878,126 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, goto and_const; CASE_OP_32_64(and): - mask = temps[args[2]].mask; - if (temps[args[2]].state == TCG_TEMP_CONST) { + mask = arg_info(op->args[2])->mask; + if (arg_is_const(op->args[2])) { and_const: - affected = temps[args[1]].mask & ~mask; + affected = arg_info(op->args[1])->mask & ~mask; } - mask = temps[args[1]].mask & mask; + mask = arg_info(op->args[1])->mask & mask; + break; + + case INDEX_op_ext_i32_i64: + if ((arg_info(op->args[1])->mask & 0x80000000) != 0) { + break; + } + case INDEX_op_extu_i32_i64: + /* We do not compute affected as it is a size changing op. */ + mask = (uint32_t)arg_info(op->args[1])->mask; break; CASE_OP_32_64(andc): /* Known-zeros does not imply known-ones. Therefore unless - args[2] is constant, we can't infer anything from it. */ - if (temps[args[2]].state == TCG_TEMP_CONST) { - mask = ~temps[args[2]].mask; + op->args[2] is constant, we can't infer anything from it. */ + if (arg_is_const(op->args[2])) { + mask = ~arg_info(op->args[2])->mask; goto and_const; } /* But we certainly know nothing outside args[1] may be set. 
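           (For instance, if args[1] has known-bits mask 0xffff and
           args[2] is the constant 0x00f0, the result mask becomes
           0xffff & ~0x00f0 = 0xff0f.)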
*/ - mask = temps[args[1]].mask; + mask = arg_info(op->args[1])->mask; break; case INDEX_op_sar_i32: - if (temps[args[2]].state == TCG_TEMP_CONST) { - tmp = temps[args[2]].val & 31; - mask = (int32_t)temps[args[1]].mask >> tmp; + if (arg_is_const(op->args[2])) { + tmp = arg_info(op->args[2])->val & 31; + mask = (int32_t)arg_info(op->args[1])->mask >> tmp; } break; case INDEX_op_sar_i64: - if (temps[args[2]].state == TCG_TEMP_CONST) { - tmp = temps[args[2]].val & 63; - mask = (int64_t)temps[args[1]].mask >> tmp; + if (arg_is_const(op->args[2])) { + tmp = arg_info(op->args[2])->val & 63; + mask = (int64_t)arg_info(op->args[1])->mask >> tmp; } break; case INDEX_op_shr_i32: - if (temps[args[2]].state == TCG_TEMP_CONST) { - tmp = temps[args[2]].val & 31; - mask = (uint32_t)temps[args[1]].mask >> tmp; + if (arg_is_const(op->args[2])) { + tmp = arg_info(op->args[2])->val & 31; + mask = (uint32_t)arg_info(op->args[1])->mask >> tmp; } break; case INDEX_op_shr_i64: - if (temps[args[2]].state == TCG_TEMP_CONST) { - tmp = temps[args[2]].val & 63; - mask = (uint64_t)temps[args[1]].mask >> tmp; + if (arg_is_const(op->args[2])) { + tmp = arg_info(op->args[2])->val & 63; + mask = (uint64_t)arg_info(op->args[1])->mask >> tmp; } break; - case INDEX_op_trunc_shr_i32: - mask = (uint64_t)temps[args[1]].mask >> args[2]; + case INDEX_op_extrl_i64_i32: + mask = (uint32_t)arg_info(op->args[1])->mask; + break; + case INDEX_op_extrh_i64_i32: + mask = (uint64_t)arg_info(op->args[1])->mask >> 32; break; CASE_OP_32_64(shl): - if (temps[args[2]].state == TCG_TEMP_CONST) { - tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1); - mask = temps[args[1]].mask << tmp; + if (arg_is_const(op->args[2])) { + tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1); + mask = arg_info(op->args[1])->mask << tmp; } break; CASE_OP_32_64(neg): /* Set to 1 all bits to the left of the rightmost. 
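           (Illustration: a mask of 0x6 has rightmost possibly-set bit
           0x2, giving a result mask of ~0x1, i.e. every bit except
           bit 0.)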
*/ - mask = 0-(temps[args[1]].mask & (0-temps[args[1]].mask)); +#ifdef _MSC_VER + mask = 0 - (arg_info(op->args[1])->mask + & (0 - arg_info(op->args[1])->mask)); +#else + mask = -(arg_info(op->args[1])->mask + & -arg_info(op->args[1])->mask); +#endif break; CASE_OP_32_64(deposit): - mask = (tcg_target_ulong)deposit64(temps[args[1]].mask, args[3], args[4], - temps[args[2]].mask); + mask = deposit64(arg_info(op->args[1])->mask, + op->args[3], op->args[4], + arg_info(op->args[2])->mask); + break; + + CASE_OP_32_64(extract): + mask = extract64(arg_info(op->args[1])->mask, + op->args[2], op->args[3]); + if (op->args[2] == 0) { + affected = arg_info(op->args[1])->mask & ~mask; + } + break; + CASE_OP_32_64(sextract): + mask = sextract64(arg_info(op->args[1])->mask, + op->args[2], op->args[3]); + if (op->args[2] == 0 && (tcg_target_long)mask >= 0) { + affected = arg_info(op->args[1])->mask & ~mask; + } break; CASE_OP_32_64(or): CASE_OP_32_64(xor): - mask = temps[args[1]].mask | temps[args[2]].mask; + mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask; + break; + + case INDEX_op_clz_i32: + case INDEX_op_ctz_i32: + mask = arg_info(op->args[2])->mask | 31; + break; + + case INDEX_op_clz_i64: + case INDEX_op_ctz_i64: + mask = arg_info(op->args[2])->mask | 63; + break; + + case INDEX_op_ctpop_i32: + mask = 32 | 31; + break; + case INDEX_op_ctpop_i64: + mask = 64 | 63; break; CASE_OP_32_64(setcond): @@ -889,7 +1006,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, break; CASE_OP_32_64(movcond): - mask = temps[args[3]].mask | temps[args[4]].mask; + mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask; break; CASE_OP_32_64(ld8u): @@ -904,7 +1021,8 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, CASE_OP_32_64(qemu_ld): { - TCGMemOp mop = args[nb_oargs + nb_iargs]; + TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs]; + MemOp mop = get_memop(oi); if (!(mop & MO_SIGN)) { mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; } @@ -926,39 +1044,25 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, } if (partmask == 0) { - assert(nb_oargs == 1); - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0); - args += nb_args; - gen_args += 2; + tcg_debug_assert(nb_oargs == 1); + tcg_opt_gen_movi(s, op, op->args[0], 0); continue; } if (affected == 0) { - assert(nb_oargs == 1); - if (temps_are_copies(s, args[0], args[1])) { - s->gen_opc_buf[op_index] = INDEX_op_nop; - } else if (temps[args[1]].state != TCG_TEMP_CONST) { - tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]); - gen_args += 2; - } else { - tcg_opt_gen_movi(s, op_index, gen_args, op, - args[0], temps[args[1]].val); - gen_args += 2; - } - args += nb_args; + tcg_debug_assert(nb_oargs == 1); + tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); continue; } /* Simplify expression for "op r, a, 0 => movi r, 0" cases */ - switch (op) { - CASE_OP_32_64(and): - CASE_OP_32_64(mul): + switch (opc) { + CASE_OP_32_64_VEC(and): + CASE_OP_32_64_VEC(mul): CASE_OP_32_64(muluh): CASE_OP_32_64(mulsh): - if ((temps[args[2]].state == TCG_TEMP_CONST - && temps[args[2]].val == 0)) { - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0); - args += 3; - gen_args += 2; + if (arg_is_const(op->args[2]) + && arg_info(op->args[2])->val == 0) { + tcg_opt_gen_movi(s, op, op->args[0], 0); continue; } break; @@ -967,18 +1071,11 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, } /* Simplify expression for "op r, a, a => mov r, a" cases */ - switch 
(op) { - CASE_OP_32_64(or): - CASE_OP_32_64(and): - if (temps_are_copies(s, args[1], args[2])) { - if (temps_are_copies(s, args[0], args[1])) { - s->gen_opc_buf[op_index] = INDEX_op_nop; - } else { - tcg_opt_gen_mov(s, op_index, gen_args, op, - args[0], args[1]); - gen_args += 2; - } - args += 3; + switch (opc) { + CASE_OP_32_64_VEC(or): + CASE_OP_32_64_VEC(and): + if (args_are_copies(op->args[1], op->args[2])) { + tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); continue; } break; @@ -987,14 +1084,12 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, } /* Simplify expression for "op r, a, a => movi r, 0" cases */ - switch (op) { - CASE_OP_32_64(andc): - CASE_OP_32_64(sub): - CASE_OP_32_64(xor): - if (temps_are_copies(s, args[1], args[2])) { - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0); - gen_args += 2; - args += 3; + switch (opc) { + CASE_OP_32_64_VEC(andc): + CASE_OP_32_64_VEC(sub): + CASE_OP_32_64_VEC(xor): + if (args_are_copies(op->args[1], op->args[2])) { + tcg_opt_gen_movi(s, op, op->args[0], 0); continue; } break; @@ -1005,28 +1100,23 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, /* Propagate constants through copy operations and do constant folding. Constants will be substituted to arguments by register allocator where needed and possible. Also detect copies. */ - switch (op) { - CASE_OP_32_64(mov): - if (temps_are_copies(s, args[0], args[1])) { - args += 2; - s->gen_opc_buf[op_index] = INDEX_op_nop; - break; - } - if (temps[args[1]].state != TCG_TEMP_CONST) { - tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]); - gen_args += 2; - args += 2; - break; - } - /* Source argument is constant. Rewrite the operation and - let movi case handle it. */ - args[1] = temps[args[1]].val; - /* fallthrough */ - CASE_OP_32_64(movi): - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], args[1]); - gen_args += 2; - args += 2; + switch (opc) { + CASE_OP_32_64_VEC(mov): + tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); break; + CASE_OP_32_64(movi): + case INDEX_op_dupi_vec: + tcg_opt_gen_movi(s, op, op->args[0], op->args[1]); + break; + + case INDEX_op_dup_vec: + if (arg_is_const(op->args[1])) { + tmp = arg_info(op->args[1])->val; + tmp = dup_const(TCGOP_VECE(op), tmp); + tcg_opt_gen_movi(s, op, op->args[0], tmp); + break; + } + goto do_default; CASE_OP_32_64(not): CASE_OP_32_64(neg): @@ -1034,23 +1124,19 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, CASE_OP_32_64(ext8u): CASE_OP_32_64(ext16s): CASE_OP_32_64(ext16u): + CASE_OP_32_64(ctpop): + CASE_OP_32_64(bswap16): + CASE_OP_32_64(bswap32): + case INDEX_op_bswap64_i64: case INDEX_op_ext32s_i64: case INDEX_op_ext32u_i64: - if (temps[args[1]].state == TCG_TEMP_CONST) { - tmp = do_constant_folding(s, op, temps[args[1]].val, 0); - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); - gen_args += 2; - args += 2; - break; - } - goto do_default; - - case INDEX_op_trunc_shr_i32: - if (temps[args[1]].state == TCG_TEMP_CONST) { - tmp = do_constant_folding(s, op, temps[args[1]].val, args[2]); - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); - gen_args += 2; - args += 3; + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_extrh_i64_i32: + if (arg_is_const(op->args[1])) { + tmp = do_constant_folding(s, opc, arg_info(op->args[1])->val, 0); + tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; @@ -1077,197 +1163,230 @@ static TCGArg 
*tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, CASE_OP_32_64(divu): CASE_OP_32_64(rem): CASE_OP_32_64(remu): - if (temps[args[1]].state == TCG_TEMP_CONST - && temps[args[2]].state == TCG_TEMP_CONST) { - tmp = do_constant_folding(s, op, temps[args[1]].val, - temps[args[2]].val); - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); - gen_args += 2; - args += 3; + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { + tmp = do_constant_folding(s, opc, arg_info(op->args[1])->val, + arg_info(op->args[2])->val); + tcg_opt_gen_movi(s, op, op->args[0], tmp); + break; + } + goto do_default; + + CASE_OP_32_64(clz): + CASE_OP_32_64(ctz): + if (arg_is_const(op->args[1])) { + TCGArg v = arg_info(op->args[1])->val; + if (v != 0) { + tmp = do_constant_folding(s, opc, v, 0); + tcg_opt_gen_movi(s, op, op->args[0], tmp); + } else { + tcg_opt_gen_mov(s, op, op->args[0], op->args[2]); + } break; } goto do_default; CASE_OP_32_64(deposit): - if (temps[args[1]].state == TCG_TEMP_CONST - && temps[args[2]].state == TCG_TEMP_CONST) { - tmp = (TCGArg)deposit64(temps[args[1]].val, args[3], args[4], - temps[args[2]].val); - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); - gen_args += 2; - args += 5; + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { + tmp = deposit64(arg_info(op->args[1])->val, + op->args[3], op->args[4], + arg_info(op->args[2])->val); + tcg_opt_gen_movi(s, op, op->args[0], tmp); + break; + } + goto do_default; + + CASE_OP_32_64(extract): + if (arg_is_const(op->args[1])) { + tmp = extract64(arg_info(op->args[1])->val, + op->args[2], op->args[3]); + tcg_opt_gen_movi(s, op, op->args[0], tmp); + break; + } + goto do_default; + + CASE_OP_32_64(sextract): + if (arg_is_const(op->args[1])) { + tmp = sextract64(arg_info(op->args[1])->val, + op->args[2], op->args[3]); + tcg_opt_gen_movi(s, op, op->args[0], tmp); + break; + } + goto do_default; + + CASE_OP_32_64(extract2): + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { + TCGArg v1 = arg_info(op->args[1])->val; + TCGArg v2 = arg_info(op->args[2])->val; + + if (opc == INDEX_op_extract2_i64) { + tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3])); + } else { + tmp = (int32_t)(((uint32_t)v1 >> op->args[3]) | + ((uint32_t)v2 << (32 - op->args[3]))); + } + tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; CASE_OP_32_64(setcond): - tmp = do_constant_folding_cond(s, op, args[1], args[2], args[3]); + tmp = do_constant_folding_cond(s, opc, op->args[1], + op->args[2], op->args[3]); if (tmp != 2) { - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); - gen_args += 2; - args += 4; + tcg_opt_gen_movi(s, op, op->args[0], tmp); break; } goto do_default; CASE_OP_32_64(brcond): - tmp = do_constant_folding_cond(s, op, args[0], args[1], args[2]); + tmp = do_constant_folding_cond(s, opc, op->args[0], + op->args[1], op->args[2]); if (tmp != 2) { if (tmp) { - reset_all_temps(s, nb_temps); - s->gen_opc_buf[op_index] = INDEX_op_br; - gen_args[0] = args[3]; - gen_args += 1; + bitmap_zero(temps_used.l, nb_temps); + op->opc = INDEX_op_br; + op->args[0] = op->args[3]; } else { - s->gen_opc_buf[op_index] = INDEX_op_nop; + tcg_op_remove(s, op); } - args += 4; break; } goto do_default; CASE_OP_32_64(movcond): - tmp = do_constant_folding_cond(s, op, args[1], args[2], args[5]); + tmp = do_constant_folding_cond(s, opc, op->args[1], + op->args[2], op->args[5]); if (tmp != 2) { - if (temps_are_copies(s, args[0], args[4-tmp])) { - s->gen_opc_buf[op_index] = INDEX_op_nop; - } else if 
(temps[args[4-tmp]].state == TCG_TEMP_CONST) { - tcg_opt_gen_movi(s, op_index, gen_args, op, - args[0], temps[args[4-tmp]].val); - gen_args += 2; - } else { - tcg_opt_gen_mov(s, op_index, gen_args, op, - args[0], args[4-tmp]); - gen_args += 2; - } - args += 6; + tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]); break; } + if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { + tcg_target_ulong tv = arg_info(op->args[3])->val; + tcg_target_ulong fv = arg_info(op->args[4])->val; + TCGCond cond = op->args[5]; + if (fv == 1 && tv == 0) { + cond = tcg_invert_cond(cond); + } else if (!(tv == 1 && fv == 0)) { + goto do_default; + } + op->args[3] = cond; + op->opc = opc = (opc == INDEX_op_movcond_i32 + ? INDEX_op_setcond_i32 + : INDEX_op_setcond_i64); + nb_iargs = 2; + } goto do_default; case INDEX_op_add2_i32: case INDEX_op_sub2_i32: - if (temps[args[2]].state == TCG_TEMP_CONST - && temps[args[3]].state == TCG_TEMP_CONST - && temps[args[4]].state == TCG_TEMP_CONST - && temps[args[5]].state == TCG_TEMP_CONST) { - uint32_t al = temps[args[2]].val; - uint32_t ah = temps[args[3]].val; - uint32_t bl = temps[args[4]].val; - uint32_t bh = temps[args[5]].val; + if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) + && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) { + uint32_t al = arg_info(op->args[2])->val; + uint32_t ah = arg_info(op->args[3])->val; + uint32_t bl = arg_info(op->args[4])->val; + uint32_t bh = arg_info(op->args[5])->val; uint64_t a = ((uint64_t)ah << 32) | al; uint64_t b = ((uint64_t)bh << 32) | bl; TCGArg rl, rh; + TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32); - if (op == INDEX_op_add2_i32) { + if (opc == INDEX_op_add2_i32) { a += b; } else { a -= b; } - /* We emit the extra nop when we emit the add2/sub2. */ - assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop); - - rl = args[0]; - rh = args[1]; - tcg_opt_gen_movi(s, op_index, &gen_args[0], - op, rl, (uint32_t)a); - tcg_opt_gen_movi(s, ++op_index, &gen_args[2], - op, rh, (uint32_t)(a >> 32)); - gen_args += 4; - args += 6; + rl = op->args[0]; + rh = op->args[1]; + tcg_opt_gen_movi(s, op, rl, (int32_t)a); + tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32)); break; } goto do_default; case INDEX_op_mulu2_i32: - if (temps[args[2]].state == TCG_TEMP_CONST - && temps[args[3]].state == TCG_TEMP_CONST) { - uint32_t a = temps[args[2]].val; - uint32_t b = temps[args[3]].val; + if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { + uint32_t a = arg_info(op->args[2])->val; + uint32_t b = arg_info(op->args[3])->val; uint64_t r = (uint64_t)a * b; TCGArg rl, rh; + TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32); - /* We emit the extra nop when we emit the mulu2. 
*/ - assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop); - - rl = args[0]; - rh = args[1]; - tcg_opt_gen_movi(s, op_index, &gen_args[0], - op, rl, (uint32_t)r); - tcg_opt_gen_movi(s, ++op_index, &gen_args[2], - op, rh, (uint32_t)(r >> 32)); - gen_args += 4; - args += 4; + rl = op->args[0]; + rh = op->args[1]; + tcg_opt_gen_movi(s, op, rl, (int32_t)r); + tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32)); break; } goto do_default; case INDEX_op_brcond2_i32: - tmp = do_constant_folding_cond2(s, &args[0], &args[2], args[4]); + tmp = do_constant_folding_cond2(&op->args[0], &op->args[2], + op->args[4]); if (tmp != 2) { if (tmp) { do_brcond_true: - reset_all_temps(s, nb_temps); - s->gen_opc_buf[op_index] = INDEX_op_br; - gen_args[0] = args[5]; - gen_args += 1; + bitmap_zero(temps_used.l, nb_temps); + op->opc = INDEX_op_br; + op->args[0] = op->args[5]; } else { do_brcond_false: - s->gen_opc_buf[op_index] = INDEX_op_nop; + tcg_op_remove(s, op); } - } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE) - && temps[args[2]].state == TCG_TEMP_CONST - && temps[args[3]].state == TCG_TEMP_CONST - && temps[args[2]].val == 0 - && temps[args[3]].val == 0) { + } else if ((op->args[4] == TCG_COND_LT + || op->args[4] == TCG_COND_GE) + && arg_is_const(op->args[2]) + && arg_info(op->args[2])->val == 0 + && arg_is_const(op->args[3]) + && arg_info(op->args[3])->val == 0) { /* Simplify LT/GE comparisons vs zero to a single compare vs the high word of the input. */ do_brcond_high: - reset_all_temps(s, nb_temps); - s->gen_opc_buf[op_index] = INDEX_op_brcond_i32; - gen_args[0] = args[1]; - gen_args[1] = args[3]; - gen_args[2] = args[4]; - gen_args[3] = args[5]; - gen_args += 4; - } else if (args[4] == TCG_COND_EQ) { + bitmap_zero(temps_used.l, nb_temps); + op->opc = INDEX_op_brcond_i32; + op->args[0] = op->args[1]; + op->args[1] = op->args[3]; + op->args[2] = op->args[4]; + op->args[3] = op->args[5]; + } else if (op->args[4] == TCG_COND_EQ) { /* Simplify EQ comparisons where one of the pairs can be simplified. */ tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, - args[0], args[2], TCG_COND_EQ); + op->args[0], op->args[2], + TCG_COND_EQ); if (tmp == 0) { goto do_brcond_false; } else if (tmp == 1) { goto do_brcond_high; } tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, - args[1], args[3], TCG_COND_EQ); + op->args[1], op->args[3], + TCG_COND_EQ); if (tmp == 0) { goto do_brcond_false; } else if (tmp != 1) { goto do_default; } do_brcond_low: - reset_all_temps(s, nb_temps); - s->gen_opc_buf[op_index] = INDEX_op_brcond_i32; - gen_args[0] = args[0]; - gen_args[1] = args[2]; - gen_args[2] = args[4]; - gen_args[3] = args[5]; - gen_args += 4; - } else if (args[4] == TCG_COND_NE) { + bitmap_zero(temps_used.l, nb_temps); + op->opc = INDEX_op_brcond_i32; + op->args[1] = op->args[2]; + op->args[2] = op->args[4]; + op->args[3] = op->args[5]; + } else if (op->args[4] == TCG_COND_NE) { /* Simplify NE comparisons where one of the pairs can be simplified. 
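               (E.g. for (al,ah) NE (bl,bh): if the low halves are
               provably unequal the branch is always taken; if they are
               provably equal, the outcome depends only on the high
               halves.)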
*/ tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, - args[0], args[2], TCG_COND_NE); + op->args[0], op->args[2], + TCG_COND_NE); if (tmp == 0) { goto do_brcond_high; } else if (tmp == 1) { goto do_brcond_true; } tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, - args[1], args[3], TCG_COND_NE); + op->args[1], op->args[3], + TCG_COND_NE); if (tmp == 0) { goto do_brcond_low; } else if (tmp == 1) { @@ -1277,69 +1396,68 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, } else { goto do_default; } - args += 6; break; case INDEX_op_setcond2_i32: - tmp = do_constant_folding_cond2(s, &args[1], &args[3], args[5]); + tmp = do_constant_folding_cond2(&op->args[1], &op->args[3], + op->args[5]); if (tmp != 2) { do_setcond_const: - tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); - gen_args += 2; - } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE) - && temps[args[3]].state == TCG_TEMP_CONST - && temps[args[4]].state == TCG_TEMP_CONST - && temps[args[3]].val == 0 - && temps[args[4]].val == 0) { + tcg_opt_gen_movi(s, op, op->args[0], tmp); + } else if ((op->args[5] == TCG_COND_LT + || op->args[5] == TCG_COND_GE) + && arg_is_const(op->args[3]) + && arg_info(op->args[3])->val == 0 + && arg_is_const(op->args[4]) + && arg_info(op->args[4])->val == 0) { /* Simplify LT/GE comparisons vs zero to a single compare vs the high word of the input. */ do_setcond_high: - s->gen_opc_buf[op_index] = INDEX_op_setcond_i32; - reset_temp(s, args[0]); - temps[args[0]].mask = 1; - gen_args[0] = args[0]; - gen_args[1] = args[2]; - gen_args[2] = args[4]; - gen_args[3] = args[5]; - gen_args += 4; - } else if (args[5] == TCG_COND_EQ) { + reset_temp(op->args[0]); + arg_info(op->args[0])->mask = 1; + op->opc = INDEX_op_setcond_i32; + op->args[1] = op->args[2]; + op->args[2] = op->args[4]; + op->args[3] = op->args[5]; + } else if (op->args[5] == TCG_COND_EQ) { /* Simplify EQ comparisons where one of the pairs can be simplified. */ tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, - args[1], args[3], TCG_COND_EQ); + op->args[1], op->args[3], + TCG_COND_EQ); if (tmp == 0) { goto do_setcond_const; } else if (tmp == 1) { goto do_setcond_high; } tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, - args[2], args[4], TCG_COND_EQ); + op->args[2], op->args[4], + TCG_COND_EQ); if (tmp == 0) { goto do_setcond_high; } else if (tmp != 1) { goto do_default; } do_setcond_low: - reset_temp(s, args[0]); - temps[args[0]].mask = 1; - s->gen_opc_buf[op_index] = INDEX_op_setcond_i32; - gen_args[0] = args[0]; - gen_args[1] = args[1]; - gen_args[2] = args[3]; - gen_args[3] = args[5]; - gen_args += 4; - } else if (args[5] == TCG_COND_NE) { + reset_temp(op->args[0]); + arg_info(op->args[0])->mask = 1; + op->opc = INDEX_op_setcond_i32; + op->args[2] = op->args[3]; + op->args[3] = op->args[5]; + } else if (op->args[5] == TCG_COND_NE) { /* Simplify NE comparisons where one of the pairs can be simplified. 
*/ tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, - args[1], args[3], TCG_COND_NE); + op->args[1], op->args[3], + TCG_COND_NE); if (tmp == 0) { goto do_setcond_high; } else if (tmp == 1) { goto do_setcond_const; } tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, - args[2], args[4], TCG_COND_NE); + op->args[2], op->args[4], + TCG_COND_NE); if (tmp == 0) { goto do_setcond_low; } else if (tmp == 1) { @@ -1349,14 +1467,15 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, } else { goto do_default; } - args += 6; break; case INDEX_op_call: - if (!(args[nb_oargs + nb_iargs + 1] + if (!(op->args[nb_oargs + nb_iargs + 1] & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { for (i = 0; i < nb_globals; i++) { - reset_temp(s, i); + if (test_bit(i, temps_used.l)) { + reset_ts(&s->temps[i]); + } } } goto do_reset_output; @@ -1369,37 +1488,57 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, block, otherwise we only trash the output args. "mask" is the non-zero bits mask for the first output arg. */ if (def->flags & TCG_OPF_BB_END) { - reset_all_temps(s, nb_temps); + bitmap_zero(temps_used.l, nb_temps); } else { do_reset_output: for (i = 0; i < nb_oargs; i++) { - if (args[i] >= TCG_MAX_TEMPS) { - continue; - } - reset_temp(s, args[i]); + reset_temp(op->args[i]); /* Save the corresponding known-zero bits mask for the first output argument (only one supported so far). */ if (i == 0) { - temps[args[i]].mask = mask; + arg_info(op->args[i])->mask = mask; } } } - for (i = 0; i < nb_args; i++) { - gen_args[i] = args[i]; - } - args += nb_args; - gen_args += nb_args; break; } + + /* Eliminate duplicate and redundant fence instructions. */ + if (prev_mb) { + switch (opc) { + case INDEX_op_mb: + /* Merge two barriers of the same type into one, + * or a weaker barrier into a stronger one, + * or two weaker barriers into a stronger one. + * mb X; mb Y => mb X|Y + * mb; strl => mb; st + * ldaq; mb => ld; mb + * ldaq; strl => ld; mb; st + * Other combinations are also merged into a strong + * barrier. This is stricter than specified but for + * the purposes of TCG is better than not optimizing. + */ + prev_mb->args[0] |= op->args[0]; + tcg_op_remove(s, op); + break; + + default: + /* Opcodes that end the block stop the optimization. */ + if ((def->flags & TCG_OPF_BB_END) == 0) { + break; + } + /* fallthru */ + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + case INDEX_op_call: + /* Opcodes that touch guest memory stop the optimization. */ + prev_mb = NULL; + break; + } + } else if (opc == INDEX_op_mb) { + prev_mb = op; + } } - - return gen_args; -} - -TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, - TCGArg *args, TCGOpDef *tcg_op_defs) -{ - TCGArg *res; - res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs); - return res; } diff --git a/qemu/tcg/ppc/tcg-target.h b/qemu/tcg/ppc/tcg-target.h index 32ac4424..4fa21f0e 100644 --- a/qemu/tcg/ppc/tcg-target.h +++ b/qemu/tcg/ppc/tcg-target.h @@ -21,8 +21,9 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ -#ifndef TCG_TARGET_PPC64 -#define TCG_TARGET_PPC64 1 + +#ifndef PPC_TCG_TARGET_H +#define PPC_TCG_TARGET_H #ifdef _ARCH_PPC64 # define TCG_TARGET_REG_BITS 64 @@ -30,8 +31,9 @@ # define TCG_TARGET_REG_BITS 32 #endif -#define TCG_TARGET_NB_REGS 32 +#define TCG_TARGET_NB_REGS 64 #define TCG_TARGET_INSN_UNIT_SIZE 4 +#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 typedef enum { TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3, @@ -43,10 +45,34 @@ typedef enum { TCG_REG_R24, TCG_REG_R25, TCG_REG_R26, TCG_REG_R27, TCG_REG_R28, TCG_REG_R29, TCG_REG_R30, TCG_REG_R31, + TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, + TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, + TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, + TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, + TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, + TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, + TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, + TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, + TCG_REG_CALL_STACK = TCG_REG_R1, TCG_AREG0 = TCG_REG_R27 } TCGReg; +typedef enum { + tcg_isa_base, + tcg_isa_2_06, + tcg_isa_2_07, + tcg_isa_3_00, +} TCGPowerISA; + +extern TCGPowerISA have_isa; +extern bool have_altivec; +extern bool have_vsx; + +#define have_isa_2_06 (have_isa >= tcg_isa_2_06) +#define have_isa_2_07 (have_isa >= tcg_isa_2_07) +#define have_isa_3_00 (have_isa >= tcg_isa_3_00) + /* optional instructions automatically implemented */ #define TCG_TARGET_HAS_ext8u_i32 0 /* andi */ #define TCG_TARGET_HAS_ext16u_i32 0 @@ -66,17 +92,26 @@ typedef enum { #define TCG_TARGET_HAS_eqv_i32 1 #define TCG_TARGET_HAS_nand_i32 1 #define TCG_TARGET_HAS_nor_i32 1 +#define TCG_TARGET_HAS_clz_i32 1 +#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00 +#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06 #define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_extract_i32 1 +#define TCG_TARGET_HAS_sextract_i32 0 +#define TCG_TARGET_HAS_extract2_i32 0 #define TCG_TARGET_HAS_movcond_i32 1 #define TCG_TARGET_HAS_mulu2_i32 0 #define TCG_TARGET_HAS_muls2_i32 0 #define TCG_TARGET_HAS_muluh_i32 1 #define TCG_TARGET_HAS_mulsh_i32 1 +#define TCG_TARGET_HAS_goto_ptr 1 +#define TCG_TARGET_HAS_direct_jump 1 #if TCG_TARGET_REG_BITS == 64 #define TCG_TARGET_HAS_add2_i32 0 #define TCG_TARGET_HAS_sub2_i32 0 -#define TCG_TARGET_HAS_trunc_shr_i32 0 +#define TCG_TARGET_HAS_extrl_i64_i32 0 +#define TCG_TARGET_HAS_extrh_i64_i32 0 #define TCG_TARGET_HAS_div_i64 1 #define TCG_TARGET_HAS_rem_i64 0 #define TCG_TARGET_HAS_rot_i64 1 @@ -96,7 +131,13 @@ typedef enum { #define TCG_TARGET_HAS_eqv_i64 1 #define TCG_TARGET_HAS_nand_i64 1 #define TCG_TARGET_HAS_nor_i64 1 +#define TCG_TARGET_HAS_clz_i64 1 +#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00 +#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06 #define TCG_TARGET_HAS_deposit_i64 1 +#define TCG_TARGET_HAS_extract_i64 1 +#define TCG_TARGET_HAS_sextract_i64 0 +#define TCG_TARGET_HAS_extract2_i64 0 #define TCG_TARGET_HAS_movcond_i64 1 #define TCG_TARGET_HAS_add2_i64 1 #define TCG_TARGET_HAS_sub2_i64 1 @@ -106,6 +147,39 @@ typedef enum { #define TCG_TARGET_HAS_mulsh_i64 1 #endif +/* + * While technically Altivec could support V64, it has no 64-bit store + * instruction and substituting two 32-bit stores makes the generated + * code quite large. 
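+ *
+ * Sketch of that cost (illustrative, matching tcg_out_st() below):
+ * without VSX a single v64 store becomes a possible vsldoi to
+ * rotate the doubleword into place plus two stvewx word stores at
+ * offset and offset + 4, which is why TCG_TARGET_HAS_v64 is gated
+ * on have_vsx.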
+ */ +#define TCG_TARGET_HAS_v64 have_vsx +#define TCG_TARGET_HAS_v128 have_altivec +#define TCG_TARGET_HAS_v256 0 + +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec have_isa_2_07 +#define TCG_TARGET_HAS_not_vec 1 +#define TCG_TARGET_HAS_neg_vec have_isa_3_00 +#define TCG_TARGET_HAS_abs_vec 0 +#define TCG_TARGET_HAS_shi_vec 0 +#define TCG_TARGET_HAS_shs_vec 0 +#define TCG_TARGET_HAS_shv_vec 1 +#define TCG_TARGET_HAS_cmp_vec 1 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 1 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec have_vsx +#define TCG_TARGET_HAS_cmpsel_vec 0 + void flush_icache_range(uintptr_t start, uintptr_t stop); +void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t); + +#define TCG_TARGET_DEFAULT_MO (0) +#define TCG_TARGET_HAS_MEMORY_BSWAP 1 + +#ifdef CONFIG_SOFTMMU +#define TCG_TARGET_NEED_LDST_LABELS +#endif +#define TCG_TARGET_NEED_POOL_LABELS #endif diff --git a/qemu/tcg/ppc/tcg-target.c b/qemu/tcg/ppc/tcg-target.inc.c similarity index 50% rename from qemu/tcg/ppc/tcg-target.c rename to qemu/tcg/ppc/tcg-target.inc.c index cd7aabd4..41a1c511 100644 --- a/qemu/tcg/ppc/tcg-target.c +++ b/qemu/tcg/ppc/tcg-target.inc.c @@ -22,7 +22,8 @@ * THE SOFTWARE. */ -#include "tcg-be-ldst.h" +#include "elf.h" +#include "../tcg-pool.inc.c" #if defined _CALL_DARWIN || defined __APPLE__ #define TCG_TARGET_CALL_DARWIN @@ -41,29 +42,11 @@ # define TCG_REG_TMP1 TCG_REG_R12 #endif -/* For the 64-bit target, we don't like the 5 insn sequence needed to build - full 64-bit addresses. Better to have a base register to which we can - apply a 32-bit displacement. +#define TCG_VEC_TMP1 TCG_REG_V0 +#define TCG_VEC_TMP2 TCG_REG_V1 - There are generally three items of interest: - (1) helper functions in the main executable, - (2) TranslationBlock data structures, - (3) the return address in the epilogue. - - For user-only, we USE_STATIC_CODE_GEN_BUFFER, so the code_gen_buffer - will be inside the main executable, and thus near enough to make a - pointer to the epilogue be within 2GB of all helper functions. - - For softmmu, we'll let the kernel choose the address of code_gen_buffer, - and odds are it'll be somewhere close to the main malloc arena, and so - a pointer to the epilogue will be within 2GB of the TranslationBlocks. - - For --enable-pie, everything will be kinda near everything else, - somewhere in high memory. - - Thus we choose to keep the return address in a call-saved register. */ -#define TCG_REG_RA TCG_REG_R31 -#define USE_REG_RA (TCG_TARGET_REG_BITS == 64) +#define TCG_REG_TB TCG_REG_R31 +#define USE_REG_TB (TCG_TARGET_REG_BITS == 64) /* Shorthand for size of a pointer. Avoid promotion to unsigned. 
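   (sizeof yields size_t, so e.g. -sizeof(void *) in an offset
   computation would silently wrap to a huge unsigned value; the
   cast to int keeps such arithmetic signed.)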
*/ #define SZP ((int)sizeof(void *)) @@ -77,58 +60,29 @@ #define TCG_CT_CONST_U32 0x800 #define TCG_CT_CONST_ZERO 0x1000 #define TCG_CT_CONST_MONE 0x2000 +#define TCG_CT_CONST_WSZ 0x4000 static tcg_insn_unit *tb_ret_addr; -#ifndef GUEST_BASE -#define GUEST_BASE 0 -#endif +TCGPowerISA have_isa; +static bool have_isel; +bool have_altivec; +bool have_vsx; -#include "elf.h" -static bool have_isa_2_06; -#define HAVE_ISA_2_06 have_isa_2_06 -#define HAVE_ISEL have_isa_2_06 - -#ifdef CONFIG_USE_GUEST_BASE +#ifndef CONFIG_SOFTMMU #define TCG_GUEST_BASE_REG 30 -#else -#define TCG_GUEST_BASE_REG 0 #endif -#ifndef NDEBUG -static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { - "r0", - "r1", - "r2", - "r3", - "r4", - "r5", - "r6", - "r7", - "r8", - "r9", - "r10", - "r11", - "r12", - "r13", - "r14", - "r15", - "r16", - "r17", - "r18", - "r19", - "r20", - "r21", - "r22", - "r23", - "r24", - "r25", - "r26", - "r27", - "r28", - "r29", - "r30", - "r31" +#ifdef CONFIG_DEBUG_TCG +static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", }; #endif @@ -163,6 +117,26 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_R5, TCG_REG_R4, TCG_REG_R3, + + /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */ + TCG_REG_V2, /* call clobbered, vectors */ + TCG_REG_V3, + TCG_REG_V4, + TCG_REG_V5, + TCG_REG_V6, + TCG_REG_V7, + TCG_REG_V8, + TCG_REG_V9, + TCG_REG_V10, + TCG_REG_V11, + TCG_REG_V12, + TCG_REG_V13, + TCG_REG_V14, + TCG_REG_V15, + TCG_REG_V16, + TCG_REG_V17, + TCG_REG_V18, + TCG_REG_V19, }; static const int tcg_target_call_iarg_regs[] = { @@ -213,75 +187,57 @@ static inline bool in_range_b(tcg_target_long target) static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target) { ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); - assert(in_range_b(disp)); + tcg_debug_assert(in_range_b(disp)); return disp & 0x3fffffc; } -static void reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target) +static bool reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target) { - *pc = (*pc & ~0x3fffffc) | reloc_pc24_val(pc, target); + ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); + if (in_range_b(disp)) { + *pc = (*pc & ~0x3fffffc) | (disp & 0x3fffffc); + return true; + } + return false; } static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target) { ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); - assert(disp == (int16_t) disp); + tcg_debug_assert(disp == (int16_t) disp); return disp & 0xfffc; } -static void reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target) +static bool reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target) { - *pc = (*pc & ~0xfffc) | reloc_pc14_val(pc, target); -} - -static inline void tcg_out_b_noaddr(TCGContext *s, int insn) -{ - unsigned retrans = *s->code_ptr & 0x3fffffc; - tcg_out32(s, insn | retrans); -} - -static inline void tcg_out_bc_noaddr(TCGContext *s, int insn) -{ - unsigned retrans = *s->code_ptr & 0xfffc; - tcg_out32(s, insn | retrans); -} - -static void patch_reloc(tcg_insn_unit *code_ptr, int type, - intptr_t value, intptr_t addend) -{ - tcg_insn_unit *target = (tcg_insn_unit *)value; - - 
assert(addend == 0); - switch (type) { - case R_PPC_REL14: - reloc_pc14(code_ptr, target); - break; - case R_PPC_REL24: - reloc_pc24(code_ptr, target); - break; - default: - tcg_abort(); + ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); + if (disp == (int16_t) disp) { + *pc = (*pc & ~0xfffc) | (disp & 0xfffc); + return true; } + return false; } /* parse target specific constraints */ -static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +static const char *target_parse_constraint(TCGArgConstraint *ct, + const char *ct_str, TCGType type) { - const char *ct_str; - - ct_str = *pct_str; - switch (ct_str[0]) { + switch (*ct_str++) { case 'A': case 'B': case 'C': case 'D': ct->ct |= TCG_CT_REG; tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A'); break; case 'r': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + ct->u.regs = 0xffffffff; + break; + case 'v': + ct->ct |= TCG_CT_REG; + ct->u.regs = 0xffffffff00000000ull; break; case 'L': /* qemu_ld constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + ct->u.regs = 0xffffffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); #ifdef CONFIG_SOFTMMU tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); @@ -290,7 +246,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) break; case 'S': /* qemu_st constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + ct->u.regs = 0xffffffff; tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); #ifdef CONFIG_SOFTMMU tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); @@ -313,15 +269,16 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) case 'U': ct->ct |= TCG_CT_CONST_U32; break; + case 'W': + ct->ct |= TCG_CT_CONST_WSZ; + break; case 'Z': ct->ct |= TCG_CT_CONST_ZERO; break; default: - return -1; + return NULL; } - ct_str++; - *pct_str = ct_str; - return 0; + return ct_str; } /* test if a constant matches the constraint */ @@ -351,6 +308,9 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, return 1; } else if ((ct & TCG_CT_CONST_MONE) && val == -1) { return 1; + } else if ((ct & TCG_CT_CONST_WSZ) + && val == (type == TCG_TYPE_I32 ? 
32 : 64)) { + return 1; } return 0; } @@ -362,6 +322,7 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define XO31(opc) (OPCD(31)|((opc)<<1)) #define XO58(opc) (OPCD(58)|(opc)) #define XO62(opc) (OPCD(62)|(opc)) +#define VX4(opc) (OPCD(4)|(opc)) #define B OPCD( 18) #define BC OPCD( 16) @@ -369,6 +330,7 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define LHZ OPCD( 40) #define LHA OPCD( 42) #define LWZ OPCD( 32) +#define LWZUX XO31( 55) #define STB OPCD( 38) #define STH OPCD( 44) #define STW OPCD( 36) @@ -380,6 +342,7 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define LD XO58( 0) #define LDX XO31( 21) #define LDU XO58( 1) +#define LDUX XO31( 53) #define LWA XO58( 2) #define LWAX XO31(341) @@ -455,6 +418,10 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define NOR XO31(124) #define CNTLZW XO31( 26) #define CNTLZD XO31( 58) +#define CNTTZW XO31(538) +#define CNTTZD XO31(570) +#define CNTPOPW XO31(378) +#define CNTPOPD XO31(506) #define ANDC XO31( 60) #define ORC XO31(412) #define EQV XO31(284) @@ -475,6 +442,10 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define STHX XO31(407) #define STWX XO31(151) +#define EIEIO XO31(854) +#define HWSYNC XO31(598) +#define LWSYNC (HWSYNC | (1u << 21)) + #define SPR(a, b) ((((a)<<5)|(b))<<11) #define LR SPR(8, 0) #define CTR SPR(9, 0) @@ -493,6 +464,147 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define NOP ORI /* ori 0,0,0 */ +#define LVX XO31(103) +#define LVEBX XO31(7) +#define LVEHX XO31(39) +#define LVEWX XO31(71) +#define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */ +#define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */ +#define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */ +#define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */ +#define LXSD (OPCD(57) | 2) /* v3.00 */ +#define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */ + +#define STVX XO31(231) +#define STVEWX XO31(199) +#define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */ +#define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */ +#define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */ +#define STXSD (OPCD(61) | 2) /* v3.00 */ + +#define VADDSBS VX4(768) +#define VADDUBS VX4(512) +#define VADDUBM VX4(0) +#define VADDSHS VX4(832) +#define VADDUHS VX4(576) +#define VADDUHM VX4(64) +#define VADDSWS VX4(896) +#define VADDUWS VX4(640) +#define VADDUWM VX4(128) +#define VADDUDM VX4(192) /* v2.07 */ + +#define VSUBSBS VX4(1792) +#define VSUBUBS VX4(1536) +#define VSUBUBM VX4(1024) +#define VSUBSHS VX4(1856) +#define VSUBUHS VX4(1600) +#define VSUBUHM VX4(1088) +#define VSUBSWS VX4(1920) +#define VSUBUWS VX4(1664) +#define VSUBUWM VX4(1152) +#define VSUBUDM VX4(1216) /* v2.07 */ + +#define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */ +#define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */ + +#define VMAXSB VX4(258) +#define VMAXSH VX4(322) +#define VMAXSW VX4(386) +#define VMAXSD VX4(450) /* v2.07 */ +#define VMAXUB VX4(2) +#define VMAXUH VX4(66) +#define VMAXUW VX4(130) +#define VMAXUD VX4(194) /* v2.07 */ +#define VMINSB VX4(770) +#define VMINSH VX4(834) +#define VMINSW VX4(898) +#define VMINSD VX4(962) /* v2.07 */ +#define VMINUB VX4(514) +#define VMINUH VX4(578) +#define VMINUW VX4(642) +#define VMINUD VX4(706) /* v2.07 */ + +#define VCMPEQUB VX4(6) +#define VCMPEQUH VX4(70) +#define VCMPEQUW VX4(134) +#define VCMPEQUD VX4(199) /* v2.07 */ +#define VCMPGTSB VX4(774) +#define VCMPGTSH VX4(838) +#define VCMPGTSW VX4(902) +#define VCMPGTSD 
VX4(967) /* v2.07 */ +#define VCMPGTUB VX4(518) +#define VCMPGTUH VX4(582) +#define VCMPGTUW VX4(646) +#define VCMPGTUD VX4(711) /* v2.07 */ +#define VCMPNEB VX4(7) /* v3.00 */ +#define VCMPNEH VX4(71) /* v3.00 */ +#define VCMPNEW VX4(135) /* v3.00 */ + +#define VSLB VX4(260) +#define VSLH VX4(324) +#define VSLW VX4(388) +#define VSLD VX4(1476) /* v2.07 */ +#define VSRB VX4(516) +#define VSRH VX4(580) +#define VSRW VX4(644) +#define VSRD VX4(1732) /* v2.07 */ +#define VSRAB VX4(772) +#define VSRAH VX4(836) +#define VSRAW VX4(900) +#define VSRAD VX4(964) /* v2.07 */ +#define VRLB VX4(4) +#define VRLH VX4(68) +#define VRLW VX4(132) +#define VRLD VX4(196) /* v2.07 */ + +#define VMULEUB VX4(520) +#define VMULEUH VX4(584) +#define VMULEUW VX4(648) /* v2.07 */ +#define VMULOUB VX4(8) +#define VMULOUH VX4(72) +#define VMULOUW VX4(136) /* v2.07 */ +#define VMULUWM VX4(137) /* v2.07 */ +#define VMSUMUHM VX4(38) + +#define VMRGHB VX4(12) +#define VMRGHH VX4(76) +#define VMRGHW VX4(140) +#define VMRGLB VX4(268) +#define VMRGLH VX4(332) +#define VMRGLW VX4(396) + +#define VPKUHUM VX4(14) +#define VPKUWUM VX4(78) + +#define VAND VX4(1028) +#define VANDC VX4(1092) +#define VNOR VX4(1284) +#define VOR VX4(1156) +#define VXOR VX4(1220) +#define VEQV VX4(1668) /* v2.07 */ +#define VNAND VX4(1412) /* v2.07 */ +#define VORC VX4(1348) /* v2.07 */ + +#define VSPLTB VX4(524) +#define VSPLTH VX4(588) +#define VSPLTW VX4(652) +#define VSPLTISB VX4(780) +#define VSPLTISH VX4(844) +#define VSPLTISW VX4(908) + +#define VSLDOI VX4(44) + +#define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */ +#define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */ +#define XXSPLTIB (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */ + +#define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */ +#define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */ +#define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */ +#define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */ +#define MTVSRDD (XO31(435) | 1) /* v3.00, force tx=1 */ +#define MTVSRWS (XO31(403) | 1) /* v3.00, force tx=1 */ + #define RT(r) ((r)<<21) #define RS(r) ((r)<<21) #define RA(r) ((r)<<16) @@ -505,6 +617,11 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define MB64(b) ((b)<<5) #define FXM(b) (1 << (19 - (b))) +#define VRT(r) (((r) & 31) << 21) +#define VRA(r) (((r) & 31) << 16) +#define VRB(r) (((r) & 31) << 11) +#define VRC(r) (((r) & 31) << 6) + #define LK 1 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b)) @@ -557,21 +674,104 @@ static const uint32_t tcg_to_isel[] = { [TCG_COND_GTU] = ISEL | BC_(7, CR_GT), }; +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + tcg_insn_unit *target; + int16_t lo; + int32_t hi; + + value += addend; + target = (tcg_insn_unit *)value; + + switch (type) { + case R_PPC_REL14: + return reloc_pc14(code_ptr, target); + case R_PPC_REL24: + return reloc_pc24(code_ptr, target); + case R_PPC_ADDR16: + /* + * We are (slightly) abusing this relocation type. In particular, + * assert that the low 2 bits are zero, and do not modify them. + * That way we can use this with LD et al that have opcode bits + * in the low 2 bits of the insn. + */ + if ((value & 3) || value != (int16_t)value) { + return false; + } + *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc); + break; + case R_PPC_ADDR32: + /* + * We are abusing this relocation type. Again, this points to + * a pair of insns, lis + load. 
This is an absolute address + * relocation for PPC32 so the lis cannot be removed. + */ + lo = value; + hi = value - lo; + if (hi + lo != value) { + return false; + } + code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16); + code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo); + break; + default: + g_assert_not_reached(); + } + return true; +} + static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, TCGReg base, tcg_target_long offset); -static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - if (ret != arg) { - tcg_out32(s, OR | SAB(arg, ret, arg)); + if (ret == arg) { + return true; } + switch (type) { + case TCG_TYPE_I64: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + /* fallthru */ + case TCG_TYPE_I32: + if (ret < TCG_REG_V0) { + if (arg < TCG_REG_V0) { + tcg_out32(s, OR | SAB(arg, ret, arg)); + break; + } else if (have_isa_2_07) { + tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD) + | VRT(arg) | RA(ret)); + break; + } else { + /* Altivec does not support vector->integer moves. */ + return false; + } + } else if (arg < TCG_REG_V0) { + if (have_isa_2_07) { + tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD) + | VRT(ret) | RA(arg)); + break; + } else { + /* Altivec does not support integer->vector moves. */ + return false; + } + } + /* fallthru */ + case TCG_TYPE_V64: + case TCG_TYPE_V128: + tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0); + tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg)); + break; + default: + g_assert_not_reached(); + } + return true; } static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs, int sh, int mb) { - assert(TCG_TARGET_REG_BITS == 64); + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1); mb = MB64((mb >> 5) | ((mb << 1) & 0x3f)); tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb); @@ -608,49 +808,193 @@ static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c) tcg_out_rld(s, RLDICL, dst, src, 64 - c, c); } -static void tcg_out_movi32(TCGContext *s, TCGReg ret, int32_t arg) +/* Emit a move into ret of arg, if it can be done in one insn. */ +static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg) { - if (arg == (int16_t) arg) { + if (arg == (int16_t)arg) { tcg_out32(s, ADDI | TAI(ret, 0, arg)); - } else { + return true; + } + if (arg == (int32_t)arg && (arg & 0xffff) == 0) { tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16)); - if (arg & 0xffff) { - tcg_out32(s, ORI | SAI(ret, ret, arg)); + return true; + } + return false; +} + +static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, + tcg_target_long arg, bool in_prologue) +{ + intptr_t tb_diff; + tcg_target_long tmp; + int shift; + + tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); + + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { + arg = (int32_t)arg; + } + + /* Load 16-bit immediates with one insn. */ + if (tcg_out_movi_one(s, ret, arg)) { + return; + } + + /* Load addresses within the TB with one insn. */ + tb_diff = arg - (intptr_t)s->code_gen_ptr; + if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) { + tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff)); + return; + } + + /* Load 32-bit immediates with two insns. Note that we've already + eliminated bare ADDIS, so we know both insns are required. 
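+       Worked example (illustrative): arg = 0x12348765 emits
+           addis ret, 0, 0x1234      # ret = 0x12340000
+           ori   ret, ret, 0x8765    # ret = 0x12348765
+       ori zero-extends its immediate, so unlike an addis/addi
+       pair no carry fixup between the two halves is needed.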
*/ + if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) { + tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16)); + tcg_out32(s, ORI | SAI(ret, ret, arg)); + return; + } + if (arg == (uint32_t)arg && !(arg & 0x8000)) { + tcg_out32(s, ADDI | TAI(ret, 0, arg)); + tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16)); + return; + } + + /* Load masked 16-bit value. */ + if (arg > 0 && (arg & 0x8000)) { + tmp = arg | 0x7fff; + if ((tmp & (tmp + 1)) == 0) { + int mb = clz64(tmp + 1) + 1; + tcg_out32(s, ADDI | TAI(ret, 0, arg)); + tcg_out_rld(s, RLDICL, ret, ret, 0, mb); + return; } } + + /* Load common masks with 2 insns. */ + shift = ctz64(arg); + tmp = arg >> shift; + if (tmp == (int16_t)tmp) { + tcg_out32(s, ADDI | TAI(ret, 0, tmp)); + tcg_out_shli64(s, ret, ret, shift); + return; + } + shift = clz64(arg); + if (tcg_out_movi_one(s, ret, arg << shift)) { + tcg_out_shri64(s, ret, ret, shift); + return; + } + + /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */ + if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) { + tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff); + return; + } + + /* Use the constant pool, if possible. */ + if (!in_prologue && USE_REG_TB) { + new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr, + -(intptr_t)s->code_gen_ptr); + tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0)); + return; + } + + tmp = arg >> 31 >> 1; + tcg_out_movi(s, TCG_TYPE_I32, ret, tmp); + if (tmp) { + tcg_out_shli64(s, ret, ret, 32); + } + if (arg & 0xffff0000) { + tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16)); + } + if (arg & 0xffff) { + tcg_out32(s, ORI | SAI(ret, ret, arg)); + } +} + +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret, + tcg_target_long val) +{ + uint32_t load_insn; + int rel, low; + intptr_t add; + + low = (int8_t)val; + if (low >= -16 && low < 16) { + if (val == (tcg_target_long)dup_const(MO_8, low)) { + tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16)); + return; + } + if (val == (tcg_target_long)dup_const(MO_16, low)) { + tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16)); + return; + } + if (val == (tcg_target_long)dup_const(MO_32, low)) { + tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16)); + return; + } + } + if (have_isa_3_00 && val == (tcg_target_long)dup_const(MO_8, val)) { + tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11)); + return; + } + + /* + * Otherwise we must load the value from the constant pool. + */ + if (USE_REG_TB) { + rel = R_PPC_ADDR16; + add = -(intptr_t)s->code_gen_ptr; + } else { + rel = R_PPC_ADDR32; + add = 0; + } + + if (have_vsx) { + load_insn = type == TCG_TYPE_V64 ? 
LXSDX : LXVDSX; + load_insn |= VRT(ret) | RB(TCG_REG_TMP1); + if (TCG_TARGET_REG_BITS == 64) { + new_pool_label(s, val, rel, s->code_ptr, add); + } else { + new_pool_l2(s, rel, s->code_ptr, add, val, val); + } + } else { + load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1); + if (TCG_TARGET_REG_BITS == 64) { + new_pool_l2(s, rel, s->code_ptr, add, val, val); + } else { + new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val); + } + } + + if (USE_REG_TB) { + tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0)); + load_insn |= RA(TCG_REG_TB); + } else { + tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0)); + tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0)); + } + tcg_out32(s, load_insn); } static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - if (type == TCG_TYPE_I32 || arg == (int32_t)arg) { - tcg_out_movi32(s, ret, arg); - } else if (arg == (uint32_t)arg && !(arg & 0x8000)) { - tcg_out32(s, ADDI | TAI(ret, 0, arg)); - tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16)); - } else { - int32_t high; + switch (type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + tcg_debug_assert(ret < TCG_REG_V0); + tcg_out_movi_int(s, type, ret, arg, false); + break; - if (USE_REG_RA) { - intptr_t diff = arg - (intptr_t)tb_ret_addr; - if (diff == (int32_t)diff) { - tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_RA, diff); - return; - } - } + case TCG_TYPE_V64: + case TCG_TYPE_V128: + tcg_debug_assert(ret >= TCG_REG_V0); + tcg_out_dupi_vec(s, type, ret, arg); + break; - high = arg >> 31 >> 1; - tcg_out_movi32(s, ret, high); - if (high) { - tcg_out_shli64(s, ret, ret, 32); - } - if (arg & 0xffff0000) { - tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16)); - } - if (arg & 0xffff) { - tcg_out32(s, ORI | SAI(ret, ret, arg)); - } + default: + g_assert_not_reached(); } } @@ -706,14 +1050,14 @@ static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c) { int mb, me; - if ((c & 0xffff) == c) { + if (mask_operand(c, &mb, &me)) { + tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me); + } else if ((c & 0xffff) == c) { tcg_out32(s, ANDI | SAI(src, dst, c)); return; } else if ((c & 0xffff0000) == c) { tcg_out32(s, ANDIS | SAI(src, dst, c >> 16)); return; - } else if (mask_operand(c, &mb, &me)) { - tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me); } else { tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c); tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0)); @@ -724,19 +1068,19 @@ static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c) { int mb, me; - assert(TCG_TARGET_REG_BITS == 64); - if ((c & 0xffff) == c) { - tcg_out32(s, ANDI | SAI(src, dst, c)); - return; - } else if ((c & 0xffff0000) == c) { - tcg_out32(s, ANDIS | SAI(src, dst, c >> 16)); - return; - } else if (mask64_operand(c, &mb, &me)) { + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + if (mask64_operand(c, &mb, &me)) { if (mb == 0) { tcg_out_rld(s, RLDICR, dst, src, 0, me); } else { tcg_out_rld(s, RLDICL, dst, src, 0, mb); } + } else if ((c & 0xffff) == c) { + tcg_out32(s, ANDI | SAI(src, dst, c)); + return; + } else if ((c & 0xffff0000) == c) { + tcg_out32(s, ANDIS | SAI(src, dst, c >> 16)); + return; } else { tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c); tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0)); @@ -782,7 +1126,7 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, TCGReg base, tcg_target_long offset) { tcg_target_long orig = offset, l0, l1, extra = 0, align = 0; - bool is_store = false; + bool is_int_store = false; TCGReg rs = 
TCG_REG_TMP1; switch (opi) { @@ -790,27 +1134,35 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, align = 3; /* FALLTHRU */ default: - if (rt != TCG_REG_R0) { + if (rt > TCG_REG_R0 && rt < TCG_REG_V0) { rs = rt; break; } break; + case LXSD: + case STXSD: + align = 3; + break; + case LXV: + case STXV: + align = 15; + break; case STD: align = 3; /* FALLTHRU */ case STB: case STH: case STW: - is_store = true; + is_int_store = true; break; } /* For unaligned, or very large offsets, use the indexed form. */ - if (offset & align || offset != (int32_t)offset) { + if (offset & align || offset != (int32_t)offset || opi == 0) { if (rs == base) { rs = TCG_REG_R0; } - tcg_debug_assert(!is_store || rs != rt); + tcg_debug_assert(!is_int_store || rs != rt); tcg_out_movi(s, TCG_TYPE_PTR, rs, orig); - tcg_out32(s, opx | TAB(rt, base, rs)); + tcg_out32(s, opx | TAB(rt & 31, base, rs)); return; } @@ -831,36 +1183,129 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, base = rs; } if (opi != ADDI || base != rt || l0 != 0) { - tcg_out32(s, opi | TAI(rt, base, l0)); + tcg_out32(s, opi | TAI(rt & 31, base, l0)); } } -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, - TCGReg arg1, intptr_t arg2) +static void tcg_out_vsldoi(TCGContext *s, TCGReg ret, + TCGReg va, TCGReg vb, int shb) { - int opi, opx; - - assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - if (type == TCG_TYPE_I32) { - opi = LWZ, opx = LWZX; - } else { - opi = LD, opx = LDX; - } - tcg_out_mem_long(s, opi, opx, ret, arg1, arg2); + tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6)); } -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, + TCGReg base, intptr_t offset) { - int opi, opx; + int shift; - assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - if (type == TCG_TYPE_I32) { - opi = STW, opx = STWX; - } else { - opi = STD, opx = STDX; + switch (type) { + case TCG_TYPE_I32: + if (ret < TCG_REG_V0) { + tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset); + break; + } + if (have_isa_2_07 && have_vsx) { + tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset); + break; + } + tcg_debug_assert((offset & 3) == 0); + tcg_out_mem_long(s, 0, LVEWX, ret, base, offset); + shift = (offset - 4) & 0xc; + if (shift) { + tcg_out_vsldoi(s, ret, ret, ret, shift); + } + break; + case TCG_TYPE_I64: + if (ret < TCG_REG_V0) { + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_out_mem_long(s, LD, LDX, ret, base, offset); + break; + } + /* fallthru */ + case TCG_TYPE_V64: + tcg_debug_assert(ret >= TCG_REG_V0); + if (have_vsx) { + tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX, + ret, base, offset); + break; + } + tcg_debug_assert((offset & 7) == 0); + tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16); + if (offset & 8) { + tcg_out_vsldoi(s, ret, ret, ret, 8); + } + break; + case TCG_TYPE_V128: + tcg_debug_assert(ret >= TCG_REG_V0); + tcg_debug_assert((offset & 15) == 0); + tcg_out_mem_long(s, have_isa_3_00 ? 
LXV : 0, + LVX, ret, base, offset); + break; + default: + g_assert_not_reached(); } - tcg_out_mem_long(s, opi, opx, arg, arg1, arg2); +} + +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg base, intptr_t offset) +{ + int shift; + + switch (type) { + case TCG_TYPE_I32: + if (arg < TCG_REG_V0) { + tcg_out_mem_long(s, STW, STWX, arg, base, offset); + break; + } + if (have_isa_2_07 && have_vsx) { + tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset); + break; + } + tcg_debug_assert((offset & 3) == 0); + shift = (offset - 4) & 0xc; + if (shift) { + tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift); + arg = TCG_VEC_TMP1; + } + tcg_out_mem_long(s, 0, STVEWX, arg, base, offset); + break; + case TCG_TYPE_I64: + if (arg < TCG_REG_V0) { + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_out_mem_long(s, STD, STDX, arg, base, offset); + break; + } + /* fallthru */ + case TCG_TYPE_V64: + tcg_debug_assert(arg >= TCG_REG_V0); + if (have_vsx) { + tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0, + STXSDX, arg, base, offset); + break; + } + tcg_debug_assert((offset & 7) == 0); + if (offset & 8) { + tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8); + arg = TCG_VEC_TMP1; + } + tcg_out_mem_long(s, 0, STVEWX, arg, base, offset); + tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4); + break; + case TCG_TYPE_V128: + tcg_debug_assert(arg >= TCG_REG_V0); + tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0, + STVX, arg, base, offset); + break; + default: + g_assert_not_reached(); + } +} + +static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) +{ + return false; } static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, @@ -987,7 +1432,7 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, { int crop, sh; - assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); + tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); /* Ignore high bits of a potential constant arg2. */ if (type == TCG_TYPE_I32) { @@ -1027,7 +1472,7 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, /* If we have ISEL, we can implement everything with 3 or 4 insns. All other cases below are also at least 3 insns, so speed up the code generator by not considering them and always using ISEL.
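       Shape of the isel path, as pseudo-assembly for the eq case
       (sketch only): after tcg_out_cmp into cr7,
           li   r0, 1
           isel dst, r0, 0, cr7[eq]    # the 0 encodes constant zero
       giving one compare plus a branch-free select.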
*/ - if (HAVE_ISEL) { + if (have_isel) { int isel, tab; tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); @@ -1100,24 +1545,22 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, } } -static void tcg_out_bc(TCGContext *s, int bc, int label_index) +static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l) { - TCGLabel *l = &s->labels[label_index]; - if (l->has_value) { - tcg_out32(s, bc | reloc_pc14_val(s->code_ptr, l->u.value_ptr)); + bc |= reloc_pc14_val(s->code_ptr, l->u.value_ptr); } else { - tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, label_index, 0); - tcg_out_bc_noaddr(s, bc); + tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0); } + tcg_out32(s, bc); } static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGArg arg1, TCGArg arg2, int const_arg2, - int label_index, TCGType type) + TCGLabel *l, TCGType type) { tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); - tcg_out_bc(s, tcg_to_bc[cond], label_index); + tcg_out_bc(s, tcg_to_bc[cond], l); } static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, @@ -1132,7 +1575,7 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type); - if (HAVE_ISEL) { + if (have_isel) { int isel = tcg_to_isel[cond]; /* Swap the V operands if the operation indicates inversion. */ @@ -1168,6 +1611,32 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, } } +static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc, + TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2) +{ + if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) { + tcg_out32(s, opc | RA(a0) | RS(a1)); + } else { + tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type); + /* Note that the only other valid constant for a2 is 0. */ + if (have_isel) { + tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1)); + tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0)); + } else if (!const_a2 && a0 == a2) { + tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8); + tcg_out32(s, opc | RA(a0) | RS(a1)); + } else { + tcg_out32(s, opc | RA(a0) | RS(a1)); + tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8); + if (const_a2) { + tcg_out_movi(s, type, a0, 0); + } else { + tcg_out_mov(s, type, a0, a2); + } + } + } +} + static void tcg_out_cmp2(TCGContext *s, const TCGArg *args, const int *const_args) { @@ -1242,16 +1711,59 @@ static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args, const int *const_args) { tcg_out_cmp2(s, args, const_args); - tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, args[5]); + tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5])); } -void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr) +static void tcg_out_mb(TCGContext *s, TCGArg a0) { - TCGContext s; + uint32_t insn = HWSYNC; + a0 &= TCG_MO_ALL; + if (a0 == TCG_MO_LD_LD) { + insn = LWSYNC; + } else if (a0 == TCG_MO_ST_ST) { + insn = EIEIO; + } + tcg_out32(s, insn); +} - s.code_buf = s.code_ptr = (tcg_insn_unit *)jmp_addr; - tcg_out_b(&s, 0, (tcg_insn_unit *)addr); - flush_icache_range(jmp_addr, jmp_addr + tcg_current_code_size(&s)); +void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, + uintptr_t addr) +{ + if (TCG_TARGET_REG_BITS == 64) { + tcg_insn_unit i1, i2; + intptr_t tb_diff = addr - tc_ptr; + intptr_t br_diff = addr - (jmp_addr + 4); + uint64_t pair; + + /* This does not exercise the range of the branch, but we do + still need to be able to load the new value of TCG_REG_TB. + But this does still happen quite often. 
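+       Worked example of the far case (illustrative numbers):
+       tb_diff = 0x18000 gives lo = (int16_t)0x8000 = -0x8000 and
+       hi = 0x20000, so addis 0x2 then addi -0x8000 rebuilds
+       0x18000 even though addi sign-extends its immediate.  The
+       pair is then published with a single aligned 8-byte
+       atomic_set so another cpu can never execute a half-updated
+       pair.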
*/ + if (tb_diff == (int16_t)tb_diff) { + i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff); + i2 = B | (br_diff & 0x3fffffc); + } else { + intptr_t lo = (int16_t)tb_diff; + intptr_t hi = (int32_t)(tb_diff - lo); + assert(tb_diff == hi + lo); + i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16); + i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo); + } +#ifdef HOST_WORDS_BIGENDIAN + pair = (uint64_t)i1 << 32 | i2; +#else + pair = (uint64_t)i2 << 32 | i1; +#endif + + /* As per the enclosing if, this is ppc64. Avoid the _Static_assert + within atomic_set that would fail to build a ppc32 host. */ + atomic_set__nocheck((uint64_t *)jmp_addr, pair); + flush_icache_range(jmp_addr, jmp_addr + 8); + } else { + intptr_t diff = addr - jmp_addr; + tcg_debug_assert(in_range_b(diff)); + atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc)); + flush_icache_range(jmp_addr, jmp_addr + 4); + } } static void tcg_out_call(TCGContext *s, tcg_insn_unit *target) @@ -1333,6 +1845,8 @@ static const uint32_t qemu_exts_opc[4] = { }; #if defined (CONFIG_SOFTMMU) +#include "../tcg-ldst.inc.c" + /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ @@ -1359,82 +1873,106 @@ static void * const qemu_st_helpers[16] = { [MO_BEQ] = helper_be_stq_mmu, }; +/* We expect to use a 16-bit negative offset from ENV. */ +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); + /* Perform the TLB load and compare. Places the result of the comparison in CR7, loads the addend of the TLB into R3, and returns the register containing the guest address (zero-extended into R4). Clobbers R0 and R2. */ -static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits, +static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc, TCGReg addrlo, TCGReg addrhi, int mem_index, bool is_read) { +#ifdef TARGET_ARM + struct uc_struct *uc = s->uc; +#endif int cmp_off = (is_read - ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) - : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write)); - int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend); - TCGReg base = TCG_AREG0; + ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write)); + int fast_off = TLB_MASK_TABLE_OFS(mem_index); + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); + int table_off = fast_off + offsetof(CPUTLBDescFast, table); + unsigned s_bits = opc & MO_SIZE; + unsigned a_bits = get_alignment_bits(opc); + + /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off); /* Extract the page index, shifted into place for tlb index. */ - if (TCG_TARGET_REG_BITS == 64) { - if (TARGET_LONG_BITS == 32) { - /* Zero-extend the address into a place helpful for further use. */ - tcg_out_ext32u(s, TCG_REG_R4, addrlo); - addrlo = TCG_REG_R4; + if (TCG_TARGET_REG_BITS == 32) { + tcg_out_shri32(s, TCG_REG_TMP1, addrlo, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + } else { + tcg_out_shri64(s, TCG_REG_TMP1, addrlo, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + } + tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1)); + + /* Load the TLB comparator. */ + if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) { + uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32 + ? 
LWZUX : LDUX); + tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4)); + } else { + tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4)); + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4); + tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off); } else { - tcg_out_rld(s, RLDICL, TCG_REG_R3, addrlo, - 64 - TARGET_PAGE_BITS, 64 - CPU_TLB_BITS); + tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off); } } - /* Compensate for very large offsets. */ - if (add_off >= 0x8000) { - /* Most target env are smaller than 32k; none are larger than 64k. - Simplify the logic here merely to offset by 0x7ff0, giving us a - range just shy of 64k. Check this assumption. */ - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, - tlb_table[NB_MMU_MODES - 1][1]) - > 0x7ff0 + 0x7fff); - tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, base, 0x7ff0)); - base = TCG_REG_TMP1; - cmp_off -= 0x7ff0; - add_off -= 0x7ff0; - } - - /* Extraction and shifting, part 2. */ - if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) { - tcg_out_rlw(s, RLWINM, TCG_REG_R3, addrlo, - 32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), - 32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS), - 31 - CPU_TLB_ENTRY_BITS); - } else { - tcg_out_shli64(s, TCG_REG_R3, TCG_REG_R3, CPU_TLB_ENTRY_BITS); - } - - tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, base)); - - /* Load the tlb comparator. */ - if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off); - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4); - } else { - tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off); - } - /* Load the TLB addend for use on the fast path. Do this asap to minimize any load use delay. */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, add_off); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, + offsetof(CPUTLBEntry, addend)); - /* Clear the non-page, non-alignment bits from the address. */ - if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) { + /* Clear the non-page, non-alignment bits from the address */ + if (TCG_TARGET_REG_BITS == 32) { + /* We don't support unaligned accesses on 32-bits. + * Preserve the bottom bits and thus trigger a comparison + * failure on unaligned accesses. + */ + if (a_bits < s_bits) { + a_bits = s_bits; + } tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0, - (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS); - } else if (!s_bits) { - tcg_out_rld(s, RLDICR, TCG_REG_R0, addrlo, - 0, 63 - TARGET_PAGE_BITS); + (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS); } else { - tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo, - 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits); - tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0); + TCGReg t = addrlo; + + /* If the access is unaligned, we need to make sure we fail if we + * cross a page boundary. The trick is to add the access size-1 + * to the address before masking the low bits. That will make the + * address overflow to the next page if we cross a page boundary, + * which will then force a mismatch of the TLB compare. + */ + if (a_bits < s_bits) { + unsigned a_mask = (1 << a_bits) - 1; + unsigned s_mask = (1 << s_bits) - 1; + tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask)); + t = TCG_REG_R0; + } + + /* Mask the address for the requested alignment. */ + if (TARGET_LONG_BITS == 32) { + tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0, + (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS); + /* Zero-extend the address for use in the final address. 
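+                With a 32-bit guest on a 64-bit host the upper half
+                of addrlo is unspecified, and R4 feeds the final
+                addend + address computation, so it must hold the
+                clean 32-bit guest address.  The page-cross trick
+                above, with illustrative numbers and 4 KiB pages: a
+                4-byte access (s_bits = 2) with no alignment
+                requirement adds s_mask - a_mask = 3, so a guest
+                address of 0x0ffd becomes 0x1000 before masking,
+                the page number no longer matches the TLB tag, and
+                the access falls through to the slow path.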
*/ + tcg_out_ext32u(s, TCG_REG_R4, addrlo); + addrlo = TCG_REG_R4; + } else if (a_bits == 0) { + tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS); + } else { + tcg_out_rld(s, RLDICL, TCG_REG_R0, t, + 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits); + tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0); + } } if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { @@ -1453,31 +1991,32 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits, /* Record the context of a call to the out of line helper code for the slow path for a load or store, so that we can later generate the correct helper code. */ -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, TCGReg datalo_reg, TCGReg datahi_reg, TCGReg addrlo_reg, TCGReg addrhi_reg, - int mem_index, tcg_insn_unit *raddr, - tcg_insn_unit *lptr) + tcg_insn_unit *raddr, tcg_insn_unit *lptr) { TCGLabelQemuLdst *label = new_ldst_label(s); label->is_ld = is_ld; - label->opc = opc; + label->oi = oi; label->datalo_reg = datalo_reg; label->datahi_reg = datahi_reg; label->addrlo_reg = addrlo_reg; label->addrhi_reg = addrhi_reg; - label->mem_index = mem_index; label->raddr = raddr; label->label_ptr[0] = lptr; } -static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGMemOp opc = lb->opc; + TCGMemOpIdx oi = lb->oi; + MemOp opc = get_memop(oi); TCGReg hi, lo, arg = TCG_REG_R3; - reloc_pc14(lb->label_ptr[0], s->code_ptr); + if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { + return false; + } tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0); @@ -1495,10 +2034,10 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_mov(s, TCG_TYPE_TL, arg++, lo); } - tcg_out_movi(s, TCG_TYPE_I32, arg++, lb->mem_index); + tcg_out_movi(s, TCG_TYPE_I32, arg++, oi); tcg_out32(s, MFSPR | RT(arg) | LR); - tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); + tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); lo = lb->datalo_reg; hi = lb->datahi_reg; @@ -1513,15 +2052,19 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } tcg_out_b(s, 0, lb->raddr); + return true; } -static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGMemOp opc = lb->opc; - TCGMemOp s_bits = opc & MO_SIZE; + TCGMemOpIdx oi = lb->oi; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; TCGReg hi, lo, arg = TCG_REG_R3; - reloc_pc14(lb->label_ptr[0], s->code_ptr); + if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) { + return false; + } tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0); @@ -1564,20 +2107,22 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } } - tcg_out_movi(s, TCG_TYPE_I32, arg++, lb->mem_index); + tcg_out_movi(s, TCG_TYPE_I32, arg++, oi); tcg_out32(s, MFSPR | RT(arg) | LR); - tcg_out_call(s, qemu_st_helpers[opc]); + tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); tcg_out_b(s, 0, lb->raddr); + return true; } #endif /* SOFTMMU */ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg datalo, datahi, addrlo, rbase; - TCGReg addrhi QEMU_UNUSED_VAR; - TCGMemOp opc, s_bits; + TCGReg addrhi __attribute__((unused)); + TCGMemOpIdx oi; + MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; @@ -1587,20 +2132,21 @@ 
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); addrlo = *args++; addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); - opc = *args++; + oi = *args++; + opc = get_memop(oi); s_bits = opc & MO_SIZE; #ifdef CONFIG_SOFTMMU - mem_index = *args; - addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, true); + mem_index = get_mmuidx(oi); + addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true); /* Load a pointer into the current opcode w/conditional branch-link. */ label_ptr = s->code_ptr; - tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); + tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); rbase = TCG_REG_R3; #else /* !CONFIG_SOFTMMU */ - rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0; + rbase = guest_base ? TCG_GUEST_BASE_REG : 0; if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); addrlo = TCG_REG_TMP1; @@ -1624,8 +2170,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) tcg_out32(s, LWZ | TAI(datalo, addrlo, 4)); } } else { - uint32_t insn = qemu_ldx_opc[opc]; - if (!HAVE_ISA_2_06 && insn == LDBRX) { + uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)]; + if (!have_isa_2_06 && insn == LDBRX) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0)); @@ -1641,16 +2187,17 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) } #ifdef CONFIG_SOFTMMU - add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi, - mem_index, s->code_ptr, label_ptr); + add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, + s->code_ptr, label_ptr); #endif } static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg datalo, datahi, addrlo, rbase; - TCGReg addrhi QEMU_UNUSED_VAR; - TCGMemOp opc, s_bits; + TCGReg addrhi __attribute__((unused)); + TCGMemOpIdx oi; + MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; tcg_insn_unit *label_ptr; @@ -1660,20 +2207,21 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); addrlo = *args++; addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); - opc = *args++; + oi = *args++; + opc = get_memop(oi); s_bits = opc & MO_SIZE; #ifdef CONFIG_SOFTMMU - mem_index = *args; - addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, false); + mem_index = get_mmuidx(oi); + addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false); /* Load a pointer into the current opcode w/conditional branch-link. */ label_ptr = s->code_ptr; - tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); + tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK); rbase = TCG_REG_R3; #else /* !CONFIG_SOFTMMU */ - rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0; + rbase = guest_base ? 
TCG_GUEST_BASE_REG : 0; if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); addrlo = TCG_REG_TMP1; @@ -1694,8 +2242,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) tcg_out32(s, STW | TAI(datalo, addrlo, 4)); } } else { - uint32_t insn = qemu_stx_opc[opc]; - if (!HAVE_ISA_2_06 && insn == STDBRX) { + uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)]; + if (!have_isa_2_06 && insn == STDBRX) { tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo)); tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4)); tcg_out_shri64(s, TCG_REG_R0, datalo, 32); @@ -1706,11 +2254,19 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) } #ifdef CONFIG_SOFTMMU - add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi, - mem_index, s->code_ptr, label_ptr); + add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, + s->code_ptr, label_ptr); #endif } +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + int i; + for (i = 0; i < count; ++i) { + p[i] = NOP; + } +} + /* Parameters for function call generation, used in tcg.c. */ #define TCG_TARGET_STACK_ALIGN 16 #define TCG_TARGET_EXTEND_ARGS 1 @@ -1777,45 +2333,22 @@ static void tcg_target_qemu_prologue(TCGContext *s) } tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET); -#ifdef CONFIG_USE_GUEST_BASE - if (GUEST_BASE) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); +#ifndef CONFIG_SOFTMMU + if (guest_base) { + tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); } #endif tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR); - - if (USE_REG_RA) { -#ifdef _CALL_AIX - /* Make the caller load the value as the TOC into R2. */ - tb_ret_addr = s->code_ptr + 2; - desc[1] = tb_ret_addr; - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_RA, TCG_REG_R2); - tcg_out32(s, BCCTR | BO_ALWAYS); -#elif defined(_CALL_ELF) && _CALL_ELF == 2 - /* Compute from the incoming R12 value. */ - tb_ret_addr = s->code_ptr + 2; - tcg_out32(s, ADDI | TAI(TCG_REG_RA, TCG_REG_R12, - tcg_ptr_byte_diff(tb_ret_addr, s->code_buf))); - tcg_out32(s, BCCTR | BO_ALWAYS); -#else - /* Reserve max 5 insns for the constant load. */ - tb_ret_addr = s->code_ptr + 6; - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)tb_ret_addr); - tcg_out32(s, BCCTR | BO_ALWAYS); - while (s->code_ptr < tb_ret_addr) { - tcg_out32(s, NOP); - } -#endif - } else { - tcg_out32(s, BCCTR | BO_ALWAYS); - tb_ret_addr = s->code_ptr; + if (USE_REG_TB) { + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]); } + tcg_out32(s, BCCTR | BO_ALWAYS); /* Epilogue */ - assert(tb_ret_addr == s->code_ptr); + s->code_gen_epilogue = tb_ret_addr = s->code_ptr; tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET); for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) { @@ -1835,45 +2368,61 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, switch (opc) { case INDEX_op_exit_tb: - if (USE_REG_RA) { - ptrdiff_t disp = tcg_pcrel_diff(s, tb_ret_addr); - - /* If we can use a direct branch, otherwise use the value in RA. - Note that the direct branch is always forward. If it's in - range now, it'll still be in range after the movi. Don't - bother about the 20 bytes where the test here fails but it - would succeed below. 
*/ - if (!in_range_b(disp)) { - tcg_out32(s, MTSPR | RS(TCG_REG_RA) | CTR); - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]); - tcg_out32(s, BCCTR | BO_ALWAYS); - break; - } - } tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]); tcg_out_b(s, 0, tb_ret_addr); break; case INDEX_op_goto_tb: - if (s->tb_jmp_offset) { - /* Direct jump method. */ - s->tb_jmp_offset[args[0]] = tcg_current_code_size(s); - s->code_ptr += 7; + if (s->tb_jmp_insn_offset) { + /* Direct jump. */ + if (TCG_TARGET_REG_BITS == 64) { + /* Ensure the next insns are 8-byte aligned. */ + if ((uintptr_t)s->code_ptr & 7) { + tcg_out32(s, NOP); + } + s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s); + tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0)); + tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0)); + } else { + s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s); + tcg_out32(s, B); + s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s); + break; + } } else { - /* Indirect jump method. */ - tcg_abort(); + /* Indirect jump. */ + tcg_debug_assert(s->tb_jmp_insn_offset == NULL); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0, + (intptr_t)(s->tb_jmp_target_addr + args[0])); } - s->tb_next_offset[args[0]] = tcg_current_code_size(s); + tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR); + tcg_out32(s, BCCTR | BO_ALWAYS); + set_jmp_reset_offset(s, args[0]); + if (USE_REG_TB) { + /* For the unlinked case, need to reset TCG_REG_TB. */ + c = -tcg_current_code_size(s); + assert(c == (int16_t)c); + tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, c)); + } + break; + case INDEX_op_goto_ptr: + tcg_out32(s, MTSPR | RS(args[0]) | CTR); + if (USE_REG_TB) { + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]); + } + tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0)); + tcg_out32(s, BCCTR | BO_ALWAYS); break; case INDEX_op_br: { - TCGLabel *l = &s->labels[args[0]]; + TCGLabel *l = arg_label(args[0]); + uint32_t insn = B; if (l->has_value) { - tcg_out_b(s, 0, l->u.value_ptr); + insn |= reloc_pc24_val(s->code_ptr, l->u.value_ptr); } else { - tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, args[0], 0); - tcg_out_b_noaddr(s, B); + tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0); } + tcg_out32(s, insn); } break; case INDEX_op_ld8u_i32: @@ -2021,6 +2570,30 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, tcg_out32(s, NOR | SAB(args[1], args[0], args[2])); break; + case INDEX_op_clz_i32: + tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1], + args[2], const_args[2]); + break; + case INDEX_op_ctz_i32: + tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1], + args[2], const_args[2]); + break; + case INDEX_op_ctpop_i32: + tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0)); + break; + + case INDEX_op_clz_i64: + tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1], + args[2], const_args[2]); + break; + case INDEX_op_ctz_i64: + tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1], + args[2], const_args[2]); + break; + case INDEX_op_ctpop_i64: + tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0)); + break; + case INDEX_op_mul_i32: a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { @@ -2079,11 +2652,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, case INDEX_op_brcond_i32: tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], - args[3], TCG_TYPE_I32); + arg_label(args[3]), TCG_TYPE_I32); break; case INDEX_op_brcond_i64: tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], - args[3], TCG_TYPE_I64); + arg_label(args[3]), TCG_TYPE_I64); break; case
INDEX_op_brcond2_i32: tcg_out_brcond2(s, args, const_args); @@ -2198,12 +2771,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, case INDEX_op_ext16s_i64: c = EXTSH; goto gen_ext; + case INDEX_op_ext_i32_i64: case INDEX_op_ext32s_i64: c = EXTSW; goto gen_ext; gen_ext: tcg_out32(s, c | RS(args[1]) | RA(args[0])); break; + case INDEX_op_extu_i32_i64: + tcg_out_ext32u(s, args[0], args[1]); + break; case INDEX_op_setcond_i32: tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2], @@ -2306,6 +2883,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, } break; + case INDEX_op_extract_i32: + tcg_out_rlw(s, RLWINM, args[0], args[1], + 32 - args[2], 32 - args[3], 31); + break; + case INDEX_op_extract_i64: + tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]); + break; + case INDEX_op_movcond_i32: tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2], args[3], args[4], const_args[2]); @@ -2379,6 +2964,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, tcg_out32(s, MULHD | TAB(args[0], args[1], args[2])); break; + case INDEX_op_mb: + tcg_out_mb(s, args[0]); + break; + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ case INDEX_op_mov_i64: case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ @@ -2389,168 +2978,806 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, } } -static const TCGTargetOpDef ppc_op_defs[] = { - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - { INDEX_op_br, { } }, +int tcg_can_emit_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece) +{ + switch (opc) { + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_andc_vec: + case INDEX_op_not_vec: + return 1; + case INDEX_op_orc_vec: + return have_isa_2_07; + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + return vece <= MO_32 || have_isa_2_07; + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: + return vece <= MO_32; + case INDEX_op_cmp_vec: + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + case INDEX_op_sari_vec: + return vece <= MO_32 || have_isa_2_07 ? -1 : 0; + case INDEX_op_neg_vec: + return vece >= MO_32 && have_isa_3_00; + case INDEX_op_mul_vec: + switch (vece) { + case MO_8: + case MO_16: + return -1; + case MO_32: + return have_isa_2_07 ? 1 : -1; + } + return 0; + case INDEX_op_bitsel_vec: + return have_vsx; + default: + return 0; + } +} - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, +static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg src) +{ + tcg_debug_assert(dst >= TCG_REG_V0); - { INDEX_op_st8_i32, { "r", "r" } }, - { INDEX_op_st16_i32, { "r", "r" } }, - { INDEX_op_st_i32, { "r", "r" } }, + /* Splat from integer reg allowed via constraints for v3.00. */ + if (src < TCG_REG_V0) { + tcg_debug_assert(have_isa_3_00); + switch (vece) { + case MO_64: + tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src)); + return true; + case MO_32: + tcg_out32(s, MTVSRWS | VRT(dst) | RA(src)); + return true; + default: + /* Fail, so that we fall back on either dupm or mov+dup. 
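+ Returning false lets the generic code either broadcast the scalar straight from memory via dupm_vec, or move it into a vector register first and dup from there.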
*/ + return false; + } + } - { INDEX_op_add_i32, { "r", "r", "ri" } }, - { INDEX_op_mul_i32, { "r", "r", "rI" } }, - { INDEX_op_div_i32, { "r", "r", "r" } }, - { INDEX_op_divu_i32, { "r", "r", "r" } }, - { INDEX_op_sub_i32, { "r", "rI", "ri" } }, - { INDEX_op_and_i32, { "r", "r", "ri" } }, - { INDEX_op_or_i32, { "r", "r", "ri" } }, - { INDEX_op_xor_i32, { "r", "r", "ri" } }, - { INDEX_op_andc_i32, { "r", "r", "ri" } }, - { INDEX_op_orc_i32, { "r", "r", "ri" } }, - { INDEX_op_eqv_i32, { "r", "r", "ri" } }, - { INDEX_op_nand_i32, { "r", "r", "r" } }, - { INDEX_op_nor_i32, { "r", "r", "r" } }, + /* + * Recall we use (or emulate) VSX integer loads, so the integer is + * right justified within the left (zero-index) double-word. + */ + switch (vece) { + case MO_8: + tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16)); + break; + case MO_16: + tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16)); + break; + case MO_32: + tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16)); + break; + case MO_64: + if (have_vsx) { + tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src)); + break; + } + tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8); + tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8); + break; + default: + g_assert_not_reached(); + } + return true; +} - { INDEX_op_shl_i32, { "r", "r", "ri" } }, - { INDEX_op_shr_i32, { "r", "r", "ri" } }, - { INDEX_op_sar_i32, { "r", "r", "ri" } }, - { INDEX_op_rotl_i32, { "r", "r", "ri" } }, - { INDEX_op_rotr_i32, { "r", "r", "ri" } }, +static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg out, TCGReg base, intptr_t offset) +{ + int elt; - { INDEX_op_neg_i32, { "r", "r" } }, - { INDEX_op_not_i32, { "r", "r" } }, - { INDEX_op_ext8s_i32, { "r", "r" } }, - { INDEX_op_ext16s_i32, { "r", "r" } }, - { INDEX_op_bswap16_i32, { "r", "r" } }, - { INDEX_op_bswap32_i32, { "r", "r" } }, - - { INDEX_op_brcond_i32, { "r", "ri" } }, - { INDEX_op_setcond_i32, { "r", "r", "ri" } }, - { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } }, - - { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, - - { INDEX_op_muluh_i32, { "r", "r", "r" } }, - { INDEX_op_mulsh_i32, { "r", "r", "r" } }, - -#if TCG_TARGET_REG_BITS == 64 - { INDEX_op_ld8u_i64, { "r", "r" } }, - { INDEX_op_ld8s_i64, { "r", "r" } }, - { INDEX_op_ld16u_i64, { "r", "r" } }, - { INDEX_op_ld16s_i64, { "r", "r" } }, - { INDEX_op_ld32u_i64, { "r", "r" } }, - { INDEX_op_ld32s_i64, { "r", "r" } }, - { INDEX_op_ld_i64, { "r", "r" } }, - - { INDEX_op_st8_i64, { "r", "r" } }, - { INDEX_op_st16_i64, { "r", "r" } }, - { INDEX_op_st32_i64, { "r", "r" } }, - { INDEX_op_st_i64, { "r", "r" } }, - - { INDEX_op_add_i64, { "r", "r", "rT" } }, - { INDEX_op_sub_i64, { "r", "rI", "rT" } }, - { INDEX_op_and_i64, { "r", "r", "ri" } }, - { INDEX_op_or_i64, { "r", "r", "rU" } }, - { INDEX_op_xor_i64, { "r", "r", "rU" } }, - { INDEX_op_andc_i64, { "r", "r", "ri" } }, - { INDEX_op_orc_i64, { "r", "r", "r" } }, - { INDEX_op_eqv_i64, { "r", "r", "r" } }, - { INDEX_op_nand_i64, { "r", "r", "r" } }, - { INDEX_op_nor_i64, { "r", "r", "r" } }, - - { INDEX_op_shl_i64, { "r", "r", "ri" } }, - { INDEX_op_shr_i64, { "r", "r", "ri" } }, - { INDEX_op_sar_i64, { "r", "r", "ri" } }, - { INDEX_op_rotl_i64, { "r", "r", "ri" } }, - { INDEX_op_rotr_i64, { "r", "r", "ri" } }, - - { INDEX_op_mul_i64, { "r", "r", "rI" } }, - { INDEX_op_div_i64, { "r", "r", "r" } }, - { INDEX_op_divu_i64, { "r", "r", "r" } }, - - { INDEX_op_neg_i64, { "r", "r" } }, - { INDEX_op_not_i64, { "r", "r" } }, - { INDEX_op_ext8s_i64, { "r", "r" } }, - { 
INDEX_op_ext16s_i64, { "r", "r" } }, - { INDEX_op_ext32s_i64, { "r", "r" } }, - { INDEX_op_bswap16_i64, { "r", "r" } }, - { INDEX_op_bswap32_i64, { "r", "r" } }, - { INDEX_op_bswap64_i64, { "r", "r" } }, - - { INDEX_op_brcond_i64, { "r", "ri" } }, - { INDEX_op_setcond_i64, { "r", "r", "ri" } }, - { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } }, - - { INDEX_op_deposit_i64, { "r", "0", "rZ" } }, - - { INDEX_op_mulsh_i64, { "r", "r", "r" } }, - { INDEX_op_muluh_i64, { "r", "r", "r" } }, + tcg_debug_assert(out >= TCG_REG_V0); + switch (vece) { + case MO_8: + if (have_isa_3_00) { + tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16); + } else { + tcg_out_mem_long(s, 0, LVEBX, out, base, offset); + } + elt = extract32(offset, 0, 4); +#ifndef HOST_WORDS_BIGENDIAN + elt ^= 15; #endif - -#if TCG_TARGET_REG_BITS == 32 - { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } }, - { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } }, + tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16)); + break; + case MO_16: + tcg_debug_assert((offset & 1) == 0); + if (have_isa_3_00) { + tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16); + } else { + tcg_out_mem_long(s, 0, LVEHX, out, base, offset); + } + elt = extract32(offset, 1, 3); +#ifndef HOST_WORDS_BIGENDIAN + elt ^= 7; #endif - -#if TCG_TARGET_REG_BITS == 64 - { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } }, - { INDEX_op_sub2_i64, { "r", "r", "rI", "rZM", "r", "r" } }, -#else - { INDEX_op_add2_i32, { "r", "r", "r", "r", "rI", "rZM" } }, - { INDEX_op_sub2_i32, { "r", "r", "rI", "rZM", "r", "r" } }, + tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16)); + break; + case MO_32: + if (have_isa_3_00) { + tcg_out_mem_long(s, 0, LXVWSX, out, base, offset); + break; + } + tcg_debug_assert((offset & 3) == 0); + tcg_out_mem_long(s, 0, LVEWX, out, base, offset); + elt = extract32(offset, 2, 2); +#ifndef HOST_WORDS_BIGENDIAN + elt ^= 3; #endif - -#if TCG_TARGET_REG_BITS == 64 - { INDEX_op_qemu_ld_i32, { "r", "L" } }, - { INDEX_op_qemu_st_i32, { "S", "S" } }, - { INDEX_op_qemu_ld_i64, { "r", "L" } }, - { INDEX_op_qemu_st_i64, { "S", "S" } }, -#elif TARGET_LONG_BITS == 32 - { INDEX_op_qemu_ld_i32, { "r", "L" } }, - { INDEX_op_qemu_st_i32, { "S", "S" } }, - { INDEX_op_qemu_ld_i64, { "L", "L", "L" } }, - { INDEX_op_qemu_st_i64, { "S", "S", "S" } }, -#else - { INDEX_op_qemu_ld_i32, { "r", "L", "L" } }, - { INDEX_op_qemu_st_i32, { "S", "S", "S" } }, - { INDEX_op_qemu_ld_i64, { "L", "L", "L", "L" } }, - { INDEX_op_qemu_st_i64, { "S", "S", "S", "S" } }, + tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16)); + break; + case MO_64: + if (have_vsx) { + tcg_out_mem_long(s, 0, LXVDSX, out, base, offset); + break; + } + tcg_debug_assert((offset & 7) == 0); + tcg_out_mem_long(s, 0, LVX, out, base, offset & -16); + tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8); + elt = extract32(offset, 3, 1); +#ifndef HOST_WORDS_BIGENDIAN + elt = !elt; #endif + if (elt) { + tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8); + } else { + tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8); + } + break; + default: + g_assert_not_reached(); + } + return true; +} - { -1 }, -}; +static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, + unsigned vecl, unsigned vece, + const TCGArg *args, const int *const_args) +{ + static const uint32_t + add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM }, + sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM }, + neg_op[4] = { 0, 0, VNEGW, VNEGD }, + eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD }, + ne_op[4] = { VCMPNEB, VCMPNEH, 
VCMPNEW, 0 }, + gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD }, + gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD }, + ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 }, + usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 }, + sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 }, + ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 }, + umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD }, + smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD }, + umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD }, + smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD }, + shlv_op[4] = { VSLB, VSLH, VSLW, VSLD }, + shrv_op[4] = { VSRB, VSRH, VSRW, VSRD }, + sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD }, + mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 }, + mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 }, + muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 }, + mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 }, + pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 }, + rotl_op[4] = { VRLB, VRLH, VRLW, VRLD }; + + TCGType type = vecl + TCG_TYPE_V64; + TCGArg a0 = args[0], a1 = args[1], a2 = args[2]; + uint32_t insn; + + switch (opc) { + case INDEX_op_ld_vec: + tcg_out_ld(s, type, a0, a1, a2); + return; + case INDEX_op_st_vec: + tcg_out_st(s, type, a0, a1, a2); + return; + case INDEX_op_dupm_vec: + tcg_out_dupm_vec(s, type, vece, a0, a1, a2); + return; + + case INDEX_op_add_vec: + insn = add_op[vece]; + break; + case INDEX_op_sub_vec: + insn = sub_op[vece]; + break; + case INDEX_op_neg_vec: + insn = neg_op[vece]; + a2 = a1; + a1 = 0; + break; + case INDEX_op_mul_vec: + tcg_debug_assert(vece == MO_32 && have_isa_2_07); + insn = VMULUWM; + break; + case INDEX_op_ssadd_vec: + insn = ssadd_op[vece]; + break; + case INDEX_op_sssub_vec: + insn = sssub_op[vece]; + break; + case INDEX_op_usadd_vec: + insn = usadd_op[vece]; + break; + case INDEX_op_ussub_vec: + insn = ussub_op[vece]; + break; + case INDEX_op_smin_vec: + insn = smin_op[vece]; + break; + case INDEX_op_umin_vec: + insn = umin_op[vece]; + break; + case INDEX_op_smax_vec: + insn = smax_op[vece]; + break; + case INDEX_op_umax_vec: + insn = umax_op[vece]; + break; + case INDEX_op_shlv_vec: + insn = shlv_op[vece]; + break; + case INDEX_op_shrv_vec: + insn = shrv_op[vece]; + break; + case INDEX_op_sarv_vec: + insn = sarv_op[vece]; + break; + case INDEX_op_and_vec: + insn = VAND; + break; + case INDEX_op_or_vec: + insn = VOR; + break; + case INDEX_op_xor_vec: + insn = VXOR; + break; + case INDEX_op_andc_vec: + insn = VANDC; + break; + case INDEX_op_not_vec: + insn = VNOR; + a2 = a1; + break; + case INDEX_op_orc_vec: + insn = VORC; + break; + + case INDEX_op_cmp_vec: + switch (args[3]) { + case TCG_COND_EQ: + insn = eq_op[vece]; + break; + case TCG_COND_NE: + insn = ne_op[vece]; + break; + case TCG_COND_GT: + insn = gts_op[vece]; + break; + case TCG_COND_GTU: + insn = gtu_op[vece]; + break; + default: + g_assert_not_reached(); + } + break; + + case INDEX_op_bitsel_vec: + tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3])); + return; + + case INDEX_op_dup2_vec: + assert(TCG_TARGET_REG_BITS == 32); + /* With inputs a1 = xLxx, a2 = xHxx */ + tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1)); /* a0 = xxHL */ + tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8); /* tmp = HLxx */ + tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8); /* a0 = HLHL */ + return; + + case INDEX_op_ppc_mrgh_vec: + insn = mrgh_op[vece]; + break; + case INDEX_op_ppc_mrgl_vec: + insn = mrgl_op[vece]; + break; + case INDEX_op_ppc_muleu_vec: + insn = muleu_op[vece]; + break; + case INDEX_op_ppc_mulou_vec: + insn = mulou_op[vece]; + break; + case 
INDEX_op_ppc_pkum_vec: + insn = pkum_op[vece]; + break; + case INDEX_op_ppc_rotl_vec: + insn = rotl_op[vece]; + break; + case INDEX_op_ppc_msum_vec: + tcg_debug_assert(vece == MO_16); + tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3])); + return; + + case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ + case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ + case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ + default: + g_assert_not_reached(); + } + + tcg_debug_assert(insn != 0); + tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2)); +} + +static void expand_vec_shi(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGArg imm, TCGOpcode opci) +{ + TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); + + /* Splat w/bytes for xxspltib. */ + tcg_gen_dupi_vec(tcg_ctx, MO_8, t1, imm & ((8 << vece) - 1)); + vec_gen_3(tcg_ctx, opci, type, vece, tcgv_vec_arg(tcg_ctx, v0), + tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t1)); + tcg_temp_free_vec(tcg_ctx, t1); +} + +static void expand_vec_cmp(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2, TCGCond cond) +{ + bool need_swap = false, need_inv = false; + + tcg_debug_assert(vece <= MO_32 || have_isa_2_07); + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_GT: + case TCG_COND_GTU: + break; + case TCG_COND_NE: + if (have_isa_3_00 && vece <= MO_32) { + break; + } + /* fall through */ + case TCG_COND_LE: + case TCG_COND_LEU: + need_inv = true; + break; + case TCG_COND_LT: + case TCG_COND_LTU: + need_swap = true; + break; + case TCG_COND_GE: + case TCG_COND_GEU: + need_swap = need_inv = true; + break; + default: + g_assert_not_reached(); + } + + if (need_inv) { + cond = tcg_invert_cond(cond); + } + if (need_swap) { + TCGv_vec t1; + t1 = v1, v1 = v2, v2 = t1; + cond = tcg_swap_cond(cond); + } + + vec_gen_4(tcg_ctx, INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(tcg_ctx, v0), + tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v2), cond); + + if (need_inv) { + tcg_gen_not_vec(tcg_ctx, vece, v0, v0); + } +} + +static void expand_vec_mul(TCGContext *tcg_ctx, TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2) +{ + TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t2 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t3, t4; + + switch (vece) { + case MO_8: + case MO_16: + vec_gen_3(tcg_ctx, INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(tcg_ctx, t1), + tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v2)); + vec_gen_3(tcg_ctx, INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(tcg_ctx, t2), + tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v2)); + vec_gen_3(tcg_ctx, INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(tcg_ctx, v0), + tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t2)); + vec_gen_3(tcg_ctx, INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(tcg_ctx, t1), + tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t2)); + vec_gen_3(tcg_ctx, INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(tcg_ctx, v0), + tcgv_vec_arg(tcg_ctx, v0), tcgv_vec_arg(tcg_ctx, t1)); + break; + + case MO_32: + tcg_debug_assert(!have_isa_2_07); + t3 = tcg_temp_new_vec(tcg_ctx, type); + t4 = tcg_temp_new_vec(tcg_ctx, type); + tcg_gen_dupi_vec(tcg_ctx, MO_8, t4, -16); + vec_gen_3(tcg_ctx, INDEX_op_ppc_rotl_vec, type, MO_32, tcgv_vec_arg(tcg_ctx, t1), + tcgv_vec_arg(tcg_ctx, v2), tcgv_vec_arg(tcg_ctx, t4)); + vec_gen_3(tcg_ctx, INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(tcg_ctx, t2), + tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, v2)); + 
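+        /*
+         * With each 32-bit lane written as a = ah*2^16 + al and
+         * b = bh*2^16 + bl, the low 32 bits of a*b are
+         * al*bl + ((ah*bl + al*bh) << 16).  vmulouh above (t2) forms the
+         * full-width al*bl products; vmsumuhm below multiplies v1 against
+         * rotl(v2, 16) (t1) to accumulate the cross terms ah*bl + al*bh
+         * into t3, which is then shifted left by 16 (t4 splats -16; only
+         * the low five bits of each element are used) and added to t2.
+         * E.g. 0x00020003 * 0x00040005: al*bl = 15, cross terms = 22,
+         * result 0x0016000F.  ISA 2.07 provides vmuluwm directly, hence
+         * the assert above that this path only runs without it.
+         */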
tcg_gen_dupi_vec(tcg_ctx, MO_8, t3, 0); + vec_gen_4(tcg_ctx, INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(tcg_ctx, t3), + tcgv_vec_arg(tcg_ctx, v1), tcgv_vec_arg(tcg_ctx, t1), tcgv_vec_arg(tcg_ctx, t3)); + vec_gen_3(tcg_ctx, INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(tcg_ctx, t3), + tcgv_vec_arg(tcg_ctx, t3), tcgv_vec_arg(tcg_ctx, t4)); + tcg_gen_add_vec(tcg_ctx, MO_32, v0, t2, t3); + tcg_temp_free_vec(tcg_ctx, t3); + tcg_temp_free_vec(tcg_ctx, t4); + break; + + default: + g_assert_not_reached(); + } + tcg_temp_free_vec(tcg_ctx, t1); + tcg_temp_free_vec(tcg_ctx, t2); +} + +void tcg_expand_vec_op(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, + TCGArg a0, ...) +{ + va_list va; + TCGv_vec v0, v1, v2; + TCGArg a2; + + va_start(va, a0); + v0 = temp_tcgv_vec(tcg_ctx, arg_temp(a0)); + v1 = temp_tcgv_vec(tcg_ctx, arg_temp(va_arg(va, TCGArg))); + a2 = va_arg(va, TCGArg); + + switch (opc) { + case INDEX_op_shli_vec: + expand_vec_shi(tcg_ctx, type, vece, v0, v1, a2, INDEX_op_shlv_vec); + break; + case INDEX_op_shri_vec: + expand_vec_shi(tcg_ctx, type, vece, v0, v1, a2, INDEX_op_shrv_vec); + break; + case INDEX_op_sari_vec: + expand_vec_shi(tcg_ctx, type, vece, v0, v1, a2, INDEX_op_sarv_vec); + break; + case INDEX_op_cmp_vec: + v2 = temp_tcgv_vec(tcg_ctx, arg_temp(a2)); + expand_vec_cmp(tcg_ctx, type, vece, v0, v1, v2, va_arg(va, TCGArg)); + break; + case INDEX_op_mul_vec: + v2 = temp_tcgv_vec(tcg_ctx, arg_temp(a2)); + expand_vec_mul(tcg_ctx, type, vece, v0, v1, v2); + break; + default: + g_assert_not_reached(); + } + va_end(va); +} + +static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) +{ + static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; + static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; + static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; + static const TCGTargetOpDef S_S = { .args_ct_str = { "S", "S" } }; + static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } }; + static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; + static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } }; + static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } }; + static const TCGTargetOpDef S_S_S = { .args_ct_str = { "S", "S", "S" } }; + static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; + static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } }; + static const TCGTargetOpDef r_r_rT = { .args_ct_str = { "r", "r", "rT" } }; + static const TCGTargetOpDef r_r_rU = { .args_ct_str = { "r", "r", "rU" } }; + static const TCGTargetOpDef r_rI_ri + = { .args_ct_str = { "r", "rI", "ri" } }; + static const TCGTargetOpDef r_rI_rT + = { .args_ct_str = { "r", "rI", "rT" } }; + static const TCGTargetOpDef r_r_rZW + = { .args_ct_str = { "r", "r", "rZW" } }; + static const TCGTargetOpDef L_L_L_L + = { .args_ct_str = { "L", "L", "L", "L" } }; + static const TCGTargetOpDef S_S_S_S + = { .args_ct_str = { "S", "S", "S", "S" } }; + static const TCGTargetOpDef movc + = { .args_ct_str = { "r", "r", "ri", "rZ", "rZ" } }; + static const TCGTargetOpDef dep + = { .args_ct_str = { "r", "0", "rZ" } }; + static const TCGTargetOpDef br2 + = { .args_ct_str = { "r", "r", "ri", "ri" } }; + static const TCGTargetOpDef setc2 + = { .args_ct_str = { "r", "r", "r", "ri", "ri" } }; + static const TCGTargetOpDef add2 + = { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } }; + static const TCGTargetOpDef sub2 + = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } }; + static 
const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } }; + static const TCGTargetOpDef v_vr = { .args_ct_str = { "v", "vr" } }; + static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } }; + static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } }; + static const TCGTargetOpDef v_v_v_v + = { .args_ct_str = { "v", "v", "v", "v" } }; + + switch (op) { + case INDEX_op_goto_ptr: + return &r; + + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_ctpop_i32: + case INDEX_op_neg_i32: + case INDEX_op_not_i32: + case INDEX_op_ext8s_i32: + case INDEX_op_ext16s_i32: + case INDEX_op_bswap16_i32: + case INDEX_op_bswap32_i32: + case INDEX_op_extract_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld_i64: + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + case INDEX_op_ctpop_i64: + case INDEX_op_neg_i64: + case INDEX_op_not_i64: + case INDEX_op_ext8s_i64: + case INDEX_op_ext16s_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_bswap16_i64: + case INDEX_op_bswap32_i64: + case INDEX_op_bswap64_i64: + case INDEX_op_extract_i64: + return &r_r; + + case INDEX_op_add_i32: + case INDEX_op_and_i32: + case INDEX_op_or_i32: + case INDEX_op_xor_i32: + case INDEX_op_andc_i32: + case INDEX_op_orc_i32: + case INDEX_op_eqv_i32: + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + case INDEX_op_rotl_i32: + case INDEX_op_rotr_i32: + case INDEX_op_setcond_i32: + case INDEX_op_and_i64: + case INDEX_op_andc_i64: + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i64: + case INDEX_op_setcond_i64: + return &r_r_ri; + case INDEX_op_mul_i32: + case INDEX_op_mul_i64: + return &r_r_rI; + case INDEX_op_div_i32: + case INDEX_op_divu_i32: + case INDEX_op_nand_i32: + case INDEX_op_nor_i32: + case INDEX_op_muluh_i32: + case INDEX_op_mulsh_i32: + case INDEX_op_orc_i64: + case INDEX_op_eqv_i64: + case INDEX_op_nand_i64: + case INDEX_op_nor_i64: + case INDEX_op_div_i64: + case INDEX_op_divu_i64: + case INDEX_op_mulsh_i64: + case INDEX_op_muluh_i64: + return &r_r_r; + case INDEX_op_sub_i32: + return &r_rI_ri; + case INDEX_op_add_i64: + return &r_r_rT; + case INDEX_op_or_i64: + case INDEX_op_xor_i64: + return &r_r_rU; + case INDEX_op_sub_i64: + return &r_rI_rT; + case INDEX_op_clz_i32: + case INDEX_op_ctz_i32: + case INDEX_op_clz_i64: + case INDEX_op_ctz_i64: + return &r_r_rZW; + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + return &r_ri; + + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: + return &movc; + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + return &dep; + case INDEX_op_brcond2_i32: + return &br2; + case INDEX_op_setcond2_i32: + return &setc2; + case INDEX_op_add2_i64: + case INDEX_op_add2_i32: + return &add2; + case INDEX_op_sub2_i64: + case INDEX_op_sub2_i32: + return &sub2; + + case INDEX_op_qemu_ld_i32: + return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 + ? &r_L : &r_L_L); + case INDEX_op_qemu_st_i32: + return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32 + ? &S_S : &S_S_S); + case INDEX_op_qemu_ld_i64: + return (TCG_TARGET_REG_BITS == 64 ? 
&r_L + : TARGET_LONG_BITS == 32 ? &L_L_L : &L_L_L_L); + case INDEX_op_qemu_st_i64: + return (TCG_TARGET_REG_BITS == 64 ? &S_S + : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S); + + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_mul_vec: + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_andc_vec: + case INDEX_op_orc_vec: + case INDEX_op_cmp_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + case INDEX_op_ppc_mrgh_vec: + case INDEX_op_ppc_mrgl_vec: + case INDEX_op_ppc_muleu_vec: + case INDEX_op_ppc_mulou_vec: + case INDEX_op_ppc_pkum_vec: + case INDEX_op_ppc_rotl_vec: + case INDEX_op_dup2_vec: + return &v_v_v; + case INDEX_op_not_vec: + case INDEX_op_neg_vec: + return &v_v; + case INDEX_op_dup_vec: + return have_isa_3_00 ? &v_vr : &v_v; + case INDEX_op_ld_vec: + case INDEX_op_st_vec: + case INDEX_op_dupm_vec: + return &v_r; + case INDEX_op_bitsel_vec: + case INDEX_op_ppc_msum_vec: + return &v_v_v_v; + + default: + return NULL; + } +} + +static size_t dsize = 0; +static size_t isize = 0; static void tcg_target_init(TCGContext *s) { unsigned long hwcap = qemu_getauxval(AT_HWCAP); + unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2); + + dsize = qemu_getauxval(AT_DCACHEBSIZE); + isize = qemu_getauxval(AT_ICACHEBSIZE); + + have_isa = tcg_isa_base; if (hwcap & PPC_FEATURE_ARCH_2_06) { - have_isa_2_06 = true; + have_isa = tcg_isa_2_06; + } +#ifdef PPC_FEATURE2_ARCH_2_07 + if (hwcap2 & PPC_FEATURE2_ARCH_2_07) { + have_isa = tcg_isa_2_07; + } +#endif +#ifdef PPC_FEATURE2_ARCH_3_00 + if (hwcap2 & PPC_FEATURE2_ARCH_3_00) { + have_isa = tcg_isa_3_00; + } +#endif + +#ifdef PPC_FEATURE2_HAS_ISEL + /* Prefer explicit instruction from the kernel. */ + have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0; +#else + /* Fall back to knowing Power7 (2.06) has ISEL. */ + have_isel = have_isa_2_06; +#endif + + if (hwcap & PPC_FEATURE_HAS_ALTIVEC) { + have_altivec = true; + /* We only care about the portion of VSX that overlaps Altivec. 
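+ VSX also maps 32 further registers overlaying the FPRs, but only the 32 Altivec VRs are exposed to the allocator below; have_vsx merely enables the extra instruction forms used above (XXPERMDI, LXVDSX, XXSEL).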
*/ + if (hwcap & PPC_FEATURE_HAS_VSX) { + have_vsx = true; + } } - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); - tcg_regset_set32(s->tcg_target_call_clobber_regs, 0, - (1 << TCG_REG_R0) | - (1 << TCG_REG_R2) | - (1 << TCG_REG_R3) | - (1 << TCG_REG_R4) | - (1 << TCG_REG_R5) | - (1 << TCG_REG_R6) | - (1 << TCG_REG_R7) | - (1 << TCG_REG_R8) | - (1 << TCG_REG_R9) | - (1 << TCG_REG_R10) | - (1 << TCG_REG_R11) | - (1 << TCG_REG_R12)); + s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; + s->tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; + if (have_altivec) { + s->tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull; + s->tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull; + } - tcg_regset_clear(s->reserved_regs); + s->tcg_target_call_clobber_regs = 0; + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R0); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R2); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R3); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R4); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R5); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R6); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R7); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R8); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R9); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R10); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R11); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R12); + + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V0); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V1); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V2); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V3); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V4); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V5); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V6); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V7); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V8); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V9); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V10); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V11); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V12); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V13); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V14); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V15); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V16); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V17); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V18); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_V19); + + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */ #if defined(_CALL_SYSV) @@ -2560,29 +3787,72 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */ #endif tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */ - if (USE_REG_RA) { - tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return addr */ 
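+    /*
+     * The two vector temporaries reserved below join the fixed registers
+     * above, so the allocator never hands them out; TCG_REG_TB is
+     * withheld only when USE_REG_TB is in effect, since it then carries
+     * the current TranslationBlock pointer across the generated code.
+     */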
+ tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1); + tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2); + if (USE_REG_TB) { + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */ } - - tcg_add_target_add_op_defs(s, ppc_op_defs); } #ifdef __ELF__ +typedef struct { + DebugFrameCIE cie; + DebugFrameFDEHeader fde; + uint8_t fde_def_cfa[4]; + uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3]; +} DebugFrame; + +/* We're expecting a 2 byte uleb128 encoded value. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); + #if TCG_TARGET_REG_BITS == 64 # define ELF_HOST_MACHINE EM_PPC64 #else # define ELF_HOST_MACHINE EM_PPC #endif -#endif /* __ELF__ */ -static size_t dcache_bsize = 16; -static size_t icache_bsize = 16; +static DebugFrame debug_frame = { + .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ + .cie.id = -1, + .cie.version = 1, + .cie.code_align = 1, + .cie.data_align = (-SZR & 0x7f), /* sleb128 -SZR */ + .cie.return_column = 65, + + /* Total FDE size does not include the "len" member. */ + .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset), + + .fde_def_cfa = { + 12, TCG_REG_R1, /* DW_CFA_def_cfa r1, ... */ + (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ + (FRAME_SIZE >> 7) + }, + .fde_reg_ofs = { + /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */ + 0x11, 65, (LR_OFFSET / -SZR) & 0x7f, + } +}; + +void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) +{ + uint8_t *p = &debug_frame.fde_reg_ofs[3]; + int i; + + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) { + p[0] = 0x80 + tcg_target_callee_save_regs[i]; + p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR; + } + + debug_frame.fde.func_start = (uintptr_t)buf; + debug_frame.fde.func_len = buf_size; + + tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); +} +#endif /* __ELF__ */ void flush_icache_range(uintptr_t start, uintptr_t stop) { uintptr_t p, start1, stop1; - size_t dsize = dcache_bsize; - size_t isize = icache_bsize; start1 = start & ~(dsize - 1); stop1 = (stop + dsize - 1) & ~(dsize - 1); @@ -2599,74 +3869,3 @@ void flush_icache_range(uintptr_t start, uintptr_t stop) asm volatile ("sync" : : : "memory"); asm volatile ("isync" : : : "memory"); } - -#if defined _AIX -#include - -INITIALIZER(tcg_cache_init) -{ - icache_bsize = _system_configuration.icache_line; - dcache_bsize = _system_configuration.dcache_line; -} - -#elif defined __linux__ -INITIALIZER(tcg_cache_init) -{ - unsigned long dsize = qemu_getauxval(AT_DCACHEBSIZE); - unsigned long isize = qemu_getauxval(AT_ICACHEBSIZE); - - if (dsize == 0 || isize == 0) { - if (dsize == 0) { - fprintf(stderr, "getauxval AT_DCACHEBSIZE failed\n"); - } - if (isize == 0) { - fprintf(stderr, "getauxval AT_ICACHEBSIZE failed\n"); - } - exit(1); - } - dcache_bsize = dsize; - icache_bsize = isize; -} - -#elif defined __APPLE__ -#include -#include -#include - -INITIALIZER(tcg_cache_init) -{ - size_t len; - unsigned cacheline; - int name[2] = { CTL_HW, HW_CACHELINE }; - - len = sizeof(cacheline); - if (sysctl(name, 2, &cacheline, &len, NULL, 0)) { - perror("sysctl CTL_HW HW_CACHELINE failed"); - exit(1); - } - dcache_bsize = cacheline; - icache_bsize = cacheline; -} - -#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) -#include -#include -#include -#include -#include -#include - -INITIALIZER(tcg_cache_init) -{ - size_t len = 4; - unsigned cacheline; - - if (sysctlbyname ("machdep.cacheline_size", &cacheline, &len, NULL, 0)) { - fprintf(stderr, "sysctlbyname 
machdep.cacheline_size failed: %s\n", - strerror(errno)); - exit(1); - } - dcache_bsize = cacheline; - icache_bsize = cacheline; -} -#endif diff --git a/qemu/tcg/ppc/tcg-target.opc.h b/qemu/tcg/ppc/tcg-target.opc.h new file mode 100644 index 00000000..1373f77e --- /dev/null +++ b/qemu/tcg/ppc/tcg-target.opc.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2019 Linaro Limited + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ + +DEF(ppc_mrgh_vec, 1, 2, 0, IMPLVEC) +DEF(ppc_mrgl_vec, 1, 2, 0, IMPLVEC) +DEF(ppc_msum_vec, 1, 3, 0, IMPLVEC) +DEF(ppc_muleu_vec, 1, 2, 0, IMPLVEC) +DEF(ppc_mulou_vec, 1, 2, 0, IMPLVEC) +DEF(ppc_pkum_vec, 1, 2, 0, IMPLVEC) +DEF(ppc_rotl_vec, 1, 2, 0, IMPLVEC) diff --git a/qemu/tcg/riscv/tcg-target.h b/qemu/tcg/riscv/tcg-target.h new file mode 100644 index 00000000..032439d8 --- /dev/null +++ b/qemu/tcg/riscv/tcg-target.h @@ -0,0 +1,179 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2018 SiFive, Inc + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef RISCV_TCG_TARGET_H +#define RISCV_TCG_TARGET_H + +#if __riscv_xlen == 32 +# define TCG_TARGET_REG_BITS 32 +#elif __riscv_xlen == 64 +# define TCG_TARGET_REG_BITS 64 +#endif + +#define TCG_TARGET_INSN_UNIT_SIZE 4 +#define TCG_TARGET_TLB_DISPLACEMENT_BITS 20 +#define TCG_TARGET_NB_REGS 32 + +typedef enum { + TCG_REG_ZERO, + TCG_REG_RA, + TCG_REG_SP, + TCG_REG_GP, + TCG_REG_TP, + TCG_REG_T0, + TCG_REG_T1, + TCG_REG_T2, + TCG_REG_S0, + TCG_REG_S1, + TCG_REG_A0, + TCG_REG_A1, + TCG_REG_A2, + TCG_REG_A3, + TCG_REG_A4, + TCG_REG_A5, + TCG_REG_A6, + TCG_REG_A7, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + TCG_REG_S9, + TCG_REG_S10, + TCG_REG_S11, + TCG_REG_T3, + TCG_REG_T4, + TCG_REG_T5, + TCG_REG_T6, + + /* aliases */ + TCG_AREG0 = TCG_REG_S0, + TCG_GUEST_BASE_REG = TCG_REG_S1, + TCG_REG_TMP0 = TCG_REG_T6, + TCG_REG_TMP1 = TCG_REG_T5, + TCG_REG_TMP2 = TCG_REG_T4, +} TCGReg; + +/* used for function call generation */ +#define TCG_REG_CALL_STACK TCG_REG_SP +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_ALIGN_ARGS 1 +#define TCG_TARGET_CALL_STACK_OFFSET 0 + +/* optional instructions */ +#define TCG_TARGET_HAS_goto_ptr 1 +#define TCG_TARGET_HAS_movcond_i32 0 +#define TCG_TARGET_HAS_div_i32 1 +#define TCG_TARGET_HAS_rem_i32 1 +#define TCG_TARGET_HAS_div2_i32 0 +#define TCG_TARGET_HAS_rot_i32 0 +#define TCG_TARGET_HAS_deposit_i32 0 +#define TCG_TARGET_HAS_extract_i32 0 +#define TCG_TARGET_HAS_sextract_i32 0 +#define TCG_TARGET_HAS_extract2_i32 0 +#define TCG_TARGET_HAS_add2_i32 1 +#define TCG_TARGET_HAS_sub2_i32 1 +#define TCG_TARGET_HAS_mulu2_i32 0 +#define TCG_TARGET_HAS_muls2_i32 0 +#define TCG_TARGET_HAS_muluh_i32 (TCG_TARGET_REG_BITS == 32) +#define TCG_TARGET_HAS_mulsh_i32 (TCG_TARGET_REG_BITS == 32) +#define TCG_TARGET_HAS_ext8s_i32 1 +#define TCG_TARGET_HAS_ext16s_i32 1 +#define TCG_TARGET_HAS_ext8u_i32 1 +#define TCG_TARGET_HAS_ext16u_i32 1 +#define TCG_TARGET_HAS_bswap16_i32 0 +#define TCG_TARGET_HAS_bswap32_i32 0 +#define TCG_TARGET_HAS_not_i32 1 +#define TCG_TARGET_HAS_neg_i32 1 +#define TCG_TARGET_HAS_andc_i32 0 +#define TCG_TARGET_HAS_orc_i32 0 +#define TCG_TARGET_HAS_eqv_i32 0 +#define TCG_TARGET_HAS_nand_i32 0 +#define TCG_TARGET_HAS_nor_i32 0 +#define TCG_TARGET_HAS_clz_i32 0 +#define TCG_TARGET_HAS_ctz_i32 0 +#define TCG_TARGET_HAS_ctpop_i32 0 +#define TCG_TARGET_HAS_direct_jump 0 +#define TCG_TARGET_HAS_brcond2 1 +#define TCG_TARGET_HAS_setcond2 1 + +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_movcond_i64 0 +#define TCG_TARGET_HAS_div_i64 1 +#define TCG_TARGET_HAS_rem_i64 1 +#define TCG_TARGET_HAS_div2_i64 0 +#define TCG_TARGET_HAS_rot_i64 0 +#define TCG_TARGET_HAS_deposit_i64 0 +#define TCG_TARGET_HAS_extract_i64 0 +#define TCG_TARGET_HAS_sextract_i64 0 +#define TCG_TARGET_HAS_extract2_i64 0 +#define TCG_TARGET_HAS_extrl_i64_i32 1 +#define TCG_TARGET_HAS_extrh_i64_i32 1 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 1 +#define TCG_TARGET_HAS_ext16u_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#define TCG_TARGET_HAS_bswap16_i64 0 +#define TCG_TARGET_HAS_bswap32_i64 0 +#define TCG_TARGET_HAS_bswap64_i64 0 +#define TCG_TARGET_HAS_not_i64 1 +#define TCG_TARGET_HAS_neg_i64 1 +#define TCG_TARGET_HAS_andc_i64 0 +#define TCG_TARGET_HAS_orc_i64 0 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_clz_i64 0 +#define 
TCG_TARGET_HAS_ctz_i64 0 +#define TCG_TARGET_HAS_ctpop_i64 0 +#define TCG_TARGET_HAS_add2_i64 1 +#define TCG_TARGET_HAS_sub2_i64 1 +#define TCG_TARGET_HAS_mulu2_i64 0 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 1 +#define TCG_TARGET_HAS_mulsh_i64 1 +#endif + +static inline void flush_icache_range(uintptr_t start, uintptr_t stop) +{ + __builtin___clear_cache((char *)start, (char *)stop); +} + +/* not defined -- call should be eliminated at compile time */ +void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t); + +#define TCG_TARGET_DEFAULT_MO (0) + +#ifdef CONFIG_SOFTMMU +#define TCG_TARGET_NEED_LDST_LABELS +#endif +#define TCG_TARGET_NEED_POOL_LABELS + +#define TCG_TARGET_HAS_MEMORY_BSWAP 0 + +#endif diff --git a/qemu/tcg/riscv/tcg-target.inc.c b/qemu/tcg/riscv/tcg-target.inc.c new file mode 100644 index 00000000..2a5d3347 --- /dev/null +++ b/qemu/tcg/riscv/tcg-target.inc.c @@ -0,0 +1,1920 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2018 SiFive, Inc + * Copyright (c) 2008-2009 Arnaud Patard + * Copyright (c) 2009 Aurelien Jarno + * Copyright (c) 2008 Fabrice Bellard + * + * Based on i386/tcg-target.c and mips/tcg-target.c + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "../tcg-pool.inc.c" + +#ifdef CONFIG_DEBUG_TCG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "zero", + "ra", + "sp", + "gp", + "tp", + "t0", + "t1", + "t2", + "s0", + "s1", + "a0", + "a1", + "a2", + "a3", + "a4", + "a5", + "a6", + "a7", + "s2", + "s3", + "s4", + "s5", + "s6", + "s7", + "s8", + "s9", + "s10", + "s11", + "t3", + "t4", + "t5", + "t6" +}; +#endif + +static const int tcg_target_reg_alloc_order[] = { + /* Call saved registers */ + /* TCG_REG_S0 reserved for TCG_AREG0 */ + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + TCG_REG_S9, + TCG_REG_S10, + TCG_REG_S11, + + /* Call clobbered registers */ + TCG_REG_T0, + TCG_REG_T1, + TCG_REG_T2, + TCG_REG_T3, + TCG_REG_T4, + TCG_REG_T5, + TCG_REG_T6, + + /* Argument registers */ + TCG_REG_A0, + TCG_REG_A1, + TCG_REG_A2, + TCG_REG_A3, + TCG_REG_A4, + TCG_REG_A5, + TCG_REG_A6, + TCG_REG_A7, +}; + +static const int tcg_target_call_iarg_regs[] = { + TCG_REG_A0, + TCG_REG_A1, + TCG_REG_A2, + TCG_REG_A3, + TCG_REG_A4, + TCG_REG_A5, + TCG_REG_A6, + TCG_REG_A7, +}; + +static const int tcg_target_call_oarg_regs[] = { + TCG_REG_A0, + TCG_REG_A1, +}; + +#define TCG_CT_CONST_ZERO 0x100 +#define TCG_CT_CONST_S12 0x200 +#define TCG_CT_CONST_N12 0x400 +#define TCG_CT_CONST_M12 0x800 + +static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) +{ + if (TCG_TARGET_REG_BITS == 32) { + return sextract32(val, pos, len); + } else { + return sextract64(val, pos, len); + } +} + +/* parse target specific constraints */ +static const char *target_parse_constraint(TCGArgConstraint *ct, + const char *ct_str, TCGType type) +{ + switch (*ct_str++) { + case 'r': + ct->ct |= TCG_CT_REG; + ct->u.regs = 0xffffffff; + break; + case 'L': + /* qemu_ld/qemu_st constraint */ + ct->ct |= TCG_CT_REG; + ct->u.regs = 0xffffffff; + /* qemu_ld/qemu_st uses TCG_REG_TMP0 */ +#if defined(CONFIG_SOFTMMU) + tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[0]); + tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[1]); + tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[2]); + tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[3]); + tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[4]); +#endif + break; + case 'I': + ct->ct |= TCG_CT_CONST_S12; + break; + case 'N': + ct->ct |= TCG_CT_CONST_N12; + break; + case 'M': + ct->ct |= TCG_CT_CONST_M12; + break; + case 'Z': + /* we can use a zero immediate as a zero register argument. 
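+ The zero register x0 always reads as 0, so a constant 0 operand can be encoded by naming x0 rather than materializing an immediate.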
*/ + ct->ct |= TCG_CT_CONST_ZERO; + break; + default: + return NULL; + } + return ct_str; +} + +/* test if a constant matches the constraint */ +static int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct = arg_ct->ct; + if (ct & TCG_CT_CONST) { + return 1; + } + if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return 1; + } + if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { + return 1; + } + if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) { + return 1; + } + if ((ct & TCG_CT_CONST_M12) && val >= -0xfff && val <= 0xfff) { + return 1; + } + return 0; +} + +/* + * RISC-V Base ISA opcodes (IM) + */ + +typedef enum { + OPC_ADD = 0x33, + OPC_ADDI = 0x13, + OPC_AND = 0x7033, + OPC_ANDI = 0x7013, + OPC_AUIPC = 0x17, + OPC_BEQ = 0x63, + OPC_BGE = 0x5063, + OPC_BGEU = 0x7063, + OPC_BLT = 0x4063, + OPC_BLTU = 0x6063, + OPC_BNE = 0x1063, + OPC_DIV = 0x2004033, + OPC_DIVU = 0x2005033, + OPC_JAL = 0x6f, + OPC_JALR = 0x67, + OPC_LB = 0x3, + OPC_LBU = 0x4003, + OPC_LD = 0x3003, + OPC_LH = 0x1003, + OPC_LHU = 0x5003, + OPC_LUI = 0x37, + OPC_LW = 0x2003, + OPC_LWU = 0x6003, + OPC_MUL = 0x2000033, + OPC_MULH = 0x2001033, + OPC_MULHSU = 0x2002033, + OPC_MULHU = 0x2003033, + OPC_OR = 0x6033, + OPC_ORI = 0x6013, + OPC_REM = 0x2006033, + OPC_REMU = 0x2007033, + OPC_SB = 0x23, + OPC_SD = 0x3023, + OPC_SH = 0x1023, + OPC_SLL = 0x1033, + OPC_SLLI = 0x1013, + OPC_SLT = 0x2033, + OPC_SLTI = 0x2013, + OPC_SLTIU = 0x3013, + OPC_SLTU = 0x3033, + OPC_SRA = 0x40005033, + OPC_SRAI = 0x40005013, + OPC_SRL = 0x5033, + OPC_SRLI = 0x5013, + OPC_SUB = 0x40000033, + OPC_SW = 0x2023, + OPC_XOR = 0x4033, + OPC_XORI = 0x4013, + +#if TCG_TARGET_REG_BITS == 64 + OPC_ADDIW = 0x1b, + OPC_ADDW = 0x3b, + OPC_DIVUW = 0x200503b, + OPC_DIVW = 0x200403b, + OPC_MULW = 0x200003b, + OPC_REMUW = 0x200703b, + OPC_REMW = 0x200603b, + OPC_SLLIW = 0x101b, + OPC_SLLW = 0x103b, + OPC_SRAIW = 0x4000501b, + OPC_SRAW = 0x4000503b, + OPC_SRLIW = 0x501b, + OPC_SRLW = 0x503b, + OPC_SUBW = 0x4000003b, +#else + /* Simplify code throughout by defining aliases for RV32. 
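+ On RV32 the word-sized W forms coincide with the native operations, so later code can use the W names unconditionally.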
*/ + OPC_ADDIW = OPC_ADDI, + OPC_ADDW = OPC_ADD, + OPC_DIVUW = OPC_DIVU, + OPC_DIVW = OPC_DIV, + OPC_MULW = OPC_MUL, + OPC_REMUW = OPC_REMU, + OPC_REMW = OPC_REM, + OPC_SLLIW = OPC_SLLI, + OPC_SLLW = OPC_SLL, + OPC_SRAIW = OPC_SRAI, + OPC_SRAW = OPC_SRA, + OPC_SRLIW = OPC_SRLI, + OPC_SRLW = OPC_SRL, + OPC_SUBW = OPC_SUB, +#endif + + OPC_FENCE = 0x0000000f, +} RISCVInsn; + +/* + * RISC-V immediate and instruction encoders (excludes 16-bit RVC) + */ + +/* Type-R */ + +static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2) +{ + return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20; +} + +/* Type-I */ + +static int32_t encode_imm12(uint32_t imm) +{ + return (imm & 0xfff) << 20; +} + +static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm) +{ + return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm); +} + +/* Type-S */ + +static int32_t encode_simm12(uint32_t imm) +{ + int32_t ret = 0; + + ret |= (imm & 0xFE0) << 20; + ret |= (imm & 0x1F) << 7; + + return ret; +} + +static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm) +{ + return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm); +} + +/* Type-SB */ + +static int32_t encode_sbimm12(uint32_t imm) +{ + int32_t ret = 0; + + ret |= (imm & 0x1000) << 19; + ret |= (imm & 0x7e0) << 20; + ret |= (imm & 0x1e) << 7; + ret |= (imm & 0x800) >> 4; + + return ret; +} + +static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm) +{ + return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm); +} + +/* Type-U */ + +static int32_t encode_uimm20(uint32_t imm) +{ + return imm & 0xfffff000; +} + +static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm) +{ + return opc | (rd & 0x1f) << 7 | encode_uimm20(imm); +} + +/* Type-UJ */ + +static int32_t encode_ujimm20(uint32_t imm) +{ + int32_t ret = 0; + + ret |= (imm & 0x0007fe) << (21 - 1); + ret |= (imm & 0x000800) << (20 - 11); + ret |= (imm & 0x0ff000) << (12 - 12); + ret |= (imm & 0x100000) << (31 - 20); + + return ret; +} + +static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm) +{ + return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm); +} + +/* + * RISC-V instruction emitters + */ + +static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc, + TCGReg rd, TCGReg rs1, TCGReg rs2) +{ + tcg_out32(s, encode_r(opc, rd, rs1, rs2)); +} + +static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc, + TCGReg rd, TCGReg rs1, TCGArg imm) +{ + tcg_out32(s, encode_i(opc, rd, rs1, imm)); +} + +static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc, + TCGReg rs1, TCGReg rs2, uint32_t imm) +{ + tcg_out32(s, encode_s(opc, rs1, rs2, imm)); +} + +static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc, + TCGReg rs1, TCGReg rs2, uint32_t imm) +{ + tcg_out32(s, encode_sb(opc, rs1, rs2, imm)); +} + +static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc, + TCGReg rd, uint32_t imm) +{ + tcg_out32(s, encode_u(opc, rd, imm)); +} + +static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc, + TCGReg rd, uint32_t imm) +{ + tcg_out32(s, encode_uj(opc, rd, imm)); +} + +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + int i; + for (i = 0; i < count; ++i) { + p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0); + } +} + +/* + * Relocations + */ + +static bool reloc_sbimm12(tcg_insn_unit *code_ptr, tcg_insn_unit *target) +{ + intptr_t offset = (intptr_t)target - (intptr_t)code_ptr; + + if (offset == sextreg(offset, 1, 12) << 1) { + code_ptr[0] |= 
encode_sbimm12(offset); + return true; + } + + return false; +} + +static bool reloc_jimm20(tcg_insn_unit *code_ptr, tcg_insn_unit *target) +{ + intptr_t offset = (intptr_t)target - (intptr_t)code_ptr; + + if (offset == sextreg(offset, 1, 20) << 1) { + code_ptr[0] |= encode_ujimm20(offset); + return true; + } + + return false; +} + +static bool reloc_call(tcg_insn_unit *code_ptr, tcg_insn_unit *target) +{ + intptr_t offset = (intptr_t)target - (intptr_t)code_ptr; + int32_t lo = sextreg(offset, 0, 12); + int32_t hi = offset - lo; + + if (offset == hi + lo) { + code_ptr[0] |= encode_uimm20(hi); + code_ptr[1] |= encode_imm12(lo); + return true; + } + + return false; +} + +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + uint32_t insn = *code_ptr; + intptr_t diff; + bool short_jmp; + + tcg_debug_assert(addend == 0); + + switch (type) { + case R_RISCV_BRANCH: + diff = value - (uintptr_t)code_ptr; + short_jmp = diff == sextreg(diff, 0, 12); + if (short_jmp) { + return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value); + } else { + /* Invert the condition */ + insn = insn ^ (1 << 12); + /* Clear the offset */ + insn &= 0x01fff07f; + /* Set the offset to the PC + 8 */ + insn |= encode_sbimm12(8); + + /* Move forward */ + code_ptr[0] = insn; + + /* Overwrite the NOP with jal x0,value */ + diff = value - (uintptr_t)(code_ptr + 1); + insn = encode_uj(OPC_JAL, TCG_REG_ZERO, diff); + code_ptr[1] = insn; + + return true; + } + break; + case R_RISCV_JAL: + return reloc_jimm20(code_ptr, (tcg_insn_unit *)value); + break; + case R_RISCV_CALL: + return reloc_call(code_ptr, (tcg_insn_unit *)value); + break; + default: + tcg_abort(); + } +} + +/* + * TCG intrinsics + */ + +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) +{ + if (ret == arg) { + return true; + } + switch (type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0); + break; + default: + g_assert_not_reached(); + } + return true; +} + +static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, + tcg_target_long val) +{ + tcg_target_long lo, hi, tmp; + int shift, ret; + + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { + val = (int32_t)val; + } + + lo = sextreg(val, 0, 12); + if (val == lo) { + tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo); + return; + } + + hi = val - lo; + if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) { + tcg_out_opc_upper(s, OPC_LUI, rd, hi); + if (lo != 0) { + tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo); + } + return; + } + + /* We can only be here if TCG_TARGET_REG_BITS != 32 */ + tmp = tcg_pcrel_diff(s, (void *)val); + if (tmp == (int32_t)tmp) { + tcg_out_opc_upper(s, OPC_AUIPC, rd, 0); + tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0); + ret = reloc_call(s->code_ptr - 2, (tcg_insn_unit *)val); + tcg_debug_assert(ret == true); + return; + } + + /* Look for a single 20-bit section. */ + shift = ctz64(val); + tmp = val >> shift; + if (tmp == sextreg(tmp, 0, 20)) { + tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12); + if (shift > 12) { + tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12); + } else { + tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift); + } + return; + } + + /* Look for a few high zero bits, with lots of bits set in the middle. 
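+ After shifting left by clz64(val), the significant bits sit at the top of the word, where they can be materialized with LUI (20 bits) or ADDI (12 bits) and moved back down with a logical shift right.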
*/ + shift = clz64(val); + tmp = val << shift; + if (tmp == sextreg(tmp, 12, 20) << 12) { + tcg_out_opc_upper(s, OPC_LUI, rd, tmp); + tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift); + return; + } else if (tmp == sextreg(tmp, 0, 12)) { + tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp); + tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift); + return; + } + + /* Drop into the constant pool. */ + new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0); + tcg_out_opc_upper(s, OPC_AUIPC, rd, 0); + tcg_out_opc_imm(s, OPC_LD, rd, rd, 0); +} + +static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff); +} + +static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16); + tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16); +} + +static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32); + tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32); +} + +static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24); + tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24); +} + +static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16); + tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16); +} + +static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0); +} + +static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data, + TCGReg addr, intptr_t offset) +{ + intptr_t imm12 = sextreg(offset, 0, 12); + + if (offset != imm12) { + intptr_t diff = offset - (uintptr_t)s->code_ptr; + + if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { + imm12 = sextreg(diff, 0, 12); + tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); + if (addr != TCG_REG_ZERO) { + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr); + } + } + addr = TCG_REG_TMP2; + } + + switch (opc) { + case OPC_SB: + case OPC_SH: + case OPC_SW: + case OPC_SD: + tcg_out_opc_store(s, opc, addr, data, imm12); + break; + case OPC_LB: + case OPC_LBU: + case OPC_LH: + case OPC_LHU: + case OPC_LW: + case OPC_LWU: + case OPC_LD: + tcg_out_opc_imm(s, opc, data, addr, imm12); + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32); + tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2); +} + +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32); + tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2); +} + +static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) +{ + if (val == 0) { + tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); + return true; + } + return false; +} + +static void tcg_out_addsub2(TCGContext *s, + TCGReg rl, TCGReg rh, + TCGReg al, TCGReg ah, + TCGArg bl, TCGArg bh, + bool cbl, bool cbh, bool is_sub, bool is32bit) +{ + const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD; + const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI; + const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB; + TCGReg th = TCG_REG_TMP1; + + /* If we have a negative constant such that negating it would + make the high part zero, we can (usually) eliminate one insn. 
*/
+    if (cbl && cbh && bh == -1 && bl != 0) {
+        bl = -bl;
+        bh = 0;
+        is_sub = !is_sub;
+    }
+
+    /* By operating on the high part first, we get to use the final
+       carry operation to move back from the temporary. */
+    if (!cbh) {
+        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
+    } else if (bh != 0 || ah == rl) {
+        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
+    } else {
+        th = ah;
+    }
+
+    /* Note that tcg optimization should eliminate the bl == 0 case. */
+    if (is_sub) {
+        if (cbl) {
+            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
+            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
+        } else {
+            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
+            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
+        }
+        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
+    } else {
+        if (cbl) {
+            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
+            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
+        } else if (rl == al && rl == bl) {
+            /* Doubling: carry-out is the input msb; opc_add keeps the
+               operands in register form for the R-type emitter. */
+            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
+            tcg_out_opc_reg(s, opc_add, rl, al, bl);
+        } else {
+            tcg_out_opc_reg(s, opc_add, rl, al, bl);
+            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
+                            rl, (rl == bl ? al : bl));
+        }
+        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
+    }
+}
+
+static const struct {
+    RISCVInsn op;
+    bool swap;
+} tcg_brcond_to_riscv[] = {
+    [TCG_COND_EQ]  = { OPC_BEQ,  false },
+    [TCG_COND_NE]  = { OPC_BNE,  false },
+    [TCG_COND_LT]  = { OPC_BLT,  false },
+    [TCG_COND_GE]  = { OPC_BGE,  false },
+    [TCG_COND_LE]  = { OPC_BGE,  true  },
+    [TCG_COND_GT]  = { OPC_BLT,  true  },
+    [TCG_COND_LTU] = { OPC_BLTU, false },
+    [TCG_COND_GEU] = { OPC_BGEU, false },
+    [TCG_COND_LEU] = { OPC_BGEU, true  },
+    [TCG_COND_GTU] = { OPC_BLTU, true  }
+};
+
+static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
+                           TCGReg arg2, TCGLabel *l)
+{
+    RISCVInsn op = tcg_brcond_to_riscv[cond].op;
+
+    tcg_debug_assert(op != 0);
+
+    if (tcg_brcond_to_riscv[cond].swap) {
+        TCGReg t = arg1;
+        arg1 = arg2;
+        arg2 = t;
+    }
+
+    if (l->has_value) {
+        intptr_t diff = tcg_pcrel_diff(s, l->u.value_ptr);
+        if (diff == sextreg(diff, 0, 12)) {
+            tcg_out_opc_branch(s, op, arg1, arg2, diff);
+        } else {
+            /* Invert the conditional branch.
*/ + tcg_out_opc_branch(s, op ^ (1 << 12), arg1, arg2, 8); + tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, diff - 4); + } + } else { + tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0); + tcg_out_opc_branch(s, op, arg1, arg2, 0); + /* NOP to allow patching later */ + tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0); + } +} + +static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg arg1, TCGReg arg2) +{ + switch (cond) { + case TCG_COND_EQ: + tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2); + tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1); + break; + case TCG_COND_NE: + tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2); + tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret); + break; + case TCG_COND_LT: + tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2); + break; + case TCG_COND_GE: + tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2); + tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); + break; + case TCG_COND_LE: + tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1); + tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); + break; + case TCG_COND_GT: + tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1); + break; + case TCG_COND_LTU: + tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2); + break; + case TCG_COND_GEU: + tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2); + tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); + break; + case TCG_COND_LEU: + tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1); + tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); + break; + case TCG_COND_GTU: + tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1); + break; + default: + g_assert_not_reached(); + break; + } +} + +static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, + TCGReg bl, TCGReg bh, TCGLabel *l) +{ + /* todo */ + g_assert_not_reached(); +} + +static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) +{ + /* todo */ + g_assert_not_reached(); +} + +static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target) +{ + ptrdiff_t offset = tcg_pcrel_diff(s, target); + tcg_debug_assert(offset == sextreg(offset, 1, 20) << 1); + tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, offset); +} + +static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail) +{ + TCGReg link = tail ? 
TCG_REG_ZERO : TCG_REG_RA;
+    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
+    int ret;
+
+    if (offset == sextreg(offset, 1, 20) << 1) {
+        /* short jump: -2097150 to 2097152 */
+        tcg_out_opc_jump(s, OPC_JAL, link, offset);
+    } else if (TCG_TARGET_REG_BITS == 32 ||
+               offset == sextreg(offset, 1, 31) << 1) {
+        /* long jump: -2147483646 to 2147483648 */
+        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
+        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
+        ret = reloc_call(s->code_ptr - 2, arg);
+        tcg_debug_assert(ret == true);
+    } else if (TCG_TARGET_REG_BITS == 64) {
+        /* far jump: 64-bit */
+        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
+        tcg_target_long base = (tcg_target_long)arg - imm;
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
+        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
+    } else {
+        g_assert_not_reached();
+    }
+}
+
+static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
+{
+    tcg_out_call_int(s, arg, false);
+}
+
+static void tcg_out_mb(TCGContext *s, TCGArg a0)
+{
+    tcg_insn_unit insn = OPC_FENCE;
+
+    if (a0 & TCG_MO_LD_LD) {
+        insn |= 0x02200000;
+    }
+    if (a0 & TCG_MO_ST_LD) {
+        insn |= 0x01200000;
+    }
+    if (a0 & TCG_MO_LD_ST) {
+        insn |= 0x02100000;
+    }
+    if (a0 & TCG_MO_ST_ST) {
+        insn |= 0x01100000;
+    }
+    tcg_out32(s, insn);
+}
+
+/*
+ * Load/store and TLB
+ */
+
+#if defined(CONFIG_SOFTMMU)
+#include "../tcg-ldst.inc.c"
+
+/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
+ *                                     TCGMemOpIdx oi, uintptr_t ra)
+ */
+static void * const qemu_ld_helpers[16] = {
+    [MO_UB]   = helper_ret_ldub_mmu,
+    [MO_SB]   = helper_ret_ldsb_mmu,
+    [MO_LEUW] = helper_le_lduw_mmu,
+    [MO_LESW] = helper_le_ldsw_mmu,
+    [MO_LEUL] = helper_le_ldul_mmu,
+#if TCG_TARGET_REG_BITS == 64
+    [MO_LESL] = helper_le_ldsl_mmu,
+#endif
+    [MO_LEQ]  = helper_le_ldq_mmu,
+    [MO_BEUW] = helper_be_lduw_mmu,
+    [MO_BESW] = helper_be_ldsw_mmu,
+    [MO_BEUL] = helper_be_ldul_mmu,
+#if TCG_TARGET_REG_BITS == 64
+    [MO_BESL] = helper_be_ldsl_mmu,
+#endif
+    [MO_BEQ]  = helper_be_ldq_mmu,
+};
+
+/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
+ *                                     uintxx_t val, TCGMemOpIdx oi,
+ *                                     uintptr_t ra)
+ */
+static void * const qemu_st_helpers[16] = {
+    [MO_UB]   = helper_ret_stb_mmu,
+    [MO_LEUW] = helper_le_stw_mmu,
+    [MO_LEUL] = helper_le_stl_mmu,
+    [MO_LEQ]  = helper_le_stq_mmu,
+    [MO_BEUW] = helper_be_stw_mmu,
+    [MO_BEUL] = helper_be_stl_mmu,
+    [MO_BEQ]  = helper_be_stq_mmu,
+};
+
+/* We don't support oversize guests */
+QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);
+
+/* We expect to use a 12-bit negative offset from ENV.
*/ +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); + +static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, + TCGReg addrh, TCGMemOpIdx oi, + tcg_insn_unit **label_ptr, bool is_load) +{ +#ifdef TARGET_ARM + struct uc_struct *uc = s->uc; +#endif + MemOp opc = get_memop(oi); + unsigned s_bits = opc & MO_SIZE; + unsigned a_bits = get_alignment_bits(opc); + tcg_target_long compare_mask; + int mem_index = get_mmuidx(oi); + int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); + int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); + int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); + TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0; + + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs); + + tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); + + /* Load the tlb comparator and the addend. */ + tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, + is_load ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write)); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, + offsetof(CPUTLBEntry, addend)); + + /* We don't support unaligned accesses. */ + if (a_bits < s_bits) { + a_bits = s_bits; + } + /* Clear the non-page, non-alignment bits from the address. */ + compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); + if (compare_mask == sextreg(compare_mask, 0, 12)) { + tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask); + } else { + tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); + tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl); + } + + /* Compare masked address with the TLB entry. */ + label_ptr[0] = s->code_ptr; + tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0); + /* NOP to allow patching later */ + tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0); + + /* TLB Hit - translate address using addend. 
*/ + if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { + tcg_out_ext32u(s, TCG_REG_TMP0, addrl); + addrl = TCG_REG_TMP0; + } + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl); +} + +static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, + TCGType ext, + TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addrhi, + void *raddr, tcg_insn_unit **label_ptr) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->oi = oi; + label->type = ext; + label->datalo_reg = datalo; + label->datahi_reg = datahi; + label->addrlo_reg = addrlo; + label->addrhi_reg = addrhi; + label->raddr = raddr; + label->label_ptr[0] = label_ptr[0]; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + TCGMemOpIdx oi = l->oi; + MemOp opc = get_memop(oi); + TCGReg a0 = tcg_target_call_iarg_regs[0]; + TCGReg a1 = tcg_target_call_iarg_regs[1]; + TCGReg a2 = tcg_target_call_iarg_regs[2]; + TCGReg a3 = tcg_target_call_iarg_regs[3]; + + /* We don't support oversize guests */ + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + g_assert_not_reached(); + } + + /* resolve label address */ + if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, + (intptr_t) s->code_ptr, 0)) { + return false; + } + + /* call load helper */ + tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0); + tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg); + tcg_out_movi(s, TCG_TYPE_PTR, a2, oi); + tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr); + + tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]); + tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0); + + tcg_out_goto(s, l->raddr); + return true; +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + TCGMemOpIdx oi = l->oi; + MemOp opc = get_memop(oi); + MemOp s_bits = opc & MO_SIZE; + TCGReg a0 = tcg_target_call_iarg_regs[0]; + TCGReg a1 = tcg_target_call_iarg_regs[1]; + TCGReg a2 = tcg_target_call_iarg_regs[2]; + TCGReg a3 = tcg_target_call_iarg_regs[3]; + TCGReg a4 = tcg_target_call_iarg_regs[4]; + + /* We don't support oversize guests */ + if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) { + g_assert_not_reached(); + } + + /* resolve label address */ + if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, + (intptr_t) s->code_ptr, 0)) { + return false; + } + + /* call store helper */ + tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0); + tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg); + tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg); + switch (s_bits) { + case MO_8: + tcg_out_ext8u(s, a2, a2); + break; + case MO_16: + tcg_out_ext16u(s, a2, a2); + break; + default: + break; + } + tcg_out_movi(s, TCG_TYPE_PTR, a3, oi); + tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr); + + tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]); + + tcg_out_goto(s, l->raddr); + return true; +} +#endif /* CONFIG_SOFTMMU */ + +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, + TCGReg base, MemOp opc, bool is_64) +{ + const MemOp bswap = opc & MO_BSWAP; + + /* We don't yet handle byteswapping, assert */ + g_assert(!bswap); + + switch (opc & (MO_SSIZE)) { + case MO_UB: + tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); + break; + case MO_SB: + tcg_out_opc_imm(s, OPC_LB, lo, base, 0); + break; + case MO_UW: + tcg_out_opc_imm(s, OPC_LHU, lo, base, 0); + break; + case MO_SW: + tcg_out_opc_imm(s, OPC_LH, lo, base, 0); + break; + case MO_UL: + if (TCG_TARGET_REG_BITS == 64 && is_64) { + tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); + break; + } + /* FALLTHRU */ + case MO_SL: + 
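/* LW sign-extends the loaded word, which is what MO_SL requires;
+           for the MO_UL fall-through with a 32-bit result type the
+           high bits are ignored, so the same instruction serves both. */
+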
tcg_out_opc_imm(s, OPC_LW, lo, base, 0); + break; + case MO_Q: + /* Prefer to load from offset 0 first, but allow for overlap. */ + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_opc_imm(s, OPC_LD, lo, base, 0); + } else if (lo != base) { + tcg_out_opc_imm(s, OPC_LW, lo, base, 0); + tcg_out_opc_imm(s, OPC_LW, hi, base, 4); + } else { + tcg_out_opc_imm(s, OPC_LW, hi, base, 4); + tcg_out_opc_imm(s, OPC_LW, lo, base, 0); + } + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) +{ + TCGReg addr_regl, addr_regh __attribute__((unused)); + TCGReg data_regl, data_regh; + TCGMemOpIdx oi; + MemOp opc; +#if defined(CONFIG_SOFTMMU) + tcg_insn_unit *label_ptr[1]; +#endif + TCGReg base = TCG_REG_TMP0; + + data_regl = *args++; + data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); + addr_regl = *args++; + addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); + oi = *args++; + opc = get_memop(oi); + +#if defined(CONFIG_SOFTMMU) + tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1); + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); + add_qemu_ldst_label(s, 1, oi, + (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), + data_regl, data_regh, addr_regl, addr_regh, + s->code_ptr, label_ptr); +#else + if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { + tcg_out_ext32u(s, base, addr_regl); + addr_regl = base; + } + + if (guest_base == 0) { + tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO); + } else { + tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl); + } + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); +#endif +} + +static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, + TCGReg base, MemOp opc) +{ + const MemOp bswap = opc & MO_BSWAP; + + /* We don't yet handle byteswapping, assert */ + g_assert(!bswap); + + switch (opc & (MO_SSIZE)) { + case MO_8: + tcg_out_opc_store(s, OPC_SB, base, lo, 0); + break; + case MO_16: + tcg_out_opc_store(s, OPC_SH, base, lo, 0); + break; + case MO_32: + tcg_out_opc_store(s, OPC_SW, base, lo, 0); + break; + case MO_64: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_opc_store(s, OPC_SD, base, lo, 0); + } else { + tcg_out_opc_store(s, OPC_SW, base, lo, 0); + tcg_out_opc_store(s, OPC_SW, base, hi, 4); + } + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) +{ + TCGReg addr_regl, addr_regh __attribute__((unused)); + TCGReg data_regl, data_regh; + TCGMemOpIdx oi; + MemOp opc; +#if defined(CONFIG_SOFTMMU) + tcg_insn_unit *label_ptr[1]; +#endif + TCGReg base = TCG_REG_TMP0; + + data_regl = *args++; + data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0); + addr_regl = *args++; + addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0); + oi = *args++; + opc = get_memop(oi); + +#if defined(CONFIG_SOFTMMU) + tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0); + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); + add_qemu_ldst_label(s, 0, oi, + (is_64 ? 
TCG_TYPE_I64 : TCG_TYPE_I32), + data_regl, data_regh, addr_regl, addr_regh, + s->code_ptr, label_ptr); +#else + if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { + tcg_out_ext32u(s, base, addr_regl); + addr_regl = base; + } + + if (guest_base == 0) { + tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO); + } else { + tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl); + } + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); +#endif +} + +static tcg_insn_unit *tb_ret_addr; + +static void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg *args, const int *const_args) +{ + TCGArg a0 = args[0]; + TCGArg a1 = args[1]; + TCGArg a2 = args[2]; + int c2 = const_args[2]; + + switch (opc) { + case INDEX_op_exit_tb: + /* Reuse the zeroing that exists for goto_ptr. */ + if (a0 == 0) { + tcg_out_call_int(s, s->code_gen_epilogue, true); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); + tcg_out_call_int(s, tb_ret_addr, true); + } + break; + + case INDEX_op_goto_tb: + assert(s->tb_jmp_insn_offset == 0); + /* indirect jump method */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO, + (uintptr_t)(s->tb_jmp_target_addr + a0)); + tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0); + set_jmp_reset_offset(s, a0); + break; + + case INDEX_op_goto_ptr: + tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0); + break; + + case INDEX_op_br: + tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0); + tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0); + break; + + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + tcg_out_ldst(s, OPC_LBU, a0, a1, a2); + break; + case INDEX_op_ld8s_i32: + case INDEX_op_ld8s_i64: + tcg_out_ldst(s, OPC_LB, a0, a1, a2); + break; + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + tcg_out_ldst(s, OPC_LHU, a0, a1, a2); + break; + case INDEX_op_ld16s_i32: + case INDEX_op_ld16s_i64: + tcg_out_ldst(s, OPC_LH, a0, a1, a2); + break; + case INDEX_op_ld32u_i64: + tcg_out_ldst(s, OPC_LWU, a0, a1, a2); + break; + case INDEX_op_ld_i32: + case INDEX_op_ld32s_i64: + tcg_out_ldst(s, OPC_LW, a0, a1, a2); + break; + case INDEX_op_ld_i64: + tcg_out_ldst(s, OPC_LD, a0, a1, a2); + break; + + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + tcg_out_ldst(s, OPC_SB, a0, a1, a2); + break; + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + tcg_out_ldst(s, OPC_SH, a0, a1, a2); + break; + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + tcg_out_ldst(s, OPC_SW, a0, a1, a2); + break; + case INDEX_op_st_i64: + tcg_out_ldst(s, OPC_SD, a0, a1, a2); + break; + + case INDEX_op_add_i32: + if (c2) { + tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2); + } + break; + case INDEX_op_add_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2); + } + break; + + case INDEX_op_sub_i32: + if (c2) { + tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2); + } else { + tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2); + } + break; + case INDEX_op_sub_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2); + } else { + tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2); + } + break; + + case INDEX_op_and_i32: + case INDEX_op_and_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_AND, a0, a1, a2); + } + break; + + case INDEX_op_or_i32: + case INDEX_op_or_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_OR, a0, a1, a2); + } + break; + + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + if (c2) { + 
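/* c2 is const_args[2]: the second source operand matched the "rI"
+               constraint, so it is a constant that fits the sign-extended
+               12-bit I-type immediate field. */
+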
tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2); + } + break; + + case INDEX_op_not_i32: + case INDEX_op_not_i64: + tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1); + break; + + case INDEX_op_neg_i32: + tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1); + break; + case INDEX_op_neg_i64: + tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1); + break; + + case INDEX_op_mul_i32: + tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2); + break; + case INDEX_op_mul_i64: + tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2); + break; + + case INDEX_op_div_i32: + tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2); + break; + case INDEX_op_div_i64: + tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2); + break; + + case INDEX_op_divu_i32: + tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2); + break; + case INDEX_op_divu_i64: + tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2); + break; + + case INDEX_op_rem_i32: + tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2); + break; + case INDEX_op_rem_i64: + tcg_out_opc_reg(s, OPC_REM, a0, a1, a2); + break; + + case INDEX_op_remu_i32: + tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2); + break; + case INDEX_op_remu_i64: + tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2); + break; + + case INDEX_op_shl_i32: + if (c2) { + tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2); + } + break; + case INDEX_op_shl_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2); + } + break; + + case INDEX_op_shr_i32: + if (c2) { + tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2); + } + break; + case INDEX_op_shr_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2); + } + break; + + case INDEX_op_sar_i32: + if (c2) { + tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2); + } + break; + case INDEX_op_sar_i64: + if (c2) { + tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2); + } + break; + + case INDEX_op_add2_i32: + tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], + const_args[4], const_args[5], false, true); + break; + case INDEX_op_add2_i64: + tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], + const_args[4], const_args[5], false, false); + break; + case INDEX_op_sub2_i32: + tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], + const_args[4], const_args[5], true, true); + break; + case INDEX_op_sub2_i64: + tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], + const_args[4], const_args[5], true, false); + break; + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); + break; + case INDEX_op_brcond2_i32: + tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5])); + break; + + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + tcg_out_setcond(s, args[3], a0, a1, a2); + break; + case INDEX_op_setcond2_i32: + tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); + break; + + case INDEX_op_qemu_ld_i32: + tcg_out_qemu_ld(s, args, false); + break; + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, args, true); + break; + case INDEX_op_qemu_st_i32: + tcg_out_qemu_st(s, args, false); + break; + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, args, true); + break; + + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + tcg_out_ext8u(s, a0, a1); + break; + + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + 
tcg_out_ext16u(s, a0, a1); + break; + + case INDEX_op_ext32u_i64: + case INDEX_op_extu_i32_i64: + tcg_out_ext32u(s, a0, a1); + break; + + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + tcg_out_ext8s(s, a0, a1); + break; + + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + tcg_out_ext16s(s, a0, a1); + break; + + case INDEX_op_ext32s_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_ext_i32_i64: + tcg_out_ext32s(s, a0, a1); + break; + + case INDEX_op_extrh_i64_i32: + tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32); + break; + + case INDEX_op_mulsh_i32: + case INDEX_op_mulsh_i64: + tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2); + break; + + case INDEX_op_muluh_i32: + case INDEX_op_muluh_i64: + tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2); + break; + + case INDEX_op_mb: + tcg_out_mb(s, a0); + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. */ + default: + g_assert_not_reached(); + } +} + +static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) +{ + static const TCGTargetOpDef r + = { .args_ct_str = { "r" } }; + static const TCGTargetOpDef r_r + = { .args_ct_str = { "r", "r" } }; + static const TCGTargetOpDef rZ_r + = { .args_ct_str = { "rZ", "r" } }; + static const TCGTargetOpDef rZ_rZ + = { .args_ct_str = { "rZ", "rZ" } }; + static const TCGTargetOpDef rZ_rZ_rZ_rZ + = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } }; + static const TCGTargetOpDef r_r_ri + = { .args_ct_str = { "r", "r", "ri" } }; + static const TCGTargetOpDef r_r_rI + = { .args_ct_str = { "r", "r", "rI" } }; + static const TCGTargetOpDef r_rZ_rN + = { .args_ct_str = { "r", "rZ", "rN" } }; + static const TCGTargetOpDef r_rZ_rZ + = { .args_ct_str = { "r", "rZ", "rZ" } }; + static const TCGTargetOpDef r_rZ_rZ_rZ_rZ + = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } }; + static const TCGTargetOpDef r_L + = { .args_ct_str = { "r", "L" } }; + static const TCGTargetOpDef r_r_L + = { .args_ct_str = { "r", "r", "L" } }; + static const TCGTargetOpDef r_L_L + = { .args_ct_str = { "r", "L", "L" } }; + static const TCGTargetOpDef r_r_L_L + = { .args_ct_str = { "r", "r", "L", "L" } }; + static const TCGTargetOpDef LZ_L + = { .args_ct_str = { "LZ", "L" } }; + static const TCGTargetOpDef LZ_L_L + = { .args_ct_str = { "LZ", "L", "L" } }; + static const TCGTargetOpDef LZ_LZ_L + = { .args_ct_str = { "LZ", "LZ", "L" } }; + static const TCGTargetOpDef LZ_LZ_L_L + = { .args_ct_str = { "LZ", "LZ", "L", "L" } }; + static const TCGTargetOpDef r_r_rZ_rZ_rM_rM + = { .args_ct_str = { "r", "r", "rZ", "rZ", "rM", "rM" } }; + + switch (op) { + case INDEX_op_goto_ptr: + return &r; + + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_not_i32: + case INDEX_op_neg_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld_i64: + case INDEX_op_not_i64: + case INDEX_op_neg_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_extrh_i64_i32: + case 
INDEX_op_ext_i32_i64: + return &r_r; + + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + return &rZ_r; + + case INDEX_op_add_i32: + case INDEX_op_and_i32: + case INDEX_op_or_i32: + case INDEX_op_xor_i32: + case INDEX_op_add_i64: + case INDEX_op_and_i64: + case INDEX_op_or_i64: + case INDEX_op_xor_i64: + return &r_r_rI; + + case INDEX_op_sub_i32: + case INDEX_op_sub_i64: + return &r_rZ_rN; + + case INDEX_op_mul_i32: + case INDEX_op_mulsh_i32: + case INDEX_op_muluh_i32: + case INDEX_op_div_i32: + case INDEX_op_divu_i32: + case INDEX_op_rem_i32: + case INDEX_op_remu_i32: + case INDEX_op_setcond_i32: + case INDEX_op_mul_i64: + case INDEX_op_mulsh_i64: + case INDEX_op_muluh_i64: + case INDEX_op_div_i64: + case INDEX_op_divu_i64: + case INDEX_op_rem_i64: + case INDEX_op_remu_i64: + case INDEX_op_setcond_i64: + return &r_rZ_rZ; + + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + return &r_r_ri; + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + return &rZ_rZ; + + case INDEX_op_add2_i32: + case INDEX_op_add2_i64: + case INDEX_op_sub2_i32: + case INDEX_op_sub2_i64: + return &r_r_rZ_rZ_rM_rM; + + case INDEX_op_brcond2_i32: + return &rZ_rZ_rZ_rZ; + + case INDEX_op_setcond2_i32: + return &r_rZ_rZ_rZ_rZ; + + case INDEX_op_qemu_ld_i32: + return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L; + case INDEX_op_qemu_st_i32: + return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_L : &LZ_L_L; + case INDEX_op_qemu_ld_i64: + return TCG_TARGET_REG_BITS == 64 ? &r_L + : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L + : &r_r_L_L; + case INDEX_op_qemu_st_i64: + return TCG_TARGET_REG_BITS == 64 ? &LZ_L + : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_LZ_L + : &LZ_LZ_L_L; + + default: + return NULL; + } +} + +static const int tcg_target_callee_save_regs[] = { + TCG_REG_S0, /* used for the global env (TCG_AREG0) */ + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + TCG_REG_S9, + TCG_REG_S10, + TCG_REG_S11, + TCG_REG_RA, /* should be last for ABI compliance */ +}; + +/* Stack frame parameters. */ +#define REG_SIZE (TCG_TARGET_REG_BITS / 8) +#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) +#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) +#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \ + + TCG_TARGET_STACK_ALIGN - 1) \ + & -TCG_TARGET_STACK_ALIGN) +#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) + +/* We're expecting to be able to use an immediate for frame allocation. 
*/ +QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); + +/* Generate global QEMU prologue and epilogue code */ +static void tcg_target_qemu_prologue(TCGContext *s) +{ + int i; + + tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); + + /* TB prologue */ + tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { + tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], + TCG_REG_SP, SAVE_OFS + i * REG_SIZE); + } + +#if !defined(CONFIG_SOFTMMU) + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); + tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); +#endif + + /* Call generated code */ + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); + + /* Return path for goto_ptr. Set return value to 0 */ + s->code_gen_epilogue = s->code_ptr; + tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); + + /* TB epilogue */ + tb_ret_addr = s->code_ptr; + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { + tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], + TCG_REG_SP, SAVE_OFS + i * REG_SIZE); + } + + tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); + tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0); +} + +static void tcg_target_init(TCGContext *s) +{ + s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; + if (TCG_TARGET_REG_BITS == 64) { + s->tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; + } + + s->tcg_target_call_clobber_regs = -1u; + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S0); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S1); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S2); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S3); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S4); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S5); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S6); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S7); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S8); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S9); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S10); + tcg_regset_reset_reg(s->tcg_target_call_clobber_regs, TCG_REG_S11); + + s->reserved_regs = 0; + tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); +} + +typedef struct { + DebugFrameHeader h; + uint8_t fde_def_cfa[4]; + uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; +} DebugFrame; + +#define ELF_HOST_MACHINE EM_RISCV + +static const DebugFrame debug_frame = { + .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ + .h.cie.id = -1, + .h.cie.version = 1, + .h.cie.code_align = 1, + .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ + .h.cie.return_column = TCG_REG_RA, + + /* Total FDE size does not include the "len" member. */ + .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), + + .fde_def_cfa = { + 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... 
*/ + (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ + (FRAME_SIZE >> 7) + }, + .fde_reg_ofs = { + 0x80 + 9, 12, /* DW_CFA_offset, s1, -96 */ + 0x80 + 18, 11, /* DW_CFA_offset, s2, -88 */ + 0x80 + 19, 10, /* DW_CFA_offset, s3, -80 */ + 0x80 + 20, 9, /* DW_CFA_offset, s4, -72 */ + 0x80 + 21, 8, /* DW_CFA_offset, s5, -64 */ + 0x80 + 22, 7, /* DW_CFA_offset, s6, -56 */ + 0x80 + 23, 6, /* DW_CFA_offset, s7, -48 */ + 0x80 + 24, 5, /* DW_CFA_offset, s8, -40 */ + 0x80 + 25, 4, /* DW_CFA_offset, s9, -32 */ + 0x80 + 26, 3, /* DW_CFA_offset, s10, -24 */ + 0x80 + 27, 2, /* DW_CFA_offset, s11, -16 */ + 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */ + } +}; + +void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) +{ + tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); +} diff --git a/qemu/tcg/s390/tcg-target.h b/qemu/tcg/s390/tcg-target.h index 5acc28ca..07accabb 100644 --- a/qemu/tcg/s390/tcg-target.h +++ b/qemu/tcg/s390/tcg-target.h @@ -21,10 +21,12 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ -#ifndef TCG_TARGET_S390 -#define TCG_TARGET_S390 1 + +#ifndef S390_TCG_TARGET_H +#define S390_TCG_TARGET_H #define TCG_TARGET_INSN_UNIT_SIZE 2 +#define TCG_TARGET_TLB_DISPLACEMENT_BITS 19 typedef enum TCGReg { TCG_REG_R0 = 0, @@ -47,62 +49,87 @@ typedef enum TCGReg { #define TCG_TARGET_NB_REGS 16 +/* A list of relevant facilities used by this translator. Some of these + are required for proper operation, and these are checked at startup. */ + +#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2)) +#define FACILITY_LONG_DISP (1ULL << (63 - 18)) +#define FACILITY_EXT_IMM (1ULL << (63 - 21)) +#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34)) +#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45)) +#define FACILITY_FAST_BCR_SER FACILITY_LOAD_ON_COND +#define FACILITY_DISTINCT_OPS FACILITY_LOAD_ON_COND +#define FACILITY_LOAD_ON_COND2 (1ULL << (63 - 53)) + +extern uint64_t s390_facilities; + /* optional instructions */ -#define TCG_TARGET_HAS_div2_i32 1 -#define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_ext8u_i32 1 -#define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_bswap16_i32 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_not_i32 0 -#define TCG_TARGET_HAS_neg_i32 1 -#define TCG_TARGET_HAS_andc_i32 0 -#define TCG_TARGET_HAS_orc_i32 0 -#define TCG_TARGET_HAS_eqv_i32 0 -#define TCG_TARGET_HAS_nand_i32 0 -#define TCG_TARGET_HAS_nor_i32 0 -#define TCG_TARGET_HAS_deposit_i32 1 -#define TCG_TARGET_HAS_movcond_i32 1 -#define TCG_TARGET_HAS_add2_i32 1 -#define TCG_TARGET_HAS_sub2_i32 1 -#define TCG_TARGET_HAS_mulu2_i32 0 -#define TCG_TARGET_HAS_muls2_i32 0 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_trunc_shr_i32 0 +#define TCG_TARGET_HAS_div2_i32 1 +#define TCG_TARGET_HAS_rot_i32 1 +#define TCG_TARGET_HAS_ext8s_i32 1 +#define TCG_TARGET_HAS_ext16s_i32 1 +#define TCG_TARGET_HAS_ext8u_i32 1 +#define TCG_TARGET_HAS_ext16u_i32 1 +#define TCG_TARGET_HAS_bswap16_i32 1 +#define TCG_TARGET_HAS_bswap32_i32 1 +#define TCG_TARGET_HAS_not_i32 0 +#define TCG_TARGET_HAS_neg_i32 1 +#define TCG_TARGET_HAS_andc_i32 0 +#define TCG_TARGET_HAS_orc_i32 0 +#define TCG_TARGET_HAS_eqv_i32 0 +#define TCG_TARGET_HAS_nand_i32 0 +#define TCG_TARGET_HAS_nor_i32 0 +#define TCG_TARGET_HAS_clz_i32 0 +#define TCG_TARGET_HAS_ctz_i32 0 +#define TCG_TARGET_HAS_ctpop_i32 0 +#define TCG_TARGET_HAS_deposit_i32 
(s390_facilities & FACILITY_GEN_INST_EXT) +#define TCG_TARGET_HAS_extract_i32 (s390_facilities & FACILITY_GEN_INST_EXT) +#define TCG_TARGET_HAS_sextract_i32 0 +#define TCG_TARGET_HAS_extract2_i32 0 +#define TCG_TARGET_HAS_movcond_i32 1 +#define TCG_TARGET_HAS_add2_i32 1 +#define TCG_TARGET_HAS_sub2_i32 1 +#define TCG_TARGET_HAS_mulu2_i32 0 +#define TCG_TARGET_HAS_muls2_i32 0 +#define TCG_TARGET_HAS_muluh_i32 0 +#define TCG_TARGET_HAS_mulsh_i32 0 +#define TCG_TARGET_HAS_extrl_i64_i32 0 +#define TCG_TARGET_HAS_extrh_i64_i32 0 +#define TCG_TARGET_HAS_goto_ptr 1 +#define TCG_TARGET_HAS_direct_jump (s390_facilities & FACILITY_GEN_INST_EXT) -#define TCG_TARGET_HAS_div2_i64 1 -#define TCG_TARGET_HAS_rot_i64 1 -#define TCG_TARGET_HAS_ext8s_i64 1 -#define TCG_TARGET_HAS_ext16s_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext8u_i64 1 -#define TCG_TARGET_HAS_ext16u_i64 1 -#define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_bswap16_i64 1 -#define TCG_TARGET_HAS_bswap32_i64 1 -#define TCG_TARGET_HAS_bswap64_i64 1 -#define TCG_TARGET_HAS_not_i64 0 -#define TCG_TARGET_HAS_neg_i64 1 -#define TCG_TARGET_HAS_andc_i64 0 -#define TCG_TARGET_HAS_orc_i64 0 -#define TCG_TARGET_HAS_eqv_i64 0 -#define TCG_TARGET_HAS_nand_i64 0 -#define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_deposit_i64 1 -#define TCG_TARGET_HAS_movcond_i64 1 -#define TCG_TARGET_HAS_add2_i64 1 -#define TCG_TARGET_HAS_sub2_i64 1 -#define TCG_TARGET_HAS_mulu2_i64 1 -#define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i64 0 -#define TCG_TARGET_HAS_mulsh_i64 0 - -extern bool tcg_target_deposit_valid(int ofs, int len); -#define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid -#define TCG_TARGET_deposit_i64_valid tcg_target_deposit_valid +#define TCG_TARGET_HAS_div2_i64 1 +#define TCG_TARGET_HAS_rot_i64 1 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 1 +#define TCG_TARGET_HAS_ext16u_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#define TCG_TARGET_HAS_bswap16_i64 1 +#define TCG_TARGET_HAS_bswap32_i64 1 +#define TCG_TARGET_HAS_bswap64_i64 1 +#define TCG_TARGET_HAS_not_i64 0 +#define TCG_TARGET_HAS_neg_i64 1 +#define TCG_TARGET_HAS_andc_i64 0 +#define TCG_TARGET_HAS_orc_i64 0 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_clz_i64 (s390_facilities & FACILITY_EXT_IMM) +#define TCG_TARGET_HAS_ctz_i64 0 +#define TCG_TARGET_HAS_ctpop_i64 0 +#define TCG_TARGET_HAS_deposit_i64 (s390_facilities & FACILITY_GEN_INST_EXT) +#define TCG_TARGET_HAS_extract_i64 (s390_facilities & FACILITY_GEN_INST_EXT) +#define TCG_TARGET_HAS_sextract_i64 0 +#define TCG_TARGET_HAS_extract2_i64 0 +#define TCG_TARGET_HAS_movcond_i64 1 +#define TCG_TARGET_HAS_add2_i64 1 +#define TCG_TARGET_HAS_sub2_i64 1 +#define TCG_TARGET_HAS_mulu2_i64 1 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 0 +#define TCG_TARGET_HAS_mulsh_i64 0 /* used for function call generation */ #define TCG_REG_CALL_STACK TCG_REG_R15 @@ -110,6 +137,9 @@ extern bool tcg_target_deposit_valid(int ofs, int len); #define TCG_TARGET_CALL_STACK_OFFSET 160 #define TCG_TARGET_EXTEND_ARGS 1 +#define TCG_TARGET_HAS_MEMORY_BSWAP 1 + +#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) enum { TCG_AREG0 = TCG_REG_R10, @@ -119,4 +149,18 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop) { } +static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, + 
uintptr_t jmp_addr, uintptr_t addr) +{ + /* patch the branch destination */ + intptr_t disp = addr - (jmp_addr - 2); + atomic_set((int32_t *)jmp_addr, disp / 2); + /* no need to flush icache explicitly */ +} + +#ifdef CONFIG_SOFTMMU +#define TCG_TARGET_NEED_LDST_LABELS +#endif +#define TCG_TARGET_NEED_POOL_LABELS + #endif diff --git a/qemu/tcg/s390/tcg-target.c b/qemu/tcg/s390/tcg-target.inc.c similarity index 61% rename from qemu/tcg/s390/tcg-target.c rename to qemu/tcg/s390/tcg-target.inc.c index cfa89874..c8fa2004 100644 --- a/qemu/tcg/s390/tcg-target.c +++ b/qemu/tcg/s390/tcg-target.inc.c @@ -24,13 +24,12 @@ * THE SOFTWARE. */ -#include "tcg-be-ldst.h" - /* We only support generating code for 64-bit mode. */ #if TCG_TARGET_REG_BITS != 64 #error "unsupported code generation mode" #endif +#include "../tcg-pool.inc.c" #include "elf.h" /* ??? The translation blocks produced by TCG are generally small enough to @@ -38,30 +37,28 @@ a 32-bit displacement here Just In Case. */ #define USE_LONG_BRANCHES 0 -#define TCG_CT_CONST_MULI 0x100 -#define TCG_CT_CONST_ORI 0x200 -#define TCG_CT_CONST_XORI 0x400 -#define TCG_CT_CONST_CMPI 0x800 -#define TCG_CT_CONST_ADLI 0x1000 +#define TCG_CT_CONST_S16 0x100 +#define TCG_CT_CONST_S32 0x200 +#define TCG_CT_CONST_S33 0x400 +#define TCG_CT_CONST_ZERO 0x800 /* Several places within the instruction set 0 means "no register" rather than TCG_REG_R0. */ #define TCG_REG_NONE 0 /* A scratch register that may be be used throughout the backend. */ -#define TCG_TMP0 TCG_REG_R14 +#define TCG_TMP0 TCG_REG_R1 -#ifdef CONFIG_USE_GUEST_BASE +/* A scratch register that holds a pointer to the beginning of the TB. + We don't need this when we have pc-relative loads with the general + instructions extension facility. */ +#define TCG_REG_TB TCG_REG_R12 +#define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT)) + +#ifndef CONFIG_SOFTMMU #define TCG_GUEST_BASE_REG TCG_REG_R13 -#else -#define TCG_GUEST_BASE_REG TCG_REG_R0 #endif -#ifndef GUEST_BASE -#define GUEST_BASE 0 -#endif - - /* All of the following instructions are prefixed with their instruction format, and are defined as 8- or 16-bit quantities, even when the two halves of the 16-bit quantity may appear 32 bits apart in the insn. 
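The split-opcode convention noted in the comment above is easier to follow once worked through concretely. The sketch below is illustrative only and not part of the patch (the function name and raw byte-buffer interface are invented): it assembles a 6-byte RXY-format instruction from a fused 16-bit opcode value such as RXY_AG = 0xe308, whose two opcode bytes land at opposite ends of the instruction, five bytes apart.

#include <stdint.h>

/* Illustrative sketch: emit an RXY-format insn (op1 r1 x2 b2 dl dh op2)
   into p[0..5] from the fused 16-bit opcode used by the S390Opcode enum. */
static void emit_rxy_sketch(uint8_t *p, uint16_t op, unsigned r1,
                            unsigned x2, unsigned b2, int32_t d2)
{
    uint32_t dl = d2 & 0xfff;           /* low 12 bits of displacement */
    uint32_t dh = (d2 >> 12) & 0xff;    /* high 8 bits of displacement */

    p[0] = op >> 8;                     /* 0xe3 for RXY_AG = 0xe308 */
    p[1] = (r1 << 4) | x2;
    p[2] = (b2 << 4) | (dl >> 8);
    p[3] = dl & 0xff;
    p[4] = dh;
    p[5] = op & 0xff;                   /* 0x08, five bytes after p[0] */
}

With op = 0xe308 this yields the AG (64-bit add from memory) encoding: the major byte 0xe3 leads the instruction and the minor opcode byte 0x08 trails the displacement, which is why the enum stores both halves in one 16-bit value.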
@@ -77,6 +74,10 @@ typedef enum S390Opcode {
     RIL_CGFI    = 0xc20c,
     RIL_CLFI    = 0xc20f,
     RIL_CLGFI   = 0xc20e,
+    RIL_CLRL    = 0xc60f,
+    RIL_CLGRL   = 0xc60a,
+    RIL_CRL     = 0xc60d,
+    RIL_CGRL    = 0xc608,
     RIL_IIHF    = 0xc008,
     RIL_IILF    = 0xc009,
     RIL_LARL    = 0xc000,
@@ -99,6 +100,8 @@ typedef enum S390Opcode {
     RI_AGHI     = 0xa70b,
     RI_AHI      = 0xa70a,
     RI_BRC      = 0xa704,
+    RI_CHI      = 0xa70e,
+    RI_CGHI     = 0xa70f,
     RI_IIHH     = 0xa500,
     RI_IIHL     = 0xa501,
     RI_IILH     = 0xa502,
@@ -127,6 +130,7 @@ typedef enum S390Opcode {
     RIE_CLGIJ   = 0xec7d,
     RIE_CLRJ    = 0xec77,
     RIE_CRJ     = 0xec76,
+    RIE_LOCGHI  = 0xec46,
     RIE_RISBG   = 0xec55,
 
     RRE_AGR     = 0xb908,
@@ -139,6 +143,7 @@ typedef enum S390Opcode {
     RRE_DLR     = 0xb997,
     RRE_DSGFR   = 0xb91d,
     RRE_DSGR    = 0xb90d,
+    RRE_FLOGR   = 0xb983,
     RRE_LGBR    = 0xb906,
     RRE_LCGR    = 0xb903,
     RRE_LGFR    = 0xb914,
@@ -163,6 +168,16 @@ typedef enum S390Opcode {
 
     RRF_LOCR    = 0xb9f2,
     RRF_LOCGR   = 0xb9e2,
+    RRF_NRK     = 0xb9f4,
+    RRF_NGRK    = 0xb9e4,
+    RRF_ORK     = 0xb9f6,
+    RRF_OGRK    = 0xb9e6,
+    RRF_SRK     = 0xb9f9,
+    RRF_SGRK    = 0xb9e9,
+    RRF_SLRK    = 0xb9fb,
+    RRF_SLGRK   = 0xb9eb,
+    RRF_XRK     = 0xb9f7,
+    RRF_XGRK    = 0xb9e7,
 
     RR_AR       = 0x1a,
     RR_ALR      = 0x1e,
@@ -183,8 +198,11 @@ typedef enum S390Opcode {
     RSY_RLL     = 0xeb1d,
     RSY_RLLG    = 0xeb1c,
     RSY_SLLG    = 0xeb0d,
+    RSY_SLLK    = 0xebdf,
     RSY_SRAG    = 0xeb0a,
+    RSY_SRAK    = 0xebdc,
     RSY_SRLG    = 0xeb0c,
+    RSY_SRLK    = 0xebde,
 
     RS_SLL      = 0x89,
     RS_SRA      = 0x8a,
@@ -193,6 +211,8 @@ typedef enum S390Opcode {
     RXY_AG      = 0xe308,
     RXY_AY      = 0xe35a,
     RXY_CG      = 0xe320,
+    RXY_CLG     = 0xe321,
+    RXY_CLY     = 0xe355,
     RXY_CY      = 0xe359,
     RXY_LAY     = 0xe371,
     RXY_LB      = 0xe376,
@@ -209,6 +229,8 @@ typedef enum S390Opcode {
     RXY_LRVG    = 0xe30f,
     RXY_LRVH    = 0xe31f,
     RXY_LY      = 0xe358,
+    RXY_NG      = 0xe380,
+    RXY_OG      = 0xe381,
     RXY_STCY    = 0xe372,
     RXY_STG     = 0xe324,
     RXY_STHY    = 0xe370,
@@ -217,6 +239,7 @@ typedef enum S390Opcode {
     RXY_STRVG   = 0xe32f,
     RXY_STRVH   = 0xe33f,
     RXY_STY     = 0xe350,
+    RXY_XG      = 0xe382,
 
     RX_A        = 0x5a,
     RX_C        = 0x59,
@@ -226,9 +249,11 @@ typedef enum S390Opcode {
     RX_ST       = 0x50,
     RX_STC      = 0x42,
     RX_STH      = 0x40,
+
+    NOP         = 0x0707,
 } S390Opcode;
 
-#ifndef NDEBUG
+#ifdef CONFIG_DEBUG_TCG
 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
     "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
     "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
@@ -339,190 +364,86 @@ static void * const qemu_st_helpers[16] = {
 #endif
 
 static tcg_insn_unit *tb_ret_addr;
+uint64_t s390_facilities;
 
-/* A list of relevant facilities used by this translator. Some of these
-   are required for proper operation, and these are checked at startup.
*/ - -#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2)) -#define FACILITY_LONG_DISP (1ULL << (63 - 18)) -#define FACILITY_EXT_IMM (1ULL << (63 - 21)) -#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34)) -#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45)) - -static uint64_t facilities; - -static void patch_reloc(tcg_insn_unit *code_ptr, int type, +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { - intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1); - assert(addend == -2); + intptr_t pcrel2; + uint32_t old; + + value += addend; + pcrel2 = (tcg_insn_unit *)value - code_ptr; switch (type) { case R_390_PC16DBL: - assert(pcrel2 == (int16_t)pcrel2); - tcg_patch16(code_ptr, pcrel2); + if (pcrel2 == (int16_t)pcrel2) { + tcg_patch16(code_ptr, pcrel2); + return true; + } break; case R_390_PC32DBL: - assert(pcrel2 == (int32_t)pcrel2); - tcg_patch32(code_ptr, pcrel2); + if (pcrel2 == (int32_t)pcrel2) { + tcg_patch32(code_ptr, pcrel2); + return true; + } + break; + case R_390_20: + if (value == sextract64(value, 0, 20)) { + old = *(uint32_t *)code_ptr & 0xf00000ff; + old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4); + tcg_patch32(code_ptr, old); + return true; + } break; default: - tcg_abort(); - break; + g_assert_not_reached(); } + return false; } /* parse target specific constraints */ -static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +static const char *target_parse_constraint(TCGArgConstraint *ct, + const char *ct_str, TCGType type) { - const char *ct_str = *pct_str; - - switch (ct_str[0]) { + switch (*ct_str++) { case 'r': /* all registers */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffff); - break; - case 'R': /* not R0 */ - ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffff); - tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); + ct->u.regs = 0xffff; break; case 'L': /* qemu_ld/st constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffff); - tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2); - tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3); - tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4); + ct->u.regs = 0xffff; + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); break; case 'a': /* force R2 for division */ ct->ct |= TCG_CT_REG; - tcg_regset_clear(ct->u.regs); + ct->u.regs = 0; tcg_regset_set_reg(ct->u.regs, TCG_REG_R2); break; case 'b': /* force R3 for division */ ct->ct |= TCG_CT_REG; - tcg_regset_clear(ct->u.regs); + ct->u.regs = 0; tcg_regset_set_reg(ct->u.regs, TCG_REG_R3); break; case 'A': - ct->ct |= TCG_CT_CONST_ADLI; + ct->ct |= TCG_CT_CONST_S33; break; - case 'K': - ct->ct |= TCG_CT_CONST_MULI; + case 'I': + ct->ct |= TCG_CT_CONST_S16; break; - case 'O': - ct->ct |= TCG_CT_CONST_ORI; + case 'J': + ct->ct |= TCG_CT_CONST_S32; break; - case 'X': - ct->ct |= TCG_CT_CONST_XORI; - break; - case 'C': - ct->ct |= TCG_CT_CONST_CMPI; + case 'Z': + ct->ct |= TCG_CT_CONST_ZERO; break; default: - return -1; + return NULL; } - ct_str++; - *pct_str = ct_str; - - return 0; -} - -/* Immediates to be used with logical OR. This is an optimization only, - since a full 64-bit immediate OR can always be performed with 4 sequential - OI[LH][LH] instructions. What we're looking for is immediates that we - can load efficiently, and the immediate load plus the reg-reg OR is - smaller than the sequential OI's. 
*/ - -static int tcg_match_ori(TCGType type, tcg_target_long val) -{ - if (facilities & FACILITY_EXT_IMM) { - if (type == TCG_TYPE_I32) { - /* All 32-bit ORs can be performed with 1 48-bit insn. */ - return 1; - } - } - - /* Look for negative values. These are best to load with LGHI. */ - if (val < 0) { - if (val == (int16_t)val) { - return 0; - } - if (facilities & FACILITY_EXT_IMM) { - if (val == (int32_t)val) { - return 0; - } - } - } - - return 1; -} - -/* Immediates to be used with logical XOR. This is almost, but not quite, - only an optimization. XOR with immediate is only supported with the - extended-immediate facility. That said, there are a few patterns for - which it is better to load the value into a register first. */ - -static int tcg_match_xori(TCGType type, tcg_target_long val) -{ - if ((facilities & FACILITY_EXT_IMM) == 0) { - return 0; - } - - if (type == TCG_TYPE_I32) { - /* All 32-bit XORs can be performed with 1 48-bit insn. */ - return 1; - } - - /* Look for negative values. These are best to load with LGHI. */ - if (val < 0 && val == (int32_t)val) { - return 0; - } - - return 1; -} - -/* Imediates to be used with comparisons. */ - -static int tcg_match_cmpi(TCGType type, tcg_target_long val) -{ - if (facilities & FACILITY_EXT_IMM) { - /* The COMPARE IMMEDIATE instruction is available. */ - if (type == TCG_TYPE_I32) { - /* We have a 32-bit immediate and can compare against anything. */ - return 1; - } else { - /* ??? We have no insight here into whether the comparison is - signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit - signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses - a 32-bit unsigned immediate. If we were to use the (semi) - obvious "val == (int32_t)val" we would be enabling unsigned - comparisons vs very large numbers. The only solution is to - take the intersection of the ranges. */ - /* ??? Another possible solution is to simply lie and allow all - constants here and force the out-of-range values into a temp - register in tgen_cmp when we have knowledge of the actual - comparison code in use. */ - return val >= 0 && val <= 0x7fffffff; - } - } else { - /* Only the LOAD AND TEST instruction is available. */ - return val == 0; - } -} - -/* Immediates to be used with add2/sub2. */ - -static int tcg_match_add2i(TCGType type, tcg_target_long val) -{ - if (facilities & FACILITY_EXT_IMM) { - if (type == TCG_TYPE_I32) { - return 1; - } else if (val >= -0xffffffffll && val <= 0xffffffffll) { - return 1; - } - } - return 0; + return ct_str; } /* Test if a constant matches the constraint. */ @@ -540,24 +461,14 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, } /* The following are mutually exclusive. */ - if (ct & TCG_CT_CONST_MULI) { - /* Immediates that may be used with multiply. If we have the - general-instruction-extensions, then we have MULTIPLY SINGLE - IMMEDIATE with a signed 32-bit, otherwise we have only - MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. 
*/ - if (facilities & FACILITY_GEN_INST_EXT) { - return val == (int32_t)val; - } else { - return val == (int16_t)val; - } - } else if (ct & TCG_CT_CONST_ADLI) { - return tcg_match_add2i(type, val); - } else if (ct & TCG_CT_CONST_ORI) { - return tcg_match_ori(type, val); - } else if (ct & TCG_CT_CONST_XORI) { - return tcg_match_xori(type, val); - } else if (ct & TCG_CT_CONST_CMPI) { - return tcg_match_cmpi(type, val); + if (ct & TCG_CT_CONST_S16) { + return val == (int16_t)val; + } else if (ct & TCG_CT_CONST_S32) { + return val == (int32_t)val; + } else if (ct & TCG_CT_CONST_S33) { + return val >= -0xffffffffll && val <= 0xffffffffll; + } else if (ct & TCG_CT_CONST_ZERO) { + return val == 0; } return 0; @@ -587,6 +498,13 @@ static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2) tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff)); } +static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1, + int i2, int m3) +{ + tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3); + tcg_out32(s, (i2 << 16) | (op & 0xff)); +} + static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2) { tcg_out16(s, op | (r1 << 4)); @@ -630,7 +548,7 @@ static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest, tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm); } -static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) { if (src != dst) { if (type == TCG_TYPE_I32) { @@ -639,16 +557,16 @@ static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) tcg_out_insn(s, RRE, LGR, dst, src); } } + return true; } -/* load a register with an immediate value */ -static void tcg_out_movi(TCGContext *s, TCGType type, - TCGReg ret, tcg_target_long sval) -{ - static const S390Opcode lli_insns[4] = { - RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH - }; +static const S390Opcode lli_insns[4] = { + RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH +}; +static bool maybe_out_small_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long sval) +{ tcg_target_ulong uval = sval; int i; @@ -660,19 +578,39 @@ static void tcg_out_movi(TCGContext *s, TCGType type, /* Try all 32-bit insns that can load it in one go. */ if (sval >= -0x8000 && sval < 0x8000) { tcg_out_insn(s, RI, LGHI, ret, sval); - return; + return true; } for (i = 0; i < 4; i++) { tcg_target_long mask = 0xffffull << i*16; if ((uval & mask) == uval) { tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16); - return; + return true; } } + return false; +} + +/* load a register with an immediate value */ +static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, + tcg_target_long sval, bool in_prologue) +{ + tcg_target_ulong uval; + + /* Try all 32-bit insns that can load it in one go. */ + if (maybe_out_small_movi(s, type, ret, sval)) { + return; + } + + uval = sval; + if (type == TCG_TYPE_I32) { + uval = (uint32_t)sval; + sval = (int32_t)sval; + } + /* Try all 48-bit insns that can load it in one go. */ - if (facilities & FACILITY_EXT_IMM) { + if (s390_facilities & FACILITY_EXT_IMM) { if (sval == (int32_t)sval) { tcg_out_insn(s, RIL, LGFI, ret, sval); return; @@ -682,73 +620,59 @@ static void tcg_out_movi(TCGContext *s, TCGType type, return; } if ((uval & 0xffffffff) == 0) { - tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1); + tcg_out_insn(s, RIL, LLIHF, ret, uval >> 32); return; } } - /* Try for PC-relative address load. */ + /* Try for PC-relative address load. 
For odd addresses, + attempt to use an offset from the start of the TB. */ if ((sval & 1) == 0) { ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1; if (off == (int32_t)off) { tcg_out_insn(s, RIL, LARL, ret, off); return; } - } - - /* If extended immediates are not present, then we may have to issue - several instructions to load the low 32 bits. */ - if (!(facilities & FACILITY_EXT_IMM)) { - /* A 32-bit unsigned value can be loaded in 2 insns. And given - that the lli_insns loop above did not succeed, we know that - both insns are required. */ - if (uval <= 0xffffffff) { - tcg_out_insn(s, RI, LLILL, ret, uval); - tcg_out_insn(s, RI, IILH, ret, uval >> 16); - return; - } - - /* If all high bits are set, the value can be loaded in 2 or 3 insns. - We first want to make sure that all the high bits get set. With - luck the low 16-bits can be considered negative to perform that for - free, otherwise we load an explicit -1. */ - if (sval >> 31 >> 1 == -1) { - if (uval & 0x8000) { - tcg_out_insn(s, RI, LGHI, ret, uval); - } else { - tcg_out_insn(s, RI, LGHI, ret, -1); - tcg_out_insn(s, RI, IILL, ret, uval); - } - tcg_out_insn(s, RI, IILH, ret, uval >> 16); + } else if (USE_REG_TB && !in_prologue) { + ptrdiff_t off = sval - (uintptr_t)s->code_gen_ptr; + if (off == sextract64(off, 0, 20)) { + /* This is certain to be an address within TB, and therefore + OFF will be negative; don't try RX_LA. */ + tcg_out_insn(s, RXY, LAY, ret, TCG_REG_TB, TCG_REG_NONE, off); return; } } - /* If we get here, both the high and low parts have non-zero bits. */ + /* A 32-bit unsigned value can be loaded in 2 insns. And given + that LLILL, LLIHL, LLILF above did not succeed, we know that + both insns are required. */ + if (uval <= 0xffffffff) { + tcg_out_insn(s, RI, LLILL, ret, uval); + tcg_out_insn(s, RI, IILH, ret, uval >> 16); + return; + } - /* Recurse to load the lower 32-bits. */ - tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff); - - /* Insert data into the high 32-bits. */ - uval = uval >> 31 >> 1; - if (facilities & FACILITY_EXT_IMM) { - if (uval < 0x10000) { - tcg_out_insn(s, RI, IIHL, ret, uval); - } else if ((uval & 0xffff) == 0) { - tcg_out_insn(s, RI, IIHH, ret, uval >> 16); - } else { - tcg_out_insn(s, RIL, IIHF, ret, uval); - } + /* Otherwise, stuff it in the constant pool. */ + if (s390_facilities & FACILITY_GEN_INST_EXT) { + tcg_out_insn(s, RIL, LGRL, ret, 0); + new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2); + } else if (USE_REG_TB && !in_prologue) { + tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0); + new_pool_label(s, sval, R_390_20, s->code_ptr - 2, + -(intptr_t)s->code_gen_ptr); } else { - if (uval & 0xffff) { - tcg_out_insn(s, RI, IIHL, ret, uval); - } - if (uval & 0xffff0000) { - tcg_out_insn(s, RI, IIHH, ret, uval >> 16); - } + TCGReg base = ret ? ret : TCG_TMP0; + tcg_out_insn(s, RIL, LARL, base, 0); + new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2); + tcg_out_insn(s, RXY, LG, ret, base, TCG_REG_NONE, 0); } } +static void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long sval) +{ + tcg_out_movi_int(s, type, ret, sval, false); +} /* Emit a load/store type instruction. Inputs are: DATA: The register to be loaded or stored. 
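
Several of the paths above (LAY off TCG_REG_TB, the R_390_20 constant-pool references) hinge on an offset fitting the signed 20-bit long-displacement field, which the code tests with sextract64(off, 0, 20). A sketch of that round-trip test; it assumes the usual arithmetic right shift on signed values, as QEMU's own sextract64 does:

    #include <assert.h>
    #include <stdint.h>

    /* Keep the low 20 bits of v, sign-extended; the offset is encodable
       iff this round-trips.  */
    static int64_t sext20(int64_t v)
    {
        return (int64_t)((uint64_t)v << 44) >> 44;
    }

    int main(void)
    {
        assert(sext20(-0x80000) == -0x80000);  /* -2^19: still fits */
        assert(sext20(0x7ffff) == 0x7ffff);    /* 2^19 - 1: still fits */
        assert(sext20(0x80000) != 0x80000);    /* 2^19: wraps negative */
        return 0;
    }
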
@@ -803,12 +727,18 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data, } } +static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) +{ + return false; +} + /* load data from an absolute host address */ static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs) { intptr_t addr = (intptr_t)abs; - if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) { + if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) { ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1; if (disp == (int32_t)disp) { if (type == TCG_TYPE_I32) { @@ -819,6 +749,13 @@ static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs) return; } } + if (USE_REG_TB) { + ptrdiff_t disp = abs - (void *)s->code_gen_ptr; + if (disp == sextract64(disp, 0, 20)) { + tcg_out_ld(s, type, dest, TCG_REG_TB, disp); + return; + } + } tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff); tcg_out_ld(s, type, dest, dest, addr & 0xffff); @@ -835,7 +772,7 @@ static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src, static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (facilities & FACILITY_EXT_IMM) { + if (s390_facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RRE, LGBR, dest, src); return; } @@ -855,7 +792,7 @@ static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (facilities & FACILITY_EXT_IMM) { + if (s390_facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RRE, LLGCR, dest, src); return; } @@ -875,7 +812,7 @@ static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (facilities & FACILITY_EXT_IMM) { + if (s390_facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RRE, LGHR, dest, src); return; } @@ -895,7 +832,7 @@ static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (facilities & FACILITY_EXT_IMM) { + if (s390_facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RRE, LLGHR, dest, src); return; } @@ -983,7 +920,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) tgen_ext32u(s, dest, dest); return; } - if (facilities & FACILITY_EXT_IMM) { + if (s390_facilities & FACILITY_EXT_IMM) { if ((val & valid) == 0xff) { tgen_ext8u(s, TCG_TYPE_I64, dest, dest); return; @@ -1004,7 +941,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) } /* Try all 48-bit insns that can perform it in one go. */ - if (facilities & FACILITY_EXT_IMM) { + if (s390_facilities & FACILITY_EXT_IMM) { for (i = 0; i < 2; i++) { tcg_target_ulong mask = ~(0xffffffffull << i*32); if (((val | ~valid) & mask) == mask) { @@ -1013,13 +950,22 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) } } } - if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) { + if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) { tgen_andi_risbg(s, dest, dest, val); return; } - /* Fall back to loading the constant. */ - tcg_out_movi(s, type, TCG_TMP0, val); + /* Use the constant pool if USE_REG_TB, but not for small constants. 
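
tcg_out_ld_abs first tries LARL, whose operand is a PC-relative halfword count in a signed 32-bit field, so only even targets within roughly +/-4 GiB of the code pointer qualify; everything else falls back to TCG_REG_TB or a two-insn sequence. A sketch of the reachability test (function name invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Even target, and (target - pc) / 2 must fit in 32 signed bits. */
    static int larl_reachable(uintptr_t code_ptr, uintptr_t target)
    {
        intptr_t off;
        if (target & 1) {
            return 0;               /* odd addresses are not encodable */
        }
        off = ((intptr_t)target - (intptr_t)code_ptr) >> 1;
        return off == (int32_t)off;
    }

    int main(void)
    {
        printf("%d %d\n", larl_reachable(0x1000, 0x2000),
               larl_reachable(0x1000, 0x2001));   /* prints "1 0" */
        return 0;
    }
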
*/ + if (USE_REG_TB) { + if (!maybe_out_small_movi(s, type, TCG_TMP0, val)) { + tcg_out_insn(s, RXY, NG, dest, TCG_REG_TB, TCG_REG_NONE, 0); + new_pool_label(s, val & valid, R_390_20, s->code_ptr - 2, + -(intptr_t)s->code_gen_ptr); + return; + } + } else { + tcg_out_movi(s, type, TCG_TMP0, val); + } if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, NR, dest, TCG_TMP0); } else { @@ -1027,111 +973,177 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) } } -static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val) +static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) { static const S390Opcode oi_insns[4] = { RI_OILL, RI_OILH, RI_OIHL, RI_OIHH }; - static const S390Opcode nif_insns[2] = { + static const S390Opcode oif_insns[2] = { RIL_OILF, RIL_OIHF }; int i; /* Look for no-op. */ - if (val == 0) { + if (unlikely(val == 0)) { return; } - if (facilities & FACILITY_EXT_IMM) { - /* Try all 32-bit insns that can perform it in one go. */ - for (i = 0; i < 4; i++) { - tcg_target_ulong mask = (0xffffull << i*16); - if ((val & mask) != 0 && (val & ~mask) == 0) { - tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16); - return; - } + /* Try all 32-bit insns that can perform it in one go. */ + for (i = 0; i < 4; i++) { + tcg_target_ulong mask = (0xffffull << i*16); + if ((val & mask) != 0 && (val & ~mask) == 0) { + tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16); + return; } + } - /* Try all 48-bit insns that can perform it in one go. */ + /* Try all 48-bit insns that can perform it in one go. */ + if (s390_facilities & FACILITY_EXT_IMM) { for (i = 0; i < 2; i++) { tcg_target_ulong mask = (0xffffffffull << i*32); if ((val & mask) != 0 && (val & ~mask) == 0) { - tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32); + tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32); return; } } + } + /* Use the constant pool if USE_REG_TB, but not for small constants. */ + if (maybe_out_small_movi(s, type, TCG_TMP0, val)) { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, OR, dest, TCG_TMP0); + } else { + tcg_out_insn(s, RRE, OGR, dest, TCG_TMP0); + } + } else if (USE_REG_TB) { + tcg_out_insn(s, RXY, OG, dest, TCG_REG_TB, TCG_REG_NONE, 0); + new_pool_label(s, val, R_390_20, s->code_ptr - 2, + -(intptr_t)s->code_gen_ptr); + } else { /* Perform the OR via sequential modifications to the high and low parts. Do this via recursion to handle 16-bit vs 32-bit masks in each half. */ - tgen64_ori(s, dest, val & 0x00000000ffffffffull); - tgen64_ori(s, dest, val & 0xffffffff00000000ull); - } else { - /* With no extended-immediate facility, we don't need to be so - clever. Just iterate over the insns and mask in the constant. */ - for (i = 0; i < 4; i++) { - tcg_target_ulong mask = (0xffffull << i*16); - if ((val & mask) != 0) { - tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16); - } - } + tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM); + tgen_ori(s, type, dest, val & 0x00000000ffffffffull); + tgen_ori(s, type, dest, val & 0xffffffff00000000ull); } } -static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val) +static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) { - /* Perform the xor by parts. */ - if (val & 0xffffffff) { - tcg_out_insn(s, RIL, XILF, dest, val); + /* Try all 48-bit insns that can perform it in one go. 
*/ + if (s390_facilities & FACILITY_EXT_IMM) { + if ((val & 0xffffffff00000000ull) == 0) { + tcg_out_insn(s, RIL, XILF, dest, val); + return; + } + if ((val & 0x00000000ffffffffull) == 0) { + tcg_out_insn(s, RIL, XIHF, dest, val >> 32); + return; + } } - if (val > 0xffffffff) { - tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1); + + /* Use the constant pool if USE_REG_TB, but not for small constants. */ + if (maybe_out_small_movi(s, type, TCG_TMP0, val)) { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, XR, dest, TCG_TMP0); + } else { + tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0); + } + } else if (USE_REG_TB) { + tcg_out_insn(s, RXY, XG, dest, TCG_REG_TB, TCG_REG_NONE, 0); + new_pool_label(s, val, R_390_20, s->code_ptr - 2, + -(intptr_t)s->code_gen_ptr); + } else { + /* Perform the xor by parts. */ + tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM); + if (val & 0xffffffff) { + tcg_out_insn(s, RIL, XILF, dest, val); + } + if (val > 0xffffffff) { + tcg_out_insn(s, RIL, XIHF, dest, val >> 32); + } } } static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, - TCGArg c2, int c2const) + TCGArg c2, bool c2const, bool need_carry) { bool is_unsigned = is_unsigned_cond(c); + S390Opcode op; + if (c2const) { if (c2 == 0) { - if (type == TCG_TYPE_I32) { - tcg_out_insn(s, RR, LTR, r1, r1); - } else { - tcg_out_insn(s, RRE, LTGR, r1, r1); - } - return tcg_cond_to_ltr_cond[c]; - } else { - if (is_unsigned) { + if (!(is_unsigned && need_carry)) { if (type == TCG_TYPE_I32) { - tcg_out_insn(s, RIL, CLFI, r1, c2); + tcg_out_insn(s, RR, LTR, r1, r1); } else { - tcg_out_insn(s, RIL, CLGFI, r1, c2); - } - } else { - if (type == TCG_TYPE_I32) { - tcg_out_insn(s, RIL, CFI, r1, c2); - } else { - tcg_out_insn(s, RIL, CGFI, r1, c2); + tcg_out_insn(s, RRE, LTGR, r1, r1); } + return tcg_cond_to_ltr_cond[c]; } } - } else { - if (is_unsigned) { + + if (!is_unsigned && c2 == (int16_t)c2) { + op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI); + tcg_out_insn_RI(s, op, r1, c2); + goto exit; + } + + if (s390_facilities & FACILITY_EXT_IMM) { if (type == TCG_TYPE_I32) { - tcg_out_insn(s, RR, CLR, r1, c2); - } else { - tcg_out_insn(s, RRE, CLGR, r1, c2); + op = (is_unsigned ? RIL_CLFI : RIL_CFI); + tcg_out_insn_RIL(s, op, r1, c2); + goto exit; + } else if (c2 == (is_unsigned ? (uint32_t)c2 : (int32_t)c2)) { + op = (is_unsigned ? RIL_CLGFI : RIL_CGFI); + tcg_out_insn_RIL(s, op, r1, c2); + goto exit; } + } + + /* Use the constant pool, but not for small constants. */ + if (maybe_out_small_movi(s, type, TCG_TMP0, c2)) { + c2 = TCG_TMP0; + /* fall through to reg-reg */ + } else if (USE_REG_TB) { + if (type == TCG_TYPE_I32) { + op = (is_unsigned ? RXY_CLY : RXY_CY); + tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0); + new_pool_label(s, (uint32_t)c2, R_390_20, s->code_ptr - 2, + 4 - (intptr_t)s->code_gen_ptr); + } else { + op = (is_unsigned ? RXY_CLG : RXY_CG); + tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0); + new_pool_label(s, c2, R_390_20, s->code_ptr - 2, + -(intptr_t)s->code_gen_ptr); + } + goto exit; } else { if (type == TCG_TYPE_I32) { - tcg_out_insn(s, RR, CR, r1, c2); + op = (is_unsigned ? RIL_CLRL : RIL_CRL); + tcg_out_insn_RIL(s, op, r1, 0); + new_pool_label(s, (uint32_t)c2, R_390_PC32DBL, + s->code_ptr - 2, 2 + 4); } else { - tcg_out_insn(s, RRE, CGR, r1, c2); + op = (is_unsigned ? RIL_CLGRL : RIL_CGRL); + tcg_out_insn_RIL(s, op, r1, 0); + new_pool_label(s, c2, R_390_PC32DBL, s->code_ptr - 2, 2); } + goto exit; } } + + if (type == TCG_TYPE_I32) { + op = (is_unsigned ? 
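
The immediate selection in tgen_cmp above prefers the 16-bit signed CHI/CGHI, then the RIL word forms whose signedness must match the condition, and otherwise materializes the constant into a register or the constant pool. A compressed model of that decision (enum names invented; in the real code the 32-bit type always fits a RIL form once extended-immediate is present):

    #include <stdint.h>
    #include <stdio.h>

    enum cmp_form { CMP_HALFWORD, CMP_WORD, CMP_REGISTER };

    static enum cmp_form pick_cmp(int64_t c2, int is_unsigned, int have_ext_imm)
    {
        if (!is_unsigned && c2 == (int16_t)c2) {
            return CMP_HALFWORD;                  /* CHI / CGHI */
        }
        if (have_ext_imm &&
            c2 == (is_unsigned ? (int64_t)(uint32_t)c2 : (int64_t)(int32_t)c2)) {
            return CMP_WORD;                      /* CLFI/CLGFI or CFI/CGFI */
        }
        return CMP_REGISTER;                      /* load c2, then CR/CLGR/... */
    }

    int main(void)
    {
        printf("%d\n", pick_cmp(-1, 0, 1));            /* 0: halfword */
        printf("%d\n", pick_cmp(0x90000000ll, 1, 1));  /* 1: logical word */
        printf("%d\n", pick_cmp(-2, 1, 1));            /* 2: register/pool */
        return 0;
    }
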
RR_CLR : RR_CR); + tcg_out_insn_RR(s, op, r1, c2); + } else { + op = (is_unsigned ? RRE_CLGR : RRE_CGR); + tcg_out_insn_RRE(s, op, r1, c2); + } + + exit: return tcg_cond_to_s390_cond[c]; } @@ -1139,46 +1151,68 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, TCGReg dest, TCGReg c1, TCGArg c2, int c2const) { int cc; + bool have_loc; + /* With LOC2, we can always emit the minimum 3 insns. */ + if (s390_facilities & FACILITY_LOAD_ON_COND2) { + /* Emit: d = 0, d = (cc ? 1 : d). */ + cc = tgen_cmp(s, type, cond, c1, c2, c2const, false); + tcg_out_movi(s, TCG_TYPE_I64, dest, 0); + tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc); + return; + } + + have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0; + + /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */ + restart: switch (cond) { + case TCG_COND_NE: + /* X != 0 is X > 0. */ + if (c2const && c2 == 0) { + cond = TCG_COND_GTU; + } else { + break; + } + /* fallthru */ + case TCG_COND_GTU: case TCG_COND_GT: - do_greater: /* The result of a compare has CC=2 for GT and CC=3 unused. ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */ - tgen_cmp(s, type, cond, c1, c2, c2const); + tgen_cmp(s, type, cond, c1, c2, c2const, true); tcg_out_movi(s, type, dest, 0); tcg_out_insn(s, RRE, ALCGR, dest, dest); return; - case TCG_COND_GEU: - do_geu: - /* We need "real" carry semantics, so use SUBTRACT LOGICAL - instead of COMPARE LOGICAL. This needs an extra move. */ - tcg_out_mov(s, type, TCG_TMP0, c1); - if (c2const) { - tcg_out_movi(s, TCG_TYPE_I64, dest, 0); - if (type == TCG_TYPE_I32) { - tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2); - } else { - tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2); - } + case TCG_COND_EQ: + /* X == 0 is X <= 0. */ + if (c2const && c2 == 0) { + cond = TCG_COND_LEU; } else { - if (type == TCG_TYPE_I32) { - tcg_out_insn(s, RR, SLR, TCG_TMP0, c2); - } else { - tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2); - } - tcg_out_movi(s, TCG_TYPE_I64, dest, 0); + break; } - tcg_out_insn(s, RRE, ALCGR, dest, dest); - return; + /* fallthru */ case TCG_COND_LEU: + case TCG_COND_LE: + /* As above, but we're looking for borrow, or !carry. + The second insn computes d - d - borrow, or -1 for true + and 0 for false. So we must mask to 1 bit afterward. */ + tgen_cmp(s, type, cond, c1, c2, c2const, true); + tcg_out_insn(s, RRE, SLBGR, dest, dest); + tgen_andi(s, type, dest, 1); + return; + + case TCG_COND_GEU: case TCG_COND_LTU: case TCG_COND_LT: - /* Swap operands so that we can use GEU/GTU/GT. */ + case TCG_COND_GE: + /* Swap operands so that we can use LEU/GTU/GT/LE. */ if (c2const) { + if (have_loc) { + break; + } tcg_out_movi(s, type, TCG_TMP0, c2); c2 = c1; c2const = 0; @@ -1188,37 +1222,15 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, c1 = c2; c2 = t; } - if (cond == TCG_COND_LEU) { - goto do_geu; - } cond = tcg_swap_cond(cond); - goto do_greater; - - case TCG_COND_NE: - /* X != 0 is X > 0. */ - if (c2const && c2 == 0) { - cond = TCG_COND_GTU; - goto do_greater; - } - break; - - case TCG_COND_EQ: - /* X == 0 is X <= 0 is 0 >= X. */ - if (c2const && c2 == 0) { - tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0); - c2 = c1; - c2const = 0; - c1 = TCG_TMP0; - goto do_geu; - } - break; + goto restart; default: - break; + g_assert_not_reached(); } - cc = tgen_cmp(s, type, cond, c1, c2, c2const); - if (facilities & FACILITY_LOAD_ON_COND) { + cc = tgen_cmp(s, type, cond, c1, c2, c2const, false); + if (have_loc) { /* Emit: d = 0, t = 1, d = (cc ? t : d). 
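
The GTU/GT and LEU/LE paths above exploit the fact that after a compare, CC bit 1 acts as the carry: ADD LOGICAL WITH CARRY on a zeroed register produces exactly 0 or 1, while SUBTRACT LOGICAL WITH BORROW produces 0 or all-ones and so needs the final one-bit mask. A C model of both identities (illustrative, not the emitted code):

    #include <stdint.h>
    #include <stdio.h>

    static int setcond_gtu(uint64_t a, uint64_t b)
    {
        int carry = a > b;          /* CC=2 after COMPARE LOGICAL */
        uint64_t d = 0;
        d = d + d + carry;          /* ALCGR d,d */
        return (int)d;              /* already 0 or 1 */
    }

    static int setcond_leu(uint64_t a, uint64_t b)
    {
        int borrow = !(a > b);      /* borrow == !carry */
        uint64_t d = 0;
        d = d - d - borrow;         /* SLBGR d,d: 0 or all-ones */
        return (int)(d & 1);        /* the tgen_andi(dest, 1) step */
    }

    int main(void)
    {
        printf("%d %d\n", setcond_gtu(2, 1), setcond_gtu(1, 2)); /* 1 0 */
        printf("%d %d\n", setcond_leu(1, 2), setcond_leu(2, 1)); /* 1 0 */
        return 0;
    }
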
*/ tcg_out_movi(s, TCG_TYPE_I64, dest, 0); tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1); @@ -1232,33 +1244,66 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, } static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest, - TCGReg c1, TCGArg c2, int c2const, TCGReg r3) + TCGReg c1, TCGArg c2, int c2const, + TCGArg v3, int v3const) { int cc; - if (facilities & FACILITY_LOAD_ON_COND) { - cc = tgen_cmp(s, type, c, c1, c2, c2const); - tcg_out_insn(s, RRF, LOCGR, dest, r3, cc); + if (s390_facilities & FACILITY_LOAD_ON_COND) { + cc = tgen_cmp(s, type, c, c1, c2, c2const, false); + if (v3const) { + tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc); + } else { + tcg_out_insn(s, RRF, LOCGR, dest, v3, cc); + } } else { c = tcg_invert_cond(c); - cc = tgen_cmp(s, type, c, c1, c2, c2const); + cc = tgen_cmp(s, type, c, c1, c2, c2const, false); /* Emit: if (cc) goto over; dest = r3; over: */ tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1); - tcg_out_insn(s, RRE, LGR, dest, r3); + tcg_out_insn(s, RRE, LGR, dest, v3); } } -bool tcg_target_deposit_valid(int ofs, int len) +static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1, + TCGArg a2, int a2const) { - return (facilities & FACILITY_GEN_INST_EXT) != 0; + /* Since this sets both R and R+1, we have no choice but to store the + result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */ + QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1); + tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1); + + if (a2const && a2 == 64) { + tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0); + } else { + if (a2const) { + tcg_out_movi(s, TCG_TYPE_I64, dest, a2); + } else { + tcg_out_mov(s, TCG_TYPE_I64, dest, a2); + } + if (s390_facilities & FACILITY_LOAD_ON_COND) { + /* Emit: if (one bit found) dest = r0. */ + tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2); + } else { + /* Emit: if (no one bit found) goto over; dest = r0; over: */ + tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1); + tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0); + } + } } static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src, - int ofs, int len) + int ofs, int len, int z) { int lsb = (63 - ofs); int msb = lsb - (len - 1); - tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0); + tcg_out_risbg(s, dest, src, msb, lsb, ofs, z); +} + +static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src, + int ofs, int len) +{ + tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1); } static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest) @@ -1274,34 +1319,31 @@ static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest) } } -static void tgen_branch(TCGContext *s, int cc, int labelno) +static void tgen_branch(TCGContext *s, int cc, TCGLabel *l) { - TCGLabel* l = &s->labels[labelno]; if (l->has_value) { tgen_gotoi(s, cc, l->u.value_ptr); } else if (USE_LONG_BRANCHES) { tcg_out16(s, RIL_BRCL | (cc << 4)); - tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2); + tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, 2); s->code_ptr += 2; } else { tcg_out16(s, RI_BRC | (cc << 4)); - tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2); + tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2); s->code_ptr += 1; } } static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc, - TCGReg r1, TCGReg r2, int labelno) + TCGReg r1, TCGReg r2, TCGLabel *l) { - TCGLabel* l = &s->labels[labelno]; - intptr_t off; + intptr_t off = 0; if (l->has_value) { off = l->u.value_ptr - s->code_ptr; + tcg_debug_assert(off == (int16_t)off); } else { - /* We need to keep the offset unchanged for 
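
tgen_clz works because FIND LEFTMOST ONE writes the leading-zero count into the even register of the R0/R1 pair and yields 64 when no bit is set, so the a2 == 64 case can take R0 unconditionally while the others select between R0 and the fallback value on CC. A model of the semantics the code relies on (sketch only):

    #include <stdint.h>
    #include <stdio.h>

    /* FLOGR result: leading-zero count, or 64 for a zero operand. */
    static unsigned flogr(uint64_t v)
    {
        unsigned n = 0;
        if (v == 0) {
            return 64;
        }
        while (!(v & (1ull << 63))) {
            v <<= 1;
            n++;
        }
        return n;
    }

    int main(void)
    {
        printf("%u %u %u\n", flogr(0), flogr(1), flogr(1ull << 63));
        /* prints "64 63 0" */
        return 0;
    }
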
retranslation. */ - off = s->code_ptr[1]; - tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, labelno, -2); + tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2); } tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2); @@ -1310,17 +1352,15 @@ static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc, } static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc, - TCGReg r1, int i2, int labelno) + TCGReg r1, int i2, TCGLabel *l) { - TCGLabel* l = &s->labels[labelno]; - tcg_target_long off; + tcg_target_long off = 0; if (l->has_value) { off = l->u.value_ptr - s->code_ptr; + tcg_debug_assert(off == (int16_t)off); } else { - /* We need to keep the offset unchanged for retranslation. */ - off = s->code_ptr[1]; - tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, labelno, -2); + tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2); } tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc); @@ -1329,11 +1369,11 @@ static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc, } static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, - TCGReg r1, TCGArg c2, int c2const, int labelno) + TCGReg r1, TCGArg c2, int c2const, TCGLabel *l) { int cc; - if (facilities & FACILITY_GEN_INST_EXT) { + if (s390_facilities & FACILITY_GEN_INST_EXT) { bool is_unsigned = is_unsigned_cond(c); bool in_range; S390Opcode opc; @@ -1344,7 +1384,7 @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, opc = (type == TCG_TYPE_I32 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ) : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ)); - tgen_compare_branch(s, opc, cc, r1, c2, labelno); + tgen_compare_branch(s, opc, cc, r1, c2, l); return; } @@ -1370,13 +1410,13 @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, } } if (in_range) { - tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno); + tgen_compare_imm_branch(s, opc, cc, r1, c2, l); return; } } - cc = tgen_cmp(s, type, c, r1, c2, c2const); - tgen_branch(s, cc, labelno); + cc = tgen_cmp(s, type, c, r1, c2, c2const, false); + tgen_branch(s, cc, l); } static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) @@ -1390,10 +1430,10 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) } } -static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data, TCGReg base, TCGReg index, int disp) { - switch (opc) { + switch (opc & (MO_SSIZE | MO_BSWAP)) { case MO_UB: tcg_out_insn(s, RXY, LLGC, data, base, index, disp); break; @@ -1449,10 +1489,10 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, } } -static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data, +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data, TCGReg base, TCGReg index, int disp) { - switch (opc) { + switch (opc & (MO_SIZE | MO_BSWAP)) { case MO_UB: if (disp >= 0 && disp < 0x1000) { tcg_out_insn(s, RX, STC, data, base, index, disp); @@ -1496,49 +1536,60 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data, } #if defined(CONFIG_SOFTMMU) -/* We're expecting to use a 20-bit signed offset on the tlb memory ops. - Using the offset of the second entry in the last tlb table ensures - that we can index all of the elements of the first entry. */ -QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1]) - > 0x7ffff); +#include "../tcg-ldst.inc.c" + +/* We're expecting to use a 20-bit negative offset on the tlb memory ops. 
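
The rewritten fast path below drops the old fixed-size tlb_table indexing in favor of the two-load CPUTLBDescFast scheme: shift the guest address down to a pre-scaled entry offset, AND it with the (resizable) mask, then add the table base. A host-side model of that address arithmetic, with illustrative constants (CPU_TLB_ENTRY_BITS assumed to be log2 of the entry size):

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS   12
    #define CPU_TLB_ENTRY_BITS 5

    typedef struct {
        uintptr_t mask;   /* (n_entries - 1) << CPU_TLB_ENTRY_BITS */
        uintptr_t table;  /* base of the CPUTLBEntry array */
    } TLBFast;

    /* SRLG + NG + AG, as emitted below. */
    static uintptr_t tlb_entry_addr(const TLBFast *f, uint64_t vaddr)
    {
        uintptr_t ofs = (uintptr_t)(vaddr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
        return f->table + (ofs & f->mask);
    }

    int main(void)
    {
        TLBFast f = { (uintptr_t)255 << CPU_TLB_ENTRY_BITS, 0x10000 };
        printf("%#lx\n", (unsigned long)tlb_entry_addr(&f, 0x7f3000));
        return 0;
    }
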
*/
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
 
 /* Load and compare a TLB entry, leaving the flags set.  Loads the TLB
    addend into R2.  Returns a register with the sanitized guest address.  */
-static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
+static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
                                int mem_index, bool is_ld)
 {
-    TCGMemOp s_bits = opc & MO_SIZE;
-    uint64_t tlb_mask = TARGET_PAGE_MASK | ((1 << s_bits) - 1);
-    int ofs;
+#ifdef TARGET_ARM
+    struct uc_struct *uc = s->uc;
+#endif
+    unsigned s_bits = opc & MO_SIZE;
+    unsigned a_bits = get_alignment_bits(opc);
+    unsigned s_mask = (1 << s_bits) - 1;
+    unsigned a_mask = (1 << a_bits) - 1;
+    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
+    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
+    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
+    int ofs, a_off;
+    uint64_t tlb_mask;
 
-    if (facilities & FACILITY_GEN_INST_EXT) {
-        tcg_out_risbg(s, TCG_REG_R2, addr_reg,
-                      64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
-                      63 - CPU_TLB_ENTRY_BITS,
-                      64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
+    tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
+                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+    tcg_out_insn(s, RXY, NG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, mask_off);
+    tcg_out_insn(s, RXY, AG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, table_off);
+
+    /* For aligned accesses, we check the first byte and include the alignment
+       bits within the address.  For unaligned access, we check that we don't
+       cross pages using the address of the last byte of the access.  */
+    a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
+    tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
+    if ((s390_facilities & FACILITY_GEN_INST_EXT) && a_off == 0) {
         tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
     } else {
-        tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
-                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
-        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_R3, addr_reg);
-        tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
-                  (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
         tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
     }
 
     if (is_ld) {
-        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
+        ofs = offsetof(CPUTLBEntry, addr_read);
     } else {
-        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
+        ofs = offsetof(CPUTLBEntry, addr_write);
     }
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
+        tcg_out_insn(s, RX, C, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
     } else {
-        tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
+        tcg_out_insn(s, RXY, CG, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
     }
 
-    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
-    tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);
+    tcg_out_insn(s, RXY, LG, TCG_REG_R2, TCG_REG_R2, TCG_REG_NONE,
+                 offsetof(CPUTLBEntry, addend));
 
     if (TARGET_LONG_BITS == 32) {
         tgen_ext32u(s, TCG_REG_R3, addr_reg);
@@ -1547,48 +1598,56 @@ static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
     return addr_reg;
 }
 
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
-                                TCGReg data, TCGReg addr, int mem_index,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
+                                TCGReg data, TCGReg addr,
                                 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
     label->is_ld
= is_ld; - label->opc = opc; + label->oi = oi; label->datalo_reg = data; label->addrlo_reg = addr; - label->mem_index = mem_index; label->raddr = raddr; label->label_ptr[0] = label_ptr; } -static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; - TCGMemOp opc = lb->opc; + TCGMemOpIdx oi = lb->oi; + MemOp opc = get_memop(oi); - patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2); + if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, + (intptr_t)s->code_ptr, 2)) { + return false; + } tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); if (TARGET_LONG_BITS == 64) { tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg); } - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, lb->mem_index); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi); tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr); - tcg_out_call(s, qemu_ld_helpers[opc]); + tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]); tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2); tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); + return true; } -static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; - TCGMemOp opc = lb->opc; + TCGMemOpIdx oi = lb->oi; + MemOp opc = get_memop(oi); - patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2); + if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, + (intptr_t)s->code_ptr, 2)) { + return false; + } tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); if (TARGET_LONG_BITS == 64) { @@ -1610,11 +1669,12 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) default: tcg_abort(); } - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, lb->mem_index); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi); tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr); - tcg_out_call(s, qemu_st_helpers[opc]); + tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); + return true; } #else static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, @@ -1624,9 +1684,9 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, tgen_ext32u(s, TCG_TMP0, *addr_reg); *addr_reg = TCG_TMP0; } - if (GUEST_BASE < 0x80000) { + if (guest_base < 0x80000) { *index_reg = TCG_REG_NONE; - *disp = GUEST_BASE; + *disp = guest_base; } else { *index_reg = TCG_GUEST_BASE_REG; *disp = 0; @@ -1635,21 +1695,23 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOp opc, int mem_index) + TCGMemOpIdx oi) { + MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU + unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; TCGReg base_reg; base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1); - label_ptr = s->code_ptr + 1; - tcg_out_insn(s, RI, BRC, S390_CC_NE, 0); + tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); + label_ptr = s->code_ptr; + s->code_ptr += 1; tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); - add_qemu_ldst_label(s, 1, opc, data_reg, addr_reg, mem_index, - s->code_ptr, label_ptr); + add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr); #else TCGReg index_reg; tcg_target_long disp; @@ -1660,21 +1722,23 @@ static void 
tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, } static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOp opc, int mem_index) + TCGMemOpIdx oi) { + MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU + unsigned mem_index = get_mmuidx(oi); tcg_insn_unit *label_ptr; TCGReg base_reg; base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0); - label_ptr = s->code_ptr + 1; - tcg_out_insn(s, RI, BRC, S390_CC_NE, 0); + tcg_out16(s, RI_BRC | (S390_CC_NE << 4)); + label_ptr = s->code_ptr; + s->code_ptr += 1; tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); - add_qemu_ldst_label(s, 0, opc, data_reg, addr_reg, mem_index, - s->code_ptr, label_ptr); + add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr); #else TCGReg index_reg; tcg_target_long disp; @@ -1691,28 +1755,58 @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { - S390Opcode op; + S390Opcode op, op2; TCGArg a0, a1, a2; switch (opc) { case INDEX_op_exit_tb: - /* return value */ - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]); - tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr); + /* Reuse the zeroing that exists for goto_ptr. */ + a0 = args[0]; + if (a0 == 0) { + tgen_gotoi(s, S390_CC_ALWAYS, s->code_gen_epilogue); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0); + tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr); + } break; case INDEX_op_goto_tb: - if (s->tb_jmp_offset) { + a0 = args[0]; + if (s->tb_jmp_insn_offset) { + /* branch displacement must be aligned for atomic patching; + * see if we need to add extra nop before branch + */ + if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) { + tcg_out16(s, NOP); + } + tcg_debug_assert(!USE_REG_TB); tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4)); - s->tb_jmp_offset[args[0]] = tcg_current_code_size(s); + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); s->code_ptr += 2; } else { - /* load address stored at s->tb_next + args[0] */ - tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]); + /* load address stored at s->tb_jmp_target_addr + a0 */ + tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB, + s->tb_jmp_target_addr + a0); /* and go there */ - tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0); + tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB); } - s->tb_next_offset[args[0]] = tcg_current_code_size(s); + set_jmp_reset_offset(s, a0); + + /* For the unlinked path of goto_tb, we need to reset + TCG_REG_TB to the beginning of this TB. 
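
The alignment dance in the goto_tb case above exists because the BRCL is patched while other threads may be executing it: its 32-bit displacement starts 2 bytes into the 6-byte insn and must itself be 4-byte aligned for the store to be atomic, hence the optional 2-byte NOP. A sketch of the check (byte addresses; s->code_ptr counts 2-byte insn units):

    #include <stdint.h>
    #include <stdio.h>

    static int needs_align_nop(uintptr_t brcl_byte_addr)
    {
        return ((brcl_byte_addr + 2) & 3) != 0;   /* displacement field */
    }

    int main(void)
    {
        printf("%d %d\n", needs_align_nop(0x1000), needs_align_nop(0x1002));
        /* prints "1 0": emitted at 0x1002, the field is 4-byte aligned */
        return 0;
    }
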
*/ + if (USE_REG_TB) { + int ofs = -tcg_current_code_size(s); + assert(ofs == (int16_t)ofs); + tcg_out_insn(s, RI, AGHI, TCG_REG_TB, ofs); + } + break; + + case INDEX_op_goto_ptr: + a0 = args[0]; + if (USE_REG_TB) { + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0); + } + tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0); break; OP_32_64(ld8u): @@ -1763,7 +1857,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_insn(s, RI, AHI, a0, a2); break; } - if (facilities & FACILITY_EXT_IMM) { + if (s390_facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RIL, AFI, a0, a2); break; } @@ -1780,29 +1874,44 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, if (const_args[2]) { a2 = -a2; goto do_addi_32; + } else if (a0 == a1) { + tcg_out_insn(s, RR, SR, a0, a2); + } else { + tcg_out_insn(s, RRF, SRK, a0, a1, a2); } - tcg_out_insn(s, RR, SR, args[0], args[2]); break; case INDEX_op_and_i32: + a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; if (const_args[2]) { - tgen_andi(s, TCG_TYPE_I32, args[0], args[2]); + tcg_out_mov(s, TCG_TYPE_I32, a0, a1); + tgen_andi(s, TCG_TYPE_I32, a0, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RR, NR, a0, a2); } else { - tcg_out_insn(s, RR, NR, args[0], args[2]); + tcg_out_insn(s, RRF, NRK, a0, a1, a2); } break; case INDEX_op_or_i32: + a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; if (const_args[2]) { - tgen64_ori(s, args[0], args[2] & 0xffffffff); + tcg_out_mov(s, TCG_TYPE_I32, a0, a1); + tgen_ori(s, TCG_TYPE_I32, a0, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RR, OR, a0, a2); } else { - tcg_out_insn(s, RR, OR, args[0], args[2]); + tcg_out_insn(s, RRF, ORK, a0, a1, a2); } break; case INDEX_op_xor_i32: + a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; if (const_args[2]) { - tgen64_xori(s, args[0], args[2] & 0xffffffff); - } else { + tcg_out_mov(s, TCG_TYPE_I32, a0, a1); + tgen_xori(s, TCG_TYPE_I32, a0, a2); + } else if (a0 == a1) { tcg_out_insn(s, RR, XR, args[0], args[2]); + } else { + tcg_out_insn(s, RRF, XRK, a0, a1, a2); } break; @@ -1831,18 +1940,31 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_shl_i32: op = RS_SLL; + op2 = RSY_SLLK; do_shift32: - if (const_args[2]) { - tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]); + a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; + if (a0 == a1) { + if (const_args[2]) { + tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2); + } else { + tcg_out_sh32(s, op, a0, a2, 0); + } } else { - tcg_out_sh32(s, op, args[0], args[2], 0); + /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. 
*/ + if (const_args[2]) { + tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2); + } else { + tcg_out_sh64(s, op2, a0, a1, a2, 0); + } } break; case INDEX_op_shr_i32: op = RS_SRL; + op2 = RSY_SRLK; goto do_shift32; case INDEX_op_sar_i32: op = RS_SRA; + op2 = RSY_SRAK; goto do_shift32; case INDEX_op_rotl_i32: @@ -1904,12 +2026,12 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_br: - tgen_branch(s, S390_CC_ALWAYS, args[0]); + tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0])); break; case INDEX_op_brcond_i32: tgen_brcond(s, TCG_TYPE_I32, args[2], args[0], - args[1], const_args[1], args[3]); + args[1], const_args[1], arg_label(args[3])); break; case INDEX_op_setcond_i32: tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], @@ -1917,17 +2039,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_movcond_i32: tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], - args[2], const_args[2], args[3]); + args[2], const_args[2], args[3], const_args[3]); break; case INDEX_op_qemu_ld_i32: /* ??? Technically we can use a non-extending instruction. */ case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3]); + tcg_out_qemu_ld(s, args[0], args[1], args[2]); break; case INDEX_op_qemu_st_i32: case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]); + tcg_out_qemu_st(s, args[0], args[1], args[2]); break; case INDEX_op_ld16s_i64: @@ -1959,7 +2081,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_insn(s, RI, AGHI, a0, a2); break; } - if (facilities & FACILITY_EXT_IMM) { + if (s390_facilities & FACILITY_EXT_IMM) { if (a2 == (int32_t)a2) { tcg_out_insn(s, RIL, AGFI, a0, a2); break; @@ -1984,30 +2106,44 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, if (const_args[2]) { a2 = -a2; goto do_addi_64; + } else if (a0 == a1) { + tcg_out_insn(s, RRE, SGR, a0, a2); } else { - tcg_out_insn(s, RRE, SGR, args[0], args[2]); + tcg_out_insn(s, RRF, SGRK, a0, a1, a2); } break; case INDEX_op_and_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { + tcg_out_mov(s, TCG_TYPE_I64, a0, a1); tgen_andi(s, TCG_TYPE_I64, args[0], args[2]); - } else { + } else if (a0 == a1) { tcg_out_insn(s, RRE, NGR, args[0], args[2]); + } else { + tcg_out_insn(s, RRF, NGRK, a0, a1, a2); } break; case INDEX_op_or_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { - tgen64_ori(s, args[0], args[2]); + tcg_out_mov(s, TCG_TYPE_I64, a0, a1); + tgen_ori(s, TCG_TYPE_I64, a0, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RRE, OGR, a0, a2); } else { - tcg_out_insn(s, RRE, OGR, args[0], args[2]); + tcg_out_insn(s, RRF, OGRK, a0, a1, a2); } break; case INDEX_op_xor_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; if (const_args[2]) { - tgen64_xori(s, args[0], args[2]); + tcg_out_mov(s, TCG_TYPE_I64, a0, a1); + tgen_xori(s, TCG_TYPE_I64, a0, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RRE, XGR, a0, a2); } else { - tcg_out_insn(s, RRE, XGR, args[0], args[2]); + tcg_out_insn(s, RRF, XGRK, a0, a1, a2); } break; @@ -2086,6 +2222,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_ext16s_i64: tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]); break; + case INDEX_op_ext_i32_i64: case INDEX_op_ext32s_i64: tgen_ext32s(s, args[0], args[1]); break; @@ -2095,6 +2232,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_ext16u_i64: tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]); break; + case INDEX_op_extu_i32_i64: case 
INDEX_op_ext32u_i64: tgen_ext32u(s, args[0], args[1]); break; @@ -2126,7 +2264,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_brcond_i64: tgen_brcond(s, TCG_TYPE_I64, args[2], args[0], - args[1], const_args[1], args[3]); + args[1], const_args[1], arg_label(args[3])); break; case INDEX_op_setcond_i64: tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], @@ -2134,11 +2272,43 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_movcond_i64: tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], - args[2], const_args[2], args[3]); + args[2], const_args[2], args[3], const_args[3]); break; OP_32_64(deposit): - tgen_deposit(s, args[0], args[2], args[3], args[4]); + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[1]) { + tgen_deposit(s, a0, a2, args[3], args[4], 1); + } else { + /* Since we can't support "0Z" as a constraint, we allow a1 in + any register. Fix things up as if a matching constraint. */ + if (a0 != a1) { + TCGType type = (opc == INDEX_op_deposit_i64); + if (a0 == a2) { + tcg_out_mov(s, type, TCG_TMP0, a2); + a2 = TCG_TMP0; + } + tcg_out_mov(s, type, a0, a1); + } + tgen_deposit(s, a0, a2, args[3], args[4], 0); + } + break; + + OP_32_64(extract): + tgen_extract(s, args[0], args[1], args[2], args[3]); + break; + + case INDEX_op_clz_i64: + tgen_clz(s, args[0], args[1], args[2], const_args[2]); + break; + + case INDEX_op_mb: + /* The host memory model is quite strong, we simply need to + serialize the instruction stream. */ + if (args[0] & TCG_MO_ST_LD) { + tcg_out_insn(s, RR, BCR, + s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0); + } break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ @@ -2151,118 +2321,171 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } } -static const TCGTargetOpDef s390_op_defs[] = { - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - { INDEX_op_br, { } }, +static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) +{ + static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; + static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; + static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } }; + static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } }; + static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } }; + static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } }; + static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } }; + static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } }; + static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } }; + static const TCGTargetOpDef a2_r + = { .args_ct_str = { "r", "r", "0", "1", "r", "r" } }; + static const TCGTargetOpDef a2_ri + = { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } }; + static const TCGTargetOpDef a2_rA + = { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } }; - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - { INDEX_op_st8_i32, { "r", "r" } }, - { INDEX_op_st16_i32, { "r", "r" } }, - { INDEX_op_st_i32, { "r", "r" } }, + switch (op) { + case INDEX_op_goto_ptr: + return &r; - { INDEX_op_add_i32, { "r", "r", "ri" } }, - { INDEX_op_sub_i32, { "r", "0", "ri" } }, - { INDEX_op_mul_i32, { "r", "0", "rK" } }, + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i32: + case INDEX_op_ld8s_i64: + case 
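
tgen_extract maps TCG's extract onto a single RISBG: rotate the source left by 64 - ofs so the field's low bit reaches bit 0, keep bits (64 - len)..63, and zero everything else. A C model of the resulting bit selection (sketch only):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t extract_field(uint64_t src, int ofs, int len)
    {
        uint64_t rot = (src >> ofs) | (ofs ? src << (64 - ofs) : 0);
        uint64_t mask = (len == 64) ? ~0ull : (1ull << len) - 1;
        return rot & mask;
    }

    int main(void)
    {
        printf("%llx\n",
               (unsigned long long)extract_field(0xabcd0000u, 16, 16));
        /* prints "abcd" */
        return 0;
    }
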
INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i32: + case INDEX_op_ld16s_i64: + case INDEX_op_ld_i32: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld_i64: + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + return &r_r; - { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } }, - { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } }, + case INDEX_op_add_i32: + case INDEX_op_add_i64: + return &r_r_ri; + case INDEX_op_sub_i32: + case INDEX_op_sub_i64: + case INDEX_op_and_i32: + case INDEX_op_and_i64: + case INDEX_op_or_i32: + case INDEX_op_or_i64: + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri); - { INDEX_op_and_i32, { "r", "0", "ri" } }, - { INDEX_op_or_i32, { "r", "0", "rO" } }, - { INDEX_op_xor_i32, { "r", "0", "rX" } }, + case INDEX_op_mul_i32: + /* If we have the general-instruction-extensions, then we have + MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we + have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */ + return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI); + case INDEX_op_mul_i64: + return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI); - { INDEX_op_neg_i32, { "r", "r" } }, + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri); - { INDEX_op_shl_i32, { "r", "0", "Ri" } }, - { INDEX_op_shr_i32, { "r", "0", "Ri" } }, - { INDEX_op_sar_i32, { "r", "0", "Ri" } }, + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + return &r_r_ri; - { INDEX_op_rotl_i32, { "r", "r", "Ri" } }, - { INDEX_op_rotr_i32, { "r", "r", "Ri" } }, + case INDEX_op_rotl_i32: + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i32: + case INDEX_op_rotr_i64: + return &r_r_ri; - { INDEX_op_ext8s_i32, { "r", "r" } }, - { INDEX_op_ext8u_i32, { "r", "r" } }, - { INDEX_op_ext16s_i32, { "r", "r" } }, - { INDEX_op_ext16u_i32, { "r", "r" } }, + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + return &r_ri; - { INDEX_op_bswap16_i32, { "r", "r" } }, - { INDEX_op_bswap32_i32, { "r", "r" } }, + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + case INDEX_op_bswap32_i32: + case INDEX_op_bswap32_i64: + case INDEX_op_bswap64_i64: + case INDEX_op_neg_i32: + case INDEX_op_neg_i64: + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extract_i32: + case INDEX_op_extract_i64: + return &r_r; - { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } }, - { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } }, + case INDEX_op_clz_i64: + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + return &r_r_ri; - { INDEX_op_brcond_i32, { "r", "rC" } }, - { INDEX_op_setcond_i32, { "r", "r", "rC" } }, - { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } }, - { INDEX_op_deposit_i32, { "r", "0", "r" } }, + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + return &r_L; + case INDEX_op_qemu_st_i64: + case INDEX_op_qemu_st_i32: + return &L_L; - { INDEX_op_qemu_ld_i32, { "r", "L" } }, - { INDEX_op_qemu_ld_i64, { "r", "L" } }, - { 
INDEX_op_qemu_st_i32, { "L", "L" } }, - { INDEX_op_qemu_st_i64, { "L", "L" } }, + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + { + static const TCGTargetOpDef dep + = { .args_ct_str = { "r", "rZ", "r" } }; + return &dep; + } + case INDEX_op_movcond_i32: + case INDEX_op_movcond_i64: + { + static const TCGTargetOpDef movc + = { .args_ct_str = { "r", "r", "ri", "r", "0" } }; + static const TCGTargetOpDef movc_l + = { .args_ct_str = { "r", "r", "ri", "rI", "0" } }; + return (s390_facilities & FACILITY_LOAD_ON_COND2 ? &movc_l : &movc); + } + case INDEX_op_div2_i32: + case INDEX_op_div2_i64: + case INDEX_op_divu2_i32: + case INDEX_op_divu2_i64: + { + static const TCGTargetOpDef div2 + = { .args_ct_str = { "b", "a", "0", "1", "r" } }; + return &div2; + } + case INDEX_op_mulu2_i64: + { + static const TCGTargetOpDef mul2 + = { .args_ct_str = { "b", "a", "0", "r" } }; + return &mul2; + } - { INDEX_op_ld8u_i64, { "r", "r" } }, - { INDEX_op_ld8s_i64, { "r", "r" } }, - { INDEX_op_ld16u_i64, { "r", "r" } }, - { INDEX_op_ld16s_i64, { "r", "r" } }, - { INDEX_op_ld32u_i64, { "r", "r" } }, - { INDEX_op_ld32s_i64, { "r", "r" } }, - { INDEX_op_ld_i64, { "r", "r" } }, + case INDEX_op_add2_i32: + case INDEX_op_sub2_i32: + return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r); + case INDEX_op_add2_i64: + case INDEX_op_sub2_i64: + return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r); - { INDEX_op_st8_i64, { "r", "r" } }, - { INDEX_op_st16_i64, { "r", "r" } }, - { INDEX_op_st32_i64, { "r", "r" } }, - { INDEX_op_st_i64, { "r", "r" } }, + default: + break; + } + return NULL; +} - { INDEX_op_add_i64, { "r", "r", "ri" } }, - { INDEX_op_sub_i64, { "r", "0", "ri" } }, - { INDEX_op_mul_i64, { "r", "0", "rK" } }, - - { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } }, - { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } }, - { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } }, - - { INDEX_op_and_i64, { "r", "0", "ri" } }, - { INDEX_op_or_i64, { "r", "0", "rO" } }, - { INDEX_op_xor_i64, { "r", "0", "rX" } }, - - { INDEX_op_neg_i64, { "r", "r" } }, - - { INDEX_op_shl_i64, { "r", "r", "Ri" } }, - { INDEX_op_shr_i64, { "r", "r", "Ri" } }, - { INDEX_op_sar_i64, { "r", "r", "Ri" } }, - - { INDEX_op_rotl_i64, { "r", "r", "Ri" } }, - { INDEX_op_rotr_i64, { "r", "r", "Ri" } }, - - { INDEX_op_ext8s_i64, { "r", "r" } }, - { INDEX_op_ext8u_i64, { "r", "r" } }, - { INDEX_op_ext16s_i64, { "r", "r" } }, - { INDEX_op_ext16u_i64, { "r", "r" } }, - { INDEX_op_ext32s_i64, { "r", "r" } }, - { INDEX_op_ext32u_i64, { "r", "r" } }, - - { INDEX_op_bswap16_i64, { "r", "r" } }, - { INDEX_op_bswap32_i64, { "r", "r" } }, - { INDEX_op_bswap64_i64, { "r", "r" } }, - - { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } }, - { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } }, - - { INDEX_op_brcond_i64, { "r", "rC" } }, - { INDEX_op_setcond_i64, { "r", "r", "rC" } }, - { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } }, - { INDEX_op_deposit_i64, { "r", "0", "r" } }, - - { -1 }, -}; - -static void query_facilities(void) +static void query_s390_facilities(void) { unsigned long hwcap = qemu_getauxval(AT_HWCAP); @@ -2273,7 +2496,7 @@ static void query_facilities(void) register void *r1 __asm__("1"); /* stfle 0(%r1) */ - r1 = &facilities; + r1 = &s390_facilities; asm volatile(".word 0xb2b0,0x1000" : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc"); } @@ -2281,12 +2504,12 @@ static void query_facilities(void) static void tcg_target_init(TCGContext *s) { - query_facilities(); + query_s390_facilities(); - 
tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff); + s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; + s->tcg_target_available_regs[TCG_TYPE_I64] = 0xffff; - tcg_regset_clear(s->tcg_target_call_clobber_regs); + s->tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R0); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R1); tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R2); @@ -2299,13 +2522,14 @@ static void tcg_target_init(TCGContext *s) /* The return register can be considered call-clobbered. */ tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R14); - tcg_regset_clear(s->reserved_regs); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* XXX many insns can't be used with R0, so we better avoid it for now */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); - - tcg_add_target_add_op_defs(s, s390_op_defs); + if (USE_REG_TB) { + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); + } } #define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \ @@ -2324,15 +2548,30 @@ static void tcg_target_qemu_prologue(TCGContext *s) TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET, CPU_TEMP_BUF_NLONGS * sizeof(long)); - if (GUEST_BASE >= 0x80000) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); +#ifndef CONFIG_SOFTMMU + if (guest_base >= 0x80000) { + tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); } +#endif tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + if (USE_REG_TB) { + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, + tcg_target_call_iarg_regs[1]); + } + /* br %r3 (go to TB) */ tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]); + /* + * Return path for goto_ptr. Set return value to 0, a-la exit_tb, + * and fall through to the rest of the epilogue. + */ + s->code_gen_epilogue = s->code_ptr; + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0); + + /* TB epilogue */ tb_ret_addr = s->code_ptr; /* lmg %r6,%r15,fs+48(%r15) (restore registers) */ @@ -2343,4 +2582,52 @@ static void tcg_target_qemu_prologue(TCGContext *s) tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14); } +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + memset(p, 0x07, count * sizeof(tcg_insn_unit)); +} + +typedef struct { + DebugFrameHeader h; + uint8_t fde_def_cfa[4]; + uint8_t fde_reg_ofs[18]; +} DebugFrame; + +/* We're expecting a 2 byte uleb128 encoded value. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); + #define ELF_HOST_MACHINE EM_S390 + +static const DebugFrame debug_frame = { + .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ + .h.cie.id = -1, + .h.cie.version = 1, + .h.cie.code_align = 1, + .h.cie.data_align = 8, /* sleb128 8 */ + .h.cie.return_column = TCG_REG_R14, + + /* Total FDE size does not include the "len" member. */ + .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), + + .fde_def_cfa = { + 12, TCG_REG_CALL_STACK, /* DW_CFA_def_cfa %r15, ... */ + (FRAME_SIZE & 0x7f) | 0x80, /* ... 
uleb128 FRAME_SIZE */
+        (FRAME_SIZE >> 7)
+    },
+    .fde_reg_ofs = {
+        0x86, 6,                    /* DW_CFA_offset, %r6, 48 */
+        0x87, 7,                    /* DW_CFA_offset, %r7, 56 */
+        0x88, 8,                    /* DW_CFA_offset, %r8, 64 */
+        0x89, 9,                    /* DW_CFA_offset, %r9, 72 */
+        0x8a, 10,                   /* DW_CFA_offset, %r10, 80 */
+        0x8b, 11,                   /* DW_CFA_offset, %r11, 88 */
+        0x8c, 12,                   /* DW_CFA_offset, %r12, 96 */
+        0x8d, 13,                   /* DW_CFA_offset, %r13, 104 */
+        0x8e, 14,                   /* DW_CFA_offset, %r14, 112 */
+    }
+};
+
+void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size)
+{
+    tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame));
+}
diff --git a/qemu/tcg/sparc/tcg-target.h b/qemu/tcg/sparc/tcg-target.h
index b1b2265c..633841eb 100644
--- a/qemu/tcg/sparc/tcg-target.h
+++ b/qemu/tcg/sparc/tcg-target.h
@@ -21,12 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
-#ifndef TCG_TARGET_SPARC
-#define TCG_TARGET_SPARC 1
+
+#ifndef SPARC_TCG_TARGET_H
+#define SPARC_TCG_TARGET_H
 
 #define TCG_TARGET_REG_BITS 64
 
 #define TCG_TARGET_INSN_UNIT_SIZE 4
+#define TCG_TARGET_TLB_DISPLACEMENT_BITS 32
 #define TCG_TARGET_NB_REGS 32
 
 typedef enum {
@@ -108,7 +110,13 @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_eqv_i32          0
 #define TCG_TARGET_HAS_nand_i32         0
 #define TCG_TARGET_HAS_nor_i32          0
+#define TCG_TARGET_HAS_clz_i32          0
+#define TCG_TARGET_HAS_ctz_i32          0
+#define TCG_TARGET_HAS_ctpop_i32        0
 #define TCG_TARGET_HAS_deposit_i32      0
+#define TCG_TARGET_HAS_extract_i32      0
+#define TCG_TARGET_HAS_sextract_i32     0
+#define TCG_TARGET_HAS_extract2_i32     0
 #define TCG_TARGET_HAS_movcond_i32      1
 #define TCG_TARGET_HAS_add2_i32         1
 #define TCG_TARGET_HAS_sub2_i32         1
@@ -116,8 +124,11 @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_muls2_i32        1
 #define TCG_TARGET_HAS_muluh_i32        0
 #define TCG_TARGET_HAS_mulsh_i32        0
+#define TCG_TARGET_HAS_goto_ptr         1
+#define TCG_TARGET_HAS_direct_jump      1
 
-#define TCG_TARGET_HAS_trunc_shr_i32    1
+#define TCG_TARGET_HAS_extrl_i64_i32    1
+#define TCG_TARGET_HAS_extrh_i64_i32    1
 #define TCG_TARGET_HAS_div_i64          1
 #define TCG_TARGET_HAS_rem_i64          0
 #define TCG_TARGET_HAS_rot_i64          0
@@ -137,7 +148,13 @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_eqv_i64          0
 #define TCG_TARGET_HAS_nand_i64         0
 #define TCG_TARGET_HAS_nor_i64          0
+#define TCG_TARGET_HAS_clz_i64          0
+#define TCG_TARGET_HAS_ctz_i64          0
+#define TCG_TARGET_HAS_ctpop_i64        0
 #define TCG_TARGET_HAS_deposit_i64      0
+#define TCG_TARGET_HAS_extract_i64      0
+#define TCG_TARGET_HAS_sextract_i64     0
+#define TCG_TARGET_HAS_extract2_i64     0
 #define TCG_TARGET_HAS_movcond_i64      1
 #define TCG_TARGET_HAS_add2_i64         1
 #define TCG_TARGET_HAS_sub2_i64         1
@@ -148,13 +165,9 @@ extern bool use_vis3_instructions;
 
 #define TCG_AREG0 TCG_REG_I0
 
-#ifdef _MSC_VER
-#include <windows.h>
-static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
-{
-    FlushInstructionCache(GetCurrentProcess(), (const void*)start, stop-start);
-}
-#else
+#define TCG_TARGET_DEFAULT_MO (0)
+#define TCG_TARGET_HAS_MEMORY_BSWAP 1
+
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
     uintptr_t p;
@@ -162,6 +175,9 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
         __asm__ __volatile__("flush\t%0" : : "r" (p));
     }
 }
-#endif
+
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
+
+#define TCG_TARGET_NEED_POOL_LABELS
 
 #endif
diff --git a/qemu/tcg/sparc/tcg-target.c b/qemu/tcg/sparc/tcg-target.inc.c
similarity index 66%
rename from qemu/tcg/sparc/tcg-target.c
rename to qemu/tcg/sparc/tcg-target.inc.c
index 3fcdcad4..d4bc69d3 100644
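
The two fde_def_cfa bytes after the register number in the s390 debug_frame above encode FRAME_SIZE as a two-byte ULEB128, which is why the build assertion insists on FRAME_SIZE < (1 << 14). A sketch of that encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Two-byte ULEB128: low 7 bits plus continuation bit, then the rest.
       Valid only while the value is below 1 << 14.  */
    static void uleb128_2(unsigned v, uint8_t out[2])
    {
        out[0] = (v & 0x7f) | 0x80;
        out[1] = v >> 7;
    }

    int main(void)
    {
        uint8_t b[2];
        uleb128_2(160, b);
        printf("%02x %02x\n", b[0], b[1]);   /* prints "a0 01" */
        return 0;
    }
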
--- a/qemu/tcg/sparc/tcg-target.c +++ b/qemu/tcg/sparc/tcg-target.inc.c @@ -22,9 +22,9 @@ * THE SOFTWARE. */ -#include "tcg-be-null.h" +#include "../tcg-pool.inc.c" -#ifndef NDEBUG +#ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "%g0", "%g1", @@ -83,12 +83,13 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { #define TCG_REG_T1 TCG_REG_G1 #define TCG_REG_T2 TCG_REG_O7 -#ifdef CONFIG_USE_GUEST_BASE +#ifndef CONFIG_SOFTMMU # define TCG_GUEST_BASE_REG TCG_REG_I5 -#else -# define TCG_GUEST_BASE_REG TCG_REG_G0 #endif +#define TCG_REG_TB TCG_REG_I1 +#define USE_REG_TB (sizeof(void *) > 4) + static const int tcg_target_reg_alloc_order[] = { TCG_REG_L0, TCG_REG_L1, @@ -251,6 +252,10 @@ static const int tcg_target_call_oarg_regs[] = { #define STWA (INSN_OP(3) | INSN_OP3(0x14)) #define STXA (INSN_OP(3) | INSN_OP3(0x1e)) +#define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13)) + +#define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0) + #ifndef ASI_PRIMARY_LITTLE #define ASI_PRIMARY_LITTLE 0x88 #endif @@ -286,57 +291,50 @@ static inline int check_fit_i32(int32_t val, unsigned int bits) # define check_fit_ptr check_fit_i32 #endif -static void patch_reloc(tcg_insn_unit *code_ptr, int type, +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { - uint32_t insn; + uint32_t insn = *code_ptr; + intptr_t pcrel; - assert(addend == 0); - value = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr); + value += addend; + pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr); switch (type) { case R_SPARC_WDISP16: - if (!check_fit_ptr(value >> 2, 16)) { - tcg_abort(); - } - insn = *code_ptr; + assert(check_fit_ptr(pcrel >> 2, 16)); insn &= ~INSN_OFF16(-1); - insn |= INSN_OFF16(value); - *code_ptr = insn; + insn |= INSN_OFF16(pcrel); break; case R_SPARC_WDISP19: - if (!check_fit_ptr(value >> 2, 19)) { - tcg_abort(); - } - insn = *code_ptr; + assert(check_fit_ptr(pcrel >> 2, 19)); insn &= ~INSN_OFF19(-1); - insn |= INSN_OFF19(value); - *code_ptr = insn; + insn |= INSN_OFF19(pcrel); break; default: - tcg_abort(); + g_assert_not_reached(); } + + *code_ptr = insn; + return true; } /* parse target specific constraints */ -static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +static const char *target_parse_constraint(TCGArgConstraint *ct, + const char *ct_str, TCGType type) { - const char *ct_str; - - ct_str = *pct_str; - switch (ct_str[0]) { + switch (*ct_str++) { case 'r': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + ct->u.regs = 0xffffffff; break; case 'R': ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, ALL_64); + ct->u.regs = ALL_64; break; case 'A': /* qemu_ld/st address constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, - TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff); + ct->u.regs = TARGET_LONG_BITS == 64 ? 
ALL_64 : 0xffffffff; reserve_helpers: tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0); tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1); @@ -344,11 +342,11 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) break; case 's': /* qemu_st data 32-bit constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + ct->u.regs = 0xffffffff; goto reserve_helpers; case 'S': /* qemu_st data 64-bit constraint */ ct->ct |= TCG_CT_REG; - tcg_regset_set32(ct->u.regs, 0, ALL_64); + ct->u.regs = ALL_64; goto reserve_helpers; case 'I': ct->ct |= TCG_CT_CONST_S11; @@ -360,11 +358,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) ct->ct |= TCG_CT_CONST_ZERO; break; default: - return -1; + return NULL; } - ct_str++; - *pct_str = ct_str; - return 0; + return ct_str; } /* test if a constant matches the constraint */ @@ -405,18 +401,19 @@ static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1, } static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1, - int32_t val2, int val2const, int op) + int32_t val2, int val2const, int op) { tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2))); } -static inline void tcg_out_mov(TCGContext *s, TCGType type, +static inline bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { if (ret != arg) { tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR); } + return true; } static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg) @@ -429,10 +426,11 @@ static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); } -static void tcg_out_movi(TCGContext *s, TCGType type, - TCGReg ret, tcg_target_long arg) +static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, + tcg_target_long arg, bool in_prologue) { tcg_target_long hi, lo = (int32_t)arg; + tcg_target_long test, lsb; /* Make sure we test 32-bit constants for imm13 properly. */ if (type == TCG_TYPE_I32) { @@ -445,6 +443,15 @@ static void tcg_out_movi(TCGContext *s, TCGType type, return; } + /* A 13-bit constant relative to the TB. */ + if (!in_prologue && USE_REG_TB) { + test = arg - (uintptr_t)s->code_gen_ptr; + if (check_fit_ptr(test, 13)) { + tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD); + return; + } + } + /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { tcg_out_sethi(s, ret, arg); @@ -461,6 +468,19 @@ static void tcg_out_movi(TCGContext *s, TCGType type, return; } + /* A 21-bit constant, shifted. */ + lsb = ctz64(arg); + test = (tcg_target_long)arg >> lsb; + if (check_fit_tl(test, 13)) { + tcg_out_movi_imm13(s, ret, test); + tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX); + return; + } else if (lsb > 10 && test == extract64(test, 0, 21)) { + tcg_out_sethi(s, ret, test << 10); + tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX); + return; + } + /* A 64-bit constant decomposed into 2 32-bit pieces. 
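
/*
 * All the check_fit_* tests above ask one question: does a value survive
 * truncation to an N-bit signed immediate?  Below is a sketch of that
 * predicate plus the "21-bit constant, shifted" decomposition used by
 * tcg_out_movi_int().  fits_signed() and split_shifted_imm13() are
 * hypothetical names, and ctz64 is rendered as the GCC/Clang builtin.
 */
#include <stdbool.h>
#include <stdint.h>

static bool fits_signed(int64_t val, unsigned bits)
{
    int64_t lo = -((int64_t)1 << (bits - 1));
    int64_t hi = ((int64_t)1 << (bits - 1)) - 1;
    return val >= lo && val <= hi;
}

/* Split arg into (base << lsb) with base a simm13, so the value can be
   built as "or %g0, base, rd ; sllx rd, lsb, rd". */
static bool split_shifted_imm13(uint64_t arg, int64_t *base, unsigned *lsb)
{
    if (arg == 0) {
        return false;               /* zero is already an imm13 */
    }
    *lsb = __builtin_ctzll(arg);
    *base = (int64_t)arg >> *lsb;
    return fits_signed(*base, 13);
}
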
*/ if (check_fit_i32(lo, 13)) { hi = (arg - lo) >> 32; @@ -476,6 +496,12 @@ static void tcg_out_movi(TCGContext *s, TCGType type, } } +static inline void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long arg) +{ + tcg_out_movi_int(s, type, ret, arg, false); +} + static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1, TCGReg a2, int op) { @@ -506,8 +532,23 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX)); } +static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) +{ + if (val == 0) { + tcg_out_st(s, type, TCG_REG_G0, base, ofs); + return true; + } + return false; +} + static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg) { + intptr_t diff = arg - (uintptr_t)s->code_gen_ptr; + if (USE_REG_TB && check_fit_ptr(diff, 13)) { + tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff); + return; + } tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff); tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff); } @@ -539,7 +580,7 @@ static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1, static inline void tcg_out_nop(TCGContext *s) { - tcg_out_sethi(s, TCG_REG_G0, 0); + tcg_out32(s, NOP); } static const uint8_t tcg_cond_to_bcond[] = { @@ -569,17 +610,14 @@ static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19) tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19); } -static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label) +static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l) { - TCGLabel *l = &s->labels[label]; - int off19; + int off19 = 0; if (l->has_value) { off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr)); } else { - /* Make sure to preserve destinations during retranslation. */ - off19 = *s->code_ptr & INSN_OFF19(-1); - tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0); + tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0); } tcg_out_bpcc0(s, scond, flags, off19); } @@ -590,10 +628,10 @@ static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const) } static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1, - int32_t arg2, int const_arg2, int label) + int32_t arg2, int const_arg2, TCGLabel *l) { tcg_out_cmp(s, arg1, arg2, const_arg2); - tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label); + tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l); tcg_out_nop(s); } @@ -614,25 +652,22 @@ static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, } static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1, - int32_t arg2, int const_arg2, int label) + int32_t arg2, int const_arg2, TCGLabel *l) { /* For 64-bit signed comparisons vs zero, we can avoid the compare. */ if (arg2 == 0 && !is_unsigned_cond(cond)) { - TCGLabel *l = &s->labels[label]; - int off16; + int off16 = 0; if (l->has_value) { off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr)); } else { - /* Make sure to preserve destinations during retranslation. 
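
/*
 * The branch emitters above no longer re-read old instruction bits during
 * retranslation; they record an R_SPARC_WDISP16/19 relocation and let
 * patch_reloc() resolve it with a mask-and-deposit on the instruction
 * word.  A sketch of the 19-bit case, with OFF19() as a hypothetical
 * stand-in for INSN_OFF19():
 */
#include <assert.h>
#include <stdint.h>

#define OFF19(x) ((((uint32_t)(x)) >> 2) & 0x7ffffu)

static uint32_t patch_wdisp19(uint32_t insn, intptr_t pcrel)
{
    assert((pcrel & 3) == 0);                          /* word aligned */
    assert(pcrel >= -(1 << 20) && pcrel < (1 << 20));  /* 19 bits << 2 */
    insn &= ~OFF19(-1);                                /* clear the field */
    return insn | OFF19(pcrel);                        /* deposit new disp */
}
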
*/ - off16 = *s->code_ptr & INSN_OFF16(-1); - tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0); + tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0); } tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1) | INSN_COND(tcg_cond_to_rcond[cond]) | off16); } else { tcg_out_cmp(s, arg1, arg2, const_arg2); - tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label); + tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l); } tcg_out_nop(s); } @@ -682,7 +717,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, } c1 = TCG_REG_G0, c2const = 0; cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU); - break; + break; case TCG_COND_GTU: case TCG_COND_LEU: @@ -783,16 +818,16 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, } tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC); } else if (bh == TCG_REG_G0) { - /* If we have a zero, we can perform the operation in two insns, + /* If we have a zero, we can perform the operation in two insns, with the arithmetic first, and a conditional move into place. */ - if (rh == ah) { + if (rh == ah) { tcg_out_arithi(s, TCG_REG_T2, ah, 1, - is_sub ? ARITH_SUB : ARITH_ADD); + is_sub ? ARITH_SUB : ARITH_ADD); tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0); - } else { + } else { tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD); - tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0); - } + tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0); + } } else { /* Otherwise adjust BH as if there is carry into T2 ... */ if (bhconst) { @@ -803,14 +838,15 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, } /* ... smoosh T2 back to original BH if carry is clear ... */ tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst); - /* ... and finally perform the arithmetic with the new operand. */ + /* ... and finally perform the arithmetic with the new operand. */ tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD); } tcg_out_mov(s, TCG_TYPE_I64, rl, tmp); } -static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest) +static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest, + bool in_prologue) { ptrdiff_t disp = tcg_pcrel_diff(s, dest); @@ -818,21 +854,51 @@ static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest) tcg_out32(s, CALL | (uint32_t)disp >> 2); } else { uintptr_t desti = (uintptr_t)dest; - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, desti & ~0xfff); + tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1, + desti & ~0xfff, in_prologue); tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL); } } static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) { - tcg_out_call_nodelay(s, dest); + tcg_out_call_nodelay(s, dest, false); tcg_out_nop(s); } +static void tcg_out_mb(TCGContext *s, TCGArg a0) +{ + /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */ + tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL)); +} + #ifdef CONFIG_SOFTMMU static tcg_insn_unit *qemu_ld_trampoline[16]; static tcg_insn_unit *qemu_st_trampoline[16]; +static void emit_extend(TCGContext *s, TCGReg r, int op) +{ + /* Emit zero extend of 8, 16 or 32 bit data as + * required by the MO_* value op; do nothing for 64 bit. 
+ */ + switch (op & MO_SIZE) { + case MO_8: + tcg_out_arithi(s, r, r, 0xff, ARITH_AND); + break; + case MO_16: + tcg_out_arithi(s, r, r, 16, SHIFT_SLL); + tcg_out_arithi(s, r, r, 16, SHIFT_SRL); + break; + case MO_32: + if (SPARC64) { + tcg_out_arith(s, r, r, 0, SHIFT_SRL); + } + break; + case MO_64: + break; + } +} + static void build_trampolines(TCGContext *s) { static void * const qemu_ld_helpers[16] = { @@ -884,7 +950,7 @@ static void build_trampolines(TCGContext *s) /* Set the env operand. */ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); /* Tail call. */ - tcg_out_call_nodelay(s, qemu_ld_helpers[i]); + tcg_out_call_nodelay(s, qemu_ld_helpers[i], true); tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); } @@ -900,6 +966,7 @@ static void build_trampolines(TCGContext *s) qemu_st_trampoline[i] = s->code_ptr; if (SPARC64) { + emit_extend(s, TCG_REG_O2, i); ra = TCG_REG_O4; } else { ra = TCG_REG_O1; @@ -915,9 +982,10 @@ static void build_trampolines(TCGContext *s) tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX); ra += 2; } else { + emit_extend(s, ra, i); ra += 1; } - /* Skip the mem_index argument. */ + /* Skip the oi argument. */ ra += 1; } @@ -931,7 +999,7 @@ static void build_trampolines(TCGContext *s) /* Set the env operand. */ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); /* Tail call. */ - tcg_out_call_nodelay(s, qemu_st_helpers[i]); + tcg_out_call_nodelay(s, qemu_st_helpers[i], true); tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); } } @@ -957,25 +1025,48 @@ static void tcg_target_qemu_prologue(TCGContext *s) tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) | INSN_IMM13(-frame_size)); -#ifdef CONFIG_USE_GUEST_BASE - if (GUEST_BASE != 0) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); +#ifndef CONFIG_SOFTMMU + if (guest_base != 0) { + tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); } #endif + /* We choose TCG_REG_TB such that no move is required. */ + if (USE_REG_TB) { + QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); + } + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL); /* delay slot */ tcg_out_nop(s); - /* No epilogue required. We issue ret + restore directly in the TB. */ + /* Epilogue for goto_ptr. */ + s->code_gen_epilogue = s->code_ptr; + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); + /* delay slot */ + tcg_out_movi_imm13(s, TCG_REG_O0, 0); #ifdef CONFIG_SOFTMMU build_trampolines(s); #endif } +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + int i; + for (i = 0; i < count; ++i) { + p[i] = NOP; + } +} + #if defined(CONFIG_SOFTMMU) + +/* We expect to use a 13-bit negative offset from ENV. */ +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); + /* Perform the TLB load and compare. Inputs: @@ -990,47 +1081,49 @@ static void tcg_target_qemu_prologue(TCGContext *s) is in the returned register, maybe %o0. The TLB addend is in %o1. 
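
/*
 * What emit_extend() above produces, written as plain C on a 64-bit
 * register value.  SPARC v9's SRL shifts the low 32 bits and
 * zero-extends the result, which is why "srl r, 0, r" works as a 32-bit
 * zero-extension.  Sketch only:
 */
#include <stdint.h>

static uint64_t zext_for_mo_size(uint64_t r, unsigned mo_size)
{
    switch (mo_size) {
    case 0:  return r & 0xff;                   /* MO_8:  and r, 0xff, r */
    case 1:  return (uint32_t)(r << 16) >> 16;  /* MO_16: sll 16; srl 16 */
    case 2:  return (uint32_t)r;                /* MO_32: srl r, 0, r    */
    default: return r;                          /* MO_64: nothing needed */
    }
}
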
*/ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, - TCGMemOp s_bits, int which) + MemOp opc, int which) { +#ifdef TARGET_ARM + struct uc_struct *uc = s->uc; +#endif + int fast_off = TLB_MASK_TABLE_OFS(mem_index); + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); + int table_off = fast_off + offsetof(CPUTLBDescFast, table); const TCGReg r0 = TCG_REG_O0; const TCGReg r1 = TCG_REG_O1; const TCGReg r2 = TCG_REG_O2; - int tlb_ofs; + unsigned s_bits = opc & MO_SIZE; + unsigned a_bits = get_alignment_bits(opc); + tcg_target_long compare_mask; - /* Shift the page number down. */ - tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL); + /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ + tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off); + tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off); - /* Mask out the page offset, except for the required alignment. */ - tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1, - TARGET_PAGE_MASK | ((1 << s_bits) - 1)); + /* Extract the page index, shifted into place for tlb index. */ + tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, + SHIFT_SRL); + tcg_out_arith(s, r2, r2, r0, ARITH_AND); - /* Mask the tlb index. */ - tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND); - - /* Mask page, part 2. */ - tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND); - - /* Shift the tlb index into place. */ - tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL); - - /* Relative to the current ENV. */ - tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD); - - /* Find a base address that can load both tlb comparator and addend. */ - tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]); - if (!check_fit_ptr(tlb_ofs + sizeof(CPUTLBEntry), 13)) { - if (tlb_ofs & ~0x3ff) { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, tlb_ofs & ~0x3ff); - tcg_out_arith(s, r1, r1, TCG_REG_T1, ARITH_ADD); - } - tlb_ofs &= 0x3ff; - } + /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */ + tcg_out_arith(s, r2, r2, r1, ARITH_ADD); /* Load the tlb comparator and the addend. */ - tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which); - tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend)); + tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which); + tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend)); - /* subcc arg0, arg2, %g0 */ + /* Mask out the page offset, except for the required alignment. + We don't support unaligned accesses. */ + if (a_bits < s_bits) { + a_bits = s_bits; + } + compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1); + if (check_fit_tl(compare_mask, 13)) { + tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND); + } else { + tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask); + tcg_out_arith(s, r2, addr, r2, ARITH_AND); + } tcg_out_cmp(s, r0, r2, 0); /* If the guest address must be zero-extended, do so now. */ @@ -1072,15 +1165,16 @@ static const int qemu_st_opc[16] = { }; static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, - TCGMemOp memop, int memi, bool is_64) + TCGMemOpIdx oi, bool is_64) { + MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU - TCGMemOp s_bits = memop & MO_SIZE; + unsigned memi = get_mmuidx(oi); TCGReg addrz, param; tcg_insn_unit *func; tcg_insn_unit *label_ptr; - addrz = tcg_out_tlb_load(s, addr, memi, s_bits, + addrz = tcg_out_tlb_load(s, addr, memi, memop, offsetof(CPUTLBEntry, addr_read)); /* The fast path is exactly one insn. 
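
/*
 * The fast-path address arithmetic the rewritten tcg_out_tlb_load()
 * emits (srl, and, add), as plain C.  The mask loaded from
 * CPUTLBDescFast.mask is pre-scaled so the AND directly yields a byte
 * offset into the table.  The page/entry sizes below are illustrative
 * assumptions, not values taken from this patch:
 */
#include <stdint.h>

#define PAGE_BITS      12   /* assumed TARGET_PAGE_BITS */
#define TLB_ENTRY_BITS  5   /* assumed log2(sizeof(CPUTLBEntry)) */

static uintptr_t tlb_entry_addr(uintptr_t table, uint64_t mask,
                                uint64_t vaddr)
{
    /* srl addr, (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), r2 */
    uint64_t ofs = vaddr >> (PAGE_BITS - TLB_ENTRY_BITS);
    /* and r2, r0(mask), r2 */
    ofs &= mask;
    /* add r2, r1(table), r2 -> address of the CPUTLBEntry */
    return table + (uintptr_t)ofs;
}
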
Thus we can perform the @@ -1092,7 +1186,8 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); /* delay slot */ - tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_ld_opc[memop]); + tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, + qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); /* TLB Miss. */ @@ -1101,31 +1196,31 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, /* Skip the high-part; we'll perform the extract in the trampoline. */ param++; } - tcg_out_mov(s, TCG_TYPE_REG, param++, addr); + tcg_out_mov(s, TCG_TYPE_REG, param++, addrz); /* We use the helpers to extend SB and SW data, leaving the case of SL needing explicit extending below. */ - if ((memop & ~MO_BSWAP) == MO_SL) { - func = qemu_ld_trampoline[memop & ~MO_SIGN]; + if ((memop & MO_SSIZE) == MO_SL) { + func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)]; } else { - func = qemu_ld_trampoline[memop]; + func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)]; } - assert(func != NULL); - tcg_out_call_nodelay(s, func); + tcg_debug_assert(func != NULL); + tcg_out_call_nodelay(s, func, false); /* delay slot */ - tcg_out_movi(s, TCG_TYPE_I32, param, memi); + tcg_out_movi(s, TCG_TYPE_I32, param, oi); /* Recall that all of the helpers return 64-bit results. Which complicates things for sparcv8plus. */ if (SPARC64) { /* We let the helper sign-extend SB and SW, but leave SL for here. */ - if (is_64 && (memop & ~MO_BSWAP) == MO_SL) { + if (is_64 && (memop & MO_SSIZE) == MO_SL) { tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA); } else { tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); } } else { - if (s_bits == MO_64) { + if ((memop & MO_SIZE) == MO_64) { tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX); tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL); tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR); @@ -1146,21 +1241,22 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, addr = TCG_REG_T1; } tcg_out_ldst_rr(s, data, addr, - (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0), - qemu_ld_opc[memop]); + (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), + qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); #endif /* CONFIG_SOFTMMU */ } static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, - TCGMemOp memop, int memi) + TCGMemOpIdx oi) { + MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU - TCGMemOp s_bits = memop & MO_SIZE; + unsigned memi = get_mmuidx(oi); TCGReg addrz, param; tcg_insn_unit *func; tcg_insn_unit *label_ptr; - addrz = tcg_out_tlb_load(s, addr, memi, s_bits, + addrz = tcg_out_tlb_load(s, addr, memi, memop, offsetof(CPUTLBEntry, addr_write)); /* The fast path is exactly one insn. Thus we can perform the entire @@ -1170,7 +1266,8 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); /* delay slot */ - tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop]); + tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, + qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); /* TLB Miss. */ @@ -1179,18 +1276,18 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, /* Skip the high-part; we'll perform the extract in the trampoline. 
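
/*
 * The single TCGMemOpIdx operand threaded through the new qemu_ld/st
 * paths packs the memory op and the MMU index together; the accessors
 * used above behave like this sketch (layout as in QEMU's
 * make_memop_idx, with the mmu index in the low four bits):
 */
static inline unsigned make_memop_idx_sketch(unsigned op, unsigned idx)
{
    return (op << 4) | idx;                     /* requires idx < 16 */
}
static inline unsigned get_memop_sketch(unsigned oi)  { return oi >> 4; }
static inline unsigned get_mmuidx_sketch(unsigned oi) { return oi & 15; }
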
*/ param++; } - tcg_out_mov(s, TCG_TYPE_REG, param++, addr); - if (!SPARC64 && s_bits == MO_64) { + tcg_out_mov(s, TCG_TYPE_REG, param++, addrz); + if (!SPARC64 && (memop & MO_SIZE) == MO_64) { /* Skip the high-part; we'll perform the extract in the trampoline. */ param++; } tcg_out_mov(s, TCG_TYPE_REG, param++, data); - func = qemu_st_trampoline[memop]; - assert(func != NULL); - tcg_out_call_nodelay(s, func); + func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)]; + tcg_debug_assert(func != NULL); + tcg_out_call_nodelay(s, func, false); /* delay slot */ - tcg_out_movi(s, TCG_TYPE_REG, param, memi); + tcg_out_movi(s, TCG_TYPE_I32, param, oi); *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); #else @@ -1199,8 +1296,8 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, addr = TCG_REG_T1; } tcg_out_ldst_rr(s, data, addr, - (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0), - qemu_st_opc[memop]); + (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), + qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); #endif /* CONFIG_SOFTMMU */ } @@ -1222,28 +1319,70 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, if (check_fit_ptr(a0, 13)) { tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); tcg_out_movi_imm13(s, TCG_REG_O0, a0); - } else { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff); - tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); - tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR); + break; + } else if (USE_REG_TB) { + intptr_t tb_diff = a0 - (uintptr_t)s->code_gen_ptr; + if (check_fit_ptr(tb_diff, 13)) { + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); + /* Note that TCG_REG_TB has been unwound to O1. */ + tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD); + break; + } } + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff); + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); + tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR); break; case INDEX_op_goto_tb: - if (s->tb_jmp_offset) { + if (s->tb_jmp_insn_offset) { /* direct jump method */ - s->tb_jmp_offset[a0] = tcg_current_code_size(s); - /* Make sure to preserve links during retranslation. */ - tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1))); + if (USE_REG_TB) { + /* make sure the patch is 8-byte aligned. */ + if ((intptr_t)s->code_ptr & 4) { + tcg_out_nop(s); + } + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); + tcg_out_sethi(s, TCG_REG_T1, 0); + tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR); + tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL); + tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD); + } else { + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); + tcg_out32(s, CALL); + tcg_out_nop(s); + } } else { /* indirect jump method */ - tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0)); - tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL); + tcg_out_ld_ptr(s, TCG_REG_TB, + (uintptr_t)(s->tb_jmp_target_addr + a0)); + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL); + tcg_out_nop(s); + } + set_jmp_reset_offset(s, a0); + + /* For the unlinked path of goto_tb, we need to reset + TCG_REG_TB to the beginning of this TB. 
*/ + if (USE_REG_TB) { + c = -tcg_current_code_size(s); + if (check_fit_i32(c, 13)) { + tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c); + tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, + TCG_REG_T1, ARITH_ADD); + } + } + break; + case INDEX_op_goto_ptr: + tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL); + if (USE_REG_TB) { + tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR); + } else { + tcg_out_nop(s); } - tcg_out_nop(s); - s->tb_next_offset[a0] = tcg_current_code_size(s); break; case INDEX_op_br: - tcg_out_bpcc(s, COND_A, BPCC_PT, a0); + tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0)); tcg_out_nop(s); break; @@ -1315,11 +1454,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, goto gen_arith; OP_32_64(neg): - c = ARITH_SUB; - goto gen_arith1; + c = ARITH_SUB; + goto gen_arith1; OP_32_64(not): - c = ARITH_ORN; - goto gen_arith1; + c = ARITH_ORN; + goto gen_arith1; case INDEX_op_div_i32: tcg_out_div32(s, a0, a1, a2, c2, 0); @@ -1329,7 +1468,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_brcond_i32: - tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], args[3]); + tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3])); break; case INDEX_op_setcond_i32: tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2); @@ -1365,14 +1504,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_qemu_ld_i32: - tcg_out_qemu_ld(s, a0, a1, a2, args[3], false); + tcg_out_qemu_ld(s, a0, a1, a2, false); break; case INDEX_op_qemu_ld_i64: - tcg_out_qemu_ld(s, a0, a1, a2, args[3], true); + tcg_out_qemu_ld(s, a0, a1, a2, true); break; case INDEX_op_qemu_st_i32: case INDEX_op_qemu_st_i64: - tcg_out_qemu_st(s, a0, a1, a2, args[3]); + tcg_out_qemu_st(s, a0, a1, a2); break; case INDEX_op_ld32s_i64: @@ -1405,22 +1544,23 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_divu_i64: c = ARITH_UDIVX; goto gen_arith; + case INDEX_op_ext_i32_i64: case INDEX_op_ext32s_i64: tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA); break; + case INDEX_op_extu_i32_i64: case INDEX_op_ext32u_i64: tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL); break; - case INDEX_op_trunc_shr_i32: - if (a2 == 0) { - tcg_out_mov(s, TCG_TYPE_I32, a0, a1); - } else { - tcg_out_arithi(s, a0, a1, a2, SHIFT_SRLX); - } + case INDEX_op_extrl_i64_i32: + tcg_out_mov(s, TCG_TYPE_I32, a0, a1); + break; + case INDEX_op_extrh_i64_i32: + tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX); break; case INDEX_op_brcond_i64: - tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], args[3]); + tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3])); break; case INDEX_op_setcond_i64: tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2); @@ -1445,8 +1585,12 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, break; gen_arith1: - tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c); - break; + tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c); + break; + + case INDEX_op_mb: + tcg_out_mb(s, a0); + break; case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. 
*/ case INDEX_op_mov_i64: @@ -1458,96 +1602,148 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, } } -static const TCGTargetOpDef sparc_op_defs[] = { - { INDEX_op_exit_tb, { } }, - { INDEX_op_goto_tb, { } }, - { INDEX_op_br, { } }, +static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) +{ + static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; + static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } }; + static const TCGTargetOpDef R_r = { .args_ct_str = { "R", "r" } }; + static const TCGTargetOpDef r_R = { .args_ct_str = { "r", "R" } }; + static const TCGTargetOpDef R_R = { .args_ct_str = { "R", "R" } }; + static const TCGTargetOpDef r_A = { .args_ct_str = { "r", "A" } }; + static const TCGTargetOpDef R_A = { .args_ct_str = { "R", "A" } }; + static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } }; + static const TCGTargetOpDef RZ_r = { .args_ct_str = { "RZ", "r" } }; + static const TCGTargetOpDef sZ_A = { .args_ct_str = { "sZ", "A" } }; + static const TCGTargetOpDef SZ_A = { .args_ct_str = { "SZ", "A" } }; + static const TCGTargetOpDef rZ_rJ = { .args_ct_str = { "rZ", "rJ" } }; + static const TCGTargetOpDef RZ_RJ = { .args_ct_str = { "RZ", "RJ" } }; + static const TCGTargetOpDef R_R_R = { .args_ct_str = { "R", "R", "R" } }; + static const TCGTargetOpDef r_rZ_rJ + = { .args_ct_str = { "r", "rZ", "rJ" } }; + static const TCGTargetOpDef R_RZ_RJ + = { .args_ct_str = { "R", "RZ", "RJ" } }; + static const TCGTargetOpDef r_r_rZ_rJ + = { .args_ct_str = { "r", "r", "rZ", "rJ" } }; + static const TCGTargetOpDef movc_32 + = { .args_ct_str = { "r", "rZ", "rJ", "rI", "0" } }; + static const TCGTargetOpDef movc_64 + = { .args_ct_str = { "R", "RZ", "RJ", "RI", "0" } }; + static const TCGTargetOpDef add2_32 + = { .args_ct_str = { "r", "r", "rZ", "rZ", "rJ", "rJ" } }; + static const TCGTargetOpDef add2_64 + = { .args_ct_str = { "R", "R", "RZ", "RZ", "RJ", "RI" } }; - { INDEX_op_ld8u_i32, { "r", "r" } }, - { INDEX_op_ld8s_i32, { "r", "r" } }, - { INDEX_op_ld16u_i32, { "r", "r" } }, - { INDEX_op_ld16s_i32, { "r", "r" } }, - { INDEX_op_ld_i32, { "r", "r" } }, - { INDEX_op_st8_i32, { "rZ", "r" } }, - { INDEX_op_st16_i32, { "rZ", "r" } }, - { INDEX_op_st_i32, { "rZ", "r" } }, + switch (op) { + case INDEX_op_goto_ptr: + return &r; - { INDEX_op_add_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_mul_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_div_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_divu_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_sub_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_and_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_andc_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_or_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_orc_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_xor_i32, { "r", "rZ", "rJ" } }, + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_neg_i32: + case INDEX_op_not_i32: + return &r_r; - { INDEX_op_shl_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_shr_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_sar_i32, { "r", "rZ", "rJ" } }, + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + return &rZ_r; - { INDEX_op_neg_i32, { "r", "rJ" } }, - { INDEX_op_not_i32, { "r", "rJ" } }, + case INDEX_op_add_i32: + case INDEX_op_mul_i32: + case INDEX_op_div_i32: + case INDEX_op_divu_i32: + case INDEX_op_sub_i32: + case INDEX_op_and_i32: + case INDEX_op_andc_i32: + case INDEX_op_or_i32: + case INDEX_op_orc_i32: + case INDEX_op_xor_i32: + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case 
INDEX_op_sar_i32: + case INDEX_op_setcond_i32: + return &r_rZ_rJ; - { INDEX_op_brcond_i32, { "rZ", "rJ" } }, - { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } }, - { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } }, + case INDEX_op_brcond_i32: + return &rZ_rJ; + case INDEX_op_movcond_i32: + return &movc_32; + case INDEX_op_add2_i32: + case INDEX_op_sub2_i32: + return &add2_32; + case INDEX_op_mulu2_i32: + case INDEX_op_muls2_i32: + return &r_r_rZ_rJ; - { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } }, - { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } }, - { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } }, - { INDEX_op_muls2_i32, { "r", "r", "rZ", "rJ" } }, + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + return &R_r; - { INDEX_op_ld8u_i64, { "R", "r" } }, - { INDEX_op_ld8s_i64, { "R", "r" } }, - { INDEX_op_ld16u_i64, { "R", "r" } }, - { INDEX_op_ld16s_i64, { "R", "r" } }, - { INDEX_op_ld32u_i64, { "R", "r" } }, - { INDEX_op_ld32s_i64, { "R", "r" } }, - { INDEX_op_ld_i64, { "R", "r" } }, - { INDEX_op_st8_i64, { "RZ", "r" } }, - { INDEX_op_st16_i64, { "RZ", "r" } }, - { INDEX_op_st32_i64, { "RZ", "r" } }, - { INDEX_op_st_i64, { "RZ", "r" } }, + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + return &RZ_r; - { INDEX_op_add_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_mul_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_div_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_divu_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_sub_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_and_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_andc_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_or_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_orc_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_xor_i64, { "R", "RZ", "RJ" } }, + case INDEX_op_add_i64: + case INDEX_op_mul_i64: + case INDEX_op_div_i64: + case INDEX_op_divu_i64: + case INDEX_op_sub_i64: + case INDEX_op_and_i64: + case INDEX_op_andc_i64: + case INDEX_op_or_i64: + case INDEX_op_orc_i64: + case INDEX_op_xor_i64: + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + case INDEX_op_setcond_i64: + return &R_RZ_RJ; - { INDEX_op_shl_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_shr_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_sar_i64, { "R", "RZ", "RJ" } }, + case INDEX_op_neg_i64: + case INDEX_op_not_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + return &R_R; - { INDEX_op_neg_i64, { "R", "RJ" } }, - { INDEX_op_not_i64, { "R", "RJ" } }, + case INDEX_op_extrl_i64_i32: + case INDEX_op_extrh_i64_i32: + return &r_R; - { INDEX_op_ext32s_i64, { "R", "r" } }, - { INDEX_op_ext32u_i64, { "R", "r" } }, - { INDEX_op_trunc_shr_i32, { "r", "R" } }, + case INDEX_op_brcond_i64: + return &RZ_RJ; + case INDEX_op_movcond_i64: + return &movc_64; + case INDEX_op_add2_i64: + case INDEX_op_sub2_i64: + return &add2_64; + case INDEX_op_muluh_i64: + return &R_R_R; - { INDEX_op_brcond_i64, { "RZ", "RJ" } }, - { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } }, - { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } }, + case INDEX_op_qemu_ld_i32: + return &r_A; + case INDEX_op_qemu_ld_i64: + return &R_A; + case INDEX_op_qemu_st_i32: + return &sZ_A; + case INDEX_op_qemu_st_i64: + return &SZ_A; - { INDEX_op_add2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } }, - { INDEX_op_sub2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } }, - { INDEX_op_muluh_i64, { "R", 
"RZ", "RZ" } }, - - { INDEX_op_qemu_ld_i32, { "r", "A" } }, - { INDEX_op_qemu_ld_i64, { "R", "A" } }, - { INDEX_op_qemu_st_i32, { "sZ", "A" } }, - { INDEX_op_qemu_st_i64, { "SZ", "A" } }, - - { -1 }, -}; + default: + return NULL; + } +} static void tcg_target_init(TCGContext *s) { @@ -1560,26 +1756,27 @@ static void tcg_target_init(TCGContext *s) } #endif - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); - tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64); + s->tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; + s->tcg_target_available_regs[TCG_TYPE_I64] = ALL_64; - tcg_regset_set32(s->tcg_target_call_clobber_regs, 0, - (1 << TCG_REG_G1) | - (1 << TCG_REG_G2) | - (1 << TCG_REG_G3) | - (1 << TCG_REG_G4) | - (1 << TCG_REG_G5) | - (1 << TCG_REG_G6) | - (1 << TCG_REG_G7) | - (1 << TCG_REG_O0) | - (1 << TCG_REG_O1) | - (1 << TCG_REG_O2) | - (1 << TCG_REG_O3) | - (1 << TCG_REG_O4) | - (1 << TCG_REG_O5) | - (1 << TCG_REG_O7)); + s->tcg_target_call_clobber_regs = 0; + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G1); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G2); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G3); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G4); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G5); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G6); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_G7); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O0); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O1); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O2); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O3); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O4); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O5); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O6); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_O7); - tcg_regset_clear(s->reserved_regs); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */ @@ -1588,8 +1785,6 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */ - - tcg_add_target_add_op_defs(s, sparc_op_defs); } #if SPARC64 @@ -1599,15 +1794,78 @@ static void tcg_target_init(TCGContext *s) # define ELF_HOST_FLAGS EF_SPARC_32PLUS #endif -void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) +typedef struct { + DebugFrameHeader h; + uint8_t fde_def_cfa[SPARC64 ? 4 : 2]; + uint8_t fde_win_save; + uint8_t fde_ret_save[3]; +} DebugFrame; + +static const DebugFrame debug_frame = { + .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ + .h.cie.id = -1, + .h.cie.version = 1, + .h.cie.code_align = 1, + .h.cie.data_align = -sizeof(void *) & 0x7f, + .h.cie.return_column = 15, /* o7 */ + + /* Total FDE size does not include the "len" member. 
*/ + .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), + + .fde_def_cfa = { +#if SPARC64 + 12, 30, /* DW_CFA_def_cfa i6, 2047 */ + (2047 & 0x7f) | 0x80, (2047 >> 7) +#else + 13, 30 /* DW_CFA_def_cfa_register i6 */ +#endif + }, + .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */ + .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */ +}; + +void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) { - uint32_t *ptr = (uint32_t *)jmp_addr; - uintptr_t disp = addr - jmp_addr; - - /* We can reach the entire address space for 32-bit. For 64-bit - the code_gen_buffer can't be larger than 2GB. */ - assert(disp == (int32_t)disp); - - *ptr = CALL | (uint32_t)disp >> 2; - flush_icache_range(jmp_addr, jmp_addr + 4); + tcg_register_jit_int(s, buf, buf_size, &debug_frame, sizeof(debug_frame)); +} + +void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, + uintptr_t addr) +{ + intptr_t tb_disp = addr - tc_ptr; + intptr_t br_disp = addr - jmp_addr; + tcg_insn_unit i1, i2; + + /* We can reach the entire address space for ILP32. + For LP64, the code_gen_buffer can't be larger than 2GB. */ + tcg_debug_assert(tb_disp == (int32_t)tb_disp); + tcg_debug_assert(br_disp == (int32_t)br_disp); + + if (!USE_REG_TB) { + atomic_set((uint32_t *)jmp_addr, deposit32(CALL, 0, 30, br_disp >> 2)); + flush_icache_range(jmp_addr, jmp_addr + 4); + return; + } + + /* This does not exercise the range of the branch, but we do + still need to be able to load the new value of TCG_REG_TB. + But this does still happen quite often. */ + if (check_fit_ptr(tb_disp, 13)) { + /* ba,pt %icc, addr */ + i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A) + | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp)); + i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB) + | INSN_IMM13(tb_disp)); + } else if (tb_disp >= 0) { + i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10); + i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1) + | INSN_IMM13(tb_disp & 0x3ff)); + } else { + i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10); + i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1) + | INSN_IMM13((tb_disp & 0x3ff) | -0x400)); + } + + atomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1)); + flush_icache_range(jmp_addr, jmp_addr + 8); } diff --git a/qemu/tcg/tcg-be-ldst.h b/qemu/tcg/tcg-ldst.inc.c similarity index 65% rename from qemu/tcg/tcg-be-ldst.h rename to qemu/tcg/tcg-ldst.inc.c index 429cba24..3b4f86a1 100644 --- a/qemu/tcg/tcg-be-ldst.h +++ b/qemu/tcg/tcg-ldst.inc.c @@ -20,57 +20,48 @@ * THE SOFTWARE. */ -#ifdef CONFIG_SOFTMMU -#define TCG_MAX_QEMU_LDST 640 - typedef struct TCGLabelQemuLdst { bool is_ld; /* qemu_ld: true, qemu_st: false */ - TCGMemOp opc; + TCGMemOpIdx oi; TCGType type; /* result type of a load */ TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ TCGReg datalo_reg; /* reg index for low word to be loaded or stored */ TCGReg datahi_reg; /* reg index for high word to be loaded or stored */ - int mem_index; /* soft MMU memory index */ tcg_insn_unit *raddr; /* gen code addr of the next IR of qemu_ld/st IR */ tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */ + QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next; } TCGLabelQemuLdst; -typedef struct TCGBackendData { - int nb_ldst_labels; - TCGLabelQemuLdst ldst_labels[TCG_MAX_QEMU_LDST]; -} TCGBackendData; - - -/* - * Initialize TB backend data at the beginning of the TB. 
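
/*
 * Two integer patterns the conversions above depend on.  First,
 * TCGRegSet is now a plain bitmask, so the old tcg_regset_clear() and
 * tcg_regset_set32() calls reduce to assignments plus bit operations.
 * Second, tb_target_set_jmp_target() patches jump sites with
 * deposit32()/deposit64(), i.e. "replace LEN bits at position POS".
 * Sketches of both, under hypothetical names:
 */
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t RegSetSketch;              /* one bit per TCG_REG_* */

static inline void regset_set(RegSetSketch *set, unsigned reg)
{
    *set |= (RegSetSketch)1 << reg;         /* cf. tcg_regset_set_reg */
}
static inline bool regset_test(RegSetSketch set, unsigned reg)
{
    return (set >> reg) & 1;
}

static inline uint32_t deposit32_sketch(uint32_t value, int pos, int len,
                                        uint32_t fieldval)
{
    uint32_t mask = (~0U >> (32 - len)) << pos;
    return (value & ~mask) | ((fieldval << pos) & mask);
}
/* deposit32_sketch(CALL, 0, 30, br_disp >> 2) rewrites only the CALL
   insn's 30-bit displacement field; deposit64(i2, 32, 32, i1) pairs two
   instruction words for the single 8-byte atomic store. */
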
- */ - -static inline void tcg_out_tb_init(TCGContext *s) -{ - s->be->nb_ldst_labels = 0; -} /* * Generate TB finalization at the end of block */ -static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l); -static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l); +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l); +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l); -static void tcg_out_tb_finalize(TCGContext *s) +static int tcg_out_ldst_finalize(TCGContext *s) { - TCGLabelQemuLdst *lb = s->be->ldst_labels; - int i, n = s->be->nb_ldst_labels; + TCGLabelQemuLdst *lb; /* qemu_ld/st slow paths */ - for (i = 0; i < n; i++) { - if (lb[i].is_ld) { - tcg_out_qemu_ld_slow_path(s, lb + i); - } else { - tcg_out_qemu_st_slow_path(s, lb + i); + QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) { + if (lb->is_ld + ? !tcg_out_qemu_ld_slow_path(s, lb) + : !tcg_out_qemu_st_slow_path(s, lb)) { + return -2; + } + + /* Test for (pending) buffer overflow. The assumption is that any + one operation beginning below the high water mark cannot overrun + the buffer completely. Thus we can test for overflow after + generating code without having to check during generation. */ + if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) { + return -1; } } + return 0; } /* @@ -79,13 +70,9 @@ static void tcg_out_tb_finalize(TCGContext *s) static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s) { - TCGBackendData *be = s->be; - int n = be->nb_ldst_labels; + TCGLabelQemuLdst *l = tcg_malloc(s, sizeof(*l)); - assert(n < TCG_MAX_QEMU_LDST); - be->nb_ldst_labels = n + 1; - return &be->ldst_labels[n]; + QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next); + + return l; } -#else -#include "tcg-be-null.h" -#endif /* CONFIG_SOFTMMU */ diff --git a/qemu/tcg/tcg-op-gvec.c b/qemu/tcg/tcg-op-gvec.c new file mode 100644 index 00000000..cab429c4 --- /dev/null +++ b/qemu/tcg/tcg-op-gvec.c @@ -0,0 +1,3254 @@ +/* + * Generic vector operation expansion + * + * Copyright (c) 2018 Linaro + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "tcg/tcg.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" +#include "tcg/tcg-gvec-desc.h" + +#define MAX_UNROLL 4 + +#ifdef CONFIG_DEBUG_TCG +// static const TCGOpcode vecop_list_empty[1] = { 0 }; +#else +#define vecop_list_empty NULL +#endif + + +/* Verify vector size and alignment rules. OFS should be the OR of all + of the operand offsets so that we can check them all at once. */ +static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs) +{ + uint32_t opr_align = oprsz >= 16 ? 15 : 7; + uint32_t max_align = maxsz >= 16 || oprsz >= 16 ? 
15 : 7; + tcg_debug_assert(oprsz > 0); + tcg_debug_assert(oprsz <= maxsz); + tcg_debug_assert((oprsz & opr_align) == 0); + tcg_debug_assert((maxsz & max_align) == 0); + tcg_debug_assert((ofs & max_align) == 0); +} + +/* Verify vector overlap rules for two operands. */ +static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s) +{ + tcg_debug_assert(d == a || d + s <= a || a + s <= d); +} + +/* Verify vector overlap rules for three operands. */ +static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s) +{ + check_overlap_2(d, a, s); + check_overlap_2(d, b, s); + check_overlap_2(a, b, s); +} + +/* Verify vector overlap rules for four operands. */ +static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b, + uint32_t c, uint32_t s) +{ + check_overlap_2(d, a, s); + check_overlap_2(d, b, s); + check_overlap_2(d, c, s); + check_overlap_2(a, b, s); + check_overlap_2(a, c, s); + check_overlap_2(b, c, s); +} + +/* Create a descriptor from components. */ +uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data) +{ + uint32_t desc = 0; + + assert(oprsz % 8 == 0 && oprsz <= (8 << SIMD_OPRSZ_BITS)); + assert(maxsz % 8 == 0 && maxsz <= (8 << SIMD_MAXSZ_BITS)); + assert(data == sextract32(data, 0, SIMD_DATA_BITS)); + + oprsz = (oprsz / 8) - 1; + maxsz = (maxsz / 8) - 1; + desc = deposit32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS, oprsz); + desc = deposit32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS, maxsz); + desc = deposit32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS, data); + + return desc; +} + +/* Generate a call to a gvec-style helper with two vector operands. */ +void tcg_gen_gvec_2_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz, int32_t data, + gen_helper_gvec_2 *fn) +{ + TCGv_ptr a0, a1; + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); + + a0 = tcg_temp_new_ptr(tcg_ctx); + a1 = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); + tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); + + fn(tcg_ctx, a0, a1, desc); + + tcg_temp_free_ptr(tcg_ctx, a0); + tcg_temp_free_ptr(tcg_ctx, a1); + tcg_temp_free_i32(tcg_ctx, desc); +} + +/* Generate a call to a gvec-style helper with two vector operands + and one scalar operand. */ +void tcg_gen_gvec_2i_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, TCGv_i64 c, + uint32_t oprsz, uint32_t maxsz, int32_t data, + gen_helper_gvec_2i *fn) +{ + TCGv_ptr a0, a1; + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); + + a0 = tcg_temp_new_ptr(tcg_ctx); + a1 = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); + tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); + + fn(tcg_ctx, a0, a1, c, desc); + + tcg_temp_free_ptr(tcg_ctx, a0); + tcg_temp_free_ptr(tcg_ctx, a1); + tcg_temp_free_i32(tcg_ctx, desc); +} + +/* Generate a call to a gvec-style helper with three vector operands. 
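
/*
 * Decode counterpart of simd_desc() above.  The field layout here is
 * assumed from QEMU's tcg-gvec-desc.h (5 bits each for the scaled
 * operation/maximum sizes, the remainder signed data); treat the
 * constants as illustrative, not authoritative:
 */
#include <stdint.h>

enum { OPRSZ_SHIFT = 0, OPRSZ_BITS = 5,
       MAXSZ_SHIFT = 5, MAXSZ_BITS = 5,
       DATA_SHIFT = 10 };

static inline uint32_t simd_oprsz_sketch(uint32_t desc)    /* in bytes */
{
    return (((desc >> OPRSZ_SHIFT) & ((1u << OPRSZ_BITS) - 1)) + 1) * 8;
}
static inline uint32_t simd_maxsz_sketch(uint32_t desc)    /* in bytes */
{
    return (((desc >> MAXSZ_SHIFT) & ((1u << MAXSZ_BITS) - 1)) + 1) * 8;
}
static inline int32_t simd_data_sketch(uint32_t desc)  /* sign-extended */
{
    return (int32_t)desc >> DATA_SHIFT;
}
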
*/ +void tcg_gen_gvec_3_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz, int32_t data, + gen_helper_gvec_3 *fn) +{ + TCGv_ptr a0, a1, a2; + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); + + a0 = tcg_temp_new_ptr(tcg_ctx); + a1 = tcg_temp_new_ptr(tcg_ctx); + a2 = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); + tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); + tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); + + fn(tcg_ctx, a0, a1, a2, desc); + + tcg_temp_free_ptr(tcg_ctx, a0); + tcg_temp_free_ptr(tcg_ctx, a1); + tcg_temp_free_ptr(tcg_ctx, a2); + tcg_temp_free_i32(tcg_ctx, desc); +} + +/* Generate a call to a gvec-style helper with four vector operands. */ +void tcg_gen_gvec_4_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t cofs, uint32_t oprsz, uint32_t maxsz, + int32_t data, gen_helper_gvec_4 *fn) +{ + TCGv_ptr a0, a1, a2, a3; + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); + + a0 = tcg_temp_new_ptr(tcg_ctx); + a1 = tcg_temp_new_ptr(tcg_ctx); + a2 = tcg_temp_new_ptr(tcg_ctx); + a3 = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); + tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); + tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); + tcg_gen_addi_ptr(tcg_ctx, a3, tcg_ctx->cpu_env, cofs); + + fn(tcg_ctx, a0, a1, a2, a3, desc); + + tcg_temp_free_ptr(tcg_ctx, a0); + tcg_temp_free_ptr(tcg_ctx, a1); + tcg_temp_free_ptr(tcg_ctx, a2); + tcg_temp_free_ptr(tcg_ctx, a3); + tcg_temp_free_i32(tcg_ctx, desc); +} + +/* Generate a call to a gvec-style helper with five vector operands. */ +void tcg_gen_gvec_5_ool(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t cofs, uint32_t xofs, uint32_t oprsz, + uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn) +{ + TCGv_ptr a0, a1, a2, a3, a4; + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); + + a0 = tcg_temp_new_ptr(tcg_ctx); + a1 = tcg_temp_new_ptr(tcg_ctx); + a2 = tcg_temp_new_ptr(tcg_ctx); + a3 = tcg_temp_new_ptr(tcg_ctx); + a4 = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); + tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); + tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); + tcg_gen_addi_ptr(tcg_ctx, a3, tcg_ctx->cpu_env, cofs); + tcg_gen_addi_ptr(tcg_ctx, a4, tcg_ctx->cpu_env, xofs); + + fn(tcg_ctx, a0, a1, a2, a3, a4, desc); + + tcg_temp_free_ptr(tcg_ctx, a0); + tcg_temp_free_ptr(tcg_ctx, a1); + tcg_temp_free_ptr(tcg_ctx, a2); + tcg_temp_free_ptr(tcg_ctx, a3); + tcg_temp_free_ptr(tcg_ctx, a4); + tcg_temp_free_i32(tcg_ctx, desc); +} + +/* Generate a call to a gvec-style helper with three vector operands + and an extra pointer operand. */ +void tcg_gen_gvec_2_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, + TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz, + int32_t data, gen_helper_gvec_2_ptr *fn) +{ + TCGv_ptr a0, a1; + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); + + a0 = tcg_temp_new_ptr(tcg_ctx); + a1 = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); + tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); + + fn(tcg_ctx, a0, a1, ptr, desc); + + tcg_temp_free_ptr(tcg_ctx, a0); + tcg_temp_free_ptr(tcg_ctx, a1); + tcg_temp_free_i32(tcg_ctx, desc); +} + +/* Generate a call to a gvec-style helper with three vector operands + and an extra pointer operand. 
*/ +void tcg_gen_gvec_3_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz, + int32_t data, gen_helper_gvec_3_ptr *fn) +{ + TCGv_ptr a0, a1, a2; + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); + + a0 = tcg_temp_new_ptr(tcg_ctx); + a1 = tcg_temp_new_ptr(tcg_ctx); + a2 = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); + tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); + tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); + + fn(tcg_ctx, a0, a1, a2, ptr, desc); + + tcg_temp_free_ptr(tcg_ctx, a0); + tcg_temp_free_ptr(tcg_ctx, a1); + tcg_temp_free_ptr(tcg_ctx, a2); + tcg_temp_free_i32(tcg_ctx, desc); +} + +/* Generate a call to a gvec-style helper with four vector operands + and an extra pointer operand. */ +void tcg_gen_gvec_4_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz, + uint32_t maxsz, int32_t data, + gen_helper_gvec_4_ptr *fn) +{ + TCGv_ptr a0, a1, a2, a3; + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); + + a0 = tcg_temp_new_ptr(tcg_ctx); + a1 = tcg_temp_new_ptr(tcg_ctx); + a2 = tcg_temp_new_ptr(tcg_ctx); + a3 = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); + tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); + tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); + tcg_gen_addi_ptr(tcg_ctx, a3, tcg_ctx->cpu_env, cofs); + + fn(tcg_ctx, a0, a1, a2, a3, ptr, desc); + + tcg_temp_free_ptr(tcg_ctx, a0); + tcg_temp_free_ptr(tcg_ctx, a1); + tcg_temp_free_ptr(tcg_ctx, a2); + tcg_temp_free_ptr(tcg_ctx, a3); + tcg_temp_free_i32(tcg_ctx, desc); +} + +/* Generate a call to a gvec-style helper with five vector operands + and an extra pointer operand. */ +void tcg_gen_gvec_5_ptr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t cofs, uint32_t eofs, TCGv_ptr ptr, + uint32_t oprsz, uint32_t maxsz, int32_t data, + gen_helper_gvec_5_ptr *fn) +{ + TCGv_ptr a0, a1, a2, a3, a4; + TCGv_i32 desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, data)); + + a0 = tcg_temp_new_ptr(tcg_ctx); + a1 = tcg_temp_new_ptr(tcg_ctx); + a2 = tcg_temp_new_ptr(tcg_ctx); + a3 = tcg_temp_new_ptr(tcg_ctx); + a4 = tcg_temp_new_ptr(tcg_ctx); + + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs); + tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs); + tcg_gen_addi_ptr(tcg_ctx, a2, tcg_ctx->cpu_env, bofs); + tcg_gen_addi_ptr(tcg_ctx, a3, tcg_ctx->cpu_env, cofs); + tcg_gen_addi_ptr(tcg_ctx, a4, tcg_ctx->cpu_env, eofs); + + fn(tcg_ctx, a0, a1, a2, a3, a4, ptr, desc); + + tcg_temp_free_ptr(tcg_ctx, a0); + tcg_temp_free_ptr(tcg_ctx, a1); + tcg_temp_free_ptr(tcg_ctx, a2); + tcg_temp_free_ptr(tcg_ctx, a3); + tcg_temp_free_ptr(tcg_ctx, a4); + tcg_temp_free_i32(tcg_ctx, desc); +} + +/* Return true if we want to implement something of OPRSZ bytes + in units of LNSZ. This limits the expansion of inline code. */ +static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz) +{ + if (oprsz % lnsz == 0) { + uint32_t lnct = oprsz / lnsz; + return lnct >= 1 && lnct <= MAX_UNROLL; + } + return false; +} + +static void expand_clr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t maxsz); + +/* Duplicate C as per VECE. 
*/ +uint64_t dup_const_func(unsigned vece, uint64_t c) +{ + switch (vece) { + case MO_8: + return 0x0101010101010101ull * (uint8_t)c; + case MO_16: + return 0x0001000100010001ull * (uint16_t)c; + case MO_32: + return 0x0000000100000001ull * (uint32_t)c; + case MO_64: + return c; + default: + // g_assert_not_reached(); + return 0; + } +} + +/* Duplicate IN into OUT as per VECE. */ +static void gen_dup_i32(TCGContext *tcg_ctx, unsigned vece, TCGv_i32 out, TCGv_i32 in) +{ + switch (vece) { + case MO_8: + tcg_gen_ext8u_i32(tcg_ctx, out, in); + tcg_gen_muli_i32(tcg_ctx, out, out, 0x01010101); + break; + case MO_16: + tcg_gen_deposit_i32(tcg_ctx, out, in, in, 16, 16); + break; + case MO_32: + tcg_gen_mov_i32(tcg_ctx, out, in); + break; + default: + // g_assert_not_reached(); + break; + } +} + +static void gen_dup_i64(TCGContext *tcg_ctx, unsigned vece, TCGv_i64 out, TCGv_i64 in) +{ + switch (vece) { + case MO_8: + tcg_gen_ext8u_i64(tcg_ctx, out, in); + tcg_gen_muli_i64(tcg_ctx, out, out, 0x0101010101010101ull); + break; + case MO_16: + tcg_gen_ext16u_i64(tcg_ctx, out, in); + tcg_gen_muli_i64(tcg_ctx, out, out, 0x0001000100010001ull); + break; + case MO_32: + tcg_gen_deposit_i64(tcg_ctx, out, in, in, 32, 32); + break; + case MO_64: + tcg_gen_mov_i64(tcg_ctx, out, in); + break; + default: + // g_assert_not_reached(); + break; + } +} + +/* Select a supported vector type for implementing an operation on SIZE + * bytes. If OP is 0, assume that the real operation to be performed is + * required by all backends. Otherwise, make sure than OP can be performed + * on elements of size VECE in the selected type. Do not select V64 if + * PREFER_I64 is true. Return 0 if no vector type is selected. + */ +static TCGType choose_vector_type(TCGContext *tcg_ctx, const TCGOpcode *list, unsigned vece, + uint32_t size, bool prefer_i64) +{ + if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) { + /* + * Recall that ARM SVE allows vector sizes that are not a + * power of 2, but always a multiple of 16. The intent is + * that e.g. size == 80 would be expanded with 2x32 + 1x16. + * It is hard to imagine a case in which v256 is supported + * but v128 is not, but check anyway. + */ + if (tcg_can_emit_vecop_list(tcg_ctx, list, TCG_TYPE_V256, vece) + && (size % 32 == 0 + || tcg_can_emit_vecop_list(tcg_ctx, list, TCG_TYPE_V128, vece))) { + return TCG_TYPE_V256; + } + } + if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16) + && tcg_can_emit_vecop_list(tcg_ctx, list, TCG_TYPE_V128, vece)) { + return TCG_TYPE_V128; + } + if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8) + && tcg_can_emit_vecop_list(tcg_ctx, list, TCG_TYPE_V64, vece)) { + return TCG_TYPE_V64; + } + return 0; +} + +static void do_dup_store(TCGContext *tcg_ctx, TCGType type, uint32_t dofs, uint32_t oprsz, + uint32_t maxsz, TCGv_vec t_vec) +{ + uint32_t i = 0; + + switch (type) { + case TCG_TYPE_V256: + /* + * Recall that ARM SVE allows vector sizes that are not a + * power of 2, but always a multiple of 16. The intent is + * that e.g. size == 80 would be expanded with 2x32 + 1x16. 
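
/*
 * Why the multiplications in dup_const_func() and gen_dup_i32/_i64()
 * replicate a value: multiplying by a constant with exactly one set bit
 * per lane copies the zero-extended element into every lane, and the
 * partial products cannot overlap.  For the byte case:
 */
#include <stdint.h>

static uint64_t dup8_sketch(uint8_t c)
{
    return 0x0101010101010101ull * c;   /* c repeated in all 8 bytes */
}
/* dup8_sketch(0xab) == 0xababababababababull */
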
+ */ + for (; i + 32 <= oprsz; i += 32) { + tcg_gen_stl_vec(tcg_ctx, t_vec, tcg_ctx->cpu_env, dofs + i, TCG_TYPE_V256); + } + /* fallthru */ + case TCG_TYPE_V128: + for (; i + 16 <= oprsz; i += 16) { + tcg_gen_stl_vec(tcg_ctx, t_vec, tcg_ctx->cpu_env, dofs + i, TCG_TYPE_V128); + } + break; + case TCG_TYPE_V64: + for (; i < oprsz; i += 8) { + tcg_gen_stl_vec(tcg_ctx, t_vec, tcg_ctx->cpu_env, dofs + i, TCG_TYPE_V64); + } + break; + default: + g_assert_not_reached(); + } + + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } +} + +/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C. + * Only one of IN_32 or IN_64 may be set; + * IN_C is used if IN_32 and IN_64 are unset. + */ +static void do_dup(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t oprsz, + uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64, + uint64_t in_c) +{ + TCGType type; + TCGv_i64 t_64; + TCGv_i32 t_32, t_desc; + TCGv_ptr t_ptr; + uint32_t i; + + assert(vece <= (in_32 ? MO_32 : MO_64)); + assert(in_32 == NULL || in_64 == NULL); + + /* If we're storing 0, expand oprsz to maxsz. */ + if (in_32 == NULL && in_64 == NULL) { + in_c = dup_const(vece, in_c); + if (in_c == 0) { + oprsz = maxsz; + } + } + + /* Implement inline with a vector type, if possible. + * Prefer integer when 64-bit host and no variable dup. + */ + type = choose_vector_type(tcg_ctx, NULL, vece, oprsz, + (TCG_TARGET_REG_BITS == 64 && in_32 == NULL + && (in_64 == NULL || vece == MO_64))); + if (type != 0) { + TCGv_vec t_vec = tcg_temp_new_vec(tcg_ctx, type); + + if (in_32) { + tcg_gen_dup_i32_vec(tcg_ctx, vece, t_vec, in_32); + } else if (in_64) { + tcg_gen_dup_i64_vec(tcg_ctx, vece, t_vec, in_64); + } else { + tcg_gen_dupi_vec(tcg_ctx, vece, t_vec, in_c); + } + do_dup_store(tcg_ctx, type, dofs, oprsz, maxsz, t_vec); + tcg_temp_free_vec(tcg_ctx, t_vec); + return; + } + + /* Otherwise, inline with an integer type, unless "large". */ + if (check_size_impl(oprsz, TCG_TARGET_REG_BITS / 8)) { + t_64 = NULL; + t_32 = NULL; + + if (in_32) { + /* We are given a 32-bit variable input. For a 64-bit host, + use a 64-bit operation unless the 32-bit operation would + be simple enough. */ + if (TCG_TARGET_REG_BITS == 64 + && (vece != MO_32 || !check_size_impl(oprsz, 4))) { + t_64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, t_64, in_32); + gen_dup_i64(tcg_ctx, vece, t_64, t_64); + } else { + t_32 = tcg_temp_new_i32(tcg_ctx); + gen_dup_i32(tcg_ctx, vece, t_32, in_32); + } + } else if (in_64) { + /* We are given a 64-bit variable input. */ + t_64 = tcg_temp_new_i64(tcg_ctx); + gen_dup_i64(tcg_ctx, vece, t_64, in_64); + } else { + /* We are given a constant input. */ + /* For 64-bit hosts, use 64-bit constants for "simple" constants + or when we'd need too many 32-bit stores, or when a 64-bit + constant is really required. */ + if (vece == MO_64 + || (TCG_TARGET_REG_BITS == 64 + && (in_c == 0 || in_c == -1 + || !check_size_impl(oprsz, 4)))) { + t_64 = tcg_const_i64(tcg_ctx, in_c); + } else { + t_32 = tcg_const_i32(tcg_ctx, in_c); + } + } + + /* Implement inline if we picked an implementation size above. */ + if (t_32) { + for (i = 0; i < oprsz; i += 4) { + tcg_gen_st_i32(tcg_ctx, t_32, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_i32(tcg_ctx, t_32); + goto done; + } + if (t_64) { + for (i = 0; i < oprsz; i += 8) { + tcg_gen_st_i64(tcg_ctx, t_64, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_i64(tcg_ctx, t_64); + goto done; + } + } + + /* Otherwise implement out of line. 
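+   The slow path materializes a pointer to the destination slice plus a
+   simd_desc(oprsz, maxsz, 0) descriptor and defers the store loop to the
+   out-of-line gen_helper_gvec_dup{8,16,32,64} helpers at execution time.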
*/ + t_ptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, t_ptr, tcg_ctx->cpu_env, dofs); + t_desc = tcg_const_i32(tcg_ctx, simd_desc(oprsz, maxsz, 0)); + + if (vece == MO_64) { + if (in_64) { + gen_helper_gvec_dup64(tcg_ctx, t_ptr, t_desc, in_64); + } else { + t_64 = tcg_const_i64(tcg_ctx, in_c); + gen_helper_gvec_dup64(tcg_ctx, t_ptr, t_desc, t_64); + tcg_temp_free_i64(tcg_ctx, t_64); + } + } else { + typedef void dup_fn(TCGContext *, TCGv_ptr, TCGv_i32, TCGv_i32); + static dup_fn * const fns[3] = { + gen_helper_gvec_dup8, + gen_helper_gvec_dup16, + gen_helper_gvec_dup32 + }; + + if (in_32) { + fns[vece](tcg_ctx, t_ptr, t_desc, in_32); + } else { + t_32 = tcg_temp_new_i32(tcg_ctx); + if (in_64) { + tcg_gen_extrl_i64_i32(tcg_ctx, t_32, in_64); + } else if (vece == MO_8) { + tcg_gen_movi_i32(tcg_ctx, t_32, in_c & 0xff); + } else if (vece == MO_16) { + tcg_gen_movi_i32(tcg_ctx, t_32, in_c & 0xffff); + } else { + tcg_gen_movi_i32(tcg_ctx, t_32, in_c); + } + fns[vece](tcg_ctx, t_ptr, t_desc, t_32); + tcg_temp_free_i32(tcg_ctx, t_32); + } + } + + tcg_temp_free_ptr(tcg_ctx, t_ptr); + tcg_temp_free_i32(tcg_ctx, t_desc); + return; + + done: + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } +} + +/* Likewise, but with zero. */ +static void expand_clr(TCGContext *tcg_ctx, uint32_t dofs, uint32_t maxsz) +{ + do_dup(tcg_ctx, MO_8, dofs, maxsz, maxsz, NULL, NULL, 0); +} + +/* Expand OPSZ bytes worth of two-operand operations using i32 elements. */ +static void expand_2_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, + void (*fni)(TCGContext *, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + uint32_t i; + + for (i = 0; i < oprsz; i += 4) { + tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); + fni(tcg_ctx, t0, t0); + tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void expand_2i_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, + int32_t c, bool load_dest, + void (*fni)(TCGContext *, TCGv_i32, TCGv_i32, int32_t)) +{ + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + uint32_t i; + + for (i = 0; i < oprsz; i += 4) { + tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); + if (load_dest) { + tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); + } + fni(tcg_ctx, t1, t0, c); + tcg_gen_st_i32(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); +} + +static void expand_2s_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, + TCGv_i32 c, bool scalar_first, + void (*fni)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + uint32_t i; + + for (i = 0; i < oprsz; i += 4) { + tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); + if (scalar_first) { + fni(tcg_ctx, t1, c, t0); + } else { + fni(tcg_ctx, t1, t0, c); + } + tcg_gen_st_i32(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); +} + +/* Expand OPSZ bytes worth of three-operand operations using i32 elements. 
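+   When load_dest is set, the current destination element is loaded into
+   the output temporary before fni runs, for callbacks that accumulate
+   into (or otherwise read) the destination.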
+ */
+static void expand_3_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
+                         uint32_t bofs, uint32_t oprsz, bool load_dest,
+                         void (*fni)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32))
+{
+    TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+    TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx);
+    TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += 4) {
+        tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
+        tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i);
+        if (load_dest) {
+            tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
+        }
+        fni(tcg_ctx, t2, t0, t1);
+        tcg_gen_st_i32(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
+    }
+    tcg_temp_free_i32(tcg_ctx, t2);
+    tcg_temp_free_i32(tcg_ctx, t1);
+    tcg_temp_free_i32(tcg_ctx, t0);
+}
+
+static void expand_3i_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                          uint32_t oprsz, int32_t c, bool load_dest,
+                          void (*fni)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, int32_t))
+{
+    TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+    TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx);
+    TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += 4) {
+        tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
+        tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i);
+        if (load_dest) {
+            tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
+        }
+        fni(tcg_ctx, t2, t0, t1, c);
+        tcg_gen_st_i32(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
+    }
+    tcg_temp_free_i32(tcg_ctx, t0);
+    tcg_temp_free_i32(tcg_ctx, t1);
+    tcg_temp_free_i32(tcg_ctx, t2);
+}
+
+/* Expand OPSZ bytes worth of four-operand operations using i32 elements. */
+static void expand_4_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
+                         void (*fni)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
+{
+    TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+    TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx);
+    TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx);
+    TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += 4) {
+        tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i);
+        tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, bofs + i);
+        tcg_gen_ld_i32(tcg_ctx, t3, tcg_ctx->cpu_env, cofs + i);
+        fni(tcg_ctx, t0, t1, t2, t3);
+        tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i);
+        if (write_aofs) {
+            tcg_gen_st_i32(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i);
+        }
+    }
+    tcg_temp_free_i32(tcg_ctx, t3);
+    tcg_temp_free_i32(tcg_ctx, t2);
+    tcg_temp_free_i32(tcg_ctx, t1);
+    tcg_temp_free_i32(tcg_ctx, t0);
+}
+
+/* Expand OPSZ bytes worth of two-operand operations using i64 elements.
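+   For element sizes below MO_64, an fni8 callback operates on several
+   lanes packed into a single host i64 (compare the tcg_gen_vec_*_i64
+   helpers later in this file), so each iteration still covers 8 bytes.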
+ */
+static void expand_2_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz,
+                         void (*fni)(TCGContext *, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += 8) {
+        tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
+        fni(tcg_ctx, t0, t0);
+        tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i);
+    }
+    tcg_temp_free_i64(tcg_ctx, t0);
+}
+
+static void expand_2i_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz,
+                          int64_t c, bool load_dest,
+                          void (*fni)(TCGContext *, TCGv_i64, TCGv_i64, int64_t))
+{
+    TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
+    TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += 8) {
+        tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
+        if (load_dest) {
+            tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i);
+        }
+        fni(tcg_ctx, t1, t0, c);
+        tcg_gen_st_i64(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i);
+    }
+    tcg_temp_free_i64(tcg_ctx, t0);
+    tcg_temp_free_i64(tcg_ctx, t1);
+}
+
+static void expand_2s_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz,
+                          TCGv_i64 c, bool scalar_first,
+                          void (*fni)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
+    TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += 8) {
+        tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
+        if (scalar_first) {
+            fni(tcg_ctx, t1, c, t0);
+        } else {
+            fni(tcg_ctx, t1, t0, c);
+        }
+        tcg_gen_st_i64(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i);
+    }
+    tcg_temp_free_i64(tcg_ctx, t0);
+    tcg_temp_free_i64(tcg_ctx, t1);
+}
+
+/* Expand OPSZ bytes worth of three-operand operations using i64 elements. */
+static void expand_3_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs,
+                         uint32_t bofs, uint32_t oprsz, bool load_dest,
+                         void (*fni)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+    TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
+    TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
+    TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += 8) {
+        tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
+        tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i);
+        if (load_dest) {
+            tcg_gen_ld_i64(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
+        }
+        fni(tcg_ctx, t2, t0, t1);
+        tcg_gen_st_i64(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
+    }
+    tcg_temp_free_i64(tcg_ctx, t2);
+    tcg_temp_free_i64(tcg_ctx, t1);
+    tcg_temp_free_i64(tcg_ctx, t0);
+}
+
+static void expand_3i_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                          uint32_t oprsz, int64_t c, bool load_dest,
+                          void (*fni)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, int64_t))
+{
+    TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
+    TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
+    TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += 8) {
+        tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
+        tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i);
+        if (load_dest) {
+            tcg_gen_ld_i64(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
+        }
+        fni(tcg_ctx, t2, t0, t1, c);
+        tcg_gen_st_i64(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i);
+    }
+    tcg_temp_free_i64(tcg_ctx, t0);
+    tcg_temp_free_i64(tcg_ctx, t1);
+    tcg_temp_free_i64(tcg_ctx, t2);
+}
+
+/* Expand OPSZ bytes worth of four-operand operations using i64 elements.
*/ +static void expand_4_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t cofs, uint32_t oprsz, bool write_aofs, + void (*fni)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + uint32_t i; + + for (i = 0; i < oprsz; i += 8) { + tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i); + tcg_gen_ld_i64(tcg_ctx, t2, tcg_ctx->cpu_env, bofs + i); + tcg_gen_ld_i64(tcg_ctx, t3, tcg_ctx->cpu_env, cofs + i); + fni(tcg_ctx, t0, t1, t2, t3); + tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); + if (write_aofs) { + tcg_gen_st_i64(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i); + } + } + tcg_temp_free_i64(tcg_ctx, t3); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* Expand OPSZ bytes worth of two-operand operations using host vectors. */ +static void expand_2_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t tysz, TCGType type, + void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec)) +{ + TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); + uint32_t i; + + for (i = 0; i < oprsz; i += tysz) { + tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); + fni(tcg_ctx, vece, t0, t0); + tcg_gen_st_vec(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_vec(tcg_ctx, t0); +} + +/* Expand OPSZ bytes worth of two-vector operands and an immediate operand + using host vectors. */ +static void expand_2i_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t tysz, TCGType type, + int64_t c, bool load_dest, + void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, int64_t)) +{ + TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); + uint32_t i; + + for (i = 0; i < oprsz; i += tysz) { + tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); + if (load_dest) { + tcg_gen_ld_vec(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); + } + fni(tcg_ctx, vece, t1, t0, c); + tcg_gen_st_vec(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_vec(tcg_ctx, t0); + tcg_temp_free_vec(tcg_ctx, t1); +} + +static void expand_2s_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t tysz, TCGType type, + TCGv_vec c, bool scalar_first, + void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec)) +{ + TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); + uint32_t i; + + for (i = 0; i < oprsz; i += tysz) { + tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); + if (scalar_first) { + fni(tcg_ctx, vece, t1, c, t0); + } else { + fni(tcg_ctx, vece, t1, t0, c); + } + tcg_gen_st_vec(tcg_ctx, t1, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_vec(tcg_ctx, t0); + tcg_temp_free_vec(tcg_ctx, t1); +} + +/* Expand OPSZ bytes worth of three-operand operations using host vectors. 
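+   TYSZ is the byte width of the chosen vector type (32 for V256, 16 for
+   V128, 8 for V64), so each iteration of the loop handles one full host
+   vector register.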
*/ +static void expand_3_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, + uint32_t tysz, TCGType type, bool load_dest, + void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec)) +{ + TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t2 = tcg_temp_new_vec(tcg_ctx, type); + uint32_t i; + + for (i = 0; i < oprsz; i += tysz) { + tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); + tcg_gen_ld_vec(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i); + if (load_dest) { + tcg_gen_ld_vec(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); + } + fni(tcg_ctx, vece, t2, t0, t1); + tcg_gen_st_vec(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_vec(tcg_ctx, t2); + tcg_temp_free_vec(tcg_ctx, t1); + tcg_temp_free_vec(tcg_ctx, t0); +} + +/* + * Expand OPSZ bytes worth of three-vector operands and an immediate operand + * using host vectors. + */ +static void expand_3i_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t tysz, + TCGType type, int64_t c, bool load_dest, + void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec, + int64_t)) +{ + TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t2 = tcg_temp_new_vec(tcg_ctx, type); + uint32_t i; + + for (i = 0; i < oprsz; i += tysz) { + tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); + tcg_gen_ld_vec(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i); + if (load_dest) { + tcg_gen_ld_vec(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); + } + fni(tcg_ctx, vece, t2, t0, t1, c); + tcg_gen_st_vec(tcg_ctx, t2, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_vec(tcg_ctx, t0); + tcg_temp_free_vec(tcg_ctx, t1); + tcg_temp_free_vec(tcg_ctx, t2); +} + +/* Expand OPSZ bytes worth of four-operand operations using host vectors. */ +static void expand_4_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t cofs, uint32_t oprsz, + uint32_t tysz, TCGType type, bool write_aofs, + void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, + TCGv_vec, TCGv_vec)) +{ + TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t2 = tcg_temp_new_vec(tcg_ctx, type); + TCGv_vec t3 = tcg_temp_new_vec(tcg_ctx, type); + uint32_t i; + + for (i = 0; i < oprsz; i += tysz) { + tcg_gen_ld_vec(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i); + tcg_gen_ld_vec(tcg_ctx, t2, tcg_ctx->cpu_env, bofs + i); + tcg_gen_ld_vec(tcg_ctx, t3, tcg_ctx->cpu_env, cofs + i); + fni(tcg_ctx, vece, t0, t1, t2, t3); + tcg_gen_st_vec(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); + if (write_aofs) { + tcg_gen_st_vec(tcg_ctx, t1, tcg_ctx->cpu_env, aofs + i); + } + } + tcg_temp_free_vec(tcg_ctx, t3); + tcg_temp_free_vec(tcg_ctx, t2); + tcg_temp_free_vec(tcg_ctx, t1); + tcg_temp_free_vec(tcg_ctx, t0); +} + +/* Expand a vector two-operand operation. */ +void tcg_gen_gvec_2(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g) +{ + const TCGOpcode *this_list = g->opt_opc ? 
g->opt_opc : vecop_list_empty; + const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); + TCGType type; + uint32_t some; + + check_size_align(oprsz, maxsz, dofs | aofs); + check_overlap_2(dofs, aofs, maxsz); + + type = 0; + if (g->fniv) { + type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); + } + switch (type) { + case TCG_TYPE_V256: + /* Recall that ARM SVE allows vector sizes that are not a + * power of 2, but always a multiple of 16. The intent is + * that e.g. size == 80 would be expanded with 2x32 + 1x16. + */ + some = QEMU_ALIGN_DOWN(oprsz, 32); + expand_2_vec(tcg_ctx, g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, g->fniv); + if (some == oprsz) { + break; + } + dofs += some; + aofs += some; + oprsz -= some; + maxsz -= some; + /* fallthru */ + case TCG_TYPE_V128: + expand_2_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, g->fniv); + break; + case TCG_TYPE_V64: + expand_2_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, g->fniv); + break; + + case 0: + if (g->fni8 && check_size_impl(oprsz, 8)) { + expand_2_i64(tcg_ctx, dofs, aofs, oprsz, g->fni8); + } else if (g->fni4 && check_size_impl(oprsz, 4)) { + expand_2_i32(tcg_ctx, dofs, aofs, oprsz, g->fni4); + } else { + assert(g->fno != NULL); + tcg_gen_gvec_2_ool(tcg_ctx, dofs, aofs, oprsz, maxsz, g->data, g->fno); + oprsz = maxsz; + } + break; + + default: + g_assert_not_reached(); + } + tcg_swap_vecop_list(hold_list); + + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } +} + +/* Expand a vector operation with two vectors and an immediate. */ +void tcg_gen_gvec_2i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, + uint32_t maxsz, int64_t c, const GVecGen2i *g) +{ + const TCGOpcode *this_list = g->opt_opc ? g->opt_opc : vecop_list_empty; + const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); + TCGType type; + uint32_t some; + + check_size_align(oprsz, maxsz, dofs | aofs); + check_overlap_2(dofs, aofs, maxsz); + + type = 0; + if (g->fniv) { + type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); + } + switch (type) { + case TCG_TYPE_V256: + /* Recall that ARM SVE allows vector sizes that are not a + * power of 2, but always a multiple of 16. The intent is + * that e.g. size == 80 would be expanded with 2x32 + 1x16. 
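+         * Here QEMU_ALIGN_DOWN(oprsz, 32) selects the prefix handled
+         * with 256-bit operations; any 16-byte tail then falls through
+         * to the V128 case below.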
+ */ + some = QEMU_ALIGN_DOWN(oprsz, 32); + expand_2i_vec(tcg_ctx, g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, + c, g->load_dest, g->fniv); + if (some == oprsz) { + break; + } + dofs += some; + aofs += some; + oprsz -= some; + maxsz -= some; + /* fallthru */ + case TCG_TYPE_V128: + expand_2i_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, + c, g->load_dest, g->fniv); + break; + case TCG_TYPE_V64: + expand_2i_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, + c, g->load_dest, g->fniv); + break; + + case 0: + if (g->fni8 && check_size_impl(oprsz, 8)) { + expand_2i_i64(tcg_ctx, dofs, aofs, oprsz, c, g->load_dest, g->fni8); + } else if (g->fni4 && check_size_impl(oprsz, 4)) { + expand_2i_i32(tcg_ctx, dofs, aofs, oprsz, c, g->load_dest, g->fni4); + } else { + if (g->fno) { + tcg_gen_gvec_2_ool(tcg_ctx, dofs, aofs, oprsz, maxsz, c, g->fno); + } else { + TCGv_i64 tcg_c = tcg_const_i64(tcg_ctx, c); + tcg_gen_gvec_2i_ool(tcg_ctx, dofs, aofs, tcg_c, oprsz, + maxsz, c, g->fnoi); + tcg_temp_free_i64(tcg_ctx, tcg_c); + } + oprsz = maxsz; + } + break; + + default: + g_assert_not_reached(); + } + tcg_swap_vecop_list(hold_list); + + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } +} + +/* Expand a vector operation with two vectors and a scalar. */ +void tcg_gen_gvec_2s(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t oprsz, + uint32_t maxsz, TCGv_i64 c, const GVecGen2s *g) +{ + TCGType type; + + check_size_align(oprsz, maxsz, dofs | aofs); + check_overlap_2(dofs, aofs, maxsz); + + type = 0; + if (g->fniv) { + type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); + } + if (type != 0) { + const TCGOpcode *this_list = g->opt_opc ? g->opt_opc : vecop_list_empty; + const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); + TCGv_vec t_vec = tcg_temp_new_vec(tcg_ctx, type); + uint32_t some; + + tcg_gen_dup_i64_vec(tcg_ctx, g->vece, t_vec, c); + + switch (type) { + case TCG_TYPE_V256: + /* Recall that ARM SVE allows vector sizes that are not a + * power of 2, but always a multiple of 16. The intent is + * that e.g. size == 80 would be expanded with 2x32 + 1x16. 
+ */ + some = QEMU_ALIGN_DOWN(oprsz, 32); + expand_2s_vec(tcg_ctx, g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, + t_vec, g->scalar_first, g->fniv); + if (some == oprsz) { + break; + } + dofs += some; + aofs += some; + oprsz -= some; + maxsz -= some; + /* fallthru */ + + case TCG_TYPE_V128: + expand_2s_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, + t_vec, g->scalar_first, g->fniv); + break; + + case TCG_TYPE_V64: + expand_2s_vec(tcg_ctx, g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, + t_vec, g->scalar_first, g->fniv); + break; + + default: + g_assert_not_reached(); + } + tcg_temp_free_vec(tcg_ctx, t_vec); + tcg_swap_vecop_list(hold_list); + } else if (g->fni8 && check_size_impl(oprsz, 8)) { + TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); + + gen_dup_i64(tcg_ctx, g->vece, t64, c); + expand_2s_i64(tcg_ctx, dofs, aofs, oprsz, t64, g->scalar_first, g->fni8); + tcg_temp_free_i64(tcg_ctx, t64); + } else if (g->fni4 && check_size_impl(oprsz, 4)) { + TCGv_i32 t32 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_extrl_i64_i32(tcg_ctx, t32, c); + gen_dup_i32(tcg_ctx, g->vece, t32, t32); + expand_2s_i32(tcg_ctx, dofs, aofs, oprsz, t32, g->scalar_first, g->fni4); + tcg_temp_free_i32(tcg_ctx, t32); + } else { + tcg_gen_gvec_2i_ool(tcg_ctx, dofs, aofs, c, oprsz, maxsz, 0, g->fno); + return; + } + + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } +} + +/* Expand a vector three-operand operation. */ +void tcg_gen_gvec_3(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g) +{ + const TCGOpcode *this_list = g->opt_opc ? g->opt_opc : vecop_list_empty; + const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); + TCGType type; + uint32_t some; + + check_size_align(oprsz, maxsz, dofs | aofs | bofs); + check_overlap_3(dofs, aofs, bofs, maxsz); + + type = 0; + if (g->fniv) { + type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); + } + switch (type) { + case TCG_TYPE_V256: + /* Recall that ARM SVE allows vector sizes that are not a + * power of 2, but always a multiple of 16. The intent is + * that e.g. size == 80 would be expanded with 2x32 + 1x16. + */ + some = QEMU_ALIGN_DOWN(oprsz, 32); + expand_3_vec(tcg_ctx, g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, + g->load_dest, g->fniv); + if (some == oprsz) { + break; + } + dofs += some; + aofs += some; + bofs += some; + oprsz -= some; + maxsz -= some; + /* fallthru */ + case TCG_TYPE_V128: + expand_3_vec(tcg_ctx, g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, + g->load_dest, g->fniv); + break; + case TCG_TYPE_V64: + expand_3_vec(tcg_ctx, g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, + g->load_dest, g->fniv); + break; + + case 0: + if (g->fni8 && check_size_impl(oprsz, 8)) { + expand_3_i64(tcg_ctx, dofs, aofs, bofs, oprsz, g->load_dest, g->fni8); + } else if (g->fni4 && check_size_impl(oprsz, 4)) { + expand_3_i32(tcg_ctx, dofs, aofs, bofs, oprsz, g->load_dest, g->fni4); + } else { + assert(g->fno != NULL); + tcg_gen_gvec_3_ool(tcg_ctx, dofs, aofs, bofs, oprsz, + maxsz, g->data, g->fno); + oprsz = maxsz; + } + break; + + default: + g_assert_not_reached(); + } + tcg_swap_vecop_list(hold_list); + + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } +} + +/* Expand a vector operation with three vectors and an immediate. 
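+   Note there is no separate fnoi out-of-line form here; when no inline
+   expansion applies, the immediate travels to g->fno through the
+   simd_desc data field.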
*/ +void tcg_gen_gvec_3i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz, int64_t c, + const GVecGen3i *g) +{ + const TCGOpcode *this_list = g->opt_opc ? g->opt_opc : vecop_list_empty; + const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); + TCGType type; + uint32_t some; + + check_size_align(oprsz, maxsz, dofs | aofs | bofs); + check_overlap_3(dofs, aofs, bofs, maxsz); + + type = 0; + if (g->fniv) { + type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); + } + switch (type) { + case TCG_TYPE_V256: + /* + * Recall that ARM SVE allows vector sizes that are not a + * power of 2, but always a multiple of 16. The intent is + * that e.g. size == 80 would be expanded with 2x32 + 1x16. + */ + some = QEMU_ALIGN_DOWN(oprsz, 32); + expand_3i_vec(tcg_ctx, g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, + c, g->load_dest, g->fniv); + if (some == oprsz) { + break; + } + dofs += some; + aofs += some; + bofs += some; + oprsz -= some; + maxsz -= some; + /* fallthru */ + case TCG_TYPE_V128: + expand_3i_vec(tcg_ctx, g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, + c, g->load_dest, g->fniv); + break; + case TCG_TYPE_V64: + expand_3i_vec(tcg_ctx, g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, + c, g->load_dest, g->fniv); + break; + + case 0: + if (g->fni8 && check_size_impl(oprsz, 8)) { + expand_3i_i64(tcg_ctx, dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni8); + } else if (g->fni4 && check_size_impl(oprsz, 4)) { + expand_3i_i32(tcg_ctx, dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni4); + } else { + assert(g->fno != NULL); + tcg_gen_gvec_3_ool(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, c, g->fno); + oprsz = maxsz; + } + break; + + default: + g_assert_not_reached(); + } + tcg_swap_vecop_list(hold_list); + + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } +} + +/* Expand a vector four-operand operation. */ +void tcg_gen_gvec_4(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, + uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g) +{ + const TCGOpcode *this_list = g->opt_opc ? g->opt_opc : vecop_list_empty; + const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); + TCGType type; + uint32_t some; + + check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs); + check_overlap_4(dofs, aofs, bofs, cofs, maxsz); + + type = 0; + if (g->fniv) { + type = choose_vector_type(tcg_ctx, g->opt_opc, g->vece, oprsz, g->prefer_i64); + } + switch (type) { + case TCG_TYPE_V256: + /* Recall that ARM SVE allows vector sizes that are not a + * power of 2, but always a multiple of 16. The intent is + * that e.g. size == 80 would be expanded with 2x32 + 1x16. 
+ */ + some = QEMU_ALIGN_DOWN(oprsz, 32); + expand_4_vec(tcg_ctx, g->vece, dofs, aofs, bofs, cofs, some, + 32, TCG_TYPE_V256, g->write_aofs, g->fniv); + if (some == oprsz) { + break; + } + dofs += some; + aofs += some; + bofs += some; + cofs += some; + oprsz -= some; + maxsz -= some; + /* fallthru */ + case TCG_TYPE_V128: + expand_4_vec(tcg_ctx, g->vece, dofs, aofs, bofs, cofs, oprsz, + 16, TCG_TYPE_V128, g->write_aofs, g->fniv); + break; + case TCG_TYPE_V64: + expand_4_vec(tcg_ctx, g->vece, dofs, aofs, bofs, cofs, oprsz, + 8, TCG_TYPE_V64, g->write_aofs, g->fniv); + break; + + case 0: + if (g->fni8 && check_size_impl(oprsz, 8)) { + expand_4_i64(tcg_ctx, dofs, aofs, bofs, cofs, oprsz, + g->write_aofs, g->fni8); + } else if (g->fni4 && check_size_impl(oprsz, 4)) { + expand_4_i32(tcg_ctx, dofs, aofs, bofs, cofs, oprsz, + g->write_aofs, g->fni4); + } else { + assert(g->fno != NULL); + tcg_gen_gvec_4_ool(tcg_ctx, dofs, aofs, bofs, cofs, + oprsz, maxsz, g->data, g->fno); + oprsz = maxsz; + } + break; + + default: + g_assert_not_reached(); + } + tcg_swap_vecop_list(hold_list); + + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } +} + +/* + * Expand specific vector operations. + */ + +static void vec_mov2(TCGContext *tcg_ctx, unsigned vece, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_mov_vec(tcg_ctx, a, b); +} + +void tcg_gen_gvec_mov(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen2 g = { + .fni8 = tcg_gen_mov_i64, + .fniv = vec_mov2, + .fno = gen_helper_gvec_mov, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + if (dofs != aofs) { + tcg_gen_gvec_2(tcg_ctx, dofs, aofs, oprsz, maxsz, &g); + } else { + check_size_align(oprsz, maxsz, dofs); + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } + } +} + +void tcg_gen_gvec_dup_i32(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t oprsz, + uint32_t maxsz, TCGv_i32 in) +{ + check_size_align(oprsz, maxsz, dofs); + tcg_debug_assert(vece <= MO_32); + do_dup(tcg_ctx, vece, dofs, oprsz, maxsz, in, NULL, 0); +} + +void tcg_gen_gvec_dup_i64(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t oprsz, + uint32_t maxsz, TCGv_i64 in) +{ + check_size_align(oprsz, maxsz, dofs); + tcg_debug_assert(vece <= MO_64); + do_dup(tcg_ctx, vece, dofs, oprsz, maxsz, NULL, in, 0); +} + +void tcg_gen_gvec_dup_mem(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz) +{ + check_size_align(oprsz, maxsz, dofs); + if (vece <= MO_64) { + TCGType type = choose_vector_type(tcg_ctx, NULL, vece, oprsz, 0); + if (type != 0) { + TCGv_vec t_vec = tcg_temp_new_vec(tcg_ctx, type); + tcg_gen_dup_mem_vec(tcg_ctx, vece, t_vec, tcg_ctx->cpu_env, aofs); + do_dup_store(tcg_ctx, type, dofs, oprsz, maxsz, t_vec); + tcg_temp_free_vec(tcg_ctx, t_vec); + } else if (vece <= MO_32) { + TCGv_i32 in = tcg_temp_new_i32(tcg_ctx); + switch (vece) { + case MO_8: + tcg_gen_ld8u_i32(tcg_ctx, in, tcg_ctx->cpu_env, aofs); + break; + case MO_16: + tcg_gen_ld16u_i32(tcg_ctx, in, tcg_ctx->cpu_env, aofs); + break; + default: + tcg_gen_ld_i32(tcg_ctx, in, tcg_ctx->cpu_env, aofs); + break; + } + do_dup(tcg_ctx, vece, dofs, oprsz, maxsz, in, NULL, 0); + tcg_temp_free_i32(tcg_ctx, in); + } else { + TCGv_i64 in = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, in, tcg_ctx->cpu_env, aofs); + do_dup(tcg_ctx, vece, dofs, oprsz, maxsz, NULL, in, 0); + tcg_temp_free_i64(tcg_ctx, in); + } + } else { + /* 128-bit duplicate. */ + /* ??? 
Dup to 256-bit vector. */ + int i; + + tcg_debug_assert(vece == 4); + tcg_debug_assert(oprsz >= 16); + if (TCG_TARGET_HAS_v128) { + TCGv_vec in = tcg_temp_new_vec(tcg_ctx, TCG_TYPE_V128); + + tcg_gen_ld_vec(tcg_ctx, in, tcg_ctx->cpu_env, aofs); + for (i = 0; i < oprsz; i += 16) { + tcg_gen_st_vec(tcg_ctx, in, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_vec(tcg_ctx, in); + } else { + TCGv_i64 in0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 in1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ld_i64(tcg_ctx, in0, tcg_ctx->cpu_env, aofs); + tcg_gen_ld_i64(tcg_ctx, in1, tcg_ctx->cpu_env, aofs + 8); + for (i = 0; i < oprsz; i += 16) { + tcg_gen_st_i64(tcg_ctx, in0, tcg_ctx->cpu_env, dofs + i); + tcg_gen_st_i64(tcg_ctx, in1, tcg_ctx->cpu_env, dofs + i + 8); + } + tcg_temp_free_i64(tcg_ctx, in0); + tcg_temp_free_i64(tcg_ctx, in1); + } + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } + } +} + +void tcg_gen_gvec_dup64i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t oprsz, + uint32_t maxsz, uint64_t x) +{ + check_size_align(oprsz, maxsz, dofs); + do_dup(tcg_ctx, MO_64, dofs, oprsz, maxsz, NULL, NULL, x); +} + +void tcg_gen_gvec_dup32i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t oprsz, + uint32_t maxsz, uint32_t x) +{ + check_size_align(oprsz, maxsz, dofs); + do_dup(tcg_ctx, MO_32, dofs, oprsz, maxsz, NULL, NULL, x); +} + +void tcg_gen_gvec_dup16i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t oprsz, + uint32_t maxsz, uint16_t x) +{ + check_size_align(oprsz, maxsz, dofs); + do_dup(tcg_ctx, MO_16, dofs, oprsz, maxsz, NULL, NULL, x); +} + +void tcg_gen_gvec_dup8i(TCGContext *tcg_ctx, uint32_t dofs, uint32_t oprsz, + uint32_t maxsz, uint8_t x) +{ + check_size_align(oprsz, maxsz, dofs); + do_dup(tcg_ctx, MO_8, dofs, oprsz, maxsz, NULL, NULL, x); +} + +void tcg_gen_gvec_not(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen2 g = { + .fni8 = tcg_gen_not_i64, + .fniv = tcg_gen_not_vec, + .fno = gen_helper_gvec_not, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + tcg_gen_gvec_2(tcg_ctx, dofs, aofs, oprsz, maxsz, &g); +} + +/* Perform a vector addition using normal addition and a mask. The mask + should be the sign bit of each lane. This 6-operation form is more + efficient than separate additions when there are 4 or more lanes in + the 64-bit operation. 
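+   In effect d = ((a & ~m) + (b & ~m)) ^ ((a ^ b) & m): with the sign
+   bits masked out, per-lane sums cannot carry across a lane boundary,
+   and the xor then reconstructs each lane's correct sign bit from
+   a ^ b plus the carry that arrived at the sign position.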
*/ +static void gen_addv_mask(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m) +{ + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_andc_i64(tcg_ctx, t1, a, m); + tcg_gen_andc_i64(tcg_ctx, t2, b, m); + tcg_gen_xor_i64(tcg_ctx, t3, a, b); + tcg_gen_add_i64(tcg_ctx, d, t1, t2); + tcg_gen_and_i64(tcg_ctx, t3, t3, m); + tcg_gen_xor_i64(tcg_ctx, d, d, t3); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); +} + +void tcg_gen_vec_add8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_8, 0x80)); + gen_addv_mask(tcg_ctx, d, a, b, m); + tcg_temp_free_i64(tcg_ctx, m); +} + +void tcg_gen_vec_add16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_16, 0x8000)); + gen_addv_mask(tcg_ctx, d, a, b, m); + tcg_temp_free_i64(tcg_ctx, m); +} + +void tcg_gen_vec_add32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_andi_i64(tcg_ctx, t1, a, ~0xffffffffull); + tcg_gen_add_i64(tcg_ctx, t2, a, b); + tcg_gen_add_i64(tcg_ctx, t1, t1, b); + tcg_gen_deposit_i64(tcg_ctx, d, t1, t2, 0, 32); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +} + +static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 }; + +void tcg_gen_gvec_add(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g[4] = { + { .fni8 = tcg_gen_vec_add8_i64, + .fniv = tcg_gen_add_vec, + .fno = gen_helper_gvec_add8, + .opt_opc = vecop_list_add, + .vece = MO_8 }, + { .fni8 = tcg_gen_vec_add16_i64, + .fniv = tcg_gen_add_vec, + .fno = gen_helper_gvec_add16, + .opt_opc = vecop_list_add, + .vece = MO_16 }, + { .fni4 = tcg_gen_add_i32, + .fniv = tcg_gen_add_vec, + .fno = gen_helper_gvec_add32, + .opt_opc = vecop_list_add, + .vece = MO_32 }, + { .fni8 = tcg_gen_add_i64, + .fniv = tcg_gen_add_vec, + .fno = gen_helper_gvec_add64, + .opt_opc = vecop_list_add, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +void tcg_gen_gvec_adds(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen2s g[4] = { + { .fni8 = tcg_gen_vec_add8_i64, + .fniv = tcg_gen_add_vec, + .fno = gen_helper_gvec_adds8, + .opt_opc = vecop_list_add, + .vece = MO_8 }, + { .fni8 = tcg_gen_vec_add16_i64, + .fniv = tcg_gen_add_vec, + .fno = gen_helper_gvec_adds16, + .opt_opc = vecop_list_add, + .vece = MO_16 }, + { .fni4 = tcg_gen_add_i32, + .fniv = tcg_gen_add_vec, + .fno = gen_helper_gvec_adds32, + .opt_opc = vecop_list_add, + .vece = MO_32 }, + { .fni8 = tcg_gen_add_i64, + .fniv = tcg_gen_add_vec, + .fno = gen_helper_gvec_adds64, + .opt_opc = vecop_list_add, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, c, &g[vece]); +} + +void tcg_gen_gvec_addi(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz) +{ + TCGv_i64 tmp = tcg_const_i64(tcg_ctx, c); + tcg_gen_gvec_adds(tcg_ctx, vece, dofs, aofs, tmp, oprsz, maxsz); + 
tcg_temp_free_i64(tcg_ctx, tmp); +} + +static const TCGOpcode vecop_list_sub[] = { INDEX_op_sub_vec, 0 }; + +void tcg_gen_gvec_subs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen2s g[4] = { + { .fni8 = tcg_gen_vec_sub8_i64, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_gvec_subs8, + .opt_opc = vecop_list_sub, + .vece = MO_8 }, + { .fni8 = tcg_gen_vec_sub16_i64, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_gvec_subs16, + .opt_opc = vecop_list_sub, + .vece = MO_16 }, + { .fni4 = tcg_gen_sub_i32, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_gvec_subs32, + .opt_opc = vecop_list_sub, + .vece = MO_32 }, + { .fni8 = tcg_gen_sub_i64, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_gvec_subs64, + .opt_opc = vecop_list_sub, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, c, &g[vece]); +} + +/* Perform a vector subtraction using normal subtraction and a mask. + Compare gen_addv_mask above. */ +static void gen_subv_mask(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m) +{ + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_or_i64(tcg_ctx, t1, a, m); + tcg_gen_andc_i64(tcg_ctx, t2, b, m); + tcg_gen_eqv_i64(tcg_ctx, t3, a, b); + tcg_gen_sub_i64(tcg_ctx, d, t1, t2); + tcg_gen_and_i64(tcg_ctx, t3, t3, m); + tcg_gen_xor_i64(tcg_ctx, d, d, t3); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); +} + +void tcg_gen_vec_sub8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_8, 0x80)); + gen_subv_mask(tcg_ctx, d, a, b, m); + tcg_temp_free_i64(tcg_ctx, m); +} + +void tcg_gen_vec_sub16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_16, 0x8000)); + gen_subv_mask(tcg_ctx, d, a, b, m); + tcg_temp_free_i64(tcg_ctx, m); +} + +void tcg_gen_vec_sub32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_andi_i64(tcg_ctx, t1, b, ~0xffffffffull); + tcg_gen_sub_i64(tcg_ctx, t2, a, b); + tcg_gen_sub_i64(tcg_ctx, t1, a, t1); + tcg_gen_deposit_i64(tcg_ctx, d, t1, t2, 0, 32); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +} + +void tcg_gen_gvec_sub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g[4] = { + { .fni8 = tcg_gen_vec_sub8_i64, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_gvec_sub8, + .opt_opc = vecop_list_sub, + .vece = MO_8 }, + { .fni8 = tcg_gen_vec_sub16_i64, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_gvec_sub16, + .opt_opc = vecop_list_sub, + .vece = MO_16 }, + { .fni4 = tcg_gen_sub_i32, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_gvec_sub32, + .opt_opc = vecop_list_sub, + .vece = MO_32 }, + { .fni8 = tcg_gen_sub_i64, + .fniv = tcg_gen_sub_vec, + .fno = gen_helper_gvec_sub64, + .opt_opc = vecop_list_sub, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 }; + +void tcg_gen_gvec_mul(TCGContext *tcg_ctx, 
unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_mul_vec, + .fno = gen_helper_gvec_mul8, + .opt_opc = vecop_list_mul, + .vece = MO_8 }, + { .fniv = tcg_gen_mul_vec, + .fno = gen_helper_gvec_mul16, + .opt_opc = vecop_list_mul, + .vece = MO_16 }, + { .fni4 = tcg_gen_mul_i32, + .fniv = tcg_gen_mul_vec, + .fno = gen_helper_gvec_mul32, + .opt_opc = vecop_list_mul, + .vece = MO_32 }, + { .fni8 = tcg_gen_mul_i64, + .fniv = tcg_gen_mul_vec, + .fno = gen_helper_gvec_mul64, + .opt_opc = vecop_list_mul, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +void tcg_gen_gvec_muls(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen2s g[4] = { + { .fniv = tcg_gen_mul_vec, + .fno = gen_helper_gvec_muls8, + .opt_opc = vecop_list_mul, + .vece = MO_8 }, + { .fniv = tcg_gen_mul_vec, + .fno = gen_helper_gvec_muls16, + .opt_opc = vecop_list_mul, + .vece = MO_16 }, + { .fni4 = tcg_gen_mul_i32, + .fniv = tcg_gen_mul_vec, + .fno = gen_helper_gvec_muls32, + .opt_opc = vecop_list_mul, + .vece = MO_32 }, + { .fni8 = tcg_gen_mul_i64, + .fniv = tcg_gen_mul_vec, + .fno = gen_helper_gvec_muls64, + .opt_opc = vecop_list_mul, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, c, &g[vece]); +} + +void tcg_gen_gvec_muli(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz) +{ + TCGv_i64 tmp = tcg_const_i64(tcg_ctx, c); + tcg_gen_gvec_muls(tcg_ctx, vece, dofs, aofs, tmp, oprsz, maxsz); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +void tcg_gen_gvec_ssadd(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_ssadd_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_ssadd_vec, + .fno = gen_helper_gvec_ssadd8, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_ssadd_vec, + .fno = gen_helper_gvec_ssadd16, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fniv = tcg_gen_ssadd_vec, + .fno = gen_helper_gvec_ssadd32, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fniv = tcg_gen_ssadd_vec, + .fno = gen_helper_gvec_ssadd64, + .opt_opc = vecop_list, + .vece = MO_64 }, + }; + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +void tcg_gen_gvec_sssub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_sssub_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_sssub_vec, + .fno = gen_helper_gvec_sssub8, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_sssub_vec, + .fno = gen_helper_gvec_sssub16, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fniv = tcg_gen_sssub_vec, + .fno = gen_helper_gvec_sssub32, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fniv = tcg_gen_sssub_vec, + .fno = gen_helper_gvec_sssub64, + .opt_opc = vecop_list, + .vece = MO_64 }, + }; + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +static void tcg_gen_usadd_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 
b) +{ + TCGv_i32 max = tcg_const_i32(tcg_ctx, -1); + tcg_gen_add_i32(tcg_ctx, d, a, b); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, d, d, a, max, d); + tcg_temp_free_i32(tcg_ctx, max); +} + +static void tcg_gen_usadd_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 max = tcg_const_i64(tcg_ctx, -1); + tcg_gen_add_i64(tcg_ctx, d, a, b); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, d, d, a, max, d); + tcg_temp_free_i64(tcg_ctx, max); +} + +void tcg_gen_gvec_usadd(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_usadd_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_usadd_vec, + .fno = gen_helper_gvec_usadd8, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_usadd_vec, + .fno = gen_helper_gvec_usadd16, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_usadd_i32, + .fniv = tcg_gen_usadd_vec, + .fno = gen_helper_gvec_usadd32, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_usadd_i64, + .fniv = tcg_gen_usadd_vec, + .fno = gen_helper_gvec_usadd64, + .opt_opc = vecop_list, + .vece = MO_64 } + }; + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +static void tcg_gen_ussub_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 min = tcg_const_i32(tcg_ctx, 0); + tcg_gen_sub_i32(tcg_ctx, d, a, b); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, d, a, b, min, d); + tcg_temp_free_i32(tcg_ctx, min); +} + +static void tcg_gen_ussub_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 min = tcg_const_i64(tcg_ctx, 0); + tcg_gen_sub_i64(tcg_ctx, d, a, b); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, d, a, b, min, d); + tcg_temp_free_i64(tcg_ctx, min); +} + +void tcg_gen_gvec_ussub(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_ussub_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_ussub_vec, + .fno = gen_helper_gvec_ussub8, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_ussub_vec, + .fno = gen_helper_gvec_ussub16, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_ussub_i32, + .fniv = tcg_gen_ussub_vec, + .fno = gen_helper_gvec_ussub32, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_ussub_i64, + .fniv = tcg_gen_ussub_vec, + .fno = gen_helper_gvec_ussub64, + .opt_opc = vecop_list, + .vece = MO_64 } + }; + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +void tcg_gen_gvec_smin(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_smin_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_smin_vec, + .fno = gen_helper_gvec_smin8, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_smin_vec, + .fno = gen_helper_gvec_smin16, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_smin_i32, + .fniv = tcg_gen_smin_vec, + .fno = gen_helper_gvec_smin32, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_smin_i64, + .fniv = tcg_gen_smin_vec, + .fno = gen_helper_gvec_smin64, + .opt_opc = vecop_list, + .vece = MO_64 } + }; + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +void 
tcg_gen_gvec_umin(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_umin_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_umin_vec, + .fno = gen_helper_gvec_umin8, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_umin_vec, + .fno = gen_helper_gvec_umin16, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_umin_i32, + .fniv = tcg_gen_umin_vec, + .fno = gen_helper_gvec_umin32, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_umin_i64, + .fniv = tcg_gen_umin_vec, + .fno = gen_helper_gvec_umin64, + .opt_opc = vecop_list, + .vece = MO_64 } + }; + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +void tcg_gen_gvec_smax(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_smax_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_smax_vec, + .fno = gen_helper_gvec_smax8, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_smax_vec, + .fno = gen_helper_gvec_smax16, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_smax_i32, + .fniv = tcg_gen_smax_vec, + .fno = gen_helper_gvec_smax32, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_smax_i64, + .fniv = tcg_gen_smax_vec, + .fno = gen_helper_gvec_smax64, + .opt_opc = vecop_list, + .vece = MO_64 } + }; + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +void tcg_gen_gvec_umax(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_umax_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_umax_vec, + .fno = gen_helper_gvec_umax8, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_umax_vec, + .fno = gen_helper_gvec_umax16, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_umax_i32, + .fniv = tcg_gen_umax_vec, + .fno = gen_helper_gvec_umax32, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_umax_i64, + .fniv = tcg_gen_umax_vec, + .fno = gen_helper_gvec_umax64, + .opt_opc = vecop_list, + .vece = MO_64 } + }; + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +/* Perform a vector negation using normal negation and a mask. + Compare gen_subv_mask above. 
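+   This is the a == 0 specialization of that routine: t1 = 0 | m
+   collapses to m itself, and eqv(0, b) & m becomes andc(m, b), which
+   saves one temporary.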
+ */
+static void gen_negv_mask(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
+{
+    TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx);
+    TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx);
+
+    tcg_gen_andc_i64(tcg_ctx, t3, m, b);
+    tcg_gen_andc_i64(tcg_ctx, t2, b, m);
+    tcg_gen_sub_i64(tcg_ctx, d, m, t2);
+    tcg_gen_xor_i64(tcg_ctx, d, d, t3);
+
+    tcg_temp_free_i64(tcg_ctx, t2);
+    tcg_temp_free_i64(tcg_ctx, t3);
+}
+
+void tcg_gen_vec_neg8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b)
+{
+    TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_8, 0x80));
+    gen_negv_mask(tcg_ctx, d, b, m);
+    tcg_temp_free_i64(tcg_ctx, m);
+}
+
+void tcg_gen_vec_neg16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b)
+{
+    TCGv_i64 m = tcg_const_i64(tcg_ctx, dup_const(MO_16, 0x8000));
+    gen_negv_mask(tcg_ctx, d, b, m);
+    tcg_temp_free_i64(tcg_ctx, m);
+}
+
+void tcg_gen_vec_neg32_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
+    TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx);
+
+    tcg_gen_andi_i64(tcg_ctx, t1, b, ~0xffffffffull);
+    tcg_gen_neg_i64(tcg_ctx, t2, b);
+    tcg_gen_neg_i64(tcg_ctx, t1, t1);
+    tcg_gen_deposit_i64(tcg_ctx, d, t1, t2, 0, 32);
+
+    tcg_temp_free_i64(tcg_ctx, t1);
+    tcg_temp_free_i64(tcg_ctx, t2);
+}
+
+void tcg_gen_gvec_neg(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                      uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, 0 };
+    static const GVecGen2 g[4] = {
+        { .fni8 = tcg_gen_vec_neg8_i64,
+          .fniv = tcg_gen_neg_vec,
+          .fno = gen_helper_gvec_neg8,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = tcg_gen_vec_neg16_i64,
+          .fniv = tcg_gen_neg_vec,
+          .fno = gen_helper_gvec_neg16,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = tcg_gen_neg_i32,
+          .fniv = tcg_gen_neg_vec,
+          .fno = gen_helper_gvec_neg32,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fni8 = tcg_gen_neg_i64,
+          .fniv = tcg_gen_neg_vec,
+          .fno = gen_helper_gvec_neg64,
+          .opt_opc = vecop_list,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .vece = MO_64 },
+    };
+
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_2(tcg_ctx, dofs, aofs, oprsz, maxsz, &g[vece]);
+}
+
+static void gen_absv_mask(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b, unsigned vece)
+{
+    TCGv_i64 t = tcg_temp_new_i64(tcg_ctx);
+    int nbit = 8 << vece;
+
+    /* Create -1 for each negative element. */
+    tcg_gen_shri_i64(tcg_ctx, t, b, nbit - 1);
+    tcg_gen_andi_i64(tcg_ctx, t, t, dup_const(vece, 1));
+    tcg_gen_muli_i64(tcg_ctx, t, t, (1 << nbit) - 1);
+
+    /*
+     * Invert (via xor -1) and add one (via sub -1).
+     * Because of the ordering the msb is cleared,
+     * so we never have carry into the next element.
+ */ + tcg_gen_xor_i64(tcg_ctx, d, b, t); + tcg_gen_sub_i64(tcg_ctx, d, d, t); + + tcg_temp_free_i64(tcg_ctx, t); +} + +static void tcg_gen_vec_abs8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b) +{ + gen_absv_mask(tcg_ctx, d, b, MO_8); +} + +static void tcg_gen_vec_abs16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 b) +{ + gen_absv_mask(tcg_ctx, d, b, MO_16); +} + +void tcg_gen_gvec_abs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_abs_vec, 0 }; + static const GVecGen2 g[4] = { + { .fni8 = tcg_gen_vec_abs8_i64, + .fniv = tcg_gen_abs_vec, + .fno = gen_helper_gvec_abs8, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fni8 = tcg_gen_vec_abs16_i64, + .fniv = tcg_gen_abs_vec, + .fno = gen_helper_gvec_abs16, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_abs_i32, + .fniv = tcg_gen_abs_vec, + .fno = gen_helper_gvec_abs32, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_abs_i64, + .fniv = tcg_gen_abs_vec, + .fno = gen_helper_gvec_abs64, + .opt_opc = vecop_list, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_2(tcg_ctx, dofs, aofs, oprsz, maxsz, &g[vece]); +} + +void tcg_gen_gvec_and(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g = { + .fni8 = tcg_gen_and_i64, + .fniv = tcg_gen_and_vec, + .fno = gen_helper_gvec_and, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + + if (aofs == bofs) { + tcg_gen_gvec_mov(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); + } else { + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); + } +} + +void tcg_gen_gvec_or(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g = { + .fni8 = tcg_gen_or_i64, + .fniv = tcg_gen_or_vec, + .fno = gen_helper_gvec_or, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + + if (aofs == bofs) { + tcg_gen_gvec_mov(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); + } else { + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); + } +} + +void tcg_gen_gvec_xor(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g = { + .fni8 = tcg_gen_xor_i64, + .fniv = tcg_gen_xor_vec, + .fno = gen_helper_gvec_xor, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + + if (aofs == bofs) { + tcg_gen_gvec_dup8i(tcg_ctx, dofs, oprsz, maxsz, 0); + } else { + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); + } +} + +void tcg_gen_gvec_andc(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g = { + .fni8 = tcg_gen_andc_i64, + .fniv = tcg_gen_andc_vec, + .fno = gen_helper_gvec_andc, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + + if (aofs == bofs) { + tcg_gen_gvec_dup8i(tcg_ctx, dofs, oprsz, maxsz, 0); + } else { + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); + } +} + +void tcg_gen_gvec_orc(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g = { + .fni8 = tcg_gen_orc_i64, + .fniv = tcg_gen_orc_vec, + .fno = gen_helper_gvec_orc, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + + if (aofs == bofs) { + tcg_gen_gvec_dup8i(tcg_ctx, dofs, oprsz, maxsz, -1); + } else { + 
tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); + } +} + +void tcg_gen_gvec_nand(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g = { + .fni8 = tcg_gen_nand_i64, + .fniv = tcg_gen_nand_vec, + .fno = gen_helper_gvec_nand, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + + if (aofs == bofs) { + tcg_gen_gvec_not(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); + } else { + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); + } +} + +void tcg_gen_gvec_nor(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g = { + .fni8 = tcg_gen_nor_i64, + .fniv = tcg_gen_nor_vec, + .fno = gen_helper_gvec_nor, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + + if (aofs == bofs) { + tcg_gen_gvec_not(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); + } else { + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); + } +} + +void tcg_gen_gvec_eqv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen3 g = { + .fni8 = tcg_gen_eqv_i64, + .fniv = tcg_gen_eqv_vec, + .fno = gen_helper_gvec_eqv, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + }; + + if (aofs == bofs) { + tcg_gen_gvec_dup8i(tcg_ctx, dofs, oprsz, maxsz, -1); + } else { + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g); + } +} + +static const GVecGen2s gop_ands = { + .fni8 = tcg_gen_and_i64, + .fniv = tcg_gen_and_vec, + .fno = gen_helper_gvec_ands, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 +}; + +void tcg_gen_gvec_ands(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) +{ + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + gen_dup_i64(tcg_ctx, vece, tmp, c); + tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_ands); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +void tcg_gen_gvec_andi(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz) +{ + TCGv_i64 tmp = tcg_const_i64(tcg_ctx, dup_const(vece, c)); + tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_ands); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static const GVecGen2s gop_xors = { + .fni8 = tcg_gen_xor_i64, + .fniv = tcg_gen_xor_vec, + .fno = gen_helper_gvec_xors, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 +}; + +void tcg_gen_gvec_xors(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) +{ + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + gen_dup_i64(tcg_ctx, vece, tmp, c); + tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_xors); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +void tcg_gen_gvec_xori(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz) +{ + TCGv_i64 tmp = tcg_const_i64(tcg_ctx, dup_const(vece, c)); + tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_xors); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static const GVecGen2s gop_ors = { + .fni8 = tcg_gen_or_i64, + .fniv = tcg_gen_or_vec, + .fno = gen_helper_gvec_ors, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 +}; + +void tcg_gen_gvec_ors(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + TCGv_i64 c, uint32_t oprsz, uint32_t maxsz) +{ + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + gen_dup_i64(tcg_ctx, vece, tmp, c); + 
tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_ors); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +void tcg_gen_gvec_ori(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz) +{ + TCGv_i64 tmp = tcg_const_i64(tcg_ctx, dup_const(vece, c)); + tcg_gen_gvec_2s(tcg_ctx, dofs, aofs, oprsz, maxsz, tmp, &gop_ors); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +void tcg_gen_vec_shl8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t c) +{ + uint64_t mask = dup_const(MO_8, 0xff << c); + tcg_gen_shli_i64(tcg_ctx, d, a, c); + tcg_gen_andi_i64(tcg_ctx, d, d, mask); +} + +void tcg_gen_vec_shl16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t c) +{ + uint64_t mask = dup_const(MO_16, 0xffff << c); + tcg_gen_shli_i64(tcg_ctx, d, a, c); + tcg_gen_andi_i64(tcg_ctx, d, d, mask); +} + +void tcg_gen_gvec_shli(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t shift, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 }; + static const GVecGen2i g[4] = { + { .fni8 = tcg_gen_vec_shl8i_i64, + .fniv = tcg_gen_shli_vec, + .fno = gen_helper_gvec_shl8i, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fni8 = tcg_gen_vec_shl16i_i64, + .fniv = tcg_gen_shli_vec, + .fno = gen_helper_gvec_shl16i, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_shli_i32, + .fniv = tcg_gen_shli_vec, + .fno = gen_helper_gvec_shl32i, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_shli_i64, + .fniv = tcg_gen_shli_vec, + .fno = gen_helper_gvec_shl64i, + .opt_opc = vecop_list, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_debug_assert(shift >= 0 && shift < (8 << vece)); + if (shift == 0) { + tcg_gen_gvec_mov(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); + } else { + tcg_gen_gvec_2i(tcg_ctx, dofs, aofs, oprsz, maxsz, shift, &g[vece]); + } +} + +void tcg_gen_vec_shr8i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t c) +{ + uint64_t mask = dup_const(MO_8, 0xff >> c); + tcg_gen_shri_i64(tcg_ctx, d, a, c); + tcg_gen_andi_i64(tcg_ctx, d, d, mask); +} + +void tcg_gen_vec_shr16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t c) +{ + uint64_t mask = dup_const(MO_16, 0xffff >> c); + tcg_gen_shri_i64(tcg_ctx, d, a, c); + tcg_gen_andi_i64(tcg_ctx, d, d, mask); +} + +void tcg_gen_gvec_shri(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t shift, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 }; + static const GVecGen2i g[4] = { + { .fni8 = tcg_gen_vec_shr8i_i64, + .fniv = tcg_gen_shri_vec, + .fno = gen_helper_gvec_shr8i, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fni8 = tcg_gen_vec_shr16i_i64, + .fniv = tcg_gen_shri_vec, + .fno = gen_helper_gvec_shr16i, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_shri_i32, + .fniv = tcg_gen_shri_vec, + .fno = gen_helper_gvec_shr32i, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_shri_i64, + .fniv = tcg_gen_shri_vec, + .fno = gen_helper_gvec_shr64i, + .opt_opc = vecop_list, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_debug_assert(shift >= 0 && shift < (8 << vece)); + if (shift == 0) { + tcg_gen_gvec_mov(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); + } else { + tcg_gen_gvec_2i(tcg_ctx, dofs, aofs, oprsz, maxsz, shift, &g[vece]); + } +} + +void tcg_gen_vec_sar8i_i64(TCGContext *tcg_ctx, TCGv_i64 
d, TCGv_i64 a, int64_t c) +{ + uint64_t s_mask = dup_const(MO_8, 0x80 >> c); + uint64_t c_mask = dup_const(MO_8, 0xff >> c); + TCGv_i64 s = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_shri_i64(tcg_ctx, d, a, c); + tcg_gen_andi_i64(tcg_ctx, s, d, s_mask); /* isolate (shifted) sign bit */ + tcg_gen_muli_i64(tcg_ctx, s, s, (2 << c) - 2); /* replicate isolated signs */ + tcg_gen_andi_i64(tcg_ctx, d, d, c_mask); /* clear out bits above sign */ + tcg_gen_or_i64(tcg_ctx, d, d, s); /* include sign extension */ + tcg_temp_free_i64(tcg_ctx, s); +} + +void tcg_gen_vec_sar16i_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, int64_t c) +{ + uint64_t s_mask = dup_const(MO_16, 0x8000 >> c); + uint64_t c_mask = dup_const(MO_16, 0xffff >> c); + TCGv_i64 s = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_shri_i64(tcg_ctx, d, a, c); + tcg_gen_andi_i64(tcg_ctx, s, d, s_mask); /* isolate (shifted) sign bit */ + tcg_gen_andi_i64(tcg_ctx, d, d, c_mask); /* clear out bits above sign */ + tcg_gen_muli_i64(tcg_ctx, s, s, (2 << c) - 2); /* replicate isolated signs */ + tcg_gen_or_i64(tcg_ctx, d, d, s); /* include sign extension */ + tcg_temp_free_i64(tcg_ctx, s); +} + +void tcg_gen_gvec_sari(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t shift, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_sari_vec, 0 }; + static const GVecGen2i g[4] = { + { .fni8 = tcg_gen_vec_sar8i_i64, + .fniv = tcg_gen_sari_vec, + .fno = gen_helper_gvec_sar8i, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fni8 = tcg_gen_vec_sar16i_i64, + .fniv = tcg_gen_sari_vec, + .fno = gen_helper_gvec_sar16i, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_sari_i32, + .fniv = tcg_gen_sari_vec, + .fno = gen_helper_gvec_sar32i, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_sari_i64, + .fniv = tcg_gen_sari_vec, + .fno = gen_helper_gvec_sar64i, + .opt_opc = vecop_list, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_debug_assert(shift >= 0 && shift < (8 << vece)); + if (shift == 0) { + tcg_gen_gvec_mov(tcg_ctx, vece, dofs, aofs, oprsz, maxsz); + } else { + tcg_gen_gvec_2i(tcg_ctx, dofs, aofs, oprsz, maxsz, shift, &g[vece]); + } +} + +/* + * Specialized generation vector shifts by a non-constant scalar. 
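+ *
+ * Three expansion strategies are tried in turn below: a native
+ * vector-by-scalar shift (the s_list opcode), a vector-by-vector shift
+ * with the scalar broadcast into a vector (the v_list opcode), and
+ * finally an integral or out-of-line fallback.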
+ */ + +typedef struct { + void (*fni4)(TCGContext *, TCGv_i32, TCGv_i32, TCGv_i32); + void (*fni8)(TCGContext *, TCGv_i64, TCGv_i64, TCGv_i64); + void (*fniv_s)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_i32); + void (*fniv_v)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_vec); + gen_helper_gvec_2 *fno[4]; + TCGOpcode s_list[2]; + TCGOpcode v_list[2]; +} GVecGen2sh; + +static void expand_2sh_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t tysz, TCGType type, + TCGv_i32 shift, + void (*fni)(TCGContext *, unsigned, TCGv_vec, TCGv_vec, TCGv_i32)) +{ + TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type); + uint32_t i; + + for (i = 0; i < oprsz; i += tysz) { + tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i); + fni(tcg_ctx, vece, t0, t0, shift); + tcg_gen_st_vec(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i); + } + tcg_temp_free_vec(tcg_ctx, t0); +} + +static void +do_gvec_shifts(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift, + uint32_t oprsz, uint32_t maxsz, const GVecGen2sh *g) +{ + TCGType type; + uint32_t some; + + check_size_align(oprsz, maxsz, dofs | aofs); + check_overlap_2(dofs, aofs, maxsz); + + /* If the backend has a scalar expansion, great. */ + type = choose_vector_type(tcg_ctx, g->s_list, vece, oprsz, vece == MO_64); + if (type) { + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + switch (type) { + case TCG_TYPE_V256: + some = QEMU_ALIGN_DOWN(oprsz, 32); + expand_2sh_vec(tcg_ctx, vece, dofs, aofs, some, 32, + TCG_TYPE_V256, shift, g->fniv_s); + if (some == oprsz) { + break; + } + dofs += some; + aofs += some; + oprsz -= some; + maxsz -= some; + /* fallthru */ + case TCG_TYPE_V128: + expand_2sh_vec(tcg_ctx, vece, dofs, aofs, oprsz, 16, + TCG_TYPE_V128, shift, g->fniv_s); + break; + case TCG_TYPE_V64: + expand_2sh_vec(tcg_ctx, vece, dofs, aofs, oprsz, 8, + TCG_TYPE_V64, shift, g->fniv_s); + break; + default: + g_assert_not_reached(); + } + tcg_swap_vecop_list(hold_list); + goto clear_tail; + } + + /* If the backend supports variable vector shifts, also cool. */ + type = choose_vector_type(tcg_ctx, g->v_list, vece, oprsz, vece == MO_64); + if (type) { + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + TCGv_vec v_shift = tcg_temp_new_vec(tcg_ctx, type); + + if (vece == MO_64) { + TCGv_i64 sh64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, sh64, shift); + tcg_gen_dup_i64_vec(tcg_ctx, MO_64, v_shift, sh64); + tcg_temp_free_i64(tcg_ctx, sh64); + } else { + tcg_gen_dup_i32_vec(tcg_ctx, vece, v_shift, shift); + } + + switch (type) { + case TCG_TYPE_V256: + some = QEMU_ALIGN_DOWN(oprsz, 32); + expand_2s_vec(tcg_ctx, vece, dofs, aofs, some, 32, TCG_TYPE_V256, + v_shift, false, g->fniv_v); + if (some == oprsz) { + break; + } + dofs += some; + aofs += some; + oprsz -= some; + maxsz -= some; + /* fallthru */ + case TCG_TYPE_V128: + expand_2s_vec(tcg_ctx, vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, + v_shift, false, g->fniv_v); + break; + case TCG_TYPE_V64: + expand_2s_vec(tcg_ctx, vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, + v_shift, false, g->fniv_v); + break; + default: + g_assert_not_reached(); + } + tcg_temp_free_vec(tcg_ctx, v_shift); + tcg_swap_vecop_list(hold_list); + goto clear_tail; + } + + /* Otherwise fall back to integral... 
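+       For MO_32 and MO_64 that is an inline element loop; otherwise an
+       out-of-line helper is called, with the runtime shift count folded
+       into the descriptor (shift << SIMD_DATA_SHIFT, or'ed into
+       simd_desc).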
 */
+    if (vece == MO_32 && check_size_impl(oprsz, 4)) {
+        expand_2s_i32(tcg_ctx, dofs, aofs, oprsz, shift, false, g->fni4);
+    } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
+        TCGv_i64 sh64 = tcg_temp_new_i64(tcg_ctx);
+        tcg_gen_extu_i32_i64(tcg_ctx, sh64, shift);
+        expand_2s_i64(tcg_ctx, dofs, aofs, oprsz, sh64, false, g->fni8);
+        tcg_temp_free_i64(tcg_ctx, sh64);
+    } else {
+        TCGv_ptr a0 = tcg_temp_new_ptr(tcg_ctx);
+        TCGv_ptr a1 = tcg_temp_new_ptr(tcg_ctx);
+        TCGv_i32 desc = tcg_temp_new_i32(tcg_ctx);
+
+        tcg_gen_shli_i32(tcg_ctx, desc, shift, SIMD_DATA_SHIFT);
+        tcg_gen_ori_i32(tcg_ctx, desc, desc, simd_desc(oprsz, maxsz, 0));
+        tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, dofs);
+        tcg_gen_addi_ptr(tcg_ctx, a1, tcg_ctx->cpu_env, aofs);
+
+        g->fno[vece](tcg_ctx, a0, a1, desc);
+
+        tcg_temp_free_ptr(tcg_ctx, a0);
+        tcg_temp_free_ptr(tcg_ctx, a1);
+        tcg_temp_free_i32(tcg_ctx, desc);
+        return;
+    }
+
+ clear_tail:
+    if (oprsz < maxsz) {
+        expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz);
+    }
+}
+
+void tcg_gen_gvec_shls(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen2sh g = {
+        .fni4 = tcg_gen_shl_i32,
+        .fni8 = tcg_gen_shl_i64,
+        .fniv_s = tcg_gen_shls_vec,
+        .fniv_v = tcg_gen_shlv_vec,
+        .fno = {
+            gen_helper_gvec_shl8i,
+            gen_helper_gvec_shl16i,
+            gen_helper_gvec_shl32i,
+            gen_helper_gvec_shl64i,
+        },
+        .s_list = { INDEX_op_shls_vec, 0 },
+        .v_list = { INDEX_op_shlv_vec, 0 },
+    };
+
+    tcg_debug_assert(vece <= MO_64);
+    do_gvec_shifts(tcg_ctx, vece, dofs, aofs, shift, oprsz, maxsz, &g);
+}
+
+void tcg_gen_gvec_shrs(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen2sh g = {
+        .fni4 = tcg_gen_shr_i32,
+        .fni8 = tcg_gen_shr_i64,
+        .fniv_s = tcg_gen_shrs_vec,
+        .fniv_v = tcg_gen_shrv_vec,
+        .fno = {
+            gen_helper_gvec_shr8i,
+            gen_helper_gvec_shr16i,
+            gen_helper_gvec_shr32i,
+            gen_helper_gvec_shr64i,
+        },
+        .s_list = { INDEX_op_shrs_vec, 0 },
+        .v_list = { INDEX_op_shrv_vec, 0 },
+    };
+
+    tcg_debug_assert(vece <= MO_64);
+    do_gvec_shifts(tcg_ctx, vece, dofs, aofs, shift, oprsz, maxsz, &g);
+}
+
+void tcg_gen_gvec_sars(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen2sh g = {
+        .fni4 = tcg_gen_sar_i32,
+        .fni8 = tcg_gen_sar_i64,
+        .fniv_s = tcg_gen_sars_vec,
+        .fniv_v = tcg_gen_sarv_vec,
+        .fno = {
+            gen_helper_gvec_sar8i,
+            gen_helper_gvec_sar16i,
+            gen_helper_gvec_sar32i,
+            gen_helper_gvec_sar64i,
+        },
+        .s_list = { INDEX_op_sars_vec, 0 },
+        .v_list = { INDEX_op_sarv_vec, 0 },
+    };
+
+    tcg_debug_assert(vece <= MO_64);
+    do_gvec_shifts(tcg_ctx, vece, dofs, aofs, shift, oprsz, maxsz, &g);
+}
+
+/*
+ * Expand D = A << (B % element bits)
+ *
+ * Unlike scalar shifts, where the target front end can easily fold the
+ * modulo into the expansion, vector shifts have no such hook.  If the
+ * target naturally includes the modulo as part of the operation, great!
+ * If the target has some other behaviour from out-of-range shifts,
+ * then it could not use this function anyway, and would need to
+ * do its own expansion with custom functions.
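+ *
+ * E.g. for vece == MO_32 the count is reduced modulo 32 below
+ * (tcg_gen_shl_mod_i32 masks it with 31), so a shift by 33 behaves as
+ * a shift by 1.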
+ */ +static void tcg_gen_shlv_mod_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, + TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, d); + + tcg_gen_dupi_vec(tcg_ctx, vece, t, (8 << vece) - 1); + tcg_gen_and_vec(tcg_ctx, vece, t, t, b); + tcg_gen_shlv_vec(tcg_ctx, vece, d, a, t); + tcg_temp_free_vec(tcg_ctx, t); +} + +static void tcg_gen_shl_mod_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_andi_i32(tcg_ctx, t, b, 31); + tcg_gen_shl_i32(tcg_ctx, d, a, t); + tcg_temp_free_i32(tcg_ctx, t); +} + +static void tcg_gen_shl_mod_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_andi_i64(tcg_ctx, t, b, 63); + tcg_gen_shl_i64(tcg_ctx, d, a, t); + tcg_temp_free_i64(tcg_ctx, t); +} + +void tcg_gen_gvec_shlv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_shlv_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_shlv_mod_vec, + .fno = gen_helper_gvec_shl8v, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_shlv_mod_vec, + .fno = gen_helper_gvec_shl16v, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_shl_mod_i32, + .fniv = tcg_gen_shlv_mod_vec, + .fno = gen_helper_gvec_shl32v, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_shl_mod_i64, + .fniv = tcg_gen_shlv_mod_vec, + .fno = gen_helper_gvec_shl64v, + .opt_opc = vecop_list, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +/* + * Similarly for logical right shifts. 
+ */ + +static void tcg_gen_shrv_mod_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, + TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, d); + + tcg_gen_dupi_vec(tcg_ctx, vece, t, (8 << vece) - 1); + tcg_gen_and_vec(tcg_ctx, vece, t, t, b); + tcg_gen_shrv_vec(tcg_ctx, vece, d, a, t); + tcg_temp_free_vec(tcg_ctx, t); +} + +static void tcg_gen_shr_mod_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_andi_i32(tcg_ctx, t, b, 31); + tcg_gen_shr_i32(tcg_ctx, d, a, t); + tcg_temp_free_i32(tcg_ctx, t); +} + +static void tcg_gen_shr_mod_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_andi_i64(tcg_ctx, t, b, 63); + tcg_gen_shr_i64(tcg_ctx, d, a, t); + tcg_temp_free_i64(tcg_ctx, t); +} + +void tcg_gen_gvec_shrv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_shrv_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_shrv_mod_vec, + .fno = gen_helper_gvec_shr8v, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_shrv_mod_vec, + .fno = gen_helper_gvec_shr16v, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_shr_mod_i32, + .fniv = tcg_gen_shrv_mod_vec, + .fno = gen_helper_gvec_shr32v, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_shr_mod_i64, + .fniv = tcg_gen_shrv_mod_vec, + .fno = gen_helper_gvec_shr64v, + .opt_opc = vecop_list, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +/* + * Similarly for arithmetic right shifts. 
+ */ + +static void tcg_gen_sarv_mod_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec d, + TCGv_vec a, TCGv_vec b) +{ + TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, d); + + tcg_gen_dupi_vec(tcg_ctx, vece, t, (8 << vece) - 1); + tcg_gen_and_vec(tcg_ctx, vece, t, t, b); + tcg_gen_sarv_vec(tcg_ctx, vece, d, a, t); + tcg_temp_free_vec(tcg_ctx, t); +} + +static void tcg_gen_sar_mod_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_andi_i32(tcg_ctx, t, b, 31); + tcg_gen_sar_i32(tcg_ctx, d, a, t); + tcg_temp_free_i32(tcg_ctx, t); +} + +static void tcg_gen_sar_mod_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_andi_i64(tcg_ctx, t, b, 63); + tcg_gen_sar_i64(tcg_ctx, d, a, t); + tcg_temp_free_i64(tcg_ctx, t); +} + +void tcg_gen_gvec_sarv(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop_list[] = { INDEX_op_sarv_vec, 0 }; + static const GVecGen3 g[4] = { + { .fniv = tcg_gen_sarv_mod_vec, + .fno = gen_helper_gvec_sar8v, + .opt_opc = vecop_list, + .vece = MO_8 }, + { .fniv = tcg_gen_sarv_mod_vec, + .fno = gen_helper_gvec_sar16v, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fni4 = tcg_gen_sar_mod_i32, + .fniv = tcg_gen_sarv_mod_vec, + .fno = gen_helper_gvec_sar32v, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fni8 = tcg_gen_sar_mod_i64, + .fniv = tcg_gen_sarv_mod_vec, + .fno = gen_helper_gvec_sar64v, + .opt_opc = vecop_list, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, + }; + + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, &g[vece]); +} + +/* Expand OPSZ bytes worth of three-operand operations using i32 elements. 
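+   Each element pair is compared with setcond (producing 0 or 1), and
+   the result is negated to form the 0/-1 element mask that vector
+   comparisons return.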
 */
+static void expand_cmp_i32(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                           uint32_t oprsz, TCGCond cond)
+{
+    TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+    TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += 4) {
+        tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
+        tcg_gen_ld_i32(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i);
+        tcg_gen_setcond_i32(tcg_ctx, cond, t0, t0, t1);
+        tcg_gen_neg_i32(tcg_ctx, t0, t0);
+        tcg_gen_st_i32(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i);
+    }
+    tcg_temp_free_i32(tcg_ctx, t1);
+    tcg_temp_free_i32(tcg_ctx, t0);
+}
+
+static void expand_cmp_i64(TCGContext *tcg_ctx, uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                           uint32_t oprsz, TCGCond cond)
+{
+    TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx);
+    TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += 8) {
+        tcg_gen_ld_i64(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
+        tcg_gen_ld_i64(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i);
+        tcg_gen_setcond_i64(tcg_ctx, cond, t0, t0, t1);
+        tcg_gen_neg_i64(tcg_ctx, t0, t0);
+        tcg_gen_st_i64(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i);
+    }
+    tcg_temp_free_i64(tcg_ctx, t1);
+    tcg_temp_free_i64(tcg_ctx, t0);
+}
+
+static void expand_cmp_vec(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs,
+                           uint32_t bofs, uint32_t oprsz, uint32_t tysz,
+                           TCGType type, TCGCond cond)
+{
+    TCGv_vec t0 = tcg_temp_new_vec(tcg_ctx, type);
+    TCGv_vec t1 = tcg_temp_new_vec(tcg_ctx, type);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += tysz) {
+        tcg_gen_ld_vec(tcg_ctx, t0, tcg_ctx->cpu_env, aofs + i);
+        tcg_gen_ld_vec(tcg_ctx, t1, tcg_ctx->cpu_env, bofs + i);
+        tcg_gen_cmp_vec(tcg_ctx, cond, vece, t0, t0, t1);
+        tcg_gen_st_vec(tcg_ctx, t0, tcg_ctx->cpu_env, dofs + i);
+    }
+    tcg_temp_free_vec(tcg_ctx, t1);
+    tcg_temp_free_vec(tcg_ctx, t0);
+}
+
+void tcg_gen_gvec_cmp(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, uint32_t dofs,
+                      uint32_t aofs, uint32_t bofs,
+                      uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
+    static gen_helper_gvec_3 * const eq_fn[4] = {
+        gen_helper_gvec_eq8, gen_helper_gvec_eq16,
+        gen_helper_gvec_eq32, gen_helper_gvec_eq64
+    };
+    static gen_helper_gvec_3 * const ne_fn[4] = {
+        gen_helper_gvec_ne8, gen_helper_gvec_ne16,
+        gen_helper_gvec_ne32, gen_helper_gvec_ne64
+    };
+    static gen_helper_gvec_3 * const lt_fn[4] = {
+        gen_helper_gvec_lt8, gen_helper_gvec_lt16,
+        gen_helper_gvec_lt32, gen_helper_gvec_lt64
+    };
+    static gen_helper_gvec_3 * const le_fn[4] = {
+        gen_helper_gvec_le8, gen_helper_gvec_le16,
+        gen_helper_gvec_le32, gen_helper_gvec_le64
+    };
+    static gen_helper_gvec_3 * const ltu_fn[4] = {
+        gen_helper_gvec_ltu8, gen_helper_gvec_ltu16,
+        gen_helper_gvec_ltu32, gen_helper_gvec_ltu64
+    };
+    static gen_helper_gvec_3 * const leu_fn[4] = {
+        gen_helper_gvec_leu8, gen_helper_gvec_leu16,
+        gen_helper_gvec_leu32, gen_helper_gvec_leu64
+    };
+    static gen_helper_gvec_3 * const * const fns[16] = {
+        [TCG_COND_EQ] = eq_fn,
+        [TCG_COND_NE] = ne_fn,
+        [TCG_COND_LT] = lt_fn,
+        [TCG_COND_LE] = le_fn,
+        [TCG_COND_LTU] = ltu_fn,
+        [TCG_COND_LEU] = leu_fn,
+    };
+
+    const TCGOpcode *hold_list;
+    TCGType type;
+    uint32_t some;
+
+    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
+    check_overlap_3(dofs, aofs, bofs, maxsz);
+
+    if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
+        do_dup(tcg_ctx, MO_8, dofs, oprsz, maxsz,
+               NULL, NULL, -(cond == TCG_COND_ALWAYS));
+        return;
+    }
+
+    /*
+     * Implement inline with a vector type, if possible.
+ * Prefer integer when 64-bit host and 64-bit comparison. + */ + hold_list = tcg_swap_vecop_list(cmp_list); + type = choose_vector_type(tcg_ctx, cmp_list, vece, oprsz, + TCG_TARGET_REG_BITS == 64 && vece == MO_64); + switch (type) { + case TCG_TYPE_V256: + /* Recall that ARM SVE allows vector sizes that are not a + * power of 2, but always a multiple of 16. The intent is + * that e.g. size == 80 would be expanded with 2x32 + 1x16. + */ + some = QEMU_ALIGN_DOWN(oprsz, 32); + expand_cmp_vec(tcg_ctx, vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond); + if (some == oprsz) { + break; + } + dofs += some; + aofs += some; + bofs += some; + oprsz -= some; + maxsz -= some; + /* fallthru */ + case TCG_TYPE_V128: + expand_cmp_vec(tcg_ctx, vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond); + break; + case TCG_TYPE_V64: + expand_cmp_vec(tcg_ctx, vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond); + break; + + case 0: + if (vece == MO_64 && check_size_impl(oprsz, 8)) { + expand_cmp_i64(tcg_ctx, dofs, aofs, bofs, oprsz, cond); + } else if (vece == MO_32 && check_size_impl(oprsz, 4)) { + expand_cmp_i32(tcg_ctx, dofs, aofs, bofs, oprsz, cond); + } else { + gen_helper_gvec_3 * const *fn = fns[cond]; + + if (fn == NULL) { + uint32_t tmp; + tmp = aofs, aofs = bofs, bofs = tmp; + cond = tcg_swap_cond(cond); + fn = fns[cond]; + assert(fn != NULL); + } + tcg_gen_gvec_3_ool(tcg_ctx, dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]); + oprsz = maxsz; + } + break; + + default: + g_assert_not_reached(); + } + tcg_swap_vecop_list(hold_list); + + if (oprsz < maxsz) { + expand_clr(tcg_ctx, dofs + oprsz, maxsz - oprsz); + } +} + +static void tcg_gen_bitsel_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c) +{ + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_and_i64(tcg_ctx, t, b, a); + tcg_gen_andc_i64(tcg_ctx, d, c, a); + tcg_gen_or_i64(tcg_ctx, d, d, t); + tcg_temp_free_i64(tcg_ctx, t); +} + +void tcg_gen_gvec_bitsel(TCGContext *tcg_ctx, unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t cofs, + uint32_t oprsz, uint32_t maxsz) +{ + static const GVecGen4 g = { + .fni8 = tcg_gen_bitsel_i64, + .fniv = tcg_gen_bitsel_vec, + .fno = gen_helper_gvec_bitsel, + }; + + tcg_gen_gvec_4(tcg_ctx, dofs, aofs, bofs, cofs, oprsz, maxsz, &g); +} diff --git a/qemu/tcg/tcg-op-vec.c b/qemu/tcg/tcg-op-vec.c new file mode 100644 index 00000000..99343962 --- /dev/null +++ b/qemu/tcg/tcg-op-vec.c @@ -0,0 +1,806 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2018 Linaro, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "tcg/tcg.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-mo.h" + +/* Reduce the number of ifdefs below. This assumes that all uses of + TCGV_HIGH and TCGV_LOW are properly protected by a conditional that + the compiler can eliminate. 
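+   On 64-bit hosts the macros below redirect any remaining use to
+   undefined *_link_error functions, so a mistake fails at link time
+   rather than at run time.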
*/ +#if TCG_TARGET_REG_BITS == 64 +extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64); +extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64); +#define TCGV_LOW TCGV_LOW_link_error +#define TCGV_HIGH TCGV_HIGH_link_error +#endif + +/* + * Vector optional opcode tracking. + * Except for the basic logical operations (and, or, xor), and + * data movement (mov, ld, st, dupi), many vector opcodes are + * optional and may not be supported on the host. Thank Intel + * for the irregularity in their instruction set. + * + * The gvec expanders allow custom vector operations to be composed, + * generally via the .fniv callback in the GVecGen* structures. At + * the same time, in deciding whether to use this hook we need to + * know if the host supports the required operations. This is + * presented as an array of opcodes, terminated by 0. Each opcode + * is assumed to be expanded with the given VECE. + * + * For debugging, we want to validate this array. Therefore, when + * tcg_ctx->vec_opt_opc is non-NULL, the tcg_gen_*_vec expanders + * will validate that their opcode is present in the list. + */ +#ifdef CONFIG_DEBUG_TCG +void tcg_assert_listed_vecop(TCGOpcode op) +{ + const TCGOpcode *p = tcg_ctx->vecop_list; + if (p) { + for (; *p; ++p) { + if (*p == op) { + return; + } + } + g_assert_not_reached(); + } +} +#endif + +bool tcg_can_emit_vecop_list(TCGContext *tcg_ctx, const TCGOpcode *list, + TCGType type, unsigned vece) +{ + if (list == NULL) { + return true; + } + + for (; *list; ++list) { + TCGOpcode opc = *list; + +#ifdef CONFIG_DEBUG_TCG + switch (opc) { + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_mov_vec: + case INDEX_op_dup_vec: + case INDEX_op_dupi_vec: + case INDEX_op_dup2_vec: + case INDEX_op_ld_vec: + case INDEX_op_st_vec: + case INDEX_op_bitsel_vec: + /* These opcodes are mandatory and should not be listed. */ + g_assert_not_reached(); + case INDEX_op_not_vec: + /* These opcodes have generic expansions using the above. */ + g_assert_not_reached(); + default: + break; + } +#endif + + if (tcg_can_emit_vec_op(tcg_ctx, opc, type, vece)) { + continue; + } + + /* + * The opcode list is created by front ends based on what they + * actually invoke. We must mirror the logic in the routines + * below for generic expansions using other opcodes. 
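+ *
+ * For example, a list containing INDEX_op_neg_vec is still accepted
+ * when the host only supports INDEX_op_sub_vec, since neg can be
+ * expanded as a subtraction from zero; see the switch below and
+ * tcg_gen_neg_vec.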
+ */
+        switch (opc) {
+        case INDEX_op_neg_vec:
+            if (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_sub_vec, type, vece)) {
+                continue;
+            }
+            break;
+        case INDEX_op_abs_vec:
+            if (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_sub_vec, type, vece)
+                && (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_smax_vec, type, vece) > 0
+                    || tcg_can_emit_vec_op(tcg_ctx, INDEX_op_sari_vec, type, vece) > 0
+                    || tcg_can_emit_vec_op(tcg_ctx, INDEX_op_cmp_vec, type, vece))) {
+                continue;
+            }
+            break;
+        case INDEX_op_cmpsel_vec:
+        case INDEX_op_smin_vec:
+        case INDEX_op_smax_vec:
+        case INDEX_op_umin_vec:
+        case INDEX_op_umax_vec:
+            if (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_cmp_vec, type, vece)) {
+                continue;
+            }
+            break;
+        default:
+            break;
+        }
+        return false;
+    }
+    return true;
+}
+
+void vec_gen_2(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a)
+{
+    TCGOp *op = tcg_emit_op(tcg_ctx, opc);
+    TCGOP_VECL(op) = type - TCG_TYPE_V64;
+    TCGOP_VECE(op) = vece;
+    op->args[0] = r;
+    op->args[1] = a;
+}
+
+void vec_gen_3(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece,
+               TCGArg r, TCGArg a, TCGArg b)
+{
+    TCGOp *op = tcg_emit_op(tcg_ctx, opc);
+    TCGOP_VECL(op) = type - TCG_TYPE_V64;
+    TCGOP_VECE(op) = vece;
+    op->args[0] = r;
+    op->args[1] = a;
+    op->args[2] = b;
+}
+
+void vec_gen_4(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece,
+               TCGArg r, TCGArg a, TCGArg b, TCGArg c)
+{
+    TCGOp *op = tcg_emit_op(tcg_ctx, opc);
+    TCGOP_VECL(op) = type - TCG_TYPE_V64;
+    TCGOP_VECE(op) = vece;
+    op->args[0] = r;
+    op->args[1] = a;
+    op->args[2] = b;
+    op->args[3] = c;
+}
+
+static void vec_gen_6(TCGContext *tcg_ctx, TCGOpcode opc, TCGType type, unsigned vece, TCGArg r,
+                      TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e)
+{
+    TCGOp *op = tcg_emit_op(tcg_ctx, opc);
+    TCGOP_VECL(op) = type - TCG_TYPE_V64;
+    TCGOP_VECE(op) = vece;
+    op->args[0] = r;
+    op->args[1] = a;
+    op->args[2] = b;
+    op->args[3] = c;
+    op->args[4] = d;
+    op->args[5] = e;
+}
+
+static void vec_gen_op2(TCGContext *tcg_ctx, TCGOpcode opc, unsigned vece, TCGv_vec r, TCGv_vec a)
+{
+    TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r);
+    TCGTemp *at = tcgv_vec_temp(tcg_ctx, a);
+    TCGType type = rt->base_type;
+
+    /* Must have enough inputs for the output. */
+    tcg_debug_assert(at->base_type >= type);
+    vec_gen_2(tcg_ctx, opc, type, vece, temp_arg(rt), temp_arg(at));
+}
+
+static void vec_gen_op3(TCGContext *tcg_ctx, TCGOpcode opc, unsigned vece,
+                        TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r);
+    TCGTemp *at = tcgv_vec_temp(tcg_ctx, a);
+    TCGTemp *bt = tcgv_vec_temp(tcg_ctx, b);
+    TCGType type = rt->base_type;
+
+    /* Must have enough inputs for the output. */
+    tcg_debug_assert(at->base_type >= type);
+    tcg_debug_assert(bt->base_type >= type);
+    vec_gen_3(tcg_ctx, opc, type, vece, temp_arg(rt), temp_arg(at), temp_arg(bt));
+}
+
+void tcg_gen_mov_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_vec a)
+{
+    if (r != a) {
+        vec_gen_op2(tcg_ctx, INDEX_op_mov_vec, 0, r, a);
+    }
+}
+
+#define MO_REG (TCG_TARGET_REG_BITS == 64 ?
MO_64 : MO_32) + +static void do_dupi_vec(TCGContext *tcg_ctx, TCGv_vec r, unsigned vece, TCGArg a) +{ + TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); + vec_gen_2(tcg_ctx, INDEX_op_dupi_vec, rt->base_type, vece, temp_arg(rt), a); +} + +TCGv_vec tcg_const_zeros_vec(TCGContext *tcg_ctx, TCGType type) +{ + TCGv_vec ret = tcg_temp_new_vec(tcg_ctx, type); + do_dupi_vec(tcg_ctx, ret, MO_REG, 0); + return ret; +} + +TCGv_vec tcg_const_ones_vec(TCGContext *tcg_ctx, TCGType type) +{ + TCGv_vec ret = tcg_temp_new_vec(tcg_ctx, type); + do_dupi_vec(tcg_ctx, ret, MO_REG, -1); + return ret; +} + +TCGv_vec tcg_const_zeros_vec_matching(TCGContext *tcg_ctx, TCGv_vec m) +{ + TCGTemp *t = tcgv_vec_temp(tcg_ctx, m); + return tcg_const_zeros_vec(tcg_ctx, t->base_type); +} + +TCGv_vec tcg_const_ones_vec_matching(TCGContext *tcg_ctx, TCGv_vec m) +{ + TCGTemp *t = tcgv_vec_temp(tcg_ctx, m); + return tcg_const_ones_vec(tcg_ctx, t->base_type); +} + +void tcg_gen_dup64i_vec(TCGContext *tcg_ctx, TCGv_vec r, uint64_t a) +{ + if (TCG_TARGET_REG_BITS == 32 && a == deposit64(a, 32, 32, a)) { + do_dupi_vec(tcg_ctx, r, MO_32, a); + } else if (TCG_TARGET_REG_BITS == 64 || a == (uint64_t)(int32_t)a) { + do_dupi_vec(tcg_ctx, r, MO_64, a); + } else { + TCGv_i64 c = tcg_const_i64(tcg_ctx, a); + tcg_gen_dup_i64_vec(tcg_ctx, MO_64, r, c); + tcg_temp_free_i64(tcg_ctx, c); + } +} + +void tcg_gen_dup32i_vec(TCGContext *tcg_ctx, TCGv_vec r, uint32_t a) +{ + do_dupi_vec(tcg_ctx, r, MO_REG, dup_const(MO_32, a)); +} + +void tcg_gen_dup16i_vec(TCGContext *tcg_ctx, TCGv_vec r, uint32_t a) +{ + do_dupi_vec(tcg_ctx, r, MO_REG, dup_const(MO_16, a)); +} + +void tcg_gen_dup8i_vec(TCGContext *tcg_ctx, TCGv_vec r, uint32_t a) +{ + do_dupi_vec(tcg_ctx, r, MO_REG, dup_const(MO_8, a)); +} + +void tcg_gen_dupi_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, uint64_t a) +{ + do_dupi_vec(tcg_ctx, r, MO_REG, dup_const(vece, a)); +} + +void tcg_gen_dup_i64_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_i64 a) +{ + TCGArg ri = tcgv_vec_arg(tcg_ctx, r); + TCGTemp *rt = arg_temp(ri); + TCGType type = rt->base_type; + +#if TCG_TARGET_REG_BITS == 64 + TCGArg ai = tcgv_i64_arg(tcg_ctx, a); + vec_gen_2(tcg_ctx, INDEX_op_dup_vec, type, vece, ri, ai); +#else + if (vece == MO_64) { + TCGArg al = tcgv_i32_arg(tcg_ctx, TCGV_LOW(tcg_ctx, a)); + TCGArg ah = tcgv_i32_arg(tcg_ctx, TCGV_HIGH(tcg_ctx, a)); + vec_gen_3(tcg_ctx, INDEX_op_dup2_vec, type, MO_64, ri, al, ah); + } else { + TCGArg ai = tcgv_i32_arg(tcg_ctx, TCGV_LOW(tcg_ctx, a)); + vec_gen_2(tcg_ctx, INDEX_op_dup_vec, type, vece, ri, ai); + } +#endif +} + +void tcg_gen_dup_i32_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_i32 a) +{ + TCGArg ri = tcgv_vec_arg(tcg_ctx, r); + TCGArg ai = tcgv_i32_arg(tcg_ctx, a); + TCGTemp *rt = arg_temp(ri); + TCGType type = rt->base_type; + + vec_gen_2(tcg_ctx, INDEX_op_dup_vec, type, vece, ri, ai); +} + +void tcg_gen_dup_mem_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_ptr b, + tcg_target_long ofs) +{ + TCGArg ri = tcgv_vec_arg(tcg_ctx, r); + TCGArg bi = tcgv_ptr_arg(tcg_ctx, b); + TCGTemp *rt = arg_temp(ri); + TCGType type = rt->base_type; + + vec_gen_3(tcg_ctx, INDEX_op_dupm_vec, type, vece, ri, bi, ofs); +} + +static void vec_gen_ldst(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_vec r, TCGv_ptr b, TCGArg o) +{ + TCGArg ri = tcgv_vec_arg(tcg_ctx, r); + TCGArg bi = tcgv_ptr_arg(tcg_ctx, b); + TCGTemp *rt = arg_temp(ri); + TCGType type = rt->base_type; + + vec_gen_3(tcg_ctx, opc, type, 0, ri, bi, o); +} + +void tcg_gen_ld_vec(TCGContext *tcg_ctx, 
TCGv_vec r, TCGv_ptr b, TCGArg o)
+{
+    vec_gen_ldst(tcg_ctx, INDEX_op_ld_vec, r, b, o);
+}
+
+void tcg_gen_st_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr b, TCGArg o)
+{
+    vec_gen_ldst(tcg_ctx, INDEX_op_st_vec, r, b, o);
+}
+
+void tcg_gen_stl_vec(TCGContext *tcg_ctx, TCGv_vec r, TCGv_ptr b, TCGArg o, TCGType low_type)
+{
+    TCGArg ri = tcgv_vec_arg(tcg_ctx, r);
+    TCGArg bi = tcgv_ptr_arg(tcg_ctx, b);
+    TCGTemp *rt = arg_temp(ri);
+    TCGType type = rt->base_type;
+
+    tcg_debug_assert(low_type >= TCG_TYPE_V64);
+    tcg_debug_assert(low_type <= type);
+    vec_gen_3(tcg_ctx, INDEX_op_st_vec, low_type, 0, ri, bi, o);
+}
+
+void tcg_gen_and_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    vec_gen_op3(tcg_ctx, INDEX_op_and_vec, 0, r, a, b);
+}
+
+void tcg_gen_or_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    vec_gen_op3(tcg_ctx, INDEX_op_or_vec, 0, r, a, b);
+}
+
+void tcg_gen_xor_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    vec_gen_op3(tcg_ctx, INDEX_op_xor_vec, 0, r, a, b);
+}
+
+void tcg_gen_andc_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    if (TCG_TARGET_HAS_andc_vec) {
+        vec_gen_op3(tcg_ctx, INDEX_op_andc_vec, 0, r, a, b);
+    } else {
+        TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, r);
+        tcg_gen_not_vec(tcg_ctx, 0, t, b);
+        tcg_gen_and_vec(tcg_ctx, 0, r, a, t);
+        tcg_temp_free_vec(tcg_ctx, t);
+    }
+}
+
+void tcg_gen_orc_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    if (TCG_TARGET_HAS_orc_vec) {
+        vec_gen_op3(tcg_ctx, INDEX_op_orc_vec, 0, r, a, b);
+    } else {
+        TCGv_vec t = tcg_temp_new_vec_matching(tcg_ctx, r);
+        tcg_gen_not_vec(tcg_ctx, 0, t, b);
+        tcg_gen_or_vec(tcg_ctx, 0, r, a, t);
+        tcg_temp_free_vec(tcg_ctx, t);
+    }
+}
+
+void tcg_gen_nand_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    /* TODO: Add TCG_TARGET_HAS_nand_vec when adding a backend that supports it. */
+    tcg_gen_and_vec(tcg_ctx, 0, r, a, b);
+    tcg_gen_not_vec(tcg_ctx, 0, r, r);
+}
+
+void tcg_gen_nor_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    /* TODO: Add TCG_TARGET_HAS_nor_vec when adding a backend that supports it. */
+    tcg_gen_or_vec(tcg_ctx, 0, r, a, b);
+    tcg_gen_not_vec(tcg_ctx, 0, r, r);
+}
+
+void tcg_gen_eqv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    /* TODO: Add TCG_TARGET_HAS_eqv_vec when adding a backend that supports it.
*/ + tcg_gen_xor_vec(tcg_ctx, 0, r, a, b); + tcg_gen_not_vec(tcg_ctx, 0, r, r); +} + +static bool do_op2(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc) +{ + TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); + TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); + TCGArg ri = temp_arg(rt); + TCGArg ai = temp_arg(at); + TCGType type = rt->base_type; + int can; + + tcg_debug_assert(at->base_type >= type); + tcg_assert_listed_vecop(opc); + can = tcg_can_emit_vec_op(tcg_ctx, opc, type, vece); + if (can > 0) { + vec_gen_2(tcg_ctx, opc, type, vece, ri, ai); + } else if (can < 0) { + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + tcg_expand_vec_op(tcg_ctx, opc, type, vece, ri, ai); + tcg_swap_vecop_list(hold_list); + } else { + return false; + } + return true; +} + +void tcg_gen_not_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a) +{ + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + + if (!TCG_TARGET_HAS_not_vec || !do_op2(tcg_ctx, vece, r, a, INDEX_op_not_vec)) { + TCGv_vec t = tcg_const_ones_vec_matching(tcg_ctx, r); + tcg_gen_xor_vec(tcg_ctx, 0, r, a, t); + tcg_temp_free_vec(tcg_ctx, t); + } + tcg_swap_vecop_list(hold_list); +} + +void tcg_gen_neg_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a) +{ + const TCGOpcode *hold_list; + + tcg_assert_listed_vecop(INDEX_op_neg_vec); + hold_list = tcg_swap_vecop_list(NULL); + + if (!TCG_TARGET_HAS_neg_vec || !do_op2(tcg_ctx, vece, r, a, INDEX_op_neg_vec)) { + TCGv_vec t = tcg_const_zeros_vec_matching(tcg_ctx, r); + tcg_gen_sub_vec(tcg_ctx, vece, r, t, a); + tcg_temp_free_vec(tcg_ctx, t); + } + tcg_swap_vecop_list(hold_list); +} + +void tcg_gen_abs_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a) +{ + const TCGOpcode *hold_list; + + tcg_assert_listed_vecop(INDEX_op_abs_vec); + hold_list = tcg_swap_vecop_list(NULL); + + if (!do_op2(tcg_ctx, vece, r, a, INDEX_op_abs_vec)) { + TCGType type = tcgv_vec_temp(tcg_ctx, r)->base_type; + TCGv_vec t = tcg_temp_new_vec(tcg_ctx, type); + + tcg_debug_assert(tcg_can_emit_vec_op(tcg_ctx, INDEX_op_sub_vec, type, vece)); + if (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_smax_vec, type, vece) > 0) { + tcg_gen_neg_vec(tcg_ctx, vece, t, a); + tcg_gen_smax_vec(tcg_ctx, vece, r, a, t); + } else { + if (tcg_can_emit_vec_op(tcg_ctx, INDEX_op_sari_vec, type, vece) > 0) { + tcg_gen_sari_vec(tcg_ctx, vece, t, a, (8 << vece) - 1); + } else { + do_dupi_vec(tcg_ctx, t, MO_REG, 0); + tcg_gen_cmp_vec(tcg_ctx, TCG_COND_LT, vece, t, a, t); + } + tcg_gen_xor_vec(tcg_ctx, vece, r, a, t); + tcg_gen_sub_vec(tcg_ctx, vece, r, r, t); + } + + tcg_temp_free_vec(tcg_ctx, t); + } + tcg_swap_vecop_list(hold_list); +} + +static void do_shifti(TCGContext *tcg_ctx, TCGOpcode opc, unsigned vece, + TCGv_vec r, TCGv_vec a, int64_t i) +{ + TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); + TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); + TCGArg ri = temp_arg(rt); + TCGArg ai = temp_arg(at); + TCGType type = rt->base_type; + int can; + + tcg_debug_assert(at->base_type == type); + tcg_debug_assert(i >= 0 && i < (8 << vece)); + tcg_assert_listed_vecop(opc); + + if (i == 0) { + tcg_gen_mov_vec(tcg_ctx, r, a); + return; + } + + can = tcg_can_emit_vec_op(tcg_ctx, opc, type, vece); + if (can > 0) { + vec_gen_3(tcg_ctx, opc, type, vece, ri, ai, i); + } else { + /* We leave the choice of expansion via scalar or vector shift + to the target. Often, but not always, dupi can feed a vector + shift easier than a scalar. 
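+       Hence for can < 0 the immediate is handed straight to
+       tcg_expand_vec_op and the backend chooses the form.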
*/ + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + tcg_debug_assert(can < 0); + tcg_expand_vec_op(tcg_ctx, opc, type, vece, ri, ai, i); + tcg_swap_vecop_list(hold_list); + } +} + +void tcg_gen_shli_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i) +{ + do_shifti(tcg_ctx, INDEX_op_shli_vec, vece, r, a, i); +} + +void tcg_gen_shri_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i) +{ + do_shifti(tcg_ctx, INDEX_op_shri_vec, vece, r, a, i); +} + +void tcg_gen_sari_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i) +{ + do_shifti(tcg_ctx, INDEX_op_sari_vec, vece, r, a, i); +} + +void tcg_gen_cmp_vec(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, + TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); + TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); + TCGTemp *bt = tcgv_vec_temp(tcg_ctx, b); + TCGArg ri = temp_arg(rt); + TCGArg ai = temp_arg(at); + TCGArg bi = temp_arg(bt); + TCGType type = rt->base_type; + int can; + + tcg_debug_assert(at->base_type >= type); + tcg_debug_assert(bt->base_type >= type); + tcg_assert_listed_vecop(INDEX_op_cmp_vec); + can = tcg_can_emit_vec_op(tcg_ctx, INDEX_op_cmp_vec, type, vece); + if (can > 0) { + vec_gen_4(tcg_ctx, INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond); + } else { + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + tcg_debug_assert(can < 0); + tcg_expand_vec_op(tcg_ctx, INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond); + tcg_swap_vecop_list(hold_list); + } +} + +static bool do_op3(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, + TCGv_vec b, TCGOpcode opc) +{ + TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); + TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); + TCGTemp *bt = tcgv_vec_temp(tcg_ctx, b); + TCGArg ri = temp_arg(rt); + TCGArg ai = temp_arg(at); + TCGArg bi = temp_arg(bt); + TCGType type = rt->base_type; + int can; + + tcg_debug_assert(at->base_type >= type); + tcg_debug_assert(bt->base_type >= type); + tcg_assert_listed_vecop(opc); + can = tcg_can_emit_vec_op(tcg_ctx, opc, type, vece); + if (can > 0) { + vec_gen_3(tcg_ctx, opc, type, vece, ri, ai, bi); + } else if (can < 0) { + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + tcg_expand_vec_op(tcg_ctx, opc, type, vece, ri, ai, bi); + tcg_swap_vecop_list(hold_list); + } else { + return false; + } + return true; +} + +static void do_op3_nofail(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, + TCGv_vec b, TCGOpcode opc) +{ + bool ok = do_op3(tcg_ctx, vece, r, a, b, opc); + tcg_debug_assert(ok); +} + +void tcg_gen_add_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_add_vec); +} + +void tcg_gen_sub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_sub_vec); +} + +void tcg_gen_mul_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_mul_vec); +} + +void tcg_gen_ssadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_ssadd_vec); +} + +void tcg_gen_usadd_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_usadd_vec); +} + +void tcg_gen_sssub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_sssub_vec); +} + 
+void tcg_gen_ussub_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_ussub_vec); +} + +static void do_minmax(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, + TCGv_vec b, TCGOpcode opc, TCGCond cond) +{ + if (!do_op3(tcg_ctx, vece, r, a, b, opc)) { + tcg_gen_cmpsel_vec(tcg_ctx, cond, vece, r, a, b, a, b); + } +} + +void tcg_gen_smin_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_minmax(tcg_ctx, vece, r, a, b, INDEX_op_smin_vec, TCG_COND_LT); +} + +void tcg_gen_umin_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_minmax(tcg_ctx, vece, r, a, b, INDEX_op_umin_vec, TCG_COND_LTU); +} + +void tcg_gen_smax_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_minmax(tcg_ctx, vece, r, a, b, INDEX_op_smax_vec, TCG_COND_GT); +} + +void tcg_gen_umax_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_minmax(tcg_ctx, vece, r, a, b, INDEX_op_umax_vec, TCG_COND_GTU); +} + +void tcg_gen_shlv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_shlv_vec); +} + +void tcg_gen_shrv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_shrv_vec); +} + +void tcg_gen_sarv_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) +{ + do_op3_nofail(tcg_ctx, vece, r, a, b, INDEX_op_sarv_vec); +} + +static void do_shifts(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, + TCGv_i32 s, TCGOpcode opc_s, TCGOpcode opc_v) +{ + TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); + TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); + TCGTemp *st = tcgv_i32_temp(tcg_ctx, s); + TCGArg ri = temp_arg(rt); + TCGArg ai = temp_arg(at); + TCGArg si = temp_arg(st); + TCGType type = rt->base_type; + const TCGOpcode *hold_list; + int can; + + tcg_debug_assert(at->base_type >= type); + tcg_assert_listed_vecop(opc_s); + hold_list = tcg_swap_vecop_list(NULL); + + can = tcg_can_emit_vec_op(tcg_ctx, opc_s, type, vece); + if (can > 0) { + vec_gen_3(tcg_ctx, opc_s, type, vece, ri, ai, si); + } else if (can < 0) { + tcg_expand_vec_op(tcg_ctx, opc_s, type, vece, ri, ai, si); + } else { + TCGv_vec vec_s = tcg_temp_new_vec(tcg_ctx, type); + + if (vece == MO_64) { + TCGv_i64 s64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, s64, s); + tcg_gen_dup_i64_vec(tcg_ctx, MO_64, vec_s, s64); + tcg_temp_free_i64(tcg_ctx, s64); + } else { + tcg_gen_dup_i32_vec(tcg_ctx, vece, vec_s, s); + } + do_op3_nofail(tcg_ctx, vece, r, a, vec_s, opc_v); + tcg_temp_free_vec(tcg_ctx, vec_s); + } + tcg_swap_vecop_list(hold_list); +} + +void tcg_gen_shls_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b) +{ + do_shifts(tcg_ctx, vece, r, a, b, INDEX_op_shls_vec, INDEX_op_shlv_vec); +} + +void tcg_gen_shrs_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b) +{ + do_shifts(tcg_ctx, vece, r, a, b, INDEX_op_shrs_vec, INDEX_op_shrv_vec); +} + +void tcg_gen_sars_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b) +{ + do_shifts(tcg_ctx, vece, r, a, b, INDEX_op_sars_vec, INDEX_op_sarv_vec); +} + +void tcg_gen_bitsel_vec(TCGContext *tcg_ctx, unsigned vece, TCGv_vec r, TCGv_vec a, + TCGv_vec b, TCGv_vec c) +{ + TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); + TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); + TCGTemp *bt = 
tcgv_vec_temp(tcg_ctx, b); + TCGTemp *ct = tcgv_vec_temp(tcg_ctx, c); + TCGType type = rt->base_type; + + tcg_debug_assert(at->base_type >= type); + tcg_debug_assert(bt->base_type >= type); + tcg_debug_assert(ct->base_type >= type); + + if (TCG_TARGET_HAS_bitsel_vec) { + vec_gen_4(tcg_ctx, INDEX_op_bitsel_vec, type, MO_8, + temp_arg(rt), temp_arg(at), temp_arg(bt), temp_arg(ct)); + } else { + TCGv_vec t = tcg_temp_new_vec(tcg_ctx, type); + tcg_gen_and_vec(tcg_ctx, MO_8, t, a, b); + tcg_gen_andc_vec(tcg_ctx, MO_8, r, c, a); + tcg_gen_or_vec(tcg_ctx, MO_8, r, r, t); + tcg_temp_free_vec(tcg_ctx, t); + } +} + +void tcg_gen_cmpsel_vec(TCGContext *tcg_ctx, TCGCond cond, unsigned vece, TCGv_vec r, + TCGv_vec a, TCGv_vec b, TCGv_vec c, TCGv_vec d) +{ + TCGTemp *rt = tcgv_vec_temp(tcg_ctx, r); + TCGTemp *at = tcgv_vec_temp(tcg_ctx, a); + TCGTemp *bt = tcgv_vec_temp(tcg_ctx, b); + TCGTemp *ct = tcgv_vec_temp(tcg_ctx, c); + TCGTemp *dt = tcgv_vec_temp(tcg_ctx, d); + TCGArg ri = temp_arg(rt); + TCGArg ai = temp_arg(at); + TCGArg bi = temp_arg(bt); + TCGArg ci = temp_arg(ct); + TCGArg di = temp_arg(dt); + TCGType type = rt->base_type; + const TCGOpcode *hold_list; + int can; + + tcg_debug_assert(at->base_type >= type); + tcg_debug_assert(bt->base_type >= type); + tcg_debug_assert(ct->base_type >= type); + tcg_debug_assert(dt->base_type >= type); + + tcg_assert_listed_vecop(INDEX_op_cmpsel_vec); + hold_list = tcg_swap_vecop_list(NULL); + can = tcg_can_emit_vec_op(tcg_ctx, INDEX_op_cmpsel_vec, type, vece); + + if (can > 0) { + vec_gen_6(tcg_ctx, INDEX_op_cmpsel_vec, type, vece, ri, ai, bi, ci, di, cond); + } else if (can < 0) { + tcg_expand_vec_op(tcg_ctx, INDEX_op_cmpsel_vec, type, vece, + ri, ai, bi, ci, di, cond); + } else { + TCGv_vec t = tcg_temp_new_vec(tcg_ctx, type); + tcg_gen_cmp_vec(tcg_ctx, cond, vece, t, a, b); + tcg_gen_bitsel_vec(tcg_ctx, vece, r, t, c, d); + tcg_temp_free_vec(tcg_ctx, t); + } + tcg_swap_vecop_list(hold_list); +} diff --git a/qemu/tcg/tcg-op.c b/qemu/tcg/tcg-op.c new file mode 100644 index 00000000..54829443 --- /dev/null +++ b/qemu/tcg/tcg-op.c @@ -0,0 +1,3372 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "tcg/tcg.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-mo.h" +#include "trace/mem.h" + +/* Reduce the number of ifdefs below. 
This assumes that all uses of + TCGV_HIGH and TCGV_LOW are properly protected by a conditional that + the compiler can eliminate. */ +#if TCG_TARGET_REG_BITS == 64 +extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64); +extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64); +#define TCGV_LOW TCGV_LOW_link_error +#define TCGV_HIGH TCGV_HIGH_link_error +#endif + +void tcg_gen_op1(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1) +{ + TCGOp *op = tcg_emit_op(tcg_ctx, opc); + op->args[0] = a1; +} + +void tcg_gen_op2(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2) +{ + TCGOp *op = tcg_emit_op(tcg_ctx, opc); + op->args[0] = a1; + op->args[1] = a2; +} + +void tcg_gen_op3(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3) +{ + TCGOp *op = tcg_emit_op(tcg_ctx, opc); + op->args[0] = a1; + op->args[1] = a2; + op->args[2] = a3; +} + +void tcg_gen_op4(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4) +{ + TCGOp *op = tcg_emit_op(tcg_ctx, opc); + op->args[0] = a1; + op->args[1] = a2; + op->args[2] = a3; + op->args[3] = a4; +} + +void tcg_gen_op5(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, + TCGArg a4, TCGArg a5) +{ + TCGOp *op = tcg_emit_op(tcg_ctx, opc); + op->args[0] = a1; + op->args[1] = a2; + op->args[2] = a3; + op->args[3] = a4; + op->args[4] = a5; +} + +void tcg_gen_op6(TCGContext *tcg_ctx, TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, + TCGArg a4, TCGArg a5, TCGArg a6) +{ + TCGOp *op = tcg_emit_op(tcg_ctx, opc); + op->args[0] = a1; + op->args[1] = a2; + op->args[2] = a3; + op->args[3] = a4; + op->args[4] = a5; + op->args[5] = a6; +} + +void tcg_gen_mb(TCGContext *tcg_ctx, TCGBar mb_type) +{ + if (tcg_ctx->tb_cflags & CF_PARALLEL) { + tcg_gen_op1(tcg_ctx, INDEX_op_mb, mb_type); + } +} + +/* 32 bit ops */ + +void tcg_gen_addi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i32(tcg_ctx, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_add_i32(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_subfi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2) +{ + if (arg1 == 0 && TCG_TARGET_HAS_neg_i32) { + /* Don't recurse with tcg_gen_neg_i32. */ + tcg_gen_op2_i32(tcg_ctx, INDEX_op_neg_i32, ret, arg2); + } else { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg1); + tcg_gen_sub_i32(tcg_ctx, ret, t0, arg2); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_subi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i32(tcg_ctx, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_sub_i32(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_andi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + TCGv_i32 t0; + /* Some cases can be optimized here. */ + switch (arg2) { + case 0: + tcg_gen_movi_i32(tcg_ctx, ret, 0); + return; + case -1: + tcg_gen_mov_i32(tcg_ctx, ret, arg1); + return; + case 0xff: + /* Don't recurse with tcg_gen_ext8u_i32. 
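+           With the ext8u op missing, that helper would itself fall
+           back to tcg_gen_andi_i32 with 0xff.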
*/ + if (TCG_TARGET_HAS_ext8u_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext8u_i32, ret, arg1); + return; + } + break; + case 0xffff: + if (TCG_TARGET_HAS_ext16u_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext16u_i32, ret, arg1); + return; + } + break; + } + t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_and_i32(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); +} + +void tcg_gen_ori_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + /* Some cases can be optimized here. */ + if (arg2 == -1) { + tcg_gen_movi_i32(tcg_ctx, ret, -1); + } else if (arg2 == 0) { + tcg_gen_mov_i32(tcg_ctx, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_or_i32(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_xori_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + /* Some cases can be optimized here. */ + if (arg2 == 0) { + tcg_gen_mov_i32(tcg_ctx, ret, arg1); + } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) { + /* Don't recurse with tcg_gen_not_i32. */ + tcg_gen_op2_i32(tcg_ctx, INDEX_op_not_i32, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_xor_i32(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_shli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + tcg_debug_assert(arg2 >= 0 && arg2 < 32); + if (arg2 == 0) { + tcg_gen_mov_i32(tcg_ctx, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_shl_i32(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_shri_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + tcg_debug_assert(arg2 >= 0 && arg2 < 32); + if (arg2 == 0) { + tcg_gen_mov_i32(tcg_ctx, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_shr_i32(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_sari_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + tcg_debug_assert(arg2 >= 0 && arg2 < 32); + if (arg2 == 0) { + tcg_gen_mov_i32(tcg_ctx, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_sar_i32(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_brcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_br(tcg_ctx, l); + } else if (cond != TCG_COND_NEVER) { + l->refs++; + tcg_gen_op4ii_i32(tcg_ctx, INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l)); + } +} + +void tcg_gen_brcondi_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_br(tcg_ctx, l); + } else if (cond != TCG_COND_NEVER) { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_brcond_i32(tcg_ctx, cond, arg1, t0, l); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_setcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_movi_i32(tcg_ctx, ret, 1); + } else if (cond == TCG_COND_NEVER) { + tcg_gen_movi_i32(tcg_ctx, ret, 0); + } else { + tcg_gen_op4i_i32(tcg_ctx, INDEX_op_setcond_i32, ret, arg1, arg2, cond); + } +} + +void tcg_gen_setcondi_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, + TCGv_i32 arg1, int32_t arg2) +{ + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_setcond_i32(tcg_ctx, cond, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); +} + +void tcg_gen_muli_i32(TCGContext 
*tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
+{
+ if (arg2 == 0) {
+ tcg_gen_movi_i32(tcg_ctx, ret, 0);
+ } else if (is_power_of_2(arg2)) {
+ tcg_gen_shli_i32(tcg_ctx, ret, arg1, ctz32(arg2));
+ } else {
+ TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2);
+ tcg_gen_mul_i32(tcg_ctx, ret, arg1, t0);
+ tcg_temp_free_i32(tcg_ctx, t0);
+ }
+}
+
+void tcg_gen_div_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_div_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_div_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div2_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+ tcg_gen_sari_i32(tcg_ctx, t0, arg1, 31);
+ tcg_gen_op5_i32(tcg_ctx, INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
+ tcg_temp_free_i32(tcg_ctx, t0);
+ } else {
+ gen_helper_div_i32(tcg_ctx, ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_rem_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_rem_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_rem_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_div_i32, t0, arg1, arg2);
+ tcg_gen_mul_i32(tcg_ctx, t0, t0, arg2);
+ tcg_gen_sub_i32(tcg_ctx, ret, arg1, t0);
+ tcg_temp_free_i32(tcg_ctx, t0);
+ } else if (TCG_TARGET_HAS_div2_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+ tcg_gen_sari_i32(tcg_ctx, t0, arg1, 31);
+ tcg_gen_op5_i32(tcg_ctx, INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
+ tcg_temp_free_i32(tcg_ctx, t0);
+ } else {
+ gen_helper_rem_i32(tcg_ctx, ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_divu_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_div_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_divu_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div2_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+ tcg_gen_movi_i32(tcg_ctx, t0, 0);
+ tcg_gen_op5_i32(tcg_ctx, INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
+ tcg_temp_free_i32(tcg_ctx, t0);
+ } else {
+ gen_helper_divu_i32(tcg_ctx, ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_remu_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_rem_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_remu_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_div_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_divu_i32, t0, arg1, arg2);
+ tcg_gen_mul_i32(tcg_ctx, t0, t0, arg2);
+ tcg_gen_sub_i32(tcg_ctx, ret, arg1, t0);
+ tcg_temp_free_i32(tcg_ctx, t0);
+ } else if (TCG_TARGET_HAS_div2_i32) {
+ TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+ tcg_gen_movi_i32(tcg_ctx, t0, 0);
+ tcg_gen_op5_i32(tcg_ctx, INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
+ tcg_temp_free_i32(tcg_ctx, t0);
+ } else {
+ gen_helper_remu_i32(tcg_ctx, ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_andc_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_andc_i32, ret, arg1, arg2);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+ tcg_gen_not_i32(tcg_ctx, t0, arg2);
+ tcg_gen_and_i32(tcg_ctx, ret, arg1, t0);
+ tcg_temp_free_i32(tcg_ctx, t0);
+ }
+}
+
+void tcg_gen_eqv_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_eqv_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_eqv_i32, ret, arg1, arg2);
+ } else {
+ tcg_gen_xor_i32(tcg_ctx, ret, arg1, arg2);
+ tcg_gen_not_i32(tcg_ctx, ret, ret);
+ }
+}
+
+void tcg_gen_nand_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if 
(TCG_TARGET_HAS_nand_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_nand_i32, ret, arg1, arg2);
+ } else {
+ tcg_gen_and_i32(tcg_ctx, ret, arg1, arg2);
+ tcg_gen_not_i32(tcg_ctx, ret, ret);
+ }
+}
+
+void tcg_gen_nor_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_nor_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_nor_i32, ret, arg1, arg2);
+ } else {
+ tcg_gen_or_i32(tcg_ctx, ret, arg1, arg2);
+ tcg_gen_not_i32(tcg_ctx, ret, ret);
+ }
+}
+
+void tcg_gen_orc_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_orc_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_orc_i32, ret, arg1, arg2);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx);
+ tcg_gen_not_i32(tcg_ctx, t0, arg2);
+ tcg_gen_or_i32(tcg_ctx, ret, arg1, t0);
+ tcg_temp_free_i32(tcg_ctx, t0);
+ }
+}
+
+void tcg_gen_clz_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_clz_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_clz_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_clz_i64) {
+ TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
+ TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx);
+ tcg_gen_extu_i32_i64(tcg_ctx, t1, arg1);
+ tcg_gen_extu_i32_i64(tcg_ctx, t2, arg2);
+ tcg_gen_addi_i64(tcg_ctx, t2, t2, 32);
+ tcg_gen_clz_i64(tcg_ctx, t1, t1, t2);
+ tcg_gen_extrl_i64_i32(tcg_ctx, ret, t1);
+ tcg_temp_free_i64(tcg_ctx, t1);
+ tcg_temp_free_i64(tcg_ctx, t2);
+ tcg_gen_subi_i32(tcg_ctx, ret, ret, 32);
+ } else {
+ gen_helper_clz_i32(tcg_ctx, ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_clzi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
+{
+ TCGv_i32 t = tcg_const_i32(tcg_ctx, arg2);
+ tcg_gen_clz_i32(tcg_ctx, ret, arg1, t);
+ tcg_temp_free_i32(tcg_ctx, t);
+}
+
+void tcg_gen_ctz_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_ctz_i32) {
+ tcg_gen_op3_i32(tcg_ctx, INDEX_op_ctz_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_ctz_i64) {
+ TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx);
+ TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx);
+ tcg_gen_extu_i32_i64(tcg_ctx, t1, arg1);
+ tcg_gen_extu_i32_i64(tcg_ctx, t2, arg2);
+ tcg_gen_ctz_i64(tcg_ctx, t1, t1, t2);
+ tcg_gen_extrl_i64_i32(tcg_ctx, ret, t1);
+ tcg_temp_free_i64(tcg_ctx, t1);
+ tcg_temp_free_i64(tcg_ctx, t2);
+ } else if (TCG_TARGET_HAS_ctpop_i32
+ || TCG_TARGET_HAS_ctpop_i64
+ || TCG_TARGET_HAS_clz_i32
+ || TCG_TARGET_HAS_clz_i64) {
+ TCGv_i32 z, t = tcg_temp_new_i32(tcg_ctx);
+
+ if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
+ tcg_gen_subi_i32(tcg_ctx, t, arg1, 1);
+ tcg_gen_andc_i32(tcg_ctx, t, t, arg1);
+ tcg_gen_ctpop_i32(tcg_ctx, t, t);
+ } else {
+ /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */
+ tcg_gen_neg_i32(tcg_ctx, t, arg1);
+ tcg_gen_and_i32(tcg_ctx, t, t, arg1);
+ tcg_gen_clzi_i32(tcg_ctx, t, t, 32);
+ tcg_gen_xori_i32(tcg_ctx, t, t, 31);
+ }
+ z = tcg_const_i32(tcg_ctx, 0);
+ tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, ret, arg1, z, arg2, t);
+ tcg_temp_free_i32(tcg_ctx, t);
+ tcg_temp_free_i32(tcg_ctx, z);
+ } else {
+ gen_helper_ctz_i32(tcg_ctx, ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_ctzi_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
+{
+ if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
+ /* This equivalence has the advantage of not requiring a fixup. 
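For x != 0, (x - 1) & ~x keeps exactly the bits below the lowest set
+ bit of x, so its population count equals ctz(x); for x == 0 it is all
+ ones and ctpop returns 32, matching the arg2 == 32 default.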
*/ + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + tcg_gen_subi_i32(tcg_ctx, t, arg1, 1); + tcg_gen_andc_i32(tcg_ctx, t, t, arg1); + tcg_gen_ctpop_i32(tcg_ctx, ret, t); + tcg_temp_free_i32(tcg_ctx, t); + } else { + TCGv_i32 t = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_ctz_i32(tcg_ctx, ret, arg1, t); + tcg_temp_free_i32(tcg_ctx, t); + } +} + +void tcg_gen_clrsb_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_clz_i32) { + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + tcg_gen_sari_i32(tcg_ctx, t, arg, 31); + tcg_gen_xor_i32(tcg_ctx, t, t, arg); + tcg_gen_clzi_i32(tcg_ctx, t, t, 32); + tcg_gen_subi_i32(tcg_ctx, ret, t, 1); + tcg_temp_free_i32(tcg_ctx, t); + } else { + gen_helper_clrsb_i32(tcg_ctx, ret, arg); + } +} + +void tcg_gen_ctpop_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1) +{ + if (TCG_TARGET_HAS_ctpop_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_ctpop_i32, ret, arg1); + } else if (TCG_TARGET_HAS_ctpop_i64) { + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, t, arg1); + tcg_gen_ctpop_i64(tcg_ctx, t, t); + tcg_gen_extrl_i64_i32(tcg_ctx, ret, t); + tcg_temp_free_i64(tcg_ctx, t); + } else { + gen_helper_ctpop_i32(tcg_ctx, ret, arg1); + } +} + +void tcg_gen_rotl_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_rot_i32) { + tcg_gen_op3_i32(tcg_ctx, INDEX_op_rotl_i32, ret, arg1, arg2); + } else { + TCGv_i32 t0, t1; + + t0 = tcg_temp_new_i32(tcg_ctx); + t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shl_i32(tcg_ctx, t0, arg1, arg2); + tcg_gen_subfi_i32(tcg_ctx, t1, 32, arg2); + tcg_gen_shr_i32(tcg_ctx, t1, arg1, t1); + tcg_gen_or_i32(tcg_ctx, ret, t0, t1); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + } +} + +void tcg_gen_rotli_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2) +{ + tcg_debug_assert(arg2 < 32); + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i32(tcg_ctx, ret, arg1); + } else if (TCG_TARGET_HAS_rot_i32) { + TCGv_i32 t0 = tcg_const_i32(tcg_ctx, arg2); + tcg_gen_rotl_i32(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } else { + TCGv_i32 t0, t1; + t0 = tcg_temp_new_i32(tcg_ctx); + t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shli_i32(tcg_ctx, t0, arg1, arg2); + tcg_gen_shri_i32(tcg_ctx, t1, arg1, 32 - arg2); + tcg_gen_or_i32(tcg_ctx, ret, t0, t1); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + } +} + +void tcg_gen_rotr_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_rot_i32) { + tcg_gen_op3_i32(tcg_ctx, INDEX_op_rotr_i32, ret, arg1, arg2); + } else { + TCGv_i32 t0, t1; + + t0 = tcg_temp_new_i32(tcg_ctx); + t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shr_i32(tcg_ctx, t0, arg1, arg2); + tcg_gen_subfi_i32(tcg_ctx, t1, 32, arg2); + tcg_gen_shl_i32(tcg_ctx, t1, arg1, t1); + tcg_gen_or_i32(tcg_ctx, ret, t0, t1); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + } +} + +void tcg_gen_rotri_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2) +{ + tcg_debug_assert(arg2 < 32); + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i32(tcg_ctx, ret, arg1); + } else { + tcg_gen_rotli_i32(tcg_ctx, ret, arg1, 32 - arg2); + } +} + +void tcg_gen_deposit_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, + unsigned int ofs, unsigned int len) +{ + uint32_t mask; + TCGv_i32 t1; + + tcg_debug_assert(ofs < 32); + tcg_debug_assert(len > 0); + tcg_debug_assert(len <= 32); + tcg_debug_assert(ofs + len 
<= 32); + + if (len == 32) { + tcg_gen_mov_i32(tcg_ctx, ret, arg2); + return; + } + if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) { + tcg_gen_op5ii_i32(tcg_ctx, INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len); + return; + } + + t1 = tcg_temp_new_i32(tcg_ctx); + + if (TCG_TARGET_HAS_extract2_i32) { + if (ofs + len == 32) { + tcg_gen_shli_i32(tcg_ctx, t1, arg1, len); + tcg_gen_extract2_i32(tcg_ctx, ret, t1, arg2, len); + goto done; + } + if (ofs == 0) { + tcg_gen_extract2_i32(tcg_ctx, ret, arg1, arg2, len); + tcg_gen_rotli_i32(tcg_ctx, ret, ret, len); + goto done; + } + } + + mask = (1u << len) - 1; + if (ofs + len < 32) { + tcg_gen_andi_i32(tcg_ctx, t1, arg2, mask); + tcg_gen_shli_i32(tcg_ctx, t1, t1, ofs); + } else { + tcg_gen_shli_i32(tcg_ctx, t1, arg2, ofs); + } + tcg_gen_andi_i32(tcg_ctx, ret, arg1, ~(mask << ofs)); + tcg_gen_or_i32(tcg_ctx, ret, ret, t1); + done: + tcg_temp_free_i32(tcg_ctx, t1); +} + +void tcg_gen_deposit_z_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, + unsigned int ofs, unsigned int len) +{ + tcg_debug_assert(ofs < 32); + tcg_debug_assert(len > 0); + tcg_debug_assert(len <= 32); + tcg_debug_assert(ofs + len <= 32); + + if (ofs + len == 32) { + tcg_gen_shli_i32(tcg_ctx, ret, arg, ofs); + } else if (ofs == 0) { + tcg_gen_andi_i32(tcg_ctx, ret, arg, (1u << len) - 1); + } else if (TCG_TARGET_HAS_deposit_i32 + && TCG_TARGET_deposit_i32_valid(ofs, len)) { + TCGv_i32 zero = tcg_const_i32(tcg_ctx, 0); + tcg_gen_op5ii_i32(tcg_ctx, INDEX_op_deposit_i32, ret, zero, arg, ofs, len); + tcg_temp_free_i32(tcg_ctx, zero); + } else { + /* To help two-operand hosts we prefer to zero-extend first, + which allows ARG to stay live. */ + switch (len) { + case 16: + if (TCG_TARGET_HAS_ext16u_i32) { + tcg_gen_ext16u_i32(tcg_ctx, ret, arg); + tcg_gen_shli_i32(tcg_ctx, ret, ret, ofs); + return; + } + break; + case 8: + if (TCG_TARGET_HAS_ext8u_i32) { + tcg_gen_ext8u_i32(tcg_ctx, ret, arg); + tcg_gen_shli_i32(tcg_ctx, ret, ret, ofs); + return; + } + break; + } + /* Otherwise prefer zero-extension over AND for code size. */ + switch (ofs + len) { + case 16: + if (TCG_TARGET_HAS_ext16u_i32) { + tcg_gen_shli_i32(tcg_ctx, ret, arg, ofs); + tcg_gen_ext16u_i32(tcg_ctx, ret, ret); + return; + } + break; + case 8: + if (TCG_TARGET_HAS_ext8u_i32) { + tcg_gen_shli_i32(tcg_ctx, ret, arg, ofs); + tcg_gen_ext8u_i32(tcg_ctx, ret, ret); + return; + } + break; + } + tcg_gen_andi_i32(tcg_ctx, ret, arg, (1u << len) - 1); + tcg_gen_shli_i32(tcg_ctx, ret, ret, ofs); + } +} + +void tcg_gen_extract_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, + unsigned int ofs, unsigned int len) +{ + tcg_debug_assert(ofs < 32); + tcg_debug_assert(len > 0); + tcg_debug_assert(len <= 32); + tcg_debug_assert(ofs + len <= 32); + + /* Canonicalize certain special cases, even if extract is supported. */ + if (ofs + len == 32) { + tcg_gen_shri_i32(tcg_ctx, ret, arg, 32 - len); + return; + } + if (ofs == 0) { + tcg_gen_andi_i32(tcg_ctx, ret, arg, (1u << len) - 1); + return; + } + + if (TCG_TARGET_HAS_extract_i32 + && TCG_TARGET_extract_i32_valid(ofs, len)) { + tcg_gen_op4ii_i32(tcg_ctx, INDEX_op_extract_i32, ret, arg, ofs, len); + return; + } + + /* Assume that zero-extension, if available, is cheaper than a shift. 
*/ + switch (ofs + len) { + case 16: + if (TCG_TARGET_HAS_ext16u_i32) { + tcg_gen_ext16u_i32(tcg_ctx, ret, arg); + tcg_gen_shri_i32(tcg_ctx, ret, ret, ofs); + return; + } + break; + case 8: + if (TCG_TARGET_HAS_ext8u_i32) { + tcg_gen_ext8u_i32(tcg_ctx, ret, arg); + tcg_gen_shri_i32(tcg_ctx, ret, ret, ofs); + return; + } + break; + } + + /* ??? Ideally we'd know what values are available for immediate AND. + Assume that 8 bits are available, plus the special case of 16, + so that we get ext8u, ext16u. */ + switch (len) { + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 16: + tcg_gen_shri_i32(tcg_ctx, ret, arg, ofs); + tcg_gen_andi_i32(tcg_ctx, ret, ret, (1u << len) - 1); + break; + default: + tcg_gen_shli_i32(tcg_ctx, ret, arg, 32 - len - ofs); + tcg_gen_shri_i32(tcg_ctx, ret, ret, 32 - len); + break; + } +} + +void tcg_gen_sextract_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg, + unsigned int ofs, unsigned int len) +{ + tcg_debug_assert(ofs < 32); + tcg_debug_assert(len > 0); + tcg_debug_assert(len <= 32); + tcg_debug_assert(ofs + len <= 32); + + /* Canonicalize certain special cases, even if extract is supported. */ + if (ofs + len == 32) { + tcg_gen_sari_i32(tcg_ctx, ret, arg, 32 - len); + return; + } + if (ofs == 0) { + switch (len) { + case 16: + tcg_gen_ext16s_i32(tcg_ctx, ret, arg); + return; + case 8: + tcg_gen_ext8s_i32(tcg_ctx, ret, arg); + return; + } + } + + if (TCG_TARGET_HAS_sextract_i32 + && TCG_TARGET_extract_i32_valid(ofs, len)) { + tcg_gen_op4ii_i32(tcg_ctx, INDEX_op_sextract_i32, ret, arg, ofs, len); + return; + } + + /* Assume that sign-extension, if available, is cheaper than a shift. */ + switch (ofs + len) { + case 16: + if (TCG_TARGET_HAS_ext16s_i32) { + tcg_gen_ext16s_i32(tcg_ctx, ret, arg); + tcg_gen_sari_i32(tcg_ctx, ret, ret, ofs); + return; + } + break; + case 8: + if (TCG_TARGET_HAS_ext8s_i32) { + tcg_gen_ext8s_i32(tcg_ctx, ret, arg); + tcg_gen_sari_i32(tcg_ctx, ret, ret, ofs); + return; + } + break; + } + switch (len) { + case 16: + if (TCG_TARGET_HAS_ext16s_i32) { + tcg_gen_shri_i32(tcg_ctx, ret, arg, ofs); + tcg_gen_ext16s_i32(tcg_ctx, ret, ret); + return; + } + break; + case 8: + if (TCG_TARGET_HAS_ext8s_i32) { + tcg_gen_shri_i32(tcg_ctx, ret, arg, ofs); + tcg_gen_ext8s_i32(tcg_ctx, ret, ret); + return; + } + break; + } + + tcg_gen_shli_i32(tcg_ctx, ret, arg, 32 - len - ofs); + tcg_gen_sari_i32(tcg_ctx, ret, ret, 32 - len); +} + +/* + * Extract 32-bits from a 64-bit input, ah:al, starting from ofs. + * Unlike tcg_gen_extract_i32 above, len is fixed at 32. 
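+ * The result is the low 32 bits of ((uint64_t)ah << 32 | al) >> ofs,
+ * i.e. a funnel shift of the concatenated pair.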
+ */ +void tcg_gen_extract2_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah, + unsigned int ofs) +{ + tcg_debug_assert(ofs <= 32); + if (ofs == 0) { + tcg_gen_mov_i32(tcg_ctx, ret, al); + } else if (ofs == 32) { + tcg_gen_mov_i32(tcg_ctx, ret, ah); + } else if (al == ah) { + tcg_gen_rotri_i32(tcg_ctx, ret, al, ofs); + } else if (TCG_TARGET_HAS_extract2_i32) { + tcg_gen_op4i_i32(tcg_ctx, INDEX_op_extract2_i32, ret, al, ah, ofs); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, t0, al, ofs); + tcg_gen_deposit_i32(tcg_ctx, ret, t0, ah, 32 - ofs, ofs); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_movcond_i32(TCGContext *tcg_ctx, TCGCond cond, TCGv_i32 ret, TCGv_i32 c1, + TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_mov_i32(tcg_ctx, ret, v1); + } else if (cond == TCG_COND_NEVER) { + tcg_gen_mov_i32(tcg_ctx, ret, v2); + } else if (TCG_TARGET_HAS_movcond_i32) { + tcg_gen_op6i_i32(tcg_ctx, INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_setcond_i32(tcg_ctx, cond, t0, c1, c2); + tcg_gen_neg_i32(tcg_ctx, t0, t0); + tcg_gen_and_i32(tcg_ctx, t1, v1, t0); + tcg_gen_andc_i32(tcg_ctx, ret, v2, t0); + tcg_gen_or_i32(tcg_ctx, ret, ret, t1); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + } +} + +void tcg_gen_add2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, + TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) +{ + if (TCG_TARGET_HAS_add2_i32) { + tcg_gen_op6_i32(tcg_ctx, INDEX_op_add2_i32, rl, rh, al, ah, bl, bh); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, t0, al, ah); + tcg_gen_concat_i32_i64(tcg_ctx, t1, bl, bh); + tcg_gen_add_i64(tcg_ctx, t0, t0, t1); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + } +} + +void tcg_gen_sub2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, + TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) +{ + if (TCG_TARGET_HAS_sub2_i32) { + tcg_gen_op6_i32(tcg_ctx, INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, t0, al, ah); + tcg_gen_concat_i32_i64(tcg_ctx, t1, bl, bh); + tcg_gen_sub_i64(tcg_ctx, t0, t0, t1); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + } +} + +void tcg_gen_mulu2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_mulu2_i32) { + tcg_gen_op4_i32(tcg_ctx, INDEX_op_mulu2_i32, rl, rh, arg1, arg2); + } else if (TCG_TARGET_HAS_muluh_i32) { + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + tcg_gen_op3_i32(tcg_ctx, INDEX_op_mul_i32, t, arg1, arg2); + tcg_gen_op3_i32(tcg_ctx, INDEX_op_muluh_i32, rh, arg1, arg2); + tcg_gen_mov_i32(tcg_ctx, rl, t); + tcg_temp_free_i32(tcg_ctx, t); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, t0, arg1); + tcg_gen_extu_i32_i64(tcg_ctx, t1, arg2); + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + } +} + +void tcg_gen_muls2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if 
(TCG_TARGET_HAS_muls2_i32) { + tcg_gen_op4_i32(tcg_ctx, INDEX_op_muls2_i32, rl, rh, arg1, arg2); + } else if (TCG_TARGET_HAS_mulsh_i32) { + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + tcg_gen_op3_i32(tcg_ctx, INDEX_op_mul_i32, t, arg1, arg2); + tcg_gen_op3_i32(tcg_ctx, INDEX_op_mulsh_i32, rh, arg1, arg2); + tcg_gen_mov_i32(tcg_ctx, rl, t); + tcg_temp_free_i32(tcg_ctx, t); + } else if (TCG_TARGET_REG_BITS == 32) { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mulu2_i32(tcg_ctx, t0, t1, arg1, arg2); + /* Adjust for negative inputs. */ + tcg_gen_sari_i32(tcg_ctx, t2, arg1, 31); + tcg_gen_sari_i32(tcg_ctx, t3, arg2, 31); + tcg_gen_and_i32(tcg_ctx, t2, t2, arg2); + tcg_gen_and_i32(tcg_ctx, t3, t3, arg1); + tcg_gen_sub_i32(tcg_ctx, rh, t1, t2); + tcg_gen_sub_i32(tcg_ctx, rh, rh, t3); + tcg_gen_mov_i32(tcg_ctx, rl, t0); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext_i32_i64(tcg_ctx, t0, arg1); + tcg_gen_ext_i32_i64(tcg_ctx, t1, arg2); + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + } +} + +void tcg_gen_mulsu2_i32(TCGContext *tcg_ctx, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_REG_BITS == 32) { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mulu2_i32(tcg_ctx, t0, t1, arg1, arg2); + /* Adjust for negative input for the signed arg1. 
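Reading arg1 as signed subtracts 2^32 * arg2 from the unsigned
+ product exactly when arg1 < 0, so (arg1 >> 31) & arg2 is subtracted
+ from the high half below.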
*/ + tcg_gen_sari_i32(tcg_ctx, t2, arg1, 31); + tcg_gen_and_i32(tcg_ctx, t2, t2, arg2); + tcg_gen_sub_i32(tcg_ctx, rh, t1, t2); + tcg_gen_mov_i32(tcg_ctx, rl, t0); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext_i32_i64(tcg_ctx, t0, arg1); + tcg_gen_extu_i32_i64(tcg_ctx, t1, arg2); + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + } +} + +void tcg_gen_ext8s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_ext8s_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext8s_i32, ret, arg); + } else { + tcg_gen_shli_i32(tcg_ctx, ret, arg, 24); + tcg_gen_sari_i32(tcg_ctx, ret, ret, 24); + } +} + +void tcg_gen_ext16s_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_ext16s_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext16s_i32, ret, arg); + } else { + tcg_gen_shli_i32(tcg_ctx, ret, arg, 16); + tcg_gen_sari_i32(tcg_ctx, ret, ret, 16); + } +} + +void tcg_gen_ext8u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_ext8u_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext8u_i32, ret, arg); + } else { + tcg_gen_andi_i32(tcg_ctx, ret, arg, 0xffu); + } +} + +void tcg_gen_ext16u_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_ext16u_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_ext16u_i32, ret, arg); + } else { + tcg_gen_andi_i32(tcg_ctx, ret, arg, 0xffffu); + } +} + +/* Note: we assume the two high bytes are set to zero */ +void tcg_gen_bswap16_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_bswap16_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_bswap16_i32, ret, arg); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_ext8u_i32(tcg_ctx, t0, arg); + tcg_gen_shli_i32(tcg_ctx, t0, t0, 8); + tcg_gen_shri_i32(tcg_ctx, ret, arg, 8); + tcg_gen_or_i32(tcg_ctx, ret, ret, t0); + tcg_temp_free_i32(tcg_ctx, t0); + } +} + +void tcg_gen_bswap32_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_bswap32_i32) { + tcg_gen_op2_i32(tcg_ctx, INDEX_op_bswap32_i32, ret, arg); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t2 = tcg_const_i32(tcg_ctx, 0x00ff00ff); + + /* arg = abcd */ + tcg_gen_shri_i32(tcg_ctx, t0, arg, 8); /* t0 = .abc */ + tcg_gen_and_i32(tcg_ctx, t1, arg, t2); /* t1 = .b.d */ + tcg_gen_and_i32(tcg_ctx, t0, t0, t2); /* t0 = .a.c */ + tcg_gen_shli_i32(tcg_ctx, t1, t1, 8); /* t1 = b.d. */ + tcg_gen_or_i32(tcg_ctx, ret, t0, t1); /* ret = badc */ + + tcg_gen_shri_i32(tcg_ctx, t0, ret, 16); /* t0 = ..ba */ + tcg_gen_shli_i32(tcg_ctx, t1, ret, 16); /* t1 = dc.. 
*/ + tcg_gen_or_i32(tcg_ctx, ret, t0, t1); /* ret = dcba */ + + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); + } +} + +void tcg_gen_smin_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, ret, a, b, a, b); +} + +void tcg_gen_umin_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, ret, a, b, a, b); +} + +void tcg_gen_smax_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, ret, a, b, b, a); +} + +void tcg_gen_umax_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LTU, ret, a, b, b, a); +} + +void tcg_gen_abs_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 a) +{ + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_sari_i32(tcg_ctx, t, a, 31); + tcg_gen_xor_i32(tcg_ctx, ret, a, t); + tcg_gen_sub_i32(tcg_ctx, ret, ret, t); + tcg_temp_free_i32(tcg_ctx, t); +} + +/* 64-bit ops */ + +#if TCG_TARGET_REG_BITS == 32 +/* These are all inline for TCG_TARGET_REG_BITS == 64. */ + +void tcg_gen_discard_i64(TCGContext *tcg_ctx, TCGv_i64 arg) +{ + tcg_gen_discard_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg)); + tcg_gen_discard_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, arg)); +} + +void tcg_gen_mov_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg)); +} + +void tcg_gen_movi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, int64_t arg) +{ + tcg_gen_movi_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), arg >> 32); +} + +void tcg_gen_ld8u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ld8u_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +} + +void tcg_gen_ld8s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ld8s_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); +} + +void tcg_gen_ld16u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ld16u_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +} + +void tcg_gen_ld16s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ld16s_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); +} + +void tcg_gen_ld32u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ld_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +} + +void tcg_gen_ld32s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ld_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset); + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); +} + +void tcg_gen_ld_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + /* Since arg2 and ret have different types, + they cannot be the same temporary */ +#ifdef HOST_WORDS_BIGENDIAN + tcg_gen_ld_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), arg2, offset); + tcg_gen_ld_i32(tcg_ctx, 
TCGV_LOW(tcg_ctx, ret), arg2, offset + 4);
+#else
+ tcg_gen_ld_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg2, offset);
+ tcg_gen_ld_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), arg2, offset + 4);
+#endif
+}
+
+void tcg_gen_st_i64(TCGContext *tcg_ctx, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+ tcg_gen_st_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, arg1), arg2, offset);
+ tcg_gen_st_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg1), arg2, offset + 4);
+#else
+ tcg_gen_st_i32(tcg_ctx, TCGV_LOW(tcg_ctx, arg1), arg2, offset);
+ tcg_gen_st_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, arg1), arg2, offset + 4);
+#endif
+}
+
+void tcg_gen_and_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ tcg_gen_and_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2));
+ tcg_gen_and_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2));
+}
+
+void tcg_gen_or_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ tcg_gen_or_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2));
+ tcg_gen_or_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2));
+}
+
+void tcg_gen_xor_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ tcg_gen_xor_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2));
+ tcg_gen_xor_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2));
+}
+
+void tcg_gen_shl_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ gen_helper_shl_i64(tcg_ctx, ret, arg1, arg2);
+}
+
+void tcg_gen_shr_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ gen_helper_shr_i64(tcg_ctx, ret, arg1, arg2);
+}
+
+void tcg_gen_sar_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ gen_helper_sar_i64(tcg_ctx, ret, arg1, arg2);
+}
+
+void tcg_gen_mul_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ TCGv_i64 t0;
+ TCGv_i32 t1;
+
+ t0 = tcg_temp_new_i64(tcg_ctx);
+ t1 = tcg_temp_new_i32(tcg_ctx);
+
+ tcg_gen_mulu2_i32(tcg_ctx, TCGV_LOW(tcg_ctx, t0), TCGV_HIGH(tcg_ctx, t0),
+ TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2));
+
+ tcg_gen_mul_i32(tcg_ctx, t1, TCGV_LOW(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2));
+ tcg_gen_add_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, t0), TCGV_HIGH(tcg_ctx, t0), t1);
+ tcg_gen_mul_i32(tcg_ctx, t1, TCGV_HIGH(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2));
+ tcg_gen_add_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, t0), TCGV_HIGH(tcg_ctx, t0), t1);
+
+ tcg_gen_mov_i64(tcg_ctx, ret, t0);
+ tcg_temp_free_i64(tcg_ctx, t0);
+ tcg_temp_free_i32(tcg_ctx, t1);
+}
+#endif /* TCG_TARGET_REG_BITS == 32 */
+
+void tcg_gen_addi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(tcg_ctx, ret, arg1);
+ } else {
+ TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2);
+ tcg_gen_add_i64(tcg_ctx, ret, arg1, t0);
+ tcg_temp_free_i64(tcg_ctx, t0);
+ }
+}
+
+void tcg_gen_subfi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2)
+{
+ if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) {
+ /* Don't recurse with tcg_gen_neg_i64. 
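When the host lacks a neg op, tcg_gen_neg_i64 is itself implemented
+ in terms of this function, so emit the opcode directly.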
*/ + tcg_gen_op2_i64(tcg_ctx, INDEX_op_neg_i64, ret, arg2); + } else { + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg1); + tcg_gen_sub_i64(tcg_ctx, ret, t0, arg2); + tcg_temp_free_i64(tcg_ctx, t0); + } +} + +void tcg_gen_subi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i64(tcg_ctx, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_sub_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); + } +} + +void tcg_gen_andi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_andi_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), arg2); + tcg_gen_andi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), arg2 >> 32); + return; +#else + TCGv_i64 t0; + + /* Some cases can be optimized here. */ + switch (arg2) { + case 0: + tcg_gen_movi_i64(tcg_ctx, ret, 0); + return; + case -1: + tcg_gen_mov_i64(tcg_ctx, ret, arg1); + return; + case 0xff: + /* Don't recurse with tcg_gen_ext8u_i64. */ + if (TCG_TARGET_HAS_ext8u_i64) { + tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext8u_i64, ret, arg1); + return; + } + break; + case 0xffff: + if (TCG_TARGET_HAS_ext16u_i64) { + tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext16u_i64, ret, arg1); + return; + } + break; + case 0xffffffffu: + if (TCG_TARGET_HAS_ext32u_i64) { + tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext32u_i64, ret, arg1); + return; + } + break; + } + t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_and_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); +#endif +} + +void tcg_gen_ori_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_ori_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), arg2); + tcg_gen_ori_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), arg2 >> 32); + return; +#else + /* Some cases can be optimized here. */ + if (arg2 == -1) { + tcg_gen_movi_i64(tcg_ctx, ret, -1); + } else if (arg2 == 0) { + tcg_gen_mov_i64(tcg_ctx, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_or_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); + } +#endif +} + +void tcg_gen_xori_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_xori_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), arg2); + tcg_gen_xori_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), arg2 >> 32); + return; +#else + /* Some cases can be optimized here. */ + if (arg2 == 0) { + tcg_gen_mov_i64(tcg_ctx, ret, arg1); + } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) { + /* Don't recurse with tcg_gen_not_i64. 
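Without a native not op, tcg_gen_not_i64 expands back into
+ tcg_gen_xori_i64 with -1, which would recurse.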
*/ + tcg_gen_op2_i64(tcg_ctx, INDEX_op_not_i64, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_xor_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); + } +#endif +} + +#if TCG_TARGET_REG_BITS == 32 +static inline void tcg_gen_shifti_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, + unsigned c, bool right, bool arith) +{ + tcg_debug_assert(c < 64); + if (c == 0) { + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1)); + tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1)); + } else if (c >= 32) { + c -= 32; + if (right) { + if (arith) { + tcg_gen_sari_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), c); + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), 31); + } else { + tcg_gen_shri_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), c); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); + } + } else { + tcg_gen_shli_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), c); + tcg_gen_movi_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), 0); + } + } else if (right) { + if (TCG_TARGET_HAS_extract2_i32) { + tcg_gen_extract2_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), + TCGV_LOW(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg1), c); + } else { + tcg_gen_shri_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), c); + tcg_gen_deposit_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), + TCGV_HIGH(tcg_ctx, arg1), 32 - c, c); + } + if (arith) { + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), c); + } else { + tcg_gen_shri_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), c); + } + } else { + if (TCG_TARGET_HAS_extract2_i32) { + tcg_gen_extract2_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), + TCGV_LOW(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg1), 32 - c); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, t0, TCGV_LOW(tcg_ctx, arg1), 32 - c); + tcg_gen_deposit_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), t0, + TCGV_HIGH(tcg_ctx, arg1), c, 32 - c); + tcg_temp_free_i32(tcg_ctx, t0); + } + tcg_gen_shli_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), c); + } +} +#endif + +void tcg_gen_shli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + tcg_debug_assert(arg2 >= 0 && arg2 < 64); +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_shifti_i64(tcg_ctx, ret, arg1, arg2, 0, 0); +#else + if (arg2 == 0) { + tcg_gen_mov_i64(tcg_ctx, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_shl_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); + } +#endif +} + +void tcg_gen_shri_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + tcg_debug_assert(arg2 >= 0 && arg2 < 64); +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_shifti_i64(tcg_ctx, ret, arg1, arg2, 1, 0); +#else + if (arg2 == 0) { + tcg_gen_mov_i64(tcg_ctx, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_shr_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); + } +#endif +} + +void tcg_gen_sari_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + tcg_debug_assert(arg2 >= 0 && arg2 < 64); +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_shifti_i64(tcg_ctx, ret, arg1, arg2, 1, 1); +#else + if (arg2 == 0) { + tcg_gen_mov_i64(tcg_ctx, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_sar_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); + } +#endif +} + +void 
tcg_gen_brcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_br(tcg_ctx, l); + } else if (cond != TCG_COND_NEVER) { + l->refs++; +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_op6ii_i32(tcg_ctx, INDEX_op_brcond2_i32, TCGV_LOW(tcg_ctx, arg1), + TCGV_HIGH(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2), + TCGV_HIGH(tcg_ctx, arg2), cond, label_arg(l)); +#else + tcg_gen_op4ii_i64(tcg_ctx, INDEX_op_brcond_i64, arg1, arg2, cond, + label_arg(l)); +#endif + } +} + +void tcg_gen_brcondi_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_br(tcg_ctx, l); + } else if (cond != TCG_COND_NEVER) { + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_brcond_i64(tcg_ctx, cond, arg1, t0, l); + tcg_temp_free_i64(tcg_ctx, t0); + } +} + +void tcg_gen_setcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret, + TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_movi_i64(tcg_ctx, ret, 1); + } else if (cond == TCG_COND_NEVER) { + tcg_gen_movi_i64(tcg_ctx, ret, 0); + } else { +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_op6i_i32(tcg_ctx, INDEX_op_setcond2_i32, TCGV_LOW(tcg_ctx, ret), + TCGV_LOW(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg1), + TCGV_LOW(tcg_ctx, arg2), TCGV_HIGH(tcg_ctx, arg2), cond); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +#else + tcg_gen_op4i_i64(tcg_ctx, INDEX_op_setcond_i64, ret, arg1, arg2, cond); +#endif + } +} + +void tcg_gen_setcondi_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret, + TCGv_i64 arg1, int64_t arg2) +{ + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_setcond_i64(tcg_ctx, cond, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +void tcg_gen_muli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + if (arg2 == 0) { + tcg_gen_movi_i64(tcg_ctx, ret, 0); + } else if (is_power_of_2(arg2)) { + tcg_gen_shli_i64(tcg_ctx, ret, arg1, ctz64(arg2)); + } else { + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_mul_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); + } +} + +void tcg_gen_div_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_HAS_div_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_div_i64, ret, arg1, arg2); +#elif TCG_TARGET_HAS_div2_i64 + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_sari_i64(tcg_ctx, t0, arg1, 63); + tcg_gen_op5_i64(tcg_ctx, INDEX_op_div2_i64, ret, t0, arg1, t0, arg2); + tcg_temp_free_i64(tcg_ctx, t0); +#else + gen_helper_div_i64(tcg_ctx, ret, arg1, arg2); +#endif +} + +void tcg_gen_rem_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_HAS_rem_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_rem_i64, ret, arg1, arg2); +#elif TCG_TARGET_HAS_div_i64 + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_op3_i64(tcg_ctx, INDEX_op_div_i64, t0, arg1, arg2); + tcg_gen_mul_i64(tcg_ctx, t0, t0, arg2); + tcg_gen_sub_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); +#elif TCG_TARGET_HAS_div2_i64 + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_sari_i64(tcg_ctx, t0, arg1, 63); + tcg_gen_op5_i64(tcg_ctx, INDEX_op_div2_i64, t0, ret, arg1, t0, arg2); + tcg_temp_free_i64(tcg_ctx, t0); +#else + gen_helper_rem_i64(tcg_ctx, ret, arg1, arg2); +#endif +} + +void tcg_gen_divu_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_HAS_div_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_divu_i64, ret, arg1, arg2); +#elif TCG_TARGET_HAS_div2_i64 + 
TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_movi_i64(tcg_ctx, t0, 0); + tcg_gen_op5_i64(tcg_ctx, INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2); + tcg_temp_free_i64(tcg_ctx, t0); +#else + gen_helper_divu_i64(tcg_ctx, ret, arg1, arg2); +#endif +} + +void tcg_gen_remu_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_HAS_rem_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_remu_i64, ret, arg1, arg2); +#elif TCG_TARGET_HAS_div_i64 + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_op3_i64(tcg_ctx, INDEX_op_divu_i64, t0, arg1, arg2); + tcg_gen_mul_i64(tcg_ctx, t0, t0, arg2); + tcg_gen_sub_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); +#elif TCG_TARGET_HAS_div2_i64 + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_movi_i64(tcg_ctx, t0, 0); + tcg_gen_op5_i64(tcg_ctx, INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2); + tcg_temp_free_i64(tcg_ctx, t0); +#else + gen_helper_remu_i64(tcg_ctx, ret, arg1, arg2); +#endif +} + +void tcg_gen_ext8s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_ext8s_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); +#elif TCG_TARGET_HAS_ext8s_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext8s_i64, ret, arg); +#else + tcg_gen_shli_i64(tcg_ctx, ret, arg, 56); + tcg_gen_sari_i64(tcg_ctx, ret, ret, 56); +#endif +} + +void tcg_gen_ext16s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_ext16s_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); +#elif TCG_TARGET_HAS_ext16s_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext16s_i64, ret, arg); +#else + tcg_gen_shli_i64(tcg_ctx, ret, arg, 48); + tcg_gen_sari_i64(tcg_ctx, ret, ret, 48); +#endif +} + +void tcg_gen_ext32s_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); +#elif TCG_TARGET_HAS_ext32s_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext32s_i64, ret, arg); +#else + tcg_gen_shli_i64(tcg_ctx, ret, arg, 32); + tcg_gen_sari_i64(tcg_ctx, ret, ret, 32); +#endif +} + +void tcg_gen_ext8u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_ext8u_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +#elif TCG_TARGET_HAS_ext8u_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext8u_i64, ret, arg); +#else + tcg_gen_andi_i64(tcg_ctx, ret, arg, 0xffu); +#endif +} + +void tcg_gen_ext16u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_ext16u_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +#elif TCG_TARGET_HAS_ext16u_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext16u_i64, ret, arg); +#else + tcg_gen_andi_i64(tcg_ctx, ret, arg, 0xffffu); +#endif +} + +void tcg_gen_ext32u_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +#elif TCG_TARGET_HAS_ext32u_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_ext32u_i64, ret, arg); +#else + tcg_gen_andi_i64(tcg_ctx, ret, arg, 
0xffffffffu); +#endif +} + +/* Note: we assume the six high bytes are set to zero */ +void tcg_gen_bswap16_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_bswap16_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +#elif TCG_TARGET_HAS_bswap16_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_bswap16_i64, ret, arg); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext8u_i64(tcg_ctx, t0, arg); + tcg_gen_shli_i64(tcg_ctx, t0, t0, 8); + tcg_gen_shri_i64(tcg_ctx, ret, arg, 8); + tcg_gen_or_i64(tcg_ctx, ret, ret, t0); + tcg_temp_free_i64(tcg_ctx, t0); +#endif +} + +/* Note: we assume the four high bytes are set to zero */ +void tcg_gen_bswap32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_bswap32_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +#elif TCG_TARGET_HAS_bswap32_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_bswap32_i64, ret, arg); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_const_i64(tcg_ctx, 0x00ff00ff); + + /* arg = ....abcd */ + tcg_gen_shri_i64(tcg_ctx, t0, arg, 8); /* t0 = .....abc */ + tcg_gen_and_i64(tcg_ctx, t1, arg, t2); /* t1 = .....b.d */ + tcg_gen_and_i64(tcg_ctx, t0, t0, t2); /* t0 = .....a.c */ + tcg_gen_shli_i64(tcg_ctx, t1, t1, 8); /* t1 = ....b.d. */ + tcg_gen_or_i64(tcg_ctx, ret, t0, t1); /* ret = ....badc */ + + tcg_gen_shli_i64(tcg_ctx, t1, ret, 48); /* t1 = dc...... */ + tcg_gen_shri_i64(tcg_ctx, t0, ret, 16); /* t0 = ......ba */ + tcg_gen_shri_i64(tcg_ctx, t1, t1, 32); /* t1 = ....dc.. */ + tcg_gen_or_i64(tcg_ctx, ret, t0, t1); /* ret = ....dcba */ + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +#endif +} + +void tcg_gen_bswap64_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + TCGv_i32 t0, t1; + t0 = tcg_temp_new_i32(tcg_ctx); + t1 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_bswap32_i32(tcg_ctx, t0, TCGV_LOW(tcg_ctx, arg)); + tcg_gen_bswap32_i32(tcg_ctx, t1, TCGV_HIGH(tcg_ctx, arg)); + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), t1); + tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), t0); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); +#elif TCG_TARGET_HAS_bswap64_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_bswap64_i64, ret, arg); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + + /* arg = abcdefgh */ + tcg_gen_movi_i64(tcg_ctx, t2, 0x00ff00ff00ff00ffull); + tcg_gen_shri_i64(tcg_ctx, t0, arg, 8); /* t0 = .abcdefg */ + tcg_gen_and_i64(tcg_ctx, t1, arg, t2); /* t1 = .b.d.f.h */ + tcg_gen_and_i64(tcg_ctx, t0, t0, t2); /* t0 = .a.c.e.g */ + tcg_gen_shli_i64(tcg_ctx, t1, t1, 8); /* t1 = b.d.f.h. */ + tcg_gen_or_i64(tcg_ctx, ret, t0, t1); /* ret = badcfehg */ + + tcg_gen_movi_i64(tcg_ctx, t2, 0x0000ffff0000ffffull); + tcg_gen_shri_i64(tcg_ctx, t0, ret, 16); /* t0 = ..badcfe */ + tcg_gen_and_i64(tcg_ctx, t1, ret, t2); /* t1 = ..dc..hg */ + tcg_gen_and_i64(tcg_ctx, t0, t0, t2); /* t0 = ..ba..fe */ + tcg_gen_shli_i64(tcg_ctx, t1, t1, 16); /* t1 = dc..hg.. */ + tcg_gen_or_i64(tcg_ctx, ret, t0, t1); /* ret = dcbahgfe */ + + tcg_gen_shri_i64(tcg_ctx, t0, ret, 32); /* t0 = ....dcba */ + tcg_gen_shli_i64(tcg_ctx, t1, ret, 32); /* t1 = hgfe.... 
*/ + tcg_gen_or_i64(tcg_ctx, ret, t0, t1); /* ret = hgfedcba */ + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +#endif +} + +void tcg_gen_not_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_not_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_not_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg)); +#elif TCG_TARGET_HAS_not_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_not_i64, ret, arg); +#else + tcg_gen_xori_i64(tcg_ctx, ret, arg, -1); +#endif +} + +void tcg_gen_andc_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_andc_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2)); + tcg_gen_andc_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2)); +#elif TCG_TARGET_HAS_andc_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_andc_i64, ret, arg1, arg2); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_not_i64(tcg_ctx, t0, arg2); + tcg_gen_and_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); +#endif +} + +void tcg_gen_eqv_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_eqv_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2)); + tcg_gen_eqv_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2)); +#elif TCG_TARGET_HAS_eqv_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_eqv_i64, ret, arg1, arg2); +#else + tcg_gen_xor_i64(tcg_ctx, ret, arg1, arg2); + tcg_gen_not_i64(tcg_ctx, ret, ret); +#endif +} + +void tcg_gen_nand_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_nand_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2)); + tcg_gen_nand_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2)); +#elif TCG_TARGET_HAS_nand_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_nand_i64, ret, arg1, arg2); +#else + tcg_gen_and_i64(tcg_ctx, ret, arg1, arg2); + tcg_gen_not_i64(tcg_ctx, ret, ret); +#endif +} + +void tcg_gen_nor_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_nor_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2)); + tcg_gen_nor_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2)); +#elif TCG_TARGET_HAS_nor_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_nor_i64, ret, arg1, arg2); +#else + tcg_gen_or_i64(tcg_ctx, ret, arg1, arg2); + tcg_gen_not_i64(tcg_ctx, ret, ret); +#endif +} + +void tcg_gen_orc_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_orc_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), TCGV_LOW(tcg_ctx, arg2)); + tcg_gen_orc_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), TCGV_HIGH(tcg_ctx, arg2)); +#elif TCG_TARGET_HAS_orc_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_orc_i64, ret, arg1, arg2); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_not_i64(tcg_ctx, t0, arg2); + tcg_gen_or_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); +#endif +} + +void tcg_gen_clz_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_HAS_clz_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_clz_i64, ret, arg1, arg2); +#else 
+ gen_helper_clz_i64(tcg_ctx, ret, arg1, arg2); +#endif +} + +void tcg_gen_clzi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) +{ +#if TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_clz_i32 + if (arg2 <= 0xffffffffu) { + TCGv_i32 t = tcg_const_i32(tcg_ctx, (uint32_t)arg2 - 32); + tcg_gen_clz_i32(tcg_ctx, t, TCGV_LOW(tcg_ctx, arg1), t); + tcg_gen_addi_i32(tcg_ctx, t, t, 32); + tcg_gen_clz_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), t); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); + tcg_temp_free_i32(tcg_ctx, t); + } else +#endif + { + TCGv_i64 t = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_clz_i64(tcg_ctx, ret, arg1, t); + tcg_temp_free_i64(tcg_ctx, t); + } +} + +void tcg_gen_ctz_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_HAS_ctz_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_ctz_i64, ret, arg1, arg2); +#elif TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64 + TCGv_i64 z, t = tcg_temp_new_i64(tcg_ctx); +#if TCG_TARGET_HAS_ctpop_i64 + tcg_gen_subi_i64(tcg_ctx, t, arg1, 1); + tcg_gen_andc_i64(tcg_ctx, t, t, arg1); + tcg_gen_ctpop_i64(tcg_ctx, t, t); +#else + /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */ + tcg_gen_neg_i64(tcg_ctx, t, arg1); + tcg_gen_and_i64(tcg_ctx, t, t, arg1); + tcg_gen_clzi_i64(tcg_ctx, t, t, 64); + tcg_gen_xori_i64(tcg_ctx, t, t, 63); +#endif + z = tcg_const_i64(tcg_ctx, 0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, ret, arg1, z, arg2, t); + tcg_temp_free_i64(tcg_ctx, t); + tcg_temp_free_i64(tcg_ctx, z); +#else + gen_helper_ctz_i64(tcg_ctx, ret, arg1, arg2); +#endif +} + +void tcg_gen_ctzi_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) +{ +#if TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctz_i32 + if (arg2 <= 0xffffffffu) { + TCGv_i32 t32 = tcg_const_i32(tcg_ctx, (uint32_t)arg2 - 32); + tcg_gen_ctz_i32(tcg_ctx, t32, TCGV_HIGH(tcg_ctx, arg1), t32); + tcg_gen_addi_i32(tcg_ctx, t32, t32, 32); + tcg_gen_ctz_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), t32); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); + tcg_temp_free_i32(tcg_ctx, t32); + } else +#endif +#if !TCG_TARGET_HAS_ctz_i64 && TCG_TARGET_HAS_ctpop_i64 + if (arg2 == 64) { + /* This equivalence has the advantage of not requiring a fixup. 
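As in the 32-bit case, ctpop((x - 1) & ~x) computes ctz(x) directly,
+ with x == 0 yielding all ones and therefore the required result of 64.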
*/ + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_subi_i64(tcg_ctx, t, arg1, 1); + tcg_gen_andc_i64(tcg_ctx, t, t, arg1); + tcg_gen_ctpop_i64(tcg_ctx, ret, t); + tcg_temp_free_i64(tcg_ctx, t); + } else +#endif + { + TCGv_i64 t64 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_ctz_i64(tcg_ctx, ret, arg1, t64); + tcg_temp_free_i64(tcg_ctx, t64); + } +} + +void tcg_gen_clrsb_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32 + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_sari_i64(tcg_ctx, t, arg, 63); + tcg_gen_xor_i64(tcg_ctx, t, t, arg); + tcg_gen_clzi_i64(tcg_ctx, t, t, 64); + tcg_gen_subi_i64(tcg_ctx, ret, t, 1); + tcg_temp_free_i64(tcg_ctx, t); +#else + gen_helper_clrsb_i64(tcg_ctx, ret, arg); +#endif +} + +void tcg_gen_ctpop_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1) +{ +#if TCG_TARGET_HAS_ctpop_i64 + tcg_gen_op2_i64(tcg_ctx, INDEX_op_ctpop_i64, ret, arg1); +#elif TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32 + tcg_gen_ctpop_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1)); + tcg_gen_ctpop_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1)); + tcg_gen_add_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, ret)); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +#else + gen_helper_ctpop_i64(tcg_ctx, ret, arg1); +#endif +} + +void tcg_gen_rotl_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_HAS_rot_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_rotl_i64, ret, arg1, arg2); +#else + TCGv_i64 t0, t1; + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shl_i64(tcg_ctx, t0, arg1, arg2); + tcg_gen_subfi_i64(tcg_ctx, t1, 64, arg2); + tcg_gen_shr_i64(tcg_ctx, t1, arg1, t1); + tcg_gen_or_i64(tcg_ctx, ret, t0, t1); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +#endif +} + +void tcg_gen_rotli_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2) +{ + tcg_debug_assert(arg2 < 64); + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i64(tcg_ctx, ret, arg1); + } else { +#if TCG_TARGET_HAS_rot_i64 + TCGv_i64 t0 = tcg_const_i64(tcg_ctx, arg2); + tcg_gen_rotl_i64(tcg_ctx, ret, arg1, t0); + tcg_temp_free_i64(tcg_ctx, t0); +#else + TCGv_i64 t0, t1; + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shli_i64(tcg_ctx, t0, arg1, arg2); + tcg_gen_shri_i64(tcg_ctx, t1, arg1, 64 - arg2); + tcg_gen_or_i64(tcg_ctx, ret, t0, t1); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +#endif + } +} + +void tcg_gen_rotr_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_HAS_rot_i64 + tcg_gen_op3_i64(tcg_ctx, INDEX_op_rotr_i64, ret, arg1, arg2); +#else + TCGv_i64 t0, t1; + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shr_i64(tcg_ctx, t0, arg1, arg2); + tcg_gen_subfi_i64(tcg_ctx, t1, 64, arg2); + tcg_gen_shl_i64(tcg_ctx, t1, arg1, t1); + tcg_gen_or_i64(tcg_ctx, ret, t0, t1); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +#endif +} + +void tcg_gen_rotri_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2) +{ + tcg_debug_assert(arg2 < 64); + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i64(tcg_ctx, ret, arg1); + } else { + tcg_gen_rotli_i64(tcg_ctx, ret, arg1, 64 - arg2); + } +} + +void tcg_gen_deposit_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2, + unsigned int ofs, 
unsigned int len) +{ + uint64_t mask; + TCGv_i64 t1; + + tcg_debug_assert(ofs < 64); + tcg_debug_assert(len > 0); + tcg_debug_assert(len <= 64); + tcg_debug_assert(ofs + len <= 64); + + if (len == 64) { + tcg_gen_mov_i64(tcg_ctx, ret, arg2); + return; + } + +#if TCG_TARGET_HAS_deposit_i64 + if (TCG_TARGET_deposit_i64_valid(ofs, len)) { + tcg_gen_op5ii_i64(tcg_ctx, INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len); + return; + } +#endif + +#if TCG_TARGET_REG_BITS == 32 + if (ofs >= 32) { + tcg_gen_deposit_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1), + TCGV_LOW(tcg_ctx, arg2), ofs - 32, len); + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1)); + return; + } + if (ofs + len <= 32) { + tcg_gen_deposit_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg1), + TCGV_LOW(tcg_ctx, arg2), ofs, len); + tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg1)); + return; + } +#endif + + t1 = tcg_temp_new_i64(tcg_ctx); + +#if TCG_TARGET_HAS_extract2_i64 + if (ofs + len == 64) { + tcg_gen_shli_i64(tcg_ctx, t1, arg1, len); + tcg_gen_extract2_i64(tcg_ctx, ret, t1, arg2, len); + goto done; + } + if (ofs == 0) { + tcg_gen_extract2_i64(tcg_ctx, ret, arg1, arg2, len); + tcg_gen_rotli_i64(tcg_ctx, ret, ret, len); + goto done; + } +#endif + + mask = (1ull << len) - 1; + if (ofs + len < 64) { + tcg_gen_andi_i64(tcg_ctx, t1, arg2, mask); + tcg_gen_shli_i64(tcg_ctx, t1, t1, ofs); + } else { + tcg_gen_shli_i64(tcg_ctx, t1, arg2, ofs); + } + tcg_gen_andi_i64(tcg_ctx, ret, arg1, ~(mask << ofs)); + tcg_gen_or_i64(tcg_ctx, ret, ret, t1); +#if TCG_TARGET_HAS_extract2_i64 + done: +#endif + tcg_temp_free_i64(tcg_ctx, t1); +} + +void tcg_gen_deposit_z_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg, + unsigned int ofs, unsigned int len) +{ + tcg_debug_assert(ofs < 64); + tcg_debug_assert(len > 0); + tcg_debug_assert(len <= 64); + tcg_debug_assert(ofs + len <= 64); + + if (ofs + len == 64) { + tcg_gen_shli_i64(tcg_ctx, ret, arg, ofs); + } else if (ofs == 0) { + tcg_gen_andi_i64(tcg_ctx, ret, arg, (1ull << len) - 1); +#if TCG_TARGET_HAS_deposit_i64 + } else if (TCG_TARGET_deposit_i64_valid(ofs, len)) { + TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); + tcg_gen_op5ii_i64(tcg_ctx, INDEX_op_deposit_i64, ret, zero, arg, ofs, len); + tcg_temp_free_i64(tcg_ctx, zero); +#endif + } else { +#if TCG_TARGET_REG_BITS == 32 + if (ofs >= 32) { + tcg_gen_deposit_z_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg), + ofs - 32, len); + tcg_gen_movi_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), 0); + return; + } + if (ofs + len <= 32) { + tcg_gen_deposit_z_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg), ofs, len); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); + return; + } +#endif + /* To help two-operand hosts we prefer to zero-extend first, + which allows ARG to stay live. */ + switch (len) { + case 32: +#if TCG_TARGET_HAS_ext32u_i64 + tcg_gen_ext32u_i64(tcg_ctx, ret, arg); + tcg_gen_shli_i64(tcg_ctx, ret, ret, ofs); + return; +#endif + break; + case 16: +#if TCG_TARGET_HAS_ext16u_i64 + tcg_gen_ext16u_i64(tcg_ctx, ret, arg); + tcg_gen_shli_i64(tcg_ctx, ret, ret, ofs); + return; +#endif + break; + case 8: +#if TCG_TARGET_HAS_ext8u_i64 + tcg_gen_ext8u_i64(tcg_ctx, ret, arg); + tcg_gen_shli_i64(tcg_ctx, ret, ret, ofs); + return; +#endif + break; + } + /* Otherwise prefer zero-extension over AND for code size. 
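+ Shifting first leaves zeros below the field, and the (ofs + len)-sized
+ zero-extension then clears everything above it, so no wide mask immediate is needed.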
*/ + switch (ofs + len) { + case 32: +#if TCG_TARGET_HAS_ext32u_i64 + tcg_gen_shli_i64(tcg_ctx, ret, arg, ofs); + tcg_gen_ext32u_i64(tcg_ctx, ret, ret); + return; +#endif + break; + case 16: +#if TCG_TARGET_HAS_ext16u_i64 + tcg_gen_shli_i64(tcg_ctx, ret, arg, ofs); + tcg_gen_ext16u_i64(tcg_ctx, ret, ret); + return; +#endif + break; + case 8: +#if TCG_TARGET_HAS_ext8u_i64 + tcg_gen_shli_i64(tcg_ctx, ret, arg, ofs); + tcg_gen_ext8u_i64(tcg_ctx, ret, ret); + return; +#endif + break; + } + tcg_gen_andi_i64(tcg_ctx, ret, arg, (1ull << len) - 1); + tcg_gen_shli_i64(tcg_ctx, ret, ret, ofs); + } +} + +void tcg_gen_extract_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg, + unsigned int ofs, unsigned int len) +{ + tcg_debug_assert(ofs < 64); + tcg_debug_assert(len > 0); + tcg_debug_assert(len <= 64); + tcg_debug_assert(ofs + len <= 64); + + /* Canonicalize certain special cases, even if extract is supported. */ + if (ofs + len == 64) { + tcg_gen_shri_i64(tcg_ctx, ret, arg, 64 - len); + return; + } + if (ofs == 0) { + tcg_gen_andi_i64(tcg_ctx, ret, arg, (1ull << len) - 1); + return; + } + +#if TCG_TARGET_REG_BITS == 32 + /* Look for a 32-bit extract within one of the two words. */ + if (ofs >= 32) { + tcg_gen_extract_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg), ofs - 32, len); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); + return; + } + if (ofs + len <= 32) { + tcg_gen_extract_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg), ofs, len); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); + return; + } + /* The field is split across two words. One double-word + shift is better than two double-word shifts. */ + goto do_shift_and; +#endif + +#if TCG_TARGET_HAS_extract_i64 + if (TCG_TARGET_extract_i64_valid(ofs, len)) { + tcg_gen_op4ii_i64(tcg_ctx, INDEX_op_extract_i64, ret, arg, ofs, len); + return; + } +#endif + + /* Assume that zero-extension, if available, is cheaper than a shift. */ + switch (ofs + len) { + case 32: +#if TCG_TARGET_HAS_ext32u_i64 + tcg_gen_ext32u_i64(tcg_ctx, ret, arg); + tcg_gen_shri_i64(tcg_ctx, ret, ret, ofs); + return; +#endif + break; + case 16: +#if TCG_TARGET_HAS_ext16u_i64 + tcg_gen_ext16u_i64(tcg_ctx, ret, arg); + tcg_gen_shri_i64(tcg_ctx, ret, ret, ofs); + return; +#endif + break; + case 8: +#if TCG_TARGET_HAS_ext8u_i64 + tcg_gen_ext8u_i64(tcg_ctx, ret, arg); + tcg_gen_shri_i64(tcg_ctx, ret, ret, ofs); + return; +#endif + break; + } + + /* ??? Ideally we'd know what values are available for immediate AND. + Assume that 8 bits are available, plus the special cases of 16 and 32, + so that we get ext8u, ext16u, and ext32u. */ + switch (len) { + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 16: + case 32: +#if TCG_TARGET_REG_BITS == 32 + do_shift_and: +#endif + tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs); + tcg_gen_andi_i64(tcg_ctx, ret, ret, (1ull << len) - 1); + break; + default: + tcg_gen_shli_i64(tcg_ctx, ret, arg, 64 - len - ofs); + tcg_gen_shri_i64(tcg_ctx, ret, ret, 64 - len); + break; + } +} + +void tcg_gen_sextract_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 arg, + unsigned int ofs, unsigned int len) +{ + tcg_debug_assert(ofs < 64); + tcg_debug_assert(len > 0); + tcg_debug_assert(len <= 64); + tcg_debug_assert(ofs + len <= 64); + + /* Canonicalize certain special cases, even if sextract is supported. 
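+ A field that reaches bit 63 is a plain arithmetic shift right, and a field
+ at bit 0 of width 8, 16 or 32 is a direct sign-extension.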
*/ + if (ofs + len == 64) { + tcg_gen_sari_i64(tcg_ctx, ret, arg, 64 - len); + return; + } + if (ofs == 0) { + switch (len) { + case 32: + tcg_gen_ext32s_i64(tcg_ctx, ret, arg); + return; + case 16: + tcg_gen_ext16s_i64(tcg_ctx, ret, arg); + return; + case 8: + tcg_gen_ext8s_i64(tcg_ctx, ret, arg); + return; + } + } + +#if TCG_TARGET_REG_BITS == 32 + /* Look for a 32-bit extract within one of the two words. */ + if (ofs >= 32) { + tcg_gen_sextract_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg), ofs - 32, len); + } else if (ofs + len <= 32) { + tcg_gen_sextract_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg), ofs, len); + } else if (ofs == 0) { + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, arg)); + tcg_gen_sextract_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, arg), 0, len - 32); + return; + } else if (len > 32) { + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + /* Extract the bits for the high word normally. */ + tcg_gen_sextract_i32(tcg_ctx, t, TCGV_HIGH(tcg_ctx, arg), ofs + 32, len - 32); + /* Shift the field down for the low part. */ + tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs); + /* Overwrite the shift into the high part. */ + tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), t); + tcg_temp_free_i32(tcg_ctx, t); + return; + } else { + /* Shift the field down for the low part, such that the + field sits at the MSB. */ + tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs + len - 32); + /* Shift the field down from the MSB, sign extending. */ + tcg_gen_sari_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 32 - len); + } + /* Sign-extend the field from 32 bits. */ + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); + return; +#endif + +#if TCG_TARGET_HAS_sextract_i64 + if (TCG_TARGET_extract_i64_valid(ofs, len)) { + tcg_gen_op4ii_i64(tcg_ctx, INDEX_op_sextract_i64, ret, arg, ofs, len); + return; + } +#endif + + /* Assume that sign-extension, if available, is cheaper than a shift. */ + switch (ofs + len) { + case 32: +#if TCG_TARGET_HAS_ext32s_i64 + tcg_gen_ext32s_i64(tcg_ctx, ret, arg); + tcg_gen_sari_i64(tcg_ctx, ret, ret, ofs); + return; +#endif + break; + case 16: +#if TCG_TARGET_HAS_ext16s_i64 + tcg_gen_ext16s_i64(tcg_ctx, ret, arg); + tcg_gen_sari_i64(tcg_ctx, ret, ret, ofs); + return; +#endif + break; + case 8: +#if TCG_TARGET_HAS_ext8s_i64 + tcg_gen_ext8s_i64(tcg_ctx, ret, arg); + tcg_gen_sari_i64(tcg_ctx, ret, ret, ofs); + return; +#endif + break; + } + switch (len) { + case 32: +#if TCG_TARGET_HAS_ext32s_i64 + tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs); + tcg_gen_ext32s_i64(tcg_ctx, ret, ret); + return; +#endif + break; + case 16: +#if TCG_TARGET_HAS_ext16s_i64 + tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs); + tcg_gen_ext16s_i64(tcg_ctx, ret, ret); + return; +#endif + break; + case 8: +#if TCG_TARGET_HAS_ext8s_i64 + tcg_gen_shri_i64(tcg_ctx, ret, arg, ofs); + tcg_gen_ext8s_i64(tcg_ctx, ret, ret); + return; +#endif + break; + } + tcg_gen_shli_i64(tcg_ctx, ret, arg, 64 - len - ofs); + tcg_gen_sari_i64(tcg_ctx, ret, ret, 64 - len); +} + +/* + * Extract 64 bits from a 128-bit input, ah:al, starting from ofs. + * Unlike tcg_gen_extract_i64 above, len is fixed at 64. 
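+ * The result is bits [ofs, ofs + 64) of ah:al, i.e.
+ * (al >> ofs) | (ah << (64 - ofs)) when 0 < ofs < 64.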
+ */ +void tcg_gen_extract2_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah, + unsigned int ofs) +{ + tcg_debug_assert(ofs <= 64); + if (ofs == 0) { + tcg_gen_mov_i64(tcg_ctx, ret, al); + } else if (ofs == 64) { + tcg_gen_mov_i64(tcg_ctx, ret, ah); + } else if (al == ah) { + tcg_gen_rotri_i64(tcg_ctx, ret, al, ofs); + } else { +#if TCG_TARGET_HAS_extract2_i64 + tcg_gen_op4i_i64(tcg_ctx, INDEX_op_extract2_i64, ret, al, ah, ofs); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, t0, al, ofs); + tcg_gen_deposit_i64(tcg_ctx, ret, t0, ah, 64 - ofs, ofs); + tcg_temp_free_i64(tcg_ctx, t0); +#endif + } +} + +void tcg_gen_movcond_i64(TCGContext *tcg_ctx, TCGCond cond, TCGv_i64 ret, TCGv_i64 c1, + TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_mov_i64(tcg_ctx, ret, v1); + } else if (cond == TCG_COND_NEVER) { + tcg_gen_mov_i64(tcg_ctx, ret, v2); + } else { +#if TCG_TARGET_REG_BITS == 32 + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_op6i_i32(tcg_ctx, INDEX_op_setcond2_i32, t0, + TCGV_LOW(tcg_ctx, c1), TCGV_HIGH(tcg_ctx, c1), + TCGV_LOW(tcg_ctx, c2), TCGV_HIGH(tcg_ctx, c2), cond); + +#if TCG_TARGET_HAS_movcond_i32 + tcg_gen_movi_i32(tcg_ctx, t1, 0); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, TCGV_LOW(tcg_ctx, ret), t0, t1, + TCGV_LOW(tcg_ctx, v1), TCGV_LOW(tcg_ctx, v2)); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, TCGV_HIGH(tcg_ctx, ret), t0, t1, + TCGV_HIGH(tcg_ctx, v1), TCGV_HIGH(tcg_ctx, v2)); +#else + tcg_gen_neg_i32(tcg_ctx, t0, t0); + + tcg_gen_and_i32(tcg_ctx, t1, TCGV_LOW(tcg_ctx, v1), t0); + tcg_gen_andc_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, v2), t0); + tcg_gen_or_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), t1); + + tcg_gen_and_i32(tcg_ctx, t1, TCGV_HIGH(tcg_ctx, v1), t0); + tcg_gen_andc_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, v2), t0); + tcg_gen_or_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_HIGH(tcg_ctx, ret), t1); +#endif + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); +#elif TCG_TARGET_HAS_movcond_i64 + tcg_gen_op6i_i64(tcg_ctx, INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_setcond_i64(tcg_ctx, cond, t0, c1, c2); + tcg_gen_neg_i64(tcg_ctx, t0, t0); + tcg_gen_and_i64(tcg_ctx, t1, v1, t0); + tcg_gen_andc_i64(tcg_ctx, ret, v2, t0); + tcg_gen_or_i64(tcg_ctx, ret, ret, t1); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +#endif + } +} + +void tcg_gen_add2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, + TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) +{ +#if TCG_TARGET_HAS_add2_i64 + tcg_gen_op6_i64(tcg_ctx, INDEX_op_add2_i64, rl, rh, al, ah, bl, bh); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_add_i64(tcg_ctx, t0, al, bl); + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_LTU, t1, t0, al); + tcg_gen_add_i64(tcg_ctx, rh, ah, bh); + tcg_gen_add_i64(tcg_ctx, rh, rh, t1); + tcg_gen_mov_i64(tcg_ctx, rl, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +#endif +} + +void tcg_gen_sub2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, + TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) +{ +#if TCG_TARGET_HAS_sub2_i64 + tcg_gen_op6_i64(tcg_ctx, INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + 
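+ /* Compute the low half first; (al < bl) is the borrow, which is then
+ subtracted from the high half. */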
tcg_gen_sub_i64(tcg_ctx, t0, al, bl); + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_LTU, t1, al, bl); + tcg_gen_sub_i64(tcg_ctx, rh, ah, bh); + tcg_gen_sub_i64(tcg_ctx, rh, rh, t1); + tcg_gen_mov_i64(tcg_ctx, rl, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +#endif +} + +void tcg_gen_mulu2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_HAS_mulu2_i64 + tcg_gen_op4_i64(tcg_ctx, INDEX_op_mulu2_i64, rl, rh, arg1, arg2); +#elif TCG_TARGET_HAS_muluh_i64 + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_op3_i64(tcg_ctx, INDEX_op_mul_i64, t, arg1, arg2); + tcg_gen_op3_i64(tcg_ctx, INDEX_op_muluh_i64, rh, arg1, arg2); + tcg_gen_mov_i64(tcg_ctx, rl, t); + tcg_temp_free_i64(tcg_ctx, t); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_mul_i64(tcg_ctx, t0, arg1, arg2); + gen_helper_muluh_i64(tcg_ctx, rh, arg1, arg2); + tcg_gen_mov_i64(tcg_ctx, rl, t0); + tcg_temp_free_i64(tcg_ctx, t0); +#endif +} + +void tcg_gen_muls2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_HAS_muls2_i64 + tcg_gen_op4_i64(tcg_ctx, INDEX_op_muls2_i64, rl, rh, arg1, arg2); +#elif TCG_TARGET_HAS_mulsh_i64 + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_op3_i64(tcg_ctx, INDEX_op_mul_i64, t, arg1, arg2); + tcg_gen_op3_i64(tcg_ctx, INDEX_op_mulsh_i64, rh, arg1, arg2); + tcg_gen_mov_i64(tcg_ctx, rl, t); + tcg_temp_free_i64(tcg_ctx, t); +#elif TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64 + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_mulu2_i64(tcg_ctx, t0, t1, arg1, arg2); + /* Adjust for negative inputs. */ + tcg_gen_sari_i64(tcg_ctx, t2, arg1, 63); + tcg_gen_sari_i64(tcg_ctx, t3, arg2, 63); + tcg_gen_and_i64(tcg_ctx, t2, t2, arg2); + tcg_gen_and_i64(tcg_ctx, t3, t3, arg1); + tcg_gen_sub_i64(tcg_ctx, rh, t1, t2); + tcg_gen_sub_i64(tcg_ctx, rh, rh, t3); + tcg_gen_mov_i64(tcg_ctx, rl, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); +#else + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_mul_i64(tcg_ctx, t0, arg1, arg2); + gen_helper_mulsh_i64(tcg_ctx, rh, arg1, arg2); + tcg_gen_mov_i64(tcg_ctx, rl, t0); + tcg_temp_free_i64(tcg_ctx, t0); +#endif +} + +void tcg_gen_mulsu2_i64(TCGContext *tcg_ctx, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_mulu2_i64(tcg_ctx, t0, t1, arg1, arg2); + /* Adjust for negative input for the signed arg1. 
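+ If arg1 < 0, its unsigned value is arg1 + 2^64, so the unsigned product
+ overcounts the high half by exactly arg2; subtract that back out.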
*/ + tcg_gen_sari_i64(tcg_ctx, t2, arg1, 63); + tcg_gen_and_i64(tcg_ctx, t2, t2, arg2); + tcg_gen_sub_i64(tcg_ctx, rh, t1, t2); + tcg_gen_mov_i64(tcg_ctx, rl, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +} + +void tcg_gen_smin_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, ret, a, b, a, b); +} + +void tcg_gen_umin_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, ret, a, b, a, b); +} + +void tcg_gen_smax_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, ret, a, b, b, a); +} + +void tcg_gen_umax_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LTU, ret, a, b, b, a); +} + +void tcg_gen_abs_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 a) +{ + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_sari_i64(tcg_ctx, t, a, 63); + tcg_gen_xor_i64(tcg_ctx, ret, a, t); + tcg_gen_sub_i64(tcg_ctx, ret, ret, t); + tcg_temp_free_i64(tcg_ctx, t); +} + +/* Size changing operations. */ + +void tcg_gen_extrl_i64_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_mov_i32(tcg_ctx, ret, TCGV_LOW(tcg_ctx, arg)); +#elif TCG_TARGET_HAS_extrl_i64_i32 + tcg_gen_op2(tcg_ctx, INDEX_op_extrl_i64_i32, + tcgv_i32_arg(tcg_ctx, ret), tcgv_i64_arg(tcg_ctx, arg)); +#else + tcg_gen_mov_i32(tcg_ctx, ret, (TCGv_i32)arg); +#endif +} + +void tcg_gen_extrh_i64_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_mov_i32(tcg_ctx, ret, TCGV_HIGH(tcg_ctx, arg)); +#elif TCG_TARGET_HAS_extrh_i64_i32 + tcg_gen_op2(tcg_ctx, INDEX_op_extrh_i64_i32, + tcgv_i32_arg(tcg_ctx, ret), tcgv_i64_arg(tcg_ctx, arg)); +#else + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, t, arg, 32); + tcg_gen_mov_i32(tcg_ctx, ret, (TCGv_i32)t); + tcg_temp_free_i64(tcg_ctx, t); +#endif +} + +void tcg_gen_extu_i32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i32 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg); + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), 0); +#else + tcg_gen_op2(tcg_ctx, INDEX_op_extu_i32_i64, + tcgv_i64_arg(tcg_ctx, ret), tcgv_i32_arg(tcg_ctx, arg)); +#endif +} + +void tcg_gen_ext_i32_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i32 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, ret), arg); + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, ret), TCGV_LOW(tcg_ctx, ret), 31); +#else + tcg_gen_op2(tcg_ctx, INDEX_op_ext_i32_i64, + tcgv_i64_arg(tcg_ctx, ret), tcgv_i32_arg(tcg_ctx, arg)); +#endif +} + +void tcg_gen_concat_i32_i64(TCGContext *tcg_ctx, TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx, dest), low); + tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, dest), high); + return; +#else + TCGv_i64 tmp; + + tmp = tcg_temp_new_i64(tcg_ctx); + /* These extensions are only needed for type correctness. + We may be able to do better given target specific information. */ + tcg_gen_extu_i32_i64(tcg_ctx, tmp, high); + tcg_gen_extu_i32_i64(tcg_ctx, dest, low); + /* If deposit is available, use it. Otherwise use the extra + knowledge that we have of the zero-extensions above. 
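+ Namely, the high halves of both operands are already zero, so shifting
+ HIGH into the top 32 bits and ORing it in cannot disturb the LOW bits.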
*/ +#if TCG_TARGET_HAS_deposit_i64 + if (TCG_TARGET_deposit_i64_valid(32, 32)) { + tcg_gen_deposit_i64(tcg_ctx, dest, dest, tmp, 32, 32); + } else { +#endif + tcg_gen_shli_i64(tcg_ctx, tmp, tmp, 32); + tcg_gen_or_i64(tcg_ctx, dest, dest, tmp); +#if TCG_TARGET_HAS_deposit_i64 + } +#endif + tcg_temp_free_i64(tcg_ctx, tmp); +#endif +} + +void tcg_gen_extr_i64_i32(TCGContext *tcg_ctx, TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_mov_i32(tcg_ctx, lo, TCGV_LOW(tcg_ctx, arg)); + tcg_gen_mov_i32(tcg_ctx, hi, TCGV_HIGH(tcg_ctx, arg)); +#else + tcg_gen_extrl_i64_i32(tcg_ctx, lo, arg); + tcg_gen_extrh_i64_i32(tcg_ctx, hi, arg); +#endif +} + +void tcg_gen_extr32_i64(TCGContext *tcg_ctx, TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg) +{ + tcg_gen_ext32u_i64(tcg_ctx, lo, arg); + tcg_gen_shri_i64(tcg_ctx, hi, arg, 32); +} + +/* QEMU specific operations. */ + +void tcg_gen_exit_tb(TCGContext *tcg_ctx, TranslationBlock *tb, unsigned idx) +{ + uintptr_t val = (uintptr_t)tb + idx; + + if (tb == NULL) { + tcg_debug_assert(idx == 0); + } else if (idx <= TB_EXIT_IDXMAX) { +#ifdef CONFIG_DEBUG_TCG + /* This is an exit following a goto_tb. Verify that we have + seen this numbered exit before, via tcg_gen_goto_tb. */ + tcg_debug_assert(tcg_ctx->goto_tb_issue_mask & (1 << idx)); +#endif + /* When not chaining, exit without indicating a link. */ + //if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { + // val = 0; + //} + } else { + /* This is an exit via the exitreq label. */ + tcg_debug_assert(idx == TB_EXIT_REQUESTED); + } + + tcg_gen_op1i(tcg_ctx, INDEX_op_exit_tb, val); +} + +void tcg_gen_goto_tb(TCGContext *tcg_ctx, unsigned idx) +{ + /* We only support two chained exits. */ + tcg_debug_assert(idx <= TB_EXIT_IDXMAX); +#ifdef CONFIG_DEBUG_TCG + /* Verify that we haven't seen this numbered exit before. */ + tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0); + tcg_ctx->goto_tb_issue_mask |= 1 << idx; +#endif + /* When not chaining, we simply fall through to the "fallback" exit. */ + + // if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) + tcg_gen_op1i(tcg_ctx, INDEX_op_goto_tb, idx); +} + +void tcg_gen_lookup_and_goto_ptr(TCGContext *tcg_ctx) +{ +#if TCG_TARGET_HAS_goto_ptr + TCGv_ptr ptr; + + ptr = tcg_temp_new_ptr(tcg_ctx); + gen_helper_lookup_tb_ptr(tcg_ctx, ptr, tcg_ctx->cpu_env); + tcg_gen_op1i(tcg_ctx, INDEX_op_goto_ptr, tcgv_ptr_arg(tcg_ctx, ptr)); + tcg_temp_free_ptr(tcg_ctx, ptr); +#else + tcg_gen_exit_tb(tcg_ctx, NULL, 0); +#endif +} + +static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) +{ + /* Trigger the asserts within as early as possible.
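+ get_alignment_bits() contains debug assertions on the alignment encoding.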
*/ + (void)get_alignment_bits(op); + + switch (op & MO_SIZE) { + case MO_8: + op &= ~MO_BSWAP; + break; + case MO_16: + break; + case MO_32: + if (!is64) { + op &= ~MO_SIGN; + } + break; + case MO_64: + if (!is64) { + tcg_abort(); + } + break; + } + if (st) { + op &= ~MO_SIGN; + } + return op; +} + +static void gen_ldst_i32(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i32 val, TCGv addr, + MemOp memop, TCGArg idx) +{ + TCGMemOpIdx oi = make_memop_idx(memop, idx); +#if TARGET_LONG_BITS == 32 + tcg_gen_op3i_i32(tcg_ctx, opc, val, addr, oi); +#else +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_op4i_i32(tcg_ctx, opc, val, TCGV_LOW(tcg_ctx, addr), TCGV_HIGH(tcg_ctx, addr), oi); +#else + tcg_gen_op3(tcg_ctx, opc, tcgv_i32_arg(tcg_ctx, val), tcgv_i64_arg(tcg_ctx, addr), oi); +#endif +#endif +} + +static void gen_ldst_i64(TCGContext *tcg_ctx, TCGOpcode opc, TCGv_i64 val, TCGv addr, + MemOp memop, TCGArg idx) +{ + TCGMemOpIdx oi = make_memop_idx(memop, idx); +#if TARGET_LONG_BITS == 32 +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_op4i_i32(tcg_ctx, opc, TCGV_LOW(tcg_ctx, val), TCGV_HIGH(tcg_ctx, val), addr, oi); +#else + tcg_gen_op3(tcg_ctx, opc, tcgv_i64_arg(tcg_ctx, val), tcgv_i32_arg(tcg_ctx, addr), oi); +#endif +#else +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_op5i_i32(tcg_ctx, opc, TCGV_LOW(tcg_ctx, val), TCGV_HIGH(tcg_ctx, val), + TCGV_LOW(tcg_ctx, addr), TCGV_HIGH(tcg_ctx, addr), oi); +#else + tcg_gen_op3i_i64(tcg_ctx, opc, val, addr, oi); +#endif +#endif +} + +// Unicorn engine +// Check whether the last memory access was invalid; +// if so, we jump to the block epilogue to quit immediately. +void check_exit_request(TCGContext *tcg_ctx) +{ + TCGv_i32 count; + + // Unicorn: + // For ARM IT blocks, we cannot exit in the middle of the + // block, and this is our hack here. + if (tcg_ctx->uc->no_exit_request) { + return; + } + + count = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_ld_i32(tcg_ctx, count, tcg_ctx->cpu_env, + offsetof(ArchCPU, neg.icount_decr.u32) - + offsetof(ArchCPU, env)); + + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, count, 0, tcg_ctx->exitreq_label); + + tcg_temp_free_i32(tcg_ctx, count); +} + +static void tcg_gen_req_mo(TCGContext *tcg_ctx, TCGBar type) +{ +#ifdef TCG_GUEST_DEFAULT_MO + type &= TCG_GUEST_DEFAULT_MO; +#endif + type &= ~TCG_TARGET_DEFAULT_MO; + if (type) { + tcg_gen_mb(tcg_ctx, type | TCG_BAR_SC); + } +} + +void tcg_gen_qemu_ld_i32(TCGContext *tcg_ctx, TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) +{ + MemOp orig_memop; + + tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_LD | TCG_MO_ST_LD); + memop = tcg_canonicalize_memop(memop, 0, 0); + + orig_memop = memop; + if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { + memop &= ~MO_BSWAP; + /* The bswap primitive requires zero-extended input.
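+ Drop MO_SIGN for a signed 16-bit load here and redo the sign-extension
+ by hand after the byte swap below.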
*/ + if ((memop & MO_SSIZE) == MO_SW) { + memop &= ~MO_SIGN; + } + } + + gen_ldst_i32(tcg_ctx, INDEX_op_qemu_ld_i32, val, addr, memop, idx); + + if ((orig_memop ^ memop) & MO_BSWAP) { + switch (orig_memop & MO_SIZE) { + case MO_16: + tcg_gen_bswap16_i32(tcg_ctx, val, val); + if (orig_memop & MO_SIGN) { + tcg_gen_ext16s_i32(tcg_ctx, val, val); + } + break; + case MO_32: + tcg_gen_bswap32_i32(tcg_ctx, val, val); + break; + default: + g_assert_not_reached(); + } + } + + check_exit_request(tcg_ctx); +} + +void tcg_gen_qemu_st_i32(TCGContext *tcg_ctx, TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) +{ + TCGv_i32 swap = NULL; + + tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_ST | TCG_MO_ST_ST); + memop = tcg_canonicalize_memop(memop, 0, 1); + +#if !TCG_TARGET_HAS_MEMORY_BSWAP + if (memop & MO_BSWAP) { + swap = tcg_temp_new_i32(tcg_ctx); + switch (memop & MO_SIZE) { + case MO_16: + tcg_gen_ext16u_i32(tcg_ctx, swap, val); + tcg_gen_bswap16_i32(tcg_ctx, swap, swap); + break; + case MO_32: + tcg_gen_bswap32_i32(tcg_ctx, swap, val); + break; + default: + g_assert_not_reached(); + } + val = swap; + memop &= ~MO_BSWAP; + } +#endif + + gen_ldst_i32(tcg_ctx, INDEX_op_qemu_st_i32, val, addr, memop, idx); + + if (swap) { + tcg_temp_free_i32(tcg_ctx, swap); + } + + check_exit_request(tcg_ctx); +} + +void tcg_gen_qemu_ld_i64(TCGContext *tcg_ctx, TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) +{ + MemOp orig_memop; + +#if TCG_TARGET_REG_BITS == 32 + if ((memop & MO_SIZE) < MO_64) { + tcg_gen_qemu_ld_i32(tcg_ctx, TCGV_LOW(tcg_ctx, val), addr, idx, memop); + if (memop & MO_SIGN) { + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, val), TCGV_LOW(tcg_ctx, val), 31); + } else { + tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(tcg_ctx, val), 0); + } + check_exit_request(tcg_ctx); + return; + } +#endif + + tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_LD | TCG_MO_ST_LD); + memop = tcg_canonicalize_memop(memop, 1, 0); + + orig_memop = memop; +#if !TCG_TARGET_HAS_MEMORY_BSWAP + if (memop & MO_BSWAP) { + memop &= ~MO_BSWAP; + /* The bswap primitive requires zero-extended input. 
*/ + if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) { + memop &= ~MO_SIGN; + } + } +#endif + + gen_ldst_i64(tcg_ctx, INDEX_op_qemu_ld_i64, val, addr, memop, idx); + + if ((orig_memop ^ memop) & MO_BSWAP) { + switch (orig_memop & MO_SIZE) { + case MO_16: + tcg_gen_bswap16_i64(tcg_ctx, val, val); + if (orig_memop & MO_SIGN) { + tcg_gen_ext16s_i64(tcg_ctx, val, val); + } + break; + case MO_32: + tcg_gen_bswap32_i64(tcg_ctx, val, val); + if (orig_memop & MO_SIGN) { + tcg_gen_ext32s_i64(tcg_ctx, val, val); + } + break; + case MO_64: + tcg_gen_bswap64_i64(tcg_ctx, val, val); + break; + default: + g_assert_not_reached(); + } + } + check_exit_request(tcg_ctx); +} + +void tcg_gen_qemu_st_i64(TCGContext *tcg_ctx, TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) +{ + TCGv_i64 swap = NULL; + +#if TCG_TARGET_REG_BITS == 32 + if ((memop & MO_SIZE) < MO_64) { + tcg_gen_qemu_st_i32(tcg_ctx, TCGV_LOW(tcg_ctx, val), addr, idx, memop); + check_exit_request(tcg_ctx); + return; + } +#endif + + tcg_gen_req_mo(tcg_ctx, TCG_MO_LD_ST | TCG_MO_ST_ST); + memop = tcg_canonicalize_memop(memop, 1, 1); + +#if !TCG_TARGET_HAS_MEMORY_BSWAP + if (memop & MO_BSWAP) { + swap = tcg_temp_new_i64(tcg_ctx); + switch (memop & MO_SIZE) { + case MO_16: + tcg_gen_ext16u_i64(tcg_ctx, swap, val); + tcg_gen_bswap16_i64(tcg_ctx, swap, swap); + break; + case MO_32: + tcg_gen_ext32u_i64(tcg_ctx, swap, val); + tcg_gen_bswap32_i64(tcg_ctx, swap, swap); + break; + case MO_64: + tcg_gen_bswap64_i64(tcg_ctx, swap, val); + break; + default: + g_assert_not_reached(); + } + val = swap; + memop &= ~MO_BSWAP; + } +#endif + + gen_ldst_i64(tcg_ctx, INDEX_op_qemu_st_i64, val, addr, memop, idx); + + if (swap) { + tcg_temp_free_i64(tcg_ctx, swap); + } + check_exit_request(tcg_ctx); +} + +static void tcg_gen_ext_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv_i32 val, MemOp opc) +{ + switch (opc & MO_SSIZE) { + case MO_SB: + tcg_gen_ext8s_i32(tcg_ctx, ret, val); + break; + case MO_UB: + tcg_gen_ext8u_i32(tcg_ctx, ret, val); + break; + case MO_SW: + tcg_gen_ext16s_i32(tcg_ctx, ret, val); + break; + case MO_UW: + tcg_gen_ext16u_i32(tcg_ctx, ret, val); + break; + default: + tcg_gen_mov_i32(tcg_ctx, ret, val); + break; + } +} + +static void tcg_gen_ext_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv_i64 val, MemOp opc) +{ + switch (opc & MO_SSIZE) { + case MO_SB: + tcg_gen_ext8s_i64(tcg_ctx, ret, val); + break; + case MO_UB: + tcg_gen_ext8u_i64(tcg_ctx, ret, val); + break; + case MO_SW: + tcg_gen_ext16s_i64(tcg_ctx, ret, val); + break; + case MO_UW: + tcg_gen_ext16u_i64(tcg_ctx, ret, val); + break; + case MO_SL: + tcg_gen_ext32s_i64(tcg_ctx, ret, val); + break; + case MO_UL: + tcg_gen_ext32u_i64(tcg_ctx, ret, val); + break; + default: + tcg_gen_mov_i64(tcg_ctx, ret, val); + break; + } +} + +typedef void (*gen_atomic_cx_i32)(TCGContext *tcg_ctx, TCGv_i32, TCGv_env, TCGv, + TCGv_i32, TCGv_i32, TCGv_i32); +typedef void (*gen_atomic_cx_i64)(TCGContext *tcg_ctx, TCGv_i64, TCGv_env, TCGv, + TCGv_i64, TCGv_i64, TCGv_i32); +typedef void (*gen_atomic_op_i32)(TCGContext *tcg_ctx, TCGv_i32, TCGv_env, TCGv, + TCGv_i32, TCGv_i32); +typedef void (*gen_atomic_op_i64)(TCGContext *tcg_ctx, TCGv_i64, TCGv_env, TCGv, + TCGv_i64, TCGv_i32); + +#ifdef CONFIG_ATOMIC64 +# define WITH_ATOMIC64(X) X, +#else +# define WITH_ATOMIC64(X) +#endif + +static void * const table_cmpxchg[16] = { + [MO_8] = gen_helper_atomic_cmpxchgb, + [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le, + [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be, + [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le, + [MO_32 | 
MO_BE] = gen_helper_atomic_cmpxchgl_be, + WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le) + WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be) +}; + +void tcg_gen_atomic_cmpxchg_i32(TCGContext *tcg_ctx, TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, + TCGv_i32 newv, TCGArg idx, MemOp memop) +{ + memop = tcg_canonicalize_memop(memop, 0, 0); + + if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_ext_i32(tcg_ctx, t2, cmpv, memop & MO_SIZE); + + tcg_gen_qemu_ld_i32(tcg_ctx, t1, addr, idx, memop & ~MO_SIGN); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, t2, t1, t2, newv, t1); + tcg_gen_qemu_st_i32(tcg_ctx, t2, addr, idx, memop); + tcg_temp_free_i32(tcg_ctx, t2); + + if (memop & MO_SIGN) { + tcg_gen_ext_i32(tcg_ctx, retv, t1, memop); + } else { + tcg_gen_mov_i32(tcg_ctx, retv, t1); + } + tcg_temp_free_i32(tcg_ctx, t1); + } else { + gen_atomic_cx_i32 gen; + + gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; + tcg_debug_assert(gen != NULL); + + { + TCGv_i32 oi = tcg_const_i32(tcg_ctx, make_memop_idx(memop & ~MO_SIGN, idx)); + gen(tcg_ctx, retv, tcg_ctx->cpu_env, addr, cmpv, newv, oi); + tcg_temp_free_i32(tcg_ctx, oi); + } + + if (memop & MO_SIGN) { + tcg_gen_ext_i32(tcg_ctx, retv, retv, memop); + } + } +} + +void tcg_gen_atomic_cmpxchg_i64(TCGContext *tcg_ctx, TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, + TCGv_i64 newv, TCGArg idx, MemOp memop) +{ + memop = tcg_canonicalize_memop(memop, 1, 0); + + if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) { + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_i64(tcg_ctx, t2, cmpv, memop & MO_SIZE); + + tcg_gen_qemu_ld_i64(tcg_ctx, t1, addr, idx, memop & ~MO_SIGN); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, t2, t1, t2, newv, t1); + tcg_gen_qemu_st_i64(tcg_ctx, t2, addr, idx, memop); + tcg_temp_free_i64(tcg_ctx, t2); + + if (memop & MO_SIGN) { + tcg_gen_ext_i64(tcg_ctx, retv, t1, memop); + } else { + tcg_gen_mov_i64(tcg_ctx, retv, t1); + } + tcg_temp_free_i64(tcg_ctx, t1); + } else if ((memop & MO_SIZE) == MO_64) { +#ifdef CONFIG_ATOMIC64 + gen_atomic_cx_i64 gen; + + gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; + tcg_debug_assert(gen != NULL); + + { + TCGv_i32 oi = tcg_const_i32(tcg_ctx, make_memop_idx(memop, idx)); + gen(tcg_ctx, retv, tcg_ctx->cpu_env, addr, cmpv, newv, oi); + tcg_temp_free_i32(tcg_ctx, oi); + } +#else + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + /* Produce a result, so that we have a well-formed opcode stream + with respect to uses of the result in the (dead) code following. 
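+ The exit_atomic helper does not return here, so the value written is never used.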
*/ + tcg_gen_movi_i64(tcg_ctx, retv, 0); +#endif /* CONFIG_ATOMIC64 */ + } else { + TCGv_i32 c32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 n32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 r32 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_extrl_i64_i32(tcg_ctx, c32, cmpv); + tcg_gen_extrl_i64_i32(tcg_ctx, n32, newv); + tcg_gen_atomic_cmpxchg_i32(tcg_ctx, r32, addr, c32, n32, idx, memop & ~MO_SIGN); + tcg_temp_free_i32(tcg_ctx, c32); + tcg_temp_free_i32(tcg_ctx, n32); + + tcg_gen_extu_i32_i64(tcg_ctx, retv, r32); + tcg_temp_free_i32(tcg_ctx, r32); + + if (memop & MO_SIGN) { + tcg_gen_ext_i64(tcg_ctx, retv, retv, memop); + } + } +} + +static void do_nonatomic_op_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv addr, TCGv_i32 val, + TCGArg idx, MemOp memop, bool new_val, + void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + + memop = tcg_canonicalize_memop(memop, 0, 0); + + tcg_gen_qemu_ld_i32(tcg_ctx, t1, addr, idx, memop & ~MO_SIGN); + gen(tcg_ctx, t2, t1, val); + tcg_gen_qemu_st_i32(tcg_ctx, t2, addr, idx, memop); + + tcg_gen_ext_i32(tcg_ctx, ret, (new_val ? t2 : t1), memop); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); +} + +static void do_atomic_op_i32(TCGContext *tcg_ctx, TCGv_i32 ret, TCGv addr, TCGv_i32 val, + TCGArg idx, MemOp memop, void * const table[]) +{ + gen_atomic_op_i32 gen; + + memop = tcg_canonicalize_memop(memop, 0, 0); + + gen = table[memop & (MO_SIZE | MO_BSWAP)]; + tcg_debug_assert(gen != NULL); + + { + TCGv_i32 oi = tcg_const_i32(tcg_ctx, make_memop_idx(memop & ~MO_SIGN, idx)); + gen(tcg_ctx, ret, tcg_ctx->cpu_env, addr, val, oi); + tcg_temp_free_i32(tcg_ctx, oi); + } + + if (memop & MO_SIGN) { + tcg_gen_ext_i32(tcg_ctx, ret, ret, memop); + } +} + +static void do_nonatomic_op_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv addr, TCGv_i64 val, + TCGArg idx, MemOp memop, bool new_val, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + + memop = tcg_canonicalize_memop(memop, 1, 0); + + tcg_gen_qemu_ld_i64(tcg_ctx, t1, addr, idx, memop & ~MO_SIGN); + gen(tcg_ctx, t2, t1, val); + tcg_gen_qemu_st_i64(tcg_ctx, t2, addr, idx, memop); + + tcg_gen_ext_i64(tcg_ctx, ret, (new_val ? t2 : t1), memop); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +} + +static void do_atomic_op_i64(TCGContext *tcg_ctx, TCGv_i64 ret, TCGv addr, TCGv_i64 val, + TCGArg idx, MemOp memop, void * const table[]) +{ + memop = tcg_canonicalize_memop(memop, 1, 0); + + if ((memop & MO_SIZE) == MO_64) { +#ifdef CONFIG_ATOMIC64 + gen_atomic_op_i64 gen; + + gen = table[memop & (MO_SIZE | MO_BSWAP)]; + tcg_debug_assert(gen != NULL); + + { + TCGv_i32 oi = tcg_const_i32(tcg_ctx, make_memop_idx(memop & ~MO_SIGN, idx)); + gen(tcg_ctx, ret, tcg_ctx->cpu_env, addr, val, oi); + tcg_temp_free_i32(tcg_ctx, oi); + } +#else + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + /* Produce a result, so that we have a well-formed opcode stream + with respect to uses of the result in the (dead) code following. 
*/ + tcg_gen_movi_i64(tcg_ctx, ret, 0); +#endif /* CONFIG_ATOMIC64 */ + } else { + TCGv_i32 v32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 r32 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_extrl_i64_i32(tcg_ctx, v32, val); + do_atomic_op_i32(tcg_ctx, r32, addr, v32, idx, memop & ~MO_SIGN, table); + tcg_temp_free_i32(tcg_ctx, v32); + + tcg_gen_extu_i32_i64(tcg_ctx, ret, r32); + tcg_temp_free_i32(tcg_ctx, r32); + + if (memop & MO_SIGN) { + tcg_gen_ext_i64(tcg_ctx, ret, ret, memop); + } + } +} + +#define GEN_ATOMIC_HELPER(NAME, OP, NEW) \ +static void * const table_##NAME[16] = { \ + [MO_8] = gen_helper_atomic_##NAME##b, \ + [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \ + [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \ + [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \ + [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \ + WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \ + WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ +}; \ +void tcg_gen_atomic_##NAME##_i32 \ + (TCGContext *tcg_ctx, TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ +{ \ + if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ + do_atomic_op_i32(tcg_ctx, ret, addr, val, idx, memop, table_##NAME); \ + } else { \ + do_nonatomic_op_i32(tcg_ctx, ret, addr, val, idx, memop, NEW, \ + tcg_gen_##OP##_i32); \ + } \ +} \ +void tcg_gen_atomic_##NAME##_i64 \ + (TCGContext *tcg_ctx, TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ +{ \ + if (tcg_ctx->tb_cflags & CF_PARALLEL) { \ + do_atomic_op_i64(tcg_ctx, ret, addr, val, idx, memop, table_##NAME); \ + } else { \ + do_nonatomic_op_i64(tcg_ctx, ret, addr, val, idx, memop, NEW, \ + tcg_gen_##OP##_i64); \ + } \ +} + +GEN_ATOMIC_HELPER(fetch_add, add, 0) +GEN_ATOMIC_HELPER(fetch_and, and, 0) +GEN_ATOMIC_HELPER(fetch_or, or, 0) +GEN_ATOMIC_HELPER(fetch_xor, xor, 0) +GEN_ATOMIC_HELPER(fetch_smin, smin, 0) +GEN_ATOMIC_HELPER(fetch_umin, umin, 0) +GEN_ATOMIC_HELPER(fetch_smax, smax, 0) +GEN_ATOMIC_HELPER(fetch_umax, umax, 0) + +GEN_ATOMIC_HELPER(add_fetch, add, 1) +GEN_ATOMIC_HELPER(and_fetch, and, 1) +GEN_ATOMIC_HELPER(or_fetch, or, 1) +GEN_ATOMIC_HELPER(xor_fetch, xor, 1) +GEN_ATOMIC_HELPER(smin_fetch, smin, 1) +GEN_ATOMIC_HELPER(umin_fetch, umin, 1) +GEN_ATOMIC_HELPER(smax_fetch, smax, 1) +GEN_ATOMIC_HELPER(umax_fetch, umax, 1) + +static void tcg_gen_mov2_i32(TCGContext *tcg_ctx, TCGv_i32 r, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_mov_i32(tcg_ctx, r, b); +} + +static void tcg_gen_mov2_i64(TCGContext *tcg_ctx, TCGv_i64 r, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_mov_i64(tcg_ctx, r, b); +} + +GEN_ATOMIC_HELPER(xchg, mov2, 0) + +#undef GEN_ATOMIC_HELPER diff --git a/qemu/tcg/tcg-op.h b/qemu/tcg/tcg-op.h deleted file mode 100644 index 51a0631e..00000000 --- a/qemu/tcg/tcg-op.h +++ /dev/null @@ -1,2784 +0,0 @@ -/* - * Tiny Code Generator for QEMU - * - * Copyright (c) 2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -#include "tcg.h" -#include "exec/helper-proto.h" -#include "exec/helper-gen.h" - -int gen_new_label(TCGContext *); - -static inline void gen_uc_tracecode(TCGContext *tcg_ctx, int32_t size, int32_t type, void *uc, uint64_t pc) -{ - TCGv_i32 tsize = tcg_const_i32(tcg_ctx, size); - TCGv_i32 ttype = tcg_const_i32(tcg_ctx, type); - TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, uc); - TCGv_i64 tpc = tcg_const_i64(tcg_ctx, pc); - gen_helper_uc_tracecode(tcg_ctx, tsize, ttype, tuc, tpc); -} - -static inline void tcg_gen_op0(TCGContext *s, TCGOpcode opc) -{ - *s->gen_opc_ptr++ = opc; -} - -static inline void tcg_gen_op1_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); -} - -static inline void tcg_gen_op1_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); -} - -static inline void tcg_gen_op1i(TCGContext *s, TCGOpcode opc, TCGArg arg1) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = arg1; -} - -static inline void tcg_gen_op2_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); -} - -static inline void tcg_gen_op2_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); -} - -static inline void tcg_gen_op2i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGArg arg2) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = arg2; -} - -static inline void tcg_gen_op2i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGArg arg2) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = arg2; -} - -static inline void tcg_gen_op2ii(TCGContext *s, TCGOpcode opc, TCGArg arg1, TCGArg arg2) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = arg1; - *s->gen_opparam_ptr++ = arg2; -} - -static inline void tcg_gen_op3_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, - TCGv_i32 arg3) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); -} - -static inline void tcg_gen_op3_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, - TCGv_i64 arg3) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); -} - -static inline void tcg_gen_op3i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, - TCGv_i32 arg2, TCGArg arg3) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = arg3; -} - -static inline void tcg_gen_op3i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, - TCGv_i64 arg2, TCGArg arg3) -{ 
- *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = arg3; -} - -static inline void tcg_gen_ldst_op_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 val, - TCGv_ptr base, TCGArg offset) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(val); - *s->gen_opparam_ptr++ = GET_TCGV_PTR(base); - *s->gen_opparam_ptr++ = offset; -} - -static inline void tcg_gen_ldst_op_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 val, - TCGv_ptr base, TCGArg offset) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(val); - *s->gen_opparam_ptr++ = GET_TCGV_PTR(base); - *s->gen_opparam_ptr++ = offset; -} - -static inline void tcg_gen_op4_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, - TCGv_i32 arg3, TCGv_i32 arg4) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); -} - -static inline void tcg_gen_op4_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, - TCGv_i64 arg3, TCGv_i64 arg4) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); -} - -static inline void tcg_gen_op4i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, - TCGv_i32 arg3, TCGArg arg4) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); - *s->gen_opparam_ptr++ = arg4; -} - -static inline void tcg_gen_op4i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, - TCGv_i64 arg3, TCGArg arg4) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); - *s->gen_opparam_ptr++ = arg4; -} - -static inline void tcg_gen_op4ii_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, - TCGArg arg3, TCGArg arg4) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = arg3; - *s->gen_opparam_ptr++ = arg4; -} - -static inline void tcg_gen_op4ii_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, - TCGArg arg3, TCGArg arg4) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = arg3; - *s->gen_opparam_ptr++ = arg4; -} - -static inline void tcg_gen_op5_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, - TCGv_i32 arg3, TCGv_i32 arg4, TCGv_i32 arg5) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg5); -} - -static inline void tcg_gen_op5_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, - TCGv_i64 arg3, TCGv_i64 arg4, TCGv_i64 arg5) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg5); -} - -static inline void 
tcg_gen_op5i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, - TCGv_i32 arg3, TCGv_i32 arg4, TCGArg arg5) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); - *s->gen_opparam_ptr++ = arg5; -} - -static inline void tcg_gen_op5i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, - TCGv_i64 arg3, TCGv_i64 arg4, TCGArg arg5) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); - *s->gen_opparam_ptr++ = arg5; -} - -static inline void tcg_gen_op5ii_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, - TCGv_i32 arg2, TCGv_i32 arg3, - TCGArg arg4, TCGArg arg5) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); - *s->gen_opparam_ptr++ = arg4; - *s->gen_opparam_ptr++ = arg5; -} - -static inline void tcg_gen_op5ii_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, - TCGv_i64 arg2, TCGv_i64 arg3, - TCGArg arg4, TCGArg arg5) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); - *s->gen_opparam_ptr++ = arg4; - *s->gen_opparam_ptr++ = arg5; -} - -static inline void tcg_gen_op6_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, - TCGv_i32 arg3, TCGv_i32 arg4, TCGv_i32 arg5, - TCGv_i32 arg6) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg5); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg6); -} - -static inline void tcg_gen_op6_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, - TCGv_i64 arg3, TCGv_i64 arg4, TCGv_i64 arg5, - TCGv_i64 arg6) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg5); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg6); -} - -static inline void tcg_gen_op6i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, - TCGv_i32 arg3, TCGv_i32 arg4, - TCGv_i32 arg5, TCGArg arg6) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg5); - *s->gen_opparam_ptr++ = arg6; -} - -static inline void tcg_gen_op6i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, - TCGv_i64 arg3, TCGv_i64 arg4, - TCGv_i64 arg5, TCGArg arg6) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg5); - *s->gen_opparam_ptr++ = arg6; -} - -static inline void tcg_gen_op6ii_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, - TCGv_i32 arg2, TCGv_i32 arg3, - TCGv_i32 arg4, TCGArg arg5, TCGArg arg6) -{ - 
*s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); - *s->gen_opparam_ptr++ = arg5; - *s->gen_opparam_ptr++ = arg6; -} - -static inline void tcg_gen_op6ii_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, - TCGv_i64 arg2, TCGv_i64 arg3, - TCGv_i64 arg4, TCGArg arg5, TCGArg arg6) -{ - *s->gen_opc_ptr++ = opc; - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); - *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); - *s->gen_opparam_ptr++ = arg5; - *s->gen_opparam_ptr++ = arg6; -} - -static inline void tcg_add_param_i32(TCGContext *s, TCGv_i32 val) -{ - *s->gen_opparam_ptr++ = GET_TCGV_I32(val); -} - -static inline void tcg_add_param_i64(TCGContext *s, TCGv_i64 val) -{ -#if TCG_TARGET_REG_BITS == 32 - *s->gen_opparam_ptr++ = GET_TCGV_I32(TCGV_LOW(val)); - *s->gen_opparam_ptr++ = GET_TCGV_I32(TCGV_HIGH(val)); -#else - *s->gen_opparam_ptr++ = GET_TCGV_I64(val); -#endif -} - -static inline void gen_set_label(TCGContext *s, int n) -{ - tcg_gen_op1i(s, INDEX_op_set_label, n); -} - -static inline void tcg_gen_br(TCGContext *s, int label) -{ - tcg_gen_op1i(s, INDEX_op_br, label); -} - -static inline void tcg_gen_mov_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) -{ - if (!TCGV_EQUAL_I32(ret, arg)) - tcg_gen_op2_i32(s, INDEX_op_mov_i32, ret, arg); -} - -static inline void tcg_gen_movi_i32(TCGContext *s, TCGv_i32 ret, int32_t arg) -{ - tcg_gen_op2i_i32(s, INDEX_op_movi_i32, ret, arg); -} - -/* 32 bit ops */ - -static inline void tcg_gen_ld8u_i32(TCGContext *s, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) -{ - tcg_gen_ldst_op_i32(s, INDEX_op_ld8u_i32, ret, arg2, offset); -} - -static inline void tcg_gen_ld8s_i32(TCGContext *s, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) -{ - tcg_gen_ldst_op_i32(s, INDEX_op_ld8s_i32, ret, arg2, offset); -} - -static inline void tcg_gen_ld16u_i32(TCGContext *s, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) -{ - tcg_gen_ldst_op_i32(s, INDEX_op_ld16u_i32, ret, arg2, offset); -} - -static inline void tcg_gen_ld16s_i32(TCGContext *s, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) -{ - tcg_gen_ldst_op_i32(s, INDEX_op_ld16s_i32, ret, arg2, offset); -} - -static inline void tcg_gen_ld_i32(TCGContext *s, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) -{ - tcg_gen_ldst_op_i32(s, INDEX_op_ld_i32, ret, arg2, offset); -} - -static inline void tcg_gen_st8_i32(TCGContext *s, TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) -{ - tcg_gen_ldst_op_i32(s, INDEX_op_st8_i32, arg1, arg2, offset); -} - -static inline void tcg_gen_st16_i32(TCGContext *s, TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) -{ - tcg_gen_ldst_op_i32(s, INDEX_op_st16_i32, arg1, arg2, offset); -} - -static inline void tcg_gen_st_i32(TCGContext *s, TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) -{ - tcg_gen_ldst_op_i32(s, INDEX_op_st_i32, arg1, arg2, offset); -} - -static inline void tcg_gen_add_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - tcg_gen_op3_i32(s, INDEX_op_add_i32, ret, arg1, arg2); -} - -static inline void tcg_gen_addi_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) -{ - /* some cases can be optimized here */ - if (arg2 == 0) { - tcg_gen_mov_i32(s, ret, arg1); - } else { - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_add_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } -} - -static 
inline void tcg_gen_sub_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - tcg_gen_op3_i32(s, INDEX_op_sub_i32, ret, arg1, arg2); -} - -static inline void tcg_gen_subfi_i32(TCGContext *s, TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2) -{ - TCGv_i32 t0 = tcg_const_i32(s, arg1); - tcg_gen_sub_i32(s, ret, t0, arg2); - tcg_temp_free_i32(s, t0); -} - -static inline void tcg_gen_subi_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) -{ - /* some cases can be optimized here */ - if (arg2 == 0) { - tcg_gen_mov_i32(s, ret, arg1); - } else { - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_sub_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_and_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCGV_EQUAL_I32(arg1, arg2)) { - tcg_gen_mov_i32(s, ret, arg1); - } else { - tcg_gen_op3_i32(s, INDEX_op_and_i32, ret, arg1, arg2); - } -} - -static inline void tcg_gen_andi_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) -{ - TCGv_i32 t0; - /* Some cases can be optimized here. */ - switch (arg2) { - case 0: - tcg_gen_movi_i32(s, ret, 0); - return; - case 0xffffffffu: - tcg_gen_mov_i32(s, ret, arg1); - return; - case 0xffu: - /* Don't recurse with tcg_gen_ext8u_i32. */ - if (TCG_TARGET_HAS_ext8u_i32) { - tcg_gen_op2_i32(s, INDEX_op_ext8u_i32, ret, arg1); - return; - } - break; - case 0xffffu: - if (TCG_TARGET_HAS_ext16u_i32) { - tcg_gen_op2_i32(s, INDEX_op_ext16u_i32, ret, arg1); - return; - } - break; - } - t0 = tcg_const_i32(s, arg2); - tcg_gen_and_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); -} - -static inline void tcg_gen_or_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCGV_EQUAL_I32(arg1, arg2)) { - tcg_gen_mov_i32(s, ret, arg1); - } else { - tcg_gen_op3_i32(s, INDEX_op_or_i32, ret, arg1, arg2); - } -} - -static inline void tcg_gen_ori_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) -{ - /* Some cases can be optimized here. */ - if (arg2 == -1) { - tcg_gen_movi_i32(s, ret, -1); - } else if (arg2 == 0) { - tcg_gen_mov_i32(s, ret, arg1); - } else { - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_or_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_xor_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCGV_EQUAL_I32(arg1, arg2)) { - tcg_gen_movi_i32(s, ret, 0); - } else { - tcg_gen_op3_i32(s, INDEX_op_xor_i32, ret, arg1, arg2); - } -} - -static inline void tcg_gen_xori_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) -{ - /* Some cases can be optimized here. */ - if (arg2 == 0) { - tcg_gen_mov_i32(s, ret, arg1); - } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) { - /* Don't recurse with tcg_gen_not_i32. 
*/ - tcg_gen_op2_i32(s, INDEX_op_not_i32, ret, arg1); - } else { - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_xor_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_shl_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - tcg_gen_op3_i32(s, INDEX_op_shl_i32, ret, arg1, arg2); -} - -static inline void tcg_gen_shli_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) -{ - if (arg2 == 0) { - tcg_gen_mov_i32(s, ret, arg1); - } else { - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_shl_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_shr_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - tcg_gen_op3_i32(s, INDEX_op_shr_i32, ret, arg1, arg2); -} - -static inline void tcg_gen_shri_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) -{ - if (arg2 == 0) { - tcg_gen_mov_i32(s, ret, arg1); - } else { - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_shr_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_sar_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - tcg_gen_op3_i32(s, INDEX_op_sar_i32, ret, arg1, arg2); -} - -static inline void tcg_gen_sari_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) -{ - if (arg2 == 0) { - tcg_gen_mov_i32(s, ret, arg1); - } else { - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_sar_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_brcond_i32(TCGContext *s, TCGCond cond, TCGv_i32 arg1, - TCGv_i32 arg2, int label_index) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_br(s, label_index); - } else if (cond != TCG_COND_NEVER) { - tcg_gen_op4ii_i32(s, INDEX_op_brcond_i32, arg1, arg2, cond, label_index); - } -} - -static inline void tcg_gen_brcondi_i32(TCGContext *s, TCGCond cond, TCGv_i32 arg1, - int32_t arg2, int label_index) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_br(s, label_index); - } else if (cond != TCG_COND_NEVER) { - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_brcond_i32(s, cond, arg1, t0, label_index); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_setcond_i32(TCGContext *s, TCGCond cond, TCGv_i32 ret, - TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_movi_i32(s, ret, 1); - } else if (cond == TCG_COND_NEVER) { - tcg_gen_movi_i32(s, ret, 0); - } else { - tcg_gen_op4i_i32(s, INDEX_op_setcond_i32, ret, arg1, arg2, cond); - } -} - -static inline void tcg_gen_setcondi_i32(TCGContext *s, TCGCond cond, TCGv_i32 ret, - TCGv_i32 arg1, int32_t arg2) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_movi_i32(s, ret, 1); - } else if (cond == TCG_COND_NEVER) { - tcg_gen_movi_i32(s, ret, 0); - } else { - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_setcond_i32(s, cond, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_mul_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - tcg_gen_op3_i32(s, INDEX_op_mul_i32, ret, arg1, arg2); -} - -static inline void tcg_gen_muli_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) -{ - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_mul_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); -} - -static inline void tcg_gen_div_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_div_i32) { - tcg_gen_op3_i32(s, INDEX_op_div_i32, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div2_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(s); - tcg_gen_sari_i32(s, t0, arg1, 31); - 
tcg_gen_op5_i32(s, INDEX_op_div2_i32, ret, t0, arg1, t0, arg2); - tcg_temp_free_i32(s, t0); - } else { - gen_helper_div_i32(s, ret, arg1, arg2); - } -} - -static inline void tcg_gen_rem_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_rem_i32) { - tcg_gen_op3_i32(s, INDEX_op_rem_i32, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(s); - tcg_gen_op3_i32(s, INDEX_op_div_i32, t0, arg1, arg2); - tcg_gen_mul_i32(s, t0, t0, arg2); - tcg_gen_sub_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } else if (TCG_TARGET_HAS_div2_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(s); - tcg_gen_sari_i32(s, t0, arg1, 31); - tcg_gen_op5_i32(s, INDEX_op_div2_i32, t0, ret, arg1, t0, arg2); - tcg_temp_free_i32(s, t0); - } else { - gen_helper_rem_i32(s, ret, arg1, arg2); - } -} - -static inline void tcg_gen_divu_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_div_i32) { - tcg_gen_op3_i32(s, INDEX_op_divu_i32, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div2_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(s); - tcg_gen_movi_i32(s, t0, 0); - tcg_gen_op5_i32(s, INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2); - tcg_temp_free_i32(s, t0); - } else { - gen_helper_divu_i32(s, ret, arg1, arg2); - } -} - -static inline void tcg_gen_remu_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_rem_i32) { - tcg_gen_op3_i32(s, INDEX_op_remu_i32, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(s); - tcg_gen_op3_i32(s, INDEX_op_divu_i32, t0, arg1, arg2); - tcg_gen_mul_i32(s, t0, t0, arg2); - tcg_gen_sub_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } else if (TCG_TARGET_HAS_div2_i32) { - TCGv_i32 t0 = tcg_temp_new_i32(s); - tcg_gen_movi_i32(s, t0, 0); - tcg_gen_op5_i32(s, INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2); - tcg_temp_free_i32(s, t0); - } else { - gen_helper_remu_i32(s, ret, arg1, arg2); - } -} - -#if TCG_TARGET_REG_BITS == 32 - -static inline void tcg_gen_mov_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (!TCGV_EQUAL_I64(ret, arg)) { - tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_mov_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg)); - } -} - -static inline void tcg_gen_movi_i64(TCGContext *s, TCGv_i64 ret, int64_t arg) -{ - tcg_gen_movi_i32(s, TCGV_LOW(ret), (int32_t)arg); - tcg_gen_movi_i32(s, TCGV_HIGH(ret), arg >> 32); -} - -static inline void tcg_gen_ld8u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ld8u_i32(s, TCGV_LOW(ret), arg2, offset); - tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); -} - -static inline void tcg_gen_ld8s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ld8s_i32(s, TCGV_LOW(ret), arg2, offset); - tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); -} - -static inline void tcg_gen_ld16u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ld16u_i32(s, TCGV_LOW(ret), arg2, offset); - tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); -} - -static inline void tcg_gen_ld16s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ld16s_i32(s, TCGV_LOW(ret), arg2, offset); - tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); -} - -static inline void tcg_gen_ld32u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ld_i32(s, TCGV_LOW(ret), arg2, offset); - tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); -} - -static inline void
tcg_gen_ld32s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ld_i32(s, TCGV_LOW(ret), arg2, offset); - tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); -} - -static inline void tcg_gen_ld_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - /* since arg2 and ret have different types, they cannot be the - same temporary */ -#ifdef HOST_WORDS_BIGENDIAN - tcg_gen_ld_i32(s, TCGV_HIGH(ret), arg2, offset); - tcg_gen_ld_i32(s, TCGV_LOW(ret), arg2, offset + 4); -#else - tcg_gen_ld_i32(s, TCGV_LOW(ret), arg2, offset); - tcg_gen_ld_i32(s, TCGV_HIGH(ret), arg2, offset + 4); -#endif -} - -static inline void tcg_gen_st8_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_st8_i32(s, TCGV_LOW(arg1), arg2, offset); -} - -static inline void tcg_gen_st16_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_st16_i32(s, TCGV_LOW(arg1), arg2, offset); -} - -static inline void tcg_gen_st32_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_st_i32(s, TCGV_LOW(arg1), arg2, offset); -} - -static inline void tcg_gen_st_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, - tcg_target_long offset) -{ -#ifdef HOST_WORDS_BIGENDIAN - tcg_gen_st_i32(s, TCGV_HIGH(arg1), arg2, offset); - tcg_gen_st_i32(s, TCGV_LOW(arg1), arg2, offset + 4); -#else - tcg_gen_st_i32(s, TCGV_LOW(arg1), arg2, offset); - tcg_gen_st_i32(s, TCGV_HIGH(arg1), arg2, offset + 4); -#endif -} - -static inline void tcg_gen_add_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_op6_i32(s, INDEX_op_add2_i32, TCGV_LOW(ret), TCGV_HIGH(ret), - TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), - TCGV_HIGH(arg2)); - /* Allow the optimizer room to replace add2 with two moves. */ - tcg_gen_op0(s, INDEX_op_nop); -} - -static inline void tcg_gen_sub_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_op6_i32(s, INDEX_op_sub2_i32, TCGV_LOW(ret), TCGV_HIGH(ret), - TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), - TCGV_HIGH(arg2)); - /* Allow the optimizer room to replace sub2 with two moves. 
*/ - tcg_gen_op0(s, INDEX_op_nop); -} - -static inline void tcg_gen_and_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_and_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); - tcg_gen_and_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); -} - -static inline void tcg_gen_andi_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - tcg_gen_andi_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), (uint32_t)arg2); - tcg_gen_andi_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); -} - -static inline void tcg_gen_or_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_or_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); - tcg_gen_or_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); -} - -static inline void tcg_gen_ori_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - tcg_gen_ori_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), (uint32_t)arg2); - tcg_gen_ori_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); -} - -static inline void tcg_gen_xor_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_xor_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); - tcg_gen_xor_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); -} - -static inline void tcg_gen_xori_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - tcg_gen_xori_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), (int32_t)arg2); - tcg_gen_xori_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); -} - -/* XXX: use generic code when basic block handling is OK or CPU - specific code (x86) */ -static inline void tcg_gen_shl_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - gen_helper_shl_i64(s, ret, arg1, arg2); -} - -static inline void tcg_gen_shli_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - tcg_gen_shifti_i64(s, ret, arg1, (int)arg2, 0, 0); -} - -static inline void tcg_gen_shr_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - gen_helper_shr_i64(s, ret, arg1, arg2); -} - -static inline void tcg_gen_shri_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - tcg_gen_shifti_i64(s, ret, arg1, (int)arg2, 1, 0); -} - -static inline void tcg_gen_sar_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - gen_helper_sar_i64(s, ret, arg1, arg2); -} - -static inline void tcg_gen_sari_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - tcg_gen_shifti_i64(s, ret, arg1, (int)arg2, 1, 1); -} - -static inline void tcg_gen_brcond_i64(TCGContext *s, TCGCond cond, TCGv_i64 arg1, - TCGv_i64 arg2, int label_index) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_br(s, label_index); - } else if (cond != TCG_COND_NEVER) { - tcg_gen_op6ii_i32(s, INDEX_op_brcond2_i32, - TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), - TCGV_HIGH(arg2), cond, label_index); - } -} - -static inline void tcg_gen_setcond_i64(TCGContext *s, TCGCond cond, TCGv_i64 ret, - TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_movi_i32(s, TCGV_LOW(ret), 1); - } else if (cond == TCG_COND_NEVER) { - tcg_gen_movi_i32(s, TCGV_LOW(ret), 0); - } else { - tcg_gen_op6i_i32(s, INDEX_op_setcond2_i32, TCGV_LOW(ret), - TCGV_LOW(arg1), TCGV_HIGH(arg1), - TCGV_LOW(arg2), TCGV_HIGH(arg2), cond); - } - tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); -} - -static inline void tcg_gen_mul_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - TCGv_i64 t0; - TCGv_i32 t1; - - t0 = tcg_temp_new_i64(s); - t1 = tcg_temp_new_i32(s); - - if (TCG_TARGET_HAS_mulu2_i32) { - 
tcg_gen_op4_i32(s, INDEX_op_mulu2_i32, TCGV_LOW(t0), TCGV_HIGH(t0), - TCGV_LOW(arg1), TCGV_LOW(arg2)); - /* Allow the optimizer room to replace mulu2 with two moves. */ - tcg_gen_op0(s, INDEX_op_nop); - } else { - tcg_debug_assert(TCG_TARGET_HAS_muluh_i32); - tcg_gen_op3_i32(s, INDEX_op_mul_i32, TCGV_LOW(t0), - TCGV_LOW(arg1), TCGV_LOW(arg2)); - tcg_gen_op3_i32(s, INDEX_op_muluh_i32, TCGV_HIGH(t0), - TCGV_LOW(arg1), TCGV_LOW(arg2)); - } - - tcg_gen_mul_i32(s, t1, TCGV_LOW(arg1), TCGV_HIGH(arg2)); - tcg_gen_add_i32(s, TCGV_HIGH(t0), TCGV_HIGH(t0), t1); - tcg_gen_mul_i32(s, t1, TCGV_HIGH(arg1), TCGV_LOW(arg2)); - tcg_gen_add_i32(s, TCGV_HIGH(t0), TCGV_HIGH(t0), t1); - - tcg_gen_mov_i64(s, ret, t0); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i32(s, t1); -} - -static inline void tcg_gen_div_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - gen_helper_div_i64(s, ret, arg1, arg2); -} - -static inline void tcg_gen_rem_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - gen_helper_rem_i64(s, ret, arg1, arg2); -} - -static inline void tcg_gen_divu_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - gen_helper_divu_i64(s, ret, arg1, arg2); -} - -static inline void tcg_gen_remu_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - gen_helper_remu_i64(s, ret, arg1, arg2); -} - -#else - -static inline void tcg_gen_mov_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (!TCGV_EQUAL_I64(ret, arg)) - tcg_gen_op2_i64(s, INDEX_op_mov_i64, ret, arg); -} - -static inline void tcg_gen_movi_i64(TCGContext *s, TCGv_i64 ret, int64_t arg) -{ - tcg_gen_op2i_i64(s, INDEX_op_movi_i64, ret, arg); -} - -static inline void tcg_gen_ld8u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_ld8u_i64, ret, arg2, offset); -} - -static inline void tcg_gen_ld8s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_ld8s_i64, ret, arg2, offset); -} - -static inline void tcg_gen_ld16u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_ld16u_i64, ret, arg2, offset); -} - -static inline void tcg_gen_ld16s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_ld16s_i64, ret, arg2, offset); -} - -static inline void tcg_gen_ld32u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_ld32u_i64, ret, arg2, offset); -} - -static inline void tcg_gen_ld32s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_ld32s_i64, ret, arg2, offset); -} - -static inline void tcg_gen_ld_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_ld_i64, ret, arg2, offset); -} - -static inline void tcg_gen_st8_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_st8_i64, arg1, arg2, offset); -} - -static inline void tcg_gen_st16_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_st16_i64, arg1, arg2, offset); -} - -static inline void tcg_gen_st32_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, - tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_st32_i64, arg1, arg2, offset); -} - -static inline void tcg_gen_st_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, 
tcg_target_long offset) -{ - tcg_gen_ldst_op_i64(s, INDEX_op_st_i64, arg1, arg2, offset); -} - -static inline void tcg_gen_add_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_op3_i64(s, INDEX_op_add_i64, ret, arg1, arg2); -} - -static inline void tcg_gen_sub_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_op3_i64(s, INDEX_op_sub_i64, ret, arg1, arg2); -} - -static inline void tcg_gen_and_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (TCGV_EQUAL_I64(arg1, arg2)) { - tcg_gen_mov_i64(s, ret, arg1); - } else { - tcg_gen_op3_i64(s, INDEX_op_and_i64, ret, arg1, arg2); - } -} - -static inline void tcg_gen_andi_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) -{ - TCGv_i64 t0; - /* Some cases can be optimized here. */ - switch (arg2) { - case 0: - tcg_gen_movi_i64(s, ret, 0); - return; - case 0xffffffffffffffffull: - tcg_gen_mov_i64(s, ret, arg1); - return; - case 0xffull: - /* Don't recurse with tcg_gen_ext8u_i64. */ - if (TCG_TARGET_HAS_ext8u_i64) { - tcg_gen_op2_i64(s, INDEX_op_ext8u_i64, ret, arg1); - return; - } - break; - case 0xffffu: - if (TCG_TARGET_HAS_ext16u_i64) { - tcg_gen_op2_i64(s, INDEX_op_ext16u_i64, ret, arg1); - return; - } - break; - case 0xffffffffull: - if (TCG_TARGET_HAS_ext32u_i64) { - tcg_gen_op2_i64(s, INDEX_op_ext32u_i64, ret, arg1); - return; - } - break; - } - t0 = tcg_const_i64(s, arg2); - tcg_gen_and_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); -} - -static inline void tcg_gen_or_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (TCGV_EQUAL_I64(arg1, arg2)) { - tcg_gen_mov_i64(s, ret, arg1); - } else { - tcg_gen_op3_i64(s, INDEX_op_or_i64, ret, arg1, arg2); - } -} - -static inline void tcg_gen_ori_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - /* Some cases can be optimized here. */ - if (arg2 == -1) { - tcg_gen_movi_i64(s, ret, -1); - } else if (arg2 == 0) { - tcg_gen_mov_i64(s, ret, arg1); - } else { - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_or_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } -} - -static inline void tcg_gen_xor_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (TCGV_EQUAL_I64(arg1, arg2)) { - tcg_gen_movi_i64(s, ret, 0); - } else { - tcg_gen_op3_i64(s, INDEX_op_xor_i64, ret, arg1, arg2); - } -} - -static inline void tcg_gen_xori_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - /* Some cases can be optimized here. */ - if (arg2 == 0) { - tcg_gen_mov_i64(s, ret, arg1); - } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) { - /* Don't recurse with tcg_gen_not_i64.
*/ - tcg_gen_op2_i64(s, INDEX_op_not_i64, ret, arg1); - } else { - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_xor_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } -} - -static inline void tcg_gen_shl_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_op3_i64(s, INDEX_op_shl_i64, ret, arg1, arg2); -} - -static inline void tcg_gen_shli_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - if (arg2 == 0) { - tcg_gen_mov_i64(s, ret, arg1); - } else { - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_shl_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } -} - -static inline void tcg_gen_shr_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_op3_i64(s, INDEX_op_shr_i64, ret, arg1, arg2); -} - -static inline void tcg_gen_shri_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - if (arg2 == 0) { - tcg_gen_mov_i64(s, ret, arg1); - } else { - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_shr_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } -} - -static inline void tcg_gen_sar_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_op3_i64(s, INDEX_op_sar_i64, ret, arg1, arg2); -} - -static inline void tcg_gen_sari_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - if (arg2 == 0) { - tcg_gen_mov_i64(s, ret, arg1); - } else { - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_sar_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } -} - -static inline void tcg_gen_brcond_i64(TCGContext *s, TCGCond cond, TCGv_i64 arg1, - TCGv_i64 arg2, int label_index) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_br(s, label_index); - } else if (cond != TCG_COND_NEVER) { - tcg_gen_op4ii_i64(s, INDEX_op_brcond_i64, arg1, arg2, cond, label_index); - } -} - -static inline void tcg_gen_setcond_i64(TCGContext *s, TCGCond cond, TCGv_i64 ret, - TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_movi_i64(s, ret, 1); - } else if (cond == TCG_COND_NEVER) { - tcg_gen_movi_i64(s, ret, 0); - } else { - tcg_gen_op4i_i64(s, INDEX_op_setcond_i64, ret, arg1, arg2, cond); - } -} - -static inline void tcg_gen_mul_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - tcg_gen_op3_i64(s, INDEX_op_mul_i64, ret, arg1, arg2); -} - -static inline void tcg_gen_div_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (TCG_TARGET_HAS_div_i64) { - tcg_gen_op3_i64(s, INDEX_op_div_i64, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div2_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(s); - tcg_gen_sari_i64(s, t0, arg1, 63); - tcg_gen_op5_i64(s, INDEX_op_div2_i64, ret, t0, arg1, t0, arg2); - tcg_temp_free_i64(s, t0); - } else { - gen_helper_div_i64(s, ret, arg1, arg2); - } -} - -static inline void tcg_gen_rem_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (TCG_TARGET_HAS_rem_i64) { - tcg_gen_op3_i64(s, INDEX_op_rem_i64, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(s); - tcg_gen_op3_i64(s, INDEX_op_div_i64, t0, arg1, arg2); - tcg_gen_mul_i64(s, t0, t0, arg2); - tcg_gen_sub_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } else if (TCG_TARGET_HAS_div2_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(s); - tcg_gen_sari_i64(s, t0, arg1, 63); - tcg_gen_op5_i64(s, INDEX_op_div2_i64, t0, ret, arg1, t0, arg2); - tcg_temp_free_i64(s, t0); - } else { - gen_helper_rem_i64(s, ret, arg1, arg2); - } -} - -static inline void tcg_gen_divu_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - if 
(TCG_TARGET_HAS_div_i64) { - tcg_gen_op3_i64(s, INDEX_op_divu_i64, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div2_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(s); - tcg_gen_movi_i64(s, t0, 0); - tcg_gen_op5_i64(s, INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2); - tcg_temp_free_i64(s, t0); - } else { - gen_helper_divu_i64(s, ret, arg1, arg2); - } -} - -static inline void tcg_gen_remu_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (TCG_TARGET_HAS_rem_i64) { - tcg_gen_op3_i64(s, INDEX_op_remu_i64, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(s); - tcg_gen_op3_i64(s, INDEX_op_divu_i64, t0, arg1, arg2); - tcg_gen_mul_i64(s, t0, t0, arg2); - tcg_gen_sub_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } else if (TCG_TARGET_HAS_div2_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(s); - tcg_gen_movi_i64(s, t0, 0); - tcg_gen_op5_i64(s, INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2); - tcg_temp_free_i64(s, t0); - } else { - gen_helper_remu_i64(s, ret, arg1, arg2); - } -} -#endif /* TCG_TARGET_REG_BITS == 32 */ - -static inline void tcg_gen_addi_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - /* some cases can be optimized here */ - if (arg2 == 0) { - tcg_gen_mov_i64(s, ret, arg1); - } else { - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_add_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } -} - -static inline void tcg_gen_subfi_i64(TCGContext *s, TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2) -{ - TCGv_i64 t0 = tcg_const_i64(s, arg1); - tcg_gen_sub_i64(s, ret, t0, arg2); - tcg_temp_free_i64(s, t0); -} - -static inline void tcg_gen_subi_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - /* some cases can be optimized here */ - if (arg2 == 0) { - tcg_gen_mov_i64(s, ret, arg1); - } else { - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_sub_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } -} -static inline void tcg_gen_brcondi_i64(TCGContext *s, TCGCond cond, TCGv_i64 arg1, - int64_t arg2, int label_index) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_br(s, label_index); - } else if (cond != TCG_COND_NEVER) { - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_brcond_i64(s, cond, arg1, t0, label_index); - tcg_temp_free_i64(s, t0); - } -} - -static inline void tcg_gen_setcondi_i64(TCGContext *s, TCGCond cond, TCGv_i64 ret, - TCGv_i64 arg1, int64_t arg2) -{ - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_setcond_i64(s, cond, ret, arg1, t0); - tcg_temp_free_i64(s, t0); -} - -static inline void tcg_gen_muli_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_mul_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); -} - - -/***************************************/ -/* optional operations */ - -static inline void tcg_gen_ext8s_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) -{ - if (TCG_TARGET_HAS_ext8s_i32) { - tcg_gen_op2_i32(s, INDEX_op_ext8s_i32, ret, arg); - } else { - tcg_gen_shli_i32(s, ret, arg, 24); - tcg_gen_sari_i32(s, ret, ret, 24); - } -} - -static inline void tcg_gen_ext16s_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) -{ - if (TCG_TARGET_HAS_ext16s_i32) { - tcg_gen_op2_i32(s, INDEX_op_ext16s_i32, ret, arg); - } else { - tcg_gen_shli_i32(s, ret, arg, 16); - tcg_gen_sari_i32(s, ret, ret, 16); - } -} - -static inline void tcg_gen_ext8u_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) -{ - if (TCG_TARGET_HAS_ext8u_i32) { - tcg_gen_op2_i32(s, INDEX_op_ext8u_i32, ret, arg); - } else { - tcg_gen_andi_i32(s, ret, arg, 0xffu); - } -} - 
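As an aside for readers of the fallback paths just above: when the backend has no native extension op, tcg_gen_ext8s_i32 synthesizes sign extension with a shift pair (shift the byte to the top of the word, arithmetic-shift it back), and tcg_gen_ext8u_i32 falls back to a mask. A minimal standalone C sketch of the same two tricks follows; the function names and test values are invented for illustration, and it assumes the host's signed right shift is arithmetic, as TCG backends already require.

#include <assert.h>
#include <stdint.h>

/* Sketch of the ext8s fallback: shift the byte to bit 31, then
   arithmetic-shift back so the sign bit fills the upper bits.
   The unsigned cast on the left shift avoids signed-overflow UB. */
static int32_t ext8s_by_shifts(int32_t arg)
{
    return (int32_t)((uint32_t)arg << 24) >> 24;
}

/* Sketch of the ext8u fallback: a plain mask, as in tcg_gen_andi_i32. */
static int32_t ext8u_by_mask(int32_t arg)
{
    return arg & 0xff;
}

int main(void)
{
    assert(ext8s_by_shifts(0x80) == -128);  /* sign bit propagates */
    assert(ext8s_by_shifts(0x7f) == 127);   /* positive byte unchanged */
    assert(ext8u_by_mask(-1) == 0xff);      /* upper bits cleared */
    return 0;
}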
-static inline void tcg_gen_ext16u_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) -{ - if (TCG_TARGET_HAS_ext16u_i32) { - tcg_gen_op2_i32(s, INDEX_op_ext16u_i32, ret, arg); - } else { - tcg_gen_andi_i32(s, ret, arg, 0xffffu); - } -} - -/* Note: we assume the two high bytes are set to zero */ -static inline void tcg_gen_bswap16_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) -{ - if (TCG_TARGET_HAS_bswap16_i32) { - tcg_gen_op2_i32(s, INDEX_op_bswap16_i32, ret, arg); - } else { - TCGv_i32 t0 = tcg_temp_new_i32(s); - - tcg_gen_ext8u_i32(s, t0, arg); - tcg_gen_shli_i32(s, t0, t0, 8); - tcg_gen_shri_i32(s, ret, arg, 8); - tcg_gen_or_i32(s, ret, ret, t0); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_bswap32_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) -{ - if (TCG_TARGET_HAS_bswap32_i32) { - tcg_gen_op2_i32(s, INDEX_op_bswap32_i32, ret, arg); - } else { - TCGv_i32 t0, t1; - t0 = tcg_temp_new_i32(s); - t1 = tcg_temp_new_i32(s); - - tcg_gen_shli_i32(s, t0, arg, 24); - - tcg_gen_andi_i32(s, t1, arg, 0x0000ff00); - tcg_gen_shli_i32(s, t1, t1, 8); - tcg_gen_or_i32(s, t0, t0, t1); - - tcg_gen_shri_i32(s, t1, arg, 8); - tcg_gen_andi_i32(s, t1, t1, 0x0000ff00); - tcg_gen_or_i32(s, t0, t0, t1); - - tcg_gen_shri_i32(s, t1, arg, 24); - tcg_gen_or_i32(s, ret, t0, t1); - tcg_temp_free_i32(s, t0); - tcg_temp_free_i32(s, t1); - } -} - -#if TCG_TARGET_REG_BITS == 32 -static inline void tcg_gen_ext8s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - tcg_gen_ext8s_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); -} - -static inline void tcg_gen_ext16s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - tcg_gen_ext16s_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); -} - -static inline void tcg_gen_ext32s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); -} - -static inline void tcg_gen_ext8u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - tcg_gen_ext8u_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); -} - -static inline void tcg_gen_ext16u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - tcg_gen_ext16u_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); -} - -static inline void tcg_gen_ext32u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); -} - -static inline void tcg_gen_trunc_shr_i64_i32(TCGContext *s, TCGv_i32 ret, TCGv_i64 arg, - unsigned int count) -{ - tcg_debug_assert(count < 64); - if (count >= 32) { - tcg_gen_shri_i32(s, ret, TCGV_HIGH(arg), count - 32); - } else if (count == 0) { - tcg_gen_mov_i32(s, ret, TCGV_LOW(arg)); - } else { - TCGv_i64 t = tcg_temp_new_i64(s); - tcg_gen_shri_i64(s, t, arg, count); - tcg_gen_mov_i32(s, ret, TCGV_LOW(t)); - tcg_temp_free_i64(s, t); - } -} - -static inline void tcg_gen_extu_i32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i32 arg) -{ - tcg_gen_mov_i32(s, TCGV_LOW(ret), arg); - tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); -} - -static inline void tcg_gen_ext_i32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i32 arg) -{ - tcg_gen_mov_i32(s, TCGV_LOW(ret), arg); - tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); -} - -/* Note: we assume the six high bytes are set to zero */ -static inline void tcg_gen_bswap16_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - tcg_gen_mov_i32(s, TCGV_HIGH(ret), 
TCGV_HIGH(arg)); - tcg_gen_bswap16_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); -} - -/* Note: we assume the four high bytes are set to zero */ -static inline void tcg_gen_bswap32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - tcg_gen_mov_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg)); - tcg_gen_bswap32_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); -} - -static inline void tcg_gen_bswap64_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - TCGv_i32 t0, t1; - t0 = tcg_temp_new_i32(s); - t1 = tcg_temp_new_i32(s); - - tcg_gen_bswap32_i32(s, t0, TCGV_LOW(arg)); - tcg_gen_bswap32_i32(s, t1, TCGV_HIGH(arg)); - tcg_gen_mov_i32(s, TCGV_LOW(ret), t1); - tcg_gen_mov_i32(s, TCGV_HIGH(ret), t0); - tcg_temp_free_i32(s, t0); - tcg_temp_free_i32(s, t1); -} -#else - -static inline void tcg_gen_ext8s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (TCG_TARGET_HAS_ext8s_i64) { - tcg_gen_op2_i64(s, INDEX_op_ext8s_i64, ret, arg); - } else { - tcg_gen_shli_i64(s, ret, arg, 56); - tcg_gen_sari_i64(s, ret, ret, 56); - } -} - -static inline void tcg_gen_ext16s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (TCG_TARGET_HAS_ext16s_i64) { - tcg_gen_op2_i64(s, INDEX_op_ext16s_i64, ret, arg); - } else { - tcg_gen_shli_i64(s, ret, arg, 48); - tcg_gen_sari_i64(s, ret, ret, 48); - } -} - -static inline void tcg_gen_ext32s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (TCG_TARGET_HAS_ext32s_i64) { - tcg_gen_op2_i64(s, INDEX_op_ext32s_i64, ret, arg); - } else { - tcg_gen_shli_i64(s, ret, arg, 32); - tcg_gen_sari_i64(s, ret, ret, 32); - } -} - -static inline void tcg_gen_ext8u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (TCG_TARGET_HAS_ext8u_i64) { - tcg_gen_op2_i64(s, INDEX_op_ext8u_i64, ret, arg); - } else { - tcg_gen_andi_i64(s, ret, arg, 0xffu); - } -} - -static inline void tcg_gen_ext16u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (TCG_TARGET_HAS_ext16u_i64) { - tcg_gen_op2_i64(s, INDEX_op_ext16u_i64, ret, arg); - } else { - tcg_gen_andi_i64(s, ret, arg, 0xffffu); - } -} - -static inline void tcg_gen_ext32u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (TCG_TARGET_HAS_ext32u_i64) { - tcg_gen_op2_i64(s, INDEX_op_ext32u_i64, ret, arg); - } else { - tcg_gen_andi_i64(s, ret, arg, 0xffffffffu); - } -} - -static inline void tcg_gen_trunc_shr_i64_i32(TCGContext *s, TCGv_i32 ret, TCGv_i64 arg, - unsigned int count) -{ - tcg_debug_assert(count < 64); - if (TCG_TARGET_HAS_trunc_shr_i32) { - tcg_gen_op3i_i32(s, INDEX_op_trunc_shr_i32, ret, - MAKE_TCGV_I32(GET_TCGV_I64(arg)), count); - } else if (count == 0) { - tcg_gen_mov_i32(s, ret, MAKE_TCGV_I32(GET_TCGV_I64(arg))); - } else { - TCGv_i64 t = tcg_temp_new_i64(s); - tcg_gen_shri_i64(s, t, arg, count); - tcg_gen_mov_i32(s, ret, MAKE_TCGV_I32(GET_TCGV_I64(t))); - tcg_temp_free_i64(s, t); - } -} - -/* Note: we assume the target supports move between 32 and 64 bit - registers */ -static inline void tcg_gen_extu_i32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i32 arg) -{ - tcg_gen_ext32u_i64(s, ret, MAKE_TCGV_I64(GET_TCGV_I32(arg))); -} - -/* Note: we assume the target supports move between 32 and 64 bit - registers */ -static inline void tcg_gen_ext_i32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i32 arg) -{ - tcg_gen_ext32s_i64(s, ret, MAKE_TCGV_I64(GET_TCGV_I32(arg))); -} - -/* Note: we assume the six high bytes are set to zero */ -static inline void tcg_gen_bswap16_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (TCG_TARGET_HAS_bswap16_i64) { - tcg_gen_op2_i64(s, INDEX_op_bswap16_i64, ret, arg); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - - 
tcg_gen_ext8u_i64(s, t0, arg); - tcg_gen_shli_i64(s, t0, t0, 8); - tcg_gen_shri_i64(s, ret, arg, 8); - tcg_gen_or_i64(s, ret, ret, t0); - tcg_temp_free_i64(s, t0); - } -} - -/* Note: we assume the four high bytes are set to zero */ -static inline void tcg_gen_bswap32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (TCG_TARGET_HAS_bswap32_i64) { - tcg_gen_op2_i64(s, INDEX_op_bswap32_i64, ret, arg); - } else { - TCGv_i64 t0, t1; - t0 = tcg_temp_new_i64(s); - t1 = tcg_temp_new_i64(s); - - tcg_gen_shli_i64(s, t0, arg, 24); - tcg_gen_ext32u_i64(s, t0, t0); - - tcg_gen_andi_i64(s, t1, arg, 0x0000ff00); - tcg_gen_shli_i64(s, t1, t1, 8); - tcg_gen_or_i64(s, t0, t0, t1); - - tcg_gen_shri_i64(s, t1, arg, 8); - tcg_gen_andi_i64(s, t1, t1, 0x0000ff00); - tcg_gen_or_i64(s, t0, t0, t1); - - tcg_gen_shri_i64(s, t1, arg, 24); - tcg_gen_or_i64(s, ret, t0, t1); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -static inline void tcg_gen_bswap64_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (TCG_TARGET_HAS_bswap64_i64) { - tcg_gen_op2_i64(s, INDEX_op_bswap64_i64, ret, arg); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - TCGv_i64 t1 = tcg_temp_new_i64(s); - - tcg_gen_shli_i64(s, t0, arg, 56); - - tcg_gen_andi_i64(s, t1, arg, 0x0000ff00); - tcg_gen_shli_i64(s, t1, t1, 40); - tcg_gen_or_i64(s, t0, t0, t1); - - tcg_gen_andi_i64(s, t1, arg, 0x00ff0000); - tcg_gen_shli_i64(s, t1, t1, 24); - tcg_gen_or_i64(s, t0, t0, t1); - - tcg_gen_andi_i64(s, t1, arg, 0xff000000); - tcg_gen_shli_i64(s, t1, t1, 8); - tcg_gen_or_i64(s, t0, t0, t1); - - tcg_gen_shri_i64(s, t1, arg, 8); - tcg_gen_andi_i64(s, t1, t1, 0xff000000); - tcg_gen_or_i64(s, t0, t0, t1); - - tcg_gen_shri_i64(s, t1, arg, 24); - tcg_gen_andi_i64(s, t1, t1, 0x00ff0000); - tcg_gen_or_i64(s, t0, t0, t1); - - tcg_gen_shri_i64(s, t1, arg, 40); - tcg_gen_andi_i64(s, t1, t1, 0x0000ff00); - tcg_gen_or_i64(s, t0, t0, t1); - - tcg_gen_shri_i64(s, t1, arg, 56); - tcg_gen_or_i64(s, ret, t0, t1); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -#endif - -static inline void tcg_gen_neg_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) -{ - if (TCG_TARGET_HAS_neg_i32) { - tcg_gen_op2_i32(s, INDEX_op_neg_i32, ret, arg); - } else { - TCGv_i32 t0 = tcg_const_i32(s, 0); - tcg_gen_sub_i32(s, ret, t0, arg); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_neg_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ - if (TCG_TARGET_HAS_neg_i64) { - tcg_gen_op2_i64(s, INDEX_op_neg_i64, ret, arg); - } else { - TCGv_i64 t0 = tcg_const_i64(s, 0); - tcg_gen_sub_i64(s, ret, t0, arg); - tcg_temp_free_i64(s, t0); - } -} - -static inline void tcg_gen_not_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) -{ - if (TCG_TARGET_HAS_not_i32) { - tcg_gen_op2_i32(s, INDEX_op_not_i32, ret, arg); - } else { - tcg_gen_xori_i32(s, ret, arg, -1); - } -} - -static inline void tcg_gen_not_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) -{ -#if TCG_TARGET_REG_BITS == 64 - if (TCG_TARGET_HAS_not_i64) { - tcg_gen_op2_i64(s, INDEX_op_not_i64, ret, arg); - } else { - tcg_gen_xori_i64(s, ret, arg, -1); - } -#else - tcg_gen_not_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_not_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg)); -#endif -} - -static inline void tcg_gen_discard_i32(TCGContext *s, TCGv_i32 arg) -{ - tcg_gen_op1_i32(s, INDEX_op_discard, arg); -} - -static inline void tcg_gen_discard_i64(TCGContext *s, TCGv_i64 arg) -{ -#if TCG_TARGET_REG_BITS == 32 - tcg_gen_discard_i32(s, TCGV_LOW(arg)); - tcg_gen_discard_i32(s, TCGV_HIGH(arg)); -#else - 
tcg_gen_op1_i64(s, INDEX_op_discard, arg); -#endif -} - -static inline void tcg_gen_andc_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_andc_i32) { - tcg_gen_op3_i32(s, INDEX_op_andc_i32, ret, arg1, arg2); - } else { - TCGv_i32 t0 = tcg_temp_new_i32(s); - tcg_gen_not_i32(s, t0, arg2); - tcg_gen_and_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_andc_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ -#if TCG_TARGET_REG_BITS == 64 - if (TCG_TARGET_HAS_andc_i64) { - tcg_gen_op3_i64(s, INDEX_op_andc_i64, ret, arg1, arg2); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - tcg_gen_not_i64(s, t0, arg2); - tcg_gen_and_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } -#else - tcg_gen_andc_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); - tcg_gen_andc_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); -#endif -} - -static inline void tcg_gen_eqv_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_eqv_i32) { - tcg_gen_op3_i32(s, INDEX_op_eqv_i32, ret, arg1, arg2); - } else { - tcg_gen_xor_i32(s, ret, arg1, arg2); - tcg_gen_not_i32(s, ret, ret); - } -} - -static inline void tcg_gen_eqv_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ -#if TCG_TARGET_REG_BITS == 64 - if (TCG_TARGET_HAS_eqv_i64) { - tcg_gen_op3_i64(s, INDEX_op_eqv_i64, ret, arg1, arg2); - } else { - tcg_gen_xor_i64(s, ret, arg1, arg2); - tcg_gen_not_i64(s, ret, ret); - } -#else - tcg_gen_eqv_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); - tcg_gen_eqv_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); -#endif -} - -static inline void tcg_gen_nand_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_nand_i32) { - tcg_gen_op3_i32(s, INDEX_op_nand_i32, ret, arg1, arg2); - } else { - tcg_gen_and_i32(s, ret, arg1, arg2); - tcg_gen_not_i32(s, ret, ret); - } -} - -static inline void tcg_gen_nand_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ -#if TCG_TARGET_REG_BITS == 64 - if (TCG_TARGET_HAS_nand_i64) { - tcg_gen_op3_i64(s, INDEX_op_nand_i64, ret, arg1, arg2); - } else { - tcg_gen_and_i64(s, ret, arg1, arg2); - tcg_gen_not_i64(s, ret, ret); - } -#else - tcg_gen_nand_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); - tcg_gen_nand_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); -#endif -} - -static inline void tcg_gen_nor_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_nor_i32) { - tcg_gen_op3_i32(s, INDEX_op_nor_i32, ret, arg1, arg2); - } else { - tcg_gen_or_i32(s, ret, arg1, arg2); - tcg_gen_not_i32(s, ret, ret); - } -} - -static inline void tcg_gen_nor_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ -#if TCG_TARGET_REG_BITS == 64 - if (TCG_TARGET_HAS_nor_i64) { - tcg_gen_op3_i64(s, INDEX_op_nor_i64, ret, arg1, arg2); - } else { - tcg_gen_or_i64(s, ret, arg1, arg2); - tcg_gen_not_i64(s, ret, ret); - } -#else - tcg_gen_nor_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); - tcg_gen_nor_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); -#endif -} - -static inline void tcg_gen_orc_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_orc_i32) { - tcg_gen_op3_i32(s, INDEX_op_orc_i32, ret, arg1, arg2); - } else { - TCGv_i32 t0 = tcg_temp_new_i32(s); - tcg_gen_not_i32(s, t0, arg2); - tcg_gen_or_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } -} - -static inline void tcg_gen_orc_i64(TCGContext 
*s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ -#if TCG_TARGET_REG_BITS == 64 - if (TCG_TARGET_HAS_orc_i64) { - tcg_gen_op3_i64(s, INDEX_op_orc_i64, ret, arg1, arg2); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - tcg_gen_not_i64(s, t0, arg2); - tcg_gen_or_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } -#else - tcg_gen_orc_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); - tcg_gen_orc_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); -#endif -} - -static inline void tcg_gen_rotl_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_rot_i32) { - tcg_gen_op3_i32(s, INDEX_op_rotl_i32, ret, arg1, arg2); - } else { - TCGv_i32 t0, t1; - - t0 = tcg_temp_new_i32(s); - t1 = tcg_temp_new_i32(s); - tcg_gen_shl_i32(s, t0, arg1, arg2); - tcg_gen_subfi_i32(s, t1, 32, arg2); - tcg_gen_shr_i32(s, t1, arg1, t1); - tcg_gen_or_i32(s, ret, t0, t1); - tcg_temp_free_i32(s, t0); - tcg_temp_free_i32(s, t1); - } -} - -static inline void tcg_gen_rotl_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (TCG_TARGET_HAS_rot_i64) { - tcg_gen_op3_i64(s, INDEX_op_rotl_i64, ret, arg1, arg2); - } else { - TCGv_i64 t0, t1; - t0 = tcg_temp_new_i64(s); - t1 = tcg_temp_new_i64(s); - tcg_gen_shl_i64(s, t0, arg1, arg2); - tcg_gen_subfi_i64(s, t1, 64, arg2); - tcg_gen_shr_i64(s, t1, arg1, t1); - tcg_gen_or_i64(s, ret, t0, t1); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -static inline void tcg_gen_rotli_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) -{ - /* some cases can be optimized here */ - if (arg2 == 0) { - tcg_gen_mov_i32(s, ret, arg1); - } else if (TCG_TARGET_HAS_rot_i32) { - TCGv_i32 t0 = tcg_const_i32(s, arg2); - tcg_gen_rotl_i32(s, ret, arg1, t0); - tcg_temp_free_i32(s, t0); - } else { - TCGv_i32 t0, t1; - t0 = tcg_temp_new_i32(s); - t1 = tcg_temp_new_i32(s); - tcg_gen_shli_i32(s, t0, arg1, arg2); - tcg_gen_shri_i32(s, t1, arg1, 32 - arg2); - tcg_gen_or_i32(s, ret, t0, t1); - tcg_temp_free_i32(s, t0); - tcg_temp_free_i32(s, t1); - } -} - -static inline void tcg_gen_rotli_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - /* some cases can be optimized here */ - if (arg2 == 0) { - tcg_gen_mov_i64(s, ret, arg1); - } else if (TCG_TARGET_HAS_rot_i64) { - TCGv_i64 t0 = tcg_const_i64(s, arg2); - tcg_gen_rotl_i64(s, ret, arg1, t0); - tcg_temp_free_i64(s, t0); - } else { - TCGv_i64 t0, t1; - t0 = tcg_temp_new_i64(s); - t1 = tcg_temp_new_i64(s); - tcg_gen_shli_i64(s, t0, arg1, arg2); - tcg_gen_shri_i64(s, t1, arg1, 64 - arg2); - tcg_gen_or_i64(s, ret, t0, t1); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -static inline void tcg_gen_rotr_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_rot_i32) { - tcg_gen_op3_i32(s, INDEX_op_rotr_i32, ret, arg1, arg2); - } else { - TCGv_i32 t0, t1; - - t0 = tcg_temp_new_i32(s); - t1 = tcg_temp_new_i32(s); - tcg_gen_shr_i32(s, t0, arg1, arg2); - tcg_gen_subfi_i32(s, t1, 32, arg2); - tcg_gen_shl_i32(s, t1, arg1, t1); - tcg_gen_or_i32(s, ret, t0, t1); - tcg_temp_free_i32(s, t0); - tcg_temp_free_i32(s, t1); - } -} - -static inline void tcg_gen_rotr_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (TCG_TARGET_HAS_rot_i64) { - tcg_gen_op3_i64(s, INDEX_op_rotr_i64, ret, arg1, arg2); - } else { - TCGv_i64 t0, t1; - t0 = tcg_temp_new_i64(s); - t1 = tcg_temp_new_i64(s); - tcg_gen_shr_i64(s, t0, arg1, arg2); - tcg_gen_subfi_i64(s, t1, 64, arg2); - tcg_gen_shl_i64(s, t1, arg1, t1); - 
tcg_gen_or_i64(s, ret, t0, t1); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -static inline void tcg_gen_rotri_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) -{ - /* some cases can be optimized here */ - if (arg2 == 0) { - tcg_gen_mov_i32(s, ret, arg1); - } else { - tcg_gen_rotli_i32(s, ret, arg1, 32 - arg2); - } -} - -static inline void tcg_gen_rotri_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) -{ - /* some cases can be optimized here */ - if (arg2 == 0) { - tcg_gen_mov_i64(s, ret, arg1); - } else { - tcg_gen_rotli_i64(s, ret, arg1, 64 - arg2); - } -} - -static inline void tcg_gen_deposit_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, - TCGv_i32 arg2, unsigned int ofs, - unsigned int len) -{ - uint32_t mask; - TCGv_i32 t1; - - tcg_debug_assert(ofs < 32); - tcg_debug_assert(len <= 32); - tcg_debug_assert(ofs + len <= 32); - - if (ofs == 0 && len == 32) { - tcg_gen_mov_i32(s, ret, arg2); - return; - } - if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) { - tcg_gen_op5ii_i32(s, INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len); - return; - } - - mask = (1u << (len & 0x1f)) - 1; - t1 = tcg_temp_new_i32(s); - - if (ofs + len < 32) { - tcg_gen_andi_i32(s, t1, arg2, mask); - tcg_gen_shli_i32(s, t1, t1, ofs); - } else { - tcg_gen_shli_i32(s, t1, arg2, ofs); - } - tcg_gen_andi_i32(s, ret, arg1, ~(mask << ofs)); - tcg_gen_or_i32(s, ret, ret, t1); - - tcg_temp_free_i32(s, t1); -} - -static inline void tcg_gen_deposit_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, - TCGv_i64 arg2, unsigned int ofs, - unsigned int len) -{ - uint64_t mask; - TCGv_i64 t1; - - tcg_debug_assert(ofs < 64); - tcg_debug_assert(len <= 64); - tcg_debug_assert(ofs + len <= 64); - - if (ofs == 0 && len == 64) { - tcg_gen_mov_i64(s, ret, arg2); - return; - } - if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) { - tcg_gen_op5ii_i64(s, INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len); - return; - } - -#if TCG_TARGET_REG_BITS == 32 - if (ofs >= 32) { - tcg_gen_deposit_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), - TCGV_LOW(arg2), ofs - 32, len); - tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1)); - return; - } - if (ofs + len <= 32) { - tcg_gen_deposit_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), - TCGV_LOW(arg2), ofs, len); - tcg_gen_mov_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1)); - return; - } -#endif - - mask = (1ull << len) - 1; - t1 = tcg_temp_new_i64(s); - - if (ofs + len < 64) { - tcg_gen_andi_i64(s, t1, arg2, mask); - tcg_gen_shli_i64(s, t1, t1, ofs); - } else { - tcg_gen_shli_i64(s, t1, arg2, ofs); - } - tcg_gen_andi_i64(s, ret, arg1, ~(mask << ofs)); - tcg_gen_or_i64(s, ret, ret, t1); - - tcg_temp_free_i64(s, t1); -} - -static inline void tcg_gen_concat_i32_i64(TCGContext *s, TCGv_i64 dest, TCGv_i32 low, - TCGv_i32 high) -{ -#if TCG_TARGET_REG_BITS == 32 - tcg_gen_mov_i32(s, TCGV_LOW(dest), low); - tcg_gen_mov_i32(s, TCGV_HIGH(dest), high); -#else - TCGv_i64 tmp = tcg_temp_new_i64(s); - /* These extensions are only needed for type correctness. - We may be able to do better given target specific information. */ - tcg_gen_extu_i32_i64(s, tmp, high); - tcg_gen_extu_i32_i64(s, dest, low); - /* If deposit is available, use it. Otherwise use the extra - knowledge that we have of the zero-extensions above. 
*/ - if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) { - tcg_gen_deposit_i64(s, dest, dest, tmp, 32, 32); - } else { - tcg_gen_shli_i64(s, tmp, tmp, 32); - tcg_gen_or_i64(s, dest, dest, tmp); - } - tcg_temp_free_i64(s, tmp); -#endif -} - -static inline void tcg_gen_concat32_i64(TCGContext *s, TCGv_i64 dest, TCGv_i64 low, - TCGv_i64 high) -{ - tcg_gen_deposit_i64(s, dest, low, high, 32, 32); -} - -static inline void tcg_gen_trunc_i64_i32(TCGContext *s, TCGv_i32 ret, TCGv_i64 arg) -{ - tcg_gen_trunc_shr_i64_i32(s, ret, arg, 0); -} - -static inline void tcg_gen_extr_i64_i32(TCGContext *s, TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg) -{ - tcg_gen_trunc_shr_i64_i32(s, lo, arg, 0); - tcg_gen_trunc_shr_i64_i32(s, hi, arg, 32); -} - -static inline void tcg_gen_extr32_i64(TCGContext *s, TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg) -{ - tcg_gen_ext32u_i64(s, lo, arg); - tcg_gen_shri_i64(s, hi, arg, 32); -} - -static inline void tcg_gen_movcond_i32(TCGContext *s, TCGCond cond, TCGv_i32 ret, - TCGv_i32 c1, TCGv_i32 c2, - TCGv_i32 v1, TCGv_i32 v2) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_mov_i32(s, ret, v1); - } else if (cond == TCG_COND_NEVER) { - tcg_gen_mov_i32(s, ret, v2); - } - else if (TCG_TARGET_HAS_movcond_i32) { - tcg_gen_op6i_i32(s, INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond); - } else { - TCGv_i32 t0 = tcg_temp_new_i32(s); - TCGv_i32 t1 = tcg_temp_new_i32(s); - tcg_gen_setcond_i32(s, cond, t0, c1, c2); - tcg_gen_neg_i32(s, t0, t0); - tcg_gen_and_i32(s, t1, v1, t0); - tcg_gen_andc_i32(s, ret, v2, t0); - tcg_gen_or_i32(s, ret, ret, t1); - tcg_temp_free_i32(s, t0); - tcg_temp_free_i32(s, t1); - } -} - -static inline void tcg_gen_movcond_i64(TCGContext *s, TCGCond cond, TCGv_i64 ret, - TCGv_i64 c1, TCGv_i64 c2, - TCGv_i64 v1, TCGv_i64 v2) -{ - if (cond == TCG_COND_ALWAYS) { - tcg_gen_mov_i64(s, ret, v1); - } else if (cond == TCG_COND_NEVER) { - tcg_gen_mov_i64(s, ret, v2); - } - else { -#if TCG_TARGET_REG_BITS == 32 - TCGv_i32 t0 = tcg_temp_new_i32(s); - TCGv_i32 t1 = tcg_temp_new_i32(s); - tcg_gen_op6i_i32(s, INDEX_op_setcond2_i32, t0, - TCGV_LOW(c1), TCGV_HIGH(c1), - TCGV_LOW(c2), TCGV_HIGH(c2), cond); - - if (TCG_TARGET_HAS_movcond_i32) { - tcg_gen_movi_i32(s, t1, 0); - tcg_gen_movcond_i32(s, TCG_COND_NE, TCGV_LOW(ret), t0, t1, - TCGV_LOW(v1), TCGV_LOW(v2)); - tcg_gen_movcond_i32(s, TCG_COND_NE, TCGV_HIGH(ret), t0, t1, - TCGV_HIGH(v1), TCGV_HIGH(v2)); - } else { - tcg_gen_neg_i32(s, t0, t0); - - tcg_gen_and_i32(s, t1, TCGV_LOW(v1), t0); - tcg_gen_andc_i32(s, TCGV_LOW(ret), TCGV_LOW(v2), t0); - tcg_gen_or_i32(s, TCGV_LOW(ret), TCGV_LOW(ret), t1); - - tcg_gen_and_i32(s, t1, TCGV_HIGH(v1), t0); - tcg_gen_andc_i32(s, TCGV_HIGH(ret), TCGV_HIGH(v2), t0); - tcg_gen_or_i32(s, TCGV_HIGH(ret), TCGV_HIGH(ret), t1); - } - tcg_temp_free_i32(s, t0); - tcg_temp_free_i32(s, t1); - #else - if (TCG_TARGET_HAS_movcond_i64) { - tcg_gen_op6i_i64(s, INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - TCGv_i64 t1 = tcg_temp_new_i64(s); - tcg_gen_setcond_i64(s, cond, t0, c1, c2); - tcg_gen_neg_i64(s, t0, t0); - tcg_gen_and_i64(s, t1, v1, t0); - tcg_gen_andc_i64(s, ret, v2, t0); - tcg_gen_or_i64(s, ret, ret, t1); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -#endif - } -} - -static inline void tcg_gen_add2_i32(TCGContext *s, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, - TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) -{ - if (TCG_TARGET_HAS_add2_i32) { - tcg_gen_op6_i32(s, INDEX_op_add2_i32, rl, rh, al, ah, bl, bh); - /* Allow the optimizer room 
to replace add2 with two moves. */ - tcg_gen_op0(s, INDEX_op_nop); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - TCGv_i64 t1 = tcg_temp_new_i64(s); - tcg_gen_concat_i32_i64(s, t0, al, ah); - tcg_gen_concat_i32_i64(s, t1, bl, bh); - tcg_gen_add_i64(s, t0, t0, t1); - tcg_gen_extr_i64_i32(s, rl, rh, t0); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -static inline void tcg_gen_sub2_i32(TCGContext *s, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, - TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) -{ - if (TCG_TARGET_HAS_sub2_i32) { - tcg_gen_op6_i32(s, INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh); - /* Allow the optimizer room to replace sub2 with two moves. */ - tcg_gen_op0(s, INDEX_op_nop); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - TCGv_i64 t1 = tcg_temp_new_i64(s); - tcg_gen_concat_i32_i64(s, t0, al, ah); - tcg_gen_concat_i32_i64(s, t1, bl, bh); - tcg_gen_sub_i64(s, t0, t0, t1); - tcg_gen_extr_i64_i32(s, rl, rh, t0); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -static inline void tcg_gen_mulu2_i32(TCGContext *s, TCGv_i32 rl, TCGv_i32 rh, - TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_mulu2_i32) { - tcg_gen_op4_i32(s, INDEX_op_mulu2_i32, rl, rh, arg1, arg2); - /* Allow the optimizer room to replace mulu2 with two moves. */ - tcg_gen_op0(s, INDEX_op_nop); - } else if (TCG_TARGET_HAS_muluh_i32) { - TCGv_i32 t = tcg_temp_new_i32(s); - tcg_gen_op3_i32(s, INDEX_op_mul_i32, t, arg1, arg2); - tcg_gen_op3_i32(s, INDEX_op_muluh_i32, rh, arg1, arg2); - tcg_gen_mov_i32(s, rl, t); - tcg_temp_free_i32(s, t); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - TCGv_i64 t1 = tcg_temp_new_i64(s); - tcg_gen_extu_i32_i64(s, t0, arg1); - tcg_gen_extu_i32_i64(s, t1, arg2); - tcg_gen_mul_i64(s, t0, t0, t1); - tcg_gen_extr_i64_i32(s, rl, rh, t0); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -static inline void tcg_gen_muls2_i32(TCGContext *s, TCGv_i32 rl, TCGv_i32 rh, - TCGv_i32 arg1, TCGv_i32 arg2) -{ - if (TCG_TARGET_HAS_muls2_i32) { - tcg_gen_op4_i32(s, INDEX_op_muls2_i32, rl, rh, arg1, arg2); - /* Allow the optimizer room to replace muls2 with two moves. */ - tcg_gen_op0(s, INDEX_op_nop); - } else if (TCG_TARGET_HAS_mulsh_i32) { - TCGv_i32 t = tcg_temp_new_i32(s); - tcg_gen_op3_i32(s, INDEX_op_mul_i32, t, arg1, arg2); - tcg_gen_op3_i32(s, INDEX_op_mulsh_i32, rh, arg1, arg2); - tcg_gen_mov_i32(s, rl, t); - tcg_temp_free_i32(s, t); - } else if (TCG_TARGET_REG_BITS == 32) { - TCGv_i32 t0 = tcg_temp_new_i32(s); - TCGv_i32 t1 = tcg_temp_new_i32(s); - TCGv_i32 t2 = tcg_temp_new_i32(s); - TCGv_i32 t3 = tcg_temp_new_i32(s); - tcg_gen_mulu2_i32(s, t0, t1, arg1, arg2); - /* Adjust for negative inputs. 
*/ - tcg_gen_sari_i32(s, t2, arg1, 31); - tcg_gen_sari_i32(s, t3, arg2, 31); - tcg_gen_and_i32(s, t2, t2, arg2); - tcg_gen_and_i32(s, t3, t3, arg1); - tcg_gen_sub_i32(s, rh, t1, t2); - tcg_gen_sub_i32(s, rh, rh, t3); - tcg_gen_mov_i32(s, rl, t0); - tcg_temp_free_i32(s, t0); - tcg_temp_free_i32(s, t1); - tcg_temp_free_i32(s, t2); - tcg_temp_free_i32(s, t3); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - TCGv_i64 t1 = tcg_temp_new_i64(s); - tcg_gen_ext_i32_i64(s, t0, arg1); - tcg_gen_ext_i32_i64(s, t1, arg2); - tcg_gen_mul_i64(s, t0, t0, t1); - tcg_gen_extr_i64_i32(s, rl, rh, t0); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -static inline void tcg_gen_add2_i64(TCGContext *s, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, - TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) -{ - if (TCG_TARGET_HAS_add2_i64) { - tcg_gen_op6_i64(s, INDEX_op_add2_i64, rl, rh, al, ah, bl, bh); - /* Allow the optimizer room to replace add2 with two moves. */ - tcg_gen_op0(s, INDEX_op_nop); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - TCGv_i64 t1 = tcg_temp_new_i64(s); - tcg_gen_add_i64(s, t0, al, bl); - tcg_gen_setcond_i64(s, TCG_COND_LTU, t1, t0, al); - tcg_gen_add_i64(s, rh, ah, bh); - tcg_gen_add_i64(s, rh, rh, t1); - tcg_gen_mov_i64(s, rl, t0); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -static inline void tcg_gen_sub2_i64(TCGContext *s, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, - TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) -{ - if (TCG_TARGET_HAS_sub2_i64) { - tcg_gen_op6_i64(s, INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh); - /* Allow the optimizer room to replace sub2 with two moves. */ - tcg_gen_op0(s, INDEX_op_nop); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - TCGv_i64 t1 = tcg_temp_new_i64(s); - tcg_gen_sub_i64(s, t0, al, bl); - tcg_gen_setcond_i64(s, TCG_COND_LTU, t1, al, bl); - tcg_gen_sub_i64(s, rh, ah, bh); - tcg_gen_sub_i64(s, rh, rh, t1); - tcg_gen_mov_i64(s, rl, t0); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - } -} - -static inline void tcg_gen_mulu2_i64(TCGContext *s, TCGv_i64 rl, TCGv_i64 rh, - TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (TCG_TARGET_HAS_mulu2_i64) { - tcg_gen_op4_i64(s, INDEX_op_mulu2_i64, rl, rh, arg1, arg2); - /* Allow the optimizer room to replace mulu2 with two moves. */ - tcg_gen_op0(s, INDEX_op_nop); - } else if (TCG_TARGET_HAS_muluh_i64) { - TCGv_i64 t = tcg_temp_new_i64(s); - tcg_gen_op3_i64(s, INDEX_op_mul_i64, t, arg1, arg2); - tcg_gen_op3_i64(s, INDEX_op_muluh_i64, rh, arg1, arg2); - tcg_gen_mov_i64(s, rl, t); - tcg_temp_free_i64(s, t); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - tcg_gen_mul_i64(s, t0, arg1, arg2); - gen_helper_muluh_i64(s, rh, arg1, arg2); - tcg_gen_mov_i64(s, rl, t0); - tcg_temp_free_i64(s, t0); - } -} - -static inline void tcg_gen_muls2_i64(TCGContext *s, TCGv_i64 rl, TCGv_i64 rh, - TCGv_i64 arg1, TCGv_i64 arg2) -{ - if (TCG_TARGET_HAS_muls2_i64) { - tcg_gen_op4_i64(s, INDEX_op_muls2_i64, rl, rh, arg1, arg2); - /* Allow the optimizer room to replace muls2 with two moves. 
*/ - tcg_gen_op0(s, INDEX_op_nop); - } else if (TCG_TARGET_HAS_mulsh_i64) { - TCGv_i64 t = tcg_temp_new_i64(s); - tcg_gen_op3_i64(s, INDEX_op_mul_i64, t, arg1, arg2); - tcg_gen_op3_i64(s, INDEX_op_mulsh_i64, rh, arg1, arg2); - tcg_gen_mov_i64(s, rl, t); - tcg_temp_free_i64(s, t); - } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) { - TCGv_i64 t0 = tcg_temp_new_i64(s); - TCGv_i64 t1 = tcg_temp_new_i64(s); - TCGv_i64 t2 = tcg_temp_new_i64(s); - TCGv_i64 t3 = tcg_temp_new_i64(s); - tcg_gen_mulu2_i64(s, t0, t1, arg1, arg2); - /* Adjust for negative inputs. */ - tcg_gen_sari_i64(s, t2, arg1, 63); - tcg_gen_sari_i64(s, t3, arg2, 63); - tcg_gen_and_i64(s, t2, t2, arg2); - tcg_gen_and_i64(s, t3, t3, arg1); - tcg_gen_sub_i64(s, rh, t1, t2); - tcg_gen_sub_i64(s, rh, rh, t3); - tcg_gen_mov_i64(s, rl, t0); - tcg_temp_free_i64(s, t0); - tcg_temp_free_i64(s, t1); - tcg_temp_free_i64(s, t2); - tcg_temp_free_i64(s, t3); - } else { - TCGv_i64 t0 = tcg_temp_new_i64(s); - tcg_gen_mul_i64(s, t0, arg1, arg2); - gen_helper_mulsh_i64(s, rh, arg1, arg2); - tcg_gen_mov_i64(s, rl, t0); - tcg_temp_free_i64(s, t0); - } -} - -/***************************************/ -/* QEMU specific operations. Their type depend on the QEMU CPU - type. */ -#ifndef TARGET_LONG_BITS -#error must include QEMU headers -#endif - -#if TARGET_LONG_BITS == 32 -#define TCGv TCGv_i32 -#define tcg_temp_new(s) tcg_temp_new_i32(s) -#define tcg_global_reg_new tcg_global_reg_new_i32 -#define tcg_global_mem_new tcg_global_mem_new_i32 -#define tcg_temp_local_new(s) tcg_temp_local_new_i32(s) -#define tcg_temp_free tcg_temp_free_i32 -#define TCGV_UNUSED(x) TCGV_UNUSED_I32(x) -#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I32(x) -#define TCGV_EQUAL(a, b) TCGV_EQUAL_I32(a, b) -#define tcg_add_param_tl tcg_add_param_i32 -#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32 -#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32 -#else -#define TCGv TCGv_i64 -#define tcg_temp_new(s) tcg_temp_new_i64(s) -#define tcg_global_reg_new tcg_global_reg_new_i64 -#define tcg_global_mem_new tcg_global_mem_new_i64 -#define tcg_temp_local_new(s) tcg_temp_local_new_i64(s) -#define tcg_temp_free tcg_temp_free_i64 -#define TCGV_UNUSED(x) TCGV_UNUSED_I64(x) -#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I64(x) -#define TCGV_EQUAL(a, b) TCGV_EQUAL_I64(a, b) -#define tcg_add_param_tl tcg_add_param_i64 -#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64 -#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 -#endif - -/* debug info: write the PC of the corresponding QEMU CPU instruction */ -static inline void tcg_gen_debug_insn_start(TCGContext *s, uint64_t pc) -{ - /* XXX: must really use a 32 bit size for TCGArg in all cases */ -#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS - tcg_gen_op2ii(s, INDEX_op_debug_insn_start, - (uint32_t)(pc), (uint32_t)(pc >> 32)); -#else - tcg_gen_op1i(s, INDEX_op_debug_insn_start, pc); -#endif -} - -static inline void tcg_gen_exit_tb(TCGContext *s, uintptr_t val) -{ - tcg_gen_op1i(s, INDEX_op_exit_tb, val); -} - -static inline void tcg_gen_goto_tb(TCGContext *s, unsigned idx) -{ - /* We only support two chained exits. */ - tcg_debug_assert(idx <= 1); -#ifdef CONFIG_DEBUG_TCG - /* Verify that we havn't seen this numbered exit before. 
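The double-word fallbacks above lean on two word-size identities: the carry out of the low half of a two-word add is (al + bl) < al in unsigned arithmetic, and the signed high product is the unsigned high product corrected by the operands' signs. A minimal host-side check of both in plain C, independent of TCG (illustrative only; the signed reference assumes the usual arithmetic right shift of gcc/clang):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* add2 fallback: carry out of the low half is (al + bl) < al. */
        uint32_t al = 0xffffffffu, ah = 1, bl = 2, bh = 3;
        uint32_t rl = al + bl;
        uint32_t rh = ah + bh + (rl < al);
        uint64_t ref = (((uint64_t)ah << 32) | al) + (((uint64_t)bh << 32) | bl);
        assert(rl == (uint32_t)ref && rh == (uint32_t)(ref >> 32));

        /* muls2 via mulu2: the signed high word is the unsigned high word
           minus arg2 when arg1 is negative and minus arg1 when arg2 is
           negative -- exactly what the sari/and/sub sequence computes. */
        int32_t a = -7, b = 100000;
        uint32_t uhi = (uint32_t)(((uint64_t)(uint32_t)a * (uint32_t)b) >> 32);
        uint32_t shi = uhi - (a < 0 ? (uint32_t)b : 0u) - (b < 0 ? (uint32_t)a : 0u);
        assert((int32_t)shi == (int32_t)(((int64_t)a * b) >> 32));
        return 0;
    }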
*/ - tcg_debug_assert((s->goto_tb_issue_mask & (1 << idx)) == 0); - s->goto_tb_issue_mask |= 1 << idx; -#endif - tcg_gen_op1i(s, INDEX_op_goto_tb, idx); -} - - -void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64, TCGv, TCGArg, TCGMemOp); -void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64, TCGv, TCGArg, TCGMemOp); - -static inline void tcg_gen_qemu_ld8u(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) -{ - tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_UB); -} - -static inline void tcg_gen_qemu_ld8s(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) -{ - tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_SB); -} - -static inline void tcg_gen_qemu_ld16u(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) -{ - tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_TEUW); -} - -static inline void tcg_gen_qemu_ld16s(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) -{ - tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_TESW); -} - -static inline void tcg_gen_qemu_ld32u(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) -{ - tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_TEUL); -} - -static inline void tcg_gen_qemu_ld32s(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) -{ - tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_TESL); -} - -static inline void tcg_gen_qemu_ld64(struct uc_struct *uc, TCGv_i64 ret, TCGv addr, int mem_index) -{ - tcg_gen_qemu_ld_i64(uc, ret, addr, mem_index, MO_TEQ); -} - -static inline void tcg_gen_qemu_st8(struct uc_struct *uc, TCGv arg, TCGv addr, int mem_index) -{ - tcg_gen_qemu_st_tl(uc, arg, addr, mem_index, MO_UB); -} - -static inline void tcg_gen_qemu_st16(struct uc_struct *uc, TCGv arg, TCGv addr, int mem_index) -{ - tcg_gen_qemu_st_tl(uc, arg, addr, mem_index, MO_TEUW); -} - -static inline void tcg_gen_qemu_st32(struct uc_struct *uc, TCGv arg, TCGv addr, int mem_index) -{ - tcg_gen_qemu_st_tl(uc, arg, addr, mem_index, MO_TEUL); -} - -static inline void tcg_gen_qemu_st64(struct uc_struct *uc, TCGv_i64 arg, TCGv addr, int mem_index) -{ - tcg_gen_qemu_st_i64(uc, arg, addr, mem_index, MO_TEQ); -} - -#if TARGET_LONG_BITS == 64 -#define tcg_gen_movi_tl tcg_gen_movi_i64 -#define tcg_gen_mov_tl tcg_gen_mov_i64 -#define tcg_gen_ld8u_tl tcg_gen_ld8u_i64 -#define tcg_gen_ld8s_tl tcg_gen_ld8s_i64 -#define tcg_gen_ld16u_tl tcg_gen_ld16u_i64 -#define tcg_gen_ld16s_tl tcg_gen_ld16s_i64 -#define tcg_gen_ld32u_tl tcg_gen_ld32u_i64 -#define tcg_gen_ld32s_tl tcg_gen_ld32s_i64 -#define tcg_gen_ld_tl tcg_gen_ld_i64 -#define tcg_gen_st8_tl tcg_gen_st8_i64 -#define tcg_gen_st16_tl tcg_gen_st16_i64 -#define tcg_gen_st32_tl tcg_gen_st32_i64 -#define tcg_gen_st_tl tcg_gen_st_i64 -#define tcg_gen_add_tl tcg_gen_add_i64 -#define tcg_gen_addi_tl tcg_gen_addi_i64 -#define tcg_gen_sub_tl tcg_gen_sub_i64 -#define tcg_gen_neg_tl tcg_gen_neg_i64 -#define tcg_gen_subfi_tl tcg_gen_subfi_i64 -#define tcg_gen_subi_tl tcg_gen_subi_i64 -#define tcg_gen_and_tl tcg_gen_and_i64 -#define tcg_gen_andi_tl tcg_gen_andi_i64 -#define tcg_gen_or_tl tcg_gen_or_i64 -#define tcg_gen_ori_tl tcg_gen_ori_i64 -#define tcg_gen_xor_tl tcg_gen_xor_i64 -#define tcg_gen_xori_tl tcg_gen_xori_i64 -#define tcg_gen_not_tl tcg_gen_not_i64 -#define tcg_gen_shl_tl tcg_gen_shl_i64 -#define tcg_gen_shli_tl tcg_gen_shli_i64 -#define tcg_gen_shr_tl tcg_gen_shr_i64 -#define tcg_gen_shri_tl tcg_gen_shri_i64 -#define 
tcg_gen_sar_tl tcg_gen_sar_i64 -#define tcg_gen_sari_tl tcg_gen_sari_i64 -#define tcg_gen_brcond_tl tcg_gen_brcond_i64 -#define tcg_gen_brcondi_tl tcg_gen_brcondi_i64 -#define tcg_gen_setcond_tl tcg_gen_setcond_i64 -#define tcg_gen_setcondi_tl tcg_gen_setcondi_i64 -#define tcg_gen_mul_tl tcg_gen_mul_i64 -#define tcg_gen_muli_tl tcg_gen_muli_i64 -#define tcg_gen_div_tl tcg_gen_div_i64 -#define tcg_gen_rem_tl tcg_gen_rem_i64 -#define tcg_gen_divu_tl tcg_gen_divu_i64 -#define tcg_gen_remu_tl tcg_gen_remu_i64 -#define tcg_gen_discard_tl tcg_gen_discard_i64 -#define tcg_gen_trunc_tl_i32 tcg_gen_trunc_i64_i32 -#define tcg_gen_trunc_i64_tl tcg_gen_mov_i64 -#define tcg_gen_extu_i32_tl tcg_gen_extu_i32_i64 -#define tcg_gen_ext_i32_tl tcg_gen_ext_i32_i64 -#define tcg_gen_extu_tl_i64 tcg_gen_mov_i64 -#define tcg_gen_ext_tl_i64 tcg_gen_mov_i64 -#define tcg_gen_ext8u_tl tcg_gen_ext8u_i64 -#define tcg_gen_ext8s_tl tcg_gen_ext8s_i64 -#define tcg_gen_ext16u_tl tcg_gen_ext16u_i64 -#define tcg_gen_ext16s_tl tcg_gen_ext16s_i64 -#define tcg_gen_ext32u_tl tcg_gen_ext32u_i64 -#define tcg_gen_ext32s_tl tcg_gen_ext32s_i64 -#define tcg_gen_bswap16_tl tcg_gen_bswap16_i64 -#define tcg_gen_bswap32_tl tcg_gen_bswap32_i64 -#define tcg_gen_bswap64_tl tcg_gen_bswap64_i64 -#define tcg_gen_concat_tl_i64 tcg_gen_concat32_i64 -#define tcg_gen_extr_i64_tl tcg_gen_extr32_i64 -#define tcg_gen_andc_tl tcg_gen_andc_i64 -#define tcg_gen_eqv_tl tcg_gen_eqv_i64 -#define tcg_gen_nand_tl tcg_gen_nand_i64 -#define tcg_gen_nor_tl tcg_gen_nor_i64 -#define tcg_gen_orc_tl tcg_gen_orc_i64 -#define tcg_gen_rotl_tl tcg_gen_rotl_i64 -#define tcg_gen_rotli_tl tcg_gen_rotli_i64 -#define tcg_gen_rotr_tl tcg_gen_rotr_i64 -#define tcg_gen_rotri_tl tcg_gen_rotri_i64 -#define tcg_gen_deposit_tl tcg_gen_deposit_i64 -#define tcg_const_tl tcg_const_i64 -#define tcg_const_local_tl tcg_const_local_i64 -#define tcg_gen_movcond_tl tcg_gen_movcond_i64 -#define tcg_gen_add2_tl tcg_gen_add2_i64 -#define tcg_gen_sub2_tl tcg_gen_sub2_i64 -#define tcg_gen_mulu2_tl tcg_gen_mulu2_i64 -#define tcg_gen_muls2_tl tcg_gen_muls2_i64 -#else -#define tcg_gen_movi_tl tcg_gen_movi_i32 -#define tcg_gen_mov_tl tcg_gen_mov_i32 -#define tcg_gen_ld8u_tl tcg_gen_ld8u_i32 -#define tcg_gen_ld8s_tl tcg_gen_ld8s_i32 -#define tcg_gen_ld16u_tl tcg_gen_ld16u_i32 -#define tcg_gen_ld16s_tl tcg_gen_ld16s_i32 -#define tcg_gen_ld32u_tl tcg_gen_ld_i32 -#define tcg_gen_ld32s_tl tcg_gen_ld_i32 -#define tcg_gen_ld_tl tcg_gen_ld_i32 -#define tcg_gen_st8_tl tcg_gen_st8_i32 -#define tcg_gen_st16_tl tcg_gen_st16_i32 -#define tcg_gen_st32_tl tcg_gen_st_i32 -#define tcg_gen_st_tl tcg_gen_st_i32 -#define tcg_gen_add_tl tcg_gen_add_i32 -#define tcg_gen_addi_tl tcg_gen_addi_i32 -#define tcg_gen_sub_tl tcg_gen_sub_i32 -#define tcg_gen_neg_tl tcg_gen_neg_i32 -#define tcg_gen_subfi_tl tcg_gen_subfi_i32 -#define tcg_gen_subi_tl tcg_gen_subi_i32 -#define tcg_gen_and_tl tcg_gen_and_i32 -#define tcg_gen_andi_tl tcg_gen_andi_i32 -#define tcg_gen_or_tl tcg_gen_or_i32 -#define tcg_gen_ori_tl tcg_gen_ori_i32 -#define tcg_gen_xor_tl tcg_gen_xor_i32 -#define tcg_gen_xori_tl tcg_gen_xori_i32 -#define tcg_gen_not_tl tcg_gen_not_i32 -#define tcg_gen_shl_tl tcg_gen_shl_i32 -#define tcg_gen_shli_tl tcg_gen_shli_i32 -#define tcg_gen_shr_tl tcg_gen_shr_i32 -#define tcg_gen_shri_tl tcg_gen_shri_i32 -#define tcg_gen_sar_tl tcg_gen_sar_i32 -#define tcg_gen_sari_tl tcg_gen_sari_i32 -#define tcg_gen_brcond_tl tcg_gen_brcond_i32 -#define tcg_gen_brcondi_tl tcg_gen_brcondi_i32 -#define tcg_gen_setcond_tl tcg_gen_setcond_i32 
-#define tcg_gen_setcondi_tl tcg_gen_setcondi_i32 -#define tcg_gen_mul_tl tcg_gen_mul_i32 -#define tcg_gen_muli_tl tcg_gen_muli_i32 -#define tcg_gen_div_tl tcg_gen_div_i32 -#define tcg_gen_rem_tl tcg_gen_rem_i32 -#define tcg_gen_divu_tl tcg_gen_divu_i32 -#define tcg_gen_remu_tl tcg_gen_remu_i32 -#define tcg_gen_discard_tl tcg_gen_discard_i32 -#define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32 -#define tcg_gen_trunc_i64_tl tcg_gen_trunc_i64_i32 -#define tcg_gen_extu_i32_tl tcg_gen_mov_i32 -#define tcg_gen_ext_i32_tl tcg_gen_mov_i32 -#define tcg_gen_extu_tl_i64 tcg_gen_extu_i32_i64 -#define tcg_gen_ext_tl_i64 tcg_gen_ext_i32_i64 -#define tcg_gen_ext8u_tl tcg_gen_ext8u_i32 -#define tcg_gen_ext8s_tl tcg_gen_ext8s_i32 -#define tcg_gen_ext16u_tl tcg_gen_ext16u_i32 -#define tcg_gen_ext16s_tl tcg_gen_ext16s_i32 -#define tcg_gen_ext32u_tl tcg_gen_mov_i32 -#define tcg_gen_ext32s_tl tcg_gen_mov_i32 -#define tcg_gen_bswap16_tl tcg_gen_bswap16_i32 -#define tcg_gen_bswap32_tl tcg_gen_bswap32_i32 -#define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64 -#define tcg_gen_extr_i64_tl tcg_gen_extr_i64_i32 -#define tcg_gen_andc_tl tcg_gen_andc_i32 -#define tcg_gen_eqv_tl tcg_gen_eqv_i32 -#define tcg_gen_nand_tl tcg_gen_nand_i32 -#define tcg_gen_nor_tl tcg_gen_nor_i32 -#define tcg_gen_orc_tl tcg_gen_orc_i32 -#define tcg_gen_rotl_tl tcg_gen_rotl_i32 -#define tcg_gen_rotli_tl tcg_gen_rotli_i32 -#define tcg_gen_rotr_tl tcg_gen_rotr_i32 -#define tcg_gen_rotri_tl tcg_gen_rotri_i32 -#define tcg_gen_deposit_tl tcg_gen_deposit_i32 -#define tcg_const_tl tcg_const_i32 -#define tcg_const_local_tl tcg_const_local_i32 -#define tcg_gen_movcond_tl tcg_gen_movcond_i32 -#define tcg_gen_add2_tl tcg_gen_add2_i32 -#define tcg_gen_sub2_tl tcg_gen_sub2_i32 -#define tcg_gen_mulu2_tl tcg_gen_mulu2_i32 -#define tcg_gen_muls2_tl tcg_gen_muls2_i32 -#endif - -#if UINTPTR_MAX == UINT32_MAX -# define tcg_gen_ld_ptr(S, R, A, O) \ - tcg_gen_ld_i32(S, TCGV_PTR_TO_NAT(R), (A), (O)) -# define tcg_gen_discard_ptr(A) \ - tcg_gen_discard_i32(TCGV_PTR_TO_NAT(A)) -# define tcg_gen_add_ptr(S, R, A, B) \ - tcg_gen_add_i32(S, TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), TCGV_PTR_TO_NAT(B)) -# define tcg_gen_addi_ptr(S, R, A, B) \ - tcg_gen_addi_i32(S, TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), (B)) -# define tcg_gen_ext_i32_ptr(S, R, A) \ - tcg_gen_mov_i32(S, TCGV_PTR_TO_NAT(R), (A)) -#else -# define tcg_gen_ld_ptr(S, R, A, O) \ - tcg_gen_ld_i64(S, TCGV_PTR_TO_NAT(R), (A), (O)) -# define tcg_gen_discard_ptr(A) \ - tcg_gen_discard_i64(TCGV_PTR_TO_NAT(A)) -# define tcg_gen_add_ptr(S, R, A, B) \ - tcg_gen_add_i64(S, TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), TCGV_PTR_TO_NAT(B)) -# define tcg_gen_addi_ptr(S, R, A, B) \ - tcg_gen_addi_i64(S, TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), (B)) -# define tcg_gen_ext_i32_ptr(S, R, A) \ - tcg_gen_ext_i32_i64(S, TCGV_PTR_TO_NAT(R), (A)) -#endif /* UINTPTR_MAX == UINT32_MAX */ diff --git a/qemu/tcg/tcg-pool.inc.c b/qemu/tcg/tcg-pool.inc.c new file mode 100644 index 00000000..00270d8a --- /dev/null +++ b/qemu/tcg/tcg-pool.inc.c @@ -0,0 +1,158 @@ +/* + * TCG Backend Data: constant pool. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +typedef struct TCGLabelPoolData { + struct TCGLabelPoolData *next; + tcg_insn_unit *label; + intptr_t addend; + int rtype; + unsigned nlong; + tcg_target_ulong data[]; +} TCGLabelPoolData; + + +static TCGLabelPoolData *new_pool_alloc(TCGContext *s, int nlong, int rtype, + tcg_insn_unit *label, intptr_t addend) +{ + TCGLabelPoolData *n = tcg_malloc(s, sizeof(TCGLabelPoolData) + + sizeof(tcg_target_ulong) * nlong); + + n->label = label; + n->addend = addend; + n->rtype = rtype; + n->nlong = nlong; + return n; +} + +static void new_pool_insert(TCGContext *s, TCGLabelPoolData *n) +{ + TCGLabelPoolData *i, **pp; + int nlong = n->nlong; + + /* Insertion sort on the pool. */ + for (pp = &s->pool_labels; (i = *pp) != NULL; pp = &i->next) { + if (nlong > i->nlong) { + break; + } + if (nlong < i->nlong) { + continue; + } + if (memcmp(n->data, i->data, sizeof(tcg_target_ulong) * nlong) >= 0) { + break; + } + } + n->next = *pp; + *pp = n; +} + +/* The "usual" for generic integer code. */ +static inline void new_pool_label(TCGContext *s, tcg_target_ulong d, int rtype, + tcg_insn_unit *label, intptr_t addend) +{ + TCGLabelPoolData *n = new_pool_alloc(s, 1, rtype, label, addend); + n->data[0] = d; + new_pool_insert(s, n); +} + +/* For v64 or v128, depending on the host. */ +static inline void new_pool_l2(TCGContext *s, int rtype, tcg_insn_unit *label, + intptr_t addend, tcg_target_ulong d0, + tcg_target_ulong d1) +{ + TCGLabelPoolData *n = new_pool_alloc(s, 2, rtype, label, addend); + n->data[0] = d0; + n->data[1] = d1; + new_pool_insert(s, n); +} + +/* For v128 or v256, depending on the host. */ +static inline void new_pool_l4(TCGContext *s, int rtype, tcg_insn_unit *label, + intptr_t addend, tcg_target_ulong d0, + tcg_target_ulong d1, tcg_target_ulong d2, + tcg_target_ulong d3) +{ + TCGLabelPoolData *n = new_pool_alloc(s, 4, rtype, label, addend); + n->data[0] = d0; + n->data[1] = d1; + n->data[2] = d2; + n->data[3] = d3; + new_pool_insert(s, n); +} + +/* For v256, for 32-bit host. 
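new_pool_insert keeps the pool list sorted: wider entries first (nlong descending), then by memcmp of the payload, so identical constants end up adjacent and the finalize step can deduplicate by remembering only the last entry it emitted. The ordering in isolation, as a freestanding sketch (Entry and insert are illustrative names, not the TCG types):

    #include <stdio.h>
    #include <string.h>

    typedef struct Entry {
        struct Entry *next;
        int nlong;
        unsigned long data[4];
    } Entry;

    static void insert(Entry **head, Entry *n)
    {
        Entry *i, **pp;
        for (pp = head; (i = *pp) != NULL; pp = &i->next) {
            if (n->nlong > i->nlong) {
                break;                       /* wider entries first */
            }
            if (n->nlong < i->nlong) {
                continue;
            }
            if (memcmp(n->data, i->data, sizeof(unsigned long) * n->nlong) >= 0) {
                break;                       /* same width: equal data stays adjacent */
            }
        }
        n->next = *pp;
        *pp = n;
    }

    int main(void)
    {
        Entry e1 = { NULL, 1, { 5 } }, e2 = { NULL, 2, { 5, 6 } }, e3 = { NULL, 1, { 5 } };
        Entry *head = NULL;
        insert(&head, &e1);
        insert(&head, &e2);
        insert(&head, &e3);                  /* duplicate of e1 lands next to it */
        for (Entry *p = head; p; p = p->next) {
            printf("nlong=%d data[0]=%lu\n", p->nlong, p->data[0]);
        }
        return 0;
    }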
*/ +static inline void new_pool_l8(TCGContext *s, int rtype, tcg_insn_unit *label, + intptr_t addend, tcg_target_ulong d0, + tcg_target_ulong d1, tcg_target_ulong d2, + tcg_target_ulong d3, tcg_target_ulong d4, + tcg_target_ulong d5, tcg_target_ulong d6, + tcg_target_ulong d7) +{ + TCGLabelPoolData *n = new_pool_alloc(s, 8, rtype, label, addend); + n->data[0] = d0; + n->data[1] = d1; + n->data[2] = d2; + n->data[3] = d3; + n->data[4] = d4; + n->data[5] = d5; + n->data[6] = d6; + n->data[7] = d7; + new_pool_insert(s, n); +} + +/* To be provided by cpu/tcg-target.inc.c. */ +static void tcg_out_nop_fill(tcg_insn_unit *p, int count); + +static int tcg_out_pool_finalize(TCGContext *s) +{ + TCGLabelPoolData *p = s->pool_labels; + TCGLabelPoolData *l = NULL; + char *a; + + if (p == NULL) { + return 0; + } + + /* ??? Round up to qemu_icache_linesize, but then do not round + again when allocating the next TranslationBlock structure. */ + a = (void *)ROUND_UP((uintptr_t)s->code_ptr, + sizeof(tcg_target_ulong) * p->nlong); + tcg_out_nop_fill(s->code_ptr, (tcg_insn_unit *)a - s->code_ptr); + s->data_gen_ptr = a; + + for (; p != NULL; p = p->next) { + size_t size = sizeof(tcg_target_ulong) * p->nlong; + if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) { + if (unlikely(a > (char *)s->code_gen_highwater)) { + return -1; + } + memcpy(a, p->data, size); + a += size; + l = p; + } + if (!patch_reloc(p->label, p->rtype, (intptr_t)a - size, p->addend)) { + return -2; + } + } + + s->code_ptr = (tcg_insn_unit *)a; + return 0; +} diff --git a/qemu/tcg/tcg-runtime.h b/qemu/tcg/tcg-runtime.h deleted file mode 100644 index 23a0c377..00000000 --- a/qemu/tcg/tcg-runtime.h +++ /dev/null @@ -1,16 +0,0 @@ -DEF_HELPER_FLAGS_2(div_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) -DEF_HELPER_FLAGS_2(rem_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) -DEF_HELPER_FLAGS_2(divu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) -DEF_HELPER_FLAGS_2(remu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) - -DEF_HELPER_FLAGS_2(div_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) -DEF_HELPER_FLAGS_2(rem_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) -DEF_HELPER_FLAGS_2(divu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(remu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) - -DEF_HELPER_FLAGS_2(shl_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(shr_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) - -DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) -DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) diff --git a/qemu/tcg/tcg.c b/qemu/tcg/tcg.c index 679836ae..e390bf2e 100644 --- a/qemu/tcg/tcg.c +++ b/qemu/tcg/tcg.c @@ -23,30 +23,28 @@ */ /* define it to use liveness analysis (better code) */ -#define USE_LIVENESS_ANALYSIS #define USE_TCG_OPTIMIZATIONS -#include "config.h" +#include "qemu/osdep.h" /* Define to jump the ELF file used to communicate with GDB. */ #undef DEBUG_JIT -#if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG) -/* define it to suppress various consistency checks (faster) */ -#define NDEBUG -#endif - -#include "qemu-common.h" +#include "qemu/cutils.h" #include "qemu/host-utils.h" #include "qemu/timer.h" +#include + /* Note: the long term plan is to reduce the dependencies on the QEMU CPU definitions. 
Currently they are used for qemu_ld/st instructions */ #define NO_CPU_IO_DEFS #include "cpu.h" -#include "tcg-op.h" +#include "exec/exec-all.h" + +#include "tcg/tcg-op.h" #if UINTPTR_MAX == UINT32_MAX # define ELF_CLASS ELFCLASS32 @@ -60,16 +58,21 @@ #endif #include "elf.h" +#include "sysemu/sysemu.h" -/* Forward declarations for functions declared in tcg-target.c and used here. */ +#include + +/* Forward declarations for functions declared in tcg-target.inc.c and + used here. */ static void tcg_target_init(TCGContext *s); +static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode); static void tcg_target_qemu_prologue(TCGContext *s); -static void patch_reloc(tcg_insn_unit *code_ptr, int type, +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend); /* The CIE and FDE header definitions will be common to all hosts. */ typedef struct { - //uint32_t QEMU_ALIGN(sizeof(void *), len); + // uint32_t len __attribute__((aligned((sizeof(void *))))); uint32_t QEMU_ALIGN(8, len); uint32_t id; uint8_t version; @@ -79,42 +82,78 @@ typedef struct { uint8_t return_column; } DebugFrameCIE; -QEMU_PACK( typedef struct { -// uint32_t QEMU_ALIGN(sizeof(void *), len); +QEMU_PACK(typedef struct { + // uint32_t len __attribute__((aligned((sizeof(void *))))); uint32_t QEMU_ALIGN(8, len); uint32_t cie_offset; uintptr_t func_start; uintptr_t func_len; }) DebugFrameFDEHeader; -QEMU_PACK( typedef struct { +QEMU_PACK(typedef struct { DebugFrameCIE cie; DebugFrameFDEHeader fde; }) DebugFrameHeader; -/* Forward declarations for functions declared and used in tcg-target.c. */ -static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str); +static QEMU_UNUSED_FUNC void tcg_register_jit_int(TCGContext *s, void *buf, size_t size, + const void *debug_frame, + size_t debug_frame_size); + +/* Forward declarations for functions declared and used in tcg-target.inc.c. 
*/ +static const char *target_parse_constraint(TCGArgConstraint *ct, + const char *ct_str, TCGType type); static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1, intptr_t arg2); -static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg); +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg); static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg); static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args); +#if TCG_TARGET_MAYBE_vec +static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg src); +static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg base, intptr_t offset); +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, + TCGReg dst, tcg_target_long arg); +static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, + unsigned vece, const TCGArg *args, + const int *const_args); +#else +static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg src) +{ + g_assert_not_reached(); +} +static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg base, intptr_t offset) +{ + g_assert_not_reached(); +} +static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, + TCGReg dst, tcg_target_long arg) +{ + g_assert_not_reached(); +} +static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, + unsigned vece, const TCGArg *args, + const int *const_args) +{ + g_assert_not_reached(); +} +#endif static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2); +static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs); static void tcg_out_call(TCGContext *s, tcg_insn_unit *target); static int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct); -static void tcg_out_tb_init(TCGContext *s); -static void tcg_out_tb_finalize(TCGContext *s); +#ifdef TCG_TARGET_NEED_LDST_LABELS +static int tcg_out_ldst_finalize(TCGContext *s); +#endif - -TCGOpDef tcg_op_defs_org[] = { -#define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags }, -#include "tcg-opc.h" -#undef DEF -}; +#define TCG_HIGHWATER 1024 #if TCG_TARGET_INSN_UNIT_SIZE == 1 static QEMU_UNUSED_FUNC inline void tcg_out8(TCGContext *s, uint8_t v) @@ -122,8 +161,7 @@ static QEMU_UNUSED_FUNC inline void tcg_out8(TCGContext *s, uint8_t v) *s->code_ptr++ = v; } -static QEMU_UNUSED_FUNC inline void tcg_patch8(tcg_insn_unit *p, - uint8_t v) +static QEMU_UNUSED_FUNC inline void tcg_patch8(tcg_insn_unit *p, uint8_t v) { *p = v; } @@ -133,7 +171,7 @@ static QEMU_UNUSED_FUNC inline void tcg_patch8(tcg_insn_unit *p, static QEMU_UNUSED_FUNC inline void tcg_out16(TCGContext *s, uint16_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 2) { - *s->code_ptr++ = (tcg_insn_unit)v; + *s->code_ptr++ = v; } else { tcg_insn_unit *p = s->code_ptr; memcpy(p, &v, sizeof(v)); @@ -141,11 +179,10 @@ static QEMU_UNUSED_FUNC inline void tcg_out16(TCGContext *s, uint16_t v) } } -static QEMU_UNUSED_FUNC inline void tcg_patch16(tcg_insn_unit *p, - uint16_t v) +static QEMU_UNUSED_FUNC inline void tcg_patch16(tcg_insn_unit *p, uint16_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 2) { - *p = (tcg_insn_unit)v; + *p = v; } else { memcpy(p, &v, sizeof(v)); } @@ -153,7 +190,7 @@ static QEMU_UNUSED_FUNC inline void tcg_patch16(tcg_insn_unit *p, 
#endif #if TCG_TARGET_INSN_UNIT_SIZE <= 4 -static QEMU_UNUSED_FUNC inline void tcg_out32(TCGContext *s, uint32_t v) +static QEMU_UNUSED_FUNC inline void tcg_out32(TCGContext *s, uint32_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 4) { *s->code_ptr++ = v; @@ -164,8 +201,7 @@ static QEMU_UNUSED_FUNC inline void tcg_out32(TCGContext *s, uint32_t v) } } -static QEMU_UNUSED_FUNC inline void tcg_patch32(tcg_insn_unit *p, - uint32_t v) +static QEMU_UNUSED_FUNC inline void tcg_patch32(tcg_insn_unit *p, uint32_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 4) { *p = v; @@ -179,7 +215,7 @@ static QEMU_UNUSED_FUNC inline void tcg_patch32(tcg_insn_unit *p, static QEMU_UNUSED_FUNC inline void tcg_out64(TCGContext *s, uint64_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 8) { - *s->code_ptr++ = (tcg_insn_unit)v; + *s->code_ptr++ = v; } else { tcg_insn_unit *p = s->code_ptr; memcpy(p, &v, sizeof(v)); @@ -187,11 +223,10 @@ static QEMU_UNUSED_FUNC inline void tcg_out64(TCGContext *s, uint64_t v) } } -static QEMU_UNUSED_FUNC inline void tcg_patch64(tcg_insn_unit *p, - uint64_t v) +static QEMU_UNUSED_FUNC inline void tcg_patch64(tcg_insn_unit *p, uint64_t v) { if (TCG_TARGET_INSN_UNIT_SIZE == 8) { - *p = (tcg_insn_unit)v; + *p = v; } else { memcpy(p, &v, sizeof(v)); } @@ -201,69 +236,365 @@ static QEMU_UNUSED_FUNC inline void tcg_patch64(tcg_insn_unit *p, /* label relocation processing */ static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type, - int label_index, intptr_t addend) + TCGLabel *l, intptr_t addend) { - TCGLabel *l; - TCGRelocation *r; + TCGRelocation *r = tcg_malloc(s, sizeof(TCGRelocation)); - l = &s->labels[label_index]; - if (l->has_value) { - /* FIXME: This may break relocations on RISC targets that - modify instruction fields in place. The caller may not have - written the initial value. 
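The relocation rework that continues below replaces eager patching with a queue: every use of a label records a relocation, and nothing is written into the instruction stream until the label's final address is known (see tcg_resolve_relocs below), which sidesteps the old FIXME about half-initialized instruction fields. The scheme in miniature, with illustrative types standing in for TCGLabel/TCGRelocation and a plain store standing in for patch_reloc():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Reloc {
        struct Reloc *next;
        uint32_t *slot;                /* the instruction field to patch */
    } Reloc;

    typedef struct {
        int has_value;
        uintptr_t value;
        Reloc *relocs;
    } Label;

    static void use_label(Label *l, uint32_t *slot)
    {
        Reloc *r = malloc(sizeof(*r)); /* queue it; patch nothing yet */
        r->slot = slot;
        r->next = l->relocs;
        l->relocs = r;
    }

    static void set_label(Label *l, uintptr_t here)
    {
        l->has_value = 1;
        l->value = here;
    }

    static void resolve(Label *l)      /* one pass after codegen finishes */
    {
        for (Reloc *r = l->relocs; r; r = r->next) {
            *r->slot = (uint32_t)l->value;
        }
    }

    int main(void)
    {
        uint32_t code[2] = { 0, 0 };
        Label l = { 0, 0, NULL };
        use_label(&l, &code[0]);       /* two forward references */
        use_label(&l, &code[1]);
        set_label(&l, 0x1234);
        resolve(&l);
        printf("%x %x\n", code[0], code[1]);
        return 0;
    }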
*/ - patch_reloc(code_ptr, type, l->u.value, addend); - } else { - /* add a new relocation entry */ - r = tcg_malloc(s, sizeof(TCGRelocation)); - r->type = type; - r->ptr = code_ptr; - r->addend = addend; - r->next = l->u.first_reloc; - l->u.first_reloc = r; - } + r->type = type; + r->ptr = code_ptr; + r->addend = addend; + QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next); } -static void tcg_out_label(TCGContext *s, int label_index, tcg_insn_unit *ptr) +static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr) { - TCGLabel *l = &s->labels[label_index]; - intptr_t value = (intptr_t)ptr; - TCGRelocation *r; - - assert(!l->has_value); - - for (r = l->u.first_reloc; r != NULL; r = r->next) { - patch_reloc(r->ptr, r->type, value, r->addend); - } - + tcg_debug_assert(!l->has_value); l->has_value = 1; l->u.value_ptr = ptr; } -int gen_new_label(TCGContext *s) +TCGLabel *gen_new_label(TCGContext *s) { - int idx; - TCGLabel *l; + TCGLabel *l = tcg_malloc(s, sizeof(TCGLabel)); - if (s->nb_labels >= TCG_MAX_LABELS) - tcg_abort(); - idx = s->nb_labels++; - l = &s->labels[idx]; - l->has_value = 0; - l->u.first_reloc = NULL; - return idx; + memset(l, 0, sizeof(TCGLabel)); + l->id = s->nb_labels++; + QSIMPLEQ_INIT(&l->relocs); + + QSIMPLEQ_INSERT_TAIL(&s->labels, l, next); + + return l; } -#include "tcg-target.c" +static bool tcg_resolve_relocs(TCGContext *s) +{ + TCGLabel *l; + + QSIMPLEQ_FOREACH(l, &s->labels, next) { + TCGRelocation *r; + uintptr_t value = l->u.value; + + QSIMPLEQ_FOREACH(r, &l->relocs, next) { + if (!patch_reloc(r->ptr, r->type, value, r->addend)) { + return false; + } + } + } + return true; +} + +static void set_jmp_reset_offset(TCGContext *s, int which) +{ + size_t off = tcg_current_code_size(s); + s->tb_jmp_reset_offset[which] = off; + /* Make sure that we didn't overflow the stored offset. */ + assert(s->tb_jmp_reset_offset[which] == off); +} + +#include "tcg-target.inc.c" + +/* compare a pointer @ptr and a tb_tc @s */ +static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s) +{ + if ((char *)ptr >= (char *)s->ptr + s->size) { + return 1; + } else if (ptr < s->ptr) { + return -1; + } + return 0; +} + +static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp) +{ + const struct tb_tc *a = ap; + const struct tb_tc *b = bp; + + /* + * When both sizes are set, we know this isn't a lookup. + * This is the most likely case: every TB must be inserted; lookups + * are a lot less frequent. + */ + if (likely(a->size && b->size)) { + if (a->ptr > b->ptr) { + return 1; + } else if (a->ptr < b->ptr) { + return -1; + } + /* a->ptr == b->ptr should happen only on deletions */ + g_assert(a->size == b->size); + return 0; + } + /* + * All lookups have either .size field set to 0. + * From the glib sources we see that @ap is always the lookup key. However + * the docs provide no guarantee, so we just mark this case as likely. + */ + if (likely(a->size == 0)) { + return ptr_cmp_tb_tc(a->ptr, b); + } + return ptr_cmp_tb_tc(b->ptr, a); +} + +void tcg_tb_insert(TCGContext *tcg_ctx, TranslationBlock *tb) +{ + g_tree_insert(tcg_ctx->tree, &tb->tc, tb); +} + +void tcg_tb_remove(TCGContext *tcg_ctx, TranslationBlock *tb) +{ + g_tree_remove(tcg_ctx->tree, &tb->tc); +} + +/* + * Find the TB 'tb' such that + * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size + * Return NULL if not found. 
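tb_tc_cmp doubles as a range-lookup predicate: a key with .size == 0 is compared against a node's [ptr, ptr + size) interval, so a plain g_tree_lookup() returns the TB whose code contains an address. The same trick in isolation, against the GTree calls that stock GLib and the bundled glib_compat both provide (Range and range_cmp are illustrative names):

    #include <glib.h>   /* or the in-tree glib_compat headers */
    #include <stdio.h>

    typedef struct { gsize start, size; } Range;

    static gint range_cmp(gconstpointer ap, gconstpointer bp)
    {
        const Range *a = ap, *b = bp;

        if (a->size && b->size) {            /* insertion: order by start */
            return (a->start > b->start) - (a->start < b->start);
        }
        /* lookup: the zero-size operand is the key; point-in-range test */
        {
            const Range *key  = a->size ? b : a;
            const Range *node = a->size ? a : b;
            gint sign = (key == a) ? 1 : -1; /* keep the order antisymmetric */

            if (key->start < node->start) {
                return -sign;
            }
            if (key->start - node->start >= node->size) {
                return sign;
            }
            return 0;                        /* key falls inside node */
        }
    }

    int main(void)
    {
        GTree *t = g_tree_new(range_cmp);
        Range r1 = { 0x1000, 0x100 }, r2 = { 0x2000, 0x80 };
        g_tree_insert(t, &r1, &r1);
        g_tree_insert(t, &r2, &r2);

        Range key = { 0x2010, 0 };           /* an address inside r2 */
        Range *hit = g_tree_lookup(t, &key);
        printf("hit start=0x%lx\n", hit ? (unsigned long)hit->start : 0ul);
        return 0;
    }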
+ */ +TranslationBlock *tcg_tb_lookup(TCGContext *tcg_ctx, uintptr_t tc_ptr) +{ + TranslationBlock *tb; + + struct tb_tc s = { .ptr = (void *)tc_ptr }; + tb = g_tree_lookup(tcg_ctx->tree, &s); + + return tb; +} + +void tcg_tb_foreach(TCGContext *tcg_ctx, GTraverseFunc func, gpointer user_data) +{ + g_tree_foreach(tcg_ctx->tree, func, user_data); +} + +size_t tcg_nb_tbs(TCGContext *tcg_ctx) +{ + size_t nb_tbs = 0; + nb_tbs = g_tree_nnodes(tcg_ctx->tree); + + return nb_tbs; +} + +static void tcg_region_tree_reset_all(TCGContext *tcg_ctx) +{ + g_tree_ref(tcg_ctx->tree); + g_tree_destroy(tcg_ctx->tree); +} + +static void tcg_region_bounds(TCGContext *tcg_ctx, size_t curr_region, void **pstart, void **pend) +{ + char *start, *end; + + start = (char *)tcg_ctx->region.start_aligned + curr_region * tcg_ctx->region.stride; + end = start + tcg_ctx->region.size; + + if (curr_region == 0) { + start = tcg_ctx->region.start; + } + if (curr_region == tcg_ctx->region.n - 1) { + end = tcg_ctx->region.end; + } + + *pstart = start; + *pend = end; +} + +static void tcg_region_assign(TCGContext *s, size_t curr_region) +{ + void *start, *end; + + tcg_region_bounds(s, curr_region, &start, &end); + + s->code_gen_buffer = start; + s->code_gen_ptr = start; + s->code_gen_buffer_size = (char *)end - (char *)start; + memset(s->code_gen_buffer, 0x00, s->code_gen_buffer_size); + s->code_gen_highwater = (char *)end - TCG_HIGHWATER; +} + +static bool tcg_region_alloc__locked(TCGContext *s) +{ + if (s->region.current == s->region.n) { + return true; + } + tcg_region_assign(s, s->region.current); + s->region.current++; + return false; +} + +/* + * Request a new region once the one in use has filled up. + * Returns true on error. + */ +static bool tcg_region_alloc(TCGContext *s) +{ + bool err; + /* read the region size now; alloc__locked will overwrite it on success */ + size_t size_full = s->code_gen_buffer_size; + + err = tcg_region_alloc__locked(s); + if (!err) { + s->region.agg_size_full += size_full - TCG_HIGHWATER; + } + return err; +} + +/* + * Perform a context's first region allocation. + * This function does _not_ increment region.agg_size_full. + */ +static inline bool tcg_region_initial_alloc__locked(TCGContext *s) +{ + return tcg_region_alloc__locked(s); +} + +/* Call from a safe-work context */ +void tcg_region_reset_all(TCGContext *tcg_ctx) +{ + tcg_ctx->region.current = 0; + tcg_ctx->region.agg_size_full = 0; + +#ifndef NDEBUG + bool err = tcg_region_initial_alloc__locked(tcg_ctx); +#else + tcg_region_initial_alloc__locked(tcg_ctx); +#endif + + g_assert(!err); + + tcg_region_tree_reset_all(tcg_ctx); +} + +/* + * Initializes region partitioning. + * + * Called at init time from the parent thread (i.e. the one calling + * tcg_context_init), after the target's TCG globals have been set. + * + * Region partitioning works by splitting code_gen_buffer into separate regions, + * and then assigning regions to TCG threads so that the threads can translate + * code in parallel without synchronization. + * + * In softmmu the number of TCG threads is bounded by max_cpus, so we use at + * least max_cpus regions in MTTCG. In !MTTCG we use a single region. + * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...]) + * must have been parsed before calling this function, since it calls + * qemu_tcg_mttcg_enabled(). + * + * In user-mode we use a single region. 
Having multiple regions in user-mode + * is not supported, because the number of vCPU threads (recall that each thread + * spawned by the guest corresponds to a vCPU thread) is only bounded by the + * OS, and usually this number is huge (tens of thousands is not uncommon). + * Thus, given this large bound on the number of vCPU threads and the fact + * that code_gen_buffer is allocated at compile-time, we cannot guarantee + * the availability of at least one region per vCPU thread. + * + * However, this user-mode limitation is unlikely to be a significant problem + * in practice. Multi-threaded guests share most if not all of their translated + * code, which makes parallel code generation less appealing than in softmmu. + */ +void tcg_region_init(TCGContext *tcg_ctx) +{ + void *buf = tcg_ctx->code_gen_buffer; + void *aligned; + size_t size = tcg_ctx->code_gen_buffer_size; + size_t page_size = tcg_ctx->uc->qemu_real_host_page_size; + size_t region_size; + size_t n_regions; + size_t i; + + n_regions = 1; + + /* The first region will be 'aligned - buf' bytes larger than the others */ + aligned = (void *)QEMU_ALIGN_PTR_UP(buf, page_size); + g_assert((char *)aligned < ((char *)tcg_ctx->code_gen_buffer + size)); + /* + * Make region_size a multiple of page_size, using aligned as the start. + * As a result of this we might end up with a few extra pages at the end of + * the buffer; we will assign those to the last region. + */ + region_size = (size - ((char *)aligned - (char *)buf)) / n_regions; + region_size = QEMU_ALIGN_DOWN(region_size, page_size); + + /* A region must have at least 2 pages; one code, one guard */ + g_assert(region_size >= 2 * page_size); + + /* init the region struct */ + tcg_ctx->region.n = n_regions; + tcg_ctx->region.size = region_size - page_size; + tcg_ctx->region.stride = region_size; + tcg_ctx->region.start = buf; + tcg_ctx->region.start_aligned = aligned; + /* page-align the end, since its last page will be a guard page */ + tcg_ctx->region.end = (void *)QEMU_ALIGN_PTR_DOWN((char *)buf + size, page_size); + /* account for that last guard page */ + tcg_ctx->region.end = (void *)((char *)tcg_ctx->region.end - page_size); + + /* set guard pages */ + for (i = 0; i < tcg_ctx->region.n; i++) { + void *start, *end; + + tcg_region_bounds(tcg_ctx, i, &start, &end); + + (void)qemu_mprotect_none(end, page_size); + } + + tcg_ctx->tree = g_tree_new(tb_tc_cmp); +} + +/* + * Returns the size (in bytes) of all translated code (i.e. from all regions) + * currently in the cache. + * See also: tcg_code_capacity() + * Do not confuse with tcg_current_code_size(); that one applies to a single + * TCG context. + */ +size_t tcg_code_size(TCGContext *tcg_ctx) +{ + size_t total; + size_t size; + + total = tcg_ctx->region.agg_size_full; + + size = (char *)tcg_ctx->code_gen_ptr - (char *)tcg_ctx->code_gen_buffer; + g_assert(size <= tcg_ctx->code_gen_buffer_size); + total += size; + + return total; +} + +/* + * Returns the code capacity (in bytes) of the entire cache, i.e. including all + * regions.
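The pointer arithmetic in tcg_region_init boils down to: page-align the start upward, make the region size a page multiple, page-align the end downward, then give back one page per region as an inaccessible guard. A worked example with made-up numbers (16 MiB buffer, 4 KiB pages, one region, as Unicorn configures it):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
    #define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        uint64_t buf = 0x10000123;              /* deliberately unaligned */
        uint64_t size = 16u << 20, page = 4096, n_regions = 1;

        uint64_t aligned = ALIGN_UP(buf, page);
        uint64_t region_size =
            ALIGN_DOWN((size - (aligned - buf)) / n_regions, page);
        uint64_t end = ALIGN_DOWN(buf + size, page) - page; /* guard page */

        assert(region_size >= 2 * page);        /* one code page, one guard */
        printf("usable bytes: %llu\n",
               (unsigned long long)(region_size - page));
        printf("guarded end:  0x%llx\n", (unsigned long long)end);
        return 0;
    }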
+ * See also: tcg_code_size() + */ +size_t tcg_code_capacity(TCGContext *tcg_ctx) +{ + size_t guard_size, capacity; + + /* no need for synchronization; these variables are set at init time */ + guard_size = tcg_ctx->region.stride - tcg_ctx->region.size; + capacity = (char *)tcg_ctx->region.end + guard_size - (char *)tcg_ctx->region.start; + capacity -= tcg_ctx->region.n * (guard_size + TCG_HIGHWATER); + return capacity; +} + +size_t tcg_tb_phys_invalidate_count(TCGContext *tcg_ctx) +{ + size_t total = 0; + + total = tcg_ctx->tb_phys_invalidate_count; + + return total; +} /* pool based memory allocation */ void *tcg_malloc_internal(TCGContext *s, int size) { TCGPool *p; int pool_size; - + if (size > TCG_POOL_CHUNK_SIZE) { /* big malloc: insert a new pool (XXX: could optimize) */ - p = g_malloc0(sizeof(TCGPool) + size); + p = g_malloc(sizeof(TCGPool) + size); p->size = size; p->next = s->pool_first_large; s->pool_first_large = p; @@ -278,10 +609,10 @@ void *tcg_malloc_internal(TCGContext *s, int size) if (!p->next) { new_pool: pool_size = TCG_POOL_CHUNK_SIZE; - p = g_malloc0(sizeof(TCGPool) + pool_size); + p = g_malloc(sizeof(TCGPool) + pool_size); p->size = pool_size; p->next = NULL; - if (s->pool_current) + if (s->pool_current) s->pool_current->next = p; else s->pool_first = p; @@ -321,19 +652,31 @@ static const TCGHelperInfo all_helpers[] = { #include "exec/helper-tcg.h" }; +static const TCGOpDef tcg_op_defs_org[] = { +#define DEF(s, oargs, iargs, cargs, flags) \ + { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags }, +#include "tcg/tcg-opc.h" +#undef DEF +}; + +static void process_op_defs(TCGContext *s); +static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type, + TCGReg reg, const char *name); + void tcg_context_init(TCGContext *s) { int op, total_args, n, i; TCGOpDef *def; TCGArgConstraint *args_ct; int *sorted_args; + TCGTemp *ts; GHashTable *helper_table; memset(s, 0, sizeof(*s)); s->nb_globals = 0; // copy original tcg_op_defs_org for private usage - s->tcg_op_defs = g_malloc(sizeof(tcg_op_defs_org)); + s->tcg_op_defs = g_malloc0(sizeof(tcg_op_defs_org)); memcpy(s->tcg_op_defs, tcg_op_defs_org, sizeof(tcg_op_defs_org)); /* Count total number of arguments and allocate the corresponding @@ -345,8 +688,8 @@ void tcg_context_init(TCGContext *s) total_args += n; } - args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args); - sorted_args = g_malloc(sizeof(int) * total_args); + args_ct = g_malloc0(sizeof(TCGArgConstraint) * total_args); + sorted_args = g_malloc0(sizeof(int) * total_args); for(op = 0; op < NB_OPS; op++) { def = &s->tcg_op_defs[op]; @@ -359,7 +702,8 @@ void tcg_context_init(TCGContext *s) /* Register helpers. */ /* Use g_direct_hash/equal for direct pointer comparisons on func. */ - s->helpers = helper_table = g_hash_table_new(NULL, NULL); + helper_table = g_hash_table_new(NULL, NULL); + s->helper_table = helper_table; for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) { g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func, @@ -367,31 +711,108 @@ void tcg_context_init(TCGContext *s) } tcg_target_init(s); + process_op_defs(s); + + /* Reverse the order of the saved registers, assuming they're all at + the start of tcg_target_reg_alloc_order. 
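tcg_malloc is a bump allocator over chained chunks: small requests advance a pointer within the current chunk, oversized ones get a chunk of their own on a separate list, and everything is released wholesale between translations. The shape of it as a freestanding sketch (Arena/Pool are illustrative names; chunk reuse after a reset is omitted):

    #include <stdlib.h>
    #include <string.h>

    #define CHUNK_SIZE 32768

    typedef struct Pool {
        struct Pool *next;
        size_t size;
        char data[];
    } Pool;

    typedef struct {
        Pool *first, *current, *first_large;
        char *ptr, *end;
    } Arena;

    static void *arena_alloc(Arena *a, size_t size)
    {
        size = (size + 7) & ~(size_t)7;         /* keep alignment */
        if (size > CHUNK_SIZE) {                /* big malloc: own pool */
            Pool *p = malloc(sizeof(Pool) + size);
            p->size = size;
            p->next = a->first_large;
            a->first_large = p;
            return p->data;
        }
        if (!a->current || a->ptr + size > a->end) {
            Pool *p = malloc(sizeof(Pool) + CHUNK_SIZE);
            p->size = CHUNK_SIZE;
            p->next = NULL;
            if (a->current) {
                a->current->next = p;
            } else {
                a->first = p;
            }
            a->current = p;
            a->ptr = p->data;
            a->end = p->data + CHUNK_SIZE;
        }
        void *ret = a->ptr;
        a->ptr += size;
        return ret;
    }

    int main(void)
    {
        Arena a = { NULL, NULL, NULL, NULL, NULL };
        char *s = arena_alloc(&a, 16);
        memcpy(s, "hello", 6);
        void *big = arena_alloc(&a, 100000);    /* takes the large path */
        return (big && s[0] == 'h') ? 0 : 1;
    }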
*/ + for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) { + int r = tcg_target_reg_alloc_order[n]; + if (tcg_regset_test_reg(s->tcg_target_call_clobber_regs, r)) { + break; + } + } + n = ARRAY_SIZE(tcg_target_reg_alloc_order); + s->indirect_reg_alloc_order = g_malloc(sizeof(int) * n); + for (i = 0; i < n; ++i) { + s->indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i]; + } + for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) { + s->indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i]; + } + + s->one_entry = g_malloc(sizeof(struct jit_code_entry)); + s->one_entry->symfile_addr = NULL; + + tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0)); + ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env"); + s->cpu_env = temp_tcgv_ptr(s, ts); +} + +/* + * Allocate TBs right before their corresponding translated code, making + * sure that TBs and code are on different cache lines. + */ +TranslationBlock *tcg_tb_alloc(TCGContext *s) +{ + uintptr_t align = s->uc->qemu_icache_linesize; + TranslationBlock *tb; + void *next; + + retry: + tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align); + next = (void *)ROUND_UP((uintptr_t)(tb + 1), align); + + if (unlikely(next > s->code_gen_highwater)) { + if (tcg_region_alloc(s)) { + return NULL; + } + goto retry; + } + s->code_gen_ptr = next; + s->data_gen_ptr = NULL; + return tb; } void tcg_prologue_init(TCGContext *s) { - /* init global prologue and epilogue */ - s->code_buf = s->code_gen_prologue; - s->code_ptr = s->code_buf; - tcg_target_qemu_prologue(s); - flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr); + size_t prologue_size, total_size; + void *buf0, *buf1; -#ifdef DEBUG_DISAS - if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) { - size_t size = tcg_current_code_size(s); - qemu_log("PROLOGUE: [size=%zu]\n", size); - qemu_log("\n"); - qemu_log_flush(); + /* Put the prologue at the beginning of code_gen_buffer. */ + buf0 = s->code_gen_buffer; + total_size = s->code_gen_buffer_size; + s->code_ptr = buf0; + s->code_buf = buf0; + s->data_gen_ptr = NULL; + s->code_gen_prologue = buf0; + + /* Compute a high-water mark, at which we voluntarily flush the buffer + and start over. The size here is arbitrary, significantly larger + than we expect the code generation for any one opcode to require. */ + s->code_gen_highwater = (char *)s->code_gen_buffer + (total_size - TCG_HIGHWATER) - s->uc->qemu_real_host_page_size; + +#ifdef TCG_TARGET_NEED_POOL_LABELS + s->pool_labels = NULL; +#endif + + /* Generate the prologue. */ + tcg_target_qemu_prologue(s); + +#ifdef TCG_TARGET_NEED_POOL_LABELS + /* Allow the prologue to put e.g. guest_base into a pool entry. */ + { + int result = tcg_out_pool_finalize(s); + tcg_debug_assert(result == 0); } #endif -} -void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size) -{ - s->frame_start = start; - s->frame_end = start + size; - s->frame_reg = reg; + buf1 = s->code_ptr; + flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1); + + /* Deduct the prologue from the buffer. */ + prologue_size = tcg_current_code_size(s); + s->code_gen_ptr = buf1; + s->code_gen_buffer = buf1; + s->code_buf = buf1; + total_size -= prologue_size; + s->code_gen_buffer_size = total_size; + + tcg_register_jit(s, s->code_gen_buffer, total_size); + + /* Assert that goto_ptr is implemented completely. 
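The high-water mark turns per-byte bounds checking into one coarse test: TCG_HIGHWATER bytes of slack are reserved at the end of the buffer, sized well above any single opcode's output, so the emitter only compares its pointer against the mark at safe points (per TB, per pool entry) and may legitimately spill into the slack but never past it. In miniature, with illustrative constants:

    #include <stdio.h>

    #define BUF_SIZE  (1 << 20)
    #define HIGHWATER 1024      /* above any single unit's output (700 here) */

    static char buf[BUF_SIZE];
    static char *ptr = buf;
    static char *const highwater = buf + BUF_SIZE - HIGHWATER;

    static int emit_unit(int size)
    {
        if (ptr > highwater) {
            return -1;          /* caller flushes the cache and retries */
        }
        ptr += size;            /* may dip into the slack, never past it */
        return 0;
    }

    int main(void)
    {
        int n = 0;
        while (emit_unit(700) == 0) {
            n++;
        }
        printf("emitted %d units before a flush was needed\n", n);
        return 0;
    }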
*/ + if (TCG_TARGET_HAS_goto_ptr) { + tcg_debug_assert(s->code_gen_epilogue != NULL); + } } void tcg_func_start(TCGContext *s) @@ -402,7 +823,7 @@ void tcg_func_start(TCGContext *s) /* No temps have been previously allocated for size or locality. */ memset(s->free_temps, 0, sizeof(s->free_temps)); - s->labels = tcg_malloc(s, sizeof(TCGLabel) * TCG_MAX_LABELS); + s->nb_ops = 0; s->nb_labels = 0; s->current_frame_offset = s->frame_start; @@ -410,132 +831,114 @@ void tcg_func_start(TCGContext *s) s->goto_tb_issue_mask = 0; #endif - s->gen_opc_ptr = s->gen_opc_buf; - s->gen_opparam_ptr = s->gen_opparam_buf; - - s->be = tcg_malloc(s, sizeof(TCGBackendData)); + QTAILQ_INIT(&s->ops); + QTAILQ_INIT(&s->free_ops); + QSIMPLEQ_INIT(&s->labels); } -static inline void tcg_temp_alloc(TCGContext *s, int n) +static inline TCGTemp *tcg_temp_alloc(TCGContext *s) { - if (n > TCG_MAX_TEMPS) - tcg_abort(); + int n = s->nb_temps++; + tcg_debug_assert(n < TCG_MAX_TEMPS); + return memset(&s->temps[n], 0, sizeof(TCGTemp)); } -static inline int tcg_global_reg_new_internal(TCGContext *s, TCGType type, int reg, - const char *name) +static inline TCGTemp *tcg_global_alloc(TCGContext *s) { TCGTemp *ts; - int idx; -#if TCG_TARGET_REG_BITS == 32 - if (type != TCG_TYPE_I32) + tcg_debug_assert(s->nb_globals == s->nb_temps); + s->nb_globals++; + ts = tcg_temp_alloc(s); + ts->temp_global = 1; + + return ts; +} + +static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type, + TCGReg reg, const char *name) +{ + TCGTemp *ts; + + if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) { tcg_abort(); -#endif - if (tcg_regset_test_reg(s->reserved_regs, reg)) - tcg_abort(); - idx = s->nb_globals; - tcg_temp_alloc(s, s->nb_globals + 1); - ts = &s->temps[s->nb_globals]; + } + + ts = tcg_global_alloc(s); ts->base_type = type; ts->type = type; ts->fixed_reg = 1; ts->reg = reg; ts->name = name; - s->nb_globals++; tcg_regset_set_reg(s->reserved_regs, reg); - return idx; + + return ts; } -TCGv_i32 tcg_global_reg_new_i32(TCGContext *s, int reg, const char *name) +void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size) { - int idx; - - idx = tcg_global_reg_new_internal(s, TCG_TYPE_I32, reg, name); - return MAKE_TCGV_I32(idx); + s->frame_start = start; + s->frame_end = start + size; + s->frame_temp + = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame"); } -TCGv_i64 tcg_global_reg_new_i64(TCGContext *s, int reg, const char *name) +TCGTemp *tcg_global_mem_new_internal(TCGContext *s, TCGType type, TCGv_ptr base, + intptr_t offset, const char *name) { - int idx; - - idx = tcg_global_reg_new_internal(s, TCG_TYPE_I64, reg, name); - return MAKE_TCGV_I64(idx); -} - -static inline int tcg_global_mem_new_internal(TCGContext *s, TCGType type, int reg, - intptr_t offset, - const char *name) -{ - TCGTemp *ts; - int idx; - - idx = s->nb_globals; -#if TCG_TARGET_REG_BITS == 32 - if (type == TCG_TYPE_I64) { - char buf[64]; - tcg_temp_alloc(s, s->nb_globals + 2); - ts = &s->temps[s->nb_globals]; - ts->base_type = type; - ts->type = TCG_TYPE_I32; - ts->fixed_reg = 0; - ts->mem_allocated = 1; - ts->mem_reg = reg; + TCGTemp *base_ts = tcgv_ptr_temp(s, base); + TCGTemp *ts = tcg_global_alloc(s); + int indirect_reg = 0, bigendian = 0; #ifdef HOST_WORDS_BIGENDIAN - ts->mem_offset = offset + 4; -#else - ts->mem_offset = offset; + bigendian = 1; #endif + + if (!base_ts->fixed_reg) { + /* We do not support double-indirect registers. 
*/ + tcg_debug_assert(!base_ts->indirect_reg); + base_ts->indirect_base = 1; + s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64 + ? 2 : 1); + indirect_reg = 1; + } + + if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) { + TCGTemp *ts2 = tcg_global_alloc(s); + char buf[64]; + + ts->base_type = TCG_TYPE_I64; + ts->type = TCG_TYPE_I32; + ts->indirect_reg = indirect_reg; + ts->mem_allocated = 1; + ts->mem_base = base_ts; + ts->mem_offset = offset + bigendian * 4; pstrcpy(buf, sizeof(buf), name); pstrcat(buf, sizeof(buf), "_0"); ts->name = g_strdup(buf); - ts++; - ts->base_type = type; - ts->type = TCG_TYPE_I32; - ts->fixed_reg = 0; - ts->mem_allocated = 1; - ts->mem_reg = reg; -#ifdef HOST_WORDS_BIGENDIAN - ts->mem_offset = offset; -#else - ts->mem_offset = offset + 4; -#endif + tcg_debug_assert(ts2 == ts + 1); + ts2->base_type = TCG_TYPE_I64; + ts2->type = TCG_TYPE_I32; + ts2->indirect_reg = indirect_reg; + ts2->mem_allocated = 1; + ts2->mem_base = base_ts; + ts2->mem_offset = offset + (1 - bigendian) * 4; pstrcpy(buf, sizeof(buf), name); pstrcat(buf, sizeof(buf), "_1"); - ts->name = g_strdup(buf); - - s->nb_globals += 2; - } else -#endif - { - tcg_temp_alloc(s, s->nb_globals + 1); - ts = &s->temps[s->nb_globals]; + ts2->name = g_strdup(buf); + } else { ts->base_type = type; ts->type = type; - ts->fixed_reg = 0; + ts->indirect_reg = indirect_reg; ts->mem_allocated = 1; - ts->mem_reg = reg; + ts->mem_base = base_ts; ts->mem_offset = offset; ts->name = name; - s->nb_globals++; } - return idx; + return ts; } -TCGv_i32 tcg_global_mem_new_i32(TCGContext *s, int reg, intptr_t offset, const char *name) -{ - int idx = tcg_global_mem_new_internal(s, TCG_TYPE_I32, reg, offset, name); - return MAKE_TCGV_I32(idx); -} - -TCGv_i64 tcg_global_mem_new_i64(TCGContext *s, int reg, intptr_t offset, const char *name) -{ - int idx = tcg_global_mem_new_internal(s, TCG_TYPE_I64, reg, offset, name); - return MAKE_TCGV_I64(idx); -} - -static inline int tcg_temp_new_internal(TCGContext *s, TCGType type, int temp_local) +TCGTemp *tcg_temp_new_internal(TCGContext *s, TCGType type, bool temp_local) { TCGTemp *ts; int idx, k; @@ -548,66 +951,75 @@ static inline int tcg_temp_new_internal(TCGContext *s, TCGType type, int temp_lo ts = &s->temps[idx]; ts->temp_allocated = 1; - assert(ts->base_type == type); - assert(ts->temp_local == temp_local); + tcg_debug_assert(ts->base_type == type); + tcg_debug_assert(ts->temp_local == temp_local); } else { - idx = s->nb_temps; -#if TCG_TARGET_REG_BITS == 32 - if (type == TCG_TYPE_I64) { - tcg_temp_alloc(s, s->nb_temps + 2); - ts = &s->temps[s->nb_temps]; + ts = tcg_temp_alloc(s); + if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) { + TCGTemp *ts2 = tcg_temp_alloc(s); + ts->base_type = type; ts->type = TCG_TYPE_I32; ts->temp_allocated = 1; ts->temp_local = temp_local; - ts->name = NULL; - ts++; - ts->base_type = type; - ts->type = TCG_TYPE_I32; - ts->temp_allocated = 1; - ts->temp_local = temp_local; - ts->name = NULL; - s->nb_temps += 2; - } else -#endif - { - tcg_temp_alloc(s, s->nb_temps + 1); - ts = &s->temps[s->nb_temps]; + + tcg_debug_assert(ts2 == ts + 1); + ts2->base_type = TCG_TYPE_I64; + ts2->type = TCG_TYPE_I32; + ts2->temp_allocated = 1; + ts2->temp_local = temp_local; + } else { ts->base_type = type; ts->type = type; ts->temp_allocated = 1; ts->temp_local = temp_local; - ts->name = NULL; - s->nb_temps++; } } #if defined(CONFIG_DEBUG_TCG) s->temps_in_use++; #endif - return idx; + return ts; } -TCGv_i32 tcg_temp_new_internal_i32(TCGContext *s, 
int temp_local) +TCGv_vec tcg_temp_new_vec(TCGContext *tcg_ctx, TCGType type) { - int idx; + TCGTemp *t; - idx = tcg_temp_new_internal(s, TCG_TYPE_I32, temp_local); - return MAKE_TCGV_I32(idx); +#ifdef CONFIG_DEBUG_TCG + switch (type) { + case TCG_TYPE_V64: + assert(TCG_TARGET_HAS_v64); + break; + case TCG_TYPE_V128: + assert(TCG_TARGET_HAS_v128); + break; + case TCG_TYPE_V256: + assert(TCG_TARGET_HAS_v256); + break; + default: + g_assert_not_reached(); + } +#endif + + t = tcg_temp_new_internal(tcg_ctx, type, 0); + return temp_tcgv_vec(tcg_ctx, t); } -TCGv_i64 tcg_temp_new_internal_i64(TCGContext *s, int temp_local) +/* Create a new temp of the same type as an existing temp. */ +TCGv_vec tcg_temp_new_vec_matching(TCGContext *tcg_ctx, TCGv_vec match) { - int idx; + TCGTemp *t = tcgv_vec_temp(tcg_ctx, match); - idx = tcg_temp_new_internal(s, TCG_TYPE_I64, temp_local); - return MAKE_TCGV_I64(idx); + tcg_debug_assert(t->temp_allocated != 0); + + t = tcg_temp_new_internal(tcg_ctx, t->base_type, 0); + return temp_tcgv_vec(tcg_ctx, t); } -static void tcg_temp_free_internal(TCGContext *s, int idx) +void tcg_temp_free_internal(TCGContext *s, TCGTemp *ts) { - TCGTemp *ts; - int k; + int k, idx; #if defined(CONFIG_DEBUG_TCG) s->temps_in_use--; @@ -616,54 +1028,44 @@ static void tcg_temp_free_internal(TCGContext *s, int idx) } #endif - assert(idx >= s->nb_globals && idx < s->nb_temps); - ts = &s->temps[idx]; - assert(ts->temp_allocated != 0); + tcg_debug_assert(ts->temp_global == 0); + tcg_debug_assert(ts->temp_allocated != 0); ts->temp_allocated = 0; + idx = temp_idx(s, ts); k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0); set_bit(idx, s->free_temps[k].l); } -void tcg_temp_free_i32(TCGContext *s, TCGv_i32 arg) -{ - tcg_temp_free_internal(s, GET_TCGV_I32(arg)); -} - -void tcg_temp_free_i64(TCGContext *s, TCGv_i64 arg) -{ - tcg_temp_free_internal(s, GET_TCGV_I64(arg)); -} - -TCGv_i32 tcg_const_i32(TCGContext *s, int32_t val) +TCGv_i32 tcg_const_i32(TCGContext *tcg_ctx, int32_t val) { TCGv_i32 t0; - t0 = tcg_temp_new_i32(s); - tcg_gen_movi_i32(s, t0, val); + t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, t0, val); return t0; } -TCGv_i64 tcg_const_i64(TCGContext *s, int64_t val) +TCGv_i64 tcg_const_i64(TCGContext *tcg_ctx, int64_t val) { TCGv_i64 t0; - t0 = tcg_temp_new_i64(s); - tcg_gen_movi_i64(s, t0, val); + t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_movi_i64(tcg_ctx, t0, val); return t0; } -TCGv_i32 tcg_const_local_i32(TCGContext *s, int32_t val) +TCGv_i32 tcg_const_local_i32(TCGContext *tcg_ctx, int32_t val) { TCGv_i32 t0; - t0 = tcg_temp_local_new_i32(s); - tcg_gen_movi_i32(s, t0, val); + t0 = tcg_temp_local_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, t0, val); return t0; } -TCGv_i64 tcg_const_local_i64(TCGContext *s, int64_t val) +TCGv_i64 tcg_const_local_i64(TCGContext *tcg_ctx, int64_t val) { TCGv_i64 t0; - t0 = tcg_temp_local_new_i64(s); - tcg_gen_movi_i64(s, t0, val); + t0 = tcg_temp_local_new_i64(tcg_ctx); + tcg_gen_movi_i64(tcg_ctx, t0, val); return t0; } @@ -686,42 +1088,324 @@ int tcg_check_temp_count(TCGContext *s) } #endif +/* Return true if OP may appear in the opcode stream. + Test the runtime variable that controls each opcode. 
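Temp recycling pairs the set_bit above with a find-first-bit on allocation: each (base type, locality) class gets its own bitmap, freeing a temp sets its index bit, and the next allocation of that class reuses the lowest set bit before growing the temp array. A freestanding sketch of the idea (one class only; __builtin_ctzll assumes GCC/Clang):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_TEMPS 512

    static uint64_t free_map[MAX_TEMPS / 64];

    static void free_temp(int idx)
    {
        free_map[idx / 64] |= UINT64_C(1) << (idx % 64);
    }

    static int alloc_temp(int *nb_temps)
    {
        for (int w = 0; w < MAX_TEMPS / 64; w++) {
            if (free_map[w]) {
                int idx = w * 64 + __builtin_ctzll(free_map[w]);
                free_map[w] &= free_map[w] - 1;   /* clear lowest set bit */
                return idx;                       /* reuse a freed temp */
            }
        }
        return (*nb_temps)++;                     /* none free: grow */
    }

    int main(void)
    {
        int nb = 0;
        int a = alloc_temp(&nb), b = alloc_temp(&nb);
        free_temp(a);
        printf("%d %d %d\n", a, b, alloc_temp(&nb)); /* prints 0 1 0 */
        return 0;
    }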
*/ +bool tcg_op_supported(TCGOpcode op) +{ + const bool have_vec + = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256; + + switch (op) { + case INDEX_op_discard: + case INDEX_op_set_label: + case INDEX_op_call: + case INDEX_op_br: + case INDEX_op_mb: + case INDEX_op_insn_start: + case INDEX_op_exit_tb: + case INDEX_op_goto_tb: + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_st_i64: + return true; + + case INDEX_op_goto_ptr: + return TCG_TARGET_HAS_goto_ptr; + + case INDEX_op_mov_i32: + case INDEX_op_movi_i32: + case INDEX_op_setcond_i32: + case INDEX_op_brcond_i32: + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_add_i32: + case INDEX_op_sub_i32: + case INDEX_op_mul_i32: + case INDEX_op_and_i32: + case INDEX_op_or_i32: + case INDEX_op_xor_i32: + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + return true; + + case INDEX_op_movcond_i32: + return TCG_TARGET_HAS_movcond_i32; + case INDEX_op_div_i32: + case INDEX_op_divu_i32: + return TCG_TARGET_HAS_div_i32; + case INDEX_op_rem_i32: + case INDEX_op_remu_i32: + return TCG_TARGET_HAS_rem_i32; + case INDEX_op_div2_i32: + case INDEX_op_divu2_i32: + return TCG_TARGET_HAS_div2_i32; + case INDEX_op_rotl_i32: + case INDEX_op_rotr_i32: + return TCG_TARGET_HAS_rot_i32; + case INDEX_op_deposit_i32: + return TCG_TARGET_HAS_deposit_i32; + case INDEX_op_extract_i32: + return TCG_TARGET_HAS_extract_i32; + case INDEX_op_sextract_i32: + return TCG_TARGET_HAS_sextract_i32; + case INDEX_op_extract2_i32: + return TCG_TARGET_HAS_extract2_i32; + case INDEX_op_add2_i32: + return TCG_TARGET_HAS_add2_i32; + case INDEX_op_sub2_i32: + return TCG_TARGET_HAS_sub2_i32; + case INDEX_op_mulu2_i32: + return TCG_TARGET_HAS_mulu2_i32; + case INDEX_op_muls2_i32: + return TCG_TARGET_HAS_muls2_i32; + case INDEX_op_muluh_i32: + return TCG_TARGET_HAS_muluh_i32; + case INDEX_op_mulsh_i32: + return TCG_TARGET_HAS_mulsh_i32; + case INDEX_op_ext8s_i32: + return TCG_TARGET_HAS_ext8s_i32; + case INDEX_op_ext16s_i32: + return TCG_TARGET_HAS_ext16s_i32; + case INDEX_op_ext8u_i32: + return TCG_TARGET_HAS_ext8u_i32; + case INDEX_op_ext16u_i32: + return TCG_TARGET_HAS_ext16u_i32; + case INDEX_op_bswap16_i32: + return TCG_TARGET_HAS_bswap16_i32; + case INDEX_op_bswap32_i32: + return TCG_TARGET_HAS_bswap32_i32; + case INDEX_op_not_i32: + return TCG_TARGET_HAS_not_i32; + case INDEX_op_neg_i32: + return TCG_TARGET_HAS_neg_i32; + case INDEX_op_andc_i32: + return TCG_TARGET_HAS_andc_i32; + case INDEX_op_orc_i32: + return TCG_TARGET_HAS_orc_i32; + case INDEX_op_eqv_i32: + return TCG_TARGET_HAS_eqv_i32; + case INDEX_op_nand_i32: + return TCG_TARGET_HAS_nand_i32; + case INDEX_op_nor_i32: + return TCG_TARGET_HAS_nor_i32; + case INDEX_op_clz_i32: + return TCG_TARGET_HAS_clz_i32; + case INDEX_op_ctz_i32: + return TCG_TARGET_HAS_ctz_i32; + case INDEX_op_ctpop_i32: + return TCG_TARGET_HAS_ctpop_i32; + + case INDEX_op_brcond2_i32: + case INDEX_op_setcond2_i32: + return TCG_TARGET_REG_BITS == 32; + + case INDEX_op_mov_i64: + case INDEX_op_movi_i64: + case INDEX_op_setcond_i64: + case INDEX_op_brcond_i64: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld_i64: + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case 
INDEX_op_st32_i64: + case INDEX_op_st_i64: + case INDEX_op_add_i64: + case INDEX_op_sub_i64: + case INDEX_op_mul_i64: + case INDEX_op_and_i64: + case INDEX_op_or_i64: + case INDEX_op_xor_i64: + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + return TCG_TARGET_REG_BITS == 64; + + case INDEX_op_movcond_i64: + return TCG_TARGET_HAS_movcond_i64; + case INDEX_op_div_i64: + case INDEX_op_divu_i64: + return TCG_TARGET_HAS_div_i64; + case INDEX_op_rem_i64: + case INDEX_op_remu_i64: + return TCG_TARGET_HAS_rem_i64; + case INDEX_op_div2_i64: + case INDEX_op_divu2_i64: + return TCG_TARGET_HAS_div2_i64; + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i64: + return TCG_TARGET_HAS_rot_i64; + case INDEX_op_deposit_i64: + return TCG_TARGET_HAS_deposit_i64; + case INDEX_op_extract_i64: + return TCG_TARGET_HAS_extract_i64; + case INDEX_op_sextract_i64: + return TCG_TARGET_HAS_sextract_i64; + case INDEX_op_extract2_i64: + return TCG_TARGET_HAS_extract2_i64; + case INDEX_op_extrl_i64_i32: + return TCG_TARGET_HAS_extrl_i64_i32; + case INDEX_op_extrh_i64_i32: + return TCG_TARGET_HAS_extrh_i64_i32; + case INDEX_op_ext8s_i64: + return TCG_TARGET_HAS_ext8s_i64; + case INDEX_op_ext16s_i64: + return TCG_TARGET_HAS_ext16s_i64; + case INDEX_op_ext32s_i64: + return TCG_TARGET_HAS_ext32s_i64; + case INDEX_op_ext8u_i64: + return TCG_TARGET_HAS_ext8u_i64; + case INDEX_op_ext16u_i64: + return TCG_TARGET_HAS_ext16u_i64; + case INDEX_op_ext32u_i64: + return TCG_TARGET_HAS_ext32u_i64; + case INDEX_op_bswap16_i64: + return TCG_TARGET_HAS_bswap16_i64; + case INDEX_op_bswap32_i64: + return TCG_TARGET_HAS_bswap32_i64; + case INDEX_op_bswap64_i64: + return TCG_TARGET_HAS_bswap64_i64; + case INDEX_op_not_i64: + return TCG_TARGET_HAS_not_i64; + case INDEX_op_neg_i64: + return TCG_TARGET_HAS_neg_i64; + case INDEX_op_andc_i64: + return TCG_TARGET_HAS_andc_i64; + case INDEX_op_orc_i64: + return TCG_TARGET_HAS_orc_i64; + case INDEX_op_eqv_i64: + return TCG_TARGET_HAS_eqv_i64; + case INDEX_op_nand_i64: + return TCG_TARGET_HAS_nand_i64; + case INDEX_op_nor_i64: + return TCG_TARGET_HAS_nor_i64; + case INDEX_op_clz_i64: + return TCG_TARGET_HAS_clz_i64; + case INDEX_op_ctz_i64: + return TCG_TARGET_HAS_ctz_i64; + case INDEX_op_ctpop_i64: + return TCG_TARGET_HAS_ctpop_i64; + case INDEX_op_add2_i64: + return TCG_TARGET_HAS_add2_i64; + case INDEX_op_sub2_i64: + return TCG_TARGET_HAS_sub2_i64; + case INDEX_op_mulu2_i64: + return TCG_TARGET_HAS_mulu2_i64; + case INDEX_op_muls2_i64: + return TCG_TARGET_HAS_muls2_i64; + case INDEX_op_muluh_i64: + return TCG_TARGET_HAS_muluh_i64; + case INDEX_op_mulsh_i64: + return TCG_TARGET_HAS_mulsh_i64; + + case INDEX_op_mov_vec: + case INDEX_op_dup_vec: + case INDEX_op_dupi_vec: + case INDEX_op_dupm_vec: + case INDEX_op_ld_vec: + case INDEX_op_st_vec: + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_cmp_vec: + return have_vec; + case INDEX_op_dup2_vec: + return have_vec && TCG_TARGET_REG_BITS == 32; + case INDEX_op_not_vec: + return have_vec && TCG_TARGET_HAS_not_vec; + case INDEX_op_neg_vec: + return have_vec && TCG_TARGET_HAS_neg_vec; + case INDEX_op_abs_vec: + return have_vec && TCG_TARGET_HAS_abs_vec; + case INDEX_op_andc_vec: + return have_vec && TCG_TARGET_HAS_andc_vec; + case INDEX_op_orc_vec: + return have_vec && TCG_TARGET_HAS_orc_vec; + case INDEX_op_mul_vec: + return have_vec && TCG_TARGET_HAS_mul_vec; + case INDEX_op_shli_vec: 
+ case INDEX_op_shri_vec: + case INDEX_op_sari_vec: + return have_vec && TCG_TARGET_HAS_shi_vec; + case INDEX_op_shls_vec: + case INDEX_op_shrs_vec: + case INDEX_op_sars_vec: + return have_vec && TCG_TARGET_HAS_shs_vec; + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + return have_vec && TCG_TARGET_HAS_shv_vec; + case INDEX_op_ssadd_vec: + case INDEX_op_usadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_ussub_vec: + return have_vec && TCG_TARGET_HAS_sat_vec; + case INDEX_op_smin_vec: + case INDEX_op_umin_vec: + case INDEX_op_smax_vec: + case INDEX_op_umax_vec: + return have_vec && TCG_TARGET_HAS_minmax_vec; + case INDEX_op_bitsel_vec: + return have_vec && TCG_TARGET_HAS_bitsel_vec; + case INDEX_op_cmpsel_vec: + return have_vec && TCG_TARGET_HAS_cmpsel_vec; + + default: + tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS); + return true; + } +} + /* Note: we convert the 64 bit args to 32 bit and do some alignment and endian swap. Maybe it would be better to do the alignment and endian swap in tcg_reg_alloc_call(). */ -void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret, - int nargs, TCGArg *args) +void tcg_gen_callN(TCGContext *tcg_ctx, void *func, TCGTemp *ret, int nargs, TCGTemp **args) { - int i, real_args, nb_rets; + int i, real_args, nb_rets, pi; unsigned sizemask, flags; - TCGArg *nparam; TCGHelperInfo *info; + TCGOp *op; - info = g_hash_table_lookup(s->helpers, (gpointer)func); + info = g_hash_table_lookup(tcg_ctx->helper_table, (gpointer)func); flags = info->flags; sizemask = info->sizemask; -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) +#if defined(__sparc__) && !defined(__arch64__) /* We have 64-bit values in one register, but need to pass as two separate parameters. Split them. 
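The function above follows one convention throughout: mandatory ops return true unconditionally, optional scalar ops return their TCG_TARGET_HAS_* flag, and vector ops additionally require have_vec. A minimal sketch of a caller, assuming the context-first call convention this fork uses (gen_rotli_any is a hypothetical name; in practice tcg_gen_rotli_i32 already performs this expansion internally):

    /* Emit a 32-bit rotate-left by constant c, falling back to two shifts
     * plus an OR when the backend lacks a rotate.  Assumes 0 < c < 32 so
     * neither shift count is out of range. */
    static void gen_rotli_any(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg,
                              unsigned c)
    {
        if (tcg_op_supported(INDEX_op_rotl_i32)) {
            tcg_gen_rotli_i32(s, ret, arg, c);
        } else {
            TCGv_i32 hi = tcg_temp_new_i32(s);
            TCGv_i32 lo = tcg_temp_new_i32(s);

            tcg_gen_shli_i32(s, hi, arg, c);
            tcg_gen_shri_i32(s, lo, arg, 32 - c);
            tcg_gen_or_i32(s, ret, hi, lo);
            tcg_temp_free_i32(s, hi);
            tcg_temp_free_i32(s, lo);
        }
    }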
*/ int orig_sizemask = sizemask; int orig_nargs = nargs; TCGv_i64 retl, reth; + TCGTemp *split_args[MAX_OPC_PARAM]; - TCGV_UNUSED_I64(retl); - TCGV_UNUSED_I64(reth); + retl = NULL; + reth = NULL; if (sizemask != 0) { - TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2); for (i = real_args = 0; i < nargs; ++i) { int is_64bit = sizemask & (1 << (i+1)*2); if (is_64bit) { - TCGv_i64 orig = MAKE_TCGV_I64(args[i]); - TCGv_i32 h = tcg_temp_new_i32(s); - TCGv_i32 l = tcg_temp_new_i32(s); + TCGv_i64 orig = temp_tcgv_i64(args[i]); + TCGv_i32 h = tcg_temp_new_i32(); + TCGv_i32 l = tcg_temp_new_i32(); tcg_gen_extr_i64_i32(l, h, orig); - split_args[real_args++] = GET_TCGV_I32(h); - split_args[real_args++] = GET_TCGV_I32(l); + split_args[real_args++] = tcgv_i32_temp(h); + split_args[real_args++] = tcgv_i32_temp(l); } else { split_args[real_args++] = args[i]; } @@ -735,54 +1419,56 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret, int is_64bit = sizemask & (1 << (i+1)*2); int is_signed = sizemask & (2 << (i+1)*2); if (!is_64bit) { - TCGv_i64 temp = tcg_temp_new_i64(s); - TCGv_i64 orig = MAKE_TCGV_I64(args[i]); + TCGv_i64 temp = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 orig = temp_tcgv_i64(tcg_ctx, args[i]); if (is_signed) { - tcg_gen_ext32s_i64(s, temp, orig); + tcg_gen_ext32s_i64(tcg_ctx, temp, orig); } else { - tcg_gen_ext32u_i64(s, temp, orig); + tcg_gen_ext32u_i64(tcg_ctx, temp, orig); } - args[i] = GET_TCGV_I64(temp); + args[i] = tcgv_i64_temp(tcg_ctx, temp); } } #endif /* TCG_TARGET_EXTEND_ARGS */ - *s->gen_opc_ptr++ = INDEX_op_call; - nparam = s->gen_opparam_ptr++; - if (ret != TCG_CALL_DUMMY_ARG) { -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) + op = tcg_emit_op(tcg_ctx, INDEX_op_call); + + pi = 0; + if (ret != NULL) { +#if defined(__sparc__) && !defined(__arch64__) if (orig_sizemask & 1) { /* The 32-bit ABI is going to return the 64-bit value in the %o0/%o1 register pair. Prepare for this by using two return temporaries, and reassemble below. */ - retl = tcg_temp_new_i64(s); - reth = tcg_temp_new_i64(s); - *s->gen_opparam_ptr++ = GET_TCGV_I64(reth); - *s->gen_opparam_ptr++ = GET_TCGV_I64(retl); + retl = tcg_temp_new_i64(); + reth = tcg_temp_new_i64(); + op->args[pi++] = tcgv_i64_arg(reth); + op->args[pi++] = tcgv_i64_arg(retl); nb_rets = 2; } else { - *s->gen_opparam_ptr++ = ret; + op->args[pi++] = temp_arg(ret); nb_rets = 1; } #else if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) { #ifdef HOST_WORDS_BIGENDIAN - *s->gen_opparam_ptr++ = ret + 1; - *s->gen_opparam_ptr++ = ret; + op->args[pi++] = temp_arg(ret + 1); + op->args[pi++] = temp_arg(ret); #else - *s->gen_opparam_ptr++ = ret; - *s->gen_opparam_ptr++ = ret + 1; + op->args[pi++] = temp_arg(ret); + op->args[pi++] = temp_arg(ret + 1); #endif nb_rets = 2; } else { - *s->gen_opparam_ptr++ = ret; + op->args[pi++] = temp_arg(ret); nb_rets = 1; } #endif } else { nb_rets = 0; } + TCGOP_CALLO(op) = nb_rets; + real_args = 0; for (i = 0; i < nargs; i++) { int is_64bit = sizemask & (1 << (i+1)*2); @@ -790,52 +1476,49 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret, #ifdef TCG_TARGET_CALL_ALIGN_ARGS /* some targets want aligned 64 bit args */ if (real_args & 1) { - *s->gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG; + op->args[pi++] = TCG_CALL_DUMMY_ARG; real_args++; } #endif - /* If stack grows up, then we will be placing successive - arguments at lower addresses, which means we need to - reverse the order compared to how we would normally - treat either big or little-endian. 
For those arguments - that will wind up in registers, this still works for - HPPA (the only current STACK_GROWSUP target) since the - argument registers are *also* allocated in decreasing - order. If another such target is added, this logic may - have to get more complicated to differentiate between - stack arguments and register arguments. */ + /* If stack grows up, then we will be placing successive + arguments at lower addresses, which means we need to + reverse the order compared to how we would normally + treat either big or little-endian. For those arguments + that will wind up in registers, this still works for + HPPA (the only current STACK_GROWSUP target) since the + argument registers are *also* allocated in decreasing + order. If another such target is added, this logic may + have to get more complicated to differentiate between + stack arguments and register arguments. */ #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) - *s->gen_opparam_ptr++ = args[i] + 1; - *s->gen_opparam_ptr++ = args[i]; + op->args[pi++] = temp_arg(args[i] + 1); + op->args[pi++] = temp_arg(args[i]); #else - *s->gen_opparam_ptr++ = args[i]; - *s->gen_opparam_ptr++ = args[i] + 1; + op->args[pi++] = temp_arg(args[i]); + op->args[pi++] = temp_arg(args[i] + 1); #endif real_args += 2; continue; } - *s->gen_opparam_ptr++ = args[i]; + op->args[pi++] = temp_arg(args[i]); real_args++; } - *s->gen_opparam_ptr++ = (uintptr_t)func; - *s->gen_opparam_ptr++ = flags; + op->args[pi++] = (uintptr_t)func; + op->args[pi++] = flags; + TCGOP_CALLI(op) = real_args; - *nparam = (nb_rets << 16) | real_args; + /* Make sure the fields didn't overflow. */ + tcg_debug_assert(TCGOP_CALLI(op) == real_args); + tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); - /* total parameters, needed to go backward in the instruction stream */ - *s->gen_opparam_ptr++ = 1 + nb_rets + real_args + 3; - -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) +#if defined(__sparc__) && !defined(__arch64__) /* Free all of the parts we allocated above. */ for (i = real_args = 0; i < orig_nargs; ++i) { int is_64bit = orig_sizemask & (1 << (i+1)*2); if (is_64bit) { - TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]); - TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]); - tcg_temp_free_i32(s, h); - tcg_temp_free_i32(s, l); + tcg_temp_free_internal(args[real_args++]); + tcg_temp_free_internal(args[real_args++]); } else { real_args++; } @@ -844,252 +1527,45 @@ void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret, /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. Note that describing these as TCGv_i64 eliminates an unnecessary zero-extension that tcg_gen_concat_i32_i64 would create. 
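The SPARC32 special case is built from two generic primitives: tcg_gen_extr_i64_i32() to split a 64-bit value into 32-bit halves, and tcg_gen_concat_i32_i64() to reassemble them. A self-contained sketch of that round trip, assuming the context-first convention used on the non-SPARC paths (the function name is hypothetical):

    /* Split a 64-bit temp into 32-bit halves and join them back together,
     * i.e. the round trip the 32-bit SPARC call path performs around a
     * helper invocation. */
    static void split_join_demo(TCGContext *s, TCGv_i64 val)
    {
        TCGv_i32 lo = tcg_temp_new_i32(s);
        TCGv_i32 hi = tcg_temp_new_i32(s);

        tcg_gen_extr_i64_i32(s, lo, hi, val);    /* lo = val[31:0], hi = val[63:32] */
        tcg_gen_concat_i32_i64(s, val, lo, hi);  /* val = hi:lo again */

        tcg_temp_free_i32(s, lo);
        tcg_temp_free_i32(s, hi);
    }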
*/ - tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth); - tcg_temp_free_i64(s, retl); - tcg_temp_free_i64(s, reth); + tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth); + tcg_temp_free_i64(retl); + tcg_temp_free_i64(reth); } #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 for (i = 0; i < nargs; ++i) { int is_64bit = sizemask & (1 << (i+1)*2); if (!is_64bit) { - TCGv_i64 temp = MAKE_TCGV_I64(args[i]); - tcg_temp_free_i64(s, temp); + tcg_temp_free_internal(tcg_ctx, args[i]); } } #endif /* TCG_TARGET_EXTEND_ARGS */ } -#if TCG_TARGET_REG_BITS == 32 -void tcg_gen_shifti_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, - int c, int right, int arith) -{ - if (c == 0) { - tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1)); - tcg_gen_mov_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1)); - } else if (c >= 32) { - c -= 32; - if (right) { - if (arith) { - tcg_gen_sari_i32(s, TCGV_LOW(ret), TCGV_HIGH(arg1), c); - tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), 31); - } else { - tcg_gen_shri_i32(s, TCGV_LOW(ret), TCGV_HIGH(arg1), c); - tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); - } - } else { - tcg_gen_shli_i32(s, TCGV_HIGH(ret), TCGV_LOW(arg1), c); - tcg_gen_movi_i32(s, TCGV_LOW(ret), 0); - } - } else { - TCGv_i32 t0, t1; - - t0 = tcg_temp_new_i32(s); - t1 = tcg_temp_new_i32(s); - if (right) { - tcg_gen_shli_i32(s, t0, TCGV_HIGH(arg1), 32 - c); - if (arith) - tcg_gen_sari_i32(s, t1, TCGV_HIGH(arg1), c); - else - tcg_gen_shri_i32(s, t1, TCGV_HIGH(arg1), c); - tcg_gen_shri_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), c); - tcg_gen_or_i32(s, TCGV_LOW(ret), TCGV_LOW(ret), t0); - tcg_gen_mov_i32(s, TCGV_HIGH(ret), t1); - } else { - tcg_gen_shri_i32(s, t0, TCGV_LOW(arg1), 32 - c); - /* Note: ret can be the same as arg1, so we use t1 */ - tcg_gen_shli_i32(s, t1, TCGV_LOW(arg1), c); - tcg_gen_shli_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), c); - tcg_gen_or_i32(s, TCGV_HIGH(ret), TCGV_HIGH(ret), t0); - tcg_gen_mov_i32(s, TCGV_LOW(ret), t1); - } - tcg_temp_free_i32(s, t0); - tcg_temp_free_i32(s, t1); - } -} -#endif - -static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) -{ - switch (op & MO_SIZE) { - case MO_8: - op &= ~MO_BSWAP; - break; - case MO_16: - break; - case MO_32: - if (!is64) { - op &= ~MO_SIGN; - } - break; - case MO_64: - if (!is64) { - tcg_abort(); - } - break; - } - if (st) { - op &= ~MO_SIGN; - } - return op; -} - -// Unicorn engine -// check if the last memory access was invalid -// if so, we jump to the block epilogue to quit immediately. 
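The new tcg_gen_callN() above packs an entire call into a single TCGOp: output temps first, then inputs, then the function pointer and its flags, with the two counts recorded via TCGOP_CALLO/TCGOP_CALLI. A sketch of how later passes (and tcg_dump_op() below) unpack that layout; the helper name is hypothetical:

    /* Decode a call op.  args[] holds O outputs, then I inputs, then the
     * helper address and the call flags. */
    static void inspect_call_op(TCGContext *s, const TCGOp *op)
    {
        int nb_oargs = TCGOP_CALLO(op);
        int nb_iargs = TCGOP_CALLI(op);
        uintptr_t func = (uintptr_t)op->args[nb_oargs + nb_iargs];
        unsigned flags = (unsigned)op->args[nb_oargs + nb_iargs + 1];
        const char *name = tcg_find_helper(s, func);

        printf("call %s: %d out, %d in, flags=0x%x\n",
               name ? name : "?", nb_oargs, nb_iargs, flags);
    }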
-void check_exit_request(TCGContext *tcg_ctx) -{ - TCGv_i32 flag; - - flag = tcg_temp_new_i32(tcg_ctx); - tcg_gen_ld_i32(tcg_ctx, flag, tcg_ctx->cpu_env, - offsetof(CPUState, tcg_exit_req) - ENV_OFFSET); - tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, flag, 0, tcg_ctx->exitreq_label); - tcg_temp_free_i32(tcg_ctx, flag); -} - -void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - - memop = tcg_canonicalize_memop(memop, 0, 0); - - *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_ld_i32; - tcg_add_param_i32(tcg_ctx, val); - tcg_add_param_tl(tcg_ctx, addr); - *tcg_ctx->gen_opparam_ptr++ = memop; - *tcg_ctx->gen_opparam_ptr++ = idx; - - check_exit_request(tcg_ctx); -} - -void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - - memop = tcg_canonicalize_memop(memop, 0, 1); - - *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_st_i32; - tcg_add_param_i32(tcg_ctx, val); - tcg_add_param_tl(tcg_ctx, addr); - *tcg_ctx->gen_opparam_ptr++ = memop; - *tcg_ctx->gen_opparam_ptr++ = idx; - - check_exit_request(tcg_ctx); -} - -void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - - memop = tcg_canonicalize_memop(memop, 1, 0); - -#if TCG_TARGET_REG_BITS == 32 - if ((memop & MO_SIZE) < MO_64) { - tcg_gen_qemu_ld_i32(uc, TCGV_LOW(val), addr, idx, memop); - if (memop & MO_SIGN) { - tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(val), TCGV_LOW(val), 31); - } else { - tcg_gen_movi_i32(tcg_ctx, TCGV_HIGH(val), 0); - } - - check_exit_request(tcg_ctx); - return; - } -#endif - - *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_ld_i64; - tcg_add_param_i64(tcg_ctx, val); - tcg_add_param_tl(tcg_ctx, addr); - *tcg_ctx->gen_opparam_ptr++ = memop; - *tcg_ctx->gen_opparam_ptr++ = idx; - - check_exit_request(tcg_ctx); -} - -void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - - memop = tcg_canonicalize_memop(memop, 1, 1); - -#if TCG_TARGET_REG_BITS == 32 - if ((memop & MO_SIZE) < MO_64) { - tcg_gen_qemu_st_i32(uc, TCGV_LOW(val), addr, idx, memop); - check_exit_request(tcg_ctx); - return; - } -#endif - - *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_st_i64; - tcg_add_param_i64(tcg_ctx, val); - tcg_add_param_tl(tcg_ctx, addr); - *tcg_ctx->gen_opparam_ptr++ = memop; - *tcg_ctx->gen_opparam_ptr++ = idx; - - check_exit_request(tcg_ctx); -} - static void tcg_reg_alloc_start(TCGContext *s) { - int i; + int i, n; TCGTemp *ts; - for(i = 0; i < s->nb_globals; i++) { + + for (i = 0, n = s->nb_globals; i < n; i++) { ts = &s->temps[i]; - if (ts->fixed_reg) { - ts->val_type = TEMP_VAL_REG; - } else { - ts->val_type = TEMP_VAL_MEM; - } + ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM); } - for(i = s->nb_globals; i < s->nb_temps; i++) { + for (n = s->nb_temps; i < n; i++) { ts = &s->temps[i]; - if (ts->temp_local) { - ts->val_type = TEMP_VAL_MEM; - } else { - ts->val_type = TEMP_VAL_DEAD; - } + ts->val_type = (ts->temp_local ? 
TEMP_VAL_MEM : TEMP_VAL_DEAD); ts->mem_allocated = 0; ts->fixed_reg = 0; } - for(i = 0; i < TCG_TARGET_NB_REGS; i++) { - s->reg_to_temp[i] = -1; - } -} -static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size, - int idx) -{ - TCGTemp *ts; - - assert(idx >= 0 && idx < s->nb_temps); - ts = &s->temps[idx]; - if (idx < s->nb_globals) { - pstrcpy(buf, buf_size, ts->name); - } else { - if (ts->temp_local) - snprintf(buf, buf_size, "loc%d", idx - s->nb_globals); - else - snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals); - } - return buf; -} - -char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg) -{ - return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg)); -} - -char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg) -{ - return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg)); + memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp)); } /* Find helper name. */ static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val) { const char *ret = NULL; - if (s->helpers) { - TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val); + if (s->helper_table) { + TCGHelperInfo *info = g_hash_table_lookup(s->helper_table, (gpointer)val); if (info) { ret = info->name; } @@ -1097,26 +1573,29 @@ static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val) return ret; } +static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size, + TCGTemp *ts) +{ + int idx = temp_idx(s, ts); + + if (ts->temp_global) { + pstrcpy(buf, buf_size, ts->name); + } else if (ts->temp_local) { + snprintf(buf, buf_size, "loc%d", idx - s->nb_globals); + } else { + snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals); + } + return buf; +} + +static char *tcg_get_arg_str(TCGContext *s, char *buf, + int buf_size, TCGArg arg) +{ + return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg)); +} + static const char * const cond_name[] = { -#ifdef _MSC_VER - "never", // TCG_COND_NEVER - "always", // TCG_COND_ALWAYS - "lt", // TCG_COND_LT - "ge", // TCG_COND_GE - "ltu", // TCG_COND_LTU - "geu", // TCG_COND_GEU - NULL, // n/a - NULL, // n/a - "eq", // TCG_COND_EQ - "ne", // TCG_COND_NE - "le", // TCG_COND_LE - "gt", // TCG_COND_GT - "leu", // TCG_COND_LEU - "gtu", // TCG_COND_GTU - NULL, // n/a - NULL, // n/a -#else [TCG_COND_NEVER] = "never", [TCG_COND_ALWAYS] = "always", [TCG_COND_EQ] = "eq", @@ -1129,48 +1608,10 @@ static const char * const cond_name[] = [TCG_COND_GEU] = "geu", [TCG_COND_LEU] = "leu", [TCG_COND_GTU] = "gtu" -#endif }; static const char * const ldst_name[] = { -#ifdef _MSC_VER - "ub", // MO_UB -# ifdef HOST_WORDS_BIGENDIAN - "beuw", // MO_BEUW - "beul", // MO_BEUL - "beq", // MO_BEQ - "sb", // MO_SB - "besw", // MO_BESW - "besl", // MO_BESL - NULL, // n/a - NULL, // n/a - "leuw", // MO_LEUW - "leul", // MO_LEUL - "leq", // MO_LEQ - NULL, // n/a - "lesw", // MO_LESW - "lesl", // MO_LESL - NULL, // n/a -# else // !HOST_WORDS_BIGENDIAN - "leuw", // MO_LEUW - "leul", // MO_LEUL - "leq", // MO_LEQ - "sb", // MO_SB - "lesw", // MO_LESW - "lesl", // MO_LESL - NULL, // n/a - NULL, // n/a - "beuw", // MO_BEUW - "beul", // MO_BEUL - "beq", // MO_BEQ - NULL, // n/a - "besw", // MO_BESW - "besl", // MO_BESL - NULL, // n/a -# endif // HOST_WORDS_BIGENDIAN - -#else //_MSC_VER [MO_UB] = "ub", [MO_SB] = "sb", [MO_LEUW] = "leuw", @@ -1183,94 +1624,99 @@ static const char * const ldst_name[] = [MO_BEUL] = "beul", [MO_BESL] = "besl", [MO_BEQ] = "beq", -#endif // _MSC_VER }; -void tcg_dump_ops(TCGContext *s) -{ - const 
uint16_t *opc_ptr; - const TCGArg *args; - TCGArg arg; - TCGOpcode c; - int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn; - const TCGOpDef *def; - char buf[128]; - - first_insn = 1; - opc_ptr = s->gen_opc_buf; - args = s->gen_opparam_buf; - while (opc_ptr < s->gen_opc_ptr) { - c = *opc_ptr++; - def = &s->tcg_op_defs[c]; - if (c == INDEX_op_debug_insn_start) { - uint64_t pc; -#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS - pc = ((uint64_t)args[1] << 32) | args[0]; +static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = { +#ifdef TARGET_ALIGNED_ONLY + [MO_UNALN >> MO_ASHIFT] = "un+", + [MO_ALIGN >> MO_ASHIFT] = "", #else - pc = args[0]; + [MO_UNALN >> MO_ASHIFT] = "", + [MO_ALIGN >> MO_ASHIFT] = "al+", #endif - if (!first_insn) { - printf("\n"); - } - printf(" ---- 0x%" PRIx64, pc); - first_insn = 0; - nb_oargs = def->nb_oargs; - nb_iargs = def->nb_iargs; - nb_cargs = def->nb_cargs; - } else if (c == INDEX_op_call) { - TCGArg arg; + [MO_ALIGN_2 >> MO_ASHIFT] = "al2+", + [MO_ALIGN_4 >> MO_ASHIFT] = "al4+", + [MO_ALIGN_8 >> MO_ASHIFT] = "al8+", + [MO_ALIGN_16 >> MO_ASHIFT] = "al16+", + [MO_ALIGN_32 >> MO_ASHIFT] = "al32+", + [MO_ALIGN_64 >> MO_ASHIFT] = "al64+", +}; - /* variable number of arguments */ - arg = *args++; - nb_oargs = arg >> 16; - nb_iargs = arg & 0xffff; - nb_cargs = def->nb_cargs; +/* + * Unicorn: Utility to dump a single op. + */ +void tcg_dump_op(TCGContext *s, bool have_prefs, TCGOp* op) +{ + char buf[128]; + int i, k, nb_oargs, nb_iargs, nb_cargs; + const TCGOpDef *def; + TCGOpcode c; - /* function name, flags, out args */ - printf(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name, - tcg_find_helper(s, args[nb_oargs + nb_iargs]), - args[nb_oargs + nb_iargs + 1], nb_oargs); - for (i = 0; i < nb_oargs; i++) { - printf(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf), - args[i])); - } - for (i = 0; i < nb_iargs; i++) { - TCGArg arg = args[nb_oargs + i]; - const char *t = ""; - if (arg != TCG_CALL_DUMMY_ARG) { - t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg); - } - printf(",%s", t); - } - } else { - printf(" %s ", def->name); - if (c == INDEX_op_nopn) { - /* variable number of arguments */ - nb_cargs = *args; - nb_oargs = 0; - nb_iargs = 0; - } else { - nb_oargs = def->nb_oargs; - nb_iargs = def->nb_iargs; - nb_cargs = def->nb_cargs; - } + c = op->opc; + def = &s->tcg_op_defs[c]; + if (c == INDEX_op_insn_start) { + nb_oargs = 0; + printf(" ----"); - k = 0; - for(i = 0; i < nb_oargs; i++) { - if (k != 0) { - printf(","); - } - printf("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf), - args[k++])); + for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { + target_ulong a; +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS + a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]); +#else + a = op->args[i]; +#endif + printf(" " TARGET_FMT_lx, a); + } + } else if (c == INDEX_op_call) { + /* variable number of arguments */ + nb_oargs = TCGOP_CALLO(op); + nb_iargs = TCGOP_CALLI(op); + nb_cargs = def->nb_cargs; + + /* function name, flags, out args */ + printf(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name, + tcg_find_helper(s, op->args[nb_oargs + nb_iargs]), + op->args[nb_oargs + nb_iargs + 1], nb_oargs); + for (i = 0; i < nb_oargs; i++) { + printf(",%s", tcg_get_arg_str(s, buf, sizeof(buf), + op->args[i])); + } + for (i = 0; i < nb_iargs; i++) { + TCGArg arg = op->args[nb_oargs + i]; + const char *t = ""; + if (arg != TCG_CALL_DUMMY_ARG) { + t = tcg_get_arg_str(s, buf, sizeof(buf), arg); } - for(i = 0; i < nb_iargs; i++) { - if (k != 0) { - printf(","); - } - printf("%s", 
tcg_get_arg_str_idx(s, buf, sizeof(buf), - args[k++])); + printf(",%s", t); + } + } else { + printf(" %s ", def->name); + + nb_oargs = def->nb_oargs; + nb_iargs = def->nb_iargs; + nb_cargs = def->nb_cargs; + + if (def->flags & TCG_OPF_VECTOR) { + printf("v%d,e%d,", 64 << TCGOP_VECL(op), + 8 << TCGOP_VECE(op)); + } + + k = 0; + for (i = 0; i < nb_oargs; i++) { + if (k != 0) { + printf(","); } - switch (c) { + printf("%s", tcg_get_arg_str(s, buf, sizeof(buf), + op->args[k++])); + } + for (i = 0; i < nb_iargs; i++) { + if (k != 0) { + printf(","); + } + printf("%s", tcg_get_arg_str(s, buf, sizeof(buf), + op->args[k++])); + } + switch (c) { case INDEX_op_brcond_i32: case INDEX_op_setcond_i32: case INDEX_op_movcond_i32: @@ -1279,10 +1725,13 @@ void tcg_dump_ops(TCGContext *s) case INDEX_op_brcond_i64: case INDEX_op_setcond_i64: case INDEX_op_movcond_i64: - if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) { - printf(",%s", cond_name[args[k++]]); + case INDEX_op_cmp_vec: + case INDEX_op_cmpsel_vec: + if (op->args[k] < ARRAY_SIZE(cond_name) + && cond_name[op->args[k]]) { + printf(",%s", cond_name[op->args[k++]]); } else { - printf(",$0x%" TCG_PRIlx, args[k++]); + printf(",$0x%" TCG_PRIlx, op->args[k++]); } i = 1; break; @@ -1290,29 +1739,159 @@ void tcg_dump_ops(TCGContext *s) case INDEX_op_qemu_st_i32: case INDEX_op_qemu_ld_i64: case INDEX_op_qemu_st_i64: - if (args[k] < ARRAY_SIZE(ldst_name) && ldst_name[args[k]]) { - printf(",%s", ldst_name[args[k++]]); + { + TCGMemOpIdx oi = op->args[k++]; + MemOp op = get_memop(oi); + unsigned ix = get_mmuidx(oi); + + if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { + printf(",$0x%x,%u", op, ix); } else { - printf(",$0x%" TCG_PRIlx, args[k++]); + const char *s_al, *s_op; + s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT]; + s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)]; + printf(",%s%s,%u", s_al, s_op, ix); } i = 1; + } break; default: i = 0; break; - } - for(; i < nb_cargs; i++) { - if (k != 0) { - printf(","); - } - arg = args[k++]; - printf("$0x%" TCG_PRIlx, arg); + } + switch (c) { + case INDEX_op_set_label: + case INDEX_op_br: + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + case INDEX_op_brcond2_i32: + printf("%s$L%d", k ? "," : "", + arg_label(op->args[k])->id); + i++, k++; + break; + default: + break; + } + for (; i < nb_cargs; i++, k++) { + printf("%s$0x%" TCG_PRIlx, k ? 
"," : "", op->args[k]); + } + if(c == INDEX_op_mov_i64){ + struct TCGTemp* tp = arg_temp(op->args[1]); + if (tp && tp->val_type == TEMP_VAL_MEM){ + printf(" mem_base=%p ", tp->mem_base); } } - printf("\n"); - args += nb_iargs + nb_oargs + nb_cargs; } - printf("###########\n"); + + if (op->life) { + unsigned life = op->life; + + if (life & (SYNC_ARG * 3)) { + printf(" sync:"); + for (i = 0; i < 2; ++i) { + if (life & (SYNC_ARG << i)) { + printf(" %d", i); + } + } + } + life /= DEAD_ARG; + if (life) { + printf(" dead:"); + for (i = 0; life; ++i, life >>= 1) { + if (life & 1) { + printf(" %d", i); + } + } + } + } + + if (have_prefs) { + for (i = 0; i < nb_oargs; ++i) { + TCGRegSet set = op->output_pref[i]; + + if (i == 0) { + printf(" pref="); + } else { + printf(","); + } + if (set == 0) { + printf("none"); + } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) { + printf("all"); +#ifdef CONFIG_DEBUG_TCG + } else if (tcg_regset_single(set)) { + TCGReg reg = tcg_regset_first(set); + printf("%s", tcg_target_reg_names[reg]); +#endif + } else if (TCG_TARGET_NB_REGS <= 32) { + printf("%#x", (uint32_t)set); + } else { + printf("%#" PRIx64, (uint64_t)set); + } + } + } + + printf("\n"); +} + +#if 0 +static gboolean tcg_dump_tb(gpointer key, gpointer value, gpointer data) +{ + TranslationBlock* tb = (TranslationBlock*)value; + + printf(" TB "TARGET_FMT_lx"->"TARGET_FMT_lx", flag=%x, cflag=%x\n", tb->pc, tb->pc + tb->size, tb->flags, tb->cflags); + + return false; +} +#endif + +#if 0 +/* + * Utility to iterate tbs for a TCGContext*. + */ +static void tcg_dump_tbs(TCGContext *s) +{ + printf(" TBs:\n"); + tcg_tb_foreach(s, tcg_dump_tb, NULL); + printf("\n"); + return; +} +#endif + +void tcg_dump_ops(TCGContext *s, bool have_prefs, const char *headline) +{ + TCGOp *op; + int insn_idx = 0; + int op_idx = 0; + + printf("\n*** %s\n", headline); + // tcg_dump_tbs(s, tcg_dump_tb, NULL); + + QTAILQ_FOREACH(op, &s->ops, link) { + if (op->opc == INDEX_op_insn_start) { + printf("\n insn_idx=%d", insn_idx); + insn_idx++; + op_idx = 0; + } else { + printf(" %d: ", op_idx); + } + op_idx++; + tcg_dump_op(s, have_prefs, op); + } +} + +static inline bool tcg_regset_single(TCGRegSet d) +{ + return (d & (d - 1)) == 0; +} + +static inline TCGReg tcg_regset_first(TCGRegSet d) +{ + if (TCG_TARGET_NB_REGS <= 32) { + return ctz32(d); + } else { + return ctz64(d); + } } /* we give more priority to constraints with less registers */ @@ -1359,300 +1938,494 @@ static void sort_constraints(TCGOpDef *def, int start, int n) } } -void tcg_add_target_add_op_defs(TCGContext *s, const TCGTargetOpDef *tdefs) +static void process_op_defs(TCGContext *s) { TCGOpcode op; - TCGOpDef *def; - const char *ct_str; - int i, nb_args; - for(;;) { - if (tdefs->op == (TCGOpcode)-1) - break; - op = tdefs->op; - assert((unsigned)op < NB_OPS); - def = &s->tcg_op_defs[op]; -#if defined(CONFIG_DEBUG_TCG) - /* Duplicate entry in op definitions? */ - assert(!def->used); - def->used = 1; -#endif + for (op = 0; op < NB_OPS; op++) { + TCGOpDef *def = &s->tcg_op_defs[op]; + const TCGTargetOpDef *tdefs; + TCGType type; + int i, nb_args; + + if (def->flags & TCG_OPF_NOT_PRESENT) { + continue; + } + nb_args = def->nb_iargs + def->nb_oargs; - for(i = 0; i < nb_args; i++) { - ct_str = tdefs->args_ct_str[i]; - /* Incomplete TCGTargetOpDef entry? */ - assert(ct_str != NULL); - tcg_regset_clear(def->args_ct[i].u.regs); + if (nb_args == 0) { + continue; + } + + tdefs = tcg_target_op_def(op); + /* Missing TCGTargetOpDef entry. 
*/ + tcg_debug_assert(tdefs != NULL); + + type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32); + for (i = 0; i < nb_args; i++) { + const char *ct_str = tdefs->args_ct_str[i]; + /* Incomplete TCGTargetOpDef entry. */ + tcg_debug_assert(ct_str != NULL); + + def->args_ct[i].u.regs = 0; def->args_ct[i].ct = 0; - if (ct_str[0] >= '0' && ct_str[0] <= '9') { - int oarg; - oarg = ct_str[0] - '0'; - assert(oarg < def->nb_oargs); - assert(def->args_ct[oarg].ct & TCG_CT_REG); - /* TCG_CT_ALIAS is for the output arguments. The input - argument is tagged with TCG_CT_IALIAS. */ - def->args_ct[i] = def->args_ct[oarg]; - def->args_ct[oarg].ct = TCG_CT_ALIAS; - def->args_ct[oarg].alias_index = i; - def->args_ct[i].ct |= TCG_CT_IALIAS; - def->args_ct[i].alias_index = oarg; - } else { - for(;;) { - if (*ct_str == '\0') - break; - switch(*ct_str) { - case 'i': - def->args_ct[i].ct |= TCG_CT_CONST; - ct_str++; - break; - default: - if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) { - fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n", - ct_str, i, def->name); - exit(1); - } + while (*ct_str != '\0') { + switch(*ct_str) { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + int oarg = *ct_str - '0'; + tcg_debug_assert(ct_str == tdefs->args_ct_str[i]); + tcg_debug_assert(oarg < def->nb_oargs); + tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG); + /* TCG_CT_ALIAS is for the output arguments. + The input is tagged with TCG_CT_IALIAS. */ + def->args_ct[i] = def->args_ct[oarg]; + def->args_ct[oarg].ct |= TCG_CT_ALIAS; + def->args_ct[oarg].alias_index = i; + def->args_ct[i].ct |= TCG_CT_IALIAS; + def->args_ct[i].alias_index = oarg; } + ct_str++; + break; + case '&': + def->args_ct[i].ct |= TCG_CT_NEWREG; + ct_str++; + break; + case 'i': + def->args_ct[i].ct |= TCG_CT_CONST; + ct_str++; + break; + default: + ct_str = target_parse_constraint(&def->args_ct[i], + ct_str, type); + /* Typo in TCGTargetOpDef constraint. */ + tcg_debug_assert(ct_str != NULL); } } } /* TCGTargetOpDef entry with too much information? */ - assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL); + tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL); /* sort the constraints (XXX: this is just an heuristic) */ sort_constraints(def, 0, def->nb_oargs); sort_constraints(def, def->nb_oargs, def->nb_iargs); - -#if 0 - { - int i; - - printf("%s: sorted=", def->name); - for(i = 0; i < def->nb_oargs + def->nb_iargs; i++) - printf(" %d", def->sorted_args[i]); - printf("\n"); - } -#endif - tdefs++; } - -#if defined(CONFIG_DEBUG_TCG) - i = 0; - for (op = 0; op < ARRAY_SIZE(s->tcg_op_defs); op++) { - const TCGOpDef *def = &s->tcg_op_defs[op]; - if (def->flags & TCG_OPF_NOT_PRESENT) { - /* Wrong entry in op definitions? */ - if (def->used) { - fprintf(stderr, "Invalid op definition for %s\n", def->name); - i = 1; - } - } else { - /* Missing entry in op definitions? 
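Stepping back to the dump utilities defined earlier: tcg_dump_ops() is this fork's debugging entry point; it walks s->ops and prints one line per op, restarting its per-instruction counter at every insn_start marker. A typical call site (headline text is arbitrary):

    tcg_dump_ops(s, false, "before liveness");
    /* ... run liveness_pass_1() / liveness_pass_2() ... */
    tcg_dump_ops(s, true, "after liveness");   /* true: also print reg prefs */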
*/ - if (!def->used) { - fprintf(stderr, "Missing op definition for %s\n", def->name); - i = 1; - } - } - } - if (i == 1) { - tcg_abort(); - } -#endif } -#ifdef USE_LIVENESS_ANALYSIS - -/* set a nop for an operation using 'nb_args' */ -static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr, - TCGArg *args, int nb_args) +void tcg_op_remove(TCGContext *s, TCGOp *op) { - if (nb_args == 0) { - *opc_ptr = INDEX_op_nop; + TCGLabel *label; + + switch (op->opc) { + case INDEX_op_br: + label = arg_label(op->args[0]); + label->refs--; + break; + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + label = arg_label(op->args[3]); + label->refs--; + break; + case INDEX_op_brcond2_i32: + label = arg_label(op->args[5]); + label->refs--; + break; + default: + break; + } + + QTAILQ_REMOVE(&s->ops, op, link); + QTAILQ_INSERT_TAIL(&s->free_ops, op, link); + s->nb_ops--; +} + +static TCGOp *tcg_op_alloc(TCGContext *s, TCGOpcode opc) +{ + TCGOp *op; + + if (likely(QTAILQ_EMPTY(&s->free_ops))) { + op = tcg_malloc(s, sizeof(TCGOp)); } else { - *opc_ptr = INDEX_op_nopn; - args[0] = nb_args; - args[nb_args - 1] = nb_args; + op = QTAILQ_FIRST(&s->free_ops); + QTAILQ_REMOVE(&s->free_ops, op, link); + } + memset(op, 0, offsetof(TCGOp, link)); + op->opc = opc; + s->nb_ops++; + + return op; +} + +TCGOp *tcg_emit_op(TCGContext *tcg_ctx, TCGOpcode opc) +{ + TCGOp *op = tcg_op_alloc(tcg_ctx, opc); + QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link); + return op; +} + +TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc) +{ + TCGOp *new_op = tcg_op_alloc(s, opc); + QTAILQ_INSERT_BEFORE(old_op, new_op, link); + return new_op; +} + +TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc) +{ + TCGOp *new_op = tcg_op_alloc(s, opc); + QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link); + return new_op; +} + +/* Reachable analysis : remove unreachable code. */ +static void reachable_code_pass(TCGContext *s) +{ + TCGOp *op, *op_next; + bool dead = false; + + QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { + bool remove = dead; + TCGLabel *label; + int call_flags; + + switch (op->opc) { + case INDEX_op_set_label: + label = arg_label(op->args[0]); + if (label->refs == 0) { + /* + * While there is an occasional backward branch, virtually + * all branches generated by the translators are forward. + * Which means that generally we will have already removed + * all references to the label that will be, and there is + * little to be gained by iterating. + */ + remove = true; + } else { + /* Once we see a label, insns become live again. */ + dead = false; + remove = false; + + /* + * Optimization can fold conditional branches to unconditional. + * If we find a label with one reference which is preceded by + * an unconditional branch to it, remove both. This needed to + * wait until the dead code in between them was removed. + */ + if (label->refs == 1) { + TCGOp *op_prev = QTAILQ_PREV(op, link); + if (op_prev->opc == INDEX_op_br && + label == arg_label(op_prev->args[0])) { + tcg_op_remove(s, op_prev); + remove = true; + } + } + } + break; + + case INDEX_op_br: + case INDEX_op_exit_tb: + case INDEX_op_goto_ptr: + /* Unconditional branches; everything following is dead. */ + dead = true; + break; + + case INDEX_op_call: + /* Notice noreturn helper calls, raising exceptions. */ + call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1]; + if (call_flags & TCG_CALL_NO_RETURN) { + dead = true; + } + break; + + case INDEX_op_insn_start: + /* Never remove -- we need to keep these for unwind. 
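tcg_op_alloc()/tcg_emit_op(), the two insertion helpers, and tcg_op_remove() above are the only ways ops enter or leave s->ops; removed ops are parked on s->free_ops for reuse rather than freed. A sketch of the idiom that liveness_pass_2 below is built on (the helper name is hypothetical):

    /* Replace an arbitrary op with an explicit 32-bit load of 'val' from
     * base+ofs; the displaced op goes back onto s->free_ops. */
    static void replace_with_load(TCGContext *s, TCGOp *op,
                                  TCGTemp *val, TCGTemp *base, intptr_t ofs)
    {
        TCGOp *ld = tcg_op_insert_before(s, op, INDEX_op_ld_i32);

        ld->args[0] = temp_arg(val);
        ld->args[1] = temp_arg(base);
        ld->args[2] = ofs;
        tcg_op_remove(s, op);
    }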
*/ + remove = false; + break; + + default: + break; + } + + if (remove) { + tcg_op_remove(s, op); + } } } -/* liveness analysis: end of function: all temps are dead, and globals - should be in memory. */ -static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps, - uint8_t *mem_temps) +#define TS_DEAD 1 +#define TS_MEM 2 + +#define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n))) +#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n))) + +/* For liveness_pass_1, the register preferences for a given temp. */ +static inline TCGRegSet *la_temp_pref(TCGTemp *ts) { - memset(dead_temps, 1, s->nb_temps); - memset(mem_temps, 1, s->nb_globals); - memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals); + return ts->state_ptr; } -/* liveness analysis: end of basic block: all temps are dead, globals - and local temps should be in memory. */ -static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps, - uint8_t *mem_temps) +/* For liveness_pass_1, reset the preferences for a given temp to the + * maximal regset for its type. + */ +static inline void la_reset_pref(TCGContext *tcg_ctx, TCGTemp *ts) { - int i; - - memset(dead_temps, 1, s->nb_temps); - memset(mem_temps, 1, s->nb_globals); - for(i = s->nb_globals; i < s->nb_temps; i++) { - mem_temps[i] = s->temps[i].temp_local; - } + *la_temp_pref(ts) + = (ts->state == TS_DEAD ? 0 : tcg_ctx->tcg_target_available_regs[ts->type]); } /* Unicorn: for brcond, we should refresh liveness states for TCG globals */ -static inline void tcg_la_br_end(TCGContext *s, uint8_t *mem_temps) +static void la_brcond_end(TCGContext *s, int ng) { int i; - memset(mem_temps, 1, s->nb_globals); - for(i = s->nb_globals; i < s->nb_temps; i++) { - mem_temps[i] = s->temps[i].temp_local; + + for (i = 0; i < ng; i++) { + s->temps[i].state |= TS_MEM; } } -/* Liveness analysis : update the opc_dead_args array to tell if a +/* liveness analysis: end of function: all temps are dead, and globals + should be in memory. */ +static void la_func_end(TCGContext *s, int ng, int nt) +{ + int i; + + for (i = 0; i < ng; ++i) { + s->temps[i].state = TS_DEAD | TS_MEM; + la_reset_pref(s, &s->temps[i]); + } + for (i = ng; i < nt; ++i) { + s->temps[i].state = TS_DEAD; + la_reset_pref(s, &s->temps[i]); + } +} + +/* liveness analysis: end of basic block: all temps are dead, globals + and local temps should be in memory. */ +static void la_bb_end(TCGContext *s, int ng, int nt) +{ + int i; + + for (i = 0; i < ng; ++i) { + s->temps[i].state = TS_DEAD | TS_MEM; + la_reset_pref(s, &s->temps[i]); + } + for (i = ng; i < nt; ++i) { + s->temps[i].state = (s->temps[i].temp_local + ? TS_DEAD | TS_MEM + : TS_DEAD); + la_reset_pref(s, &s->temps[i]); + } +} + +/* liveness analysis: sync globals back to memory. */ +static void la_global_sync(TCGContext *s, int ng) +{ + int i; + + for (i = 0; i < ng; ++i) { + int state = s->temps[i].state; + s->temps[i].state = state | TS_MEM; + if (state == TS_DEAD) { + /* If the global was previously dead, reset prefs. */ + la_reset_pref(s, &s->temps[i]); + } + } +} + +/* liveness analysis: sync globals back to memory and kill. */ +static void la_global_kill(TCGContext *s, int ng) +{ + int i; + + for (i = 0; i < ng; i++) { + s->temps[i].state = TS_DEAD | TS_MEM; + la_reset_pref(s, &s->temps[i]); + } +} + +/* liveness analysis: note live globals crossing calls. 
*/ +static void la_cross_call(TCGContext *s, int nt) +{ + TCGRegSet mask = ~(s->tcg_target_call_clobber_regs); + int i; + + for (i = 0; i < nt; i++) { + TCGTemp *ts = &s->temps[i]; + if (!(ts->state & TS_DEAD)) { + TCGRegSet *pset = la_temp_pref(ts); + TCGRegSet set = *pset; + + set &= mask; + /* If the combination is not possible, restart. */ + if (set == 0) { + set = s->tcg_target_available_regs[ts->type] & mask; + } + *pset = set; + } + } +} + +/* Liveness analysis : update the opc_arg_life array to tell if a given input arguments is dead. Instructions updating dead temporaries are removed. */ -static void tcg_liveness_analysis(TCGContext *s) +static void liveness_pass_1(TCGContext *s) { - int i, op_index, nb_args, nb_iargs, nb_oargs, nb_ops; - TCGOpcode op, op_new, op_new2; - TCGArg *args, arg; - const TCGOpDef *def; - uint8_t *dead_temps, *mem_temps; - uint16_t dead_args; - uint8_t sync_args; - bool have_op_new2; + int nb_globals = s->nb_globals; + int nb_temps = s->nb_temps; + TCGOp *op, *op_prev; + TCGRegSet *prefs; + int i; - s->gen_opc_ptr++; /* skip end */ + prefs = tcg_malloc(s, sizeof(TCGRegSet) * nb_temps); + for (i = 0; i < nb_temps; ++i) { + s->temps[i].state_ptr = prefs + i; + } - nb_ops = s->gen_opc_ptr - s->gen_opc_buf; + /* ??? Should be redundant with the exit_tb that ends the TB. */ + la_func_end(s, nb_globals, nb_temps); - s->op_dead_args = tcg_malloc(s, nb_ops * sizeof(uint16_t)); - s->op_sync_args = tcg_malloc(s, nb_ops * sizeof(uint8_t)); + QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) { + int nb_iargs, nb_oargs; + TCGOpcode opc_new, opc_new2; + bool have_opc_new2; + TCGLifeData arg_life = 0; + TCGTemp *ts; + TCGOpcode opc = op->opc; + const TCGOpDef *def = &s->tcg_op_defs[opc]; - dead_temps = tcg_malloc(s, s->nb_temps); - mem_temps = tcg_malloc(s, s->nb_temps); - tcg_la_func_end(s, dead_temps, mem_temps); - - args = s->gen_opparam_ptr; - op_index = nb_ops - 1; - while (op_index >= 0) { - op = s->gen_opc_buf[op_index]; - def = &s->tcg_op_defs[op]; - switch(op) { + switch (opc) { case INDEX_op_call: { int call_flags; + int nb_call_regs; - nb_args = args[-1]; - args -= nb_args; - arg = *args++; - nb_iargs = arg & 0xffff; - nb_oargs = arg >> 16; - call_flags = args[nb_oargs + nb_iargs + 1]; + nb_oargs = TCGOP_CALLO(op); + nb_iargs = TCGOP_CALLI(op); + call_flags = op->args[nb_oargs + nb_iargs + 1]; - /* pure functions can be removed if their result is not - used */ + /* pure functions can be removed if their result is unused */ if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) { for (i = 0; i < nb_oargs; i++) { - arg = args[i]; - if (!dead_temps[arg] || mem_temps[arg]) { + ts = arg_temp(op->args[i]); + if (ts->state != TS_DEAD) { goto do_not_remove_call; } } - tcg_set_nop(s, s->gen_opc_buf + op_index, - args - 1, nb_args); - } else { - do_not_remove_call: - - /* output args are dead */ - dead_args = 0; - sync_args = 0; - for (i = 0; i < nb_oargs; i++) { - arg = args[i]; - if (dead_temps[arg]) { - dead_args |= (1 << i); - } - if (mem_temps[arg]) { - sync_args |= (1 << i); - } - dead_temps[arg] = 1; - mem_temps[arg] = 0; - } - - if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) { - /* globals should be synced to memory */ - memset(mem_temps, 1, s->nb_globals); - } - if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS | - TCG_CALL_NO_READ_GLOBALS))) { - /* globals should go back to memory */ - memset(dead_temps, 1, s->nb_globals); - } - - /* input args are live */ - for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { - arg = args[i]; - if (arg != TCG_CALL_DUMMY_ARG) { - if 
(dead_temps[arg]) { - dead_args |= (1 << i); - } - dead_temps[arg] = 0; - } - } - s->op_dead_args[op_index] = dead_args; - s->op_sync_args[op_index] = sync_args; + goto do_remove; + } + do_not_remove_call: + + /* Output args are dead. */ + for (i = 0; i < nb_oargs; i++) { + ts = arg_temp(op->args[i]); + if (ts->state & TS_DEAD) { + arg_life |= DEAD_ARG << i; + } + if (ts->state & TS_MEM) { + arg_life |= SYNC_ARG << i; + } + ts->state = TS_DEAD; + la_reset_pref(s, ts); + + /* Not used -- it will be tcg_target_call_oarg_regs[i]. */ + op->output_pref[i] = 0; + } + + if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS | + TCG_CALL_NO_READ_GLOBALS))) { + la_global_kill(s, nb_globals); + } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) { + la_global_sync(s, nb_globals); + } + + /* Record arguments that die in this helper. */ + for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { + ts = arg_temp(op->args[i]); + if (ts && ts->state & TS_DEAD) { + arg_life |= DEAD_ARG << i; + } + } + + /* For all live registers, remove call-clobbered prefs. */ + la_cross_call(s, nb_temps); + + nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs); + + /* Input arguments are live for preceding opcodes. */ + for (i = 0; i < nb_iargs; i++) { + ts = arg_temp(op->args[i + nb_oargs]); + if (ts && ts->state & TS_DEAD) { + /* For those arguments that die, and will be allocated + * in registers, clear the register set for that arg, + * to be filled in below. For args that will be on + * the stack, reset to any available reg. + */ + *la_temp_pref(ts) + = (i < nb_call_regs ? 0 : + s->tcg_target_available_regs[ts->type]); + ts->state &= ~TS_DEAD; + } + } + + /* For each input argument, add its input register to prefs. + If a temp is used once, this produces a single set bit. */ + for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) { + ts = arg_temp(op->args[i + nb_oargs]); + if (ts) { + tcg_regset_set_reg(*la_temp_pref(ts), + tcg_target_call_iarg_regs[i]); + } } - args--; } break; - case INDEX_op_debug_insn_start: - args -= def->nb_args; - break; - case INDEX_op_nopn: - nb_args = args[-1]; - args -= nb_args; + case INDEX_op_insn_start: break; case INDEX_op_discard: - args--; /* mark the temporary as dead */ - dead_temps[args[0]] = 1; - mem_temps[args[0]] = 0; - break; - case INDEX_op_end: + ts = arg_temp(op->args[0]); + ts->state = TS_DEAD; + la_reset_pref(s, ts); break; case INDEX_op_add2_i32: - op_new = INDEX_op_add_i32; + opc_new = INDEX_op_add_i32; goto do_addsub2; case INDEX_op_sub2_i32: - op_new = INDEX_op_sub_i32; + opc_new = INDEX_op_sub_i32; goto do_addsub2; case INDEX_op_add2_i64: - op_new = INDEX_op_add_i64; + opc_new = INDEX_op_add_i64; goto do_addsub2; case INDEX_op_sub2_i64: - op_new = INDEX_op_sub_i64; + opc_new = INDEX_op_sub_i64; do_addsub2: - args -= 6; nb_iargs = 4; nb_oargs = 2; /* Test if the high part of the operation is dead, but not the low part. The result can be optimized to a simple add or sub. This happens often for x86_64 guest when the cpu mode is set to 32 bit. */ - if (dead_temps[args[1]] && !mem_temps[args[1]]) { - if (dead_temps[args[0]] && !mem_temps[args[0]]) { + if (arg_temp(op->args[1])->state == TS_DEAD) { + if (arg_temp(op->args[0])->state == TS_DEAD) { goto do_remove; } - /* Create the single operation plus nop. 
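The rewrite works because of the fixed argument layout: add2/sub2 take (out_lo, out_hi, a_lo, a_hi, b_lo, b_hi), so when out_hi is dead the op can be narrowed in place, as the new code below does. The same shuffle in isolation:

    /* Fold (lo,hi) = add2(al,ah,bl,bh) into lo = add(al,bl) once the high
     * output is known dead; args[3..5] simply become unused. */
    static void fold_add2_i32(TCGOp *op)
    {
        op->opc = INDEX_op_add_i32;
        op->args[1] = op->args[2];   /* a_lo into input slot 0 */
        op->args[2] = op->args[4];   /* b_lo into input slot 1 */
    }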
*/ - s->gen_opc_buf[op_index] = op = op_new; - args[1] = args[2]; - args[2] = args[4]; - assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop); - tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 3); + /* Replace the opcode and adjust the args in place, + leaving 3 unused args at the end. */ + op->opc = opc = opc_new; + op->args[1] = op->args[2]; + op->args[2] = op->args[4]; /* Fall through and mark the single-word operation live. */ nb_iargs = 2; nb_oargs = 1; @@ -1660,57 +2433,52 @@ static void tcg_liveness_analysis(TCGContext *s) goto do_not_remove; case INDEX_op_mulu2_i32: - op_new = INDEX_op_mul_i32; - op_new2 = INDEX_op_muluh_i32; - have_op_new2 = TCG_TARGET_HAS_muluh_i32; + opc_new = INDEX_op_mul_i32; + opc_new2 = INDEX_op_muluh_i32; + have_opc_new2 = TCG_TARGET_HAS_muluh_i32; goto do_mul2; case INDEX_op_muls2_i32: - op_new = INDEX_op_mul_i32; - op_new2 = INDEX_op_mulsh_i32; - have_op_new2 = TCG_TARGET_HAS_mulsh_i32; + opc_new = INDEX_op_mul_i32; + opc_new2 = INDEX_op_mulsh_i32; + have_opc_new2 = TCG_TARGET_HAS_mulsh_i32; goto do_mul2; case INDEX_op_mulu2_i64: - op_new = INDEX_op_mul_i64; - op_new2 = INDEX_op_muluh_i64; - have_op_new2 = TCG_TARGET_HAS_muluh_i64; + opc_new = INDEX_op_mul_i64; + opc_new2 = INDEX_op_muluh_i64; + have_opc_new2 = TCG_TARGET_HAS_muluh_i64; goto do_mul2; case INDEX_op_muls2_i64: - op_new = INDEX_op_mul_i64; - op_new2 = INDEX_op_mulsh_i64; - have_op_new2 = TCG_TARGET_HAS_mulsh_i64; + opc_new = INDEX_op_mul_i64; + opc_new2 = INDEX_op_mulsh_i64; + have_opc_new2 = TCG_TARGET_HAS_mulsh_i64; goto do_mul2; do_mul2: - args -= 4; nb_iargs = 2; nb_oargs = 2; - if (dead_temps[args[1]] && !mem_temps[args[1]]) { - if (dead_temps[args[0]] && !mem_temps[args[0]]) { + if (arg_temp(op->args[1])->state == TS_DEAD) { + if (arg_temp(op->args[0])->state == TS_DEAD) { /* Both parts of the operation are dead. */ goto do_remove; } /* The high part of the operation is dead; generate the low. */ - s->gen_opc_buf[op_index] = op = op_new; - args[1] = args[2]; - args[2] = args[3]; - } else if (have_op_new2 && dead_temps[args[0]] - && !mem_temps[args[0]]) { - /* The low part of the operation is dead; generate the high. */ - s->gen_opc_buf[op_index] = op = op_new2; - args[0] = args[1]; - args[1] = args[2]; - args[2] = args[3]; + op->opc = opc = opc_new; + op->args[1] = op->args[2]; + op->args[2] = op->args[3]; + } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) { + /* The low part of the operation is dead; generate the high. */ + op->opc = opc = opc_new2; + op->args[0] = op->args[1]; + op->args[1] = op->args[2]; + op->args[2] = op->args[3]; } else { goto do_not_remove; } - assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop); - tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 1); /* Mark the single-word operation live. */ nb_oargs = 1; goto do_not_remove; default: /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */ - args -= def->nb_args; nb_iargs = def->nb_iargs; nb_oargs = def->nb_oargs; @@ -1718,93 +2486,275 @@ static void tcg_liveness_analysis(TCGContext *s) its outputs are dead. 
We assume that nb_oargs == 0 implies side effects */ if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) { - for(i = 0; i < nb_oargs; i++) { - if (args[i] >= TCG_MAX_TEMPS) { - continue; - } - arg = args[i]; - if (!dead_temps[arg] || mem_temps[arg]) { + for (i = 0; i < nb_oargs; i++) { + if (arg_temp(op->args[i])->state != TS_DEAD) { goto do_not_remove; } } - do_remove: - tcg_set_nop(s, s->gen_opc_buf + op_index, args, def->nb_args); -#ifdef CONFIG_PROFILER - s->del_op_count++; -#endif - } else { - do_not_remove: + goto do_remove; + } + goto do_not_remove; - /* output args are dead */ - dead_args = 0; - sync_args = 0; - for(i = 0; i < nb_oargs; i++) { - arg = args[i]; - if (dead_temps[arg]) { - dead_args |= (1 << i); - } - if (mem_temps[arg]) { - sync_args |= (1 << i); - } - dead_temps[arg] = 1; - mem_temps[arg] = 0; + do_remove: + tcg_op_remove(s, op); + break; + + do_not_remove: + for (i = 0; i < nb_oargs; i++) { + ts = arg_temp(op->args[i]); + + /* Remember the preference of the uses that followed. */ + op->output_pref[i] = *la_temp_pref(ts); + + /* Output args are dead. */ + if (ts->state & TS_DEAD) { + arg_life |= DEAD_ARG << i; } + if (ts->state & TS_MEM) { + arg_life |= SYNC_ARG << i; + } + ts->state = TS_DEAD; + la_reset_pref(s, ts); + } - /* if end of basic block, update */ - if (def->flags & TCG_OPF_BB_END) { - // Unicorn: do not optimize dead temps on brcond, - // this causes problem because check_exit_request() inserts - // brcond instruction in the middle of the TB, - // which incorrectly flags end-of-block - if (op != INDEX_op_brcond_i32) - tcg_la_bb_end(s, dead_temps, mem_temps); + /* If end of basic block, update. */ + if (def->flags & TCG_OPF_BB_EXIT) { + la_func_end(s, nb_globals, nb_temps); + } else if (def->flags & TCG_OPF_BB_END) { + // Unicorn: do not optimize dead temps on brcond, + // this causes problem because check_exit_request() inserts + // brcond instruction in the middle of the TB, + // which incorrectly flags end-of-block + if (opc != INDEX_op_brcond_i32) { + la_bb_end(s, nb_globals, nb_temps); + } else { // Unicorn: we do not touch dead temps for brcond, // but we should refresh TCG globals In-Memory states, // otherwise, important CPU states(especially conditional flags) might be forgotten, // result in wrongly generated host code that run into wrong branch. // Refer to https://github.com/unicorn-engine/unicorn/issues/287 for further information - else - tcg_la_br_end(s, mem_temps); - } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { - /* globals should be synced to memory */ - memset(mem_temps, 1, s->nb_globals); + la_brcond_end(s, nb_globals); } + } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { + la_global_sync(s, nb_globals); + if (def->flags & TCG_OPF_CALL_CLOBBER) { + la_cross_call(s, nb_temps); + } + } - /* input args are live */ - for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) { - arg = args[i]; - if (dead_temps[arg]) { - dead_args |= (1 << i); - } - dead_temps[arg] = 0; + /* Record arguments that die in this opcode. */ + for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + ts = arg_temp(op->args[i]); + if (ts->state & TS_DEAD) { + arg_life |= DEAD_ARG << i; } - s->op_dead_args[op_index] = dead_args; - s->op_sync_args[op_index] = sync_args; + } + + /* Input arguments are live for preceding opcodes. */ + for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + ts = arg_temp(op->args[i]); + if (ts->state & TS_DEAD) { + /* For operands that were dead, initially allow + all regs for the type. 
*/ + *la_temp_pref(ts) = s->tcg_target_available_regs[ts->type]; + ts->state &= ~TS_DEAD; + } + } + + /* Incorporate constraints for this operand. */ + switch (opc) { + case INDEX_op_mov_i32: + case INDEX_op_mov_i64: + /* Note that these are TCG_OPF_NOT_PRESENT and do not + have proper constraints. That said, special case + moves to propagate preferences backward. */ + if (IS_DEAD_ARG(1)) { + *la_temp_pref(arg_temp(op->args[0])) + = *la_temp_pref(arg_temp(op->args[1])); + } + break; + + default: + for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + const TCGArgConstraint *ct = &def->args_ct[i]; + TCGRegSet set, *pset; + + ts = arg_temp(op->args[i]); + pset = la_temp_pref(ts); + set = *pset; + + set &= ct->u.regs; + if (ct->ct & TCG_CT_IALIAS) { + set &= op->output_pref[ct->alias_index]; + } + /* If the combination is not possible, restart. */ + if (set == 0) { + set = ct->u.regs; + } + *pset = set; + } + break; } break; } - op_index--; - } - - if (args != s->gen_opparam_buf) { - tcg_abort(); + op->life = arg_life; } } -#else -/* dummy liveness analysis */ -static void tcg_liveness_analysis(TCGContext *s) + +/* Liveness analysis: Convert indirect regs to direct temporaries. */ +static bool liveness_pass_2(TCGContext *s) { - int nb_ops; - nb_ops = s->gen_opc_ptr - s->gen_opc_buf; + int nb_globals = s->nb_globals; + int nb_temps, i; + bool changes = false; + TCGOp *op, *op_next; - s->op_dead_args = tcg_malloc(s, nb_ops * sizeof(uint16_t)); - memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t)); - s->op_sync_args = tcg_malloc(s, nb_ops * sizeof(uint8_t)); - memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t)); + /* Create a temporary for each indirect global. */ + for (i = 0; i < nb_globals; ++i) { + TCGTemp *its = &s->temps[i]; + if (its->indirect_reg) { + TCGTemp *dts = tcg_temp_alloc(s); + dts->type = its->type; + dts->base_type = its->base_type; + its->state_ptr = dts; + } else { + its->state_ptr = NULL; + } + /* All globals begin dead. */ + its->state = TS_DEAD; + } + for (nb_temps = s->nb_temps; i < nb_temps; ++i) { + TCGTemp *its = &s->temps[i]; + its->state_ptr = NULL; + its->state = TS_DEAD; + } + + QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { + TCGOpcode opc = op->opc; + const TCGOpDef *def = &s->tcg_op_defs[opc]; + TCGLifeData arg_life = op->life; + int nb_iargs, nb_oargs, call_flags; + TCGTemp *arg_ts, *dir_ts; + + if (opc == INDEX_op_call) { + nb_oargs = TCGOP_CALLO(op); + nb_iargs = TCGOP_CALLI(op); + call_flags = op->args[nb_oargs + nb_iargs + 1]; + } else { + nb_iargs = def->nb_iargs; + nb_oargs = def->nb_oargs; + + /* Set flags similar to how calls require. */ + if (def->flags & TCG_OPF_BB_END) { + /* Like writing globals: save_globals */ + call_flags = 0; + } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { + /* Like reading globals: sync_globals */ + call_flags = TCG_CALL_NO_WRITE_GLOBALS; + } else { + /* No effect on globals. */ + call_flags = (TCG_CALL_NO_READ_GLOBALS | + TCG_CALL_NO_WRITE_GLOBALS); + } + } + + /* Make sure that input arguments are available. */ + for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { + arg_ts = arg_temp(op->args[i]); + if (arg_ts) { + dir_ts = arg_ts->state_ptr; + if (dir_ts && arg_ts->state == TS_DEAD) { + TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32 + ? INDEX_op_ld_i32 + : INDEX_op_ld_i64); + TCGOp *lop = tcg_op_insert_before(s, op, lopc); + + lop->args[0] = temp_arg(dir_ts); + lop->args[1] = temp_arg(arg_ts->mem_base); + lop->args[2] = arg_ts->mem_offset; + + /* Loaded, but synced with memory. 
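During this pass each global's state field tracks only its shadow temp: TS_DEAD means the shadow holds nothing (a load must be inserted before the next use), TS_MEM means it is loaded and still matches memory, and 0 means it is live but dirty. A one-line predicate capturing the reload rule (hypothetical helper):

    /* True if global 'g' is indirect and its shadow temp needs a reload. */
    static bool needs_reload(const TCGTemp *g)
    {
        return g->state_ptr != NULL && g->state == TS_DEAD;
    }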
*/ + arg_ts->state = TS_MEM; + } + } + } + + /* Perform input replacement, and mark inputs that became dead. + No action is required except keeping temp_state up to date + so that we reload when needed. */ + for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { + arg_ts = arg_temp(op->args[i]); + if (arg_ts) { + dir_ts = arg_ts->state_ptr; + if (dir_ts) { + op->args[i] = temp_arg(dir_ts); + changes = true; + if (IS_DEAD_ARG(i)) { + arg_ts->state = TS_DEAD; + } + } + } + } + + /* Liveness analysis should ensure that the following are + all correct, for call sites and basic block end points. */ + if (call_flags & TCG_CALL_NO_READ_GLOBALS) { + /* Nothing to do */ + } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) { + for (i = 0; i < nb_globals; ++i) { + /* Liveness should see that globals are synced back, + that is, either TS_DEAD or TS_MEM. */ + arg_ts = &s->temps[i]; + tcg_debug_assert(arg_ts->state_ptr == 0 + || arg_ts->state != 0); + } + } else { + for (i = 0; i < nb_globals; ++i) { + /* Liveness should see that globals are saved back, + that is, TS_DEAD, waiting to be reloaded. */ + arg_ts = &s->temps[i]; + tcg_debug_assert(arg_ts->state_ptr == 0 + || arg_ts->state == TS_DEAD); + } + } + + /* Outputs become available. */ + for (i = 0; i < nb_oargs; i++) { + arg_ts = arg_temp(op->args[i]); + dir_ts = arg_ts->state_ptr; + if (!dir_ts) { + continue; + } + op->args[i] = temp_arg(dir_ts); + changes = true; + + /* The output is now live and modified. */ + arg_ts->state = 0; + + /* Sync outputs upon their last write. */ + if (NEED_SYNC_ARG(i)) { + TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32 + ? INDEX_op_st_i32 + : INDEX_op_st_i64); + TCGOp *sop = tcg_op_insert_after(s, op, sopc); + + sop->args[0] = temp_arg(dir_ts); + sop->args[1] = temp_arg(arg_ts->mem_base); + sop->args[2] = arg_ts->mem_offset; + + arg_ts->state = TS_MEM; + } + /* Drop outputs that are dead. 
*/ + if (IS_DEAD_ARG(i)) { + arg_ts->state = TS_DEAD; + } + } + } + + return changes; } -#endif -#ifndef NDEBUG +#ifdef CONFIG_DEBUG_TCG static void dump_regs(TCGContext *s) { TCGTemp *ts; @@ -1813,13 +2763,14 @@ static void dump_regs(TCGContext *s) for(i = 0; i < s->nb_temps; i++) { ts = &s->temps[i]; - printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i)); + printf(" %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts)); switch(ts->val_type) { case TEMP_VAL_REG: printf("%s", tcg_target_reg_names[ts->reg]); break; case TEMP_VAL_MEM: - printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]); + printf("%d(%s)", (int)ts->mem_offset, + tcg_target_reg_names[ts->mem_base->reg]); break; case TEMP_VAL_CONST: printf("$0x%" TCG_PRIlx, ts->val); @@ -1835,52 +2786,48 @@ static void dump_regs(TCGContext *s) } for(i = 0; i < TCG_TARGET_NB_REGS; i++) { - if (s->reg_to_temp[i] >= 0) { - printf("%s: %s\n", - tcg_target_reg_names[i], - tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i])); + if (s->reg_to_temp[i] != NULL) { + printf("%s: %s\n", + tcg_target_reg_names[i], + tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i])); } } } static void check_regs(TCGContext *s) { - int reg, k; + int reg; + int k; TCGTemp *ts; char buf[64]; - for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { - k = s->reg_to_temp[reg]; - if (k >= 0) { - ts = &s->temps[k]; - if (ts->val_type != TEMP_VAL_REG || - ts->reg != reg) { - printf("Inconsistency for register %s:\n", + for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { + ts = s->reg_to_temp[reg]; + if (ts != NULL) { + if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) { + printf("Inconsistency for register %s:\n", tcg_target_reg_names[reg]); goto fail; } } } - for(k = 0; k < s->nb_temps; k++) { + for (k = 0; k < s->nb_temps; k++) { ts = &s->temps[k]; - if (ts->val_type == TEMP_VAL_REG && - !ts->fixed_reg && - s->reg_to_temp[ts->reg] != k) { - printf("Inconsistency for temp %s:\n", - tcg_get_arg_str_idx(s, buf, sizeof(buf), k)); + if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg + && s->reg_to_temp[ts->reg] != ts) { + printf("Inconsistency for temp %s:\n", + tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts)); fail: - printf("reg state:\n"); - dump_regs(s); - tcg_abort(); + printf("reg state:\n"); + dump_regs(s); + tcg_abort(); } } } #endif -static void temp_allocate_frame(TCGContext *s, int temp) +static void temp_allocate_frame(TCGContext *s, TCGTemp *ts) { - TCGTemp *ts; - ts = &s->temps[temp]; #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64) /* Sparc64 stack is accessed with offset of 2047 */ s->current_frame_offset = (s->current_frame_offset + @@ -1892,128 +2839,201 @@ static void temp_allocate_frame(TCGContext *s, int temp) tcg_abort(); } ts->mem_offset = s->current_frame_offset; - ts->mem_reg = s->frame_reg; + ts->mem_base = s->frame_temp; ts->mem_allocated = 1; s->current_frame_offset += sizeof(tcg_target_long); } -/* sync register 'reg' by saving it to the corresponding temporary */ -static inline void tcg_reg_sync(TCGContext *s, int reg) -{ - TCGTemp *ts; - int temp; +static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet); - temp = s->reg_to_temp[reg]; - ts = &s->temps[temp]; - assert(ts->val_type == TEMP_VAL_REG); - if (!ts->mem_coherent && !ts->fixed_reg) { - if (!ts->mem_allocated) { - temp_allocate_frame(s, temp); - } - tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset); +/* Mark a temporary as free or dead. If 'free_or_dead' is negative, + mark it free; otherwise mark it dead. 
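The sign convention documented here is worth spelling out: one integer both requests the release and encodes its kind, and "free" differs from "dead" only in whether the value remains reachable in memory for plain temps. A compilable restatement (zero, meaning "keep the temp", is omitted for brevity):

    #include <stdio.h>

    enum ValType { VAL_REG, VAL_MEM, VAL_DEAD };

    static enum ValType after_free_or_dead(int is_local_or_global, int dir)
    {
        /* dir < 0: free -> value stays reachable in its memory slot.
           dir > 0: dead -> locals/globals still fall back to memory,
                            plain temps are simply gone. */
        if (dir < 0 || is_local_or_global) {
            return VAL_MEM;
        }
        return VAL_DEAD;
    }

    int main(void)
    {
        printf("freed plain temp -> %d (VAL_MEM)\n",  after_free_or_dead(0, -1));
        printf("dead plain temp  -> %d (VAL_DEAD)\n", after_free_or_dead(0, +1));
        printf("dead local temp  -> %d (VAL_MEM)\n",  after_free_or_dead(1, +1));
        return 0;
    }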
*/ +static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead) +{ + if (ts->fixed_reg) { + return; + } + if (ts->val_type == TEMP_VAL_REG) { + s->reg_to_temp[ts->reg] = NULL; + } + ts->val_type = (free_or_dead < 0 + || ts->temp_local + || ts->temp_global + ? TEMP_VAL_MEM : TEMP_VAL_DEAD); +} + +/* Mark a temporary as dead. */ +static inline void temp_dead(TCGContext *s, TCGTemp *ts) +{ + temp_free_or_dead(s, ts, 1); +} + +/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary + registers needs to be allocated to store a constant. If 'free_or_dead' + is non-zero, subsequently release the temporary; if it is positive, the + temp is dead; if it is negative, the temp is free. */ +static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs, + TCGRegSet preferred_regs, int free_or_dead) +{ + if (ts->fixed_reg) { + return; + } + if (!ts->mem_coherent) { + if (!ts->mem_allocated) { + temp_allocate_frame(s, ts); + } + switch (ts->val_type) { + case TEMP_VAL_CONST: + /* If we're going to free the temp immediately, then we won't + require it later in a register, so attempt to store the + constant to memory directly. */ + if (free_or_dead + && tcg_out_sti(s, ts->type, ts->val, + ts->mem_base->reg, ts->mem_offset)) { + break; + } + temp_load(s, ts, s->tcg_target_available_regs[ts->type], + allocated_regs, preferred_regs); + /* fallthrough */ + + case TEMP_VAL_REG: + tcg_out_st(s, ts->type, ts->reg, + ts->mem_base->reg, ts->mem_offset); + break; + + case TEMP_VAL_MEM: + break; + + case TEMP_VAL_DEAD: + default: + tcg_abort(); + } + ts->mem_coherent = 1; + } + if (free_or_dead) { + temp_free_or_dead(s, ts, free_or_dead); } - ts->mem_coherent = 1; } /* free register 'reg' by spilling the corresponding temporary if necessary */ -static void tcg_reg_free(TCGContext *s, int reg) +static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs) { - int temp; - - temp = s->reg_to_temp[reg]; - if (temp != -1) { - tcg_reg_sync(s, reg); - s->temps[temp].val_type = TEMP_VAL_MEM; - s->reg_to_temp[reg] = -1; + TCGTemp *ts = s->reg_to_temp[reg]; + if (ts != NULL) { + temp_sync(s, ts, allocated_regs, 0, -1); } } -/* Allocate a register belonging to reg1 & ~reg2 */ -static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2) +/** + * tcg_reg_alloc: + * @required_regs: Set of registers in which we must allocate. + * @allocated_regs: Set of registers which must be avoided. + * @preferred_regs: Set of registers we should prefer. + * @rev: True if we search the registers in "indirect" order. + * + * The allocated register must be in @required_regs & ~@allocated_regs, + * but if we can put it in @preferred_regs we may save a move later. + */ +static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs, + TCGRegSet allocated_regs, + TCGRegSet preferred_regs, bool rev) { - int i, reg; - TCGRegSet reg_ct; + int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order); + TCGRegSet reg_ct[2]; + const int *order; - tcg_regset_andnot(reg_ct, reg1, reg2); + reg_ct[1] = required_regs & ~allocated_regs; + tcg_debug_assert(reg_ct[1] != 0); + reg_ct[0] = reg_ct[1] & preferred_regs; - /* first try free registers */ - for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) { - reg = tcg_target_reg_alloc_order[i]; - if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1) - return reg; + /* Skip the preferred_regs option if it cannot be satisfied, + or if the preference made no difference. 
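tcg_reg_alloc's two-tier search deserves a small model: try the preferred subset first unless it is empty or identical to the required set, then fall back to the full set. A simplified sketch using plain bitmask register sets; the real code walks the target's allocation order and spills when nothing is free, and __builtin_ctz is a GCC/Clang builtin used here just to pick a bit:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t RegSet;        /* bit i set => register i usable */

    static int pick_reg(RegSet required, RegSet preferred, RegSet free_regs)
    {
        RegSet tier[2] = { required & preferred, required };
        /* Skip the preference tier if unsatisfiable or redundant,
           exactly as the `f = ...` test below does. */
        int t = (tier[0] == 0 || tier[0] == tier[1]);
        for (; t < 2; t++) {
            RegSet avail = tier[t] & free_regs;
            if (avail) {
                return __builtin_ctz(avail);
            }
        }
        return -1;  /* caller would spill something from `required` */
    }

    int main(void)
    {
        /* required {r0..r3}, preferred {r2}, free {r0, r2} -> r2 */
        printf("picked r%d\n", pick_reg(0x0f, 0x04, 0x05));
        return 0;
    }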
*/ + f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1]; + + order = rev ? s->indirect_reg_alloc_order : tcg_target_reg_alloc_order; + + /* Try free registers, preferences first. */ + for (j = f; j < 2; j++) { + TCGRegSet set = reg_ct[j]; + + if (tcg_regset_single(set)) { + /* One register in the set. */ + TCGReg reg = tcg_regset_first(set); + if (s->reg_to_temp[reg] == NULL) { + return reg; + } + } else { + for (i = 0; i < n; i++) { + TCGReg reg = order[i]; + if (s->reg_to_temp[reg] == NULL && + tcg_regset_test_reg(set, reg)) { + return reg; + } + } + } } - /* XXX: do better spill choice */ - for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) { - reg = tcg_target_reg_alloc_order[i]; - if (tcg_regset_test_reg(reg_ct, reg)) { - tcg_reg_free(s, reg); + /* We must spill something. */ + for (j = f; j < 2; j++) { + TCGRegSet set = reg_ct[j]; + + if (tcg_regset_single(set)) { + /* One register in the set. */ + TCGReg reg = tcg_regset_first(set); + tcg_reg_free(s, reg, allocated_regs); return reg; + } else { + for (i = 0; i < n; i++) { + TCGReg reg = order[i]; + if (tcg_regset_test_reg(set, reg)) { + tcg_reg_free(s, reg, allocated_regs); + return reg; + } + } } } tcg_abort(); } -/* mark a temporary as dead. */ -static inline void temp_dead(TCGContext *s, int temp) +/* Make sure the temporary is in a register. If needed, allocate the register + from DESIRED while avoiding ALLOCATED. */ +static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs, + TCGRegSet allocated_regs, TCGRegSet preferred_regs) { - TCGTemp *ts; + TCGReg reg; - ts = &s->temps[temp]; - if (!ts->fixed_reg) { - if (ts->val_type == TEMP_VAL_REG) { - s->reg_to_temp[ts->reg] = -1; - } - if (temp < s->nb_globals || ts->temp_local) { - ts->val_type = TEMP_VAL_MEM; - } else { - ts->val_type = TEMP_VAL_DEAD; - } + switch (ts->val_type) { + case TEMP_VAL_REG: + return; + case TEMP_VAL_CONST: + reg = tcg_reg_alloc(s, desired_regs, allocated_regs, + preferred_regs, ts->indirect_base); + tcg_out_movi(s, ts->type, reg, ts->val); + ts->mem_coherent = 0; + break; + case TEMP_VAL_MEM: + reg = tcg_reg_alloc(s, desired_regs, allocated_regs, + preferred_regs, ts->indirect_base); + tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset); + ts->mem_coherent = 1; + break; + case TEMP_VAL_DEAD: + default: + tcg_abort(); } + ts->reg = reg; + ts->val_type = TEMP_VAL_REG; + s->reg_to_temp[reg] = ts; } -/* sync a temporary to memory. 'allocated_regs' is used in case a - temporary registers needs to be allocated to store a constant. */ -static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs) +/* Save a temporary to memory. 'allocated_regs' is used in case a + temporary registers needs to be allocated to store a constant. */ +static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs) { - TCGTemp *ts; - - ts = &s->temps[temp]; - if (!ts->fixed_reg) { - switch(ts->val_type) { - case TEMP_VAL_CONST: - ts->reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[ts->type], - allocated_regs); - ts->val_type = TEMP_VAL_REG; - s->reg_to_temp[ts->reg] = temp; - ts->mem_coherent = 0; - tcg_out_movi(s, ts->type, ts->reg, ts->val); - /* fallthrough*/ - case TEMP_VAL_REG: - tcg_reg_sync(s, ts->reg); - break; - case TEMP_VAL_DEAD: - case TEMP_VAL_MEM: - break; - default: - tcg_abort(); - } - } -} - -/* save a temporary to memory. 'allocated_regs' is used in case a - temporary registers needs to be allocated to store a constant. 
*/ -static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs) -{ -#ifdef USE_LIVENESS_ANALYSIS /* The liveness analysis already ensures that globals are back - in memory. Keep an assert for safety. */ - assert(s->temps[temp].val_type == TEMP_VAL_MEM || s->temps[temp].fixed_reg); -#else - temp_sync(s, temp, allocated_regs); - temp_dead(s, temp); -#endif + in memory. Keep an tcg_debug_assert for safety. */ + tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg); } /* save globals to their canonical location and assume they can be @@ -2021,10 +3041,10 @@ static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs) temporary registers needs to be allocated to store a constant. */ static void save_globals(TCGContext *s, TCGRegSet allocated_regs) { - int i; + int i, n; - for(i = 0; i < s->nb_globals; i++) { - temp_save(s, i, allocated_regs); + for (i = 0, n = s->nb_globals; i < n; i++) { + temp_save(s, &s->temps[i], allocated_regs); } } @@ -2033,15 +3053,13 @@ static void save_globals(TCGContext *s, TCGRegSet allocated_regs) temporary registers needs to be allocated to store a constant. */ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs) { - int i; + int i, n; - for (i = 0; i < s->nb_globals; i++) { -#ifdef USE_LIVENESS_ANALYSIS - assert(s->temps[i].val_type != TEMP_VAL_REG || s->temps[i].fixed_reg || - s->temps[i].mem_coherent); -#else - temp_sync(s, i, allocated_regs); -#endif + for (i = 0, n = s->nb_globals; i < n; i++) { + TCGTemp *ts = &s->temps[i]; + tcg_debug_assert(ts->val_type != TEMP_VAL_REG + || ts->fixed_reg + || ts->mem_coherent); } } @@ -2049,151 +3067,273 @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs) all globals are stored at their canonical location. */ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) { - TCGTemp *ts; - int i; + // Unicorn: We are inserting brcond in the middle of the TB so the + // assumptions here won't be satisfied. + // int i; - for(i = s->nb_globals; i < s->nb_temps; i++) { - ts = &s->temps[i]; - if (ts->temp_local) { - temp_save(s, i, allocated_regs); - } else { -#ifdef USE_LIVENESS_ANALYSIS - /* The liveness analysis already ensures that temps are dead. - Keep an assert for safety. */ - assert(ts->val_type == TEMP_VAL_DEAD); -#else - temp_dead(s, i); -#endif - } - } + // for (i = s->nb_globals; i < s->nb_temps; i++) { + // TCGTemp *ts = &s->temps[i]; + // if (ts->temp_local) { + // temp_save(s, ts, allocated_regs); + // } else { + // /* The liveness analysis already ensures that temps are dead. + // Keep an tcg_debug_assert for safety. */ + // tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD); + // } + // } - save_globals(s, allocated_regs); + // save_globals(s, allocated_regs); } -#define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1) -#define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1) - -static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args, - uint16_t dead_args, uint8_t sync_args) +/* + * Specialized code generation for INDEX_op_movi_*. + */ +static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots, + tcg_target_ulong val, TCGLifeData arg_life, + TCGRegSet preferred_regs) { - TCGTemp *ots; - tcg_target_ulong val; + /* ENV should not be modified. 
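tcg_reg_alloc_do_movi, continued just below, is deliberately lazy: no host instruction is emitted for the constant itself; the temp merely switches to TEMP_VAL_CONST, and the movi is materialized only if the value later needs a register or must be synced. A toy version of that deferral, with invented names:

    #include <stdio.h>

    enum { V_DEAD, V_CONST, V_REG };

    struct Temp { int vt; long val; int reg; };

    static void do_movi(struct Temp *t, long v)
    {
        t->vt = V_CONST;            /* record only; emit nothing yet */
        t->val = v;
    }

    static void need_in_reg(struct Temp *t, int reg)
    {
        if (t->vt == V_CONST) {
            printf("  movi r%d, %ld\n", reg, t->val);  /* pay only now */
            t->reg = reg;
            t->vt = V_REG;
        }
    }

    int main(void)
    {
        struct Temp t = { V_DEAD, 0, -1 };
        do_movi(&t, 42);       /* free: no code emitted           */
        do_movi(&t, 43);       /* overwrite: 42 never cost a byte */
        need_in_reg(&t, 0);    /* first real use: movi r0, 43     */
        return 0;
    }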
*/ + tcg_debug_assert(!ots->fixed_reg); - ots = &s->temps[args[0]]; - val = args[1]; - - if (ots->fixed_reg) { - /* for fixed registers, we do not do any constant - propagation */ - tcg_out_movi(s, ots->type, ots->reg, val); - } else { - /* The movi is not explicitly generated here */ - if (ots->val_type == TEMP_VAL_REG) - s->reg_to_temp[ots->reg] = -1; - ots->val_type = TEMP_VAL_CONST; - ots->val = val; + /* The movi is not explicitly generated here. */ + if (ots->val_type == TEMP_VAL_REG) { + s->reg_to_temp[ots->reg] = NULL; } + ots->val_type = TEMP_VAL_CONST; + ots->val = val; + ots->mem_coherent = 0; if (NEED_SYNC_ARG(0)) { - temp_sync(s, args[0], s->reserved_regs); - } - if (IS_DEAD_ARG(0)) { - temp_dead(s, args[0]); + temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0)); + } else if (IS_DEAD_ARG(0)) { + temp_dead(s, ots); } } -static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def, - const TCGArg *args, uint16_t dead_args, - uint8_t sync_args) +static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op) { - TCGRegSet allocated_regs; + TCGTemp *ots = arg_temp(op->args[0]); + tcg_target_ulong val = op->args[1]; + + tcg_reg_alloc_do_movi(s, ots, val, op->life, op->output_pref[0]); +} + +/* + * Specialized code generation for INDEX_op_mov_*. + */ +static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op) +{ + const TCGLifeData arg_life = op->life; + TCGRegSet allocated_regs, preferred_regs; TCGTemp *ts, *ots; TCGType otype, itype; - tcg_regset_set(allocated_regs, s->reserved_regs); - ots = &s->temps[args[0]]; - ts = &s->temps[args[1]]; + allocated_regs = s->reserved_regs; + preferred_regs = op->output_pref[0]; + ots = arg_temp(op->args[0]); + ts = arg_temp(op->args[1]); + + /* ENV should not be modified. */ + tcg_debug_assert(!ots->fixed_reg); /* Note that otype != itype for no-op truncation. */ otype = ots->type; itype = ts->type; - /* If the source value is not in a register, and we're going to be - forced to have it in a register in order to perform the copy, - then copy the SOURCE value into its own register first. That way - we don't have to reload SOURCE the next time it is used. */ - if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG) - || ts->val_type == TEMP_VAL_MEM) { - ts->reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[itype], - allocated_regs); - if (ts->val_type == TEMP_VAL_MEM) { - tcg_out_ld(s, itype, ts->reg, ts->mem_reg, ts->mem_offset); - ts->mem_coherent = 1; - } else if (ts->val_type == TEMP_VAL_CONST) { - tcg_out_movi(s, itype, ts->reg, ts->val); - ts->mem_coherent = 0; + if (ts->val_type == TEMP_VAL_CONST) { + /* propagate constant or generate sti */ + tcg_target_ulong val = ts->val; + if (IS_DEAD_ARG(1)) { + temp_dead(s, ts); } - s->reg_to_temp[ts->reg] = args[1]; - ts->val_type = TEMP_VAL_REG; + tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs); + return; } - if (IS_DEAD_ARG(0) && !ots->fixed_reg) { + /* If the source value is in memory we're going to be forced + to have it in a register in order to perform the copy. Copy + the SOURCE value into its own register first, that way we + don't have to reload SOURCE the next time it is used. */ + if (ts->val_type == TEMP_VAL_MEM) { + temp_load(s, ts, s->tcg_target_available_regs[itype], + allocated_regs, preferred_regs); + } + + tcg_debug_assert(ts->val_type == TEMP_VAL_REG); + if (IS_DEAD_ARG(0)) { /* mov to a non-saved dead register makes no sense (even with liveness analysis disabled). 
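The move-suppression case handled a few lines below is the interesting one: when the source dies at this mov and is not a fixed register, no host copy is emitted at all; the destination simply takes over the source's register. Schematically (a sketch, not the real data structures):

    #include <stdio.h>

    /* Returns the register now holding the destination. */
    static int emit_mov(int dst_reg, int src_reg, int src_dies_here)
    {
        if (src_dies_here) {
            return src_reg;          /* copy suppressed: steal the reg */
        }
        printf("  mov r%d, r%d\n", dst_reg, src_reg);
        return dst_reg;
    }

    int main(void)
    {
        int a = emit_mov(1, 0, 1);   /* src dead: no instruction */
        int b = emit_mov(2, 0, 0);   /* src live: real copy      */
        printf("a lives in r%d, b in r%d\n", a, b);
        return 0;
    }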
*/ - assert(NEED_SYNC_ARG(0)); - /* The code above should have moved the temp to a register. */ - assert(ts->val_type == TEMP_VAL_REG); + tcg_debug_assert(NEED_SYNC_ARG(0)); if (!ots->mem_allocated) { - temp_allocate_frame(s, args[0]); + temp_allocate_frame(s, ots); } - tcg_out_st(s, otype, ts->reg, ots->mem_reg, ots->mem_offset); + tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset); if (IS_DEAD_ARG(1)) { - temp_dead(s, args[1]); + temp_dead(s, ts); } - temp_dead(s, args[0]); - } else if (ts->val_type == TEMP_VAL_CONST) { - /* propagate constant */ - if (ots->val_type == TEMP_VAL_REG) { - s->reg_to_temp[ots->reg] = -1; - } - ots->val_type = TEMP_VAL_CONST; - ots->val = ts->val; + temp_dead(s, ots); } else { - /* The code in the first if block should have moved the - temp to a register. */ - assert(ts->val_type == TEMP_VAL_REG); - if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) { + if (IS_DEAD_ARG(1) && !ts->fixed_reg) { /* the mov can be suppressed */ if (ots->val_type == TEMP_VAL_REG) { - s->reg_to_temp[ots->reg] = -1; + s->reg_to_temp[ots->reg] = NULL; } ots->reg = ts->reg; - temp_dead(s, args[1]); + temp_dead(s, ts); } else { if (ots->val_type != TEMP_VAL_REG) { /* When allocating a new register, make sure to not spill the input one. */ tcg_regset_set_reg(allocated_regs, ts->reg); - ots->reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[otype], - allocated_regs); + ots->reg = tcg_reg_alloc(s, s->tcg_target_available_regs[otype], + allocated_regs, preferred_regs, + ots->indirect_base); + } + if (!tcg_out_mov(s, otype, ots->reg, ts->reg)) { + /* + * Cross register class move not supported. + * Store the source register into the destination slot + * and leave the destination temp as TEMP_VAL_MEM. + */ + assert(!ots->fixed_reg); + if (!ts->mem_allocated) { + temp_allocate_frame(s, ots); + } + tcg_out_st(s, ts->type, ts->reg, + ots->mem_base->reg, ots->mem_offset); + ots->mem_coherent = 1; + temp_free_or_dead(s, ots, -1); + return; } - tcg_out_mov(s, otype, ots->reg, ts->reg); } ots->val_type = TEMP_VAL_REG; ots->mem_coherent = 0; - s->reg_to_temp[ots->reg] = args[0]; + s->reg_to_temp[ots->reg] = ots; if (NEED_SYNC_ARG(0)) { - tcg_reg_sync(s, ots->reg); + temp_sync(s, ots, allocated_regs, 0, 0); } } } -static void tcg_reg_alloc_op(TCGContext *s, - const TCGOpDef *def, TCGOpcode opc, - const TCGArg *args, uint16_t dead_args, - uint8_t sync_args) +/* + * Specialized code generation for INDEX_op_dup_vec. + */ +static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op) { - TCGRegSet allocated_regs; - int i, k, nb_iargs, nb_oargs, reg; + const TCGLifeData arg_life = op->life; + TCGRegSet dup_out_regs, dup_in_regs; + TCGTemp *its, *ots; + TCGType itype, vtype; + intptr_t endian_fixup; + unsigned vece; + bool ok; + + ots = arg_temp(op->args[0]); + its = arg_temp(op->args[1]); + + /* ENV should not be modified. */ + tcg_debug_assert(!ots->fixed_reg); + + itype = its->type; + vece = TCGOP_VECE(op); + vtype = TCGOP_VECL(op) + TCG_TYPE_V64; + + if (its->val_type == TEMP_VAL_CONST) { + /* Propagate constant via movi -> dupi. */ + tcg_target_ulong val = its->val; + if (IS_DEAD_ARG(1)) { + temp_dead(s, its); + } + tcg_reg_alloc_do_movi(s, ots, val, arg_life, op->output_pref[0]); + return; + } + + dup_out_regs = s->tcg_op_defs[INDEX_op_dup_vec].args_ct[0].u.regs; + dup_in_regs = s->tcg_op_defs[INDEX_op_dup_vec].args_ct[1].u.regs; + + /* Allocate the output register now. 
*/ + if (ots->val_type != TEMP_VAL_REG) { + TCGRegSet allocated_regs = s->reserved_regs; + + if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) { + /* Make sure to not spill the input register. */ + tcg_regset_set_reg(allocated_regs, its->reg); + } + ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs, + op->output_pref[0], ots->indirect_base); + ots->val_type = TEMP_VAL_REG; + ots->mem_coherent = 0; + s->reg_to_temp[ots->reg] = ots; + } + + switch (its->val_type) { + case TEMP_VAL_REG: + /* + * The dup constriaints must be broad, covering all possible VECE. + * However, tcg_op_dup_vec() gets to see the VECE and we allow it + * to fail, indicating that extra moves are required for that case. + */ + if (tcg_regset_test_reg(dup_in_regs, its->reg)) { + if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) { + goto done; + } + /* Try again from memory or a vector input register. */ + } + if (!its->mem_coherent) { + /* + * The input register is not synced, and so an extra store + * would be required to use memory. Attempt an integer-vector + * register move first. We do not have a TCGRegSet for this. + */ + if (tcg_out_mov(s, itype, ots->reg, its->reg)) { + break; + } + /* Sync the temp back to its slot and load from there. */ + temp_sync(s, its, s->reserved_regs, 0, 0); + } + /* fall through */ + + case TEMP_VAL_MEM: +#ifdef HOST_WORDS_BIGENDIAN + endian_fixup = itype == TCG_TYPE_I32 ? 4 : 8; + endian_fixup -= 1 << vece; +#else + endian_fixup = 0; +#endif + if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg, + its->mem_offset + endian_fixup)) { + goto done; + } + tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset); + break; + + default: + g_assert_not_reached(); + } + + /* We now have a vector input register, so dup must succeed. 
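The endian_fixup computed above encodes a subtle point: when broadcasting from an integer slot on a big-endian host, the least-significant element sits at the high end of the slot, so the load address must be biased by (integer size - element size). A quick check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        int big_endian = 1;   /* pretend HOST_WORDS_BIGENDIAN */
        for (int int_size = 4; int_size <= 8; int_size += 4) {
            for (int vece = 0; (1 << vece) <= int_size; vece++) {
                int fix = big_endian ? int_size - (1 << vece) : 0;
                printf("i%d slot, %d-byte element -> load at slot+%d\n",
                       int_size * 8, 1 << vece, fix);
            }
        }
        return 0;
    }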
*/ + ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg); + tcg_debug_assert(ok); + + done: + if (IS_DEAD_ARG(1)) { + temp_dead(s, its); + } + if (NEED_SYNC_ARG(0)) { + temp_sync(s, ots, s->reserved_regs, 0, 0); + } + if (IS_DEAD_ARG(0)) { + temp_dead(s, ots); + } +} + +static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) +{ + const TCGLifeData arg_life = op->life; + const TCGOpDef * const def = &s->tcg_op_defs[op->opc]; + TCGRegSet i_allocated_regs; + TCGRegSet o_allocated_regs; + int i, k, nb_iargs, nb_oargs; + TCGReg reg; TCGArg arg; const TCGArgConstraint *arg_ct; TCGTemp *ts; @@ -2204,47 +3344,39 @@ static void tcg_reg_alloc_op(TCGContext *s, nb_iargs = def->nb_iargs; /* copy constants */ - memcpy(new_args + nb_oargs + nb_iargs, - args + nb_oargs + nb_iargs, + memcpy(new_args + nb_oargs + nb_iargs, + op->args + nb_oargs + nb_iargs, sizeof(TCGArg) * def->nb_cargs); - /* satisfy input constraints */ - tcg_regset_set(allocated_regs, s->reserved_regs); - for(k = 0; k < nb_iargs; k++) { + i_allocated_regs = s->reserved_regs; + o_allocated_regs = s->reserved_regs; + + /* satisfy input constraints */ + for (k = 0; k < nb_iargs; k++) { + TCGRegSet i_preferred_regs, o_preferred_regs; + i = def->sorted_args[nb_oargs + k]; - arg = args[i]; + arg = op->args[i]; arg_ct = &def->args_ct[i]; - ts = &s->temps[arg]; - if (ts->val_type == TEMP_VAL_MEM) { - reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); - tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset); - ts->val_type = TEMP_VAL_REG; - ts->reg = reg; - ts->mem_coherent = 1; - s->reg_to_temp[reg] = arg; - } else if (ts->val_type == TEMP_VAL_CONST) { - if (tcg_target_const_match(ts->val, ts->type, arg_ct)) { - /* constant is OK for instruction */ - const_args[i] = 1; - new_args[i] = ts->val; - goto iarg_end; - } else { - /* need to move to a register */ - reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); - tcg_out_movi(s, ts->type, reg, ts->val); - ts->val_type = TEMP_VAL_REG; - ts->reg = reg; - ts->mem_coherent = 0; - s->reg_to_temp[reg] = arg; - } + ts = arg_temp(arg); + + if (ts->val_type == TEMP_VAL_CONST + && tcg_target_const_match(ts->val, ts->type, arg_ct)) { + /* constant is OK for instruction */ + const_args[i] = 1; + new_args[i] = ts->val; + continue; } - assert(ts->val_type == TEMP_VAL_REG); + + i_preferred_regs = o_preferred_regs = 0; if (arg_ct->ct & TCG_CT_IALIAS) { + o_preferred_regs = op->output_pref[arg_ct->alias_index]; if (ts->fixed_reg) { /* if fixed register, we must allocate a new register if the alias is not the same register */ - if (arg != args[arg_ct->alias_index]) + if (arg != op->args[arg_ct->alias_index]) { goto allocate_in_reg; + } } else { /* if the input is aliased to an output and if it is not dead after the instruction, we must allocate @@ -2252,99 +3384,132 @@ static void tcg_reg_alloc_op(TCGContext *s, if (!IS_DEAD_ARG(i)) { goto allocate_in_reg; } + + /* check if the current register has already been allocated + for another input aliased to an output */ + if (ts->val_type == TEMP_VAL_REG) { + int k2, i2; + reg = ts->reg; + for (k2 = 0 ; k2 < k ; k2++) { + i2 = def->sorted_args[nb_oargs + k2]; + if ((def->args_ct[i2].ct & TCG_CT_IALIAS) && + reg == new_args[i2]) { + goto allocate_in_reg; + } + } + } + i_preferred_regs = o_preferred_regs; } } + + temp_load(s, ts, arg_ct->u.regs, i_allocated_regs, i_preferred_regs); reg = ts->reg; + if (tcg_regset_test_reg(arg_ct->u.regs, reg)) { /* nothing to do : the constraint is satisfied */ } else { allocate_in_reg: - /* allocate a new register 
matching the constraint + /* allocate a new register matching the constraint and move the temporary register into it */ - reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); - tcg_out_mov(s, ts->type, reg, ts->reg); + temp_load(s, ts, s->tcg_target_available_regs[ts->type], + i_allocated_regs, 0); + reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs, + o_preferred_regs, ts->indirect_base); + if (!tcg_out_mov(s, ts->type, reg, ts->reg)) { + /* + * Cross register class move not supported. Sync the + * temp back to its slot and load from there. + */ + temp_sync(s, ts, i_allocated_regs, 0, 0); + tcg_out_ld(s, ts->type, reg, + ts->mem_base->reg, ts->mem_offset); + } } new_args[i] = reg; const_args[i] = 0; - tcg_regset_set_reg(allocated_regs, reg); - iarg_end: ; + tcg_regset_set_reg(i_allocated_regs, reg); } - + /* mark dead temporaries and free the associated registers */ for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { if (IS_DEAD_ARG(i)) { - temp_dead(s, args[i]); + temp_dead(s, arg_temp(op->args[i])); } } if (def->flags & TCG_OPF_BB_END) { - tcg_reg_alloc_bb_end(s, allocated_regs); + tcg_reg_alloc_bb_end(s, i_allocated_regs); } else { if (def->flags & TCG_OPF_CALL_CLOBBER) { - /* XXX: permit generic clobber register list ? */ - for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { - if (tcg_regset_test_reg(s->tcg_target_call_clobber_regs, reg)) { - tcg_reg_free(s, reg); + /* XXX: permit generic clobber register list ? */ + for (i = 0; i < TCG_TARGET_NB_REGS; i++) { + if (tcg_regset_test_reg(s->tcg_target_call_clobber_regs, i)) { + tcg_reg_free(s, i, i_allocated_regs); } } } if (def->flags & TCG_OPF_SIDE_EFFECTS) { /* sync globals if the op has side effects and might trigger an exception. */ - sync_globals(s, allocated_regs); + sync_globals(s, i_allocated_regs); } - + /* satisfy the output constraints */ - tcg_regset_set(allocated_regs, s->reserved_regs); for(k = 0; k < nb_oargs; k++) { i = def->sorted_args[k]; - arg = args[i]; + arg = op->args[i]; arg_ct = &def->args_ct[i]; - ts = &s->temps[arg]; - if (arg_ct->ct & TCG_CT_ALIAS) { + ts = arg_temp(arg); + + /* ENV should not be modified. */ + tcg_debug_assert(!ts->fixed_reg); + + if ((arg_ct->ct & TCG_CT_ALIAS) + && !const_args[arg_ct->alias_index]) { reg = new_args[arg_ct->alias_index]; + } else if (arg_ct->ct & TCG_CT_NEWREG) { + reg = tcg_reg_alloc(s, arg_ct->u.regs, + i_allocated_regs | o_allocated_regs, + op->output_pref[k], ts->indirect_base); } else { - /* if fixed register, we try to use it */ - reg = ts->reg; - if (ts->fixed_reg && - tcg_regset_test_reg(arg_ct->u.regs, reg)) { - goto oarg_end; - } - reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); + reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs, + op->output_pref[k], ts->indirect_base); } - tcg_regset_set_reg(allocated_regs, reg); - /* if a fixed register is used, then a move will be done afterwards */ - if (!ts->fixed_reg) { - if (ts->val_type == TEMP_VAL_REG) { - s->reg_to_temp[ts->reg] = -1; - } - ts->val_type = TEMP_VAL_REG; - ts->reg = reg; - /* temp value is modified, so the value kept in memory is - potentially not the same */ - ts->mem_coherent = 0; - s->reg_to_temp[reg] = arg; + tcg_regset_set_reg(o_allocated_regs, reg); + if (ts->val_type == TEMP_VAL_REG) { + s->reg_to_temp[ts->reg] = NULL; } - oarg_end: + ts->val_type = TEMP_VAL_REG; + ts->reg = reg; + /* + * Temp value is modified, so the value kept in memory is + * potentially not the same. 
+ */ + ts->mem_coherent = 0; + s->reg_to_temp[reg] = ts; new_args[i] = reg; } } /* emit instruction */ - tcg_out_op(s, opc, new_args, const_args); + if (def->flags & TCG_OPF_VECTOR) { + tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op), + new_args, const_args); + } else { + tcg_out_op(s, op->opc, new_args, const_args); + } /* move the outputs in the correct register if needed */ for(i = 0; i < nb_oargs; i++) { - ts = &s->temps[args[i]]; - reg = new_args[i]; - if (ts->fixed_reg && ts->reg != reg) { - tcg_out_mov(s, ts->type, ts->reg, reg); - } + ts = arg_temp(op->args[i]); + + /* ENV should not be modified. */ + tcg_debug_assert(!ts->fixed_reg); + if (NEED_SYNC_ARG(i)) { - tcg_reg_sync(s, reg); - } - if (IS_DEAD_ARG(i)) { - temp_dead(s, args[i]); + temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i)); + } else if (IS_DEAD_ARG(i)) { + temp_dead(s, ts); } } } @@ -2355,11 +3520,13 @@ static void tcg_reg_alloc_op(TCGContext *s, #define STACK_DIR(x) (x) #endif -static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def, - TCGOpcode opc, const TCGArg *args, - uint16_t dead_args, uint8_t sync_args) +static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) { - int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params; + const int nb_oargs = TCGOP_CALLO(op); + const int nb_iargs = TCGOP_CALLI(op); + const TCGLifeData arg_life = op->life; + int flags, nb_regs, i; + TCGReg reg; TCGArg arg; TCGTemp *ts; intptr_t stack_offset; @@ -2368,103 +3535,94 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def, int allocate_args; TCGRegSet allocated_regs; - arg = *args++; - - nb_oargs = arg >> 16; - nb_iargs = arg & 0xffff; - nb_params = nb_iargs; - - func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs]; - flags = args[nb_oargs + nb_iargs + 1]; + func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs]; + flags = op->args[nb_oargs + nb_iargs + 1]; nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs); -#ifdef _UC_MSVC_ARRAY_DUMMY - // do this because msvc cannot have arrays with 0 entries. - /* ref: tcg/i386/tcg-target.c: tcg_target_call_iarg_regs, - it is added a dummy value, set back to 0. */ +#if TCG_TARGET_REG_BITS != 64 +#ifdef _MSC_VER + // do this because MSVC cannot have array with 0 entries. + /* ref: tcg/i386/tcg-target.inc.c: tcg_target_call_iarg_regs, + it is added a dummy value, set back to 0. 
*/ nb_regs = 0; #endif - if (nb_regs > nb_params) { - nb_regs = nb_params; +#endif + if (nb_regs > nb_iargs) { + nb_regs = nb_iargs; } /* assign stack slots first */ - call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long); - call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) & + call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long); + call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) & ~(TCG_TARGET_STACK_ALIGN - 1); allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE); if (allocate_args) { /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed, preallocate call stack */ - return -1; + tcg_abort(); } stack_offset = TCG_TARGET_CALL_STACK_OFFSET; - for(i = nb_regs; i < nb_params; i++) { - arg = args[nb_oargs + i]; + for (i = nb_regs; i < nb_iargs; i++) { + arg = op->args[nb_oargs + i]; #ifdef TCG_TARGET_STACK_GROWSUP stack_offset -= sizeof(tcg_target_long); #endif if (arg != TCG_CALL_DUMMY_ARG) { - ts = &s->temps[arg]; - if (ts->val_type == TEMP_VAL_REG) { - tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset); - } else if (ts->val_type == TEMP_VAL_MEM) { - reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[ts->type], - s->reserved_regs); - /* XXX: not correct if reading values from the stack */ - tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset); - tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset); - } else if (ts->val_type == TEMP_VAL_CONST) { - reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[ts->type], - s->reserved_regs); - /* XXX: sign extend may be needed on some targets */ - tcg_out_movi(s, ts->type, reg, ts->val); - tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset); - } else { - return -1; - } + ts = arg_temp(arg); + temp_load(s, ts, s->tcg_target_available_regs[ts->type], + s->reserved_regs, 0); + tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset); } #ifndef TCG_TARGET_STACK_GROWSUP stack_offset += sizeof(tcg_target_long); #endif } - + /* assign input registers */ - tcg_regset_set(allocated_regs, s->reserved_regs); - for(i = 0; i < nb_regs; i++) { - arg = args[nb_oargs + i]; + allocated_regs = s->reserved_regs; + for (i = 0; i < nb_regs; i++) { + arg = op->args[nb_oargs + i]; if (arg != TCG_CALL_DUMMY_ARG) { - ts = &s->temps[arg]; + ts = arg_temp(arg); reg = tcg_target_call_iarg_regs[i]; - tcg_reg_free(s, reg); + if (ts->val_type == TEMP_VAL_REG) { if (ts->reg != reg) { - tcg_out_mov(s, ts->type, reg, ts->reg); + tcg_reg_free(s, reg, allocated_regs); + if (!tcg_out_mov(s, ts->type, reg, ts->reg)) { + /* + * Cross register class move not supported. Sync the + * temp back to its slot and load from there. + */ + temp_sync(s, ts, allocated_regs, 0, 0); + tcg_out_ld(s, ts->type, reg, + ts->mem_base->reg, ts->mem_offset); + } } - } else if (ts->val_type == TEMP_VAL_MEM) { - tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset); - } else if (ts->val_type == TEMP_VAL_CONST) { - /* XXX: sign extend ? 
*/ - tcg_out_movi(s, ts->type, reg, ts->val); } else { - return -1; + TCGRegSet arg_set = 0; + + tcg_reg_free(s, reg, allocated_regs); + tcg_regset_set_reg(arg_set, reg); + temp_load(s, ts, arg_set, allocated_regs, 0); } + tcg_regset_set_reg(allocated_regs, reg); } } - + /* mark dead temporaries and free the associated registers */ - for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) { + for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { if (IS_DEAD_ARG(i)) { - temp_dead(s, args[i]); + temp_dead(s, arg_temp(op->args[i])); } } - + /* clobber call registers */ - for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { - if (tcg_regset_test_reg(s->tcg_target_call_clobber_regs, reg)) { - tcg_reg_free(s, reg); + for (i = 0; i < TCG_TARGET_NB_REGS; i++) { + if (tcg_regset_test_reg(s->tcg_target_call_clobber_regs, i)) { + tcg_reg_free(s, i, allocated_regs); } } @@ -2482,214 +3640,185 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def, /* assign output registers and emit moves if needed */ for(i = 0; i < nb_oargs; i++) { - arg = args[i]; - ts = &s->temps[arg]; - reg = tcg_target_call_oarg_regs[i]; - assert(s->reg_to_temp[reg] == -1); + arg = op->args[i]; + ts = arg_temp(arg); - if (ts->fixed_reg) { - if (ts->reg != reg) { - tcg_out_mov(s, ts->type, ts->reg, reg); - } - } else { - if (ts->val_type == TEMP_VAL_REG) { - s->reg_to_temp[ts->reg] = -1; - } - ts->val_type = TEMP_VAL_REG; - ts->reg = reg; - ts->mem_coherent = 0; - s->reg_to_temp[reg] = arg; - if (NEED_SYNC_ARG(i)) { - tcg_reg_sync(s, reg); - } - if (IS_DEAD_ARG(i)) { - temp_dead(s, args[i]); - } + /* ENV should not be modified. */ + tcg_debug_assert(!ts->fixed_reg); + + reg = tcg_target_call_oarg_regs[i]; + tcg_debug_assert(s->reg_to_temp[reg] == NULL); + if (ts->val_type == TEMP_VAL_REG) { + s->reg_to_temp[ts->reg] = NULL; + } + ts->val_type = TEMP_VAL_REG; + ts->reg = reg; + ts->mem_coherent = 0; + s->reg_to_temp[reg] = ts; + if (NEED_SYNC_ARG(i)) { + temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i)); + } else if (IS_DEAD_ARG(i)) { + temp_dead(s, ts); } } - - return nb_iargs + nb_oargs + def->nb_cargs + 1; } -#ifdef CONFIG_PROFILER - -static void dump_op_count(void) +int64_t tcg_cpu_exec_time(void) { - int i; - - for(i = INDEX_op_end; i < NB_OPS; i++) { - qemu_log("%s %" PRId64 "\n", s->tcg_op_defs[i].name, tcg_table_op_count[i]); - } + // error_report("%s: TCG profiler not compiled", __func__); + exit(EXIT_FAILURE); } -#endif -static inline int tcg_gen_code_common(TCGContext *s, - tcg_insn_unit *gen_code_buf, - long search_pc) +int tcg_gen_code(TCGContext *s, TranslationBlock *tb) { - TCGOpcode opc; - int op_index; - const TCGOpDef *def; - const TCGArg *args; - int ret; + int i, num_insns; + TCGOp *op; -#ifdef DEBUG_DISAS - if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { - qemu_log("OP:\n"); - tcg_dump_ops(s); - qemu_log("\n"); +#ifndef NDEBUG + if (getenv("UNICORN_DEBUG")) { + tcg_dump_ops(s, false, "TCG before optimization:"); } #endif -#ifdef CONFIG_PROFILER - s->opt_time -= profile_getclock(); +#ifdef CONFIG_DEBUG_TCG + /* Ensure all labels referenced have been emitted. 
*/ + { + TCGLabel *l; + bool error = false; + + QSIMPLEQ_FOREACH(l, &s->labels, next) { + if (unlikely(!l->present) && l->refs) { + error = true; + } + } + assert(!error); + } #endif #ifdef USE_TCG_OPTIMIZATIONS - s->gen_opparam_ptr = - tcg_optimize(s, s->gen_opc_ptr, s->gen_opparam_buf, s->tcg_op_defs); - if (s->gen_opparam_ptr == NULL) { - tcg_out_tb_finalize(s); - return -2; + tcg_optimize(s); +#endif + //tcg_dump_ops(s, false, "after opt1:"); + reachable_code_pass(s); + //tcg_dump_ops(s, false, "after opt2:"); + liveness_pass_1(s); + //tcg_dump_ops(s, false, "after opt3:"); + if (s->nb_indirects > 0) { + /* Replace indirect temps with direct temps. */ + if (liveness_pass_2(s)) { + /* If changes were made, re-run liveness. */ + liveness_pass_1(s); + } } -#endif - -#ifdef CONFIG_PROFILER - s->opt_time += profile_getclock(); - s->la_time -= profile_getclock(); -#endif - - tcg_liveness_analysis(s); - -#ifdef CONFIG_PROFILER - s->la_time += profile_getclock(); -#endif - -#ifdef DEBUG_DISAS - if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) { - qemu_log("OP after optimization and liveness analysis:\n"); - tcg_dump_ops(s); - qemu_log("\n"); - } -#endif - + //tcg_dump_ops(s, false, "after opt4:"); tcg_reg_alloc_start(s); - s->code_buf = gen_code_buf; - s->code_ptr = gen_code_buf; + s->code_buf = tb->tc.ptr; + s->code_ptr = tb->tc.ptr; - tcg_out_tb_init(s); - - args = s->gen_opparam_buf; - op_index = 0; - - for(;;) { - opc = s->gen_opc_buf[op_index]; -#ifdef CONFIG_PROFILER - tcg_table_op_count[opc]++; +#ifdef TCG_TARGET_NEED_LDST_LABELS + QSIMPLEQ_INIT(&s->ldst_labels); #endif - def = &s->tcg_op_defs[opc]; -#if 0 - printf("%s: %d %d %d\n", def->name, - def->nb_oargs, def->nb_iargs, def->nb_cargs); - // dump_regs(s); +#ifdef TCG_TARGET_NEED_POOL_LABELS + s->pool_labels = NULL; #endif - switch(opc) { + +#ifndef NDEBUG + if (getenv("UNICORN_DEBUG")) { + tcg_dump_ops(s, false, "TCG before codegen:"); + } +#endif + num_insns = -1; + QTAILQ_FOREACH(op, &s->ops, link) { + TCGOpcode opc = op->opc; + + switch (opc) { case INDEX_op_mov_i32: case INDEX_op_mov_i64: - tcg_reg_alloc_mov(s, def, args, s->op_dead_args[op_index], - s->op_sync_args[op_index]); + case INDEX_op_mov_vec: + tcg_reg_alloc_mov(s, op); break; case INDEX_op_movi_i32: case INDEX_op_movi_i64: - tcg_reg_alloc_movi(s, args, s->op_dead_args[op_index], - s->op_sync_args[op_index]); + case INDEX_op_dupi_vec: + tcg_reg_alloc_movi(s, op); break; - case INDEX_op_debug_insn_start: - /* debug instruction */ + case INDEX_op_dup_vec: + tcg_reg_alloc_dup(s, op); break; - case INDEX_op_nop: - case INDEX_op_nop1: - case INDEX_op_nop2: - case INDEX_op_nop3: + case INDEX_op_insn_start: + if (num_insns >= 0) { + size_t off = tcg_current_code_size(s); + s->gen_insn_end_off[num_insns] = off; + /* Assert that we do not overflow our stored offset. 
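The deposit64() call a few lines below stitches a 64-bit guest PC back together on 32-bit hosts, where each insn_start word arrives as two 32-bit TCGArg halves. A standalone version of the same bit-insert, matching QEMU's (value, start, length, fieldval) argument order and assuming 0 < length < 64 as at this call site:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t deposit64(uint64_t value, int start, int length,
                              uint64_t fieldval)
    {
        uint64_t mask = (~0ULL >> (64 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        uint64_t lo = 0xdeadbeef, hi = 0x00401000;   /* two halves */
        printf("pc = 0x%llx\n",
               (unsigned long long)deposit64(lo, 32, 32, hi));
        return 0;
    }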
*/ + assert(s->gen_insn_end_off[num_insns] == off); + } + num_insns++; + for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { + target_ulong a; +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS + a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]); +#else + a = op->args[i]; +#endif + s->gen_insn_data[num_insns][i] = a; + } break; - case INDEX_op_nopn: - args += args[0]; - goto next; case INDEX_op_discard: - temp_dead(s, args[0]); + temp_dead(s, arg_temp(op->args[0])); break; case INDEX_op_set_label: tcg_reg_alloc_bb_end(s, s->reserved_regs); - tcg_out_label(s, args[0], s->code_ptr); + tcg_out_label(s, arg_label(op->args[0]), s->code_ptr); break; case INDEX_op_call: - ret = tcg_reg_alloc_call(s, def, opc, args, - s->op_dead_args[op_index], - s->op_sync_args[op_index]); - if (ret == -1) { - goto the_end; - } else { - args += ret; - } - goto next; - case INDEX_op_end: - goto the_end; + tcg_reg_alloc_call(s, op); + break; default: /* Sanity check that we've not introduced any unhandled opcodes. */ - if (def->flags & TCG_OPF_NOT_PRESENT) { - goto the_end; - } + tcg_debug_assert(tcg_op_supported(opc)); /* Note: in order to speed up the code, it would be much faster to have specialized register allocator functions for some common argument patterns */ - tcg_reg_alloc_op(s, def, opc, args, s->op_dead_args[op_index], - s->op_sync_args[op_index]); + tcg_reg_alloc_op(s, op); break; } - args += def->nb_args; - next: - if (search_pc >= 0 && (size_t)search_pc < tcg_current_code_size(s)) { - return op_index; - } - op_index++; -#ifndef NDEBUG +#ifdef CONFIG_DEBUG_TCG check_regs(s); #endif + + /* Test for (pending) buffer overflow. The assumption is that any + one operation beginning below the high water mark cannot overrun + the buffer completely. Thus we can test for overflow after + generating code without having to check during generation. */ + if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) { + return -1; + } + /* Test for TB overflow, as seen by gen_insn_end_off. */ + if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) { + return -2; + } } - the_end: + tcg_debug_assert(num_insns >= 0); + s->gen_insn_end_off[num_insns] = tcg_current_code_size(s); + /* Generate TB finalization at the end of block */ - tcg_out_tb_finalize(s); - return -1; -} - -int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf) // qq -{ - int ret; -#ifdef CONFIG_PROFILER - { - int n; - n = (s->gen_opc_ptr - s->gen_opc_buf); - s->op_count += n; - if (n > s->op_count_max) - s->op_count_max = n; - - s->temp_count += s->nb_temps; - if (s->nb_temps > s->temp_count_max) - s->temp_count_max = s->nb_temps; +#ifdef TCG_TARGET_NEED_LDST_LABELS + i = tcg_out_ldst_finalize(s); + if (i < 0) { + return i; } #endif - - //printf("====== before gen code\n"); - //tcg_dump_ops(s); - ret = tcg_gen_code_common(s, gen_code_buf, -1); // qq - if (ret == -2) { - return -1; +#ifdef TCG_TARGET_NEED_POOL_LABELS + i = tcg_out_pool_finalize(s); + if (i < 0) { + return i; + } +#endif + if (!tcg_resolve_relocs(s)) { + return -2; } - - //printf("====== after gen code\n"); - //tcg_dump_ops(s); /* flush instruction cache */ flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr); @@ -2697,68 +3826,274 @@ int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf) // qq return tcg_current_code_size(s); } -/* Return the index of the micro operation such as the pc after is < - offset bytes from the start of the TB. The contents of gen_code_buf must - not be changed, though writing the same values is ok. - Return -1 if not found. 
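find_string(), defined shortly below for the GDB JIT image, leans on the ELF string-table layout: a blob of NUL-terminated strings in which an offset names a string and offset 0 is the empty string. Like the real helper, the sketch assumes the string is present:

    #include <stdio.h>
    #include <string.h>

    static int find_str(const char *strtab, const char *str)
    {
        const char *p = strtab + 1;     /* skip the leading empty string */
        while (strcmp(p, str) != 0) {
            p += strlen(p) + 1;         /* hop to the next entry */
        }
        return (int)(p - strtab);
    }

    int main(void)
    {
        static const char strtab[] = "\0.text\0.strtab\0code_gen_buffer";
        printf(".strtab at offset %d\n", find_str(strtab, ".strtab"));
        return 0;
    }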
*/ -int tcg_gen_code_search_pc(TCGContext *s, tcg_insn_unit *gen_code_buf, - long offset) +#ifdef ELF_HOST_MACHINE +/* In order to use this feature, the backend needs to do three things: + + (1) Define ELF_HOST_MACHINE to indicate both what value to + put into the ELF image and to indicate support for the feature. + + (2) Define tcg_register_jit. This should create a buffer containing + the contents of a .debug_frame section that describes the post- + prologue unwind info for the tcg machine. + + (3) Call tcg_register_jit_int, with the constructed .debug_frame. +*/ + +/* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */ +typedef enum { + JIT_NOACTION = 0, + JIT_REGISTER_FN, + JIT_UNREGISTER_FN +} jit_actions_t; + +struct jit_descriptor { + uint32_t version; + uint32_t action_flag; + struct jit_code_entry *relevant_entry; + struct jit_code_entry *first_entry; +}; + +#if 0 +void __jit_debug_register_code(void) QEMU_NOINLINE; +void __jit_debug_register_code(void) { - return tcg_gen_code_common(s, gen_code_buf, offset); + asm(""); } -#ifdef CONFIG_PROFILER -void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf) +/* Must statically initialize the version, because GDB may check + the version before we can set it. */ +struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 }; +#endif + +/* End GDB interface. */ + +static int find_string(const char *strtab, const char *str) { + const char *p = strtab + 1; + + while (1) { + if (strcmp(p, str) == 0) { + return p - strtab; + } + p += strlen(p) + 1; + } +} + +static void tcg_register_jit_int(TCGContext *s, void *buf_ptr, size_t buf_size, + const void *debug_frame, + size_t debug_frame_size) +{ + struct __attribute__((packed)) DebugInfo { + uint32_t len; + uint16_t version; + uint32_t abbrev; + uint8_t ptr_size; + uint8_t cu_die; + uint16_t cu_lang; + uintptr_t cu_low_pc; + uintptr_t cu_high_pc; + uint8_t fn_die; + char fn_name[16]; + uintptr_t fn_low_pc; + uintptr_t fn_high_pc; + uint8_t cu_eoc; + }; + + struct ElfImage { + ElfW(Ehdr) ehdr; + ElfW(Phdr) phdr; + ElfW(Shdr) shdr[7]; + ElfW(Sym) sym[2]; + struct DebugInfo di; + uint8_t da[24]; + char str[80]; + }; + + struct ElfImage *img; + + static const struct ElfImage img_template = { + .ehdr = { + .e_ident[EI_MAG0] = ELFMAG0, + .e_ident[EI_MAG1] = ELFMAG1, + .e_ident[EI_MAG2] = ELFMAG2, + .e_ident[EI_MAG3] = ELFMAG3, + .e_ident[EI_CLASS] = ELF_CLASS, + .e_ident[EI_DATA] = ELF_DATA, + .e_ident[EI_VERSION] = EV_CURRENT, + .e_type = ET_EXEC, + .e_machine = ELF_HOST_MACHINE, + .e_version = EV_CURRENT, + .e_phoff = offsetof(struct ElfImage, phdr), + .e_shoff = offsetof(struct ElfImage, shdr), + .e_ehsize = sizeof(ElfW(Shdr)), + .e_phentsize = sizeof(ElfW(Phdr)), + .e_phnum = 1, + .e_shentsize = sizeof(ElfW(Shdr)), + .e_shnum = ARRAY_SIZE(img->shdr), + .e_shstrndx = ARRAY_SIZE(img->shdr) - 1, +#ifdef ELF_HOST_FLAGS + .e_flags = ELF_HOST_FLAGS, +#endif +#ifdef ELF_OSABI + .e_ident[EI_OSABI] = ELF_OSABI, +#endif + }, + .phdr = { + .p_type = PT_LOAD, + .p_flags = PF_X, + }, + .shdr = { + [0] = { .sh_type = SHT_NULL }, + /* Trick: The contents of code_gen_buffer are not present in + this fake ELF file; that got allocated elsewhere. Therefore + we mark .text as SHT_NOBITS (similar to .bss) so that readers + will not look for contents. We can record any address. 
*/ + [1] = { /* .text */ + .sh_type = SHT_NOBITS, + .sh_flags = SHF_EXECINSTR | SHF_ALLOC, + }, + [2] = { /* .debug_info */ + .sh_type = SHT_PROGBITS, + .sh_offset = offsetof(struct ElfImage, di), + .sh_size = sizeof(struct DebugInfo), + }, + [3] = { /* .debug_abbrev */ + .sh_type = SHT_PROGBITS, + .sh_offset = offsetof(struct ElfImage, da), + .sh_size = sizeof(img->da), + }, + [4] = { /* .debug_frame */ + .sh_type = SHT_PROGBITS, + .sh_offset = sizeof(struct ElfImage), + }, + [5] = { /* .symtab */ + .sh_type = SHT_SYMTAB, + .sh_offset = offsetof(struct ElfImage, sym), + .sh_size = sizeof(img->sym), + .sh_info = 1, + .sh_link = ARRAY_SIZE(img->shdr) - 1, + .sh_entsize = sizeof(ElfW(Sym)), + }, + [6] = { /* .strtab */ + .sh_type = SHT_STRTAB, + .sh_offset = offsetof(struct ElfImage, str), + .sh_size = sizeof(img->str), + } + }, + .sym = { + [1] = { /* code_gen_buffer */ + .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC), + .st_shndx = 1, + } + }, + .di = { + .len = sizeof(struct DebugInfo) - 4, + .version = 2, + .ptr_size = sizeof(void *), + .cu_die = 1, + .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */ + .fn_die = 2, + .fn_name = "code_gen_buffer" + }, + .da = { + 1, /* abbrev number (the cu) */ + 0x11, 1, /* DW_TAG_compile_unit, has children */ + 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */ + 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */ + 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */ + 0, 0, /* end of abbrev */ + 2, /* abbrev number (the fn) */ + 0x2e, 0, /* DW_TAG_subprogram, no children */ + 0x3, 0x8, /* DW_AT_name, DW_FORM_string */ + 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */ + 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */ + 0, 0, /* end of abbrev */ + 0 /* no more abbrev */ + }, + .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0" + ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer", + }; + + uintptr_t buf = (uintptr_t)buf_ptr; + size_t img_size = sizeof(struct ElfImage) + debug_frame_size; + DebugFrameHeader *dfh; + + img = g_malloc(img_size); + *img = img_template; + + img->phdr.p_vaddr = buf; + img->phdr.p_paddr = buf; + img->phdr.p_memsz = buf_size; + + img->shdr[1].sh_name = find_string(img->str, ".text"); + img->shdr[1].sh_addr = buf; + img->shdr[1].sh_size = buf_size; + + img->shdr[2].sh_name = find_string(img->str, ".debug_info"); + img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev"); + + img->shdr[4].sh_name = find_string(img->str, ".debug_frame"); + img->shdr[4].sh_size = debug_frame_size; + + img->shdr[5].sh_name = find_string(img->str, ".symtab"); + img->shdr[6].sh_name = find_string(img->str, ".strtab"); + + img->sym[1].st_name = find_string(img->str, "code_gen_buffer"); + img->sym[1].st_value = buf; + img->sym[1].st_size = buf_size; + + img->di.cu_low_pc = buf; + img->di.cu_high_pc = buf + buf_size; + img->di.fn_low_pc = buf; + img->di.fn_high_pc = buf + buf_size; + + dfh = (DebugFrameHeader *)(img + 1); + memcpy(dfh, debug_frame, debug_frame_size); + dfh->fde.func_start = buf; + dfh->fde.func_len = buf_size; + +#ifdef DEBUG_JIT + /* Enable this block to be able to debug the ELF image file creation. + One can use readelf, objdump, or other inspection utilities. */ + { + FILE *f = fopen("/tmp/qemu.jit", "w+b"); + if (f) { + if (fwrite(img, img_size, 1, f) != img_size) { + /* Avoid stupid unused return value warning for fwrite. 
*/ + } + fclose(f); + } + } +#endif + + s->one_entry->symfile_addr = img; + s->one_entry->symfile_size = img_size; + #if 0 - TCGContext *s = &tcg_ctx; - int64_t tot; - - tot = s->interm_time + s->code_time; - cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n", - tot, tot / 2.4e9); - cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n", - s->tb_count, - s->tb_count1 - s->tb_count, - s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0); - cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n", - s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max); - cpu_fprintf(f, "deleted ops/TB %0.2f\n", - s->tb_count ? - (double)s->del_op_count / s->tb_count : 0); - cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n", - s->tb_count ? - (double)s->temp_count / s->tb_count : 0, - s->temp_count_max); - - cpu_fprintf(f, "cycles/op %0.1f\n", - s->op_count ? (double)tot / s->op_count : 0); - cpu_fprintf(f, "cycles/in byte %0.1f\n", - s->code_in_len ? (double)tot / s->code_in_len : 0); - cpu_fprintf(f, "cycles/out byte %0.1f\n", - s->code_out_len ? (double)tot / s->code_out_len : 0); - if (tot == 0) - tot = 1; - cpu_fprintf(f, " gen_interm time %0.1f%%\n", - (double)s->interm_time / tot * 100.0); - cpu_fprintf(f, " gen_code time %0.1f%%\n", - (double)s->code_time / tot * 100.0); - cpu_fprintf(f, "optim./code time %0.1f%%\n", - (double)s->opt_time / (s->code_time ? s->code_time : 1) - * 100.0); - cpu_fprintf(f, "liveness/code time %0.1f%%\n", - (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0); - cpu_fprintf(f, "cpu_restore count %" PRId64 "\n", - s->restore_count); - cpu_fprintf(f, " avg cycles %0.1f\n", - s->restore_count ? (double)s->restore_time / s->restore_count : 0); - - dump_op_count(); + __jit_debug_descriptor.action_flag = JIT_REGISTER_FN; + __jit_debug_descriptor.relevant_entry = s->one_entry; + __jit_debug_descriptor.first_entry = s->one_entry; + __jit_debug_register_code(); #endif } #else -void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf) +/* No support for the feature. Provide the entry point expected by exec.c, + and implement the internal function we declared earlier. */ + +static void tcg_register_jit_int(TCGContext *s, void *buf, size_t size, + const void *debug_frame, + size_t debug_frame_size) { - cpu_fprintf(f, "[TCG profiler not compiled]\n"); +} + +void tcg_register_jit(TCGContext *s, void *buf, size_t buf_size) +{ +} +#endif /* ELF_HOST_MACHINE */ + +#if !TCG_TARGET_MAYBE_vec +void tcg_expand_vec_op(TCGContext *tcg_ctx, TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...) +{ + g_assert_not_reached(); } #endif diff --git a/qemu/tcg/tcg.h b/qemu/tcg/tcg.h deleted file mode 100644 index 54486e53..00000000 --- a/qemu/tcg/tcg.h +++ /dev/null @@ -1,1012 +0,0 @@ -/* - * Tiny Code Generator for QEMU - * - * Copyright (c) 2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#ifndef TCG_H -#define TCG_H - -#include "qemu-common.h" -#include "qemu/bitops.h" -#include "tcg-target.h" -#include "exec/exec-all.h" - -#include "uc_priv.h" - -/* Default target word size to pointer size. */ -#ifndef TCG_TARGET_REG_BITS -# if UINTPTR_MAX == UINT32_MAX -# define TCG_TARGET_REG_BITS 32 -# elif UINTPTR_MAX == UINT64_MAX -# define TCG_TARGET_REG_BITS 64 -# else -# error Unknown pointer size for tcg target -# endif -#endif - -#if TCG_TARGET_REG_BITS == 32 -typedef int32_t tcg_target_long; -typedef uint32_t tcg_target_ulong; -#define TCG_PRIlx PRIx32 -#define TCG_PRIld PRId32 -#elif TCG_TARGET_REG_BITS == 64 -typedef int64_t tcg_target_long; -typedef uint64_t tcg_target_ulong; -#define TCG_PRIlx PRIx64 -#define TCG_PRIld PRId64 -#else -#error unsupported -#endif - -#if TCG_TARGET_NB_REGS <= 32 -typedef uint32_t TCGRegSet; -#elif TCG_TARGET_NB_REGS <= 64 -typedef uint64_t TCGRegSet; -#else -#error unsupported -#endif - -#if TCG_TARGET_REG_BITS == 32 -/* Turn some undef macros into false macros. */ -#define TCG_TARGET_HAS_trunc_shr_i32 0 -#define TCG_TARGET_HAS_div_i64 0 -#define TCG_TARGET_HAS_rem_i64 0 -#define TCG_TARGET_HAS_div2_i64 0 -#define TCG_TARGET_HAS_rot_i64 0 -#define TCG_TARGET_HAS_ext8s_i64 0 -#define TCG_TARGET_HAS_ext16s_i64 0 -#define TCG_TARGET_HAS_ext32s_i64 0 -#define TCG_TARGET_HAS_ext8u_i64 0 -#define TCG_TARGET_HAS_ext16u_i64 0 -#define TCG_TARGET_HAS_ext32u_i64 0 -#define TCG_TARGET_HAS_bswap16_i64 0 -#define TCG_TARGET_HAS_bswap32_i64 0 -#define TCG_TARGET_HAS_bswap64_i64 0 -#define TCG_TARGET_HAS_neg_i64 0 -#define TCG_TARGET_HAS_not_i64 0 -#define TCG_TARGET_HAS_andc_i64 0 -#define TCG_TARGET_HAS_orc_i64 0 -#define TCG_TARGET_HAS_eqv_i64 0 -#define TCG_TARGET_HAS_nand_i64 0 -#define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_deposit_i64 0 -#define TCG_TARGET_HAS_movcond_i64 0 -#define TCG_TARGET_HAS_add2_i64 0 -#define TCG_TARGET_HAS_sub2_i64 0 -#define TCG_TARGET_HAS_mulu2_i64 0 -#define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i64 0 -#define TCG_TARGET_HAS_mulsh_i64 0 -/* Turn some undef macros into true macros. */ -#define TCG_TARGET_HAS_add2_i32 1 -#define TCG_TARGET_HAS_sub2_i32 1 -#endif - -#ifndef TCG_TARGET_deposit_i32_valid -#define TCG_TARGET_deposit_i32_valid(ofs, len) 1 -#endif -#ifndef TCG_TARGET_deposit_i64_valid -#define TCG_TARGET_deposit_i64_valid(ofs, len) 1 -#endif - -/* Only one of DIV or DIV2 should be defined. */ -#if defined(TCG_TARGET_HAS_div_i32) -#define TCG_TARGET_HAS_div2_i32 0 -#elif defined(TCG_TARGET_HAS_div2_i32) -#define TCG_TARGET_HAS_div_i32 0 -#define TCG_TARGET_HAS_rem_i32 0 -#endif -#if defined(TCG_TARGET_HAS_div_i64) -#define TCG_TARGET_HAS_div2_i64 0 -#elif defined(TCG_TARGET_HAS_div2_i64) -#define TCG_TARGET_HAS_div_i64 0 -#define TCG_TARGET_HAS_rem_i64 0 -#endif - -/* For 32-bit targets, some sort of unsigned widening multiply is required. 
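The width-detection idiom the deleted header used above is worth noting on its own: the host register width falls out of UINTPTR_MAX at preprocessing time, with no configure step. Minimal form:

    #include <stdint.h>
    #include <stdio.h>

    #if UINTPTR_MAX == UINT32_MAX
    # define HOST_REG_BITS 32
    #elif UINTPTR_MAX == UINT64_MAX
    # define HOST_REG_BITS 64
    #else
    # error Unknown pointer size
    #endif

    int main(void)
    {
        printf("host register width: %d bits\n", HOST_REG_BITS);
        return 0;
    }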
*/ -#if TCG_TARGET_REG_BITS == 32 \ - && !(defined(TCG_TARGET_HAS_mulu2_i32) \ - || defined(TCG_TARGET_HAS_muluh_i32)) -# error "Missing unsigned widening multiply" -#endif - -typedef enum TCGOpcode { -#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name, -#include "tcg-opc.h" -#undef DEF - NB_OPS, -} TCGOpcode; - -#define tcg_regset_clear(d) (d) = 0 -#define tcg_regset_set(d, s) (d) = (s) -#define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg) -#define tcg_regset_set_reg(d, r) (d) |= 1L << (r) -#define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r)) -#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1) -#define tcg_regset_or(d, a, b) (d) = (a) | (b) -#define tcg_regset_and(d, a, b) (d) = (a) & (b) -#define tcg_regset_andnot(d, a, b) (d) = (a) & ~(b) -#define tcg_regset_not(d, a) (d) = ~(a) - -#ifndef TCG_TARGET_INSN_UNIT_SIZE -# error "Missing TCG_TARGET_INSN_UNIT_SIZE" -#elif TCG_TARGET_INSN_UNIT_SIZE == 1 -typedef uint8_t tcg_insn_unit; -#elif TCG_TARGET_INSN_UNIT_SIZE == 2 -typedef uint16_t tcg_insn_unit; -#elif TCG_TARGET_INSN_UNIT_SIZE == 4 -typedef uint32_t tcg_insn_unit; -#elif TCG_TARGET_INSN_UNIT_SIZE == 8 -typedef uint64_t tcg_insn_unit; -#else -/* The port better have done this. */ -#endif - - -typedef struct TCGRelocation { - struct TCGRelocation *next; - int type; - tcg_insn_unit *ptr; - intptr_t addend; -} TCGRelocation; - -typedef struct TCGLabel { - int has_value; - union { - uintptr_t value; - tcg_insn_unit *value_ptr; - TCGRelocation *first_reloc; - } u; -} TCGLabel; - -typedef struct TCGPool { - struct TCGPool *next; - int size; - uint8_t QEMU_ALIGN(8, data[0]); -} TCGPool; - -#define TCG_POOL_CHUNK_SIZE 32768 - -#define TCG_MAX_LABELS 512 - -#define TCG_MAX_TEMPS 512 - -/* when the size of the arguments of a called function is smaller than - this value, they are statically allocated in the TB stack frame */ -#define TCG_STATIC_CALL_ARGS_SIZE 128 - -typedef enum TCGType { - TCG_TYPE_I32, - TCG_TYPE_I64, - TCG_TYPE_COUNT, /* number of different types */ - - /* An alias for the size of the host register. */ -#if TCG_TARGET_REG_BITS == 32 - TCG_TYPE_REG = TCG_TYPE_I32, -#else - TCG_TYPE_REG = TCG_TYPE_I64, -#endif - - /* An alias for the size of the native pointer. */ -#if UINTPTR_MAX == UINT32_MAX - TCG_TYPE_PTR = TCG_TYPE_I32, -#else - TCG_TYPE_PTR = TCG_TYPE_I64, -#endif - - /* An alias for the size of the target "long", aka register. */ -#if TARGET_LONG_BITS == 64 - TCG_TYPE_TL = TCG_TYPE_I64, -#else - TCG_TYPE_TL = TCG_TYPE_I32, -#endif -} TCGType; - -/* Constants for qemu_ld and qemu_st for the Memory Operation field. */ -typedef enum TCGMemOp { - MO_8 = 0, - MO_16 = 1, - MO_32 = 2, - MO_64 = 3, - MO_SIZE = 3, /* Mask for the above. */ - - MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ - - MO_BSWAP = 8, /* Host reverse endian. */ -#ifdef HOST_WORDS_BIGENDIAN - MO_LE = MO_BSWAP, - MO_BE = 0, -#else - MO_LE = 0, - MO_BE = MO_BSWAP, -#endif -#ifdef TARGET_WORDS_BIGENDIAN - MO_TE = MO_BE, -#else - MO_TE = MO_LE, -#endif - - /* Combinations of the above, for ease of use. 
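/*
 * Illustration (assumes only the TCGMemOp bits defined above; this helper
 * name is ours, not from the original header): the low two bits encode
 * log2(size), so the access width in bytes is recovered with MO_SIZE.
 */
static inline int tcg_memop_size_bytes(TCGMemOp op)
{
    return 1 << (op & MO_SIZE);   /* MO_8 -> 1, MO_16 -> 2, MO_32 -> 4, MO_64 -> 8 */
}
/* A target-endian unsigned 32-bit load is MO_TE | MO_32 (a.k.a. MO_TEUL in
   the combinations that follow): 4 bytes, zero-extended since MO_SIGN is
   clear. */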
*/ - MO_UB = MO_8, - MO_UW = MO_16, - MO_UL = MO_32, - MO_SB = MO_SIGN | MO_8, - MO_SW = MO_SIGN | MO_16, - MO_SL = MO_SIGN | MO_32, - MO_Q = MO_64, - - MO_LEUW = MO_LE | MO_UW, - MO_LEUL = MO_LE | MO_UL, - MO_LESW = MO_LE | MO_SW, - MO_LESL = MO_LE | MO_SL, - MO_LEQ = MO_LE | MO_Q, - - MO_BEUW = MO_BE | MO_UW, - MO_BEUL = MO_BE | MO_UL, - MO_BESW = MO_BE | MO_SW, - MO_BESL = MO_BE | MO_SL, - MO_BEQ = MO_BE | MO_Q, - - MO_TEUW = MO_TE | MO_UW, - MO_TEUL = MO_TE | MO_UL, - MO_TESW = MO_TE | MO_SW, - MO_TESL = MO_TE | MO_SL, - MO_TEQ = MO_TE | MO_Q, - - MO_SSIZE = MO_SIZE | MO_SIGN, -} TCGMemOp; - -typedef tcg_target_ulong TCGArg; - -/* Define a type and accessor macros for variables. Using pointer types - is nice because it gives some level of type safely. Converting to and - from intptr_t rather than int reduces the number of sign-extension - instructions that get implied on 64-bit hosts. Users of tcg_gen_* don't - need to know about any of this, and should treat TCGv as an opaque type. - In addition we do typechecking for different types of variables. TCGv_i32 - and TCGv_i64 are 32/64-bit variables respectively. TCGv and TCGv_ptr - are aliases for target_ulong and host pointer sized values respectively. */ - -typedef struct TCGv_i32_d *TCGv_i32; -typedef struct TCGv_i64_d *TCGv_i64; -typedef struct TCGv_ptr_d *TCGv_ptr; - -static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i) -{ - return (TCGv_i32)i; -} - -static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i) -{ - return (TCGv_i64)i; -} - -static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i) -{ - return (TCGv_ptr)i; -} - -static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t) -{ - return (intptr_t)t; -} - -static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t) -{ - return (intptr_t)t; -} - -static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t) -{ - return (intptr_t)t; -} - -#if TCG_TARGET_REG_BITS == 32 -#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t)) -#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1) -#endif - -#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b)) -#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b)) -#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b)) - -/* Dummy definition to avoid compiler warnings. */ -#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1) -#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1) -#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1) - -#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1) -#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1) -#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1) - -/* call flags */ -/* Helper does not read globals (either directly or through an exception). It - implies TCG_CALL_NO_WRITE_GLOBALS. */ -#define TCG_CALL_NO_READ_GLOBALS 0x0010 -/* Helper does not write globals */ -#define TCG_CALL_NO_WRITE_GLOBALS 0x0020 -/* Helper can be safely suppressed if the return value is not used. */ -#define TCG_CALL_NO_SIDE_EFFECTS 0x0040 - -/* convenience version of most used call flags */ -#define TCG_CALL_NO_RWG TCG_CALL_NO_READ_GLOBALS -#define TCG_CALL_NO_WG TCG_CALL_NO_WRITE_GLOBALS -#define TCG_CALL_NO_SE TCG_CALL_NO_SIDE_EFFECTS -#define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE) -#define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE) - -/* used to align parameters */ -#define TCG_CALL_DUMMY_TCGV MAKE_TCGV_I32(-1) -#define TCG_CALL_DUMMY_ARG ((TCGArg)(-1)) - -/* Conditions. 
Note that these are laid out for easy manipulation by - the functions below: - bit 0 is used for inverting; - bit 1 is signed, - bit 2 is unsigned, - bit 3 is used with bit 0 for swapping signed/unsigned. */ -typedef enum { - /* non-signed */ - TCG_COND_NEVER = 0 | 0 | 0 | 0, - TCG_COND_ALWAYS = 0 | 0 | 0 | 1, - TCG_COND_EQ = 8 | 0 | 0 | 0, - TCG_COND_NE = 8 | 0 | 0 | 1, - /* signed */ - TCG_COND_LT = 0 | 0 | 2 | 0, - TCG_COND_GE = 0 | 0 | 2 | 1, - TCG_COND_LE = 8 | 0 | 2 | 0, - TCG_COND_GT = 8 | 0 | 2 | 1, - /* unsigned */ - TCG_COND_LTU = 0 | 4 | 0 | 0, - TCG_COND_GEU = 0 | 4 | 0 | 1, - TCG_COND_LEU = 8 | 4 | 0 | 0, - TCG_COND_GTU = 8 | 4 | 0 | 1, -} TCGCond; - -/* Invert the sense of the comparison. */ -static inline TCGCond tcg_invert_cond(TCGCond c) -{ - return (TCGCond)(c ^ 1); -} - -/* Swap the operands in a comparison. */ -static inline TCGCond tcg_swap_cond(TCGCond c) -{ - return c & 6 ? (TCGCond)(c ^ 9) : c; -} - -/* Create an "unsigned" version of a "signed" comparison. */ -static inline TCGCond tcg_unsigned_cond(TCGCond c) -{ - return c & 2 ? (TCGCond)(c ^ 6) : c; -} - -/* Must a comparison be considered unsigned? */ -static inline bool is_unsigned_cond(TCGCond c) -{ - return (c & 4) != 0; -} - -/* Create a "high" version of a double-word comparison. - This removes equality from a LTE or GTE comparison. */ -static inline TCGCond tcg_high_cond(TCGCond c) -{ - switch (c) { - case TCG_COND_GE: - case TCG_COND_LE: - case TCG_COND_GEU: - case TCG_COND_LEU: - return (TCGCond)(c ^ 8); - default: - return c; - } -} - -#define TEMP_VAL_DEAD 0 -#define TEMP_VAL_REG 1 -#define TEMP_VAL_MEM 2 -#define TEMP_VAL_CONST 3 - -/* XXX: optimize memory layout */ -typedef struct TCGTemp { - TCGType base_type; - TCGType type; - int val_type; - int reg; - tcg_target_long val; - int mem_reg; - intptr_t mem_offset; - unsigned int fixed_reg:1; - unsigned int mem_coherent:1; - unsigned int mem_allocated:1; - unsigned int temp_local:1; /* If true, the temp is saved across - basic blocks. Otherwise, it is not - preserved across basic blocks. 
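/*
 * Worked example of the condition-code bit layout above (illustrative only;
 * the values follow directly from the enum encodings):
 *
 *   TCG_COND_LT = 2
 *   tcg_invert_cond(TCG_COND_LT)   == 2 ^ 1 == 3  == TCG_COND_GE
 *   tcg_swap_cond(TCG_COND_LT)     == 2 ^ 9 == 11 == TCG_COND_GT
 *   tcg_unsigned_cond(TCG_COND_LT) == 2 ^ 6 == 4  == TCG_COND_LTU
 *   tcg_high_cond(TCG_COND_LE)     == 10 ^ 8 == 2 == TCG_COND_LT
 *                                     (equality removed, as documented)
 */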
*/ - unsigned int temp_allocated:1; /* never used for code gen */ - const char *name; -} TCGTemp; - -typedef struct TCGContext TCGContext; - -typedef struct TCGTempSet { - unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)]; -} TCGTempSet; - - -/* pool based memory allocation */ - -void *tcg_malloc_internal(TCGContext *s, int size); -void tcg_pool_reset(TCGContext *s); -void tcg_pool_delete(TCGContext *s); - -void tcg_context_init(TCGContext *s); -void tcg_context_free(void *s); // free memory allocated for @s -void tcg_prologue_init(TCGContext *s); -void tcg_func_start(TCGContext *s); - -int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf); -int tcg_gen_code_search_pc(TCGContext *s, tcg_insn_unit *gen_code_buf, - long offset); - -void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size); - -TCGv_i32 tcg_global_reg_new_i32(TCGContext *s, int reg, const char *name); -TCGv_i32 tcg_global_mem_new_i32(TCGContext *s, int reg, intptr_t offset, const char *name); -TCGv_i32 tcg_temp_new_internal_i32(TCGContext *s, int temp_local); -static inline TCGv_i32 tcg_temp_new_i32(TCGContext *s) -{ - return tcg_temp_new_internal_i32(s, 0); -} -static inline TCGv_i32 tcg_temp_local_new_i32(TCGContext *s) -{ - return tcg_temp_new_internal_i32(s, 1); -} -void tcg_temp_free_i32(TCGContext *s, TCGv_i32 arg); -char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg); - -TCGv_i64 tcg_global_reg_new_i64(TCGContext *s, int reg, const char *name); -TCGv_i64 tcg_global_mem_new_i64(TCGContext *s, int reg, intptr_t offset, const char *name); -TCGv_i64 tcg_temp_new_internal_i64(TCGContext *s, int temp_local); -static inline TCGv_i64 tcg_temp_new_i64(TCGContext *s) -{ - return tcg_temp_new_internal_i64(s, 0); -} -static inline TCGv_i64 tcg_temp_local_new_i64(TCGContext *s) -{ - return tcg_temp_new_internal_i64(s, 1); -} -void tcg_temp_free_i64(TCGContext *s, TCGv_i64 arg); -char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg); - -#if defined(CONFIG_DEBUG_TCG) -/* If you call tcg_clear_temp_count() at the start of a section of - * code which is not supposed to leak any TCG temporaries, then - * calling tcg_check_temp_count() at the end of the section will - * return 1 if the section did in fact leak a temporary. - */ -void tcg_clear_temp_count(void); -int tcg_check_temp_count(void); -#else -#define tcg_clear_temp_count() do { } while (0) -#define tcg_check_temp_count() 0 -#endif - -void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf); - -#define TCG_CT_ALIAS 0x80 -#define TCG_CT_IALIAS 0x40 -#define TCG_CT_REG 0x01 -#define TCG_CT_CONST 0x02 /* any constant of register size */ - -typedef struct TCGArgConstraint { - uint16_t ct; - uint8_t alias_index; - union { - TCGRegSet regs; - } u; -} TCGArgConstraint; - -#define TCG_MAX_OP_ARGS 16 - -/* Bits for TCGOpDef->flags, 8 bits available. */ -enum { - /* Instruction defines the end of a basic block. */ - TCG_OPF_BB_END = 0x01, - /* Instruction clobbers call registers and potentially update globals. */ - TCG_OPF_CALL_CLOBBER = 0x02, - /* Instruction has side effects: it cannot be removed if its outputs - are not used, and might trigger exceptions. */ - TCG_OPF_SIDE_EFFECTS = 0x04, - /* Instruction operands are 64-bits (otherwise 32-bits). */ - TCG_OPF_64BIT = 0x08, - /* Instruction is optional and not implemented by the host, or insn - is generic and should not be implemened by the host. 
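/*
 * Typical lifecycle under the temporary API above (a sketch; `s` is the
 * TCGContext of the translation in progress, and the ops emitted between
 * new and free are whatever the front end needs):
 */
static inline void tcg_temp_usage_sketch(TCGContext *s)
{
    TCGv_i32 t0 = tcg_temp_new_i32(s);       /* dies at basic-block end      */
    TCGv_i32 t1 = tcg_temp_local_new_i32(s); /* survives across basic blocks */
    /* ... emit ops reading and writing t0 / t1 ... */
    tcg_temp_free_i32(s, t1);
    tcg_temp_free_i32(s, t0);
}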
*/ - TCG_OPF_NOT_PRESENT = 0x10, -}; - -typedef struct TCGOpDef { - const char *name; - uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args; - uint8_t flags; - TCGArgConstraint *args_ct; - int *sorted_args; -#if defined(CONFIG_DEBUG_TCG) - int used; -#endif -} TCGOpDef; - -typedef enum { - TCG_TEMP_UNDEF = 0, - TCG_TEMP_CONST, - TCG_TEMP_COPY, -} tcg_temp_state; - -struct tcg_temp_info { - tcg_temp_state state; - uint16_t prev_copy; - uint16_t next_copy; - tcg_target_ulong val; - tcg_target_ulong mask; -}; - -struct TCGContext { - uint8_t *pool_cur, *pool_end; - TCGPool *pool_first, *pool_current, *pool_first_large; - TCGLabel *labels; - int nb_labels; - int nb_globals; - int nb_temps; - - /* goto_tb support */ - tcg_insn_unit *code_buf; - uintptr_t *tb_next; - uint16_t *tb_next_offset; - uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */ - - /* liveness analysis */ - uint16_t *op_dead_args; /* for each operation, each bit tells if the - corresponding argument is dead */ - uint8_t *op_sync_args; /* for each operation, each bit tells if the - corresponding output argument needs to be - sync to memory. */ - - /* tells in which temporary a given register is. It does not take - into account fixed registers */ - int reg_to_temp[TCG_TARGET_NB_REGS]; - TCGRegSet reserved_regs; - intptr_t current_frame_offset; - intptr_t frame_start; - intptr_t frame_end; - int frame_reg; - - tcg_insn_unit *code_ptr; - TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ - TCGTempSet free_temps[TCG_TYPE_COUNT * 2]; - - GHashTable *helpers; - -#ifdef CONFIG_PROFILER - /* profiling info */ - int64_t tb_count1; - int64_t tb_count; - int64_t op_count; /* total insn count */ - int op_count_max; /* max insn per TB */ - int64_t temp_count; - int temp_count_max; - int64_t del_op_count; - int64_t code_in_len; - int64_t code_out_len; - int64_t interm_time; - int64_t code_time; - int64_t la_time; - int64_t opt_time; - int64_t restore_count; - int64_t restore_time; -#endif - -#ifdef CONFIG_DEBUG_TCG - int temps_in_use; - int goto_tb_issue_mask; -#endif - - uint16_t gen_opc_buf[OPC_BUF_SIZE]; - TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE]; - - uint16_t *gen_opc_ptr; - TCGArg *gen_opparam_ptr; - target_ulong gen_opc_pc[OPC_BUF_SIZE]; - uint16_t gen_opc_icount[OPC_BUF_SIZE]; - uint8_t gen_opc_instr_start[OPC_BUF_SIZE]; - - /* Code generation. Note that we specifically do not use tcg_insn_unit - here, because there's too much arithmetic throughout that relies - on addition and subtraction working on bytes. Rely on the GCC - extension that allows arithmetic on void*. */ - int code_gen_max_blocks; - void *code_gen_prologue; - void *code_gen_buffer; - size_t code_gen_buffer_size; - /* threshold to flush the translated code buffer */ - size_t code_gen_buffer_max_size; - void *code_gen_ptr; - - TBContext tb_ctx; - - /* The TCGBackendData structure is private to tcg-target.c. 
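/*
 * Consumption sketch for the liveness arrays above (encoding as documented:
 * one uint16_t per operation, one bit per argument):
 *
 *   argument `arg` of operation `i` is dead when
 *       (op_dead_args[i] >> arg) & 1
 *   and output `arg` must be synced back to memory when
 *       (op_sync_args[i] >> arg) & 1
 */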
*/ - struct TCGBackendData *be; - - // Unicorn engine variables - struct uc_struct *uc; - /* qemu/target-i386/translate.c: global register indexes */ - TCGv_ptr cpu_env; - TCGv_i32 cpu_cc_op; - void *cpu_regs[16]; // 16 GRP for X86-64 - int x86_64_hregs; // qemu/target-i386/translate.c - uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; // qemu/target-i386/translate.c - - /* qemu/target-i386/translate.c: global TCGv vars */ - void *cpu_A0; - void *cpu_cc_dst, *cpu_cc_src, *cpu_cc_src2, *cpu_cc_srcT; - - /* qemu/target-i386/translate.c: local temps */ - void *cpu_T[2]; - - /* qemu/target-i386/translate.c: local register indexes (only used inside old micro ops) */ - void *cpu_tmp0, *cpu_tmp4; - TCGv_ptr cpu_ptr0, cpu_ptr1; - TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32; - TCGv_i64 cpu_tmp1_i64; - - /* qemu/tcg/i386/tcg-target.c */ - void *tb_ret_addr; - int guest_base_flags; - /* If bit_MOVBE is defined in cpuid.h (added in GCC version 4.6), we are - going to attempt to determine at runtime whether movbe is available. */ - bool have_movbe; - - /* qemu/tcg/tcg.c */ - uint64_t tcg_target_call_clobber_regs; - uint64_t tcg_target_available_regs[2]; - TCGOpDef *tcg_op_defs; - - /* qemu/tcg/optimize.c */ - struct tcg_temp_info temps2[TCG_MAX_TEMPS]; - - /* qemu/target-m68k/translate.c */ - TCGv_i32 cpu_halted; - char cpu_reg_names[3*8*3 + 5*4]; - void *cpu_dregs[8]; - void *cpu_aregs[8]; - TCGv_i64 cpu_fregs[8]; - TCGv_i64 cpu_macc[4]; - TCGv_i64 QREG_FP_RESULT; - void *QREG_PC, *QREG_SR, *QREG_CC_OP, *QREG_CC_DEST, *QREG_CC_SRC; - void *QREG_CC_X, *QREG_DIV1, *QREG_DIV2, *QREG_MACSR, *QREG_MAC_MASK; - void *NULL_QREG; - void *opcode_table[65536]; - /* Used to distinguish stores from bad addressing modes. */ - void *store_dummy; - - /* qemu/target-arm/translate.c */ - uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE]; - TCGv_i64 cpu_V0, cpu_V1, cpu_M0; - /* We reuse the same 64-bit temporaries for efficiency. 
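/*
 * Note on the fields above: upstream QEMU keeps these per-target translator
 * variables as file-scope globals in each translate.c (the comments name
 * their files of origin); Unicorn hoists them into TCGContext so that each
 * uc_struct carries a complete, independent translator state and several
 * engine instances can coexist in one process.
 */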
*/ - TCGv_i32 cpu_R[16]; - TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF; - TCGv_i64 cpu_exclusive_addr; - TCGv_i64 cpu_exclusive_val; - TCGv_i32 cpu_F0s, cpu_F1s; - TCGv_i64 cpu_F0d, cpu_F1d; - - /* qemu/target-arm/translate-a64.c */ - TCGv_i64 cpu_pc; - /* Load/store exclusive handling */ - TCGv_i64 cpu_exclusive_high; - TCGv_i64 cpu_X[32]; - - /* qemu/target-mips/translate.c */ - /* global register indices */ - void *cpu_gpr[32], *cpu_PC; - void *cpu_HI[4], *cpu_LO[4]; // MIPS_DSP_ACC = 4 in qemu/target-mips/cpu.h - void *cpu_dspctrl, *btarget, *bcond; - TCGv_i32 hflags; - TCGv_i32 fpu_fcr31; - TCGv_i64 fpu_f64[32]; - TCGv_i64 msa_wr_d[64]; - - uint32_t gen_opc_hflags[OPC_BUF_SIZE]; - target_ulong gen_opc_btarget[OPC_BUF_SIZE]; - - /* qemu/target-sparc/translate.c */ - /* global register indexes */ - TCGv_ptr cpu_regwptr; - TCGv_i32 cpu_psr; - TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs; - TCGv_i32 cpu_softint; - /* Floating point registers */ - TCGv_i64 cpu_fpr[32]; // TARGET_DPREGS = 32 for Sparc64, 16 for Sparc - - target_ulong gen_opc_npc[OPC_BUF_SIZE]; - target_ulong gen_opc_jump_pc[2]; - - // void *cpu_cc_src, *cpu_cc_src2, *cpu_cc_dst; - void *cpu_fsr, *sparc_cpu_pc, *cpu_npc, *cpu_gregs[8]; - void *cpu_y; - void *cpu_tbr; - void *cpu_cond; - void *cpu_gsr; - void *cpu_tick_cmpr, *cpu_stick_cmpr, *cpu_hstick_cmpr; - void *cpu_hintp, *cpu_htba, *cpu_hver, *cpu_ssr, *cpu_ver; - void *cpu_wim; - - int exitreq_label; // gen_tb_start() -}; - -typedef struct TCGTargetOpDef { - TCGOpcode op; - const char *args_ct_str[TCG_MAX_OP_ARGS]; -} TCGTargetOpDef; - -#define tcg_abort() \ -do {\ - fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\ - abort();\ -} while (0) - -#ifdef CONFIG_DEBUG_TCG -# define tcg_debug_assert(X) do { assert(X); } while (0) -#elif QEMU_GNUC_PREREQ(4, 5) -# define tcg_debug_assert(X) \ - do { if (!(X)) { __builtin_unreachable(); } } while (0) -#else -# define tcg_debug_assert(X) do { (void)(X); } while (0) -#endif - -void tcg_add_target_add_op_defs(TCGContext *s, const TCGTargetOpDef *tdefs); - -#if UINTPTR_MAX == UINT32_MAX -#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n)) -#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n)) - -#define tcg_const_ptr(t, V) TCGV_NAT_TO_PTR(tcg_const_i32(t, (intptr_t)(V))) -#define tcg_global_reg_new_ptr(U, R, N) \ - TCGV_NAT_TO_PTR(tcg_global_reg_new_i32(U, (R), (N))) -#define tcg_global_mem_new_ptr(t, R, O, N) \ - TCGV_NAT_TO_PTR(tcg_global_mem_new_i32(t, (R), (O), (N))) -#define tcg_temp_new_ptr(s) TCGV_NAT_TO_PTR(tcg_temp_new_i32(s)) -#define tcg_temp_free_ptr(s, T) tcg_temp_free_i32(s, TCGV_PTR_TO_NAT(T)) -#else -#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n)) -#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n)) - -#define tcg_const_ptr(t, V) TCGV_NAT_TO_PTR(tcg_const_i64(t, (intptr_t)(V))) -#define tcg_global_reg_new_ptr(U, R, N) \ - TCGV_NAT_TO_PTR(tcg_global_reg_new_i64(U, (R), (N))) -#define tcg_global_mem_new_ptr(t, R, O, N) \ - TCGV_NAT_TO_PTR(tcg_global_mem_new_i64(t, (R), (O), (N))) -#define tcg_temp_new_ptr(s) TCGV_NAT_TO_PTR(tcg_temp_new_i64(s)) -#define tcg_temp_free_ptr(s, T) tcg_temp_free_i64(s, TCGV_PTR_TO_NAT(T)) -#endif - -void tcg_gen_callN(TCGContext *s, void *func, - TCGArg ret, int nargs, TCGArg *args); - -void tcg_gen_shifti_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, - int c, int right, int arith); - -TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, TCGArg *args, - TCGOpDef *tcg_op_def); - -static inline void *tcg_malloc(TCGContext *s, int size) -{ - uint8_t *ptr, 
*ptr_end; - size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1); - ptr = s->pool_cur; - ptr_end = ptr + size; - if (unlikely(ptr_end > s->pool_end)) { - return tcg_malloc_internal(s, size); - } else { - s->pool_cur = ptr_end; - return ptr; - } -} - -/* only used for debugging purposes */ -void tcg_dump_ops(TCGContext *s); - -void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf); -TCGv_i32 tcg_const_i32(TCGContext *s, int32_t val); -TCGv_i64 tcg_const_i64(TCGContext *s, int64_t val); -TCGv_i32 tcg_const_local_i32(TCGContext *s, int32_t val); -TCGv_i64 tcg_const_local_i64(TCGContext *s, int64_t val); - -/** - * tcg_ptr_byte_diff - * @a, @b: addresses to be differenced - * - * There are many places within the TCG backends where we need a byte - * difference between two pointers. While this can be accomplished - * with local casting, it's easy to get wrong -- especially if one is - * concerned with the signedness of the result. - * - * This version relies on GCC's void pointer arithmetic to get the - * correct result. - */ - -static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b) -{ - return (char*)a - (char*)b; -} - -/** - * tcg_pcrel_diff - * @s: the tcg context - * @target: address of the target - * - * Produce a pc-relative difference, from the current code_ptr - * to the destination address. - */ - -static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target) -{ - return tcg_ptr_byte_diff(target, s->code_ptr); -} - -/** - * tcg_current_code_size - * @s: the tcg context - * - * Compute the current code size within the translation block. - * This is used to fill in qemu's data structures for goto_tb. - */ - -static inline size_t tcg_current_code_size(TCGContext *s) -{ - return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); -} - -/** - * tcg_qemu_tb_exec: - * @env: CPUArchState * for the CPU - * @tb_ptr: address of generated code for the TB to execute - * - * Start executing code from a given translation block. - * Where translation blocks have been linked, execution - * may proceed from the given TB into successive ones. - * Control eventually returns only when some action is needed - * from the top-level loop: either control must pass to a TB - * which has not yet been directly linked, or an asynchronous - * event such as an interrupt needs handling. - * - * The return value is a pointer to the next TB to execute - * (if known; otherwise zero). This pointer is assumed to be - * 4-aligned, and the bottom two bits are used to return further - * information: - * 0, 1: the link between this TB and the next is via the specified - * TB index (0 or 1). That is, we left the TB via (the equivalent - * of) "goto_tb ". The main loop uses this to determine - * how to link the TB just executed to the next. - * 2: we are using instruction counting code generation, and we - * did not start executing this TB because the instruction counter - * would hit zero midway through it. In this case the next-TB pointer - * returned is the TB we were about to execute, and the caller must - * arrange to execute the remaining count of instructions. - * 3: we stopped because the CPU's exit_request flag was set - * (usually meaning that there is an interrupt that needs to be - * handled). The next-TB pointer returned is the TB we were - * about to execute when we noticed the pending exit request. 
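/*
 * Note on tcg_malloc above (sketch of the fast path): allocation is a bump
 * of pool_cur after rounding the size up to a multiple of sizeof(long);
 * only when the current chunk is exhausted does it fall back to
 * tcg_malloc_internal, and tcg_pool_reset exists to release the pools
 * wholesale rather than freeing objects one by one.
 */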
- * - * If the bottom two bits indicate an exit-via-index then the CPU - * state is correctly synchronised and ready for execution of the next - * TB (and in particular the guest PC is the address to execute next). - * Otherwise, we gave up on execution of this TB before it started, and - * the caller must fix up the CPU state by calling cpu_pc_from_tb() - * with the next-TB pointer we return. - * - * Note that TCG targets may use a different definition of tcg_qemu_tb_exec - * to this default (which just calls the prologue.code emitted by - * tcg_target_qemu_prologue()). - */ -#define TB_EXIT_MASK 3 -#define TB_EXIT_IDX0 0 -#define TB_EXIT_IDX1 1 -#define TB_EXIT_ICOUNT_EXPIRED 2 -#define TB_EXIT_REQUESTED 3 - -#if !defined(tcg_qemu_tb_exec) -# define tcg_qemu_tb_exec(env, tb_ptr) \ - ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr) -#endif - -/* - * Memory helpers that will be used by TCG generated code. - */ -#ifdef CONFIG_SOFTMMU -/* Value zero-extended to tcg register size. */ -tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); -tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); -tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); -uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); -tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); -tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); -uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); - -/* Value sign-extended to tcg register size. */ -tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); -tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); -tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); -tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); -tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, - int mmu_idx, uintptr_t retaddr); - -void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, - int mmu_idx, uintptr_t retaddr); -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - int mmu_idx, uintptr_t retaddr); -void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); -void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - int mmu_idx, uintptr_t retaddr); -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - int mmu_idx, uintptr_t retaddr); -void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); -void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - int mmu_idx, uintptr_t retaddr); - -/* Temporary aliases until backends are converted. 
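/*
 * Decoding sketch for the return value documented above (the function and
 * variable names here are illustrative; `ret` is what tcg_qemu_tb_exec
 * returned):
 */
static inline void tb_exit_decode_sketch(uintptr_t ret)
{
    TranslationBlock *last_tb = (TranslationBlock *)(ret & ~(uintptr_t)TB_EXIT_MASK);
    unsigned exit_code = ret & TB_EXIT_MASK;

    switch (exit_code) {
    case TB_EXIT_IDX0:           /* left via goto_tb slot 0: link last_tb  */
    case TB_EXIT_IDX1:           /* left via goto_tb slot 1: link last_tb  */
        break;
    case TB_EXIT_ICOUNT_EXPIRED: /* last_tb has NOT run yet: execute the
                                    remaining instruction budget by hand    */
        break;
    case TB_EXIT_REQUESTED:      /* exit_request was pending: service it   */
        break;
    }
    (void)last_tb;
}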
*/ -#ifdef TARGET_WORDS_BIGENDIAN -# define helper_ret_ldsw_mmu helper_be_ldsw_mmu -# define helper_ret_lduw_mmu helper_be_lduw_mmu -# define helper_ret_ldsl_mmu helper_be_ldsl_mmu -# define helper_ret_ldul_mmu helper_be_ldul_mmu -# define helper_ret_ldq_mmu helper_be_ldq_mmu -# define helper_ret_stw_mmu helper_be_stw_mmu -# define helper_ret_stl_mmu helper_be_stl_mmu -# define helper_ret_stq_mmu helper_be_stq_mmu -#else -# define helper_ret_ldsw_mmu helper_le_ldsw_mmu -# define helper_ret_lduw_mmu helper_le_lduw_mmu -# define helper_ret_ldsl_mmu helper_le_ldsl_mmu -# define helper_ret_ldul_mmu helper_le_ldul_mmu -# define helper_ret_ldq_mmu helper_le_ldq_mmu -# define helper_ret_stw_mmu helper_le_stw_mmu -# define helper_ret_stl_mmu helper_le_stl_mmu -# define helper_ret_stq_mmu helper_le_stq_mmu -#endif - -void check_exit_request(TCGContext *tcg_ctx); - -#endif /* CONFIG_SOFTMMU */ - -#endif /* TCG_H */ diff --git a/qemu/trace/mem-internal.h b/qemu/trace/mem-internal.h new file mode 100644 index 00000000..8b72b678 --- /dev/null +++ b/qemu/trace/mem-internal.h @@ -0,0 +1,50 @@ +/* + * Helper functions for guest memory tracing + * + * Copyright (C) 2016 Lluís Vilanova + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef TRACE__MEM_INTERNAL_H +#define TRACE__MEM_INTERNAL_H + +#define TRACE_MEM_SZ_SHIFT_MASK 0xf /* size shift mask */ +#define TRACE_MEM_SE (1ULL << 4) /* sign extended (y/n) */ +#define TRACE_MEM_BE (1ULL << 5) /* big endian (y/n) */ +#define TRACE_MEM_ST (1ULL << 6) /* store (y/n) */ +#define TRACE_MEM_MMU_SHIFT 8 /* mmu idx */ + +static inline uint16_t trace_mem_build_info( + int size_shift, bool sign_extend, MemOp endianness, + bool store, unsigned int mmu_idx) +{ + uint16_t res; + + res = size_shift & TRACE_MEM_SZ_SHIFT_MASK; + if (sign_extend) { + res |= TRACE_MEM_SE; + } + if (endianness == MO_BE) { + res |= TRACE_MEM_BE; + } + if (store) { + res |= TRACE_MEM_ST; + } +#ifdef CONFIG_SOFTMMU + res |= mmu_idx << TRACE_MEM_MMU_SHIFT; +#endif + return res; +} + +static inline uint16_t trace_mem_get_info(MemOp op, + unsigned int mmu_idx, + bool store) +{ + return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN), + op & MO_BSWAP, store, + mmu_idx); +} + +#endif /* TRACE__MEM_INTERNAL_H */ diff --git a/qemu/trace/mem.h b/qemu/trace/mem.h new file mode 100644 index 00000000..f62bba9c --- /dev/null +++ b/qemu/trace/mem.h @@ -0,0 +1,35 @@ +/* + * Helper functions for guest memory tracing + * + * Copyright (C) 2016 Lluís Vilanova + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef TRACE__MEM_H +#define TRACE__MEM_H + +#include "tcg/tcg.h" + + +/** + * trace_mem_get_info: + * + * Return a value for the 'info' argument in guest memory access traces. + */ +static uint16_t trace_mem_get_info(MemOp op, unsigned int mmu_idx, bool store); + +/** + * trace_mem_build_info: + * + * Return a value for the 'info' argument in guest memory access traces. 
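/*
 * Worked example for trace_mem_build_info above (the values follow directly
 * from the masks in mem-internal.h): a sign-extended big-endian 32-bit load
 * (size_shift 2) through mmu_idx 1 encodes, with CONFIG_SOFTMMU defined, as
 *
 *   0x2 | TRACE_MEM_SE | TRACE_MEM_BE | (1 << TRACE_MEM_MMU_SHIFT) == 0x132
 *
 * A store would additionally set TRACE_MEM_ST.
 */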
+ */ +static uint16_t trace_mem_build_info(int size_shift, bool sign_extend, + MemOp endianness, bool store, + unsigned int mmuidx); + + +#include "mem-internal.h" + +#endif /* TRACE__MEM_H */ diff --git a/qemu/translate-all.c b/qemu/translate-all.c deleted file mode 100644 index 2deddd1e..00000000 --- a/qemu/translate-all.c +++ /dev/null @@ -1,2013 +0,0 @@ -/* - * Host code generation - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ - -#ifdef _WIN32 -#include -#include -#else -#include -#include -#endif -#include -#include -#include -#include -#include "unicorn/platform.h" - -#include "config.h" - -#include "qemu-common.h" -#define NO_CPU_IO_DEFS -#include "cpu.h" -#include "tcg.h" -#if defined(CONFIG_USER_ONLY) -#include "qemu.h" -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) -#include -#if __FreeBSD_version >= 700104 -#define HAVE_KINFO_GETVMMAP -#define sigqueue sigqueue_freebsd /* avoid redefinition */ -#include -#include -#define _KERNEL -#include -#undef _KERNEL -#undef sigqueue -#include -#endif -#endif -#else -#include "exec/address-spaces.h" -#endif - -#include "exec/cputlb.h" -#include "translate-all.h" -#include "qemu/timer.h" - -#include "uc_priv.h" - -//#define DEBUG_TB_INVALIDATE -//#define DEBUG_FLUSH -/* make various TB consistency checks */ -//#define DEBUG_TB_CHECK - -#if !defined(CONFIG_USER_ONLY) -/* TB consistency checks only implemented for usermode emulation. */ -#undef DEBUG_TB_CHECK -#endif - -#define SMC_BITMAP_USE_THRESHOLD 10 - -typedef struct PageDesc { - /* list of TBs intersecting this ram page */ - TranslationBlock *first_tb; - /* in order to optimize self modifying code, we count the number - of lookups we do to a given page to use a bitmap */ - unsigned int code_write_count; - uint8_t *code_bitmap; -#if defined(CONFIG_USER_ONLY) - unsigned long flags; -#endif -} PageDesc; - -/* In system mode we want L1_MAP to be based on ram offsets, - while in user mode we want it to be based on virtual addresses. */ -#if !defined(CONFIG_USER_ONLY) -#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS -# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS -#else -# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS -#endif -#else -# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS -#endif - -/* Size of the L2 (and L3, etc) page tables. */ -#define V_L2_BITS 10 -#define V_L2_SIZE (1 << V_L2_BITS) - -/* The bits remaining after N lower levels of page tables. 
*/ -#define V_L1_BITS_REM \ - ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS) - -#if V_L1_BITS_REM < 4 -#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS) -#else -#define V_L1_BITS V_L1_BITS_REM -#endif - -#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS) - -#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) - -static uintptr_t qemu_real_host_page_size; -static uintptr_t qemu_host_page_size; -static uintptr_t qemu_host_page_mask; - - -static void tb_link_page(struct uc_struct *uc, TranslationBlock *tb, - tb_page_addr_t phys_pc, tb_page_addr_t phys_page2); -static TranslationBlock *tb_find_pc(struct uc_struct *uc, uintptr_t tc_ptr); - -// Unicorn: for cleaning up memory later. -void free_code_gen_buffer(struct uc_struct *uc); - -static void cpu_gen_init(struct uc_struct *uc) -{ - uc->tcg_ctx = g_malloc(sizeof(TCGContext)); - tcg_context_init(uc->tcg_ctx); -} - -static void tb_clean_internal(void **p, int x) -{ - int i; - void **q; - - if (x <= 1) { - for (i = 0; i < V_L2_SIZE; i++) { - q = p[i]; - if (q) { - g_free(q); - } - } - g_free(p); - } else { - for (i = 0; i < V_L2_SIZE; i++) { - q = p[i]; - if (q) { - tb_clean_internal(q, x - 1); - } - } - g_free(p); - } -} - -void tb_cleanup(struct uc_struct *uc) -{ - int i, x; - void **p; - - if (uc) { - if (uc->l1_map) { - x = V_L1_SHIFT / V_L2_BITS; - if (x <= 1) { - for (i = 0; i < V_L1_SIZE; i++) { - p = uc->l1_map[i]; - if (p) { - g_free(p); - uc->l1_map[i] = NULL; - } - } - } else { - for (i = 0; i < V_L1_SIZE; i++) { - p = uc->l1_map[i]; - if (p) { - tb_clean_internal(p, x - 1); - uc->l1_map[i] = NULL; - } - } - } - } - } -} - -/* return non zero if the very first instruction is invalid so that - the virtual CPU can trigger an exception. - - '*gen_code_size_ptr' contains the size of the generated code (host - code). 
-*/ -static int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr) // qq -{ - TCGContext *s = env->uc->tcg_ctx; - tcg_insn_unit *gen_code_buf; - int gen_code_size; -#ifdef CONFIG_PROFILER - int64_t ti; -#endif - -#ifdef CONFIG_PROFILER - s->tb_count1++; /* includes aborted translations because of - exceptions */ - ti = profile_getclock(); -#endif - tcg_func_start(s); - - gen_intermediate_code(env, tb); - - // Unicorn: when tracing block, patch block size operand for callback - if (env->uc->size_arg != -1 && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, tb->pc)) { - if (env->uc->block_full) // block size is unknown - *(s->gen_opparam_buf + env->uc->size_arg) = 0; - else - *(s->gen_opparam_buf + env->uc->size_arg) = tb->size; - } - - /* generate machine code */ - gen_code_buf = tb->tc_ptr; - tb->tb_next_offset[0] = 0xffff; - tb->tb_next_offset[1] = 0xffff; - s->tb_next_offset = tb->tb_next_offset; -#ifdef USE_DIRECT_JUMP - s->tb_jmp_offset = tb->tb_jmp_offset; - s->tb_next = NULL; -#else - s->tb_jmp_offset = NULL; - s->tb_next = tb->tb_next; -#endif - -#ifdef CONFIG_PROFILER - s->tb_count++; - s->interm_time += profile_getclock() - ti; - s->code_time -= profile_getclock(); -#endif - gen_code_size = tcg_gen_code(s, gen_code_buf); - if (gen_code_size == -1) { - return -1; - } - //printf(">>> code size = %u: ", gen_code_size); - //int i; - //for (i = 0; i < gen_code_size; i++) { - // printf(" %02x", gen_code_buf[i]); - //} - //printf("\n"); - *gen_code_size_ptr = gen_code_size; -#ifdef CONFIG_PROFILER - s->code_time += profile_getclock(); - s->code_in_len += tb->size; - s->code_out_len += gen_code_size; -#endif - - return 0; -} - -/* The cpu state corresponding to 'searched_pc' is restored. - */ -static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, - uintptr_t searched_pc) -{ - CPUArchState *env = cpu->env_ptr; - TCGContext *s = cpu->uc->tcg_ctx; - int j; - uintptr_t tc_ptr; -#ifdef CONFIG_PROFILER - int64_t ti; -#endif - -#ifdef CONFIG_PROFILER - ti = profile_getclock(); -#endif - tcg_func_start(s); - - gen_intermediate_code_pc(env, tb); - - /* find opc index corresponding to search_pc */ - tc_ptr = (uintptr_t)tb->tc_ptr; - if (searched_pc < tc_ptr) - return -1; - - s->tb_next_offset = tb->tb_next_offset; -#ifdef USE_DIRECT_JUMP - s->tb_jmp_offset = tb->tb_jmp_offset; - s->tb_next = NULL; -#else - s->tb_jmp_offset = NULL; - s->tb_next = tb->tb_next; -#endif - j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr, - searched_pc - tc_ptr); - if (j < 0) - return -1; - /* now find start of instruction before */ - while (s->gen_opc_instr_start[j] == 0) { - j--; - } - cpu->icount_decr.u16.low -= s->gen_opc_icount[j]; - - restore_state_to_opc(env, tb, j); - -#ifdef CONFIG_PROFILER - s->restore_time += profile_getclock() - ti; - s->restore_count++; -#endif - return 0; -} - -bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr) -{ - TranslationBlock *tb; - CPUArchState *env = cpu->env_ptr; - - tb = tb_find_pc(env->uc, retaddr); - if (tb) { - cpu_restore_state_from_tb(cpu, tb, retaddr); - return true; - } - return false; -} - -#ifdef _WIN32 -static inline QEMU_UNUSED_FUNC void map_exec(void *addr, long size) -{ - DWORD old_protect; - VirtualProtect(addr, size, - PAGE_EXECUTE_READWRITE, &old_protect); -} -#else -static inline QEMU_UNUSED_FUNC void map_exec(void *addr, long size) -{ - unsigned long start, end, page_size; - - page_size = getpagesize(); - start = (unsigned long)addr; - start &= ~(page_size - 1); - - end = (unsigned long)addr + size; - end += 
page_size - 1; - end &= ~(page_size - 1); - - mprotect((void *)start, end - start, - PROT_READ | PROT_WRITE | PROT_EXEC); -} -#endif - -static void page_size_init(void) -{ - /* NOTE: we can always suppose that qemu_host_page_size >= - TARGET_PAGE_SIZE */ - qemu_real_host_page_size = getpagesize(); - if (qemu_host_page_size == 0) { - qemu_host_page_size = qemu_real_host_page_size; - } - if (qemu_host_page_size < TARGET_PAGE_SIZE) { - qemu_host_page_size = TARGET_PAGE_SIZE; - } - qemu_host_page_mask = ~(qemu_host_page_size - 1); -} - -static void page_init(void) -{ - page_size_init(); -#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) - { -#ifdef HAVE_KINFO_GETVMMAP - struct kinfo_vmentry *freep; - int i, cnt; - - freep = kinfo_getvmmap(getpid(), &cnt); - if (freep) { - mmap_lock(); - for (i = 0; i < cnt; i++) { - unsigned long startaddr, endaddr; - - startaddr = freep[i].kve_start; - endaddr = freep[i].kve_end; - if (h2g_valid(startaddr)) { - startaddr = h2g(startaddr) & TARGET_PAGE_MASK; - - if (h2g_valid(endaddr)) { - endaddr = h2g(endaddr); - page_set_flags(startaddr, endaddr, PAGE_RESERVED); - } else { -#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS - endaddr = ~0ul; - page_set_flags(startaddr, endaddr, PAGE_RESERVED); -#endif - } - } - } - free(freep); - mmap_unlock(); - } -#else - FILE *f; - - last_brk = (unsigned long)sbrk(0); - - f = fopen("/compat/linux/proc/self/maps", "r"); - if (f) { - mmap_lock(); - - do { - unsigned long startaddr, endaddr; - int n; - - n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); - - if (n == 2 && h2g_valid(startaddr)) { - startaddr = h2g(startaddr) & TARGET_PAGE_MASK; - - if (h2g_valid(endaddr)) { - endaddr = h2g(endaddr); - } else { - endaddr = ~0ul; - } - page_set_flags(startaddr, endaddr, PAGE_RESERVED); - } - } while (!feof(f)); - - fclose(f); - mmap_unlock(); - } -#endif - } -#endif -} - -static PageDesc *page_find_alloc(struct uc_struct *uc, tb_page_addr_t index, int alloc) -{ - PageDesc *pd; - void **lp; - int i; - -#if defined(CONFIG_USER_ONLY) - /* We can't use g_malloc because it may recurse into a locked mutex. */ -# define ALLOC(P, SIZE) \ - do { \ - P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \ - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \ - } while (0) -#else -# define ALLOC(P, SIZE) \ - do { P = g_malloc0(SIZE); } while (0) -#endif - - if (uc->l1_map == NULL) { - uc->l1_map_size = V_L1_SIZE * sizeof(uc->l1_map); - ALLOC(uc->l1_map, uc->l1_map_size); - } - - /* Level 1. Always allocated. */ - lp = uc->l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)); - - /* Level 2..N-1. */ - for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) { - void **p = *lp; - - if (p == NULL) { - if (!alloc) { - return NULL; - } - ALLOC(p, sizeof(void *) * V_L2_SIZE); - *lp = p; - } - - lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); - } - - pd = *lp; - if (pd == NULL) { - if (!alloc) { - return NULL; - } - ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE); - *lp = pd; - } - -#undef ALLOC - - return pd + (index & (V_L2_SIZE - 1)); -} - -static inline PageDesc *page_find(struct uc_struct *uc, tb_page_addr_t index) -{ - return page_find_alloc(uc, index, 0); -} - -#if !defined(CONFIG_USER_ONLY) -#define mmap_lock() do { } while (0) -#define mmap_unlock() do { } while (0) -#endif - -#if defined(CONFIG_USER_ONLY) -/* Currently it is not recommended to allocate big chunks of data in - user mode. It will change when a dedicated libc will be used. */ -/* ??? 64-bit hosts ought to have no problem mmaping data outside the - region in which the guest needs to run. 
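/*
 * Read-only sketch of the radix walk implemented by page_find_alloc above
 * (assumes the V_L1_ and V_L2_ constants defined earlier; this mirrors the
 * alloc == 0 path and is illustrative, not part of the original file):
 */
static inline void *l1_map_lookup_sketch(void **l1_map, tb_page_addr_t index)
{
    /* Level 1: the top V_L1_BITS of the page index select the root slot. */
    void **lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
    int i;

    /* Levels 2..N-1: consume V_L2_BITS per level, highest bits first. */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            return NULL;
        }
        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }
    /* *lp is now the PageDesc array covering this group of V_L2_SIZE pages. */
    return *lp;
}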
Revisit this. */ -#define USE_STATIC_CODE_GEN_BUFFER -#endif - -/* ??? Should configure for this, not list operating systems here. */ -#if (defined(__linux__) \ - || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \ - || defined(__DragonFly__) || defined(__OpenBSD__) \ - || defined(__NetBSD__)) -# define USE_MMAP -#endif - -/* Minimum size of the code gen buffer. This number is randomly chosen, - but not so small that we can't have a fair number of TB's live. */ -#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024) - -/* Maximum size of the code gen buffer we'd like to use. Unless otherwise - indicated, this is constrained by the range of direct branches on the - host cpu, as used by the TCG implementation of goto_tb. */ -#if defined(__x86_64__) -# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) -#elif defined(__sparc__) -# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) -#elif defined(__aarch64__) -# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) -#elif defined(__arm__) -# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024) -#elif defined(__s390x__) - /* We have a +- 4GB range on the branches; leave some slop. */ -# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024) -#elif defined(__mips__) - /* We have a 256MB branch region, but leave room to make sure the - main executable is also within that region. */ -# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) -#else -# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) -#endif - -#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (8 * 1024 * 1024) - -#define DEFAULT_CODE_GEN_BUFFER_SIZE \ - (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ - ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) - -static inline size_t size_code_gen_buffer(struct uc_struct *uc, size_t tb_size) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - - /* Size the buffer. */ - if (tb_size == 0) { -#ifdef USE_STATIC_CODE_GEN_BUFFER - tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; -#else - /* ??? Needs adjustments. */ - /* ??? If we relax the requirement that CONFIG_USER_ONLY use the - static buffer, we could size this on RESERVED_VA, on the text - segment size of the executable, or continue to use the default. */ - tb_size = (unsigned long)DEFAULT_CODE_GEN_BUFFER_SIZE; -#endif - } - if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { - tb_size = MIN_CODE_GEN_BUFFER_SIZE; - } - if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { - tb_size = MAX_CODE_GEN_BUFFER_SIZE; - } - tcg_ctx->code_gen_buffer_size = tb_size; - return tb_size; -} - -#ifdef __mips__ -/* In order to use J and JAL within the code_gen_buffer, we require - that the buffer not cross a 256MB boundary. */ -static inline bool cross_256mb(void *addr, size_t size) -{ - return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000; -} - -/* We weren't able to allocate a buffer without crossing that boundary, - so make do with the larger portion of the buffer that doesn't cross. - Returns the new base of the buffer, and adjusts code_gen_buffer_size. 
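/*
 * Worked example for cross_256mb above: addr = 0x0fff0000 with
 * size = 0x20000 ends at 0x10010000, so addr ^ (addr + size) == 0x1ffe0000,
 * and (0x1ffe0000 & 0xf0000000) != 0 reports a 256MB-boundary crossing.
 * A buffer of the same size at 0x20000000 gives 0x00100000, whose high
 * nibble is clear: start and end share one 256MB region, so J/JAL can
 * reach everything.
 */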
*/ -static inline void *split_cross_256mb(struct uc_struct *uc, void *buf1, size_t size1) -{ - void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000); - size_t size2 = buf1 + size1 - buf2; - TCGContext *tcg_ctx = uc->tcg_ctx; - - size1 = buf2 - buf1; - if (size1 < size2) { - size1 = size2; - buf1 = buf2; - } - - tcg_ctx->code_gen_buffer_size = size1; - return buf1; -} -#endif - -#ifdef USE_STATIC_CODE_GEN_BUFFER -static uint8_t QEMU_ALIGN(CODE_GEN_ALIGN, static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]); - -void free_code_gen_buffer(struct uc_struct *uc) -{ - // Do nothing, we use a static buffer. -} - -static inline void *alloc_code_gen_buffer(struct uc_struct *uc) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - void *buf = static_code_gen_buffer; -#ifdef __mips__ - if (cross_256mb(buf, tcg_ctx->code_gen_buffer_size)) { - buf = split_cross_256mb(buf, tcg_ctx->code_gen_buffer_size); - } -#endif - map_exec(buf, tcg_ctx->code_gen_buffer_size); - return buf; -} -#elif defined(USE_MMAP) -void free_code_gen_buffer(struct uc_struct *uc) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - if (tcg_ctx->code_gen_buffer) - munmap(tcg_ctx->code_gen_buffer, tcg_ctx->code_gen_buffer_size); -} - -static inline void *alloc_code_gen_buffer(struct uc_struct *uc) -{ - int flags = MAP_PRIVATE | MAP_ANONYMOUS; - uintptr_t start = 0; - void *buf; - TCGContext *tcg_ctx = uc->tcg_ctx; - - /* Constrain the position of the buffer based on the host cpu. - Note that these addresses are chosen in concert with the - addresses assigned in the relevant linker script file. */ -# if defined(__PIE__) || defined(__PIC__) - /* Don't bother setting a preferred location if we're building - a position-independent executable. We're more likely to get - an address near the main executable if we let the kernel - choose the address. */ -# elif defined(__x86_64__) && defined(MAP_32BIT) - /* Force the memory down into low memory with the executable. - Leave the choice of exact location with the kernel. */ - flags |= MAP_32BIT; - /* Cannot expect to map more than 800MB in low memory. */ - if (tcg_ctx->code_gen_buffer_size > 800u * 1024 * 1024) { - tcg_ctx->code_gen_buffer_size = 800u * 1024 * 1024; - } -# elif defined(__sparc__) - start = 0x40000000ul; -# elif defined(__s390x__) - start = 0x90000000ul; -# elif defined(__mips__) - /* ??? We ought to more explicitly manage layout for softmmu too. */ -# ifdef CONFIG_USER_ONLY - start = 0x68000000ul; -# elif _MIPS_SIM == _ABI64 - start = 0x128000000ul; -# else - start = 0x08000000ul; -# endif -# endif - - buf = mmap((void *)start, tcg_ctx->code_gen_buffer_size, - PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0); - if (buf == MAP_FAILED) { - return NULL; - } - -#ifdef __mips__ - if (cross_256mb(buf, tcg_ctx->code_gen_buffer_size)) { - /* Try again, with the original still mapped, to avoid re-acquiring - that 256mb crossing. This time don't specify an address. */ - size_t size2, size1 = tcg_ctx->code_gen_buffer_size; - void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC, - flags, -1, 0); - if (buf2 != MAP_FAILED) { - if (!cross_256mb(buf2, size1)) { - /* Success! Use the new buffer. */ - munmap(buf, size1); - return buf2; - } - /* Failure. Work with what we had. */ - munmap(buf2, size1); - } - - /* Split the original buffer. Free the smaller half. */ - buf2 = split_cross_256mb(buf, size1); - size2 = tcg_ctx->code_gen_buffer_size; - munmap(buf + (buf == buf2 ? 
size2 : 0), size1 - size2); - return buf2; - } -#endif - - return buf; -} -#else -void free_code_gen_buffer(struct uc_struct *uc) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - if (tcg_ctx->code_gen_buffer) - g_free(tcg_ctx->code_gen_buffer); -} - -static inline void *alloc_code_gen_buffer(struct uc_struct *uc) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - void *buf = g_malloc(tcg_ctx->code_gen_buffer_size); - - if (buf == NULL) { - return NULL; - } - -#ifdef __mips__ - if (cross_256mb(buf, tcg_ctx->code_gen_buffer_size)) { - void *buf2 = g_malloc(tcg_ctx->code_gen_buffer_size); - if (buf2 != NULL && !cross_256mb(buf2, size1)) { - /* Success! Use the new buffer. */ - free(buf); - buf = buf2; - } else { - /* Failure. Work with what we had. Since this is malloc - and not mmap, we can't free the other half. */ - free(buf2); - buf = split_cross_256mb(buf, tcg_ctx->code_gen_buffer_size); - } - } -#endif - - map_exec(buf, tcg_ctx->code_gen_buffer_size); - return buf; -} -#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */ - -static inline void code_gen_alloc(struct uc_struct *uc, size_t tb_size) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - - tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(uc, tb_size); - tcg_ctx->code_gen_buffer = alloc_code_gen_buffer(uc); - if (tcg_ctx->code_gen_buffer == NULL) { - fprintf(stderr, "Could not allocate dynamic translator buffer\n"); - exit(1); - } - - //qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size, - // QEMU_MADV_HUGEPAGE); - - /* Steal room for the prologue at the end of the buffer. This ensures - (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches - from TB's to the prologue are going to be in range. It also means - that we don't need to mark (additional) portions of the data segment - as executable. */ - tcg_ctx->code_gen_prologue = (char*)tcg_ctx->code_gen_buffer + - tcg_ctx->code_gen_buffer_size - 1024; - tcg_ctx->code_gen_buffer_size -= 1024; - - tcg_ctx->code_gen_buffer_max_size = tcg_ctx->code_gen_buffer_size - - (TCG_MAX_OP_SIZE * OPC_BUF_SIZE); - tcg_ctx->code_gen_max_blocks = tcg_ctx->code_gen_buffer_size / - CODE_GEN_AVG_BLOCK_SIZE; - tcg_ctx->tb_ctx.tbs = - g_malloc(tcg_ctx->code_gen_max_blocks * sizeof(TranslationBlock)); -} - -/* Must be called before using the QEMU cpus. 'tb_size' is the size - (in bytes) allocated to the translation buffer. Zero means default - size. */ -void tcg_exec_init(struct uc_struct *uc, unsigned long tb_size) -{ - TCGContext *tcg_ctx; - - cpu_gen_init(uc); - code_gen_alloc(uc, tb_size); - tcg_ctx = uc->tcg_ctx; - tcg_ctx->code_gen_ptr = tcg_ctx->code_gen_buffer; - tcg_ctx->uc = uc; - page_init(); -#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE) - /* There's no guest base to take into account, so go ahead and - initialize the prologue now. */ - tcg_prologue_init(tcg_ctx); -#endif -} - -bool tcg_enabled(struct uc_struct *uc) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - return tcg_ctx->code_gen_buffer != NULL; -} - -/* Allocate a new translation block. Flush the translation buffer if - too many translation blocks or too much generated code. 
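/*
 * Resulting layout from code_gen_alloc above (sizes exactly as in the code;
 * the diagram is only an illustration):
 *
 *   code_gen_buffer                                           prologue
 *   |<------ code_gen_buffer_size - 1024 (translated TBs) ->|<- 1024 ->|
 *
 * code_gen_buffer_max_size additionally keeps TCG_MAX_OP_SIZE * OPC_BUF_SIZE
 * bytes of headroom below the end, so a TB in mid-generation cannot overrun
 * the buffer before the size check in tb_alloc fires.
 */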
*/ -static TranslationBlock *tb_alloc(struct uc_struct *uc, target_ulong pc) -{ - TranslationBlock *tb; - TCGContext *tcg_ctx = uc->tcg_ctx; - - if (tcg_ctx->tb_ctx.nb_tbs >= tcg_ctx->code_gen_max_blocks || - (size_t)(((char*)tcg_ctx->code_gen_ptr - (char*)tcg_ctx->code_gen_buffer)) >= - tcg_ctx->code_gen_buffer_max_size) { - return NULL; - } - tb = &tcg_ctx->tb_ctx.tbs[tcg_ctx->tb_ctx.nb_tbs++]; - tb->pc = pc; - tb->cflags = 0; - return tb; -} - -void tb_free(struct uc_struct *uc, TranslationBlock *tb) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - - /* In practice this is mostly used for single use temporary TB - Ignore the hard cases and just back up if this TB happens to - be the last one generated. */ - if (tcg_ctx->tb_ctx.nb_tbs > 0 && - tb == &tcg_ctx->tb_ctx.tbs[tcg_ctx->tb_ctx.nb_tbs - 1]) { - tcg_ctx->code_gen_ptr = tb->tc_ptr; - tcg_ctx->tb_ctx.nb_tbs--; - } -} - -static inline void invalidate_page_bitmap(PageDesc *p) -{ - if (p->code_bitmap) { - g_free(p->code_bitmap); - p->code_bitmap = NULL; - } - p->code_write_count = 0; -} - -/* Set to NULL all the 'first_tb' fields in all PageDescs. */ -static void page_flush_tb_1(int level, void **lp) -{ - int i; - - if (*lp == NULL) { - return; - } - if (level == 0) { - PageDesc *pd = *lp; - - for (i = 0; i < V_L2_SIZE; ++i) { - pd[i].first_tb = NULL; - invalidate_page_bitmap(pd + i); - } - } else { - void **pp = *lp; - - for (i = 0; i < V_L2_SIZE; ++i) { - page_flush_tb_1(level - 1, pp + i); - } - } -} - -static void page_flush_tb(struct uc_struct *uc) -{ - int i; - - if (uc->l1_map == NULL) - return; - - for (i = 0; i < V_L1_SIZE; i++) { - page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, uc->l1_map + i); - } -} - -/* flush all the translation blocks */ -/* XXX: tb_flush is currently not thread safe */ -void tb_flush(CPUArchState *env1) -{ - CPUState *cpu = ENV_GET_CPU(env1); - struct uc_struct* uc = cpu->uc; - TCGContext *tcg_ctx = uc->tcg_ctx; - -#if defined(DEBUG_FLUSH) - printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", - (unsigned long)(tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer), - tcg_ctx->tb_ctx.nb_tbs, tcg_ctx->tb_ctx.nb_tbs > 0 ? 
- ((unsigned long)(tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer)) / - tcg_ctx->tb_ctx.nb_tbs : 0); -#endif - if ((unsigned long)((char*)tcg_ctx->code_gen_ptr - (char*)tcg_ctx->code_gen_buffer) - > tcg_ctx->code_gen_buffer_size) { - cpu_abort(cpu, "Internal error: code buffer overflow\n"); - } - tcg_ctx->tb_ctx.nb_tbs = 0; - - memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache)); - - memset(tcg_ctx->tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx->tb_ctx.tb_phys_hash)); - page_flush_tb(uc); - - tcg_ctx->code_gen_ptr = tcg_ctx->code_gen_buffer; - /* XXX: flush processor icache at this point if cache flush is - expensive */ - tcg_ctx->tb_ctx.tb_flush_count++; -} - -#ifdef DEBUG_TB_CHECK - -static void tb_invalidate_check(target_ulong address) -{ - TranslationBlock *tb; - int i; - - address &= TARGET_PAGE_MASK; - for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) { - for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) { - if (!(address + TARGET_PAGE_SIZE <= tb->pc || - address >= tb->pc + tb->size)) { - printf("ERROR invalidate: address=" TARGET_FMT_lx - " PC=%08lx size=%04x\n", - address, (long)tb->pc, tb->size); - } - } - } -} - -/* verify that all the pages have correct rights for code */ -static void tb_page_check(struct uc_struct *uc) -{ - TranslationBlock *tb; - int i, flags1, flags2; - TCGContext *tcg_ctx = uc->tcg_ctx; - - for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) { - for (tb = tcg_ctx->tb_ctx.tb_phys_hash[i]; tb != NULL; - tb = tb->phys_hash_next) { - flags1 = page_get_flags(tb->pc); - flags2 = page_get_flags(tb->pc + tb->size - 1); - if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { - printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", - (long)tb->pc, tb->size, flags1, flags2); - } - } - } -} - -#endif - -static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb) -{ - TranslationBlock *tb1; - - for (;;) { - tb1 = *ptb; - if (tb1 == tb) { - *ptb = tb1->phys_hash_next; - break; - } - ptb = &tb1->phys_hash_next; - } -} - -static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) -{ - TranslationBlock *tb1; - unsigned int n1; - - for (;;) { - tb1 = *ptb; - n1 = (uintptr_t)tb1 & 3; - tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); - if (tb1 == tb) { - *ptb = tb1->page_next[n1]; - break; - } - ptb = &tb1->page_next[n1]; - } -} - -static inline void tb_jmp_remove(TranslationBlock *tb, int n) -{ - TranslationBlock *tb1, **ptb; - unsigned int n1; - - ptb = &tb->jmp_next[n]; - tb1 = *ptb; - if (tb1) { - /* find tb(n) in circular list */ - for (;;) { - tb1 = *ptb; - n1 = (uintptr_t)tb1 & 3; - tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); - if (n1 == n && tb1 == tb) { - break; - } - if (n1 == 2) { - ptb = &tb1->jmp_first; - } else { - ptb = &tb1->jmp_next[n1]; - } - } - /* now we can suppress tb(n) from the list */ - *ptb = tb->jmp_next[n]; - - tb->jmp_next[n] = NULL; - } -} - -/* reset the jump entry 'n' of a TB so that it is not chained to - another TB */ -static inline void tb_reset_jump(TranslationBlock *tb, int n) -{ - tb_set_jmp_target(tb, n, (uintptr_t)((char*)tb->tc_ptr + tb->tb_next_offset[n])); -} - -/* invalidate one TB */ -void tb_phys_invalidate(struct uc_struct *uc, - TranslationBlock *tb, tb_page_addr_t page_addr) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - CPUState *cpu = uc->cpu; - PageDesc *p; - unsigned int h, n1; - tb_page_addr_t phys_pc; - TranslationBlock *tb1, *tb2; - - /* remove the TB from the hash list */ - phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); - h = 
tb_phys_hash_func(phys_pc); - tb_hash_remove(&tcg_ctx->tb_ctx.tb_phys_hash[h], tb); - - /* remove the TB from the page list */ - if (tb->page_addr[0] != page_addr) { - p = page_find(uc, tb->page_addr[0] >> TARGET_PAGE_BITS); - tb_page_remove(&p->first_tb, tb); - invalidate_page_bitmap(p); - } - if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { - p = page_find(uc, tb->page_addr[1] >> TARGET_PAGE_BITS); - tb_page_remove(&p->first_tb, tb); - invalidate_page_bitmap(p); - } - - tcg_ctx->tb_ctx.tb_invalidated_flag = 1; - - /* remove the TB from the hash list */ - h = tb_jmp_cache_hash_func(tb->pc); - if (cpu->tb_jmp_cache[h] == tb) { - cpu->tb_jmp_cache[h] = NULL; - } - - /* suppress this TB from the two jump lists */ - tb_jmp_remove(tb, 0); - tb_jmp_remove(tb, 1); - - /* suppress any remaining jumps to this TB */ - tb1 = tb->jmp_first; - for (;;) { - n1 = (uintptr_t)tb1 & 3; - if (n1 == 2) { - break; - } - tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); - tb2 = tb1->jmp_next[n1]; - tb_reset_jump(tb1, n1); - tb1->jmp_next[n1] = NULL; - tb1 = tb2; - } - tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */ - - tcg_ctx->tb_ctx.tb_phys_invalidate_count++; -} - -static inline void set_bits(uint8_t *tab, int start, int len) -{ - int end, mask, end1; - - end = start + len; - tab += start >> 3; - mask = 0xff << (start & 7); - if ((start & ~7) == (end & ~7)) { - if (start < end) { - mask &= ~(0xff << (end & 7)); - *tab |= mask; - } - } else { - *tab++ |= mask; - start = (start + 8) & ~7; - end1 = end & ~7; - while (start < end1) { - *tab++ = 0xff; - start += 8; - } - if (start < end) { - mask = ~(0xff << (end & 7)); - *tab |= mask; - } - } -} - -static void build_page_bitmap(PageDesc *p) -{ - int n, tb_start, tb_end; - TranslationBlock *tb; - - p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8); - - tb = p->first_tb; - while (tb != NULL) { - n = (uintptr_t)tb & 3; - tb = (TranslationBlock *)((uintptr_t)tb & ~3); - /* NOTE: this is subtle as a TB may span two physical pages */ - if (n == 0) { - /* NOTE: tb_end may be after the end of the page, but - it is not a problem */ - tb_start = tb->pc & ~TARGET_PAGE_MASK; - tb_end = tb_start + tb->size; - if (tb_end > TARGET_PAGE_SIZE) { - tb_end = TARGET_PAGE_SIZE; - } - } else { - tb_start = 0; - tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); - } - set_bits(p->code_bitmap, tb_start, tb_end - tb_start); - tb = tb->page_next[n]; - } -} - -TranslationBlock *tb_gen_code(CPUState *cpu, - target_ulong pc, target_ulong cs_base, - int flags, int cflags) // qq -{ - CPUArchState *env = cpu->env_ptr; - TCGContext *tcg_ctx = env->uc->tcg_ctx; - TranslationBlock *tb; - tb_page_addr_t phys_pc, phys_page2; - int code_gen_size; - int ret; - - phys_pc = get_page_addr_code(env, pc); - tb = tb_alloc(env->uc, pc); - if (!tb) { - /* flush must be done */ - tb_flush(env); - /* cannot fail at this point */ - tb = tb_alloc(env->uc, pc); - /* Don't forget to invalidate previous TB info. 
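 */

/*
 * Aside: a standalone sketch of the round-up-to-alignment idiom that
 * tb_gen_code relies on below when it advances code_gen_ptr past the
 * freshly generated code. The name align_up is illustrative, not from
 * this file; the alignment must be a power of two.
 */
#include <assert.h>
#include <stdint.h>

static uintptr_t align_up(uintptr_t p, uintptr_t align)
{
    assert((align & (align - 1)) == 0);  /* power-of-two alignment only */
    return (p + align - 1) & ~(align - 1);
}
/* align_up(0x1001, 16) == 0x1010, and already-aligned values pass through
   unchanged -- exactly what the CODE_GEN_ALIGN arithmetic below computes. */

/*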
*/ - tcg_ctx->tb_ctx.tb_invalidated_flag = 1; - } - tb->tc_ptr = tcg_ctx->code_gen_ptr; - tb->cs_base = cs_base; - tb->flags = flags; - tb->cflags = cflags; - ret = cpu_gen_code(env, tb, &code_gen_size); // qq - if (ret == -1) { - tb_free(env->uc, tb); - return NULL; - } - tcg_ctx->code_gen_ptr = (void *)(((uintptr_t)tcg_ctx->code_gen_ptr + - code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); - - phys_page2 = -1; - /* check next page if needed */ - if (tb->size) { - target_ulong virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; - if ((pc & TARGET_PAGE_MASK) != virt_page2) { - phys_page2 = get_page_addr_code(env, virt_page2); - } - } - tb_link_page(cpu->uc, tb, phys_pc, phys_page2); - return tb; -} - -/* - * Invalidate all TBs which intersect with the target physical address range - * [start;end[. NOTE: start and end may refer to *different* physical pages. - * 'is_cpu_write_access' should be true if called from a real cpu write - * access: the virtual CPU will exit the current TB if code is modified inside - * this TB. - */ -void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end, - int is_cpu_write_access) -{ - while (start < end) { - tb_invalidate_phys_page_range(uc, start, end, is_cpu_write_access); - start &= TARGET_PAGE_MASK; - start += TARGET_PAGE_SIZE; - } -} - -/* - * Invalidate all TBs which intersect with the target physical address range - * [start;end[. NOTE: start and end must refer to the *same* physical page. - * 'is_cpu_write_access' should be true if called from a real cpu write - * access: the virtual CPU will exit the current TB if code is modified inside - * this TB. - */ -void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end, - int is_cpu_write_access) -{ - TranslationBlock *tb, *tb_next, *saved_tb; - CPUState *cpu = uc->current_cpu; -#if defined(TARGET_HAS_PRECISE_SMC) - CPUArchState *env = NULL; -#endif - tb_page_addr_t tb_start, tb_end; - PageDesc *p; - int n; -#ifdef TARGET_HAS_PRECISE_SMC - int current_tb_not_found = is_cpu_write_access; - TranslationBlock *current_tb = NULL; - int current_tb_modified = 0; - target_ulong current_pc = 0; - target_ulong current_cs_base = 0; - int current_flags = 0; -#endif /* TARGET_HAS_PRECISE_SMC */ - - p = page_find(uc, start >> TARGET_PAGE_BITS); - if (!p) { - return; - } - if (!p->code_bitmap && - ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD && - is_cpu_write_access) { - /* build code bitmap */ - build_page_bitmap(p); - } -#if defined(TARGET_HAS_PRECISE_SMC) - if (cpu != NULL) { - env = cpu->env_ptr; - } -#endif - - /* we remove all the TBs in the range [start, end[ */ - /* XXX: see if in some cases it could be faster to invalidate all - the code */ - tb = p->first_tb; - while (tb != NULL) { - n = (uintptr_t)tb & 3; - tb = (TranslationBlock *)((uintptr_t)tb & ~3); - tb_next = tb->page_next[n]; - /* NOTE: this is subtle as a TB may span two physical pages */ - if (n == 0) { - /* NOTE: tb_end may be after the end of the page, but - it is not a problem */ - tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); - tb_end = tb_start + tb->size; - } else { - tb_start = tb->page_addr[1]; - tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); - } - if (!(tb_end <= start || tb_start >= end)) { -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb_not_found) { - current_tb_not_found = 0; - current_tb = NULL; - if (cpu->mem_io_pc) { - /* now we have a real cpu fault */ - current_tb = tb_find_pc(uc, cpu->mem_io_pc); - } - } - if 
(current_tb == tb &&
-                (current_tb->cflags & CF_COUNT_MASK) != 1) {
-                /* If we are modifying the current TB, we must stop
-                   its execution. We could be more precise by checking
-                   that the modification is after the current PC, but it
-                   would require a specialized function to partially
-                   restore the CPU state */
-
-                current_tb_modified = 1;
-                // self-modifying code will restore state from TB
-                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
-                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
-                                     &current_flags);
-            }
-#endif /* TARGET_HAS_PRECISE_SMC */
-            /* we need to do that to handle the case where a signal
-               occurs while doing tb_phys_invalidate() */
-            saved_tb = NULL;
-            if (cpu != NULL) {
-                saved_tb = cpu->current_tb;
-                cpu->current_tb = NULL;
-            }
-            tb_phys_invalidate(uc, tb, -1);
-            if (cpu != NULL) {
-                cpu->current_tb = saved_tb;
-                if (cpu->interrupt_request && cpu->current_tb) {
-                    cpu_interrupt(cpu, cpu->interrupt_request);
-                }
-            }
-        }
-        tb = tb_next;
-    }
-#if !defined(CONFIG_USER_ONLY)
-    /* if no code remaining, no need to continue to use slow writes */
-    if (!p->first_tb) {
-        invalidate_page_bitmap(p);
-        if (is_cpu_write_access) {
-            tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
-        }
-    }
-#endif
-#ifdef TARGET_HAS_PRECISE_SMC
-    if (current_tb_modified) {
-        /* we generate a block containing just the instruction
-           modifying the memory. It will ensure that it cannot modify
-           itself */
-        cpu->current_tb = NULL;
-        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
-        cpu_resume_from_signal(cpu, NULL);
-    }
-#endif
-}
-
-#if !defined(CONFIG_SOFTMMU)
-static void tb_invalidate_phys_page(struct uc_struct *uc, tb_page_addr_t addr,
-                                    uintptr_t pc, void *puc,
-                                    bool locked)
-{
-    TranslationBlock *tb;
-    PageDesc *p;
-    int n;
-#ifdef TARGET_HAS_PRECISE_SMC
-    TranslationBlock *current_tb = NULL;
-    CPUState *cpu = uc->current_cpu;
-    CPUArchState *env = NULL;
-    int current_tb_modified = 0;
-    target_ulong current_pc = 0;
-    target_ulong current_cs_base = 0;
-    int current_flags = 0;
-#endif
-
-    addr &= TARGET_PAGE_MASK;
-    p = page_find(addr >> TARGET_PAGE_BITS);
-    if (!p) {
-        return;
-    }
-    tb = p->first_tb;
-#ifdef TARGET_HAS_PRECISE_SMC
-    if (tb && pc != 0) {
-        current_tb = tb_find_pc(uc, pc);
-    }
-    if (cpu != NULL) {
-        env = cpu->env_ptr;
-    }
-#endif
-    while (tb != NULL) {
-        n = (uintptr_t)tb & 3;
-        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
-#ifdef TARGET_HAS_PRECISE_SMC
-        if (current_tb == tb &&
-            (current_tb->cflags & CF_COUNT_MASK) != 1) {
-            /* If we are modifying the current TB, we must stop
-               its execution. We could be more precise by checking
-               that the modification is after the current PC, but it
-               would require a specialized function to partially
-               restore the CPU state */
-
-            current_tb_modified = 1;
-            cpu_restore_state_from_tb(cpu, current_tb, pc);
-            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
-                                 &current_flags);
-        }
-#endif /* TARGET_HAS_PRECISE_SMC */
-        tb_phys_invalidate(uc, tb, addr);
-        tb = tb->page_next[n];
-    }
-    p->first_tb = NULL;
-#ifdef TARGET_HAS_PRECISE_SMC
-    if (current_tb_modified) {
-        /* we generate a block containing just the instruction
-           modifying the memory.
It will ensure that it cannot modify - itself */ - cpu->current_tb = NULL; - tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1); - if (locked) { - mmap_unlock(); - } - cpu_resume_from_signal(cpu, puc); - } -#endif -} -#endif - -/* add the tb in the target page and protect it if necessary */ -static inline void tb_alloc_page(struct uc_struct *uc, TranslationBlock *tb, - unsigned int n, tb_page_addr_t page_addr) -{ - PageDesc *p; -#ifndef CONFIG_USER_ONLY - bool page_already_protected; -#endif - - tb->page_addr[n] = page_addr; - p = page_find_alloc(uc, page_addr >> TARGET_PAGE_BITS, 1); - tb->page_next[n] = p->first_tb; -#ifndef CONFIG_USER_ONLY - page_already_protected = p->first_tb != NULL; -#endif - p->first_tb = (TranslationBlock *)((uintptr_t)tb | n); - invalidate_page_bitmap(p); - -#if defined(TARGET_HAS_SMC) || 1 - -#if defined(CONFIG_USER_ONLY) - if (p->flags & PAGE_WRITE) { - target_ulong addr; - PageDesc *p2; - int prot; - - /* force the host page as non writable (writes will have a - page fault + mprotect overhead) */ - page_addr &= qemu_host_page_mask; - prot = 0; - for (addr = page_addr; addr < page_addr + qemu_host_page_size; - addr += TARGET_PAGE_SIZE) { - - p2 = page_find(addr >> TARGET_PAGE_BITS); - if (!p2) { - continue; - } - prot |= p2->flags; - p2->flags &= ~PAGE_WRITE; - } - mprotect(g2h(page_addr), qemu_host_page_size, - (prot & PAGE_BITS) & ~PAGE_WRITE); -#ifdef DEBUG_TB_INVALIDATE - printf("protecting code page: 0x" TARGET_FMT_lx "\n", - page_addr); -#endif - } -#else - /* if some code is already present, then the pages are already - protected. So we handle the case where only the first TB is - allocated in a physical page */ - if (!page_already_protected) { - tlb_protect_code(uc, page_addr); - } -#endif - -#endif /* TARGET_HAS_SMC */ -} - -void tb_invalidate_phys_page_fast(struct uc_struct* uc, tb_page_addr_t start, int len) -{ - PageDesc *p; - -#if 0 - if (1) { - qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", - cpu_single_env->mem_io_vaddr, len, - cpu_single_env->eip, - cpu_single_env->eip + - (intptr_t)cpu_single_env->segs[R_CS].base); - } -#endif - p = page_find(uc, start >> TARGET_PAGE_BITS); - if (!p) { - return; - } - if (p->code_bitmap) { - unsigned int nr; - unsigned long b; - - nr = start & ~TARGET_PAGE_MASK; - b = p->code_bitmap[BIT_WORD(nr)] >> ((nr & (BITS_PER_LONG - 1)) & 0x1f); - if (b & ((1 << len) - 1)) { - goto do_invalidate; - } - } else { - do_invalidate: - tb_invalidate_phys_page_range(uc, start, start + len, 1); - } -} - -/* add a new TB and link it to the physical page tables. phys_page2 is - (-1) to indicate that only one page contains the TB. */ -static void tb_link_page(struct uc_struct *uc, - TranslationBlock *tb, tb_page_addr_t phys_pc, tb_page_addr_t phys_page2) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - unsigned int h; - TranslationBlock **ptb; - - /* Grab the mmap lock to stop another thread invalidating this TB - before we are done. 
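 */

/*
 * Aside: the page lists and jump lists in this file store a 2-bit tag in
 * the low bits of each TranslationBlock pointer, which works because TBs
 * are at least 4-byte aligned. A hedged standalone sketch of the idiom,
 * with illustrative names:
 */
#include <stdint.h>

static inline void *tag_ptr(void *p, unsigned tag)   /* tag in [0, 3] */
{
    return (void *)((uintptr_t)p | (tag & 3u));
}

static inline unsigned ptr_tag(void *p)
{
    return (unsigned)((uintptr_t)p & 3u);
}

static inline void *ptr_untag(void *p)
{
    return (void *)((uintptr_t)p & ~(uintptr_t)3);
}
/* Here, tags 0 and 1 record which of a TB's two physical pages a list link
   belongs to, and tag 2 marks the end of the circular jump list. */

/*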
*/ - mmap_lock(); - /* add in the physical hash table */ - h = tb_phys_hash_func(phys_pc); - ptb = &tcg_ctx->tb_ctx.tb_phys_hash[h]; - tb->phys_hash_next = *ptb; - *ptb = tb; - - /* add in the page list */ - tb_alloc_page(uc, tb, 0, phys_pc & TARGET_PAGE_MASK); - if (phys_page2 != -1) { - tb_alloc_page(uc, tb, 1, phys_page2); - } else { - tb->page_addr[1] = -1; - } - - tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); - tb->jmp_next[0] = NULL; - tb->jmp_next[1] = NULL; - - /* init original jump addresses */ - if (tb->tb_next_offset[0] != 0xffff) { - tb_reset_jump(tb, 0); - } - if (tb->tb_next_offset[1] != 0xffff) { - tb_reset_jump(tb, 1); - } - -#ifdef DEBUG_TB_CHECK - tb_page_check(); -#endif - mmap_unlock(); -} - -/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < - tb[1].tc_ptr. Return NULL if not found */ -static TranslationBlock *tb_find_pc(struct uc_struct *uc, uintptr_t tc_ptr) -{ - TCGContext *tcg_ctx = uc->tcg_ctx; - int m_min, m_max, m; - uintptr_t v; - TranslationBlock *tb; - - if (tcg_ctx->tb_ctx.nb_tbs <= 0) { - return NULL; - } - if (tc_ptr < (uintptr_t)tcg_ctx->code_gen_buffer || - tc_ptr >= (uintptr_t)tcg_ctx->code_gen_ptr) { - return NULL; - } - /* binary search (cf Knuth) */ - m_min = 0; - m_max = tcg_ctx->tb_ctx.nb_tbs - 1; - while (m_min <= m_max) { - m = (m_min + m_max) >> 1; - tb = &tcg_ctx->tb_ctx.tbs[m]; - v = (uintptr_t)tb->tc_ptr; - if (v == tc_ptr) { - return tb; - } else if (tc_ptr < v) { - m_max = m - 1; - } else { - m_min = m + 1; - } - } - return &tcg_ctx->tb_ctx.tbs[m_max]; -} - -#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY) -void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr) -{ - ram_addr_t ram_addr; - MemoryRegion *mr; - hwaddr l = 1; - - mr = address_space_translate(as, addr, &addr, &l, false); - if (!(memory_region_is_ram(mr) - || memory_region_is_romd(mr))) { - return; - } - ram_addr = (ram_addr_t)((memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK) - + addr); - tb_invalidate_phys_page_range(as->uc, ram_addr, ram_addr + 1, 0); -} -#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */ - -void tb_check_watchpoint(CPUState *cpu) -{ - TranslationBlock *tb; - CPUArchState *env = cpu->env_ptr; - - tb = tb_find_pc(env->uc, cpu->mem_io_pc); - if (!tb) { - cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p", - (void *)cpu->mem_io_pc); - } - cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc); - tb_phys_invalidate(cpu->uc, tb, -1); -} - -#ifndef CONFIG_USER_ONLY -/* mask must never be zero, except for A20 change call */ -static void tcg_handle_interrupt(CPUState *cpu, int mask) -{ - cpu->interrupt_request |= mask; - - cpu->tcg_exit_req = 1; -} - -CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt; - -/* in deterministic execution mode, instructions doing device I/Os - must be at the end of the TB */ -void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) -{ - CPUArchState *env = cpu->env_ptr; - TranslationBlock *tb; - uint32_t n, cflags; - target_ulong pc, cs_base; - uint64_t flags; - - tb = tb_find_pc(env->uc, retaddr); - if (!tb) { - cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", - (void *)retaddr); - } - n = cpu->icount_decr.u16.low + tb->icount; - cpu_restore_state_from_tb(cpu, tb, retaddr); - /* Calculate how many instructions had been executed before the fault - occurred. */ - n = n - cpu->icount_decr.u16.low; - /* Generate a new TB ending on the I/O insn. 
*/ - n++; - /* On MIPS and SH, delay slot instructions can only be restarted if - they were already the first instruction in the TB. If this is not - the first instruction in a TB then re-execute the preceding - branch. */ -#if defined(TARGET_MIPS) - if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) { - env->active_tc.PC -= 4; - cpu->icount_decr.u16.low++; - env->hflags &= ~MIPS_HFLAG_BMASK; - } -#elif defined(TARGET_SH4) - if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 - && n > 1) { - env->pc -= 2; - cpu->icount_decr.u16.low++; - env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); - } -#endif - /* This should never happen. */ - if (n > CF_COUNT_MASK) { - cpu_abort(cpu, "TB too big during recompile"); - } - - cflags = n | CF_LAST_IO; - pc = tb->pc; - cs_base = tb->cs_base; - flags = tb->flags; - tb_phys_invalidate(cpu->uc, tb, -1); - /* FIXME: In theory this could raise an exception. In practice - we have already translated the block once so it's probably ok. */ - tb_gen_code(cpu, pc, cs_base, (int)flags, cflags); - /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not - the first in the TB) then we end up generating a whole new TB and - repeating the fault, which is horribly inefficient. - Better would be to execute just this insn uncached, or generate a - second new TB. */ - cpu_resume_from_signal(cpu, NULL); -} - -void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) -{ - unsigned int i; - - /* Discard jump cache entries for any tb which might potentially - overlap the flushed page. */ - i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE); - memset(&cpu->tb_jmp_cache[i], 0, - TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); - - i = tb_jmp_cache_hash_page(addr); - memset(&cpu->tb_jmp_cache[i], 0, - TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); -} - -#if 0 -void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) -{ - int i, target_code_size, max_target_code_size; - int direct_jmp_count, direct_jmp2_count, cross_page; - TranslationBlock *tb; - - target_code_size = 0; - max_target_code_size = 0; - cross_page = 0; - direct_jmp_count = 0; - direct_jmp2_count = 0; - for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) { - tb = &tcg_ctx.tb_ctx.tbs[i]; - target_code_size += tb->size; - if (tb->size > max_target_code_size) { - max_target_code_size = tb->size; - } - if (tb->page_addr[1] != -1) { - cross_page++; - } - if (tb->tb_next_offset[0] != 0xffff) { - direct_jmp_count++; - if (tb->tb_next_offset[1] != 0xffff) { - direct_jmp2_count++; - } - } - } - /* XXX: avoid using doubles ? */ - cpu_fprintf(f, "Translation buffer state:\n"); - cpu_fprintf(f, "gen code size %td/%zd\n", - tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer, - tcg_ctx.code_gen_buffer_max_size); - cpu_fprintf(f, "TB count %d/%d\n", - tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks); - cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", - tcg_ctx.tb_ctx.nb_tbs ? target_code_size / - tcg_ctx.tb_ctx.nb_tbs : 0, - max_target_code_size); - cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n", - tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr - - tcg_ctx.code_gen_buffer) / - tcg_ctx.tb_ctx.nb_tbs : 0, - target_code_size ? (double) (tcg_ctx.code_gen_ptr - - tcg_ctx.code_gen_buffer) / - target_code_size : 0); - cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page, - tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) / - tcg_ctx.tb_ctx.nb_tbs : 0); - cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n", - direct_jmp_count, - tcg_ctx.tb_ctx.nb_tbs ? 
(direct_jmp_count * 100) / - tcg_ctx.tb_ctx.nb_tbs : 0, - direct_jmp2_count, - tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) / - tcg_ctx.tb_ctx.nb_tbs : 0); - cpu_fprintf(f, "\nStatistics:\n"); - cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count); - cpu_fprintf(f, "TB invalidate count %d\n", - tcg_ctx.tb_ctx.tb_phys_invalidate_count); - //cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count); - tcg_dump_info(f, cpu_fprintf); -} -#endif - -#else /* CONFIG_USER_ONLY */ - -void cpu_interrupt(CPUState *cpu, int mask) -{ - cpu->interrupt_request |= mask; - cpu->tcg_exit_req = 1; -} - -#if 0 -/* - * Walks guest process memory "regions" one by one - * and calls callback function 'fn' for each region. - */ -struct walk_memory_regions_data { - walk_memory_regions_fn fn; - void *priv; - target_ulong start; - int prot; -}; - -static int walk_memory_regions_end(struct walk_memory_regions_data *data, - target_ulong end, int new_prot) -{ - if (data->start != -1u) { - int rc = data->fn(data->priv, data->start, end, data->prot); - if (rc != 0) { - return rc; - } - } - - data->start = (new_prot ? end : -1u); - data->prot = new_prot; - - return 0; -} - -static int walk_memory_regions_1(struct walk_memory_regions_data *data, - target_ulong base, int level, void **lp) -{ - target_ulong pa; - int i, rc; - - if (*lp == NULL) { - return walk_memory_regions_end(data, base, 0); - } - - if (level == 0) { - PageDesc *pd = *lp; - - for (i = 0; i < V_L2_SIZE; ++i) { - int prot = pd[i].flags; - - pa = base | (i << TARGET_PAGE_BITS); - if (prot != data->prot) { - rc = walk_memory_regions_end(data, pa, prot); - if (rc != 0) { - return rc; - } - } - } - } else { - void **pp = *lp; - - for (i = 0; i < V_L2_SIZE; ++i) { - pa = base | ((target_ulong)i << - (TARGET_PAGE_BITS + V_L2_BITS * level)); - rc = walk_memory_regions_1(data, pa, level - 1, pp + i); - if (rc != 0) { - return rc; - } - } - } - - return 0; -} - -typedef int (*walk_memory_regions_fn)(void *, target_ulong, - target_ulong, unsigned long); - -static int walk_memory_regions(void *priv, walk_memory_regions_fn fn) -{ - struct walk_memory_regions_data data; - uintptr_t i; - - data.fn = fn; - data.priv = priv; - data.start = -1u; - data.prot = 0; - - for (i = 0; i < V_L1_SIZE; i++) { - int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS), - V_L1_SHIFT / V_L2_BITS - 1, l1_map + i); - if (rc != 0) { - return rc; - } - } - - return walk_memory_regions_end(&data, 0, 0); -} - -static int dump_region(void *priv, target_ulong start, - target_ulong end, unsigned long prot) -{ - FILE *f = (FILE *)priv; - - (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx - " "TARGET_FMT_lx" %c%c%c\n", - start, end, end - start, - ((prot & PAGE_READ) ? 'r' : '-'), - ((prot & PAGE_WRITE) ? 'w' : '-'), - ((prot & PAGE_EXEC) ? 'x' : '-')); - - return 0; -} - -/* dump memory mappings */ -void page_dump(FILE *f) -{ - const int length = sizeof(target_ulong) * 2; - (void) fprintf(f, "%-*s %-*s %-*s %s\n", - length, "start", length, "end", length, "size", "prot"); - walk_memory_regions(f, dump_region); -} - -#endif - -int page_get_flags(target_ulong address) -{ - PageDesc *p; - - p = page_find(address >> TARGET_PAGE_BITS); - if (!p) { - return 0; - } - return p->flags; -} - -/* Modify the flags of a page and invalidate the code if necessary. - The flag PAGE_WRITE_ORG is positioned automatically depending - on PAGE_WRITE. The mmap_lock should already be held. 
*/ -static void page_set_flags(struct uc_struct *uc, target_ulong start, target_ulong end, int flags) -{ - target_ulong addr, len; - - /* This function should never be called with addresses outside the - guest address space. If this assert fires, it probably indicates - a missing call to h2g_valid. */ -#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS - assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); -#endif - assert(start < end); - - start = start & TARGET_PAGE_MASK; - end = TARGET_PAGE_ALIGN(end); - - if (flags & PAGE_WRITE) { - flags |= PAGE_WRITE_ORG; - } - - for (addr = start, len = end - start; - len != 0; - len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { - PageDesc *p = page_find_alloc(uc, addr >> TARGET_PAGE_BITS, 1); - - /* If the write protection bit is set, then we invalidate - the code inside. */ - if (!(p->flags & PAGE_WRITE) && - (flags & PAGE_WRITE) && - p->first_tb) { - tb_invalidate_phys_page(addr, 0, NULL, false); - } - p->flags = flags; - } -} - -static int page_check_range(target_ulong start, target_ulong len, int flags) -{ - PageDesc *p; - target_ulong end; - target_ulong addr; - - /* This function should never be called with addresses outside the - guest address space. If this assert fires, it probably indicates - a missing call to h2g_valid. */ -#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS - assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); -#endif - - if (len == 0) { - return 0; - } - if (start + len - 1 < start) { - /* We've wrapped around. */ - return -1; - } - - /* must do before we loose bits in the next step */ - end = TARGET_PAGE_ALIGN(start + len); - start = start & TARGET_PAGE_MASK; - - for (addr = start, len = end - start; - len != 0; - len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { - p = page_find(addr >> TARGET_PAGE_BITS); - if (!p) { - return -1; - } - if (!(p->flags & PAGE_VALID)) { - return -1; - } - - if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) { - return -1; - } - if (flags & PAGE_WRITE) { - if (!(p->flags & PAGE_WRITE_ORG)) { - return -1; - } - /* unprotect the page if it was put read-only because it - contains translated code */ - if (!(p->flags & PAGE_WRITE)) { - if (!page_unprotect(addr, 0, NULL)) { - return -1; - } - } - } - } - return 0; -} - -/* called from signal handler: invalidate the code and unprotect the - page. Return TRUE if the fault was successfully handled. */ -static int page_unprotect(target_ulong address, uintptr_t pc, void *puc) -{ - unsigned int prot; - PageDesc *p; - target_ulong host_start, host_end, addr; - - /* Technically this isn't safe inside a signal handler. However we - know this only ever happens in a synchronous SEGV handler, so in - practice it seems to be ok. */ - mmap_lock(); - - p = page_find(address >> TARGET_PAGE_BITS); - if (!p) { - mmap_unlock(); - return 0; - } - - /* if the page was really writable, then we change its - protection back to writable */ - if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) { - host_start = address & qemu_host_page_mask; - host_end = host_start + qemu_host_page_size; - - prot = 0; - for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) { - p = page_find(addr >> TARGET_PAGE_BITS); - p->flags |= PAGE_WRITE; - prot |= p->flags; - - /* and since the content will be modified, we must invalidate - the corresponding translated code. 
 */
-            tb_invalidate_phys_page(addr, pc, puc, true);
-#ifdef DEBUG_TB_CHECK
-            tb_invalidate_check(addr);
-#endif
-        }
-        mprotect((void *)g2h(host_start), qemu_host_page_size,
-                 prot & PAGE_BITS);
-
-        mmap_unlock();
-        return 1;
-    }
-    mmap_unlock();
-    return 0;
-}
-#endif /* CONFIG_USER_ONLY */
diff --git a/qemu/unicorn_common.h b/qemu/unicorn_common.h
index 2117c621..84bbb9f1 100644
--- a/qemu/unicorn_common.h
+++ b/qemu/unicorn_common.h
@@ -1,22 +1,28 @@
-#ifndef UNICORN_COMMON_H_
-#define UNICORN_COMMON_H_
+/* Modified for Unicorn Engine by Chen Huitao, 2020 */
+#ifndef UNICORN_COMMON_H
+#define UNICORN_COMMON_H
-#include "tcg.h"
+#include "tcg/tcg.h"
+#include "qemu-common.h"
+#include "exec/memory.h"
 // This header defines common patterns/codes that will be included in all arch-specific
 // codes for Unicorn's purposes.
+void vm_start(struct uc_struct*);
+void tcg_exec_init(struct uc_struct *uc, unsigned long tb_size);
+
 // return true on success, false on failure
 static inline bool cpu_physical_mem_read(AddressSpace *as, hwaddr addr,
                                          uint8_t *buf, int len)
 {
-    return !cpu_physical_memory_rw(as, addr, (void *)buf, len, 0);
+    return cpu_physical_memory_rw(as, addr, (void *)buf, len, 0);
 }
 static inline bool cpu_physical_mem_write(AddressSpace *as, hwaddr addr,
                                           const uint8_t *buf, int len)
 {
-    return !cpu_physical_memory_rw(as, addr, (void *)buf, len, 1);
+    return cpu_physical_memory_rw(as, addr, (void *)buf, len, 1);
 }
 void tb_cleanup(struct uc_struct *uc);
@@ -32,7 +38,7 @@ static void release_common(void *t)
 #endif
     // Clean TCG.
-    TCGOpDef* def = &s->tcg_op_defs[0];
+    TCGOpDef* def = s->tcg_op_defs;
     g_free(def->args_ct);
     g_free(def->sorted_args);
     g_free(s->tcg_op_defs);
@@ -42,15 +48,26 @@ static void release_common(void *t)
         g_free(po);
     }
     tcg_pool_reset(s);
-    g_hash_table_destroy(s->helpers);
+    g_hash_table_destroy(s->helper_table);
+    g_free(s->indirect_reg_alloc_order);
+    /* qemu/tcg/tcg.c:4018: img = g_malloc(img_size); */
+    g_free((void *)(s->one_entry->symfile_addr));
+    g_free(s->one_entry);
+    /* qemu/tcg/tcg.c:574: tcg_ctx->tree = g_tree_new(tb_tc_cmp); */
+    g_tree_destroy(s->tree);
-    // TODO(danghvu): these function is not available outside qemu
+    // these functions are not available outside qemu
     // so we keep them here instead of outside uc_close.
-    phys_mem_clean(s->uc);
-    address_space_destroy(&(s->uc->as));
+    address_space_destroy(&s->uc->address_space_memory);
+    address_space_destroy(&s->uc->address_space_io);
     memory_free(s->uc);
+    /* clean up uc->l1_map. */
     tb_cleanup(s->uc);
+    /* clean up tcg_ctx->code_gen_buffer.
*/ free_code_gen_buffer(s->uc); + /* qemu/util/qht.c:264: map = qht_map_create(n_buckets); */ + qht_destroy(&s->tb_ctx.htable); + cpu_watchpoint_remove_all(CPU(s->uc->cpu), BP_CPU); cpu_breakpoint_remove_all(CPU(s->uc->cpu), BP_CPU); @@ -67,12 +84,17 @@ static void release_common(void *t) #endif } +static inline void target_page_init(struct uc_struct* uc) +{ + uc->target_page_size = TARGET_PAGE_SIZE; + uc->target_page_align = TARGET_PAGE_SIZE - 1; +} + +void softfloat_init(void); static inline void uc_common_init(struct uc_struct* uc) { - memory_register_types(uc); uc->write_mem = cpu_physical_mem_write; uc->read_mem = cpu_physical_mem_read; - uc->tcg_enabled = tcg_enabled; uc->tcg_exec_init = tcg_exec_init; uc->cpu_exec_init_all = cpu_exec_init_all; uc->vm_start = vm_start; @@ -80,12 +102,12 @@ static inline void uc_common_init(struct uc_struct* uc) uc->memory_map_ptr = memory_map_ptr; uc->memory_unmap = memory_unmap; uc->readonly_mem = memory_region_set_readonly; - - uc->target_page_size = TARGET_PAGE_SIZE; - uc->target_page_align = TARGET_PAGE_SIZE - 1; + uc->target_page = target_page_init; + uc->softfloat_initialize = softfloat_init; + uc->tcg_flush_tlb = tcg_flush_softmmu_tlb; + uc->memory_map_io = memory_map_io; if (!uc->release) uc->release = release_common; } - #endif diff --git a/qemu/util/Makefile.objs b/qemu/util/Makefile.objs deleted file mode 100644 index 874cee16..00000000 --- a/qemu/util/Makefile.objs +++ /dev/null @@ -1,10 +0,0 @@ -util-obj-y = cutils.o qemu-timer-common.o -util-obj-$(CONFIG_WIN32) += oslib-win32.o qemu-thread-win32.o -util-obj-$(CONFIG_POSIX) += oslib-posix.o qemu-thread-posix.o -util-obj-y += module.o -util-obj-y += bitmap.o bitops.o -util-obj-y += error.o -util-obj-y += aes.o -util-obj-y += crc32c.o -util-obj-y += host-utils.o -util-obj-y += getauxval.o diff --git a/qemu/util/bitmap.c b/qemu/util/bitmap.c index f6b9cdc1..19cfd87b 100644 --- a/qemu/util/bitmap.c +++ b/qemu/util/bitmap.c @@ -9,10 +9,153 @@ * Version 2. */ +#include "qemu/osdep.h" #include "qemu/bitops.h" #include "qemu/bitmap.h" +#include "qemu/atomic.h" -#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) +/* + * bitmaps provide an array of bits, implemented using an + * array of unsigned longs. The number of valid bits in a + * given bitmap does _not_ need to be an exact multiple of + * BITS_PER_LONG. + * + * The possible unused bits in the last, partially used word + * of a bitmap are 'don't care'. The implementation makes + * no particular effort to keep them zero. It ensures that + * their value will not affect the results of any operation. + * The bitmap operations that return Boolean (bitmap_empty, + * for example) or scalar (bitmap_weight, for example) results + * carefully filter out these unused bits from impacting their + * results. + * + * These operations actually hold to a slightly stronger rule: + * if you don't input any bitmaps to these ops that have some + * unused bits set, then they won't output any set unused bits + * in output bitmaps. + * + * The byte ordering of bitmaps is more natural on little + * endian architectures. 
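 */

/*
 * For concreteness, the word/mask arithmetic the helpers below rely on,
 * as a standalone sketch. BIT_WORD_ and LAST_WORD_MASK_ are illustrative
 * stand-ins for the BIT_WORD/BITMAP_LAST_WORD_MASK macros from
 * qemu/bitops.h and qemu/bitmap.h; the example assumes a 64-bit
 * unsigned long.
 */
#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG_  (CHAR_BIT * (int)sizeof(unsigned long))
#define BIT_WORD_(nr)   ((nr) / BITS_PER_LONG_)
/* mask covering the valid bits of the last, partially used word */
#define LAST_WORD_MASK_(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG_ - 1)))

static void bitmap_layout_example(void)
{
    /* a 70-bit bitmap spans two 64-bit words */
    assert(BIT_WORD_(69) == 1);             /* bit 69 lives in word 1 */
    assert(LAST_WORD_MASK_(70) == 0x3fUL);  /* only the low 6 bits are valid */
}

/*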
+ */ + +int slow_bitmap_empty(const unsigned long *bitmap, long bits) +{ + long k, lim = bits/BITS_PER_LONG; + + for (k = 0; k < lim; ++k) { + if (bitmap[k]) { + return 0; + } + } + if (bits % BITS_PER_LONG) { + if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) { + return 0; + } + } + + return 1; +} + +int slow_bitmap_full(const unsigned long *bitmap, long bits) +{ + long k, lim = bits/BITS_PER_LONG; + + for (k = 0; k < lim; ++k) { + if (~bitmap[k]) { + return 0; + } + } + + if (bits % BITS_PER_LONG) { + if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) { + return 0; + } + } + + return 1; +} + +int slow_bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits) +{ + long k, lim = bits/BITS_PER_LONG; + + for (k = 0; k < lim; ++k) { + if (bitmap1[k] != bitmap2[k]) { + return 0; + } + } + + if (bits % BITS_PER_LONG) { + if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) { + return 0; + } + } + + return 1; +} + +void slow_bitmap_complement(unsigned long *dst, const unsigned long *src, + long bits) +{ + long k, lim = bits/BITS_PER_LONG; + + for (k = 0; k < lim; ++k) { + dst[k] = ~src[k]; + } + + if (bits % BITS_PER_LONG) { + dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); + } +} + +int slow_bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits) +{ + long k; + long nr = BITS_TO_LONGS(bits); + unsigned long result = 0; + + for (k = 0; k < nr; k++) { + result |= (dst[k] = bitmap1[k] & bitmap2[k]); + } + return result != 0; +} + +void slow_bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits) +{ + long k; + long nr = BITS_TO_LONGS(bits); + + for (k = 0; k < nr; k++) { + dst[k] = bitmap1[k] | bitmap2[k]; + } +} + +void slow_bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits) +{ + long k; + long nr = BITS_TO_LONGS(bits); + + for (k = 0; k < nr; k++) { + dst[k] = bitmap1[k] ^ bitmap2[k]; + } +} + +int slow_bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits) +{ + long k; + long nr = BITS_TO_LONGS(bits); + unsigned long result = 0; + + for (k = 0; k < nr; k++) { + result |= (dst[k] = bitmap1[k] & ~bitmap2[k]); + } + return result != 0; +} void qemu_bitmap_set(unsigned long *map, long start, long nr) { @@ -21,6 +164,8 @@ void qemu_bitmap_set(unsigned long *map, long start, long nr) int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); + assert(start >= 0 && nr >= 0); + while (nr - bits_to_set >= 0) { *p |= mask_to_set; nr -= bits_to_set; @@ -34,6 +179,45 @@ void qemu_bitmap_set(unsigned long *map, long start, long nr) } } +void bitmap_set_atomic(unsigned long *map, long start, long nr) +{ + unsigned long *p = map + BIT_WORD(start); + const long size = start + nr; + int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); + + assert(start >= 0 && nr >= 0); + + /* First word */ + if (nr - bits_to_set > 0) { + atomic_or(p, mask_to_set); + nr -= bits_to_set; + bits_to_set = BITS_PER_LONG; + mask_to_set = ~0UL; + p++; + } + + /* Full words */ + if (bits_to_set == BITS_PER_LONG) { + while (nr >= BITS_PER_LONG) { + *p = ~0UL; + nr -= BITS_PER_LONG; + p++; + } + } + + /* Last word */ + if (nr) { + mask_to_set &= BITMAP_LAST_WORD_MASK(size); + atomic_or(p, mask_to_set); + } else { + /* If we avoided the full barrier in atomic_or(), issue a + * barrier to 
account for the assignments in the while loop.
+         */
+        smp_mb();
+    }
+}
+
 void qemu_bitmap_clear(unsigned long *map, long start, long nr)
 {
     unsigned long *p = map + BIT_WORD(start);
@@ -41,6 +225,8 @@ void qemu_bitmap_clear(unsigned long *map, long start, long nr)
     int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
     unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
 
+    assert(start >= 0 && nr >= 0);
+
     while (nr - bits_to_clear >= 0) {
         *p &= ~mask_to_clear;
         nr -= bits_to_clear;
@@ -53,3 +239,253 @@ void qemu_bitmap_clear(unsigned long *map, long start, long nr)
         *p &= ~mask_to_clear;
     }
 }
+
+bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
+{
+    unsigned long *p = map + BIT_WORD(start);
+    const long size = start + nr;
+    int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+    unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+    unsigned long dirty = 0;
+    unsigned long old_bits;
+
+    assert(start >= 0 && nr >= 0);
+
+    /* First word */
+    if (nr - bits_to_clear > 0) {
+        old_bits = atomic_fetch_and(p, ~mask_to_clear);
+        dirty |= old_bits & mask_to_clear;
+        nr -= bits_to_clear;
+        bits_to_clear = BITS_PER_LONG;
+        mask_to_clear = ~0UL;
+        p++;
+    }
+
+    /* Full words */
+    if (bits_to_clear == BITS_PER_LONG) {
+        while (nr >= BITS_PER_LONG) {
+            if (*p) {
+                old_bits = *p;
+                *p = 0;
+                dirty |= old_bits;
+            }
+            nr -= BITS_PER_LONG;
+            p++;
+        }
+    }
+
+    /* Last word */
+    if (nr) {
+        mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+        old_bits = atomic_fetch_and(p, ~mask_to_clear);
+        dirty |= old_bits & mask_to_clear;
+    } else {
+        if (!dirty) {
+            smp_mb();
+        }
+    }
+
+    return dirty != 0;
+}
+
+void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
+                                  long nr)
+{
+    while (nr > 0) {
+        *dst = *src;
+        *src = 0;
+        dst++;
+        src++;
+        nr -= BITS_PER_LONG;
+    }
+}
+
+#define ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offsets of all zero areas this function finds are multiples of that
+ * power of 2. A @align_mask of 0 means no alignment is required.
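 */

/*
 * A hedged usage sketch (claim_zero_run is illustrative, not from this
 * file): reserving a run of nr free bits with no alignment constraint.
 * For nr >= 1 a failed search returns an offset past size, so a bounds
 * check tells the two outcomes apart.
 */
static long claim_zero_run(unsigned long *map, unsigned long size,
                           unsigned long nr)
{
    unsigned long off = bitmap_find_next_zero_area(map, size, 0, nr, 0);

    if (off >= size) {
        return -1;                  /* no free run of nr zero bits */
    }
    qemu_bitmap_set(map, off, nr);  /* mark the run as allocated */
    return (long)off;
}

/*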
+ */ +unsigned long bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned long nr, + unsigned long align_mask) +{ + unsigned long index, end, i; +again: + index = find_next_zero_bit(map, size, start); + + /* Align allocation */ + index = ALIGN_MASK(index, align_mask); + + end = index + nr; + if (end > size) { + return end; + } + i = find_next_bit(map, end, index); + if (i < end) { + start = i + 1; + goto again; + } + return index; +} + +int slow_bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, long bits) +{ + long k, lim = bits/BITS_PER_LONG; + + for (k = 0; k < lim; ++k) { + if (bitmap1[k] & bitmap2[k]) { + return 1; + } + } + + if (bits % BITS_PER_LONG) { + if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) { + return 1; + } + } + return 0; +} + +long slow_bitmap_count_one(const unsigned long *bitmap, long nbits) +{ + long k, lim = nbits / BITS_PER_LONG, result = 0; + + for (k = 0; k < lim; k++) { + result += ctpopl(bitmap[k]); + } + + if (nbits % BITS_PER_LONG) { + result += ctpopl(bitmap[k] & BITMAP_LAST_WORD_MASK(nbits)); + } + + return result; +} + +static void bitmap_to_from_le(unsigned long *dst, + const unsigned long *src, long nbits) +{ + long len = BITS_TO_LONGS(nbits); + +#ifdef HOST_WORDS_BIGENDIAN + long index; + + for (index = 0; index < len; index++) { +# if HOST_LONG_BITS == 64 + dst[index] = bswap64(src[index]); +# else + dst[index] = bswap32(src[index]); +# endif + } +#else + memcpy(dst, src, len * sizeof(unsigned long)); +#endif +} + +void bitmap_from_le(unsigned long *dst, const unsigned long *src, + long nbits) +{ + bitmap_to_from_le(dst, src, nbits); +} + +void bitmap_to_le(unsigned long *dst, const unsigned long *src, + long nbits) +{ + bitmap_to_from_le(dst, src, nbits); +} + +/* + * Copy "src" bitmap with a positive offset and put it into the "dst" + * bitmap. The caller needs to make sure the bitmap size of "src" + * is bigger than (shift + nbits). + */ +void bitmap_copy_with_src_offset(unsigned long *dst, const unsigned long *src, + unsigned long shift, unsigned long nbits) +{ + unsigned long left_mask, right_mask, last_mask; + + /* Proper shift src pointer to the first word to copy from */ + src += BIT_WORD(shift); + shift %= BITS_PER_LONG; + + if (!shift) { + /* Fast path */ + bitmap_copy(dst, src, nbits); + return; + } + + right_mask = (1ul << shift) - 1; + left_mask = ~right_mask; + + while (nbits >= BITS_PER_LONG) { + *dst = (*src & left_mask) >> shift; + *dst |= (src[1] & right_mask) << (BITS_PER_LONG - shift); + dst++; + src++; + nbits -= BITS_PER_LONG; + } + + if (nbits > BITS_PER_LONG - shift) { + *dst = (*src & left_mask) >> shift; + nbits -= BITS_PER_LONG - shift; + last_mask = (1ul << nbits) - 1; + *dst |= (src[1] & last_mask) << (BITS_PER_LONG - shift); + } else if (nbits) { + last_mask = (1ul << nbits) - 1; + *dst = (*src >> shift) & last_mask; + } +} + +/* + * Copy "src" bitmap into the "dst" bitmap with an offset in the + * "dst". The caller needs to make sure the bitmap size of "dst" is + * bigger than (shift + nbits). 
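 */

/*
 * A worked example of the word-stitching both copy helpers use, assuming
 * a 64-bit unsigned long (names and values are illustrative). With a
 * residual shift of 4, each destination word combines the upper 60 bits
 * of one source word with the lower 4 bits of the next:
 */
#include <assert.h>

static void offset_copy_example(void)
{
    unsigned long src[2] = { 0xF0UL, 0x1UL };   /* bits 4..7 and bit 64 set */
    unsigned long dst;

    /* one step of bitmap_copy_with_src_offset() with shift % 64 == 4 */
    dst = (src[0] >> 4) | (src[1] << (64 - 4));
    assert(dst == (0xFUL | (1UL << 60)));       /* bits 0..3 and bit 60 set */
}

/*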
+ */ +void bitmap_copy_with_dst_offset(unsigned long *dst, const unsigned long *src, + unsigned long shift, unsigned long nbits) +{ + unsigned long left_mask, right_mask, last_mask; + + /* Proper shift dst pointer to the first word to copy from */ + dst += BIT_WORD(shift); + shift %= BITS_PER_LONG; + + if (!shift) { + /* Fast path */ + bitmap_copy(dst, src, nbits); + return; + } + + right_mask = (1ul << (BITS_PER_LONG - shift)) - 1; + left_mask = ~right_mask; + + *dst &= (1ul << shift) - 1; + while (nbits >= BITS_PER_LONG) { + *dst |= (*src & right_mask) << shift; + dst[1] = (*src & left_mask) >> (BITS_PER_LONG - shift); + dst++; + src++; + nbits -= BITS_PER_LONG; + } + + if (nbits > BITS_PER_LONG - shift) { + *dst |= (*src & right_mask) << shift; + nbits -= BITS_PER_LONG - shift; + last_mask = ((1ul << nbits) - 1) << (BITS_PER_LONG - shift); + dst[1] = (*src & last_mask) >> (BITS_PER_LONG - shift); + } else if (nbits) { + last_mask = (1ul << nbits) - 1; + *dst |= (*src & last_mask) << shift; + } +} diff --git a/qemu/util/bitops.c b/qemu/util/bitops.c index f1641bf8..3fe6b1c4 100644 --- a/qemu/util/bitops.c +++ b/qemu/util/bitops.c @@ -11,17 +11,16 @@ * 2 of the License, or (at your option) any later version. */ +#include "qemu/osdep.h" #include "qemu/bitops.h" -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) - /* * Find the next set bit in a memory region. */ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) + unsigned long offset) { - const unsigned long *p = addr + BITOP_WORD(offset); + const unsigned long *p = addr + BIT_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG-1); unsigned long tmp; @@ -84,9 +83,9 @@ found_middle: * Linus' asm-alpha/bitops.h. */ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) + unsigned long offset) { - const unsigned long *p = addr + BITOP_WORD(offset); + const unsigned long *p = addr + BIT_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG-1); unsigned long tmp; @@ -127,3 +126,32 @@ found_first: found_middle: return result + ctzl(~tmp); } + +unsigned long find_last_bit(const unsigned long *addr, unsigned long size) +{ + unsigned long words; + unsigned long tmp; + + /* Start at final word. */ + words = size / BITS_PER_LONG; + + /* Partial final word? */ + if (size & (BITS_PER_LONG-1)) { + tmp = (addr[words] & (~0UL >> (BITS_PER_LONG + - (size & (BITS_PER_LONG-1))))); + if (tmp) { + goto found; + } + } + + while (words) { + tmp = addr[--words]; + if (tmp) { + found: + return words * BITS_PER_LONG + BITS_PER_LONG - 1 - clzl(tmp); + } + } + + /* Not found */ + return size; +} diff --git a/qemu/util/cacheinfo.c b/qemu/util/cacheinfo.c new file mode 100644 index 00000000..f29f7206 --- /dev/null +++ b/qemu/util/cacheinfo.c @@ -0,0 +1,189 @@ +/* + * cacheinfo.c - helpers to query the host about its caches + * + * Copyright (C) 2017, Emilio G. Cota + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "qemu/atomic.h" + +#include + +/* + * Operating system specific detection mechanisms. + */ + +#if defined(_WIN32) + +static void sys_cache_info(int *isize, int *dsize) +{ + SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf; + DWORD size = 0; + BOOL success; + size_t i, n; + + /* Check for the required buffer size first. Note that if the zero + size we use for the probe results in success, then there is no + data available; fail in that case. 
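 */

/*
 * The probe-then-allocate idiom used here, in isolation (a hedged sketch
 * with trimmed error handling; get_proc_info is an illustrative name):
 * ask for the required size first, expect ERROR_INSUFFICIENT_BUFFER, then
 * allocate and call again.
 */
#include <stdlib.h>
#include <windows.h>

static SYSTEM_LOGICAL_PROCESSOR_INFORMATION *get_proc_info(DWORD *count)
{
    DWORD size = 0;
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf;

    /* first call only reports the required buffer size */
    if (GetLogicalProcessorInformation(NULL, &size) ||
        GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
        return NULL;    /* success with size 0 means there is no data */
    }
    buf = malloc(size);
    if (buf == NULL || !GetLogicalProcessorInformation(buf, &size)) {
        free(buf);
        return NULL;
    }
    *count = size / sizeof(*buf);
    return buf;
}

/*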
 */
+    success = GetLogicalProcessorInformation(0, &size);
+    if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
+        return;
+    }
+
+    n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+    size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+    buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n);
+    if (!GetLogicalProcessorInformation(buf, &size)) {
+        goto fail;
+    }
+
+    for (i = 0; i < n; i++) {
+        if (buf[i].Relationship == RelationCache
+            && buf[i].Cache.Level == 1) {
+            switch (buf[i].Cache.Type) {
+            case CacheUnified:
+                *isize = *dsize = buf[i].Cache.LineSize;
+                break;
+            case CacheInstruction:
+                *isize = buf[i].Cache.LineSize;
+                break;
+            case CacheData:
+                *dsize = buf[i].Cache.LineSize;
+                break;
+            default:
+                break;
+            }
+        }
+    }
+ fail:
+    g_free(buf);
+}
+
+#elif defined(__APPLE__)
+# include <sys/sysctl.h>
+static void sys_cache_info(int *isize, int *dsize)
+{
+    /* There's only a single sysctl for both I/D cache line sizes. */
+    long size;
+    size_t len = sizeof(size);
+    if (!sysctlbyname("hw.cachelinesize", &size, &len, NULL, 0)) {
+        *isize = *dsize = size;
+    }
+}
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+# include <sys/sysctl.h>
+static void sys_cache_info(int *isize, int *dsize)
+{
+    /* There's only a single sysctl for both I/D cache line sizes. */
+    int size;
+    size_t len = sizeof(size);
+    if (!sysctlbyname("machdep.cacheline_size", &size, &len, NULL, 0)) {
+        *isize = *dsize = size;
+    }
+}
+#else
+/* POSIX */

+static void sys_cache_info(int *isize, int *dsize)
+{
+# ifdef _SC_LEVEL1_ICACHE_LINESIZE
+    int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
+    if (tmp_isize > 0) {
+        *isize = tmp_isize;
+    }
+# endif
+# ifdef _SC_LEVEL1_DCACHE_LINESIZE
+    int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+    if (tmp_dsize > 0) {
+        *dsize = tmp_dsize;
+    }
+# endif
+}
+#endif /* sys_cache_info */
+
+/*
+ * Architecture (+ OS) specific detection mechanisms.
+ */
+
+#if defined(__aarch64__)
+
+static void arch_cache_info(int *isize, int *dsize)
+{
+    if (*isize == 0 || *dsize == 0) {
+        uint64_t ctr;
+
+        /* The real cache geometry is in CCSIDR_EL1/CLIDR_EL1/CSSELR_EL1,
+           but (at least under Linux) these are marked protected by the
+           kernel. However, CTR_EL0 contains the minimum linesize in the
+           entire hierarchy, and is used by userspace cache flushing. */
+        asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr));
+        if (*isize == 0) {
+            *isize = 4 << (ctr & 0xf);
+        }
+        if (*dsize == 0) {
+            *dsize = 4 << ((ctr >> 16) & 0xf);
+        }
+    }
+}
+
+#elif defined(_ARCH_PPC) && defined(__linux__)
+# include "elf.h"
+
+static void arch_cache_info(int *isize, int *dsize)
+{
+    if (*isize == 0) {
+        *isize = qemu_getauxval(AT_ICACHEBSIZE);
+    }
+    if (*dsize == 0) {
+        *dsize = qemu_getauxval(AT_DCACHEBSIZE);
+    }
+}
+
+#else
+static void arch_cache_info(int *isize, int *dsize) { }
+#endif /* arch_cache_info */
+
+/*
+ * ... and if all else fails ...
+ */
+
+static void fallback_cache_info(int *isize, int *dsize)
+{
+    /* If we can only find one of the two, assume they're the same. */
+    if (*isize) {
+        if (*dsize) {
+            /* Success! */
+        } else {
+            *dsize = *isize;
+        }
+    } else if (*dsize) {
+        *isize = *dsize;
+    } else {
+#if defined(_ARCH_PPC)
+        /* For PPC, we're going to use the icache size computed for
+           flush_icache_range. Which means that we must use the
+           architecture minimum. */
+        *isize = *dsize = 16;
+#else
+        /* Otherwise, 64 bytes is not uncommon.
*/ + *isize = *dsize = 64; +#endif + } +} + +void init_cache_info(struct uc_struct *uc) +{ + int isize = 0, dsize = 0; + + sys_cache_info(&isize, &dsize); + arch_cache_info(&isize, &dsize); + fallback_cache_info(&isize, &dsize); + + assert((isize & (isize - 1)) == 0); + assert((dsize & (dsize - 1)) == 0); + + uc->qemu_icache_linesize = isize; +} diff --git a/qemu/util/crc32c.c b/qemu/util/crc32c.c index 88663278..e811d4de 100644 --- a/qemu/util/crc32c.c +++ b/qemu/util/crc32c.c @@ -25,7 +25,7 @@ * */ -#include "qemu-common.h" +#include "qemu/osdep.h" #include "qemu/crc32c.h" /* @@ -113,3 +113,86 @@ uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length) return crc^0xffffffff; } +/* + * This is the CRC-32 table + * Generated with: + * width = 32 bits + * poly = 0xEDB88320 + */ + +static const uint32_t crc32_table[256] = { + 0x00000000U, 0x77073096U, 0xEE0E612CU, 0x990951BAU, + 0x076DC419U, 0x706AF48FU, 0xE963A535U, 0x9E6495A3U, + 0x0EDB8832U, 0x79DCB8A4U, 0xE0D5E91EU, 0x97D2D988U, + 0x09B64C2BU, 0x7EB17CBDU, 0xE7B82D07U, 0x90BF1D91U, + 0x1DB71064U, 0x6AB020F2U, 0xF3B97148U, 0x84BE41DEU, + 0x1ADAD47DU, 0x6DDDE4EBU, 0xF4D4B551U, 0x83D385C7U, + 0x136C9856U, 0x646BA8C0U, 0xFD62F97AU, 0x8A65C9ECU, + 0x14015C4FU, 0x63066CD9U, 0xFA0F3D63U, 0x8D080DF5U, + 0x3B6E20C8U, 0x4C69105EU, 0xD56041E4U, 0xA2677172U, + 0x3C03E4D1U, 0x4B04D447U, 0xD20D85FDU, 0xA50AB56BU, + 0x35B5A8FAU, 0x42B2986CU, 0xDBBBC9D6U, 0xACBCF940U, + 0x32D86CE3U, 0x45DF5C75U, 0xDCD60DCFU, 0xABD13D59U, + 0x26D930ACU, 0x51DE003AU, 0xC8D75180U, 0xBFD06116U, + 0x21B4F4B5U, 0x56B3C423U, 0xCFBA9599U, 0xB8BDA50FU, + 0x2802B89EU, 0x5F058808U, 0xC60CD9B2U, 0xB10BE924U, + 0x2F6F7C87U, 0x58684C11U, 0xC1611DABU, 0xB6662D3DU, + 0x76DC4190U, 0x01DB7106U, 0x98D220BCU, 0xEFD5102AU, + 0x71B18589U, 0x06B6B51FU, 0x9FBFE4A5U, 0xE8B8D433U, + 0x7807C9A2U, 0x0F00F934U, 0x9609A88EU, 0xE10E9818U, + 0x7F6A0DBBU, 0x086D3D2DU, 0x91646C97U, 0xE6635C01U, + 0x6B6B51F4U, 0x1C6C6162U, 0x856530D8U, 0xF262004EU, + 0x6C0695EDU, 0x1B01A57BU, 0x8208F4C1U, 0xF50FC457U, + 0x65B0D9C6U, 0x12B7E950U, 0x8BBEB8EAU, 0xFCB9887CU, + 0x62DD1DDFU, 0x15DA2D49U, 0x8CD37CF3U, 0xFBD44C65U, + 0x4DB26158U, 0x3AB551CEU, 0xA3BC0074U, 0xD4BB30E2U, + 0x4ADFA541U, 0x3DD895D7U, 0xA4D1C46DU, 0xD3D6F4FBU, + 0x4369E96AU, 0x346ED9FCU, 0xAD678846U, 0xDA60B8D0U, + 0x44042D73U, 0x33031DE5U, 0xAA0A4C5FU, 0xDD0D7CC9U, + 0x5005713CU, 0x270241AAU, 0xBE0B1010U, 0xC90C2086U, + 0x5768B525U, 0x206F85B3U, 0xB966D409U, 0xCE61E49FU, + 0x5EDEF90EU, 0x29D9C998U, 0xB0D09822U, 0xC7D7A8B4U, + 0x59B33D17U, 0x2EB40D81U, 0xB7BD5C3BU, 0xC0BA6CADU, + 0xEDB88320U, 0x9ABFB3B6U, 0x03B6E20CU, 0x74B1D29AU, + 0xEAD54739U, 0x9DD277AFU, 0x04DB2615U, 0x73DC1683U, + 0xE3630B12U, 0x94643B84U, 0x0D6D6A3EU, 0x7A6A5AA8U, + 0xE40ECF0BU, 0x9309FF9DU, 0x0A00AE27U, 0x7D079EB1U, + 0xF00F9344U, 0x8708A3D2U, 0x1E01F268U, 0x6906C2FEU, + 0xF762575DU, 0x806567CBU, 0x196C3671U, 0x6E6B06E7U, + 0xFED41B76U, 0x89D32BE0U, 0x10DA7A5AU, 0x67DD4ACCU, + 0xF9B9DF6FU, 0x8EBEEFF9U, 0x17B7BE43U, 0x60B08ED5U, + 0xD6D6A3E8U, 0xA1D1937EU, 0x38D8C2C4U, 0x4FDFF252U, + 0xD1BB67F1U, 0xA6BC5767U, 0x3FB506DDU, 0x48B2364BU, + 0xD80D2BDAU, 0xAF0A1B4CU, 0x36034AF6U, 0x41047A60U, + 0xDF60EFC3U, 0xA867DF55U, 0x316E8EEFU, 0x4669BE79U, + 0xCB61B38CU, 0xBC66831AU, 0x256FD2A0U, 0x5268E236U, + 0xCC0C7795U, 0xBB0B4703U, 0x220216B9U, 0x5505262FU, + 0xC5BA3BBEU, 0xB2BD0B28U, 0x2BB45A92U, 0x5CB36A04U, + 0xC2D7FFA7U, 0xB5D0CF31U, 0x2CD99E8BU, 0x5BDEAE1DU, + 0x9B64C2B0U, 0xEC63F226U, 0x756AA39CU, 0x026D930AU, + 0x9C0906A9U, 0xEB0E363FU, 0x72076785U, 0x05005713U, + 
0x95BF4A82U, 0xE2B87A14U, 0x7BB12BAEU, 0x0CB61B38U, + 0x92D28E9BU, 0xE5D5BE0DU, 0x7CDCEFB7U, 0x0BDBDF21U, + 0x86D3D2D4U, 0xF1D4E242U, 0x68DDB3F8U, 0x1FDA836EU, + 0x81BE16CDU, 0xF6B9265BU, 0x6FB077E1U, 0x18B74777U, + 0x88085AE6U, 0xFF0F6A70U, 0x66063BCAU, 0x11010B5CU, + 0x8F659EFFU, 0xF862AE69U, 0x616BFFD3U, 0x166CCF45U, + 0xA00AE278U, 0xD70DD2EEU, 0x4E048354U, 0x3903B3C2U, + 0xA7672661U, 0xD06016F7U, 0x4969474DU, 0x3E6E77DBU, + 0xAED16A4AU, 0xD9D65ADCU, 0x40DF0B66U, 0x37D83BF0U, + 0xA9BCAE53U, 0xDEBB9EC5U, 0x47B2CF7FU, 0x30B5FFE9U, + 0xBDBDF21CU, 0xCABAC28AU, 0x53B39330U, 0x24B4A3A6U, + 0xBAD03605U, 0xCDD70693U, 0x54DE5729U, 0x23D967BFU, + 0xB3667A2EU, 0xC4614AB8U, 0x5D681B02U, 0x2A6F2B94U, + 0xB40BBE37U, 0xC30C8EA1U, 0x5A05DF1BU, 0x2D02EF8DU, +}; + +uint32_t crc32(uint32_t crc, const uint8_t *data, unsigned int length) +{ + int i; + crc = ~crc; + for (i = 0; i < length; i++) { + crc = (crc >> 8) ^ crc32_table[(crc ^ data[i]) & 0xff]; + } + return ~crc; +} diff --git a/qemu/util/cutils.c b/qemu/util/cutils.c index 9a6cbdb6..9b94c5fd 100644 --- a/qemu/util/cutils.c +++ b/qemu/util/cutils.c @@ -21,12 +21,10 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ -#include "qemu-common.h" -#include "qemu/host-utils.h" -#include -#include -#include +#include + +#include "qemu/cutils.h" void pstrcpy(char *buf, int buf_size, const char *str) { @@ -54,104 +52,3 @@ char *pstrcat(char *buf, int buf_size, const char *s) pstrcpy(buf + len, buf_size - len, s); return buf; } - -int strstart(const char *str, const char *val, const char **ptr) -{ - const char *p, *q; - p = str; - q = val; - while (*q != '\0') { - if (*p != *q) - return 0; - p++; - q++; - } - if (ptr) - *ptr = p; - return 1; -} - -int qemu_fls(int i) -{ - return 32 - clz32(i); -} - -static int64_t suffix_mul(char suffix, int64_t unit) -{ - switch (qemu_toupper(suffix)) { - case STRTOSZ_DEFSUFFIX_B: - return 1; - case STRTOSZ_DEFSUFFIX_KB: - return unit; - case STRTOSZ_DEFSUFFIX_MB: - return unit * unit; - case STRTOSZ_DEFSUFFIX_GB: - return unit * unit * unit; - case STRTOSZ_DEFSUFFIX_TB: - return unit * unit * unit * unit; - case STRTOSZ_DEFSUFFIX_PB: - return unit * unit * unit * unit * unit; - case STRTOSZ_DEFSUFFIX_EB: - return unit * unit * unit * unit * unit * unit; - } - return -1; -} - -/* - * Convert string to bytes, allowing either B/b for bytes, K/k for KB, - * M/m for MB, G/g for GB or T/t for TB. End pointer will be returned - * in *end, if not NULL. Return -ERANGE on overflow, Return -EINVAL on - * other error. 
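 */

/*
 * For reference, the contract of the suffix parser being removed here, as
 * a hedged sketch (values follow from the conversion rules described in
 * the comment above; the helpers themselves are dropped by this patch):
 */
#include <stdint.h>

static void strtosz_examples(void)
{
    char *end;
    int64_t a = strtosz_suffix("4K", &end, STRTOSZ_DEFSUFFIX_B);   /* 4096 */
    int64_t b = strtosz_suffix("1.5M", &end, STRTOSZ_DEFSUFFIX_B); /* 1572864 */
    int64_t c = strtosz("2", &end);     /* default suffix is MB: 2097152 */

    (void)a; (void)b; (void)c;
}

/*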
- */ -int64_t strtosz_suffix_unit(const char *nptr, char **end, - const char default_suffix, int64_t unit) -{ - int64_t retval = -EINVAL; - char *endptr; - unsigned char c; - int mul_required = 0; - double val, mul, integral, fraction; - - errno = 0; - val = strtod(nptr, &endptr); - if (isnan(val) || endptr == nptr || errno != 0) { - goto fail; - } - fraction = modf(val, &integral); - if (fraction != 0) { - mul_required = 1; - } - c = *endptr; - mul = (double)suffix_mul(c, unit); - if (mul >= 0) { - endptr++; - } else { - mul = (double)suffix_mul(default_suffix, unit); - assert(mul >= 0); - } - if (mul == 1 && mul_required) { - goto fail; - } - if ((val * mul >= (double)INT64_MAX) || val < 0) { - retval = -ERANGE; - goto fail; - } - retval = (int64_t)(val * mul); - -fail: - if (end) { - *end = endptr; - } - - return retval; -} - -int64_t strtosz_suffix(const char *nptr, char **end, const char default_suffix) -{ - return strtosz_suffix_unit(nptr, end, default_suffix, 1024); -} - -int64_t strtosz(const char *nptr, char **end) -{ - return strtosz_suffix(nptr, end, STRTOSZ_DEFSUFFIX_MB); -} diff --git a/qemu/util/error.c b/qemu/util/error.c deleted file mode 100644 index 7dc0bcc4..00000000 --- a/qemu/util/error.c +++ /dev/null @@ -1,129 +0,0 @@ -/* - * QEMU Error Objects - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2. See - * the COPYING.LIB file in the top-level directory. - */ - -#include "qemu-common.h" -#include "qapi/error.h" - -struct Error -{ - char *msg; - ErrorClass err_class; -}; - -Error *error_abort; - -void error_set(Error **errp, ErrorClass err_class, const char *fmt, ...) -{ - Error *err; - va_list ap; - int saved_errno = errno; - - if (errp == NULL) { - return; - } - assert(*errp == NULL); - - err = g_malloc0(sizeof(*err)); - - va_start(ap, fmt); - err->msg = g_strdup_vprintf(fmt, ap); - va_end(ap); - err->err_class = err_class; - - if (errp == &error_abort) { - // abort(); - } - - *errp = err; - - errno = saved_errno; -} - -void error_set_errno(Error **errp, int os_errno, ErrorClass err_class, - const char *fmt, ...) 
-{ - Error *err; - char *msg1; - va_list ap; - int saved_errno = errno; - - if (errp == NULL) { - return; - } - assert(*errp == NULL); - - err = g_malloc0(sizeof(*err)); - - va_start(ap, fmt); - msg1 = g_strdup_vprintf(fmt, ap); - if (os_errno != 0) { - err->msg = g_strdup_printf("%s: %s", msg1, strerror(os_errno)); - g_free(msg1); - } else { - err->msg = msg1; - } - va_end(ap); - err->err_class = err_class; - - if (errp == &error_abort) { - // abort(); - } - - *errp = err; - - errno = saved_errno; -} - -void error_setg_file_open(Error **errp, int os_errno, const char *filename) -{ - error_setg_errno(errp, os_errno, "Could not open '%s'", filename); -} - -Error *error_copy(const Error *err) -{ - Error *err_new; - - err_new = g_malloc0(sizeof(*err)); - err_new->msg = g_strdup(err->msg); - err_new->err_class = err->err_class; - - return err_new; -} - -ErrorClass error_get_class(const Error *err) -{ - return err->err_class; -} - -const char *error_get_pretty(Error *err) -{ - return err->msg; -} - -void error_free(Error *err) -{ - if (err) { - g_free(err->msg); - g_free(err); - } -} - -void error_propagate(Error **dst_errp, Error *local_err) -{ - if (local_err && dst_errp == &error_abort) { - // abort(); - } else if (dst_errp && !*dst_errp) { - *dst_errp = local_err; - } else if (local_err) { - error_free(local_err); - } -} diff --git a/qemu/util/getauxval.c b/qemu/util/getauxval.c index 4615b09e..36afdfb9 100644 --- a/qemu/util/getauxval.c +++ b/qemu/util/getauxval.c @@ -22,7 +22,6 @@ * THE SOFTWARE. */ -#include "qemu-common.h" #include "qemu/osdep.h" #ifdef CONFIG_GETAUXVAL @@ -76,7 +75,7 @@ static const ElfW_auxv_t *qemu_init_auxval(void) auxv = a = g_realloc(a, size); r = read(fd, (char *)a + ofs, ofs); } while (r == ofs); -} + } close(fd); return a; @@ -99,16 +98,6 @@ unsigned long qemu_getauxval(unsigned long type) return 0; } -#elif defined(__FreeBSD__) -#include - -unsigned long qemu_getauxval(unsigned long type) -{ - unsigned long aux = 0; - elf_aux_info(type, &aux, sizeof(aux)); - return aux; -} - #else unsigned long qemu_getauxval(unsigned long type) diff --git a/qemu/util/guest-random.c b/qemu/util/guest-random.c new file mode 100644 index 00000000..7c1fe7be --- /dev/null +++ b/qemu/util/guest-random.c @@ -0,0 +1,81 @@ +/* + * QEMU guest-visible random functions + * + * Copyright 2019 Linaro, Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +//#include "qapi/error.h" +#include "qemu/guest-random.h" +#include "crypto/random.h" +//#include "sysemu/replay.h" + + +#ifndef _MSC_VER +static __thread GRand *thread_rand; +#endif +static bool deterministic = true; + + +static int glib_random_bytes(void *buf, size_t len) +{ +#ifndef _MSC_VER + GRand *rand = thread_rand; + size_t i; + uint32_t x; + + if (unlikely(rand == NULL)) { + /* Thread not initialized for a cpu, or main w/o -seed. 
+ */
+        thread_rand = rand = g_rand_new();
+    }
+
+    for (i = 0; i + 4 <= len; i += 4) {
+        x = g_rand_int(rand);
+        __builtin_memcpy(buf + i, &x, 4);
+    }
+    if (i < len) {
+        x = g_rand_int(rand);
+        __builtin_memcpy(buf + i, &x, len - i);
+    }
+#endif
+    return 0;
+}
+
+int qemu_guest_getrandom(void *buf, size_t len)
+{
+    return glib_random_bytes(buf, len);
+}
+
+void qemu_guest_getrandom_nofail(void *buf, size_t len)
+{
+    (void)qemu_guest_getrandom(buf, len);
+}
+
+uint64_t qemu_guest_random_seed_thread_part1(void)
+{
+    if (deterministic) {
+        uint64_t ret;
+        glib_random_bytes(&ret, sizeof(ret));
+        return ret;
+    }
+    return 0;
+}
+
+void qemu_guest_random_seed_thread_part2(uint64_t seed)
+{
+#ifndef _MSC_VER
+    g_assert(thread_rand == NULL);
+    if (deterministic) {
+        thread_rand =
+            g_rand_new_with_seed_array((const guint32 *)&seed,
+                                       sizeof(seed) / sizeof(guint32));
+    }
+#endif
+}
+
diff --git a/qemu/util/host-utils.c b/qemu/util/host-utils.c
index 5f1d7a79..7b932207 100644
--- a/qemu/util/host-utils.c
+++ b/qemu/util/host-utils.c
@@ -23,8 +23,7 @@
  * THE SOFTWARE.
  */
 
-#include
-#include "unicorn/platform.h"
+#include "qemu/osdep.h"
 #include "qemu/host-utils.h"
 
 #ifndef CONFIG_INT128
@@ -54,10 +53,10 @@ static inline void mul64(uint64_t *plow, uint64_t *phigh,
     rh.ll = (uint64_t)a0.l.high * b0.l.high;
 
     c = (uint64_t)rl.l.high + rm.l.low + rn.l.low;
-    rl.l.high = (uint32_t)c;
+    rl.l.high = c;
     c >>= 32;
     c = c + rm.l.high + rn.l.high + rh.l.low;
-    rh.l.low = (uint32_t)c;
+    rh.l.low = c;
     rh.l.high += (uint32_t)(c >> 32);
 
     *plow = rl.ll;
@@ -160,8 +159,69 @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
     return overflow;
 }
-#else
-// avoid empty object file
-void dummy_func(void);
-void dummy_func(void) {}
 #endif
+
+/**
+ * urshift - 128-bit Unsigned Right Shift.
+ * @plow: in/out - lower 64-bit integer.
+ * @phigh: in/out - higher 64-bit integer.
+ * @shift: in - bits to shift, between 0 and 127.
+ *
+ * Result is zero-extended and stored in plow/phigh, which are
+ * input/output variables. Shift values outside the range will
+ * be reduced modulo 128. In other words, the caller is responsible
+ * for verifying/asserting both the shift range and the plow/phigh pointers.
+ */
+void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift)
+{
+    shift &= 127;
+    if (shift == 0) {
+        return;
+    }
+
+    uint64_t h = *phigh >> (shift & 63);
+    if (shift >= 64) {
+        *plow = h;
+        *phigh = 0;
+    } else {
+        *plow = (*plow >> (shift & 63)) | (*phigh << (64 - (shift & 63)));
+        *phigh = h;
+    }
+}
+
+/**
+ * ulshift - 128-bit Unsigned Left Shift.
+ * @plow: in/out - lower 64-bit integer.
+ * @phigh: in/out - higher 64-bit integer.
+ * @shift: in - bits to shift, between 0 and 127.
+ * @overflow: out - true if any 1-bit is shifted out.
+ *
+ * Result is zero-extended and stored in plow/phigh, which are
+ * input/output variables. Shift values outside the range will
+ * be reduced modulo 128. In other words, the caller is responsible
+ * for verifying/asserting both the shift range and the plow/phigh pointers.
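+ *
+ * Example (editor's illustration, not part of the original patch):
+ *
+ *   uint64_t lo = 1, hi = 0;
+ *   bool ovf = false;
+ *
+ *   ulshift(&lo, &hi, 64, &ovf);   // now lo == 0, hi == 1, ovf still false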
+ */ +void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow) +{ + uint64_t low = *plow; + uint64_t high = *phigh; + + shift &= 127; + if (shift == 0) { + return; + } + + /* check if any bit will be shifted out */ + urshift(&low, &high, 128 - shift); + if (low | high) { + *overflow = true; + } + + if (shift >= 64) { + *phigh = *plow << (shift & 63); + *plow = 0; + } else { + *phigh = (*plow >> (64 - (shift & 63))) | (*phigh << (shift & 63)); + *plow = *plow << shift; + } +} diff --git a/qemu/util/module.c b/qemu/util/module.c deleted file mode 100644 index 9d9cd238..00000000 --- a/qemu/util/module.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * QEMU Module Infrastructure - * - * Copyright IBM, Corp. 2009 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu-common.h" -#include "qemu/queue.h" - -#include "uc_priv.h" - -static void init_lists(struct uc_struct *uc) -{ - int i; - - for (i = 0; i < MODULE_INIT_MAX; i++) { - QTAILQ_INIT(&uc->init_type_list[i]); - } -} - - -static ModuleTypeList *find_type(struct uc_struct *uc, module_init_type type) -{ - ModuleTypeList *l; - - init_lists(uc); - - l = &uc->init_type_list[type]; - - return l; -} - -static void module_load(module_init_type type) -{ -} - -void module_call_init(struct uc_struct *uc, module_init_type type) -{ - ModuleTypeList *l; - ModuleEntry *e; - - module_load(type); - l = find_type(uc, type); - - QTAILQ_FOREACH(e, l, node) { - e->init(); - } -} diff --git a/qemu/util/osdep.c b/qemu/util/osdep.c new file mode 100644 index 00000000..148e37a5 --- /dev/null +++ b/qemu/util/osdep.c @@ -0,0 +1,90 @@ +/* + * QEMU low level functions + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" + +/* Needed early for CONFIG_BSD etc. 
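+
+   Editor's sketch (not part of the original patch): the helpers below are
+   thin portability wrappers, so e.g. qemu_mprotect_rwx(code_buf, 4096)
+   becomes VirtualProtect(..., PAGE_EXECUTE_READWRITE, ...) on Windows and
+   mprotect(code_buf, 4096, PROT_READ | PROT_WRITE | PROT_EXEC) elsewhere,
+   where code_buf is a hypothetical page-aligned buffer.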
*/ + +#ifdef CONFIG_SOLARIS +#include +/* See MySQL bug #7156 (http://bugs.mysql.com/bug.php?id=7156) for + discussion about Solaris header problems */ +extern int madvise(char *, size_t, int); +#endif + +#include "qemu-common.h" +#include "qemu/cutils.h" + +int qemu_madvise(void *addr, size_t len, int advice) +{ + if (advice == QEMU_MADV_INVALID) { + errno = EINVAL; + return -1; + } +#if defined(CONFIG_MADVISE) + return madvise(addr, len, advice); +#elif defined(CONFIG_POSIX_MADVISE) + return posix_madvise(addr, len, advice); +#else + errno = EINVAL; + return -1; +#endif +} + +static int qemu_mprotect__osdep(void *addr, size_t size, int prot) +{ +#ifdef _WIN32 + DWORD old_protect; + + if (!VirtualProtect(addr, size, prot, &old_protect)) { + // g_autofree gchar *emsg = g_win32_error_message(GetLastError()); + // error_report("%s: VirtualProtect failed: %s", __func__, emsg); + return -1; + } + return 0; +#else + if (mprotect(addr, size, prot)) { + // error_report("%s: mprotect failed: %s", __func__, strerror(errno)); + return -1; + } + return 0; +#endif +} + +int qemu_mprotect_rwx(void *addr, size_t size) +{ +#ifdef _WIN32 + return qemu_mprotect__osdep(addr, size, PAGE_EXECUTE_READWRITE); +#else + return qemu_mprotect__osdep(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC); +#endif +} + +int qemu_mprotect_none(void *addr, size_t size) +{ +#ifdef _WIN32 + return qemu_mprotect__osdep(addr, size, PAGE_NOACCESS); +#else + return qemu_mprotect__osdep(addr, size, PROT_NONE); +#endif +} diff --git a/qemu/util/oslib-posix.c b/qemu/util/oslib-posix.c index 3614205d..e480e101 100644 --- a/qemu/util/oslib-posix.c +++ b/qemu/util/oslib-posix.c @@ -26,40 +26,23 @@ * THE SOFTWARE. */ -#if defined(__linux__) && (defined(__x86_64__) || defined(__arm__)) - /* Use 2 MiB alignment so transparent hugepages can be used by KVM. - Valgrind does not support alignments larger than 1 MiB, - therefore we need special code which handles running on Valgrind. */ -# define QEMU_VMALLOC_ALIGN (512 * 4096) -#elif defined(__linux__) && defined(__s390x__) - /* Use 1 MiB (segment size) alignment so gmap can be used by KVM. 
*/ -# define QEMU_VMALLOC_ALIGN (256 * 4096) -#else -# define QEMU_VMALLOC_ALIGN getpagesize() -#endif -#define HUGETLBFS_MAGIC 0x958458f6 - -#include "unicorn/platform.h" -#include "config-host.h" -#include "sysemu/sysemu.h" -#include -#include -#include -#ifdef __HAIKU__ -#include -#else -#include -#endif +#include +#include "qemu/osdep.h" #ifdef CONFIG_LINUX -#if !defined(__CYGWIN__) -#include -#endif -#include -#endif +#include +#else /* !CONFIG_LINUX */ +#define MAP_SYNC 0x0 +#define MAP_SHARED_VALIDATE 0x0 +#endif /* CONFIG_LINUX */ -#ifdef __FreeBSD__ -#include +#ifndef __MINGW32__ +static void *qemu_ram_mmap(struct uc_struct *uc, + size_t size, + size_t align, + bool shared); + +static void qemu_ram_munmap(struct uc_struct *uc, void *ptr, size_t size); #endif void *qemu_oom_check(void *ptr) @@ -79,7 +62,7 @@ void *qemu_try_memalign(size_t alignment, size_t size) alignment = sizeof(void*); } -#if defined(_POSIX_C_SOURCE) && !defined(__sun__) +#if defined(CONFIG_POSIX_MEMALIGN) int ret; ret = posix_memalign(&ptr, alignment, size); if (ret != 0) { @@ -88,9 +71,12 @@ void *qemu_try_memalign(size_t alignment, size_t size) } #elif defined(CONFIG_BSD) ptr = valloc(size); +#elif defined(__MINGW32__) + ptr = __mingw_aligned_malloc(size, alignment); #else ptr = memalign(alignment, size); #endif + //trace_qemu_memalign(alignment, size, ptr); return ptr; } @@ -99,14 +85,32 @@ void *qemu_memalign(size_t alignment, size_t size) return qemu_oom_check(qemu_try_memalign(alignment, size)); } -/* alloc shared memory pages */ -void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment) +#ifdef __MINGW32__ +static int get_allocation_granularity(void) { + SYSTEM_INFO system_info; + + GetSystemInfo(&system_info); + return system_info.dwAllocationGranularity; +} +#endif + +/* alloc shared memory pages */ +void *qemu_anon_ram_alloc(struct uc_struct *uc, size_t size, uint64_t *alignment) +{ +#ifdef __MINGW32__ + void *ptr; + + ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); + // trace_qemu_anon_ram_alloc(size, ptr); + + if (ptr && alignment) { + *alignment = MAX(get_allocation_granularity(), getpagesize()); + } + return ptr; +#else size_t align = QEMU_VMALLOC_ALIGN; - size_t total = size + align - getpagesize(); - void *ptr = mmap(0, total, PROT_READ | PROT_WRITE, - MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - size_t offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr; + void *ptr = qemu_ram_mmap(uc, size, align, false); if (ptr == MAP_FAILED) { return NULL; @@ -115,27 +119,156 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment) if (alignment) { *alignment = align; } - ptr += offset; - total -= offset; + + //trace_qemu_anon_ram_alloc(size, ptr); + return ptr; +#endif +} + +void qemu_vfree(void *ptr) +{ +#ifdef __MINGW32__ + if (ptr) { + VirtualFree(ptr, 0, MEM_RELEASE); + } +#else + //trace_qemu_vfree(ptr); + free(ptr); +#endif +} + +void qemu_anon_ram_free(struct uc_struct *uc, void *ptr, size_t size) +{ +#ifdef __MINGW32__ + if (ptr) { + VirtualFree(ptr, 0, MEM_RELEASE); + } +#else + //trace_qemu_anon_ram_free(ptr, size); + qemu_ram_munmap(uc, ptr, size); +#endif +} + +#if defined(__powerpc64__) && defined(__linux__) +static size_t qemu_fd_getpagesize(struct uc_struct *uc) +{ +#ifdef CONFIG_LINUX +#ifdef __sparc__ + /* SPARC Linux needs greater alignment than the pagesize */ + return QEMU_VMALLOC_ALIGN; +#endif +#endif + + return uc->qemu_real_host_page_size; +} +#endif + +#ifndef __MINGW32__ +static void *qemu_ram_mmap(struct uc_struct *uc, + size_t size, + size_t align, + 
bool shared) +{ + int flags; + int map_sync_flags = 0; + int guardfd; + size_t offset; + size_t pagesize; + size_t total; + void *guardptr; + void *ptr; + + /* + * Note: this always allocates at least one extra page of virtual address + * space, even if size is already aligned. + */ + total = size + align; + +#if defined(__powerpc64__) && defined(__linux__) + /* On ppc64 mappings in the same segment (aka slice) must share the same + * page size. Since we will be re-allocating part of this segment + * from the supplied fd, we should make sure to use the same page size, to + * this end we mmap the supplied fd. In this case, set MAP_NORESERVE to + * avoid allocating backing store memory. + * We do this unless we are using the system page size, in which case + * anonymous memory is OK. + */ + flags = MAP_PRIVATE; + pagesize = qemu_fd_getpagesize(uc); + if (pagesize == uc->qemu_real_host_page_size) { + guardfd = -1; + flags |= MAP_ANONYMOUS; + } else { + guardfd = -1; + flags |= MAP_NORESERVE; + } +#else + guardfd = -1; + pagesize = uc->qemu_real_host_page_size; + flags = MAP_PRIVATE | MAP_ANONYMOUS; +#endif + + guardptr = mmap(0, total, PROT_NONE, flags, guardfd, 0); + + if (guardptr == MAP_FAILED) { + return MAP_FAILED; + } + + assert(is_power_of_2(align)); + /* Always align to host page size */ + assert(align >= pagesize); + + flags = MAP_FIXED; + flags |= MAP_ANONYMOUS; + flags |= shared ? MAP_SHARED : MAP_PRIVATE; + + offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr; + + ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE, + flags | map_sync_flags, -1, 0); + + if (ptr == MAP_FAILED && map_sync_flags) { + /* + * if map failed with MAP_SHARED_VALIDATE | MAP_SYNC, + * we will remove these flags to handle compatibility. + */ + ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE, + flags, -1, 0); + } + + if (ptr == MAP_FAILED) { + munmap(guardptr, total); + return MAP_FAILED; + } if (offset > 0) { - munmap(ptr - offset, offset); + munmap(guardptr, offset); } - if (total > size) { - munmap(ptr + size, total - size); + + /* + * Leave a single PROT_NONE page allocated after the RAM block, to serve as + * a guard page guarding against potential buffer overflows. + */ + total -= offset; + if (total > size + pagesize) { + munmap(ptr + size + pagesize, total - size - pagesize); } return ptr; } -void qemu_vfree(void *ptr) +static void qemu_ram_munmap(struct uc_struct *uc, void *ptr, size_t size) { - free(ptr); -} + size_t pagesize; -void qemu_anon_ram_free(void *ptr, size_t size) -{ if (ptr) { - munmap(ptr, size); + /* Unmap both the RAM block and the guard page */ +#if defined(__powerpc64__) && defined(__linux__) + pagesize = qemu_fd_getpagesize(uc); +#else + pagesize = uc->qemu_real_host_page_size; +#endif + munmap(ptr, size + pagesize); } } +#endif diff --git a/qemu/util/oslib-win32.c b/qemu/util/oslib-win32.c index cb60b98c..9421c98d 100644 --- a/qemu/util/oslib-win32.c +++ b/qemu/util/oslib-win32.c @@ -2,7 +2,7 @@ * os-win32.c * * Copyright (c) 2003-2008 Fabrice Bellard - * Copyright (c) 2010 Red Hat, Inc. + * Copyright (c) 2010-2016 Red Hat, Inc. * * QEMU library functions for win32 which are shared between QEMU and * the QEMU tools. @@ -25,21 +25,18 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * + * The implementation of g_poll (functions poll_rest, g_poll) at the end of + * this file are based on code from GNOME glib-2 and use a different license, + * see the license comment there. 
*/ -#include + +#include + +#include "qemu/osdep.h" #include - -#include -#include "config-host.h" +#include "qemu-common.h" #include "sysemu/sysemu.h" -/* this must come after including "trace.h" */ -/* The pragmas are to fix this issue: https://connect.microsoft.com/VisualStudio/feedback/details/976983 */ -#pragma warning(push) -#pragma warning(disable : 4091) -#include -#pragma warning(pop) - void *qemu_oom_check(void *ptr) { if (ptr == NULL) { @@ -57,7 +54,7 @@ void *qemu_try_memalign(size_t alignment, size_t size) abort(); } ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); - // trace_qemu_memalign(alignment, size, ptr); + //trace_qemu_memalign(alignment, size, ptr); return ptr; } @@ -65,36 +62,45 @@ void *qemu_memalign(size_t alignment, size_t size) { return qemu_oom_check(qemu_try_memalign(alignment, size)); } - -void *qemu_anon_ram_alloc(size_t size, uint64_t *align) + +static int get_allocation_granularity(void) +{ + SYSTEM_INFO system_info; + + GetSystemInfo(&system_info); + return system_info.dwAllocationGranularity; +} + +void *qemu_anon_ram_alloc(struct uc_struct *uc, size_t size, uint64_t *align) { void *ptr; - /* FIXME: this is not exactly optimal solution since VirtualAlloc - has 64Kb granularity, but at least it guarantees us that the - memory is page aligned. */ ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); // trace_qemu_anon_ram_alloc(size, ptr); + + if (ptr && align) { + *align = MAX(get_allocation_granularity(), getpagesize()); + } return ptr; } void qemu_vfree(void *ptr) { - // trace_qemu_vfree(ptr); + //trace_qemu_vfree(ptr); if (ptr) { VirtualFree(ptr, 0, MEM_RELEASE); } } -void qemu_anon_ram_free(void *ptr, size_t size) +void qemu_anon_ram_free(struct uc_struct *uc, void *ptr, size_t size) { - // trace_qemu_anon_ram_free(ptr, size); + //trace_qemu_anon_ram_free(ptr, size); if (ptr) { VirtualFree(ptr, 0, MEM_RELEASE); } } -size_t getpagesize(void) +int getpagesize(void) { SYSTEM_INFO system_info; diff --git a/qemu/util/pagesize.c b/qemu/util/pagesize.c new file mode 100644 index 00000000..44e9e616 --- /dev/null +++ b/qemu/util/pagesize.c @@ -0,0 +1,16 @@ +/* + * pagesize.c - query the host about its page size + * + * Copyright (C) 2017, Emilio G. Cota + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" + +#include + +void init_real_host_page_size(struct uc_struct *uc) +{ + uc->qemu_real_host_page_size = getpagesize(); +} diff --git a/qemu/util/qdist.c b/qemu/util/qdist.c new file mode 100644 index 00000000..63543afb --- /dev/null +++ b/qemu/util/qdist.c @@ -0,0 +1,219 @@ +/* + * qdist.c - QEMU helpers for handling frequency distributions of data. + * + * Copyright (C) 2016, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
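+ *
+ * A minimal usage sketch (editor's illustration, not from the original
+ * patch):
+ *
+ *   struct qdist d;
+ *
+ *   qdist_init(&d);
+ *   qdist_inc(&d, 3.0);     // one sample at x == 3.0
+ *   qdist_add(&d, 7.0, 2);  // two more samples at x == 7.0
+ *   // qdist_avg(&d) == (3.0 + 2 * 7.0) / 3 == 17.0 / 3
+ *   qdist_destroy(&d);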
+ */
+#include "qemu/osdep.h"
+#include "qemu/qdist.h"
+
+#include <math.h>
+#ifndef NAN
+#define NAN (0.0 / 0.0)
+#endif
+
+#define QDIST_EMPTY_STR "(empty)"
+
+void qdist_init(struct qdist *dist)
+{
+    dist->entries = g_new(struct qdist_entry, 1);
+    dist->size = 1;
+    dist->n = 0;
+}
+
+void qdist_destroy(struct qdist *dist)
+{
+    g_free(dist->entries);
+}
+
+static inline int qdist_cmp_double(double a, double b)
+{
+    if (a > b) {
+        return 1;
+    } else if (a < b) {
+        return -1;
+    }
+    return 0;
+}
+
+static int qdist_cmp(const void *ap, const void *bp)
+{
+    const struct qdist_entry *a = ap;
+    const struct qdist_entry *b = bp;
+
+    return qdist_cmp_double(a->x, b->x);
+}
+
+void qdist_add(struct qdist *dist, double x, long count)
+{
+    struct qdist_entry *entry = NULL;
+
+    if (dist->n) {
+        struct qdist_entry e;
+
+        e.x = x;
+        entry = bsearch(&e, dist->entries, dist->n, sizeof(e), qdist_cmp);
+    }
+
+    if (entry) {
+        entry->count += count;
+        return;
+    }
+
+    if (unlikely(dist->n == dist->size)) {
+        dist->size *= 2;
+        dist->entries = g_renew(struct qdist_entry, dist->entries, dist->size);
+    }
+    dist->n++;
+    entry = &dist->entries[dist->n - 1];
+    entry->x = x;
+    entry->count = count;
+    qsort(dist->entries, dist->n, sizeof(*entry), qdist_cmp);
+}
+
+void qdist_inc(struct qdist *dist, double x)
+{
+    qdist_add(dist, x, 1);
+}
+
+/*
+ * Bin the distribution in @from into @n bins of consecutive, non-overlapping
+ * intervals, copying the result to @to.
+ *
+ * This function is internal to qdist: only this file and test code should
+ * ever call it.
+ *
+ * Note: calling this function on an already-binned qdist is a bug.
+ *
+ * If @n == 0 or @from->n == 1, use @from->n.
+ */
+void qdist_bin__internal(struct qdist *to, const struct qdist *from, size_t n)
+{
+    double xmin, xmax;
+    double step;
+    size_t i, j;
+
+    qdist_init(to);
+
+    if (from->n == 0) {
+        return;
+    }
+    if (n == 0 || from->n == 1) {
+        n = from->n;
+    }
+
+    /* set equally-sized bins between @from's left and right */
+    xmin = qdist_xmin(from);
+    xmax = qdist_xmax(from);
+    step = (xmax - xmin) / n;
+
+    if (n == from->n) {
+        /* if @from's entries are equally spaced, no need to re-bin */
+        for (i = 0; i < from->n; i++) {
+            if (from->entries[i].x != xmin + i * step) {
+                goto rebin;
+            }
+        }
+        /* they're equally spaced, so copy the dist and bail out */
+        to->entries = g_renew(struct qdist_entry, to->entries, n);
+        to->n = from->n;
+        memcpy(to->entries, from->entries, sizeof(*to->entries) * to->n);
+        return;
+    }
+
+ rebin:
+    j = 0;
+    for (i = 0; i < n; i++) {
+        double x;
+        double left, right;
+
+        left = xmin + i * step;
+        right = xmin + (i + 1) * step;
+
+        /* Add x, even if it might not get any counts later */
+        x = left;
+        qdist_add(to, x, 0);
+
+        /*
+         * To avoid double-counting we capture [left, right) ranges, except for
+         * the rightmost bin, which captures a [left, right] range.
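+         *
+         * For instance (editor's illustration): binning entries at
+         * x = {0, 1, 2, 3} into n = 2 bins gives step = 1.5, one bin at
+         * x = 0 absorbing the counts for 0 and 1, and one bin at x = 1.5
+         * absorbing the counts for 2 and 3.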
+ */ + while (j < from->n && (from->entries[j].x < right || i == n - 1)) { + struct qdist_entry *o = &from->entries[j]; + + qdist_add(to, x, o->count); + j++; + } + } +} + +static inline double qdist_x(const struct qdist *dist, int index) +{ + if (dist->n == 0) { + return NAN; + } + return dist->entries[index].x; +} + +double qdist_xmin(const struct qdist *dist) +{ + return qdist_x(dist, 0); +} + +double qdist_xmax(const struct qdist *dist) +{ + return qdist_x(dist, dist->n - 1); +} + +size_t qdist_unique_entries(const struct qdist *dist) +{ + return dist->n; +} + +unsigned long qdist_sample_count(const struct qdist *dist) +{ + unsigned long count = 0; + size_t i; + + for (i = 0; i < dist->n; i++) { + struct qdist_entry *e = &dist->entries[i]; + + count += e->count; + } + return count; +} + +static double qdist_pairwise_avg(const struct qdist *dist, size_t index, + size_t n, unsigned long count) +{ + /* amortize the recursion by using a base case > 2 */ + if (n <= 8) { + size_t i; + double ret = 0; + + for (i = 0; i < n; i++) { + struct qdist_entry *e = &dist->entries[index + i]; + + ret += e->x * e->count / count; + } + return ret; + } else { + size_t n2 = n / 2; + + return qdist_pairwise_avg(dist, index, n2, count) + + qdist_pairwise_avg(dist, index + n2, n - n2, count); + } +} + +double qdist_avg(const struct qdist *dist) +{ + unsigned long count; + + count = qdist_sample_count(dist); + if (!count) { + return NAN; + } + return qdist_pairwise_avg(dist, 0, dist->n, count); +} diff --git a/qemu/util/qemu-error.c b/qemu/util/qemu-error.c deleted file mode 100644 index c3c50546..00000000 --- a/qemu/util/qemu-error.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Error reporting - * - * Copyright (C) 2010 Red Hat Inc. - * - * Authors: - * Markus Armbruster , - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#include -#include -#include - -static const char *progname; - -/* - * Set the program name for error_print_loc(). - */ -void error_set_progname(const char *argv0) -{ - const char *p = strrchr(argv0, '/'); - progname = p ? p + 1 : argv0; -} - -const char *error_get_progname(void) -{ - return progname; -} - -/* - * Print current location to current monitor if we have one, else to stderr. - */ -static void error_print_loc(void) -{ -} - -/* - * Print an error message to current monitor if we have one, else to stderr. - * Format arguments like vsprintf(). The result should not contain - * newlines. - * Prepend the current location and append a newline. - * It's wrong to call this in a QMP monitor. Use qerror_report() there. - */ -#ifdef _WIN32 -void error_vreport(const char *fmt, va_list ap) -{ - error_print_loc(); - vfprintf(stderr, fmt, ap); - fprintf(stderr, "\n"); -} -#else -void error_vreport(const char *fmt, va_list ap) -{ - GTimeVal tv; - gchar *timestr; - - error_print_loc(); - error_vprintf(fmt, ap); - error_printf("\n"); -} -#endif - -/* - * Print an error message to current monitor if we have one, else to stderr. - * Format arguments like sprintf(). The result should not contain - * newlines. - * Prepend the current location and append a newline. - * It's wrong to call this in a QMP monitor. Use qerror_report() there. - */ -void error_report(const char *fmt, ...) 
-{
-    va_list ap;
-
-    va_start(ap, fmt);
-    error_vreport(fmt, ap);
-    va_end(ap);
-}
diff --git a/qemu/util/qemu-thread-posix.c b/qemu/util/qemu-thread-posix.c
index c4611443..ef6ce118 100644
--- a/qemu/util/qemu-thread-posix.c
+++ b/qemu/util/qemu-thread-posix.c
@@ -12,18 +12,9 @@
  */
 #include
 #include
-#include
-#include
 #include
-#include "unicorn/platform.h"
 #include
-#include
-#ifdef __linux__
-#include
-#include
-#endif
 #include "qemu/thread.h"
-#include "qemu/atomic.h"
 
 static void error_exit(int err, const char *msg)
 {
@@ -35,7 +26,9 @@ int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *nam
                        void *(*start_routine)(void*),
                        void *arg, int mode)
 {
+#ifndef __MINGW32__
     sigset_t set, oldset;
+#endif
     int err;
     pthread_attr_t attr;
 
@@ -52,8 +45,9 @@ int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *nam
         }
     }
 
-    /* Leave signal handling to the iothread. */
+#ifndef __MINGW32__
     sigfillset(&set);
     pthread_sigmask(SIG_SETMASK, &set, &oldset);
+#endif
     err = pthread_create(&thread->thread, &attr, start_routine, arg);
     if (err) {
diff --git a/qemu/util/qemu-timer-common.c b/qemu/util/qemu-timer-common.c
index 3ab3326b..2dcebf4d 100644
--- a/qemu/util/qemu-timer-common.c
+++ b/qemu/util/qemu-timer-common.c
@@ -21,23 +21,38 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
+#include "qemu/osdep.h"
 #include "qemu/timer.h"
 
 /***********************************************************/
 /* real time host monotonic timer */
 
 #ifdef _WIN32
+
 int64_t clock_freq;
 
-INITIALIZER(init_get_clock)
+void init_get_clock(void)
 {
     LARGE_INTEGER freq;
-    int ret;
-    ret = QueryPerformanceFrequency(&freq);
+    int ret = QueryPerformanceFrequency(&freq);
     if (ret == 0) {
         fprintf(stderr, "Could not calibrate ticks\n");
         exit(1);
     }
     clock_freq = freq.QuadPart;
 }
+
+#else
+
+int use_rt_clock;
+
+void init_get_clock(void)
+{
+    struct timespec ts;
+
+    use_rt_clock = 0;
+    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
+        use_rt_clock = 1;
+    }
+}
 #endif
diff --git a/qemu/tcg/tcg-be-null.h b/qemu/util/qemu-timer.c
similarity index 74%
rename from qemu/tcg/tcg-be-null.h
rename to qemu/util/qemu-timer.c
index ba2da3c5..38a5259b 100644
--- a/qemu/tcg/tcg-be-null.h
+++ b/qemu/util/qemu-timer.c
@@ -1,5 +1,7 @@
 /*
- * TCG Backend Data: No backend data
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -20,26 +22,13 @@
  * THE SOFTWARE.
  */
 
-#include "tcg.h"
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "uc_priv.h"
+#include "sysemu/cpus.h"
+#include "qemu/queue.h"
 
-typedef struct TCGBackendData {
-    /* Empty */
-    char dummy;
-} TCGBackendData;
-
-
-/*
- * Initialize TB backend data at the beginning of the TB.
- */
-
-static inline void tcg_out_tb_init(TCGContext *s)
-{
-}
-
-/*
- * Generate TB finalization at the end of block
- */
-
-static inline void tcg_out_tb_finalize(TCGContext *s)
+int64_t qemu_clock_get_ns(QEMUClockType type)
 {
+    return get_clock();
 }
diff --git a/qemu/util/qht.c b/qemu/util/qht.c
new file mode 100644
index 00000000..fd436e34
--- /dev/null
+++ b/qemu/util/qht.c
@@ -0,0 +1,761 @@
+/*
+ * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
+ *
+ * Copyright (C) 2016, Emilio G. Cota
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
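+ *
+ * A minimal usage sketch (editor's illustration; uc, obj, my_cmp and
+ * my_hash are placeholder names, not part of this patch):
+ *
+ *   struct qht ht;
+ *
+ *   qht_init(&ht, my_cmp, 1 << 10, QHT_MODE_AUTO_RESIZE);
+ *   qht_insert(uc, &ht, obj, my_hash(obj), NULL);
+ *   void *hit = qht_lookup(uc, &ht, obj, my_hash(obj));
+ *   qht_remove(&ht, obj, my_hash(obj));
+ *   qht_destroy(&ht);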
+ *
+ * Assumptions:
+ * - NULL cannot be inserted/removed as a pointer value.
+ * - Trying to insert an already-existing hash-pointer pair is OK. However,
+ *   it is not OK to insert into the same hash table different hash-pointer
+ *   pairs that have the same pointer value, but not the hashes.
+ * - Lookups are performed under an RCU read-critical section; removals
+ *   must wait for a grace period to elapse before freeing removed objects.
+ *
+ * Features:
+ * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
+ *   Lookups that are concurrent with writes to the same bucket will retry
+ *   via a seqlock; iterators acquire all bucket locks and therefore can be
+ *   concurrent with lookups and are serialized wrt writers.
+ * - Writes (i.e. insertions/removals) can be concurrent with writes to
+ *   different buckets; writes to the same bucket are serialized through a lock.
+ * - Optional auto-resizing: the hash table resizes up if the load surpasses
+ *   a certain threshold. Resizing is done concurrently with readers; writes
+ *   are serialized with the resize operation.
+ *
+ * The key structure is the bucket, which is cacheline-sized. Buckets
+ * contain a few hash values and pointers; the u32 hash values are stored in
+ * full so that resizing is fast. Having this structure instead of directly
+ * chaining items has two advantages:
+ * - Failed lookups fail fast, and touch a minimum number of cache lines.
+ * - Resizing the hash table with concurrent lookups is easy.
+ *
+ * There are two types of buckets:
+ * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
+ * 2. all "non-head" buckets (i.e. all others) are members of a chain that
+ *    starts from a head bucket.
+ * Note that the seqlock and spinlock of a head bucket apply to all buckets
+ * chained to it; these two fields are unused in non-head buckets.
+ *
+ * On removals, we move the last valid item in the chain to the position of the
+ * just-removed entry. This makes lookups slightly faster, since the moment an
+ * invalid entry is found, the (failed) lookup is over.
+ *
+ * Resizing is done by taking all bucket spinlocks (so that no other writers can
+ * race with us) and then copying all entries into a new hash map. Then, the
+ * ht->map pointer is set, and the old map is freed once no RCU readers can see
+ * it anymore.
+ *
+ * Writers check for concurrent resizes by comparing ht->map before and after
+ * acquiring their bucket lock. If they don't match, a resize has occurred
+ * while the bucket spinlock was being acquired.
+ *
+ * Related Work:
+ * - Idea of cacheline-sized buckets with full hashes taken from:
+ *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
+ *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
+ * - Why not RCU-based hash tables? They would allow us to get rid of the
+ *   seqlock, but resizing would take forever since RCU read critical
+ *   sections in QEMU take quite a long time.
+ *   More info on relativistic hash tables:
+ *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
+ *     Tables via Relativistic Programming", USENIX ATC'11.
+ *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
+ *     https://lwn.net/Articles/612021/
+ */
+#include "qemu/osdep.h"
+#include "qemu/qht.h"
+#include "qemu/atomic.h"
+//#include "qemu/rcu.h"
+
+//#define QHT_DEBUG
+
+/*
+ * We want to avoid false sharing of cache lines. Most systems have 64-byte
+ * cache lines so we go with it for simplicity.
+ * + * Note that systems with smaller cache lines will be fine (the struct is + * almost 64-bytes); systems with larger cache lines might suffer from + * some false sharing. + */ +#define QHT_BUCKET_ALIGN 64 + +/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */ +#if HOST_LONG_BITS == 32 +#define QHT_BUCKET_ENTRIES 6 +#else /* 64-bit */ +#define QHT_BUCKET_ENTRIES 4 +#endif + +enum qht_iter_type { + QHT_ITER_VOID, /* do nothing; use retvoid */ + QHT_ITER_RM, /* remove element if retbool returns true */ +}; + +struct qht_iter { + union { + qht_iter_func_t retvoid; + qht_iter_bool_func_t retbool; + } f; + enum qht_iter_type type; +}; + +/* + * Note: reading partially-updated pointers in @pointers could lead to + * segfaults. We thus access them with atomic_read/set; this guarantees + * that the compiler makes all those accesses atomic. We also need the + * volatile-like behavior in atomic_read, since otherwise the compiler + * might refetch the pointer. + * atomic_read's are of course not necessary when the bucket lock is held. + * + * If both ht->lock and b->lock are grabbed, ht->lock should always + * be grabbed first. + */ +struct qht_bucket { + uint32_t hashes[QHT_BUCKET_ENTRIES]; + void *pointers[QHT_BUCKET_ENTRIES]; + struct qht_bucket *next; +} QEMU_ALIGNED(QHT_BUCKET_ALIGN); + +QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN); + +/** + * struct qht_map - structure to track an array of buckets + * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind + * find the whole struct. + * @buckets: array of head buckets. It is constant once the map is created. + * @n_buckets: number of head buckets. It is constant once the map is created. + * @n_added_buckets: number of added (i.e. "non-head") buckets + * @n_added_buckets_threshold: threshold to trigger an upward resize once the + * number of added buckets surpasses it. + * + * Buckets are tracked in what we call a "map", i.e. this structure. + */ +struct qht_map { + struct qht_bucket *buckets; + size_t n_buckets; + size_t n_added_buckets; + size_t n_added_buckets_threshold; +}; + +/* trigger a resize when n_added_buckets > n_buckets / div */ +#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8 + +static void qht_do_resize_reset(struct uc_struct *uc, struct qht *ht, struct qht_map *new, + bool reset); +static void qht_grow_maybe(struct uc_struct *uc, struct qht *ht); + +#define qht_debug_assert(X) do { (void)(X); } while (0) + +static inline size_t qht_elems_to_buckets(size_t n_elems) +{ + return pow2ceil(n_elems / QHT_BUCKET_ENTRIES); +} + +static inline void qht_head_init(struct qht_bucket *b) +{ + memset(b, 0, sizeof(*b)); +} + +static inline +struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash) +{ + return &map->buckets[hash & (map->n_buckets - 1)]; +} + +/* + * Grab all bucket locks, and set @pmap after making sure the map isn't stale. + * + * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference. + * + * Note: callers cannot have ht->lock held. + */ +static inline +void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap) +{ + struct qht_map *map; + map = ht->map; + *pmap = map; + return; +} + +/* + * Get a head bucket and lock it, making sure its parent map is not stale. + * @pmap is filled with a pointer to the bucket's parent map. 
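+ *
+ * (Editor's note: in this single-threaded Unicorn import the seqlock and
+ * spinlock machinery is compiled out, so "locking" a bucket reduces to
+ * fetching ht->map and computing the head bucket; the QEMU naming and
+ * comments are kept so the code stays close to upstream.)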
+ */
+static inline
+struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
+                                             struct qht_map **pmap)
+{
+    struct qht_bucket *b;
+    struct qht_map *map;
+
+    map = ht->map;
+    b = qht_map_to_bucket(map, hash);
+    *pmap = map;
+    return b;
+}
+
+static inline bool qht_map_needs_resize(const struct qht_map *map)
+{
+    return map->n_added_buckets > map->n_added_buckets_threshold;
+}
+
+static inline void qht_chain_destroy(const struct qht_bucket *head)
+{
+    struct qht_bucket *curr = head->next;
+    struct qht_bucket *prev;
+
+    while (curr) {
+        prev = curr;
+        curr = curr->next;
+        qemu_vfree(prev);
+    }
+}
+
+/* pass only an orphan map */
+static void qht_map_destroy(struct qht_map *map)
+{
+    size_t i;
+
+    for (i = 0; i < map->n_buckets; i++) {
+        qht_chain_destroy(&map->buckets[i]);
+    }
+    qemu_vfree(map->buckets);
+    g_free(map);
+}
+
+static struct qht_map *qht_map_create(size_t n_buckets)
+{
+    struct qht_map *map;
+    size_t i;
+
+    map = g_malloc(sizeof(*map));
+    map->n_buckets = n_buckets;
+
+    map->n_added_buckets = 0;
+    map->n_added_buckets_threshold = n_buckets /
+        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;
+
+    /* let tiny hash tables add at least one non-head bucket */
+    if (unlikely(map->n_added_buckets_threshold == 0)) {
+        map->n_added_buckets_threshold = 1;
+    }
+
+    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
+                                 sizeof(*map->buckets) * n_buckets);
+    for (i = 0; i < n_buckets; i++) {
+        qht_head_init(&map->buckets[i]);
+    }
+    return map;
+}
+
+void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
+              unsigned int mode)
+{
+    struct qht_map *map;
+    size_t n_buckets = qht_elems_to_buckets(n_elems);
+
+    g_assert(cmp);
+    ht->cmp = cmp;
+    ht->mode = mode;
+    map = qht_map_create(n_buckets);
+    ht->map = map;
+}
+
+/* call only when there are no readers/writers left */
+void qht_destroy(struct qht *ht)
+{
+    qht_map_destroy(ht->map);
+    memset(ht, 0, sizeof(*ht));
+}
+
+static void qht_bucket_reset__locked(struct qht_bucket *head)
+{
+    struct qht_bucket *b = head;
+    int i;
+
+    do {
+        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
+            if (b->pointers[i] == NULL) {
+                goto done;
+            }
+            b->hashes[i] = 0;
+            b->pointers[i] = NULL;
+        }
+        b = b->next;
+    } while (b);
+ done:
+    return;
+}
+
+/* call with all bucket locks held */
+static void qht_map_reset__all_locked(struct qht_map *map)
+{
+    size_t i;
+
+    for (i = 0; i < map->n_buckets; i++) {
+        qht_bucket_reset__locked(&map->buckets[i]);
+    }
+}
+
+void qht_reset(struct qht *ht)
+{
+    struct qht_map *map;
+
+    qht_map_lock_buckets__no_stale(ht, &map);
+    qht_map_reset__all_locked(map);
+}
+
+static inline void qht_do_resize(struct uc_struct *uc, struct qht *ht, struct qht_map *new)
+{
+    qht_do_resize_reset(uc, ht, new, false);
+}
+
+static inline void qht_do_resize_and_reset(struct uc_struct *uc, struct qht *ht, struct qht_map *new)
+{
+    qht_do_resize_reset(uc, ht, new, true);
+}
+
+bool qht_reset_size(struct uc_struct *uc, struct qht *ht, size_t n_elems)
+{
+    struct qht_map *new = NULL;
+    struct qht_map *map;
+    size_t n_buckets;
+
+    n_buckets = qht_elems_to_buckets(n_elems);
+
+    map = ht->map;
+    if (n_buckets != map->n_buckets) {
+        new = qht_map_create(n_buckets);
+    }
+    qht_do_resize_and_reset(uc, ht, new);
+
+    return !!new;
+}
+
+static inline
+void *qht_do_lookup(struct uc_struct *uc, const struct qht_bucket *head, qht_lookup_func_t func,
+                    const void *userp, uint32_t hash)
+{
+    const struct qht_bucket *b = head;
+    int i;
+
+    do {
+        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
+            if (b->hashes[i] == hash) {
+                void *p = b->pointers[i];
+
+                if (likely(p)
&& likely(func(uc, p, userp))) { + return p; + } + } + } + b = b->next; + } while (b); + + return NULL; +} + +void *qht_lookup_custom(struct uc_struct *uc, const struct qht *ht, const void *userp, uint32_t hash, + qht_lookup_func_t func) +{ + const struct qht_bucket *b; + const struct qht_map *map; + void *ret; + + map = ht->map; + b = qht_map_to_bucket(map, hash); + + ret = qht_do_lookup(uc, b, func, userp, hash); + return ret; +} + +void *qht_lookup(struct uc_struct *uc, const struct qht *ht, const void *userp, uint32_t hash) +{ + return qht_lookup_custom(uc, ht, userp, hash, ht->cmp); +} + +/* + * call with head->lock held + * @ht is const since it is only used for ht->cmp() + */ +static void *qht_insert__locked(struct uc_struct *uc, const struct qht *ht, struct qht_map *map, + struct qht_bucket *head, void *p, uint32_t hash, + bool *needs_resize) +{ + struct qht_bucket *b = head; + struct qht_bucket *prev = NULL; + struct qht_bucket *new = NULL; + int i; + + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + if (b->pointers[i]) { + if (unlikely(b->hashes[i] == hash && + ht->cmp(uc, b->pointers[i], p))) { + return b->pointers[i]; + } + } else { + goto found; + } + } + prev = b; + b = b->next; + } while (b); + + b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b)); + memset(b, 0, sizeof(*b)); + new = b; + i = 0; + map->n_added_buckets++; + if (unlikely(qht_map_needs_resize(map)) && needs_resize) { + *needs_resize = true; + } + + found: + /* found an empty key: acquire the seqlock and write */ + if (new) { + prev->next = b; + } + b->hashes[i] = hash; + b->pointers[i] = p; + return NULL; +} + +#ifdef _MSC_VER +static void qht_grow_maybe(struct uc_struct *uc, struct qht *ht) +#else +static __attribute__((noinline)) void qht_grow_maybe(struct uc_struct *uc, struct qht *ht) +#endif +{ + struct qht_map *map; + + map = ht->map; + /* another thread might have just performed the resize we were after */ + if (qht_map_needs_resize(map)) { + struct qht_map *new = qht_map_create(map->n_buckets * 2); + + qht_do_resize(uc, ht, new); + } +} + +bool qht_insert(struct uc_struct *uc, struct qht *ht, void *p, uint32_t hash, void **existing) +{ + struct qht_bucket *b; + struct qht_map *map; + bool needs_resize = false; + void *prev; + + /* NULL pointers are not supported */ + qht_debug_assert(p); + + b = qht_bucket_lock__no_stale(ht, hash, &map); + prev = qht_insert__locked(uc, ht, map, b, p, hash, &needs_resize); + + if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) { + qht_grow_maybe(uc, ht); + } + if (likely(prev == NULL)) { + return true; + } + if (existing) { + *existing = prev; + } + return false; +} + +static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos) +{ + if (pos == QHT_BUCKET_ENTRIES - 1) { + if (b->next == NULL) { + return true; + } + return b->next->pointers[0] == NULL; + } + return b->pointers[pos + 1] == NULL; +} + +static void +qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j) +{ + qht_debug_assert(!(to == from && i == j)); + qht_debug_assert(to->pointers[i]); + qht_debug_assert(from->pointers[j]); + + to->hashes[i] = from->hashes[j]; + to->pointers[i] = from->pointers[j]; + from->hashes[j] = 0; + from->pointers[j] = NULL; +} + +/* + * Find the last valid entry in @orig, and swap it with @orig[pos], which has + * just been invalidated. 
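+ *
+ * E.g. (editor's illustration): for a bucket holding [A, B, C, NULL] with
+ * pos == 0, C (the last valid entry) is moved into slot 0, leaving
+ * [C, B, NULL, NULL].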
+ */ +static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos) +{ + struct qht_bucket *b = orig; + struct qht_bucket *prev = NULL; + int i; + + if (qht_entry_is_last(orig, pos)) { + orig->hashes[pos] = 0; + orig->pointers[pos] = NULL; + return; + } + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + if (b->pointers[i]) { + continue; + } + if (i > 0) { + qht_entry_move(orig, pos, b, i - 1); + return; + } + qht_debug_assert(prev); + qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1); + return; + } + prev = b; + b = b->next; + } while (b); + /* no free entries other than orig[pos], so swap it with the last one */ + qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1); +} + +/* call with b->lock held */ +static inline +bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash) +{ + struct qht_bucket *b = head; + int i; + + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + void *q = b->pointers[i]; + + if (unlikely(q == NULL)) { + return false; + } + if (q == p) { + qht_debug_assert(b->hashes[i] == hash); + qht_bucket_remove_entry(b, i); + return true; + } + } + b = b->next; + } while (b); + return false; +} + +bool qht_remove(struct qht *ht, const void *p, uint32_t hash) +{ + struct qht_bucket *b; + struct qht_map *map; + bool ret; + + /* NULL pointers are not supported */ + qht_debug_assert(p); + + b = qht_bucket_lock__no_stale(ht, hash, &map); + ret = qht_remove__locked(b, p, hash); + return ret; +} + +static inline void qht_bucket_iter(struct uc_struct *uc, struct qht_bucket *head, + const struct qht_iter *iter, void *userp) +{ + struct qht_bucket *b = head; + int i; + + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + if (b->pointers[i] == NULL) { + return; + } + switch (iter->type) { + case QHT_ITER_VOID: + iter->f.retvoid(uc, b->pointers[i], b->hashes[i], userp); + break; + case QHT_ITER_RM: + if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) { + /* replace i with the last valid element in the bucket */ + qht_bucket_remove_entry(b, i); + /* reevaluate i, since it just got replaced */ + i--; + continue; + } + break; + default: + g_assert_not_reached(); + } + } + b = b->next; + } while (b); +} + +/* call with all of the map's locks held */ +static inline void qht_map_iter__all_locked(struct uc_struct *uc, struct qht_map *map, + const struct qht_iter *iter, + void *userp) +{ + size_t i; + + for (i = 0; i < map->n_buckets; i++) { + qht_bucket_iter(uc, &map->buckets[i], iter, userp); + } +} + +static inline void +do_qht_iter(struct uc_struct *uc, struct qht *ht, const struct qht_iter *iter, void *userp) +{ + struct qht_map *map; + + map = ht->map; + qht_map_iter__all_locked(uc, map, iter, userp); +} + +void qht_iter(struct uc_struct *uc, struct qht *ht, qht_iter_func_t func, void *userp) +{ + const struct qht_iter iter = { + .f.retvoid = func, + .type = QHT_ITER_VOID, + }; + + do_qht_iter(uc, ht, &iter, userp); +} + +void qht_iter_remove(struct uc_struct *uc, struct qht *ht, qht_iter_bool_func_t func, void *userp) +{ + const struct qht_iter iter = { + .f.retbool = func, + .type = QHT_ITER_RM, + }; + + do_qht_iter(uc, ht, &iter, userp); +} + +struct qht_map_copy_data { + struct qht *ht; + struct qht_map *new; +}; + +static void qht_map_copy(struct uc_struct *uc, void *p, uint32_t hash, void *userp) +{ + struct qht_map_copy_data *data = userp; + struct qht *ht = data->ht; + struct qht_map *new = data->new; + struct qht_bucket *b = qht_map_to_bucket(new, hash); + + /* no need to acquire b->lock because no thread has seen this map yet 
+ */
+    qht_insert__locked(uc, ht, new, b, p, hash, NULL);
+}
+
+/*
+ * Atomically perform a resize and/or reset.
+ * Call with ht->lock held.
+ */
+static void qht_do_resize_reset(struct uc_struct *uc, struct qht *ht, struct qht_map *new, bool reset)
+{
+    struct qht_map *old;
+    const struct qht_iter iter = {
+        .f.retvoid = qht_map_copy,
+        .type = QHT_ITER_VOID,
+    };
+    struct qht_map_copy_data data;
+
+    old = ht->map;
+
+    if (reset) {
+        qht_map_reset__all_locked(old);
+    }
+
+    if (new == NULL) {
+        return;
+    }
+
+    g_assert(new->n_buckets != old->n_buckets);
+    data.ht = ht;
+    data.new = new;
+    qht_map_iter__all_locked(uc, old, &iter, &data);
+
+    ht->map = new;
+    qht_map_destroy(old);
+}
+
+bool qht_resize(struct uc_struct *uc, struct qht *ht, size_t n_elems)
+{
+    size_t n_buckets = qht_elems_to_buckets(n_elems);
+    bool ret = false;
+
+    if (n_buckets != ht->map->n_buckets) {
+        struct qht_map *new;
+
+        new = qht_map_create(n_buckets);
+        qht_do_resize(uc, ht, new);
+        ret = true;
+    }
+
+    return ret;
+}
+
+/* pass @stats to qht_statistics_destroy() when done */
+void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
+{
+    const struct qht_map *map;
+    int i;
+
+    map = ht->map;
+
+    stats->used_head_buckets = 0;
+    stats->entries = 0;
+    qdist_init(&stats->chain);
+    qdist_init(&stats->occupancy);
+    /* bail out if the qht has not yet been initialized */
+    if (unlikely(map == NULL)) {
+        stats->head_buckets = 0;
+        return;
+    }
+    stats->head_buckets = map->n_buckets;
+
+    for (i = 0; i < map->n_buckets; i++) {
+        const struct qht_bucket *head = &map->buckets[i];
+        const struct qht_bucket *b;
+        size_t buckets;
+        size_t entries;
+        int j;
+
+        buckets = 0;
+        entries = 0;
+        b = head;
+        do {
+            for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
+                if (b->pointers[j] == NULL) {
+                    break;
+                }
+                entries++;
+            }
+            buckets++;
+            b = b->next;
+        } while (b);
+
+        if (entries) {
+            qdist_inc(&stats->chain, buckets);
+            qdist_inc(&stats->occupancy,
+                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
+            stats->used_head_buckets++;
+            stats->entries += entries;
+        } else {
+            qdist_inc(&stats->occupancy, 0);
+        }
+    }
+}
+
+void qht_statistics_destroy(struct qht_stats *stats)
+{
+    qdist_destroy(&stats->occupancy);
+    qdist_destroy(&stats->chain);
+}
diff --git a/qemu/util/range.c b/qemu/util/range.c
new file mode 100644
index 00000000..f10f5664
--- /dev/null
+++ b/qemu/util/range.c
@@ -0,0 +1,78 @@
+/*
+ * QEMU 64-bit address ranges
+ *
+ * Copyright (c) 2015-2016 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/range.h"
+
+/*
+ * Return -1 if @a < @b, 1 if @a > @b, and 0 if they touch or overlap.
+ * Both @a and @b must not be empty.
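+ *
+ * E.g. (editor's illustration, with inclusive {lob, upb} bounds):
+ * [0, 3] vs [5, 7] returns -1, while [0, 3] vs [4, 7] returns 0
+ * because the two ranges touch.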
+ */ +static inline int range_compare(Range *a, Range *b) +{ + assert(!range_is_empty(a) && !range_is_empty(b)); + + /* Careful, avoid wraparound */ + if (b->lob && b->lob - 1 > a->upb) { + return -1; + } + if (a->lob && a->lob - 1 > b->upb) { + return 1; + } + return 0; +} + +/* Insert @data into @list of ranges; caller no longer owns @data */ +GList *range_list_insert(GList *list, Range *data) +{ + GList *l; + + assert(!range_is_empty(data)); + + /* Skip all list elements strictly less than data */ + for (l = list; l && range_compare(l->data, data) < 0; l = l->next) { + } + + if (!l || range_compare(l->data, data) > 0) { + /* Rest of the list (if any) is strictly greater than @data */ + return g_list_insert_before(list, l, data); + } + + /* Current list element overlaps @data, merge the two */ + range_extend(l->data, data); + g_free(data); + + /* Merge any subsequent list elements that now also overlap */ + while (l->next && range_compare(l->data, l->next->data) == 0) { +#ifndef NDEBUG + GList *new_l; +#endif + + range_extend(l->data, l->next->data); + g_free(l->next->data); +#ifndef NDEBUG + new_l = g_list_delete_link(list, l->next); +#else + g_list_delete_link(list, l->next); +#endif + assert(new_l == list); + } + + return list; +} diff --git a/qemu/vl.c b/qemu/vl.c deleted file mode 100644 index f6c68b47..00000000 --- a/qemu/vl.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * QEMU System Emulator - * - * Copyright (c) 2003-2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh, 2015 */ - -#include "hw/boards.h" // MachineClass -#include "sysemu/sysemu.h" -#include "sysemu/cpus.h" -#include "vl.h" -#include "uc_priv.h" - -#define DEFAULT_RAM_SIZE 128 - -int smp_cpus = 1; -int smp_cores = 1; -int smp_threads = 1; - -// cpus.c -void cpu_resume(CPUState *cpu) -{ - cpu->stop = false; - cpu->stopped = false; -} - -void cpu_stop_current(struct uc_struct *uc) -{ - if (uc->current_cpu) { - uc->current_cpu->stop = false; - uc->current_cpu->stopped = true; - cpu_exit(uc->current_cpu); - } -} - - -/***********************************************************/ -/* machine registration */ - -MachineClass *find_default_machine(struct uc_struct *uc, int arch) -{ - GSList *el, *machines = object_class_get_list(uc, TYPE_MACHINE, false); - MachineClass *mc = NULL; - - for (el = machines; el; el = el->next) { - MachineClass *temp = el->data; - - if ((temp->is_default) && (temp->arch == arch)) { - mc = temp; - break; - } - } - - g_slist_free(machines); - return mc; -} - -DEFAULT_VISIBILITY -int machine_initialize(struct uc_struct *uc) -{ - MachineClass *machine_class; - MachineState *current_machine; - - module_call_init(uc, MODULE_INIT_QOM); - register_types_object(uc); - machine_register_types(uc); - container_register_types(uc); - cpu_register_types(uc); - qdev_register_types(uc); - - // Initialize arch specific. - uc->init_arch(uc); - - module_call_init(uc, MODULE_INIT_MACHINE); - // this will auto initialize all register objects above. - machine_class = find_default_machine(uc, uc->arch); - if (machine_class == NULL) { - //fprintf(stderr, "No machine specified, and there is no default.\n" - // "Use -machine help to list supported machines!\n"); - return -2; - } - - current_machine = MACHINE(uc, object_new(uc, object_class_get_name( - OBJECT_CLASS(machine_class)))); - uc->machine_state = current_machine; - current_machine->uc = uc; - uc->cpu_exec_init_all(uc); - - machine_class->max_cpus = 1; - configure_accelerator(current_machine); - - current_machine->cpu_model = NULL; - - return machine_class->init(uc, current_machine); -} - -void qemu_system_reset_request(struct uc_struct* uc) -{ - cpu_stop_current(uc); -} - -void qemu_system_shutdown_request(void) -{ - //shutdown_requested = 1; -} - -static void machine_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(uc, oc); - QEMUMachine *qm = data; - - mc->family = qm->family; - mc->name = qm->name; - mc->init = qm->init; - mc->reset = qm->reset; - mc->max_cpus = qm->max_cpus; - mc->is_default = qm->is_default; - mc->arch = qm->arch; -} - -void qemu_register_machine(struct uc_struct *uc, QEMUMachine *m, const char *type_machine, - void (*init)(struct uc_struct *uc, ObjectClass *oc, void *data)) -{ - char *name = g_strconcat(m->name, TYPE_MACHINE_SUFFIX, NULL); - TypeInfo ti = {0}; - ti.name = name; - ti.parent = type_machine; - ti.class_init = init; - ti.class_data = (void *)m; - - if (init == NULL) - ti.class_init = machine_class_init; - - type_register(uc, &ti); - g_free(name); -} diff --git a/qemu/x86_64.h b/qemu/x86_64.h index 9b05becf..ceec9bbe 100644 --- a/qemu/x86_64.h +++ b/qemu/x86_64.h @@ -1,3020 +1,1880 @@ /* Autogen header for Unicorn Engine - DONOT MODIFY */ -#ifndef UNICORN_AUTOGEN_X86_64_H -#define UNICORN_AUTOGEN_X86_64_H -#define arm_release arm_release_x86_64 -#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_x86_64 -#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_x86_64 -#define 
use_idiv_instructions_rt use_idiv_instructions_rt_x86_64 -#define tcg_target_deposit_valid tcg_target_deposit_valid_x86_64 -#define helper_power_down helper_power_down_x86_64 -#define check_exit_request check_exit_request_x86_64 -#define address_space_unregister address_space_unregister_x86_64 -#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_x86_64 -#define phys_mem_clean phys_mem_clean_x86_64 -#define tb_cleanup tb_cleanup_x86_64 +#ifndef UNICORN_AUTOGEN_x86_64_H +#define UNICORN_AUTOGEN_x86_64_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _x86_64 +#endif +#define arm_arch arm_arch_x86_64 +#define tb_target_set_jmp_target tb_target_set_jmp_target_x86_64 +#define have_bmi1 have_bmi1_x86_64 +#define have_popcnt have_popcnt_x86_64 +#define have_avx1 have_avx1_x86_64 +#define have_avx2 have_avx2_x86_64 +#define have_isa have_isa_x86_64 +#define have_altivec have_altivec_x86_64 +#define have_vsx have_vsx_x86_64 +#define flush_icache_range flush_icache_range_x86_64 +#define s390_facilities s390_facilities_x86_64 +#define tcg_dump_op tcg_dump_op_x86_64 +#define tcg_dump_ops tcg_dump_ops_x86_64 +#define tcg_gen_and_i64 tcg_gen_and_i64_x86_64 +#define tcg_gen_discard_i64 tcg_gen_discard_i64_x86_64 +#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_x86_64 +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_x86_64 +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_x86_64 +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_x86_64 +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_x86_64 +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_x86_64 +#define tcg_gen_ld_i64 tcg_gen_ld_i64_x86_64 +#define tcg_gen_mov_i64 tcg_gen_mov_i64_x86_64 +#define tcg_gen_movi_i64 tcg_gen_movi_i64_x86_64 +#define tcg_gen_mul_i64 tcg_gen_mul_i64_x86_64 +#define tcg_gen_or_i64 tcg_gen_or_i64_x86_64 +#define tcg_gen_sar_i64 tcg_gen_sar_i64_x86_64 +#define tcg_gen_shl_i64 tcg_gen_shl_i64_x86_64 +#define tcg_gen_shr_i64 tcg_gen_shr_i64_x86_64 +#define tcg_gen_st_i64 tcg_gen_st_i64_x86_64 +#define tcg_gen_xor_i64 tcg_gen_xor_i64_x86_64 +#define cpu_icount_to_ns cpu_icount_to_ns_x86_64 +#define cpu_is_stopped cpu_is_stopped_x86_64 +#define cpu_get_ticks cpu_get_ticks_x86_64 +#define cpu_get_clock cpu_get_clock_x86_64 +#define cpu_resume cpu_resume_x86_64 +#define qemu_init_vcpu qemu_init_vcpu_x86_64 +#define cpu_stop_current cpu_stop_current_x86_64 +#define resume_all_vcpus resume_all_vcpus_x86_64 +#define vm_start vm_start_x86_64 +#define address_space_dispatch_compact address_space_dispatch_compact_x86_64 +#define flatview_translate flatview_translate_x86_64 +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_x86_64 +#define qemu_get_cpu qemu_get_cpu_x86_64 +#define cpu_address_space_init cpu_address_space_init_x86_64 +#define cpu_get_address_space cpu_get_address_space_x86_64 +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_x86_64 +#define cpu_exec_initfn cpu_exec_initfn_x86_64 +#define cpu_exec_realizefn cpu_exec_realizefn_x86_64 +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_x86_64 +#define cpu_watchpoint_insert cpu_watchpoint_insert_x86_64 +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_x86_64 +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_x86_64 +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_x86_64 +#define cpu_breakpoint_insert cpu_breakpoint_insert_x86_64 +#define cpu_breakpoint_remove cpu_breakpoint_remove_x86_64 +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_x86_64 +#define cpu_breakpoint_remove_all 
cpu_breakpoint_remove_all_x86_64 +#define cpu_abort cpu_abort_x86_64 +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_x86_64 +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_x86_64 +#define flatview_add_to_dispatch flatview_add_to_dispatch_x86_64 +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_x86_64 +#define qemu_ram_get_offset qemu_ram_get_offset_x86_64 +#define qemu_ram_get_used_length qemu_ram_get_used_length_x86_64 +#define qemu_ram_is_shared qemu_ram_is_shared_x86_64 +#define qemu_ram_pagesize qemu_ram_pagesize_x86_64 +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_x86_64 +#define qemu_ram_alloc qemu_ram_alloc_x86_64 +#define qemu_ram_free qemu_ram_free_x86_64 +#define qemu_map_ram_ptr qemu_map_ram_ptr_x86_64 +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_x86_64 +#define qemu_ram_block_from_host qemu_ram_block_from_host_x86_64 +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_x86_64 +#define cpu_check_watchpoint cpu_check_watchpoint_x86_64 +#define iotlb_to_section iotlb_to_section_x86_64 +#define address_space_dispatch_new address_space_dispatch_new_x86_64 +#define address_space_dispatch_free address_space_dispatch_free_x86_64 +#define flatview_read_continue flatview_read_continue_x86_64 +#define address_space_read_full address_space_read_full_x86_64 +#define address_space_write address_space_write_x86_64 +#define address_space_rw address_space_rw_x86_64 +#define cpu_physical_memory_rw cpu_physical_memory_rw_x86_64 +#define address_space_write_rom address_space_write_rom_x86_64 +#define cpu_flush_icache_range cpu_flush_icache_range_x86_64 +#define cpu_exec_init_all cpu_exec_init_all_x86_64 +#define address_space_access_valid address_space_access_valid_x86_64 +#define address_space_map address_space_map_x86_64 +#define address_space_unmap address_space_unmap_x86_64 +#define cpu_physical_memory_map cpu_physical_memory_map_x86_64 +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_x86_64 +#define cpu_memory_rw_debug cpu_memory_rw_debug_x86_64 +#define qemu_target_page_size qemu_target_page_size_x86_64 +#define qemu_target_page_bits qemu_target_page_bits_x86_64 +#define qemu_target_page_bits_min qemu_target_page_bits_min_x86_64 +#define target_words_bigendian target_words_bigendian_x86_64 +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_x86_64 +#define ram_block_discard_range ram_block_discard_range_x86_64 +#define ramblock_is_pmem ramblock_is_pmem_x86_64 +#define page_size_init page_size_init_x86_64 +#define set_preferred_target_page_bits set_preferred_target_page_bits_x86_64 +#define finalize_target_page_bits finalize_target_page_bits_x86_64 +#define cpu_outb cpu_outb_x86_64 +#define cpu_outw cpu_outw_x86_64 +#define cpu_outl cpu_outl_x86_64 +#define cpu_inb cpu_inb_x86_64 +#define cpu_inw cpu_inw_x86_64 +#define cpu_inl cpu_inl_x86_64 #define memory_map memory_map_x86_64 +#define memory_map_io memory_map_io_x86_64 #define memory_map_ptr memory_map_ptr_x86_64 #define memory_unmap memory_unmap_x86_64 #define memory_free memory_free_x86_64 -#define free_code_gen_buffer free_code_gen_buffer_x86_64 -#define helper_raise_exception helper_raise_exception_x86_64 -#define tcg_enabled tcg_enabled_x86_64 -#define tcg_exec_init tcg_exec_init_x86_64 -#define memory_register_types memory_register_types_x86_64 -#define cpu_exec_init_all cpu_exec_init_all_x86_64 -#define vm_start vm_start_x86_64 -#define resume_all_vcpus resume_all_vcpus_x86_64 -#define a15_l2ctlr_read 
a15_l2ctlr_read_x86_64 -#define a64_translate_init a64_translate_init_x86_64 -#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_x86_64 -#define aa64_cacheop_access aa64_cacheop_access_x86_64 -#define aa64_daif_access aa64_daif_access_x86_64 -#define aa64_daif_write aa64_daif_write_x86_64 -#define aa64_dczid_read aa64_dczid_read_x86_64 -#define aa64_fpcr_read aa64_fpcr_read_x86_64 -#define aa64_fpcr_write aa64_fpcr_write_x86_64 -#define aa64_fpsr_read aa64_fpsr_read_x86_64 -#define aa64_fpsr_write aa64_fpsr_write_x86_64 -#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_x86_64 -#define aa64_zva_access aa64_zva_access_x86_64 -#define aarch64_banked_spsr_index aarch64_banked_spsr_index_x86_64 -#define aarch64_restore_sp aarch64_restore_sp_x86_64 -#define aarch64_save_sp aarch64_save_sp_x86_64 -#define accel_find accel_find_x86_64 -#define accel_init_machine accel_init_machine_x86_64 -#define accel_type accel_type_x86_64 -#define access_with_adjusted_size access_with_adjusted_size_x86_64 -#define add128 add128_x86_64 -#define add16_sat add16_sat_x86_64 -#define add16_usat add16_usat_x86_64 -#define add192 add192_x86_64 -#define add8_sat add8_sat_x86_64 -#define add8_usat add8_usat_x86_64 -#define add_cpreg_to_hashtable add_cpreg_to_hashtable_x86_64 -#define add_cpreg_to_list add_cpreg_to_list_x86_64 -#define addFloat128Sigs addFloat128Sigs_x86_64 -#define addFloat32Sigs addFloat32Sigs_x86_64 -#define addFloat64Sigs addFloat64Sigs_x86_64 -#define addFloatx80Sigs addFloatx80Sigs_x86_64 -#define add_qemu_ldst_label add_qemu_ldst_label_x86_64 -#define address_space_access_valid address_space_access_valid_x86_64 -#define address_space_destroy address_space_destroy_x86_64 -#define address_space_destroy_dispatch address_space_destroy_dispatch_x86_64 -#define address_space_get_flatview address_space_get_flatview_x86_64 -#define address_space_init address_space_init_x86_64 -#define address_space_init_dispatch address_space_init_dispatch_x86_64 -#define address_space_lookup_region address_space_lookup_region_x86_64 -#define address_space_map address_space_map_x86_64 -#define address_space_read address_space_read_x86_64 -#define address_space_rw address_space_rw_x86_64 -#define address_space_translate address_space_translate_x86_64 -#define address_space_translate_for_iotlb address_space_translate_for_iotlb_x86_64 -#define address_space_translate_internal address_space_translate_internal_x86_64 -#define address_space_unmap address_space_unmap_x86_64 -#define address_space_update_topology address_space_update_topology_x86_64 -#define address_space_update_topology_pass address_space_update_topology_pass_x86_64 -#define address_space_write address_space_write_x86_64 -#define addrrange_contains addrrange_contains_x86_64 -#define addrrange_end addrrange_end_x86_64 -#define addrrange_equal addrrange_equal_x86_64 -#define addrrange_intersection addrrange_intersection_x86_64 -#define addrrange_intersects addrrange_intersects_x86_64 -#define addrrange_make addrrange_make_x86_64 -#define adjust_endianness adjust_endianness_x86_64 -#define all_helpers all_helpers_x86_64 -#define alloc_code_gen_buffer alloc_code_gen_buffer_x86_64 -#define alloc_entry alloc_entry_x86_64 -#define always_true always_true_x86_64 -#define arm1026_initfn arm1026_initfn_x86_64 -#define arm1136_initfn arm1136_initfn_x86_64 -#define arm1136_r2_initfn arm1136_r2_initfn_x86_64 -#define arm1176_initfn arm1176_initfn_x86_64 -#define arm11mpcore_initfn arm11mpcore_initfn_x86_64 -#define arm926_initfn 
arm926_initfn_x86_64 -#define arm946_initfn arm946_initfn_x86_64 -#define arm_ccnt_enabled arm_ccnt_enabled_x86_64 -#define arm_cp_read_zero arm_cp_read_zero_x86_64 -#define arm_cp_reset_ignore arm_cp_reset_ignore_x86_64 -#define arm_cpu_do_interrupt arm_cpu_do_interrupt_x86_64 -#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_x86_64 -#define arm_cpu_finalizefn arm_cpu_finalizefn_x86_64 -#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_x86_64 -#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_x86_64 -#define arm_cpu_initfn arm_cpu_initfn_x86_64 -#define arm_cpu_list arm_cpu_list_x86_64 -#define cpu_loop_exit cpu_loop_exit_x86_64 -#define arm_cpu_post_init arm_cpu_post_init_x86_64 -#define arm_cpu_realizefn arm_cpu_realizefn_x86_64 -#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_x86_64 -#define arm_cpu_register_types arm_cpu_register_types_x86_64 -#define cpu_resume_from_signal cpu_resume_from_signal_x86_64 -#define arm_cpus arm_cpus_x86_64 -#define arm_cpu_set_pc arm_cpu_set_pc_x86_64 -#define arm_cp_write_ignore arm_cp_write_ignore_x86_64 -#define arm_current_el arm_current_el_x86_64 -#define arm_dc_feature arm_dc_feature_x86_64 -#define arm_debug_excp_handler arm_debug_excp_handler_x86_64 -#define arm_debug_target_el arm_debug_target_el_x86_64 -#define arm_el_is_aa64 arm_el_is_aa64_x86_64 -#define arm_env_get_cpu arm_env_get_cpu_x86_64 -#define arm_excp_target_el arm_excp_target_el_x86_64 -#define arm_excp_unmasked arm_excp_unmasked_x86_64 -#define arm_feature arm_feature_x86_64 -#define arm_generate_debug_exceptions arm_generate_debug_exceptions_x86_64 -#define gen_intermediate_code gen_intermediate_code_x86_64 -#define gen_intermediate_code_pc gen_intermediate_code_pc_x86_64 -#define arm_gen_test_cc arm_gen_test_cc_x86_64 -#define arm_gt_ptimer_cb arm_gt_ptimer_cb_x86_64 -#define arm_gt_vtimer_cb arm_gt_vtimer_cb_x86_64 -#define arm_handle_psci_call arm_handle_psci_call_x86_64 -#define arm_is_psci_call arm_is_psci_call_x86_64 -#define arm_is_secure arm_is_secure_x86_64 -#define arm_is_secure_below_el3 arm_is_secure_below_el3_x86_64 -#define arm_ldl_code arm_ldl_code_x86_64 -#define arm_lduw_code arm_lduw_code_x86_64 -#define arm_log_exception arm_log_exception_x86_64 -#define arm_reg_read arm_reg_read_x86_64 -#define arm_reg_reset arm_reg_reset_x86_64 -#define arm_reg_write arm_reg_write_x86_64 -#define restore_state_to_opc restore_state_to_opc_x86_64 -#define arm_rmode_to_sf arm_rmode_to_sf_x86_64 -#define arm_singlestep_active arm_singlestep_active_x86_64 -#define tlb_fill tlb_fill_x86_64 -#define tlb_flush tlb_flush_x86_64 -#define tlb_flush_page tlb_flush_page_x86_64 -#define tlb_set_page tlb_set_page_x86_64 -#define arm_translate_init arm_translate_init_x86_64 -#define arm_v7m_class_init arm_v7m_class_init_x86_64 -#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_x86_64 -#define ats_access ats_access_x86_64 -#define ats_write ats_write_x86_64 -#define bad_mode_switch bad_mode_switch_x86_64 -#define bank_number bank_number_x86_64 -#define bitmap_zero_extend bitmap_zero_extend_x86_64 -#define bp_wp_matches bp_wp_matches_x86_64 -#define breakpoint_invalidate breakpoint_invalidate_x86_64 -#define build_page_bitmap build_page_bitmap_x86_64 -#define bus_add_child bus_add_child_x86_64 -#define bus_class_init bus_class_init_x86_64 -#define bus_info bus_info_x86_64 -#define bus_unparent bus_unparent_x86_64 -#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_x86_64 -#define cache_dirty_status_cp_reginfo 
cache_dirty_status_cp_reginfo_x86_64 -#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_x86_64 -#define call_recip_estimate call_recip_estimate_x86_64 -#define can_merge can_merge_x86_64 -#define capacity_increase capacity_increase_x86_64 -#define ccsidr_read ccsidr_read_x86_64 -#define check_ap check_ap_x86_64 -#define check_breakpoints check_breakpoints_x86_64 -#define check_watchpoints check_watchpoints_x86_64 -#define cho cho_x86_64 -#define clear_bit clear_bit_x86_64 -#define clz32 clz32_x86_64 -#define clz64 clz64_x86_64 -#define cmp_flatrange_addr cmp_flatrange_addr_x86_64 -#define code_gen_alloc code_gen_alloc_x86_64 -#define commonNaNToFloat128 commonNaNToFloat128_x86_64 -#define commonNaNToFloat16 commonNaNToFloat16_x86_64 -#define commonNaNToFloat32 commonNaNToFloat32_x86_64 -#define commonNaNToFloat64 commonNaNToFloat64_x86_64 -#define commonNaNToFloatx80 commonNaNToFloatx80_x86_64 -#define compute_abs_deadline compute_abs_deadline_x86_64 -#define cond_name cond_name_x86_64 -#define configure_accelerator configure_accelerator_x86_64 -#define container_get container_get_x86_64 -#define container_info container_info_x86_64 -#define container_register_types container_register_types_x86_64 -#define contextidr_write contextidr_write_x86_64 -#define core_log_global_start core_log_global_start_x86_64 -#define core_log_global_stop core_log_global_stop_x86_64 -#define core_memory_listener core_memory_listener_x86_64 -#define cortexa15_cp_reginfo cortexa15_cp_reginfo_x86_64 -#define cortex_a15_initfn cortex_a15_initfn_x86_64 -#define cortexa8_cp_reginfo cortexa8_cp_reginfo_x86_64 -#define cortex_a8_initfn cortex_a8_initfn_x86_64 -#define cortexa9_cp_reginfo cortexa9_cp_reginfo_x86_64 -#define cortex_a9_initfn cortex_a9_initfn_x86_64 -#define cortex_m3_initfn cortex_m3_initfn_x86_64 -#define count_cpreg count_cpreg_x86_64 -#define countLeadingZeros32 countLeadingZeros32_x86_64 -#define countLeadingZeros64 countLeadingZeros64_x86_64 -#define cp_access_ok cp_access_ok_x86_64 -#define cpacr_write cpacr_write_x86_64 -#define cpreg_field_is_64bit cpreg_field_is_64bit_x86_64 -#define cp_reginfo cp_reginfo_x86_64 -#define cpreg_key_compare cpreg_key_compare_x86_64 -#define cpreg_make_keylist cpreg_make_keylist_x86_64 -#define cp_reg_reset cp_reg_reset_x86_64 -#define cpreg_to_kvm_id cpreg_to_kvm_id_x86_64 -#define cpsr_read cpsr_read_x86_64 -#define cpsr_write cpsr_write_x86_64 -#define cptype_valid cptype_valid_x86_64 -#define cpu_abort cpu_abort_x86_64 -#define cpu_arm_exec cpu_arm_exec_x86_64 -#define cpu_arm_gen_code cpu_arm_gen_code_x86_64 -#define cpu_arm_init cpu_arm_init_x86_64 -#define cpu_breakpoint_insert cpu_breakpoint_insert_x86_64 -#define cpu_breakpoint_remove cpu_breakpoint_remove_x86_64 -#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_x86_64 -#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_x86_64 -#define cpu_can_do_io cpu_can_do_io_x86_64 -#define cpu_can_run cpu_can_run_x86_64 -#define cpu_class_init cpu_class_init_x86_64 -#define cpu_common_class_by_name cpu_common_class_by_name_x86_64 -#define cpu_common_exec_interrupt cpu_common_exec_interrupt_x86_64 -#define cpu_common_get_arch_id cpu_common_get_arch_id_x86_64 -#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_x86_64 -#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_x86_64 -#define cpu_common_has_work cpu_common_has_work_x86_64 -#define cpu_common_initfn cpu_common_initfn_x86_64 -#define cpu_common_noop cpu_common_noop_x86_64 -#define 
cpu_common_parse_features cpu_common_parse_features_x86_64 -#define cpu_common_realizefn cpu_common_realizefn_x86_64 -#define cpu_common_reset cpu_common_reset_x86_64 -#define cpu_dump_statistics cpu_dump_statistics_x86_64 -#define cpu_exec_init cpu_exec_init_x86_64 -#define cpu_flush_icache_range cpu_flush_icache_range_x86_64 -#define cpu_gen_init cpu_gen_init_x86_64 -#define cpu_get_clock cpu_get_clock_x86_64 -#define cpu_get_real_ticks cpu_get_real_ticks_x86_64 -#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_x86_64 -#define cpu_handle_debug_exception cpu_handle_debug_exception_x86_64 -#define cpu_handle_guest_debug cpu_handle_guest_debug_x86_64 -#define cpu_inb cpu_inb_x86_64 -#define cpu_inl cpu_inl_x86_64 -#define cpu_interrupt cpu_interrupt_x86_64 -#define cpu_interrupt_handler cpu_interrupt_handler_x86_64 -#define cpu_inw cpu_inw_x86_64 -#define cpu_io_recompile cpu_io_recompile_x86_64 -#define cpu_is_stopped cpu_is_stopped_x86_64 -#define cpu_ldl_code cpu_ldl_code_x86_64 -#define cpu_ldub_code cpu_ldub_code_x86_64 -#define cpu_lduw_code cpu_lduw_code_x86_64 -#define cpu_memory_rw_debug cpu_memory_rw_debug_x86_64 -#define cpu_mmu_index cpu_mmu_index_x86_64 -#define cpu_outb cpu_outb_x86_64 -#define cpu_outl cpu_outl_x86_64 -#define cpu_outw cpu_outw_x86_64 -#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_x86_64 -#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_x86_64 -#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_x86_64 -#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_x86_64 -#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_x86_64 -#define cpu_physical_memory_is_io cpu_physical_memory_is_io_x86_64 -#define cpu_physical_memory_map cpu_physical_memory_map_x86_64 -#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_x86_64 -#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_x86_64 -#define cpu_physical_memory_rw cpu_physical_memory_rw_x86_64 -#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_x86_64 -#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_x86_64 -#define cpu_physical_memory_unmap cpu_physical_memory_unmap_x86_64 -#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_x86_64 -#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_x86_64 -#define cpu_register cpu_register_x86_64 -#define cpu_register_types cpu_register_types_x86_64 -#define cpu_restore_state cpu_restore_state_x86_64 -#define cpu_restore_state_from_tb cpu_restore_state_from_tb_x86_64 -#define cpu_single_step cpu_single_step_x86_64 -#define cpu_tb_exec cpu_tb_exec_x86_64 -#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_x86_64 -#define cpu_to_be64 cpu_to_be64_x86_64 -#define cpu_to_le32 cpu_to_le32_x86_64 -#define cpu_to_le64 cpu_to_le64_x86_64 -#define cpu_type_info cpu_type_info_x86_64 -#define cpu_unassigned_access cpu_unassigned_access_x86_64 -#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_x86_64 -#define cpu_watchpoint_insert cpu_watchpoint_insert_x86_64 -#define cpu_watchpoint_remove cpu_watchpoint_remove_x86_64 -#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_x86_64 -#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_x86_64 -#define crc32c_table crc32c_table_x86_64 -#define create_new_memory_mapping create_new_memory_mapping_x86_64 -#define csselr_write 
csselr_write_x86_64 -#define cto32 cto32_x86_64 -#define ctr_el0_access ctr_el0_access_x86_64 -#define ctz32 ctz32_x86_64 -#define ctz64 ctz64_x86_64 -#define dacr_write dacr_write_x86_64 -#define dbgbcr_write dbgbcr_write_x86_64 -#define dbgbvr_write dbgbvr_write_x86_64 -#define dbgwcr_write dbgwcr_write_x86_64 -#define dbgwvr_write dbgwvr_write_x86_64 -#define debug_cp_reginfo debug_cp_reginfo_x86_64 -#define debug_frame debug_frame_x86_64 -#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_x86_64 -#define define_arm_cp_regs define_arm_cp_regs_x86_64 -#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_x86_64 -#define define_debug_regs define_debug_regs_x86_64 -#define define_one_arm_cp_reg define_one_arm_cp_reg_x86_64 -#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_x86_64 -#define deposit32 deposit32_x86_64 -#define deposit64 deposit64_x86_64 -#define deregister_tm_clones deregister_tm_clones_x86_64 -#define device_class_base_init device_class_base_init_x86_64 -#define device_class_init device_class_init_x86_64 -#define device_finalize device_finalize_x86_64 -#define device_get_realized device_get_realized_x86_64 -#define device_initfn device_initfn_x86_64 -#define device_post_init device_post_init_x86_64 -#define device_reset device_reset_x86_64 -#define device_set_realized device_set_realized_x86_64 -#define device_type_info device_type_info_x86_64 -#define disas_arm_insn disas_arm_insn_x86_64 -#define disas_coproc_insn disas_coproc_insn_x86_64 -#define disas_dsp_insn disas_dsp_insn_x86_64 -#define disas_iwmmxt_insn disas_iwmmxt_insn_x86_64 -#define disas_neon_data_insn disas_neon_data_insn_x86_64 -#define disas_neon_ls_insn disas_neon_ls_insn_x86_64 -#define disas_thumb2_insn disas_thumb2_insn_x86_64 -#define disas_thumb_insn disas_thumb_insn_x86_64 -#define disas_vfp_insn disas_vfp_insn_x86_64 -#define disas_vfp_v8_insn disas_vfp_v8_insn_x86_64 -#define do_arm_semihosting do_arm_semihosting_x86_64 -#define do_clz16 do_clz16_x86_64 -#define do_clz8 do_clz8_x86_64 -#define do_constant_folding do_constant_folding_x86_64 -#define do_constant_folding_2 do_constant_folding_2_x86_64 -#define do_constant_folding_cond do_constant_folding_cond_x86_64 -#define do_constant_folding_cond2 do_constant_folding_cond2_x86_64 -#define do_constant_folding_cond_32 do_constant_folding_cond_32_x86_64 -#define do_constant_folding_cond_64 do_constant_folding_cond_64_x86_64 -#define do_constant_folding_cond_eq do_constant_folding_cond_eq_x86_64 -#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_x86_64 -#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_x86_64 -#define do_ssat do_ssat_x86_64 -#define do_usad do_usad_x86_64 -#define do_usat do_usat_x86_64 -#define do_v7m_exception_exit do_v7m_exception_exit_x86_64 -#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_x86_64 -#define dummy_func dummy_func_x86_64 -#define dummy_section dummy_section_x86_64 -#define _DYNAMIC _DYNAMIC_x86_64 -#define _edata _edata_x86_64 -#define _end _end_x86_64 -#define end_list end_list_x86_64 -#define eq128 eq128_x86_64 -#define ErrorClass_lookup ErrorClass_lookup_x86_64 -#define error_copy error_copy_x86_64 -#define error_exit error_exit_x86_64 -#define error_get_class error_get_class_x86_64 -#define error_get_pretty error_get_pretty_x86_64 -#define error_setg_file_open error_setg_file_open_x86_64 -#define estimateDiv128To64 estimateDiv128To64_x86_64 -#define estimateSqrt32 estimateSqrt32_x86_64 -#define excnames excnames_x86_64 -#define excp_is_internal excp_is_internal_x86_64 -#define 
extended_addresses_enabled extended_addresses_enabled_x86_64 -#define extended_mpu_ap_bits extended_mpu_ap_bits_x86_64 -#define extract32 extract32_x86_64 -#define extract64 extract64_x86_64 -#define extractFloat128Exp extractFloat128Exp_x86_64 -#define extractFloat128Frac0 extractFloat128Frac0_x86_64 -#define extractFloat128Frac1 extractFloat128Frac1_x86_64 -#define extractFloat128Sign extractFloat128Sign_x86_64 -#define extractFloat16Exp extractFloat16Exp_x86_64 -#define extractFloat16Frac extractFloat16Frac_x86_64 -#define extractFloat16Sign extractFloat16Sign_x86_64 -#define extractFloat32Exp extractFloat32Exp_x86_64 -#define extractFloat32Frac extractFloat32Frac_x86_64 -#define extractFloat32Sign extractFloat32Sign_x86_64 -#define extractFloat64Exp extractFloat64Exp_x86_64 -#define extractFloat64Frac extractFloat64Frac_x86_64 -#define extractFloat64Sign extractFloat64Sign_x86_64 -#define extractFloatx80Exp extractFloatx80Exp_x86_64 -#define extractFloatx80Frac extractFloatx80Frac_x86_64 -#define extractFloatx80Sign extractFloatx80Sign_x86_64 -#define fcse_write fcse_write_x86_64 -#define find_better_copy find_better_copy_x86_64 -#define find_default_machine find_default_machine_x86_64 -#define find_desc_by_name find_desc_by_name_x86_64 -#define find_first_bit find_first_bit_x86_64 -#define find_paging_enabled_cpu find_paging_enabled_cpu_x86_64 -#define find_ram_block find_ram_block_x86_64 -#define find_ram_offset find_ram_offset_x86_64 -#define find_string find_string_x86_64 -#define find_type find_type_x86_64 -#define _fini _fini_x86_64 -#define flatrange_equal flatrange_equal_x86_64 -#define flatview_destroy flatview_destroy_x86_64 -#define flatview_init flatview_init_x86_64 -#define flatview_insert flatview_insert_x86_64 -#define flatview_lookup flatview_lookup_x86_64 -#define flatview_ref flatview_ref_x86_64 -#define flatview_simplify flatview_simplify_x86_64 #define flatview_unref flatview_unref_x86_64 -#define float128_add float128_add_x86_64 -#define float128_compare float128_compare_x86_64 -#define float128_compare_internal float128_compare_internal_x86_64 -#define float128_compare_quiet float128_compare_quiet_x86_64 -#define float128_default_nan float128_default_nan_x86_64 -#define float128_div float128_div_x86_64 -#define float128_eq float128_eq_x86_64 -#define float128_eq_quiet float128_eq_quiet_x86_64 -#define float128_is_quiet_nan float128_is_quiet_nan_x86_64 -#define float128_is_signaling_nan float128_is_signaling_nan_x86_64 -#define float128_le float128_le_x86_64 -#define float128_le_quiet float128_le_quiet_x86_64 -#define float128_lt float128_lt_x86_64 -#define float128_lt_quiet float128_lt_quiet_x86_64 -#define float128_maybe_silence_nan float128_maybe_silence_nan_x86_64 -#define float128_mul float128_mul_x86_64 -#define float128_rem float128_rem_x86_64 -#define float128_round_to_int float128_round_to_int_x86_64 -#define float128_scalbn float128_scalbn_x86_64 -#define float128_sqrt float128_sqrt_x86_64 -#define float128_sub float128_sub_x86_64 -#define float128ToCommonNaN float128ToCommonNaN_x86_64 -#define float128_to_float32 float128_to_float32_x86_64 -#define float128_to_float64 float128_to_float64_x86_64 -#define float128_to_floatx80 float128_to_floatx80_x86_64 -#define float128_to_int32 float128_to_int32_x86_64 -#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_x86_64 -#define float128_to_int64 float128_to_int64_x86_64 -#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_x86_64 -#define float128_unordered 
float128_unordered_x86_64 -#define float128_unordered_quiet float128_unordered_quiet_x86_64 -#define float16_default_nan float16_default_nan_x86_64 +#define address_space_get_flatview address_space_get_flatview_x86_64 +#define memory_region_transaction_begin memory_region_transaction_begin_x86_64 +#define memory_region_transaction_commit memory_region_transaction_commit_x86_64 +#define memory_region_init memory_region_init_x86_64 +#define memory_region_access_valid memory_region_access_valid_x86_64 +#define memory_region_dispatch_read memory_region_dispatch_read_x86_64 +#define memory_region_dispatch_write memory_region_dispatch_write_x86_64 +#define memory_region_init_io memory_region_init_io_x86_64 +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_x86_64 +#define memory_region_size memory_region_size_x86_64 +#define memory_region_set_readonly memory_region_set_readonly_x86_64 +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_x86_64 +#define memory_region_from_host memory_region_from_host_x86_64 +#define memory_region_get_ram_addr memory_region_get_ram_addr_x86_64 +#define memory_region_add_subregion memory_region_add_subregion_x86_64 +#define memory_region_del_subregion memory_region_del_subregion_x86_64 +#define memory_region_find memory_region_find_x86_64 +#define memory_listener_register memory_listener_register_x86_64 +#define memory_listener_unregister memory_listener_unregister_x86_64 +#define address_space_remove_listeners address_space_remove_listeners_x86_64 +#define address_space_init address_space_init_x86_64 +#define address_space_destroy address_space_destroy_x86_64 +#define memory_region_init_ram memory_region_init_ram_x86_64 +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_x86_64 +#define exec_inline_op exec_inline_op_x86_64 +#define floatx80_default_nan floatx80_default_nan_x86_64 +#define float_raise float_raise_x86_64 #define float16_is_quiet_nan float16_is_quiet_nan_x86_64 #define float16_is_signaling_nan float16_is_signaling_nan_x86_64 -#define float16_maybe_silence_nan float16_maybe_silence_nan_x86_64 -#define float16ToCommonNaN float16ToCommonNaN_x86_64 -#define float16_to_float32 float16_to_float32_x86_64 -#define float16_to_float64 float16_to_float64_x86_64 -#define float32_abs float32_abs_x86_64 -#define float32_add float32_add_x86_64 -#define float32_chs float32_chs_x86_64 -#define float32_compare float32_compare_x86_64 -#define float32_compare_internal float32_compare_internal_x86_64 -#define float32_compare_quiet float32_compare_quiet_x86_64 -#define float32_default_nan float32_default_nan_x86_64 -#define float32_div float32_div_x86_64 -#define float32_eq float32_eq_x86_64 -#define float32_eq_quiet float32_eq_quiet_x86_64 -#define float32_exp2 float32_exp2_x86_64 -#define float32_exp2_coefficients float32_exp2_coefficients_x86_64 -#define float32_is_any_nan float32_is_any_nan_x86_64 -#define float32_is_infinity float32_is_infinity_x86_64 -#define float32_is_neg float32_is_neg_x86_64 #define float32_is_quiet_nan float32_is_quiet_nan_x86_64 #define float32_is_signaling_nan float32_is_signaling_nan_x86_64 -#define float32_is_zero float32_is_zero_x86_64 -#define float32_is_zero_or_denormal float32_is_zero_or_denormal_x86_64 -#define float32_le float32_le_x86_64 -#define float32_le_quiet float32_le_quiet_x86_64 -#define float32_log2 float32_log2_x86_64 -#define float32_lt float32_lt_x86_64 -#define float32_lt_quiet float32_lt_quiet_x86_64 +#define float64_is_quiet_nan float64_is_quiet_nan_x86_64 +#define 
float64_is_signaling_nan float64_is_signaling_nan_x86_64 +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_x86_64 +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_x86_64 +#define floatx80_silence_nan floatx80_silence_nan_x86_64 +#define propagateFloatx80NaN propagateFloatx80NaN_x86_64 +#define float128_is_quiet_nan float128_is_quiet_nan_x86_64 +#define float128_is_signaling_nan float128_is_signaling_nan_x86_64 +#define float128_silence_nan float128_silence_nan_x86_64 +#define float16_add float16_add_x86_64 +#define float16_sub float16_sub_x86_64 +#define float32_add float32_add_x86_64 +#define float32_sub float32_sub_x86_64 +#define float64_add float64_add_x86_64 +#define float64_sub float64_sub_x86_64 +#define float16_mul float16_mul_x86_64 +#define float32_mul float32_mul_x86_64 +#define float64_mul float64_mul_x86_64 +#define float16_muladd float16_muladd_x86_64 +#define float32_muladd float32_muladd_x86_64 +#define float64_muladd float64_muladd_x86_64 +#define float16_div float16_div_x86_64 +#define float32_div float32_div_x86_64 +#define float64_div float64_div_x86_64 +#define float16_to_float32 float16_to_float32_x86_64 +#define float16_to_float64 float16_to_float64_x86_64 +#define float32_to_float16 float32_to_float16_x86_64 +#define float32_to_float64 float32_to_float64_x86_64 +#define float64_to_float16 float64_to_float16_x86_64 +#define float64_to_float32 float64_to_float32_x86_64 +#define float16_round_to_int float16_round_to_int_x86_64 +#define float32_round_to_int float32_round_to_int_x86_64 +#define float64_round_to_int float64_round_to_int_x86_64 +#define float16_to_int16_scalbn float16_to_int16_scalbn_x86_64 +#define float16_to_int32_scalbn float16_to_int32_scalbn_x86_64 +#define float16_to_int64_scalbn float16_to_int64_scalbn_x86_64 +#define float32_to_int16_scalbn float32_to_int16_scalbn_x86_64 +#define float32_to_int32_scalbn float32_to_int32_scalbn_x86_64 +#define float32_to_int64_scalbn float32_to_int64_scalbn_x86_64 +#define float64_to_int16_scalbn float64_to_int16_scalbn_x86_64 +#define float64_to_int32_scalbn float64_to_int32_scalbn_x86_64 +#define float64_to_int64_scalbn float64_to_int64_scalbn_x86_64 +#define float16_to_int16 float16_to_int16_x86_64 +#define float16_to_int32 float16_to_int32_x86_64 +#define float16_to_int64 float16_to_int64_x86_64 +#define float32_to_int16 float32_to_int16_x86_64 +#define float32_to_int32 float32_to_int32_x86_64 +#define float32_to_int64 float32_to_int64_x86_64 +#define float64_to_int16 float64_to_int16_x86_64 +#define float64_to_int32 float64_to_int32_x86_64 +#define float64_to_int64 float64_to_int64_x86_64 +#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_x86_64 +#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_x86_64 +#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_x86_64 +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_x86_64 +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_x86_64 +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_x86_64 +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_x86_64 +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_x86_64 +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_x86_64 +#define float16_to_uint16_scalbn float16_to_uint16_scalbn_x86_64 +#define float16_to_uint32_scalbn float16_to_uint32_scalbn_x86_64 +#define float16_to_uint64_scalbn float16_to_uint64_scalbn_x86_64 +#define 
float32_to_uint16_scalbn float32_to_uint16_scalbn_x86_64 +#define float32_to_uint32_scalbn float32_to_uint32_scalbn_x86_64 +#define float32_to_uint64_scalbn float32_to_uint64_scalbn_x86_64 +#define float64_to_uint16_scalbn float64_to_uint16_scalbn_x86_64 +#define float64_to_uint32_scalbn float64_to_uint32_scalbn_x86_64 +#define float64_to_uint64_scalbn float64_to_uint64_scalbn_x86_64 +#define float16_to_uint16 float16_to_uint16_x86_64 +#define float16_to_uint32 float16_to_uint32_x86_64 +#define float16_to_uint64 float16_to_uint64_x86_64 +#define float32_to_uint16 float32_to_uint16_x86_64 +#define float32_to_uint32 float32_to_uint32_x86_64 +#define float32_to_uint64 float32_to_uint64_x86_64 +#define float64_to_uint16 float64_to_uint16_x86_64 +#define float64_to_uint32 float64_to_uint32_x86_64 +#define float64_to_uint64 float64_to_uint64_x86_64 +#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_x86_64 +#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_x86_64 +#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_x86_64 +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_x86_64 +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_x86_64 +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_x86_64 +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_x86_64 +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_x86_64 +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_x86_64 +#define int64_to_float16_scalbn int64_to_float16_scalbn_x86_64 +#define int32_to_float16_scalbn int32_to_float16_scalbn_x86_64 +#define int16_to_float16_scalbn int16_to_float16_scalbn_x86_64 +#define int64_to_float16 int64_to_float16_x86_64 +#define int32_to_float16 int32_to_float16_x86_64 +#define int16_to_float16 int16_to_float16_x86_64 +#define int64_to_float32_scalbn int64_to_float32_scalbn_x86_64 +#define int32_to_float32_scalbn int32_to_float32_scalbn_x86_64 +#define int16_to_float32_scalbn int16_to_float32_scalbn_x86_64 +#define int64_to_float32 int64_to_float32_x86_64 +#define int32_to_float32 int32_to_float32_x86_64 +#define int16_to_float32 int16_to_float32_x86_64 +#define int64_to_float64_scalbn int64_to_float64_scalbn_x86_64 +#define int32_to_float64_scalbn int32_to_float64_scalbn_x86_64 +#define int16_to_float64_scalbn int16_to_float64_scalbn_x86_64 +#define int64_to_float64 int64_to_float64_x86_64 +#define int32_to_float64 int32_to_float64_x86_64 +#define int16_to_float64 int16_to_float64_x86_64 +#define uint64_to_float16_scalbn uint64_to_float16_scalbn_x86_64 +#define uint32_to_float16_scalbn uint32_to_float16_scalbn_x86_64 +#define uint16_to_float16_scalbn uint16_to_float16_scalbn_x86_64 +#define uint64_to_float16 uint64_to_float16_x86_64 +#define uint32_to_float16 uint32_to_float16_x86_64 +#define uint16_to_float16 uint16_to_float16_x86_64 +#define uint64_to_float32_scalbn uint64_to_float32_scalbn_x86_64 +#define uint32_to_float32_scalbn uint32_to_float32_scalbn_x86_64 +#define uint16_to_float32_scalbn uint16_to_float32_scalbn_x86_64 +#define uint64_to_float32 uint64_to_float32_x86_64 +#define uint32_to_float32 uint32_to_float32_x86_64 +#define uint16_to_float32 uint16_to_float32_x86_64 +#define uint64_to_float64_scalbn uint64_to_float64_scalbn_x86_64 +#define uint32_to_float64_scalbn uint32_to_float64_scalbn_x86_64 +#define uint16_to_float64_scalbn uint16_to_float64_scalbn_x86_64 +#define uint64_to_float64 
uint64_to_float64_x86_64 +#define uint32_to_float64 uint32_to_float64_x86_64 +#define uint16_to_float64 uint16_to_float64_x86_64 +#define float16_min float16_min_x86_64 +#define float16_minnum float16_minnum_x86_64 +#define float16_minnummag float16_minnummag_x86_64 +#define float16_max float16_max_x86_64 +#define float16_maxnum float16_maxnum_x86_64 +#define float16_maxnummag float16_maxnummag_x86_64 +#define float32_min float32_min_x86_64 +#define float32_minnum float32_minnum_x86_64 +#define float32_minnummag float32_minnummag_x86_64 #define float32_max float32_max_x86_64 #define float32_maxnum float32_maxnum_x86_64 #define float32_maxnummag float32_maxnummag_x86_64 -#define float32_maybe_silence_nan float32_maybe_silence_nan_x86_64 -#define float32_min float32_min_x86_64 -#define float32_minmax float32_minmax_x86_64 -#define float32_minnum float32_minnum_x86_64 -#define float32_minnummag float32_minnummag_x86_64 -#define float32_mul float32_mul_x86_64 -#define float32_muladd float32_muladd_x86_64 -#define float32_rem float32_rem_x86_64 -#define float32_round_to_int float32_round_to_int_x86_64 -#define float32_scalbn float32_scalbn_x86_64 -#define float32_set_sign float32_set_sign_x86_64 -#define float32_sqrt float32_sqrt_x86_64 -#define float32_squash_input_denormal float32_squash_input_denormal_x86_64 -#define float32_sub float32_sub_x86_64 -#define float32ToCommonNaN float32ToCommonNaN_x86_64 -#define float32_to_float128 float32_to_float128_x86_64 -#define float32_to_float16 float32_to_float16_x86_64 -#define float32_to_float64 float32_to_float64_x86_64 -#define float32_to_floatx80 float32_to_floatx80_x86_64 -#define float32_to_int16 float32_to_int16_x86_64 -#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_x86_64 -#define float32_to_int32 float32_to_int32_x86_64 -#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_x86_64 -#define float32_to_int64 float32_to_int64_x86_64 -#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_x86_64 -#define float32_to_uint16 float32_to_uint16_x86_64 -#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_x86_64 -#define float32_to_uint32 float32_to_uint32_x86_64 -#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_x86_64 -#define float32_to_uint64 float32_to_uint64_x86_64 -#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_x86_64 -#define float32_unordered float32_unordered_x86_64 -#define float32_unordered_quiet float32_unordered_quiet_x86_64 -#define float64_abs float64_abs_x86_64 -#define float64_add float64_add_x86_64 -#define float64_chs float64_chs_x86_64 -#define float64_compare float64_compare_x86_64 -#define float64_compare_internal float64_compare_internal_x86_64 -#define float64_compare_quiet float64_compare_quiet_x86_64 -#define float64_default_nan float64_default_nan_x86_64 -#define float64_div float64_div_x86_64 -#define float64_eq float64_eq_x86_64 -#define float64_eq_quiet float64_eq_quiet_x86_64 -#define float64_is_any_nan float64_is_any_nan_x86_64 -#define float64_is_infinity float64_is_infinity_x86_64 -#define float64_is_neg float64_is_neg_x86_64 -#define float64_is_quiet_nan float64_is_quiet_nan_x86_64 -#define float64_is_signaling_nan float64_is_signaling_nan_x86_64 -#define float64_is_zero float64_is_zero_x86_64 -#define float64_le float64_le_x86_64 -#define float64_le_quiet float64_le_quiet_x86_64 -#define float64_log2 float64_log2_x86_64 -#define float64_lt float64_lt_x86_64 -#define float64_lt_quiet 
float64_lt_quiet_x86_64 +#define float64_min float64_min_x86_64 +#define float64_minnum float64_minnum_x86_64 +#define float64_minnummag float64_minnummag_x86_64 #define float64_max float64_max_x86_64 #define float64_maxnum float64_maxnum_x86_64 #define float64_maxnummag float64_maxnummag_x86_64 -#define float64_maybe_silence_nan float64_maybe_silence_nan_x86_64 -#define float64_min float64_min_x86_64 -#define float64_minmax float64_minmax_x86_64 -#define float64_minnum float64_minnum_x86_64 -#define float64_minnummag float64_minnummag_x86_64 -#define float64_mul float64_mul_x86_64 -#define float64_muladd float64_muladd_x86_64 -#define float64_rem float64_rem_x86_64 -#define float64_round_to_int float64_round_to_int_x86_64 +#define float16_compare float16_compare_x86_64 +#define float16_compare_quiet float16_compare_quiet_x86_64 +#define float32_compare float32_compare_x86_64 +#define float32_compare_quiet float32_compare_quiet_x86_64 +#define float64_compare float64_compare_x86_64 +#define float64_compare_quiet float64_compare_quiet_x86_64 +#define float16_scalbn float16_scalbn_x86_64 +#define float32_scalbn float32_scalbn_x86_64 #define float64_scalbn float64_scalbn_x86_64 -#define float64_set_sign float64_set_sign_x86_64 +#define float16_sqrt float16_sqrt_x86_64 +#define float32_sqrt float32_sqrt_x86_64 #define float64_sqrt float64_sqrt_x86_64 +#define float16_default_nan float16_default_nan_x86_64 +#define float32_default_nan float32_default_nan_x86_64 +#define float64_default_nan float64_default_nan_x86_64 +#define float128_default_nan float128_default_nan_x86_64 +#define float16_silence_nan float16_silence_nan_x86_64 +#define float32_silence_nan float32_silence_nan_x86_64 +#define float64_silence_nan float64_silence_nan_x86_64 +#define float16_squash_input_denormal float16_squash_input_denormal_x86_64 +#define float32_squash_input_denormal float32_squash_input_denormal_x86_64 #define float64_squash_input_denormal float64_squash_input_denormal_x86_64 -#define float64_sub float64_sub_x86_64 -#define float64ToCommonNaN float64ToCommonNaN_x86_64 -#define float64_to_float128 float64_to_float128_x86_64 -#define float64_to_float16 float64_to_float16_x86_64 -#define float64_to_float32 float64_to_float32_x86_64 +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_x86_64 +#define roundAndPackFloatx80 roundAndPackFloatx80_x86_64 +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_x86_64 +#define int32_to_floatx80 int32_to_floatx80_x86_64 +#define int32_to_float128 int32_to_float128_x86_64 +#define int64_to_floatx80 int64_to_floatx80_x86_64 +#define int64_to_float128 int64_to_float128_x86_64 +#define uint64_to_float128 uint64_to_float128_x86_64 +#define float32_to_floatx80 float32_to_floatx80_x86_64 +#define float32_to_float128 float32_to_float128_x86_64 +#define float32_rem float32_rem_x86_64 +#define float32_exp2 float32_exp2_x86_64 +#define float32_log2 float32_log2_x86_64 +#define float32_eq float32_eq_x86_64 +#define float32_le float32_le_x86_64 +#define float32_lt float32_lt_x86_64 +#define float32_unordered float32_unordered_x86_64 +#define float32_eq_quiet float32_eq_quiet_x86_64 +#define float32_le_quiet float32_le_quiet_x86_64 +#define float32_lt_quiet float32_lt_quiet_x86_64 +#define float32_unordered_quiet float32_unordered_quiet_x86_64 #define float64_to_floatx80 float64_to_floatx80_x86_64 -#define float64_to_int16 float64_to_int16_x86_64 -#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_x86_64 -#define float64_to_int32 
float64_to_int32_x86_64 -#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_x86_64 -#define float64_to_int64 float64_to_int64_x86_64 -#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_x86_64 -#define float64_to_uint16 float64_to_uint16_x86_64 -#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_x86_64 -#define float64_to_uint32 float64_to_uint32_x86_64 -#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_x86_64 -#define float64_to_uint64 float64_to_uint64_x86_64 -#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_x86_64 -#define float64_trunc_to_int float64_trunc_to_int_x86_64 +#define float64_to_float128 float64_to_float128_x86_64 +#define float64_rem float64_rem_x86_64 +#define float64_log2 float64_log2_x86_64 +#define float64_eq float64_eq_x86_64 +#define float64_le float64_le_x86_64 +#define float64_lt float64_lt_x86_64 #define float64_unordered float64_unordered_x86_64 +#define float64_eq_quiet float64_eq_quiet_x86_64 +#define float64_le_quiet float64_le_quiet_x86_64 +#define float64_lt_quiet float64_lt_quiet_x86_64 #define float64_unordered_quiet float64_unordered_quiet_x86_64 -#define float_raise float_raise_x86_64 -#define floatx80_add floatx80_add_x86_64 -#define floatx80_compare floatx80_compare_x86_64 -#define floatx80_compare_internal floatx80_compare_internal_x86_64 -#define floatx80_compare_quiet floatx80_compare_quiet_x86_64 -#define floatx80_default_nan floatx80_default_nan_x86_64 -#define floatx80_div floatx80_div_x86_64 -#define floatx80_eq floatx80_eq_x86_64 -#define floatx80_eq_quiet floatx80_eq_quiet_x86_64 -#define floatx80_is_quiet_nan floatx80_is_quiet_nan_x86_64 -#define floatx80_is_signaling_nan floatx80_is_signaling_nan_x86_64 -#define floatx80_le floatx80_le_x86_64 -#define floatx80_le_quiet floatx80_le_quiet_x86_64 -#define floatx80_lt floatx80_lt_x86_64 -#define floatx80_lt_quiet floatx80_lt_quiet_x86_64 -#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_x86_64 -#define floatx80_mul floatx80_mul_x86_64 -#define floatx80_rem floatx80_rem_x86_64 -#define floatx80_round_to_int floatx80_round_to_int_x86_64 -#define floatx80_scalbn floatx80_scalbn_x86_64 -#define floatx80_sqrt floatx80_sqrt_x86_64 -#define floatx80_sub floatx80_sub_x86_64 -#define floatx80ToCommonNaN floatx80ToCommonNaN_x86_64 -#define floatx80_to_float128 floatx80_to_float128_x86_64 -#define floatx80_to_float32 floatx80_to_float32_x86_64 -#define floatx80_to_float64 floatx80_to_float64_x86_64 #define floatx80_to_int32 floatx80_to_int32_x86_64 #define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_x86_64 #define floatx80_to_int64 floatx80_to_int64_x86_64 #define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_x86_64 +#define floatx80_to_float32 floatx80_to_float32_x86_64 +#define floatx80_to_float64 floatx80_to_float64_x86_64 +#define floatx80_to_float128 floatx80_to_float128_x86_64 +#define floatx80_round floatx80_round_x86_64 +#define floatx80_round_to_int floatx80_round_to_int_x86_64 +#define floatx80_add floatx80_add_x86_64 +#define floatx80_sub floatx80_sub_x86_64 +#define floatx80_mul floatx80_mul_x86_64 +#define floatx80_div floatx80_div_x86_64 +#define floatx80_rem floatx80_rem_x86_64 +#define floatx80_sqrt floatx80_sqrt_x86_64 +#define floatx80_eq floatx80_eq_x86_64 +#define floatx80_le floatx80_le_x86_64 +#define floatx80_lt floatx80_lt_x86_64 #define floatx80_unordered floatx80_unordered_x86_64 +#define floatx80_eq_quiet floatx80_eq_quiet_x86_64 
+#define floatx80_le_quiet floatx80_le_quiet_x86_64 +#define floatx80_lt_quiet floatx80_lt_quiet_x86_64 #define floatx80_unordered_quiet floatx80_unordered_quiet_x86_64 -#define flush_icache_range flush_icache_range_x86_64 -#define format_string format_string_x86_64 -#define fp_decode_rm fp_decode_rm_x86_64 -#define frame_dummy frame_dummy_x86_64 -#define free_range free_range_x86_64 -#define fstat64 fstat64_x86_64 -#define futex_wait futex_wait_x86_64 -#define futex_wake futex_wake_x86_64 -#define gen_aa32_ld16s gen_aa32_ld16s_x86_64 -#define gen_aa32_ld16u gen_aa32_ld16u_x86_64 -#define gen_aa32_ld32u gen_aa32_ld32u_x86_64 -#define gen_aa32_ld64 gen_aa32_ld64_x86_64 -#define gen_aa32_ld8s gen_aa32_ld8s_x86_64 -#define gen_aa32_ld8u gen_aa32_ld8u_x86_64 -#define gen_aa32_st16 gen_aa32_st16_x86_64 -#define gen_aa32_st32 gen_aa32_st32_x86_64 -#define gen_aa32_st64 gen_aa32_st64_x86_64 -#define gen_aa32_st8 gen_aa32_st8_x86_64 -#define gen_adc gen_adc_x86_64 -#define gen_adc_CC gen_adc_CC_x86_64 -#define gen_add16 gen_add16_x86_64 -#define gen_add_carry gen_add_carry_x86_64 -#define gen_add_CC gen_add_CC_x86_64 -#define gen_add_datah_offset gen_add_datah_offset_x86_64 -#define gen_add_data_offset gen_add_data_offset_x86_64 -#define gen_addq gen_addq_x86_64 -#define gen_addq_lo gen_addq_lo_x86_64 -#define gen_addq_msw gen_addq_msw_x86_64 -#define gen_arm_parallel_addsub gen_arm_parallel_addsub_x86_64 -#define gen_arm_shift_im gen_arm_shift_im_x86_64 -#define gen_arm_shift_reg gen_arm_shift_reg_x86_64 -#define gen_bx gen_bx_x86_64 -#define gen_bx_im gen_bx_im_x86_64 -#define gen_clrex gen_clrex_x86_64 -#define generate_memory_topology generate_memory_topology_x86_64 -#define generic_timer_cp_reginfo generic_timer_cp_reginfo_x86_64 -#define gen_exception gen_exception_x86_64 -#define gen_exception_insn gen_exception_insn_x86_64 -#define gen_exception_internal gen_exception_internal_x86_64 -#define gen_exception_internal_insn gen_exception_internal_insn_x86_64 -#define gen_exception_return gen_exception_return_x86_64 -#define gen_goto_tb gen_goto_tb_x86_64 -#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_x86_64 -#define gen_helper_add_saturate gen_helper_add_saturate_x86_64 -#define gen_helper_add_setq gen_helper_add_setq_x86_64 -#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_x86_64 -#define gen_helper_clz32 gen_helper_clz32_x86_64 -#define gen_helper_clz64 gen_helper_clz64_x86_64 -#define gen_helper_clz_arm gen_helper_clz_arm_x86_64 -#define gen_helper_cpsr_read gen_helper_cpsr_read_x86_64 -#define gen_helper_cpsr_write gen_helper_cpsr_write_x86_64 -#define gen_helper_crc32_arm gen_helper_crc32_arm_x86_64 -#define gen_helper_crc32c gen_helper_crc32c_x86_64 -#define gen_helper_crypto_aese gen_helper_crypto_aese_x86_64 -#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_x86_64 -#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_x86_64 -#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_x86_64 -#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_x86_64 -#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_x86_64 -#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_x86_64 -#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_x86_64 -#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_x86_64 -#define gen_helper_double_saturate gen_helper_double_saturate_x86_64 -#define gen_helper_exception_internal gen_helper_exception_internal_x86_64 -#define gen_helper_exception_with_syndrome 
gen_helper_exception_with_syndrome_x86_64 -#define gen_helper_get_cp_reg gen_helper_get_cp_reg_x86_64 -#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_x86_64 -#define gen_helper_get_r13_banked gen_helper_get_r13_banked_x86_64 -#define gen_helper_get_user_reg gen_helper_get_user_reg_x86_64 -#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_x86_64 -#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_x86_64 -#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_x86_64 -#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_x86_64 -#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_x86_64 -#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_x86_64 -#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_x86_64 -#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_x86_64 -#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_x86_64 -#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_x86_64 -#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_x86_64 -#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_x86_64 -#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_x86_64 -#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_x86_64 -#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_x86_64 -#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_x86_64 -#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_x86_64 -#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_x86_64 -#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_x86_64 -#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_x86_64 -#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_x86_64 -#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_x86_64 -#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_x86_64 -#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_x86_64 -#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_x86_64 -#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_x86_64 -#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_x86_64 -#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_x86_64 -#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_x86_64 -#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_x86_64 -#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_x86_64 -#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_x86_64 -#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_x86_64 -#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_x86_64 -#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_x86_64 -#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_x86_64 -#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_x86_64 -#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_x86_64 -#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_x86_64 -#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_x86_64 -#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_x86_64 -#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_x86_64 -#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_x86_64 -#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_x86_64 -#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_x86_64 -#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_x86_64 -#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_x86_64 -#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_x86_64 -#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_x86_64 -#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_x86_64 -#define 
gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_x86_64 -#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_x86_64 -#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_x86_64 -#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_x86_64 -#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_x86_64 -#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_x86_64 -#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_x86_64 -#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_x86_64 -#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_x86_64 -#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_x86_64 -#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_x86_64 -#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_x86_64 -#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_x86_64 -#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_x86_64 -#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_x86_64 -#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_x86_64 -#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_x86_64 -#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_x86_64 -#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_x86_64 -#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_x86_64 -#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_x86_64 -#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_x86_64 -#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_x86_64 -#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_x86_64 -#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_x86_64 -#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_x86_64 -#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_x86_64 -#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_x86_64 -#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_x86_64 -#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_x86_64 -#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_x86_64 -#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_x86_64 -#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_x86_64 -#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_x86_64 -#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_x86_64 -#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_x86_64 -#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_x86_64 -#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_x86_64 -#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_x86_64 -#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_x86_64 -#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_x86_64 -#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_x86_64 -#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_x86_64 -#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_x86_64 -#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_x86_64 -#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_x86_64 -#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_x86_64 -#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_x86_64 -#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_x86_64 -#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_x86_64 -#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_x86_64 -#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_x86_64 -#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_x86_64 -#define gen_helper_iwmmxt_unpackluw 
gen_helper_iwmmxt_unpackluw_x86_64 -#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_x86_64 -#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_x86_64 -#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_x86_64 -#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_x86_64 -#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_x86_64 -#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_x86_64 -#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_x86_64 -#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_x86_64 -#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_x86_64 -#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_x86_64 -#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_x86_64 -#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_x86_64 -#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_x86_64 -#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_x86_64 -#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_x86_64 -#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_x86_64 -#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_x86_64 -#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_x86_64 -#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_x86_64 -#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_x86_64 -#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_x86_64 -#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_x86_64 -#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_x86_64 -#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_x86_64 -#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_x86_64 -#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_x86_64 -#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_x86_64 -#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_x86_64 -#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_x86_64 -#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_x86_64 -#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_x86_64 -#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_x86_64 -#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_x86_64 -#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_x86_64 -#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_x86_64 -#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_x86_64 -#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_x86_64 -#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_x86_64 -#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_x86_64 -#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_x86_64 -#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_x86_64 -#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_x86_64 -#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_x86_64 -#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_x86_64 -#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_x86_64 -#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_x86_64 -#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_x86_64 -#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_x86_64 -#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_x86_64 -#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_x86_64 -#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_x86_64 -#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_x86_64 -#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_x86_64 -#define gen_helper_neon_hadd_u16 
gen_helper_neon_hadd_u16_x86_64 -#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_x86_64 -#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_x86_64 -#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_x86_64 -#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_x86_64 -#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_x86_64 -#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_x86_64 -#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_x86_64 -#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_x86_64 -#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_x86_64 -#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_x86_64 -#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_x86_64 -#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_x86_64 -#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_x86_64 -#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_x86_64 -#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_x86_64 -#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_x86_64 -#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_x86_64 -#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_x86_64 -#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_x86_64 -#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_x86_64 -#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_x86_64 -#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_x86_64 -#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_x86_64 -#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_x86_64 -#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_x86_64 -#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_x86_64 -#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_x86_64 -#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_x86_64 -#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_x86_64 -#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_x86_64 -#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_x86_64 -#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_x86_64 -#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_x86_64 -#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_x86_64 -#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_x86_64 -#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_x86_64 -#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_x86_64 -#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_x86_64 -#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_x86_64 -#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_x86_64 -#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_x86_64 -#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_x86_64 -#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_x86_64 -#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_x86_64 -#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_x86_64 -#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_x86_64 -#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_x86_64 -#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_x86_64 -#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_x86_64 -#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_x86_64 -#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_x86_64 -#define gen_helper_neon_pmin_s8 
gen_helper_neon_pmin_s8_x86_64 -#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_x86_64 -#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_x86_64 -#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_x86_64 -#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_x86_64 -#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_x86_64 -#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_x86_64 -#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_x86_64 -#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_x86_64 -#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_x86_64 -#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_x86_64 -#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_x86_64 -#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_x86_64 -#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_x86_64 -#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_x86_64 -#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_x86_64 -#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_x86_64 -#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_x86_64 -#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_x86_64 -#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_x86_64 -#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_x86_64 -#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_x86_64 -#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_x86_64 -#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_x86_64 -#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_x86_64 -#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_x86_64 -#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_x86_64 -#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_x86_64 -#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_x86_64 -#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_x86_64 -#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_x86_64 -#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_x86_64 -#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_x86_64 -#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_x86_64 -#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_x86_64 -#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_x86_64 -#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_x86_64 -#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_x86_64 -#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_x86_64 -#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_x86_64 -#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_x86_64 -#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_x86_64 -#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_x86_64 -#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_x86_64 -#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_x86_64 -#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_x86_64 -#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_x86_64 -#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_x86_64 -#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_x86_64 -#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_x86_64 -#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_x86_64 -#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_x86_64 -#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_x86_64 -#define gen_helper_neon_qunzip8 
gen_helper_neon_qunzip8_x86_64 -#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_x86_64 -#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_x86_64 -#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_x86_64 -#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_x86_64 -#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_x86_64 -#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_x86_64 -#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_x86_64 -#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_x86_64 -#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_x86_64 -#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_x86_64 -#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_x86_64 -#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_x86_64 -#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_x86_64 -#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_x86_64 -#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_x86_64 -#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_x86_64 -#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_x86_64 -#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_x86_64 -#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_x86_64 -#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_x86_64 -#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_x86_64 -#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_x86_64 -#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_x86_64 -#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_x86_64 -#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_x86_64 -#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_x86_64 -#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_x86_64 -#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_x86_64 -#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_x86_64 -#define gen_helper_neon_tbl gen_helper_neon_tbl_x86_64 -#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_x86_64 -#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_x86_64 -#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_x86_64 -#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_x86_64 -#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_x86_64 -#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_x86_64 -#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_x86_64 -#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_x86_64 -#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_x86_64 -#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_x86_64 -#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_x86_64 -#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_x86_64 -#define gen_helper_neon_zip16 gen_helper_neon_zip16_x86_64 -#define gen_helper_neon_zip8 gen_helper_neon_zip8_x86_64 -#define gen_helper_pre_hvc gen_helper_pre_hvc_x86_64 -#define gen_helper_pre_smc gen_helper_pre_smc_x86_64 -#define gen_helper_qadd16 gen_helper_qadd16_x86_64 -#define gen_helper_qadd8 gen_helper_qadd8_x86_64 -#define gen_helper_qaddsubx gen_helper_qaddsubx_x86_64 -#define gen_helper_qsub16 gen_helper_qsub16_x86_64 -#define gen_helper_qsub8 gen_helper_qsub8_x86_64 -#define gen_helper_qsubaddx gen_helper_qsubaddx_x86_64 -#define gen_helper_rbit gen_helper_rbit_x86_64 -#define gen_helper_recpe_f32 gen_helper_recpe_f32_x86_64 -#define gen_helper_recpe_u32 gen_helper_recpe_u32_x86_64 -#define gen_helper_recps_f32 gen_helper_recps_f32_x86_64 -#define 
gen_helper_rintd gen_helper_rintd_x86_64 -#define gen_helper_rintd_exact gen_helper_rintd_exact_x86_64 -#define gen_helper_rints gen_helper_rints_x86_64 -#define gen_helper_rints_exact gen_helper_rints_exact_x86_64 -#define gen_helper_ror_cc gen_helper_ror_cc_x86_64 -#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_x86_64 -#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_x86_64 -#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_x86_64 -#define gen_helper_sadd16 gen_helper_sadd16_x86_64 -#define gen_helper_sadd8 gen_helper_sadd8_x86_64 -#define gen_helper_saddsubx gen_helper_saddsubx_x86_64 -#define gen_helper_sar_cc gen_helper_sar_cc_x86_64 -#define gen_helper_sdiv gen_helper_sdiv_x86_64 -#define gen_helper_sel_flags gen_helper_sel_flags_x86_64 -#define gen_helper_set_cp_reg gen_helper_set_cp_reg_x86_64 -#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_x86_64 -#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_x86_64 -#define gen_helper_set_r13_banked gen_helper_set_r13_banked_x86_64 -#define gen_helper_set_rmode gen_helper_set_rmode_x86_64 -#define gen_helper_set_user_reg gen_helper_set_user_reg_x86_64 -#define gen_helper_shadd16 gen_helper_shadd16_x86_64 -#define gen_helper_shadd8 gen_helper_shadd8_x86_64 -#define gen_helper_shaddsubx gen_helper_shaddsubx_x86_64 -#define gen_helper_shl_cc gen_helper_shl_cc_x86_64 -#define gen_helper_shr_cc gen_helper_shr_cc_x86_64 -#define gen_helper_shsub16 gen_helper_shsub16_x86_64 -#define gen_helper_shsub8 gen_helper_shsub8_x86_64 -#define gen_helper_shsubaddx gen_helper_shsubaddx_x86_64 -#define gen_helper_ssat gen_helper_ssat_x86_64 -#define gen_helper_ssat16 gen_helper_ssat16_x86_64 -#define gen_helper_ssub16 gen_helper_ssub16_x86_64 -#define gen_helper_ssub8 gen_helper_ssub8_x86_64 -#define gen_helper_ssubaddx gen_helper_ssubaddx_x86_64 -#define gen_helper_sub_saturate gen_helper_sub_saturate_x86_64 -#define gen_helper_sxtb16 gen_helper_sxtb16_x86_64 -#define gen_helper_uadd16 gen_helper_uadd16_x86_64 -#define gen_helper_uadd8 gen_helper_uadd8_x86_64 -#define gen_helper_uaddsubx gen_helper_uaddsubx_x86_64 -#define gen_helper_udiv gen_helper_udiv_x86_64 -#define gen_helper_uhadd16 gen_helper_uhadd16_x86_64 -#define gen_helper_uhadd8 gen_helper_uhadd8_x86_64 -#define gen_helper_uhaddsubx gen_helper_uhaddsubx_x86_64 -#define gen_helper_uhsub16 gen_helper_uhsub16_x86_64 -#define gen_helper_uhsub8 gen_helper_uhsub8_x86_64 -#define gen_helper_uhsubaddx gen_helper_uhsubaddx_x86_64 -#define gen_helper_uqadd16 gen_helper_uqadd16_x86_64 -#define gen_helper_uqadd8 gen_helper_uqadd8_x86_64 -#define gen_helper_uqaddsubx gen_helper_uqaddsubx_x86_64 -#define gen_helper_uqsub16 gen_helper_uqsub16_x86_64 -#define gen_helper_uqsub8 gen_helper_uqsub8_x86_64 -#define gen_helper_uqsubaddx gen_helper_uqsubaddx_x86_64 -#define gen_helper_usad8 gen_helper_usad8_x86_64 -#define gen_helper_usat gen_helper_usat_x86_64 -#define gen_helper_usat16 gen_helper_usat16_x86_64 -#define gen_helper_usub16 gen_helper_usub16_x86_64 -#define gen_helper_usub8 gen_helper_usub8_x86_64 -#define gen_helper_usubaddx gen_helper_usubaddx_x86_64 -#define gen_helper_uxtb16 gen_helper_uxtb16_x86_64 -#define gen_helper_v7m_mrs gen_helper_v7m_mrs_x86_64 -#define gen_helper_v7m_msr gen_helper_v7m_msr_x86_64 -#define gen_helper_vfp_absd gen_helper_vfp_absd_x86_64 -#define gen_helper_vfp_abss gen_helper_vfp_abss_x86_64 -#define gen_helper_vfp_addd gen_helper_vfp_addd_x86_64 -#define gen_helper_vfp_adds gen_helper_vfp_adds_x86_64 -#define gen_helper_vfp_cmpd 
gen_helper_vfp_cmpd_x86_64 -#define gen_helper_vfp_cmped gen_helper_vfp_cmped_x86_64 -#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_x86_64 -#define gen_helper_vfp_cmps gen_helper_vfp_cmps_x86_64 -#define gen_helper_vfp_divd gen_helper_vfp_divd_x86_64 -#define gen_helper_vfp_divs gen_helper_vfp_divs_x86_64 -#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_x86_64 -#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_x86_64 -#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_x86_64 -#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_x86_64 -#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_x86_64 -#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_x86_64 -#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_x86_64 -#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_x86_64 -#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_x86_64 -#define gen_helper_vfp_maxs gen_helper_vfp_maxs_x86_64 -#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_x86_64 -#define gen_helper_vfp_minnums gen_helper_vfp_minnums_x86_64 -#define gen_helper_vfp_mins gen_helper_vfp_mins_x86_64 -#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_x86_64 -#define gen_helper_vfp_muladds gen_helper_vfp_muladds_x86_64 -#define gen_helper_vfp_muld gen_helper_vfp_muld_x86_64 -#define gen_helper_vfp_muls gen_helper_vfp_muls_x86_64 -#define gen_helper_vfp_negd gen_helper_vfp_negd_x86_64 -#define gen_helper_vfp_negs gen_helper_vfp_negs_x86_64 -#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_x86_64 -#define gen_helper_vfp_shtod gen_helper_vfp_shtod_x86_64 -#define gen_helper_vfp_shtos gen_helper_vfp_shtos_x86_64 -#define gen_helper_vfp_sitod gen_helper_vfp_sitod_x86_64 -#define gen_helper_vfp_sitos gen_helper_vfp_sitos_x86_64 -#define gen_helper_vfp_sltod gen_helper_vfp_sltod_x86_64 -#define gen_helper_vfp_sltos gen_helper_vfp_sltos_x86_64 -#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_x86_64 -#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_x86_64 -#define gen_helper_vfp_subd gen_helper_vfp_subd_x86_64 -#define gen_helper_vfp_subs gen_helper_vfp_subs_x86_64 -#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_x86_64 -#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_x86_64 -#define gen_helper_vfp_tosid gen_helper_vfp_tosid_x86_64 -#define gen_helper_vfp_tosis gen_helper_vfp_tosis_x86_64 -#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_x86_64 -#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_x86_64 -#define gen_helper_vfp_tosld gen_helper_vfp_tosld_x86_64 -#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_x86_64 -#define gen_helper_vfp_tosls gen_helper_vfp_tosls_x86_64 -#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_x86_64 -#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_x86_64 -#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_x86_64 -#define gen_helper_vfp_touid gen_helper_vfp_touid_x86_64 -#define gen_helper_vfp_touis gen_helper_vfp_touis_x86_64 -#define gen_helper_vfp_touizd gen_helper_vfp_touizd_x86_64 -#define gen_helper_vfp_touizs gen_helper_vfp_touizs_x86_64 -#define gen_helper_vfp_tould gen_helper_vfp_tould_x86_64 -#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_x86_64 -#define gen_helper_vfp_touls gen_helper_vfp_touls_x86_64 -#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_x86_64 
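Every rename in this hunk has the same shape — #define <symbol> <symbol>_x86_64, one macro per QEMU global — so that each emulated target carries its own copy of every symbol and several targets can be statically linked into a single libunicorn without duplicate-symbol errors. A minimal stand-alone sketch of the mechanism, assuming two hypothetical targets (the helper_udiv_* names are illustrative only, not Unicorn's actual build):

    /* symbol_prefix_demo.c: why per-target suffix macros are needed.
     * Real QEMU symbols are extern and live in separate objects; static
     * functions are used here only to keep the demo in one file. */
    #include <stdio.h>

    #define helper_udiv helper_udiv_x86_64      /* target 1's generated header */
    static unsigned helper_udiv(unsigned n, unsigned d) { return d ? n / d : 0; }
    #undef helper_udiv

    #define helper_udiv helper_udiv_arm         /* target 2's generated header */
    static unsigned helper_udiv(unsigned n, unsigned d) { return d ? n / d : 0; }
    #undef helper_udiv

    int main(void) {
        /* Both definitions coexist: the preprocessor gave them distinct names. */
        printf("%u %u\n", helper_udiv_x86_64(10, 3), helper_udiv_arm(9, 2));
        return 0;
    }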
-#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_x86_64
-#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_x86_64
-#define gen_helper_vfp_uitod gen_helper_vfp_uitod_x86_64
-#define gen_helper_vfp_uitos gen_helper_vfp_uitos_x86_64
-#define gen_helper_vfp_ultod gen_helper_vfp_ultod_x86_64
-#define gen_helper_vfp_ultos gen_helper_vfp_ultos_x86_64
-#define gen_helper_wfe gen_helper_wfe_x86_64
-#define gen_helper_wfi gen_helper_wfi_x86_64
-#define gen_hvc gen_hvc_x86_64
-#define gen_intermediate_code_internal gen_intermediate_code_internal_x86_64
-#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_x86_64
-#define gen_iwmmxt_address gen_iwmmxt_address_x86_64
-#define gen_iwmmxt_shift gen_iwmmxt_shift_x86_64
-#define gen_jmp gen_jmp_x86_64
-#define gen_load_and_replicate gen_load_and_replicate_x86_64
-#define gen_load_exclusive gen_load_exclusive_x86_64
-#define gen_logic_CC gen_logic_CC_x86_64
-#define gen_logicq_cc gen_logicq_cc_x86_64
-#define gen_lookup_tb gen_lookup_tb_x86_64
-#define gen_mov_F0_vreg gen_mov_F0_vreg_x86_64
-#define gen_mov_F1_vreg gen_mov_F1_vreg_x86_64
-#define gen_mov_vreg_F0 gen_mov_vreg_F0_x86_64
-#define gen_muls_i64_i32 gen_muls_i64_i32_x86_64
-#define gen_mulu_i64_i32 gen_mulu_i64_i32_x86_64
-#define gen_mulxy gen_mulxy_x86_64
-#define gen_neon_add gen_neon_add_x86_64
-#define gen_neon_addl gen_neon_addl_x86_64
-#define gen_neon_addl_saturate gen_neon_addl_saturate_x86_64
-#define gen_neon_bsl gen_neon_bsl_x86_64
-#define gen_neon_dup_high16 gen_neon_dup_high16_x86_64
-#define gen_neon_dup_low16 gen_neon_dup_low16_x86_64
-#define gen_neon_dup_u8 gen_neon_dup_u8_x86_64
-#define gen_neon_mull gen_neon_mull_x86_64
-#define gen_neon_narrow gen_neon_narrow_x86_64
-#define gen_neon_narrow_op gen_neon_narrow_op_x86_64
-#define gen_neon_narrow_sats gen_neon_narrow_sats_x86_64
-#define gen_neon_narrow_satu gen_neon_narrow_satu_x86_64
-#define gen_neon_negl gen_neon_negl_x86_64
-#define gen_neon_rsb gen_neon_rsb_x86_64
-#define gen_neon_shift_narrow gen_neon_shift_narrow_x86_64
-#define gen_neon_subl gen_neon_subl_x86_64
-#define gen_neon_trn_u16 gen_neon_trn_u16_x86_64
-#define gen_neon_trn_u8 gen_neon_trn_u8_x86_64
-#define gen_neon_unarrow_sats gen_neon_unarrow_sats_x86_64
-#define gen_neon_unzip gen_neon_unzip_x86_64
-#define gen_neon_widen gen_neon_widen_x86_64
-#define gen_neon_zip gen_neon_zip_x86_64
+#define float128_to_int32 float128_to_int32_x86_64
+#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_x86_64
+#define float128_to_int64 float128_to_int64_x86_64
+#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_x86_64
+#define float128_to_uint64 float128_to_uint64_x86_64
+#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_x86_64
+#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_x86_64
+#define float128_to_uint32 float128_to_uint32_x86_64
+#define float128_to_float32 float128_to_float32_x86_64
+#define float128_to_float64 float128_to_float64_x86_64
+#define float128_to_floatx80 float128_to_floatx80_x86_64
+#define float128_round_to_int float128_round_to_int_x86_64
+#define float128_add float128_add_x86_64
+#define float128_sub float128_sub_x86_64
+#define float128_mul float128_mul_x86_64
+#define float128_div float128_div_x86_64
+#define float128_rem float128_rem_x86_64
+#define float128_sqrt float128_sqrt_x86_64
+#define float128_eq float128_eq_x86_64
+#define float128_le float128_le_x86_64
+#define float128_lt float128_lt_x86_64
+#define float128_unordered float128_unordered_x86_64
+#define float128_eq_quiet float128_eq_quiet_x86_64
+#define float128_le_quiet float128_le_quiet_x86_64
+#define float128_lt_quiet float128_lt_quiet_x86_64
+#define float128_unordered_quiet float128_unordered_quiet_x86_64
+#define floatx80_compare floatx80_compare_x86_64
+#define floatx80_compare_quiet floatx80_compare_quiet_x86_64
+#define float128_compare float128_compare_x86_64
+#define float128_compare_quiet float128_compare_quiet_x86_64
+#define floatx80_scalbn floatx80_scalbn_x86_64
+#define float128_scalbn float128_scalbn_x86_64
+#define softfloat_init softfloat_init_x86_64
+#define tcg_optimize tcg_optimize_x86_64
 #define gen_new_label gen_new_label_x86_64
-#define gen_nop_hint gen_nop_hint_x86_64
-#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_x86_64
-#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_x86_64
-#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_x86_64
-#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_x86_64
-#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_x86_64
-#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_x86_64
-#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_x86_64
-#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_x86_64
-#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_x86_64
-#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_x86_64
-#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_x86_64
-#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_x86_64
-#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_x86_64
-#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_x86_64
-#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_x86_64
-#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_x86_64
-#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_x86_64
-#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_x86_64
-#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_x86_64
-#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_x86_64
-#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_x86_64
-#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_x86_64
-#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_x86_64
-#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_x86_64
-#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_x86_64
-#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_x86_64
-#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_x86_64
-#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_x86_64
-#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_x86_64
-#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_x86_64
-#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_x86_64
-#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_x86_64
-#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_x86_64
-#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_x86_64
-#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_x86_64
-#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_x86_64
-#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_x86_64
-#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_x86_64
-#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_x86_64
-#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_x86_64
-#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_x86_64
-#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_x86_64
-#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_x86_64
-#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_x86_64
-#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_x86_64
-#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_x86_64
-#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_x86_64
-#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_x86_64
-#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_x86_64
-#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_x86_64
-#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_x86_64
-#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_x86_64
-#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_x86_64
-#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_x86_64
-#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_x86_64
-#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_x86_64
-#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_x86_64
-#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_x86_64
-#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_x86_64
-#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_x86_64
-#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_x86_64
-#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_x86_64
-#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_x86_64
-#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_x86_64
-#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_x86_64
-#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_x86_64
-#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_x86_64
-#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_x86_64
-#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_x86_64
-#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_x86_64
-#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_x86_64
-#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_x86_64
-#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_x86_64
-#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_x86_64
-#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_x86_64
-#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_x86_64
-#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_x86_64
-#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_x86_64
-#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_x86_64
-#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_x86_64
-#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_x86_64
-#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_x86_64
-#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_x86_64
-#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_x86_64
-#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_x86_64
-#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_x86_64
-#define gen_rev16 gen_rev16_x86_64
-#define gen_revsh gen_revsh_x86_64
-#define gen_rfe gen_rfe_x86_64
-#define gen_sar gen_sar_x86_64
-#define gen_sbc_CC gen_sbc_CC_x86_64
-#define gen_sbfx gen_sbfx_x86_64
-#define gen_set_CF_bit31 gen_set_CF_bit31_x86_64
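The added lines above extend the same namespacing to the softfloat float128/floatx80 entry points (float128_add, floatx80_compare, and so on), plus softfloat_init and tcg_optimize, which the imported Unicorn2 core links once per target. Written with token pasting instead of a literal suffix, one such rename could be expressed as below; this is only an equivalent formulation under an assumed SUFFIX macro, while the generated headers in this patch spell the suffix out literally:

    #include <stdio.h>

    #define XCONCAT(a, b) a##b
    #define CONCAT(a, b)  XCONCAT(a, b)
    #define SUFFIX        _x86_64

    /* Equivalent to: #define float128_add float128_add_x86_64 */
    #define float128_add CONCAT(float128_add, SUFFIX)

    /* Stand-in for the real softfloat routine; the macro renames it at
     * compile time while the source keeps the original name. */
    static int float128_add(int a, int b) { return a + b; }

    int main(void) {
        printf("%d\n", float128_add_x86_64(2, 3)); /* the suffixed name exists */
        return 0;
    }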
-#define gen_set_condexec gen_set_condexec_x86_64 -#define gen_set_cpsr gen_set_cpsr_x86_64 -#define gen_set_label gen_set_label_x86_64 -#define gen_set_pc_im gen_set_pc_im_x86_64 -#define gen_set_psr gen_set_psr_x86_64 -#define gen_set_psr_im gen_set_psr_im_x86_64 -#define gen_shl gen_shl_x86_64 -#define gen_shr gen_shr_x86_64 -#define gen_smc gen_smc_x86_64 -#define gen_smul_dual gen_smul_dual_x86_64 -#define gen_srs gen_srs_x86_64 -#define gen_ss_advance gen_ss_advance_x86_64 -#define gen_step_complete_exception gen_step_complete_exception_x86_64 -#define gen_store_exclusive gen_store_exclusive_x86_64 -#define gen_storeq_reg gen_storeq_reg_x86_64 -#define gen_sub_carry gen_sub_carry_x86_64 -#define gen_sub_CC gen_sub_CC_x86_64 -#define gen_subq_msw gen_subq_msw_x86_64 -#define gen_swap_half gen_swap_half_x86_64 -#define gen_thumb2_data_op gen_thumb2_data_op_x86_64 -#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_x86_64 -#define gen_ubfx gen_ubfx_x86_64 -#define gen_vfp_abs gen_vfp_abs_x86_64 -#define gen_vfp_add gen_vfp_add_x86_64 -#define gen_vfp_cmp gen_vfp_cmp_x86_64 -#define gen_vfp_cmpe gen_vfp_cmpe_x86_64 -#define gen_vfp_div gen_vfp_div_x86_64 -#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_x86_64 -#define gen_vfp_F1_mul gen_vfp_F1_mul_x86_64 -#define gen_vfp_F1_neg gen_vfp_F1_neg_x86_64 -#define gen_vfp_ld gen_vfp_ld_x86_64 -#define gen_vfp_mrs gen_vfp_mrs_x86_64 -#define gen_vfp_msr gen_vfp_msr_x86_64 -#define gen_vfp_mul gen_vfp_mul_x86_64 -#define gen_vfp_neg gen_vfp_neg_x86_64 -#define gen_vfp_shto gen_vfp_shto_x86_64 -#define gen_vfp_sito gen_vfp_sito_x86_64 -#define gen_vfp_slto gen_vfp_slto_x86_64 -#define gen_vfp_sqrt gen_vfp_sqrt_x86_64 -#define gen_vfp_st gen_vfp_st_x86_64 -#define gen_vfp_sub gen_vfp_sub_x86_64 -#define gen_vfp_tosh gen_vfp_tosh_x86_64 -#define gen_vfp_tosi gen_vfp_tosi_x86_64 -#define gen_vfp_tosiz gen_vfp_tosiz_x86_64 -#define gen_vfp_tosl gen_vfp_tosl_x86_64 -#define gen_vfp_touh gen_vfp_touh_x86_64 -#define gen_vfp_toui gen_vfp_toui_x86_64 -#define gen_vfp_touiz gen_vfp_touiz_x86_64 -#define gen_vfp_toul gen_vfp_toul_x86_64 -#define gen_vfp_uhto gen_vfp_uhto_x86_64 -#define gen_vfp_uito gen_vfp_uito_x86_64 -#define gen_vfp_ulto gen_vfp_ulto_x86_64 -#define get_arm_cp_reginfo get_arm_cp_reginfo_x86_64 -#define get_clock get_clock_x86_64 -#define get_clock_realtime get_clock_realtime_x86_64 -#define get_constraint_priority get_constraint_priority_x86_64 -#define get_float_exception_flags get_float_exception_flags_x86_64 -#define get_float_rounding_mode get_float_rounding_mode_x86_64 -#define get_fpstatus_ptr get_fpstatus_ptr_x86_64 -#define get_level1_table_address get_level1_table_address_x86_64 -#define get_mem_index get_mem_index_x86_64 -#define get_next_param_value get_next_param_value_x86_64 -#define get_opt_name get_opt_name_x86_64 -#define get_opt_value get_opt_value_x86_64 -#define get_page_addr_code get_page_addr_code_x86_64 -#define get_param_value get_param_value_x86_64 -#define get_phys_addr get_phys_addr_x86_64 -#define get_phys_addr_lpae get_phys_addr_lpae_x86_64 -#define get_phys_addr_mpu get_phys_addr_mpu_x86_64 -#define get_phys_addr_v5 get_phys_addr_v5_x86_64 -#define get_phys_addr_v6 get_phys_addr_v6_x86_64 -#define get_system_memory get_system_memory_x86_64 -#define get_ticks_per_sec get_ticks_per_sec_x86_64 -#define g_list_insert_sorted_merged g_list_insert_sorted_merged_x86_64 -#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__x86_64 -#define gt_cntfrq_access gt_cntfrq_access_x86_64 -#define gt_cnt_read 
gt_cnt_read_x86_64 -#define gt_cnt_reset gt_cnt_reset_x86_64 -#define gt_counter_access gt_counter_access_x86_64 -#define gt_ctl_write gt_ctl_write_x86_64 -#define gt_cval_write gt_cval_write_x86_64 -#define gt_get_countervalue gt_get_countervalue_x86_64 -#define gt_pct_access gt_pct_access_x86_64 -#define gt_ptimer_access gt_ptimer_access_x86_64 -#define gt_recalc_timer gt_recalc_timer_x86_64 -#define gt_timer_access gt_timer_access_x86_64 -#define gt_tval_read gt_tval_read_x86_64 -#define gt_tval_write gt_tval_write_x86_64 -#define gt_vct_access gt_vct_access_x86_64 -#define gt_vtimer_access gt_vtimer_access_x86_64 -#define guest_phys_blocks_free guest_phys_blocks_free_x86_64 -#define guest_phys_blocks_init guest_phys_blocks_init_x86_64 -#define handle_vcvt handle_vcvt_x86_64 -#define handle_vminmaxnm handle_vminmaxnm_x86_64 -#define handle_vrint handle_vrint_x86_64 -#define handle_vsel handle_vsel_x86_64 -#define has_help_option has_help_option_x86_64 -#define have_bmi1 have_bmi1_x86_64 -#define have_bmi2 have_bmi2_x86_64 -#define hcr_write hcr_write_x86_64 -#define helper_access_check_cp_reg helper_access_check_cp_reg_x86_64 -#define helper_add_saturate helper_add_saturate_x86_64 -#define helper_add_setq helper_add_setq_x86_64 -#define helper_add_usaturate helper_add_usaturate_x86_64 -#define helper_be_ldl_cmmu helper_be_ldl_cmmu_x86_64 -#define helper_be_ldq_cmmu helper_be_ldq_cmmu_x86_64 -#define helper_be_ldq_mmu helper_be_ldq_mmu_x86_64 -#define helper_be_ldsl_mmu helper_be_ldsl_mmu_x86_64 -#define helper_be_ldsw_mmu helper_be_ldsw_mmu_x86_64 -#define helper_be_ldul_mmu helper_be_ldul_mmu_x86_64 -#define helper_be_lduw_mmu helper_be_lduw_mmu_x86_64 -#define helper_be_ldw_cmmu helper_be_ldw_cmmu_x86_64 -#define helper_be_stl_mmu helper_be_stl_mmu_x86_64 -#define helper_be_stq_mmu helper_be_stq_mmu_x86_64 -#define helper_be_stw_mmu helper_be_stw_mmu_x86_64 -#define helper_clear_pstate_ss helper_clear_pstate_ss_x86_64 -#define helper_clz_arm helper_clz_arm_x86_64 -#define helper_cpsr_read helper_cpsr_read_x86_64 -#define helper_cpsr_write helper_cpsr_write_x86_64 -#define helper_crc32_arm helper_crc32_arm_x86_64 -#define helper_crc32c helper_crc32c_x86_64 -#define helper_crypto_aese helper_crypto_aese_x86_64 -#define helper_crypto_aesmc helper_crypto_aesmc_x86_64 -#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_x86_64 -#define helper_crypto_sha1h helper_crypto_sha1h_x86_64 -#define helper_crypto_sha1su1 helper_crypto_sha1su1_x86_64 -#define helper_crypto_sha256h helper_crypto_sha256h_x86_64 -#define helper_crypto_sha256h2 helper_crypto_sha256h2_x86_64 -#define helper_crypto_sha256su0 helper_crypto_sha256su0_x86_64 -#define helper_crypto_sha256su1 helper_crypto_sha256su1_x86_64 -#define helper_dc_zva helper_dc_zva_x86_64 -#define helper_double_saturate helper_double_saturate_x86_64 -#define helper_exception_internal helper_exception_internal_x86_64 -#define helper_exception_return helper_exception_return_x86_64 -#define helper_exception_with_syndrome helper_exception_with_syndrome_x86_64 -#define helper_get_cp_reg helper_get_cp_reg_x86_64 -#define helper_get_cp_reg64 helper_get_cp_reg64_x86_64 -#define helper_get_r13_banked helper_get_r13_banked_x86_64 -#define helper_get_user_reg helper_get_user_reg_x86_64 -#define helper_iwmmxt_addcb helper_iwmmxt_addcb_x86_64 -#define helper_iwmmxt_addcl helper_iwmmxt_addcl_x86_64 -#define helper_iwmmxt_addcw helper_iwmmxt_addcw_x86_64 -#define helper_iwmmxt_addnb helper_iwmmxt_addnb_x86_64 -#define helper_iwmmxt_addnl 
helper_iwmmxt_addnl_x86_64 -#define helper_iwmmxt_addnw helper_iwmmxt_addnw_x86_64 -#define helper_iwmmxt_addsb helper_iwmmxt_addsb_x86_64 -#define helper_iwmmxt_addsl helper_iwmmxt_addsl_x86_64 -#define helper_iwmmxt_addsw helper_iwmmxt_addsw_x86_64 -#define helper_iwmmxt_addub helper_iwmmxt_addub_x86_64 -#define helper_iwmmxt_addul helper_iwmmxt_addul_x86_64 -#define helper_iwmmxt_adduw helper_iwmmxt_adduw_x86_64 -#define helper_iwmmxt_align helper_iwmmxt_align_x86_64 -#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_x86_64 -#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_x86_64 -#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_x86_64 -#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_x86_64 -#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_x86_64 -#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_x86_64 -#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_x86_64 -#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_x86_64 -#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_x86_64 -#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_x86_64 -#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_x86_64 -#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_x86_64 -#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_x86_64 -#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_x86_64 -#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_x86_64 -#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_x86_64 -#define helper_iwmmxt_insr helper_iwmmxt_insr_x86_64 -#define helper_iwmmxt_macsw helper_iwmmxt_macsw_x86_64 -#define helper_iwmmxt_macuw helper_iwmmxt_macuw_x86_64 -#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_x86_64 -#define helper_iwmmxt_madduq helper_iwmmxt_madduq_x86_64 -#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_x86_64 -#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_x86_64 -#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_x86_64 -#define helper_iwmmxt_maxub helper_iwmmxt_maxub_x86_64 -#define helper_iwmmxt_maxul helper_iwmmxt_maxul_x86_64 -#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_x86_64 -#define helper_iwmmxt_minsb helper_iwmmxt_minsb_x86_64 -#define helper_iwmmxt_minsl helper_iwmmxt_minsl_x86_64 -#define helper_iwmmxt_minsw helper_iwmmxt_minsw_x86_64 -#define helper_iwmmxt_minub helper_iwmmxt_minub_x86_64 -#define helper_iwmmxt_minul helper_iwmmxt_minul_x86_64 -#define helper_iwmmxt_minuw helper_iwmmxt_minuw_x86_64 -#define helper_iwmmxt_msbb helper_iwmmxt_msbb_x86_64 -#define helper_iwmmxt_msbl helper_iwmmxt_msbl_x86_64 -#define helper_iwmmxt_msbw helper_iwmmxt_msbw_x86_64 -#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_x86_64 -#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_x86_64 -#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_x86_64 -#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_x86_64 -#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_x86_64 -#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_x86_64 -#define helper_iwmmxt_mululw helper_iwmmxt_mululw_x86_64 -#define helper_iwmmxt_packsl helper_iwmmxt_packsl_x86_64 -#define helper_iwmmxt_packsq helper_iwmmxt_packsq_x86_64 -#define helper_iwmmxt_packsw helper_iwmmxt_packsw_x86_64 -#define helper_iwmmxt_packul helper_iwmmxt_packul_x86_64 -#define helper_iwmmxt_packuq helper_iwmmxt_packuq_x86_64 -#define helper_iwmmxt_packuw helper_iwmmxt_packuw_x86_64 -#define helper_iwmmxt_rorl helper_iwmmxt_rorl_x86_64 -#define helper_iwmmxt_rorq helper_iwmmxt_rorq_x86_64 -#define helper_iwmmxt_rorw helper_iwmmxt_rorw_x86_64 -#define helper_iwmmxt_sadb helper_iwmmxt_sadb_x86_64 -#define helper_iwmmxt_sadw helper_iwmmxt_sadw_x86_64 
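A rename table of this size is not maintained by hand; it is emitted mechanically from a list of the target's global symbols. A hypothetical stand-alone generator in the same spirit (Unicorn produces its real headers with its own generation script; the file names and usage shown here are assumptions):

    /* gen_defines_demo.c: print "#define sym sym_<suffix>" for each symbol.
     * Usage: ./gen_defines_demo x86_64 < symbols.txt > x86_64.h */
    #include <stdio.h>

    int main(int argc, char **argv) {
        const char *suffix = (argc > 1) ? argv[1] : "x86_64";
        char sym[256];
        while (scanf("%255s", sym) == 1)          /* one symbol per token */
            printf("#define %s %s_%s\n", sym, sym, suffix);
        return 0;
    }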
-#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_x86_64 -#define helper_iwmmxt_shufh helper_iwmmxt_shufh_x86_64 -#define helper_iwmmxt_slll helper_iwmmxt_slll_x86_64 -#define helper_iwmmxt_sllq helper_iwmmxt_sllq_x86_64 -#define helper_iwmmxt_sllw helper_iwmmxt_sllw_x86_64 -#define helper_iwmmxt_sral helper_iwmmxt_sral_x86_64 -#define helper_iwmmxt_sraq helper_iwmmxt_sraq_x86_64 -#define helper_iwmmxt_sraw helper_iwmmxt_sraw_x86_64 -#define helper_iwmmxt_srll helper_iwmmxt_srll_x86_64 -#define helper_iwmmxt_srlq helper_iwmmxt_srlq_x86_64 -#define helper_iwmmxt_srlw helper_iwmmxt_srlw_x86_64 -#define helper_iwmmxt_subnb helper_iwmmxt_subnb_x86_64 -#define helper_iwmmxt_subnl helper_iwmmxt_subnl_x86_64 -#define helper_iwmmxt_subnw helper_iwmmxt_subnw_x86_64 -#define helper_iwmmxt_subsb helper_iwmmxt_subsb_x86_64 -#define helper_iwmmxt_subsl helper_iwmmxt_subsl_x86_64 -#define helper_iwmmxt_subsw helper_iwmmxt_subsw_x86_64 -#define helper_iwmmxt_subub helper_iwmmxt_subub_x86_64 -#define helper_iwmmxt_subul helper_iwmmxt_subul_x86_64 -#define helper_iwmmxt_subuw helper_iwmmxt_subuw_x86_64 -#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_x86_64 -#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_x86_64 -#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_x86_64 -#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_x86_64 -#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_x86_64 -#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_x86_64 -#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_x86_64 -#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_x86_64 -#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_x86_64 -#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_x86_64 -#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_x86_64 -#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_x86_64 -#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_x86_64 -#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_x86_64 -#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_x86_64 -#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_x86_64 -#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_x86_64 -#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_x86_64 -#define helper_ldb_cmmu helper_ldb_cmmu_x86_64 -#define helper_ldb_mmu helper_ldb_mmu_x86_64 -#define helper_ldl_cmmu helper_ldl_cmmu_x86_64 -#define helper_ldl_mmu helper_ldl_mmu_x86_64 -#define helper_ldq_cmmu helper_ldq_cmmu_x86_64 -#define helper_ldq_mmu helper_ldq_mmu_x86_64 -#define helper_ldw_cmmu helper_ldw_cmmu_x86_64 -#define helper_ldw_mmu helper_ldw_mmu_x86_64 -#define helper_le_ldl_cmmu helper_le_ldl_cmmu_x86_64 -#define helper_le_ldq_cmmu helper_le_ldq_cmmu_x86_64 -#define helper_le_ldq_mmu helper_le_ldq_mmu_x86_64 -#define helper_le_ldsl_mmu helper_le_ldsl_mmu_x86_64 -#define helper_le_ldsw_mmu helper_le_ldsw_mmu_x86_64 -#define helper_le_ldul_mmu helper_le_ldul_mmu_x86_64 -#define helper_le_lduw_mmu helper_le_lduw_mmu_x86_64 -#define helper_le_ldw_cmmu helper_le_ldw_cmmu_x86_64 -#define helper_le_stl_mmu helper_le_stl_mmu_x86_64 -#define helper_le_stq_mmu helper_le_stq_mmu_x86_64 -#define helper_le_stw_mmu helper_le_stw_mmu_x86_64 -#define helper_msr_i_pstate helper_msr_i_pstate_x86_64 -#define helper_neon_abd_f32 helper_neon_abd_f32_x86_64 -#define helper_neon_abdl_s16 helper_neon_abdl_s16_x86_64 -#define helper_neon_abdl_s32 helper_neon_abdl_s32_x86_64 -#define helper_neon_abdl_s64 helper_neon_abdl_s64_x86_64 -#define helper_neon_abdl_u16 
helper_neon_abdl_u16_x86_64 -#define helper_neon_abdl_u32 helper_neon_abdl_u32_x86_64 -#define helper_neon_abdl_u64 helper_neon_abdl_u64_x86_64 -#define helper_neon_abd_s16 helper_neon_abd_s16_x86_64 -#define helper_neon_abd_s32 helper_neon_abd_s32_x86_64 -#define helper_neon_abd_s8 helper_neon_abd_s8_x86_64 -#define helper_neon_abd_u16 helper_neon_abd_u16_x86_64 -#define helper_neon_abd_u32 helper_neon_abd_u32_x86_64 -#define helper_neon_abd_u8 helper_neon_abd_u8_x86_64 -#define helper_neon_abs_s16 helper_neon_abs_s16_x86_64 -#define helper_neon_abs_s8 helper_neon_abs_s8_x86_64 -#define helper_neon_acge_f32 helper_neon_acge_f32_x86_64 -#define helper_neon_acge_f64 helper_neon_acge_f64_x86_64 -#define helper_neon_acgt_f32 helper_neon_acgt_f32_x86_64 -#define helper_neon_acgt_f64 helper_neon_acgt_f64_x86_64 -#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_x86_64 -#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_x86_64 -#define helper_neon_addl_u16 helper_neon_addl_u16_x86_64 -#define helper_neon_addl_u32 helper_neon_addl_u32_x86_64 -#define helper_neon_add_u16 helper_neon_add_u16_x86_64 -#define helper_neon_add_u8 helper_neon_add_u8_x86_64 -#define helper_neon_ceq_f32 helper_neon_ceq_f32_x86_64 -#define helper_neon_ceq_u16 helper_neon_ceq_u16_x86_64 -#define helper_neon_ceq_u32 helper_neon_ceq_u32_x86_64 -#define helper_neon_ceq_u8 helper_neon_ceq_u8_x86_64 -#define helper_neon_cge_f32 helper_neon_cge_f32_x86_64 -#define helper_neon_cge_s16 helper_neon_cge_s16_x86_64 -#define helper_neon_cge_s32 helper_neon_cge_s32_x86_64 -#define helper_neon_cge_s8 helper_neon_cge_s8_x86_64 -#define helper_neon_cge_u16 helper_neon_cge_u16_x86_64 -#define helper_neon_cge_u32 helper_neon_cge_u32_x86_64 -#define helper_neon_cge_u8 helper_neon_cge_u8_x86_64 -#define helper_neon_cgt_f32 helper_neon_cgt_f32_x86_64 -#define helper_neon_cgt_s16 helper_neon_cgt_s16_x86_64 -#define helper_neon_cgt_s32 helper_neon_cgt_s32_x86_64 -#define helper_neon_cgt_s8 helper_neon_cgt_s8_x86_64 -#define helper_neon_cgt_u16 helper_neon_cgt_u16_x86_64 -#define helper_neon_cgt_u32 helper_neon_cgt_u32_x86_64 -#define helper_neon_cgt_u8 helper_neon_cgt_u8_x86_64 -#define helper_neon_cls_s16 helper_neon_cls_s16_x86_64 -#define helper_neon_cls_s32 helper_neon_cls_s32_x86_64 -#define helper_neon_cls_s8 helper_neon_cls_s8_x86_64 -#define helper_neon_clz_u16 helper_neon_clz_u16_x86_64 -#define helper_neon_clz_u8 helper_neon_clz_u8_x86_64 -#define helper_neon_cnt_u8 helper_neon_cnt_u8_x86_64 -#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_x86_64 -#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_x86_64 -#define helper_neon_hadd_s16 helper_neon_hadd_s16_x86_64 -#define helper_neon_hadd_s32 helper_neon_hadd_s32_x86_64 -#define helper_neon_hadd_s8 helper_neon_hadd_s8_x86_64 -#define helper_neon_hadd_u16 helper_neon_hadd_u16_x86_64 -#define helper_neon_hadd_u32 helper_neon_hadd_u32_x86_64 -#define helper_neon_hadd_u8 helper_neon_hadd_u8_x86_64 -#define helper_neon_hsub_s16 helper_neon_hsub_s16_x86_64 -#define helper_neon_hsub_s32 helper_neon_hsub_s32_x86_64 -#define helper_neon_hsub_s8 helper_neon_hsub_s8_x86_64 -#define helper_neon_hsub_u16 helper_neon_hsub_u16_x86_64 -#define helper_neon_hsub_u32 helper_neon_hsub_u32_x86_64 -#define helper_neon_hsub_u8 helper_neon_hsub_u8_x86_64 -#define helper_neon_max_s16 helper_neon_max_s16_x86_64 -#define helper_neon_max_s32 helper_neon_max_s32_x86_64 -#define helper_neon_max_s8 helper_neon_max_s8_x86_64 -#define helper_neon_max_u16 
helper_neon_max_u16_x86_64 -#define helper_neon_max_u32 helper_neon_max_u32_x86_64 -#define helper_neon_max_u8 helper_neon_max_u8_x86_64 -#define helper_neon_min_s16 helper_neon_min_s16_x86_64 -#define helper_neon_min_s32 helper_neon_min_s32_x86_64 -#define helper_neon_min_s8 helper_neon_min_s8_x86_64 -#define helper_neon_min_u16 helper_neon_min_u16_x86_64 -#define helper_neon_min_u32 helper_neon_min_u32_x86_64 -#define helper_neon_min_u8 helper_neon_min_u8_x86_64 -#define helper_neon_mull_p8 helper_neon_mull_p8_x86_64 -#define helper_neon_mull_s16 helper_neon_mull_s16_x86_64 -#define helper_neon_mull_s8 helper_neon_mull_s8_x86_64 -#define helper_neon_mull_u16 helper_neon_mull_u16_x86_64 -#define helper_neon_mull_u8 helper_neon_mull_u8_x86_64 -#define helper_neon_mul_p8 helper_neon_mul_p8_x86_64 -#define helper_neon_mul_u16 helper_neon_mul_u16_x86_64 -#define helper_neon_mul_u8 helper_neon_mul_u8_x86_64 -#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_x86_64 -#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_x86_64 -#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_x86_64 -#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_x86_64 -#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_x86_64 -#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_x86_64 -#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_x86_64 -#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_x86_64 -#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_x86_64 -#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_x86_64 -#define helper_neon_narrow_u16 helper_neon_narrow_u16_x86_64 -#define helper_neon_narrow_u8 helper_neon_narrow_u8_x86_64 -#define helper_neon_negl_u16 helper_neon_negl_u16_x86_64 -#define helper_neon_negl_u32 helper_neon_negl_u32_x86_64 -#define helper_neon_paddl_u16 helper_neon_paddl_u16_x86_64 -#define helper_neon_paddl_u32 helper_neon_paddl_u32_x86_64 -#define helper_neon_padd_u16 helper_neon_padd_u16_x86_64 -#define helper_neon_padd_u8 helper_neon_padd_u8_x86_64 -#define helper_neon_pmax_s16 helper_neon_pmax_s16_x86_64 -#define helper_neon_pmax_s8 helper_neon_pmax_s8_x86_64 -#define helper_neon_pmax_u16 helper_neon_pmax_u16_x86_64 -#define helper_neon_pmax_u8 helper_neon_pmax_u8_x86_64 -#define helper_neon_pmin_s16 helper_neon_pmin_s16_x86_64 -#define helper_neon_pmin_s8 helper_neon_pmin_s8_x86_64 -#define helper_neon_pmin_u16 helper_neon_pmin_u16_x86_64 -#define helper_neon_pmin_u8 helper_neon_pmin_u8_x86_64 -#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_x86_64 -#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_x86_64 -#define helper_neon_qabs_s16 helper_neon_qabs_s16_x86_64 -#define helper_neon_qabs_s32 helper_neon_qabs_s32_x86_64 -#define helper_neon_qabs_s64 helper_neon_qabs_s64_x86_64 -#define helper_neon_qabs_s8 helper_neon_qabs_s8_x86_64 -#define helper_neon_qadd_s16 helper_neon_qadd_s16_x86_64 -#define helper_neon_qadd_s32 helper_neon_qadd_s32_x86_64 -#define helper_neon_qadd_s64 helper_neon_qadd_s64_x86_64 -#define helper_neon_qadd_s8 helper_neon_qadd_s8_x86_64 -#define helper_neon_qadd_u16 helper_neon_qadd_u16_x86_64 -#define helper_neon_qadd_u32 helper_neon_qadd_u32_x86_64 -#define helper_neon_qadd_u64 helper_neon_qadd_u64_x86_64 -#define helper_neon_qadd_u8 helper_neon_qadd_u8_x86_64 -#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_x86_64 -#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_x86_64 -#define helper_neon_qneg_s16 
helper_neon_qneg_s16_x86_64 -#define helper_neon_qneg_s32 helper_neon_qneg_s32_x86_64 -#define helper_neon_qneg_s64 helper_neon_qneg_s64_x86_64 -#define helper_neon_qneg_s8 helper_neon_qneg_s8_x86_64 -#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_x86_64 -#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_x86_64 -#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_x86_64 -#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_x86_64 -#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_x86_64 -#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_x86_64 -#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_x86_64 -#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_x86_64 -#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_x86_64 -#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_x86_64 -#define helper_neon_qshl_s16 helper_neon_qshl_s16_x86_64 -#define helper_neon_qshl_s32 helper_neon_qshl_s32_x86_64 -#define helper_neon_qshl_s64 helper_neon_qshl_s64_x86_64 -#define helper_neon_qshl_s8 helper_neon_qshl_s8_x86_64 -#define helper_neon_qshl_u16 helper_neon_qshl_u16_x86_64 -#define helper_neon_qshl_u32 helper_neon_qshl_u32_x86_64 -#define helper_neon_qshl_u64 helper_neon_qshl_u64_x86_64 -#define helper_neon_qshl_u8 helper_neon_qshl_u8_x86_64 -#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_x86_64 -#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_x86_64 -#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_x86_64 -#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_x86_64 -#define helper_neon_qsub_s16 helper_neon_qsub_s16_x86_64 -#define helper_neon_qsub_s32 helper_neon_qsub_s32_x86_64 -#define helper_neon_qsub_s64 helper_neon_qsub_s64_x86_64 -#define helper_neon_qsub_s8 helper_neon_qsub_s8_x86_64 -#define helper_neon_qsub_u16 helper_neon_qsub_u16_x86_64 -#define helper_neon_qsub_u32 helper_neon_qsub_u32_x86_64 -#define helper_neon_qsub_u64 helper_neon_qsub_u64_x86_64 -#define helper_neon_qsub_u8 helper_neon_qsub_u8_x86_64 -#define helper_neon_qunzip16 helper_neon_qunzip16_x86_64 -#define helper_neon_qunzip32 helper_neon_qunzip32_x86_64 -#define helper_neon_qunzip8 helper_neon_qunzip8_x86_64 -#define helper_neon_qzip16 helper_neon_qzip16_x86_64 -#define helper_neon_qzip32 helper_neon_qzip32_x86_64 -#define helper_neon_qzip8 helper_neon_qzip8_x86_64 -#define helper_neon_rbit_u8 helper_neon_rbit_u8_x86_64 -#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_x86_64 -#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_x86_64 -#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_x86_64 -#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_x86_64 -#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_x86_64 -#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_x86_64 -#define helper_neon_rshl_s16 helper_neon_rshl_s16_x86_64 -#define helper_neon_rshl_s32 helper_neon_rshl_s32_x86_64 -#define helper_neon_rshl_s64 helper_neon_rshl_s64_x86_64 -#define helper_neon_rshl_s8 helper_neon_rshl_s8_x86_64 -#define helper_neon_rshl_u16 helper_neon_rshl_u16_x86_64 -#define helper_neon_rshl_u32 helper_neon_rshl_u32_x86_64 -#define helper_neon_rshl_u64 helper_neon_rshl_u64_x86_64 -#define helper_neon_rshl_u8 helper_neon_rshl_u8_x86_64 -#define helper_neon_shl_s16 helper_neon_shl_s16_x86_64 -#define helper_neon_shl_s32 helper_neon_shl_s32_x86_64 -#define helper_neon_shl_s64 helper_neon_shl_s64_x86_64 -#define helper_neon_shl_s8 helper_neon_shl_s8_x86_64 -#define helper_neon_shl_u16 helper_neon_shl_u16_x86_64 -#define helper_neon_shl_u32 helper_neon_shl_u32_x86_64 -#define helper_neon_shl_u64 
helper_neon_shl_u64_x86_64 -#define helper_neon_shl_u8 helper_neon_shl_u8_x86_64 -#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_x86_64 -#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_x86_64 -#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_x86_64 -#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_x86_64 -#define helper_neon_subl_u16 helper_neon_subl_u16_x86_64 -#define helper_neon_subl_u32 helper_neon_subl_u32_x86_64 -#define helper_neon_sub_u16 helper_neon_sub_u16_x86_64 -#define helper_neon_sub_u8 helper_neon_sub_u8_x86_64 -#define helper_neon_tbl helper_neon_tbl_x86_64 -#define helper_neon_tst_u16 helper_neon_tst_u16_x86_64 -#define helper_neon_tst_u32 helper_neon_tst_u32_x86_64 -#define helper_neon_tst_u8 helper_neon_tst_u8_x86_64 -#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_x86_64 -#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_x86_64 -#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_x86_64 -#define helper_neon_unzip16 helper_neon_unzip16_x86_64 -#define helper_neon_unzip8 helper_neon_unzip8_x86_64 -#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_x86_64 -#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_x86_64 -#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_x86_64 -#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_x86_64 -#define helper_neon_widen_s16 helper_neon_widen_s16_x86_64 -#define helper_neon_widen_s8 helper_neon_widen_s8_x86_64 -#define helper_neon_widen_u16 helper_neon_widen_u16_x86_64 -#define helper_neon_widen_u8 helper_neon_widen_u8_x86_64 -#define helper_neon_zip16 helper_neon_zip16_x86_64 -#define helper_neon_zip8 helper_neon_zip8_x86_64 -#define helper_pre_hvc helper_pre_hvc_x86_64 -#define helper_pre_smc helper_pre_smc_x86_64 -#define helper_qadd16 helper_qadd16_x86_64 -#define helper_qadd8 helper_qadd8_x86_64 -#define helper_qaddsubx helper_qaddsubx_x86_64 -#define helper_qsub16 helper_qsub16_x86_64 -#define helper_qsub8 helper_qsub8_x86_64 -#define helper_qsubaddx helper_qsubaddx_x86_64 -#define helper_rbit helper_rbit_x86_64 -#define helper_recpe_f32 helper_recpe_f32_x86_64 -#define helper_recpe_f64 helper_recpe_f64_x86_64 -#define helper_recpe_u32 helper_recpe_u32_x86_64 -#define helper_recps_f32 helper_recps_f32_x86_64 -#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_x86_64 -#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_x86_64 -#define helper_ret_ldub_mmu helper_ret_ldub_mmu_x86_64 -#define helper_ret_stb_mmu helper_ret_stb_mmu_x86_64 -#define helper_rintd helper_rintd_x86_64 -#define helper_rintd_exact helper_rintd_exact_x86_64 -#define helper_rints helper_rints_x86_64 -#define helper_rints_exact helper_rints_exact_x86_64 -#define helper_ror_cc helper_ror_cc_x86_64 -#define helper_rsqrte_f32 helper_rsqrte_f32_x86_64 -#define helper_rsqrte_f64 helper_rsqrte_f64_x86_64 -#define helper_rsqrte_u32 helper_rsqrte_u32_x86_64 -#define helper_rsqrts_f32 helper_rsqrts_f32_x86_64 -#define helper_sadd16 helper_sadd16_x86_64 -#define helper_sadd8 helper_sadd8_x86_64 -#define helper_saddsubx helper_saddsubx_x86_64 -#define helper_sar_cc helper_sar_cc_x86_64 -#define helper_sdiv helper_sdiv_x86_64 -#define helper_sel_flags helper_sel_flags_x86_64 -#define helper_set_cp_reg helper_set_cp_reg_x86_64 -#define helper_set_cp_reg64 helper_set_cp_reg64_x86_64 -#define helper_set_neon_rmode helper_set_neon_rmode_x86_64 -#define helper_set_r13_banked helper_set_r13_banked_x86_64 -#define helper_set_rmode helper_set_rmode_x86_64 -#define helper_set_user_reg helper_set_user_reg_x86_64 -#define helper_shadd16 
helper_shadd16_x86_64 -#define helper_shadd8 helper_shadd8_x86_64 -#define helper_shaddsubx helper_shaddsubx_x86_64 -#define helper_shl_cc helper_shl_cc_x86_64 -#define helper_shr_cc helper_shr_cc_x86_64 -#define helper_shsub16 helper_shsub16_x86_64 -#define helper_shsub8 helper_shsub8_x86_64 -#define helper_shsubaddx helper_shsubaddx_x86_64 -#define helper_ssat helper_ssat_x86_64 -#define helper_ssat16 helper_ssat16_x86_64 -#define helper_ssub16 helper_ssub16_x86_64 -#define helper_ssub8 helper_ssub8_x86_64 -#define helper_ssubaddx helper_ssubaddx_x86_64 -#define helper_stb_mmu helper_stb_mmu_x86_64 -#define helper_stl_mmu helper_stl_mmu_x86_64 -#define helper_stq_mmu helper_stq_mmu_x86_64 -#define helper_stw_mmu helper_stw_mmu_x86_64 -#define helper_sub_saturate helper_sub_saturate_x86_64 -#define helper_sub_usaturate helper_sub_usaturate_x86_64 -#define helper_sxtb16 helper_sxtb16_x86_64 -#define helper_uadd16 helper_uadd16_x86_64 -#define helper_uadd8 helper_uadd8_x86_64 -#define helper_uaddsubx helper_uaddsubx_x86_64 -#define helper_udiv helper_udiv_x86_64 -#define helper_uhadd16 helper_uhadd16_x86_64 -#define helper_uhadd8 helper_uhadd8_x86_64 -#define helper_uhaddsubx helper_uhaddsubx_x86_64 -#define helper_uhsub16 helper_uhsub16_x86_64 -#define helper_uhsub8 helper_uhsub8_x86_64 -#define helper_uhsubaddx helper_uhsubaddx_x86_64 -#define helper_uqadd16 helper_uqadd16_x86_64 -#define helper_uqadd8 helper_uqadd8_x86_64 -#define helper_uqaddsubx helper_uqaddsubx_x86_64 -#define helper_uqsub16 helper_uqsub16_x86_64 -#define helper_uqsub8 helper_uqsub8_x86_64 -#define helper_uqsubaddx helper_uqsubaddx_x86_64 -#define helper_usad8 helper_usad8_x86_64 -#define helper_usat helper_usat_x86_64 -#define helper_usat16 helper_usat16_x86_64 -#define helper_usub16 helper_usub16_x86_64 -#define helper_usub8 helper_usub8_x86_64 -#define helper_usubaddx helper_usubaddx_x86_64 -#define helper_uxtb16 helper_uxtb16_x86_64 -#define helper_v7m_mrs helper_v7m_mrs_x86_64 -#define helper_v7m_msr helper_v7m_msr_x86_64 -#define helper_vfp_absd helper_vfp_absd_x86_64 -#define helper_vfp_abss helper_vfp_abss_x86_64 -#define helper_vfp_addd helper_vfp_addd_x86_64 -#define helper_vfp_adds helper_vfp_adds_x86_64 -#define helper_vfp_cmpd helper_vfp_cmpd_x86_64 -#define helper_vfp_cmped helper_vfp_cmped_x86_64 -#define helper_vfp_cmpes helper_vfp_cmpes_x86_64 -#define helper_vfp_cmps helper_vfp_cmps_x86_64 -#define helper_vfp_divd helper_vfp_divd_x86_64 -#define helper_vfp_divs helper_vfp_divs_x86_64 -#define helper_vfp_fcvtds helper_vfp_fcvtds_x86_64 -#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_x86_64 -#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_x86_64 -#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_x86_64 -#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_x86_64 -#define helper_vfp_fcvtsd helper_vfp_fcvtsd_x86_64 -#define helper_vfp_get_fpscr helper_vfp_get_fpscr_x86_64 -#define helper_vfp_maxd helper_vfp_maxd_x86_64 -#define helper_vfp_maxnumd helper_vfp_maxnumd_x86_64 -#define helper_vfp_maxnums helper_vfp_maxnums_x86_64 -#define helper_vfp_maxs helper_vfp_maxs_x86_64 -#define helper_vfp_mind helper_vfp_mind_x86_64 -#define helper_vfp_minnumd helper_vfp_minnumd_x86_64 -#define helper_vfp_minnums helper_vfp_minnums_x86_64 -#define helper_vfp_mins helper_vfp_mins_x86_64 -#define helper_vfp_muladdd helper_vfp_muladdd_x86_64 -#define helper_vfp_muladds helper_vfp_muladds_x86_64 -#define helper_vfp_muld helper_vfp_muld_x86_64 -#define helper_vfp_muls 
helper_vfp_muls_x86_64 -#define helper_vfp_negd helper_vfp_negd_x86_64 -#define helper_vfp_negs helper_vfp_negs_x86_64 -#define helper_vfp_set_fpscr helper_vfp_set_fpscr_x86_64 -#define helper_vfp_shtod helper_vfp_shtod_x86_64 -#define helper_vfp_shtos helper_vfp_shtos_x86_64 -#define helper_vfp_sitod helper_vfp_sitod_x86_64 -#define helper_vfp_sitos helper_vfp_sitos_x86_64 -#define helper_vfp_sltod helper_vfp_sltod_x86_64 -#define helper_vfp_sltos helper_vfp_sltos_x86_64 -#define helper_vfp_sqrtd helper_vfp_sqrtd_x86_64 -#define helper_vfp_sqrts helper_vfp_sqrts_x86_64 -#define helper_vfp_sqtod helper_vfp_sqtod_x86_64 -#define helper_vfp_sqtos helper_vfp_sqtos_x86_64 -#define helper_vfp_subd helper_vfp_subd_x86_64 -#define helper_vfp_subs helper_vfp_subs_x86_64 -#define helper_vfp_toshd helper_vfp_toshd_x86_64 -#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_x86_64 -#define helper_vfp_toshs helper_vfp_toshs_x86_64 -#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_x86_64 -#define helper_vfp_tosid helper_vfp_tosid_x86_64 -#define helper_vfp_tosis helper_vfp_tosis_x86_64 -#define helper_vfp_tosizd helper_vfp_tosizd_x86_64 -#define helper_vfp_tosizs helper_vfp_tosizs_x86_64 -#define helper_vfp_tosld helper_vfp_tosld_x86_64 -#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_x86_64 -#define helper_vfp_tosls helper_vfp_tosls_x86_64 -#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_x86_64 -#define helper_vfp_tosqd helper_vfp_tosqd_x86_64 -#define helper_vfp_tosqs helper_vfp_tosqs_x86_64 -#define helper_vfp_touhd helper_vfp_touhd_x86_64 -#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_x86_64 -#define helper_vfp_touhs helper_vfp_touhs_x86_64 -#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_x86_64 -#define helper_vfp_touid helper_vfp_touid_x86_64 -#define helper_vfp_touis helper_vfp_touis_x86_64 -#define helper_vfp_touizd helper_vfp_touizd_x86_64 -#define helper_vfp_touizs helper_vfp_touizs_x86_64 -#define helper_vfp_tould helper_vfp_tould_x86_64 -#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_x86_64 -#define helper_vfp_touls helper_vfp_touls_x86_64 -#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_x86_64 -#define helper_vfp_touqd helper_vfp_touqd_x86_64 -#define helper_vfp_touqs helper_vfp_touqs_x86_64 -#define helper_vfp_uhtod helper_vfp_uhtod_x86_64 -#define helper_vfp_uhtos helper_vfp_uhtos_x86_64 -#define helper_vfp_uitod helper_vfp_uitod_x86_64 -#define helper_vfp_uitos helper_vfp_uitos_x86_64 -#define helper_vfp_ultod helper_vfp_ultod_x86_64 -#define helper_vfp_ultos helper_vfp_ultos_x86_64 -#define helper_vfp_uqtod helper_vfp_uqtod_x86_64 -#define helper_vfp_uqtos helper_vfp_uqtos_x86_64 -#define helper_wfe helper_wfe_x86_64 -#define helper_wfi helper_wfi_x86_64 -#define hex2decimal hex2decimal_x86_64 -#define hw_breakpoint_update hw_breakpoint_update_x86_64 -#define hw_breakpoint_update_all hw_breakpoint_update_all_x86_64 -#define hw_watchpoint_update hw_watchpoint_update_x86_64 -#define hw_watchpoint_update_all hw_watchpoint_update_all_x86_64 -#define _init _init_x86_64 -#define init_cpreg_list init_cpreg_list_x86_64 -#define init_lists init_lists_x86_64 -#define input_type_enum input_type_enum_x86_64 -#define int128_2_64 int128_2_64_x86_64 -#define int128_add int128_add_x86_64 -#define int128_addto int128_addto_x86_64 -#define int128_and int128_and_x86_64 -#define int128_eq int128_eq_x86_64 -#define int128_ge 
int128_ge_x86_64 -#define int128_get64 int128_get64_x86_64 -#define int128_gt int128_gt_x86_64 -#define int128_le int128_le_x86_64 -#define int128_lt int128_lt_x86_64 -#define int128_make64 int128_make64_x86_64 -#define int128_max int128_max_x86_64 -#define int128_min int128_min_x86_64 -#define int128_ne int128_ne_x86_64 -#define int128_neg int128_neg_x86_64 -#define int128_nz int128_nz_x86_64 -#define int128_rshift int128_rshift_x86_64 -#define int128_sub int128_sub_x86_64 -#define int128_subfrom int128_subfrom_x86_64 -#define int128_zero int128_zero_x86_64 -#define int16_to_float32 int16_to_float32_x86_64 -#define int16_to_float64 int16_to_float64_x86_64 -#define int32_to_float128 int32_to_float128_x86_64 -#define int32_to_float32 int32_to_float32_x86_64 -#define int32_to_float64 int32_to_float64_x86_64 -#define int32_to_floatx80 int32_to_floatx80_x86_64 -#define int64_to_float128 int64_to_float128_x86_64 -#define int64_to_float32 int64_to_float32_x86_64 -#define int64_to_float64 int64_to_float64_x86_64 -#define int64_to_floatx80 int64_to_floatx80_x86_64 -#define invalidate_and_set_dirty invalidate_and_set_dirty_x86_64 -#define invalidate_page_bitmap invalidate_page_bitmap_x86_64 -#define io_mem_read io_mem_read_x86_64 -#define io_mem_write io_mem_write_x86_64 -#define io_readb io_readb_x86_64 -#define io_readl io_readl_x86_64 -#define io_readq io_readq_x86_64 -#define io_readw io_readw_x86_64 -#define iotlb_to_region iotlb_to_region_x86_64 -#define io_writeb io_writeb_x86_64 -#define io_writel io_writel_x86_64 -#define io_writeq io_writeq_x86_64 -#define io_writew io_writew_x86_64 -#define is_a64 is_a64_x86_64 -#define is_help_option is_help_option_x86_64 -#define isr_read isr_read_x86_64 -#define is_valid_option_list is_valid_option_list_x86_64 -#define iwmmxt_load_creg iwmmxt_load_creg_x86_64 -#define iwmmxt_load_reg iwmmxt_load_reg_x86_64 -#define iwmmxt_store_creg iwmmxt_store_creg_x86_64 -#define iwmmxt_store_reg iwmmxt_store_reg_x86_64 -#define __jit_debug_descriptor __jit_debug_descriptor_x86_64 -#define __jit_debug_register_code __jit_debug_register_code_x86_64 -#define kvm_to_cpreg_id kvm_to_cpreg_id_x86_64 -#define last_ram_offset last_ram_offset_x86_64 -#define ldl_be_p ldl_be_p_x86_64 -#define ldl_be_phys ldl_be_phys_x86_64 -#define ldl_he_p ldl_he_p_x86_64 -#define ldl_le_p ldl_le_p_x86_64 -#define ldl_le_phys ldl_le_phys_x86_64 -#define ldl_phys ldl_phys_x86_64 -#define ldl_phys_internal ldl_phys_internal_x86_64 -#define ldq_be_p ldq_be_p_x86_64 -#define ldq_be_phys ldq_be_phys_x86_64 -#define ldq_he_p ldq_he_p_x86_64 -#define ldq_le_p ldq_le_p_x86_64 -#define ldq_le_phys ldq_le_phys_x86_64 -#define ldq_phys ldq_phys_x86_64 -#define ldq_phys_internal ldq_phys_internal_x86_64 -#define ldst_name ldst_name_x86_64 -#define ldub_p ldub_p_x86_64 -#define ldub_phys ldub_phys_x86_64 -#define lduw_be_p lduw_be_p_x86_64 -#define lduw_be_phys lduw_be_phys_x86_64 -#define lduw_he_p lduw_he_p_x86_64 -#define lduw_le_p lduw_le_p_x86_64 -#define lduw_le_phys lduw_le_phys_x86_64 -#define lduw_phys lduw_phys_x86_64 -#define lduw_phys_internal lduw_phys_internal_x86_64 -#define le128 le128_x86_64 -#define linked_bp_matches linked_bp_matches_x86_64 -#define listener_add_address_space listener_add_address_space_x86_64 -#define load_cpu_offset load_cpu_offset_x86_64 -#define load_reg load_reg_x86_64 -#define load_reg_var load_reg_var_x86_64 -#define log_cpu_state log_cpu_state_x86_64 -#define lpae_cp_reginfo lpae_cp_reginfo_x86_64 -#define lt128 lt128_x86_64 -#define machine_class_init 
machine_class_init_x86_64 -#define machine_finalize machine_finalize_x86_64 -#define machine_info machine_info_x86_64 -#define machine_initfn machine_initfn_x86_64 -#define machine_register_types machine_register_types_x86_64 -#define machvirt_init machvirt_init_x86_64 -#define machvirt_machine_init machvirt_machine_init_x86_64 -#define maj maj_x86_64 -#define mapping_conflict mapping_conflict_x86_64 -#define mapping_contiguous mapping_contiguous_x86_64 -#define mapping_have_same_region mapping_have_same_region_x86_64 -#define mapping_merge mapping_merge_x86_64 -#define mem_add mem_add_x86_64 -#define mem_begin mem_begin_x86_64 -#define mem_commit mem_commit_x86_64 -#define memory_access_is_direct memory_access_is_direct_x86_64 -#define memory_access_size memory_access_size_x86_64 -#define memory_init memory_init_x86_64 -#define memory_listener_match memory_listener_match_x86_64 -#define memory_listener_register memory_listener_register_x86_64 -#define memory_listener_unregister memory_listener_unregister_x86_64 -#define memory_map_init memory_map_init_x86_64 -#define memory_mapping_filter memory_mapping_filter_x86_64 -#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_x86_64 -#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_x86_64 -#define memory_mapping_list_free memory_mapping_list_free_x86_64 -#define memory_mapping_list_init memory_mapping_list_init_x86_64 -#define memory_region_access_valid memory_region_access_valid_x86_64 -#define memory_region_add_subregion memory_region_add_subregion_x86_64 -#define memory_region_add_subregion_common memory_region_add_subregion_common_x86_64 -#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_x86_64 -#define memory_region_big_endian memory_region_big_endian_x86_64 -#define memory_region_clear_pending memory_region_clear_pending_x86_64 -#define memory_region_del_subregion memory_region_del_subregion_x86_64 -#define memory_region_destructor_alias memory_region_destructor_alias_x86_64 -#define memory_region_destructor_none memory_region_destructor_none_x86_64 -#define memory_region_destructor_ram memory_region_destructor_ram_x86_64 -#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_x86_64 -#define memory_region_dispatch_read memory_region_dispatch_read_x86_64 -#define memory_region_dispatch_read1 memory_region_dispatch_read1_x86_64 -#define memory_region_dispatch_write memory_region_dispatch_write_x86_64 -#define memory_region_escape_name memory_region_escape_name_x86_64 -#define memory_region_finalize memory_region_finalize_x86_64 -#define memory_region_find memory_region_find_x86_64 -#define memory_region_get_addr memory_region_get_addr_x86_64 -#define memory_region_get_alignment memory_region_get_alignment_x86_64 -#define memory_region_get_container memory_region_get_container_x86_64 -#define memory_region_get_fd memory_region_get_fd_x86_64 -#define memory_region_get_may_overlap memory_region_get_may_overlap_x86_64 -#define memory_region_get_priority memory_region_get_priority_x86_64 -#define memory_region_get_ram_addr memory_region_get_ram_addr_x86_64 -#define memory_region_get_ram_ptr memory_region_get_ram_ptr_x86_64 -#define memory_region_get_size memory_region_get_size_x86_64 -#define memory_region_info memory_region_info_x86_64 -#define memory_region_init memory_region_init_x86_64 -#define memory_region_init_alias memory_region_init_alias_x86_64 -#define memory_region_initfn memory_region_initfn_x86_64 -#define 
memory_region_init_io memory_region_init_io_x86_64 -#define memory_region_init_ram memory_region_init_ram_x86_64 -#define memory_region_init_ram_ptr memory_region_init_ram_ptr_x86_64 -#define memory_region_init_reservation memory_region_init_reservation_x86_64 -#define memory_region_is_iommu memory_region_is_iommu_x86_64 -#define memory_region_is_logging memory_region_is_logging_x86_64 -#define memory_region_is_mapped memory_region_is_mapped_x86_64 -#define memory_region_is_ram memory_region_is_ram_x86_64 -#define memory_region_is_rom memory_region_is_rom_x86_64 -#define memory_region_is_romd memory_region_is_romd_x86_64 -#define memory_region_is_skip_dump memory_region_is_skip_dump_x86_64 -#define memory_region_is_unassigned memory_region_is_unassigned_x86_64 -#define memory_region_name memory_region_name_x86_64 -#define memory_region_need_escape memory_region_need_escape_x86_64 -#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_x86_64 -#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_x86_64 -#define memory_region_present memory_region_present_x86_64 -#define memory_region_read_accessor memory_region_read_accessor_x86_64 -#define memory_region_readd_subregion memory_region_readd_subregion_x86_64 -#define memory_region_ref memory_region_ref_x86_64 -#define memory_region_resolve_container memory_region_resolve_container_x86_64 -#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_x86_64 -#define memory_region_section_get_iotlb memory_region_section_get_iotlb_x86_64 -#define memory_region_set_address memory_region_set_address_x86_64 -#define memory_region_set_alias_offset memory_region_set_alias_offset_x86_64 -#define memory_region_set_enabled memory_region_set_enabled_x86_64 -#define memory_region_set_readonly memory_region_set_readonly_x86_64 -#define memory_region_set_skip_dump memory_region_set_skip_dump_x86_64 -#define memory_region_size memory_region_size_x86_64 -#define memory_region_to_address_space memory_region_to_address_space_x86_64 -#define memory_region_transaction_begin memory_region_transaction_begin_x86_64 -#define memory_region_transaction_commit memory_region_transaction_commit_x86_64 -#define memory_region_unref memory_region_unref_x86_64 -#define memory_region_update_container_subregions memory_region_update_container_subregions_x86_64 -#define memory_region_write_accessor memory_region_write_accessor_x86_64 -#define memory_region_wrong_endianness memory_region_wrong_endianness_x86_64 -#define memory_try_enable_merging memory_try_enable_merging_x86_64 -#define module_call_init module_call_init_x86_64 -#define module_load module_load_x86_64 -#define mpidr_cp_reginfo mpidr_cp_reginfo_x86_64 -#define mpidr_read mpidr_read_x86_64 -#define msr_mask msr_mask_x86_64 -#define mul128By64To192 mul128By64To192_x86_64 -#define mul128To256 mul128To256_x86_64 -#define mul64To128 mul64To128_x86_64 -#define muldiv64 muldiv64_x86_64 -#define neon_2rm_is_float_op neon_2rm_is_float_op_x86_64 -#define neon_2rm_sizes neon_2rm_sizes_x86_64 -#define neon_3r_sizes neon_3r_sizes_x86_64 -#define neon_get_scalar neon_get_scalar_x86_64 -#define neon_load_reg neon_load_reg_x86_64 -#define neon_load_reg64 neon_load_reg64_x86_64 -#define neon_load_scratch neon_load_scratch_x86_64 -#define neon_ls_element_type neon_ls_element_type_x86_64 -#define neon_reg_offset neon_reg_offset_x86_64 -#define neon_store_reg neon_store_reg_x86_64 -#define neon_store_reg64 neon_store_reg64_x86_64 -#define neon_store_scratch 
neon_store_scratch_x86_64 -#define new_ldst_label new_ldst_label_x86_64 -#define next_list next_list_x86_64 -#define normalizeFloat128Subnormal normalizeFloat128Subnormal_x86_64 -#define normalizeFloat16Subnormal normalizeFloat16Subnormal_x86_64 -#define normalizeFloat32Subnormal normalizeFloat32Subnormal_x86_64 -#define normalizeFloat64Subnormal normalizeFloat64Subnormal_x86_64 -#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_x86_64 -#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_x86_64 -#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_x86_64 -#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_x86_64 -#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_x86_64 -#define not_v6_cp_reginfo not_v6_cp_reginfo_x86_64 -#define not_v7_cp_reginfo not_v7_cp_reginfo_x86_64 -#define not_v8_cp_reginfo not_v8_cp_reginfo_x86_64 -#define object_child_foreach object_child_foreach_x86_64 -#define object_class_foreach object_class_foreach_x86_64 -#define object_class_foreach_tramp object_class_foreach_tramp_x86_64 -#define object_class_get_list object_class_get_list_x86_64 -#define object_class_get_list_tramp object_class_get_list_tramp_x86_64 -#define object_class_get_parent object_class_get_parent_x86_64 -#define object_deinit object_deinit_x86_64 -#define object_dynamic_cast object_dynamic_cast_x86_64 -#define object_finalize object_finalize_x86_64 -#define object_finalize_child_property object_finalize_child_property_x86_64 -#define object_get_child_property object_get_child_property_x86_64 -#define object_get_link_property object_get_link_property_x86_64 -#define object_get_root object_get_root_x86_64 -#define object_initialize_with_type object_initialize_with_type_x86_64 -#define object_init_with_type object_init_with_type_x86_64 -#define object_instance_init object_instance_init_x86_64 -#define object_new_with_type object_new_with_type_x86_64 -#define object_post_init_with_type object_post_init_with_type_x86_64 -#define object_property_add_alias object_property_add_alias_x86_64 -#define object_property_add_link object_property_add_link_x86_64 -#define object_property_add_uint16_ptr object_property_add_uint16_ptr_x86_64 -#define object_property_add_uint32_ptr object_property_add_uint32_ptr_x86_64 -#define object_property_add_uint64_ptr object_property_add_uint64_ptr_x86_64 -#define object_property_add_uint8_ptr object_property_add_uint8_ptr_x86_64 -#define object_property_allow_set_link object_property_allow_set_link_x86_64 -#define object_property_del object_property_del_x86_64 -#define object_property_del_all object_property_del_all_x86_64 -#define object_property_find object_property_find_x86_64 -#define object_property_get object_property_get_x86_64 -#define object_property_get_bool object_property_get_bool_x86_64 -#define object_property_get_int object_property_get_int_x86_64 -#define object_property_get_link object_property_get_link_x86_64 -#define object_property_get_qobject object_property_get_qobject_x86_64 -#define object_property_get_str object_property_get_str_x86_64 -#define object_property_get_type object_property_get_type_x86_64 -#define object_property_is_child object_property_is_child_x86_64 -#define object_property_set object_property_set_x86_64 -#define object_property_set_description object_property_set_description_x86_64 -#define object_property_set_link object_property_set_link_x86_64 -#define object_property_set_qobject object_property_set_qobject_x86_64 -#define object_release_link_property 
object_release_link_property_x86_64 -#define object_resolve_abs_path object_resolve_abs_path_x86_64 -#define object_resolve_child_property object_resolve_child_property_x86_64 -#define object_resolve_link object_resolve_link_x86_64 -#define object_resolve_link_property object_resolve_link_property_x86_64 -#define object_resolve_partial_path object_resolve_partial_path_x86_64 -#define object_resolve_path object_resolve_path_x86_64 -#define object_resolve_path_component object_resolve_path_component_x86_64 -#define object_resolve_path_type object_resolve_path_type_x86_64 -#define object_set_link_property object_set_link_property_x86_64 -#define object_unparent object_unparent_x86_64 -#define omap_cachemaint_write omap_cachemaint_write_x86_64 -#define omap_cp_reginfo omap_cp_reginfo_x86_64 -#define omap_threadid_write omap_threadid_write_x86_64 -#define omap_ticonfig_write omap_ticonfig_write_x86_64 -#define omap_wfi_write omap_wfi_write_x86_64 -#define op_bits op_bits_x86_64 -#define open_modeflags open_modeflags_x86_64 -#define op_to_mov op_to_mov_x86_64 -#define op_to_movi op_to_movi_x86_64 -#define output_type_enum output_type_enum_x86_64 -#define packFloat128 packFloat128_x86_64 -#define packFloat16 packFloat16_x86_64 -#define packFloat32 packFloat32_x86_64 -#define packFloat64 packFloat64_x86_64 -#define packFloatx80 packFloatx80_x86_64 -#define page_find page_find_x86_64 -#define page_find_alloc page_find_alloc_x86_64 -#define page_flush_tb page_flush_tb_x86_64 -#define page_flush_tb_1 page_flush_tb_1_x86_64 -#define page_init page_init_x86_64 -#define page_size_init page_size_init_x86_64 -#define par par_x86_64 -#define parse_array parse_array_x86_64 -#define parse_error parse_error_x86_64 -#define parse_escape parse_escape_x86_64 -#define parse_keyword parse_keyword_x86_64 -#define parse_literal parse_literal_x86_64 -#define parse_object parse_object_x86_64 -#define parse_optional parse_optional_x86_64 -#define parse_option_bool parse_option_bool_x86_64 -#define parse_option_number parse_option_number_x86_64 -#define parse_option_size parse_option_size_x86_64 -#define parse_pair parse_pair_x86_64 -#define parser_context_free parser_context_free_x86_64 -#define parser_context_new parser_context_new_x86_64 -#define parser_context_peek_token parser_context_peek_token_x86_64 -#define parser_context_pop_token parser_context_pop_token_x86_64 -#define parser_context_restore parser_context_restore_x86_64 -#define parser_context_save parser_context_save_x86_64 -#define parse_str parse_str_x86_64 -#define parse_type_bool parse_type_bool_x86_64 -#define parse_type_int parse_type_int_x86_64 -#define parse_type_number parse_type_number_x86_64 -#define parse_type_size parse_type_size_x86_64 -#define parse_type_str parse_type_str_x86_64 -#define parse_value parse_value_x86_64 -#define par_write par_write_x86_64 -#define patch_reloc patch_reloc_x86_64 -#define phys_map_node_alloc phys_map_node_alloc_x86_64 -#define phys_map_node_reserve phys_map_node_reserve_x86_64 -#define phys_mem_alloc phys_mem_alloc_x86_64 -#define phys_mem_set_alloc phys_mem_set_alloc_x86_64 -#define phys_page_compact phys_page_compact_x86_64 -#define phys_page_compact_all phys_page_compact_all_x86_64 -#define phys_page_find phys_page_find_x86_64 -#define phys_page_set phys_page_set_x86_64 -#define phys_page_set_level phys_page_set_level_x86_64 -#define phys_section_add phys_section_add_x86_64 -#define phys_section_destroy phys_section_destroy_x86_64 -#define phys_sections_free phys_sections_free_x86_64 -#define pickNaN 
pickNaN_x86_64 -#define pickNaNMulAdd pickNaNMulAdd_x86_64 -#define pmccfiltr_write pmccfiltr_write_x86_64 -#define pmccntr_read pmccntr_read_x86_64 -#define pmccntr_sync pmccntr_sync_x86_64 -#define pmccntr_write pmccntr_write_x86_64 -#define pmccntr_write32 pmccntr_write32_x86_64 -#define pmcntenclr_write pmcntenclr_write_x86_64 -#define pmcntenset_write pmcntenset_write_x86_64 -#define pmcr_write pmcr_write_x86_64 -#define pmintenclr_write pmintenclr_write_x86_64 -#define pmintenset_write pmintenset_write_x86_64 -#define pmovsr_write pmovsr_write_x86_64 -#define pmreg_access pmreg_access_x86_64 -#define pmsav5_cp_reginfo pmsav5_cp_reginfo_x86_64 -#define pmsav5_data_ap_read pmsav5_data_ap_read_x86_64 -#define pmsav5_data_ap_write pmsav5_data_ap_write_x86_64 -#define pmsav5_insn_ap_read pmsav5_insn_ap_read_x86_64 -#define pmsav5_insn_ap_write pmsav5_insn_ap_write_x86_64 -#define pmuserenr_write pmuserenr_write_x86_64 -#define pmxevtyper_write pmxevtyper_write_x86_64 -#define print_type_bool print_type_bool_x86_64 -#define print_type_int print_type_int_x86_64 -#define print_type_number print_type_number_x86_64 -#define print_type_size print_type_size_x86_64 -#define print_type_str print_type_str_x86_64 -#define propagateFloat128NaN propagateFloat128NaN_x86_64 -#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_x86_64 -#define propagateFloat32NaN propagateFloat32NaN_x86_64 -#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_x86_64 -#define propagateFloat64NaN propagateFloat64NaN_x86_64 -#define propagateFloatx80NaN propagateFloatx80NaN_x86_64 -#define property_get_alias property_get_alias_x86_64 -#define property_get_bool property_get_bool_x86_64 -#define property_get_str property_get_str_x86_64 -#define property_get_uint16_ptr property_get_uint16_ptr_x86_64 -#define property_get_uint32_ptr property_get_uint32_ptr_x86_64 -#define property_get_uint64_ptr property_get_uint64_ptr_x86_64 -#define property_get_uint8_ptr property_get_uint8_ptr_x86_64 -#define property_release_alias property_release_alias_x86_64 -#define property_release_bool property_release_bool_x86_64 -#define property_release_str property_release_str_x86_64 -#define property_resolve_alias property_resolve_alias_x86_64 -#define property_set_alias property_set_alias_x86_64 -#define property_set_bool property_set_bool_x86_64 -#define property_set_str property_set_str_x86_64 -#define pstate_read pstate_read_x86_64 -#define pstate_write pstate_write_x86_64 -#define pxa250_initfn pxa250_initfn_x86_64 -#define pxa255_initfn pxa255_initfn_x86_64 -#define pxa260_initfn pxa260_initfn_x86_64 -#define pxa261_initfn pxa261_initfn_x86_64 -#define pxa262_initfn pxa262_initfn_x86_64 -#define pxa270a0_initfn pxa270a0_initfn_x86_64 -#define pxa270a1_initfn pxa270a1_initfn_x86_64 -#define pxa270b0_initfn pxa270b0_initfn_x86_64 -#define pxa270b1_initfn pxa270b1_initfn_x86_64 -#define pxa270c0_initfn pxa270c0_initfn_x86_64 -#define pxa270c5_initfn pxa270c5_initfn_x86_64 -#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_x86_64 -#define qapi_dealloc_end_list qapi_dealloc_end_list_x86_64 -#define qapi_dealloc_end_struct qapi_dealloc_end_struct_x86_64 -#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_x86_64 -#define qapi_dealloc_next_list qapi_dealloc_next_list_x86_64 -#define qapi_dealloc_pop qapi_dealloc_pop_x86_64 -#define qapi_dealloc_push qapi_dealloc_push_x86_64 -#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_x86_64 -#define qapi_dealloc_start_list 
qapi_dealloc_start_list_x86_64 -#define qapi_dealloc_start_struct qapi_dealloc_start_struct_x86_64 -#define qapi_dealloc_start_union qapi_dealloc_start_union_x86_64 -#define qapi_dealloc_type_bool qapi_dealloc_type_bool_x86_64 -#define qapi_dealloc_type_enum qapi_dealloc_type_enum_x86_64 -#define qapi_dealloc_type_int qapi_dealloc_type_int_x86_64 -#define qapi_dealloc_type_number qapi_dealloc_type_number_x86_64 -#define qapi_dealloc_type_size qapi_dealloc_type_size_x86_64 -#define qapi_dealloc_type_str qapi_dealloc_type_str_x86_64 -#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_x86_64 -#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_x86_64 -#define qapi_free_boolList qapi_free_boolList_x86_64 -#define qapi_free_ErrorClassList qapi_free_ErrorClassList_x86_64 -#define qapi_free_int16List qapi_free_int16List_x86_64 -#define qapi_free_int32List qapi_free_int32List_x86_64 -#define qapi_free_int64List qapi_free_int64List_x86_64 -#define qapi_free_int8List qapi_free_int8List_x86_64 -#define qapi_free_intList qapi_free_intList_x86_64 -#define qapi_free_numberList qapi_free_numberList_x86_64 -#define qapi_free_strList qapi_free_strList_x86_64 -#define qapi_free_uint16List qapi_free_uint16List_x86_64 -#define qapi_free_uint32List qapi_free_uint32List_x86_64 -#define qapi_free_uint64List qapi_free_uint64List_x86_64 -#define qapi_free_uint8List qapi_free_uint8List_x86_64 -#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_x86_64 -#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_x86_64 -#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_x86_64 -#define qbool_destroy_obj qbool_destroy_obj_x86_64 -#define qbool_from_int qbool_from_int_x86_64 -#define qbool_get_int qbool_get_int_x86_64 -#define qbool_type qbool_type_x86_64 -#define qbus_create qbus_create_x86_64 -#define qbus_create_inplace qbus_create_inplace_x86_64 -#define qbus_finalize qbus_finalize_x86_64 -#define qbus_initfn qbus_initfn_x86_64 -#define qbus_realize qbus_realize_x86_64 -#define qdev_create qdev_create_x86_64 -#define qdev_get_type qdev_get_type_x86_64 -#define qdev_register_types qdev_register_types_x86_64 -#define qdev_set_parent_bus qdev_set_parent_bus_x86_64 -#define qdev_try_create qdev_try_create_x86_64 -#define qdict_add_key qdict_add_key_x86_64 -#define qdict_array_split qdict_array_split_x86_64 -#define qdict_clone_shallow qdict_clone_shallow_x86_64 -#define qdict_del qdict_del_x86_64 -#define qdict_destroy_obj qdict_destroy_obj_x86_64 -#define qdict_entry_key qdict_entry_key_x86_64 -#define qdict_entry_value qdict_entry_value_x86_64 -#define qdict_extract_subqdict qdict_extract_subqdict_x86_64 -#define qdict_find qdict_find_x86_64 -#define qdict_first qdict_first_x86_64 -#define qdict_flatten qdict_flatten_x86_64 -#define qdict_flatten_qdict qdict_flatten_qdict_x86_64 -#define qdict_flatten_qlist qdict_flatten_qlist_x86_64 -#define qdict_get qdict_get_x86_64 -#define qdict_get_bool qdict_get_bool_x86_64 -#define qdict_get_double qdict_get_double_x86_64 -#define qdict_get_int qdict_get_int_x86_64 -#define qdict_get_obj qdict_get_obj_x86_64 -#define qdict_get_qdict qdict_get_qdict_x86_64 -#define qdict_get_qlist qdict_get_qlist_x86_64 -#define qdict_get_str qdict_get_str_x86_64 -#define qdict_get_try_bool qdict_get_try_bool_x86_64 -#define qdict_get_try_int qdict_get_try_int_x86_64 -#define qdict_get_try_str qdict_get_try_str_x86_64 -#define qdict_haskey qdict_haskey_x86_64 -#define qdict_has_prefixed_entries 
qdict_has_prefixed_entries_x86_64 -#define qdict_iter qdict_iter_x86_64 -#define qdict_join qdict_join_x86_64 -#define qdict_new qdict_new_x86_64 -#define qdict_next qdict_next_x86_64 -#define qdict_next_entry qdict_next_entry_x86_64 -#define qdict_put_obj qdict_put_obj_x86_64 -#define qdict_size qdict_size_x86_64 -#define qdict_type qdict_type_x86_64 -#define qemu_clock_get_us qemu_clock_get_us_x86_64 -#define qemu_clock_ptr qemu_clock_ptr_x86_64 -#define qemu_clocks qemu_clocks_x86_64 -#define qemu_get_cpu qemu_get_cpu_x86_64 -#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_x86_64 -#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_x86_64 -#define qemu_get_ram_block qemu_get_ram_block_x86_64 -#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_x86_64 -#define qemu_get_ram_fd qemu_get_ram_fd_x86_64 -#define qemu_get_ram_ptr qemu_get_ram_ptr_x86_64 -#define qemu_host_page_mask qemu_host_page_mask_x86_64 -#define qemu_host_page_size qemu_host_page_size_x86_64 -#define qemu_init_vcpu qemu_init_vcpu_x86_64 -#define qemu_ld_helpers qemu_ld_helpers_x86_64 -#define qemu_log_close qemu_log_close_x86_64 -#define qemu_log_enabled qemu_log_enabled_x86_64 -#define qemu_log_flush qemu_log_flush_x86_64 -#define qemu_loglevel_mask qemu_loglevel_mask_x86_64 -#define qemu_log_vprintf qemu_log_vprintf_x86_64 -#define qemu_oom_check qemu_oom_check_x86_64 -#define qemu_parse_fd qemu_parse_fd_x86_64 -#define qemu_ram_addr_from_host qemu_ram_addr_from_host_x86_64 -#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_x86_64 -#define qemu_ram_alloc qemu_ram_alloc_x86_64 -#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_x86_64 -#define qemu_ram_foreach_block qemu_ram_foreach_block_x86_64 -#define qemu_ram_free qemu_ram_free_x86_64 -#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_x86_64 -#define qemu_ram_ptr_length qemu_ram_ptr_length_x86_64 -#define qemu_ram_remap qemu_ram_remap_x86_64 -#define qemu_ram_setup_dump qemu_ram_setup_dump_x86_64 -#define qemu_ram_unset_idstr qemu_ram_unset_idstr_x86_64 -#define qemu_real_host_page_size qemu_real_host_page_size_x86_64 -#define qemu_st_helpers qemu_st_helpers_x86_64 -#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_x86_64 -#define qemu_try_memalign qemu_try_memalign_x86_64 -#define qentry_destroy qentry_destroy_x86_64 -#define qerror_human qerror_human_x86_64 -#define qerror_report qerror_report_x86_64 -#define qerror_report_err qerror_report_err_x86_64 -#define qfloat_destroy_obj qfloat_destroy_obj_x86_64 -#define qfloat_from_double qfloat_from_double_x86_64 -#define qfloat_get_double qfloat_get_double_x86_64 -#define qfloat_type qfloat_type_x86_64 -#define qint_destroy_obj qint_destroy_obj_x86_64 -#define qint_from_int qint_from_int_x86_64 -#define qint_get_int qint_get_int_x86_64 -#define qint_type qint_type_x86_64 -#define qlist_append_obj qlist_append_obj_x86_64 -#define qlist_copy qlist_copy_x86_64 -#define qlist_copy_elem qlist_copy_elem_x86_64 -#define qlist_destroy_obj qlist_destroy_obj_x86_64 -#define qlist_empty qlist_empty_x86_64 -#define qlist_entry_obj qlist_entry_obj_x86_64 -#define qlist_first qlist_first_x86_64 -#define qlist_iter qlist_iter_x86_64 -#define qlist_new qlist_new_x86_64 -#define qlist_next qlist_next_x86_64 -#define qlist_peek qlist_peek_x86_64 -#define qlist_pop qlist_pop_x86_64 -#define qlist_size qlist_size_x86_64 -#define qlist_size_iter qlist_size_iter_x86_64 -#define qlist_type qlist_type_x86_64 -#define qmp_input_end_implicit_struct 
qmp_input_end_implicit_struct_x86_64 -#define qmp_input_end_list qmp_input_end_list_x86_64 -#define qmp_input_end_struct qmp_input_end_struct_x86_64 -#define qmp_input_get_next_type qmp_input_get_next_type_x86_64 -#define qmp_input_get_object qmp_input_get_object_x86_64 -#define qmp_input_get_visitor qmp_input_get_visitor_x86_64 -#define qmp_input_next_list qmp_input_next_list_x86_64 -#define qmp_input_optional qmp_input_optional_x86_64 -#define qmp_input_pop qmp_input_pop_x86_64 -#define qmp_input_push qmp_input_push_x86_64 -#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_x86_64 -#define qmp_input_start_list qmp_input_start_list_x86_64 -#define qmp_input_start_struct qmp_input_start_struct_x86_64 -#define qmp_input_type_bool qmp_input_type_bool_x86_64 -#define qmp_input_type_int qmp_input_type_int_x86_64 -#define qmp_input_type_number qmp_input_type_number_x86_64 -#define qmp_input_type_str qmp_input_type_str_x86_64 -#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_x86_64 -#define qmp_input_visitor_new qmp_input_visitor_new_x86_64 -#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_x86_64 -#define qmp_output_add_obj qmp_output_add_obj_x86_64 -#define qmp_output_end_list qmp_output_end_list_x86_64 -#define qmp_output_end_struct qmp_output_end_struct_x86_64 -#define qmp_output_first qmp_output_first_x86_64 -#define qmp_output_get_qobject qmp_output_get_qobject_x86_64 -#define qmp_output_get_visitor qmp_output_get_visitor_x86_64 -#define qmp_output_last qmp_output_last_x86_64 -#define qmp_output_next_list qmp_output_next_list_x86_64 -#define qmp_output_pop qmp_output_pop_x86_64 -#define qmp_output_push_obj qmp_output_push_obj_x86_64 -#define qmp_output_start_list qmp_output_start_list_x86_64 -#define qmp_output_start_struct qmp_output_start_struct_x86_64 -#define qmp_output_type_bool qmp_output_type_bool_x86_64 -#define qmp_output_type_int qmp_output_type_int_x86_64 -#define qmp_output_type_number qmp_output_type_number_x86_64 -#define qmp_output_type_str qmp_output_type_str_x86_64 -#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_x86_64 -#define qmp_output_visitor_new qmp_output_visitor_new_x86_64 -#define qobject_decref qobject_decref_x86_64 -#define qobject_to_qbool qobject_to_qbool_x86_64 -#define qobject_to_qdict qobject_to_qdict_x86_64 -#define qobject_to_qfloat qobject_to_qfloat_x86_64 -#define qobject_to_qint qobject_to_qint_x86_64 -#define qobject_to_qlist qobject_to_qlist_x86_64 -#define qobject_to_qstring qobject_to_qstring_x86_64 -#define qobject_type qobject_type_x86_64 -#define qstring_append qstring_append_x86_64 -#define qstring_append_chr qstring_append_chr_x86_64 -#define qstring_append_int qstring_append_int_x86_64 -#define qstring_destroy_obj qstring_destroy_obj_x86_64 -#define qstring_from_escaped_str qstring_from_escaped_str_x86_64 -#define qstring_from_str qstring_from_str_x86_64 -#define qstring_from_substr qstring_from_substr_x86_64 -#define qstring_get_length qstring_get_length_x86_64 -#define qstring_get_str qstring_get_str_x86_64 -#define qstring_new qstring_new_x86_64 -#define qstring_type qstring_type_x86_64 -#define ram_block_add ram_block_add_x86_64 -#define ram_size ram_size_x86_64 -#define range_compare range_compare_x86_64 -#define range_covers_byte range_covers_byte_x86_64 -#define range_get_last range_get_last_x86_64 -#define range_merge range_merge_x86_64 -#define ranges_can_merge ranges_can_merge_x86_64 -#define raw_read raw_read_x86_64 -#define raw_write raw_write_x86_64 -#define rcon 
rcon_x86_64 -#define read_raw_cp_reg read_raw_cp_reg_x86_64 -#define recip_estimate recip_estimate_x86_64 -#define recip_sqrt_estimate recip_sqrt_estimate_x86_64 -#define register_cp_regs_for_features register_cp_regs_for_features_x86_64 -#define register_multipage register_multipage_x86_64 -#define register_subpage register_subpage_x86_64 -#define register_tm_clones register_tm_clones_x86_64 -#define register_types_object register_types_object_x86_64 -#define regnames regnames_x86_64 -#define render_memory_region render_memory_region_x86_64 -#define reset_all_temps reset_all_temps_x86_64 -#define reset_temp reset_temp_x86_64 -#define rol32 rol32_x86_64 -#define rol64 rol64_x86_64 -#define ror32 ror32_x86_64 -#define ror64 ror64_x86_64 -#define roundAndPackFloat128 roundAndPackFloat128_x86_64 -#define roundAndPackFloat16 roundAndPackFloat16_x86_64 -#define roundAndPackFloat32 roundAndPackFloat32_x86_64 -#define roundAndPackFloat64 roundAndPackFloat64_x86_64 -#define roundAndPackFloatx80 roundAndPackFloatx80_x86_64 -#define roundAndPackInt32 roundAndPackInt32_x86_64 -#define roundAndPackInt64 roundAndPackInt64_x86_64 -#define roundAndPackUint64 roundAndPackUint64_x86_64 -#define round_to_inf round_to_inf_x86_64 -#define run_on_cpu run_on_cpu_x86_64 -#define s0 s0_x86_64 -#define S0 S0_x86_64 -#define s1 s1_x86_64 -#define S1 S1_x86_64 -#define sa1100_initfn sa1100_initfn_x86_64 -#define sa1110_initfn sa1110_initfn_x86_64 -#define save_globals save_globals_x86_64 -#define scr_write scr_write_x86_64 -#define sctlr_write sctlr_write_x86_64 -#define set_bit set_bit_x86_64 -#define set_bits set_bits_x86_64 -#define set_default_nan_mode set_default_nan_mode_x86_64 -#define set_feature set_feature_x86_64 -#define set_float_detect_tininess set_float_detect_tininess_x86_64 -#define set_float_exception_flags set_float_exception_flags_x86_64 -#define set_float_rounding_mode set_float_rounding_mode_x86_64 -#define set_flush_inputs_to_zero set_flush_inputs_to_zero_x86_64 -#define set_flush_to_zero set_flush_to_zero_x86_64 -#define set_swi_errno set_swi_errno_x86_64 -#define sextract32 sextract32_x86_64 -#define sextract64 sextract64_x86_64 -#define shift128ExtraRightJamming shift128ExtraRightJamming_x86_64 -#define shift128Right shift128Right_x86_64 -#define shift128RightJamming shift128RightJamming_x86_64 -#define shift32RightJamming shift32RightJamming_x86_64 -#define shift64ExtraRightJamming shift64ExtraRightJamming_x86_64 -#define shift64RightJamming shift64RightJamming_x86_64 -#define shifter_out_im shifter_out_im_x86_64 -#define shortShift128Left shortShift128Left_x86_64 -#define shortShift192Left shortShift192Left_x86_64 -#define simple_mpu_ap_bits simple_mpu_ap_bits_x86_64 -#define size_code_gen_buffer size_code_gen_buffer_x86_64 -#define softmmu_lock_user softmmu_lock_user_x86_64 -#define softmmu_lock_user_string softmmu_lock_user_string_x86_64 -#define softmmu_tget32 softmmu_tget32_x86_64 -#define softmmu_tget8 softmmu_tget8_x86_64 -#define softmmu_tput32 softmmu_tput32_x86_64 -#define softmmu_unlock_user softmmu_unlock_user_x86_64 -#define sort_constraints sort_constraints_x86_64 -#define sp_el0_access sp_el0_access_x86_64 -#define spsel_read spsel_read_x86_64 -#define spsel_write spsel_write_x86_64 -#define start_list start_list_x86_64 -#define stb_p stb_p_x86_64 -#define stb_phys stb_phys_x86_64 -#define stl_be_p stl_be_p_x86_64 -#define stl_be_phys stl_be_phys_x86_64 -#define stl_he_p stl_he_p_x86_64 -#define stl_le_p stl_le_p_x86_64 -#define stl_le_phys stl_le_phys_x86_64 -#define 
stl_phys stl_phys_x86_64 -#define stl_phys_internal stl_phys_internal_x86_64 -#define stl_phys_notdirty stl_phys_notdirty_x86_64 -#define store_cpu_offset store_cpu_offset_x86_64 -#define store_reg store_reg_x86_64 -#define store_reg_bx store_reg_bx_x86_64 -#define store_reg_from_load store_reg_from_load_x86_64 -#define stq_be_p stq_be_p_x86_64 -#define stq_be_phys stq_be_phys_x86_64 -#define stq_he_p stq_he_p_x86_64 -#define stq_le_p stq_le_p_x86_64 -#define stq_le_phys stq_le_phys_x86_64 -#define stq_phys stq_phys_x86_64 -#define string_input_get_visitor string_input_get_visitor_x86_64 -#define string_input_visitor_cleanup string_input_visitor_cleanup_x86_64 -#define string_input_visitor_new string_input_visitor_new_x86_64 -#define strongarm_cp_reginfo strongarm_cp_reginfo_x86_64 -#define strstart strstart_x86_64 -#define strtosz strtosz_x86_64 -#define strtosz_suffix strtosz_suffix_x86_64 -#define stw_be_p stw_be_p_x86_64 -#define stw_be_phys stw_be_phys_x86_64 -#define stw_he_p stw_he_p_x86_64 -#define stw_le_p stw_le_p_x86_64 -#define stw_le_phys stw_le_phys_x86_64 -#define stw_phys stw_phys_x86_64 -#define stw_phys_internal stw_phys_internal_x86_64 -#define sub128 sub128_x86_64 -#define sub16_sat sub16_sat_x86_64 -#define sub16_usat sub16_usat_x86_64 -#define sub192 sub192_x86_64 -#define sub8_sat sub8_sat_x86_64 -#define sub8_usat sub8_usat_x86_64 -#define subFloat128Sigs subFloat128Sigs_x86_64 -#define subFloat32Sigs subFloat32Sigs_x86_64 -#define subFloat64Sigs subFloat64Sigs_x86_64 -#define subFloatx80Sigs subFloatx80Sigs_x86_64 -#define subpage_accepts subpage_accepts_x86_64 -#define subpage_init subpage_init_x86_64 -#define subpage_ops subpage_ops_x86_64 -#define subpage_read subpage_read_x86_64 -#define subpage_register subpage_register_x86_64 -#define subpage_write subpage_write_x86_64 -#define suffix_mul suffix_mul_x86_64 -#define swap_commutative swap_commutative_x86_64 -#define swap_commutative2 swap_commutative2_x86_64 -#define switch_mode switch_mode_x86_64 -#define switch_v7m_sp switch_v7m_sp_x86_64 -#define syn_aa32_bkpt syn_aa32_bkpt_x86_64 -#define syn_aa32_hvc syn_aa32_hvc_x86_64 -#define syn_aa32_smc syn_aa32_smc_x86_64 -#define syn_aa32_svc syn_aa32_svc_x86_64 -#define syn_breakpoint syn_breakpoint_x86_64 -#define sync_globals sync_globals_x86_64 -#define syn_cp14_rrt_trap syn_cp14_rrt_trap_x86_64 -#define syn_cp14_rt_trap syn_cp14_rt_trap_x86_64 -#define syn_cp15_rrt_trap syn_cp15_rrt_trap_x86_64 -#define syn_cp15_rt_trap syn_cp15_rt_trap_x86_64 -#define syn_data_abort syn_data_abort_x86_64 -#define syn_fp_access_trap syn_fp_access_trap_x86_64 -#define syn_insn_abort syn_insn_abort_x86_64 -#define syn_swstep syn_swstep_x86_64 -#define syn_uncategorized syn_uncategorized_x86_64 -#define syn_watchpoint syn_watchpoint_x86_64 -#define syscall_err syscall_err_x86_64 -#define system_bus_class_init system_bus_class_init_x86_64 -#define system_bus_info system_bus_info_x86_64 -#define t2ee_cp_reginfo t2ee_cp_reginfo_x86_64 -#define table_logic_cc table_logic_cc_x86_64 -#define target_parse_constraint target_parse_constraint_x86_64 -#define target_words_bigendian target_words_bigendian_x86_64 -#define tb_add_jump tb_add_jump_x86_64 -#define tb_alloc tb_alloc_x86_64 -#define tb_alloc_page tb_alloc_page_x86_64 -#define tb_check_watchpoint tb_check_watchpoint_x86_64 -#define tb_find_fast tb_find_fast_x86_64 -#define tb_find_pc tb_find_pc_x86_64 -#define tb_find_slow tb_find_slow_x86_64 -#define tb_flush tb_flush_x86_64 -#define tb_flush_jmp_cache tb_flush_jmp_cache_x86_64 
-#define tb_free tb_free_x86_64 -#define tb_gen_code tb_gen_code_x86_64 -#define tb_hash_remove tb_hash_remove_x86_64 -#define tb_invalidate_phys_addr tb_invalidate_phys_addr_x86_64 -#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_x86_64 -#define tb_invalidate_phys_range tb_invalidate_phys_range_x86_64 -#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_x86_64 -#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_x86_64 -#define tb_jmp_remove tb_jmp_remove_x86_64 -#define tb_link_page tb_link_page_x86_64 -#define tb_page_remove tb_page_remove_x86_64 -#define tb_phys_hash_func tb_phys_hash_func_x86_64 -#define tb_phys_invalidate tb_phys_invalidate_x86_64 -#define tb_reset_jump tb_reset_jump_x86_64 -#define tb_set_jmp_target tb_set_jmp_target_x86_64 -#define tcg_accel_class_init tcg_accel_class_init_x86_64 -#define tcg_accel_type tcg_accel_type_x86_64 -#define tcg_add_param_i32 tcg_add_param_i32_x86_64 -#define tcg_add_param_i64 tcg_add_param_i64_x86_64 -#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_x86_64 -#define tcg_allowed tcg_allowed_x86_64 -#define tcg_canonicalize_memop tcg_canonicalize_memop_x86_64 -#define tcg_commit tcg_commit_x86_64 -#define tcg_cond_to_jcc tcg_cond_to_jcc_x86_64 -#define tcg_constant_folding tcg_constant_folding_x86_64 +#define tcg_can_emit_vec_op tcg_can_emit_vec_op_x86_64 +#define tcg_expand_vec_op tcg_expand_vec_op_x86_64 +#define tcg_register_jit tcg_register_jit_x86_64 +#define tcg_tb_insert tcg_tb_insert_x86_64 +#define tcg_tb_remove tcg_tb_remove_x86_64 +#define tcg_tb_lookup tcg_tb_lookup_x86_64 +#define tcg_tb_foreach tcg_tb_foreach_x86_64 +#define tcg_nb_tbs tcg_nb_tbs_x86_64 +#define tcg_region_reset_all tcg_region_reset_all_x86_64 +#define tcg_region_init tcg_region_init_x86_64 +#define tcg_code_size tcg_code_size_x86_64 +#define tcg_code_capacity tcg_code_capacity_x86_64 +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_x86_64 +#define tcg_malloc_internal tcg_malloc_internal_x86_64 +#define tcg_pool_reset tcg_pool_reset_x86_64 +#define tcg_context_init tcg_context_init_x86_64 +#define tcg_tb_alloc tcg_tb_alloc_x86_64 +#define tcg_prologue_init tcg_prologue_init_x86_64 +#define tcg_func_start tcg_func_start_x86_64 +#define tcg_set_frame tcg_set_frame_x86_64 +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_x86_64 +#define tcg_temp_new_internal tcg_temp_new_internal_x86_64 +#define tcg_temp_new_vec tcg_temp_new_vec_x86_64 +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_x86_64 +#define tcg_temp_free_internal tcg_temp_free_internal_x86_64 #define tcg_const_i32 tcg_const_i32_x86_64 #define tcg_const_i64 tcg_const_i64_x86_64 #define tcg_const_local_i32 tcg_const_local_i32_x86_64 #define tcg_const_local_i64 tcg_const_local_i64_x86_64 -#define tcg_context_init tcg_context_init_x86_64 -#define tcg_cpu_address_space_init tcg_cpu_address_space_init_x86_64 -#define tcg_cpu_exec tcg_cpu_exec_x86_64 -#define tcg_current_code_size tcg_current_code_size_x86_64 -#define tcg_dump_info tcg_dump_info_x86_64 -#define tcg_dump_ops tcg_dump_ops_x86_64 -#define tcg_exec_all tcg_exec_all_x86_64 -#define tcg_find_helper tcg_find_helper_x86_64 -#define tcg_func_start tcg_func_start_x86_64 -#define tcg_gen_abs_i32 tcg_gen_abs_i32_x86_64 -#define tcg_gen_add2_i32 tcg_gen_add2_i32_x86_64 -#define tcg_gen_add_i32 tcg_gen_add_i32_x86_64 -#define tcg_gen_add_i64 tcg_gen_add_i64_x86_64 -#define tcg_gen_addi_i32 tcg_gen_addi_i32_x86_64 -#define tcg_gen_addi_i64 tcg_gen_addi_i64_x86_64 -#define 
tcg_gen_andc_i32 tcg_gen_andc_i32_x86_64 -#define tcg_gen_and_i32 tcg_gen_and_i32_x86_64 -#define tcg_gen_and_i64 tcg_gen_and_i64_x86_64 -#define tcg_gen_andi_i32 tcg_gen_andi_i32_x86_64 -#define tcg_gen_andi_i64 tcg_gen_andi_i64_x86_64 -#define tcg_gen_br tcg_gen_br_x86_64 -#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_x86_64 -#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_x86_64 -#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_x86_64 -#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_x86_64 -#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_x86_64 +#define tcg_op_supported tcg_op_supported_x86_64 #define tcg_gen_callN tcg_gen_callN_x86_64 +#define tcg_op_remove tcg_op_remove_x86_64 +#define tcg_emit_op tcg_emit_op_x86_64 +#define tcg_op_insert_before tcg_op_insert_before_x86_64 +#define tcg_op_insert_after tcg_op_insert_after_x86_64 +#define tcg_cpu_exec_time tcg_cpu_exec_time_x86_64 #define tcg_gen_code tcg_gen_code_x86_64 -#define tcg_gen_code_common tcg_gen_code_common_x86_64 -#define tcg_gen_code_search_pc tcg_gen_code_search_pc_x86_64 -#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_x86_64 -#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_x86_64 -#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_x86_64 -#define tcg_gen_exit_tb tcg_gen_exit_tb_x86_64 -#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_x86_64 -#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_x86_64 -#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_x86_64 -#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_x86_64 -#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_x86_64 -#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_x86_64 -#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_x86_64 -#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_x86_64 -#define tcg_gen_goto_tb tcg_gen_goto_tb_x86_64 -#define tcg_gen_ld_i32 tcg_gen_ld_i32_x86_64 -#define tcg_gen_ld_i64 tcg_gen_ld_i64_x86_64 -#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_x86_64 -#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_x86_64 -#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_x86_64 -#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_x86_64 -#define tcg_gen_mov_i32 tcg_gen_mov_i32_x86_64 -#define tcg_gen_mov_i64 tcg_gen_mov_i64_x86_64 -#define tcg_gen_movi_i32 tcg_gen_movi_i32_x86_64 -#define tcg_gen_movi_i64 tcg_gen_movi_i64_x86_64 -#define tcg_gen_mul_i32 tcg_gen_mul_i32_x86_64 -#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_x86_64 -#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_x86_64 -#define tcg_gen_neg_i32 tcg_gen_neg_i32_x86_64 -#define tcg_gen_neg_i64 tcg_gen_neg_i64_x86_64 -#define tcg_gen_not_i32 tcg_gen_not_i32_x86_64 -#define tcg_gen_op0 tcg_gen_op0_x86_64 -#define tcg_gen_op1i tcg_gen_op1i_x86_64 -#define tcg_gen_op2_i32 tcg_gen_op2_i32_x86_64 -#define tcg_gen_op2_i64 tcg_gen_op2_i64_x86_64 -#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_x86_64 -#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_x86_64 -#define tcg_gen_op3_i32 tcg_gen_op3_i32_x86_64 -#define tcg_gen_op3_i64 tcg_gen_op3_i64_x86_64 -#define tcg_gen_op4_i32 tcg_gen_op4_i32_x86_64 -#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_x86_64 -#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_x86_64 -#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_x86_64 -#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_x86_64 -#define tcg_gen_op6_i32 tcg_gen_op6_i32_x86_64 -#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_x86_64 -#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_x86_64 -#define tcg_gen_orc_i32 tcg_gen_orc_i32_x86_64 -#define tcg_gen_or_i32 tcg_gen_or_i32_x86_64 -#define tcg_gen_or_i64 tcg_gen_or_i64_x86_64 +#define tcg_gen_op1 tcg_gen_op1_x86_64 +#define tcg_gen_op2 
tcg_gen_op2_x86_64 +#define tcg_gen_op3 tcg_gen_op3_x86_64 +#define tcg_gen_op4 tcg_gen_op4_x86_64 +#define tcg_gen_op5 tcg_gen_op5_x86_64 +#define tcg_gen_op6 tcg_gen_op6_x86_64 +#define tcg_gen_mb tcg_gen_mb_x86_64 +#define tcg_gen_addi_i32 tcg_gen_addi_i32_x86_64 +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_x86_64 +#define tcg_gen_subi_i32 tcg_gen_subi_i32_x86_64 +#define tcg_gen_andi_i32 tcg_gen_andi_i32_x86_64 #define tcg_gen_ori_i32 tcg_gen_ori_i32_x86_64 -#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_x86_64 -#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_x86_64 -#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_x86_64 -#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_x86_64 +#define tcg_gen_xori_i32 tcg_gen_xori_i32_x86_64 +#define tcg_gen_shli_i32 tcg_gen_shli_i32_x86_64 +#define tcg_gen_shri_i32 tcg_gen_shri_i32_x86_64 +#define tcg_gen_sari_i32 tcg_gen_sari_i32_x86_64 +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_x86_64 +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_x86_64 +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_x86_64 +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_x86_64 +#define tcg_gen_muli_i32 tcg_gen_muli_i32_x86_64 +#define tcg_gen_div_i32 tcg_gen_div_i32_x86_64 +#define tcg_gen_rem_i32 tcg_gen_rem_i32_x86_64 +#define tcg_gen_divu_i32 tcg_gen_divu_i32_x86_64 +#define tcg_gen_remu_i32 tcg_gen_remu_i32_x86_64 +#define tcg_gen_andc_i32 tcg_gen_andc_i32_x86_64 +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_x86_64 +#define tcg_gen_nand_i32 tcg_gen_nand_i32_x86_64 +#define tcg_gen_nor_i32 tcg_gen_nor_i32_x86_64 +#define tcg_gen_orc_i32 tcg_gen_orc_i32_x86_64 +#define tcg_gen_clz_i32 tcg_gen_clz_i32_x86_64 +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_x86_64 +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_x86_64 +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_x86_64 +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_x86_64 +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_x86_64 #define tcg_gen_rotl_i32 tcg_gen_rotl_i32_x86_64 #define tcg_gen_rotli_i32 tcg_gen_rotli_i32_x86_64 #define tcg_gen_rotr_i32 tcg_gen_rotr_i32_x86_64 #define tcg_gen_rotri_i32 tcg_gen_rotri_i32_x86_64 -#define tcg_gen_sar_i32 tcg_gen_sar_i32_x86_64 -#define tcg_gen_sari_i32 tcg_gen_sari_i32_x86_64 -#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_x86_64 -#define tcg_gen_shl_i32 tcg_gen_shl_i32_x86_64 -#define tcg_gen_shl_i64 tcg_gen_shl_i64_x86_64 -#define tcg_gen_shli_i32 tcg_gen_shli_i32_x86_64 +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_x86_64 +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_x86_64 +#define tcg_gen_extract_i32 tcg_gen_extract_i32_x86_64 +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_x86_64 +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_x86_64 +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_x86_64 +#define tcg_gen_add2_i32 tcg_gen_add2_i32_x86_64 +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_x86_64 +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_x86_64 +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_x86_64 +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_x86_64 +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_x86_64 +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_x86_64 +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_x86_64 +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_x86_64 +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_x86_64 +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_x86_64 +#define tcg_gen_smin_i32 tcg_gen_smin_i32_x86_64 +#define tcg_gen_umin_i32 tcg_gen_umin_i32_x86_64 +#define tcg_gen_smax_i32 tcg_gen_smax_i32_x86_64 +#define tcg_gen_umax_i32 tcg_gen_umax_i32_x86_64 +#define tcg_gen_abs_i32 
tcg_gen_abs_i32_x86_64
+#define tcg_gen_addi_i64 tcg_gen_addi_i64_x86_64
+#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_x86_64
+#define tcg_gen_subi_i64 tcg_gen_subi_i64_x86_64
+#define tcg_gen_andi_i64 tcg_gen_andi_i64_x86_64
+#define tcg_gen_ori_i64 tcg_gen_ori_i64_x86_64
+#define tcg_gen_xori_i64 tcg_gen_xori_i64_x86_64
 #define tcg_gen_shli_i64 tcg_gen_shli_i64_x86_64
-#define tcg_gen_shr_i32 tcg_gen_shr_i32_x86_64
-#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_x86_64
-#define tcg_gen_shr_i64 tcg_gen_shr_i64_x86_64
-#define tcg_gen_shri_i32 tcg_gen_shri_i32_x86_64
 #define tcg_gen_shri_i64 tcg_gen_shri_i64_x86_64
-#define tcg_gen_st_i32 tcg_gen_st_i32_x86_64
-#define tcg_gen_st_i64 tcg_gen_st_i64_x86_64
-#define tcg_gen_sub_i32 tcg_gen_sub_i32_x86_64
-#define tcg_gen_sub_i64 tcg_gen_sub_i64_x86_64
-#define tcg_gen_subi_i32 tcg_gen_subi_i32_x86_64
-#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_x86_64
-#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_x86_64
-#define tcg_gen_xor_i32 tcg_gen_xor_i32_x86_64
-#define tcg_gen_xor_i64 tcg_gen_xor_i64_x86_64
-#define tcg_gen_xori_i32 tcg_gen_xori_i32_x86_64
-#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_x86_64
-#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_x86_64
-#define tcg_get_arg_str_idx tcg_get_arg_str_idx_x86_64
-#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_x86_64
-#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_x86_64
-#define tcg_global_mem_new_internal tcg_global_mem_new_internal_x86_64
-#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_x86_64
-#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_x86_64
-#define tcg_global_reg_new_internal tcg_global_reg_new_internal_x86_64
-#define tcg_handle_interrupt tcg_handle_interrupt_x86_64
-#define tcg_init tcg_init_x86_64
-#define tcg_invert_cond tcg_invert_cond_x86_64
-#define tcg_la_bb_end tcg_la_bb_end_x86_64
-#define tcg_la_br_end tcg_la_br_end_x86_64
-#define tcg_la_func_end tcg_la_func_end_x86_64
-#define tcg_liveness_analysis tcg_liveness_analysis_x86_64
-#define tcg_malloc tcg_malloc_x86_64
-#define tcg_malloc_internal tcg_malloc_internal_x86_64
-#define tcg_op_defs_org tcg_op_defs_org_x86_64
-#define tcg_opt_gen_mov tcg_opt_gen_mov_x86_64
-#define tcg_opt_gen_movi tcg_opt_gen_movi_x86_64
-#define tcg_optimize tcg_optimize_x86_64
-#define tcg_out16 tcg_out16_x86_64
-#define tcg_out32 tcg_out32_x86_64
-#define tcg_out64 tcg_out64_x86_64
-#define tcg_out8 tcg_out8_x86_64
-#define tcg_out_addi tcg_out_addi_x86_64
-#define tcg_out_branch tcg_out_branch_x86_64
-#define tcg_out_brcond32 tcg_out_brcond32_x86_64
-#define tcg_out_brcond64 tcg_out_brcond64_x86_64
-#define tcg_out_bswap32 tcg_out_bswap32_x86_64
-#define tcg_out_bswap64 tcg_out_bswap64_x86_64
-#define tcg_out_call tcg_out_call_x86_64
-#define tcg_out_cmp tcg_out_cmp_x86_64
-#define tcg_out_ext16s tcg_out_ext16s_x86_64
-#define tcg_out_ext16u tcg_out_ext16u_x86_64
-#define tcg_out_ext32s tcg_out_ext32s_x86_64
-#define tcg_out_ext32u tcg_out_ext32u_x86_64
-#define tcg_out_ext8s tcg_out_ext8s_x86_64
-#define tcg_out_ext8u tcg_out_ext8u_x86_64
-#define tcg_out_jmp tcg_out_jmp_x86_64
-#define tcg_out_jxx tcg_out_jxx_x86_64
-#define tcg_out_label tcg_out_label_x86_64
-#define tcg_out_ld tcg_out_ld_x86_64
-#define tcg_out_modrm tcg_out_modrm_x86_64
-#define tcg_out_modrm_offset tcg_out_modrm_offset_x86_64
-#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_x86_64
-#define tcg_out_mov tcg_out_mov_x86_64
-#define tcg_out_movcond32 tcg_out_movcond32_x86_64
-#define tcg_out_movcond64 tcg_out_movcond64_x86_64
-#define tcg_out_movi tcg_out_movi_x86_64
-#define tcg_out_op tcg_out_op_x86_64
-#define tcg_out_pop tcg_out_pop_x86_64
-#define tcg_out_push tcg_out_push_x86_64
-#define tcg_out_qemu_ld tcg_out_qemu_ld_x86_64
-#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_x86_64
-#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_x86_64
-#define tcg_out_qemu_st tcg_out_qemu_st_x86_64
-#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_x86_64
-#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_x86_64
-#define tcg_out_reloc tcg_out_reloc_x86_64
-#define tcg_out_rolw_8 tcg_out_rolw_8_x86_64
-#define tcg_out_setcond32 tcg_out_setcond32_x86_64
-#define tcg_out_setcond64 tcg_out_setcond64_x86_64
-#define tcg_out_shifti tcg_out_shifti_x86_64
-#define tcg_out_st tcg_out_st_x86_64
-#define tcg_out_tb_finalize tcg_out_tb_finalize_x86_64
-#define tcg_out_tb_init tcg_out_tb_init_x86_64
-#define tcg_out_tlb_load tcg_out_tlb_load_x86_64
-#define tcg_out_vex_modrm tcg_out_vex_modrm_x86_64
-#define tcg_patch32 tcg_patch32_x86_64
-#define tcg_patch8 tcg_patch8_x86_64
-#define tcg_pcrel_diff tcg_pcrel_diff_x86_64
-#define tcg_pool_reset tcg_pool_reset_x86_64
-#define tcg_prologue_init tcg_prologue_init_x86_64
-#define tcg_ptr_byte_diff tcg_ptr_byte_diff_x86_64
-#define tcg_reg_alloc tcg_reg_alloc_x86_64
-#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_x86_64
-#define tcg_reg_alloc_call tcg_reg_alloc_call_x86_64
-#define tcg_reg_alloc_mov tcg_reg_alloc_mov_x86_64
-#define tcg_reg_alloc_movi tcg_reg_alloc_movi_x86_64
-#define tcg_reg_alloc_op tcg_reg_alloc_op_x86_64
-#define tcg_reg_alloc_start tcg_reg_alloc_start_x86_64
-#define tcg_reg_free tcg_reg_free_x86_64
-#define tcg_reg_sync tcg_reg_sync_x86_64
-#define tcg_set_frame tcg_set_frame_x86_64
-#define tcg_set_nop tcg_set_nop_x86_64
-#define tcg_swap_cond tcg_swap_cond_x86_64
-#define tcg_target_callee_save_regs tcg_target_callee_save_regs_x86_64
-#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_x86_64
-#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_x86_64
-#define tcg_target_const_match tcg_target_const_match_x86_64
-#define tcg_target_init tcg_target_init_x86_64
-#define tcg_target_qemu_prologue tcg_target_qemu_prologue_x86_64
-#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_x86_64
-#define tcg_temp_alloc tcg_temp_alloc_x86_64
-#define tcg_temp_free_i32 tcg_temp_free_i32_x86_64
-#define tcg_temp_free_i64 tcg_temp_free_i64_x86_64
-#define tcg_temp_free_internal tcg_temp_free_internal_x86_64
-#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_x86_64
-#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_x86_64
-#define tcg_temp_new_i32 tcg_temp_new_i32_x86_64
-#define tcg_temp_new_i64 tcg_temp_new_i64_x86_64
-#define tcg_temp_new_internal tcg_temp_new_internal_x86_64
-#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_x86_64
-#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_x86_64
-#define tdb_hash tdb_hash_x86_64
-#define teecr_write teecr_write_x86_64
-#define teehbr_access teehbr_access_x86_64
-#define temp_allocate_frame temp_allocate_frame_x86_64
-#define temp_dead temp_dead_x86_64
-#define temps_are_copies temps_are_copies_x86_64
-#define temp_save temp_save_x86_64
-#define temp_sync temp_sync_x86_64
-#define tgen_arithi tgen_arithi_x86_64
-#define tgen_arithr tgen_arithr_x86_64
-#define thumb2_logic_op thumb2_logic_op_x86_64
-#define ti925t_initfn ti925t_initfn_x86_64
-#define tlb_add_large_page tlb_add_large_page_x86_64
-#define tlb_flush_entry tlb_flush_entry_x86_64
-#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_x86_64
-#define tlbi_aa64_asid_write tlbi_aa64_asid_write_x86_64
-#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_x86_64
-#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_x86_64
-#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_x86_64
-#define tlbi_aa64_va_write tlbi_aa64_va_write_x86_64
-#define tlbiall_is_write tlbiall_is_write_x86_64
-#define tlbiall_write tlbiall_write_x86_64
-#define tlbiasid_is_write tlbiasid_is_write_x86_64
-#define tlbiasid_write tlbiasid_write_x86_64
-#define tlbimvaa_is_write tlbimvaa_is_write_x86_64
-#define tlbimvaa_write tlbimvaa_write_x86_64
-#define tlbimva_is_write tlbimva_is_write_x86_64
-#define tlbimva_write tlbimva_write_x86_64
-#define tlb_is_dirty_ram tlb_is_dirty_ram_x86_64
+#define tcg_gen_sari_i64 tcg_gen_sari_i64_x86_64
+#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_x86_64
+#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_x86_64
+#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_x86_64
+#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_x86_64
+#define tcg_gen_muli_i64 tcg_gen_muli_i64_x86_64
+#define tcg_gen_div_i64 tcg_gen_div_i64_x86_64
+#define tcg_gen_rem_i64 tcg_gen_rem_i64_x86_64
+#define tcg_gen_divu_i64 tcg_gen_divu_i64_x86_64
+#define tcg_gen_remu_i64 tcg_gen_remu_i64_x86_64
+#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_x86_64
+#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_x86_64
+#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_x86_64
+#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_x86_64
+#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_x86_64
+#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_x86_64
+#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_x86_64
+#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_x86_64
+#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_x86_64
+#define tcg_gen_not_i64 tcg_gen_not_i64_x86_64
+#define tcg_gen_andc_i64 tcg_gen_andc_i64_x86_64
+#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_x86_64
+#define tcg_gen_nand_i64 tcg_gen_nand_i64_x86_64
+#define tcg_gen_nor_i64 tcg_gen_nor_i64_x86_64
+#define tcg_gen_orc_i64 tcg_gen_orc_i64_x86_64
+#define tcg_gen_clz_i64 tcg_gen_clz_i64_x86_64
+#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_x86_64
+#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_x86_64
+#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_x86_64
+#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_x86_64
+#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_x86_64
+#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_x86_64
+#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_x86_64
+#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_x86_64
+#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_x86_64
+#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_x86_64
+#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_x86_64
+#define tcg_gen_extract_i64 tcg_gen_extract_i64_x86_64
+#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_x86_64
+#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_x86_64
+#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_x86_64
+#define tcg_gen_add2_i64 tcg_gen_add2_i64_x86_64
+#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_x86_64
+#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_x86_64
+#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_x86_64
+#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_x86_64
+#define tcg_gen_smin_i64 tcg_gen_smin_i64_x86_64
+#define tcg_gen_umin_i64 tcg_gen_umin_i64_x86_64
+#define tcg_gen_smax_i64 tcg_gen_smax_i64_x86_64
+#define tcg_gen_umax_i64 tcg_gen_umax_i64_x86_64
+#define tcg_gen_abs_i64 tcg_gen_abs_i64_x86_64
+#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_x86_64
+#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_x86_64
+#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_x86_64
+#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_x86_64
+#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_x86_64
+#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_x86_64
+#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_x86_64
+#define tcg_gen_exit_tb tcg_gen_exit_tb_x86_64
+#define tcg_gen_goto_tb tcg_gen_goto_tb_x86_64
+#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_x86_64
+#define check_exit_request check_exit_request_x86_64
+#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_x86_64
+#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_x86_64
+#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_x86_64
+#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_x86_64
+#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_x86_64
+#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_x86_64
+#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_x86_64
+#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_x86_64
+#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_x86_64
+#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_x86_64
+#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_x86_64
+#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_x86_64
+#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_x86_64
+#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_x86_64
+#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_x86_64
+#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_x86_64
+#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_x86_64
+#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_x86_64
+#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_x86_64
+#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_x86_64
+#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_x86_64
+#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_x86_64
+#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_x86_64
+#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_x86_64
+#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_x86_64
+#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_x86_64
+#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_x86_64
+#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_x86_64
+#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_x86_64
+#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_x86_64
+#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_x86_64
+#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_x86_64
+#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_x86_64
+#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_x86_64
+#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_x86_64
+#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_x86_64
+#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_x86_64
+#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_x86_64
+#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_x86_64
+#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_x86_64
+#define simd_desc simd_desc_x86_64
+#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_x86_64
+#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_x86_64
+#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_x86_64
+#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_x86_64
+#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_x86_64
+#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_x86_64
+#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_x86_64
+#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_x86_64
+#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_x86_64
+#define tcg_gen_gvec_2 tcg_gen_gvec_2_x86_64
+#define tcg_gen_gvec_2i tcg_gen_gvec_2i_x86_64
+#define tcg_gen_gvec_2s tcg_gen_gvec_2s_x86_64
+#define tcg_gen_gvec_3 tcg_gen_gvec_3_x86_64
+#define tcg_gen_gvec_3i tcg_gen_gvec_3i_x86_64
+#define tcg_gen_gvec_4 tcg_gen_gvec_4_x86_64
+#define tcg_gen_gvec_mov tcg_gen_gvec_mov_x86_64
+#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_x86_64
+#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_x86_64
+#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_x86_64
+#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_x86_64
+#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_x86_64
+#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_x86_64
+#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_x86_64
+#define tcg_gen_gvec_not tcg_gen_gvec_not_x86_64
+#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_x86_64
+#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_x86_64
+#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_x86_64
+#define tcg_gen_gvec_add tcg_gen_gvec_add_x86_64
+#define tcg_gen_gvec_adds tcg_gen_gvec_adds_x86_64
+#define tcg_gen_gvec_addi tcg_gen_gvec_addi_x86_64
+#define tcg_gen_gvec_subs tcg_gen_gvec_subs_x86_64
+#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_x86_64
+#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_x86_64
+#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_x86_64
+#define tcg_gen_gvec_sub tcg_gen_gvec_sub_x86_64
+#define tcg_gen_gvec_mul tcg_gen_gvec_mul_x86_64
+#define tcg_gen_gvec_muls tcg_gen_gvec_muls_x86_64
+#define tcg_gen_gvec_muli tcg_gen_gvec_muli_x86_64
+#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_x86_64
+#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_x86_64
+#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_x86_64
+#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_x86_64
+#define tcg_gen_gvec_smin tcg_gen_gvec_smin_x86_64
+#define tcg_gen_gvec_umin tcg_gen_gvec_umin_x86_64
+#define tcg_gen_gvec_smax tcg_gen_gvec_smax_x86_64
+#define tcg_gen_gvec_umax tcg_gen_gvec_umax_x86_64
+#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_x86_64
+#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_x86_64
+#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_x86_64
+#define tcg_gen_gvec_neg tcg_gen_gvec_neg_x86_64
+#define tcg_gen_gvec_abs tcg_gen_gvec_abs_x86_64
+#define tcg_gen_gvec_and tcg_gen_gvec_and_x86_64
+#define tcg_gen_gvec_or tcg_gen_gvec_or_x86_64
+#define tcg_gen_gvec_xor tcg_gen_gvec_xor_x86_64
+#define tcg_gen_gvec_andc tcg_gen_gvec_andc_x86_64
+#define tcg_gen_gvec_orc tcg_gen_gvec_orc_x86_64
+#define tcg_gen_gvec_nand tcg_gen_gvec_nand_x86_64
+#define tcg_gen_gvec_nor tcg_gen_gvec_nor_x86_64
+#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_x86_64
+#define tcg_gen_gvec_ands tcg_gen_gvec_ands_x86_64
+#define tcg_gen_gvec_andi tcg_gen_gvec_andi_x86_64
+#define tcg_gen_gvec_xors tcg_gen_gvec_xors_x86_64
+#define tcg_gen_gvec_xori tcg_gen_gvec_xori_x86_64
+#define tcg_gen_gvec_ors tcg_gen_gvec_ors_x86_64
+#define tcg_gen_gvec_ori tcg_gen_gvec_ori_x86_64
+#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_x86_64
+#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_x86_64
+#define tcg_gen_gvec_shli tcg_gen_gvec_shli_x86_64
+#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_x86_64
+#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_x86_64
+#define tcg_gen_gvec_shri tcg_gen_gvec_shri_x86_64
+#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_x86_64
+#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_x86_64
+#define tcg_gen_gvec_sari tcg_gen_gvec_sari_x86_64
+#define tcg_gen_gvec_shls tcg_gen_gvec_shls_x86_64
+#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_x86_64
+#define tcg_gen_gvec_sars tcg_gen_gvec_sars_x86_64
+#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_x86_64
+#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_x86_64
+#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_x86_64
+#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_x86_64
+#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_x86_64
+#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_x86_64
+#define vec_gen_2 vec_gen_2_x86_64
+#define vec_gen_3 vec_gen_3_x86_64
+#define vec_gen_4 vec_gen_4_x86_64
+#define tcg_gen_mov_vec tcg_gen_mov_vec_x86_64
+#define tcg_const_zeros_vec tcg_const_zeros_vec_x86_64
+#define tcg_const_ones_vec tcg_const_ones_vec_x86_64
+#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_x86_64
+#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_x86_64
+#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_x86_64
+#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_x86_64
+#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_x86_64
+#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_x86_64
+#define tcg_gen_dupi_vec tcg_gen_dupi_vec_x86_64
+#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_x86_64
+#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_x86_64
+#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_x86_64
+#define tcg_gen_ld_vec tcg_gen_ld_vec_x86_64
+#define tcg_gen_st_vec tcg_gen_st_vec_x86_64
+#define tcg_gen_stl_vec tcg_gen_stl_vec_x86_64
+#define tcg_gen_and_vec tcg_gen_and_vec_x86_64
+#define tcg_gen_or_vec tcg_gen_or_vec_x86_64
+#define tcg_gen_xor_vec tcg_gen_xor_vec_x86_64
+#define tcg_gen_andc_vec tcg_gen_andc_vec_x86_64
+#define tcg_gen_orc_vec tcg_gen_orc_vec_x86_64
+#define tcg_gen_nand_vec tcg_gen_nand_vec_x86_64
+#define tcg_gen_nor_vec tcg_gen_nor_vec_x86_64
+#define tcg_gen_eqv_vec tcg_gen_eqv_vec_x86_64
+#define tcg_gen_not_vec tcg_gen_not_vec_x86_64
+#define tcg_gen_neg_vec tcg_gen_neg_vec_x86_64
+#define tcg_gen_abs_vec tcg_gen_abs_vec_x86_64
+#define tcg_gen_shli_vec tcg_gen_shli_vec_x86_64
+#define tcg_gen_shri_vec tcg_gen_shri_vec_x86_64
+#define tcg_gen_sari_vec tcg_gen_sari_vec_x86_64
+#define tcg_gen_cmp_vec tcg_gen_cmp_vec_x86_64
+#define tcg_gen_add_vec tcg_gen_add_vec_x86_64
+#define tcg_gen_sub_vec tcg_gen_sub_vec_x86_64
+#define tcg_gen_mul_vec tcg_gen_mul_vec_x86_64
+#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_x86_64
+#define tcg_gen_usadd_vec tcg_gen_usadd_vec_x86_64
+#define tcg_gen_sssub_vec tcg_gen_sssub_vec_x86_64
+#define tcg_gen_ussub_vec tcg_gen_ussub_vec_x86_64
+#define tcg_gen_smin_vec tcg_gen_smin_vec_x86_64
+#define tcg_gen_umin_vec tcg_gen_umin_vec_x86_64
+#define tcg_gen_smax_vec tcg_gen_smax_vec_x86_64
+#define tcg_gen_umax_vec tcg_gen_umax_vec_x86_64
+#define tcg_gen_shlv_vec tcg_gen_shlv_vec_x86_64
+#define tcg_gen_shrv_vec tcg_gen_shrv_vec_x86_64
+#define tcg_gen_sarv_vec tcg_gen_sarv_vec_x86_64
+#define tcg_gen_shls_vec tcg_gen_shls_vec_x86_64
+#define tcg_gen_shrs_vec tcg_gen_shrs_vec_x86_64
+#define tcg_gen_sars_vec tcg_gen_sars_vec_x86_64
+#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_x86_64
+#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_x86_64
+#define tb_htable_lookup tb_htable_lookup_x86_64
+#define tb_set_jmp_target tb_set_jmp_target_x86_64
+#define cpu_exec cpu_exec_x86_64
+#define cpu_loop_exit_noexc cpu_loop_exit_noexc_x86_64
+#define cpu_reloading_memory_map cpu_reloading_memory_map_x86_64
+#define cpu_loop_exit cpu_loop_exit_x86_64
+#define cpu_loop_exit_restore cpu_loop_exit_restore_x86_64
+#define cpu_loop_exit_atomic cpu_loop_exit_atomic_x86_64
+#define tlb_init tlb_init_x86_64
+#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_x86_64
+#define tlb_flush tlb_flush_x86_64
+#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_x86_64
+#define tlb_flush_all_cpus tlb_flush_all_cpus_x86_64
+#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_x86_64
+#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_x86_64
+#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_x86_64
+#define tlb_flush_page tlb_flush_page_x86_64
+#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_x86_64
+#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_x86_64
+#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_x86_64
+#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_x86_64
 #define tlb_protect_code tlb_protect_code_x86_64
-#define tlb_reset_dirty_range tlb_reset_dirty_range_x86_64
-#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_x86_64
+#define tlb_unprotect_code tlb_unprotect_code_x86_64
+#define tlb_reset_dirty tlb_reset_dirty_x86_64
 #define tlb_set_dirty tlb_set_dirty_x86_64
-#define tlb_set_dirty1 tlb_set_dirty1_x86_64
-#define tlb_unprotect_code_phys tlb_unprotect_code_phys_x86_64
+#define tlb_set_page_with_attrs tlb_set_page_with_attrs_x86_64
+#define tlb_set_page tlb_set_page_x86_64
+#define get_page_addr_code_hostp get_page_addr_code_hostp_x86_64
+#define get_page_addr_code get_page_addr_code_x86_64
+#define probe_access probe_access_x86_64
 #define tlb_vaddr_to_host tlb_vaddr_to_host_x86_64
-#define token_get_type token_get_type_x86_64
-#define token_get_value token_get_value_x86_64
-#define token_is_escape token_is_escape_x86_64
-#define token_is_keyword token_is_keyword_x86_64
-#define token_is_operator token_is_operator_x86_64
-#define tokens_append_from_iter tokens_append_from_iter_x86_64
-#define to_qiv to_qiv_x86_64
-#define to_qov to_qov_x86_64
-#define tosa_init tosa_init_x86_64
-#define tosa_machine_init tosa_machine_init_x86_64
-#define tswap32 tswap32_x86_64
-#define tswap64 tswap64_x86_64
-#define type_class_get_size type_class_get_size_x86_64
-#define type_get_by_name type_get_by_name_x86_64
-#define type_get_parent type_get_parent_x86_64
-#define type_has_parent type_has_parent_x86_64
-#define type_initialize type_initialize_x86_64
-#define type_initialize_interface type_initialize_interface_x86_64
-#define type_is_ancestor type_is_ancestor_x86_64
-#define type_new type_new_x86_64
-#define type_object_get_size type_object_get_size_x86_64
-#define type_register_internal type_register_internal_x86_64
-#define type_table_add type_table_add_x86_64
-#define type_table_get type_table_get_x86_64
-#define type_table_lookup type_table_lookup_x86_64
-#define uint16_to_float32 uint16_to_float32_x86_64
-#define uint16_to_float64 uint16_to_float64_x86_64
-#define uint32_to_float32 uint32_to_float32_x86_64
-#define uint32_to_float64 uint32_to_float64_x86_64
-#define uint64_to_float128 uint64_to_float128_x86_64
-#define uint64_to_float32 uint64_to_float32_x86_64
-#define uint64_to_float64 uint64_to_float64_x86_64
-#define unassigned_io_ops unassigned_io_ops_x86_64
-#define unassigned_io_read unassigned_io_read_x86_64
-#define unassigned_io_write unassigned_io_write_x86_64
-#define unassigned_mem_accepts unassigned_mem_accepts_x86_64
+#define helper_ret_ldub_mmu helper_ret_ldub_mmu_x86_64
+#define helper_le_lduw_mmu helper_le_lduw_mmu_x86_64
+#define helper_be_lduw_mmu helper_be_lduw_mmu_x86_64
+#define helper_le_ldul_mmu helper_le_ldul_mmu_x86_64
+#define helper_be_ldul_mmu helper_be_ldul_mmu_x86_64
+#define helper_le_ldq_mmu helper_le_ldq_mmu_x86_64
+#define helper_be_ldq_mmu helper_be_ldq_mmu_x86_64
+#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_x86_64
+#define helper_le_ldsw_mmu helper_le_ldsw_mmu_x86_64
+#define helper_be_ldsw_mmu helper_be_ldsw_mmu_x86_64
+#define helper_le_ldsl_mmu helper_le_ldsl_mmu_x86_64
+#define helper_be_ldsl_mmu helper_be_ldsl_mmu_x86_64
+#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_x86_64
+#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_x86_64
+#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_x86_64
+#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_x86_64
+#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_x86_64
+#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_x86_64
+#define cpu_ldub_data_ra cpu_ldub_data_ra_x86_64
+#define cpu_ldsb_data_ra cpu_ldsb_data_ra_x86_64
+#define cpu_lduw_data_ra cpu_lduw_data_ra_x86_64
+#define cpu_ldsw_data_ra cpu_ldsw_data_ra_x86_64
+#define cpu_ldl_data_ra cpu_ldl_data_ra_x86_64
+#define cpu_ldq_data_ra cpu_ldq_data_ra_x86_64
+#define cpu_ldub_data cpu_ldub_data_x86_64
+#define cpu_ldsb_data cpu_ldsb_data_x86_64
+#define cpu_lduw_data cpu_lduw_data_x86_64
+#define cpu_ldsw_data cpu_ldsw_data_x86_64
+#define cpu_ldl_data cpu_ldl_data_x86_64
+#define cpu_ldq_data cpu_ldq_data_x86_64
+#define helper_ret_stb_mmu helper_ret_stb_mmu_x86_64
+#define helper_le_stw_mmu helper_le_stw_mmu_x86_64
+#define helper_be_stw_mmu helper_be_stw_mmu_x86_64
+#define helper_le_stl_mmu helper_le_stl_mmu_x86_64
+#define helper_be_stl_mmu helper_be_stl_mmu_x86_64
+#define helper_le_stq_mmu helper_le_stq_mmu_x86_64
+#define helper_be_stq_mmu helper_be_stq_mmu_x86_64
+#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_x86_64
+#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_x86_64
+#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_x86_64
+#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_x86_64
+#define cpu_stb_data_ra cpu_stb_data_ra_x86_64
+#define cpu_stw_data_ra cpu_stw_data_ra_x86_64
+#define cpu_stl_data_ra cpu_stl_data_ra_x86_64
+#define cpu_stq_data_ra cpu_stq_data_ra_x86_64
+#define cpu_stb_data cpu_stb_data_x86_64
+#define cpu_stw_data cpu_stw_data_x86_64
+#define cpu_stl_data cpu_stl_data_x86_64
+#define cpu_stq_data cpu_stq_data_x86_64
+#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_x86_64
+#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_x86_64
+#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_x86_64
+#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_x86_64
+#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_x86_64
+#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_x86_64
+#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_x86_64
+#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_x86_64
+#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_x86_64
+#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_x86_64
+#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_x86_64
+#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_x86_64
+#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_x86_64
+#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_x86_64
+#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_x86_64
+#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_x86_64
+#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_x86_64
+#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_x86_64
+#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_x86_64
+#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_x86_64
+#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_x86_64
+#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_x86_64
+#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_x86_64
+#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_x86_64
+#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_x86_64
+#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_x86_64
+#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_x86_64
+#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_x86_64
+#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_x86_64
+#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_x86_64
+#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_x86_64
+#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_x86_64
+#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_x86_64
+#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_x86_64
+#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_x86_64
+#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_x86_64
+#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_x86_64
+#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_x86_64
+#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_x86_64
+#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_x86_64
+#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_x86_64
+#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_x86_64
+#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_x86_64
+#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_x86_64
+#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_x86_64
+#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_x86_64
+#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_x86_64
+#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_x86_64
+#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_x86_64
+#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_x86_64
+#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_x86_64
+#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_x86_64
+#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_x86_64
+#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_x86_64
+#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_x86_64
+#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_x86_64
+#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_x86_64
+#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_x86_64
+#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_x86_64
+#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_x86_64
+#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_x86_64
+#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_x86_64
+#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_x86_64
+#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_x86_64
+#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_x86_64
+#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_x86_64
+#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_x86_64
+#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_x86_64
+#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_x86_64
+#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_x86_64
+#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_x86_64
+#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_x86_64
+#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_x86_64
+#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_x86_64
+#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_x86_64
+#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_x86_64
+#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_x86_64
+#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_x86_64
+#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_x86_64
+#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_x86_64
+#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_x86_64
+#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_x86_64
+#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_x86_64
+#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_x86_64
+#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_x86_64
+#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_x86_64
+#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_x86_64
+#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_x86_64
+#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_x86_64
+#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_x86_64
+#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_x86_64
+#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_x86_64
+#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_x86_64
+#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_x86_64
+#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_x86_64
+#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_x86_64
+#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_x86_64
+#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_x86_64
+#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_x86_64
+#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_x86_64
+#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_x86_64
+#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_x86_64
+#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_x86_64
+#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_x86_64
+#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_x86_64
+#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_x86_64
+#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_x86_64
+#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_x86_64
+#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_x86_64
+#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_x86_64
+#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_x86_64
+#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_x86_64
+#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_x86_64
+#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_x86_64
+#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_x86_64
+#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_x86_64
+#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_x86_64
+#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_x86_64
+#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_x86_64
+#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_x86_64
+#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_x86_64
+#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_x86_64
+#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_x86_64
+#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_x86_64
+#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_x86_64
+#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_x86_64
+#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_x86_64
+#define helper_atomic_xchgb helper_atomic_xchgb_x86_64
+#define helper_atomic_fetch_addb helper_atomic_fetch_addb_x86_64
+#define helper_atomic_fetch_andb helper_atomic_fetch_andb_x86_64
+#define helper_atomic_fetch_orb helper_atomic_fetch_orb_x86_64
+#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_x86_64
+#define helper_atomic_add_fetchb helper_atomic_add_fetchb_x86_64
+#define helper_atomic_and_fetchb helper_atomic_and_fetchb_x86_64
+#define helper_atomic_or_fetchb helper_atomic_or_fetchb_x86_64
+#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_x86_64
+#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_x86_64
+#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_x86_64
+#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_x86_64
+#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_x86_64
+#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_x86_64
+#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_x86_64
+#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_x86_64
+#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_x86_64
+#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_x86_64
+#define helper_atomic_xchgw_le helper_atomic_xchgw_le_x86_64
+#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_x86_64
+#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_x86_64
+#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_x86_64
+#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_x86_64
+#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_x86_64
+#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_x86_64
+#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_x86_64
+#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_x86_64
+#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_x86_64
+#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_x86_64
+#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_x86_64
+#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_x86_64
+#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_x86_64
+#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_x86_64
+#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_x86_64
+#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_x86_64
+#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_x86_64
+#define helper_atomic_xchgw_be helper_atomic_xchgw_be_x86_64
+#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_x86_64
+#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_x86_64
+#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_x86_64
+#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_x86_64
+#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_x86_64
+#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_x86_64
+#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_x86_64
+#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_x86_64
+#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_x86_64
+#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_x86_64
+#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_x86_64
+#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_x86_64
+#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_x86_64
+#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_x86_64
+#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_x86_64
+#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_x86_64
+#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_x86_64
+#define helper_atomic_xchgl_le helper_atomic_xchgl_le_x86_64
+#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_x86_64
+#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_x86_64
+#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_x86_64
+#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_x86_64
+#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_x86_64
+#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_x86_64
+#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_x86_64
+#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_x86_64
+#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_x86_64
+#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_x86_64
+#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_x86_64
+#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_x86_64
+#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_x86_64
+#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_x86_64
+#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_x86_64
+#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_x86_64
+#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_x86_64
+#define helper_atomic_xchgl_be helper_atomic_xchgl_be_x86_64
+#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_x86_64
+#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_x86_64
+#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_x86_64
+#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_x86_64
+#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_x86_64
+#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_x86_64
+#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_x86_64
+#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_x86_64
+#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_x86_64
+#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_x86_64
+#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_x86_64
+#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_x86_64
+#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_x86_64
+#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_x86_64
+#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_x86_64
+#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_x86_64
+#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_x86_64
+#define helper_atomic_xchgq_le helper_atomic_xchgq_le_x86_64
+#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_x86_64
+#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_x86_64
+#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_x86_64
+#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_x86_64
+#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_x86_64
+#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_x86_64
+#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_x86_64
+#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_x86_64
+#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_x86_64
+#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_x86_64
+#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_x86_64
+#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_x86_64
+#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_x86_64
+#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_x86_64
+#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_x86_64
+#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_x86_64
+#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_x86_64
+#define helper_atomic_xchgq_be helper_atomic_xchgq_be_x86_64
+#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_x86_64
+#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_x86_64
+#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_x86_64
+#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_x86_64
+#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_x86_64
+#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_x86_64
+#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_x86_64
+#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_x86_64
+#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_x86_64
+#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_x86_64
+#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_x86_64
+#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_x86_64
+#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_x86_64
+#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_x86_64
+#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_x86_64
+#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_x86_64
+#define cpu_ldub_code cpu_ldub_code_x86_64
+#define cpu_lduw_code cpu_lduw_code_x86_64
+#define cpu_ldl_code cpu_ldl_code_x86_64
+#define cpu_ldq_code cpu_ldq_code_x86_64
+#define helper_div_i32 helper_div_i32_x86_64
+#define helper_rem_i32 helper_rem_i32_x86_64
+#define helper_divu_i32 helper_divu_i32_x86_64
+#define helper_remu_i32 helper_remu_i32_x86_64
+#define helper_shl_i64 helper_shl_i64_x86_64
+#define helper_shr_i64 helper_shr_i64_x86_64
+#define helper_sar_i64 helper_sar_i64_x86_64
+#define helper_div_i64 helper_div_i64_x86_64
+#define helper_rem_i64 helper_rem_i64_x86_64
+#define helper_divu_i64 helper_divu_i64_x86_64
+#define helper_remu_i64 helper_remu_i64_x86_64
+#define helper_muluh_i64 helper_muluh_i64_x86_64
+#define helper_mulsh_i64 helper_mulsh_i64_x86_64
+#define helper_clz_i32 helper_clz_i32_x86_64
+#define helper_ctz_i32 helper_ctz_i32_x86_64
+#define helper_clz_i64 helper_clz_i64_x86_64
+#define helper_ctz_i64 helper_ctz_i64_x86_64
+#define helper_clrsb_i32 helper_clrsb_i32_x86_64
+#define helper_clrsb_i64 helper_clrsb_i64_x86_64
+#define helper_ctpop_i32 helper_ctpop_i32_x86_64
+#define helper_ctpop_i64 helper_ctpop_i64_x86_64
+#define helper_lookup_tb_ptr helper_lookup_tb_ptr_x86_64
+#define helper_exit_atomic helper_exit_atomic_x86_64
+#define helper_gvec_add8 helper_gvec_add8_x86_64
+#define helper_gvec_add16 helper_gvec_add16_x86_64
+#define helper_gvec_add32 helper_gvec_add32_x86_64
+#define helper_gvec_add64 helper_gvec_add64_x86_64
+#define helper_gvec_adds8 helper_gvec_adds8_x86_64
+#define helper_gvec_adds16 helper_gvec_adds16_x86_64
+#define helper_gvec_adds32 helper_gvec_adds32_x86_64
+#define helper_gvec_adds64 helper_gvec_adds64_x86_64
+#define helper_gvec_sub8 helper_gvec_sub8_x86_64
+#define helper_gvec_sub16 helper_gvec_sub16_x86_64
+#define helper_gvec_sub32 helper_gvec_sub32_x86_64
+#define helper_gvec_sub64 helper_gvec_sub64_x86_64
+#define helper_gvec_subs8 helper_gvec_subs8_x86_64
+#define helper_gvec_subs16 helper_gvec_subs16_x86_64
+#define helper_gvec_subs32 helper_gvec_subs32_x86_64
+#define helper_gvec_subs64 helper_gvec_subs64_x86_64
+#define helper_gvec_mul8 helper_gvec_mul8_x86_64
+#define helper_gvec_mul16 helper_gvec_mul16_x86_64
+#define helper_gvec_mul32 helper_gvec_mul32_x86_64
+#define helper_gvec_mul64 helper_gvec_mul64_x86_64
+#define helper_gvec_muls8 helper_gvec_muls8_x86_64
+#define helper_gvec_muls16 helper_gvec_muls16_x86_64
+#define helper_gvec_muls32 helper_gvec_muls32_x86_64
+#define helper_gvec_muls64 helper_gvec_muls64_x86_64
+#define helper_gvec_neg8 helper_gvec_neg8_x86_64
+#define helper_gvec_neg16 helper_gvec_neg16_x86_64
+#define helper_gvec_neg32 helper_gvec_neg32_x86_64
+#define helper_gvec_neg64 helper_gvec_neg64_x86_64
+#define helper_gvec_abs8 helper_gvec_abs8_x86_64
+#define helper_gvec_abs16 helper_gvec_abs16_x86_64
+#define helper_gvec_abs32 helper_gvec_abs32_x86_64
+#define helper_gvec_abs64 helper_gvec_abs64_x86_64
+#define helper_gvec_mov helper_gvec_mov_x86_64
+#define helper_gvec_dup64 helper_gvec_dup64_x86_64
+#define helper_gvec_dup32 helper_gvec_dup32_x86_64
+#define helper_gvec_dup16 helper_gvec_dup16_x86_64
+#define helper_gvec_dup8 helper_gvec_dup8_x86_64
+#define helper_gvec_not helper_gvec_not_x86_64
+#define helper_gvec_and helper_gvec_and_x86_64
+#define helper_gvec_or helper_gvec_or_x86_64
+#define helper_gvec_xor helper_gvec_xor_x86_64
+#define helper_gvec_andc helper_gvec_andc_x86_64
+#define helper_gvec_orc helper_gvec_orc_x86_64
+#define helper_gvec_nand helper_gvec_nand_x86_64
+#define helper_gvec_nor helper_gvec_nor_x86_64
+#define helper_gvec_eqv helper_gvec_eqv_x86_64
+#define helper_gvec_ands helper_gvec_ands_x86_64
+#define helper_gvec_xors helper_gvec_xors_x86_64
+#define helper_gvec_ors helper_gvec_ors_x86_64
+#define helper_gvec_shl8i helper_gvec_shl8i_x86_64
+#define helper_gvec_shl16i helper_gvec_shl16i_x86_64
+#define helper_gvec_shl32i helper_gvec_shl32i_x86_64
+#define helper_gvec_shl64i helper_gvec_shl64i_x86_64
+#define helper_gvec_shr8i helper_gvec_shr8i_x86_64
+#define helper_gvec_shr16i helper_gvec_shr16i_x86_64
+#define helper_gvec_shr32i helper_gvec_shr32i_x86_64
+#define helper_gvec_shr64i helper_gvec_shr64i_x86_64
+#define helper_gvec_sar8i helper_gvec_sar8i_x86_64
+#define helper_gvec_sar16i helper_gvec_sar16i_x86_64
+#define helper_gvec_sar32i helper_gvec_sar32i_x86_64
+#define helper_gvec_sar64i helper_gvec_sar64i_x86_64
+#define helper_gvec_shl8v helper_gvec_shl8v_x86_64
+#define helper_gvec_shl16v helper_gvec_shl16v_x86_64
+#define helper_gvec_shl32v helper_gvec_shl32v_x86_64
+#define helper_gvec_shl64v helper_gvec_shl64v_x86_64
+#define helper_gvec_shr8v helper_gvec_shr8v_x86_64
+#define helper_gvec_shr16v helper_gvec_shr16v_x86_64
+#define helper_gvec_shr32v helper_gvec_shr32v_x86_64
+#define helper_gvec_shr64v helper_gvec_shr64v_x86_64
+#define helper_gvec_sar8v helper_gvec_sar8v_x86_64
+#define helper_gvec_sar16v helper_gvec_sar16v_x86_64
+#define helper_gvec_sar32v helper_gvec_sar32v_x86_64
+#define helper_gvec_sar64v helper_gvec_sar64v_x86_64
+#define helper_gvec_eq8 helper_gvec_eq8_x86_64
+#define helper_gvec_ne8 helper_gvec_ne8_x86_64
+#define helper_gvec_lt8 helper_gvec_lt8_x86_64
+#define helper_gvec_le8 helper_gvec_le8_x86_64
+#define helper_gvec_ltu8 helper_gvec_ltu8_x86_64
+#define helper_gvec_leu8 helper_gvec_leu8_x86_64
+#define helper_gvec_eq16 helper_gvec_eq16_x86_64
+#define helper_gvec_ne16 helper_gvec_ne16_x86_64
+#define helper_gvec_lt16 helper_gvec_lt16_x86_64
+#define helper_gvec_le16 helper_gvec_le16_x86_64
+#define helper_gvec_ltu16 helper_gvec_ltu16_x86_64
+#define helper_gvec_leu16 helper_gvec_leu16_x86_64
+#define helper_gvec_eq32 helper_gvec_eq32_x86_64
+#define helper_gvec_ne32 helper_gvec_ne32_x86_64
+#define helper_gvec_lt32 helper_gvec_lt32_x86_64
+#define helper_gvec_le32 helper_gvec_le32_x86_64
+#define helper_gvec_ltu32 helper_gvec_ltu32_x86_64
+#define helper_gvec_leu32 helper_gvec_leu32_x86_64
+#define helper_gvec_eq64 helper_gvec_eq64_x86_64
+#define helper_gvec_ne64 helper_gvec_ne64_x86_64
+#define helper_gvec_lt64 helper_gvec_lt64_x86_64
+#define helper_gvec_le64 helper_gvec_le64_x86_64
+#define helper_gvec_ltu64 helper_gvec_ltu64_x86_64
+#define helper_gvec_leu64 helper_gvec_leu64_x86_64
+#define helper_gvec_ssadd8 helper_gvec_ssadd8_x86_64
+#define helper_gvec_ssadd16 helper_gvec_ssadd16_x86_64
+#define helper_gvec_ssadd32 helper_gvec_ssadd32_x86_64
+#define helper_gvec_ssadd64 helper_gvec_ssadd64_x86_64
+#define helper_gvec_sssub8 helper_gvec_sssub8_x86_64
+#define helper_gvec_sssub16 helper_gvec_sssub16_x86_64
+#define helper_gvec_sssub32 helper_gvec_sssub32_x86_64
+#define helper_gvec_sssub64 helper_gvec_sssub64_x86_64
+#define helper_gvec_usadd8 helper_gvec_usadd8_x86_64
+#define helper_gvec_usadd16 helper_gvec_usadd16_x86_64
+#define helper_gvec_usadd32 helper_gvec_usadd32_x86_64
+#define helper_gvec_usadd64 helper_gvec_usadd64_x86_64
+#define helper_gvec_ussub8 helper_gvec_ussub8_x86_64
+#define helper_gvec_ussub16 helper_gvec_ussub16_x86_64
+#define helper_gvec_ussub32 helper_gvec_ussub32_x86_64
+#define helper_gvec_ussub64 helper_gvec_ussub64_x86_64
+#define helper_gvec_smin8 helper_gvec_smin8_x86_64
+#define helper_gvec_smin16 helper_gvec_smin16_x86_64
+#define helper_gvec_smin32 helper_gvec_smin32_x86_64
+#define helper_gvec_smin64 helper_gvec_smin64_x86_64
+#define helper_gvec_smax8 helper_gvec_smax8_x86_64
+#define helper_gvec_smax16 helper_gvec_smax16_x86_64
+#define helper_gvec_smax32 helper_gvec_smax32_x86_64
+#define helper_gvec_smax64 helper_gvec_smax64_x86_64
+#define helper_gvec_umin8 helper_gvec_umin8_x86_64
+#define helper_gvec_umin16 helper_gvec_umin16_x86_64
+#define helper_gvec_umin32 helper_gvec_umin32_x86_64
+#define helper_gvec_umin64 helper_gvec_umin64_x86_64
+#define helper_gvec_umax8 helper_gvec_umax8_x86_64
+#define helper_gvec_umax16 helper_gvec_umax16_x86_64
+#define helper_gvec_umax32 helper_gvec_umax32_x86_64
+#define helper_gvec_umax64 helper_gvec_umax64_x86_64
+#define helper_gvec_bitsel helper_gvec_bitsel_x86_64
+#define cpu_restore_state cpu_restore_state_x86_64
+#define page_collection_lock page_collection_lock_x86_64
+#define page_collection_unlock page_collection_unlock_x86_64
+#define free_code_gen_buffer free_code_gen_buffer_x86_64
+#define tcg_exec_init tcg_exec_init_x86_64
+#define tb_cleanup tb_cleanup_x86_64
+#define tb_flush tb_flush_x86_64
+#define tb_phys_invalidate tb_phys_invalidate_x86_64
+#define tb_gen_code tb_gen_code_x86_64
+#define tb_exec_lock tb_exec_lock_x86_64
+#define tb_exec_unlock tb_exec_unlock_x86_64
+#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_x86_64
+#define tb_invalidate_phys_range tb_invalidate_phys_range_x86_64
+#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_x86_64
+#define tb_check_watchpoint tb_check_watchpoint_x86_64
+#define cpu_io_recompile cpu_io_recompile_x86_64
+#define tb_flush_jmp_cache tb_flush_jmp_cache_x86_64
+#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_x86_64
+#define translator_loop_temp_check translator_loop_temp_check_x86_64
+#define translator_loop translator_loop_x86_64
+#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_x86_64
+#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_x86_64
+#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_x86_64
+#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_x86_64
+#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_x86_64
+#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_x86_64
 #define unassigned_mem_ops unassigned_mem_ops_x86_64
-#define unassigned_mem_read unassigned_mem_read_x86_64
-#define unassigned_mem_write unassigned_mem_write_x86_64
-#define update_spsel update_spsel_x86_64
-#define v6_cp_reginfo v6_cp_reginfo_x86_64
-#define v6k_cp_reginfo v6k_cp_reginfo_x86_64
-#define v7_cp_reginfo v7_cp_reginfo_x86_64
-#define v7mp_cp_reginfo v7mp_cp_reginfo_x86_64
-#define v7m_pop v7m_pop_x86_64
-#define v7m_push v7m_push_x86_64
-#define v8_cp_reginfo v8_cp_reginfo_x86_64
-#define v8_el2_cp_reginfo v8_el2_cp_reginfo_x86_64
-#define v8_el3_cp_reginfo v8_el3_cp_reginfo_x86_64
-#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_x86_64
-#define vapa_cp_reginfo vapa_cp_reginfo_x86_64
-#define vbar_write vbar_write_x86_64
-#define vfp_exceptbits_from_host vfp_exceptbits_from_host_x86_64
-#define vfp_exceptbits_to_host vfp_exceptbits_to_host_x86_64
-#define vfp_get_fpcr vfp_get_fpcr_x86_64
-#define vfp_get_fpscr vfp_get_fpscr_x86_64
-#define vfp_get_fpsr vfp_get_fpsr_x86_64
-#define vfp_reg_offset vfp_reg_offset_x86_64
-#define vfp_set_fpcr vfp_set_fpcr_x86_64
-#define vfp_set_fpscr vfp_set_fpscr_x86_64
-#define vfp_set_fpsr vfp_set_fpsr_x86_64
-#define visit_end_implicit_struct visit_end_implicit_struct_x86_64
-#define visit_end_list visit_end_list_x86_64
-#define visit_end_struct visit_end_struct_x86_64
-#define visit_end_union visit_end_union_x86_64
-#define visit_get_next_type visit_get_next_type_x86_64
-#define visit_next_list visit_next_list_x86_64
-#define visit_optional visit_optional_x86_64
-#define visit_start_implicit_struct visit_start_implicit_struct_x86_64
-#define visit_start_list visit_start_list_x86_64
-#define visit_start_struct visit_start_struct_x86_64
-#define visit_start_union visit_start_union_x86_64
-#define vmsa_cp_reginfo vmsa_cp_reginfo_x86_64
-#define vmsa_tcr_el1_write vmsa_tcr_el1_write_x86_64
-#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_x86_64
-#define vmsa_ttbcr_reset vmsa_ttbcr_reset_x86_64
-#define vmsa_ttbcr_write vmsa_ttbcr_write_x86_64
-#define vmsa_ttbr_write vmsa_ttbr_write_x86_64
-#define write_cpustate_to_list write_cpustate_to_list_x86_64
-#define write_list_to_cpustate write_list_to_cpustate_x86_64
-#define write_raw_cp_reg write_raw_cp_reg_x86_64
-#define X86CPURegister32_lookup X86CPURegister32_lookup_x86_64
-#define x86_op_defs x86_op_defs_x86_64
-#define xpsr_read xpsr_read_x86_64
-#define xpsr_write xpsr_write_x86_64
-#define xscale_cpar_write xscale_cpar_write_x86_64
-#define xscale_cp_reginfo xscale_cp_reginfo_x86_64
+#define floatx80_infinity floatx80_infinity_x86_64
+#define dup_const_func dup_const_func_x86_64
+#define gen_helper_raise_exception gen_helper_raise_exception_x86_64
+#define gen_helper_raise_interrupt gen_helper_raise_interrupt_x86_64
+#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_x86_64
+#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_x86_64
+#define gen_helper_cpsr_read gen_helper_cpsr_read_x86_64
+#define gen_helper_cpsr_write gen_helper_cpsr_write_x86_64
+#define cpu_get_tsc cpu_get_tsc_x86_64
+#define x86_cpu_get_memory_mapping x86_cpu_get_memory_mapping_x86_64
+#define cpu_x86_update_dr7 cpu_x86_update_dr7_x86_64
+#define breakpoint_handler breakpoint_handler_x86_64
+#define helper_single_step helper_single_step_x86_64
+#define helper_rechecking_single_step helper_rechecking_single_step_x86_64
+#define helper_set_dr helper_set_dr_x86_64
+#define helper_get_dr helper_get_dr_x86_64
+#define helper_bpt_io helper_bpt_io_x86_64
+#define helper_cc_compute_all helper_cc_compute_all_x86_64
+#define cpu_cc_compute_all cpu_cc_compute_all_x86_64
+#define helper_cc_compute_c helper_cc_compute_c_x86_64
+#define helper_write_eflags helper_write_eflags_x86_64
+#define helper_read_eflags helper_read_eflags_x86_64
+#define helper_clts helper_clts_x86_64
+#define helper_reset_rf helper_reset_rf_x86_64
+#define helper_cli helper_cli_x86_64
+#define helper_sti helper_sti_x86_64
+#define helper_clac helper_clac_x86_64
+#define helper_stac helper_stac_x86_64
+#define get_register_name_32 get_register_name_32_x86_64
+#define host_cpuid host_cpuid_x86_64
+#define host_vendor_fms host_vendor_fms_x86_64
+#define x86_cpu_set_default_version x86_cpu_set_default_version_x86_64
+#define cpu_clear_apic_feature cpu_clear_apic_feature_x86_64
+#define cpu_x86_cpuid cpu_x86_cpuid_x86_64
+#define x86_cpu_pending_interrupt x86_cpu_pending_interrupt_x86_64
+#define x86_update_hflags x86_update_hflags_x86_64
+#define cpu_x86_init cpu_x86_init_x86_64
+#define helper_raise_interrupt helper_raise_interrupt_x86_64
+#define helper_raise_exception helper_raise_exception_x86_64
+#define raise_interrupt raise_interrupt_x86_64
+#define raise_exception_err raise_exception_err_x86_64
+#define raise_exception_err_ra raise_exception_err_ra_x86_64
+#define raise_exception raise_exception_x86_64
+#define raise_exception_ra raise_exception_ra_x86_64
+#define x86_cpu_tlb_fill x86_cpu_tlb_fill_x86_64
+#define cpu_set_ignne cpu_set_ignne_x86_64
+#define helper_flds_FT0 helper_flds_FT0_x86_64
+#define helper_fldl_FT0 helper_fldl_FT0_x86_64
+#define helper_fildl_FT0 helper_fildl_FT0_x86_64
+#define helper_flds_ST0 helper_flds_ST0_x86_64
+#define helper_fldl_ST0 helper_fldl_ST0_x86_64
+#define helper_fildl_ST0 helper_fildl_ST0_x86_64
+#define helper_fildll_ST0 helper_fildll_ST0_x86_64
+#define helper_fsts_ST0 helper_fsts_ST0_x86_64
+#define helper_fstl_ST0 helper_fstl_ST0_x86_64
+#define helper_fist_ST0 helper_fist_ST0_x86_64
+#define helper_fistl_ST0 helper_fistl_ST0_x86_64
+#define helper_fistll_ST0 helper_fistll_ST0_x86_64
+#define helper_fistt_ST0 helper_fistt_ST0_x86_64
+#define helper_fisttl_ST0 helper_fisttl_ST0_x86_64
+#define helper_fisttll_ST0 helper_fisttll_ST0_x86_64
+#define helper_fldt_ST0 helper_fldt_ST0_x86_64
+#define helper_fstt_ST0 helper_fstt_ST0_x86_64
+#define helper_fpush helper_fpush_x86_64
+#define helper_fpop helper_fpop_x86_64
+#define helper_fdecstp helper_fdecstp_x86_64
+#define helper_fincstp helper_fincstp_x86_64
+#define helper_ffree_STN helper_ffree_STN_x86_64
+#define helper_fmov_ST0_FT0 helper_fmov_ST0_FT0_x86_64
+#define helper_fmov_FT0_STN helper_fmov_FT0_STN_x86_64
+#define helper_fmov_ST0_STN helper_fmov_ST0_STN_x86_64
+#define helper_fmov_STN_ST0 helper_fmov_STN_ST0_x86_64
+#define helper_fxchg_ST0_STN helper_fxchg_ST0_STN_x86_64
+#define helper_fcom_ST0_FT0 helper_fcom_ST0_FT0_x86_64
+#define helper_fucom_ST0_FT0 helper_fucom_ST0_FT0_x86_64
+#define helper_fcomi_ST0_FT0 helper_fcomi_ST0_FT0_x86_64
+#define helper_fucomi_ST0_FT0 helper_fucomi_ST0_FT0_x86_64
+#define helper_fadd_ST0_FT0 helper_fadd_ST0_FT0_x86_64
+#define helper_fmul_ST0_FT0 helper_fmul_ST0_FT0_x86_64
+#define helper_fsub_ST0_FT0 helper_fsub_ST0_FT0_x86_64
+#define helper_fsubr_ST0_FT0 helper_fsubr_ST0_FT0_x86_64
+#define helper_fdiv_ST0_FT0 helper_fdiv_ST0_FT0_x86_64
+#define helper_fdivr_ST0_FT0 helper_fdivr_ST0_FT0_x86_64
+#define helper_fadd_STN_ST0 helper_fadd_STN_ST0_x86_64
+#define helper_fmul_STN_ST0 helper_fmul_STN_ST0_x86_64
+#define helper_fsub_STN_ST0 helper_fsub_STN_ST0_x86_64
+#define helper_fsubr_STN_ST0 helper_fsubr_STN_ST0_x86_64
+#define helper_fdiv_STN_ST0 helper_fdiv_STN_ST0_x86_64
+#define helper_fdivr_STN_ST0 helper_fdivr_STN_ST0_x86_64
+#define helper_fchs_ST0 helper_fchs_ST0_x86_64
+#define helper_fabs_ST0 helper_fabs_ST0_x86_64
+#define helper_fld1_ST0 helper_fld1_ST0_x86_64
+#define helper_fldl2t_ST0 helper_fldl2t_ST0_x86_64
+#define helper_fldl2e_ST0 helper_fldl2e_ST0_x86_64
+#define helper_fldpi_ST0 helper_fldpi_ST0_x86_64
+#define helper_fldlg2_ST0 helper_fldlg2_ST0_x86_64
+#define helper_fldln2_ST0 helper_fldln2_ST0_x86_64
+#define helper_fldz_ST0 helper_fldz_ST0_x86_64
+#define helper_fldz_FT0 helper_fldz_FT0_x86_64
+#define helper_fnstsw helper_fnstsw_x86_64
+#define helper_fnstcw helper_fnstcw_x86_64
+#define update_fp_status update_fp_status_x86_64
+#define helper_fldcw helper_fldcw_x86_64
+#define helper_fclex helper_fclex_x86_64
+#define helper_fwait helper_fwait_x86_64
+#define helper_fninit helper_fninit_x86_64
+#define helper_fbld_ST0 helper_fbld_ST0_x86_64
+#define helper_fbst_ST0 helper_fbst_ST0_x86_64
+#define helper_f2xm1 helper_f2xm1_x86_64
+#define helper_fyl2x helper_fyl2x_x86_64
+#define helper_fptan helper_fptan_x86_64
+#define helper_fpatan helper_fpatan_x86_64
+#define helper_fxtract helper_fxtract_x86_64
+#define helper_fprem1 helper_fprem1_x86_64
+#define helper_fprem helper_fprem_x86_64
+#define helper_fyl2xp1 helper_fyl2xp1_x86_64
+#define helper_fsqrt helper_fsqrt_x86_64
+#define helper_fsincos helper_fsincos_x86_64
+#define helper_frndint helper_frndint_x86_64
+#define helper_fscale helper_fscale_x86_64
+#define helper_fsin helper_fsin_x86_64
+#define helper_fcos helper_fcos_x86_64
+#define helper_fxam_ST0 helper_fxam_ST0_x86_64
+#define helper_fstenv helper_fstenv_x86_64
+#define helper_fldenv helper_fldenv_x86_64
+#define helper_fsave helper_fsave_x86_64
+#define helper_frstor helper_frstor_x86_64
+#define helper_fxsave helper_fxsave_x86_64
+#define helper_xsave helper_xsave_x86_64
+#define helper_xsaveopt helper_xsaveopt_x86_64
+#define helper_fxrstor helper_fxrstor_x86_64
+#define helper_xrstor helper_xrstor_x86_64
+#define helper_xgetbv helper_xgetbv_x86_64
+#define helper_xsetbv helper_xsetbv_x86_64
+#define update_mxcsr_status update_mxcsr_status_x86_64
+#define helper_ldmxcsr helper_ldmxcsr_x86_64
+#define helper_enter_mmx helper_enter_mmx_x86_64
+#define helper_emms helper_emms_x86_64
+#define helper_movq helper_movq_x86_64
+#define helper_psrlw_mmx helper_psrlw_mmx_x86_64
+#define helper_psraw_mmx helper_psraw_mmx_x86_64
+#define helper_psllw_mmx helper_psllw_mmx_x86_64
+#define helper_psrld_mmx helper_psrld_mmx_x86_64
+#define helper_psrad_mmx helper_psrad_mmx_x86_64
+#define helper_pslld_mmx helper_pslld_mmx_x86_64
+#define helper_psrlq_mmx helper_psrlq_mmx_x86_64
+#define helper_psllq_mmx helper_psllq_mmx_x86_64
+#define helper_paddb_mmx helper_paddb_mmx_x86_64
+#define helper_paddw_mmx helper_paddw_mmx_x86_64
+#define helper_paddl_mmx helper_paddl_mmx_x86_64
+#define helper_paddq_mmx helper_paddq_mmx_x86_64
+#define helper_psubb_mmx helper_psubb_mmx_x86_64
+#define helper_psubw_mmx helper_psubw_mmx_x86_64
+#define helper_psubl_mmx helper_psubl_mmx_x86_64
+#define helper_psubq_mmx helper_psubq_mmx_x86_64
+#define helper_paddusb_mmx helper_paddusb_mmx_x86_64
+#define helper_paddsb_mmx helper_paddsb_mmx_x86_64
+#define helper_psubusb_mmx helper_psubusb_mmx_x86_64
+#define helper_psubsb_mmx helper_psubsb_mmx_x86_64
+#define helper_paddusw_mmx helper_paddusw_mmx_x86_64
+#define helper_paddsw_mmx helper_paddsw_mmx_x86_64
+#define helper_psubusw_mmx helper_psubusw_mmx_x86_64
+#define helper_psubsw_mmx helper_psubsw_mmx_x86_64
+#define helper_pminub_mmx helper_pminub_mmx_x86_64
+#define helper_pmaxub_mmx helper_pmaxub_mmx_x86_64
+#define helper_pminsw_mmx helper_pminsw_mmx_x86_64
+#define helper_pmaxsw_mmx helper_pmaxsw_mmx_x86_64
+#define helper_pand_mmx helper_pand_mmx_x86_64
+#define helper_pandn_mmx helper_pandn_mmx_x86_64
+#define helper_por_mmx helper_por_mmx_x86_64
+#define helper_pxor_mmx helper_pxor_mmx_x86_64
+#define helper_pcmpgtb_mmx helper_pcmpgtb_mmx_x86_64
+#define helper_pcmpgtw_mmx helper_pcmpgtw_mmx_x86_64
+#define helper_pcmpgtl_mmx helper_pcmpgtl_mmx_x86_64
+#define helper_pcmpeqb_mmx helper_pcmpeqb_mmx_x86_64
+#define helper_pcmpeqw_mmx helper_pcmpeqw_mmx_x86_64
+#define helper_pcmpeql_mmx helper_pcmpeql_mmx_x86_64
+#define helper_pmullw_mmx helper_pmullw_mmx_x86_64
+#define helper_pmulhrw_mmx helper_pmulhrw_mmx_x86_64
+#define helper_pmulhuw_mmx helper_pmulhuw_mmx_x86_64
+#define helper_pmulhw_mmx helper_pmulhw_mmx_x86_64
+#define helper_pavgb_mmx helper_pavgb_mmx_x86_64
+#define helper_pavgw_mmx helper_pavgw_mmx_x86_64
+#define helper_pmuludq_mmx helper_pmuludq_mmx_x86_64
+#define helper_pmaddwd_mmx helper_pmaddwd_mmx_x86_64
+#define helper_psadbw_mmx helper_psadbw_mmx_x86_64
+#define helper_maskmov_mmx helper_maskmov_mmx_x86_64
+#define helper_movl_mm_T0_mmx helper_movl_mm_T0_mmx_x86_64
+#define helper_movq_mm_T0_mmx helper_movq_mm_T0_mmx_x86_64
+#define helper_pshufw_mmx helper_pshufw_mmx_x86_64
+#define helper_pmovmskb_mmx helper_pmovmskb_mmx_x86_64
+#define helper_packsswb_mmx helper_packsswb_mmx_x86_64
+#define helper_packuswb_mmx helper_packuswb_mmx_x86_64
+#define helper_packssdw_mmx helper_packssdw_mmx_x86_64
+#define helper_punpcklbw_mmx helper_punpcklbw_mmx_x86_64
+#define helper_punpcklwd_mmx helper_punpcklwd_mmx_x86_64
+#define helper_punpckldq_mmx helper_punpckldq_mmx_x86_64
+#define helper_punpckhbw_mmx helper_punpckhbw_mmx_x86_64
+#define helper_punpckhwd_mmx helper_punpckhwd_mmx_x86_64
+#define helper_punpckhdq_mmx helper_punpckhdq_mmx_x86_64
+#define helper_pi2fd helper_pi2fd_x86_64
+#define helper_pi2fw helper_pi2fw_x86_64
+#define helper_pf2id helper_pf2id_x86_64
+#define helper_pf2iw helper_pf2iw_x86_64
+#define helper_pfacc helper_pfacc_x86_64
+#define helper_pfadd helper_pfadd_x86_64
+#define helper_pfcmpeq helper_pfcmpeq_x86_64
+#define helper_pfcmpge helper_pfcmpge_x86_64
+#define helper_pfcmpgt helper_pfcmpgt_x86_64
+#define helper_pfmax helper_pfmax_x86_64
+#define helper_pfmin helper_pfmin_x86_64
+#define helper_pfmul helper_pfmul_x86_64
+#define helper_pfnacc helper_pfnacc_x86_64
+#define helper_pfpnacc helper_pfpnacc_x86_64
+#define helper_pfrcp helper_pfrcp_x86_64
+#define helper_pfrsqrt helper_pfrsqrt_x86_64
+#define helper_pfsub helper_pfsub_x86_64
+#define helper_pfsubr helper_pfsubr_x86_64
+#define helper_pswapd helper_pswapd_x86_64
+#define helper_pshufb_mmx helper_pshufb_mmx_x86_64
+#define helper_phaddw_mmx helper_phaddw_mmx_x86_64
+#define helper_phaddd_mmx helper_phaddd_mmx_x86_64
+#define helper_phaddsw_mmx helper_phaddsw_mmx_x86_64
+#define helper_pmaddubsw_mmx helper_pmaddubsw_mmx_x86_64
+#define helper_phsubw_mmx helper_phsubw_mmx_x86_64
+#define helper_phsubd_mmx helper_phsubd_mmx_x86_64
+#define helper_phsubsw_mmx helper_phsubsw_mmx_x86_64
+#define helper_pabsb_mmx helper_pabsb_mmx_x86_64
+#define helper_pabsw_mmx helper_pabsw_mmx_x86_64
+#define helper_pabsd_mmx helper_pabsd_mmx_x86_64
+#define helper_pmulhrsw_mmx helper_pmulhrsw_mmx_x86_64
+#define helper_psignb_mmx helper_psignb_mmx_x86_64
+#define helper_psignw_mmx helper_psignw_mmx_x86_64
+#define helper_psignd_mmx helper_psignd_mmx_x86_64
+#define helper_palignr_mmx helper_palignr_mmx_x86_64
+#define helper_psrlw_xmm helper_psrlw_xmm_x86_64
+#define helper_psraw_xmm helper_psraw_xmm_x86_64
+#define helper_psllw_xmm helper_psllw_xmm_x86_64
+#define helper_psrld_xmm helper_psrld_xmm_x86_64
+#define helper_psrad_xmm helper_psrad_xmm_x86_64
+#define helper_pslld_xmm helper_pslld_xmm_x86_64
+#define helper_psrlq_xmm helper_psrlq_xmm_x86_64
+#define helper_psllq_xmm helper_psllq_xmm_x86_64
+#define helper_psrldq_xmm helper_psrldq_xmm_x86_64
+#define helper_pslldq_xmm helper_pslldq_xmm_x86_64
+#define helper_paddb_xmm helper_paddb_xmm_x86_64
+#define helper_paddw_xmm helper_paddw_xmm_x86_64
+#define helper_paddl_xmm helper_paddl_xmm_x86_64
+#define helper_paddq_xmm helper_paddq_xmm_x86_64
+#define helper_psubb_xmm helper_psubb_xmm_x86_64
+#define helper_psubw_xmm helper_psubw_xmm_x86_64
+#define helper_psubl_xmm helper_psubl_xmm_x86_64
+#define helper_psubq_xmm helper_psubq_xmm_x86_64
+#define helper_paddusb_xmm helper_paddusb_xmm_x86_64
+#define helper_paddsb_xmm helper_paddsb_xmm_x86_64
+#define helper_psubusb_xmm helper_psubusb_xmm_x86_64
+#define helper_psubsb_xmm helper_psubsb_xmm_x86_64
+#define helper_paddusw_xmm helper_paddusw_xmm_x86_64
+#define helper_paddsw_xmm helper_paddsw_xmm_x86_64
+#define helper_psubusw_xmm helper_psubusw_xmm_x86_64
+#define helper_psubsw_xmm helper_psubsw_xmm_x86_64
+#define helper_pminub_xmm helper_pminub_xmm_x86_64
+#define helper_pmaxub_xmm helper_pmaxub_xmm_x86_64
+#define helper_pminsw_xmm helper_pminsw_xmm_x86_64
+#define helper_pmaxsw_xmm helper_pmaxsw_xmm_x86_64
+#define helper_pand_xmm helper_pand_xmm_x86_64
+#define helper_pandn_xmm helper_pandn_xmm_x86_64
+#define helper_por_xmm helper_por_xmm_x86_64
+#define helper_pxor_xmm helper_pxor_xmm_x86_64
+#define helper_pcmpgtb_xmm helper_pcmpgtb_xmm_x86_64
+#define helper_pcmpgtw_xmm helper_pcmpgtw_xmm_x86_64
+#define helper_pcmpgtl_xmm helper_pcmpgtl_xmm_x86_64
+#define helper_pcmpeqb_xmm helper_pcmpeqb_xmm_x86_64
+#define helper_pcmpeqw_xmm helper_pcmpeqw_xmm_x86_64
+#define helper_pcmpeql_xmm helper_pcmpeql_xmm_x86_64
+#define helper_pmullw_xmm helper_pmullw_xmm_x86_64
+#define helper_pmulhuw_xmm helper_pmulhuw_xmm_x86_64
+#define helper_pmulhw_xmm helper_pmulhw_xmm_x86_64
+#define helper_pavgb_xmm helper_pavgb_xmm_x86_64
+#define helper_pavgw_xmm helper_pavgw_xmm_x86_64
+#define helper_pmuludq_xmm helper_pmuludq_xmm_x86_64
+#define helper_pmaddwd_xmm helper_pmaddwd_xmm_x86_64
+#define helper_psadbw_xmm helper_psadbw_xmm_x86_64
+#define helper_maskmov_xmm helper_maskmov_xmm_x86_64
+#define helper_movl_mm_T0_xmm helper_movl_mm_T0_xmm_x86_64
+#define helper_movq_mm_T0_xmm helper_movq_mm_T0_xmm_x86_64
+#define helper_shufps helper_shufps_x86_64
+#define helper_shufpd helper_shufpd_x86_64
+#define helper_pshufd_xmm helper_pshufd_xmm_x86_64
+#define helper_pshuflw_xmm helper_pshuflw_xmm_x86_64
+#define helper_pshufhw_xmm helper_pshufhw_xmm_x86_64
+#define helper_addps helper_addps_x86_64
+#define helper_addss helper_addss_x86_64
+#define helper_addpd helper_addpd_x86_64
+#define helper_addsd helper_addsd_x86_64
+#define helper_subps helper_subps_x86_64
+#define helper_subss helper_subss_x86_64
+#define helper_subpd helper_subpd_x86_64
+#define helper_subsd helper_subsd_x86_64
+#define helper_mulps helper_mulps_x86_64
+#define helper_mulss helper_mulss_x86_64
+#define helper_mulpd helper_mulpd_x86_64
+#define helper_mulsd helper_mulsd_x86_64
+#define helper_divps helper_divps_x86_64
+#define helper_divss helper_divss_x86_64
+#define helper_divpd helper_divpd_x86_64
+#define helper_divsd helper_divsd_x86_64
+#define helper_minps helper_minps_x86_64
+#define helper_minss helper_minss_x86_64
+#define helper_minpd helper_minpd_x86_64
+#define helper_minsd helper_minsd_x86_64
+#define helper_maxps helper_maxps_x86_64
+#define helper_maxss helper_maxss_x86_64
+#define helper_maxpd helper_maxpd_x86_64
+#define helper_maxsd helper_maxsd_x86_64
+#define helper_sqrtps helper_sqrtps_x86_64
+#define helper_sqrtss helper_sqrtss_x86_64
+#define helper_sqrtpd helper_sqrtpd_x86_64
+#define helper_sqrtsd
helper_sqrtsd_x86_64 +#define helper_cvtps2pd helper_cvtps2pd_x86_64 +#define helper_cvtpd2ps helper_cvtpd2ps_x86_64 +#define helper_cvtss2sd helper_cvtss2sd_x86_64 +#define helper_cvtsd2ss helper_cvtsd2ss_x86_64 +#define helper_cvtdq2ps helper_cvtdq2ps_x86_64 +#define helper_cvtdq2pd helper_cvtdq2pd_x86_64 +#define helper_cvtpi2ps helper_cvtpi2ps_x86_64 +#define helper_cvtpi2pd helper_cvtpi2pd_x86_64 +#define helper_cvtsi2ss helper_cvtsi2ss_x86_64 +#define helper_cvtsi2sd helper_cvtsi2sd_x86_64 +#define helper_cvtsq2ss helper_cvtsq2ss_x86_64 +#define helper_cvtsq2sd helper_cvtsq2sd_x86_64 +#define helper_cvtps2dq helper_cvtps2dq_x86_64 +#define helper_cvtpd2dq helper_cvtpd2dq_x86_64 +#define helper_cvtps2pi helper_cvtps2pi_x86_64 +#define helper_cvtpd2pi helper_cvtpd2pi_x86_64 +#define helper_cvtss2si helper_cvtss2si_x86_64 +#define helper_cvtsd2si helper_cvtsd2si_x86_64 +#define helper_cvtss2sq helper_cvtss2sq_x86_64 +#define helper_cvtsd2sq helper_cvtsd2sq_x86_64 +#define helper_cvttps2dq helper_cvttps2dq_x86_64 +#define helper_cvttpd2dq helper_cvttpd2dq_x86_64 +#define helper_cvttps2pi helper_cvttps2pi_x86_64 +#define helper_cvttpd2pi helper_cvttpd2pi_x86_64 +#define helper_cvttss2si helper_cvttss2si_x86_64 +#define helper_cvttsd2si helper_cvttsd2si_x86_64 +#define helper_cvttss2sq helper_cvttss2sq_x86_64 +#define helper_cvttsd2sq helper_cvttsd2sq_x86_64 +#define helper_rsqrtps helper_rsqrtps_x86_64 +#define helper_rsqrtss helper_rsqrtss_x86_64 +#define helper_rcpps helper_rcpps_x86_64 +#define helper_rcpss helper_rcpss_x86_64 +#define helper_extrq_r helper_extrq_r_x86_64 +#define helper_extrq_i helper_extrq_i_x86_64 +#define helper_insertq_r helper_insertq_r_x86_64 +#define helper_insertq_i helper_insertq_i_x86_64 +#define helper_haddps helper_haddps_x86_64 +#define helper_haddpd helper_haddpd_x86_64 +#define helper_hsubps helper_hsubps_x86_64 +#define helper_hsubpd helper_hsubpd_x86_64 +#define helper_addsubps helper_addsubps_x86_64 +#define helper_addsubpd helper_addsubpd_x86_64 +#define helper_cmpeqps helper_cmpeqps_x86_64 +#define helper_cmpeqss helper_cmpeqss_x86_64 +#define helper_cmpeqpd helper_cmpeqpd_x86_64 +#define helper_cmpeqsd helper_cmpeqsd_x86_64 +#define helper_cmpltps helper_cmpltps_x86_64 +#define helper_cmpltss helper_cmpltss_x86_64 +#define helper_cmpltpd helper_cmpltpd_x86_64 +#define helper_cmpltsd helper_cmpltsd_x86_64 +#define helper_cmpleps helper_cmpleps_x86_64 +#define helper_cmpless helper_cmpless_x86_64 +#define helper_cmplepd helper_cmplepd_x86_64 +#define helper_cmplesd helper_cmplesd_x86_64 +#define helper_cmpunordps helper_cmpunordps_x86_64 +#define helper_cmpunordss helper_cmpunordss_x86_64 +#define helper_cmpunordpd helper_cmpunordpd_x86_64 +#define helper_cmpunordsd helper_cmpunordsd_x86_64 +#define helper_cmpneqps helper_cmpneqps_x86_64 +#define helper_cmpneqss helper_cmpneqss_x86_64 +#define helper_cmpneqpd helper_cmpneqpd_x86_64 +#define helper_cmpneqsd helper_cmpneqsd_x86_64 +#define helper_cmpnltps helper_cmpnltps_x86_64 +#define helper_cmpnltss helper_cmpnltss_x86_64 +#define helper_cmpnltpd helper_cmpnltpd_x86_64 +#define helper_cmpnltsd helper_cmpnltsd_x86_64 +#define helper_cmpnleps helper_cmpnleps_x86_64 +#define helper_cmpnless helper_cmpnless_x86_64 +#define helper_cmpnlepd helper_cmpnlepd_x86_64 +#define helper_cmpnlesd helper_cmpnlesd_x86_64 +#define helper_cmpordps helper_cmpordps_x86_64 +#define helper_cmpordss helper_cmpordss_x86_64 +#define helper_cmpordpd helper_cmpordpd_x86_64 +#define helper_cmpordsd helper_cmpordsd_x86_64 
+#define helper_ucomiss helper_ucomiss_x86_64 +#define helper_comiss helper_comiss_x86_64 +#define helper_ucomisd helper_ucomisd_x86_64 +#define helper_comisd helper_comisd_x86_64 +#define helper_movmskps helper_movmskps_x86_64 +#define helper_movmskpd helper_movmskpd_x86_64 +#define helper_pmovmskb_xmm helper_pmovmskb_xmm_x86_64 +#define helper_packsswb_xmm helper_packsswb_xmm_x86_64 +#define helper_packuswb_xmm helper_packuswb_xmm_x86_64 +#define helper_packssdw_xmm helper_packssdw_xmm_x86_64 +#define helper_punpcklbw_xmm helper_punpcklbw_xmm_x86_64 +#define helper_punpcklwd_xmm helper_punpcklwd_xmm_x86_64 +#define helper_punpckldq_xmm helper_punpckldq_xmm_x86_64 +#define helper_punpcklqdq_xmm helper_punpcklqdq_xmm_x86_64 +#define helper_punpckhbw_xmm helper_punpckhbw_xmm_x86_64 +#define helper_punpckhwd_xmm helper_punpckhwd_xmm_x86_64 +#define helper_punpckhdq_xmm helper_punpckhdq_xmm_x86_64 +#define helper_punpckhqdq_xmm helper_punpckhqdq_xmm_x86_64 +#define helper_pshufb_xmm helper_pshufb_xmm_x86_64 +#define helper_phaddw_xmm helper_phaddw_xmm_x86_64 +#define helper_phaddd_xmm helper_phaddd_xmm_x86_64 +#define helper_phaddsw_xmm helper_phaddsw_xmm_x86_64 +#define helper_pmaddubsw_xmm helper_pmaddubsw_xmm_x86_64 +#define helper_phsubw_xmm helper_phsubw_xmm_x86_64 +#define helper_phsubd_xmm helper_phsubd_xmm_x86_64 +#define helper_phsubsw_xmm helper_phsubsw_xmm_x86_64 +#define helper_pabsb_xmm helper_pabsb_xmm_x86_64 +#define helper_pabsw_xmm helper_pabsw_xmm_x86_64 +#define helper_pabsd_xmm helper_pabsd_xmm_x86_64 +#define helper_pmulhrsw_xmm helper_pmulhrsw_xmm_x86_64 +#define helper_psignb_xmm helper_psignb_xmm_x86_64 +#define helper_psignw_xmm helper_psignw_xmm_x86_64 +#define helper_psignd_xmm helper_psignd_xmm_x86_64 +#define helper_palignr_xmm helper_palignr_xmm_x86_64 +#define helper_pblendvb_xmm helper_pblendvb_xmm_x86_64 +#define helper_blendvps_xmm helper_blendvps_xmm_x86_64 +#define helper_blendvpd_xmm helper_blendvpd_xmm_x86_64 +#define helper_ptest_xmm helper_ptest_xmm_x86_64 +#define helper_pmovsxbw_xmm helper_pmovsxbw_xmm_x86_64 +#define helper_pmovsxbd_xmm helper_pmovsxbd_xmm_x86_64 +#define helper_pmovsxbq_xmm helper_pmovsxbq_xmm_x86_64 +#define helper_pmovsxwd_xmm helper_pmovsxwd_xmm_x86_64 +#define helper_pmovsxwq_xmm helper_pmovsxwq_xmm_x86_64 +#define helper_pmovsxdq_xmm helper_pmovsxdq_xmm_x86_64 +#define helper_pmovzxbw_xmm helper_pmovzxbw_xmm_x86_64 +#define helper_pmovzxbd_xmm helper_pmovzxbd_xmm_x86_64 +#define helper_pmovzxbq_xmm helper_pmovzxbq_xmm_x86_64 +#define helper_pmovzxwd_xmm helper_pmovzxwd_xmm_x86_64 +#define helper_pmovzxwq_xmm helper_pmovzxwq_xmm_x86_64 +#define helper_pmovzxdq_xmm helper_pmovzxdq_xmm_x86_64 +#define helper_pmuldq_xmm helper_pmuldq_xmm_x86_64 +#define helper_pcmpeqq_xmm helper_pcmpeqq_xmm_x86_64 +#define helper_packusdw_xmm helper_packusdw_xmm_x86_64 +#define helper_pminsb_xmm helper_pminsb_xmm_x86_64 +#define helper_pminsd_xmm helper_pminsd_xmm_x86_64 +#define helper_pminuw_xmm helper_pminuw_xmm_x86_64 +#define helper_pminud_xmm helper_pminud_xmm_x86_64 +#define helper_pmaxsb_xmm helper_pmaxsb_xmm_x86_64 +#define helper_pmaxsd_xmm helper_pmaxsd_xmm_x86_64 +#define helper_pmaxuw_xmm helper_pmaxuw_xmm_x86_64 +#define helper_pmaxud_xmm helper_pmaxud_xmm_x86_64 +#define helper_pmulld_xmm helper_pmulld_xmm_x86_64 +#define helper_phminposuw_xmm helper_phminposuw_xmm_x86_64 +#define helper_roundps_xmm helper_roundps_xmm_x86_64 +#define helper_roundpd_xmm helper_roundpd_xmm_x86_64 +#define helper_roundss_xmm helper_roundss_xmm_x86_64 
+#define helper_roundsd_xmm helper_roundsd_xmm_x86_64 +#define helper_blendps_xmm helper_blendps_xmm_x86_64 +#define helper_blendpd_xmm helper_blendpd_xmm_x86_64 +#define helper_pblendw_xmm helper_pblendw_xmm_x86_64 +#define helper_dpps_xmm helper_dpps_xmm_x86_64 +#define helper_dppd_xmm helper_dppd_xmm_x86_64 +#define helper_mpsadbw_xmm helper_mpsadbw_xmm_x86_64 +#define helper_pcmpgtq_xmm helper_pcmpgtq_xmm_x86_64 +#define helper_pcmpestri_xmm helper_pcmpestri_xmm_x86_64 +#define helper_pcmpestrm_xmm helper_pcmpestrm_xmm_x86_64 +#define helper_pcmpistri_xmm helper_pcmpistri_xmm_x86_64 +#define helper_pcmpistrm_xmm helper_pcmpistrm_xmm_x86_64 +#define helper_crc32 helper_crc32_x86_64 +#define helper_pclmulqdq_xmm helper_pclmulqdq_xmm_x86_64 +#define helper_aesdec_xmm helper_aesdec_xmm_x86_64 +#define helper_aesdeclast_xmm helper_aesdeclast_xmm_x86_64 +#define helper_aesenc_xmm helper_aesenc_xmm_x86_64 +#define helper_aesenclast_xmm helper_aesenclast_xmm_x86_64 +#define helper_aesimc_xmm helper_aesimc_xmm_x86_64 +#define helper_aeskeygenassist_xmm helper_aeskeygenassist_xmm_x86_64 +#define cpu_sync_bndcs_hflags cpu_sync_bndcs_hflags_x86_64 +#define cpu_x86_support_mca_broadcast cpu_x86_support_mca_broadcast_x86_64 +#define x86_cpu_set_a20 x86_cpu_set_a20_x86_64 +#define cpu_x86_update_cr0 cpu_x86_update_cr0_x86_64 +#define cpu_x86_update_cr3 cpu_x86_update_cr3_x86_64 +#define cpu_x86_update_cr4 cpu_x86_update_cr4_x86_64 +#define x86_cpu_get_phys_page_attrs_debug x86_cpu_get_phys_page_attrs_debug_x86_64 +#define cpu_x86_get_descr_debug cpu_x86_get_descr_debug_x86_64 +#define do_cpu_init do_cpu_init_x86_64 +#define do_cpu_sipi do_cpu_sipi_x86_64 +#define x86_cpu_exec_enter x86_cpu_exec_enter_x86_64 +#define x86_cpu_exec_exit x86_cpu_exec_exit_x86_64 +#define x86_ldub_phys x86_ldub_phys_x86_64 +#define x86_lduw_phys x86_lduw_phys_x86_64 +#define x86_ldl_phys x86_ldl_phys_x86_64 +#define x86_ldq_phys x86_ldq_phys_x86_64 +#define x86_stb_phys x86_stb_phys_x86_64 +#define x86_stl_phys_notdirty x86_stl_phys_notdirty_x86_64 +#define x86_stw_phys x86_stw_phys_x86_64 +#define x86_stl_phys x86_stl_phys_x86_64 +#define x86_stq_phys x86_stq_phys_x86_64 +#define helper_divb_AL helper_divb_AL_x86_64 +#define helper_idivb_AL helper_idivb_AL_x86_64 +#define helper_divw_AX helper_divw_AX_x86_64 +#define helper_idivw_AX helper_idivw_AX_x86_64 +#define helper_divl_EAX helper_divl_EAX_x86_64 +#define helper_idivl_EAX helper_idivl_EAX_x86_64 +#define helper_aam helper_aam_x86_64 +#define helper_aad helper_aad_x86_64 +#define helper_aaa helper_aaa_x86_64 +#define helper_aas helper_aas_x86_64 +#define helper_daa helper_daa_x86_64 +#define helper_das helper_das_x86_64 +#define helper_divq_EAX helper_divq_EAX_x86_64 +#define helper_idivq_EAX helper_idivq_EAX_x86_64 +#define helper_pdep helper_pdep_x86_64 +#define helper_pext helper_pext_x86_64 +#define helper_rclb helper_rclb_x86_64 +#define helper_rcrb helper_rcrb_x86_64 +#define helper_rclw helper_rclw_x86_64 +#define helper_rcrw helper_rcrw_x86_64 +#define helper_rcll helper_rcll_x86_64 +#define helper_rcrl helper_rcrl_x86_64 +#define helper_rclq helper_rclq_x86_64 +#define helper_rcrq helper_rcrq_x86_64 +#define helper_cr4_testbit helper_cr4_testbit_x86_64 +#define helper_rdrand helper_rdrand_x86_64 +#define helper_cmpxchg8b_unlocked helper_cmpxchg8b_unlocked_x86_64 +#define helper_cmpxchg8b helper_cmpxchg8b_x86_64 +#define helper_cmpxchg16b_unlocked helper_cmpxchg16b_unlocked_x86_64 +#define helper_cmpxchg16b helper_cmpxchg16b_x86_64 +#define helper_boundw 
helper_boundw_x86_64 +#define helper_boundl helper_boundl_x86_64 +#define helper_outb helper_outb_x86_64 +#define helper_inb helper_inb_x86_64 +#define helper_outw helper_outw_x86_64 +#define helper_inw helper_inw_x86_64 +#define helper_outl helper_outl_x86_64 +#define helper_inl helper_inl_x86_64 +#define helper_into helper_into_x86_64 +#define helper_cpuid helper_cpuid_x86_64 +#define helper_read_crN helper_read_crN_x86_64 +#define helper_write_crN helper_write_crN_x86_64 +#define helper_lmsw helper_lmsw_x86_64 +#define helper_invlpg helper_invlpg_x86_64 +#define helper_rdtsc helper_rdtsc_x86_64 +#define helper_rdtscp helper_rdtscp_x86_64 +#define helper_rdpmc helper_rdpmc_x86_64 +#define helper_wrmsr helper_wrmsr_x86_64 +#define helper_rdmsr helper_rdmsr_x86_64 +#define helper_hlt helper_hlt_x86_64 +#define helper_monitor helper_monitor_x86_64 +#define helper_mwait helper_mwait_x86_64 +#define helper_pause helper_pause_x86_64 +#define helper_debug helper_debug_x86_64 +#define helper_rdpkru helper_rdpkru_x86_64 +#define helper_wrpkru helper_wrpkru_x86_64 +#define helper_bndck helper_bndck_x86_64 +#define helper_bndldx64 helper_bndldx64_x86_64 +#define helper_bndldx32 helper_bndldx32_x86_64 +#define helper_bndstx64 helper_bndstx64_x86_64 +#define helper_bndstx32 helper_bndstx32_x86_64 +#define helper_bnd_jmp helper_bnd_jmp_x86_64 +#define helper_syscall helper_syscall_x86_64 +#define helper_sysret helper_sysret_x86_64 +#define x86_cpu_do_interrupt x86_cpu_do_interrupt_x86_64 +#define do_interrupt_x86_hardirq do_interrupt_x86_hardirq_x86_64 +#define x86_cpu_exec_interrupt x86_cpu_exec_interrupt_x86_64 +#define helper_lldt helper_lldt_x86_64 +#define helper_ltr helper_ltr_x86_64 +#define uc_check_cpu_x86_load_seg uc_check_cpu_x86_load_seg_x86_64 +#define helper_load_seg helper_load_seg_x86_64 +#define helper_ljmp_protected helper_ljmp_protected_x86_64 +#define helper_lcall_real helper_lcall_real_x86_64 +#define helper_lcall_protected helper_lcall_protected_x86_64 +#define helper_iret_real helper_iret_real_x86_64 +#define helper_iret_protected helper_iret_protected_x86_64 +#define helper_lret_protected helper_lret_protected_x86_64 +#define helper_sysenter helper_sysenter_x86_64 +#define helper_sysexit helper_sysexit_x86_64 +#define helper_lsl helper_lsl_x86_64 +#define helper_lar helper_lar_x86_64 +#define helper_verr helper_verr_x86_64 +#define helper_verw helper_verw_x86_64 +#define cpu_x86_load_seg cpu_x86_load_seg_x86_64 +#define helper_check_iob helper_check_iob_x86_64 +#define helper_check_iow helper_check_iow_x86_64 +#define helper_check_iol helper_check_iol_x86_64 +#define do_smm_enter do_smm_enter_x86_64 +#define helper_rsm helper_rsm_x86_64 +#define helper_vmrun helper_vmrun_x86_64 +#define helper_vmmcall helper_vmmcall_x86_64 +#define helper_vmload helper_vmload_x86_64 +#define helper_vmsave helper_vmsave_x86_64 +#define helper_stgi helper_stgi_x86_64 +#define helper_clgi helper_clgi_x86_64 +#define helper_skinit helper_skinit_x86_64 +#define helper_invlpga helper_invlpga_x86_64 +#define cpu_svm_check_intercept_param cpu_svm_check_intercept_param_x86_64 +#define helper_svm_check_intercept_param helper_svm_check_intercept_param_x86_64 +#define helper_svm_check_io helper_svm_check_io_x86_64 +#define cpu_vmexit cpu_vmexit_x86_64 +#define do_vmexit do_vmexit_x86_64 +#define tcg_x86_init tcg_x86_init_x86_64 +#define gen_intermediate_code gen_intermediate_code_x86_64 +#define restore_state_to_opc restore_state_to_opc_x86_64 +#define x86_cpu_xsave_all_areas x86_cpu_xsave_all_areas_x86_64 
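This rename table (and the matching headers for the other targets) exists so that several QEMU target builds can be linked into a single libunicorn without duplicate-symbol clashes: every global helper gets an architecture suffix. Below is a minimal sketch of the token-pasting idiom behind such a table; the macro names here are illustrative only, not necessarily the ones Unicorn uses.

/* Illustrative only: derive the suffixed name at preprocessing time. */
#define GLUE(a, b) a##b
#define XGLUE(a, b) GLUE(a, b)  /* expands arguments before pasting */
#define ARCH_POSTFIX _x86_64
#define ARCH_SYM(name) XGLUE(name, ARCH_POSTFIX)
/* ARCH_SYM(helper_fsqrt) expands to helper_fsqrt_x86_64, matching the entries in this header */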
+#define x86_cpu_xrstor_all_areas x86_cpu_xrstor_all_areas_x86_64 +#define cpu_get_fp80 cpu_get_fp80_x86_64 +#define cpu_set_fp80 cpu_set_fp80_x86_64 +#define x86_reg_reset x86_reg_reset_x86_64 +#define x86_reg_read x86_reg_read_x86_64 +#define x86_reg_write x86_reg_write_x86_64 #endif diff --git a/samples/.gitignore b/samples/.gitignore deleted file mode 100644 index f53a0ab7..00000000 --- a/samples/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -!*.c -sample_* -shellcode* -mem_apis* - diff --git a/samples/mem_apis.c b/samples/mem_apis.c index 2ffdf6c3..767e6aec 100644 --- a/samples/mem_apis.c +++ b/samples/mem_apis.c @@ -337,25 +337,9 @@ static void unmap_test() int main(int argc, char **argv, char **envp) { - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - nx_test(); perms_test(); unmap_test(); - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - return 0; } diff --git a/samples/sample_all.sh b/samples/sample_all.sh index a94cafd5..026e0107 100755 --- a/samples/sample_all.sh +++ b/samples/sample_all.sh @@ -25,12 +25,10 @@ fi if test -e $DIR/sample_arm; then echo "==========================" $DIR/sample_arm - $DIR/sample_armeb fi if test -e $DIR/sample_arm64; then echo "==========================" $DIR/sample_arm64 - $DIR/sample_arm64eb fi if test -e $DIR/sample_mips; then echo "==========================" diff --git a/samples/sample_arm.c b/samples/sample_arm.c index dd92eb95..bda2d0b0 100644 --- a/samples/sample_arm.c +++ b/samples/sample_arm.c @@ -8,11 +8,16 @@ // code to be emulated -#define ARM_CODE "\x37\x00\xa0\xe3\x03\x10\x42\xe0" // mov r0, #0x37; sub r1, r2, r3 +// #define ARM_CODE "\x37\x00\xa0\xe3" // mov r0, #0x37 +#define ARM_CODE "\x00\xf0\x20\xe3" // nop +// #define ARM_CODE "\x37\x00\xa0\xe3\x03\x10\x42\xe0" // mov r0, #0x37; sub r1, r2, r3 #define THUMB_CODE "\x83\xb0" // sub sp, #0xc #define ARM_THUM_COND_CODE "\x9a\x42\x14\xbf\x68\x22\x4d\x22" // 'cmp r2, r3\nit ne\nmov r2, #0x68\nmov r2, #0x4d' +// code to be emulated +#define ARM_CODE_EB "\xe3\xa0\x00\x37\xe0\x42\x10\x03" // mov r0, #0x37; sub r1, r2, r3 +#define THUMB_CODE_EB "\xb0\x83" // sub sp, #0xc // memory address where emulation starts #define ADDRESS 0x10000 @@ -132,6 +137,164 @@ static void test_thumb(void) uc_close(uc); } +static void test_armeb(void) +{ + uc_engine *uc; + uc_err err; + uc_hook trace1, trace2; + + int r0 = 0x1234; // R0 register + int r2 = 0x6789; // R1 register + int r3 = 0x3333; // R2 register + int r1; // R1 register + + printf("Emulate ARM Big-Endian code\n"); + + // Initialize emulator in ARM mode + err = uc_open(UC_ARCH_ARM, UC_MODE_ARM + UC_MODE_BIG_ENDIAN, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, ARM_CODE_EB, sizeof(ARM_CODE_EB) - 1); + + // initialize machine registers + uc_reg_write(uc, UC_ARM_REG_R0, &r0); + uc_reg_write(uc, UC_ARM_REG_R2, &r2); + uc_reg_write(uc, UC_ARM_REG_R3, &r3); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, 
hook_block, NULL, 1, 0); + + // tracing one instruction at ADDRESS with customized callback + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE_EB) -1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. Below is the CPU context\n"); + + uc_reg_read(uc, UC_ARM_REG_R0, &r0); + uc_reg_read(uc, UC_ARM_REG_R1, &r1); + printf(">>> R0 = 0x%x\n", r0); + printf(">>> R1 = 0x%x\n", r1); + + uc_close(uc); +} + +static void test_thumbeb(void) +{ + uc_engine *uc; + uc_err err; + uc_hook trace1, trace2; + + int sp = 0x1234; // SP register + + printf("Emulate THUMB Big-Endian code\n"); + + // Initialize emulator in big-endian THUMB mode + err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB + UC_MODE_BIG_ENDIAN, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, THUMB_CODE_EB, sizeof(THUMB_CODE_EB) - 1); + + // initialize machine registers + uc_reg_write(uc, UC_ARM_REG_SP, &sp); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing one instruction at ADDRESS with customized callback + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + // Note we start at ADDRESS | 1 to indicate THUMB mode. + err = uc_emu_start(uc, ADDRESS | 1, ADDRESS + sizeof(THUMB_CODE_EB) -1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. Below is the CPU context\n"); + + uc_reg_read(uc, UC_ARM_REG_SP, &sp); + printf(">>> SP = 0x%x\n", sp); + + uc_close(uc); +} + +static void test_thumb_mrs(void) +{ + uc_engine *uc; + uc_err err; + uc_hook trace1, trace2; + + int pc; + + printf("Emulate THUMB MRS instruction\n"); + // 0xf3ef8014 - mrs r0, control + + // Initialize emulator in M-class THUMB mode + err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_MCLASS, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, "\xef\xf3\x14\x80", 4); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing one instruction at ADDRESS with customized callback + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); + + // emulate one instruction (count = 1 in the last parameter). + + // Note we start at ADDRESS | 1 to indicate THUMB mode. + err = uc_emu_start(uc, ADDRESS | 1, ADDRESS + 4, 0, 1); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. 
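One detail worth noting in these big-endian samples: UC_MODE_BIG_ENDIAN is a bit flag, so combining it with the base mode via bitwise OR states the intent more clearly than "+" (both work here because the bits do not overlap). A minimal sketch using only the public uc_open() API; the helper name is illustrative:

#include <unicorn/unicorn.h>
#include <stdbool.h>

static uc_err open_arm_engine(bool thumb, bool big_endian, uc_engine **uc)
{
    int mode = thumb ? UC_MODE_THUMB : UC_MODE_ARM;
    if (big_endian)
        mode |= UC_MODE_BIG_ENDIAN;   // flag composes with either base mode
    return uc_open(UC_ARCH_ARM, (uc_mode)mode, uc);
}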
Below is the CPU context\n"); + + uc_reg_read(uc, UC_ARM_REG_PC, &pc); + printf(">>> PC = 0x%x\n", pc); + if (pc != ADDRESS + 4){ + printf("Error, PC was 0x%x, expected was 0x%x.\n", pc, ADDRESS + 4); + } + + uc_close(uc); +} + static void test_thumb_ite_internal(bool step, uint32_t *r2_out, uint32_t *r3_out) { uc_engine *uc; @@ -207,26 +370,22 @@ static void test_thumb_ite() int main(int argc, char **argv, char **envp) { - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - test_arm(); + printf("==========================\n"); test_thumb(); + + printf("==========================\n"); + test_armeb(); + + printf("==========================\n"); + test_thumbeb(); + + printf("==========================\n"); + test_thumb_mrs(); + printf("==========================\n"); test_thumb_ite(); - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - + return 0; } diff --git a/samples/sample_arm64.c b/samples/sample_arm64.c index 5a6cc333..74ea4e05 100644 --- a/samples/sample_arm64.c +++ b/samples/sample_arm64.c @@ -8,7 +8,9 @@ // code to be emulated -#define ARM_CODE "\xab\x05\x00\xb8\xaf\x05\x40\x38" // str w11, [x13]; ldrb w15, [x13] +#define ARM64_CODE "\xab\x05\x00\xb8\xaf\x05\x40\x38" // str w11, [x13], #0; ldrb w15, [x13], #0 +//#define ARM64_CODE_EB "\xb8\x00\x05\xab\x38\x40\x05\xaf" // str w11, [x13]; ldrb w15, [x13] +#define ARM64_CODE_EB ARM64_CODE // memory address where emulation starts #define ADDRESS 0x10000 @@ -23,6 +25,62 @@ static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); } +static void test_arm64_mem_fetch(void) +{ + uc_engine* uc; + uc_err err; + uint64_t x1, sp, x0; + // msr x0, CurrentEL + unsigned char shellcode0[4] = { + 64, 66, 56, 213 + }; + // .text:00000000004002C0 LDR X1, [SP,#arg_0] + unsigned char shellcode[4] = { + 0xE1, 0x03, 0x40, 0xF9 + }; + unsigned shellcode_address = 0x4002C0; + uint64_t data_address = 0x10000000000000; + + printf(">>> Emulate ARM64 fetching stack data from high address %"PRIx64"\n", data_address); + + // Initialize emulator in ARM mode + err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + uc_mem_map(uc, data_address, 0x30000, UC_PROT_ALL); + uc_mem_map(uc, 0x400000, 0x1000, UC_PROT_ALL); + + sp = data_address; + uc_reg_write(uc, UC_ARM64_REG_SP, &sp); + uc_mem_write(uc, data_address, "\xc8\xc8\xc8\xc8\xc8\xc8\xc8\xc8", 8); + uc_mem_write(uc, shellcode_address, shellcode0, 4); + uc_mem_write(uc, shellcode_address + 4, shellcode, 4); + + err = uc_emu_start(uc, shellcode_address, shellcode_address+4, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + x0 = 0; + uc_reg_read(uc, UC_ARM64_REG_X0, &x0); + printf(">>> x0(Exception Level)=%"PRIx64"\n", x0>>2); + + err = uc_emu_start(uc, shellcode_address+4, shellcode_address+8, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + uc_reg_read(uc, UC_ARM64_REG_X1, &x1); + + printf(">>> X1 = 0x%" PRIx64 "\n", x1); + + uc_close(uc); +} + static 
void test_arm64(void) { uc_engine *uc; @@ -47,7 +105,7 @@ static void test_arm64(void) uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, ARM_CODE, sizeof(ARM_CODE) - 1); + uc_mem_write(uc, ADDRESS, ARM64_CODE, sizeof(ARM64_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM64_REG_X11, &x11); @@ -62,7 +120,7 @@ static void test_arm64(void) // emulate machine code in infinite time (last param = 0), or when // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE) -1, 0, 0); + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM64_CODE) -1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } @@ -77,25 +135,67 @@ static void test_arm64(void) uc_close(uc); } -int main(int argc, char **argv, char **envp) +static void test_arm64eb(void) { - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; + uc_engine *uc; + uc_err err; + uc_hook trace1, trace2; + + int64_t x11 = 0x12345678; // X11 register + int64_t x13 = 0x10000 + 0x8; // X13 register + int64_t x15 = 0x33; // X15 register + + printf("Emulate ARM64 Big-Endian code\n"); + + // Initialize emulator in ARM mode + err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM + UC_MODE_BIG_ENDIAN, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; } -#endif - + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, ARM64_CODE_EB, sizeof(ARM64_CODE_EB) - 1); + + // initialize machine registers + uc_reg_write(uc, UC_ARM64_REG_X11, &x11); + uc_reg_write(uc, UC_ARM64_REG_X13, &x13); + uc_reg_write(uc, UC_ARM64_REG_X15, &x15); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing one instruction at ADDRESS with customized callback + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM64_CODE_EB) -1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. 
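When several ARM64 registers need to be captured after a run, uc_reg_read_batch() (exercised by sample_batch_reg.c elsewhere in this patch) avoids repeated single-register calls. A small convenience sketch, assuming 64-bit storage for each X register; the helper name is illustrative:

#include <unicorn/unicorn.h>

static uc_err read_x0_to_x3(uc_engine *uc, uint64_t out[4])
{
    int regs[4] = {UC_ARM64_REG_X0, UC_ARM64_REG_X1,
                   UC_ARM64_REG_X2, UC_ARM64_REG_X3};
    // each vals[i] must point at storage as wide as the register
    void *vals[4] = {&out[0], &out[1], &out[2], &out[3]};
    return uc_reg_read_batch(uc, regs, vals, 4);
}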
Below is the CPU context\n"); + printf(">>> As big endian, X15 should be 0x78:\n"); + + uc_reg_read(uc, UC_ARM64_REG_X15, &x15); + printf(">>> X15 = 0x%" PRIx64 "\n", x15); + + uc_close(uc); +} + +int main(int argc, char **argv, char **envp) +{ + test_arm64_mem_fetch(); test_arm64(); - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - + printf("-------------------------\n"); + test_arm64eb(); + return 0; } diff --git a/samples/sample_armeb.c b/samples/sample_armeb.c deleted file mode 100644 index b6da08e7..00000000 --- a/samples/sample_armeb.c +++ /dev/null @@ -1,156 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By zhangwm, 2017 */ - -/* Sample code to demonstrate how to emulate ARM Big Endian code */ - -#include -#include - - -// code to be emulated -#define ARM_CODE "\xe3\xa0\x00\x37\xe0\x42\x10\x03" // mov r0, #0x37; sub r1, r2, r3 -#define THUMB_CODE "\xb0\x83" // sub sp, #0xc - -// memory address where emulation starts -#define ADDRESS 0x10000 - -static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); -} - -static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); -} - -static void test_arm(void) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2; - - int r0 = 0x1234; // R0 register - int r2 = 0x6789; // R1 register - int r3 = 0x3333; // R2 register - int r1; // R1 register - - printf("Emulate ARM Big-Endian code\n"); - - // Initialize emulator in ARM mode - err = uc_open(UC_ARCH_ARM, UC_MODE_ARM + UC_MODE_BIG_ENDIAN, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u (%s)\n", - err, uc_strerror(err)); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, ARM_CODE, sizeof(ARM_CODE) - 1); - - // initialize machine registers - uc_reg_write(uc, UC_ARM_REG_R0, &r0); - uc_reg_write(uc, UC_ARM_REG_R2, &r2); - uc_reg_write(uc, UC_ARM_REG_R3, &r3); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); - - // tracing one instruction at ADDRESS with customized callback - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE) -1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned: %u\n", err); - } - - // now print out some registers - printf(">>> Emulation done. 
Below is the CPU context\n"); - - uc_reg_read(uc, UC_ARM_REG_R0, &r0); - uc_reg_read(uc, UC_ARM_REG_R1, &r1); - printf(">>> R0 = 0x%x\n", r0); - printf(">>> R1 = 0x%x\n", r1); - - uc_close(uc); -} - -static void test_thumb(void) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2; - - int sp = 0x1234; // R0 register - - printf("Emulate THUMB code\n"); - - // Initialize emulator in ARM mode - err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB + UC_MODE_BIG_ENDIAN, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u (%s)\n", - err, uc_strerror(err)); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, THUMB_CODE, sizeof(THUMB_CODE) - 1); - - // initialize machine registers - uc_reg_write(uc, UC_ARM_REG_SP, &sp); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); - - // tracing one instruction at ADDRESS with customized callback - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - // Note we start at ADDRESS | 1 to indicate THUMB mode. - err = uc_emu_start(uc, ADDRESS | 1, ADDRESS + sizeof(THUMB_CODE) -1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned: %u\n", err); - } - - // now print out some registers - printf(">>> Emulation done. Below is the CPU context\n"); - - uc_reg_read(uc, UC_ARM_REG_SP, &sp); - printf(">>> SP = 0x%x\n", sp); - - uc_close(uc); -} - -int main(int argc, char **argv, char **envp) -{ - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - - test_arm(); - printf("==========================\n"); - test_thumb(); - - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - - return 0; -} diff --git a/samples/sample_batch_reg.c b/samples/sample_batch_reg.c index 322c939e..b1b51b6d 100644 --- a/samples/sample_batch_reg.c +++ b/samples/sample_batch_reg.c @@ -110,5 +110,6 @@ int main() return 1; } + uc_close(uc); return 0; } diff --git a/samples/sample_m68k.c b/samples/sample_m68k.c index 26ac4c1b..49e47e5b 100644 --- a/samples/sample_m68k.c +++ b/samples/sample_m68k.c @@ -141,23 +141,7 @@ static void test_m68k(void) int main(int argc, char **argv, char **envp) { - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - test_m68k(); - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - return 0; } diff --git a/samples/sample_mips.c b/samples/sample_mips.c index b032ec05..ef26af11 100644 --- a/samples/sample_mips.c +++ b/samples/sample_mips.c @@ -125,24 +125,8 @@ static void test_mips_el(void) int main(int argc, char **argv, char **envp) { - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - 
printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - test_mips_eb(); test_mips_el(); - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - return 0; } diff --git a/samples/sample_arm64eb.c b/samples/sample_ppc.c similarity index 51% rename from samples/sample_arm64eb.c rename to samples/sample_ppc.c index 1644ebbd..12dbbd3f 100644 --- a/samples/sample_arm64eb.c +++ b/samples/sample_ppc.c @@ -1,18 +1,19 @@ /* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh, 2015 */ -/* modify from arm64 sample zhangwm, 2017 */ +/* By simigo79, 2020 */ -/* Sample code to demonstrate how to emulate ARM64EB code */ +/* Sample code to demonstrate how to emulate PPC code */ #include #include + // code to be emulated -#define ARM_CODE "\xab\x05\x00\xb8\xaf\x05\x40\x38" // str x11, [x13]; ldrb x15, [x13] +#define PPC_CODE "\x7F\x46\x1A\x14" // add r26, r6, r3 // memory address where emulation starts #define ADDRESS 0x10000 + static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); @@ -23,20 +24,20 @@ static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); } -static void test_arm64(void) +static void test_ppc(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; - int64_t x11 = 0x12345678; // X11 register - int64_t x13 = 0x10000 + 0x8; // X13 register - int64_t x15 = 0x33; // X15 register + int r3 = 0x1234; // R3 register + int r6 = 0x6789; // R6 register + int r26 = 0x8877; // R26 register (result) - printf("Emulate ARM64 Big-Endian code\n"); + printf("Emulate PPC code\n"); - // Initialize emulator in ARM mode - err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM + UC_MODE_BIG_ENDIAN, &uc); + // Initialize emulator in PPC mode + err = uc_open(UC_ARCH_PPC, UC_MODE_PPC32 | UC_MODE_BIG_ENDIAN , &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); @@ -47,12 +48,12 @@ static void test_arm64(void) uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, ARM_CODE, sizeof(ARM_CODE) - 1); + uc_mem_write(uc, ADDRESS, PPC_CODE, sizeof(PPC_CODE) - 1); // initialize machine registers - uc_reg_write(uc, UC_ARM64_REG_X11, &x11); - uc_reg_write(uc, UC_ARM64_REG_X13, &x13); - uc_reg_write(uc, UC_ARM64_REG_X15, &x15); + uc_reg_write(uc, UC_PPC_REG_3, &r3); + uc_reg_write(uc, UC_PPC_REG_6, &r6); + uc_reg_write(uc, UC_PPC_REG_26, &r26); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); @@ -62,40 +63,26 @@ static void test_arm64(void) // emulate machine code in infinite time (last param = 0), or when // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE) -1, 0, 0); + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(PPC_CODE) -1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); + return; } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); - printf(">>> As big endian, X15 should be 0x12:\n"); - uc_reg_read(uc, UC_ARM64_REG_X15, &x15); - printf(">>> X15 = 0x%" PRIx64 "\n", x15); + uc_reg_read(uc, UC_PPC_REG_26, &r26); + printf(">>> r26 = 0x%x\n", r26); + // close engine when done uc_close(uc); } + int main(int argc, char **argv, char **envp) { - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - - test_arm64(); + test_ppc(); - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - return 0; } diff --git a/samples/sample_riscv.c b/samples/sample_riscv.c new file mode 100644 index 00000000..73bdf98d --- /dev/null +++ b/samples/sample_riscv.c @@ -0,0 +1,594 @@ +/* Unicorn Emulator Engine */ + +/* Sample code to demonstrate how to emulate RISCV code */ + +#include +#include + + +// code to be emulated +#if 0 +$ cstool riscv64 1305100093850502 + 0 13 05 10 00 addi a0, zero, 1 + 4 93 85 05 02 addi a1, a1, 0x20 +#endif +//#define RISCV_CODE "\x13\x05\x10\x00\x93\x85\x05\x02\x93\x85\x05\x02" +#define RISCV_CODE "\x13\x05\x10\x00\x93\x85\x05\x02" + + +// memory address where emulation starts +#define ADDRESS 0x10000 + +static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); +} + +static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); +} + +static void hook_code3(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); + if (address == ADDRESS) { + printf("stop emulation\n"); + uc_emu_stop(uc); + } +} + +static void test_riscv(void) +{ + uc_engine *uc; + uc_hook trace1, trace2; + uc_err err; + + uint32_t a0 = 0x1234; + uint32_t a1 = 0x7890; + + printf("Emulate RISCV code\n"); + + // Initialize emulator in RISCV64 mode + err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1); + + // initialize machine registers + uc_reg_write(uc, UC_RISCV_REG_A0, &a0); + uc_reg_write(uc, UC_RISCV_REG_A1, &a1); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing all instruction + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(RISCV_CODE) - 1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. 
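Decoding the two RISC-V instructions makes the expected end state explicit: addi a0, zero, 1 ignores a0's initial 0x1234 and writes 1, while addi a1, a1, 0x20 turns the initial 0x7890 into 0x78b0. A small check that could follow any of these runs, using the same variable names as the tests:

    uc_reg_read(uc, UC_RISCV_REG_A0, &a0);
    uc_reg_read(uc, UC_RISCV_REG_A1, &a1);
    if (a0 != 1 || a1 != 0x78b0)
        printf("unexpected state: a0=0x%x a1=0x%x\n", a0, a1);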
Below is the CPU context\n"); + + uc_reg_read(uc, UC_RISCV_REG_A0, &a0); + uc_reg_read(uc, UC_RISCV_REG_A1, &a1); + + printf(">>> A0 = 0x%x\n", a0); + printf(">>> A1 = 0x%x\n", a1); + + uc_close(uc); +} + +static void test_riscv2(void) +{ + uc_engine *uc; + uc_hook trace1, trace2; + uc_err err; + + uint32_t a0 = 0x1234; + uint32_t a1 = 0x7890; + + printf("Emulate RISCV code: split emulation\n"); + + // Initialize emulator in RISCV64 mode + err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1); + + // initialize machine registers + uc_reg_write(uc, UC_RISCV_REG_A0, &a0); + uc_reg_write(uc, UC_RISCV_REG_A1, &a1); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing all instruction + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); + + // emulate 1 instruction + err = uc_emu_start(uc, ADDRESS, ADDRESS + 4, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + uc_reg_read(uc, UC_RISCV_REG_A0, &a0); + uc_reg_read(uc, UC_RISCV_REG_A1, &a1); + + printf(">>> A0 = 0x%x\n", a0); + printf(">>> A1 = 0x%x\n", a1); + + // emulate one more instruction + err = uc_emu_start(uc, ADDRESS + 4, ADDRESS + 8, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. Below is the CPU context\n"); + + uc_reg_read(uc, UC_RISCV_REG_A0, &a0); + uc_reg_read(uc, UC_RISCV_REG_A1, &a1); + + printf(">>> A0 = 0x%x\n", a0); + printf(">>> A1 = 0x%x\n", a1); + + uc_close(uc); +} + +static void test_riscv3(void) +{ + uc_engine *uc; + uc_hook trace1, trace2; + uc_err err; + + uint32_t a0 = 0x1234; + uint32_t a1 = 0x7890; + + printf("Emulate RISCV code: early stop\n"); + + // Initialize emulator in RISCV64 mode + err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1); + + // initialize machine registers + uc_reg_write(uc, UC_RISCV_REG_A0, &a0); + uc_reg_write(uc, UC_RISCV_REG_A1, &a1); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing all instruction + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code3, NULL, 1, 0); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(RISCV_CODE) - 1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. 
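hook_code3() above stops the run by calling uc_emu_stop() from inside a UC_HOOK_CODE callback, which makes the pending uc_emu_start() return normally. The same pattern generalizes to an instruction budget; a minimal sketch where the hook and counter names are illustrative:

#include <unicorn/unicorn.h>

static void hook_stop_after(uc_engine *uc, uint64_t address, uint32_t size,
                            void *user_data)
{
    int *budget = (int *)user_data;   // remaining instruction budget
    if (--(*budget) <= 0)
        uc_emu_stop(uc);              // uc_emu_start() returns after this
}

/* usage: int n = 5;
   uc_hook_add(uc, &h, UC_HOOK_CODE, hook_stop_after, &n, 1, 0); */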
Below is the CPU context\n"); + + uc_reg_read(uc, UC_RISCV_REG_A0, &a0); + uc_reg_read(uc, UC_RISCV_REG_A1, &a1); + + printf(">>> A0 = 0x%x\n", a0); + printf(">>> A1 = 0x%x\n", a1); + + uc_close(uc); +} + +static void test_riscv_step(void) +{ + uc_engine *uc; + uc_hook trace1, trace2; + uc_err err; + + uint32_t a0 = 0x1234; + uint32_t a1 = 0x7890; + uint32_t pc = 0x0000; + + printf("Emulate RISCV code: step\n"); + + // Initialize emulator in RISCV64 mode + err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1); + + // initialize machine registers + uc_reg_write(uc, UC_RISCV_REG_A0, &a0); + uc_reg_write(uc, UC_RISCV_REG_A1, &a1); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing all instruction + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); + + // emulate 1 instruction + err = uc_emu_start(uc, ADDRESS, ADDRESS + 12, 0, 1); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + uc_reg_read(uc, UC_RISCV_REG_A0, &a0); + uc_reg_read(uc, UC_RISCV_REG_A1, &a1); + uc_reg_read(uc, UC_RISCV_REG_PC, &pc); + + printf(">>> A0 = 0x%x\n", a0); + printf(">>> A1 = 0x%x\n", a1); + + if (pc != 0x10004) { + printf("Error after step: PC is: 0x%x, expected was 0x10004\n", pc); + } + + // emulate one more instruction + err = uc_emu_start(uc, ADDRESS + 4, ADDRESS + 8, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. 
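The step and timeout tests exercise the last two uc_emu_start() parameters: a timeout in microseconds and an instruction count, with 0 disabling the respective limit. A compact reference sketch of both forms:

    // uc_emu_start(uc, begin, until, timeout_us, count)
    err = uc_emu_start(uc, ADDRESS, ADDRESS + 8, 0, 1);          // stop after 1 instruction
    err = uc_emu_start(uc, ADDRESS, ADDRESS + 8, 10 * 1000, 0);  // stop after ~10 ms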
Below is the CPU context\n"); + + uc_reg_read(uc, UC_RISCV_REG_A0, &a0); + uc_reg_read(uc, UC_RISCV_REG_A1, &a1); + + printf(">>> A0 = 0x%x\n", a0); + printf(">>> A1 = 0x%x\n", a1); + + uc_close(uc); +} + +static void test_riscv_timeout(void) +{ + uc_engine *uc; + uc_hook trace1, trace2; + uc_err err; + + uint32_t a0 = 0x1234; + uint32_t a1 = 0x7890; + uint32_t pc = 0x0000; + + printf("Emulate RISCV code: timeout\n"); + + // Initialize emulator in RISCV32 mode + err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, "\x00\x00\x00\x00\x00\x00\x00\x00", 8); + + // initialize machine registers + uc_reg_write(uc, UC_RISCV_REG_A0, &a0); + uc_reg_write(uc, UC_RISCV_REG_A1, &a1); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing all instruction + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); + + // emulate 1 instruction with timeout + err = uc_emu_start(uc, ADDRESS, ADDRESS + 4, 1000, 1); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + uc_reg_read(uc, UC_RISCV_REG_PC, &pc); + + if (pc != 0x10000) { + printf("Error after step: PC is: 0x%x, expected was 0x10000\n", pc); + } + + // emulate 1 instruction with timeout + err = uc_emu_start(uc, ADDRESS, ADDRESS + 4, 1000, 1); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + uc_reg_read(uc, UC_RISCV_REG_PC, &pc); + + if (pc != 0x10000) { + printf("Error after step: PC is: 0x%x, expected was 0x10000\n", pc); + } + + // now print out some registers + printf(">>> Emulation done\n"); + + uc_close(uc); +} + +static void test_riscv_sd64(void) +{ + uc_engine *uc; + uc_hook trace1, trace2; + uc_err err; + + uint64_t reg; + + /* + 00813823 sd s0,16(sp) + 00000013 nop + */ +#define CODE64 "\x23\x38\x81\x00\x13\x00\x00\x00" + + printf("Emulate RISCV code: sd64 instruction\n"); + + // Initialize emulator in RISCV64 mode + err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, CODE64, sizeof(CODE64) - 1); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing all instruction + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); + + reg = ADDRESS + 0x100; + uc_reg_write(uc, UC_RISCV_REG_SP, &reg); + + reg = 0x11223344; + uc_reg_write(uc, UC_RISCV_REG_S0, &reg); + + // execute instruction + err = uc_emu_start(uc, 0x10000, -1, 0, 1); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done.\n"); + + uc_close(uc); +} + +static bool hook_memalloc(uc_engine *uc, uc_mem_type type, uint64_t address, + int size, int64_t value, void *user_data) +{ + uint64_t aligned_address = address & 0xFFFFFFFFFFFFF000ULL; + int aligned_size = ((int)(size / 0x1000) + 1) * 0x1000; + + printf(">>> Allocating block at 0x%" PRIx64 " (0x%" PRIx64 "), block size = 
0x%x (0x%x)\n", + address, aligned_address, size, aligned_size); + + uc_mem_map(uc, aligned_address, aligned_size, UC_PROT_ALL); + + // this recovers from missing memory, so we return true + return true; +} + +static void test_recover_from_illegal(void) +{ + uc_engine *uc; + uc_hook trace1, trace2, mem_alloc; + uc_err err; + uint64_t a0 = 0x1234; + uint64_t a1 = 0x7890; + + printf("Emulate RISCV code: recover_from_illegal\n"); + + // Initialize emulator in RISCV64 mode + err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u\n", err); + return; + } + + uc_reg_write(uc, UC_RISCV_REG_A0, &a0); + uc_reg_write(uc, UC_RISCV_REG_A1, &a1); + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // auto-allocate memory on access + uc_hook_add(uc, &mem_alloc, UC_HOOK_MEM_UNMAPPED, hook_memalloc, NULL, 1, 0); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing all instruction + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1); + + // emulate 1 instruction, wrong address, illegal code + err = uc_emu_start(uc, 0x1000, -1, 0, 1); + if (err != UC_ERR_INSN_INVALID) { + printf("Expected Illegal Instruction error, got: %u\n", err); + } + + // emulate 1 instruction, correct address, valid code + err = uc_emu_start(uc, ADDRESS, -1, 0, 1); + + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. Below is the CPU context\n"); + + uc_reg_read(uc, UC_RISCV_REG_A0, &a0); + uc_reg_read(uc, UC_RISCV_REG_A1, &a1); + + printf(">>> A0 = 0x%"PRIx64 "\n", a0); + printf(">>> A1 = 0x%"PRIx64 "\n", a1); + + uc_close(uc); +} + +static void test_riscv_func_return(void) +{ + uc_engine *uc; + uc_hook trace1, trace2; + uc_err err; + + uint64_t pc = 0, ra = 0; + + // 10000: 00008067 ret + // 10004: 8082 c.ret + // 10006: 0001 nop + // 10008: 0001 nop + +#define CODE "\x67\x80\x00\x00\x82\x80\x01\x00\x01\x00" + + printf("Emulate RISCV code: return from func\n"); + + // Initialize emulator in RISCV64 mode + err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, CODE, sizeof(CODE) - 1); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing all instruction + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); + +#if 1 + // set return address register + // RET instruction will return to address in RA + // so after RET, PC == RA + ra = 0x10006; + uc_reg_write(uc, UC_RISCV_REG_RA, &ra); + + // execute ret instruction + err = uc_emu_start(uc, 0x10000, -1, 0, 1); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + uc_reg_read(uc, UC_RISCV_REG_PC, &pc); + if (pc != ra) { + printf("Error after execution: PC is: 0x%"PRIx64 ", expected was 0x%"PRIx64 "\n", pc, ra); + if (pc == 0x10000) { + printf(" PC did not change during execution\n"); + } + } else { + printf("Good, PC == RA\n"); + } +#endif + + // set return 
+    // set the return address register
+    // the C.RET instruction will return to the address in RA,
+    // so after C.RET, PC == RA
+    ra = 0x10006;
+    uc_reg_write(uc, UC_RISCV_REG_RA, &ra);
+
+    printf("========\n");
+    // execute the c.ret instruction
+    err = uc_emu_start(uc, 0x10004, -1, 0, 1);
+    if (err) {
+        printf("Failed on uc_emu_start() with error returned: %u\n", err);
+    }
+
+    uc_reg_read(uc, UC_RISCV_REG_PC, &pc);
+    if (pc != ra) {
+        printf("Error after execution: PC is 0x%"PRIx64 ", expected 0x%"PRIx64 "\n", pc, ra);
+        if (pc == 0x10004) {
+            printf(" PC did not change during execution\n");
+        }
+    } else {
+        printf("Good, PC == RA\n");
+    }
+
+    printf(">>> Emulation done.\n");
+
+    uc_close(uc);
+}
+
+int main(int argc, char **argv, char **envp)
+{
+    test_recover_from_illegal();
+
+    printf("------------------\n");
+    test_riscv();
+
+    printf("------------------\n");
+    test_riscv2();
+
+    printf("------------------\n");
+    test_riscv3();
+
+    printf("------------------\n");
+    test_riscv_step();
+
+    printf("------------------\n");
+    test_riscv_timeout();
+
+    printf("------------------\n");
+    test_riscv_sd64();
+
+    printf("------------------\n");
+    test_riscv_func_return();
+
+    return 0;
+}
diff --git a/samples/sample_sparc.c b/samples/sample_sparc.c
index 24d937b3..04c2a359 100644
--- a/samples/sample_sparc.c
+++ b/samples/sample_sparc.c
@@ -80,23 +80,7 @@ static void test_sparc(void)
 
 int main(int argc, char **argv, char **envp)
 {
-    // dynamically load shared library
-#ifdef DYNLOAD
-    if (!uc_dyn_load(NULL, 0)) {
-        printf("Error dynamically loading shared library.\n");
-        printf("Please check that unicorn.dll/unicorn.so is available as well as\n");
-        printf("any other dependent dll/so files.\n");
-        printf("The easiest way is to place them in the same directory as this app.\n");
-        return 1;
-    }
-#endif
-
     test_sparc();
 
-    // dynamically free shared library
-#ifdef DYNLOAD
-    uc_dyn_free();
-#endif
-
     return 0;
 }
diff --git a/samples/sample_x86.c b/samples/sample_x86.c
index 31714180..6732e076 100644
--- a/samples/sample_x86.c
+++ b/samples/sample_x86.c
@@ -26,6 +26,12 @@
 #define X86_CODE64 "\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9\x4D\x29\xF4\x49\x81\xC9\xF6\x8A\xC6\x53\x4D\x87\xED\x48\x0F\xAD\xD2\x49\xF7\xD4\x48\xF7\xE1\x4D\x19\xC5\x4D\x89\xC5\x48\xF7\xD6\x41\xB8\x4F\x8D\x6B\x59\x4D\x87\xD0\x68\x6A\x1E\x09\x3C\x59"
 #define X86_CODE16 "\x00\x00" // add byte ptr [bx + si], al
 #define X86_CODE64_SYSCALL "\x0f\x05" // SYSCALL
+#define X86_MMIO_CODE "\x89\x0d\x04\x00\x02\x00\x8b\x0d\x04\x00\x02\x00" // mov [0x20004], ecx; mov ecx, [0x20004]
+/*
+ * 0x1000000 xor dword ptr [edi+0x3], eax ; edi=0x1000000 (ADDRESS), eax=0xbc4177e6
+ * 0x1000003 dw 0x3ea98b13
+ */
+#define X86_CODE32_SMC "\x31\x47\x03\x13\x8b\xa9\x3e"
 
 // memory address where emulation starts
 #define ADDRESS 0x1000000
@@ -86,7 +92,7 @@ static bool hook_mem_invalid(uc_engine *uc, uc_mem_type type,
 static bool hook_mem_invalid_dummy(uc_engine *uc, uc_mem_type type,
                                    uint64_t address, int size, int64_t value, void *user_data)
 {
-    // Stop emulation.
+    // stop emulation
     return false;
 }
 
@@ -173,6 +179,73 @@ static void hook_syscall(uc_engine *uc, void *user_data)
         printf("ERROR: was not expecting rax=0x%"PRIx64 " in syscall\n", rax);
 }
 
+static bool hook_memalloc(uc_engine *uc, uc_mem_type type, uint64_t address,
+        int size, int64_t value, void *user_data)
+{
+    uint64_t aligned_address = address & 0xFFFFFFFFFFFFF000ULL;
+    int aligned_size = ((int)(size / 0x1000) + 1) * 0x1000;
+
+    printf(">>> Allocating block at 0x%" PRIx64 " (0x%" PRIx64 "), block size = 0x%x (0x%x)\n",
+           address, aligned_address, size, aligned_size);
+
+    uc_mem_map(uc, aligned_address, aligned_size, UC_PROT_ALL);
+
+    // write machine code to be emulated to memory
+    if (uc_mem_write(uc, aligned_address, X86_CODE32, sizeof(X86_CODE32) - 1)) {
+        printf("Failed to write emulation code to memory, quit!\n");
+        return false;
+    }
+
+    // this recovers from missing memory, so we return true
+    return true;
+}
+
+static void test_miss_code(void)
+{
+    uc_engine *uc;
+    uc_err err;
+    uc_hook trace1, trace2;
+
+    int r_ecx = 0x1234; // ECX register
+    int r_edx = 0x7890; // EDX register
+
+    printf("Emulate i386 code - missing code\n");
+
+    // Initialize emulator in X86-32bit mode
+    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
+    if (err) {
+        printf("Failed on uc_open() with error returned: %u\n", err);
+        return;
+    }
+
+    // initialize machine registers
+    uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx);
+    uc_reg_write(uc, UC_X86_REG_EDX, &r_edx);
+
+    // trace all instructions by having @begin > @end
+    uc_hook_add(uc, &trace1, UC_HOOK_CODE, hook_code, NULL, 1, 0);
+
+    // auto-allocate memory on access
+    uc_hook_add(uc, &trace2, UC_HOOK_MEM_UNMAPPED, hook_memalloc, NULL, 1, 0);
+
+    // emulate machine code, before the code has even been written to memory
+    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0);
+    if (err) {
+        printf("Failed on uc_emu_start() with error returned %u: %s\n",
+               err, uc_strerror(err));
+    }
+
+    // now print out some registers
+    printf(">>> Emulation done. Below is the CPU context\n");
+
+    uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx);
+    uc_reg_read(uc, UC_X86_REG_EDX, &r_edx);
+    printf(">>> ECX = 0x%x\n", r_ecx);
+    printf(">>> EDX = 0x%x\n", r_edx);
+
+    uc_close(uc);
+}
+
 static void test_i386(void)
 {
     uc_engine *uc;
@@ -745,6 +818,23 @@ static void test_i386_context_save(void)
     uc_reg_read(uc, UC_X86_REG_EAX, &r_eax);
     printf(">>> EAX = 0x%x\n", r_eax);
 
+    // modify some registers of the context
+    r_eax = 0xc8;
+    uc_context_reg_write(context, UC_X86_REG_EAX, &r_eax);
+
+    // and restore the CPU context again
+    err = uc_context_restore(uc, context);
+    if (err) {
+        printf("Failed on uc_context_restore() with error returned: %u\n", err);
+        return;
+    }
+
+    // now print out some registers
+    printf(">>> CPU context restored with modification. Below is the CPU context\n");
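+
+    // EAX should now read back as 0xc8, the value written into the saved
+    // context above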
+    uc_reg_read(uc, UC_X86_REG_EAX, &r_eax);
+    printf(">>> EAX = 0x%x\n", r_eax);
+
     // free the CPU context
     err = uc_context_free(context);
     if (err) {
@@ -1092,6 +1182,133 @@ static void test_i386_invalid_mem_read_in_tb(void)
     uc_close(uc);
 }
 
+static void test_i386_smc_xor(void)
+{
+    uc_engine *uc;
+    uc_err err;
+
+    uint32_t r_edi = ADDRESS;    // EDI register
+    uint32_t r_eax = 0xbc4177e6; // EAX register
+    uint32_t result;
+
+    printf("===================================\n");
+    printf("Emulate i386 code that modifies itself\n");
+
+    // Initialize emulator in X86-32bit mode
+    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
+    if (err) {
+        printf("Failed on uc_open() with error returned: %u\n", err);
+        return;
+    }
+
+    // map 4KB memory for this emulation
+    uc_mem_map(uc, ADDRESS, 0x1000, UC_PROT_ALL);
+
+    // write machine code to be emulated to memory
+    if (uc_mem_write(uc, ADDRESS, X86_CODE32_SMC, sizeof(X86_CODE32_SMC) - 1)) {
+        printf("Failed to write emulation code to memory, quit!\n");
+        return;
+    }
+
+    // initialize machine registers
+    uc_reg_write(uc, UC_X86_REG_EDI, &r_edi);
+    uc_reg_write(uc, UC_X86_REG_EAX, &r_eax);
+
+    // **Important Note**
+    //
+    // Since SMC code causes TB regeneration, the XOR would in fact be executed
+    // twice (the first execution won't take effect). Thus, if you would like to
+    // use a count to control the emulation, the count should be set to 2.
+    //
+    // err = uc_emu_start(uc, ADDRESS, ADDRESS + 3, 0, 0);
+    err = uc_emu_start(uc, ADDRESS, 0, 0, 2);
+    if (err) {
+        printf("Failed on uc_emu_start() with error returned %u: %s\n",
+               err, uc_strerror(err));
+    }
+
+    printf(">>> Emulation done. Below is the result.\n");
+
+    uc_mem_read(uc, ADDRESS + 3, (void *)&result, 4);
+
+    if (result == (0x3ea98b13 ^ 0xbc4177e6)) {
+        printf(">>> SMC emulation is correct. 0x3ea98b13 ^ 0xbc4177e6 = 0x%x\n", result);
+    } else {
+        printf(">>> SMC emulation is wrong. 0x3ea98b13 ^ 0xbc4177e6 = 0x%x\n", result);
+    }
+
+    uc_close(uc);
+}
+
+static uint64_t mmio_read_callback(uc_engine *uc, uint64_t offset, unsigned size, void *user_data)
+{
+    printf(">>> Read IO memory at offset 0x%"PRIx64" with %u bytes and return 0x19260817\n", offset, size);
+    // The value returned here would be written to ecx.
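+    // (In this sample the guest reads address 0x20004 while the MMIO region is
+    // mapped at 0x20000, so the callback sees offset 0x4 with a 4-byte size.)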
+    return 0x19260817;
+}
+
+static void mmio_write_callback(uc_engine *uc, uint64_t offset, unsigned size, uint64_t value, void *user_data)
+{
+    printf(">>> Write value 0x%"PRIx64" to IO memory at offset 0x%"PRIx64" with %u bytes\n", value, offset, size);
+}
+
+static void test_i386_mmio(void)
+{
+    uc_engine *uc;
+    int r_ecx = 0xdeadbeef;
+    uc_err err;
+
+    printf("===================================\n");
+    printf("Emulate i386 code that uses MMIO\n");
+
+    // Initialize emulator in X86-32bit mode
+    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
+    if (err) {
+        printf("Failed on uc_open() with error returned: %u\n", err);
+        return;
+    }
+
+    // map 4KB memory for this emulation
+    err = uc_mem_map(uc, ADDRESS, 0x1000, UC_PROT_ALL);
+    if (err) {
+        printf("Failed on uc_mem_map() with error returned: %u\n", err);
+        return;
+    }
+
+    // write machine code to be emulated to memory
+    err = uc_mem_write(uc, ADDRESS, X86_MMIO_CODE, sizeof(X86_MMIO_CODE) - 1);
+    if (err) {
+        printf("Failed on uc_mem_write() with error returned: %u\n", err);
+        return;
+    }
+
+    err = uc_mmio_map(uc, 0x20000, 0x4000, mmio_read_callback, NULL, mmio_write_callback, NULL);
+    if (err) {
+        printf("Failed on uc_mmio_map() with error returned: %u\n", err);
+        return;
+    }
+
+    // prepare ecx
+    err = uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx);
+    if (err) {
+        printf("Failed on uc_reg_write() with error returned: %u\n", err);
+        return;
+    }
+
+    err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_MMIO_CODE) - 1, 0, 0);
+    if (err) {
+        printf("Failed on uc_emu_start() with error returned: %u\n", err);
+        return;
+    }
+
+    uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx);
+
+    printf(">>> Emulation done. ECX=0x%x\n", r_ecx);
+
+    uc_close(uc);
+}
+
 int main(int argc, char **argv, char **envp)
 {
     if (argc == 2) {
@@ -1099,6 +1316,7 @@ int main(int argc, char **argv, char **envp)
             test_x86_16();
         } else if (!strcmp(argv[1], "-32")) {
+            test_miss_code();
             test_i386();
             test_i386_map_ptr();
             test_i386_inout();
@@ -1120,6 +1338,7 @@ int main(int argc, char **argv, char **envp)
     } else {
         test_x86_16();
+        test_miss_code();
         test_i386();
         test_i386_map_ptr();
         test_i386_inout();
@@ -1133,6 +1352,8 @@ int main(int argc, char **argv, char **envp)
         test_x86_64();
         test_x86_64_syscall();
         test_i386_invalid_mem_read_in_tb();
+        test_i386_smc_xor();
+        test_i386_mmio();
     }
 
     return 0;
diff --git a/samples/shellcode.c b/samples/shellcode.c
index dcf97437..e0a72c75 100644
--- a/samples/shellcode.c
+++ b/samples/shellcode.c
@@ -134,17 +134,6 @@ static void test_i386(void)
 
 int main(int argc, char **argv, char **envp)
 {
-    // dynamically load shared library
-#ifdef DYNLOAD
-    if (!uc_dyn_load(NULL, 0)) {
-        printf("Error dynamically loading shared library.\n");
-        printf("Please check that unicorn.dll/unicorn.so is available as well as\n");
-        printf("any other dependent dll/so files.\n");
-        printf("The easiest way is to place them in the same directory as this app.\n");
-        return 1;
-    }
-#endif
-
     if (argc == 2) {
         if (!strcmp(argv[1], "-32")) {
             test_i386();
@@ -156,10 +145,5 @@ int main(int argc, char **argv, char **envp)
         test_i386();
     }
 
-    // dynamically free shared library
-#ifdef DYNLOAD
-    uc_dyn_free();
-#endif
-
     return 0;
 }
diff --git a/symbols.sh b/symbols.sh
new file mode 100755
index 00000000..30b4b871
--- /dev/null
+++ b/symbols.sh
@@ -0,0 +1,6296 @@
+#!/bin/bash
+
+CMD_PATH=$(realpath "$0")
+SOURCE_DIR=$(dirname "${CMD_PATH}")
+
+COMMON_SYMBOLS="
+arm_arch \
+tb_target_set_jmp_target \
+have_bmi1 \
+have_popcnt \
+have_avx1 \
+have_avx2 \
+have_isa \
+have_altivec \
+have_vsx \
+flush_icache_range \ +s390_facilities \ +tcg_dump_op \ +tcg_dump_ops \ +tcg_gen_and_i64 \ +tcg_gen_discard_i64 \ +tcg_gen_ld16s_i64 \ +tcg_gen_ld16u_i64 \ +tcg_gen_ld32s_i64 \ +tcg_gen_ld32u_i64 \ +tcg_gen_ld8s_i64 \ +tcg_gen_ld8u_i64 \ +tcg_gen_ld_i64 \ +tcg_gen_mov_i64 \ +tcg_gen_movi_i64 \ +tcg_gen_mul_i64 \ +tcg_gen_or_i64 \ +tcg_gen_sar_i64 \ +tcg_gen_shl_i64 \ +tcg_gen_shr_i64 \ +tcg_gen_st_i64 \ +tcg_gen_xor_i64 \ +cpu_icount_to_ns \ +cpu_is_stopped \ +cpu_get_ticks \ +cpu_get_clock \ +cpu_resume \ +qemu_init_vcpu \ +cpu_stop_current \ +resume_all_vcpus \ +vm_start \ +address_space_dispatch_compact \ +flatview_translate \ +address_space_translate_for_iotlb \ +qemu_get_cpu \ +cpu_address_space_init \ +cpu_get_address_space \ +cpu_exec_unrealizefn \ +cpu_exec_initfn \ +cpu_exec_realizefn \ +tb_invalidate_phys_addr \ +cpu_watchpoint_insert \ +cpu_watchpoint_remove_by_ref \ +cpu_watchpoint_remove_all \ +cpu_watchpoint_address_matches \ +cpu_breakpoint_insert \ +cpu_breakpoint_remove \ +cpu_breakpoint_remove_by_ref \ +cpu_breakpoint_remove_all \ +cpu_abort \ +cpu_physical_memory_test_and_clear_dirty \ +memory_region_section_get_iotlb \ +flatview_add_to_dispatch \ +qemu_ram_get_host_addr \ +qemu_ram_get_offset \ +qemu_ram_get_used_length \ +qemu_ram_is_shared \ +qemu_ram_pagesize \ +qemu_ram_alloc_from_ptr \ +qemu_ram_alloc \ +qemu_ram_free \ +qemu_map_ram_ptr \ +qemu_ram_block_host_offset \ +qemu_ram_block_from_host \ +qemu_ram_addr_from_host \ +cpu_check_watchpoint \ +iotlb_to_section \ +address_space_dispatch_new \ +address_space_dispatch_free \ +flatview_read_continue \ +address_space_read_full \ +address_space_write \ +address_space_rw \ +cpu_physical_memory_rw \ +address_space_write_rom \ +cpu_flush_icache_range \ +cpu_exec_init_all \ +address_space_access_valid \ +address_space_map \ +address_space_unmap \ +cpu_physical_memory_map \ +cpu_physical_memory_unmap \ +cpu_memory_rw_debug \ +qemu_target_page_size \ +qemu_target_page_bits \ +qemu_target_page_bits_min \ +target_words_bigendian \ +cpu_physical_memory_is_io \ +ram_block_discard_range \ +ramblock_is_pmem \ +page_size_init \ +set_preferred_target_page_bits \ +finalize_target_page_bits \ +cpu_outb \ +cpu_outw \ +cpu_outl \ +cpu_inb \ +cpu_inw \ +cpu_inl \ +memory_map \ +memory_map_io \ +memory_map_ptr \ +memory_unmap \ +memory_free \ +flatview_unref \ +address_space_get_flatview \ +memory_region_transaction_begin \ +memory_region_transaction_commit \ +memory_region_init \ +memory_region_access_valid \ +memory_region_dispatch_read \ +memory_region_dispatch_write \ +memory_region_init_io \ +memory_region_init_ram_ptr \ +memory_region_size \ +memory_region_set_readonly \ +memory_region_get_ram_ptr \ +memory_region_from_host \ +memory_region_get_ram_addr \ +memory_region_add_subregion \ +memory_region_del_subregion \ +memory_region_find \ +memory_listener_register \ +memory_listener_unregister \ +address_space_remove_listeners \ +address_space_init \ +address_space_destroy \ +memory_region_init_ram \ +memory_mapping_list_add_merge_sorted \ +exec_inline_op \ +floatx80_default_nan \ +float_raise \ +float16_is_quiet_nan \ +float16_is_signaling_nan \ +float32_is_quiet_nan \ +float32_is_signaling_nan \ +float64_is_quiet_nan \ +float64_is_signaling_nan \ +floatx80_is_quiet_nan \ +floatx80_is_signaling_nan \ +floatx80_silence_nan \ +propagateFloatx80NaN \ +float128_is_quiet_nan \ +float128_is_signaling_nan \ +float128_silence_nan \ +float16_add \ +float16_sub \ +float32_add \ +float32_sub \ +float64_add \ +float64_sub \ +float16_mul \ 
+float32_mul \ +float64_mul \ +float16_muladd \ +float32_muladd \ +float64_muladd \ +float16_div \ +float32_div \ +float64_div \ +float16_to_float32 \ +float16_to_float64 \ +float32_to_float16 \ +float32_to_float64 \ +float64_to_float16 \ +float64_to_float32 \ +float16_round_to_int \ +float32_round_to_int \ +float64_round_to_int \ +float16_to_int16_scalbn \ +float16_to_int32_scalbn \ +float16_to_int64_scalbn \ +float32_to_int16_scalbn \ +float32_to_int32_scalbn \ +float32_to_int64_scalbn \ +float64_to_int16_scalbn \ +float64_to_int32_scalbn \ +float64_to_int64_scalbn \ +float16_to_int16 \ +float16_to_int32 \ +float16_to_int64 \ +float32_to_int16 \ +float32_to_int32 \ +float32_to_int64 \ +float64_to_int16 \ +float64_to_int32 \ +float64_to_int64 \ +float16_to_int16_round_to_zero \ +float16_to_int32_round_to_zero \ +float16_to_int64_round_to_zero \ +float32_to_int16_round_to_zero \ +float32_to_int32_round_to_zero \ +float32_to_int64_round_to_zero \ +float64_to_int16_round_to_zero \ +float64_to_int32_round_to_zero \ +float64_to_int64_round_to_zero \ +float16_to_uint16_scalbn \ +float16_to_uint32_scalbn \ +float16_to_uint64_scalbn \ +float32_to_uint16_scalbn \ +float32_to_uint32_scalbn \ +float32_to_uint64_scalbn \ +float64_to_uint16_scalbn \ +float64_to_uint32_scalbn \ +float64_to_uint64_scalbn \ +float16_to_uint16 \ +float16_to_uint32 \ +float16_to_uint64 \ +float32_to_uint16 \ +float32_to_uint32 \ +float32_to_uint64 \ +float64_to_uint16 \ +float64_to_uint32 \ +float64_to_uint64 \ +float16_to_uint16_round_to_zero \ +float16_to_uint32_round_to_zero \ +float16_to_uint64_round_to_zero \ +float32_to_uint16_round_to_zero \ +float32_to_uint32_round_to_zero \ +float32_to_uint64_round_to_zero \ +float64_to_uint16_round_to_zero \ +float64_to_uint32_round_to_zero \ +float64_to_uint64_round_to_zero \ +int64_to_float16_scalbn \ +int32_to_float16_scalbn \ +int16_to_float16_scalbn \ +int64_to_float16 \ +int32_to_float16 \ +int16_to_float16 \ +int64_to_float32_scalbn \ +int32_to_float32_scalbn \ +int16_to_float32_scalbn \ +int64_to_float32 \ +int32_to_float32 \ +int16_to_float32 \ +int64_to_float64_scalbn \ +int32_to_float64_scalbn \ +int16_to_float64_scalbn \ +int64_to_float64 \ +int32_to_float64 \ +int16_to_float64 \ +uint64_to_float16_scalbn \ +uint32_to_float16_scalbn \ +uint16_to_float16_scalbn \ +uint64_to_float16 \ +uint32_to_float16 \ +uint16_to_float16 \ +uint64_to_float32_scalbn \ +uint32_to_float32_scalbn \ +uint16_to_float32_scalbn \ +uint64_to_float32 \ +uint32_to_float32 \ +uint16_to_float32 \ +uint64_to_float64_scalbn \ +uint32_to_float64_scalbn \ +uint16_to_float64_scalbn \ +uint64_to_float64 \ +uint32_to_float64 \ +uint16_to_float64 \ +float16_min \ +float16_minnum \ +float16_minnummag \ +float16_max \ +float16_maxnum \ +float16_maxnummag \ +float32_min \ +float32_minnum \ +float32_minnummag \ +float32_max \ +float32_maxnum \ +float32_maxnummag \ +float64_min \ +float64_minnum \ +float64_minnummag \ +float64_max \ +float64_maxnum \ +float64_maxnummag \ +float16_compare \ +float16_compare_quiet \ +float32_compare \ +float32_compare_quiet \ +float64_compare \ +float64_compare_quiet \ +float16_scalbn \ +float32_scalbn \ +float64_scalbn \ +float16_sqrt \ +float32_sqrt \ +float64_sqrt \ +float16_default_nan \ +float32_default_nan \ +float64_default_nan \ +float128_default_nan \ +float16_silence_nan \ +float32_silence_nan \ +float64_silence_nan \ +float16_squash_input_denormal \ +float32_squash_input_denormal \ +float64_squash_input_denormal \ +normalizeFloatx80Subnormal \ +roundAndPackFloatx80 \ 
+normalizeRoundAndPackFloatx80 \ +int32_to_floatx80 \ +int32_to_float128 \ +int64_to_floatx80 \ +int64_to_float128 \ +uint64_to_float128 \ +float32_to_floatx80 \ +float32_to_float128 \ +float32_rem \ +float32_exp2 \ +float32_log2 \ +float32_eq \ +float32_le \ +float32_lt \ +float32_unordered \ +float32_eq_quiet \ +float32_le_quiet \ +float32_lt_quiet \ +float32_unordered_quiet \ +float64_to_floatx80 \ +float64_to_float128 \ +float64_rem \ +float64_log2 \ +float64_eq \ +float64_le \ +float64_lt \ +float64_unordered \ +float64_eq_quiet \ +float64_le_quiet \ +float64_lt_quiet \ +float64_unordered_quiet \ +floatx80_to_int32 \ +floatx80_to_int32_round_to_zero \ +floatx80_to_int64 \ +floatx80_to_int64_round_to_zero \ +floatx80_to_float32 \ +floatx80_to_float64 \ +floatx80_to_float128 \ +floatx80_round \ +floatx80_round_to_int \ +floatx80_add \ +floatx80_sub \ +floatx80_mul \ +floatx80_div \ +floatx80_rem \ +floatx80_sqrt \ +floatx80_eq \ +floatx80_le \ +floatx80_lt \ +floatx80_unordered \ +floatx80_eq_quiet \ +floatx80_le_quiet \ +floatx80_lt_quiet \ +floatx80_unordered_quiet \ +float128_to_int32 \ +float128_to_int32_round_to_zero \ +float128_to_int64 \ +float128_to_int64_round_to_zero \ +float128_to_uint64 \ +float128_to_uint64_round_to_zero \ +float128_to_uint32_round_to_zero \ +float128_to_uint32 \ +float128_to_float32 \ +float128_to_float64 \ +float128_to_floatx80 \ +float128_round_to_int \ +float128_add \ +float128_sub \ +float128_mul \ +float128_div \ +float128_rem \ +float128_sqrt \ +float128_eq \ +float128_le \ +float128_lt \ +float128_unordered \ +float128_eq_quiet \ +float128_le_quiet \ +float128_lt_quiet \ +float128_unordered_quiet \ +floatx80_compare \ +floatx80_compare_quiet \ +float128_compare \ +float128_compare_quiet \ +floatx80_scalbn \ +float128_scalbn \ +softfloat_init \ +tcg_optimize \ +gen_new_label \ +tcg_can_emit_vec_op \ +tcg_expand_vec_op \ +tcg_register_jit \ +tcg_tb_insert \ +tcg_tb_remove \ +tcg_tb_lookup \ +tcg_tb_foreach \ +tcg_nb_tbs \ +tcg_region_reset_all \ +tcg_region_init \ +tcg_code_size \ +tcg_code_capacity \ +tcg_tb_phys_invalidate_count \ +tcg_malloc_internal \ +tcg_pool_reset \ +tcg_context_init \ +tcg_tb_alloc \ +tcg_prologue_init \ +tcg_func_start \ +tcg_set_frame \ +tcg_global_mem_new_internal \ +tcg_temp_new_internal \ +tcg_temp_new_vec \ +tcg_temp_new_vec_matching \ +tcg_temp_free_internal \ +tcg_const_i32 \ +tcg_const_i64 \ +tcg_const_local_i32 \ +tcg_const_local_i64 \ +tcg_op_supported \ +tcg_gen_callN \ +tcg_op_remove \ +tcg_emit_op \ +tcg_op_insert_before \ +tcg_op_insert_after \ +tcg_cpu_exec_time \ +tcg_gen_code \ +tcg_gen_op1 \ +tcg_gen_op2 \ +tcg_gen_op3 \ +tcg_gen_op4 \ +tcg_gen_op5 \ +tcg_gen_op6 \ +tcg_gen_mb \ +tcg_gen_addi_i32 \ +tcg_gen_subfi_i32 \ +tcg_gen_subi_i32 \ +tcg_gen_andi_i32 \ +tcg_gen_ori_i32 \ +tcg_gen_xori_i32 \ +tcg_gen_shli_i32 \ +tcg_gen_shri_i32 \ +tcg_gen_sari_i32 \ +tcg_gen_brcond_i32 \ +tcg_gen_brcondi_i32 \ +tcg_gen_setcond_i32 \ +tcg_gen_setcondi_i32 \ +tcg_gen_muli_i32 \ +tcg_gen_div_i32 \ +tcg_gen_rem_i32 \ +tcg_gen_divu_i32 \ +tcg_gen_remu_i32 \ +tcg_gen_andc_i32 \ +tcg_gen_eqv_i32 \ +tcg_gen_nand_i32 \ +tcg_gen_nor_i32 \ +tcg_gen_orc_i32 \ +tcg_gen_clz_i32 \ +tcg_gen_clzi_i32 \ +tcg_gen_ctz_i32 \ +tcg_gen_ctzi_i32 \ +tcg_gen_clrsb_i32 \ +tcg_gen_ctpop_i32 \ +tcg_gen_rotl_i32 \ +tcg_gen_rotli_i32 \ +tcg_gen_rotr_i32 \ +tcg_gen_rotri_i32 \ +tcg_gen_deposit_i32 \ +tcg_gen_deposit_z_i32 \ +tcg_gen_extract_i32 \ +tcg_gen_sextract_i32 \ +tcg_gen_extract2_i32 \ +tcg_gen_movcond_i32 \ +tcg_gen_add2_i32 \ 
+tcg_gen_sub2_i32 \ +tcg_gen_mulu2_i32 \ +tcg_gen_muls2_i32 \ +tcg_gen_mulsu2_i32 \ +tcg_gen_ext8s_i32 \ +tcg_gen_ext16s_i32 \ +tcg_gen_ext8u_i32 \ +tcg_gen_ext16u_i32 \ +tcg_gen_bswap16_i32 \ +tcg_gen_bswap32_i32 \ +tcg_gen_smin_i32 \ +tcg_gen_umin_i32 \ +tcg_gen_smax_i32 \ +tcg_gen_umax_i32 \ +tcg_gen_abs_i32 \ +tcg_gen_addi_i64 \ +tcg_gen_subfi_i64 \ +tcg_gen_subi_i64 \ +tcg_gen_andi_i64 \ +tcg_gen_ori_i64 \ +tcg_gen_xori_i64 \ +tcg_gen_shli_i64 \ +tcg_gen_shri_i64 \ +tcg_gen_sari_i64 \ +tcg_gen_brcond_i64 \ +tcg_gen_brcondi_i64 \ +tcg_gen_setcond_i64 \ +tcg_gen_setcondi_i64 \ +tcg_gen_muli_i64 \ +tcg_gen_div_i64 \ +tcg_gen_rem_i64 \ +tcg_gen_divu_i64 \ +tcg_gen_remu_i64 \ +tcg_gen_ext8s_i64 \ +tcg_gen_ext16s_i64 \ +tcg_gen_ext32s_i64 \ +tcg_gen_ext8u_i64 \ +tcg_gen_ext16u_i64 \ +tcg_gen_ext32u_i64 \ +tcg_gen_bswap16_i64 \ +tcg_gen_bswap32_i64 \ +tcg_gen_bswap64_i64 \ +tcg_gen_not_i64 \ +tcg_gen_andc_i64 \ +tcg_gen_eqv_i64 \ +tcg_gen_nand_i64 \ +tcg_gen_nor_i64 \ +tcg_gen_orc_i64 \ +tcg_gen_clz_i64 \ +tcg_gen_clzi_i64 \ +tcg_gen_ctz_i64 \ +tcg_gen_ctzi_i64 \ +tcg_gen_clrsb_i64 \ +tcg_gen_ctpop_i64 \ +tcg_gen_rotl_i64 \ +tcg_gen_rotli_i64 \ +tcg_gen_rotr_i64 \ +tcg_gen_rotri_i64 \ +tcg_gen_deposit_i64 \ +tcg_gen_deposit_z_i64 \ +tcg_gen_extract_i64 \ +tcg_gen_sextract_i64 \ +tcg_gen_extract2_i64 \ +tcg_gen_movcond_i64 \ +tcg_gen_add2_i64 \ +tcg_gen_sub2_i64 \ +tcg_gen_mulu2_i64 \ +tcg_gen_muls2_i64 \ +tcg_gen_mulsu2_i64 \ +tcg_gen_smin_i64 \ +tcg_gen_umin_i64 \ +tcg_gen_smax_i64 \ +tcg_gen_umax_i64 \ +tcg_gen_abs_i64 \ +tcg_gen_extrl_i64_i32 \ +tcg_gen_extrh_i64_i32 \ +tcg_gen_extu_i32_i64 \ +tcg_gen_ext_i32_i64 \ +tcg_gen_concat_i32_i64 \ +tcg_gen_extr_i64_i32 \ +tcg_gen_extr32_i64 \ +tcg_gen_exit_tb \ +tcg_gen_goto_tb \ +tcg_gen_lookup_and_goto_ptr \ +check_exit_request \ +tcg_gen_qemu_ld_i32 \ +tcg_gen_qemu_st_i32 \ +tcg_gen_qemu_ld_i64 \ +tcg_gen_qemu_st_i64 \ +tcg_gen_atomic_cmpxchg_i32 \ +tcg_gen_atomic_cmpxchg_i64 \ +tcg_gen_atomic_fetch_add_i32 \ +tcg_gen_atomic_fetch_add_i64 \ +tcg_gen_atomic_fetch_and_i32 \ +tcg_gen_atomic_fetch_and_i64 \ +tcg_gen_atomic_fetch_or_i32 \ +tcg_gen_atomic_fetch_or_i64 \ +tcg_gen_atomic_fetch_xor_i32 \ +tcg_gen_atomic_fetch_xor_i64 \ +tcg_gen_atomic_fetch_smin_i32 \ +tcg_gen_atomic_fetch_smin_i64 \ +tcg_gen_atomic_fetch_umin_i32 \ +tcg_gen_atomic_fetch_umin_i64 \ +tcg_gen_atomic_fetch_smax_i32 \ +tcg_gen_atomic_fetch_smax_i64 \ +tcg_gen_atomic_fetch_umax_i32 \ +tcg_gen_atomic_fetch_umax_i64 \ +tcg_gen_atomic_add_fetch_i32 \ +tcg_gen_atomic_add_fetch_i64 \ +tcg_gen_atomic_and_fetch_i32 \ +tcg_gen_atomic_and_fetch_i64 \ +tcg_gen_atomic_or_fetch_i32 \ +tcg_gen_atomic_or_fetch_i64 \ +tcg_gen_atomic_xor_fetch_i32 \ +tcg_gen_atomic_xor_fetch_i64 \ +tcg_gen_atomic_smin_fetch_i32 \ +tcg_gen_atomic_smin_fetch_i64 \ +tcg_gen_atomic_umin_fetch_i32 \ +tcg_gen_atomic_umin_fetch_i64 \ +tcg_gen_atomic_smax_fetch_i32 \ +tcg_gen_atomic_smax_fetch_i64 \ +tcg_gen_atomic_umax_fetch_i32 \ +tcg_gen_atomic_umax_fetch_i64 \ +tcg_gen_atomic_xchg_i32 \ +tcg_gen_atomic_xchg_i64 \ +simd_desc \ +tcg_gen_gvec_2_ool \ +tcg_gen_gvec_2i_ool \ +tcg_gen_gvec_3_ool \ +tcg_gen_gvec_4_ool \ +tcg_gen_gvec_5_ool \ +tcg_gen_gvec_2_ptr \ +tcg_gen_gvec_3_ptr \ +tcg_gen_gvec_4_ptr \ +tcg_gen_gvec_5_ptr \ +tcg_gen_gvec_2 \ +tcg_gen_gvec_2i \ +tcg_gen_gvec_2s \ +tcg_gen_gvec_3 \ +tcg_gen_gvec_3i \ +tcg_gen_gvec_4 \ +tcg_gen_gvec_mov \ +tcg_gen_gvec_dup_i32 \ +tcg_gen_gvec_dup_i64 \ +tcg_gen_gvec_dup_mem \ +tcg_gen_gvec_dup64i \ +tcg_gen_gvec_dup32i \ +tcg_gen_gvec_dup16i \ +tcg_gen_gvec_dup8i \ 
+tcg_gen_gvec_not \ +tcg_gen_vec_add8_i64 \ +tcg_gen_vec_add16_i64 \ +tcg_gen_vec_add32_i64 \ +tcg_gen_gvec_add \ +tcg_gen_gvec_adds \ +tcg_gen_gvec_addi \ +tcg_gen_gvec_subs \ +tcg_gen_vec_sub8_i64 \ +tcg_gen_vec_sub16_i64 \ +tcg_gen_vec_sub32_i64 \ +tcg_gen_gvec_sub \ +tcg_gen_gvec_mul \ +tcg_gen_gvec_muls \ +tcg_gen_gvec_muli \ +tcg_gen_gvec_ssadd \ +tcg_gen_gvec_sssub \ +tcg_gen_gvec_usadd \ +tcg_gen_gvec_ussub \ +tcg_gen_gvec_smin \ +tcg_gen_gvec_umin \ +tcg_gen_gvec_smax \ +tcg_gen_gvec_umax \ +tcg_gen_vec_neg8_i64 \ +tcg_gen_vec_neg16_i64 \ +tcg_gen_vec_neg32_i64 \ +tcg_gen_gvec_neg \ +tcg_gen_gvec_abs \ +tcg_gen_gvec_and \ +tcg_gen_gvec_or \ +tcg_gen_gvec_xor \ +tcg_gen_gvec_andc \ +tcg_gen_gvec_orc \ +tcg_gen_gvec_nand \ +tcg_gen_gvec_nor \ +tcg_gen_gvec_eqv \ +tcg_gen_gvec_ands \ +tcg_gen_gvec_andi \ +tcg_gen_gvec_xors \ +tcg_gen_gvec_xori \ +tcg_gen_gvec_ors \ +tcg_gen_gvec_ori \ +tcg_gen_vec_shl8i_i64 \ +tcg_gen_vec_shl16i_i64 \ +tcg_gen_gvec_shli \ +tcg_gen_vec_shr8i_i64 \ +tcg_gen_vec_shr16i_i64 \ +tcg_gen_gvec_shri \ +tcg_gen_vec_sar8i_i64 \ +tcg_gen_vec_sar16i_i64 \ +tcg_gen_gvec_sari \ +tcg_gen_gvec_shls \ +tcg_gen_gvec_shrs \ +tcg_gen_gvec_sars \ +tcg_gen_gvec_shlv \ +tcg_gen_gvec_shrv \ +tcg_gen_gvec_sarv \ +tcg_gen_gvec_cmp \ +tcg_gen_gvec_bitsel \ +tcg_can_emit_vecop_list \ +vec_gen_2 \ +vec_gen_3 \ +vec_gen_4 \ +tcg_gen_mov_vec \ +tcg_const_zeros_vec \ +tcg_const_ones_vec \ +tcg_const_zeros_vec_matching \ +tcg_const_ones_vec_matching \ +tcg_gen_dup64i_vec \ +tcg_gen_dup32i_vec \ +tcg_gen_dup16i_vec \ +tcg_gen_dup8i_vec \ +tcg_gen_dupi_vec \ +tcg_gen_dup_i64_vec \ +tcg_gen_dup_i32_vec \ +tcg_gen_dup_mem_vec \ +tcg_gen_ld_vec \ +tcg_gen_st_vec \ +tcg_gen_stl_vec \ +tcg_gen_and_vec \ +tcg_gen_or_vec \ +tcg_gen_xor_vec \ +tcg_gen_andc_vec \ +tcg_gen_orc_vec \ +tcg_gen_nand_vec \ +tcg_gen_nor_vec \ +tcg_gen_eqv_vec \ +tcg_gen_not_vec \ +tcg_gen_neg_vec \ +tcg_gen_abs_vec \ +tcg_gen_shli_vec \ +tcg_gen_shri_vec \ +tcg_gen_sari_vec \ +tcg_gen_cmp_vec \ +tcg_gen_add_vec \ +tcg_gen_sub_vec \ +tcg_gen_mul_vec \ +tcg_gen_ssadd_vec \ +tcg_gen_usadd_vec \ +tcg_gen_sssub_vec \ +tcg_gen_ussub_vec \ +tcg_gen_smin_vec \ +tcg_gen_umin_vec \ +tcg_gen_smax_vec \ +tcg_gen_umax_vec \ +tcg_gen_shlv_vec \ +tcg_gen_shrv_vec \ +tcg_gen_sarv_vec \ +tcg_gen_shls_vec \ +tcg_gen_shrs_vec \ +tcg_gen_sars_vec \ +tcg_gen_bitsel_vec \ +tcg_gen_cmpsel_vec \ +tb_htable_lookup \ +tb_set_jmp_target \ +cpu_exec \ +cpu_loop_exit_noexc \ +cpu_reloading_memory_map \ +cpu_loop_exit \ +cpu_loop_exit_restore \ +cpu_loop_exit_atomic \ +tlb_init \ +tlb_flush_by_mmuidx \ +tlb_flush \ +tlb_flush_by_mmuidx_all_cpus \ +tlb_flush_all_cpus \ +tlb_flush_by_mmuidx_all_cpus_synced \ +tlb_flush_all_cpus_synced \ +tlb_flush_page_by_mmuidx \ +tlb_flush_page \ +tlb_flush_page_by_mmuidx_all_cpus \ +tlb_flush_page_all_cpus \ +tlb_flush_page_by_mmuidx_all_cpus_synced \ +tlb_flush_page_all_cpus_synced \ +tlb_protect_code \ +tlb_unprotect_code \ +tlb_reset_dirty \ +tlb_set_dirty \ +tlb_set_page_with_attrs \ +tlb_set_page \ +get_page_addr_code_hostp \ +get_page_addr_code \ +probe_access \ +tlb_vaddr_to_host \ +helper_ret_ldub_mmu \ +helper_le_lduw_mmu \ +helper_be_lduw_mmu \ +helper_le_ldul_mmu \ +helper_be_ldul_mmu \ +helper_le_ldq_mmu \ +helper_be_ldq_mmu \ +helper_ret_ldsb_mmu \ +helper_le_ldsw_mmu \ +helper_be_ldsw_mmu \ +helper_le_ldsl_mmu \ +helper_be_ldsl_mmu \ +cpu_ldub_mmuidx_ra \ +cpu_ldsb_mmuidx_ra \ +cpu_lduw_mmuidx_ra \ +cpu_ldsw_mmuidx_ra \ +cpu_ldl_mmuidx_ra \ +cpu_ldq_mmuidx_ra \ +cpu_ldub_data_ra \ +cpu_ldsb_data_ra 
\ +cpu_lduw_data_ra \ +cpu_ldsw_data_ra \ +cpu_ldl_data_ra \ +cpu_ldq_data_ra \ +cpu_ldub_data \ +cpu_ldsb_data \ +cpu_lduw_data \ +cpu_ldsw_data \ +cpu_ldl_data \ +cpu_ldq_data \ +helper_ret_stb_mmu \ +helper_le_stw_mmu \ +helper_be_stw_mmu \ +helper_le_stl_mmu \ +helper_be_stl_mmu \ +helper_le_stq_mmu \ +helper_be_stq_mmu \ +cpu_stb_mmuidx_ra \ +cpu_stw_mmuidx_ra \ +cpu_stl_mmuidx_ra \ +cpu_stq_mmuidx_ra \ +cpu_stb_data_ra \ +cpu_stw_data_ra \ +cpu_stl_data_ra \ +cpu_stq_data_ra \ +cpu_stb_data \ +cpu_stw_data \ +cpu_stl_data \ +cpu_stq_data \ +helper_atomic_cmpxchgb_mmu \ +helper_atomic_xchgb_mmu \ +helper_atomic_fetch_addb_mmu \ +helper_atomic_fetch_andb_mmu \ +helper_atomic_fetch_orb_mmu \ +helper_atomic_fetch_xorb_mmu \ +helper_atomic_add_fetchb_mmu \ +helper_atomic_and_fetchb_mmu \ +helper_atomic_or_fetchb_mmu \ +helper_atomic_xor_fetchb_mmu \ +helper_atomic_fetch_sminb_mmu \ +helper_atomic_fetch_uminb_mmu \ +helper_atomic_fetch_smaxb_mmu \ +helper_atomic_fetch_umaxb_mmu \ +helper_atomic_smin_fetchb_mmu \ +helper_atomic_umin_fetchb_mmu \ +helper_atomic_smax_fetchb_mmu \ +helper_atomic_umax_fetchb_mmu \ +helper_atomic_cmpxchgw_le_mmu \ +helper_atomic_xchgw_le_mmu \ +helper_atomic_fetch_addw_le_mmu \ +helper_atomic_fetch_andw_le_mmu \ +helper_atomic_fetch_orw_le_mmu \ +helper_atomic_fetch_xorw_le_mmu \ +helper_atomic_add_fetchw_le_mmu \ +helper_atomic_and_fetchw_le_mmu \ +helper_atomic_or_fetchw_le_mmu \ +helper_atomic_xor_fetchw_le_mmu \ +helper_atomic_fetch_sminw_le_mmu \ +helper_atomic_fetch_uminw_le_mmu \ +helper_atomic_fetch_smaxw_le_mmu \ +helper_atomic_fetch_umaxw_le_mmu \ +helper_atomic_smin_fetchw_le_mmu \ +helper_atomic_umin_fetchw_le_mmu \ +helper_atomic_smax_fetchw_le_mmu \ +helper_atomic_umax_fetchw_le_mmu \ +helper_atomic_cmpxchgw_be_mmu \ +helper_atomic_xchgw_be_mmu \ +helper_atomic_fetch_andw_be_mmu \ +helper_atomic_fetch_orw_be_mmu \ +helper_atomic_fetch_xorw_be_mmu \ +helper_atomic_and_fetchw_be_mmu \ +helper_atomic_or_fetchw_be_mmu \ +helper_atomic_xor_fetchw_be_mmu \ +helper_atomic_fetch_sminw_be_mmu \ +helper_atomic_fetch_uminw_be_mmu \ +helper_atomic_fetch_smaxw_be_mmu \ +helper_atomic_fetch_umaxw_be_mmu \ +helper_atomic_smin_fetchw_be_mmu \ +helper_atomic_umin_fetchw_be_mmu \ +helper_atomic_smax_fetchw_be_mmu \ +helper_atomic_umax_fetchw_be_mmu \ +helper_atomic_fetch_addw_be_mmu \ +helper_atomic_add_fetchw_be_mmu \ +helper_atomic_cmpxchgl_le_mmu \ +helper_atomic_xchgl_le_mmu \ +helper_atomic_fetch_addl_le_mmu \ +helper_atomic_fetch_andl_le_mmu \ +helper_atomic_fetch_orl_le_mmu \ +helper_atomic_fetch_xorl_le_mmu \ +helper_atomic_add_fetchl_le_mmu \ +helper_atomic_and_fetchl_le_mmu \ +helper_atomic_or_fetchl_le_mmu \ +helper_atomic_xor_fetchl_le_mmu \ +helper_atomic_fetch_sminl_le_mmu \ +helper_atomic_fetch_uminl_le_mmu \ +helper_atomic_fetch_smaxl_le_mmu \ +helper_atomic_fetch_umaxl_le_mmu \ +helper_atomic_smin_fetchl_le_mmu \ +helper_atomic_umin_fetchl_le_mmu \ +helper_atomic_smax_fetchl_le_mmu \ +helper_atomic_umax_fetchl_le_mmu \ +helper_atomic_cmpxchgl_be_mmu \ +helper_atomic_xchgl_be_mmu \ +helper_atomic_fetch_andl_be_mmu \ +helper_atomic_fetch_orl_be_mmu \ +helper_atomic_fetch_xorl_be_mmu \ +helper_atomic_and_fetchl_be_mmu \ +helper_atomic_or_fetchl_be_mmu \ +helper_atomic_xor_fetchl_be_mmu \ +helper_atomic_fetch_sminl_be_mmu \ +helper_atomic_fetch_uminl_be_mmu \ +helper_atomic_fetch_smaxl_be_mmu \ +helper_atomic_fetch_umaxl_be_mmu \ +helper_atomic_smin_fetchl_be_mmu \ +helper_atomic_umin_fetchl_be_mmu \ +helper_atomic_smax_fetchl_be_mmu \ 
+helper_atomic_umax_fetchl_be_mmu \ +helper_atomic_fetch_addl_be_mmu \ +helper_atomic_add_fetchl_be_mmu \ +helper_atomic_cmpxchgq_le_mmu \ +helper_atomic_xchgq_le_mmu \ +helper_atomic_fetch_addq_le_mmu \ +helper_atomic_fetch_andq_le_mmu \ +helper_atomic_fetch_orq_le_mmu \ +helper_atomic_fetch_xorq_le_mmu \ +helper_atomic_add_fetchq_le_mmu \ +helper_atomic_and_fetchq_le_mmu \ +helper_atomic_or_fetchq_le_mmu \ +helper_atomic_xor_fetchq_le_mmu \ +helper_atomic_fetch_sminq_le_mmu \ +helper_atomic_fetch_uminq_le_mmu \ +helper_atomic_fetch_smaxq_le_mmu \ +helper_atomic_fetch_umaxq_le_mmu \ +helper_atomic_smin_fetchq_le_mmu \ +helper_atomic_umin_fetchq_le_mmu \ +helper_atomic_smax_fetchq_le_mmu \ +helper_atomic_umax_fetchq_le_mmu \ +helper_atomic_cmpxchgq_be_mmu \ +helper_atomic_xchgq_be_mmu \ +helper_atomic_fetch_andq_be_mmu \ +helper_atomic_fetch_orq_be_mmu \ +helper_atomic_fetch_xorq_be_mmu \ +helper_atomic_and_fetchq_be_mmu \ +helper_atomic_or_fetchq_be_mmu \ +helper_atomic_xor_fetchq_be_mmu \ +helper_atomic_fetch_sminq_be_mmu \ +helper_atomic_fetch_uminq_be_mmu \ +helper_atomic_fetch_smaxq_be_mmu \ +helper_atomic_fetch_umaxq_be_mmu \ +helper_atomic_smin_fetchq_be_mmu \ +helper_atomic_umin_fetchq_be_mmu \ +helper_atomic_smax_fetchq_be_mmu \ +helper_atomic_umax_fetchq_be_mmu \ +helper_atomic_fetch_addq_be_mmu \ +helper_atomic_add_fetchq_be_mmu \ +helper_atomic_cmpxchgb \ +helper_atomic_xchgb \ +helper_atomic_fetch_addb \ +helper_atomic_fetch_andb \ +helper_atomic_fetch_orb \ +helper_atomic_fetch_xorb \ +helper_atomic_add_fetchb \ +helper_atomic_and_fetchb \ +helper_atomic_or_fetchb \ +helper_atomic_xor_fetchb \ +helper_atomic_fetch_sminb \ +helper_atomic_fetch_uminb \ +helper_atomic_fetch_smaxb \ +helper_atomic_fetch_umaxb \ +helper_atomic_smin_fetchb \ +helper_atomic_umin_fetchb \ +helper_atomic_smax_fetchb \ +helper_atomic_umax_fetchb \ +helper_atomic_cmpxchgw_le \ +helper_atomic_xchgw_le \ +helper_atomic_fetch_addw_le \ +helper_atomic_fetch_andw_le \ +helper_atomic_fetch_orw_le \ +helper_atomic_fetch_xorw_le \ +helper_atomic_add_fetchw_le \ +helper_atomic_and_fetchw_le \ +helper_atomic_or_fetchw_le \ +helper_atomic_xor_fetchw_le \ +helper_atomic_fetch_sminw_le \ +helper_atomic_fetch_uminw_le \ +helper_atomic_fetch_smaxw_le \ +helper_atomic_fetch_umaxw_le \ +helper_atomic_smin_fetchw_le \ +helper_atomic_umin_fetchw_le \ +helper_atomic_smax_fetchw_le \ +helper_atomic_umax_fetchw_le \ +helper_atomic_cmpxchgw_be \ +helper_atomic_xchgw_be \ +helper_atomic_fetch_andw_be \ +helper_atomic_fetch_orw_be \ +helper_atomic_fetch_xorw_be \ +helper_atomic_and_fetchw_be \ +helper_atomic_or_fetchw_be \ +helper_atomic_xor_fetchw_be \ +helper_atomic_fetch_sminw_be \ +helper_atomic_fetch_uminw_be \ +helper_atomic_fetch_smaxw_be \ +helper_atomic_fetch_umaxw_be \ +helper_atomic_smin_fetchw_be \ +helper_atomic_umin_fetchw_be \ +helper_atomic_smax_fetchw_be \ +helper_atomic_umax_fetchw_be \ +helper_atomic_fetch_addw_be \ +helper_atomic_add_fetchw_be \ +helper_atomic_cmpxchgl_le \ +helper_atomic_xchgl_le \ +helper_atomic_fetch_addl_le \ +helper_atomic_fetch_andl_le \ +helper_atomic_fetch_orl_le \ +helper_atomic_fetch_xorl_le \ +helper_atomic_add_fetchl_le \ +helper_atomic_and_fetchl_le \ +helper_atomic_or_fetchl_le \ +helper_atomic_xor_fetchl_le \ +helper_atomic_fetch_sminl_le \ +helper_atomic_fetch_uminl_le \ +helper_atomic_fetch_smaxl_le \ +helper_atomic_fetch_umaxl_le \ +helper_atomic_smin_fetchl_le \ +helper_atomic_umin_fetchl_le \ +helper_atomic_smax_fetchl_le \ +helper_atomic_umax_fetchl_le \ 
+helper_atomic_cmpxchgl_be \ +helper_atomic_xchgl_be \ +helper_atomic_fetch_andl_be \ +helper_atomic_fetch_orl_be \ +helper_atomic_fetch_xorl_be \ +helper_atomic_and_fetchl_be \ +helper_atomic_or_fetchl_be \ +helper_atomic_xor_fetchl_be \ +helper_atomic_fetch_sminl_be \ +helper_atomic_fetch_uminl_be \ +helper_atomic_fetch_smaxl_be \ +helper_atomic_fetch_umaxl_be \ +helper_atomic_smin_fetchl_be \ +helper_atomic_umin_fetchl_be \ +helper_atomic_smax_fetchl_be \ +helper_atomic_umax_fetchl_be \ +helper_atomic_fetch_addl_be \ +helper_atomic_add_fetchl_be \ +helper_atomic_cmpxchgq_le \ +helper_atomic_xchgq_le \ +helper_atomic_fetch_addq_le \ +helper_atomic_fetch_andq_le \ +helper_atomic_fetch_orq_le \ +helper_atomic_fetch_xorq_le \ +helper_atomic_add_fetchq_le \ +helper_atomic_and_fetchq_le \ +helper_atomic_or_fetchq_le \ +helper_atomic_xor_fetchq_le \ +helper_atomic_fetch_sminq_le \ +helper_atomic_fetch_uminq_le \ +helper_atomic_fetch_smaxq_le \ +helper_atomic_fetch_umaxq_le \ +helper_atomic_smin_fetchq_le \ +helper_atomic_umin_fetchq_le \ +helper_atomic_smax_fetchq_le \ +helper_atomic_umax_fetchq_le \ +helper_atomic_cmpxchgq_be \ +helper_atomic_xchgq_be \ +helper_atomic_fetch_andq_be \ +helper_atomic_fetch_orq_be \ +helper_atomic_fetch_xorq_be \ +helper_atomic_and_fetchq_be \ +helper_atomic_or_fetchq_be \ +helper_atomic_xor_fetchq_be \ +helper_atomic_fetch_sminq_be \ +helper_atomic_fetch_uminq_be \ +helper_atomic_fetch_smaxq_be \ +helper_atomic_fetch_umaxq_be \ +helper_atomic_smin_fetchq_be \ +helper_atomic_umin_fetchq_be \ +helper_atomic_smax_fetchq_be \ +helper_atomic_umax_fetchq_be \ +helper_atomic_fetch_addq_be \ +helper_atomic_add_fetchq_be \ +cpu_ldub_code \ +cpu_lduw_code \ +cpu_ldl_code \ +cpu_ldq_code \ +helper_div_i32 \ +helper_rem_i32 \ +helper_divu_i32 \ +helper_remu_i32 \ +helper_shl_i64 \ +helper_shr_i64 \ +helper_sar_i64 \ +helper_div_i64 \ +helper_rem_i64 \ +helper_divu_i64 \ +helper_remu_i64 \ +helper_muluh_i64 \ +helper_mulsh_i64 \ +helper_clz_i32 \ +helper_ctz_i32 \ +helper_clz_i64 \ +helper_ctz_i64 \ +helper_clrsb_i32 \ +helper_clrsb_i64 \ +helper_ctpop_i32 \ +helper_ctpop_i64 \ +helper_lookup_tb_ptr \ +helper_exit_atomic \ +helper_gvec_add8 \ +helper_gvec_add16 \ +helper_gvec_add32 \ +helper_gvec_add64 \ +helper_gvec_adds8 \ +helper_gvec_adds16 \ +helper_gvec_adds32 \ +helper_gvec_adds64 \ +helper_gvec_sub8 \ +helper_gvec_sub16 \ +helper_gvec_sub32 \ +helper_gvec_sub64 \ +helper_gvec_subs8 \ +helper_gvec_subs16 \ +helper_gvec_subs32 \ +helper_gvec_subs64 \ +helper_gvec_mul8 \ +helper_gvec_mul16 \ +helper_gvec_mul32 \ +helper_gvec_mul64 \ +helper_gvec_muls8 \ +helper_gvec_muls16 \ +helper_gvec_muls32 \ +helper_gvec_muls64 \ +helper_gvec_neg8 \ +helper_gvec_neg16 \ +helper_gvec_neg32 \ +helper_gvec_neg64 \ +helper_gvec_abs8 \ +helper_gvec_abs16 \ +helper_gvec_abs32 \ +helper_gvec_abs64 \ +helper_gvec_mov \ +helper_gvec_dup64 \ +helper_gvec_dup32 \ +helper_gvec_dup16 \ +helper_gvec_dup8 \ +helper_gvec_not \ +helper_gvec_and \ +helper_gvec_or \ +helper_gvec_xor \ +helper_gvec_andc \ +helper_gvec_orc \ +helper_gvec_nand \ +helper_gvec_nor \ +helper_gvec_eqv \ +helper_gvec_ands \ +helper_gvec_xors \ +helper_gvec_ors \ +helper_gvec_shl8i \ +helper_gvec_shl16i \ +helper_gvec_shl32i \ +helper_gvec_shl64i \ +helper_gvec_shr8i \ +helper_gvec_shr16i \ +helper_gvec_shr32i \ +helper_gvec_shr64i \ +helper_gvec_sar8i \ +helper_gvec_sar16i \ +helper_gvec_sar32i \ +helper_gvec_sar64i \ +helper_gvec_shl8v \ +helper_gvec_shl16v \ +helper_gvec_shl32v \ +helper_gvec_shl64v \ +helper_gvec_shr8v \ 
+helper_gvec_shr16v \ +helper_gvec_shr32v \ +helper_gvec_shr64v \ +helper_gvec_sar8v \ +helper_gvec_sar16v \ +helper_gvec_sar32v \ +helper_gvec_sar64v \ +helper_gvec_eq8 \ +helper_gvec_ne8 \ +helper_gvec_lt8 \ +helper_gvec_le8 \ +helper_gvec_ltu8 \ +helper_gvec_leu8 \ +helper_gvec_eq16 \ +helper_gvec_ne16 \ +helper_gvec_lt16 \ +helper_gvec_le16 \ +helper_gvec_ltu16 \ +helper_gvec_leu16 \ +helper_gvec_eq32 \ +helper_gvec_ne32 \ +helper_gvec_lt32 \ +helper_gvec_le32 \ +helper_gvec_ltu32 \ +helper_gvec_leu32 \ +helper_gvec_eq64 \ +helper_gvec_ne64 \ +helper_gvec_lt64 \ +helper_gvec_le64 \ +helper_gvec_ltu64 \ +helper_gvec_leu64 \ +helper_gvec_ssadd8 \ +helper_gvec_ssadd16 \ +helper_gvec_ssadd32 \ +helper_gvec_ssadd64 \ +helper_gvec_sssub8 \ +helper_gvec_sssub16 \ +helper_gvec_sssub32 \ +helper_gvec_sssub64 \ +helper_gvec_usadd8 \ +helper_gvec_usadd16 \ +helper_gvec_usadd32 \ +helper_gvec_usadd64 \ +helper_gvec_ussub8 \ +helper_gvec_ussub16 \ +helper_gvec_ussub32 \ +helper_gvec_ussub64 \ +helper_gvec_smin8 \ +helper_gvec_smin16 \ +helper_gvec_smin32 \ +helper_gvec_smin64 \ +helper_gvec_smax8 \ +helper_gvec_smax16 \ +helper_gvec_smax32 \ +helper_gvec_smax64 \ +helper_gvec_umin8 \ +helper_gvec_umin16 \ +helper_gvec_umin32 \ +helper_gvec_umin64 \ +helper_gvec_umax8 \ +helper_gvec_umax16 \ +helper_gvec_umax32 \ +helper_gvec_umax64 \ +helper_gvec_bitsel \ +cpu_restore_state \ +page_collection_lock \ +page_collection_unlock \ +free_code_gen_buffer \ +tcg_exec_init \ +tb_cleanup \ +tb_flush \ +tb_phys_invalidate \ +tb_gen_code \ +tb_exec_lock \ +tb_exec_unlock \ +tb_invalidate_phys_page_range \ +tb_invalidate_phys_range \ +tb_invalidate_phys_page_fast \ +tb_check_watchpoint \ +cpu_io_recompile \ +tb_flush_jmp_cache \ +tcg_flush_softmmu_tlb \ +translator_loop_temp_check \ +translator_loop \ +helper_atomic_cmpxchgo_le_mmu \ +helper_atomic_cmpxchgo_be_mmu \ +helper_atomic_ldo_le_mmu \ +helper_atomic_ldo_be_mmu \ +helper_atomic_sto_le_mmu \ +helper_atomic_sto_be_mmu \ +unassigned_mem_ops \ +floatx80_infinity \ +dup_const_func \ +gen_helper_raise_exception \ +gen_helper_raise_interrupt \ +gen_helper_vfp_get_fpscr \ +gen_helper_vfp_set_fpscr \ +gen_helper_cpsr_read \ +gen_helper_cpsr_write \ +" + +x86_64_SYMBOLS=" +cpu_get_tsc \ +x86_cpu_get_memory_mapping \ +cpu_x86_update_dr7 \ +breakpoint_handler \ +helper_single_step \ +helper_rechecking_single_step \ +helper_set_dr \ +helper_get_dr \ +helper_bpt_io \ +helper_cc_compute_all \ +cpu_cc_compute_all \ +helper_cc_compute_c \ +helper_write_eflags \ +helper_read_eflags \ +helper_clts \ +helper_reset_rf \ +helper_cli \ +helper_sti \ +helper_clac \ +helper_stac \ +get_register_name_32 \ +host_cpuid \ +host_vendor_fms \ +x86_cpu_set_default_version \ +cpu_clear_apic_feature \ +cpu_x86_cpuid \ +x86_cpu_pending_interrupt \ +x86_update_hflags \ +cpu_x86_init \ +helper_raise_interrupt \ +helper_raise_exception \ +raise_interrupt \ +raise_exception_err \ +raise_exception_err_ra \ +raise_exception \ +raise_exception_ra \ +x86_cpu_tlb_fill \ +cpu_set_ignne \ +helper_flds_FT0 \ +helper_fldl_FT0 \ +helper_fildl_FT0 \ +helper_flds_ST0 \ +helper_fldl_ST0 \ +helper_fildl_ST0 \ +helper_fildll_ST0 \ +helper_fsts_ST0 \ +helper_fstl_ST0 \ +helper_fist_ST0 \ +helper_fistl_ST0 \ +helper_fistll_ST0 \ +helper_fistt_ST0 \ +helper_fisttl_ST0 \ +helper_fisttll_ST0 \ +helper_fldt_ST0 \ +helper_fstt_ST0 \ +helper_fpush \ +helper_fpop \ +helper_fdecstp \ +helper_fincstp \ +helper_ffree_STN \ +helper_fmov_ST0_FT0 \ +helper_fmov_FT0_STN \ +helper_fmov_ST0_STN \ +helper_fmov_STN_ST0 \ 
+helper_fxchg_ST0_STN \ +helper_fcom_ST0_FT0 \ +helper_fucom_ST0_FT0 \ +helper_fcomi_ST0_FT0 \ +helper_fucomi_ST0_FT0 \ +helper_fadd_ST0_FT0 \ +helper_fmul_ST0_FT0 \ +helper_fsub_ST0_FT0 \ +helper_fsubr_ST0_FT0 \ +helper_fdiv_ST0_FT0 \ +helper_fdivr_ST0_FT0 \ +helper_fadd_STN_ST0 \ +helper_fmul_STN_ST0 \ +helper_fsub_STN_ST0 \ +helper_fsubr_STN_ST0 \ +helper_fdiv_STN_ST0 \ +helper_fdivr_STN_ST0 \ +helper_fchs_ST0 \ +helper_fabs_ST0 \ +helper_fld1_ST0 \ +helper_fldl2t_ST0 \ +helper_fldl2e_ST0 \ +helper_fldpi_ST0 \ +helper_fldlg2_ST0 \ +helper_fldln2_ST0 \ +helper_fldz_ST0 \ +helper_fldz_FT0 \ +helper_fnstsw \ +helper_fnstcw \ +update_fp_status \ +helper_fldcw \ +helper_fclex \ +helper_fwait \ +helper_fninit \ +helper_fbld_ST0 \ +helper_fbst_ST0 \ +helper_f2xm1 \ +helper_fyl2x \ +helper_fptan \ +helper_fpatan \ +helper_fxtract \ +helper_fprem1 \ +helper_fprem \ +helper_fyl2xp1 \ +helper_fsqrt \ +helper_fsincos \ +helper_frndint \ +helper_fscale \ +helper_fsin \ +helper_fcos \ +helper_fxam_ST0 \ +helper_fstenv \ +helper_fldenv \ +helper_fsave \ +helper_frstor \ +helper_fxsave \ +helper_xsave \ +helper_xsaveopt \ +helper_fxrstor \ +helper_xrstor \ +helper_xgetbv \ +helper_xsetbv \ +update_mxcsr_status \ +helper_ldmxcsr \ +helper_enter_mmx \ +helper_emms \ +helper_movq \ +helper_psrlw_mmx \ +helper_psraw_mmx \ +helper_psllw_mmx \ +helper_psrld_mmx \ +helper_psrad_mmx \ +helper_pslld_mmx \ +helper_psrlq_mmx \ +helper_psllq_mmx \ +helper_paddb_mmx \ +helper_paddw_mmx \ +helper_paddl_mmx \ +helper_paddq_mmx \ +helper_psubb_mmx \ +helper_psubw_mmx \ +helper_psubl_mmx \ +helper_psubq_mmx \ +helper_paddusb_mmx \ +helper_paddsb_mmx \ +helper_psubusb_mmx \ +helper_psubsb_mmx \ +helper_paddusw_mmx \ +helper_paddsw_mmx \ +helper_psubusw_mmx \ +helper_psubsw_mmx \ +helper_pminub_mmx \ +helper_pmaxub_mmx \ +helper_pminsw_mmx \ +helper_pmaxsw_mmx \ +helper_pand_mmx \ +helper_pandn_mmx \ +helper_por_mmx \ +helper_pxor_mmx \ +helper_pcmpgtb_mmx \ +helper_pcmpgtw_mmx \ +helper_pcmpgtl_mmx \ +helper_pcmpeqb_mmx \ +helper_pcmpeqw_mmx \ +helper_pcmpeql_mmx \ +helper_pmullw_mmx \ +helper_pmulhrw_mmx \ +helper_pmulhuw_mmx \ +helper_pmulhw_mmx \ +helper_pavgb_mmx \ +helper_pavgw_mmx \ +helper_pmuludq_mmx \ +helper_pmaddwd_mmx \ +helper_psadbw_mmx \ +helper_maskmov_mmx \ +helper_movl_mm_T0_mmx \ +helper_movq_mm_T0_mmx \ +helper_pshufw_mmx \ +helper_pmovmskb_mmx \ +helper_packsswb_mmx \ +helper_packuswb_mmx \ +helper_packssdw_mmx \ +helper_punpcklbw_mmx \ +helper_punpcklwd_mmx \ +helper_punpckldq_mmx \ +helper_punpckhbw_mmx \ +helper_punpckhwd_mmx \ +helper_punpckhdq_mmx \ +helper_pi2fd \ +helper_pi2fw \ +helper_pf2id \ +helper_pf2iw \ +helper_pfacc \ +helper_pfadd \ +helper_pfcmpeq \ +helper_pfcmpge \ +helper_pfcmpgt \ +helper_pfmax \ +helper_pfmin \ +helper_pfmul \ +helper_pfnacc \ +helper_pfpnacc \ +helper_pfrcp \ +helper_pfrsqrt \ +helper_pfsub \ +helper_pfsubr \ +helper_pswapd \ +helper_pshufb_mmx \ +helper_phaddw_mmx \ +helper_phaddd_mmx \ +helper_phaddsw_mmx \ +helper_pmaddubsw_mmx \ +helper_phsubw_mmx \ +helper_phsubd_mmx \ +helper_phsubsw_mmx \ +helper_pabsb_mmx \ +helper_pabsw_mmx \ +helper_pabsd_mmx \ +helper_pmulhrsw_mmx \ +helper_psignb_mmx \ +helper_psignw_mmx \ +helper_psignd_mmx \ +helper_palignr_mmx \ +helper_psrlw_xmm \ +helper_psraw_xmm \ +helper_psllw_xmm \ +helper_psrld_xmm \ +helper_psrad_xmm \ +helper_pslld_xmm \ +helper_psrlq_xmm \ +helper_psllq_xmm \ +helper_psrldq_xmm \ +helper_pslldq_xmm \ +helper_paddb_xmm \ +helper_paddw_xmm \ +helper_paddl_xmm \ +helper_paddq_xmm \ +helper_psubb_xmm \ 
+helper_psubw_xmm \ +helper_psubl_xmm \ +helper_psubq_xmm \ +helper_paddusb_xmm \ +helper_paddsb_xmm \ +helper_psubusb_xmm \ +helper_psubsb_xmm \ +helper_paddusw_xmm \ +helper_paddsw_xmm \ +helper_psubusw_xmm \ +helper_psubsw_xmm \ +helper_pminub_xmm \ +helper_pmaxub_xmm \ +helper_pminsw_xmm \ +helper_pmaxsw_xmm \ +helper_pand_xmm \ +helper_pandn_xmm \ +helper_por_xmm \ +helper_pxor_xmm \ +helper_pcmpgtb_xmm \ +helper_pcmpgtw_xmm \ +helper_pcmpgtl_xmm \ +helper_pcmpeqb_xmm \ +helper_pcmpeqw_xmm \ +helper_pcmpeql_xmm \ +helper_pmullw_xmm \ +helper_pmulhuw_xmm \ +helper_pmulhw_xmm \ +helper_pavgb_xmm \ +helper_pavgw_xmm \ +helper_pmuludq_xmm \ +helper_pmaddwd_xmm \ +helper_psadbw_xmm \ +helper_maskmov_xmm \ +helper_movl_mm_T0_xmm \ +helper_movq_mm_T0_xmm \ +helper_shufps \ +helper_shufpd \ +helper_pshufd_xmm \ +helper_pshuflw_xmm \ +helper_pshufhw_xmm \ +helper_addps \ +helper_addss \ +helper_addpd \ +helper_addsd \ +helper_subps \ +helper_subss \ +helper_subpd \ +helper_subsd \ +helper_mulps \ +helper_mulss \ +helper_mulpd \ +helper_mulsd \ +helper_divps \ +helper_divss \ +helper_divpd \ +helper_divsd \ +helper_minps \ +helper_minss \ +helper_minpd \ +helper_minsd \ +helper_maxps \ +helper_maxss \ +helper_maxpd \ +helper_maxsd \ +helper_sqrtps \ +helper_sqrtss \ +helper_sqrtpd \ +helper_sqrtsd \ +helper_cvtps2pd \ +helper_cvtpd2ps \ +helper_cvtss2sd \ +helper_cvtsd2ss \ +helper_cvtdq2ps \ +helper_cvtdq2pd \ +helper_cvtpi2ps \ +helper_cvtpi2pd \ +helper_cvtsi2ss \ +helper_cvtsi2sd \ +helper_cvtsq2ss \ +helper_cvtsq2sd \ +helper_cvtps2dq \ +helper_cvtpd2dq \ +helper_cvtps2pi \ +helper_cvtpd2pi \ +helper_cvtss2si \ +helper_cvtsd2si \ +helper_cvtss2sq \ +helper_cvtsd2sq \ +helper_cvttps2dq \ +helper_cvttpd2dq \ +helper_cvttps2pi \ +helper_cvttpd2pi \ +helper_cvttss2si \ +helper_cvttsd2si \ +helper_cvttss2sq \ +helper_cvttsd2sq \ +helper_rsqrtps \ +helper_rsqrtss \ +helper_rcpps \ +helper_rcpss \ +helper_extrq_r \ +helper_extrq_i \ +helper_insertq_r \ +helper_insertq_i \ +helper_haddps \ +helper_haddpd \ +helper_hsubps \ +helper_hsubpd \ +helper_addsubps \ +helper_addsubpd \ +helper_cmpeqps \ +helper_cmpeqss \ +helper_cmpeqpd \ +helper_cmpeqsd \ +helper_cmpltps \ +helper_cmpltss \ +helper_cmpltpd \ +helper_cmpltsd \ +helper_cmpleps \ +helper_cmpless \ +helper_cmplepd \ +helper_cmplesd \ +helper_cmpunordps \ +helper_cmpunordss \ +helper_cmpunordpd \ +helper_cmpunordsd \ +helper_cmpneqps \ +helper_cmpneqss \ +helper_cmpneqpd \ +helper_cmpneqsd \ +helper_cmpnltps \ +helper_cmpnltss \ +helper_cmpnltpd \ +helper_cmpnltsd \ +helper_cmpnleps \ +helper_cmpnless \ +helper_cmpnlepd \ +helper_cmpnlesd \ +helper_cmpordps \ +helper_cmpordss \ +helper_cmpordpd \ +helper_cmpordsd \ +helper_ucomiss \ +helper_comiss \ +helper_ucomisd \ +helper_comisd \ +helper_movmskps \ +helper_movmskpd \ +helper_pmovmskb_xmm \ +helper_packsswb_xmm \ +helper_packuswb_xmm \ +helper_packssdw_xmm \ +helper_punpcklbw_xmm \ +helper_punpcklwd_xmm \ +helper_punpckldq_xmm \ +helper_punpcklqdq_xmm \ +helper_punpckhbw_xmm \ +helper_punpckhwd_xmm \ +helper_punpckhdq_xmm \ +helper_punpckhqdq_xmm \ +helper_pshufb_xmm \ +helper_phaddw_xmm \ +helper_phaddd_xmm \ +helper_phaddsw_xmm \ +helper_pmaddubsw_xmm \ +helper_phsubw_xmm \ +helper_phsubd_xmm \ +helper_phsubsw_xmm \ +helper_pabsb_xmm \ +helper_pabsw_xmm \ +helper_pabsd_xmm \ +helper_pmulhrsw_xmm \ +helper_psignb_xmm \ +helper_psignw_xmm \ +helper_psignd_xmm \ +helper_palignr_xmm \ +helper_pblendvb_xmm \ +helper_blendvps_xmm \ +helper_blendvpd_xmm \ +helper_ptest_xmm \ +helper_pmovsxbw_xmm 
\ +helper_pmovsxbd_xmm \ +helper_pmovsxbq_xmm \ +helper_pmovsxwd_xmm \ +helper_pmovsxwq_xmm \ +helper_pmovsxdq_xmm \ +helper_pmovzxbw_xmm \ +helper_pmovzxbd_xmm \ +helper_pmovzxbq_xmm \ +helper_pmovzxwd_xmm \ +helper_pmovzxwq_xmm \ +helper_pmovzxdq_xmm \ +helper_pmuldq_xmm \ +helper_pcmpeqq_xmm \ +helper_packusdw_xmm \ +helper_pminsb_xmm \ +helper_pminsd_xmm \ +helper_pminuw_xmm \ +helper_pminud_xmm \ +helper_pmaxsb_xmm \ +helper_pmaxsd_xmm \ +helper_pmaxuw_xmm \ +helper_pmaxud_xmm \ +helper_pmulld_xmm \ +helper_phminposuw_xmm \ +helper_roundps_xmm \ +helper_roundpd_xmm \ +helper_roundss_xmm \ +helper_roundsd_xmm \ +helper_blendps_xmm \ +helper_blendpd_xmm \ +helper_pblendw_xmm \ +helper_dpps_xmm \ +helper_dppd_xmm \ +helper_mpsadbw_xmm \ +helper_pcmpgtq_xmm \ +helper_pcmpestri_xmm \ +helper_pcmpestrm_xmm \ +helper_pcmpistri_xmm \ +helper_pcmpistrm_xmm \ +helper_crc32 \ +helper_pclmulqdq_xmm \ +helper_aesdec_xmm \ +helper_aesdeclast_xmm \ +helper_aesenc_xmm \ +helper_aesenclast_xmm \ +helper_aesimc_xmm \ +helper_aeskeygenassist_xmm \ +cpu_sync_bndcs_hflags \ +cpu_x86_support_mca_broadcast \ +x86_cpu_set_a20 \ +cpu_x86_update_cr0 \ +cpu_x86_update_cr3 \ +cpu_x86_update_cr4 \ +x86_cpu_get_phys_page_attrs_debug \ +cpu_x86_get_descr_debug \ +do_cpu_init \ +do_cpu_sipi \ +x86_cpu_exec_enter \ +x86_cpu_exec_exit \ +x86_ldub_phys \ +x86_lduw_phys \ +x86_ldl_phys \ +x86_ldq_phys \ +x86_stb_phys \ +x86_stl_phys_notdirty \ +x86_stw_phys \ +x86_stl_phys \ +x86_stq_phys \ +helper_divb_AL \ +helper_idivb_AL \ +helper_divw_AX \ +helper_idivw_AX \ +helper_divl_EAX \ +helper_idivl_EAX \ +helper_aam \ +helper_aad \ +helper_aaa \ +helper_aas \ +helper_daa \ +helper_das \ +helper_divq_EAX \ +helper_idivq_EAX \ +helper_pdep \ +helper_pext \ +helper_rclb \ +helper_rcrb \ +helper_rclw \ +helper_rcrw \ +helper_rcll \ +helper_rcrl \ +helper_rclq \ +helper_rcrq \ +helper_cr4_testbit \ +helper_rdrand \ +helper_cmpxchg8b_unlocked \ +helper_cmpxchg8b \ +helper_cmpxchg16b_unlocked \ +helper_cmpxchg16b \ +helper_boundw \ +helper_boundl \ +helper_outb \ +helper_inb \ +helper_outw \ +helper_inw \ +helper_outl \ +helper_inl \ +helper_into \ +helper_cpuid \ +helper_read_crN \ +helper_write_crN \ +helper_lmsw \ +helper_invlpg \ +helper_rdtsc \ +helper_rdtscp \ +helper_rdpmc \ +helper_wrmsr \ +helper_rdmsr \ +helper_hlt \ +helper_monitor \ +helper_mwait \ +helper_pause \ +helper_debug \ +helper_rdpkru \ +helper_wrpkru \ +helper_bndck \ +helper_bndldx64 \ +helper_bndldx32 \ +helper_bndstx64 \ +helper_bndstx32 \ +helper_bnd_jmp \ +helper_syscall \ +helper_sysret \ +x86_cpu_do_interrupt \ +do_interrupt_x86_hardirq \ +x86_cpu_exec_interrupt \ +helper_lldt \ +helper_ltr \ +uc_check_cpu_x86_load_seg \ +helper_load_seg \ +helper_ljmp_protected \ +helper_lcall_real \ +helper_lcall_protected \ +helper_iret_real \ +helper_iret_protected \ +helper_lret_protected \ +helper_sysenter \ +helper_sysexit \ +helper_lsl \ +helper_lar \ +helper_verr \ +helper_verw \ +cpu_x86_load_seg \ +helper_check_iob \ +helper_check_iow \ +helper_check_iol \ +do_smm_enter \ +helper_rsm \ +helper_vmrun \ +helper_vmmcall \ +helper_vmload \ +helper_vmsave \ +helper_stgi \ +helper_clgi \ +helper_skinit \ +helper_invlpga \ +cpu_svm_check_intercept_param \ +helper_svm_check_intercept_param \ +helper_svm_check_io \ +cpu_vmexit \ +do_vmexit \ +tcg_x86_init \ +gen_intermediate_code \ +restore_state_to_opc \ +x86_cpu_xsave_all_areas \ +x86_cpu_xrstor_all_areas \ +cpu_get_fp80 \ +cpu_set_fp80 \ +x86_reg_reset \ +x86_reg_read \ +x86_reg_write \ +" + +arm_SYMBOLS=" 
+arm_cpu_exec_interrupt \ +arm_cpu_update_virq \ +arm_cpu_update_vfiq \ +arm_cpu_initfn \ +gt_cntfrq_period_ns \ +arm_cpu_post_init \ +arm_cpu_realizefn \ +a15_l2ctlr_read \ +arm_cpu_class_init \ +cpu_arm_init \ +helper_crypto_aese \ +helper_crypto_aesmc \ +helper_crypto_sha1_3reg \ +helper_crypto_sha1h \ +helper_crypto_sha1su1 \ +helper_crypto_sha256h \ +helper_crypto_sha256h2 \ +helper_crypto_sha256su0 \ +helper_crypto_sha256su1 \ +helper_crypto_sha512h \ +helper_crypto_sha512h2 \ +helper_crypto_sha512su0 \ +helper_crypto_sha512su1 \ +helper_crypto_sm3partw1 \ +helper_crypto_sm3partw2 \ +helper_crypto_sm3tt \ +helper_crypto_sm4e \ +helper_crypto_sm4ekey \ +helper_check_breakpoints \ +arm_debug_check_watchpoint \ +arm_debug_excp_handler \ +arm_adjust_watchpoint_address \ +read_raw_cp_reg \ +pmu_init \ +pmu_op_start \ +pmu_op_finish \ +pmu_pre_el_change \ +pmu_post_el_change \ +arm_pmu_timer_cb \ +arm_gt_ptimer_cb \ +arm_gt_vtimer_cb \ +arm_gt_htimer_cb \ +arm_gt_stimer_cb \ +arm_gt_hvtimer_cb \ +arm_hcr_el2_eff \ +sve_exception_el \ +sve_zcr_len_for_el \ +hw_watchpoint_update \ +hw_watchpoint_update_all \ +hw_breakpoint_update \ +hw_breakpoint_update_all \ +register_cp_regs_for_features \ +define_one_arm_cp_reg_with_opaque \ +define_arm_cp_regs_with_opaque \ +modify_arm_cp_regs \ +get_arm_cp_reginfo \ +arm_cp_write_ignore \ +arm_cp_read_zero \ +arm_cp_reset_ignore \ +cpsr_read \ +cpsr_write \ +helper_sxtb16 \ +helper_uxtb16 \ +helper_sdiv \ +helper_udiv \ +helper_rbit \ +arm_phys_excp_target_el \ +aarch64_sync_32_to_64 \ +aarch64_sync_64_to_32 \ +arm_cpu_do_interrupt \ +arm_sctlr \ +arm_s1_regime_using_lpae_format \ +aa64_va_parameters \ +v8m_security_lookup \ +pmsav8_mpu_lookup \ +get_phys_addr \ +arm_cpu_get_phys_page_attrs_debug \ +helper_qadd16 \ +helper_qadd8 \ +helper_qsub16 \ +helper_qsub8 \ +helper_qsubaddx \ +helper_qaddsubx \ +helper_uqadd16 \ +helper_uqadd8 \ +helper_uqsub16 \ +helper_uqsub8 \ +helper_uqsubaddx \ +helper_uqaddsubx \ +helper_sadd16 \ +helper_sadd8 \ +helper_ssub16 \ +helper_ssub8 \ +helper_ssubaddx \ +helper_saddsubx \ +helper_uadd16 \ +helper_uadd8 \ +helper_usub16 \ +helper_usub8 \ +helper_usubaddx \ +helper_uaddsubx \ +helper_shadd16 \ +helper_shadd8 \ +helper_shsub16 \ +helper_shsub8 \ +helper_shsubaddx \ +helper_shaddsubx \ +helper_uhadd16 \ +helper_uhadd8 \ +helper_uhsub16 \ +helper_uhsub8 \ +helper_uhsubaddx \ +helper_uhaddsubx \ +helper_usad8 \ +helper_sel_flags \ +helper_crc32 \ +helper_crc32c \ +fp_exception_el \ +arm_mmu_idx_to_el \ +arm_mmu_idx_el \ +arm_mmu_idx \ +arm_stage1_mmu_idx \ +arm_rebuild_hflags \ +helper_rebuild_hflags_m32_newel \ +helper_rebuild_hflags_m32 \ +helper_rebuild_hflags_a32_newel \ +helper_rebuild_hflags_a32 \ +helper_rebuild_hflags_a64 \ +cpu_get_tb_cpu_state \ +helper_iwmmxt_maddsq \ +helper_iwmmxt_madduq \ +helper_iwmmxt_sadb \ +helper_iwmmxt_sadw \ +helper_iwmmxt_mulslw \ +helper_iwmmxt_mulshw \ +helper_iwmmxt_mululw \ +helper_iwmmxt_muluhw \ +helper_iwmmxt_macsw \ +helper_iwmmxt_macuw \ +helper_iwmmxt_unpacklb \ +helper_iwmmxt_unpacklw \ +helper_iwmmxt_unpackll \ +helper_iwmmxt_unpacklub \ +helper_iwmmxt_unpackluw \ +helper_iwmmxt_unpacklul \ +helper_iwmmxt_unpacklsb \ +helper_iwmmxt_unpacklsw \ +helper_iwmmxt_unpacklsl \ +helper_iwmmxt_unpackhb \ +helper_iwmmxt_unpackhw \ +helper_iwmmxt_unpackhl \ +helper_iwmmxt_unpackhub \ +helper_iwmmxt_unpackhuw \ +helper_iwmmxt_unpackhul \ +helper_iwmmxt_unpackhsb \ +helper_iwmmxt_unpackhsw \ +helper_iwmmxt_unpackhsl \ +helper_iwmmxt_cmpeqb \ +helper_iwmmxt_cmpeqw \ 
+helper_iwmmxt_cmpeql \ +helper_iwmmxt_cmpgtsb \ +helper_iwmmxt_cmpgtsw \ +helper_iwmmxt_cmpgtsl \ +helper_iwmmxt_cmpgtub \ +helper_iwmmxt_cmpgtuw \ +helper_iwmmxt_cmpgtul \ +helper_iwmmxt_minsb \ +helper_iwmmxt_minsw \ +helper_iwmmxt_minsl \ +helper_iwmmxt_minub \ +helper_iwmmxt_minuw \ +helper_iwmmxt_minul \ +helper_iwmmxt_maxsb \ +helper_iwmmxt_maxsw \ +helper_iwmmxt_maxsl \ +helper_iwmmxt_maxub \ +helper_iwmmxt_maxuw \ +helper_iwmmxt_maxul \ +helper_iwmmxt_subnb \ +helper_iwmmxt_subnw \ +helper_iwmmxt_subnl \ +helper_iwmmxt_addnb \ +helper_iwmmxt_addnw \ +helper_iwmmxt_addnl \ +helper_iwmmxt_subub \ +helper_iwmmxt_subuw \ +helper_iwmmxt_subul \ +helper_iwmmxt_addub \ +helper_iwmmxt_adduw \ +helper_iwmmxt_addul \ +helper_iwmmxt_subsb \ +helper_iwmmxt_subsw \ +helper_iwmmxt_subsl \ +helper_iwmmxt_addsb \ +helper_iwmmxt_addsw \ +helper_iwmmxt_addsl \ +helper_iwmmxt_avgb0 \ +helper_iwmmxt_avgb1 \ +helper_iwmmxt_avgw0 \ +helper_iwmmxt_avgw1 \ +helper_iwmmxt_align \ +helper_iwmmxt_insr \ +helper_iwmmxt_setpsr_nz \ +helper_iwmmxt_bcstb \ +helper_iwmmxt_bcstw \ +helper_iwmmxt_bcstl \ +helper_iwmmxt_addcb \ +helper_iwmmxt_addcw \ +helper_iwmmxt_addcl \ +helper_iwmmxt_msbb \ +helper_iwmmxt_msbw \ +helper_iwmmxt_msbl \ +helper_iwmmxt_srlw \ +helper_iwmmxt_srll \ +helper_iwmmxt_srlq \ +helper_iwmmxt_sllw \ +helper_iwmmxt_slll \ +helper_iwmmxt_sllq \ +helper_iwmmxt_sraw \ +helper_iwmmxt_sral \ +helper_iwmmxt_sraq \ +helper_iwmmxt_rorw \ +helper_iwmmxt_rorl \ +helper_iwmmxt_rorq \ +helper_iwmmxt_shufh \ +helper_iwmmxt_packuw \ +helper_iwmmxt_packul \ +helper_iwmmxt_packuq \ +helper_iwmmxt_packsw \ +helper_iwmmxt_packsl \ +helper_iwmmxt_packsq \ +helper_iwmmxt_muladdsl \ +helper_iwmmxt_muladdsw \ +helper_iwmmxt_muladdswl \ +armv7m_nvic_set_pending \ +helper_v7m_preserve_fp_state \ +write_v7m_exception \ +helper_v7m_bxns \ +helper_v7m_blxns \ +armv7m_nvic_neg_prio_requested \ +helper_v7m_vlstm \ +helper_v7m_vlldm \ +arm_v7m_cpu_do_interrupt \ +helper_v7m_mrs \ +helper_v7m_msr \ +helper_v7m_tt \ +arm_v7m_mmu_idx_all \ +arm_v7m_mmu_idx_for_secstate_and_priv \ +arm_v7m_mmu_idx_for_secstate \ +helper_neon_qadd_u8 \ +helper_neon_qadd_u16 \ +helper_neon_qadd_u32 \ +helper_neon_qadd_u64 \ +helper_neon_qadd_s8 \ +helper_neon_qadd_s16 \ +helper_neon_qadd_s32 \ +helper_neon_qadd_s64 \ +helper_neon_uqadd_s8 \ +helper_neon_uqadd_s16 \ +helper_neon_uqadd_s32 \ +helper_neon_uqadd_s64 \ +helper_neon_sqadd_u8 \ +helper_neon_sqadd_u16 \ +helper_neon_sqadd_u32 \ +helper_neon_sqadd_u64 \ +helper_neon_qsub_u8 \ +helper_neon_qsub_u16 \ +helper_neon_qsub_u32 \ +helper_neon_qsub_u64 \ +helper_neon_qsub_s8 \ +helper_neon_qsub_s16 \ +helper_neon_qsub_s32 \ +helper_neon_qsub_s64 \ +helper_neon_hadd_s8 \ +helper_neon_hadd_u8 \ +helper_neon_hadd_s16 \ +helper_neon_hadd_u16 \ +helper_neon_hadd_s32 \ +helper_neon_hadd_u32 \ +helper_neon_rhadd_s8 \ +helper_neon_rhadd_u8 \ +helper_neon_rhadd_s16 \ +helper_neon_rhadd_u16 \ +helper_neon_rhadd_s32 \ +helper_neon_rhadd_u32 \ +helper_neon_hsub_s8 \ +helper_neon_hsub_u8 \ +helper_neon_hsub_s16 \ +helper_neon_hsub_u16 \ +helper_neon_hsub_s32 \ +helper_neon_hsub_u32 \ +helper_neon_cgt_s8 \ +helper_neon_cgt_u8 \ +helper_neon_cgt_s16 \ +helper_neon_cgt_u16 \ +helper_neon_cgt_s32 \ +helper_neon_cgt_u32 \ +helper_neon_cge_s8 \ +helper_neon_cge_u8 \ +helper_neon_cge_s16 \ +helper_neon_cge_u16 \ +helper_neon_cge_s32 \ +helper_neon_cge_u32 \ +helper_neon_pmin_s8 \ +helper_neon_pmin_u8 \ +helper_neon_pmin_s16 \ +helper_neon_pmin_u16 \ +helper_neon_pmax_s8 \ +helper_neon_pmax_u8 \ 
+helper_neon_pmax_s16 \ +helper_neon_pmax_u16 \ +helper_neon_abd_s8 \ +helper_neon_abd_u8 \ +helper_neon_abd_s16 \ +helper_neon_abd_u16 \ +helper_neon_abd_s32 \ +helper_neon_abd_u32 \ +helper_neon_shl_u16 \ +helper_neon_shl_s16 \ +helper_neon_rshl_s8 \ +helper_neon_rshl_s16 \ +helper_neon_rshl_s32 \ +helper_neon_rshl_s64 \ +helper_neon_rshl_u8 \ +helper_neon_rshl_u16 \ +helper_neon_rshl_u32 \ +helper_neon_rshl_u64 \ +helper_neon_qshl_u8 \ +helper_neon_qshl_u16 \ +helper_neon_qshl_u32 \ +helper_neon_qshl_u64 \ +helper_neon_qshl_s8 \ +helper_neon_qshl_s16 \ +helper_neon_qshl_s32 \ +helper_neon_qshl_s64 \ +helper_neon_qshlu_s8 \ +helper_neon_qshlu_s16 \ +helper_neon_qshlu_s32 \ +helper_neon_qshlu_s64 \ +helper_neon_qrshl_u8 \ +helper_neon_qrshl_u16 \ +helper_neon_qrshl_u32 \ +helper_neon_qrshl_u64 \ +helper_neon_qrshl_s8 \ +helper_neon_qrshl_s16 \ +helper_neon_qrshl_s32 \ +helper_neon_qrshl_s64 \ +helper_neon_add_u8 \ +helper_neon_add_u16 \ +helper_neon_padd_u8 \ +helper_neon_padd_u16 \ +helper_neon_sub_u8 \ +helper_neon_sub_u16 \ +helper_neon_mul_u8 \ +helper_neon_mul_u16 \ +helper_neon_tst_u8 \ +helper_neon_tst_u16 \ +helper_neon_tst_u32 \ +helper_neon_ceq_u8 \ +helper_neon_ceq_u16 \ +helper_neon_ceq_u32 \ +helper_neon_clz_u8 \ +helper_neon_clz_u16 \ +helper_neon_cls_s8 \ +helper_neon_cls_s16 \ +helper_neon_cls_s32 \ +helper_neon_cnt_u8 \ +helper_neon_rbit_u8 \ +helper_neon_qdmulh_s16 \ +helper_neon_qrdmulh_s16 \ +helper_neon_qdmulh_s32 \ +helper_neon_qrdmulh_s32 \ +helper_neon_narrow_u8 \ +helper_neon_narrow_u16 \ +helper_neon_narrow_high_u8 \ +helper_neon_narrow_high_u16 \ +helper_neon_narrow_round_high_u8 \ +helper_neon_narrow_round_high_u16 \ +helper_neon_unarrow_sat8 \ +helper_neon_narrow_sat_u8 \ +helper_neon_narrow_sat_s8 \ +helper_neon_unarrow_sat16 \ +helper_neon_narrow_sat_u16 \ +helper_neon_narrow_sat_s16 \ +helper_neon_unarrow_sat32 \ +helper_neon_narrow_sat_u32 \ +helper_neon_narrow_sat_s32 \ +helper_neon_widen_u8 \ +helper_neon_widen_s8 \ +helper_neon_widen_u16 \ +helper_neon_widen_s16 \ +helper_neon_addl_u16 \ +helper_neon_addl_u32 \ +helper_neon_paddl_u16 \ +helper_neon_paddl_u32 \ +helper_neon_subl_u16 \ +helper_neon_subl_u32 \ +helper_neon_addl_saturate_s32 \ +helper_neon_addl_saturate_s64 \ +helper_neon_abdl_u16 \ +helper_neon_abdl_s16 \ +helper_neon_abdl_u32 \ +helper_neon_abdl_s32 \ +helper_neon_abdl_u64 \ +helper_neon_abdl_s64 \ +helper_neon_mull_u8 \ +helper_neon_mull_s8 \ +helper_neon_mull_u16 \ +helper_neon_mull_s16 \ +helper_neon_negl_u16 \ +helper_neon_negl_u32 \ +helper_neon_qabs_s8 \ +helper_neon_qneg_s8 \ +helper_neon_qabs_s16 \ +helper_neon_qneg_s16 \ +helper_neon_qabs_s32 \ +helper_neon_qneg_s32 \ +helper_neon_qabs_s64 \ +helper_neon_qneg_s64 \ +helper_neon_abd_f32 \ +helper_neon_ceq_f32 \ +helper_neon_cge_f32 \ +helper_neon_cgt_f32 \ +helper_neon_acge_f32 \ +helper_neon_acgt_f32 \ +helper_neon_acge_f64 \ +helper_neon_acgt_f64 \ +helper_neon_qunzip8 \ +helper_neon_qunzip16 \ +helper_neon_qunzip32 \ +helper_neon_unzip8 \ +helper_neon_unzip16 \ +helper_neon_qzip8 \ +helper_neon_qzip16 \ +helper_neon_qzip32 \ +helper_neon_zip8 \ +helper_neon_zip16 \ +raise_exception \ +raise_exception_ra \ +helper_neon_tbl \ +helper_v8m_stackcheck \ +helper_add_setq \ +helper_add_saturate \ +helper_sub_saturate \ +helper_add_usaturate \ +helper_sub_usaturate \ +helper_ssat \ +helper_ssat16 \ +helper_usat \ +helper_usat16 \ +helper_setend \ +helper_wfi \ +helper_wfe \ +helper_yield \ +helper_exception_internal \ +helper_exception_with_syndrome \ +helper_exception_bkpt_insn \ 
+helper_cpsr_read \ +helper_cpsr_write \ +helper_cpsr_write_eret \ +helper_get_user_reg \ +helper_set_user_reg \ +helper_set_r13_banked \ +helper_get_r13_banked \ +helper_msr_banked \ +helper_mrs_banked \ +helper_access_check_cp_reg \ +helper_set_cp_reg \ +helper_get_cp_reg \ +helper_set_cp_reg64 \ +helper_get_cp_reg64 \ +helper_pre_hvc \ +helper_pre_smc \ +helper_shl_cc \ +helper_shr_cc \ +helper_sar_cc \ +helper_ror_cc \ +arm_is_psci_call \ +arm_handle_psci_call \ +arm_cpu_do_unaligned_access \ +arm_cpu_do_transaction_failed \ +arm_cpu_tlb_fill \ +arm_translate_init \ +arm_test_cc \ +arm_free_cc \ +arm_jump_cc \ +arm_gen_test_cc \ +vfp_expand_imm \ +gen_cmtst_i64 \ +gen_ushl_i32 \ +gen_ushl_i64 \ +gen_sshl_i32 \ +gen_sshl_i64 \ +gen_intermediate_code \ +restore_state_to_opc \ +helper_neon_qrdmlah_s16 \ +helper_gvec_qrdmlah_s16 \ +helper_neon_qrdmlsh_s16 \ +helper_gvec_qrdmlsh_s16 \ +helper_neon_qrdmlah_s32 \ +helper_gvec_qrdmlah_s32 \ +helper_neon_qrdmlsh_s32 \ +helper_gvec_qrdmlsh_s32 \ +helper_gvec_sdot_b \ +helper_gvec_udot_b \ +helper_gvec_sdot_h \ +helper_gvec_udot_h \ +helper_gvec_sdot_idx_b \ +helper_gvec_udot_idx_b \ +helper_gvec_sdot_idx_h \ +helper_gvec_udot_idx_h \ +helper_gvec_fcaddh \ +helper_gvec_fcadds \ +helper_gvec_fcaddd \ +helper_gvec_fcmlah \ +helper_gvec_fcmlah_idx \ +helper_gvec_fcmlas \ +helper_gvec_fcmlas_idx \ +helper_gvec_fcmlad \ +helper_gvec_frecpe_h \ +helper_gvec_frecpe_s \ +helper_gvec_frecpe_d \ +helper_gvec_frsqrte_h \ +helper_gvec_frsqrte_s \ +helper_gvec_frsqrte_d \ +helper_gvec_fadd_h \ +helper_gvec_fadd_s \ +helper_gvec_fadd_d \ +helper_gvec_fsub_h \ +helper_gvec_fsub_s \ +helper_gvec_fsub_d \ +helper_gvec_fmul_h \ +helper_gvec_fmul_s \ +helper_gvec_fmul_d \ +helper_gvec_ftsmul_h \ +helper_gvec_ftsmul_s \ +helper_gvec_ftsmul_d \ +helper_gvec_fmul_idx_h \ +helper_gvec_fmul_idx_s \ +helper_gvec_fmul_idx_d \ +helper_gvec_fmla_idx_h \ +helper_gvec_fmla_idx_s \ +helper_gvec_fmla_idx_d \ +helper_gvec_uqadd_b \ +helper_gvec_uqadd_h \ +helper_gvec_uqadd_s \ +helper_gvec_sqadd_b \ +helper_gvec_sqadd_h \ +helper_gvec_sqadd_s \ +helper_gvec_uqsub_b \ +helper_gvec_uqsub_h \ +helper_gvec_uqsub_s \ +helper_gvec_sqsub_b \ +helper_gvec_sqsub_h \ +helper_gvec_sqsub_s \ +helper_gvec_uqadd_d \ +helper_gvec_uqsub_d \ +helper_gvec_sqadd_d \ +helper_gvec_sqsub_d \ +helper_gvec_fmlal_a32 \ +helper_gvec_fmlal_a64 \ +helper_gvec_fmlal_idx_a32 \ +helper_gvec_fmlal_idx_a64 \ +helper_gvec_sshl_b \ +helper_gvec_sshl_h \ +helper_gvec_ushl_b \ +helper_gvec_ushl_h \ +helper_gvec_pmul_b \ +helper_gvec_pmull_q \ +helper_neon_pmull_h \ +helper_vfp_get_fpscr \ +vfp_get_fpscr \ +helper_vfp_set_fpscr \ +vfp_set_fpscr \ +helper_vfp_adds \ +helper_vfp_addd \ +helper_vfp_subs \ +helper_vfp_subd \ +helper_vfp_muls \ +helper_vfp_muld \ +helper_vfp_divs \ +helper_vfp_divd \ +helper_vfp_mins \ +helper_vfp_mind \ +helper_vfp_maxs \ +helper_vfp_maxd \ +helper_vfp_minnums \ +helper_vfp_minnumd \ +helper_vfp_maxnums \ +helper_vfp_maxnumd \ +helper_vfp_negs \ +helper_vfp_negd \ +helper_vfp_abss \ +helper_vfp_absd \ +helper_vfp_sqrts \ +helper_vfp_sqrtd \ +helper_vfp_cmps \ +helper_vfp_cmpes \ +helper_vfp_cmpd \ +helper_vfp_cmped \ +helper_vfp_sitoh \ +helper_vfp_tosih \ +helper_vfp_tosizh \ +helper_vfp_sitos \ +helper_vfp_tosis \ +helper_vfp_tosizs \ +helper_vfp_sitod \ +helper_vfp_tosid \ +helper_vfp_tosizd \ +helper_vfp_uitoh \ +helper_vfp_touih \ +helper_vfp_touizh \ +helper_vfp_uitos \ +helper_vfp_touis \ +helper_vfp_touizs \ +helper_vfp_uitod \ +helper_vfp_touid \ +helper_vfp_touizd \ 
+helper_vfp_fcvtds \ +helper_vfp_fcvtsd \ +helper_vfp_shtod \ +helper_vfp_toshd_round_to_zero \ +helper_vfp_toshd \ +helper_vfp_sltod \ +helper_vfp_tosld_round_to_zero \ +helper_vfp_tosld \ +helper_vfp_sqtod \ +helper_vfp_tosqd \ +helper_vfp_uhtod \ +helper_vfp_touhd_round_to_zero \ +helper_vfp_touhd \ +helper_vfp_ultod \ +helper_vfp_tould_round_to_zero \ +helper_vfp_tould \ +helper_vfp_uqtod \ +helper_vfp_touqd \ +helper_vfp_shtos \ +helper_vfp_toshs_round_to_zero \ +helper_vfp_toshs \ +helper_vfp_sltos \ +helper_vfp_tosls_round_to_zero \ +helper_vfp_tosls \ +helper_vfp_sqtos \ +helper_vfp_tosqs \ +helper_vfp_uhtos \ +helper_vfp_touhs_round_to_zero \ +helper_vfp_touhs \ +helper_vfp_ultos \ +helper_vfp_touls_round_to_zero \ +helper_vfp_touls \ +helper_vfp_uqtos \ +helper_vfp_touqs \ +helper_vfp_sltoh \ +helper_vfp_ultoh \ +helper_vfp_sqtoh \ +helper_vfp_uqtoh \ +helper_vfp_toshh \ +helper_vfp_touhh \ +helper_vfp_toslh \ +helper_vfp_toulh \ +helper_vfp_tosqh \ +helper_vfp_touqh \ +helper_set_rmode \ +helper_set_neon_rmode \ +helper_vfp_fcvt_f16_to_f32 \ +helper_vfp_fcvt_f32_to_f16 \ +helper_vfp_fcvt_f16_to_f64 \ +helper_vfp_fcvt_f64_to_f16 \ +helper_recps_f32 \ +helper_rsqrts_f32 \ +helper_recpe_f16 \ +helper_recpe_f32 \ +helper_recpe_f64 \ +helper_rsqrte_f16 \ +helper_rsqrte_f32 \ +helper_rsqrte_f64 \ +helper_recpe_u32 \ +helper_rsqrte_u32 \ +helper_vfp_muladds \ +helper_vfp_muladdd \ +helper_rints_exact \ +helper_rintd_exact \ +helper_rints \ +helper_rintd \ +arm_rmode_to_sf \ +helper_fjcvtzs \ +helper_vjcvt \ +helper_frint32_s \ +helper_frint64_s \ +helper_frint32_d \ +helper_frint64_d \ +helper_check_hcr_el2_trap \ +arm_reg_reset \ +arm_reg_read \ +arm_reg_write \ +mla_op \ +mls_op \ +sshl_op \ +ushl_op \ +uqsub_op \ +sqsub_op \ +uqadd_op \ +sqadd_op \ +sli_op \ +cmtst_op \ +sri_op \ +usra_op \ +ssra_op \ +" + +armeb_SYMBOLS=${arm_SYMBOLS} + +aarch64_SYMBOLS=" +cpu_aarch64_init \ +arm_cpu_exec_interrupt \ +arm_cpu_update_virq \ +arm_cpu_update_vfiq \ +arm_cpu_initfn \ +gt_cntfrq_period_ns \ +arm_cpu_post_init \ +arm_cpu_realizefn \ +arm_cpu_class_init \ +cpu_arm_init \ +helper_crypto_aese \ +helper_crypto_aesmc \ +helper_crypto_sha1_3reg \ +helper_crypto_sha1h \ +helper_crypto_sha1su1 \ +helper_crypto_sha256h \ +helper_crypto_sha256h2 \ +helper_crypto_sha256su0 \ +helper_crypto_sha256su1 \ +helper_crypto_sha512h \ +helper_crypto_sha512h2 \ +helper_crypto_sha512su0 \ +helper_crypto_sha512su1 \ +helper_crypto_sm3partw1 \ +helper_crypto_sm3partw2 \ +helper_crypto_sm3tt \ +helper_crypto_sm4e \ +helper_crypto_sm4ekey \ +helper_check_breakpoints \ +arm_debug_check_watchpoint \ +arm_debug_excp_handler \ +arm_adjust_watchpoint_address \ +helper_udiv64 \ +helper_sdiv64 \ +helper_rbit64 \ +helper_msr_i_spsel \ +helper_msr_i_daifset \ +helper_msr_i_daifclear \ +helper_vfp_cmph_a64 \ +helper_vfp_cmpeh_a64 \ +helper_vfp_cmps_a64 \ +helper_vfp_cmpes_a64 \ +helper_vfp_cmpd_a64 \ +helper_vfp_cmped_a64 \ +helper_vfp_mulxs \ +helper_vfp_mulxd \ +helper_simd_tbl \ +helper_neon_ceq_f64 \ +helper_neon_cge_f64 \ +helper_neon_cgt_f64 \ +helper_recpsf_f16 \ +helper_recpsf_f32 \ +helper_recpsf_f64 \ +helper_rsqrtsf_f16 \ +helper_rsqrtsf_f32 \ +helper_rsqrtsf_f64 \ +helper_neon_addlp_s8 \ +helper_neon_addlp_u8 \ +helper_neon_addlp_s16 \ +helper_neon_addlp_u16 \ +helper_frecpx_f16 \ +helper_frecpx_f32 \ +helper_frecpx_f64 \ +helper_fcvtx_f64_to_f32 \ +helper_crc32_64 \ +helper_crc32c_64 \ +helper_paired_cmpxchg64_le \ +helper_paired_cmpxchg64_le_parallel \ +helper_paired_cmpxchg64_be \ 
+helper_paired_cmpxchg64_be_parallel \ +helper_casp_le_parallel \ +helper_casp_be_parallel \ +helper_advsimd_addh \ +helper_advsimd_subh \ +helper_advsimd_mulh \ +helper_advsimd_divh \ +helper_advsimd_minh \ +helper_advsimd_maxh \ +helper_advsimd_minnumh \ +helper_advsimd_maxnumh \ +helper_advsimd_add2h \ +helper_advsimd_sub2h \ +helper_advsimd_mul2h \ +helper_advsimd_div2h \ +helper_advsimd_min2h \ +helper_advsimd_max2h \ +helper_advsimd_minnum2h \ +helper_advsimd_maxnum2h \ +helper_advsimd_mulxh \ +helper_advsimd_mulx2h \ +helper_advsimd_muladdh \ +helper_advsimd_muladd2h \ +helper_advsimd_ceq_f16 \ +helper_advsimd_cge_f16 \ +helper_advsimd_cgt_f16 \ +helper_advsimd_acge_f16 \ +helper_advsimd_acgt_f16 \ +helper_advsimd_rinth_exact \ +helper_advsimd_rinth \ +helper_advsimd_f16tosinth \ +helper_advsimd_f16touinth \ +helper_exception_return \ +helper_sqrt_f16 \ +helper_dc_zva \ +read_raw_cp_reg \ +pmu_init \ +pmu_op_start \ +pmu_op_finish \ +pmu_pre_el_change \ +pmu_post_el_change \ +arm_pmu_timer_cb \ +arm_gt_ptimer_cb \ +arm_gt_vtimer_cb \ +arm_gt_htimer_cb \ +arm_gt_stimer_cb \ +arm_gt_hvtimer_cb \ +arm_hcr_el2_eff \ +sve_exception_el \ +sve_zcr_len_for_el \ +hw_watchpoint_update \ +hw_watchpoint_update_all \ +hw_breakpoint_update \ +hw_breakpoint_update_all \ +register_cp_regs_for_features \ +define_one_arm_cp_reg_with_opaque \ +define_arm_cp_regs_with_opaque \ +modify_arm_cp_regs \ +get_arm_cp_reginfo \ +arm_cp_write_ignore \ +arm_cp_read_zero \ +arm_cp_reset_ignore \ +cpsr_read \ +cpsr_write \ +helper_sxtb16 \ +helper_uxtb16 \ +helper_sdiv \ +helper_udiv \ +helper_rbit \ +arm_phys_excp_target_el \ +aarch64_sync_32_to_64 \ +aarch64_sync_64_to_32 \ +arm_cpu_do_interrupt \ +arm_sctlr \ +arm_s1_regime_using_lpae_format \ +aa64_va_parameters \ +v8m_security_lookup \ +pmsav8_mpu_lookup \ +get_phys_addr \ +arm_cpu_get_phys_page_attrs_debug \ +helper_qadd16 \ +helper_qadd8 \ +helper_qsub16 \ +helper_qsub8 \ +helper_qsubaddx \ +helper_qaddsubx \ +helper_uqadd16 \ +helper_uqadd8 \ +helper_uqsub16 \ +helper_uqsub8 \ +helper_uqsubaddx \ +helper_uqaddsubx \ +helper_sadd16 \ +helper_sadd8 \ +helper_ssub16 \ +helper_ssub8 \ +helper_ssubaddx \ +helper_saddsubx \ +helper_uadd16 \ +helper_uadd8 \ +helper_usub16 \ +helper_usub8 \ +helper_usubaddx \ +helper_uaddsubx \ +helper_shadd16 \ +helper_shadd8 \ +helper_shsub16 \ +helper_shsub8 \ +helper_shsubaddx \ +helper_shaddsubx \ +helper_uhadd16 \ +helper_uhadd8 \ +helper_uhsub16 \ +helper_uhsub8 \ +helper_uhsubaddx \ +helper_uhaddsubx \ +helper_usad8 \ +helper_sel_flags \ +helper_crc32 \ +helper_crc32c \ +fp_exception_el \ +arm_mmu_idx_to_el \ +arm_mmu_idx_el \ +arm_mmu_idx \ +arm_stage1_mmu_idx \ +arm_rebuild_hflags \ +helper_rebuild_hflags_m32_newel \ +helper_rebuild_hflags_m32 \ +helper_rebuild_hflags_a32_newel \ +helper_rebuild_hflags_a32 \ +helper_rebuild_hflags_a64 \ +cpu_get_tb_cpu_state \ +aarch64_sve_narrow_vq \ +aarch64_sve_change_el \ +helper_iwmmxt_maddsq \ +helper_iwmmxt_madduq \ +helper_iwmmxt_sadb \ +helper_iwmmxt_sadw \ +helper_iwmmxt_mulslw \ +helper_iwmmxt_mulshw \ +helper_iwmmxt_mululw \ +helper_iwmmxt_muluhw \ +helper_iwmmxt_macsw \ +helper_iwmmxt_macuw \ +helper_iwmmxt_unpacklb \ +helper_iwmmxt_unpacklw \ +helper_iwmmxt_unpackll \ +helper_iwmmxt_unpacklub \ +helper_iwmmxt_unpackluw \ +helper_iwmmxt_unpacklul \ +helper_iwmmxt_unpacklsb \ +helper_iwmmxt_unpacklsw \ +helper_iwmmxt_unpacklsl \ +helper_iwmmxt_unpackhb \ +helper_iwmmxt_unpackhw \ +helper_iwmmxt_unpackhl \ +helper_iwmmxt_unpackhub \ +helper_iwmmxt_unpackhuw \ 
+helper_iwmmxt_unpackhul \ +helper_iwmmxt_unpackhsb \ +helper_iwmmxt_unpackhsw \ +helper_iwmmxt_unpackhsl \ +helper_iwmmxt_cmpeqb \ +helper_iwmmxt_cmpeqw \ +helper_iwmmxt_cmpeql \ +helper_iwmmxt_cmpgtsb \ +helper_iwmmxt_cmpgtsw \ +helper_iwmmxt_cmpgtsl \ +helper_iwmmxt_cmpgtub \ +helper_iwmmxt_cmpgtuw \ +helper_iwmmxt_cmpgtul \ +helper_iwmmxt_minsb \ +helper_iwmmxt_minsw \ +helper_iwmmxt_minsl \ +helper_iwmmxt_minub \ +helper_iwmmxt_minuw \ +helper_iwmmxt_minul \ +helper_iwmmxt_maxsb \ +helper_iwmmxt_maxsw \ +helper_iwmmxt_maxsl \ +helper_iwmmxt_maxub \ +helper_iwmmxt_maxuw \ +helper_iwmmxt_maxul \ +helper_iwmmxt_subnb \ +helper_iwmmxt_subnw \ +helper_iwmmxt_subnl \ +helper_iwmmxt_addnb \ +helper_iwmmxt_addnw \ +helper_iwmmxt_addnl \ +helper_iwmmxt_subub \ +helper_iwmmxt_subuw \ +helper_iwmmxt_subul \ +helper_iwmmxt_addub \ +helper_iwmmxt_adduw \ +helper_iwmmxt_addul \ +helper_iwmmxt_subsb \ +helper_iwmmxt_subsw \ +helper_iwmmxt_subsl \ +helper_iwmmxt_addsb \ +helper_iwmmxt_addsw \ +helper_iwmmxt_addsl \ +helper_iwmmxt_avgb0 \ +helper_iwmmxt_avgb1 \ +helper_iwmmxt_avgw0 \ +helper_iwmmxt_avgw1 \ +helper_iwmmxt_align \ +helper_iwmmxt_insr \ +helper_iwmmxt_setpsr_nz \ +helper_iwmmxt_bcstb \ +helper_iwmmxt_bcstw \ +helper_iwmmxt_bcstl \ +helper_iwmmxt_addcb \ +helper_iwmmxt_addcw \ +helper_iwmmxt_addcl \ +helper_iwmmxt_msbb \ +helper_iwmmxt_msbw \ +helper_iwmmxt_msbl \ +helper_iwmmxt_srlw \ +helper_iwmmxt_srll \ +helper_iwmmxt_srlq \ +helper_iwmmxt_sllw \ +helper_iwmmxt_slll \ +helper_iwmmxt_sllq \ +helper_iwmmxt_sraw \ +helper_iwmmxt_sral \ +helper_iwmmxt_sraq \ +helper_iwmmxt_rorw \ +helper_iwmmxt_rorl \ +helper_iwmmxt_rorq \ +helper_iwmmxt_shufh \ +helper_iwmmxt_packuw \ +helper_iwmmxt_packul \ +helper_iwmmxt_packuq \ +helper_iwmmxt_packsw \ +helper_iwmmxt_packsl \ +helper_iwmmxt_packsq \ +helper_iwmmxt_muladdsl \ +helper_iwmmxt_muladdsw \ +helper_iwmmxt_muladdswl \ +armv7m_nvic_set_pending \ +helper_v7m_preserve_fp_state \ +write_v7m_exception \ +helper_v7m_bxns \ +helper_v7m_blxns \ +armv7m_nvic_neg_prio_requested \ +helper_v7m_vlstm \ +helper_v7m_vlldm \ +arm_v7m_cpu_do_interrupt \ +helper_v7m_mrs \ +helper_v7m_msr \ +helper_v7m_tt \ +arm_v7m_mmu_idx_all \ +arm_v7m_mmu_idx_for_secstate_and_priv \ +arm_v7m_mmu_idx_for_secstate \ +helper_neon_qadd_u8 \ +helper_neon_qadd_u16 \ +helper_neon_qadd_u32 \ +helper_neon_qadd_u64 \ +helper_neon_qadd_s8 \ +helper_neon_qadd_s16 \ +helper_neon_qadd_s32 \ +helper_neon_qadd_s64 \ +helper_neon_uqadd_s8 \ +helper_neon_uqadd_s16 \ +helper_neon_uqadd_s32 \ +helper_neon_uqadd_s64 \ +helper_neon_sqadd_u8 \ +helper_neon_sqadd_u16 \ +helper_neon_sqadd_u32 \ +helper_neon_sqadd_u64 \ +helper_neon_qsub_u8 \ +helper_neon_qsub_u16 \ +helper_neon_qsub_u32 \ +helper_neon_qsub_u64 \ +helper_neon_qsub_s8 \ +helper_neon_qsub_s16 \ +helper_neon_qsub_s32 \ +helper_neon_qsub_s64 \ +helper_neon_hadd_s8 \ +helper_neon_hadd_u8 \ +helper_neon_hadd_s16 \ +helper_neon_hadd_u16 \ +helper_neon_hadd_s32 \ +helper_neon_hadd_u32 \ +helper_neon_rhadd_s8 \ +helper_neon_rhadd_u8 \ +helper_neon_rhadd_s16 \ +helper_neon_rhadd_u16 \ +helper_neon_rhadd_s32 \ +helper_neon_rhadd_u32 \ +helper_neon_hsub_s8 \ +helper_neon_hsub_u8 \ +helper_neon_hsub_s16 \ +helper_neon_hsub_u16 \ +helper_neon_hsub_s32 \ +helper_neon_hsub_u32 \ +helper_neon_cgt_s8 \ +helper_neon_cgt_u8 \ +helper_neon_cgt_s16 \ +helper_neon_cgt_u16 \ +helper_neon_cgt_s32 \ +helper_neon_cgt_u32 \ +helper_neon_cge_s8 \ +helper_neon_cge_u8 \ +helper_neon_cge_s16 \ +helper_neon_cge_u16 \ +helper_neon_cge_s32 \ +helper_neon_cge_u32 \ 
+helper_neon_pmin_s8 \ +helper_neon_pmin_u8 \ +helper_neon_pmin_s16 \ +helper_neon_pmin_u16 \ +helper_neon_pmax_s8 \ +helper_neon_pmax_u8 \ +helper_neon_pmax_s16 \ +helper_neon_pmax_u16 \ +helper_neon_abd_s8 \ +helper_neon_abd_u8 \ +helper_neon_abd_s16 \ +helper_neon_abd_u16 \ +helper_neon_abd_s32 \ +helper_neon_abd_u32 \ +helper_neon_shl_u16 \ +helper_neon_shl_s16 \ +helper_neon_rshl_s8 \ +helper_neon_rshl_s16 \ +helper_neon_rshl_s32 \ +helper_neon_rshl_s64 \ +helper_neon_rshl_u8 \ +helper_neon_rshl_u16 \ +helper_neon_rshl_u32 \ +helper_neon_rshl_u64 \ +helper_neon_qshl_u8 \ +helper_neon_qshl_u16 \ +helper_neon_qshl_u32 \ +helper_neon_qshl_u64 \ +helper_neon_qshl_s8 \ +helper_neon_qshl_s16 \ +helper_neon_qshl_s32 \ +helper_neon_qshl_s64 \ +helper_neon_qshlu_s8 \ +helper_neon_qshlu_s16 \ +helper_neon_qshlu_s32 \ +helper_neon_qshlu_s64 \ +helper_neon_qrshl_u8 \ +helper_neon_qrshl_u16 \ +helper_neon_qrshl_u32 \ +helper_neon_qrshl_u64 \ +helper_neon_qrshl_s8 \ +helper_neon_qrshl_s16 \ +helper_neon_qrshl_s32 \ +helper_neon_qrshl_s64 \ +helper_neon_add_u8 \ +helper_neon_add_u16 \ +helper_neon_padd_u8 \ +helper_neon_padd_u16 \ +helper_neon_sub_u8 \ +helper_neon_sub_u16 \ +helper_neon_mul_u8 \ +helper_neon_mul_u16 \ +helper_neon_tst_u8 \ +helper_neon_tst_u16 \ +helper_neon_tst_u32 \ +helper_neon_ceq_u8 \ +helper_neon_ceq_u16 \ +helper_neon_ceq_u32 \ +helper_neon_clz_u8 \ +helper_neon_clz_u16 \ +helper_neon_cls_s8 \ +helper_neon_cls_s16 \ +helper_neon_cls_s32 \ +helper_neon_cnt_u8 \ +helper_neon_rbit_u8 \ +helper_neon_qdmulh_s16 \ +helper_neon_qrdmulh_s16 \ +helper_neon_qdmulh_s32 \ +helper_neon_qrdmulh_s32 \ +helper_neon_narrow_u8 \ +helper_neon_narrow_u16 \ +helper_neon_narrow_high_u8 \ +helper_neon_narrow_high_u16 \ +helper_neon_narrow_round_high_u8 \ +helper_neon_narrow_round_high_u16 \ +helper_neon_unarrow_sat8 \ +helper_neon_narrow_sat_u8 \ +helper_neon_narrow_sat_s8 \ +helper_neon_unarrow_sat16 \ +helper_neon_narrow_sat_u16 \ +helper_neon_narrow_sat_s16 \ +helper_neon_unarrow_sat32 \ +helper_neon_narrow_sat_u32 \ +helper_neon_narrow_sat_s32 \ +helper_neon_widen_u8 \ +helper_neon_widen_s8 \ +helper_neon_widen_u16 \ +helper_neon_widen_s16 \ +helper_neon_addl_u16 \ +helper_neon_addl_u32 \ +helper_neon_paddl_u16 \ +helper_neon_paddl_u32 \ +helper_neon_subl_u16 \ +helper_neon_subl_u32 \ +helper_neon_addl_saturate_s32 \ +helper_neon_addl_saturate_s64 \ +helper_neon_abdl_u16 \ +helper_neon_abdl_s16 \ +helper_neon_abdl_u32 \ +helper_neon_abdl_s32 \ +helper_neon_abdl_u64 \ +helper_neon_abdl_s64 \ +helper_neon_mull_u8 \ +helper_neon_mull_s8 \ +helper_neon_mull_u16 \ +helper_neon_mull_s16 \ +helper_neon_negl_u16 \ +helper_neon_negl_u32 \ +helper_neon_qabs_s8 \ +helper_neon_qneg_s8 \ +helper_neon_qabs_s16 \ +helper_neon_qneg_s16 \ +helper_neon_qabs_s32 \ +helper_neon_qneg_s32 \ +helper_neon_qabs_s64 \ +helper_neon_qneg_s64 \ +helper_neon_abd_f32 \ +helper_neon_ceq_f32 \ +helper_neon_cge_f32 \ +helper_neon_cgt_f32 \ +helper_neon_acge_f32 \ +helper_neon_acgt_f32 \ +helper_neon_acge_f64 \ +helper_neon_acgt_f64 \ +helper_neon_qunzip8 \ +helper_neon_qunzip16 \ +helper_neon_qunzip32 \ +helper_neon_unzip8 \ +helper_neon_unzip16 \ +helper_neon_qzip8 \ +helper_neon_qzip16 \ +helper_neon_qzip32 \ +helper_neon_zip8 \ +helper_neon_zip16 \ +raise_exception \ +raise_exception_ra \ +helper_neon_tbl \ +helper_v8m_stackcheck \ +helper_add_setq \ +helper_add_saturate \ +helper_sub_saturate \ +helper_add_usaturate \ +helper_sub_usaturate \ +helper_ssat \ +helper_ssat16 \ +helper_usat \ +helper_usat16 \ +helper_setend \ 
+helper_wfi \ +helper_wfe \ +helper_yield \ +helper_exception_internal \ +helper_exception_with_syndrome \ +helper_exception_bkpt_insn \ +helper_cpsr_read \ +helper_cpsr_write \ +helper_cpsr_write_eret \ +helper_get_user_reg \ +helper_set_user_reg \ +helper_set_r13_banked \ +helper_get_r13_banked \ +helper_msr_banked \ +helper_mrs_banked \ +helper_access_check_cp_reg \ +helper_set_cp_reg \ +helper_get_cp_reg \ +helper_set_cp_reg64 \ +helper_get_cp_reg64 \ +helper_pre_hvc \ +helper_pre_smc \ +helper_shl_cc \ +helper_shr_cc \ +helper_sar_cc \ +helper_ror_cc \ +helper_pacia \ +helper_pacib \ +helper_pacda \ +helper_pacdb \ +helper_pacga \ +helper_autia \ +helper_autib \ +helper_autda \ +helper_autdb \ +helper_xpaci \ +helper_xpacd \ +arm_is_psci_call \ +arm_handle_psci_call \ +helper_sve_predtest1 \ +helper_sve_predtest \ +helper_sve_and_pppp \ +helper_sve_bic_pppp \ +helper_sve_eor_pppp \ +helper_sve_sel_pppp \ +helper_sve_orr_pppp \ +helper_sve_orn_pppp \ +helper_sve_nor_pppp \ +helper_sve_nand_pppp \ +helper_sve_and_zpzz_b \ +helper_sve_and_zpzz_h \ +helper_sve_and_zpzz_s \ +helper_sve_and_zpzz_d \ +helper_sve_orr_zpzz_b \ +helper_sve_orr_zpzz_h \ +helper_sve_orr_zpzz_s \ +helper_sve_orr_zpzz_d \ +helper_sve_eor_zpzz_b \ +helper_sve_eor_zpzz_h \ +helper_sve_eor_zpzz_s \ +helper_sve_eor_zpzz_d \ +helper_sve_bic_zpzz_b \ +helper_sve_bic_zpzz_h \ +helper_sve_bic_zpzz_s \ +helper_sve_bic_zpzz_d \ +helper_sve_add_zpzz_b \ +helper_sve_add_zpzz_h \ +helper_sve_add_zpzz_s \ +helper_sve_add_zpzz_d \ +helper_sve_sub_zpzz_b \ +helper_sve_sub_zpzz_h \ +helper_sve_sub_zpzz_s \ +helper_sve_sub_zpzz_d \ +helper_sve_smax_zpzz_b \ +helper_sve_smax_zpzz_h \ +helper_sve_smax_zpzz_s \ +helper_sve_smax_zpzz_d \ +helper_sve_umax_zpzz_b \ +helper_sve_umax_zpzz_h \ +helper_sve_umax_zpzz_s \ +helper_sve_umax_zpzz_d \ +helper_sve_smin_zpzz_b \ +helper_sve_smin_zpzz_h \ +helper_sve_smin_zpzz_s \ +helper_sve_smin_zpzz_d \ +helper_sve_umin_zpzz_b \ +helper_sve_umin_zpzz_h \ +helper_sve_umin_zpzz_s \ +helper_sve_umin_zpzz_d \ +helper_sve_sabd_zpzz_b \ +helper_sve_sabd_zpzz_h \ +helper_sve_sabd_zpzz_s \ +helper_sve_sabd_zpzz_d \ +helper_sve_uabd_zpzz_b \ +helper_sve_uabd_zpzz_h \ +helper_sve_uabd_zpzz_s \ +helper_sve_uabd_zpzz_d \ +helper_sve_mul_zpzz_b \ +helper_sve_mul_zpzz_h \ +helper_sve_mul_zpzz_s \ +helper_sve_mul_zpzz_d \ +helper_sve_smulh_zpzz_b \ +helper_sve_smulh_zpzz_h \ +helper_sve_smulh_zpzz_s \ +helper_sve_smulh_zpzz_d \ +helper_sve_umulh_zpzz_b \ +helper_sve_umulh_zpzz_h \ +helper_sve_umulh_zpzz_s \ +helper_sve_umulh_zpzz_d \ +helper_sve_sdiv_zpzz_s \ +helper_sve_sdiv_zpzz_d \ +helper_sve_udiv_zpzz_s \ +helper_sve_udiv_zpzz_d \ +helper_sve_asr_zpzz_b \ +helper_sve_lsr_zpzz_b \ +helper_sve_lsl_zpzz_b \ +helper_sve_asr_zpzz_h \ +helper_sve_lsr_zpzz_h \ +helper_sve_lsl_zpzz_h \ +helper_sve_asr_zpzz_s \ +helper_sve_lsr_zpzz_s \ +helper_sve_lsl_zpzz_s \ +helper_sve_asr_zpzz_d \ +helper_sve_lsr_zpzz_d \ +helper_sve_lsl_zpzz_d \ +helper_sve_asr_zpzw_b \ +helper_sve_lsr_zpzw_b \ +helper_sve_lsl_zpzw_b \ +helper_sve_asr_zpzw_h \ +helper_sve_lsr_zpzw_h \ +helper_sve_lsl_zpzw_h \ +helper_sve_asr_zpzw_s \ +helper_sve_lsr_zpzw_s \ +helper_sve_lsl_zpzw_s \ +helper_sve_cls_b \ +helper_sve_cls_h \ +helper_sve_cls_s \ +helper_sve_cls_d \ +helper_sve_clz_b \ +helper_sve_clz_h \ +helper_sve_clz_s \ +helper_sve_clz_d \ +helper_sve_cnt_zpz_b \ +helper_sve_cnt_zpz_h \ +helper_sve_cnt_zpz_s \ +helper_sve_cnt_zpz_d \ +helper_sve_cnot_b \ +helper_sve_cnot_h \ +helper_sve_cnot_s \ +helper_sve_cnot_d \ +helper_sve_fabs_h \ 
+helper_sve_fabs_s \ +helper_sve_fabs_d \ +helper_sve_fneg_h \ +helper_sve_fneg_s \ +helper_sve_fneg_d \ +helper_sve_not_zpz_b \ +helper_sve_not_zpz_h \ +helper_sve_not_zpz_s \ +helper_sve_not_zpz_d \ +helper_sve_sxtb_h \ +helper_sve_sxtb_s \ +helper_sve_sxth_s \ +helper_sve_sxtb_d \ +helper_sve_sxth_d \ +helper_sve_sxtw_d \ +helper_sve_uxtb_h \ +helper_sve_uxtb_s \ +helper_sve_uxth_s \ +helper_sve_uxtb_d \ +helper_sve_uxth_d \ +helper_sve_uxtw_d \ +helper_sve_abs_b \ +helper_sve_abs_h \ +helper_sve_abs_s \ +helper_sve_abs_d \ +helper_sve_neg_b \ +helper_sve_neg_h \ +helper_sve_neg_s \ +helper_sve_neg_d \ +helper_sve_revb_h \ +helper_sve_revb_s \ +helper_sve_revb_d \ +helper_sve_revh_s \ +helper_sve_revh_d \ +helper_sve_revw_d \ +helper_sve_rbit_b \ +helper_sve_rbit_h \ +helper_sve_rbit_s \ +helper_sve_rbit_d \ +helper_sve_asr_zzw_b \ +helper_sve_lsr_zzw_b \ +helper_sve_lsl_zzw_b \ +helper_sve_asr_zzw_h \ +helper_sve_lsr_zzw_h \ +helper_sve_lsl_zzw_h \ +helper_sve_asr_zzw_s \ +helper_sve_lsr_zzw_s \ +helper_sve_lsl_zzw_s \ +helper_sve_orv_b \ +helper_sve_orv_h \ +helper_sve_orv_s \ +helper_sve_orv_d \ +helper_sve_eorv_b \ +helper_sve_eorv_h \ +helper_sve_eorv_s \ +helper_sve_eorv_d \ +helper_sve_andv_b \ +helper_sve_andv_h \ +helper_sve_andv_s \ +helper_sve_andv_d \ +helper_sve_saddv_b \ +helper_sve_saddv_h \ +helper_sve_saddv_s \ +helper_sve_uaddv_b \ +helper_sve_uaddv_h \ +helper_sve_uaddv_s \ +helper_sve_uaddv_d \ +helper_sve_smaxv_b \ +helper_sve_smaxv_h \ +helper_sve_smaxv_s \ +helper_sve_smaxv_d \ +helper_sve_umaxv_b \ +helper_sve_umaxv_h \ +helper_sve_umaxv_s \ +helper_sve_umaxv_d \ +helper_sve_sminv_b \ +helper_sve_sminv_h \ +helper_sve_sminv_s \ +helper_sve_sminv_d \ +helper_sve_uminv_b \ +helper_sve_uminv_h \ +helper_sve_uminv_s \ +helper_sve_uminv_d \ +helper_sve_subri_b \ +helper_sve_subri_h \ +helper_sve_subri_s \ +helper_sve_subri_d \ +helper_sve_smaxi_b \ +helper_sve_smaxi_h \ +helper_sve_smaxi_s \ +helper_sve_smaxi_d \ +helper_sve_smini_b \ +helper_sve_smini_h \ +helper_sve_smini_s \ +helper_sve_smini_d \ +helper_sve_umaxi_b \ +helper_sve_umaxi_h \ +helper_sve_umaxi_s \ +helper_sve_umaxi_d \ +helper_sve_umini_b \ +helper_sve_umini_h \ +helper_sve_umini_s \ +helper_sve_umini_d \ +helper_sve_pfirst \ +helper_sve_pnext \ +helper_sve_clr_b \ +helper_sve_clr_h \ +helper_sve_clr_s \ +helper_sve_clr_d \ +helper_sve_movz_b \ +helper_sve_movz_h \ +helper_sve_movz_s \ +helper_sve_movz_d \ +helper_sve_asr_zpzi_b \ +helper_sve_asr_zpzi_h \ +helper_sve_asr_zpzi_s \ +helper_sve_asr_zpzi_d \ +helper_sve_lsr_zpzi_b \ +helper_sve_lsr_zpzi_h \ +helper_sve_lsr_zpzi_s \ +helper_sve_lsr_zpzi_d \ +helper_sve_lsl_zpzi_b \ +helper_sve_lsl_zpzi_h \ +helper_sve_lsl_zpzi_s \ +helper_sve_lsl_zpzi_d \ +helper_sve_asrd_b \ +helper_sve_asrd_h \ +helper_sve_asrd_s \ +helper_sve_asrd_d \ +helper_sve_mla_b \ +helper_sve_mls_b \ +helper_sve_mla_h \ +helper_sve_mls_h \ +helper_sve_mla_s \ +helper_sve_mls_s \ +helper_sve_mla_d \ +helper_sve_mls_d \ +helper_sve_index_b \ +helper_sve_index_h \ +helper_sve_index_s \ +helper_sve_index_d \ +helper_sve_adr_p32 \ +helper_sve_adr_p64 \ +helper_sve_adr_s32 \ +helper_sve_adr_u32 \ +helper_sve_fexpa_h \ +helper_sve_fexpa_s \ +helper_sve_fexpa_d \ +helper_sve_ftssel_h \ +helper_sve_ftssel_s \ +helper_sve_ftssel_d \ +helper_sve_sqaddi_b \ +helper_sve_sqaddi_h \ +helper_sve_sqaddi_s \ +helper_sve_sqaddi_d \ +helper_sve_uqaddi_b \ +helper_sve_uqaddi_h \ +helper_sve_uqaddi_s \ +helper_sve_uqaddi_d \ +helper_sve_uqsubi_d \ +helper_sve_cpy_m_b \ +helper_sve_cpy_m_h \ 
+helper_sve_cpy_m_s \ +helper_sve_cpy_m_d \ +helper_sve_cpy_z_b \ +helper_sve_cpy_z_h \ +helper_sve_cpy_z_s \ +helper_sve_cpy_z_d \ +helper_sve_ext \ +helper_sve_insr_b \ +helper_sve_insr_h \ +helper_sve_insr_s \ +helper_sve_insr_d \ +helper_sve_rev_b \ +helper_sve_rev_h \ +helper_sve_rev_s \ +helper_sve_rev_d \ +helper_sve_tbl_b \ +helper_sve_tbl_h \ +helper_sve_tbl_s \ +helper_sve_tbl_d \ +helper_sve_sunpk_h \ +helper_sve_sunpk_s \ +helper_sve_sunpk_d \ +helper_sve_uunpk_h \ +helper_sve_uunpk_s \ +helper_sve_uunpk_d \ +helper_sve_zip_p \ +helper_sve_uzp_p \ +helper_sve_trn_p \ +helper_sve_rev_p \ +helper_sve_punpk_p \ +helper_sve_zip_b \ +helper_sve_zip_h \ +helper_sve_zip_s \ +helper_sve_zip_d \ +helper_sve_uzp_b \ +helper_sve_uzp_h \ +helper_sve_uzp_s \ +helper_sve_uzp_d \ +helper_sve_trn_b \ +helper_sve_trn_h \ +helper_sve_trn_s \ +helper_sve_trn_d \ +helper_sve_compact_s \ +helper_sve_compact_d \ +helper_sve_last_active_element \ +helper_sve_splice \ +helper_sve_sel_zpzz_b \ +helper_sve_sel_zpzz_h \ +helper_sve_sel_zpzz_s \ +helper_sve_sel_zpzz_d \ +helper_sve_cmpeq_ppzz_b \ +helper_sve_cmpeq_ppzz_h \ +helper_sve_cmpeq_ppzz_s \ +helper_sve_cmpeq_ppzz_d \ +helper_sve_cmpne_ppzz_b \ +helper_sve_cmpne_ppzz_h \ +helper_sve_cmpne_ppzz_s \ +helper_sve_cmpne_ppzz_d \ +helper_sve_cmpgt_ppzz_b \ +helper_sve_cmpgt_ppzz_h \ +helper_sve_cmpgt_ppzz_s \ +helper_sve_cmpgt_ppzz_d \ +helper_sve_cmpge_ppzz_b \ +helper_sve_cmpge_ppzz_h \ +helper_sve_cmpge_ppzz_s \ +helper_sve_cmpge_ppzz_d \ +helper_sve_cmphi_ppzz_b \ +helper_sve_cmphi_ppzz_h \ +helper_sve_cmphi_ppzz_s \ +helper_sve_cmphi_ppzz_d \ +helper_sve_cmphs_ppzz_b \ +helper_sve_cmphs_ppzz_h \ +helper_sve_cmphs_ppzz_s \ +helper_sve_cmphs_ppzz_d \ +helper_sve_cmpeq_ppzw_b \ +helper_sve_cmpeq_ppzw_h \ +helper_sve_cmpeq_ppzw_s \ +helper_sve_cmpne_ppzw_b \ +helper_sve_cmpne_ppzw_h \ +helper_sve_cmpne_ppzw_s \ +helper_sve_cmpgt_ppzw_b \ +helper_sve_cmpgt_ppzw_h \ +helper_sve_cmpgt_ppzw_s \ +helper_sve_cmpge_ppzw_b \ +helper_sve_cmpge_ppzw_h \ +helper_sve_cmpge_ppzw_s \ +helper_sve_cmphi_ppzw_b \ +helper_sve_cmphi_ppzw_h \ +helper_sve_cmphi_ppzw_s \ +helper_sve_cmphs_ppzw_b \ +helper_sve_cmphs_ppzw_h \ +helper_sve_cmphs_ppzw_s \ +helper_sve_cmplt_ppzw_b \ +helper_sve_cmplt_ppzw_h \ +helper_sve_cmplt_ppzw_s \ +helper_sve_cmple_ppzw_b \ +helper_sve_cmple_ppzw_h \ +helper_sve_cmple_ppzw_s \ +helper_sve_cmplo_ppzw_b \ +helper_sve_cmplo_ppzw_h \ +helper_sve_cmplo_ppzw_s \ +helper_sve_cmpls_ppzw_b \ +helper_sve_cmpls_ppzw_h \ +helper_sve_cmpls_ppzw_s \ +helper_sve_cmpeq_ppzi_b \ +helper_sve_cmpeq_ppzi_h \ +helper_sve_cmpeq_ppzi_s \ +helper_sve_cmpeq_ppzi_d \ +helper_sve_cmpne_ppzi_b \ +helper_sve_cmpne_ppzi_h \ +helper_sve_cmpne_ppzi_s \ +helper_sve_cmpne_ppzi_d \ +helper_sve_cmpgt_ppzi_b \ +helper_sve_cmpgt_ppzi_h \ +helper_sve_cmpgt_ppzi_s \ +helper_sve_cmpgt_ppzi_d \ +helper_sve_cmpge_ppzi_b \ +helper_sve_cmpge_ppzi_h \ +helper_sve_cmpge_ppzi_s \ +helper_sve_cmpge_ppzi_d \ +helper_sve_cmphi_ppzi_b \ +helper_sve_cmphi_ppzi_h \ +helper_sve_cmphi_ppzi_s \ +helper_sve_cmphi_ppzi_d \ +helper_sve_cmphs_ppzi_b \ +helper_sve_cmphs_ppzi_h \ +helper_sve_cmphs_ppzi_s \ +helper_sve_cmphs_ppzi_d \ +helper_sve_cmplt_ppzi_b \ +helper_sve_cmplt_ppzi_h \ +helper_sve_cmplt_ppzi_s \ +helper_sve_cmplt_ppzi_d \ +helper_sve_cmple_ppzi_b \ +helper_sve_cmple_ppzi_h \ +helper_sve_cmple_ppzi_s \ +helper_sve_cmple_ppzi_d \ +helper_sve_cmplo_ppzi_b \ +helper_sve_cmplo_ppzi_h \ +helper_sve_cmplo_ppzi_s \ +helper_sve_cmplo_ppzi_d \ +helper_sve_cmpls_ppzi_b \ +helper_sve_cmpls_ppzi_h \ 
+helper_sve_cmpls_ppzi_s \ +helper_sve_cmpls_ppzi_d \ +helper_sve_brkpa \ +helper_sve_brkpas \ +helper_sve_brkpb \ +helper_sve_brkpbs \ +helper_sve_brka_z \ +helper_sve_brkas_z \ +helper_sve_brkb_z \ +helper_sve_brkbs_z \ +helper_sve_brka_m \ +helper_sve_brkas_m \ +helper_sve_brkb_m \ +helper_sve_brkbs_m \ +helper_sve_brkn \ +helper_sve_brkns \ +helper_sve_cntp \ +helper_sve_while \ +helper_sve_faddv_h \ +helper_sve_faddv_s \ +helper_sve_faddv_d \ +helper_sve_fminnmv_h \ +helper_sve_fminnmv_s \ +helper_sve_fminnmv_d \ +helper_sve_fmaxnmv_h \ +helper_sve_fmaxnmv_s \ +helper_sve_fmaxnmv_d \ +helper_sve_fminv_h \ +helper_sve_fminv_s \ +helper_sve_fminv_d \ +helper_sve_fmaxv_h \ +helper_sve_fmaxv_s \ +helper_sve_fmaxv_d \ +helper_sve_fadda_h \ +helper_sve_fadda_s \ +helper_sve_fadda_d \ +helper_sve_fadd_h \ +helper_sve_fadd_s \ +helper_sve_fadd_d \ +helper_sve_fsub_h \ +helper_sve_fsub_s \ +helper_sve_fsub_d \ +helper_sve_fmul_h \ +helper_sve_fmul_s \ +helper_sve_fmul_d \ +helper_sve_fdiv_h \ +helper_sve_fdiv_s \ +helper_sve_fdiv_d \ +helper_sve_fmin_h \ +helper_sve_fmin_s \ +helper_sve_fmin_d \ +helper_sve_fmax_h \ +helper_sve_fmax_s \ +helper_sve_fmax_d \ +helper_sve_fminnum_h \ +helper_sve_fminnum_s \ +helper_sve_fminnum_d \ +helper_sve_fmaxnum_h \ +helper_sve_fmaxnum_s \ +helper_sve_fmaxnum_d \ +helper_sve_fabd_h \ +helper_sve_fabd_s \ +helper_sve_fabd_d \ +helper_sve_fscalbn_h \ +helper_sve_fscalbn_s \ +helper_sve_fscalbn_d \ +helper_sve_fmulx_h \ +helper_sve_fmulx_s \ +helper_sve_fmulx_d \ +helper_sve_fadds_h \ +helper_sve_fadds_s \ +helper_sve_fadds_d \ +helper_sve_fsubs_h \ +helper_sve_fsubs_s \ +helper_sve_fsubs_d \ +helper_sve_fmuls_h \ +helper_sve_fmuls_s \ +helper_sve_fmuls_d \ +helper_sve_fsubrs_h \ +helper_sve_fsubrs_s \ +helper_sve_fsubrs_d \ +helper_sve_fmaxnms_h \ +helper_sve_fmaxnms_s \ +helper_sve_fmaxnms_d \ +helper_sve_fminnms_h \ +helper_sve_fminnms_s \ +helper_sve_fminnms_d \ +helper_sve_fmaxs_h \ +helper_sve_fmaxs_s \ +helper_sve_fmaxs_d \ +helper_sve_fmins_h \ +helper_sve_fmins_s \ +helper_sve_fmins_d \ +helper_sve_fcvt_sh \ +helper_sve_fcvt_hs \ +helper_sve_fcvt_dh \ +helper_sve_fcvt_hd \ +helper_sve_fcvt_ds \ +helper_sve_fcvt_sd \ +helper_sve_fcvtzs_hh \ +helper_sve_fcvtzs_hs \ +helper_sve_fcvtzs_ss \ +helper_sve_fcvtzs_hd \ +helper_sve_fcvtzs_sd \ +helper_sve_fcvtzs_ds \ +helper_sve_fcvtzs_dd \ +helper_sve_fcvtzu_hh \ +helper_sve_fcvtzu_hs \ +helper_sve_fcvtzu_ss \ +helper_sve_fcvtzu_hd \ +helper_sve_fcvtzu_sd \ +helper_sve_fcvtzu_ds \ +helper_sve_fcvtzu_dd \ +helper_sve_frint_h \ +helper_sve_frint_s \ +helper_sve_frint_d \ +helper_sve_frintx_h \ +helper_sve_frintx_s \ +helper_sve_frintx_d \ +helper_sve_frecpx_h \ +helper_sve_frecpx_s \ +helper_sve_frecpx_d \ +helper_sve_fsqrt_h \ +helper_sve_fsqrt_s \ +helper_sve_fsqrt_d \ +helper_sve_scvt_hh \ +helper_sve_scvt_sh \ +helper_sve_scvt_ss \ +helper_sve_scvt_sd \ +helper_sve_scvt_dh \ +helper_sve_scvt_ds \ +helper_sve_scvt_dd \ +helper_sve_ucvt_hh \ +helper_sve_ucvt_sh \ +helper_sve_ucvt_ss \ +helper_sve_ucvt_sd \ +helper_sve_ucvt_dh \ +helper_sve_ucvt_ds \ +helper_sve_ucvt_dd \ +helper_sve_fmla_zpzzz_h \ +helper_sve_fmls_zpzzz_h \ +helper_sve_fnmla_zpzzz_h \ +helper_sve_fnmls_zpzzz_h \ +helper_sve_fmla_zpzzz_s \ +helper_sve_fmls_zpzzz_s \ +helper_sve_fnmla_zpzzz_s \ +helper_sve_fnmls_zpzzz_s \ +helper_sve_fmla_zpzzz_d \ +helper_sve_fmls_zpzzz_d \ +helper_sve_fnmla_zpzzz_d \ +helper_sve_fnmls_zpzzz_d \ +helper_sve_fcmge_h \ +helper_sve_fcmge_s \ +helper_sve_fcmge_d \ +helper_sve_fcmgt_h \ +helper_sve_fcmgt_s \ 
+helper_sve_fcmgt_d \ +helper_sve_fcmeq_h \ +helper_sve_fcmeq_s \ +helper_sve_fcmeq_d \ +helper_sve_fcmne_h \ +helper_sve_fcmne_s \ +helper_sve_fcmne_d \ +helper_sve_fcmuo_h \ +helper_sve_fcmuo_s \ +helper_sve_fcmuo_d \ +helper_sve_facge_h \ +helper_sve_facge_s \ +helper_sve_facge_d \ +helper_sve_facgt_h \ +helper_sve_facgt_s \ +helper_sve_facgt_d \ +helper_sve_fcmge0_h \ +helper_sve_fcmge0_s \ +helper_sve_fcmge0_d \ +helper_sve_fcmgt0_h \ +helper_sve_fcmgt0_s \ +helper_sve_fcmgt0_d \ +helper_sve_fcmle0_h \ +helper_sve_fcmle0_s \ +helper_sve_fcmle0_d \ +helper_sve_fcmlt0_h \ +helper_sve_fcmlt0_s \ +helper_sve_fcmlt0_d \ +helper_sve_fcmeq0_h \ +helper_sve_fcmeq0_s \ +helper_sve_fcmeq0_d \ +helper_sve_fcmne0_h \ +helper_sve_fcmne0_s \ +helper_sve_fcmne0_d \ +helper_sve_ftmad_h \ +helper_sve_ftmad_s \ +helper_sve_ftmad_d \ +helper_sve_fcadd_h \ +helper_sve_fcadd_s \ +helper_sve_fcadd_d \ +helper_sve_fcmla_zpzzz_h \ +helper_sve_fcmla_zpzzz_s \ +helper_sve_fcmla_zpzzz_d \ +helper_sve_ld1bb_r \ +helper_sve_ld1bhu_r \ +helper_sve_ld1bhs_r \ +helper_sve_ld1bsu_r \ +helper_sve_ld1bss_r \ +helper_sve_ld1bdu_r \ +helper_sve_ld1bds_r \ +helper_sve_ld1hh_le_r \ +helper_sve_ld1hh_be_r \ +helper_sve_ld1hsu_le_r \ +helper_sve_ld1hsu_be_r \ +helper_sve_ld1hss_le_r \ +helper_sve_ld1hss_be_r \ +helper_sve_ld1hdu_le_r \ +helper_sve_ld1hdu_be_r \ +helper_sve_ld1hds_le_r \ +helper_sve_ld1hds_be_r \ +helper_sve_ld1ss_le_r \ +helper_sve_ld1ss_be_r \ +helper_sve_ld1sdu_le_r \ +helper_sve_ld1sdu_be_r \ +helper_sve_ld1sds_le_r \ +helper_sve_ld1sds_be_r \ +helper_sve_ld1dd_le_r \ +helper_sve_ld1dd_be_r \ +helper_sve_ld2bb_r \ +helper_sve_ld3bb_r \ +helper_sve_ld4bb_r \ +helper_sve_ld2hh_le_r \ +helper_sve_ld2hh_be_r \ +helper_sve_ld3hh_le_r \ +helper_sve_ld3hh_be_r \ +helper_sve_ld4hh_le_r \ +helper_sve_ld4hh_be_r \ +helper_sve_ld2ss_le_r \ +helper_sve_ld2ss_be_r \ +helper_sve_ld3ss_le_r \ +helper_sve_ld3ss_be_r \ +helper_sve_ld4ss_le_r \ +helper_sve_ld4ss_be_r \ +helper_sve_ld2dd_le_r \ +helper_sve_ld2dd_be_r \ +helper_sve_ld3dd_le_r \ +helper_sve_ld3dd_be_r \ +helper_sve_ld4dd_le_r \ +helper_sve_ld4dd_be_r \ +helper_sve_ldff1bb_r \ +helper_sve_ldnf1bb_r \ +helper_sve_ldff1bhu_r \ +helper_sve_ldnf1bhu_r \ +helper_sve_ldff1bhs_r \ +helper_sve_ldnf1bhs_r \ +helper_sve_ldff1bsu_r \ +helper_sve_ldnf1bsu_r \ +helper_sve_ldff1bss_r \ +helper_sve_ldnf1bss_r \ +helper_sve_ldff1bdu_r \ +helper_sve_ldnf1bdu_r \ +helper_sve_ldff1bds_r \ +helper_sve_ldnf1bds_r \ +helper_sve_ldff1hh_le_r \ +helper_sve_ldnf1hh_le_r \ +helper_sve_ldff1hh_be_r \ +helper_sve_ldnf1hh_be_r \ +helper_sve_ldff1hsu_le_r \ +helper_sve_ldnf1hsu_le_r \ +helper_sve_ldff1hsu_be_r \ +helper_sve_ldnf1hsu_be_r \ +helper_sve_ldff1hss_le_r \ +helper_sve_ldnf1hss_le_r \ +helper_sve_ldff1hss_be_r \ +helper_sve_ldnf1hss_be_r \ +helper_sve_ldff1hdu_le_r \ +helper_sve_ldnf1hdu_le_r \ +helper_sve_ldff1hdu_be_r \ +helper_sve_ldnf1hdu_be_r \ +helper_sve_ldff1hds_le_r \ +helper_sve_ldnf1hds_le_r \ +helper_sve_ldff1hds_be_r \ +helper_sve_ldnf1hds_be_r \ +helper_sve_ldff1ss_le_r \ +helper_sve_ldnf1ss_le_r \ +helper_sve_ldff1ss_be_r \ +helper_sve_ldnf1ss_be_r \ +helper_sve_ldff1sdu_le_r \ +helper_sve_ldnf1sdu_le_r \ +helper_sve_ldff1sdu_be_r \ +helper_sve_ldnf1sdu_be_r \ +helper_sve_ldff1sds_le_r \ +helper_sve_ldnf1sds_le_r \ +helper_sve_ldff1sds_be_r \ +helper_sve_ldnf1sds_be_r \ +helper_sve_ldff1dd_le_r \ +helper_sve_ldnf1dd_le_r \ +helper_sve_ldff1dd_be_r \ +helper_sve_ldnf1dd_be_r \ +helper_sve_st1bb_r \ +helper_sve_st1bh_r \ +helper_sve_st1bs_r \ +helper_sve_st1bd_r \ 
+helper_sve_st2bb_r \ +helper_sve_st3bb_r \ +helper_sve_st4bb_r \ +helper_sve_st1hh_le_r \ +helper_sve_st1hh_be_r \ +helper_sve_st1hs_le_r \ +helper_sve_st1hs_be_r \ +helper_sve_st1hd_le_r \ +helper_sve_st1hd_be_r \ +helper_sve_st2hh_le_r \ +helper_sve_st2hh_be_r \ +helper_sve_st3hh_le_r \ +helper_sve_st3hh_be_r \ +helper_sve_st4hh_le_r \ +helper_sve_st4hh_be_r \ +helper_sve_st1ss_le_r \ +helper_sve_st1ss_be_r \ +helper_sve_st1sd_le_r \ +helper_sve_st1sd_be_r \ +helper_sve_st2ss_le_r \ +helper_sve_st2ss_be_r \ +helper_sve_st3ss_le_r \ +helper_sve_st3ss_be_r \ +helper_sve_st4ss_le_r \ +helper_sve_st4ss_be_r \ +helper_sve_st1dd_le_r \ +helper_sve_st1dd_be_r \ +helper_sve_st2dd_le_r \ +helper_sve_st2dd_be_r \ +helper_sve_st3dd_le_r \ +helper_sve_st3dd_be_r \ +helper_sve_st4dd_le_r \ +helper_sve_st4dd_be_r \ +helper_sve_ldbsu_zsu \ +helper_sve_ldbsu_zss \ +helper_sve_ldbdu_zsu \ +helper_sve_ldbdu_zss \ +helper_sve_ldbdu_zd \ +helper_sve_ldbss_zsu \ +helper_sve_ldbss_zss \ +helper_sve_ldbds_zsu \ +helper_sve_ldbds_zss \ +helper_sve_ldbds_zd \ +helper_sve_ldhsu_le_zsu \ +helper_sve_ldhsu_le_zss \ +helper_sve_ldhdu_le_zsu \ +helper_sve_ldhdu_le_zss \ +helper_sve_ldhdu_le_zd \ +helper_sve_ldhsu_be_zsu \ +helper_sve_ldhsu_be_zss \ +helper_sve_ldhdu_be_zsu \ +helper_sve_ldhdu_be_zss \ +helper_sve_ldhdu_be_zd \ +helper_sve_ldhss_le_zsu \ +helper_sve_ldhss_le_zss \ +helper_sve_ldhds_le_zsu \ +helper_sve_ldhds_le_zss \ +helper_sve_ldhds_le_zd \ +helper_sve_ldhss_be_zsu \ +helper_sve_ldhss_be_zss \ +helper_sve_ldhds_be_zsu \ +helper_sve_ldhds_be_zss \ +helper_sve_ldhds_be_zd \ +helper_sve_ldss_le_zsu \ +helper_sve_ldss_le_zss \ +helper_sve_ldsdu_le_zsu \ +helper_sve_ldsdu_le_zss \ +helper_sve_ldsdu_le_zd \ +helper_sve_ldss_be_zsu \ +helper_sve_ldss_be_zss \ +helper_sve_ldsdu_be_zsu \ +helper_sve_ldsdu_be_zss \ +helper_sve_ldsdu_be_zd \ +helper_sve_ldsds_le_zsu \ +helper_sve_ldsds_le_zss \ +helper_sve_ldsds_le_zd \ +helper_sve_ldsds_be_zsu \ +helper_sve_ldsds_be_zss \ +helper_sve_ldsds_be_zd \ +helper_sve_lddd_le_zsu \ +helper_sve_lddd_le_zss \ +helper_sve_lddd_le_zd \ +helper_sve_lddd_be_zsu \ +helper_sve_lddd_be_zss \ +helper_sve_lddd_be_zd \ +helper_sve_ldffbsu_zsu \ +helper_sve_ldffbsu_zss \ +helper_sve_ldffbdu_zsu \ +helper_sve_ldffbdu_zss \ +helper_sve_ldffbdu_zd \ +helper_sve_ldffbss_zsu \ +helper_sve_ldffbss_zss \ +helper_sve_ldffbds_zsu \ +helper_sve_ldffbds_zss \ +helper_sve_ldffbds_zd \ +helper_sve_ldffhsu_le_zsu \ +helper_sve_ldffhsu_le_zss \ +helper_sve_ldffhdu_le_zsu \ +helper_sve_ldffhdu_le_zss \ +helper_sve_ldffhdu_le_zd \ +helper_sve_ldffhsu_be_zsu \ +helper_sve_ldffhsu_be_zss \ +helper_sve_ldffhdu_be_zsu \ +helper_sve_ldffhdu_be_zss \ +helper_sve_ldffhdu_be_zd \ +helper_sve_ldffhss_le_zsu \ +helper_sve_ldffhss_le_zss \ +helper_sve_ldffhds_le_zsu \ +helper_sve_ldffhds_le_zss \ +helper_sve_ldffhds_le_zd \ +helper_sve_ldffhss_be_zsu \ +helper_sve_ldffhss_be_zss \ +helper_sve_ldffhds_be_zsu \ +helper_sve_ldffhds_be_zss \ +helper_sve_ldffhds_be_zd \ +helper_sve_ldffss_le_zsu \ +helper_sve_ldffss_le_zss \ +helper_sve_ldffsdu_le_zsu \ +helper_sve_ldffsdu_le_zss \ +helper_sve_ldffsdu_le_zd \ +helper_sve_ldffss_be_zsu \ +helper_sve_ldffss_be_zss \ +helper_sve_ldffsdu_be_zsu \ +helper_sve_ldffsdu_be_zss \ +helper_sve_ldffsdu_be_zd \ +helper_sve_ldffsds_le_zsu \ +helper_sve_ldffsds_le_zss \ +helper_sve_ldffsds_le_zd \ +helper_sve_ldffsds_be_zsu \ +helper_sve_ldffsds_be_zss \ +helper_sve_ldffsds_be_zd \ +helper_sve_ldffdd_le_zsu \ +helper_sve_ldffdd_le_zss \ +helper_sve_ldffdd_le_zd \ 
+helper_sve_ldffdd_be_zsu \ +helper_sve_ldffdd_be_zss \ +helper_sve_ldffdd_be_zd \ +helper_sve_stbs_zsu \ +helper_sve_sths_le_zsu \ +helper_sve_sths_be_zsu \ +helper_sve_stss_le_zsu \ +helper_sve_stss_be_zsu \ +helper_sve_stbs_zss \ +helper_sve_sths_le_zss \ +helper_sve_sths_be_zss \ +helper_sve_stss_le_zss \ +helper_sve_stss_be_zss \ +helper_sve_stbd_zsu \ +helper_sve_sthd_le_zsu \ +helper_sve_sthd_be_zsu \ +helper_sve_stsd_le_zsu \ +helper_sve_stsd_be_zsu \ +helper_sve_stdd_le_zsu \ +helper_sve_stdd_be_zsu \ +helper_sve_stbd_zss \ +helper_sve_sthd_le_zss \ +helper_sve_sthd_be_zss \ +helper_sve_stsd_le_zss \ +helper_sve_stsd_be_zss \ +helper_sve_stdd_le_zss \ +helper_sve_stdd_be_zss \ +helper_sve_stbd_zd \ +helper_sve_sthd_le_zd \ +helper_sve_sthd_be_zd \ +helper_sve_stsd_le_zd \ +helper_sve_stsd_be_zd \ +helper_sve_stdd_le_zd \ +helper_sve_stdd_be_zd \ +arm_cpu_do_unaligned_access \ +arm_cpu_do_transaction_failed \ +arm_cpu_tlb_fill \ +a64_translate_init \ +gen_a64_set_pc_im \ +unallocated_encoding \ +new_tmp_a64 \ +new_tmp_a64_zero \ +cpu_reg \ +cpu_reg_sp \ +read_cpu_reg \ +read_cpu_reg_sp \ +write_fp_dreg \ +get_fpstatus_ptr \ +sve_access_check \ +logic_imm_decode_wmask \ +arm_translate_init \ +arm_test_cc \ +arm_free_cc \ +arm_jump_cc \ +arm_gen_test_cc \ +vfp_expand_imm \ +gen_cmtst_i64 \ +gen_ushl_i32 \ +gen_ushl_i64 \ +gen_sshl_i32 \ +gen_sshl_i64 \ +gen_intermediate_code \ +restore_state_to_opc \ +disas_sve \ +helper_neon_qrdmlah_s16 \ +helper_gvec_qrdmlah_s16 \ +helper_neon_qrdmlsh_s16 \ +helper_gvec_qrdmlsh_s16 \ +helper_neon_qrdmlah_s32 \ +helper_gvec_qrdmlah_s32 \ +helper_neon_qrdmlsh_s32 \ +helper_gvec_qrdmlsh_s32 \ +helper_gvec_sdot_b \ +helper_gvec_udot_b \ +helper_gvec_sdot_h \ +helper_gvec_udot_h \ +helper_gvec_sdot_idx_b \ +helper_gvec_udot_idx_b \ +helper_gvec_sdot_idx_h \ +helper_gvec_udot_idx_h \ +helper_gvec_fcaddh \ +helper_gvec_fcadds \ +helper_gvec_fcaddd \ +helper_gvec_fcmlah \ +helper_gvec_fcmlah_idx \ +helper_gvec_fcmlas \ +helper_gvec_fcmlas_idx \ +helper_gvec_fcmlad \ +helper_gvec_frecpe_h \ +helper_gvec_frecpe_s \ +helper_gvec_frecpe_d \ +helper_gvec_frsqrte_h \ +helper_gvec_frsqrte_s \ +helper_gvec_frsqrte_d \ +helper_gvec_fadd_h \ +helper_gvec_fadd_s \ +helper_gvec_fadd_d \ +helper_gvec_fsub_h \ +helper_gvec_fsub_s \ +helper_gvec_fsub_d \ +helper_gvec_fmul_h \ +helper_gvec_fmul_s \ +helper_gvec_fmul_d \ +helper_gvec_ftsmul_h \ +helper_gvec_ftsmul_s \ +helper_gvec_ftsmul_d \ +helper_gvec_recps_h \ +helper_gvec_recps_s \ +helper_gvec_recps_d \ +helper_gvec_rsqrts_h \ +helper_gvec_rsqrts_s \ +helper_gvec_rsqrts_d \ +helper_gvec_fmul_idx_h \ +helper_gvec_fmul_idx_s \ +helper_gvec_fmul_idx_d \ +helper_gvec_fmla_idx_h \ +helper_gvec_fmla_idx_s \ +helper_gvec_fmla_idx_d \ +helper_gvec_uqadd_b \ +helper_gvec_uqadd_h \ +helper_gvec_uqadd_s \ +helper_gvec_sqadd_b \ +helper_gvec_sqadd_h \ +helper_gvec_sqadd_s \ +helper_gvec_uqsub_b \ +helper_gvec_uqsub_h \ +helper_gvec_uqsub_s \ +helper_gvec_sqsub_b \ +helper_gvec_sqsub_h \ +helper_gvec_sqsub_s \ +helper_gvec_uqadd_d \ +helper_gvec_uqsub_d \ +helper_gvec_sqadd_d \ +helper_gvec_sqsub_d \ +helper_gvec_fmlal_a32 \ +helper_gvec_fmlal_a64 \ +helper_gvec_fmlal_idx_a32 \ +helper_gvec_fmlal_idx_a64 \ +helper_gvec_sshl_b \ +helper_gvec_sshl_h \ +helper_gvec_ushl_b \ +helper_gvec_ushl_h \ +helper_gvec_pmul_b \ +helper_gvec_pmull_q \ +helper_neon_pmull_h \ +helper_sve2_pmull_h \ +helper_vfp_get_fpscr \ +vfp_get_fpscr \ +helper_vfp_set_fpscr \ +vfp_set_fpscr \ +helper_vfp_adds \ +helper_vfp_addd \ +helper_vfp_subs \ 
+helper_vfp_subd \ +helper_vfp_muls \ +helper_vfp_muld \ +helper_vfp_divs \ +helper_vfp_divd \ +helper_vfp_mins \ +helper_vfp_mind \ +helper_vfp_maxs \ +helper_vfp_maxd \ +helper_vfp_minnums \ +helper_vfp_minnumd \ +helper_vfp_maxnums \ +helper_vfp_maxnumd \ +helper_vfp_negs \ +helper_vfp_negd \ +helper_vfp_abss \ +helper_vfp_absd \ +helper_vfp_sqrts \ +helper_vfp_sqrtd \ +helper_vfp_cmps \ +helper_vfp_cmpes \ +helper_vfp_cmpd \ +helper_vfp_cmped \ +helper_vfp_sitoh \ +helper_vfp_tosih \ +helper_vfp_tosizh \ +helper_vfp_sitos \ +helper_vfp_tosis \ +helper_vfp_tosizs \ +helper_vfp_sitod \ +helper_vfp_tosid \ +helper_vfp_tosizd \ +helper_vfp_uitoh \ +helper_vfp_touih \ +helper_vfp_touizh \ +helper_vfp_uitos \ +helper_vfp_touis \ +helper_vfp_touizs \ +helper_vfp_uitod \ +helper_vfp_touid \ +helper_vfp_touizd \ +helper_vfp_fcvtds \ +helper_vfp_fcvtsd \ +helper_vfp_shtod \ +helper_vfp_toshd_round_to_zero \ +helper_vfp_toshd \ +helper_vfp_sltod \ +helper_vfp_tosld_round_to_zero \ +helper_vfp_tosld \ +helper_vfp_sqtod \ +helper_vfp_tosqd \ +helper_vfp_uhtod \ +helper_vfp_touhd_round_to_zero \ +helper_vfp_touhd \ +helper_vfp_ultod \ +helper_vfp_tould_round_to_zero \ +helper_vfp_tould \ +helper_vfp_uqtod \ +helper_vfp_touqd \ +helper_vfp_shtos \ +helper_vfp_toshs_round_to_zero \ +helper_vfp_toshs \ +helper_vfp_sltos \ +helper_vfp_tosls_round_to_zero \ +helper_vfp_tosls \ +helper_vfp_sqtos \ +helper_vfp_tosqs \ +helper_vfp_uhtos \ +helper_vfp_touhs_round_to_zero \ +helper_vfp_touhs \ +helper_vfp_ultos \ +helper_vfp_touls_round_to_zero \ +helper_vfp_touls \ +helper_vfp_uqtos \ +helper_vfp_touqs \ +helper_vfp_sltoh \ +helper_vfp_ultoh \ +helper_vfp_sqtoh \ +helper_vfp_uqtoh \ +helper_vfp_toshh \ +helper_vfp_touhh \ +helper_vfp_toslh \ +helper_vfp_toulh \ +helper_vfp_tosqh \ +helper_vfp_touqh \ +helper_set_rmode \ +helper_set_neon_rmode \ +helper_vfp_fcvt_f16_to_f32 \ +helper_vfp_fcvt_f32_to_f16 \ +helper_vfp_fcvt_f16_to_f64 \ +helper_vfp_fcvt_f64_to_f16 \ +helper_recps_f32 \ +helper_rsqrts_f32 \ +helper_recpe_f16 \ +helper_recpe_f32 \ +helper_recpe_f64 \ +helper_rsqrte_f16 \ +helper_rsqrte_f32 \ +helper_rsqrte_f64 \ +helper_recpe_u32 \ +helper_rsqrte_u32 \ +helper_vfp_muladds \ +helper_vfp_muladdd \ +helper_rints_exact \ +helper_rintd_exact \ +helper_rints \ +helper_rintd \ +arm_rmode_to_sf \ +helper_fjcvtzs \ +helper_vjcvt \ +helper_frint32_s \ +helper_frint64_s \ +helper_frint32_d \ +helper_frint64_d \ +helper_check_hcr_el2_trap \ +arm64_reg_reset \ +arm64_reg_read \ +arm64_reg_write \ +mla_op \ +mls_op \ +sshl_op \ +ushl_op \ +uqsub_op \ +sqsub_op \ +uqadd_op \ +sqadd_op \ +sli_op \ +cmtst_op \ +sri_op \ +usra_op \ +ssra_op \ +aarch64_translator_ops \ +pred_esz_masks \ +" + +aarch64eb_SYMBOLS=${aarch64_SYMBOLS} + +riscv32_SYMBOLS=" +riscv_cpu_mmu_index \ +riscv_cpu_exec_interrupt \ +riscv_cpu_fp_enabled \ +riscv_cpu_swap_hypervisor_regs \ +riscv_cpu_virt_enabled \ +riscv_cpu_set_virt_enabled \ +riscv_cpu_force_hs_excep_enabled \ +riscv_cpu_set_force_hs_excep \ +riscv_cpu_claim_interrupts \ +riscv_cpu_update_mip \ +riscv_cpu_set_rdtime_fn \ +riscv_cpu_set_mode \ +riscv_cpu_get_phys_page_debug \ +riscv_cpu_do_transaction_failed \ +riscv_cpu_do_unaligned_access \ +riscv_cpu_tlb_fill \ +riscv_cpu_do_interrupt \ +riscv_get_csr_ops \ +riscv_set_csr_ops \ +riscv_csrrw \ +riscv_csrrw_debug \ +riscv_cpu_get_fflags \ +riscv_cpu_set_fflags \ +helper_set_rounding_mode \ +helper_fmadd_s \ +helper_fmadd_d \ +helper_fmsub_s \ +helper_fmsub_d \ +helper_fnmsub_s \ +helper_fnmsub_d \ +helper_fnmadd_s \ 
+helper_fnmadd_d \ +helper_fadd_s \ +helper_fsub_s \ +helper_fmul_s \ +helper_fdiv_s \ +helper_fmin_s \ +helper_fmax_s \ +helper_fsqrt_s \ +helper_fle_s \ +helper_flt_s \ +helper_feq_s \ +helper_fcvt_w_s \ +helper_fcvt_wu_s \ +helper_fcvt_s_w \ +helper_fcvt_s_wu \ +helper_fclass_s \ +helper_fadd_d \ +helper_fsub_d \ +helper_fmul_d \ +helper_fdiv_d \ +helper_fmin_d \ +helper_fmax_d \ +helper_fcvt_s_d \ +helper_fcvt_d_s \ +helper_fsqrt_d \ +helper_fle_d \ +helper_flt_d \ +helper_feq_d \ +helper_fcvt_w_d \ +helper_fcvt_wu_d \ +helper_fcvt_d_w \ +helper_fcvt_d_wu \ +helper_fclass_d \ +riscv_raise_exception \ +helper_raise_exception \ +helper_uc_riscv_exit \ +helper_csrrw \ +helper_csrrs \ +helper_csrrc \ +helper_sret \ +helper_mret \ +helper_wfi \ +helper_tlb_flush \ +pmp_hart_has_privs \ +pmpcfg_csr_write \ +pmpcfg_csr_read \ +pmpaddr_csr_write \ +pmpaddr_csr_read \ +gen_intermediate_code \ +riscv_translate_init \ +restore_state_to_opc \ +cpu_riscv_init \ +riscv_reg_reset \ +riscv_reg_read \ +riscv_reg_write \ +helper_fcvt_l_s \ +helper_fcvt_lu_s \ +helper_fcvt_s_l \ +helper_fcvt_s_lu \ +helper_fcvt_l_d \ +helper_fcvt_lu_d \ +helper_fcvt_d_l \ +helper_fcvt_d_lu \ +gen_helper_tlb_flush \ +riscv_fpr_regnames \ +riscv_int_regnames \ +" + +riscv64_SYMBOLS=${riscv32_SYMBOLS} + +mips_SYMBOLS=" +helper_mfc0_mvpcontrol \ +helper_mfc0_mvpconf0 \ +helper_mfc0_mvpconf1 \ +helper_mfc0_random \ +helper_mfc0_tcstatus \ +helper_mftc0_tcstatus \ +helper_mfc0_tcbind \ +helper_mftc0_tcbind \ +helper_mfc0_tcrestart \ +helper_mftc0_tcrestart \ +helper_mfc0_tchalt \ +helper_mftc0_tchalt \ +helper_mfc0_tccontext \ +helper_mftc0_tccontext \ +helper_mfc0_tcschedule \ +helper_mftc0_tcschedule \ +helper_mfc0_tcschefback \ +helper_mftc0_tcschefback \ +helper_mfc0_count \ +helper_mfc0_saar \ +helper_mfhc0_saar \ +helper_mftc0_entryhi \ +helper_mftc0_cause \ +helper_mftc0_status \ +helper_mfc0_lladdr \ +helper_mfc0_maar \ +helper_mfhc0_maar \ +helper_mfc0_watchlo \ +helper_mfc0_watchhi \ +helper_mfhc0_watchhi \ +helper_mfc0_debug \ +helper_mftc0_debug \ +helper_dmfc0_tcrestart \ +helper_dmfc0_tchalt \ +helper_dmfc0_tccontext \ +helper_dmfc0_tcschedule \ +helper_dmfc0_tcschefback \ +helper_dmfc0_lladdr \ +helper_dmfc0_maar \ +helper_dmfc0_watchlo \ +helper_dmfc0_watchhi \ +helper_dmfc0_saar \ +helper_mtc0_index \ +helper_mtc0_mvpcontrol \ +helper_mtc0_vpecontrol \ +helper_mttc0_vpecontrol \ +helper_mftc0_vpecontrol \ +helper_mftc0_vpeconf0 \ +helper_mtc0_vpeconf0 \ +helper_mttc0_vpeconf0 \ +helper_mtc0_vpeconf1 \ +helper_mtc0_yqmask \ +helper_mtc0_vpeopt \ +helper_mtc0_entrylo0 \ +helper_dmtc0_entrylo0 \ +helper_mtc0_tcstatus \ +helper_mttc0_tcstatus \ +helper_mtc0_tcbind \ +helper_mttc0_tcbind \ +helper_mtc0_tcrestart \ +helper_mttc0_tcrestart \ +helper_mtc0_tchalt \ +helper_mttc0_tchalt \ +helper_mtc0_tccontext \ +helper_mttc0_tccontext \ +helper_mtc0_tcschedule \ +helper_mttc0_tcschedule \ +helper_mtc0_tcschefback \ +helper_mttc0_tcschefback \ +helper_mtc0_entrylo1 \ +helper_dmtc0_entrylo1 \ +helper_mtc0_context \ +helper_mtc0_memorymapid \ +update_pagemask \ +helper_mtc0_pagemask \ +helper_mtc0_pagegrain \ +helper_mtc0_segctl0 \ +helper_mtc0_segctl1 \ +helper_mtc0_segctl2 \ +helper_mtc0_pwfield \ +helper_mtc0_pwsize \ +helper_mtc0_wired \ +helper_mtc0_pwctl \ +helper_mtc0_srsconf0 \ +helper_mtc0_srsconf1 \ +helper_mtc0_srsconf2 \ +helper_mtc0_srsconf3 \ +helper_mtc0_srsconf4 \ +helper_mtc0_hwrena \ +helper_mtc0_count \ +helper_mtc0_saari \ +helper_mtc0_saar \ +helper_mthc0_saar \ +helper_mtc0_entryhi \ 
+helper_mttc0_entryhi \ +helper_mtc0_compare \ +helper_mtc0_status \ +helper_mttc0_status \ +helper_mtc0_intctl \ +helper_mtc0_srsctl \ +helper_mtc0_cause \ +helper_mttc0_cause \ +helper_mftc0_epc \ +helper_mftc0_ebase \ +helper_mtc0_ebase \ +helper_mttc0_ebase \ +helper_mftc0_configx \ +helper_mtc0_config0 \ +helper_mtc0_config2 \ +helper_mtc0_config3 \ +helper_mtc0_config4 \ +helper_mtc0_config5 \ +helper_mtc0_lladdr \ +helper_mtc0_maar \ +helper_mthc0_maar \ +helper_mtc0_maari \ +helper_mtc0_watchlo \ +helper_mtc0_watchhi \ +helper_mthc0_watchhi \ +helper_mtc0_xcontext \ +helper_mtc0_framemask \ +helper_mtc0_debug \ +helper_mttc0_debug \ +helper_mtc0_performance0 \ +helper_mtc0_errctl \ +helper_mtc0_taglo \ +helper_mtc0_datalo \ +helper_mtc0_taghi \ +helper_mtc0_datahi \ +helper_mftgpr \ +helper_mftlo \ +helper_mfthi \ +helper_mftacx \ +helper_mftdsp \ +helper_mttgpr \ +helper_mttlo \ +helper_mtthi \ +helper_mttacx \ +helper_mttdsp \ +helper_dmt \ +helper_emt \ +helper_dvpe \ +helper_evpe \ +helper_dvp \ +helper_evp \ +cpu_mips_get_random \ +cpu_mips_init \ +helper_absq_s_ph \ +helper_absq_s_qb \ +helper_absq_s_w \ +helper_absq_s_ob \ +helper_absq_s_qh \ +helper_absq_s_pw \ +helper_addqh_ph \ +helper_addqh_r_ph \ +helper_addqh_r_w \ +helper_addqh_w \ +helper_adduh_qb \ +helper_adduh_r_qb \ +helper_subqh_ph \ +helper_subqh_r_ph \ +helper_subqh_r_w \ +helper_subqh_w \ +helper_addq_ph \ +helper_addq_s_ph \ +helper_addq_s_w \ +helper_addu_ph \ +helper_addu_qb \ +helper_addu_s_ph \ +helper_addu_s_qb \ +helper_subq_ph \ +helper_subq_s_ph \ +helper_subq_s_w \ +helper_subu_ph \ +helper_subu_qb \ +helper_subu_s_ph \ +helper_subu_s_qb \ +helper_adduh_ob \ +helper_adduh_r_ob \ +helper_subuh_ob \ +helper_subuh_r_ob \ +helper_addq_pw \ +helper_addq_qh \ +helper_addq_s_pw \ +helper_addq_s_qh \ +helper_addu_ob \ +helper_addu_qh \ +helper_addu_s_ob \ +helper_addu_s_qh \ +helper_subq_pw \ +helper_subq_qh \ +helper_subq_s_pw \ +helper_subq_s_qh \ +helper_subu_ob \ +helper_subu_qh \ +helper_subu_s_ob \ +helper_subu_s_qh \ +helper_subuh_qb \ +helper_subuh_r_qb \ +helper_addsc \ +helper_addwc \ +helper_modsub \ +helper_raddu_w_qb \ +helper_raddu_l_ob \ +helper_precr_qb_ph \ +helper_precrq_qb_ph \ +helper_precr_sra_ph_w \ +helper_precr_sra_r_ph_w \ +helper_precrq_ph_w \ +helper_precrq_rs_ph_w \ +helper_precr_ob_qh \ +helper_precr_sra_qh_pw \ +helper_precr_sra_r_qh_pw \ +helper_precrq_ob_qh \ +helper_precrq_qh_pw \ +helper_precrq_rs_qh_pw \ +helper_precrq_pw_l \ +helper_precrqu_s_qb_ph \ +helper_precrqu_s_ob_qh \ +helper_preceq_pw_qhl \ +helper_preceq_pw_qhr \ +helper_preceq_pw_qhla \ +helper_preceq_pw_qhra \ +helper_precequ_ph_qbl \ +helper_precequ_ph_qbr \ +helper_precequ_ph_qbla \ +helper_precequ_ph_qbra \ +helper_precequ_qh_obl \ +helper_precequ_qh_obr \ +helper_precequ_qh_obla \ +helper_precequ_qh_obra \ +helper_preceu_ph_qbl \ +helper_preceu_ph_qbr \ +helper_preceu_ph_qbla \ +helper_preceu_ph_qbra \ +helper_preceu_qh_obl \ +helper_preceu_qh_obr \ +helper_preceu_qh_obla \ +helper_preceu_qh_obra \ +helper_shll_qb \ +helper_shrl_qb \ +helper_shra_qb \ +helper_shra_r_qb \ +helper_shll_ob \ +helper_shrl_ob \ +helper_shra_ob \ +helper_shra_r_ob \ +helper_shll_ph \ +helper_shll_s_ph \ +helper_shll_qh \ +helper_shll_s_qh \ +helper_shrl_qh \ +helper_shra_qh \ +helper_shra_r_qh \ +helper_shll_s_w \ +helper_shra_r_w \ +helper_shll_pw \ +helper_shll_s_pw \ +helper_shra_pw \ +helper_shra_r_pw \ +helper_shrl_ph \ +helper_shra_ph \ +helper_shra_r_ph \ +helper_muleu_s_ph_qbl \ +helper_muleu_s_ph_qbr \ 
+helper_mulq_rs_ph \ +helper_mul_ph \ +helper_mul_s_ph \ +helper_mulq_s_ph \ +helper_muleq_s_w_phl \ +helper_muleq_s_w_phr \ +helper_mulsaq_s_w_ph \ +helper_mulsa_w_ph \ +helper_muleu_s_qh_obl \ +helper_muleu_s_qh_obr \ +helper_mulq_rs_qh \ +helper_muleq_s_pw_qhl \ +helper_muleq_s_pw_qhr \ +helper_mulsaq_s_w_qh \ +helper_dpau_h_qbl \ +helper_dpau_h_qbr \ +helper_dpsu_h_qbl \ +helper_dpsu_h_qbr \ +helper_dpau_h_obl \ +helper_dpau_h_obr \ +helper_dpsu_h_obl \ +helper_dpsu_h_obr \ +helper_dpa_w_ph \ +helper_dpax_w_ph \ +helper_dps_w_ph \ +helper_dpsx_w_ph \ +helper_dpaq_s_w_ph \ +helper_dpaqx_s_w_ph \ +helper_dpsq_s_w_ph \ +helper_dpsqx_s_w_ph \ +helper_dpaqx_sa_w_ph \ +helper_dpsqx_sa_w_ph \ +helper_dpa_w_qh \ +helper_dpaq_s_w_qh \ +helper_dps_w_qh \ +helper_dpsq_s_w_qh \ +helper_dpaq_sa_l_w \ +helper_dpsq_sa_l_w \ +helper_dpaq_sa_l_pw \ +helper_dpsq_sa_l_pw \ +helper_mulsaq_s_l_pw \ +helper_maq_s_w_phl \ +helper_maq_s_w_phr \ +helper_maq_sa_w_phl \ +helper_maq_sa_w_phr \ +helper_mulq_s_w \ +helper_mulq_rs_w \ +helper_maq_s_w_qhll \ +helper_maq_s_w_qhlr \ +helper_maq_s_w_qhrl \ +helper_maq_s_w_qhrr \ +helper_maq_sa_w_qhll \ +helper_maq_sa_w_qhlr \ +helper_maq_sa_w_qhrl \ +helper_maq_sa_w_qhrr \ +helper_maq_s_l_pwl \ +helper_maq_s_l_pwr \ +helper_dmadd \ +helper_dmaddu \ +helper_dmsub \ +helper_dmsubu \ +helper_bitrev \ +helper_insv \ +helper_dinsv \ +helper_cmpgu_eq_qb \ +helper_cmpgu_lt_qb \ +helper_cmpgu_le_qb \ +helper_cmpgu_eq_ob \ +helper_cmpgu_lt_ob \ +helper_cmpgu_le_ob \ +helper_cmpu_eq_qb \ +helper_cmpu_lt_qb \ +helper_cmpu_le_qb \ +helper_cmp_eq_ph \ +helper_cmp_lt_ph \ +helper_cmp_le_ph \ +helper_cmpu_eq_ob \ +helper_cmpu_lt_ob \ +helper_cmpu_le_ob \ +helper_cmp_eq_qh \ +helper_cmp_lt_qh \ +helper_cmp_le_qh \ +helper_cmp_eq_pw \ +helper_cmp_lt_pw \ +helper_cmp_le_pw \ +helper_cmpgdu_eq_ob \ +helper_cmpgdu_lt_ob \ +helper_cmpgdu_le_ob \ +helper_pick_qb \ +helper_pick_ph \ +helper_pick_ob \ +helper_pick_qh \ +helper_pick_pw \ +helper_packrl_ph \ +helper_packrl_pw \ +helper_extr_w \ +helper_extr_r_w \ +helper_extr_rs_w \ +helper_dextr_w \ +helper_dextr_r_w \ +helper_dextr_rs_w \ +helper_dextr_l \ +helper_dextr_r_l \ +helper_dextr_rs_l \ +helper_extr_s_h \ +helper_dextr_s_h \ +helper_extp \ +helper_extpdp \ +helper_dextp \ +helper_dextpdp \ +helper_shilo \ +helper_dshilo \ +helper_mthlip \ +helper_dmthlip \ +cpu_wrdsp \ +helper_wrdsp \ +cpu_rddsp \ +helper_rddsp \ +helper_cfc1 \ +helper_ctc1 \ +ieee_ex_to_mips \ +helper_float_sqrt_d \ +helper_float_sqrt_s \ +helper_float_cvtd_s \ +helper_float_cvtd_w \ +helper_float_cvtd_l \ +helper_float_cvt_l_d \ +helper_float_cvt_l_s \ +helper_float_cvtps_pw \ +helper_float_cvtpw_ps \ +helper_float_cvts_d \ +helper_float_cvts_w \ +helper_float_cvts_l \ +helper_float_cvts_pl \ +helper_float_cvts_pu \ +helper_float_cvt_w_s \ +helper_float_cvt_w_d \ +helper_float_round_l_d \ +helper_float_round_l_s \ +helper_float_round_w_d \ +helper_float_round_w_s \ +helper_float_trunc_l_d \ +helper_float_trunc_l_s \ +helper_float_trunc_w_d \ +helper_float_trunc_w_s \ +helper_float_ceil_l_d \ +helper_float_ceil_l_s \ +helper_float_ceil_w_d \ +helper_float_ceil_w_s \ +helper_float_floor_l_d \ +helper_float_floor_l_s \ +helper_float_floor_w_d \ +helper_float_floor_w_s \ +helper_float_cvt_2008_l_d \ +helper_float_cvt_2008_l_s \ +helper_float_cvt_2008_w_d \ +helper_float_cvt_2008_w_s \ +helper_float_round_2008_l_d \ +helper_float_round_2008_l_s \ +helper_float_round_2008_w_d \ +helper_float_round_2008_w_s \ +helper_float_trunc_2008_l_d \ +helper_float_trunc_2008_l_s \ 
+helper_float_trunc_2008_w_d \ +helper_float_trunc_2008_w_s \ +helper_float_ceil_2008_l_d \ +helper_float_ceil_2008_l_s \ +helper_float_ceil_2008_w_d \ +helper_float_ceil_2008_w_s \ +helper_float_floor_2008_l_d \ +helper_float_floor_2008_l_s \ +helper_float_floor_2008_w_d \ +helper_float_floor_2008_w_s \ +helper_float_abs_d \ +helper_float_abs_s \ +helper_float_abs_ps \ +helper_float_chs_d \ +helper_float_chs_s \ +helper_float_chs_ps \ +helper_float_recip_d \ +helper_float_recip_s \ +helper_float_rsqrt_d \ +helper_float_rsqrt_s \ +helper_float_recip1_d \ +helper_float_recip1_s \ +helper_float_recip1_ps \ +helper_float_rsqrt1_d \ +helper_float_rsqrt1_s \ +helper_float_rsqrt1_ps \ +helper_float_rint_s \ +helper_float_rint_d \ +float_class_s \ +helper_float_class_s \ +float_class_d \ +helper_float_class_d \ +helper_float_add_d \ +helper_float_add_s \ +helper_float_add_ps \ +helper_float_sub_d \ +helper_float_sub_s \ +helper_float_sub_ps \ +helper_float_mul_d \ +helper_float_mul_s \ +helper_float_mul_ps \ +helper_float_div_d \ +helper_float_div_s \ +helper_float_div_ps \ +helper_float_recip2_d \ +helper_float_recip2_s \ +helper_float_recip2_ps \ +helper_float_rsqrt2_d \ +helper_float_rsqrt2_s \ +helper_float_rsqrt2_ps \ +helper_float_addr_ps \ +helper_float_mulr_ps \ +helper_float_max_s \ +helper_float_max_d \ +helper_float_maxa_s \ +helper_float_maxa_d \ +helper_float_min_s \ +helper_float_min_d \ +helper_float_mina_s \ +helper_float_mina_d \ +helper_float_madd_d \ +helper_float_madd_s \ +helper_float_madd_ps \ +helper_float_msub_d \ +helper_float_msub_s \ +helper_float_msub_ps \ +helper_float_nmadd_d \ +helper_float_nmadd_s \ +helper_float_nmadd_ps \ +helper_float_nmsub_d \ +helper_float_nmsub_s \ +helper_float_nmsub_ps \ +helper_float_maddf_s \ +helper_float_maddf_d \ +helper_float_msubf_s \ +helper_float_msubf_d \ +helper_cmp_d_f \ +helper_cmpabs_d_f \ +helper_cmp_d_un \ +helper_cmpabs_d_un \ +helper_cmp_d_eq \ +helper_cmpabs_d_eq \ +helper_cmp_d_ueq \ +helper_cmpabs_d_ueq \ +helper_cmp_d_olt \ +helper_cmpabs_d_olt \ +helper_cmp_d_ult \ +helper_cmpabs_d_ult \ +helper_cmp_d_ole \ +helper_cmpabs_d_ole \ +helper_cmp_d_ule \ +helper_cmpabs_d_ule \ +helper_cmp_d_sf \ +helper_cmpabs_d_sf \ +helper_cmp_d_ngle \ +helper_cmpabs_d_ngle \ +helper_cmp_d_seq \ +helper_cmpabs_d_seq \ +helper_cmp_d_ngl \ +helper_cmpabs_d_ngl \ +helper_cmp_d_lt \ +helper_cmpabs_d_lt \ +helper_cmp_d_nge \ +helper_cmpabs_d_nge \ +helper_cmp_d_le \ +helper_cmpabs_d_le \ +helper_cmp_d_ngt \ +helper_cmpabs_d_ngt \ +helper_cmp_s_f \ +helper_cmpabs_s_f \ +helper_cmp_s_un \ +helper_cmpabs_s_un \ +helper_cmp_s_eq \ +helper_cmpabs_s_eq \ +helper_cmp_s_ueq \ +helper_cmpabs_s_ueq \ +helper_cmp_s_olt \ +helper_cmpabs_s_olt \ +helper_cmp_s_ult \ +helper_cmpabs_s_ult \ +helper_cmp_s_ole \ +helper_cmpabs_s_ole \ +helper_cmp_s_ule \ +helper_cmpabs_s_ule \ +helper_cmp_s_sf \ +helper_cmpabs_s_sf \ +helper_cmp_s_ngle \ +helper_cmpabs_s_ngle \ +helper_cmp_s_seq \ +helper_cmpabs_s_seq \ +helper_cmp_s_ngl \ +helper_cmpabs_s_ngl \ +helper_cmp_s_lt \ +helper_cmpabs_s_lt \ +helper_cmp_s_nge \ +helper_cmpabs_s_nge \ +helper_cmp_s_le \ +helper_cmpabs_s_le \ +helper_cmp_s_ngt \ +helper_cmpabs_s_ngt \ +helper_cmp_ps_f \ +helper_cmpabs_ps_f \ +helper_cmp_ps_un \ +helper_cmpabs_ps_un \ +helper_cmp_ps_eq \ +helper_cmpabs_ps_eq \ +helper_cmp_ps_ueq \ +helper_cmpabs_ps_ueq \ +helper_cmp_ps_olt \ +helper_cmpabs_ps_olt \ +helper_cmp_ps_ult \ +helper_cmpabs_ps_ult \ +helper_cmp_ps_ole \ +helper_cmpabs_ps_ole \ +helper_cmp_ps_ule \ +helper_cmpabs_ps_ule \ 
+helper_cmp_ps_sf \ +helper_cmpabs_ps_sf \ +helper_cmp_ps_ngle \ +helper_cmpabs_ps_ngle \ +helper_cmp_ps_seq \ +helper_cmpabs_ps_seq \ +helper_cmp_ps_ngl \ +helper_cmpabs_ps_ngl \ +helper_cmp_ps_lt \ +helper_cmpabs_ps_lt \ +helper_cmp_ps_nge \ +helper_cmpabs_ps_nge \ +helper_cmp_ps_le \ +helper_cmpabs_ps_le \ +helper_cmp_ps_ngt \ +helper_cmpabs_ps_ngt \ +helper_r6_cmp_d_af \ +helper_r6_cmp_d_un \ +helper_r6_cmp_d_eq \ +helper_r6_cmp_d_ueq \ +helper_r6_cmp_d_lt \ +helper_r6_cmp_d_ult \ +helper_r6_cmp_d_le \ +helper_r6_cmp_d_ule \ +helper_r6_cmp_d_saf \ +helper_r6_cmp_d_sun \ +helper_r6_cmp_d_seq \ +helper_r6_cmp_d_sueq \ +helper_r6_cmp_d_slt \ +helper_r6_cmp_d_sult \ +helper_r6_cmp_d_sle \ +helper_r6_cmp_d_sule \ +helper_r6_cmp_d_or \ +helper_r6_cmp_d_une \ +helper_r6_cmp_d_ne \ +helper_r6_cmp_d_sor \ +helper_r6_cmp_d_sune \ +helper_r6_cmp_d_sne \ +helper_r6_cmp_s_af \ +helper_r6_cmp_s_un \ +helper_r6_cmp_s_eq \ +helper_r6_cmp_s_ueq \ +helper_r6_cmp_s_lt \ +helper_r6_cmp_s_ult \ +helper_r6_cmp_s_le \ +helper_r6_cmp_s_ule \ +helper_r6_cmp_s_saf \ +helper_r6_cmp_s_sun \ +helper_r6_cmp_s_seq \ +helper_r6_cmp_s_sueq \ +helper_r6_cmp_s_slt \ +helper_r6_cmp_s_sult \ +helper_r6_cmp_s_sle \ +helper_r6_cmp_s_sule \ +helper_r6_cmp_s_or \ +helper_r6_cmp_s_une \ +helper_r6_cmp_s_ne \ +helper_r6_cmp_s_sor \ +helper_r6_cmp_s_sune \ +helper_r6_cmp_s_sne \ +no_mmu_map_address \ +fixed_mmu_map_address \ +r4k_map_address \ +cpu_mips_tlb_flush \ +sync_c0_status \ +cpu_mips_store_status \ +cpu_mips_store_cause \ +mips_cpu_get_phys_page_debug \ +mips_cpu_tlb_fill \ +cpu_mips_translate_address \ +exception_resume_pc \ +mips_cpu_do_interrupt \ +mips_cpu_exec_interrupt \ +r4k_invalidate_tlb \ +do_raise_exception_err \ +helper_paddsb \ +helper_paddusb \ +helper_paddsh \ +helper_paddush \ +helper_paddb \ +helper_paddh \ +helper_paddw \ +helper_psubsb \ +helper_psubusb \ +helper_psubsh \ +helper_psubush \ +helper_psubb \ +helper_psubh \ +helper_psubw \ +helper_pshufh \ +helper_packsswh \ +helper_packsshb \ +helper_packushb \ +helper_punpcklwd \ +helper_punpckhwd \ +helper_punpcklhw \ +helper_punpckhhw \ +helper_punpcklbh \ +helper_punpckhbh \ +helper_pavgh \ +helper_pavgb \ +helper_pmaxsh \ +helper_pminsh \ +helper_pmaxub \ +helper_pminub \ +helper_pcmpeqw \ +helper_pcmpgtw \ +helper_pcmpeqh \ +helper_pcmpgth \ +helper_pcmpeqb \ +helper_pcmpgtb \ +helper_psllw \ +helper_psrlw \ +helper_psraw \ +helper_psllh \ +helper_psrlh \ +helper_psrah \ +helper_pmullh \ +helper_pmulhh \ +helper_pmulhuh \ +helper_pmaddhw \ +helper_pasubub \ +helper_biadd \ +helper_pmovmskb \ +helper_msa_nloc_b \ +helper_msa_nloc_h \ +helper_msa_nloc_w \ +helper_msa_nloc_d \ +helper_msa_nlzc_b \ +helper_msa_nlzc_h \ +helper_msa_nlzc_w \ +helper_msa_nlzc_d \ +helper_msa_pcnt_b \ +helper_msa_pcnt_h \ +helper_msa_pcnt_w \ +helper_msa_pcnt_d \ +helper_msa_binsl_b \ +helper_msa_binsl_h \ +helper_msa_binsl_w \ +helper_msa_binsl_d \ +helper_msa_binsr_b \ +helper_msa_binsr_h \ +helper_msa_binsr_w \ +helper_msa_binsr_d \ +helper_msa_bmnz_v \ +helper_msa_bmz_v \ +helper_msa_bsel_v \ +helper_msa_bclr_b \ +helper_msa_bclr_h \ +helper_msa_bclr_w \ +helper_msa_bclr_d \ +helper_msa_bneg_b \ +helper_msa_bneg_h \ +helper_msa_bneg_w \ +helper_msa_bneg_d \ +helper_msa_bset_b \ +helper_msa_bset_h \ +helper_msa_bset_w \ +helper_msa_bset_d \ +helper_msa_add_a_b \ +helper_msa_add_a_h \ +helper_msa_add_a_w \ +helper_msa_add_a_d \ +helper_msa_adds_a_b \ +helper_msa_adds_a_h \ +helper_msa_adds_a_w \ +helper_msa_adds_a_d \ +helper_msa_adds_s_b \ +helper_msa_adds_s_h \ 
+helper_msa_adds_s_w \ +helper_msa_adds_s_d \ +helper_msa_adds_u_b \ +helper_msa_adds_u_h \ +helper_msa_adds_u_w \ +helper_msa_adds_u_d \ +helper_msa_addv_b \ +helper_msa_addv_h \ +helper_msa_addv_w \ +helper_msa_addv_d \ +helper_msa_hadd_s_h \ +helper_msa_hadd_s_w \ +helper_msa_hadd_s_d \ +helper_msa_hadd_u_h \ +helper_msa_hadd_u_w \ +helper_msa_hadd_u_d \ +helper_msa_ave_s_b \ +helper_msa_ave_s_h \ +helper_msa_ave_s_w \ +helper_msa_ave_s_d \ +helper_msa_ave_u_b \ +helper_msa_ave_u_h \ +helper_msa_ave_u_w \ +helper_msa_ave_u_d \ +helper_msa_aver_s_b \ +helper_msa_aver_s_h \ +helper_msa_aver_s_w \ +helper_msa_aver_s_d \ +helper_msa_aver_u_b \ +helper_msa_aver_u_h \ +helper_msa_aver_u_w \ +helper_msa_aver_u_d \ +helper_msa_ceq_b \ +helper_msa_ceq_h \ +helper_msa_ceq_w \ +helper_msa_ceq_d \ +helper_msa_cle_s_b \ +helper_msa_cle_s_h \ +helper_msa_cle_s_w \ +helper_msa_cle_s_d \ +helper_msa_cle_u_b \ +helper_msa_cle_u_h \ +helper_msa_cle_u_w \ +helper_msa_cle_u_d \ +helper_msa_clt_s_b \ +helper_msa_clt_s_h \ +helper_msa_clt_s_w \ +helper_msa_clt_s_d \ +helper_msa_clt_u_b \ +helper_msa_clt_u_h \ +helper_msa_clt_u_w \ +helper_msa_clt_u_d \ +helper_msa_div_s_b \ +helper_msa_div_s_h \ +helper_msa_div_s_w \ +helper_msa_div_s_d \ +helper_msa_div_u_b \ +helper_msa_div_u_h \ +helper_msa_div_u_w \ +helper_msa_div_u_d \ +helper_msa_max_a_b \ +helper_msa_max_a_h \ +helper_msa_max_a_w \ +helper_msa_max_a_d \ +helper_msa_max_s_b \ +helper_msa_max_s_h \ +helper_msa_max_s_w \ +helper_msa_max_s_d \ +helper_msa_max_u_b \ +helper_msa_max_u_h \ +helper_msa_max_u_w \ +helper_msa_max_u_d \ +helper_msa_min_a_b \ +helper_msa_min_a_h \ +helper_msa_min_a_w \ +helper_msa_min_a_d \ +helper_msa_min_s_b \ +helper_msa_min_s_h \ +helper_msa_min_s_w \ +helper_msa_min_s_d \ +helper_msa_min_u_b \ +helper_msa_min_u_h \ +helper_msa_min_u_w \ +helper_msa_min_u_d \ +helper_msa_mod_s_b \ +helper_msa_mod_s_h \ +helper_msa_mod_s_w \ +helper_msa_mod_s_d \ +helper_msa_mod_u_b \ +helper_msa_mod_u_h \ +helper_msa_mod_u_w \ +helper_msa_mod_u_d \ +helper_msa_asub_s_b \ +helper_msa_asub_s_h \ +helper_msa_asub_s_w \ +helper_msa_asub_s_d \ +helper_msa_asub_u_b \ +helper_msa_asub_u_h \ +helper_msa_asub_u_w \ +helper_msa_asub_u_d \ +helper_msa_hsub_s_h \ +helper_msa_hsub_s_w \ +helper_msa_hsub_s_d \ +helper_msa_hsub_u_h \ +helper_msa_hsub_u_w \ +helper_msa_hsub_u_d \ +helper_msa_ilvev_b \ +helper_msa_ilvev_h \ +helper_msa_ilvev_w \ +helper_msa_ilvev_d \ +helper_msa_ilvod_b \ +helper_msa_ilvod_h \ +helper_msa_ilvod_w \ +helper_msa_ilvod_d \ +helper_msa_ilvl_b \ +helper_msa_ilvl_h \ +helper_msa_ilvl_w \ +helper_msa_ilvl_d \ +helper_msa_ilvr_b \ +helper_msa_ilvr_h \ +helper_msa_ilvr_w \ +helper_msa_ilvr_d \ +helper_msa_and_v \ +helper_msa_nor_v \ +helper_msa_or_v \ +helper_msa_xor_v \ +helper_msa_move_v \ +helper_msa_pckev_b \ +helper_msa_pckev_h \ +helper_msa_pckev_w \ +helper_msa_pckev_d \ +helper_msa_pckod_b \ +helper_msa_pckod_h \ +helper_msa_pckod_w \ +helper_msa_pckod_d \ +helper_msa_sll_b \ +helper_msa_sll_h \ +helper_msa_sll_w \ +helper_msa_sll_d \ +helper_msa_sra_b \ +helper_msa_sra_h \ +helper_msa_sra_w \ +helper_msa_sra_d \ +helper_msa_srar_b \ +helper_msa_srar_h \ +helper_msa_srar_w \ +helper_msa_srar_d \ +helper_msa_srl_b \ +helper_msa_srl_h \ +helper_msa_srl_w \ +helper_msa_srl_d \ +helper_msa_srlr_b \ +helper_msa_srlr_h \ +helper_msa_srlr_w \ +helper_msa_srlr_d \ +helper_msa_andi_b \ +helper_msa_ori_b \ +helper_msa_nori_b \ +helper_msa_xori_b \ +helper_msa_bmnzi_b \ +helper_msa_bmzi_b \ +helper_msa_bseli_b \ +helper_msa_shf_df \ 
+helper_msa_addvi_df \ +helper_msa_subvi_df \ +helper_msa_ceqi_df \ +helper_msa_clei_s_df \ +helper_msa_clei_u_df \ +helper_msa_clti_s_df \ +helper_msa_clti_u_df \ +helper_msa_maxi_s_df \ +helper_msa_maxi_u_df \ +helper_msa_mini_s_df \ +helper_msa_mini_u_df \ +helper_msa_ldi_df \ +helper_msa_slli_df \ +helper_msa_srai_df \ +helper_msa_srli_df \ +helper_msa_bclri_df \ +helper_msa_bseti_df \ +helper_msa_bnegi_df \ +helper_msa_sat_s_df \ +helper_msa_sat_u_df \ +helper_msa_srari_df \ +helper_msa_srlri_df \ +helper_msa_binsli_df \ +helper_msa_binsri_df \ +helper_msa_subv_df \ +helper_msa_subs_s_df \ +helper_msa_subs_u_df \ +helper_msa_subsus_u_df \ +helper_msa_subsuu_s_df \ +helper_msa_mulv_df \ +helper_msa_dotp_s_df \ +helper_msa_dotp_u_df \ +helper_msa_mul_q_df \ +helper_msa_mulr_q_df \ +helper_msa_sld_df \ +helper_msa_maddv_df \ +helper_msa_msubv_df \ +helper_msa_dpadd_s_df \ +helper_msa_dpadd_u_df \ +helper_msa_dpsub_s_df \ +helper_msa_dpsub_u_df \ +helper_msa_binsl_df \ +helper_msa_binsr_df \ +helper_msa_madd_q_df \ +helper_msa_msub_q_df \ +helper_msa_maddr_q_df \ +helper_msa_msubr_q_df \ +helper_msa_splat_df \ +helper_msa_vshf_df \ +helper_msa_sldi_df \ +helper_msa_splati_df \ +helper_msa_copy_s_b \ +helper_msa_copy_s_h \ +helper_msa_copy_s_w \ +helper_msa_copy_s_d \ +helper_msa_copy_u_b \ +helper_msa_copy_u_h \ +helper_msa_copy_u_w \ +helper_msa_insert_b \ +helper_msa_insert_h \ +helper_msa_insert_w \ +helper_msa_insert_d \ +helper_msa_insve_df \ +helper_msa_ctcmsa \ +helper_msa_cfcmsa \ +helper_msa_fill_df \ +helper_msa_fcaf_df \ +helper_msa_fcun_df \ +helper_msa_fceq_df \ +helper_msa_fcueq_df \ +helper_msa_fclt_df \ +helper_msa_fcult_df \ +helper_msa_fcle_df \ +helper_msa_fcule_df \ +helper_msa_fsaf_df \ +helper_msa_fsun_df \ +helper_msa_fseq_df \ +helper_msa_fsueq_df \ +helper_msa_fslt_df \ +helper_msa_fsult_df \ +helper_msa_fsle_df \ +helper_msa_fsule_df \ +helper_msa_fcor_df \ +helper_msa_fcune_df \ +helper_msa_fcne_df \ +helper_msa_fsor_df \ +helper_msa_fsune_df \ +helper_msa_fsne_df \ +helper_msa_fadd_df \ +helper_msa_fsub_df \ +helper_msa_fmul_df \ +helper_msa_fdiv_df \ +helper_msa_fmadd_df \ +helper_msa_fmsub_df \ +helper_msa_fexp2_df \ +helper_msa_fexdo_df \ +helper_msa_ftq_df \ +helper_msa_fmin_df \ +helper_msa_fmin_a_df \ +helper_msa_fmax_df \ +helper_msa_fmax_a_df \ +helper_msa_fclass_df \ +helper_msa_ftrunc_s_df \ +helper_msa_ftrunc_u_df \ +helper_msa_fsqrt_df \ +helper_msa_frsqrt_df \ +helper_msa_frcp_df \ +helper_msa_frint_df \ +helper_msa_flog2_df \ +helper_msa_fexupl_df \ +helper_msa_fexupr_df \ +helper_msa_ffql_df \ +helper_msa_ffqr_df \ +helper_msa_ftint_s_df \ +helper_msa_ftint_u_df \ +helper_msa_ffint_s_df \ +helper_msa_ffint_u_df \ +helper_raise_exception_err \ +helper_raise_exception \ +helper_raise_exception_debug \ +helper_muls \ +helper_mulsu \ +helper_macc \ +helper_macchi \ +helper_maccu \ +helper_macchiu \ +helper_msac \ +helper_msachi \ +helper_msacu \ +helper_msachiu \ +helper_mulhi \ +helper_mulhiu \ +helper_mulshi \ +helper_mulshiu \ +helper_dbitswap \ +helper_bitswap \ +helper_rotx \ +helper_ll \ +helper_lld \ +helper_swl \ +helper_swr \ +helper_sdl \ +helper_sdr \ +helper_lwm \ +helper_swm \ +helper_ldm \ +helper_sdm \ +helper_fork \ +helper_yield \ +r4k_helper_tlbinv \ +r4k_helper_tlbinvf \ +r4k_helper_tlbwi \ +r4k_helper_tlbwr \ +r4k_helper_tlbp \ +r4k_helper_tlbr \ +helper_tlbwi \ +helper_tlbwr \ +helper_tlbp \ +helper_tlbr \ +helper_tlbinv \ +helper_tlbinvf \ +helper_ginvt \ +helper_di \ +helper_ei \ +helper_eret \ +helper_eretnc \ +helper_deret \ 
+helper_rdhwr_cpunum \ +helper_rdhwr_synci_step \ +helper_rdhwr_cc \ +helper_rdhwr_ccres \ +helper_rdhwr_performance \ +helper_rdhwr_xnp \ +helper_pmon \ +helper_wait \ +mips_cpu_do_unaligned_access \ +mips_cpu_do_transaction_failed \ +helper_msa_ld_b \ +helper_msa_ld_h \ +helper_msa_ld_w \ +helper_msa_ld_d \ +helper_msa_st_b \ +helper_msa_st_h \ +helper_msa_st_w \ +helper_msa_st_d \ +helper_cache \ +gen_intermediate_code \ +mips_tcg_init \ +cpu_mips_realize_env \ +cpu_state_reset \ +restore_state_to_opc \ +mips_reg_reset \ +mips_reg_read \ +mips_reg_write \ +ieee_rm \ +mips_defs \ +mips_defs_number \ +gen_helper_float_class_s \ +gen_helper_float_class_d \ +" + +mipsel_SYMBOLS=${mips_SYMBOLS} + +mips64_SYMBOLS=${mips_SYMBOLS} + +mips64el_SYMBOLS=${mips_SYMBOLS} + +sparc_SYMBOLS=" +helper_compute_psr \ +helper_compute_C_icc \ +cpu_sparc_set_id \ +cpu_sparc_init \ +helper_check_ieee_exceptions \ +helper_fadds \ +helper_faddd \ +helper_faddq \ +helper_fsubs \ +helper_fsubd \ +helper_fsubq \ +helper_fmuls \ +helper_fmuld \ +helper_fmulq \ +helper_fdivs \ +helper_fdivd \ +helper_fdivq \ +helper_fsmuld \ +helper_fsmulq \ +helper_fdmulq \ +helper_fnegs \ +helper_fnegd \ +helper_fnegq \ +helper_fitos \ +helper_fitod \ +helper_fitoq \ +helper_fxtos \ +helper_fxtod \ +helper_fxtoq \ +helper_fdtos \ +helper_fstod \ +helper_fqtos \ +helper_fstoq \ +helper_fqtod \ +helper_fdtoq \ +helper_fstoi \ +helper_fdtoi \ +helper_fqtoi \ +helper_fstox \ +helper_fdtox \ +helper_fqtox \ +helper_fabss \ +helper_fabsd \ +helper_fabsq \ +helper_fsqrts \ +helper_fsqrtd \ +helper_fsqrtq \ +helper_fcmps \ +helper_fcmpd \ +helper_fcmpes \ +helper_fcmped \ +helper_fcmpq \ +helper_fcmpeq \ +helper_fcmps_fcc1 \ +helper_fcmpd_fcc1 \ +helper_fcmpq_fcc1 \ +helper_fcmps_fcc2 \ +helper_fcmpd_fcc2 \ +helper_fcmpq_fcc2 \ +helper_fcmps_fcc3 \ +helper_fcmpd_fcc3 \ +helper_fcmpq_fcc3 \ +helper_fcmpes_fcc1 \ +helper_fcmped_fcc1 \ +helper_fcmpeq_fcc1 \ +helper_fcmpes_fcc2 \ +helper_fcmped_fcc2 \ +helper_fcmpeq_fcc2 \ +helper_fcmpes_fcc3 \ +helper_fcmped_fcc3 \ +helper_fcmpeq_fcc3 \ +helper_ldfsr \ +helper_ldxfsr \ +cpu_raise_exception_ra \ +helper_raise_exception \ +helper_debug \ +helper_tick_set_count \ +helper_tick_get_count \ +helper_tick_set_limit \ +helper_udiv \ +helper_udiv_cc \ +helper_sdiv \ +helper_sdiv_cc \ +helper_sdivx \ +helper_udivx \ +helper_taddcctv \ +helper_tsubcctv \ +helper_power_down \ +sparc_cpu_do_interrupt \ +leon3_irq_manager \ +sparc_cpu_do_interrupt \ +cpu_tsptr \ +helper_set_softint \ +helper_clear_softint \ +helper_write_softint \ +helper_check_align \ +helper_ld_asi \ +helper_st_asi \ +sparc_cpu_do_transaction_failed \ +sparc_cpu_do_unaligned_access \ +sparc_cpu_tlb_fill \ +mmu_probe \ +sparc_cpu_memory_rw_debug \ +cpu_get_phys_page_nofault \ +sparc_cpu_get_phys_page_debug \ +gen_intermediate_code \ +sparc_tcg_init \ +restore_state_to_opc \ +cpu_set_cwp \ +cpu_get_psr \ +cpu_put_psr_raw \ +cpu_put_psr \ +cpu_cwp_inc \ +cpu_cwp_dec \ +helper_rett \ +helper_save \ +helper_restore \ +helper_flushw \ +helper_saved \ +helper_restored \ +helper_wrpsr \ +helper_rdpsr \ +cpu_get_ccr \ +cpu_put_ccr \ +cpu_get_cwp64 \ +cpu_put_cwp64 \ +helper_rdccr \ +helper_wrccr \ +helper_rdcwp \ +helper_wrcwp \ +cpu_gl_switch_gregs \ +helper_wrgl \ +cpu_change_pstate \ +helper_wrpstate \ +helper_wrpil \ +helper_done \ +helper_retry \ +sparc_reg_reset \ +sparc_reg_read \ +sparc_reg_write \ +" + +sparc64_SYMBOLS=${sparc_SYMBOLS} + +m68k_SYMBOLS=" +cpu_m68k_init \ +helper_reds32 \ +helper_redf32 \ +helper_exts32 \ +helper_extf32 \ 
+helper_extf64 \ +helper_redf64 \ +helper_firound \ +cpu_m68k_set_fpcr \ +helper_fitrunc \ +helper_set_fpcr \ +helper_fsround \ +helper_fdround \ +helper_fsqrt \ +helper_fssqrt \ +helper_fdsqrt \ +helper_fabs \ +helper_fsabs \ +helper_fdabs \ +helper_fneg \ +helper_fsneg \ +helper_fdneg \ +helper_fadd \ +helper_fsadd \ +helper_fdadd \ +helper_fsub \ +helper_fssub \ +helper_fdsub \ +helper_fmul \ +helper_fsmul \ +helper_fdmul \ +helper_fsglmul \ +helper_fdiv \ +helper_fsdiv \ +helper_fddiv \ +helper_fsgldiv \ +helper_fcmp \ +helper_ftst \ +helper_fconst \ +helper_fmovemx_st_predec \ +helper_fmovemx_st_postinc \ +helper_fmovemx_ld_postinc \ +helper_fmovemd_st_predec \ +helper_fmovemd_st_postinc \ +helper_fmovemd_ld_postinc \ +helper_fmod \ +helper_frem \ +helper_fgetexp \ +helper_fgetman \ +helper_fscale \ +helper_flognp1 \ +helper_flogn \ +helper_flog10 \ +helper_flog2 \ +helper_fetox \ +helper_ftwotox \ +helper_ftentox \ +helper_ftan \ +helper_fsin \ +helper_fcos \ +helper_fsincos \ +helper_fatan \ +helper_fasin \ +helper_facos \ +helper_fatanh \ +helper_ftanh \ +helper_fsinh \ +helper_fcosh \ +helper_cf_movec_to \ +helper_m68k_movec_to \ +helper_m68k_movec_from \ +helper_set_macsr \ +m68k_switch_sp \ +m68k_cpu_get_phys_page_debug \ +m68k_set_irq_level \ +m68k_cpu_tlb_fill \ +helper_bitrev \ +helper_ff1 \ +helper_sats \ +cpu_m68k_set_sr \ +helper_set_sr \ +helper_mac_move \ +helper_macmuls \ +helper_macmulu \ +helper_macmulf \ +helper_macsats \ +helper_macsatu \ +helper_macsatf \ +helper_mac_set_flags \ +cpu_m68k_get_ccr \ +helper_get_ccr \ +cpu_m68k_set_ccr \ +helper_set_ccr \ +helper_flush_flags \ +helper_get_macf \ +helper_get_macs \ +helper_get_macu \ +helper_get_mac_extf \ +helper_get_mac_exti \ +helper_set_mac_extf \ +helper_set_mac_exts \ +helper_set_mac_extu \ +helper_ptest \ +helper_pflush \ +helper_reset \ +m68k_cpu_do_interrupt \ +m68k_cpu_transaction_failed \ +m68k_cpu_exec_interrupt \ +helper_raise_exception \ +helper_divuw \ +helper_divsw \ +helper_divul \ +helper_divsl \ +helper_divull \ +helper_divsll \ +helper_cas2w \ +helper_cas2l \ +helper_cas2l_parallel \ +helper_bfexts_mem \ +helper_bfextu_mem \ +helper_bfins_mem \ +helper_bfchg_mem \ +helper_bfclr_mem \ +helper_bfset_mem \ +helper_bfffo_reg \ +helper_bfffo_mem \ +helper_chk \ +helper_chk2 \ +floatx80_mod \ +floatx80_getman \ +floatx80_getexp \ +floatx80_scale \ +floatx80_move \ +floatx80_lognp1 \ +floatx80_logn \ +floatx80_log10 \ +floatx80_log2 \ +floatx80_etox \ +floatx80_twotox \ +floatx80_tentox \ +floatx80_tan \ +floatx80_sin \ +floatx80_cos \ +floatx80_atan \ +floatx80_asin \ +floatx80_acos \ +floatx80_atanh \ +floatx80_etoxm1 \ +floatx80_tanh \ +floatx80_sinh \ +floatx80_cosh \ +m68k_tcg_init \ +register_m68k_insns \ +gen_intermediate_code \ +restore_state_to_opc \ +m68k_reg_reset \ +m68k_reg_read \ +m68k_reg_write \ +" + +ppc_SYMBOLS=" +ppc_cpu_unrealize \ +ppc_cpu_instance_finalize \ +ppc_reg_reset \ +ppc_reg_read \ +ppc_reg_write \ +ppc_cpu_do_interrupt \ +ppc_cpu_do_system_reset \ +ppc_cpu_do_fwnmi_machine_check \ +ppc_cpu_exec_interrupt \ +raise_exception_err_ra \ +raise_exception_err \ +raise_exception \ +raise_exception_ra \ +helper_raise_exception_err \ +helper_store_msr \ +helper_rfi \ +helper_40x_rfci \ +helper_rfdi \ +helper_rfci \ +helper_rfmci \ +helper_tw \ +helper_rfsvc \ +helper_msgclr \ +helper_msgsnd \ +helper_book3s_msgclr \ +ppc_cpu_do_unaligned_access \ +helper_divweu \ +helper_divwe \ +helper_sraw \ +helper_popcntb \ +helper_div \ +helper_divo \ +helper_divs \ +helper_divso \ 
+helper_602_mfrom \ +helper_mtvscr \ +helper_vaddcuw \ +helper_vprtybw \ +helper_vprtybd \ +helper_vprtybq \ +helper_vmuluwm \ +helper_vaddfp \ +helper_vsubfp \ +helper_vminfp \ +helper_vmaxfp \ +helper_vmaddfp \ +helper_vnmsubfp \ +helper_vaddsbs \ +helper_vsubsbs \ +helper_vsubshs \ +helper_vaddsws \ +helper_vsubsws \ +helper_vaddubs \ +helper_vsububs \ +helper_vadduhs \ +helper_vsubuhs \ +helper_vadduws \ +helper_vsubuws \ +helper_vavgsb \ +helper_vavgub \ +helper_vavgsh \ +helper_vavguh \ +helper_vavgsw \ +helper_vabsdub \ +helper_vabsduh \ +helper_vabsduw \ +helper_vcfux \ +helper_vcfsx \ +helper_vcmpequb \ +helper_vcmpequb_dot \ +helper_vcmpequw \ +helper_vcmpequw_dot \ +helper_vcmpequd \ +helper_vcmpequd_dot \ +helper_vcmpgtub \ +helper_vcmpgtub_dot \ +helper_vcmpgtuh \ +helper_vcmpgtuh_dot \ +helper_vcmpgtuw \ +helper_vcmpgtuw_dot \ +helper_vcmpgtud \ +helper_vcmpgtud_dot \ +helper_vcmpgtud \ +helper_vcmpgtud_dot \ +helper_vcmpgtsb \ +helper_vcmpgtsb_dot \ +helper_vcmpgtsh \ +helper_vcmpgtsh_dot \ +helper_vcmpgtsw \ +helper_vcmpgtsw_dot \ +helper_vcmpgtsd \ +helper_vcmpgtsd_dot \ +helper_vcmpnezb \ +helper_vcmpnezb_dot \ +helper_vcmpnezb \ +helper_vcmpnezb_dot \ +helper_vcmpnezw \ +helper_vcmpnezw_dot \ +helper_vcmpneb \ +helper_vcmpneb_dot \ +helper_vcmpneb \ +helper_vcmpneb_dot \ +helper_vcmpneh \ +helper_vcmpneh_dot \ +helper_vcmpnew \ +helper_vcmpnew_dot \ +helper_vcmpeqfp \ +helper_vcmpeqfp_dot \ +helper_vcmpgefp \ +helper_vcmpgefp_dot \ +helper_vcmpgtfp \ +helper_vcmpgtfp_dot \ +helper_vcmpbfp \ +helper_vcmpbfp_dot \ +helper_vcmpbfp \ +helper_vcmpbfp_dot \ +helper_vctuxs \ +helper_vctsxs \ +helper_vclzlsbb \ +helper_vctzlsbb \ +helper_vmhaddshs \ +helper_vmhraddshs \ +helper_vmladduhm \ +helper_vmhraddshs \ +helper_vmladduhm \ +helper_vmrglb \ +helper_vmrghb \ +helper_vmrglh \ +helper_vmrghh \ +helper_vmrglw \ +helper_vmrghw \ +helper_vmsummbm \ +helper_vmsumshs \ +helper_vmsumubm \ +helper_vmsumuhm \ +helper_vmulesb \ +helper_vmulosb \ +helper_vmulesh \ +helper_vmulesw \ +helper_vmuleub \ +helper_vmuloub \ +helper_vmuleuh \ +helper_vmulouh \ +helper_vmuleuw \ +helper_vmulouw \ +helper_vperm \ +helper_vpermr \ +helper_vbpermd \ +helper_vpmsumb \ +helper_vpmsumh \ +helper_vpmsumw \ +helper_vpmsumd \ +helper_vpkpx \ +helper_vpkshss \ +helper_vpkshus \ +helper_vpkswss \ +helper_vpkswus \ +helper_vpksdss \ +helper_vpksdus \ +helper_vpkuhus \ +helper_vpkuwus \ +helper_vpkudus \ +helper_vpkuhum \ +helper_vpkuwum \ +helper_vpkudum \ +helper_vrefp \ +helper_vrfin \ +helper_vrfim \ +helper_vrfip \ +helper_vrfiz \ +helper_vrlb \ +helper_vrlh \ +helper_vrlw \ +helper_vrld \ +helper_vrsqrtefp \ +helper_vrldmi \ +helper_vrlwmi \ +helper_vrldnm \ +helper_vrlwnm \ +helper_vsel \ +helper_vexptefp \ +helper_vlogefp \ +helper_vextublx \ +helper_vextuhlx \ +helper_vextuwlx \ +helper_vextubrx \ +helper_vextuhrx \ +helper_vextuwrx \ +helper_vslv \ +helper_vsrv \ +helper_vsldoi \ +helper_vslo \ +helper_vinsertb \ +helper_vinserth \ +helper_vinsertw \ +helper_vinsertd \ +helper_vextractub \ +helper_vextractuh \ +helper_vextractuw \ +helper_vextractd \ +helper_xxextractuw \ +helper_xxinsertw \ +helper_vextsb2w \ +helper_vextsb2d \ +helper_vextsh2w \ +helper_vextsh2d \ +helper_vnegw \ +helper_vnegd \ +helper_vsro \ +helper_vsubcuw \ +helper_vsumsws \ +helper_vsum2sws \ +helper_vsum4sbs \ +helper_vsum4shs \ +helper_vsum4ubs \ +helper_vupklpx \ +helper_vupkhpx \ +helper_vupkhsb \ +helper_vupkhsh \ +helper_vupkhsw \ +helper_vupklsb \ +helper_vupklsh \ +helper_vupklsw \ +helper_vclzb \ +helper_vclzh \ 
+helper_vctzb \ +helper_vctzh \ +helper_vctzw \ +helper_vctzd \ +helper_vpopcntb \ +helper_vpopcnth \ +helper_vpopcntw \ +helper_vpopcntd \ +helper_vadduqm \ +helper_vaddeuqm \ +helper_vaddcuq \ +helper_vaddecuq \ +helper_vsubuqm \ +helper_vsubeuqm \ +helper_vsubcuq \ +helper_vsubecuq \ +helper_bcdadd \ +helper_bcdsub \ +helper_bcdcfn \ +helper_bcdctn \ +helper_bcdcfz \ +helper_bcdctz \ +helper_bcdcfsq \ +helper_bcdctsq \ +helper_bcdcpsgn \ +helper_bcdsetsgn \ +helper_bcds \ +helper_bcdus \ +helper_bcdsr \ +helper_bcdtrunc \ +helper_bcdutrunc \ +helper_vsbox \ +helper_vcipher \ +helper_vcipherlast \ +helper_vncipher \ +helper_vncipherlast \ +helper_vshasigmaw \ +helper_vshasigmad \ +helper_vpermxor \ +helper_brinc \ +helper_cntlsw32 \ +helper_cntlzw32 \ +helper_dlmzb \ +helper_lmw \ +helper_lsw \ +helper_lswx \ +helper_stsw \ +helper_dcbz \ +helper_dcbzep \ +helper_icbi \ +helper_icbiep \ +helper_lscbx \ +helper_lvebx \ +helper_lvehx \ +helper_lvewx \ +helper_stvebx \ +helper_stvehx \ +helper_stvewx \ +helper_tbegin \ +helper_load_dump_spr \ +helper_store_dump_spr \ +helper_hfscr_facility_check \ +helper_fscr_facility_check \ +helper_msr_facility_check \ +helper_store_sdr1 \ +helper_store_pidr \ +helper_store_lpidr \ +helper_store_hid0_601 \ +helper_store_403_pbr \ +helper_store_40x_dbcr0 \ +helper_store_40x_sler \ +helper_clcs \ +ppc_store_msr \ +helper_fixup_thrm \ +store_40x_sler \ +dump_mmu \ +ppc_cpu_get_phys_page_debug \ +helper_store_ibatu \ +helper_store_ibatl \ +helper_store_dbatu \ +helper_store_dbatl \ +helper_store_601_batu \ +helper_store_601_batl \ +ppc_tlb_invalidate_all \ +ppc_tlb_invalidate_one \ +ppc_store_sdr1 \ +helper_load_sr \ +helper_store_sr \ +helper_tlbia \ +helper_tlbie \ +helper_tlbiva \ +helper_6xx_tlbd \ +helper_6xx_tlbi \ +helper_74xx_tlbd \ +helper_74xx_tlbi \ +helper_rac \ +helper_4xx_tlbre_hi \ +helper_4xx_tlbre_lo \ +helper_4xx_tlbwe_hi \ +helper_4xx_tlbwe_lo \ +helper_4xx_tlbsx \ +helper_440_tlbwe \ +helper_440_tlbre \ +helper_440_tlbsx \ +helper_booke_setpid \ +helper_booke_set_eplc \ +helper_booke_set_epsc \ +helper_booke206_tlbwe \ +helper_booke206_tlbre \ +helper_booke206_tlbsx \ +helper_booke206_tlbivax \ +helper_booke206_tlbilx0 \ +helper_booke206_tlbilx1 \ +helper_booke206_tlbilx3 \ +helper_booke206_tlbflush \ +helper_check_tlb_flush_local \ +helper_check_tlb_flush_global \ +ppc_cpu_tlb_fill \ +helper_load_tbl \ +helper_load_tbu \ +helper_load_atbl \ +helper_load_atbu \ +helper_load_vtb \ +helper_load_601_rtcl \ +helper_load_601_rtcu \ +helper_store_tbl \ +helper_store_tbu \ +helper_store_atbl \ +helper_store_atbu \ +helper_store_601_rtcl \ +helper_store_601_rtcu \ +helper_load_decr \ +helper_store_decr \ +helper_load_hdecr \ +helper_store_hdecr \ +helper_store_vtb \ +helper_store_tbu40 \ +helper_load_40x_pit \ +helper_store_40x_pit \ +helper_store_booke_tcr \ +helper_store_booke_tsr \ +helper_load_dcr \ +helper_store_dcr \ +helper_raise_exception \ +helper_book3s_msgsnd \ +helper_cmpb \ +helper_mfvscr \ +helper_vaddshs \ +helper_vavguw \ +helper_vcmpequh \ +helper_vcmpequh_dot \ +helper_vcmpnezh \ +helper_vcmpnezh_dot \ +helper_vmsumshm \ +helper_vmsumuhs \ +helper_vmulosh \ +helper_vmulosw \ +helper_vbpermq \ +helper_vextsw2d \ +helper_stmw \ +ppc_translate_init \ +cpu_ppc_init \ +gen_intermediate_code \ +restore_state_to_opc \ +ppc_set_irq \ +ppc6xx_irq_init \ +ppc40x_core_reset \ +ppc40x_chip_reset \ +ppc40x_system_reset \ +store_40x_dbcr0 \ +ppc40x_irq_init \ +ppce500_irq_init \ +ppce500_set_mpic_proxy \ +cpu_ppc_get_tb \ +cpu_ppc_load_tbl \ 
+cpu_ppc_load_tbu \ +cpu_ppc_store_tbl \ +cpu_ppc_store_tbu \ +cpu_ppc_load_atbl \ +cpu_ppc_load_atbu \ +cpu_ppc_store_atbl \ +cpu_ppc_store_atbu \ +cpu_ppc_load_vtb \ +cpu_ppc_store_vtb \ +cpu_ppc_store_tbu40 \ +ppc_decr_clear_on_delivery \ +cpu_ppc_load_decr \ +cpu_ppc_load_hdecr \ +cpu_ppc_load_purr \ +cpu_ppc_store_decr \ +cpu_ppc_store_hdecr \ +cpu_ppc_store_purr \ +cpu_ppc_tb_init \ +cpu_ppc601_load_rtcu \ +cpu_ppc601_store_rtcu \ +cpu_ppc601_load_rtcl \ +cpu_ppc601_store_rtcl \ +load_40x_pit \ +store_40x_pit \ +ppc_40x_timers_init \ +ppc_dcr_read \ +ppc_dcr_write \ +ppc_dcr_register \ +ppc_dcr_init \ +ppc_cpu_pir \ +ppc_irq_reset \ +" + +ppc64_SYMBOLS=${ppc_SYMBOLS} + +ARCHS="x86_64 arm armeb aarch64 aarch64eb riscv32 riscv64 mips mipsel mips64 mips64el sparc sparc64 m68k ppc ppc64" + +for arch in $ARCHS; do + +echo "Generating header for $arch" +echo "/* Autogen header for Unicorn Engine - DONOT MODIFY */" > $SOURCE_DIR/qemu/$arch.h +echo "#ifndef UNICORN_AUTOGEN_${arch}_H" >> $SOURCE_DIR/qemu/$arch.h +echo "#define UNICORN_AUTOGEN_${arch}_H" >> $SOURCE_DIR/qemu/$arch.h +echo "#ifndef UNICORN_ARCH_POSTFIX" >> $SOURCE_DIR/qemu/$arch.h +echo "#define UNICORN_ARCH_POSTFIX _$arch" >> $SOURCE_DIR/qemu/$arch.h +echo "#endif" >> $SOURCE_DIR/qemu/$arch.h + +for loop in $COMMON_SYMBOLS; do + echo "#define $loop ${loop}_${arch}" >> $SOURCE_DIR/qemu/$arch.h +done + +ARCH_SYMBOLS=$(eval echo '$'"${arch}_SYMBOLS") + +#echo ${ARCH_SYMBOLS} + +for loop in $ARCH_SYMBOLS; do + echo "#define $loop ${loop}_${arch}" >> $SOURCE_DIR/qemu/$arch.h +done + +echo "#endif" >> $SOURCE_DIR/qemu/$arch.h + +done diff --git a/tests/regress/arm_apsr_access.py b/tests/regress/arm_apsr_access.py deleted file mode 100644 index 33c10fd9..00000000 --- a/tests/regress/arm_apsr_access.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/python - -from unicorn import * -from unicorn.arm_const import * - -import regress - -class APSRAccess(regress.RegressTest): - - def runTest(self): - code = ( - b'\x00\x00\xa0\xe1' + # 0: mov r0, r0 - b'\x08\x10\x9f\xe5' + # 4: ldr r1, [pc, #8] - b'\x01\xf0\x28\xe1' + # 8: 01 f0 28 e1 msr apsr_nzcvq, r1 - b'\x00\x00\xa0\xe1' + # c: mov r0, r0 - b'\x00\x00\xa0\xe1' + # 10: mov r0, r0 - b'\x00\x00\x00\xff') # 14: data for inst @4 - - uc = Uc(UC_ARCH_ARM, UC_MODE_ARM) - uc.mem_map(0x1000, 0x1000) - uc.mem_write(0x1000, code) # bxeq lr; mov r0, r0 - - uc.reg_write(UC_ARM_REG_APSR, 0) - uc.emu_start(0x1000, 0x100c) - - self.assertEqual(uc.reg_read(UC_ARM_REG_APSR), 0xf8000000) - self.assertEqual(uc.reg_read(UC_ARM_REG_APSR_NZCV), 0xf0000000) - -if __name__ == '__main__': - regress.main() diff --git a/tests/regress/arm_memcpy_neon.py b/tests/regress/arm_memcpy_neon.py new file mode 100644 index 00000000..a764ff3d --- /dev/null +++ b/tests/regress/arm_memcpy_neon.py @@ -0,0 +1,52 @@ +from unicorn import * +from unicorn.arm_const import * + +# .text:0001F894 ADD PC, PC, R3 +# .text:0001F898 ; --------------------------------------------------------------------------- +# .text:0001F898 VLD1.8 {D0}, [R1]! +# .text:0001F89C VST1.8 {D0}, [R12]! +# .text:0001F8A0 VLD1.8 {D0}, [R1]! +# .text:0001F8A4 VST1.8 {D0}, [R12]! +# .text:0001F8A8 VLD1.8 {D0}, [R1]! +# .text:0001F8AC VST1.8 {D0}, [R12]! +# .text:0001F8B0 VLD1.8 {D0}, [R1]! +# .text:0001F8B4 VST1.8 {D0}, [R12]! +# .text:0001F8B8 VLD1.8 {D0}, [R1]! +# .text:0001F8BC VST1.8 {D0}, [R12]! +# .text:0001F8C0 VLD1.8 {D0}, [R1]! +# .text:0001F8C4 VST1.8 {D0}, [R12]! +# .text:0001F8C8 VLD1.8 {D0}, [R1]! +# .text:0001F8CC VST1.8 {D0}, [R12]! 
+# .text:0001F8D0 TST R2, #4
+# .text:0001F8D4 LDRNE R3, [R1],#4
+# .text:0001F8D8 STRNE R3, [R12],#4
+# .text:0001F8DC MOVS R2, R2,LSL#31
+# .text:0001F8E0 LDRHCS R3, [R1],#2
+# .text:0001F8E4 LDRBNE R1, [R1]
+# .text:0001F8E8 STRHCS R3, [R12],#2
+# .text:0001F8EC STRBNE R1, [R12]
+shellcode = [0x3, 0xf0, 0x8f, 0xe0, 0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4, 0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4, 0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4, 0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4, 0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4, 0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4, 0xd, 0x7, 0x21, 0xf4, 0xd, 0x7, 0xc, 0xf4, 0x4, 0x0, 0x12, 0xe3, 0x4, 0x30, 0x91, 0x14, 0x4, 0x30, 0x8c, 0x14, 0x82, 0x2f, 0xb0, 0xe1, 0xb2, 0x30, 0xd1, 0x20, 0x0, 0x10, 0xd1, 0x15, 0xb2, 0x30, 0xcc, 0x20, 0x0, 0x10, 0xcc, 0x15]
+base = 0x1F894
+from_address = 0x1000
+to_address = 0x2000
+cplen = 8
+bs = b"c8"*cplen
+
+uc = Uc(UC_ARCH_ARM, UC_MODE_ARM)
+uc.mem_map(from_address, 0x1000)
+uc.mem_map(to_address, 0x1000)
+uc.mem_map(0x1F000, 0x1000)
+uc.mem_write(from_address, bs)
+uc.mem_write(base, bytes(shellcode))
+uc.reg_write(UC_ARM_REG_R12, to_address)
+uc.reg_write(UC_ARM_REG_R1, from_address)
+uc.reg_write(UC_ARM_REG_R2, cplen)
+uc.reg_write(UC_ARM_REG_R3, 0x24)
+# enable_vfp
+uc.reg_write(UC_ARM_REG_C1_C0_2, uc.reg_read(UC_ARM_REG_C1_C0_2) | (0xf << 20))
+uc.reg_write(UC_ARM_REG_FPEXC, 0x40000000)
+
+uc.emu_start(base, base+len(shellcode))
+fr = uc.mem_read(from_address, len(bs))
+to = uc.mem_read(to_address, len(bs))
+print(f"memcpy result:\nfrom: {bytes(fr)}\nto: {bytes(to)}")
\ No newline at end of file
diff --git a/tests/regress/arm_wfi_first_insn_of_tb.py b/tests/regress/arm_wfi_first_insn_of_tb.py
new file mode 100644
index 00000000..65fe92a3
--- /dev/null
+++ b/tests/regress/arm_wfi_first_insn_of_tb.py
@@ -0,0 +1,15 @@
+from unicorn import *
+from unicorn.arm_const import *
+
+# ADD R0, R10, R0;
+# B L0;
+# L0:
+# ADD R0, R10, R0;  <--- we stop at here, the first instruction of the next TB.
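+#
+# Note on the stop address: `address + len(code) - 4` below is the last ADD.
+# The branch closes the first translation block, so that ADD is the first
+# instruction of a fresh TB, and emulation must still stop exactly on it
+# without executing past it.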
+
+code = b'\x00\x00\x8a\xe0\xff\xff\xff\xea\x00\x00\x8a\xe0'
+address = 0x1000
+
+mu = Uc(UC_ARCH_ARM, UC_MODE_ARM)
+mu.mem_map(address, 0x1000)
+mu.mem_write(address, code)
+mu.emu_start(address, address + len(code) - 4)
\ No newline at end of file
diff --git a/tests/regress/hook_raises_exception.py b/tests/regress/hook_raises_exception.py
deleted file mode 100644
index 72f846be..00000000
--- a/tests/regress/hook_raises_exception.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import regress
-from unicorn import Uc, UC_ARCH_X86, UC_MODE_64, UC_HOOK_CODE
-
-CODE = b"\x90" * 3
-CODE_ADDR = 0x1000
-
-
-class HookCounter(object):
-    """Counts number of hook calls."""
-
-    def __init__(self):
-        self.hook_calls = 0
-
-    def bad_code_hook(self, uc, address, size, data):
-        self.hook_calls += 1
-        raise ValueError("Something went wrong")
-
-    def good_code_hook(self, uc, address, size, data):
-        self.hook_calls += 1
-
-
-class TestExceptionInHook(regress.RegressTest):
-
-    def test_exception_in_hook(self):
-        uc = Uc(UC_ARCH_X86, UC_MODE_64)
-        uc.mem_map(CODE_ADDR, 0x1000)
-        uc.mem_write(CODE_ADDR, CODE)
-
-        counter = HookCounter()
-        uc.hook_add(UC_HOOK_CODE, counter.bad_code_hook, begin=CODE_ADDR, end=CODE_ADDR + len(CODE))
-        uc.hook_add(UC_HOOK_CODE, counter.good_code_hook, begin=CODE_ADDR, end=CODE_ADDR + len(CODE))
-
-        self.assertRaises(ValueError, uc.emu_start, CODE_ADDR, CODE_ADDR + len(CODE))
-        # Make sure hooks calls finish before raising (hook_calls == 2)
-        self.assertEqual(counter.hook_calls, 2)
-
-
-if __name__ == "__main__":
-    regress.main()
diff --git a/tests/regress/mem_64_c.c b/tests/regress/mem_64_c.c
deleted file mode 100644
index 9e84242c..00000000
--- a/tests/regress/mem_64_c.c
+++ /dev/null
@@ -1,39 +0,0 @@
-#include <unicorn/unicorn.h>
-#include <inttypes.h>
-
-uint64_t starts[] = {0x10000000, 0x110004000ll};
-
-int main(int argc, char **argv, char **envp) {
-    uc_engine *uc;
-    uc_err err;
-    int i;
-    // Initialize emulator in X86-64bit mode
-    err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc);
-    if (err) {
-        printf("Failed on uc_open() with error returned: %u\n", err);
-        return 1;
-    }
-
-    for (i = 0; i < (sizeof(starts) / sizeof(uint64_t)); i++) {
-        uc_mem_map(uc, starts[i], 4096, UC_PROT_ALL);
-    }
-
-    uint32_t count;
-    uc_mem_region *regions;
-    int err_count = 0;
-    err = uc_mem_regions(uc, &regions, &count);
-    if (err == UC_ERR_OK) {
-        for (i = 0; i < count; i++) {
-            fprintf(stderr, "region %d: 0x%"PRIx64"-0x%"PRIx64" (%d)\n", i, regions[i].begin, regions[i].end - 1, regions[i].perms);
-            if (regions[i].begin != starts[i]) {
-                err_count++;
-                fprintf(stderr, " ERROR: region start does not match requested start address, expected 0x%"PRIx64", found 0x%"PRIx64"\n",
-                        starts[i], regions[i].begin);
-            }
-        }
-        uc_free(regions);
-    }
-
-    uc_close(uc);
-    return err_count;
-}
diff --git a/tests/regress/mem_double_unmap.c b/tests/regress/mem_double_unmap.c
deleted file mode 100644
index a592f8a3..00000000
--- a/tests/regress/mem_double_unmap.c
+++ /dev/null
@@ -1,48 +0,0 @@
-#define __STDC_FORMAT_MACROS
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-
-#include <unicorn/unicorn.h>
-
-int main(int argc, char **argv, char **envp)
-{
-    uc_engine *uc;
-    uc_err err;
-
-    // Initialize emulator in X86-32bit mode
-    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
-    if (err) {
-        printf("not ok - Failed on uc_open() with error returned: %u\n", err);
-        return -1;
-    }
-
-    uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL);
-    if (err) {
-        printf("not ok - Failed on uc_mem_map() with error returned: %u\n", err);
-        return -1;
-    }
-
-    uc_mem_map(uc, 0x4000, 0x1000, UC_PROT_ALL);
-    if (err) {
-        printf("not 
ok - Failed on uc_mem_map() with error returned: %u\n", err); - return -1; - } - - err = uc_mem_unmap(uc, 0x4000, 0x1000); - if (err) { - printf("not ok - Failed on uc_mem_unmap() with error returned: %u\n", err); - return -1; - } - - err = uc_mem_unmap(uc, 0x4000, 0x1000); - if (!err) { - printf("not ok - second unmap succeeded\n"); - return -1; - } - - printf("Tests OK\n"); - uc_close(uc); - return 0; -} diff --git a/tests/regress/mem_exec.c b/tests/regress/mem_exec.c deleted file mode 100644 index 3c248ba4..00000000 --- a/tests/regress/mem_exec.c +++ /dev/null @@ -1,278 +0,0 @@ -/* - Executable memory regions demo / unit test - - Copyright(c) 2015 Chris Eagle - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - - */ - -#define __STDC_FORMAT_MACROS -#include -#include -#include -#include - -#include - -unsigned char PROGRAM[] = - "\xeb\x45\x5e\x81\xe6\x00\xf0\xff\xff\x40\x40\x40\x40\x40\x40\x40" - "\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40" - "\x40\x40\x40\x40\x40\x40\x40\x89\xf7\x81\xc7\x00\x00\x10\x00\xb9" - "\x4c\x00\x00\x00\x81\xff\x00\x00\x40\x00\x75\x01\xf4\xf3\xa4\x81" - "\xe7\x00\xf0\xff\xff\xff\xe7\xe8\xb6\xff\xff\xff"; - // total size: 76 bytes - -/* - bits 32 - - ; assumes r-x section at 0x100000 - ; assumes rw- section at 0x200000 - ; assumes r-- section at 0x300000 - ; also needs an initialized stack - -start: -jmp bottom -top: -pop esi -and esi, ~0xfff -times 30 inc eax -mov edi, esi -add edi, 0x100000 -mov ecx, end - start -rep movsb -and edi, ~0xfff -cmp edi, 0x400000 -jnz next_block -hlt -next_block: -jmp edi -bottom: -call top -end: - */ - -int test_num = 0; -uint32_t tests[] = { - 0x41414141, - 0x43434343, - 0x45454545 -}; - -static int log_num = 1; - -#define CODE_SECTION 0x100000 -#define CODE_SIZE 0x1000 - -// callback for tracing instruction -static void hook_code(uc_engine *uc, uint64_t addr, uint32_t size, void *user_data) -{ - uint8_t opcode; - - if (uc_mem_read(uc, addr, &opcode, 1) != UC_ERR_OK) { - printf("not ok %d - uc_mem_read fail during hook_code callback, addr: 0x%" PRIx64 "\n", log_num++, addr); - } - - // printf("ok %d - uc_mem_read for opcode at address 0x%" PRIx64 "\n", log_num++, addr); - switch (opcode) { - case 0xf4: //hlt - printf("# Handling HLT\n"); - if (uc_emu_stop(uc) != UC_ERR_OK) { - printf("not ok %d - uc_emu_stop fail during hook_code callback, addr: 0x%" PRIx64 "\n", log_num++, addr); - _exit(-1); - } else { - printf("ok %d - hlt encountered, uc_emu_stop called\n", log_num++); - } - break; - default: //all others - // printf("# Handling OTHER\n"); - break; - } -} - -// callback for tracing memory access (READ or WRITE) -static void hook_mem_write(uc_engine *uc, uc_mem_type type, - uint64_t addr, int size, int64_t value, void *user_data) -{ - printf("# write to memory at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", addr, size, value); -} - -// callback for tracing invalid memory access (READ or WRITE) -static bool 
hook_mem_invalid(uc_engine *uc, uc_mem_type type, - uint64_t addr, int size, int64_t value, void *user_data) -{ - switch(type) { - default: - printf("not ok %d - memory invalid type: %d at 0x%" PRIx64 "\n", log_num++, type, addr); - return false; - case UC_MEM_FETCH_PROT: - printf("# Fetch from non-executable memory at 0x%"PRIx64 "\n", addr); - - //make page executable - if (uc_mem_protect(uc, addr & ~0xfffL, 0x1000, UC_PROT_READ | UC_PROT_EXEC) != UC_ERR_OK) { - printf("not ok %d - uc_mem_protect fail for address: 0x%" PRIx64 "\n", log_num++, addr); - } else { - printf("ok %d - uc_mem_protect success at 0x%" PRIx64 "\n", log_num++, addr); - } - return true; - case UC_MEM_WRITE_PROT: - printf("# write to non-writeable memory at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", addr, size, value); - - if (uc_mem_protect(uc, addr & ~0xfffL, 0x1000, UC_PROT_READ | UC_PROT_WRITE) != UC_ERR_OK) { - printf("not ok %d - uc_mem_protect fail during hook_mem_invalid callback, addr: 0x%" PRIx64 "\n", log_num++, addr); - } else { - printf("ok %d - uc_mem_protect success\n", log_num++); - } - return true; - } -} - -int main(int argc, char **argv, char **envp) -{ - uc_engine *uc; - uc_hook trace1, trace2; - uc_err err; - uint32_t esp, eip; - int32_t buf1[1024], buf2[1024], readbuf[1024]; - int i; - - //don't really care about quality of randomness - srand(time(NULL)); - for (i = 0; i < 1024; i++) { - buf1[i] = rand(); - buf2[i] = rand(); - } - - printf("# Memory protect test\n"); - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err) { - printf("not ok %d - Failed on uc_open() with error returned: %u\n", log_num++, err); - return 1; - } else { - printf("ok %d - uc_open() success\n", log_num++); - } - - uc_mem_map(uc, 0x100000, 0x1000, UC_PROT_READ | UC_PROT_EXEC); - uc_mem_map(uc, 0x1ff000, 0x2000, UC_PROT_READ | UC_PROT_WRITE); - uc_mem_map(uc, 0x300000, 0x2000, UC_PROT_READ); - uc_mem_map(uc, 0xf00000, 0x1000, UC_PROT_READ | UC_PROT_WRITE); - - esp = 0xf00000 + 0x1000; - - // Setup stack pointer - if (uc_reg_write(uc, UC_X86_REG_ESP, &esp)) { - printf("not ok %d - Failed to set esp. 
quit!\n", log_num++); - return 2; - } else { - printf("ok %d - ESP set\n", log_num++); - } - - // fill in sections that shouldn't get touched - if (uc_mem_write(uc, 0x1ff000, buf1, sizeof(buf1))) { - printf("not ok %d - Failed to write random buffer 1 to memory, quit!\n", log_num++); - return 3; - } else { - printf("ok %d - Random buffer 1 written to memory\n", log_num++); - } - - if (uc_mem_write(uc, 0x301000, buf2, sizeof(buf2))) { - printf("not ok %d - Failed to write random buffer 2 to memory, quit!\n", log_num++); - return 4; - } else { - printf("ok %d - Random buffer 2 written to memory\n", log_num++); - } - - // write machine code to be emulated to memory - if (uc_mem_write(uc, 0x100000, PROGRAM, sizeof(PROGRAM))) { - printf("not ok %d - Failed to write emulation code to memory, quit!\n", log_num++); - return 5; - } else { - printf("ok %d - Program written to memory\n", log_num++); - } - - if (uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0) != UC_ERR_OK) { - printf("not ok %d - Failed to install UC_HOOK_CODE ucr\n", log_num++); - return 6; - } else { - printf("ok %d - UC_HOOK_CODE installed\n", log_num++); - } - - // intercept memory write events - if (uc_hook_add(uc, &trace1, UC_HOOK_MEM_WRITE, hook_mem_write, NULL, 1, 0) != UC_ERR_OK) { - printf("not ok %d - Failed to install UC_HOOK_MEM_WRITE ucr\n", log_num++); - return 7; - } else { - printf("ok %d - UC_HOOK_MEM_WRITE installed\n", log_num++); - } - - // intercept invalid memory events - if (uc_hook_add(uc, &trace1, UC_HOOK_MEM_WRITE_PROT | UC_HOOK_MEM_FETCH_PROT, hook_mem_invalid, NULL, 1, 0) != UC_ERR_OK) { - printf("not ok %d - Failed to install memory invalid handler\n", log_num++); - return 8; - } else { - printf("ok %d - memory invalid handler installed\n", log_num++); - } - - // emulate machine code until told to stop by hook_code - printf("# BEGIN execution\n"); - err = uc_emu_start(uc, 0x100000, 0x400000, 0, 0); - if (err != UC_ERR_OK) { - printf("not ok %d - Failure on uc_emu_start() with error %u:%s\n", log_num++, err, uc_strerror(err)); - return 9; - } else { - printf("ok %d - uc_emu_start complete\n", log_num++); - } - printf("# END execution\n"); - - // get ending EIP - if (uc_reg_read(uc, UC_X86_REG_EIP, &eip)) { - printf("not ok %d - Failed to read eip.\n", log_num++); - } else { - printf("ok %d - Ending EIP 0x%x\n", log_num++, eip); - } - - //make sure that random blocks didn't get nuked - // fill in sections that shouldn't get touched - if (uc_mem_read(uc, 0x1ff000, readbuf, sizeof(readbuf))) { - printf("not ok %d - Failed to read random buffer 1 from memory\n", log_num++); - } else { - printf("ok %d - Random buffer 1 read from memory\n", log_num++); - if (memcmp(buf1, readbuf, 4096)) { - printf("not ok %d - Random buffer 1 contents are incorrect\n", log_num++); - } else { - printf("ok %d - Random buffer 1 contents are correct\n", log_num++); - } - } - - if (uc_mem_read(uc, 0x301000, readbuf, sizeof(readbuf))) { - printf("not ok %d - Failed to read random buffer 2 from memory\n", log_num++); - } else { - printf("ok %d - Random buffer 2 read from memory\n", log_num++); - if (memcmp(buf2, readbuf, 4096)) { - printf("not ok %d - Random buffer 2 contents are incorrect\n", log_num++); - } else { - printf("ok %d - Random buffer 2 contents are correct\n", log_num++); - } - } - - if (uc_close(uc) == UC_ERR_OK) { - printf("ok %d - uc_close complete\n", log_num++); - } else { - printf("not ok %d - uc_close complete\n", log_num++); - } - - return 0; -} diff --git a/tests/regress/mem_fuzz.c 
b/tests/regress/mem_fuzz.c deleted file mode 100644 index bc0c222b..00000000 --- a/tests/regress/mem_fuzz.c +++ /dev/null @@ -1,116 +0,0 @@ -#define __STDC_FORMAT_MACROS -#include <inttypes.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> - -#include <unicorn/unicorn.h> - - -uint64_t baseranges[] = {0,0,0,0}; -int step = 0; - -uint64_t urnd(){ - uint64_t rnd = rand(); - rnd = rnd << 32; - rnd += rand(); - return rnd; -} -uint64_t get_addr(){ - uint64_t base = ((uint64_t)urnd())%4; - uint64_t addr = baseranges[base] + urnd()%(4096*10); - return addr; -} - -uint64_t get_aligned_addr(){ - uint64_t addr = get_addr(); - return addr - (addr % 4096); -} - -uint64_t get_len(){ - uint64_t len = (urnd() % (4096*5))+1; - return len; -} - -uint64_t get_aligned_len(){ - uint64_t len = get_len(); - len = len - (len % 4096); - len = ((len == 0) ? 4096 : len); - return len; -} - -void perform_map_step(uc_engine *uc){ - uint64_t addr = get_aligned_addr(); - uint64_t len = get_aligned_len(); - printf("map(uc,0x%"PRIx64",0x%"PRIx64"); //%d\n", addr, len, step); - uc_mem_map(uc, addr, len, UC_PROT_READ | UC_PROT_WRITE); -} - -void perform_unmap_step(uc_engine *uc){ - uint64_t addr = get_aligned_addr(); - uint64_t len = get_aligned_len(); - printf("unmap(uc,0x%"PRIx64",0x%"PRIx64"); //%d\n", addr, len, step); - uc_mem_unmap(uc, addr, len); -} - -void perform_write_step(uc_engine *uc){ - char buff[4096*4]; - memset((void *)buff, 0, 4096*4); - uint64_t addr = get_addr(); - uint64_t len = get_len()%(4096*3); - printf("write(uc,0x%"PRIx64",0x%"PRIx64"); //%d\n", addr, len, step); - uc_mem_write(uc, addr, buff, len); -} - -void perform_read_step(uc_engine *uc){ - char buff[4096*4]; - uint64_t addr = get_addr(); - uint64_t len = get_len()%(4096*4); - printf("read(uc,0x%"PRIx64",0x%"PRIx64"); //%d\n", addr, len, step); - uc_mem_read(uc, addr, buff, len); -} - -void perform_fuzz_step(uc_engine *uc){ - switch( ((uint32_t)rand())%4 ){ - case 0: perform_map_step(uc); break; - case 1: perform_unmap_step(uc); break; - case 2: perform_read_step(uc); break; - case 3: perform_write_step(uc); break; - } -} - -int main(int argc, char **argv, char **envp) -{ - uc_engine *uc; - uc_err err; - if(argc<2){ - printf("usage: mem_fuzz $seed\n"); - return 1; - } - int seed = atoi(argv[1]); - int i = 0; - - //don't really care about quality of randomness - srand(seed); - printf("running with seed %d\n",seed); - - // Initialize emulator in X86-64bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u\n", err); - return 1; - } - - for(i = 0; i < 2048; i++){ - step++; - perform_fuzz_step(uc); - } - - if (uc_close(uc) != UC_ERR_OK) { - printf("Failed on uc_close\n"); - return 1; - } - - return 0; -} diff --git a/tests/regress/mem_map_0x100000000.c deleted file mode 100644 index 766577af..00000000 --- a/tests/regress/mem_map_0x100000000.c +++ /dev/null @@ -1,31 +0,0 @@ -#include <unicorn/unicorn.h> -#include <stdio.h> - -int main() { - uc_engine *u; - uc_err err; - - printf("mem_map_0x100000000.c \n"); - - if ((err = uc_open(UC_ARCH_X86, UC_MODE_32, &u)) != UC_ERR_OK) { - printf("uc_open() failed: %s\n", uc_strerror(err)); - return -1; - } - - if ((err = uc_mem_map(u, 0x100000000, 0x002c0000, UC_PROT_ALL)) != UC_ERR_OK) { - printf("uc_mem_map() failed: %s\n", uc_strerror(err)); - return -1; - } - if ((err = uc_mem_map(u, 0x0018D000, 0x00006000, UC_PROT_ALL)) != UC_ERR_OK) { - printf("uc_mem_map() failed: %s\n", uc_strerror(err)); - return -1; - } - - if ((err = uc_close(u)) != UC_ERR_OK) { - printf("uc_close() failed: %s\n", uc_strerror(err)); - return -1; - } - - printf("Success.\n"); - return 0; -} diff --git a/tests/regress/mem_map_large.c deleted file mode 100644 index f0f4cdbf..00000000 --- a/tests/regress/mem_map_large.c +++ /dev/null @@ -1,17 +0,0 @@ -#include <unicorn/unicorn.h> -#include <stdio.h> - -int main() { - uc_engine *u; - uc_err err; - if ((err = uc_open(UC_ARCH_X86, UC_MODE_32, &u)) != UC_ERR_OK) { - printf("uc_open() failed: %s\n", uc_strerror(err)); - } - printf("Trying large map.\n"); - if ((err = uc_mem_map(u, 0x60802000, (unsigned) 0x28bd211200004000, UC_PROT_ALL)) != UC_ERR_OK) { - printf("uc_mem_map() failed: %s\n", uc_strerror(err)); - return -1; - } - printf("Success.\n"); - return 0; -} diff --git a/tests/regress/mem_nofree.c deleted file mode 100644 index 0a23091c..00000000 --- a/tests/regress/mem_nofree.c +++ /dev/null @@ -1,70 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -#include <unicorn/unicorn.h> - -#define ADDRESS1 0x10000000 -#define ADDRESS2 0x20000000 -#define SIZE (80 * 1024 * 1024) - -static void VM_exec() -{ - int c; - uc_engine *uc; - uc_err err; - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if(err) - { - printf("Failed on uc_open() with error returned: %s\n", uc_strerror(err)); - return; - } - -repeat: - err = uc_mem_map(uc, ADDRESS1, SIZE, UC_PROT_ALL); - if(err != UC_ERR_OK) - { - printf("Failed to map memory %s\n", uc_strerror(err)); - goto err; - } - - err = uc_mem_map(uc, ADDRESS2, SIZE, UC_PROT_ALL); - if(err != UC_ERR_OK) - { - printf("Failed to map memory %s\n", uc_strerror(err)); - goto err; - } - - err = uc_mem_unmap(uc, ADDRESS1, SIZE); - if(err != UC_ERR_OK) - { - printf("Failed to unmap memory %s\n", uc_strerror(err)); - goto err; - } - - err = uc_mem_unmap(uc, ADDRESS2, SIZE); - if(err != UC_ERR_OK) - { - printf("Failed to unmap memory %s\n", uc_strerror(err)); - goto err; - } - - for(;;) - { - c = getchar(); //pause here and analyse memory usage before exiting with a program like VMMap - if(c != 'e') - goto repeat; - else - break; - } - -err: - uc_close(uc); -} - -int main(int argc, char *argv[]) -{ - VM_exec(); - return 0; -} diff --git a/tests/regress/mem_protect.c deleted file mode 100644 index 78eb360c..00000000 --- a/tests/regress/mem_protect.c +++ /dev/null @@ -1,300 +0,0 @@ -/* - uc_mem_protect demo / unit test - - Copyright(c) 2015 Chris Eagle - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - - */ - -#define __STDC_FORMAT_MACROS -#include <inttypes.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <time.h> -#include <unistd.h> - -#include <unicorn/unicorn.h> - -unsigned char PROGRAM[] = - "\xc7\x05\x00\x00\x20\x00\x41\x41\x41\x41\x90\xc7\x05\x00\x00\x20" - "\x00\x42\x42\x42\x42\xc7\x05\x00\x00\x30\x00\x43\x43\x43\x43\x90" - "\xc7\x05\x00\x00\x30\x00\x44\x44\x44\x44\xc7\x05\x00\x00\x40\x00" - "\x45\x45\x45\x45\x90\xc7\x05\x00\x00\x40\x00\x46\x46\x46\x46\xc7" - "\x05\x00\xf8\x3f\x00\x47\x47\x47\x47\xc7\x05\x00\x18\x40\x00\x48" - "\x48\x48\x48\xf4"; - // total size: 84 bytes - -/* - bits 32 - - ; assumes code section at 0x100000 - ; assumes data section at 0x200000, initially rw - ; assumes data section at 0x300000, initially rw - ; assumes data section at 0x400000, initially rw - - ; with installed hooks unmaps or maps on each nop - - mov dword [0x200000], 0x41414141 - nop ; mark it RO - mov dword [0x200000], 0x42424242 - - mov dword [0x300000], 0x43434343 - nop ; mark it RO - mov dword [0x300000], 0x44444444 - - mov dword [0x400000], 0x45454545 - nop ; mark it RO - mov dword [0x400000], 0x46464646 - mov dword [0x3ff800], 0x47474747 ; make sure surrounding areas remained RW - mov dword [0x401800], 0x48484848 ; make sure surrounding areas remained RW - - hlt ; tell hook function we are done - */ - -int test_num = 0; -uint32_t tests[] = { - 0x41414141, - 0x43434343, - 0x45454545 -}; - -static int log_num = 1; - -#define CODE_SECTION 0x100000 -#define CODE_SIZE 0x1000 - -// callback for tracing instruction -static void hook_code(uc_engine *uc, uint64_t addr, uint32_t size, void *user_data) -{ - uint8_t opcode; - uint32_t testval; - if (uc_mem_read(uc, addr, &opcode, 1) != UC_ERR_OK) { - printf("not ok %d - uc_mem_read fail during hook_code callback, addr: 0x%" PRIx64 "\n", log_num++, addr); - } - printf("ok %d - uc_mem_read for opcode at address 0x%" PRIx64 "\n", log_num++, addr); - switch (opcode) { - case 0x90: //nop - printf("# Handling NOP\n"); - if (uc_mem_read(uc, 0x200000 + test_num * 0x100000, &testval, sizeof(testval)) != UC_ERR_OK) { - printf("not ok %d - uc_mem_read fail for address: 0x%x\n", log_num++, 0x200000 + test_num * 0x100000); - } else { - printf("ok %d - good uc_mem_read for address: 0x%x\n", log_num++, 0x200000 + test_num * 0x100000); - printf("# uc_mem_read for test %d\n", test_num); - - if (testval == tests[test_num]) { - printf("ok %d - passed test %d\n", log_num++, test_num); - } else { - printf("not ok %d - failed test %d\n", log_num++, test_num); - printf("# Expected: 0x%x\n",tests[test_num]); - printf("# Received: 0x%x\n", testval); - } - } - if (uc_mem_protect(uc, 0x200000 + test_num * 0x100000, 0x1000, UC_PROT_READ) != UC_ERR_OK) { - printf("not ok %d - uc_mem_protect fail during hook_code callback, addr: 0x%x\n", log_num++, 0x200000 + test_num * 0x100000); - } else { - printf("ok %d - uc_mem_protect success\n", log_num++); - } - test_num++; - break; - case 0xf4: //hlt - printf("# Handling HLT\n"); - if (uc_emu_stop(uc) != UC_ERR_OK) { - printf("not ok %d - uc_emu_stop fail during hook_code callback, addr: 0x%" PRIx64 "\n", log_num++, addr); - _exit(-1); - } else { - printf("ok %d - hlt encountered, uc_emu_stop called\n", log_num++); - } - break; - default: //all others - printf("# Handling OTHER\n"); - break; - } -} - -// callback for tracing memory access (READ or WRITE) -static void hook_mem_write(uc_engine *uc, uc_mem_type type, - uint64_t addr, int size, int64_t value, void *user_data) -{ - printf("# write to memory at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", addr, size, value); -} - -// callback for 
tracing invalid memory access (READ or WRITE) -static bool hook_mem_invalid(uc_engine *uc, uc_mem_type type, - uint64_t addr, int size, int64_t value, void *user_data) -{ - uint32_t testval; - switch(type) { - default: - printf("not ok %d - memory invalid type: %d at 0x%" PRIx64 "\n", log_num++, type, addr); - return false; - case UC_MEM_WRITE_PROT: - printf("# write to non-writeable memory at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", addr, size, value); - - if (uc_mem_read(uc, addr, &testval, sizeof(testval)) != UC_ERR_OK) { - printf("not ok %d - uc_mem_read fail for address: 0x%" PRIx64 "\n", log_num++, addr); - } else { - printf("ok %d - uc_mem_read success after mem_protect at test %d\n", log_num++, test_num - 1); - } - - if (uc_mem_protect(uc, addr & ~0xfffL, 0x1000, UC_PROT_READ | UC_PROT_WRITE) != UC_ERR_OK) { - printf("not ok %d - uc_mem_protect fail during hook_mem_invalid callback, addr: 0x%" PRIx64 "\n", log_num++, addr); - } else { - printf("ok %d - uc_mem_protect success\n", log_num++); - } - return true; - } -} - -int main(int argc, char **argv, char **envp) -{ - uc_engine *uc; - uc_hook trace1, trace2; - uc_err err; - uint32_t addr, testval; - int32_t buf1[1024], buf2[1024], readbuf[1024]; - int i; - - //don't really care about quality of randomness - srand(time(NULL)); - for (i = 0; i < 1024; i++) { - buf1[i] = rand(); - buf2[i] = rand(); - } - - printf("# Memory protect test\n"); - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err) { - printf("not ok %d - Failed on uc_open() with error returned: %u\n", log_num++, err); - return 1; - } else { - printf("ok %d - uc_open() success\n", log_num++); - } - - uc_mem_map(uc, CODE_SECTION, CODE_SIZE, UC_PROT_READ | UC_PROT_EXEC); - uc_mem_map(uc, 0x200000, 0x1000, UC_PROT_READ | UC_PROT_WRITE); - uc_mem_map(uc, 0x300000, 0x1000, UC_PROT_READ | UC_PROT_WRITE); - uc_mem_map(uc, 0x3ff000, 0x3000, UC_PROT_READ | UC_PROT_WRITE); - - // fill in sections that shouldn't get touched - if (uc_mem_write(uc, 0x3ff000, buf1, sizeof(buf1))) { - printf("not ok %d - Failed to write random buffer 1 to memory, quit!\n", log_num++); - return 2; - } else { - printf("ok %d - Random buffer 1 written to memory\n", log_num++); - } - - if (uc_mem_write(uc, 0x401000, buf2, sizeof(buf2))) { - printf("not ok %d - Failed to write random buffer 2 to memory, quit!\n", log_num++); - return 3; - } else { - printf("ok %d - Random buffer 2 written to memory\n", log_num++); - } - - // write machine code to be emulated to memory - if (uc_mem_write(uc, CODE_SECTION, PROGRAM, sizeof(PROGRAM))) { - printf("not ok %d - Failed to write emulation code to memory, quit!\n", log_num++); - return 4; - } else { - printf("ok %d - Program written to memory\n", log_num++); - } - - if (uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0) != UC_ERR_OK) { - printf("not ok %d - Failed to install UC_HOOK_CODE hook\n", log_num++); - return 5; - } else { - printf("ok %d - UC_HOOK_CODE installed\n", log_num++); - } - - // intercept memory write events - if (uc_hook_add(uc, &trace1, UC_HOOK_MEM_WRITE, hook_mem_write, NULL, 1, 0) != UC_ERR_OK) { - printf("not ok %d - Failed to install UC_HOOK_MEM_WRITE hook\n", log_num++); - return 6; - } else { - printf("ok %d - UC_HOOK_MEM_WRITE installed\n", log_num++); - } - - // intercept invalid memory events - if (uc_hook_add(uc, &trace1, UC_HOOK_MEM_WRITE_PROT, hook_mem_invalid, NULL, 1, 0) != UC_ERR_OK) { - printf("not ok %d - Failed to install memory invalid handler\n", 
log_num++); - return 7; - } else { - printf("ok %d - memory invalid handler installed\n", log_num++); - } - - // emulate machine code until told to stop by hook_code - printf("# BEGIN execution\n"); - err = uc_emu_start(uc, CODE_SECTION, CODE_SECTION + CODE_SIZE, 0, 0); - if (err != UC_ERR_OK) { - printf("not ok %d - Failure on uc_emu_start() with error %u:%s\n", log_num++, err, uc_strerror(err)); - return 8; - } else { - printf("ok %d - uc_emu_start complete\n", log_num++); - } - printf("# END execution\n"); - - //read from the remapped memory - testval = 0x42424242; - for (addr = 0x200000; addr <= 0x400000; addr += 0x100000) { - uint32_t val; - if (uc_mem_read(uc, addr, &val, sizeof(val)) != UC_ERR_OK) { - printf("not ok %d - Failed uc_mem_read for address 0x%x\n", log_num++, addr); - } else { - printf("ok %d - Good uc_mem_read from 0x%x\n", log_num++, addr); - } - if (val != testval) { - printf("not ok %d - Read 0x%x, expected 0x%x\n", log_num++, val, testval); - } else { - printf("ok %d - Correct value retrieved\n", log_num++); - } - testval += 0x02020202; - } - - //account for the two mods made by the machine code - buf1[512] = 0x47474747; - buf2[512] = 0x48484848; - - //make sure that random blocks didn't get nuked - if (uc_mem_read(uc, 0x3ff000, readbuf, sizeof(readbuf))) { - printf("not ok %d - Failed to read random buffer 1 from memory\n", log_num++); - } else { - printf("ok %d - Random buffer 1 read from memory\n", log_num++); - if (memcmp(buf1, readbuf, 4096)) { - printf("not ok %d - Random buffer 1 contents are incorrect\n", log_num++); - } else { - printf("ok %d - Random buffer 1 contents are correct\n", log_num++); - } - } - - if (uc_mem_read(uc, 0x401000, readbuf, sizeof(readbuf))) { - printf("not ok %d - Failed to read random buffer 2 from memory\n", log_num++); - } else { - printf("ok %d - Random buffer 2 read from memory\n", log_num++); - if (memcmp(buf2, readbuf, 4096)) { - printf("not ok %d - Random buffer 2 contents are incorrect\n", log_num++); - } else { - printf("ok %d - Random buffer 2 contents are correct\n", log_num++); - } - } - - if (uc_close(uc) == UC_ERR_OK) { - printf("ok %d - uc_close complete\n", log_num++); - } else { - printf("not ok %d - uc_close failed\n", log_num++); - } - - return 0; -} diff --git a/tests/regress/mem_unmap.c deleted file mode 100644 index f2447e55..00000000 --- a/tests/regress/mem_unmap.c +++ /dev/null @@ -1,291 +0,0 @@ -/* - - uc_mem_unmap demo / unit test - - Copyright(c) 2015 Chris Eagle - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - - */ - -#define __STDC_FORMAT_MACROS -#include <inttypes.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <time.h> -#include <unistd.h> - -#include <unicorn/unicorn.h> - -unsigned char PROGRAM[] = - "\xc7\x05\x00\x00\x20\x00\x41\x41\x41\x41\x90\xc7\x05\x00\x00\x20" - "\x00\x42\x42\x42\x42\xc7\x05\x00\x00\x30\x00\x43\x43\x43\x43\x90" - "\xc7\x05\x00\x00\x30\x00\x44\x44\x44\x44\xc7\x05\x00\x00\x40\x00" - "\x45\x45\x45\x45\x90\xc7\x05\x00\x00\x40\x00\x46\x46\x46\x46\xf4"; - // total size: 64 bytes - -/* - ; assumes code section at 0x100000 - ; assumes data section at 0x200000, initially rw - ; assumes data section at 0x300000, initially rw - ; assumes data section at 0x400000, initially rw - - ; with installed hooks unmaps or maps on each nop - - mov dword [0x200000], 0x41414141 - nop ; unmap it - mov dword [0x200000], 0x42424242 - - mov dword [0x300000], 0x43434343 - nop ; unmap it - mov dword [0x300000], 0x44444444 - - mov dword [0x400000], 0x45454545 - nop ; unmap it - mov dword [0x400000], 0x46464646 - - hlt ; tell hook function we are done - */ - -int test_num = 0; -uint32_t tests[] = { - 0x41414141, - 0x43434343, - 0x45454545 -}; - -static int log_num = 1; - -#define CODE_SECTION 0x100000 -#define CODE_SIZE 0x1000 - -// callback for tracing instruction -static void hook_code(uc_engine *uc, uint64_t addr, uint32_t size, void *user_data) -{ - uint8_t opcode; - uint32_t testval; - if (uc_mem_read(uc, addr, &opcode, 1) != UC_ERR_OK) { - printf("not ok %d - uc_mem_read fail during hook_code callback, addr: 0x%" PRIx64 "\n", log_num++, addr); - } - printf("ok %d - uc_mem_read for opcode at address 0x%" PRIx64 "\n", log_num++, addr); - switch (opcode) { - case 0x90: //nop - printf("# Handling NOP\n"); - if (uc_mem_read(uc, 0x200000 + test_num * 0x100000, &testval, sizeof(testval)) != UC_ERR_OK) { - printf("not ok %d - uc_mem_read fail for address: 0x%x\n", log_num++, 0x200000 + test_num * 0x100000); - } else { - printf("ok %d - good uc_mem_read for address: 0x%x\n", log_num++, 0x200000 + test_num * 0x100000); - printf("# uc_mem_read for test %d\n", test_num); - - if (testval == tests[test_num]) { - printf("ok %d - passed test %d\n", log_num++, test_num); - } else { - printf("not ok %d - failed test %d\n", log_num++, test_num); - printf("# Expected: 0x%x\n",tests[test_num]); - printf("# Received: 0x%x\n", testval); - } - } - if (uc_mem_unmap(uc, 0x200000 + test_num * 0x100000, 0x1000) != UC_ERR_OK) { - printf("not ok %d - uc_mem_unmap fail during hook_code callback, addr: 0x%x\n", log_num++, 0x200000 + test_num * 0x100000); - } else { - printf("ok %d - uc_mem_unmap success\n", log_num++); - } - test_num++; - break; - case 0xf4: //hlt - printf("# Handling HLT\n"); - if (uc_emu_stop(uc) != UC_ERR_OK) { - printf("not ok %d - uc_emu_stop fail during hook_code callback, addr: 0x%" PRIx64 "\n", log_num++, addr); - _exit(-1); - } else { - printf("ok %d - hlt encountered, uc_emu_stop called\n", log_num++); - } - break; - default: //all others - printf("# Handling OTHER\n"); - break; - } -} - -// callback for tracing memory access (READ or WRITE) -static void hook_mem_write(uc_engine *uc, uc_mem_type type, - uint64_t addr, int size, int64_t value, void *user_data) -{ - printf("# write to memory at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", addr, size, value); -} - -// callback for tracing invalid memory access (READ or WRITE) -static bool hook_mem_invalid(uc_engine *uc, uc_mem_type type, - uint64_t addr, int size, int64_t value, void *user_data) -{ - uint32_t testval; - switch(type) { - default: - printf("not ok %d - memory invalid type: %d at 0x%" PRIx64 "\n", 
log_num++, type, addr); - return false; - case UC_MEM_WRITE_UNMAPPED: - printf("# write to invalid memory at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", addr, size, value); - - if (uc_mem_read(uc, addr, &testval, sizeof(testval)) != UC_ERR_OK) { - printf("ok %d - uc_mem_read fail for address: 0x%" PRIx64 "\n", log_num++, addr); - } else { - printf("not ok %d - uc_mem_read success after unmap at test %d\n", log_num++, test_num - 1); - } - - if (uc_mem_map(uc, addr & ~0xfffL, 0x1000, UC_PROT_READ | UC_PROT_WRITE) != UC_ERR_OK) { - printf("not ok %d - uc_mem_map fail during hook_mem_invalid callback, addr: 0x%" PRIx64 "\n", log_num++, addr); - } else { - printf("ok %d - uc_mem_map success\n", log_num++); - } - return true; - } -} - -int main(int argc, char **argv, char **envp) -{ - uc_engine *uc; - uc_hook trace1, trace2; - uc_err err; - uint32_t addr, testval; - int32_t buf1[1024], buf2[1024], readbuf[1024]; - int i; - - //don't really care about quality of randomness - srand(time(NULL)); - for (i = 0; i < 1024; i++) { - buf1[i] = rand(); - buf2[i] = rand(); - } - - printf("# Memory unmapping test\n"); - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err) { - printf("not ok %d - Failed on uc_open() with error returned: %u\n", log_num++, err); - return 1; - } else { - printf("ok %d - uc_open() success\n", log_num++); - } - - uc_mem_map(uc, CODE_SECTION, CODE_SIZE, UC_PROT_READ | UC_PROT_EXEC); - uc_mem_map(uc, 0x200000, 0x1000, UC_PROT_READ | UC_PROT_WRITE); - uc_mem_map(uc, 0x300000, 0x1000, UC_PROT_READ | UC_PROT_WRITE); - uc_mem_map(uc, 0x3ff000, 0x3000, UC_PROT_READ | UC_PROT_WRITE); - - // fill in sections that shouldn't get touched - if (uc_mem_write(uc, 0x3ff000, buf1, sizeof(buf1))) { - printf("not ok %d - Failed to write random buffer 1 to memory, quit!\n", log_num++); - return 2; - } else { - printf("ok %d - Random buffer 1 written to memory\n", log_num++); - } - - if (uc_mem_write(uc, 0x401000, buf2, sizeof(buf2))) { - printf("not ok %d - Failed to write random buffer 2 to memory, quit!\n", log_num++); - return 3; - } else { - printf("ok %d - Random buffer 2 written to memory\n", log_num++); - } - - // write machine code to be emulated to memory - if (uc_mem_write(uc, CODE_SECTION, PROGRAM, sizeof(PROGRAM))) { - printf("not ok %d - Failed to write emulation code to memory, quit!\n", log_num++); - return 4; - } else { - printf("ok %d - Program written to memory\n", log_num++); - } - - if (uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0) != UC_ERR_OK) { - printf("not ok %d - Failed to install UC_HOOK_CODE hook\n", log_num++); - return 5; - } else { - printf("ok %d - UC_HOOK_CODE installed\n", log_num++); - } - - // intercept memory write events - if (uc_hook_add(uc, &trace1, UC_HOOK_MEM_WRITE, hook_mem_write, NULL, 1, 0) != UC_ERR_OK) { - printf("not ok %d - Failed to install UC_HOOK_MEM_WRITE hook\n", log_num++); - return 6; - } else { - printf("ok %d - UC_HOOK_MEM_WRITE installed\n", log_num++); - } - - // intercept invalid memory events - if (uc_hook_add(uc, &trace1, UC_HOOK_MEM_WRITE_UNMAPPED, hook_mem_invalid, NULL, 1, 0) != UC_ERR_OK) { - printf("not ok %d - Failed to install memory invalid handler\n", log_num++); - return 7; - } else { - printf("ok %d - memory invalid handler installed\n", log_num++); - } - - // emulate machine code until told to stop by hook_code - printf("# BEGIN execution\n"); - err = uc_emu_start(uc, CODE_SECTION, CODE_SECTION + CODE_SIZE, 0, 0); - if (err != UC_ERR_OK) { - 
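- // any emulation error here is fatal by design: the TAP log above only lines up with the expected output after a complete run to the hlt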
printf("not ok %d - Failure on uc_emu_start() with error %u:%s\n", log_num++, err, uc_strerror(err)); - return 8; - } else { - printf("ok %d - uc_emu_start complete\n", log_num++); - } - printf("# END execution\n"); - - //read from the remapped memory - testval = 0x42424242; - for (addr = 0x200000; addr <= 0x400000; addr += 0x100000) { - uint32_t val; - if (uc_mem_read(uc, addr, &val, sizeof(val)) != UC_ERR_OK) { - printf("not ok %d - Failed uc_mem_read for address 0x%x\n", log_num++, addr); - } else { - printf("ok %d - Good uc_mem_read from 0x%x\n", log_num++, addr); - } - if (val != testval) { - printf("not ok %d - Read 0x%x, expected 0x%x\n", log_num++, val, testval); - } else { - printf("ok %d - Correct value retrieved\n", log_num++); - } - testval += 0x02020202; - } - - //make sure that random blocks didn't get nuked - // fill in sections that shouldn't get touched - if (uc_mem_read(uc, 0x3ff000, readbuf, sizeof(readbuf))) { - printf("not ok %d - Failed to read random buffer 1 from memory\n", log_num++); - } else { - printf("ok %d - Random buffer 1 read from memory\n", log_num++); - if (memcmp(buf1, readbuf, 4096)) { - printf("not ok %d - Random buffer 1 contents are incorrect\n", log_num++); - } else { - printf("ok %d - Random buffer 1 contents are correct\n", log_num++); - } - } - - if (uc_mem_read(uc, 0x401000, readbuf, sizeof(readbuf))) { - printf("not ok %d - Failed to read random buffer 2 from memory\n", log_num++); - } else { - printf("ok %d - Random buffer 2 read from memory\n", log_num++); - if (memcmp(buf2, readbuf, 4096)) { - printf("not ok %d - Random buffer 2 contents are incorrect\n", log_num++); - } else { - printf("ok %d - Random buffer 2 contents are correct\n", log_num++); - } - } - - if (uc_close(uc) == UC_ERR_OK) { - printf("ok %d - uc_close complete\n", log_num++); - } else { - printf("not ok %d - uc_close complete\n", log_num++); - } - - return 0; -} diff --git a/tests/regress/memleak_arm.c b/tests/regress/memleak_arm.c deleted file mode 100644 index 0e2e9aa9..00000000 --- a/tests/regress/memleak_arm.c +++ /dev/null @@ -1,176 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh, 2015 */ - -/* Sample code to demonstrate how to emulate ARM code */ - -// windows specific -#ifdef _MSC_VER -#include -#include -#define PRIx64 "llX" -#ifdef DYNLOAD -#include "unicorn_dynload.h" -#else // DYNLOAD -#include -#ifdef _WIN64 -#pragma comment(lib, "unicorn_staload64.lib") -#else // _WIN64 -#pragma comment(lib, "unicorn_staload.lib") -#endif // _WIN64 -#endif // DYNLOAD - -// posix specific -#else // _MSC_VER -#include -#endif // _MSC_VER - - -// code to be emulated -#define ARM_CODE "\x37\x00\xa0\xe3\x03\x10\x42\xe0" // mov r0, #0x37; sub r1, r2, r3 -#define THUMB_CODE "\x83\xb0" // sub sp, #0xc - -// memory address where emulation starts -#define ADDRESS 0x10000 - -static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); -} - -static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); -} - -static void test_arm(void) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2; - - int r0 = 0x1234; // R0 register - int r2 = 0x6789; // R1 register - int r3 = 0x3333; // R2 register - int r1; // R1 register - - printf("Emulate ARM code\n"); - - // Initialize emulator in ARM mode - err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc); 
- if (err) { - printf("Failed on uc_open() with error returned: %u (%s)\n", - err, uc_strerror(err)); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, ARM_CODE, sizeof(ARM_CODE) - 1); - - // initialize machine registers - uc_reg_write(uc, UC_ARM_REG_R0, &r0); - uc_reg_write(uc, UC_ARM_REG_R2, &r2); - uc_reg_write(uc, UC_ARM_REG_R3, &r3); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, (uint64_t)1, (uint64_t)0); - - // tracing one instruction at ADDRESS with customized callback - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, (uint64_t)ADDRESS, (uint64_t)ADDRESS); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE) -1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned: %u\n", err); - } - - // now print out some registers - printf(">>> Emulation done. Below is the CPU context\n"); - - uc_reg_read(uc, UC_ARM_REG_R0, &r0); - uc_reg_read(uc, UC_ARM_REG_R1, &r1); - printf(">>> R0 = 0x%x\n", r0); - printf(">>> R1 = 0x%x\n", r1); - - uc_close(uc); -} - -static void test_thumb(void) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2; - - int sp = 0x1234; // SP register - - printf("Emulate THUMB code\n"); - - // Initialize emulator in THUMB mode - err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u (%s)\n", - err, uc_strerror(err)); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, THUMB_CODE, sizeof(THUMB_CODE) - 1); - - // initialize machine registers - uc_reg_write(uc, UC_ARM_REG_SP, &sp); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, (uint64_t)1, (uint64_t)0); - - // tracing one instruction at ADDRESS with customized callback - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, (uint64_t)ADDRESS, (uint64_t)ADDRESS); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(THUMB_CODE) -1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned: %u\n", err); - } - - // now print out some registers - printf(">>> Emulation done. 
Below is the CPU context\n"); - - uc_reg_read(uc, UC_ARM_REG_SP, &sp); - printf(">>> SP = 0x%x\n", sp); - - uc_close(uc); -} - -int main(int argc, char **argv, char **envp) -{ - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - - // test memleak - while(1) { - test_arm(); - printf("==========================\n"); - test_thumb(); - } - - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - - return 0; -} diff --git a/tests/regress/memleak_arm64.c deleted file mode 100644 index 1ce62771..00000000 --- a/tests/regress/memleak_arm64.c +++ /dev/null @@ -1,120 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh, 2015 */ - -/* Sample code to demonstrate how to emulate ARM64 code */ - -// windows specific -#ifdef _MSC_VER -#include <io.h> -#include <windows.h> -#define PRIx64 "llX" -#ifdef DYNLOAD -#include "unicorn_dynload.h" -#else // DYNLOAD -#include <unicorn/unicorn.h> -#ifdef _WIN64 -#pragma comment(lib, "unicorn_staload64.lib") -#else // _WIN64 -#pragma comment(lib, "unicorn_staload.lib") -#endif // _WIN64 -#endif // DYNLOAD - -// posix specific -#else // _MSC_VER -#include <unicorn/unicorn.h> -#endif // _MSC_VER - - -// code to be emulated -#define ARM_CODE "\xab\x01\x0f\x8b" // add x11, x13, x15 - -// memory address where emulation starts -#define ADDRESS 0x10000 - -static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); -} - -static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); -} - -static void test_arm64(void) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2; - - int64_t x11 = 0x1234; // X11 register - int64_t x13 = 0x6789; // X13 register - int64_t x15 = 0x3333; // X15 register - - printf("Emulate ARM64 code\n"); - - // Initialize emulator in ARM64 mode - err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u (%s)\n", - err, uc_strerror(err)); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, ARM_CODE, sizeof(ARM_CODE) - 1); - - // initialize machine registers - uc_reg_write(uc, UC_ARM64_REG_X11, &x11); - uc_reg_write(uc, UC_ARM64_REG_X13, &x13); - uc_reg_write(uc, UC_ARM64_REG_X15, &x15); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, (uint64_t)1, (uint64_t)0); - - // tracing one instruction at ADDRESS with customized callback - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, (uint64_t)ADDRESS, (uint64_t)ADDRESS); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE) -1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned: %u\n", err); - } - - // now print out some registers - printf(">>> Emulation done. Below is the CPU context\n"); - - uc_reg_read(uc, UC_ARM64_REG_X11, &x11); - printf(">>> X11 = 0x%" PRIx64 "\n", x11); - - uc_close(uc); -} - -int main(int argc, char **argv, char **envp) -{ - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - - while(1) { - test_arm64(); - } - - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - - return 0; -} diff --git a/tests/regress/memleak_m68k.c deleted file mode 100644 index e8cbb7cd..00000000 --- a/tests/regress/memleak_m68k.c +++ /dev/null @@ -1,183 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Loi Anh Tuan, 2015 */ - -/* Sample code to demonstrate how to emulate m68k code */ - -// windows specific -#ifdef _MSC_VER -#include <io.h> -#include <windows.h> -#define PRIx64 "llX" -#ifdef DYNLOAD -#include "unicorn_dynload.h" -#else // DYNLOAD -#include <unicorn/unicorn.h> -#ifdef _WIN64 -#pragma comment(lib, "unicorn_staload64.lib") -#else // _WIN64 -#pragma comment(lib, "unicorn_staload.lib") -#endif // _WIN64 -#endif // DYNLOAD - -// posix specific -#else // _MSC_VER -#include <unicorn/unicorn.h> -#endif // _MSC_VER - -// code to be emulated -#define M68K_CODE "\x76\xed" // moveq #-19, %d3 - -// memory address where emulation starts -#define ADDRESS 0x10000 - -static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); -} - -static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); -} - -static void test_m68k(void) -{ - uc_engine *uc; - uc_hook trace1, trace2; - uc_err err; - - int d0 = 0x0000; // d0 data register - int d1 = 0x0000; // d1 data register - int d2 = 0x0000; // d2 data register - int d3 = 0x0000; // d3 data register - int d4 = 0x0000; // d4 data register - int d5 = 0x0000; // d5 data register - int d6 = 0x0000; // d6 data register - int d7 = 0x0000; // d7 data register - - int a0 = 0x0000; // a0 address register - int a1 = 0x0000; // a1 address register - int a2 = 0x0000; // a2 address register - int a3 = 0x0000; // a3 address register - int a4 = 0x0000; // a4 address register - int a5 = 0x0000; // a5 address register - int a6 = 0x0000; // a6 address register - int a7 = 0x0000; // a7 address register - - int pc = 0x0000; // program counter - int sr = 0x0000; // status register - - printf("Emulate M68K code\n"); - - // Initialize emulator in M68K mode - err = uc_open(UC_ARCH_M68K, UC_MODE_BIG_ENDIAN, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u (%s)\n", - err, uc_strerror(err)); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, M68K_CODE, sizeof(M68K_CODE) - 1); - - // initialize machine registers - uc_reg_write(uc, UC_M68K_REG_D0, &d0); - uc_reg_write(uc, UC_M68K_REG_D1, &d1); - uc_reg_write(uc, UC_M68K_REG_D2, &d2); - uc_reg_write(uc, UC_M68K_REG_D3, &d3); - uc_reg_write(uc, UC_M68K_REG_D4, &d4); - uc_reg_write(uc, UC_M68K_REG_D5, &d5); - uc_reg_write(uc, UC_M68K_REG_D6, &d6); - uc_reg_write(uc, 
UC_M68K_REG_D7, &d7); - - uc_reg_write(uc, UC_M68K_REG_A0, &a0); - uc_reg_write(uc, UC_M68K_REG_A1, &a1); - uc_reg_write(uc, UC_M68K_REG_A2, &a2); - uc_reg_write(uc, UC_M68K_REG_A3, &a3); - uc_reg_write(uc, UC_M68K_REG_A4, &a4); - uc_reg_write(uc, UC_M68K_REG_A5, &a5); - uc_reg_write(uc, UC_M68K_REG_A6, &a6); - uc_reg_write(uc, UC_M68K_REG_A7, &a7); - - uc_reg_write(uc, UC_M68K_REG_PC, &pc); - uc_reg_write(uc, UC_M68K_REG_SR, &sr); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, (uint64_t)1, (uint64_t)0); - - // tracing all instructions - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, (uint64_t)1, (uint64_t)0); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(M68K_CODE)-1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned: %u\n", err); - } - - // now print out some registers - printf(">>> Emulation done. Below is the CPU context\n"); - - uc_reg_read(uc, UC_M68K_REG_D0, &d0); - uc_reg_read(uc, UC_M68K_REG_D1, &d1); - uc_reg_read(uc, UC_M68K_REG_D2, &d2); - uc_reg_read(uc, UC_M68K_REG_D3, &d3); - uc_reg_read(uc, UC_M68K_REG_D4, &d4); - uc_reg_read(uc, UC_M68K_REG_D5, &d5); - uc_reg_read(uc, UC_M68K_REG_D6, &d6); - uc_reg_read(uc, UC_M68K_REG_D7, &d7); - - uc_reg_read(uc, UC_M68K_REG_A0, &a0); - uc_reg_read(uc, UC_M68K_REG_A1, &a1); - uc_reg_read(uc, UC_M68K_REG_A2, &a2); - uc_reg_read(uc, UC_M68K_REG_A3, &a3); - uc_reg_read(uc, UC_M68K_REG_A4, &a4); - uc_reg_read(uc, UC_M68K_REG_A5, &a5); - uc_reg_read(uc, UC_M68K_REG_A6, &a6); - uc_reg_read(uc, UC_M68K_REG_A7, &a7); - - uc_reg_read(uc, UC_M68K_REG_PC, &pc); - uc_reg_read(uc, UC_M68K_REG_SR, &sr); - - printf(">>> A0 = 0x%x\t\t>>> D0 = 0x%x\n", a0, d0); - printf(">>> A1 = 0x%x\t\t>>> D1 = 0x%x\n", a1, d1); - printf(">>> A2 = 0x%x\t\t>>> D2 = 0x%x\n", a2, d2); - printf(">>> A3 = 0x%x\t\t>>> D3 = 0x%x\n", a3, d3); - printf(">>> A4 = 0x%x\t\t>>> D4 = 0x%x\n", a4, d4); - printf(">>> A5 = 0x%x\t\t>>> D5 = 0x%x\n", a5, d5); - printf(">>> A6 = 0x%x\t\t>>> D6 = 0x%x\n", a6, d6); - printf(">>> A7 = 0x%x\t\t>>> D7 = 0x%x\n", a7, d7); - printf(">>> PC = 0x%x\n", pc); - printf(">>> SR = 0x%x\n", sr); - - uc_close(uc); -} - -int main(int argc, char **argv, char **envp) -{ - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - - // test memleak - while(1) { - test_m68k(); - } - - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - - return 0; -} diff --git a/tests/regress/memleak_mips.c deleted file mode 100644 index d0244bbe..00000000 --- a/tests/regress/memleak_mips.c +++ /dev/null @@ -1,169 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh, 2015 */ - -/* Sample code to demonstrate how to emulate Mips code (big endian) */ - -// windows specific -#ifdef _MSC_VER -#include <io.h> -#include <windows.h> -#define PRIx64 "llX" -#ifdef DYNLOAD -#include "unicorn_dynload.h" -#else // DYNLOAD -#include <unicorn/unicorn.h> -#ifdef _WIN64 -#pragma comment(lib, "unicorn_staload64.lib") -#else // _WIN64 -#pragma comment(lib, "unicorn_staload.lib") -#endif // _WIN64 -#endif // DYNLOAD - -// posix specific -#else // _MSC_VER -#include <unicorn/unicorn.h> -#endif // _MSC_VER - - -// code to be emulated -#define MIPS_CODE_EB "\x34\x21\x34\x56" // ori $at, $at, 0x3456; -#define MIPS_CODE_EL "\x56\x34\x21\x34" // ori $at, $at, 0x3456; - -// memory address where emulation starts -#define ADDRESS 0x10000 - -static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); -} - -static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); -} - -static void test_mips_eb(void) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2; - - int r1 = 0x6789; // R1 register - - printf("Emulate MIPS code (big-endian)\n"); - - // Initialize emulator in MIPS mode - err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u (%s)\n", - err, uc_strerror(err)); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, MIPS_CODE_EB, sizeof(MIPS_CODE_EB) - 1); - - // initialize machine registers - uc_reg_write(uc, UC_MIPS_REG_1, &r1); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, (uint64_t)1, (uint64_t)0); - - // tracing one instruction at ADDRESS with customized callback - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, (uint64_t)ADDRESS, (uint64_t)ADDRESS); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(MIPS_CODE_EB) - 1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned: %u (%s)\n", err, uc_strerror(err)); - } - - // now print out some registers - printf(">>> Emulation done. Below is the CPU context\n"); - - uc_reg_read(uc, UC_MIPS_REG_1, &r1); - printf(">>> R1 = 0x%x\n", r1); - - uc_close(uc); -} - -static void test_mips_el(void) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2; - - int r1 = 0x6789; // R1 register - - printf("===========================\n"); - printf("Emulate MIPS code (little-endian)\n"); - - // Initialize emulator in MIPS mode - err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u (%s)\n", - err, uc_strerror(err)); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, MIPS_CODE_EL, sizeof(MIPS_CODE_EL) - 1); - - // initialize machine registers - uc_reg_write(uc, UC_MIPS_REG_1, &r1); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, (uint64_t)1, (uint64_t)0); - - // tracing one instruction at ADDRESS with customized callback - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, (uint64_t)ADDRESS, (uint64_t)ADDRESS); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(MIPS_CODE_EL) - 1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned: %u (%s)\n", err, uc_strerror(err)); - } - - // now print out some registers - printf(">>> Emulation done. 
Below is the CPU context\n"); - - uc_reg_read(uc, UC_MIPS_REG_1, &r1); - printf(">>> R1 = 0x%x\n", r1); - - uc_close(uc); -} - -int main(int argc, char **argv, char **envp) -{ - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - - // test memleak - while(1) { - test_mips_eb(); - test_mips_el(); - } - - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - - return 0; -} diff --git a/tests/regress/memleak_sparc.c deleted file mode 100644 index b3dd81e7..00000000 --- a/tests/regress/memleak_sparc.c +++ /dev/null @@ -1,123 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh, 2015 */ - -/* Sample code to demonstrate how to emulate Sparc code */ - -// windows specific -#ifdef _MSC_VER -#include <io.h> -#include <windows.h> -#define PRIx64 "llX" -#ifdef DYNLOAD -#include "unicorn_dynload.h" -#else // DYNLOAD -#include <unicorn/unicorn.h> -#ifdef _WIN64 -#pragma comment(lib, "unicorn_staload64.lib") -#else // _WIN64 -#pragma comment(lib, "unicorn_staload.lib") -#endif // _WIN64 -#endif // DYNLOAD - -// posix specific -#else // _MSC_VER -#include <unicorn/unicorn.h> -#endif // _MSC_VER - - -// code to be emulated -#define SPARC_CODE "\x86\x00\x40\x02" // add %g1, %g2, %g3; -//#define SPARC_CODE "\xbb\x70\x00\x00" // illegal code - -// memory address where emulation starts -#define ADDRESS 0x10000 - -static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); -} - -static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); -} - -static void test_sparc(void) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2; - - int g1 = 0x1230; // G1 register - int g2 = 0x6789; // G2 register - int g3 = 0x5555; // G3 register - - printf("Emulate SPARC code\n"); - - // Initialize emulator in Sparc mode - err = uc_open(UC_ARCH_SPARC, UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u (%s)\n", - err, uc_strerror(err)); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - uc_mem_write(uc, ADDRESS, SPARC_CODE, sizeof(SPARC_CODE) - 1); - - // initialize machine registers - uc_reg_write(uc, UC_SPARC_REG_G1, &g1); - uc_reg_write(uc, UC_SPARC_REG_G2, &g2); - uc_reg_write(uc, UC_SPARC_REG_G3, &g3); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, (uint64_t)1, (uint64_t)0); - - // tracing all instructions with customized callback - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, (uint64_t)1, (uint64_t)0); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(SPARC_CODE) - 1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned: %u (%s)\n", - err, uc_strerror(err)); - } - - // now print out some registers - printf(">>> Emulation done. 
Below is the CPU context\n"); - - uc_reg_read(uc, UC_SPARC_REG_G3, &g3); - printf(">>> G3 = 0x%x\n", g3); - - uc_close(uc); -} - -int main(int argc, char **argv, char **envp) -{ - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - - // test memleak - while(1) { - test_sparc(); - } - - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - - return 0; -} diff --git a/tests/regress/memleak_x86.c deleted file mode 100644 index a336af45..00000000 --- a/tests/regress/memleak_x86.c +++ /dev/null @@ -1,304 +0,0 @@ -/* Unicorn Emulator Engine */ -/* By Nguyen Anh Quynh & Dang Hoang Vu, 2015 */ - -/* Sample code to demonstrate how to emulate X86 code */ - -// windows specific -#ifdef _MSC_VER -#include <io.h> -#include <windows.h> -#define PRIx64 "llX" -#ifdef DYNLOAD -#include "unicorn_dynload.h" -#else // DYNLOAD -#include <unicorn/unicorn.h> -#ifdef _WIN64 -#pragma comment(lib, "unicorn_staload64.lib") -#else // _WIN64 -#pragma comment(lib, "unicorn_staload.lib") -#endif // _WIN64 -#endif // DYNLOAD - -// posix specific -#else // _MSC_VER -#include <unicorn/unicorn.h> -#endif // _MSC_VER - -// common includes -#include <string.h> - - -// code to be emulated -#define X86_CODE32 "\x41\x4a" // INC ecx; DEC edx -#define X86_CODE32_JUMP "\xeb\x02\x90\x90\x90\x90\x90\x90" // jmp 4; nop; nop; nop; nop; nop; nop -// #define X86_CODE32_SELF "\xeb\x1c\x5a\x89\xd6\x8b\x02\x66\x3d\xca\x7d\x75\x06\x66\x05\x03\x03\x89\x02\xfe\xc2\x3d\x41\x41\x41\x41\x75\xe9\xff\xe6\xe8\xdf\xff\xff\xff\x31\xd2\x6a\x0b\x58\x99\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xca\x7d\x41\x41\x41\x41" -//#define X86_CODE32 "\x51\x51\x51\x51" // PUSH ecx; -#define X86_CODE32_LOOP "\x41\x4a\xeb\xfe" // INC ecx; DEC edx; JMP self-loop -#define X86_CODE32_MEM_WRITE "\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a" // mov [0xaaaaaaaa], ecx; INC ecx; DEC edx -#define X86_CODE32_MEM_READ "\x8B\x0D\xAA\xAA\xAA\xAA\x41\x4a" // mov ecx,[0xaaaaaaaa]; INC ecx; DEC edx - -#define X86_CODE32_JMP_INVALID "\xe9\xe9\xee\xee\xee\x41\x4a" // JMP outside; INC ecx; DEC edx -#define X86_CODE32_INOUT "\x41\xE4\x3F\x4a\xE6\x46\x43" // INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx - -//#define X86_CODE64 "\x41\xBC\x3B\xB0\x28\x2A \x49\x0F\xC9 \x90 \x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9" // <== still crash -//#define X86_CODE64 "\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9" -#define X86_CODE64 "\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9\x4D\x29\xF4\x49\x81\xC9\xF6\x8A\xC6\x53\x4D\x87\xED\x48\x0F\xAD\xD2\x49\xF7\xD4\x48\xF7\xE1\x4D\x19\xC5\x4D\x89\xC5\x48\xF7\xD6\x41\xB8\x4F\x8D\x6B\x59\x4D\x87\xD0\x68\x6A\x1E\x09\x3C\x59" -#define X86_CODE16 "\x00\x00" // add byte ptr [bx + si], al -#define X86_CODE64_SYSCALL "\x0f\x05" // SYSCALL - -// memory address where emulation starts -#define ADDRESS 0x1000000 - -// callback for tracing basic blocks -static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); -} - -// callback for tracing instruction -static void 
hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - int eflags; - printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); - - uc_reg_read(uc, UC_X86_REG_EFLAGS, &eflags); - printf(">>> --- EFLAGS is 0x%x\n", eflags); - - // Uncomment below code to stop the emulation using uc_emu_stop() - // if (address == 0x1000009) - // uc_emu_stop(uc); -} - -// callback for tracing instruction -static void hook_code64(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - uint64_t rip; - - uc_reg_read(uc, UC_X86_REG_RIP, &rip); - printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); - printf(">>> RIP is 0x%"PRIx64 "\n", rip); - - // Uncomment below code to stop the emulation using uc_emu_stop() - // if (address == 0x1000009) - // uc_emu_stop(uc); -} - -static void hook_mem64(uc_engine *uc, uc_mem_type type, - uint64_t address, int size, int64_t value, void *user_data) -{ - switch(type) { - default: break; - case UC_MEM_READ: - printf(">>> Memory is being READ at 0x%"PRIx64 ", data size = %u\n", - address, size); - break; - case UC_MEM_WRITE: - printf(">>> Memory is being WRITE at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", - address, size, value); - break; - } -} - -static void test_i386(void) -{ - uc_engine *uc; - uc_err err; - uint32_t tmp; - uc_hook trace1, trace2; - - int r_ecx = 0x1234; // ECX register - int r_edx = 0x7890; // EDX register - - printf("Emulate i386 code\n"); - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u\n", err); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { - printf("Failed to write emulation code to memory, quit!\n"); - return; - } - - // initialize machine registers - uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); - uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, (uint64_t)1, (uint64_t)0); - - // tracing all instruction by having @begin > @end - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, (uint64_t)1, (uint64_t)0); - - // emulate machine code in infinite time - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned %u: %s\n", - err, uc_strerror(err)); - } - - // now print out some registers - printf(">>> Emulation done. 
Below is the CPU context\n"); - - uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); - uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); - printf(">>> ECX = 0x%x\n", r_ecx); - printf(">>> EDX = 0x%x\n", r_edx); - - // read from memory - if (!uc_mem_read(uc, ADDRESS, &tmp, sizeof(tmp))) - printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", ADDRESS, tmp); - else - printf(">>> Failed to read 4 bytes from [0x%x]\n", ADDRESS); - - uc_close(uc); -} - -static void test_x86_64(void) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2, trace3, trace4; - - int64_t rax = 0x71f3029efd49d41d; - int64_t rbx = 0xd87b45277f133ddb; - int64_t rcx = 0xab40d1ffd8afc461; - int64_t rdx = 0x919317b4a733f01; - int64_t rsi = 0x4c24e753a17ea358; - int64_t rdi = 0xe509a57d2571ce96; - int64_t r8 = 0xea5b108cc2b9ab1f; - int64_t r9 = 0x19ec097c8eb618c1; - int64_t r10 = 0xec45774f00c5f682; - int64_t r11 = 0xe17e9dbec8c074aa; - int64_t r12 = 0x80f86a8dc0f6d457; - int64_t r13 = 0x48288ca5671c5492; - int64_t r14 = 0x595f72f6e4017f6e; - int64_t r15 = 0x1efd97aea331cccc; - - int64_t rsp = ADDRESS + 0x200000; - - - printf("Emulate x86_64 code\n"); - - // Initialize emulator in X86-64bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); - if (err) { - printf("Failed on uc_open() with error returned: %u\n", err); - return; - } - - // map 2MB memory for this emulation - uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); - - // write machine code to be emulated to memory - if (uc_mem_write(uc, ADDRESS, X86_CODE64, sizeof(X86_CODE64) - 1)) { - printf("Failed to write emulation code to memory, quit!\n"); - return; - } - - // initialize machine registers - uc_reg_write(uc, UC_X86_REG_RSP, &rsp); - - uc_reg_write(uc, UC_X86_REG_RAX, &rax); - uc_reg_write(uc, UC_X86_REG_RBX, &rbx); - uc_reg_write(uc, UC_X86_REG_RCX, &rcx); - uc_reg_write(uc, UC_X86_REG_RDX, &rdx); - uc_reg_write(uc, UC_X86_REG_RSI, &rsi); - uc_reg_write(uc, UC_X86_REG_RDI, &rdi); - uc_reg_write(uc, UC_X86_REG_R8, &r8); - uc_reg_write(uc, UC_X86_REG_R9, &r9); - uc_reg_write(uc, UC_X86_REG_R10, &r10); - uc_reg_write(uc, UC_X86_REG_R11, &r11); - uc_reg_write(uc, UC_X86_REG_R12, &r12); - uc_reg_write(uc, UC_X86_REG_R13, &r13); - uc_reg_write(uc, UC_X86_REG_R14, &r14); - uc_reg_write(uc, UC_X86_REG_R15, &r15); - - // tracing all basic blocks with customized callback - uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, (uint64_t)1, (uint64_t)0); - - // tracing all instructions in the range [ADDRESS, ADDRESS+20] - uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code64, NULL, (uint64_t)ADDRESS, (uint64_t)(ADDRESS+20)); - - // tracing all memory WRITE access (with @begin > @end) - uc_hook_add(uc, &trace3, UC_HOOK_MEM_WRITE, hook_mem64, NULL, (uint64_t)1, (uint64_t)0); - - // tracing all memory READ access (with @begin > @end) - uc_hook_add(uc, &trace4, UC_HOOK_MEM_READ, hook_mem64, NULL, (uint64_t)1, (uint64_t)0); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE64) - 1, 0, 0); - if (err) { - printf("Failed on uc_emu_start() with error returned %u: %s\n", - err, uc_strerror(err)); - } - - // now print out some registers - printf(">>> Emulation done. 
Below is the CPU context\n"); - - uc_reg_read(uc, UC_X86_REG_RAX, &rax); - uc_reg_read(uc, UC_X86_REG_RBX, &rbx); - uc_reg_read(uc, UC_X86_REG_RCX, &rcx); - uc_reg_read(uc, UC_X86_REG_RDX, &rdx); - uc_reg_read(uc, UC_X86_REG_RSI, &rsi); - uc_reg_read(uc, UC_X86_REG_RDI, &rdi); - uc_reg_read(uc, UC_X86_REG_R8, &r8); - uc_reg_read(uc, UC_X86_REG_R9, &r9); - uc_reg_read(uc, UC_X86_REG_R10, &r10); - uc_reg_read(uc, UC_X86_REG_R11, &r11); - uc_reg_read(uc, UC_X86_REG_R12, &r12); - uc_reg_read(uc, UC_X86_REG_R13, &r13); - uc_reg_read(uc, UC_X86_REG_R14, &r14); - uc_reg_read(uc, UC_X86_REG_R15, &r15); - - printf(">>> RAX = 0x%" PRIx64 "\n", rax); - printf(">>> RBX = 0x%" PRIx64 "\n", rbx); - printf(">>> RCX = 0x%" PRIx64 "\n", rcx); - printf(">>> RDX = 0x%" PRIx64 "\n", rdx); - printf(">>> RSI = 0x%" PRIx64 "\n", rsi); - printf(">>> RDI = 0x%" PRIx64 "\n", rdi); - printf(">>> R8 = 0x%" PRIx64 "\n", r8); - printf(">>> R9 = 0x%" PRIx64 "\n", r9); - printf(">>> R10 = 0x%" PRIx64 "\n", r10); - printf(">>> R11 = 0x%" PRIx64 "\n", r11); - printf(">>> R12 = 0x%" PRIx64 "\n", r12); - printf(">>> R13 = 0x%" PRIx64 "\n", r13); - printf(">>> R14 = 0x%" PRIx64 "\n", r14); - printf(">>> R15 = 0x%" PRIx64 "\n", r15); - - uc_close(uc); -} - -int main(int argc, char **argv, char **envp) -{ - // dynamically load shared library -#ifdef DYNLOAD - if (!uc_dyn_load(NULL, 0)) { - printf("Error dynamically loading shared library.\n"); - printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); - printf("any other dependent dll/so files.\n"); - printf("The easiest way is to place them in the same directory as this app.\n"); - return 1; - } -#endif - - while(1) { - test_i386(); - test_x86_64(); - } - - // dynamically free shared library -#ifdef DYNLOAD - uc_dyn_free(); -#endif - - return 0; -} diff --git a/tests/regress/mips_cp1.py b/tests/regress/mips_cp1.py new file mode 100644 index 00000000..221bb24a --- /dev/null +++ b/tests/regress/mips_cp1.py @@ -0,0 +1,13 @@ +from unicorn import * +from unicorn.mips_const import * + + + +# .text:00416CB0 cfc1 $v1, FCSR +shellcode = [0x44, 0x43, 0xF8, 0x00] +base = 0x416CB0 + +uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN) +uc.mem_map(0x416000, 0x1000) +uc.mem_write(base, bytes(shellcode)) +uc.emu_start(base, base + len(shellcode)) \ No newline at end of file diff --git a/tests/regress/x86_ld_crash.py b/tests/regress/x86_ld_crash.py new file mode 100755 index 00000000..75a0f818 --- /dev/null +++ b/tests/regress/x86_ld_crash.py @@ -0,0 +1,21 @@ +from unicorn import * +from unicorn.x86_const import * +from capstone import * + +# extract from ld.so +shellcode = [0x55, 0x89, 0xE5, 0x57, 0x56, 0x53, 0xE8, 0xE0, 0x9F, 0x01, 0x00, 0x81, 0xC3, 0xF5, 0x57, 0x02, 0x00, 0x83, 0xEC, 0x3C, 0x0F, 0x31, 0x89, 0x93, 0xE4, 0xF8, 0xFF, 0xFF, 0x8D, 0x93, 0x34, 0xFF, 0xFF, 0xFF, 0x89, 0x83, 0xE0, 0xF8, 0xFF, 0xFF, 0x8B, 0x83, 0x34, 0xFF, 0xFF, 0xFF, 0x89, 0xD6, 0x2B, 0xB3, 0x00, 0x00, 0x00, 0x00, 0x89, 0x93, 0x60, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x89, 0xB3, 0x58, 0x05, 0x00, 0x00, 0x74, 0x5A, 0xBF, 0xFF, 0xFF, 0xFF, 0x6F, 0xEB, 0x1C, 0x8D, 0x76, 0x00, 0xB9, 0x21, 0x00, 0x00, 0x70, 0x29, 0xC1, 0x89, 0xC8, 0x89, 0x94, 0x83, 0x78, 0x05, 0x00, 0x00, 0x83, 0xC2, 0x08, 0x8B, 0x02, 0x85, 0xC0, 0x74, 0x37, 0x83, 0xF8, 0x21, 0x76, 0xEB, 0x89, 0xF9, 0x29, 0xC1, 0x83, 0xF9, 0x0F, 0x76, 0xD9, 0x8D, 0x0C, 0x00, 0xD1, 0xF9, 0x83, 0xF9, 0xFC, 0x0F, 0x86, 0xDB, 0x02, 0x00, 0x00, 0xF7, 0xD1, 0x89, 0x94, 0x8B, 0x40, 0x06, 0x00, 0x00, 0x83, 0xC2, 0x08, 0x8B, 0x02, 0x85, 0xC0, 0x75, 0xD2, 
0x89, 0xF6, 0x8D, 0xBC, 0x27, 0x00, 0x00, 0x00, 0x00, 0x85, 0xF6, 0x74, 0x68, 0x8B, 0x83, 0x88, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0x84, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0x8C, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0x90, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0xBC, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0xD4, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0x3C, 0x06, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x83, 0xA4, 0x06, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x03, 0x01, 0x70, 0x04, 0x8B, 0x93, 0xC8, 0x05, 0x00, 0x00, 0x85, 0xD2, 0x74, 0x0A, 0x83, 0x7A, 0x04, 0x11, 0x0F, 0x85, 0x24, 0x04, 0x00, 0x00, 0x8B, 0x83, 0xBC, 0x05, 0x00, 0x00, 0x85, 0xC0, 0x74, 0x10, 0x8B, 0x8B, 0xC4, 0x05, 0x00, 0x00, 0x83, 0x79, 0x04, 0x08, 0x0F, 0x85, 0xEB, 0x03, 0x00, 0x00, 0x8B, 0x8B, 0x10, 0x06, 0x00, 0x00, 0x85, 0xC9, 0x74, 0x0D, 0xF7, 0x41, 0x04, 0xFE, 0xFF, 0xFF, 0xFF, 0x0F, 0x85, 0xB5, 0x03, 0x00, 0x00, 0x8B, 0x8B, 0xF0, 0x05, 0x00, 0x00, 0x85, 0xC9, 0x74, 0x0D, 0xF7, 0x41, 0x04, 0xF7, 0xFF, 0xFF, 0xFF, 0x0F, 0x85, 0x7F, 0x03, 0x00, 0x00, 0x8B, 0x8B, 0xEC, 0x05, 0x00, 0x00, 0x85, 0xC9, 0x0F, 0x85, 0x52, 0x03, 0x00, 0x00, 0x8B, 0xBB, 0xB4, 0x05, 0x00, 0x00, 0x85, 0xFF, 0x0F, 0x85, 0x25, 0x03, 0x00, 0x00, 0x85, 0xF6, 0x0F, 0x85, 0xA4, 0x00, 0x00, 0x00, 0x8B, 0x8B, 0x74, 0x06, 0x00, 0x00, 0x85, 0xC9, 0x0F, 0x84, 0x96, 0x00, 0x00, 0x00, 0x8D, 0xB3, 0x58, 0x05, 0x00, 0x00, 0x83, 0xEC, 0x0C, 0x80, 0x8B, 0xEC, 0x06, 0x00, 0x00, 0x04, 0x56, 0xE8, 0x00, 0x95, 0x00, 0x00, 0x8D, 0x83, 0x00, 0x90, 0xFD, 0xFF, 0x89, 0xB3, 0x6C, 0x05, 0x00, 0x00, 0x89, 0x83, 0x04, 0x07, 0x00, 0x00, 0x8D, 0x83, 0x38, 0x09, 0x00, 0x00, 0x89, 0x83, 0x08, 0x07, 0x00, 0x00, 0x8D, 0x83, 0xFB, 0x47, 0xFF, 0xFF, 0x89, 0x83, 0x0C, 0x07, 0x00, 0x00, 0x0F, 0x31, 0x89, 0x83, 0x40, 0x05, 0x00, 0x00, 0x89, 0x93, 0x44, 0x05, 0x00, 0x00, 0x58, 0x8D, 0x83, 0x30, 0xAE, 0xFD, 0xFF, 0x89, 0xAB, 0x2C, 0xFF, 0xFF, 0xFF, 0x5A, 0x50, 0xFF, 0x75, 0x08, 0xE8, 0x81, 0x61, 0x01, 0x00, 0x89, 0xC6, 0x0F, 0x31, 0x2B, 0x83, 0xE0, 0xF8, 0xFF, 0xFF, 0x1B, 0x93, 0xE4, 0xF8, 0xFF, 0xFF, 0x83, 0xC4, 0x10, 0xF6, 0x83, 0x00, 0xF9, 0xFF, 0xFF, 0x80, 0x89, 0x45, 0xE0, 0x89, 0x55, 0xE4, 0x0F, 0x85, 0x53, 0x02, 0x00, 0x00, 0x8D, 0x65, 0xF4, 0x89, 0xF0, 0x5B, 0x5E, 0x5F, 0x5D, 0xC3, 0x90, 0x85, 0xC0, 0x0F, 0x84, 0x18, 0x02, 0x00, 0x00, 0x8B, 0x48, 0x04, 0x8B, 0x83, 0xC0, 0x05, 0x00, 0x00, 0x8B, 0x78, 0x04, 0x8B, 0x83, 0x14, 0x06, 0x00, 0x00, 0x89, 0x4D, 0xC8, 0x89, 0x4D, 0xD4, 0x89, 0x45, 0xC4, 0x8B, 0x45, 0xC4, 0x89, 0x7D, 0xD0, 0x01, 0xCF, 0x89, 0x7D, 0xCC, 0x89, 0xCF, 0x85, 0xC0, 0x74, 0x06, 0x8B, 0x48, 0x04, 0x8D, 0x0C, 0xCF, 0x85, 0xD2, 0x74, 0x2E, 0x8B, 0x83, 0xD4, 0x05, 0x00, 0x00, 0x8B, 0x93, 0x80, 0x05, 0x00, 0x00, 0x8B, 0x78, 0x04, 0x8B, 0x52, 0x04, 0x8B, 0x45, 0xD0, 0x01, 0xD7, 0x29, 0xD0, 0x3B, 0x7D, 0xCC, 0x89, 0x45, 0xC4, 0x8B, 0x45, 0xD0, 0x0F, 0x44, 0x45, 0xC4, 0x03, 0x55, 0xC8, 0x01, 0xD0, 0x89, 0x45, 0xCC, 0x8B, 0x93, 0x90, 0x05, 0x00, 0x00, 0x39, 0x4D, 0xD4, 0x8B, 0x42, 0x04, 0x89, 0x45, 0xC8, 0x73, 0x32, 0x8B, 0x45, 0xD4, 0x89, 0xF2, 0x03, 0x10, 0x80, 0x78, 0x04, 0x08, 0x0F, 0x85, 0xCC, 0x01, 0x00, 0x00, 0x8B, 0x45, 0xD4, 0xEB, 0x13, 0x90, 0x8D, 0x74, 0x26, 0x00, 0x8B, 0x10, 0x01, 0xF2, 0x80, 0x78, 0x04, 0x08, 0x0F, 0x85, 0xB4, 0x01, 0x00, 0x00, 0x83, 0xC0, 0x08, 0x01, 0x32, 0x39, 0xC8, 0x72, 0xE9, 0x8B, 0xBB, 0x3C, 0x06, 0x00, 0x00, 0x85, 0xFF, 
0x0F, 0x84, 0x73, 0x02, 0x00, 0x00, 0x8D, 0x05, 0x40, 0x00, 0x00, 0x00, 0x39, 0x4D, 0xCC, 0x89, 0x45, 0xD0, 0x8D, 0x83, 0xE0, 0xFB, 0xFE, 0xFF, 0x89, 0x45, 0xC0, 0x0F, 0x86, 0x92, 0xFE, 0xFF, 0xFF, 0x89, 0x75, 0xC4, 0x90, 0x8D, 0x74, 0x26, 0x00, 0x8B, 0x7D, 0xC4, 0x03, 0x39, 0x8B, 0x41, 0x04, 0x8B, 0x55, 0xD0, 0x89, 0x7D, 0xD4, 0x89, 0xC7, 0x0F, 0xB6, 0xF0, 0xC1, 0xEF, 0x08, 0xC1, 0xE7, 0x04, 0x03, 0x7D, 0xC8, 0x8B, 0x47, 0x04, 0x03, 0x84, 0x1A, 0x18, 0x05, 0x00, 0x00, 0x0F, 0xB6, 0x57, 0x0C, 0x83, 0xE2, 0x0F, 0x80, 0xFA, 0x0A, 0x0F, 0x84, 0xEA, 0x00, 0x00, 0x00, 0x83, 0xEE, 0x06, 0x83, 0xFE, 0x23, 0x77, 0x4A, 0x8B, 0x94, 0xB3, 0x00, 0x48, 0xFF, 0xFF, 0x01, 0xDA, 0xFF, 0xE2, 0x8D, 0xB4, 0x26, 0x00, 0x00, 0x00, 0x00, 0xB9, 0xFF, 0xFD, 0xFF, 0x6F, 0x29, 0xC1, 0x83, 0xF9, 0x0B, 0x0F, 0x87, 0xA0, 0x00, 0x00, 0x00, 0xF7, 0xD8, 0x89, 0x94, 0x83, 0x48, 0xFE, 0xFF, 0xBF, 0xE9, 0xE2, 0xFC, 0xFF, 0xFF, 0x66, 0x90, 0x8B, 0x45, 0xD0, 0x8B, 0x75, 0xD4, 0x8B, 0x84, 0x18, 0x58, 0x07, 0x00, 0x00, 0x03, 0x06, 0x2B, 0x47, 0x04, 0x89, 0x06, 0x8D, 0x74, 0x26, 0x00, 0x83, 0xC1, 0x08, 0x39, 0x4D, 0xCC, 0x0F, 0x87, 0x6C, 0xFF, 0xFF, 0xFF, 0xE9, 0xF1, 0xFD, 0xFF, 0xFF, 0x8D, 0xB4, 0x26, 0x00, 0x00, 0x00, 0x00, 0x8B, 0x75, 0xD4, 0x8B, 0x46, 0x04, 0x03, 0x47, 0x04, 0x8B, 0x7D, 0xD0, 0x2B, 0x84, 0x1F, 0x58, 0x07, 0x00, 0x00, 0x89, 0x46, 0x04, 0x8B, 0x45, 0xC0, 0x89, 0x06, 0xEB, 0xCB, 0x8D, 0x76, 0x00, 0x8B, 0x7D, 0xD4, 0x89, 0x07, 0xEB, 0xC1, 0x89, 0xF6, 0x8D, 0xBC, 0x27, 0x00, 0x00, 0x00, 0x00] +baseaddr = 0x47bb800 +crash_point = 0x47bba6e # mov eax, [ebx + 0x5d4] + +uc = Uc(UC_ARCH_X86, UC_MODE_32) + +uc.mem_map(baseaddr - baseaddr%0x1000, 0x4000) # code +uc.mem_map(0x8000, 0x1000) # stack +uc.mem_map(0x1000, 0x1000) # [ebx + 0x5d4] +uc.mem_write(0x1000 + 0x5d4, b"\xff\xff\xff\xff") +uc.mem_write(baseaddr, bytes(shellcode)) +uc.reg_write(UC_X86_REG_EIP, crash_point) +uc.reg_write(UC_X86_REG_ESP, 0x90000) +uc.reg_write(UC_X86_REG_EBX, 0x1000) +uc.emu_start(begin=crash_point, until=0, count=2) +print(f"eax={bytes(uc.mem_read(0x1000+ 0x5d4, 4))}") \ No newline at end of file diff --git a/tests/regress/x86_set_ip.py b/tests/regress/x86_set_ip.py new file mode 100644 index 00000000..b8ef60ae --- /dev/null +++ b/tests/regress/x86_set_ip.py @@ -0,0 +1,21 @@ +from unicorn import * +from unicorn.x86_const import * + +count = 0 + +def cb(uc, addr, sz, data): + global count + count += 1 + print(f"addr: {hex(addr)} count: {count}") + if count == 5: + uc.emu_stop() + else: + uc.reg_write(UC_X86_REG_RIP, 0x2000) + +mu = Uc(UC_ARCH_X86, UC_MODE_64) + +mu.mem_map(0x1000, 0x4000) +mu.mem_write(0x1000, b"\x90" * 5) +mu.mem_write(0x2000, b"\x90" * 5) +mu.hook_add(UC_HOOK_CODE, cb) +mu.emu_start(0x1000, 0x2000+1, 0, 0) diff --git a/tests/unit/.gitignore b/tests/unit/.gitignore deleted file mode 100644 index d40da5f7..00000000 --- a/tests/unit/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -!*.c -test_* -*.bin diff --git a/tests/unit/Makefile b/tests/unit/Makefile deleted file mode 100644 index c0704d34..00000000 --- a/tests/unit/Makefile +++ /dev/null @@ -1,65 +0,0 @@ -CFLAGS += -Wall -Werror -Wno-unused-function -g -CFLAGS += -D__USE_MINGW_ANSI_STDIO=1 -CFLAGS += -L ../../ -I ../../include -CFLAGS += -L ../../cmocka/src -I ../../cmocka/include -CFLAGS += -L /usr/local/lib -I /usr/local/include -ASFLAGS += --32 -OBJCOPY = objcopy - -UNAME_S := $(shell uname -s) -UNAME_M := $(shell uname -m) -LDLIBS += -pthread -ifeq ($(UNAME_S), Linux) -LDLIBS += -lrt -else ifeq ($(UNAME_S), Darwin) -OBJCOPY = gobjcopy -ASFLAGS = -arch i386 -endif - 
-LDLIBS += -lcmocka -lunicorn - -EXECUTE_VARS = LD_LIBRARY_PATH=../../cmocka/src:../../ DYLD_LIBRARY_PATH=../../ - -ifeq ($(UNICORN_ASAN),yes) -CC = clang -fsanitize=address -fno-omit-frame-pointer -CXX = clang++ -fsanitize=address -fno-omit-frame-pointer -AR = llvm-ar -LDFLAGS := -fsanitize=address ${LDFLAGS} -endif - -ALL_TESTS_SOURCES = $(wildcard *.c) -TEST_ASSEMBLY = $(wildcard *.s) -TEST_PROGS = $(TEST_ASSEMBLY:%.s=%.o) -TEST_BINS = $(TEST_PROGS:%.o=%.bin) -ALL_TESTS = $(ALL_TESTS_SOURCES:%.c=%) - -ifneq (,$(findstring x86,$(UNAME_M))) -ALL_TESTS += $(TEST_BINS) -endif - -.PHONY: all -all: ${ALL_TESTS} - -.PHONY: clean -clean: - rm -rf ${ALL_TESTS} - -%.bin: %.o - ${OBJCOPY} -O binary $^ $@ - hexdump -C $@ - -.PHONY: test -test: all - ${EXECUTE_VARS} ./test_sanity - ${EXECUTE_VARS} ./test_x86 - ${EXECUTE_VARS} ./test_mem_map - ${EXECUTE_VARS} ./test_mem_map_ptr - ${EXECUTE_VARS} ./test_mem_high - ${EXECUTE_VARS} ./test_multihook - ${EXECUTE_VARS} ./test_pc_change - ${EXECUTE_VARS} ./test_hookcounts - echo "skipping test_tb_x86" - echo "skipping test_x86_soft_paging" - echo "skipping test_hang" - echo "skipping test_x86_sh1_enter_leave" - echo "skipping test_x86_rip_bug" diff --git a/tests/unit/acutest.h b/tests/unit/acutest.h new file mode 100644 index 00000000..376748bd --- /dev/null +++ b/tests/unit/acutest.h @@ -0,0 +1,1837 @@ +/* + * Acutest -- Another C/C++ Unit Test facility + * + * + * Copyright 2013-2020 Martin Mitas + * Copyright 2019 Garrett D'Amore + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef ACUTEST_H +#define ACUTEST_H + + +/************************ + *** Public interface *** + ************************/ + +/* By default, "acutest.h" provides the main program entry point (function + * main()). However, if the test suite is composed of multiple source files + * which include "acutest.h", then this causes a problem of multiple main() + * definitions. To avoid this problem, #define macro TEST_NO_MAIN in all + * compilation units but one. + */ + +/* Macro to specify list of unit tests in the suite. + * The unit test implementation MUST provide list of unit tests it implements + * with this macro: + * + * TEST_LIST = { + * { "test1_name", test1_func_ptr }, + * { "test2_name", test2_func_ptr }, + * ... + * { NULL, NULL } // zeroed record marking the end of the list + * }; + * + * The list specifies names of each test (must be unique) and pointer to + * a function implementing it. 
The function does not take any arguments + * and has no return values, i.e. every test function has to be compatible + * with this prototype: + * + * void test_func(void); + * + * Note the list has to be ended with a zeroed record. + */ +#define TEST_LIST const struct acutest_test_ acutest_list_[] + + +/* Macros for testing whether a unit test succeeds or fails. These macros + * can be used arbitrarily in functions implementing the unit tests. + * + * If any condition fails throughout execution of a test, the test fails. + * + * TEST_CHECK takes only one argument (the condition); TEST_CHECK_ also allows + * you to specify an error message to print out if the condition fails. + * (It expects a printf-like format string and its parameters.) The macros + * return non-zero (condition passes) or 0 (condition fails). + * + * That can be useful when more conditions should be checked only if some + * preceding condition passes, as illustrated in this code snippet: + * + * SomeStruct* ptr = allocate_some_struct(); + * if(TEST_CHECK(ptr != NULL)) { + * TEST_CHECK(ptr->member1 < 100); + * TEST_CHECK(ptr->member2 > 200); + * } + */ +#define TEST_CHECK_(cond,...) acutest_check_((cond), __FILE__, __LINE__, __VA_ARGS__) +#define TEST_CHECK(cond) acutest_check_((cond), __FILE__, __LINE__, "%s", #cond) + + +/* These macros are the same as TEST_CHECK_ and TEST_CHECK except that if the + * condition fails, the currently executed unit test is immediately aborted. + * + * That is done either by calling abort() if the unit test is executed as a + * child process; or via longjmp() if the unit test is executed within the + * main Acutest process. + * + * As a side effect of such abortion, your unit tests may cause memory leaks, + * unflushed file descriptors, and other phenomena caused by the abortion. + * + * Therefore you should not use these as a general replacement for TEST_CHECK. + * Use them with some caution, especially if your test causes some other side + * effects to the outside world (e.g. communicating with some server, inserting + * into a database etc.). + */ +#define TEST_ASSERT_(cond,...) \ + do { \ + if(!acutest_check_((cond), __FILE__, __LINE__, __VA_ARGS__)) \ + acutest_abort_(); \ + } while(0) +#define TEST_ASSERT(cond) \ + do { \ + if(!acutest_check_((cond), __FILE__, __LINE__, "%s", #cond)) \ + acutest_abort_(); \ + } while(0) + + +#ifdef __cplusplus +/* Macros to verify that the code (the 1st argument) throws an exception of the given + * type (the 2nd argument). (Note these macros are only available in C++.) + * + * TEST_EXCEPTION_ is like TEST_EXCEPTION but accepts a custom printf-like + * message. + * + * For example: + * + * TEST_EXCEPTION(function_that_throw(), ExpectedExceptionType); + * + * If function_that_throw() throws ExpectedExceptionType, the check passes. + * If the function throws anything incompatible with ExpectedExceptionType + * (or if it does not throw an exception at all), the check fails. + */ +#define TEST_EXCEPTION(code, exctype) \ + do { \ + bool exc_ok_ = false; \ + const char *msg_ = NULL; \ + try { \ + code; \ + msg_ = "No exception thrown."; \ + } catch(exctype const&) { \ + exc_ok_= true; \ + } catch(...) { \ + msg_ = "Unexpected exception thrown."; \ + } \ + acutest_check_(exc_ok_, __FILE__, __LINE__, #code " throws " #exctype);\ + if(msg_ != NULL) \ + acutest_message_("%s", msg_); \ + } while(0) +#define TEST_EXCEPTION_(code, exctype, ...)
\ + do { \ + bool exc_ok_ = false; \ + const char *msg_ = NULL; \ + try { \ + code; \ + msg_ = "No exception thrown."; \ + } catch(exctype const&) { \ + exc_ok_= true; \ + } catch(...) { \ + msg_ = "Unexpected exception thrown."; \ + } \ + acutest_check_(exc_ok_, __FILE__, __LINE__, __VA_ARGS__); \ + if(msg_ != NULL) \ + acutest_message_("%s", msg_); \ + } while(0) +#endif /* #ifdef __cplusplus */ + + +/* Sometimes it is useful to split execution of more complex unit tests into + * smaller parts and associate those parts with some names. + * + * This is especially handy if the given unit test is implemented as a loop + * over some vector of multiple testing inputs. Using these macros allows using + * a sort of subtitle for each iteration of the loop (e.g. outputting the input + * itself or a name associated to it), so that if any TEST_CHECK condition + * fails in the loop, it can be easily seen which iteration triggers the + * failure, without the need to manually output the iteration-specific data in + * every single TEST_CHECK inside the loop body. + * + * TEST_CASE allows specifying only a single string as the name of the case; + * TEST_CASE_ provides all the power of printf-like string formatting. + * + * Note that the test cases cannot be nested. Starting a new test case + * implicitly ends the previous one. To end the test case explicitly (e.g. to end + * the last test case after exiting the loop), you may use TEST_CASE(NULL). + */ +#define TEST_CASE_(...) acutest_case_(__VA_ARGS__) +#define TEST_CASE(name) acutest_case_("%s", name) + + +/* Maximal output per TEST_CASE call. Longer messages are cut. + * You may define another limit prior to including "acutest.h" + */ +#ifndef TEST_CASE_MAXSIZE + #define TEST_CASE_MAXSIZE 64 +#endif + + +/* printf-like macro for outputting extra information about a failure. + * + * Intended use is to output some computed output versus the expected value, + * e.g. like this: + * + * if(!TEST_CHECK(produced == expected)) { + * TEST_MSG("Expected: %d", expected); + * TEST_MSG("Produced: %d", produced); + * } + * + * Note the message is only written down if the most recent use of any checking + * macro (like e.g. TEST_CHECK or TEST_EXCEPTION) in the current test failed. + * This means the above is equivalent to just this: + * + * TEST_CHECK(produced == expected); + * TEST_MSG("Expected: %d", expected); + * TEST_MSG("Produced: %d", produced); + * + * The macro can deal with multi-line output fairly well. It also automatically + * adds a final new-line if there is none present. + */ +#define TEST_MSG(...) acutest_message_(__VA_ARGS__) + + +/* Maximal output per TEST_MSG call. Longer messages are cut. + * You may define another limit prior to including "acutest.h" + */ +#ifndef TEST_MSG_MAXSIZE + #define TEST_MSG_MAXSIZE 1024 +#endif + + +/* Macro for dumping a block of memory. + * + * Its intended use is very similar to what TEST_MSG is for, but instead of + * generating any printf-like message, this is for dumping a raw block of + * memory in hexadecimal form: + * + * TEST_CHECK(size_produced == size_expected && + * memcmp(addr_produced, addr_expected, size_produced) == 0); + * TEST_DUMP("Expected:", addr_expected, size_expected); + * TEST_DUMP("Produced:", addr_produced, size_produced); + */ +#define TEST_DUMP(title, addr, size) acutest_dump_(title, addr, size) + +/* Maximal output per TEST_DUMP call (in bytes to dump). Longer blocks are cut.
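+ * (acutest_dump_() prints at most TEST_DUMP_MAXSIZE bytes of the block and then reports how many trailing bytes were omitted.)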
+ * You may define another limit prior to including "acutest.h" + */ +#ifndef TEST_DUMP_MAXSIZE + #define TEST_DUMP_MAXSIZE 1024 +#endif + + +/* Common test initialization/clean-up + * + * In some test suites, it may be necessary to perform the same sort of + * initialization and/or clean-up in all the tests. + * + * Such test suites may use the macros TEST_INIT and/or TEST_FINI prior to including + * this header. The expansion of the macro is then used as the body of a helper + * function called just before executing every single unit test (TEST_INIT) or just after + * it ends (TEST_FINI). + * + * Examples of various ways to use the macro TEST_INIT: + * + * #define TEST_INIT my_init_func(); + * #define TEST_INIT my_init_func() // Works even without the semicolon + * #define TEST_INIT setlocale(LC_ALL, NULL); + * #define TEST_INIT { setlocale(LC_ALL, NULL); my_init_func(); } + * + * TEST_FINI is to be used in the same way. + */ + + +/********************** + *** Implementation *** + **********************/ + +/* The unit test files should not rely on anything below. */ + +#include <ctype.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <setjmp.h> + +#if defined(unix) || defined(__unix__) || defined(__unix) || defined(__APPLE__) + #define ACUTEST_UNIX_ 1 + #include <errno.h> + #include <libgen.h> + #include <unistd.h> + #include <sys/types.h> + #include <sys/wait.h> + #include <signal.h> + #include <time.h> + + #if defined CLOCK_PROCESS_CPUTIME_ID && defined CLOCK_MONOTONIC + #define ACUTEST_HAS_POSIX_TIMER_ 1 + #endif +#endif + +#if defined(__gnu_linux__) || defined(__linux__) + #define ACUTEST_LINUX_ 1 + #include <fcntl.h> + #include <sys/stat.h> +#endif + +#if defined(_WIN32) || defined(__WIN32__) || defined(__WINDOWS__) + #define ACUTEST_WIN_ 1 + #include <windows.h> + #include <io.h> +#endif + +#if defined(__APPLE__) + #define ACUTEST_MACOS_ + #include <assert.h> + #include <stdbool.h> + #include <sys/types.h> + #include <unistd.h> + #include <sys/sysctl.h> +#endif + +#ifdef __cplusplus + #include <exception> +#endif + +#ifdef __has_include + #if __has_include(<valgrind.h>) + #include <valgrind.h> + #endif +#endif + +/* Enable the use of the non-standard keyword __attribute__ to silence warnings under some compilers */ +#if defined(__GNUC__) || defined(__clang__) + #define ACUTEST_ATTRIBUTE_(attr) __attribute__((attr)) +#else + #define ACUTEST_ATTRIBUTE_(attr) +#endif + +/* Note our global private identifiers end with '_' to mitigate the risk of clashes + * with the unit test implementation. */ + +#ifdef __cplusplus + extern "C" { +#endif + +#ifdef _MSC_VER + /* In multi-platform code like ours, we cannot use the non-standard + * "safe" functions from the Microsoft C lib like e.g. sprintf_s() instead of + * standard sprintf(). Hence, let's disable the warning C4996.
*/ + #pragma warning(push) + #pragma warning(disable: 4996) +#endif + + +struct acutest_test_ { + const char* name; + void (*func)(void); +}; + +struct acutest_test_data_ { + unsigned char flags; + double duration; +}; + +enum { + ACUTEST_FLAG_RUN_ = 1 << 0, + ACUTEST_FLAG_SUCCESS_ = 1 << 1, + ACUTEST_FLAG_FAILURE_ = 1 << 2, +}; + +extern const struct acutest_test_ acutest_list_[]; + +int acutest_check_(int cond, const char* file, int line, const char* fmt, ...); +void acutest_case_(const char* fmt, ...); +void acutest_message_(const char* fmt, ...); +void acutest_dump_(const char* title, const void* addr, size_t size); +void acutest_abort_(void) ACUTEST_ATTRIBUTE_(noreturn); + + +#ifndef TEST_NO_MAIN + +static char* acutest_argv0_ = NULL; +static size_t acutest_list_size_ = 0; +static struct acutest_test_data_* acutest_test_data_ = NULL; +static size_t acutest_count_ = 0; +static int acutest_no_exec_ = -1; +static int acutest_no_summary_ = 0; +static int acutest_tap_ = 0; +static int acutest_skip_mode_ = 0; +static int acutest_worker_ = 0; +static int acutest_worker_index_ = 0; +static int acutest_cond_failed_ = 0; +static int acutest_was_aborted_ = 0; +static FILE *acutest_xml_output_ = NULL; + +static int acutest_stat_failed_units_ = 0; +static int acutest_stat_run_units_ = 0; + +static const struct acutest_test_* acutest_current_test_ = NULL; +static int acutest_current_index_ = 0; +static char acutest_case_name_[TEST_CASE_MAXSIZE] = ""; +static int acutest_test_already_logged_ = 0; +static int acutest_case_already_logged_ = 0; +static int acutest_verbose_level_ = 2; +static int acutest_test_failures_ = 0; +static int acutest_colorize_ = 0; +static int acutest_timer_ = 0; + +static int acutest_abort_has_jmp_buf_ = 0; +static jmp_buf acutest_abort_jmp_buf_; + + +static void +acutest_cleanup_(void) +{ + free((void*) acutest_test_data_); +} + +static void ACUTEST_ATTRIBUTE_(noreturn) +acutest_exit_(int exit_code) +{ + acutest_cleanup_(); + exit(exit_code); +} + +#if defined ACUTEST_WIN_ + typedef LARGE_INTEGER acutest_timer_type_; + static LARGE_INTEGER acutest_timer_freq_; + static acutest_timer_type_ acutest_timer_start_; + static acutest_timer_type_ acutest_timer_end_; + + static void + acutest_timer_init_(void) + { + QueryPerformanceFrequency(&acutest_timer_freq_); + } + + static void + acutest_timer_get_time_(LARGE_INTEGER* ts) + { + QueryPerformanceCounter(ts); + } + + static double + acutest_timer_diff_(LARGE_INTEGER start, LARGE_INTEGER end) + { + double duration = (double)(end.QuadPart - start.QuadPart); + duration /= (double)acutest_timer_freq_.QuadPart; + return duration; + } + + static void + acutest_timer_print_diff_(void) + { + printf("%.6lf secs", acutest_timer_diff_(acutest_timer_start_, acutest_timer_end_)); + } +#elif defined ACUTEST_HAS_POSIX_TIMER_ + static clockid_t acutest_timer_id_; + typedef struct timespec acutest_timer_type_; + static acutest_timer_type_ acutest_timer_start_; + static acutest_timer_type_ acutest_timer_end_; + + static void + acutest_timer_init_(void) + { + if(acutest_timer_ == 1) + acutest_timer_id_ = CLOCK_MONOTONIC; + else if(acutest_timer_ == 2) + acutest_timer_id_ = CLOCK_PROCESS_CPUTIME_ID; + } + + static void + acutest_timer_get_time_(struct timespec* ts) + { + clock_gettime(acutest_timer_id_, ts); + } + + static double + acutest_timer_diff_(struct timespec start, struct timespec end) + { + double endns; + double startns; + + endns = end.tv_sec; + endns *= 1e9; + endns += end.tv_nsec; + + startns = start.tv_sec; + startns *= 1e9; + startns +=
start.tv_nsec; + + return ((endns - startns)/ 1e9); + } + + static void + acutest_timer_print_diff_(void) + { + printf("%.6lf secs", + acutest_timer_diff_(acutest_timer_start_, acutest_timer_end_)); + } +#else + typedef int acutest_timer_type_; + static acutest_timer_type_ acutest_timer_start_; + static acutest_timer_type_ acutest_timer_end_; + + void + acutest_timer_init_(void) + {} + + static void + acutest_timer_get_time_(int* ts) + { + (void) ts; + } + + static double + acutest_timer_diff_(int start, int end) + { + (void) start; + (void) end; + return 0.0; + } + + static void + acutest_timer_print_diff_(void) + {} +#endif + +#define ACUTEST_COLOR_DEFAULT_ 0 +#define ACUTEST_COLOR_GREEN_ 1 +#define ACUTEST_COLOR_RED_ 2 +#define ACUTEST_COLOR_DEFAULT_INTENSIVE_ 3 +#define ACUTEST_COLOR_GREEN_INTENSIVE_ 4 +#define ACUTEST_COLOR_RED_INTENSIVE_ 5 + +static int ACUTEST_ATTRIBUTE_(format (printf, 2, 3)) +acutest_colored_printf_(int color, const char* fmt, ...) +{ + va_list args; + char buffer[256]; + int n; + + va_start(args, fmt); + vsnprintf(buffer, sizeof(buffer), fmt, args); + va_end(args); + buffer[sizeof(buffer)-1] = '\0'; + + if(!acutest_colorize_) { + return printf("%s", buffer); + } + +#if defined ACUTEST_UNIX_ + { + const char* col_str; + switch(color) { + case ACUTEST_COLOR_GREEN_: col_str = "\033[0;32m"; break; + case ACUTEST_COLOR_RED_: col_str = "\033[0;31m"; break; + case ACUTEST_COLOR_GREEN_INTENSIVE_: col_str = "\033[1;32m"; break; + case ACUTEST_COLOR_RED_INTENSIVE_: col_str = "\033[1;31m"; break; + case ACUTEST_COLOR_DEFAULT_INTENSIVE_: col_str = "\033[1m"; break; + default: col_str = "\033[0m"; break; + } + printf("%s", col_str); + n = printf("%s", buffer); + printf("\033[0m"); + return n; + } +#elif defined ACUTEST_WIN_ + { + HANDLE h; + CONSOLE_SCREEN_BUFFER_INFO info; + WORD attr; + + h = GetStdHandle(STD_OUTPUT_HANDLE); + GetConsoleScreenBufferInfo(h, &info); + + switch(color) { + case ACUTEST_COLOR_GREEN_: attr = FOREGROUND_GREEN; break; + case ACUTEST_COLOR_RED_: attr = FOREGROUND_RED; break; + case ACUTEST_COLOR_GREEN_INTENSIVE_: attr = FOREGROUND_GREEN | FOREGROUND_INTENSITY; break; + case ACUTEST_COLOR_RED_INTENSIVE_: attr = FOREGROUND_RED | FOREGROUND_INTENSITY; break; + case ACUTEST_COLOR_DEFAULT_INTENSIVE_: attr = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_INTENSITY; break; + default: attr = 0; break; + } + if(attr != 0) + SetConsoleTextAttribute(h, attr); + n = printf("%s", buffer); + SetConsoleTextAttribute(h, info.wAttributes); + return n; + } +#else + n = printf("%s", buffer); + return n; +#endif +} + +static void +acutest_begin_test_line_(const struct acutest_test_* test) +{ + if(!acutest_tap_) { + if(acutest_verbose_level_ >= 3) { + acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Test %s:\n", test->name); + acutest_test_already_logged_++; + } else if(acutest_verbose_level_ >= 1) { + int n; + char spaces[48]; + + n = acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Test %s... ", test->name); + memset(spaces, ' ', sizeof(spaces)); + if(n < (int) sizeof(spaces)) + printf("%.*s", (int) sizeof(spaces) - n, spaces); + } else { + acutest_test_already_logged_ = 1; + } + } +} + +static void +acutest_finish_test_line_(int result) +{ + if(acutest_tap_) { + const char* str = (result == 0) ? 
"ok" : "not ok"; + + printf("%s %d - %s\n", str, acutest_current_index_ + 1, acutest_current_test_->name); + + if(result == 0 && acutest_timer_) { + printf("# Duration: "); + acutest_timer_print_diff_(); + printf("\n"); + } + } else { + int color = (result == 0) ? ACUTEST_COLOR_GREEN_INTENSIVE_ : ACUTEST_COLOR_RED_INTENSIVE_; + const char* str = (result == 0) ? "OK" : "FAILED"; + printf("[ "); + acutest_colored_printf_(color, "%s", str); + printf(" ]"); + + if(result == 0 && acutest_timer_) { + printf(" "); + acutest_timer_print_diff_(); + } + + printf("\n"); + } +} + +static void +acutest_line_indent_(int level) +{ + static const char spaces[] = " "; + int n = level * 2; + + if(acutest_tap_ && n > 0) { + n--; + printf("#"); + } + + while(n > 16) { + printf("%s", spaces); + n -= 16; + } + printf("%.*s", n, spaces); +} + +int ACUTEST_ATTRIBUTE_(format (printf, 4, 5)) +acutest_check_(int cond, const char* file, int line, const char* fmt, ...) +{ + const char *result_str; + int result_color; + int verbose_level; + + if(cond) { + result_str = "ok"; + result_color = ACUTEST_COLOR_GREEN_; + verbose_level = 3; + } else { + if(!acutest_test_already_logged_ && acutest_current_test_ != NULL) + acutest_finish_test_line_(-1); + + result_str = "failed"; + result_color = ACUTEST_COLOR_RED_; + verbose_level = 2; + acutest_test_failures_++; + acutest_test_already_logged_++; + } + + if(acutest_verbose_level_ >= verbose_level) { + va_list args; + + if(!acutest_case_already_logged_ && acutest_case_name_[0]) { + acutest_line_indent_(1); + acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Case %s:\n", acutest_case_name_); + acutest_test_already_logged_++; + acutest_case_already_logged_++; + } + + acutest_line_indent_(acutest_case_name_[0] ? 2 : 1); + if(file != NULL) { +#ifdef ACUTEST_WIN_ + const char* lastsep1 = strrchr(file, '\\'); + const char* lastsep2 = strrchr(file, '/'); + if(lastsep1 == NULL) + lastsep1 = file-1; + if(lastsep2 == NULL) + lastsep2 = file-1; + file = (lastsep1 > lastsep2 ? lastsep1 : lastsep2) + 1; +#else + const char* lastsep = strrchr(file, '/'); + if(lastsep != NULL) + file = lastsep+1; +#endif + printf("%s:%d: Check ", file, line); + } + + va_start(args, fmt); + vprintf(fmt, args); + va_end(args); + + printf("... "); + acutest_colored_printf_(result_color, "%s", result_str); + printf("\n"); + acutest_test_already_logged_++; + } + + acutest_cond_failed_ = (cond == 0); + return !acutest_cond_failed_; +} + +void ACUTEST_ATTRIBUTE_(format (printf, 1, 2)) +acutest_case_(const char* fmt, ...) +{ + va_list args; + + if(acutest_verbose_level_ < 2) + return; + + if(acutest_case_name_[0]) { + acutest_case_already_logged_ = 0; + acutest_case_name_[0] = '\0'; + } + + if(fmt == NULL) + return; + + va_start(args, fmt); + vsnprintf(acutest_case_name_, sizeof(acutest_case_name_) - 1, fmt, args); + va_end(args); + acutest_case_name_[sizeof(acutest_case_name_) - 1] = '\0'; + + if(acutest_verbose_level_ >= 3) { + acutest_line_indent_(1); + acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Case %s:\n", acutest_case_name_); + acutest_test_already_logged_++; + acutest_case_already_logged_++; + } +} + +void ACUTEST_ATTRIBUTE_(format (printf, 1, 2)) +acutest_message_(const char* fmt, ...) +{ + char buffer[TEST_MSG_MAXSIZE]; + char* line_beg; + char* line_end; + va_list args; + + if(acutest_verbose_level_ < 2) + return; + + /* We allow extra message only when something is already wrong in the + * current test. 
*/ + if(acutest_current_test_ == NULL || !acutest_cond_failed_) + return; + + va_start(args, fmt); + vsnprintf(buffer, TEST_MSG_MAXSIZE, fmt, args); + va_end(args); + buffer[TEST_MSG_MAXSIZE-1] = '\0'; + + line_beg = buffer; + while(1) { + line_end = strchr(line_beg, '\n'); + if(line_end == NULL) + break; + acutest_line_indent_(acutest_case_name_[0] ? 3 : 2); + printf("%.*s\n", (int)(line_end - line_beg), line_beg); + line_beg = line_end + 1; + } + if(line_beg[0] != '\0') { + acutest_line_indent_(acutest_case_name_[0] ? 3 : 2); + printf("%s\n", line_beg); + } +} + +void +acutest_dump_(const char* title, const void* addr, size_t size) +{ + static const size_t BYTES_PER_LINE = 16; + size_t line_beg; + size_t truncate = 0; + + if(acutest_verbose_level_ < 2) + return; + + /* We allow extra message only when something is already wrong in the + * current test. */ + if(acutest_current_test_ == NULL || !acutest_cond_failed_) + return; + + if(size > TEST_DUMP_MAXSIZE) { + truncate = size - TEST_DUMP_MAXSIZE; + size = TEST_DUMP_MAXSIZE; + } + + acutest_line_indent_(acutest_case_name_[0] ? 3 : 2); + printf((title[strlen(title)-1] == ':') ? "%s\n" : "%s:\n", title); + + for(line_beg = 0; line_beg < size; line_beg += BYTES_PER_LINE) { + size_t line_end = line_beg + BYTES_PER_LINE; + size_t off; + + acutest_line_indent_(acutest_case_name_[0] ? 4 : 3); + printf("%08lx: ", (unsigned long)line_beg); + for(off = line_beg; off < line_end; off++) { + if(off < size) + printf(" %02x", ((const unsigned char*)addr)[off]); + else + printf(" "); + } + + printf(" "); + for(off = line_beg; off < line_end; off++) { + unsigned char byte = ((const unsigned char*)addr)[off]; + if(off < size) + printf("%c", (iscntrl(byte) ? '.' : byte)); + else + break; + } + + printf("\n"); + } + + if(truncate > 0) { + acutest_line_indent_(acutest_case_name_[0] ? 4 : 3); + printf(" ... (and more %u bytes)\n", (unsigned) truncate); + } +} + +/* This is called just before each test */ +static void +acutest_init_(const char *test_name) +{ +#ifdef TEST_INIT + TEST_INIT + ; /* Allow for a single unterminated function call */ +#endif + + /* Suppress any warnings about unused variable. */ + (void) test_name; +} + +/* This is called after each test */ +static void +acutest_fini_(const char *test_name) +{ +#ifdef TEST_FINI + TEST_FINI + ; /* Allow for a single unterminated function call */ +#endif + + /* Suppress any warnings about unused variable. */ + (void) test_name; +} + +void +acutest_abort_(void) +{ + if(acutest_abort_has_jmp_buf_) { + longjmp(acutest_abort_jmp_buf_, 1); + } else { + if(acutest_current_test_ != NULL) + acutest_fini_(acutest_current_test_->name); + abort(); + } +} + +static void +acutest_list_names_(void) +{ + const struct acutest_test_* test; + + printf("Unit tests:\n"); + for(test = &acutest_list_[0]; test->func != NULL; test++) + printf(" %s\n", test->name); +} + +static void +acutest_remember_(int i) +{ + if(acutest_test_data_[i].flags & ACUTEST_FLAG_RUN_) + return; + + acutest_test_data_[i].flags |= ACUTEST_FLAG_RUN_; + acutest_count_++; +} + +static void +acutest_set_success_(int i, int success) +{ + acutest_test_data_[i].flags |= success ?
ACUTEST_FLAG_SUCCESS_ : ACUTEST_FLAG_FAILURE_; +} + +static void +acutest_set_duration_(int i, double duration) +{ + acutest_test_data_[i].duration = duration; +} + +static int +acutest_name_contains_word_(const char* name, const char* pattern) +{ + static const char word_delim[] = " \t-_/.,:;"; + const char* substr; + size_t pattern_len; + + pattern_len = strlen(pattern); + + substr = strstr(name, pattern); + while(substr != NULL) { + int starts_on_word_boundary = (substr == name || strchr(word_delim, substr[-1]) != NULL); + int ends_on_word_boundary = (substr[pattern_len] == '\0' || strchr(word_delim, substr[pattern_len]) != NULL); + + if(starts_on_word_boundary && ends_on_word_boundary) + return 1; + + substr = strstr(substr+1, pattern); + } + + return 0; +} + +static int +acutest_lookup_(const char* pattern) +{ + int i; + int n = 0; + + /* Try exact match. */ + for(i = 0; i < (int) acutest_list_size_; i++) { + if(strcmp(acutest_list_[i].name, pattern) == 0) { + acutest_remember_(i); + n++; + break; + } + } + if(n > 0) + return n; + + /* Try word match. */ + for(i = 0; i < (int) acutest_list_size_; i++) { + if(acutest_name_contains_word_(acutest_list_[i].name, pattern)) { + acutest_remember_(i); + n++; + } + } + if(n > 0) + return n; + + /* Try relaxed match. */ + for(i = 0; i < (int) acutest_list_size_; i++) { + if(strstr(acutest_list_[i].name, pattern) != NULL) { + acutest_remember_(i); + n++; + } + } + + return n; +} + + +/* Called if anything goes bad in Acutest, or if the unit test ends in some other + * way than by normally returning from its function (e.g. exception or some + * abnormal child process termination). */ +static void ACUTEST_ATTRIBUTE_(format (printf, 1, 2)) +acutest_error_(const char* fmt, ...) +{ + if(acutest_verbose_level_ == 0) + return; + + if(acutest_verbose_level_ >= 2) { + va_list args; + + acutest_line_indent_(1); + if(acutest_verbose_level_ >= 3) + acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "ERROR: "); + va_start(args, fmt); + vprintf(fmt, args); + va_end(args); + printf("\n"); + } + + if(acutest_verbose_level_ >= 3) { + printf("\n"); + } +} + +/* Call the given unit test function directly. */ +static int +acutest_do_run_(const struct acutest_test_* test, int index) +{ + int status = -1; + + acutest_was_aborted_ = 0; + acutest_current_test_ = test; + acutest_current_index_ = index; + acutest_test_failures_ = 0; + acutest_test_already_logged_ = 0; + acutest_cond_failed_ = 0; + +#ifdef __cplusplus + try { +#endif + acutest_init_(test->name); + acutest_begin_test_line_(test); + + /* This is good to do in case the test unit crashes. */ + fflush(stdout); + fflush(stderr); + + if(!acutest_worker_) { + acutest_abort_has_jmp_buf_ = 1; + if(setjmp(acutest_abort_jmp_buf_) != 0) { + acutest_was_aborted_ = 1; + goto aborted; + } + } + + acutest_timer_get_time_(&acutest_timer_start_); + test->func(); +aborted: + acutest_abort_has_jmp_buf_ = 0; + acutest_timer_get_time_(&acutest_timer_end_); + + if(acutest_verbose_level_ >= 3) { + acutest_line_indent_(1); + if(acutest_test_failures_ == 0) { + acutest_colored_printf_(ACUTEST_COLOR_GREEN_INTENSIVE_, "SUCCESS: "); + printf("All conditions have passed.\n"); + + if(acutest_timer_) { + acutest_line_indent_(1); + printf("Duration: "); + acutest_timer_print_diff_(); + printf("\n"); + } + } else { + acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED: "); + if(!acutest_was_aborted_) { + printf("%d condition%s %s failed.\n", + acutest_test_failures_, + (acutest_test_failures_ == 1) ? "" : "s", + (acutest_test_failures_ == 1) ?
"has" : "have"); + } else { + printf("Aborted.\n"); + } + } + printf("\n"); + } else if(acutest_verbose_level_ >= 1 && acutest_test_failures_ == 0) { + acutest_finish_test_line_(0); + } + + status = (acutest_test_failures_ == 0) ? 0 : -1; + +#ifdef __cplusplus + } catch(std::exception& e) { + const char* what = e.what(); + acutest_check_(0, NULL, 0, "Threw std::exception"); + if(what != NULL) + acutest_message_("std::exception::what(): %s", what); + + if(acutest_verbose_level_ >= 3) { + acutest_line_indent_(1); + acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED: "); + printf("C++ exception.\n\n"); + } + } catch(...) { + acutest_check_(0, NULL, 0, "Threw an exception"); + + if(acutest_verbose_level_ >= 3) { + acutest_line_indent_(1); + acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED: "); + printf("C++ exception.\n\n"); + } + } +#endif + + acutest_fini_(test->name); + acutest_case_(NULL); + acutest_current_test_ = NULL; + + return status; +} + +/* Trigger the unit test. If possible (and not suppressed) it starts a child + * process who calls acutest_do_run_(), otherwise it calls acutest_do_run_() + * directly. */ +static void +acutest_run_(const struct acutest_test_* test, int index, int master_index) +{ + int failed = 1; + acutest_timer_type_ start, end; + + acutest_current_test_ = test; + acutest_test_already_logged_ = 0; + acutest_timer_get_time_(&start); + + if(!acutest_no_exec_) { + +#if defined(ACUTEST_UNIX_) + + pid_t pid; + int exit_code; + + /* Make sure the child starts with empty I/O buffers. */ + fflush(stdout); + fflush(stderr); + + pid = fork(); + if(pid == (pid_t)-1) { + acutest_error_("Cannot fork. %s [%d]", strerror(errno), errno); + failed = 1; + } else if(pid == 0) { + /* Child: Do the test. */ + acutest_worker_ = 1; + failed = (acutest_do_run_(test, index) != 0); + acutest_exit_(failed ? 1 : 0); + } else { + /* Parent: Wait until child terminates and analyze its exit code. */ + waitpid(pid, &exit_code, 0); + if(WIFEXITED(exit_code)) { + switch(WEXITSTATUS(exit_code)) { + case 0: failed = 0; break; /* test has passed. */ + case 1: /* noop */ break; /* "normal" failure. */ + default: acutest_error_("Unexpected exit code [%d]", WEXITSTATUS(exit_code)); + } + } else if(WIFSIGNALED(exit_code)) { + char tmp[32]; + const char* signame; + switch(WTERMSIG(exit_code)) { + case SIGINT: signame = "SIGINT"; break; + case SIGHUP: signame = "SIGHUP"; break; + case SIGQUIT: signame = "SIGQUIT"; break; + case SIGABRT: signame = "SIGABRT"; break; + case SIGKILL: signame = "SIGKILL"; break; + case SIGSEGV: signame = "SIGSEGV"; break; + case SIGILL: signame = "SIGILL"; break; + case SIGTERM: signame = "SIGTERM"; break; + default: sprintf(tmp, "signal %d", WTERMSIG(exit_code)); signame = tmp; break; + } + acutest_error_("Test interrupted by %s.", signame); + } else { + acutest_error_("Test ended in an unexpected way [%d].", exit_code); + } + } + +#elif defined(ACUTEST_WIN_) + + char buffer[512] = {0}; + STARTUPINFOA startupInfo; + PROCESS_INFORMATION processInfo; + DWORD exitCode; + + /* Windows has no fork(). So we propagate all info into the child + * through a command line arguments. */ + _snprintf(buffer, sizeof(buffer)-1, + "%s --worker=%d %s --no-exec --no-summary %s --verbose=%d --color=%s -- \"%s\"", + acutest_argv0_, index, acutest_timer_ ? "--time" : "", + acutest_tap_ ? "--tap" : "", acutest_verbose_level_, + acutest_colorize_ ? 
"always" : "never", + test->name); + memset(&startupInfo, 0, sizeof(startupInfo)); + startupInfo.cb = sizeof(STARTUPINFO); + if(CreateProcessA(NULL, buffer, NULL, NULL, FALSE, 0, NULL, NULL, &startupInfo, &processInfo)) { + WaitForSingleObject(processInfo.hProcess, INFINITE); + GetExitCodeProcess(processInfo.hProcess, &exitCode); + CloseHandle(processInfo.hThread); + CloseHandle(processInfo.hProcess); + failed = (exitCode != 0); + if(exitCode > 1) { + switch(exitCode) { + case 3: acutest_error_("Aborted."); break; + case 0xC0000005: acutest_error_("Access violation."); break; + default: acutest_error_("Test ended in an unexpected way [%lu].", exitCode); break; + } + } + } else { + acutest_error_("Cannot create unit test subprocess [%ld].", GetLastError()); + failed = 1; + } + +#else + + /* A platform where we don't know how to run child process. */ + failed = (acutest_do_run_(test, index) != 0); + +#endif + + } else { + /* Child processes suppressed through --no-exec. */ + failed = (acutest_do_run_(test, index) != 0); + } + acutest_timer_get_time_(&end); + + acutest_current_test_ = NULL; + + acutest_stat_run_units_++; + if(failed) + acutest_stat_failed_units_++; + + acutest_set_success_(master_index, !failed); + acutest_set_duration_(master_index, acutest_timer_diff_(start, end)); +} + +#if defined(ACUTEST_WIN_) +/* Callback for SEH events. */ +static LONG CALLBACK +acutest_seh_exception_filter_(EXCEPTION_POINTERS *ptrs) +{ + acutest_check_(0, NULL, 0, "Unhandled SEH exception"); + acutest_message_("Exception code: 0x%08lx", ptrs->ExceptionRecord->ExceptionCode); + acutest_message_("Exception address: 0x%p", ptrs->ExceptionRecord->ExceptionAddress); + + fflush(stdout); + fflush(stderr); + + return EXCEPTION_EXECUTE_HANDLER; +} +#endif + + +#define ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ 0x0001 +#define ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_ 0x0002 + +#define ACUTEST_CMDLINE_OPTID_NONE_ 0 +#define ACUTEST_CMDLINE_OPTID_UNKNOWN_ (-0x7fffffff + 0) +#define ACUTEST_CMDLINE_OPTID_MISSINGARG_ (-0x7fffffff + 1) +#define ACUTEST_CMDLINE_OPTID_BOGUSARG_ (-0x7fffffff + 2) + +typedef struct acutest_test_CMDLINE_OPTION_ { + char shortname; + const char* longname; + int id; + unsigned flags; +} ACUTEST_CMDLINE_OPTION_; + +static int +acutest_cmdline_handle_short_opt_group_(const ACUTEST_CMDLINE_OPTION_* options, + const char* arggroup, + int (*callback)(int /*optval*/, const char* /*arg*/)) +{ + const ACUTEST_CMDLINE_OPTION_* opt; + int i; + int ret = 0; + + for(i = 0; arggroup[i] != '\0'; i++) { + for(opt = options; opt->id != 0; opt++) { + if(arggroup[i] == opt->shortname) + break; + } + + if(opt->id != 0 && !(opt->flags & ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_)) { + ret = callback(opt->id, NULL); + } else { + /* Unknown option. */ + char badoptname[3]; + badoptname[0] = '-'; + badoptname[1] = arggroup[i]; + badoptname[2] = '\0'; + ret = callback((opt->id != 0 ? ACUTEST_CMDLINE_OPTID_MISSINGARG_ : ACUTEST_CMDLINE_OPTID_UNKNOWN_), + badoptname); + } + + if(ret != 0) + break; + } + + return ret; +} + +#define ACUTEST_CMDLINE_AUXBUF_SIZE_ 32 + +static int +acutest_cmdline_read_(const ACUTEST_CMDLINE_OPTION_* options, int argc, char** argv, + int (*callback)(int /*optval*/, const char* /*arg*/)) +{ + + const ACUTEST_CMDLINE_OPTION_* opt; + char auxbuf[ACUTEST_CMDLINE_AUXBUF_SIZE_+1]; + int after_doubledash = 0; + int i = 1; + int ret = 0; + + auxbuf[ACUTEST_CMDLINE_AUXBUF_SIZE_] = '\0'; + + while(i < argc) { + if(after_doubledash || strcmp(argv[i], "-") == 0) { + /* Non-option argument. 
*/ + ret = callback(ACUTEST_CMDLINE_OPTID_NONE_, argv[i]); + } else if(strcmp(argv[i], "--") == 0) { + /* End of options. All the remaining members are non-option arguments. */ + after_doubledash = 1; + } else if(argv[i][0] != '-') { + /* Non-option argument. */ + ret = callback(ACUTEST_CMDLINE_OPTID_NONE_, argv[i]); + } else { + for(opt = options; opt->id != 0; opt++) { + if(opt->longname != NULL && strncmp(argv[i], "--", 2) == 0) { + size_t len = strlen(opt->longname); + if(strncmp(argv[i]+2, opt->longname, len) == 0) { + /* Regular long option. */ + if(argv[i][2+len] == '\0') { + /* with no argument provided. */ + if(!(opt->flags & ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_)) + ret = callback(opt->id, NULL); + else + ret = callback(ACUTEST_CMDLINE_OPTID_MISSINGARG_, argv[i]); + break; + } else if(argv[i][2+len] == '=') { + /* with an argument provided. */ + if(opt->flags & (ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ | ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_)) { + ret = callback(opt->id, argv[i]+2+len+1); + } else { + sprintf(auxbuf, "--%s", opt->longname); + ret = callback(ACUTEST_CMDLINE_OPTID_BOGUSARG_, auxbuf); + } + break; + } else { + continue; + } + } + } else if(opt->shortname != '\0' && argv[i][0] == '-') { + if(argv[i][1] == opt->shortname) { + /* Regular short option. */ + if(opt->flags & ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_) { + if(argv[i][2] != '\0') + ret = callback(opt->id, argv[i]+2); + else if(i+1 < argc) + ret = callback(opt->id, argv[++i]); + else + ret = callback(ACUTEST_CMDLINE_OPTID_MISSINGARG_, argv[i]); + break; + } else { + ret = callback(opt->id, NULL); + + /* There might be more (argument-less) short options + * grouped together. */ + if(ret == 0 && argv[i][2] != '\0') + ret = acutest_cmdline_handle_short_opt_group_(options, argv[i]+2, callback); + break; + } + } + } + } + + if(opt->id == 0) { /* still not handled? */ + if(argv[i][0] != '-') { + /* Non-option argument. */ + ret = callback(ACUTEST_CMDLINE_OPTID_NONE_, argv[i]); + } else { + /* Unknown option. */ + char* badoptname = argv[i]; + + if(strncmp(badoptname, "--", 2) == 0) { + /* Strip any argument from the long option. */ + char* assignment = strchr(badoptname, '='); + if(assignment != NULL) { + size_t len = assignment - badoptname; + if(len > ACUTEST_CMDLINE_AUXBUF_SIZE_) + len = ACUTEST_CMDLINE_AUXBUF_SIZE_; + strncpy(auxbuf, badoptname, len); + auxbuf[len] = '\0'; + badoptname = auxbuf; + } + } + + ret = callback(ACUTEST_CMDLINE_OPTID_UNKNOWN_, badoptname); + } + } + } + + if(ret != 0) + return ret; + i++; + } + + return ret; +} + +static void +acutest_help_(void) +{ + printf("Usage: %s [options] [test...]\n", acutest_argv0_); + printf("\n"); + printf("Run the specified unit tests; or if the option '--skip' is used, run all\n"); + printf("tests in the suite but those listed. 
By default, if no tests are specified\n"); + printf("on the command line, all unit tests in the suite are run.\n"); + printf("\n"); + printf("Options:\n"); + printf(" -s, --skip Execute all unit tests but the listed ones\n"); + printf(" --exec[=WHEN] If supported, execute unit tests as child processes\n"); + printf(" (WHEN is one of 'auto', 'always', 'never')\n"); + printf(" -E, --no-exec Same as --exec=never\n"); +#if defined ACUTEST_WIN_ + printf(" -t, --time Measure test duration\n"); +#elif defined ACUTEST_HAS_POSIX_TIMER_ + printf(" -t, --time Measure test duration (real time)\n"); + printf(" --time=TIMER Measure test duration, using given timer\n"); + printf(" (TIMER is one of 'real', 'cpu')\n"); +#endif + printf(" --no-summary Suppress printing of test results summary\n"); + printf(" --tap Produce TAP-compliant output\n"); + printf(" (See https://testanything.org/)\n"); + printf(" -x, --xml-output=FILE Enable XUnit output to the given file\n"); + printf(" -l, --list List unit tests in the suite and exit\n"); + printf(" -v, --verbose Make output more verbose\n"); + printf(" --verbose=LEVEL Set verbose level to LEVEL:\n"); + printf(" 0 ... Be silent\n"); + printf(" 1 ... Output one line per test (and summary)\n"); + printf(" 2 ... As 1 and failed conditions (this is default)\n"); + printf(" 3 ... As 1 and all conditions (and extended summary)\n"); + printf(" -q, --quiet Same as --verbose=0\n"); + printf(" --color[=WHEN] Enable colorized output\n"); + printf(" (WHEN is one of 'auto', 'always', 'never')\n"); + printf(" --no-color Same as --color=never\n"); + printf(" -h, --help Display this help and exit\n"); + + if(acutest_list_size_ < 16) { + printf("\n"); + acutest_list_names_(); + } +} + +static const ACUTEST_CMDLINE_OPTION_ acutest_cmdline_options_[] = { + { 's', "skip", 's', 0 }, + { 0, "exec", 'e', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ }, + { 'E', "no-exec", 'E', 0 }, +#if defined ACUTEST_WIN_ + { 't', "time", 't', 0 }, + { 0, "timer", 't', 0 }, /* kept for compatibility */ +#elif defined ACUTEST_HAS_POSIX_TIMER_ + { 't', "time", 't', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ }, + { 0, "timer", 't', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ }, /* kept for compatibility */ +#endif + { 0, "no-summary", 'S', 0 }, + { 0, "tap", 'T', 0 }, + { 'l', "list", 'l', 0 }, + { 'v', "verbose", 'v', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ }, + { 'q', "quiet", 'q', 0 }, + { 0, "color", 'c', ACUTEST_CMDLINE_OPTFLAG_OPTIONALARG_ }, + { 0, "no-color", 'C', 0 }, + { 'h', "help", 'h', 0 }, + { 0, "worker", 'w', ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_ }, /* internal */ + { 'x', "xml-output", 'x', ACUTEST_CMDLINE_OPTFLAG_REQUIREDARG_ }, + { 0, NULL, 0, 0 } +}; + +static int +acutest_cmdline_callback_(int id, const char* arg) +{ + switch(id) { + case 's': + acutest_skip_mode_ = 1; + break; + + case 'e': + if(arg == NULL || strcmp(arg, "always") == 0) { + acutest_no_exec_ = 0; + } else if(strcmp(arg, "never") == 0) { + acutest_no_exec_ = 1; + } else if(strcmp(arg, "auto") == 0) { + /*noop*/ + } else { + fprintf(stderr, "%s: Unrecognized argument '%s' for option --exec.\n", acutest_argv0_, arg); + fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); + acutest_exit_(2); + } + break; + + case 'E': + acutest_no_exec_ = 1; + break; + + case 't': +#if defined ACUTEST_WIN_ || defined ACUTEST_HAS_POSIX_TIMER_ + if(arg == NULL || strcmp(arg, "real") == 0) { + acutest_timer_ = 1; + #ifndef ACUTEST_WIN_ + } else if(strcmp(arg, "cpu") == 0) { + acutest_timer_ = 2; + #endif + } else { + fprintf(stderr, "%s: 
Unrecognized argument '%s' for option --time.\n", acutest_argv0_, arg); + fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); + acutest_exit_(2); + } +#endif + break; + + case 'S': + acutest_no_summary_ = 1; + break; + + case 'T': + acutest_tap_ = 1; + break; + + case 'l': + acutest_list_names_(); + acutest_exit_(0); + break; + + case 'v': + acutest_verbose_level_ = (arg != NULL ? atoi(arg) : acutest_verbose_level_+1); + break; + + case 'q': + acutest_verbose_level_ = 0; + break; + + case 'c': + if(arg == NULL || strcmp(arg, "always") == 0) { + acutest_colorize_ = 1; + } else if(strcmp(arg, "never") == 0) { + acutest_colorize_ = 0; + } else if(strcmp(arg, "auto") == 0) { + /*noop*/ + } else { + fprintf(stderr, "%s: Unrecognized argument '%s' for option --color.\n", acutest_argv0_, arg); + fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); + acutest_exit_(2); + } + break; + + case 'C': + acutest_colorize_ = 0; + break; + + case 'h': + acutest_help_(); + acutest_exit_(0); + break; + + case 'w': + acutest_worker_ = 1; + acutest_worker_index_ = atoi(arg); + break; + case 'x': + acutest_xml_output_ = fopen(arg, "w"); + if (!acutest_xml_output_) { + fprintf(stderr, "Unable to open '%s': %s\n", arg, strerror(errno)); + acutest_exit_(2); + } + break; + + case 0: + if(acutest_lookup_(arg) == 0) { + fprintf(stderr, "%s: Unrecognized unit test '%s'\n", acutest_argv0_, arg); + fprintf(stderr, "Try '%s --list' for list of unit tests.\n", acutest_argv0_); + acutest_exit_(2); + } + break; + + case ACUTEST_CMDLINE_OPTID_UNKNOWN_: + fprintf(stderr, "Unrecognized command line option '%s'.\n", arg); + fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); + acutest_exit_(2); + break; + + case ACUTEST_CMDLINE_OPTID_MISSINGARG_: + fprintf(stderr, "The command line option '%s' requires an argument.\n", arg); + fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); + acutest_exit_(2); + break; + + case ACUTEST_CMDLINE_OPTID_BOGUSARG_: + fprintf(stderr, "The command line option '%s' does not expect an argument.\n", arg); + fprintf(stderr, "Try '%s --help' for more information.\n", acutest_argv0_); + acutest_exit_(2); + break; + } + + return 0; +} + + +#ifdef ACUTEST_LINUX_ +static int +acutest_is_tracer_present_(void) +{ + /* Must be large enough so the line 'TracerPid: ${PID}' can fit in. */ + static const int OVERLAP = 32; + + char buf[256+OVERLAP+1]; + int tracer_present = 0; + int fd; + size_t n_read = 0; + + fd = open("/proc/self/status", O_RDONLY); + if(fd == -1) + return 0; + + while(1) { + static const char pattern[] = "TracerPid:"; + const char* field; + + while(n_read < sizeof(buf) - 1) { + ssize_t n; + + n = read(fd, buf + n_read, sizeof(buf) - 1 - n_read); + if(n <= 0) + break; + n_read += n; + } + buf[n_read] = '\0'; + + field = strstr(buf, pattern); + if(field != NULL && field < buf + sizeof(buf) - OVERLAP) { + pid_t tracer_pid = (pid_t) atoi(field + sizeof(pattern) - 1); + tracer_present = (tracer_pid != 0); + break; + } + + if(n_read == sizeof(buf)-1) { + memmove(buf, buf + sizeof(buf)-1 - OVERLAP, OVERLAP); + n_read = OVERLAP; + } else { + break; + } + } + + close(fd); + return tracer_present; +} +#endif + +#ifdef ACUTEST_MACOS_ +static bool +acutest_AmIBeingDebugged(void) +{ + int junk; + int mib[4]; + struct kinfo_proc info; + size_t size; + + // Initialize the flags so that, if sysctl fails for some bizarre + // reason, we get a predictable result. 
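+ // (This is the standard sysctl()-based debugger check from Apple's Technical Q&A QA1361, "Detecting the Debugger".)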
+ info.kp_proc.p_flag = 0; + + // Initialize mib, which tells sysctl the info we want, in this case + // we're looking for information about a specific process ID. + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_PID; + mib[3] = getpid(); + + // Call sysctl. + size = sizeof(info); + junk = sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, NULL, 0); + assert(junk == 0); + + // We're being debugged if the P_TRACED flag is set. + return ( (info.kp_proc.p_flag & P_TRACED) != 0 ); +} +#endif + +int +main(int argc, char** argv) +{ + int i; + + acutest_argv0_ = argv[0]; + +#if defined ACUTEST_UNIX_ + acutest_colorize_ = isatty(STDOUT_FILENO); +#elif defined ACUTEST_WIN_ + #if defined __BORLANDC__ + acutest_colorize_ = isatty(_fileno(stdout)); + #else + acutest_colorize_ = _isatty(_fileno(stdout)); + #endif +#else + acutest_colorize_ = 0; +#endif + + /* Count all test units */ + acutest_list_size_ = 0; + for(i = 0; acutest_list_[i].func != NULL; i++) + acutest_list_size_++; + + acutest_test_data_ = (struct acutest_test_data_*)calloc(acutest_list_size_, sizeof(struct acutest_test_data_)); + if(acutest_test_data_ == NULL) { + fprintf(stderr, "Out of memory.\n"); + acutest_exit_(2); + } + + /* Parse options */ + acutest_cmdline_read_(acutest_cmdline_options_, argc, argv, acutest_cmdline_callback_); + + /* Initialize the proper timer. */ + acutest_timer_init_(); + +#if defined(ACUTEST_WIN_) + SetUnhandledExceptionFilter(acutest_seh_exception_filter_); +#ifdef _MSC_VER + _set_abort_behavior(0, _WRITE_ABORT_MSG); +#endif +#endif + + /* By default, we want to run all tests. */ + if(acutest_count_ == 0) { + for(i = 0; acutest_list_[i].func != NULL; i++) + acutest_remember_(i); + } + + /* Guess whether we want to run unit tests as child processes. */ + if(acutest_no_exec_ < 0) { + acutest_no_exec_ = 0; + + if(acutest_count_ <= 1) { + acutest_no_exec_ = 1; + } else { +#ifdef ACUTEST_WIN_ + if(IsDebuggerPresent()) + acutest_no_exec_ = 1; +#endif +#ifdef ACUTEST_LINUX_ + if(acutest_is_tracer_present_()) + acutest_no_exec_ = 1; +#endif +#ifdef ACUTEST_MACOS_ + if(acutest_AmIBeingDebugged()) + acutest_no_exec_ = 1; +#endif +#ifdef RUNNING_ON_VALGRIND + /* RUNNING_ON_VALGRIND is provided by the optionally included <valgrind.h> */ + if(RUNNING_ON_VALGRIND) + acutest_no_exec_ = 1; +#endif + } + } + + if(acutest_tap_) { + /* TAP requires we know test result ("ok", "not ok") before we output + * anything about the test, and this gets problematic for larger verbose + * levels. */ + if(acutest_verbose_level_ > 2) + acutest_verbose_level_ = 2; + + /* TAP harness should provide some summary. */ + acutest_no_summary_ = 1; + + if(!acutest_worker_) + printf("1..%d\n", (int) acutest_count_); + } + + int index = acutest_worker_index_; + for(i = 0; acutest_list_[i].func != NULL; i++) { + int run = (acutest_test_data_[i].flags & ACUTEST_FLAG_RUN_); + if (acutest_skip_mode_) /* Run all tests except those listed.
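+ * (--skip inverts the selection remembered from the command line: the listed tests are skipped and every other test runs.)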
*/ + run = !run; + if(run) + acutest_run_(&acutest_list_[i], index++, i); + } + + /* Write a summary */ + if(!acutest_no_summary_ && acutest_verbose_level_ >= 1) { + if(acutest_verbose_level_ >= 3) { + acutest_colored_printf_(ACUTEST_COLOR_DEFAULT_INTENSIVE_, "Summary:\n"); + + printf(" Count of all unit tests: %4d\n", (int) acutest_list_size_); + printf(" Count of run unit tests: %4d\n", acutest_stat_run_units_); + printf(" Count of failed unit tests: %4d\n", acutest_stat_failed_units_); + printf(" Count of skipped unit tests: %4d\n", (int) acutest_list_size_ - acutest_stat_run_units_); + } + + if(acutest_stat_failed_units_ == 0) { + acutest_colored_printf_(ACUTEST_COLOR_GREEN_INTENSIVE_, "SUCCESS:"); + printf(" All unit tests have passed.\n"); + } else { + acutest_colored_printf_(ACUTEST_COLOR_RED_INTENSIVE_, "FAILED:"); + printf(" %d of %d unit tests %s failed.\n", + acutest_stat_failed_units_, acutest_stat_run_units_, + (acutest_stat_failed_units_ == 1) ? "has" : "have"); + } + + if(acutest_verbose_level_ >= 3) + printf("\n"); + } + + if (acutest_xml_output_) { +#if defined ACUTEST_UNIX_ + char *suite_name = basename(argv[0]); +#elif defined ACUTEST_WIN_ + char suite_name[_MAX_FNAME]; + _splitpath(argv[0], NULL, NULL, suite_name, NULL); +#else + const char *suite_name = argv[0]; +#endif + fprintf(acutest_xml_output_, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); + fprintf(acutest_xml_output_, "<testsuite name=\"%s\" tests=\"%d\" errors=\"%d\" failures=\"%d\" skip=\"%d\">\n", + suite_name, (int)acutest_list_size_, acutest_stat_failed_units_, acutest_stat_failed_units_, + (int)acutest_list_size_ - acutest_stat_run_units_); + for(i = 0; acutest_list_[i].func != NULL; i++) { + struct acutest_test_data_ *details = &acutest_test_data_[i]; + fprintf(acutest_xml_output_, " <testcase name=\"%s\" time=\"%.2f\">\n", acutest_list_[i].name, details->duration); + if (details->flags & ACUTEST_FLAG_FAILURE_) + fprintf(acutest_xml_output_, " <failure />\n"); + if (!(details->flags & ACUTEST_FLAG_FAILURE_) && !(details->flags & ACUTEST_FLAG_SUCCESS_)) + fprintf(acutest_xml_output_, " <skipped />\n"); + fprintf(acutest_xml_output_, " </testcase>\n"); + } + fprintf(acutest_xml_output_, "</testsuite>\n"); + fclose(acutest_xml_output_); + } + + acutest_cleanup_(); + + return (acutest_stat_failed_units_ == 0) ?
0 : 1; +} + + +#endif /* #ifndef TEST_NO_MAIN */ + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +#ifdef __cplusplus + } /* extern "C" */ +#endif + +#endif /* #ifndef ACUTEST_H */ diff --git a/tests/unit/gdt_idx.s b/tests/unit/gdt_idx.s deleted file mode 100644 index 42e24622..00000000 --- a/tests/unit/gdt_idx.s +++ /dev/null @@ -1,3 +0,0 @@ -.text -sidt (esp) -sgdt (esp+6) diff --git a/tests/unit/high_address.s b/tests/unit/high_address.s deleted file mode 100644 index de921447..00000000 --- a/tests/unit/high_address.s +++ /dev/null @@ -1,6 +0,0 @@ -dec %eax -mov (%eax), %eax -nop -nop -nop -nop diff --git a/tests/unit/pc_change.s b/tests/unit/pc_change.s deleted file mode 100644 index 415e3cf8..00000000 --- a/tests/unit/pc_change.s +++ /dev/null @@ -1,9 +0,0 @@ -.text -inc %ecx -inc %ecx -inc %ecx -inc %ecx -inc %ecx -inc %ecx -inc %edx -inc %edx diff --git a/tests/unit/tb_x86.s b/tests/unit/tb_x86.s deleted file mode 100644 index 8ef54c00..00000000 --- a/tests/unit/tb_x86.s +++ /dev/null @@ -1,90 +0,0 @@ -mov %esp,%ecx -fxch %st(5) -fnstenv -0xc(%ecx) -pop %ebp -push %ebp -pop %ecx -dec %ecx -dec %ecx -dec %ecx -dec %ecx -dec %ecx -dec %ecx -dec %ecx -dec %ecx -dec %ecx -dec %ecx -inc %ebx -inc %ebx -inc %ebx -inc %ebx -inc %ebx -inc %ebx -aaa -push %ecx -pop %edx -push $0x41 -pop %eax -push %eax -xor %al,0x30(%ecx) -inc %ecx -imul $0x51,0x41(%ecx),%eax -xor 0x42(%ecx),%al -xor 0x42(%edx),%al -xor %al,0x42(%edx) -inc %ecx -inc %edx -pop %eax -push %eax -cmp %al,0x42(%ecx) -jne .+0x4c -dec %ecx -push %ecx -push %ecx -push %ecx -push %edx -inc %edi -xor 0x34(%edi),%eax -push %ecx -push %ebp -push %ecx -push %esi -push %eax -inc %edi -inc %edi -cmp %al,0x39(%edi) -push %eax -dec %edx -push %eax -dec %ebx -push %eax -dec %esp -push %eax -dec %ebp -push %eax -dec %esi -push %eax -dec %edi -push %eax -push %eax -push %eax -xor %eax, 0x42(%edi) -inc %edi -inc %edx -push %eax -xor $0x50,%al -pop %edx -push %eax -inc %ebp -push %ecx -push %edx -inc %esi -xor 0x31(%edi),%al -push %eax -dec %ebp -push %ecx -push %ecx -push %eax -dec %esi -inc %ecx -inc %ecx diff --git a/tests/unit/test_arm.c b/tests/unit/test_arm.c new file mode 100644 index 00000000..6a609243 --- /dev/null +++ b/tests/unit/test_arm.c @@ -0,0 +1,249 @@ +#include "unicorn_test.h" + +const uint64_t code_start = 0x1000; +const uint64_t code_len = 0x4000; + +static void uc_common_setup(uc_engine** uc, uc_arch arch, uc_mode mode, const char* code, uint64_t size) { + OK(uc_open(arch, mode, uc)); + OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); + OK(uc_mem_write(*uc, code_start, code, size)); +} + +static void test_arm_nop() { + uc_engine* uc; + char code[] = "\x00\xf0\x20\xe3"; // nop + int r_r0 = 0x1234; + int r_r2 = 0x6789; + + uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_ARM, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_ARM_REG_R0, &r_r0)); + OK(uc_reg_write(uc, UC_ARM_REG_R2, &r_r2)); + + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); + + OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); + OK(uc_reg_read(uc, UC_ARM_REG_R2, &r_r2)); + TEST_CHECK(r_r0 == 0x1234); + TEST_CHECK(r_r2 == 0x6789); + + OK(uc_close(uc)); +} + +static void test_arm_thumb_sub() { + uc_engine* uc; + char code[] = "\x83\xb0"; // sub sp, #0xc + int r_sp = 0x1234; + + uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); + + OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); + + OK(uc_reg_read(uc, UC_ARM_REG_SP, &r_sp)); + TEST_CHECK(r_sp == 
0x1228); + + OK(uc_close(uc)); +} + +static void test_armeb_sub() { + uc_engine* uc; + char code[] = "\xe3\xa0\x00\x37\xe0\x42\x10\x03"; // mov r0, #0x37; sub r1, r2, r3 + int r_r0 = 0x1234; + int r_r2 = 0x6789; + int r_r3 = 0x3333; + int r_r1; + + uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_ARM | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_ARM_REG_R0, &r_r0)); + OK(uc_reg_write(uc, UC_ARM_REG_R2, &r_r2)); + OK(uc_reg_write(uc, UC_ARM_REG_R3, &r_r3)); + + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); + + OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); + OK(uc_reg_read(uc, UC_ARM_REG_R1, &r_r1)); + OK(uc_reg_read(uc, UC_ARM_REG_R2, &r_r2)); + OK(uc_reg_read(uc, UC_ARM_REG_R3, &r_r3)); + + TEST_CHECK(r_r0 == 0x37); + TEST_CHECK(r_r2 == 0x6789); + TEST_CHECK(r_r3 == 0x3333); + TEST_CHECK(r_r1 == 0x3456); + + OK(uc_close(uc)); +} + +static void test_arm_thumbeb_sub() { + uc_engine* uc; + char code[] = "\xb0\x83"; // sub sp, #0xc + int r_sp = 0x1234; + + uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); + + OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); + + OK(uc_reg_read(uc, UC_ARM_REG_SP, &r_sp)); + TEST_CHECK(r_sp == 0x1228); + + OK(uc_close(uc)); +} + +static void test_arm_thumb_ite_count_callback(uc_engine* uc, uint64_t address, uint32_t size, void* user_data) { + uint64_t* count = (uint64_t*)user_data; + + (*count) += 1; +} + +static void test_arm_thumb_ite() { + uc_engine* uc; + uc_hook hook; + char code[] = "\x9a\x42\x15\xbf\x00\x9a\x01\x9a\x78\x23\x15\x23"; // cmp r2, r3; itete ne; ldrne r2, [sp]; ldreq r2, [sp,#4]; movne r3, #0x78; moveq r3, #0x15 + int r_sp = 0x8000; + int r_r2 = 0; + int r_r3 = 1; + int r_pc = 0; + uint64_t count = 0; + + uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); + OK(uc_reg_write(uc, UC_ARM_REG_R2, &r_r2)); + OK(uc_reg_write(uc, UC_ARM_REG_R3, &r_r3)); + + OK(uc_mem_map(uc, r_sp, 0x1000, UC_PROT_ALL)); + r_r2 = 0x68; + OK(uc_mem_write(uc, r_sp, &r_r2, 4)); + r_r2 = 0x4d; + OK(uc_mem_write(uc, r_sp + 4, &r_r2, 4)); + + OK(uc_hook_add(uc, &hook, UC_HOOK_CODE, test_arm_thumb_ite_count_callback, &count, 1, 0)); + + // Execute four instructions at a time. + OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); + + OK(uc_reg_read(uc, UC_ARM_REG_R2, &r_r2)); + OK(uc_reg_read(uc, UC_ARM_REG_R3, &r_r3)); + TEST_CHECK(r_r2 == 0x68); + TEST_CHECK(count == 4); + + r_pc = code_start; + r_r2 = 0; + count = 0; + OK(uc_reg_write(uc, UC_ARM_REG_R2, &r_r2)); + OK(uc_reg_write(uc, UC_ARM_REG_R3, &r_r3)); + for (int i = 0; i < 6 && r_pc < code_start + sizeof(code) - 1; i++) { + // Execute one instruction at a time. 
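+ // Note: Unicorn treats the low bit of the start address as the Thumb
+ // mode flag, so each restart below must OR it back in (hence r_pc | 1).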
+ OK(uc_emu_start(uc, r_pc | 1, code_start + sizeof(code) - 1, 0, 1)); + + OK(uc_reg_read(uc, UC_ARM_REG_PC, &r_pc)); + } + OK(uc_reg_read(uc, UC_ARM_REG_R2, &r_r2)); + + TEST_CHECK(r_r2 == 0x68); + TEST_CHECK(r_r3 == 0x78); + TEST_CHECK(count == 4); + + OK(uc_close(uc)); +} + +static void test_arm_m_thumb_mrs() { + uc_engine* uc; + char code[] = "\xef\xf3\x14\x80\xef\xf3\x00\x81"; // mrs r0, control; mrs r1, apsr + uint32_t r_control = 0b10; + uint32_t r_apsr = (0b10101 << 27); + uint32_t r_r0, r_r1; + + uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_MCLASS, code, sizeof(code) - 1); + + OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); + OK(uc_reg_write(uc, UC_ARM_REG_APSR_NZCVQ, &r_apsr)); + OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 0)); + + OK(uc_reg_read(uc, UC_ARM_REG_R0, &r_r0)); + OK(uc_reg_read(uc, UC_ARM_REG_R1, &r_r1)); + + TEST_CHECK(r_r0 == 0b10); + TEST_CHECK(r_r1 == (0b10101 << 27)); + + OK(uc_close(uc)); +} + +static void test_arm_m_control() { + uc_engine* uc; + int r_control, r_msp, r_psp; + + OK(uc_open(UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_MCLASS, &uc)); + + r_control = 0; // Make sure we are using MSP. + OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); + + r_msp = 0x1000; + OK(uc_reg_write(uc, UC_ARM_REG_R13, &r_msp)); + + r_control = 0b10; // Make the switch. + OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); + + OK(uc_reg_read(uc, UC_ARM_REG_R13, &r_psp)); + TEST_CHECK(r_psp != r_msp); + + r_psp = 0x2000; + OK(uc_reg_write(uc, UC_ARM_REG_R13, &r_psp)); + + r_control = 0; // Switch again. + OK(uc_reg_write(uc, UC_ARM_REG_CONTROL, &r_control)); + + OK(uc_reg_read(uc, UC_ARM_REG_R13, &r_msp)); + TEST_CHECK(r_psp != r_msp); + TEST_CHECK(r_msp == 0x1000); + + OK(uc_close(uc)); +} + +// +// Some notes: +// Qemu raises a special exception, EXCP_EXCEPTION_EXIT, to handle EXC_RETURN. We can't +// help users handle EXC_RETURN since Unicorn is designed not to handle any CPU exception. +// +static void test_arm_m_exc_return_hook_interrupt(uc_engine* uc, int intno, void* data) { + int r_pc; + + OK(uc_reg_read(uc, UC_ARM_REG_PC, &r_pc)); + TEST_CHECK(intno == 8); // EXCP_EXCEPTION_EXIT: Return from v7M exception. + TEST_CHECK((r_pc | 1) == 0xFFFFFFFD); + OK(uc_emu_stop(uc)); +} + +static void test_arm_m_exc_return() { + uc_engine* uc; + char code[] = "\x6f\xf0\x02\x00\x00\x47"; // mov r0, #0xFFFFFFFD; bx r0; + int r_ipsr; + int r_sp = 0x8000; + uc_hook hook; + + uc_common_setup(&uc, UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_MCLASS, code, sizeof(code) - 1); + OK(uc_mem_map(uc, r_sp - 0x1000, 0x1000, UC_PROT_ALL)); + OK(uc_hook_add(uc, &hook, UC_HOOK_INTR, test_arm_m_exc_return_hook_interrupt, NULL, 0, 0)); + + r_sp -= 0x1c; + OK(uc_reg_write(uc, UC_ARM_REG_SP, &r_sp)); + + r_ipsr = 16; // Pretend we are inside some exception handler. + OK(uc_reg_write(uc, UC_ARM_REG_IPSR, &r_ipsr)); + + OK(uc_emu_start(uc, code_start | 1, code_start + sizeof(code) - 1, 0, 2)); // Just execute 2 instructions.
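+ // The UC_HOOK_INTR callback registered above is expected to fire on the
+ // second instruction (bx r0 with r0 == 0xFFFFFFFD) and stop emulation
+ // via uc_emu_stop(), since the EXC_RETURN is reported as interrupt number 8.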
+ + OK(uc_hook_del(uc, hook)); + OK(uc_close(uc)); +} + +TEST_LIST = { + { "test_arm_nop", test_arm_nop }, + { "test_arm_thumb_sub", test_arm_thumb_sub }, + { "test_armeb_sub", test_armeb_sub }, + { "test_arm_thumbeb_sub", test_arm_thumbeb_sub }, + { "test_arm_thumb_ite", test_arm_thumb_ite }, + { "test_arm_m_thumb_mrs", test_arm_m_thumb_mrs }, + { "test_arm_m_control", test_arm_m_control }, + { "test_arm_m_exc_return", test_arm_m_exc_return }, + { NULL, NULL } +}; \ No newline at end of file diff --git a/tests/unit/test_arm64.c b/tests/unit/test_arm64.c new file mode 100644 index 00000000..972d24c6 --- /dev/null +++ b/tests/unit/test_arm64.c @@ -0,0 +1,8 @@ +#include "unicorn_test.h" + +const uint64_t code_start = 0x1000; +const uint64_t code_len = 0x4000; + +TEST_LIST = { + { NULL, NULL } +}; \ No newline at end of file diff --git a/tests/unit/test_gdt_idt_x86.c b/tests/unit/test_gdt_idt_x86.c deleted file mode 100644 index f34d2af5..00000000 --- a/tests/unit/test_gdt_idt_x86.c +++ /dev/null @@ -1,127 +0,0 @@ -#include "unicorn_test.h" -#include -#include -#include -#include - -static void test_idt_gdt_i386(/*void **state*/) -{ - uc_engine *uc; - uc_err err; - uint8_t buf[6]; - uc_x86_mmr idt; - uc_x86_mmr gdt; - uc_x86_mmr ldt; - uc_x86_mmr tr; - - struct stat info; - char * code = read_file("gdt_idx.bin", &info); - - const uint64_t address = 0x1000000; - - int r_esp = address + 0x1000 - 0x100; // initial esp - - idt.base = 0x12345678; - idt.limit = 0xabcd; - gdt.base = 0x87654321; - gdt.limit = 0xdcba; - - ldt.base = 0xfedcba98; - ldt.limit = 0x11111111; - ldt.selector = 0x3333; - ldt.flags = 0x55555555; - - tr.base = 0x22222222; - tr.limit = 0x33333333; - tr.selector = 0x4444; - tr.flags = 0x66666666; - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - uc_assert_success(err); - - // map 1 page memory for this emulation - err = uc_mem_map(uc, address, 0x1000, UC_PROT_ALL); - uc_assert_success(err); - - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, info.st_size); - uc_assert_success(err); - - // initialize machine registers - err = uc_reg_write(uc, UC_X86_REG_ESP, &r_esp); - uc_assert_success(err); - err = uc_reg_write(uc, UC_X86_REG_IDTR, &idt); - uc_assert_success(err); - err = uc_reg_write(uc, UC_X86_REG_GDTR, &gdt); - uc_assert_success(err); - err = uc_reg_write(uc, UC_X86_REG_LDTR, &ldt); - uc_assert_success(err); - err = uc_reg_write(uc, UC_X86_REG_TR, &tr); - uc_assert_success(err); - - memset(&idt, 0, sizeof(idt)); - memset(&gdt, 0, sizeof(gdt)); - memset(&ldt, 0, sizeof(ldt)); - memset(&tr, 0, sizeof(tr)); - - // emulate machine code in infinite time - err = uc_emu_start(uc, address, address+sizeof(code)-1, 0, 0); - uc_assert_success(err); - - - uc_reg_read(uc, UC_X86_REG_IDTR, &idt); - assert(idt.base == 0x12345678); - assert(idt.limit == 0xabcd); - - uc_reg_read(uc, UC_X86_REG_GDTR, &gdt); - assert(gdt.base == 0x87654321); - assert(gdt.limit == 0xdcba); - - //userspace can only set ldt selector, remainder are loaded from - //GDT/LDT, but we allow all to emulator user - uc_reg_read(uc, UC_X86_REG_LDTR, &ldt); - assert(ldt.base == 0xfedcba98); - assert(ldt.limit == 0x11111111); - assert(ldt.selector == 0x3333); - assert(ldt.flags == 0x55555555); - - //userspace can only set tr selector, remainder are loaded from - //GDT/LDT, but we allow all to emulator user - uc_reg_read(uc, UC_X86_REG_TR, &tr); - assert(tr.base == 0x22222222); - assert(tr.limit == 0x33333333); - assert(tr.selector == 0x4444); - 
assert(tr.flags == 0x66666666); - - // read from memory - err = uc_mem_read(uc, r_esp, buf, 6); - uc_assert_success(err); - - assert(memcmp(buf, "\xcd\xab\x78\x56\x34\x12", 6) == 0); - - // read from memory - err = uc_mem_read(uc, r_esp + 6, buf, 6); - uc_assert_success(err); - - assert(memcmp(buf, "\xba\xdc\x21\x43\x65\x87", 6) == 0); - - uc_close(uc); - free(code); -} - -/******************************************************************************/ - -int main(void) { -/* - const struct CMUnitTest tests[] = { - cmocka_unit_test(test_idt_gdt_i386) - }; - return cmocka_run_group_tests(tests, NULL, NULL); -*/ - test_idt_gdt_i386(); - - fprintf(stderr, "success\n"); - - return 0; -} diff --git a/tests/unit/test_hang.c b/tests/unit/test_hang.c deleted file mode 100644 index 98198f3c..00000000 --- a/tests/unit/test_hang.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - refer to issue #575. - to run correctly unicorn needs to be compiled for AArch64. -*/ - -#include "unicorn_test.h" -#include -#include "unicorn/unicorn.h" - -uint64_t trunc_page(uint64_t addr) -{ - return (addr & ~(4095)); -} - -/* Called before every test to set up a new instance */ -static int init(void **state) -{ - printf("[+] Initializing Unicorn...\n"); - uc_engine *uc; - - if (uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc) != UC_ERR_OK) { - printf("Error on open. Be sure that your unicorn library supports AArch64.\n"); - return -1; - } - - *state = uc; - - return 0; -} - -/* Called after every test to clean up */ -static int teardown(void **state) -{ - printf("[+] Exiting...\n"); - uc_engine *uc = *state; - - uc_close(uc); - - *state = NULL; - return 0; -} - -void test_hang(void **state) -{ - uint32_t code[] = { - 0xd503201f, /* NOP */ - 0xd503201f, /* NOP */ - 0xd503201f, /* NOP */ - 0xaa0103e0 /* MOV X0, X1 */ - }; - - uc_engine *uc = *state; - - uint64_t x0 = 0; - uint64_t x1 = 1; - - /* - * emulation will hang if some instruction hits every quarter of a page, - * i.e. these offsets: - * 0x1400, 0x1800, 0x1c00, 0x2000 - * - * in this test, the code to be emulated is mapped just before the 0x1400 - * offset, so that the final instruction emulated (MOV X0, X1) hits the offset, - * causing the hang. - * If you try to write the code just four bytes behind, the hang doesn't occur. - * - * So far, this strange behaviour has only been observed with AArch64 Unicorn APIs. 
- */ - - uint64_t addr = 0x13f0; // try to map at (0x13f0 - 0x4) and the hang doesn't occur - uint64_t trunc_addr = trunc_page(addr); // round down to nearest page - - uc_mem_map(uc, trunc_addr, 2 * 1024 * 1024, UC_PROT_ALL); - - if (uc_mem_write(uc, addr, &code, sizeof(code))) { - printf("error on write\n"); - return; - } - - uc_reg_write(uc, UC_ARM64_REG_X0, &x0); - uc_reg_write(uc, UC_ARM64_REG_X1, &x1); - - if (uc_emu_start(uc, addr, addr + sizeof(code), 0, 0)) { - printf("error on start\n"); - return; - } - - uc_reg_read(uc, UC_ARM64_REG_X0, &x0); - uc_reg_read(uc, UC_ARM64_REG_X1, &x1); - - printf("x0: %"PRIx64"\n", x0); - printf("x1: %"PRIx64"\n", x1); -} - -int main(int argc, const char * argv[]) { - - const struct CMUnitTest tests[] = { - cmocka_unit_test_setup_teardown(test_hang, init, teardown), - }; - - return cmocka_run_group_tests(tests, NULL, NULL);; -} diff --git a/tests/unit/test_hookcounts.c b/tests/unit/test_hookcounts.c deleted file mode 100644 index 9e9e4a8a..00000000 --- a/tests/unit/test_hookcounts.c +++ /dev/null @@ -1,281 +0,0 @@ -// Test hook evocation count -// -// Objective is to demonstrate finer duration control of -// emulation by counts of instruction code -// -#include "unicorn_test.h" -#include "unicorn/unicorn.h" - -#define DEBUG 1 - -#define OK(x) uc_assert_success(x) - -volatile int expected_instructions = 0; -volatile int total_instructions = 0; - - -// NOTE: It would appear that this UC_HOOK_CODE is being done before the -// uc_count_fb hook. -// So, termination by uc->emu_count has not been done yet here... -static void test_code_hook(uc_engine *uc, - uint64_t address, - uint32_t size, - void *user_data) -{ - - ++total_instructions; - if (total_instructions == expected_instructions) - { - uc_emu_stop(uc); - } - -#ifdef DEBUG - printf("instruction at 0x%"PRIx64": ", address); - uint8_t tmp[256]; - if (!uc_mem_read(uc, address, tmp, size)) { - uint32_t i; - - for (i = 0; i < size; i++) { - printf("0x%x ", tmp[i]); - } - printf("\n"); - } -#endif // DEBUG -} - - -/* Called before every test to set up a new instance */ -static int setup32(void **state) -{ - uc_hook trace1; - uc_engine *uc; - - OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); - - *state = uc; - - // trace all instructions - OK(uc_hook_add(uc, &trace1, UC_HOOK_CODE, test_code_hook, NULL, 1, 0)); - - return 0; -} - -/* Called after every test to clean up */ -static int teardown(void **state) -{ - uc_engine *uc = *state; - - OK(uc_close(uc)); - - *state = NULL; - return 0; -} - -/******************************************************************************/ - -static void -test_hook_count(uc_engine *uc, - const uint8_t *code, - int start_offset, - int code_length, - int count) -{ - -#define BASEADDR 0x1000000 -#define MEMSIZE (2 * 1024 * 1024) - - uint64_t address = BASEADDR + (count * MEMSIZE); - total_instructions = 0; - -#undef BASEADDR - - // map a new 2MB memory for this emulation - OK(uc_mem_map(uc, address, MEMSIZE, UC_PROT_ALL)); - - // write machine code to be emulated to memory - OK(uc_mem_write(uc, address, code, code_length)); - -#ifdef DEBUG - printf("Address: %"PRIx64"\n", address); - printf("Start : %"PRIx64"\n", address + start_offset); - printf("End : %"PRIx64"\n", address + code_length - 1); - printf("Count : %d\n", count); -#endif - expected_instructions = count; - OK(uc_emu_start(uc, - address+start_offset, - address+code_length, - 0, - count)); - - assert_int_equal(expected_instructions, total_instructions); - - // map 2MB memory for this emulation - OK(uc_mem_unmap(uc, 
address, MEMSIZE)); -} - - -/* Perform fine-grain emulation control of exactly 1 instruction */ -/* of 1-opcode code space*/ -static void test_hook_count_1_begin(void **state) -{ - uc_engine *uc = *state; - const uint8_t code[] = { - 0x41, // inc ECX @0x1000000 - }; - int code_length = sizeof(code); - int start_offset = 0; - int ins_count = 1; - - test_hook_count(uc, code, start_offset, code_length, ins_count); -} - - -/* Perform fine-grain emulation control of exactly 1 instruction */ -static void test_hook_count_1_midpoint(void **state) -{ - uc_engine *uc = *state; - const uint8_t code[] = { - 0x41, // inc ECX @0x1000000 - 0x41, // inc ECX - 0x41, // inc ECX - 0x41, // inc ECX @0x1000003 - 0x41, // inc ECX - 0x41, // inc ECX - 0x42, // inc EDX @0x1000006 - 0x42, // inc EDX - }; - int code_length = sizeof(code); - int start_offset = code_length/2; - int ins_count = 1; - - test_hook_count(uc, code, start_offset, code_length, ins_count); -} - - -/* Perform fine-grain emulation control of exactly 1 instruction */ -static void test_hook_count_1_end(void **state) -{ - uc_engine *uc = *state; - const uint8_t code[] = { - 0x41, // inc ECX @0x1000000 - 0x41, // inc ECX - 0x41, // inc ECX - 0x41, // inc ECX @0x1000003 - 0x41, // inc ECX - 0x41, // inc ECX - 0x42, // inc EDX @0x1000006 - 0x42, // inc EDX - }; - int code_length = sizeof(code); - int start_offset = code_length - 1; - int ins_count = 1; - - test_hook_count(uc, code, start_offset, code_length, ins_count); -} - - -/* Perform fine-grain emulation control over a range of */ -/* varied instruction steps. */ -static void test_hook_count_range(void **state) -{ - uc_engine *uc = *state; - const uint8_t code[] = { - 0x41, // inc ECX @0x1000000 - 0x41, // inc ECX - 0x41, // inc ECX - 0x41, // inc ECX @0x1000003 - 0x41, // inc ECX - 0x41, // inc ECX - 0x42, // inc EDX @0x1000006 - 0x42, // inc EDX - }; - int code_length = sizeof(code); - int start_offset; - int ins_count = 2; - - for (start_offset = 2; start_offset < (code_length - ins_count); start_offset++) - { - printf("Iteration %d\n", start_offset); - test_hook_count(uc, code, start_offset, code_length, ins_count); - } -} - - -static void test_hook_count_end(void **state) -{ - uc_engine *uc = *state; - const uint8_t code[] = { - 0x41, // inc ECX @0x1000000 - 0x41, // inc ECX - 0x41, // inc ECX - 0x41, // inc ECX @0x1000003 - 0x41, // inc ECX - 0x41, // inc ECX - 0x42, // inc EDX @0x1000006 - 0x42, // inc EDX - }; - int code_length = sizeof(code); - int ins_count = 3; - int start_offset = sizeof(code) - ins_count; - - test_hook_count(uc, code, start_offset, code_length, ins_count); -} - - -static void test_hook_count_begins(void **state) -{ - uc_engine *uc = *state; - const uint8_t code[] = { - 0x41, // inc ECX @0x1000000 - 0x41, // inc ECX - 0x41, // inc ECX - 0x41, // inc ECX @0x1000003 - 0x41, // inc ECX - 0x41, // inc ECX - 0x42, // inc EDX @0x1000006 - 0x42, // inc EDX - }; - int code_length = sizeof(code); - int ins_count = 3; - int start_offset = 0; - - test_hook_count(uc, code, start_offset, code_length, ins_count); -} - - -static void test_hook_count_midpoint(void **state) -{ - uc_engine *uc = *state; - const uint8_t code[] = { - 0x41, // inc ECX @0x1000000 - 0x41, // inc ECX - 0x41, // inc ECX - 0x41, // inc ECX @0x1000003 - 0x41, // inc ECX - 0x41, // inc ECX - 0x42, // inc EDX @0x1000006 - 0x42, // inc EDX - }; - int code_length = sizeof(code); - int ins_count = 3; - int start_offset = 2; - - test_hook_count(uc, code, start_offset, code_length, ins_count); -} - - -int main(void) 
-{ - const struct CMUnitTest tests[] = { - cmocka_unit_test_setup_teardown(test_hook_count_1_begin, setup32, teardown), - cmocka_unit_test_setup_teardown(test_hook_count_1_midpoint, setup32, teardown), - cmocka_unit_test_setup_teardown(test_hook_count_1_end, setup32, teardown), - cmocka_unit_test_setup_teardown(test_hook_count_begins, setup32, teardown), - cmocka_unit_test_setup_teardown(test_hook_count_range, setup32, teardown), - cmocka_unit_test_setup_teardown(test_hook_count_midpoint, setup32, teardown), - cmocka_unit_test_setup_teardown(test_hook_count_end, setup32, teardown), - }; - return cmocka_run_group_tests(tests, NULL, NULL); -} - diff --git a/tests/unit/test_m68k.c b/tests/unit/test_m68k.c new file mode 100644 index 00000000..972d24c6 --- /dev/null +++ b/tests/unit/test_m68k.c @@ -0,0 +1,8 @@ +#include "unicorn_test.h" + +const uint64_t code_start = 0x1000; +const uint64_t code_len = 0x4000; + +TEST_LIST = { + { NULL, NULL } +}; \ No newline at end of file diff --git a/tests/unit/test_mem_high.c b/tests/unit/test_mem_high.c deleted file mode 100644 index ab277bf4..00000000 --- a/tests/unit/test_mem_high.c +++ /dev/null @@ -1,131 +0,0 @@ -/** - * Unicorn memory API tests - * - * This tests memory read/write and map/unmap functionality. - * One is necessary for doing the other. - */ -#include "unicorn_test.h" -#include -#include -#include "unicorn/unicorn.h" - -/* Called before every test to set up a new instance */ -static int setup(void **state) -{ - uc_engine *uc; - - uc_assert_success(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); - - *state = uc; - return 0; -} - -/* Called after every test to clean up */ -static int teardown(void **state) -{ - uc_engine *uc = *state; - - uc_assert_success(uc_close(uc)); - - *state = NULL; - return 0; -} - -/******************************************************************************/ - -// mapping the last pages will silently fail -static void test_last_page_map(void **state) -{ - uc_engine *uc = *state; - - uint8_t writebuf[0x10]; - memset(writebuf, 0xCC, sizeof(writebuf)); - - const uint64_t mem_len = 0x1000; - const uint64_t last_page = 0xfffffffffffff000; - uc_assert_success(uc_mem_map(uc, last_page, mem_len, UC_PROT_NONE)); - uc_assert_success(uc_mem_write(uc, last_page, writebuf, sizeof(writebuf))); -} - -// segfaults with NULL-deref (caused by UC_PROT_NONE) -static void test_nullptr_deref_wrong_perms(void **state){ - uc_engine *uc = *state; - const uint64_t base_addr = 0x400000; - uc_assert_success(uc_mem_map(uc, base_addr, 4096, UC_PROT_NONE)); - uc_emu_start(uc, base_addr, base_addr + 1, 0, 0); -} - -static int number_of_memory_reads = 0; - -static void hook_mem64(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) -{ - number_of_memory_reads += 1; - printf(">>> Memory is being accessed at 0x%"PRIx64 ", data size = %u\n", address, size); -} - -//if a read is performed from a big address whith a non-zero last digit, multiple read events are triggered -static void test_high_address_reads(void **state) -{ - uc_engine *uc = *state; - uc_hook trace2; - - uint64_t addr = 0x0010000000000001; - //addr = 0x0010000000000000; // uncomment to fix wrong? behaviour - //addr = 90000000; // uncomment to fix wrong? 
behaviour - // - uc_mem_map(uc, addr-(addr%4096), 4096*2, UC_PROT_ALL); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_RAX, &addr)); - const uint64_t base_addr = 0x40000; - uint8_t code[] = {0x48,0x8b,0x00,0x90,0x90,0x90,0x90}; // mov rax, [rax], nops - uc_assert_success(uc_mem_map(uc, base_addr, 4096, UC_PROT_ALL)); - uc_assert_success(uc_mem_write(uc, base_addr, code, 7)); - uc_assert_success(uc_hook_add(uc, &trace2, UC_HOOK_MEM_READ, hook_mem64, NULL, 1, 0)); - uc_assert_success(uc_emu_start(uc, base_addr, base_addr + 3, 0, 0)); - if(number_of_memory_reads != 1) { - fail_msg("wrong number of memory reads for instruction %i", number_of_memory_reads); - } -} - -//if a read is performed from a big address whith a non-zero last digit, 0 will be read -static void test_high_address_read_values(void **state) -{ - uc_engine *uc = *state; - struct stat info; - char * code = read_file("high_address.bin", &info); - if (code == NULL) { - return; - } - - uint64_t addr = 0x0010000000000001; - //addr = 0x000ffffffffffff8; // uncomment to fix wrong behaviour - //addr = 90000000; // uncomment to fix wrong behaviour - // - uint8_t content[] = {0x42,0x42,0x42,0x42, 0x42,0x42,0x42,0x42}; - uc_assert_success(uc_mem_map(uc, addr-(addr%4096), 4096*2, UC_PROT_ALL)); - uc_assert_success(uc_mem_write(uc, addr, content, 8)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_RAX, &addr)); - const uint64_t base_addr = 0x40000; - uc_assert_success(uc_mem_map(uc, base_addr, 4096, UC_PROT_ALL)); - uc_assert_success(uc_mem_write(uc, base_addr, code, info.st_size)); - uc_assert_success(uc_emu_start(uc, base_addr, base_addr + 3, 0, 0)); - uint64_t rax = 0; - uc_assert_success(uc_reg_read(uc, UC_X86_REG_RAX, &rax)); - if(rax != 0x4242424242424242) { - fail_msg("wrong memory read from code %"PRIx64, rax); - } - - free(code); -} - - -int main(void) { -#define test(x) cmocka_unit_test_setup_teardown(x, setup, teardown) - const struct CMUnitTest tests[] = { - test(test_last_page_map), - test(test_high_address_reads), - test(test_high_address_read_values), - test(test_nullptr_deref_wrong_perms), - }; -#undef test - return cmocka_run_group_tests(tests, NULL, NULL); -} diff --git a/tests/unit/test_mem_map.c b/tests/unit/test_mem_map.c deleted file mode 100644 index 86214c59..00000000 --- a/tests/unit/test_mem_map.c +++ /dev/null @@ -1,236 +0,0 @@ -/** - * Unicorn memory API tests - * - * This tests memory read/write and map/unmap functionality. - * One is necessary for doing the other. 
- */ -#include "unicorn_test.h" -#include -#include - -/* Called before every test to set up a new instance */ -static int setup(void **state) -{ - uc_engine *uc; - - uc_assert_success(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); - - *state = uc; - return 0; -} - -/* Called after every test to clean up */ -static int teardown(void **state) -{ - uc_engine *uc = *state; - - uc_assert_success(uc_close(uc)); - - *state = NULL; - return 0; -} - -/******************************************************************************/ - - -/** - * A basic test showing mapping of memory, and reading/writing it - */ -static void test_basic(void **state) -{ - uc_engine *uc = *state; - const uint64_t mem_start = 0x1000; - const uint64_t mem_len = 0x1000; - const uint64_t test_addr = mem_start + 0x100; - - /* Map a region */ - uc_assert_success(uc_mem_map(uc, mem_start, mem_len, UC_PROT_NONE)); - - /* Write some data to it */ - uc_assert_success(uc_mem_write(uc, test_addr, "test", 4)); - - uint8_t buf[4]; - memset(buf, 0xCC, sizeof(buf)); - - /* Read it back */ - uc_assert_success(uc_mem_read(uc, test_addr, buf, sizeof(buf))); - - /* And make sure it matches what we expect */ - assert_memory_equal(buf, "test", 4); - - /* Unmap the region */ - //uc_assert_success(uc_mem_unmap(uc, mem_start, mem_len)); -} - -static void test_bad_read(void **state) -{ - uc_engine *uc = *state; - - uint8_t readbuf[0x10]; - memset(readbuf, 0xCC, sizeof(readbuf)); - - uint8_t checkbuf[0x10]; - memset(checkbuf, 0xCC, sizeof(checkbuf)); - - /* Reads to unmapped addresses should fail */ - /* TODO: Which error? */ - uc_assert_fail(uc_mem_read(uc, 0x1000, readbuf, sizeof(readbuf))); - - /* And our buffer should be unchanged */ - assert_memory_equal(readbuf, checkbuf, sizeof(checkbuf)); -} - -static void test_bad_write(void **state) -{ - uc_engine *uc = *state; - - uint8_t writebuf[0x10]; - memset(writebuf, 0xCC, sizeof(writebuf)); - - /* Writes to unmapped addresses should fail */ - /* TODO: Which error? */ - uc_assert_fail(uc_mem_write(uc, 0x1000, writebuf, sizeof(writebuf))); -} - - - -/** - * Verify that we can read/write across memory map region boundaries - */ -static void test_rw_across_boundaries(void **state) -{ - uc_engine *uc = *state; - - /* Map in two adjacent regions */ - uc_assert_success(uc_mem_map(uc, 0, 0x1000, 0)); /* 0x0000 - 0x1000 */ - uc_assert_success(uc_mem_map(uc, 0x1000, 0x1000, 0)); /* 0x1000 - 0x2000 */ - - const uint64_t addr = 0x1000 - 2; /* 2 bytes before end of block */ - - /* Write some data across the boundary */ - uc_assert_success(uc_mem_write(uc, addr, "test", 4)); - - uint8_t buf[4]; - memset(buf, 0xCC, sizeof(buf)); - - /* Read the data across the boundary */ - uc_assert_success(uc_mem_read(uc, addr, buf, sizeof(buf))); - - assert_memory_equal(buf, "test", 4); -} - -/* Try to unmap memory that has not been mapped */ -static void test_bad_unmap(void **state) -{ - uc_engine *uc = *state; - - /* TODO: Which error should this return? 
*/ - uc_assert_fail(uc_mem_unmap(uc, 0x0, 0x1000)); -} - - -/* Try to map overlapped memory range */ -static void test_unmap_double_map(void **state) -{ - uc_engine *uc = *state; - - uc_assert_success(uc_mem_map(uc, 0, 0x4000, 0)); /* 0x0000 - 0x4000 */ - uc_assert_fail(uc_mem_map(uc, 0x0000, 0x1000, 0)); /* 0x1000 - 0x1000 */ -} - -static void test_overlap_unmap_double_map(void **state) -{ - uc_engine *uc = *state; - uc_mem_map( uc, 0x1000, 0x2000, 0); - uc_mem_map( uc, 0x1000, 0x1000, 0); - uc_mem_unmap(uc, 0x2000, 0x1000); -} - -static void test_strange_map(void **state) -{ - uc_engine *uc = *state; - uc_mem_map( uc, 0x0,0x3000,0); - uc_mem_unmap(uc, 0x1000,0x1000); - uc_mem_map( uc, 0x3000,0x1000,0); - uc_mem_map( uc, 0x4000,0x1000,0); - uc_mem_map( uc, 0x1000,0x1000,0); - uc_mem_map( uc, 0x5000,0x1000,0); - uc_mem_unmap(uc, 0x0,0x1000); -} - -static void test_query_page_size(void **state) -{ - uc_engine *uc = *state; - - size_t page_size; - uc_assert_success(uc_query(uc, UC_QUERY_PAGE_SIZE, &page_size)); - assert_int_equal(4096, page_size); -} - -void mem_write(uc_engine* uc, uint64_t addr, uint64_t len){ - uint8_t* buff = alloca(len); - memset(buff,0,len); - uc_mem_write(uc, addr, buff, len); - -} - -void mem_read(uc_engine* uc, uint64_t addr, uint64_t len){ - uint8_t* buff = alloca(len); - uc_mem_read(uc, addr, buff, len); -} - -void map(uc_engine* uc, uint64_t addr, uint64_t len){ - uc_mem_map(uc, addr, len, UC_PROT_READ | UC_PROT_WRITE); -} - -void unmap(uc_engine* uc, uint64_t addr, uint64_t len){ - uc_mem_unmap(uc, addr, len); -} - -//most likely same bug as in test_strange_map, but looked different in fuzzer (sefault instead of assertion fail) -static void test_assertion_fail(void **state){ - uc_engine *uc = *state; - - map(uc,0x2000,0x4000); //5 - unmap(uc,0x3000,0x2000); //11 - map(uc,0x0,0x2000); //23 - map(uc,0x3000,0x2000); //24 - map(uc,0x9000,0x4000); //32 - map(uc,0x8000,0x1000); //34 - unmap(uc,0x1000,0x4000); //35 -} - -static void test_bad_offset(void **state){ - uc_engine *uc = *state; - map(uc,0x9000,0x4000); //17 - map(uc,0x4000,0x2000); //32 - unmap(uc,0x5000,0x1000); //35 - map(uc,0x0,0x1000); //42 - map(uc,0x5000,0x4000); //51 - map(uc,0x2000,0x1000); //53 - map(uc,0x1000,0x1000); //55 - unmap(uc,0x7000,0x3000); //58 - unmap(uc,0x5000,0x1000); //59 - unmap(uc,0x4000,0x2000); //70 -} - - - -int main(void) { -#define test(x) cmocka_unit_test_setup_teardown(x, setup, teardown) - const struct CMUnitTest tests[] = { - test(test_basic), - //test(test_bad_read), - //test(test_bad_write), - test(test_bad_offset), - test(test_assertion_fail), - test(test_bad_unmap), - test(test_rw_across_boundaries), - test(test_unmap_double_map), - test(test_overlap_unmap_double_map), - test(test_strange_map), - test(test_query_page_size), - }; -#undef test - return cmocka_run_group_tests(tests, NULL, NULL); -} diff --git a/tests/unit/test_mem_map_ptr.c b/tests/unit/test_mem_map_ptr.c deleted file mode 100644 index aedeb115..00000000 --- a/tests/unit/test_mem_map_ptr.c +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Unicorn memory API tests - * - * This tests manual pointer-backed memory. 
- */ -#include "unicorn_test.h" -#include -#include - -/* Called before every test to set up a new instance */ -static int setup(void **state) -{ - uc_engine *uc; - - uc_assert_success(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); - - *state = uc; - return 0; -} - -/* Called after every test to clean up */ -static int teardown(void **state) -{ - uc_engine *uc = *state; - - uc_assert_success(uc_close(uc)); - - *state = NULL; - return 0; -} - -/******************************************************************************/ - - -/** - * A basic test showing mapping of memory, and reading/writing it - */ -static void test_basic(void **state) -{ - uc_engine *uc = *state; - const uint64_t mem_start = 0x1000; - const uint64_t mem_len = 0x1000; - const uint64_t test_addr = mem_start; - - void *host_mem = calloc(1, mem_len); - - /* Map a region */ - uc_assert_success(uc_mem_map_ptr(uc, mem_start, mem_len, UC_PROT_ALL, host_mem)); - - /* Write some data to it */ - uc_assert_success(uc_mem_write(uc, test_addr, "test", 4)); - - uint8_t buf[4]; - memset(buf, 0xCC, sizeof(buf)); - - /* Read it back */ - uc_assert_success(uc_mem_read(uc, test_addr, buf, sizeof(buf))); - - /* And make sure it matches what we expect */ - assert_memory_equal(buf, "test", 4); - - /* Unmap the region */ - uc_assert_success(uc_mem_unmap(uc, mem_start, mem_len)); - - assert_memory_equal(buf, host_mem, 4); - - free(host_mem); -} - -int main(void) { -#define test(x) cmocka_unit_test_setup_teardown(x, setup, teardown) - const struct CMUnitTest tests[] = { - test(test_basic), - }; -#undef test - return cmocka_run_group_tests(tests, NULL, NULL); -} diff --git a/tests/unit/test_mips.c b/tests/unit/test_mips.c new file mode 100644 index 00000000..a3b93cff --- /dev/null +++ b/tests/unit/test_mips.c @@ -0,0 +1,90 @@ +#include "unicorn_test.h" + +const uint64_t code_start = 0x10000000; +const uint64_t code_len = 0x4000; + +static void uc_common_setup(uc_engine** uc, uc_arch arch, uc_mode mode, const char* code, uint64_t size) { + OK(uc_open(arch, mode, uc)); + OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); + OK(uc_mem_write(*uc, code_start, code, size)); +} + +static void test_mips_el_ori() { + uc_engine* uc; + char code[] = "\x56\x34\x21\x34"; // ori $at, $at, 0x3456; + int r_r1 = 0x6789; + + uc_common_setup(&uc, UC_ARCH_MIPS, UC_MODE_32 | UC_MODE_LITTLE_ENDIAN, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_MIPS_REG_1, &r_r1)); + + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); + + OK(uc_reg_read(uc, UC_MIPS_REG_1, &r_r1)); + + TEST_CHECK(r_r1 == 0x77df); + + OK(uc_close(uc)); +} + +static void test_mips_eb_ori() { + uc_engine* uc; + char code[] = "\x34\x21\x34\x56"; // ori $at, $at, 0x3456; + int r_r1 = 0x6789; + + uc_common_setup(&uc, UC_ARCH_MIPS, UC_MODE_32 | UC_MODE_BIG_ENDIAN, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_MIPS_REG_1, &r_r1)); + + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); + + OK(uc_reg_read(uc, UC_MIPS_REG_1, &r_r1)); + + TEST_CHECK(r_r1 == 0x77df); + + OK(uc_close(uc)); +} + +static void test_mips_stop_at_branch() { + uc_engine* uc; + char code[] = "\x02\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00"; // j 0x8; nop; + int r_pc = 0x0; + + uc_common_setup(&uc, UC_ARCH_MIPS, UC_MODE_32 | UC_MODE_LITTLE_ENDIAN, code, sizeof(code) - 1); + + // Execute one instruction with branch delay slot. 
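+ // A MIPS branch is committed together with the instruction in its
+ // delay slot, so an instruction count of 1 still executes both here.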
+ OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 1)); + + OK(uc_reg_read(uc, UC_MIPS_REG_PC, &r_pc)); + + // Even if we just execute one instruction, the instruction in the + // delay slot is also executed. + TEST_CHECK(r_pc == code_start + 0x8); + + OK(uc_close(uc)); +} + +static void test_mips_stop_at_delay_slot() { + uc_engine* uc; + char code[] = "\x02\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00"; // j 0x8; nop; + int r_pc = 0x0; + + uc_common_setup(&uc, UC_ARCH_MIPS, UC_MODE_32 | UC_MODE_LITTLE_ENDIAN, code, sizeof(code) - 1); + + // Stop at the delay slot by design. + OK(uc_emu_start(uc, code_start, code_start + 4, 0, 0)); + + OK(uc_reg_read(uc, UC_MIPS_REG_PC, &r_pc)); + + // The branch instruction isn't committed and the PC is not updated. + // The user is responsible for restarting emulation at the branch instruction. + TEST_CHECK(r_pc == code_start); + + OK(uc_close(uc)); +} + +TEST_LIST = { + { "test_mips_stop_at_branch", test_mips_stop_at_branch }, + { "test_mips_stop_at_delay_slot", test_mips_stop_at_delay_slot}, + { "test_mips_el_ori", test_mips_el_ori}, + { "test_mips_eb_ori", test_mips_eb_ori}, + { NULL, NULL } +}; \ No newline at end of file diff --git a/tests/unit/test_multihook.c b/tests/unit/test_multihook.c deleted file mode 100644 index 86f6df63..00000000 --- a/tests/unit/test_multihook.c +++ /dev/null @@ -1,111 +0,0 @@ -#include "unicorn_test.h" -#include "unicorn/unicorn.h" - -#define OK(x) uc_assert_success(x) - -/* Called before every test to set up a new instance */ -static int setup32(void **state) -{ - uc_engine *uc; - - OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); - - *state = uc; - return 0; -} - -/* Called after every test to clean up */ -static int teardown(void **state) -{ - uc_engine *uc = *state; - - OK(uc_close(uc)); - - *state = NULL; - return 0; } - -/******************************************************************************/ - -struct bb { - uint64_t addr; - size_t size; -}; - -struct bbtest { - const struct bb *blocks; - unsigned int blocknum; -}; - - -static void test_basic_blocks_hook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - struct bbtest *bbtest = user_data; - const struct bb *bb = &bbtest->blocks[bbtest->blocknum]; - - printf("block hook 1: %d == %zu\n", size, bb->size); - assert_int_equal(address, bb->addr); - assert_int_equal((size_t)size, bb->size); -} - -static void test_basic_blocks_hook2(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - struct bbtest *bbtest = user_data; - const struct bb *bb = &bbtest->blocks[bbtest->blocknum++]; - - printf("block hook 2: %d == %zu\n", size, bb->size); - assert_int_equal(address, bb->addr); - assert_int_equal((size_t)size, bb->size); -} - -static void test_basic_blocks(void **state) -{ - uc_engine *uc = *state; - uc_hook trace1, trace2; - -#define BASEADDR 0x1000000 - - uint64_t address = BASEADDR; - const uint8_t code[] = { - 0x33, 0xC0, // xor eax, eax - 0x90, // nop - 0x90, // nop - 0xEB, 0x00, // jmp $+2 - 0x90, // nop - 0x90, // nop - 0x90, // nop - }; - - static const struct bb blocks[] = { - {BASEADDR, 6}, - {BASEADDR+ 6, 3}, - }; - - struct bbtest bbtest = { - .blocks = blocks, - .blocknum = 0, - }; - - -#undef BASEADDR - - // map 2MB memory for this emulation - OK(uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL)); - - // write machine code to be emulated to memory - OK(uc_mem_write(uc, address, code, sizeof(code))); - - // trace all basic blocks - OK(uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, test_basic_blocks_hook, &bbtest,
1, 0)); - OK(uc_hook_add(uc, &trace2, UC_HOOK_BLOCK, test_basic_blocks_hook2, &bbtest, 1, 0)); - - OK(uc_emu_start(uc, address, address+sizeof(code), 0, 0)); -} - -int main(void) -{ - const struct CMUnitTest tests[] = { - cmocka_unit_test_setup_teardown(test_basic_blocks, setup32, teardown), - }; - return cmocka_run_group_tests(tests, NULL, NULL); -} diff --git a/tests/unit/test_pc_change.c b/tests/unit/test_pc_change.c deleted file mode 100644 index c513e6b7..00000000 --- a/tests/unit/test_pc_change.c +++ /dev/null @@ -1,100 +0,0 @@ -// Test PC change during the callback. by Nguyen Anh Quynh, 2016 -#include "unicorn_test.h" -#include "unicorn/unicorn.h" -#include "sys/stat.h" - -#define OK(x) uc_assert_success(x) - -/* Called before every test to set up a new instance */ -static int setup32(void **state) -{ - uc_engine *uc; - - OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); - - *state = uc; - return 0; -} - -/* Called after every test to clean up */ -static int teardown(void **state) -{ - uc_engine *uc = *state; - - OK(uc_close(uc)); - - *state = NULL; - return 0; -} - -/******************************************************************************/ - -static void test_code_hook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - uint8_t tmp[256]; - int32_t r_eip = 0x1000006; - printf("instruction at 0x%"PRIx64": ", address); - - if (!uc_mem_read(uc, address, tmp, size)) { - uint32_t i; - - for (i = 0; i < size; i++) { - printf("0x%x ", tmp[i]); - } - printf("\n"); - } - - if (address == 0x1000003) { - // change the PC to "inc EDX" - uc_reg_write(uc, UC_X86_REG_EIP, &r_eip); - } -} - -static void test_pc_change(void **state) -{ - uc_engine *uc = *state; - uc_hook trace1; - int32_t r_ecx = 3, r_edx = 15; - struct stat info; - char *code = read_file("pc_change.bin", &info); - if (code == NULL) { - return; - } - -#define BASEADDR 0x1000000 - - uint64_t address = BASEADDR; - -#undef BASEADDR - - // map 2MB memory for this emulation - OK(uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL)); - - // write machine code to be emulated to memory - OK(uc_mem_write(uc, address, code, info.st_size)); - - uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); - uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); - printf("ECX = %u, EDX = %u\n", r_ecx, r_edx); - - // trace all instructions - OK(uc_hook_add(uc, &trace1, UC_HOOK_CODE, test_code_hook, NULL, 1, 0)); - - OK(uc_emu_start(uc, address, address+info.st_size, 0, 0)); - - uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); - uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); - - printf("ECX = %u, EDX = %u\n", r_ecx, r_edx); - assert_int_equal(r_ecx, 6); - assert_int_equal(r_edx, 17); - free(code); -} - -int main(void) -{ - const struct CMUnitTest tests[] = { - cmocka_unit_test_setup_teardown(test_pc_change, setup32, teardown), - }; - return cmocka_run_group_tests(tests, NULL, NULL); -} diff --git a/tests/unit/test_ppc.c b/tests/unit/test_ppc.c new file mode 100644 index 00000000..972d24c6 --- /dev/null +++ b/tests/unit/test_ppc.c @@ -0,0 +1,8 @@ +#include "unicorn_test.h" + +const uint64_t code_start = 0x1000; +const uint64_t code_len = 0x4000; + +TEST_LIST = { + { NULL, NULL } +}; \ No newline at end of file diff --git a/tests/unit/test_riscv.c b/tests/unit/test_riscv.c new file mode 100644 index 00000000..972d24c6 --- /dev/null +++ b/tests/unit/test_riscv.c @@ -0,0 +1,8 @@ +#include "unicorn_test.h" + +const uint64_t code_start = 0x1000; +const uint64_t code_len = 0x4000; + +TEST_LIST = { + { NULL, NULL } +}; \ No newline at end of file diff --git 
a/tests/unit/test_sanity.c b/tests/unit/test_sanity.c deleted file mode 100644 index 9788658b..00000000 --- a/tests/unit/test_sanity.c +++ /dev/null @@ -1,88 +0,0 @@ -#include "unicorn_test.h" - -/* Make sure the uc_assert macros work with constants */ -static void test_uc_assert_macros_constants(void **state) -{ - const uc_err nomem = UC_ERR_NOMEM; - - uc_assert_success(UC_ERR_OK); - uc_assert_err(UC_ERR_NOMEM, nomem); - uc_assert_fail(UC_ERR_VERSION); -} - -/******************************************************************************/ - -static uc_err feedback(uc_err err, int *callcount) -{ - assert_int_equal(++(*callcount), 1); - return err; -} - -/** - * Make sure the uc_assert macros work with function calls - * and only evaluate them once! - */ -static void test_uc_assert_macros_func_calls(void **state) -{ - int callcount; - - callcount = 0; - uc_assert_success(feedback(UC_ERR_OK, &callcount)); - - callcount = 0; - uc_assert_err(UC_ERR_NOMEM, feedback(UC_ERR_NOMEM, &callcount)); - - callcount = 0; - uc_assert_fail(feedback(UC_ERR_VERSION, &callcount)); -} - -/******************************************************************************/ - -static void fail_uc_assert_success(void **state) -{ - uc_assert_success(UC_ERR_NOMEM); -} - -static void fail_uc_assert_err(void **state) -{ - const uc_err ok = UC_ERR_OK; - uc_assert_err(UC_ERR_VERSION, ok); -} - -static void fail_uc_assert_fail(void **state) -{ - uc_assert_fail(UC_ERR_OK); -} - -static void test_uc_assert_macros_fail(void **state) -{ - /* A test-inside-a-test */ - - const struct CMUnitTest tests[] = { - /* these should all fail */ - cmocka_unit_test(fail_uc_assert_success), - cmocka_unit_test(fail_uc_assert_err), - cmocka_unit_test(fail_uc_assert_fail), - }; - - print_message("\n\n--------------------------------------------------------------------------------\n"); - print_message("START: Failure of the following tests is expected.\n\n"); - - assert_int_not_equal(0, cmocka_run_group_tests(tests, NULL, NULL)); - - print_message("\n\nEND: Failure of the preceding tests was expected.\n"); - print_message("--------------------------------------------------------------------------------\n\n"); -} - -/******************************************************************************/ - -int main(void) -{ - const struct CMUnitTest tests[] = { - cmocka_unit_test(test_uc_assert_macros_constants), - cmocka_unit_test(test_uc_assert_macros_func_calls), - cmocka_unit_test(test_uc_assert_macros_fail), - }; - - return cmocka_run_group_tests(tests, NULL, NULL); -} diff --git a/tests/unit/test_sparc.c b/tests/unit/test_sparc.c new file mode 100644 index 00000000..972d24c6 --- /dev/null +++ b/tests/unit/test_sparc.c @@ -0,0 +1,8 @@ +#include "unicorn_test.h" + +const uint64_t code_start = 0x1000; +const uint64_t code_len = 0x4000; + +TEST_LIST = { + { NULL, NULL } +}; \ No newline at end of file diff --git a/tests/unit/test_tb_x86.c b/tests/unit/test_tb_x86.c deleted file mode 100644 index 04d07636..00000000 --- a/tests/unit/test_tb_x86.c +++ /dev/null @@ -1,306 +0,0 @@ -/** - * Unicorn x86_32 self-modifying unit test - * - * This test demonstrates the flushing of instruction translation cache - * after a self-modification of Intel's x8's "IMUL Gv,Ev,Ib" instruction. 
- */ -#include "unicorn_test.h" -#include -#include -#include -#include -#include "unicorn/unicorn.h" - -#define RIP_NEXT_TO_THE_SELFMODIFY_OPCODE (1) - - -// Demostration of a self-modifying "IMUL eax,mem,Ib" opcode -// And the QEMU's ability to flush the translation buffer properly - -#define MIN(a, b) (a < b? a: b) - -#define CODE_SPACE (2 * 1024 * 1024) -#define PHY_STACK_REGION (0x60000000) - -/* Called before every test to set up a new instance */ -static int setup(void **state) -{ - uc_engine *uc; - - uc_assert_success(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); - - *state = uc; - return 0; -} - - -/* Called after every test to clean up */ -static int teardown(void **state) -{ - uc_engine *uc = *state; - - uc_assert_success(uc_close(uc)); - - *state = NULL; - return 0; -} - - - -static void dump_stack_mem(uc_engine *uc, const struct stat info) -{ - uint8_t tmp[256]; - uint32_t size; - - size = sizeof(info.st_size); - if (size > 255) size = 255; - if (!uc_mem_read(uc, PHY_STACK_REGION, tmp, size)) - { - uint32_t i; - - printf("Stack region dump"); - for (i=0; iport = port; + result->size = size; +} + +static void test_x86_in() { + uc_engine* uc; + uc_hook hook; + char code[] = "\xe5\x10"; // IN eax, 0x10 + INSN_IN_RESULT result; + + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); + OK(uc_hook_add(uc, &hook, UC_HOOK_INSN, test_x86_in_callback, &result, 1, 0, UC_X86_INS_IN)); + + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); + TEST_CHECK(result.port == 0x10); + TEST_CHECK(result.size == 4); + + OK(uc_hook_del(uc, hook)); OK(uc_close(uc)); - - *state = NULL; - return 0; } -/******************************************************************************/ +typedef struct _INSN_OUT_RESULT { + uint32_t port; + int size; + uint32_t value; +} INSN_OUT_RESULT; -struct bb { - uint64_t addr; - size_t size; -}; +static void test_x86_out_callback(uc_engine* uc, uint32_t port, int size, uint32_t value, void* user_data) { + INSN_OUT_RESULT* result = (INSN_OUT_RESULT*)user_data; -struct bbtest { - const struct bb *blocks; - unsigned int blocknum; -}; - - -static void test_basic_blocks_hook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - struct bbtest *bbtest = user_data; - const struct bb *bb = &bbtest->blocks[bbtest->blocknum++]; - - assert_int_equal(address, bb->addr); - assert_int_equal((size_t)size, bb->size); + result->port = port; + result->size = size; + result->value = value; } -static void test_basic_blocks(void **state) -{ - uc_engine *uc = *state; - uc_hook trace1; +static void test_x86_out() { + uc_engine* uc; + uc_hook hook; + char code[] = "\xb0\x32\xe6\x46"; // MOV al, 0x32; OUT 0x46, al; + INSN_OUT_RESULT result; -#define BASEADDR 0x1000000 + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); + OK(uc_hook_add(uc, &hook, UC_HOOK_INSN, test_x86_out_callback, &result, 1, 0, UC_X86_INS_OUT)); + + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); + TEST_CHECK(result.port == 0x46); + TEST_CHECK(result.size == 1); + TEST_CHECK(result.value == 0x32); - uint64_t address = BASEADDR; - const uint8_t code[] = { - 0x33, 0xC0, // xor eax, eax - 0x90, // nop - 0x90, // nop - 0xEB, 0x00, // jmp $+2 - 0x90, // nop - 0x90, // nop - 0x90, // nop + OK(uc_hook_del(uc, hook)); + OK(uc_close(uc)); +} + +typedef struct _MEM_HOOK_RESULT { + uc_mem_type type; + uint64_t address; + int size; + uint64_t value; +} MEM_HOOK_RESULT; + +typedef struct _MEM_HOOK_RESULTS { + uint64_t count; + MEM_HOOK_RESULT 
results[16]; +} MEM_HOOK_RESULTS; + +static bool test_x86_mem_hook_all_callback(uc_engine* uc, uc_mem_type type, uint64_t address, int size, uint64_t value, void* user_data) { + MEM_HOOK_RESULTS* r = (MEM_HOOK_RESULTS*)user_data; + uint64_t count = r->count; + + if (count >= 16) { + TEST_ASSERT(false); + } + + r->results[count].type = type; + r->results[count].address = address; + r->results[count].size = size; + r->results[count].value = value; + r->count++; + + if (type == UC_MEM_READ_UNMAPPED) { + uc_mem_map(uc, address, 0x1000, UC_PROT_ALL); + } + + return true; +} + +static void test_x86_mem_hook_all() { + uc_engine* uc; + uc_hook hook; + // mov eax, 0xdeadbeef; + // mov [0x8000], eax; + // mov eax, [0x10000]; + char code[] = "\xb8\xef\xbe\xad\xde\xa3\x00\x80\x00\x00\xa1\x00\x00\x01\x00"; + MEM_HOOK_RESULTS r = { 0 }; + MEM_HOOK_RESULT expects[3] = { + {UC_MEM_WRITE, 0x8000, 4, 0xdeadbeef}, + {UC_MEM_READ_UNMAPPED, 0x10000, 4, 0}, + {UC_MEM_READ, 0x10000, 4, 0} }; - static const struct bb blocks[] = { - {BASEADDR, 6}, - {BASEADDR+ 6, 3}, - }; + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); + OK(uc_mem_map(uc, 0x8000, 0x1000, UC_PROT_ALL)); + OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_VALID | UC_HOOK_MEM_INVALID, test_x86_mem_hook_all_callback, &r, 1, 0)); + + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); + TEST_CHECK(r.count == 3); + for (int i = 0; i < r.count; i ++) { + TEST_CHECK(expects[i].type == r.results[i].type); + TEST_CHECK(expects[i].address == r.results[i].address); + TEST_CHECK(expects[i].size == r.results[i].size); + TEST_CHECK(expects[i].value == r.results[i].value); + } - struct bbtest bbtest = { - .blocks = blocks, - .blocknum = 0, - }; - - -#undef BASEADDR - - // map 2MB memory for this emulation - OK(uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL)); - - // write machine code to be emulated to memory - OK(uc_mem_write(uc, address, code, sizeof(code))); - - // trace all basic blocks - OK(uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, test_basic_blocks_hook, &bbtest, 1, 0)); - - OK(uc_emu_start(uc, address, address+sizeof(code), 0, 0)); + OK(uc_hook_del(uc, hook)); + OK(uc_close(uc)); } -/******************************************************************************/ - -// callback for tracing basic blocks -static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - //printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); -} - -// callback for tracing instruction -static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - //int eflags; - //printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); - - //uc_reg_read(uc, UC_X86_REG_EFLAGS, &eflags); - //printf(">>> --- EFLAGS is 0x%x\n", eflags); - - // Uncomment below code to stop the emulation using uc_emu_stop() - // if (address == 0x1000009) - // uc_emu_stop(uc); -} - -static void test_i386(void **state) -{ - uc_engine *uc; - uc_err err; - uint32_t tmp; - uc_hook trace1, trace2; - - const uint8_t code[] = "\x41\x4a\x66\x0f\xef\xc1"; // INC ecx; DEC edx; PXOR xmm0, xmm1 - const uint64_t address = 0x1000000; - - int r_ecx = 0x1234; // ECX register - int r_edx = 0x7890; // EDX register - // XMM0 and XMM1 registers, low qword then high qword +static void test_x86_inc_dec_pxor() { + uc_engine* uc; + char code[] = "\x41\x4a\x66\x0f\xef\xc1"; // INC ecx; DEC edx; PXOR xmm0, xmm1 + int r_ecx = 0x1234; + int r_edx = 0x7890; uint64_t r_xmm0[2] = 
{0x08090a0b0c0d0e0f, 0x0001020304050607}; uint64_t r_xmm1[2] = {0x8090a0b0c0d0e0f0, 0x0010203040506070}; - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - uc_assert_success(err); + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx)); + OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); + OK(uc_reg_write(uc, UC_X86_REG_XMM0, &r_xmm0)); + OK(uc_reg_write(uc, UC_X86_REG_XMM1, &r_xmm1)); - // map 2MB memory for this emulation - err = uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL); - uc_assert_success(err); + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, sizeof(code)-1); - uc_assert_success(err); + OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); + OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); + OK(uc_reg_read(uc, UC_X86_REG_XMM0, &r_xmm0)); - // initialize machine registers - err = uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); - uc_assert_success(err); - err = uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); - uc_assert_success(err); - err = uc_reg_write(uc, UC_X86_REG_XMM0, &r_xmm0); - uc_assert_success(err); - err = uc_reg_write(uc, UC_X86_REG_XMM1, &r_xmm1); - uc_assert_success(err); + TEST_CHECK(r_ecx == 0x1235); + TEST_CHECK(r_edx == 0x788f); + TEST_CHECK(r_xmm0[0] == 0x8899aabbccddeeff); + TEST_CHECK(r_xmm0[1] == 0x0011223344556677); - // tracing all basic blocks with customized callback - err = uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); - uc_assert_success(err); - - // tracing all instruction by having @begin > @end - err = uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); - uc_assert_success(err); - - // emulate machine code in infinite time - err = uc_emu_start(uc, address, address+sizeof(code)-1, 0, 0); - uc_assert_success(err); - - // now print out some registers - //printf(">>> Emulation done. 
Below is the CPU context\n"); - - uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); - uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); - uc_reg_read(uc, UC_X86_REG_XMM0, &r_xmm0); - - assert_int_equal(r_ecx, 0x1235); - assert_int_equal(r_edx, 0x788F); - uint64_t r_xmm0_expected[2] = {0x8899aabbccddeeff, 0x0011223344556677}; - assert_memory_equal(r_xmm0, r_xmm0_expected, sizeof(r_xmm0)); - - // read from memory - err = uc_mem_read(uc, address, (uint8_t *)&tmp, 4); - uc_assert_success(err); - //printf(">>> Read 4 bytes from [0x%"PRIX64"] = 0x%x\n", address, tmp); - - uc_close(uc); + OK(uc_close(uc)); } -static void test_i386_jump(void **state) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2; +static void test_x86_relative_jump() { + uc_engine* uc; + char code[] = "\xeb\x02\x90\x90\x90\x90\x90\x90"; // jmp 4; nop; nop; nop; nop; nop; nop + int r_eip; - const uint8_t code[] = "\xeb\x02\x90\x90\x90\x90\x90\x90"; // jmp 4; nop; nop; nop; nop; nop; nop - const uint64_t address = 0x1000000; + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - uc_assert_success(err); + OK(uc_emu_start(uc, code_start, code_start + 4, 0, 0)); - // map 2MB memory for this emulation - err = uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL); - uc_assert_success(err); + OK(uc_reg_read(uc, UC_X86_REG_EIP, &r_eip)); - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, sizeof(code)-1); - uc_assert_success(err); + TEST_CHECK(r_eip == code_start + 4); - // tracing 1 basic block with customized callback - err = uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, address, address); - uc_assert_success(err); - - // tracing 1 instruction at address - err = uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, address, address); - uc_assert_success(err); - - // emulate machine code in infinite time - err = uc_emu_start(uc, address, address+sizeof(code)-1, 0, 0); - uc_assert_success(err); - - err = uc_close(uc); - uc_assert_success(err); + OK(uc_close(uc)); } -/******************************************************************************/ +static void test_x86_loop() { + uc_engine* uc; + char code[] = "\x41\x4a\xeb\xfe"; // inc ecx; dec edx; jmp $; + int r_ecx = 0x1234; + int r_edx = 0x7890; -// callback for IN instruction (X86). -// this returns the data read from the port -static uint32_t hook_in(uc_engine *uc, uint32_t port, int size, void *user_data) -{ - uint32_t eip; + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx)); + OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx)); - uc_reg_read(uc, UC_X86_REG_EIP, &eip); + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 1 * 1000000, 0)); - //printf("--- reading from port 0x%x, size: %u, address: 0x%x\n", port, size, eip); + OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); + OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); - switch(size) { - default: - return 0; // should never reach this - case 1: - // read 1 byte to AL - return 0xf1; - case 2: - // read 2 byte to AX - return 0xf2; - case 4: - // read 4 byte to EAX - return 0xf4; - } + TEST_CHECK(r_ecx == 0x1235); + TEST_CHECK(r_edx == 0x788f); + + OK(uc_close(uc)); } -// callback for OUT instruction (X86). 
-static void hook_out(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data) -{ - uint32_t tmp; - uint32_t eip; +static void test_x86_invalid_mem_read() { + uc_engine* uc; + char code[] = "\x8b\x0d\xaa\xaa\xaa\xaa"; // mov ecx, [0xAAAAAAAA] - uc_reg_read(uc, UC_X86_REG_EIP, &eip); + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); - //printf("--- writing to port 0x%x, size: %u, value: 0x%x, address: 0x%x\n", port, size, value, eip); + uc_assert_err(UC_ERR_READ_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); - // TODO: confirm that value is indeed the value of AL/AX/EAX - switch(size) { - default: - return; // should never reach this - case 1: - uc_reg_read(uc, UC_X86_REG_AL, &tmp); - break; - case 2: - uc_reg_read(uc, UC_X86_REG_AX, &tmp); - break; - case 4: - uc_reg_read(uc, UC_X86_REG_EAX, &tmp); - break; - } - - //printf("--- register value = 0x%x\n", tmp); + OK(uc_close(uc)); } -static void test_i386_inout(void **state) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2, trace3, trace4; +static void test_x86_invalid_mem_write() { + uc_engine* uc; + char code[] = "\x89\x0d\xaa\xaa\xaa\xaa"; // mov [0xAAAAAAAA], ecx - int r_eax = 0x1234; // EAX register - int r_ecx = 0x6789; // ECX register + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); - static const uint64_t address = 0x1000000; - static const uint8_t code[] = { - 0x41, // inc ecx - 0xE4, 0x3F, // in al, 0x3F - 0x4A, // dec edx - 0xE6, 0x46, // out 0x46, al - 0x43, // inc ebx - }; + uc_assert_err(UC_ERR_WRITE_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - uc_assert_success(err); - - // map 2MB memory for this emulation - err = uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL); - uc_assert_success(err); - - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, sizeof(code)); - uc_assert_success(err); - - // initialize machine registers - err = uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); - uc_assert_success(err); - err = uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); - uc_assert_success(err); - - // tracing all basic blocks with customized callback - err = uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); - uc_assert_success(err); - - // tracing all instructions - err = uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); - uc_assert_success(err); - - // uc IN instruction - err = uc_hook_add(uc, &trace3, UC_HOOK_INSN, hook_in, NULL, 1, 0, UC_X86_INS_IN); - uc_assert_success(err); - - // uc OUT instruction - err = uc_hook_add(uc, &trace4, UC_HOOK_INSN, hook_out, NULL, 1, 0, UC_X86_INS_OUT); - uc_assert_success(err); - - // emulate machine code in infinite time - err = uc_emu_start(uc, address, address+sizeof(code), 0, 0); - uc_assert_success(err); - - uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); - uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); - //printf(">>> EAX = 0x%x\n", r_eax); - //printf(">>> ECX = 0x%x\n", r_ecx); - // TODO: Assert on the register values here - - uc_assert_success(uc_close(uc)); + OK(uc_close(uc)); } -/******************************************************************************/ +static void test_x86_invalid_jump() { + uc_engine* uc; + char code[] = "\xe9\xe9\xee\xee\xee"; // jmp 0xEEEEEEEE -// emulate code that loop forever -static void test_i386_loop(void **state) -{ - uc_engine *uc; - uc_err err; + uc_common_setup(&uc, UC_ARCH_X86,
UC_MODE_32, code, sizeof(code) - 1); - int r_ecx = 0x1234; // ECX register - int r_edx = 0x7890; // EDX register + uc_assert_err(UC_ERR_FETCH_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); - static const uint64_t address = 0x1000000; - static const uint8_t code[] = { - 0x41, // inc ecx - 0x4a, // dec edx - 0xEB, 0xFE, // jmp $ - }; - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - uc_assert_success(err); - - // map 2MB memory for this emulation - err = uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL); - uc_assert_success(err); - - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, sizeof(code)); - uc_assert_success(err); - - // initialize machine registers - err = uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); - uc_assert_success(err); - err = uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); - uc_assert_success(err); - - // emulate machine code in 2 seconds, so we can quit even - // if the code loops - err = uc_emu_start(uc, address, address+sizeof(code), 2*UC_SECOND_SCALE, 0); - uc_assert_success(err); - - // verify register values - uc_assert_success(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx)); - uc_assert_success(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx)); - - assert_int_equal(r_ecx, 0x1235); - assert_int_equal(r_edx, 0x788F); - - uc_assert_success(uc_close(uc)); + OK(uc_close(uc)); } -/******************************************************************************/ - -// emulate code that reads invalid memory -static void test_i386_invalid_mem_read(void **state) -{ - uc_engine *uc; - uc_err err; - - static const uint64_t address = 0x1000000; - static const uint8_t code[] = { - 0x8b, 0x0D, 0xAA, 0xAA, 0xAA, 0xAA, // mov ecx, [0xAAAAAAAA] - }; - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - uc_assert_success(err); - - // map 2MB memory for this emulation - err = uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL); - uc_assert_success(err); - - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, sizeof(code)); - uc_assert_success(err); - - // emulate machine code in infinite time - err = uc_emu_start(uc, address, address+sizeof(code), 0, 0); - uc_assert_err(UC_ERR_READ_UNMAPPED, err); - - uc_assert_success(uc_close(uc)); -} - -// emulate code that writes invalid memory -static void test_i386_invalid_mem_write(void **state) -{ - uc_engine *uc; - uc_err err; - - static const uint64_t address = 0x1000000; - static const uint8_t code[] = { - 0x89, 0x0D, 0xAA, 0xAA, 0xAA, 0xAA, // mov [0xAAAAAAAA], ecx - }; - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - uc_assert_success(err); - - // map 2MB memory for this emulation - err = uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL); - uc_assert_success(err); - - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, sizeof(code)); - uc_assert_success(err); - - // emulate machine code in infinite time - err = uc_emu_start(uc, address, address+sizeof(code), 0, 0); - uc_assert_err(UC_ERR_WRITE_UNMAPPED, err); - - - uc_assert_success(uc_close(uc)); -} - -// emulate code that jumps to invalid memory -static void test_i386_jump_invalid(void **state) -{ - uc_engine *uc; - uc_err err; - - static const uint64_t address = 0x1000000; - static const uint8_t code[] = { - 0xE9, 0xE9, 0xEE, 0xEE, 0xEE, // jmp 0xEEEEEEEE - }; - - // Initialize emulator in X86-32bit mode - err = uc_open(UC_ARCH_X86, 
UC_MODE_32, &uc); - uc_assert_success(err); - - // map 2MB memory for this emulation - err = uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL); - uc_assert_success(err); - - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, sizeof(code)); - uc_assert_success(err); - - // emulate machine code in infinite time - err = uc_emu_start(uc, address, address+sizeof(code), 0, 0); - uc_assert_err(UC_ERR_FETCH_UNMAPPED, err); - - - uc_assert_success(uc_close(uc)); -} - - -/******************************************************************************/ - -static void hook_mem64(uc_engine *uc, uc_mem_type type, - uint64_t address, int size, int64_t value, void *user_data) -{ - switch(type) { - default: break; - case UC_MEM_READ: - //printf(">>> Memory is being READ at 0x%"PRIx64 ", data size = %u\n", - // address, size); - break; - case UC_MEM_WRITE: - //printf(">>> Memory is being WRITE at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", - // address, size, value); - break; - } -} - -// callback for tracing instruction -static void hook_code64(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - uint64_t rip; - - uc_reg_read(uc, UC_X86_REG_RIP, &rip); - //printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); - //printf(">>> RIP is 0x%"PRIx64 "\n", rip); - - // Uncomment below code to stop the emulation using uc_emu_stop() - // if (address == 0x1000009) - // uc_emu_stop(uc); -} - -static void test_x86_64(void **state) -{ - uc_engine *uc; - uc_err err; - uc_hook trace1, trace2, trace3, trace4; - - static const uint64_t address = 0x1000000; - static const uint8_t code[] = "\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9\x4D\x29\xF4\x49\x81\xC9\xF6\x8A\xC6\x53\x4D\x87\xED\x48\x0F\xAD\xD2\x49\xF7\xD4\x48\xF7\xE1\x4D\x19\xC5\x4D\x89\xC5\x48\xF7\xD6\x41\xB8\x4F\x8D\x6B\x59\x4D\x87\xD0\x68\x6A\x1E\x09\x3C\x59"; - - int64_t rax = 0x71f3029efd49d41d; - int64_t rbx = 0xd87b45277f133ddb; - int64_t rcx = 0xab40d1ffd8afc461; - int64_t rdx = 0x919317b4a733f01; - int64_t rsi = 0x4c24e753a17ea358; - int64_t rdi = 0xe509a57d2571ce96; - int64_t r8 = 0xea5b108cc2b9ab1f; - int64_t r9 = 0x19ec097c8eb618c1; - int64_t r10 = 0xec45774f00c5f682; - int64_t r11 = 0xe17e9dbec8c074aa; - int64_t r12 = 0x80f86a8dc0f6d457; - int64_t r13 = 0x48288ca5671c5492; - int64_t r14 = 0x595f72f6e4017f6e; - int64_t r15 = 0x1efd97aea331cccc; - - int64_t rsp = address + 0x200000; - - - // Initialize emulator in X86-64bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); - uc_assert_success(err); - - // map 2MB memory for this emulation - err = uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL); - uc_assert_success(err); - - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, sizeof(code) - 1); - uc_assert_success(err); - - // initialize machine registers - uc_assert_success(uc_reg_write(uc, UC_X86_REG_RSP, &rsp)); - - uc_assert_success(uc_reg_write(uc, UC_X86_REG_RAX, &rax)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_RBX, &rbx)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_RCX, &rcx)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_RDX, &rdx)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_RSI, &rsi)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_RDI, &rdi)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_R8, &r8)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_R9, &r9)); - uc_assert_success(uc_reg_write(uc, 
UC_X86_REG_R10, &r10)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_R11, &r11)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_R12, &r12)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_R13, &r13)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_R14, &r14)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_R15, &r15)); - - // tracing all basic blocks with customized callback - err = uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); - uc_assert_success(err); - - // tracing all instructions in the range [address, address+20] - err = uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code64, NULL, address, address+20); - uc_assert_success(err); - - // tracing all memory WRITE access (with @begin > @end) - err = uc_hook_add(uc, &trace3, UC_HOOK_MEM_WRITE, hook_mem64, NULL, 1, 0); - uc_assert_success(err); - - // tracing all memory READ access (with @begin > @end) - err = uc_hook_add(uc, &trace4, UC_HOOK_MEM_READ, hook_mem64, NULL, 1, 0); - uc_assert_success(err); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. - err = uc_emu_start(uc, address, address+sizeof(code) - 1, 0, 0); - uc_assert_success(err); - - // Read registers - uc_reg_read(uc, UC_X86_REG_RAX, &rax); - uc_reg_read(uc, UC_X86_REG_RBX, &rbx); - uc_reg_read(uc, UC_X86_REG_RCX, &rcx); - uc_reg_read(uc, UC_X86_REG_RDX, &rdx); - uc_reg_read(uc, UC_X86_REG_RSI, &rsi); - uc_reg_read(uc, UC_X86_REG_RDI, &rdi); - uc_reg_read(uc, UC_X86_REG_R8, &r8); - uc_reg_read(uc, UC_X86_REG_R9, &r9); - uc_reg_read(uc, UC_X86_REG_R10, &r10); - uc_reg_read(uc, UC_X86_REG_R11, &r11); - uc_reg_read(uc, UC_X86_REG_R12, &r12); - uc_reg_read(uc, UC_X86_REG_R13, &r13); - uc_reg_read(uc, UC_X86_REG_R14, &r14); - uc_reg_read(uc, UC_X86_REG_R15, &r15); - -#if 0 - printf(">>> RAX = 0x%" PRIx64 "\n", rax); - printf(">>> RBX = 0x%" PRIx64 "\n", rbx); - printf(">>> RCX = 0x%" PRIx64 "\n", rcx); - printf(">>> RDX = 0x%" PRIx64 "\n", rdx); - printf(">>> RSI = 0x%" PRIx64 "\n", rsi); - printf(">>> RDI = 0x%" PRIx64 "\n", rdi); - printf(">>> R8 = 0x%" PRIx64 "\n", r8); - printf(">>> R9 = 0x%" PRIx64 "\n", r9); - printf(">>> R10 = 0x%" PRIx64 "\n", r10); - printf(">>> R11 = 0x%" PRIx64 "\n", r11); - printf(">>> R12 = 0x%" PRIx64 "\n", r12); - printf(">>> R13 = 0x%" PRIx64 "\n", r13); - printf(">>> R14 = 0x%" PRIx64 "\n", r14); - printf(">>> R15 = 0x%" PRIx64 "\n", r15); -#endif - - uc_assert_success(uc_close(uc)); -} - -/******************************************************************************/ - -// callback for SYSCALL instruction (X86). 
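+// UC_HOOK_INSN hooks registered for UC_X86_INS_SYSCALL use this reduced
+// prototype: only the engine and the user_data pointer are passed in; the
+// callback reads whatever registers it needs (here RAX) itself.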
-static void hook_syscall(uc_engine *uc, void *user_data) -{ +static void test_x86_64_syscall_callback(uc_engine* uc, void* user_data) { uint64_t rax; - uc_assert_success(uc_reg_read(uc, UC_X86_REG_RAX, &rax)); - assert_int_equal(0x100, rax); + OK(uc_reg_read(uc, UC_X86_REG_RAX, &rax)); - rax = 0x200; - uc_assert_success(uc_reg_write(uc, UC_X86_REG_RAX, &rax)); + TEST_CHECK(rax == 0x100); } -static void test_x86_64_syscall(void **state) -{ - uc_engine *uc; - uc_hook trace1; - uc_err err; +static void test_x86_64_syscall() { + uc_engine* uc; + uc_hook hook; + char code[] = "\x0f\x05"; // syscall + uint64_t r_rax = 0x100; - static const uint64_t address = 0x1000000; - static const uint8_t code[] = { - 0x0F, 0x05, // SYSCALL - }; + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_X86_REG_RAX, &r_rax)); + OK(uc_hook_add(uc, &hook, UC_HOOK_INSN, test_x86_64_syscall_callback, NULL, 1, 0, UC_X86_INS_SYSCALL)); - int64_t rax = 0x100; + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); - // Initialize emulator in X86-64bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); - uc_assert_success(err); - - // map 2MB memory for this emulation - err = uc_mem_map(uc, address, 2 * 1024 * 1024, UC_PROT_ALL); - uc_assert_success(err); - - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, sizeof(code)); - uc_assert_success(err); - - // hook interrupts for syscall - err = uc_hook_add(uc, &trace1, UC_HOOK_INSN, hook_syscall, NULL, 1, 0, UC_X86_INS_SYSCALL); - uc_assert_success(err); - - // initialize machine registers - err = uc_reg_write(uc, UC_X86_REG_RAX, &rax); - uc_assert_success(err); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. 
- err = uc_emu_start(uc, address, address + sizeof(code), 0, 0); - uc_assert_success(err); - - // verify register values - uc_assert_success(uc_reg_read(uc, UC_X86_REG_RAX, &rax)); - assert_int_equal(0x200, rax); - - uc_assert_success(uc_close(uc)); + OK(uc_hook_del(uc, hook)); + OK(uc_close(uc)); } -/******************************************************************************/ +static void test_x86_16_add() { + uc_engine* uc; + char code[] = "\x00\x00"; // add byte ptr [bx + si], al + uint16_t r_ax = 7; + uint16_t r_bx = 5; + uint16_t r_si = 6; + uint8_t result; -static void test_x86_16(void **state) -{ - uc_engine *uc; - uc_err err; - uint8_t tmp; + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_16, code, sizeof(code) - 1); + OK(uc_mem_map(uc, 0, 0x1000, UC_PROT_ALL)); + OK(uc_reg_write(uc, UC_X86_REG_AX, &r_ax)); + OK(uc_reg_write(uc, UC_X86_REG_BX, &r_bx)); + OK(uc_reg_write(uc, UC_X86_REG_SI, &r_si)); - static const uint64_t address = 0; - static const uint8_t code[] = { - 0x00, 0x00, // add byte ptr [bx + si], al - }; + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); - int32_t eax = 7; - int32_t ebx = 5; - int32_t esi = 6; - - // Initialize emulator in X86-16bit mode - err = uc_open(UC_ARCH_X86, UC_MODE_16, &uc); - uc_assert_success(err); - - // map 8KB memory for this emulation - err = uc_mem_map(uc, address, 8 * 1024, UC_PROT_ALL); - uc_assert_success(err); - - // write machine code to be emulated to memory - err = uc_mem_write(uc, address, code, sizeof(code)); - uc_assert_success(err); - - // initialize machine registers - uc_assert_success(uc_reg_write(uc, UC_X86_REG_EAX, &eax)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_EBX, &ebx)); - uc_assert_success(uc_reg_write(uc, UC_X86_REG_ESI, &esi)); - - // emulate machine code in infinite time (last param = 0), or when - // finishing all the code. 
- err = uc_emu_start(uc, address, address+sizeof(code), 0, 0); - uc_assert_success(err); - - // read from memory - uc_assert_success(uc_mem_read(uc, 11, &tmp, 1)); - assert_int_equal(7, tmp); - - uc_assert_success(uc_close(uc)); + OK(uc_mem_read(uc, r_bx + r_si, &result, 1)); + TEST_CHECK(result == 7); + OK(uc_close(uc)); } -/******************************************************************************/ +static void test_x86_reg_save() { + uc_engine* uc; + uc_context* ctx; + char code[] = "\x40"; // inc eax + int r_eax = 1; -static void test_i386_reg_save(void **state) -{ - uc_engine *uc; - uc_context *saved_context; + uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1); + OK(uc_reg_write(uc, UC_X86_REG_EAX, &r_eax)); - static const uint64_t address = 0; - static const uint8_t code[] = { - 0x40 // inc eax - }; - int32_t eax = 1; + OK(uc_context_alloc(uc, &ctx)); + OK(uc_context_save(uc, ctx)); + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); + + OK(uc_reg_read(uc, UC_X86_REG_EAX, &r_eax)); + TEST_CHECK(r_eax == 2); - // Initialize emulator - uc_assert_success(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); + OK(uc_context_restore(uc, ctx)); - // map 8KB memory for this emulation - uc_assert_success(uc_mem_map(uc, address, 8 * 1024, UC_PROT_ALL)); + OK(uc_reg_read(uc, UC_X86_REG_EAX, &r_eax)); + TEST_CHECK(r_eax == 1); - // write machine code to be emulated to memory - uc_assert_success(uc_mem_write(uc, address, code, sizeof(code))); - - // set eax to 1 - uc_assert_success(uc_reg_write(uc, UC_X86_REG_EAX, &eax)); - - // step one instruction - uc_assert_success(uc_emu_start(uc, address, address+1, 0, 0)); - - // grab a buffer to use for state saving - uc_assert_success(uc_context_alloc(uc, &saved_context)); - - // save the state - uc_assert_success(uc_context_save(uc, saved_context)); - - // step one instruction - uc_assert_success(uc_emu_start(uc, address, address+1, 0, 0)); - - // check that eax == 3 - uc_assert_success(uc_reg_read(uc, UC_X86_REG_EAX, &eax)); - assert_int_equal(eax, 3); - - // restore the state - uc_context_restore(uc, saved_context); - - // check that eax == 2 - uc_assert_success(uc_reg_read(uc, UC_X86_REG_EAX, &eax)); - assert_int_equal(eax, 2); - - // step one instruction - uc_assert_success(uc_emu_start(uc, address, address+1, 0, 0)); - - // check that eax == 3 - uc_assert_success(uc_reg_read(uc, UC_X86_REG_EAX, &eax)); - assert_int_equal(eax, 3); - - // restore the state - uc_context_restore(uc, saved_context); - - // check that eax == 2 - uc_assert_success(uc_reg_read(uc, UC_X86_REG_EAX, &eax)); - assert_int_equal(eax, 2); - - // clean up; - uc_free(saved_context); - uc_assert_success(uc_close(uc)); + OK(uc_context_free(ctx)); + OK(uc_close(uc)); } -/******************************************************************************/ -int main(void) { - const struct CMUnitTest tests[] = { - cmocka_unit_test(test_i386), - cmocka_unit_test(test_i386_jump), - cmocka_unit_test(test_i386_inout), - cmocka_unit_test(test_i386_loop), - cmocka_unit_test(test_i386_invalid_mem_read), - cmocka_unit_test(test_i386_invalid_mem_write), - cmocka_unit_test(test_i386_jump_invalid), - cmocka_unit_test(test_i386_reg_save), - - cmocka_unit_test(test_x86_64), - cmocka_unit_test(test_x86_64_syscall), - - cmocka_unit_test(test_x86_16), - - cmocka_unit_test_setup_teardown(test_basic_blocks, setup32, teardown), - }; - return cmocka_run_group_tests(tests, NULL, NULL); +static bool test_x86_invalid_mem_read_stop_in_cb_callback(uc_engine* uc, uc_mem_type type, 
uint64_t address, int size, uint64_t value, void* user_data) {
+    // Returning false indicates that we do not handle this error, so the emulation stops.
+    //
+    // Note that the memory must be mapped properly if we return true! See test_x86_mem_hook_all for an example.
+    return false;
+}
+
+static void test_x86_invalid_mem_read_stop_in_cb() {
+    uc_engine* uc;
+    uc_hook hook;
+    char code[] = "\x40\x8b\x1d\x00\x00\x10\x00\x42"; // inc eax; mov ebx, [0x100000]; inc edx
+    int r_eax = 0x1234;
+    int r_edx = 0x5678;
+    int r_eip = 0;
+
+    uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1);
+    OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_READ, test_x86_invalid_mem_read_stop_in_cb_callback, NULL, 1, 0));
+    OK(uc_reg_write(uc, UC_X86_REG_EAX, &r_eax));
+    OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx));
+
+    uc_assert_err(UC_ERR_READ_UNMAPPED, uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0));
+
+    // The state of Unicorn should be correct at this point.
+    OK(uc_reg_read(uc, UC_X86_REG_EIP, &r_eip));
+    OK(uc_reg_read(uc, UC_X86_REG_EAX, &r_eax));
+    OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx));
+
+    TEST_CHECK(r_eip == code_start + 1);
+    TEST_CHECK(r_eax == 0x1235);
+    TEST_CHECK(r_edx == 0x5678);
+
+    OK(uc_close(uc));
+}
+
+
+static void test_x86_x87_fnstenv_callback(uc_engine* uc, uint64_t address, uint32_t size, void* user_data) {
+    uint32_t r_eip;
+    uint32_t r_eax;
+    uint32_t fnstenv[7];
+
+    if (address == code_start + 4) { // The first fnstenv executed
+        // Save the address of the fld.
+        OK(uc_reg_read(uc, UC_X86_REG_EIP, &r_eip));
+        *((uint32_t*)user_data) = r_eip;
+
+        OK(uc_reg_read(uc, UC_X86_REG_EAX, &r_eax));
+        OK(uc_mem_read(uc, r_eax, fnstenv, sizeof(fnstenv)));
+        // FCS:FIP must not be updated for the fnop.
+        TEST_CHECK(fnstenv[3] == 0);
+    }
+}
+
+static void test_x86_x87_fnstenv() {
+    uc_engine* uc;
+    uc_hook hook;
+    char code[] = "\xd9\xd0\xd9\x30\xd9\x00\xd9\x30"; // fnop; fnstenv [eax]; fld dword ptr [eax]; fnstenv [eax]
+    uint32_t base = code_start + 3 * code_len;
+    uint32_t last_eip;
+    uint32_t fnstenv[7];
+
+    uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1);
+    OK(uc_mem_map(uc, base, code_len, UC_PROT_ALL));
+    OK(uc_reg_write(uc, UC_X86_REG_EAX, &base));
+
+    OK(uc_hook_add(uc, &hook, UC_HOOK_CODE, test_x86_x87_fnstenv_callback, &last_eip, 1, 0));
+
+    OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0));
+
+    OK(uc_mem_read(uc, base, fnstenv, sizeof(fnstenv)));
+    // But FCS:FIP must be updated for the fld.
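+    // Layout of the 28-byte protected-mode FNSTENV image (7 dwords), which
+    // this check relies on: [0]=FCW, [1]=FSW, [2]=FTW, [3]=FIP,
+    // [4]=FCS+opcode, [5]=FDP, [6]=FDS. So fnstenv[3] is the saved address
+    // of the last non-control x87 instruction -- here the fld.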
+    TEST_CHECK(fnstenv[3] == last_eip);
+
+    OK(uc_close(uc));
+}
+
+static uint64_t test_x86_mmio_read_callback(uc_engine* uc, uint64_t offset, unsigned size, void* user_data) {
+    TEST_CHECK(offset == 4);
+    TEST_CHECK(size == 4);
+
+    return 0x19260817;
+}
+
+static void test_x86_mmio_write_callback(uc_engine* uc, uint64_t offset, unsigned size, uint64_t value, void* user_data) {
+    TEST_CHECK(offset == 4);
+    TEST_CHECK(size == 4);
+    TEST_CHECK(value == 0xdeadbeef);
+
+    return;
+}
+
+static void test_x86_mmio() {
+    uc_engine* uc;
+    int r_ecx = 0xdeadbeef;
+    char code[] = "\x89\x0d\x04\x00\x02\x00\x8b\x0d\x04\x00\x02\x00"; // mov [0x20004], ecx; mov ecx, [0x20004]
+
+    uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1);
+    OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx));
+    OK(uc_mmio_map(uc, 0x20000, 0x1000, test_x86_mmio_read_callback, NULL, test_x86_mmio_write_callback, NULL));
+
+    OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0));
+
+    OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx));
+
+    TEST_CHECK(r_ecx == 0x19260817);
+
+    OK(uc_close(uc));
+}
+
+static bool test_x86_missing_code_callback(uc_engine* uc, uc_mem_type type, uint64_t address, int size, uint64_t value, void* user_data) {
+    char code[] = "\x41\x4a"; // inc ecx; dec edx;
+    uint64_t aligned_address = address & 0xFFFFFFFFFFFFF000ULL;
+    int aligned_size = ((int)(size / 0x1000) + 1) * 0x1000;
+
+    OK(uc_mem_map(uc, aligned_address, aligned_size, UC_PROT_ALL));
+
+    OK(uc_mem_write(uc, aligned_address, code, sizeof(code) - 1));
+
+    return true;
+}
+
+static void test_x86_missing_code() {
+    uc_engine* uc;
+    uc_hook hook;
+    int r_ecx = 0x1234;
+    int r_edx = 0x7890;
+
+    // By design, no code is mapped or written up front; the UC_HOOK_MEM_UNMAPPED callback supplies it.
+    OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc));
+    OK(uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx));
+    OK(uc_reg_write(uc, UC_X86_REG_EDX, &r_edx));
+    OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_UNMAPPED, test_x86_missing_code_callback, NULL, 1, 0));
+
+    OK(uc_emu_start(uc, code_start, code_start + 2, 0, 0));
+
+    OK(uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx));
+    OK(uc_reg_read(uc, UC_X86_REG_EDX, &r_edx));
+
+    TEST_CHECK(r_ecx == 0x1235);
+    TEST_CHECK(r_edx == 0x788f);
+
+    OK(uc_close(uc));
+}
+
+static void test_x86_smc_xor() {
+    uc_engine* uc;
+    /*
+     * 0x1000 xor dword ptr [edi+0x3], eax ; edi=0x1000, eax=0xbc4177e6
+     * 0x1003 dw 0x3ea98b13
+     */
+    char code[] = "\x31\x47\x03\x13\x8b\xa9\x3e";
+    int r_edi = code_start;
+    int r_eax = 0xbc4177e6;
+    uint32_t result;
+
+    uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1);
+    OK(uc_reg_write(uc, UC_X86_REG_EDI, &r_edi));
+    OK(uc_reg_write(uc, UC_X86_REG_EAX, &r_eax));
+
+    OK(uc_emu_start(uc, code_start, code_start + 3, 0, 0));
+
+    OK(uc_mem_read(uc, code_start + 3, (void *)&result, 4));
+
+    TEST_CHECK(result == (0x3ea98b13 ^ 0xbc4177e6));
+
+    OK(uc_close(uc));
+}
+
+static uint64_t test_x86_mmio_uc_mem_rw_read_callback(uc_engine* uc, uint64_t offset, unsigned size, void* user_data) {
+    TEST_CHECK(offset == 8);
+    TEST_CHECK(size == 4);
+
+    return 0x19260817;
+}
+
+static void test_x86_mmio_uc_mem_rw_write_callback(uc_engine* uc, uint64_t offset, unsigned size, uint64_t value, void* user_data) {
+    TEST_CHECK(offset == 4);
+    TEST_CHECK(size == 4);
+    TEST_CHECK(value == 0xdeadbeef);
+
+    return;
+}
+
+
+static void test_x86_mmio_uc_mem_rw() {
+    uc_engine* uc;
+    int data = 0xdeadbeef;
+
+    OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc));
+
+    OK(uc_mmio_map(uc,
+        0x20000, 0x1000,
+        test_x86_mmio_uc_mem_rw_read_callback, NULL,
+        test_x86_mmio_uc_mem_rw_write_callback, NULL));
+
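+    // uc_mem_write()/uc_mem_read() on an MMIO mapping are routed through the
+    // registered callbacks, with offsets relative to the mapping base
+    // (0x20000): the 4-byte write at 0x20004 reaches the write callback with
+    // offset == 4, and the read at 0x20008 reaches the read callback with
+    // offset == 8.
+    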
OK(uc_mem_write(uc, 0x20004, (void*)&data, 4)); + OK(uc_mem_read(uc, 0x20008, (void*)&data, 4)); + + TEST_CHECK(data == 0x19260817); + + OK(uc_close(uc)); +} + +TEST_LIST = { + { "test_x86_in", test_x86_in }, + { "test_x86_out", test_x86_out }, + { "test_x86_mem_hook_all", test_x86_mem_hook_all }, + { "test_x86_inc_dec_pxor", test_x86_inc_dec_pxor }, + { "test_x86_relative_jump", test_x86_relative_jump }, + { "test_x86_loop", test_x86_loop }, + { "test_x86_invalid_mem_read", test_x86_invalid_mem_read }, + { "test_x86_invalid_mem_write", test_x86_invalid_mem_write }, + { "test_x86_invalid_jump", test_x86_invalid_jump}, + { "test_x86_64_syscall", test_x86_64_syscall }, + { "test_x86_16_add", test_x86_16_add }, + { "test_x86_reg_save", test_x86_reg_save }, + { "test_x86_invalid_mem_read_stop_in_cb", test_x86_invalid_mem_read_stop_in_cb }, + { "test_x86_x87_fnstenv", test_x86_x87_fnstenv}, + { "test_x86_mmio", test_x86_mmio}, + { "test_x86_missing_code", test_x86_missing_code}, + { "test_x86_smc_xor", test_x86_smc_xor}, + { "test_x86_mmio_uc_mem_rw", test_x86_mmio_uc_mem_rw}, + { NULL, NULL } +}; \ No newline at end of file diff --git a/tests/unit/test_x86_rip_bug.c b/tests/unit/test_x86_rip_bug.c deleted file mode 100644 index 65f5f813..00000000 --- a/tests/unit/test_x86_rip_bug.c +++ /dev/null @@ -1,269 +0,0 @@ -#include - -#include "unicorn_test.h" - -/** - * Initialize i386 Unicorn Instance - */ -static int setup_i386(void **state) -{ - uc_engine *uc; - - uc_assert_success(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); - - *state = uc; - return 0; -} - -/** - * Initialize amd64 Unicorn Instance - */ -static int setup_amd64(void **state) -{ - uc_engine *uc; - - uc_assert_success(uc_open(UC_ARCH_X86, UC_MODE_64, &uc)); - - *state = uc; - return 0; -} - -/** - * Shutdown a Unicorn Instance - */ -static int teardown(void **state) -{ - uc_engine *uc = *state; - - uc_assert_success(uc_close(uc)); - - *state = NULL; - return 0; -} - -/***********************************************************************************/ - -typedef struct { - bool good; - uint64_t actual; - uint64_t expected; -} TestData; - -const uint64_t CodePage = 0x10000; -const uint64_t CodeSize = 0x4000; - -/** - * Hook for reading unmapped memory in the i386 Unicorn Instance. - * - * BUG: EIP from uc_reg_read does not match expected value. - */ -static bool mem_hook_i386(uc_engine *uc, uc_mem_type type, - uint64_t address, int size, int64_t value, void *user_data) -{ - TestData *data = user_data; - if (type == UC_MEM_READ_UNMAPPED) - { - uint32_t eip; - uint32_t eax; - - uc_reg_read(uc, UC_X86_REG_EIP, &eip); - uc_reg_read(uc, UC_X86_REG_EAX, &eax); - - data->actual = eip; - data->expected = CodePage + 0x05; - - /** - * Code: - * 0x10000: mov eax, 0x41414141 ;; <- Returned EIP - * 0x10005: mov ecx, [eax] ;; <- Expected EIP - */ - if ((eax == 0x41414141) && // Proof we're at 0x10005. - (eip != (CodePage + 0x5))) // Proof uc_reg_read is wrong - { - data->good = false; - } - else - data->good = true; - } - return false; -} - -/** - * Hook for reading unmapped memory in the amd64 Unicorn Instance. - * - * BUG: RIP from uc_reg_read does not match expected value. 
- */ -static bool mem_hook_amd64(uc_engine *uc, uc_mem_type type, - uint64_t address, int size, int64_t value, void *user_data) -{ - TestData *data = user_data; - if (type == UC_MEM_READ_UNMAPPED) - { - uint64_t rip; - uint64_t rax; - - uc_reg_read(uc, UC_X86_REG_RIP, &rip); - uc_reg_read(uc, UC_X86_REG_RAX, &rax); - - data->actual = rip; - data->expected = CodePage + 0x0A; - - /** - * Code: - * 0x10000: mov rax, 0x4141414141414141 ;; <- Returned RIP - * 0x10005: mov rcx, [rax] ;; <- Expected RIP - */ - if ((rax == 0x4141414141414141) && // Proof we're at 0x10005 - (rip != (CodePage + 0xA))) // Proof uc_reg_read is wrong - { - data->good = false; - } - else - data->good = true; - } - return false; -} - -/** - * Empty Code Hook. - */ -static void code_hook(uc_engine *uc, uint64_t addr, uint32_t size, void *user) -{ - (void) uc; - (void) addr; - (void) size; - (void) user; -} - -/** - * Test the bug for i386. - * - * 1. Map Code Page - * 2. Write Code to page. - * 3. Install Unmapped Read hook. - * 4. Run the VM. - */ -static void test_i386(void **state) -{ - TestData data; - uc_engine *uc = *state; - uc_hook trace1; - - const uint8_t i386_bug[] = { - 0xb8, 0x41, 0x41, 0x41, 0x41, // mov eax, 0x41414141 - 0x8b, 0x08 // mov ecx, [eax] - }; - - uc_assert_success(uc_mem_map(uc, CodePage, CodeSize, UC_PROT_ALL)); - uc_assert_success(uc_mem_write(uc, CodePage, i386_bug, sizeof(i386_bug))); - uc_assert_success(uc_hook_add(uc, &trace1, UC_HOOK_MEM_READ_UNMAPPED, mem_hook_i386, &data, 1, 0)); - uc_assert_fail(uc_emu_start(uc, CodePage, CodePage + sizeof(i386_bug), 0, 0)); - - if (!data.good) - fail_msg("De-synced RIP value. 0x%"PRIX64" != 0x%"PRIX64"\n", data.expected, data.actual); -} - -/** - * Test the bug for amd64.. - * - * 1. Map Code Page - * 2. Write Code to page. - * 3. Install Unmapped Read hook. - * 4. Run the VM. - */ -static void test_amd64(void **state) -{ - TestData data; - uc_engine *uc = *state; - uc_hook trace1; - - const uint8_t amd64_bug[] = { - 0x48, 0xb8, 0x41, 0x41, 0x41, 0x41, - 0x41, 0x41, 0x41, 0x41, - 0x48, 0x8b, 0x08 - }; - - uc_assert_success(uc_mem_map(uc, CodePage, CodeSize, UC_PROT_ALL)); - uc_assert_success(uc_mem_write(uc, CodePage, amd64_bug, sizeof(amd64_bug))); - uc_assert_success(uc_hook_add(uc, &trace1, UC_HOOK_MEM_READ_UNMAPPED, mem_hook_amd64, &data, 1, 0)); - uc_assert_fail(uc_emu_start(uc, CodePage, CodePage + sizeof(amd64_bug), 0, 0)); - - if (!data.good) - fail_msg("De-synced RIP value. 0x%"PRIX64" != 0x%"PRIX64"\n", data.expected, data.actual); -} - -/** - * Test temporary fix for bug for i386. - * - * 1. Map Code Page - * 2. Write Code to page. - * 3. Install Unmapped Read hook. - * 4. Install Code hook. - * 5. Run the VM. - */ -static void test_i386_fix(void **state) -{ - TestData data; - uc_engine *uc = *state; - uc_hook trace1, trace2; - - const uint8_t i386_bug[] = { - 0xb8, 0x41, 0x41, 0x41, 0x41, // mov eax, 0x41414141 - 0x8b, 0x08 // mov ecx, [eax] - }; - - uc_assert_success(uc_mem_map(uc, CodePage, CodeSize, UC_PROT_ALL)); - uc_assert_success(uc_mem_write(uc, CodePage, i386_bug, sizeof(i386_bug))); - uc_assert_success(uc_hook_add(uc, &trace1, UC_HOOK_MEM_READ_UNMAPPED, mem_hook_i386, &data, 1, 0)); - uc_assert_success(uc_hook_add(uc, &trace2, UC_HOOK_CODE, code_hook, NULL, 1, 0)); - uc_assert_fail(uc_emu_start(uc, CodePage, CodePage + sizeof(i386_bug), 0, 0)); - - if (!data.good) - fail_msg("De-synced RIP value. 0x%"PRIX64" != 0x%"PRIX64"\n", data.expected, data.actual); -} - -/** - * Test temporary fix for bug for amd64.. - * - * 1. 
Map Code Page - * 2. Write Code to page. - * 3. Install Unmapped Read hook. - * 4. Install Code hook. - * 5. Run the VM. - */ -static void test_amd64_fix(void **state) -{ - TestData data; - uc_engine *uc = *state; - uc_hook trace1, trace2; - - const uint8_t amd64_bug[] = { - 0x48, 0xb8, 0x41, 0x41, 0x41, 0x41, - 0x41, 0x41, 0x41, 0x41, - 0x48, 0x8b, 0x08 - }; - - uc_assert_success(uc_mem_map(uc, CodePage, CodeSize, UC_PROT_ALL)); - uc_assert_success(uc_mem_write(uc, CodePage, amd64_bug, sizeof(amd64_bug))); - uc_assert_success(uc_hook_add(uc, &trace1, UC_HOOK_MEM_READ_UNMAPPED, mem_hook_amd64, &data, 1, 0)); - uc_assert_success(uc_hook_add(uc, &trace2, UC_HOOK_CODE, code_hook, NULL, 1, 0)); - uc_assert_fail(uc_emu_start(uc, CodePage, CodePage + sizeof(amd64_bug), 0, 0)); - - if (!data.good) - fail_msg("De-synced RIP value. 0x%"PRIX64" != 0x%"PRIX64"\n", data.expected, data.actual); -} - -/** - * Run all tests - */ -int main(int argc, char **argv, char **envp) -{ - const struct CMUnitTest tests[] = { - cmocka_unit_test_setup_teardown(test_i386, setup_i386, teardown), - cmocka_unit_test_setup_teardown(test_amd64, setup_amd64, teardown), - cmocka_unit_test_setup_teardown(test_i386_fix, setup_i386, teardown), - cmocka_unit_test_setup_teardown(test_amd64_fix, setup_amd64, teardown) - }; - - return cmocka_run_group_tests(tests, NULL, NULL); -} diff --git a/tests/unit/test_x86_shl_enter_leave.c b/tests/unit/test_x86_shl_enter_leave.c deleted file mode 100644 index 9285653e..00000000 --- a/tests/unit/test_x86_shl_enter_leave.c +++ /dev/null @@ -1,433 +0,0 @@ -#include "unicorn/unicorn.h" -#include - -#include "unicorn_test.h" - - -#define OK(x) uc_assert_success(x) - -#define CF_MASK (1<<0) -#define PF_MASK (1<<2) -#define ZF_MASK (1<<6) -#define SF_MASK (1<<7) -#define OF_MASK (1<<11) -#define ALL_MASK (OF_MASK|SF_MASK|ZF_MASK|PF_MASK|CF_MASK) -#define NO_MASK 0xFFFFFFFF - -typedef struct _reg_value -{ - uint32_t regId, regValue, mask; -} reg_value; - -typedef struct _instruction -{ - const char* asmStr; - uint8_t code[16]; //x86 inst == 15 bytes max - uint32_t codeSize; - reg_value* values; - uint32_t nbValues; - uint32_t addr; -} instruction; - -typedef struct _block -{ - instruction* insts[255]; - uint32_t nbInsts; - uint32_t size; -} block; - -/******************************************************************************/ - -#define CAT2(X, Y) X ## Y -#define CAT(X, Y) CAT2(X, Y) - -#define BLOCK_START(BLOCK) \ - { \ - block* blockPtr = &BLOCK; \ - blockPtr->nbInsts = 0; \ - instruction* instPtr = NULL; - -#define BLOCK_END() } - -#define BLOCK_ADD(CODE_ASM, CODE) \ - const uint8_t CAT(code, __LINE__)[] = CODE; \ - instPtr = newInstruction(CAT(code, __LINE__), sizeof(CAT(code, __LINE__)), CODE_ASM, NULL, 0); \ - addInstructionToBlock(blockPtr, instPtr); - -#define BLOCK_ADD_CHECK(CODE_ASM, CODE, REGVALUES) \ - const uint8_t CAT(code, __LINE__)[] = CODE; \ - const reg_value CAT(regValues, __LINE__)[] = REGVALUES; \ - instPtr = newInstruction(CAT(code, __LINE__), sizeof(CAT(code, __LINE__)), CODE_ASM, CAT(regValues, __LINE__), sizeof(CAT(regValues, __LINE__)) / sizeof(reg_value)); \ - addInstructionToBlock(blockPtr, instPtr); - -#define V(...) 
{ __VA_ARGS__ } - -/******************************************************************************/ - -instruction* newInstruction(const uint8_t * _code, uint32_t _codeSize, const char* _asmStr, const reg_value* _values, uint32_t _nbValues); -void addInstructionToBlock(block* _b, instruction* _i); -uint32_t loadBlock(uc_engine *_uc, block* _block, uint32_t _at); -void freeBlock(block* _block); -const char* getRegisterName(uint32_t _regid); -uint32_t getRegisterValue(uc_engine *uc, uint32_t _regid); -instruction* getInstruction(block * _block, uint32_t _addr); -void initRegisters(uc_engine *uc); - -/******************************************************************************/ - -void hook_code_test_i386_shl(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) -{ - uint32_t i; - block* b = (block*)user_data; - instruction* currInst = getInstruction(b, (uint32_t)address); - assert_true(currInst != NULL); - - printf("|\teip=%08x - %s\n", (uint32_t)address, currInst->asmStr); - - for (i = 0; i < currInst->nbValues; i++) - { - uint32_t regValue = getRegisterValue(uc, currInst->values[i].regId); - printf("|\t\ttesting %s : ", getRegisterName(currInst->values[i].regId)); - assert_int_equal(regValue & currInst->values[i].mask, currInst->values[i].regValue); - printf("ok\n"); - } - - if (currInst->code[0] == 0xCC) - OK(uc_emu_stop(uc)); -} - -bool hook_mem_invalid(uc_engine *uc, uc_mem_type type, uint64_t addr, int size, int64_t value, void *user_data) -{ - switch (type) - { - default: - printf("hook_mem_invalid: UC_HOOK_MEM_INVALID type: %d at 0x%" PRIx64 "\n", type, addr); break; - case UC_MEM_READ_UNMAPPED: - printf("hook_mem_invalid: Read from invalid memory at 0x%" PRIx64 ", data size = %u\n", addr, size); break; - case UC_MEM_WRITE_UNMAPPED: - printf("hook_mem_invalid: Write to invalid memory at 0x%" PRIx64 ", data size = %u, data value = 0x%" PRIx64 "\n", addr, size, value); break; - case UC_MEM_FETCH_PROT: - printf("hook_mem_invalid: Fetch from non-executable memory at 0x%" PRIx64 "\n", addr); break; - case UC_MEM_WRITE_PROT: - printf("hook_mem_invalid: Write to non-writeable memory at 0x%" PRIx64 ", data size = %u, data value = 0x%" PRIx64 "\n", addr, size, value); break; - case UC_MEM_READ_PROT: - printf("hook_mem_invalid: Read from non-readable memory at 0x%" PRIx64 ", data size = %u\n", addr, size); break; - } - return false; -} - -#define ADDR_CODE 0x100000 -#define ADDR_STACK 0x200000 - - - -static void test_i386_shl_cl(void **state) -{ - uc_engine *uc; - uc_hook trace1; - block b; - - // Initialize emulator in X86-32bit mode - OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); - OK(uc_mem_map(uc, ADDR_CODE, 0x1000, UC_PROT_ALL)); - - initRegisters(uc); - - BLOCK_START(b); - BLOCK_ADD( "mov ebx, 3Ch", V(0xBB, 0x3C, 0x00, 0x00, 0x00)); - BLOCK_ADD_CHECK("mov cl, 2", V(0xB1, 0x02), V(V(UC_X86_REG_EBX, 0x3C, NO_MASK))); - BLOCK_ADD_CHECK("shl ebx, cl", V(0xD3, 0xE3), V(V(UC_X86_REG_CL, 0x2, NO_MASK))); - BLOCK_ADD_CHECK("lahf", V(0x9F), V(V(UC_X86_REG_EBX, 0xF0, NO_MASK), V(UC_X86_REG_EFLAGS, 0x4, ALL_MASK))); - BLOCK_ADD_CHECK("int3", V(0xCC), V(V(UC_X86_REG_AH, 0x4, PF_MASK))); - BLOCK_END(); - - loadBlock(uc, &b, ADDR_CODE); - - OK(uc_hook_add(uc, &trace1, UC_HOOK_CODE, hook_code_test_i386_shl, &b, 1, 0)); - OK(uc_hook_add(uc, &trace1, UC_HOOK_MEM_INVALID, hook_mem_invalid, NULL, 1, 0)); - - // emulate machine code in infinite time - OK(uc_emu_start(uc, ADDR_CODE, ADDR_CODE + b.size, 0, 0)); - - freeBlock(&b); - - uc_close(uc); -} - -static void test_i386_shl_imm(void **state) -{ 
- uc_engine *uc; - uc_hook trace1; - block b; - - // Initialize emulator in X86-32bit mode - OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); - OK(uc_mem_map(uc, ADDR_CODE, 0x1000, UC_PROT_ALL)); - - initRegisters(uc); - - BLOCK_START(b); - BLOCK_ADD( "mov ebx, 3Ch", V(0xBB, 0x3C, 0x00, 0x00, 0x00)); - BLOCK_ADD( "shl ebx, 2", V(0xC1, 0xE3, 0x02)); - BLOCK_ADD_CHECK("lahf", V(0x9F), V(V(UC_X86_REG_EBX, 0xF0, NO_MASK), V(UC_X86_REG_EFLAGS, 0x4, ALL_MASK))); - BLOCK_ADD_CHECK("int3", V(0xCC), V(V(UC_X86_REG_AH, 0x4, PF_MASK))); - BLOCK_END(); - - loadBlock(uc, &b, ADDR_CODE); - - OK(uc_hook_add(uc, &trace1, UC_HOOK_CODE, hook_code_test_i386_shl, &b, 1, 0)); - OK(uc_hook_add(uc, &trace1, UC_HOOK_MEM_INVALID, hook_mem_invalid, NULL, 1, 0)); - - // emulate machine code in infinite time - OK(uc_emu_start(uc, ADDR_CODE, ADDR_CODE + b.size, 0, 0)); - - freeBlock(&b); - - uc_close(uc); -} - -static void test_i386_enter_leave(void **state) -{ - uc_engine *uc; - uc_hook trace1; - block b; - - // Initialize emulator in X86-32bit mode - OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); - OK(uc_mem_map(uc, ADDR_CODE, 0x1000, UC_PROT_ALL)); - OK(uc_mem_map(uc, ADDR_STACK - 0x1000, 0x1000, UC_PROT_ALL)); - - initRegisters(uc); - - BLOCK_START(b); - BLOCK_ADD( "mov esp, 0x200000", V(0xBC, 0x00, 0x00, 0x20, 0x00)); - BLOCK_ADD_CHECK("mov eax, 1", V(0xB8, 0x01, 0x00, 0x00, 0x00), V(V(UC_X86_REG_ESP, 0x200000, NO_MASK))); - BLOCK_ADD_CHECK("call 0x100015", V(0xE8, 0x06, 0x00, 0x00, 0x00), V(V(UC_X86_REG_EAX, 0x1, NO_MASK))); - BLOCK_ADD_CHECK("mov eax, 3", V(0xB8, 0x03, 0x00, 0x00, 0x00), V(V(UC_X86_REG_EAX, 0x2, NO_MASK))); - BLOCK_ADD_CHECK("int3", V(0xCC), V(V(UC_X86_REG_EAX, 0x3, NO_MASK))); - BLOCK_ADD_CHECK("enter 0x10,0", V(0xC8, 0x10, 0x00, 0x00), V(V(UC_X86_REG_ESP, 0x200000 - 4, NO_MASK))); - BLOCK_ADD_CHECK("mov eax, 2", V(0xB8, 0x02, 0x00, 0x00, 0x00), V(V(UC_X86_REG_ESP, 0x200000 - 4 - 4 - 0x10, NO_MASK), V(UC_X86_REG_EBP, 0x200000 - 4 - 4, NO_MASK))); - BLOCK_ADD_CHECK("leave", V(0xC9), V(V(UC_X86_REG_EAX, 0x2, NO_MASK))); - BLOCK_ADD_CHECK("ret", V(0xC3), V(V(UC_X86_REG_ESP, 0x200000 - 4, NO_MASK))); - BLOCK_END(); - - loadBlock(uc, &b, ADDR_CODE); - - OK(uc_hook_add(uc, &trace1, UC_HOOK_CODE, hook_code_test_i386_shl, &b, 1, 0)); - OK(uc_hook_add(uc, &trace1, UC_HOOK_MEM_INVALID, hook_mem_invalid, NULL, 1, 0)); - - // emulate machine code in infinite time - OK(uc_emu_start(uc, ADDR_CODE, ADDR_CODE + b.size, 0, 0)); - - freeBlock(&b); - - uc_close(uc); -} - -static void test_i386_enter_nested_leave(void **state) -{ - uc_engine *uc; - uc_hook trace1; - block b; - - // Initialize emulator in X86-32bit mode - OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); - OK(uc_mem_map(uc, ADDR_CODE, 0x1000, UC_PROT_ALL)); - OK(uc_mem_map(uc, ADDR_STACK - 0x1000, 0x1000, UC_PROT_ALL)); - - initRegisters(uc); - - BLOCK_START(b); - BLOCK_ADD( "mov esp, 0x200000", V(0xBC, 0x00, 0x00, 0x20, 0x00)); - BLOCK_ADD_CHECK("mov eax, 1", V(0xB8, 0x01, 0x00, 0x00, 0x00), V(V(UC_X86_REG_ESP, 0x200000, NO_MASK))); - BLOCK_ADD_CHECK("call 0x100015", V(0xE8, 0x06, 0x00, 0x00, 0x00), V(V(UC_X86_REG_EAX, 0x1, NO_MASK))); - BLOCK_ADD_CHECK("mov eax, 3", V(0xB8, 0x03, 0x00, 0x00, 0x00), V(V(UC_X86_REG_EAX, 0x2, NO_MASK))); - BLOCK_ADD_CHECK("int3", V(0xCC), V(V(UC_X86_REG_EAX, 0x3, NO_MASK))); - BLOCK_ADD_CHECK("mov ebp, esp", V(0x89, 0xE5), V(V(UC_X86_REG_ESP, 0x200000 - 4, NO_MASK))); - BLOCK_ADD_CHECK("enter 0x10,1", V(0xC8, 0x10, 0x00, 0x01), V(V(UC_X86_REG_EBP, 0x200000 - 4, NO_MASK))); - BLOCK_ADD_CHECK("mov eax, 2", V(0xB8, 0x02, 0x00, 0x00, 
0x00), V(V(UC_X86_REG_ESP, 0x200000 - 4 - 2*4 - 0x10, NO_MASK), V(UC_X86_REG_EBP, 0x200000 - 4 - 4, NO_MASK))); - BLOCK_ADD_CHECK("leave", V(0xC9), V(V(UC_X86_REG_EAX, 0x2, NO_MASK))); - BLOCK_ADD_CHECK("ret", V(0xC3), V(V(UC_X86_REG_ESP, 0x200000 - 4, NO_MASK))); - BLOCK_END(); - - loadBlock(uc, &b, ADDR_CODE); - - OK(uc_hook_add(uc, &trace1, UC_HOOK_CODE, hook_code_test_i386_shl, &b, 1, 0)); - OK(uc_hook_add(uc, &trace1, UC_HOOK_MEM_INVALID, hook_mem_invalid, NULL, 1, 0)); - - // emulate machine code in infinite time - OK(uc_emu_start(uc, ADDR_CODE, ADDR_CODE + b.size, 0, 0)); - - freeBlock(&b); - - uc_close(uc); -} - -/******************************************************************************/ - -int main(void) { - const struct CMUnitTest tests[] = { - - cmocka_unit_test(test_i386_shl_cl), - cmocka_unit_test(test_i386_shl_imm), - cmocka_unit_test(test_i386_enter_leave), - cmocka_unit_test(test_i386_enter_nested_leave), - }; - return cmocka_run_group_tests(tests, NULL, NULL); -} - -/******************************************************************************/ - -instruction* newInstruction(const uint8_t * _code, uint32_t _codeSize, const char* _asmStr, const reg_value* _values, uint32_t _nbValues) -{ - instruction* inst = (instruction*)malloc(sizeof(instruction)); - - inst->asmStr = _asmStr; - memcpy(inst->code, _code, _codeSize); - inst->codeSize = _codeSize; - inst->nbValues = 0; - if (_values) - { - inst->values = (reg_value*)malloc(_nbValues*sizeof(reg_value)); - memcpy(inst->values, _values, _nbValues*sizeof(reg_value)); - inst->nbValues = _nbValues; - } - - return inst; -} - -void addInstructionToBlock(block* _b, instruction* _i) -{ - _b->insts[_b->nbInsts++] = _i; -} - -uint32_t loadBlock(uc_engine *_uc, block* _block, uint32_t _at) -{ - uint32_t i, j, offset; - - for (i = 0, offset = 0; i < _block->nbInsts; i++) - { - const uint32_t codeSize = _block->insts[i]->codeSize; - const uint8_t* code = _block->insts[i]->code; - _block->insts[i]->addr = _at + offset; - printf("load: %08X: ", _block->insts[i]->addr); - for (j = 0; j < codeSize; j++) printf("%02X ", code[j]); - for (j = 0; j < 15 - codeSize; j++) printf(" "); - printf("%s\n", _block->insts[i]->asmStr); - OK(uc_mem_write(_uc, _at + offset, code, codeSize)); - offset += codeSize; - } - _block->size = offset; - return offset; -} - -void freeBlock(block* _block) -{ - uint32_t i; - for (i = 0; i < _block->nbInsts; i++) - { - if (_block->insts[i]->nbValues > 0) - free(_block->insts[i]->values); - free(_block->insts[i]); - } -} - -void initRegisters(uc_engine *uc) -{ - // initialize machine registers - uint32_t zero = 0; - OK(uc_reg_write(uc, UC_X86_REG_EAX, &zero)); - OK(uc_reg_write(uc, UC_X86_REG_EBX, &zero)); - OK(uc_reg_write(uc, UC_X86_REG_ECX, &zero)); - OK(uc_reg_write(uc, UC_X86_REG_EDX, &zero)); - OK(uc_reg_write(uc, UC_X86_REG_EBP, &zero)); - OK(uc_reg_write(uc, UC_X86_REG_ESP, &zero)); - OK(uc_reg_write(uc, UC_X86_REG_EDI, &zero)); - OK(uc_reg_write(uc, UC_X86_REG_ESI, &zero)); - OK(uc_reg_write(uc, UC_X86_REG_EFLAGS, &zero)); -} - -instruction* getInstruction(block* _block, uint32_t _addr) -{ - uint32_t i; - for (i = 0; i < _block->nbInsts; i++) - { - if (_block->insts[i]->addr == _addr) - return _block->insts[i]; - } - return NULL; -} - -const char* getRegisterName(uint32_t _regid) -{ - switch (_regid) - { - //8 - case UC_X86_REG_AH: return "AH"; - case UC_X86_REG_AL: return "AL"; - case UC_X86_REG_BH: return "BH"; - case UC_X86_REG_BL: return "BL"; - case UC_X86_REG_CL: return "CL"; - case UC_X86_REG_CH: 
return "CH"; - case UC_X86_REG_DH: return "DH"; - case UC_X86_REG_DL: return "DL"; - //16 - case UC_X86_REG_AX: return "AX"; - case UC_X86_REG_BX: return "BX"; - case UC_X86_REG_CX: return "CX"; - case UC_X86_REG_DX: return "DX"; - //32 - case UC_X86_REG_EAX: return "EAX"; - case UC_X86_REG_EBX: return "EBX"; - case UC_X86_REG_ECX: return "ECX"; - case UC_X86_REG_EDX: return "EDX"; - case UC_X86_REG_EDI: return "EDI"; - case UC_X86_REG_ESI: return "ESI"; - case UC_X86_REG_EBP: return "EBP"; - case UC_X86_REG_ESP: return "ESP"; - case UC_X86_REG_EIP: return "EIP"; - case UC_X86_REG_EFLAGS: return "EFLAGS"; - - default: fail(); - } - return "UNKNOWN"; -} - -uint32_t getRegisterValue(uc_engine *uc, uint32_t _regid) -{ - switch (_regid) - { - //8 - case UC_X86_REG_AH: case UC_X86_REG_AL: - case UC_X86_REG_BH: case UC_X86_REG_BL: - case UC_X86_REG_CL: case UC_X86_REG_CH: - case UC_X86_REG_DH: case UC_X86_REG_DL: - { - uint8_t val = 0; - OK(uc_reg_read(uc, _regid, &val)); - return val; - } - //16 - case UC_X86_REG_AX: case UC_X86_REG_BX: - case UC_X86_REG_CX: case UC_X86_REG_DX: - { - uint16_t val = 0; - OK(uc_reg_read(uc, _regid, &val)); - return val; - } - //32 - case UC_X86_REG_EAX: case UC_X86_REG_EBX: - case UC_X86_REG_ECX: case UC_X86_REG_EDX: - case UC_X86_REG_EDI: case UC_X86_REG_ESI: - case UC_X86_REG_EBP: case UC_X86_REG_ESP: - case UC_X86_REG_EIP: case UC_X86_REG_EFLAGS: - { - uint32_t val = 0; - OK(uc_reg_read(uc, _regid, &val)); - return val; - } - - default: fail(); - } - return 0; -} diff --git a/tests/unit/test_x86_soft_paging.c b/tests/unit/test_x86_soft_paging.c deleted file mode 100644 index c3b9d0a9..00000000 --- a/tests/unit/test_x86_soft_paging.c +++ /dev/null @@ -1,210 +0,0 @@ -#include "unicorn_test.h" -#include "unicorn/unicorn.h" - -/* - Two tests here for software paging - Low paging: Test paging using virtual addresses already mapped by Unicorn - High paging: Test paging using virtual addresses not mapped by Unicorn -*/ - -static void test_low_paging(void **state) { - uc_engine *uc; - uc_err err; - int r_eax; - - /* The following x86 code will map emulated physical memory - to virtual memory using pages and attempt - to read/write from virtual memory - - Specifically, the virtual memory address range - has been mapped by Unicorn (0x7FF000 - 0x7FFFFF) - - Memory area purposes: - 0x1000 = page directory - 0x2000 = page table (identity map first 4 MiB) - 0x3000 = page table (0x007FF000 -> 0x00004000) - 0x4000 = data area (0xBEEF) - */ - const uint8_t code[] = { - /* Zero memory for page directories and page tables */ - 0xBF, 0x00, 0x10, 0x00, 0x00, /* MOV EDI, 0x1000 */ - 0xB9, 0x00, 0x10, 0x00, 0x00, /* MOV ECX, 0x1000 */ - 0x31, 0xC0, /* XOR EAX, EAX */ - 0xF3, 0xAB, /* REP STOSD */ - - /* Load DWORD [0x4000] with 0xDEADBEEF to retrieve later */ - 0xBF, 0x00, 0x40, 0x00, 0x00, /* MOV EDI, 0x4000 */ - 0xB8, 0xEF, 0xBE, 0x00, 0x00, /* MOV EAX, 0xBEEF */ - 0x89, 0x07, /* MOV [EDI], EAX */ - - /* Identity map the first 4MiB of memory */ - 0xB9, 0x00, 0x04, 0x00, 0x00, /* MOV ECX, 0x400 */ - 0xBF, 0x00, 0x20, 0x00, 0x00, /* MOV EDI, 0x2000 */ - 0xB8, 0x03, 0x00, 0x00, 0x00, /* MOV EAX, 3 */ - /* aLoop: */ - 0xAB, /* STOSD */ - 0x05, 0x00, 0x10, 0x00, 0x00, /* ADD EAX, 0x1000 */ - 0xE2, 0xF8, /* LOOP aLoop */ - - /* Map physical address 0x4000 to virtual address 0x7FF000 */ - 0xBF, 0xFC, 0x3F, 0x00, 0x00, /* MOV EDI, 0x3FFC */ - 0xB8, 0x03, 0x40, 0x00, 0x00, /* MOV EAX, 0x4003 */ - 0x89, 0x07, /* MOV [EDI], EAX */ - - /* Add page tables into page directory */ - 0xBF, 0x00, 
0x10, 0x00, 0x00, /* MOV EDI, 0x1000 */ - 0xB8, 0x03, 0x20, 0x00, 0x00, /* MOV EAX, 0x2003 */ - 0x89, 0x07, /* MOV [EDI], EAX */ - 0xBF, 0x04, 0x10, 0x00, 0x00, /* MOV EDI, 0x1004 */ - 0xB8, 0x03, 0x30, 0x00, 0x00, /* MOV EAX, 0x3003 */ - 0x89, 0x07, /* MOV [EDI], EAX */ - - /* Load the page directory register */ - 0xB8, 0x00, 0x10, 0x00, 0x00, /* MOV EAX, 0x1000 */ - 0x0F, 0x22, 0xD8, /* MOV CR3, EAX */ - - /* Enable paging */ - 0x0F, 0x20, 0xC0, /* MOV EAX, CR0 */ - 0x0D, 0x00, 0x00, 0x00, 0x80, /* OR EAX, 0x80000000 */ - 0x0F, 0x22, 0xC0, /* MOV CR0, EAX */ - - /* Clear EAX */ - 0x31, 0xC0, /* XOR EAX, EAX */ - - /* Load using virtual memory address; EAX = 0xBEEF */ - 0xBE, 0x00, 0xF0, 0x7F, 0x00, /* MOV ESI, 0x7FF000 */ - 0x8B, 0x06, /* MOV EAX, [ESI] */ - 0xF4, /* HLT */ - }; - - /* Initialise X86-32bit mode */ - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - uc_assert_success(err); - - /* Map 8MB of memory at base address 0 */ - err = uc_mem_map(uc, 0, (8 * 1024 * 1024), UC_PROT_ALL); - uc_assert_success(err); - - /* Write code into memory at address 0 */ - err = uc_mem_write(uc, 0, code, sizeof(code)); - uc_assert_success(err); - - /* Start emulation */ - err = uc_emu_start(uc, 0, sizeof(code), 0, 0); - uc_assert_success(err); - - /* The code should have loaded 0xBEEF into EAX */ - uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); - assert_int_equal(r_eax, 0xBEEF); - - uc_close(uc); -} - - -/****************************************************************************/ - - -static void test_high_paging(void **state) { - uc_engine *uc; - uc_err err; - int r_eax; - - /* The following x86 code will map emulated physical memory - to virtual memory using pages and attempt - to read/write from virtual memory - - Specifically, the virtual memory address range - has not been mapped by UC (0xFFFFF000 - 0xFFFFFFFF) - - Memory area purposes: - 0x1000 = page directory - 0x2000 = page table (identity map first 4 MiB) - 0x3000 = page table (0xFFFFF000 -> 0x00004000) - 0x4000 = data area (0xDEADBEEF) - */ - const uint8_t code[] = { - /* Zero memory for page directories and page tables */ - 0xBF, 0x00, 0x10, 0x00, 0x00, /* MOV EDI, 0x1000 */ - 0xB9, 0x00, 0x10, 0x00, 0x00, /* MOV ECX, 0x1000 */ - 0x31, 0xC0, /* XOR EAX, EAX */ - 0xF3, 0xAB, /* REP STOSD */ - - /* Load DWORD [0x4000] with 0xDEADBEEF to retrieve later */ - 0xBF, 0x00, 0x40, 0x00, 0x00, /* MOV EDI, 0x4000 */ - 0xB8, 0xEF, 0xBE, 0x00, 0x00, /* MOV EAX, 0xBEEF */ - 0x89, 0x07, /* MOV [EDI], EAX */ - - /* Identity map the first 4MiB of memory */ - 0xB9, 0x00, 0x04, 0x00, 0x00, /* MOV ECX, 0x400 */ - 0xBF, 0x00, 0x20, 0x00, 0x00, /* MOV EDI, 0x2000 */ - 0xB8, 0x03, 0x00, 0x00, 0x00, /* MOV EAX, 3 */ - /* aLoop: */ - 0xAB, /* STOSD */ - 0x05, 0x00, 0x10, 0x00, 0x00, /* ADD EAX, 0x1000 */ - 0xE2, 0xF8, /* LOOP aLoop */ - - /* Map physical address 0x4000 to virtual address 0xFFFFF000 */ - 0xBF, 0xFC, 0x3F, 0x00, 0x00, /* MOV EDI, 0x3FFC */ - 0xB8, 0x03, 0x40, 0x00, 0x00, /* MOV EAX, 0x4003 */ - 0x89, 0x07, /* MOV [EDI], EAX */ - - /* Add page tables into page directory */ - 0xBF, 0x00, 0x10, 0x00, 0x00, /* MOV EDI, 0x1000 */ - 0xB8, 0x03, 0x20, 0x00, 0x00, /* MOV EAX, 0x2003 */ - 0x89, 0x07, /* MOV [EDI], EAX */ - 0xBF, 0xFC, 0x1F, 0x00, 0x00, /* MOV EDI, 0x1FFC */ - 0xB8, 0x03, 0x30, 0x00, 0x00, /* MOV EAX, 0x3003 */ - 0x89, 0x07, /* MOV [EDI], EAX */ - - /* Load the page directory register */ - 0xB8, 0x00, 0x10, 0x00, 0x00, /* MOV EAX, 0x1000 */ - 0x0F, 0x22, 0xD8, /* MOV CR3, EAX */ - - /* Enable paging */ - 0x0F, 0x20, 0xC0, /* MOV EAX, CR0 */ - 
0x0D, 0x00, 0x00, 0x00, 0x80, /* OR EAX, 0x80000000 */ - 0x0F, 0x22, 0xC0, /* MOV CR0, EAX */ - - /* Clear EAX */ - 0x31, 0xC0, /* XOR EAX, EAX */ - - /* Load using virtual memory address; EAX = 0xBEEF */ - 0xBE, 0x00, 0xF0, 0xFF, 0xFF, /* MOV ESI, 0xFFFFF000 */ - 0x8B, 0x06, /* MOV EAX, [ESI] */ - 0xF4, /* HLT */ - }; - - /* Initialise X86-32bit mode */ - err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); - uc_assert_success(err); - - /* Map 4MB of memory at base address 0 */ - err = uc_mem_map(uc, 0, (4 * 1024 * 1024), UC_PROT_ALL); - uc_assert_success(err); - - /* Write code into memory at address 0 */ - err = uc_mem_write(uc, 0, code, sizeof(code)); - uc_assert_success(err); - - /* Start emulation */ - err = uc_emu_start(uc, 0, sizeof(code), 0, 0); - uc_assert_success(err); - - /* The code should have loaded 0xBEEF into EAX */ - uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); - assert_int_equal(r_eax, 0xBEEF); - - uc_close(uc); -} - - -/****************************************************************************/ - - -int main(void) { - const struct CMUnitTest tests[] = { - cmocka_unit_test(test_low_paging), - cmocka_unit_test(test_high_paging), - }; - return cmocka_run_group_tests(tests, NULL, NULL); -} diff --git a/tests/unit/unicorn_test.h b/tests/unit/unicorn_test.h index fa695d9a..e3a45c02 100644 --- a/tests/unit/unicorn_test.h +++ b/tests/unit/unicorn_test.h @@ -1,12 +1,9 @@ #ifndef UNICORN_TEST_H #define UNICORN_TEST_H -#include -#include -#include -#include +#include #include -#include +#include "acutest.h" /** * Assert that err matches expect @@ -14,44 +11,14 @@ #define uc_assert_err(expect, err) \ do { \ uc_err __err = err; \ - if (__err != expect) { \ - fail_msg("%s", uc_strerror(__err)); \ + if (!TEST_CHECK(__err == expect)) { \ + TEST_MSG("%s", uc_strerror(__err)); \ } \ } while (0) /** * Assert that err is UC_ERR_OK */ -#define uc_assert_success(err) uc_assert_err(UC_ERR_OK, err) - -/** - * Assert that err is anything but UC_ERR_OK - * - * Note: Better to use uc_assert_err(, err), - * as this serves to document which errors a function will return - * in various scenarios. 
- */ -#define uc_assert_fail(err) \ -do { \ - uc_err __err = err; \ - if (__err == UC_ERR_OK) { \ - fail_msg("%s", uc_strerror(__err)); \ - } \ -} while (0) - -char * read_file(const char *filename, struct stat *info) { - stat(filename, info); - char *code = malloc(info->st_size); - if (code == NULL) { - return NULL; - } - FILE *fp = fopen(filename, "r"); - if (fp == NULL) { - free(code); - return NULL; - } - fread(code, info->st_size, 1, fp); - return code; -} +#define OK(stat) uc_assert_err(UC_ERR_OK, stat) #endif /* UNICORN_TEST_H */ diff --git a/tests/unit/x86_soft_paging_low.s b/tests/unit/x86_soft_paging_low.s deleted file mode 100644 index 7b25745d..00000000 --- a/tests/unit/x86_soft_paging_low.s +++ /dev/null @@ -1,49 +0,0 @@ -// Zero memory for page directories and page tables -mov $0x1000,%edi -mov $0x1000,%ecx -xor %eax,%eax -rep stos %eax,(%edi) - -// Load DWORD [0x4000] with 0xDEADBEEF to retrieve later -mov $0x4000,%edi -mov $0xBEEF,%eax -mov %eax, (%edi) - -// Identify map the first 4MiB of memory -mov $0x400,%ecx -mov $0x2000,%edi -mov $3, %eax -loop: -stos %eax,(%edi) -add $0x1000,%eax -loop loop - -// Map phyiscal address 0x4000 to cirtual address 0x7FF000 -mov $0x3ffc,%edi -mov $0x4003,%eax -mov %eax, (%edi) - -// Add page tables into page directory -mov $0x1000, %edi -mov $0x2003, %eax -mov %eax, (%edi) -mov $0x1004, %edi -mov $0x3003, %eax -mov %eax, (%edi) - -// Load the page directory register -mov $0x1000, %eax -mov %eax, %cr3 - -// Enable paging -mov %cr0, %eax -or $0x80000000, %eax - -// Clear EAX -mov %eax, %cr0 - -//Load using virtual memory address; EAX = 0xBEEF -xor %eax,%eax -mov $0x7FF000, %esi -mov (%esi), %eax -hlt diff --git a/uc.c b/uc.c index 4fc0d9fa..65161dbc 100644 --- a/uc.c +++ b/uc.c @@ -1,5 +1,6 @@ /* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh , 2015 */ +/* Modified for Unicorn Engine by Chen Huitao, 2020 */ #if defined(UNICORN_HAS_OSXKERNEL) #include @@ -10,30 +11,21 @@ #endif #include // nanosleep - #include #include "uc_priv.h" // target specific headers -#include "qemu/target-m68k/unicorn.h" -#include "qemu/target-i386/unicorn.h" -#include "qemu/target-arm/unicorn.h" -#include "qemu/target-mips/unicorn.h" -#include "qemu/target-sparc/unicorn.h" +#include "qemu/target/m68k/unicorn.h" +#include "qemu/target/i386/unicorn.h" +#include "qemu/target/arm/unicorn.h" +#include "qemu/target/mips/unicorn.h" +#include "qemu/target/sparc/unicorn.h" +#include "qemu/target/ppc/unicorn.h" +#include "qemu/target/riscv/unicorn.h" -#include "qemu/include/hw/boards.h" #include "qemu/include/qemu/queue.h" -static void free_table(gpointer key, gpointer value, gpointer data) -{ - TypeInfo *ti = (TypeInfo*) value; - g_free((void *) ti->class_); - g_free((void *) ti->name); - g_free((void *) ti->parent); - g_free((void *) ti); -} - UNICORN_EXPORT unsigned int uc_version(unsigned int *major, unsigned int *minor) { @@ -129,6 +121,9 @@ bool uc_arch_supported(uc_arch arch) #endif #ifdef UNICORN_HAS_X86 case UC_ARCH_X86: return true; +#endif +#ifdef UNICORN_HAS_RISCV + case UC_ARCH_RISCV: return true; #endif /* Invalid or disabled arch */ default: return false; @@ -148,19 +143,18 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) return UC_ERR_NOMEM; } + /* qemu/exec.c: phys_map_node_reserve() */ + uc->alloc_hint = 16; uc->errnum = UC_ERR_OK; uc->arch = arch; uc->mode = mode; - // uc->ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) }; - uc->ram_list.blocks.tqh_first = NULL; - uc->ram_list.blocks.tqh_last = 
&(uc->ram_list.blocks.tqh_first); + // uc->ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) }; + QLIST_INIT(&uc->ram_list.blocks); - uc->memory_listeners.tqh_first = NULL; - uc->memory_listeners.tqh_last = &uc->memory_listeners.tqh_first; + QTAILQ_INIT(&uc->memory_listeners); - uc->address_spaces.tqh_first = NULL; - uc->address_spaces.tqh_last = &uc->address_spaces.tqh_first; + QTAILQ_INIT(&uc->address_spaces); switch(arch) { default: @@ -192,20 +186,15 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) free(uc); return UC_ERR_MODE; } - if (mode & (UC_MODE_BIG_ENDIAN | UC_MODE_ARMBE8)) { - if (mode & UC_MODE_ARMBE8) - uc->bswap_code = 1; -#ifdef UNICORN_HAS_ARMEB + if (mode & UC_MODE_BIG_ENDIAN) { uc->init_arch = armeb_uc_init; -#else - return UC_ERR_MODE; -#endif } else { uc->init_arch = arm_uc_init; } - if (mode & UC_MODE_THUMB) + if (mode & UC_MODE_THUMB) { uc->thumb = 1; + } break; #endif #ifdef UNICORN_HAS_ARM64 @@ -231,21 +220,25 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) } if (mode & UC_MODE_BIG_ENDIAN) { #ifdef UNICORN_HAS_MIPS - if (mode & UC_MODE_MIPS32) + if (mode & UC_MODE_MIPS32) { uc->init_arch = mips_uc_init; + } #endif #ifdef UNICORN_HAS_MIPS64 - if (mode & UC_MODE_MIPS64) + if (mode & UC_MODE_MIPS64) { uc->init_arch = mips64_uc_init; + } #endif } else { // little endian #ifdef UNICORN_HAS_MIPSEL - if (mode & UC_MODE_MIPS32) + if (mode & UC_MODE_MIPS32) { uc->init_arch = mipsel_uc_init; + } #endif #ifdef UNICORN_HAS_MIPS64EL - if (mode & UC_MODE_MIPS64) + if (mode & UC_MODE_MIPS64) { uc->init_arch = mips64el_uc_init; + } #endif } break; @@ -259,25 +252,64 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) free(uc); return UC_ERR_MODE; } - if (mode & UC_MODE_SPARC64) + if (mode & UC_MODE_SPARC64) { uc->init_arch = sparc64_uc_init; - else + } else { uc->init_arch = sparc_uc_init; + } break; #endif +#ifdef UNICORN_HAS_PPC + case UC_ARCH_PPC: + if ((mode & ~UC_MODE_PPC_MASK) || + !(mode & UC_MODE_BIG_ENDIAN) || + !(mode & (UC_MODE_PPC32|UC_MODE_PPC64))) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_PPC64) { + uc->init_arch = ppc64_uc_init; + } else { + uc->init_arch = ppc_uc_init; + } + break; +#endif +#ifdef UNICORN_HAS_RISCV + case UC_ARCH_RISCV: + if ((mode & ~UC_MODE_RISCV_MASK) || + !(mode & (UC_MODE_RISCV32|UC_MODE_RISCV64))) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_RISCV32) { + uc->init_arch = riscv32_uc_init; + } else if (mode & UC_MODE_RISCV64) { + uc->init_arch =riscv64_uc_init; + } else { + free(uc); + return UC_ERR_MODE; + } + break; +#endif + } if (uc->init_arch == NULL) { return UC_ERR_ARCH; } - if (machine_initialize(uc)) + if (machine_initialize(uc)) { return UC_ERR_RESOURCE; + } + + // init fpu softfloat + uc->softfloat_initialize(); *result = uc; - if (uc->reg_reset) + if (uc->reg_reset) { uc->reg_reset(uc); + } return UC_ERR_OK; } else { @@ -292,51 +324,49 @@ uc_err uc_close(uc_engine *uc) int i; struct list_item *cur; struct hook *hook; + MemoryRegion *mr; // Cleanup internally. - if (uc->release) + if (uc->release) { uc->release(uc->tcg_ctx); + } g_free(uc->tcg_ctx); // Cleanup CPU. - g_free(uc->cpu->tcg_as_listener); + g_free(uc->cpu->cpu_ases); g_free(uc->cpu->thread); - // Cleanup all objects. 
-    OBJECT(uc->machine_state->accelerator)->ref = 1;
-    OBJECT(uc->machine_state)->ref = 1;
-    OBJECT(uc->owner)->ref = 1;
-    OBJECT(uc->root)->ref = 1;
+    /* cpu */
+    free(uc->cpu);

-    object_unref(uc, OBJECT(uc->machine_state->accelerator));
-    object_unref(uc, OBJECT(uc->machine_state));
-    object_unref(uc, OBJECT(uc->cpu));
-    object_unref(uc, OBJECT(&uc->io_mem_notdirty));
-    object_unref(uc, OBJECT(&uc->io_mem_unassigned));
-    object_unref(uc, OBJECT(&uc->io_mem_rom));
-    object_unref(uc, OBJECT(uc->root));
-
-    // System memory.
+    /* memory */
+    mr = &uc->io_mem_unassigned;
+    mr->destructor(mr);
+    mr = uc->system_io;
+    mr->destructor(mr);
+    mr = uc->system_memory;
+    mr->destructor(mr);
     g_free(uc->system_memory);
+    //g_free(uc->system_io);
+
+    /* flatviews */
+    g_hash_table_destroy(uc->flat_views);

     // Thread relateds.
-    if (uc->qemu_thread_data)
+    if (uc->qemu_thread_data) {
         g_free(uc->qemu_thread_data);
+    }
+
+    /* free */
+    g_free(uc->init_target_page);

     // Other auxilaries.
-    free(uc->l1_map);
+    g_free(uc->l1_map);

     if (uc->bounce.buffer) {
         free(uc->bounce.buffer);
     }

-    g_hash_table_foreach(uc->type_table, free_table, uc);
-    g_hash_table_destroy(uc->type_table);
-
-    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-        free(uc->ram_list.dirty_memory[i]);
-    }
-
     // free hooks and hook lists
     for (i = 0; i < UC_HOOK_MAX; i++) {
         cur = uc->hook[i].head;
@@ -367,7 +397,7 @@ uc_err uc_close(uc_engine *uc)
     // finally, free uc itself.
     memset(uc, 0, sizeof(*uc));
     free(uc);
-
+
     return UC_ERR_OK;
 }

@@ -375,12 +405,14 @@ UNICORN_EXPORT
 uc_err uc_reg_read_batch(uc_engine *uc, int *ids, void **vals, int count)
 {
-    if (uc->reg_read)
-        uc->reg_read(uc, (unsigned int *)ids, vals, count);
-    else
-        return -1;  // FIXME: need a proper uc_err
+    int ret = UC_ERR_OK;
+    if (uc->reg_read) {
+        ret = uc->reg_read(uc, (unsigned int *)ids, vals, count);
+    } else {
+        return UC_ERR_HANDLE;
+    }

-    return UC_ERR_OK;
+    return ret;
 }

@@ -388,10 +420,11 @@ UNICORN_EXPORT
 uc_err uc_reg_write_batch(uc_engine *uc, int *ids, void *const *vals, int count)
 {
     int ret = UC_ERR_OK;
-    if (uc->reg_write)
+    if (uc->reg_write) {
         ret = uc->reg_write(uc, (unsigned int *)ids, vals, count);
-    else
-        return UC_ERR_EXCEPTION; // FIXME: need a proper uc_err
+    } else {
+        return UC_ERR_HANDLE;
+    }

     return ret;
 }

@@ -421,8 +454,9 @@ static bool check_mem_area(uc_engine *uc, uint64_t address, size_t size)
             len = (size_t)MIN(size - count, mr->end - address);
             count += len;
             address += len;
-        } else // this address is not mapped in yet
+        } else { // this address is not mapped in yet
             break;
+        }
     }

     return (count == size);
@@ -439,27 +473,31 @@ uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size)
         address = uc->mem_redirect(address);
     }

-    if (!check_mem_area(uc, address, size))
+    if (!check_mem_area(uc, address, size)) {
         return UC_ERR_READ_UNMAPPED;
+    }

     // memory area can overlap adjacent memory blocks
     while(count < size) {
         MemoryRegion *mr = memory_mapping(uc, address);
         if (mr) {
             len = (size_t)MIN(size - count, mr->end - address);
-            if (uc->read_mem(&uc->as, address, bytes, len) == false)
+            if (uc->read_mem(&uc->address_space_memory, address, bytes, len) == false) {
                 break;
+            }
             count += len;
             address += len;
             bytes += len;
-        } else // this address is not mapped in yet
+        } else { // this address is not mapped in yet
             break;
+        }
     }

-    if (count == size)
+    if (count == size) {
         return UC_ERR_OK;
-    else
+    } else {
         return UC_ERR_READ_UNMAPPED;
+    }
 }

 UNICORN_EXPORT
@@ -472,37 +510,43 @@ uc_err uc_mem_write(uc_engine *uc, uint64_t address,
const void *_bytes, size_t
         address = uc->mem_redirect(address);
     }

-    if (!check_mem_area(uc, address, size))
+    if (!check_mem_area(uc, address, size)) {
         return UC_ERR_WRITE_UNMAPPED;
+    }

     // memory area can overlap adjacent memory blocks
     while(count < size) {
         MemoryRegion *mr = memory_mapping(uc, address);
         if (mr) {
             uint32_t operms = mr->perms;
-            if (!(operms & UC_PROT_WRITE)) // write protected
+            if (!(operms & UC_PROT_WRITE)) { // write protected
                 // but this is not the program accessing memory, so temporarily mark writable
                 uc->readonly_mem(mr, false);
+            }

             len = (size_t)MIN(size - count, mr->end - address);
-            if (uc->write_mem(&uc->as, address, bytes, len) == false)
+            if (uc->write_mem(&uc->address_space_memory, address, bytes, len) == false) {
                 break;
+            }

-            if (!(operms & UC_PROT_WRITE)) // write protected
+            if (!(operms & UC_PROT_WRITE)) { // write protected
                 // now write protect it again
                 uc->readonly_mem(mr, true);
+            }

             count += len;
             address += len;
             bytes += len;
-        } else // this address is not mapped in yet
+        } else { // this address is not mapped in yet
             break;
+        }
     }

-    if (count == size)
+    if (count == size) {
         return UC_ERR_OK;
-    else
+    } else {
         return UC_ERR_WRITE_UNMAPPED;
+    }
 }

 #define TIMEOUT_STEP 2    // microseconds
@@ -514,8 +558,9 @@ static void *_timeout_fn(void *arg)
     do {
         usleep(TIMEOUT_STEP);
         // perhaps emulation is even done before timeout?
-        if (uc->emulation_done)
+        if (uc->emulation_done) {
             break;
+        }
     } while((uint64_t)(get_clock() - current_time) < uc->timeout);

     // timeout before emulation is done?
@@ -539,9 +584,12 @@ static void hook_count_cb(struct uc_struct *uc, uint64_t address, uint32_t size,
 {
     // count this instruction. ah ah ah.
     uc->emu_counter++;
+    // printf(":: emu counter = %u, at %lx\n", uc->emu_counter, address);

-    if (uc->emu_counter > uc->emu_count)
+    if (uc->emu_counter > uc->emu_count) {
+        // printf(":: emu counter = %u, stop emulation\n", uc->emu_counter);
         uc_emu_stop(uc);
+    }
 }

 static void clear_deleted_hooks(uc_engine *uc)
@@ -573,10 +621,10 @@ uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t time
     // reset the counter
     uc->emu_counter = 0;
     uc->invalid_error = UC_ERR_OK;
-    uc->block_full = false;
     uc->emulation_done = false;
     uc->size_recur_mem = 0;
     uc->timed_out = false;
+    uc->first_tb = true;

     switch(uc->arch) {
         default:
@@ -631,6 +679,16 @@ uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t time
             // TODO: Sparc/Sparc64
             uc_reg_write(uc, UC_SPARC_REG_PC, &begin);
             break;
+#endif
+#ifdef UNICORN_HAS_PPC
+        case UC_ARCH_PPC:
+            uc_reg_write(uc, UC_PPC_REG_PC, &begin);
+            break;
+#endif
+#ifdef UNICORN_HAS_RISCV
+        case UC_ARCH_RISCV:
+            uc_reg_write(uc, UC_RISCV_REG_PC, &begin);
+            break;
 #endif
     }

@@ -659,13 +717,12 @@ uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t time

     uc->addr_end = until;

-    if (timeout)
+    if (timeout) {
         enable_emu_timer(uc, timeout * 1000);   // microseconds -> nanoseconds
-
-    if (uc->vm_start(uc)) {
-        return UC_ERR_RESOURCE;
     }

+    uc->vm_start(uc);
+
     // emulation is done
     uc->emulation_done = true;

@@ -684,14 +741,15 @@ uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t time
 UNICORN_EXPORT
 uc_err uc_emu_stop(uc_engine *uc)
 {
-    if (uc->emulation_done)
+    if (uc->emulation_done) {
         return UC_ERR_OK;
+    }

     uc->stop_request = true;
     // TODO: make this atomic somehow?
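As the write-protection handling in uc_mem_write() above shows, host-side writes temporarily lift UC_PROT_WRITE, so the API can seed memory that the guest itself may not modify. A sketch under the usual 4 KiB page assumption (page-aligned addr, len at most 0x1000):

    /* Map a read-only page and fill it from the host: uc_mem_write()
     * succeeds even though emulated stores to this page would fault. */
    static uc_err seed_rodata(uc_engine *uc, uint64_t addr,
                              const void *data, size_t len)
    {
        uc_err err = uc_mem_map(uc, addr, 0x1000, UC_PROT_READ);
        if (err != UC_ERR_OK) {
            return err;
        }
        return uc_mem_write(uc, addr, data, len);
    }
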
- if (uc->current_cpu) { + if (uc->cpu) { // exit the current TB - cpu_exit(uc->current_cpu); + cpu_exit(uc->cpu); } return UC_ERR_OK; @@ -705,16 +763,17 @@ static bool memory_overlap(struct uc_struct *uc, uint64_t begin, size_t size) for(i = 0; i < uc->mapped_block_count; i++) { // begin address falls inside this region? - if (begin >= uc->mapped_blocks[i]->addr && begin <= uc->mapped_blocks[i]->end - 1) + if (begin >= uc->mapped_blocks[i]->addr && begin <= uc->mapped_blocks[i]->end - 1) { return true; - + } // end address falls inside this region? - if (end >= uc->mapped_blocks[i]->addr && end <= uc->mapped_blocks[i]->end - 1) + if (end >= uc->mapped_blocks[i]->addr && end <= uc->mapped_blocks[i]->end - 1) { return true; - + } // this region falls totally inside this range? - if (begin < uc->mapped_blocks[i]->addr && end > uc->mapped_blocks[i]->end - 1) + if (begin < uc->mapped_blocks[i]->addr && end > uc->mapped_blocks[i]->end - 1) { return true; + } } // not found @@ -726,8 +785,9 @@ static uc_err mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t per { MemoryRegion **regions; - if (block == NULL) + if (block == NULL) { return UC_ERR_NOMEM; + } if ((uc->mapped_block_count & (MEM_BLOCK_INCR - 1)) == 0) { //time to grow regions = (MemoryRegion**)g_realloc(uc->mapped_blocks, @@ -746,25 +806,30 @@ static uc_err mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t per static uc_err mem_map_check(uc_engine *uc, uint64_t address, size_t size, uint32_t perms) { - if (size == 0) + if (size == 0) { // invalid memory mapping return UC_ERR_ARG; + } // address cannot wrapp around - if (address + size - 1 < address) + if (address + size - 1 < address) { return UC_ERR_ARG; + } // address must be aligned to uc->target_page_size - if ((address & uc->target_page_align) != 0) + if ((address & uc->target_page_align) != 0) { return UC_ERR_ARG; + } // size must be multiple of uc->target_page_size - if ((size & uc->target_page_align) != 0) + if ((size & uc->target_page_align) != 0) { return UC_ERR_ARG; + } // check for only valid permissions - if ((perms & ~UC_PROT_ALL) != 0) + if ((perms & ~UC_PROT_ALL) != 0) { return UC_ERR_ARG; + } // this area overlaps existing mapped regions? 
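mem_map_check() above rejects unaligned or oversized requests before the overlap test below runs. Assuming the common 4 KiB target page, the following calls illustrate each rejection path (sketch; addresses are arbitrary and non-overlapping):

    uc_err e1 = uc_mem_map(uc, 0x10000, 0x2000, UC_PROT_ALL); /* OK: aligned address and size */
    uc_err e2 = uc_mem_map(uc, 0x20800, 0x1000, UC_PROT_ALL); /* UC_ERR_ARG: unaligned address */
    uc_err e3 = uc_mem_map(uc, 0x30000, 0x0800, UC_PROT_ALL); /* UC_ERR_ARG: size not a page multiple */
    uc_err e4 = uc_mem_map(uc, 0x40000, 0x1000, 0x100);       /* UC_ERR_ARG: bits outside UC_PROT_ALL */
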
if (memory_overlap(uc, address, size)) { @@ -784,8 +849,9 @@ uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms) } res = mem_map_check(uc, address, size, perms); - if (res) + if (res) { return res; + } return mem_map(uc, address, size, perms, uc->memory_map(uc, address, size, perms)); } @@ -795,18 +861,41 @@ uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t per { uc_err res; - if (ptr == NULL) + if (ptr == NULL) { return UC_ERR_ARG; + } if (uc->mem_redirect) { address = uc->mem_redirect(address); } res = mem_map_check(uc, address, size, perms); + if (res) { + return res; + } + + return mem_map(uc, address, size, UC_PROT_ALL, uc->memory_map_ptr(uc, address, size, perms, ptr)); +} + +UNICORN_EXPORT +uc_err uc_mmio_map(uc_engine *uc, uint64_t address, size_t size, + uc_cb_mmio_read_t read_cb, void *user_data_read, + uc_cb_mmio_write_t write_cb, void *user_data_write) +{ + uc_err res; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + res = mem_map_check(uc, address, size, UC_PROT_ALL); if (res) return res; - return mem_map(uc, address, size, UC_PROT_ALL, uc->memory_map_ptr(uc, address, size, perms, ptr)); + // The callbacks do not need to be checked for NULL here, as their presence + // (or lack thereof) will determine the permissions used. + return mem_map(uc, address, size, UC_PROT_NONE, + uc->memory_map_io(uc, address, size, read_cb, write_cb, user_data_read, user_data_write)); } // Create a backup copy of the indicated MemoryRegion. @@ -854,36 +943,41 @@ static bool split_region(struct uc_struct *uc, MemoryRegion *mr, uint64_t addres // if this region belongs to area [address, address+size], // then there is no work to do. - if (address <= mr->addr && chunk_end >= mr->end) + if (address <= mr->addr && chunk_end >= mr->end) { return true; + } - if (size == 0) + if (size == 0) { // trivial case return true; + } - if (address >= mr->end || chunk_end <= mr->addr) + if (address >= mr->end || chunk_end <= mr->addr) { // impossible case return false; + } - QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { - if (block->offset <= mr->addr && block->length >= (mr->end - mr->addr)) { + QLIST_FOREACH(block, &uc->ram_list.blocks, next) { + if (block->offset <= mr->addr && block->used_length >= (mr->end - mr->addr)) { break; } } - if (block == NULL) + if (block == NULL) { return false; + } // RAM_PREALLOC is not defined outside exec.c and I didn't feel like // moving it - prealloc = !!(block->flags & 1); + prealloc = !!(block->flags & 1); if (block->flags & 1) { backup = block->host; } else { backup = copy_region(uc, mr); - if (backup == NULL) + if (backup == NULL) { return false; + } } // save the essential information required for the split before mr gets deleted @@ -892,8 +986,9 @@ static bool split_region(struct uc_struct *uc, MemoryRegion *mr, uint64_t addres end = mr->end; // unmap this region first, then do split it later - if (uc_mem_unmap(uc, mr->addr, (size_t)int128_get64(mr->size)) != UC_ERR_OK) + if (uc_mem_unmap(uc, mr->addr, (size_t)int128_get64(mr->size)) != UC_ERR_OK) { goto error; + } /* overlapping cases * |------mr------| @@ -903,10 +998,12 @@ static bool split_region(struct uc_struct *uc, MemoryRegion *mr, uint64_t addres */ // adjust some things - if (address < begin) + if (address < begin) { address = begin; - if (chunk_end > end) + } + if (chunk_end > end) { chunk_end = end; + } // compute sub region sizes l_size = (size_t)(address - begin); @@ -919,47 +1016,58 @@ static bool split_region(struct 
uc_struct *uc, MemoryRegion *mr, uint64_t addres // allocation at this point if (l_size > 0) { if (!prealloc) { - if (uc_mem_map(uc, begin, l_size, perms) != UC_ERR_OK) + if (uc_mem_map(uc, begin, l_size, perms) != UC_ERR_OK) { goto error; - if (uc_mem_write(uc, begin, backup, l_size) != UC_ERR_OK) + } + if (uc_mem_write(uc, begin, backup, l_size) != UC_ERR_OK) { goto error; + } } else { - if (uc_mem_map_ptr(uc, begin, l_size, perms, backup) != UC_ERR_OK) + if (uc_mem_map_ptr(uc, begin, l_size, perms, backup) != UC_ERR_OK) { goto error; + } } } if (m_size > 0 && !do_delete) { if (!prealloc) { - if (uc_mem_map(uc, address, m_size, perms) != UC_ERR_OK) + if (uc_mem_map(uc, address, m_size, perms) != UC_ERR_OK) { goto error; - if (uc_mem_write(uc, address, backup + l_size, m_size) != UC_ERR_OK) + } + if (uc_mem_write(uc, address, backup + l_size, m_size) != UC_ERR_OK) { goto error; + } } else { - if (uc_mem_map_ptr(uc, address, m_size, perms, backup + l_size) != UC_ERR_OK) + if (uc_mem_map_ptr(uc, address, m_size, perms, backup + l_size) != UC_ERR_OK) { goto error; + } } } if (r_size > 0) { if (!prealloc) { - if (uc_mem_map(uc, chunk_end, r_size, perms) != UC_ERR_OK) + if (uc_mem_map(uc, chunk_end, r_size, perms) != UC_ERR_OK) { goto error; - if (uc_mem_write(uc, chunk_end, backup + l_size + m_size, r_size) != UC_ERR_OK) + } + if (uc_mem_write(uc, chunk_end, backup + l_size + m_size, r_size) != UC_ERR_OK) { goto error; + } } else { - if (uc_mem_map_ptr(uc, chunk_end, r_size, perms, backup + l_size + m_size) != UC_ERR_OK) + if (uc_mem_map_ptr(uc, chunk_end, r_size, perms, backup + l_size + m_size) != UC_ERR_OK) { goto error; + } } } - if (!prealloc) + if (!prealloc) { free(backup); + } return true; error: - if (!prealloc) + if (!prealloc) { free(backup); + } return false; } @@ -971,29 +1079,34 @@ uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size, uint3 size_t count, len; bool remove_exec = false; - if (size == 0) + if (size == 0) { // trivial case, no change return UC_ERR_OK; + } // address must be aligned to uc->target_page_size - if ((address & uc->target_page_align) != 0) + if ((address & uc->target_page_align) != 0) { return UC_ERR_ARG; + } // size must be multiple of uc->target_page_size - if ((size & uc->target_page_align) != 0) + if ((size & uc->target_page_align) != 0) { return UC_ERR_ARG; + } // check for only valid permissions - if ((perms & ~UC_PROT_ALL) != 0) + if ((perms & ~UC_PROT_ALL) != 0) { return UC_ERR_ARG; + } if (uc->mem_redirect) { address = uc->mem_redirect(address); } // check that user's entire requested block is mapped - if (!check_mem_area(uc, address, size)) + if (!check_mem_area(uc, address, size)) { return UC_ERR_NOMEM; + } // Now we know entire region is mapped, so change permissions // We may need to split regions if this area spans adjacent regions @@ -1002,13 +1115,15 @@ uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size, uint3 while(count < size) { mr = memory_mapping(uc, addr); len = (size_t)MIN(size - count, mr->end - addr); - if (!split_region(uc, mr, addr, len, false)) + if (!split_region(uc, mr, addr, len, false)) { return UC_ERR_NOMEM; + } mr = memory_mapping(uc, addr); // will this remove EXEC permission? 
- if (((mr->perms & UC_PROT_EXEC) != 0) && ((perms & UC_PROT_EXEC) == 0)) + if (((mr->perms & UC_PROT_EXEC) != 0) && ((perms & UC_PROT_EXEC) == 0)) { remove_exec = true; + } mr->perms = perms; uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0); @@ -1032,25 +1147,29 @@ uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size) uint64_t addr; size_t count, len; - if (size == 0) + if (size == 0) { // nothing to unmap return UC_ERR_OK; + } // address must be aligned to uc->target_page_size - if ((address & uc->target_page_align) != 0) + if ((address & uc->target_page_align) != 0) { return UC_ERR_ARG; + } // size must be multiple of uc->target_page_size - if ((size & uc->target_page_align) != 0) + if ((size & uc->target_page_align) != 0) { return UC_ERR_ARG; + } if (uc->mem_redirect) { address = uc->mem_redirect(address); } // check that user's entire requested block is mapped - if (!check_mem_area(uc, address, size)) + if (!check_mem_area(uc, address, size)) { return UC_ERR_NOMEM; + } // Now we know entire region is mapped, so do the unmap // We may need to split regions if this area spans adjacent regions @@ -1059,14 +1178,16 @@ uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size) while(count < size) { mr = memory_mapping(uc, addr); len = (size_t)MIN(size - count, mr->end - addr); - if (!split_region(uc, mr, addr, len, true)) + if (!split_region(uc, mr, addr, len, true)) { return UC_ERR_NOMEM; + } // if we can retrieve the mapping, then no splitting took place // so unmap here mr = memory_mapping(uc, addr); - if (mr != NULL) + if (mr != NULL) { uc->memory_unmap(uc, mr); + } count += len; addr += len; } @@ -1079,8 +1200,9 @@ MemoryRegion *memory_mapping(struct uc_struct* uc, uint64_t address) { unsigned int i; - if (uc->mapped_block_count == 0) + if (uc->mapped_block_count == 0) { return NULL; + } if (uc->mem_redirect) { address = uc->mem_redirect(address); @@ -1089,8 +1211,9 @@ MemoryRegion *memory_mapping(struct uc_struct* uc, uint64_t address) // try with the cache index first i = uc->mapped_block_cache_index; - if (i < uc->mapped_block_count && address >= uc->mapped_blocks[i]->addr && address < uc->mapped_blocks[i]->end) + if (i < uc->mapped_block_count && address >= uc->mapped_blocks[i]->addr && address < uc->mapped_blocks[i]->end) { return uc->mapped_blocks[i]; + } for(i = 0; i < uc->mapped_block_count; i++) { if (address >= uc->mapped_blocks[i]->addr && address <= uc->mapped_blocks[i]->end - 1) { @@ -1213,29 +1336,58 @@ uc_err uc_hook_del(uc_engine *uc, uc_hook hh) } // TCG helper -void helper_uc_tracecode(int32_t size, uc_hook_type type, void *handle, int64_t address); -void helper_uc_tracecode(int32_t size, uc_hook_type type, void *handle, int64_t address) +void helper_uc_tracecode(int32_t size, uc_hook_idx index, void *handle, int64_t address); +void helper_uc_tracecode(int32_t size, uc_hook_idx index, void *handle, int64_t address) { struct uc_struct *uc = handle; struct list_item *cur; struct hook *hook; + int hook_flags = index & UC_HOOK_FLAG_MASK; // The index here may contain additional flags. See the comments of uc_hook_idx for details. 
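The dispatch loop that follows re-checks uc->stop_request after every callback, which is what makes stopping from inside a code hook safe. A sketch of that common pattern (uc_hook_add as declared in unicorn.h; passing begin > end hooks every address):

    /* Stop after the first traced instruction: helper_uc_tracecode() sees
     * uc->stop_request as soon as the callback returns and breaks out. */
    static void stop_once(uc_engine *uc, uint64_t address, uint32_t size,
                          void *user_data)
    {
        uc_emu_stop(uc);
    }

    uc_hook hh;
    uc_hook_add(uc, &hh, UC_HOOK_CODE, stop_once, NULL, 1, 0);
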
+
+    index = index & UC_HOOK_IDX_MASK;

     // sync PC in CPUArchState with address
     if (uc->set_pc) {
         uc->set_pc(uc, address);
     }

-    for (cur = uc->hook[type].head; cur != NULL && (hook = (struct hook *)cur->data); cur = cur->next) {
-        if (hook->to_delete)
+    // the last callback may have already asked to stop emulation
+    if (uc->stop_request && !(hook_flags & UC_HOOK_FLAG_NO_STOP)) {
+        return;
+    }
+
+    for (cur = uc->hook[index].head; cur != NULL && (hook = (struct hook *)cur->data); cur = cur->next) {
+        if (hook->to_delete) {
             continue;
+        }
+
+        // on invalid block/instruction, call instruction counter (if enabled), then quit
+        if (size == 0) {
+            if (index == UC_HOOK_CODE_IDX && uc->count_hook) {
+                // this is the instruction counter (first hook in the list)
+                ((uc_cb_hookcode_t)hook->callback)(uc, address, size, hook->user_data);
+            }
+
+            return;
+        }
+
         if (HOOK_BOUND_CHECK(hook, (uint64_t)address)) {
             ((uc_cb_hookcode_t)hook->callback)(uc, address, size, hook->user_data);
         }
+
+        // the last callback may have already asked to stop emulation
+        // Unicorn:
+        //   In an ARM IT block, we behave as if the emulation continues normally. No check_exit_request
+        //   is generated and the hooks are triggered normally. In other words, the whole IT block is
+        //   treated as a single instruction.
+        if (uc->stop_request && !(hook_flags & UC_HOOK_FLAG_NO_STOP)) {
+            break;
+        }
     }
 }

 UNICORN_EXPORT
-uint32_t uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count)
+uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count)
 {
     uint32_t i;
     uc_mem_region *r = NULL;
@@ -1282,8 +1434,7 @@ uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result)
             return uc->query(uc, type, result);
         }
 #endif
-        *result = uc->mode;
-        break;
+        return UC_ERR_ARG;

     case UC_QUERY_TIMEOUT:
         *result = uc->timed_out;
@@ -1293,63 +1444,18 @@ uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result)
     return UC_ERR_OK;
 }

-static size_t cpu_context_size(uc_arch arch, uc_mode mode)
-{
-    // each of these constants is defined by offsetof(CPUXYZState, tlb_table)
-    // tbl_table is the first entry in the CPU_COMMON macro, so it marks the end
-    // of the interesting CPU registers
-    switch (arch) {
-#ifdef UNICORN_HAS_M68K
-        case UC_ARCH_M68K:  return M68K_REGS_STORAGE_SIZE;
-#endif
-#ifdef UNICORN_HAS_X86
-        case UC_ARCH_X86:   return X86_REGS_STORAGE_SIZE;
-#endif
-#ifdef UNICORN_HAS_ARM
-        case UC_ARCH_ARM:   return mode & UC_MODE_BIG_ENDIAN ?
-#ifdef UNICORN_HAS_ARMEB
-            ARM_REGS_STORAGE_SIZE_armeb
-#else
-            0
-#endif
-            : ARM_REGS_STORAGE_SIZE_arm;
-#endif
-#ifdef UNICORN_HAS_ARM64
-        case UC_ARCH_ARM64: return mode & UC_MODE_BIG_ENDIAN ? ARM64_REGS_STORAGE_SIZE_aarch64eb : ARM64_REGS_STORAGE_SIZE_aarch64;
-#endif
-#ifdef UNICORN_HAS_MIPS
-        case UC_ARCH_MIPS:
-            if (mode & UC_MODE_MIPS64) {
-                if (mode & UC_MODE_BIG_ENDIAN) {
-                    return MIPS64_REGS_STORAGE_SIZE_mips64;
-                } else {
-                    return MIPS64_REGS_STORAGE_SIZE_mips64el;
-                }
-            } else {
-                if (mode & UC_MODE_BIG_ENDIAN) {
-                    return MIPS_REGS_STORAGE_SIZE_mips;
-                } else {
-                    return MIPS_REGS_STORAGE_SIZE_mipsel;
-                }
-            }
-#endif
-#ifdef UNICORN_HAS_SPARC
-        case UC_ARCH_SPARC: return mode & UC_MODE_SPARC64 ? SPARC64_REGS_STORAGE_SIZE : SPARC_REGS_STORAGE_SIZE;
-#endif
-        default: return 0;
-    }
-}
-
 UNICORN_EXPORT
 uc_err uc_context_alloc(uc_engine *uc, uc_context **context)
 {
     struct uc_context **_context = context;
     size_t size = uc_context_size(uc);

-    *_context = malloc(size);
+    *_context = g_malloc(size);
     if (*_context) {
         (*_context)->jmp_env_size = sizeof(*uc->cpu->jmp_env);
-        (*_context)->context_size = cpu_context_size(uc->arch, uc->mode);
+        (*_context)->context_size = uc->cpu_context_size;
+        (*_context)->arch = uc->arch;
+        (*_context)->mode = uc->mode;
         (*_context)->uc = uc;
         if (list_insert(&uc->saved_contexts, *_context)) {
             return UC_ERR_OK;
@@ -1372,7 +1478,7 @@ UNICORN_EXPORT
 size_t uc_context_size(uc_engine *uc)
 {
     // return the total size of struct uc_context
-    return sizeof(uc_context) + cpu_context_size(uc->arch, uc->mode) + sizeof(*uc->cpu->jmp_env);
+    return sizeof(uc_context) + uc->cpu_context_size + sizeof(*uc->cpu->jmp_env);
 }

 UNICORN_EXPORT
@@ -1384,6 +1490,166 @@ uc_err uc_context_save(uc_engine *uc, uc_context *context)
     return UC_ERR_OK;
 }
+
+
+UNICORN_EXPORT
+uc_err uc_context_reg_write(uc_context *ctx, int regid, const void *value)
+{
+    return uc_context_reg_write_batch(ctx, &regid, (void *const *)&value, 1);
+}
+
+UNICORN_EXPORT
+uc_err uc_context_reg_read(uc_context *ctx, int regid, void *value)
+{
+    return uc_context_reg_read_batch(ctx, &regid, &value, 1);
+}
+
+// Keep in mind that we don't have a uc_engine when reading/writing the registers of a context.
+static void find_context_reg_rw_function(uc_arch arch, uc_mode mode, context_reg_rw_t *rw)
+{
+    // We believe that the arch/mode pair is correct.
+    switch(arch) {
+    default:
+        rw->context_reg_read = NULL;
+        rw->context_reg_write = NULL;
+        break;
+#ifdef UNICORN_HAS_M68K
+    case UC_ARCH_M68K:
+        rw->context_reg_read = m68k_context_reg_read;
+        rw->context_reg_write = m68k_context_reg_write;
+        break;
+#endif
+#ifdef UNICORN_HAS_X86
+    case UC_ARCH_X86:
+        rw->context_reg_read = x86_context_reg_read;
+        rw->context_reg_write = x86_context_reg_write;
+        break;
+#endif
+#ifdef UNICORN_HAS_ARM
+    case UC_ARCH_ARM:
+        if (mode & UC_MODE_BIG_ENDIAN) {
+            rw->context_reg_read = armeb_context_reg_read;
+            rw->context_reg_write = armeb_context_reg_write;
+        } else {
+            rw->context_reg_read = arm_context_reg_read;
+            rw->context_reg_write = arm_context_reg_write;
+        }
+        break;
+#endif
+#ifdef UNICORN_HAS_ARM64
+    case UC_ARCH_ARM64:
+        if (mode & UC_MODE_BIG_ENDIAN) {
+            rw->context_reg_read = arm64eb_context_reg_read;
+            rw->context_reg_write = arm64eb_context_reg_write;
+        } else {
+            rw->context_reg_read = arm64_context_reg_read;
+            rw->context_reg_write = arm64_context_reg_write;
+        }
+        break;
+#endif
+
+#if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) || defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL)
+    case UC_ARCH_MIPS:
+        if (mode & UC_MODE_BIG_ENDIAN) {
+#ifdef UNICORN_HAS_MIPS
+            if (mode & UC_MODE_MIPS32) {
+                rw->context_reg_read = mips_context_reg_read;
+                rw->context_reg_write = mips_context_reg_write;
+            }
+#endif
+#ifdef UNICORN_HAS_MIPS64
+            if (mode & UC_MODE_MIPS64) {
+                rw->context_reg_read = mips64_context_reg_read;
+                rw->context_reg_write = mips64_context_reg_write;
+            }
+#endif
+        } else {    // little endian
+#ifdef UNICORN_HAS_MIPSEL
+            if (mode & UC_MODE_MIPS32) {
+                rw->context_reg_read = mipsel_context_reg_read;
+                rw->context_reg_write = mipsel_context_reg_write;
+            }
+#endif
+#ifdef UNICORN_HAS_MIPS64EL
+            if (mode & UC_MODE_MIPS64) {
+                rw->context_reg_read = mips64el_context_reg_read;
+                rw->context_reg_write = mips64el_context_reg_write;
+            }
+#endif + } + break; +#endif + +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: + if (mode & UC_MODE_SPARC64) { + rw->context_reg_read = sparc64_context_reg_read; + rw->context_reg_write = sparc64_context_reg_write; + } else { + rw->context_reg_read = sparc_context_reg_read; + rw->context_reg_write = sparc_context_reg_write; + } + break; +#endif +#ifdef UNICORN_HAS_PPC + case UC_ARCH_PPC: + if (mode & UC_MODE_PPC64) { + rw->context_reg_read = ppc64_context_reg_read; + rw->context_reg_write = ppc64_context_reg_write; + } else { + rw->context_reg_read = ppc_context_reg_read; + rw->context_reg_write = ppc_context_reg_write; + } + break; +#endif +#ifdef UNICORN_HAS_RISCV + case UC_ARCH_RISCV: + if (mode & UC_MODE_RISCV32) { + rw->context_reg_read = riscv32_context_reg_read; + rw->context_reg_write = riscv32_context_reg_write; + } else if (mode & UC_MODE_RISCV64) { + rw->context_reg_read = riscv64_context_reg_read; + rw->context_reg_write = riscv64_context_reg_write; + } + break; +#endif + } + + return; +} + +UNICORN_EXPORT +uc_err uc_context_reg_write_batch(uc_context *ctx, int *ids, void *const *vals, int count) +{ + int ret = UC_ERR_OK; + context_reg_rw_t rw; + + find_context_reg_rw_function(ctx->arch, ctx->mode, &rw); + if (rw.context_reg_write) { + ret = rw.context_reg_write(ctx, (unsigned int *)ids, vals, count); + } else { + return UC_ERR_HANDLE; + } + + return ret; +} + +UNICORN_EXPORT +uc_err uc_context_reg_read_batch(uc_context *ctx, int *ids, void **vals, int count) +{ + int ret = UC_ERR_OK; + context_reg_rw_t rw; + + find_context_reg_rw_function(ctx->arch, ctx->mode, &rw); + if (rw.context_reg_read) { + ret = rw.context_reg_read(ctx, (unsigned int *)ids, vals, count); + } else { + return UC_ERR_HANDLE; + } + + return ret; +} + UNICORN_EXPORT uc_err uc_context_restore(uc_engine *uc, uc_context *context) { @@ -1404,4 +1669,4 @@ uc_err uc_context_free(uc_context *context) list_remove(&uc->saved_contexts, context); } return uc_free(context); -} \ No newline at end of file +} diff --git a/windows_export.bat b/windows_export.bat deleted file mode 100644 index 430efccd..00000000 --- a/windows_export.bat +++ /dev/null @@ -1,43 +0,0 @@ -@echo off -setlocal ENABLEDELAYEDEXPANSION - -if not "%1"=="x86" if not "%1"=="x64" ( - echo Usage: windows_export.bat (x86 ^| x64^) - exit /b 1 -) - -:: This script invokes the Visual Studio linker to construct a static library file that can be used outside of Mingw. -:: The unicorn.def file that it references below is produced by the Mingw compiler via a linker flag. -:: The arch (x86 or x64) we are working on should be passed via the first argument to this script. - -:: Look up the Visual Studio install path via the registry -:: http://stackoverflow.com/questions/445167/how-can-i-get-the-value-of-a-registry-key-from-within-a-batch-script -:: There's no way to get the current installed VS version other than enumerating a version whitelist -:: If anyone ever tells you that Windows is a reasonable operating system, they are wrong - -echo Searching for installed visual studio version... 
-for %%V in ( -HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\12.0 -HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\14.0 -HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\15.0 -HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\12.0 -HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\14.0 -HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\15.0 -) do ( - echo ...trying registry key %%V - for /F "usebackq tokens=3*" %%A IN (`REG QUERY %%V /v InstallDir 2^>NUL`) DO ( - set appdir=%%A %%B - ) - if not "!appdir!"=="" goto :break -) -:break - -if "%appdir%"=="" ( - echo Could not find an installed visual studio version. Abandoning windows static lib export operation. -) else ( - :: Add the Visual Studio binaries to our path and run the linker - call "%appdir%..\..\VC\vcvarsall.bat" %1 - call lib /machine:%1 /def:unicorn.def -) - -exit /b 0
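
Since a saved context now records its own arch and mode, the new uc_context_reg_* calls work without the originating engine. A usage sketch, assuming an engine opened as UC_ARCH_X86/UC_MODE_64 (the register constant is illustrative):

    /* Snapshot the CPU state and read a register back from the snapshot,
     * not from the live engine. */
    static void snapshot_demo(uc_engine *uc)
    {
        uc_context *ctx = NULL;
        uint64_t rax = 0;

        if (uc_context_alloc(uc, &ctx) != UC_ERR_OK) {
            return;
        }
        uc_context_save(uc, ctx);
        uc_context_reg_read(ctx, UC_X86_REG_RAX, &rax);
        uc_context_free(ctx);
    }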